diff --git a/.cirrus.yml b/.cirrus.yml index 02c43a074a..4895987da4 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -10,9 +10,9 @@ windows_msys2_task: memory: 8G env: CIRRUS_SHELL: powershell - MSYS: winsymlinks:nativestrict + MSYS: winsymlinks:native MSYSTEM: MINGW64 - MSYS2_URL: https://github.com/msys2/msys2-installer/releases/download/2021-04-19/msys2-base-x86_64-20210419.sfx.exe + MSYS2_URL: https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-base-x86_64-20220603.sfx.exe MSYS2_FINGERPRINT: 0 MSYS2_PACKAGES: " diffutils git grep make pkg-config sed @@ -32,7 +32,6 @@ windows_msys2_task: mingw-w64-x86_64-libgcrypt mingw-w64-x86_64-libpng mingw-w64-x86_64-libssh - mingw-w64-x86_64-libxml2 mingw-w64-x86_64-snappy mingw-w64-x86_64-libusb mingw-w64-x86_64-usbredir diff --git a/.gitattributes b/.gitattributes index 07f430e944..a217cb7bfe 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,3 +1,4 @@ *.c.inc diff=c *.h.inc diff=c +*.m diff=objc *.py diff=python diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f7db4c4a2c..6e2f74efb2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -69,7 +69,7 @@ jobs: artifact_name: xemu-win-release artifact_filename: xemu-win-release.zip env: - DOCKER_IMAGE_NAME: ghcr.io/xemu-project/xemu-win64-toolchain:sha-99d540a + DOCKER_IMAGE_NAME: ghcr.io/xemu-project/xemu-win64-toolchain:sha-d0d3e7b steps: - name: Download source package diff --git a/.gitignore b/.gitignore index 7b867d98ba..bfac196fad 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,13 @@ /GNUmakefile /build/ +/.cache/ +/.vscode/ *.pyc .sdk .stgit-* .git-submodule-status +.clang-format +.gdb_history cscope.* tags TAGS @@ -15,6 +19,7 @@ GTAGS *.depend_raw *.swp *.patch +*.gcov build.log /dist diff --git a/.gitlab-ci.d/base.yml b/.gitlab-ci.d/base.yml new file mode 100644 index 0000000000..69b36c148a --- /dev/null +++ b/.gitlab-ci.d/base.yml @@ -0,0 +1,72 @@ + +# The order of rules defined here is critically important. +# They are evaluated in order and first match wins. 
+# +# Thus we group them into a number of stages, ordered from +# most restrictive to least restrictive +# +.base_job_template: + rules: + ############################################################# + # Stage 1: exclude scenarios where we definitely don't + # want jobs to run + ############################################################# + + # Cirrus jobs can't run unless the creds / target repo are set + - if: '$QEMU_JOB_CIRRUS && ($CIRRUS_GITHUB_REPO == null || $CIRRUS_API_TOKEN == null)' + when: never + + # Publishing jobs should only run on the default branch in upstream + - if: '$QEMU_JOB_PUBLISH == "1" && $CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH' + when: never + + # Non-publishing jobs should only run on staging branches in upstream + - if: '$QEMU_JOB_PUBLISH != "1" && $CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH !~ /staging/' + when: never + + # Jobs only intended for forks should always be skipped on upstream + - if: '$QEMU_JOB_ONLY_FORKS == "1" && $CI_PROJECT_NAMESPACE == "qemu-project"' + when: never + + # Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set + - if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"' + when: never + + # Avocado jobs don't run in forks unless $QEMU_CI_AVOCADO_TESTING is set + - if: '$QEMU_JOB_AVOCADO && $QEMU_CI_AVOCADO_TESTING != "1" && $CI_PROJECT_NAMESPACE != "qemu-project"' + when: never + + + ############################################################# + # Stage 2: fine tune execution of jobs in specific scenarios + # where the catch all logic is inappropriate + ############################################################# + + # Optional jobs should not be run unless manually triggered + - if: '$QEMU_JOB_OPTIONAL' + when: manual + allow_failure: true + + # Skipped jobs should not be run unless manually triggered + - if: '$QEMU_JOB_SKIPPED' + when: manual + allow_failure: true + + # Avocado jobs can be manually started in forks if $QEMU_CI_AVOCADO_TESTING is unset + - if: '$QEMU_JOB_AVOCADO && $CI_PROJECT_NAMESPACE != "qemu-project"' + when: manual + allow_failure: true + + + ############################################################# + # Stage 3: catch all logic applying to any job not matching + # an earlier criterion + ############################################################# + + # Fork pipeline jobs don't start automatically unless + # QEMU_CI=2 is set + - if: '$QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"' + when: manual + + # Jobs can run if any jobs they depend on were successful + - when: on_success diff --git a/.gitlab-ci.d/buildtest-template.yml b/.gitlab-ci.d/buildtest-template.yml index fcbcc4e627..73ecfabb8d 100644 --- a/.gitlab-ci.d/buildtest-template.yml +++ b/.gitlab-ci.d/buildtest-template.yml @@ -1,4 +1,5 @@ .native_build_job_template: + extends: .base_job_template stage: build image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest before_script: @@ -26,7 +27,8 @@ make -j"$JOBS" $MAKE_CHECK_ARGS ; fi -.native_test_job_template: +.common_test_job_template: + extends: .base_job_template stage: test image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest script: @@ -37,8 +39,18 @@ # Avoid recompiling by hiding ninja with NINJA=":" - make NINJA=":" $MAKE_CHECK_ARGS -.acceptance_test_job_template: - extends: .native_test_job_template +.native_test_job_template: + extends: .common_test_job_template + artifacts: + name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" + expire_in: 7 days + paths: + - build/meson-logs/testlog.txt + reports: + junit:
build/meson-logs/testlog.junit.xml + +.avocado_test_job_template: + extends: .common_test_job_template cache: key: "${CI_JOB_NAME}-cache" paths: @@ -67,15 +79,5 @@ after_script: - cd build - du -chs ${CI_PROJECT_DIR}/avocado-cache - rules: - # Only run these jobs if running on the mainstream namespace, - # or if the user set the QEMU_CI_AVOCADO_TESTING variable (either - # in its namespace setting or via git-push option, see documentation - # in /.gitlab-ci.yml of this repository). - - if: '$CI_PROJECT_NAMESPACE == "qemu-project"' - when: on_success - - if: '$QEMU_CI_AVOCADO_TESTING' - when: on_success - # Otherwise, set to manual (the jobs are created but not run). - - when: manual - allow_failure: true + variables: + QEMU_JOB_AVOCADO: 1 diff --git a/.gitlab-ci.d/buildtest.yml b/.gitlab-ci.d/buildtest.yml index 903ee65f32..10886bb414 100644 --- a/.gitlab-ci.d/buildtest.yml +++ b/.gitlab-ci.d/buildtest.yml @@ -24,16 +24,16 @@ check-system-alpine: artifacts: true variables: IMAGE: alpine - MAKE_CHECK_ARGS: check + MAKE_CHECK_ARGS: check-unit check-qtest -acceptance-system-alpine: - extends: .acceptance_test_job_template +avocado-system-alpine: + extends: .avocado_test_job_template needs: - job: build-system-alpine artifacts: true variables: IMAGE: alpine - MAKE_CHECK_ARGS: check-acceptance + MAKE_CHECK_ARGS: check-avocado build-system-ubuntu: extends: .native_build_job_template @@ -41,7 +41,7 @@ build-system-ubuntu: job: amd64-ubuntu2004-container variables: IMAGE: ubuntu2004 - CONFIGURE_ARGS: --enable-docs --enable-fdt=system --enable-slirp=system + CONFIGURE_ARGS: --enable-docs --enable-fdt=system --enable-capstone TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu microblazeel-softmmu mips64el-softmmu MAKE_CHECK_ARGS: check-build @@ -59,14 +59,14 @@ check-system-ubuntu: IMAGE: ubuntu2004 MAKE_CHECK_ARGS: check -acceptance-system-ubuntu: - extends: .acceptance_test_job_template +avocado-system-ubuntu: + extends: .avocado_test_job_template needs: - job: build-system-ubuntu artifacts: true variables: IMAGE: ubuntu2004 - MAKE_CHECK_ARGS: check-acceptance + MAKE_CHECK_ARGS: check-avocado build-system-debian: extends: .native_build_job_template @@ -74,7 +74,6 @@ build-system-debian: job: amd64-debian-container variables: IMAGE: debian-amd64 - CONFIGURE_ARGS: --enable-fdt=system TARGETS: arm-softmmu avr-softmmu i386-softmmu mipsel-softmmu riscv64-softmmu sh4eb-softmmu sparc-softmmu xtensaeb-softmmu MAKE_CHECK_ARGS: check-build @@ -92,14 +91,26 @@ check-system-debian: IMAGE: debian-amd64 MAKE_CHECK_ARGS: check -acceptance-system-debian: - extends: .acceptance_test_job_template +avocado-system-debian: + extends: .avocado_test_job_template needs: - job: build-system-debian artifacts: true variables: IMAGE: debian-amd64 - MAKE_CHECK_ARGS: check-acceptance + MAKE_CHECK_ARGS: check-avocado + +crash-test-debian: + extends: .native_test_job_template + needs: + - job: build-system-debian + artifacts: true + variables: + IMAGE: debian-amd64 + script: + - cd build + - make NINJA=":" check-venv + - tests/venv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386 build-system-fedora: extends: .native_build_job_template @@ -108,7 +119,7 @@ build-system-fedora: variables: IMAGE: fedora CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs - --enable-fdt=system --enable-slirp=system --enable-capstone=system + --enable-fdt=system --enable-slirp --enable-capstone TARGETS: tricore-softmmu microblaze-softmmu mips-softmmu xtensa-softmmu m68k-softmmu riscv32-softmmu 
ppc-softmmu sparc64-softmmu MAKE_CHECK_ARGS: check-build @@ -126,14 +137,27 @@ check-system-fedora: IMAGE: fedora MAKE_CHECK_ARGS: check -acceptance-system-fedora: - extends: .acceptance_test_job_template +avocado-system-fedora: + extends: .avocado_test_job_template needs: - job: build-system-fedora artifacts: true variables: IMAGE: fedora - MAKE_CHECK_ARGS: check-acceptance + MAKE_CHECK_ARGS: check-avocado + +crash-test-fedora: + extends: .native_test_job_template + needs: + - job: build-system-fedora + artifacts: true + variables: + IMAGE: fedora + script: + - cd build + - make NINJA=":" check-venv + - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc + - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32 build-system-centos: extends: .native_build_job_template @@ -142,7 +166,8 @@ build-system-centos: variables: IMAGE: centos8 CONFIGURE_ARGS: --disable-nettle --enable-gcrypt --enable-fdt=system - --enable-modules --enable-trace-backends=dtrace + --enable-modules --enable-trace-backends=dtrace --enable-docs + --enable-vfio-user-server TARGETS: ppc64-softmmu or1k-softmmu s390x-softmmu x86_64-softmmu rx-softmmu sh4-softmmu nios2-softmmu MAKE_CHECK_ARGS: check-build @@ -160,14 +185,14 @@ check-system-centos: IMAGE: centos8 MAKE_CHECK_ARGS: check -acceptance-system-centos: - extends: .acceptance_test_job_template +avocado-system-centos: + extends: .avocado_test_job_template needs: - job: build-system-centos artifacts: true variables: IMAGE: centos8 - MAKE_CHECK_ARGS: check-acceptance + MAKE_CHECK_ARGS: check-avocado build-system-opensuse: extends: .native_build_job_template @@ -192,95 +217,16 @@ check-system-opensuse: IMAGE: opensuse-leap MAKE_CHECK_ARGS: check -acceptance-system-opensuse: - extends: .acceptance_test_job_template +avocado-system-opensuse: + extends: .avocado_test_job_template needs: - job: build-system-opensuse artifacts: true variables: IMAGE: opensuse-leap - MAKE_CHECK_ARGS: check-acceptance + MAKE_CHECK_ARGS: check-avocado -build-disabled: - extends: .native_build_job_template - needs: - job: amd64-fedora-container - variables: - IMAGE: fedora - CONFIGURE_ARGS: - --disable-attr - --disable-auth-pam - --disable-avx2 - --disable-bochs - --disable-brlapi - --disable-bzip2 - --disable-cap-ng - --disable-capstone - --disable-cloop - --disable-coroutine-pool - --disable-curl - --disable-curses - --disable-dmg - --disable-docs - --disable-gcrypt - --disable-glusterfs - --disable-gnutls - --disable-gtk - --disable-guest-agent - --disable-iconv - --disable-keyring - --disable-kvm - --disable-libiscsi - --disable-libpmem - --disable-libssh - --disable-libudev - --disable-libusb - --disable-libxml2 - --disable-linux-aio - --disable-live-block-migration - --disable-lzo - --disable-malloc-trim - --disable-mpath - --disable-nettle - --disable-numa - --disable-opengl - --disable-parallels - --disable-pie - --disable-qcow1 - --disable-qed - --disable-qom-cast-debug - --disable-rbd - --disable-rdma - --disable-replication - --disable-sdl - --disable-seccomp - --disable-slirp - --disable-smartcard - --disable-snappy - --disable-sparse - --disable-spice - --disable-strip - --disable-tpm - --disable-usb-redir - --disable-vdi - --disable-vhost-crypto - --disable-vhost-net - --disable-vhost-scsi - --disable-vhost-kernel - --disable-vhost-user - --disable-vhost-vdpa - --disable-vhost-vsock - --disable-virglrenderer - --disable-vnc - --disable-vte - --disable-vvfat - --disable-xen - --disable-zstd - TARGETS: arm-softmmu i386-softmmu ppc64-softmmu 
mips64-softmmu - s390x-softmmu i386-linux-user - MAKE_CHECK_ARGS: check-qtest SPEED=slow - # This jobs explicitly disable TCG (--disable-tcg), KVM is detected by # the configure script. The container doesn't contain Xen headers so # Xen accelerator is not detected / selected. As result it build the @@ -305,11 +251,11 @@ build-tcg-disabled: - cd tests/qemu-iotests/ - ./check -raw 001 002 003 004 005 008 009 010 011 012 021 025 032 033 048 052 063 077 086 101 104 106 113 148 150 151 152 157 159 160 163 - 170 171 183 184 192 194 208 221 222 226 227 236 253 277 + 170 171 183 184 192 194 208 221 226 227 236 253 277 image-fleecing - ./check -qcow2 028 051 056 057 058 065 068 082 085 091 095 096 102 122 124 132 139 142 144 145 151 152 155 157 165 194 196 200 202 - 208 209 216 218 222 227 234 246 247 248 250 254 255 257 258 - 260 261 262 263 264 270 272 273 277 279 + 208 209 216 218 227 234 246 247 248 250 254 255 257 258 + 260 261 262 263 264 270 272 273 277 279 image-fleecing build-user: extends: .native_build_job_template @@ -381,6 +327,7 @@ clang-user: extends: .native_build_job_template needs: job: amd64-debian-user-cross-container + timeout: 70m variables: IMAGE: debian-all-test-cross CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system @@ -392,12 +339,10 @@ clang-user: # On gitlab runners, default value sometimes end up calling 2 lds concurrently and # triggers an Out-Of-Memory error # -# Since slirp callbacks are used in QEMU Timers, slirp needs to be compiled together -# with QEMU and linked as a static library to avoid false positives in CFI checks. -# This can be accomplished by using -enable-slirp=git, which avoids the use of -# a system-wide version of the library +# Since slirp callbacks are used in QEMU Timers, we cannot use libslirp with +# CFI builds, and thus have to disable it here. # -# Split in three sets of build/check/acceptance to limit the execution time of each +# Split in three sets of build/check/avocado to limit the execution time of each # job build-cfi-aarch64: extends: .native_build_job_template @@ -408,20 +353,18 @@ build-cfi-aarch64: AR: llvm-ar IMAGE: fedora CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug - --enable-safe-stack --enable-slirp=git + --enable-safe-stack --disable-slirp TARGETS: aarch64-softmmu MAKE_CHECK_ARGS: check-build - timeout: 70m + # FIXME: This job is often failing, likely due to out-of-memory problems in + # the constrained containers of the shared runners. Thus this is marked as + # skipped until the situation has been solved. + QEMU_JOB_SKIPPED: 1 + timeout: 90m artifacts: expire_in: 2 days paths: - build - rules: - # FIXME: This job is often failing, likely due to out-of-memory problems in - # the constrained containers of the shared runners. Thus this is marked as - # manual until the situation has been solved. 
- - when: manual - allow_failure: true check-cfi-aarch64: extends: .native_test_job_template @@ -432,14 +375,14 @@ check-cfi-aarch64: IMAGE: fedora MAKE_CHECK_ARGS: check -acceptance-cfi-aarch64: - extends: .acceptance_test_job_template +avocado-cfi-aarch64: + extends: .avocado_test_job_template needs: - job: build-cfi-aarch64 artifacts: true variables: IMAGE: fedora - MAKE_CHECK_ARGS: check-acceptance + MAKE_CHECK_ARGS: check-avocado build-cfi-ppc64-s390x: extends: .native_build_job_template @@ -450,20 +393,18 @@ build-cfi-ppc64-s390x: AR: llvm-ar IMAGE: fedora CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug - --enable-safe-stack --enable-slirp=git + --enable-safe-stack --disable-slirp TARGETS: ppc64-softmmu s390x-softmmu MAKE_CHECK_ARGS: check-build - timeout: 70m + # FIXME: This job is often failing, likely due to out-of-memory problems in + # the constrained containers of the shared runners. Thus this is marked as + # skipped until the situation has been solved. + QEMU_JOB_SKIPPED: 1 + timeout: 80m artifacts: expire_in: 2 days paths: - build - rules: - # FIXME: This job is often failing, likely due to out-of-memory problems in - # the constrained containers of the shared runners. Thus this is marked as - # manual until the situation has been solved. - - when: manual - allow_failure: true check-cfi-ppc64-s390x: extends: .native_test_job_template @@ -474,14 +415,14 @@ check-cfi-ppc64-s390x: IMAGE: fedora MAKE_CHECK_ARGS: check -acceptance-cfi-ppc64-s390x: - extends: .acceptance_test_job_template +avocado-cfi-ppc64-s390x: + extends: .avocado_test_job_template needs: - job: build-cfi-ppc64-s390x artifacts: true variables: IMAGE: fedora - MAKE_CHECK_ARGS: check-acceptance + MAKE_CHECK_ARGS: check-avocado build-cfi-x86_64: extends: .native_build_job_template @@ -492,7 +433,7 @@ build-cfi-x86_64: AR: llvm-ar IMAGE: fedora CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug - --enable-safe-stack --enable-slirp=git + --enable-safe-stack --disable-slirp TARGETS: x86_64-softmmu MAKE_CHECK_ARGS: check-build timeout: 70m @@ -510,14 +451,14 @@ check-cfi-x86_64: IMAGE: fedora MAKE_CHECK_ARGS: check -acceptance-cfi-x86_64: - extends: .acceptance_test_job_template +avocado-cfi-x86_64: + extends: .avocado_test_job_template needs: - job: build-cfi-x86_64 artifacts: true variables: IMAGE: fedora - MAKE_CHECK_ARGS: check-acceptance + MAKE_CHECK_ARGS: check-avocado tsan-build: extends: .native_build_job_template @@ -526,37 +467,10 @@ tsan-build: variables: IMAGE: ubuntu2004 CONFIGURE_ARGS: --enable-tsan --cc=clang-10 --cxx=clang++-10 - --enable-trace-backends=ust --enable-fdt=system --enable-slirp=system + --enable-trace-backends=ust --enable-fdt=system --disable-slirp TARGETS: x86_64-softmmu ppc64-softmmu riscv64-softmmu x86_64-linux-user MAKE_CHECK_ARGS: bench V=1 -# These targets are on the way out -build-deprecated: - extends: .native_build_job_template - needs: - job: amd64-debian-user-cross-container - variables: - IMAGE: debian-all-test-cross - CONFIGURE_ARGS: --disable-tools - MAKE_CHECK_ARGS: build-tcg - TARGETS: ppc64abi32-linux-user - artifacts: - expire_in: 2 days - paths: - - build - -# We split the check-tcg step as test failures are expected but we still -# want to catch the build breaking. 
-check-deprecated: - extends: .native_test_job_template - needs: - - job: build-deprecated - artifacts: true - variables: - IMAGE: debian-all-test-cross - MAKE_CHECK_ARGS: check-tcg - allow_failure: true - # gprof/gcov are GCC features build-gprof-gcov: extends: .native_build_job_template @@ -580,7 +494,17 @@ check-gprof-gcov: IMAGE: ubuntu2004 MAKE_CHECK_ARGS: check after_script: - - ${CI_PROJECT_DIR}/scripts/ci/coverage-summary.sh + - cd build + - gcovr --xml-pretty --exclude-unreachable-branches --print-summary + -o coverage.xml --root ${CI_PROJECT_DIR} . *.p + coverage: /^\s*lines:\s*\d+.\d+\%/ + artifacts: + name: ${CI_JOB_NAME}-${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHA} + expire_in: 2 days + reports: + coverage_report: + coverage_format: cobertura + path: build/coverage.xml build-oss-fuzz: extends: .native_build_job_template @@ -599,8 +523,6 @@ build-oss-fuzz: echo Testing ${fuzzer} ... ; "${fuzzer}" -runs=1 -seed=1 || exit 1 ; done - # Unrelated to fuzzer: run some tests with -fsanitize=address - - cd build-oss-fuzz && make check-qtest-i386 check-unit build-tci: extends: .native_build_job_template @@ -649,20 +571,25 @@ build-without-default-devices: build-without-default-features: extends: .native_build_job_template needs: - job: amd64-debian-container + job: amd64-fedora-container variables: - IMAGE: debian-amd64 - CONFIGURE_ARGS: --without-default-features --disable-user - --target-list-exclude=arm-softmmu,i386-softmmu,mipsel-softmmu,mips64-softmmu,ppc-softmmu - MAKE_CHECK_ARGS: check-unit + IMAGE: fedora + CONFIGURE_ARGS: + --without-default-features + --disable-capstone + --disable-pie + --disable-qom-cast-debug + --disable-strip + TARGETS: avr-softmmu i386-softmmu mips64-softmmu s390x-softmmu sh4-softmmu + sparc64-softmmu hexagon-linux-user i386-linux-user s390x-linux-user + MAKE_CHECK_ARGS: check-unit check-qtest SPEED=slow build-libvhost-user: + extends: .base_job_template stage: build image: $CI_REGISTRY_IMAGE/qemu/fedora:latest needs: job: amd64-fedora-container - before_script: - - dnf install -y meson ninja-build script: - mkdir subprojects/libvhost-user/build - cd subprojects/libvhost-user/build @@ -675,10 +602,13 @@ build-tools-and-docs-debian: extends: .native_build_job_template needs: job: amd64-debian-container + # when running on 'master' we use pre-existing container + optional: true variables: IMAGE: debian-amd64 - MAKE_CHECK_ARGS: check-unit check-softfloat ctags TAGS cscope + MAKE_CHECK_ARGS: check-unit ctags TAGS cscope CONFIGURE_ARGS: --disable-system --disable-user --enable-docs --enable-tools + QEMU_JOB_PUBLISH: 1 artifacts: expire_in: 2 days paths: @@ -698,6 +628,7 @@ build-tools-and-docs-debian: # that users can see the results of their commits, regardless # of what topic branch they're currently using pages: + extends: .base_job_template image: $CI_REGISTRY_IMAGE/qemu/debian-amd64:latest stage: test needs: @@ -715,10 +646,5 @@ pages: artifacts: paths: - public - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' - when: on_success - - if: '$CI_PROJECT_NAMESPACE == "qemu-project"' - when: never - - if: '$CI_PROJECT_NAMESPACE != "qemu-project"' - when: on_success + variables: + QEMU_JOB_PUBLISH: 1 diff --git a/.gitlab-ci.d/cirrus.yml b/.gitlab-ci.d/cirrus.yml index 675db69622..634a73a742 100644 --- a/.gitlab-ci.d/cirrus.yml +++ b/.gitlab-ci.d/cirrus.yml @@ -11,9 +11,11 @@ # special care, because we can't just override it at the GitLab CI job # definition level or we risk breaking it completely. 
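Illustrative note (not part of the patch itself): the @VAR@ placeholders in the Cirrus job template that follows are expanded by sed before cirrus-run is invoked, so with the corrected @TEST_TARGETS@ substitution a line such as

  TEST_TARGETS: "@TEST_TARGETS@"

in .gitlab-ci.d/cirrus/build.yml is rewritten to, for example,

  TEST_TARGETS: "check"

when the per-job file is generated for x64-freebsd-13-build further down.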
.cirrus_build_job: + extends: .base_job_template stage: build image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:master needs: [] + timeout: 80m allow_failure: true script: - source .gitlab-ci.d/cirrus/$NAME.vars @@ -35,12 +37,12 @@ -e "s|[@]PIP3@|$PIP3|g" -e "s|[@]PYPI_PKGS@|$PYPI_PKGS|g" -e "s|[@]CONFIGURE_ARGS@|$CONFIGURE_ARGS|g" - -e "s|[@]TEST_TARGETSS@|$TEST_TARGETSS|g" + -e "s|[@]TEST_TARGETS@|$TEST_TARGETS|g" <.gitlab-ci.d/cirrus/build.yml >.gitlab-ci.d/cirrus/$NAME.yml - cat .gitlab-ci.d/cirrus/$NAME.yml - cirrus-run -v --show-build-log always .gitlab-ci.d/cirrus/$NAME.yml - rules: - - if: "$CIRRUS_GITHUB_REPO && $CIRRUS_API_TOKEN" + variables: + QEMU_JOB_CIRRUS: 1 x64-freebsd-12-build: extends: .cirrus_build_job @@ -48,14 +50,11 @@ x64-freebsd-12-build: NAME: freebsd-12 CIRRUS_VM_INSTANCE_TYPE: freebsd_instance CIRRUS_VM_IMAGE_SELECTOR: image_family - CIRRUS_VM_IMAGE_NAME: freebsd-12-2 + CIRRUS_VM_IMAGE_NAME: freebsd-12-3 CIRRUS_VM_CPUS: 8 CIRRUS_VM_RAM: 8G UPDATE_COMMAND: pkg update INSTALL_COMMAND: pkg install -y - # TODO: Enable gnutls again once FreeBSD's libtasn1 got fixed - # See: https://gitlab.com/gnutls/libtasn1/-/merge_requests/71 - CONFIGURE_ARGS: --disable-gnutls TEST_TARGETS: check x64-freebsd-13-build: @@ -64,24 +63,61 @@ x64-freebsd-13-build: NAME: freebsd-13 CIRRUS_VM_INSTANCE_TYPE: freebsd_instance CIRRUS_VM_IMAGE_SELECTOR: image_family - CIRRUS_VM_IMAGE_NAME: freebsd-13-0 + CIRRUS_VM_IMAGE_NAME: freebsd-13-1 CIRRUS_VM_CPUS: 8 CIRRUS_VM_RAM: 8G UPDATE_COMMAND: pkg update INSTALL_COMMAND: pkg install -y TEST_TARGETS: check -x64-macos-11-base-build: +aarch64-macos-12-base-build: extends: .cirrus_build_job variables: - NAME: macos-11 - CIRRUS_VM_INSTANCE_TYPE: osx_instance + NAME: macos-12 + CIRRUS_VM_INSTANCE_TYPE: macos_instance CIRRUS_VM_IMAGE_SELECTOR: image - CIRRUS_VM_IMAGE_NAME: big-sur-base + CIRRUS_VM_IMAGE_NAME: ghcr.io/cirruslabs/macos-monterey-base:latest CIRRUS_VM_CPUS: 12 CIRRUS_VM_RAM: 24G UPDATE_COMMAND: brew update INSTALL_COMMAND: brew install - PATH_EXTRA: /usr/local/opt/ccache/libexec:/usr/local/opt/gettext/bin - PKG_CONFIG_PATH: /usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/ncurses/lib/pkgconfig:/usr/local/opt/readline/lib/pkgconfig + PATH_EXTRA: /opt/homebrew/ccache/libexec:/opt/homebrew/gettext/bin + PKG_CONFIG_PATH: /opt/homebrew/curl/lib/pkgconfig:/opt/homebrew/ncurses/lib/pkgconfig:/opt/homebrew/readline/lib/pkgconfig TEST_TARGETS: check-unit check-block check-qapi-schema check-softfloat check-qtest-x86_64 + + +# The following jobs run VM-based tests via KVM on a Linux-based Cirrus-CI job +.cirrus_kvm_job: + extends: .base_job_template + stage: build + image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:master + needs: [] + timeout: 80m + script: + - sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g" + -e "s|[@]CI_COMMIT_REF_NAME@|$CI_COMMIT_REF_NAME|g" + -e "s|[@]CI_COMMIT_SHA@|$CI_COMMIT_SHA|g" + -e "s|[@]NAME@|$NAME|g" + -e "s|[@]CONFIGURE_ARGS@|$CONFIGURE_ARGS|g" + -e "s|[@]TEST_TARGETS@|$TEST_TARGETS|g" + <.gitlab-ci.d/cirrus/kvm-build.yml >.gitlab-ci.d/cirrus/$NAME.yml + - cat .gitlab-ci.d/cirrus/$NAME.yml + - cirrus-run -v --show-build-log always .gitlab-ci.d/cirrus/$NAME.yml + variables: + QEMU_JOB_CIRRUS: 1 + QEMU_JOB_OPTIONAL: 1 + + +x86-netbsd: + extends: .cirrus_kvm_job + variables: + NAME: netbsd + CONFIGURE_ARGS: --target-list=x86_64-softmmu,ppc64-softmmu,aarch64-softmmu + TEST_TARGETS: check + +x86-openbsd: + extends: .cirrus_kvm_job + variables: + NAME: openbsd + CONFIGURE_ARGS: 
--target-list=i386-softmmu,riscv64-softmmu,mips64-softmmu + TEST_TARGETS: check diff --git a/.gitlab-ci.d/cirrus/build.yml b/.gitlab-ci.d/cirrus/build.yml index 857bdc5536..7ef6af8d33 100644 --- a/.gitlab-ci.d/cirrus/build.yml +++ b/.gitlab-ci.d/cirrus/build.yml @@ -13,6 +13,7 @@ env: PYTHON: "@PYTHON@" MAKE: "@MAKE@" CONFIGURE_ARGS: "@CONFIGURE_ARGS@" + TEST_TARGETS: "@TEST_TARGETS@" build_task: install_script: @@ -31,5 +32,6 @@ build_task: - $MAKE -j$(sysctl -n hw.ncpu) - for TARGET in $TEST_TARGETS ; do - $MAKE -j$(sysctl -n hw.ncpu) $TARGET V=1 ; + $MAKE -j$(sysctl -n hw.ncpu) $TARGET V=1 + || { cat meson-logs/testlog.txt; exit 1; } ; done diff --git a/.gitlab-ci.d/cirrus/freebsd-12.vars b/.gitlab-ci.d/cirrus/freebsd-12.vars index 2099b21354..e3fc3235b9 100644 --- a/.gitlab-ci.d/cirrus/freebsd-12.vars +++ b/.gitlab-ci.d/cirrus/freebsd-12.vars @@ -2,12 +2,15 @@ # # $ lcitool variables freebsd-12 qemu # -# https://gitlab.com/libvirt/libvirt-ci/-/commit/c7e275ab27ac0dcd09da290817b9adeea1fd1eb1 +# https://gitlab.com/libvirt/libvirt-ci -PACKAGING_COMMAND='pkg' CCACHE='/usr/local/bin/ccache' +CPAN_PKGS='' +CROSS_PKGS='' MAKE='/usr/local/bin/gmake' NINJA='/usr/local/bin/ninja' -PYTHON='/usr/local/bin/python3' +PACKAGING_COMMAND='pkg' PIP3='/usr/local/bin/pip-3.8' -PKGS='alsa-lib bash bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage ctags curl cyrus-sasl dbus diffutils gettext git glib gmake gnutls gsed gtk3 libepoxy libffi libgcrypt libjpeg-turbo libnfs libspice-server libssh libtasn1 libxml2 llvm lttng-ust lzo2 meson ncurses nettle ninja opencv p5-Test-Harness perl5 pixman pkgconf png py38-numpy py38-pillow py38-pip py38-sphinx py38-sphinx_rtd_theme py38-virtualenv py38-yaml python3 rpm2cpio sdl2 sdl2_image snappy spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd' +PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv perl5 pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd' +PYPI_PKGS='' +PYTHON='/usr/local/bin/python3' diff --git a/.gitlab-ci.d/cirrus/freebsd-13.vars b/.gitlab-ci.d/cirrus/freebsd-13.vars index 323fe806d5..9f56babd9c 100644 --- a/.gitlab-ci.d/cirrus/freebsd-13.vars +++ b/.gitlab-ci.d/cirrus/freebsd-13.vars @@ -2,12 +2,15 @@ # # $ lcitool variables freebsd-13 qemu # -# https://gitlab.com/libvirt/libvirt-ci/-/commit/c7e275ab27ac0dcd09da290817b9adeea1fd1eb1 +# https://gitlab.com/libvirt/libvirt-ci -PACKAGING_COMMAND='pkg' CCACHE='/usr/local/bin/ccache' +CPAN_PKGS='' +CROSS_PKGS='' MAKE='/usr/local/bin/gmake' NINJA='/usr/local/bin/ninja' -PYTHON='/usr/local/bin/python3' +PACKAGING_COMMAND='pkg' PIP3='/usr/local/bin/pip-3.8' -PKGS='alsa-lib bash bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage ctags curl cyrus-sasl dbus diffutils gettext git glib gmake gnutls gsed gtk3 libepoxy libffi libgcrypt libjpeg-turbo libnfs libspice-server libssh libtasn1 libxml2 llvm lttng-ust lzo2 meson ncurses nettle ninja opencv p5-Test-Harness perl5 pixman pkgconf png py38-numpy py38-pillow py38-pip py38-sphinx py38-sphinx_rtd_theme py38-virtualenv py38-yaml python3 rpm2cpio sdl2 sdl2_image snappy spice-protocol tesseract texinfo usbredir virglrenderer 
vte3 zstd' +PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv perl5 pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd' +PYPI_PKGS='' +PYTHON='/usr/local/bin/python3' diff --git a/.gitlab-ci.d/cirrus/kvm-build.yml b/.gitlab-ci.d/cirrus/kvm-build.yml new file mode 100644 index 0000000000..4334fabf39 --- /dev/null +++ b/.gitlab-ci.d/cirrus/kvm-build.yml @@ -0,0 +1,31 @@ +container: + image: fedora:35 + cpu: 4 + memory: 8Gb + kvm: true + +env: + CIRRUS_CLONE_DEPTH: 1 + CI_REPOSITORY_URL: "@CI_REPOSITORY_URL@" + CI_COMMIT_REF_NAME: "@CI_COMMIT_REF_NAME@" + CI_COMMIT_SHA: "@CI_COMMIT_SHA@" + +@NAME@_task: + @NAME@_vm_cache: + folder: $HOME/.cache/qemu-vm + install_script: + - dnf update -y + - dnf install -y git make openssh-clients qemu-img qemu-system-x86 wget + clone_script: + - git clone --depth 100 "$CI_REPOSITORY_URL" . + - git fetch origin "$CI_COMMIT_REF_NAME" + - git reset --hard "$CI_COMMIT_SHA" + build_script: + - if [ -f $HOME/.cache/qemu-vm/images/@NAME@.img ]; then + make vm-build-@NAME@ J=$(getconf _NPROCESSORS_ONLN) + EXTRA_CONFIGURE_OPTS="@CONFIGURE_ARGS@" + BUILD_TARGET="@TEST_TARGETS@" ; + else + make vm-build-@NAME@ J=$(getconf _NPROCESSORS_ONLN) BUILD_TARGET=help + EXTRA_CONFIGURE_OPTS="--disable-system --disable-user --disable-tools" ; + fi diff --git a/.gitlab-ci.d/cirrus/macos-11.vars b/.gitlab-ci.d/cirrus/macos-11.vars deleted file mode 100644 index cbec8a44a3..0000000000 --- a/.gitlab-ci.d/cirrus/macos-11.vars +++ /dev/null @@ -1,15 +0,0 @@ -# THIS FILE WAS AUTO-GENERATED -# -# $ lcitool variables macos-11 qemu -# -# https://gitlab.com/libvirt/libvirt-ci/-/commit/c7e275ab27ac0dcd09da290817b9adeea1fd1eb1 - -PACKAGING_COMMAND='brew' -CCACHE='/usr/local/bin/ccache' -MAKE='/usr/local/bin/gmake' -NINJA='/usr/local/bin/ninja' -PYTHON='/usr/local/bin/python3' -PIP3='/usr/local/bin/pip3' -PKGS='bash bc bzip2 capstone ccache cpanminus ctags curl dbus diffutils gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb libxml2 llvm lzo make meson ncurses nettle ninja perl pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy sparse spice-protocol tesseract texinfo usbredir vde vte3 zlib zstd' -PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme virtualenv' -CPAN_PKGS='Test::Harness' diff --git a/.gitlab-ci.d/cirrus/macos-12.vars b/.gitlab-ci.d/cirrus/macos-12.vars new file mode 100644 index 0000000000..ef9e14b373 --- /dev/null +++ b/.gitlab-ci.d/cirrus/macos-12.vars @@ -0,0 +1,16 @@ +# THIS FILE WAS AUTO-GENERATED +# +# $ lcitool variables macos-12 qemu +# +# https://gitlab.com/libvirt/libvirt-ci + +CCACHE='/opt/homebrew/bin/ccache' +CPAN_PKGS='' +CROSS_PKGS='' +MAKE='/opt/homebrew/bin/gmake' +NINJA='/opt/homebrew/bin/ninja' +PACKAGING_COMMAND='brew' +PIP3='/opt/homebrew/bin/pip3' +PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson ncurses nettle ninja perl pixman 
pkg-config python3 rpm2cpio sdl2 sdl2_image snappy sparse spice-protocol tesseract texinfo usbredir vde vte3 zlib zstd' +PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme' +PYTHON='/opt/homebrew/bin/python3' diff --git a/.gitlab-ci.d/container-core.yml b/.gitlab-ci.d/container-core.yml index e8dd1f476a..08f8450fa1 100644 --- a/.gitlab-ci.d/container-core.yml +++ b/.gitlab-ci.d/container-core.yml @@ -10,8 +10,3 @@ amd64-fedora-container: extends: .container_job_template variables: NAME: fedora - -amd64-debian10-container: - extends: .container_job_template - variables: - NAME: debian10 diff --git a/.gitlab-ci.d/container-cross.yml b/.gitlab-ci.d/container-cross.yml index 0fcebe363a..2d560e9764 100644 --- a/.gitlab-ci.d/container-cross.yml +++ b/.gitlab-ci.d/container-cross.yml @@ -1,64 +1,49 @@ alpha-debian-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers variables: NAME: debian-alpha-cross amd64-debian-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers variables: NAME: debian-amd64-cross amd64-debian-user-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers variables: NAME: debian-all-test-cross arm64-debian-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers variables: NAME: debian-arm64-cross -arm64-test-debian-cross-container: - extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian11-container'] - variables: - NAME: debian-arm64-test-cross - armel-debian-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers variables: NAME: debian-armel-cross armhf-debian-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers variables: NAME: debian-armhf-cross # We never want to build hexagon in the CI system and by default we # always want to refer to the master registry where it lives. 
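Illustrative note (not part of the patch itself): the hexagon container job below sets QEMU_JOB_ONLY_FORKS, which is matched by the new stage-1 rule in base.yml,

  - if: '$QEMU_JOB_ONLY_FORKS == "1" && $CI_PROJECT_NAMESPACE == "qemu-project"'
    when: never

so upstream pipelines never schedule the hexagon container rebuild, while fork pipelines fall through to the usual catch-all rules.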
hexagon-cross-container: + extends: .base_job_template image: docker:stable stage: containers - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project"' - when: never - - when: always variables: NAME: debian-hexagon-cross GIT_DEPTH: 1 + QEMU_JOB_ONLY_FORKS: 1 services: - docker:dind before_script: @@ -77,92 +62,88 @@ hexagon-cross-container: hppa-debian-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers variables: NAME: debian-hppa-cross m68k-debian-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers variables: NAME: debian-m68k-cross mips64-debian-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers variables: NAME: debian-mips64-cross mips64el-debian-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers variables: NAME: debian-mips64el-cross mips-debian-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers variables: NAME: debian-mips-cross mipsel-debian-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers variables: NAME: debian-mipsel-cross powerpc-test-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian11-container'] + stage: containers variables: NAME: debian-powerpc-test-cross ppc64el-debian-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers variables: NAME: debian-ppc64el-cross riscv64-debian-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers + # as we are currently based on 'sid/unstable' we may break so... 
+ allow_failure: true variables: NAME: debian-riscv64-cross +# we can however build TCG tests using a non-sid base +riscv64-debian-test-cross-container: + extends: .container_job_template + stage: containers + variables: + NAME: debian-riscv64-test-cross + s390x-debian-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers variables: NAME: debian-s390x-cross sh4-debian-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers variables: NAME: debian-sh4-cross sparc64-debian-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers variables: NAME: debian-sparc64-cross tricore-debian-cross-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers variables: NAME: debian-tricore-cross diff --git a/.gitlab-ci.d/container-template.yml b/.gitlab-ci.d/container-template.yml index 1baecd9460..c434b9c8f3 100644 --- a/.gitlab-ci.d/container-template.yml +++ b/.gitlab-ci.d/container-template.yml @@ -1,4 +1,5 @@ .container_job_template: + extends: .base_job_template image: docker:stable stage: containers services: diff --git a/.gitlab-ci.d/containers.yml b/.gitlab-ci.d/containers.yml index cd06d3f5f4..96d2a3b58b 100644 --- a/.gitlab-ci.d/containers.yml +++ b/.gitlab-ci.d/containers.yml @@ -7,33 +7,17 @@ amd64-alpine-container: variables: NAME: alpine -amd64-debian11-container: - extends: .container_job_template - variables: - NAME: debian11 - amd64-debian-container: extends: .container_job_template - stage: containers-layer2 - needs: ['amd64-debian10-container'] + stage: containers variables: NAME: debian-amd64 -amd64-ubuntu1804-container: - extends: .container_job_template - variables: - NAME: ubuntu1804 - amd64-ubuntu2004-container: extends: .container_job_template variables: NAME: ubuntu2004 -amd64-ubuntu-container: - extends: .container_job_template - variables: - NAME: ubuntu - amd64-opensuse-leap-container: extends: .container_job_template variables: diff --git a/.gitlab-ci.d/crossbuild-template.yml b/.gitlab-ci.d/crossbuild-template.yml index 10d22dcf6c..5e8892fd49 100644 --- a/.gitlab-ci.d/crossbuild-template.yml +++ b/.gitlab-ci.d/crossbuild-template.yml @@ -1,4 +1,5 @@ .cross_system_build_job: + extends: .base_job_template stage: build image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest timeout: 80m @@ -14,7 +15,7 @@ - make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS - if grep -q "EXESUF=.exe" config-host.mak; then make installer; - version="$(git describe --match v[0-9]*)"; + version="$(git describe --match v[0-9]* 2>/dev/null || git rev-parse --short HEAD)"; mv -v qemu-setup*.exe qemu-setup-${version}.exe; fi @@ -24,6 +25,7 @@ # KVM), and set extra options (such disabling other accelerators) via the # $EXTRA_CONFIGURE_OPTS variable. 
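Illustrative sketch (not part of the patch; the job name and option values below are hypothetical): an accelerator-specific build reusing the .cross_accel_build_job template that follows would set the accelerator and extra configure options roughly like this:

  cross-arm64-kvm-only-example:          # hypothetical job name
    extends: .cross_accel_build_job
    needs:
      job: arm64-debian-cross-container
    variables:
      IMAGE: debian-arm64-cross
      ACCEL: kvm
      EXTRA_CONFIGURE_OPTS: --disable-tcg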
.cross_accel_build_job: + extends: .base_job_template stage: build image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest timeout: 30m @@ -36,6 +38,7 @@ - make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS .cross_user_build_job: + extends: .base_job_template stage: build image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest script: @@ -43,5 +46,8 @@ - cd build - PKG_CONFIG_PATH=$PKG_CONFIG_PATH ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS - --disable-system + --disable-system --target-list-exclude="aarch64_be-linux-user + alpha-linux-user cris-linux-user m68k-linux-user microblazeel-linux-user + nios2-linux-user or1k-linux-user ppc-linux-user sparc-linux-user + xtensa-linux-user $CROSS_SKIP_TARGETS" - make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS diff --git a/.gitlab-ci.d/crossbuilds.yml b/.gitlab-ci.d/crossbuilds.yml index f10168db2e..c4cd96433d 100644 --- a/.gitlab-ci.d/crossbuilds.yml +++ b/.gitlab-ci.d/crossbuilds.yml @@ -62,26 +62,14 @@ cross-i386-user: cross-i386-tci: extends: .cross_accel_build_job timeout: 60m + needs: + job: i386-fedora-cross-container variables: IMAGE: fedora-i386-cross ACCEL: tcg-interpreter EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user MAKE_CHECK_ARGS: check check-tcg -cross-mips-system: - extends: .cross_system_build_job - needs: - job: mips-debian-cross-container - variables: - IMAGE: debian-mips-cross - -cross-mips-user: - extends: .cross_user_build_job - needs: - job: mips-debian-cross-container - variables: - IMAGE: debian-mips-cross - cross-mipsel-system: extends: .cross_system_build_job needs: @@ -124,6 +112,25 @@ cross-ppc64el-user: variables: IMAGE: debian-ppc64el-cross +# The riscv64 cross-builds currently use a 'sid' container to get +# compilers and libraries. Until something more stable is found we +# allow_failure so as not to block CI. 
+cross-riscv64-system: + extends: .cross_system_build_job + allow_failure: true + needs: + job: riscv64-debian-cross-container + variables: + IMAGE: debian-riscv64-cross + +cross-riscv64-user: + extends: .cross_user_build_job + allow_failure: true + needs: + job: riscv64-debian-cross-container + variables: + IMAGE: debian-riscv64-cross + cross-s390x-system: extends: .cross_system_build_job needs: diff --git a/.gitlab-ci.d/custom-runners.yml b/.gitlab-ci.d/custom-runners.yml index 564b94565d..97f99e29c2 100644 --- a/.gitlab-ci.d/custom-runners.yml +++ b/.gitlab-ci.d/custom-runners.yml @@ -13,226 +13,8 @@ variables: GIT_STRATEGY: clone -# All ubuntu-18.04 jobs should run successfully in an environment -# setup by the scripts/ci/setup/build-environment.yml task -# "Install basic packages to build QEMU on Ubuntu 18.04/20.04" -ubuntu-18.04-s390x-all-linux-static: - allow_failure: true - needs: [] - stage: build - tags: - - ubuntu_18.04 - - s390x - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - - if: "$S390X_RUNNER_AVAILABLE" - script: - # --disable-libssh is needed because of https://bugs.launchpad.net/qemu/+bug/1838763 - # --disable-glusterfs is needed because there's no static version of those libs in distro supplied packages - - mkdir build - - cd build - - ../configure --enable-debug --static --disable-system --disable-glusterfs --disable-libssh - - make --output-sync -j`nproc` - - make --output-sync -j`nproc` check V=1 - - make --output-sync -j`nproc` check-tcg V=1 - -ubuntu-18.04-s390x-all: - allow_failure: true - needs: [] - stage: build - tags: - - ubuntu_18.04 - - s390x - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - - if: "$S390X_RUNNER_AVAILABLE" - script: - - mkdir build - - cd build - - ../configure --disable-libssh - - make --output-sync -j`nproc` - - make --output-sync -j`nproc` check V=1 - -ubuntu-18.04-s390x-alldbg: - allow_failure: true - needs: [] - stage: build - tags: - - ubuntu_18.04 - - s390x - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - - if: "$S390X_RUNNER_AVAILABLE" - script: - - mkdir build - - cd build - - ../configure --enable-debug --disable-libssh - - make clean - - make --output-sync -j`nproc` - - make --output-sync -j`nproc` check V=1 - -ubuntu-18.04-s390x-clang: - allow_failure: true - needs: [] - stage: build - tags: - - ubuntu_18.04 - - s390x - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - when: manual - - if: "$S390X_RUNNER_AVAILABLE" - when: manual - script: - - mkdir build - - cd build - - ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-sanitizers - - make --output-sync -j`nproc` - - make --output-sync -j`nproc` check V=1 - -ubuntu-18.04-s390x-tci: - allow_failure: true - needs: [] - stage: build - tags: - - ubuntu_18.04 - - s390x - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - - if: "$S390X_RUNNER_AVAILABLE" - script: - - mkdir build - - cd build - - ../configure --disable-libssh --enable-tcg-interpreter - - make --output-sync -j`nproc` - -ubuntu-18.04-s390x-notcg: - allow_failure: true - needs: [] - stage: build - tags: - - ubuntu_18.04 - - s390x - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - when: manual - - if: "$S390X_RUNNER_AVAILABLE" - when: manual - script: - - mkdir build - - cd build - - ../configure --disable-libssh --disable-tcg - - make 
--output-sync -j`nproc` - - make --output-sync -j`nproc` check V=1 - -# All ubuntu-20.04 jobs should run successfully in an environment -# setup by the scripts/ci/setup/qemu/build-environment.yml task -# "Install basic packages to build QEMU on Ubuntu 18.04/20.04" -ubuntu-20.04-aarch64-all-linux-static: - allow_failure: true - needs: [] - stage: build - tags: - - ubuntu_20.04 - - aarch64 - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - - if: "$S390X_RUNNER_AVAILABLE" - script: - # --disable-libssh is needed because of https://bugs.launchpad.net/qemu/+bug/1838763 - # --disable-glusterfs is needed because there's no static version of those libs in distro supplied packages - - mkdir build - - cd build - - ../configure --enable-debug --static --disable-system --disable-glusterfs --disable-libssh - - make --output-sync -j`nproc` - - make --output-sync -j`nproc` check V=1 - - make --output-sync -j`nproc` check-tcg V=1 - -ubuntu-20.04-aarch64-all: - allow_failure: true - needs: [] - stage: build - tags: - - ubuntu_20.04 - - aarch64 - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - - if: "$S390X_RUNNER_AVAILABLE" - script: - - mkdir build - - cd build - - ../configure --disable-libssh - - make --output-sync -j`nproc` - - make --output-sync -j`nproc` check V=1 - -ubuntu-20.04-aarch64-alldbg: - allow_failure: true - needs: [] - stage: build - tags: - - ubuntu_20.04 - - aarch64 - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - - if: "$S390X_RUNNER_AVAILABLE" - script: - - mkdir build - - cd build - - ../configure --enable-debug --disable-libssh - - make clean - - make --output-sync -j`nproc` - - make --output-sync -j`nproc` check V=1 - -ubuntu-20.04-aarch64-clang: - allow_failure: true - needs: [] - stage: build - tags: - - ubuntu_20.04 - - aarch64 - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - when: manual - - if: "$S390X_RUNNER_AVAILABLE" - when: manual - script: - - mkdir build - - cd build - - ../configure --disable-libssh --cc=clang-10 --cxx=clang++-10 --enable-sanitizers - - make --output-sync -j`nproc` - - make --output-sync -j`nproc` check V=1 - -ubuntu-20.04-aarch64-tci: - allow_failure: true - needs: [] - stage: build - tags: - - ubuntu_20.04 - - aarch64 - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - - if: "$S390X_RUNNER_AVAILABLE" - script: - - mkdir build - - cd build - - ../configure --disable-libssh --enable-tcg-interpreter - - make --output-sync -j`nproc` - -ubuntu-20.04-aarch64-notcg: - allow_failure: true - needs: [] - stage: build - tags: - - ubuntu_20.04 - - aarch64 - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - when: manual - - if: "$S390X_RUNNER_AVAILABLE" - when: manual - script: - - mkdir build - - cd build - - ../configure --disable-libssh --disable-tcg - - make --output-sync -j`nproc` - - make --output-sync -j`nproc` check V=1 +include: + - local: '/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml' + - local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml' + - local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml' + - local: '/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml' diff --git a/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml b/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml new file mode 100644 index 0000000000..068b0c4335 --- /dev/null +++ 
b/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml @@ -0,0 +1,30 @@ +centos-stream-8-x86_64: + allow_failure: true + needs: [] + stage: build + tags: + - centos_stream_8 + - x86_64 + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + - if: "$CENTOS_STREAM_8_x86_64_RUNNER_AVAILABLE" + artifacts: + name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" + when: on_failure + expire_in: 7 days + paths: + - build/tests/results/latest/results.xml + - build/tests/results/latest/test-results + reports: + junit: build/tests/results/latest/results.xml + before_script: + - JOBS=$(expr $(nproc) + 1) + script: + - mkdir build + - cd build + - ../scripts/ci/org.centos/stream/8/x86_64/configure + || { cat config.log meson-logs/meson-log.txt; exit 1; } + - make -j"$JOBS" + - make NINJA=":" check + || { cat meson-logs/testlog.txt; exit 1; } ; + - ../scripts/ci/org.centos/stream/8/x86_64/test-avocado diff --git a/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml b/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml new file mode 100644 index 0000000000..0c835939db --- /dev/null +++ b/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml @@ -0,0 +1,131 @@ +# All ubuntu-20.04 jobs should run successfully in an environment +# setup by the scripts/ci/setup/build-environment.yml task +# "Install basic packages to build QEMU on Ubuntu 20.04/20.04" + +ubuntu-20.04-s390x-all-linux-static: + needs: [] + stage: build + tags: + - ubuntu_20.04 + - s390x + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + - if: "$S390X_RUNNER_AVAILABLE" + script: + # --disable-libssh is needed because of https://bugs.launchpad.net/qemu/+bug/1838763 + # --disable-glusterfs is needed because there's no static version of those libs in distro supplied packages + - mkdir build + - cd build + - ../configure --enable-debug --static --disable-system --disable-glusterfs --disable-libssh + || { cat config.log meson-logs/meson-log.txt; exit 1; } + - make --output-sync -j`nproc` + - make --output-sync -j`nproc` check V=1 + || { cat meson-logs/testlog.txt; exit 1; } ; + - make --output-sync -j`nproc` check-tcg V=1 + || { cat meson-logs/testlog.txt; exit 1; } ; + +ubuntu-20.04-s390x-all: + needs: [] + stage: build + tags: + - ubuntu_20.04 + - s390x + timeout: 75m + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + - if: "$S390X_RUNNER_AVAILABLE" + script: + - mkdir build + - cd build + - ../configure --disable-libssh + || { cat config.log meson-logs/meson-log.txt; exit 1; } + - make --output-sync -j`nproc` + - make --output-sync -j`nproc` check V=1 + || { cat meson-logs/testlog.txt; exit 1; } ; + +ubuntu-20.04-s390x-alldbg: + needs: [] + stage: build + tags: + - ubuntu_20.04 + - s390x + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + when: manual + allow_failure: true + - if: "$S390X_RUNNER_AVAILABLE" + when: manual + allow_failure: true + script: + - mkdir build + - cd build + - ../configure --enable-debug --disable-libssh + || { cat config.log meson-logs/meson-log.txt; exit 1; } + - make clean + - make --output-sync -j`nproc` + - make --output-sync -j`nproc` check V=1 + || { cat meson-logs/testlog.txt; exit 1; } ; + +ubuntu-20.04-s390x-clang: + needs: [] + stage: build + tags: + - ubuntu_20.04 + - s390x + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + when: manual + allow_failure: true + - if: "$S390X_RUNNER_AVAILABLE" + when: manual + 
allow_failure: true + script: + - mkdir build + - cd build + - ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-sanitizers + || { cat config.log meson-logs/meson-log.txt; exit 1; } + - make --output-sync -j`nproc` + - make --output-sync -j`nproc` check V=1 + || { cat meson-logs/testlog.txt; exit 1; } ; + +ubuntu-20.04-s390x-tci: + needs: [] + stage: build + tags: + - ubuntu_20.04 + - s390x + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + when: manual + allow_failure: true + - if: "$S390X_RUNNER_AVAILABLE" + when: manual + allow_failure: true + script: + - mkdir build + - cd build + - ../configure --disable-libssh --enable-tcg-interpreter + || { cat config.log meson-logs/meson-log.txt; exit 1; } + - make --output-sync -j`nproc` + +ubuntu-20.04-s390x-notcg: + needs: [] + stage: build + tags: + - ubuntu_20.04 + - s390x + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + when: manual + allow_failure: true + - if: "$S390X_RUNNER_AVAILABLE" + when: manual + allow_failure: true + script: + - mkdir build + - cd build + - ../configure --disable-libssh --disable-tcg + || { cat config.log meson-logs/meson-log.txt; exit 1; } + - make --output-sync -j`nproc` + - make --output-sync -j`nproc` check V=1 + || { cat meson-logs/testlog.txt; exit 1; } ; diff --git a/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml b/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml new file mode 100644 index 0000000000..1a2f9b8dbe --- /dev/null +++ b/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml @@ -0,0 +1,25 @@ +# All ubuntu-22.04 jobs should run successfully in an environment +# setup by the scripts/ci/setup/qemu/build-environment.yml task +# "Install basic packages to build QEMU on Ubuntu 20.04" + +ubuntu-22.04-aarch32-all: + needs: [] + stage: build + tags: + - ubuntu_22.04 + - aarch32 + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + when: manual + allow_failure: true + - if: "$AARCH32_RUNNER_AVAILABLE" + when: manual + allow_failure: true + script: + - mkdir build + - cd build + - ../configure --cross-prefix=arm-linux-gnueabihf- + || { cat config.log meson-logs/meson-log.txt; exit 1; } + - make --output-sync -j`nproc --ignore=40` + - make --output-sync -j`nproc --ignore=40` check V=1 + || { cat meson-logs/testlog.txt; exit 1; } ; diff --git a/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml b/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml new file mode 100644 index 0000000000..ce0b18af6f --- /dev/null +++ b/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml @@ -0,0 +1,130 @@ +# All ubuntu-20.04 jobs should run successfully in an environment +# setup by the scripts/ci/setup/qemu/build-environment.yml task +# "Install basic packages to build QEMU on Ubuntu 20.04" + +ubuntu-22.04-aarch64-all-linux-static: + needs: [] + stage: build + tags: + - ubuntu_22.04 + - aarch64 + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + - if: "$AARCH64_RUNNER_AVAILABLE" + script: + - mkdir build + - cd build + # Disable -static-pie due to build error with system libc: + # https://bugs.launchpad.net/ubuntu/+source/glibc/+bug/1987438 + - ../configure --enable-debug --static --disable-system --disable-pie + || { cat config.log meson-logs/meson-log.txt; exit 1; } + - make --output-sync -j`nproc --ignore=40` + - make --output-sync -j`nproc --ignore=40` check V=1 + || { cat meson-logs/testlog.txt; exit 1; } ; + - make 
--output-sync -j`nproc --ignore=40` check-tcg V=1 + || { cat meson-logs/testlog.txt; exit 1; } ; + +ubuntu-22.04-aarch64-all: + needs: [] + stage: build + tags: + - ubuntu_22.04 + - aarch64 + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + when: manual + allow_failure: true + - if: "$AARCH64_RUNNER_AVAILABLE" + when: manual + allow_failure: true + script: + - mkdir build + - cd build + - ../configure + || { cat config.log meson-logs/meson-log.txt; exit 1; } + - make --output-sync -j`nproc --ignore=40` + - make --output-sync -j`nproc --ignore=40` check V=1 + || { cat meson-logs/testlog.txt; exit 1; } ; + +ubuntu-22.04-aarch64-alldbg: + needs: [] + stage: build + tags: + - ubuntu_22.04 + - aarch64 + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + - if: "$AARCH64_RUNNER_AVAILABLE" + script: + - mkdir build + - cd build + - ../configure --enable-debug + || { cat config.log meson-logs/meson-log.txt; exit 1; } + - make clean + - make --output-sync -j`nproc --ignore=40` + - make --output-sync -j`nproc --ignore=40` check V=1 + || { cat meson-logs/testlog.txt; exit 1; } ; + +ubuntu-22.04-aarch64-clang: + needs: [] + stage: build + tags: + - ubuntu_22.04 + - aarch64 + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + when: manual + allow_failure: true + - if: "$AARCH64_RUNNER_AVAILABLE" + when: manual + allow_failure: true + script: + - mkdir build + - cd build + - ../configure --disable-libssh --cc=clang-10 --cxx=clang++-10 --enable-sanitizers + || { cat config.log meson-logs/meson-log.txt; exit 1; } + - make --output-sync -j`nproc --ignore=40` + - make --output-sync -j`nproc --ignore=40` check V=1 + || { cat meson-logs/testlog.txt; exit 1; } ; + +ubuntu-22.04-aarch64-tci: + needs: [] + stage: build + tags: + - ubuntu_22.04 + - aarch64 + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + when: manual + allow_failure: true + - if: "$AARCH64_RUNNER_AVAILABLE" + when: manual + allow_failure: true + script: + - mkdir build + - cd build + - ../configure --enable-tcg-interpreter + || { cat config.log meson-logs/meson-log.txt; exit 1; } + - make --output-sync -j`nproc --ignore=40` + +ubuntu-22.04-aarch64-notcg: + needs: [] + stage: build + tags: + - ubuntu_22.04 + - aarch64 + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + when: manual + allow_failure: true + - if: "$AARCH64_RUNNER_AVAILABLE" + when: manual + allow_failure: true + script: + - mkdir build + - cd build + - ../configure --disable-tcg + || { cat config.log meson-logs/meson-log.txt; exit 1; } + - make --output-sync -j`nproc --ignore=40` + - make --output-sync -j`nproc --ignore=40` check V=1 + || { cat meson-logs/testlog.txt; exit 1; } ; diff --git a/.gitlab-ci.d/edk2.yml b/.gitlab-ci.d/edk2.yml index 62497ba47f..314e101745 100644 --- a/.gitlab-ci.d/edk2.yml +++ b/.gitlab-ci.d/edk2.yml @@ -1,56 +1,85 @@ # All jobs needing docker-edk2 must use the same rules it uses. .edk2_job_rules: - rules: # Only run this job when ... 
- - changes: - # this file is modified - - .gitlab-ci.d/edk2.yml - # or the Dockerfile is modified - - .gitlab-ci.d/edk2/Dockerfile - # or roms/edk2/ is modified (submodule updated) - - roms/edk2/* - when: on_success - - if: '$CI_COMMIT_REF_NAME =~ /^edk2/' # or the branch/tag starts with 'edk2' - when: on_success - - if: '$CI_COMMIT_MESSAGE =~ /edk2/i' # or last commit description contains 'EDK2' - when: on_success + rules: + # Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set + - if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"' + when: never + + # In forks, if QEMU_CI=1 is set, then create manual job + # if any of the files affecting the build are touched + - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project"' + changes: + - .gitlab-ci.d/edk2.yml + - .gitlab-ci.d/edk2/Dockerfile + - roms/edk2/* + when: manual + + # In forks, if QEMU_CI=1 is set, then create manual job + # if the branch/tag starts with 'edk2' + - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_REF_NAME =~ /^edk2/' + when: manual + + # In forks, if QEMU_CI=1 is set, then create manual job + # if last commit msg contains 'EDK2' (case insensitive) + - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /edk2/i' + when: manual + + # Run if any files affecting the build output are touched + - changes: + - .gitlab-ci.d/edk2.yml + - .gitlab-ci.d/edk2/Dockerfile + - roms/edk2/* + when: on_success + + # Run if the branch/tag starts with 'edk2' + - if: '$CI_COMMIT_REF_NAME =~ /^edk2/' + when: on_success + + # Run if last commit msg contains 'EDK2' (case insensitive) + - if: '$CI_COMMIT_MESSAGE =~ /edk2/i' + when: on_success docker-edk2: - extends: .edk2_job_rules - stage: containers - image: docker:19.03.1 - services: - - docker:19.03.1-dind - variables: - GIT_DEPTH: 3 - IMAGE_TAG: $CI_REGISTRY_IMAGE:edk2-cross-build - # We don't use TLS - DOCKER_HOST: tcp://docker:2375 - DOCKER_TLS_CERTDIR: "" - before_script: - - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY - script: - - docker pull $IMAGE_TAG || true - - docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA - --tag $IMAGE_TAG .gitlab-ci.d/edk2 - - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA - - docker push $IMAGE_TAG + extends: .edk2_job_rules + stage: containers + image: docker:19.03.1 + services: + - docker:19.03.1-dind + variables: + GIT_DEPTH: 3 + IMAGE_TAG: $CI_REGISTRY_IMAGE:edk2-cross-build + # We don't use TLS + DOCKER_HOST: tcp://docker:2375 + DOCKER_TLS_CERTDIR: "" + before_script: + - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY + script: + - docker pull $IMAGE_TAG || true + - docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA + --tag $IMAGE_TAG .gitlab-ci.d/edk2 + - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA + - docker push $IMAGE_TAG build-edk2: - extends: .edk2_job_rules - stage: build - needs: ['docker-edk2'] - artifacts: - paths: # 'artifacts.zip' will contains the following files: - - pc-bios/edk2*bz2 - - pc-bios/edk2-licenses.txt - - edk2-stdout.log - - edk2-stderr.log - image: $CI_REGISTRY_IMAGE:edk2-cross-build - variables: - GIT_DEPTH: 3 - script: # Clone the required submodules and build EDK2 - - git submodule update --init roms/edk2 - - git -C roms/edk2 submodule update --init - - export JOBS=$(($(getconf _NPROCESSORS_ONLN) + 1)) - - echo "=== Using ${JOBS} simultaneous jobs ===" - - make -j${JOBS} -C roms efi 2>&1 
1>edk2-stdout.log | tee -a edk2-stderr.log >&2 + extends: .edk2_job_rules + stage: build + needs: ['docker-edk2'] + artifacts: + paths: # 'artifacts.zip' will contains the following files: + - pc-bios/edk2*bz2 + - pc-bios/edk2-licenses.txt + - edk2-stdout.log + - edk2-stderr.log + image: $CI_REGISTRY_IMAGE:edk2-cross-build + variables: + GIT_DEPTH: 3 + script: # Clone the required submodules and build EDK2 + - git submodule update --init roms/edk2 + - git -C roms/edk2 submodule update --init -- + ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3 + BaseTools/Source/C/BrotliCompress/brotli + CryptoPkg/Library/OpensslLib/openssl + MdeModulePkg/Library/BrotliCustomDecompressLib/brotli + - export JOBS=$(($(getconf _NPROCESSORS_ONLN) + 1)) + - echo "=== Using ${JOBS} simultaneous jobs ===" + - make -j${JOBS} -C roms efi 2>&1 1>edk2-stdout.log | tee -a edk2-stderr.log >&2 diff --git a/.gitlab-ci.d/edk2/Dockerfile b/.gitlab-ci.d/edk2/Dockerfile index b4584d1cf6..bbe50ff832 100644 --- a/.gitlab-ci.d/edk2/Dockerfile +++ b/.gitlab-ci.d/edk2/Dockerfile @@ -1,9 +1,9 @@ # # Docker image to cross-compile EDK2 firmware binaries # -FROM ubuntu:16.04 +FROM ubuntu:18.04 -MAINTAINER Philippe Mathieu-Daudé +MAINTAINER Philippe Mathieu-Daudé # Install packages required to build EDK2 RUN apt update \ @@ -20,7 +20,7 @@ RUN apt update \ iasl \ make \ nasm \ - python \ + python3 \ uuid-dev \ && \ \ diff --git a/.gitlab-ci.d/opensbi.yml b/.gitlab-ci.d/opensbi.yml index 5e0a2477c5..04ed5a3ea1 100644 --- a/.gitlab-ci.d/opensbi.yml +++ b/.gitlab-ci.d/opensbi.yml @@ -1,63 +1,85 @@ # All jobs needing docker-opensbi must use the same rules it uses. .opensbi_job_rules: - rules: # Only run this job when ... - - changes: - # this file is modified - - .gitlab-ci.d/opensbi.yml - # or the Dockerfile is modified - - .gitlab-ci.d/opensbi/Dockerfile - when: on_success - - changes: # or roms/opensbi/ is modified (submodule updated) - - roms/opensbi/* - when: on_success - - if: '$CI_COMMIT_REF_NAME =~ /^opensbi/' # or the branch/tag starts with 'opensbi' - when: on_success - - if: '$CI_COMMIT_MESSAGE =~ /opensbi/i' # or last commit description contains 'OpenSBI' - when: on_success + rules: + # Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set + - if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"' + when: never + + # In forks, if QEMU_CI=1 is set, then create manual job + # if any files affecting the build output are touched + - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project"' + changes: + - .gitlab-ci.d/opensbi.yml + - .gitlab-ci.d/opensbi/Dockerfile + - roms/opensbi/* + when: manual + + # In forks, if QEMU_CI=1 is set, then create manual job + # if the branch/tag starts with 'opensbi' + - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_REF_NAME =~ /^opensbi/' + when: manual + + # In forks, if QEMU_CI=1 is set, then create manual job + # if the last commit msg contains 'OpenSBI' (case insensitive) + - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /opensbi/i' + when: manual + + # Run if any files affecting the build output are touched + - changes: + - .gitlab-ci.d/opensbi.yml + - .gitlab-ci.d/opensbi/Dockerfile + - roms/opensbi/* + when: on_success + + # Run if the branch/tag starts with 'opensbi' + - if: '$CI_COMMIT_REF_NAME =~ /^opensbi/' + when: on_success + + # Run if the last commit msg contains 'OpenSBI' (case insensitive) + - if: '$CI_COMMIT_MESSAGE =~ /opensbi/i' + when: on_success 
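# --- illustrative sketch, not part of the patch above ------------------------
# The rewritten .edk2_job_rules and .opensbi_job_rules rely on GitLab stopping
# at the first matching entry in a `rules:` list: the "when: never" exclusions
# sit at the top, the manual fork opt-ins (QEMU_CI=1 outside qemu-project)
# come next, and the automatic "when: on_success" triggers come last.
# A reduced skeleton of that shape, with a hypothetical submodule path in
# place of roms/edk2 or roms/opensbi:
.firmware_job_rules_sketch:
  rules:
    # exclusions first: forks that do not set QEMU_CI never get the job
    - if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
      when: never
    # fork opt-in: QEMU_CI=1 creates a manual, click-to-run job
    - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project"'
      changes:
        - roms/some-firmware/*
      when: manual
    # upstream and everything else: run automatically when the inputs change
    - changes:
        - roms/some-firmware/*
      when: on_success
# ------------------------------------------------------------------------------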
docker-opensbi: - extends: .opensbi_job_rules - stage: containers - image: docker:19.03.1 - services: - - docker:19.03.1-dind - variables: - GIT_DEPTH: 3 - IMAGE_TAG: $CI_REGISTRY_IMAGE:opensbi-cross-build - # We don't use TLS - DOCKER_HOST: tcp://docker:2375 - DOCKER_TLS_CERTDIR: "" - before_script: - - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY - script: - - docker pull $IMAGE_TAG || true - - docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA - --tag $IMAGE_TAG .gitlab-ci.d/opensbi - - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA - - docker push $IMAGE_TAG + extends: .opensbi_job_rules + stage: containers + image: docker:19.03.1 + services: + - docker:19.03.1-dind + variables: + GIT_DEPTH: 3 + IMAGE_TAG: $CI_REGISTRY_IMAGE:opensbi-cross-build + # We don't use TLS + DOCKER_HOST: tcp://docker:2375 + DOCKER_TLS_CERTDIR: "" + before_script: + - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY + script: + - docker pull $IMAGE_TAG || true + - docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA + --tag $IMAGE_TAG .gitlab-ci.d/opensbi + - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA + - docker push $IMAGE_TAG build-opensbi: - extends: .opensbi_job_rules - stage: build - needs: ['docker-opensbi'] - artifacts: - paths: # 'artifacts.zip' will contains the following files: - - pc-bios/opensbi-riscv32-generic-fw_dynamic.bin - - pc-bios/opensbi-riscv32-generic-fw_dynamic.elf - - pc-bios/opensbi-riscv64-generic-fw_dynamic.bin - - pc-bios/opensbi-riscv64-generic-fw_dynamic.elf - - opensbi32-generic-stdout.log - - opensbi32-generic-stderr.log - - opensbi64-generic-stdout.log - - opensbi64-generic-stderr.log - image: $CI_REGISTRY_IMAGE:opensbi-cross-build - variables: - GIT_DEPTH: 3 - script: # Clone the required submodules and build OpenSBI - - git submodule update --init roms/opensbi - - export JOBS=$(($(getconf _NPROCESSORS_ONLN) + 1)) - - echo "=== Using ${JOBS} simultaneous jobs ===" - - make -j${JOBS} -C roms/opensbi clean - - make -j${JOBS} -C roms opensbi32-generic 2>&1 1>opensbi32-generic-stdout.log | tee -a opensbi32-generic-stderr.log >&2 - - make -j${JOBS} -C roms/opensbi clean - - make -j${JOBS} -C roms opensbi64-generic 2>&1 1>opensbi64-generic-stdout.log | tee -a opensbi64-generic-stderr.log >&2 + extends: .opensbi_job_rules + stage: build + needs: ['docker-opensbi'] + artifacts: + paths: # 'artifacts.zip' will contains the following files: + - pc-bios/opensbi-riscv32-generic-fw_dynamic.bin + - pc-bios/opensbi-riscv64-generic-fw_dynamic.bin + - opensbi32-generic-stdout.log + - opensbi32-generic-stderr.log + - opensbi64-generic-stdout.log + - opensbi64-generic-stderr.log + image: $CI_REGISTRY_IMAGE:opensbi-cross-build + variables: + GIT_DEPTH: 3 + script: # Clone the required submodules and build OpenSBI + - git submodule update --init roms/opensbi + - export JOBS=$(($(getconf _NPROCESSORS_ONLN) + 1)) + - echo "=== Using ${JOBS} simultaneous jobs ===" + - make -j${JOBS} -C roms/opensbi clean + - make -j${JOBS} -C roms opensbi32-generic 2>&1 1>opensbi32-generic-stdout.log | tee -a opensbi32-generic-stderr.log >&2 + - make -j${JOBS} -C roms/opensbi clean + - make -j${JOBS} -C roms opensbi64-generic 2>&1 1>opensbi64-generic-stdout.log | tee -a opensbi64-generic-stderr.log >&2 diff --git a/.gitlab-ci.d/qemu-project.yml b/.gitlab-ci.d/qemu-project.yml index b3d79bc429..691d9bf5dc 100644 --- a/.gitlab-ci.d/qemu-project.yml +++ b/.gitlab-ci.d/qemu-project.yml @@ -2,6 +2,7 @@ # 
https://gitlab.com/qemu-project/qemu/-/pipelines include: + - local: '/.gitlab-ci.d/base.yml' - local: '/.gitlab-ci.d/stages.yml' - local: '/.gitlab-ci.d/edk2.yml' - local: '/.gitlab-ci.d/opensbi.yml' @@ -11,3 +12,4 @@ include: - local: '/.gitlab-ci.d/static_checks.yml' - local: '/.gitlab-ci.d/custom-runners.yml' - local: '/.gitlab-ci.d/cirrus.yml' + - local: '/.gitlab-ci.d/windows.yml' diff --git a/.gitlab-ci.d/stages.yml b/.gitlab-ci.d/stages.yml index f50826018d..f92f57a27d 100644 --- a/.gitlab-ci.d/stages.yml +++ b/.gitlab-ci.d/stages.yml @@ -3,6 +3,5 @@ # - test (for test stages, using build artefacts from a build stage) stages: - containers - - containers-layer2 - build - test diff --git a/.gitlab-ci.d/static_checks.yml b/.gitlab-ci.d/static_checks.yml index 96dbd9e310..289ad1359e 100644 --- a/.gitlab-ci.d/static_checks.yml +++ b/.gitlab-ci.d/static_checks.yml @@ -1,32 +1,30 @@ check-patch: + extends: .base_job_template stage: build - image: $CI_REGISTRY_IMAGE/qemu/centos8:latest - needs: - job: amd64-centos8-container + image: python:3.10-alpine + needs: [] script: - .gitlab-ci.d/check-patch.py variables: GIT_DEPTH: 1000 - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' - when: never - - when: on_success - allow_failure: true + QEMU_JOB_ONLY_FORKS: 1 + before_script: + - apk -U add git perl + allow_failure: true check-dco: + extends: .base_job_template stage: build - image: $CI_REGISTRY_IMAGE/qemu/centos8:latest - needs: - job: amd64-centos8-container + image: python:3.10-alpine + needs: [] script: .gitlab-ci.d/check-dco.py variables: GIT_DEPTH: 1000 - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' - when: never - - when: on_success + before_script: + - apk -U add git check-python-pipenv: + extends: .base_job_template stage: test image: $CI_REGISTRY_IMAGE/qemu/python:latest script: @@ -37,6 +35,7 @@ check-python-pipenv: job: python-container check-python-tox: + extends: .base_job_template stage: test image: $CI_REGISTRY_IMAGE/qemu/python:latest script: @@ -44,6 +43,6 @@ check-python-tox: variables: GIT_DEPTH: 1 QEMU_TOX_EXTRA_ARGS: --skip-missing-interpreters=false + QEMU_JOB_OPTIONAL: 1 needs: job: python-container - allow_failure: true diff --git a/.gitlab-ci.d/windows.yml b/.gitlab-ci.d/windows.yml new file mode 100644 index 0000000000..a3e7a37022 --- /dev/null +++ b/.gitlab-ci.d/windows.yml @@ -0,0 +1,99 @@ +.shared_msys2_builder: + extends: .base_job_template + tags: + - shared-windows + - windows + - windows-1809 + cache: + key: "${CI_JOB_NAME}-cache" + paths: + - ${CI_PROJECT_DIR}/msys64/var/cache + needs: [] + stage: build + timeout: 70m + before_script: + - If ( !(Test-Path -Path msys64\var\cache ) ) { + mkdir msys64\var\cache + } + - If ( !(Test-Path -Path msys64\var\cache\msys2.exe ) ) { + Invoke-WebRequest + "https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-base-x86_64-20220603.sfx.exe" + -outfile "msys64\var\cache\msys2.exe" + } + - msys64\var\cache\msys2.exe -y + - ((Get-Content -path .\msys64\etc\\post-install\\07-pacman-key.post -Raw) + -replace '--refresh-keys', '--version') | + Set-Content -Path ${CI_PROJECT_DIR}\msys64\etc\\post-install\\07-pacman-key.post + - .\msys64\usr\bin\bash -lc "sed -i 's/^CheckSpace/#CheckSpace/g' /etc/pacman.conf" + - .\msys64\usr\bin\bash -lc 'pacman --noconfirm -Syuu' # Core update + - .\msys64\usr\bin\bash -lc 'pacman --noconfirm -Syuu' # Normal update + - taskkill /F /FI "MODULES eq msys-2.0.dll" + 
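# A short reading of the .shared_msys2_builder before_script above (annotation
# only, not lines from the patch; the stated reasons are hedged assumptions):
#  * the MSYS2 installer lives under msys64/var/cache, which is also the job's
#    cache path, so it is only re-downloaded when missing; running it with
#    "-y" self-extracts the base MSYS2 tree into .\msys64
#  * 07-pacman-key.post is rewritten so first use runs "pacman-key --version"
#    instead of "--refresh-keys", presumably to avoid the slow, network-bound
#    key refresh on the shared runner
#  * CheckSpace is commented out of /etc/pacman.conf, so pacman skips its
#    free-space check
#  * "pacman -Syuu" runs twice, the usual MSYS2 two-pass update: the first
#    pass updates the core runtime, the second the remaining packages
#  * the final taskkill terminates any process still holding msys-2.0.dll, so
#    nothing is left locking the tree when the per-job script starts
# The msys2-64bit and msys2-32bit jobs that follow extend this template and
# only add their pacman package set plus the MINGW64 / MINGW32 environment
# variables before configuring and building.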
+msys2-64bit: + extends: .shared_msys2_builder + script: + - .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed + bison diffutils flex + git grep make sed + mingw-w64-x86_64-capstone + mingw-w64-x86_64-curl + mingw-w64-x86_64-cyrus-sasl + mingw-w64-x86_64-gcc + mingw-w64-x86_64-glib2 + mingw-w64-x86_64-gnutls + mingw-w64-x86_64-libnfs + mingw-w64-x86_64-libpng + mingw-w64-x86_64-libssh + mingw-w64-x86_64-libtasn1 + mingw-w64-x86_64-libusb + mingw-w64-x86_64-nettle + mingw-w64-x86_64-ninja + mingw-w64-x86_64-pixman + mingw-w64-x86_64-pkgconf + mingw-w64-x86_64-python + mingw-w64-x86_64-SDL2 + mingw-w64-x86_64-SDL2_image + mingw-w64-x86_64-snappy + mingw-w64-x86_64-usbredir + mingw-w64-x86_64-zstd " + - $env:CHERE_INVOKING = 'yes' # Preserve the current working directory + - $env:MSYSTEM = 'MINGW64' # Start a 64 bit Mingw environment + - $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink + - .\msys64\usr\bin\bash -lc './configure --target-list=x86_64-softmmu + --enable-capstone --without-default-devices' + - .\msys64\usr\bin\bash -lc 'make' + - .\msys64\usr\bin\bash -lc 'make check || { cat build/meson-logs/testlog.txt; exit 1; } ;' + +msys2-32bit: + extends: .shared_msys2_builder + script: + - .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed + bison diffutils flex + git grep make sed + mingw-w64-i686-capstone + mingw-w64-i686-curl + mingw-w64-i686-cyrus-sasl + mingw-w64-i686-gcc + mingw-w64-i686-glib2 + mingw-w64-i686-gnutls + mingw-w64-i686-gtk3 + mingw-w64-i686-libgcrypt + mingw-w64-i686-libjpeg-turbo + mingw-w64-i686-libssh + mingw-w64-i686-libtasn1 + mingw-w64-i686-libusb + mingw-w64-i686-lzo2 + mingw-w64-i686-ninja + mingw-w64-i686-pixman + mingw-w64-i686-pkgconf + mingw-w64-i686-python + mingw-w64-i686-snappy + mingw-w64-i686-usbredir " + - $env:CHERE_INVOKING = 'yes' # Preserve the current working directory + - $env:MSYSTEM = 'MINGW32' # Start a 32-bit MinG environment + - $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink + - mkdir output + - cd output + - ..\msys64\usr\bin\bash -lc "../configure --target-list=ppc64-softmmu" + - ..\msys64\usr\bin\bash -lc 'make' + - ..\msys64\usr\bin\bash -lc 'make check || { cat meson-logs/testlog.txt; exit 1; } ;' diff --git a/.gitmodules b/.gitmodules index 43b686a804..3afcd9b0c3 100644 --- a/.gitmodules +++ b/.gitmodules @@ -31,9 +31,6 @@ [submodule "ui/keycodemapdb"] path = ui/keycodemapdb url = https://gitlab.com/qemu-project/keycodemapdb.git -[submodule "capstone"] - path = capstone - url = https://gitlab.com/qemu-project/capstone.git [submodule "roms/seabios-hppa"] path = roms/seabios-hppa url = https://gitlab.com/qemu-project/seabios-hppa.git @@ -49,9 +46,6 @@ [submodule "roms/edk2"] path = roms/edk2 url = https://gitlab.com/qemu-project/edk2.git -[submodule "slirp"] - path = slirp - url = https://gitlab.com/qemu-project/libslirp.git [submodule "roms/opensbi"] path = roms/opensbi url = https://gitlab.com/qemu-project/opensbi.git @@ -64,6 +58,12 @@ [submodule "roms/vbootrom"] path = roms/vbootrom url = https://gitlab.com/qemu-project/vbootrom.git +[submodule "tests/lcitool/libvirt-ci"] + path = tests/lcitool/libvirt-ci + url = https://gitlab.com/libvirt/libvirt-ci.git +[submodule "subprojects/libvfio-user"] + path = subprojects/libvfio-user + url = https://gitlab.com/qemu-project/libvfio-user.git [submodule "ui/thirdparty/imgui"] path = ui/thirdparty/imgui url = https://github.com/ocornut/imgui.git diff --git a/.mailmap b/.mailmap index f029d1c21f..35dddbe27b 100644 --- a/.mailmap +++ b/.mailmap 
@@ -28,7 +28,11 @@ Thiemo Seufer ths malc malc # Corrupted Author fields +Aaron Larson alarson@ddci.com +Andreas Färber Andreas Färber +Jason Wang Jason Wang Marek Dolata mkdolata@us.ibm.com +Michael Ellerman michael@ozlabs.org Nick Hudson hnick@vmware.com # There is also a: @@ -50,25 +54,34 @@ Aleksandar Rikalo Aleksandar Rikalo Alexander Graf Anthony Liguori Anthony Liguori +Christian Borntraeger Filip Bozuta -Frederic Konrad +Frederic Konrad +Frederic Konrad Greg Kurz Huacai Chen Huacai Chen James Hogan -Leif Lindholm +Leif Lindholm +Leif Lindholm Radoslaw Biernacki +Paul Brook Paul Burton Paul Burton Paul Burton Paul Burton +Philippe Mathieu-Daudé +Philippe Mathieu-Daudé +Philippe Mathieu-Daudé Stefan Brankovic Yongbok Kim # Also list preferred name forms where people have changed their # git author config, or had utf8/latin1 encoding issues. Aaron Lindsay +Aaron Larson Alexey Gerasimenko +Alex Chen Alex Ivanov Andreas Färber Bandan Das @@ -99,9 +112,11 @@ Gautham R. Shenoy Gautham R. Shenoy Gonglei (Arei) Guang Wang +Haibin Zhang Hailiang Zhang Hanna Reitz Hervé Poussineau +Hyman Huang Jakub Jermář Jakub Jermář Jean-Christophe Dubois @@ -135,9 +150,11 @@ Nicholas Thomas Nikunj A Dadhania Orit Wasserman Paolo Bonzini +Pan Nengyuan Pavel Dovgaluk Pavel Dovgaluk Pavel Dovgaluk +Peter Chubb Peter Crosthwaite Peter Crosthwaite Peter Crosthwaite diff --git a/.travis.yml b/.travis.yml index 0faddf7b4e..fb3baabca9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,6 +1,3 @@ -# The current Travis default is a VM based 16.04 Xenial on GCE -# Additional builds with specific requirements for a full VM need to -# be added as additional matrix: entries later on os: linux dist: focal language: c @@ -190,7 +187,7 @@ jobs: - name: "[s390x] GCC check-tcg" arch: s390x - dist: bionic + dist: focal addons: apt_packages: - libaio-dev @@ -225,7 +222,7 @@ jobs: - BUILD_RC=0 && make -j${JOBS} || BUILD_RC=$? - | if [ "$BUILD_RC" -eq 0 ] ; then - mv pc-bios/s390-ccw/*.img pc-bios/ ; + mv pc-bios/s390-ccw/*.img qemu-bundle/usr/local/share/qemu ; ${TEST_CMD} ; else $(exit $BUILD_RC); @@ -233,7 +230,7 @@ jobs: - name: "[s390x] GCC (other-softmmu)" arch: s390x - dist: bionic + dist: focal addons: apt_packages: - libaio-dev @@ -263,10 +260,11 @@ jobs: - name: "[s390x] GCC (user)" arch: s390x - dist: bionic + dist: focal addons: apt_packages: - libgcrypt20-dev + - libglib2.0-dev - libgnutls28-dev - ninja-build env: @@ -274,7 +272,7 @@ jobs: - name: "[s390x] Clang (disable-tcg)" arch: s390x - dist: bionic + dist: focal compiler: clang addons: apt_packages: @@ -305,26 +303,3 @@ jobs: - CONFIG="--disable-containers --disable-tcg --enable-kvm --disable-tools --host-cc=clang --cxx=clang++" - UNRELIABLE=true - - # Release builds - # The make-release script expect a QEMU version, so our tag must start with a 'v'. - # This is the case when release candidate tags are created. - - name: "Release tarball" - if: tag IS present AND tag =~ /^v\d+\.\d+(\.\d+)?(-\S*)?$/ - env: - # We want to build from the release tarball - - BUILD_DIR="release/build/dir" SRC_DIR="../../.." 
- - BASE_CONFIG="--prefix=$PWD/dist" - - CONFIG="--target-list=x86_64-softmmu,aarch64-softmmu,armeb-linux-user,ppc-linux-user" - - TEST_CMD="make install -j${JOBS}" - - QEMU_VERSION="${TRAVIS_TAG:1}" - - CACHE_NAME="${TRAVIS_BRANCH}-linux-gcc-default" - script: - - make -C ${SRC_DIR} qemu-${QEMU_VERSION}.tar.bz2 - - ls -l ${SRC_DIR}/qemu-${QEMU_VERSION}.tar.bz2 - - tar -xf ${SRC_DIR}/qemu-${QEMU_VERSION}.tar.bz2 && cd qemu-${QEMU_VERSION} - - mkdir -p release-build && cd release-build - - ../configure ${BASE_CONFIG} ${CONFIG} || { cat config.log meson-logs/meson-log.txt && exit 1; } - - make install - allow_failures: - - env: UNRELIABLE=true diff --git a/Kconfig.host b/Kconfig.host index 24255ef441..d763d89269 100644 --- a/Kconfig.host +++ b/Kconfig.host @@ -22,15 +22,12 @@ config TPM config VHOST_USER bool - select VHOST config VHOST_VDPA bool - select VHOST config VHOST_KERNEL bool - select VHOST config VIRTFS bool @@ -41,3 +38,11 @@ config PVRDMA config MULTIPROCESS_ALLOWED bool imply MULTIPROCESS + +config FUZZ + bool + select SPARSE_MEM + +config VFIO_USER_SERVER_ALLOWED + bool + imply VFIO_USER_SERVER diff --git a/MAINTAINERS b/MAINTAINERS index 6b3697962c..6966490c94 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -23,7 +23,7 @@ Descriptions of section entries: W: Web-page with status/info Q: Patchwork web based patch tracking system site T: SCM tree type and location. Type is one of: git, hg, quilt, stgit. - S: Status, one of the following: + S: Status, one of the following (keep in sync with docs/devel/maintainers.rst): Supported: Someone is actually paid to look after this. Maintained: Someone actually looks after it. Odd Fixes: It has a maintainer but they don't have time to do @@ -84,7 +84,6 @@ T: git https://github.com/vivier/qemu.git trivial-patches Architecture support -------------------- S390 general architecture support -M: Cornelia Huck M: Thomas Huth S: Supported F: configs/devices/s390x-softmmu/default.mak @@ -106,9 +105,14 @@ F: docs/system/target-s390x.rst F: docs/system/s390x/ F: tests/migration/s390x/ K: ^Subject:.*(?i)s390x? -T: git https://gitlab.com/cohuck/qemu.git s390-next L: qemu-s390x@nongnu.org +MIPS general architecture support +M: Philippe Mathieu-Daudé +R: Jiaxun Yang +S: Odd Fixes +K: ^Subject:.*(?i)mips + Guest CPU cores (TCG) --------------------- Overall TCG CPUs @@ -159,9 +163,6 @@ F: tests/qtest/arm-cpu-features.c F: hw/arm/ F: hw/cpu/a*mpcore.c F: include/hw/cpu/a*mpcore.h -F: disas/arm.c -F: disas/arm-a64.cc -F: disas/libvixl/ F: docs/system/target-arm.rst F: docs/system/arm/cpu-features.rst @@ -171,7 +172,7 @@ L: qemu-arm@nongnu.org S: Maintained F: hw/arm/smmu* F: include/hw/arm/smmu* -F: tests/acceptance/smmu.py +F: tests/avocado/smmu.py AVR TCG CPUs M: Michael Rolnik @@ -179,7 +180,7 @@ S: Maintained F: docs/system/target-avr.rst F: gdb-xml/avr-cpu.xml F: target/avr/ -F: tests/acceptance/machine_avr6.py +F: tests/avocado/machine_avr6.py CRIS TCG CPUs M: Edgar E. 
Iglesias @@ -205,10 +206,14 @@ HPPA (PA-RISC) TCG CPUs M: Richard Henderson S: Maintained F: target/hppa/ -F: hw/hppa/ F: disas/hppa.c -F: hw/net/*i82596* -F: include/hw/net/lasi_82596.h + +LoongArch TCG CPUs +M: Song Gao +M: Xiaojuan Yang +S: Maintained +F: target/loongarch/ +F: tests/tcg/loongarch64/ M68K TCG CPUs M: Laurent Vivier @@ -222,32 +227,19 @@ S: Maintained F: target/microblaze/ F: hw/microblaze/ F: disas/microblaze.c +F: tests/docker/dockerfiles/debian-microblaze-cross.d/build-toolchain.sh +F: tests/tcg/nios2/Makefile.target MIPS TCG CPUs -M: Philippe Mathieu-Daudé +M: Philippe Mathieu-Daudé R: Aurelien Jarno R: Jiaxun Yang R: Aleksandar Rikalo S: Odd Fixes F: target/mips/ -F: configs/devices/mips*/* -F: disas/mips.c +F: disas/*mips.c F: docs/system/cpu-models-mips.rst.inc -F: hw/intc/mips_gic.c -F: hw/mips/ -F: hw/misc/mips_* -F: hw/timer/mips_gictimer.c -F: include/hw/intc/mips_gic.h -F: include/hw/mips/ -F: include/hw/misc/mips_* -F: include/hw/timer/mips_gictimer.h F: tests/tcg/mips/ -K: ^Subject:.*(?i)mips - -MIPS TCG CPUs (nanoMIPS ISA) -S: Orphan -F: disas/nanomips.* -F: target/mips/tcg/*nanomips* NiosII TCG CPUs M: Chris Wulff @@ -257,6 +249,7 @@ F: target/nios2/ F: hw/nios2/ F: disas/nios2.c F: configs/devices/nios2-softmmu/default.mak +F: tests/docker/dockerfiles/debian-nios2-cross.d/build-toolchain.sh OpenRISC TCG CPUs M: Stafford Horne @@ -266,15 +259,16 @@ F: hw/openrisc/ F: tests/tcg/openrisc/ PowerPC TCG CPUs -M: David Gibson -M: Greg Kurz +M: Daniel Henrique Barboza +R: Cédric Le Goater +R: David Gibson +R: Greg Kurz L: qemu-ppc@nongnu.org S: Maintained F: target/ppc/ -F: hw/ppc/ -F: include/hw/ppc/ -F: disas/ppc.c -F: tests/acceptance/machine_ppc.py +F: hw/ppc/ppc.c +F: hw/ppc/ppc_booke.c +F: include/hw/ppc/ppc.h RISC-V TCG CPUs M: Palmer Dabbelt @@ -288,6 +282,13 @@ F: include/hw/riscv/ F: linux-user/host/riscv32/ F: linux-user/host/riscv64/ +RISC-V XVentanaCondOps extension +M: Philipp Tomsich +L: qemu-riscv@nongnu.org +S: Supported +F: target/riscv/XVentanaCondOps.decode +F: target/riscv/insn_trans/trans_xventanacondops.c.inc + RENESAS RX CPUs R: Yoshinori Sato S: Orphan @@ -296,12 +297,11 @@ F: target/rx/ S390 TCG CPUs M: Richard Henderson M: David Hildenbrand +R: Ilya Leoshkevich S: Maintained F: target/s390x/ F: target/s390x/tcg -F: target/s390x/cpu_models_*.[ch] F: hw/s390x/ -F: disas/s390.c F: tests/tcg/s390x/ L: qemu-s390x@nongnu.org @@ -326,13 +326,12 @@ F: disas/sparc.c X86 TCG CPUs M: Paolo Bonzini M: Richard Henderson -M: Eduardo Habkost +M: Eduardo Habkost S: Maintained F: target/i386/tcg/ F: tests/tcg/i386/ F: tests/tcg/x86_64/ F: hw/i386/ -F: disas/i386.c F: docs/system/cpu-models-x86* T: git https://gitlab.com/ehabkost/qemu.git x86-next @@ -386,29 +385,23 @@ F: target/mips/kvm* F: target/mips/sysemu/ PPC KVM CPUs -M: David Gibson -M: Greg Kurz +M: Daniel Henrique Barboza +R: Cédric Le Goater +R: David Gibson +R: Greg Kurz S: Maintained F: target/ppc/kvm.c S390 KVM CPUs M: Halil Pasic -M: Cornelia Huck -M: Christian Borntraeger +M: Christian Borntraeger S: Supported F: target/s390x/kvm/ -F: target/s390x/ioinst.[ch] F: target/s390x/machine.c F: target/s390x/sigp.c -F: target/s390x/cpu_features*.[ch] -F: target/s390x/cpu_models.[ch] F: hw/s390x/pv.c F: include/hw/s390x/pv.h -F: hw/intc/s390_flic.c -F: hw/intc/s390_flic_kvm.c -F: include/hw/s390x/s390_flic.h F: gdb-xml/s390*.xml -T: git https://gitlab.com/cohuck/qemu.git s390-next T: git https://github.com/borntraeger/qemu.git s390-next L: qemu-s390x@nongnu.org @@ -417,7 +410,10 @@ M: Paolo 
Bonzini M: Marcelo Tosatti L: kvm@vger.kernel.org S: Supported +F: docs/system/i386/amd-memory-encryption.rst +F: docs/system/i386/sgx.rst F: target/i386/kvm/ +F: target/i386/sev* F: scripts/kvm/vmxcap Guest CPU Cores (other accelerators) @@ -433,6 +429,11 @@ F: accel/accel-*.c F: accel/Makefile.objs F: accel/stubs/Makefile.objs +Apple Silicon HVF CPUs +M: Alexander Graf +S: Maintained +F: target/arm/hvf/ + X86 HVF CPUs M: Cameron Esfahani M: Roman Bolshakov @@ -486,7 +487,6 @@ Guest CPU Cores (HAXM) --------------------- X86 HAXM CPUs M: Wenchao Wang -M: Colin Xu L: haxm-team@intel.com W: https://github.com/intel/haxm/issues S: Maintained @@ -541,6 +541,14 @@ F: */*win32* F: include/*/*win32* X: qga/*win32* F: qemu.nsi +F: scripts/nsis.py + +Darwin (macOS, iOS) +M: Philippe Mathieu-Daudé +S: Odd Fixes +F: .gitlab-ci.d/cirrus/macos-* +F: */*.m +F: scripts/entitlement.sh Alpha Machines -------------- @@ -617,6 +625,7 @@ F: hw/intc/gic_internal.h F: hw/misc/a9scu.c F: hw/misc/arm11scu.c F: hw/misc/arm_l2x0.c +F: hw/misc/armv7m_ras.c F: hw/timer/a9gtimer* F: hw/timer/arm* F: include/hw/arm/arm*.h @@ -626,6 +635,7 @@ F: include/hw/misc/arm11scu.h F: include/hw/timer/a9gtimer.h F: include/hw/timer/arm_mptimer.h F: include/hw/timer/armv7m_systick.h +F: include/hw/misc/armv7m_ras.h F: tests/qtest/test-arm-mptimer.c Exynos @@ -634,7 +644,7 @@ M: Peter Maydell L: qemu-arm@nongnu.org S: Odd Fixes F: hw/*/exynos* -F: include/hw/arm/exynos4210.h +F: include/hw/*/exynos* Calxeda Highbank M: Rob Herring @@ -653,7 +663,7 @@ S: Odd Fixes F: include/hw/arm/digic.h F: hw/*/digic* F: include/hw/*/digic* -F: tests/acceptance/machine_arm_canona1100.py +F: tests/avocado/machine_arm_canona1100.py F: docs/system/arm/digic.rst Goldfish RTC @@ -666,7 +676,7 @@ F: include/hw/rtc/goldfish_rtc.h Gumstix M: Peter Maydell -R: Philippe Mathieu-Daudé +R: Philippe Mathieu-Daudé L: qemu-arm@nongnu.org S: Odd Fixes F: hw/arm/gumstix.c @@ -704,7 +714,7 @@ S: Maintained F: hw/arm/integratorcp.c F: hw/misc/arm_integrator_debug.c F: include/hw/misc/arm_integrator_debug.h -F: tests/acceptance/machine_arm_integratorcp.py +F: tests/avocado/machine_arm_integratorcp.py F: docs/system/arm/integratorcp.rst MCIMX6UL EVK / i.MX6ul @@ -773,6 +783,8 @@ M: Peter Maydell L: qemu-arm@nongnu.org S: Odd Fixes F: hw/arm/musicpal.c +F: hw/net/mv88w8618_eth.c +F: include/hw/net/mv88w8618_eth.h F: docs/system/arm/musicpal.rst Nuvoton NPCM7xx @@ -801,7 +813,7 @@ F: include/hw/display/blizzard.h F: include/hw/input/lm832x.h F: include/hw/input/tsc2xxx.h F: include/hw/misc/cbus.h -F: tests/acceptance/machine_arm_n8x0.py +F: tests/avocado/machine_arm_n8x0.py F: docs/system/arm/nseries.rst Palm @@ -815,8 +827,7 @@ F: docs/system/arm/palm.rst Raspberry Pi M: Peter Maydell -R: Andrew Baumann -R: Philippe Mathieu-Daudé +R: Philippe Mathieu-Daudé L: qemu-arm@nongnu.org S: Odd Fixes F: hw/arm/raspi.c @@ -875,7 +886,7 @@ F: include/hw/ssi/imx_spi.h SBSA-REF M: Radoslaw Biernacki M: Peter Maydell -R: Leif Lindholm +R: Leif Lindholm L: qemu-arm@nongnu.org S: Maintained F: hw/arm/sbsa-ref.c @@ -927,6 +938,7 @@ S: Maintained F: hw/arm/virt* F: include/hw/arm/virt.h F: docs/system/arm/virt.rst +F: tests/avocado/machine_aarch64_virt.py Xilinx Zynq M: Edgar E. 
Iglesias @@ -955,6 +967,12 @@ F: hw/display/dpcd.c F: include/hw/display/dpcd.h F: docs/system/arm/xlnx-versal-virt.rst +Xilinx Versal OSPI +M: Francisco Iglesias +S: Maintained +F: hw/ssi/xlnx-versal-ospi.c +F: include/hw/ssi/xlnx-versal-ospi.h + ARM ACPI Subsystem M: Shannon Zhao L: qemu-arm@nongnu.org @@ -1042,6 +1060,7 @@ F: hw/net/ftgmac100.c F: include/hw/net/ftgmac100.h F: docs/system/arm/aspeed.rst F: tests/qtest/*aspeed* +F: hw/arm/fby35.c NRF51 M: Joel Stanley @@ -1071,7 +1090,7 @@ F: include/hw/misc/avr_power.h F: hw/misc/avr_power.c Arduino -M: Philippe Mathieu-Daudé +M: Philippe Mathieu-Daudé S: Maintained F: hw/avr/arduino.c @@ -1091,8 +1110,31 @@ R: Helge Deller S: Odd Fixes F: configs/devices/hppa-softmmu/default.mak F: hw/hppa/ +F: hw/net/*i82596* +F: hw/misc/lasi.c +F: hw/pci-host/dino.c +F: include/hw/misc/lasi.h +F: include/hw/net/lasi_82596.h +F: include/hw/pci-host/dino.h F: pc-bios/hppa-firmware.img +LoongArch Machines +------------------ +Virt +M: Xiaojuan Yang +M: Song Gao +S: Maintained +F: docs/system/loongarch/virt.rst +F: configs/targets/loongarch64-softmmu.mak +F: configs/devices/loongarch64-softmmu/default.mak +F: hw/loongarch/ +F: include/hw/loongarch/virt.h +F: include/hw/intc/loongarch_*.h +F: hw/intc/loongarch_*.c +F: include/hw/pci-host/ls7a.h +F: hw/rtc/ls7a_rtc.c +F: gdb-xml/loongarch*.xml + M68K Machines ------------- an5206 @@ -1153,7 +1195,7 @@ M: Edgar E. Iglesias S: Maintained F: hw/microblaze/petalogix_s3adsp1800_mmu.c F: include/hw/char/xilinx_uartlite.h -F: tests/acceptance/machine_microblaze.py +F: tests/avocado/machine_microblaze.py petalogix_ml605 M: Edgar E. Iglesias @@ -1162,6 +1204,13 @@ F: hw/microblaze/petalogix_ml605_mmu.c MIPS Machines ------------- +Overall MIPS Machines +M: Philippe Mathieu-Daudé +S: Odd Fixes +F: configs/devices/mips*/* +F: hw/mips/ +F: include/hw/mips/ + Jazz M: Hervé Poussineau R: Aleksandar Rikalo @@ -1171,7 +1220,7 @@ F: hw/display/jazz_led.c F: hw/dma/rc4030.c Malta -M: Philippe Mathieu-Daudé +M: Philippe Mathieu-Daudé R: Aurelien Jarno S: Odd Fixes F: hw/isa/piix4.c @@ -1179,8 +1228,8 @@ F: hw/acpi/piix4.c F: hw/mips/malta.c F: hw/mips/gt64xxx_pci.c F: include/hw/southbridge/piix.h -F: tests/acceptance/linux_ssh_mips_malta.py -F: tests/acceptance/machine_mips_malta.py +F: tests/avocado/linux_ssh_mips_malta.py +F: tests/avocado/machine_mips_malta.py Mipssim R: Aleksandar Rikalo @@ -1190,7 +1239,7 @@ F: hw/net/mipsnet.c Fuloong 2E M: Huacai Chen -M: Philippe Mathieu-Daudé +M: Philippe Mathieu-Daudé R: Jiaxun Yang S: Odd Fixes F: hw/mips/fuloong2e.c @@ -1198,7 +1247,7 @@ F: hw/isa/vt82c686.c F: hw/pci-host/bonito.c F: hw/usb/vt82c686-uhci-pci.c F: include/hw/isa/vt82c686.h -F: tests/acceptance/machine_mips_fuloong2e.py +F: tests/avocado/machine_mips_fuloong2e.py Loongson-3 virtual platforms M: Huacai Chen @@ -1208,7 +1257,7 @@ F: hw/intc/loongson_liointc.c F: hw/mips/loongson3_bootp.c F: hw/mips/loongson3_bootp.h F: hw/mips/loongson3_virt.c -F: tests/acceptance/machine_mips_loongson3v.py +F: tests/avocado/machine_mips_loongson3v.py Boston M: Paul Burton @@ -1228,25 +1277,20 @@ F: hw/openrisc/openrisc_sim.c PowerPC Machines ---------------- -405 -M: David Gibson -M: Greg Kurz +405 (ref405ep) L: qemu-ppc@nongnu.org -S: Odd Fixes +S: Orphan F: hw/ppc/ppc405_boards.c Bamboo -M: David Gibson -M: Greg Kurz L: qemu-ppc@nongnu.org -S: Odd Fixes +S: Orphan F: hw/ppc/ppc440_bamboo.c +F: tests/avocado/ppc_bamboo.py e500 -M: David Gibson -M: Greg Kurz L: qemu-ppc@nongnu.org -S: Odd Fixes +S: Orphan F: hw/ppc/e500* F: 
hw/gpio/mpc8xxx.c F: hw/i2c/mpc_i2c.c @@ -1255,21 +1299,21 @@ F: hw/pci-host/ppce500.c F: include/hw/ppc/ppc_e500.h F: include/hw/pci-host/ppce500.h F: pc-bios/u-boot.e500 +F: hw/intc/openpic_kvm.h +F: include/hw/ppc/openpic_kvm.h mpc8544ds -M: David Gibson -M: Greg Kurz L: qemu-ppc@nongnu.org -S: Odd Fixes +S: Orphan F: hw/ppc/mpc8544ds.c F: hw/ppc/mpc8544_guts.c +F: tests/avocado/ppc_mpc8544ds.py New World (mac99) M: Mark Cave-Ayland -R: David Gibson -R: Greg Kurz L: qemu-ppc@nongnu.org S: Odd Fixes +F: docs/system/ppc/powermac.rst F: hw/ppc/mac_newworld.c F: hw/pci-host/uninorth.c F: hw/pci-bridge/dec.[hc] @@ -1279,6 +1323,7 @@ F: hw/nvram/mac_nvram.c F: hw/input/adb* F: include/hw/misc/macio/ F: include/hw/misc/mos6522.h +F: include/hw/nvram/mac_nvram.h F: include/hw/ppc/mac_dbdma.h F: include/hw/pci-host/uninorth.h F: include/hw/input/adb* @@ -1286,10 +1331,9 @@ F: pc-bios/qemu_vga.ndrv Old World (g3beige) M: Mark Cave-Ayland -R: David Gibson -R: Greg Kurz L: qemu-ppc@nongnu.org S: Odd Fixes +F: docs/system/ppc/powermac.rst F: hw/ppc/mac_oldworld.c F: hw/pci-host/grackle.c F: hw/misc/macio/ @@ -1297,14 +1341,14 @@ F: hw/intc/heathrow_pic.c F: hw/input/adb* F: include/hw/intc/heathrow_pic.h F: include/hw/input/adb* +F: include/hw/pci-host/grackle.h F: pc-bios/qemu_vga.ndrv PReP M: Hervé Poussineau -R: David Gibson -R: Greg Kurz L: qemu-ppc@nongnu.org S: Maintained +F: docs/system/ppc/prep.rst F: hw/ppc/prep.c F: hw/ppc/prep_systemio.c F: hw/ppc/rs6000_mc.c @@ -1315,31 +1359,33 @@ F: hw/dma/i82374.c F: hw/rtc/m48t59-isa.c F: include/hw/isa/pc87312.h F: include/hw/rtc/m48t59.h -F: tests/acceptance/ppc_prep_40p.py +F: tests/avocado/ppc_prep_40p.py -sPAPR -M: David Gibson -M: Greg Kurz +sPAPR (pseries) +M: Daniel Henrique Barboza +R: Cédric Le Goater +R: David Gibson +R: Greg Kurz L: qemu-ppc@nongnu.org -S: Supported +S: Maintained F: hw/*/spapr* F: include/hw/*/spapr* F: hw/*/xics* F: include/hw/*/xics* F: pc-bios/slof.bin -F: docs/specs/ppc-spapr-hcalls.txt -F: docs/specs/ppc-spapr-hotplug.txt +F: docs/system/ppc/pseries.rst +F: docs/specs/ppc-spapr-* F: tests/qtest/spapr* F: tests/qtest/libqos/*spapr* F: tests/qtest/rtas* F: tests/qtest/libqos/rtas* +F: tests/avocado/ppc_pseries.py PowerNV (Non-Virtualized) M: Cédric Le Goater -M: David Gibson -M: Greg Kurz L: qemu-ppc@nongnu.org -S: Maintained +S: Odd Fixes +F: docs/system/ppc/powernv.rst F: hw/ppc/pnv* F: hw/intc/pnv* F: hw/intc/xics_pnv.c @@ -1354,11 +1400,10 @@ M: Edgar E. 
Iglesias L: qemu-ppc@nongnu.org S: Odd Fixes F: hw/ppc/virtex_ml507.c +F: tests/avocado/ppc_virtex_ml507.py sam460ex M: BALATON Zoltan -R: David Gibson -R: Greg Kurz L: qemu-ppc@nongnu.org S: Maintained F: hw/ppc/sam460ex.c @@ -1372,7 +1417,6 @@ F: roms/u-boot-sam460ex pegasos2 M: BALATON Zoltan -R: David Gibson L: qemu-ppc@nongnu.org S: Maintained F: hw/ppc/pegasos2.c @@ -1382,6 +1426,8 @@ F: include/hw/pci-host/mv64361.h Virtual Open Firmware (VOF) M: Alexey Kardashevskiy +R: Cédric Le Goater +R: Daniel Henrique Barboza R: David Gibson R: Greg Kurz L: qemu-ppc@nongnu.org @@ -1443,7 +1489,7 @@ R: Yoshinori Sato S: Orphan F: docs/system/target-rx.rst F: hw/rx/rx-gdbsim.c -F: tests/acceptance/machine_rx_gdbsim.py +F: tests/avocado/machine_rx_gdbsim.py SH4 Machines ------------ @@ -1497,7 +1543,7 @@ F: include/hw/pci-host/sabre.h F: hw/pci-bridge/simba.c F: include/hw/pci-bridge/simba.h F: pc-bios/openbios-sparc64 -F: tests/acceptance/machine_sparc64_sun4u.py +F: tests/avocado/machine_sparc64_sun4u.py Sun4v M: Artyom Tarasenko @@ -1508,34 +1554,29 @@ F: include/hw/rtc/sun4v-rtc.h Leon3 M: Fabien Chouteau -M: KONRAD Frederic +M: Frederic Konrad S: Maintained F: hw/sparc/leon3.c F: hw/*/grlib* F: include/hw/*/grlib* -F: tests/acceptance/machine_sparc_leon3.py +F: tests/avocado/machine_sparc_leon3.py S390 Machines ------------- S390 Virtio-ccw -M: Cornelia Huck M: Halil Pasic -M: Christian Borntraeger +M: Christian Borntraeger +M: Eric Farman S: Supported -F: hw/char/sclp*.[hc] -F: hw/char/terminal3270.c F: hw/s390x/ F: include/hw/s390x/ -F: hw/watchdog/wdt_diag288.c -F: include/hw/watchdog/wdt_diag288.h F: configs/devices/s390x-softmmu/default.mak -F: tests/acceptance/machine_s390_ccw_virtio.py -T: git https://gitlab.com/cohuck/qemu.git s390-next +F: tests/avocado/machine_s390_ccw_virtio.py T: git https://github.com/borntraeger/qemu.git s390-next L: qemu-s390x@nongnu.org S390-ccw boot -M: Christian Borntraeger +M: Christian Borntraeger M: Thomas Huth S: Supported F: hw/s390x/ipl.* @@ -1553,6 +1594,38 @@ F: hw/s390x/s390-pci* F: include/hw/s390x/s390-pci* L: qemu-s390x@nongnu.org +S390 channel subsystem +M: Halil Pasic +M: Christian Borntraeger +M: Eric Farman +S: Supported +F: hw/s390x/ccw-device.[ch] +F: hw/s390x/css.c +F: hw/s390x/css-bridge.c +F: include/hw/s390x/css.h +F: include/hw/s390x/css-bridge.h +F: include/hw/s390x/ioinst.h +F: target/s390x/ioinst.c +L: qemu-s390x@nongnu.org + +S390 CPU models +M: David Hildenbrand +S: Maintained +F: target/s390x/cpu_features*.[ch] +F: target/s390x/cpu_models.[ch] +L: qemu-s390x@nongnu.org + +S390 SCLP-backed devices +M: Halil Pasic +M: Christian Borntraeger +S: Supported +F: include/hw/s390x/event-facility.h +F: include/hw/s390x/sclp.h +F: hw/char/sclp*.[hc] +F: hw/s390x/event-facility.c +F: hw/s390x/sclp*.c +L: qemu-s390x@nongnu.org + X86 Machines ------------ PC @@ -1616,19 +1689,22 @@ microvm M: Sergio Lopez M: Paolo Bonzini S: Maintained -F: docs/microvm.rst +F: docs/system/i386/microvm.rst F: hw/i386/microvm.c F: include/hw/i386/microvm.h F: pc-bios/bios-microvm.bin Machine core -M: Eduardo Habkost +M: Eduardo Habkost M: Marcel Apfelbaum +R: Philippe Mathieu-Daudé +R: Yanan Wang S: Supported F: cpu.c F: hw/core/cpu.c F: hw/core/machine-qmp-cmds.c F: hw/core/machine.c +F: hw/core/machine-smp.c F: hw/core/null-machine.c F: hw/core/numa.c F: hw/cpu/cluster.c @@ -1638,6 +1714,7 @@ F: include/hw/boards.h F: include/hw/core/cpu.h F: include/hw/cpu/cluster.h F: include/sysemu/numa.h +F: tests/unit/test-smp-parse.c T: git 
https://gitlab.com/ehabkost/qemu.git machine-next Xtensa Machines @@ -1660,9 +1737,19 @@ F: hw/net/opencores_eth.c Devices ------- +Overall Audio frontends +M: Gerd Hoffmann +S: Odd Fixes +F: hw/audio/ +F: include/hw/audio/ +F: tests/qtest/ac97-test.c +F: tests/qtest/es1370-test.c +F: tests/qtest/intel-hda-test.c +F: tests/qtest/fuzz-sb16-test.c + Xilinx CAN -M: Vikram Garhwal -M: Francisco Iglesias +M: Vikram Garhwal +M: Francisco Iglesias S: Maintained F: hw/net/can/xlnx-* F: include/hw/net/xlnx-* @@ -1710,6 +1797,12 @@ F: include/hw/block/fdc.h F: tests/qtest/fdc-test.c T: git https://gitlab.com/jsnow/qemu.git ide +Hyper-V VMBus +M: Maciej S. Szmigiero +S: Odd Fixes +F: hw/hyperv/vmbus.c +F: include/hw/hyperv/vmbus*.h + OMAP M: Peter Maydell L: qemu-arm@nongnu.org @@ -1736,13 +1829,20 @@ F: qapi/pci.json F: docs/pci* F: docs/specs/*pci* +PCIE DOE +M: Huai-Cheng Kuo +M: Chris Browy +S: Supported +F: include/hw/pci/pcie_doe.h +F: hw/pci/pcie_doe.c + ACPI/SMBIOS M: Michael S. Tsirkin M: Igor Mammedov +R: Ani Sinha S: Supported F: include/hw/acpi/* F: include/hw/firmware/smbios.h -F: hw/mem/* F: hw/acpi/* F: hw/smbios/* F: hw/i386/acpi-build.[hc] @@ -1751,6 +1851,25 @@ F: qapi/acpi.json F: tests/qtest/bios-tables-test* F: tests/qtest/acpi-utils.[hc] F: tests/data/acpi/ +F: docs/specs/acpi_cpu_hotplug.rst +F: docs/specs/acpi_mem_hotplug.rst +F: docs/specs/acpi_nvdimm.rst +F: docs/specs/acpi_pci_hotplug.rst +F: docs/specs/acpi_hw_reduced_hotplug.rst + +ACPI/VIOT +M: Jean-Philippe Brucker +S: Supported +F: hw/acpi/viot.c +F: hw/acpi/viot.h + +ACPI/AVOCADO/BIOSBITS +M: Ani Sinha +M: Michael S. Tsirkin +S: Supported +F: tests/avocado/acpi-bits/* +F: tests/avocado/acpi-bits.py +F: docs/devel/acpi-bits.rst ACPI/HEST/GHES R: Dongjiu Geng @@ -1761,9 +1880,8 @@ F: include/hw/acpi/ghes.h F: docs/specs/acpi_hest_ghes.rst ppc4xx -M: David Gibson L: qemu-ppc@nongnu.org -S: Odd Fixes +S: Orphan F: hw/ppc/ppc4*.c F: hw/i2c/ppc4xx_i2c.c F: include/hw/ppc/ppc4xx.h @@ -1787,7 +1905,7 @@ F: docs/virtio-net-failover.rst T: git https://github.com/jasowang/qemu.git net Parallel NOR Flash devices -M: Philippe Mathieu-Daudé +M: Philippe Mathieu-Daudé T: git https://gitlab.com/philmd/qemu.git pflash-next S: Maintained F: hw/block/pflash_cfi*.c @@ -1802,13 +1920,14 @@ F: hw/scsi/* F: tests/qtest/virtio-scsi-test.c F: tests/qtest/fuzz-virtio-scsi-test.c F: tests/qtest/am53c974-test.c +F: tests/qtest/fuzz-lsi53c895a-test.c T: git https://github.com/bonzini/qemu.git scsi-next SSI M: Alistair Francis S: Maintained F: hw/ssi/* -F: hw/block/m25p80.c +F: hw/block/m25p80* F: include/hw/ssi/ssi.h X: hw/ssi/xilinx_* F: tests/qtest/m25p80-test.c @@ -1819,7 +1938,7 @@ S: Maintained F: hw/ssi/xilinx_* SD (Secure Card) -M: Philippe Mathieu-Daudé +M: Philippe Mathieu-Daudé M: Bin Meng L: qemu-block@nongnu.org S: Odd Fixes @@ -1855,7 +1974,6 @@ F: docs/igd-assign.txt F: docs/devel/vfio-migration.rst vfio-ccw -M: Cornelia Huck M: Eric Farman M: Matthew Rosato S: Supported @@ -1863,7 +1981,6 @@ F: hw/vfio/ccw.c F: hw/s390x/s390-ccw.c F: include/hw/s390x/s390-ccw.h F: include/hw/s390x/vfio-ccw.h -T: git https://gitlab.com/cohuck/qemu.git s390-next L: qemu-s390x@nongnu.org vfio-ap @@ -1888,6 +2005,7 @@ F: docs/interop/vhost-user.rst F: contrib/vhost-user-*/ F: backends/vhost-user.c F: include/sysemu/vhost-user-backend.h +F: subprojects/libvhost-user/ virtio M: Michael S. 
Tsirkin @@ -1895,6 +2013,7 @@ S: Supported F: hw/*/virtio* F: hw/virtio/Makefile.objs F: hw/virtio/trace-events +F: qapi/virtio.json F: net/vhost-user.c F: include/hw/virtio/ @@ -1902,6 +2021,7 @@ virtio-balloon M: Michael S. Tsirkin M: David Hildenbrand S: Maintained +F: docs/interop/virtio-balloon-stats.rst F: hw/virtio/virtio-balloon*.c F: include/hw/virtio/virtio-balloon.h F: softmmu/balloon.c @@ -1924,17 +2044,20 @@ virtio-blk M: Stefan Hajnoczi L: qemu-block@nongnu.org S: Supported +F: hw/block/virtio-blk-common.c F: hw/block/virtio-blk.c F: hw/block/dataplane/* +F: include/hw/virtio/virtio-blk-common.h F: tests/qtest/virtio-blk-test.c T: git https://github.com/stefanha/qemu.git block virtio-ccw M: Cornelia Huck M: Halil Pasic +M: Eric Farman S: Supported F: hw/s390x/virtio-ccw*.[hc] -F: hw/s390x/vhost-vsock-ccw.c +F: hw/s390x/vhost-*-ccw.c T: git https://gitlab.com/cohuck/qemu.git s390-next T: git https://github.com/borntraeger/qemu.git s390-next L: qemu-s390x@nongnu.org @@ -1991,6 +2114,14 @@ F: hw/virtio/vhost-user-rng-pci.c F: include/hw/virtio/vhost-user-rng.h F: tools/vhost-user-rng/* +vhost-user-gpio +M: Alex Bennée +R: Viresh Kumar +S: Maintained +F: hw/virtio/vhost-user-gpio* +F: include/hw/virtio/vhost-user-gpio.h +F: tests/qtest/libqos/virtio-gpio.* + virtio-crypto M: Gonglei S: Supported @@ -2050,14 +2181,6 @@ F: qapi/rocker.json F: tests/rocker/ F: docs/specs/rocker.txt -NVDIMM -M: Xiao Guangrong -S: Maintained -F: hw/acpi/nvdimm.c -F: hw/mem/nvdimm.c -F: include/hw/mem/nvdimm.h -F: docs/nvdimm.txt - e1000x M: Dmitry Fleytman S: Maintained @@ -2090,6 +2213,7 @@ Generic Loader M: Alistair Francis S: Maintained F: hw/core/generic-loader.c +F: hw/core/uboot_image.h F: include/hw/core/generic-loader.h F: docs/system/generic-loader.rst @@ -2098,7 +2222,7 @@ M: Alex Bennée S: Maintained F: hw/core/guest-loader.c F: docs/system/guest-loader.rst -F: tests/acceptance/boot_xen.py +F: tests/avocado/boot_xen.py Intel Hexadecimal Object File Loader M: Su Hang @@ -2115,6 +2239,7 @@ F: tests/qtest/prom-env-test.c VM Generation ID S: Orphan +R: Ani Sinha F: hw/acpi/vmgenid.c F: include/hw/acpi/vmgenid.h F: docs/specs/vmgenid.txt @@ -2122,21 +2247,23 @@ F: tests/qtest/vmgenid-test.c F: stubs/vmgenid.c LED -M: Philippe Mathieu-Daudé +M: Philippe Mathieu-Daudé S: Maintained F: include/hw/misc/led.h F: hw/misc/led.c Unimplemented device M: Peter Maydell -R: Philippe Mathieu-Daudé +R: Philippe Mathieu-Daudé +R: Ani Sinha S: Maintained F: include/hw/misc/unimp.h F: hw/misc/unimp.c Empty slot M: Artyom Tarasenko -R: Philippe Mathieu-Daudé +R: Philippe Mathieu-Daudé +R: Ani Sinha S: Maintained F: include/hw/misc/empty_slot.h F: hw/misc/empty_slot.c @@ -2168,11 +2295,13 @@ S: Maintained F: contrib/vhost-user-blk/ F: contrib/vhost-user-scsi/ F: hw/block/vhost-user-blk.c +F: hw/block/virtio-blk-common.c F: hw/scsi/vhost-user-scsi.c F: hw/virtio/vhost-user-blk-pci.c F: hw/virtio/vhost-user-scsi-pci.c F: include/hw/virtio/vhost-user-blk.h F: include/hw/virtio/vhost-user-scsi.h +F: include/hw/virtio/virtio-blk-common.h vhost-user-gpu M: Marc-André Lureau @@ -2197,13 +2326,13 @@ F: qemu-edid.c PIIX4 South Bridge (i82371AB) M: Hervé Poussineau -M: Philippe Mathieu-Daudé +M: Philippe Mathieu-Daudé S: Maintained F: hw/isa/piix4.c F: include/hw/southbridge/piix.h Firmware configuration (fw_cfg) -M: Philippe Mathieu-Daudé +M: Philippe Mathieu-Daudé R: Gerd Hoffmann S: Supported F: docs/specs/fw_cfg.txt @@ -2217,10 +2346,8 @@ T: git https://github.com/philmd/qemu.git fw_cfg-next XIVE M: Cédric Le Goater -R: 
David Gibson -R: Greg Kurz L: qemu-ppc@nongnu.org -S: Supported +S: Odd Fixes F: hw/*/*xive* F: include/hw/*/*xive* F: docs/*/*xive* @@ -2254,19 +2381,139 @@ F: net/can/* F: hw/net/can/* F: include/net/can_*.h +OpenPIC interrupt controller +M: Mark Cave-Ayland +S: Odd Fixes +F: hw/intc/openpic.c +F: include/hw/ppc/openpic.h + +MIPS CPS +M: Philippe Mathieu-Daudé +S: Odd Fixes +F: hw/misc/mips_* +F: include/hw/misc/mips_* + +MIPS GIC +M: Philippe Mathieu-Daudé +S: Odd Fixes +F: hw/intc/mips_gic.c +F: hw/timer/mips_gictimer.c +F: include/hw/intc/mips_gic.h +F: include/hw/timer/mips_gictimer.h + +S390 3270 device +M: Halil Pasic +M: Christian Borntraeger +S: Odd fixes +F: include/hw/s390x/3270-ccw.h +F: hw/char/terminal3270.c +F: hw/s390x/3270-ccw.c +L: qemu-s390x@nongnu.org + +S390 diag 288 watchdog +M: Halil Pasic +M: Christian Borntraeger +S: Supported +F: hw/watchdog/wdt_diag288.c +F: include/hw/watchdog/wdt_diag288.h +L: qemu-s390x@nongnu.org + +S390 storage key device +M: Halil Pasic +M: Christian Borntraeger +S: Supported +F: hw/s390x/storage-keys.h +F: hw/390x/s390-skeys*.c +L: qemu-s390x@nongnu.org + +S390 storage attribute device +M: Halil Pasic +M: Christian Borntraeger +S: Supported +F: hw/s390x/storage-attributes.h +F: hw/s390/s390-stattrib*.c +L: qemu-s390x@nongnu.org + +S390 floating interrupt controller +M: Halil Pasic +M: Christian Borntraeger +M: David Hildenbrand +S: Supported +F: hw/intc/s390_flic*.c +F: include/hw/s390x/s390_flic.h +L: qemu-s390x@nongnu.org + +CanoKey +M: Hongren (Zenithal) Zheng +S: Maintained +R: Canokeys.org +F: hw/usb/canokey.c +F: hw/usb/canokey.h +F: docs/system/devices/canokey.rst + Subsystems ---------- -Audio +Overall Audio backends M: Gerd Hoffmann S: Odd Fixes F: audio/ -F: hw/audio/ -F: include/hw/audio/ +X: audio/alsaaudio.c +X: audio/coreaudio.c +X: audio/dsound* +X: audio/jackaudio.c +X: audio/ossaudio.c +X: audio/paaudio.c +X: audio/sdlaudio.c +X: audio/sndioaudio.c +X: audio/spiceaudio.c F: qapi/audio.json -F: tests/qtest/ac97-test.c -F: tests/qtest/es1370-test.c -F: tests/qtest/intel-hda-test.c -F: tests/qtest/fuzz-sb16-test.c + +ALSA Audio backend +M: Gerd Hoffmann +R: Christian Schoenebeck +S: Odd Fixes +F: audio/alsaaudio.c + +Core Audio framework backend +M: Gerd Hoffmann +M: Philippe Mathieu-Daudé +R: Christian Schoenebeck +R: Akihiko Odaki +S: Odd Fixes +F: audio/coreaudio.c + +DSound Audio backend +M: Gerd Hoffmann +S: Odd Fixes +F: audio/dsound* + +JACK Audio Connection Kit backend +M: Gerd Hoffmann +R: Christian Schoenebeck +S: Odd Fixes +F: audio/jackaudio.c + +Open Sound System (OSS) Audio backend +M: Gerd Hoffmann +S: Odd Fixes +F: audio/ossaudio.c + +PulseAudio backend +M: Gerd Hoffmann +S: Odd Fixes +F: audio/paaudio.c + +SDL Audio backend +M: Gerd Hoffmann +R: Thomas Huth +S: Odd Fixes +F: audio/sdlaudio.c + +Sndio Audio backend +M: Gerd Hoffmann +R: Alexandre Ratchov +S: Odd Fixes +F: audio/sndioaudio.c Block layer core M: Kevin Wolf @@ -2276,7 +2523,10 @@ S: Supported F: block* F: block/ F: hw/block/ +F: qapi/block*.json +F: qapi/transaction.json F: include/block/ +F: include/sysemu/block-*.h F: qemu-img* F: docs/tools/qemu-img.rst F: qemu-io* @@ -2322,7 +2572,7 @@ F: scsi/* Block Jobs M: John Snow -M: Vladimir Sementsov-Ogievskiy +M: Vladimir Sementsov-Ogievskiy L: qemu-block@nongnu.org S: Supported F: blockjob.c @@ -2336,29 +2586,30 @@ F: block/stream.c F: block/mirror.c F: qapi/job.json F: block/block-copy.c -F: include/block/block-copy.c -F: block/backup-top.h -F: block/backup-top.c +F: include/block/block-copy.h +F: 
block/reqlist.c +F: include/block/reqlist.h +F: block/copy-before-write.h +F: block/copy-before-write.c +F: block/snapshot-access.c F: include/block/aio_task.h F: block/aio_task.c F: util/qemu-co-shared-resource.c F: include/qemu/co-shared-resource.h T: git https://gitlab.com/jsnow/qemu.git jobs -T: git https://src.openvz.org/scm/~vsementsov/qemu.git jobs +T: git https://gitlab.com/vsementsov/qemu.git block -Block QAPI, monitor, command line -M: Markus Armbruster +Compute Express Link +M: Ben Widawsky +M: Jonathan Cameron S: Supported -F: blockdev.c -F: blockdev-hmp-cmds.c -F: block/qapi.c -F: qapi/block*.json -F: qapi/transaction.json -T: git https://repo.or.cz/qemu/armbru.git block-next +F: hw/cxl/ +F: hw/mem/cxl_type3.c +F: include/hw/cxl/ Dirty Bitmaps M: Eric Blake -M: Vladimir Sementsov-Ogievskiy +M: Vladimir Sementsov-Ogievskiy R: John Snow L: qemu-block@nongnu.org S: Supported @@ -2372,6 +2623,7 @@ F: util/hbitmap.c F: tests/unit/test-hbitmap.c F: docs/interop/bitmaps.rst T: git https://repo.or.cz/qemu/ericb.git bitmaps +T: git https://gitlab.com/vsementsov/qemu.git block Character device backends M: Marc-André Lureau @@ -2442,17 +2694,19 @@ F: scripts/coccinelle/errp-guard.cocci GDB stub M: Alex Bennée -R: Philippe Mathieu-Daudé +R: Philippe Mathieu-Daudé S: Maintained -F: gdbstub* +F: gdbstub/* F: include/exec/gdbstub.h F: gdb-xml/ F: tests/tcg/multiarch/gdbstub/ +F: scripts/feature_to_c.sh Memory API M: Paolo Bonzini M: Peter Xu M: David Hildenbrand +R: Philippe Mathieu-Daudé S: Supported F: include/exec/ioport.h F: include/exec/memop.h @@ -2468,6 +2722,19 @@ F: softmmu/physmem.c F: include/exec/memory-internal.h F: scripts/coccinelle/memory-region-housekeeping.cocci +Memory devices +M: David Hildenbrand +M: Igor Mammedov +R: Xiao Guangrong +S: Supported +F: hw/mem/memory-device.c +F: hw/mem/nvdimm.c +F: hw/mem/pc-dimm.c +F: include/hw/mem/memory-device.h +F: include/hw/mem/nvdimm.h +F: include/hw/mem/pc-dimm.h +F: docs/nvdimm.txt + SPICE M: Gerd Hoffmann S: Odd Fixes @@ -2489,6 +2756,8 @@ F: util/drm.c Cocoa graphics M: Peter Maydell +M: Philippe Mathieu-Daudé +R: Akihiko Odaki S: Odd Fixes F: ui/cocoa.m @@ -2508,6 +2777,7 @@ F: softmmu/cpu-throttle.c F: softmmu/cpu-timers.c F: softmmu/icount.c F: softmmu/runstate-action.c +F: softmmu/runstate.c F: qapi/run-state.json Read, Copy, Update (RCU) @@ -2566,29 +2836,30 @@ F: backends/cryptodev*.c Python library M: John Snow M: Cleber Rosa -R: Eduardo Habkost +R: Beraldo Leal S: Maintained F: python/ T: git https://gitlab.com/jsnow/qemu.git python Python scripts -M: Eduardo Habkost +M: John Snow M: Cleber Rosa S: Odd Fixes F: scripts/*.py F: tests/*.py Benchmark util -M: Vladimir Sementsov-Ogievskiy +M: Vladimir Sementsov-Ogievskiy S: Maintained F: scripts/simplebench/ -T: git https://src.openvz.org/scm/~vsementsov/qemu.git simplebench +T: git https://gitlab.com/vsementsov/qemu.git simplebench Transactions helper -M: Vladimir Sementsov-Ogievskiy +M: Vladimir Sementsov-Ogievskiy S: Maintained F: include/qemu/transactions.h F: util/transactions.c +T: git https://gitlab.com/vsementsov/qemu.git block QAPI M: Markus Armbruster @@ -2637,6 +2908,7 @@ T: git https://repo.or.cz/qemu/armbru.git qapi-next QEMU Guest Agent M: Michael Roth +M: Konstantin Kostiuk S: Maintained F: qga/ F: docs/interop/qemu-ga.rst @@ -2645,10 +2917,18 @@ F: scripts/qemu-guest-agent/ F: tests/unit/test-qga.c T: git https://github.com/mdroth/qemu.git qga +QEMU Guest Agent Win32 +M: Konstantin Kostiuk +S: Maintained +F: qga/*win32* +F: qga/vss-win32/ +F: qga/installer/ 
+T: git https://github.com/kostyanf14/qemu.git qga-win32 + QOM M: Paolo Bonzini R: Daniel P. Berrange -R: Eduardo Habkost +R: Eduardo Habkost S: Supported F: docs/qdev-device-use.txt F: hw/core/qdev* @@ -2668,7 +2948,7 @@ F: tests/unit/check-qom-proplist.c F: tests/unit/test-qdev-global-props.c QOM boilerplate conversion script -M: Eduardo Habkost +M: Eduardo Habkost S: Maintained F: scripts/codeconverter/ @@ -2706,6 +2986,8 @@ R: Paolo Bonzini R: Bandan Das R: Stefan Hajnoczi R: Thomas Huth +R: Darren Kenny +R: Qiuhao Li S: Maintained F: tests/qtest/fuzz/ F: tests/qtest/fuzz-*test.c @@ -2723,7 +3005,6 @@ F: include/hw/registerfields.h SLIRP M: Samuel Thibault S: Maintained -F: slirp/ F: net/slirp.c F: include/net/slirp.h T: git https://people.debian.org/~sthibault/qemu.git slirp @@ -2788,17 +3069,22 @@ D-Bus M: Marc-André Lureau S: Maintained F: backends/dbus-vmstate.c -F: tests/dbus-vmstate* +F: ui/dbus* +F: audio/dbus* F: util/dbus.c +F: include/ui/dbus* F: include/qemu/dbus.h -F: docs/interop/dbus.rst -F: docs/interop/dbus-vmstate.rst +F: docs/interop/dbus* +F: docs/sphinx/dbus* +F: docs/sphinx/fakedbusdoc.py +F: tests/qtest/dbus* Seccomp -M: Eduardo Otubo -S: Supported +M: Daniel P. Berrange +S: Odd Fixes F: softmmu/qemu-seccomp.c F: include/sysemu/seccomp.h +F: tests/unit/test-seccomp.c Cryptography M: Daniel P. Berrange @@ -2883,7 +3169,7 @@ F: include/qemu/yank.h F: qapi/yank.json COLO Framework -M: zhanghailiang +M: Hailiang Zhang S: Maintained F: migration/colo* F: include/migration/colo.h @@ -2892,12 +3178,13 @@ F: docs/COLO-FT.txt COLO Proxy M: Zhang Chen -M: Li Zhijian +M: Li Zhijian S: Supported F: docs/colo-proxy.txt F: net/colo* F: net/filter-rewriter.c F: net/filter-mirror.c +F: tests/qtest/test-filter* Record/replay M: Pavel Dovgalyuk @@ -2910,8 +3197,9 @@ F: net/filter-replay.c F: include/sysemu/replay.h F: docs/replay.txt F: stubs/replay.c -F: tests/acceptance/replay_kernel.py -F: tests/acceptance/reverse_debugging.py +F: tests/avocado/replay_kernel.py +F: tests/avocado/replay_linux.py +F: tests/avocado/reverse_debugging.py F: qapi/replay.json IOVA Tree @@ -2937,16 +3225,29 @@ F: include/hw/i2c/smbus_master.h F: include/hw/i2c/smbus_slave.h F: include/hw/i2c/smbus_eeprom.h +PMBus +M: Titus Rwantare +S: Maintained +F: hw/i2c/pmbus_device.c +F: hw/sensor/adm1272.c +F: hw/sensor/isl_pmbus_vr.c +F: hw/sensor/max34451.c +F: include/hw/i2c/pmbus_device.h +F: include/hw/sensor/isl_pmbus_vr.h +F: tests/qtest/adm1272-test.c +F: tests/qtest/max34451-test.c +F: tests/qtest/isl_pmbus_vr-test.c + Firmware schema specifications -M: Philippe Mathieu-Daudé +M: Philippe Mathieu-Daudé R: Daniel P. 
Berrange R: Kashyap Chamarthy S: Maintained F: docs/interop/firmware.json EDK2 Firmware -M: Philippe Mathieu-Daudé -R: Gerd Hoffmann +M: Philippe Mathieu-Daudé +M: Gerd Hoffmann S: Supported F: hw/i386/*ovmf* F: pc-bios/descriptors/??-edk2-*.json @@ -2991,8 +3292,9 @@ Usermode Emulation Overall usermode emulation M: Riku Voipio S: Maintained -F: thunk.c F: accel/tcg/user-exec*.c +F: include/user/ +F: common-user/ BSD user M: Warner Losh @@ -3000,6 +3302,7 @@ R: Kyle Evans S: Maintained F: bsd-user/ F: configs/targets/*-bsd-user.mak +F: tests/vm/*bsd T: git https://github.com/qemu-bsd-user/qemu-bsd-user bsd-user-rebase-3.1 Linux user @@ -3028,7 +3331,7 @@ S: Maintained F: docs/devel/tcg-plugins.rst F: plugins/ F: tests/plugin/ -F: tests/acceptance/tcg_plugins.py +F: tests/avocado/tcg_plugins.py F: contrib/plugins/ AArch64 TCG target @@ -3036,24 +3339,25 @@ M: Richard Henderson S: Maintained L: qemu-arm@nongnu.org F: tcg/aarch64/ -F: disas/arm-a64.cc -F: disas/libvixl/ ARM TCG target M: Richard Henderson S: Maintained L: qemu-arm@nongnu.org F: tcg/arm/ -F: disas/arm.c i386 TCG target M: Richard Henderson S: Maintained F: tcg/i386/ -F: disas/i386.c + +LoongArch64 TCG target +M: WANG Xuerui +S: Maintained +F: tcg/loongarch64/ MIPS TCG target -M: Philippe Mathieu-Daudé +M: Philippe Mathieu-Daudé R: Aurelien Jarno R: Huacai Chen R: Jiaxun Yang @@ -3065,7 +3369,6 @@ PPC TCG target M: Richard Henderson S: Odd Fixes F: tcg/ppc/ -F: disas/ppc.c RISC-V TCG target M: Palmer Dabbelt @@ -3079,12 +3382,11 @@ S390 TCG target M: Richard Henderson S: Maintained F: tcg/s390/ -F: disas/s390.c L: qemu-s390x@nongnu.org SPARC TCG target S: Odd Fixes -F: tcg/sparc/ +F: tcg/sparc64/ F: disas/sparc.c TCI TCG target @@ -3121,6 +3423,12 @@ L: qemu-block@nongnu.org S: Maintained F: block/vdi.c +blkio +M: Stefan Hajnoczi +L: qemu-block@nongnu.org +S: Maintained +F: block/blkio.c + iSCSI M: Ronnie Sahlberg M: Paolo Bonzini @@ -3132,7 +3440,7 @@ F: block/iscsi-opts.c Network Block Device (NBD) M: Eric Blake -M: Vladimir Sementsov-Ogievskiy +M: Vladimir Sementsov-Ogievskiy L: qemu-block@nongnu.org S: Maintained F: block/nbd* @@ -3142,8 +3450,9 @@ F: qemu-nbd.* F: blockdev-nbd.c F: docs/interop/nbd.txt F: docs/tools/qemu-nbd.rst +F: tests/qemu-iotests/tests/*nbd* T: git https://repo.or.cz/qemu/ericb.git nbd -T: git https://src.openvz.org/scm/~vsementsov/qemu.git nbd +T: git https://gitlab.com/vsementsov/qemu.git block NFS M: Peter Lieven @@ -3177,7 +3486,7 @@ F: block/null.c NVMe Block Driver M: Stefan Hajnoczi R: Fam Zheng -R: Philippe Mathieu-Daudé +R: Philippe Mathieu-Daudé L: qemu-block@nongnu.org S: Supported F: block/nvme* @@ -3228,13 +3537,13 @@ F: block/dmg.c parallels M: Stefan Hajnoczi M: Denis V. 
Lunev -M: Vladimir Sementsov-Ogievskiy +M: Vladimir Sementsov-Ogievskiy L: qemu-block@nongnu.org S: Supported F: block/parallels.c F: block/parallels-ext.c F: docs/interop/parallels.txt -T: git https://src.openvz.org/scm/~vsementsov/qemu.git parallels +T: git https://gitlab.com/vsementsov/qemu.git block qed M: Stefan Hajnoczi @@ -3307,6 +3616,8 @@ M: Coiby Xu S: Maintained F: block/export/vhost-user-blk-server.c F: block/export/vhost-user-blk-server.h +F: block/export/virtio-blk-handler.c +F: block/export/virtio-blk-handler.h F: include/qemu/vhost-user-server.h F: tests/qtest/libqos/vhost-user-blk.c F: tests/qtest/libqos/vhost-user-blk.h @@ -3319,6 +3630,13 @@ L: qemu-block@nongnu.org S: Supported F: block/export/fuse.c +VDUSE library and block device exports +M: Xie Yongji +S: Maintained +F: subprojects/libvduse/ +F: block/export/vduse-blk.c +F: block/export/vduse-blk.h + Replication M: Wen Congyang M: Xie Changlong @@ -3344,6 +3662,7 @@ S: Maintained F: semihosting/ F: include/semihosting/ F: tests/tcg/multiarch/arm-compat-semi/ +F: tests/tcg/aarch64/system/semiheap.c Multi-process QEMU M: Elena Ufimtseva @@ -3368,6 +3687,11 @@ F: hw/remote/proxy-memory-listener.c F: include/hw/remote/proxy-memory-listener.h F: hw/remote/iohub.c F: include/hw/remote/iohub.h +F: subprojects/libvfio-user +F: hw/remote/vfio-user-obj.c +F: include/hw/remote/vfio-user-obj.h +F: hw/remote/iommu.c +F: include/hw/remote/iommu.h EBPF: M: Jason Wang @@ -3381,18 +3705,19 @@ Build and test automation ------------------------- Build and test automation, general continuous integration M: Alex Bennée -M: Philippe Mathieu-Daudé +M: Philippe Mathieu-Daudé M: Thomas Huth R: Wainer dos Santos Moschetta -R: Willian Rampazzo +R: Beraldo Leal S: Maintained -F: .github/lockdown.yml +F: .github/workflows/lockdown.yml F: .gitlab-ci.yml F: .gitlab-ci.d/ F: .travis.yml F: scripts/ci/ F: tests/docker/ F: tests/vm/ +F: tests/lcitool/ F: scripts/archive-source.sh W: https://gitlab.com/qemu-project/qemu/pipelines W: https://travis-ci.org/qemu/qemu @@ -3401,7 +3726,8 @@ FreeBSD Hosted Continuous Integration M: Ed Maste M: Li-Wen Hsu S: Maintained -F: .cirrus.yml +F: .gitlab-ci.d/cirrus/freebsd* +F: tests/vm/freebsd W: https://cirrus-ci.com/github/qemu/qemu Windows Hosted Continuous Integration @@ -3412,18 +3738,25 @@ W: https://cirrus-ci.com/github/qemu/qemu Guest Test Compilation Support M: Alex Bennée -R: Philippe Mathieu-Daudé +R: Philippe Mathieu-Daudé S: Maintained -F: tests/tcg/Makefile -F: tests/tcg/Makefile.include +F: tests/tcg/Makefile.target -Acceptance (Integration) Testing with the Avocado framework +Integration Testing with the Avocado framework W: https://trello.com/b/6Qi1pxVn/avocado-qemu R: Cleber Rosa -R: Philippe Mathieu-Daudé +R: Philippe Mathieu-Daudé R: Wainer dos Santos Moschetta +R: Beraldo Leal S: Odd Fixes -F: tests/acceptance/ +F: tests/avocado/ + +GitLab custom runner (Works On Arm Sponsored) +M: Alex Bennée +M: Philippe Mathieu-Daudé +S: Maintained +F: .gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml +F: .gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml Documentation ------------- @@ -3444,6 +3777,29 @@ F: docs/about/deprecated.rst Build System ------------ +Meson +M: Paolo Bonzini +R: Marc-André Lureau +R: Daniel P. 
Berrange +R: Thomas Huth +R: Philippe Mathieu-Daudé +S: Maintained +F: meson.build +F: meson_options.txt +F: scripts/meson-buildoptions.* +F: scripts/check_sparse.py +F: scripts/symlink-install-tree.py + +Top Level Makefile and configure +M: Paolo Bonzini +R: Alex Bennée +R: Thomas Huth +S: Maintained +F: Makefile +F: configure +F: scripts/mtest2make.py +F: tests/Makefile.include + GIT submodules M: Daniel P. Berrange S: Odd Fixes diff --git a/Makefile b/Makefile index 0e053f3be0..1270f698b4 100644 --- a/Makefile +++ b/Makefile @@ -42,6 +42,9 @@ configure: ; ifneq ($(wildcard config-host.mak),) include config-host.mak +include Makefile.prereqs +Makefile.prereqs: config-host.mak + git-submodule-update: .git-submodule-status: git-submodule-update config-host.mak Makefile: .git-submodule-status @@ -87,7 +90,7 @@ x := $(shell rm -rf meson-private meson-info meson-logs) endif # 1. ensure config-host.mak is up-to-date -config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/pc-bios $(SRC_PATH)/QEMU_VERSION +config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh $(SRC_PATH)/QEMU_VERSION @echo config-host.mak is out-of-date, running configure @if test -f meson-private/coredata.dat; then \ ./config.status --skip-meson; \ @@ -124,6 +127,12 @@ ifneq ($(MESON),) Makefile.mtest: build.ninja scripts/mtest2make.py $(MESON) introspect --targets --tests --benchmarks | $(PYTHON) scripts/mtest2make.py > $@ -include Makefile.mtest + +.PHONY: update-buildoptions +all update-buildoptions: $(SRC_PATH)/scripts/meson-buildoptions.sh +$(SRC_PATH)/scripts/meson-buildoptions.sh: $(SRC_PATH)/meson_options.txt + $(MESON) introspect --buildoptions $(SRC_PATH)/meson.build | $(PYTHON) \ + scripts/meson-buildoptions.py > $@.tmp && mv $@.tmp $@ endif # 4. Rules to bridge to other makefiles @@ -137,9 +146,9 @@ MAKE.q = $(findstring q,$(firstword $(filter-out --%,$(MAKEFLAGS)))) MAKE.nq = $(if $(word 2, $(MAKE.n) $(MAKE.q)),nq) NINJAFLAGS = $(if $V,-v) $(if $(MAKE.n), -n) $(if $(MAKE.k), -k0) \ $(filter-out -j, $(lastword -j1 $(filter -l% -j%, $(MAKEFLAGS)))) \ - + -d keepdepfile ninja-cmd-goals = $(or $(MAKECMDGOALS), all) -ninja-cmd-goals += $(foreach t, $(.tests), $(.test.deps.$t)) +ninja-cmd-goals += $(foreach g, $(MAKECMDGOALS), $(.ninja-goals.$g)) makefile-targets := build.ninja ctags TAGS cscope dist clean uninstall # "ninja -t targets" also lists all prerequisites. If build system @@ -153,15 +162,12 @@ $(ninja-targets): run-ninja # --output-sync line. 
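The new scripts/meson-buildoptions.sh rule above regenerates the file through a temporary and only renames it into place once the whole pipeline succeeds ("> $@.tmp && mv $@.tmp $@"), so an interrupted `meson introspect` run never leaves a truncated script behind. A minimal standalone sketch of that write-to-temp-then-rename pattern in C; the helper name and buffer size are illustrative, not part of the tree:

    #include <stdio.h>

    /* Write contents to "<path>.tmp" first and rename() only on success,
     * mirroring the "> $@.tmp && mv $@.tmp $@" shape of the Makefile rule:
     * readers either see the old file or the complete new one. */
    static int write_atomically(const char *path, const char *contents)
    {
        char tmp[4096];
        FILE *f;
        int write_failed;

        if (snprintf(tmp, sizeof(tmp), "%s.tmp", path) >= (int)sizeof(tmp)) {
            return -1;                       /* path too long for the sketch */
        }
        f = fopen(tmp, "w");
        if (!f) {
            return -1;
        }
        write_failed = (fputs(contents, f) == EOF);
        if (fclose(f) == EOF || write_failed) {
            remove(tmp);                     /* don't leave a partial file */
            return -1;
        }
        return rename(tmp, path);            /* atomic on POSIX filesystems */
    }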
run-ninja: config-host.mak ifneq ($(filter $(ninja-targets), $(ninja-cmd-goals)),) - +$(quiet-@)$(if $(MAKE.nq),@:, $(NINJA) -d keepdepfile \ - $(NINJAFLAGS) $(sort $(filter $(ninja-targets), $(ninja-cmd-goals))) | cat) + +$(if $(MAKE.nq),@:,$(quiet-@)$(NINJA) $(NINJAFLAGS) \ + $(sort $(filter $(ninja-targets), $(ninja-cmd-goals))) | cat) endif endif -# Force configure to re-run if the API symbols are updated ifeq ($(CONFIG_PLUGIN),y) -config-host.mak: $(SRC_PATH)/plugins/qemu-plugins.symbols - .PHONY: plugins plugins: $(call quiet-command,\ @@ -183,30 +189,27 @@ include $(SRC_PATH)/tests/Makefile.include all: recurse-all -ROM_DIRS = $(addprefix pc-bios/, $(ROMS)) -ROM_DIRS_RULES=$(foreach t, all clean, $(addsuffix /$(t), $(ROM_DIRS))) -# Only keep -O and -g cflags -.PHONY: $(ROM_DIRS_RULES) -$(ROM_DIRS_RULES): +ROMS_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(ROMS))) +.PHONY: $(ROMS_RULES) +$(ROMS_RULES): $(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" TARGET_DIR="$(dir $@)" $(notdir $@),) .PHONY: recurse-all recurse-clean -recurse-all: $(addsuffix /all, $(ROM_DIRS)) -recurse-clean: $(addsuffix /clean, $(ROM_DIRS)) +recurse-all: $(addsuffix /all, $(ROMS)) +recurse-clean: $(addsuffix /clean, $(ROMS)) +recurse-distclean: $(addsuffix /distclean, $(ROMS)) ###################################################################### clean: recurse-clean -$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean || : -$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) clean-ctlist || : -# avoid old build problems by removing potentially incorrect old files - rm -f config.mak op-i386.h opc-i386.h gen-op-i386.h op-arm.h opc-arm.h gen-op-arm.h - find . \( -name '*.so' -o -name '*.dll' -o -name '*.[oda]' \) -type f \ + find . \( -name '*.so' -o -name '*.dll' -o \ + -name '*.[oda]' -o -name '*.gcno' \) -type f \ ! -path ./roms/edk2/ArmPkg/Library/GccLto/liblto-aarch64.a \ ! 
-path ./roms/edk2/ArmPkg/Library/GccLto/liblto-arm.a \ -exec rm {} + - rm -f TAGS cscope.* *.pod *~ */*~ - rm -f fsdev/*.pod scsi/*.pod + rm -f TAGS cscope.* *~ */*~ VERSION = $(shell cat $(SRC_PATH)/QEMU_VERSION) @@ -215,12 +218,12 @@ dist: qemu-$(VERSION).tar.bz2 qemu-%.tar.bz2: $(SRC_PATH)/scripts/make-release "$(SRC_PATH)" "$(patsubst qemu-%.tar.bz2,%,$@)" -distclean: clean +distclean: clean recurse-distclean -$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean -g || : - rm -f config-host.mak config-host.h* config-poison.h - rm -f tests/tcg/config-*.mak - rm -f config-all-disas.mak config.status - rm -f roms/seabios/config.mak roms/vgabios/config.mak + rm -f config-host.mak Makefile.prereqs qemu-bundle + rm -f tests/tcg/*/config-target.mak tests/tcg/config-host.mak + rm -f config.status + rm -f roms/seabios/config.mak rm -f qemu-plugins-ld.symbols qemu-plugins-ld64.symbols rm -f *-config-target.h *-config-devices.mak *-config-devices.h rm -rf meson-private meson-logs meson-info compile_commands.json @@ -229,7 +232,8 @@ distclean: clean rm -f linux-headers/asm rm -Rf .sdk -find-src-path = find "$(SRC_PATH)/" -path "$(SRC_PATH)/meson" -prune -o \( -name "*.[chsS]" -o -name "*.[ch].inc" \) +find-src-path = find "$(SRC_PATH)" -path "$(SRC_PATH)/meson" -prune -o \ + -type l -prune -o \( -name "*.[chsS]" -o -name "*.[ch].inc" \) .PHONY: ctags ctags: @@ -250,7 +254,7 @@ gtags: "GTAGS", "Remove old $@ files") $(call quiet-command, \ (cd $(SRC_PATH) && \ - $(find-src-path) | gtags -f -), \ + $(find-src-path) -print | gtags -f -), \ "GTAGS", "Re-index $(SRC_PATH)") .PHONY: TAGS @@ -280,6 +284,7 @@ cscope: # Needed by "meson install" export DESTDIR +include $(SRC_PATH)/tests/lcitool/Makefile.include include $(SRC_PATH)/tests/docker/Makefile.include include $(SRC_PATH)/tests/vm/Makefile.include @@ -309,6 +314,7 @@ endif @echo 'Test targets:' $(call print-help,check,Run all tests (check-help for details)) $(call print-help,bench,Run all benchmarks) + $(call print-help,lcitool-help,Help about targets for managing build environment manifests) $(call print-help,docker-help,Help about targets running tests inside containers) $(call print-help,vm-help,Help about targets running tests inside VM) @echo '' diff --git a/QEMU_VERSION b/QEMU_VERSION index dfda3e0b4f..2bbaead448 100644 --- a/QEMU_VERSION +++ b/QEMU_VERSION @@ -1 +1 @@ -6.1.0 +7.2.4 diff --git a/README.rst b/README.rst index 79b19f1481..21df79ef43 100644 --- a/README.rst +++ b/README.rst @@ -39,7 +39,7 @@ Documentation can be found hosted online at current development version that is available at ``_ is generated from the ``docs/`` folder in the source tree, and is built by `Sphinx -_`. +`_. Building @@ -59,9 +59,9 @@ of other UNIX targets. The simple steps to build QEMU are: Additional information can also be found online via the QEMU website: -* ``_ -* ``_ -* ``_ +* ``_ +* ``_ +* ``_ Submitting patches @@ -78,14 +78,14 @@ format-patch' and/or 'git send-email' to format & send the mail to the qemu-devel@nongnu.org mailing list. All patches submitted must contain a 'Signed-off-by' line from the author. Patches should follow the guidelines set out in the `style section -` of +`_ of the Developers Guide. Additional information on submitting patches can be found online via the QEMU website -* ``_ -* ``_ +* ``_ +* ``_ The QEMU website is also maintained under source control. @@ -144,7 +144,7 @@ reported via GitLab. 
For additional information on bug reporting consult: -* ``_ +* ``_ ChangeLog @@ -168,4 +168,4 @@ main methods being email and IRC Information on additional methods of contacting the community can be found online via the QEMU website: -* ``_ +* ``_ diff --git a/accel/accel-common.c b/accel/accel-common.c index 7b8ec7e0f7..df72cc989a 100644 --- a/accel/accel-common.c +++ b/accel/accel-common.c @@ -49,6 +49,14 @@ AccelClass *accel_find(const char *opt_name) return ac; } +/* Return the name of the current accelerator */ +const char *current_accel_name(void) +{ + AccelClass *ac = ACCEL_GET_CLASS(current_accel()); + + return ac->name; +} + static void accel_init_cpu_int_aux(ObjectClass *klass, void *opaque) { CPUClass *cc = CPU_CLASS(klass); @@ -121,6 +129,16 @@ bool accel_cpu_realizefn(CPUState *cpu, Error **errp) return true; } +int accel_supported_gdbstub_sstep_flags(void) +{ + AccelState *accel = current_accel(); + AccelClass *acc = ACCEL_GET_CLASS(accel); + if (acc->gdbstub_supported_sstep_flags) { + return acc->gdbstub_supported_sstep_flags(); + } + return 0; +} + static const TypeInfo accel_cpu_type = { .name = TYPE_ACCEL_CPU, .parent = TYPE_OBJECT, diff --git a/accel/accel-softmmu.c b/accel/accel-softmmu.c index 67276e4f52..f9cdafb148 100644 --- a/accel/accel-softmmu.c +++ b/accel/accel-softmmu.c @@ -66,6 +66,7 @@ void accel_init_ops_interfaces(AccelClass *ac) { const char *ac_name; char *ops_name; + ObjectClass *oc; AccelOpsClass *ops; ac_name = object_class_get_name(OBJECT_CLASS(ac)); @@ -73,8 +74,13 @@ void accel_init_ops_interfaces(AccelClass *ac) ops_name = g_strdup_printf("%s" ACCEL_OPS_SUFFIX, ac_name); ops = ACCEL_OPS_CLASS(module_object_class_by_name(ops_name)); + oc = module_object_class_by_name(ops_name); + if (!oc) { + error_report("fatal: could not load module for type '%s'", ops_name); + exit(1); + } g_free(ops_name); - + ops = ACCEL_OPS_CLASS(oc); /* * all accelerators need to define ops, providing at least a mandatory * non-NULL create_vcpu_thread operation. 
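The accel-common.c hunks above add accel_supported_gdbstub_sstep_flags(), which follows a common QOM pattern: an accelerator class may leave the gdbstub_supported_sstep_flags hook unset, and the caller then falls back to a safe default. A small sketch of the same shape with plain function pointers; the struct and values here are simplified stand-ins, not QEMU's real definitions:

    #include <stdio.h>

    /* Simplified stand-in for an accelerator class with one optional hook. */
    typedef struct AccelClassSketch {
        const char *name;
        int (*gdbstub_supported_sstep_flags)(void);   /* may be NULL */
    } AccelClassSketch;

    static int tcg_sstep_flags(void) { return 0x7; }  /* made-up value */

    /* Same shape as accel_supported_gdbstub_sstep_flags(): use the hook
     * when the class provides one, otherwise report "no flags supported". */
    static int query_sstep_flags(const AccelClassSketch *acc)
    {
        return acc->gdbstub_supported_sstep_flags
               ? acc->gdbstub_supported_sstep_flags()
               : 0;
    }

    int main(void)
    {
        AccelClassSketch tcg = { "tcg", tcg_sstep_flags };
        AccelClassSketch qtest = { "qtest", NULL };

        printf("%s: 0x%x\n", tcg.name, (unsigned)query_sstep_flags(&tcg));
        printf("%s: 0x%x\n", qtest.name, (unsigned)query_sstep_flags(&qtest));
        return 0;
    }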
diff --git a/accel/dummy-cpus.c b/accel/dummy-cpus.c index 10429fdfb2..d6a1b8d0a2 100644 --- a/accel/dummy-cpus.c +++ b/accel/dummy-cpus.c @@ -21,8 +21,6 @@ static void *dummy_cpu_thread_fn(void *arg) { CPUState *cpu = arg; - sigset_t waitset; - int r; rcu_register_thread(); @@ -32,8 +30,13 @@ static void *dummy_cpu_thread_fn(void *arg) cpu->can_do_io = 1; current_cpu = cpu; +#ifndef _WIN32 + sigset_t waitset; + int r; + sigemptyset(&waitset); sigaddset(&waitset, SIG_IPI); +#endif /* signal CPU creation */ cpu_thread_signal_created(cpu); @@ -41,6 +44,7 @@ static void *dummy_cpu_thread_fn(void *arg) do { qemu_mutex_unlock_iothread(); +#ifndef _WIN32 do { int sig; r = sigwait(&waitset, &sig); @@ -49,6 +53,9 @@ static void *dummy_cpu_thread_fn(void *arg) perror("sigwait"); exit(1); } +#else + qemu_sem_wait(&cpu->sem); +#endif qemu_mutex_lock_iothread(); qemu_wait_io_event(cpu); } while (!cpu->unplug); @@ -69,4 +76,7 @@ void dummy_start_vcpu_thread(CPUState *cpu) cpu->cpu_index); qemu_thread_create(cpu->thread, thread_name, dummy_cpu_thread_fn, cpu, QEMU_THREAD_JOINABLE); +#ifdef _WIN32 + qemu_sem_init(&cpu->sem, 0); +#endif } diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c index d1691be989..24913ca9c4 100644 --- a/accel/hvf/hvf-accel-ops.c +++ b/accel/hvf/hvf-accel-ops.c @@ -60,6 +60,10 @@ HVFState *hvf_state; +#ifdef __aarch64__ +#define HV_VM_DEFAULT NULL +#endif + /* Memory slots */ hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size) @@ -116,11 +120,12 @@ static void hvf_set_phys_mem(MemoryRegionSection *section, bool add) { hvf_slot *mem; MemoryRegion *area = section->mr; - bool writeable = !area->readonly && !area->rom_device; + bool writable = !area->readonly && !area->rom_device; hv_memory_flags_t flags; + uint64_t page_size = qemu_real_host_page_size(); if (!memory_region_is_ram(area)) { - if (writeable) { + if (writable) { return; } else if (!memory_region_is_romd(area)) { /* @@ -131,6 +136,12 @@ static void hvf_set_phys_mem(MemoryRegionSection *section, bool add) } } + if (!QEMU_IS_ALIGNED(int128_get64(section->size), page_size) || + !QEMU_IS_ALIGNED(section->offset_within_address_space, page_size)) { + /* Not page aligned, so we can not map as RAM */ + add = false; + } + mem = hvf_find_overlap_slot( section->offset_within_address_space, int128_get64(section->size)); @@ -239,12 +250,12 @@ static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on) if (on) { slot->flags |= HVF_SLOT_LOG; hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size, - HV_MEMORY_READ); + HV_MEMORY_READ | HV_MEMORY_EXEC); /* stop tracking region*/ } else { slot->flags &= ~HVF_SLOT_LOG; hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size, - HV_MEMORY_READ | HV_MEMORY_WRITE); + HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC); } } @@ -291,6 +302,7 @@ static void hvf_region_del(MemoryListener *listener, } static MemoryListener hvf_memory_listener = { + .name = "hvf", .priority = 10, .region_add = hvf_region_add, .region_del = hvf_region_del, @@ -316,7 +328,7 @@ static int hvf_accel_init(MachineState *ms) s = g_new0(HVFState, 1); - s->num_slots = 32; + s->num_slots = ARRAY_SIZE(s->slots); for (x = 0; x < s->num_slots; ++x) { s->slots[x].size = 0; s->slots[x].slot_id = x; @@ -324,7 +336,8 @@ static int hvf_accel_init(MachineState *ms) hvf_state = s; memory_listener_register(&hvf_memory_listener, &address_space_memory); - return 0; + + return hvf_arch_init(); } static void hvf_accel_class_init(ObjectClass *oc, void *data) @@ -365,17 +378,20 @@ static int 
hvf_init_vcpu(CPUState *cpu) cpu->hvf = g_malloc0(sizeof(*cpu->hvf)); /* init cpu signals */ - sigset_t set; struct sigaction sigact; memset(&sigact, 0, sizeof(sigact)); sigact.sa_handler = dummy_signal; sigaction(SIG_IPI, &sigact, NULL); - pthread_sigmask(SIG_BLOCK, NULL, &set); - sigdelset(&set, SIG_IPI); + pthread_sigmask(SIG_BLOCK, NULL, &cpu->hvf->unblock_ipi_mask); + sigdelset(&cpu->hvf->unblock_ipi_mask, SIG_IPI); +#ifdef __aarch64__ + r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL); +#else r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT); +#endif cpu->vcpu_dirty = 1; assert_hvf_ok(r); @@ -451,6 +467,7 @@ static void hvf_accel_ops_class_init(ObjectClass *oc, void *data) AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); ops->create_vcpu_thread = hvf_start_vcpu_thread; + ops->kick_vcpu_thread = hvf_kick_vcpu_thread; ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset; ops->synchronize_post_init = hvf_cpu_synchronize_post_init; diff --git a/accel/hvf/hvf-all.c b/accel/hvf/hvf-all.c index f185b0830a..0043f4d308 100644 --- a/accel/hvf/hvf-all.c +++ b/accel/hvf/hvf-all.c @@ -9,7 +9,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/error-report.h" #include "sysemu/hvf.h" #include "sysemu/hvf_int.h" diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c index 7516c67a3f..fbf4fe3497 100644 --- a/accel/kvm/kvm-accel-ops.c +++ b/accel/kvm/kvm-accel-ops.c @@ -16,12 +16,14 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" +#include "sysemu/kvm.h" #include "sysemu/kvm_int.h" #include "sysemu/runstate.h" #include "sysemu/cpus.h" #include "qemu/guest-random.h" #include "qapi/error.h" +#include #include "kvm-cpus.h" static void *kvm_vcpu_thread_fn(void *arg) @@ -74,15 +76,34 @@ static void kvm_start_vcpu_thread(CPUState *cpu) cpu, QEMU_THREAD_JOINABLE); } +static bool kvm_vcpu_thread_is_idle(CPUState *cpu) +{ + return !kvm_halt_in_kernel(); +} + +static bool kvm_cpus_are_resettable(void) +{ + return !kvm_enabled() || kvm_cpu_check_are_resettable(); +} + static void kvm_accel_ops_class_init(ObjectClass *oc, void *data) { AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); ops->create_vcpu_thread = kvm_start_vcpu_thread; + ops->cpu_thread_is_idle = kvm_vcpu_thread_is_idle; + ops->cpus_are_resettable = kvm_cpus_are_resettable; ops->synchronize_post_reset = kvm_cpu_synchronize_post_reset; ops->synchronize_post_init = kvm_cpu_synchronize_post_init; ops->synchronize_state = kvm_cpu_synchronize_state; ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm; + +#ifdef KVM_CAP_SET_GUEST_DEBUG + ops->supports_guest_debug = kvm_supports_guest_debug; + ops->insert_breakpoint = kvm_insert_breakpoint; + ops->remove_breakpoint = kvm_remove_breakpoint; + ops->remove_all_breakpoints = kvm_remove_all_breakpoints; +#endif } static const TypeInfo kvm_accel_ops_type = { diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index 0125c17edb..f99b0becd8 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -45,8 +45,10 @@ #include "qemu/guest-random.h" #include "sysemu/hw_accel.h" #include "kvm-cpus.h" +#include "sysemu/dirtylimit.h" #include "hw/boards.h" +#include "monitor/stats.h" /* This check must be after config-host.h is included */ #ifdef CONFIG_EVENTFD @@ -59,7 +61,11 @@ #ifdef PAGE_SIZE #undef PAGE_SIZE #endif -#define PAGE_SIZE qemu_real_host_page_size +#define PAGE_SIZE qemu_real_host_page_size() + +#ifndef KVM_GUESTDBG_BLOCKIRQ +#define KVM_GUESTDBG_BLOCKIRQ 0 +#endif //#define DEBUG_KVM @@ 
-71,86 +77,12 @@ do { } while (0) #endif -#define KVM_MSI_HASHTAB_SIZE 256 - struct KVMParkedVcpu { unsigned long vcpu_id; int kvm_fd; QLIST_ENTRY(KVMParkedVcpu) node; }; -enum KVMDirtyRingReaperState { - KVM_DIRTY_RING_REAPER_NONE = 0, - /* The reaper is sleeping */ - KVM_DIRTY_RING_REAPER_WAIT, - /* The reaper is reaping for dirty pages */ - KVM_DIRTY_RING_REAPER_REAPING, -}; - -/* - * KVM reaper instance, responsible for collecting the KVM dirty bits - * via the dirty ring. - */ -struct KVMDirtyRingReaper { - /* The reaper thread */ - QemuThread reaper_thr; - volatile uint64_t reaper_iteration; /* iteration number of reaper thr */ - volatile enum KVMDirtyRingReaperState reaper_state; /* reap thr state */ -}; - -struct KVMState -{ - AccelState parent_obj; - - int nr_slots; - int fd; - int vmfd; - int coalesced_mmio; - int coalesced_pio; - struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; - bool coalesced_flush_in_progress; - int vcpu_events; - int robust_singlestep; - int debugregs; -#ifdef KVM_CAP_SET_GUEST_DEBUG - QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints; -#endif - int max_nested_state_len; - int many_ioeventfds; - int intx_set_mask; - int kvm_shadow_mem; - bool kernel_irqchip_allowed; - bool kernel_irqchip_required; - OnOffAuto kernel_irqchip_split; - bool sync_mmu; - uint64_t manual_dirty_log_protect; - /* The man page (and posix) say ioctl numbers are signed int, but - * they're not. Linux, glibc and *BSD all treat ioctl numbers as - * unsigned, and treating them as signed here can break things */ - unsigned irq_set_ioctl; - unsigned int sigmask_len; - GHashTable *gsimap; -#ifdef KVM_CAP_IRQ_ROUTING - struct kvm_irq_routing *irq_routes; - int nr_allocated_irq_routes; - unsigned long *used_gsi_bitmap; - unsigned int gsi_count; - QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE]; -#endif - KVMMemoryListener memory_listener; - QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus; - - /* For "info mtree -f" to tell if an MR is registered in KVM */ - int nr_as; - struct KVMAs { - KVMMemoryListener *ml; - AddressSpace *as; - } *as; - uint64_t kvm_dirty_ring_bytes; /* Size of the per-vcpu dirty ring */ - uint32_t kvm_dirty_ring_size; /* Number of dirty GFNs per ring */ - struct KVMDirtyRingReaper reaper; -}; - KVMState *kvm_state; bool kvm_kernel_irqchip; bool kvm_split_irqchip; @@ -168,6 +100,8 @@ bool kvm_vm_attributes_allowed; bool kvm_direct_msi_allowed; bool kvm_ioeventfd_any_length_allowed; bool kvm_msi_use_devid; +bool kvm_has_guest_debug; +static int kvm_sstep_flags; static bool kvm_immediate_exit; static hwaddr kvm_max_slot_size = ~0; @@ -318,14 +252,14 @@ static hwaddr kvm_align_section(MemoryRegionSection *section, with sub-page size and unaligned start address. Pad the start address to next and truncate size to previous page boundary. 
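The comment this hunk ends on describes kvm_align_section(): pad the start up to the next host page boundary and truncate the size down to the previous one, now expressed with the qemu_real_host_page_size()/qemu_real_host_page_mask() accessors. A standalone sketch of just that arithmetic; the names are illustrative and page_size is assumed to be a power of two:

    #include <stdint.h>

    /* Round start up to a page boundary and truncate the remaining size
     * down to whole pages.  Stores the padded start in *aligned_start and
     * returns the usable size, or 0 if the region is too small. */
    static uint64_t align_region(uint64_t start, uint64_t size,
                                 uint64_t page_size, uint64_t *aligned_start)
    {
        uint64_t mask = page_size - 1;              /* power of two assumed */
        uint64_t aligned = (start + mask) & ~mask;  /* ROUND_UP(start, page) */
        uint64_t delta = aligned - start;

        *aligned_start = aligned;
        if (delta > size) {
            return 0;
        }
        return (size - delta) & ~mask;              /* page-mask the tail */
    }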
*/ aligned = ROUND_UP(section->offset_within_address_space, - qemu_real_host_page_size); + qemu_real_host_page_size()); delta = aligned - section->offset_within_address_space; *start = aligned; if (delta > size) { return 0; } - return (size - delta) & qemu_real_host_page_mask; + return (size - delta) & qemu_real_host_page_mask(); } int kvm_physical_memory_addr_from_host(KVMState *s, void *ram, @@ -469,6 +403,8 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp) cpu->kvm_fd = ret; cpu->kvm_state = s; cpu->vcpu_dirty = true; + cpu->dirty_pages = 0; + cpu->throttle_us_per_full = 0; mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0); if (mmap_size < 0) { @@ -619,7 +555,7 @@ static void kvm_log_stop(MemoryListener *listener, static void kvm_slot_sync_dirty_pages(KVMSlot *slot) { ram_addr_t start = slot->ram_start_offset; - ram_addr_t pages = slot->memory_size / qemu_real_host_page_size; + ram_addr_t pages = slot->memory_size / qemu_real_host_page_size(); cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages); } @@ -655,7 +591,7 @@ static void kvm_slot_init_dirty_bitmap(KVMSlot *mem) * And mem->memory_size is aligned to it (otherwise this mem can't * be registered to KVM). */ - hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size, + hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(), /*HOST_LONG_BITS*/ 64) / 8; mem->dirty_bmap = g_malloc0(bitmap_size); mem->dirty_bmap_size = bitmap_size; @@ -700,7 +636,7 @@ static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id, mem = &kml->slots[slot_id]; if (!mem->memory_size || offset >= - (mem->memory_size / qemu_real_host_page_size)) { + (mem->memory_size / qemu_real_host_page_size())) { return; } @@ -709,12 +645,32 @@ static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id, static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn) { - return gfn->flags == KVM_DIRTY_GFN_F_DIRTY; + /* + * Read the flags before the value. Pairs with barrier in + * KVM's kvm_dirty_ring_push() function. + */ + return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY; } static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn) { - gfn->flags = KVM_DIRTY_GFN_F_RESET; + /* + * Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS + * sees the full content of the ring: + * + * CPU0 CPU1 CPU2 + * ------------------------------------------------------------------------------ + * fill gfn0 + * store-rel flags for gfn0 + * load-acq flags for gfn0 + * store-rel RESET for gfn0 + * ioctl(RESET_RINGS) + * load-acq flags for gfn0 + * check if flags have RESET + * + * The synchronization goes from CPU2 to CPU0 to CPU1. + */ + qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET); } /* @@ -743,22 +699,26 @@ static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu) count++; } cpu->kvm_fetch_index = fetch; + cpu->dirty_pages += count; return count; } /* Must be with slots_lock held */ -static uint64_t kvm_dirty_ring_reap_locked(KVMState *s) +static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState* cpu) { int ret; - CPUState *cpu; uint64_t total = 0; int64_t stamp; stamp = get_clock(); - CPU_FOREACH(cpu) { - total += kvm_dirty_ring_reap_one(s, cpu); + if (cpu) { + total = kvm_dirty_ring_reap_one(s, cpu); + } else { + CPU_FOREACH(cpu) { + total += kvm_dirty_ring_reap_one(s, cpu); + } } if (total) { @@ -779,7 +739,7 @@ static uint64_t kvm_dirty_ring_reap_locked(KVMState *s) * Currently for simplicity, we must hold BQL before calling this. 
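The dirty-ring hunks above replace plain flag accesses with qatomic_load_acquire()/qatomic_store_release(), so the harvester never reads a GFN entry before KVM has fully published it, and the RESET flag only becomes visible after the entry has been consumed. The same pairing in standalone C11 atomics; the struct layout and flag values are illustrative stand-ins for the kernel's kvm_dirty_gfn:

    #include <stdatomic.h>
    #include <stdint.h>

    enum { GFN_F_DIRTY = 1u << 0, GFN_F_RESET = 1u << 1 };   /* illustrative */

    struct gfn_entry {
        _Atomic uint32_t flags;
        uint64_t slot;       /* payload published by the producer */
        uint64_t offset;
    };

    /* Consumer side (cf. dirty_gfn_is_dirtied): the acquire load pairs with
     * the producer's release store, so slot/offset are only read after the
     * entry has been fully written. */
    static int gfn_is_dirtied(struct gfn_entry *e)
    {
        return atomic_load_explicit(&e->flags, memory_order_acquire)
               == GFN_F_DIRTY;
    }

    /* Harvester side (cf. dirty_gfn_set_collected): the release store keeps
     * all reads of the entry ordered before the RESET flag becomes visible
     * to whoever issues the reset ioctl. */
    static void gfn_set_collected(struct gfn_entry *e)
    {
        atomic_store_explicit(&e->flags, GFN_F_RESET, memory_order_release);
    }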
We can * consider to drop the BQL if we're clear with all the race conditions. */ -static uint64_t kvm_dirty_ring_reap(KVMState *s) +static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu) { uint64_t total; @@ -799,7 +759,7 @@ static uint64_t kvm_dirty_ring_reap(KVMState *s) * reset below. */ kvm_slots_lock(); - total = kvm_dirty_ring_reap_locked(s); + total = kvm_dirty_ring_reap_locked(s, cpu); kvm_slots_unlock(); return total; @@ -846,7 +806,7 @@ static void kvm_dirty_ring_flush(void) * vcpus out in a synchronous way. */ kvm_cpu_synchronize_kick_all(); - kvm_dirty_ring_reap(kvm_state); + kvm_dirty_ring_reap(kvm_state, NULL); trace_kvm_dirty_ring_flush(1); } @@ -887,7 +847,7 @@ static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml, /* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */ #define KVM_CLEAR_LOG_SHIFT 6 -#define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT) +#define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT) #define KVM_CLEAR_LOG_MASK (-KVM_CLEAR_LOG_ALIGN) static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start, @@ -896,7 +856,7 @@ static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start, KVMState *s = kvm_state; uint64_t end, bmap_start, start_delta, bmap_npages; struct kvm_clear_dirty_log d; - unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size; + unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size(); int ret; /* @@ -1129,6 +1089,7 @@ static void kvm_coalesce_pio_del(MemoryListener *listener, } static MemoryListener kvm_coalesced_pio_listener = { + .name = "kvm-coalesced-pio", .coalesced_io_add = kvm_coalesce_pio_add, .coalesced_io_del = kvm_coalesce_pio_del, }; @@ -1193,8 +1154,8 @@ void kvm_hwpoison_page_add(ram_addr_t ram_addr) static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size) { -#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) - /* The kernel expects ioeventfd values in HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN + /* The kernel expects ioeventfd values in HOST_BIG_ENDIAN * endianness, but the memory core hands them in target endianness. * For example, PPC is always treated as big-endian even if running * on KVM and on PPC64LE. Correct here. @@ -1326,7 +1287,7 @@ kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list) void kvm_set_max_memslot_size(hwaddr max_slot_size) { g_assert( - ROUND_UP(max_slot_size, qemu_real_host_page_size) == max_slot_size + ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size ); kvm_max_slot_size = max_slot_size; } @@ -1337,13 +1298,13 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml, KVMSlot *mem; int err; MemoryRegion *mr = section->mr; - bool writeable = !mr->readonly && !mr->rom_device; + bool writable = !mr->readonly && !mr->rom_device; hwaddr start_addr, size, slot_size, mr_offset; ram_addr_t ram_start_offset; void *ram; if (!memory_region_is_ram(mr)) { - if (writeable || !kvm_readonly_mem_allowed) { + if (writable || !kvm_readonly_mem_allowed) { return; } else if (!mr->romd_mode) { /* If the memory device is not in romd_mode, then we actually want @@ -1389,7 +1350,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml, * Not easy. Let's cross the fingers until it's fixed. 
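The adjust_ioeventfd_endianness() hunk above only changes the guard to HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN; per its comment, the kernel expects the value in host endianness while the memory core supplies it in target endianness. A standalone illustration of that kind of size-dependent swap; the switch body is an assumption made for the sketch, not the function body from the patch:

    #include <stdint.h>

    /* Swap an ioeventfd-style value only when host and target endianness
     * differ; 1-byte accesses need no adjustment. */
    static uint64_t adjust_value_endianness(uint64_t val, uint32_t size,
                                            int host_big, int target_big)
    {
        if (host_big == target_big) {
            return val;
        }
        switch (size) {
        case 2:
            return __builtin_bswap16((uint16_t)val);
        case 4:
            return __builtin_bswap32((uint32_t)val);
        case 8:
            return __builtin_bswap64(val);
        default:
            return val;      /* size 1: nothing to swap */
        }
    }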
*/ if (kvm_state->kvm_dirty_ring_size) { - kvm_dirty_ring_reap_locked(kvm_state); + kvm_dirty_ring_reap_locked(kvm_state, NULL); } else { kvm_slot_get_dirty_log(kvm_state, mem); } @@ -1457,11 +1418,16 @@ static void *kvm_dirty_ring_reaper_thread(void *data) */ sleep(1); + /* keep sleeping so that dirtylimit not be interfered by reaper */ + if (dirtylimit_in_service()) { + continue; + } + trace_kvm_dirty_ring_reaper("wakeup"); r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING; qemu_mutex_lock_iothread(); - kvm_dirty_ring_reap(s); + kvm_dirty_ring_reap(s, NULL); qemu_mutex_unlock_iothread(); r->reaper_iteration++; @@ -1633,11 +1599,11 @@ static void kvm_io_ioeventfd_del(MemoryListener *listener, } void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, - AddressSpace *as, int as_id) + AddressSpace *as, int as_id, const char *name) { int i; - kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot)); + kml->slots = g_new0(KVMSlot, s->nr_slots); kml->as_id = as_id; for (i = 0; i < s->nr_slots; i++) { @@ -1649,6 +1615,7 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, kml->listener.log_start = kvm_log_start; kml->listener.log_stop = kvm_log_stop; kml->listener.priority = 10; + kml->listener.name = name; if (s->kvm_dirty_ring_size) { kml->listener.log_sync_global = kvm_log_sync_global; @@ -1669,6 +1636,7 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, } static MemoryListener kvm_io_listener = { + .name = "kvm-io", .eventfd_add = kvm_io_ioeventfd_add, .eventfd_del = kvm_io_ioeventfd_del, .priority = 10, @@ -1930,7 +1898,7 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg) return virq; } - route = g_malloc0(sizeof(KVMMSIRoute)); + route = g_new0(KVMMSIRoute, 1); route->kroute.gsi = virq; route->kroute.type = KVM_IRQ_ROUTING_MSI; route->kroute.flags = 0; @@ -1950,10 +1918,11 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg) return kvm_set_irq(s, route->kroute.gsi, 1); } -int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev) +int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev) { struct kvm_irq_routing_entry kroute = {}; int virq; + KVMState *s = c->s; MSIMessage msg = {0, 0}; if (pci_available && dev) { @@ -1993,7 +1962,7 @@ int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev) kvm_add_routing_entry(s, &kroute); kvm_arch_add_msi_route_post(&kroute, vector, dev); - kvm_irqchip_commit_routes(s); + c->changes++; return virq; } @@ -2151,7 +2120,7 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg) abort(); } -int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev) +int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev) { return -ENOSYS; } @@ -2242,7 +2211,7 @@ static void kvm_irqchip_create(KVMState *s) ret = kvm_arch_irqchip_create(s); if (ret == 0) { if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) { - perror("Split IRQ chip mode not supported."); + error_report("Split IRQ chip mode not supported."); exit(1); } else { ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP); @@ -2293,6 +2262,20 @@ bool kvm_vcpu_id_is_valid(int vcpu_id) return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s); } +bool kvm_dirty_ring_enabled(void) +{ + return kvm_state->kvm_dirty_ring_size ? 
true : false; +} + +static void query_stats_cb(StatsResultList **result, StatsTarget target, + strList *names, strList *targets, Error **errp); +static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp); + +uint32_t kvm_dirty_ring_size(void) +{ + return kvm_state->kvm_dirty_ring_size; +} + static int kvm_init(MachineState *ms) { MachineClass *mc = MACHINE_GET_CLASS(ms); @@ -2324,7 +2307,7 @@ static int kvm_init(MachineState *ms) * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum * page size for the system though. */ - assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size); + assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size()); s->sigmask_len = 8; @@ -2469,7 +2452,7 @@ static int kvm_init(MachineState *ms) ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING, 0, ring_bytes); if (ret) { error_report("Enabling of KVM dirty ring failed: %s. " - "Suggested mininum value is 1024.", strerror(-ret)); + "Suggested minimum value is 1024.", strerror(-ret)); goto err; } @@ -2554,6 +2537,25 @@ static int kvm_init(MachineState *ms) kvm_ioeventfd_any_length_allowed = (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0); +#ifdef KVM_CAP_SET_GUEST_DEBUG + kvm_has_guest_debug = + (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0); +#endif + + kvm_sstep_flags = 0; + if (kvm_has_guest_debug) { + kvm_sstep_flags = SSTEP_ENABLE; + +#if defined KVM_CAP_SET_GUEST_DEBUG2 + int guest_debug_flags = + kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG2); + + if (guest_debug_flags & KVM_GUESTDBG_BLOCKIRQ) { + kvm_sstep_flags |= SSTEP_NOIRQ; + } +#endif + } + kvm_state = s; ret = kvm_arch_init(ms, s); @@ -2579,7 +2581,7 @@ static int kvm_init(MachineState *ms) s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region; kvm_memory_listener_register(s, &s->memory_listener, - &address_space_memory, 0); + &address_space_memory, 0, "kvm-memory"); if (kvm_eventfds_allowed) { memory_listener_register(&kvm_io_listener, &address_space_io); @@ -2602,6 +2604,11 @@ static int kvm_init(MachineState *ms) } } + if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) { + add_stats_callbacks(STATS_PROVIDER_KVM, query_stats_cb, + query_stats_schemas_cb); + } + return 0; err: @@ -2921,8 +2928,19 @@ int kvm_cpu_exec(CPUState *cpu) */ trace_kvm_dirty_ring_full(cpu->cpu_index); qemu_mutex_lock_iothread(); - kvm_dirty_ring_reap(kvm_state); + /* + * We throttle vCPU by making it sleep once it exit from kernel + * due to dirty ring full. In the dirtylimit scenario, reaping + * all vCPUs after a single vCPU dirty ring get full result in + * the miss of sleep, so just reap the ring-fulled vCPU. 
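kvm_init() above derives the gdbstub single-step flags from two capability probes: SSTEP_ENABLE whenever KVM_CAP_SET_GUEST_DEBUG is present, plus SSTEP_NOIRQ when the KVM_CAP_SET_GUEST_DEBUG2 flags include KVM_GUESTDBG_BLOCKIRQ. The same derivation as a small pure function; the flag constants below are placeholders, the real values come from QEMU and linux/kvm.h:

    /* Placeholder values for the sketch only. */
    #define SK_SSTEP_ENABLE       0x1
    #define SK_SSTEP_NOIRQ        0x2
    #define SK_GUESTDBG_BLOCKIRQ  (1 << 8)

    /* has_guest_debug: KVM_CAP_SET_GUEST_DEBUG probe result.
     * guest_debug2:    flags returned for KVM_CAP_SET_GUEST_DEBUG2,
     *                  0 if the capability is absent. */
    static int derive_sstep_flags(int has_guest_debug, int guest_debug2)
    {
        int flags = 0;

        if (has_guest_debug) {
            flags = SK_SSTEP_ENABLE;
            if (guest_debug2 & SK_GUESTDBG_BLOCKIRQ) {
                flags |= SK_SSTEP_NOIRQ;
            }
        }
        return flags;
    }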
+ */ + if (dirtylimit_in_service()) { + kvm_dirty_ring_reap(kvm_state, cpu); + } else { + kvm_dirty_ring_reap(kvm_state, NULL); + } qemu_mutex_unlock_iothread(); + dirtylimit_vcpu_execute(cpu); ret = 0; break; case KVM_EXIT_SYSTEM_EVENT: @@ -3183,6 +3201,10 @@ int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap) if (cpu->singlestep_enabled) { data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP; + + if (cpu->singlestep_enabled & SSTEP_NOIRQ) { + data.dbg.control |= KVM_GUESTDBG_BLOCKIRQ; + } } kvm_arch_update_guest_debug(cpu, &data.dbg); @@ -3191,8 +3213,13 @@ int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap) return data.err; } -int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr, - target_ulong len, int type) +bool kvm_supports_guest_debug(void) +{ + /* probed during kvm_init() */ + return kvm_has_guest_debug; +} + +int kvm_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len) { struct kvm_sw_breakpoint *bp; int err; @@ -3204,7 +3231,7 @@ int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr, return 0; } - bp = g_malloc(sizeof(struct kvm_sw_breakpoint)); + bp = g_new(struct kvm_sw_breakpoint, 1); bp->pc = addr; bp->use_count = 1; err = kvm_arch_insert_sw_breakpoint(cpu, bp); @@ -3230,8 +3257,7 @@ int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr, return 0; } -int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr, - target_ulong len, int type) +int kvm_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len) { struct kvm_sw_breakpoint *bp; int err; @@ -3295,28 +3321,6 @@ void kvm_remove_all_breakpoints(CPUState *cpu) } } -#else /* !KVM_CAP_SET_GUEST_DEBUG */ - -int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap) -{ - return -EINVAL; -} - -int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr, - target_ulong len, int type) -{ - return -EINVAL; -} - -int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr, - target_ulong len, int type) -{ - return -EINVAL; -} - -void kvm_remove_all_breakpoints(CPUState *cpu) -{ -} #endif /* !KVM_CAP_SET_GUEST_DEBUG */ static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset) @@ -3614,6 +3618,19 @@ static void kvm_accel_instance_init(Object *obj) s->kernel_irqchip_split = ON_OFF_AUTO_AUTO; /* KVM dirty ring is by default off */ s->kvm_dirty_ring_size = 0; + s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN; + s->notify_window = 0; +} + +/** + * kvm_gdbstub_sstep_flags(): + * + * Returns: SSTEP_* flags that KVM supports for guest debug. The + * support is probed during kvm_init() + */ +static int kvm_gdbstub_sstep_flags(void) +{ + return kvm_sstep_flags; } static void kvm_accel_class_init(ObjectClass *oc, void *data) @@ -3623,6 +3640,7 @@ static void kvm_accel_class_init(ObjectClass *oc, void *data) ac->init_machine = kvm_init; ac->has_memory = kvm_accel_has_memory; ac->allowed = &kvm_allowed; + ac->gdbstub_supported_sstep_flags = kvm_gdbstub_sstep_flags; object_class_property_add(oc, "kernel-irqchip", "on|off|split", NULL, kvm_set_kernel_irqchip, @@ -3641,6 +3659,8 @@ static void kvm_accel_class_init(ObjectClass *oc, void *data) NULL, NULL); object_class_property_set_description(oc, "dirty-ring-size", "Size of KVM dirty page ring buffer (default: 0, i.e. 
use bitmap)"); + + kvm_arch_accel_class_init(oc); } static const TypeInfo kvm_accel_type = { @@ -3657,3 +3677,406 @@ static void kvm_type_init(void) } type_init(kvm_type_init); + +typedef struct StatsArgs { + union StatsResultsType { + StatsResultList **stats; + StatsSchemaList **schema; + } result; + strList *names; + Error **errp; +} StatsArgs; + +static StatsList *add_kvmstat_entry(struct kvm_stats_desc *pdesc, + uint64_t *stats_data, + StatsList *stats_list, + Error **errp) +{ + + Stats *stats; + uint64List *val_list = NULL; + + /* Only add stats that we understand. */ + switch (pdesc->flags & KVM_STATS_TYPE_MASK) { + case KVM_STATS_TYPE_CUMULATIVE: + case KVM_STATS_TYPE_INSTANT: + case KVM_STATS_TYPE_PEAK: + case KVM_STATS_TYPE_LINEAR_HIST: + case KVM_STATS_TYPE_LOG_HIST: + break; + default: + return stats_list; + } + + switch (pdesc->flags & KVM_STATS_UNIT_MASK) { + case KVM_STATS_UNIT_NONE: + case KVM_STATS_UNIT_BYTES: + case KVM_STATS_UNIT_CYCLES: + case KVM_STATS_UNIT_SECONDS: + case KVM_STATS_UNIT_BOOLEAN: + break; + default: + return stats_list; + } + + switch (pdesc->flags & KVM_STATS_BASE_MASK) { + case KVM_STATS_BASE_POW10: + case KVM_STATS_BASE_POW2: + break; + default: + return stats_list; + } + + /* Alloc and populate data list */ + stats = g_new0(Stats, 1); + stats->name = g_strdup(pdesc->name); + stats->value = g_new0(StatsValue, 1);; + + if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) { + stats->value->u.boolean = *stats_data; + stats->value->type = QTYPE_QBOOL; + } else if (pdesc->size == 1) { + stats->value->u.scalar = *stats_data; + stats->value->type = QTYPE_QNUM; + } else { + int i; + for (i = 0; i < pdesc->size; i++) { + QAPI_LIST_PREPEND(val_list, stats_data[i]); + } + stats->value->u.list = val_list; + stats->value->type = QTYPE_QLIST; + } + + QAPI_LIST_PREPEND(stats_list, stats); + return stats_list; +} + +static StatsSchemaValueList *add_kvmschema_entry(struct kvm_stats_desc *pdesc, + StatsSchemaValueList *list, + Error **errp) +{ + StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1); + schema_entry->value = g_new0(StatsSchemaValue, 1); + + switch (pdesc->flags & KVM_STATS_TYPE_MASK) { + case KVM_STATS_TYPE_CUMULATIVE: + schema_entry->value->type = STATS_TYPE_CUMULATIVE; + break; + case KVM_STATS_TYPE_INSTANT: + schema_entry->value->type = STATS_TYPE_INSTANT; + break; + case KVM_STATS_TYPE_PEAK: + schema_entry->value->type = STATS_TYPE_PEAK; + break; + case KVM_STATS_TYPE_LINEAR_HIST: + schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM; + schema_entry->value->bucket_size = pdesc->bucket_size; + schema_entry->value->has_bucket_size = true; + break; + case KVM_STATS_TYPE_LOG_HIST: + schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM; + break; + default: + goto exit; + } + + switch (pdesc->flags & KVM_STATS_UNIT_MASK) { + case KVM_STATS_UNIT_NONE: + break; + case KVM_STATS_UNIT_BOOLEAN: + schema_entry->value->has_unit = true; + schema_entry->value->unit = STATS_UNIT_BOOLEAN; + break; + case KVM_STATS_UNIT_BYTES: + schema_entry->value->has_unit = true; + schema_entry->value->unit = STATS_UNIT_BYTES; + break; + case KVM_STATS_UNIT_CYCLES: + schema_entry->value->has_unit = true; + schema_entry->value->unit = STATS_UNIT_CYCLES; + break; + case KVM_STATS_UNIT_SECONDS: + schema_entry->value->has_unit = true; + schema_entry->value->unit = STATS_UNIT_SECONDS; + break; + default: + goto exit; + } + + schema_entry->value->exponent = pdesc->exponent; + if (pdesc->exponent) { + switch (pdesc->flags & KVM_STATS_BASE_MASK) { + case 
KVM_STATS_BASE_POW10: + schema_entry->value->has_base = true; + schema_entry->value->base = 10; + break; + case KVM_STATS_BASE_POW2: + schema_entry->value->has_base = true; + schema_entry->value->base = 2; + break; + default: + goto exit; + } + } + + schema_entry->value->name = g_strdup(pdesc->name); + schema_entry->next = list; + return schema_entry; +exit: + g_free(schema_entry->value); + g_free(schema_entry); + return list; +} + +/* Cached stats descriptors */ +typedef struct StatsDescriptors { + const char *ident; /* cache key, currently the StatsTarget */ + struct kvm_stats_desc *kvm_stats_desc; + struct kvm_stats_header kvm_stats_header; + QTAILQ_ENTRY(StatsDescriptors) next; +} StatsDescriptors; + +static QTAILQ_HEAD(, StatsDescriptors) stats_descriptors = + QTAILQ_HEAD_INITIALIZER(stats_descriptors); + +/* + * Return the descriptors for 'target', that either have already been read + * or are retrieved from 'stats_fd'. + */ +static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd, + Error **errp) +{ + StatsDescriptors *descriptors; + const char *ident; + struct kvm_stats_desc *kvm_stats_desc; + struct kvm_stats_header *kvm_stats_header; + size_t size_desc; + ssize_t ret; + + ident = StatsTarget_str(target); + QTAILQ_FOREACH(descriptors, &stats_descriptors, next) { + if (g_str_equal(descriptors->ident, ident)) { + return descriptors; + } + } + + descriptors = g_new0(StatsDescriptors, 1); + + /* Read stats header */ + kvm_stats_header = &descriptors->kvm_stats_header; + ret = read(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header)); + if (ret != sizeof(*kvm_stats_header)) { + error_setg(errp, "KVM stats: failed to read stats header: " + "expected %zu actual %zu", + sizeof(*kvm_stats_header), ret); + g_free(descriptors); + return NULL; + } + size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size; + + /* Read stats descriptors */ + kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc); + ret = pread(stats_fd, kvm_stats_desc, + size_desc * kvm_stats_header->num_desc, + kvm_stats_header->desc_offset); + + if (ret != size_desc * kvm_stats_header->num_desc) { + error_setg(errp, "KVM stats: failed to read stats descriptors: " + "expected %zu actual %zu", + size_desc * kvm_stats_header->num_desc, ret); + g_free(descriptors); + g_free(kvm_stats_desc); + return NULL; + } + descriptors->kvm_stats_desc = kvm_stats_desc; + descriptors->ident = ident; + QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next); + return descriptors; +} + +static void query_stats(StatsResultList **result, StatsTarget target, + strList *names, int stats_fd, Error **errp) +{ + struct kvm_stats_desc *kvm_stats_desc; + struct kvm_stats_header *kvm_stats_header; + StatsDescriptors *descriptors; + g_autofree uint64_t *stats_data = NULL; + struct kvm_stats_desc *pdesc; + StatsList *stats_list = NULL; + size_t size_desc, size_data = 0; + ssize_t ret; + int i; + + descriptors = find_stats_descriptors(target, stats_fd, errp); + if (!descriptors) { + return; + } + + kvm_stats_header = &descriptors->kvm_stats_header; + kvm_stats_desc = descriptors->kvm_stats_desc; + size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size; + + /* Tally the total data size; read schema data */ + for (i = 0; i < kvm_stats_header->num_desc; ++i) { + pdesc = (void *)kvm_stats_desc + i * size_desc; + size_data += pdesc->size * sizeof(*stats_data); + } + + stats_data = g_malloc0(size_data); + ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset); + + if (ret != 
size_data) { + error_setg(errp, "KVM stats: failed to read data: " + "expected %zu actual %zu", size_data, ret); + return; + } + + for (i = 0; i < kvm_stats_header->num_desc; ++i) { + uint64_t *stats; + pdesc = (void *)kvm_stats_desc + i * size_desc; + + /* Add entry to the list */ + stats = (void *)stats_data + pdesc->offset; + if (!apply_str_list_filter(pdesc->name, names)) { + continue; + } + stats_list = add_kvmstat_entry(pdesc, stats, stats_list, errp); + } + + if (!stats_list) { + return; + } + + switch (target) { + case STATS_TARGET_VM: + add_stats_entry(result, STATS_PROVIDER_KVM, NULL, stats_list); + break; + case STATS_TARGET_VCPU: + add_stats_entry(result, STATS_PROVIDER_KVM, + current_cpu->parent_obj.canonical_path, + stats_list); + break; + default: + g_assert_not_reached(); + } +} + +static void query_stats_schema(StatsSchemaList **result, StatsTarget target, + int stats_fd, Error **errp) +{ + struct kvm_stats_desc *kvm_stats_desc; + struct kvm_stats_header *kvm_stats_header; + StatsDescriptors *descriptors; + struct kvm_stats_desc *pdesc; + StatsSchemaValueList *stats_list = NULL; + size_t size_desc; + int i; + + descriptors = find_stats_descriptors(target, stats_fd, errp); + if (!descriptors) { + return; + } + + kvm_stats_header = &descriptors->kvm_stats_header; + kvm_stats_desc = descriptors->kvm_stats_desc; + size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size; + + /* Tally the total data size; read schema data */ + for (i = 0; i < kvm_stats_header->num_desc; ++i) { + pdesc = (void *)kvm_stats_desc + i * size_desc; + stats_list = add_kvmschema_entry(pdesc, stats_list, errp); + } + + add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list); +} + +static void query_stats_vcpu(CPUState *cpu, run_on_cpu_data data) +{ + StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr; + int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL); + Error *local_err = NULL; + + if (stats_fd == -1) { + error_setg_errno(&local_err, errno, "KVM stats: ioctl failed"); + error_propagate(kvm_stats_args->errp, local_err); + return; + } + query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU, + kvm_stats_args->names, stats_fd, kvm_stats_args->errp); + close(stats_fd); +} + +static void query_stats_schema_vcpu(CPUState *cpu, run_on_cpu_data data) +{ + StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr; + int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL); + Error *local_err = NULL; + + if (stats_fd == -1) { + error_setg_errno(&local_err, errno, "KVM stats: ioctl failed"); + error_propagate(kvm_stats_args->errp, local_err); + return; + } + query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd, + kvm_stats_args->errp); + close(stats_fd); +} + +static void query_stats_cb(StatsResultList **result, StatsTarget target, + strList *names, strList *targets, Error **errp) +{ + KVMState *s = kvm_state; + CPUState *cpu; + int stats_fd; + + switch (target) { + case STATS_TARGET_VM: + { + stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL); + if (stats_fd == -1) { + error_setg_errno(errp, errno, "KVM stats: ioctl failed"); + return; + } + query_stats(result, target, names, stats_fd, errp); + close(stats_fd); + break; + } + case STATS_TARGET_VCPU: + { + StatsArgs stats_args; + stats_args.result.stats = result; + stats_args.names = names; + stats_args.errp = errp; + CPU_FOREACH(cpu) { + if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) { + continue; + } + run_on_cpu(cpu, query_stats_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args)); + } + 
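find_stats_descriptors() and query_stats() above follow the layout of a KVM binary-stats file descriptor: a fixed header, a descriptor table at desc_offset where each entry is sizeof(struct kvm_stats_desc) plus name_size bytes of inline name, and the values at data_offset. A condensed sketch of the first two reads, assuming a linux/kvm.h recent enough to define these structures:

    #include <stdlib.h>
    #include <unistd.h>
    #include <linux/kvm.h>

    /* Read the header and the descriptor table from a stats fd obtained
     * via KVM_GET_STATS_FD.  Returns a buffer the caller must free(),
     * or NULL on a short read or allocation failure. */
    static struct kvm_stats_desc *read_descriptors(int stats_fd,
                                                   struct kvm_stats_header *hdr)
    {
        struct kvm_stats_desc *desc;
        size_t size_desc;

        if (read(stats_fd, hdr, sizeof(*hdr)) != sizeof(*hdr)) {
            return NULL;
        }

        /* Each descriptor carries its name inline after the fixed part. */
        size_desc = sizeof(*desc) + hdr->name_size;
        desc = calloc(hdr->num_desc, size_desc);
        if (!desc) {
            return NULL;
        }
        if (pread(stats_fd, desc, size_desc * hdr->num_desc,
                  hdr->desc_offset) != (ssize_t)(size_desc * hdr->num_desc)) {
            free(desc);
            return NULL;
        }
        return desc;
    }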
break; + } + default: + break; + } +} + +void query_stats_schemas_cb(StatsSchemaList **result, Error **errp) +{ + StatsArgs stats_args; + KVMState *s = kvm_state; + int stats_fd; + + stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL); + if (stats_fd == -1) { + error_setg_errno(errp, errno, "KVM stats: ioctl failed"); + return; + } + query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp); + close(stats_fd); + + if (first_cpu) { + stats_args.result.schema = result; + stats_args.errp = errp; + run_on_cpu(first_cpu, query_stats_schema_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args)); + } +} diff --git a/accel/kvm/kvm-cpus.h b/accel/kvm/kvm-cpus.h index bf0bd1bee4..fd63fe6a59 100644 --- a/accel/kvm/kvm-cpus.h +++ b/accel/kvm/kvm-cpus.h @@ -18,5 +18,9 @@ void kvm_destroy_vcpu(CPUState *cpu); void kvm_cpu_synchronize_post_reset(CPUState *cpu); void kvm_cpu_synchronize_post_init(CPUState *cpu); void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu); +bool kvm_supports_guest_debug(void); +int kvm_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len); +int kvm_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len); +void kvm_remove_all_breakpoints(CPUState *cpu); #endif /* KVM_CPUS_H */ diff --git a/accel/kvm/meson.build b/accel/kvm/meson.build index 8d219bea50..397a1fe1fd 100644 --- a/accel/kvm/meson.build +++ b/accel/kvm/meson.build @@ -3,6 +3,5 @@ kvm_ss.add(files( 'kvm-all.c', 'kvm-accel-ops.c', )) -kvm_ss.add(when: 'CONFIG_SEV', if_false: files('sev-stub.c')) specific_ss.add_all(when: 'CONFIG_KVM', if_true: kvm_ss) diff --git a/accel/meson.build b/accel/meson.build index dfd808d2c8..259c35c4c8 100644 --- a/accel/meson.build +++ b/accel/meson.build @@ -2,17 +2,19 @@ specific_ss.add(files('accel-common.c')) softmmu_ss.add(files('accel-softmmu.c')) user_ss.add(files('accel-user.c')) -subdir('hvf') -subdir('qtest') -subdir('kvm') subdir('tcg') -subdir('xen') -subdir('stubs') +if have_system + subdir('hvf') + subdir('qtest') + subdir('kvm') + subdir('xen') + subdir('stubs') +endif dummy_ss = ss.source_set() dummy_ss.add(files( 'dummy-cpus.c', )) -specific_ss.add_all(when: ['CONFIG_SOFTMMU', 'CONFIG_POSIX'], if_true: dummy_ss) +specific_ss.add_all(when: ['CONFIG_SOFTMMU'], if_true: dummy_ss) specific_ss.add_all(when: ['CONFIG_XEN'], if_true: dummy_ss) diff --git a/accel/qtest/meson.build b/accel/qtest/meson.build index 4c65600293..176d990ae1 100644 --- a/accel/qtest/meson.build +++ b/accel/qtest/meson.build @@ -1,2 +1 @@ -qtest_module_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_POSIX'], - if_true: files('qtest.c')) +qtest_module_ss.add(when: ['CONFIG_SOFTMMU'], if_true: files('qtest.c')) diff --git a/accel/qtest/qtest.c b/accel/qtest/qtest.c index 7e6b8110d5..f6056ac836 100644 --- a/accel/qtest/qtest.c +++ b/accel/qtest/qtest.c @@ -20,7 +20,6 @@ #include "qemu/accel.h" #include "sysemu/qtest.h" #include "sysemu/cpus.h" -#include "sysemu/cpu-timers.h" #include "qemu/guest-random.h" #include "qemu/main-loop.h" #include "hw/core/cpu.h" diff --git a/accel/stubs/hax-stub.c b/accel/stubs/hax-stub.c index 49077f88e3..2fe31aaa9a 100644 --- a/accel/stubs/hax-stub.c +++ b/accel/stubs/hax-stub.c @@ -16,6 +16,8 @@ #include "qemu/osdep.h" #include "sysemu/hax.h" +bool hax_allowed; + int hax_sync_vcpus(void) { return 0; diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c index 5b1d00a222..5d2dd8f351 100644 --- a/accel/stubs/kvm-stub.c +++ b/accel/stubs/kvm-stub.c @@ -12,10 +12,7 @@ #include "qemu/osdep.h" #include "sysemu/kvm.h" - -#ifndef CONFIG_USER_ONLY #include "hw/pci/msi.h" -#endif 
KVMState *kvm_state; bool kvm_kernel_irqchip; @@ -49,27 +46,6 @@ int kvm_has_many_ioeventfds(void) return 0; } -int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap) -{ - return -ENOSYS; -} - -int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr, - target_ulong len, int type) -{ - return -EINVAL; -} - -int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr, - target_ulong len, int type) -{ - return -EINVAL; -} - -void kvm_remove_all_breakpoints(CPUState *cpu) -{ -} - int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr) { return 1; @@ -80,8 +56,7 @@ int kvm_on_sigbus(int code, void *addr) return 1; } -#ifndef CONFIG_USER_ONLY -int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev) +int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev) { return -ENOSYS; } @@ -147,4 +122,13 @@ bool kvm_arm_supports_user_irq(void) { return false; } -#endif + +bool kvm_dirty_ring_enabled(void) +{ + return false; +} + +uint32_t kvm_dirty_ring_size(void) +{ + return 0; +} diff --git a/accel/stubs/meson.build b/accel/stubs/meson.build index 12dd1539af..0249b9258f 100644 --- a/accel/stubs/meson.build +++ b/accel/stubs/meson.build @@ -1,4 +1,7 @@ -specific_ss.add(when: 'CONFIG_HAX', if_false: files('hax-stub.c')) -specific_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c')) -specific_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c')) -specific_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c')) +sysemu_stubs_ss = ss.source_set() +sysemu_stubs_ss.add(when: 'CONFIG_HAX', if_false: files('hax-stub.c')) +sysemu_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c')) +sysemu_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c')) +sysemu_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c')) + +specific_ss.add_all(when: ['CONFIG_SOFTMMU'], if_true: sysemu_stubs_ss) diff --git a/accel/stubs/tcg-stub.c b/accel/stubs/tcg-stub.c index d8162673ae..c1b05767c0 100644 --- a/accel/stubs/tcg-stub.c +++ b/accel/stubs/tcg-stub.c @@ -21,6 +21,17 @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) { } +void tcg_flush_jmp_cache(CPUState *cpu) +{ +} + +int probe_access_flags(CPUArchState *env, target_ulong addr, + MMUAccessType access_type, int mmu_idx, + bool nonfault, void **phost, uintptr_t retaddr) +{ + g_assert_not_reached(); +} + void *probe_access(CPUArchState *env, target_ulong addr, int size, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { @@ -28,12 +39,12 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size, g_assert_not_reached(); } -void QEMU_NORETURN cpu_loop_exit(CPUState *cpu) +G_NORETURN void cpu_loop_exit(CPUState *cpu) { g_assert_not_reached(); } -void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc) +G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc) { g_assert_not_reached(); } diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc index 6c0339f610..6602d7689f 100644 --- a/accel/tcg/atomic_common.c.inc +++ b/accel/tcg/atomic_common.c.inc @@ -13,56 +13,23 @@ * See the COPYING file in the top-level directory. 
*/ -static uint16_t atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi) -{ - CPUState *cpu = env_cpu(env); - uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false); - - trace_guest_mem_before_exec(cpu, addr, info); - trace_guest_mem_before_exec(cpu, addr, info | TRACE_MEM_ST); - - return info; -} - static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr, - uint16_t info) + MemOpIdx oi) { - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info); - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info | TRACE_MEM_ST); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW); } #if HAVE_ATOMIC128 -static uint16_t atomic_trace_ld_pre(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi) -{ - uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false); - - trace_guest_mem_before_exec(env_cpu(env), addr, info); - - return info; -} - static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr, - uint16_t info) + MemOpIdx oi) { - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info); -} - -static uint16_t atomic_trace_st_pre(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi) -{ - uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), true); - - trace_guest_mem_before_exec(env_cpu(env), addr, info); - - return info; + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); } static void atomic_trace_st_post(CPUArchState *env, target_ulong addr, - uint16_t info) + MemOpIdx oi) { - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } #endif diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h index 8098a1be31..404a530f7c 100644 --- a/accel/tcg/atomic_template.h +++ b/accel/tcg/atomic_template.h @@ -19,7 +19,6 @@ */ #include "qemu/plugin.h" -#include "trace/mem.h" #if DATA_SIZE == 16 # define SUFFIX o @@ -64,7 +63,7 @@ the ATOMIC_NAME macro, and redefined below. 
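GEN_ATOMIC_HELPER_FN, a little further down in this file, builds arbitrary read-modify-write helpers out of a compare-and-swap loop: read the old value, compute the new one, and retry until the cmpxchg observes no concurrent change. The same loop in standalone C11, here computing an add-and-return-new purely as an illustration:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Generic RMW via compare-exchange: retry until no other thread has
     * modified *p between our read and our update. */
    static uint64_t add_fetch_u64(_Atomic uint64_t *p, uint64_t val)
    {
        uint64_t old = atomic_load_explicit(p, memory_order_relaxed);
        uint64_t new_val;

        do {
            new_val = old + val;
            /* On failure, 'old' is refreshed with the current contents. */
        } while (!atomic_compare_exchange_weak_explicit(
                     p, &old, new_val,
                     memory_order_seq_cst, memory_order_relaxed));

        return new_val;
    }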
*/ #if DATA_SIZE == 1 # define END -#elif defined(HOST_WORDS_BIGENDIAN) +#elif HOST_BIG_ENDIAN # define END _be #else # define END _le @@ -72,12 +71,11 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, ABI_TYPE cmpv, ABI_TYPE newv, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr); DATA_TYPE ret; - uint16_t info = atomic_trace_rmw_pre(env, addr, oi); #if DATA_SIZE == 16 ret = atomic16_cmpxchg(haddr, cmpv, newv); @@ -85,64 +83,60 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv); #endif ATOMIC_MMU_CLEANUP; - atomic_trace_rmw_post(env, addr, info); + atomic_trace_rmw_post(env, addr, oi); return ret; } #if DATA_SIZE >= 16 #if HAVE_ATOMIC128 ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, retaddr); DATA_TYPE val; - uint16_t info = atomic_trace_ld_pre(env, addr, oi); val = atomic16_read(haddr); ATOMIC_MMU_CLEANUP; - atomic_trace_ld_post(env, addr, info); + atomic_trace_ld_post(env, addr, oi); return val; } void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, retaddr); - uint16_t info = atomic_trace_st_pre(env, addr, oi); atomic16_set(haddr, val); ATOMIC_MMU_CLEANUP; - atomic_trace_st_post(env, addr, info); + atomic_trace_st_post(env, addr, oi); } #endif #else ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr); DATA_TYPE ret; - uint16_t info = atomic_trace_rmw_pre(env, addr, oi); ret = qatomic_xchg__nocheck(haddr, val); ATOMIC_MMU_CLEANUP; - atomic_trace_rmw_post(env, addr, info); + atomic_trace_rmw_post(env, addr, oi); return ret; } #define GEN_ATOMIC_HELPER(X) \ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ - ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \ + ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \ { \ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ PAGE_READ | PAGE_WRITE, retaddr); \ DATA_TYPE ret; \ - uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \ ret = qatomic_##X(haddr, val); \ ATOMIC_MMU_CLEANUP; \ - atomic_trace_rmw_post(env, addr, info); \ + atomic_trace_rmw_post(env, addr, oi); \ return ret; \ } @@ -167,12 +161,11 @@ GEN_ATOMIC_HELPER(xor_fetch) */ #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ - ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \ + ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \ { \ XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ PAGE_READ | PAGE_WRITE, retaddr); \ XDATA_TYPE cmp, old, new, val = xval; \ - uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \ smp_mb(); \ cmp = qatomic_read__nocheck(haddr); \ do { \ @@ -180,7 +173,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \ } while (cmp != old); \ ATOMIC_MMU_CLEANUP; \ - atomic_trace_rmw_post(env, addr, info); \ + atomic_trace_rmw_post(env, addr, oi); \ return RET; \ } @@ -203,7 
+196,7 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new) /* Define reverse-host-endian atomic operations. Note that END is used within the ATOMIC_NAME macro. */ -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN # define END _le #else # define END _be @@ -211,12 +204,11 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new) ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, ABI_TYPE cmpv, ABI_TYPE newv, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr); DATA_TYPE ret; - uint16_t info = atomic_trace_rmw_pre(env, addr, oi); #if DATA_SIZE == 16 ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv)); @@ -224,65 +216,61 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv)); #endif ATOMIC_MMU_CLEANUP; - atomic_trace_rmw_post(env, addr, info); + atomic_trace_rmw_post(env, addr, oi); return BSWAP(ret); } #if DATA_SIZE >= 16 #if HAVE_ATOMIC128 ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, retaddr); DATA_TYPE val; - uint16_t info = atomic_trace_ld_pre(env, addr, oi); val = atomic16_read(haddr); ATOMIC_MMU_CLEANUP; - atomic_trace_ld_post(env, addr, info); + atomic_trace_ld_post(env, addr, oi); return BSWAP(val); } void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, retaddr); - uint16_t info = atomic_trace_st_pre(env, addr, oi); val = BSWAP(val); atomic16_set(haddr, val); ATOMIC_MMU_CLEANUP; - atomic_trace_st_post(env, addr, info); + atomic_trace_st_post(env, addr, oi); } #endif #else ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr); ABI_TYPE ret; - uint16_t info = atomic_trace_rmw_pre(env, addr, oi); ret = qatomic_xchg__nocheck(haddr, BSWAP(val)); ATOMIC_MMU_CLEANUP; - atomic_trace_rmw_post(env, addr, info); + atomic_trace_rmw_post(env, addr, oi); return BSWAP(ret); } #define GEN_ATOMIC_HELPER(X) \ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ - ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \ + ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \ { \ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ PAGE_READ | PAGE_WRITE, retaddr); \ DATA_TYPE ret; \ - uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \ ret = qatomic_##X(haddr, BSWAP(val)); \ ATOMIC_MMU_CLEANUP; \ - atomic_trace_rmw_post(env, addr, info); \ + atomic_trace_rmw_post(env, addr, oi); \ return BSWAP(ret); \ } @@ -304,12 +292,11 @@ GEN_ATOMIC_HELPER(xor_fetch) */ #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ - ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \ + ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \ { \ XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ PAGE_READ | PAGE_WRITE, retaddr); \ XDATA_TYPE ldo, ldn, old, new, val = xval; \ - uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \ smp_mb(); \ ldn = qatomic_read__nocheck(haddr); \ do { \ @@ -317,7 +304,7 @@ 
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \ } while (ldo != ldn); \ ATOMIC_MMU_CLEANUP; \ - atomic_trace_rmw_post(env, addr, info); \ + atomic_trace_rmw_post(env, addr, oi); \ return RET; \ } diff --git a/accel/tcg/cpu-exec-common.c b/accel/tcg/cpu-exec-common.c index be6fe45aa5..c7bc8c6efa 100644 --- a/accel/tcg/cpu-exec-common.c +++ b/accel/tcg/cpu-exec-common.c @@ -71,7 +71,7 @@ void cpu_loop_exit(CPUState *cpu) void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc) { if (pc) { - cpu_restore_state(cpu, pc, true); + cpu_restore_state(cpu, pc); } cpu_loop_exit(cpu); } diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c index 96ffbfeec1..84a0bcaa90 100644 --- a/accel/tcg/cpu-exec.c +++ b/accel/tcg/cpu-exec.c @@ -18,8 +18,10 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/qemu-print.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" +#include "qapi/type-helpers.h" #include "hw/core/tcg-cpu-ops.h" #include "trace.h" #include "disas/disas.h" @@ -38,7 +40,9 @@ #include "exec/cpu-all.h" #include "sysemu/cpu-timers.h" #include "sysemu/replay.h" +#include "sysemu/tcg.h" #include "exec/helper-proto.h" +#include "tb-jmp-cache.h" #include "tb-hash.h" #include "tb-context.h" #include "internal.h" @@ -167,22 +171,123 @@ uint32_t curr_cflags(CPUState *cpu) return cflags; } +struct tb_desc { + target_ulong pc; + target_ulong cs_base; + CPUArchState *env; + tb_page_addr_t page_addr0; + uint32_t flags; + uint32_t cflags; + uint32_t trace_vcpu_dstate; +}; + +static bool tb_lookup_cmp(const void *p, const void *d) +{ + const TranslationBlock *tb = p; + const struct tb_desc *desc = d; + + if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) && + tb_page_addr0(tb) == desc->page_addr0 && + tb->cs_base == desc->cs_base && + tb->flags == desc->flags && + tb->trace_vcpu_dstate == desc->trace_vcpu_dstate && + (tb_cflags(tb) & ~CF_INVALID) == desc->cflags) { + /* check next page if needed */ + tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb); + if (tb_phys_page1 == -1) { + return true; + } else { + tb_page_addr_t phys_page1; + target_ulong virt_page1; + + /* + * We know that the first page matched, and an otherwise valid TB + * encountered an incomplete instruction at the end of that page, + * therefore we know that generating a new TB from the current PC + * must also require reading from the next page -- even if the + * second pages do not match, and therefore the resulting insn + * is different for the new TB. Therefore any exception raised + * here by the faulting lookup is not premature. + */ + virt_page1 = TARGET_PAGE_ALIGN(desc->pc); + phys_page1 = get_page_addr_code(desc->env, virt_page1); + if (tb_phys_page1 == phys_page1) { + return true; + } + } + } + return false; +} + +static TranslationBlock * +tb_htable_lookup_common(CPUState *cpu, target_ulong pc, + target_ulong cs_base, uint32_t flags, + uint32_t cflags, const struct qht *ht, + qht_lookup_func_t func) +{ + tb_page_addr_t phys_pc; + struct tb_desc desc; + uint32_t h; + + desc.env = cpu->env_ptr; + desc.cs_base = cs_base; + desc.flags = flags; + desc.cflags = cflags; + desc.trace_vcpu_dstate = *cpu->trace_dstate; + desc.pc = pc; + phys_pc = get_page_addr_code(desc.env, pc); + if (phys_pc == -1) { + return NULL; + } + desc.page_addr0 = phys_pc; + h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 
0 : pc), + flags, cflags, *cpu->trace_dstate); + return qht_lookup_custom(ht, &desc, h, func); +} + +TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, + target_ulong cs_base, uint32_t flags, + uint32_t cflags) +{ + return tb_htable_lookup_common(cpu, pc, cs_base, flags, cflags, + &tb_ctx.htable, tb_lookup_cmp); +} + +static bool inv_tb_lookup_cmp(const void *p, const void *d) +{ + const TranslationBlock *tb = p; + const struct tb_desc *desc = d; + + return tb_lookup_cmp(p, d) && + tb->ihash == tb_code_hash_func(desc->env, desc->pc, tb->size); +} + +TranslationBlock *inv_tb_htable_lookup(CPUState *cpu, target_ulong pc, + target_ulong cs_base, uint32_t flags, + uint32_t cflags) +{ + return tb_htable_lookup_common(cpu, pc, cs_base, flags, cflags, + &tb_ctx.inv_htable, inv_tb_lookup_cmp); +} + /* Might cause an exception, so have a longjmp destination ready */ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, target_ulong cs_base, uint32_t flags, uint32_t cflags) { TranslationBlock *tb; + CPUJumpCache *jc; uint32_t hash; /* we should never be trying to look up an INVALID tb */ tcg_debug_assert(!(cflags & CF_INVALID)); hash = tb_jmp_cache_hash_func(pc); - tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]); + jc = cpu->tb_jmp_cache; + tb = tb_jmp_cache_get_tb(jc, hash); if (likely(tb && - tb->pc == pc && + tb_jmp_cache_get_pc(jc, hash, tb) == pc && tb->cs_base == cs_base && tb->flags == flags && tb->trace_vcpu_dstate == *cpu->trace_dstate && @@ -193,16 +298,14 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, if (tb == NULL) { return NULL; } - qatomic_set(&cpu->tb_jmp_cache[hash], tb); + tb_jmp_cache_set(jc, hash, tb, pc); return tb; } -static inline void log_cpu_exec(target_ulong pc, CPUState *cpu, - const TranslationBlock *tb) +static void log_cpu_exec(target_ulong pc, CPUState *cpu, + const TranslationBlock *tb) { - if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) - && qemu_log_in_addr_range(pc)) { - + if (qemu_log_in_addr_range(pc)) { qemu_log_mask(CPU_LOG_EXEC, "Trace %d: %p [" TARGET_FMT_lx "/" TARGET_FMT_lx "/%08x/%08x] %s\n", @@ -211,32 +314,30 @@ static inline void log_cpu_exec(target_ulong pc, CPUState *cpu, #if defined(DEBUG_DISAS) if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) { - FILE *logfile = qemu_log_lock(); - int flags = 0; + FILE *logfile = qemu_log_trylock(); + if (logfile) { + int flags = 0; - if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) { - flags |= CPU_DUMP_FPU; - } + if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) { + flags |= CPU_DUMP_FPU; + } #if defined(TARGET_I386) - flags |= CPU_DUMP_CCOP; + flags |= CPU_DUMP_CCOP; #endif - log_cpu_state(cpu, flags); - qemu_log_unlock(logfile); + cpu_dump_state(cpu, logfile, flags); + qemu_log_unlock(logfile); + } } #endif /* DEBUG_DISAS */ } } -static bool check_for_breakpoints(CPUState *cpu, target_ulong pc, - uint32_t *cflags) +static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc, + uint32_t *cflags) { CPUBreakpoint *bp; bool match_page = false; - if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) { - return false; - } - /* * Singlestep overrides breakpoints. 
* This requirement is visible in the record-replay tests, where @@ -297,6 +398,13 @@ static bool check_for_breakpoints(CPUState *cpu, target_ulong pc, return false; } +static inline bool check_for_breakpoints(CPUState *cpu, target_ulong pc, + uint32_t *cflags) +{ + return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) && + check_for_breakpoints_slow(cpu, pc, cflags); +} + /** * helper_lookup_tb_ptr: quick check for next tb * @env: current cpu state @@ -324,7 +432,9 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env) return tcg_code_gen_epilogue; } - log_cpu_exec(pc, cpu, tb); + if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) { + log_cpu_exec(pc, cpu, tb); + } return tb->tc.ptr; } @@ -347,7 +457,9 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) TranslationBlock *last_tb; const void *tb_ptr = itb->tc.ptr; - log_cpu_exec(itb->pc, cpu, itb); + if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) { + log_cpu_exec(log_pc(cpu, itb), cpu, itb); + } qemu_thread_jit_execute(); ret = tcg_qemu_tb_exec(env, tb_ptr); @@ -371,18 +483,34 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) * of the start of the TB. */ CPUClass *cc = CPU_GET_CLASS(cpu); - qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc, - "Stopped execution of TB chain before %p [" - TARGET_FMT_lx "] %s\n", - last_tb->tc.ptr, last_tb->pc, - lookup_symbol(last_tb->pc)); + if (cc->tcg_ops->synchronize_from_tb) { cc->tcg_ops->synchronize_from_tb(cpu, last_tb); } else { + assert(!TARGET_TB_PCREL); assert(cc->set_pc); - cc->set_pc(cpu, last_tb->pc); + cc->set_pc(cpu, tb_pc(last_tb)); + } + if (qemu_loglevel_mask(CPU_LOG_EXEC)) { + target_ulong pc = log_pc(cpu, last_tb); + if (qemu_log_in_addr_range(pc)) { + qemu_log("Stopped execution of TB chain before %p [" + TARGET_FMT_lx "] %s\n", + last_tb->tc.ptr, pc, lookup_symbol(pc)); + } } } + + /* + * If gdb single-step, and we haven't raised another exception, + * raise a debug exception. Single-step with another exception + * is handled in cpu_handle_exception. + */ + if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) { + cpu->exception_index = EXCP_DEBUG; + cpu_loop_exit(cpu); + } + return last_tb; } @@ -407,7 +535,7 @@ static void cpu_exec_exit(CPUState *cpu) void cpu_exec_step_atomic(CPUState *cpu) { - CPUArchState *env = (CPUArchState *)cpu->env_ptr; + CPUArchState *env = cpu->env_ptr; TranslationBlock *tb; target_ulong cs_base, pc; uint32_t flags, cflags; @@ -446,12 +574,11 @@ void cpu_exec_step_atomic(CPUState *cpu) cpu_tb_exec(cpu, tb, &tb_exit); cpu_exec_exit(cpu); } else { - /* - * The mmap_lock is dropped by tb_gen_code if it runs out of - * memory. 
- */ #ifndef CONFIG_SOFTMMU - tcg_debug_assert(!have_mmap_lock()); + clear_helper_retaddr(); + if (have_mmap_lock()) { + mmap_unlock(); + } #endif if (qemu_mutex_iothread_locked()) { qemu_mutex_unlock_iothread(); @@ -460,7 +587,6 @@ void cpu_exec_step_atomic(CPUState *cpu) qemu_plugin_disable_mem_helpers(cpu); } - /* * As we start the exclusive region before codegen we must still * be in the region if we longjump out of either the codegen or @@ -471,93 +597,6 @@ void cpu_exec_step_atomic(CPUState *cpu) end_exclusive(); } -struct tb_desc { - target_ulong pc; - target_ulong cs_base; - CPUArchState *env; - tb_page_addr_t phys_page1; - uint32_t flags; - uint32_t cflags; - uint32_t trace_vcpu_dstate; -}; - -static bool tb_lookup_cmp(const void *p, const void *d) -{ - const TranslationBlock *tb = p; - const struct tb_desc *desc = d; - - if (tb->pc == desc->pc && - tb->page_addr[0] == desc->phys_page1 && - tb->cs_base == desc->cs_base && - tb->flags == desc->flags && - tb->trace_vcpu_dstate == desc->trace_vcpu_dstate && - (tb_cflags(tb) & ~CF_INVALID) == desc->cflags) { - /* check next page if needed */ - if (tb->page_addr[1] == -1) { - return true; - } else { - tb_page_addr_t phys_page2; - target_ulong virt_page2; - - virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; - phys_page2 = get_page_addr_code(desc->env, virt_page2); - if (tb->page_addr[1] == phys_page2) { - return true; - } - } - } - return false; -} - -static bool inv_tb_lookup_cmp(const void *p, const void *d) -{ - const TranslationBlock *tb = p; - const struct tb_desc *desc = d; - - return tb_lookup_cmp(p, d) && - tb->ihash == tb_code_hash_func(desc->env, tb->pc, tb->size); -} - -static TranslationBlock * -tb_htable_lookup_common(CPUState *cpu, target_ulong pc, target_ulong cs_base, - uint32_t flags, uint32_t cflags, const struct qht *ht, - qht_lookup_func_t func) -{ - tb_page_addr_t phys_pc; - struct tb_desc desc; - uint32_t h; - - desc.env = (CPUArchState *)cpu->env_ptr; - desc.cs_base = cs_base; - desc.flags = flags; - desc.cflags = cflags; - desc.trace_vcpu_dstate = *cpu->trace_dstate; - desc.pc = pc; - phys_pc = get_page_addr_code(desc.env, pc); - if (phys_pc == -1) { - return NULL; - } - desc.phys_page1 = phys_pc & TARGET_PAGE_MASK; - h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate); - return qht_lookup_custom(ht, &desc, h, func); -} - -TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, - target_ulong cs_base, uint32_t flags, - uint32_t cflags) -{ - return tb_htable_lookup_common(cpu, pc, cs_base, flags, cflags, - &tb_ctx.htable, tb_lookup_cmp); -} - -TranslationBlock *inv_tb_htable_lookup(CPUState *cpu, target_ulong pc, - target_ulong cs_base, uint32_t flags, - uint32_t cflags) -{ - return tb_htable_lookup_common(cpu, pc, cs_base, flags, cflags, - &tb_ctx.inv_htable, inv_tb_lookup_cmp); -} - void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr) { if (TCG_TARGET_HAS_direct_jump) { @@ -600,11 +639,8 @@ static inline void tb_add_jump(TranslationBlock *tb, int n, qemu_spin_unlock(&tb_next->jmp_lock); - qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc, - "Linking TBs %p [" TARGET_FMT_lx - "] index %d -> %p [" TARGET_FMT_lx "]\n", - tb->tc.ptr, tb->pc, n, - tb_next->tc.ptr, tb_next->pc); + qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n", + tb->tc.ptr, n, tb_next->tc.ptr); return; out_unlock_next: @@ -614,8 +650,9 @@ static inline void tb_add_jump(TranslationBlock *tb, int n, static inline bool cpu_handle_halt(CPUState *cpu) { +#ifndef CONFIG_USER_ONLY if 
(cpu->halted) { -#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY) +#if defined(TARGET_I386) if (cpu->interrupt_request & CPU_INTERRUPT_POLL) { X86CPU *x86_cpu = X86_CPU(cpu); qemu_mutex_lock_iothread(); @@ -623,13 +660,14 @@ static inline bool cpu_handle_halt(CPUState *cpu) cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL); qemu_mutex_unlock_iothread(); } -#endif +#endif /* TARGET_I386 */ if (!cpu_has_work(cpu)) { return true; } cpu->halted = 0; } +#endif /* !CONFIG_USER_ONLY */ return false; } @@ -657,7 +695,8 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret) if (replay_has_exception() && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) { /* Execute just one insn to trigger exception pending in the log */ - cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) | 1; + cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) + | CF_NOIRQ | 1; } #endif return false; @@ -677,8 +716,8 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret) loop */ #if defined(TARGET_I386) CPUClass *cc = CPU_GET_CLASS(cpu); - cc->tcg_ops->do_interrupt(cpu); -#endif + cc->tcg_ops->fake_user_interrupt(cpu); +#endif /* TARGET_I386 */ *ret = cpu->exception_index; cpu->exception_index = -1; return true; @@ -711,6 +750,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret) return false; } +#ifndef CONFIG_USER_ONLY /* * CPU_INTERRUPT_POLL is a virtual event which gets converted into a * "real" interrupt event later. It does not need to be recorded for @@ -724,11 +764,19 @@ static inline bool need_replay_interrupt(int interrupt_request) return true; #endif } +#endif /* !CONFIG_USER_ONLY */ static inline bool cpu_handle_interrupt(CPUState *cpu, TranslationBlock **last_tb) { - CPUClass *cc = CPU_GET_CLASS(cpu); + /* + * If we have requested custom cflags with CF_NOIRQ we should + * skip checking here. Any pending interrupts will get picked up + * by the next TB we execute under normal cflags. + */ + if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) { + return false; + } /* Clear the interrupt flag now since we're processing * cpu->interrupt_request and cpu->exit_request. @@ -751,6 +799,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, qemu_mutex_unlock_iothread(); return true; } +#if !defined(CONFIG_USER_ONLY) if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) { /* Do nothing */ } else if (interrupt_request & CPU_INTERRUPT_HALT) { @@ -779,12 +828,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, qemu_mutex_unlock_iothread(); return true; } -#endif +#endif /* !TARGET_I386 */ /* The target hook has 3 exit conditions: False when the interrupt isn't processed, True when it is, and we should restart on a new TB, and via longjmp via cpu_loop_exit. */ else { + CPUClass *cc = CPU_GET_CLASS(cpu); + if (cc->tcg_ops->cpu_exec_interrupt && cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) { if (need_replay_interrupt(interrupt_request)) { @@ -795,14 +846,19 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, * raised when single-stepping so that GDB doesn't miss the * next instruction. */ - cpu->exception_index = - (cpu->singlestep_enabled ? 
EXCP_DEBUG : -1); + if (unlikely(cpu->singlestep_enabled)) { + cpu->exception_index = EXCP_DEBUG; + qemu_mutex_unlock_iothread(); + return true; + } + cpu->exception_index = -1; *last_tb = NULL; } /* The target hook may have updated the 'cpu->interrupt_request'; * reload the 'interrupt_request' value */ interrupt_request = cpu->interrupt_request; } +#endif /* !CONFIG_USER_ONLY */ if (interrupt_request & CPU_INTERRUPT_EXITTB) { cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB; /* ensure that no TB jump will be modified as @@ -830,11 +886,12 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, } static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb, + target_ulong pc, TranslationBlock **last_tb, int *tb_exit) { int32_t insns_left; - trace_exec_tb(tb, tb->pc); + trace_exec_tb(tb, pc); tb = cpu_tb_exec(cpu, tb, tb_exit); if (*tb_exit != TB_EXIT_REQUESTED) { *last_tb = tb; @@ -925,7 +982,10 @@ int cpu_exec(CPUState *cpu) #endif #ifndef CONFIG_SOFTMMU - tcg_debug_assert(!have_mmap_lock()); + clear_helper_retaddr(); + if (have_mmap_lock()) { + mmap_unlock(); + } #endif if (qemu_mutex_iothread_locked()) { qemu_mutex_unlock_iothread(); @@ -967,6 +1027,8 @@ int cpu_exec(CPUState *cpu) tb = tb_lookup(cpu, pc, cs_base, flags, cflags); if (tb == NULL) { + uint32_t h; + mmap_lock(); tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); mmap_unlock(); @@ -974,7 +1036,8 @@ int cpu_exec(CPUState *cpu) * We add the TB in the virtual pc hash table * for the fast lookup */ - qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb); + h = tb_jmp_cache_hash_func(pc); + tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc); } #ifndef CONFIG_USER_ONLY @@ -984,7 +1047,7 @@ int cpu_exec(CPUState *cpu) * direct jump to a TB spanning two pages because the mapping * for the second page can change. */ - if (tb->page_addr[1] != -1) { + if (tb_page_addr1(tb) != -1) { last_tb = NULL; } #endif @@ -993,7 +1056,7 @@ int cpu_exec(CPUState *cpu) tb_add_jump(last_tb, tb_exit, tb); } - cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit); + cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit); /* Try to align the host and virtual clocks if the guest is in advance */ @@ -1016,44 +1079,106 @@ void tcg_exec_realizefn(CPUState *cpu, Error **errp) cc->tcg_ops->initialize(); tcg_target_initialized = true; } - tlb_init(cpu); - qemu_plugin_vcpu_init_hook(cpu); + cpu->tb_jmp_cache = g_new0(CPUJumpCache, 1); + tlb_init(cpu); #ifndef CONFIG_USER_ONLY tcg_iommu_init_notifier_list(cpu); #endif /* !CONFIG_USER_ONLY */ + /* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. 
*/ } /* undo the initializations in reverse order */ void tcg_exec_unrealizefn(CPUState *cpu) { + qemu_plugin_vcpu_exit_hook(cpu); #ifndef CONFIG_USER_ONLY tcg_iommu_free_notifier_list(cpu); #endif /* !CONFIG_USER_ONLY */ - qemu_plugin_vcpu_exit_hook(cpu); tlb_destroy(cpu); + g_free(cpu->tb_jmp_cache); } #ifndef CONFIG_USER_ONLY -void dump_drift_info(void) +static void dump_drift_info(GString *buf) { if (!icount_enabled()) { return; } - qemu_printf("Host - Guest clock %"PRIi64" ms\n", - (cpu_get_clock() - icount_get()) / SCALE_MS); + g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n", + (cpu_get_clock() - icount_get()) / SCALE_MS); if (icount_align_option) { - qemu_printf("Max guest delay %"PRIi64" ms\n", - -max_delay / SCALE_MS); - qemu_printf("Max guest advance %"PRIi64" ms\n", - max_advance / SCALE_MS); + g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n", + -max_delay / SCALE_MS); + g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n", + max_advance / SCALE_MS); } else { - qemu_printf("Max guest delay NA\n"); - qemu_printf("Max guest advance NA\n"); + g_string_append_printf(buf, "Max guest delay NA\n"); + g_string_append_printf(buf, "Max guest advance NA\n"); } } +HumanReadableText *qmp_x_query_jit(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + + if (!tcg_enabled()) { + error_setg(errp, "JIT information is only available with accel=tcg"); + return NULL; + } + + dump_exec_info(buf); + dump_drift_info(buf); + + return human_readable_text_from_str(buf); +} + +HumanReadableText *qmp_x_query_opcount(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + + if (!tcg_enabled()) { + error_setg(errp, "Opcode count information is only available with accel=tcg"); + return NULL; + } + + tcg_dump_op_count(buf); + + return human_readable_text_from_str(buf); +} + +#ifdef CONFIG_PROFILER + +int64_t dev_time; + +HumanReadableText *qmp_x_query_profile(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + static int64_t last_cpu_exec_time; + int64_t cpu_exec_time; + int64_t delta; + + cpu_exec_time = tcg_cpu_exec_time(); + delta = cpu_exec_time - last_cpu_exec_time; + + g_string_append_printf(buf, "async time %" PRId64 " (%0.3f)\n", + dev_time, dev_time / (double)NANOSECONDS_PER_SECOND); + g_string_append_printf(buf, "qemu time %" PRId64 " (%0.3f)\n", + delta, delta / (double)NANOSECONDS_PER_SECOND); + last_cpu_exec_time = cpu_exec_time; + dev_time = 0; + + return human_readable_text_from_str(buf); +} +#else +HumanReadableText *qmp_x_query_profile(Error **errp) +{ + error_setg(errp, "Internal profiler not compiled"); + return NULL; +} +#endif + #endif /* !CONFIG_USER_ONLY */ diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index f7e0951124..9ea4904fac 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -34,12 +34,12 @@ #include "qemu/atomic128.h" #include "exec/translate-all.h" #include "trace/trace-root.h" -#include "trace/mem.h" #include "tb-hash.h" #include "internal.h" #ifdef CONFIG_PLUGIN #include "qemu/plugin-memory.h" #endif +#include "tcg/tcg-ldst.h" /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */ /* #define DEBUG_TLB */ @@ -100,21 +100,14 @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns, static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr) { - unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr); + int i, i0 = tb_jmp_cache_hash_page(page_addr); + CPUJumpCache *jc = cpu->tb_jmp_cache; for (i = 0; i < TB_JMP_PAGE_SIZE; i++) { - 
qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL); + qatomic_set(&jc->array[i0 + i].tb, NULL); } } -static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr) -{ - /* Discard jump cache entries for any tb which might potentially - overlap the flushed page. */ - tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE); - tb_jmp_cache_clear_page(cpu, addr); -} - /** * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary * @desc: The CPUTLBDesc portion of the TLB @@ -200,13 +193,13 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast, } g_free(fast->table); - g_free(desc->iotlb); + g_free(desc->fulltlb); tlb_window_reset(desc, now, 0); /* desc->n_used_entries is cleared by the caller */ fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; fast->table = g_try_new(CPUTLBEntry, new_size); - desc->iotlb = g_try_new(CPUIOTLBEntry, new_size); + desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size); /* * If the allocations fail, try smaller sizes. We just freed some @@ -215,7 +208,7 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast, * allocations to fail though, so we progressively reduce the allocation * size, aborting if we cannot even allocate the smallest TLB we support. */ - while (fast->table == NULL || desc->iotlb == NULL) { + while (fast->table == NULL || desc->fulltlb == NULL) { if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) { error_report("%s: %s", __func__, strerror(errno)); abort(); @@ -224,9 +217,9 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast, fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; g_free(fast->table); - g_free(desc->iotlb); + g_free(desc->fulltlb); fast->table = g_try_new(CPUTLBEntry, new_size); - desc->iotlb = g_try_new(CPUIOTLBEntry, new_size); + desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size); } } @@ -258,7 +251,7 @@ static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now) desc->n_used_entries = 0; fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS; fast->table = g_new(CPUTLBEntry, n_entries); - desc->iotlb = g_new(CPUIOTLBEntry, n_entries); + desc->fulltlb = g_new(CPUTLBEntryFull, n_entries); tlb_mmu_flush_locked(desc, fast); } @@ -299,7 +292,7 @@ void tlb_destroy(CPUState *cpu) CPUTLBDescFast *fast = &env_tlb(env)->f[i]; g_free(fast->table); - g_free(desc->iotlb); + g_free(desc->fulltlb); } } @@ -364,7 +357,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) qemu_spin_unlock(&env_tlb(env)->c.lock); - cpu_tb_jmp_cache_clear(cpu); + tcg_flush_jmp_cache(cpu); if (to_clean == ALL_MMUIDX_BITS) { qatomic_set(&env_tlb(env)->c.full_flush_count, @@ -541,7 +534,12 @@ static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu, } qemu_spin_unlock(&env_tlb(env)->c.lock); - tb_flush_jmp_cache(cpu, addr); + /* + * Discard jump cache entries for any tb which might potentially + * overlap the flushed page, which includes the previous. + */ + tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE); + tb_jmp_cache_clear_page(cpu, addr); } /** @@ -783,8 +781,23 @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu, } qemu_spin_unlock(&env_tlb(env)->c.lock); - for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) { - tb_flush_jmp_cache(cpu, d.addr + i); + /* + * If the length is larger than the jump cache size, then it will take + * longer to clear each entry individually than it will to clear it all. 
+ */ + if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) { + tcg_flush_jmp_cache(cpu); + return; + } + + /* + * Discard jump cache entries for any tb which might potentially + * overlap the flushed pages, which includes the previous. + */ + d.addr -= TARGET_PAGE_SIZE; + for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) { + tb_jmp_cache_clear_page(cpu, d.addr); + d.addr += TARGET_PAGE_SIZE; } } @@ -942,7 +955,8 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, can be detected */ void tlb_protect_code(ram_addr_t ram_addr) { - cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE, + cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK, + TARGET_PAGE_SIZE, DIRTY_MEMORY_CODE); } @@ -1086,16 +1100,16 @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx, env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; } -/* Add a new TLB entry. At most one entry for a given virtual address +/* + * Add a new TLB entry. At most one entry for a given virtual address * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the * supplied size is only used by tlb_flush_page. * * Called from TCG-generated code, which is under an RCU read-side * critical section. */ -void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, - hwaddr paddr, MemTxAttrs attrs, int prot, - int mmu_idx, target_ulong size) +void tlb_set_page_full(CPUState *cpu, int mmu_idx, + target_ulong vaddr, CPUTLBEntryFull *full) { CPUArchState *env = cpu->env_ptr; CPUTLB *tlb = env_tlb(env); @@ -1108,35 +1122,36 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, CPUTLBEntry *te, tn; hwaddr iotlb, xlat, sz, paddr_page; target_ulong vaddr_page; - int asidx = cpu_asidx_from_attrs(cpu, attrs); - int wp_flags; + int asidx, wp_flags, prot; bool is_ram, is_romd; assert_cpu_is_self(cpu); - if (size <= TARGET_PAGE_SIZE) { + if (full->lg_page_size <= TARGET_PAGE_BITS) { sz = TARGET_PAGE_SIZE; } else { - tlb_add_large_page(env, mmu_idx, vaddr, size); - sz = size; + sz = (hwaddr)1 << full->lg_page_size; + tlb_add_large_page(env, mmu_idx, vaddr, sz); } vaddr_page = vaddr & TARGET_PAGE_MASK; - paddr_page = paddr & TARGET_PAGE_MASK; + paddr_page = full->phys_addr & TARGET_PAGE_MASK; + prot = full->prot; + asidx = cpu_asidx_from_attrs(cpu, full->attrs); section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, - &xlat, &sz, attrs, &prot); + &xlat, &sz, full->attrs, &prot); assert(sz >= TARGET_PAGE_SIZE); tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx " prot=%x idx=%d\n", - vaddr, paddr, prot, mmu_idx); + vaddr, full->phys_addr, prot, mmu_idx); address = vaddr_page; - if (size < TARGET_PAGE_SIZE) { + if (full->lg_page_size < TARGET_PAGE_BITS) { /* Repeat the MMU check and TLB fill on every access. */ address |= TLB_INVALID_MASK; } - if (attrs.byte_swap) { + if (full->attrs.byte_swap) { address |= TLB_BSWAP; } @@ -1215,7 +1230,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, /* Evict the old entry into the victim tlb. */ copy_tlb_helper_locked(tv, te); - desc->viotlb[vidx] = desc->iotlb[index]; + desc->vfulltlb[vidx] = desc->fulltlb[index]; tlb_n_used_entries_dec(env, mmu_idx); } @@ -1232,8 +1247,10 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, * subtract here is that of the page base, and not the same as the * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). 
*/ - desc->iotlb[index].addr = iotlb - vaddr_page; - desc->iotlb[index].attrs = attrs; + desc->fulltlb[index] = *full; + desc->fulltlb[index].xlat_section = iotlb - vaddr_page; + desc->fulltlb[index].phys_addr = paddr_page; + desc->fulltlb[index].prot = prot; /* Now calculate the new entry */ tn.addend = addend - vaddr_page; @@ -1268,9 +1285,21 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, qemu_spin_unlock(&tlb->c.lock); } -/* Add a new TLB entry, but without specifying the memory - * transaction attributes to be used. - */ +void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, + hwaddr paddr, MemTxAttrs attrs, int prot, + int mmu_idx, target_ulong size) +{ + CPUTLBEntryFull full = { + .phys_addr = paddr, + .attrs = attrs, + .prot = prot, + .lg_page_size = ctz64(size) + }; + + assert(is_power_of_2(size)); + tlb_set_page_full(cpu, mmu_idx, vaddr, &full); +} + void tlb_set_page(CPUState *cpu, target_ulong vaddr, hwaddr paddr, int prot, int mmu_idx, target_ulong size) @@ -1279,18 +1308,6 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr, prot, mmu_idx, size); } -static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) -{ - ram_addr_t ram_addr; - - ram_addr = qemu_ram_addr_from_host(ptr); - if (ram_addr == RAM_ADDR_INVALID) { - error_report("Bad ram pointer %p", ptr); - abort(); - } - return ram_addr; -} - /* * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must @@ -1299,15 +1316,14 @@ static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) static void tlb_fill(CPUState *cpu, target_ulong addr, int size, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { - CPUClass *cc = CPU_GET_CLASS(cpu); bool ok; /* * This is not a probe, so only valid return is success; failure * should result in exception + longjmp to the cpu loop. 
*/ - ok = cc->tcg_ops->tlb_fill(cpu, addr, size, - access_type, mmu_idx, false, retaddr); + ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size, + access_type, mmu_idx, false, retaddr); assert(ok); } @@ -1315,9 +1331,8 @@ static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { - CPUClass *cc = CPU_GET_CLASS(cpu); - - cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr); + cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, + mmu_idx, retaddr); } static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr, @@ -1337,7 +1352,7 @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr, } } -static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, +static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full, int mmu_idx, target_ulong addr, uintptr_t retaddr, MMUAccessType access_type, MemOp op) { @@ -1349,9 +1364,9 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, bool locked = false; MemTxResult r; - section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); + section = iotlb_to_section(cpu, full->xlat_section, full->attrs); mr = section->mr; - mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; + mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; cpu->mem_io_pc = retaddr; if (!cpu->can_do_io) { cpu_io_recompile(cpu, retaddr); @@ -1361,14 +1376,14 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, qemu_mutex_lock_iothread(); locked = true; } - r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs); + r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs); if (r != MEMTX_OK) { hwaddr physaddr = mr_offset + section->offset_within_address_space - section->offset_within_region; cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, - mmu_idx, iotlbentry->attrs, r, retaddr); + mmu_idx, full->attrs, r, retaddr); } if (locked) { qemu_mutex_unlock_iothread(); @@ -1378,22 +1393,21 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, } /* - * Save a potentially trashed IOTLB entry for later lookup by plugin. - * This is read by tlb_plugin_lookup if the iotlb entry doesn't match + * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin. + * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match * because of the side effect of io_writex changing memory layout. 
*/ -static void save_iotlb_data(CPUState *cs, hwaddr addr, - MemoryRegionSection *section, hwaddr mr_offset) +static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section, + hwaddr mr_offset) { #ifdef CONFIG_PLUGIN SavedIOTLB *saved = &cs->saved_iotlb; - saved->addr = addr; saved->section = section; saved->mr_offset = mr_offset; #endif } -static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, +static void io_writex(CPUArchState *env, CPUTLBEntryFull *full, int mmu_idx, uint64_t val, target_ulong addr, uintptr_t retaddr, MemOp op) { @@ -1404,9 +1418,9 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, bool locked = false; MemTxResult r; - section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); + section = iotlb_to_section(cpu, full->xlat_section, full->attrs); mr = section->mr; - mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; + mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; if (!cpu->can_do_io) { cpu_io_recompile(cpu, retaddr); } @@ -1416,20 +1430,20 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, * The memory_region_dispatch may trigger a flush/resize * so for plugins we save the iotlb_data just in case. */ - save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset); + save_iotlb_data(cpu, section, mr_offset); if (!qemu_mutex_iothread_locked()) { qemu_mutex_lock_iothread(); locked = true; } - r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs); + r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs); if (r != MEMTX_OK) { hwaddr physaddr = mr_offset + section->offset_within_address_space - section->offset_within_region; cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), - MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r, + MMU_DATA_STORE, mmu_idx, full->attrs, r, retaddr); } if (locked) { @@ -1476,9 +1490,10 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, copy_tlb_helper_locked(vtlb, &tmptlb); qemu_spin_unlock(&env_tlb(env)->c.lock); - CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index]; - CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx]; - tmpio = *io; *io = *vio; *vio = tmpio; + CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index]; + CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx]; + CPUTLBEntryFull tmpf; + tmpf = *f1; *f1 = *f2; *f2 = tmpf; return true; } } @@ -1490,65 +1505,10 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ (ADDR) & TARGET_PAGE_MASK) -/* - * Return a ram_addr_t for the virtual address for execution. - * - * Return -1 if we can't translate and execute from an entire page - * of RAM. This will force us to execute by loading and translating - * one insn at a time, without caching. - * - * NOTE: This function will trigger an exception if the page is - * not executable. 
- */ -tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, - void **hostp) -{ - uintptr_t mmu_idx = cpu_mmu_index(env, true); - uintptr_t index = tlb_index(env, mmu_idx, addr); - CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); - void *p; - - if (unlikely(!tlb_hit(entry->addr_code, addr))) { - if (!VICTIM_TLB_HIT(addr_code, addr)) { - tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); - index = tlb_index(env, mmu_idx, addr); - entry = tlb_entry(env, mmu_idx, addr); - - if (unlikely(entry->addr_code & TLB_INVALID_MASK)) { - /* - * The MMU protection covers a smaller range than a target - * page, so we must redo the MMU check for every insn. - */ - return -1; - } - } - assert(tlb_hit(entry->addr_code, addr)); - } - - if (unlikely(entry->addr_code & TLB_MMIO)) { - /* The region is not backed by RAM. */ - if (hostp) { - *hostp = NULL; - } - return -1; - } - - p = (void *)((uintptr_t)addr + entry->addend); - if (hostp) { - *hostp = p; - } - return qemu_ram_addr_from_host_nofail(p); -} - -tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) -{ - return get_page_addr_code_hostp(env, addr, NULL); -} - static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, - CPUIOTLBEntry *iotlbentry, uintptr_t retaddr) + CPUTLBEntryFull *full, uintptr_t retaddr) { - ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr; + ram_addr_t ram_addr = mem_vaddr + full->xlat_section; trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); @@ -1575,7 +1535,8 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, static int probe_access_internal(CPUArchState *env, target_ulong addr, int fault_size, MMUAccessType access_type, int mmu_idx, bool nonfault, - void **phost, uintptr_t retaddr) + void **phost, CPUTLBEntryFull **pfull, + uintptr_t retaddr) { uintptr_t index = tlb_index(env, mmu_idx, addr); CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); @@ -1598,25 +1559,36 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr, } tlb_addr = tlb_read_ofs(entry, elt_ofs); + flags = TLB_FLAGS_MASK; page_addr = addr & TARGET_PAGE_MASK; if (!tlb_hit_page(tlb_addr, page_addr)) { if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) { CPUState *cs = env_cpu(env); - CPUClass *cc = CPU_GET_CLASS(cs); - if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type, - mmu_idx, nonfault, retaddr)) { + if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type, + mmu_idx, nonfault, retaddr)) { /* Non-faulting page table read failed. */ *phost = NULL; + *pfull = NULL; return TLB_INVALID_MASK; } /* TLB resize via tlb_fill may have moved the entry. */ + index = tlb_index(env, mmu_idx, addr); entry = tlb_entry(env, mmu_idx, addr); + + /* + * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately, + * to force the next access through tlb_fill. We've just + * called tlb_fill, so we know that this entry *is* valid. + */ + flags &= ~TLB_INVALID_MASK; } tlb_addr = tlb_read_ofs(entry, elt_ofs); } - flags = tlb_addr & TLB_FLAGS_MASK; + flags &= tlb_addr; + + *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index]; /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. 
*/ if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { @@ -1629,37 +1601,44 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr, return flags; } -int probe_access_flags(CPUArchState *env, target_ulong addr, - MMUAccessType access_type, int mmu_idx, - bool nonfault, void **phost, uintptr_t retaddr) +int probe_access_full(CPUArchState *env, target_ulong addr, + MMUAccessType access_type, int mmu_idx, + bool nonfault, void **phost, CPUTLBEntryFull **pfull, + uintptr_t retaddr) { - int flags; - - flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, - nonfault, phost, retaddr); + int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, + nonfault, phost, pfull, retaddr); /* Handle clean RAM pages. */ if (unlikely(flags & TLB_NOTDIRTY)) { - uintptr_t index = tlb_index(env, mmu_idx, addr); - CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; - - notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); + notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr); flags &= ~TLB_NOTDIRTY; } return flags; } +int probe_access_flags(CPUArchState *env, target_ulong addr, + MMUAccessType access_type, int mmu_idx, + bool nonfault, void **phost, uintptr_t retaddr) +{ + CPUTLBEntryFull *full; + + return probe_access_full(env, addr, access_type, mmu_idx, + nonfault, phost, &full, retaddr); +} + void *probe_access(CPUArchState *env, target_ulong addr, int size, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { + CPUTLBEntryFull *full; void *host; int flags; g_assert(-(addr | TARGET_PAGE_MASK) >= size); flags = probe_access_internal(env, addr, size, access_type, mmu_idx, - false, &host, retaddr); + false, &host, &full, retaddr); /* Per the interface, size == 0 merely faults the access. */ if (size == 0) { @@ -1667,24 +1646,21 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size, } if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { - uintptr_t index = tlb_index(env, mmu_idx, addr); - CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; - /* Handle watchpoints. */ if (flags & TLB_WATCHPOINT) { int wp_access = (access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ); #ifdef XBOX mem_check_access_callback_vaddr(env_cpu(env), addr, size, wp_access, - iotlbentry); + full); #endif cpu_check_watchpoint(env_cpu(env), addr, size, - iotlbentry->attrs, wp_access, retaddr); + full->attrs, wp_access, retaddr); } /* Handle clean RAM pages. */ if (flags & TLB_NOTDIRTY) { - notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); + notdirty_write(env_cpu(env), addr, 1, full, retaddr); } } @@ -1694,16 +1670,44 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size, void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, MMUAccessType access_type, int mmu_idx) { + CPUTLBEntryFull *full; void *host; int flags; flags = probe_access_internal(env, addr, 0, access_type, - mmu_idx, true, &host, 0); + mmu_idx, true, &host, &full, 0); /* No combination of flags are expected by the caller. */ return flags ? NULL : host; } +/* + * Return a ram_addr_t for the virtual address for execution. + * + * Return -1 if we can't translate and execute from an entire page + * of RAM. This will force us to execute by loading and translating + * one insn at a time, without caching. + * + * NOTE: This function will trigger an exception if the page is + * not executable. 
+ */ +tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, + void **hostp) +{ + CPUTLBEntryFull *full; + void *p; + + (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH, + cpu_mmu_index(env, true), false, &p, &full, 0); + if (p == NULL) { + return -1; + } + if (hostp) { + *hostp = p; + } + return qemu_ram_addr_from_host_nofail(p); +} + #ifdef CONFIG_PLUGIN /* * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. @@ -1715,7 +1719,7 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, * should have just filled the TLB. The one corner case is io_writex * which can cause TLB flushes and potential resizing of the TLBs * losing the information we need. In those cases we need to recover - * data from a copy of the iotlbentry. As long as this always occurs + * data from a copy of the CPUTLBEntryFull. As long as this always occurs * from the same thread (which a mem callback will be) this is safe. */ @@ -1730,11 +1734,12 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, if (likely(tlb_hit(tlb_addr, addr))) { /* We must have an iotlb entry for MMIO */ if (tlb_addr & TLB_MMIO) { - CPUIOTLBEntry *iotlbentry; - iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; + CPUTLBEntryFull *full; + full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; data->is_io = true; - data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); - data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; + data->v.io.section = + iotlb_to_section(cpu, full->xlat_section, full->attrs); + data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; } else { data->is_io = false; data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend); @@ -1758,10 +1763,10 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE. */ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, int size, int prot, + MemOpIdx oi, int size, int prot, uintptr_t retaddr) { - size_t mmu_idx = get_mmuidx(oi); + uintptr_t mmu_idx = get_mmuidx(oi); MemOp mop = get_memop(oi); int a_bits = get_alignment_bits(mop); uintptr_t index; @@ -1769,6 +1774,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, target_ulong tlb_addr; void *hostaddr; + tcg_debug_assert(mmu_idx < NB_MMU_MODES); + /* Adjust the given return address. */ retaddr -= GETPC_ADJ; @@ -1819,7 +1826,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, } else /* if (prot & PAGE_READ) */ { tlb_addr = tlbe->addr_read; if (!tlb_hit(tlb_addr, addr)) { - if (!VICTIM_TLB_HIT(addr_write, addr)) { + if (!VICTIM_TLB_HIT(addr_read, addr)) { tlb_fill(env_cpu(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); index = tlb_index(env, mmu_idx, addr); @@ -1840,7 +1847,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, if (unlikely(tlb_addr & TLB_NOTDIRTY)) { notdirty_write(env_cpu(env), addr, size, - &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr); + &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr); } return hostaddr; @@ -1849,6 +1856,25 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, cpu_loop_exit_atomic(env_cpu(env), retaddr); } +/* + * Verify that we have passed the correct MemOp to the correct function. + * + * In the case of the helper_*_mmu functions, we will have done this by + * using the MemOp to look up the helper during code generation. 
+ * + * In the case of the cpu_*_mmu functions, this is up to the caller. + * We could present one function to target code, and dispatch based on + * the MemOp, but so far we have worked hard to avoid an indirect function + * call along the memory path. + */ +static void validate_memop(MemOpIdx oi, MemOp expected) +{ +#ifdef CONFIG_DEBUG_TCG + MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); + assert(have == expected); +#endif +} + /* * Load Helpers * @@ -1859,7 +1885,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, */ typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); + MemOpIdx oi, uintptr_t retaddr); static inline uint64_t QEMU_ALWAYS_INLINE load_memop(const void *haddr, MemOp op) @@ -1875,9 +1901,9 @@ load_memop(const void *haddr, MemOp op) return (uint32_t)ldl_be_p(haddr); case MO_LEUL: return (uint32_t)ldl_le_p(haddr); - case MO_BEQ: + case MO_BEUQ: return ldq_be_p(haddr); - case MO_LEQ: + case MO_LEUQ: return ldq_le_p(haddr); default: qemu_build_not_reached(); @@ -1885,22 +1911,24 @@ load_memop(const void *haddr, MemOp op) } static inline uint64_t QEMU_ALWAYS_INLINE -load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, +load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi, uintptr_t retaddr, MemOp op, bool code_read, FullLoadHelper *full_load) { - uintptr_t mmu_idx = get_mmuidx(oi); - uintptr_t index = tlb_index(env, mmu_idx, addr); - CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); - target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read; const size_t tlb_off = code_read ? offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read); const MMUAccessType access_type = code_read ? MMU_INST_FETCH : MMU_DATA_LOAD; - unsigned a_bits = get_alignment_bits(get_memop(oi)); + const unsigned a_bits = get_alignment_bits(get_memop(oi)); + const size_t size = memop_size(op); + uintptr_t mmu_idx = get_mmuidx(oi); + uintptr_t index; + CPUTLBEntry *entry; + target_ulong tlb_addr; void *haddr; uint64_t res; - size_t size = memop_size(op); + + tcg_debug_assert(mmu_idx < NB_MMU_MODES); /* Handle CPU specific unaligned behaviour */ if (addr & ((1 << a_bits) - 1)) { @@ -1908,6 +1936,10 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, mmu_idx, retaddr); } + index = tlb_index(env, mmu_idx, addr); + entry = tlb_entry(env, mmu_idx, addr); + tlb_addr = code_read ? entry->addr_code : entry->addr_read; + /* If the TLB entry is for a different page, reload and try again. */ if (!tlb_hit(tlb_addr, addr)) { if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, @@ -1923,7 +1955,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, /* Handle anything that isn't just a straight memory access. */ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - CPUIOTLBEntry *iotlbentry; + CPUTLBEntryFull *full; bool need_swap; /* For anything that is unaligned, recurse through full_load. */ @@ -1931,25 +1963,25 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, goto do_unaligned_access; } - iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; + full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; /* Handle watchpoints. */ if (unlikely(tlb_addr & TLB_WATCHPOINT)) { #ifdef XBOX mem_check_access_callback_vaddr(env_cpu(env), addr, size, - BP_MEM_READ, iotlbentry); + BP_MEM_READ, full); #endif /* On watchpoint hit, this will longjmp out. 
*/ cpu_check_watchpoint(env_cpu(env), addr, size, - iotlbentry->attrs, BP_MEM_READ, retaddr); + full->attrs, BP_MEM_READ, retaddr); } need_swap = size > 1 && (tlb_addr & TLB_BSWAP); /* Handle I/O access. */ if (likely(tlb_addr & TLB_MMIO)) { - return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, + return io_readx(env, full, mmu_idx, addr, retaddr, access_type, op ^ (need_swap * MO_BSWAP)); } @@ -2005,80 +2037,87 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, */ static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { + validate_memop(oi, MO_UB); return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); } tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return full_ldub_mmu(env, addr, oi, retaddr); } static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { + validate_memop(oi, MO_LEUW); return load_helper(env, addr, oi, retaddr, MO_LEUW, false, full_le_lduw_mmu); } tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return full_le_lduw_mmu(env, addr, oi, retaddr); } static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { + validate_memop(oi, MO_BEUW); return load_helper(env, addr, oi, retaddr, MO_BEUW, false, full_be_lduw_mmu); } tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return full_be_lduw_mmu(env, addr, oi, retaddr); } static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { + validate_memop(oi, MO_LEUL); return load_helper(env, addr, oi, retaddr, MO_LEUL, false, full_le_ldul_mmu); } tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return full_le_ldul_mmu(env, addr, oi, retaddr); } static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { + validate_memop(oi, MO_BEUL); return load_helper(env, addr, oi, retaddr, MO_BEUL, false, full_be_ldul_mmu); } tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return full_be_ldul_mmu(env, addr, oi, retaddr); } uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { - return load_helper(env, addr, oi, retaddr, MO_LEQ, false, + validate_memop(oi, MO_LEUQ); + return load_helper(env, addr, oi, retaddr, MO_LEUQ, false, helper_le_ldq_mmu); } uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { - return load_helper(env, addr, oi, retaddr, MO_BEQ, false, + validate_memop(oi, MO_BEUQ); + return load_helper(env, addr, oi, retaddr, MO_BEUQ, false, helper_be_ldq_mmu); } @@ -2089,31 +2128,31 @@ uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, 
uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); } tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); } tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); } tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); } tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); } @@ -2123,193 +2162,55 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, */ static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t retaddr, - MemOp op, FullLoadHelper *full_load) + MemOpIdx oi, uintptr_t retaddr, + FullLoadHelper *full_load) { - uint16_t meminfo; - TCGMemOpIdx oi; uint64_t ret; - meminfo = trace_mem_get_info(op, mmu_idx, false); - trace_guest_mem_before_exec(env_cpu(env), addr, meminfo); - - op &= ~MO_SIGN; - oi = make_memop_idx(op, mmu_idx); ret = full_load(env, addr, oi, retaddr); - - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo); - + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return ret; } -uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) +uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) { - return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu); + return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu); } -int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) +uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB, - full_ldub_mmu); + return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu); } -uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) +uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUW, full_be_lduw_mmu); + return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu); } -int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) +uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_BESW, - full_be_lduw_mmu); + return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu); } -uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) +uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUL, full_be_ldul_mmu); + return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu); } -uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) +uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEQ, helper_be_ldq_mmu); + return 
cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu); } -uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) +uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUW, full_le_lduw_mmu); -} - -int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_LESW, - full_le_lduw_mmu); -} - -uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUL, full_le_ldul_mmu); -} - -uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEQ, helper_le_ldq_mmu); -} - -uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr, - uintptr_t retaddr) -{ - return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) -{ - return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -uint32_t cpu_lduw_be_data_ra(CPUArchState *env, target_ulong ptr, - uintptr_t retaddr) -{ - return cpu_lduw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -int cpu_ldsw_be_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) -{ - return cpu_ldsw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -uint32_t cpu_ldl_be_data_ra(CPUArchState *env, target_ulong ptr, - uintptr_t retaddr) -{ - return cpu_ldl_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -uint64_t cpu_ldq_be_data_ra(CPUArchState *env, target_ulong ptr, - uintptr_t retaddr) -{ - return cpu_ldq_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -uint32_t cpu_lduw_le_data_ra(CPUArchState *env, target_ulong ptr, - uintptr_t retaddr) -{ - return cpu_lduw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -int cpu_ldsw_le_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) -{ - return cpu_ldsw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -uint32_t cpu_ldl_le_data_ra(CPUArchState *env, target_ulong ptr, - uintptr_t retaddr) -{ - return cpu_ldl_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -uint64_t cpu_ldq_le_data_ra(CPUArchState *env, target_ulong ptr, - uintptr_t retaddr) -{ - return cpu_ldq_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_ldub_data_ra(env, ptr, 0); -} - -int cpu_ldsb_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_ldsb_data_ra(env, ptr, 0); -} - -uint32_t cpu_lduw_be_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_lduw_be_data_ra(env, ptr, 0); -} - -int cpu_ldsw_be_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_ldsw_be_data_ra(env, ptr, 0); -} - -uint32_t cpu_ldl_be_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_ldl_be_data_ra(env, ptr, 0); -} - -uint64_t cpu_ldq_be_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_ldq_be_data_ra(env, ptr, 0); -} - -uint32_t cpu_lduw_le_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_lduw_le_data_ra(env, ptr, 0); -} - -int cpu_ldsw_le_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_ldsw_le_data_ra(env, ptr, 0); -} - -uint32_t cpu_ldl_le_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_ldl_le_data_ra(env, ptr, 
0); -} - -uint64_t cpu_ldq_le_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_ldq_le_data_ra(env, ptr, 0); + return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu); } /* @@ -2335,10 +2236,10 @@ store_memop(void *haddr, uint64_t val, MemOp op) case MO_LEUL: stl_le_p(haddr, val); break; - case MO_BEQ: + case MO_BEUQ: stq_be_p(haddr, val); break; - case MO_LEQ: + case MO_LEUQ: stq_le_p(haddr, val); break; default: @@ -2346,6 +2247,9 @@ store_memop(void *haddr, uint64_t val, MemOp op) } } +static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr); + static void __attribute__((noinline)) store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val, uintptr_t retaddr, size_t size, uintptr_t mmu_idx, @@ -2354,26 +2258,25 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val, const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); uintptr_t index, index2; CPUTLBEntry *entry, *entry2; - target_ulong page2, tlb_addr, tlb_addr2; - TCGMemOpIdx oi; + target_ulong page1, page2, tlb_addr, tlb_addr2; + MemOpIdx oi; size_t size2; int i; /* * Ensure the second page is in the TLB. Note that the first page * is already guaranteed to be filled, and that the second page - * cannot evict the first. + * cannot evict the first. An exception to this rule is PAGE_WRITE_INV + * handling: the first page could have evicted itself. */ - - // FIXME: Upstream patch for this - if ((addr & ~TARGET_PAGE_MASK) + size - 1 >= TARGET_PAGE_SIZE) { - - page2 = (addr + size) & TARGET_PAGE_MASK; + page1 = addr & TARGET_PAGE_MASK; + page2 = (addr + size) & TARGET_PAGE_MASK; + if (page1 != page2) { size2 = (addr + size) & ~TARGET_PAGE_MASK; index2 = tlb_index(env, mmu_idx, page2); entry2 = tlb_entry(env, mmu_idx, page2); - tlb_addr2 = tlb_addr_write(entry2); + if (!tlb_hit_page(tlb_addr2, page2)) { if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, @@ -2381,11 +2284,8 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val, index2 = tlb_index(env, mmu_idx, page2); entry2 = tlb_entry(env, mmu_idx, page2); } - tlb_addr2 = tlb_addr_write(entry2); } - } else { - /* The access happens on a single page */ size2 = 0; } @@ -2400,19 +2300,19 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val, if (unlikely(tlb_addr & TLB_WATCHPOINT)) { #ifdef XBOX mem_check_access_callback_vaddr(env_cpu(env), addr, size - size2, - BP_MEM_WRITE, &env_tlb(env)->d[mmu_idx].iotlb[index]); + BP_MEM_WRITE, &env_tlb(env)->d[mmu_idx].fulltlb[index]); #endif cpu_check_watchpoint(env_cpu(env), addr, size - size2, - env_tlb(env)->d[mmu_idx].iotlb[index].attrs, + env_tlb(env)->d[mmu_idx].fulltlb[index].attrs, BP_MEM_WRITE, retaddr); } - if (size2 > 0 && unlikely(tlb_addr2 & TLB_WATCHPOINT)) { + if (size2 && unlikely(tlb_addr2 & TLB_WATCHPOINT)) { #ifdef XBOX mem_check_access_callback_vaddr(env_cpu(env), page2, size2, - BP_MEM_WRITE, &env_tlb(env)->d[mmu_idx].iotlb[index2]); + BP_MEM_WRITE, &env_tlb(env)->d[mmu_idx].fulltlb[index2]); #endif cpu_check_watchpoint(env_cpu(env), page2, size2, - env_tlb(env)->d[mmu_idx].iotlb[index2].attrs, + env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs, BP_MEM_WRITE, retaddr); } @@ -2426,29 +2326,31 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val, for (i = 0; i < size; ++i) { /* Big-endian extract. 
*/ uint8_t val8 = val >> (((size - 1) * 8) - (i * 8)); - helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr); + full_stb_mmu(env, addr + i, val8, oi, retaddr); } } else { for (i = 0; i < size; ++i) { /* Little-endian extract. */ uint8_t val8 = val >> (i * 8); - helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr); + full_stb_mmu(env, addr + i, val8, oi, retaddr); } } } static inline void QEMU_ALWAYS_INLINE store_helper(CPUArchState *env, target_ulong addr, uint64_t val, - TCGMemOpIdx oi, uintptr_t retaddr, MemOp op) + MemOpIdx oi, uintptr_t retaddr, MemOp op) { - uintptr_t mmu_idx = get_mmuidx(oi); - uintptr_t index = tlb_index(env, mmu_idx, addr); - CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); - target_ulong tlb_addr = tlb_addr_write(entry); const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); - unsigned a_bits = get_alignment_bits(get_memop(oi)); + const unsigned a_bits = get_alignment_bits(get_memop(oi)); + const size_t size = memop_size(op); + uintptr_t mmu_idx = get_mmuidx(oi); + uintptr_t index; + CPUTLBEntry *entry; + target_ulong tlb_addr; void *haddr; - size_t size = memop_size(op); + + tcg_debug_assert(mmu_idx < NB_MMU_MODES); /* Handle CPU specific unaligned behaviour */ if (addr & ((1 << a_bits) - 1)) { @@ -2456,6 +2358,10 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, mmu_idx, retaddr); } + index = tlb_index(env, mmu_idx, addr); + entry = tlb_entry(env, mmu_idx, addr); + tlb_addr = tlb_addr_write(entry); + /* If the TLB entry is for a different page, reload and try again. */ if (!tlb_hit(tlb_addr, addr)) { if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, @@ -2470,7 +2376,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, /* Handle anything that isn't just a straight memory access. */ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - CPUIOTLBEntry *iotlbentry; + CPUTLBEntryFull *full; bool need_swap; /* For anything that is unaligned, recurse through byte stores. */ @@ -2478,25 +2384,25 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, goto do_unaligned_access; } - iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; + full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; /* Handle watchpoints. */ if (unlikely(tlb_addr & TLB_WATCHPOINT)) { #ifdef XBOX mem_check_access_callback_vaddr(env_cpu(env), addr, size, - BP_MEM_WRITE, iotlbentry); + BP_MEM_WRITE, full); #endif /* On watchpoint hit, this will longjmp out. */ cpu_check_watchpoint(env_cpu(env), addr, size, - iotlbentry->attrs, BP_MEM_WRITE, retaddr); + full->attrs, BP_MEM_WRITE, retaddr); } need_swap = size > 1 && (tlb_addr & TLB_BSWAP); /* Handle I/O access. */ if (tlb_addr & TLB_MMIO) { - io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, + io_writex(env, full, mmu_idx, val, addr, retaddr, op ^ (need_swap * MO_BSWAP)); return; } @@ -2508,7 +2414,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, /* Handle clean RAM pages. 
*/ if (tlb_addr & TLB_NOTDIRTY) { - notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr); + notdirty_write(env_cpu(env), addr, size, full, retaddr); } haddr = (void *)((uintptr_t)addr + entry->addend); @@ -2540,187 +2446,144 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, store_memop(haddr, val, op); } -void __attribute__((noinline)) -helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, - TCGMemOpIdx oi, uintptr_t retaddr) +static void __attribute__((noinline)) +full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) { + validate_memop(oi, MO_UB); store_helper(env, addr, val, oi, retaddr, MO_UB); } -void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, - TCGMemOpIdx oi, uintptr_t retaddr) +void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, + MemOpIdx oi, uintptr_t retaddr) { + full_stb_mmu(env, addr, val, oi, retaddr); +} + +static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_LEUW); store_helper(env, addr, val, oi, retaddr, MO_LEUW); } -void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, - TCGMemOpIdx oi, uintptr_t retaddr) +void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr) { + full_le_stw_mmu(env, addr, val, oi, retaddr); +} + +static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_BEUW); store_helper(env, addr, val, oi, retaddr, MO_BEUW); } -void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - TCGMemOpIdx oi, uintptr_t retaddr) +void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr) { + full_be_stw_mmu(env, addr, val, oi, retaddr); +} + +static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_LEUL); store_helper(env, addr, val, oi, retaddr, MO_LEUL); } -void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - TCGMemOpIdx oi, uintptr_t retaddr) +void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) { + full_le_stl_mmu(env, addr, val, oi, retaddr); +} + +static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_BEUL); store_helper(env, addr, val, oi, retaddr, MO_BEUL); } -void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - TCGMemOpIdx oi, uintptr_t retaddr) +void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) { - store_helper(env, addr, val, oi, retaddr, MO_LEQ); + full_be_stl_mmu(env, addr, val, oi, retaddr); +} + +void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_LEUQ); + store_helper(env, addr, val, oi, retaddr, MO_LEUQ); } void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { - store_helper(env, addr, val, oi, retaddr, MO_BEQ); + validate_memop(oi, MO_BEUQ); + store_helper(env, addr, val, oi, retaddr, MO_BEUQ); } /* * Store Helpers for cpu_ldst.h */ -static inline void QEMU_ALWAYS_INLINE -cpu_store_helper(CPUArchState *env, 
target_ulong addr, uint64_t val, - int mmu_idx, uintptr_t retaddr, MemOp op) +typedef void FullStoreHelper(CPUArchState *env, target_ulong addr, + uint64_t val, MemOpIdx oi, uintptr_t retaddr); + +static inline void cpu_store_helper(CPUArchState *env, target_ulong addr, + uint64_t val, MemOpIdx oi, uintptr_t ra, + FullStoreHelper *full_store) { - TCGMemOpIdx oi; - uint16_t meminfo; - - meminfo = trace_mem_get_info(op, mmu_idx, true); - trace_guest_mem_before_exec(env_cpu(env), addr, meminfo); - - oi = make_memop_idx(op, mmu_idx); - store_helper(env, addr, val, oi, retaddr, op); - - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo); + full_store(env, addr, val, oi, ra); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, - int mmu_idx, uintptr_t retaddr) +void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, + MemOpIdx oi, uintptr_t retaddr) { - cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB); + cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu); } -void cpu_stw_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, - int mmu_idx, uintptr_t retaddr) +void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr) { - cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUW); + cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu); } -void cpu_stl_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, - int mmu_idx, uintptr_t retaddr) +void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) { - cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUL); + cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu); } -void cpu_stq_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val, - int mmu_idx, uintptr_t retaddr) +void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) { - cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEQ); + cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu); } -void cpu_stw_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, - int mmu_idx, uintptr_t retaddr) +void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr) { - cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUW); + cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu); } -void cpu_stl_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, - int mmu_idx, uintptr_t retaddr) +void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) { - cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUL); + cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu); } -void cpu_stq_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val, - int mmu_idx, uintptr_t retaddr) +void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) { - cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEQ); + cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu); } -void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr, - uint32_t val, uintptr_t retaddr) -{ - cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); -} - -void cpu_stw_be_data_ra(CPUArchState *env, target_ulong ptr, - uint32_t val, uintptr_t retaddr) -{ - cpu_stw_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), 
retaddr); -} - -void cpu_stl_be_data_ra(CPUArchState *env, target_ulong ptr, - uint32_t val, uintptr_t retaddr) -{ - cpu_stl_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); -} - -void cpu_stq_be_data_ra(CPUArchState *env, target_ulong ptr, - uint64_t val, uintptr_t retaddr) -{ - cpu_stq_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); -} - -void cpu_stw_le_data_ra(CPUArchState *env, target_ulong ptr, - uint32_t val, uintptr_t retaddr) -{ - cpu_stw_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); -} - -void cpu_stl_le_data_ra(CPUArchState *env, target_ulong ptr, - uint32_t val, uintptr_t retaddr) -{ - cpu_stl_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); -} - -void cpu_stq_le_data_ra(CPUArchState *env, target_ulong ptr, - uint64_t val, uintptr_t retaddr) -{ - cpu_stq_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); -} - -void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val) -{ - cpu_stb_data_ra(env, ptr, val, 0); -} - -void cpu_stw_be_data(CPUArchState *env, target_ulong ptr, uint32_t val) -{ - cpu_stw_be_data_ra(env, ptr, val, 0); -} - -void cpu_stl_be_data(CPUArchState *env, target_ulong ptr, uint32_t val) -{ - cpu_stl_be_data_ra(env, ptr, val, 0); -} - -void cpu_stq_be_data(CPUArchState *env, target_ulong ptr, uint64_t val) -{ - cpu_stq_be_data_ra(env, ptr, val, 0); -} - -void cpu_stw_le_data(CPUArchState *env, target_ulong ptr, uint32_t val) -{ - cpu_stw_le_data_ra(env, ptr, val, 0); -} - -void cpu_stl_le_data(CPUArchState *env, target_ulong ptr, uint32_t val) -{ - cpu_stl_le_data_ra(env, ptr, val, 0); -} - -void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val) -{ - cpu_stq_le_data_ra(env, ptr, val, 0); -} +#include "ldst_common.c.inc" /* * First set of functions passes in OI and RETADDR. @@ -2731,7 +2594,6 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val) glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu) #define ATOMIC_MMU_CLEANUP -#define ATOMIC_MMU_IDX get_mmuidx(oi) #include "atomic_common.c.inc" @@ -2757,57 +2619,57 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val) /* Code access functions. 
*/ static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code); } uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) { - TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); + MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); return full_ldub_code(env, addr, oi, 0); } void cpu_ld_code(CPUArchState *env, abi_ptr addr, size_t len, uint8_t *out) { - TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); + MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); for (size_t i = 0; i < len; i++) { out[i] = full_ldub_code(env, addr+i, oi, 0); } } static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code); } uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) { - TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true)); + MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true)); return full_lduw_code(env, addr, oi, 0); } static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code); } uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) { - TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true)); + MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true)); return full_ldl_code(env, addr, oi, 0); } static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { - return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code); + return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code); } uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) { - TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true)); + MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true)); return full_ldq_code(env, addr, oi, 0); } diff --git a/accel/tcg/hmp.c b/accel/tcg/hmp.c index a6e72fdb3e..bb67941420 100644 --- a/accel/tcg/hmp.c +++ b/accel/tcg/hmp.c @@ -1,29 +1,14 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" #include "exec/exec-all.h" #include "monitor/monitor.h" -#include "sysemu/tcg.h" - -static void hmp_info_jit(Monitor *mon, const QDict *qdict) -{ - if (!tcg_enabled()) { - error_report("JIT information is only available with accel=tcg"); - return; - } - - dump_exec_info(); - dump_drift_info(); -} - -static void hmp_info_opcount(Monitor *mon, const QDict *qdict) -{ - dump_opcount_info(); -} static void hmp_tcg_register(void) { - monitor_register_hmp("jit", true, hmp_info_jit); - monitor_register_hmp("opcount", true, hmp_info_opcount); + monitor_register_hmp_info_hrt("jit", qmp_x_query_jit); + monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount); } type_init(hmp_tcg_register); diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h index 881bc1ede0..cb13bade4f 100644 --- a/accel/tcg/internal.h +++ b/accel/tcg/internal.h @@ -11,12 +11,112 @@ #include "exec/exec-all.h" +/* + * Access to the various translations structures need to be serialised + * via locks for consistency. In user-mode emulation access to the + * memory related structures are protected with mmap_lock. 
+ * In !user-mode we use per-page locks. + */ +#ifdef CONFIG_SOFTMMU +#define assert_memory_lock() +#else +#define assert_memory_lock() tcg_debug_assert(have_mmap_lock()) +#endif + +typedef struct PageDesc { + /* list of TBs intersecting this ram page */ + uintptr_t first_tb; +#ifdef CONFIG_USER_ONLY + unsigned long flags; + void *target_data; +#endif +#ifdef CONFIG_SOFTMMU + QemuSpin lock; +#endif +} PageDesc; + +/* Size of the L2 (and L3, etc) page tables. */ +#define V_L2_BITS 10 +#define V_L2_SIZE (1 << V_L2_BITS) + +/* + * L1 Mapping properties + */ +extern int v_l1_size; +extern int v_l1_shift; +extern int v_l2_levels; + +/* + * The bottom level has pointers to PageDesc, and is indexed by + * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size. + */ +#define V_L1_MIN_BITS 4 +#define V_L1_MAX_BITS (V_L2_BITS + 3) +#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS) + +extern void *l1_map[V_L1_MAX_SIZE]; + +PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc); + +static inline PageDesc *page_find(tb_page_addr_t index) +{ + return page_find_alloc(index, false); +} + +/* list iterators for lists of tagged pointers in TranslationBlock */ +#define TB_FOR_EACH_TAGGED(head, tb, n, field) \ + for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \ + tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \ + tb = (TranslationBlock *)((uintptr_t)tb & ~1)) + +#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \ + TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next) + +#define TB_FOR_EACH_JMP(head_tb, tb, n) \ + TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next) + +/* In user-mode page locks aren't used; mmap_lock is enough */ +#ifdef CONFIG_USER_ONLY +#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock()) +static inline void page_lock(PageDesc *pd) { } +static inline void page_unlock(PageDesc *pd) { } +#else +#ifdef CONFIG_DEBUG_TCG +void do_assert_page_locked(const PageDesc *pd, const char *file, int line); +#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__) +#else +#define assert_page_locked(pd) +#endif +void page_lock(PageDesc *pd); +void page_unlock(PageDesc *pd); +#endif +#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG) +void assert_no_pages_locked(void); +#else +static inline void assert_no_pages_locked(void) { } +#endif + TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc, target_ulong cs_base, uint32_t flags, int cflags); - -void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr); +G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr); void page_init(void); void tb_htable_init(void); +void tb_reset_jump(TranslationBlock *tb, int n); +TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, + tb_page_addr_t phys_page2); +bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc); +void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, + uintptr_t host_pc); + +/* Return the current PC from CPU, which may be cached in TB. */ +static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb) +{ +#if TARGET_TB_PCREL + return cpu->cc->get_pc(cpu); +#else + return tb_pc(tb); +#endif +} #endif /* ACCEL_TCG_INTERNAL_H */ diff --git a/accel/tcg/ldst_common.c.inc b/accel/tcg/ldst_common.c.inc new file mode 100644 index 0000000000..6ac8d871a3 --- /dev/null +++ b/accel/tcg/ldst_common.c.inc @@ -0,0 +1,307 @@ +/* + * Routines common to user and system emulation of load/store. 
+ * + * Copyright (c) 2003 Fabrice Bellard + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + return cpu_ldb_mmu(env, addr, oi, ra); +} + +int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra); +} + +uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx); + return cpu_ldw_be_mmu(env, addr, oi, ra); +} + +int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra); +} + +uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx); + return cpu_ldl_be_mmu(env, addr, oi, ra); +} + +uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx); + return cpu_ldq_be_mmu(env, addr, oi, ra); +} + +uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx); + return cpu_ldw_le_mmu(env, addr, oi, ra); +} + +int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra); +} + +uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx); + return cpu_ldl_le_mmu(env, addr, oi, ra); +} + +uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx); + return cpu_ldq_le_mmu(env, addr, oi, ra); +} + +void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + cpu_stb_mmu(env, addr, val, oi, ra); +} + +void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx); + cpu_stw_be_mmu(env, addr, val, oi, ra); +} + +void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx); + cpu_stl_be_mmu(env, addr, val, oi, ra); +} + +void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx); + cpu_stq_be_mmu(env, addr, val, oi, ra); +} + +void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx); + cpu_stw_le_mmu(env, addr, val, oi, ra); +} + +void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx); + cpu_stl_le_mmu(env, addr, val, oi, ra); +} + +void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUQ | 
MO_UNALN, mmu_idx); + cpu_stq_le_mmu(env, addr, val, oi, ra); +} + +/*--------------------------*/ + +uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldub_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return (int8_t)cpu_ldub_data_ra(env, addr, ra); +} + +uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_lduw_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return (int16_t)cpu_lduw_be_data_ra(env, addr, ra); +} + +uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldl_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldq_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_lduw_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return (int16_t)cpu_lduw_le_data_ra(env, addr, ra); +} + +uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldl_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldq_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +void cpu_stb_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stb_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stw_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stl_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr, + uint64_t val, uintptr_t ra) +{ + cpu_stq_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stw_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stl_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr, + uint64_t val, uintptr_t ra) +{ + cpu_stq_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +/*--------------------------*/ + +uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldub_data_ra(env, addr, 0); +} + +int cpu_ldsb_data(CPUArchState *env, abi_ptr addr) +{ + return (int8_t)cpu_ldub_data(env, addr); +} + +uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_lduw_be_data_ra(env, addr, 0); +} + +int cpu_ldsw_be_data(CPUArchState *env, abi_ptr addr) +{ + return (int16_t)cpu_lduw_be_data(env, addr); +} + +uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldl_be_data_ra(env, addr, 0); +} + +uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldq_be_data_ra(env, addr, 0); +} + +uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_lduw_le_data_ra(env, addr, 0); +} + 
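
Editor's note, for orientation only (an illustrative sketch, not part of the patch): the wrappers in ldst_common.c.inc keep the old mmu_idx-based call sites working by packing the MemOp and mmu_idx into a MemOpIdx with make_memop_idx() and forwarding to the new cpu_*_mmu entry points. Target code can also build the MemOpIdx itself, which makes properties such as alignment explicit at the call site. The helper name do_example_load below is hypothetical; cpu_ldl_le_mmu, make_memop_idx, cpu_mmu_index, MO_LEUL and MO_ALIGN are the interfaces introduced or used by this patch.

    /*
     * Hypothetical target helper (illustration only): an aligned
     * little-endian 32-bit guest load via the new MemOpIdx-based API,
     * rather than the MO_UNALN wrappers defined above.
     */
    static uint32_t do_example_load(CPUArchState *env, target_ulong addr,
                                    uintptr_t ra)
    {
        int mmu_idx = cpu_mmu_index(env, false);
        /* Fold the memory op and mmu index into one MemOpIdx. */
        MemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN, mmu_idx);
        return cpu_ldl_le_mmu(env, addr, oi, ra);
    }

The MO_UNALN wrappers above preserve the previous unaligned-access behaviour; passing MO_ALIGN instead, as sketched here, simply makes the alignment requirement visible where the load is issued.
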
+int cpu_ldsw_le_data(CPUArchState *env, abi_ptr addr) +{ + return (int16_t)cpu_lduw_le_data(env, addr); +} + +uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldl_le_data_ra(env, addr, 0); +} + +uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldq_le_data_ra(env, addr, 0); +} + +void cpu_stb_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stb_data_ra(env, addr, val, 0); +} + +void cpu_stw_be_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stw_be_data_ra(env, addr, val, 0); +} + +void cpu_stl_be_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stl_be_data_ra(env, addr, val, 0); +} + +void cpu_stq_be_data(CPUArchState *env, abi_ptr addr, uint64_t val) +{ + cpu_stq_be_data_ra(env, addr, val, 0); +} + +void cpu_stw_le_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stw_le_data_ra(env, addr, val, 0); +} + +void cpu_stl_le_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stl_le_data_ra(env, addr, val, 0); +} + +void cpu_stq_le_data(CPUArchState *env, abi_ptr addr, uint64_t val) +{ + cpu_stq_le_data_ra(env, addr, val, 0); +} diff --git a/accel/tcg/meson.build b/accel/tcg/meson.build index 137a1a44cc..75e1dffb4d 100644 --- a/accel/tcg/meson.build +++ b/accel/tcg/meson.build @@ -3,6 +3,7 @@ tcg_ss.add(files( 'tcg-all.c', 'cpu-exec-common.c', 'cpu-exec.c', + 'tb-maint.c', 'tcg-runtime-gvec.c', 'tcg-runtime.c', 'translate-all.c', @@ -10,7 +11,7 @@ tcg_ss.add(files( )) tcg_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c')) tcg_ss.add(when: 'CONFIG_SOFTMMU', if_false: files('user-exec-stub.c')) -tcg_ss.add(when: 'CONFIG_PLUGIN', if_true: [files('plugin-gen.c'), libdl]) +tcg_ss.add(when: 'CONFIG_PLUGIN', if_true: [files('plugin-gen.c')]) specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss) specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files( diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c index 88e25c6df9..80dff68934 100644 --- a/accel/tcg/plugin-gen.c +++ b/accel/tcg/plugin-gen.c @@ -45,7 +45,6 @@ #include "qemu/osdep.h" #include "tcg/tcg.h" #include "tcg/tcg-op.h" -#include "trace/mem.h" #include "exec/exec-all.h" #include "exec/plugin-gen.h" #include "exec/translator.h" @@ -163,11 +162,7 @@ static void gen_empty_mem_helper(void) static void gen_plugin_cb_start(enum plugin_gen_from from, enum plugin_gen_cb type, unsigned wr) { - TCGOp *op; - tcg_gen_plugin_cb_start(from, type, wr); - op = tcg_last_op(); - QSIMPLEQ_INSERT_TAIL(&tcg_ctx->plugin_ops, op, plugin_link); } static void gen_wrapped(enum plugin_gen_from from, @@ -211,9 +206,9 @@ static void gen_mem_wrapped(enum plugin_gen_cb type, const union mem_gen_fn *f, TCGv addr, uint32_t info, bool is_mem) { - int wr = !!(info & TRACE_MEM_ST); + enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info); - gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, wr); + gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, rw); if (is_mem) { f->mem_fn(addr, info); } else { @@ -707,62 +702,6 @@ static void plugin_gen_disable_mem_helper(const struct qemu_plugin_tb *ptb, inject_mem_disable_helper(insn, begin_op); } -static void plugin_inject_cb(const struct qemu_plugin_tb *ptb, TCGOp *begin_op, - int insn_idx) -{ - enum plugin_gen_from from = begin_op->args[0]; - enum plugin_gen_cb type = begin_op->args[1]; - - switch (from) { - case PLUGIN_GEN_FROM_TB: - switch (type) { - case PLUGIN_GEN_CB_UDATA: - plugin_gen_tb_udata(ptb, begin_op); - return; - case PLUGIN_GEN_CB_INLINE: - plugin_gen_tb_inline(ptb, begin_op); - 
return; - default: - g_assert_not_reached(); - } - case PLUGIN_GEN_FROM_INSN: - switch (type) { - case PLUGIN_GEN_CB_UDATA: - plugin_gen_insn_udata(ptb, begin_op, insn_idx); - return; - case PLUGIN_GEN_CB_INLINE: - plugin_gen_insn_inline(ptb, begin_op, insn_idx); - return; - case PLUGIN_GEN_ENABLE_MEM_HELPER: - plugin_gen_enable_mem_helper(ptb, begin_op, insn_idx); - return; - default: - g_assert_not_reached(); - } - case PLUGIN_GEN_FROM_MEM: - switch (type) { - case PLUGIN_GEN_CB_MEM: - plugin_gen_mem_regular(ptb, begin_op, insn_idx); - return; - case PLUGIN_GEN_CB_INLINE: - plugin_gen_mem_inline(ptb, begin_op, insn_idx); - return; - default: - g_assert_not_reached(); - } - case PLUGIN_GEN_AFTER_INSN: - switch (type) { - case PLUGIN_GEN_DISABLE_MEM_HELPER: - plugin_gen_disable_mem_helper(ptb, begin_op, insn_idx); - return; - default: - g_assert_not_reached(); - } - default: - g_assert_not_reached(); - } -} - /* #define DEBUG_PLUGIN_GEN_OPS */ static void pr_ops(void) { @@ -820,42 +759,129 @@ static void pr_ops(void) static void plugin_gen_inject(const struct qemu_plugin_tb *plugin_tb) { TCGOp *op; - int insn_idx; + int insn_idx = -1; pr_ops(); - insn_idx = -1; - QSIMPLEQ_FOREACH(op, &tcg_ctx->plugin_ops, plugin_link) { - enum plugin_gen_from from = op->args[0]; - enum plugin_gen_cb type = op->args[1]; - tcg_debug_assert(op->opc == INDEX_op_plugin_cb_start); - /* ENABLE_MEM_HELPER is the first callback of an instruction */ - if (from == PLUGIN_GEN_FROM_INSN && - type == PLUGIN_GEN_ENABLE_MEM_HELPER) { + QTAILQ_FOREACH(op, &tcg_ctx->ops, link) { + switch (op->opc) { + case INDEX_op_insn_start: insn_idx++; + break; + case INDEX_op_plugin_cb_start: + { + enum plugin_gen_from from = op->args[0]; + enum plugin_gen_cb type = op->args[1]; + + switch (from) { + case PLUGIN_GEN_FROM_TB: + { + g_assert(insn_idx == -1); + + switch (type) { + case PLUGIN_GEN_CB_UDATA: + plugin_gen_tb_udata(plugin_tb, op); + break; + case PLUGIN_GEN_CB_INLINE: + plugin_gen_tb_inline(plugin_tb, op); + break; + default: + g_assert_not_reached(); + } + break; + } + case PLUGIN_GEN_FROM_INSN: + { + g_assert(insn_idx >= 0); + + switch (type) { + case PLUGIN_GEN_CB_UDATA: + plugin_gen_insn_udata(plugin_tb, op, insn_idx); + break; + case PLUGIN_GEN_CB_INLINE: + plugin_gen_insn_inline(plugin_tb, op, insn_idx); + break; + case PLUGIN_GEN_ENABLE_MEM_HELPER: + plugin_gen_enable_mem_helper(plugin_tb, op, insn_idx); + break; + default: + g_assert_not_reached(); + } + break; + } + case PLUGIN_GEN_FROM_MEM: + { + g_assert(insn_idx >= 0); + + switch (type) { + case PLUGIN_GEN_CB_MEM: + plugin_gen_mem_regular(plugin_tb, op, insn_idx); + break; + case PLUGIN_GEN_CB_INLINE: + plugin_gen_mem_inline(plugin_tb, op, insn_idx); + break; + default: + g_assert_not_reached(); + } + + break; + } + case PLUGIN_GEN_AFTER_INSN: + { + g_assert(insn_idx >= 0); + + switch (type) { + case PLUGIN_GEN_DISABLE_MEM_HELPER: + plugin_gen_disable_mem_helper(plugin_tb, op, insn_idx); + break; + default: + g_assert_not_reached(); + } + break; + } + default: + g_assert_not_reached(); + } + break; + } + default: + /* plugins don't care about any other ops */ + break; } - plugin_inject_cb(plugin_tb, op, insn_idx); } pr_ops(); } -bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_only) +bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db, + bool mem_only) { - struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb; bool ret = false; if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) { + struct qemu_plugin_tb *ptb = 
tcg_ctx->plugin_tb; + int i; + + /* reset callbacks */ + for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) { + if (ptb->cbs[i]) { + g_array_set_size(ptb->cbs[i], 0); + } + } + ptb->n = 0; + ret = true; - QSIMPLEQ_INIT(&tcg_ctx->plugin_ops); - ptb->vaddr = tb->pc; + ptb->vaddr = db->pc_first; ptb->vaddr2 = -1; - get_page_addr_code_hostp(cpu->env_ptr, tb->pc, &ptb->haddr1); + ptb->haddr1 = db->host_addr[0]; ptb->haddr2 = NULL; ptb->mem_only = mem_only; plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB); } + + tcg_ctx->plugin_insn = NULL; + return ret; } @@ -864,9 +890,8 @@ void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db) struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb; struct qemu_plugin_insn *pinsn; - pinsn = qemu_plugin_tb_insn_get(ptb); + pinsn = qemu_plugin_tb_insn_get(ptb, db->pc_next); tcg_ctx->plugin_insn = pinsn; - pinsn->vaddr = db->pc_next; plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN); /* @@ -874,16 +899,15 @@ void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db) * Note that we skip this when haddr1 == NULL, e.g. when we're * fetching instructions from a region not backed by RAM. */ - if (likely(ptb->haddr1 != NULL && ptb->vaddr2 == -1) && - unlikely((db->pc_next & TARGET_PAGE_MASK) != - (db->pc_first & TARGET_PAGE_MASK))) { - get_page_addr_code_hostp(cpu->env_ptr, db->pc_next, - &ptb->haddr2); - ptb->vaddr2 = db->pc_next; - } - if (likely(ptb->vaddr2 == -1)) { + if (ptb->haddr1 == NULL) { + pinsn->haddr = NULL; + } else if (is_same_page(db, db->pc_next)) { pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr; } else { + if (ptb->vaddr2 == -1) { + ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first); + get_page_addr_code_hostp(cpu->env_ptr, ptb->vaddr2, &ptb->haddr2); + } pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2; } } @@ -893,23 +917,19 @@ void plugin_gen_insn_end(void) plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN); } +/* + * There are cases where we never get to finalise a translation - for + * example a page fault during translation. As a result we shouldn't + * do any clean-up here and make sure things are reset in + * plugin_gen_tb_start. + */ void plugin_gen_tb_end(CPUState *cpu) { struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb; - int i; /* collect instrumentation requests */ qemu_plugin_tb_trans_cb(cpu, ptb); /* inject the instrumentation at the appropriate places */ plugin_gen_inject(ptb); - - /* clean up */ - for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) { - if (ptb->cbs[i]) { - g_array_set_size(ptb->cbs[i], 0); - } - } - ptb->n = 0; - tcg_ctx->plugin_insn = NULL; } diff --git a/accel/tcg/tb-hash.h b/accel/tcg/tb-hash.h index da7a230a1b..9854116427 100644 --- a/accel/tcg/tb-hash.h +++ b/accel/tcg/tb-hash.h @@ -24,6 +24,7 @@ #include "exec/exec-all.h" #include "qemu/xxhash.h" #include "qemu/fast-hash.h" +#include "tb-jmp-cache.h" #ifdef CONFIG_SOFTMMU diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h new file mode 100644 index 0000000000..ff5ffc8fc2 --- /dev/null +++ b/accel/tcg/tb-jmp-cache.h @@ -0,0 +1,65 @@ +/* + * The per-CPU TranslationBlock jump cache. + * + * Copyright (c) 2003 Fabrice Bellard + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef ACCEL_TCG_TB_JMP_CACHE_H +#define ACCEL_TCG_TB_JMP_CACHE_H + +#define TB_JMP_CACHE_BITS 12 +#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS) + +/* + * Accessed in parallel; all accesses to 'tb' must be atomic. + * For TARGET_TB_PCREL, accesses to 'pc' must be protected by + * a load_acquire/store_release to 'tb'. 
+ */ +struct CPUJumpCache { + struct { + TranslationBlock *tb; +#if TARGET_TB_PCREL + target_ulong pc; +#endif + } array[TB_JMP_CACHE_SIZE]; +}; + +static inline TranslationBlock * +tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t hash) +{ +#if TARGET_TB_PCREL + /* Use acquire to ensure current load of pc from jc. */ + return qatomic_load_acquire(&jc->array[hash].tb); +#else + /* Use rcu_read to ensure current load of pc from *tb. */ + return qatomic_rcu_read(&jc->array[hash].tb); +#endif +} + +static inline target_ulong +tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb) +{ +#if TARGET_TB_PCREL + return jc->array[hash].pc; +#else + return tb_pc(tb); +#endif +} + +static inline void +tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash, + TranslationBlock *tb, target_ulong pc) +{ +#if TARGET_TB_PCREL + jc->array[hash].pc = pc; + /* Use store_release on tb to ensure pc is written first. */ + qatomic_store_release(&jc->array[hash].tb, tb); +#else + /* Use the pc value already stored in tb->pc. */ + qatomic_set(&jc->array[hash].tb, tb); +#endif +} + +#endif /* ACCEL_TCG_TB_JMP_CACHE_H */ diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c new file mode 100644 index 0000000000..474416b571 --- /dev/null +++ b/accel/tcg/tb-maint.c @@ -0,0 +1,722 @@ +/* + * Translation Block Maintaince + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "exec/cputlb.h" +#include "exec/log.h" +#include "exec/exec-all.h" +#include "exec/translate-all.h" +#include "sysemu/tcg.h" +#include "tcg/tcg.h" +#include "tb-hash.h" +#include "tb-context.h" +#include "internal.h" + + +static bool tb_cmp(const void *ap, const void *bp) +{ + const TranslationBlock *a = ap; + const TranslationBlock *b = bp; + + return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) && + a->cs_base == b->cs_base && + a->flags == b->flags && + (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) && + a->trace_vcpu_dstate == b->trace_vcpu_dstate && + tb_page_addr0(a) == tb_page_addr0(b) && + tb_page_addr1(a) == tb_page_addr1(b)); +} + +static bool inv_tb_cmp(const void *ap, const void *bp) +{ + const TranslationBlock *a = ap, *b = bp; + return tb_cmp(ap, bp) && a->ihash == b->ihash; +} + +void tb_htable_init(void) +{ + unsigned int mode = QHT_MODE_AUTO_RESIZE; + + qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode); + qht_init(&tb_ctx.inv_htable, inv_tb_cmp, CODE_GEN_HTABLE_SIZE, mode); +} + +/* Set to NULL all the 'first_tb' fields in all PageDescs. 
*/ +static void page_flush_tb_1(int level, void **lp) +{ + int i; + + if (*lp == NULL) { + return; + } + if (level == 0) { + PageDesc *pd = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + page_lock(&pd[i]); + pd[i].first_tb = (uintptr_t)NULL; + page_unlock(&pd[i]); + } + } else { + void **pp = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + page_flush_tb_1(level - 1, pp + i); + } + } +} + +static void page_flush_tb(void) +{ + int i, l1_sz = v_l1_size; + + for (i = 0; i < l1_sz; i++) { + page_flush_tb_1(v_l2_levels, l1_map + i); + } +} + +/* flush all the translation blocks */ +static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) +{ + bool did_flush = false; + + mmap_lock(); + /* If it is already been done on request of another CPU, just retry. */ + if (tb_ctx.tb_flush_count != tb_flush_count.host_int) { + goto done; + } + did_flush = true; + + CPU_FOREACH(cpu) { + tcg_flush_jmp_cache(cpu); + } + + qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE); + qht_reset_size(&tb_ctx.inv_htable, CODE_GEN_HTABLE_SIZE); + page_flush_tb(); + + tcg_region_reset_all(); + /* XXX: flush processor icache at this point if cache flush is expensive */ + qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1); + +done: + mmap_unlock(); + if (did_flush) { + qemu_plugin_flush_cb(); + } +} + +void tb_flush(CPUState *cpu) +{ + if (tcg_enabled()) { + unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count); + + if (cpu_in_exclusive_context(cpu)) { + do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count)); + } else { + async_safe_run_on_cpu(cpu, do_tb_flush, + RUN_ON_CPU_HOST_INT(tb_flush_count)); + } + } +} + +/* + * user-mode: call with mmap_lock held + * !user-mode: call with @pd->lock held + */ +static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb) +{ + TranslationBlock *tb1; + uintptr_t *pprev; + unsigned int n1; + + assert_page_locked(pd); + pprev = &pd->first_tb; + PAGE_FOR_EACH_TB(pd, tb1, n1) { + if (tb1 == tb) { + *pprev = tb1->page_next[n1]; + return; + } + pprev = &tb1->page_next[n1]; + } + g_assert_not_reached(); +} + +/* remove @orig from its @n_orig-th jump list */ +static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig) +{ + uintptr_t ptr, ptr_locked; + TranslationBlock *dest; + TranslationBlock *tb; + uintptr_t *pprev; + int n; + + /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */ + ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1); + dest = (TranslationBlock *)(ptr & ~1); + if (dest == NULL) { + return; + } + + qemu_spin_lock(&dest->jmp_lock); + /* + * While acquiring the lock, the jump might have been removed if the + * destination TB was invalidated; check again. + */ + ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]); + if (ptr_locked != ptr) { + qemu_spin_unlock(&dest->jmp_lock); + /* + * The only possibility is that the jump was unlinked via + * tb_jump_unlink(dest). Seeing here another destination would be a bug, + * because we set the LSB above. + */ + g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID); + return; + } + /* + * We first acquired the lock, and since the destination pointer matches, + * we know for sure that @orig is in the jmp list. 
+ */ + pprev = &dest->jmp_list_head; + TB_FOR_EACH_JMP(dest, tb, n) { + if (tb == orig && n == n_orig) { + *pprev = tb->jmp_list_next[n]; + /* no need to set orig->jmp_dest[n]; setting the LSB was enough */ + qemu_spin_unlock(&dest->jmp_lock); + return; + } + pprev = &tb->jmp_list_next[n]; + } + g_assert_not_reached(); +} + +/* + * Reset the jump entry 'n' of a TB so that it is not chained to another TB. + */ +void tb_reset_jump(TranslationBlock *tb, int n) +{ + uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]); + tb_set_jmp_target(tb, n, addr); +} + +/* remove any jumps to the TB */ +static inline void tb_jmp_unlink(TranslationBlock *dest) +{ + TranslationBlock *tb; + int n; + + qemu_spin_lock(&dest->jmp_lock); + + TB_FOR_EACH_JMP(dest, tb, n) { + tb_reset_jump(tb, n); + qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1); + /* No need to clear the list entry; setting the dest ptr is enough */ + } + dest->jmp_list_head = (uintptr_t)NULL; + + qemu_spin_unlock(&dest->jmp_lock); +} + +static void tb_jmp_cache_inval_tb(TranslationBlock *tb) +{ + CPUState *cpu; + + if (TARGET_TB_PCREL) { + /* A TB may be at any virtual address */ + CPU_FOREACH(cpu) { + tcg_flush_jmp_cache(cpu); + } + } else { + uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb)); + + CPU_FOREACH(cpu) { + CPUJumpCache *jc = cpu->tb_jmp_cache; + + if (qatomic_read(&jc->array[h].tb) == tb) { + qatomic_set(&jc->array[h].tb, NULL); + } + } + } +} + +/* + * In user-mode, call with mmap_lock held. + * In !user-mode, if @rm_from_page_list is set, call with the TB's pages' + * locks held. + */ +static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) +{ + PageDesc *p; + uint32_t h; + tb_page_addr_t phys_pc; + uint32_t orig_cflags = tb_cflags(tb); + void *existing = NULL; + + assert_memory_lock(); + + /* make sure no further incoming jumps will be chained to this TB */ + qemu_spin_lock(&tb->jmp_lock); + qatomic_set(&tb->cflags, tb->cflags | CF_INVALID); + qemu_spin_unlock(&tb->jmp_lock); + + /* remove the TB from the hash list */ + phys_pc = tb_page_addr0(tb); + h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 
0 : tb_pc(tb)), + tb->flags, orig_cflags, tb->trace_vcpu_dstate); + if (!qht_remove(&tb_ctx.htable, tb, h)) { + return; + } + + qht_insert(&tb_ctx.inv_htable, tb, h, &existing); + g_assert(existing == NULL); + + /* remove the TB from the page list */ + if (rm_from_page_list) { + p = page_find(phys_pc >> TARGET_PAGE_BITS); + tb_page_remove(p, tb); + phys_pc = tb_page_addr1(tb); + if (phys_pc != -1) { + p = page_find(phys_pc >> TARGET_PAGE_BITS); + tb_page_remove(p, tb); + } + } + + /* remove the TB from the hash list */ + tb_jmp_cache_inval_tb(tb); + + /* suppress this TB from the two jump lists */ + tb_remove_from_jmp_list(tb, 0); + tb_remove_from_jmp_list(tb, 1); + + /* suppress any remaining jumps to this TB */ + tb_jmp_unlink(tb); + + qatomic_set(&tb_ctx.tb_phys_invalidate_count, + tb_ctx.tb_phys_invalidate_count + 1); +} + +static void tb_phys_invalidate__locked(TranslationBlock *tb) +{ + qemu_thread_jit_write(); + do_tb_phys_invalidate(tb, true); + qemu_thread_jit_execute(); +} + +static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, + PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc) +{ + PageDesc *p1, *p2; + tb_page_addr_t page1; + tb_page_addr_t page2; + + assert_memory_lock(); + g_assert(phys1 != -1); + + page1 = phys1 >> TARGET_PAGE_BITS; + page2 = phys2 >> TARGET_PAGE_BITS; + + p1 = page_find_alloc(page1, alloc); + if (ret_p1) { + *ret_p1 = p1; + } + if (likely(phys2 == -1)) { + page_lock(p1); + return; + } else if (page1 == page2) { + page_lock(p1); + if (ret_p2) { + *ret_p2 = p1; + } + return; + } + p2 = page_find_alloc(page2, alloc); + if (ret_p2) { + *ret_p2 = p2; + } + if (page1 < page2) { + page_lock(p1); + page_lock(p2); + } else { + page_lock(p2); + page_lock(p1); + } +} + +#ifdef CONFIG_USER_ONLY +static inline void page_lock_tb(const TranslationBlock *tb) { } +static inline void page_unlock_tb(const TranslationBlock *tb) { } +#else +/* lock the page(s) of a TB in the correct acquisition order */ +static void page_lock_tb(const TranslationBlock *tb) +{ + page_lock_pair(NULL, tb_page_addr0(tb), NULL, tb_page_addr1(tb), false); +} + +static void page_unlock_tb(const TranslationBlock *tb) +{ + PageDesc *p1 = page_find(tb_page_addr0(tb) >> TARGET_PAGE_BITS); + + page_unlock(p1); + if (unlikely(tb_page_addr1(tb) != -1)) { + PageDesc *p2 = page_find(tb_page_addr1(tb) >> TARGET_PAGE_BITS); + + if (p2 != p1) { + page_unlock(p2); + } + } +} +#endif + +/* + * Invalidate one TB. + * Called with mmap_lock held in user-mode. + */ +void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) +{ + if (page_addr == -1 && tb_page_addr0(tb) != -1) { + page_lock_tb(tb); + do_tb_phys_invalidate(tb, true); + page_unlock_tb(tb); + } else { + do_tb_phys_invalidate(tb, false); + } +} + +/* + * Add the tb in the target page and protect it if necessary. + * Called with mmap_lock held for user-mode emulation. + * Called with @p->lock held in !user-mode. + */ +static inline void tb_page_add(PageDesc *p, TranslationBlock *tb, + unsigned int n, tb_page_addr_t page_addr) +{ +#ifndef CONFIG_USER_ONLY + bool page_already_protected; +#endif + + assert_page_locked(p); + + tb->page_next[n] = p->first_tb; +#ifndef CONFIG_USER_ONLY + page_already_protected = p->first_tb != (uintptr_t)NULL; +#endif + p->first_tb = (uintptr_t)tb | n; + +#if defined(CONFIG_USER_ONLY) + /* translator_loop() must have made all TB pages non-writable */ + assert(!(p->flags & PAGE_WRITE)); +#else + /* + * If some code is already present, then the pages are already + * protected. 
So we handle the case where only the first TB is + * allocated in a physical page. + */ + if (!page_already_protected) { + tlb_protect_code(page_addr); + } +#endif +} + +/* + * Add a new TB and link it to the physical page tables. phys_page2 is + * (-1) to indicate that only one page contains the TB. + * + * Called with mmap_lock held for user-mode emulation. + * + * Returns a pointer @tb, or a pointer to an existing TB that matches @tb. + * Note that in !user-mode, another thread might have already added a TB + * for the same block of guest code that @tb corresponds to. In that case, + * the caller should discard the original @tb, and use instead the returned TB. + */ +TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, + tb_page_addr_t phys_page2) +{ + PageDesc *p; + PageDesc *p2 = NULL; + void *existing_tb = NULL; + uint32_t h; + + assert_memory_lock(); + tcg_debug_assert(!(tb->cflags & CF_INVALID)); + + /* + * Add the TB to the page list, acquiring first the pages's locks. + * We keep the locks held until after inserting the TB in the hash table, + * so that if the insertion fails we know for sure that the TBs are still + * in the page descriptors. + * Note that inserting into the hash table first isn't an option, since + * we can only insert TBs that are fully initialized. + */ + page_lock_pair(&p, phys_pc, &p2, phys_page2, true); + tb_page_add(p, tb, 0, phys_pc); + if (p2) { + tb_page_add(p2, tb, 1, phys_page2); + } + + /* add in the hash table */ + h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)), + tb->flags, tb->cflags, tb->trace_vcpu_dstate); + qht_insert(&tb_ctx.htable, tb, h, &existing_tb); + + /* remove TB from the page(s) if we couldn't insert it */ + if (unlikely(existing_tb)) { + tb_page_remove(p, tb); + if (p2) { + tb_page_remove(p2, tb); + } + tb = existing_tb; + } + + if (p2 && p2 != p) { + page_unlock(p2); + } + page_unlock(p); + return tb; +} + +/* + * @p must be non-NULL. + * user-mode: call with mmap_lock held. + * !user-mode: call with all @pages locked. + */ +static void +tb_invalidate_phys_page_range__locked(struct page_collection *pages, + PageDesc *p, tb_page_addr_t start, + tb_page_addr_t end, + uintptr_t retaddr) +{ + TranslationBlock *tb; +#ifndef XBOX + tb_page_addr_t tb_start, tb_end; +#endif + int n; +#ifdef TARGET_HAS_PRECISE_SMC + CPUState *cpu = current_cpu; + bool current_tb_not_found = retaddr != 0; + bool current_tb_modified = false; + TranslationBlock *current_tb = NULL; +#endif /* TARGET_HAS_PRECISE_SMC */ + + assert_page_locked(p); + + /* + * We remove all the TBs in the range [start, end[. + * XXX: see if in some cases it could be faster to invalidate all the code + */ + PAGE_FOR_EACH_TB(p, tb, n) { + assert_page_locked(p); +#ifndef XBOX + /* NOTE: this is subtle as a TB may span two physical pages */ + if (n == 0) { + /* NOTE: tb_end may be after the end of the page, but + it is not a problem */ + tb_start = tb_page_addr0(tb); + tb_end = tb_start + tb->size; + } else { + tb_start = tb_page_addr1(tb); + tb_end = tb_start + ((tb_page_addr0(tb) + tb->size) + & ~TARGET_PAGE_MASK); + } + if (!(tb_end <= start || tb_start >= end)) { +#else + { +#endif +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb_not_found) { + current_tb_not_found = false; + /* now we have a real cpu fault */ + current_tb = tcg_tb_lookup(retaddr); + } + if (current_tb == tb && + (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { + /* + * If we are modifying the current TB, we must stop + * its execution. 
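The scan in tb_invalidate_phys_page_range__locked() above decides whether a TB intersects the written range with the half-open interval test !(tb_end <= start || tb_start >= end). A small self-checking sketch of just that predicate (ranges_overlap and the test values are made up for the example):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool ranges_overlap(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1)
{
    /* [a0, a1) and [b0, b1) intersect iff neither lies wholly before the other */
    return !(a1 <= b0 || a0 >= b1);
}

int main(void)
{
    assert(ranges_overlap(0x1000, 0x1040, 0x1030, 0x2000));   /* partial overlap */
    assert(!ranges_overlap(0x1000, 0x1040, 0x1040, 0x2000));  /* merely touching */
    assert(ranges_overlap(0x0000, 0x10000, 0x4000, 0x4001));  /* fully contained */
    return 0;
}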
We could be more precise by checking + * that the modification is after the current PC, but it + * would require a specialized function to partially + * restore the CPU state. + */ + current_tb_modified = true; + cpu_restore_state_from_tb(cpu, current_tb, retaddr); + } +#endif /* TARGET_HAS_PRECISE_SMC */ + tb_phys_invalidate__locked(tb); + } + } +#if !defined(CONFIG_USER_ONLY) + /* if no code remaining, no need to continue to use slow writes */ + if (!p->first_tb) { + tlb_unprotect_code(start); + } +#endif +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb_modified) { + page_collection_unlock(pages); + /* Force execution of one insn next time. */ + cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu); + mmap_unlock(); + cpu_loop_exit_noexc(cpu); + } +#endif +} + +/* + * Invalidate all TBs which intersect with the target physical + * address page @addr. + * + * Called with mmap_lock held for user-mode emulation + */ +void tb_invalidate_phys_page(tb_page_addr_t addr) +{ + struct page_collection *pages; + tb_page_addr_t start, end; + PageDesc *p; + + assert_memory_lock(); + + p = page_find(addr >> TARGET_PAGE_BITS); + if (p == NULL) { + return; + } + + start = addr & TARGET_PAGE_MASK; + end = start + TARGET_PAGE_SIZE; + pages = page_collection_lock(start, end); + tb_invalidate_phys_page_range__locked(pages, p, start, end, 0); + page_collection_unlock(pages); +} + +/* + * Invalidate all TBs which intersect with the target physical address range + * [start;end[. NOTE: start and end may refer to *different* physical pages. + * 'is_cpu_write_access' should be true if called from a real cpu write + * access: the virtual CPU will exit the current TB if code is modified inside + * this TB. + * + * Called with mmap_lock held for user-mode emulation. + */ +void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end) +{ + struct page_collection *pages; + tb_page_addr_t next; + + assert_memory_lock(); + + pages = page_collection_lock(start, end); + for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; + start < end; + start = next, next += TARGET_PAGE_SIZE) { + PageDesc *pd = page_find(start >> TARGET_PAGE_BITS); + tb_page_addr_t bound = MIN(next, end); + + if (pd == NULL) { + continue; + } + tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0); + } + page_collection_unlock(pages); +} + +#ifdef CONFIG_SOFTMMU +/* + * len must be <= 8 and start must be a multiple of len. + * Called via softmmu_template.h when code areas are written to with + * iothread mutex not held. + * + * Call with all @pages in the range [@start, @start + len[ locked. + */ +void tb_invalidate_phys_page_fast(struct page_collection *pages, + tb_page_addr_t start, int len, + uintptr_t retaddr) +{ + PageDesc *p; + + assert_memory_lock(); + + p = page_find(start >> TARGET_PAGE_BITS); + if (!p) { + return; + } + + assert_page_locked(p); + tb_invalidate_phys_page_range__locked(pages, p, start, start + len, + retaddr); +} +#else +/* + * Called with mmap_lock held. If pc is not 0 then it indicates the + * host PC of the faulting store instruction that caused this invalidate. + * Returns true if the caller needs to abort execution of the current + * TB (because it was modified by this store and the guest CPU has + * precise-SMC semantics). 
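tb_invalidate_phys_range() above splits an arbitrary [start, end) physical range into per-page chunks, clamping each chunk with MIN(next, end) before handing it to the per-page helper. A standalone sketch of just that iteration, with placeholder TARGET_PAGE_* constants (4 KiB pages assumed here purely for the example):

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_SIZE (1u << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK (~(uint64_t)(TARGET_PAGE_SIZE - 1))
#define MIN(a, b)        ((a) < (b) ? (a) : (b))

static void walk_range(uint64_t start, uint64_t end)
{
    uint64_t next;

    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
         start < end;
         start = next, next += TARGET_PAGE_SIZE) {
        uint64_t bound = MIN(next, end);

        printf("page 0x%llx: invalidate [0x%llx, 0x%llx)\n",
               (unsigned long long)(start >> TARGET_PAGE_BITS),
               (unsigned long long)start, (unsigned long long)bound);
    }
}

int main(void)
{
    walk_range(0x1ff0, 0x3010);   /* spans three pages */
    return 0;
}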
+ */ +bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc) +{ + TranslationBlock *tb; + PageDesc *p; + int n; +#ifdef TARGET_HAS_PRECISE_SMC + TranslationBlock *current_tb = NULL; + CPUState *cpu = current_cpu; + bool current_tb_modified = false; +#endif + + assert_memory_lock(); + + addr &= TARGET_PAGE_MASK; + p = page_find(addr >> TARGET_PAGE_BITS); + if (!p) { + return false; + } + +#ifdef TARGET_HAS_PRECISE_SMC + if (p->first_tb && pc != 0) { + current_tb = tcg_tb_lookup(pc); + } +#endif + assert_page_locked(p); + PAGE_FOR_EACH_TB(p, tb, n) { +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb == tb && + (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { + /* + * If we are modifying the current TB, we must stop its execution. + * We could be more precise by checking that the modification is + * after the current PC, but it would require a specialized + * function to partially restore the CPU state. + */ + current_tb_modified = true; + cpu_restore_state_from_tb(cpu, current_tb, pc); + } +#endif /* TARGET_HAS_PRECISE_SMC */ + tb_phys_invalidate(tb, addr); + } + p->first_tb = (uintptr_t)NULL; +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb_modified) { + /* Force execution of one insn next time. */ + cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu); + return true; + } +#endif + + return false; +} +#endif diff --git a/accel/tcg/tcg-accel-ops-icount.c b/accel/tcg/tcg-accel-ops-icount.c index ea42d1d51b..84cc7421be 100644 --- a/accel/tcg/tcg-accel-ops-icount.c +++ b/accel/tcg/tcg-accel-ops-icount.c @@ -24,9 +24,8 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" -#include "sysemu/tcg.h" #include "sysemu/replay.h" +#include "sysemu/cpu-timers.h" #include "qemu/main-loop.h" #include "qemu/guest-random.h" #include "exec/exec-all.h" @@ -85,8 +84,7 @@ void icount_handle_deadline(void) * Don't interrupt cpu thread, when these events are waiting * (i.e., there is no checkpoint) */ - if (deadline == 0 - && (replay_mode != REPLAY_MODE_PLAY || replay_has_checkpoint())) { + if (deadline == 0) { icount_notify_aio_contexts(); } } @@ -110,8 +108,14 @@ void icount_prepare_for_run(CPUState *cpu) replay_mutex_lock(); - if (cpu->icount_budget == 0 && replay_has_checkpoint()) { + if (cpu->icount_budget == 0) { + /* + * We're called without the iothread lock, so must take it while + * we're calling timer handlers. + */ + qemu_mutex_lock_iothread(); icount_notify_aio_contexts(); + qemu_mutex_unlock_iothread(); } } diff --git a/accel/tcg/tcg-accel-ops-icount.h b/accel/tcg/tcg-accel-ops-icount.h index d884aa2aaa..1b6fd9c607 100644 --- a/accel/tcg/tcg-accel-ops-icount.h +++ b/accel/tcg/tcg-accel-ops-icount.h @@ -7,8 +7,8 @@ * See the COPYING file in the top-level directory. 
*/ -#ifndef TCG_CPUS_ICOUNT_H -#define TCG_CPUS_ICOUNT_H +#ifndef TCG_ACCEL_OPS_ICOUNT_H +#define TCG_ACCEL_OPS_ICOUNT_H void icount_handle_deadline(void); void icount_prepare_for_run(CPUState *cpu); @@ -16,4 +16,4 @@ void icount_process_data(CPUState *cpu); void icount_handle_interrupt(CPUState *cpu, int mask); -#endif /* TCG_CPUS_ICOUNT_H */ +#endif /* TCG_ACCEL_OPS_ICOUNT_H */ diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c index 847d2079d2..d50239e0e2 100644 --- a/accel/tcg/tcg-accel-ops-mttcg.c +++ b/accel/tcg/tcg-accel-ops-mttcg.c @@ -24,10 +24,11 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "sysemu/tcg.h" #include "sysemu/replay.h" +#include "sysemu/cpu-timers.h" #include "qemu/main-loop.h" +#include "qemu/notify.h" #include "qemu/guest-random.h" #include "exec/exec-all.h" #include "hw/boards.h" @@ -35,6 +36,26 @@ #include "tcg-accel-ops.h" #include "tcg-accel-ops-mttcg.h" +typedef struct MttcgForceRcuNotifier { + Notifier notifier; + CPUState *cpu; +} MttcgForceRcuNotifier; + +static void do_nothing(CPUState *cpu, run_on_cpu_data d) +{ +} + +static void mttcg_force_rcu(Notifier *notify, void *data) +{ + CPUState *cpu = container_of(notify, MttcgForceRcuNotifier, notifier)->cpu; + + /* + * Called with rcu_registry_lock held, using async_run_on_cpu() ensures + * that there are no deadlocks. + */ + async_run_on_cpu(cpu, do_nothing, RUN_ON_CPU_NULL); +} + /* * In the multi-threaded case each vCPU has its own thread. The TLS * variable current_cpu can be used deep in the code to find the @@ -43,12 +64,16 @@ static void *mttcg_cpu_thread_fn(void *arg) { + MttcgForceRcuNotifier force_rcu; CPUState *cpu = arg; assert(tcg_enabled()); g_assert(!icount_enabled()); rcu_register_thread(); + force_rcu.notifier.notify = mttcg_force_rcu; + force_rcu.cpu = cpu; + rcu_add_force_rcu_notifier(&force_rcu.notifier); tcg_register_thread(); qemu_mutex_lock_iothread(); @@ -100,6 +125,7 @@ static void *mttcg_cpu_thread_fn(void *arg) tcg_cpus_destroy(cpu); qemu_mutex_unlock_iothread(); + rcu_remove_force_rcu_notifier(&force_rcu.notifier); rcu_unregister_thread(); return NULL; } @@ -116,7 +142,7 @@ void mttcg_start_vcpu_thread(CPUState *cpu) g_assert(tcg_enabled()); tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1); - cpu->thread = g_malloc0(sizeof(QemuThread)); + cpu->thread = g_new0(QemuThread, 1); cpu->halt_cond = g_malloc0(sizeof(QemuCond)); qemu_cond_init(cpu->halt_cond); diff --git a/accel/tcg/tcg-accel-ops-mttcg.h b/accel/tcg/tcg-accel-ops-mttcg.h index 9fdc5a2ab5..8ffa7a9a9f 100644 --- a/accel/tcg/tcg-accel-ops-mttcg.h +++ b/accel/tcg/tcg-accel-ops-mttcg.h @@ -7,8 +7,8 @@ * See the COPYING file in the top-level directory. 
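The MttcgForceRcuNotifier added above embeds a Notifier inside a larger struct and recovers the enclosing object in the callback with container_of(), which is how mttcg_force_rcu() finds the CPU it must kick. A minimal standalone version of that pattern with a hand-rolled container_of (notifier, cpu_waker and wake_cpu are illustrative names; QEMU's real Notifier and container_of come from its own headers):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier {
    void (*notify)(struct notifier *n);
};

struct cpu_waker {
    struct notifier notifier;     /* embedded, registered with the RCU core */
    int cpu_index;                /* extra state the callback needs */
};

static void wake_cpu(struct notifier *n)
{
    struct cpu_waker *w = container_of(n, struct cpu_waker, notifier);

    printf("kick cpu %d\n", w->cpu_index);
}

int main(void)
{
    struct cpu_waker waker = { .notifier = { .notify = wake_cpu },
                               .cpu_index = 3 };

    waker.notifier.notify(&waker.notifier);   /* the registry would do this */
    return 0;
}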
*/ -#ifndef TCG_CPUS_MTTCG_H -#define TCG_CPUS_MTTCG_H +#ifndef TCG_ACCEL_OPS_MTTCG_H +#define TCG_ACCEL_OPS_MTTCG_H /* kick MTTCG vCPU thread */ void mttcg_kick_vcpu_thread(CPUState *cpu); @@ -16,4 +16,4 @@ void mttcg_kick_vcpu_thread(CPUState *cpu); /* start an mttcg vCPU thread */ void mttcg_start_vcpu_thread(CPUState *cpu); -#endif /* TCG_CPUS_MTTCG_H */ +#endif /* TCG_ACCEL_OPS_MTTCG_H */ diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c index c02c061ecb..290833a37f 100644 --- a/accel/tcg/tcg-accel-ops-rr.c +++ b/accel/tcg/tcg-accel-ops-rr.c @@ -24,10 +24,11 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "sysemu/tcg.h" #include "sysemu/replay.h" +#include "sysemu/cpu-timers.h" #include "qemu/main-loop.h" +#include "qemu/notify.h" #include "qemu/guest-random.h" #include "exec/exec-all.h" @@ -50,7 +51,7 @@ void rr_kick_vcpu_thread(CPUState *unused) * * The kick timer is responsible for moving single threaded vCPU * emulation on to the next vCPU. If more than one vCPU is running a - * timer event with force a cpu->exit so the next vCPU can get + * timer event we force a cpu->exit so the next vCPU can get * scheduled. * * The timer is removed if all vCPUs are idle and restarted again once @@ -60,8 +61,6 @@ void rr_kick_vcpu_thread(CPUState *unused) static QEMUTimer *rr_kick_vcpu_timer; static CPUState *rr_current_cpu; -#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10) - static inline int64_t rr_next_kick_time(void) { return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD; @@ -135,6 +134,11 @@ static void rr_deal_with_unplugged_cpus(void) } } +static void rr_force_rcu(Notifier *notify, void *data) +{ + rr_kick_next_cpu(); +} + /* * In the single-threaded case each vCPU is simulated in turn. If * there is more than a single vCPU we create a simple timer to kick @@ -145,10 +149,13 @@ static void rr_deal_with_unplugged_cpus(void) static void *rr_cpu_thread_fn(void *arg) { + Notifier force_rcu; CPUState *cpu = arg; assert(tcg_enabled()); rcu_register_thread(); + force_rcu.notify = rr_force_rcu; + rcu_add_force_rcu_notifier(&force_rcu); tcg_register_thread(); qemu_mutex_lock_iothread(); @@ -257,6 +264,7 @@ static void *rr_cpu_thread_fn(void *arg) rr_deal_with_unplugged_cpus(); } + rcu_remove_force_rcu_notifier(&force_rcu); rcu_unregister_thread(); return NULL; } @@ -271,8 +279,8 @@ void rr_start_vcpu_thread(CPUState *cpu) tcg_cpu_init_cflags(cpu, false); if (!single_tcg_cpu_thread) { - cpu->thread = g_malloc0(sizeof(QemuThread)); - cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + cpu->thread = g_new0(QemuThread, 1); + cpu->halt_cond = g_new0(QemuCond, 1); qemu_cond_init(cpu->halt_cond); /* share a single thread for all cpus with TCG */ diff --git a/accel/tcg/tcg-accel-ops-rr.h b/accel/tcg/tcg-accel-ops-rr.h index 54f6ae6e86..2a76a29612 100644 --- a/accel/tcg/tcg-accel-ops-rr.h +++ b/accel/tcg/tcg-accel-ops-rr.h @@ -7,8 +7,8 @@ * See the COPYING file in the top-level directory. 
*/ -#ifndef TCG_CPUS_RR_H -#define TCG_CPUS_RR_H +#ifndef TCG_ACCEL_OPS_RR_H +#define TCG_ACCEL_OPS_RR_H #define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10) @@ -18,4 +18,4 @@ void rr_kick_vcpu_thread(CPUState *unused); /* start the round robin vcpu thread */ void rr_start_vcpu_thread(CPUState *cpu); -#endif /* TCG_CPUS_RR_H */ +#endif /* TCG_ACCEL_OPS_RR_H */ diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c index 1a8e8390bd..19cbf1db3a 100644 --- a/accel/tcg/tcg-accel-ops.c +++ b/accel/tcg/tcg-accel-ops.c @@ -26,12 +26,14 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "sysemu/tcg.h" #include "sysemu/replay.h" +#include "sysemu/cpu-timers.h" #include "qemu/main-loop.h" #include "qemu/guest-random.h" #include "exec/exec-all.h" +#include "exec/hwaddr.h" +#include "exec/gdbstub.h" #include "tcg-accel-ops.h" #include "tcg-accel-ops-mttcg.h" @@ -91,23 +93,120 @@ void tcg_handle_interrupt(CPUState *cpu, int mask) } } +static bool tcg_supports_guest_debug(void) +{ + return true; +} + +/* Translate GDB watchpoint type to a flags value for cpu_watchpoint_* */ +static inline int xlat_gdb_type(CPUState *cpu, int gdbtype) +{ + static const int xlat[] = { + [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE, + [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ, + [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS, + }; + + CPUClass *cc = CPU_GET_CLASS(cpu); + int cputype = xlat[gdbtype]; + + if (cc->gdb_stop_before_watchpoint) { + cputype |= BP_STOP_BEFORE_ACCESS; + } + return cputype; +} + +static int tcg_insert_breakpoint(CPUState *cs, int type, hwaddr addr, hwaddr len) +{ + CPUState *cpu; + int err = 0; + + switch (type) { + case GDB_BREAKPOINT_SW: + case GDB_BREAKPOINT_HW: + CPU_FOREACH(cpu) { + err = cpu_breakpoint_insert(cpu, addr, BP_GDB, NULL); + if (err) { + break; + } + } + return err; + case GDB_WATCHPOINT_WRITE: + case GDB_WATCHPOINT_READ: + case GDB_WATCHPOINT_ACCESS: + CPU_FOREACH(cpu) { + err = cpu_watchpoint_insert(cpu, addr, len, + xlat_gdb_type(cpu, type), NULL); + if (err) { + break; + } + } + return err; + default: + return -ENOSYS; + } +} + +static int tcg_remove_breakpoint(CPUState *cs, int type, hwaddr addr, hwaddr len) +{ + CPUState *cpu; + int err = 0; + + switch (type) { + case GDB_BREAKPOINT_SW: + case GDB_BREAKPOINT_HW: + CPU_FOREACH(cpu) { + err = cpu_breakpoint_remove(cpu, addr, BP_GDB); + if (err) { + break; + } + } + return err; + case GDB_WATCHPOINT_WRITE: + case GDB_WATCHPOINT_READ: + case GDB_WATCHPOINT_ACCESS: + CPU_FOREACH(cpu) { + err = cpu_watchpoint_remove(cpu, addr, len, + xlat_gdb_type(cpu, type)); + if (err) { + break; + } + } + return err; + default: + return -ENOSYS; + } +} + +static inline void tcg_remove_all_breakpoints(CPUState *cpu) +{ + cpu_breakpoint_remove_all(cpu, BP_GDB); + cpu_watchpoint_remove_all(cpu, BP_GDB); +} + static void tcg_accel_ops_init(AccelOpsClass *ops) { if (qemu_tcg_mttcg_enabled()) { ops->create_vcpu_thread = mttcg_start_vcpu_thread; ops->kick_vcpu_thread = mttcg_kick_vcpu_thread; ops->handle_interrupt = tcg_handle_interrupt; - } else if (icount_enabled()) { - ops->create_vcpu_thread = rr_start_vcpu_thread; - ops->kick_vcpu_thread = rr_kick_vcpu_thread; - ops->handle_interrupt = icount_handle_interrupt; - ops->get_virtual_clock = icount_get; - ops->get_elapsed_ticks = icount_get; } else { ops->create_vcpu_thread = rr_start_vcpu_thread; ops->kick_vcpu_thread = rr_kick_vcpu_thread; - ops->handle_interrupt = tcg_handle_interrupt; + + if (icount_enabled()) { + ops->handle_interrupt = icount_handle_interrupt; + 
ops->get_virtual_clock = icount_get; + ops->get_elapsed_ticks = icount_get; + } else { + ops->handle_interrupt = tcg_handle_interrupt; + } } + + ops->supports_guest_debug = tcg_supports_guest_debug; + ops->insert_breakpoint = tcg_insert_breakpoint; + ops->remove_breakpoint = tcg_remove_breakpoint; + ops->remove_all_breakpoints = tcg_remove_all_breakpoints; } static void tcg_accel_ops_class_init(ObjectClass *oc, void *data) diff --git a/accel/tcg/tcg-accel-ops.h b/accel/tcg/tcg-accel-ops.h index 6a5fcef889..f9bc6330e2 100644 --- a/accel/tcg/tcg-accel-ops.h +++ b/accel/tcg/tcg-accel-ops.h @@ -9,8 +9,8 @@ * See the COPYING file in the top-level directory. */ -#ifndef TCG_CPUS_H -#define TCG_CPUS_H +#ifndef TCG_ACCEL_OPS_H +#define TCG_ACCEL_OPS_H #include "sysemu/cpus.h" @@ -19,4 +19,4 @@ int tcg_cpus_exec(CPUState *cpu); void tcg_handle_interrupt(CPUState *cpu, int mask); void tcg_cpu_init_cflags(CPUState *cpu, bool parallel); -#endif /* TCG_CPUS_H */ +#endif /* TCG_ACCEL_OPS_H */ diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c index d6336a9c96..30b503fb22 100644 --- a/accel/tcg/tcg-all.c +++ b/accel/tcg/tcg-all.c @@ -24,8 +24,8 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "sysemu/tcg.h" +#include "sysemu/replay.h" #include "sysemu/cpu-timers.h" #include "tcg/tcg.h" #include "qapi/error.h" @@ -208,12 +208,28 @@ static void tcg_set_splitwx(Object *obj, bool value, Error **errp) s->splitwx_enabled = value; } +static int tcg_gdbstub_supported_sstep_flags(void) +{ + /* + * In replay mode all events will come from the log and can't be + * suppressed otherwise we would break determinism. However as those + * events are tied to the number of executed instructions we won't see + * them occurring every time we single step. + */ + if (replay_mode != REPLAY_MODE_NONE) { + return SSTEP_ENABLE; + } else { + return SSTEP_ENABLE | SSTEP_NOIRQ | SSTEP_NOTIMER; + } +} + static void tcg_accel_class_init(ObjectClass *oc, void *data) { AccelClass *ac = ACCEL_CLASS(oc); ac->name = "tcg"; ac->init_machine = tcg_init_machine; ac->allowed = &tcg_allowed; + ac->gdbstub_supported_sstep_flags = tcg_gdbstub_supported_sstep_flags; object_class_property_add_str(oc, "thread", tcg_get_thread, diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index ef6b3d019e..058dafca0a 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -18,7 +18,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #define NO_CPU_IO_DEFS #include "trace.h" @@ -47,78 +46,24 @@ #include "exec/cputlb.h" #include "exec/translate-all.h" +#include "exec/translator.h" #include "qemu/bitmap.h" #include "qemu/qemu-print.h" #include "qemu/timer.h" #include "qemu/main-loop.h" +#include "qemu/cacheinfo.h" #include "exec/log.h" #include "sysemu/cpus.h" #include "sysemu/cpu-timers.h" #include "sysemu/tcg.h" #include "qapi/error.h" #include "hw/core/tcg-cpu-ops.h" +#include "tb-jmp-cache.h" #include "tb-hash.h" #include "tb-context.h" #include "internal.h" -/* #define DEBUG_TB_INVALIDATE */ -/* #define DEBUG_TB_FLUSH */ /* make various TB consistency checks */ -/* #define DEBUG_TB_CHECK */ - -#ifdef DEBUG_TB_INVALIDATE -#define DEBUG_TB_INVALIDATE_GATE 1 -#else -#define DEBUG_TB_INVALIDATE_GATE 0 -#endif - -#ifdef DEBUG_TB_FLUSH -#define DEBUG_TB_FLUSH_GATE 1 -#else -#define DEBUG_TB_FLUSH_GATE 0 -#endif - -#if !defined(CONFIG_USER_ONLY) -/* TB consistency checks only implemented for usermode emulation. 
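The xlat_gdb_type() helper added to tcg-accel-ops.c earlier in this patch is a small designated-initializer lookup table: it maps a GDB watchpoint kind onto a combination of breakpoint flags and ORs in one more flag when the CPU stops before the access. A toy equivalent of that table-plus-modifier shape (the enum values and flag constants below are stand-ins, not QEMU's real definitions):

#include <stdio.h>

enum gdb_wp { WP_WRITE, WP_READ, WP_ACCESS };

#define BP_GDB         0x10
#define BP_MEM_WRITE   0x01
#define BP_MEM_READ    0x02
#define BP_MEM_ACCESS  (BP_MEM_WRITE | BP_MEM_READ)
#define BP_STOP_BEFORE 0x20

static int xlat(enum gdb_wp type, int stop_before_access)
{
    static const int table[] = {
        [WP_WRITE]  = BP_GDB | BP_MEM_WRITE,
        [WP_READ]   = BP_GDB | BP_MEM_READ,
        [WP_ACCESS] = BP_GDB | BP_MEM_ACCESS,
    };
    int flags = table[type];

    if (stop_before_access) {
        flags |= BP_STOP_BEFORE;
    }
    return flags;
}

int main(void)
{
    printf("0x%x\n", xlat(WP_ACCESS, 1));   /* prints 0x33 */
    return 0;
}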
*/ -#undef DEBUG_TB_CHECK -#endif - -#ifdef DEBUG_TB_CHECK -#define DEBUG_TB_CHECK_GATE 1 -#else -#define DEBUG_TB_CHECK_GATE 0 -#endif - -/* Access to the various translations structures need to be serialised via locks - * for consistency. - * In user-mode emulation access to the memory related structures are protected - * with mmap_lock. - * In !user-mode we use per-page locks. - */ -#ifdef CONFIG_SOFTMMU -#define assert_memory_lock() -#else -#define assert_memory_lock() tcg_debug_assert(have_mmap_lock()) -#endif - -#define SMC_BITMAP_USE_THRESHOLD 10 - -typedef struct PageDesc { - /* list of TBs intersecting this ram page */ - uintptr_t first_tb; -#ifdef CONFIG_SOFTMMU - /* in order to optimize self modifying code, we count the number - of lookups we do to a given page to use a bitmap */ - unsigned long *code_bitmap; - unsigned int code_write_count; -#else - unsigned long flags; - void *target_data; -#endif -#ifndef CONFIG_USER_ONLY - QemuSpin lock; -#endif -} PageDesc; /** * struct page_entry - page descriptor entry @@ -164,18 +109,6 @@ struct page_collection { struct page_entry *max; }; -/* list iterators for lists of tagged pointers in TranslationBlock */ -#define TB_FOR_EACH_TAGGED(head, tb, n, field) \ - for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \ - tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \ - tb = (TranslationBlock *)((uintptr_t)tb & ~1)) - -#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \ - TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next) - -#define TB_FOR_EACH_JMP(head_tb, tb, n) \ - TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next) - /* * In system mode we want L1_MAP to be based on ram offsets, * while in user mode we want it to be based on virtual addresses. @@ -193,10 +126,6 @@ struct page_collection { # define L1_MAP_ADDR_SPACE_BITS MIN(HOST_LONG_BITS, TARGET_ABI_BITS) #endif -/* Size of the L2 (and L3, etc) page tables. */ -#define V_L2_BITS 10 -#define V_L2_SIZE (1 << V_L2_BITS) - /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */ QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS > sizeof_field(TranslationBlock, trace_vcpu_dstate) @@ -205,18 +134,11 @@ QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS > /* * L1 Mapping properties */ -static int v_l1_size; -static int v_l1_shift; -static int v_l2_levels; +int v_l1_size; +int v_l1_shift; +int v_l2_levels; -/* The bottom level has pointers to PageDesc, and is indexed by - * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size. - */ -#define V_L1_MIN_BITS 4 -#define V_L1_MAX_BITS (V_L2_BITS + 3) -#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS) - -static void *l1_map[V_L1_MAX_SIZE]; +void *l1_map[V_L1_MAX_SIZE]; TBContext tb_ctx; @@ -304,7 +226,7 @@ static int encode_search(TranslationBlock *tb, uint8_t *block) for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { if (i == 0) { - prev = (j == 0 ? tb->pc : 0); + prev = (!TARGET_TB_PCREL && j == 0 ? tb_pc(tb) : 0); } else { prev = tcg_ctx->gen_insn_data[i - 1][j]; } @@ -325,60 +247,77 @@ static int encode_search(TranslationBlock *tb, uint8_t *block) return p - block; } -/* The cpu state corresponding to 'searched_pc' is restored. - * When reset_icount is true, current TB will be interrupted and - * icount should be recalculated. 
- */ -static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, - uintptr_t searched_pc, bool reset_icount) +static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc, + uint64_t *data) { - target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc }; - uintptr_t host_pc = (uintptr_t)tb->tc.ptr; - CPUArchState *env = cpu->env_ptr; + uintptr_t iter_pc = (uintptr_t)tb->tc.ptr; const uint8_t *p = tb->tc.ptr + tb->tc.size; int i, j, num_insns = tb->icount; -#ifdef CONFIG_PROFILER - TCGProfile *prof = &tcg_ctx->prof; - int64_t ti = profile_getclock(); -#endif - searched_pc -= GETPC_ADJ; + host_pc -= GETPC_ADJ; - if (searched_pc < host_pc) { + if (host_pc < iter_pc) { return -1; } - /* Reconstruct the stored insn data while looking for the point at - which the end of the insn exceeds the searched_pc. */ + memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS); + if (!TARGET_TB_PCREL) { + data[0] = tb_pc(tb); + } + + /* + * Reconstruct the stored insn data while looking for the point + * at which the end of the insn exceeds host_pc. + */ for (i = 0; i < num_insns; ++i) { for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { data[j] += decode_sleb128(&p); } - host_pc += decode_sleb128(&p); - if (host_pc > searched_pc) { - goto found; + iter_pc += decode_sleb128(&p); + if (iter_pc > host_pc) { + return num_insns - i; } } return -1; +} - found: - if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) { - assert(icount_enabled()); - /* Reset the cycle counter to the start of the block - and shift if to the number of actually executed instructions */ - cpu_neg(cpu)->icount_decr.u16.low += num_insns - i; +/* + * The cpu state corresponding to 'host_pc' is restored in + * preparation for exiting the TB. + */ +void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, + uintptr_t host_pc) +{ + uint64_t data[TARGET_INSN_START_WORDS]; +#ifdef CONFIG_PROFILER + TCGProfile *prof = &tcg_ctx->prof; + int64_t ti = profile_getclock(); +#endif + int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data); + + if (insns_left < 0) { + return; } - restore_state_to_opc(env, tb, data); + + if (tb_cflags(tb) & CF_USE_ICOUNT) { + assert(icount_enabled()); + /* + * Reset the cycle counter to the start of the block and + * shift if to the number of actually executed instructions. + */ + cpu_neg(cpu)->icount_decr.u16.low += insns_left; + } + + cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data); #ifdef CONFIG_PROFILER qatomic_set(&prof->restore_time, prof->restore_time + profile_getclock() - ti); qatomic_set(&prof->restore_count, prof->restore_count + 1); #endif - return 0; } -bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit) +bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc) { /* * The host_pc has to be in the rx region of the code buffer. 
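cpu_unwind_data_from_tb() above reconstructs the per-instruction data by accumulating signed-LEB128 deltas produced by encode_search(), stopping once the decoded host PC passes the faulting one. A standalone toy decoder plus that accumulation idea (the byte stream and starting value below are invented for the example; QEMU's real decoder is similar in shape):

#include <stdint.h>
#include <stdio.h>

static int64_t decode_sleb128(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    int64_t val = 0;
    int shift = 0;
    uint8_t byte;

    do {
        byte = *p++;
        val |= (uint64_t)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < 64 && (byte & 0x40)) {
        val |= -(uint64_t)1 << shift;     /* sign-extend the last group */
    }
    *pp = p;
    return val;
}

int main(void)
{
    static const uint8_t stream[] = { 0x02, 0x7f, 0x82, 0x01 };  /* +2, -1, +130 */
    const uint8_t *p = stream;
    int64_t acc = 100;

    for (int i = 0; i < 3; i++) {
        acc += decode_sleb128(&p);
        printf("after delta %d: %lld\n", i, (long long)acc);
    }
    return 0;                              /* prints 102, 101, 231 */
}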
@@ -393,13 +332,24 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit) if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) { TranslationBlock *tb = tcg_tb_lookup(host_pc); if (tb) { - cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit); + cpu_restore_state_from_tb(cpu, tb, host_pc); return true; } } return false; } +bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data) +{ + if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) { + TranslationBlock *tb = tcg_tb_lookup(host_pc); + if (tb) { + return cpu_unwind_data_from_tb(tb, host_pc, data) >= 0; + } + } + return false; +} + void page_init(void) { page_size_init(); @@ -471,7 +421,7 @@ void page_init(void) #endif } -static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) +PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc) { PageDesc *pd; void **lp; @@ -537,31 +487,8 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) return pd + (index & (V_L2_SIZE - 1)); } -static inline PageDesc *page_find(tb_page_addr_t index) -{ - return page_find_alloc(index, 0); -} - -static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, - PageDesc **ret_p2, tb_page_addr_t phys2, int alloc); - /* In user-mode page locks aren't used; mmap_lock is enough */ #ifdef CONFIG_USER_ONLY - -#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock()) - -static inline void page_lock(PageDesc *pd) -{ } - -static inline void page_unlock(PageDesc *pd) -{ } - -static inline void page_lock_tb(const TranslationBlock *tb) -{ } - -static inline void page_unlock_tb(const TranslationBlock *tb) -{ } - struct page_collection * page_collection_lock(tb_page_addr_t start, tb_page_addr_t end) { @@ -610,8 +537,7 @@ static void page_unlock__debug(const PageDesc *pd) g_assert(removed); } -static void -do_assert_page_locked(const PageDesc *pd, const char *file, int line) +void do_assert_page_locked(const PageDesc *pd, const char *file, int line) { if (unlikely(!page_is_locked(pd))) { error_report("assert_page_lock: PageDesc %p not locked @ %s:%d", @@ -620,8 +546,6 @@ do_assert_page_locked(const PageDesc *pd, const char *file, int line) } } -#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__) - void assert_no_pages_locked(void) { ht_pages_locked_debug_init(); @@ -630,50 +554,23 @@ void assert_no_pages_locked(void) #else /* !CONFIG_DEBUG_TCG */ -#define assert_page_locked(pd) - -static inline void page_lock__debug(const PageDesc *pd) -{ -} - -static inline void page_unlock__debug(const PageDesc *pd) -{ -} +static inline void page_lock__debug(const PageDesc *pd) { } +static inline void page_unlock__debug(const PageDesc *pd) { } #endif /* CONFIG_DEBUG_TCG */ -static inline void page_lock(PageDesc *pd) +void page_lock(PageDesc *pd) { page_lock__debug(pd); qemu_spin_lock(&pd->lock); } -static inline void page_unlock(PageDesc *pd) +void page_unlock(PageDesc *pd) { qemu_spin_unlock(&pd->lock); page_unlock__debug(pd); } -/* lock the page(s) of a TB in the correct acquisition order */ -static inline void page_lock_tb(const TranslationBlock *tb) -{ - page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0); -} - -static inline void page_unlock_tb(const TranslationBlock *tb) -{ - PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); - - page_unlock(p1); - if (unlikely(tb->page_addr[1] != -1)) { - PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); - - if (p2 != p1) { - page_unlock(p2); - } - } -} - static inline struct 
page_entry * page_entry_new(PageDesc *pd, tb_page_addr_t index) { @@ -824,9 +721,9 @@ page_collection_lock(tb_page_addr_t start, tb_page_addr_t end) } assert_page_locked(pd); PAGE_FOR_EACH_TB(pd, tb, n) { - if (page_trylock_add(set, tb->page_addr[0]) || - (tb->page_addr[1] != -1 && - page_trylock_add(set, tb->page_addr[1]))) { + if (page_trylock_add(set, tb_page_addr0(tb)) || + (tb_page_addr1(tb) != -1 && + page_trylock_add(set, tb_page_addr1(tb)))) { /* drop all locks, and reacquire in order */ g_tree_foreach(set->tree, page_entry_unlock, NULL); goto retry; @@ -845,572 +742,35 @@ void page_collection_unlock(struct page_collection *set) #endif /* !CONFIG_USER_ONLY */ -static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, - PageDesc **ret_p2, tb_page_addr_t phys2, int alloc) -{ - PageDesc *p1, *p2; - tb_page_addr_t page1; - tb_page_addr_t page2; - - assert_memory_lock(); - g_assert(phys1 != -1); - - page1 = phys1 >> TARGET_PAGE_BITS; - page2 = phys2 >> TARGET_PAGE_BITS; - - p1 = page_find_alloc(page1, alloc); - if (ret_p1) { - *ret_p1 = p1; - } - if (likely(phys2 == -1)) { - page_lock(p1); - return; - } else if (page1 == page2) { - page_lock(p1); - if (ret_p2) { - *ret_p2 = p1; - } - return; - } - p2 = page_find_alloc(page2, alloc); - if (ret_p2) { - *ret_p2 = p2; - } - if (page1 < page2) { - page_lock(p1); - page_lock(p2); - } else { - page_lock(p2); - page_lock(p1); - } -} - -static bool tb_cmp(const void *ap, const void *bp) -{ - const TranslationBlock *a = ap; - const TranslationBlock *b = bp; - - return a->pc == b->pc && - a->cs_base == b->cs_base && - a->flags == b->flags && - (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) && - a->trace_vcpu_dstate == b->trace_vcpu_dstate && - a->page_addr[0] == b->page_addr[0] && - a->page_addr[1] == b->page_addr[1]; -} - -static bool inv_tb_cmp(const void *ap, const void *bp) -{ - const TranslationBlock *a = ap, *b = bp; - return tb_cmp(ap, bp) && a->ihash == b->ihash; -} - -void tb_htable_init(void) -{ - unsigned int mode = QHT_MODE_AUTO_RESIZE; - - qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode); - qht_init(&tb_ctx.inv_htable, inv_tb_cmp, CODE_GEN_HTABLE_SIZE, mode); -} - -/* call with @p->lock held */ -static inline void invalidate_page_bitmap(PageDesc *p) -{ - assert_page_locked(p); -#ifdef CONFIG_SOFTMMU - g_free(p->code_bitmap); - p->code_bitmap = NULL; - p->code_write_count = 0; -#endif -} - -/* Set to NULL all the 'first_tb' fields in all PageDescs. */ -static void page_flush_tb_1(int level, void **lp) -{ - int i; - - if (*lp == NULL) { - return; - } - if (level == 0) { - PageDesc *pd = *lp; - - for (i = 0; i < V_L2_SIZE; ++i) { - page_lock(&pd[i]); - pd[i].first_tb = (uintptr_t)NULL; - invalidate_page_bitmap(pd + i); - page_unlock(&pd[i]); - } - } else { - void **pp = *lp; - - for (i = 0; i < V_L2_SIZE; ++i) { - page_flush_tb_1(level - 1, pp + i); - } - } -} - -static void page_flush_tb(void) -{ - int i, l1_sz = v_l1_size; - - for (i = 0; i < l1_sz; i++) { - page_flush_tb_1(v_l2_levels, l1_map + i); - } -} - -static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data) -{ - const TranslationBlock *tb = value; - size_t *size = data; - - *size += tb->tc.size; - return false; -} - -/* flush all the translation blocks */ -static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) -{ - bool did_flush = false; - - mmap_lock(); - /* If it is already been done on request of another CPU, - * just retry. 
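The "just retry" comment above is the heart of how tb_flush()/do_tb_flush() coalesce requests: the requester snapshots tb_ctx.tb_flush_count and the deferred worker only flushes if the counter still matches, so duplicate requests queued behind an earlier flush degrade to no-ops. A stripped-down sketch of that generation-counter pattern (plain variables and direct calls stand in for qatomic_* and async_safe_run_on_cpu()):

#include <stdio.h>

static unsigned flush_count;

static void do_flush(unsigned requested_at)
{
    if (flush_count != requested_at) {
        return;                   /* someone already flushed; nothing to do */
    }
    /* ... the expensive flush work would go here ... */
    flush_count++;
}

int main(void)
{
    unsigned snap = flush_count;

    do_flush(snap);               /* performs the flush, bumps the counter */
    do_flush(snap);               /* stale duplicate request: no-op */
    printf("flush_count=%u\n", flush_count);   /* prints 1 */
    return 0;
}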
- */ - if (tb_ctx.tb_flush_count != tb_flush_count.host_int) { - goto done; - } - did_flush = true; - - if (DEBUG_TB_FLUSH_GATE) { - size_t nb_tbs = tcg_nb_tbs(); - size_t host_size = 0; - - tcg_tb_foreach(tb_host_size_iter, &host_size); - printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n", - tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0); - } - - CPU_FOREACH(cpu) { - cpu_tb_jmp_cache_clear(cpu); - } - - qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE); - qht_reset_size(&tb_ctx.inv_htable, CODE_GEN_HTABLE_SIZE); - - page_flush_tb(); - - tcg_region_reset_all(); - /* XXX: flush processor icache at this point if cache flush is - expensive */ - qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1); - -done: - mmap_unlock(); - if (did_flush) { - qemu_plugin_flush_cb(); - } -} - -void tb_flush(CPUState *cpu) -{ - if (tcg_enabled()) { - unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count); - - if (cpu_in_exclusive_context(cpu)) { - do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count)); - } else { - async_safe_run_on_cpu(cpu, do_tb_flush, - RUN_ON_CPU_HOST_INT(tb_flush_count)); - } - } -} - /* - * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only, - * so in order to prevent bit rot we compile them unconditionally in user-mode, - * and let the optimizer get rid of them by wrapping their user-only callers - * with if (DEBUG_TB_CHECK_GATE). + * Isolate the portion of code gen which can setjmp/longjmp. + * Return the size of the generated code, or negative on error. */ -#ifdef CONFIG_USER_ONLY - -static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp) +static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb, + target_ulong pc, void *host_pc, + int *max_insns, int64_t *ti) { - TranslationBlock *tb = p; - target_ulong addr = *(target_ulong *)userp; - - if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) { - printf("ERROR invalidate: address=" TARGET_FMT_lx - " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size); - } -} - -/* verify that all the pages have correct rights for code - * - * Called with mmap_lock held. 
- */ -static void tb_invalidate_check(target_ulong address) -{ - address &= TARGET_PAGE_MASK; - qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address); -} - -static void do_tb_page_check(void *p, uint32_t hash, void *userp) -{ - TranslationBlock *tb = p; - int flags1, flags2; - - flags1 = page_get_flags(tb->pc); - flags2 = page_get_flags(tb->pc + tb->size - 1); - if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { - printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", - (long)tb->pc, tb->size, flags1, flags2); - } -} - -/* verify that all the pages have correct rights for code */ -static void tb_page_check(void) -{ - qht_iter(&tb_ctx.htable, do_tb_page_check, NULL); -} - -#endif /* CONFIG_USER_ONLY */ - -/* - * user-mode: call with mmap_lock held - * !user-mode: call with @pd->lock held - */ -static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb) -{ - TranslationBlock *tb1; - uintptr_t *pprev; - unsigned int n1; - - assert_page_locked(pd); - pprev = &pd->first_tb; - PAGE_FOR_EACH_TB(pd, tb1, n1) { - if (tb1 == tb) { - *pprev = tb1->page_next[n1]; - return; - } - pprev = &tb1->page_next[n1]; - } - g_assert_not_reached(); -} - -/* remove @orig from its @n_orig-th jump list */ -static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig) -{ - uintptr_t ptr, ptr_locked; - TranslationBlock *dest; - TranslationBlock *tb; - uintptr_t *pprev; - int n; - - /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */ - ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1); - dest = (TranslationBlock *)(ptr & ~1); - if (dest == NULL) { - return; + int ret = sigsetjmp(tcg_ctx->jmp_trans, 0); + if (unlikely(ret != 0)) { + return ret; } - qemu_spin_lock(&dest->jmp_lock); - /* - * While acquiring the lock, the jump might have been removed if the - * destination TB was invalidated; check again. - */ - ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]); - if (ptr_locked != ptr) { - qemu_spin_unlock(&dest->jmp_lock); - /* - * The only possibility is that the jump was unlinked via - * tb_jump_unlink(dest). Seeing here another destination would be a bug, - * because we set the LSB above. - */ - g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID); - return; - } - /* - * We first acquired the lock, and since the destination pointer matches, - * we know for sure that @orig is in the jmp list. 
- */ - pprev = &dest->jmp_list_head; - TB_FOR_EACH_JMP(dest, tb, n) { - if (tb == orig && n == n_orig) { - *pprev = tb->jmp_list_next[n]; - /* no need to set orig->jmp_dest[n]; setting the LSB was enough */ - qemu_spin_unlock(&dest->jmp_lock); - return; - } - pprev = &tb->jmp_list_next[n]; - } - g_assert_not_reached(); -} + tcg_func_start(tcg_ctx); -/* reset the jump entry 'n' of a TB so that it is not chained to - another TB */ -static inline void tb_reset_jump(TranslationBlock *tb, int n) -{ - uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]); - tb_set_jmp_target(tb, n, addr); -} + tcg_ctx->cpu = env_cpu(env); + gen_intermediate_code(env_cpu(env), tb, *max_insns, pc, host_pc); + assert(tb->size != 0); + tcg_ctx->cpu = NULL; + *max_insns = tb->icount; -/* remove any jumps to the TB */ -static inline void tb_jmp_unlink(TranslationBlock *dest) -{ - TranslationBlock *tb; - int n; - - qemu_spin_lock(&dest->jmp_lock); - - TB_FOR_EACH_JMP(dest, tb, n) { - tb_reset_jump(tb, n); - qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1); - /* No need to clear the list entry; setting the dest ptr is enough */ - } - dest->jmp_list_head = (uintptr_t)NULL; - - qemu_spin_unlock(&dest->jmp_lock); -} - -/* - * In user-mode, call with mmap_lock held. - * In !user-mode, if @rm_from_page_list is set, call with the TB's pages' - * locks held. - */ -static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) -{ - CPUState *cpu; - PageDesc *p; - uint32_t h; - tb_page_addr_t phys_pc; - uint32_t orig_cflags = tb_cflags(tb); - void *existing = NULL; - - assert_memory_lock(); - - /* make sure no further incoming jumps will be chained to this TB */ - qemu_spin_lock(&tb->jmp_lock); - qatomic_set(&tb->cflags, tb->cflags | CF_INVALID); - qemu_spin_unlock(&tb->jmp_lock); - - /* remove the TB from the hash list */ - phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); - h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags, - tb->trace_vcpu_dstate); - if (!qht_remove(&tb_ctx.htable, tb, h)) { - return; - } - - qht_insert(&tb_ctx.inv_htable, tb, h, &existing); - g_assert(existing == NULL); - - /* remove the TB from the page list */ - if (rm_from_page_list) { - p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); - tb_page_remove(p, tb); - invalidate_page_bitmap(p); - if (tb->page_addr[1] != -1) { - p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); - tb_page_remove(p, tb); - invalidate_page_bitmap(p); - } - } - - /* remove the TB from the hash list */ - h = tb_jmp_cache_hash_func(tb->pc); - CPU_FOREACH(cpu) { - if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) { - qatomic_set(&cpu->tb_jmp_cache[h], NULL); - } - } - - /* suppress this TB from the two jump lists */ - tb_remove_from_jmp_list(tb, 0); - tb_remove_from_jmp_list(tb, 1); - - /* suppress any remaining jumps to this TB */ - tb_jmp_unlink(tb); - - qatomic_set(&tb_ctx.tb_phys_invalidate_count, - tb_ctx.tb_phys_invalidate_count + 1); -} - -static void tb_phys_invalidate__locked(TranslationBlock *tb) -{ - qemu_thread_jit_write(); - do_tb_phys_invalidate(tb, true); - qemu_thread_jit_execute(); -} - -/* invalidate one TB - * - * Called with mmap_lock held in user-mode. 
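setjmp_gen_code(), whose added lines appear in this hunk, funnels every code-generation failure through sigsetjmp(tcg_ctx->jmp_trans, 0): deep inside the generator a siglongjmp with a negative code lands back at the sigsetjmp and becomes the function's return value. A self-contained illustration of that control flow (deep_generator, generate and the -2 code are invented for the example):

#include <setjmp.h>
#include <stdio.h>

static sigjmp_buf trans_jmp;

static void deep_generator(int fail)
{
    if (fail) {
        siglongjmp(trans_jmp, -2);   /* e.g. "output buffer full, restart" */
    }
    /* ... normal code generation would continue here ... */
}

static int generate(int fail)
{
    int ret = sigsetjmp(trans_jmp, 0);

    if (ret != 0) {
        return ret;                  /* resumed here after the longjmp */
    }
    deep_generator(fail);
    return 64;                       /* pretend 64 bytes were emitted */
}

int main(void)
{
    printf("ok path:   %d\n", generate(0));   /* 64 */
    printf("bail path: %d\n", generate(1));   /* -2 */
    return 0;
}

Passing 0 as the savemask argument keeps the jump cheap, since the signal mask is neither saved nor restored.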
- */ -void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) -{ - if (page_addr == -1 && tb->page_addr[0] != -1) { - page_lock_tb(tb); - do_tb_phys_invalidate(tb, true); - page_unlock_tb(tb); - } else { - do_tb_phys_invalidate(tb, false); - } -} - -#ifdef CONFIG_SOFTMMU -/* call with @p->lock held */ -static void build_page_bitmap(PageDesc *p) -{ - int n, tb_start, tb_end; - TranslationBlock *tb; - - assert_page_locked(p); - p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE); - - PAGE_FOR_EACH_TB(p, tb, n) { - /* NOTE: this is subtle as a TB may span two physical pages */ - if (n == 0) { - /* NOTE: tb_end may be after the end of the page, but - it is not a problem */ - tb_start = tb->pc & ~TARGET_PAGE_MASK; - tb_end = tb_start + tb->size; - if (tb_end > TARGET_PAGE_SIZE) { - tb_end = TARGET_PAGE_SIZE; - } - } else { - tb_start = 0; - tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); - } - bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start); - } -} +#ifdef CONFIG_PROFILER + qatomic_set(&tcg_ctx->prof.tb_count, tcg_ctx->prof.tb_count + 1); + qatomic_set(&tcg_ctx->prof.interm_time, + tcg_ctx->prof.interm_time + profile_getclock() - *ti); + *ti = profile_getclock(); #endif -/* add the tb in the target page and protect it if necessary - * - * Called with mmap_lock held for user-mode emulation. - * Called with @p->lock held in !user-mode. - */ -static inline void tb_page_add(PageDesc *p, TranslationBlock *tb, - unsigned int n, tb_page_addr_t page_addr) -{ -#ifndef CONFIG_USER_ONLY - bool page_already_protected; -#endif - - assert_page_locked(p); - - tb->page_addr[n] = page_addr; - tb->page_next[n] = p->first_tb; -#ifndef CONFIG_USER_ONLY - page_already_protected = p->first_tb != (uintptr_t)NULL; -#endif - p->first_tb = (uintptr_t)tb | n; - invalidate_page_bitmap(p); - -#if defined(CONFIG_USER_ONLY) - if (p->flags & PAGE_WRITE) { - target_ulong addr; - PageDesc *p2; - int prot; - - /* force the host page as non writable (writes will have a - page fault + mprotect overhead) */ - page_addr &= qemu_host_page_mask; - prot = 0; - for (addr = page_addr; addr < page_addr + qemu_host_page_size; - addr += TARGET_PAGE_SIZE) { - - p2 = page_find(addr >> TARGET_PAGE_BITS); - if (!p2) { - continue; - } - prot |= p2->flags; - p2->flags &= ~PAGE_WRITE; - } - mprotect(g2h_untagged(page_addr), qemu_host_page_size, - (prot & PAGE_BITS) & ~PAGE_WRITE); - if (DEBUG_TB_INVALIDATE_GATE) { - printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr); - } - } -#else - /* if some code is already present, then the pages are already - protected. So we handle the case where only the first TB is - allocated in a physical page */ - if (!page_already_protected) { - tlb_protect_code(page_addr); - } -#endif -} - -/* - * Add a new TB and link it to the physical page tables. phys_page2 is - * (-1) to indicate that only one page contains the TB. - * - * Called with mmap_lock held for user-mode emulation. - * - * Returns a pointer @tb, or a pointer to an existing TB that matches @tb. - * Note that in !user-mode, another thread might have already added a TB - * for the same block of guest code that @tb corresponds to. In that case, - * the caller should discard the original @tb, and use instead the returned TB. 
- */ -static TranslationBlock * -tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, - tb_page_addr_t phys_page2) -{ - PageDesc *p; - PageDesc *p2 = NULL; - void *existing_tb = NULL; - uint32_t h; - - assert_memory_lock(); - tcg_debug_assert(!(tb->cflags & CF_INVALID)); - - /* - * Add the TB to the page list, acquiring first the pages's locks. - * We keep the locks held until after inserting the TB in the hash table, - * so that if the insertion fails we know for sure that the TBs are still - * in the page descriptors. - * Note that inserting into the hash table first isn't an option, since - * we can only insert TBs that are fully initialized. - */ - page_lock_pair(&p, phys_pc, &p2, phys_page2, 1); - tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK); - if (p2) { - tb_page_add(p2, tb, 1, phys_page2); - } else { - tb->page_addr[1] = -1; - } - - /* add in the hash table */ - h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags, - tb->trace_vcpu_dstate); - qht_insert(&tb_ctx.htable, tb, h, &existing_tb); - - /* remove TB from the page(s) if we couldn't insert it */ - if (unlikely(existing_tb)) { - tb_page_remove(p, tb); - invalidate_page_bitmap(p); - if (p2) { - tb_page_remove(p2, tb); - invalidate_page_bitmap(p2); - } - tb = existing_tb; - } - - if (p2 && p2 != p) { - page_unlock(p2); - } - page_unlock(p); - -#ifdef CONFIG_USER_ONLY - if (DEBUG_TB_CHECK_GATE) { - tb_page_check(); - } -#endif - return tb; + return tcg_gen_code(tcg_ctx, tb, pc); } /* Called with mmap_lock held for user mode emulation. */ @@ -1420,19 +780,20 @@ TranslationBlock *tb_gen_code(CPUState *cpu, { CPUArchState *env = cpu->env_ptr; TranslationBlock *tb, *existing_tb; - tb_page_addr_t phys_pc, phys_page2; - target_ulong virt_page2; + tb_page_addr_t phys_pc; tcg_insn_unit *gen_code_buf; int gen_code_size, search_size, max_insns; #ifdef CONFIG_PROFILER TCGProfile *prof = &tcg_ctx->prof; - int64_t ti; #endif + int64_t ti; + void *host_pc; + bool recycled = false; assert_memory_lock(); qemu_thread_jit_write(); - phys_pc = get_page_addr_code(env, pc); + phys_pc = get_page_addr_code_hostp(env, pc, &host_pc); if (phys_pc == -1) { /* Generate a one-shot TB with 1 insn in it */ @@ -1450,10 +811,12 @@ TranslationBlock *tb_gen_code(CPUState *cpu, qemu_spin_lock(&tb->jmp_lock); qatomic_set(&tb->cflags, tb->cflags & ~CF_INVALID); qemu_spin_unlock(&tb->jmp_lock); - uint32_t h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb), + uint32_t h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 
0 : tb_pc(tb)), + tb->flags, tb_cflags(tb), tb->trace_vcpu_dstate); bool removed = qht_remove(&tb_ctx.inv_htable, tb, h); g_assert(removed); + recycled = true; goto recycle_tb; } @@ -1470,11 +833,15 @@ TranslationBlock *tb_gen_code(CPUState *cpu, gen_code_buf = tcg_ctx->code_gen_ptr; tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf); +#if !TARGET_TB_PCREL tb->pc = pc; +#endif tb->cs_base = cs_base; tb->flags = flags; tb->cflags = cflags; tb->trace_vcpu_dstate = *cpu->trace_dstate; + tb_set_page_addr0(tb, phys_pc); + tb_set_page_addr1(tb, -1); tcg_ctx->tb_cflags = cflags; tb_overflow: @@ -1484,43 +851,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu, ti = profile_getclock(); #endif - gen_code_size = sigsetjmp(tcg_ctx->jmp_trans, 0); - if (unlikely(gen_code_size != 0)) { - goto error_return; - } + trace_translate_block(tb, pc, tb->tc.ptr); - tcg_func_start(tcg_ctx); - - tcg_ctx->cpu = env_cpu(env); - gen_intermediate_code(cpu, tb, max_insns); - assert(tb->size != 0); - tcg_ctx->cpu = NULL; - max_insns = tb->icount; - - trace_translate_block(tb, tb->pc, tb->tc.ptr); - - /* generate machine code */ - tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID; - tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID; - tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset; - if (TCG_TARGET_HAS_direct_jump) { - tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg; - tcg_ctx->tb_jmp_target_addr = NULL; - } else { - tcg_ctx->tb_jmp_insn_offset = NULL; - tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg; - } - -#ifdef CONFIG_PROFILER - qatomic_set(&prof->tb_count, prof->tb_count + 1); - qatomic_set(&prof->interm_time, - prof->interm_time + profile_getclock() - ti); - ti = profile_getclock(); -#endif - - gen_code_size = tcg_gen_code(tcg_ctx, tb); + gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti); if (unlikely(gen_code_size < 0)) { - error_return: switch (gen_code_size) { case -1: /* @@ -1574,70 +908,76 @@ TranslationBlock *tb_gen_code(CPUState *cpu, #ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) && - qemu_log_in_addr_range(tb->pc)) { - FILE *logfile = qemu_log_lock(); - int code_size, data_size; - const tcg_target_ulong *rx_data_gen_ptr; - size_t chunk_start; - int insn = 0; + qemu_log_in_addr_range(pc)) { + FILE *logfile = qemu_log_trylock(); + if (logfile) { + int code_size, data_size; + const tcg_target_ulong *rx_data_gen_ptr; + size_t chunk_start; + int insn = 0; - if (tcg_ctx->data_gen_ptr) { - rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr); - code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr; - data_size = gen_code_size - code_size; - } else { - rx_data_gen_ptr = 0; - code_size = gen_code_size; - data_size = 0; - } - - /* Dump header and the first instruction */ - qemu_log("OUT: [size=%d]\n", gen_code_size); - qemu_log(" -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n", - tcg_ctx->gen_insn_data[insn][0]); - chunk_start = tcg_ctx->gen_insn_end_off[insn]; - log_disas(tb->tc.ptr, chunk_start); - - /* - * Dump each instruction chunk, wrapping up empty chunks into - * the next instruction. The whole array is offset so the - * first entry is the beginning of the 2nd instruction. 
- */ - while (insn < tb->icount) { - size_t chunk_end = tcg_ctx->gen_insn_end_off[insn]; - if (chunk_end > chunk_start) { - qemu_log(" -- guest addr 0x" TARGET_FMT_lx "\n", - tcg_ctx->gen_insn_data[insn][0]); - log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start); - chunk_start = chunk_end; + if (tcg_ctx->data_gen_ptr) { + rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr); + code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr; + data_size = gen_code_size - code_size; + } else { + rx_data_gen_ptr = 0; + code_size = gen_code_size; + data_size = 0; } - insn++; - } - if (chunk_start < code_size) { - qemu_log(" -- tb slow paths + alignment\n"); - log_disas(tb->tc.ptr + chunk_start, code_size - chunk_start); - } + /* Dump header and the first instruction */ + fprintf(logfile, "OUT: [size=%d]\n", gen_code_size); + fprintf(logfile, + " -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n", + tcg_ctx->gen_insn_data[insn][0]); + chunk_start = tcg_ctx->gen_insn_end_off[insn]; + disas(logfile, tb->tc.ptr, chunk_start); - /* Finally dump any data we may have after the block */ - if (data_size) { - int i; - qemu_log(" data: [size=%d]\n", data_size); - for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) { - if (sizeof(tcg_target_ulong) == 8) { - qemu_log("0x%08" PRIxPTR ": .quad 0x%016" TCG_PRIlx "\n", - (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]); - } else if (sizeof(tcg_target_ulong) == 4) { - qemu_log("0x%08" PRIxPTR ": .long 0x%08" TCG_PRIlx "\n", - (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]); - } else { - qemu_build_not_reached(); + /* + * Dump each instruction chunk, wrapping up empty chunks into + * the next instruction. The whole array is offset so the + * first entry is the beginning of the 2nd instruction. + */ + while (insn < tb->icount) { + size_t chunk_end = tcg_ctx->gen_insn_end_off[insn]; + if (chunk_end > chunk_start) { + fprintf(logfile, " -- guest addr 0x" TARGET_FMT_lx "\n", + tcg_ctx->gen_insn_data[insn][0]); + disas(logfile, tb->tc.ptr + chunk_start, + chunk_end - chunk_start); + chunk_start = chunk_end; + } + insn++; + } + + if (chunk_start < code_size) { + fprintf(logfile, " -- tb slow paths + alignment\n"); + disas(logfile, tb->tc.ptr + chunk_start, + code_size - chunk_start); + } + + /* Finally dump any data we may have after the block */ + if (data_size) { + int i; + fprintf(logfile, " data: [size=%d]\n", data_size); + for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) { + if (sizeof(tcg_target_ulong) == 8) { + fprintf(logfile, + "0x%08" PRIxPTR ": .quad 0x%016" TCG_PRIlx "\n", + (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]); + } else if (sizeof(tcg_target_ulong) == 4) { + fprintf(logfile, + "0x%08" PRIxPTR ": .long 0x%08" TCG_PRIlx "\n", + (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]); + } else { + qemu_build_not_reached(); + } } } + fprintf(logfile, "\n"); + qemu_log_unlock(logfile); } - qemu_log("\n"); - qemu_log_flush(); - qemu_log_unlock(logfile); } #endif @@ -1664,13 +1004,11 @@ recycle_tb: } /* - * If the TB is not associated with a physical RAM page then - * it must be a temporary one-insn TB, and we have nothing to do - * except fill in the page_addr[] fields. Return early before - * attempting to link to other TBs or add to the lookup table. + * If the TB is not associated with a physical RAM page then it must be + * a temporary one-insn TB, and we have nothing left to do. Return early + * before attempting to link to other TBs or add to the lookup table. 
*/ - if (phys_pc == -1) { - tb->page_addr[0] = tb->page_addr[1] = -1; + if (tb_page_addr0(tb) == -1) { return tb; } @@ -1681,296 +1019,25 @@ recycle_tb: */ tcg_tb_insert(tb); - /* check next page if needed */ - virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; - phys_page2 = -1; - if ((pc & TARGET_PAGE_MASK) != virt_page2) { - phys_page2 = get_page_addr_code(env, virt_page2); - } /* * No explicit memory barrier is required -- tb_link_page() makes the * TB visible in a consistent state. */ - existing_tb = tb_link_page(tb, phys_pc, phys_page2); + existing_tb = tb_link_page(tb, tb_page_addr0(tb), tb_page_addr1(tb)); /* if the TB already exists, discard what we just translated */ if (unlikely(existing_tb != tb)) { - uintptr_t orig_aligned = (uintptr_t)gen_code_buf; + if (!recycled) { + uintptr_t orig_aligned = (uintptr_t)gen_code_buf; - orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize); - qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned); + orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize); + qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned); + } tcg_tb_remove(tb); return existing_tb; } return tb; } -/* - * @p must be non-NULL. - * user-mode: call with mmap_lock held. - * !user-mode: call with all @pages locked. - */ -static void -tb_invalidate_phys_page_range__locked(struct page_collection *pages, - PageDesc *p, tb_page_addr_t start, - tb_page_addr_t end, - uintptr_t retaddr) -{ - TranslationBlock *tb; -#ifndef XBOX - tb_page_addr_t tb_start, tb_end; -#endif - int n; -#ifdef TARGET_HAS_PRECISE_SMC - CPUState *cpu = current_cpu; - CPUArchState *env = NULL; - bool current_tb_not_found = retaddr != 0; - bool current_tb_modified = false; - TranslationBlock *current_tb = NULL; - target_ulong current_pc = 0; - target_ulong current_cs_base = 0; - uint32_t current_flags = 0; -#endif /* TARGET_HAS_PRECISE_SMC */ - - assert_page_locked(p); - -#if defined(TARGET_HAS_PRECISE_SMC) - if (cpu != NULL) { - env = cpu->env_ptr; - } -#endif - - /* we remove all the TBs in the range [start, end[ */ - /* XXX: see if in some cases it could be faster to invalidate all - the code */ - PAGE_FOR_EACH_TB(p, tb, n) { - assert_page_locked(p); -#ifndef XBOX - /* NOTE: this is subtle as a TB may span two physical pages */ - if (n == 0) { - /* NOTE: tb_end may be after the end of the page, but - it is not a problem */ - tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); - tb_end = tb_start + tb->size; - } else { - tb_start = tb->page_addr[1]; - tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); - } - if (!(tb_end <= start || tb_start >= end)) { -#else - { -#endif -#ifdef TARGET_HAS_PRECISE_SMC - if (current_tb_not_found) { - current_tb_not_found = false; - /* now we have a real cpu fault */ - current_tb = tcg_tb_lookup(retaddr); - } - if (current_tb == tb && - (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { - /* - * If we are modifying the current TB, we must stop - * its execution. We could be more precise by checking - * that the modification is after the current PC, but it - * would require a specialized function to partially - * restore the CPU state. 
- */
-                current_tb_modified = true;
-                cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
-                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
-                                     &current_flags);
-            }
-#endif /* TARGET_HAS_PRECISE_SMC */
-            tb_phys_invalidate__locked(tb);
-        }
-    }
-#if !defined(CONFIG_USER_ONLY)
-    /* if no code remaining, no need to continue to use slow writes */
-    if (!p->first_tb) {
-        invalidate_page_bitmap(p);
-        tlb_unprotect_code(start);
-    }
-#endif
-#ifdef TARGET_HAS_PRECISE_SMC
-    if (current_tb_modified) {
-        page_collection_unlock(pages);
-        /* Force execution of one insn next time. */
-        cpu->cflags_next_tb = 1 | curr_cflags(cpu);
-        mmap_unlock();
-        cpu_loop_exit_noexc(cpu);
-    }
-#endif
-}
-
-/*
- * Invalidate all TBs which intersect with the target physical address range
- * [start;end[. NOTE: start and end must refer to the *same* physical page.
- * 'is_cpu_write_access' should be true if called from a real cpu write
- * access: the virtual CPU will exit the current TB if code is modified inside
- * this TB.
- *
- * Called with mmap_lock held for user-mode emulation
- */
-void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
-{
-    struct page_collection *pages;
-    PageDesc *p;
-
-    assert_memory_lock();
-
-    p = page_find(start >> TARGET_PAGE_BITS);
-    if (p == NULL) {
-        return;
-    }
-    pages = page_collection_lock(start, end);
-    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
-    page_collection_unlock(pages);
-}
-
-/*
- * Invalidate all TBs which intersect with the target physical address range
- * [start;end[. NOTE: start and end may refer to *different* physical pages.
- * 'is_cpu_write_access' should be true if called from a real cpu write
- * access: the virtual CPU will exit the current TB if code is modified inside
- * this TB.
- *
- * Called with mmap_lock held for user-mode emulation.
- */
-#ifdef CONFIG_SOFTMMU
-void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
-#else
-void tb_invalidate_phys_range(target_ulong start, target_ulong end)
-#endif
-{
-    struct page_collection *pages;
-    tb_page_addr_t next;
-
-    assert_memory_lock();
-
-    pages = page_collection_lock(start, end);
-    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
-         start < end;
-         start = next, next += TARGET_PAGE_SIZE) {
-        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
-        tb_page_addr_t bound = MIN(next, end);
-
-        if (pd == NULL) {
-            continue;
-        }
-        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
-    }
-    page_collection_unlock(pages);
-}
-
-#ifdef CONFIG_SOFTMMU
-/* len must be <= 8 and start must be a multiple of len.
- * Called via softmmu_template.h when code areas are written to with
- * iothread mutex not held.
- *
- * Call with all @pages in the range [@start, @start + len[ locked.
- */
-void tb_invalidate_phys_page_fast(struct page_collection *pages,
-                                  tb_page_addr_t start, int len,
-                                  uintptr_t retaddr)
-{
-    PageDesc *p;
-
-    assert_memory_lock();
-
-    p = page_find(start >> TARGET_PAGE_BITS);
-    if (!p) {
-        return;
-    }
-
-    assert_page_locked(p);
-    if (!p->code_bitmap &&
-        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
-        build_page_bitmap(p);
-    }
-    if (p->code_bitmap) {
-        unsigned int nr;
-        unsigned long b;
-
-        nr = start & ~TARGET_PAGE_MASK;
-        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
-        if (b & ((1 << len) - 1)) {
-            goto do_invalidate;
-        }
-    } else {
-    do_invalidate:
-        tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
-                                              retaddr);
-    }
-}
-#else
-/* Called with mmap_lock held. 
If pc is not 0 then it indicates the
- * host PC of the faulting store instruction that caused this invalidate.
- * Returns true if the caller needs to abort execution of the current
- * TB (because it was modified by this store and the guest CPU has
- * precise-SMC semantics).
- */
-static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
-{
-    TranslationBlock *tb;
-    PageDesc *p;
-    int n;
-#ifdef TARGET_HAS_PRECISE_SMC
-    TranslationBlock *current_tb = NULL;
-    CPUState *cpu = current_cpu;
-    CPUArchState *env = NULL;
-    int current_tb_modified = 0;
-    target_ulong current_pc = 0;
-    target_ulong current_cs_base = 0;
-    uint32_t current_flags = 0;
-#endif
-
-    assert_memory_lock();
-
-    addr &= TARGET_PAGE_MASK;
-    p = page_find(addr >> TARGET_PAGE_BITS);
-    if (!p) {
-        return false;
-    }
-
-#ifdef TARGET_HAS_PRECISE_SMC
-    if (p->first_tb && pc != 0) {
-        current_tb = tcg_tb_lookup(pc);
-    }
-    if (cpu != NULL) {
-        env = cpu->env_ptr;
-    }
-#endif
-    assert_page_locked(p);
-    PAGE_FOR_EACH_TB(p, tb, n) {
-#ifdef TARGET_HAS_PRECISE_SMC
-        if (current_tb == tb &&
-            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
-            /* If we are modifying the current TB, we must stop
-               its execution. We could be more precise by checking
-               that the modification is after the current PC, but it
-               would require a specialized function to partially
-               restore the CPU state */
-
-            current_tb_modified = 1;
-            cpu_restore_state_from_tb(cpu, current_tb, pc, true);
-            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
-                                 &current_flags);
-        }
-#endif /* TARGET_HAS_PRECISE_SMC */
-        tb_phys_invalidate(tb, addr);
-    }
-    p->first_tb = (uintptr_t)NULL;
-#ifdef TARGET_HAS_PRECISE_SMC
-    if (current_tb_modified) {
-        /* Force execution of one insn next time. */
-        cpu->cflags_next_tb = 1 | curr_cflags(cpu);
-        return true;
-    }
-#endif
-
-    return false;
-}
-#endif
-
 /* user-mode: call with mmap_lock held */
 void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
 {
@@ -1981,7 +1048,7 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
     tb = tcg_tb_lookup(retaddr);
     if (tb) {
         /* We can use retranslation to find the PC. */
-        cpu_restore_state_from_tb(cpu, tb, retaddr, true);
+        cpu_restore_state_from_tb(cpu, tb, retaddr);
         tb_phys_invalidate(tb, -1);
     } else {
         /* The exception probably happened in a helper. 
The CPU state should @@ -2017,7 +1084,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p", (void *)retaddr); } - cpu_restore_state_from_tb(cpu, tb, retaddr, true); + cpu_restore_state_from_tb(cpu, tb, retaddr); /* * Some guests must re-execute the branch when re-executing a delay @@ -2040,14 +1107,18 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) */ cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n; - qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc, - "cpu_io_recompile: rewound execution of TB to " - TARGET_FMT_lx "\n", tb->pc); + if (qemu_loglevel_mask(CPU_LOG_EXEC)) { + target_ulong pc = log_pc(cpu, tb); + if (qemu_log_in_addr_range(pc)) { + qemu_log("cpu_io_recompile: rewound execution of TB to " + TARGET_FMT_lx "\n", pc); + } + } cpu_loop_exit_noexc(cpu); } -static void print_qht_statistics(struct qht_stats hst) +static void print_qht_statistics(struct qht_stats hst, GString *buf) { uint32_t hgram_opts; size_t hgram_bins; @@ -2056,9 +1127,11 @@ static void print_qht_statistics(struct qht_stats hst) if (!hst.head_buckets) { return; } - qemu_printf("TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n", - hst.used_head_buckets, hst.head_buckets, - (double)hst.used_head_buckets / hst.head_buckets * 100); + g_string_append_printf(buf, "TB hash buckets %zu/%zu " + "(%0.2f%% head buckets used)\n", + hst.used_head_buckets, hst.head_buckets, + (double)hst.used_head_buckets / + hst.head_buckets * 100); hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT; @@ -2066,8 +1139,9 @@ static void print_qht_statistics(struct qht_stats hst) hgram_opts |= QDIST_PR_NODECIMAL; } hgram = qdist_pr(&hst.occupancy, 10, hgram_opts); - qemu_printf("TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n", - qdist_avg(&hst.occupancy) * 100, hgram); + g_string_append_printf(buf, "TB hash occupancy %0.2f%% avg chain occ. " + "Histogram: %s\n", + qdist_avg(&hst.occupancy) * 100, hgram); g_free(hgram); hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; @@ -2079,8 +1153,9 @@ static void print_qht_statistics(struct qht_stats hst) hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE; } hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts); - qemu_printf("TB hash avg chain %0.3f buckets. Histogram: %s\n", - qdist_avg(&hst.chain), hgram); + g_string_append_printf(buf, "TB hash avg chain %0.3f buckets. " + "Histogram: %s\n", + qdist_avg(&hst.chain), hgram); g_free(hgram); } @@ -2105,7 +1180,7 @@ static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data) if (tb->size > tst->max_target_size) { tst->max_target_size = tb->size; } - if (tb->page_addr[1] != -1) { + if (tb_page_addr1(tb) != -1) { tst->cross_page++; } if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) { @@ -2117,7 +1192,7 @@ static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data) return false; } -void dump_exec_info(void) +void dump_exec_info(GString *buf) { struct tb_tree_stats tst = {}; struct qht_stats hst; @@ -2126,49 +1201,48 @@ void dump_exec_info(void) tcg_tb_foreach(tb_tree_stats_iter, &tst); nb_tbs = tst.nb_tbs; /* XXX: avoid using doubles ? */ - qemu_printf("Translation buffer state:\n"); + g_string_append_printf(buf, "Translation buffer state:\n"); /* * Report total code size including the padding and TB structs; * otherwise users might think "-accel tcg,tb-size" is not honoured. * For avg host size we use the precise numbers from tb_tree_stats though. 
*/ - qemu_printf("gen code size %zu/%zu\n", - tcg_code_size(), tcg_code_capacity()); - qemu_printf("TB count %zu\n", nb_tbs); - qemu_printf("TB avg target size %zu max=%zu bytes\n", - nb_tbs ? tst.target_size / nb_tbs : 0, - tst.max_target_size); - qemu_printf("TB avg host size %zu bytes (expansion ratio: %0.1f)\n", - nb_tbs ? tst.host_size / nb_tbs : 0, - tst.target_size ? (double)tst.host_size / tst.target_size : 0); - qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page, - nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0); - qemu_printf("direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n", - tst.direct_jmp_count, - nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0, - tst.direct_jmp2_count, - nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0); + g_string_append_printf(buf, "gen code size %zu/%zu\n", + tcg_code_size(), tcg_code_capacity()); + g_string_append_printf(buf, "TB count %zu\n", nb_tbs); + g_string_append_printf(buf, "TB avg target size %zu max=%zu bytes\n", + nb_tbs ? tst.target_size / nb_tbs : 0, + tst.max_target_size); + g_string_append_printf(buf, "TB avg host size %zu bytes " + "(expansion ratio: %0.1f)\n", + nb_tbs ? tst.host_size / nb_tbs : 0, + tst.target_size ? + (double)tst.host_size / tst.target_size : 0); + g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n", + tst.cross_page, + nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0); + g_string_append_printf(buf, "direct jump count %zu (%zu%%) " + "(2 jumps=%zu %zu%%)\n", + tst.direct_jmp_count, + nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0, + tst.direct_jmp2_count, + nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0); qht_statistics_init(&tb_ctx.htable, &hst); - print_qht_statistics(hst); + print_qht_statistics(hst, buf); qht_statistics_destroy(&hst); - qemu_printf("\nStatistics:\n"); - qemu_printf("TB flush count %u\n", - qatomic_read(&tb_ctx.tb_flush_count)); - qemu_printf("TB invalidate count %u\n", - qatomic_read(&tb_ctx.tb_phys_invalidate_count)); + g_string_append_printf(buf, "\nStatistics:\n"); + g_string_append_printf(buf, "TB flush count %u\n", + qatomic_read(&tb_ctx.tb_flush_count)); + g_string_append_printf(buf, "TB invalidate count %u\n", + qatomic_read(&tb_ctx.tb_phys_invalidate_count)); tlb_flush_counts(&flush_full, &flush_part, &flush_elide); - qemu_printf("TLB full flushes %zu\n", flush_full); - qemu_printf("TLB partial flushes %zu\n", flush_part); - qemu_printf("TLB elided flushes %zu\n", flush_elide); - tcg_dump_info(); -} - -void dump_opcount_info(void) -{ - tcg_dump_op_count(); + g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full); + g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part); + g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide); + tcg_dump_info(buf); } #else /* CONFIG_USER_ONLY */ @@ -2303,13 +1377,22 @@ int page_get_flags(target_ulong address) return p->flags; } +/* + * Allow the target to decide if PAGE_TARGET_[12] may be reset. + * By default, they are not kept. + */ +#ifndef PAGE_TARGET_STICKY +#define PAGE_TARGET_STICKY 0 +#endif +#define PAGE_STICKY (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY) + /* Modify the flags of a page and invalidate the code if necessary. The flag PAGE_WRITE_ORG is positioned automatically depending on PAGE_WRITE. The mmap_lock should already be held. 
*/ void page_set_flags(target_ulong start, target_ulong end, int flags) { target_ulong addr, len; - bool reset_target_data; + bool reset, inval_tb = false; /* This function should never be called with addresses outside the guest address space. If this assert fires, it probably indicates @@ -2326,50 +1409,34 @@ void page_set_flags(target_ulong start, target_ulong end, int flags) if (flags & PAGE_WRITE) { flags |= PAGE_WRITE_ORG; } - reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET); + reset = !(flags & PAGE_VALID) || (flags & PAGE_RESET); + if (reset) { + page_reset_target_data(start, end); + } flags &= ~PAGE_RESET; for (addr = start, len = end - start; len != 0; len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { - PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); + PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, true); - /* If the write protection bit is set, then we invalidate - the code inside. */ - if (!(p->flags & PAGE_WRITE) && - (flags & PAGE_WRITE) && - p->first_tb) { - tb_invalidate_phys_page(addr, 0); - } - if (reset_target_data) { - g_free(p->target_data); - p->target_data = NULL; - p->flags = flags; - } else { - /* Using mprotect on a page does not change MAP_ANON. */ - p->flags = (p->flags & PAGE_ANON) | flags; + /* + * If the page was executable, but is reset, or is no longer + * executable, or has become writable, then invalidate any code. + */ + if ((p->flags & PAGE_EXEC) + && (reset || + !(flags & PAGE_EXEC) || + (flags & ~p->flags & PAGE_WRITE))) { + inval_tb = true; } + /* Using mprotect on a page does not change sticky bits. */ + p->flags = (reset ? 0 : p->flags & PAGE_STICKY) | flags; } -} -void *page_get_target_data(target_ulong address) -{ - PageDesc *p = page_find(address >> TARGET_PAGE_BITS); - return p ? p->target_data : NULL; -} - -void *page_alloc_target_data(target_ulong address, size_t size) -{ - PageDesc *p = page_find(address >> TARGET_PAGE_BITS); - void *ret = NULL; - - if (p->flags & PAGE_VALID) { - ret = p->target_data; - if (!ret) { - p->target_data = ret = g_malloc0(size); - } + if (inval_tb) { + tb_invalidate_phys_range(start, end); } - return ret; } int page_check_range(target_ulong start, target_ulong len, int flags) @@ -2427,6 +1494,35 @@ int page_check_range(target_ulong start, target_ulong len, int flags) return 0; } +void page_protect(tb_page_addr_t page_addr) +{ + target_ulong addr; + PageDesc *p; + int prot; + + p = page_find(page_addr >> TARGET_PAGE_BITS); + if (p && (p->flags & PAGE_WRITE)) { + /* + * Force the host page as non writable (writes will have a page fault + + * mprotect overhead). + */ + page_addr &= qemu_host_page_mask; + prot = 0; + for (addr = page_addr; addr < page_addr + qemu_host_page_size; + addr += TARGET_PAGE_SIZE) { + + p = page_find(addr >> TARGET_PAGE_BITS); + if (!p) { + continue; + } + prot |= p->flags; + p->flags &= ~PAGE_WRITE; + } + mprotect(g2h_untagged(page_addr), qemu_host_page_size, + (prot & PAGE_BITS) & ~PAGE_WRITE); + } +} + /* called from signal handler: invalidate the code and unprotect the * page. Return 0 if the fault was not handled, 1 if it was handled, * and 2 if it was handled but the caller must cause the TB to be @@ -2478,12 +1574,8 @@ int page_unprotect(target_ulong address, uintptr_t pc) /* and since the content will be modified, we must invalidate the corresponding translated code. 
*/ - current_tb_invalidated |= tb_invalidate_phys_page(addr, pc); -#ifdef CONFIG_USER_ONLY - if (DEBUG_TB_CHECK_GATE) { - tb_invalidate_check(addr); - } -#endif + current_tb_invalidated |= + tb_invalidate_phys_page_unwind(addr, pc); } mprotect((void *)g2h_untagged(host_start), qemu_host_page_size, prot & PAGE_BITS); @@ -2497,6 +1589,24 @@ int page_unprotect(target_ulong address, uintptr_t pc) } #endif /* CONFIG_USER_ONLY */ +/* + * Called by generic code at e.g. cpu reset after cpu creation, + * therefore we must be prepared to allocate the jump cache. + */ +void tcg_flush_jmp_cache(CPUState *cpu) +{ + CPUJumpCache *jc = cpu->tb_jmp_cache; + + /* During early initialization, the cache may not yet be allocated. */ + if (unlikely(jc == NULL)) { + return; + } + + for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) { + qatomic_set(&jc->array[i].tb, NULL); + } +} + /* This is a wrapper for common code that can not use CONFIG_SOFTMMU */ void tcg_flush_softmmu_tlb(CPUState *cs) { diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c index 7b054719eb..3a04b02f62 100644 --- a/accel/tcg/translator.c +++ b/accel/tcg/translator.c @@ -43,20 +43,27 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest) return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0; } -void translator_loop(const TranslatorOps *ops, DisasContextBase *db, - CPUState *cpu, TranslationBlock *tb, int max_insns) +void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc, + const TranslatorOps *ops, DisasContextBase *db) { uint32_t cflags = tb_cflags(tb); bool plugin_enabled; /* Initialize DisasContext */ db->tb = tb; - db->pc_first = tb->pc; - db->pc_next = db->pc_first; + db->pc_first = pc; + db->pc_next = pc; db->is_jmp = DISAS_NEXT; db->num_insns = 0; db->max_insns = max_insns; db->singlestep_enabled = cflags & CF_SINGLE_STEP; + db->host_addr[0] = host_pc; + db->host_addr[1] = NULL; + +#ifdef CONFIG_USER_ONLY + page_protect(pc); +#endif ops->init_disas_context(db, cpu); tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ @@ -69,7 +76,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db, ops->tb_start(db, cpu); tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ - plugin_enabled = plugin_gen_tb_start(cpu, tb, cflags & CF_MEMI_ONLY); + plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY); while (true) { db->num_insns++; @@ -126,16 +133,120 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db, /* The disas_log hook may use these values rather than recompute. */ tb->size = db->pc_next - db->pc_first; tb->icount = db->num_insns; - tb->ihash = tb_code_hash_func(cpu->env_ptr, tb->pc, tb->size); + tb->ihash = tb_code_hash_func(cpu->env_ptr, db->pc_first, tb->size); #ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) && qemu_log_in_addr_range(db->pc_first)) { - FILE *logfile = qemu_log_lock(); - qemu_log("----------------\n"); - ops->disas_log(db, cpu); - qemu_log("\n"); - qemu_log_unlock(logfile); + FILE *logfile = qemu_log_trylock(); + if (logfile) { + fprintf(logfile, "----------------\n"); + ops->disas_log(db, cpu, logfile); + fprintf(logfile, "\n"); + qemu_log_unlock(logfile); + } } #endif } + +static void *translator_access(CPUArchState *env, DisasContextBase *db, + target_ulong pc, size_t len) +{ + void *host; + target_ulong base, end; + TranslationBlock *tb; + + tb = db->tb; + + /* Use slow path if first page is MMIO. 
*/ + if (unlikely(tb_page_addr0(tb) == -1)) { + return NULL; + } + + end = pc + len - 1; + if (likely(is_same_page(db, end))) { + host = db->host_addr[0]; + base = db->pc_first; + } else { + host = db->host_addr[1]; + base = TARGET_PAGE_ALIGN(db->pc_first); + if (host == NULL) { + tb_page_addr_t phys_page = + get_page_addr_code_hostp(env, base, &db->host_addr[1]); + /* We cannot handle MMIO as second page. */ + assert(phys_page != -1); + tb_set_page_addr1(tb, phys_page); +#ifdef CONFIG_USER_ONLY + page_protect(end); +#endif + host = db->host_addr[1]; + } + + /* Use slow path when crossing pages. */ + if (is_same_page(db, pc)) { + return NULL; + } + } + + tcg_debug_assert(pc >= base); + return host + (pc - base); +} + +uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc) +{ + uint8_t ret; + void *p = translator_access(env, db, pc, sizeof(ret)); + + if (p) { + plugin_insn_append(pc, p, sizeof(ret)); + return ldub_p(p); + } + ret = cpu_ldub_code(env, pc); + plugin_insn_append(pc, &ret, sizeof(ret)); + return ret; +} + +uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc) +{ + uint16_t ret, plug; + void *p = translator_access(env, db, pc, sizeof(ret)); + + if (p) { + plugin_insn_append(pc, p, sizeof(ret)); + return lduw_p(p); + } + ret = cpu_lduw_code(env, pc); + plug = tswap16(ret); + plugin_insn_append(pc, &plug, sizeof(ret)); + return ret; +} + +uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc) +{ + uint32_t ret, plug; + void *p = translator_access(env, db, pc, sizeof(ret)); + + if (p) { + plugin_insn_append(pc, p, sizeof(ret)); + return ldl_p(p); + } + ret = cpu_ldl_code(env, pc); + plug = tswap32(ret); + plugin_insn_append(pc, &plug, sizeof(ret)); + return ret; +} + +uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc) +{ + uint64_t ret, plug; + void *p = translator_access(env, db, pc, sizeof(ret)); + + if (p) { + plugin_insn_append(pc, p, sizeof(ret)); + return ldq_p(p); + } + ret = cpu_ldq_code(env, pc); + plug = tswap64(ret); + plugin_insn_append(pc, &plug, sizeof(ret)); + return ret; +} diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index 90d1a2d327..fb7d6ee9e9 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -27,48 +27,18 @@ #include "exec/helper-proto.h" #include "qemu/atomic128.h" #include "trace/trace-root.h" -#include "trace/mem.h" - -#undef EAX -#undef ECX -#undef EDX -#undef EBX -#undef ESP -#undef EBP -#undef ESI -#undef EDI -#undef EIP -#ifdef __linux__ -#include -#endif +#include "tcg/tcg-ldst.h" +#include "internal.h" __thread uintptr_t helper_retaddr; //#define DEBUG_SIGNAL -/* exit the current TB from a signal handler. The host registers are - restored in a state compatible with the CPU emulator +/* + * Adjust the pc to pass to cpu_restore_state; return the memop type. */ -static void QEMU_NORETURN cpu_exit_tb_from_sighandler(CPUState *cpu, - sigset_t *old_set) +MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write) { - /* XXX: use siglongjmp ? */ - sigprocmask(SIG_SETMASK, old_set, NULL); - cpu_loop_exit_noexc(cpu); -} - -/* 'pc' is the host PC at which the exception was raised. 'address' is - the effective address of the memory exception. 'is_write' is 1 if a - write caused the exception and otherwise 0'. 
'old_set' is the - signal set which should be restored */ -static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, - int is_write, sigset_t *old_set) -{ - CPUState *cpu = current_cpu; - CPUClass *cc; - unsigned long address = (unsigned long)info->si_addr; - MMUAccessType access_type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD; - switch (helper_retaddr) { default: /* @@ -77,7 +47,7 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, * pointer into the generated code that will unwind to the * correct guest pc. */ - pc = helper_retaddr; + *pc = helper_retaddr; break; case 0: @@ -97,7 +67,7 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, * Therefore, adjust to compensate for what will be done later * by cpu_restore_state_from_tb. */ - pc += GETPC_ADJ; + *pc += GETPC_ADJ; break; case 1: @@ -110,121 +80,97 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, * (and if the translator doesn't handle page boundaries correctly * there's little we can do about that here). Therefore, do not * trigger the unwinder. - * - * Like tb_gen_code, release the memory lock before cpu_loop_exit. */ - pc = 0; - access_type = MMU_INST_FETCH; - mmap_unlock(); - break; + *pc = 0; + return MMU_INST_FETCH; } - /* For synchronous signals we expect to be coming from the vCPU - * thread (so current_cpu should be valid) and either from running - * code or during translation which can fault as we cross pages. - * - * If neither is true then something has gone wrong and we should - * abort rather than try and restart the vCPU execution. - */ - if (!cpu || !cpu->running) { - printf("qemu:%s received signal outside vCPU context @ pc=0x%" - PRIxPTR "\n", __func__, pc); - abort(); + return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD; +} + +/** + * handle_sigsegv_accerr_write: + * @cpu: the cpu context + * @old_set: the sigset_t from the signal ucontext_t + * @host_pc: the host pc, adjusted for the signal + * @guest_addr: the guest address of the fault + * + * Return true if the write fault has been handled, and should be re-tried. + * + * Note that it is important that we don't call page_unprotect() unless + * this is really a "write to nonwritable page" fault, because + * page_unprotect() assumes that if it is called for an access to + * a page that's writable this means we had two threads racing and + * another thread got there first and already made the page writable; + * so we will retry the access. If we were to call page_unprotect() + * for some other kind of fault that should really be passed to the + * guest, we'd end up in an infinite loop of retrying the faulting access. + */ +bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set, + uintptr_t host_pc, abi_ptr guest_addr) +{ + switch (page_unprotect(guest_addr, host_pc)) { + case 0: + /* + * Fault not caused by a page marked unwritable to protect + * cached translations, must be the guest binary's problem. + */ + return false; + case 1: + /* + * Fault caused by protection of cached translation; TBs + * invalidated, so resume execution. + */ + return true; + case 2: + /* + * Fault caused by protection of cached translation, and the + * currently executing TB was modified and must be exited immediately. 
+ */ + sigprocmask(SIG_SETMASK, old_set, NULL); + cpu_loop_exit_noexc(cpu); + /* NORETURN */ + default: + g_assert_not_reached(); } - -#if defined(DEBUG_SIGNAL) - printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", - pc, address, is_write, *(unsigned long *)old_set); -#endif - /* XXX: locking issue */ - /* Note that it is important that we don't call page_unprotect() unless - * this is really a "write to nonwriteable page" fault, because - * page_unprotect() assumes that if it is called for an access to - * a page that's writeable this means we had two threads racing and - * another thread got there first and already made the page writeable; - * so we will retry the access. If we were to call page_unprotect() - * for some other kind of fault that should really be passed to the - * guest, we'd end up in an infinite loop of retrying the faulting - * access. - */ - if (is_write && info->si_signo == SIGSEGV && info->si_code == SEGV_ACCERR && - h2g_valid(address)) { - switch (page_unprotect(h2g(address), pc)) { - case 0: - /* Fault not caused by a page marked unwritable to protect - * cached translations, must be the guest binary's problem. - */ - break; - case 1: - /* Fault caused by protection of cached translation; TBs - * invalidated, so resume execution. Retain helper_retaddr - * for a possible second fault. - */ - return 1; - case 2: - /* Fault caused by protection of cached translation, and the - * currently executing TB was modified and must be exited - * immediately. Clear helper_retaddr for next execution. - */ - clear_helper_retaddr(); - cpu_exit_tb_from_sighandler(cpu, old_set); - /* NORETURN */ - - default: - g_assert_not_reached(); - } - } - - /* Convert forcefully to guest address space, invalid addresses - are still valid segv ones */ - address = h2g_nocheck(address); - - /* - * There is no way the target can handle this other than raising - * an exception. Undo signal and retaddr state prior to longjmp. - */ - sigprocmask(SIG_SETMASK, old_set, NULL); - clear_helper_retaddr(); - - cc = CPU_GET_CLASS(cpu); - cc->tcg_ops->tlb_fill(cpu, address, 0, access_type, - MMU_USER_IDX, false, pc); - g_assert_not_reached(); } static int probe_access_internal(CPUArchState *env, target_ulong addr, int fault_size, MMUAccessType access_type, bool nonfault, uintptr_t ra) { - int flags; + int acc_flag; + bool maperr; switch (access_type) { case MMU_DATA_STORE: - flags = PAGE_WRITE; + acc_flag = PAGE_WRITE_ORG; break; case MMU_DATA_LOAD: - flags = PAGE_READ; + acc_flag = PAGE_READ; break; case MMU_INST_FETCH: - flags = PAGE_EXEC; + acc_flag = PAGE_EXEC; break; default: g_assert_not_reached(); } - if (!guest_addr_valid_untagged(addr) || - page_check_range(addr, 1, flags) < 0) { - if (nonfault) { - return TLB_INVALID_MASK; - } else { - CPUState *cpu = env_cpu(env); - CPUClass *cc = CPU_GET_CLASS(cpu); - cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type, - MMU_USER_IDX, false, ra); - g_assert_not_reached(); + if (guest_addr_valid_untagged(addr)) { + int page_flags = page_get_flags(addr); + if (page_flags & acc_flag) { + return 0; /* success */ } + maperr = !(page_flags & PAGE_VALID); + } else { + maperr = true; } - return 0; + + if (nonfault) { + return TLB_INVALID_MASK; + } + + cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra); } int probe_access_flags(CPUArchState *env, target_ulong addr, @@ -250,935 +196,286 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size, return size ? 
g2h(env_cpu(env), addr) : NULL; } -#if defined(__i386__) - -#if defined(__NetBSD__) -#include -#include - -#define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP]) -#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO]) -#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR]) -#define MASK_sig(context) ((context)->uc_sigmask) -#define PAGE_FAULT_TRAP T_PAGEFLT -#elif defined(__FreeBSD__) || defined(__DragonFly__) -#include -#include - -#define EIP_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_eip)) -#define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno) -#define ERROR_sig(context) ((context)->uc_mcontext.mc_err) -#define MASK_sig(context) ((context)->uc_sigmask) -#define PAGE_FAULT_TRAP T_PAGEFLT -#elif defined(__OpenBSD__) -#include -#define EIP_sig(context) ((context)->sc_eip) -#define TRAP_sig(context) ((context)->sc_trapno) -#define ERROR_sig(context) ((context)->sc_err) -#define MASK_sig(context) ((context)->sc_mask) -#define PAGE_FAULT_TRAP T_PAGEFLT -#else -#define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP]) -#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO]) -#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR]) -#define MASK_sig(context) ((context)->uc_sigmask) -#define PAGE_FAULT_TRAP 0xe -#endif - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) +tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, + void **hostp) { - siginfo_t *info = pinfo; -#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__) - ucontext_t *uc = puc; -#elif defined(__OpenBSD__) - struct sigcontext *uc = puc; -#else - ucontext_t *uc = puc; -#endif - unsigned long pc; - int trapno; + int flags; -#ifndef REG_EIP -/* for glibc 2.1 */ -#define REG_EIP EIP -#define REG_ERR ERR -#define REG_TRAPNO TRAPNO -#endif - pc = EIP_sig(uc); - trapno = TRAP_sig(uc); - return handle_cpu_signal(pc, info, - trapno == PAGE_FAULT_TRAP ? 
- (ERROR_sig(uc) >> 1) & 1 : 0, - &MASK_sig(uc)); -} + flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0); + g_assert(flags == 0); -#elif defined(__x86_64__) - -#ifdef __NetBSD__ -#include -#define PC_sig(context) _UC_MACHINE_PC(context) -#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO]) -#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR]) -#define MASK_sig(context) ((context)->uc_sigmask) -#define PAGE_FAULT_TRAP T_PAGEFLT -#elif defined(__OpenBSD__) -#include -#define PC_sig(context) ((context)->sc_rip) -#define TRAP_sig(context) ((context)->sc_trapno) -#define ERROR_sig(context) ((context)->sc_err) -#define MASK_sig(context) ((context)->sc_mask) -#define PAGE_FAULT_TRAP T_PAGEFLT -#elif defined(__FreeBSD__) || defined(__DragonFly__) -#include -#include - -#define PC_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_rip)) -#define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno) -#define ERROR_sig(context) ((context)->uc_mcontext.mc_err) -#define MASK_sig(context) ((context)->uc_sigmask) -#define PAGE_FAULT_TRAP T_PAGEFLT -#else -#define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP]) -#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO]) -#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR]) -#define MASK_sig(context) ((context)->uc_sigmask) -#define PAGE_FAULT_TRAP 0xe -#endif - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - unsigned long pc; -#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__) - ucontext_t *uc = puc; -#elif defined(__OpenBSD__) - struct sigcontext *uc = puc; -#else - ucontext_t *uc = puc; -#endif - - pc = PC_sig(uc); - return handle_cpu_signal(pc, info, - TRAP_sig(uc) == PAGE_FAULT_TRAP ? 
- (ERROR_sig(uc) >> 1) & 1 : 0, - &MASK_sig(uc)); -} - -#elif defined(_ARCH_PPC) - -/*********************************************************************** - * signal context platform-specific definitions - * From Wine - */ -#ifdef linux -/* All Registers access - only for local access */ -#define REG_sig(reg_name, context) \ - ((context)->uc_mcontext.regs->reg_name) -/* Gpr Registers access */ -#define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context) -/* Program counter */ -#define IAR_sig(context) REG_sig(nip, context) -/* Machine State Register (Supervisor) */ -#define MSR_sig(context) REG_sig(msr, context) -/* Count register */ -#define CTR_sig(context) REG_sig(ctr, context) -/* User's integer exception register */ -#define XER_sig(context) REG_sig(xer, context) -/* Link register */ -#define LR_sig(context) REG_sig(link, context) -/* Condition register */ -#define CR_sig(context) REG_sig(ccr, context) - -/* Float Registers access */ -#define FLOAT_sig(reg_num, context) \ - (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num]) -#define FPSCR_sig(context) \ - (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4))) -/* Exception Registers access */ -#define DAR_sig(context) REG_sig(dar, context) -#define DSISR_sig(context) REG_sig(dsisr, context) -#define TRAP_sig(context) REG_sig(trap, context) -#endif /* linux */ - -#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) -#include -#define IAR_sig(context) ((context)->uc_mcontext.mc_srr0) -#define MSR_sig(context) ((context)->uc_mcontext.mc_srr1) -#define CTR_sig(context) ((context)->uc_mcontext.mc_ctr) -#define XER_sig(context) ((context)->uc_mcontext.mc_xer) -#define LR_sig(context) ((context)->uc_mcontext.mc_lr) -#define CR_sig(context) ((context)->uc_mcontext.mc_cr) -/* Exception Registers access */ -#define DAR_sig(context) ((context)->uc_mcontext.mc_dar) -#define DSISR_sig(context) ((context)->uc_mcontext.mc_dsisr) -#define TRAP_sig(context) ((context)->uc_mcontext.mc_exc) -#endif /* __FreeBSD__|| __FreeBSD_kernel__ */ - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; -#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) - ucontext_t *uc = puc; -#else - ucontext_t *uc = puc; -#endif - unsigned long pc; - int is_write; - - pc = IAR_sig(uc); - is_write = 0; -#if 0 - /* ppc 4xx case */ - if (DSISR_sig(uc) & 0x00800000) { - is_write = 1; + if (hostp) { + *hostp = g2h_untagged(addr); } -#else - if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000)) { - is_write = 1; - } -#endif - return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); + return addr; } -#elif defined(__alpha__) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) +void page_reset_target_data(target_ulong start, target_ulong end) { - siginfo_t *info = pinfo; - ucontext_t *uc = puc; - uint32_t *pc = uc->uc_mcontext.sc_pc; - uint32_t insn = *pc; - int is_write = 0; - - /* XXX: need kernel patch to get write flag faster */ - switch (insn >> 26) { - case 0x0d: /* stw */ - case 0x0e: /* stb */ - case 0x0f: /* stq_u */ - case 0x24: /* stf */ - case 0x25: /* stg */ - case 0x26: /* sts */ - case 0x27: /* stt */ - case 0x2c: /* stl */ - case 0x2d: /* stq */ - case 0x2e: /* stl_c */ - case 0x2f: /* stq_c */ - is_write = 1; - } - - return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); -} -#elif defined(__sparc__) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - int is_write; - uint32_t insn; -#if 
!defined(__arch64__) || defined(CONFIG_SOLARIS) - uint32_t *regs = (uint32_t *)(info + 1); - void *sigmask = (regs + 20); - /* XXX: is there a standard glibc define ? */ - unsigned long pc = regs[1]; -#else -#ifdef __linux__ - struct sigcontext *sc = puc; - unsigned long pc = sc->sigc_regs.tpc; - void *sigmask = (void *)sc->sigc_mask; -#elif defined(__OpenBSD__) - struct sigcontext *uc = puc; - unsigned long pc = uc->sc_pc; - void *sigmask = (void *)(long)uc->sc_mask; -#elif defined(__NetBSD__) - ucontext_t *uc = puc; - unsigned long pc = _UC_MACHINE_PC(uc); - void *sigmask = (void *)&uc->uc_sigmask; -#endif -#endif - - /* XXX: need kernel patch to get write flag faster */ - is_write = 0; - insn = *(uint32_t *)pc; - if ((insn >> 30) == 3) { - switch ((insn >> 19) & 0x3f) { - case 0x05: /* stb */ - case 0x15: /* stba */ - case 0x06: /* sth */ - case 0x16: /* stha */ - case 0x04: /* st */ - case 0x14: /* sta */ - case 0x07: /* std */ - case 0x17: /* stda */ - case 0x0e: /* stx */ - case 0x1e: /* stxa */ - case 0x24: /* stf */ - case 0x34: /* stfa */ - case 0x27: /* stdf */ - case 0x37: /* stdfa */ - case 0x26: /* stqf */ - case 0x36: /* stqfa */ - case 0x25: /* stfsr */ - case 0x3c: /* casa */ - case 0x3e: /* casxa */ - is_write = 1; - break; - } - } - return handle_cpu_signal(pc, info, is_write, sigmask); -} - -#elif defined(__arm__) - -#if defined(__NetBSD__) -#include -#include -#endif - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; -#if defined(__NetBSD__) - ucontext_t *uc = puc; - siginfo_t *si = pinfo; -#else - ucontext_t *uc = puc; -#endif - unsigned long pc; - uint32_t fsr; - int is_write; - -#if defined(__NetBSD__) - pc = uc->uc_mcontext.__gregs[_REG_R15]; -#elif defined(__GLIBC__) && (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3)) - pc = uc->uc_mcontext.gregs[R15]; -#else - pc = uc->uc_mcontext.arm_pc; -#endif - -#ifdef __NetBSD__ - fsr = si->si_trap; -#else - fsr = uc->uc_mcontext.error_code; -#endif - /* - * In the FSR, bit 11 is WnR, assuming a v6 or - * later processor. On v5 we will always report - * this as a read, which will fail later. - */ - is_write = extract32(fsr, 11, 1); - return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); -} - -#elif defined(__aarch64__) - -#if defined(__NetBSD__) - -#include -#include - -int cpu_signal_handler(int host_signum, void *pinfo, void *puc) -{ - ucontext_t *uc = puc; - siginfo_t *si = pinfo; - unsigned long pc; - int is_write; - uint32_t esr; - - pc = uc->uc_mcontext.__gregs[_REG_PC]; - esr = si->si_trap; +#ifdef TARGET_PAGE_DATA_SIZE + target_ulong addr, len; /* - * siginfo_t::si_trap is the ESR value, for data aborts ESR.EC - * is 0b10010x: then bit 6 is the WnR bit + * This function should never be called with addresses outside the + * guest address space. If this assert fires, it probably indicates + * a missing call to h2g_valid. 
*/ - is_write = extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1; - return handle_cpu_signal(pc, si, is_write, &uc->uc_sigmask); -} + assert(end - 1 <= GUEST_ADDR_MAX); + assert(start < end); + assert_memory_lock(); -#else + start = start & TARGET_PAGE_MASK; + end = TARGET_PAGE_ALIGN(end); -#ifndef ESR_MAGIC -/* Pre-3.16 kernel headers don't have these, so provide fallback definitions */ -#define ESR_MAGIC 0x45535201 -struct esr_context { - struct _aarch64_ctx head; - uint64_t esr; -}; + for (addr = start, len = end - start; + len != 0; + len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { + PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); + + g_free(p->target_data); + p->target_data = NULL; + } #endif - -static inline struct _aarch64_ctx *first_ctx(ucontext_t *uc) -{ - return (struct _aarch64_ctx *)&uc->uc_mcontext.__reserved; } -static inline struct _aarch64_ctx *next_ctx(struct _aarch64_ctx *hdr) +#ifdef TARGET_PAGE_DATA_SIZE +void *page_get_target_data(target_ulong address) { - return (struct _aarch64_ctx *)((char *)hdr + hdr->size); + PageDesc *p = page_find(address >> TARGET_PAGE_BITS); + void *ret = p->target_data; + + if (!ret) { + ret = g_malloc0(TARGET_PAGE_DATA_SIZE); + p->target_data = ret; + } + return ret; } - -int cpu_signal_handler(int host_signum, void *pinfo, void *puc) -{ - siginfo_t *info = pinfo; - ucontext_t *uc = puc; - uintptr_t pc = uc->uc_mcontext.pc; - bool is_write; - struct _aarch64_ctx *hdr; - struct esr_context const *esrctx = NULL; - - /* Find the esr_context, which has the WnR bit in it */ - for (hdr = first_ctx(uc); hdr->magic; hdr = next_ctx(hdr)) { - if (hdr->magic == ESR_MAGIC) { - esrctx = (struct esr_context const *)hdr; - break; - } - } - - if (esrctx) { - /* For data aborts ESR.EC is 0b10010x: then bit 6 is the WnR bit */ - uint64_t esr = esrctx->esr; - is_write = extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1; - } else { - /* - * Fall back to parsing instructions; will only be needed - * for really ancient (pre-3.16) kernels. - */ - uint32_t insn = *(uint32_t *)pc; - - is_write = ((insn & 0xbfff0000) == 0x0c000000 /* C3.3.1 */ - || (insn & 0xbfe00000) == 0x0c800000 /* C3.3.2 */ - || (insn & 0xbfdf0000) == 0x0d000000 /* C3.3.3 */ - || (insn & 0xbfc00000) == 0x0d800000 /* C3.3.4 */ - || (insn & 0x3f400000) == 0x08000000 /* C3.3.6 */ - || (insn & 0x3bc00000) == 0x39000000 /* C3.3.13 */ - || (insn & 0x3fc00000) == 0x3d800000 /* ... 128bit */ - /* Ignore bits 10, 11 & 21, controlling indexing. */ - || (insn & 0x3bc00000) == 0x38000000 /* C3.3.8-12 */ - || (insn & 0x3fe00000) == 0x3c800000 /* ... 128bit */ - /* Ignore bits 23 & 24, controlling indexing. */ - || (insn & 0x3a400000) == 0x28000000); /* C3.3.7,14-16 */ - } - return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); -} -#endif - -#elif defined(__s390__) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - ucontext_t *uc = puc; - unsigned long pc; - uint16_t *pinsn; - int is_write = 0; - - pc = uc->uc_mcontext.psw.addr; - - /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead - of the normal 2 arguments. The 3rd argument contains the "int_code" - from the hardware which does in fact contain the is_write value. - The rt signal handler, as far as I can tell, does not give this value - at all. Not that we could get to it from here even if it were. */ - /* ??? This is not even close to complete, since it ignores all - of the read-modify-write instructions. 
*/ - pinsn = (uint16_t *)pc; - switch (pinsn[0] >> 8) { - case 0x50: /* ST */ - case 0x42: /* STC */ - case 0x40: /* STH */ - is_write = 1; - break; - case 0xc4: /* RIL format insns */ - switch (pinsn[0] & 0xf) { - case 0xf: /* STRL */ - case 0xb: /* STGRL */ - case 0x7: /* STHRL */ - is_write = 1; - } - break; - case 0xe3: /* RXY format insns */ - switch (pinsn[2] & 0xff) { - case 0x50: /* STY */ - case 0x24: /* STG */ - case 0x72: /* STCY */ - case 0x70: /* STHY */ - case 0x8e: /* STPQ */ - case 0x3f: /* STRVH */ - case 0x3e: /* STRV */ - case 0x2f: /* STRVG */ - is_write = 1; - } - break; - } - return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); -} - -#elif defined(__mips__) - -#if defined(__misp16) || defined(__mips_micromips) -#error "Unsupported encoding" -#endif - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - ucontext_t *uc = puc; - uintptr_t pc = uc->uc_mcontext.pc; - uint32_t insn = *(uint32_t *)pc; - int is_write = 0; - - /* Detect all store instructions at program counter. */ - switch((insn >> 26) & 077) { - case 050: /* SB */ - case 051: /* SH */ - case 052: /* SWL */ - case 053: /* SW */ - case 054: /* SDL */ - case 055: /* SDR */ - case 056: /* SWR */ - case 070: /* SC */ - case 071: /* SWC1 */ - case 074: /* SCD */ - case 075: /* SDC1 */ - case 077: /* SD */ -#if !defined(__mips_isa_rev) || __mips_isa_rev < 6 - case 072: /* SWC2 */ - case 076: /* SDC2 */ -#endif - is_write = 1; - break; - case 023: /* COP1X */ - /* Required in all versions of MIPS64 since - MIPS64r1 and subsequent versions of MIPS32r2. */ - switch (insn & 077) { - case 010: /* SWXC1 */ - case 011: /* SDXC1 */ - case 015: /* SUXC1 */ - is_write = 1; - } - break; - } - - return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); -} - -#elif defined(__riscv) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - ucontext_t *uc = puc; - greg_t pc = uc->uc_mcontext.__gregs[REG_PC]; - uint32_t insn = *(uint32_t *)pc; - int is_write = 0; - - /* Detect store by reading the instruction at the program - counter. Note: we currently only generate 32-bit - instructions so we thus only detect 32-bit stores */ - switch (((insn >> 0) & 0b11)) { - case 3: - switch (((insn >> 2) & 0b11111)) { - case 8: - switch (((insn >> 12) & 0b111)) { - case 0: /* sb */ - case 1: /* sh */ - case 2: /* sw */ - case 3: /* sd */ - case 4: /* sq */ - is_write = 1; - break; - default: - break; - } - break; - case 9: - switch (((insn >> 12) & 0b111)) { - case 2: /* fsw */ - case 3: /* fsd */ - case 4: /* fsq */ - is_write = 1; - break; - default: - break; - } - break; - default: - break; - } - } - - /* Check for compressed instructions */ - switch (((insn >> 13) & 0b111)) { - case 7: - switch (insn & 0b11) { - case 0: /*c.sd */ - case 2: /* c.sdsp */ - is_write = 1; - break; - default: - break; - } - break; - case 6: - switch (insn & 0b11) { - case 0: /* c.sw */ - case 3: /* c.swsp */ - is_write = 1; - break; - default: - break; - } - break; - default: - break; - } - - return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); -} - -#else - -#error host CPU specific signal handler needed - #endif /* The softmmu versions of these helpers are in cputlb.c. */ -uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr) +/* + * Verify that we have passed the correct MemOp to the correct function. 
+ * + * We could present one function to target code, and dispatch based on + * the MemOp, but so far we have worked hard to avoid an indirect function + * call along the memory path. + */ +static void validate_memop(MemOpIdx oi, MemOp expected) { - uint32_t ret; - uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, false); +#ifdef CONFIG_DEBUG_TCG + MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); + assert(have == expected); +#endif +} - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = ldub_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); +void helper_unaligned_ld(CPUArchState *env, target_ulong addr) +{ + cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC()); +} + +void helper_unaligned_st(CPUArchState *env, target_ulong addr) +{ + cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC()); +} + +static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t ra, MMUAccessType type) +{ + MemOp mop = get_memop(oi); + int a_bits = get_alignment_bits(mop); + void *ret; + + /* Enforce guest required alignment. */ + if (unlikely(addr & ((1 << a_bits) - 1))) { + cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra); + } + + ret = g2h(env_cpu(env), addr); + set_helper_retaddr(ra); return ret; } -int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr) +uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - int ret; - uint16_t meminfo = trace_mem_get_info(MO_SB, MMU_USER_IDX, false); + void *haddr; + uint8_t ret; - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = ldsb_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); - return ret; -} - -uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr) -{ - uint32_t ret; - uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, false); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = lduw_be_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); - return ret; -} - -int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr) -{ - int ret; - uint16_t meminfo = trace_mem_get_info(MO_BESW, MMU_USER_IDX, false); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = ldsw_be_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); - return ret; -} - -uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr) -{ - uint32_t ret; - uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, false); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = ldl_be_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); - return ret; -} - -uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr) -{ - uint64_t ret; - uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, false); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = ldq_be_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); - return ret; -} - -uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr) -{ - uint32_t ret; - uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, false); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = lduw_le_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); - return ret; -} - -int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr) -{ - int ret; - uint16_t meminfo = trace_mem_get_info(MO_LESW, MMU_USER_IDX, false); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret 
= ldsw_le_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); - return ret; -} - -uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr) -{ - uint32_t ret; - uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, false); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = ldl_le_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); - return ret; -} - -uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr) -{ - uint64_t ret; - uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, false); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = ldq_le_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); - return ret; -} - -uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) -{ - uint32_t ret; - - set_helper_retaddr(retaddr); - ret = cpu_ldub_data(env, ptr); + validate_memop(oi, MO_UB); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldub_p(haddr); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return ret; } -int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - int ret; + void *haddr; + uint16_t ret; - set_helper_retaddr(retaddr); - ret = cpu_ldsb_data(env, ptr); + validate_memop(oi, MO_BEUW); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = lduw_be_p(haddr); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return ret; } -uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { + void *haddr; uint32_t ret; - set_helper_retaddr(retaddr); - ret = cpu_lduw_be_data(env, ptr); + validate_memop(oi, MO_BEUL); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldl_be_p(haddr); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return ret; } -int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) -{ - int ret; - - set_helper_retaddr(retaddr); - ret = cpu_ldsw_be_data(env, ptr); - clear_helper_retaddr(); - return ret; -} - -uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) -{ - uint32_t ret; - - set_helper_retaddr(retaddr); - ret = cpu_ldl_be_data(env, ptr); - clear_helper_retaddr(); - return ret; -} - -uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { + void *haddr; uint64_t ret; - set_helper_retaddr(retaddr); - ret = cpu_ldq_be_data(env, ptr); + validate_memop(oi, MO_BEUQ); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldq_be_p(haddr); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return ret; } -uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { + void *haddr; + uint16_t ret; + + validate_memop(oi, MO_LEUW); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = lduw_le_p(haddr); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return ret; +} + +uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ 
+ void *haddr; uint32_t ret; - set_helper_retaddr(retaddr); - ret = cpu_lduw_le_data(env, ptr); + validate_memop(oi, MO_LEUL); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldl_le_p(haddr); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return ret; } -int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) -{ - int ret; - - set_helper_retaddr(retaddr); - ret = cpu_ldsw_le_data(env, ptr); - clear_helper_retaddr(); - return ret; -} - -uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) -{ - uint32_t ret; - - set_helper_retaddr(retaddr); - ret = cpu_ldl_le_data(env, ptr); - clear_helper_retaddr(); - return ret; -} - -uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { + void *haddr; uint64_t ret; - set_helper_retaddr(retaddr); - ret = cpu_ldq_le_data(env, ptr); + validate_memop(oi, MO_LEUQ); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldq_le_p(haddr); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return ret; } -void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val) +void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val, + MemOpIdx oi, uintptr_t ra) { - uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, true); + void *haddr; - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - stb_p(g2h(env_cpu(env), ptr), val); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); -} - -void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val) -{ - uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, true); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - stw_be_p(g2h(env_cpu(env), ptr), val); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); -} - -void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val) -{ - uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, true); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - stl_be_p(g2h(env_cpu(env), ptr), val); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); -} - -void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val) -{ - uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, true); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - stq_be_p(g2h(env_cpu(env), ptr), val); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); -} - -void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val) -{ - uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, true); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - stw_le_p(g2h(env_cpu(env), ptr), val); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); -} - -void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val) -{ - uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, true); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - stl_le_p(g2h(env_cpu(env), ptr), val); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); -} - -void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val) -{ - uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, true); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - stq_le_p(g2h(env_cpu(env), ptr), val); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); -} - -void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr, - uint32_t val, uintptr_t retaddr) -{ - 
set_helper_retaddr(retaddr); - cpu_stb_data(env, ptr, val); + validate_memop(oi, MO_UB); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stb_p(haddr, val); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr, - uint32_t val, uintptr_t retaddr) +void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, + MemOpIdx oi, uintptr_t ra) { - set_helper_retaddr(retaddr); - cpu_stw_be_data(env, ptr, val); + void *haddr; + + validate_memop(oi, MO_BEUW); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stw_be_p(haddr, val); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr, - uint32_t val, uintptr_t retaddr) +void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, + MemOpIdx oi, uintptr_t ra) { - set_helper_retaddr(retaddr); - cpu_stl_be_data(env, ptr, val); + void *haddr; + + validate_memop(oi, MO_BEUL); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stl_be_p(haddr, val); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr, - uint64_t val, uintptr_t retaddr) +void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, + MemOpIdx oi, uintptr_t ra) { - set_helper_retaddr(retaddr); - cpu_stq_be_data(env, ptr, val); + void *haddr; + + validate_memop(oi, MO_BEUQ); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stq_be_p(haddr, val); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr, - uint32_t val, uintptr_t retaddr) +void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, + MemOpIdx oi, uintptr_t ra) { - set_helper_retaddr(retaddr); - cpu_stw_le_data(env, ptr, val); + void *haddr; + + validate_memop(oi, MO_LEUW); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stw_le_p(haddr, val); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr, - uint32_t val, uintptr_t retaddr) +void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, + MemOpIdx oi, uintptr_t ra) { - set_helper_retaddr(retaddr); - cpu_stl_le_data(env, ptr, val); + void *haddr; + + validate_memop(oi, MO_LEUL); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stl_le_p(haddr, val); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr, - uint64_t val, uintptr_t retaddr) +void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, + MemOpIdx oi, uintptr_t ra) { - set_helper_retaddr(retaddr); - cpu_stq_le_data(env, ptr, val); + void *haddr; + + validate_memop(oi, MO_LEUQ); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stq_le_p(haddr, val); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr) @@ -1221,20 +518,33 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr) return ret; } +#include "ldst_common.c.inc" + /* * Do not allow unaligned operations to proceed. Return the host address. * * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE. 
*/ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, int size, int prot, + MemOpIdx oi, int size, int prot, uintptr_t retaddr) { + MemOp mop = get_memop(oi); + int a_bits = get_alignment_bits(mop); + void *ret; + + /* Enforce guest required alignment. */ + if (unlikely(addr & ((1 << a_bits) - 1))) { + MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE; + cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr); + } + /* Enforce qemu required alignment. */ if (unlikely(addr & (size - 1))) { cpu_loop_exit_atomic(env_cpu(env), retaddr); } - void *ret = g2h(env_cpu(env), addr); + + ret = g2h(env_cpu(env), addr); set_helper_retaddr(retaddr); return ret; } @@ -1249,7 +559,6 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, #define ATOMIC_NAME(X) \ glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu) #define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0) -#define ATOMIC_MMU_IDX MMU_USER_IDX #define DATA_SIZE 1 #include "atomic_template.h" diff --git a/audio/alsaaudio.c b/audio/alsaaudio.c index 2b9789e647..7a2a94cd42 100644 --- a/audio/alsaaudio.c +++ b/audio/alsaaudio.c @@ -72,7 +72,7 @@ struct alsa_params_obt { snd_pcm_uframes_t samples; }; -static void GCC_FMT_ATTR (2, 3) alsa_logerr (int err, const char *fmt, ...) +static void G_GNUC_PRINTF (2, 3) alsa_logerr (int err, const char *fmt, ...) { va_list ap; @@ -83,7 +83,7 @@ static void GCC_FMT_ATTR (2, 3) alsa_logerr (int err, const char *fmt, ...) AUD_log (AUDIO_CAP, "Reason: %s\n", snd_strerror (err)); } -static void GCC_FMT_ATTR (3, 4) alsa_logerr2 ( +static void G_GNUC_PRINTF (3, 4) alsa_logerr2 ( int err, const char *typ, const char *fmt, @@ -602,6 +602,42 @@ static int alsa_open(bool in, struct alsa_params_req *req, return -1; } +static size_t alsa_buffer_get_free(HWVoiceOut *hw) +{ + ALSAVoiceOut *alsa = (ALSAVoiceOut *)hw; + snd_pcm_sframes_t avail; + size_t alsa_free, generic_free, generic_in_use; + + avail = snd_pcm_avail_update(alsa->handle); + if (avail < 0) { + if (avail == -EPIPE) { + if (!alsa_recover(alsa->handle)) { + avail = snd_pcm_avail_update(alsa->handle); + } + } + if (avail < 0) { + alsa_logerr(avail, + "Could not obtain number of available frames\n"); + avail = 0; + } + } + + alsa_free = avail * hw->info.bytes_per_frame; + generic_free = audio_generic_buffer_get_free(hw); + generic_in_use = hw->samples * hw->info.bytes_per_frame - generic_free; + if (generic_in_use) { + /* + * This code can only be reached in the unlikely case that + * snd_pcm_avail_update() returned a larger number of frames + * than snd_pcm_writei() could write. Make sure that all + * remaining bytes in the generic buffer can be written. + */ + alsa_free = alsa_free > generic_in_use ? 
alsa_free - generic_in_use : 0; + } + + return alsa_free; +} + static size_t alsa_write(HWVoiceOut *hw, void *buf, size_t len) { ALSAVoiceOut *alsa = (ALSAVoiceOut *) hw; @@ -916,6 +952,7 @@ static struct audio_pcm_ops alsa_pcm_ops = { .init_out = alsa_init_out, .fini_out = alsa_fini_out, .write = alsa_write, + .buffer_get_free = alsa_buffer_get_free, .run_buffer_out = audio_generic_run_buffer_out, .enable_out = alsa_enable_out, diff --git a/audio/audio.c b/audio/audio.c index 54a153c0ef..065602ce1b 100644 --- a/audio/audio.c +++ b/audio/audio.c @@ -32,7 +32,8 @@ #include "qapi/qapi-visit-audio.h" #include "qemu/cutils.h" #include "qemu/module.h" -#include "qemu-common.h" +#include "qemu/help_option.h" +#include "sysemu/sysemu.h" #include "sysemu/replay.h" #include "sysemu/runstate.h" #include "ui/qemu-spice.h" @@ -72,20 +73,24 @@ void audio_driver_register(audio_driver *drv) audio_driver *audio_driver_lookup(const char *name) { struct audio_driver *d; + Error *local_err = NULL; + int rv; QLIST_FOREACH(d, &audio_drivers, next) { if (strcmp(name, d->name) == 0) { return d; } } - - audio_module_load_one(name); - QLIST_FOREACH(d, &audio_drivers, next) { - if (strcmp(name, d->name) == 0) { - return d; + rv = audio_module_load(name, &local_err); + if (rv > 0) { + QLIST_FOREACH(d, &audio_drivers, next) { + if (strcmp(name, d->name) == 0) { + return d; + } } + } else if (rv < 0) { + error_report_err(local_err); } - return NULL; } @@ -117,7 +122,6 @@ int audio_bug (const char *funcname, int cond) AUD_log (NULL, "I am sorry\n"); } AUD_log (NULL, "Context:\n"); - abort(); } return cond; @@ -548,65 +552,45 @@ static size_t audio_pcm_hw_get_live_in(HWVoiceIn *hw) return live; } -static void audio_pcm_hw_clip_out(HWVoiceOut *hw, void *pcm_buf, size_t len) +static size_t audio_pcm_hw_conv_in(HWVoiceIn *hw, void *pcm_buf, size_t samples) { - size_t clipped = 0; - size_t pos = hw->mix_buf->pos; + size_t conv = 0; + STSampleBuffer *conv_buf = hw->conv_buf; - while (len) { - st_sample *src = hw->mix_buf->samples + pos; - uint8_t *dst = advance(pcm_buf, clipped * hw->info.bytes_per_frame); - size_t samples_till_end_of_buf = hw->mix_buf->size - pos; - size_t samples_to_clip = MIN(len, samples_till_end_of_buf); + while (samples) { + uint8_t *src = advance(pcm_buf, conv * hw->info.bytes_per_frame); + size_t proc = MIN(samples, conv_buf->size - conv_buf->pos); - hw->clip(dst, src, samples_to_clip); - - pos = (pos + samples_to_clip) % hw->mix_buf->size; - len -= samples_to_clip; - clipped += samples_to_clip; + hw->conv(conv_buf->samples + conv_buf->pos, src, proc); + conv_buf->pos = (conv_buf->pos + proc) % conv_buf->size; + samples -= proc; + conv += proc; } + + return conv; } /* * Soft voice (capture) */ -static size_t audio_pcm_sw_get_rpos_in(SWVoiceIn *sw) -{ - HWVoiceIn *hw = sw->hw; - ssize_t live = hw->total_samples_captured - sw->total_hw_samples_acquired; - ssize_t rpos; - - if (audio_bug(__func__, live < 0 || live > hw->conv_buf->size)) { - dolog("live=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size); - return 0; - } - - rpos = hw->conv_buf->pos - live; - if (rpos >= 0) { - return rpos; - } else { - return hw->conv_buf->size + rpos; - } -} - static size_t audio_pcm_sw_read(SWVoiceIn *sw, void *buf, size_t size) { HWVoiceIn *hw = sw->hw; size_t samples, live, ret = 0, swlim, isamp, osamp, rpos, total = 0; struct st_sample *src, *dst = sw->buf; - rpos = audio_pcm_sw_get_rpos_in(sw) % hw->conv_buf->size; - live = hw->total_samples_captured - sw->total_hw_samples_acquired; + if (!live) { + return 0; 
+ } if (audio_bug(__func__, live > hw->conv_buf->size)) { dolog("live_in=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size); return 0; } + rpos = audio_ring_posb(hw->conv_buf->pos, live, hw->conv_buf->size); + samples = size / sw->info.bytes_per_frame; - if (!live) { - return 0; - } swlim = (live * sw->ratio) >> 32; swlim = MIN (swlim, samples); @@ -632,7 +616,7 @@ static size_t audio_pcm_sw_read(SWVoiceIn *sw, void *buf, size_t size) total += isamp; } - if (hw->pcm_ops && !hw->pcm_ops->volume_in) { + if (!hw->pcm_ops->volume_in) { mixeng_volume (sw->buf, ret, &sw->vol); } @@ -683,12 +667,38 @@ static size_t audio_pcm_hw_get_live_out (HWVoiceOut *hw, int *nb_live) return 0; } +static size_t audio_pcm_hw_get_free(HWVoiceOut *hw) +{ + return (hw->pcm_ops->buffer_get_free ? hw->pcm_ops->buffer_get_free(hw) : + INT_MAX) / hw->info.bytes_per_frame; +} + +static void audio_pcm_hw_clip_out(HWVoiceOut *hw, void *pcm_buf, size_t len) +{ + size_t clipped = 0; + size_t pos = hw->mix_buf->pos; + + while (len) { + st_sample *src = hw->mix_buf->samples + pos; + uint8_t *dst = advance(pcm_buf, clipped * hw->info.bytes_per_frame); + size_t samples_till_end_of_buf = hw->mix_buf->size - pos; + size_t samples_to_clip = MIN(len, samples_till_end_of_buf); + + hw->clip(dst, src, samples_to_clip); + + pos = (pos + samples_to_clip) % hw->mix_buf->size; + len -= samples_to_clip; + clipped += samples_to_clip; + } +} + /* * Soft voice (playback) */ static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t size) { - size_t hwsamples, samples, isamp, osamp, wpos, live, dead, left, swlim, blck; + size_t hwsamples, samples, isamp, osamp, wpos, live, dead, left, blck; + size_t hw_free; size_t ret = 0, pos = 0, total = 0; if (!sw) { @@ -711,27 +721,28 @@ static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t size) } wpos = (sw->hw->mix_buf->pos + live) % hwsamples; - samples = size / sw->info.bytes_per_frame; dead = hwsamples - live; - swlim = ((int64_t) dead << 32) / sw->ratio; - swlim = MIN (swlim, samples); - if (swlim) { - sw->conv (sw->buf, buf, swlim); + hw_free = audio_pcm_hw_get_free(sw->hw); + hw_free = hw_free > live ? 
hw_free - live : 0; + samples = ((int64_t)MIN(dead, hw_free) << 32) / sw->ratio; + samples = MIN(samples, size / sw->info.bytes_per_frame); + if (samples) { + sw->conv(sw->buf, buf, samples); - if (sw->hw->pcm_ops && !sw->hw->pcm_ops->volume_out) { - mixeng_volume (sw->buf, swlim, &sw->vol); + if (!sw->hw->pcm_ops->volume_out) { + mixeng_volume(sw->buf, samples, &sw->vol); } } - while (swlim) { + while (samples) { dead = hwsamples - live; left = hwsamples - wpos; blck = MIN (dead, left); if (!blck) { break; } - isamp = swlim; + isamp = samples; osamp = blck; st_rate_flow_mix ( sw->rate, @@ -741,7 +752,7 @@ static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t size) &osamp ); ret += isamp; - swlim -= isamp; + samples -= isamp; pos += isamp; live += osamp; wpos = (wpos + osamp) % hwsamples; @@ -979,6 +990,18 @@ void AUD_set_active_in (SWVoiceIn *sw, int on) } } +/** + * audio_frontend_frames_in() - returns the number of frames the resampling + * code generates from frames_in frames + * + * @sw: audio recording frontend + * @frames_in: number of frames + */ +static size_t audio_frontend_frames_in(SWVoiceIn *sw, size_t frames_in) +{ + return (int64_t)frames_in * sw->ratio >> 32; +} + static size_t audio_get_avail (SWVoiceIn *sw) { size_t live; @@ -995,12 +1018,24 @@ static size_t audio_get_avail (SWVoiceIn *sw) } ldebug ( - "%s: get_avail live %zu ret %" PRId64 "\n", + "%s: get_avail live %zu frontend frames %zu\n", SW_NAME (sw), - live, (((int64_t) live << 32) / sw->ratio) * sw->info.bytes_per_frame + live, audio_frontend_frames_in(sw, live) ); - return (((int64_t) live << 32) / sw->ratio) * sw->info.bytes_per_frame; + return live; +} + +/** + * audio_frontend_frames_out() - returns the number of frames needed to + * get frames_out frames after resampling + * + * @sw: audio playback frontend + * @frames_out: number of frames + */ +static size_t audio_frontend_frames_out(SWVoiceOut *sw, size_t frames_out) +{ + return ((int64_t)frames_out << 32) / sw->ratio; } static size_t audio_get_free(SWVoiceOut *sw) @@ -1022,13 +1057,11 @@ static size_t audio_get_free(SWVoiceOut *sw) dead = sw->hw->mix_buf->size - live; #ifdef DEBUG_OUT - dolog ("%s: get_free live %zu dead %zu ret %" PRId64 "\n", - SW_NAME (sw), - live, dead, (((int64_t) dead << 32) / sw->ratio) * - sw->info.bytes_per_frame); + dolog("%s: get_free live %zu dead %zu frontend frames %zu\n", + SW_NAME(sw), live, dead, audio_frontend_frames_out(sw, dead)); #endif - return (((int64_t) dead << 32) / sw->ratio) * sw->info.bytes_per_frame; + return dead; } static void audio_capture_mix_and_clear(HWVoiceOut *hw, size_t rpos, @@ -1111,8 +1144,12 @@ static void audio_run_out (AudioState *s) HWVoiceOut *hw = NULL; SWVoiceOut *sw; - if (!audio_get_pdo_out(s->dev)->mixing_engine) { - while ((hw = audio_pcm_hw_find_any_enabled_out(s, hw))) { + while ((hw = audio_pcm_hw_find_any_enabled_out(s, hw))) { + size_t played, live, prev_rpos; + size_t hw_free = audio_pcm_hw_get_free(hw); + int nb_live; + + if (!audio_get_pdo_out(s->dev)->mixing_engine) { /* there is exactly 1 sw for each hw with no mixeng */ sw = hw->sw_head.lh_first; @@ -1125,15 +1162,34 @@ static void audio_run_out (AudioState *s) } if (sw->active) { - sw->callback.fn(sw->callback.opaque, INT_MAX); + sw->callback.fn(sw->callback.opaque, + hw_free * sw->info.bytes_per_frame); + } + + if (hw->pcm_ops->run_buffer_out) { + hw->pcm_ops->run_buffer_out(hw); + } + + continue; + } + + for (sw = hw->sw_head.lh_first; sw; sw = sw->entries.le_next) { + if (sw->active) { + size_t sw_free = 
audio_get_free(sw); + size_t free; + + if (hw_free > sw->total_hw_samples_mixed) { + free = audio_frontend_frames_out(sw, + MIN(sw_free, hw_free - sw->total_hw_samples_mixed)); + } else { + free = 0; + } + if (free > 0) { + sw->callback.fn(sw->callback.opaque, + free * sw->info.bytes_per_frame); + } } } - return; - } - - while ((hw = audio_pcm_hw_find_any_enabled_out(s, hw))) { - size_t played, live, prev_rpos, free; - int nb_live; live = audio_pcm_hw_get_live_out (hw, &nb_live); if (!nb_live) { @@ -1163,14 +1219,6 @@ static void audio_run_out (AudioState *s) } if (!live) { - for (sw = hw->sw_head.lh_first; sw; sw = sw->entries.le_next) { - if (sw->active) { - free = audio_get_free (sw); - if (free > 0) { - sw->callback.fn (sw->callback.opaque, free); - } - } - } if (hw->pcm_ops->run_buffer_out) { hw->pcm_ops->run_buffer_out(hw); } @@ -1211,13 +1259,6 @@ static void audio_run_out (AudioState *s) if (!sw->total_hw_samples_mixed) { sw->empty = 1; } - - if (sw->active) { - free = audio_get_free (sw); - if (free > 0) { - sw->callback.fn (sw->callback.opaque, free); - } - } } } } @@ -1225,7 +1266,6 @@ static void audio_run_out (AudioState *s) static size_t audio_pcm_hw_run_in(HWVoiceIn *hw, size_t samples) { size_t conv = 0; - STSampleBuffer *conv_buf = hw->conv_buf; if (hw->pcm_ops->run_buffer_in) { hw->pcm_ops->run_buffer_in(hw); @@ -1241,11 +1281,7 @@ static size_t audio_pcm_hw_run_in(HWVoiceIn *hw, size_t samples) break; } - proc = MIN(size / hw->info.bytes_per_frame, - conv_buf->size - conv_buf->pos); - - hw->conv(conv_buf->samples + conv_buf->pos, buf, proc); - conv_buf->pos = (conv_buf->pos + proc) % conv_buf->size; + proc = audio_pcm_hw_conv_in(hw, buf, size / hw->info.bytes_per_frame); samples -= proc; conv += proc; @@ -1289,11 +1325,13 @@ static void audio_run_in (AudioState *s) sw->total_hw_samples_acquired -= min; if (sw->active) { + size_t sw_avail = audio_get_avail(sw); size_t avail; - avail = audio_get_avail (sw); + avail = audio_frontend_frames_in(sw, sw_avail); if (avail > 0) { - sw->callback.fn (sw->callback.opaque, avail); + sw->callback.fn(sw->callback.opaque, + avail * sw->info.bytes_per_frame); } } } @@ -1394,12 +1432,10 @@ void audio_generic_run_buffer_in(HWVoiceIn *hw) void *audio_generic_get_buffer_in(HWVoiceIn *hw, size_t *size) { - ssize_t start = (ssize_t)hw->pos_emul - hw->pending_emul; + size_t start; - if (start < 0) { - start += hw->size_emul; - } - assert(start >= 0 && start < hw->size_emul); + start = audio_ring_posb(hw->pos_emul, hw->pending_emul, hw->size_emul); + assert(start < hw->size_emul); *size = MIN(*size, hw->pending_emul); *size = MIN(*size, hw->size_emul - start); @@ -1412,16 +1448,22 @@ void audio_generic_put_buffer_in(HWVoiceIn *hw, void *buf, size_t size) hw->pending_emul -= size; } +size_t audio_generic_buffer_get_free(HWVoiceOut *hw) +{ + if (hw->buf_emul) { + return hw->size_emul - hw->pending_emul; + } else { + return hw->samples * hw->info.bytes_per_frame; + } +} + void audio_generic_run_buffer_out(HWVoiceOut *hw) { while (hw->pending_emul) { - size_t write_len, written; - ssize_t start = ((ssize_t) hw->pos_emul) - hw->pending_emul; + size_t write_len, written, start; - if (start < 0) { - start += hw->size_emul; - } - assert(start >= 0 && start < hw->size_emul); + start = audio_ring_posb(hw->pos_emul, hw->pending_emul, hw->size_emul); + assert(start < hw->size_emul); write_len = MIN(hw->pending_emul, hw->size_emul - start); @@ -1462,6 +1504,12 @@ size_t audio_generic_write(HWVoiceOut *hw, void *buf, size_t size) { size_t total = 0; + if 
(hw->pcm_ops->buffer_get_free) { + size_t free = hw->pcm_ops->buffer_get_free(hw); + + size = MIN(size, free); + } + while (total < size) { size_t dst_size = size - total; size_t copy_size, proc; @@ -1483,10 +1531,6 @@ size_t audio_generic_write(HWVoiceOut *hw, void *buf, size_t size) } } - if (hw->pcm_ops->run_buffer_out) { - hw->pcm_ops->run_buffer_out(hw); - } - return total; } @@ -1716,7 +1760,7 @@ static AudioState *audio_init(Audiodev *dev, const char *name) audio_validate_opts(dev, &error_abort); } - s = g_malloc0(sizeof(AudioState)); + s = g_new0(AudioState, 1); s->dev = dev; QLIST_INIT (&s->hw_head_out); @@ -1726,20 +1770,19 @@ static AudioState *audio_init(Audiodev *dev, const char *name) atexit(audio_cleanup); atexit_registered = true; } - QTAILQ_INSERT_TAIL(&audio_states, s, list); s->ts = timer_new_ns(QEMU_CLOCK_VIRTUAL, audio_timer, s); s->nb_hw_voices_out = audio_get_pdo_out(dev)->voices; s->nb_hw_voices_in = audio_get_pdo_in(dev)->voices; - if (s->nb_hw_voices_out <= 0) { + if (s->nb_hw_voices_out < 1) { dolog ("Bogus number of playback voices %d, setting to 1\n", s->nb_hw_voices_out); s->nb_hw_voices_out = 1; } - if (s->nb_hw_voices_in <= 0) { + if (s->nb_hw_voices_in < 0) { dolog ("Bogus number of capture voices %d, setting to 0\n", s->nb_hw_voices_in); s->nb_hw_voices_in = 0; @@ -1752,6 +1795,10 @@ static AudioState *audio_init(Audiodev *dev, const char *name) } else { dolog ("Unknown audio driver `%s'\n", drvname); } + if (!done) { + free_audio_state(s); + return NULL; + } } else { for (i = 0; audio_prio_list[i]; i++) { AudiodevListEntry *e = audiodev_find(&head, audio_prio_list[i]); @@ -1789,6 +1836,7 @@ static AudioState *audio_init(Audiodev *dev, const char *name) "(Audio can continue looping even after stopping the VM)\n"); } + QTAILQ_INSERT_TAIL(&audio_states, s, list); QLIST_INIT (&s->card_head); vmstate_register (NULL, 0, &vmstate_audio, s); return s; @@ -1821,6 +1869,7 @@ void AUD_remove_card (QEMUSoundCard *card) g_free (card->name); } +static struct audio_pcm_ops capture_pcm_ops; CaptureVoiceOut *AUD_add_capture( AudioState *s, @@ -1866,6 +1915,7 @@ CaptureVoiceOut *AUD_add_capture( hw = &cap->hw; hw->s = s; + hw->pcm_ops = &capture_pcm_ops; QLIST_INIT (&hw->sw_head); QLIST_INIT (&cap->cb_head); @@ -2000,11 +2050,13 @@ void audio_create_pdos(Audiodev *dev) CASE(NONE, none, ); CASE(ALSA, alsa, Alsa); CASE(COREAUDIO, coreaudio, Coreaudio); + CASE(DBUS, dbus, ); CASE(DSOUND, dsound, ); CASE(JACK, jack, Jack); CASE(OSS, oss, Oss); CASE(PA, pa, Pa); CASE(SDL, sdl, Sdl); + CASE(SNDIO, sndio, ); CASE(SPICE, spice, ); CASE(WAV, wav, ); @@ -2077,29 +2129,57 @@ static void audio_validate_opts(Audiodev *dev, Error **errp) } } +void audio_help(void) +{ + int i; + + printf("Available audio drivers:\n"); + + for (i = 0; i < AUDIODEV_DRIVER__MAX; i++) { + audio_driver *driver = audio_driver_lookup(AudiodevDriver_str(i)); + if (driver) { + printf("%s\n", driver->name); + } + } +} + void audio_parse_option(const char *opt) { - AudiodevListEntry *e; Audiodev *dev = NULL; + if (is_help_option(opt)) { + audio_help(); + exit(EXIT_SUCCESS); + } Visitor *v = qobject_input_visitor_new_str(opt, "driver", &error_fatal); visit_type_Audiodev(v, NULL, &dev, &error_fatal); visit_free(v); + audio_define(dev); +} + +void audio_define(Audiodev *dev) +{ + AudiodevListEntry *e; + audio_validate_opts(dev, &error_fatal); - e = g_malloc0(sizeof(AudiodevListEntry)); + e = g_new0(AudiodevListEntry, 1); e->dev = dev; QSIMPLEQ_INSERT_TAIL(&audiodevs, e, next); } -void audio_init_audiodevs(void) +bool 
audio_init_audiodevs(void) { AudiodevListEntry *e; QSIMPLEQ_FOREACH(e, &audiodevs, next) { - audio_init(e->dev, NULL); + if (!audio_init(e->dev, NULL)) { + return false; + } } + + return true; } audsettings audiodev_to_audsettings(AudiodevPerDirectionOptions *pdo) @@ -2197,26 +2277,39 @@ void audio_rate_start(RateCtl *rate) rate->start_ticks = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); } -size_t audio_rate_get_bytes(struct audio_pcm_info *info, RateCtl *rate, - size_t bytes_avail) +size_t audio_rate_peek_bytes(RateCtl *rate, struct audio_pcm_info *info) { int64_t now; int64_t ticks; int64_t bytes; - int64_t samples; - size_t ret; + int64_t frames; now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); ticks = now - rate->start_ticks; bytes = muldiv64(ticks, info->bytes_per_second, NANOSECONDS_PER_SECOND); - samples = (bytes - rate->bytes_sent) / info->bytes_per_frame; - if (samples < 0 || samples > 65536) { - AUD_log(NULL, "Resetting rate control (%" PRId64 " samples)\n", samples); + frames = (bytes - rate->bytes_sent) / info->bytes_per_frame; + if (frames < 0 || frames > 65536) { + AUD_log(NULL, "Resetting rate control (%" PRId64 " frames)\n", frames); audio_rate_start(rate); - samples = 0; + frames = 0; } - ret = MIN(samples * info->bytes_per_frame, bytes_avail); - rate->bytes_sent += ret; - return ret; + return frames * info->bytes_per_frame; +} + +void audio_rate_add_bytes(RateCtl *rate, size_t bytes_used) +{ + rate->bytes_sent += bytes_used; +} + +size_t audio_rate_get_bytes(RateCtl *rate, struct audio_pcm_info *info, + size_t bytes_avail) +{ + size_t bytes; + + bytes = audio_rate_peek_bytes(rate, info); + bytes = MIN(bytes, bytes_avail); + audio_rate_add_bytes(rate, bytes); + + return bytes; } diff --git a/audio/audio.h b/audio/audio.h index c8bde536b5..01bdc567fb 100644 --- a/audio/audio.h +++ b/audio/audio.h @@ -32,7 +32,7 @@ typedef void (*audio_callback_fn) (void *opaque, int avail); -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN #define AUDIO_HOST_ENDIANNESS 1 #else #define AUDIO_HOST_ENDIANNESS 0 @@ -91,8 +91,8 @@ typedef struct QEMUAudioTimeStamp { uint64_t old_ts; } QEMUAudioTimeStamp; -void AUD_vlog (const char *cap, const char *fmt, va_list ap) GCC_FMT_ATTR(2, 0); -void AUD_log (const char *cap, const char *fmt, ...) GCC_FMT_ATTR(2, 3); +void AUD_vlog (const char *cap, const char *fmt, va_list ap) G_GNUC_PRINTF(2, 0); +void AUD_log (const char *cap, const char *fmt, ...) 
G_GNUC_PRINTF(2, 3); void AUD_register_card (const char *name, QEMUSoundCard *card); void AUD_remove_card (QEMUSoundCard *card); @@ -168,8 +168,10 @@ void audio_sample_to_uint64(const void *samples, int pos, void audio_sample_from_uint64(void *samples, int pos, uint64_t left, uint64_t right); +void audio_define(Audiodev *audio); void audio_parse_option(const char *opt); -void audio_init_audiodevs(void); +bool audio_init_audiodevs(void); +void audio_help(void); void audio_legacy_help(void); AudioState *audio_state_by_name(const char *name); diff --git a/audio/audio_int.h b/audio/audio_int.h index 6d685e24a3..e87ce014a0 100644 --- a/audio/audio_int.h +++ b/audio/audio_int.h @@ -31,6 +31,10 @@ #endif #include "mixeng.h" +#ifdef CONFIG_GIO +#include <gio/gio.h> +#endif + struct audio_pcm_ops; struct audio_callback { @@ -140,6 +144,9 @@ struct audio_driver { const char *descr; void *(*init) (Audiodev *); void (*fini) (void *); +#ifdef CONFIG_GIO + void (*set_dbus_server) (AudioState *s, GDBusObjectManagerServer *manager); +#endif struct audio_pcm_ops *pcm_ops; int can_be_default; int max_voices_out; @@ -154,10 +161,14 @@ struct audio_pcm_ops { void (*fini_out)(HWVoiceOut *hw); size_t (*write) (HWVoiceOut *hw, void *buf, size_t size); void (*run_buffer_out)(HWVoiceOut *hw); + /* + * Get the free output buffer size. This is an upper limit. The size + * returned by function get_buffer_out may be smaller. + */ + size_t (*buffer_get_free)(HWVoiceOut *hw); /* * get a buffer that after later can be passed to put_buffer_out; optional * returns the buffer, and writes it's size to size (in bytes) - * this is unrelated to the above buffer_size_out function */ void *(*get_buffer_out)(HWVoiceOut *hw, size_t *size); /* @@ -183,6 +194,7 @@ void audio_generic_run_buffer_in(HWVoiceIn *hw); void *audio_generic_get_buffer_in(HWVoiceIn *hw, size_t *size); void audio_generic_put_buffer_in(HWVoiceIn *hw, void *buf, size_t size); void audio_generic_run_buffer_out(HWVoiceOut *hw); +size_t audio_generic_buffer_get_free(HWVoiceOut *hw); void *audio_generic_get_buffer_out(HWVoiceOut *hw, size_t *size); size_t audio_generic_put_buffer_out(HWVoiceOut *hw, void *buf, size_t size); size_t audio_generic_write(HWVoiceOut *hw, void *buf, size_t size); @@ -251,7 +263,9 @@ typedef struct RateCtl { } RateCtl; void audio_rate_start(RateCtl *rate); -size_t audio_rate_get_bytes(struct audio_pcm_info *info, RateCtl *rate, +size_t audio_rate_peek_bytes(RateCtl *rate, struct audio_pcm_info *info); +void audio_rate_add_bytes(RateCtl *rate, size_t bytes_used); +size_t audio_rate_get_bytes(RateCtl *rate, struct audio_pcm_info *info, size_t bytes_avail); static inline size_t audio_ring_dist(size_t dst, size_t src, size_t len) @@ -259,6 +273,19 @@ static inline size_t audio_ring_dist(size_t dst, size_t src, size_t len) return (dst >= src) ? (dst - src) : (len - src + dst); } +/** + * audio_ring_posb() - returns new position in ringbuffer in backward + * direction at given distance + * + * @pos: current position in ringbuffer + * @dist: distance in ringbuffer to walk in reverse direction + * @len: size of ringbuffer + */ +static inline size_t audio_ring_posb(size_t pos, size_t dist, size_t len) +{ + return pos >= dist ? pos - dist : len - dist + pos; +} + #define dolog(fmt, ...) 
AUD_log(AUDIO_CAP, fmt, ## __VA_ARGS__) #ifdef DEBUG diff --git a/audio/audio_legacy.c b/audio/audio_legacy.c index 0fe827b057..595949f52c 100644 --- a/audio/audio_legacy.c +++ b/audio/audio_legacy.c @@ -328,8 +328,8 @@ static void handle_per_direction( static AudiodevListEntry *legacy_opt(const char *drvname) { - AudiodevListEntry *e = g_malloc0(sizeof(AudiodevListEntry)); - e->dev = g_malloc0(sizeof(Audiodev)); + AudiodevListEntry *e = g_new0(AudiodevListEntry, 1); + e->dev = g_new0(Audiodev, 1); e->dev->id = g_strdup(drvname); e->dev->driver = qapi_enum_parse( &AudiodevDriver_lookup, drvname, -1, &error_abort); @@ -508,7 +508,7 @@ static void lv_free(Visitor *v) static Visitor *legacy_visitor_new(void) { - LegacyPrintVisitor *lv = g_malloc0(sizeof(LegacyPrintVisitor)); + LegacyPrintVisitor *lv = g_new0(LegacyPrintVisitor, 1); lv->visitor.start_struct = lv_start_struct; lv->visitor.end_struct = lv_end_struct; diff --git a/audio/audio_template.h b/audio/audio_template.h index c6714946aa..720a32e57e 100644 --- a/audio/audio_template.h +++ b/audio/audio_template.h @@ -110,7 +110,11 @@ static int glue (audio_pcm_sw_alloc_resources_, TYPE) (SW *sw) return 0; } +#ifdef DAC samples = ((int64_t) sw->HWBUF->size << 32) / sw->ratio; +#else + samples = (int64_t)sw->HWBUF->size * sw->ratio >> 32; +#endif sw->buf = audio_calloc(__func__, samples, sizeof(struct st_sample)); if (!sw->buf) { @@ -327,6 +331,8 @@ AudiodevPerDirectionOptions *glue(audio_get_pdo_, TYPE)(Audiodev *dev) case AUDIODEV_DRIVER_COREAUDIO: return qapi_AudiodevCoreaudioPerDirectionOptions_base( dev->u.coreaudio.TYPE); + case AUDIODEV_DRIVER_DBUS: + return dev->u.dbus.TYPE; case AUDIODEV_DRIVER_DSOUND: return dev->u.dsound.TYPE; case AUDIODEV_DRIVER_JACK: @@ -337,6 +343,8 @@ AudiodevPerDirectionOptions *glue(audio_get_pdo_, TYPE)(Audiodev *dev) return qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.TYPE); case AUDIODEV_DRIVER_SDL: return qapi_AudiodevSdlPerDirectionOptions_base(dev->u.sdl.TYPE); + case AUDIODEV_DRIVER_SNDIO: + return dev->u.sndio.TYPE; case AUDIODEV_DRIVER_SPICE: return dev->u.spice.TYPE; case AUDIODEV_DRIVER_WAV: diff --git a/audio/audio_win_int.c b/audio/audio_win_int.c index 5ea8157dfc..316f118f50 100644 --- a/audio/audio_win_int.c +++ b/audio/audio_win_int.c @@ -1,7 +1,6 @@ /* public domain */ #include "qemu/osdep.h" -#include "qemu-common.h" #define AUDIO_CAP "win-int" #include diff --git a/audio/coreaudio.c b/audio/coreaudio.m similarity index 95% rename from audio/coreaudio.c rename to audio/coreaudio.m index d8a21d3e50..4695291621 100644 --- a/audio/coreaudio.c +++ b/audio/coreaudio.m @@ -44,10 +44,15 @@ typedef struct coreaudioVoiceOut { bool enabled; } coreaudioVoiceOut; +#if !defined(MAC_OS_VERSION_12_0) \ + || (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_12_0) +#define kAudioObjectPropertyElementMain kAudioObjectPropertyElementMaster +#endif + static const AudioObjectPropertyAddress voice_addr = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, - kAudioObjectPropertyElementMaster + kAudioObjectPropertyElementMain }; static OSStatus coreaudio_get_voice(AudioDeviceID *id) @@ -69,7 +74,7 @@ static OSStatus coreaudio_get_framesizerange(AudioDeviceID id, AudioObjectPropertyAddress addr = { kAudioDevicePropertyBufferFrameSizeRange, kAudioDevicePropertyScopeOutput, - kAudioObjectPropertyElementMaster + kAudioObjectPropertyElementMain }; return AudioObjectGetPropertyData(id, @@ -86,7 +91,7 @@ static OSStatus coreaudio_get_framesize(AudioDeviceID id, UInt32 *framesize) 
AudioObjectPropertyAddress addr = { kAudioDevicePropertyBufferFrameSize, kAudioDevicePropertyScopeOutput, - kAudioObjectPropertyElementMaster + kAudioObjectPropertyElementMain }; return AudioObjectGetPropertyData(id, @@ -103,7 +108,7 @@ static OSStatus coreaudio_set_framesize(AudioDeviceID id, UInt32 *framesize) AudioObjectPropertyAddress addr = { kAudioDevicePropertyBufferFrameSize, kAudioDevicePropertyScopeOutput, - kAudioObjectPropertyElementMaster + kAudioObjectPropertyElementMain }; return AudioObjectSetPropertyData(id, @@ -121,7 +126,7 @@ static OSStatus coreaudio_set_streamformat(AudioDeviceID id, AudioObjectPropertyAddress addr = { kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeOutput, - kAudioObjectPropertyElementMaster + kAudioObjectPropertyElementMain }; return AudioObjectSetPropertyData(id, @@ -138,7 +143,7 @@ static OSStatus coreaudio_get_isrunning(AudioDeviceID id, UInt32 *result) AudioObjectPropertyAddress addr = { kAudioDevicePropertyDeviceIsRunning, kAudioDevicePropertyScopeOutput, - kAudioObjectPropertyElementMaster + kAudioObjectPropertyElementMain }; return AudioObjectGetPropertyData(id, @@ -206,7 +211,7 @@ static void coreaudio_logstatus (OSStatus status) AUD_log (AUDIO_CAP, "Reason: %s\n", str); } -static void GCC_FMT_ATTR (2, 3) coreaudio_logerr ( +static void G_GNUC_PRINTF (2, 3) coreaudio_logerr ( OSStatus status, const char *fmt, ... @@ -221,7 +226,7 @@ static void GCC_FMT_ATTR (2, 3) coreaudio_logerr ( coreaudio_logstatus (status); } -static void GCC_FMT_ATTR (3, 4) coreaudio_logerr2 ( +static void G_GNUC_PRINTF (3, 4) coreaudio_logerr2 ( OSStatus status, const char *typ, const char *fmt, @@ -283,6 +288,7 @@ static int coreaudio_buf_unlock (coreaudioVoiceOut *core, const char *fn_name) coreaudio_buf_unlock(core, "coreaudio_" #name); \ return ret; \ } +COREAUDIO_WRAPPER_FUNC(buffer_get_free, size_t, (HWVoiceOut *hw), (hw)) COREAUDIO_WRAPPER_FUNC(get_buffer_out, void *, (HWVoiceOut *hw, size_t *size), (hw, size)) COREAUDIO_WRAPPER_FUNC(put_buffer_out, size_t, @@ -333,12 +339,10 @@ static OSStatus audioDeviceIOProc( len = frameCount * hw->info.bytes_per_frame; while (len) { - size_t write_len; - ssize_t start = ((ssize_t) hw->pos_emul) - hw->pending_emul; - if (start < 0) { - start += hw->size_emul; - } - assert(start >= 0 && start < hw->size_emul); + size_t write_len, start; + + start = audio_ring_posb(hw->pos_emul, hw->pending_emul, hw->size_emul); + assert(start < hw->size_emul); write_len = MIN(MIN(hw->pending_emul, len), hw->size_emul - start); @@ -541,7 +545,6 @@ static OSStatus handle_voice_change( const AudioObjectPropertyAddress *in_addresses, void *in_client_data) { - OSStatus status; coreaudioVoiceOut *core = in_client_data; qemu_mutex_lock_iothread(); @@ -550,13 +553,12 @@ static OSStatus handle_voice_change( fini_out_device(core); } - status = init_out_device(core); - if (!status) { + if (!init_out_device(core)) { update_device_playback_state(core); } qemu_mutex_unlock_iothread(); - return status; + return 0; } static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as, @@ -604,6 +606,8 @@ static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as, coreaudio_playback_logerr(status, "Could not remove voice property change listener\n"); } + + return -1; } return 0; @@ -654,6 +658,8 @@ static struct audio_pcm_ops coreaudio_pcm_ops = { .fini_out = coreaudio_fini_out, /* wrapper for audio_generic_write */ .write = coreaudio_write, + /* wrapper for audio_generic_buffer_get_free */ + .buffer_get_free = coreaudio_buffer_get_free, 
/* wrapper for audio_generic_get_buffer_out */ .get_buffer_out = coreaudio_get_buffer_out, /* wrapper for audio_generic_put_buffer_out */ diff --git a/audio/dbusaudio.c b/audio/dbusaudio.c new file mode 100644 index 0000000000..722df0355e --- /dev/null +++ b/audio/dbusaudio.c @@ -0,0 +1,654 @@ +/* + * QEMU DBus audio + * + * Copyright (c) 2021 Red Hat, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/host-utils.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "qemu/dbus.h" + +#include <gio/gunixfdlist.h> +#include "ui/dbus-display1.h" + +#define AUDIO_CAP "dbus" +#include "audio.h" +#include "audio_int.h" +#include "trace.h" + +#define DBUS_DISPLAY1_AUDIO_PATH DBUS_DISPLAY1_ROOT "/Audio" + +#define DBUS_AUDIO_NSAMPLES 1024 /* could be configured? 
*/ + +typedef struct DBusAudio { + GDBusObjectManagerServer *server; + GDBusObjectSkeleton *audio; + QemuDBusDisplay1Audio *iface; + GHashTable *out_listeners; + GHashTable *in_listeners; +} DBusAudio; + +typedef struct DBusVoiceOut { + HWVoiceOut hw; + bool enabled; + RateCtl rate; + + void *buf; + size_t buf_pos; + size_t buf_size; + + bool has_volume; + Volume volume; +} DBusVoiceOut; + +typedef struct DBusVoiceIn { + HWVoiceIn hw; + bool enabled; + RateCtl rate; + + bool has_volume; + Volume volume; +} DBusVoiceIn; + +static void *dbus_get_buffer_out(HWVoiceOut *hw, size_t *size) +{ + DBusVoiceOut *vo = container_of(hw, DBusVoiceOut, hw); + + if (!vo->buf) { + vo->buf_size = hw->samples * hw->info.bytes_per_frame; + vo->buf = g_malloc(vo->buf_size); + vo->buf_pos = 0; + } + + *size = MIN(vo->buf_size - vo->buf_pos, *size); + *size = audio_rate_get_bytes(&vo->rate, &hw->info, *size); + + return vo->buf + vo->buf_pos; + +} + +static size_t dbus_put_buffer_out(HWVoiceOut *hw, void *buf, size_t size) +{ + DBusAudio *da = (DBusAudio *)hw->s->drv_opaque; + DBusVoiceOut *vo = container_of(hw, DBusVoiceOut, hw); + GHashTableIter iter; + QemuDBusDisplay1AudioOutListener *listener = NULL; + g_autoptr(GBytes) bytes = NULL; + g_autoptr(GVariant) v_data = NULL; + + assert(buf == vo->buf + vo->buf_pos && vo->buf_pos + size <= vo->buf_size); + vo->buf_pos += size; + + trace_dbus_audio_put_buffer_out(size); + + if (vo->buf_pos < vo->buf_size) { + return size; + } + + bytes = g_bytes_new_take(g_steal_pointer(&vo->buf), vo->buf_size); + v_data = g_variant_new_from_bytes(G_VARIANT_TYPE("ay"), bytes, TRUE); + g_variant_ref_sink(v_data); + + g_hash_table_iter_init(&iter, da->out_listeners); + while (g_hash_table_iter_next(&iter, NULL, (void **)&listener)) { + qemu_dbus_display1_audio_out_listener_call_write( + listener, + (uintptr_t)hw, + v_data, + G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL); + } + + return size; +} + +#if HOST_BIG_ENDIAN +#define AUDIO_HOST_BE TRUE +#else +#define AUDIO_HOST_BE FALSE +#endif + +static void +dbus_init_out_listener(QemuDBusDisplay1AudioOutListener *listener, + HWVoiceOut *hw) +{ + qemu_dbus_display1_audio_out_listener_call_init( + listener, + (uintptr_t)hw, + hw->info.bits, + hw->info.is_signed, + hw->info.is_float, + hw->info.freq, + hw->info.nchannels, + hw->info.bytes_per_frame, + hw->info.bytes_per_second, + hw->info.swap_endianness ? 
!AUDIO_HOST_BE : AUDIO_HOST_BE, + G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL); +} + +static int +dbus_init_out(HWVoiceOut *hw, struct audsettings *as, void *drv_opaque) +{ + DBusAudio *da = (DBusAudio *)hw->s->drv_opaque; + DBusVoiceOut *vo = container_of(hw, DBusVoiceOut, hw); + GHashTableIter iter; + QemuDBusDisplay1AudioOutListener *listener = NULL; + + audio_pcm_init_info(&hw->info, as); + hw->samples = DBUS_AUDIO_NSAMPLES; + audio_rate_start(&vo->rate); + + g_hash_table_iter_init(&iter, da->out_listeners); + while (g_hash_table_iter_next(&iter, NULL, (void **)&listener)) { + dbus_init_out_listener(listener, hw); + } + return 0; +} + +static void +dbus_fini_out(HWVoiceOut *hw) +{ + DBusAudio *da = (DBusAudio *)hw->s->drv_opaque; + DBusVoiceOut *vo = container_of(hw, DBusVoiceOut, hw); + GHashTableIter iter; + QemuDBusDisplay1AudioOutListener *listener = NULL; + + g_hash_table_iter_init(&iter, da->out_listeners); + while (g_hash_table_iter_next(&iter, NULL, (void **)&listener)) { + qemu_dbus_display1_audio_out_listener_call_fini( + listener, + (uintptr_t)hw, + G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL); + } + + g_clear_pointer(&vo->buf, g_free); +} + +static void +dbus_enable_out(HWVoiceOut *hw, bool enable) +{ + DBusAudio *da = (DBusAudio *)hw->s->drv_opaque; + DBusVoiceOut *vo = container_of(hw, DBusVoiceOut, hw); + GHashTableIter iter; + QemuDBusDisplay1AudioOutListener *listener = NULL; + + vo->enabled = enable; + if (enable) { + audio_rate_start(&vo->rate); + } + + g_hash_table_iter_init(&iter, da->out_listeners); + while (g_hash_table_iter_next(&iter, NULL, (void **)&listener)) { + qemu_dbus_display1_audio_out_listener_call_set_enabled( + listener, (uintptr_t)hw, enable, + G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL); + } +} + +static void +dbus_volume_out_listener(HWVoiceOut *hw, + QemuDBusDisplay1AudioOutListener *listener) +{ + DBusVoiceOut *vo = container_of(hw, DBusVoiceOut, hw); + Volume *vol = &vo->volume; + g_autoptr(GBytes) bytes = NULL; + GVariant *v_vol = NULL; + + if (!vo->has_volume) { + return; + } + + assert(vol->channels < sizeof(vol->vol)); + bytes = g_bytes_new(vol->vol, vol->channels); + v_vol = g_variant_new_from_bytes(G_VARIANT_TYPE("ay"), bytes, TRUE); + qemu_dbus_display1_audio_out_listener_call_set_volume( + listener, (uintptr_t)hw, vol->mute, v_vol, + G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL); +} + +static void +dbus_volume_out(HWVoiceOut *hw, Volume *vol) +{ + DBusAudio *da = (DBusAudio *)hw->s->drv_opaque; + DBusVoiceOut *vo = container_of(hw, DBusVoiceOut, hw); + GHashTableIter iter; + QemuDBusDisplay1AudioOutListener *listener = NULL; + + vo->has_volume = true; + vo->volume = *vol; + + g_hash_table_iter_init(&iter, da->out_listeners); + while (g_hash_table_iter_next(&iter, NULL, (void **)&listener)) { + dbus_volume_out_listener(hw, listener); + } +} + +static void +dbus_init_in_listener(QemuDBusDisplay1AudioInListener *listener, HWVoiceIn *hw) +{ + qemu_dbus_display1_audio_in_listener_call_init( + listener, + (uintptr_t)hw, + hw->info.bits, + hw->info.is_signed, + hw->info.is_float, + hw->info.freq, + hw->info.nchannels, + hw->info.bytes_per_frame, + hw->info.bytes_per_second, + hw->info.swap_endianness ? 
!AUDIO_HOST_BE : AUDIO_HOST_BE, + G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL); +} + +static int +dbus_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque) +{ + DBusAudio *da = (DBusAudio *)hw->s->drv_opaque; + DBusVoiceIn *vo = container_of(hw, DBusVoiceIn, hw); + GHashTableIter iter; + QemuDBusDisplay1AudioInListener *listener = NULL; + + audio_pcm_init_info(&hw->info, as); + hw->samples = DBUS_AUDIO_NSAMPLES; + audio_rate_start(&vo->rate); + + g_hash_table_iter_init(&iter, da->in_listeners); + while (g_hash_table_iter_next(&iter, NULL, (void **)&listener)) { + dbus_init_in_listener(listener, hw); + } + return 0; +} + +static void +dbus_fini_in(HWVoiceIn *hw) +{ + DBusAudio *da = (DBusAudio *)hw->s->drv_opaque; + GHashTableIter iter; + QemuDBusDisplay1AudioInListener *listener = NULL; + + g_hash_table_iter_init(&iter, da->in_listeners); + while (g_hash_table_iter_next(&iter, NULL, (void **)&listener)) { + qemu_dbus_display1_audio_in_listener_call_fini( + listener, + (uintptr_t)hw, + G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL); + } +} + +static void +dbus_volume_in_listener(HWVoiceIn *hw, + QemuDBusDisplay1AudioInListener *listener) +{ + DBusVoiceIn *vo = container_of(hw, DBusVoiceIn, hw); + Volume *vol = &vo->volume; + g_autoptr(GBytes) bytes = NULL; + GVariant *v_vol = NULL; + + if (!vo->has_volume) { + return; + } + + assert(vol->channels < sizeof(vol->vol)); + bytes = g_bytes_new(vol->vol, vol->channels); + v_vol = g_variant_new_from_bytes(G_VARIANT_TYPE("ay"), bytes, TRUE); + qemu_dbus_display1_audio_in_listener_call_set_volume( + listener, (uintptr_t)hw, vol->mute, v_vol, + G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL); +} + +static void +dbus_volume_in(HWVoiceIn *hw, Volume *vol) +{ + DBusAudio *da = (DBusAudio *)hw->s->drv_opaque; + DBusVoiceIn *vo = container_of(hw, DBusVoiceIn, hw); + GHashTableIter iter; + QemuDBusDisplay1AudioInListener *listener = NULL; + + vo->has_volume = true; + vo->volume = *vol; + + g_hash_table_iter_init(&iter, da->in_listeners); + while (g_hash_table_iter_next(&iter, NULL, (void **)&listener)) { + dbus_volume_in_listener(hw, listener); + } +} + +static size_t +dbus_read(HWVoiceIn *hw, void *buf, size_t size) +{ + DBusAudio *da = (DBusAudio *)hw->s->drv_opaque; + /* DBusVoiceIn *vo = container_of(hw, DBusVoiceIn, hw); */ + GHashTableIter iter; + QemuDBusDisplay1AudioInListener *listener = NULL; + + trace_dbus_audio_read(size); + + /* size = audio_rate_get_bytes(&vo->rate, &hw->info, size); */ + + g_hash_table_iter_init(&iter, da->in_listeners); + while (g_hash_table_iter_next(&iter, NULL, (void **)&listener)) { + g_autoptr(GVariant) v_data = NULL; + const char *data; + gsize n = 0; + + if (qemu_dbus_display1_audio_in_listener_call_read_sync( + listener, + (uintptr_t)hw, + size, + G_DBUS_CALL_FLAGS_NONE, -1, + &v_data, NULL, NULL)) { + data = g_variant_get_fixed_array(v_data, &n, 1); + g_warn_if_fail(n <= size); + size = MIN(n, size); + memcpy(buf, data, size); + break; + } + } + + return size; +} + +static void +dbus_enable_in(HWVoiceIn *hw, bool enable) +{ + DBusAudio *da = (DBusAudio *)hw->s->drv_opaque; + DBusVoiceIn *vo = container_of(hw, DBusVoiceIn, hw); + GHashTableIter iter; + QemuDBusDisplay1AudioInListener *listener = NULL; + + vo->enabled = enable; + if (enable) { + audio_rate_start(&vo->rate); + } + + g_hash_table_iter_init(&iter, da->in_listeners); + while (g_hash_table_iter_next(&iter, NULL, (void **)&listener)) { + qemu_dbus_display1_audio_in_listener_call_set_enabled( + listener, (uintptr_t)hw, enable, + 
G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL); + } +} + +static void * +dbus_audio_init(Audiodev *dev) +{ + DBusAudio *da = g_new0(DBusAudio, 1); + + da->out_listeners = g_hash_table_new_full(g_str_hash, g_str_equal, + g_free, g_object_unref); + da->in_listeners = g_hash_table_new_full(g_str_hash, g_str_equal, + g_free, g_object_unref); + return da; +} + +static void +dbus_audio_fini(void *opaque) +{ + DBusAudio *da = opaque; + + if (da->server) { + g_dbus_object_manager_server_unexport(da->server, + DBUS_DISPLAY1_AUDIO_PATH); + } + g_clear_object(&da->audio); + g_clear_object(&da->iface); + g_clear_pointer(&da->in_listeners, g_hash_table_unref); + g_clear_pointer(&da->out_listeners, g_hash_table_unref); + g_clear_object(&da->server); + g_free(da); +} + +static void +listener_out_vanished_cb(GDBusConnection *connection, + gboolean remote_peer_vanished, + GError *error, + DBusAudio *da) +{ + char *name = g_object_get_data(G_OBJECT(connection), "name"); + + g_hash_table_remove(da->out_listeners, name); +} + +static void +listener_in_vanished_cb(GDBusConnection *connection, + gboolean remote_peer_vanished, + GError *error, + DBusAudio *da) +{ + char *name = g_object_get_data(G_OBJECT(connection), "name"); + + g_hash_table_remove(da->in_listeners, name); +} + +static gboolean +dbus_audio_register_listener(AudioState *s, + GDBusMethodInvocation *invocation, + GUnixFDList *fd_list, + GVariant *arg_listener, + bool out) +{ + DBusAudio *da = s->drv_opaque; + const char *sender = g_dbus_method_invocation_get_sender(invocation); + g_autoptr(GDBusConnection) listener_conn = NULL; + g_autoptr(GError) err = NULL; + g_autoptr(GSocket) socket = NULL; + g_autoptr(GSocketConnection) socket_conn = NULL; + g_autofree char *guid = g_dbus_generate_guid(); + GHashTable *listeners = out ? da->out_listeners : da->in_listeners; + GObject *listener; + int fd; + + trace_dbus_audio_register(sender, out ? "out" : "in"); + + if (g_hash_table_contains(listeners, sender)) { + g_dbus_method_invocation_return_error(invocation, + DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_INVALID, + "`%s` is already registered!", + sender); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + fd = g_unix_fd_list_get(fd_list, g_variant_get_handle(arg_listener), &err); + if (err) { + g_dbus_method_invocation_return_error(invocation, + DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_FAILED, + "Couldn't get peer fd: %s", + err->message); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + socket = g_socket_new_from_fd(fd, &err); + if (err) { + g_dbus_method_invocation_return_error(invocation, + DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_FAILED, + "Couldn't make a socket: %s", + err->message); + return DBUS_METHOD_INVOCATION_HANDLED; + } + socket_conn = g_socket_connection_factory_create_connection(socket); + if (out) { + qemu_dbus_display1_audio_complete_register_out_listener( + da->iface, invocation, NULL); + } else { + qemu_dbus_display1_audio_complete_register_in_listener( + da->iface, invocation, NULL); + } + + listener_conn = + g_dbus_connection_new_sync( + G_IO_STREAM(socket_conn), + guid, + G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_SERVER, + NULL, NULL, &err); + if (err) { + error_report("Failed to setup peer connection: %s", err->message); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + listener = out ? 
+ G_OBJECT(qemu_dbus_display1_audio_out_listener_proxy_new_sync( + listener_conn, + G_DBUS_PROXY_FLAGS_DO_NOT_AUTO_START, + NULL, + "/org/qemu/Display1/AudioOutListener", + NULL, + &err)) : + G_OBJECT(qemu_dbus_display1_audio_in_listener_proxy_new_sync( + listener_conn, + G_DBUS_PROXY_FLAGS_DO_NOT_AUTO_START, + NULL, + "/org/qemu/Display1/AudioInListener", + NULL, + &err)); + if (!listener) { + error_report("Failed to setup proxy: %s", err->message); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + if (out) { + HWVoiceOut *hw; + + QLIST_FOREACH(hw, &s->hw_head_out, entries) { + DBusVoiceOut *vo = container_of(hw, DBusVoiceOut, hw); + QemuDBusDisplay1AudioOutListener *l = + QEMU_DBUS_DISPLAY1_AUDIO_OUT_LISTENER(listener); + + dbus_init_out_listener(l, hw); + qemu_dbus_display1_audio_out_listener_call_set_enabled( + l, (uintptr_t)hw, vo->enabled, + G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL); + } + } else { + HWVoiceIn *hw; + + QLIST_FOREACH(hw, &s->hw_head_in, entries) { + DBusVoiceIn *vo = container_of(hw, DBusVoiceIn, hw); + QemuDBusDisplay1AudioInListener *l = + QEMU_DBUS_DISPLAY1_AUDIO_IN_LISTENER(listener); + + dbus_init_in_listener( + QEMU_DBUS_DISPLAY1_AUDIO_IN_LISTENER(listener), hw); + qemu_dbus_display1_audio_in_listener_call_set_enabled( + l, (uintptr_t)hw, vo->enabled, + G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL); + } + } + + g_object_set_data_full(G_OBJECT(listener_conn), "name", + g_strdup(sender), g_free); + g_hash_table_insert(listeners, g_strdup(sender), listener); + g_object_connect(listener_conn, + "signal::closed", + out ? listener_out_vanished_cb : listener_in_vanished_cb, + da, + NULL); + + return DBUS_METHOD_INVOCATION_HANDLED; +} + +static gboolean +dbus_audio_register_out_listener(AudioState *s, + GDBusMethodInvocation *invocation, + GUnixFDList *fd_list, + GVariant *arg_listener) +{ + return dbus_audio_register_listener(s, invocation, + fd_list, arg_listener, true); + +} + +static gboolean +dbus_audio_register_in_listener(AudioState *s, + GDBusMethodInvocation *invocation, + GUnixFDList *fd_list, + GVariant *arg_listener) +{ + return dbus_audio_register_listener(s, invocation, + fd_list, arg_listener, false); +} + +static void +dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server) +{ + DBusAudio *da = s->drv_opaque; + + g_assert(da); + g_assert(!da->server); + + da->server = g_object_ref(server); + + da->audio = g_dbus_object_skeleton_new(DBUS_DISPLAY1_AUDIO_PATH); + da->iface = qemu_dbus_display1_audio_skeleton_new(); + g_object_connect(da->iface, + "swapped-signal::handle-register-in-listener", + dbus_audio_register_in_listener, s, + "swapped-signal::handle-register-out-listener", + dbus_audio_register_out_listener, s, + NULL); + + g_dbus_object_skeleton_add_interface(G_DBUS_OBJECT_SKELETON(da->audio), + G_DBUS_INTERFACE_SKELETON(da->iface)); + g_dbus_object_manager_server_export(da->server, da->audio); +} + +static struct audio_pcm_ops dbus_pcm_ops = { + .init_out = dbus_init_out, + .fini_out = dbus_fini_out, + .write = audio_generic_write, + .get_buffer_out = dbus_get_buffer_out, + .put_buffer_out = dbus_put_buffer_out, + .enable_out = dbus_enable_out, + .volume_out = dbus_volume_out, + + .init_in = dbus_init_in, + .fini_in = dbus_fini_in, + .read = dbus_read, + .run_buffer_in = audio_generic_run_buffer_in, + .enable_in = dbus_enable_in, + .volume_in = dbus_volume_in, +}; + +static struct audio_driver dbus_audio_driver = { + .name = "dbus", + .descr = "Timer based audio exposed with DBus interface", + .init = dbus_audio_init, + .fini = 
dbus_audio_fini, + .set_dbus_server = dbus_audio_set_server, + .pcm_ops = &dbus_pcm_ops, + .can_be_default = 1, + .max_voices_out = INT_MAX, + .max_voices_in = INT_MAX, + .voice_size_out = sizeof(DBusVoiceOut), + .voice_size_in = sizeof(DBusVoiceIn) +}; + +static void register_audio_dbus(void) +{ + audio_driver_register(&dbus_audio_driver); +} +type_init(register_audio_dbus); + +module_dep("ui-dbus") diff --git a/audio/dsoundaudio.c b/audio/dsoundaudio.c index cfc79c129e..3fb67ec3ee 100644 --- a/audio/dsoundaudio.c +++ b/audio/dsoundaudio.c @@ -222,7 +222,7 @@ static void dsound_log_hresult (HRESULT hr) AUD_log (AUDIO_CAP, "Reason: %s\n", str); } -static void GCC_FMT_ATTR (2, 3) dsound_logerr ( +static void G_GNUC_PRINTF (2, 3) dsound_logerr ( HRESULT hr, const char *fmt, ... @@ -237,7 +237,7 @@ static void GCC_FMT_ATTR (2, 3) dsound_logerr ( dsound_log_hresult (hr); } -static void GCC_FMT_ATTR (3, 4) dsound_logerr2 ( +static void G_GNUC_PRINTF (3, 4) dsound_logerr2 ( HRESULT hr, const char *typ, const char *fmt, @@ -427,22 +427,18 @@ static void dsound_enable_out(HWVoiceOut *hw, bool enable) } } -static void *dsound_get_buffer_out(HWVoiceOut *hw, size_t *size) +static size_t dsound_buffer_get_free(HWVoiceOut *hw) { DSoundVoiceOut *ds = (DSoundVoiceOut *) hw; LPDIRECTSOUNDBUFFER dsb = ds->dsound_buffer; HRESULT hr; - DWORD ppos, wpos, act_size; - size_t req_size; - int err; - void *ret; + DWORD ppos, wpos; hr = IDirectSoundBuffer_GetCurrentPosition( dsb, &ppos, ds->first_time ? &wpos : NULL); if (FAILED(hr)) { dsound_logerr(hr, "Could not get playback buffer position\n"); - *size = 0; - return NULL; + return 0; } if (ds->first_time) { @@ -450,13 +446,20 @@ static void *dsound_get_buffer_out(HWVoiceOut *hw, size_t *size) ds->first_time = false; } - req_size = audio_ring_dist(ppos, hw->pos_emul, hw->size_emul); - req_size = MIN(req_size, hw->size_emul - hw->pos_emul); + return audio_ring_dist(ppos, hw->pos_emul, hw->size_emul); +} - if (req_size == 0) { - *size = 0; - return NULL; - } +static void *dsound_get_buffer_out(HWVoiceOut *hw, size_t *size) +{ + DSoundVoiceOut *ds = (DSoundVoiceOut *)hw; + LPDIRECTSOUNDBUFFER dsb = ds->dsound_buffer; + DWORD act_size; + size_t req_size; + int err; + void *ret; + + req_size = MIN(*size, hw->size_emul - hw->pos_emul); + assert(req_size > 0); err = dsound_lock_out(dsb, &hw->info, hw->pos_emul, req_size, &ret, NULL, &act_size, NULL, false, ds->s); @@ -536,13 +539,12 @@ static void *dsound_get_buffer_in(HWVoiceIn *hw, size_t *size) DSoundVoiceIn *ds = (DSoundVoiceIn *) hw; LPDIRECTSOUNDCAPTUREBUFFER dscb = ds->dsound_capture_buffer; HRESULT hr; - DWORD cpos, rpos, act_size; + DWORD rpos, act_size; size_t req_size; int err; void *ret; - hr = IDirectSoundCaptureBuffer_GetCurrentPosition( - dscb, &cpos, ds->first_time ? 
&rpos : NULL); + hr = IDirectSoundCaptureBuffer_GetCurrentPosition(dscb, NULL, &rpos); if (FAILED(hr)) { dsound_logerr(hr, "Could not get capture buffer position\n"); *size = 0; @@ -554,7 +556,7 @@ static void *dsound_get_buffer_in(HWVoiceIn *hw, size_t *size) ds->first_time = false; } - req_size = audio_ring_dist(cpos, hw->pos_emul, hw->size_emul); + req_size = audio_ring_dist(rpos, hw->pos_emul, hw->size_emul); req_size = MIN(*size, MIN(req_size, hw->size_emul - hw->pos_emul)); if (req_size == 0) { @@ -621,7 +623,7 @@ static void *dsound_audio_init(Audiodev *dev) { int err; HRESULT hr; - dsound *s = g_malloc0(sizeof(dsound)); + dsound *s = g_new0(dsound, 1); AudiodevDsoundOptions *dso; assert(dev->driver == AUDIODEV_DRIVER_DSOUND); @@ -700,6 +702,7 @@ static struct audio_pcm_ops dsound_pcm_ops = { .init_out = dsound_init_out, .fini_out = dsound_fini_out, .write = audio_generic_write, + .buffer_get_free = dsound_buffer_get_free, .get_buffer_out = dsound_get_buffer_out, .put_buffer_out = dsound_put_buffer_out, .enable_out = dsound_enable_out, diff --git a/audio/jackaudio.c b/audio/jackaudio.c index e7de6d5433..5bdf3d7a78 100644 --- a/audio/jackaudio.c +++ b/audio/jackaudio.c @@ -97,9 +97,9 @@ static void qjack_buffer_create(QJackBuffer *buffer, int channels, int frames) buffer->used = 0; buffer->rptr = 0; buffer->wptr = 0; - buffer->data = g_malloc(channels * sizeof(float *)); + buffer->data = g_new(float *, channels); for (int i = 0; i < channels; ++i) { - buffer->data[i] = g_malloc(frames * sizeof(float)); + buffer->data[i] = g_new(float, frames); } } @@ -453,7 +453,7 @@ static int qjack_client_init(QJackClient *c) jack_on_shutdown(c->client, qjack_shutdown, c); /* allocate and register the ports */ - c->port = g_malloc(sizeof(jack_port_t *) * c->nchannels); + c->port = g_new(jack_port_t *, c->nchannels); for (int i = 0; i < c->nchannels; ++i) { char port_name[16]; @@ -483,8 +483,8 @@ static int qjack_client_init(QJackClient *c) c->buffersize = 512; } - /* create a 2 period buffer */ - qjack_buffer_create(&c->fifo, c->nchannels, c->buffersize * 2); + /* create a 3 period buffer */ + qjack_buffer_create(&c->fifo, c->nchannels, c->buffersize * 3); qjack_client_connect_ports(c); c->state = QJACK_STATE_RUNNING; @@ -622,6 +622,7 @@ static void qjack_enable_in(HWVoiceIn *hw, bool enable) ji->c.enabled = enable; } +#if !defined(WIN32) && defined(CONFIG_PTHREAD_SETNAME_NP_W_TID) static int qjack_thread_creator(jack_native_thread_t *thread, const pthread_attr_t *attr, void *(*function)(void *), void *arg) { @@ -635,6 +636,7 @@ static int qjack_thread_creator(jack_native_thread_t *thread, return ret; } +#endif static void *qjack_init(Audiodev *dev) { @@ -650,6 +652,7 @@ static struct audio_pcm_ops jack_pcm_ops = { .init_out = qjack_init_out, .fini_out = qjack_fini_out, .write = qjack_write, + .buffer_get_free = audio_generic_buffer_get_free, .run_buffer_out = audio_generic_run_buffer_out, .enable_out = qjack_enable_out, @@ -687,7 +690,9 @@ static void register_audio_jack(void) { qemu_mutex_init(&qjack_shutdown_lock); audio_driver_register(&jack_driver); +#if !defined(WIN32) && defined(CONFIG_PTHREAD_SETNAME_NP_W_TID) jack_set_thread_creator(qjack_thread_creator); +#endif jack_set_error_function(qjack_error); jack_set_info_function(qjack_info); } diff --git a/audio/meson.build b/audio/meson.build index 7d53b0f920..34aed78342 100644 --- a/audio/meson.build +++ b/audio/meson.build @@ -7,24 +7,30 @@ softmmu_ss.add(files( 'wavcapture.c', )) -softmmu_ss.add(when: [coreaudio, 'CONFIG_AUDIO_COREAUDIO'], 
if_true: files('coreaudio.c')) -softmmu_ss.add(when: [dsound, 'CONFIG_AUDIO_DSOUND'], if_true: files('dsoundaudio.c')) -softmmu_ss.add(when: ['CONFIG_AUDIO_WIN_INT'], if_true: files('audio_win_int.c')) +softmmu_ss.add(when: coreaudio, if_true: files('coreaudio.m')) +softmmu_ss.add(when: dsound, if_true: files('dsoundaudio.c', 'audio_win_int.c')) audio_modules = {} foreach m : [ - ['CONFIG_AUDIO_ALSA', 'alsa', alsa, 'alsaaudio.c'], - ['CONFIG_AUDIO_OSS', 'oss', oss, 'ossaudio.c'], - ['CONFIG_AUDIO_PA', 'pa', pulse, 'paaudio.c'], - ['CONFIG_AUDIO_SDL', 'sdl', sdl, 'sdlaudio.c'], - ['CONFIG_AUDIO_JACK', 'jack', jack, 'jackaudio.c'], - ['CONFIG_SPICE', 'spice', spice, 'spiceaudio.c'] + ['alsa', alsa, files('alsaaudio.c')], + ['oss', oss, files('ossaudio.c')], + ['pa', pulse, files('paaudio.c')], + ['sdl', sdl, files('sdlaudio.c')], + ['jack', jack, files('jackaudio.c')], + ['sndio', sndio, files('sndioaudio.c')], + ['spice', spice, files('spiceaudio.c')] ] - if config_host.has_key(m[0]) + if m[1].found() module_ss = ss.source_set() - module_ss.add(when: m[2], if_true: files(m[3])) - audio_modules += {m[1] : module_ss} + module_ss.add(m[1], m[2]) + audio_modules += {m[0] : module_ss} endif endforeach +if dbus_display + module_ss = ss.source_set() + module_ss.add(when: gio, if_true: files('dbusaudio.c')) + audio_modules += {'dbus': module_ss} +endif + modules += {'audio': audio_modules} diff --git a/audio/mixeng.c b/audio/mixeng.c index f27deb165b..100a306d6f 100644 --- a/audio/mixeng.c +++ b/audio/mixeng.c @@ -342,13 +342,13 @@ f_sample *mixeng_clip_float[2] = { void audio_sample_to_uint64(const void *samples, int pos, uint64_t *left, uint64_t *right) { - const struct st_sample *sample = samples; - sample += pos; #ifdef FLOAT_MIXENG error_report( "Coreaudio and floating point samples are not supported by replay yet"); abort(); #else + const struct st_sample *sample = samples; + sample += pos; *left = sample->l; *right = sample->r; #endif @@ -357,13 +357,13 @@ void audio_sample_to_uint64(const void *samples, int pos, void audio_sample_from_uint64(void *samples, int pos, uint64_t left, uint64_t right) { - struct st_sample *sample = samples; - sample += pos; #ifdef FLOAT_MIXENG error_report( "Coreaudio and floating point samples are not supported by replay yet"); abort(); #else + struct st_sample *sample = samples; + sample += pos; sample->l = left; sample->r = right; #endif diff --git a/audio/noaudio.c b/audio/noaudio.c index aac87dbc93..4fdee5adec 100644 --- a/audio/noaudio.c +++ b/audio/noaudio.c @@ -44,7 +44,7 @@ typedef struct NoVoiceIn { static size_t no_write(HWVoiceOut *hw, void *buf, size_t len) { NoVoiceOut *no = (NoVoiceOut *) hw; - return audio_rate_get_bytes(&hw->info, &no->rate, len); + return audio_rate_get_bytes(&no->rate, &hw->info, len); } static int no_init_out(HWVoiceOut *hw, struct audsettings *as, void *drv_opaque) @@ -89,7 +89,7 @@ static void no_fini_in (HWVoiceIn *hw) static size_t no_read(HWVoiceIn *hw, void *buf, size_t size) { NoVoiceIn *no = (NoVoiceIn *) hw; - int64_t bytes = audio_rate_get_bytes(&hw->info, &no->rate, size); + int64_t bytes = audio_rate_get_bytes(&no->rate, &hw->info, size); audio_pcm_info_clear_buf(&hw->info, buf, bytes / hw->info.bytes_per_frame); return bytes; @@ -118,6 +118,7 @@ static struct audio_pcm_ops no_pcm_ops = { .init_out = no_init_out, .fini_out = no_fini_out, .write = no_write, + .buffer_get_free = audio_generic_buffer_get_free, .run_buffer_out = audio_generic_run_buffer_out, .enable_out = no_enable_out, diff --git a/audio/ossaudio.c 
b/audio/ossaudio.c index 60eff66424..8e075edb70 100644 --- a/audio/ossaudio.c +++ b/audio/ossaudio.c @@ -63,7 +63,7 @@ struct oss_params { int fragsize; }; -static void GCC_FMT_ATTR (2, 3) oss_logerr (int err, const char *fmt, ...) +static void G_GNUC_PRINTF (2, 3) oss_logerr (int err, const char *fmt, ...) { va_list ap; @@ -74,7 +74,7 @@ static void GCC_FMT_ATTR (2, 3) oss_logerr (int err, const char *fmt, ...) AUD_log (AUDIO_CAP, "Reason: %s\n", strerror (err)); } -static void GCC_FMT_ATTR (3, 4) oss_logerr2 ( +static void G_GNUC_PRINTF (3, 4) oss_logerr2 ( int err, const char *typ, const char *fmt, @@ -389,11 +389,23 @@ static void oss_run_buffer_out(HWVoiceOut *hw) } } +static size_t oss_buffer_get_free(HWVoiceOut *hw) +{ + OSSVoiceOut *oss = (OSSVoiceOut *)hw; + + if (oss->mmapped) { + return oss_get_available_bytes(oss); + } else { + return audio_generic_buffer_get_free(hw); + } +} + static void *oss_get_buffer_out(HWVoiceOut *hw, size_t *size) { - OSSVoiceOut *oss = (OSSVoiceOut *) hw; + OSSVoiceOut *oss = (OSSVoiceOut *)hw; + if (oss->mmapped) { - *size = MIN(oss_get_available_bytes(oss), hw->size_emul - hw->pos_emul); + *size = hw->size_emul - hw->pos_emul; return hw->buf_emul + hw->pos_emul; } else { return audio_generic_get_buffer_out(hw, size); @@ -750,6 +762,7 @@ static struct audio_pcm_ops oss_pcm_ops = { .init_out = oss_init_out, .fini_out = oss_fini_out, .write = oss_write, + .buffer_get_free = oss_buffer_get_free, .run_buffer_out = oss_run_buffer_out, .get_buffer_out = oss_get_buffer_out, .put_buffer_out = oss_put_buffer_out, diff --git a/audio/paaudio.c b/audio/paaudio.c index 75401d5391..e91116f239 100644 --- a/audio/paaudio.c +++ b/audio/paaudio.c @@ -43,7 +43,7 @@ typedef struct { static void qpa_conn_fini(PAConnection *c); -static void GCC_FMT_ATTR (2, 3) qpa_logerr (int err, const char *fmt, ...) +static void G_GNUC_PRINTF (2, 3) qpa_logerr (int err, const char *fmt, ...) { va_list ap; @@ -201,13 +201,11 @@ unlock_and_fail: return 0; } -static void *qpa_get_buffer_out(HWVoiceOut *hw, size_t *size) +static size_t qpa_buffer_get_free(HWVoiceOut *hw) { - PAVoiceOut *p = (PAVoiceOut *) hw; + PAVoiceOut *p = (PAVoiceOut *)hw; PAConnection *c = p->g->conn; - void *ret; size_t l; - int r; pa_threaded_mainloop_lock(c->mainloop); @@ -216,7 +214,6 @@ static void *qpa_get_buffer_out(HWVoiceOut *hw, size_t *size) if (pa_stream_get_state(p->stream) != PA_STREAM_READY) { /* wait for stream to become ready */ l = 0; - ret = NULL; goto unlock; } @@ -224,16 +221,33 @@ static void *qpa_get_buffer_out(HWVoiceOut *hw, size_t *size) CHECK_SUCCESS_GOTO(c, l != (size_t) -1, unlock_and_fail, "pa_stream_writable_size failed\n"); +unlock: + pa_threaded_mainloop_unlock(c->mainloop); + return l; + +unlock_and_fail: + pa_threaded_mainloop_unlock(c->mainloop); + return 0; +} + +static void *qpa_get_buffer_out(HWVoiceOut *hw, size_t *size) +{ + PAVoiceOut *p = (PAVoiceOut *)hw; + PAConnection *c = p->g->conn; + void *ret; + int r; + + pa_threaded_mainloop_lock(c->mainloop); + + CHECK_DEAD_GOTO(c, p->stream, unlock_and_fail, + "pa_threaded_mainloop_lock failed\n"); + *size = -1; r = pa_stream_begin_write(p->stream, &ret, size); CHECK_SUCCESS_GOTO(c, r >= 0, unlock_and_fail, "pa_stream_begin_write failed\n"); -unlock: pa_threaded_mainloop_unlock(c->mainloop); - if (*size > l) { - *size = l; - } return ret; unlock_and_fail: @@ -535,11 +549,8 @@ static int qpa_init_out(HWVoiceOut *hw, struct audsettings *as, } audio_pcm_init_info (&hw->info, &obt_as); - /* - * This is wrong. 
hw->samples counts in frames. hw->samples will be - * number of channels times larger than expected. - */ - hw->samples = audio_buffer_samples( + /* hw->samples counts in frames */ + hw->samples = audio_buffer_frames( qapi_AudiodevPaPerDirectionOptions_base(ppdo), &obt_as, 46440); return 0; @@ -587,11 +598,8 @@ static int qpa_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque) } audio_pcm_init_info (&hw->info, &obt_as); - /* - * This is wrong. hw->samples counts in frames. hw->samples will be - * number of channels times larger than expected. - */ - hw->samples = audio_buffer_samples( + /* hw->samples counts in frames */ + hw->samples = audio_buffer_frames( qapi_AudiodevPaPerDirectionOptions_base(ppdo), &obt_as, 46440); return 0; @@ -744,7 +752,7 @@ static int qpa_validate_per_direction_opts(Audiodev *dev, { if (!pdo->has_latency) { pdo->has_latency = true; - pdo->latency = 15000; + pdo->latency = 46440; } return 1; } @@ -752,7 +760,7 @@ static int qpa_validate_per_direction_opts(Audiodev *dev, /* common */ static void *qpa_conn_init(const char *server) { - PAConnection *c = g_malloc0(sizeof(PAConnection)); + PAConnection *c = g_new0(PAConnection, 1); QTAILQ_INSERT_TAIL(&pa_conns, c, list); c->mainloop = pa_threaded_mainloop_new(); @@ -841,7 +849,7 @@ static void *qpa_audio_init(Audiodev *dev) return NULL; } - g = g_malloc0(sizeof(paaudio)); + g = g_new0(paaudio, 1); server = popts->has_server ? popts->server : NULL; g->dev = dev; @@ -901,6 +909,7 @@ static struct audio_pcm_ops qpa_pcm_ops = { .init_out = qpa_init_out, .fini_out = qpa_fini_out, .write = qpa_write, + .buffer_get_free = qpa_buffer_get_free, .get_buffer_out = qpa_get_buffer_out, .put_buffer_out = qpa_put_buffer_out, .volume_out = qpa_volume_out, diff --git a/audio/rate_template.h b/audio/rate_template.h index f94c940c61..b432719ebb 100644 --- a/audio/rate_template.h +++ b/audio/rate_template.h @@ -72,11 +72,6 @@ void NAME (void *opaque, struct st_sample *ibuf, struct st_sample *obuf, ilast = *ibuf++; rate->ipos++; - /* if ipos overflow, there is a infinite loop */ - if (rate->ipos == 0xffffffff) { - rate->ipos = 1; - rate->opos = rate->opos & 0xffffffff; - } /* See if we finished the input buffer yet */ if (ibuf >= iend) { goto the_end; @@ -85,6 +80,12 @@ void NAME (void *opaque, struct st_sample *ibuf, struct st_sample *obuf, icur = *ibuf; + /* wrap ipos and opos around long before they overflow */ + if (rate->ipos >= 0x10001) { + rate->ipos = 1; + rate->opos &= 0xffffffff; + } + /* interpolate */ #ifdef FLOAT_MIXENG #ifdef RECIPROCAL diff --git a/audio/sdlaudio.c b/audio/sdlaudio.c index c68c62a3e4..68a237b76b 100644 --- a/audio/sdlaudio.c +++ b/audio/sdlaudio.c @@ -55,7 +55,7 @@ typedef struct SDLVoiceIn { SDL_AudioDeviceID devid; } SDLVoiceIn; -static void GCC_FMT_ATTR (1, 2) sdl_logerr (const char *fmt, ...) +static void G_GNUC_PRINTF (1, 2) sdl_logerr (const char *fmt, ...) 
{ va_list ap; @@ -224,12 +224,11 @@ static void sdl_callback_out(void *opaque, Uint8 *buf, int len) /* dolog("callback_out: len=%d avail=%zu\n", len, hw->pending_emul); */ while (hw->pending_emul && len) { - size_t write_len; - ssize_t start = (ssize_t)hw->pos_emul - hw->pending_emul; - if (start < 0) { - start += hw->size_emul; - } - assert(start >= 0 && start < hw->size_emul); + size_t write_len, start; + + start = audio_ring_posb(hw->pos_emul, hw->pending_emul, + hw->size_emul); + assert(start < hw->size_emul); write_len = MIN(MIN(hw->pending_emul, len), hw->size_emul - start); @@ -310,6 +309,7 @@ static void sdl_callback_in(void *opaque, Uint8 *buf, int len) SDL_UnlockAudioDevice(sdl->devid); \ } +SDL_WRAPPER_FUNC(buffer_get_free, size_t, (HWVoiceOut *hw), (hw), Out) SDL_WRAPPER_FUNC(get_buffer_out, void *, (HWVoiceOut *hw, size_t *size), (hw, size), Out) SDL_WRAPPER_FUNC(put_buffer_out, size_t, @@ -347,11 +347,8 @@ static int sdl_init_out(HWVoiceOut *hw, struct audsettings *as, req.freq = as->freq; req.format = aud_to_sdlfmt (as->fmt); req.channels = as->nchannels; - /* - * This is wrong. SDL samples are QEMU frames. The buffer size will be - * the requested buffer size multiplied by the number of channels. - */ - req.samples = audio_buffer_samples( + /* SDL samples are QEMU frames */ + req.samples = audio_buffer_frames( qapi_AudiodevSdlPerDirectionOptions_base(spdo), as, 11610); req.callback = sdl_callback_out; req.userdata = sdl; @@ -472,6 +469,8 @@ static struct audio_pcm_ops sdl_pcm_ops = { .fini_out = sdl_fini_out, /* wrapper for audio_generic_write */ .write = sdl_write, + /* wrapper for audio_generic_buffer_get_free */ + .buffer_get_free = sdl_buffer_get_free, /* wrapper for audio_generic_get_buffer_out */ .get_buffer_out = sdl_get_buffer_out, /* wrapper for audio_generic_put_buffer_out */ diff --git a/audio/sndioaudio.c b/audio/sndioaudio.c new file mode 100644 index 0000000000..7c45276d36 --- /dev/null +++ b/audio/sndioaudio.c @@ -0,0 +1,565 @@ +/* + * SPDX-License-Identifier: ISC + * + * Copyright (c) 2019 Alexandre Ratchov + */ + +/* + * TODO : + * + * Use a single device and open it in full-duplex rather than + * opening it twice (once for playback once for recording). + * + * This is the only way to ensure that playback doesn't drift with respect + * to recording, which is what guest systems expect. 
+ */ + +#include +#include +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "audio.h" +#include "trace.h" + +#define AUDIO_CAP "sndio" +#include "audio_int.h" + +/* default latency in microseconds if no option is set */ +#define SNDIO_LATENCY_US 50000 + +typedef struct SndioVoice { + union { + HWVoiceOut out; + HWVoiceIn in; + } hw; + struct sio_par par; + struct sio_hdl *hdl; + struct pollfd *pfds; + struct pollindex { + struct SndioVoice *self; + int index; + } *pindexes; + unsigned char *buf; + size_t buf_size; + size_t sndio_pos; + size_t qemu_pos; + unsigned int mode; + unsigned int nfds; + bool enabled; +} SndioVoice; + +typedef struct SndioConf { + const char *devname; + unsigned int latency; +} SndioConf; + +/* needed for forward reference */ +static void sndio_poll_in(void *arg); +static void sndio_poll_out(void *arg); + +/* + * stop polling descriptors + */ +static void sndio_poll_clear(SndioVoice *self) +{ + struct pollfd *pfd; + int i; + + for (i = 0; i < self->nfds; i++) { + pfd = &self->pfds[i]; + qemu_set_fd_handler(pfd->fd, NULL, NULL, NULL); + } + + self->nfds = 0; +} + +/* + * write data to the device until it blocks or + * all of our buffered data is written + */ +static void sndio_write(SndioVoice *self) +{ + size_t todo, n; + + todo = self->qemu_pos - self->sndio_pos; + + /* + * transfer data to device, until it blocks + */ + while (todo > 0) { + n = sio_write(self->hdl, self->buf + self->sndio_pos, todo); + if (n == 0) { + break; + } + self->sndio_pos += n; + todo -= n; + } + + if (self->sndio_pos == self->buf_size) { + /* + * we complete the block + */ + self->sndio_pos = 0; + self->qemu_pos = 0; + } +} + +/* + * read data from the device until it blocks or + * there no room any longer + */ +static void sndio_read(SndioVoice *self) +{ + size_t todo, n; + + todo = self->buf_size - self->sndio_pos; + + /* + * transfer data from the device, until it blocks + */ + while (todo > 0) { + n = sio_read(self->hdl, self->buf + self->sndio_pos, todo); + if (n == 0) { + break; + } + self->sndio_pos += n; + todo -= n; + } +} + +/* + * Set handlers for all descriptors libsndio needs to + * poll + */ +static void sndio_poll_wait(SndioVoice *self) +{ + struct pollfd *pfd; + int events, i; + + events = 0; + if (self->mode == SIO_PLAY) { + if (self->sndio_pos < self->qemu_pos) { + events |= POLLOUT; + } + } else { + if (self->sndio_pos < self->buf_size) { + events |= POLLIN; + } + } + + /* + * fill the given array of descriptors with the events sndio + * wants, they are different from our 'event' variable because + * sndio may use descriptors internally. + */ + self->nfds = sio_pollfd(self->hdl, self->pfds, events); + + for (i = 0; i < self->nfds; i++) { + pfd = &self->pfds[i]; + if (pfd->fd < 0) { + continue; + } + qemu_set_fd_handler(pfd->fd, + (pfd->events & POLLIN) ? sndio_poll_in : NULL, + (pfd->events & POLLOUT) ? sndio_poll_out : NULL, + &self->pindexes[i]); + pfd->revents = 0; + } +} + +/* + * call-back called when one of the descriptors + * became readable or writable + */ +static void sndio_poll_event(SndioVoice *self, int index, int event) +{ + int revents; + + /* + * ensure we're not called twice this cycle + */ + sndio_poll_clear(self); + + /* + * make self->pfds[] look as we're returning from poll syscal, + * this is how sio_revents expects events to be. + */ + self->pfds[index].revents = event; + + /* + * tell sndio to handle events and return whether we can read or + * write without blocking. 
+ */ + revents = sio_revents(self->hdl, self->pfds); + if (self->mode == SIO_PLAY) { + if (revents & POLLOUT) { + sndio_write(self); + } + + if (self->qemu_pos < self->buf_size) { + audio_run(self->hw.out.s, "sndio_out"); + } + } else { + if (revents & POLLIN) { + sndio_read(self); + } + + if (self->qemu_pos < self->sndio_pos) { + audio_run(self->hw.in.s, "sndio_in"); + } + } + + /* + * audio_run() may have changed state + */ + if (self->enabled) { + sndio_poll_wait(self); + } +} + +/* + * return the upper limit of the amount of free play buffer space + */ +static size_t sndio_buffer_get_free(HWVoiceOut *hw) +{ + SndioVoice *self = (SndioVoice *) hw; + + return self->buf_size - self->qemu_pos; +} + +/* + * return a buffer where data to play can be stored, + * its size is stored in the location pointed by the size argument. + */ +static void *sndio_get_buffer_out(HWVoiceOut *hw, size_t *size) +{ + SndioVoice *self = (SndioVoice *) hw; + + *size = self->buf_size - self->qemu_pos; + return self->buf + self->qemu_pos; +} + +/* + * put back to sndio back-end a buffer returned by sndio_get_buffer_out() + */ +static size_t sndio_put_buffer_out(HWVoiceOut *hw, void *buf, size_t size) +{ + SndioVoice *self = (SndioVoice *) hw; + + self->qemu_pos += size; + sndio_poll_wait(self); + return size; +} + +/* + * return a buffer from where recorded data is available, + * its size is stored in the location pointed by the size argument. + * it may not exceed the initial value of "*size". + */ +static void *sndio_get_buffer_in(HWVoiceIn *hw, size_t *size) +{ + SndioVoice *self = (SndioVoice *) hw; + size_t todo, max_todo; + + /* + * unlike the get_buffer_out() method, get_buffer_in() + * must return a buffer of at most the given size, see audio.c + */ + max_todo = *size; + + todo = self->sndio_pos - self->qemu_pos; + if (todo > max_todo) { + todo = max_todo; + } + + *size = todo; + return self->buf + self->qemu_pos; +} + +/* + * discard the given amount of recorded data + */ +static void sndio_put_buffer_in(HWVoiceIn *hw, void *buf, size_t size) +{ + SndioVoice *self = (SndioVoice *) hw; + + self->qemu_pos += size; + if (self->qemu_pos == self->buf_size) { + self->qemu_pos = 0; + self->sndio_pos = 0; + } + sndio_poll_wait(self); +} + +/* + * call-back called when one of our descriptors becomes writable + */ +static void sndio_poll_out(void *arg) +{ + struct pollindex *pindex = (struct pollindex *) arg; + + sndio_poll_event(pindex->self, pindex->index, POLLOUT); +} + +/* + * call-back called when one of our descriptors becomes readable + */ +static void sndio_poll_in(void *arg) +{ + struct pollindex *pindex = (struct pollindex *) arg; + + sndio_poll_event(pindex->self, pindex->index, POLLIN); +} + +static void sndio_fini(SndioVoice *self) +{ + if (self->hdl) { + sio_close(self->hdl); + self->hdl = NULL; + } + + g_free(self->pfds); + g_free(self->pindexes); + g_free(self->buf); +} + +static int sndio_init(SndioVoice *self, + struct audsettings *as, int mode, Audiodev *dev) +{ + AudiodevSndioOptions *opts = &dev->u.sndio; + unsigned long long latency; + const char *dev_name; + struct sio_par req; + unsigned int nch; + int i, nfds; + + dev_name = opts->has_dev ? opts->dev : SIO_DEVANY; + latency = opts->has_latency ? 
opts->latency : SNDIO_LATENCY_US; + + /* open the device in non-blocking mode */ + self->hdl = sio_open(dev_name, mode, 1); + if (self->hdl == NULL) { + dolog("failed to open device\n"); + return -1; + } + + self->mode = mode; + + sio_initpar(&req); + + switch (as->fmt) { + case AUDIO_FORMAT_S8: + req.bits = 8; + req.sig = 1; + break; + case AUDIO_FORMAT_U8: + req.bits = 8; + req.sig = 0; + break; + case AUDIO_FORMAT_S16: + req.bits = 16; + req.sig = 1; + break; + case AUDIO_FORMAT_U16: + req.bits = 16; + req.sig = 0; + break; + case AUDIO_FORMAT_S32: + req.bits = 32; + req.sig = 1; + break; + case AUDIO_FORMAT_U32: + req.bits = 32; + req.sig = 0; + break; + default: + dolog("unknown audio sample format\n"); + return -1; + } + + if (req.bits > 8) { + req.le = as->endianness ? 0 : 1; + } + + req.rate = as->freq; + if (mode == SIO_PLAY) { + req.pchan = as->nchannels; + } else { + req.rchan = as->nchannels; + } + + /* set on-device buffer size */ + req.appbufsz = req.rate * latency / 1000000; + + if (!sio_setpar(self->hdl, &req)) { + dolog("failed set audio params\n"); + goto fail; + } + + if (!sio_getpar(self->hdl, &self->par)) { + dolog("failed get audio params\n"); + goto fail; + } + + nch = (mode == SIO_PLAY) ? self->par.pchan : self->par.rchan; + + /* + * With the default setup, sndio supports any combination of parameters + * so these checks are mostly to catch configuration errors. + */ + if (self->par.bits != req.bits || self->par.bps != req.bits / 8 || + self->par.sig != req.sig || (req.bits > 8 && self->par.le != req.le) || + self->par.rate != as->freq || nch != as->nchannels) { + dolog("unsupported audio params\n"); + goto fail; + } + + /* + * we use one block as buffer size; this is how + * transfers get well aligned + */ + self->buf_size = self->par.round * self->par.bps * nch; + + self->buf = g_malloc(self->buf_size); + if (self->buf == NULL) { + dolog("failed to allocate audio buffer\n"); + goto fail; + } + + nfds = sio_nfds(self->hdl); + + self->pfds = g_malloc_n(nfds, sizeof(struct pollfd)); + if (self->pfds == NULL) { + dolog("failed to allocate pollfd structures\n"); + goto fail; + } + + self->pindexes = g_malloc_n(nfds, sizeof(struct pollindex)); + if (self->pindexes == NULL) { + dolog("failed to allocate pollindex structures\n"); + goto fail; + } + + for (i = 0; i < nfds; i++) { + self->pindexes[i].self = self; + self->pindexes[i].index = i; + } + + return 0; +fail: + sndio_fini(self); + return -1; +} + +static void sndio_enable(SndioVoice *self, bool enable) +{ + if (enable) { + sio_start(self->hdl); + self->enabled = true; + sndio_poll_wait(self); + } else { + self->enabled = false; + sndio_poll_clear(self); + sio_stop(self->hdl); + } +} + +static void sndio_enable_out(HWVoiceOut *hw, bool enable) +{ + SndioVoice *self = (SndioVoice *) hw; + + sndio_enable(self, enable); +} + +static void sndio_enable_in(HWVoiceIn *hw, bool enable) +{ + SndioVoice *self = (SndioVoice *) hw; + + sndio_enable(self, enable); +} + +static int sndio_init_out(HWVoiceOut *hw, struct audsettings *as, void *opaque) +{ + SndioVoice *self = (SndioVoice *) hw; + + if (sndio_init(self, as, SIO_PLAY, opaque) == -1) { + return -1; + } + + audio_pcm_init_info(&hw->info, as); + hw->samples = self->par.round; + return 0; +} + +static int sndio_init_in(HWVoiceIn *hw, struct audsettings *as, void *opaque) +{ + SndioVoice *self = (SndioVoice *) hw; + + if (sndio_init(self, as, SIO_REC, opaque) == -1) { + return -1; + } + + audio_pcm_init_info(&hw->info, as); + hw->samples = self->par.round; + return 0; +} + 
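For reference, the buffer sizing in sndio_init() above boils down to two formulas: the on-device buffer is derived from the requested latency (req.appbufsz = req.rate * latency / 1000000), and QEMU-side transfers use exactly one device block of par.round frames. A minimal worked example follows, using assumed stream parameters (48 kHz, 16-bit stereo, a device round of 480 frames); only the formulas come from the code above, the concrete numbers are assumptions.

#include <stdio.h>

int main(void)
{
    /* assumed stream parameters; 50000 us is the SNDIO_LATENCY_US default above */
    unsigned long long latency = 50000;   /* latency in microseconds */
    unsigned int rate = 48000;            /* as->freq */
    unsigned int round = 480;             /* par.round reported by the device (assumed) */
    unsigned int bps = 2, nch = 2;        /* 16-bit samples, two channels */

    /* req.appbufsz = req.rate * latency / 1000000  ->  2400 frames on the device */
    unsigned long long appbufsz = rate * latency / 1000000;

    /* self->buf_size = par.round * par.bps * nch  ->  1920 bytes per transfer block */
    unsigned long long buf_size = (unsigned long long)round * bps * nch;

    printf("appbufsz = %llu frames, buf_size = %llu bytes\n", appbufsz, buf_size);
    return 0;
}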
+static void sndio_fini_out(HWVoiceOut *hw) +{ + SndioVoice *self = (SndioVoice *) hw; + + sndio_fini(self); +} + +static void sndio_fini_in(HWVoiceIn *hw) +{ + SndioVoice *self = (SndioVoice *) hw; + + sndio_fini(self); +} + +static void *sndio_audio_init(Audiodev *dev) +{ + assert(dev->driver == AUDIODEV_DRIVER_SNDIO); + return dev; +} + +static void sndio_audio_fini(void *opaque) +{ +} + +static struct audio_pcm_ops sndio_pcm_ops = { + .init_out = sndio_init_out, + .fini_out = sndio_fini_out, + .enable_out = sndio_enable_out, + .write = audio_generic_write, + .buffer_get_free = sndio_buffer_get_free, + .get_buffer_out = sndio_get_buffer_out, + .put_buffer_out = sndio_put_buffer_out, + .init_in = sndio_init_in, + .fini_in = sndio_fini_in, + .read = audio_generic_read, + .enable_in = sndio_enable_in, + .get_buffer_in = sndio_get_buffer_in, + .put_buffer_in = sndio_put_buffer_in, +}; + +static struct audio_driver sndio_audio_driver = { + .name = "sndio", + .descr = "sndio https://sndio.org", + .init = sndio_audio_init, + .fini = sndio_audio_fini, + .pcm_ops = &sndio_pcm_ops, + .can_be_default = 1, + .max_voices_out = INT_MAX, + .max_voices_in = INT_MAX, + .voice_size_out = sizeof(SndioVoice), + .voice_size_in = sizeof(SndioVoice) +}; + +static void register_audio_sndio(void) +{ + audio_driver_register(&sndio_audio_driver); +} + +type_init(register_audio_sndio); diff --git a/audio/spiceaudio.c b/audio/spiceaudio.c index a8d370fe6f..d17ef1a25e 100644 --- a/audio/spiceaudio.c +++ b/audio/spiceaudio.c @@ -120,6 +120,13 @@ static void line_out_fini (HWVoiceOut *hw) spice_server_remove_interface (&out->sin.base); } +static size_t line_out_get_free(HWVoiceOut *hw) +{ + SpiceVoiceOut *out = container_of(hw, SpiceVoiceOut, hw); + + return audio_rate_peek_bytes(&out->rate, &hw->info); +} + static void *line_out_get_buffer(HWVoiceOut *hw, size_t *size) { SpiceVoiceOut *out = container_of(hw, SpiceVoiceOut, hw); @@ -133,8 +140,6 @@ static void *line_out_get_buffer(HWVoiceOut *hw, size_t *size) *size = MIN((out->fsize - out->fpos) << 2, *size); } - *size = audio_rate_get_bytes(&hw->info, &out->rate, *size); - return out->frame + out->fpos; } @@ -142,6 +147,8 @@ static size_t line_out_put_buffer(HWVoiceOut *hw, void *buf, size_t size) { SpiceVoiceOut *out = container_of(hw, SpiceVoiceOut, hw); + audio_rate_add_bytes(&out->rate, size); + if (buf) { assert(buf == out->frame + out->fpos && out->fpos <= out->fsize); out->fpos += size >> 2; @@ -232,10 +239,13 @@ static void line_in_fini (HWVoiceIn *hw) static size_t line_in_read(HWVoiceIn *hw, void *buf, size_t len) { SpiceVoiceIn *in = container_of (hw, SpiceVoiceIn, hw); - uint64_t to_read = audio_rate_get_bytes(&hw->info, &in->rate, len) >> 2; + uint64_t to_read = audio_rate_get_bytes(&in->rate, &hw->info, len) >> 2; size_t ready = spice_server_record_get_samples(&in->sin, buf, to_read); - /* XXX: do we need this? */ + /* + * If the client didn't send new frames, it most likely disconnected. + * Generate silence in this case to avoid a stalled audio stream. 
+ */ if (ready == 0) { memset(buf, 0, to_read << 2); ready = to_read; @@ -282,6 +292,7 @@ static struct audio_pcm_ops audio_callbacks = { .init_out = line_out_init, .fini_out = line_out_fini, .write = audio_generic_write, + .buffer_get_free = line_out_get_free, .get_buffer_out = line_out_get_buffer, .put_buffer_out = line_out_put_buffer, .enable_out = line_out_enable, diff --git a/audio/trace-events b/audio/trace-events index 957c92337b..e1ab643add 100644 --- a/audio/trace-events +++ b/audio/trace-events @@ -13,6 +13,11 @@ alsa_resume_out(void) "Resuming suspended output stream" # ossaudio.c oss_version(int version) "OSS version = 0x%x" +# dbusaudio.c +dbus_audio_register(const char *s, const char *dir) "sender = %s, dir = %s" +dbus_audio_put_buffer_out(size_t len) "len = %zu" +dbus_audio_read(size_t len) "len = %zu" + # audio.c audio_timer_start(int interval) "interval %d ms" audio_timer_stop(void) "" diff --git a/audio/wavaudio.c b/audio/wavaudio.c index 20e6853f85..3e1d84db83 100644 --- a/audio/wavaudio.c +++ b/audio/wavaudio.c @@ -42,7 +42,7 @@ typedef struct WAVVoiceOut { static size_t wav_write_out(HWVoiceOut *hw, void *buf, size_t len) { WAVVoiceOut *wav = (WAVVoiceOut *) hw; - int64_t bytes = audio_rate_get_bytes(&hw->info, &wav->rate, len); + int64_t bytes = audio_rate_get_bytes(&wav->rate, &hw->info, len); assert(bytes % hw->info.bytes_per_frame == 0); if (bytes && fwrite(buf, bytes, 1, wav->f) != 1) { @@ -197,6 +197,7 @@ static struct audio_pcm_ops wav_pcm_ops = { .init_out = wav_init_out, .fini_out = wav_fini_out, .write = wav_write_out, + .buffer_get_free = audio_generic_buffer_get_free, .run_buffer_out = audio_generic_run_buffer_out, .enable_out = wav_enable_out, }; diff --git a/backends/cryptodev-builtin.c b/backends/cryptodev-builtin.c index 0671bf9f3e..cda6ca3b71 100644 --- a/backends/cryptodev-builtin.c +++ b/backends/cryptodev-builtin.c @@ -26,6 +26,7 @@ #include "qapi/error.h" #include "standard-headers/linux/virtio_crypto.h" #include "crypto/cipher.h" +#include "crypto/akcipher.h" #include "qom/object.h" @@ -42,10 +43,11 @@ typedef struct CryptoDevBackendBuiltinSession { QCryptoCipher *cipher; uint8_t direction; /* encryption or decryption */ uint8_t type; /* cipher? hash? aead? */ + QCryptoAkCipher *akcipher; QTAILQ_ENTRY(CryptoDevBackendBuiltinSession) next; } CryptoDevBackendBuiltinSession; -/* Max number of symmetric sessions */ +/* Max number of symmetric/asymmetric sessions */ #define MAX_NUM_SESSIONS 256 #define CRYPTODEV_BUITLIN_MAX_AUTH_KEY_LEN 512 @@ -80,15 +82,17 @@ static void cryptodev_builtin_init( backend->conf.crypto_services = 1u << VIRTIO_CRYPTO_SERVICE_CIPHER | 1u << VIRTIO_CRYPTO_SERVICE_HASH | - 1u << VIRTIO_CRYPTO_SERVICE_MAC; + 1u << VIRTIO_CRYPTO_SERVICE_MAC | + 1u << VIRTIO_CRYPTO_SERVICE_AKCIPHER; backend->conf.cipher_algo_l = 1u << VIRTIO_CRYPTO_CIPHER_AES_CBC; backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1; + backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA; /* * Set the Maximum length of crypto request. * Why this value? Just avoid to overflow when * memory allocation for each crypto request. 
*/ - backend->conf.max_size = LONG_MAX - sizeof(CryptoDevBackendSymOpInfo); + backend->conf.max_size = LONG_MAX - sizeof(CryptoDevBackendOpInfo); backend->conf.max_cipher_key_len = CRYPTODEV_BUITLIN_MAX_CIPHER_KEY_LEN; backend->conf.max_auth_key_len = CRYPTODEV_BUITLIN_MAX_AUTH_KEY_LEN; @@ -148,6 +152,55 @@ err: return -1; } +static int cryptodev_builtin_get_rsa_hash_algo( + int virtio_rsa_hash, Error **errp) +{ + switch (virtio_rsa_hash) { + case VIRTIO_CRYPTO_RSA_MD5: + return QCRYPTO_HASH_ALG_MD5; + + case VIRTIO_CRYPTO_RSA_SHA1: + return QCRYPTO_HASH_ALG_SHA1; + + case VIRTIO_CRYPTO_RSA_SHA256: + return QCRYPTO_HASH_ALG_SHA256; + + case VIRTIO_CRYPTO_RSA_SHA512: + return QCRYPTO_HASH_ALG_SHA512; + + default: + error_setg(errp, "Unsupported rsa hash algo: %d", virtio_rsa_hash); + return -1; + } +} + +static int cryptodev_builtin_set_rsa_options( + int virtio_padding_algo, + int virtio_hash_algo, + QCryptoAkCipherOptionsRSA *opt, + Error **errp) +{ + if (virtio_padding_algo == VIRTIO_CRYPTO_RSA_PKCS1_PADDING) { + int hash_alg; + + hash_alg = cryptodev_builtin_get_rsa_hash_algo(virtio_hash_algo, errp); + if (hash_alg < 0) { + return -1; + } + opt->hash_alg = hash_alg; + opt->padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1; + return 0; + } + + if (virtio_padding_algo == VIRTIO_CRYPTO_RSA_RAW_PADDING) { + opt->padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW; + return 0; + } + + error_setg(errp, "Unsupported rsa padding algo: %d", virtio_padding_algo); + return -1; +} + static int cryptodev_builtin_create_cipher_session( CryptoDevBackendBuiltin *builtin, CryptoDevBackendSymSessionInfo *sess_info, @@ -240,78 +293,158 @@ static int cryptodev_builtin_create_cipher_session( return index; } -static int64_t cryptodev_builtin_sym_create_session( - CryptoDevBackend *backend, - CryptoDevBackendSymSessionInfo *sess_info, - uint32_t queue_index, Error **errp) +static int cryptodev_builtin_create_akcipher_session( + CryptoDevBackendBuiltin *builtin, + CryptoDevBackendAsymSessionInfo *sess_info, + Error **errp) { - CryptoDevBackendBuiltin *builtin = - CRYPTODEV_BACKEND_BUILTIN(backend); - int64_t session_id = -1; - int ret; + CryptoDevBackendBuiltinSession *sess; + QCryptoAkCipher *akcipher; + int index; + QCryptoAkCipherKeyType type; + QCryptoAkCipherOptions opts; - switch (sess_info->op_code) { - case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION: - ret = cryptodev_builtin_create_cipher_session( - builtin, sess_info, errp); - if (ret < 0) { - return ret; - } else { - session_id = ret; + switch (sess_info->algo) { + case VIRTIO_CRYPTO_AKCIPHER_RSA: + opts.alg = QCRYPTO_AKCIPHER_ALG_RSA; + if (cryptodev_builtin_set_rsa_options(sess_info->u.rsa.padding_algo, + sess_info->u.rsa.hash_algo, &opts.u.rsa, errp) != 0) { + return -1; } break; - case VIRTIO_CRYPTO_HASH_CREATE_SESSION: - case VIRTIO_CRYPTO_MAC_CREATE_SESSION: + + /* TODO support DSA&ECDSA until qemu crypto framework support these */ + default: - error_setg(errp, "Unsupported opcode :%" PRIu32 "", - sess_info->op_code); + error_setg(errp, "Unsupported akcipher alg %u", sess_info->algo); return -1; } - return session_id; + switch (sess_info->keytype) { + case VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC: + type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC; + break; + + case VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE: + type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE; + break; + + default: + error_setg(errp, "Unsupported akcipher keytype %u", sess_info->keytype); + return -1; + } + + index = cryptodev_builtin_get_unused_session_index(builtin); + if (index < 0) { + error_setg(errp, "Total number 
of sessions created exceeds %u", + MAX_NUM_SESSIONS); + return -1; + } + + akcipher = qcrypto_akcipher_new(&opts, type, sess_info->key, + sess_info->keylen, errp); + if (!akcipher) { + return -1; + } + + sess = g_new0(CryptoDevBackendBuiltinSession, 1); + sess->akcipher = akcipher; + + builtin->sessions[index] = sess; + + return index; } -static int cryptodev_builtin_sym_close_session( +static int cryptodev_builtin_create_session( CryptoDevBackend *backend, - uint64_t session_id, - uint32_t queue_index, Error **errp) + CryptoDevBackendSessionInfo *sess_info, + uint32_t queue_index, + CryptoDevCompletionFunc cb, + void *opaque) { CryptoDevBackendBuiltin *builtin = CRYPTODEV_BACKEND_BUILTIN(backend); + CryptoDevBackendSymSessionInfo *sym_sess_info; + CryptoDevBackendAsymSessionInfo *asym_sess_info; + int ret, status; + Error *local_error = NULL; + + switch (sess_info->op_code) { + case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION: + sym_sess_info = &sess_info->u.sym_sess_info; + ret = cryptodev_builtin_create_cipher_session( + builtin, sym_sess_info, &local_error); + break; + + case VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION: + asym_sess_info = &sess_info->u.asym_sess_info; + ret = cryptodev_builtin_create_akcipher_session( + builtin, asym_sess_info, &local_error); + break; + + case VIRTIO_CRYPTO_HASH_CREATE_SESSION: + case VIRTIO_CRYPTO_MAC_CREATE_SESSION: + default: + error_setg(&local_error, "Unsupported opcode :%" PRIu32 "", + sess_info->op_code); + return -VIRTIO_CRYPTO_NOTSUPP; + } + + if (local_error) { + error_report_err(local_error); + } + if (ret < 0) { + status = -VIRTIO_CRYPTO_ERR; + } else { + sess_info->session_id = ret; + status = VIRTIO_CRYPTO_OK; + } + if (cb) { + cb(opaque, status); + } + return 0; +} + +static int cryptodev_builtin_close_session( + CryptoDevBackend *backend, + uint64_t session_id, + uint32_t queue_index, + CryptoDevCompletionFunc cb, + void *opaque) +{ + CryptoDevBackendBuiltin *builtin = + CRYPTODEV_BACKEND_BUILTIN(backend); + CryptoDevBackendBuiltinSession *session; assert(session_id < MAX_NUM_SESSIONS && builtin->sessions[session_id]); - qcrypto_cipher_free(builtin->sessions[session_id]->cipher); - g_free(builtin->sessions[session_id]); + session = builtin->sessions[session_id]; + if (session->cipher) { + qcrypto_cipher_free(session->cipher); + } else if (session->akcipher) { + qcrypto_akcipher_free(session->akcipher); + } + + g_free(session); builtin->sessions[session_id] = NULL; + if (cb) { + cb(opaque, VIRTIO_CRYPTO_OK); + } return 0; } static int cryptodev_builtin_sym_operation( - CryptoDevBackend *backend, - CryptoDevBackendSymOpInfo *op_info, - uint32_t queue_index, Error **errp) + CryptoDevBackendBuiltinSession *sess, + CryptoDevBackendSymOpInfo *op_info, Error **errp) { - CryptoDevBackendBuiltin *builtin = - CRYPTODEV_BACKEND_BUILTIN(backend); - CryptoDevBackendBuiltinSession *sess; int ret; - if (op_info->session_id >= MAX_NUM_SESSIONS || - builtin->sessions[op_info->session_id] == NULL) { - error_setg(errp, "Cannot find a valid session id: %" PRIu64 "", - op_info->session_id); - return -VIRTIO_CRYPTO_INVSESS; - } - if (op_info->op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) { error_setg(errp, "Algorithm chain is unsupported for cryptdoev-builtin"); return -VIRTIO_CRYPTO_NOTSUPP; } - sess = builtin->sessions[op_info->session_id]; - if (op_info->iv_len > 0) { ret = qcrypto_cipher_setiv(sess->cipher, op_info->iv, op_info->iv_len, errp); @@ -333,9 +466,109 @@ static int cryptodev_builtin_sym_operation( return -VIRTIO_CRYPTO_ERR; } } + return 
VIRTIO_CRYPTO_OK; } +static int cryptodev_builtin_asym_operation( + CryptoDevBackendBuiltinSession *sess, uint32_t op_code, + CryptoDevBackendAsymOpInfo *op_info, Error **errp) +{ + int ret; + + switch (op_code) { + case VIRTIO_CRYPTO_AKCIPHER_ENCRYPT: + ret = qcrypto_akcipher_encrypt(sess->akcipher, + op_info->src, op_info->src_len, + op_info->dst, op_info->dst_len, errp); + break; + + case VIRTIO_CRYPTO_AKCIPHER_DECRYPT: + ret = qcrypto_akcipher_decrypt(sess->akcipher, + op_info->src, op_info->src_len, + op_info->dst, op_info->dst_len, errp); + break; + + case VIRTIO_CRYPTO_AKCIPHER_SIGN: + ret = qcrypto_akcipher_sign(sess->akcipher, + op_info->src, op_info->src_len, + op_info->dst, op_info->dst_len, errp); + break; + + case VIRTIO_CRYPTO_AKCIPHER_VERIFY: + ret = qcrypto_akcipher_verify(sess->akcipher, + op_info->src, op_info->src_len, + op_info->dst, op_info->dst_len, errp); + break; + + default: + return -VIRTIO_CRYPTO_ERR; + } + + if (ret < 0) { + if (op_code == VIRTIO_CRYPTO_AKCIPHER_VERIFY) { + return -VIRTIO_CRYPTO_KEY_REJECTED; + } + return -VIRTIO_CRYPTO_ERR; + } + + /* Buffer is too short, typically the driver should handle this case */ + if (unlikely(ret > op_info->dst_len)) { + if (errp && !*errp) { + error_setg(errp, "dst buffer too short"); + } + + return -VIRTIO_CRYPTO_ERR; + } + + op_info->dst_len = ret; + + return VIRTIO_CRYPTO_OK; +} + +static int cryptodev_builtin_operation( + CryptoDevBackend *backend, + CryptoDevBackendOpInfo *op_info, + uint32_t queue_index, + CryptoDevCompletionFunc cb, + void *opaque) +{ + CryptoDevBackendBuiltin *builtin = + CRYPTODEV_BACKEND_BUILTIN(backend); + CryptoDevBackendBuiltinSession *sess; + CryptoDevBackendSymOpInfo *sym_op_info; + CryptoDevBackendAsymOpInfo *asym_op_info; + enum CryptoDevBackendAlgType algtype = op_info->algtype; + int status = -VIRTIO_CRYPTO_ERR; + Error *local_error = NULL; + + if (op_info->session_id >= MAX_NUM_SESSIONS || + builtin->sessions[op_info->session_id] == NULL) { + error_setg(&local_error, "Cannot find a valid session id: %" PRIu64 "", + op_info->session_id); + return -VIRTIO_CRYPTO_INVSESS; + } + + sess = builtin->sessions[op_info->session_id]; + if (algtype == CRYPTODEV_BACKEND_ALG_SYM) { + sym_op_info = op_info->u.sym_op_info; + status = cryptodev_builtin_sym_operation(sess, sym_op_info, + &local_error); + } else if (algtype == CRYPTODEV_BACKEND_ALG_ASYM) { + asym_op_info = op_info->u.asym_op_info; + status = cryptodev_builtin_asym_operation(sess, op_info->op_code, + asym_op_info, &local_error); + } + + if (local_error) { + error_report_err(local_error); + } + if (cb) { + cb(opaque, status); + } + return 0; +} + static void cryptodev_builtin_cleanup( CryptoDevBackend *backend, Error **errp) @@ -348,7 +581,7 @@ static void cryptodev_builtin_cleanup( for (i = 0; i < MAX_NUM_SESSIONS; i++) { if (builtin->sessions[i] != NULL) { - cryptodev_builtin_sym_close_session(backend, i, 0, &error_abort); + cryptodev_builtin_close_session(backend, i, 0, NULL, NULL); } } @@ -370,9 +603,9 @@ cryptodev_builtin_class_init(ObjectClass *oc, void *data) bc->init = cryptodev_builtin_init; bc->cleanup = cryptodev_builtin_cleanup; - bc->create_session = cryptodev_builtin_sym_create_session; - bc->close_session = cryptodev_builtin_sym_close_session; - bc->do_sym_op = cryptodev_builtin_sym_operation; + bc->create_session = cryptodev_builtin_create_session; + bc->close_session = cryptodev_builtin_close_session; + bc->do_op = cryptodev_builtin_operation; } static const TypeInfo cryptodev_builtin_info = { diff --git 
a/backends/cryptodev-lkcf.c b/backends/cryptodev-lkcf.c new file mode 100644 index 0000000000..133bd706a4 --- /dev/null +++ b/backends/cryptodev-lkcf.c @@ -0,0 +1,645 @@ +/* + * QEMU Cryptodev backend for QEMU cipher APIs + * + * Copyright (c) 2022 Bytedance.Inc + * + * Authors: + * lei he + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +#include "qemu/osdep.h" +#include "crypto/cipher.h" +#include "crypto/akcipher.h" +#include "qapi/error.h" +#include "qemu/main-loop.h" +#include "qemu/thread.h" +#include "qemu/error-report.h" +#include "qemu/queue.h" +#include "qom/object.h" +#include "sysemu/cryptodev.h" +#include "standard-headers/linux/virtio_crypto.h" + +#include +#include + +/** + * @TYPE_CRYPTODEV_BACKEND_LKCF: + * name of backend that uses linux kernel crypto framework + */ +#define TYPE_CRYPTODEV_BACKEND_LKCF "cryptodev-backend-lkcf" + +OBJECT_DECLARE_SIMPLE_TYPE(CryptoDevBackendLKCF, CRYPTODEV_BACKEND_LKCF) + +#define INVALID_KEY_ID -1 +#define MAX_SESSIONS 256 +#define NR_WORKER_THREAD 64 + +#define KCTL_KEY_TYPE_PKEY "asymmetric" +/** + * Here the key is uploaded to the thread-keyring of worker thread, at least + * util linux-6.0: + * 1. process keyring seems to behave unexpectedly if main-thread does not + * create the keyring before creating any other thread. + * 2. at present, the guest kernel never perform multiple operations on a + * session. + * 3. it can reduce the load of the main-loop because the key passed by the + * guest kernel has been already checked. + */ +#define KCTL_KEY_RING KEY_SPEC_THREAD_KEYRING + +typedef struct CryptoDevBackendLKCFSession { + uint8_t *key; + size_t keylen; + QCryptoAkCipherKeyType keytype; + QCryptoAkCipherOptions akcipher_opts; +} CryptoDevBackendLKCFSession; + +typedef struct CryptoDevBackendLKCF CryptoDevBackendLKCF; +typedef struct CryptoDevLKCFTask CryptoDevLKCFTask; +struct CryptoDevLKCFTask { + CryptoDevBackendLKCFSession *sess; + CryptoDevBackendOpInfo *op_info; + CryptoDevCompletionFunc cb; + void *opaque; + int status; + CryptoDevBackendLKCF *lkcf; + QSIMPLEQ_ENTRY(CryptoDevLKCFTask) queue; +}; + +typedef struct CryptoDevBackendLKCF { + CryptoDevBackend parent_obj; + CryptoDevBackendLKCFSession *sess[MAX_SESSIONS]; + QSIMPLEQ_HEAD(, CryptoDevLKCFTask) requests; + QSIMPLEQ_HEAD(, CryptoDevLKCFTask) responses; + QemuMutex mutex; + QemuCond cond; + QemuMutex rsp_mutex; + + /** + * There is no async interface for asymmetric keys like AF_ALG sockets, + * we don't seem to have better way than create a lots of thread. 
+ */ + QemuThread worker_threads[NR_WORKER_THREAD]; + bool running; + int eventfd; +} CryptoDevBackendLKCF; + +static void *cryptodev_lkcf_worker(void *arg); +static int cryptodev_lkcf_close_session(CryptoDevBackend *backend, + uint64_t session_id, + uint32_t queue_index, + CryptoDevCompletionFunc cb, + void *opaque); + +static void cryptodev_lkcf_handle_response(void *opaque) +{ + CryptoDevBackendLKCF *lkcf = (CryptoDevBackendLKCF *)opaque; + QSIMPLEQ_HEAD(, CryptoDevLKCFTask) responses; + CryptoDevLKCFTask *task, *next; + eventfd_t nevent; + + QSIMPLEQ_INIT(&responses); + eventfd_read(lkcf->eventfd, &nevent); + + qemu_mutex_lock(&lkcf->rsp_mutex); + QSIMPLEQ_PREPEND(&responses, &lkcf->responses); + qemu_mutex_unlock(&lkcf->rsp_mutex); + + QSIMPLEQ_FOREACH_SAFE(task, &responses, queue, next) { + if (task->cb) { + task->cb(task->opaque, task->status); + } + g_free(task); + } +} + +static int cryptodev_lkcf_set_op_desc(QCryptoAkCipherOptions *opts, + char *key_desc, + size_t desc_len, + Error **errp) +{ + QCryptoAkCipherOptionsRSA *rsa_opt; + if (opts->alg != QCRYPTO_AKCIPHER_ALG_RSA) { + error_setg(errp, "Unsupported alg: %u", opts->alg); + return -1; + } + + rsa_opt = &opts->u.rsa; + if (rsa_opt->padding_alg == QCRYPTO_RSA_PADDING_ALG_PKCS1) { + snprintf(key_desc, desc_len, "enc=%s hash=%s", + QCryptoRSAPaddingAlgorithm_str(rsa_opt->padding_alg), + QCryptoHashAlgorithm_str(rsa_opt->hash_alg)); + + } else { + snprintf(key_desc, desc_len, "enc=%s", + QCryptoRSAPaddingAlgorithm_str(rsa_opt->padding_alg)); + } + return 0; +} + +static int cryptodev_lkcf_set_rsa_opt(int virtio_padding_alg, + int virtio_hash_alg, + QCryptoAkCipherOptionsRSA *opt, + Error **errp) +{ + if (virtio_padding_alg == VIRTIO_CRYPTO_RSA_PKCS1_PADDING) { + opt->padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1; + + switch (virtio_hash_alg) { + case VIRTIO_CRYPTO_RSA_MD5: + opt->hash_alg = QCRYPTO_HASH_ALG_MD5; + break; + + case VIRTIO_CRYPTO_RSA_SHA1: + opt->hash_alg = QCRYPTO_HASH_ALG_SHA1; + break; + + case VIRTIO_CRYPTO_RSA_SHA256: + opt->hash_alg = QCRYPTO_HASH_ALG_SHA256; + break; + + case VIRTIO_CRYPTO_RSA_SHA512: + opt->hash_alg = QCRYPTO_HASH_ALG_SHA512; + break; + + default: + error_setg(errp, "Unsupported rsa hash algo: %d", virtio_hash_alg); + return -1; + } + return 0; + } + + if (virtio_padding_alg == VIRTIO_CRYPTO_RSA_RAW_PADDING) { + opt->padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW; + return 0; + } + + error_setg(errp, "Unsupported rsa padding algo: %u", virtio_padding_alg); + return -1; +} + +static int cryptodev_lkcf_get_unused_session_index(CryptoDevBackendLKCF *lkcf) +{ + size_t i; + + for (i = 0; i < MAX_SESSIONS; i++) { + if (lkcf->sess[i] == NULL) { + return i; + } + } + return -1; +} + +static void cryptodev_lkcf_init(CryptoDevBackend *backend, Error **errp) +{ + /* Only support one queue */ + int queues = backend->conf.peers.queues, i; + CryptoDevBackendClient *cc; + CryptoDevBackendLKCF *lkcf = + CRYPTODEV_BACKEND_LKCF(backend); + + if (queues != 1) { + error_setg(errp, + "Only support one queue in cryptodev-builtin backend"); + return; + } + lkcf->eventfd = eventfd(0, 0); + if (lkcf->eventfd < 0) { + error_setg(errp, "Failed to create eventfd: %d", errno); + return; + } + + cc = cryptodev_backend_new_client("cryptodev-lkcf", NULL); + cc->info_str = g_strdup_printf("cryptodev-lkcf0"); + cc->queue_index = 0; + cc->type = CRYPTODEV_BACKEND_TYPE_LKCF; + backend->conf.peers.ccs[0] = cc; + + backend->conf.crypto_services = + 1u << VIRTIO_CRYPTO_SERVICE_AKCIPHER; + backend->conf.akcipher_algo = 1u << 
VIRTIO_CRYPTO_AKCIPHER_RSA; + lkcf->running = true; + + QSIMPLEQ_INIT(&lkcf->requests); + QSIMPLEQ_INIT(&lkcf->responses); + qemu_mutex_init(&lkcf->mutex); + qemu_mutex_init(&lkcf->rsp_mutex); + qemu_cond_init(&lkcf->cond); + for (i = 0; i < NR_WORKER_THREAD; i++) { + qemu_thread_create(&lkcf->worker_threads[i], "lkcf-worker", + cryptodev_lkcf_worker, lkcf, 0); + } + qemu_set_fd_handler( + lkcf->eventfd, cryptodev_lkcf_handle_response, NULL, lkcf); + cryptodev_backend_set_ready(backend, true); +} + +static void cryptodev_lkcf_cleanup(CryptoDevBackend *backend, Error **errp) +{ + CryptoDevBackendLKCF *lkcf = CRYPTODEV_BACKEND_LKCF(backend); + size_t i; + int queues = backend->conf.peers.queues; + CryptoDevBackendClient *cc; + CryptoDevLKCFTask *task, *next; + + qemu_mutex_lock(&lkcf->mutex); + lkcf->running = false; + qemu_mutex_unlock(&lkcf->mutex); + qemu_cond_broadcast(&lkcf->cond); + + close(lkcf->eventfd); + for (i = 0; i < NR_WORKER_THREAD; i++) { + qemu_thread_join(&lkcf->worker_threads[i]); + } + + QSIMPLEQ_FOREACH_SAFE(task, &lkcf->requests, queue, next) { + if (task->cb) { + task->cb(task->opaque, task->status); + } + g_free(task); + } + + QSIMPLEQ_FOREACH_SAFE(task, &lkcf->responses, queue, next) { + if (task->cb) { + task->cb(task->opaque, task->status); + } + g_free(task); + } + + qemu_mutex_destroy(&lkcf->mutex); + qemu_cond_destroy(&lkcf->cond); + qemu_mutex_destroy(&lkcf->rsp_mutex); + + for (i = 0; i < MAX_SESSIONS; i++) { + if (lkcf->sess[i] != NULL) { + cryptodev_lkcf_close_session(backend, i, 0, NULL, NULL); + } + } + + for (i = 0; i < queues; i++) { + cc = backend->conf.peers.ccs[i]; + if (cc) { + cryptodev_backend_free_client(cc); + backend->conf.peers.ccs[i] = NULL; + } + } + + cryptodev_backend_set_ready(backend, false); +} + +static void cryptodev_lkcf_execute_task(CryptoDevLKCFTask *task) +{ + CryptoDevBackendLKCFSession *session = task->sess; + CryptoDevBackendAsymOpInfo *asym_op_info; + bool kick = false; + int ret, status, op_code = task->op_info->op_code; + size_t p8info_len; + g_autofree uint8_t *p8info = NULL; + Error *local_error = NULL; + key_serial_t key_id = INVALID_KEY_ID; + char op_desc[64]; + g_autoptr(QCryptoAkCipher) akcipher = NULL; + + /** + * We only offload private key session: + * 1. currently, the Linux kernel can only accept public key wrapped + * with X.509 certificates, but unfortunately the cost of making a + * ceritificate with public key is too expensive. + * 2. generally, public key related compution is fast, just compute it with + * thread-pool. 
+ */ + if (session->keytype == QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE) { + if (qcrypto_akcipher_export_p8info(&session->akcipher_opts, + session->key, session->keylen, + &p8info, &p8info_len, + &local_error) != 0 || + cryptodev_lkcf_set_op_desc(&session->akcipher_opts, op_desc, + sizeof(op_desc), &local_error) != 0) { + error_report_err(local_error); + } else { + key_id = add_key(KCTL_KEY_TYPE_PKEY, "lkcf-backend-priv-key", + p8info, p8info_len, KCTL_KEY_RING); + } + } + + if (key_id < 0) { + if (!qcrypto_akcipher_supports(&session->akcipher_opts)) { + status = -VIRTIO_CRYPTO_NOTSUPP; + goto out; + } + akcipher = qcrypto_akcipher_new(&session->akcipher_opts, + session->keytype, + session->key, session->keylen, + &local_error); + if (!akcipher) { + status = -VIRTIO_CRYPTO_ERR; + goto out; + } + } + + asym_op_info = task->op_info->u.asym_op_info; + switch (op_code) { + case VIRTIO_CRYPTO_AKCIPHER_ENCRYPT: + if (key_id >= 0) { + ret = keyctl_pkey_encrypt(key_id, op_desc, + asym_op_info->src, asym_op_info->src_len, + asym_op_info->dst, asym_op_info->dst_len); + } else { + ret = qcrypto_akcipher_encrypt(akcipher, + asym_op_info->src, asym_op_info->src_len, + asym_op_info->dst, asym_op_info->dst_len, &local_error); + } + break; + + case VIRTIO_CRYPTO_AKCIPHER_DECRYPT: + if (key_id >= 0) { + ret = keyctl_pkey_decrypt(key_id, op_desc, + asym_op_info->src, asym_op_info->src_len, + asym_op_info->dst, asym_op_info->dst_len); + } else { + ret = qcrypto_akcipher_decrypt(akcipher, + asym_op_info->src, asym_op_info->src_len, + asym_op_info->dst, asym_op_info->dst_len, &local_error); + } + break; + + case VIRTIO_CRYPTO_AKCIPHER_SIGN: + if (key_id >= 0) { + ret = keyctl_pkey_sign(key_id, op_desc, + asym_op_info->src, asym_op_info->src_len, + asym_op_info->dst, asym_op_info->dst_len); + } else { + ret = qcrypto_akcipher_sign(akcipher, + asym_op_info->src, asym_op_info->src_len, + asym_op_info->dst, asym_op_info->dst_len, &local_error); + } + break; + + case VIRTIO_CRYPTO_AKCIPHER_VERIFY: + if (key_id >= 0) { + ret = keyctl_pkey_verify(key_id, op_desc, + asym_op_info->src, asym_op_info->src_len, + asym_op_info->dst, asym_op_info->dst_len); + } else { + ret = qcrypto_akcipher_verify(akcipher, + asym_op_info->src, asym_op_info->src_len, + asym_op_info->dst, asym_op_info->dst_len, &local_error); + } + break; + + default: + error_setg(&local_error, "Unknown opcode: %u", op_code); + status = -VIRTIO_CRYPTO_ERR; + goto out; + } + + if (ret < 0) { + if (!local_error) { + if (errno != EKEYREJECTED) { + error_report("Failed do operation with keyctl: %d", errno); + } + } else { + error_report_err(local_error); + } + status = op_code == VIRTIO_CRYPTO_AKCIPHER_VERIFY ? 
+ -VIRTIO_CRYPTO_KEY_REJECTED : -VIRTIO_CRYPTO_ERR; + } else { + status = VIRTIO_CRYPTO_OK; + asym_op_info->dst_len = ret; + } + +out: + if (key_id >= 0) { + keyctl_unlink(key_id, KCTL_KEY_RING); + } + task->status = status; + + qemu_mutex_lock(&task->lkcf->rsp_mutex); + if (QSIMPLEQ_EMPTY(&task->lkcf->responses)) { + kick = true; + } + QSIMPLEQ_INSERT_TAIL(&task->lkcf->responses, task, queue); + qemu_mutex_unlock(&task->lkcf->rsp_mutex); + + if (kick) { + eventfd_write(task->lkcf->eventfd, 1); + } +} + +static void *cryptodev_lkcf_worker(void *arg) +{ + CryptoDevBackendLKCF *backend = (CryptoDevBackendLKCF *)arg; + CryptoDevLKCFTask *task; + + for (;;) { + task = NULL; + qemu_mutex_lock(&backend->mutex); + while (backend->running && QSIMPLEQ_EMPTY(&backend->requests)) { + qemu_cond_wait(&backend->cond, &backend->mutex); + } + if (backend->running) { + task = QSIMPLEQ_FIRST(&backend->requests); + QSIMPLEQ_REMOVE_HEAD(&backend->requests, queue); + } + qemu_mutex_unlock(&backend->mutex); + + /* stopped */ + if (!task) { + break; + } + cryptodev_lkcf_execute_task(task); + } + + return NULL; +} + +static int cryptodev_lkcf_operation( + CryptoDevBackend *backend, + CryptoDevBackendOpInfo *op_info, + uint32_t queue_index, + CryptoDevCompletionFunc cb, + void *opaque) +{ + CryptoDevBackendLKCF *lkcf = + CRYPTODEV_BACKEND_LKCF(backend); + CryptoDevBackendLKCFSession *sess; + enum CryptoDevBackendAlgType algtype = op_info->algtype; + CryptoDevLKCFTask *task; + + if (op_info->session_id >= MAX_SESSIONS || + lkcf->sess[op_info->session_id] == NULL) { + error_report("Cannot find a valid session id: %" PRIu64 "", + op_info->session_id); + return -VIRTIO_CRYPTO_INVSESS; + } + + sess = lkcf->sess[op_info->session_id]; + if (algtype != CRYPTODEV_BACKEND_ALG_ASYM) { + error_report("algtype not supported: %u", algtype); + return -VIRTIO_CRYPTO_NOTSUPP; + } + + task = g_new0(CryptoDevLKCFTask, 1); + task->op_info = op_info; + task->cb = cb; + task->opaque = opaque; + task->sess = sess; + task->lkcf = lkcf; + task->status = -VIRTIO_CRYPTO_ERR; + + qemu_mutex_lock(&lkcf->mutex); + QSIMPLEQ_INSERT_TAIL(&lkcf->requests, task, queue); + qemu_mutex_unlock(&lkcf->mutex); + qemu_cond_signal(&lkcf->cond); + + return VIRTIO_CRYPTO_OK; +} + +static int cryptodev_lkcf_create_asym_session( + CryptoDevBackendLKCF *lkcf, + CryptoDevBackendAsymSessionInfo *sess_info, + uint64_t *session_id) +{ + Error *local_error = NULL; + int index; + g_autofree CryptoDevBackendLKCFSession *sess = + g_new0(CryptoDevBackendLKCFSession, 1); + + switch (sess_info->algo) { + case VIRTIO_CRYPTO_AKCIPHER_RSA: + sess->akcipher_opts.alg = QCRYPTO_AKCIPHER_ALG_RSA; + if (cryptodev_lkcf_set_rsa_opt( + sess_info->u.rsa.padding_algo, sess_info->u.rsa.hash_algo, + &sess->akcipher_opts.u.rsa, &local_error) != 0) { + error_report_err(local_error); + return -VIRTIO_CRYPTO_ERR; + } + break; + + default: + error_report("Unsupported asym alg %u", sess_info->algo); + return -VIRTIO_CRYPTO_NOTSUPP; + } + + switch (sess_info->keytype) { + case VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC: + sess->keytype = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC; + break; + + case VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE: + sess->keytype = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE; + break; + + default: + error_report("Unknown akcipher keytype: %u", sess_info->keytype); + return -VIRTIO_CRYPTO_ERR; + } + + index = cryptodev_lkcf_get_unused_session_index(lkcf); + if (index < 0) { + error_report("Total number of sessions created exceeds %u", + MAX_SESSIONS); + return -VIRTIO_CRYPTO_ERR; + } + + 
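/*
 * Illustrative sketch, not part of the patch: what the LKCF worker above
 * effectively does with the Linux kernel keyring. Assumptions: libkeyutils
 * is available (link with -lkeyutils) and the running kernel's "asymmetric"
 * key type can parse PKCS#8 private keys; "enc=pkcs1 hash=sha256" is one
 * plausible operation description string, the real backend builds op_desc
 * from the session options. Build: gcc lkcf-sign-demo.c -lkeyutils
 */
#include <errno.h>
#include <keyutils.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
    unsigned char digest[32] = { 0 };   /* stand-in for a SHA-256 digest */
    unsigned char sig[512];             /* large enough for RSA-4096 */
    unsigned char p8[8192];
    size_t p8len;
    FILE *f;
    key_serial_t key;
    long siglen;

    if (argc != 2) {
        fprintf(stderr, "usage: %s <pkcs8-private-key.der>\n", argv[0]);
        return 1;
    }
    f = fopen(argv[1], "rb");
    if (!f) {
        perror("fopen");
        return 1;
    }
    p8len = fread(p8, 1, sizeof(p8), f);
    fclose(f);

    /* counterpart of add_key(KCTL_KEY_TYPE_PKEY, ...) in the backend */
    key = add_key("asymmetric", "lkcf-demo-priv-key", p8, p8len,
                  KEY_SPEC_PROCESS_KEYRING);
    if (key < 0) {
        perror("add_key");
        return 1;
    }

    /* counterpart of keyctl_pkey_sign(key_id, op_desc, src, ..., dst, ...) */
    siglen = keyctl_pkey_sign(key, "enc=pkcs1 hash=sha256",
                              digest, sizeof(digest), sig, sizeof(sig));
    if (siglen < 0) {
        fprintf(stderr, "keyctl_pkey_sign: %s\n", strerror(errno));
    } else {
        printf("signature is %ld bytes\n", siglen);
    }

    /* like the backend's cleanup path: drop the key from the keyring again */
    keyctl_unlink(key, KEY_SPEC_PROCESS_KEYRING);
    return siglen < 0 ? 1 : 0;
}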
sess->keylen = sess_info->keylen; + sess->key = g_malloc(sess_info->keylen); + memcpy(sess->key, sess_info->key, sess_info->keylen); + + lkcf->sess[index] = g_steal_pointer(&sess); + *session_id = index; + + return VIRTIO_CRYPTO_OK; +} + +static int cryptodev_lkcf_create_session( + CryptoDevBackend *backend, + CryptoDevBackendSessionInfo *sess_info, + uint32_t queue_index, + CryptoDevCompletionFunc cb, + void *opaque) +{ + CryptoDevBackendAsymSessionInfo *asym_sess_info; + CryptoDevBackendLKCF *lkcf = + CRYPTODEV_BACKEND_LKCF(backend); + int ret; + + switch (sess_info->op_code) { + case VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION: + asym_sess_info = &sess_info->u.asym_sess_info; + ret = cryptodev_lkcf_create_asym_session( + lkcf, asym_sess_info, &sess_info->session_id); + break; + + default: + ret = -VIRTIO_CRYPTO_NOTSUPP; + error_report("Unsupported opcode: %" PRIu32 "", + sess_info->op_code); + break; + } + if (cb) { + cb(opaque, ret); + } + return 0; +} + +static int cryptodev_lkcf_close_session(CryptoDevBackend *backend, + uint64_t session_id, + uint32_t queue_index, + CryptoDevCompletionFunc cb, + void *opaque) +{ + CryptoDevBackendLKCF *lkcf = CRYPTODEV_BACKEND_LKCF(backend); + CryptoDevBackendLKCFSession *session; + + assert(session_id < MAX_SESSIONS && lkcf->sess[session_id]); + session = lkcf->sess[session_id]; + lkcf->sess[session_id] = NULL; + + g_free(session->key); + g_free(session); + + if (cb) { + cb(opaque, VIRTIO_CRYPTO_OK); + } + return 0; +} + +static void cryptodev_lkcf_class_init(ObjectClass *oc, void *data) +{ + CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_CLASS(oc); + + bc->init = cryptodev_lkcf_init; + bc->cleanup = cryptodev_lkcf_cleanup; + bc->create_session = cryptodev_lkcf_create_session; + bc->close_session = cryptodev_lkcf_close_session; + bc->do_op = cryptodev_lkcf_operation; +} + +static const TypeInfo cryptodev_builtin_info = { + .name = TYPE_CRYPTODEV_BACKEND_LKCF, + .parent = TYPE_CRYPTODEV_BACKEND, + .class_init = cryptodev_lkcf_class_init, + .instance_size = sizeof(CryptoDevBackendLKCF), +}; + +static void cryptodev_lkcf_register_types(void) +{ + type_register_static(&cryptodev_builtin_info); +} + +type_init(cryptodev_lkcf_register_types); diff --git a/backends/cryptodev-vhost-user.c b/backends/cryptodev-vhost-user.c index bedb452474..ab3028e045 100644 --- a/backends/cryptodev-vhost-user.c +++ b/backends/cryptodev-vhost-user.c @@ -259,15 +259,61 @@ static int64_t cryptodev_vhost_user_sym_create_session( return -1; } -static int cryptodev_vhost_user_sym_close_session( +static int cryptodev_vhost_user_create_session( + CryptoDevBackend *backend, + CryptoDevBackendSessionInfo *sess_info, + uint32_t queue_index, + CryptoDevCompletionFunc cb, + void *opaque) +{ + uint32_t op_code = sess_info->op_code; + CryptoDevBackendSymSessionInfo *sym_sess_info; + int64_t ret; + Error *local_error = NULL; + int status; + + switch (op_code) { + case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION: + case VIRTIO_CRYPTO_HASH_CREATE_SESSION: + case VIRTIO_CRYPTO_MAC_CREATE_SESSION: + case VIRTIO_CRYPTO_AEAD_CREATE_SESSION: + sym_sess_info = &sess_info->u.sym_sess_info; + ret = cryptodev_vhost_user_sym_create_session(backend, sym_sess_info, + queue_index, &local_error); + break; + + default: + error_setg(&local_error, "Unsupported opcode :%" PRIu32 "", + sess_info->op_code); + return -VIRTIO_CRYPTO_NOTSUPP; + } + + if (local_error) { + error_report_err(local_error); + } + if (ret < 0) { + status = -VIRTIO_CRYPTO_ERR; + } else { + sess_info->session_id = ret; + status = VIRTIO_CRYPTO_OK; + } + 
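/*
 * Schematic of the completion-callback contract the hunks above keep
 * applying (all names here are illustrative, not the QEMU API): instead of
 * returning a session id or error code synchronously, a backend reports the
 * outcome by calling cb(opaque, status) -- status being VIRTIO_CRYPTO_OK or
 * a negated VIRTIO_CRYPTO_* error -- and the entry point itself returns 0
 * once the request has been accepted.
 */
#include <stdio.h>

typedef void (*completion_fn)(void *opaque, int status);

#define DEMO_OK      0
#define DEMO_NOTSUPP (-2)   /* stand-in for -VIRTIO_CRYPTO_NOTSUPP */

/* stand-in for a backend entry point such as create_session */
static int demo_create_session(unsigned op_code, completion_fn cb, void *opaque)
{
    int status = (op_code == 1) ? DEMO_OK : DEMO_NOTSUPP;

    if (cb) {
        cb(opaque, status);  /* the result travels through the callback... */
    }
    return 0;                /* ...the return value only means "request handled" */
}

static void on_done(void *opaque, int status)
{
    printf("session '%s' completed with status %d\n", (const char *)opaque, status);
}

int main(void)
{
    char name1[] = "akcipher";
    char name2[] = "unknown-op";

    demo_create_session(1, on_done, name1);
    demo_create_session(7, on_done, name2);
    return 0;
}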
if (cb) { + cb(opaque, status); + } + return 0; +} + +static int cryptodev_vhost_user_close_session( CryptoDevBackend *backend, uint64_t session_id, - uint32_t queue_index, Error **errp) + uint32_t queue_index, + CryptoDevCompletionFunc cb, + void *opaque) { CryptoDevBackendClient *cc = backend->conf.peers.ccs[queue_index]; CryptoDevBackendVhost *vhost_crypto; - int ret; + int ret = -1, status; vhost_crypto = cryptodev_vhost_user_get_vhost(cc, backend, queue_index); if (vhost_crypto) { @@ -275,12 +321,17 @@ static int cryptodev_vhost_user_sym_close_session( ret = dev->vhost_ops->vhost_crypto_close_session(dev, session_id); if (ret < 0) { - return -1; + status = -VIRTIO_CRYPTO_ERR; } else { - return 0; + status = VIRTIO_CRYPTO_OK; } + } else { + status = -VIRTIO_CRYPTO_NOTSUPP; } - return -1; + if (cb) { + cb(opaque, status); + } + return 0; } static void cryptodev_vhost_user_cleanup( @@ -313,7 +364,7 @@ static void cryptodev_vhost_user_set_chardev(Object *obj, CRYPTODEV_BACKEND_VHOST_USER(obj); if (s->opened) { - error_setg(errp, QERR_PERMISSION_DENIED); + error_setg(errp, "Property 'chardev' can no longer be set"); } else { g_free(s->chr_name); s->chr_name = g_strdup(value); @@ -351,9 +402,9 @@ cryptodev_vhost_user_class_init(ObjectClass *oc, void *data) bc->init = cryptodev_vhost_user_init; bc->cleanup = cryptodev_vhost_user_cleanup; - bc->create_session = cryptodev_vhost_user_sym_create_session; - bc->close_session = cryptodev_vhost_user_sym_close_session; - bc->do_sym_op = NULL; + bc->create_session = cryptodev_vhost_user_create_session; + bc->close_session = cryptodev_vhost_user_close_session; + bc->do_op = NULL; object_class_property_add_str(oc, "chardev", cryptodev_vhost_user_get_chardev, diff --git a/backends/cryptodev-vhost.c b/backends/cryptodev-vhost.c index bc13e466b4..572f87b3be 100644 --- a/backends/cryptodev-vhost.c +++ b/backends/cryptodev-vhost.c @@ -94,7 +94,7 @@ cryptodev_vhost_start_one(CryptoDevBackendVhost *crypto, goto fail_notifiers; } - r = vhost_dev_start(&crypto->dev, dev); + r = vhost_dev_start(&crypto->dev, dev, false); if (r < 0) { goto fail_start; } @@ -111,7 +111,7 @@ static void cryptodev_vhost_stop_one(CryptoDevBackendVhost *crypto, VirtIODevice *dev) { - vhost_dev_stop(&crypto->dev, dev); + vhost_dev_stop(&crypto->dev, dev, false); vhost_dev_disable_notifiers(&crypto->dev, dev); } diff --git a/backends/cryptodev.c b/backends/cryptodev.c index bf52476166..54ee8c81f5 100644 --- a/backends/cryptodev.c +++ b/backends/cryptodev.c @@ -26,6 +26,7 @@ #include "qapi/error.h" #include "qapi/visitor.h" #include "qemu/config-file.h" +#include "qemu/error-report.h" #include "qom/object_interfaces.h" #include "hw/virtio/virtio-crypto.h" @@ -39,7 +40,7 @@ cryptodev_backend_new_client(const char *model, { CryptoDevBackendClient *cc; - cc = g_malloc0(sizeof(CryptoDevBackendClient)); + cc = g_new0(CryptoDevBackendClient, 1); cc->model = g_strdup(model); if (name) { cc->name = g_strdup(name); @@ -72,71 +73,72 @@ void cryptodev_backend_cleanup( } } -int64_t cryptodev_backend_sym_create_session( +int cryptodev_backend_create_session( CryptoDevBackend *backend, - CryptoDevBackendSymSessionInfo *sess_info, - uint32_t queue_index, Error **errp) + CryptoDevBackendSessionInfo *sess_info, + uint32_t queue_index, + CryptoDevCompletionFunc cb, + void *opaque) { CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_GET_CLASS(backend); if (bc->create_session) { - return bc->create_session(backend, sess_info, queue_index, errp); + return bc->create_session(backend, sess_info, queue_index, cb, 
opaque); } - - return -1; + return -VIRTIO_CRYPTO_NOTSUPP; } -int cryptodev_backend_sym_close_session( +int cryptodev_backend_close_session( CryptoDevBackend *backend, uint64_t session_id, - uint32_t queue_index, Error **errp) + uint32_t queue_index, + CryptoDevCompletionFunc cb, + void *opaque) { CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_GET_CLASS(backend); if (bc->close_session) { - return bc->close_session(backend, session_id, queue_index, errp); + return bc->close_session(backend, session_id, queue_index, cb, opaque); } - - return -1; + return -VIRTIO_CRYPTO_NOTSUPP; } -static int cryptodev_backend_sym_operation( +static int cryptodev_backend_operation( CryptoDevBackend *backend, - CryptoDevBackendSymOpInfo *op_info, - uint32_t queue_index, Error **errp) + CryptoDevBackendOpInfo *op_info, + uint32_t queue_index, + CryptoDevCompletionFunc cb, + void *opaque) { CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_GET_CLASS(backend); - if (bc->do_sym_op) { - return bc->do_sym_op(backend, op_info, queue_index, errp); + if (bc->do_op) { + return bc->do_op(backend, op_info, queue_index, cb, opaque); } - - return -VIRTIO_CRYPTO_ERR; + return -VIRTIO_CRYPTO_NOTSUPP; } int cryptodev_backend_crypto_operation( CryptoDevBackend *backend, - void *opaque, - uint32_t queue_index, Error **errp) + void *opaque1, + uint32_t queue_index, + CryptoDevCompletionFunc cb, void *opaque2) { - VirtIOCryptoReq *req = opaque; + VirtIOCryptoReq *req = opaque1; + CryptoDevBackendOpInfo *op_info = &req->op_info; + enum CryptoDevBackendAlgType algtype = req->flags; - if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) { - CryptoDevBackendSymOpInfo *op_info; - op_info = req->u.sym_op_info; - - return cryptodev_backend_sym_operation(backend, - op_info, queue_index, errp); - } else { - error_setg(errp, "Unsupported cryptodev alg type: %" PRIu32 "", - req->flags); - return -VIRTIO_CRYPTO_NOTSUPP; + if ((algtype != CRYPTODEV_BACKEND_ALG_SYM) + && (algtype != CRYPTODEV_BACKEND_ALG_ASYM)) { + error_report("Unsupported cryptodev alg type: %" PRIu32 "", algtype); + return -VIRTIO_CRYPTO_NOTSUPP; } - return -VIRTIO_CRYPTO_ERR; + return cryptodev_backend_operation(backend, op_info, queue_index, + cb, opaque2); } static void diff --git a/backends/dbus-vmstate.c b/backends/dbus-vmstate.c index 9cfd758c42..57369ec0f2 100644 --- a/backends/dbus-vmstate.c +++ b/backends/dbus-vmstate.c @@ -114,14 +114,19 @@ dbus_get_proxies(DBusVMState *self, GError **err) "org.qemu.VMState1", NULL, err); if (!proxy) { - return NULL; + if (err != NULL && *err != NULL) { + warn_report("%s: Failed to create proxy: %s", + __func__, (*err)->message); + g_clear_error(err); + } + continue; } result = g_dbus_proxy_get_cached_property(proxy, "Id"); if (!result) { - g_set_error_literal(err, G_IO_ERROR, G_IO_ERROR_FAILED, - "VMState Id property is missing."); - return NULL; + warn_report("%s: VMState Id property is missing.", __func__); + g_clear_object(&proxy); + continue; } id = g_variant_dup_string(result, &size); diff --git a/backends/dbus-vmstate1.xml b/backends/dbus-vmstate1.xml new file mode 100644 index 0000000000..601ee8dc7e --- /dev/null +++ b/backends/dbus-vmstate1.xml @@ -0,0 +1,52 @@ + + + + + + + + + + + + + + + + + + + diff --git a/backends/hostmem-epc.c b/backends/hostmem-epc.c new file mode 100644 index 0000000000..037292d267 --- /dev/null +++ b/backends/hostmem-epc.c @@ -0,0 +1,81 @@ +/* + * QEMU host SGX EPC memory backend + * + * Copyright (C) 2019 Intel Corporation + * + * Authors: + * Sean Christopherson + * + * This work is licensed under the terms 
of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include + +#include "qemu/osdep.h" +#include "qom/object_interfaces.h" +#include "qapi/error.h" +#include "sysemu/hostmem.h" +#include "hw/i386/hostmem-epc.h" + +static void +sgx_epc_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) +{ + uint32_t ram_flags; + char *name; + int fd; + + if (!backend->size) { + error_setg(errp, "can't create backend with size 0"); + return; + } + + fd = qemu_open_old("/dev/sgx_vepc", O_RDWR); + if (fd < 0) { + error_setg_errno(errp, errno, + "failed to open /dev/sgx_vepc to alloc SGX EPC"); + return; + } + + name = object_get_canonical_path(OBJECT(backend)); + ram_flags = (backend->share ? RAM_SHARED : 0) | RAM_PROTECTED; + memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), + name, backend->size, ram_flags, + fd, 0, errp); + g_free(name); +} + +static void sgx_epc_backend_instance_init(Object *obj) +{ + HostMemoryBackend *m = MEMORY_BACKEND(obj); + + m->share = true; + m->merge = false; + m->dump = false; +} + +static void sgx_epc_backend_class_init(ObjectClass *oc, void *data) +{ + HostMemoryBackendClass *bc = MEMORY_BACKEND_CLASS(oc); + + bc->alloc = sgx_epc_backend_memory_alloc; +} + +static const TypeInfo sgx_epc_backed_info = { + .name = TYPE_MEMORY_BACKEND_EPC, + .parent = TYPE_MEMORY_BACKEND, + .instance_init = sgx_epc_backend_instance_init, + .class_init = sgx_epc_backend_class_init, + .instance_size = sizeof(HostMemoryBackendEpc), +}; + +static void register_types(void) +{ + int fd = qemu_open_old("/dev/sgx_vepc", O_RDWR); + if (fd >= 0) { + close(fd); + + type_register_static(&sgx_epc_backed_info); + } +} + +type_init(register_types); diff --git a/backends/hostmem-file.c b/backends/hostmem-file.c index cd038024fa..25141283c4 100644 --- a/backends/hostmem-file.c +++ b/backends/hostmem-file.c @@ -14,6 +14,7 @@ #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/module.h" +#include "qemu/madvise.h" #include "sysemu/hostmem.h" #include "qom/object_interfaces.h" #include "qom/object.h" diff --git a/backends/hostmem.c b/backends/hostmem.c index 4c05862ed5..8640294c10 100644 --- a/backends/hostmem.c +++ b/backends/hostmem.c @@ -19,6 +19,7 @@ #include "qemu/config-file.h" #include "qom/object_interfaces.h" #include "qemu/mmap-alloc.h" +#include "qemu/madvise.h" #ifdef CONFIG_NUMA #include @@ -231,7 +232,8 @@ static void host_memory_backend_set_prealloc(Object *obj, bool value, void *ptr = memory_region_get_ram_ptr(&backend->mr); uint64_t sz = memory_region_size(&backend->mr); - os_mem_prealloc(fd, ptr, sz, backend->prealloc_threads, &local_err); + qemu_prealloc_mem(fd, ptr, sz, backend->prealloc_threads, + backend->prealloc_context, &local_err); if (local_err) { error_propagate(errp, local_err); return; @@ -273,7 +275,7 @@ static void host_memory_backend_init(Object *obj) backend->merge = machine_mem_merge(machine); backend->dump = machine_dump_guest_core(machine); backend->reserve = true; - backend->prealloc_threads = 1; + backend->prealloc_threads = machine->smp.cpus; } static void host_memory_backend_post_init(Object *obj) @@ -305,22 +307,12 @@ bool host_memory_backend_is_mapped(HostMemoryBackend *backend) return backend->is_mapped; } -#ifdef __linux__ size_t host_memory_backend_pagesize(HostMemoryBackend *memdev) { - Object *obj = OBJECT(memdev); - char *path = object_property_get_str(obj, "mem-path", NULL); - size_t pagesize = qemu_mempath_getpagesize(path); - - g_free(path); + size_t pagesize = 
qemu_ram_pagesize(memdev->mr.ram_block); + g_assert(pagesize >= qemu_real_host_page_size()); return pagesize; } -#else -size_t host_memory_backend_pagesize(HostMemoryBackend *memdev) -{ - return qemu_real_host_page_size; -} -#endif static void host_memory_backend_memory_complete(UserCreatable *uc, Error **errp) @@ -392,8 +384,9 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp) * specified NUMA policy in place. */ if (backend->prealloc) { - os_mem_prealloc(memory_region_get_fd(&backend->mr), ptr, sz, - backend->prealloc_threads, &local_err); + qemu_prealloc_mem(memory_region_get_fd(&backend->mr), ptr, sz, + backend->prealloc_threads, + backend->prealloc_context, &local_err); if (local_err) { goto out; } @@ -501,6 +494,11 @@ host_memory_backend_class_init(ObjectClass *oc, void *data) NULL, NULL); object_class_property_set_description(oc, "prealloc-threads", "Number of CPU threads to use for prealloc"); + object_class_property_add_link(oc, "prealloc-context", + TYPE_THREAD_CONTEXT, offsetof(HostMemoryBackend, prealloc_context), + object_property_allow_set_link, OBJ_PROP_LINK_STRONG); + object_class_property_set_description(oc, "prealloc-context", + "Context to use for creating CPU threads for preallocation"); object_class_property_add(oc, "size", "int", host_memory_backend_get_size, host_memory_backend_set_size, diff --git a/backends/meson.build b/backends/meson.build index d4221831fc..954e658b25 100644 --- a/backends/meson.build +++ b/backends/meson.build @@ -12,9 +12,17 @@ softmmu_ss.add([files( softmmu_ss.add(when: 'CONFIG_POSIX', if_true: files('rng-random.c')) softmmu_ss.add(when: 'CONFIG_POSIX', if_true: files('hostmem-file.c')) softmmu_ss.add(when: 'CONFIG_LINUX', if_true: files('hostmem-memfd.c')) -softmmu_ss.add(when: ['CONFIG_VHOST_USER', 'CONFIG_VIRTIO'], if_true: files('vhost-user.c')) +if keyutils.found() + softmmu_ss.add(keyutils, files('cryptodev-lkcf.c')) +endif +if have_vhost_user + softmmu_ss.add(when: 'CONFIG_VIRTIO', if_true: files('vhost-user.c')) +endif softmmu_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost.c')) -softmmu_ss.add(when: ['CONFIG_VIRTIO_CRYPTO', 'CONFIG_VHOST_CRYPTO'], if_true: files('cryptodev-vhost-user.c')) -softmmu_ss.add(when: 'CONFIG_GIO', if_true: [files('dbus-vmstate.c'), gio]) +if have_vhost_user_crypto + softmmu_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost-user.c')) +endif +softmmu_ss.add(when: gio, if_true: files('dbus-vmstate.c')) +softmmu_ss.add(when: 'CONFIG_SGX', if_true: files('hostmem-epc.c')) subdir('tpm') diff --git a/backends/rng-egd.c b/backends/rng-egd.c index 4de142b9dc..684c3cf3d6 100644 --- a/backends/rng-egd.c +++ b/backends/rng-egd.c @@ -116,7 +116,7 @@ static void rng_egd_set_chardev(Object *obj, const char *value, Error **errp) RngEgd *s = RNG_EGD(b); if (b->opened) { - error_setg(errp, QERR_PERMISSION_DENIED); + error_setg(errp, "Property 'chardev' can no longer be set"); } else { g_free(s->chr_name); s->chr_name = g_strdup(value); diff --git a/backends/rng-random.c b/backends/rng-random.c index 7add272edd..80eb5be138 100644 --- a/backends/rng-random.c +++ b/backends/rng-random.c @@ -96,7 +96,7 @@ static void rng_random_set_filename(Object *obj, const char *filename, RngRandom *s = RNG_RANDOM(obj); if (b->opened) { - error_setg(errp, QERR_PERMISSION_DENIED); + error_setg(errp, "Property 'filename' can no longer be set"); return; } diff --git a/backends/rng.c b/backends/rng.c index 3757b04485..6c7bf64426 100644 --- a/backends/rng.c +++ b/backends/rng.c @@ -48,24 
+48,10 @@ static bool rng_backend_prop_get_opened(Object *obj, Error **errp) static void rng_backend_complete(UserCreatable *uc, Error **errp) { - object_property_set_bool(OBJECT(uc), "opened", true, errp); -} - -static void rng_backend_prop_set_opened(Object *obj, bool value, Error **errp) -{ - RngBackend *s = RNG_BACKEND(obj); + RngBackend *s = RNG_BACKEND(uc); RngBackendClass *k = RNG_BACKEND_GET_CLASS(s); Error *local_err = NULL; - if (value == s->opened) { - return; - } - - if (!value && s->opened) { - error_setg(errp, QERR_PERMISSION_DENIED); - return; - } - if (k->opened) { k->opened(s, &local_err); if (local_err) { @@ -122,7 +108,7 @@ static void rng_backend_class_init(ObjectClass *oc, void *data) object_class_property_add_bool(oc, "opened", rng_backend_prop_get_opened, - rng_backend_prop_set_opened); + NULL); } static const TypeInfo rng_backend_info = { diff --git a/backends/tpm/meson.build b/backends/tpm/meson.build index 857929082e..7f2503f84e 100644 --- a/backends/tpm/meson.build +++ b/backends/tpm/meson.build @@ -1,8 +1,6 @@ -tpm_ss = ss.source_set() - -tpm_ss.add(files('tpm_backend.c')) -tpm_ss.add(files('tpm_util.c')) -tpm_ss.add(when: 'CONFIG_TPM_PASSTHROUGH', if_true: files('tpm_passthrough.c')) -tpm_ss.add(when: 'CONFIG_TPM_EMULATOR', if_true: files('tpm_emulator.c')) - -softmmu_ss.add_all(when: 'CONFIG_TPM', if_true: tpm_ss) +if have_tpm + softmmu_ss.add(files('tpm_backend.c')) + softmmu_ss.add(files('tpm_util.c')) + softmmu_ss.add(when: 'CONFIG_TPM_PASSTHROUGH', if_true: files('tpm_passthrough.c')) + softmmu_ss.add(when: 'CONFIG_TPM_EMULATOR', if_true: files('tpm_emulator.c')) +endif diff --git a/backends/tpm/tpm_emulator.c b/backends/tpm/tpm_emulator.c index e5f1063ab6..49cc3d749d 100644 --- a/backends/tpm/tpm_emulator.c +++ b/backends/tpm/tpm_emulator.c @@ -32,8 +32,10 @@ #include "qemu/sockets.h" #include "qemu/lockable.h" #include "io/channel-socket.h" +#include "sysemu/runstate.h" #include "sysemu/tpm_backend.h" #include "sysemu/tpm_util.h" +#include "sysemu/runstate.h" #include "tpm_int.h" #include "tpm_ioctl.h" #include "migration/blocker.h" @@ -81,6 +83,9 @@ struct TPMEmulator { unsigned int established_flag_cached:1; TPMBlobBuffers state_blobs; + + bool relock_storage; + VMChangeStateEntry *vmstate; }; struct tpm_error { @@ -302,6 +307,35 @@ static int tpm_emulator_stop_tpm(TPMBackend *tb) return 0; } +static int tpm_emulator_lock_storage(TPMEmulator *tpm_emu) +{ + ptm_lockstorage pls; + + if (!TPM_EMULATOR_IMPLEMENTS_ALL_CAPS(tpm_emu, PTM_CAP_LOCK_STORAGE)) { + trace_tpm_emulator_lock_storage_cmd_not_supt(); + return 0; + } + + /* give failing side 300 * 10ms time to release lock */ + pls.u.req.retries = cpu_to_be32(300); + if (tpm_emulator_ctrlcmd(tpm_emu, CMD_LOCK_STORAGE, &pls, + sizeof(pls.u.req), sizeof(pls.u.resp)) < 0) { + error_report("tpm-emulator: Could not lock storage within 3 seconds: " + "%s", strerror(errno)); + return -1; + } + + pls.u.resp.tpm_result = be32_to_cpu(pls.u.resp.tpm_result); + if (pls.u.resp.tpm_result != 0) { + error_report("tpm-emulator: TPM result for CMD_LOCK_STORAGE: 0x%x %s", + pls.u.resp.tpm_result, + tpm_emulator_strerror(pls.u.resp.tpm_result)); + return -1; + } + + return 0; +} + static int tpm_emulator_set_buffer_size(TPMBackend *tb, size_t wanted_size, size_t *actual_size) @@ -383,6 +417,15 @@ err_exit: static int tpm_emulator_startup_tpm(TPMBackend *tb, size_t buffersize) { + /* TPM startup will be done from post_load hook */ + if (runstate_check(RUN_STATE_INMIGRATE)) { + if (buffersize != 0) { + return 
tpm_emulator_set_buffer_size(tb, buffersize, NULL); + } + + return 0; + } + return tpm_emulator_startup_tpm_resume(tb, buffersize, false); } @@ -492,8 +535,7 @@ static int tpm_emulator_block_migration(TPMEmulator *tpm_emu) error_setg(&tpm_emu->migration_blocker, "Migration disabled: TPM emulator does not support " "migration"); - migrate_add_blocker(tpm_emu->migration_blocker, &err); - if (err) { + if (migrate_add_blocker(tpm_emu->migration_blocker, &err) < 0) { error_report_err(err); error_free(tpm_emu->migration_blocker); tpm_emu->migration_blocker = NULL; @@ -624,7 +666,7 @@ static TpmTypeOptions *tpm_emulator_get_tpm_options(TPMBackend *tb) TPMEmulator *tpm_emu = TPM_EMULATOR(tb); TpmTypeOptions *options = g_new0(TpmTypeOptions, 1); - options->type = TPM_TYPE_OPTIONS_KIND_EMULATOR; + options->type = TPM_TYPE_EMULATOR; options->u.emulator.data = QAPI_CLONE(TPMEmulatorOptions, tpm_emu->options); return options; @@ -844,13 +886,34 @@ static int tpm_emulator_pre_save(void *opaque) { TPMBackend *tb = opaque; TPMEmulator *tpm_emu = TPM_EMULATOR(tb); + int ret; trace_tpm_emulator_pre_save(); tpm_backend_finish_sync(tb); /* get the state blobs from the TPM */ - return tpm_emulator_get_state_blobs(tpm_emu); + ret = tpm_emulator_get_state_blobs(tpm_emu); + + tpm_emu->relock_storage = ret == 0; + + return ret; +} + +static void tpm_emulator_vm_state_change(void *opaque, bool running, + RunState state) +{ + TPMBackend *tb = opaque; + TPMEmulator *tpm_emu = TPM_EMULATOR(tb); + + trace_tpm_emulator_vm_state_change(running, state); + + if (!running || state != RUN_STATE_RUNNING || !tpm_emu->relock_storage) { + return; + } + + /* lock storage after migration fall-back */ + tpm_emulator_lock_storage(tpm_emu); } /* @@ -912,6 +975,9 @@ static void tpm_emulator_inst_init(Object *obj) tpm_emu->options = g_new0(TPMEmulatorOptions, 1); tpm_emu->cur_locty_number = ~0; qemu_mutex_init(&tpm_emu->mutex); + tpm_emu->vmstate = + qemu_add_vm_change_state_handler(tpm_emulator_vm_state_change, + tpm_emu); vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY, &vmstate_tpm_emulator, obj); @@ -961,6 +1027,7 @@ static void tpm_emulator_inst_finalize(Object *obj) tpm_sized_buffer_reset(&state_blobs->savestate); qemu_mutex_destroy(&tpm_emu->mutex); + qemu_del_vm_change_state_handler(tpm_emu->vmstate); vmstate_unregister(NULL, &vmstate_tpm_emulator, obj); } diff --git a/backends/tpm/tpm_ioctl.h b/backends/tpm/tpm_ioctl.h index bd6c12cb86..e506ef5160 100644 --- a/backends/tpm/tpm_ioctl.h +++ b/backends/tpm/tpm_ioctl.h @@ -5,12 +5,19 @@ * * This file is licensed under the terms of the 3-clause BSD license */ +#ifndef _TPM_IOCTL_H_ +#define _TPM_IOCTL_H_ -#ifndef TPM_IOCTL_H -#define TPM_IOCTL_H +#if defined(__CYGWIN__) +# define __USE_LINUX_IOCTL_DEFS +#endif +#include +#include +#ifndef _WIN32 #include #include +#endif #ifdef HAVE_SYS_IOCCOM_H #include @@ -194,6 +201,48 @@ struct ptm_setbuffersize { } u; }; +#define PTM_GETINFO_SIZE (3 * 1024) +/* + * PTM_GET_INFO: Get info about the TPM implementation (from libtpms) + * + * This request allows to indirectly call TPMLIB_GetInfo(flags) and + * retrieve information from libtpms. + * Only one transaction is currently necessary for returning results + * to a client. Therefore, totlength and length will be the same if + * offset is 0. 
+ */ +struct ptm_getinfo { + union { + struct { + uint64_t flags; + uint32_t offset; /* offset from where to read */ + uint32_t pad; /* 32 bit arch */ + } req; /* request */ + struct { + ptm_res tpm_result; + uint32_t totlength; + uint32_t length; + char buffer[PTM_GETINFO_SIZE]; + } resp; /* response */ + } u; +}; + +#define SWTPM_INFO_TPMSPECIFICATION ((uint64_t)1 << 0) +#define SWTPM_INFO_TPMATTRIBUTES ((uint64_t)1 << 1) + +/* + * PTM_LOCK_STORAGE: Lock the storage and retry n times + */ +struct ptm_lockstorage { + union { + struct { + uint32_t retries; /* number of retries */ + } req; /* request */ + struct { + ptm_res tpm_result; + } resp; /* reponse */ + } u; +}; typedef uint64_t ptm_cap; typedef struct ptm_est ptm_est; @@ -205,6 +254,8 @@ typedef struct ptm_getstate ptm_getstate; typedef struct ptm_setstate ptm_setstate; typedef struct ptm_getconfig ptm_getconfig; typedef struct ptm_setbuffersize ptm_setbuffersize; +typedef struct ptm_getinfo ptm_getinfo; +typedef struct ptm_lockstorage ptm_lockstorage; /* capability flags returned by PTM_GET_CAPABILITY */ #define PTM_CAP_INIT (1) @@ -221,7 +272,11 @@ typedef struct ptm_setbuffersize ptm_setbuffersize; #define PTM_CAP_GET_CONFIG (1 << 11) #define PTM_CAP_SET_DATAFD (1 << 12) #define PTM_CAP_SET_BUFFERSIZE (1 << 13) +#define PTM_CAP_GET_INFO (1 << 14) +#define PTM_CAP_SEND_COMMAND_HEADER (1 << 15) +#define PTM_CAP_LOCK_STORAGE (1 << 16) +#ifndef _WIN32 enum { PTM_GET_CAPABILITY = _IOR('P', 0, ptm_cap), PTM_INIT = _IOWR('P', 1, ptm_init), @@ -240,7 +295,10 @@ enum { PTM_GET_CONFIG = _IOR('P', 14, ptm_getconfig), PTM_SET_DATAFD = _IOR('P', 15, ptm_res), PTM_SET_BUFFERSIZE = _IOWR('P', 16, ptm_setbuffersize), + PTM_GET_INFO = _IOWR('P', 17, ptm_getinfo), + PTM_LOCK_STORAGE = _IOWR('P', 18, ptm_lockstorage), }; +#endif /* * Commands used by the non-CUSE TPMs @@ -253,23 +311,25 @@ enum { * and ptm_set_state:u.req.data) are 0xffffffff. 
*/ enum { - CMD_GET_CAPABILITY = 1, - CMD_INIT, - CMD_SHUTDOWN, - CMD_GET_TPMESTABLISHED, - CMD_SET_LOCALITY, - CMD_HASH_START, - CMD_HASH_DATA, - CMD_HASH_END, - CMD_CANCEL_TPM_CMD, - CMD_STORE_VOLATILE, - CMD_RESET_TPMESTABLISHED, - CMD_GET_STATEBLOB, - CMD_SET_STATEBLOB, - CMD_STOP, - CMD_GET_CONFIG, - CMD_SET_DATAFD, - CMD_SET_BUFFERSIZE, + CMD_GET_CAPABILITY = 1, /* 0x01 */ + CMD_INIT, /* 0x02 */ + CMD_SHUTDOWN, /* 0x03 */ + CMD_GET_TPMESTABLISHED, /* 0x04 */ + CMD_SET_LOCALITY, /* 0x05 */ + CMD_HASH_START, /* 0x06 */ + CMD_HASH_DATA, /* 0x07 */ + CMD_HASH_END, /* 0x08 */ + CMD_CANCEL_TPM_CMD, /* 0x09 */ + CMD_STORE_VOLATILE, /* 0x0a */ + CMD_RESET_TPMESTABLISHED, /* 0x0b */ + CMD_GET_STATEBLOB, /* 0x0c */ + CMD_SET_STATEBLOB, /* 0x0d */ + CMD_STOP, /* 0x0e */ + CMD_GET_CONFIG, /* 0x0f */ + CMD_SET_DATAFD, /* 0x10 */ + CMD_SET_BUFFERSIZE, /* 0x11 */ + CMD_GET_INFO, /* 0x12 */ + CMD_LOCK_STORAGE, /* 0x13 */ }; -#endif /* TPM_IOCTL_H */ +#endif /* _TPM_IOCTL_H_ */ diff --git a/backends/tpm/tpm_passthrough.c b/backends/tpm/tpm_passthrough.c index 21b7459183..5a2f74db1b 100644 --- a/backends/tpm/tpm_passthrough.c +++ b/backends/tpm/tpm_passthrough.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/error-report.h" #include "qemu/module.h" #include "qemu/sockets.h" @@ -321,7 +320,7 @@ static TpmTypeOptions *tpm_passthrough_get_tpm_options(TPMBackend *tb) { TpmTypeOptions *options = g_new0(TpmTypeOptions, 1); - options->type = TPM_TYPE_OPTIONS_KIND_PASSTHROUGH; + options->type = TPM_TYPE_PASSTHROUGH; options->u.passthrough.data = QAPI_CLONE(TPMPassthroughOptions, TPM_PASSTHROUGH(tb)->options); diff --git a/backends/tpm/trace-events b/backends/tpm/trace-events index 3298766dd7..1ecef42a07 100644 --- a/backends/tpm/trace-events +++ b/backends/tpm/trace-events @@ -20,6 +20,8 @@ tpm_emulator_set_buffer_size(uint32_t buffersize, uint32_t minsize, uint32_t max tpm_emulator_startup_tpm_resume(bool is_resume, size_t buffersize) "is_resume: %d, buffer size: %zu" tpm_emulator_get_tpm_established_flag(uint8_t flag) "got established flag: %d" tpm_emulator_cancel_cmd_not_supt(void) "Backend does not support CANCEL_TPM_CMD" +tpm_emulator_lock_storage_cmd_not_supt(void) "Backend does not support LOCK_STORAGE" +tpm_emulator_vm_state_change(int running, int state) "state change to running %d state %d" tpm_emulator_handle_device_opts_tpm12(void) "TPM Version 1.2" tpm_emulator_handle_device_opts_tpm2(void) "TPM Version 2" tpm_emulator_handle_device_opts_unspec(void) "TPM Version Unspecified" diff --git a/backends/vhost-user.c b/backends/vhost-user.c index 10b39992d2..7bfcaef976 100644 --- a/backends/vhost-user.c +++ b/backends/vhost-user.c @@ -85,7 +85,7 @@ vhost_user_backend_start(VhostUserBackend *b) } b->dev.acked_features = b->vdev->guest_features; - ret = vhost_dev_start(&b->dev, b->vdev); + ret = vhost_dev_start(&b->dev, b->vdev, true); if (ret < 0) { error_report("Error start vhost dev"); goto err_guest_notifiers; @@ -120,7 +120,7 @@ vhost_user_backend_stop(VhostUserBackend *b) return; } - vhost_dev_stop(&b->dev, b->vdev); + vhost_dev_stop(&b->dev, b->vdev, true); if (k->set_guest_notifiers) { ret = k->set_guest_notifiers(qbus->parent, @@ -141,7 +141,7 @@ static void set_chardev(Object *obj, const char *value, Error **errp) Chardev *chr; if (b->completed) { - error_setg(errp, QERR_PERMISSION_DENIED); + error_setg(errp, "Property 'chardev' can no longer be set"); return; } diff --git a/block.c b/block.c index b3a12fb318..dbc28ccc30 100644 --- a/block.c +++ b/block.c 
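/*
 * Minimal sketch (illustrative names, not the QEMU object model) of the
 * property pattern the backend hunks above converge on: once a backend has
 * been opened/completed, a setter rejects further writes with a specific
 * message instead of the generic QERR_PERMISSION_DENIED text.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct DemoBackend {
    int opened;        /* set once the backend is realized/completed */
    char *chr_name;
} DemoBackend;

static int demo_set_chardev(DemoBackend *b, const char *value, char **errp)
{
    if (b->opened) {
        *errp = strdup("Property 'chardev' can no longer be set");
        return -1;
    }
    free(b->chr_name);
    b->chr_name = strdup(value);
    return 0;
}

int main(void)
{
    DemoBackend b = { 0 };
    char *err = NULL;

    demo_set_chardev(&b, "chr0", &err);   /* accepted before the backend opens */
    b.opened = 1;
    if (demo_set_chardev(&b, "chr1", &err) < 0) {
        printf("error: %s\n", err);       /* rejected with a descriptive message */
        free(err);
    }
    free(b.chr_name);
    return 0;
}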
@@ -49,6 +49,8 @@ #include "qemu/timer.h" #include "qemu/cutils.h" #include "qemu/id.h" +#include "qemu/range.h" +#include "qemu/rcu.h" #include "block/coroutines.h" #ifdef CONFIG_BSD @@ -65,12 +67,15 @@ #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */ +/* Protected by BQL */ static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states = QTAILQ_HEAD_INITIALIZER(graph_bdrv_states); +/* Protected by BQL */ static QTAILQ_HEAD(, BlockDriverState) all_bdrv_states = QTAILQ_HEAD_INITIALIZER(all_bdrv_states); +/* Protected by BQL */ static QLIST_HEAD(, BlockDriver) bdrv_drivers = QLIST_HEAD_INITIALIZER(bdrv_drivers); @@ -82,11 +87,12 @@ static BlockDriverState *bdrv_open_inherit(const char *filename, BdrvChildRole child_role, Error **errp); +static bool bdrv_recurse_has_child(BlockDriverState *bs, + BlockDriverState *child); + static void bdrv_replace_child_noperm(BdrvChild *child, BlockDriverState *new_bs); -static void bdrv_remove_file_or_backing_child(BlockDriverState *bs, - BdrvChild *child, - Transaction *tran); +static void bdrv_remove_child(BdrvChild *child, Transaction *tran); static void bdrv_remove_filter_or_cow_child(BlockDriverState *bs, Transaction *tran); @@ -96,6 +102,12 @@ static int bdrv_reopen_prepare(BDRVReopenState *reopen_state, static void bdrv_reopen_commit(BDRVReopenState *reopen_state); static void bdrv_reopen_abort(BDRVReopenState *reopen_state); +static bool bdrv_backing_overridden(BlockDriverState *bs); + +static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx, + GHashTable *visited, Transaction *tran, + Error **errp); + /* If non-zero, use only whitelisted block drivers */ static int use_bdrv_whitelist; @@ -123,8 +135,9 @@ size_t bdrv_opt_mem_align(BlockDriverState *bs) { if (!bs || !bs->drv) { /* page size or 4k (hdd sector size) should be on the safe side */ - return MAX(4096, qemu_real_host_page_size); + return MAX(4096, qemu_real_host_page_size()); } + IO_CODE(); return bs->bl.opt_mem_alignment; } @@ -133,8 +146,9 @@ size_t bdrv_min_mem_align(BlockDriverState *bs) { if (!bs || !bs->drv) { /* page size or 4k (hdd sector size) should be on the safe side */ - return MAX(4096, qemu_real_host_page_size); + return MAX(4096, qemu_real_host_page_size()); } + IO_CODE(); return bs->bl.min_mem_alignment; } @@ -260,12 +274,15 @@ void bdrv_parse_filename_strip_prefix(const char *filename, const char *prefix, * image is inactivated. 
*/ bool bdrv_is_read_only(BlockDriverState *bs) { + IO_CODE(); return !(bs->open_flags & BDRV_O_RDWR); } int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only, bool ignore_allow_rdw, Error **errp) { + IO_CODE(); + /* Do not set read_only if copy_on_read is enabled */ if (bs->copy_on_read && read_only) { error_setg(errp, "Can't set node '%s' to r/o with copy-on-read enabled", @@ -299,6 +316,7 @@ int bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg, Error **errp) { int ret = 0; + IO_CODE(); if (!(bs->open_flags & BDRV_O_RDWR)) { return 0; @@ -375,12 +393,14 @@ static char *bdrv_make_absolute_filename(BlockDriverState *relative_to, char *bdrv_get_full_backing_filename(BlockDriverState *bs, Error **errp) { + GLOBAL_STATE_CODE(); return bdrv_make_absolute_filename(bs, bs->backing_file, errp); } void bdrv_register(BlockDriver *bdrv) { assert(bdrv->format_name); + GLOBAL_STATE_CODE(); QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list); } @@ -389,6 +409,8 @@ BlockDriverState *bdrv_new(void) BlockDriverState *bs; int i; + GLOBAL_STATE_CODE(); + bs = g_new0(BlockDriverState, 1); QLIST_INIT(&bs->dirty_bitmaps); for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) { @@ -401,6 +423,9 @@ BlockDriverState *bdrv_new(void) qemu_co_queue_init(&bs->flush_queue); + qemu_co_mutex_init(&bs->bsc_modify_lock); + bs->block_status_cache = g_new0(BdrvBlockStatusCache, 1); + for (i = 0; i < bdrv_drain_all_count; i++) { bdrv_drained_begin(bs); } @@ -413,6 +438,7 @@ BlockDriverState *bdrv_new(void) static BlockDriver *bdrv_do_find_format(const char *format_name) { BlockDriver *drv1; + GLOBAL_STATE_CODE(); QLIST_FOREACH(drv1, &bdrv_drivers, list) { if (!strcmp(drv1->format_name, format_name)) { @@ -428,6 +454,8 @@ BlockDriver *bdrv_find_format(const char *format_name) BlockDriver *drv1; int i; + GLOBAL_STATE_CODE(); + drv1 = bdrv_do_find_format(format_name); if (drv1) { return drv1; @@ -436,12 +464,18 @@ BlockDriver *bdrv_find_format(const char *format_name) /* The driver isn't registered, maybe we need to load a module */ for (i = 0; i < (int)ARRAY_SIZE(block_driver_modules); ++i) { if (!strcmp(block_driver_modules[i].format_name, format_name)) { - block_module_load_one(block_driver_modules[i].library_name); + Error *local_err = NULL; + int rv = block_module_load(block_driver_modules[i].library_name, + &local_err); + if (rv > 0) { + return bdrv_do_find_format(format_name); + } else if (rv < 0) { + error_report_err(local_err); + } break; } } - - return bdrv_do_find_format(format_name); + return NULL; } static int bdrv_format_is_whitelisted(const char *format_name, bool read_only) @@ -477,6 +511,7 @@ static int bdrv_format_is_whitelisted(const char *format_name, bool read_only) int bdrv_is_whitelisted(BlockDriver *drv, bool read_only) { + GLOBAL_STATE_CODE(); return bdrv_format_is_whitelisted(drv->format_name, read_only); } @@ -500,6 +535,7 @@ static void coroutine_fn bdrv_create_co_entry(void *opaque) CreateCo *cco = opaque; assert(cco->drv); + GLOBAL_STATE_CODE(); ret = cco->drv->bdrv_co_create_opts(cco->drv, cco->filename, cco->opts, &local_err); @@ -512,6 +548,8 @@ int bdrv_create(BlockDriver *drv, const char* filename, { int ret; + GLOBAL_STATE_CODE(); + Coroutine *co; CreateCo cco = { .drv = drv, @@ -566,6 +604,8 @@ static int64_t create_file_fallback_truncate(BlockBackend *blk, int64_t size; int ret; + GLOBAL_STATE_CODE(); + ret = blk_truncate(blk, minimum_size, false, PREALLOC_MODE_OFF, 0, &local_err); if (ret < 0 && ret != -ENOTSUP) { @@ -597,16 +637,19 @@ static int64_t 
create_file_fallback_truncate(BlockBackend *blk, * Helper function for bdrv_create_file_fallback(): Zero the first * sector to remove any potentially pre-existing image header. */ -static int create_file_fallback_zero_first_sector(BlockBackend *blk, - int64_t current_size, - Error **errp) +static int coroutine_fn +create_file_fallback_zero_first_sector(BlockBackend *blk, + int64_t current_size, + Error **errp) { int64_t bytes_to_clear; int ret; + GLOBAL_STATE_CODE(); + bytes_to_clear = MIN(current_size, BDRV_SECTOR_SIZE); if (bytes_to_clear) { - ret = blk_pwrite_zeroes(blk, 0, bytes_to_clear, BDRV_REQ_MAY_UNMAP); + ret = blk_co_pwrite_zeroes(blk, 0, bytes_to_clear, BDRV_REQ_MAY_UNMAP); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to clear the new image's first sector"); @@ -635,6 +678,8 @@ int coroutine_fn bdrv_co_create_opts_simple(BlockDriver *drv, Error *local_err = NULL; int ret; + GLOBAL_STATE_CODE(); + size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0); buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); prealloc = qapi_enum_parse(&PreallocMode_lookup, buf, @@ -687,6 +732,8 @@ int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp) QDict *qdict; int ret; + GLOBAL_STATE_CODE(); + drv = bdrv_find_protocol(filename, true, errp); if (drv == NULL) { return -ENOENT; @@ -731,6 +778,7 @@ int coroutine_fn bdrv_co_delete_file(BlockDriverState *bs, Error **errp) Error *local_err = NULL; int ret; + IO_CODE(); assert(bs != NULL); if (!bs->drv) { @@ -756,6 +804,7 @@ void coroutine_fn bdrv_co_delete_file_noerr(BlockDriverState *bs) { Error *local_err = NULL; int ret; + IO_CODE(); if (!bs) { return; @@ -784,6 +833,7 @@ int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz) { BlockDriver *drv = bs->drv; BlockDriverState *filtered = bdrv_filter_bs(bs); + GLOBAL_STATE_CODE(); if (drv && drv->bdrv_probe_blocksizes) { return drv->bdrv_probe_blocksizes(bs, bsz); @@ -804,6 +854,7 @@ int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo) { BlockDriver *drv = bs->drv; BlockDriverState *filtered = bdrv_filter_bs(bs); + GLOBAL_STATE_CODE(); if (drv && drv->bdrv_probe_geometry) { return drv->bdrv_probe_geometry(bs, geo); @@ -816,38 +867,42 @@ int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo) /* * Create a uniquely-named empty temporary file. - * Return 0 upon success, otherwise a negative errno value. + * Return the actual file name used upon success, otherwise NULL. + * This string should be freed with g_free() when not needed any longer. + * + * Note: creating a temporary file for the caller to (re)open is + * inherently racy. Use g_file_open_tmp() instead whenever practical. */ -int get_tmp_filename(char *filename, int size) +char *create_tmp_file(Error **errp) { -#ifdef _WIN32 - char temp_dir[MAX_PATH]; - /* GetTempFileName requires that its output buffer (4th param) - have length MAX_PATH or greater. */ - assert(size >= MAX_PATH); - return (GetTempPath(MAX_PATH, temp_dir) - && GetTempFileName(temp_dir, "qem", 0, filename) - ? 0 : -GetLastError()); -#else int fd; const char *tmpdir; - tmpdir = getenv("TMPDIR"); - if (!tmpdir) { + g_autofree char *filename = NULL; + + tmpdir = g_get_tmp_dir(); +#ifndef _WIN32 + /* + * See commit 69bef79 ("block: use /var/tmp instead of /tmp for -snapshot") + * + * This function is used to create temporary disk images (like -snapshot), + * so the files can become very large. /tmp is often a tmpfs where as + * /var/tmp is usually on a disk, so more appropriate for disk images. 
+ */ + if (!g_strcmp0(tmpdir, "/tmp")) { tmpdir = "/var/tmp"; } - if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) { - return -EOVERFLOW; - } - fd = mkstemp(filename); - if (fd < 0) { - return -errno; - } - if (close(fd) != 0) { - unlink(filename); - return -errno; - } - return 0; #endif + + filename = g_strdup_printf("%s/vl.XXXXXX", tmpdir); + fd = g_mkstemp(filename); + if (fd < 0) { + error_setg_errno(errp, errno, "Could not open temporary file '%s'", + filename); + return NULL; + } + close(fd); + + return g_steal_pointer(&filename); } /* @@ -858,6 +913,7 @@ static BlockDriver *find_hdev_driver(const char *filename) { int score_max = 0, score; BlockDriver *drv = NULL, *d; + GLOBAL_STATE_CODE(); QLIST_FOREACH(d, &bdrv_drivers, list) { if (d->bdrv_probe_device) { @@ -875,6 +931,7 @@ static BlockDriver *find_hdev_driver(const char *filename) static BlockDriver *bdrv_do_find_protocol(const char *protocol) { BlockDriver *drv1; + GLOBAL_STATE_CODE(); QLIST_FOREACH(drv1, &bdrv_drivers, list) { if (drv1->protocol_name && !strcmp(drv1->protocol_name, protocol)) { @@ -895,6 +952,7 @@ BlockDriver *bdrv_find_protocol(const char *filename, const char *p; int i; + GLOBAL_STATE_CODE(); /* TODO Drivers without bdrv_file_open must be specified explicitly */ /* @@ -929,12 +987,16 @@ BlockDriver *bdrv_find_protocol(const char *filename, for (i = 0; i < (int)ARRAY_SIZE(block_driver_modules); ++i) { if (block_driver_modules[i].protocol_name && !strcmp(block_driver_modules[i].protocol_name, protocol)) { - block_module_load_one(block_driver_modules[i].library_name); + int rv = block_module_load(block_driver_modules[i].library_name, errp); + if (rv > 0) { + drv1 = bdrv_do_find_protocol(protocol); + } else if (rv < 0) { + return NULL; + } break; } } - drv1 = bdrv_do_find_protocol(protocol); if (!drv1) { error_setg(errp, "Unknown protocol '%s'", protocol); } @@ -960,6 +1022,7 @@ BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size, { int score_max = 0, score; BlockDriver *drv = NULL, *d; + IO_CODE(); QLIST_FOREACH(d, &bdrv_drivers, list) { if (d->bdrv_probe) { @@ -981,13 +1044,15 @@ static int find_image_format(BlockBackend *file, const char *filename, uint8_t buf[BLOCK_PROBE_BUF_SIZE]; int ret = 0; + GLOBAL_STATE_CODE(); + /* Return the raw BlockDriver * to scsi-generic devices or empty drives */ if (blk_is_sg(file) || !blk_is_inserted(file) || blk_getlength(file) == 0) { *pdrv = &bdrv_raw; return ret; } - ret = blk_pread(file, 0, buf, sizeof(buf)); + ret = blk_pread(file, 0, sizeof(buf), buf, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Could not read image for determining its " "format"); @@ -995,14 +1060,16 @@ static int find_image_format(BlockBackend *file, const char *filename, return ret; } - drv = bdrv_probe_all(buf, ret, filename); + drv = bdrv_probe_all(buf, sizeof(buf), filename); if (!drv) { error_setg(errp, "Could not determine image format: No compatible " "driver found"); - ret = -ENOENT; + *pdrv = NULL; + return -ENOENT; } + *pdrv = drv; - return ret; + return 0; } /** @@ -1012,6 +1079,7 @@ static int find_image_format(BlockBackend *file, const char *filename, int refresh_total_sectors(BlockDriverState *bs, int64_t hint) { BlockDriver *drv = bs->drv; + IO_CODE(); if (!drv) { return -ENOMEDIUM; @@ -1046,6 +1114,7 @@ int refresh_total_sectors(BlockDriverState *bs, int64_t hint) static void bdrv_join_options(BlockDriverState *bs, QDict *options, QDict *old_options) { + GLOBAL_STATE_CODE(); if (bs->drv && bs->drv->bdrv_join_options) { bs->drv->bdrv_join_options(options, 
old_options); } else { @@ -1062,6 +1131,7 @@ static BlockdevDetectZeroesOptions bdrv_parse_detect_zeroes(QemuOpts *opts, BlockdevDetectZeroesOptions detect_zeroes = qapi_enum_parse(&BlockdevDetectZeroesOptions_lookup, value, BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF, &local_err); + GLOBAL_STATE_CODE(); g_free(value); if (local_err) { error_propagate(errp, local_err); @@ -1177,22 +1247,17 @@ static void bdrv_child_cb_drained_end(BdrvChild *child, static int bdrv_child_cb_inactivate(BdrvChild *child) { BlockDriverState *bs = child->opaque; + GLOBAL_STATE_CODE(); assert(bs->open_flags & BDRV_O_INACTIVE); return 0; } -static bool bdrv_child_cb_can_set_aio_ctx(BdrvChild *child, AioContext *ctx, - GSList **ignore, Error **errp) +static bool bdrv_child_cb_change_aio_ctx(BdrvChild *child, AioContext *ctx, + GHashTable *visited, Transaction *tran, + Error **errp) { BlockDriverState *bs = child->opaque; - return bdrv_can_set_aio_context(bs, ctx, ignore, errp); -} - -static void bdrv_child_cb_set_aio_ctx(BdrvChild *child, AioContext *ctx, - GSList **ignore) -{ - BlockDriverState *bs = child->opaque; - return bdrv_set_aio_context_ignore(bs, ctx, ignore); + return bdrv_change_aio_context(bs, ctx, visited, tran, errp); } /* @@ -1203,6 +1268,7 @@ static void bdrv_child_cb_set_aio_ctx(BdrvChild *child, AioContext *ctx, static void bdrv_temp_snapshot_options(int *child_flags, QDict *child_options, int parent_flags, QDict *parent_options) { + GLOBAL_STATE_CODE(); *child_flags = (parent_flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY; /* For temporary files, unconditional cache=unsafe is fine */ @@ -1223,6 +1289,7 @@ static void bdrv_backing_attach(BdrvChild *c) BlockDriverState *parent = c->opaque; BlockDriverState *backing_hd = c->bs; + GLOBAL_STATE_CODE(); assert(!parent->backing_blocker); error_setg(&parent->backing_blocker, "node is used as backing hd of '%s'", @@ -1261,6 +1328,7 @@ static void bdrv_backing_detach(BdrvChild *c) { BlockDriverState *parent = c->opaque; + GLOBAL_STATE_CODE(); assert(parent->backing_blocker); bdrv_op_unblock_all(c->bs, parent->backing_blocker); error_free(parent->backing_blocker); @@ -1273,6 +1341,7 @@ static int bdrv_backing_update_filename(BdrvChild *c, BlockDriverState *base, BlockDriverState *parent = c->opaque; bool read_only = bdrv_is_read_only(parent); int ret; + GLOBAL_STATE_CODE(); if (read_only) { ret = bdrv_reopen_set_read_only(parent, false, errp); @@ -1304,6 +1373,7 @@ static void bdrv_inherited_options(BdrvChildRole role, bool parent_is_format, int parent_flags, QDict *parent_options) { int flags = parent_flags; + GLOBAL_STATE_CODE(); /* * First, decide whether to set, clear, or leave BDRV_O_PROTOCOL. @@ -1379,8 +1449,41 @@ static void bdrv_child_cb_attach(BdrvChild *child) { BlockDriverState *bs = child->opaque; - if (child->role & BDRV_CHILD_COW) { + assert_bdrv_graph_writable(bs); + QLIST_INSERT_HEAD(&bs->children, child, next); + if (bs->drv->is_filter || (child->role & BDRV_CHILD_FILTERED)) { + /* + * Here we handle filters and block/raw-format.c when it behave like + * filter. They generally have a single PRIMARY child, which is also the + * FILTERED child, and that they may have multiple more children, which + * are neither PRIMARY nor FILTERED. And never we have a COW child here. + * So bs->file will be the PRIMARY child, unless the PRIMARY child goes + * into bs->backing on exceptional cases; and bs->backing will be + * nothing else. 
+ */ + assert(!(child->role & BDRV_CHILD_COW)); + if (child->role & BDRV_CHILD_PRIMARY) { + assert(child->role & BDRV_CHILD_FILTERED); + assert(!bs->backing); + assert(!bs->file); + + if (bs->drv->filtered_child_is_backing) { + bs->backing = child; + } else { + bs->file = child; + } + } else { + assert(!(child->role & BDRV_CHILD_FILTERED)); + } + } else if (child->role & BDRV_CHILD_COW) { + assert(bs->drv->supports_backing); + assert(!(child->role & BDRV_CHILD_PRIMARY)); + assert(!bs->backing); + bs->backing = child; bdrv_backing_attach(child); + } else if (child->role & BDRV_CHILD_PRIMARY) { + assert(!bs->file); + bs->file = child; } bdrv_apply_subtree_drain(child, bs); @@ -1395,6 +1498,15 @@ static void bdrv_child_cb_detach(BdrvChild *child) } bdrv_unapply_subtree_drain(child, bs); + + assert_bdrv_graph_writable(bs); + QLIST_REMOVE(child, next); + if (child == bs->backing) { + assert(child != bs->file); + bs->backing = NULL; + } else if (child == bs->file) { + bs->file = NULL; + } } static int bdrv_child_cb_update_filename(BdrvChild *c, BlockDriverState *base, @@ -1409,6 +1521,7 @@ static int bdrv_child_cb_update_filename(BdrvChild *c, BlockDriverState *base, AioContext *child_of_bds_get_parent_aio_context(BdrvChild *c) { BlockDriverState *bs = c->opaque; + IO_CODE(); return bdrv_get_aio_context(bs); } @@ -1423,20 +1536,21 @@ const BdrvChildClass child_of_bds = { .attach = bdrv_child_cb_attach, .detach = bdrv_child_cb_detach, .inactivate = bdrv_child_cb_inactivate, - .can_set_aio_ctx = bdrv_child_cb_can_set_aio_ctx, - .set_aio_ctx = bdrv_child_cb_set_aio_ctx, + .change_aio_ctx = bdrv_child_cb_change_aio_ctx, .update_filename = bdrv_child_cb_update_filename, .get_parent_aio_context = child_of_bds_get_parent_aio_context, }; AioContext *bdrv_child_get_parent_aio_context(BdrvChild *c) { + IO_CODE(); return c->klass->get_parent_aio_context(c); } static int bdrv_open_flags(BlockDriverState *bs, int flags) { int open_flags = flags; + GLOBAL_STATE_CODE(); /* * Clear flags that are internal to the block layer before opening the @@ -1449,6 +1563,8 @@ static int bdrv_open_flags(BlockDriverState *bs, int flags) static void update_flags_from_options(int *flags, QemuOpts *opts) { + GLOBAL_STATE_CODE(); + *flags &= ~(BDRV_O_CACHE_MASK | BDRV_O_RDWR | BDRV_O_AUTO_RDONLY); if (qemu_opt_get_bool_del(opts, BDRV_OPT_CACHE_NO_FLUSH, false)) { @@ -1470,6 +1586,7 @@ static void update_flags_from_options(int *flags, QemuOpts *opts) static void update_options_from_flags(QDict *options, int flags) { + GLOBAL_STATE_CODE(); if (!qdict_haskey(options, BDRV_OPT_CACHE_DIRECT)) { qdict_put_bool(options, BDRV_OPT_CACHE_DIRECT, flags & BDRV_O_NOCACHE); } @@ -1491,6 +1608,7 @@ static void bdrv_assign_node_name(BlockDriverState *bs, Error **errp) { char *gen_node_name = NULL; + GLOBAL_STATE_CODE(); if (!node_name) { node_name = gen_node_name = id_generate(ID_BLOCK); @@ -1535,6 +1653,7 @@ static int bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, { Error *local_err = NULL; int i, ret; + GLOBAL_STATE_CODE(); bdrv_assign_node_name(bs, node_name, &local_err); if (local_err) { @@ -1565,6 +1684,20 @@ static int bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, goto open_failed; } + assert(!(bs->supported_read_flags & ~BDRV_REQ_MASK)); + assert(!(bs->supported_write_flags & ~BDRV_REQ_MASK)); + + /* + * Always allow the BDRV_REQ_REGISTERED_BUF optimization hint. This saves + * drivers that pass read/write requests through to a child the trouble of + * declaring support explicitly. 
+ * + * Drivers must not propagate this flag accidentally when they initiate I/O + * to a bounce buffer. That case should be rare though. + */ + bs->supported_read_flags |= BDRV_REQ_REGISTERED_BUF; + bs->supported_write_flags |= BDRV_REQ_REGISTERED_BUF; + ret = refresh_total_sectors(bs, bs->total_sectors); if (ret < 0) { error_setg_errno(errp, -ret, "Could not refresh total sector count"); @@ -1592,23 +1725,35 @@ open_failed: bs->drv = NULL; if (bs->file != NULL) { bdrv_unref_child(bs, bs->file); - bs->file = NULL; + assert(!bs->file); } g_free(bs->opaque); bs->opaque = NULL; return ret; } -BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name, - int flags, Error **errp) +/* + * Create and open a block node. + * + * @options is a QDict of options to pass to the block drivers, or NULL for an + * empty set of options. The reference to the QDict belongs to the block layer + * after the call (even on failure), so if the caller intends to reuse the + * dictionary, it needs to use qobject_ref() before calling bdrv_open. + */ +BlockDriverState *bdrv_new_open_driver_opts(BlockDriver *drv, + const char *node_name, + QDict *options, int flags, + Error **errp) { BlockDriverState *bs; int ret; + GLOBAL_STATE_CODE(); + bs = bdrv_new(); bs->open_flags = flags; - bs->explicit_options = qdict_new(); - bs->options = qdict_new(); + bs->options = options ?: qdict_new(); + bs->explicit_options = qdict_clone_shallow(bs->options); bs->opaque = NULL; update_options_from_flags(bs->options, flags); @@ -1626,6 +1771,14 @@ BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name, return bs; } +/* Create and open a block node. */ +BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name, + int flags, Error **errp) +{ + GLOBAL_STATE_CODE(); + return bdrv_new_open_driver_opts(drv, node_name, NULL, flags, errp); +} + QemuOptsList bdrv_runtime_opts = { .name = "bdrv_common", .head = QTAILQ_HEAD_INITIALIZER(bdrv_runtime_opts.head), @@ -1717,6 +1870,7 @@ static int bdrv_open_common(BlockDriverState *bs, BlockBackend *file, assert(bs->file == NULL); assert(options != NULL && bs->options != options); + GLOBAL_STATE_CODE(); opts = qemu_opts_create(&bdrv_runtime_opts, NULL, 0, &error_abort); if (!qemu_opts_absorb_qdict(opts, options, errp)) { @@ -1842,6 +1996,7 @@ static QDict *parse_json_filename(const char *filename, Error **errp) QObject *options_obj; QDict *options; int ret; + GLOBAL_STATE_CODE(); ret = strstart(filename, "json:", &filename); assert(ret); @@ -1869,6 +2024,7 @@ static void parse_json_protocol(QDict *options, const char **pfilename, { QDict *json_options; Error *local_err = NULL; + GLOBAL_STATE_CODE(); /* Parse json: pseudo-protocol */ if (!*pfilename || !g_str_has_prefix(*pfilename, "json:")) { @@ -1903,6 +2059,8 @@ static int bdrv_fill_options(QDict **options, const char *filename, BlockDriver *drv = NULL; Error *local_err = NULL; + GLOBAL_STATE_CODE(); + /* * Caution: while qdict_get_try_str() is fine, getting non-string * types would require more care. 
When @options come from @@ -2024,11 +2182,13 @@ static bool bdrv_is_writable_after_reopen(BlockDriverState *bs, */ bool bdrv_is_writable(BlockDriverState *bs) { + IO_CODE(); return bdrv_is_writable_after_reopen(bs, NULL); } static char *bdrv_child_user_desc(BdrvChild *c) { + GLOBAL_STATE_CODE(); return c->klass->get_parent_desc(c); } @@ -2045,6 +2205,7 @@ static bool bdrv_a_allow_b(BdrvChild *a, BdrvChild *b, Error **errp) assert(a->bs); assert(a->bs == b->bs); + GLOBAL_STATE_CODE(); if ((b->perm & a->shared_perm) == b->perm) { return true; @@ -2068,6 +2229,7 @@ static bool bdrv_a_allow_b(BdrvChild *a, BdrvChild *b, Error **errp) static bool bdrv_parent_perms_conflict(BlockDriverState *bs, Error **errp) { BdrvChild *a, *b; + GLOBAL_STATE_CODE(); /* * During the loop we'll look at each pair twice. That's correct because @@ -2096,6 +2258,7 @@ static void bdrv_child_perm(BlockDriverState *bs, BlockDriverState *child_bs, uint64_t *nperm, uint64_t *nshared) { assert(bs->drv && bs->drv->bdrv_child_perm); + GLOBAL_STATE_CODE(); bs->drv->bdrv_child_perm(bs, c, role, reopen_queue, parent_perm, parent_shared, nperm, nshared); @@ -2122,6 +2285,8 @@ static GSList *bdrv_topological_dfs(GSList *list, GHashTable *found, BdrvChild *child; g_autoptr(GHashTable) local_found = NULL; + GLOBAL_STATE_CODE(); + if (!found) { assert(!list); found = local_found = g_hash_table_new(NULL, NULL); @@ -2149,6 +2314,8 @@ static void bdrv_child_set_perm_abort(void *opaque) { BdrvChildSetPermState *s = opaque; + GLOBAL_STATE_CODE(); + s->child->perm = s->old_perm; s->child->shared_perm = s->old_shared_perm; } @@ -2162,6 +2329,7 @@ static void bdrv_child_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared, Transaction *tran) { BdrvChildSetPermState *s = g_new(BdrvChildSetPermState, 1); + GLOBAL_STATE_CODE(); *s = (BdrvChildSetPermState) { .child = c, @@ -2179,6 +2347,7 @@ static void bdrv_drv_set_perm_commit(void *opaque) { BlockDriverState *bs = opaque; uint64_t cumulative_perms, cumulative_shared_perms; + GLOBAL_STATE_CODE(); if (bs->drv->bdrv_set_perm) { bdrv_get_cumulative_perm(bs, &cumulative_perms, @@ -2190,6 +2359,7 @@ static void bdrv_drv_set_perm_commit(void *opaque) static void bdrv_drv_set_perm_abort(void *opaque) { BlockDriverState *bs = opaque; + GLOBAL_STATE_CODE(); if (bs->drv->bdrv_abort_perm_update) { bs->drv->bdrv_abort_perm_update(bs); @@ -2205,6 +2375,7 @@ static int bdrv_drv_set_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared_perm, Transaction *tran, Error **errp) { + GLOBAL_STATE_CODE(); if (!bs->drv) { return 0; } @@ -2231,6 +2402,7 @@ typedef struct BdrvReplaceChildState { static void bdrv_replace_child_commit(void *opaque) { BdrvReplaceChildState *s = opaque; + GLOBAL_STATE_CODE(); bdrv_unref(s->old_bs); } @@ -2240,6 +2412,7 @@ static void bdrv_replace_child_abort(void *opaque) BdrvReplaceChildState *s = opaque; BlockDriverState *new_bs = s->child->bs; + GLOBAL_STATE_CODE(); /* old_bs reference is transparently moved from @s to @s->child */ bdrv_replace_child_noperm(s->child, s->old_bs); bdrv_unref(new_bs); @@ -2286,6 +2459,7 @@ static int bdrv_node_refresh_perm(BlockDriverState *bs, BlockReopenQueue *q, BdrvChild *c; int ret; uint64_t cumulative_perms, cumulative_shared_perms; + GLOBAL_STATE_CODE(); bdrv_get_cumulative_perm(bs, &cumulative_perms, &cumulative_shared_perms); @@ -2354,6 +2528,7 @@ static int bdrv_list_refresh_perms(GSList *list, BlockReopenQueue *q, { int ret; BlockDriverState *bs; + GLOBAL_STATE_CODE(); for ( ; list; list = list->next) { bs = list->data; @@ -2378,6 
+2553,8 @@ void bdrv_get_cumulative_perm(BlockDriverState *bs, uint64_t *perm, uint64_t cumulative_perms = 0; uint64_t cumulative_shared_perms = BLK_PERM_ALL; + GLOBAL_STATE_CODE(); + QLIST_FOREACH(c, &bs->parents, next_parent) { cumulative_perms |= c->perm; cumulative_shared_perms &= c->shared_perm; @@ -2397,7 +2574,6 @@ char *bdrv_perm_names(uint64_t perm) { BLK_PERM_WRITE, "write" }, { BLK_PERM_WRITE_UNCHANGED, "write unchanged" }, { BLK_PERM_RESIZE, "resize" }, - { BLK_PERM_GRAPH_MOD, "change children" }, { 0, NULL } }; @@ -2422,6 +2598,7 @@ static int bdrv_refresh_perms(BlockDriverState *bs, Error **errp) int ret; Transaction *tran = tran_new(); g_autoptr(GSList) list = bdrv_topological_dfs(NULL, NULL, bs); + GLOBAL_STATE_CODE(); ret = bdrv_list_refresh_perms(list, NULL, tran, errp); tran_finalize(tran, ret); @@ -2436,6 +2613,8 @@ int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared, Transaction *tran = tran_new(); int ret; + GLOBAL_STATE_CODE(); + bdrv_child_set_perm(c, perm, shared, tran); ret = bdrv_refresh_perms(c->bs, &local_err); @@ -2466,6 +2645,8 @@ int bdrv_child_refresh_perms(BlockDriverState *bs, BdrvChild *c, Error **errp) uint64_t parent_perms, parent_shared; uint64_t perms, shared; + GLOBAL_STATE_CODE(); + bdrv_get_cumulative_perm(bs, &parent_perms, &parent_shared); bdrv_child_perm(bs, c->bs, c, c->role, NULL, parent_perms, parent_shared, &perms, &shared); @@ -2484,6 +2665,7 @@ static void bdrv_filter_default_perms(BlockDriverState *bs, BdrvChild *c, uint64_t perm, uint64_t shared, uint64_t *nperm, uint64_t *nshared) { + GLOBAL_STATE_CODE(); *nperm = perm & DEFAULT_PERM_PASSTHROUGH; *nshared = (shared & DEFAULT_PERM_PASSTHROUGH) | DEFAULT_PERM_UNCHANGED; } @@ -2495,6 +2677,7 @@ static void bdrv_default_perms_for_cow(BlockDriverState *bs, BdrvChild *c, uint64_t *nperm, uint64_t *nshared) { assert(role & BDRV_CHILD_COW); + GLOBAL_STATE_CODE(); /* * We want consistent read from backing files if the parent needs it. 
@@ -2513,8 +2696,7 @@ static void bdrv_default_perms_for_cow(BlockDriverState *bs, BdrvChild *c, shared = 0; } - shared |= BLK_PERM_CONSISTENT_READ | BLK_PERM_GRAPH_MOD | - BLK_PERM_WRITE_UNCHANGED; + shared |= BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED; if (bs->open_flags & BDRV_O_INACTIVE) { shared |= BLK_PERM_WRITE | BLK_PERM_RESIZE; @@ -2532,6 +2714,7 @@ static void bdrv_default_perms_for_storage(BlockDriverState *bs, BdrvChild *c, { int flags; + GLOBAL_STATE_CODE(); assert(role & (BDRV_CHILD_METADATA | BDRV_CHILD_DATA)); flags = bdrv_reopen_get_flags(reopen_queue, bs); @@ -2614,6 +2797,7 @@ void bdrv_default_perms(BlockDriverState *bs, BdrvChild *c, uint64_t perm, uint64_t shared, uint64_t *nperm, uint64_t *nshared) { + GLOBAL_STATE_CODE(); if (role & BDRV_CHILD_FILTERED) { assert(!(role & (BDRV_CHILD_DATA | BDRV_CHILD_METADATA | BDRV_CHILD_COW))); @@ -2638,7 +2822,6 @@ uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm) [BLOCK_PERMISSION_WRITE] = BLK_PERM_WRITE, [BLOCK_PERMISSION_WRITE_UNCHANGED] = BLK_PERM_WRITE_UNCHANGED, [BLOCK_PERMISSION_RESIZE] = BLK_PERM_RESIZE, - [BLOCK_PERMISSION_GRAPH_MOD] = BLK_PERM_GRAPH_MOD, }; QEMU_BUILD_BUG_ON(ARRAY_SIZE(permissions) != BLOCK_PERMISSION__MAX); @@ -2657,6 +2840,8 @@ static void bdrv_replace_child_noperm(BdrvChild *child, int drain_saldo; assert(!child->frozen); + assert(old_bs != new_bs); + GLOBAL_STATE_CODE(); if (old_bs && new_bs) { assert(bdrv_get_aio_context(old_bs) == bdrv_get_aio_context(new_bs)); @@ -2681,12 +2866,14 @@ static void bdrv_replace_child_noperm(BdrvChild *child, if (child->klass->detach) { child->klass->detach(child); } + assert_bdrv_graph_writable(old_bs); QLIST_REMOVE(child, next_parent); } child->bs = new_bs; if (new_bs) { + assert_bdrv_graph_writable(new_bs); QLIST_INSERT_HEAD(&new_bs->parents, child, next_parent); /* @@ -2716,23 +2903,24 @@ static void bdrv_replace_child_noperm(BdrvChild *child, } } -static void bdrv_child_free(void *opaque) -{ - BdrvChild *c = opaque; - - g_free(c->name); - g_free(c); -} - -static void bdrv_remove_empty_child(BdrvChild *child) +/** + * Free the given @child. + * + * The child must be empty (i.e. `child->bs == NULL`) and it must be + * unused (i.e. not in a children list). 
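+ *
+ * Illustrative sketch only, mirroring bdrv_detach_child() below (no new rule
+ * is introduced here): a caller first empties the child, then frees it:
+ *
+ *     bdrv_replace_child_noperm(child, NULL);
+ *     bdrv_child_free(child);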
+ */ +static void bdrv_child_free(BdrvChild *child) { assert(!child->bs); - QLIST_SAFE_REMOVE(child, next); - bdrv_child_free(child); + GLOBAL_STATE_CODE(); + assert(!child->next.le_prev); /* not in children list */ + + g_free(child->name); + g_free(child); } typedef struct BdrvAttachChildCommonState { - BdrvChild **child; + BdrvChild *child; AioContext *old_parent_ctx; AioContext *old_child_ctx; } BdrvAttachChildCommonState; @@ -2740,30 +2928,35 @@ typedef struct BdrvAttachChildCommonState { static void bdrv_attach_child_common_abort(void *opaque) { BdrvAttachChildCommonState *s = opaque; - BdrvChild *child = *s->child; - BlockDriverState *bs = child->bs; + BlockDriverState *bs = s->child->bs; - bdrv_replace_child_noperm(child, NULL); + GLOBAL_STATE_CODE(); + bdrv_replace_child_noperm(s->child, NULL); if (bdrv_get_aio_context(bs) != s->old_child_ctx) { - bdrv_try_set_aio_context(bs, s->old_child_ctx, &error_abort); + bdrv_try_change_aio_context(bs, s->old_child_ctx, NULL, &error_abort); } - if (bdrv_child_get_parent_aio_context(child) != s->old_parent_ctx) { - GSList *ignore = g_slist_prepend(NULL, child); + if (bdrv_child_get_parent_aio_context(s->child) != s->old_parent_ctx) { + Transaction *tran; + GHashTable *visited; + bool ret; - child->klass->can_set_aio_ctx(child, s->old_parent_ctx, &ignore, - &error_abort); - g_slist_free(ignore); - ignore = g_slist_prepend(NULL, child); - child->klass->set_aio_ctx(child, s->old_parent_ctx, &ignore); + tran = tran_new(); - g_slist_free(ignore); + /* No need to visit `child`, because it has been detached already */ + visited = g_hash_table_new(NULL, NULL); + ret = s->child->klass->change_aio_ctx(s->child, s->old_parent_ctx, + visited, tran, &error_abort); + g_hash_table_destroy(visited); + + /* transaction is supposed to always succeed */ + assert(ret == true); + tran_commit(tran); } bdrv_unref(bs); - bdrv_remove_empty_child(child); - *s->child = NULL; + bdrv_child_free(s->child); } static TransactionActionDrv bdrv_attach_child_common_drv = { @@ -2774,29 +2967,24 @@ static TransactionActionDrv bdrv_attach_child_common_drv = { /* * Common part of attaching bdrv child to bs or to blk or to job * - * Resulting new child is returned through @child. - * At start *@child must be NULL. - * @child is saved to a new entry of @tran, so that *@child could be reverted to - * NULL on abort(). So referenced variable must live at least until transaction - * end. - * * Function doesn't update permissions, caller is responsible for this. + * + * Returns new created child. 
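+ *
+ * Rough usage sketch (hedged; it follows bdrv_root_attach_child() below, with
+ * locking details left out and "child", role, perm, shared_perm, opaque used
+ * as placeholders). The attach and the follow-up permission update share one
+ * transaction:
+ *
+ *     Transaction *tran = tran_new();
+ *     BdrvChild *child;
+ *     int ret;
+ *
+ *     child = bdrv_attach_child_common(child_bs, "child", &child_of_bds,
+ *                                      role, perm, shared_perm, opaque,
+ *                                      tran, errp);
+ *     ret = child ? bdrv_refresh_perms(child_bs, errp) : -EINVAL;
+ *     tran_finalize(tran, ret);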
*/ -static int bdrv_attach_child_common(BlockDriverState *child_bs, - const char *child_name, - const BdrvChildClass *child_class, - BdrvChildRole child_role, - uint64_t perm, uint64_t shared_perm, - void *opaque, BdrvChild **child, - Transaction *tran, Error **errp) +static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs, + const char *child_name, + const BdrvChildClass *child_class, + BdrvChildRole child_role, + uint64_t perm, uint64_t shared_perm, + void *opaque, + Transaction *tran, Error **errp) { BdrvChild *new_child; AioContext *parent_ctx; AioContext *child_ctx = bdrv_get_aio_context(child_bs); - assert(child); - assert(*child == NULL); assert(child_class->get_parent_desc); + GLOBAL_STATE_CODE(); new_child = g_new(BdrvChild, 1); *new_child = (BdrvChild) { @@ -2817,91 +3005,84 @@ static int bdrv_attach_child_common(BlockDriverState *child_bs, parent_ctx = bdrv_child_get_parent_aio_context(new_child); if (child_ctx != parent_ctx) { Error *local_err = NULL; - int ret = bdrv_try_set_aio_context(child_bs, parent_ctx, &local_err); + int ret = bdrv_try_change_aio_context(child_bs, parent_ctx, NULL, + &local_err); - if (ret < 0 && child_class->can_set_aio_ctx) { - GSList *ignore = g_slist_prepend(NULL, new_child); - if (child_class->can_set_aio_ctx(new_child, child_ctx, &ignore, - NULL)) - { + if (ret < 0 && child_class->change_aio_ctx) { + Transaction *tran = tran_new(); + GHashTable *visited = g_hash_table_new(NULL, NULL); + bool ret_child; + + g_hash_table_add(visited, new_child); + ret_child = child_class->change_aio_ctx(new_child, child_ctx, + visited, tran, NULL); + if (ret_child == true) { error_free(local_err); ret = 0; - g_slist_free(ignore); - ignore = g_slist_prepend(NULL, new_child); - child_class->set_aio_ctx(new_child, child_ctx, &ignore); } - g_slist_free(ignore); + tran_finalize(tran, ret_child == true ? 0 : -1); + g_hash_table_destroy(visited); } if (ret < 0) { error_propagate(errp, local_err); - bdrv_remove_empty_child(new_child); - return ret; + bdrv_child_free(new_child); + return NULL; } } bdrv_ref(child_bs); bdrv_replace_child_noperm(new_child, child_bs); - *child = new_child; - BdrvAttachChildCommonState *s = g_new(BdrvAttachChildCommonState, 1); *s = (BdrvAttachChildCommonState) { - .child = child, + .child = new_child, .old_parent_ctx = parent_ctx, .old_child_ctx = child_ctx, }; tran_add(tran, &bdrv_attach_child_common_drv, s); - return 0; + return new_child; } /* - * Variable referenced by @child must live at least until transaction end. - * (see bdrv_attach_child_common() doc for details) - * * Function doesn't update permissions, caller is responsible for this. 
*/ -static int bdrv_attach_child_noperm(BlockDriverState *parent_bs, - BlockDriverState *child_bs, - const char *child_name, - const BdrvChildClass *child_class, - BdrvChildRole child_role, - BdrvChild **child, - Transaction *tran, - Error **errp) +static BdrvChild *bdrv_attach_child_noperm(BlockDriverState *parent_bs, + BlockDriverState *child_bs, + const char *child_name, + const BdrvChildClass *child_class, + BdrvChildRole child_role, + Transaction *tran, + Error **errp) { - int ret; uint64_t perm, shared_perm; assert(parent_bs->drv); + GLOBAL_STATE_CODE(); + + if (bdrv_recurse_has_child(child_bs, parent_bs)) { + error_setg(errp, "Making '%s' a %s child of '%s' would create a cycle", + child_bs->node_name, child_name, parent_bs->node_name); + return NULL; + } bdrv_get_cumulative_perm(parent_bs, &perm, &shared_perm); bdrv_child_perm(parent_bs, child_bs, NULL, child_role, NULL, perm, shared_perm, &perm, &shared_perm); - ret = bdrv_attach_child_common(child_bs, child_name, child_class, - child_role, perm, shared_perm, parent_bs, - child, tran, errp); - if (ret < 0) { - return ret; - } - - QLIST_INSERT_HEAD(&parent_bs->children, *child, next); - /* - * child is removed in bdrv_attach_child_common_abort(), so don't care to - * abort this change separately. - */ - - return 0; + return bdrv_attach_child_common(child_bs, child_name, child_class, + child_role, perm, shared_perm, parent_bs, + tran, errp); } static void bdrv_detach_child(BdrvChild *child) { BlockDriverState *old_bs = child->bs; + GLOBAL_STATE_CODE(); bdrv_replace_child_noperm(child, NULL); - bdrv_remove_empty_child(child); + bdrv_child_free(child); if (old_bs) { /* @@ -2915,7 +3096,7 @@ static void bdrv_detach_child(BdrvChild *child) * When the parent requiring a non-default AioContext is removed, the * node moves back to the main AioContext */ - bdrv_try_set_aio_context(old_bs, qemu_get_aio_context(), NULL); + bdrv_try_change_aio_context(old_bs, qemu_get_aio_context(), NULL, NULL); } } @@ -2937,13 +3118,16 @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, void *opaque, Error **errp) { int ret; - BdrvChild *child = NULL; + BdrvChild *child; Transaction *tran = tran_new(); - ret = bdrv_attach_child_common(child_bs, child_name, child_class, + GLOBAL_STATE_CODE(); + + child = bdrv_attach_child_common(child_bs, child_name, child_class, child_role, perm, shared_perm, opaque, - &child, tran, errp); - if (ret < 0) { + tran, errp); + if (!child) { + ret = -EINVAL; goto out; } @@ -2951,11 +3135,10 @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, out: tran_finalize(tran, ret); - /* child is unset on failure by bdrv_attach_child_common_abort() */ - assert((ret < 0) == !child); bdrv_unref(child_bs); - return child; + + return ret < 0 ? NULL : child; } /* @@ -2977,12 +3160,15 @@ BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs, Error **errp) { int ret; - BdrvChild *child = NULL; + BdrvChild *child; Transaction *tran = tran_new(); - ret = bdrv_attach_child_noperm(parent_bs, child_bs, child_name, child_class, - child_role, &child, tran, errp); - if (ret < 0) { + GLOBAL_STATE_CODE(); + + child = bdrv_attach_child_noperm(parent_bs, child_bs, child_name, + child_class, child_role, tran, errp); + if (!child) { + ret = -EINVAL; goto out; } @@ -2993,12 +3179,10 @@ BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs, out: tran_finalize(tran, ret); - /* child is unset on failure by bdrv_attach_child_common_abort() */ - assert((ret < 0) == !child); bdrv_unref(child_bs); - return child; + return ret < 0 ? 
NULL : child; } /* Callers must ensure that child->frozen is false. */ @@ -3006,6 +3190,8 @@ void bdrv_root_unref_child(BdrvChild *child) { BlockDriverState *child_bs; + GLOBAL_STATE_CODE(); + child_bs = child->bs; bdrv_detach_child(child); bdrv_unref(child_bs); @@ -3080,6 +3266,7 @@ static void bdrv_unset_inherits_from(BlockDriverState *root, BdrvChild *child, /* Callers must ensure that child->frozen is false. */ void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child) { + GLOBAL_STATE_CODE(); if (child == NULL) { return; } @@ -3092,6 +3279,7 @@ void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child) static void bdrv_parent_cb_change_media(BlockDriverState *bs, bool load) { BdrvChild *c; + GLOBAL_STATE_CODE(); QLIST_FOREACH(c, &bs->parents, next_parent) { if (c->klass->change_media) { c->klass->change_media(c, load); @@ -3136,12 +3324,13 @@ static int bdrv_set_file_or_backing_noperm(BlockDriverState *parent_bs, bool is_backing, Transaction *tran, Error **errp) { - int ret = 0; bool update_inherits_from = bdrv_inherits_from_recursive(child_bs, parent_bs); BdrvChild *child = is_backing ? parent_bs->backing : parent_bs->file; BdrvChildRole role; + GLOBAL_STATE_CODE(); + if (!parent_bs->drv) { /* * Node without drv is an object without a class :/. TODO: finally fix @@ -3185,21 +3374,19 @@ static int bdrv_set_file_or_backing_noperm(BlockDriverState *parent_bs, if (child) { bdrv_unset_inherits_from(parent_bs, child, tran); - bdrv_remove_file_or_backing_child(parent_bs, child, tran); + bdrv_remove_child(child, tran); } if (!child_bs) { goto out; } - ret = bdrv_attach_child_noperm(parent_bs, child_bs, - is_backing ? "backing" : "file", - &child_of_bds, role, - is_backing ? &parent_bs->backing : - &parent_bs->file, - tran, errp); - if (ret < 0) { - return ret; + child = bdrv_attach_child_noperm(parent_bs, child_bs, + is_backing ? "backing" : "file", + &child_of_bds, role, + tran, errp); + if (!child) { + return -EINVAL; } @@ -3221,6 +3408,7 @@ static int bdrv_set_backing_noperm(BlockDriverState *bs, BlockDriverState *backing_hd, Transaction *tran, Error **errp) { + GLOBAL_STATE_CODE(); return bdrv_set_file_or_backing_noperm(bs, backing_hd, true, tran, errp); } @@ -3230,6 +3418,9 @@ int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd, int ret; Transaction *tran = tran_new(); + GLOBAL_STATE_CODE(); + bdrv_drained_begin(bs); + ret = bdrv_set_backing_noperm(bs, backing_hd, tran, errp); if (ret < 0) { goto out; @@ -3239,6 +3430,8 @@ int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd, out: tran_finalize(tran, ret); + bdrv_drained_end(bs); + return ret; } @@ -3265,6 +3458,8 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options, QDict *tmp_parent_options = NULL; Error *local_err = NULL; + GLOBAL_STATE_CODE(); + if (bs->backing != NULL) { goto free_exit; } @@ -3424,6 +3619,8 @@ BdrvChild *bdrv_open_child(const char *filename, { BlockDriverState *bs; + GLOBAL_STATE_CODE(); + bs = bdrv_open_child_bs(filename, options, bdref_key, parent, child_class, child_role, allow_none, errp); if (bs == NULL) { @@ -3434,6 +3631,29 @@ BdrvChild *bdrv_open_child(const char *filename, errp); } +/* + * Wrapper on bdrv_open_child() for most popular case: open primary child of bs. 
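+ *
+ * Hedged usage sketch, not copied from any particular driver: a format
+ * driver's .bdrv_open() callback could open its protocol child with
+ *
+ *     ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
+ *     if (ret < 0) {
+ *         return ret;
+ *     }
+ *
+ * "file" is the conventional bdref key; the child role (BDRV_CHILD_IMAGE, or
+ * BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY for filter drivers) is chosen by
+ * this function as shown below.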
+ */ +int bdrv_open_file_child(const char *filename, + QDict *options, const char *bdref_key, + BlockDriverState *parent, Error **errp) +{ + BdrvChildRole role; + + /* commit_top and mirror_top don't use this function */ + assert(!parent->drv->filtered_child_is_backing); + role = parent->drv->is_filter ? + (BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY) : BDRV_CHILD_IMAGE; + + if (!bdrv_open_child(filename, options, bdref_key, parent, + &child_of_bds, role, false, errp)) + { + return -EINVAL; + } + + return 0; +} + /* * TODO Future callers may need to specify parent/child_class in order for * option inheritance to work. Existing callers use it for the root node. @@ -3446,6 +3666,8 @@ BlockDriverState *bdrv_open_blockdev_ref(BlockdevRef *ref, Error **errp) const char *reference = NULL; Visitor *v = NULL; + GLOBAL_STATE_CODE(); + if (ref->type == QTYPE_QSTRING) { reference = ref->u.reference; } else { @@ -3481,13 +3703,14 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs, QDict *snapshot_options, Error **errp) { - /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */ - char *tmp_filename = g_malloc0(PATH_MAX + 1); + g_autofree char *tmp_filename = NULL; int64_t total_size; QemuOpts *opts = NULL; BlockDriverState *bs_snapshot = NULL; int ret; + GLOBAL_STATE_CODE(); + /* if snapshot, we create a temporary backing file and open it instead of opening 'filename' directly */ @@ -3499,9 +3722,8 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs, } /* Create the temporary image */ - ret = get_tmp_filename(tmp_filename, PATH_MAX + 1); - if (ret < 0) { - error_setg_errno(errp, -ret, "Could not get temporary filename"); + tmp_filename = create_tmp_file(errp); + if (!tmp_filename) { goto out; } @@ -3535,7 +3757,6 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs, out: qobject_unref(snapshot_options); - g_free(tmp_filename); return bs_snapshot; } @@ -3575,6 +3796,7 @@ static BlockDriverState *bdrv_open_inherit(const char *filename, assert(!child_class || !flags); assert(!child_class == !parent); + GLOBAL_STATE_CODE(); if (reference) { bool options_non_empty = options ? qdict_size(options) : false; @@ -3843,6 +4065,8 @@ close_and_fail: BlockDriverState *bdrv_open(const char *filename, const char *reference, QDict *options, int flags, Error **errp) { + GLOBAL_STATE_CODE(); + return bdrv_open_inherit(filename, reference, options, flags, NULL, NULL, 0, errp); } @@ -3959,6 +4183,7 @@ static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue, * important to avoid graph changes between the recursive queuing here and * bdrv_reopen_multiple(). 
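+ *
+ * Illustrative calling pattern only (it follows bdrv_reopen() further down;
+ * AioContext lock handling is left out and options/keep_old_opts are
+ * placeholders):
+ *
+ *     bdrv_subtree_drained_begin(bs);
+ *     queue = bdrv_reopen_queue(NULL, bs, options, keep_old_opts);
+ *     ret = bdrv_reopen_multiple(queue, errp);
+ *     bdrv_subtree_drained_end(bs);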
*/ assert(bs->quiesce_counter > 0); + GLOBAL_STATE_CODE(); if (bs_queue == NULL) { bs_queue = g_new0(BlockReopenQueue, 1); @@ -4097,12 +4322,15 @@ BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue, BlockDriverState *bs, QDict *options, bool keep_old_opts) { + GLOBAL_STATE_CODE(); + return bdrv_reopen_queue_child(bs_queue, bs, options, NULL, 0, false, NULL, 0, keep_old_opts); } void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue) { + GLOBAL_STATE_CODE(); if (bs_queue) { BlockReopenQueueEntry *bs_entry, *next; QTAILQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) { @@ -4144,6 +4372,7 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) assert(qemu_get_current_aio_context() == qemu_get_aio_context()); assert(bs_queue != NULL); + GLOBAL_STATE_CODE(); QTAILQ_FOREACH(bs_entry, bs_queue, entry) { ctx = bdrv_get_aio_context(bs_entry->state.bs); @@ -4250,6 +4479,8 @@ int bdrv_reopen(BlockDriverState *bs, QDict *opts, bool keep_old_opts, BlockReopenQueue *queue; int ret; + GLOBAL_STATE_CODE(); + bdrv_subtree_drained_begin(bs); if (ctx != qemu_get_aio_context()) { aio_context_release(ctx); @@ -4271,6 +4502,8 @@ int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only, { QDict *opts = qdict_new(); + GLOBAL_STATE_CODE(); + qdict_put_bool(opts, BDRV_OPT_READ_ONLY, read_only); return bdrv_reopen(bs, opts, true, errp); @@ -4305,6 +4538,8 @@ static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, QObject *value; const char *str; + GLOBAL_STATE_CODE(); + value = qdict_get(reopen_state->options, child_name); if (value == NULL) { return 0; @@ -4403,6 +4638,7 @@ static int bdrv_reopen_prepare(BDRVReopenState *reopen_state, assert(reopen_state != NULL); assert(reopen_state->bs->drv != NULL); + GLOBAL_STATE_CODE(); drv = reopen_state->bs->drv; /* This function and each driver's bdrv_reopen_prepare() remove @@ -4613,6 +4849,7 @@ static void bdrv_reopen_commit(BDRVReopenState *reopen_state) bs = reopen_state->bs; drv = bs->drv; assert(drv != NULL); + GLOBAL_STATE_CODE(); /* If there are any driver level actions to take */ if (drv->bdrv_reopen_commit) { @@ -4654,6 +4891,7 @@ static void bdrv_reopen_abort(BDRVReopenState *reopen_state) assert(reopen_state != NULL); drv = reopen_state->bs->drv; assert(drv != NULL); + GLOBAL_STATE_CODE(); if (drv->bdrv_reopen_abort) { drv->bdrv_reopen_abort(reopen_state); @@ -4666,6 +4904,7 @@ static void bdrv_close(BlockDriverState *bs) BdrvAioNotifier *ban, *ban_next; BdrvChild *child, *next; + GLOBAL_STATE_CODE(); assert(!bs->refcnt); bdrv_drained_begin(bs); /* complete I/O */ @@ -4684,8 +4923,8 @@ static void bdrv_close(BlockDriverState *bs) bdrv_unref_child(bs, child); } - bs->backing = NULL; - bs->file = NULL; + assert(!bs->backing); + assert(!bs->file); g_free(bs->opaque); bs->opaque = NULL; qatomic_set(&bs->copy_on_read, 0); @@ -4700,6 +4939,8 @@ static void bdrv_close(BlockDriverState *bs) bs->explicit_options = NULL; qobject_unref(bs->full_open_options); bs->full_open_options = NULL; + g_free(bs->block_status_cache); + bs->block_status_cache = NULL; bdrv_release_named_dirty_bitmaps(bs); assert(QLIST_EMPTY(&bs->dirty_bitmaps)); @@ -4722,6 +4963,7 @@ static void bdrv_close(BlockDriverState *bs) void bdrv_close_all(void) { + GLOBAL_STATE_CODE(); assert(job_next(NULL) == NULL); /* Drop references from requests still in flight, such as canceled block @@ -4813,56 +5055,19 @@ static bool should_update_child(BdrvChild *c, BlockDriverState *to) return ret; } -typedef struct BdrvRemoveFilterOrCowChild { - BdrvChild *child; 
- bool is_backing; -} BdrvRemoveFilterOrCowChild; - -static void bdrv_remove_filter_or_cow_child_abort(void *opaque) +static void bdrv_remove_child_commit(void *opaque) { - BdrvRemoveFilterOrCowChild *s = opaque; - BlockDriverState *parent_bs = s->child->opaque; - - QLIST_INSERT_HEAD(&parent_bs->children, s->child, next); - if (s->is_backing) { - parent_bs->backing = s->child; - } else { - parent_bs->file = s->child; - } - - /* - * We don't have to restore child->bs here to undo bdrv_replace_child_tran() - * because that function is transactionable and it registered own completion - * entries in @tran, so .abort() for bdrv_replace_child_safe() will be - * called automatically. - */ + GLOBAL_STATE_CODE(); + bdrv_child_free(opaque); } -static void bdrv_remove_filter_or_cow_child_commit(void *opaque) -{ - BdrvRemoveFilterOrCowChild *s = opaque; - - bdrv_child_free(s->child); -} - -static TransactionActionDrv bdrv_remove_filter_or_cow_child_drv = { - .abort = bdrv_remove_filter_or_cow_child_abort, - .commit = bdrv_remove_filter_or_cow_child_commit, - .clean = g_free, +static TransactionActionDrv bdrv_remove_child_drv = { + .commit = bdrv_remove_child_commit, }; -/* - * A function to remove backing or file child of @bs. - * Function doesn't update permissions, caller is responsible for this. - */ -static void bdrv_remove_file_or_backing_child(BlockDriverState *bs, - BdrvChild *child, - Transaction *tran) +/* Function doesn't update permissions, caller is responsible for this. */ +static void bdrv_remove_child(BdrvChild *child, Transaction *tran) { - BdrvRemoveFilterOrCowChild *s; - - assert(child == bs->backing || child == bs->file); - if (!child) { return; } @@ -4871,19 +5076,7 @@ static void bdrv_remove_file_or_backing_child(BlockDriverState *bs, bdrv_replace_child_tran(child, NULL, tran); } - s = g_new(BdrvRemoveFilterOrCowChild, 1); - *s = (BdrvRemoveFilterOrCowChild) { - .child = child, - .is_backing = (child == bs->backing), - }; - tran_add(tran, &bdrv_remove_filter_or_cow_child_drv, s); - - QLIST_SAFE_REMOVE(child, next); - if (s->is_backing) { - bs->backing = NULL; - } else { - bs->file = NULL; - } + tran_add(tran, &bdrv_remove_child_drv, child); } /* @@ -4894,7 +5087,7 @@ static void bdrv_remove_file_or_backing_child(BlockDriverState *bs, static void bdrv_remove_filter_or_cow_child(BlockDriverState *bs, Transaction *tran) { - bdrv_remove_file_or_backing_child(bs, bdrv_filter_or_cow_child(bs), tran); + bdrv_remove_child(bdrv_filter_or_cow_child(bs), tran); } static int bdrv_replace_node_noperm(BlockDriverState *from, @@ -4904,6 +5097,8 @@ static int bdrv_replace_node_noperm(BlockDriverState *from, { BdrvChild *c, *next; + GLOBAL_STATE_CODE(); + QLIST_FOREACH_SAFE(c, &from->parents, next_parent, next) { assert(c->bs == from); if (!should_update_child(c, to)) { @@ -4946,6 +5141,8 @@ static int bdrv_replace_node_common(BlockDriverState *from, BlockDriverState *to_cow_parent = NULL; int ret; + GLOBAL_STATE_CODE(); + if (detach_subchain) { assert(bdrv_chain_contains(from, to)); assert(from != to); @@ -5004,11 +5201,15 @@ out: int bdrv_replace_node(BlockDriverState *from, BlockDriverState *to, Error **errp) { + GLOBAL_STATE_CODE(); + return bdrv_replace_node_common(from, to, true, false, errp); } int bdrv_drop_filter(BlockDriverState *bs, Error **errp) { + GLOBAL_STATE_CODE(); + return bdrv_replace_node_common(bs, bdrv_filter_or_cow_bs(bs), true, true, errp); } @@ -5029,14 +5230,18 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, Error **errp) { int ret; + BdrvChild 
*child; Transaction *tran = tran_new(); + GLOBAL_STATE_CODE(); + assert(!bs_new->backing); - ret = bdrv_attach_child_noperm(bs_new, bs_top, "backing", - &child_of_bds, bdrv_backing_role(bs_new), - &bs_new->backing, tran, errp); - if (ret < 0) { + child = bdrv_attach_child_noperm(bs_new, bs_top, "backing", + &child_of_bds, bdrv_backing_role(bs_new), + tran, errp); + if (!child) { + ret = -EINVAL; goto out; } @@ -5054,10 +5259,44 @@ out: return ret; } +/* Not for empty child */ +int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs, + Error **errp) +{ + int ret; + Transaction *tran = tran_new(); + g_autoptr(GHashTable) found = NULL; + g_autoptr(GSList) refresh_list = NULL; + BlockDriverState *old_bs = child->bs; + + GLOBAL_STATE_CODE(); + + bdrv_ref(old_bs); + bdrv_drained_begin(old_bs); + bdrv_drained_begin(new_bs); + + bdrv_replace_child_tran(child, new_bs, tran); + + found = g_hash_table_new(NULL, NULL); + refresh_list = bdrv_topological_dfs(refresh_list, found, old_bs); + refresh_list = bdrv_topological_dfs(refresh_list, found, new_bs); + + ret = bdrv_list_refresh_perms(refresh_list, NULL, tran, errp); + + tran_finalize(tran, ret); + + bdrv_drained_end(old_bs); + bdrv_drained_end(new_bs); + bdrv_unref(old_bs); + + return ret; +} + static void bdrv_delete(BlockDriverState *bs) { assert(bdrv_op_blocker_is_empty(bs)); assert(!bs->refcnt); + GLOBAL_STATE_CODE(); /* remove from list, if necessary */ if (bs->node_name[0] != '\0') { @@ -5070,29 +5309,63 @@ static void bdrv_delete(BlockDriverState *bs) g_free(bs); } -BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *node_options, + +/* + * Replace @bs by newly created block node. + * + * @options is a QDict of options to pass to the block drivers, or NULL for an + * empty set of options. The reference to the QDict belongs to the block layer + * after the call (even on failure), so if the caller intends to reuse the + * dictionary, it needs to use qobject_ref() before calling bdrv_open. 
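+ *
+ * Illustrative sketch of the ownership rule above, for a hypothetical caller
+ * that wants to keep using @options afterwards:
+ *
+ *     qobject_ref(options);
+ *     new_bs = bdrv_insert_node(bs, options, flags, errp);
+ *
+ * After this call @options is still owned by the caller, whether or not the
+ * call succeeded.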
+ */ +BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options, int flags, Error **errp) { - BlockDriverState *new_node_bs; - Error *local_err = NULL; + ERRP_GUARD(); + int ret; + BlockDriverState *new_node_bs = NULL; + const char *drvname, *node_name; + BlockDriver *drv; - new_node_bs = bdrv_open(NULL, NULL, node_options, flags, errp); - if (new_node_bs == NULL) { + drvname = qdict_get_try_str(options, "driver"); + if (!drvname) { + error_setg(errp, "driver is not specified"); + goto fail; + } + + drv = bdrv_find_format(drvname); + if (!drv) { + error_setg(errp, "Unknown driver: '%s'", drvname); + goto fail; + } + + node_name = qdict_get_try_str(options, "node-name"); + + GLOBAL_STATE_CODE(); + + new_node_bs = bdrv_new_open_driver_opts(drv, node_name, options, flags, + errp); + options = NULL; /* bdrv_new_open_driver() eats options */ + if (!new_node_bs) { error_prepend(errp, "Could not create node: "); - return NULL; + goto fail; } bdrv_drained_begin(bs); - bdrv_replace_node(bs, new_node_bs, &local_err); + ret = bdrv_replace_node(bs, new_node_bs, errp); bdrv_drained_end(bs); - if (local_err) { - bdrv_unref(new_node_bs); - error_propagate(errp, local_err); - return NULL; + if (ret < 0) { + error_prepend(errp, "Could not replace node: "); + goto fail; } return new_node_bs; + +fail: + qobject_unref(options); + bdrv_unref(new_node_bs); + return NULL; } /* @@ -5105,6 +5378,7 @@ BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *node_options, int coroutine_fn bdrv_co_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix) { + IO_CODE(); if (bs->drv == NULL) { return -ENOMEDIUM; } @@ -5130,6 +5404,8 @@ int bdrv_change_backing_file(BlockDriverState *bs, const char *backing_file, BlockDriver *drv = bs->drv; int ret; + GLOBAL_STATE_CODE(); + if (!drv) { return -ENOMEDIUM; } @@ -5171,6 +5447,9 @@ int bdrv_change_backing_file(BlockDriverState *bs, const char *backing_file, BlockDriverState *bdrv_find_overlay(BlockDriverState *active, BlockDriverState *bs) { + + GLOBAL_STATE_CODE(); + bs = bdrv_skip_filters(bs); active = bdrv_skip_filters(active); @@ -5188,6 +5467,8 @@ BlockDriverState *bdrv_find_overlay(BlockDriverState *active, /* Given a BDS, searches for the base layer. 
*/ BlockDriverState *bdrv_find_base(BlockDriverState *bs) { + GLOBAL_STATE_CODE(); + return bdrv_find_overlay(bs, NULL); } @@ -5202,6 +5483,8 @@ bool bdrv_is_backing_chain_frozen(BlockDriverState *bs, BlockDriverState *base, BlockDriverState *i; BdrvChild *child; + GLOBAL_STATE_CODE(); + for (i = bs; i != base; i = child_bs(child)) { child = bdrv_filter_or_cow_child(i); @@ -5228,6 +5511,8 @@ int bdrv_freeze_backing_chain(BlockDriverState *bs, BlockDriverState *base, BlockDriverState *i; BdrvChild *child; + GLOBAL_STATE_CODE(); + if (bdrv_is_backing_chain_frozen(bs, base, errp)) { return -EPERM; } @@ -5262,6 +5547,8 @@ void bdrv_unfreeze_backing_chain(BlockDriverState *bs, BlockDriverState *base) BlockDriverState *i; BdrvChild *child; + GLOBAL_STATE_CODE(); + for (i = bs; i != base; i = child_bs(child)) { child = bdrv_filter_or_cow_child(i); if (child) { @@ -5311,6 +5598,8 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base, g_autoptr(GSList) updated_children = NULL; GSList *p; + GLOBAL_STATE_CODE(); + bdrv_ref(top); bdrv_subtree_drained_begin(top); @@ -5332,8 +5621,6 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base, update_inherits_from = bdrv_inherits_from_recursive(base, explicit_top); /* success - we can delete the intermediate states, and link top->base */ - /* TODO Check graph modification op blockers (BLK_PERM_GRAPH_MOD) once - * we've figured out how they should work. */ if (!backing_file_str) { bdrv_refresh_filename(base); backing_file_str = base->filename; @@ -5424,6 +5711,8 @@ static int64_t bdrv_sum_allocated_file_size(BlockDriverState *bs) int64_t bdrv_get_allocated_file_size(BlockDriverState *bs) { BlockDriver *drv = bs->drv; + IO_CODE(); + if (!drv) { return -ENOMEDIUM; } @@ -5473,6 +5762,7 @@ int64_t bdrv_get_allocated_file_size(BlockDriverState *bs) BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts, BlockDriverState *in_bs, Error **errp) { + IO_CODE(); if (!drv->bdrv_measure) { error_setg(errp, "Block driver '%s' does not support size measurement", drv->format_name); @@ -5488,6 +5778,7 @@ BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts, int64_t bdrv_nb_sectors(BlockDriverState *bs) { BlockDriver *drv = bs->drv; + IO_CODE(); if (!drv) return -ENOMEDIUM; @@ -5508,6 +5799,7 @@ int64_t bdrv_nb_sectors(BlockDriverState *bs) int64_t bdrv_getlength(BlockDriverState *bs) { int64_t ret = bdrv_nb_sectors(bs); + IO_CODE(); if (ret < 0) { return ret; @@ -5522,12 +5814,14 @@ int64_t bdrv_getlength(BlockDriverState *bs) void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr) { int64_t nb_sectors = bdrv_nb_sectors(bs); + IO_CODE(); *nb_sectors_ptr = nb_sectors < 0 ? 0 : nb_sectors; } bool bdrv_is_sg(BlockDriverState *bs) { + IO_CODE(); return bs->sg; } @@ -5537,6 +5831,7 @@ bool bdrv_is_sg(BlockDriverState *bs) bool bdrv_supports_compressed_writes(BlockDriverState *bs) { BlockDriverState *filtered; + IO_CODE(); if (!bs->drv || !block_driver_can_compress(bs->drv)) { return false; @@ -5556,6 +5851,7 @@ bool bdrv_supports_compressed_writes(BlockDriverState *bs) const char *bdrv_get_format_name(BlockDriverState *bs) { + IO_CODE(); return bs->drv ? 
bs->drv->format_name : NULL; } @@ -5572,6 +5868,8 @@ void bdrv_iterate_format(void (*it)(void *opaque, const char *name), int i; const char **formats = NULL; + GLOBAL_STATE_CODE(); + QLIST_FOREACH(drv, &bdrv_drivers, list) { if (drv->format_name) { bool found = false; @@ -5630,6 +5928,7 @@ BlockDriverState *bdrv_find_node(const char *node_name) BlockDriverState *bs; assert(node_name); + GLOBAL_STATE_CODE(); QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) { if (!strcmp(node_name, bs->node_name)) { @@ -5646,6 +5945,8 @@ BlockDeviceInfoList *bdrv_named_nodes_list(bool flat, BlockDeviceInfoList *list; BlockDriverState *bs; + GLOBAL_STATE_CODE(); + list = NULL; QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) { BlockDeviceInfo *info = bdrv_block_device_info(NULL, bs, flat, errp); @@ -5721,6 +6022,7 @@ static void xdbg_graph_add_edge(XDbgBlockGraphConstructor *gr, void *parent, { BlockPermission qapi_perm; XDbgBlockGraphEdge *edge; + GLOBAL_STATE_CODE(); edge = g_new0(XDbgBlockGraphEdge, 1); @@ -5751,6 +6053,8 @@ XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp) BdrvChild *child; XDbgBlockGraphConstructor *gr = xdbg_graph_new(); + GLOBAL_STATE_CODE(); + for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) { char *allocated_name = NULL; const char *name = blk_name(blk); @@ -5766,13 +6070,16 @@ XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp) } } - for (job = block_job_next(NULL); job; job = block_job_next(job)) { - GSList *el; + WITH_JOB_LOCK_GUARD() { + for (job = block_job_next_locked(NULL); job; + job = block_job_next_locked(job)) { + GSList *el; - xdbg_graph_add_node(gr, job, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_JOB, - job->job.id); - for (el = job->nodes; el; el = el->next) { - xdbg_graph_add_edge(gr, job, (BdrvChild *)el->data); + xdbg_graph_add_node(gr, job, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_JOB, + job->job.id); + for (el = job->nodes; el; el = el->next) { + xdbg_graph_add_edge(gr, job, (BdrvChild *)el->data); + } } } @@ -5794,6 +6101,8 @@ BlockDriverState *bdrv_lookup_bs(const char *device, BlockBackend *blk; BlockDriverState *bs; + GLOBAL_STATE_CODE(); + if (device) { blk = blk_by_name(device); @@ -5825,6 +6134,9 @@ BlockDriverState *bdrv_lookup_bs(const char *device, * return false. If either argument is NULL, return false. */ bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base) { + + GLOBAL_STATE_CODE(); + while (top && top != base) { top = bdrv_filter_or_cow_bs(top); } @@ -5834,6 +6146,7 @@ bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base) BlockDriverState *bdrv_next_node(BlockDriverState *bs) { + GLOBAL_STATE_CODE(); if (!bs) { return QTAILQ_FIRST(&graph_bdrv_states); } @@ -5842,6 +6155,7 @@ BlockDriverState *bdrv_next_node(BlockDriverState *bs) BlockDriverState *bdrv_next_all_states(BlockDriverState *bs) { + GLOBAL_STATE_CODE(); if (!bs) { return QTAILQ_FIRST(&all_bdrv_states); } @@ -5850,6 +6164,7 @@ BlockDriverState *bdrv_next_all_states(BlockDriverState *bs) const char *bdrv_get_node_name(const BlockDriverState *bs) { + IO_CODE(); return bs->node_name; } @@ -5857,6 +6172,7 @@ const char *bdrv_get_parent_name(const BlockDriverState *bs) { BdrvChild *c; const char *name; + IO_CODE(); /* If multiple parents have a name, just pick the first one. 
*/ QLIST_FOREACH(c, &bs->parents, next_parent) { @@ -5874,6 +6190,7 @@ const char *bdrv_get_parent_name(const BlockDriverState *bs) /* TODO check what callers really want: bs->node_name or blk_name() */ const char *bdrv_get_device_name(const BlockDriverState *bs) { + IO_CODE(); return bdrv_get_parent_name(bs) ?: ""; } @@ -5883,22 +6200,26 @@ const char *bdrv_get_device_name(const BlockDriverState *bs) * absent, then this returns an empty (non-null) string. */ const char *bdrv_get_device_or_node_name(const BlockDriverState *bs) { + IO_CODE(); return bdrv_get_parent_name(bs) ?: bs->node_name; } int bdrv_get_flags(BlockDriverState *bs) { + IO_CODE(); return bs->open_flags; } int bdrv_has_zero_init_1(BlockDriverState *bs) { + GLOBAL_STATE_CODE(); return 1; } int bdrv_has_zero_init(BlockDriverState *bs) { BlockDriverState *filtered; + GLOBAL_STATE_CODE(); if (!bs->drv) { return 0; @@ -5924,6 +6245,7 @@ int bdrv_has_zero_init(BlockDriverState *bs) bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs) { + IO_CODE(); if (!(bs->open_flags & BDRV_O_UNMAP)) { return false; } @@ -5934,6 +6256,7 @@ bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs) void bdrv_get_backing_filename(BlockDriverState *bs, char *filename, int filename_size) { + IO_CODE(); pstrcpy(filename, filename_size, bs->backing_file); } @@ -5941,6 +6264,7 @@ int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) { int ret; BlockDriver *drv = bs->drv; + IO_CODE(); /* if bs->drv == NULL, bs is closed, so there's nothing to do here */ if (!drv) { return -ENOMEDIUM; @@ -5969,6 +6293,7 @@ ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs, Error **errp) { BlockDriver *drv = bs->drv; + IO_CODE(); if (drv && drv->bdrv_get_specific_info) { return drv->bdrv_get_specific_info(bs, errp); } @@ -5978,6 +6303,7 @@ ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs, BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs) { BlockDriver *drv = bs->drv; + IO_CODE(); if (!drv || !drv->bdrv_get_specific_stats) { return NULL; } @@ -5986,6 +6312,7 @@ BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs) void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event) { + IO_CODE(); if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) { return; } @@ -5995,6 +6322,7 @@ void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event) static BlockDriverState *bdrv_find_debug_node(BlockDriverState *bs) { + GLOBAL_STATE_CODE(); while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) { bs = bdrv_primary_bs(bs); } @@ -6010,6 +6338,7 @@ static BlockDriverState *bdrv_find_debug_node(BlockDriverState *bs) int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event, const char *tag) { + GLOBAL_STATE_CODE(); bs = bdrv_find_debug_node(bs); if (bs) { return bs->drv->bdrv_debug_breakpoint(bs, event, tag); @@ -6020,6 +6349,7 @@ int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event, int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag) { + GLOBAL_STATE_CODE(); bs = bdrv_find_debug_node(bs); if (bs) { return bs->drv->bdrv_debug_remove_breakpoint(bs, tag); @@ -6030,6 +6360,7 @@ int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag) int bdrv_debug_resume(BlockDriverState *bs, const char *tag) { + GLOBAL_STATE_CODE(); while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) { bs = bdrv_primary_bs(bs); } @@ -6043,6 +6374,7 @@ int bdrv_debug_resume(BlockDriverState *bs, const char *tag) bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag) { + 
GLOBAL_STATE_CODE(); while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) { bs = bdrv_primary_bs(bs); } @@ -6070,6 +6402,8 @@ BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs, BlockDriverState *retval = NULL; BlockDriverState *bs_below; + GLOBAL_STATE_CODE(); + if (!bs || !bs->drv || !backing_file) { return NULL; } @@ -6180,19 +6514,21 @@ void bdrv_init_with_whitelist(void) bdrv_init(); } -int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp) +int bdrv_activate(BlockDriverState *bs, Error **errp) { BdrvChild *child, *parent; Error *local_err = NULL; int ret; BdrvDirtyBitmap *bm; + GLOBAL_STATE_CODE(); + if (!bs->drv) { return -ENOMEDIUM; } QLIST_FOREACH(child, &bs->children, next) { - bdrv_co_invalidate_cache(child->bs, &local_err); + bdrv_activate(child->bs, &local_err); if (local_err) { error_propagate(errp, local_err); return -EINVAL; @@ -6205,7 +6541,7 @@ int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp) * Note that the required permissions of inactive images are always a * subset of the permissions required after activating the image. This * allows us to just get the permissions upfront without restricting - * drv->bdrv_invalidate_cache(). + * bdrv_co_invalidate_cache(). * * It also means that in error cases, we don't have to try and revert to * the old permissions (which is an operation that could fail, too). We can @@ -6220,13 +6556,10 @@ int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp) return ret; } - if (bs->drv->bdrv_co_invalidate_cache) { - bs->drv->bdrv_co_invalidate_cache(bs, &local_err); - if (local_err) { - bs->open_flags |= BDRV_O_INACTIVE; - error_propagate(errp, local_err); - return -EINVAL; - } + ret = bdrv_invalidate_cache(bs, errp); + if (ret < 0) { + bs->open_flags |= BDRV_O_INACTIVE; + return ret; } FOR_EACH_DIRTY_BITMAP(bs, bm) { @@ -6255,17 +6588,37 @@ int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp) return 0; } -void bdrv_invalidate_cache_all(Error **errp) +int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp) +{ + Error *local_err = NULL; + IO_CODE(); + + assert(!(bs->open_flags & BDRV_O_INACTIVE)); + + if (bs->drv->bdrv_co_invalidate_cache) { + bs->drv->bdrv_co_invalidate_cache(bs, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return -EINVAL; + } + } + + return 0; +} + +void bdrv_activate_all(Error **errp) { BlockDriverState *bs; BdrvNextIterator it; + GLOBAL_STATE_CODE(); + for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { AioContext *aio_context = bdrv_get_aio_context(bs); int ret; aio_context_acquire(aio_context); - ret = bdrv_invalidate_cache(bs, errp); + ret = bdrv_activate(bs, errp); aio_context_release(aio_context); if (ret < 0) { bdrv_next_cleanup(&it); @@ -6277,6 +6630,7 @@ void bdrv_invalidate_cache_all(Error **errp) static bool bdrv_has_bds_parent(BlockDriverState *bs, bool only_active) { BdrvChild *parent; + GLOBAL_STATE_CODE(); QLIST_FOREACH(parent, &bs->parents, next_parent) { if (parent->klass->parent_is_bds) { @@ -6294,6 +6648,9 @@ static int bdrv_inactivate_recurse(BlockDriverState *bs) { BdrvChild *child, *parent; int ret; + uint64_t cumulative_perms, cumulative_shared_perms; + + GLOBAL_STATE_CODE(); if (!bs->drv) { return -ENOMEDIUM; @@ -6324,6 +6681,13 @@ static int bdrv_inactivate_recurse(BlockDriverState *bs) } } + bdrv_get_cumulative_perm(bs, &cumulative_perms, + &cumulative_shared_perms); + if (cumulative_perms & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) { 
+ /* Our inactive parents still need write access. Inactivation failed. */ + return -EPERM; + } + bs->open_flags |= BDRV_O_INACTIVE; /* @@ -6351,6 +6715,8 @@ int bdrv_inactivate_all(void) int ret = 0; GSList *aio_ctxs = NULL, *ctx; + GLOBAL_STATE_CODE(); + for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { AioContext *aio_context = bdrv_get_aio_context(bs); @@ -6394,6 +6760,7 @@ bool bdrv_is_inserted(BlockDriverState *bs) { BlockDriver *drv = bs->drv; BdrvChild *child; + IO_CODE(); if (!drv) { return false; @@ -6415,6 +6782,7 @@ bool bdrv_is_inserted(BlockDriverState *bs) void bdrv_eject(BlockDriverState *bs, bool eject_flag) { BlockDriver *drv = bs->drv; + IO_CODE(); if (drv && drv->bdrv_eject) { drv->bdrv_eject(bs, eject_flag); @@ -6428,7 +6796,7 @@ void bdrv_eject(BlockDriverState *bs, bool eject_flag) void bdrv_lock_medium(BlockDriverState *bs, bool locked) { BlockDriver *drv = bs->drv; - + IO_CODE(); trace_bdrv_lock_medium(bs, locked); if (drv && drv->bdrv_lock_medium) { @@ -6439,6 +6807,7 @@ void bdrv_lock_medium(BlockDriverState *bs, bool locked) /* Get a reference to bs */ void bdrv_ref(BlockDriverState *bs) { + GLOBAL_STATE_CODE(); bs->refcnt++; } @@ -6447,6 +6816,7 @@ void bdrv_ref(BlockDriverState *bs) * deleted. */ void bdrv_unref(BlockDriverState *bs) { + GLOBAL_STATE_CODE(); if (!bs) { return; } @@ -6464,6 +6834,7 @@ struct BdrvOpBlocker { bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp) { BdrvOpBlocker *blocker; + GLOBAL_STATE_CODE(); assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX); if (!QLIST_EMPTY(&bs->op_blockers[op])) { blocker = QLIST_FIRST(&bs->op_blockers[op]); @@ -6478,6 +6849,7 @@ bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp) void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason) { BdrvOpBlocker *blocker; + GLOBAL_STATE_CODE(); assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX); blocker = g_new0(BdrvOpBlocker, 1); @@ -6488,6 +6860,7 @@ void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason) void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason) { BdrvOpBlocker *blocker, *next; + GLOBAL_STATE_CODE(); assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX); QLIST_FOREACH_SAFE(blocker, &bs->op_blockers[op], list, next) { if (blocker->reason == reason) { @@ -6500,6 +6873,7 @@ void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason) void bdrv_op_block_all(BlockDriverState *bs, Error *reason) { int i; + GLOBAL_STATE_CODE(); for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) { bdrv_op_block(bs, i, reason); } @@ -6508,6 +6882,7 @@ void bdrv_op_block_all(BlockDriverState *bs, Error *reason) void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason) { int i; + GLOBAL_STATE_CODE(); for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) { bdrv_op_unblock(bs, i, reason); } @@ -6516,7 +6891,7 @@ void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason) bool bdrv_op_blocker_is_empty(BlockDriverState *bs) { int i; - + GLOBAL_STATE_CODE(); for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) { if (!QLIST_EMPTY(&bs->op_blockers[i])) { return false; @@ -6538,6 +6913,8 @@ void bdrv_img_create(const char *filename, const char *fmt, Error *local_err = NULL; int ret = 0; + GLOBAL_STATE_CODE(); + /* Find driver and parse its options */ drv = bdrv_find_format(fmt); if (!drv) { @@ -6715,6 +7092,7 @@ out: AioContext *bdrv_get_aio_context(BlockDriverState *bs) { + IO_CODE(); return bs ? 
bs->aio_context : qemu_get_aio_context(); } @@ -6723,6 +7101,7 @@ AioContext *coroutine_fn bdrv_co_enter(BlockDriverState *bs) Coroutine *self = qemu_coroutine_self(); AioContext *old_ctx = qemu_coroutine_get_aio_context(self); AioContext *new_ctx; + IO_CODE(); /* * Increase bs->in_flight to ensure that this operation is completed before @@ -6737,6 +7116,7 @@ AioContext *coroutine_fn bdrv_co_enter(BlockDriverState *bs) void coroutine_fn bdrv_co_leave(BlockDriverState *bs, AioContext *old_ctx) { + IO_CODE(); aio_co_reschedule_self(old_ctx); bdrv_dec_in_flight(bs); } @@ -6770,11 +7150,13 @@ void coroutine_fn bdrv_co_unlock(BlockDriverState *bs) void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co) { + IO_CODE(); aio_co_enter(bdrv_get_aio_context(bs), co); } static void bdrv_do_remove_aio_context_notifier(BdrvAioNotifier *ban) { + GLOBAL_STATE_CODE(); QLIST_REMOVE(ban, list); g_free(ban); } @@ -6784,6 +7166,7 @@ static void bdrv_detach_aio_context(BlockDriverState *bs) BdrvAioNotifier *baf, *baf_tmp; assert(!bs->walking_aio_notifiers); + GLOBAL_STATE_CODE(); bs->walking_aio_notifiers = true; QLIST_FOREACH_SAFE(baf, &bs->aio_notifiers, list, baf_tmp) { if (baf->deleted) { @@ -6804,6 +7187,7 @@ static void bdrv_detach_aio_context(BlockDriverState *bs) if (bs->quiesce_counter) { aio_enable_external(bs->aio_context); } + assert_bdrv_graph_writable(bs); bs->aio_context = NULL; } @@ -6811,11 +7195,13 @@ static void bdrv_attach_aio_context(BlockDriverState *bs, AioContext *new_context) { BdrvAioNotifier *ban, *ban_tmp; + GLOBAL_STATE_CODE(); if (bs->quiesce_counter) { aio_disable_external(new_context); } + assert_bdrv_graph_writable(bs); bs->aio_context = new_context; if (bs->drv && bs->drv->bdrv_attach_aio_context) { @@ -6834,6 +7220,91 @@ static void bdrv_attach_aio_context(BlockDriverState *bs, bs->walking_aio_notifiers = false; } +typedef struct BdrvStateSetAioContext { + AioContext *new_ctx; + BlockDriverState *bs; +} BdrvStateSetAioContext; + +static bool bdrv_parent_change_aio_context(BdrvChild *c, AioContext *ctx, + GHashTable *visited, + Transaction *tran, + Error **errp) +{ + GLOBAL_STATE_CODE(); + if (g_hash_table_contains(visited, c)) { + return true; + } + g_hash_table_add(visited, c); + + /* + * A BdrvChildClass that doesn't handle AioContext changes cannot + * tolerate any AioContext changes + */ + if (!c->klass->change_aio_ctx) { + char *user = bdrv_child_user_desc(c); + error_setg(errp, "Changing iothreads is not supported by %s", user); + g_free(user); + return false; + } + if (!c->klass->change_aio_ctx(c, ctx, visited, tran, errp)) { + assert(!errp || *errp); + return false; + } + return true; +} + +bool bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx, + GHashTable *visited, Transaction *tran, + Error **errp) +{ + GLOBAL_STATE_CODE(); + if (g_hash_table_contains(visited, c)) { + return true; + } + g_hash_table_add(visited, c); + return bdrv_change_aio_context(c->bs, ctx, visited, tran, errp); +} + +static void bdrv_set_aio_context_clean(void *opaque) +{ + BdrvStateSetAioContext *state = (BdrvStateSetAioContext *) opaque; + BlockDriverState *bs = (BlockDriverState *) state->bs; + + /* Paired with bdrv_drained_begin in bdrv_change_aio_context() */ + bdrv_drained_end(bs); + + g_free(state); +} + +static void bdrv_set_aio_context_commit(void *opaque) +{ + BdrvStateSetAioContext *state = (BdrvStateSetAioContext *) opaque; + BlockDriverState *bs = (BlockDriverState *) state->bs; + AioContext *new_context = state->new_ctx; + AioContext *old_context = 
bdrv_get_aio_context(bs); + assert_bdrv_graph_writable(bs); + + /* + * Take the old AioContex when detaching it from bs. + * At this point, new_context lock is already acquired, and we are now + * also taking old_context. This is safe as long as bdrv_detach_aio_context + * does not call AIO_POLL_WHILE(). + */ + if (old_context != qemu_get_aio_context()) { + aio_context_acquire(old_context); + } + bdrv_detach_aio_context(bs); + if (old_context != qemu_get_aio_context()) { + aio_context_release(old_context); + } + bdrv_attach_aio_context(bs, new_context); +} + +static TransactionActionDrv set_aio_context = { + .commit = bdrv_set_aio_context_commit, + .clean = bdrv_set_aio_context_clean, +}; + /* * Changes the AioContext used for fd handlers, timers, and BHs by this * BlockDriverState and all its children and parents. @@ -6844,175 +7315,129 @@ static void bdrv_attach_aio_context(BlockDriverState *bs, * must not own the AioContext lock for new_context (unless new_context is the * same as the current context of bs). * - * @ignore will accumulate all visited BdrvChild object. The caller is + * @visited will accumulate all visited BdrvChild objects. The caller is * responsible for freeing the list afterwards. */ -void bdrv_set_aio_context_ignore(BlockDriverState *bs, - AioContext *new_context, GSList **ignore) -{ - AioContext *old_context = bdrv_get_aio_context(bs); - GSList *children_to_process = NULL; - GSList *parents_to_process = NULL; - GSList *entry; - BdrvChild *child, *parent; - - g_assert(qemu_get_current_aio_context() == qemu_get_aio_context()); - - if (old_context == new_context) { - return; - } - - bdrv_drained_begin(bs); - - QLIST_FOREACH(child, &bs->children, next) { - if (g_slist_find(*ignore, child)) { - continue; - } - *ignore = g_slist_prepend(*ignore, child); - children_to_process = g_slist_prepend(children_to_process, child); - } - - QLIST_FOREACH(parent, &bs->parents, next_parent) { - if (g_slist_find(*ignore, parent)) { - continue; - } - *ignore = g_slist_prepend(*ignore, parent); - parents_to_process = g_slist_prepend(parents_to_process, parent); - } - - for (entry = children_to_process; - entry != NULL; - entry = g_slist_next(entry)) { - child = entry->data; - bdrv_set_aio_context_ignore(child->bs, new_context, ignore); - } - g_slist_free(children_to_process); - - for (entry = parents_to_process; - entry != NULL; - entry = g_slist_next(entry)) { - parent = entry->data; - assert(parent->klass->set_aio_ctx); - parent->klass->set_aio_ctx(parent, new_context, ignore); - } - g_slist_free(parents_to_process); - - bdrv_detach_aio_context(bs); - - /* Acquire the new context, if necessary */ - if (qemu_get_aio_context() != new_context) { - aio_context_acquire(new_context); - } - - bdrv_attach_aio_context(bs, new_context); - - /* - * If this function was recursively called from - * bdrv_set_aio_context_ignore(), there may be nodes in the - * subtree that have not yet been moved to the new AioContext. - * Release the old one so bdrv_drained_end() can poll them. 
- */ - if (qemu_get_aio_context() != old_context) { - aio_context_release(old_context); - } - - bdrv_drained_end(bs); - - if (qemu_get_aio_context() != old_context) { - aio_context_acquire(old_context); - } - if (qemu_get_aio_context() != new_context) { - aio_context_release(new_context); - } -} - -static bool bdrv_parent_can_set_aio_context(BdrvChild *c, AioContext *ctx, - GSList **ignore, Error **errp) -{ - if (g_slist_find(*ignore, c)) { - return true; - } - *ignore = g_slist_prepend(*ignore, c); - - /* - * A BdrvChildClass that doesn't handle AioContext changes cannot - * tolerate any AioContext changes - */ - if (!c->klass->can_set_aio_ctx) { - char *user = bdrv_child_user_desc(c); - error_setg(errp, "Changing iothreads is not supported by %s", user); - g_free(user); - return false; - } - if (!c->klass->can_set_aio_ctx(c, ctx, ignore, errp)) { - assert(!errp || *errp); - return false; - } - return true; -} - -bool bdrv_child_can_set_aio_context(BdrvChild *c, AioContext *ctx, - GSList **ignore, Error **errp) -{ - if (g_slist_find(*ignore, c)) { - return true; - } - *ignore = g_slist_prepend(*ignore, c); - return bdrv_can_set_aio_context(c->bs, ctx, ignore, errp); -} - -/* @ignore will accumulate all visited BdrvChild object. The caller is - * responsible for freeing the list afterwards. */ -bool bdrv_can_set_aio_context(BlockDriverState *bs, AioContext *ctx, - GSList **ignore, Error **errp) +static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx, + GHashTable *visited, Transaction *tran, + Error **errp) { BdrvChild *c; + BdrvStateSetAioContext *state; + + GLOBAL_STATE_CODE(); if (bdrv_get_aio_context(bs) == ctx) { return true; } QLIST_FOREACH(c, &bs->parents, next_parent) { - if (!bdrv_parent_can_set_aio_context(c, ctx, ignore, errp)) { + if (!bdrv_parent_change_aio_context(c, ctx, visited, tran, errp)) { return false; } } + QLIST_FOREACH(c, &bs->children, next) { - if (!bdrv_child_can_set_aio_context(c, ctx, ignore, errp)) { + if (!bdrv_child_change_aio_context(c, ctx, visited, tran, errp)) { return false; } } + state = g_new(BdrvStateSetAioContext, 1); + *state = (BdrvStateSetAioContext) { + .new_ctx = ctx, + .bs = bs, + }; + + /* Paired with bdrv_drained_end in bdrv_set_aio_context_clean() */ + bdrv_drained_begin(bs); + + tran_add(tran, &set_aio_context, state); + return true; } -int bdrv_child_try_set_aio_context(BlockDriverState *bs, AioContext *ctx, - BdrvChild *ignore_child, Error **errp) +/* + * Change bs's and recursively all of its parents' and children's AioContext + * to the given new context, returning an error if that isn't possible. + * + * If ignore_child is not NULL, that child (and its subgraph) will not + * be touched. + * + * This function still requires the caller to take the bs current + * AioContext lock, otherwise draining will fail since AIO_WAIT_WHILE + * assumes the lock is always held if bs is in another AioContext. + * For the same reason, it temporarily also holds the new AioContext, since + * bdrv_drained_end calls BDRV_POLL_WHILE that assumes the lock is taken too. + * Therefore the new AioContext lock must not be taken by the caller. + */ +int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, + BdrvChild *ignore_child, Error **errp) { - GSList *ignore; - bool ret; + Transaction *tran; + GHashTable *visited; + int ret; + AioContext *old_context = bdrv_get_aio_context(bs); + GLOBAL_STATE_CODE(); - ignore = ignore_child ? 
g_slist_prepend(NULL, ignore_child) : NULL; - ret = bdrv_can_set_aio_context(bs, ctx, &ignore, errp); - g_slist_free(ignore); + /* + * Recursion phase: go through all nodes of the graph. + * Take care of checking that all nodes support changing AioContext + * and drain them, building a linear list of callbacks to run if everything + * is successful (the transaction itself). + */ + tran = tran_new(); + visited = g_hash_table_new(NULL, NULL); + if (ignore_child) { + g_hash_table_add(visited, ignore_child); + } + ret = bdrv_change_aio_context(bs, ctx, visited, tran, errp); + g_hash_table_destroy(visited); + + /* + * Linear phase: run the callbacks collected during the recursion, either + * switching all nodes to the new AioContext (transaction commit) or + * undoing all changes done in the recursion (transaction abort). + */ if (!ret) { + /* Just run clean() callbacks. No AioContext changed. */ + tran_abort(tran); return -EPERM; } - ignore = ignore_child ? g_slist_prepend(NULL, ignore_child) : NULL; - bdrv_set_aio_context_ignore(bs, ctx, &ignore); - g_slist_free(ignore); + /* + * Release the old AioContext; it is not needed anymore, as all the + * bdrv_drained_begin() calls have already been made. + */ + if (qemu_get_aio_context() != old_context) { + aio_context_release(old_context); + } + + /* + * Acquire the new AioContext since bdrv_drained_end() is going to be called + * after we have switched all nodes to the new AioContext, and that function + * assumes that the lock of the bs is always taken. + */ + if (qemu_get_aio_context() != ctx) { + aio_context_acquire(ctx); + } + + tran_commit(tran); + + if (qemu_get_aio_context() != ctx) { + aio_context_release(ctx); + } + + /* Re-acquire the old AioContext, since the caller takes and releases it.
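
The recursion/linear split implemented here follows QEMU's generic transaction pattern: each node visited during the recursion contributes an action (a TransactionActionDrv with .commit and .clean hooks), and a single linear pass later either commits or aborts them all. The standalone sketch below re-implements that pattern with a hand-rolled action list rather than the tran_new()/tran_add()/tran_commit()/tran_abort() helpers used in the patch, so every name in it is illustrative only:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* One collected action; mirrors the shape of a TransactionActionDrv entry. */
typedef struct Action {
    void (*commit)(void *opaque);   /* run on success */
    void (*abort)(void *opaque);    /* run on failure */
    void (*clean)(void *opaque);    /* always run last */
    void *opaque;
    struct Action *next;
} Action;

static Action *action_list;

static void tran_add_sketch(void (*commit)(void *), void (*abort_cb)(void *),
                            void (*clean)(void *), void *opaque)
{
    Action *a = malloc(sizeof(*a));
    *a = (Action){ commit, abort_cb, clean, opaque, action_list };
    action_list = a;   /* prepended: actions run in reverse collection order */
}

static void tran_finalize_sketch(bool success)
{
    while (action_list) {
        Action *a = action_list;
        action_list = a->next;
        if (success && a->commit) {
            a->commit(a->opaque);
        } else if (!success && a->abort) {
            a->abort(a->opaque);
        }
        if (a->clean) {
            a->clean(a->opaque);
        }
        free(a);
    }
}

/* Example action: "move node to new context" reduced to a printf. */
static void move_commit(void *opaque) { printf("commit: %s\n", (char *)opaque); }
static void move_clean(void *opaque)  { printf("clean:  %s\n", (char *)opaque); }

int main(void)
{
    /* Recursion phase (flattened here): collect one action per node. */
    tran_add_sketch(move_commit, NULL, move_clean, "node-a");
    tran_add_sketch(move_commit, NULL, move_clean, "node-b");

    /* Linear phase: commit everything, or pass false to undo instead. */
    tran_finalize_sketch(true);
    return 0;
}

In the sketch the clean() hook runs unconditionally, which is the property the patch relies on to pair the bdrv_drained_begin() taken during the recursion with the bdrv_drained_end() in bdrv_set_aio_context_clean().
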
*/ + if (qemu_get_aio_context() != old_context) { + aio_context_acquire(old_context); + } return 0; } -int bdrv_try_set_aio_context(BlockDriverState *bs, AioContext *ctx, - Error **errp) -{ - return bdrv_child_try_set_aio_context(bs, ctx, NULL, errp); -} - void bdrv_add_aio_context_notifier(BlockDriverState *bs, void (*attached_aio_context)(AioContext *new_context, void *opaque), void (*detach_aio_context)(void *opaque), void *opaque) @@ -7023,6 +7448,7 @@ void bdrv_add_aio_context_notifier(BlockDriverState *bs, .detach_aio_context = detach_aio_context, .opaque = opaque }; + GLOBAL_STATE_CODE(); QLIST_INSERT_HEAD(&bs->aio_notifiers, ban, list); } @@ -7034,6 +7460,7 @@ void bdrv_remove_aio_context_notifier(BlockDriverState *bs, void *opaque) { BdrvAioNotifier *ban, *ban_next; + GLOBAL_STATE_CODE(); QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) { if (ban->attached_aio_context == attached_aio_context && @@ -7058,6 +7485,7 @@ int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts, bool force, Error **errp) { + GLOBAL_STATE_CODE(); if (!bs->drv) { error_setg(errp, "Node is ejected"); return -ENOMEDIUM; @@ -7088,6 +7516,8 @@ bool bdrv_recurse_can_replace(BlockDriverState *bs, { BlockDriverState *filtered; + GLOBAL_STATE_CODE(); + if (!bs || !bs->drv) { return false; } @@ -7128,6 +7558,8 @@ BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs, BlockDriverState *to_replace_bs = bdrv_find_node(node_name); AioContext *aio_context; + GLOBAL_STATE_CODE(); + if (!to_replace_bs) { error_setg(errp, "Failed to find node with node-name='%s'", node_name); return NULL; @@ -7255,8 +7687,9 @@ static bool append_strong_runtime_options(QDict *d, BlockDriverState *bs) /* Note: This function may return false positives; it may return true * even if opening the backing file specified by bs's image header * would result in exactly bs->backing. 
*/ -bool bdrv_backing_overridden(BlockDriverState *bs) +static bool bdrv_backing_overridden(BlockDriverState *bs) { + GLOBAL_STATE_CODE(); if (bs->backing) { return strcmp(bs->auto_backing_file, bs->backing->bs->filename); @@ -7289,6 +7722,8 @@ void bdrv_refresh_filename(BlockDriverState *bs) bool generate_json_filename; /* Whether our default implementation should fill exact_filename (false) or not (true) */ + GLOBAL_STATE_CODE(); + if (!drv) { return; } @@ -7411,6 +7846,8 @@ char *bdrv_dirname(BlockDriverState *bs, Error **errp) BlockDriver *drv = bs->drv; BlockDriverState *child_bs; + GLOBAL_STATE_CODE(); + if (!drv) { error_setg(errp, "Node '%s' is ejected", bs->node_name); return NULL; @@ -7442,7 +7879,7 @@ char *bdrv_dirname(BlockDriverState *bs, Error **errp) void bdrv_add_child(BlockDriverState *parent_bs, BlockDriverState *child_bs, Error **errp) { - + GLOBAL_STATE_CODE(); if (!parent_bs->drv || !parent_bs->drv->bdrv_add_child) { error_setg(errp, "The node %s does not support adding a child", bdrv_get_device_or_node_name(parent_bs)); @@ -7462,6 +7899,7 @@ void bdrv_del_child(BlockDriverState *parent_bs, BdrvChild *child, Error **errp) { BdrvChild *tmp; + GLOBAL_STATE_CODE(); if (!parent_bs->drv || !parent_bs->drv->bdrv_del_child) { error_setg(errp, "The node %s does not support removing a child", bdrv_get_device_or_node_name(parent_bs)); @@ -7489,6 +7927,7 @@ int bdrv_make_empty(BdrvChild *c, Error **errp) BlockDriver *drv = c->bs->drv; int ret; + GLOBAL_STATE_CODE(); assert(c->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)); if (!drv->bdrv_make_empty) { @@ -7513,6 +7952,8 @@ int bdrv_make_empty(BdrvChild *c, Error **errp) */ BdrvChild *bdrv_cow_child(BlockDriverState *bs) { + IO_CODE(); + if (!bs || !bs->drv) { return NULL; } @@ -7536,6 +7977,7 @@ BdrvChild *bdrv_cow_child(BlockDriverState *bs) BdrvChild *bdrv_filter_child(BlockDriverState *bs) { BdrvChild *c; + IO_CODE(); if (!bs || !bs->drv) { return NULL; @@ -7567,6 +8009,7 @@ BdrvChild *bdrv_filter_or_cow_child(BlockDriverState *bs) { BdrvChild *cow_child = bdrv_cow_child(bs); BdrvChild *filter_child = bdrv_filter_child(bs); + IO_CODE(); /* Filter nodes cannot have COW backing files */ assert(!(cow_child && filter_child)); @@ -7587,6 +8030,7 @@ BdrvChild *bdrv_filter_or_cow_child(BlockDriverState *bs) BdrvChild *bdrv_primary_child(BlockDriverState *bs) { BdrvChild *c, *found = NULL; + IO_CODE(); QLIST_FOREACH(c, &bs->children, next) { if (c->role & BDRV_CHILD_PRIMARY) { @@ -7639,6 +8083,7 @@ static BlockDriverState *bdrv_do_skip_filters(BlockDriverState *bs, */ BlockDriverState *bdrv_skip_implicit_filters(BlockDriverState *bs) { + GLOBAL_STATE_CODE(); return bdrv_do_skip_filters(bs, true); } @@ -7648,6 +8093,7 @@ BlockDriverState *bdrv_skip_implicit_filters(BlockDriverState *bs) */ BlockDriverState *bdrv_skip_filters(BlockDriverState *bs) { + IO_CODE(); return bdrv_do_skip_filters(bs, false); } @@ -7657,5 +8103,81 @@ BlockDriverState *bdrv_skip_filters(BlockDriverState *bs) */ BlockDriverState *bdrv_backing_chain_next(BlockDriverState *bs) { + IO_CODE(); return bdrv_skip_filters(bdrv_cow_bs(bdrv_skip_filters(bs))); } + +/** + * Check whether [offset, offset + bytes) overlaps with the cached + * block-status data region. + * + * If so, and @pnum is not NULL, set *pnum to `bsc.data_end - offset`, + * which is what bdrv_bsc_is_data()'s interface needs. + * Otherwise, *pnum is not touched. 
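
The contract described in this comment reduces to a half-open interval overlap test plus one subtraction; QEMU's code expresses the test through ranges_overlap(), as shown right below. A standalone illustration with invented sample values:

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

/* Half-open intervals [a, a+alen) and [b, b+blen) overlap iff each one
 * starts before the other one ends. */
static bool overlaps(int64_t a, int64_t alen, int64_t b, int64_t blen)
{
    return a < b + blen && b < a + alen;
}

int main(void)
{
    int64_t data_start = 4096, data_end = 65536;   /* cached data region */
    int64_t offset = 8192, bytes = 512;            /* queried request */
    int64_t pnum = 0;

    if (overlaps(offset, bytes, data_start, data_end - data_start)) {
        /* Same value the cache hands back through *pnum. */
        pnum = data_end - offset;
    }
    printf("pnum = %" PRId64 "\n", pnum);   /* 57344 bytes of known data */
    return 0;
}
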
+ */ +static bool bdrv_bsc_range_overlaps_locked(BlockDriverState *bs, + int64_t offset, int64_t bytes, + int64_t *pnum) +{ + BdrvBlockStatusCache *bsc = qatomic_rcu_read(&bs->block_status_cache); + bool overlaps; + + overlaps = + qatomic_read(&bsc->valid) && + ranges_overlap(offset, bytes, bsc->data_start, + bsc->data_end - bsc->data_start); + + if (overlaps && pnum) { + *pnum = bsc->data_end - offset; + } + + return overlaps; +} + +/** + * See block_int.h for this function's documentation. + */ +bool bdrv_bsc_is_data(BlockDriverState *bs, int64_t offset, int64_t *pnum) +{ + IO_CODE(); + RCU_READ_LOCK_GUARD(); + return bdrv_bsc_range_overlaps_locked(bs, offset, 1, pnum); +} + +/** + * See block_int.h for this function's documentation. + */ +void bdrv_bsc_invalidate_range(BlockDriverState *bs, + int64_t offset, int64_t bytes) +{ + IO_CODE(); + RCU_READ_LOCK_GUARD(); + + if (bdrv_bsc_range_overlaps_locked(bs, offset, bytes, NULL)) { + qatomic_set(&bs->block_status_cache->valid, false); + } +} + +/** + * See block_int.h for this function's documentation. + */ +void bdrv_bsc_fill(BlockDriverState *bs, int64_t offset, int64_t bytes) +{ + BdrvBlockStatusCache *new_bsc = g_new(BdrvBlockStatusCache, 1); + BdrvBlockStatusCache *old_bsc; + IO_CODE(); + + *new_bsc = (BdrvBlockStatusCache) { + .valid = true, + .data_start = offset, + .data_end = offset + bytes, + }; + + QEMU_LOCK_GUARD(&bs->bsc_modify_lock); + + old_bsc = qatomic_rcu_read(&bs->block_status_cache); + qatomic_rcu_set(&bs->block_status_cache, new_bsc); + if (old_bsc) { + g_free_rcu(old_bsc, rcu); + } +} diff --git a/block/accounting.c b/block/accounting.c index 2030851d79..2829745377 100644 --- a/block/accounting.c +++ b/block/accounting.c @@ -38,13 +38,31 @@ void block_acct_init(BlockAcctStats *stats) if (qtest_enabled()) { clock_type = QEMU_CLOCK_VIRTUAL; } + stats->account_invalid = true; + stats->account_failed = true; } -void block_acct_setup(BlockAcctStats *stats, bool account_invalid, - bool account_failed) +static bool bool_from_onoffauto(OnOffAuto val, bool def) { - stats->account_invalid = account_invalid; - stats->account_failed = account_failed; + switch (val) { + case ON_OFF_AUTO_AUTO: + return def; + case ON_OFF_AUTO_ON: + return true; + case ON_OFF_AUTO_OFF: + return false; + default: + abort(); + } +} + +void block_acct_setup(BlockAcctStats *stats, enum OnOffAuto account_invalid, + enum OnOffAuto account_failed) +{ + stats->account_invalid = bool_from_onoffauto(account_invalid, + stats->account_invalid); + stats->account_failed = bool_from_onoffauto(account_failed, + stats->account_failed); } void block_acct_cleanup(BlockAcctStats *stats) diff --git a/block/aio_task.c b/block/aio_task.c index 88989fa248..9bd17ea2c1 100644 --- a/block/aio_task.c +++ b/block/aio_task.c @@ -98,6 +98,8 @@ AioTaskPool *coroutine_fn aio_task_pool_new(int max_busy_tasks) { AioTaskPool *pool = g_new0(AioTaskPool, 1); + assert(max_busy_tasks > 0); + pool->main_co = qemu_coroutine_self(); pool->max_busy_tasks = max_busy_tasks; diff --git a/block/amend.c b/block/amend.c index 392df9ef83..f696a006e3 100644 --- a/block/amend.c +++ b/block/amend.c @@ -53,10 +53,31 @@ static int coroutine_fn blockdev_amend_run(Job *job, Error **errp) return ret; } +static int blockdev_amend_pre_run(BlockdevAmendJob *s, Error **errp) +{ + if (s->bs->drv->bdrv_amend_pre_run) { + return s->bs->drv->bdrv_amend_pre_run(s->bs, errp); + } + + return 0; +} + +static void blockdev_amend_free(Job *job) +{ + BlockdevAmendJob *s = container_of(job, BlockdevAmendJob, common); + + 
if (s->bs->drv->bdrv_amend_clean) { + s->bs->drv->bdrv_amend_clean(s->bs); + } + + bdrv_unref(s->bs); +} + static const JobDriver blockdev_amend_job_driver = { .instance_size = sizeof(BlockdevAmendJob), .job_type = JOB_TYPE_AMEND, .run = blockdev_amend_run, + .free = blockdev_amend_free, }; void qmp_x_blockdev_amend(const char *job_id, @@ -110,8 +131,15 @@ void qmp_x_blockdev_amend(const char *job_id, return; } + bdrv_ref(bs); s->bs = bs, s->opts = QAPI_CLONE(BlockdevAmendOptions, options), s->force = has_force ? force : false; + + if (blockdev_amend_pre_run(s, errp)) { + job_early_fail(&s->common); + return; + } + job_start(&s->common); } diff --git a/block/backup-top.c b/block/backup-top.c deleted file mode 100644 index 425e3778be..0000000000 --- a/block/backup-top.c +++ /dev/null @@ -1,253 +0,0 @@ -/* - * backup-top filter driver - * - * The driver performs Copy-Before-Write (CBW) operation: it is injected above - * some node, and before each write it copies _old_ data to the target node. - * - * Copyright (c) 2018-2019 Virtuozzo International GmbH. - * - * Author: - * Sementsov-Ogievskiy Vladimir - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include "qemu/osdep.h" - -#include "sysemu/block-backend.h" -#include "qemu/cutils.h" -#include "qapi/error.h" -#include "block/block_int.h" -#include "block/qdict.h" -#include "block/block-copy.h" - -#include "block/backup-top.h" - -typedef struct BDRVBackupTopState { - BlockCopyState *bcs; - BdrvChild *target; - int64_t cluster_size; -} BDRVBackupTopState; - -static coroutine_fn int backup_top_co_preadv( - BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) -{ - return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags); -} - -static coroutine_fn int backup_top_cbw(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, BdrvRequestFlags flags) -{ - BDRVBackupTopState *s = bs->opaque; - uint64_t off, end; - - if (flags & BDRV_REQ_WRITE_UNCHANGED) { - return 0; - } - - off = QEMU_ALIGN_DOWN(offset, s->cluster_size); - end = QEMU_ALIGN_UP(offset + bytes, s->cluster_size); - - return block_copy(s->bcs, off, end - off, true); -} - -static int coroutine_fn backup_top_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) -{ - int ret = backup_top_cbw(bs, offset, bytes, 0); - if (ret < 0) { - return ret; - } - - return bdrv_co_pdiscard(bs->backing, offset, bytes); -} - -static int coroutine_fn backup_top_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, BdrvRequestFlags flags) -{ - int ret = backup_top_cbw(bs, offset, bytes, flags); - if (ret < 0) { - return ret; - } - - return bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags); -} - -static coroutine_fn int backup_top_co_pwritev(BlockDriverState *bs, - uint64_t offset, - uint64_t bytes, - QEMUIOVector *qiov, int flags) -{ - int ret = backup_top_cbw(bs, offset, bytes, flags); - if (ret < 0) { - return ret; - } - - return bdrv_co_pwritev(bs->backing, 
offset, bytes, qiov, flags); -} - -static int coroutine_fn backup_top_co_flush(BlockDriverState *bs) -{ - if (!bs->backing) { - return 0; - } - - return bdrv_co_flush(bs->backing->bs); -} - -static void backup_top_refresh_filename(BlockDriverState *bs) -{ - if (bs->backing == NULL) { - /* - * we can be here after failed bdrv_attach_child in - * bdrv_set_backing_hd - */ - return; - } - pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), - bs->backing->bs->filename); -} - -static void backup_top_child_perm(BlockDriverState *bs, BdrvChild *c, - BdrvChildRole role, - BlockReopenQueue *reopen_queue, - uint64_t perm, uint64_t shared, - uint64_t *nperm, uint64_t *nshared) -{ - if (!(role & BDRV_CHILD_FILTERED)) { - /* - * Target child - * - * Share write to target (child_file), to not interfere - * with guest writes to its disk which may be in target backing chain. - * Can't resize during a backup block job because we check the size - * only upfront. - */ - *nshared = BLK_PERM_ALL & ~BLK_PERM_RESIZE; - *nperm = BLK_PERM_WRITE; - } else { - /* Source child */ - bdrv_default_perms(bs, c, role, reopen_queue, - perm, shared, nperm, nshared); - - if (perm & BLK_PERM_WRITE) { - *nperm = *nperm | BLK_PERM_CONSISTENT_READ; - } - *nshared &= ~(BLK_PERM_WRITE | BLK_PERM_RESIZE); - } -} - -BlockDriver bdrv_backup_top_filter = { - .format_name = "backup-top", - .instance_size = sizeof(BDRVBackupTopState), - - .bdrv_co_preadv = backup_top_co_preadv, - .bdrv_co_pwritev = backup_top_co_pwritev, - .bdrv_co_pwrite_zeroes = backup_top_co_pwrite_zeroes, - .bdrv_co_pdiscard = backup_top_co_pdiscard, - .bdrv_co_flush = backup_top_co_flush, - - .bdrv_refresh_filename = backup_top_refresh_filename, - - .bdrv_child_perm = backup_top_child_perm, - - .is_filter = true, -}; - -BlockDriverState *bdrv_backup_top_append(BlockDriverState *source, - BlockDriverState *target, - const char *filter_node_name, - uint64_t cluster_size, - BackupPerf *perf, - BdrvRequestFlags write_flags, - BlockCopyState **bcs, - Error **errp) -{ - ERRP_GUARD(); - int ret; - BDRVBackupTopState *state; - BlockDriverState *top; - bool appended = false; - - assert(source->total_sectors == target->total_sectors); - - top = bdrv_new_open_driver(&bdrv_backup_top_filter, filter_node_name, - BDRV_O_RDWR, errp); - if (!top) { - return NULL; - } - - state = top->opaque; - top->total_sectors = source->total_sectors; - top->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED | - (BDRV_REQ_FUA & source->supported_write_flags); - top->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED | - ((BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK) & - source->supported_zero_flags); - - bdrv_ref(target); - state->target = bdrv_attach_child(top, target, "target", &child_of_bds, - BDRV_CHILD_DATA, errp); - if (!state->target) { - bdrv_unref(target); - bdrv_unref(top); - return NULL; - } - - bdrv_drained_begin(source); - - ret = bdrv_append(top, source, errp); - if (ret < 0) { - error_prepend(errp, "Cannot append backup-top filter: "); - goto fail; - } - appended = true; - - state->cluster_size = cluster_size; - state->bcs = block_copy_state_new(top->backing, state->target, - cluster_size, perf->use_copy_range, - write_flags, errp); - if (!state->bcs) { - error_prepend(errp, "Cannot create block-copy-state: "); - goto fail; - } - *bcs = state->bcs; - - bdrv_drained_end(source); - - return top; - -fail: - if (appended) { - bdrv_backup_top_drop(top); - } else { - bdrv_unref(top); - } - - bdrv_drained_end(source); - - return NULL; -} - -void 
bdrv_backup_top_drop(BlockDriverState *bs) -{ - BDRVBackupTopState *s = bs->opaque; - - bdrv_drop_filter(bs, &error_abort); - - block_copy_state_free(s->bcs); - - bdrv_unref(bs); -} diff --git a/block/backup.c b/block/backup.c index bd3614ce70..6a9ad97a53 100644 --- a/block/backup.c +++ b/block/backup.c @@ -27,13 +27,11 @@ #include "qemu/bitmap.h" #include "qemu/error-report.h" -#include "block/backup-top.h" - -#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16) +#include "block/copy-before-write.h" typedef struct BackupBlockJob { BlockJob common; - BlockDriverState *backup_top; + BlockDriverState *cbw; BlockDriverState *source_bs; BlockDriverState *target_bs; @@ -104,7 +102,7 @@ static void backup_clean(Job *job) { BackupBlockJob *s = container_of(job, BackupBlockJob, common.job); block_job_remove_all_bdrv(&s->common); - bdrv_backup_top_drop(s->backup_top); + bdrv_cbw_drop(s->cbw); } void backup_do_checkpoint(BlockJob *job, Error **errp) @@ -230,23 +228,19 @@ out: static void backup_init_bcs_bitmap(BackupBlockJob *job) { - bool ret; uint64_t estimate; BdrvDirtyBitmap *bcs_bitmap = block_copy_dirty_bitmap(job->bcs); if (job->sync_mode == MIRROR_SYNC_MODE_BITMAP) { - ret = bdrv_dirty_bitmap_merge_internal(bcs_bitmap, job->sync_bitmap, - NULL, true); - assert(ret); - } else { - if (job->sync_mode == MIRROR_SYNC_MODE_TOP) { - /* - * We can't hog the coroutine to initialize this thoroughly. - * Set a flag and resume work when we are able to yield safely. - */ - block_copy_set_skip_unallocated(job->bcs, true); - } - bdrv_set_dirty_bitmap(bcs_bitmap, 0, job->len); + bdrv_clear_dirty_bitmap(bcs_bitmap, NULL); + bdrv_dirty_bitmap_merge_internal(bcs_bitmap, job->sync_bitmap, NULL, + true); + } else if (job->sync_mode == MIRROR_SYNC_MODE_TOP) { + /* + * We can't hog the coroutine to initialize this thoroughly. + * Set a flag and resume work when we are able to yield safely. + */ + block_copy_set_skip_unallocated(job->bcs, true); } estimate = bdrv_get_dirty_count(bcs_bitmap); @@ -315,7 +309,7 @@ static void coroutine_fn backup_pause(Job *job) } } -static void coroutine_fn backup_set_speed(BlockJob *job, int64_t speed) +static void backup_set_speed(BlockJob *job, int64_t speed) { BackupBlockJob *s = container_of(job, BackupBlockJob, common); @@ -331,11 +325,12 @@ static void coroutine_fn backup_set_speed(BlockJob *job, int64_t speed) } } -static void backup_cancel(Job *job, bool force) +static bool backup_cancel(Job *job, bool force) { BackupBlockJob *s = container_of(job, BackupBlockJob, common.job); bdrv_cancel_in_flight(s->target_bs); + return true; } static const BlockJobDriver backup_job_driver = { @@ -354,43 +349,6 @@ static const BlockJobDriver backup_job_driver = { .set_speed = backup_set_speed, }; -static int64_t backup_calculate_cluster_size(BlockDriverState *target, - Error **errp) -{ - int ret; - BlockDriverInfo bdi; - bool target_does_cow = bdrv_backing_chain_next(target); - - /* - * If there is no backing file on the target, we cannot rely on COW if our - * backup cluster size is smaller than the target cluster size. Even for - * targets with a backing file, try to avoid COW if possible. - */ - ret = bdrv_get_info(target, &bdi); - if (ret == -ENOTSUP && !target_does_cow) { - /* Cluster size is not defined */ - warn_report("The target block device doesn't provide " - "information about the block size and it doesn't have a " - "backing file. The default block size of %u bytes is " - "used. 
If the actual block size of the target exceeds " - "this default, the backup may be unusable", - BACKUP_CLUSTER_SIZE_DEFAULT); - return BACKUP_CLUSTER_SIZE_DEFAULT; - } else if (ret < 0 && !target_does_cow) { - error_setg_errno(errp, -ret, - "Couldn't determine the cluster size of the target image, " - "which has no backing file"); - error_append_hint(errp, - "Aborting, since this may create an unusable destination image\n"); - return ret; - } else if (ret < 0 && target_does_cow) { - /* Not fatal; just trudge on ahead. */ - return BACKUP_CLUSTER_SIZE_DEFAULT; - } - - return MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size); -} - BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, BlockDriverState *target, int64_t speed, MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap, @@ -407,12 +365,12 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, int64_t len, target_len; BackupBlockJob *job = NULL; int64_t cluster_size; - BdrvRequestFlags write_flags; - BlockDriverState *backup_top = NULL; + BlockDriverState *cbw = NULL; BlockCopyState *bcs = NULL; assert(bs); assert(target); + GLOBAL_STATE_CODE(); /* QMP interface protects us from these cases */ assert(sync_mode != MIRROR_SYNC_MODE_INCREMENTAL); @@ -449,13 +407,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, return NULL; } - cluster_size = backup_calculate_cluster_size(target, errp); - if (cluster_size < 0) { - goto error; - } - - if (perf->max_workers < 1) { - error_setg(errp, "max-workers must be greater than zero"); + if (perf->max_workers < 1 || perf->max_workers > INT_MAX) { + error_setg(errp, "max-workers must be between 1 and %d", INT_MAX); return NULL; } @@ -465,13 +418,6 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, return NULL; } - if (perf->max_chunk && perf->max_chunk < cluster_size) { - error_setg(errp, "Required max-chunk (%" PRIi64 ") is less than backup " - "cluster size (%" PRIi64 ")", perf->max_chunk, cluster_size); - return NULL; - } - - if (sync_bitmap) { /* If we need to write to this bitmap, check that we can: */ if (bitmap_mode != BITMAP_SYNC_MODE_NEVER && @@ -504,39 +450,28 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, goto error; } - /* - * If source is in backing chain of target assume that target is going to be - * used for "image fleecing", i.e. it should represent a kind of snapshot of - * source at backup-start point in time. And target is going to be read by - * somebody (for example, used as NBD export) during backup job. - * - * In this case, we need to add BDRV_REQ_SERIALISING write flag to avoid - * intersection of backup writes and third party reads from target, - * otherwise reading from target we may occasionally read already updated by - * guest data. - * - * For more information see commit f8d59dfb40bb and test - * tests/qemu-iotests/222 - */ - write_flags = (bdrv_chain_contains(target, bs) ? BDRV_REQ_SERIALISING : 0) | - (compress ? 
BDRV_REQ_WRITE_COMPRESSED : 0), + cbw = bdrv_cbw_append(bs, target, filter_node_name, &bcs, errp); + if (!cbw) { + goto error; + } - backup_top = bdrv_backup_top_append(bs, target, filter_node_name, - cluster_size, perf, - write_flags, &bcs, errp); - if (!backup_top) { + cluster_size = block_copy_cluster_size(bcs); + + if (perf->max_chunk && perf->max_chunk < cluster_size) { + error_setg(errp, "Required max-chunk (%" PRIi64 ") is less than backup " + "cluster size (%" PRIi64 ")", perf->max_chunk, cluster_size); goto error; } /* job->len is fixed, so we can't allow resize */ - job = block_job_create(job_id, &backup_job_driver, txn, backup_top, + job = block_job_create(job_id, &backup_job_driver, txn, cbw, 0, BLK_PERM_ALL, speed, creation_flags, cb, opaque, errp); if (!job) { goto error; } - job->backup_top = backup_top; + job->cbw = cbw; job->source_bs = bs; job->target_bs = target; job->on_source_error = on_source_error; @@ -549,10 +484,11 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, job->len = len; job->perf = *perf; + block_copy_set_copy_opts(bcs, perf->use_copy_range, compress); block_copy_set_progress_meter(bcs, &job->common.job.progress); block_copy_set_speed(bcs, speed); - /* Required permissions are already taken by backup-top target */ + /* Required permissions are taken by copy-before-write filter target */ block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL, &error_abort); @@ -562,8 +498,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, if (sync_bitmap) { bdrv_reclaim_dirty_bitmap(sync_bitmap, NULL); } - if (backup_top) { - bdrv_backup_top_drop(backup_top); + if (cbw) { + bdrv_cbw_drop(cbw); } return NULL; diff --git a/block/blkdebug.c b/block/blkdebug.c index 8b67554bec..4265ca125e 100644 --- a/block/blkdebug.c +++ b/block/blkdebug.c @@ -503,12 +503,9 @@ static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags, } /* Open the image file */ - bs->file = bdrv_open_child(qemu_opt_get(opts, "x-image"), options, "image", - bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - false, errp); - if (!bs->file) { - ret = -EINVAL; + ret = bdrv_open_file_child(qemu_opt_get(opts, "x-image"), options, "image", + bs, errp); + if (ret < 0) { goto out; } @@ -631,8 +628,8 @@ static int rule_check(BlockDriverState *bs, uint64_t offset, uint64_t bytes, } static int coroutine_fn -blkdebug_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +blkdebug_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { int err; @@ -652,8 +649,8 @@ blkdebug_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, } static int coroutine_fn -blkdebug_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +blkdebug_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { int err; @@ -672,7 +669,7 @@ blkdebug_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, return bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags); } -static int blkdebug_co_flush(BlockDriverState *bs) +static int coroutine_fn blkdebug_co_flush(BlockDriverState *bs) { int err = rule_check(bs, 0, 0, BLKDEBUG_IO_TYPE_FLUSH); @@ -684,7 +681,7 @@ static int blkdebug_co_flush(BlockDriverState *bs) } static int coroutine_fn blkdebug_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, + int64_t offset, int64_t bytes, 
BdrvRequestFlags flags) { uint32_t align = MAX(bs->bl.request_alignment, @@ -717,7 +714,7 @@ static int coroutine_fn blkdebug_co_pwrite_zeroes(BlockDriverState *bs, } static int coroutine_fn blkdebug_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { uint32_t align = bs->bl.pdiscard_alignment; int err; diff --git a/block/blkio.c b/block/blkio.c new file mode 100644 index 0000000000..5eae3adfaf --- /dev/null +++ b/block/blkio.c @@ -0,0 +1,1046 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * libblkio BlockDriver + * + * Copyright Red Hat, Inc. + * + * Author: + * Stefan Hajnoczi + */ + +#include "qemu/osdep.h" +#include +#include "block/block_int.h" +#include "exec/memory.h" +#include "exec/cpu-common.h" /* for qemu_ram_get_fd() */ +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qapi/qmp/qdict.h" +#include "qemu/module.h" +#include "exec/memory.h" /* for ram_block_discard_disable() */ + +/* + * Keep the QEMU BlockDriver names identical to the libblkio driver names. + * Using macros instead of typing out the string literals avoids typos. + */ +#define DRIVER_IO_URING "io_uring" +#define DRIVER_NVME_IO_URING "nvme-io_uring" +#define DRIVER_VIRTIO_BLK_VFIO_PCI "virtio-blk-vfio-pci" +#define DRIVER_VIRTIO_BLK_VHOST_USER "virtio-blk-vhost-user" +#define DRIVER_VIRTIO_BLK_VHOST_VDPA "virtio-blk-vhost-vdpa" + +/* + * Allocated bounce buffers are kept in a list sorted by buffer address. + */ +typedef struct BlkioBounceBuf { + QLIST_ENTRY(BlkioBounceBuf) next; + + /* The bounce buffer */ + struct iovec buf; +} BlkioBounceBuf; + +typedef struct { + /* + * libblkio is not thread-safe so this lock protects ->blkio and + * ->blkioq. + */ + QemuMutex blkio_lock; + struct blkio *blkio; + struct blkioq *blkioq; /* make this multi-queue in the future... */ + int completion_fd; + + /* + * Polling fetches the next completion into this field. + * + * No lock is necessary since only one thread calls aio_poll() and invokes + * fd and poll handlers. + */ + struct blkio_completion poll_completion; + + /* + * Protects ->bounce_pool, ->bounce_bufs, ->bounce_available. + * + * Lock ordering: ->bounce_lock before ->blkio_lock. + */ + CoMutex bounce_lock; + + /* Bounce buffer pool */ + struct blkio_mem_region bounce_pool; + + /* Sorted list of allocated bounce buffers */ + QLIST_HEAD(, BlkioBounceBuf) bounce_bufs; + + /* Queue for coroutines waiting for bounce buffer space */ + CoQueue bounce_available; + + /* The value of the "mem-region-alignment" property */ + size_t mem_region_alignment; + + /* Can we skip adding/deleting blkio_mem_regions? */ + bool needs_mem_regions; + + /* Are file descriptors necessary for blkio_mem_regions? */ + bool needs_mem_region_fd; + + /* Are madvise(MADV_DONTNEED)-style operations unavailable? 
*/ + bool may_pin_mem_regions; +} BDRVBlkioState; + +/* Called with s->bounce_lock held */ +static int blkio_resize_bounce_pool(BDRVBlkioState *s, int64_t bytes) +{ + /* There can be no allocated bounce buffers during resize */ + assert(QLIST_EMPTY(&s->bounce_bufs)); + + /* Pad size to reduce frequency of resize calls */ + bytes += 128 * 1024; + + WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { + int ret; + + if (s->bounce_pool.addr) { + blkio_unmap_mem_region(s->blkio, &s->bounce_pool); + blkio_free_mem_region(s->blkio, &s->bounce_pool); + memset(&s->bounce_pool, 0, sizeof(s->bounce_pool)); + } + + /* Automatically freed when s->blkio is destroyed */ + ret = blkio_alloc_mem_region(s->blkio, &s->bounce_pool, bytes); + if (ret < 0) { + return ret; + } + + ret = blkio_map_mem_region(s->blkio, &s->bounce_pool); + if (ret < 0) { + blkio_free_mem_region(s->blkio, &s->bounce_pool); + memset(&s->bounce_pool, 0, sizeof(s->bounce_pool)); + return ret; + } + } + + return 0; +} + +/* Called with s->bounce_lock held */ +static bool +blkio_do_alloc_bounce_buffer(BDRVBlkioState *s, BlkioBounceBuf *bounce, + int64_t bytes) +{ + void *addr = s->bounce_pool.addr; + BlkioBounceBuf *cur = NULL; + BlkioBounceBuf *prev = NULL; + ptrdiff_t space; + + /* + * This is just a linear search over the holes between requests. An + * efficient allocator would be nice. + */ + QLIST_FOREACH(cur, &s->bounce_bufs, next) { + space = cur->buf.iov_base - addr; + if (bytes <= space) { + QLIST_INSERT_BEFORE(cur, bounce, next); + bounce->buf.iov_base = addr; + bounce->buf.iov_len = bytes; + return true; + } + + addr = cur->buf.iov_base + cur->buf.iov_len; + prev = cur; + } + + /* Is there space after the last request? */ + space = s->bounce_pool.addr + s->bounce_pool.len - addr; + if (bytes > space) { + return false; + } + if (prev) { + QLIST_INSERT_AFTER(prev, bounce, next); + } else { + QLIST_INSERT_HEAD(&s->bounce_bufs, bounce, next); + } + bounce->buf.iov_base = addr; + bounce->buf.iov_len = bytes; + return true; +} + +static int coroutine_fn +blkio_alloc_bounce_buffer(BDRVBlkioState *s, BlkioBounceBuf *bounce, + int64_t bytes) +{ + /* + * Ensure fairness: first time around we join the back of the queue, + * subsequently we join the front so we don't lose our place. + */ + CoQueueWaitFlags wait_flags = 0; + + QEMU_LOCK_GUARD(&s->bounce_lock); + + /* Ensure fairness: don't even try if other requests are already waiting */ + if (!qemu_co_queue_empty(&s->bounce_available)) { + qemu_co_queue_wait_flags(&s->bounce_available, &s->bounce_lock, + wait_flags); + wait_flags = CO_QUEUE_WAIT_FRONT; + } + + while (true) { + if (blkio_do_alloc_bounce_buffer(s, bounce, bytes)) { + /* Kick the next queued request since there may be space */ + qemu_co_queue_next(&s->bounce_available); + return 0; + } + + /* + * If there are no in-flight requests then the pool was simply too + * small. 
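
blkio_do_alloc_bounce_buffer() above is a first-fit scan over the gaps between already-allocated buffers in the address-sorted list. A standalone toy version of the same search, using a fixed array of (start, len) pairs instead of the QLIST; all names and numbers are invented for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct { size_t start, len; } Buf;

/*
 * Find a first-fit hole of @want bytes in a pool of @pool_len bytes, given
 * @n existing allocations sorted by start offset. Returns true and sets
 * *out_start on success.
 */
static bool first_fit(const Buf *bufs, int n, size_t pool_len,
                      size_t want, size_t *out_start)
{
    size_t cursor = 0;   /* end of the previous allocation */

    for (int i = 0; i < n; i++) {
        if (bufs[i].start - cursor >= want) {   /* hole before bufs[i] */
            *out_start = cursor;
            return true;
        }
        cursor = bufs[i].start + bufs[i].len;
    }
    if (pool_len - cursor >= want) {            /* hole after the last one */
        *out_start = cursor;
        return true;
    }
    return false;                               /* caller must wait or resize */
}

int main(void)
{
    Buf in_use[] = { { 0, 4096 }, { 8192, 4096 } };   /* sorted by start */
    size_t start;

    if (first_fit(in_use, 2, 64 * 1024, 2048, &start)) {
        printf("allocate 2048 bytes at offset %zu\n", start);   /* 4096 */
    }
    return 0;
}
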
+ */ + if (QLIST_EMPTY(&s->bounce_bufs)) { + bool ok; + int ret; + + ret = blkio_resize_bounce_pool(s, bytes); + if (ret < 0) { + /* Kick the next queued request since that may fail too */ + qemu_co_queue_next(&s->bounce_available); + return ret; + } + + ok = blkio_do_alloc_bounce_buffer(s, bounce, bytes); + assert(ok); /* must have space this time */ + return 0; + } + + qemu_co_queue_wait_flags(&s->bounce_available, &s->bounce_lock, + wait_flags); + wait_flags = CO_QUEUE_WAIT_FRONT; + } +} + +static void coroutine_fn blkio_free_bounce_buffer(BDRVBlkioState *s, + BlkioBounceBuf *bounce) +{ + QEMU_LOCK_GUARD(&s->bounce_lock); + + QLIST_REMOVE(bounce, next); + + /* Wake up waiting coroutines since space may now be available */ + qemu_co_queue_next(&s->bounce_available); +} + +/* For async to .bdrv_co_*() conversion */ +typedef struct { + Coroutine *coroutine; + int ret; +} BlkioCoData; + +static void blkio_completion_fd_read(void *opaque) +{ + BlockDriverState *bs = opaque; + BDRVBlkioState *s = bs->opaque; + uint64_t val; + int ret; + + /* Polling may have already fetched a completion */ + if (s->poll_completion.user_data != NULL) { + BlkioCoData *cod = s->poll_completion.user_data; + cod->ret = s->poll_completion.ret; + + /* Clear it in case aio_co_wake() enters a nested event loop */ + s->poll_completion.user_data = NULL; + + aio_co_wake(cod->coroutine); + } + + /* Reset completion fd status */ + ret = read(s->completion_fd, &val, sizeof(val)); + + /* Ignore errors, there's nothing we can do */ + (void)ret; + + /* + * Reading one completion at a time makes nested event loop re-entrancy + * simple. Change this loop to get multiple completions in one go if it + * becomes a performance bottleneck. + */ + while (true) { + struct blkio_completion completion; + + WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { + ret = blkioq_do_io(s->blkioq, &completion, 0, 1, NULL); + } + if (ret != 1) { + break; + } + + BlkioCoData *cod = completion.user_data; + cod->ret = completion.ret; + aio_co_wake(cod->coroutine); + } +} + +static bool blkio_completion_fd_poll(void *opaque) +{ + BlockDriverState *bs = opaque; + BDRVBlkioState *s = bs->opaque; + int ret; + + /* Just in case we already fetched a completion */ + if (s->poll_completion.user_data != NULL) { + return true; + } + + WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { + ret = blkioq_do_io(s->blkioq, &s->poll_completion, 0, 1, NULL); + } + return ret == 1; +} + +static void blkio_completion_fd_poll_ready(void *opaque) +{ + blkio_completion_fd_read(opaque); +} + +static void blkio_attach_aio_context(BlockDriverState *bs, + AioContext *new_context) +{ + BDRVBlkioState *s = bs->opaque; + + aio_set_fd_handler(new_context, + s->completion_fd, + false, + blkio_completion_fd_read, + NULL, + blkio_completion_fd_poll, + blkio_completion_fd_poll_ready, + bs); +} + +static void blkio_detach_aio_context(BlockDriverState *bs) +{ + BDRVBlkioState *s = bs->opaque; + + aio_set_fd_handler(bdrv_get_aio_context(bs), + s->completion_fd, + false, NULL, NULL, NULL, NULL, NULL); +} + +/* Call with s->blkio_lock held to submit I/O after enqueuing a new request */ +static void blkio_submit_io(BlockDriverState *bs) +{ + if (qatomic_read(&bs->io_plugged) == 0) { + BDRVBlkioState *s = bs->opaque; + + blkioq_do_io(s->blkioq, NULL, 0, 0, NULL); + } +} + +static int coroutine_fn +blkio_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes) +{ + BDRVBlkioState *s = bs->opaque; + BlkioCoData cod = { + .coroutine = qemu_coroutine_self(), + }; + + WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { + 
blkioq_discard(s->blkioq, offset, bytes, &cod, 0); + blkio_submit_io(bs); + } + + qemu_coroutine_yield(); + return cod.ret; +} + +static int coroutine_fn +blkio_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) +{ + BlkioCoData cod = { + .coroutine = qemu_coroutine_self(), + }; + BDRVBlkioState *s = bs->opaque; + bool use_bounce_buffer = + s->needs_mem_regions && !(flags & BDRV_REQ_REGISTERED_BUF); + BlkioBounceBuf bounce; + struct iovec *iov = qiov->iov; + int iovcnt = qiov->niov; + + if (use_bounce_buffer) { + int ret = blkio_alloc_bounce_buffer(s, &bounce, bytes); + if (ret < 0) { + return ret; + } + + iov = &bounce.buf; + iovcnt = 1; + } + + WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { + blkioq_readv(s->blkioq, offset, iov, iovcnt, &cod, 0); + blkio_submit_io(bs); + } + + qemu_coroutine_yield(); + + if (use_bounce_buffer) { + if (cod.ret == 0) { + qemu_iovec_from_buf(qiov, 0, + bounce.buf.iov_base, + bounce.buf.iov_len); + } + + blkio_free_bounce_buffer(s, &bounce); + } + + return cod.ret; +} + +static int coroutine_fn blkio_co_pwritev(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) +{ + uint32_t blkio_flags = (flags & BDRV_REQ_FUA) ? BLKIO_REQ_FUA : 0; + BlkioCoData cod = { + .coroutine = qemu_coroutine_self(), + }; + BDRVBlkioState *s = bs->opaque; + bool use_bounce_buffer = + s->needs_mem_regions && !(flags & BDRV_REQ_REGISTERED_BUF); + BlkioBounceBuf bounce; + struct iovec *iov = qiov->iov; + int iovcnt = qiov->niov; + + if (use_bounce_buffer) { + int ret = blkio_alloc_bounce_buffer(s, &bounce, bytes); + if (ret < 0) { + return ret; + } + + qemu_iovec_to_buf(qiov, 0, bounce.buf.iov_base, bytes); + iov = &bounce.buf; + iovcnt = 1; + } + + WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { + blkioq_writev(s->blkioq, offset, iov, iovcnt, &cod, blkio_flags); + blkio_submit_io(bs); + } + + qemu_coroutine_yield(); + + if (use_bounce_buffer) { + blkio_free_bounce_buffer(s, &bounce); + } + + return cod.ret; +} + +static int coroutine_fn blkio_co_flush(BlockDriverState *bs) +{ + BDRVBlkioState *s = bs->opaque; + BlkioCoData cod = { + .coroutine = qemu_coroutine_self(), + }; + + WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { + blkioq_flush(s->blkioq, &cod, 0); + blkio_submit_io(bs); + } + + qemu_coroutine_yield(); + return cod.ret; +} + +static int coroutine_fn blkio_co_pwrite_zeroes(BlockDriverState *bs, + int64_t offset, int64_t bytes, BdrvRequestFlags flags) +{ + BDRVBlkioState *s = bs->opaque; + BlkioCoData cod = { + .coroutine = qemu_coroutine_self(), + }; + uint32_t blkio_flags = 0; + + if (flags & BDRV_REQ_FUA) { + blkio_flags |= BLKIO_REQ_FUA; + } + if (!(flags & BDRV_REQ_MAY_UNMAP)) { + blkio_flags |= BLKIO_REQ_NO_UNMAP; + } + if (flags & BDRV_REQ_NO_FALLBACK) { + blkio_flags |= BLKIO_REQ_NO_FALLBACK; + } + + WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { + blkioq_write_zeroes(s->blkioq, offset, bytes, &cod, blkio_flags); + blkio_submit_io(bs); + } + + qemu_coroutine_yield(); + return cod.ret; +} + +static void blkio_io_unplug(BlockDriverState *bs) +{ + BDRVBlkioState *s = bs->opaque; + + WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { + blkio_submit_io(bs); + } +} + +typedef enum { + BMRR_OK, + BMRR_SKIP, + BMRR_FAIL, +} BlkioMemRegionResult; + +/* + * Produce a struct blkio_mem_region for a given address and size. + * + * This function produces identical results when called multiple times with the + * same arguments. 
This property is necessary because blkio_unmap_mem_region() + * must receive the same struct blkio_mem_region field values that were passed + * to blkio_map_mem_region(). + */ +static BlkioMemRegionResult +blkio_mem_region_from_host(BlockDriverState *bs, + void *host, size_t size, + struct blkio_mem_region *region, + Error **errp) +{ + BDRVBlkioState *s = bs->opaque; + int fd = -1; + ram_addr_t fd_offset = 0; + + if (((uintptr_t)host | size) % s->mem_region_alignment) { + error_setg(errp, "unaligned buf %p with size %zu", host, size); + return BMRR_FAIL; + } + + /* Attempt to find the fd for the underlying memory */ + if (s->needs_mem_region_fd) { + RAMBlock *ram_block; + RAMBlock *end_block; + ram_addr_t offset; + + /* + * bdrv_register_buf() is called with the BQL held so mr lives at least + * until this function returns. + */ + ram_block = qemu_ram_block_from_host(host, false, &fd_offset); + if (ram_block) { + fd = qemu_ram_get_fd(ram_block); + } + if (fd == -1) { + /* + * Ideally every RAMBlock would have an fd. pc-bios and other + * things don't. Luckily they are usually not I/O buffers and we + * can just ignore them. + */ + return BMRR_SKIP; + } + + /* Make sure the fd covers the entire range */ + end_block = qemu_ram_block_from_host(host + size - 1, false, &offset); + if (ram_block != end_block) { + error_setg(errp, "registered buffer at %p with size %zu extends " + "beyond RAMBlock", host, size); + return BMRR_FAIL; + } + } + + *region = (struct blkio_mem_region){ + .addr = host, + .len = size, + .fd = fd, + .fd_offset = fd_offset, + }; + return BMRR_OK; +} + +static bool blkio_register_buf(BlockDriverState *bs, void *host, size_t size, + Error **errp) +{ + BDRVBlkioState *s = bs->opaque; + struct blkio_mem_region region; + BlkioMemRegionResult region_result; + int ret; + + /* + * Mapping memory regions conflicts with RAM discard (virtio-mem) when + * there is pinning, so only do it when necessary. 
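
The entry check in blkio_mem_region_from_host() rejects a buffer with a single test, ((uintptr_t)host | size) % alignment: for a power-of-two alignment, OR-ing address and size leaves the low bits clear only when both are multiples of it. A standalone check of that equivalence, with invented values:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* For a power-of-two @align, (addr | size) % align == 0 exactly when both
 * addr and size are multiples of align, because the OR just merges the low
 * bits that the modulo inspects. */
static int both_aligned(uintptr_t addr, size_t size, size_t align)
{
    return ((addr | size) % align) == 0;
}

int main(void)
{
    size_t align = 4096;                           /* illustrative alignment */

    assert(both_aligned(0x10000, 8192, align));    /* both multiples: ok */
    assert(!both_aligned(0x10010, 8192, align));   /* misaligned address */
    assert(!both_aligned(0x10000, 4100, align));   /* misaligned size */

    printf("combined alignment check behaves as expected\n");
    return 0;
}
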
+ */ + if (!s->needs_mem_regions && s->may_pin_mem_regions) { + return true; + } + + region_result = blkio_mem_region_from_host(bs, host, size, ®ion, errp); + if (region_result == BMRR_SKIP) { + return true; + } else if (region_result != BMRR_OK) { + return false; + } + + WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { + ret = blkio_map_mem_region(s->blkio, ®ion); + } + + if (ret < 0) { + error_setg(errp, "Failed to add blkio mem region %p with size %zu: %s", + host, size, blkio_get_error_msg()); + return false; + } + return true; +} + +static void blkio_unregister_buf(BlockDriverState *bs, void *host, size_t size) +{ + BDRVBlkioState *s = bs->opaque; + struct blkio_mem_region region; + + /* See blkio_register_buf() */ + if (!s->needs_mem_regions && s->may_pin_mem_regions) { + return; + } + + if (blkio_mem_region_from_host(bs, host, size, ®ion, NULL) != BMRR_OK) { + return; + } + + WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { + blkio_unmap_mem_region(s->blkio, ®ion); + } +} + +static int blkio_io_uring_open(BlockDriverState *bs, QDict *options, int flags, + Error **errp) +{ + const char *filename = qdict_get_str(options, "filename"); + BDRVBlkioState *s = bs->opaque; + int ret; + + ret = blkio_set_str(s->blkio, "path", filename); + qdict_del(options, "filename"); + if (ret < 0) { + error_setg_errno(errp, -ret, "failed to set path: %s", + blkio_get_error_msg()); + return ret; + } + + if (flags & BDRV_O_NOCACHE) { + ret = blkio_set_bool(s->blkio, "direct", true); + if (ret < 0) { + error_setg_errno(errp, -ret, "failed to set direct: %s", + blkio_get_error_msg()); + return ret; + } + } + + return 0; +} + +static int blkio_nvme_io_uring(BlockDriverState *bs, QDict *options, int flags, + Error **errp) +{ + const char *path = qdict_get_try_str(options, "path"); + BDRVBlkioState *s = bs->opaque; + int ret; + + if (!path) { + error_setg(errp, "missing 'path' option"); + return -EINVAL; + } + + ret = blkio_set_str(s->blkio, "path", path); + qdict_del(options, "path"); + if (ret < 0) { + error_setg_errno(errp, -ret, "failed to set path: %s", + blkio_get_error_msg()); + return ret; + } + + if (!(flags & BDRV_O_NOCACHE)) { + error_setg(errp, "cache.direct=off is not supported"); + return -EINVAL; + } + + return 0; +} + +static int blkio_virtio_blk_common_open(BlockDriverState *bs, + QDict *options, int flags, Error **errp) +{ + const char *path = qdict_get_try_str(options, "path"); + BDRVBlkioState *s = bs->opaque; + int ret; + + if (!path) { + error_setg(errp, "missing 'path' option"); + return -EINVAL; + } + + ret = blkio_set_str(s->blkio, "path", path); + qdict_del(options, "path"); + if (ret < 0) { + error_setg_errno(errp, -ret, "failed to set path: %s", + blkio_get_error_msg()); + return ret; + } + + if (!(flags & BDRV_O_NOCACHE)) { + error_setg(errp, "cache.direct=off is not supported"); + return -EINVAL; + } + return 0; +} + +static int blkio_file_open(BlockDriverState *bs, QDict *options, int flags, + Error **errp) +{ + const char *blkio_driver = bs->drv->protocol_name; + BDRVBlkioState *s = bs->opaque; + int ret; + + ret = blkio_create(blkio_driver, &s->blkio); + if (ret < 0) { + error_setg_errno(errp, -ret, "blkio_create failed: %s", + blkio_get_error_msg()); + return ret; + } + + if (strcmp(blkio_driver, DRIVER_IO_URING) == 0) { + ret = blkio_io_uring_open(bs, options, flags, errp); + } else if (strcmp(blkio_driver, DRIVER_NVME_IO_URING) == 0) { + ret = blkio_nvme_io_uring(bs, options, flags, errp); + } else if (strcmp(blkio_driver, DRIVER_VIRTIO_BLK_VFIO_PCI) == 0) { + ret = 
blkio_virtio_blk_common_open(bs, options, flags, errp); + } else if (strcmp(blkio_driver, DRIVER_VIRTIO_BLK_VHOST_USER) == 0) { + ret = blkio_virtio_blk_common_open(bs, options, flags, errp); + } else if (strcmp(blkio_driver, DRIVER_VIRTIO_BLK_VHOST_VDPA) == 0) { + ret = blkio_virtio_blk_common_open(bs, options, flags, errp); + } else { + g_assert_not_reached(); + } + if (ret < 0) { + blkio_destroy(&s->blkio); + return ret; + } + + if (!(flags & BDRV_O_RDWR)) { + ret = blkio_set_bool(s->blkio, "read-only", true); + if (ret < 0) { + error_setg_errno(errp, -ret, "failed to set read-only: %s", + blkio_get_error_msg()); + blkio_destroy(&s->blkio); + return ret; + } + } + + ret = blkio_connect(s->blkio); + if (ret < 0) { + error_setg_errno(errp, -ret, "blkio_connect failed: %s", + blkio_get_error_msg()); + blkio_destroy(&s->blkio); + return ret; + } + + ret = blkio_get_bool(s->blkio, + "needs-mem-regions", + &s->needs_mem_regions); + if (ret < 0) { + error_setg_errno(errp, -ret, + "failed to get needs-mem-regions: %s", + blkio_get_error_msg()); + blkio_destroy(&s->blkio); + return ret; + } + + ret = blkio_get_bool(s->blkio, + "needs-mem-region-fd", + &s->needs_mem_region_fd); + if (ret < 0) { + error_setg_errno(errp, -ret, + "failed to get needs-mem-region-fd: %s", + blkio_get_error_msg()); + blkio_destroy(&s->blkio); + return ret; + } + + ret = blkio_get_uint64(s->blkio, + "mem-region-alignment", + &s->mem_region_alignment); + if (ret < 0) { + error_setg_errno(errp, -ret, + "failed to get mem-region-alignment: %s", + blkio_get_error_msg()); + blkio_destroy(&s->blkio); + return ret; + } + + ret = blkio_get_bool(s->blkio, + "may-pin-mem-regions", + &s->may_pin_mem_regions); + if (ret < 0) { + /* Be conservative (assume pinning) if the property is not supported */ + s->may_pin_mem_regions = s->needs_mem_regions; + } + + /* + * Notify if libblkio drivers pin memory and prevent features like + * virtio-mem from working. 
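
blkio_file_open() always follows the same libblkio bring-up order: blkio_create(), per-driver property setup, blkio_connect(), property queries, then blkio_start(). A condensed standalone sketch of that sequence, using only the libblkio calls that appear in this patch; the header name and the image path are assumptions, and error handling is trimmed:

#include <blkio.h>     /* libblkio public header (assumed name) */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    struct blkio *b;
    uint64_t capacity;
    int ret;

    /* 1. Instantiate the io_uring driver (same name string the patch uses). */
    ret = blkio_create("io_uring", &b);
    if (ret < 0) {
        fprintf(stderr, "blkio_create: %s\n", blkio_get_error_msg());
        return 1;
    }

    /* 2. Configure properties before connecting; the path is made up. */
    blkio_set_str(b, "path", "/tmp/example.img");
    blkio_set_bool(b, "read-only", true);

    /* 3. Connect, then start the queues. */
    if (blkio_connect(b) < 0 || blkio_start(b) < 0) {
        fprintf(stderr, "bring-up failed: %s\n", blkio_get_error_msg());
        blkio_destroy(&b);
        return 1;
    }

    /* 4. Query the device size the same way blkio_getlength() does. */
    blkio_get_uint64(b, "capacity", &capacity);
    printf("capacity: %llu bytes\n", (unsigned long long)capacity);

    blkio_destroy(&b);
    return 0;
}
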
+ */ + if (s->may_pin_mem_regions) { + ret = ram_block_discard_disable(true); + if (ret < 0) { + error_setg_errno(errp, -ret, "ram_block_discard_disable() failed"); + blkio_destroy(&s->blkio); + return ret; + } + } + + ret = blkio_start(s->blkio); + if (ret < 0) { + error_setg_errno(errp, -ret, "blkio_start failed: %s", + blkio_get_error_msg()); + blkio_destroy(&s->blkio); + if (s->may_pin_mem_regions) { + ram_block_discard_disable(false); + } + return ret; + } + + bs->supported_write_flags = BDRV_REQ_FUA | BDRV_REQ_REGISTERED_BUF; + bs->supported_zero_flags = BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | + BDRV_REQ_NO_FALLBACK; + + qemu_mutex_init(&s->blkio_lock); + qemu_co_mutex_init(&s->bounce_lock); + qemu_co_queue_init(&s->bounce_available); + QLIST_INIT(&s->bounce_bufs); + s->blkioq = blkio_get_queue(s->blkio, 0); + s->completion_fd = blkioq_get_completion_fd(s->blkioq); + + blkio_attach_aio_context(bs, bdrv_get_aio_context(bs)); + return 0; +} + +static void blkio_close(BlockDriverState *bs) +{ + BDRVBlkioState *s = bs->opaque; + + /* There is no destroy() API for s->bounce_lock */ + + qemu_mutex_destroy(&s->blkio_lock); + blkio_detach_aio_context(bs); + blkio_destroy(&s->blkio); + + if (s->may_pin_mem_regions) { + ram_block_discard_disable(false); + } +} + +static int64_t blkio_getlength(BlockDriverState *bs) +{ + BDRVBlkioState *s = bs->opaque; + uint64_t capacity; + int ret; + + WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { + ret = blkio_get_uint64(s->blkio, "capacity", &capacity); + } + if (ret < 0) { + return -ret; + } + + return capacity; +} + +static int coroutine_fn blkio_truncate(BlockDriverState *bs, int64_t offset, + bool exact, PreallocMode prealloc, + BdrvRequestFlags flags, Error **errp) +{ + int64_t current_length; + + if (prealloc != PREALLOC_MODE_OFF) { + error_setg(errp, "Unsupported preallocation mode '%s'", + PreallocMode_str(prealloc)); + return -ENOTSUP; + } + + current_length = blkio_getlength(bs); + + if (offset > current_length) { + error_setg(errp, "Cannot grow device"); + return -EINVAL; + } else if (exact && offset != current_length) { + error_setg(errp, "Cannot resize device"); + return -ENOTSUP; + } + + return 0; +} + +static int blkio_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) +{ + return 0; +} + +static void blkio_refresh_limits(BlockDriverState *bs, Error **errp) +{ + BDRVBlkioState *s = bs->opaque; + QEMU_LOCK_GUARD(&s->blkio_lock); + int value; + int ret; + + ret = blkio_get_int(s->blkio, "request-alignment", &value); + if (ret < 0) { + error_setg_errno(errp, -ret, "failed to get \"request-alignment\": %s", + blkio_get_error_msg()); + return; + } + bs->bl.request_alignment = value; + if (bs->bl.request_alignment < 1 || + bs->bl.request_alignment >= INT_MAX || + !is_power_of_2(bs->bl.request_alignment)) { + error_setg(errp, "invalid \"request-alignment\" value %" PRIu32 ", " + "must be a power of 2 less than INT_MAX", + bs->bl.request_alignment); + return; + } + + ret = blkio_get_int(s->blkio, "optimal-io-size", &value); + if (ret < 0) { + error_setg_errno(errp, -ret, "failed to get \"optimal-io-size\": %s", + blkio_get_error_msg()); + return; + } + bs->bl.opt_transfer = value; + if (bs->bl.opt_transfer > INT_MAX || + (bs->bl.opt_transfer % bs->bl.request_alignment)) { + error_setg(errp, "invalid \"optimal-io-size\" value %" PRIu32 ", must " + "be a multiple of %" PRIu32, bs->bl.opt_transfer, + bs->bl.request_alignment); + return; + } + + ret = blkio_get_int(s->blkio, "max-transfer", &value); + if (ret < 0) { + error_setg_errno(errp, -ret, "failed to get 
\"max-transfer\": %s", + blkio_get_error_msg()); + return; + } + bs->bl.max_transfer = value; + if ((bs->bl.max_transfer % bs->bl.request_alignment) || + (bs->bl.opt_transfer && (bs->bl.max_transfer % bs->bl.opt_transfer))) { + error_setg(errp, "invalid \"max-transfer\" value %" PRIu32 ", must be " + "a multiple of %" PRIu32 " and %" PRIu32 " (if non-zero)", + bs->bl.max_transfer, bs->bl.request_alignment, + bs->bl.opt_transfer); + return; + } + + ret = blkio_get_int(s->blkio, "buf-alignment", &value); + if (ret < 0) { + error_setg_errno(errp, -ret, "failed to get \"buf-alignment\": %s", + blkio_get_error_msg()); + return; + } + if (value < 1) { + error_setg(errp, "invalid \"buf-alignment\" value %d, must be " + "positive", value); + return; + } + bs->bl.min_mem_alignment = value; + + ret = blkio_get_int(s->blkio, "optimal-buf-alignment", &value); + if (ret < 0) { + error_setg_errno(errp, -ret, + "failed to get \"optimal-buf-alignment\": %s", + blkio_get_error_msg()); + return; + } + if (value < 1) { + error_setg(errp, "invalid \"optimal-buf-alignment\" value %d, " + "must be positive", value); + return; + } + bs->bl.opt_mem_alignment = value; + + ret = blkio_get_int(s->blkio, "max-segments", &value); + if (ret < 0) { + error_setg_errno(errp, -ret, "failed to get \"max-segments\": %s", + blkio_get_error_msg()); + return; + } + if (value < 1) { + error_setg(errp, "invalid \"max-segments\" value %d, must be positive", + value); + return; + } + bs->bl.max_iov = value; +} + +/* + * TODO + * Missing libblkio APIs: + * - block_status + * - co_invalidate_cache + * + * Out of scope? + * - create + * - truncate + */ + +#define BLKIO_DRIVER(name, ...) \ + { \ + .format_name = name, \ + .protocol_name = name, \ + .instance_size = sizeof(BDRVBlkioState), \ + .bdrv_file_open = blkio_file_open, \ + .bdrv_close = blkio_close, \ + .bdrv_getlength = blkio_getlength, \ + .bdrv_co_truncate = blkio_truncate, \ + .bdrv_get_info = blkio_get_info, \ + .bdrv_attach_aio_context = blkio_attach_aio_context, \ + .bdrv_detach_aio_context = blkio_detach_aio_context, \ + .bdrv_co_pdiscard = blkio_co_pdiscard, \ + .bdrv_co_preadv = blkio_co_preadv, \ + .bdrv_co_pwritev = blkio_co_pwritev, \ + .bdrv_co_flush_to_disk = blkio_co_flush, \ + .bdrv_co_pwrite_zeroes = blkio_co_pwrite_zeroes, \ + .bdrv_io_unplug = blkio_io_unplug, \ + .bdrv_refresh_limits = blkio_refresh_limits, \ + .bdrv_register_buf = blkio_register_buf, \ + .bdrv_unregister_buf = blkio_unregister_buf, \ + __VA_ARGS__ \ + } + +static BlockDriver bdrv_io_uring = BLKIO_DRIVER( + DRIVER_IO_URING, + .bdrv_needs_filename = true, +); + +static BlockDriver bdrv_nvme_io_uring = BLKIO_DRIVER( + DRIVER_NVME_IO_URING, +); + +static BlockDriver bdrv_virtio_blk_vfio_pci = BLKIO_DRIVER( + DRIVER_VIRTIO_BLK_VFIO_PCI +); + +static BlockDriver bdrv_virtio_blk_vhost_user = BLKIO_DRIVER( + DRIVER_VIRTIO_BLK_VHOST_USER +); + +static BlockDriver bdrv_virtio_blk_vhost_vdpa = BLKIO_DRIVER( + DRIVER_VIRTIO_BLK_VHOST_VDPA +); + +static void bdrv_blkio_init(void) +{ + bdrv_register(&bdrv_io_uring); + bdrv_register(&bdrv_nvme_io_uring); + bdrv_register(&bdrv_virtio_blk_vfio_pci); + bdrv_register(&bdrv_virtio_blk_vhost_user); + bdrv_register(&bdrv_virtio_blk_vhost_vdpa); +} + +block_init(bdrv_blkio_init); diff --git a/block/blklogwrites.c b/block/blklogwrites.c index b7579370a3..cef9efe55d 100644 --- a/block/blklogwrites.c +++ b/block/blklogwrites.c @@ -107,8 +107,8 @@ static uint64_t blk_log_writes_find_cur_log_sector(BdrvChild *log, struct log_write_entry cur_entry; while (cur_idx < 
nr_entries) { - int read_ret = bdrv_pread(log, cur_sector << sector_bits, &cur_entry, - sizeof(cur_entry)); + int read_ret = bdrv_pread(log, cur_sector << sector_bits, + sizeof(cur_entry), &cur_entry, 0); if (read_ret < 0) { error_setg_errno(errp, -read_ret, "Failed to read log entry %"PRIu64, cur_idx); @@ -155,11 +155,8 @@ static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags, } /* Open the file */ - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, false, - errp); - if (!bs->file) { - ret = -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { goto fail; } @@ -190,7 +187,7 @@ static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags, log_sb.nr_entries = cpu_to_le64(0); log_sb.sectorsize = cpu_to_le32(BDRV_SECTOR_SIZE); } else { - ret = bdrv_pread(s->log_file, 0, &log_sb, sizeof(log_sb)); + ret = bdrv_pread(s->log_file, 0, sizeof(log_sb), &log_sb, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Could not read log superblock"); goto fail_log; @@ -257,10 +254,6 @@ fail_log: s->log_file = NULL; } fail: - if (ret < 0) { - bdrv_unref_child(bs, bs->file); - bs->file = NULL; - } qemu_opts_del(opts); return ret; } @@ -301,8 +294,8 @@ static void blk_log_writes_refresh_limits(BlockDriverState *bs, Error **errp) } static int coroutine_fn -blk_log_writes_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +blk_log_writes_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags); } @@ -460,16 +453,16 @@ blk_log_writes_co_do_file_pdiscard(BlkLogWritesFileReq *fr) } static int coroutine_fn -blk_log_writes_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +blk_log_writes_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { return blk_log_writes_co_log(bs, offset, bytes, qiov, flags, blk_log_writes_co_do_file_pwritev, 0, false); } static int coroutine_fn -blk_log_writes_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes, - BdrvRequestFlags flags) +blk_log_writes_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, + int64_t bytes, BdrvRequestFlags flags) { return blk_log_writes_co_log(bs, offset, bytes, NULL, flags, blk_log_writes_co_do_file_pwrite_zeroes, 0, @@ -484,9 +477,9 @@ static int coroutine_fn blk_log_writes_co_flush_to_disk(BlockDriverState *bs) } static int coroutine_fn -blk_log_writes_co_pdiscard(BlockDriverState *bs, int64_t offset, int count) +blk_log_writes_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes) { - return blk_log_writes_co_log(bs, offset, count, NULL, 0, + return blk_log_writes_co_log(bs, offset, bytes, NULL, 0, blk_log_writes_co_do_file_pdiscard, LOG_DISCARD_FLAG, false); } diff --git a/block/blkreplay.c b/block/blkreplay.c index 4a247752fd..76a0b8d12a 100644 --- a/block/blkreplay.c +++ b/block/blkreplay.c @@ -26,11 +26,8 @@ static int blkreplay_open(BlockDriverState *bs, QDict *options, int flags, int ret; /* Open the image file */ - bs->file = bdrv_open_child(NULL, options, "image", bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - false, errp); - if (!bs->file) { - ret = -EINVAL; + ret = bdrv_open_file_child(NULL, options, "image", bs, errp); + if (ret < 0) { goto fail; } @@ -72,7 +69,7 @@ static void 
block_request_create(uint64_t reqid, BlockDriverState *bs, } static int coroutine_fn blkreplay_co_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { uint64_t reqid = blkreplay_next_id(); int ret = bdrv_co_preadv(bs->file, offset, bytes, qiov, flags); @@ -83,7 +80,7 @@ static int coroutine_fn blkreplay_co_preadv(BlockDriverState *bs, } static int coroutine_fn blkreplay_co_pwritev(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { uint64_t reqid = blkreplay_next_id(); int ret = bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags); @@ -94,7 +91,7 @@ static int coroutine_fn blkreplay_co_pwritev(BlockDriverState *bs, } static int coroutine_fn blkreplay_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, BdrvRequestFlags flags) + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { uint64_t reqid = blkreplay_next_id(); int ret = bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags); @@ -105,7 +102,7 @@ static int coroutine_fn blkreplay_co_pwrite_zeroes(BlockDriverState *bs, } static int coroutine_fn blkreplay_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { uint64_t reqid = blkreplay_next_id(); int ret = bdrv_co_pdiscard(bs->file, offset, bytes); diff --git a/block/blkverify.c b/block/blkverify.c index 188d7632fa..c60a2dc624 100644 --- a/block/blkverify.c +++ b/block/blkverify.c @@ -16,6 +16,7 @@ #include "qemu/cutils.h" #include "qemu/module.h" #include "qemu/option.h" +#include "qemu/memalign.h" typedef struct { BdrvChild *test_file; @@ -43,7 +44,7 @@ typedef struct BlkverifyRequest { QEMUIOVector *raw_qiov; /* cloned I/O vector for raw file */ } BlkverifyRequest; -static void GCC_FMT_ATTR(2, 3) blkverify_err(BlkverifyRequest *r, +static void G_GNUC_PRINTF(2, 3) blkverify_err(BlkverifyRequest *r, const char *fmt, ...) 
{ va_list ap; @@ -121,12 +122,9 @@ static int blkverify_open(BlockDriverState *bs, QDict *options, int flags, } /* Open the raw file */ - bs->file = bdrv_open_child(qemu_opt_get(opts, "x-raw"), options, "raw", - bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - false, errp); - if (!bs->file) { - ret = -EINVAL; + ret = bdrv_open_file_child(qemu_opt_get(opts, "x-raw"), options, "raw", + bs, errp); + if (ret < 0) { goto fail; } @@ -221,8 +219,8 @@ blkverify_co_prwv(BlockDriverState *bs, BlkverifyRequest *r, uint64_t offset, } static int coroutine_fn -blkverify_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +blkverify_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BlkverifyRequest r; QEMUIOVector raw_qiov; @@ -234,8 +232,8 @@ blkverify_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, qemu_iovec_init(&raw_qiov, qiov->niov); qemu_iovec_clone(&raw_qiov, qiov, buf); - ret = blkverify_co_prwv(bs, &r, offset, bytes, qiov, &raw_qiov, flags, - false); + ret = blkverify_co_prwv(bs, &r, offset, bytes, qiov, &raw_qiov, + flags & ~BDRV_REQ_REGISTERED_BUF, false); cmp_offset = qemu_iovec_compare(qiov, &raw_qiov); if (cmp_offset != -1) { @@ -250,14 +248,14 @@ blkverify_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, } static int coroutine_fn -blkverify_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +blkverify_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BlkverifyRequest r; return blkverify_co_prwv(bs, &r, offset, bytes, qiov, qiov, flags, true); } -static int blkverify_co_flush(BlockDriverState *bs) +static int coroutine_fn blkverify_co_flush(BlockDriverState *bs) { BDRVBlkverifyState *s = bs->opaque; diff --git a/block/block-backend.c b/block/block-backend.c index deb55c272e..d98a96ff37 100644 --- a/block/block-backend.c +++ b/block/block-backend.c @@ -14,6 +14,7 @@ #include "sysemu/block-backend.h" #include "block/block_int.h" #include "block/blockjob.h" +#include "block/coroutines.h" #include "block/throttle-groups.h" #include "hw/qdev-core.h" #include "sysemu/blockdev.h" @@ -55,9 +56,6 @@ struct BlockBackend { const BlockDevOps *dev_ops; void *dev_opaque; - /* the block size for which the guest device expects atomicity */ - int guest_block_size; - /* If the BDS tree is removed, some of its options are stored here (which * can be used to restore those options in the new BDS on insert) */ BlockBackendRootState root_state; @@ -78,6 +76,7 @@ struct BlockBackend { bool allow_aio_context_change; bool allow_write_beyond_eof; + /* Protected by BQL */ NotifierList remove_bs_notifiers, insert_bs_notifiers; QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers; @@ -110,12 +109,14 @@ static const AIOCBInfo block_backend_aiocb_info = { static void drive_info_del(DriveInfo *dinfo); static BlockBackend *bdrv_first_blk(BlockDriverState *bs); -/* All BlockBackends */ +/* All BlockBackends. Protected by BQL. */ static QTAILQ_HEAD(, BlockBackend) block_backends = QTAILQ_HEAD_INITIALIZER(block_backends); -/* All BlockBackends referenced by the monitor and which are iterated through by - * blk_next() */ +/* + * All BlockBackends referenced by the monitor and which are iterated through by + * blk_next(). Protected by BQL. 
+ */ static QTAILQ_HEAD(, BlockBackend) monitor_block_backends = QTAILQ_HEAD_INITIALIZER(monitor_block_backends); @@ -133,10 +134,9 @@ static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter); static void blk_root_change_media(BdrvChild *child, bool load); static void blk_root_resize(BdrvChild *child); -static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx, - GSList **ignore, Error **errp); -static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx, - GSList **ignore); +static bool blk_root_change_aio_ctx(BdrvChild *child, AioContext *ctx, + GHashTable *visited, Transaction *tran, + Error **errp); static char *blk_root_get_parent_desc(BdrvChild *child) { @@ -189,6 +189,7 @@ static void blk_root_activate(BdrvChild *child, Error **errp) { BlockBackend *blk = child->opaque; Error *local_err = NULL; + uint64_t saved_shared_perm; if (!blk->disable_perm) { return; @@ -196,12 +197,22 @@ static void blk_root_activate(BdrvChild *child, Error **errp) blk->disable_perm = false; + /* + * blk->shared_perm contains the permissions we want to share once + * migration is really completely done. For now, we need to share + * all; but we also need to retain blk->shared_perm, which is + * overwritten by a successful blk_set_perm() call. Save it and + * restore it below. + */ + saved_shared_perm = blk->shared_perm; + blk_set_perm(blk, blk->perm, BLK_PERM_ALL, &local_err); if (local_err) { error_propagate(errp, local_err); blk->disable_perm = true; return; } + blk->shared_perm = saved_shared_perm; if (runstate_check(RUN_STATE_INMIGRATE)) { /* Activation can happen when migration process is still active, for @@ -224,6 +235,7 @@ static void blk_root_activate(BdrvChild *child, Error **errp) void blk_set_force_allow_inactivate(BlockBackend *blk) { + GLOBAL_STATE_CODE(); blk->force_allow_inactivate = true; } @@ -299,6 +311,7 @@ static void blk_root_detach(BdrvChild *child) static AioContext *blk_root_get_parent_aio_context(BdrvChild *c) { BlockBackend *blk = c->opaque; + IO_CODE(); return blk_get_aio_context(blk); } @@ -321,8 +334,7 @@ static const BdrvChildClass child_root = { .attach = blk_root_attach, .detach = blk_root_detach, - .can_set_aio_ctx = blk_root_can_set_aio_ctx, - .set_aio_ctx = blk_root_set_aio_ctx, + .change_aio_ctx = blk_root_change_aio_ctx, .get_parent_aio_context = blk_root_get_parent_aio_context, }; @@ -342,6 +354,8 @@ BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm) { BlockBackend *blk; + GLOBAL_STATE_CODE(); + blk = g_new0(BlockBackend, 1); blk->refcnt = 1; blk->ctx = ctx; @@ -379,6 +393,8 @@ BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm, { BlockBackend *blk = blk_new(bdrv_get_aio_context(bs), perm, shared_perm); + GLOBAL_STATE_CODE(); + if (blk_insert_bs(blk, bs, errp) < 0) { blk_unref(blk); return NULL; @@ -407,6 +423,8 @@ BlockBackend *blk_new_open(const char *filename, const char *reference, uint64_t perm = 0; uint64_t shared = BLK_PERM_ALL; + GLOBAL_STATE_CODE(); + /* * blk_new_open() is mainly used in .bdrv_create implementations and the * tools where sharing isn't a major concern because the BDS stays private @@ -484,6 +502,7 @@ static void drive_info_del(DriveInfo *dinfo) int blk_get_refcnt(BlockBackend *blk) { + GLOBAL_STATE_CODE(); return blk ? 
blk->refcnt : 0; } @@ -494,6 +513,7 @@ int blk_get_refcnt(BlockBackend *blk) void blk_ref(BlockBackend *blk) { assert(blk->refcnt > 0); + GLOBAL_STATE_CODE(); blk->refcnt++; } @@ -504,6 +524,7 @@ void blk_ref(BlockBackend *blk) */ void blk_unref(BlockBackend *blk) { + GLOBAL_STATE_CODE(); if (blk) { assert(blk->refcnt > 0); if (blk->refcnt > 1) { @@ -524,6 +545,7 @@ void blk_unref(BlockBackend *blk) */ BlockBackend *blk_all_next(BlockBackend *blk) { + GLOBAL_STATE_CODE(); return blk ? QTAILQ_NEXT(blk, link) : QTAILQ_FIRST(&block_backends); } @@ -532,6 +554,8 @@ void blk_remove_all_bs(void) { BlockBackend *blk = NULL; + GLOBAL_STATE_CODE(); + while ((blk = blk_all_next(blk)) != NULL) { AioContext *ctx = blk_get_aio_context(blk); @@ -555,6 +579,7 @@ void blk_remove_all_bs(void) */ BlockBackend *blk_next(BlockBackend *blk) { + GLOBAL_STATE_CODE(); return blk ? QTAILQ_NEXT(blk, monitor_link) : QTAILQ_FIRST(&monitor_block_backends); } @@ -621,6 +646,7 @@ static void bdrv_next_reset(BdrvNextIterator *it) BlockDriverState *bdrv_first(BdrvNextIterator *it) { + GLOBAL_STATE_CODE(); bdrv_next_reset(it); return bdrv_next(it); } @@ -658,6 +684,7 @@ bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp) { assert(!blk->name); assert(name && name[0]); + GLOBAL_STATE_CODE(); if (!id_wellformed(name)) { error_setg(errp, "Invalid device name"); @@ -685,6 +712,8 @@ bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp) */ void monitor_remove_blk(BlockBackend *blk) { + GLOBAL_STATE_CODE(); + if (!blk->name) { return; } @@ -700,6 +729,7 @@ void monitor_remove_blk(BlockBackend *blk) */ const char *blk_name(const BlockBackend *blk) { + IO_CODE(); return blk->name ?: ""; } @@ -711,6 +741,7 @@ BlockBackend *blk_by_name(const char *name) { BlockBackend *blk = NULL; + GLOBAL_STATE_CODE(); assert(name); while ((blk = blk_next(blk)) != NULL) { if (!strcmp(name, blk->name)) { @@ -725,12 +756,16 @@ BlockBackend *blk_by_name(const char *name) */ BlockDriverState *blk_bs(BlockBackend *blk) { + IO_CODE(); return blk->root ? 
blk->root->bs : NULL; } static BlockBackend *bdrv_first_blk(BlockDriverState *bs) { BdrvChild *child; + + GLOBAL_STATE_CODE(); + QLIST_FOREACH(child, &bs->parents, next_parent) { if (child->klass == &child_root) { return child->opaque; @@ -745,6 +780,7 @@ static BlockBackend *bdrv_first_blk(BlockDriverState *bs) */ bool bdrv_has_blk(BlockDriverState *bs) { + GLOBAL_STATE_CODE(); return bdrv_first_blk(bs) != NULL; } @@ -755,6 +791,7 @@ bool bdrv_is_root_node(BlockDriverState *bs) { BdrvChild *c; + GLOBAL_STATE_CODE(); QLIST_FOREACH(c, &bs->parents, next_parent) { if (c->klass != &child_root) { return false; @@ -769,6 +806,7 @@ bool bdrv_is_root_node(BlockDriverState *bs) */ DriveInfo *blk_legacy_dinfo(BlockBackend *blk) { + GLOBAL_STATE_CODE(); return blk->legacy_dinfo; } @@ -780,6 +818,7 @@ DriveInfo *blk_legacy_dinfo(BlockBackend *blk) DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo) { assert(!blk->legacy_dinfo); + GLOBAL_STATE_CODE(); return blk->legacy_dinfo = dinfo; } @@ -790,6 +829,7 @@ DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo) BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo) { BlockBackend *blk = NULL; + GLOBAL_STATE_CODE(); while ((blk = blk_next(blk)) != NULL) { if (blk->legacy_dinfo == dinfo) { @@ -804,6 +844,7 @@ BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo) */ BlockBackendPublic *blk_get_public(BlockBackend *blk) { + GLOBAL_STATE_CODE(); return &blk->public; } @@ -812,6 +853,7 @@ BlockBackendPublic *blk_get_public(BlockBackend *blk) */ BlockBackend *blk_by_public(BlockBackendPublic *public) { + GLOBAL_STATE_CODE(); return container_of(public, BlockBackend, public); } @@ -821,16 +863,24 @@ BlockBackend *blk_by_public(BlockBackendPublic *public) void blk_remove_bs(BlockBackend *blk) { ThrottleGroupMember *tgm = &blk->public.throttle_group_member; - BlockDriverState *bs; BdrvChild *root; + GLOBAL_STATE_CODE(); + notifier_list_notify(&blk->remove_bs_notifiers, blk); if (tgm->throttle_state) { - bs = blk_bs(blk); + BlockDriverState *bs = blk_bs(blk); + + /* + * Take a ref in case blk_bs() changes across bdrv_drained_begin(), for + * example, if a temporary filter node is removed by a blockjob. + */ + bdrv_ref(bs); bdrv_drained_begin(bs); throttle_group_detach_aio_context(tgm); throttle_group_attach_aio_context(tgm, qemu_get_aio_context()); bdrv_drained_end(bs); + bdrv_unref(bs); } blk_update_root_state(blk); @@ -851,6 +901,7 @@ void blk_remove_bs(BlockBackend *blk) int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp) { ThrottleGroupMember *tgm = &blk->public.throttle_group_member; + GLOBAL_STATE_CODE(); bdrv_ref(bs); blk->root = bdrv_root_attach_child(bs, "root", &child_root, BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, @@ -869,6 +920,15 @@ int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp) return 0; } +/* + * Change BlockDriverState associated with @blk. + */ +int blk_replace_bs(BlockBackend *blk, BlockDriverState *new_bs, Error **errp) +{ + GLOBAL_STATE_CODE(); + return bdrv_replace_child_bs(blk->root, new_bs, errp); +} + /* * Sets the permission bitmasks that the user of the BlockBackend needs. 
*/ @@ -876,6 +936,7 @@ int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm, Error **errp) { int ret; + GLOBAL_STATE_CODE(); if (blk->root && !blk->disable_perm) { ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp); @@ -892,6 +953,7 @@ int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm, void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm) { + GLOBAL_STATE_CODE(); *perm = blk->perm; *shared_perm = blk->shared_perm; } @@ -902,6 +964,7 @@ void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm) */ int blk_attach_dev(BlockBackend *blk, DeviceState *dev) { + GLOBAL_STATE_CODE(); if (blk->dev) { return -EBUSY; } @@ -927,10 +990,10 @@ int blk_attach_dev(BlockBackend *blk, DeviceState *dev) void blk_detach_dev(BlockBackend *blk, DeviceState *dev) { assert(blk->dev == dev); + GLOBAL_STATE_CODE(); blk->dev = NULL; blk->dev_ops = NULL; blk->dev_opaque = NULL; - blk->guest_block_size = 512; blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort); blk_unref(blk); } @@ -940,6 +1003,7 @@ void blk_detach_dev(BlockBackend *blk, DeviceState *dev) */ DeviceState *blk_get_attached_dev(BlockBackend *blk) { + GLOBAL_STATE_CODE(); return blk->dev; } @@ -948,6 +1012,7 @@ DeviceState *blk_get_attached_dev(BlockBackend *blk) char *blk_get_attached_dev_id(BlockBackend *blk) { DeviceState *dev = blk->dev; + IO_CODE(); if (!dev) { return g_strdup(""); @@ -968,6 +1033,8 @@ BlockBackend *blk_by_dev(void *dev) { BlockBackend *blk = NULL; + GLOBAL_STATE_CODE(); + assert(dev != NULL); while ((blk = blk_all_next(blk)) != NULL) { if (blk->dev == dev) { @@ -985,11 +1052,12 @@ BlockBackend *blk_by_dev(void *dev) void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, void *opaque) { + GLOBAL_STATE_CODE(); blk->dev_ops = ops; blk->dev_opaque = opaque; /* Are we currently quiesced? Should we enforce this right now? 
*/ - if (blk->quiesce_counter && ops->drained_begin) { + if (blk->quiesce_counter && ops && ops->drained_begin) { ops->drained_begin(opaque); } } @@ -1006,6 +1074,7 @@ void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, */ void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp) { + GLOBAL_STATE_CODE(); if (blk->dev_ops && blk->dev_ops->change_media_cb) { bool tray_was_open, tray_is_open; Error *local_err = NULL; @@ -1038,6 +1107,7 @@ static void blk_root_change_media(BdrvChild *child, bool load) */ bool blk_dev_has_removable_media(BlockBackend *blk) { + GLOBAL_STATE_CODE(); return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb); } @@ -1046,6 +1116,7 @@ bool blk_dev_has_removable_media(BlockBackend *blk) */ bool blk_dev_has_tray(BlockBackend *blk) { + IO_CODE(); return blk->dev_ops && blk->dev_ops->is_tray_open; } @@ -1055,6 +1126,7 @@ bool blk_dev_has_tray(BlockBackend *blk) */ void blk_dev_eject_request(BlockBackend *blk, bool force) { + GLOBAL_STATE_CODE(); if (blk->dev_ops && blk->dev_ops->eject_request_cb) { blk->dev_ops->eject_request_cb(blk->dev_opaque, force); } @@ -1065,6 +1137,7 @@ void blk_dev_eject_request(BlockBackend *blk, bool force) */ bool blk_dev_is_tray_open(BlockBackend *blk) { + IO_CODE(); if (blk_dev_has_tray(blk)) { return blk->dev_ops->is_tray_open(blk->dev_opaque); } @@ -1077,6 +1150,7 @@ bool blk_dev_is_tray_open(BlockBackend *blk) */ bool blk_dev_is_medium_locked(BlockBackend *blk) { + GLOBAL_STATE_CODE(); if (blk->dev_ops && blk->dev_ops->is_medium_locked) { return blk->dev_ops->is_medium_locked(blk->dev_opaque); } @@ -1097,6 +1171,7 @@ static void blk_root_resize(BdrvChild *child) void blk_iostatus_enable(BlockBackend *blk) { + GLOBAL_STATE_CODE(); blk->iostatus_enabled = true; blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK; } @@ -1105,6 +1180,7 @@ void blk_iostatus_enable(BlockBackend *blk) * enables it _and_ the VM is configured to stop on errors */ bool blk_iostatus_is_enabled(const BlockBackend *blk) { + IO_CODE(); return (blk->iostatus_enabled && (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC || blk->on_write_error == BLOCKDEV_ON_ERROR_STOP || @@ -1113,16 +1189,19 @@ bool blk_iostatus_is_enabled(const BlockBackend *blk) BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk) { + GLOBAL_STATE_CODE(); return blk->iostatus; } void blk_iostatus_disable(BlockBackend *blk) { + GLOBAL_STATE_CODE(); blk->iostatus_enabled = false; } void blk_iostatus_reset(BlockBackend *blk) { + GLOBAL_STATE_CODE(); if (blk_iostatus_is_enabled(blk)) { blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK; } @@ -1130,6 +1209,7 @@ void blk_iostatus_reset(BlockBackend *blk) void blk_iostatus_set_err(BlockBackend *blk, int error) { + IO_CODE(); assert(blk_iostatus_is_enabled(blk)); if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { blk->iostatus = error == ENOSPC ? 
BLOCK_DEVICE_IO_STATUS_NOSPACE : @@ -1139,25 +1219,28 @@ void blk_iostatus_set_err(BlockBackend *blk, int error) void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow) { + IO_CODE(); blk->allow_write_beyond_eof = allow; } void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow) { + IO_CODE(); blk->allow_aio_context_change = allow; } void blk_set_disable_request_queuing(BlockBackend *blk, bool disable) { + IO_CODE(); blk->disable_request_queuing = disable; } static int blk_check_byte_request(BlockBackend *blk, int64_t offset, - size_t size) + int64_t bytes) { int64_t len; - if (size > INT_MAX) { + if (bytes < 0) { return -EIO; } @@ -1175,7 +1258,7 @@ static int blk_check_byte_request(BlockBackend *blk, int64_t offset, return len; } - if (offset > len || len - offset < size) { + if (offset > len || len - offset < bytes) { return -EIO; } } @@ -1197,11 +1280,13 @@ static void coroutine_fn blk_wait_while_drained(BlockBackend *blk) /* To be called between exactly one pair of blk_inc/dec_in_flight() */ static int coroutine_fn -blk_do_preadv(BlockBackend *blk, int64_t offset, unsigned int bytes, - QEMUIOVector *qiov, BdrvRequestFlags flags) +blk_co_do_preadv_part(BlockBackend *blk, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, + BdrvRequestFlags flags) { int ret; BlockDriverState *bs; + IO_CODE(); blk_wait_while_drained(blk); @@ -1222,19 +1307,46 @@ blk_do_preadv(BlockBackend *blk, int64_t offset, unsigned int bytes, bytes, false); } - ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags); + ret = bdrv_co_preadv_part(blk->root, offset, bytes, qiov, qiov_offset, + flags); bdrv_dec_in_flight(bs); return ret; } +int coroutine_fn blk_co_pread(BlockBackend *blk, int64_t offset, int64_t bytes, + void *buf, BdrvRequestFlags flags) +{ + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); + IO_OR_GS_CODE(); + + assert(bytes <= SIZE_MAX); + + return blk_co_preadv(blk, offset, bytes, &qiov, flags); +} + int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset, - unsigned int bytes, QEMUIOVector *qiov, + int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { int ret; + IO_OR_GS_CODE(); blk_inc_in_flight(blk); - ret = blk_do_preadv(blk, offset, bytes, qiov, flags); + ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, 0, flags); + blk_dec_in_flight(blk); + + return ret; +} + +int coroutine_fn blk_co_preadv_part(BlockBackend *blk, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + size_t qiov_offset, BdrvRequestFlags flags) +{ + int ret; + IO_OR_GS_CODE(); + + blk_inc_in_flight(blk); + ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, qiov_offset, flags); blk_dec_in_flight(blk); return ret; @@ -1242,12 +1354,13 @@ int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset, /* To be called between exactly one pair of blk_inc/dec_in_flight() */ static int coroutine_fn -blk_do_pwritev_part(BlockBackend *blk, int64_t offset, unsigned int bytes, - QEMUIOVector *qiov, size_t qiov_offset, - BdrvRequestFlags flags) +blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, + BdrvRequestFlags flags) { int ret; BlockDriverState *bs; + IO_CODE(); blk_wait_while_drained(blk); @@ -1278,23 +1391,36 @@ blk_do_pwritev_part(BlockBackend *blk, int64_t offset, unsigned int bytes, } int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset, - unsigned int bytes, + int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags) { int ret; + 
IO_OR_GS_CODE(); blk_inc_in_flight(blk); - ret = blk_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags); + ret = blk_co_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags); blk_dec_in_flight(blk); return ret; } +int coroutine_fn blk_co_pwrite(BlockBackend *blk, int64_t offset, int64_t bytes, + const void *buf, BdrvRequestFlags flags) +{ + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); + IO_OR_GS_CODE(); + + assert(bytes <= SIZE_MAX); + + return blk_co_pwritev(blk, offset, bytes, &qiov, flags); +} + int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset, - unsigned int bytes, QEMUIOVector *qiov, + int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { + IO_OR_GS_CODE(); return blk_co_pwritev_part(blk, offset, bytes, qiov, 0, flags); } @@ -1306,72 +1432,21 @@ typedef struct BlkRwCo { BdrvRequestFlags flags; } BlkRwCo; -static void blk_read_entry(void *opaque) -{ - BlkRwCo *rwco = opaque; - QEMUIOVector *qiov = rwco->iobuf; - - rwco->ret = blk_do_preadv(rwco->blk, rwco->offset, qiov->size, - qiov, rwco->flags); - aio_wait_kick(); -} - -static void blk_write_entry(void *opaque) -{ - BlkRwCo *rwco = opaque; - QEMUIOVector *qiov = rwco->iobuf; - - rwco->ret = blk_do_pwritev_part(rwco->blk, rwco->offset, qiov->size, - qiov, 0, rwco->flags); - aio_wait_kick(); -} - -static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf, - int64_t bytes, CoroutineEntry co_entry, - BdrvRequestFlags flags) -{ - QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); - BlkRwCo rwco = { - .blk = blk, - .offset = offset, - .iobuf = &qiov, - .flags = flags, - .ret = NOT_DONE, - }; - - blk_inc_in_flight(blk); - if (qemu_in_coroutine()) { - /* Fast-path if already in coroutine context */ - co_entry(&rwco); - } else { - Coroutine *co = qemu_coroutine_create(co_entry, &rwco); - bdrv_coroutine_enter(blk_bs(blk), co); - BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE); - } - blk_dec_in_flight(blk); - - return rwco.ret; -} - -int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset, - int bytes, BdrvRequestFlags flags) -{ - return blk_prw(blk, offset, NULL, bytes, blk_write_entry, - flags | BDRV_REQ_ZERO_WRITE); -} - int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags) { + GLOBAL_STATE_CODE(); return bdrv_make_zero(blk->root, flags); } void blk_inc_in_flight(BlockBackend *blk) { + IO_CODE(); qatomic_inc(&blk->in_flight); } void blk_dec_in_flight(BlockBackend *blk) { + IO_CODE(); qatomic_dec(&blk->in_flight); aio_wait_kick(); } @@ -1390,6 +1465,7 @@ BlockAIOCB *blk_abort_aio_request(BlockBackend *blk, void *opaque, int ret) { struct BlockBackendAIOCB *acb; + IO_CODE(); blk_inc_in_flight(blk); acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque); @@ -1404,7 +1480,7 @@ BlockAIOCB *blk_abort_aio_request(BlockBackend *blk, typedef struct BlkAioEmAIOCB { BlockAIOCB common; BlkRwCo rwco; - int bytes; + int64_t bytes; bool has_returned; } BlkAioEmAIOCB; @@ -1436,7 +1512,8 @@ static void blk_aio_complete_bh(void *opaque) blk_aio_complete(acb); } -static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes, +static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, + int64_t bytes, void *iobuf, CoroutineEntry co_entry, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) @@ -1468,60 +1545,42 @@ static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes, return &acb->common; } -static void blk_aio_read_entry(void *opaque) +static void coroutine_fn blk_aio_read_entry(void *opaque) { BlkAioEmAIOCB *acb = 
opaque; BlkRwCo *rwco = &acb->rwco; QEMUIOVector *qiov = rwco->iobuf; assert(qiov->size == acb->bytes); - rwco->ret = blk_do_preadv(rwco->blk, rwco->offset, acb->bytes, - qiov, rwco->flags); + rwco->ret = blk_co_do_preadv_part(rwco->blk, rwco->offset, acb->bytes, qiov, + 0, rwco->flags); blk_aio_complete(acb); } -static void blk_aio_write_entry(void *opaque) +static void coroutine_fn blk_aio_write_entry(void *opaque) { BlkAioEmAIOCB *acb = opaque; BlkRwCo *rwco = &acb->rwco; QEMUIOVector *qiov = rwco->iobuf; assert(!qiov || qiov->size == acb->bytes); - rwco->ret = blk_do_pwritev_part(rwco->blk, rwco->offset, acb->bytes, - qiov, 0, rwco->flags); + rwco->ret = blk_co_do_pwritev_part(rwco->blk, rwco->offset, acb->bytes, + qiov, 0, rwco->flags); blk_aio_complete(acb); } BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset, - int count, BdrvRequestFlags flags, + int64_t bytes, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) { - return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry, + IO_CODE(); + return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_write_entry, flags | BDRV_REQ_ZERO_WRITE, cb, opaque); } -int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count) -{ - int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0); - if (ret < 0) { - return ret; - } - return count; -} - -int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count, - BdrvRequestFlags flags) -{ - int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry, - flags); - if (ret < 0) { - return ret; - } - return count; -} - int64_t blk_getlength(BlockBackend *blk) { + IO_CODE(); if (!blk_is_available(blk)) { return -ENOMEDIUM; } @@ -1531,6 +1590,7 @@ int64_t blk_getlength(BlockBackend *blk) void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr) { + IO_CODE(); if (!blk_bs(blk)) { *nb_sectors_ptr = 0; } else { @@ -1540,6 +1600,7 @@ void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr) int64_t blk_nb_sectors(BlockBackend *blk) { + IO_CODE(); if (!blk_is_available(blk)) { return -ENOMEDIUM; } @@ -1551,6 +1612,8 @@ BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset, QEMUIOVector *qiov, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) { + IO_CODE(); + assert((uint64_t)qiov->size <= INT64_MAX); return blk_aio_prwv(blk, offset, qiov->size, qiov, blk_aio_read_entry, flags, cb, opaque); } @@ -1559,24 +1622,30 @@ BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset, QEMUIOVector *qiov, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) { + IO_CODE(); + assert((uint64_t)qiov->size <= INT64_MAX); return blk_aio_prwv(blk, offset, qiov->size, qiov, blk_aio_write_entry, flags, cb, opaque); } void blk_aio_cancel(BlockAIOCB *acb) { + GLOBAL_STATE_CODE(); bdrv_aio_cancel(acb); } void blk_aio_cancel_async(BlockAIOCB *acb) { + IO_CODE(); bdrv_aio_cancel_async(acb); } /* To be called between exactly one pair of blk_inc/dec_in_flight() */ static int coroutine_fn -blk_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf) +blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf) { + IO_CODE(); + blk_wait_while_drained(blk); if (!blk_is_available(blk)) { @@ -1586,26 +1655,25 @@ blk_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf) return bdrv_co_ioctl(blk_bs(blk), req, buf); } -static void blk_ioctl_entry(void *opaque) +int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req, + void *buf) { - BlkRwCo *rwco = opaque; - QEMUIOVector *qiov = 
rwco->iobuf; + int ret; + IO_OR_GS_CODE(); - rwco->ret = blk_do_ioctl(rwco->blk, rwco->offset, qiov->iov[0].iov_base); - aio_wait_kick(); + blk_inc_in_flight(blk); + ret = blk_co_do_ioctl(blk, req, buf); + blk_dec_in_flight(blk); + + return ret; } -int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf) -{ - return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0); -} - -static void blk_aio_ioctl_entry(void *opaque) +static void coroutine_fn blk_aio_ioctl_entry(void *opaque) { BlkAioEmAIOCB *acb = opaque; BlkRwCo *rwco = &acb->rwco; - rwco->ret = blk_do_ioctl(rwco->blk, rwco->offset, rwco->iobuf); + rwco->ret = blk_co_do_ioctl(rwco->blk, rwco->offset, rwco->iobuf); blk_aio_complete(acb); } @@ -1613,14 +1681,16 @@ static void blk_aio_ioctl_entry(void *opaque) BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf, BlockCompletionFunc *cb, void *opaque) { + IO_CODE(); return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque); } /* To be called between exactly one pair of blk_inc/dec_in_flight() */ static int coroutine_fn -blk_do_pdiscard(BlockBackend *blk, int64_t offset, int bytes) +blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes) { int ret; + IO_CODE(); blk_wait_while_drained(blk); @@ -1632,52 +1702,42 @@ blk_do_pdiscard(BlockBackend *blk, int64_t offset, int bytes) return bdrv_co_pdiscard(blk->root, offset, bytes); } -static void blk_aio_pdiscard_entry(void *opaque) +static void coroutine_fn blk_aio_pdiscard_entry(void *opaque) { BlkAioEmAIOCB *acb = opaque; BlkRwCo *rwco = &acb->rwco; - rwco->ret = blk_do_pdiscard(rwco->blk, rwco->offset, acb->bytes); + rwco->ret = blk_co_do_pdiscard(rwco->blk, rwco->offset, acb->bytes); blk_aio_complete(acb); } BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, - int64_t offset, int bytes, + int64_t offset, int64_t bytes, BlockCompletionFunc *cb, void *opaque) { + IO_CODE(); return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0, cb, opaque); } -int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes) +int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, + int64_t bytes) { int ret; + IO_OR_GS_CODE(); blk_inc_in_flight(blk); - ret = blk_do_pdiscard(blk, offset, bytes); + ret = blk_co_do_pdiscard(blk, offset, bytes); blk_dec_in_flight(blk); return ret; } -static void blk_pdiscard_entry(void *opaque) -{ - BlkRwCo *rwco = opaque; - QEMUIOVector *qiov = rwco->iobuf; - - rwco->ret = blk_do_pdiscard(rwco->blk, rwco->offset, qiov->size); - aio_wait_kick(); -} - -int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes) -{ - return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0); -} - /* To be called between exactly one pair of blk_inc/dec_in_flight() */ -static int coroutine_fn blk_do_flush(BlockBackend *blk) +static int coroutine_fn blk_co_do_flush(BlockBackend *blk) { blk_wait_while_drained(blk); + IO_CODE(); if (!blk_is_available(blk)) { return -ENOMEDIUM; @@ -1686,49 +1746,41 @@ static int coroutine_fn blk_do_flush(BlockBackend *blk) return bdrv_co_flush(blk_bs(blk)); } -static void blk_aio_flush_entry(void *opaque) +static void coroutine_fn blk_aio_flush_entry(void *opaque) { BlkAioEmAIOCB *acb = opaque; BlkRwCo *rwco = &acb->rwco; - rwco->ret = blk_do_flush(rwco->blk); + rwco->ret = blk_co_do_flush(rwco->blk); blk_aio_complete(acb); } BlockAIOCB *blk_aio_flush(BlockBackend *blk, BlockCompletionFunc *cb, void *opaque) { + IO_CODE(); return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque); } int coroutine_fn 
blk_co_flush(BlockBackend *blk) { int ret; + IO_OR_GS_CODE(); blk_inc_in_flight(blk); - ret = blk_do_flush(blk); + ret = blk_co_do_flush(blk); blk_dec_in_flight(blk); return ret; } -static void blk_flush_entry(void *opaque) -{ - BlkRwCo *rwco = opaque; - rwco->ret = blk_do_flush(rwco->blk); - aio_wait_kick(); -} - -int blk_flush(BlockBackend *blk) -{ - return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0); -} - void blk_drain(BlockBackend *blk) { BlockDriverState *bs = blk_bs(blk); + GLOBAL_STATE_CODE(); if (bs) { + bdrv_ref(bs); bdrv_drained_begin(bs); } @@ -1738,6 +1790,7 @@ void blk_drain(BlockBackend *blk) if (bs) { bdrv_drained_end(bs); + bdrv_unref(bs); } } @@ -1745,6 +1798,8 @@ void blk_drain_all(void) { BlockBackend *blk = NULL; + GLOBAL_STATE_CODE(); + bdrv_drain_all_begin(); while ((blk = blk_all_next(blk)) != NULL) { @@ -1764,12 +1819,14 @@ void blk_drain_all(void) void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error, BlockdevOnError on_write_error) { + GLOBAL_STATE_CODE(); blk->on_read_error = on_read_error; blk->on_write_error = on_write_error; } BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read) { + IO_CODE(); return is_read ? blk->on_read_error : blk->on_write_error; } @@ -1777,6 +1834,7 @@ BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read, int error) { BlockdevOnError on_err = blk_get_on_error(blk, is_read); + IO_CODE(); switch (on_err) { case BLOCKDEV_ON_ERROR_ENOSPC: @@ -1816,6 +1874,7 @@ void blk_error_action(BlockBackend *blk, BlockErrorAction action, bool is_read, int error) { assert(error >= 0); + IO_CODE(); if (action == BLOCK_ERROR_ACTION_STOP) { /* First set the iostatus, so that "info block" returns an iostatus @@ -1847,6 +1906,7 @@ void blk_error_action(BlockBackend *blk, BlockErrorAction action, bool blk_supports_write_perm(BlockBackend *blk) { BlockDriverState *bs = blk_bs(blk); + GLOBAL_STATE_CODE(); if (bs) { return !bdrv_is_read_only(bs); @@ -1861,12 +1921,14 @@ bool blk_supports_write_perm(BlockBackend *blk) */ bool blk_is_writable(BlockBackend *blk) { + IO_CODE(); return blk->perm & BLK_PERM_WRITE; } bool blk_is_sg(BlockBackend *blk) { BlockDriverState *bs = blk_bs(blk); + GLOBAL_STATE_CODE(); if (!bs) { return false; @@ -1877,41 +1939,47 @@ bool blk_is_sg(BlockBackend *blk) bool blk_enable_write_cache(BlockBackend *blk) { + IO_CODE(); return blk->enable_write_cache; } void blk_set_enable_write_cache(BlockBackend *blk, bool wce) { + IO_CODE(); blk->enable_write_cache = wce; } -void blk_invalidate_cache(BlockBackend *blk, Error **errp) +void blk_activate(BlockBackend *blk, Error **errp) { BlockDriverState *bs = blk_bs(blk); + GLOBAL_STATE_CODE(); if (!bs) { error_setg(errp, "Device '%s' has no medium", blk->name); return; } - bdrv_invalidate_cache(bs, errp); + bdrv_activate(bs, errp); } bool blk_is_inserted(BlockBackend *blk) { BlockDriverState *bs = blk_bs(blk); + IO_CODE(); return bs && bdrv_is_inserted(bs); } bool blk_is_available(BlockBackend *blk) { + IO_CODE(); return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk); } void blk_lock_medium(BlockBackend *blk, bool locked) { BlockDriverState *bs = blk_bs(blk); + IO_CODE(); if (bs) { bdrv_lock_medium(bs, locked); @@ -1922,6 +1990,7 @@ void blk_eject(BlockBackend *blk, bool eject_flag) { BlockDriverState *bs = blk_bs(blk); char *id; + IO_CODE(); if (bs) { bdrv_eject(bs, eject_flag); @@ -1938,6 +2007,7 @@ void blk_eject(BlockBackend *blk, bool eject_flag) int blk_get_flags(BlockBackend *blk) { BlockDriverState *bs = blk_bs(blk); + GLOBAL_STATE_CODE(); if 
(bs) { return bdrv_get_flags(bs); @@ -1950,6 +2020,7 @@ int blk_get_flags(BlockBackend *blk) uint32_t blk_get_request_alignment(BlockBackend *blk) { BlockDriverState *bs = blk_bs(blk); + IO_CODE(); return bs ? bs->bl.request_alignment : BDRV_SECTOR_SIZE; } @@ -1958,6 +2029,7 @@ uint64_t blk_get_max_hw_transfer(BlockBackend *blk) { BlockDriverState *bs = blk_bs(blk); uint64_t max = INT_MAX; + IO_CODE(); if (bs) { max = MIN_NON_ZERO(max, bs->bl.max_hw_transfer); @@ -1971,6 +2043,7 @@ uint32_t blk_get_max_transfer(BlockBackend *blk) { BlockDriverState *bs = blk_bs(blk); uint32_t max = INT_MAX; + IO_CODE(); if (bs) { max = MIN_NON_ZERO(max, bs->bl.max_transfer); @@ -1978,29 +2051,35 @@ uint32_t blk_get_max_transfer(BlockBackend *blk) return ROUND_DOWN(max, blk_get_request_alignment(blk)); } -int blk_get_max_iov(BlockBackend *blk) +int blk_get_max_hw_iov(BlockBackend *blk) { - return blk->root->bs->bl.max_iov; + IO_CODE(); + return MIN_NON_ZERO(blk->root->bs->bl.max_hw_iov, + blk->root->bs->bl.max_iov); } -void blk_set_guest_block_size(BlockBackend *blk, int align) +int blk_get_max_iov(BlockBackend *blk) { - blk->guest_block_size = align; + IO_CODE(); + return blk->root->bs->bl.max_iov; } void *blk_try_blockalign(BlockBackend *blk, size_t size) { + IO_CODE(); return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size); } void *blk_blockalign(BlockBackend *blk, size_t size) { + IO_CODE(); return qemu_blockalign(blk ? blk_bs(blk) : NULL, size); } bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp) { BlockDriverState *bs = blk_bs(blk); + GLOBAL_STATE_CODE(); if (!bs) { return false; @@ -2012,6 +2091,7 @@ bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp) void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason) { BlockDriverState *bs = blk_bs(blk); + GLOBAL_STATE_CODE(); if (bs) { bdrv_op_unblock(bs, op, reason); @@ -2021,6 +2101,7 @@ void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason) void blk_op_block_all(BlockBackend *blk, Error *reason) { BlockDriverState *bs = blk_bs(blk); + GLOBAL_STATE_CODE(); if (bs) { bdrv_op_block_all(bs, reason); @@ -2030,6 +2111,7 @@ void blk_op_block_all(BlockBackend *blk, Error *reason) void blk_op_unblock_all(BlockBackend *blk, Error *reason) { BlockDriverState *bs = blk_bs(blk); + GLOBAL_STATE_CODE(); if (bs) { bdrv_op_unblock_all(bs, reason); @@ -2039,6 +2121,7 @@ void blk_op_unblock_all(BlockBackend *blk, Error *reason) AioContext *blk_get_aio_context(BlockBackend *blk) { BlockDriverState *bs = blk_bs(blk); + IO_CODE(); if (bs) { AioContext *ctx = bdrv_get_aio_context(blk_bs(blk)); @@ -2062,56 +2145,92 @@ static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context, int ret; if (bs) { + bdrv_ref(bs); + if (update_root_node) { - ret = bdrv_child_try_set_aio_context(bs, new_context, blk->root, - errp); + /* + * update_root_node MUST be false for blk_root_set_aio_ctx_commit(), + * as we are already in the commit function of a transaction. 
+ */ + ret = bdrv_try_change_aio_context(bs, new_context, blk->root, errp); if (ret < 0) { + bdrv_unref(bs); return ret; } } + /* + * Make blk->ctx consistent with the root node before we invoke any + * other operations like drain that might inquire blk->ctx + */ + blk->ctx = new_context; if (tgm->throttle_state) { bdrv_drained_begin(bs); throttle_group_detach_aio_context(tgm); throttle_group_attach_aio_context(tgm, new_context); bdrv_drained_end(bs); } + + bdrv_unref(bs); + } else { + blk->ctx = new_context; } - blk->ctx = new_context; return 0; } int blk_set_aio_context(BlockBackend *blk, AioContext *new_context, Error **errp) { + GLOBAL_STATE_CODE(); return blk_do_set_aio_context(blk, new_context, true, errp); } -static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx, - GSList **ignore, Error **errp) +typedef struct BdrvStateBlkRootContext { + AioContext *new_ctx; + BlockBackend *blk; +} BdrvStateBlkRootContext; + +static void blk_root_set_aio_ctx_commit(void *opaque) { - BlockBackend *blk = child->opaque; + BdrvStateBlkRootContext *s = opaque; + BlockBackend *blk = s->blk; - if (blk->allow_aio_context_change) { - return true; - } - - /* Only manually created BlockBackends that are not attached to anything - * can change their AioContext without updating their user. */ - if (!blk->name || blk->dev) { - /* TODO Add BB name/QOM path */ - error_setg(errp, "Cannot change iothread of active block backend"); - return false; - } - - return true; + blk_do_set_aio_context(blk, s->new_ctx, false, &error_abort); } -static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx, - GSList **ignore) +static TransactionActionDrv set_blk_root_context = { + .commit = blk_root_set_aio_ctx_commit, + .clean = g_free, +}; + +static bool blk_root_change_aio_ctx(BdrvChild *child, AioContext *ctx, + GHashTable *visited, Transaction *tran, + Error **errp) { BlockBackend *blk = child->opaque; - blk_do_set_aio_context(blk, ctx, false, &error_abort); + BdrvStateBlkRootContext *s; + + if (!blk->allow_aio_context_change) { + /* + * Manually created BlockBackends (those with a name) that are not + * attached to anything can change their AioContext without updating + * their user; return an error for others. 
+ */ + if (!blk->name || blk->dev) { + /* TODO Add BB name/QOM path */ + error_setg(errp, "Cannot change iothread of active block backend"); + return false; + } + } + + s = g_new(BdrvStateBlkRootContext, 1); + *s = (BdrvStateBlkRootContext) { + .new_ctx = ctx, + .blk = blk, + }; + + tran_add(tran, &set_blk_root_context, s); + return true; } void blk_add_aio_context_notifier(BlockBackend *blk, @@ -2120,6 +2239,7 @@ void blk_add_aio_context_notifier(BlockBackend *blk, { BlockBackendAioNotifier *notifier; BlockDriverState *bs = blk_bs(blk); + GLOBAL_STATE_CODE(); notifier = g_new(BlockBackendAioNotifier, 1); notifier->attached_aio_context = attached_aio_context; @@ -2142,6 +2262,8 @@ void blk_remove_aio_context_notifier(BlockBackend *blk, BlockBackendAioNotifier *notifier; BlockDriverState *bs = blk_bs(blk); + GLOBAL_STATE_CODE(); + if (bs) { bdrv_remove_aio_context_notifier(bs, attached_aio_context, detach_aio_context, opaque); @@ -2162,17 +2284,20 @@ void blk_remove_aio_context_notifier(BlockBackend *blk, void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify) { + GLOBAL_STATE_CODE(); notifier_list_add(&blk->remove_bs_notifiers, notify); } void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify) { + GLOBAL_STATE_CODE(); notifier_list_add(&blk->insert_bs_notifiers, notify); } void blk_io_plug(BlockBackend *blk) { BlockDriverState *bs = blk_bs(blk); + IO_CODE(); if (bs) { bdrv_io_plug(bs); @@ -2182,6 +2307,7 @@ void blk_io_plug(BlockBackend *blk) void blk_io_unplug(BlockBackend *blk) { BlockDriverState *bs = blk_bs(blk); + IO_CODE(); if (bs) { bdrv_io_unplug(bs); @@ -2190,44 +2316,52 @@ void blk_io_unplug(BlockBackend *blk) BlockAcctStats *blk_get_stats(BlockBackend *blk) { + IO_CODE(); return &blk->stats; } void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk, BlockCompletionFunc *cb, void *opaque) { + IO_CODE(); return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque); } int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset, - int bytes, BdrvRequestFlags flags) + int64_t bytes, BdrvRequestFlags flags) { + IO_OR_GS_CODE(); return blk_co_pwritev(blk, offset, bytes, NULL, flags | BDRV_REQ_ZERO_WRITE); } -int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf, - int count) +int coroutine_fn blk_co_pwrite_compressed(BlockBackend *blk, int64_t offset, + int64_t bytes, const void *buf) { - return blk_prw(blk, offset, (void *) buf, count, blk_write_entry, - BDRV_REQ_WRITE_COMPRESSED); + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); + IO_OR_GS_CODE(); + return blk_co_pwritev_part(blk, offset, bytes, &qiov, 0, + BDRV_REQ_WRITE_COMPRESSED); } -int blk_truncate(BlockBackend *blk, int64_t offset, bool exact, - PreallocMode prealloc, BdrvRequestFlags flags, Error **errp) +int coroutine_fn blk_co_truncate(BlockBackend *blk, int64_t offset, bool exact, + PreallocMode prealloc, BdrvRequestFlags flags, + Error **errp) { + IO_OR_GS_CODE(); if (!blk_is_available(blk)) { error_setg(errp, "No medium inserted"); return -ENOMEDIUM; } - return bdrv_truncate(blk->root, offset, exact, prealloc, flags, errp); + return bdrv_co_truncate(blk->root, offset, exact, prealloc, flags, errp); } int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf, int64_t pos, int size) { int ret; + GLOBAL_STATE_CODE(); if (!blk_is_available(blk)) { return -ENOMEDIUM; @@ -2247,6 +2381,7 @@ int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf, int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size) { + GLOBAL_STATE_CODE(); 
if (!blk_is_available(blk)) { return -ENOMEDIUM; } @@ -2256,6 +2391,7 @@ int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size) int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz) { + GLOBAL_STATE_CODE(); if (!blk_is_available(blk)) { return -ENOMEDIUM; } @@ -2265,6 +2401,7 @@ int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz) int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo) { + GLOBAL_STATE_CODE(); if (!blk_is_available(blk)) { return -ENOMEDIUM; } @@ -2278,6 +2415,7 @@ int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo) */ void blk_update_root_state(BlockBackend *blk) { + GLOBAL_STATE_CODE(); assert(blk->root); blk->root_state.open_flags = blk->root->bs->open_flags; @@ -2290,6 +2428,7 @@ void blk_update_root_state(BlockBackend *blk) */ bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk) { + GLOBAL_STATE_CODE(); return blk->root_state.detect_zeroes; } @@ -2299,17 +2438,20 @@ bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk) */ int blk_get_open_flags_from_root_state(BlockBackend *blk) { + GLOBAL_STATE_CODE(); return blk->root_state.open_flags; } BlockBackendRootState *blk_get_root_state(BlockBackend *blk) { + GLOBAL_STATE_CODE(); return &blk->root_state; } int blk_commit_all(void) { BlockBackend *blk = NULL; + GLOBAL_STATE_CODE(); while ((blk = blk_all_next(blk)) != NULL) { AioContext *aio_context = blk_get_aio_context(blk); @@ -2334,6 +2476,7 @@ int blk_commit_all(void) /* throttling disk I/O limits */ void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg) { + GLOBAL_STATE_CODE(); throttle_group_config(&blk->public.throttle_group_member, cfg); } @@ -2342,12 +2485,15 @@ void blk_io_limits_disable(BlockBackend *blk) BlockDriverState *bs = blk_bs(blk); ThrottleGroupMember *tgm = &blk->public.throttle_group_member; assert(tgm->throttle_state); + GLOBAL_STATE_CODE(); if (bs) { + bdrv_ref(bs); bdrv_drained_begin(bs); } throttle_group_unregister_tgm(tgm); if (bs) { bdrv_drained_end(bs); + bdrv_unref(bs); } } @@ -2355,12 +2501,14 @@ void blk_io_limits_disable(BlockBackend *blk) void blk_io_limits_enable(BlockBackend *blk, const char *group) { assert(!blk->public.throttle_group_member.throttle_state); + GLOBAL_STATE_CODE(); throttle_group_register_tgm(&blk->public.throttle_group_member, group, blk_get_aio_context(blk)); } void blk_io_limits_update_group(BlockBackend *blk, const char *group) { + GLOBAL_STATE_CODE(); /* this BB is not part of any group */ if (!blk->public.throttle_group_member.throttle_state) { return; @@ -2426,22 +2574,37 @@ static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter) } } -void blk_register_buf(BlockBackend *blk, void *host, size_t size) +bool blk_register_buf(BlockBackend *blk, void *host, size_t size, Error **errp) { - bdrv_register_buf(blk_bs(blk), host, size); + BlockDriverState *bs = blk_bs(blk); + + GLOBAL_STATE_CODE(); + + if (bs) { + return bdrv_register_buf(bs, host, size, errp); + } + return true; } -void blk_unregister_buf(BlockBackend *blk, void *host) +void blk_unregister_buf(BlockBackend *blk, void *host, size_t size) { - bdrv_unregister_buf(blk_bs(blk), host); + BlockDriverState *bs = blk_bs(blk); + + GLOBAL_STATE_CODE(); + + if (bs) { + bdrv_unregister_buf(bs, host, size); + } } int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in, BlockBackend *blk_out, int64_t off_out, - int bytes, BdrvRequestFlags read_flags, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { int r; + IO_CODE(); + r = 
blk_check_byte_request(blk_in, off_in, bytes); if (r) { return r; @@ -2457,11 +2620,13 @@ int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in, const BdrvChild *blk_root(BlockBackend *blk) { + GLOBAL_STATE_CODE(); return blk->root; } int blk_make_empty(BlockBackend *blk, Error **errp) { + GLOBAL_STATE_CODE(); if (!blk_is_available(blk)) { error_setg(errp, "No medium inserted"); return -ENOMEDIUM; diff --git a/block/block-copy.c b/block/block-copy.c index 0becad52da..bb947afdda 100644 --- a/block/block-copy.c +++ b/block/block-copy.c @@ -17,16 +17,20 @@ #include "trace.h" #include "qapi/error.h" #include "block/block-copy.h" +#include "block/reqlist.h" #include "sysemu/block-backend.h" #include "qemu/units.h" #include "qemu/coroutine.h" #include "block/aio_task.h" +#include "qemu/error-report.h" +#include "qemu/memalign.h" #define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB) #define BLOCK_COPY_MAX_BUFFER (1 * MiB) #define BLOCK_COPY_MAX_MEM (128 * MiB) #define BLOCK_COPY_MAX_WORKERS 64 #define BLOCK_COPY_SLICE_TIME 100000000ULL /* ns */ +#define BLOCK_COPY_CLUSTER_SIZE_DEFAULT (1 << 16) typedef enum { COPY_READ_WRITE_CLUSTER, @@ -81,7 +85,6 @@ typedef struct BlockCopyTask { */ BlockCopyState *s; BlockCopyCallState *call_state; - int64_t offset; /* * @method can also be set again in the while loop of * block_copy_dirty_clusters(), but it is never accessed concurrently @@ -92,21 +95,17 @@ typedef struct BlockCopyTask { BlockCopyMethod method; /* - * Fields whose state changes throughout the execution - * Protected by lock in BlockCopyState. + * Generally, req is protected by lock in BlockCopyState, Still req.offset + * is only set on task creation, so may be read concurrently after creation. + * req.bytes is changed at most once, and need only protecting the case of + * parallel read while updating @bytes value in block_copy_task_shrink(). */ - CoQueue wait_queue; /* coroutines blocked on this task */ - /* - * Only protect the case of parallel read while updating @bytes - * value in block_copy_task_shrink(). - */ - int64_t bytes; - QLIST_ENTRY(BlockCopyTask) list; + BlockReq req; } BlockCopyTask; static int64_t task_end(BlockCopyTask *task) { - return task->offset + task->bytes; + return task->req.offset + task->req.bytes; } typedef struct BlockCopyState { @@ -134,7 +133,7 @@ typedef struct BlockCopyState { CoMutex lock; int64_t in_flight_bytes; BlockCopyMethod method; - QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all block-copy calls */ + BlockReqList reqs; QLIST_HEAD(, BlockCopyCallState) calls; /* * skip_unallocated: @@ -158,42 +157,6 @@ typedef struct BlockCopyState { RateLimit rate_limit; } BlockCopyState; -/* Called with lock held */ -static BlockCopyTask *find_conflicting_task(BlockCopyState *s, - int64_t offset, int64_t bytes) -{ - BlockCopyTask *t; - - QLIST_FOREACH(t, &s->tasks, list) { - if (offset + bytes > t->offset && offset < t->offset + t->bytes) { - return t; - } - } - - return NULL; -} - -/* - * If there are no intersecting tasks return false. Otherwise, wait for the - * first found intersecting tasks to finish and return true. - * - * Called with lock held. May temporary release the lock. - * Return value of 0 proves that lock was NOT released. 
- */ -static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset, - int64_t bytes) -{ - BlockCopyTask *task = find_conflicting_task(s, offset, bytes); - - if (!task) { - return false; - } - - qemu_co_queue_wait(&task->wait_queue, &s->lock); - - return true; -} - /* Called with lock held */ static int64_t block_copy_chunk_size(BlockCopyState *s) { @@ -237,7 +200,7 @@ block_copy_task_create(BlockCopyState *s, BlockCopyCallState *call_state, bytes = QEMU_ALIGN_UP(bytes, s->cluster_size); /* region is dirty, so no existent tasks possible in it */ - assert(!find_conflicting_task(s, offset, bytes)); + assert(!reqlist_find_conflict(&s->reqs, offset, bytes)); bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes); s->in_flight_bytes += bytes; @@ -247,12 +210,9 @@ block_copy_task_create(BlockCopyState *s, BlockCopyCallState *call_state, .task.func = block_copy_task_entry, .s = s, .call_state = call_state, - .offset = offset, - .bytes = bytes, .method = s->method, }; - qemu_co_queue_init(&task->wait_queue); - QLIST_INSERT_HEAD(&s->tasks, task, list); + reqlist_init_req(&s->reqs, &task->req, offset, bytes); return task; } @@ -268,32 +228,34 @@ static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task, int64_t new_bytes) { QEMU_LOCK_GUARD(&task->s->lock); - if (new_bytes == task->bytes) { + if (new_bytes == task->req.bytes) { return; } - assert(new_bytes > 0 && new_bytes < task->bytes); + assert(new_bytes > 0 && new_bytes < task->req.bytes); - task->s->in_flight_bytes -= task->bytes - new_bytes; + task->s->in_flight_bytes -= task->req.bytes - new_bytes; bdrv_set_dirty_bitmap(task->s->copy_bitmap, - task->offset + new_bytes, task->bytes - new_bytes); + task->req.offset + new_bytes, + task->req.bytes - new_bytes); - task->bytes = new_bytes; - qemu_co_queue_restart_all(&task->wait_queue); + reqlist_shrink_req(&task->req, new_bytes); } static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret) { QEMU_LOCK_GUARD(&task->s->lock); - task->s->in_flight_bytes -= task->bytes; + task->s->in_flight_bytes -= task->req.bytes; if (ret < 0) { - bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->offset, task->bytes); + bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->req.offset, + task->req.bytes); } - QLIST_REMOVE(task, list); - progress_set_remaining(task->s->progress, - bdrv_get_dirty_count(task->s->copy_bitmap) + - task->s->in_flight_bytes); - qemu_co_queue_restart_all(&task->wait_queue); + if (task->s->progress) { + progress_set_remaining(task->s->progress, + bdrv_get_dirty_count(task->s->copy_bitmap) + + task->s->in_flight_bytes); + } + reqlist_remove_req(&task->req); } void block_copy_state_free(BlockCopyState *s) @@ -315,35 +277,14 @@ static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target) target->bs->bl.max_transfer)); } -BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target, - int64_t cluster_size, bool use_copy_range, - BdrvRequestFlags write_flags, Error **errp) +void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range, + bool compress) { - BlockCopyState *s; - BdrvDirtyBitmap *copy_bitmap; + /* Keep BDRV_REQ_SERIALISING set (or not set) in block_copy_state_new() */ + s->write_flags = (s->write_flags & BDRV_REQ_SERIALISING) | + (compress ? 
BDRV_REQ_WRITE_COMPRESSED : 0); - copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL, - errp); - if (!copy_bitmap) { - return NULL; - } - bdrv_disable_dirty_bitmap(copy_bitmap); - - s = g_new(BlockCopyState, 1); - *s = (BlockCopyState) { - .source = source, - .target = target, - .copy_bitmap = copy_bitmap, - .cluster_size = cluster_size, - .len = bdrv_dirty_bitmap_size(copy_bitmap), - .write_flags = write_flags, - .mem = shres_create(BLOCK_COPY_MAX_MEM), - .max_transfer = QEMU_ALIGN_DOWN( - block_copy_max_transfer(source, target), - cluster_size), - }; - - if (s->max_transfer < cluster_size) { + if (s->max_transfer < s->cluster_size) { /* * copy_range does not respect max_transfer. We don't want to bother * with requests smaller than block-copy cluster size, so fallback to @@ -351,7 +292,7 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target, * behalf). */ s->method = COPY_READ_WRITE_CLUSTER; - } else if (write_flags & BDRV_REQ_WRITE_COMPRESSED) { + } else if (compress) { /* Compression supports only cluster-size writes and no copy-range. */ s->method = COPY_READ_WRITE_CLUSTER; } else { @@ -361,10 +302,113 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target, */ s->method = use_copy_range ? COPY_RANGE_SMALL : COPY_READ_WRITE; } +} + +static int64_t block_copy_calculate_cluster_size(BlockDriverState *target, + Error **errp) +{ + int ret; + BlockDriverInfo bdi; + bool target_does_cow = bdrv_backing_chain_next(target); + + /* + * If there is no backing file on the target, we cannot rely on COW if our + * backup cluster size is smaller than the target cluster size. Even for + * targets with a backing file, try to avoid COW if possible. + */ + ret = bdrv_get_info(target, &bdi); + if (ret == -ENOTSUP && !target_does_cow) { + /* Cluster size is not defined */ + warn_report("The target block device doesn't provide " + "information about the block size and it doesn't have a " + "backing file. The default block size of %u bytes is " + "used. If the actual block size of the target exceeds " + "this default, the backup may be unusable", + BLOCK_COPY_CLUSTER_SIZE_DEFAULT); + return BLOCK_COPY_CLUSTER_SIZE_DEFAULT; + } else if (ret < 0 && !target_does_cow) { + error_setg_errno(errp, -ret, + "Couldn't determine the cluster size of the target image, " + "which has no backing file"); + error_append_hint(errp, + "Aborting, since this may create an unusable destination image\n"); + return ret; + } else if (ret < 0 && target_does_cow) { + /* Not fatal; just trudge on ahead. 
*/ + return BLOCK_COPY_CLUSTER_SIZE_DEFAULT; + } + + return MAX(BLOCK_COPY_CLUSTER_SIZE_DEFAULT, bdi.cluster_size); +} + +BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target, + const BdrvDirtyBitmap *bitmap, + Error **errp) +{ + ERRP_GUARD(); + BlockCopyState *s; + int64_t cluster_size; + BdrvDirtyBitmap *copy_bitmap; + bool is_fleecing; + + cluster_size = block_copy_calculate_cluster_size(target->bs, errp); + if (cluster_size < 0) { + return NULL; + } + + copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL, + errp); + if (!copy_bitmap) { + return NULL; + } + bdrv_disable_dirty_bitmap(copy_bitmap); + if (bitmap) { + if (!bdrv_merge_dirty_bitmap(copy_bitmap, bitmap, NULL, errp)) { + error_prepend(errp, "Failed to merge bitmap '%s' to internal " + "copy-bitmap: ", bdrv_dirty_bitmap_name(bitmap)); + bdrv_release_dirty_bitmap(copy_bitmap); + return NULL; + } + } else { + bdrv_set_dirty_bitmap(copy_bitmap, 0, + bdrv_dirty_bitmap_size(copy_bitmap)); + } + + /* + * If source is in backing chain of target assume that target is going to be + * used for "image fleecing", i.e. it should represent a kind of snapshot of + * source at backup-start point in time. And target is going to be read by + * somebody (for example, used as NBD export) during backup job. + * + * In this case, we need to add BDRV_REQ_SERIALISING write flag to avoid + * intersection of backup writes and third party reads from target, + * otherwise reading from target we may occasionally read already updated by + * guest data. + * + * For more information see commit f8d59dfb40bb and test + * tests/qemu-iotests/222 + */ + is_fleecing = bdrv_chain_contains(target->bs, source->bs); + + s = g_new(BlockCopyState, 1); + *s = (BlockCopyState) { + .source = source, + .target = target, + .copy_bitmap = copy_bitmap, + .cluster_size = cluster_size, + .len = bdrv_dirty_bitmap_size(copy_bitmap), + .write_flags = (is_fleecing ? 
BDRV_REQ_SERIALISING : 0), + .mem = shres_create(BLOCK_COPY_MAX_MEM), + .max_transfer = QEMU_ALIGN_DOWN( + block_copy_max_transfer(source, target), + cluster_size), + }; + + block_copy_set_copy_opts(s, false, false); ratelimit_init(&s->rate_limit); qemu_co_mutex_init(&s->lock); - QLIST_INIT(&s->tasks); + QLIST_INIT(&s->reqs); QLIST_INIT(&s->calls); return s; @@ -397,7 +441,7 @@ static coroutine_fn int block_copy_task_run(AioTaskPool *pool, aio_task_pool_wait_slot(pool); if (aio_task_pool_status(pool) < 0) { - co_put_to_shres(task->s->mem, task->bytes); + co_put_to_shres(task->s->mem, task->req.bytes); block_copy_task_end(task, -ECANCELED); g_free(task); return -ECANCELED; @@ -510,7 +554,8 @@ static coroutine_fn int block_copy_task_entry(AioTask *task) BlockCopyMethod method = t->method; int ret; - ret = block_copy_do_copy(s, t->offset, t->bytes, &method, &error_is_read); + ret = block_copy_do_copy(s, t->req.offset, t->req.bytes, &method, + &error_is_read); WITH_QEMU_LOCK_GUARD(&s->lock) { if (s->method == t->method) { @@ -522,11 +567,11 @@ static coroutine_fn int block_copy_task_entry(AioTask *task) t->call_state->ret = ret; t->call_state->error_is_read = error_is_read; } - } else { - progress_work_done(s->progress, t->bytes); + } else if (s->progress) { + progress_work_done(s->progress, t->req.bytes); } } - co_put_to_shres(s->mem, t->bytes); + co_put_to_shres(s->mem, t->req.bytes); block_copy_task_end(t, ret); return ret; @@ -606,6 +651,18 @@ static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset, } } +void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes) +{ + QEMU_LOCK_GUARD(&s->lock); + + bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes); + if (s->progress) { + progress_set_remaining(s->progress, + bdrv_get_dirty_count(s->copy_bitmap) + + s->in_flight_bytes); + } +} + /* * Reset bits in copy_bitmap starting at offset if they represent unallocated * data in the image. May reset subsequent contiguous bits. 
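/*
 * Illustrative sketch, not part of this patch: how a caller might use the
 * reworked block-copy API from the hunks above. block_copy_state_new() now
 * derives the cluster size itself, detects fleecing, and takes an optional
 * bitmap; copy options are applied separately via block_copy_set_copy_opts().
 * The wrapper function name and error handling below are assumptions made
 * only for illustration.
 */
#include "qemu/osdep.h"
#include "block/block-copy.h"

static BlockCopyState *example_setup_copy(BdrvChild *source, BdrvChild *target,
                                          const BdrvDirtyBitmap *bitmap,
                                          Error **errp)
{
    BlockCopyState *bcs;

    /* Cluster-size calculation and fleecing detection happen inside. */
    bcs = block_copy_state_new(source, target, bitmap, errp);
    if (!bcs) {
        return NULL;
    }

    /* Opt in to copy_range, no compression; BDRV_REQ_SERIALISING is kept. */
    block_copy_set_copy_opts(bcs, true, false);

    return bcs;
}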
@@ -626,12 +683,7 @@ int64_t block_copy_reset_unallocated(BlockCopyState *s, bytes = clusters * s->cluster_size; if (!ret) { - qemu_co_mutex_lock(&s->lock); - bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes); - progress_set_remaining(s->progress, - bdrv_get_dirty_count(s->copy_bitmap) + - s->in_flight_bytes); - qemu_co_mutex_unlock(&s->lock); + block_copy_reset(s, offset, bytes); } *count = bytes; @@ -678,22 +730,22 @@ block_copy_dirty_clusters(BlockCopyCallState *call_state) trace_block_copy_skip_range(s, offset, bytes); break; } - if (task->offset > offset) { - trace_block_copy_skip_range(s, offset, task->offset - offset); + if (task->req.offset > offset) { + trace_block_copy_skip_range(s, offset, task->req.offset - offset); } found_dirty = true; - ret = block_copy_block_status(s, task->offset, task->bytes, + ret = block_copy_block_status(s, task->req.offset, task->req.bytes, &status_bytes); assert(ret >= 0); /* never fail */ - if (status_bytes < task->bytes) { + if (status_bytes < task->req.bytes) { block_copy_task_shrink(task, status_bytes); } if (qatomic_read(&s->skip_unallocated) && !(ret & BDRV_BLOCK_ALLOCATED)) { block_copy_task_end(task, 0); - trace_block_copy_skip_range(s, task->offset, task->bytes); + trace_block_copy_skip_range(s, task->req.offset, task->req.bytes); offset = task_end(task); bytes = end - offset; g_free(task); @@ -714,11 +766,11 @@ block_copy_dirty_clusters(BlockCopyCallState *call_state) } } - ratelimit_calculate_delay(&s->rate_limit, task->bytes); + ratelimit_calculate_delay(&s->rate_limit, task->req.bytes); - trace_block_copy_process(s, task->offset); + trace_block_copy_process(s, task->req.offset); - co_get_from_shres(s->mem, task->bytes); + co_get_from_shres(s->mem, task->req.bytes); offset = task_end(task); bytes = end - offset; @@ -786,8 +838,8 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state) * Check that there is no task we still need to * wait to complete */ - ret = block_copy_wait_one(s, call_state->offset, - call_state->bytes); + ret = reqlist_wait_one(&s->reqs, call_state->offset, + call_state->bytes, &s->lock); if (ret == 0) { /* * No pending tasks, but check again the bitmap in this @@ -795,7 +847,7 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state) * between this and the critical section in * block_copy_dirty_clusters(). * - * block_copy_wait_one return value 0 also means that it + * reqlist_wait_one return value 0 also means that it * didn't release the lock. So, we are still in the same * critical section, not interrupted by any concurrent * access to state. 
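/*
 * Illustrative sketch, not part of this patch: the request-list pattern that
 * replaces the open-coded task list in the hunks above. A request is
 * registered under the shared CoMutex, concurrent users wait on conflicting
 * ranges, and the owner removes the request when done (which wakes waiters).
 * ExampleState and example_do_range() are hypothetical; s->reqs and s->lock
 * are assumed to be initialized elsewhere with QLIST_INIT() and
 * qemu_co_mutex_init().
 */
#include "qemu/osdep.h"
#include "block/reqlist.h"

typedef struct ExampleState {
    CoMutex lock;
    BlockReqList reqs;
} ExampleState;

static void coroutine_fn example_do_range(ExampleState *s,
                                          int64_t offset, int64_t bytes)
{
    BlockReq req;

    qemu_co_mutex_lock(&s->lock);
    /* Serialize against any overlapping in-flight request. */
    while (reqlist_wait_one(&s->reqs, offset, bytes, &s->lock)) {
        /* a conflicting request finished; re-check for new conflicts */
    }
    reqlist_init_req(&s->reqs, &req, offset, bytes);
    qemu_co_mutex_unlock(&s->lock);

    /* ... perform the actual work for [offset, offset + bytes) ... */

    qemu_co_mutex_lock(&s->lock);
    reqlist_remove_req(&req);   /* also restarts coroutines waiting on req */
    qemu_co_mutex_unlock(&s->lock);
}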
@@ -831,23 +883,42 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state) return ret; } -int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes, - bool ignore_ratelimit) +static void coroutine_fn block_copy_async_co_entry(void *opaque) { - BlockCopyCallState call_state = { + block_copy_common(opaque); +} + +int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes, + bool ignore_ratelimit, uint64_t timeout_ns, + BlockCopyAsyncCallbackFunc cb, + void *cb_opaque) +{ + int ret; + BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1); + + *call_state = (BlockCopyCallState) { .s = s, .offset = start, .bytes = bytes, .ignore_ratelimit = ignore_ratelimit, .max_workers = BLOCK_COPY_MAX_WORKERS, + .cb = cb, + .cb_opaque = cb_opaque, }; - return block_copy_common(&call_state); -} + ret = qemu_co_timeout(block_copy_async_co_entry, call_state, timeout_ns, + g_free); + if (ret < 0) { + assert(ret == -ETIMEDOUT); + block_copy_call_cancel(call_state); + /* call_state will be freed by running coroutine. */ + return ret; + } -static void coroutine_fn block_copy_async_co_entry(void *opaque) -{ - block_copy_common(opaque); + ret = call_state->ret; + g_free(call_state); + + return ret; } BlockCopyCallState *block_copy_async(BlockCopyState *s, @@ -933,6 +1004,11 @@ BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s) return s->copy_bitmap; } +int64_t block_copy_cluster_size(BlockCopyState *s) +{ + return s->cluster_size; +} + void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip) { qatomic_set(&s->skip_unallocated, skip); diff --git a/block/block-ram-registrar.c b/block/block-ram-registrar.c new file mode 100644 index 0000000000..25dbafa789 --- /dev/null +++ b/block/block-ram-registrar.c @@ -0,0 +1,58 @@ +/* + * BlockBackend RAM Registrar + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "sysemu/block-backend.h" +#include "sysemu/block-ram-registrar.h" +#include "qapi/error.h" + +static void ram_block_added(RAMBlockNotifier *n, void *host, size_t size, + size_t max_size) +{ + BlockRAMRegistrar *r = container_of(n, BlockRAMRegistrar, notifier); + Error *err = NULL; + + if (!r->ok) { + return; /* don't try again if we've already failed */ + } + + if (!blk_register_buf(r->blk, host, max_size, &err)) { + error_report_err(err); + ram_block_notifier_remove(&r->notifier); + r->ok = false; + } +} + +static void ram_block_removed(RAMBlockNotifier *n, void *host, size_t size, + size_t max_size) +{ + BlockRAMRegistrar *r = container_of(n, BlockRAMRegistrar, notifier); + blk_unregister_buf(r->blk, host, max_size); +} + +void blk_ram_registrar_init(BlockRAMRegistrar *r, BlockBackend *blk) +{ + r->blk = blk; + r->notifier = (RAMBlockNotifier){ + .ram_block_added = ram_block_added, + .ram_block_removed = ram_block_removed, + + /* + * .ram_block_resized() is not necessary because we use the max_size + * value that does not change across resize. 
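/*
 * Illustrative sketch, not part of this patch: how a device model owning a
 * BlockBackend might use the registrar introduced in this file to keep guest
 * RAM registered with the block layer for its whole lifetime. The device
 * structure and the realize/unrealize function names are assumptions for
 * illustration only.
 */
#include "qemu/osdep.h"
#include "sysemu/block-ram-registrar.h"

typedef struct ExampleDevice {
    BlockBackend *blk;
    BlockRAMRegistrar ram_registrar;
} ExampleDevice;

static void example_device_realize(ExampleDevice *dev)
{
    /* Registers current and future RAM blocks as I/O buffers for dev->blk. */
    blk_ram_registrar_init(&dev->ram_registrar, dev->blk);
}

static void example_device_unrealize(ExampleDevice *dev)
{
    blk_ram_registrar_destroy(&dev->ram_registrar);
}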
+ */ + }; + r->ok = true; + + ram_block_notifier_add(&r->notifier); +} + +void blk_ram_registrar_destroy(BlockRAMRegistrar *r) +{ + if (r->ok) { + ram_block_notifier_remove(&r->notifier); + } +} diff --git a/block/bochs.c b/block/bochs.c index 2f010ab40a..e30e3908d9 100644 --- a/block/bochs.c +++ b/block/bochs.c @@ -110,13 +110,12 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags, return ret; } - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } - ret = bdrv_pread(bs->file, 0, &bochs, sizeof(bochs)); + ret = bdrv_pread(bs->file, 0, sizeof(bochs), &bochs, 0); if (ret < 0) { return ret; } @@ -150,8 +149,8 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags, return -ENOMEM; } - ret = bdrv_pread(bs->file, le32_to_cpu(bochs.header), s->catalog_bitmap, - s->catalog_size * 4); + ret = bdrv_pread(bs->file, le32_to_cpu(bochs.header), s->catalog_size * 4, + s->catalog_bitmap, 0); if (ret < 0) { goto fail; } @@ -224,8 +223,8 @@ static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num) (s->extent_blocks + s->bitmap_blocks)); /* read in bitmap for current extent */ - ret = bdrv_pread(bs->file, bitmap_offset + (extent_offset / 8), - &bitmap_entry, 1); + ret = bdrv_pread(bs->file, bitmap_offset + (extent_offset / 8), 1, + &bitmap_entry, 0); if (ret < 0) { return ret; } @@ -238,8 +237,8 @@ static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num) } static int coroutine_fn -bochs_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +bochs_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BDRVBochsState *s = bs->opaque; uint64_t sector_num = offset >> BDRV_SECTOR_BITS; diff --git a/block/cloop.c b/block/cloop.c index c99192a57f..3ff975a94d 100644 --- a/block/cloop.c +++ b/block/cloop.c @@ -71,14 +71,13 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags, return ret; } - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } /* read header */ - ret = bdrv_pread(bs->file, 128, &s->block_size, 4); + ret = bdrv_pread(bs->file, 128, 4, &s->block_size, 0); if (ret < 0) { return ret; } @@ -104,7 +103,7 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags, return -EINVAL; } - ret = bdrv_pread(bs->file, 128 + 4, &s->n_blocks, 4); + ret = bdrv_pread(bs->file, 128 + 4, 4, &s->n_blocks, 0); if (ret < 0) { return ret; } @@ -135,7 +134,7 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags, return -ENOMEM; } - ret = bdrv_pread(bs->file, 128 + 4 + 4, s->offsets, offsets_size); + ret = bdrv_pread(bs->file, 128 + 4 + 4, offsets_size, s->offsets, 0); if (ret < 0) { goto fail; } @@ -220,9 +219,9 @@ static inline int cloop_read_block(BlockDriverState *bs, int block_num) int ret; uint32_t bytes = s->offsets[block_num + 1] - s->offsets[block_num]; - ret = bdrv_pread(bs->file, s->offsets[block_num], - s->compressed_block, bytes); - if (ret != bytes) { + ret = bdrv_pread(bs->file, s->offsets[block_num], bytes, + s->compressed_block, 0); + if (ret < 0) { return -1; } @@ -245,8 +244,8 @@ static inline int cloop_read_block(BlockDriverState 
*bs, int block_num) } static int coroutine_fn -cloop_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +cloop_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BDRVCloopState *s = bs->opaque; uint64_t sector_num = offset >> BDRV_SECTOR_BITS; diff --git a/block/commit.c b/block/commit.c index 42792b4556..0029b31944 100644 --- a/block/commit.c +++ b/block/commit.c @@ -20,6 +20,7 @@ #include "qapi/error.h" #include "qapi/qmp/qerror.h" #include "qemu/ratelimit.h" +#include "qemu/memalign.h" #include "sysemu/block-backend.h" enum { @@ -134,7 +135,7 @@ static int coroutine_fn commit_run(Job *job, Error **errp) } if (base_len < len) { - ret = blk_truncate(s->base, len, false, PREALLOC_MODE_OFF, 0, NULL); + ret = blk_co_truncate(s->base, len, false, PREALLOC_MODE_OFF, 0, NULL); if (ret) { return ret; } @@ -207,7 +208,7 @@ static const BlockJobDriver commit_job_driver = { }; static int coroutine_fn bdrv_commit_top_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags); } @@ -237,6 +238,7 @@ static BlockDriver bdrv_commit_top = { .bdrv_child_perm = bdrv_commit_top_child_perm, .is_filter = true, + .filtered_child_is_backing = true, }; void commit_start(const char *job_id, BlockDriverState *bs, @@ -253,6 +255,8 @@ void commit_start(const char *job_id, BlockDriverState *bs, uint64_t base_perms, iter_shared_perms; int ret; + GLOBAL_STATE_CODE(); + assert(top != bs); if (bdrv_skip_filters(top) == bdrv_skip_filters(base)) { error_setg(errp, "Invalid files for merge: top and base are the same"); @@ -370,7 +374,6 @@ void commit_start(const char *job_id, BlockDriverState *bs, s->base = blk_new(s->common.job.aio_context, base_perms, BLK_PERM_CONSISTENT_READ - | BLK_PERM_GRAPH_MOD | BLK_PERM_WRITE_UNCHANGED); ret = blk_insert_bs(s->base, base, errp); if (ret < 0) { @@ -433,6 +436,8 @@ int bdrv_commit(BlockDriverState *bs) QEMU_AUTO_VFREE uint8_t *buf = NULL; Error *local_err = NULL; + GLOBAL_STATE_CODE(); + if (!drv) return -ENOMEDIUM; @@ -523,12 +528,12 @@ int bdrv_commit(BlockDriverState *bs) goto ro_cleanup; } if (ret) { - ret = blk_pread(src, offset, buf, n); + ret = blk_pread(src, offset, n, buf, 0); if (ret < 0) { goto ro_cleanup; } - ret = blk_pwrite(backing, offset, buf, n, 0); + ret = blk_pwrite(backing, offset, n, buf, 0); if (ret < 0) { goto ro_cleanup; } diff --git a/block/copy-before-write.c b/block/copy-before-write.c new file mode 100644 index 0000000000..4abaa7339e --- /dev/null +++ b/block/copy-before-write.c @@ -0,0 +1,564 @@ +/* + * copy-before-write filter driver + * + * The driver performs Copy-Before-Write (CBW) operation: it is injected above + * some node, and before each write it copies _old_ data to the target node. + * + * Copyright (c) 2018-2021 Virtuozzo International GmbH. + * + * Author: + * Sementsov-Ogievskiy Vladimir + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
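/*
 * Illustrative sketch, not part of this patch: the updated bdrv_pread()/
 * blk_pread() calling convention applied throughout this series. The byte
 * count now precedes the buffer and a flags argument is always passed;
 * failures (including short reads) are reported as negative values, so
 * callers test ret < 0 instead of comparing against the requested length.
 * ExampleHeader and example_read_header() are hypothetical.
 */
#include "qemu/osdep.h"
#include "block/block_int.h"

typedef struct ExampleHeader {
    uint32_t magic;
    uint32_t version;
} ExampleHeader;

static int example_read_header(BlockDriverState *bs, ExampleHeader *hdr)
{
    int ret;

    ret = bdrv_pread(bs->file, 0, sizeof(*hdr), hdr, 0);
    if (ret < 0) {
        return ret;     /* error or short read */
    }

    return 0;
}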
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qapi/qmp/qjson.h" + +#include "sysemu/block-backend.h" +#include "qemu/cutils.h" +#include "qapi/error.h" +#include "block/block_int.h" +#include "block/qdict.h" +#include "block/block-copy.h" + +#include "block/copy-before-write.h" +#include "block/reqlist.h" + +#include "qapi/qapi-visit-block-core.h" + +typedef struct BDRVCopyBeforeWriteState { + BlockCopyState *bcs; + BdrvChild *target; + OnCbwError on_cbw_error; + uint32_t cbw_timeout_ns; + + /* + * @lock: protects access to @access_bitmap, @done_bitmap and + * @frozen_read_reqs + */ + CoMutex lock; + + /* + * @access_bitmap: represents areas allowed for reading by fleecing user. + * Reading from non-dirty areas leads to -EACCES. + */ + BdrvDirtyBitmap *access_bitmap; + + /* + * @done_bitmap: represents areas that was successfully copied to @target by + * copy-before-write operations. + */ + BdrvDirtyBitmap *done_bitmap; + + /* + * @frozen_read_reqs: current read requests for fleecing user in bs->file + * node. These areas must not be rewritten by guest. + */ + BlockReqList frozen_read_reqs; + + /* + * @snapshot_error is normally zero. But on first copy-before-write failure + * when @on_cbw_error == ON_CBW_ERROR_BREAK_SNAPSHOT, @snapshot_error takes + * value of this error (<0). After that all in-flight and further + * snapshot-API requests will fail with that error. + */ + int snapshot_error; +} BDRVCopyBeforeWriteState; + +static coroutine_fn int cbw_co_preadv( + BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) +{ + return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags); +} + +static void block_copy_cb(void *opaque) +{ + BlockDriverState *bs = opaque; + + bdrv_dec_in_flight(bs); +} + +/* + * Do copy-before-write operation. + * + * On failure guest request must be failed too. + * + * On success, we also wait for all in-flight fleecing read requests in source + * node, and it's guaranteed that after cbw_do_copy_before_write() successful + * return there are no such requests and they will never appear. + */ +static coroutine_fn int cbw_do_copy_before_write(BlockDriverState *bs, + uint64_t offset, uint64_t bytes, BdrvRequestFlags flags) +{ + BDRVCopyBeforeWriteState *s = bs->opaque; + int ret; + uint64_t off, end; + int64_t cluster_size = block_copy_cluster_size(s->bcs); + + if (flags & BDRV_REQ_WRITE_UNCHANGED) { + return 0; + } + + if (s->snapshot_error) { + return 0; + } + + off = QEMU_ALIGN_DOWN(offset, cluster_size); + end = QEMU_ALIGN_UP(offset + bytes, cluster_size); + + /* + * Increase in_flight, so that in case of timed-out block-copy, the + * remaining background block_copy() request (which can't be immediately + * cancelled by timeout) is presented in bs->in_flight. This way we are + * sure that on bs close() we'll previously wait for all timed-out but yet + * running block_copy calls. 
+ */ + bdrv_inc_in_flight(bs); + ret = block_copy(s->bcs, off, end - off, true, s->cbw_timeout_ns, + block_copy_cb, bs); + if (ret < 0 && s->on_cbw_error == ON_CBW_ERROR_BREAK_GUEST_WRITE) { + return ret; + } + + WITH_QEMU_LOCK_GUARD(&s->lock) { + if (ret < 0) { + assert(s->on_cbw_error == ON_CBW_ERROR_BREAK_SNAPSHOT); + if (!s->snapshot_error) { + s->snapshot_error = ret; + } + } else { + bdrv_set_dirty_bitmap(s->done_bitmap, off, end - off); + } + reqlist_wait_all(&s->frozen_read_reqs, off, end - off, &s->lock); + } + + return 0; +} + +static int coroutine_fn cbw_co_pdiscard(BlockDriverState *bs, + int64_t offset, int64_t bytes) +{ + int ret = cbw_do_copy_before_write(bs, offset, bytes, 0); + if (ret < 0) { + return ret; + } + + return bdrv_co_pdiscard(bs->file, offset, bytes); +} + +static int coroutine_fn cbw_co_pwrite_zeroes(BlockDriverState *bs, + int64_t offset, int64_t bytes, BdrvRequestFlags flags) +{ + int ret = cbw_do_copy_before_write(bs, offset, bytes, flags); + if (ret < 0) { + return ret; + } + + return bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags); +} + +static coroutine_fn int cbw_co_pwritev(BlockDriverState *bs, + int64_t offset, + int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) +{ + int ret = cbw_do_copy_before_write(bs, offset, bytes, flags); + if (ret < 0) { + return ret; + } + + return bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags); +} + +static int coroutine_fn cbw_co_flush(BlockDriverState *bs) +{ + if (!bs->file) { + return 0; + } + + return bdrv_co_flush(bs->file->bs); +} + +/* + * If @offset not accessible - return NULL. + * + * Otherwise, set @pnum to some bytes that accessible from @file (@file is set + * to bs->file or to s->target). Return newly allocated BlockReq object that + * should be than passed to cbw_snapshot_read_unlock(). + * + * It's guaranteed that guest writes will not interact in the region until + * cbw_snapshot_read_unlock() called. + */ +static coroutine_fn BlockReq * +cbw_snapshot_read_lock(BlockDriverState *bs, int64_t offset, int64_t bytes, + int64_t *pnum, BdrvChild **file) +{ + BDRVCopyBeforeWriteState *s = bs->opaque; + BlockReq *req = g_new(BlockReq, 1); + bool done; + + QEMU_LOCK_GUARD(&s->lock); + + if (s->snapshot_error) { + g_free(req); + return NULL; + } + + if (bdrv_dirty_bitmap_next_zero(s->access_bitmap, offset, bytes) != -1) { + g_free(req); + return NULL; + } + + done = bdrv_dirty_bitmap_status(s->done_bitmap, offset, bytes, pnum); + if (done) { + /* + * Special invalid BlockReq, that is handled in + * cbw_snapshot_read_unlock(). We don't need to lock something to read + * from s->target. 
+ */ + *req = (BlockReq) {.offset = -1, .bytes = -1}; + *file = s->target; + } else { + reqlist_init_req(&s->frozen_read_reqs, req, offset, bytes); + *file = bs->file; + } + + return req; +} + +static coroutine_fn void +cbw_snapshot_read_unlock(BlockDriverState *bs, BlockReq *req) +{ + BDRVCopyBeforeWriteState *s = bs->opaque; + + if (req->offset == -1 && req->bytes == -1) { + g_free(req); + return; + } + + QEMU_LOCK_GUARD(&s->lock); + + reqlist_remove_req(req); + g_free(req); +} + +static coroutine_fn int +cbw_co_preadv_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset) +{ + BlockReq *req; + BdrvChild *file; + int ret; + + /* TODO: upgrade to async loop using AioTask */ + while (bytes) { + int64_t cur_bytes; + + req = cbw_snapshot_read_lock(bs, offset, bytes, &cur_bytes, &file); + if (!req) { + return -EACCES; + } + + ret = bdrv_co_preadv_part(file, offset, cur_bytes, + qiov, qiov_offset, 0); + cbw_snapshot_read_unlock(bs, req); + if (ret < 0) { + return ret; + } + + bytes -= cur_bytes; + offset += cur_bytes; + qiov_offset += cur_bytes; + } + + return 0; +} + +static int coroutine_fn +cbw_co_snapshot_block_status(BlockDriverState *bs, + bool want_zero, int64_t offset, int64_t bytes, + int64_t *pnum, int64_t *map, + BlockDriverState **file) +{ + BDRVCopyBeforeWriteState *s = bs->opaque; + BlockReq *req; + int ret; + int64_t cur_bytes; + BdrvChild *child; + + req = cbw_snapshot_read_lock(bs, offset, bytes, &cur_bytes, &child); + if (!req) { + return -EACCES; + } + + ret = bdrv_block_status(child->bs, offset, cur_bytes, pnum, map, file); + if (child == s->target) { + /* + * We refer to s->target only for areas that we've written to it. + * And we can not report unallocated blocks in s->target: this will + * break generic block-status-above logic, that will go to + * copy-before-write filtered child in this case. + */ + assert(ret & BDRV_BLOCK_ALLOCATED); + } + + cbw_snapshot_read_unlock(bs, req); + + return ret; +} + +static int coroutine_fn cbw_co_pdiscard_snapshot(BlockDriverState *bs, + int64_t offset, int64_t bytes) +{ + BDRVCopyBeforeWriteState *s = bs->opaque; + + WITH_QEMU_LOCK_GUARD(&s->lock) { + bdrv_reset_dirty_bitmap(s->access_bitmap, offset, bytes); + } + + block_copy_reset(s->bcs, offset, bytes); + + return bdrv_co_pdiscard(s->target, offset, bytes); +} + +static void cbw_refresh_filename(BlockDriverState *bs) +{ + pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), + bs->file->bs->filename); +} + +static void cbw_child_perm(BlockDriverState *bs, BdrvChild *c, + BdrvChildRole role, + BlockReopenQueue *reopen_queue, + uint64_t perm, uint64_t shared, + uint64_t *nperm, uint64_t *nshared) +{ + if (!(role & BDRV_CHILD_FILTERED)) { + /* + * Target child + * + * Share write to target (child_file), to not interfere + * with guest writes to its disk which may be in target backing chain. + * Can't resize during a backup block job because we check the size + * only upfront. 
+ */ + *nshared = BLK_PERM_ALL & ~BLK_PERM_RESIZE; + *nperm = BLK_PERM_WRITE; + } else { + /* Source child */ + bdrv_default_perms(bs, c, role, reopen_queue, + perm, shared, nperm, nshared); + + if (!QLIST_EMPTY(&bs->parents)) { + if (perm & BLK_PERM_WRITE) { + *nperm = *nperm | BLK_PERM_CONSISTENT_READ; + } + *nshared &= ~(BLK_PERM_WRITE | BLK_PERM_RESIZE); + } + } +} + +static BlockdevOptions *cbw_parse_options(QDict *options, Error **errp) +{ + BlockdevOptions *opts = NULL; + Visitor *v = NULL; + + qdict_put_str(options, "driver", "copy-before-write"); + + v = qobject_input_visitor_new_flat_confused(options, errp); + if (!v) { + goto out; + } + + visit_type_BlockdevOptions(v, NULL, &opts, errp); + if (!opts) { + goto out; + } + + /* + * Delete options which we are going to parse through BlockdevOptions + * object for original options. + */ + qdict_extract_subqdict(options, NULL, "bitmap"); + qdict_del(options, "on-cbw-error"); + qdict_del(options, "cbw-timeout"); + +out: + visit_free(v); + qdict_del(options, "driver"); + + return opts; +} + +static int cbw_open(BlockDriverState *bs, QDict *options, int flags, + Error **errp) +{ + BDRVCopyBeforeWriteState *s = bs->opaque; + BdrvDirtyBitmap *bitmap = NULL; + int64_t cluster_size; + g_autoptr(BlockdevOptions) full_opts = NULL; + BlockdevOptionsCbw *opts; + int ret; + + full_opts = cbw_parse_options(options, errp); + if (!full_opts) { + return -EINVAL; + } + assert(full_opts->driver == BLOCKDEV_DRIVER_COPY_BEFORE_WRITE); + opts = &full_opts->u.copy_before_write; + + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; + } + + s->target = bdrv_open_child(NULL, options, "target", bs, &child_of_bds, + BDRV_CHILD_DATA, false, errp); + if (!s->target) { + return -EINVAL; + } + + if (opts->has_bitmap) { + bitmap = block_dirty_bitmap_lookup(opts->bitmap->node, + opts->bitmap->name, NULL, errp); + if (!bitmap) { + return -EINVAL; + } + } + s->on_cbw_error = opts->has_on_cbw_error ? opts->on_cbw_error : + ON_CBW_ERROR_BREAK_GUEST_WRITE; + s->cbw_timeout_ns = opts->has_cbw_timeout ? 
+ opts->cbw_timeout * NANOSECONDS_PER_SECOND : 0; + + bs->total_sectors = bs->file->bs->total_sectors; + bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED | + (BDRV_REQ_FUA & bs->file->bs->supported_write_flags); + bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED | + ((BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK) & + bs->file->bs->supported_zero_flags); + + s->bcs = block_copy_state_new(bs->file, s->target, bitmap, errp); + if (!s->bcs) { + error_prepend(errp, "Cannot create block-copy-state: "); + return -EINVAL; + } + + cluster_size = block_copy_cluster_size(s->bcs); + + s->done_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp); + if (!s->done_bitmap) { + return -EINVAL; + } + bdrv_disable_dirty_bitmap(s->done_bitmap); + + /* s->access_bitmap starts equal to bcs bitmap */ + s->access_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp); + if (!s->access_bitmap) { + return -EINVAL; + } + bdrv_disable_dirty_bitmap(s->access_bitmap); + bdrv_dirty_bitmap_merge_internal(s->access_bitmap, + block_copy_dirty_bitmap(s->bcs), NULL, + true); + + qemu_co_mutex_init(&s->lock); + QLIST_INIT(&s->frozen_read_reqs); + + return 0; +} + +static void cbw_close(BlockDriverState *bs) +{ + BDRVCopyBeforeWriteState *s = bs->opaque; + + bdrv_release_dirty_bitmap(s->access_bitmap); + bdrv_release_dirty_bitmap(s->done_bitmap); + + block_copy_state_free(s->bcs); + s->bcs = NULL; +} + +BlockDriver bdrv_cbw_filter = { + .format_name = "copy-before-write", + .instance_size = sizeof(BDRVCopyBeforeWriteState), + + .bdrv_open = cbw_open, + .bdrv_close = cbw_close, + + .bdrv_co_preadv = cbw_co_preadv, + .bdrv_co_pwritev = cbw_co_pwritev, + .bdrv_co_pwrite_zeroes = cbw_co_pwrite_zeroes, + .bdrv_co_pdiscard = cbw_co_pdiscard, + .bdrv_co_flush = cbw_co_flush, + + .bdrv_co_preadv_snapshot = cbw_co_preadv_snapshot, + .bdrv_co_pdiscard_snapshot = cbw_co_pdiscard_snapshot, + .bdrv_co_snapshot_block_status = cbw_co_snapshot_block_status, + + .bdrv_refresh_filename = cbw_refresh_filename, + + .bdrv_child_perm = cbw_child_perm, + + .is_filter = true, +}; + +BlockDriverState *bdrv_cbw_append(BlockDriverState *source, + BlockDriverState *target, + const char *filter_node_name, + BlockCopyState **bcs, + Error **errp) +{ + ERRP_GUARD(); + BDRVCopyBeforeWriteState *state; + BlockDriverState *top; + QDict *opts; + + assert(source->total_sectors == target->total_sectors); + GLOBAL_STATE_CODE(); + + opts = qdict_new(); + qdict_put_str(opts, "driver", "copy-before-write"); + if (filter_node_name) { + qdict_put_str(opts, "node-name", filter_node_name); + } + qdict_put_str(opts, "file", bdrv_get_node_name(source)); + qdict_put_str(opts, "target", bdrv_get_node_name(target)); + + top = bdrv_insert_node(source, opts, BDRV_O_RDWR, errp); + if (!top) { + return NULL; + } + + state = top->opaque; + *bcs = state->bcs; + + return top; +} + +void bdrv_cbw_drop(BlockDriverState *bs) +{ + GLOBAL_STATE_CODE(); + bdrv_drop_filter(bs, &error_abort); + bdrv_unref(bs); +} + +static void cbw_init(void) +{ + bdrv_register(&bdrv_cbw_filter); +} + +block_init(cbw_init); diff --git a/block/backup-top.h b/block/copy-before-write.h similarity index 56% rename from block/backup-top.h rename to block/copy-before-write.h index b28b0031c4..6e72bb25e9 100644 --- a/block/backup-top.h +++ b/block/copy-before-write.h @@ -1,10 +1,10 @@ /* - * backup-top filter driver + * copy-before-write filter driver * * The driver performs Copy-Before-Write (CBW) operation: it is injected above * some node, and before each write it copies 
_old_ data to the target node. * - * Copyright (c) 2018-2019 Virtuozzo International GmbH. + * Copyright (c) 2018-2021 Virtuozzo International GmbH. * * Author: * Sementsov-Ogievskiy Vladimir @@ -23,20 +23,24 @@ * along with this program. If not, see . */ -#ifndef BACKUP_TOP_H -#define BACKUP_TOP_H +#ifndef COPY_BEFORE_WRITE_H +#define COPY_BEFORE_WRITE_H #include "block/block_int.h" #include "block/block-copy.h" -BlockDriverState *bdrv_backup_top_append(BlockDriverState *source, - BlockDriverState *target, - const char *filter_node_name, - uint64_t cluster_size, - BackupPerf *perf, - BdrvRequestFlags write_flags, - BlockCopyState **bcs, - Error **errp); -void bdrv_backup_top_drop(BlockDriverState *bs); +/* + * Global state (GS) API. These functions run under the BQL. + * + * See include/block/block-global-state.h for more information about + * the GS API. + */ -#endif /* BACKUP_TOP_H */ +BlockDriverState *bdrv_cbw_append(BlockDriverState *source, + BlockDriverState *target, + const char *filter_node_name, + BlockCopyState **bcs, + Error **errp); +void bdrv_cbw_drop(BlockDriverState *bs); + +#endif /* COPY_BEFORE_WRITE_H */ diff --git a/block/copy-on-read.c b/block/copy-on-read.c index c428682272..815ac1d835 100644 --- a/block/copy-on-read.c +++ b/block/copy-on-read.c @@ -41,12 +41,11 @@ static int cor_open(BlockDriverState *bs, QDict *options, int flags, BDRVStateCOR *state = bs->opaque; /* Find a bottom node name, if any */ const char *bottom_node = qdict_get_try_str(options, "bottom"); + int ret; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } bs->supported_read_flags = BDRV_REQ_PREFETCH; @@ -128,10 +127,10 @@ static int64_t cor_getlength(BlockDriverState *bs) static int coroutine_fn cor_co_preadv_part(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, + int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset, - int flags) + BdrvRequestFlags flags) { int64_t n; int local_flags; @@ -181,10 +180,11 @@ static int coroutine_fn cor_co_preadv_part(BlockDriverState *bs, static int coroutine_fn cor_co_pwritev_part(BlockDriverState *bs, - uint64_t offset, - uint64_t bytes, + int64_t offset, + int64_t bytes, QEMUIOVector *qiov, - size_t qiov_offset, int flags) + size_t qiov_offset, + BdrvRequestFlags flags) { return bdrv_co_pwritev_part(bs->file, offset, bytes, qiov, qiov_offset, flags); @@ -192,7 +192,7 @@ static int coroutine_fn cor_co_pwritev_part(BlockDriverState *bs, static int coroutine_fn cor_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { return bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags); @@ -200,15 +200,15 @@ static int coroutine_fn cor_co_pwrite_zeroes(BlockDriverState *bs, static int coroutine_fn cor_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { return bdrv_co_pdiscard(bs->file, offset, bytes); } static int coroutine_fn cor_co_pwritev_compressed(BlockDriverState *bs, - uint64_t offset, - uint64_t bytes, + int64_t offset, + int64_t bytes, QEMUIOVector *qiov) { return bdrv_co_pwritev(bs->file, offset, bytes, qiov, diff --git a/block/copy-on-read.h b/block/copy-on-read.h index 7bf405dccd..1d8ad38c74 100644 --- a/block/copy-on-read.h +++ b/block/copy-on-read.h @@ -22,11 +22,11 @@ * along with this program. 
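/*
 * Illustrative sketch, not part of this patch: how a backup-style job might
 * drive the renamed filter API. bdrv_cbw_append() inserts the
 * copy-before-write filter above @source and hands back the BlockCopyState
 * it created; bdrv_cbw_drop() removes the filter again when the job ends.
 * The example_* function names and the omitted job logic are assumptions.
 */
#include "qemu/osdep.h"
#include "block/copy-before-write.h"

static BlockDriverState *example_start_backup(BlockDriverState *source,
                                              BlockDriverState *target,
                                              BlockCopyState **bcs,
                                              Error **errp)
{
    BlockDriverState *cbw;

    cbw = bdrv_cbw_append(source, target, NULL, bcs, errp);
    if (!cbw) {
        return NULL;
    }

    /* ... run the background copy via *bcs ... */

    return cbw;
}

static void example_finish_backup(BlockDriverState *cbw)
{
    bdrv_cbw_drop(cbw);
}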
If not, see . */ -#ifndef BLOCK_COPY_ON_READ -#define BLOCK_COPY_ON_READ +#ifndef BLOCK_COPY_ON_READ_H +#define BLOCK_COPY_ON_READ_H #include "block/block_int.h" void bdrv_cor_filter_drop(BlockDriverState *cor_filter_bs); -#endif /* BLOCK_COPY_ON_READ */ +#endif /* BLOCK_COPY_ON_READ_H */ diff --git a/block/coroutines.h b/block/coroutines.h index 514d169d23..3a2bad564f 100644 --- a/block/coroutines.h +++ b/block/coroutines.h @@ -22,22 +22,25 @@ * THE SOFTWARE. */ -#ifndef BLOCK_COROUTINES_INT_H -#define BLOCK_COROUTINES_INT_H +#ifndef BLOCK_COROUTINES_H +#define BLOCK_COROUTINES_H #include "block/block_int.h" +/* For blk_bs() in generated block/block-gen.c */ +#include "sysemu/block-backend.h" + +/* + * I/O API functions. These functions are thread-safe. + * + * See include/block/block-io.h for more information about + * the I/O API. + */ + int coroutine_fn bdrv_co_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix); int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp); -int generated_co_wrapper -bdrv_preadv(BdrvChild *child, int64_t offset, unsigned int bytes, - QEMUIOVector *qiov, BdrvRequestFlags flags); -int generated_co_wrapper -bdrv_pwritev(BdrvChild *child, int64_t offset, unsigned int bytes, - QEMUIOVector *qiov, BdrvRequestFlags flags); - int coroutine_fn bdrv_co_common_block_status_above(BlockDriverState *bs, BlockDriverState *base, @@ -49,6 +52,25 @@ bdrv_co_common_block_status_above(BlockDriverState *bs, int64_t *map, BlockDriverState **file, int *depth); + +int coroutine_fn bdrv_co_readv_vmstate(BlockDriverState *bs, + QEMUIOVector *qiov, int64_t pos); +int coroutine_fn bdrv_co_writev_vmstate(BlockDriverState *bs, + QEMUIOVector *qiov, int64_t pos); + +int coroutine_fn +nbd_co_do_establish_connection(BlockDriverState *bs, bool blocking, + Error **errp); + + +/* + * "I/O or GS" API functions. These functions can run without + * the BQL, but only in one specific iothread/main loop. + * + * See include/block/block-io.h for more information about + * the "I/O or GS" API. 
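/*
 * Illustrative sketch, not part of this patch: the API-classification
 * assertions that this series sprinkles through the block layer
 * (GLOBAL_STATE_CODE() in commit.c, create.c; IO_CODE() in dirty-bitmap.c).
 * Global-state functions must run in the main loop under the BQL; I/O
 * functions may run from any iothread. The function names below are
 * hypothetical.
 */
#include "qemu/osdep.h"
#include "block/block_int.h"

static void example_graph_manipulation(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();    /* main loop only, BQL held */
    /* ... modify the block graph ... */
}

static int coroutine_fn example_do_io(BlockDriverState *bs, int64_t offset)
{
    IO_CODE();              /* safe from any AioContext */
    /* ... issue I/O on bs ... */
    return 0;
}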
+ */ + int generated_co_wrapper bdrv_common_block_status_above(BlockDriverState *bs, BlockDriverState *base, @@ -60,16 +82,7 @@ bdrv_common_block_status_above(BlockDriverState *bs, int64_t *map, BlockDriverState **file, int *depth); - -int coroutine_fn bdrv_co_readv_vmstate(BlockDriverState *bs, - QEMUIOVector *qiov, int64_t pos); -int coroutine_fn bdrv_co_writev_vmstate(BlockDriverState *bs, - QEMUIOVector *qiov, int64_t pos); - int generated_co_wrapper -nbd_do_establish_connection(BlockDriverState *bs, Error **errp); -int coroutine_fn -nbd_co_do_establish_connection(BlockDriverState *bs, Error **errp); +nbd_do_establish_connection(BlockDriverState *bs, bool blocking, Error **errp); - -#endif /* BLOCK_COROUTINES_INT_H */ +#endif /* BLOCK_COROUTINES_H */ diff --git a/block/create.c b/block/create.c index 89812669df..4df43f11f4 100644 --- a/block/create.c +++ b/block/create.c @@ -42,6 +42,8 @@ static int coroutine_fn blockdev_create_run(Job *job, Error **errp) BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common); int ret; + GLOBAL_STATE_CODE(); + job_progress_set_remaining(&s->common, 1); ret = s->drv->bdrv_co_create(s->opts, errp); job_progress_update(&s->common, 1); diff --git a/block/crypto.c b/block/crypto.c index 1d30fde38e..2fb8add458 100644 --- a/block/crypto.c +++ b/block/crypto.c @@ -31,6 +31,7 @@ #include "qemu/module.h" #include "qemu/option.h" #include "qemu/cutils.h" +#include "qemu/memalign.h" #include "crypto.h" typedef struct BlockCrypto BlockCrypto; @@ -54,40 +55,40 @@ static int block_crypto_probe_generic(QCryptoBlockFormat format, } -static ssize_t block_crypto_read_func(QCryptoBlock *block, - size_t offset, - uint8_t *buf, - size_t buflen, - void *opaque, - Error **errp) +static int block_crypto_read_func(QCryptoBlock *block, + size_t offset, + uint8_t *buf, + size_t buflen, + void *opaque, + Error **errp) { BlockDriverState *bs = opaque; ssize_t ret; - ret = bdrv_pread(bs->file, offset, buf, buflen); + ret = bdrv_pread(bs->file, offset, buflen, buf, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Could not read encryption header"); return ret; } - return ret; + return 0; } -static ssize_t block_crypto_write_func(QCryptoBlock *block, - size_t offset, - const uint8_t *buf, - size_t buflen, - void *opaque, - Error **errp) +static int block_crypto_write_func(QCryptoBlock *block, + size_t offset, + const uint8_t *buf, + size_t buflen, + void *opaque, + Error **errp) { BlockDriverState *bs = opaque; ssize_t ret; - ret = bdrv_pwrite(bs->file, offset, buf, buflen); + ret = bdrv_pwrite(bs->file, offset, buflen, buf, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Could not write encryption header"); return ret; } - return ret; + return 0; } @@ -98,28 +99,28 @@ struct BlockCryptoCreateData { }; -static ssize_t block_crypto_create_write_func(QCryptoBlock *block, - size_t offset, - const uint8_t *buf, - size_t buflen, - void *opaque, - Error **errp) +static int block_crypto_create_write_func(QCryptoBlock *block, + size_t offset, + const uint8_t *buf, + size_t buflen, + void *opaque, + Error **errp) { struct BlockCryptoCreateData *data = opaque; ssize_t ret; - ret = blk_pwrite(data->blk, offset, buf, buflen, 0); + ret = blk_pwrite(data->blk, offset, buflen, buf, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Could not write encryption header"); return ret; } - return ret; + return 0; } -static ssize_t block_crypto_create_init_func(QCryptoBlock *block, - size_t headerlen, - void *opaque, - Error **errp) +static int block_crypto_create_init_func(QCryptoBlock *block, 
+ size_t headerlen, + void *opaque, + Error **errp) { struct BlockCryptoCreateData *data = opaque; Error *local_error = NULL; @@ -138,7 +139,7 @@ static ssize_t block_crypto_create_init_func(QCryptoBlock *block, data->prealloc, 0, &local_error); if (ret >= 0) { - return ret; + return 0; } error: @@ -260,15 +261,14 @@ static int block_crypto_open_generic(QCryptoBlockFormat format, { BlockCrypto *crypto = bs->opaque; QemuOpts *opts = NULL; - int ret = -EINVAL; + int ret; QCryptoBlockOpenOptions *open_opts = NULL; unsigned int cflags = 0; QDict *cryptoopts = NULL; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } bs->supported_write_flags = BDRV_REQ_FUA & @@ -276,6 +276,7 @@ static int block_crypto_open_generic(QCryptoBlockFormat format, opts = qemu_opts_create(opts_spec, NULL, 0, &error_abort); if (!qemu_opts_absorb_qdict(opts, options, errp)) { + ret = -EINVAL; goto cleanup; } @@ -284,6 +285,7 @@ static int block_crypto_open_generic(QCryptoBlockFormat format, open_opts = block_crypto_open_opts_init(cryptoopts, errp); if (!open_opts) { + ret = -EINVAL; goto cleanup; } @@ -397,8 +399,8 @@ static int block_crypto_reopen_prepare(BDRVReopenState *state, #define BLOCK_CRYPTO_MAX_IO_SIZE (1024 * 1024) static coroutine_fn int -block_crypto_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +block_crypto_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BlockCrypto *crypto = bs->opaque; uint64_t cur_bytes; /* number of bytes in current iteration */ @@ -409,7 +411,6 @@ block_crypto_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, uint64_t sector_size = qcrypto_block_get_sector_size(crypto->block); uint64_t payload_offset = qcrypto_block_get_payload_offset(crypto->block); - assert(!flags); assert(payload_offset < INT64_MAX); assert(QEMU_IS_ALIGNED(offset, sector_size)); assert(QEMU_IS_ALIGNED(bytes, sector_size)); @@ -460,8 +461,8 @@ block_crypto_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, static coroutine_fn int -block_crypto_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +block_crypto_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BlockCrypto *crypto = bs->opaque; uint64_t cur_bytes; /* number of bytes in current iteration */ @@ -472,7 +473,8 @@ block_crypto_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, uint64_t sector_size = qcrypto_block_get_sector_size(crypto->block); uint64_t payload_offset = qcrypto_block_get_payload_offset(crypto->block); - assert(!(flags & ~BDRV_REQ_FUA)); + flags &= ~BDRV_REQ_REGISTERED_BUF; + assert(payload_offset < INT64_MAX); assert(QEMU_IS_ALIGNED(offset, sector_size)); assert(QEMU_IS_ALIGNED(bytes, sector_size)); @@ -777,6 +779,37 @@ block_crypto_get_specific_info_luks(BlockDriverState *bs, Error **errp) return spec_info; } +static int +block_crypto_amend_prepare(BlockDriverState *bs, Error **errp) +{ + BlockCrypto *crypto = bs->opaque; + int ret; + + /* apply for exclusive read/write permissions to the underlying file */ + crypto->updating_keys = true; + ret = bdrv_child_refresh_perms(bs, bs->file, errp); + if (ret < 0) { + /* Well, in this case we will not be updating any keys */ + crypto->updating_keys = 
false; + } + return ret; +} + +static void +block_crypto_amend_cleanup(BlockDriverState *bs) +{ + BlockCrypto *crypto = bs->opaque; + Error *errp = NULL; + + /* release exclusive read/write permissions to the underlying file */ + crypto->updating_keys = false; + bdrv_child_refresh_perms(bs, bs->file, &errp); + + if (errp) { + error_report_err(errp); + } +} + static int block_crypto_amend_options_generic_luks(BlockDriverState *bs, QCryptoBlockAmendOptions *amend_options, @@ -784,30 +817,17 @@ block_crypto_amend_options_generic_luks(BlockDriverState *bs, Error **errp) { BlockCrypto *crypto = bs->opaque; - int ret; assert(crypto); assert(crypto->block); - /* apply for exclusive read/write permissions to the underlying file*/ - crypto->updating_keys = true; - ret = bdrv_child_refresh_perms(bs, bs->file, errp); - if (ret) { - goto cleanup; - } - - ret = qcrypto_block_amend_options(crypto->block, - block_crypto_read_func, - block_crypto_write_func, - bs, - amend_options, - force, - errp); -cleanup: - /* release exclusive read/write permissions to the underlying file*/ - crypto->updating_keys = false; - bdrv_child_refresh_perms(bs, bs->file, errp); - return ret; + return qcrypto_block_amend_options(crypto->block, + block_crypto_read_func, + block_crypto_write_func, + bs, + amend_options, + force, + errp); } static int @@ -833,8 +853,16 @@ block_crypto_amend_options_luks(BlockDriverState *bs, if (!amend_options) { goto cleanup; } + + ret = block_crypto_amend_prepare(bs, errp); + if (ret) { + goto perm_cleanup; + } ret = block_crypto_amend_options_generic_luks(bs, amend_options, force, errp); + +perm_cleanup: + block_crypto_amend_cleanup(bs); cleanup: qapi_free_QCryptoBlockAmendOptions(amend_options); return ret; @@ -931,6 +959,8 @@ static BlockDriver bdrv_crypto_luks = { .bdrv_get_specific_info = block_crypto_get_specific_info_luks, .bdrv_amend_options = block_crypto_amend_options_luks, .bdrv_co_amend = block_crypto_co_amend_luks, + .bdrv_amend_pre_run = block_crypto_amend_prepare, + .bdrv_amend_clean = block_crypto_amend_cleanup, .is_format = true, diff --git a/block/curl.c b/block/curl.c index 50e741a0d7..0b125095e3 100644 --- a/block/curl.c +++ b/block/curl.c @@ -37,8 +37,15 @@ // #define DEBUG_VERBOSE +/* CURL 7.85.0 switches to a string based API for specifying + * the desired protocols. 
+ */ +#if LIBCURL_VERSION_NUM >= 0x075500 +#define PROTOCOLS "HTTP,HTTPS,FTP,FTPS" +#else #define PROTOCOLS (CURLPROTO_HTTP | CURLPROTO_HTTPS | \ CURLPROTO_FTP | CURLPROTO_FTPS) +#endif #define CURL_NUM_STATES 8 #define CURL_NUM_ACB 8 @@ -125,7 +132,7 @@ static gboolean curl_drop_socket(void *key, void *value, void *opaque) BDRVCURLState *s = socket->s; aio_set_fd_handler(s->aio_context, socket->fd, false, - NULL, NULL, NULL, NULL); + NULL, NULL, NULL, NULL, NULL); return true; } @@ -173,19 +180,20 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action, switch (action) { case CURL_POLL_IN: aio_set_fd_handler(s->aio_context, fd, false, - curl_multi_do, NULL, NULL, socket); + curl_multi_do, NULL, NULL, NULL, socket); break; case CURL_POLL_OUT: aio_set_fd_handler(s->aio_context, fd, false, - NULL, curl_multi_do, NULL, socket); + NULL, curl_multi_do, NULL, NULL, socket); break; case CURL_POLL_INOUT: aio_set_fd_handler(s->aio_context, fd, false, - curl_multi_do, curl_multi_do, NULL, socket); + curl_multi_do, curl_multi_do, + NULL, NULL, socket); break; case CURL_POLL_REMOVE: aio_set_fd_handler(s->aio_context, fd, false, - NULL, NULL, NULL, NULL); + NULL, NULL, NULL, NULL, NULL); break; } @@ -457,59 +465,90 @@ static int curl_init_state(BDRVCURLState *s, CURLState *state) if (!state->curl) { return -EIO; } - curl_easy_setopt(state->curl, CURLOPT_URL, s->url); - curl_easy_setopt(state->curl, CURLOPT_SSL_VERIFYPEER, - (long) s->sslverify); - curl_easy_setopt(state->curl, CURLOPT_SSL_VERIFYHOST, - s->sslverify ? 2L : 0L); - if (s->cookie) { - curl_easy_setopt(state->curl, CURLOPT_COOKIE, s->cookie); + if (curl_easy_setopt(state->curl, CURLOPT_URL, s->url) || + curl_easy_setopt(state->curl, CURLOPT_SSL_VERIFYPEER, + (long) s->sslverify) || + curl_easy_setopt(state->curl, CURLOPT_SSL_VERIFYHOST, + s->sslverify ? 
2L : 0L)) { + goto err; + } + if (s->cookie) { + if (curl_easy_setopt(state->curl, CURLOPT_COOKIE, s->cookie)) { + goto err; + } + } + if (curl_easy_setopt(state->curl, CURLOPT_TIMEOUT, (long)s->timeout) || + curl_easy_setopt(state->curl, CURLOPT_WRITEFUNCTION, + (void *)curl_read_cb) || + curl_easy_setopt(state->curl, CURLOPT_WRITEDATA, (void *)state) || + curl_easy_setopt(state->curl, CURLOPT_PRIVATE, (void *)state) || + curl_easy_setopt(state->curl, CURLOPT_AUTOREFERER, 1) || + curl_easy_setopt(state->curl, CURLOPT_FOLLOWLOCATION, 1) || + curl_easy_setopt(state->curl, CURLOPT_NOSIGNAL, 1) || + curl_easy_setopt(state->curl, CURLOPT_ERRORBUFFER, state->errmsg) || + curl_easy_setopt(state->curl, CURLOPT_FAILONERROR, 1)) { + goto err; } - curl_easy_setopt(state->curl, CURLOPT_TIMEOUT, (long)s->timeout); - curl_easy_setopt(state->curl, CURLOPT_WRITEFUNCTION, - (void *)curl_read_cb); - curl_easy_setopt(state->curl, CURLOPT_WRITEDATA, (void *)state); - curl_easy_setopt(state->curl, CURLOPT_PRIVATE, (void *)state); - curl_easy_setopt(state->curl, CURLOPT_AUTOREFERER, 1); - curl_easy_setopt(state->curl, CURLOPT_FOLLOWLOCATION, 1); - curl_easy_setopt(state->curl, CURLOPT_NOSIGNAL, 1); - curl_easy_setopt(state->curl, CURLOPT_ERRORBUFFER, state->errmsg); - curl_easy_setopt(state->curl, CURLOPT_FAILONERROR, 1); - if (s->username) { - curl_easy_setopt(state->curl, CURLOPT_USERNAME, s->username); + if (curl_easy_setopt(state->curl, CURLOPT_USERNAME, s->username)) { + goto err; + } } if (s->password) { - curl_easy_setopt(state->curl, CURLOPT_PASSWORD, s->password); + if (curl_easy_setopt(state->curl, CURLOPT_PASSWORD, s->password)) { + goto err; + } } if (s->proxyusername) { - curl_easy_setopt(state->curl, - CURLOPT_PROXYUSERNAME, s->proxyusername); + if (curl_easy_setopt(state->curl, + CURLOPT_PROXYUSERNAME, s->proxyusername)) { + goto err; + } } if (s->proxypassword) { - curl_easy_setopt(state->curl, - CURLOPT_PROXYPASSWORD, s->proxypassword); + if (curl_easy_setopt(state->curl, + CURLOPT_PROXYPASSWORD, s->proxypassword)) { + goto err; + } } /* Restrict supported protocols to avoid security issues in the more * obscure protocols. For example, do not allow POP3/SMTP/IMAP see * CVE-2013-0249. * - * Restricting protocols is only supported from 7.19.4 upwards. + * Restricting protocols is only supported from 7.19.4 upwards. Note: + * version 7.85.0 deprecates CURLOPT_*PROTOCOLS in favour of a string + * based CURLOPT_*PROTOCOLS_STR API. */ -#if LIBCURL_VERSION_NUM >= 0x071304 - curl_easy_setopt(state->curl, CURLOPT_PROTOCOLS, PROTOCOLS); - curl_easy_setopt(state->curl, CURLOPT_REDIR_PROTOCOLS, PROTOCOLS); +#if LIBCURL_VERSION_NUM >= 0x075500 + if (curl_easy_setopt(state->curl, + CURLOPT_PROTOCOLS_STR, PROTOCOLS) || + curl_easy_setopt(state->curl, + CURLOPT_REDIR_PROTOCOLS_STR, PROTOCOLS)) { + goto err; + } +#elif LIBCURL_VERSION_NUM >= 0x071304 + if (curl_easy_setopt(state->curl, CURLOPT_PROTOCOLS, PROTOCOLS) || + curl_easy_setopt(state->curl, CURLOPT_REDIR_PROTOCOLS, PROTOCOLS)) { + goto err; + } #endif #ifdef DEBUG_VERBOSE - curl_easy_setopt(state->curl, CURLOPT_VERBOSE, 1); + if (curl_easy_setopt(state->curl, CURLOPT_VERBOSE, 1)) { + goto err; + } #endif } state->s = s; return 0; + +err: + curl_easy_cleanup(state->curl); + state->curl = NULL; + return -EIO; } /* Called with s->mutex held. 
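/*
 * Illustrative sketch, not part of this patch: the error-checking idiom the
 * hunks above apply to libcurl option setup. Every curl_easy_setopt() result
 * is checked (conveniently chained with ||) and the handle is cleaned up on
 * failure instead of silently continuing. The URL parameter is a
 * placeholder; errbuf is assumed to be at least CURL_ERROR_SIZE bytes.
 */
#include <curl/curl.h>

static CURL *example_init_handle(const char *url, char *errbuf)
{
    CURL *curl = curl_easy_init();

    if (!curl) {
        return NULL;
    }
    if (curl_easy_setopt(curl, CURLOPT_URL, url) ||
        curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1L) ||
        curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, errbuf) ||
        curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1L)) {
        curl_easy_cleanup(curl);
        return NULL;
    }
    return curl;
}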
*/ @@ -646,7 +685,12 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags, const char *file; const char *cookie; const char *cookie_secret; - double d; + /* CURL >= 7.55.0 uses curl_off_t for content length instead of a double */ +#if LIBCURL_VERSION_NUM >= 0x073700 + curl_off_t cl; +#else + double cl; +#endif const char *secretid; const char *protocol_delimiter; int ret; @@ -758,37 +802,51 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags, // Get file size if (curl_init_state(s, state) < 0) { + pstrcpy(state->errmsg, CURL_ERROR_SIZE, + "curl library initialization failed."); goto out; } s->accept_range = false; - curl_easy_setopt(state->curl, CURLOPT_NOBODY, 1); - curl_easy_setopt(state->curl, CURLOPT_HEADERFUNCTION, - curl_header_cb); - curl_easy_setopt(state->curl, CURLOPT_HEADERDATA, s); - if (curl_easy_perform(state->curl)) - goto out; - if (curl_easy_getinfo(state->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &d)) { + if (curl_easy_setopt(state->curl, CURLOPT_NOBODY, 1) || + curl_easy_setopt(state->curl, CURLOPT_HEADERFUNCTION, curl_header_cb) || + curl_easy_setopt(state->curl, CURLOPT_HEADERDATA, s)) { + pstrcpy(state->errmsg, CURL_ERROR_SIZE, + "curl library initialization failed."); goto out; } + if (curl_easy_perform(state->curl)) + goto out; + /* CURL 7.55.0 deprecates CURLINFO_CONTENT_LENGTH_DOWNLOAD in favour of + * the *_T version which returns a more sensible type for content length. + */ +#if LIBCURL_VERSION_NUM >= 0x073700 + if (curl_easy_getinfo(state->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD_T, &cl)) { + goto out; + } +#else + if (curl_easy_getinfo(state->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &cl)) { + goto out; + } +#endif /* Prior CURL 7.19.4 return value of 0 could mean that the file size is not * know or the size is zero. From 7.19.4 CURL returns -1 if size is not * known and zero if it is really zero-length file. 
*/ #if LIBCURL_VERSION_NUM >= 0x071304 - if (d < 0) { + if (cl < 0) { pstrcpy(state->errmsg, CURL_ERROR_SIZE, "Server didn't report file size."); goto out; } #else - if (d <= 0) { + if (cl <= 0) { pstrcpy(state->errmsg, CURL_ERROR_SIZE, "Unknown file size or zero-length file."); goto out; } #endif - s->len = d; + s->len = cl; if ((!strncasecmp(s->url, "http://", strlen("http://")) || !strncasecmp(s->url, "https://", strlen("https://"))) @@ -827,7 +885,7 @@ out_noclean: return -EINVAL; } -static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb) +static void coroutine_fn curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb) { CURLState *state; int running; @@ -878,9 +936,8 @@ static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb) snprintf(state->range, 127, "%" PRIu64 "-%" PRIu64, start, end); trace_curl_setup_preadv(acb->bytes, start, state->range); - curl_easy_setopt(state->curl, CURLOPT_RANGE, state->range); - - if (curl_multi_add_handle(s->multi, state->curl) != CURLM_OK) { + if (curl_easy_setopt(state->curl, CURLOPT_RANGE, state->range) || + curl_multi_add_handle(s->multi, state->curl) != CURLM_OK) { state->acb[0] = NULL; acb->ret = -EIO; @@ -896,7 +953,8 @@ out: } static int coroutine_fn curl_co_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { CURLAIOCB acb = { .co = qemu_coroutine_self(), diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c index 0ef46163e3..bf3dc0512a 100644 --- a/block/dirty-bitmap.c +++ b/block/dirty-bitmap.c @@ -309,10 +309,7 @@ BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap_locked(BdrvDirtyBitmap *parent, return NULL; } - if (!hbitmap_merge(parent->bitmap, successor->bitmap, parent->bitmap)) { - error_setg(errp, "Merging of parent and successor bitmap failed"); - return NULL; - } + hbitmap_merge(parent->bitmap, successor->bitmap, parent->bitmap); parent->disabled = successor->disabled; parent->busy = false; @@ -496,6 +493,7 @@ static void coroutine_fn bdrv_co_can_store_new_dirty_bitmap_entry(void *opaque) bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name, uint32_t granularity, Error **errp) { + IO_CODE(); if (qemu_in_coroutine()) { return bdrv_co_can_store_new_dirty_bitmap(bs, name, granularity, errp); } else { @@ -656,6 +654,7 @@ void bdrv_reset_dirty_bitmap(BdrvDirtyBitmap *bitmap, void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out) { + IO_CODE(); assert(!bdrv_dirty_bitmap_readonly(bitmap)); bdrv_dirty_bitmaps_lock(bitmap->bs); if (!out) { @@ -673,6 +672,7 @@ void bdrv_restore_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *backup) { HBitmap *tmp = bitmap->bitmap; assert(!bdrv_dirty_bitmap_readonly(bitmap)); + GLOBAL_STATE_CODE(); bitmap->bitmap = backup; hbitmap_free(tmp); } @@ -737,6 +737,7 @@ void bdrv_dirty_bitmap_deserialize_finish(BdrvDirtyBitmap *bitmap) void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes) { BdrvDirtyBitmap *bitmap; + IO_CODE(); if (QLIST_EMPTY(&bs->dirty_bitmaps)) { return; @@ -875,16 +876,25 @@ bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap, dirty_start, dirty_count); } +bool bdrv_dirty_bitmap_status(BdrvDirtyBitmap *bitmap, int64_t offset, + int64_t bytes, int64_t *count) +{ + return hbitmap_status(bitmap->bitmap, offset, bytes, count); +} + /** * bdrv_merge_dirty_bitmap: merge src into dest. * Ensures permissions on bitmaps are reasonable; use for public API. 
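/*
 * Illustrative sketch, not part of this patch: using the new
 * bdrv_dirty_bitmap_status() helper added above. Mirroring its use in
 * cbw_snapshot_read_lock(), it reports whether the start of the range is
 * dirty and how many contiguous bytes share that status. The caller below
 * is hypothetical.
 */
#include "qemu/osdep.h"
#include "block/dirty-bitmap.h"

static void example_classify(BdrvDirtyBitmap *bitmap,
                             int64_t offset, int64_t bytes)
{
    int64_t count;
    bool dirty = bdrv_dirty_bitmap_status(bitmap, offset, bytes, &count);

    /* [offset, offset + count) is uniformly dirty or clean. */
    if (dirty) {
        /* ... handle the dirty run ... */
    } else {
        /* ... handle the clean run ... */
    }
}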
* * @backup: If provided, make a copy of dest here prior to merge. + * + * Returns true on success, false on failure. In case of failure bitmaps are + * untouched. */ -void bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src, +bool bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src, HBitmap **backup, Error **errp) { - bool ret; + bool ret = false; bdrv_dirty_bitmaps_lock(dest->bs); if (src->bs != dest->bs) { @@ -899,35 +909,39 @@ void bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src, goto out; } - if (!hbitmap_can_merge(dest->bitmap, src->bitmap)) { - error_setg(errp, "Bitmaps are incompatible and can't be merged"); + if (bdrv_dirty_bitmap_size(src) != bdrv_dirty_bitmap_size(dest)) { + error_setg(errp, "Bitmaps are of different sizes (destination size is %" + PRId64 ", source size is %" PRId64 ") and can't be merged", + bdrv_dirty_bitmap_size(dest), bdrv_dirty_bitmap_size(src)); goto out; } - ret = bdrv_dirty_bitmap_merge_internal(dest, src, backup, false); - assert(ret); + bdrv_dirty_bitmap_merge_internal(dest, src, backup, false); + ret = true; out: bdrv_dirty_bitmaps_unlock(dest->bs); if (src->bs != dest->bs) { bdrv_dirty_bitmaps_unlock(src->bs); } + + return ret; } /** * bdrv_dirty_bitmap_merge_internal: merge src into dest. * Does NOT check bitmap permissions; not suitable for use as public API. + * @dest, @src and @backup (if not NULL) must have same size. * * @backup: If provided, make a copy of dest here prior to merge. * @lock: If true, lock and unlock bitmaps on the way in/out. - * returns true if the merge succeeded; false if unattempted. */ -bool bdrv_dirty_bitmap_merge_internal(BdrvDirtyBitmap *dest, +void bdrv_dirty_bitmap_merge_internal(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src, HBitmap **backup, bool lock) { - bool ret; + IO_CODE(); assert(!bdrv_dirty_bitmap_readonly(dest)); assert(!bdrv_dirty_bitmap_inconsistent(dest)); @@ -943,9 +957,9 @@ bool bdrv_dirty_bitmap_merge_internal(BdrvDirtyBitmap *dest, if (backup) { *backup = dest->bitmap; dest->bitmap = hbitmap_alloc(dest->size, hbitmap_granularity(*backup)); - ret = hbitmap_merge(*backup, src->bitmap, dest->bitmap); + hbitmap_merge(*backup, src->bitmap, dest->bitmap); } else { - ret = hbitmap_merge(dest->bitmap, src->bitmap, dest->bitmap); + hbitmap_merge(dest->bitmap, src->bitmap, dest->bitmap); } if (lock) { @@ -954,6 +968,4 @@ bool bdrv_dirty_bitmap_merge_internal(BdrvDirtyBitmap *dest, bdrv_dirty_bitmaps_unlock(src->bs); } } - - return ret; } diff --git a/block/dmg.c b/block/dmg.c index ef35a505f2..675e840ca5 100644 --- a/block/dmg.c +++ b/block/dmg.c @@ -27,6 +27,7 @@ #include "qemu/bswap.h" #include "qemu/error-report.h" #include "qemu/module.h" +#include "qemu/memalign.h" #include "dmg.h" int (*dmg_uncompress_bz2)(char *next_in, unsigned int avail_in, @@ -76,7 +77,7 @@ static int read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result) uint64_t buffer; int ret; - ret = bdrv_pread(bs->file, offset, &buffer, 8); + ret = bdrv_pread(bs->file, offset, 8, &buffer, 0); if (ret < 0) { return ret; } @@ -90,7 +91,7 @@ static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result) uint32_t buffer; int ret; - ret = bdrv_pread(bs->file, offset, &buffer, 4); + ret = bdrv_pread(bs->file, offset, 4, &buffer, 0); if (ret < 0) { return ret; } @@ -171,7 +172,7 @@ static int64_t dmg_find_koly_offset(BdrvChild *file, Error **errp) offset = length - 511 - 512; } length = length < 515 ? 
length : 515; - ret = bdrv_pread(file, offset, buffer, length); + ret = bdrv_pread(file, offset, length, buffer, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed while reading UDIF trailer"); return ret; @@ -253,6 +254,25 @@ static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds, for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) { s->types[i] = buff_read_uint32(buffer, offset); if (!dmg_is_known_block_type(s->types[i])) { + switch (s->types[i]) { + case UDBZ: + warn_report_once("dmg-bzip2 module is missing, accessing bzip2 " + "compressed blocks will result in I/O errors"); + break; + case ULFO: + warn_report_once("dmg-lzfse module is missing, accessing lzfse " + "compressed blocks will result in I/O errors"); + break; + case UDCM: + case UDLE: + /* Comments and last entry can be ignored without problems */ + break; + default: + warn_report_once("Image contains chunks of unknown type %x, " + "accessing them will result in I/O errors", + s->types[i]); + break; + } chunk_count--; i--; offset += 40; @@ -351,7 +371,7 @@ static int dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds, offset += 4; buffer = g_realloc(buffer, count); - ret = bdrv_pread(bs->file, offset, buffer, count); + ret = bdrv_pread(bs->file, offset, count, buffer, 0); if (ret < 0) { goto fail; } @@ -388,8 +408,8 @@ static int dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds, buffer = g_malloc(info_length + 1); buffer[info_length] = '\0'; - ret = bdrv_pread(bs->file, info_begin, buffer, info_length); - if (ret != info_length) { + ret = bdrv_pread(bs->file, info_begin, info_length, buffer, 0); + if (ret < 0) { ret = -EINVAL; goto fail; } @@ -439,14 +459,21 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags, return ret; } - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; + } + /* + * NB: if uncompress submodules are absent, + * ie block_module_load return value == 0, the function pointers + * dmg_uncompress_bz2 and dmg_uncompress_lzfse will be NULL. + */ + if (block_module_load("dmg-bz2", errp) < 0) { + return -EINVAL; + } + if (block_module_load("dmg-lzfse", errp) < 0) { return -EINVAL; } - - block_module_load_one("dmg-bz2"); - block_module_load_one("dmg-lzfse"); s->n_chunks = 0; s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL; @@ -608,9 +635,9 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num) case UDZO: { /* zlib compressed */ /* we need to buffer, because only the chunk as whole can be * inflated. */ - ret = bdrv_pread(bs->file, s->offsets[chunk], - s->compressed_chunk, s->lengths[chunk]); - if (ret != s->lengths[chunk]) { + ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk], + s->compressed_chunk, 0); + if (ret < 0) { return -1; } @@ -634,9 +661,9 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num) } /* we need to buffer, because only the chunk as whole can be * inflated. */ - ret = bdrv_pread(bs->file, s->offsets[chunk], - s->compressed_chunk, s->lengths[chunk]); - if (ret != s->lengths[chunk]) { + ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk], + s->compressed_chunk, 0); + if (ret < 0) { return -1; } @@ -655,9 +682,9 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num) } /* we need to buffer, because only the chunk as whole can be * inflated. 
*/ - ret = bdrv_pread(bs->file, s->offsets[chunk], - s->compressed_chunk, s->lengths[chunk]); - if (ret != s->lengths[chunk]) { + ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk], + s->compressed_chunk, 0); + if (ret < 0) { return -1; } @@ -671,9 +698,9 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num) } break; case UDRW: /* copy */ - ret = bdrv_pread(bs->file, s->offsets[chunk], - s->uncompressed_chunk, s->lengths[chunk]); - if (ret != s->lengths[chunk]) { + ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk], + s->uncompressed_chunk, 0); + if (ret < 0) { return -1; } break; @@ -689,8 +716,8 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num) } static int coroutine_fn -dmg_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +dmg_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BDRVDMGState *s = bs->opaque; uint64_t sector_num = offset >> BDRV_SECTOR_BITS; diff --git a/block/export/export.c b/block/export/export.c index 6d3b9964c8..7cc0c25c1c 100644 --- a/block/export/export.c +++ b/block/export/export.c @@ -26,6 +26,9 @@ #ifdef CONFIG_VHOST_USER_BLK_SERVER #include "vhost-user-blk-server.h" #endif +#ifdef CONFIG_VDUSE_BLK_EXPORT +#include "vduse-blk.h" +#endif static const BlockExportDriver *blk_exp_drivers[] = { &blk_exp_nbd, @@ -35,6 +38,9 @@ static const BlockExportDriver *blk_exp_drivers[] = { #ifdef CONFIG_FUSE &blk_exp_fuse, #endif +#ifdef CONFIG_VDUSE_BLK_EXPORT + &blk_exp_vduse_blk, +#endif }; /* Only accessed from the main thread */ @@ -123,7 +129,7 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) /* Ignore errors with fixed-iothread=false */ set_context_errp = fixed_iothread ? errp : NULL; - ret = bdrv_try_set_aio_context(bs, new_ctx, set_context_errp); + ret = bdrv_try_change_aio_context(bs, new_ctx, NULL, set_context_errp); if (ret == 0) { aio_context_release(ctx); aio_context_acquire(new_ctx); @@ -139,7 +145,7 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) * access since the export could be available before migration handover. * ctx was acquired in the caller. 
*/ - bdrv_invalidate_cache(bs, NULL); + bdrv_activate(bs, NULL); perm = BLK_PERM_CONSISTENT_READ; if (export->writable) { diff --git a/block/export/fuse.c b/block/export/fuse.c index fc7b07d2b5..1b26ddfcf3 100644 --- a/block/export/fuse.c +++ b/block/export/fuse.c @@ -19,6 +19,7 @@ #define FUSE_USE_VERSION 31 #include "qemu/osdep.h" +#include "qemu/memalign.h" #include "block/aio.h" #include "block/block.h" #include "block/export.h" @@ -31,6 +32,13 @@ #include #include +#if defined(CONFIG_FALLOCATE_ZERO_RANGE) +#include +#endif + +#ifdef __linux__ +#include +#endif /* Prevent overly long bounce buffer allocations */ #define FUSE_MAX_BOUNCE_BYTES (MIN(BDRV_REQUEST_MAX_BYTES, 64 * 1024 * 1024)) @@ -79,8 +87,8 @@ static int fuse_export_create(BlockExport *blk_exp, assert(blk_exp_args->type == BLOCK_EXPORT_TYPE_FUSE); - /* For growable exports, take the RESIZE permission */ - if (args->growable) { + /* For growable and writable exports, take the RESIZE permission */ + if (args->growable || blk_exp_args->writable) { uint64_t blk_perm, blk_shared_perm; blk_get_perm(exp->common.blk, &blk_perm, &blk_shared_perm); @@ -216,7 +224,7 @@ static int setup_fuse_export(FuseExport *exp, const char *mountpoint, aio_set_fd_handler(exp->common.ctx, fuse_session_fd(exp->fuse_session), true, - read_from_fuse_export, NULL, NULL, exp); + read_from_fuse_export, NULL, NULL, NULL, exp); exp->fd_handler_set_up = true; return 0; @@ -260,7 +268,7 @@ static void fuse_export_shutdown(BlockExport *blk_exp) if (exp->fd_handler_set_up) { aio_set_fd_handler(exp->common.ctx, fuse_session_fd(exp->fuse_session), true, - NULL, NULL, NULL, NULL); + NULL, NULL, NULL, NULL, NULL); exp->fd_handler_set_up = false; } } @@ -385,14 +393,23 @@ static int fuse_do_truncate(const FuseExport *exp, int64_t size, { uint64_t blk_perm, blk_shared_perm; BdrvRequestFlags truncate_flags = 0; - int ret; + bool add_resize_perm; + int ret, ret_check; + + /* Growable and writable exports have a permanent RESIZE permission */ + add_resize_perm = !exp->growable && !exp->writable; if (req_zero_write) { truncate_flags |= BDRV_REQ_ZERO_WRITE; } - /* Growable exports have a permanent RESIZE permission */ - if (!exp->growable) { + if (add_resize_perm) { + + if (!qemu_in_main_thread()) { + /* Changing permissions like below only works in the main thread */ + return -EPERM; + } + blk_get_perm(exp->common.blk, &blk_perm, &blk_shared_perm); ret = blk_set_perm(exp->common.blk, blk_perm | BLK_PERM_RESIZE, @@ -405,9 +422,11 @@ static int fuse_do_truncate(const FuseExport *exp, int64_t size, ret = blk_truncate(exp->common.blk, size, true, prealloc, truncate_flags, NULL); - if (!exp->growable) { + if (add_resize_perm) { /* Must succeed, because we are only giving up the RESIZE permission */ - blk_set_perm(exp->common.blk, blk_perm, blk_shared_perm, &error_abort); + ret_check = blk_set_perm(exp->common.blk, blk_perm, + blk_shared_perm, &error_abort); + assert(ret_check == 0); } return ret; @@ -535,7 +554,7 @@ static void fuse_read(fuse_req_t req, fuse_ino_t inode, return; } - ret = blk_pread(exp->common.blk, offset, buf, size); + ret = blk_pread(exp->common.blk, offset, size, buf, 0); if (ret >= 0) { fuse_reply_buf(req, buf, size); } else { @@ -588,7 +607,7 @@ static void fuse_write(fuse_req_t req, fuse_ino_t inode, const char *buf, } } - ret = blk_pwrite(exp->common.blk, offset, buf, size, 0); + ret = blk_pwrite(exp->common.blk, offset, size, buf, 0); if (ret >= 0) { fuse_reply_write(req, size); } else { @@ -618,11 +637,33 @@ static void fuse_fallocate(fuse_req_t req, 
fuse_ino_t inode, int mode, return; } +#ifdef CONFIG_FALLOCATE_PUNCH_HOLE if (mode & FALLOC_FL_KEEP_SIZE) { length = MIN(length, blk_len - offset); } +#endif /* CONFIG_FALLOCATE_PUNCH_HOLE */ - if (mode & FALLOC_FL_PUNCH_HOLE) { + if (!mode) { + /* We can only fallocate at the EOF with a truncate */ + if (offset < blk_len) { + fuse_reply_err(req, EOPNOTSUPP); + return; + } + + if (offset > blk_len) { + /* No preallocation needed here */ + ret = fuse_do_truncate(exp, offset, true, PREALLOC_MODE_OFF); + if (ret < 0) { + fuse_reply_err(req, -ret); + return; + } + } + + ret = fuse_do_truncate(exp, offset + length, true, + PREALLOC_MODE_FALLOC); + } +#ifdef CONFIG_FALLOCATE_PUNCH_HOLE + else if (mode & FALLOC_FL_PUNCH_HOLE) { if (!(mode & FALLOC_FL_KEEP_SIZE)) { fuse_reply_err(req, EINVAL); return; @@ -636,6 +677,7 @@ static void fuse_fallocate(fuse_req_t req, fuse_ino_t inode, int mode, length -= size; } while (ret == 0 && length > 0); } +#endif /* CONFIG_FALLOCATE_PUNCH_HOLE */ #ifdef CONFIG_FALLOCATE_ZERO_RANGE else if (mode & FALLOC_FL_ZERO_RANGE) { if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + length > blk_len) { @@ -658,25 +700,7 @@ static void fuse_fallocate(fuse_req_t req, fuse_ino_t inode, int mode, } while (ret == 0 && length > 0); } #endif /* CONFIG_FALLOCATE_ZERO_RANGE */ - else if (!mode) { - /* We can only fallocate at the EOF with a truncate */ - if (offset < blk_len) { - fuse_reply_err(req, EOPNOTSUPP); - return; - } - - if (offset > blk_len) { - /* No preallocation needed here */ - ret = fuse_do_truncate(exp, offset, true, PREALLOC_MODE_OFF); - if (ret < 0) { - fuse_reply_err(req, -ret); - return; - } - } - - ret = fuse_do_truncate(exp, offset + length, true, - PREALLOC_MODE_FALLOC); - } else { + else { ret = -EOPNOTSUPP; } diff --git a/block/export/meson.build b/block/export/meson.build index 0a08e384c7..c60116f455 100644 --- a/block/export/meson.build +++ b/block/export/meson.build @@ -1,7 +1,12 @@ blockdev_ss.add(files('export.c')) if have_vhost_user_blk_server - blockdev_ss.add(files('vhost-user-blk-server.c')) + blockdev_ss.add(files('vhost-user-blk-server.c', 'virtio-blk-handler.c')) endif blockdev_ss.add(when: fuse, if_true: files('fuse.c')) + +if have_vduse_blk_export + blockdev_ss.add(files('vduse-blk.c', 'virtio-blk-handler.c')) + blockdev_ss.add(libvduse) +endif diff --git a/block/export/vduse-blk.c b/block/export/vduse-blk.c new file mode 100644 index 0000000000..f101c24c3f --- /dev/null +++ b/block/export/vduse-blk.c @@ -0,0 +1,374 @@ +/* + * Export QEMU block device via VDUSE + * + * Copyright (C) 2022 Bytedance Inc. and/or its affiliates. All rights reserved. + * + * Author: + * Xie Yongji + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * later. See the COPYING file in the top-level directory. 
+ */ + +#include + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "block/export.h" +#include "qemu/error-report.h" +#include "util/block-helpers.h" +#include "subprojects/libvduse/libvduse.h" +#include "virtio-blk-handler.h" + +#include "standard-headers/linux/virtio_blk.h" + +#define VDUSE_DEFAULT_NUM_QUEUE 1 +#define VDUSE_DEFAULT_QUEUE_SIZE 256 + +typedef struct VduseBlkExport { + BlockExport export; + VirtioBlkHandler handler; + VduseDev *dev; + uint16_t num_queues; + char *recon_file; + unsigned int inflight; +} VduseBlkExport; + +typedef struct VduseBlkReq { + VduseVirtqElement elem; + VduseVirtq *vq; +} VduseBlkReq; + +static void vduse_blk_inflight_inc(VduseBlkExport *vblk_exp) +{ + vblk_exp->inflight++; +} + +static void vduse_blk_inflight_dec(VduseBlkExport *vblk_exp) +{ + if (--vblk_exp->inflight == 0) { + aio_wait_kick(); + } +} + +static void vduse_blk_req_complete(VduseBlkReq *req, size_t in_len) +{ + vduse_queue_push(req->vq, &req->elem, in_len); + vduse_queue_notify(req->vq); + + free(req); +} + +static void coroutine_fn vduse_blk_virtio_process_req(void *opaque) +{ + VduseBlkReq *req = opaque; + VduseVirtq *vq = req->vq; + VduseDev *dev = vduse_queue_get_dev(vq); + VduseBlkExport *vblk_exp = vduse_dev_get_priv(dev); + VirtioBlkHandler *handler = &vblk_exp->handler; + VduseVirtqElement *elem = &req->elem; + struct iovec *in_iov = elem->in_sg; + struct iovec *out_iov = elem->out_sg; + unsigned in_num = elem->in_num; + unsigned out_num = elem->out_num; + int in_len; + + in_len = virtio_blk_process_req(handler, in_iov, + out_iov, in_num, out_num); + if (in_len < 0) { + free(req); + return; + } + + vduse_blk_req_complete(req, in_len); + vduse_blk_inflight_dec(vblk_exp); +} + +static void vduse_blk_vq_handler(VduseDev *dev, VduseVirtq *vq) +{ + VduseBlkExport *vblk_exp = vduse_dev_get_priv(dev); + + while (1) { + VduseBlkReq *req; + + req = vduse_queue_pop(vq, sizeof(VduseBlkReq)); + if (!req) { + break; + } + req->vq = vq; + + Coroutine *co = + qemu_coroutine_create(vduse_blk_virtio_process_req, req); + + vduse_blk_inflight_inc(vblk_exp); + qemu_coroutine_enter(co); + } +} + +static void on_vduse_vq_kick(void *opaque) +{ + VduseVirtq *vq = opaque; + VduseDev *dev = vduse_queue_get_dev(vq); + int fd = vduse_queue_get_fd(vq); + eventfd_t kick_data; + + if (eventfd_read(fd, &kick_data) == -1) { + error_report("failed to read data from eventfd"); + return; + } + + vduse_blk_vq_handler(dev, vq); +} + +static void vduse_blk_enable_queue(VduseDev *dev, VduseVirtq *vq) +{ + VduseBlkExport *vblk_exp = vduse_dev_get_priv(dev); + + aio_set_fd_handler(vblk_exp->export.ctx, vduse_queue_get_fd(vq), + true, on_vduse_vq_kick, NULL, NULL, NULL, vq); + /* Make sure we don't miss any kick afer reconnecting */ + eventfd_write(vduse_queue_get_fd(vq), 1); +} + +static void vduse_blk_disable_queue(VduseDev *dev, VduseVirtq *vq) +{ + VduseBlkExport *vblk_exp = vduse_dev_get_priv(dev); + + aio_set_fd_handler(vblk_exp->export.ctx, vduse_queue_get_fd(vq), + true, NULL, NULL, NULL, NULL, NULL); +} + +static const VduseOps vduse_blk_ops = { + .enable_queue = vduse_blk_enable_queue, + .disable_queue = vduse_blk_disable_queue, +}; + +static void on_vduse_dev_kick(void *opaque) +{ + VduseDev *dev = opaque; + + vduse_dev_handler(dev); +} + +static void vduse_blk_attach_ctx(VduseBlkExport *vblk_exp, AioContext *ctx) +{ + int i; + + aio_set_fd_handler(vblk_exp->export.ctx, vduse_dev_get_fd(vblk_exp->dev), + true, on_vduse_dev_kick, NULL, NULL, NULL, + vblk_exp->dev); + + for (i = 0; i < 
vblk_exp->num_queues; i++) { + VduseVirtq *vq = vduse_dev_get_queue(vblk_exp->dev, i); + int fd = vduse_queue_get_fd(vq); + + if (fd < 0) { + continue; + } + aio_set_fd_handler(vblk_exp->export.ctx, fd, true, + on_vduse_vq_kick, NULL, NULL, NULL, vq); + } +} + +static void vduse_blk_detach_ctx(VduseBlkExport *vblk_exp) +{ + int i; + + for (i = 0; i < vblk_exp->num_queues; i++) { + VduseVirtq *vq = vduse_dev_get_queue(vblk_exp->dev, i); + int fd = vduse_queue_get_fd(vq); + + if (fd < 0) { + continue; + } + aio_set_fd_handler(vblk_exp->export.ctx, fd, + true, NULL, NULL, NULL, NULL, NULL); + } + aio_set_fd_handler(vblk_exp->export.ctx, vduse_dev_get_fd(vblk_exp->dev), + true, NULL, NULL, NULL, NULL, NULL); + + AIO_WAIT_WHILE(vblk_exp->export.ctx, vblk_exp->inflight > 0); +} + + +static void blk_aio_attached(AioContext *ctx, void *opaque) +{ + VduseBlkExport *vblk_exp = opaque; + + vblk_exp->export.ctx = ctx; + vduse_blk_attach_ctx(vblk_exp, ctx); +} + +static void blk_aio_detach(void *opaque) +{ + VduseBlkExport *vblk_exp = opaque; + + vduse_blk_detach_ctx(vblk_exp); + vblk_exp->export.ctx = NULL; +} + +static void vduse_blk_resize(void *opaque) +{ + BlockExport *exp = opaque; + VduseBlkExport *vblk_exp = container_of(exp, VduseBlkExport, export); + struct virtio_blk_config config; + + config.capacity = + cpu_to_le64(blk_getlength(exp->blk) >> VIRTIO_BLK_SECTOR_BITS); + vduse_dev_update_config(vblk_exp->dev, sizeof(config.capacity), + offsetof(struct virtio_blk_config, capacity), + (char *)&config.capacity); +} + +static const BlockDevOps vduse_block_ops = { + .resize_cb = vduse_blk_resize, +}; + +static int vduse_blk_exp_create(BlockExport *exp, BlockExportOptions *opts, + Error **errp) +{ + VduseBlkExport *vblk_exp = container_of(exp, VduseBlkExport, export); + BlockExportOptionsVduseBlk *vblk_opts = &opts->u.vduse_blk; + uint64_t logical_block_size = VIRTIO_BLK_SECTOR_SIZE; + uint16_t num_queues = VDUSE_DEFAULT_NUM_QUEUE; + uint16_t queue_size = VDUSE_DEFAULT_QUEUE_SIZE; + Error *local_err = NULL; + struct virtio_blk_config config = { 0 }; + uint64_t features; + int i, ret; + + if (vblk_opts->has_num_queues) { + num_queues = vblk_opts->num_queues; + if (num_queues == 0) { + error_setg(errp, "num-queues must be greater than 0"); + return -EINVAL; + } + } + + if (vblk_opts->has_queue_size) { + queue_size = vblk_opts->queue_size; + if (queue_size <= 2 || !is_power_of_2(queue_size) || + queue_size > VIRTQUEUE_MAX_SIZE) { + error_setg(errp, "queue-size is invalid"); + return -EINVAL; + } + } + + if (vblk_opts->has_logical_block_size) { + logical_block_size = vblk_opts->logical_block_size; + check_block_size(exp->id, "logical-block-size", logical_block_size, + &local_err); + if (local_err) { + error_propagate(errp, local_err); + return -EINVAL; + } + } + vblk_exp->num_queues = num_queues; + vblk_exp->handler.blk = exp->blk; + vblk_exp->handler.serial = g_strdup(vblk_opts->has_serial ? 
+ vblk_opts->serial : ""); + vblk_exp->handler.logical_block_size = logical_block_size; + vblk_exp->handler.writable = opts->writable; + + config.capacity = + cpu_to_le64(blk_getlength(exp->blk) >> VIRTIO_BLK_SECTOR_BITS); + config.seg_max = cpu_to_le32(queue_size - 2); + config.min_io_size = cpu_to_le16(1); + config.opt_io_size = cpu_to_le32(1); + config.num_queues = cpu_to_le16(num_queues); + config.blk_size = cpu_to_le32(logical_block_size); + config.max_discard_sectors = cpu_to_le32(VIRTIO_BLK_MAX_DISCARD_SECTORS); + config.max_discard_seg = cpu_to_le32(1); + config.discard_sector_alignment = + cpu_to_le32(logical_block_size >> VIRTIO_BLK_SECTOR_BITS); + config.max_write_zeroes_sectors = + cpu_to_le32(VIRTIO_BLK_MAX_WRITE_ZEROES_SECTORS); + config.max_write_zeroes_seg = cpu_to_le32(1); + + features = vduse_get_virtio_features() | + (1ULL << VIRTIO_BLK_F_SEG_MAX) | + (1ULL << VIRTIO_BLK_F_TOPOLOGY) | + (1ULL << VIRTIO_BLK_F_BLK_SIZE) | + (1ULL << VIRTIO_BLK_F_FLUSH) | + (1ULL << VIRTIO_BLK_F_DISCARD) | + (1ULL << VIRTIO_BLK_F_WRITE_ZEROES); + + if (num_queues > 1) { + features |= 1ULL << VIRTIO_BLK_F_MQ; + } + if (!opts->writable) { + features |= 1ULL << VIRTIO_BLK_F_RO; + } + + vblk_exp->dev = vduse_dev_create(vblk_opts->name, VIRTIO_ID_BLOCK, 0, + features, num_queues, + sizeof(struct virtio_blk_config), + (char *)&config, &vduse_blk_ops, + vblk_exp); + if (!vblk_exp->dev) { + error_setg(errp, "failed to create vduse device"); + ret = -ENOMEM; + goto err_dev; + } + + vblk_exp->recon_file = g_strdup_printf("%s/vduse-blk-%s", + g_get_tmp_dir(), vblk_opts->name); + if (vduse_set_reconnect_log_file(vblk_exp->dev, vblk_exp->recon_file)) { + error_setg(errp, "failed to set reconnect log file"); + ret = -EINVAL; + goto err; + } + + for (i = 0; i < num_queues; i++) { + vduse_dev_setup_queue(vblk_exp->dev, i, queue_size); + } + + aio_set_fd_handler(exp->ctx, vduse_dev_get_fd(vblk_exp->dev), true, + on_vduse_dev_kick, NULL, NULL, NULL, vblk_exp->dev); + + blk_add_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach, + vblk_exp); + + blk_set_dev_ops(exp->blk, &vduse_block_ops, exp); + + return 0; +err: + vduse_dev_destroy(vblk_exp->dev); + g_free(vblk_exp->recon_file); +err_dev: + g_free(vblk_exp->handler.serial); + return ret; +} + +static void vduse_blk_exp_delete(BlockExport *exp) +{ + VduseBlkExport *vblk_exp = container_of(exp, VduseBlkExport, export); + int ret; + + blk_remove_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach, + vblk_exp); + blk_set_dev_ops(exp->blk, NULL, NULL); + ret = vduse_dev_destroy(vblk_exp->dev); + if (ret != -EBUSY) { + unlink(vblk_exp->recon_file); + } + g_free(vblk_exp->recon_file); + g_free(vblk_exp->handler.serial); +} + +static void vduse_blk_exp_request_shutdown(BlockExport *exp) +{ + VduseBlkExport *vblk_exp = container_of(exp, VduseBlkExport, export); + + aio_context_acquire(vblk_exp->export.ctx); + vduse_blk_detach_ctx(vblk_exp); + aio_context_acquire(vblk_exp->export.ctx); +} + +const BlockExportDriver blk_exp_vduse_blk = { + .type = BLOCK_EXPORT_TYPE_VDUSE_BLK, + .instance_size = sizeof(VduseBlkExport), + .create = vduse_blk_exp_create, + .delete = vduse_blk_exp_delete, + .request_shutdown = vduse_blk_exp_request_shutdown, +}; diff --git a/block/export/vduse-blk.h b/block/export/vduse-blk.h new file mode 100644 index 0000000000..c4eeb1b70e --- /dev/null +++ b/block/export/vduse-blk.h @@ -0,0 +1,20 @@ +/* + * Export QEMU block device via VDUSE + * + * Copyright (C) 2022 Bytedance Inc. and/or its affiliates. All rights reserved. 
+ * + * Author: + * Xie Yongji + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * later. See the COPYING file in the top-level directory. + */ + +#ifndef VDUSE_BLK_H +#define VDUSE_BLK_H + +#include "block/export.h" + +extern const BlockExportDriver blk_exp_vduse_blk; + +#endif /* VDUSE_BLK_H */ diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c index 1862563336..3409d9e02e 100644 --- a/block/export/vhost-user-blk-server.c +++ b/block/export/vhost-user-blk-server.c @@ -17,31 +17,15 @@ #include "vhost-user-blk-server.h" #include "qapi/error.h" #include "qom/object_interfaces.h" -#include "sysemu/block-backend.h" #include "util/block-helpers.h" - -/* - * Sector units are 512 bytes regardless of the - * virtio_blk_config->blk_size value. - */ -#define VIRTIO_BLK_SECTOR_BITS 9 -#define VIRTIO_BLK_SECTOR_SIZE (1ull << VIRTIO_BLK_SECTOR_BITS) +#include "virtio-blk-handler.h" enum { VHOST_USER_BLK_NUM_QUEUES_DEFAULT = 1, - VHOST_USER_BLK_MAX_DISCARD_SECTORS = 32768, - VHOST_USER_BLK_MAX_WRITE_ZEROES_SECTORS = 32768, -}; -struct virtio_blk_inhdr { - unsigned char status; }; typedef struct VuBlkReq { VuVirtqElement elem; - int64_t sector_num; - size_t size; - struct virtio_blk_inhdr *in; - struct virtio_blk_outhdr out; VuServer *server; struct VuVirtq *vq; } VuBlkReq; @@ -50,246 +34,45 @@ typedef struct VuBlkReq { typedef struct { BlockExport export; VuServer vu_server; - uint32_t blk_size; + VirtioBlkHandler handler; QIOChannelSocket *sioc; struct virtio_blk_config blkcfg; - bool writable; } VuBlkExport; -static void vu_blk_req_complete(VuBlkReq *req) +static void vu_blk_req_complete(VuBlkReq *req, size_t in_len) { VuDev *vu_dev = &req->server->vu_dev; - /* IO size with 1 extra status byte */ - vu_queue_push(vu_dev, req->vq, &req->elem, req->size + 1); + vu_queue_push(vu_dev, req->vq, &req->elem, in_len); vu_queue_notify(vu_dev, req->vq); free(req); } -static bool vu_blk_sect_range_ok(VuBlkExport *vexp, uint64_t sector, - size_t size) -{ - uint64_t nb_sectors; - uint64_t total_sectors; - - if (size % VIRTIO_BLK_SECTOR_SIZE) { - return false; - } - - nb_sectors = size >> VIRTIO_BLK_SECTOR_BITS; - - QEMU_BUILD_BUG_ON(BDRV_SECTOR_SIZE != VIRTIO_BLK_SECTOR_SIZE); - if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) { - return false; - } - if ((sector << VIRTIO_BLK_SECTOR_BITS) % vexp->blk_size) { - return false; - } - blk_get_geometry(vexp->export.blk, &total_sectors); - if (sector > total_sectors || nb_sectors > total_sectors - sector) { - return false; - } - return true; -} - -static int coroutine_fn -vu_blk_discard_write_zeroes(VuBlkExport *vexp, struct iovec *iov, - uint32_t iovcnt, uint32_t type) -{ - BlockBackend *blk = vexp->export.blk; - struct virtio_blk_discard_write_zeroes desc; - ssize_t size; - uint64_t sector; - uint32_t num_sectors; - uint32_t max_sectors; - uint32_t flags; - int bytes; - - /* Only one desc is currently supported */ - if (unlikely(iov_size(iov, iovcnt) > sizeof(desc))) { - return VIRTIO_BLK_S_UNSUPP; - } - - size = iov_to_buf(iov, iovcnt, 0, &desc, sizeof(desc)); - if (unlikely(size != sizeof(desc))) { - error_report("Invalid size %zd, expected %zu", size, sizeof(desc)); - return VIRTIO_BLK_S_IOERR; - } - - sector = le64_to_cpu(desc.sector); - num_sectors = le32_to_cpu(desc.num_sectors); - flags = le32_to_cpu(desc.flags); - max_sectors = (type == VIRTIO_BLK_T_WRITE_ZEROES) ? 
- VHOST_USER_BLK_MAX_WRITE_ZEROES_SECTORS : - VHOST_USER_BLK_MAX_DISCARD_SECTORS; - - /* This check ensures that 'bytes' fits in an int */ - if (unlikely(num_sectors > max_sectors)) { - return VIRTIO_BLK_S_IOERR; - } - - bytes = num_sectors << VIRTIO_BLK_SECTOR_BITS; - - if (unlikely(!vu_blk_sect_range_ok(vexp, sector, bytes))) { - return VIRTIO_BLK_S_IOERR; - } - - /* - * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard - * and write zeroes commands if any unknown flag is set. - */ - if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) { - return VIRTIO_BLK_S_UNSUPP; - } - - if (type == VIRTIO_BLK_T_WRITE_ZEROES) { - int blk_flags = 0; - - if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) { - blk_flags |= BDRV_REQ_MAY_UNMAP; - } - - if (blk_co_pwrite_zeroes(blk, sector << VIRTIO_BLK_SECTOR_BITS, - bytes, blk_flags) == 0) { - return VIRTIO_BLK_S_OK; - } - } else if (type == VIRTIO_BLK_T_DISCARD) { - /* - * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for - * discard commands if the unmap flag is set. - */ - if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) { - return VIRTIO_BLK_S_UNSUPP; - } - - if (blk_co_pdiscard(blk, sector << VIRTIO_BLK_SECTOR_BITS, - bytes) == 0) { - return VIRTIO_BLK_S_OK; - } - } - - return VIRTIO_BLK_S_IOERR; -} - +/* Called with server refcount increased, must decrease before returning */ static void coroutine_fn vu_blk_virtio_process_req(void *opaque) { VuBlkReq *req = opaque; VuServer *server = req->server; VuVirtqElement *elem = &req->elem; - uint32_t type; - VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server); - BlockBackend *blk = vexp->export.blk; - + VirtioBlkHandler *handler = &vexp->handler; struct iovec *in_iov = elem->in_sg; struct iovec *out_iov = elem->out_sg; unsigned in_num = elem->in_num; unsigned out_num = elem->out_num; + int in_len; - /* refer to hw/block/virtio_blk.c */ - if (elem->out_num < 1 || elem->in_num < 1) { - error_report("virtio-blk request missing headers"); - goto err; + in_len = virtio_blk_process_req(handler, in_iov, out_iov, + in_num, out_num); + if (in_len < 0) { + free(req); + vhost_user_server_unref(server); + return; } - if (unlikely(iov_to_buf(out_iov, out_num, 0, &req->out, - sizeof(req->out)) != sizeof(req->out))) { - error_report("virtio-blk request outhdr too short"); - goto err; - } - - iov_discard_front(&out_iov, &out_num, sizeof(req->out)); - - if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) { - error_report("virtio-blk request inhdr too short"); - goto err; - } - - /* We always touch the last byte, so just see how big in_iov is. 
*/ - req->in = (void *)in_iov[in_num - 1].iov_base - + in_iov[in_num - 1].iov_len - - sizeof(struct virtio_blk_inhdr); - iov_discard_back(in_iov, &in_num, sizeof(struct virtio_blk_inhdr)); - - type = le32_to_cpu(req->out.type); - switch (type & ~VIRTIO_BLK_T_BARRIER) { - case VIRTIO_BLK_T_IN: - case VIRTIO_BLK_T_OUT: { - QEMUIOVector qiov; - int64_t offset; - ssize_t ret = 0; - bool is_write = type & VIRTIO_BLK_T_OUT; - req->sector_num = le64_to_cpu(req->out.sector); - - if (is_write && !vexp->writable) { - req->in->status = VIRTIO_BLK_S_IOERR; - break; - } - - if (is_write) { - qemu_iovec_init_external(&qiov, out_iov, out_num); - } else { - qemu_iovec_init_external(&qiov, in_iov, in_num); - } - - if (unlikely(!vu_blk_sect_range_ok(vexp, - req->sector_num, - qiov.size))) { - req->in->status = VIRTIO_BLK_S_IOERR; - break; - } - - offset = req->sector_num << VIRTIO_BLK_SECTOR_BITS; - - if (is_write) { - ret = blk_co_pwritev(blk, offset, qiov.size, &qiov, 0); - } else { - ret = blk_co_preadv(blk, offset, qiov.size, &qiov, 0); - } - if (ret >= 0) { - req->in->status = VIRTIO_BLK_S_OK; - } else { - req->in->status = VIRTIO_BLK_S_IOERR; - } - break; - } - case VIRTIO_BLK_T_FLUSH: - if (blk_co_flush(blk) == 0) { - req->in->status = VIRTIO_BLK_S_OK; - } else { - req->in->status = VIRTIO_BLK_S_IOERR; - } - break; - case VIRTIO_BLK_T_GET_ID: { - size_t size = MIN(iov_size(&elem->in_sg[0], in_num), - VIRTIO_BLK_ID_BYTES); - snprintf(elem->in_sg[0].iov_base, size, "%s", "vhost_user_blk"); - req->in->status = VIRTIO_BLK_S_OK; - req->size = elem->in_sg[0].iov_len; - break; - } - case VIRTIO_BLK_T_DISCARD: - case VIRTIO_BLK_T_WRITE_ZEROES: { - if (!vexp->writable) { - req->in->status = VIRTIO_BLK_S_IOERR; - break; - } - - req->in->status = vu_blk_discard_write_zeroes(vexp, out_iov, out_num, - type); - break; - } - default: - req->in->status = VIRTIO_BLK_S_UNSUPP; - break; - } - - vu_blk_req_complete(req); - return; - -err: - free(req); + vu_blk_req_complete(req, in_len); + vhost_user_server_unref(server); } static void vu_blk_process_vq(VuDev *vu_dev, int idx) @@ -310,6 +93,8 @@ static void vu_blk_process_vq(VuDev *vu_dev, int idx) Coroutine *co = qemu_coroutine_create(vu_blk_virtio_process_req, req); + + vhost_user_server_ref(server); qemu_coroutine_enter(co); } } @@ -343,7 +128,7 @@ static uint64_t vu_blk_get_features(VuDev *dev) 1ull << VIRTIO_RING_F_EVENT_IDX | 1ull << VHOST_USER_F_PROTOCOL_FEATURES; - if (!vexp->writable) { + if (!vexp->handler.writable) { features |= 1ull << VIRTIO_BLK_F_RO; } @@ -450,12 +235,12 @@ vu_blk_initialize_config(BlockDriverState *bs, config->opt_io_size = cpu_to_le32(1); config->num_queues = cpu_to_le16(num_queues); config->max_discard_sectors = - cpu_to_le32(VHOST_USER_BLK_MAX_DISCARD_SECTORS); + cpu_to_le32(VIRTIO_BLK_MAX_DISCARD_SECTORS); config->max_discard_seg = cpu_to_le32(1); config->discard_sector_alignment = cpu_to_le32(blk_size >> VIRTIO_BLK_SECTOR_BITS); config->max_write_zeroes_sectors - = cpu_to_le32(VHOST_USER_BLK_MAX_WRITE_ZEROES_SECTORS); + = cpu_to_le32(VIRTIO_BLK_MAX_WRITE_ZEROES_SECTORS); config->max_write_zeroes_seg = cpu_to_le32(1); } @@ -475,7 +260,6 @@ static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts, uint64_t logical_block_size; uint16_t num_queues = VHOST_USER_BLK_NUM_QUEUES_DEFAULT; - vexp->writable = opts->writable; vexp->blkcfg.wce = 0; if (vu_opts->has_logical_block_size) { @@ -489,8 +273,6 @@ static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts, error_propagate(errp, local_err); return -EINVAL; } - 
vexp->blk_size = logical_block_size; - blk_set_guest_block_size(exp->blk, logical_block_size); if (vu_opts->has_num_queues) { num_queues = vu_opts->num_queues; @@ -499,6 +281,10 @@ static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts, error_setg(errp, "num-queues must be greater than 0"); return -EINVAL; } + vexp->handler.blk = exp->blk; + vexp->handler.serial = g_strdup("vhost_user_blk"); + vexp->handler.logical_block_size = logical_block_size; + vexp->handler.writable = opts->writable; vu_blk_initialize_config(blk_bs(exp->blk), &vexp->blkcfg, logical_block_size, num_queues); @@ -510,6 +296,7 @@ static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts, num_queues, &vu_blk_iface, errp)) { blk_remove_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach, vexp); + g_free(vexp->handler.serial); return -EADDRNOTAVAIL; } @@ -522,6 +309,7 @@ static void vu_blk_exp_delete(BlockExport *exp) blk_remove_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach, vexp); + g_free(vexp->handler.serial); } const BlockExportDriver blk_exp_vhost_user_blk = { diff --git a/block/export/virtio-blk-handler.c b/block/export/virtio-blk-handler.c new file mode 100644 index 0000000000..313666e8ab --- /dev/null +++ b/block/export/virtio-blk-handler.c @@ -0,0 +1,240 @@ +/* + * Handler for virtio-blk I/O + * + * Copyright (c) 2020 Red Hat, Inc. + * Copyright (C) 2022 Bytedance Inc. and/or its affiliates. All rights reserved. + * + * Author: + * Coiby Xu + * Xie Yongji + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * later. See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "virtio-blk-handler.h" + +#include "standard-headers/linux/virtio_blk.h" + +struct virtio_blk_inhdr { + unsigned char status; +}; + +static bool virtio_blk_sect_range_ok(BlockBackend *blk, uint32_t block_size, + uint64_t sector, size_t size) +{ + uint64_t nb_sectors; + uint64_t total_sectors; + + if (size % VIRTIO_BLK_SECTOR_SIZE) { + return false; + } + + nb_sectors = size >> VIRTIO_BLK_SECTOR_BITS; + + QEMU_BUILD_BUG_ON(BDRV_SECTOR_SIZE != VIRTIO_BLK_SECTOR_SIZE); + if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) { + return false; + } + if ((sector << VIRTIO_BLK_SECTOR_BITS) % block_size) { + return false; + } + blk_get_geometry(blk, &total_sectors); + if (sector > total_sectors || nb_sectors > total_sectors - sector) { + return false; + } + return true; +} + +static int coroutine_fn +virtio_blk_discard_write_zeroes(VirtioBlkHandler *handler, struct iovec *iov, + uint32_t iovcnt, uint32_t type) +{ + BlockBackend *blk = handler->blk; + struct virtio_blk_discard_write_zeroes desc; + ssize_t size; + uint64_t sector; + uint32_t num_sectors; + uint32_t max_sectors; + uint32_t flags; + int bytes; + + /* Only one desc is currently supported */ + if (unlikely(iov_size(iov, iovcnt) > sizeof(desc))) { + return VIRTIO_BLK_S_UNSUPP; + } + + size = iov_to_buf(iov, iovcnt, 0, &desc, sizeof(desc)); + if (unlikely(size != sizeof(desc))) { + error_report("Invalid size %zd, expected %zu", size, sizeof(desc)); + return VIRTIO_BLK_S_IOERR; + } + + sector = le64_to_cpu(desc.sector); + num_sectors = le32_to_cpu(desc.num_sectors); + flags = le32_to_cpu(desc.flags); + max_sectors = (type == VIRTIO_BLK_T_WRITE_ZEROES) ? 
+ VIRTIO_BLK_MAX_WRITE_ZEROES_SECTORS : + VIRTIO_BLK_MAX_DISCARD_SECTORS; + + /* This check ensures that 'bytes' fits in an int */ + if (unlikely(num_sectors > max_sectors)) { + return VIRTIO_BLK_S_IOERR; + } + + bytes = num_sectors << VIRTIO_BLK_SECTOR_BITS; + + if (unlikely(!virtio_blk_sect_range_ok(blk, handler->logical_block_size, + sector, bytes))) { + return VIRTIO_BLK_S_IOERR; + } + + /* + * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard + * and write zeroes commands if any unknown flag is set. + */ + if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) { + return VIRTIO_BLK_S_UNSUPP; + } + + if (type == VIRTIO_BLK_T_WRITE_ZEROES) { + int blk_flags = 0; + + if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) { + blk_flags |= BDRV_REQ_MAY_UNMAP; + } + + if (blk_co_pwrite_zeroes(blk, sector << VIRTIO_BLK_SECTOR_BITS, + bytes, blk_flags) == 0) { + return VIRTIO_BLK_S_OK; + } + } else if (type == VIRTIO_BLK_T_DISCARD) { + /* + * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for + * discard commands if the unmap flag is set. + */ + if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) { + return VIRTIO_BLK_S_UNSUPP; + } + + if (blk_co_pdiscard(blk, sector << VIRTIO_BLK_SECTOR_BITS, + bytes) == 0) { + return VIRTIO_BLK_S_OK; + } + } + + return VIRTIO_BLK_S_IOERR; +} + +int coroutine_fn virtio_blk_process_req(VirtioBlkHandler *handler, + struct iovec *in_iov, + struct iovec *out_iov, + unsigned int in_num, + unsigned int out_num) +{ + BlockBackend *blk = handler->blk; + struct virtio_blk_inhdr *in; + struct virtio_blk_outhdr out; + uint32_t type; + int in_len; + + if (out_num < 1 || in_num < 1) { + error_report("virtio-blk request missing headers"); + return -EINVAL; + } + + if (unlikely(iov_to_buf(out_iov, out_num, 0, &out, + sizeof(out)) != sizeof(out))) { + error_report("virtio-blk request outhdr too short"); + return -EINVAL; + } + + iov_discard_front(&out_iov, &out_num, sizeof(out)); + + if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) { + error_report("virtio-blk request inhdr too short"); + return -EINVAL; + } + + /* We always touch the last byte, so just see how big in_iov is. 
*/ + in_len = iov_size(in_iov, in_num); + in = (void *)in_iov[in_num - 1].iov_base + + in_iov[in_num - 1].iov_len + - sizeof(struct virtio_blk_inhdr); + iov_discard_back(in_iov, &in_num, sizeof(struct virtio_blk_inhdr)); + + type = le32_to_cpu(out.type); + switch (type & ~VIRTIO_BLK_T_BARRIER) { + case VIRTIO_BLK_T_IN: + case VIRTIO_BLK_T_OUT: { + QEMUIOVector qiov; + int64_t offset; + ssize_t ret = 0; + bool is_write = type & VIRTIO_BLK_T_OUT; + int64_t sector_num = le64_to_cpu(out.sector); + + if (is_write && !handler->writable) { + in->status = VIRTIO_BLK_S_IOERR; + break; + } + + if (is_write) { + qemu_iovec_init_external(&qiov, out_iov, out_num); + } else { + qemu_iovec_init_external(&qiov, in_iov, in_num); + } + + if (unlikely(!virtio_blk_sect_range_ok(blk, + handler->logical_block_size, + sector_num, qiov.size))) { + in->status = VIRTIO_BLK_S_IOERR; + break; + } + + offset = sector_num << VIRTIO_BLK_SECTOR_BITS; + + if (is_write) { + ret = blk_co_pwritev(blk, offset, qiov.size, &qiov, 0); + } else { + ret = blk_co_preadv(blk, offset, qiov.size, &qiov, 0); + } + if (ret >= 0) { + in->status = VIRTIO_BLK_S_OK; + } else { + in->status = VIRTIO_BLK_S_IOERR; + } + break; + } + case VIRTIO_BLK_T_FLUSH: + if (blk_co_flush(blk) == 0) { + in->status = VIRTIO_BLK_S_OK; + } else { + in->status = VIRTIO_BLK_S_IOERR; + } + break; + case VIRTIO_BLK_T_GET_ID: { + size_t size = MIN(strlen(handler->serial) + 1, + MIN(iov_size(in_iov, in_num), + VIRTIO_BLK_ID_BYTES)); + iov_from_buf(in_iov, in_num, 0, handler->serial, size); + in->status = VIRTIO_BLK_S_OK; + break; + } + case VIRTIO_BLK_T_DISCARD: + case VIRTIO_BLK_T_WRITE_ZEROES: + if (!handler->writable) { + in->status = VIRTIO_BLK_S_IOERR; + break; + } + in->status = virtio_blk_discard_write_zeroes(handler, out_iov, + out_num, type); + break; + default: + in->status = VIRTIO_BLK_S_UNSUPP; + break; + } + + return in_len; +} diff --git a/block/export/virtio-blk-handler.h b/block/export/virtio-blk-handler.h new file mode 100644 index 0000000000..150d44cff2 --- /dev/null +++ b/block/export/virtio-blk-handler.h @@ -0,0 +1,37 @@ +/* + * Handler for virtio-blk I/O + * + * Copyright (C) 2022 Bytedance Inc. and/or its affiliates. All rights reserved. + * + * Author: + * Xie Yongji + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * later. See the COPYING file in the top-level directory. 
+ */ + +#ifndef VIRTIO_BLK_HANDLER_H +#define VIRTIO_BLK_HANDLER_H + +#include "sysemu/block-backend.h" + +#define VIRTIO_BLK_SECTOR_BITS 9 +#define VIRTIO_BLK_SECTOR_SIZE (1ULL << VIRTIO_BLK_SECTOR_BITS) + +#define VIRTIO_BLK_MAX_DISCARD_SECTORS 32768 +#define VIRTIO_BLK_MAX_WRITE_ZEROES_SECTORS 32768 + +typedef struct { + BlockBackend *blk; + char *serial; + uint32_t logical_block_size; + bool writable; +} VirtioBlkHandler; + +int coroutine_fn virtio_blk_process_req(VirtioBlkHandler *handler, + struct iovec *in_iov, + struct iovec *out_iov, + unsigned int in_num, + unsigned int out_num); + +#endif /* VIRTIO_BLK_HANDLER_H */ diff --git a/block/file-posix.c b/block/file-posix.c index cb9bffe047..b9647c5ffc 100644 --- a/block/file-posix.c +++ b/block/file-posix.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/error.h" #include "qemu/cutils.h" #include "qemu/error-report.h" @@ -31,6 +30,7 @@ #include "qemu/module.h" #include "qemu/option.h" #include "qemu/units.h" +#include "qemu/memalign.h" #include "trace.h" #include "block/thread-pool.h" #include "qemu/iov.h" @@ -106,10 +106,6 @@ #include #endif -#ifdef CONFIG_XFS -#include -#endif - /* OS X does not have O_DSYNC */ #ifndef O_DSYNC #ifdef O_SYNC @@ -150,21 +146,20 @@ typedef struct BDRVRawState { uint64_t locked_perm; uint64_t locked_shared_perm; + uint64_t aio_max_batch; + int perm_change_fd; int perm_change_flags; BDRVReopenState *reopen_state; -#ifdef CONFIG_XFS - bool is_xfs:1; -#endif bool has_discard:1; bool has_write_zeroes:1; - bool discard_zeroes:1; bool use_linux_aio:1; bool use_linux_io_uring:1; int page_cache_inconsistent; /* errno from fdatasync failure */ bool has_fallocate; bool needs_alignment; + bool force_alignment; bool drop_cache; bool check_cache_dropped; struct { @@ -349,6 +344,17 @@ static bool dio_byte_aligned(int fd) return false; } +static bool raw_needs_alignment(BlockDriverState *bs) +{ + BDRVRawState *s = bs->opaque; + + if ((bs->open_flags & BDRV_O_NOCACHE) != 0 && !dio_byte_aligned(s->fd)) { + return true; + } + + return s->force_alignment; +} + /* Check if read is allowed with given memory buffer and length. * * This function is used to check O_DIRECT memory buffer and request alignment. @@ -378,7 +384,7 @@ static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp) { BDRVRawState *s = bs->opaque; char *buf; - size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size); + size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size()); size_t alignments[] = {1, 512, 1024, 2048, 4096}; /* For SCSI generic devices the alignment is not really used. @@ -395,14 +401,22 @@ static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp) if (probe_logical_blocksize(fd, &bs->bl.request_alignment) < 0) { bs->bl.request_alignment = 0; } -#ifdef CONFIG_XFS - if (s->is_xfs) { - struct dioattr da; - if (xfsctl(NULL, fd, XFS_IOC_DIOINFO, &da) >= 0) { - bs->bl.request_alignment = da.d_miniosz; - /* The kernel returns wrong information for d_mem */ - /* s->buf_align = da.d_mem; */ - } + +#ifdef __linux__ + /* + * The XFS ioctl definitions are shipped in extra packages that might + * not always be available. 
Since we just need the XFS_IOC_DIOINFO ioctl + * here, we simply use our own definition instead: + */ + struct xfs_dioattr { + uint32_t d_mem; + uint32_t d_miniosz; + uint32_t d_maxiosz; + } da; + if (ioctl(fd, _IOR('X', 30, struct xfs_dioattr), &da) >= 0) { + bs->bl.request_alignment = da.d_miniosz; + /* The kernel returns wrong information for d_mem */ + /* s->buf_align = da.d_mem; */ } #endif @@ -530,6 +544,11 @@ static QemuOptsList raw_runtime_opts = { .type = QEMU_OPT_STRING, .help = "host AIO implementation (threads, native, io_uring)", }, + { + .name = "aio-max-batch", + .type = QEMU_OPT_NUMBER, + .help = "AIO max batch size (0 = auto handled by AIO backend, default: 0)", + }, { .name = "locking", .type = QEMU_OPT_STRING, @@ -609,6 +628,8 @@ static int raw_open_common(BlockDriverState *bs, QDict *options, s->use_linux_io_uring = (aio == BLOCKDEV_AIO_OPTIONS_IO_URING); #endif + s->aio_max_batch = qemu_opt_get_number(opts, "aio-max-batch", 0); + locking = qapi_enum_parse(&OnOffAuto_lookup, qemu_opt_get(opts, "locking"), ON_OFF_AUTO_AUTO, &local_err); @@ -719,9 +740,6 @@ static int raw_open_common(BlockDriverState *bs, QDict *options, s->has_discard = true; s->has_write_zeroes = true; - if ((bs->open_flags & BDRV_O_NOCACHE) != 0 && !dio_byte_aligned(s->fd)) { - s->needs_alignment = true; - } if (fstat(s->fd, &st) < 0) { ret = -errno; @@ -736,7 +754,6 @@ static int raw_open_common(BlockDriverState *bs, QDict *options, ret = -EINVAL; goto fail; } else { - s->discard_zeroes = true; s->has_fallocate = true; } } else { @@ -750,19 +767,12 @@ static int raw_open_common(BlockDriverState *bs, QDict *options, } if (S_ISBLK(st.st_mode)) { -#ifdef BLKDISCARDZEROES - unsigned int arg; - if (ioctl(s->fd, BLKDISCARDZEROES, &arg) == 0 && arg) { - s->discard_zeroes = true; - } -#endif #ifdef __linux__ /* On Linux 3.10, BLKDISCARD leaves stale data in the page cache. Do * not rely on the contents of discarded blocks unless using O_DIRECT. * Same for BLKZEROOUT. */ if (!(bs->open_flags & BDRV_O_NOCACHE)) { - s->discard_zeroes = false; s->has_write_zeroes = false; } #endif @@ -775,15 +785,10 @@ static int raw_open_common(BlockDriverState *bs, QDict *options, * so QEMU makes sure all IO operations on the device are aligned * to sector size, or else FreeBSD will reject them with EINVAL. 
*/ - s->needs_alignment = true; - } -#endif - -#ifdef CONFIG_XFS - if (platform_test_xfs_fd(s->fd)) { - s->is_xfs = true; + s->force_alignment = true; } #endif + s->needs_alignment = raw_needs_alignment(bs); bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK; if (S_ISREG(st.st_mode)) { @@ -1008,6 +1013,21 @@ static int raw_handle_perm_lock(BlockDriverState *bs, return ret; } +/* Sets a specific flag */ +static int fcntl_setfl(int fd, int flag) +{ + int flags; + + flags = fcntl(fd, F_GETFL); + if (flags == -1) { + return -errno; + } + if (fcntl(fd, F_SETFL, flags | flag) == -1) { + return -errno; + } + return 0; +} + static int raw_reconfigure_getfd(BlockDriverState *bs, int flags, int *open_flags, uint64_t perm, bool force_dup, Error **errp) @@ -1242,9 +1262,11 @@ static void raw_refresh_limits(BlockDriverState *bs, Error **errp) BDRVRawState *s = bs->opaque; struct stat st; + s->needs_alignment = raw_needs_alignment(bs); raw_probe_alignment(bs, s->fd, errp); + bs->bl.min_mem_alignment = s->buf_align; - bs->bl.opt_mem_alignment = MAX(s->buf_align, qemu_real_host_page_size); + bs->bl.opt_mem_alignment = MAX(s->buf_align, qemu_real_host_page_size()); /* * Maximum transfers are best effort, so it is okay to ignore any @@ -1264,7 +1286,7 @@ static void raw_refresh_limits(BlockDriverState *bs, Error **errp) } #endif - if (bs->sg || S_ISBLK(st.st_mode)) { + if (bdrv_is_sg(bs) || S_ISBLK(st.st_mode)) { int ret = hdev_get_max_hw_transfer(s->fd, &st); if (ret > 0 && ret <= BDRV_REQUEST_MAX_BYTES) { @@ -1273,7 +1295,7 @@ static void raw_refresh_limits(BlockDriverState *bs, Error **errp) ret = hdev_get_max_segments(s->fd, &st); if (ret > 0) { - bs->bl.max_iov = ret; + bs->bl.max_hw_iov = ret; } } } @@ -1705,7 +1727,7 @@ static int handle_aiocb_write_zeroes(void *opaque) */ warn_report_once("Your file system is misbehaving: " "fallocate(FALLOC_FL_PUNCH_HOLE) returned EINVAL. " - "Please report this bug to your file sytem " + "Please report this bug to your file system " "vendor."); } else if (ret != -ENOTSUP) { return ret; @@ -1807,7 +1829,7 @@ static int handle_aiocb_copy_range(void *opaque) static int handle_aiocb_discard(void *opaque) { RawPosixAIOData *aiocb = opaque; - int ret = -EOPNOTSUPP; + int ret = -ENOTSUP; BDRVRawState *s = aiocb->bs->opaque; if (!s->has_discard) { @@ -1829,7 +1851,7 @@ static int handle_aiocb_discard(void *opaque) #ifdef CONFIG_FALLOCATE_PUNCH_HOLE ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, aiocb->aio_offset, aiocb->aio_nbytes); - ret = translate_err(-errno); + ret = translate_err(ret); #elif defined(__APPLE__) && (__MACH__) fpunchhole_t fpunchhole; fpunchhole.fp_flags = 0; @@ -1869,7 +1891,7 @@ static int allocate_first_block(int fd, size_t max_size) size_t write_size = (max_size < MAX_BLOCKSIZE) ? BDRV_SECTOR_SIZE : MAX_BLOCKSIZE; - size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size); + size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size()); void *buf; ssize_t n; int ret; @@ -2030,6 +2052,28 @@ static int coroutine_fn raw_thread_pool_submit(BlockDriverState *bs, return thread_pool_submit_co(pool, func, arg); } +/* + * Check if all memory in this vector is sector aligned. 
+ */ +static bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) +{ + int i; + size_t alignment = bdrv_min_mem_align(bs); + size_t len = bs->bl.request_alignment; + IO_CODE(); + + for (i = 0; i < qiov->niov; i++) { + if ((uintptr_t) qiov->iov[i].iov_base % alignment) { + return false; + } + if (qiov->iov[i].iov_len % len) { + return false; + } + } + + return true; +} + static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int type) { @@ -2057,7 +2101,8 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset, } else if (s->use_linux_aio) { LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); assert(qiov->size == bytes); - return laio_co_submit(bs, aio, s->fd, offset, qiov, type); + return laio_co_submit(bs, aio, s->fd, offset, qiov, type, + s->aio_max_batch); #endif } @@ -2077,18 +2122,17 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset, return raw_thread_pool_submit(bs, handle_aiocb_rw, &acb); } -static int coroutine_fn raw_co_preadv(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, - int flags) +static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_READ); } -static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, - int flags) +static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { - assert(flags == 0); return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_WRITE); } @@ -2115,7 +2159,7 @@ static void raw_aio_unplug(BlockDriverState *bs) #ifdef CONFIG_LINUX_AIO if (s->use_linux_aio) { LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); - laio_io_unplug(bs, aio); + laio_io_unplug(bs, aio, s->aio_max_batch); } #endif #ifdef CONFIG_LINUX_IO_URING @@ -2126,7 +2170,7 @@ static void raw_aio_unplug(BlockDriverState *bs) #endif } -static int raw_co_flush_to_disk(BlockDriverState *bs) +static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs) { BDRVRawState *s = bs->opaque; RawPosixAIOData acb; @@ -2744,7 +2788,8 @@ static int find_allocation(BlockDriverState *bs, off_t start, * the specified offset) that are known to be in the same * allocated/unallocated state. * - * 'bytes' is the max value 'pnum' should be set to. + * 'bytes' is a soft cap for 'pnum'. If the information is free, 'pnum' may + * well exceed it. */ static int coroutine_fn raw_co_block_status(BlockDriverState *bs, bool want_zero, @@ -2782,7 +2827,7 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs, } else if (data == offset) { /* On a data extent, compute bytes to the end of the extent, * possibly including a partial sector at EOF. */ - *pnum = MIN(bytes, hole - offset); + *pnum = hole - offset; /* * We are not allowed to return partial sectors, though, so @@ -2801,7 +2846,7 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs, } else { /* On a hole, compute bytes to the beginning of the next extent. 
*/ assert(hole == offset); - *pnum = MIN(bytes, data - offset); + *pnum = data - offset; ret = BDRV_BLOCK_ZERO; } *map = offset; @@ -2941,7 +2986,8 @@ static void raw_account_discard(BDRVRawState *s, uint64_t nbytes, int ret) } static coroutine_fn int -raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int bytes, bool blkdev) +raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes, + bool blkdev) { BDRVRawState *s = bs->opaque; RawPosixAIOData acb; @@ -2965,13 +3011,13 @@ raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int bytes, bool blkdev) } static coroutine_fn int -raw_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes) +raw_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes) { return raw_do_pdiscard(bs, offset, bytes, false); } static int coroutine_fn -raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes, +raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes, BdrvRequestFlags flags, bool blkdev) { BDRVRawState *s = bs->opaque; @@ -3039,7 +3085,7 @@ raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes, static int coroutine_fn raw_co_pwrite_zeroes( BlockDriverState *bs, int64_t offset, - int bytes, BdrvRequestFlags flags) + int64_t bytes, BdrvRequestFlags flags) { return raw_do_pwrite_zeroes(bs, offset, bytes, flags, false); } @@ -3202,8 +3248,8 @@ static void raw_abort_perm_update(BlockDriverState *bs) } static int coroutine_fn raw_co_copy_range_from( - BlockDriverState *bs, BdrvChild *src, uint64_t src_offset, - BdrvChild *dst, uint64_t dst_offset, uint64_t bytes, + BlockDriverState *bs, BdrvChild *src, int64_t src_offset, + BdrvChild *dst, int64_t dst_offset, int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { return bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes, @@ -3212,10 +3258,10 @@ static int coroutine_fn raw_co_copy_range_from( static int coroutine_fn raw_co_copy_range_to(BlockDriverState *bs, BdrvChild *src, - uint64_t src_offset, + int64_t src_offset, BdrvChild *dst, - uint64_t dst_offset, - uint64_t bytes, + int64_t dst_offset, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { @@ -3300,17 +3346,23 @@ BlockDriver bdrv_file = { #if defined(__APPLE__) && defined(__MACH__) static kern_return_t GetBSDPath(io_iterator_t mediaIterator, char *bsdPath, CFIndex maxPathSize, int flags); + +#if !defined(MAC_OS_VERSION_12_0) \ + || (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_12_0) +#define IOMainPort IOMasterPort +#endif + static char *FindEjectableOpticalMedia(io_iterator_t *mediaIterator) { kern_return_t kernResult = KERN_FAILURE; - mach_port_t masterPort; + mach_port_t mainPort; CFMutableDictionaryRef classesToMatch; const char *matching_array[] = {kIODVDMediaClass, kIOCDMediaClass}; char *mediaType = NULL; - kernResult = IOMasterPort( MACH_PORT_NULL, &masterPort ); + kernResult = IOMainPort(MACH_PORT_NULL, &mainPort); if ( KERN_SUCCESS != kernResult ) { - printf( "IOMasterPort returned %d\n", kernResult ); + printf("IOMainPort returned %d\n", kernResult); } int index; @@ -3323,7 +3375,7 @@ static char *FindEjectableOpticalMedia(io_iterator_t *mediaIterator) } CFDictionarySetValue(classesToMatch, CFSTR(kIOMediaEjectableKey), kCFBooleanTrue); - kernResult = IOServiceGetMatchingServices(masterPort, classesToMatch, + kernResult = IOServiceGetMatchingServices(mainPort, classesToMatch, mediaIterator); if (kernResult != KERN_SUCCESS) { error_report("Note: IOServiceGetMatchingServices returned %d", @@ -3590,7 +3642,7 @@ 
hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) #endif /* linux */ static coroutine_fn int -hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes) +hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes) { BDRVRawState *s = bs->opaque; int ret; @@ -3604,7 +3656,7 @@ hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes) } static coroutine_fn int hdev_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, BdrvRequestFlags flags) + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { int rc; diff --git a/block/file-win32.c b/block/file-win32.c index 8a8ab436e7..b69de11096 100644 --- a/block/file-win32.c +++ b/block/file-win32.c @@ -59,6 +59,10 @@ typedef struct BDRVRawState { QEMUWin32AIOState *aio; } BDRVRawState; +typedef struct BDRVRawReopenState { + HANDLE hfile; +} BDRVRawReopenState; + /* * Read/writes the data to/from a given linear buffer. * @@ -412,8 +416,8 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags, OPEN_EXISTING, overlapped, NULL); #else s->hfile = CreateFileW(wfilename, access_flags, - FILE_SHARE_READ, NULL, - OPEN_EXISTING, overlapped, NULL); + FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, + OPEN_EXISTING, overlapped, NULL); #endif g_free(wfilename); if (s->hfile == INVALID_HANDLE_VALUE) { @@ -458,8 +462,8 @@ fail: } static BlockAIOCB *raw_aio_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags, + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) { BDRVRawState *s = bs->opaque; @@ -473,8 +477,8 @@ static BlockAIOCB *raw_aio_preadv(BlockDriverState *bs, } static BlockAIOCB *raw_aio_pwritev(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags, + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) { BDRVRawState *s = bs->opaque; @@ -664,6 +668,97 @@ static int coroutine_fn raw_co_create_opts(BlockDriver *drv, return raw_co_create(&options, errp); } +static int raw_reopen_prepare(BDRVReopenState *state, + BlockReopenQueue *queue, Error **errp) +{ + BDRVRawState *s = state->bs->opaque; + BDRVRawReopenState *rs; + int access_flags; + DWORD overlapped; + int ret = 0; + + if (s->type != FTYPE_FILE) { + error_setg(errp, "Can only reopen files"); + return -EINVAL; + } + + rs = g_new0(BDRVRawReopenState, 1); + + /* + * We do not support changing any options (only flags). By leaving + * all options in state->options, we tell the generic reopen code + * that we do not support changing any of them, so it will verify + * that their values did not change. 
+ */ + + raw_parse_flags(state->flags, s->aio != NULL, &access_flags, &overlapped); + rs->hfile = CreateFile(state->bs->filename, access_flags, + FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, + OPEN_EXISTING, overlapped, NULL); + + if (rs->hfile == INVALID_HANDLE_VALUE) { + int err = GetLastError(); + + error_setg_win32(errp, err, "Could not reopen '%s'", + state->bs->filename); + if (err == ERROR_ACCESS_DENIED) { + ret = -EACCES; + } else { + ret = -EINVAL; + } + goto fail; + } + + if (s->aio) { + ret = win32_aio_attach(s->aio, rs->hfile); + if (ret < 0) { + error_setg_errno(errp, -ret, "Could not enable AIO"); + CloseHandle(rs->hfile); + goto fail; + } + } + + state->opaque = rs; + + return 0; + +fail: + g_free(rs); + state->opaque = NULL; + + return ret; +} + +static void raw_reopen_commit(BDRVReopenState *state) +{ + BDRVRawState *s = state->bs->opaque; + BDRVRawReopenState *rs = state->opaque; + + assert(rs != NULL); + + CloseHandle(s->hfile); + s->hfile = rs->hfile; + + g_free(rs); + state->opaque = NULL; +} + +static void raw_reopen_abort(BDRVReopenState *state) +{ + BDRVRawReopenState *rs = state->opaque; + + if (!rs) { + return; + } + + if (rs->hfile != INVALID_HANDLE_VALUE) { + CloseHandle(rs->hfile); + } + + g_free(rs); + state->opaque = NULL; +} + static QemuOptsList raw_create_opts = { .name = "raw-create-opts", .head = QTAILQ_HEAD_INITIALIZER(raw_create_opts.head), @@ -689,6 +784,10 @@ BlockDriver bdrv_file = { .bdrv_co_create_opts = raw_co_create_opts, .bdrv_has_zero_init = bdrv_has_zero_init_1, + .bdrv_reopen_prepare = raw_reopen_prepare, + .bdrv_reopen_commit = raw_reopen_commit, + .bdrv_reopen_abort = raw_reopen_abort, + .bdrv_aio_preadv = raw_aio_preadv, .bdrv_aio_pwritev = raw_aio_pwritev, .bdrv_aio_flush = raw_aio_flush, diff --git a/block/filter-compress.c b/block/filter-compress.c index 5136371bf8..305716c86c 100644 --- a/block/filter-compress.c +++ b/block/filter-compress.c @@ -30,11 +30,9 @@ static int compress_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - false, errp); - if (!bs->file) { - return -EINVAL; + int ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } if (!bs->file->bs->drv || !block_driver_can_compress(bs->file->bs->drv)) { @@ -63,10 +61,10 @@ static int64_t compress_getlength(BlockDriverState *bs) static int coroutine_fn compress_co_preadv_part(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, + int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset, - int flags) + BdrvRequestFlags flags) { return bdrv_co_preadv_part(bs->file, offset, bytes, qiov, qiov_offset, flags); @@ -74,10 +72,11 @@ static int coroutine_fn compress_co_preadv_part(BlockDriverState *bs, static int coroutine_fn compress_co_pwritev_part(BlockDriverState *bs, - uint64_t offset, - uint64_t bytes, + int64_t offset, + int64_t bytes, QEMUIOVector *qiov, - size_t qiov_offset, int flags) + size_t qiov_offset, + BdrvRequestFlags flags) { return bdrv_co_pwritev_part(bs->file, offset, bytes, qiov, qiov_offset, flags | BDRV_REQ_WRITE_COMPRESSED); @@ -85,7 +84,7 @@ static int coroutine_fn compress_co_pwritev_part(BlockDriverState *bs, static int coroutine_fn compress_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { return bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags); @@ -93,7 +92,7 @@ static int coroutine_fn 
compress_co_pwrite_zeroes(BlockDriverState *bs, static int coroutine_fn compress_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { return bdrv_co_pdiscard(bs->file, offset, bytes); } diff --git a/block/gluster.c b/block/gluster.c index e8ee14c8e9..7c90f7ba4b 100644 --- a/block/gluster.c +++ b/block/gluster.c @@ -891,6 +891,7 @@ out: static void qemu_gluster_refresh_limits(BlockDriverState *bs, Error **errp) { bs->bl.max_transfer = GLUSTER_MAX_TRANSFER; + bs->bl.max_pdiscard = MIN(SIZE_MAX, INT64_MAX); } static int qemu_gluster_reopen_prepare(BDRVReopenState *state, @@ -1003,19 +1004,19 @@ static void qemu_gluster_reopen_abort(BDRVReopenState *state) #ifdef CONFIG_GLUSTERFS_ZEROFILL static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, - int size, + int64_t bytes, BdrvRequestFlags flags) { int ret; GlusterAIOCB acb; BDRVGlusterState *s = bs->opaque; - acb.size = size; + acb.size = bytes; acb.ret = 0; acb.coroutine = qemu_coroutine_self(); acb.aio_context = bdrv_get_aio_context(bs); - ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb); + ret = glfs_zerofill_async(s->fd, offset, bytes, gluster_finish_aiocb, &acb); if (ret < 0) { return -errno; } @@ -1235,7 +1236,6 @@ static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs, QEMUIOVector *qiov, int flags) { - assert(!flags); return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1); } @@ -1297,18 +1297,20 @@ error: #ifdef CONFIG_GLUSTERFS_DISCARD static coroutine_fn int qemu_gluster_co_pdiscard(BlockDriverState *bs, - int64_t offset, int size) + int64_t offset, int64_t bytes) { int ret; GlusterAIOCB acb; BDRVGlusterState *s = bs->opaque; + assert(bytes <= SIZE_MAX); /* rely on max_pdiscard */ + acb.size = 0; acb.ret = 0; acb.coroutine = qemu_coroutine_self(); acb.aio_context = bdrv_get_aio_context(bs); - ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb); + ret = glfs_discard_async(s->fd, offset, bytes, gluster_finish_aiocb, &acb); if (ret < 0) { return -errno; } @@ -1461,7 +1463,8 @@ exit: * the specified offset) that are known to be in the same * allocated/unallocated state. * - * 'bytes' is the max value 'pnum' should be set to. + * 'bytes' is a soft cap for 'pnum'. If the information is free, 'pnum' may + * well exceed it. * * (Based on raw_co_block_status() from file-posix.c.) */ @@ -1477,6 +1480,8 @@ static int coroutine_fn qemu_gluster_co_block_status(BlockDriverState *bs, off_t data = 0, hole = 0; int ret = -EINVAL; + assert(QEMU_IS_ALIGNED(offset | bytes, bs->bl.request_alignment)); + if (!s->fd) { return ret; } @@ -1500,12 +1505,26 @@ static int coroutine_fn qemu_gluster_co_block_status(BlockDriverState *bs, } else if (data == offset) { /* On a data extent, compute bytes to the end of the extent, * possibly including a partial sector at EOF. */ - *pnum = MIN(bytes, hole - offset); + *pnum = hole - offset; + + /* + * We are not allowed to return partial sectors, though, so + * round up if necessary. + */ + if (!QEMU_IS_ALIGNED(*pnum, bs->bl.request_alignment)) { + int64_t file_length = qemu_gluster_getlength(bs); + if (file_length > 0) { + /* Ignore errors, this is just a safeguard */ + assert(hole == file_length); + } + *pnum = ROUND_UP(*pnum, bs->bl.request_alignment); + } + ret = BDRV_BLOCK_DATA; } else { /* On a hole, compute bytes to the beginning of the next extent. 
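/*
 * A small self-contained illustration of the alignment rounding that the
 * gluster block-status hunk above applies to *pnum near EOF. The macros
 * below only mirror the spirit of QEMU's QEMU_IS_ALIGNED/ROUND_UP; this
 * is a sketch, not the QEMU definitions.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

#define IS_ALIGNED(n, a)  (((n) % (a)) == 0)
#define ROUND_UP(n, a)    (((n) + (a) - 1) / (a) * (a))

int main(void)
{
    int64_t request_alignment = 512;

    /* A data extent ending in a partial sector at EOF ... */
    int64_t pnum = 1536 + 100;

    /* ... must not be reported as a partial sector, so round it up. */
    if (!IS_ALIGNED(pnum, request_alignment)) {
        pnum = ROUND_UP(pnum, request_alignment);
    }
    assert(pnum == 2048);
    printf("reported pnum: %" PRId64 "\n", pnum);
    return 0;
}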
*/ assert(hole == offset); - *pnum = MIN(bytes, data - offset); + *pnum = data - offset; ret = BDRV_BLOCK_ZERO; } @@ -1535,7 +1554,6 @@ static BlockDriver bdrv_gluster = { .format_name = "gluster", .protocol_name = "gluster", .instance_size = sizeof(BDRVGlusterState), - .bdrv_needs_filename = false, .bdrv_file_open = qemu_gluster_open, .bdrv_reopen_prepare = qemu_gluster_reopen_prepare, .bdrv_reopen_commit = qemu_gluster_reopen_commit, @@ -1565,7 +1583,6 @@ static BlockDriver bdrv_gluster_tcp = { .format_name = "gluster", .protocol_name = "gluster+tcp", .instance_size = sizeof(BDRVGlusterState), - .bdrv_needs_filename = false, .bdrv_file_open = qemu_gluster_open, .bdrv_reopen_prepare = qemu_gluster_reopen_prepare, .bdrv_reopen_commit = qemu_gluster_reopen_commit, @@ -1595,7 +1612,6 @@ static BlockDriver bdrv_gluster_unix = { .format_name = "gluster", .protocol_name = "gluster+unix", .instance_size = sizeof(BDRVGlusterState), - .bdrv_needs_filename = true, .bdrv_file_open = qemu_gluster_open, .bdrv_reopen_prepare = qemu_gluster_reopen_prepare, .bdrv_reopen_commit = qemu_gluster_reopen_commit, @@ -1631,7 +1647,6 @@ static BlockDriver bdrv_gluster_rdma = { .format_name = "gluster", .protocol_name = "gluster+rdma", .instance_size = sizeof(BDRVGlusterState), - .bdrv_needs_filename = true, .bdrv_file_open = qemu_gluster_open, .bdrv_reopen_prepare = qemu_gluster_reopen_prepare, .bdrv_reopen_commit = qemu_gluster_reopen_commit, diff --git a/block/io.c b/block/io.c index a19942718b..bbaa0d1b2d 100644 --- a/block/io.c +++ b/block/io.c @@ -32,6 +32,7 @@ #include "block/coroutines.h" #include "block/write-threshold.h" #include "qemu/cutils.h" +#include "qemu/memalign.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" @@ -70,8 +71,10 @@ static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c, void bdrv_parent_drained_end_single(BdrvChild *c) { int drained_end_counter = 0; + AioContext *ctx = bdrv_child_get_parent_aio_context(c); + IO_OR_GS_CODE(); bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter); - BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0); + AIO_WAIT_WHILE(ctx, qatomic_read(&drained_end_counter) > 0); } static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore, @@ -114,12 +117,14 @@ static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore, void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll) { + AioContext *ctx = bdrv_child_get_parent_aio_context(c); + IO_OR_GS_CODE(); c->parent_quiesce_counter++; if (c->klass->drained_begin) { c->klass->drained_begin(c); } if (poll) { - BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c)); + AIO_WAIT_WHILE(ctx, bdrv_parent_drained_poll_single(c)); } } @@ -136,6 +141,7 @@ static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src) dst->min_mem_alignment = MAX(dst->min_mem_alignment, src->min_mem_alignment); dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov); + dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov); } typedef struct BdrvRefreshLimitsState { @@ -163,6 +169,8 @@ void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp) BdrvChild *c; bool have_limits; + GLOBAL_STATE_CODE(); + if (tran) { BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1); *s = (BdrvRefreshLimitsState) { @@ -188,10 +196,6 @@ void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp) QLIST_FOREACH(c, &bs->children, next) { if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | 
BDRV_CHILD_COW)) { - bdrv_refresh_limits(c->bs, tran, errp); - if (*errp) { - return; - } bdrv_merge_limits(&bs->bl, &c->bs->bl); have_limits = true; } @@ -199,7 +203,7 @@ void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp) if (!have_limits) { bs->bl.min_mem_alignment = 512; - bs->bl.opt_mem_alignment = qemu_real_host_page_size; + bs->bl.opt_mem_alignment = qemu_real_host_page_size(); /* Safe default since most protocols use readv()/writev()/etc */ bs->bl.max_iov = IOV_MAX; @@ -225,12 +229,14 @@ void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp) */ void bdrv_enable_copy_on_read(BlockDriverState *bs) { + IO_CODE(); qatomic_inc(&bs->copy_on_read); } void bdrv_disable_copy_on_read(BlockDriverState *bs) { int old = qatomic_fetch_dec(&bs->copy_on_read); + IO_CODE(); assert(old >= 1); } @@ -302,6 +308,7 @@ bool bdrv_drain_poll(BlockDriverState *bs, bool recursive, BdrvChild *ignore_parent, bool ignore_bds_parents) { BdrvChild *child, *next; + IO_OR_GS_CODE(); if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) { return true; @@ -425,6 +432,7 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent, bool ignore_bds_parents) { + IO_OR_GS_CODE(); assert(!qemu_in_coroutine()); /* Stop things in parent-to-child order */ @@ -476,11 +484,13 @@ static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, void bdrv_drained_begin(BlockDriverState *bs) { + IO_OR_GS_CODE(); bdrv_do_drained_begin(bs, false, NULL, false, true); } void bdrv_subtree_drained_begin(BlockDriverState *bs) { + IO_OR_GS_CODE(); bdrv_do_drained_begin(bs, true, NULL, false, true); } @@ -537,18 +547,21 @@ static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive, void bdrv_drained_end(BlockDriverState *bs) { int drained_end_counter = 0; + IO_OR_GS_CODE(); bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter); BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0); } void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter) { + IO_CODE(); bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter); } void bdrv_subtree_drained_end(BlockDriverState *bs) { int drained_end_counter = 0; + IO_OR_GS_CODE(); bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter); BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0); } @@ -556,6 +569,7 @@ void bdrv_subtree_drained_end(BlockDriverState *bs) void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent) { int i; + IO_OR_GS_CODE(); for (i = 0; i < new_parent->recursive_quiesce_counter; i++) { bdrv_do_drained_begin(child->bs, true, child, false, true); @@ -566,6 +580,7 @@ void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent) { int drained_end_counter = 0; int i; + IO_OR_GS_CODE(); for (i = 0; i < old_parent->recursive_quiesce_counter; i++) { bdrv_do_drained_end(child->bs, true, child, false, @@ -575,22 +590,9 @@ void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent) BDRV_POLL_WHILE(child->bs, qatomic_read(&drained_end_counter) > 0); } -/* - * Wait for pending requests to complete on a single BlockDriverState subtree, - * and suspend block driver's internal I/O until next request arrives. - * - * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState - * AioContext. 
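/*
 * A minimal sketch of the quiesce-counter idea behind the drained
 * begin/end pairs in the hunks above: new requests are only accepted
 * while the counter is zero, and drained_begin waits until in-flight
 * requests have finished. The types and the busy-wait loop below are
 * simplified stand-ins; QEMU polls the AioContext (AIO_WAIT_WHILE /
 * BDRV_POLL_WHILE) instead of spinning.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct dev {
    atomic_int quiesce_counter;   /* >0 means "no new requests"          */
    atomic_int in_flight;         /* requests currently being processed  */
};

static bool dev_request_allowed(struct dev *d)
{
    return atomic_load(&d->quiesce_counter) == 0;
}

static void dev_drained_begin(struct dev *d)
{
    atomic_fetch_add(&d->quiesce_counter, 1);
    while (atomic_load(&d->in_flight) > 0) {
        /* In QEMU this iteration runs the event loop rather than spinning. */
    }
}

static void dev_drained_end(struct dev *d)
{
    atomic_fetch_sub(&d->quiesce_counter, 1);
}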
- */ -void coroutine_fn bdrv_co_drain(BlockDriverState *bs) -{ - assert(qemu_in_coroutine()); - bdrv_drained_begin(bs); - bdrv_drained_end(bs); -} - void bdrv_drain(BlockDriverState *bs) { + IO_OR_GS_CODE(); bdrv_drained_begin(bs); bdrv_drained_end(bs); } @@ -611,6 +613,7 @@ static bool bdrv_drain_all_poll(void) { BlockDriverState *bs = NULL; bool result = false; + GLOBAL_STATE_CODE(); /* bdrv_drain_poll() can't make changes to the graph and we are holding the * main AioContext lock, so iterating bdrv_next_all_states() is safe. */ @@ -639,6 +642,7 @@ static bool bdrv_drain_all_poll(void) void bdrv_drain_all_begin(void) { BlockDriverState *bs = NULL; + GLOBAL_STATE_CODE(); if (qemu_in_coroutine()) { bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL); @@ -681,6 +685,7 @@ void bdrv_drain_all_begin(void) void bdrv_drain_all_end_quiesce(BlockDriverState *bs) { int drained_end_counter = 0; + GLOBAL_STATE_CODE(); g_assert(bs->quiesce_counter > 0); g_assert(!bs->refcnt); @@ -695,6 +700,7 @@ void bdrv_drain_all_end(void) { BlockDriverState *bs = NULL; int drained_end_counter = 0; + GLOBAL_STATE_CODE(); /* * bdrv queue is managed by record/replay, @@ -722,6 +728,7 @@ void bdrv_drain_all_end(void) void bdrv_drain_all(void) { + GLOBAL_STATE_CODE(); bdrv_drain_all_begin(); bdrv_drain_all_end(); } @@ -731,7 +738,7 @@ void bdrv_drain_all(void) * * This function should be called when a tracked request is completing. */ -static void tracked_request_end(BdrvTrackedRequest *req) +static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req) { if (req->serialising) { qatomic_dec(&req->bs->serialising_in_flight); @@ -746,11 +753,11 @@ static void tracked_request_end(BdrvTrackedRequest *req) /** * Add an active request to the tracked requests list */ -static void tracked_request_begin(BdrvTrackedRequest *req, - BlockDriverState *bs, - int64_t offset, - int64_t bytes, - enum BdrvTrackedRequestType type) +static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req, + BlockDriverState *bs, + int64_t offset, + int64_t bytes, + enum BdrvTrackedRequestType type) { bdrv_check_request(offset, bytes, &error_abort); @@ -789,7 +796,7 @@ static bool tracked_request_overlaps(BdrvTrackedRequest *req, } /* Called with self->bs->reqs_lock held */ -static BdrvTrackedRequest * +static coroutine_fn BdrvTrackedRequest * bdrv_find_conflicting_request(BdrvTrackedRequest *self) { BdrvTrackedRequest *req; @@ -823,20 +830,16 @@ bdrv_find_conflicting_request(BdrvTrackedRequest *self) } /* Called with self->bs->reqs_lock held */ -static bool coroutine_fn +static void coroutine_fn bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self) { BdrvTrackedRequest *req; - bool waited = false; while ((req = bdrv_find_conflicting_request(self))) { self->waiting_for = req; qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock); self->waiting_for = NULL; - waited = true; } - - return waited; } /* Called with req->bs->reqs_lock held */ @@ -866,6 +869,7 @@ BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs) { BdrvTrackedRequest *req; Coroutine *self = qemu_coroutine_self(); + IO_CODE(); QLIST_FOREACH(req, &bs->tracked_requests, list) { if (req->co == self) { @@ -885,7 +889,7 @@ void bdrv_round_to_clusters(BlockDriverState *bs, int64_t *cluster_bytes) { BlockDriverInfo bdi; - + IO_CODE(); if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { *cluster_offset = offset; *cluster_bytes = bytes; @@ -911,54 +915,53 @@ static int bdrv_get_cluster_size(BlockDriverState *bs) void 
bdrv_inc_in_flight(BlockDriverState *bs) { + IO_CODE(); qatomic_inc(&bs->in_flight); } void bdrv_wakeup(BlockDriverState *bs) { + IO_CODE(); aio_wait_kick(); } void bdrv_dec_in_flight(BlockDriverState *bs) { + IO_CODE(); qatomic_dec(&bs->in_flight); bdrv_wakeup(bs); } -static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self) +static void coroutine_fn +bdrv_wait_serialising_requests(BdrvTrackedRequest *self) { BlockDriverState *bs = self->bs; - bool waited = false; if (!qatomic_read(&bs->serialising_in_flight)) { - return false; + return; } qemu_co_mutex_lock(&bs->reqs_lock); - waited = bdrv_wait_serialising_requests_locked(self); + bdrv_wait_serialising_requests_locked(self); qemu_co_mutex_unlock(&bs->reqs_lock); - - return waited; } -bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req, +void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req, uint64_t align) { - bool waited; + IO_CODE(); qemu_co_mutex_lock(&req->bs->reqs_lock); tracked_request_set_serialising(req, align); - waited = bdrv_wait_serialising_requests_locked(req); + bdrv_wait_serialising_requests_locked(req); qemu_co_mutex_unlock(&req->bs->reqs_lock); - - return waited; } -static int bdrv_check_qiov_request(int64_t offset, int64_t bytes, - QEMUIOVector *qiov, size_t qiov_offset, - Error **errp) +int bdrv_check_qiov_request(int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, + Error **errp) { /* * Check generic offset/bytes correctness @@ -1036,13 +1039,6 @@ static int bdrv_check_request32(int64_t offset, int64_t bytes, return 0; } -int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset, - int64_t bytes, BdrvRequestFlags flags) -{ - return bdrv_pwritev(child, offset, bytes, NULL, - BDRV_REQ_ZERO_WRITE | flags); -} - /* * Completely zero out a block device with the help of bdrv_pwrite_zeroes. * The operation is sped up by checking the block status and only writing @@ -1057,6 +1053,7 @@ int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags) int ret; int64_t target_size, bytes, offset = 0; BlockDriverState *bs = child->bs; + IO_CODE(); target_size = bdrv_getlength(bs); if (target_size < 0) { @@ -1084,59 +1081,25 @@ int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags) } } -/* See bdrv_pwrite() for the return codes */ -int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int64_t bytes) -{ - int ret; - QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); - - if (bytes < 0) { - return -EINVAL; - } - - ret = bdrv_preadv(child, offset, bytes, &qiov, 0); - - return ret < 0 ? ret : bytes; -} - -/* Return no. of bytes on success or < 0 on error. Important errors are: - -EIO generic I/O error (may happen for all errors) - -ENOMEDIUM No media inserted. - -EINVAL Invalid offset or number of bytes - -EACCES Trying to write a read-only device -*/ -int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, - int64_t bytes) -{ - int ret; - QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); - - if (bytes < 0) { - return -EINVAL; - } - - ret = bdrv_pwritev(child, offset, bytes, &qiov, 0); - - return ret < 0 ? ret : bytes; -} - /* * Writes to the file and ensures that no writes are reordered across this * request (acts as a barrier) * * Returns 0 on success, -errno in error cases. 
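/*
 * The same write-then-flush shape as the bdrv_co_pwrite_sync() helper in
 * the hunk above, written as a plain POSIX sketch: pwrite()/fdatasync()
 * stand in for the QEMU coroutine helpers, and the return convention
 * follows the comment above (0 on success, -errno on error).
 */
#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

static int pwrite_sync(int fd, const void *buf, size_t bytes, off_t offset)
{
    ssize_t ret = pwrite(fd, buf, bytes, offset);
    if (ret < 0) {
        return -errno;
    }
    if (ret != (ssize_t)bytes) {
        return -EIO;           /* short write: treat as an I/O error here */
    }
    /* Flush so the write cannot be reordered past this point. */
    if (fdatasync(fd) < 0) {
        return -errno;
    }
    return 0;
}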
*/ -int bdrv_pwrite_sync(BdrvChild *child, int64_t offset, - const void *buf, int64_t count) +int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset, + int64_t bytes, const void *buf, + BdrvRequestFlags flags) { int ret; + IO_CODE(); - ret = bdrv_pwrite(child, offset, buf, count); + ret = bdrv_co_pwrite(child, offset, bytes, buf, flags); if (ret < 0) { return ret; } - ret = bdrv_flush(child->bs); + ret = bdrv_co_flush(child->bs); if (ret < 0) { return ret; } @@ -1169,8 +1132,7 @@ static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs, int ret; bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); - assert(!(flags & ~BDRV_REQ_MASK)); - assert(!(flags & BDRV_REQ_NO_FALLBACK)); + assert(!(flags & ~bs->supported_read_flags)); if (!drv) { return -ENOMEDIUM; @@ -1230,26 +1192,33 @@ out: static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, QEMUIOVector *qiov, - size_t qiov_offset, int flags) + size_t qiov_offset, + BdrvRequestFlags flags) { BlockDriver *drv = bs->drv; + bool emulate_fua = false; int64_t sector_num; unsigned int nb_sectors; QEMUIOVector local_qiov; int ret; bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); - assert(!(flags & ~BDRV_REQ_MASK)); - assert(!(flags & BDRV_REQ_NO_FALLBACK)); if (!drv) { return -ENOMEDIUM; } + if ((flags & BDRV_REQ_FUA) && + (~bs->supported_write_flags & BDRV_REQ_FUA)) { + flags &= ~BDRV_REQ_FUA; + emulate_fua = true; + } + + flags &= bs->supported_write_flags; + if (drv->bdrv_co_pwritev_part) { ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset, - flags & bs->supported_write_flags); - flags &= ~bs->supported_write_flags; + flags); goto emulate_flags; } @@ -1259,9 +1228,7 @@ static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs, } if (drv->bdrv_co_pwritev) { - ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, - flags & bs->supported_write_flags); - flags &= ~bs->supported_write_flags; + ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags); goto emulate_flags; } @@ -1271,10 +1238,8 @@ static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs, .coroutine = qemu_coroutine_self(), }; - acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, - flags & bs->supported_write_flags, + acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags, bdrv_co_io_em_complete, &co); - flags &= ~bs->supported_write_flags; if (acb == NULL) { ret = -EIO; } else { @@ -1292,12 +1257,10 @@ static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs, assert(bytes <= BDRV_REQUEST_MAX_BYTES); assert(drv->bdrv_co_writev); - ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, - flags & bs->supported_write_flags); - flags &= ~bs->supported_write_flags; + ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags); emulate_flags: - if (ret == 0 && (flags & BDRV_REQ_FUA)) { + if (ret == 0 && emulate_fua) { ret = bdrv_co_flush(bs); } @@ -1525,11 +1488,14 @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child, max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), align); - /* TODO: We would need a per-BDS .supported_read_flags and + /* + * TODO: We would need a per-BDS .supported_read_flags and * potential fallback support, if we ever implement any read flags * to pass through to drivers. For now, there aren't any - * passthrough flags. */ - assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH))); + * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint. 
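/*
 * A reduced model of the emulate_fua logic introduced in the
 * bdrv_driver_pwritev() hunk above: strip FUA from the request flags when
 * the driver cannot honour it, remember that fact, and emulate it with a
 * flush after the write completes. The flag value and the issue_*()
 * callbacks below are illustrative only.
 */
#include <stdbool.h>

enum {
    REQ_FUA = 1 << 0,   /* "force unit access": data durable on return */
};

/* issue_write()/issue_flush() stand in for the driver callbacks. */
static int issue_write(unsigned flags) { (void)flags; return 0; }
static int issue_flush(void)           { return 0; }

static int driver_pwritev(unsigned flags, unsigned supported_write_flags)
{
    bool emulate_fua = false;
    int ret;

    if ((flags & REQ_FUA) && !(supported_write_flags & REQ_FUA)) {
        flags &= ~REQ_FUA;
        emulate_fua = true;
    }
    flags &= supported_write_flags;

    ret = issue_write(flags);

    if (ret == 0 && emulate_fua) {
        ret = issue_flush();
    }
    return ret;
}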
+ */ + assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH | + BDRV_REQ_REGISTERED_BUF))); /* Handle Copy on Read and associated serialisation */ if (flags & BDRV_REQ_COPY_ON_READ) { @@ -1570,7 +1536,7 @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child, goto out; } - assert(!(flags & ~bs->supported_read_flags)); + assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF))); max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align); if (bytes <= max_bytes && bytes <= max_transfer) { @@ -1673,10 +1639,10 @@ static bool bdrv_init_padding(BlockDriverState *bs, return true; } -static int bdrv_padding_rmw_read(BdrvChild *child, - BdrvTrackedRequest *req, - BdrvRequestPadding *pad, - bool zero_middle) +static coroutine_fn int bdrv_padding_rmw_read(BdrvChild *child, + BdrvTrackedRequest *req, + BdrvRequestPadding *pad, + bool zero_middle) { QEMUIOVector local_qiov; BlockDriverState *bs = child->bs; @@ -1759,7 +1725,8 @@ static void bdrv_padding_destroy(BdrvRequestPadding *pad) static int bdrv_pad_request(BlockDriverState *bs, QEMUIOVector **qiov, size_t *qiov_offset, int64_t *offset, int64_t *bytes, - BdrvRequestPadding *pad, bool *padded) + BdrvRequestPadding *pad, bool *padded, + BdrvRequestFlags *flags) { int ret; @@ -1787,6 +1754,10 @@ static int bdrv_pad_request(BlockDriverState *bs, if (padded) { *padded = true; } + if (flags) { + /* Can't use optimization hint with bounce buffer */ + *flags &= ~BDRV_REQ_REGISTERED_BUF; + } return 0; } @@ -1795,6 +1766,7 @@ int coroutine_fn bdrv_co_preadv(BdrvChild *child, int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { + IO_CODE(); return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags); } @@ -1807,6 +1779,7 @@ int coroutine_fn bdrv_co_preadv_part(BdrvChild *child, BdrvTrackedRequest req; BdrvRequestPadding pad; int ret; + IO_CODE(); trace_bdrv_co_preadv_part(bs, offset, bytes, flags); @@ -1839,7 +1812,7 @@ int coroutine_fn bdrv_co_preadv_part(BdrvChild *child, } ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad, - NULL); + NULL, &flags); if (ret < 0) { goto fail; } @@ -1868,7 +1841,8 @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int head = 0; int tail = 0; - int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX); + int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, + INT64_MAX); int alignment = MAX(bs->bl.pwrite_zeroes_alignment, bs->bl.request_alignment); int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER); @@ -1883,6 +1857,14 @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, return -ENOTSUP; } + /* By definition there is no user buffer so this flag doesn't make sense */ + if (flags & BDRV_REQ_REGISTERED_BUF) { + return -EINVAL; + } + + /* Invalidate the cached block-status data range if this write overlaps */ + bdrv_bsc_invalidate_range(bs, offset, bytes); + assert(alignment % bs->bl.request_alignment == 0); head = offset % alignment; tail = (offset + bytes) % alignment; @@ -2070,7 +2052,8 @@ bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes, */ static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child, BdrvTrackedRequest *req, int64_t offset, int64_t bytes, - int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags) + int64_t align, QEMUIOVector *qiov, size_t qiov_offset, + BdrvRequestFlags flags) { BlockDriverState *bs = child->bs; BlockDriver *drv = bs->drv; @@ -2104,6 +2087,9 @@ static int coroutine_fn 
bdrv_aligned_pwritev(BdrvChild *child, if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { flags |= BDRV_REQ_MAY_UNMAP; } + + /* Can't use optimization hint with bufferless zero write */ + flags &= ~BDRV_REQ_REGISTERED_BUF; } if (ret < 0) { @@ -2164,8 +2150,12 @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child, bool padding; BdrvRequestPadding pad; + /* This flag doesn't make sense for padding or zero writes */ + flags &= ~BDRV_REQ_REGISTERED_BUF; + padding = bdrv_init_padding(bs, offset, bytes, &pad); if (padding) { + assert(!(flags & BDRV_REQ_NO_WAIT)); bdrv_make_request_serialising(req, align); bdrv_padding_rmw_read(child, req, &pad, true); @@ -2223,6 +2213,7 @@ int coroutine_fn bdrv_co_pwritev(BdrvChild *child, int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { + IO_CODE(); return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags); } @@ -2236,6 +2227,7 @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, BdrvRequestPadding pad; int ret; bool padded = false; + IO_CODE(); trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags); @@ -2243,7 +2235,11 @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, return -ENOMEDIUM; } - ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset); + if (flags & BDRV_REQ_ZERO_WRITE) { + ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL); + } else { + ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset); + } if (ret < 0) { return ret; } @@ -2274,7 +2270,7 @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, * alignment only if there is no ZERO flag. */ ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad, - &padded); + &padded, &flags); if (ret < 0) { return ret; } @@ -2296,6 +2292,7 @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, * serialize the request to prevent interactions of the * widened region with other transactions. */ + assert(!(flags & BDRV_REQ_NO_WAIT)); bdrv_make_request_serialising(&req, align); bdrv_padding_rmw_read(child, &req, &pad, false); } @@ -2315,6 +2312,7 @@ out: int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset, int64_t bytes, BdrvRequestFlags flags) { + IO_CODE(); trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags); if (!(child->bs->open_flags & BDRV_O_UNMAP)) { @@ -2334,6 +2332,8 @@ int bdrv_flush_all(void) BlockDriverState *bs = NULL; int result = 0; + GLOBAL_STATE_CODE(); + /* * bdrv queue is managed by record/replay, * creating new flush request for stopping @@ -2447,9 +2447,69 @@ static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset; if (bs->drv->bdrv_co_block_status) { - ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset, - aligned_bytes, pnum, &local_map, - &local_file); + /* + * Use the block-status cache only for protocol nodes: Format + * drivers are generally quick to inquire the status, but protocol + * drivers often need to get information from outside of qemu, so + * we do not have control over the actual implementation. There + * have been cases where inquiring the status took an unreasonably + * long time, and we can do nothing in qemu to fix it. + * This is especially problematic for images with large data areas, + * because finding the few holes in them and giving them special + * treatment does not gain much performance. Therefore, we try to + * cache the last-identified data region. 
+ * + * Second, limiting ourselves to protocol nodes allows us to assume + * the block status for data regions to be DATA | OFFSET_VALID, and + * that the host offset is the same as the guest offset. + * + * Note that it is possible that external writers zero parts of + * the cached regions without the cache being invalidated, and so + * we may report zeroes as data. This is not catastrophic, + * however, because reporting zeroes as data is fine. + */ + if (QLIST_EMPTY(&bs->children) && + bdrv_bsc_is_data(bs, aligned_offset, pnum)) + { + ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID; + local_file = bs; + local_map = aligned_offset; + } else { + ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset, + aligned_bytes, pnum, &local_map, + &local_file); + + /* + * Note that checking QLIST_EMPTY(&bs->children) is also done when + * the cache is queried above. Technically, we do not need to check + * it here; the worst that can happen is that we fill the cache for + * non-protocol nodes, and then it is never used. However, filling + * the cache requires an RCU update, so double check here to avoid + * such an update if possible. + * + * Check want_zero, because we only want to update the cache when we + * have accurate information about what is zero and what is data. + */ + if (want_zero && + ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) && + QLIST_EMPTY(&bs->children)) + { + /* + * When a protocol driver reports BLOCK_OFFSET_VALID, the + * returned local_map value must be the same as the offset we + * have passed (aligned_offset), and local_bs must be the node + * itself. + * Assert this, because we follow this rule when reading from + * the cache (see the `local_file = bs` and + * `local_map = aligned_offset` assignments above), and the + * result the cache delivers must be the same as the driver + * would deliver. 
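/*
 * A minimal, single-threaded sketch of the block-status cache consulted
 * above: one cached data region, filled after a successful query and
 * dropped when a write or discard overlaps it. The real helpers named in
 * the patch (bdrv_bsc_is_data(), bdrv_bsc_fill(),
 * bdrv_bsc_invalidate_range()) are more involved; the types and names
 * below are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>

struct bsc {
    bool    valid;
    int64_t offset;   /* start of the cached data region  */
    int64_t bytes;    /* length of the cached data region */
};

/* If offset lies inside the cached data region, report how much of that
 * region remains from offset onward and return true. */
static bool bsc_is_data(const struct bsc *c, int64_t offset, int64_t *pnum)
{
    if (c->valid && offset >= c->offset && offset < c->offset + c->bytes) {
        *pnum = c->offset + c->bytes - offset;
        return true;
    }
    return false;
}

static void bsc_fill(struct bsc *c, int64_t offset, int64_t bytes)
{
    c->offset = offset;
    c->bytes = bytes;
    c->valid = true;
}

/* Drop the cache if [offset, offset + bytes) overlaps the cached region. */
static void bsc_invalidate_range(struct bsc *c, int64_t offset, int64_t bytes)
{
    if (c->valid && offset < c->offset + c->bytes &&
        offset + bytes > c->offset) {
        c->valid = false;
    }
}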
+ */ + assert(local_file == bs); + assert(local_map == aligned_offset); + bdrv_bsc_fill(bs, aligned_offset, *pnum); + } + } } else { /* Default code for filters */ @@ -2568,6 +2628,7 @@ bdrv_co_common_block_status_above(BlockDriverState *bs, BlockDriverState *p; int64_t eof = 0; int dummy; + IO_CODE(); assert(!include_base || base); /* Can't include NULL base */ @@ -2657,6 +2718,7 @@ int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base, int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file) { + IO_CODE(); return bdrv_common_block_status_above(bs, base, false, true, offset, bytes, pnum, map, file, NULL); } @@ -2664,6 +2726,7 @@ int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base, int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file) { + IO_CODE(); return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs), offset, bytes, pnum, map, file); } @@ -2680,13 +2743,14 @@ int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, { int ret; int64_t pnum = bytes; + IO_CODE(); if (!bytes) { return 1; } - ret = bdrv_common_block_status_above(bs, NULL, false, false, offset, - bytes, &pnum, NULL, NULL, NULL); + ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset, + bytes, &pnum, NULL, NULL, NULL); if (ret < 0) { return ret; @@ -2695,11 +2759,12 @@ int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO); } -int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset, - int64_t bytes, int64_t *pnum) +int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes, + int64_t *pnum) { int ret; int64_t dummy; + IO_CODE(); ret = bdrv_common_block_status_above(bs, bs, true, false, offset, bytes, pnum ? 
pnum : &dummy, NULL, @@ -2736,6 +2801,7 @@ int bdrv_is_allocated_above(BlockDriverState *top, int ret = bdrv_common_block_status_above(top, base, include_base, false, offset, bytes, pnum, NULL, NULL, &depth); + IO_CODE(); if (ret < 0) { return ret; } @@ -2751,7 +2817,13 @@ bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) { BlockDriver *drv = bs->drv; BlockDriverState *child_bs = bdrv_primary_bs(bs); - int ret = -ENOTSUP; + int ret; + IO_CODE(); + + ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); + if (ret < 0) { + return ret; + } if (!drv) { return -ENOMEDIUM; @@ -2763,6 +2835,8 @@ bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) ret = drv->bdrv_load_vmstate(bs, qiov, pos); } else if (child_bs) { ret = bdrv_co_readv_vmstate(child_bs, qiov, pos); + } else { + ret = -ENOTSUP; } bdrv_dec_in_flight(bs); @@ -2775,7 +2849,13 @@ bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) { BlockDriver *drv = bs->drv; BlockDriverState *child_bs = bdrv_primary_bs(bs); - int ret = -ENOTSUP; + int ret; + IO_CODE(); + + ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); + if (ret < 0) { + return ret; + } if (!drv) { return -ENOMEDIUM; @@ -2787,6 +2867,8 @@ bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) ret = drv->bdrv_save_vmstate(bs, qiov, pos); } else if (child_bs) { ret = bdrv_co_writev_vmstate(child_bs, qiov, pos); + } else { + ret = -ENOTSUP; } bdrv_dec_in_flight(bs); @@ -2799,6 +2881,7 @@ int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, { QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); int ret = bdrv_writev_vmstate(bs, &qiov, pos); + IO_CODE(); return ret < 0 ? ret : size; } @@ -2808,6 +2891,7 @@ int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, { QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); int ret = bdrv_readv_vmstate(bs, &qiov, pos); + IO_CODE(); return ret < 0 ? ret : size; } @@ -2817,6 +2901,7 @@ int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, void bdrv_aio_cancel(BlockAIOCB *acb) { + IO_CODE(); qemu_aio_ref(acb); bdrv_aio_cancel_async(acb); while (acb->refcnt > 1) { @@ -2841,6 +2926,7 @@ void bdrv_aio_cancel(BlockAIOCB *acb) * In either case the completion callback must be called. */ void bdrv_aio_cancel_async(BlockAIOCB *acb) { + IO_CODE(); if (acb->aiocb_info->cancel_async) { acb->aiocb_info->cancel_async(acb); } @@ -2855,6 +2941,7 @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs) BdrvChild *child; int current_gen; int ret = 0; + IO_CODE(); bdrv_inc_in_flight(bs); @@ -2976,9 +3063,11 @@ int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes) { BdrvTrackedRequest req; - int max_pdiscard, ret; + int ret; + int64_t max_pdiscard; int head, tail, align; BlockDriverState *bs = child->bs; + IO_CODE(); if (!bs || !bs->drv || !bdrv_is_inserted(bs)) { return -ENOMEDIUM; @@ -3002,6 +3091,9 @@ int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, return 0; } + /* Invalidate the cached block-status data range if this discard overlaps */ + bdrv_bsc_invalidate_range(bs, offset, bytes); + /* Discard is advisory, but some devices track and coalesce * unaligned requests, so we must pass everything down rather than * round here. 
Still, most devices will just silently ignore @@ -3020,7 +3112,7 @@ int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, goto out; } - max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX), + max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX), align); assert(max_pdiscard >= bs->bl.request_alignment); @@ -3087,13 +3179,14 @@ out: return ret; } -int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf) +int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf) { BlockDriver *drv = bs->drv; CoroutineIOCompletion co = { .coroutine = qemu_coroutine_self(), }; BlockAIOCB *acb; + IO_CODE(); bdrv_inc_in_flight(bs); if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) { @@ -3118,17 +3211,20 @@ out: void *qemu_blockalign(BlockDriverState *bs, size_t size) { + IO_CODE(); return qemu_memalign(bdrv_opt_mem_align(bs), size); } void *qemu_blockalign0(BlockDriverState *bs, size_t size) { + IO_CODE(); return memset(qemu_blockalign(bs, size), 0, size); } void *qemu_try_blockalign(BlockDriverState *bs, size_t size) { size_t align = bdrv_opt_mem_align(bs); + IO_CODE(); /* Ensure that NULL is never returned on success */ assert(align > 0); @@ -3142,6 +3238,7 @@ void *qemu_try_blockalign(BlockDriverState *bs, size_t size) void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) { void *mem = qemu_try_blockalign(bs, size); + IO_CODE(); if (mem) { memset(mem, 0, size); @@ -3150,29 +3247,10 @@ void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) return mem; } -/* - * Check if all memory in this vector is sector aligned. - */ -bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) -{ - int i; - size_t alignment = bdrv_min_mem_align(bs); - - for (i = 0; i < qiov->niov; i++) { - if ((uintptr_t) qiov->iov[i].iov_base % alignment) { - return false; - } - if (qiov->iov[i].iov_len % alignment) { - return false; - } - } - - return true; -} - void bdrv_io_plug(BlockDriverState *bs) { BdrvChild *child; + IO_CODE(); QLIST_FOREACH(child, &bs->children, next) { bdrv_io_plug(child->bs); @@ -3189,6 +3267,7 @@ void bdrv_io_plug(BlockDriverState *bs) void bdrv_io_unplug(BlockDriverState *bs) { BdrvChild *child; + IO_CODE(); assert(bs->io_plugged); if (qatomic_fetch_dec(&bs->io_plugged) == 1) { @@ -3203,27 +3282,57 @@ void bdrv_io_unplug(BlockDriverState *bs) } } -void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size) +/* Helper that undoes bdrv_register_buf() when it fails partway through */ +static void bdrv_register_buf_rollback(BlockDriverState *bs, + void *host, + size_t size, + BdrvChild *final_child) { BdrvChild *child; - if (bs->drv && bs->drv->bdrv_register_buf) { - bs->drv->bdrv_register_buf(bs, host, size); - } QLIST_FOREACH(child, &bs->children, next) { - bdrv_register_buf(child->bs, host, size); + if (child == final_child) { + break; + } + + bdrv_unregister_buf(child->bs, host, size); + } + + if (bs->drv && bs->drv->bdrv_unregister_buf) { + bs->drv->bdrv_unregister_buf(bs, host, size); } } -void bdrv_unregister_buf(BlockDriverState *bs, void *host) +bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size, + Error **errp) { BdrvChild *child; - if (bs->drv && bs->drv->bdrv_unregister_buf) { - bs->drv->bdrv_unregister_buf(bs, host); + GLOBAL_STATE_CODE(); + if (bs->drv && bs->drv->bdrv_register_buf) { + if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) { + return false; + } } QLIST_FOREACH(child, &bs->children, next) { - bdrv_unregister_buf(child->bs, host); + if 
(!bdrv_register_buf(child->bs, host, size, errp)) { + bdrv_register_buf_rollback(bs, host, size, child); + return false; + } + } + return true; +} + +void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size) +{ + BdrvChild *child; + + GLOBAL_STATE_CODE(); + if (bs->drv && bs->drv->bdrv_unregister_buf) { + bs->drv->bdrv_unregister_buf(bs, host, size); + } + QLIST_FOREACH(child, &bs->children, next) { + bdrv_unregister_buf(child->bs, host, size); } } @@ -3239,6 +3348,8 @@ static int coroutine_fn bdrv_co_copy_range_internal( /* TODO We can support BDRV_REQ_NO_FALLBACK here */ assert(!(read_flags & BDRV_REQ_NO_FALLBACK)); assert(!(write_flags & BDRV_REQ_NO_FALLBACK)); + assert(!(read_flags & BDRV_REQ_NO_WAIT)); + assert(!(write_flags & BDRV_REQ_NO_WAIT)); if (!dst || !dst->bs || !bdrv_is_inserted(dst->bs)) { return -ENOMEDIUM; @@ -3313,6 +3424,7 @@ int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { + IO_CODE(); trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes, read_flags, write_flags); return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, @@ -3329,6 +3441,7 @@ int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { + IO_CODE(); trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes, read_flags, write_flags); return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, @@ -3340,6 +3453,7 @@ int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset, int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { + IO_CODE(); return bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes, read_flags, write_flags); @@ -3372,7 +3486,7 @@ int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact, BdrvTrackedRequest req; int64_t old_size, new_bytes; int ret; - + IO_CODE(); /* if bs->drv == NULL, bs is closed, so there's nothing to do here */ if (!drv) { @@ -3490,6 +3604,7 @@ out: void bdrv_cancel_in_flight(BlockDriverState *bs) { + GLOBAL_STATE_CODE(); if (!bs || !bs->drv) { return; } @@ -3498,3 +3613,75 @@ void bdrv_cancel_in_flight(BlockDriverState *bs) bs->drv->bdrv_cancel_in_flight(bs); } } + +int coroutine_fn +bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset) +{ + BlockDriverState *bs = child->bs; + BlockDriver *drv = bs->drv; + int ret; + IO_CODE(); + + if (!drv) { + return -ENOMEDIUM; + } + + if (!drv->bdrv_co_preadv_snapshot) { + return -ENOTSUP; + } + + bdrv_inc_in_flight(bs); + ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset); + bdrv_dec_in_flight(bs); + + return ret; +} + +int coroutine_fn +bdrv_co_snapshot_block_status(BlockDriverState *bs, + bool want_zero, int64_t offset, int64_t bytes, + int64_t *pnum, int64_t *map, + BlockDriverState **file) +{ + BlockDriver *drv = bs->drv; + int ret; + IO_CODE(); + + if (!drv) { + return -ENOMEDIUM; + } + + if (!drv->bdrv_co_snapshot_block_status) { + return -ENOTSUP; + } + + bdrv_inc_in_flight(bs); + ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes, + pnum, map, file); + bdrv_dec_in_flight(bs); + + return ret; +} + +int coroutine_fn +bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes) +{ + BlockDriver *drv = bs->drv; + int ret; + IO_CODE(); + + if (!drv) { + return -ENOMEDIUM; + } + + if (!drv->bdrv_co_pdiscard_snapshot) { + 
return -ENOTSUP; + } + + bdrv_inc_in_flight(bs); + ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes); + bdrv_dec_in_flight(bs); + + return ret; +} diff --git a/block/io_uring.c b/block/io_uring.c index dfa475cc87..973e15d876 100644 --- a/block/io_uring.c +++ b/block/io_uring.c @@ -10,7 +10,6 @@ */ #include "qemu/osdep.h" #include -#include "qemu-common.h" #include "block/aio.h" #include "qemu/queue.h" #include "block/block.h" @@ -73,12 +72,8 @@ static void luring_resubmit(LuringState *s, LuringAIOCB *luringcb) /** * luring_resubmit_short_read: * - * Before Linux commit 9d93a3f5a0c ("io_uring: punt short reads to async - * context") a buffered I/O request with the start of the file range in the - * page cache could result in a short read. Applications need to resubmit the - * remaining read request. - * - * This is a slow path but recent kernels never take it. + * Short reads are rare but may occur. The remaining read request needs to be + * resubmitted. */ static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb, int nread) @@ -89,7 +84,7 @@ static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb, trace_luring_resubmit_short_read(s, luringcb, nread); /* Update read position */ - luringcb->total_read = nread; + luringcb->total_read += nread; remaining = luringcb->qiov->size - luringcb->total_read; /* Shorten qiov */ @@ -103,7 +98,7 @@ static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb, remaining); /* Update sqe */ - luringcb->sqeq.off = nread; + luringcb->sqeq.off += nread; luringcb->sqeq.addr = (__u64)(uintptr_t)luringcb->resubmit_qiov.iov; luringcb->sqeq.len = luringcb->resubmit_qiov.niov; @@ -292,12 +287,14 @@ static bool qemu_luring_poll_cb(void *opaque) { LuringState *s = opaque; - if (io_uring_cq_ready(&s->ring)) { - luring_process_completions_and_submit(s); - return true; - } + return io_uring_cq_ready(&s->ring); +} - return false; +static void qemu_luring_poll_ready(void *opaque) +{ + LuringState *s = opaque; + + luring_process_completions_and_submit(s); } static void ioq_init(LuringQueue *io_q) @@ -402,8 +399,8 @@ int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd, void luring_detach_aio_context(LuringState *s, AioContext *old_context) { - aio_set_fd_handler(old_context, s->ring.ring_fd, false, NULL, NULL, NULL, - s); + aio_set_fd_handler(old_context, s->ring.ring_fd, false, + NULL, NULL, NULL, NULL, s); qemu_bh_delete(s->completion_bh); s->aio_context = NULL; } @@ -413,7 +410,8 @@ void luring_attach_aio_context(LuringState *s, AioContext *new_context) s->aio_context = new_context; s->completion_bh = aio_bh_new(new_context, qemu_luring_completion_bh, s); aio_set_fd_handler(s->aio_context, s->ring.ring_fd, false, - qemu_luring_completion_cb, NULL, qemu_luring_poll_cb, s); + qemu_luring_completion_cb, NULL, + qemu_luring_poll_cb, qemu_luring_poll_ready, s); } LuringState *luring_init(Error **errp) diff --git a/block/iscsi.c b/block/iscsi.c index 4d2a416ce7..1bba42a71b 100644 --- a/block/iscsi.c +++ b/block/iscsi.c @@ -28,7 +28,7 @@ #include #include #include -#include "qemu-common.h" +#include "sysemu/sysemu.h" #include "qemu/config-file.h" #include "qemu/error-report.h" #include "qemu/bitops.h" @@ -268,6 +268,7 @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status, timer_mod(&iTask->retry_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + retry_time); iTask->do_retry = 1; + return; } else if (status == SCSI_STATUS_CHECK_CONDITION) { int error = 
iscsi_translate_sense(&task->sense); if (error == EAGAIN) { @@ -290,7 +291,8 @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status, } } -static void iscsi_co_init_iscsitask(IscsiLun *iscsilun, struct IscsiTask *iTask) +static void coroutine_fn +iscsi_co_init_iscsitask(IscsiLun *iscsilun, struct IscsiTask *iTask) { *iTask = (struct IscsiTask) { .co = qemu_coroutine_self(), @@ -363,7 +365,7 @@ iscsi_set_events(IscsiLun *iscsilun) false, (ev & POLLIN) ? iscsi_process_read : NULL, (ev & POLLOUT) ? iscsi_process_write : NULL, - NULL, + NULL, NULL, iscsilun); iscsilun->events = ev; } @@ -427,14 +429,14 @@ static int64_t sector_qemu2lun(int64_t sector, IscsiLun *iscsilun) return sector * BDRV_SECTOR_SIZE / iscsilun->block_size; } -static bool is_byte_request_lun_aligned(int64_t offset, int count, +static bool is_byte_request_lun_aligned(int64_t offset, int64_t bytes, IscsiLun *iscsilun) { - if (offset % iscsilun->block_size || count % iscsilun->block_size) { + if (offset % iscsilun->block_size || bytes % iscsilun->block_size) { error_report("iSCSI misaligned request: " "iscsilun->block_size %u, offset %" PRIi64 - ", count %d", - iscsilun->block_size, offset, count); + ", bytes %" PRIi64, + iscsilun->block_size, offset, bytes); return false; } return true; @@ -781,9 +783,6 @@ retry: iscsi_allocmap_set_allocated(iscsilun, offset, *pnum); } - if (*pnum > bytes) { - *pnum = bytes; - } out_unlock: qemu_mutex_unlock(&iscsilun->mutex); g_free(iTask.err_str); @@ -1141,7 +1140,8 @@ iscsi_getlength(BlockDriverState *bs) } static int -coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes) +coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, + int64_t bytes) { IscsiLun *iscsilun = bs->opaque; struct IscsiTask iTask; @@ -1157,6 +1157,12 @@ coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes) return 0; } + /* + * We don't want to overflow list.num which is uint32_t. + * We rely on our max_pdiscard. + */ + assert(bytes / iscsilun->block_size <= UINT32_MAX); + list.lba = offset / iscsilun->block_size; list.num = bytes / iscsilun->block_size; @@ -1205,12 +1211,12 @@ out_unlock: static int coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, - int bytes, BdrvRequestFlags flags) + int64_t bytes, BdrvRequestFlags flags) { IscsiLun *iscsilun = bs->opaque; struct IscsiTask iTask; uint64_t lba; - uint32_t nb_blocks; + uint64_t nb_blocks; bool use_16_for_ws = iscsilun->use_16_for_rw; int r = 0; @@ -1250,11 +1256,21 @@ coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, iscsi_co_init_iscsitask(iscsilun, &iTask); retry: if (use_16_for_ws) { + /* + * iscsi_writesame16_task num_blocks argument is uint32_t. We rely here + * on our max_pwrite_zeroes limit. + */ + assert(nb_blocks <= UINT32_MAX); iTask.task = iscsi_writesame16_task(iscsilun->iscsi, iscsilun->lun, lba, iscsilun->zeroblock, iscsilun->block_size, nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP), 0, 0, iscsi_co_generic_cb, &iTask); } else { + /* + * iscsi_writesame10_task num_blocks argument is uint16_t. We rely here + * on our max_pwrite_zeroes limit. 
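/*
 * The arithmetic behind the limits the iSCSI hunks above rely on: WRITE
 * SAME(10) carries a 16-bit block count and WRITE SAME(16) a 32-bit one,
 * so the per-request byte limit is the block-count limit times the block
 * size. A standalone sketch, not the iSCSI driver code.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint32_t block_size = 512;

    /* Largest zero-write, in bytes, that still fits each CDB field. */
    uint64_t max_ws10_bytes = (uint64_t)UINT16_MAX * block_size;
    uint64_t max_ws16_bytes = (uint64_t)UINT32_MAX * block_size;

    printf("WRITE SAME(10) limit: %" PRIu64 " bytes\n", max_ws10_bytes);
    printf("WRITE SAME(16) limit: %" PRIu64 " bytes\n", max_ws16_bytes);

    /* With max_pwrite_zeroes clamped to such a value, the nb_blocks
     * asserts in the hunk above cannot fire. */
    uint64_t bytes = max_ws10_bytes;        /* a request right at the limit */
    uint64_t nb_blocks = bytes / block_size;
    assert(nb_blocks <= UINT16_MAX);
    return 0;
}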
+ */ + assert(nb_blocks <= UINT16_MAX); iTask.task = iscsi_writesame10_task(iscsilun->iscsi, iscsilun->lun, lba, iscsilun->zeroblock, iscsilun->block_size, nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP), @@ -1520,7 +1536,7 @@ static void iscsi_detach_aio_context(BlockDriverState *bs) IscsiLun *iscsilun = bs->opaque; aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsilun->iscsi), - false, NULL, NULL, NULL, NULL); + false, NULL, NULL, NULL, NULL, NULL); iscsilun->events = 0; if (iscsilun->nop_timer) { @@ -2051,7 +2067,7 @@ static void iscsi_refresh_limits(BlockDriverState *bs, Error **errp) uint64_t max_xfer_len = iscsilun->use_16_for_rw ? 0xffffffff : 0xffff; unsigned int block_size = MAX(BDRV_SECTOR_SIZE, iscsilun->block_size); - assert(iscsilun->block_size >= BDRV_SECTOR_SIZE || bs->sg); + assert(iscsilun->block_size >= BDRV_SECTOR_SIZE || bdrv_is_sg(bs)); bs->bl.request_alignment = block_size; @@ -2064,20 +2080,19 @@ static void iscsi_refresh_limits(BlockDriverState *bs, Error **errp) } if (iscsilun->lbp.lbpu) { - if (iscsilun->bl.max_unmap < 0xffffffff / block_size) { - bs->bl.max_pdiscard = - iscsilun->bl.max_unmap * iscsilun->block_size; - } + bs->bl.max_pdiscard = + MIN_NON_ZERO(iscsilun->bl.max_unmap * iscsilun->block_size, + (uint64_t)UINT32_MAX * iscsilun->block_size); bs->bl.pdiscard_alignment = iscsilun->bl.opt_unmap_gran * iscsilun->block_size; } else { bs->bl.pdiscard_alignment = iscsilun->block_size; } - if (iscsilun->bl.max_ws_len < 0xffffffff / block_size) { - bs->bl.max_pwrite_zeroes = - iscsilun->bl.max_ws_len * iscsilun->block_size; - } + bs->bl.max_pwrite_zeroes = + MIN_NON_ZERO(iscsilun->bl.max_ws_len * iscsilun->block_size, + max_xfer_len * iscsilun->block_size); + if (iscsilun->lbp.lbpws) { bs->bl.pwrite_zeroes_alignment = iscsilun->bl.opt_unmap_gran * iscsilun->block_size; @@ -2172,10 +2187,10 @@ static void coroutine_fn iscsi_co_invalidate_cache(BlockDriverState *bs, static int coroutine_fn iscsi_co_copy_range_from(BlockDriverState *bs, BdrvChild *src, - uint64_t src_offset, + int64_t src_offset, BdrvChild *dst, - uint64_t dst_offset, - uint64_t bytes, + int64_t dst_offset, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { @@ -2313,10 +2328,10 @@ static void iscsi_xcopy_data(struct iscsi_data *data, static int coroutine_fn iscsi_co_copy_range_to(BlockDriverState *bs, BdrvChild *src, - uint64_t src_offset, + int64_t src_offset, BdrvChild *dst, - uint64_t dst_offset, - uint64_t bytes, + int64_t dst_offset, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { diff --git a/block/linux-aio.c b/block/linux-aio.c index 0dab507b71..d2cfb7f523 100644 --- a/block/linux-aio.c +++ b/block/linux-aio.c @@ -263,12 +263,15 @@ static bool qemu_laio_poll_cb(void *opaque) LinuxAioState *s = container_of(e, LinuxAioState, e); struct io_event *events; - if (!io_getevents_peek(s->ctx, &events)) { - return false; - } + return io_getevents_peek(s->ctx, &events); +} + +static void qemu_laio_poll_ready(EventNotifier *opaque) +{ + EventNotifier *e = opaque; + LinuxAioState *s = container_of(e, LinuxAioState, e); qemu_laio_process_completions_and_submit(s); - return true; } static void ioq_init(LaioQueue *io_q) @@ -334,30 +337,53 @@ static void ioq_submit(LinuxAioState *s) } } +static uint64_t laio_max_batch(LinuxAioState *s, uint64_t dev_max_batch) +{ + uint64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH; + + /* + * AIO context can be shared between multiple block devices, so + * `dev_max_batch` allows reducing 
the batch size for latency-sensitive + * devices. + */ + max_batch = MIN_NON_ZERO(dev_max_batch, max_batch); + + /* limit the batch with the number of available events */ + max_batch = MIN_NON_ZERO(MAX_EVENTS - s->io_q.in_flight, max_batch); + + return max_batch; +} + void laio_io_plug(BlockDriverState *bs, LinuxAioState *s) { s->io_q.plugged++; } -void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s) +void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s, + uint64_t dev_max_batch) { assert(s->io_q.plugged); - if (--s->io_q.plugged == 0 && - !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) { + s->io_q.plugged--; + + /* + * Why max batch checking is performed here: + * Another BDS may have queued requests with a higher dev_max_batch and + * therefore in_queue could now exceed our dev_max_batch. Re-check the max + * batch so we can honor our device's dev_max_batch. + */ + if (s->io_q.in_queue >= laio_max_batch(s, dev_max_batch) || + (!s->io_q.plugged && + !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending))) { ioq_submit(s); } } static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset, - int type) + int type, uint64_t dev_max_batch) { LinuxAioState *s = laiocb->ctx; struct iocb *iocbs = &laiocb->iocb; QEMUIOVector *qiov = laiocb->qiov; - int64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH; - - /* limit the batch with the number of available events */ - max_batch = MIN_NON_ZERO(MAX_EVENTS - s->io_q.in_flight, max_batch); switch (type) { case QEMU_AIO_WRITE: @@ -378,7 +404,7 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset, s->io_q.in_queue++; if (!s->io_q.blocked && (!s->io_q.plugged || - s->io_q.in_queue >= max_batch)) { + s->io_q.in_queue >= laio_max_batch(s, dev_max_batch))) { ioq_submit(s); } @@ -386,7 +412,8 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset, } int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd, - uint64_t offset, QEMUIOVector *qiov, int type) + uint64_t offset, QEMUIOVector *qiov, int type, + uint64_t dev_max_batch) { int ret; struct qemu_laiocb laiocb = { @@ -398,7 +425,7 @@ int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd, .qiov = qiov, }; - ret = laio_do_submit(fd, &laiocb, offset, type); + ret = laio_do_submit(fd, &laiocb, offset, type, dev_max_batch); if (ret < 0) { return ret; } @@ -411,7 +438,7 @@ int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd, void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context) { - aio_set_event_notifier(old_context, &s->e, false, NULL, NULL); + aio_set_event_notifier(old_context, &s->e, false, NULL, NULL, NULL); qemu_bh_delete(s->completion_bh); s->aio_context = NULL; } @@ -422,7 +449,8 @@ void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context) s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s); aio_set_event_notifier(new_context, &s->e, false, qemu_laio_completion_cb, - qemu_laio_poll_cb); + qemu_laio_poll_cb, + qemu_laio_poll_ready); } LinuxAioState *laio_init(Error **errp) @@ -433,7 +461,7 @@ LinuxAioState *laio_init(Error **errp) s = g_malloc0(sizeof(*s)); rc = event_notifier_init(&s->e, false); if (rc < 0) { - error_setg_errno(errp, -rc, "failed to to initialize event notifier"); + error_setg_errno(errp, -rc, "failed to initialize event notifier"); goto out_free_state; } diff --git a/block/meson.build b/block/meson.build index 0450914c7a..b7c68b83a3 100644 --- a/block/meson.build +++ 
b/block/meson.build @@ -4,7 +4,7 @@ block_ss.add(files( 'aio_task.c', 'amend.c', 'backup.c', - 'backup-top.c', + 'copy-before-write.c', 'blkdebug.c', 'blklogwrites.c', 'blkverify.c', @@ -32,7 +32,9 @@ block_ss.add(files( 'qcow2.c', 'quorum.c', 'raw-format.c', + 'reqlist.c', 'snapshot.c', + 'snapshot-access.c', 'throttle-groups.c', 'throttle.c', 'vhdx-endian.c', @@ -44,34 +46,54 @@ block_ss.add(files( ), zstd, zlib, gnutls) softmmu_ss.add(when: 'CONFIG_TCG', if_true: files('blkreplay.c')) +softmmu_ss.add(files('block-ram-registrar.c')) + +if get_option('qcow1').allowed() + block_ss.add(files('qcow.c')) +endif +if get_option('vdi').allowed() + block_ss.add(files('vdi.c')) +endif +if get_option('cloop').allowed() + block_ss.add(files('cloop.c')) +endif +if get_option('bochs').allowed() + block_ss.add(files('bochs.c')) +endif +if get_option('vvfat').allowed() + block_ss.add(files('vvfat.c')) +endif +if get_option('dmg').allowed() + block_ss.add(files('dmg.c')) +endif +if get_option('qed').allowed() + block_ss.add(files( + 'qed-check.c', + 'qed-cluster.c', + 'qed-l2-cache.c', + 'qed-table.c', + 'qed.c', + )) +endif +if get_option('parallels').allowed() + block_ss.add(files('parallels.c', 'parallels-ext.c')) +endif -block_ss.add(when: 'CONFIG_QCOW1', if_true: files('qcow.c')) -block_ss.add(when: 'CONFIG_VDI', if_true: files('vdi.c')) -block_ss.add(when: 'CONFIG_CLOOP', if_true: files('cloop.c')) -block_ss.add(when: 'CONFIG_BOCHS', if_true: files('bochs.c')) -block_ss.add(when: 'CONFIG_VVFAT', if_true: files('vvfat.c')) -block_ss.add(when: 'CONFIG_DMG', if_true: files('dmg.c')) -block_ss.add(when: 'CONFIG_QED', if_true: files( - 'qed-check.c', - 'qed-cluster.c', - 'qed-l2-cache.c', - 'qed-table.c', - 'qed.c', -)) -block_ss.add(when: [libxml2, 'CONFIG_PARALLELS'], - if_true: files('parallels.c', 'parallels-ext.c')) block_ss.add(when: 'CONFIG_WIN32', if_true: files('file-win32.c', 'win32-aio.c')) block_ss.add(when: 'CONFIG_POSIX', if_true: [files('file-posix.c'), coref, iokit]) block_ss.add(when: libiscsi, if_true: files('iscsi-opts.c')) block_ss.add(when: 'CONFIG_LINUX', if_true: files('nvme.c')) -block_ss.add(when: 'CONFIG_REPLICATION', if_true: files('replication.c')) -block_ss.add(when: ['CONFIG_LINUX_AIO', libaio], if_true: files('linux-aio.c')) +if not get_option('replication').disabled() + block_ss.add(files('replication.c')) +endif +block_ss.add(when: libaio, if_true: files('linux-aio.c')) block_ss.add(when: linux_io_uring, if_true: files('io_uring.c')) block_modules = {} modsrc = [] foreach m : [ + [blkio, 'blkio', files('blkio.c')], [curl, 'curl', files('curl.c')], [glusterfs, 'gluster', files('gluster.c')], [libiscsi, 'iscsi', [files('iscsi.c'), libm]], @@ -90,7 +112,7 @@ foreach m : [ endforeach # those are not exactly regular block modules, so treat them apart -if 'CONFIG_DMG' in config_host +if get_option('dmg').allowed() foreach m : [ [liblzfse, 'dmg-lzfse', liblzfse, 'dmg-lzfse.c'], [libbzip2, 'dmg-bz2', [glib, libbzip2], 'dmg-bz2.c'] @@ -113,8 +135,12 @@ block_ss.add(module_block_h) wrapper_py = find_program('../scripts/block-coroutine-wrapper.py') block_gen_c = custom_target('block-gen.c', output: 'block-gen.c', - input: files('../include/block/block.h', - 'coroutines.h'), + input: files( + '../include/block/block-io.h', + '../include/block/block-global-state.h', + '../include/sysemu/block-backend-io.h', + 'coroutines.h' + ), command: [wrapper_py, '@OUTPUT@', '@INPUT@']) block_ss.add(block_gen_c) diff --git a/block/mirror.c b/block/mirror.c index 98fc66eabf..251adc5ae0 100644 --- 
a/block/mirror.c +++ b/block/mirror.c @@ -23,6 +23,7 @@ #include "qapi/qmp/qerror.h" #include "qemu/ratelimit.h" #include "qemu/bitmap.h" +#include "qemu/memalign.h" #define MAX_IN_FLIGHT 16 #define MAX_IO_BYTES (1 << 20) /* 1 Mb */ @@ -56,7 +57,6 @@ typedef struct MirrorBlockJob { bool zero_target; MirrorCopyMode copy_mode; BlockdevOnError on_source_error, on_target_error; - bool synced; /* Set when the target is synced (dirty bitmap is clean, nothing * in flight) and the job is running in active mode */ bool actively_synced; @@ -73,7 +73,7 @@ typedef struct MirrorBlockJob { uint64_t last_pause_ns; unsigned long *in_flight_bitmap; - int in_flight; + unsigned in_flight; int64_t bytes_in_flight; QTAILQ_HEAD(, MirrorOp) ops_in_flight; int ret; @@ -82,6 +82,7 @@ typedef struct MirrorBlockJob { int max_iov; bool initial_zeroing_ongoing; int in_active_write_counter; + int64_t active_write_bytes_in_flight; bool prepared; bool in_drain; } MirrorBlockJob; @@ -121,7 +122,6 @@ typedef enum MirrorMethod { static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read, int error) { - s->synced = false; s->actively_synced = false; if (read) { return block_job_error_action(&s->common, s->on_source_error, @@ -160,18 +160,25 @@ static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self, if (ranges_overlap(self_start_chunk, self_nb_chunks, op_start_chunk, op_nb_chunks)) { - /* - * If the operation is already (indirectly) waiting for us, or - * will wait for us as soon as it wakes up, then just go on - * (instead of producing a deadlock in the former case). - */ - if (op->waiting_for_op) { - continue; + if (self) { + /* + * If the operation is already (indirectly) waiting for us, + * or will wait for us as soon as it wakes up, then just go + * on (instead of producing a deadlock in the former case). + */ + if (op->waiting_for_op) { + continue; + } + + self->waiting_for_op = op; } - self->waiting_for_op = op; qemu_co_queue_wait(&op->waiting_requests, NULL); - self->waiting_for_op = NULL; + + if (self) { + self->waiting_for_op = NULL; + } + break; } } @@ -298,19 +305,21 @@ static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset, } static inline void coroutine_fn -mirror_wait_for_any_operation(MirrorBlockJob *s, bool active) +mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s) { MirrorOp *op; QTAILQ_FOREACH(op, &s->ops_in_flight, next) { - /* Do not wait on pseudo ops, because it may in turn wait on + /* + * Do not wait on pseudo ops, because it may in turn wait on * some other operation to start, which may in fact be the * caller of this function. Since there is only one pseudo op * at any given time, we will always find some real operation - * to wait on. */ - if (!op->is_pseudo_op && op->is_in_flight && - op->is_active_write == active) - { + * to wait on. + * Also, do not wait on active operations, because they do not + * use up in-flight slots. + */ + if (!op->is_pseudo_op && op->is_in_flight && !op->is_active_write) { qemu_co_queue_wait(&op->waiting_requests, NULL); return; } @@ -318,13 +327,6 @@ mirror_wait_for_any_operation(MirrorBlockJob *s, bool active) abort(); } -static inline void coroutine_fn -mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s) -{ - /* Only non-active operations use up in-flight slots */ - mirror_wait_for_any_operation(s, false); -} - /* Perform a mirror copy operation. 
* * *op->bytes_handled is set to the number of bytes copied after and @@ -488,6 +490,13 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) } bdrv_dirty_bitmap_unlock(s->dirty_bitmap); + /* + * Wait for concurrent requests to @offset. The next loop will limit the + * copied area based on in_flight_bitmap so we only copy an area that does + * not overlap with concurrent in-flight requests. Still, we would like to + * copy something, so wait until there are at least no more requests to the + * very beginning of the area. + */ mirror_wait_on_conflicts(NULL, s, offset, 1); job_pause_point(&s->common.job); @@ -766,13 +775,6 @@ static int mirror_exit_common(Job *job) block_job_remove_all_bdrv(bjob); bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort); - /* We just changed the BDS the job BB refers to (with either or both of the - * bdrv_replace_node() calls), so switch the BB back so the cleanup does - * the right thing. We don't need any permissions any more now. */ - blk_remove_bs(bjob->blk); - blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort); - blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort); - bs_opaque->job = NULL; bdrv_drained_end(src); @@ -895,6 +897,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) BlockDriverState *bs = s->mirror_top_bs->backing->bs; BlockDriverState *target_bs = blk_bs(s->target); bool need_drain = true; + BlockDeviceIoStatus iostatus; int64_t length; int64_t target_length; BlockDriverInfo bdi; @@ -922,8 +925,8 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) * active layer. */ if (s->base == blk_bs(s->target)) { if (s->bdev_length > target_length) { - ret = blk_truncate(s->target, s->bdev_length, false, - PREALLOC_MODE_OFF, 0, NULL); + ret = blk_co_truncate(s->target, s->bdev_length, false, + PREALLOC_MODE_OFF, 0, NULL); if (ret < 0) { goto immediate_exit; } @@ -937,12 +940,10 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) if (s->bdev_length == 0) { /* Transition to the READY state and wait for complete. */ job_transition_to_ready(&s->common.job); - s->synced = true; s->actively_synced = true; - while (!job_is_cancelled(&s->common.job) && !s->should_complete) { + while (!job_cancel_requested(&s->common.job) && !s->should_complete) { job_yield(&s->common.job); } - s->common.job.cancelled = false; goto immediate_exit; } @@ -990,12 +991,6 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) int64_t cnt, delta; bool should_complete; - /* Do not start passive operations while there are active - * writes in progress */ - while (s->in_active_write_counter) { - mirror_wait_for_any_operation(s, true); - } - if (s->ret < 0) { ret = s->ret; goto immediate_exit; @@ -1003,19 +998,29 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) job_pause_point(&s->common.job); + if (job_is_cancelled(&s->common.job)) { + ret = 0; + goto immediate_exit; + } + cnt = bdrv_get_dirty_count(s->dirty_bitmap); /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is * the number of bytes currently being processed; together those are * the current remaining operation length */ - job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt); + job_progress_set_remaining(&s->common.job, + s->bytes_in_flight + cnt + + s->active_write_bytes_in_flight); /* Note that even when no rate limit is applied we need to yield * periodically with no pending I/O so that bdrv_drain_all() returns. 
* We do so every BLKOCK_JOB_SLICE_TIME nanoseconds, or when there is * an error, or when the source is clean, whichever comes first. */ delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns; + WITH_JOB_LOCK_GUARD() { + iostatus = s->common.iostatus; + } if (delta < BLOCK_JOB_SLICE_TIME && - s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) { + iostatus == BLOCK_DEVICE_IO_STATUS_OK) { if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 || (cnt == 0 && s->in_flight > 0)) { trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight); @@ -1029,7 +1034,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) should_complete = false; if (s->in_flight == 0 && cnt == 0) { trace_mirror_before_flush(s); - if (!s->synced) { + if (!job_is_ready(&s->common.job)) { if (mirror_flush(s) < 0) { /* Go check s->ret. */ continue; @@ -1040,14 +1045,13 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) * the target in a consistent state. */ job_transition_to_ready(&s->common.job); - s->synced = true; if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) { s->actively_synced = true; } } should_complete = s->should_complete || - job_is_cancelled(&s->common.job); + job_cancel_requested(&s->common.job); cnt = bdrv_get_dirty_count(s->dirty_bitmap); } @@ -1066,6 +1070,10 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) s->in_drain = true; bdrv_drained_begin(bs); + + /* Must be zero because we are drained */ + assert(s->in_active_write_counter == 0); + cnt = bdrv_get_dirty_count(s->dirty_bitmap); if (cnt > 0 || mirror_flush(s) < 0) { bdrv_drained_end(bs); @@ -1077,24 +1085,17 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) * completion. */ assert(QLIST_EMPTY(&bs->tracked_requests)); - s->common.job.cancelled = false; need_drain = false; break; } - ret = 0; - - if (s->synced && !should_complete) { + if (job_is_ready(&s->common.job) && !should_complete) { delay_ns = (s->in_flight == 0 && cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0); } - trace_mirror_before_sleep(s, cnt, s->synced, delay_ns); + trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job), + delay_ns); job_sleep_ns(&s->common.job, delay_ns); - if (job_is_cancelled(&s->common.job) && - (!s->synced || s->common.job.force_cancel)) - { - break; - } s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); } @@ -1104,8 +1105,7 @@ immediate_exit: * or it was cancelled prematurely so that we do not guarantee that * the target is a copy of the source. */ - assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) && - job_is_cancelled(&s->common.job))); + assert(ret < 0 || job_is_cancelled(&s->common.job)); assert(need_drain); mirror_wait_for_all_io(s); } @@ -1128,7 +1128,7 @@ static void mirror_complete(Job *job, Error **errp) { MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); - if (!s->synced) { + if (!job_is_ready(job)) { error_setg(errp, "The active block job '%s' cannot be completed", job->id); return; @@ -1147,10 +1147,7 @@ static void mirror_complete(Job *job, Error **errp) replace_aio_context = bdrv_get_aio_context(s->to_replace); aio_context_acquire(replace_aio_context); - /* TODO Translate this into permission system. Current definition of - * GRAPH_MOD would require to request it for the parents; they might - * not even be BlockDriverStates, however, so a BdrvChild can't address - * them. May need redefinition of GRAPH_MOD. */ + /* TODO Translate this into child freeze system. 
*/ error_setg(&s->replace_blocker, "block device is in use by block-job-complete"); bdrv_op_block_all(s->to_replace, s->replace_blocker); @@ -1162,8 +1159,10 @@ static void mirror_complete(Job *job, Error **errp) s->should_complete = true; /* If the job is paused, it will be re-entered when it is resumed */ - if (!job->paused) { - job_enter(job); + WITH_JOB_LOCK_GUARD() { + if (!job->paused) { + job_enter_cond_locked(job, NULL); + } } } @@ -1183,21 +1182,37 @@ static bool mirror_drained_poll(BlockJob *job) * from one of our own drain sections, to avoid a deadlock waiting for * ourselves. */ - if (!s->common.job.paused && !s->common.job.cancelled && !s->in_drain) { - return true; + WITH_JOB_LOCK_GUARD() { + if (!s->common.job.paused && !job_is_cancelled_locked(&job->job) + && !s->in_drain) { + return true; + } } return !!s->in_flight; } -static void mirror_cancel(Job *job, bool force) +static bool mirror_cancel(Job *job, bool force) { MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); BlockDriverState *target = blk_bs(s->target); - if (force || !job_is_ready(job)) { + /* + * Before the job is READY, we treat any cancellation like a + * force-cancellation. + */ + force = force || !job_is_ready(job); + + if (force) { bdrv_cancel_in_flight(target); } + return force; +} + +static bool commit_active_cancel(Job *job, bool force) +{ + /* Same as above in mirror_cancel() */ + return force || !job_is_ready(job); } static const BlockJobDriver mirror_job_driver = { @@ -1227,6 +1242,7 @@ static const BlockJobDriver commit_active_job_driver = { .abort = mirror_abort, .pause = mirror_pause, .complete = mirror_complete, + .cancel = commit_active_cancel, }, .drained_poll = mirror_drained_poll, }; @@ -1293,6 +1309,7 @@ do_sync_target_write(MirrorBlockJob *job, MirrorMethod method, } job_progress_increase_remaining(&job->common.job, bytes); + job->active_write_bytes_in_flight += bytes; switch (method) { case MIRROR_METHOD_COPY: @@ -1314,6 +1331,7 @@ do_sync_target_write(MirrorBlockJob *job, MirrorMethod method, abort(); } + job->active_write_bytes_in_flight -= bytes; if (ret >= 0) { job_progress_update(&job->common.job, bytes); } else { @@ -1362,6 +1380,19 @@ static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s, s->in_active_write_counter++; + /* + * Wait for concurrent requests affecting the area. If there are already + * running requests that are copying off now-to-be stale data in the area, + * we must wait for them to finish before we begin writing fresh data to the + * target so that the write operations appear in the correct order. + * Note that background requests (see mirror_iteration()) in contrast only + * wait for conflicting requests at the start of the dirty area, and then + * (based on the in_flight_bitmap) truncate the area to copy so it will not + * conflict with any requests beyond that. For active writes, however, we + * cannot truncate that area. The request from our parent must be blocked + * until the area is copied in full. Therefore, we must wait for the whole + * area to become free of concurrent requests. 
+ */ mirror_wait_on_conflicts(op, s, offset, bytes); bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk); @@ -1395,7 +1426,7 @@ static void coroutine_fn active_write_settle(MirrorOp *op) } static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags); } @@ -1407,10 +1438,13 @@ static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs, MirrorOp *op = NULL; MirrorBDSOpaque *s = bs->opaque; int ret = 0; - bool copy_to_target; + bool copy_to_target = false; - copy_to_target = s->job->ret >= 0 && - s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; + if (s->job) { + copy_to_target = s->job->ret >= 0 && + !job_is_cancelled(&s->job->common.job) && + s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; + } if (copy_to_target) { op = active_write_prepare(s->job, offset, bytes); @@ -1449,16 +1483,19 @@ out: } static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { MirrorBDSOpaque *s = bs->opaque; QEMUIOVector bounce_qiov; void *bounce_buf; int ret = 0; - bool copy_to_target; + bool copy_to_target = false; - copy_to_target = s->job->ret >= 0 && - s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; + if (s->job) { + copy_to_target = s->job->ret >= 0 && + !job_is_cancelled(&s->job->common.job) && + s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; + } if (copy_to_target) { /* The guest might concurrently modify the data to write; but @@ -1471,6 +1508,8 @@ static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs, qemu_iovec_init(&bounce_qiov, 1); qemu_iovec_add(&bounce_qiov, bounce_buf, bytes); qiov = &bounce_qiov; + + flags &= ~BDRV_REQ_REGISTERED_BUF; } ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov, @@ -1494,14 +1533,14 @@ static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs) } static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, BdrvRequestFlags flags) + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL, flags); } static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes, NULL, 0); @@ -1572,6 +1611,7 @@ static BlockDriver bdrv_mirror_top = { .bdrv_child_perm = bdrv_mirror_top_child_perm, .is_filter = true, + .filtered_child_is_backing = true, }; static BlockJob *mirror_start_job( @@ -1658,7 +1698,7 @@ static BlockJob *mirror_start_job( s = block_job_create(job_id, driver, NULL, mirror_top_bs, BLK_PERM_CONSISTENT_READ, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED | - BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed, + BLK_PERM_WRITE, speed, creation_flags, cb, opaque, errp); if (!s) { goto fail; @@ -1702,9 +1742,7 @@ static BlockJob *mirror_start_job( target_perms |= BLK_PERM_RESIZE; } - target_shared_perms |= BLK_PERM_CONSISTENT_READ - | BLK_PERM_WRITE - | BLK_PERM_GRAPH_MOD; + target_shared_perms |= BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE; } else if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) { /* * We may want to allow this in the future, but it 
would @@ -1715,10 +1753,6 @@ static BlockJob *mirror_start_job( goto fail; } - if (backing_mode != MIRROR_LEAVE_BACKING_CHAIN) { - target_perms |= BLK_PERM_GRAPH_MOD; - } - s->target = blk_new(s->common.job.aio_context, target_perms, target_shared_perms); ret = blk_insert_bs(s->target, target, errp); @@ -1865,6 +1899,8 @@ void mirror_start(const char *job_id, BlockDriverState *bs, bool is_none_mode; BlockDriverState *base; + GLOBAL_STATE_CODE(); + if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) || (mode == MIRROR_SYNC_MODE_BITMAP)) { error_setg(errp, "Sync mode '%s' not supported", @@ -1890,6 +1926,8 @@ BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs, bool base_read_only; BlockJob *job; + GLOBAL_STATE_CODE(); + base_read_only = bdrv_is_read_only(base); if (base_read_only) { diff --git a/block/monitor/bitmap-qmp-cmds.c b/block/monitor/bitmap-qmp-cmds.c index 9f11deec64..282363606f 100644 --- a/block/monitor/bitmap-qmp-cmds.c +++ b/block/monitor/bitmap-qmp-cmds.c @@ -56,6 +56,8 @@ BdrvDirtyBitmap *block_dirty_bitmap_lookup(const char *node, BlockDriverState *bs; BdrvDirtyBitmap *bitmap; + GLOBAL_STATE_CODE(); + if (!node) { error_setg(errp, "Node cannot be NULL"); return NULL; @@ -155,6 +157,8 @@ BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name, BdrvDirtyBitmap *bitmap; AioContext *aio_context; + GLOBAL_STATE_CODE(); + bitmap = block_dirty_bitmap_lookup(node, name, &bs, errp); if (!bitmap || !bs) { return NULL; @@ -253,25 +257,21 @@ void qmp_block_dirty_bitmap_disable(const char *node, const char *name, } BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *node, const char *target, - BlockDirtyBitmapMergeSourceList *bms, + BlockDirtyBitmapOrStrList *bms, HBitmap **backup, Error **errp) { BlockDriverState *bs; - BdrvDirtyBitmap *dst, *src, *anon; - BlockDirtyBitmapMergeSourceList *lst; - Error *local_err = NULL; + BdrvDirtyBitmap *dst, *src; + BlockDirtyBitmapOrStrList *lst; + HBitmap *local_backup = NULL; + + GLOBAL_STATE_CODE(); dst = block_dirty_bitmap_lookup(node, target, &bs, errp); if (!dst) { return NULL; } - anon = bdrv_create_dirty_bitmap(bs, bdrv_dirty_bitmap_granularity(dst), - NULL, errp); - if (!anon) { - return NULL; - } - for (lst = bms; lst; lst = lst->next) { switch (lst->value->type) { const char *name, *node; @@ -280,8 +280,7 @@ BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *node, const char *target, src = bdrv_find_dirty_bitmap(bs, name); if (!src) { error_setg(errp, "Dirty bitmap '%s' not found", name); - dst = NULL; - goto out; + goto fail; } break; case QTYPE_QDICT: @@ -289,32 +288,40 @@ BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *node, const char *target, name = lst->value->u.external.name; src = block_dirty_bitmap_lookup(node, name, NULL, errp); if (!src) { - dst = NULL; - goto out; + goto fail; } break; default: abort(); } - bdrv_merge_dirty_bitmap(anon, src, NULL, &local_err); - if (local_err) { - error_propagate(errp, local_err); - dst = NULL; - goto out; + /* We do backup only for first merge operation */ + if (!bdrv_merge_dirty_bitmap(dst, src, + local_backup ? NULL : &local_backup, + errp)) + { + goto fail; } } - /* Merge into dst; dst is unchanged on failure. 
*/ - bdrv_merge_dirty_bitmap(dst, anon, backup, errp); + if (backup) { + *backup = local_backup; + } else { + hbitmap_free(local_backup); + } - out: - bdrv_release_dirty_bitmap(anon); return dst; + +fail: + if (local_backup) { + bdrv_restore_dirty_bitmap(dst, local_backup); + } + + return NULL; } void qmp_block_dirty_bitmap_merge(const char *node, const char *target, - BlockDirtyBitmapMergeSourceList *bitmaps, + BlockDirtyBitmapOrStrList *bitmaps, Error **errp) { block_dirty_bitmap_merge(node, target, bitmaps, NULL, errp); diff --git a/block/monitor/block-hmp-cmds.c b/block/monitor/block-hmp-cmds.c index 3e6670c963..cf21b5e40a 100644 --- a/block/monitor/block-hmp-cmds.c +++ b/block/monitor/block-hmp-cmds.c @@ -101,7 +101,7 @@ void hmp_drive_add(Monitor *mon, const QDict *qdict) return; } - opts = drive_def(optstr); + opts = qemu_opts_parse_noisily(qemu_find_opts("drive"), optstr, false); if (!opts) return; @@ -213,15 +213,17 @@ void hmp_commit(Monitor *mon, const QDict *qdict) error_report("Device '%s' not found", device); return; } - if (!blk_is_available(blk)) { - error_report("Device '%s' has no medium", device); - return; - } bs = bdrv_skip_implicit_filters(blk_bs(blk)); aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); + if (!blk_is_available(blk)) { + error_report("Device '%s' has no medium", device); + aio_context_release(aio_context); + return; + } + ret = bdrv_commit(bs); aio_context_release(aio_context); @@ -251,10 +253,10 @@ void hmp_drive_mirror(Monitor *mon, const QDict *qdict) if (!filename) { error_setg(&err, QERR_MISSING_PARAMETER, "target"); - hmp_handle_error(mon, err); - return; + goto end; } qmp_drive_mirror(&mirror, &err); +end: hmp_handle_error(mon, err); } @@ -281,11 +283,11 @@ void hmp_drive_backup(Monitor *mon, const QDict *qdict) if (!filename) { error_setg(&err, QERR_MISSING_PARAMETER, "target"); - hmp_handle_error(mon, err); - return; + goto end; } qmp_drive_backup(&backup, &err); +end: hmp_handle_error(mon, err); } @@ -356,8 +358,7 @@ void hmp_snapshot_blkdev(Monitor *mon, const QDict *qdict) * will be taken internally. Today it's actually required. */ error_setg(&err, QERR_MISSING_PARAMETER, "snapshot-file"); - hmp_handle_error(mon, err); - return; + goto end; } mode = reuse ? NEW_IMAGE_MODE_EXISTING : NEW_IMAGE_MODE_ABSOLUTE_PATHS; @@ -365,6 +366,7 @@ void hmp_snapshot_blkdev(Monitor *mon, const QDict *qdict) filename, false, NULL, !!format, format, true, mode, &err); +end: hmp_handle_error(mon, err); } @@ -489,7 +491,7 @@ void hmp_nbd_server_stop(Monitor *mon, const QDict *qdict) hmp_handle_error(mon, err); } -void hmp_block_resize(Monitor *mon, const QDict *qdict) +void coroutine_fn hmp_block_resize(Monitor *mon, const QDict *qdict) { const char *device = qdict_get_str(qdict, "device"); int64_t size = qdict_get_int(qdict, "size"); @@ -638,16 +640,16 @@ static void print_block_info(Monitor *mon, BlockInfo *info, assert(!info || !info->has_inserted || info->inserted == inserted); if (info && *info->device) { - monitor_printf(mon, "%s", info->device); + monitor_puts(mon, info->device); if (inserted && inserted->has_node_name) { monitor_printf(mon, " (%s)", inserted->node_name); } } else { assert(info || inserted); - monitor_printf(mon, "%s", - inserted && inserted->has_node_name ? inserted->node_name - : info && info->has_qdev ? info->qdev - : ""); + monitor_puts(mon, + inserted && inserted->has_node_name ? inserted->node_name + : info && info->has_qdev ? 
info->qdev + : ""); } if (inserted) { diff --git a/block/nbd.c b/block/nbd.c index f6ff1c4fb4..7d485c86d2 100644 --- a/block/nbd.c +++ b/block/nbd.c @@ -35,7 +35,6 @@ #include "qemu/option.h" #include "qemu/cutils.h" #include "qemu/main-loop.h" -#include "qemu/atomic.h" #include "qapi/qapi-visit-sockets.h" #include "qapi/qmp/qstring.h" @@ -57,7 +56,7 @@ typedef struct { Coroutine *coroutine; uint64_t offset; /* original offset of the request */ - bool receiving; /* waiting for connection_co? */ + bool receiving; /* sleeping in the yield in nbd_receive_replies */ } NBDClientRequest; typedef enum NBDClientState { @@ -71,29 +70,39 @@ typedef struct BDRVNBDState { QIOChannel *ioc; /* The current I/O channel */ NBDExportInfo info; - CoMutex send_mutex; - CoQueue free_sema; - Coroutine *connection_co; - Coroutine *teardown_co; - QemuCoSleep reconnect_sleep; - bool drained; - bool wait_drained_end; - int in_flight; + /* + * Protects state, free_sema, in_flight, requests[].coroutine, + * reconnect_delay_timer. + */ + QemuMutex requests_lock; NBDClientState state; - bool wait_in_flight; - + CoQueue free_sema; + unsigned in_flight; + NBDClientRequest requests[MAX_NBD_REQUESTS]; QEMUTimer *reconnect_delay_timer; - NBDClientRequest requests[MAX_NBD_REQUESTS]; + /* Protects sending data on the socket. */ + CoMutex send_mutex; + + /* + * Protects receiving reply headers from the socket, as well as the + * fields reply and requests[].receiving + */ + CoMutex receive_mutex; NBDReply reply; + + QEMUTimer *open_timer; + BlockDriverState *bs; /* Connection parameters */ uint32_t reconnect_delay; + uint32_t open_timeout; SocketAddress *saddr; - char *export, *tlscredsid; + char *export; + char *tlscredsid; QCryptoTLSCreds *tlscreds; - const char *hostname; + char *tlshostname; char *x_dirty_bitmap; bool alloc_depth; @@ -111,6 +120,10 @@ static void nbd_clear_bdrvstate(BlockDriverState *bs) yank_unregister_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name)); + /* Must not leave timers behind that would access freed data */ + assert(!s->reconnect_delay_timer); + assert(!s->open_timer); + object_unref(OBJECT(s->tlscreds)); qapi_free_SocketAddress(s->saddr); s->saddr = NULL; @@ -118,42 +131,57 @@ static void nbd_clear_bdrvstate(BlockDriverState *bs) s->export = NULL; g_free(s->tlscredsid); s->tlscredsid = NULL; + g_free(s->tlshostname); + s->tlshostname = NULL; g_free(s->x_dirty_bitmap); s->x_dirty_bitmap = NULL; } -static bool nbd_client_connected(BDRVNBDState *s) +/* Called with s->receive_mutex taken. */ +static bool coroutine_fn nbd_recv_coroutine_wake_one(NBDClientRequest *req) { - return qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTED; + if (req->receiving) { + req->receiving = false; + aio_co_wake(req->coroutine); + return true; + } + + return false; } -static void nbd_channel_error(BDRVNBDState *s, int ret) +static void coroutine_fn nbd_recv_coroutines_wake(BDRVNBDState *s) { + int i; + + QEMU_LOCK_GUARD(&s->receive_mutex); + for (i = 0; i < MAX_NBD_REQUESTS; i++) { + if (nbd_recv_coroutine_wake_one(&s->requests[i])) { + return; + } + } +} + +/* Called with s->requests_lock held. */ +static void coroutine_fn nbd_channel_error_locked(BDRVNBDState *s, int ret) +{ + if (s->state == NBD_CLIENT_CONNECTED) { + qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL); + } + if (ret == -EIO) { - if (nbd_client_connected(s)) { + if (s->state == NBD_CLIENT_CONNECTED) { s->state = s->reconnect_delay ? 
NBD_CLIENT_CONNECTING_WAIT : NBD_CLIENT_CONNECTING_NOWAIT; } } else { - if (nbd_client_connected(s)) { - qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL); - } s->state = NBD_CLIENT_QUIT; } } -static void nbd_recv_coroutines_wake_all(BDRVNBDState *s) +static void coroutine_fn nbd_channel_error(BDRVNBDState *s, int ret) { - int i; - - for (i = 0; i < MAX_NBD_REQUESTS; i++) { - NBDClientRequest *req = &s->requests[i]; - - if (req->coroutine && req->receiving) { - req->receiving = false; - aio_co_wake(req->coroutine); - } - } + QEMU_LOCK_GUARD(&s->requests_lock); + nbd_channel_error_locked(s, ret); } static void reconnect_delay_timer_del(BDRVNBDState *s) @@ -168,22 +196,18 @@ static void reconnect_delay_timer_cb(void *opaque) { BDRVNBDState *s = opaque; - if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) { - s->state = NBD_CLIENT_CONNECTING_NOWAIT; - while (qemu_co_enter_next(&s->free_sema, NULL)) { - /* Resume all queued requests */ - } - } - reconnect_delay_timer_del(s); + WITH_QEMU_LOCK_GUARD(&s->requests_lock) { + if (s->state != NBD_CLIENT_CONNECTING_WAIT) { + return; + } + s->state = NBD_CLIENT_CONNECTING_NOWAIT; + } + nbd_co_establish_connection_cancel(s->conn); } static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns) { - if (qatomic_load_acquire(&s->state) != NBD_CLIENT_CONNECTING_WAIT) { - return; - } - assert(!s->reconnect_delay_timer); s->reconnect_delay_timer = aio_timer_new(bdrv_get_aio_context(s->bs), QEMU_CLOCK_REALTIME, @@ -192,125 +216,58 @@ static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns) timer_mod(s->reconnect_delay_timer, expire_time_ns); } -static void nbd_client_detach_aio_context(BlockDriverState *bs) -{ - BDRVNBDState *s = (BDRVNBDState *)bs->opaque; - - /* Timer is deleted in nbd_client_co_drain_begin() */ - assert(!s->reconnect_delay_timer); - /* - * If reconnect is in progress we may have no ->ioc. It will be - * re-instantiated in the proper aio context once the connection is - * reestablished. - */ - if (s->ioc) { - qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc)); - } -} - -static void nbd_client_attach_aio_context_bh(void *opaque) -{ - BlockDriverState *bs = opaque; - BDRVNBDState *s = (BDRVNBDState *)bs->opaque; - - if (s->connection_co) { - /* - * The node is still drained, so we know the coroutine has yielded in - * nbd_read_eof(), the only place where bs->in_flight can reach 0, or - * it is entered for the first time. Both places are safe for entering - * the coroutine. - */ - qemu_aio_coroutine_enter(bs->aio_context, s->connection_co); - } - bdrv_dec_in_flight(bs); -} - -static void nbd_client_attach_aio_context(BlockDriverState *bs, - AioContext *new_context) -{ - BDRVNBDState *s = (BDRVNBDState *)bs->opaque; - - /* - * s->connection_co is either yielded from nbd_receive_reply or from - * nbd_co_reconnect_loop() - */ - if (nbd_client_connected(s)) { - qio_channel_attach_aio_context(QIO_CHANNEL(s->ioc), new_context); - } - - bdrv_inc_in_flight(bs); - - /* - * Need to wait here for the BH to run because the BH must run while the - * node is still drained. 
- */ - aio_wait_bh_oneshot(new_context, nbd_client_attach_aio_context_bh, bs); -} - -static void coroutine_fn nbd_client_co_drain_begin(BlockDriverState *bs) -{ - BDRVNBDState *s = (BDRVNBDState *)bs->opaque; - - s->drained = true; - qemu_co_sleep_wake(&s->reconnect_sleep); - - nbd_co_establish_connection_cancel(s->conn); - - reconnect_delay_timer_del(s); - - if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) { - s->state = NBD_CLIENT_CONNECTING_NOWAIT; - qemu_co_queue_restart_all(&s->free_sema); - } -} - -static void coroutine_fn nbd_client_co_drain_end(BlockDriverState *bs) -{ - BDRVNBDState *s = (BDRVNBDState *)bs->opaque; - - s->drained = false; - if (s->wait_drained_end) { - s->wait_drained_end = false; - aio_co_wake(s->connection_co); - } -} - - static void nbd_teardown_connection(BlockDriverState *bs) { BDRVNBDState *s = (BDRVNBDState *)bs->opaque; + assert(!s->in_flight); + if (s->ioc) { - /* finish any pending coroutines */ qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL); + yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name), + nbd_yank, s->bs); + object_unref(OBJECT(s->ioc)); + s->ioc = NULL; } - s->state = NBD_CLIENT_QUIT; - if (s->connection_co) { - qemu_co_sleep_wake(&s->reconnect_sleep); - nbd_co_establish_connection_cancel(s->conn); + WITH_QEMU_LOCK_GUARD(&s->requests_lock) { + s->state = NBD_CLIENT_QUIT; } - if (qemu_in_coroutine()) { - s->teardown_co = qemu_coroutine_self(); - /* connection_co resumes us when it terminates */ - qemu_coroutine_yield(); - s->teardown_co = NULL; - } else { - BDRV_POLL_WHILE(bs, s->connection_co); - } - assert(!s->connection_co); } -static bool nbd_client_connecting(BDRVNBDState *s) +static void open_timer_del(BDRVNBDState *s) { - NBDClientState state = qatomic_load_acquire(&s->state); - return state == NBD_CLIENT_CONNECTING_WAIT || - state == NBD_CLIENT_CONNECTING_NOWAIT; + if (s->open_timer) { + timer_free(s->open_timer); + s->open_timer = NULL; + } } -static bool nbd_client_connecting_wait(BDRVNBDState *s) +static void open_timer_cb(void *opaque) { - return qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT; + BDRVNBDState *s = opaque; + + nbd_co_establish_connection_cancel(s->conn); + open_timer_del(s); +} + +static void open_timer_init(BDRVNBDState *s, uint64_t expire_time_ns) +{ + assert(!s->open_timer); + s->open_timer = aio_timer_new(bdrv_get_aio_context(s->bs), + QEMU_CLOCK_REALTIME, + SCALE_NS, + open_timer_cb, s); + timer_mod(s->open_timer, expire_time_ns); +} + +static bool nbd_client_will_reconnect(BDRVNBDState *s) +{ + /* + * Called only after a socket error, so this is not performance sensitive. 
+ */ + QEMU_LOCK_GUARD(&s->requests_lock); + return s->state == NBD_CLIENT_CONNECTING_WAIT; } /* @@ -359,14 +316,15 @@ static int nbd_handle_updated_info(BlockDriverState *bs, Error **errp) } int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs, - Error **errp) + bool blocking, Error **errp) { BDRVNBDState *s = (BDRVNBDState *)bs->opaque; int ret; + IO_CODE(); assert(!s->ioc); - s->ioc = nbd_co_establish_connection(s->conn, &s->info, true, errp); + s->ioc = nbd_co_establish_connection(s->conn, &s->info, blocking, errp); if (!s->ioc) { return -ECONNREFUSED; } @@ -396,41 +354,45 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs, qio_channel_attach_aio_context(s->ioc, bdrv_get_aio_context(bs)); /* successfully connected */ - s->state = NBD_CLIENT_CONNECTED; - qemu_co_queue_restart_all(&s->free_sema); + WITH_QEMU_LOCK_GUARD(&s->requests_lock) { + s->state = NBD_CLIENT_CONNECTED; + } return 0; } +/* Called with s->requests_lock held. */ +static bool nbd_client_connecting(BDRVNBDState *s) +{ + return s->state == NBD_CLIENT_CONNECTING_WAIT || + s->state == NBD_CLIENT_CONNECTING_NOWAIT; +} + +/* Called with s->requests_lock taken. */ static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s) { - if (!nbd_client_connecting(s)) { - return; - } - - /* Wait for completion of all in-flight requests */ - - qemu_co_mutex_lock(&s->send_mutex); - - while (s->in_flight > 0) { - qemu_co_mutex_unlock(&s->send_mutex); - nbd_recv_coroutines_wake_all(s); - s->wait_in_flight = true; - qemu_coroutine_yield(); - s->wait_in_flight = false; - qemu_co_mutex_lock(&s->send_mutex); - } - - qemu_co_mutex_unlock(&s->send_mutex); - - if (!nbd_client_connecting(s)) { - return; - } + int ret; + bool blocking = s->state == NBD_CLIENT_CONNECTING_WAIT; /* * Now we are sure that nobody is accessing the channel, and no one will * try until we set the state to CONNECTED. */ + assert(nbd_client_connecting(s)); + assert(s->in_flight == 1); + + trace_nbd_reconnect_attempt(s->bs->in_flight); + + if (blocking && !s->reconnect_delay_timer) { + /* + * It's the first reconnect attempt after switching to + * NBD_CLIENT_CONNECTING_WAIT + */ + g_assert(s->reconnect_delay); + reconnect_delay_timer_init(s, + qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + + s->reconnect_delay * NANOSECONDS_PER_SECOND); + } /* Finalize previous connection if any */ if (s->ioc) { @@ -441,158 +403,107 @@ static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s) s->ioc = NULL; } - nbd_co_do_establish_connection(s->bs, NULL); -} - -static coroutine_fn void nbd_co_reconnect_loop(BDRVNBDState *s) -{ - uint64_t timeout = 1 * NANOSECONDS_PER_SECOND; - uint64_t max_timeout = 16 * NANOSECONDS_PER_SECOND; - - if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) { - reconnect_delay_timer_init(s, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + - s->reconnect_delay * NANOSECONDS_PER_SECOND); - } - - nbd_reconnect_attempt(s); - - while (nbd_client_connecting(s)) { - if (s->drained) { - bdrv_dec_in_flight(s->bs); - s->wait_drained_end = true; - while (s->drained) { - /* - * We may be entered once from nbd_client_attach_aio_context_bh - * and then from nbd_client_co_drain_end. So here is a loop. 
- */ - qemu_coroutine_yield(); - } - bdrv_inc_in_flight(s->bs); - } else { - qemu_co_sleep_ns_wakeable(&s->reconnect_sleep, - QEMU_CLOCK_REALTIME, timeout); - if (s->drained) { - continue; - } - if (timeout < max_timeout) { - timeout *= 2; - } - } - - nbd_reconnect_attempt(s); - } + qemu_mutex_unlock(&s->requests_lock); + ret = nbd_co_do_establish_connection(s->bs, blocking, NULL); + trace_nbd_reconnect_attempt_result(ret, s->bs->in_flight); + qemu_mutex_lock(&s->requests_lock); + /* + * The reconnect attempt is done (maybe successfully, maybe not), so + * we no longer need this timer. Delete it so it will not outlive + * this I/O request (so draining removes all timers). + */ reconnect_delay_timer_del(s); } -static coroutine_fn void nbd_connection_entry(void *opaque) +static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle) { - BDRVNBDState *s = opaque; - uint64_t i; - int ret = 0; - Error *local_err = NULL; + int ret; + uint64_t ind = HANDLE_TO_INDEX(s, handle), ind2; + QEMU_LOCK_GUARD(&s->receive_mutex); - while (qatomic_load_acquire(&s->state) != NBD_CLIENT_QUIT) { - /* - * The NBD client can only really be considered idle when it has - * yielded from qio_channel_readv_all_eof(), waiting for data. This is - * the point where the additional scheduled coroutine entry happens - * after nbd_client_attach_aio_context(). - * - * Therefore we keep an additional in_flight reference all the time and - * only drop it temporarily here. - */ - - if (nbd_client_connecting(s)) { - nbd_co_reconnect_loop(s); + while (true) { + if (s->reply.handle == handle) { + /* We are done */ + return 0; } - if (!nbd_client_connected(s)) { + if (s->reply.handle != 0) { + /* + * Some other request is being handled now. It should already be + * woken by whoever set s->reply.handle (or never wait in this + * yield). So, we should not wake it here. + */ + ind2 = HANDLE_TO_INDEX(s, s->reply.handle); + assert(!s->requests[ind2].receiving); + + s->requests[ind].receiving = true; + qemu_co_mutex_unlock(&s->receive_mutex); + + qemu_coroutine_yield(); + /* + * We may be woken for 2 reasons: + * 1. From this function, executing in parallel coroutine, when our + * handle is received. + * 2. From nbd_co_receive_one_chunk(), when previous request is + * finished and s->reply.handle set to 0. + * Anyway, it's OK to lock the mutex and go to the next iteration. + */ + + qemu_co_mutex_lock(&s->receive_mutex); + assert(!s->requests[ind].receiving); continue; } + /* We are under mutex and handle is 0. We have to do the dirty work. */ assert(s->reply.handle == 0); - ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, &local_err); - - if (local_err) { - trace_nbd_read_reply_entry_fail(ret, error_get_pretty(local_err)); - error_free(local_err); - local_err = NULL; - } + ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, NULL); if (ret <= 0) { - nbd_channel_error(s, ret ? ret : -EIO); - continue; + ret = ret ? ret : -EIO; + nbd_channel_error(s, ret); + return ret; } - - /* - * There's no need for a mutex on the receive side, because the - * handler acts as a synchronization point and ensures that only - * one coroutine is called until the reply finishes. 
- */ - i = HANDLE_TO_INDEX(s, s->reply.handle); - if (i >= MAX_NBD_REQUESTS || - !s->requests[i].coroutine || - !s->requests[i].receiving || - (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply)) - { + if (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply) { nbd_channel_error(s, -EINVAL); - continue; + return -EINVAL; } - - /* - * We're woken up again by the request itself. Note that there - * is no race between yielding and reentering connection_co. This - * is because: - * - * - if the request runs on the same AioContext, it is only - * entered after we yield - * - * - if the request runs on a different AioContext, reentering - * connection_co happens through a bottom half, which can only - * run after we yield. - */ - s->requests[i].receiving = false; - aio_co_wake(s->requests[i].coroutine); - qemu_coroutine_yield(); + ind2 = HANDLE_TO_INDEX(s, s->reply.handle); + if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].coroutine) { + nbd_channel_error(s, -EINVAL); + return -EINVAL; + } + if (s->reply.handle == handle) { + /* We are done */ + return 0; + } + nbd_recv_coroutine_wake_one(&s->requests[ind2]); } - - qemu_co_queue_restart_all(&s->free_sema); - nbd_recv_coroutines_wake_all(s); - bdrv_dec_in_flight(s->bs); - - s->connection_co = NULL; - if (s->ioc) { - qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc)); - yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name), - nbd_yank, s->bs); - object_unref(OBJECT(s->ioc)); - s->ioc = NULL; - } - - if (s->teardown_co) { - aio_co_wake(s->teardown_co); - } - aio_wait_kick(); } -static int nbd_co_send_request(BlockDriverState *bs, - NBDRequest *request, - QEMUIOVector *qiov) +static int coroutine_fn nbd_co_send_request(BlockDriverState *bs, + NBDRequest *request, + QEMUIOVector *qiov) { BDRVNBDState *s = (BDRVNBDState *)bs->opaque; int rc, i = -1; - qemu_co_mutex_lock(&s->send_mutex); - while (s->in_flight == MAX_NBD_REQUESTS || nbd_client_connecting_wait(s)) { - qemu_co_queue_wait(&s->free_sema, &s->send_mutex); - } - - if (!nbd_client_connected(s)) { - rc = -EIO; - goto err; + qemu_mutex_lock(&s->requests_lock); + while (s->in_flight == MAX_NBD_REQUESTS || + (s->state != NBD_CLIENT_CONNECTED && s->in_flight > 0)) { + qemu_co_queue_wait(&s->free_sema, &s->requests_lock); } s->in_flight++; + if (s->state != NBD_CLIENT_CONNECTED) { + if (nbd_client_connecting(s)) { + nbd_reconnect_attempt(s); + qemu_co_queue_restart_all(&s->free_sema); + } + if (s->state != NBD_CLIENT_CONNECTED) { + rc = -EIO; + goto err; + } + } for (i = 0; i < MAX_NBD_REQUESTS; i++) { if (s->requests[i].coroutine == NULL) { @@ -600,13 +511,13 @@ static int nbd_co_send_request(BlockDriverState *bs, } } - g_assert(qemu_in_coroutine()); assert(i < MAX_NBD_REQUESTS); - s->requests[i].coroutine = qemu_coroutine_self(); s->requests[i].offset = request->from; s->requests[i].receiving = false; + qemu_mutex_unlock(&s->requests_lock); + qemu_co_mutex_lock(&s->send_mutex); request->handle = INDEX_TO_HANDLE(s, i); assert(s->ioc); @@ -614,33 +525,27 @@ static int nbd_co_send_request(BlockDriverState *bs, if (qiov) { qio_channel_set_cork(s->ioc, true); rc = nbd_send_request(s->ioc, request); - if (nbd_client_connected(s) && rc >= 0) { - if (qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov, - NULL) < 0) { - rc = -EIO; - } - } else if (rc >= 0) { + if (rc >= 0 && qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov, + NULL) < 0) { rc = -EIO; } qio_channel_set_cork(s->ioc, false); } else { rc = nbd_send_request(s->ioc, request); } + 
qemu_co_mutex_unlock(&s->send_mutex); -err: if (rc < 0) { - nbd_channel_error(s, rc); + qemu_mutex_lock(&s->requests_lock); +err: + nbd_channel_error_locked(s, rc); if (i != -1) { s->requests[i].coroutine = NULL; - s->in_flight--; - } - if (s->in_flight == 0 && s->wait_in_flight) { - aio_co_wake(s->connection_co); - } else { - qemu_co_queue_next(&s->free_sema); } + s->in_flight--; + qemu_co_queue_next(&s->free_sema); + qemu_mutex_unlock(&s->requests_lock); } - qemu_co_mutex_unlock(&s->send_mutex); return rc; } @@ -827,9 +732,9 @@ static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk, return 0; } -static int nbd_co_receive_offset_data_payload(BDRVNBDState *s, - uint64_t orig_offset, - QEMUIOVector *qiov, Error **errp) +static int coroutine_fn +nbd_co_receive_offset_data_payload(BDRVNBDState *s, uint64_t orig_offset, + QEMUIOVector *qiov, Error **errp) { QEMUIOVector sub_qiov; uint64_t offset; @@ -935,11 +840,8 @@ static coroutine_fn int nbd_co_do_receive_one_chunk( } *request_ret = 0; - /* Wait until we're woken up by nbd_connection_entry. */ - s->requests[i].receiving = true; - qemu_coroutine_yield(); - assert(!s->requests[i].receiving); - if (!nbd_client_connected(s)) { + ret = nbd_receive_replies(s, handle); + if (ret < 0) { error_setg(errp, "Connection closed"); return -EIO; } @@ -1031,14 +933,7 @@ static coroutine_fn int nbd_co_receive_one_chunk( } s->reply.handle = 0; - if (s->connection_co && !s->wait_in_flight) { - /* - * We must check s->wait_in_flight, because we may entered by - * nbd_recv_coroutines_wake_all(), in this case we should not - * wake connection_co here, it will woken by last request. - */ - aio_co_wake(s->connection_co); - } + nbd_recv_coroutines_wake(s); return ret; } @@ -1088,21 +983,17 @@ static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret) * nbd_reply_chunk_iter_receive * The pointer stored in @payload requires g_free() to free it. */ -static bool nbd_reply_chunk_iter_receive(BDRVNBDState *s, - NBDReplyChunkIter *iter, - uint64_t handle, - QEMUIOVector *qiov, NBDReply *reply, - void **payload) +static bool coroutine_fn nbd_reply_chunk_iter_receive(BDRVNBDState *s, + NBDReplyChunkIter *iter, + uint64_t handle, + QEMUIOVector *qiov, + NBDReply *reply, + void **payload) { int ret, request_ret; NBDReply local_reply; NBDStructuredReplyChunk *chunk; Error *local_err = NULL; - if (!nbd_client_connected(s)) { - error_setg(&local_err, "Connection closed"); - nbd_iter_channel_error(iter, -EIO, &local_err); - goto break_loop; - } if (iter->done) { /* Previous iteration was last. */ @@ -1123,7 +1014,7 @@ static bool nbd_reply_chunk_iter_receive(BDRVNBDState *s, } /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. 
*/ - if (nbd_reply_is_simple(reply) || !nbd_client_connected(s)) { + if (nbd_reply_is_simple(reply) || iter->ret < 0) { goto break_loop; } @@ -1145,22 +1036,17 @@ static bool nbd_reply_chunk_iter_receive(BDRVNBDState *s, return true; break_loop: + qemu_mutex_lock(&s->requests_lock); s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL; - - qemu_co_mutex_lock(&s->send_mutex); s->in_flight--; - if (s->in_flight == 0 && s->wait_in_flight) { - aio_co_wake(s->connection_co); - } else { - qemu_co_queue_next(&s->free_sema); - } - qemu_co_mutex_unlock(&s->send_mutex); + qemu_co_queue_next(&s->free_sema); + qemu_mutex_unlock(&s->requests_lock); return false; } -static int nbd_co_receive_return_code(BDRVNBDState *s, uint64_t handle, - int *request_ret, Error **errp) +static int coroutine_fn nbd_co_receive_return_code(BDRVNBDState *s, uint64_t handle, + int *request_ret, Error **errp) { NBDReplyChunkIter iter; @@ -1173,9 +1059,9 @@ static int nbd_co_receive_return_code(BDRVNBDState *s, uint64_t handle, return iter.ret; } -static int nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t handle, - uint64_t offset, QEMUIOVector *qiov, - int *request_ret, Error **errp) +static int coroutine_fn nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t handle, + uint64_t offset, QEMUIOVector *qiov, + int *request_ret, Error **errp) { NBDReplyChunkIter iter; NBDReply reply; @@ -1225,10 +1111,10 @@ static int nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t handle, return iter.ret; } -static int nbd_co_receive_blockstatus_reply(BDRVNBDState *s, - uint64_t handle, uint64_t length, - NBDExtent *extent, - int *request_ret, Error **errp) +static int coroutine_fn nbd_co_receive_blockstatus_reply(BDRVNBDState *s, + uint64_t handle, uint64_t length, + NBDExtent *extent, + int *request_ret, Error **errp) { NBDReplyChunkIter iter; NBDReply reply; @@ -1285,8 +1171,8 @@ static int nbd_co_receive_blockstatus_reply(BDRVNBDState *s, return iter.ret; } -static int nbd_co_request(BlockDriverState *bs, NBDRequest *request, - QEMUIOVector *write_qiov) +static int coroutine_fn nbd_co_request(BlockDriverState *bs, NBDRequest *request, + QEMUIOVector *write_qiov) { int ret, request_ret; Error *local_err = NULL; @@ -1317,13 +1203,14 @@ static int nbd_co_request(BlockDriverState *bs, NBDRequest *request, error_free(local_err); local_err = NULL; } - } while (ret < 0 && nbd_client_connecting_wait(s)); + } while (ret < 0 && nbd_client_will_reconnect(s)); return ret ? ret : request_ret; } -static int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, int flags) +static int coroutine_fn nbd_client_co_preadv(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { int ret, request_ret; Error *local_err = NULL; @@ -1335,7 +1222,6 @@ static int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset, }; assert(bytes <= NBD_MAX_BUFFER_SIZE); - assert(!flags); if (!bytes) { return 0; @@ -1375,13 +1261,14 @@ static int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset, error_free(local_err); local_err = NULL; } - } while (ret < 0 && nbd_client_connecting_wait(s)); + } while (ret < 0 && nbd_client_will_reconnect(s)); return ret ? 
ret : request_ret; } -static int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, int flags) +static int coroutine_fn nbd_client_co_pwritev(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { BDRVNBDState *s = (BDRVNBDState *)bs->opaque; NBDRequest request = { @@ -1404,16 +1291,18 @@ static int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset, return nbd_co_request(bs, &request, qiov); } -static int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, - int bytes, BdrvRequestFlags flags) +static int coroutine_fn nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, + int64_t bytes, BdrvRequestFlags flags) { BDRVNBDState *s = (BDRVNBDState *)bs->opaque; NBDRequest request = { .type = NBD_CMD_WRITE_ZEROES, .from = offset, - .len = bytes, + .len = bytes, /* .len is uint32_t actually */ }; + assert(bytes <= UINT32_MAX); /* rely on max_pwrite_zeroes */ + assert(!(s->info.flags & NBD_FLAG_READ_ONLY)); if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) { return -ENOTSUP; @@ -1437,7 +1326,7 @@ static int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, return nbd_co_request(bs, &request, NULL); } -static int nbd_client_co_flush(BlockDriverState *bs) +static int coroutine_fn nbd_client_co_flush(BlockDriverState *bs) { BDRVNBDState *s = (BDRVNBDState *)bs->opaque; NBDRequest request = { .type = NBD_CMD_FLUSH }; @@ -1452,16 +1341,18 @@ static int nbd_client_co_flush(BlockDriverState *bs) return nbd_co_request(bs, &request, NULL); } -static int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, - int bytes) +static int coroutine_fn nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, + int64_t bytes) { BDRVNBDState *s = (BDRVNBDState *)bs->opaque; NBDRequest request = { .type = NBD_CMD_TRIM, .from = offset, - .len = bytes, + .len = bytes, /* len is uint32_t */ }; + assert(bytes <= UINT32_MAX); /* rely on max_pdiscard */ + assert(!(s->info.flags & NBD_FLAG_READ_ONLY)); if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) { return 0; @@ -1528,7 +1419,7 @@ static int coroutine_fn nbd_client_co_block_status( error_free(local_err); local_err = NULL; } - } while (ret < 0 && nbd_client_connecting_wait(s)); + } while (ret < 0 && nbd_client_will_reconnect(s)); if (ret < 0 || request_ret < 0) { return ret ? ret : request_ret; @@ -1560,8 +1451,9 @@ static void nbd_yank(void *opaque) BlockDriverState *bs = opaque; BDRVNBDState *s = (BDRVNBDState *)bs->opaque; - qatomic_store_release(&s->state, NBD_CLIENT_QUIT); + QEMU_LOCK_GUARD(&s->requests_lock); qio_channel_shutdown(QIO_CHANNEL(s->ioc), QIO_CHANNEL_SHUTDOWN_BOTH, NULL); + s->state = NBD_CLIENT_QUIT; } static void nbd_client_close(BlockDriverState *bs) @@ -1880,6 +1772,11 @@ static QemuOptsList nbd_runtime_opts = { .type = QEMU_OPT_STRING, .help = "ID of the TLS credentials to use", }, + { + .name = "tls-hostname", + .type = QEMU_OPT_STRING, + .help = "Override hostname for validating TLS x509 certificate", + }, { .name = "x-dirty-bitmap", .type = QEMU_OPT_STRING, @@ -1897,6 +1794,15 @@ static QemuOptsList nbd_runtime_opts = { "future requests before a successful reconnect will " "immediately fail. Default 0", }, + { + .name = "open-timeout", + .type = QEMU_OPT_NUMBER, + .help = "In seconds. If zero, the nbd driver tries the connection " + "only once, and fails to open if the connection fails. 
" + "If non-zero, the nbd driver will repeat connection " + "attempts until successful or until @open-timeout seconds " + "have elapsed. Default 0", + }, { /* end of list */ } }, }; @@ -1937,12 +1843,11 @@ static int nbd_process_options(BlockDriverState *bs, QDict *options, goto error; } - /* TODO SOCKET_ADDRESS_KIND_FD where fd has AF_INET or AF_INET6 */ - if (s->saddr->type != SOCKET_ADDRESS_TYPE_INET) { - error_setg(errp, "TLS only supported over IP sockets"); - goto error; + s->tlshostname = g_strdup(qemu_opt_get(opts, "tls-hostname")); + if (!s->tlshostname && + s->saddr->type == SOCKET_ADDRESS_TYPE_INET) { + s->tlshostname = g_strdup(s->saddr->u.inet.host); } - s->hostname = s->saddr->u.inet.host; } s->x_dirty_bitmap = g_strdup(qemu_opt_get(opts, "x-dirty-bitmap")); @@ -1952,6 +1857,7 @@ static int nbd_process_options(BlockDriverState *bs, QDict *options, } s->reconnect_delay = qemu_opt_get_number(opts, "reconnect-delay", 0); + s->open_timeout = qemu_opt_get_number(opts, "open-timeout", 0); ret = 0; @@ -1967,8 +1873,10 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags, BDRVNBDState *s = (BDRVNBDState *)bs->opaque; s->bs = bs; - qemu_co_mutex_init(&s->send_mutex); + qemu_mutex_init(&s->requests_lock); qemu_co_queue_init(&s->free_sema); + qemu_co_mutex_init(&s->send_mutex); + qemu_co_mutex_init(&s->receive_mutex); if (!yank_register_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name), errp)) { return -EEXIST; @@ -1980,26 +1888,39 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags, } s->conn = nbd_client_connection_new(s->saddr, true, s->export, - s->x_dirty_bitmap, s->tlscreds); + s->x_dirty_bitmap, s->tlscreds, + s->tlshostname); - /* TODO: Configurable retry-until-timeout behaviour. */ - ret = nbd_do_establish_connection(bs, errp); + if (s->open_timeout) { + nbd_client_connection_enable_retry(s->conn); + open_timer_init(s, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + + s->open_timeout * NANOSECONDS_PER_SECOND); + } + + s->state = NBD_CLIENT_CONNECTING_WAIT; + ret = nbd_do_establish_connection(bs, true, errp); if (ret < 0) { goto fail; } - s->connection_co = qemu_coroutine_create(nbd_connection_entry, s); - bdrv_inc_in_flight(bs); - aio_co_schedule(bdrv_get_aio_context(bs), s->connection_co); + /* + * The connect attempt is done, so we no longer need this timer. + * Delete it, because we do not want it to be around when this node + * is drained or closed. 
+ */ + open_timer_del(s); + + nbd_client_connection_enable_retry(s->conn); return 0; fail: + open_timer_del(s); nbd_clear_bdrvstate(bs); return ret; } -static int nbd_co_flush(BlockDriverState *bs) +static int coroutine_fn nbd_co_flush(BlockDriverState *bs) { return nbd_client_co_flush(bs); } @@ -2129,6 +2050,7 @@ static const char *const nbd_strong_runtime_opts[] = { "port", "export", "tls-creds", + "tls-hostname", "server.", NULL @@ -2140,9 +2062,48 @@ static void nbd_cancel_in_flight(BlockDriverState *bs) reconnect_delay_timer_del(s); + qemu_mutex_lock(&s->requests_lock); if (s->state == NBD_CLIENT_CONNECTING_WAIT) { s->state = NBD_CLIENT_CONNECTING_NOWAIT; - qemu_co_queue_restart_all(&s->free_sema); + } + qemu_mutex_unlock(&s->requests_lock); + + nbd_co_establish_connection_cancel(s->conn); +} + +static void nbd_attach_aio_context(BlockDriverState *bs, + AioContext *new_context) +{ + BDRVNBDState *s = bs->opaque; + + /* The open_timer is used only during nbd_open() */ + assert(!s->open_timer); + + /* + * The reconnect_delay_timer is scheduled in I/O paths when the + * connection is lost, to cancel the reconnection attempt after a + * given time. Once this attempt is done (successfully or not), + * nbd_reconnect_attempt() ensures the timer is deleted before the + * respective I/O request is resumed. + * Since the AioContext can only be changed when a node is drained, + * the reconnect_delay_timer cannot be active here. + */ + assert(!s->reconnect_delay_timer); + + if (s->ioc) { + qio_channel_attach_aio_context(s->ioc, new_context); + } +} + +static void nbd_detach_aio_context(BlockDriverState *bs) +{ + BDRVNBDState *s = bs->opaque; + + assert(!s->open_timer); + assert(!s->reconnect_delay_timer); + + if (s->ioc) { + qio_channel_detach_aio_context(s->ioc); } } @@ -2164,15 +2125,14 @@ static BlockDriver bdrv_nbd = { .bdrv_refresh_limits = nbd_refresh_limits, .bdrv_co_truncate = nbd_co_truncate, .bdrv_getlength = nbd_getlength, - .bdrv_detach_aio_context = nbd_client_detach_aio_context, - .bdrv_attach_aio_context = nbd_client_attach_aio_context, - .bdrv_co_drain_begin = nbd_client_co_drain_begin, - .bdrv_co_drain_end = nbd_client_co_drain_end, .bdrv_refresh_filename = nbd_refresh_filename, .bdrv_co_block_status = nbd_client_co_block_status, .bdrv_dirname = nbd_dirname, .strong_runtime_opts = nbd_strong_runtime_opts, .bdrv_cancel_in_flight = nbd_cancel_in_flight, + + .bdrv_attach_aio_context = nbd_attach_aio_context, + .bdrv_detach_aio_context = nbd_detach_aio_context, }; static BlockDriver bdrv_nbd_tcp = { @@ -2193,15 +2153,14 @@ static BlockDriver bdrv_nbd_tcp = { .bdrv_refresh_limits = nbd_refresh_limits, .bdrv_co_truncate = nbd_co_truncate, .bdrv_getlength = nbd_getlength, - .bdrv_detach_aio_context = nbd_client_detach_aio_context, - .bdrv_attach_aio_context = nbd_client_attach_aio_context, - .bdrv_co_drain_begin = nbd_client_co_drain_begin, - .bdrv_co_drain_end = nbd_client_co_drain_end, .bdrv_refresh_filename = nbd_refresh_filename, .bdrv_co_block_status = nbd_client_co_block_status, .bdrv_dirname = nbd_dirname, .strong_runtime_opts = nbd_strong_runtime_opts, .bdrv_cancel_in_flight = nbd_cancel_in_flight, + + .bdrv_attach_aio_context = nbd_attach_aio_context, + .bdrv_detach_aio_context = nbd_detach_aio_context, }; static BlockDriver bdrv_nbd_unix = { @@ -2222,15 +2181,14 @@ static BlockDriver bdrv_nbd_unix = { .bdrv_refresh_limits = nbd_refresh_limits, .bdrv_co_truncate = nbd_co_truncate, .bdrv_getlength = nbd_getlength, - .bdrv_detach_aio_context = nbd_client_detach_aio_context, - 
.bdrv_attach_aio_context = nbd_client_attach_aio_context, - .bdrv_co_drain_begin = nbd_client_co_drain_begin, - .bdrv_co_drain_end = nbd_client_co_drain_end, .bdrv_refresh_filename = nbd_refresh_filename, .bdrv_co_block_status = nbd_client_co_block_status, .bdrv_dirname = nbd_dirname, .strong_runtime_opts = nbd_strong_runtime_opts, .bdrv_cancel_in_flight = nbd_cancel_in_flight, + + .bdrv_attach_aio_context = nbd_attach_aio_context, + .bdrv_detach_aio_context = nbd_detach_aio_context, }; static void bdrv_nbd_init(void) diff --git a/block/nfs.c b/block/nfs.c index 9aeaefb364..ece22353ac 100644 --- a/block/nfs.c +++ b/block/nfs.c @@ -197,7 +197,7 @@ static void nfs_set_events(NFSClient *client) false, (ev & POLLIN) ? nfs_process_read : NULL, (ev & POLLOUT) ? nfs_process_write : NULL, - NULL, client); + NULL, NULL, client); } client->events = ev; @@ -223,7 +223,7 @@ static void nfs_process_write(void *arg) qemu_mutex_unlock(&client->mutex); } -static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task) +static void coroutine_fn nfs_co_init_task(BlockDriverState *bs, NFSRPC *task) { *task = (NFSRPC) { .co = qemu_coroutine_self(), @@ -262,9 +262,9 @@ nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data, nfs_co_generic_bh_cb, task); } -static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *iov, - int flags) +static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *iov, + BdrvRequestFlags flags) { NFSClient *client = bs->opaque; NFSRPC task; @@ -296,9 +296,9 @@ static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset, return 0; } -static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *iov, - int flags) +static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *iov, + BdrvRequestFlags flags) { NFSClient *client = bs->opaque; NFSRPC task; @@ -372,7 +372,7 @@ static void nfs_detach_aio_context(BlockDriverState *bs) NFSClient *client = bs->opaque; aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context), - false, NULL, NULL, NULL, NULL); + false, NULL, NULL, NULL, NULL, NULL); client->events = 0; } @@ -390,7 +390,7 @@ static void nfs_client_close(NFSClient *client) if (client->context) { qemu_mutex_lock(&client->mutex); aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context), - false, NULL, NULL, NULL, NULL); + false, NULL, NULL, NULL, NULL, NULL); qemu_mutex_unlock(&client->mutex); if (client->fh) { nfs_close(client->context, client->fh); @@ -418,7 +418,11 @@ static int64_t nfs_client_open(NFSClient *client, BlockdevOptionsNfs *opts, int flags, int open_flags, Error **errp) { int64_t ret = -EINVAL; +#ifdef _WIN32 + struct __stat64 st; +#else struct stat st; +#endif char *file = NULL, *strp = NULL; qemu_mutex_init(&client->mutex); @@ -781,7 +785,11 @@ static int nfs_reopen_prepare(BDRVReopenState *state, BlockReopenQueue *queue, Error **errp) { NFSClient *client = state->bs->opaque; +#ifdef _WIN32 + struct __stat64 st; +#else struct stat st; +#endif int ret = 0; if (state->flags & BDRV_O_RDWR && bdrv_is_read_only(state->bs)) { diff --git a/block/null.c b/block/null.c index cc9b1d4ea7..75f7d0db40 100644 --- a/block/null.c +++ b/block/null.c @@ -116,8 +116,9 @@ static coroutine_fn int null_co_common(BlockDriverState *bs) } static coroutine_fn int null_co_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int 
flags) + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { BDRVNullState *s = bs->opaque; @@ -129,8 +130,9 @@ static coroutine_fn int null_co_preadv(BlockDriverState *bs, } static coroutine_fn int null_co_pwritev(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { return null_co_common(bs); } @@ -187,8 +189,8 @@ static inline BlockAIOCB *null_aio_common(BlockDriverState *bs, } static BlockAIOCB *null_aio_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags, + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) { @@ -202,8 +204,8 @@ static BlockAIOCB *null_aio_preadv(BlockDriverState *bs, } static BlockAIOCB *null_aio_pwritev(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags, + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) { diff --git a/block/nvme.c b/block/nvme.c index e8dbbc2317..656624c585 100644 --- a/block/nvme.c +++ b/block/nvme.c @@ -21,6 +21,7 @@ #include "qemu/module.h" #include "qemu/cutils.h" #include "qemu/option.h" +#include "qemu/memalign.h" #include "qemu/vfio-helpers.h" #include "block/block_int.h" #include "sysemu/replay.h" @@ -168,31 +169,35 @@ static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q, size_t bytes; int r; - bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size); + bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size()); q->head = q->tail = 0; - q->queue = qemu_try_memalign(qemu_real_host_page_size, bytes); + q->queue = qemu_try_memalign(qemu_real_host_page_size(), bytes); if (!q->queue) { error_setg(errp, "Cannot allocate queue"); return false; } memset(q->queue, 0, bytes); - r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova); + r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova, errp); if (r) { - error_setg(errp, "Cannot map queue"); - return false; + error_prepend(errp, "Cannot map queue: "); } - return true; + return r == 0; +} + +static void nvme_free_queue(NVMeQueue *q) +{ + qemu_vfree(q->queue); } static void nvme_free_queue_pair(NVMeQueuePair *q) { - trace_nvme_free_queue_pair(q->index, q); + trace_nvme_free_queue_pair(q->index, q, &q->cq, &q->sq); if (q->completion_bh) { qemu_bh_delete(q->completion_bh); } + nvme_free_queue(&q->sq); + nvme_free_queue(&q->cq); qemu_vfree(q->prp_list_pages); - qemu_vfree(q->sq.queue); - qemu_vfree(q->cq.queue); qemu_mutex_destroy(&q->lock); g_free(q); } @@ -202,8 +207,9 @@ static void nvme_free_req_queue_cb(void *opaque) NVMeQueuePair *q = opaque; qemu_mutex_lock(&q->lock); - while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) { - /* Retry all pending requests */ + while (q->free_req_head != -1 && + qemu_co_enter_next(&q->free_req_queue, &q->lock)) { + /* Retry waiting requests */ } qemu_mutex_unlock(&q->lock); } @@ -220,14 +226,16 @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s, q = g_try_new0(NVMeQueuePair, 1); if (!q) { + error_setg(errp, "Cannot allocate queue pair"); return NULL; } trace_nvme_create_queue_pair(idx, q, size, aio_context, event_notifier_get_fd(s->irq_notifier)); bytes = QEMU_ALIGN_UP(s->page_size * NVME_NUM_REQS, - qemu_real_host_page_size); - q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size, bytes); + qemu_real_host_page_size()); + 
q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size(), bytes); if (!q->prp_list_pages) { + error_setg(errp, "Cannot allocate PRP page list"); goto fail; } memset(q->prp_list_pages, 0, bytes); @@ -237,8 +245,9 @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s, qemu_co_queue_init(&q->free_req_queue); q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q); r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages, bytes, - false, &prp_list_iova); + false, &prp_list_iova, errp); if (r) { + error_prepend(errp, "Cannot map buffer for DMA: "); goto fail; } q->free_req_head = -1; @@ -284,34 +293,42 @@ static void nvme_kick(NVMeQueuePair *q) q->need_kick = 0; } -/* Find a free request element if any, otherwise: - * a) if in coroutine context, try to wait for one to become available; - * b) if not in coroutine, return NULL; - */ -static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q) +static NVMeRequest *nvme_get_free_req_nofail_locked(NVMeQueuePair *q) { NVMeRequest *req; - qemu_mutex_lock(&q->lock); - - while (q->free_req_head == -1) { - if (qemu_in_coroutine()) { - trace_nvme_free_req_queue_wait(q->s, q->index); - qemu_co_queue_wait(&q->free_req_queue, &q->lock); - } else { - qemu_mutex_unlock(&q->lock); - return NULL; - } - } - req = &q->reqs[q->free_req_head]; q->free_req_head = req->free_req_next; req->free_req_next = -1; - - qemu_mutex_unlock(&q->lock); return req; } +/* Return a free request element if any, otherwise return NULL. */ +static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q) +{ + QEMU_LOCK_GUARD(&q->lock); + if (q->free_req_head == -1) { + return NULL; + } + return nvme_get_free_req_nofail_locked(q); +} + +/* + * Wait for a free request to become available if necessary, then + * return it. + */ +static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q) +{ + QEMU_LOCK_GUARD(&q->lock); + + while (q->free_req_head == -1) { + trace_nvme_free_req_queue_wait(q->s, q->index); + qemu_co_queue_wait(&q->free_req_queue, &q->lock); + } + + return nvme_get_free_req_nofail_locked(q); +} + /* With q->lock */ static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req) { @@ -497,7 +514,7 @@ static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd) AioContext *aio_context = bdrv_get_aio_context(bs); NVMeRequest *req; int ret = -EINPROGRESS; - req = nvme_get_free_req(q); + req = nvme_get_free_req_nowait(q); if (!req) { return -EBUSY; } @@ -512,10 +529,10 @@ static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp) { BDRVNVMeState *s = bs->opaque; bool ret = false; - union { + QEMU_AUTO_VFREE union { NvmeIdCtrl ctrl; NvmeIdNs ns; - } *id; + } *id = NULL; NvmeLBAF *lbaf; uint16_t oncs; int r; @@ -524,16 +541,16 @@ static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp) .opcode = NVME_ADM_CMD_IDENTIFY, .cdw10 = cpu_to_le32(0x1), }; - size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size); + size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size()); - id = qemu_try_memalign(qemu_real_host_page_size, id_size); + id = qemu_try_memalign(qemu_real_host_page_size(), id_size); if (!id) { error_setg(errp, "Cannot allocate buffer for identify response"); goto out; } - r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova); + r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova, errp); if (r) { - error_setg(errp, "Cannot map buffer for DMA"); + error_prepend(errp, "Cannot map buffer for DMA: "); goto out; } @@ -593,15 +610,12 @@ static bool 
nvme_identify(BlockDriverState *bs, int namespace, Error **errp) s->blkshift = lbaf->ds; out: qemu_vfio_dma_unmap(s->vfio, id); - qemu_vfree(id); return ret; } -static bool nvme_poll_queue(NVMeQueuePair *q) +static void nvme_poll_queue(NVMeQueuePair *q) { - bool progress = false; - const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES; NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset]; @@ -612,30 +626,23 @@ static bool nvme_poll_queue(NVMeQueuePair *q) * cannot race with itself. */ if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) { - return false; + return; } qemu_mutex_lock(&q->lock); while (nvme_process_completion(q)) { /* Keep polling */ - progress = true; } qemu_mutex_unlock(&q->lock); - - return progress; } -static bool nvme_poll_queues(BDRVNVMeState *s) +static void nvme_poll_queues(BDRVNVMeState *s) { - bool progress = false; int i; for (i = 0; i < s->queue_count; i++) { - if (nvme_poll_queue(s->queues[i])) { - progress = true; - } + nvme_poll_queue(s->queues[i]); } - return progress; } static void nvme_handle_event(EventNotifier *n) @@ -696,8 +703,30 @@ static bool nvme_poll_cb(void *opaque) EventNotifier *e = opaque; BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier[MSIX_SHARED_IRQ_IDX]); + int i; - return nvme_poll_queues(s); + for (i = 0; i < s->queue_count; i++) { + NVMeQueuePair *q = s->queues[i]; + const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES; + NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset]; + + /* + * q->lock isn't needed because nvme_process_completion() only runs in + * the event loop thread and cannot race with itself. + */ + if ((le16_to_cpu(cqe->status) & 0x1) != q->cq_phase) { + return true; + } + } + return false; +} + +static void nvme_poll_ready(EventNotifier *e) +{ + BDRVNVMeState *s = container_of(e, BDRVNVMeState, + irq_notifier[MSIX_SHARED_IRQ_IDX]); + + nvme_poll_queues(s); } static int nvme_init(BlockDriverState *bs, const char *device, int namespace, @@ -832,7 +861,8 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace, } aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier[MSIX_SHARED_IRQ_IDX], - false, nvme_handle_event, nvme_poll_cb); + false, nvme_handle_event, nvme_poll_cb, + nvme_poll_ready); if (!nvme_identify(bs, namespace, errp)) { ret = -EIO; @@ -917,7 +947,7 @@ static void nvme_close(BlockDriverState *bs) g_free(s->queues); aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier[MSIX_SHARED_IRQ_IDX], - false, NULL, NULL); + false, NULL, NULL, NULL); event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]); qemu_vfio_pci_unmap_bar(s->vfio, 0, s->bar0_wo_map, 0, sizeof(NvmeBar) + NVME_DOORBELL_SIZE); @@ -1017,6 +1047,7 @@ static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd, uint64_t *pagelist = req->prp_list_page; int i, j, r; int entries = 0; + Error *local_err = NULL, **errp = NULL; assert(qiov->size); assert(QEMU_IS_ALIGNED(qiov->size, s->page_size)); @@ -1025,11 +1056,11 @@ static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd, bool retry = true; uint64_t iova; size_t len = QEMU_ALIGN_UP(qiov->iov[i].iov_len, - qemu_real_host_page_size); + qemu_real_host_page_size()); try_map: r = qemu_vfio_dma_map(s->vfio, qiov->iov[i].iov_base, - len, true, &iova); + len, true, &iova, errp); if (r == -ENOSPC) { /* * In addition to the -ENOMEM error, the VFIO_IOMMU_MAP_DMA @@ -1064,6 +1095,8 @@ try_map: goto fail; } } + errp = &local_err; + goto try_map; } if (r) { @@ -1107,6 +1140,9 @@ fail: * because they are already 
mapped before calling this function; for * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by * calling qemu_vfio_dma_reset_temporary when necessary. */ + if (local_err) { + error_reportf_err(local_err, "Cannot map buffer for DMA: "); + } return r; } @@ -1196,8 +1232,8 @@ static inline bool nvme_qiov_aligned(BlockDriverState *bs, for (i = 0; i < qiov->niov; ++i) { if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, - qemu_real_host_page_size) || - !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size)) { + qemu_real_host_page_size()) || + !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size())) { trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base, qiov->iov[i].iov_len, s->page_size); return false; @@ -1206,14 +1242,16 @@ static inline bool nvme_qiov_aligned(BlockDriverState *bs, return true; } -static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, bool is_write, int flags) +static coroutine_fn int nvme_co_prw(BlockDriverState *bs, + uint64_t offset, uint64_t bytes, + QEMUIOVector *qiov, bool is_write, + int flags) { BDRVNVMeState *s = bs->opaque; int r; - uint8_t *buf = NULL; + QEMU_AUTO_VFREE uint8_t *buf = NULL; QEMUIOVector local_qiov; - size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size); + size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size()); assert(QEMU_IS_ALIGNED(offset, s->page_size)); assert(QEMU_IS_ALIGNED(bytes, s->page_size)); assert(bytes <= s->max_transfer); @@ -1223,7 +1261,7 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes, } s->stats.unaligned_accesses++; trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write); - buf = qemu_try_memalign(qemu_real_host_page_size, len); + buf = qemu_try_memalign(qemu_real_host_page_size(), len); if (!buf) { return -ENOMEM; @@ -1238,20 +1276,21 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes, if (!r && !is_write) { qemu_iovec_from_buf(qiov, 0, buf, bytes); } - qemu_vfree(buf); return r; } static coroutine_fn int nvme_co_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { return nvme_co_prw(bs, offset, bytes, qiov, false, flags); } static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { return nvme_co_prw(bs, offset, bytes, qiov, true, flags); } @@ -1286,19 +1325,29 @@ static coroutine_fn int nvme_co_flush(BlockDriverState *bs) static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, - int bytes, + int64_t bytes, BdrvRequestFlags flags) { BDRVNVMeState *s = bs->opaque; NVMeQueuePair *ioq = s->queues[INDEX_IO(0)]; NVMeRequest *req; - - uint32_t cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF; + uint32_t cdw12; if (!s->supports_write_zeroes) { return -ENOTSUP; } + if (bytes == 0) { + return 0; + } + + cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF; + /* + * We should not lose information. pwrite_zeroes_alignment and + * max_pwrite_zeroes guarantees it. 
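The encoding above stores a zero-based block count in the 16-bit NLB field of cdw12, which is why the assertion and the max_pwrite_zeroes limit introduced later in this patch (1ULL << (blkshift + 16)) make the shift-and-decrement lossless. A standalone sketch of that arithmetic, with blkshift passed in explicitly instead of read from BDRVNVMeState:

#include <assert.h>
#include <stdint.h>

/* Encode a byte count as a zero-based logical-block count (NLB). */
static uint32_t nlb_encode(uint64_t bytes, unsigned blkshift)
{
    /* Largest request the 16-bit NLB field can describe. */
    uint64_t max_bytes = 1ULL << (blkshift + 16);
    uint32_t cdw12;

    assert(bytes > 0 && bytes <= max_bytes);
    assert((bytes & ((1ULL << blkshift) - 1)) == 0);  /* block aligned */

    cdw12 = (uint32_t)((bytes >> blkshift) - 1) & 0xFFFF;

    /* Round-trip check: no information was lost. */
    assert(((uint64_t)(cdw12 + 1) << blkshift) == bytes);
    return cdw12;
}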
+ */ + assert(((cdw12 + 1) << s->blkshift) == bytes); + NvmeCmd cmd = { .opcode = NVME_CMD_WRITE_ZEROES, .nsid = cpu_to_le32(s->nsid), @@ -1340,12 +1389,12 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs, static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs, int64_t offset, - int bytes) + int64_t bytes) { BDRVNVMeState *s = bs->opaque; NVMeQueuePair *ioq = s->queues[INDEX_IO(0)]; NVMeRequest *req; - NvmeDsmRange *buf; + QEMU_AUTO_VFREE NvmeDsmRange *buf = NULL; QEMUIOVector local_qiov; int ret; @@ -1367,6 +1416,14 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs, assert(s->queue_count > 1); + /* + * Filling the @buf requires @offset and @bytes to satisfy restrictions + * defined in nvme_refresh_limits(). + */ + assert(QEMU_IS_ALIGNED(bytes, 1UL << s->blkshift)); + assert(QEMU_IS_ALIGNED(offset, 1UL << s->blkshift)); + assert((bytes >> s->blkshift) <= UINT32_MAX); + buf = qemu_try_memalign(s->page_size, s->page_size); if (!buf) { return -ENOMEM; @@ -1412,7 +1469,6 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs, trace_nvme_dsm_done(s, offset, bytes, ret); out: qemu_iovec_destroy(&local_qiov); - qemu_vfree(buf); return ret; } @@ -1462,6 +1518,18 @@ static void nvme_refresh_limits(BlockDriverState *bs, Error **errp) bs->bl.opt_mem_alignment = s->page_size; bs->bl.request_alignment = s->page_size; bs->bl.max_transfer = s->max_transfer; + + /* + * Look at nvme_co_pwrite_zeroes: after shift and decrement we should get + * at most 0xFFFF + */ + bs->bl.max_pwrite_zeroes = 1ULL << (s->blkshift + 16); + bs->bl.pwrite_zeroes_alignment = MAX(bs->bl.request_alignment, + 1UL << s->blkshift); + + bs->bl.max_pdiscard = (uint64_t)UINT32_MAX << s->blkshift; + bs->bl.pdiscard_alignment = MAX(bs->bl.request_alignment, + 1UL << s->blkshift); } static void nvme_detach_aio_context(BlockDriverState *bs) @@ -1477,7 +1545,7 @@ static void nvme_detach_aio_context(BlockDriverState *bs) aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier[MSIX_SHARED_IRQ_IDX], - false, NULL, NULL); + false, NULL, NULL, NULL); } static void nvme_attach_aio_context(BlockDriverState *bs, @@ -1487,7 +1555,8 @@ static void nvme_attach_aio_context(BlockDriverState *bs, s->aio_context = new_context; aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX], - false, nvme_handle_event, nvme_poll_cb); + false, nvme_handle_event, nvme_poll_cb, + nvme_poll_ready); for (unsigned i = 0; i < s->queue_count; i++) { NVMeQueuePair *q = s->queues[i]; @@ -1518,21 +1587,22 @@ static void nvme_aio_unplug(BlockDriverState *bs) } } -static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size) +static bool nvme_register_buf(BlockDriverState *bs, void *host, size_t size, + Error **errp) { int ret; BDRVNVMeState *s = bs->opaque; - ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL); - if (ret) { - /* FIXME: we may run out of IOVA addresses after repeated - * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap - * doesn't reclaim addresses for fixed mappings. */ - error_report("nvme_register_buf failed: %s", strerror(-ret)); - } + /* + * FIXME: we may run out of IOVA addresses after repeated + * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap + * doesn't reclaim addresses for fixed mappings. 
+ */ + ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL, errp); + return ret == 0; } -static void nvme_unregister_buf(BlockDriverState *bs, void *host) +static void nvme_unregister_buf(BlockDriverState *bs, void *host, size_t size) { BDRVNVMeState *s = bs->opaque; diff --git a/block/parallels-ext.c b/block/parallels-ext.c index e0dd0975c6..c9dbbf5089 100644 --- a/block/parallels-ext.c +++ b/block/parallels-ext.c @@ -29,6 +29,7 @@ #include "parallels.h" #include "crypto/hash.h" #include "qemu/uuid.h" +#include "qemu/memalign.h" #define PARALLELS_FORMAT_EXTENSION_MAGIC 0xAB234CEF23DCEA87ULL @@ -92,8 +93,8 @@ static int parallels_load_bitmap_data(BlockDriverState *bs, if (entry == 1) { bdrv_dirty_bitmap_deserialize_ones(bitmap, offset, count, false); } else { - ret = bdrv_pread(bs->file, entry << BDRV_SECTOR_BITS, buf, - s->cluster_size); + ret = bdrv_pread(bs->file, entry << BDRV_SECTOR_BITS, + s->cluster_size, buf, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to read bitmap data cluster"); @@ -260,7 +261,7 @@ static int parallels_parse_format_extension(BlockDriverState *bs, break; default: - error_setg(errp, "Unknown feature: 0x%" PRIu64, fh.magic); + error_setg(errp, "Unknown feature: 0x%" PRIx64, fh.magic); goto fail; } @@ -285,7 +286,7 @@ int parallels_read_format_extension(BlockDriverState *bs, assert(ext_off > 0); - ret = bdrv_pread(bs->file, ext_off, ext_cluster, s->cluster_size); + ret = bdrv_pread(bs->file, ext_off, s->cluster_size, ext_cluster, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to read Format Extension cluster"); goto out; diff --git a/block/parallels.c b/block/parallels.c index 6ebad2a2bb..fa08c1104b 100644 --- a/block/parallels.c +++ b/block/parallels.c @@ -41,6 +41,7 @@ #include "qapi/qapi-visit-block-core.h" #include "qemu/bswap.h" #include "qemu/bitmap.h" +#include "qemu/memalign.h" #include "migration/blocker.h" #include "parallels.h" @@ -164,8 +165,9 @@ static int64_t block_status(BDRVParallelsState *s, int64_t sector_num, return start_off; } -static int64_t allocate_clusters(BlockDriverState *bs, int64_t sector_num, - int nb_sectors, int *pnum) +static coroutine_fn int64_t allocate_clusters(BlockDriverState *bs, + int64_t sector_num, + int nb_sectors, int *pnum) { int ret = 0; BDRVParallelsState *s = bs->opaque; @@ -203,18 +205,18 @@ static int64_t allocate_clusters(BlockDriverState *bs, int64_t sector_num, * force the safer-but-slower fallocate. 
*/ if (s->prealloc_mode == PRL_PREALLOC_MODE_TRUNCATE) { - ret = bdrv_truncate(bs->file, - (s->data_end + space) << BDRV_SECTOR_BITS, - false, PREALLOC_MODE_OFF, BDRV_REQ_ZERO_WRITE, - NULL); + ret = bdrv_co_truncate(bs->file, + (s->data_end + space) << BDRV_SECTOR_BITS, + false, PREALLOC_MODE_OFF, + BDRV_REQ_ZERO_WRITE, NULL); if (ret == -ENOTSUP) { s->prealloc_mode = PRL_PREALLOC_MODE_FALLOCATE; } } if (s->prealloc_mode == PRL_PREALLOC_MODE_FALLOCATE) { - ret = bdrv_pwrite_zeroes(bs->file, - s->data_end << BDRV_SECTOR_BITS, - space << BDRV_SECTOR_BITS, 0); + ret = bdrv_co_pwrite_zeroes(bs->file, + s->data_end << BDRV_SECTOR_BITS, + space << BDRV_SECTOR_BITS, 0); } if (ret < 0) { return ret; @@ -240,8 +242,8 @@ static int64_t allocate_clusters(BlockDriverState *bs, int64_t sector_num, return ret; } - ret = bdrv_co_pwritev(bs->file, s->data_end * BDRV_SECTOR_SIZE, - nb_cow_bytes, buf, 0); + ret = bdrv_co_pwrite(bs->file, s->data_end * BDRV_SECTOR_SIZE, + nb_cow_bytes, buf, 0); qemu_vfree(buf); if (ret < 0) { return ret; @@ -276,8 +278,8 @@ static coroutine_fn int parallels_co_flush_to_os(BlockDriverState *bs) if (off + to_write > s->header_size) { to_write = s->header_size - off; } - ret = bdrv_pwrite(bs->file, off, (uint8_t *)s->header + off, - to_write); + ret = bdrv_co_pwrite(bs->file, off, to_write, + (uint8_t *)s->header + off, 0); if (ret < 0) { qemu_co_mutex_unlock(&s->lock); return ret; @@ -327,7 +329,6 @@ static coroutine_fn int parallels_co_writev(BlockDriverState *bs, QEMUIOVector hd_qiov; int ret = 0; - assert(!flags); qemu_iovec_init(&hd_qiov, qiov->niov); while (nb_sectors > 0) { @@ -480,7 +481,7 @@ static int coroutine_fn parallels_co_check(BlockDriverState *bs, ret = 0; if (flush_bat) { - ret = bdrv_pwrite_sync(bs->file, 0, s->header, s->header_size); + ret = bdrv_co_pwrite_sync(bs->file, 0, s->header_size, s->header, 0); if (ret < 0) { res->check_errors++; goto out; @@ -502,8 +503,8 @@ static int coroutine_fn parallels_co_check(BlockDriverState *bs, * In order to really repair the image, we must shrink it. * That means we have to pass exact=true. 
*/ - ret = bdrv_truncate(bs->file, res->image_end_offset, true, - PREALLOC_MODE_OFF, 0, &local_err); + ret = bdrv_co_truncate(bs->file, res->image_end_offset, true, + PREALLOC_MODE_OFF, 0, &local_err); if (ret < 0) { error_report_err(local_err); res->check_errors++; @@ -598,12 +599,12 @@ static int coroutine_fn parallels_co_create(BlockdevCreateOptions* opts, memset(tmp, 0, sizeof(tmp)); memcpy(tmp, &header, sizeof(header)); - ret = blk_pwrite(blk, 0, tmp, BDRV_SECTOR_SIZE, 0); + ret = blk_co_pwrite(blk, 0, BDRV_SECTOR_SIZE, tmp, 0); if (ret < 0) { goto exit; } - ret = blk_pwrite_zeroes(blk, BDRV_SECTOR_SIZE, - (bat_sectors - 1) << BDRV_SECTOR_BITS, 0); + ret = blk_co_pwrite_zeroes(blk, BDRV_SECTOR_SIZE, + (bat_sectors - 1) << BDRV_SECTOR_BITS, 0); if (ret < 0) { goto exit; } @@ -722,7 +723,7 @@ static int parallels_update_header(BlockDriverState *bs) if (size > s->header_size) { size = s->header_size; } - return bdrv_pwrite_sync(bs->file, 0, s->header, size); + return bdrv_pwrite_sync(bs->file, 0, size, s->header, 0); } static int parallels_open(BlockDriverState *bs, QDict *options, int flags, @@ -735,13 +736,12 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags, Error *local_err = NULL; char *buf; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } - ret = bdrv_pread(bs->file, 0, &ph, sizeof(ph)); + ret = bdrv_pread(bs->file, 0, sizeof(ph), &ph, 0); if (ret < 0) { goto fail; } @@ -797,7 +797,7 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags, s->header_size = size; } - ret = bdrv_pread(bs->file, 0, s->header, s->header_size); + ret = bdrv_pread(bs->file, 0, s->header_size, s->header, 0); if (ret < 0) { goto fail; } @@ -869,11 +869,11 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags, } } - s->bat_dirty_block = 4 * qemu_real_host_page_size; + s->bat_dirty_block = 4 * qemu_real_host_page_size(); s->bat_dirty_bmap = bitmap_new(DIV_ROUND_UP(s->header_size, s->bat_dirty_block)); - /* Disable migration until bdrv_invalidate_cache method is added */ + /* Disable migration until bdrv_activate method is added */ error_setg(&s->migration_blocker, "The Parallels format used by node '%s' " "does not support live migration", bdrv_get_device_or_node_name(bs)); diff --git a/block/preallocate.c b/block/preallocate.c index b619206304..d50ee7f49b 100644 --- a/block/preallocate.c +++ b/block/preallocate.c @@ -134,6 +134,7 @@ static int preallocate_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVPreallocateState *s = bs->opaque; + int ret; /* * s->data_end and friends should be initialized on permission update. 
@@ -141,11 +142,9 @@ static int preallocate_open(BlockDriverState *bs, QDict *options, int flags, */ s->file_end = s->zero_start = s->data_end = -EINVAL; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } if (!preallocate_absorb_opts(&s->opts, options, bs->file->bs, errp)) { @@ -227,15 +226,15 @@ static void preallocate_reopen_abort(BDRVReopenState *state) } static coroutine_fn int preallocate_co_preadv_part( - BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, size_t qiov_offset, int flags) + BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags) { return bdrv_co_preadv_part(bs->file, offset, bytes, qiov, qiov_offset, flags); } static int coroutine_fn preallocate_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { return bdrv_co_pdiscard(bs->file, offset, bytes); } @@ -276,6 +275,10 @@ static bool coroutine_fn handle_write(BlockDriverState *bs, int64_t offset, int64_t end = offset + bytes; int64_t prealloc_start, prealloc_end; int ret; + uint32_t file_align = bs->file->bs->bl.request_alignment; + uint32_t prealloc_align = MAX(s->opts.prealloc_align, file_align); + + assert(QEMU_IS_ALIGNED(prealloc_align, file_align)); if (!has_prealloc_perms(bs)) { /* We don't have state neither should try to recover it */ @@ -320,9 +323,14 @@ static bool coroutine_fn handle_write(BlockDriverState *bs, int64_t offset, /* Now we want new preallocation, as request writes beyond s->file_end. */ - prealloc_start = want_merge_zero ? MIN(offset, s->file_end) : s->file_end; - prealloc_end = QEMU_ALIGN_UP(end + s->opts.prealloc_size, - s->opts.prealloc_align); + prealloc_start = QEMU_ALIGN_UP( + want_merge_zero ? MIN(offset, s->file_end) : s->file_end, + file_align); + prealloc_end = QEMU_ALIGN_UP( + MAX(prealloc_start, end) + s->opts.prealloc_size, + prealloc_align); + + want_merge_zero = want_merge_zero && (prealloc_start <= offset); ret = bdrv_co_pwrite_zeroes( bs->file, prealloc_start, prealloc_end - prealloc_start, @@ -337,7 +345,7 @@ static bool coroutine_fn handle_write(BlockDriverState *bs, int64_t offset, } static int coroutine_fn preallocate_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, BdrvRequestFlags flags) + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { bool want_merge_zero = !(flags & ~(BDRV_REQ_ZERO_WRITE | BDRV_REQ_NO_FALLBACK)); @@ -349,11 +357,11 @@ static int coroutine_fn preallocate_co_pwrite_zeroes(BlockDriverState *bs, } static coroutine_fn int preallocate_co_pwritev_part(BlockDriverState *bs, - uint64_t offset, - uint64_t bytes, + int64_t offset, + int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset, - int flags) + BdrvRequestFlags flags) { handle_write(bs, offset, bytes, false); diff --git a/block/qapi-sysemu.c b/block/qapi-sysemu.c index 8498402ad4..680c7ee342 100644 --- a/block/qapi-sysemu.c +++ b/block/qapi-sysemu.c @@ -318,6 +318,7 @@ void qmp_blockdev_change_medium(bool has_device, const char *device, bool has_id, const char *id, const char *filename, bool has_format, const char *format, + bool has_force, bool force, bool has_read_only, BlockdevChangeReadOnlyMode read_only, Error **errp) @@ -380,7 +381,7 @@ void qmp_blockdev_change_medium(bool has_device, const char *device, rc = do_open_tray(has_device ? 
device : NULL, has_id ? id : NULL, - false, &err); + force, &err); if (rc && rc != -ENOSYS) { error_propagate(errp, err); goto fail; diff --git a/block/qcow.c b/block/qcow.c index f8919a44d1..daa38839ab 100644 --- a/block/qcow.c +++ b/block/qcow.c @@ -32,6 +32,7 @@ #include "qemu/option.h" #include "qemu/bswap.h" #include "qemu/cutils.h" +#include "qemu/memalign.h" #include #include "qapi/qmp/qdict.h" #include "qapi/qmp/qstring.h" @@ -91,7 +92,8 @@ typedef struct BDRVQcowState { static QemuOptsList qcow_create_opts; -static int decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset); +static int coroutine_fn decompress_cluster(BlockDriverState *bs, + uint64_t cluster_offset); static int qcow_probe(const uint8_t *buf, int buf_size, const char *filename) { @@ -120,14 +122,12 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags, qdict_extract_subqdict(options, &encryptopts, "encrypt."); encryptfmt = qdict_get_try_str(encryptopts, "format"); - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - ret = -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { goto fail; } - ret = bdrv_pread(bs->file, 0, &header, sizeof(header)); + ret = bdrv_pread(bs->file, 0, sizeof(header), &header, 0); if (ret < 0) { goto fail; } @@ -259,8 +259,8 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags, goto fail; } - ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table, - s->l1_size * sizeof(uint64_t)); + ret = bdrv_pread(bs->file, s->l1_table_offset, + s->l1_size * sizeof(uint64_t), s->l1_table, 0); if (ret < 0) { goto fail; } @@ -290,8 +290,8 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags, ret = -EINVAL; goto fail; } - ret = bdrv_pread(bs->file, header.backing_file_offset, - bs->auto_backing_file, len); + ret = bdrv_pread(bs->file, header.backing_file_offset, len, + bs->auto_backing_file, 0); if (ret < 0) { goto fail; } @@ -350,10 +350,11 @@ static int qcow_reopen_prepare(BDRVReopenState *state, * return 0 if not allocated, 1 if *result is assigned, and negative * errno on failure. 
*/ -static int get_cluster_offset(BlockDriverState *bs, - uint64_t offset, int allocate, - int compressed_size, - int n_start, int n_end, uint64_t *result) +static int coroutine_fn get_cluster_offset(BlockDriverState *bs, + uint64_t offset, int allocate, + int compressed_size, + int n_start, int n_end, + uint64_t *result) { BDRVQcowState *s = bs->opaque; int min_index, i, j, l1_index, l2_index, ret; @@ -380,9 +381,9 @@ static int get_cluster_offset(BlockDriverState *bs, s->l1_table[l1_index] = l2_offset; tmp = cpu_to_be64(l2_offset); BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); - ret = bdrv_pwrite_sync(bs->file, - s->l1_table_offset + l1_index * sizeof(tmp), - &tmp, sizeof(tmp)); + ret = bdrv_co_pwrite_sync(bs->file, + s->l1_table_offset + l1_index * sizeof(tmp), + sizeof(tmp), &tmp, 0); if (ret < 0) { return ret; } @@ -413,14 +414,14 @@ static int get_cluster_offset(BlockDriverState *bs, BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD); if (new_l2_table) { memset(l2_table, 0, s->l2_size * sizeof(uint64_t)); - ret = bdrv_pwrite_sync(bs->file, l2_offset, l2_table, - s->l2_size * sizeof(uint64_t)); + ret = bdrv_co_pwrite_sync(bs->file, l2_offset, + s->l2_size * sizeof(uint64_t), l2_table, 0); if (ret < 0) { return ret; } } else { - ret = bdrv_pread(bs->file, l2_offset, l2_table, - s->l2_size * sizeof(uint64_t)); + ret = bdrv_co_pread(bs->file, l2_offset, + s->l2_size * sizeof(uint64_t), l2_table, 0); if (ret < 0) { return ret; } @@ -452,8 +453,8 @@ static int get_cluster_offset(BlockDriverState *bs, cluster_offset = QEMU_ALIGN_UP(cluster_offset, s->cluster_size); /* write the cluster content */ BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); - ret = bdrv_pwrite(bs->file, cluster_offset, s->cluster_cache, - s->cluster_size); + ret = bdrv_co_pwrite(bs->file, cluster_offset, s->cluster_size, + s->cluster_cache, 0); if (ret < 0) { return ret; } @@ -468,8 +469,9 @@ static int get_cluster_offset(BlockDriverState *bs, if (cluster_offset + s->cluster_size > INT64_MAX) { return -E2BIG; } - ret = bdrv_truncate(bs->file, cluster_offset + s->cluster_size, - false, PREALLOC_MODE_OFF, 0, NULL); + ret = bdrv_co_truncate(bs->file, + cluster_offset + s->cluster_size, + false, PREALLOC_MODE_OFF, 0, NULL); if (ret < 0) { return ret; } @@ -491,10 +493,9 @@ static int get_cluster_offset(BlockDriverState *bs, return -EIO; } BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); - ret = bdrv_pwrite(bs->file, - cluster_offset + i, - s->cluster_data, - BDRV_SECTOR_SIZE); + ret = bdrv_co_pwrite(bs->file, cluster_offset + i, + BDRV_SECTOR_SIZE, + s->cluster_data, 0); if (ret < 0) { return ret; } @@ -514,8 +515,8 @@ static int get_cluster_offset(BlockDriverState *bs, } else { BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE); } - ret = bdrv_pwrite_sync(bs->file, l2_offset + l2_index * sizeof(tmp), - &tmp, sizeof(tmp)); + ret = bdrv_co_pwrite_sync(bs->file, l2_offset + l2_index * sizeof(tmp), + sizeof(tmp), &tmp, 0); if (ret < 0) { return ret; } @@ -585,7 +586,8 @@ static int decompress_buffer(uint8_t *out_buf, int out_buf_size, return 0; } -static int decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset) +static int coroutine_fn decompress_cluster(BlockDriverState *bs, + uint64_t cluster_offset) { BDRVQcowState *s = bs->opaque; int ret, csize; @@ -596,8 +598,8 @@ static int decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset) csize = cluster_offset >> (63 - s->cluster_bits); csize &= (s->cluster_size - 1); BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED); - ret = bdrv_pread(bs->file, coffset, s->cluster_data, csize); - if (ret != csize) 
+ ret = bdrv_co_pread(bs->file, coffset, csize, s->cluster_data, 0); + if (ret < 0) return -1; if (decompress_buffer(s->cluster_cache, s->cluster_size, s->cluster_data, csize) < 0) { @@ -617,9 +619,9 @@ static void qcow_refresh_limits(BlockDriverState *bs, Error **errp) bs->bl.request_alignment = BDRV_SECTOR_SIZE; } -static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, - int flags) +static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { BDRVQcowState *s = bs->opaque; int offset_in_cluster; @@ -628,7 +630,6 @@ static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset, uint8_t *buf; void *orig_buf; - assert(!flags); if (qiov->niov > 1) { buf = orig_buf = qemu_try_blockalign(bs, qiov->size); if (buf == NULL) { @@ -714,9 +715,9 @@ static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset, return ret; } -static coroutine_fn int qcow_co_pwritev(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, - int flags) +static coroutine_fn int qcow_co_pwritev(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { BDRVQcowState *s = bs->opaque; int offset_in_cluster; @@ -725,7 +726,6 @@ static coroutine_fn int qcow_co_pwritev(BlockDriverState *bs, uint64_t offset, uint8_t *buf; void *orig_buf; - assert(!flags); s->cluster_cache_offset = -1; /* disable compressed cache */ /* We must always copy the iov when encrypting, so we @@ -890,15 +890,15 @@ static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts, } /* write all the data */ - ret = blk_pwrite(qcow_blk, 0, &header, sizeof(header), 0); - if (ret != sizeof(header)) { + ret = blk_co_pwrite(qcow_blk, 0, sizeof(header), &header, 0); + if (ret < 0) { goto exit; } if (qcow_opts->has_backing_file) { - ret = blk_pwrite(qcow_blk, sizeof(header), - qcow_opts->backing_file, backing_filename_len, 0); - if (ret != backing_filename_len) { + ret = blk_co_pwrite(qcow_blk, sizeof(header), backing_filename_len, + qcow_opts->backing_file, 0); + if (ret < 0) { goto exit; } } @@ -906,9 +906,9 @@ static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts, tmp = g_malloc0(BDRV_SECTOR_SIZE); for (i = 0; i < DIV_ROUND_UP(sizeof(uint64_t) * l1_size, BDRV_SECTOR_SIZE); i++) { - ret = blk_pwrite(qcow_blk, header_size + BDRV_SECTOR_SIZE * i, - tmp, BDRV_SECTOR_SIZE, 0); - if (ret != BDRV_SECTOR_SIZE) { + ret = blk_co_pwrite(qcow_blk, header_size + BDRV_SECTOR_SIZE * i, + BDRV_SECTOR_SIZE, tmp, 0); + if (ret < 0) { g_free(tmp); goto exit; } @@ -1029,8 +1029,8 @@ static int qcow_make_empty(BlockDriverState *bs) int ret; memset(s->l1_table, 0, l1_length); - if (bdrv_pwrite_sync(bs->file, s->l1_table_offset, s->l1_table, - l1_length) < 0) + if (bdrv_pwrite_sync(bs->file, s->l1_table_offset, l1_length, s->l1_table, + 0) < 0) return -1; ret = bdrv_truncate(bs->file, s->l1_table_offset + l1_length, false, PREALLOC_MODE_OFF, 0, NULL); @@ -1047,8 +1047,8 @@ static int qcow_make_empty(BlockDriverState *bs) /* XXX: put compressed sectors first, then all the cluster aligned tables to avoid losing bytes in alignment */ static coroutine_fn int -qcow_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov) +qcow_co_pwritev_compressed(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov) { BDRVQcowState *s = bs->opaque; z_stream strm; diff --git 
a/block/qcow2-bitmap.c b/block/qcow2-bitmap.c index 8fb4731551..3dff99ba06 100644 --- a/block/qcow2-bitmap.c +++ b/block/qcow2-bitmap.c @@ -115,7 +115,7 @@ static int update_header_sync(BlockDriverState *bs) return bdrv_flush(bs->file->bs); } -static inline void bitmap_table_to_be(uint64_t *bitmap_table, size_t size) +static inline void bitmap_table_bswap_be(uint64_t *bitmap_table, size_t size) { size_t i; @@ -234,8 +234,8 @@ static int bitmap_table_load(BlockDriverState *bs, Qcow2BitmapTable *tb, } assert(tb->size <= BME_MAX_TABLE_SIZE); - ret = bdrv_pread(bs->file, tb->offset, - table, tb->size * BME_TABLE_ENTRY_SIZE); + ret = bdrv_pread(bs->file, tb->offset, tb->size * BME_TABLE_ENTRY_SIZE, + table, 0); if (ret < 0) { goto fail; } @@ -317,7 +317,7 @@ static int load_bitmap_data(BlockDriverState *bs, * already cleared */ } } else { - ret = bdrv_pread(bs->file, data_offset, buf, s->cluster_size); + ret = bdrv_pread(bs->file, data_offset, s->cluster_size, buf, 0); if (ret < 0) { goto finish; } @@ -575,7 +575,7 @@ static Qcow2BitmapList *bitmap_list_load(BlockDriverState *bs, uint64_t offset, } dir_end = dir + size; - ret = bdrv_pread(bs->file, offset, dir, size); + ret = bdrv_pread(bs->file, offset, size, dir, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to read bitmap directory"); goto fail; @@ -787,10 +787,10 @@ static int bitmap_list_store(BlockDriverState *bs, Qcow2BitmapList *bm_list, } } - /* Actually, even in in-place case ignoring QCOW2_OL_BITMAP_DIRECTORY is not - * necessary, because we drop QCOW2_AUTOCLEAR_BITMAPS when updating bitmap - * directory in-place (actually, turn-off the extension), which is checked - * in qcow2_check_metadata_overlap() */ + /* Actually, even in the in-place case ignoring QCOW2_OL_BITMAP_DIRECTORY + * is not necessary, because we drop QCOW2_AUTOCLEAR_BITMAPS when updating + * bitmap directory in-place (actually, turn-off the extension), which is + * checked in qcow2_check_metadata_overlap() */ ret = qcow2_pre_write_overlap_check( bs, in_place ? QCOW2_OL_BITMAP_DIRECTORY : 0, dir_offset, dir_size, false); @@ -798,7 +798,7 @@ static int bitmap_list_store(BlockDriverState *bs, Qcow2BitmapList *bm_list, goto fail; } - ret = bdrv_pwrite(bs->file, dir_offset, dir, dir_size); + ret = bdrv_pwrite(bs->file, dir_offset, dir_size, dir, 0); if (ret < 0) { goto fail; } @@ -955,8 +955,8 @@ static void set_readonly_helper(gpointer bitmap, gpointer value) * If header_updated is not NULL then it is set appropriately regardless of * the return value. 
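The helper renamed above to bitmap_table_bswap_be() is an in-place conversion, and the store_bitmap() hunk below now calls it a second time when the write fails, so the in-memory table is left in CPU byte order on the error path. A self-contained sketch of that convert/write/restore-on-error pattern, using an unconditional byteswap for brevity (QEMU's cpu_to_be64() is a no-op on big-endian hosts) and a hypothetical write_table() callback in place of bdrv_pwrite():

#include <stddef.h>
#include <stdint.h>

/* In-place CPU<->big-endian conversion; applying it twice is a no-op.
 * Assumes a little-endian host for the sake of the example. */
static void table_bswap_be(uint64_t *table, size_t n)
{
    for (size_t i = 0; i < n; i++) {
        table[i] = __builtin_bswap64(table[i]);
    }
}

/*
 * Convert to big-endian, write, and swap back on failure so the caller
 * still holds a table in CPU byte order when the write did not happen.
 */
static int store_table(uint64_t *table, size_t n,
                       int (*write_table)(const uint64_t *table, size_t n,
                                          void *opaque),
                       void *opaque)
{
    int ret;

    table_bswap_be(table, n);
    ret = write_table(table, n, opaque);
    if (ret < 0) {
        table_bswap_be(table, n);
    }
    return ret;
}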
*/ -bool qcow2_load_dirty_bitmaps(BlockDriverState *bs, bool *header_updated, - Error **errp) +bool coroutine_fn qcow2_load_dirty_bitmaps(BlockDriverState *bs, + bool *header_updated, Error **errp) { BDRVQcow2State *s = bs->opaque; Qcow2BitmapList *bm_list; @@ -1208,7 +1208,7 @@ int qcow2_reopen_bitmaps_rw(BlockDriverState *bs, Error **errp) } } - g_slist_foreach(ro_dirty_bitmaps, set_readonly_helper, false); + g_slist_foreach(ro_dirty_bitmaps, set_readonly_helper, (gpointer)false); ret = 0; out: @@ -1339,7 +1339,7 @@ static uint64_t *store_bitmap_data(BlockDriverState *bs, goto fail; } - ret = bdrv_pwrite(bs->file, off, buf, s->cluster_size); + ret = bdrv_pwrite(bs->file, off, s->cluster_size, buf, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to write bitmap '%s' to file", bm_name); @@ -1401,9 +1401,10 @@ static int store_bitmap(BlockDriverState *bs, Qcow2Bitmap *bm, Error **errp) goto fail; } - bitmap_table_to_be(tb, tb_size); - ret = bdrv_pwrite(bs->file, tb_offset, tb, tb_size * sizeof(tb[0])); + bitmap_table_bswap_be(tb, tb_size); + ret = bdrv_pwrite(bs->file, tb_offset, tb_size * sizeof(tb[0]), tb, 0); if (ret < 0) { + bitmap_table_bswap_be(tb, tb_size); error_setg_errno(errp, -ret, "Failed to write bitmap '%s' to file", bm_name); goto fail; diff --git a/block/qcow2-cache.c b/block/qcow2-cache.c index 7444b9c4ab..54b2d5f4de 100644 --- a/block/qcow2-cache.c +++ b/block/qcow2-cache.c @@ -23,6 +23,7 @@ */ #include "qemu/osdep.h" +#include "qemu/memalign.h" #include "qcow2.h" #include "trace.h" @@ -74,7 +75,7 @@ static void qcow2_cache_table_release(Qcow2Cache *c, int i, int num_tables) /* Using MADV_DONTNEED to discard memory is a Linux-specific feature */ #ifdef CONFIG_LINUX void *t = qcow2_cache_get_table_addr(c, i); - int align = qemu_real_host_page_size; + int align = qemu_real_host_page_size(); size_t mem_size = (size_t) c->table_size * num_tables; size_t offset = QEMU_ALIGN_UP((uintptr_t) t, align) - (uintptr_t) t; size_t length = QEMU_ALIGN_DOWN(mem_size - offset, align); @@ -222,8 +223,8 @@ static int qcow2_cache_entry_flush(BlockDriverState *bs, Qcow2Cache *c, int i) BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE); } - ret = bdrv_pwrite(bs->file, c->entries[i].offset, - qcow2_cache_get_table_addr(c, i), c->table_size); + ret = bdrv_pwrite(bs->file, c->entries[i].offset, c->table_size, + qcow2_cache_get_table_addr(c, i), 0); if (ret < 0) { return ret; } @@ -378,9 +379,8 @@ static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c, BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD); } - ret = bdrv_pread(bs->file, offset, - qcow2_cache_get_table_addr(c, i), - c->table_size); + ret = bdrv_pread(bs->file, offset, c->table_size, + qcow2_cache_get_table_addr(c, i), 0); if (ret < 0) { return ret; } diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c index bd0597842f..40ed847f97 100644 --- a/block/qcow2-cluster.c +++ b/block/qcow2-cluster.c @@ -28,9 +28,11 @@ #include "qapi/error.h" #include "qcow2.h" #include "qemu/bswap.h" +#include "qemu/memalign.h" #include "trace.h" -int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size) +int coroutine_fn qcow2_shrink_l1_table(BlockDriverState *bs, + uint64_t exact_size) { BDRVQcow2State *s = bs->opaque; int new_l1_size, i, ret; @@ -46,14 +48,14 @@ int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size) #endif BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE); - ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset + - new_l1_size * L1E_SIZE, - (s->l1_size - new_l1_size) * L1E_SIZE, 0); + ret = 
bdrv_co_pwrite_zeroes(bs->file, + s->l1_table_offset + new_l1_size * L1E_SIZE, + (s->l1_size - new_l1_size) * L1E_SIZE, 0); if (ret < 0) { goto fail; } - ret = bdrv_flush(bs->file->bs); + ret = bdrv_co_flush(bs->file->bs); if (ret < 0) { goto fail; } @@ -158,8 +160,8 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size, BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE); for(i = 0; i < s->l1_size; i++) new_l1_table[i] = cpu_to_be64(new_l1_table[i]); - ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, - new_l1_table, new_l1_size2); + ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_size2, + new_l1_table, 0); if (ret < 0) goto fail; for(i = 0; i < s->l1_size; i++) @@ -170,7 +172,7 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size, stl_be_p(data, new_l1_size); stq_be_p(data + 4, new_l1_table_offset); ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size), - data, sizeof(data)); + sizeof(data), data, 0); if (ret < 0) { goto fail; } @@ -248,7 +250,7 @@ int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index) BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + L1E_SIZE * l1_start_index, - buf, bufsize); + bufsize, buf, 0); if (ret < 0) { return ret; } @@ -505,7 +507,20 @@ static int coroutine_fn do_perform_cow_read(BlockDriverState *bs, return -ENOMEDIUM; } - /* Call .bdrv_co_readv() directly instead of using the public block-layer + /* + * We never deal with requests that don't satisfy + * bdrv_check_qiov_request(), and aligning requests to clusters never + * breaks this condition. So, do some assertions before calling + * bs->drv->bdrv_co_preadv_part() which has int64_t arguments. + */ + assert(src_cluster_offset <= INT64_MAX); + assert(src_cluster_offset + offset_in_cluster <= INT64_MAX); + /* Cast qiov->size to uint64_t to silence a compiler warning on -m32 */ + assert((uint64_t)qiov->size <= INT64_MAX); + bdrv_check_qiov_request(src_cluster_offset + offset_in_cluster, qiov->size, + qiov, 0, &error_abort); + /* + * Call .bdrv_co_readv() directly instead of using the public block-layer * interface. This avoids double I/O throttling and request tracking, * which can lead to deadlock when block layer copy-on-read is enabled. */ @@ -556,8 +571,7 @@ static int coroutine_fn do_perform_cow_write(BlockDriverState *bs, * offset needs to be aligned to a cluster boundary. * * If the cluster is unallocated then *host_offset will be 0. - * If the cluster is compressed then *host_offset will contain the - * complete compressed cluster descriptor. + * If the cluster is compressed then *host_offset will contain the l2 entry. * * On entry, *bytes is the maximum number of contiguous bytes starting at * offset that we are interested in. 
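Compressed clusters now hand the raw L2 entry back to callers, which decode it with qcow2_parse_compressed_l2_entry(), added at the end of this qcow2-cluster.c diff. A standalone sketch of that decoding, with the layout-dependent shift and masks passed in explicitly (in QEMU they live in BDRVQcow2State and depend on cluster_bits) and 512 assumed as the compressed sector size:

#include <stdint.h>

#define COMPRESSED_SECTOR_SIZE 512u

/*
 * Split a compressed-cluster L2 entry into the host byte offset of the
 * compressed data and its size in bytes.  The entry stores the byte
 * offset in its low bits and a zero-based 512-byte sector count above
 * csize_shift; the first sector is only partially used when the offset
 * is not sector aligned, hence the final subtraction.
 */
static void parse_compressed_l2_entry(uint64_t l2_entry,
                                      uint64_t cluster_offset_mask,
                                      unsigned csize_shift,
                                      uint64_t csize_mask,
                                      uint64_t *coffset, int *csize)
{
    int nb_csectors = (int)((l2_entry >> csize_shift) & csize_mask) + 1;

    *coffset = l2_entry & cluster_offset_mask;
    *csize = nb_csectors * COMPRESSED_SECTOR_SIZE -
             (int)(*coffset & (COMPRESSED_SECTOR_SIZE - 1));
}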
@@ -660,7 +674,7 @@ int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset, ret = -EIO; goto fail; } - *host_offset = l2_entry & L2E_COMPRESSED_OFFSET_SIZE_MASK; + *host_offset = l2_entry; break; case QCOW2_SUBCLUSTER_ZERO_PLAIN: case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN: @@ -810,10 +824,10 @@ static int get_cluster_table(BlockDriverState *bs, uint64_t offset, * * Return 0 on success and -errno in error cases */ -int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, - uint64_t offset, - int compressed_size, - uint64_t *host_offset) +int coroutine_fn qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, + uint64_t offset, + int compressed_size, + uint64_t *host_offset) { BDRVQcow2State *s = bs->opaque; int l2_index, ret; @@ -871,7 +885,7 @@ int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, return 0; } -static int perform_cow(BlockDriverState *bs, QCowL2Meta *m) +static int coroutine_fn perform_cow(BlockDriverState *bs, QCowL2Meta *m) { BDRVQcow2State *s = bs->opaque; Qcow2COWRegion *start = &m->cow_start; @@ -1011,7 +1025,8 @@ fail: return ret; } -int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m) +int coroutine_fn qcow2_alloc_cluster_link_l2(BlockDriverState *bs, + QCowL2Meta *m) { BDRVQcow2State *s = bs->opaque; int i, j = 0, l2_index, ret; @@ -1384,8 +1399,9 @@ static int count_single_write_clusters(BlockDriverState *bs, int nb_clusters, * information on cluster allocation may be invalid now. The caller * must start over anyway, so consider *cur_bytes undefined. */ -static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset, - uint64_t *cur_bytes, QCowL2Meta **m) +static int coroutine_fn handle_dependencies(BlockDriverState *bs, + uint64_t guest_offset, + uint64_t *cur_bytes, QCowL2Meta **m) { BDRVQcow2State *s = bs->opaque; QCowL2Meta *old_alloc; @@ -1400,29 +1416,47 @@ static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset, if (end <= old_start || start >= old_end) { /* No intersection */ + continue; + } + + if (old_alloc->keep_old_clusters && + (end <= l2meta_cow_start(old_alloc) || + start >= l2meta_cow_end(old_alloc))) + { + /* + * Clusters intersect but COW areas don't. And cluster itself is + * already allocated. So, there is no actual conflict. + */ + continue; + } + + /* Conflict */ + + if (start < old_start) { + /* Stop at the start of a running allocation */ + bytes = old_start - start; } else { - if (start < old_start) { - /* Stop at the start of a running allocation */ - bytes = old_start - start; - } else { - bytes = 0; - } + bytes = 0; + } - /* Stop if already an l2meta exists. After yielding, it wouldn't - * be valid any more, so we'd have to clean up the old L2Metas - * and deal with requests depending on them before starting to - * gather new ones. Not worth the trouble. */ - if (bytes == 0 && *m) { - *cur_bytes = 0; - return 0; - } + /* + * Stop if an l2meta already exists. After yielding, it wouldn't + * be valid any more, so we'd have to clean up the old L2Metas + * and deal with requests depending on them before starting to + * gather new ones. Not worth the trouble. + */ + if (bytes == 0 && *m) { + *cur_bytes = 0; + return 0; + } - if (bytes == 0) { - /* Wait for the dependency to complete. We need to recheck - * the free/allocated clusters when we continue. */ - qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock); - return -EAGAIN; - } + if (bytes == 0) { + /* + * Wait for the dependency to complete. 
We need to recheck + * the free/allocated clusters when we continue. + */ + qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock); + return -EAGAIN; } } @@ -1455,8 +1489,9 @@ static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset, * * -errno: in error cases */ -static int handle_copied(BlockDriverState *bs, uint64_t guest_offset, - uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m) +static int coroutine_fn handle_copied(BlockDriverState *bs, + uint64_t guest_offset, uint64_t *host_offset, uint64_t *bytes, + QCowL2Meta **m) { BDRVQcow2State *s = bs->opaque; int l2_index; @@ -1620,8 +1655,9 @@ static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset, * * -errno: in error cases */ -static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset, - uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m) +static int coroutine_fn handle_alloc(BlockDriverState *bs, + uint64_t guest_offset, uint64_t *host_offset, uint64_t *bytes, + QCowL2Meta **m) { BDRVQcow2State *s = bs->opaque; int l2_index; @@ -1741,9 +1777,10 @@ out: * * Return 0 on success and -errno in error cases */ -int qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset, - unsigned int *bytes, uint64_t *host_offset, - QCowL2Meta **m) +int coroutine_fn qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset, + unsigned int *bytes, + uint64_t *host_offset, + QCowL2Meta **m) { BDRVQcow2State *s = bs->opaque; uint64_t start, remaining; @@ -2074,8 +2111,8 @@ out: return ret; } -int qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, int flags) +int coroutine_fn qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, + uint64_t bytes, int flags) { BDRVQcow2State *s = bs->opaque; uint64_t end_offset = offset + bytes; @@ -2229,7 +2266,8 @@ static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table, (void **)&l2_slice); } else { /* load inactive L2 tables from disk */ - ret = bdrv_pread(bs->file, slice_offset, l2_slice, slice_size2); + ret = bdrv_pread(bs->file, slice_offset, slice_size2, + l2_slice, 0); } if (ret < 0) { goto fail; @@ -2345,8 +2383,8 @@ static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table, goto fail; } - ret = bdrv_pwrite(bs->file, slice_offset, - l2_slice, slice_size2); + ret = bdrv_pwrite(bs->file, slice_offset, slice_size2, + l2_slice, 0); if (ret < 0) { goto fail; } @@ -2439,8 +2477,8 @@ int qcow2_expand_zero_clusters(BlockDriverState *bs, l1_table = new_l1_table; - ret = bdrv_pread(bs->file, s->snapshots[i].l1_table_offset, - l1_table, l1_size2); + ret = bdrv_pread(bs->file, s->snapshots[i].l1_table_offset, l1_size2, + l1_table, 0); if (ret < 0) { goto fail; } @@ -2463,3 +2501,18 @@ fail: g_free(l1_table); return ret; } + +void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry, + uint64_t *coffset, int *csize) +{ + BDRVQcow2State *s = bs->opaque; + int nb_csectors; + + assert(qcow2_get_cluster_type(bs, l2_entry) == QCOW2_CLUSTER_COMPRESSED); + + *coffset = l2_entry & s->cluster_offset_mask; + + nb_csectors = ((l2_entry >> s->csize_shift) & s->csize_mask) + 1; + *csize = nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE - + (*coffset & (QCOW2_COMPRESSED_SECTOR_SIZE - 1)); +} diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c index 8e649b008e..81264740f0 100644 --- a/block/qcow2-refcount.c +++ b/block/qcow2-refcount.c @@ -28,13 +28,16 @@ #include "qemu/range.h" #include "qemu/bswap.h" #include "qemu/cutils.h" +#include "qemu/memalign.h" #include 
"trace.h" static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size, uint64_t max); -static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs, - int64_t offset, int64_t length, uint64_t addend, - bool decrease, enum qcow2_discard_type type); + +G_GNUC_WARN_UNUSED_RESULT +static int update_refcount(BlockDriverState *bs, + int64_t offset, int64_t length, uint64_t addend, + bool decrease, enum qcow2_discard_type type); static uint64_t get_refcount_ro0(const void *refcount_array, uint64_t index); static uint64_t get_refcount_ro1(const void *refcount_array, uint64_t index); @@ -94,7 +97,7 @@ static void update_max_refcount_table_index(BDRVQcow2State *s) s->max_refcount_table_index = i; } -int qcow2_refcount_init(BlockDriverState *bs) +int coroutine_fn qcow2_refcount_init(BlockDriverState *bs) { BDRVQcow2State *s = bs->opaque; unsigned int refcount_table_size2, i; @@ -115,8 +118,8 @@ int qcow2_refcount_init(BlockDriverState *bs) goto fail; } BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD); - ret = bdrv_pread(bs->file, s->refcount_table_offset, - s->refcount_table, refcount_table_size2); + ret = bdrv_co_pread(bs->file, s->refcount_table_offset, + refcount_table_size2, s->refcount_table, 0); if (ret < 0) { goto fail; } @@ -436,7 +439,7 @@ static int alloc_refcount_block(BlockDriverState *bs, BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP); ret = bdrv_pwrite_sync(bs->file, s->refcount_table_offset + refcount_table_index * REFTABLE_ENTRY_SIZE, - &data64, sizeof(data64)); + sizeof(data64), &data64, 0); if (ret < 0) { goto fail; } @@ -681,8 +684,8 @@ int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t start_offset, } BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE); - ret = bdrv_pwrite_sync(bs->file, table_offset, new_table, - table_size * REFTABLE_ENTRY_SIZE); + ret = bdrv_pwrite_sync(bs->file, table_offset, + table_size * REFTABLE_ENTRY_SIZE, new_table, 0); if (ret < 0) { goto fail; } @@ -701,7 +704,7 @@ int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t start_offset, BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE); ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, refcount_table_offset), - &data, sizeof(data)); + sizeof(data), &data, 0); if (ret < 0) { goto fail; } @@ -802,12 +805,12 @@ found: /* XXX: cache several refcount block clusters ? 
*/ /* @addend is the absolute value of the addend; if @decrease is set, @addend * will be subtracted from the current refcount, otherwise it will be added */ -static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs, - int64_t offset, - int64_t length, - uint64_t addend, - bool decrease, - enum qcow2_discard_type type) +static int update_refcount(BlockDriverState *bs, + int64_t offset, + int64_t length, + uint64_t addend, + bool decrease, + enum qcow2_discard_type type) { BDRVQcow2State *s = bs->opaque; int64_t start, last, cluster_offset; @@ -1177,11 +1180,11 @@ void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry, switch (ctype) { case QCOW2_CLUSTER_COMPRESSED: { - int64_t offset = (l2_entry & s->cluster_offset_mask) - & QCOW2_COMPRESSED_SECTOR_MASK; - int size = QCOW2_COMPRESSED_SECTOR_SIZE * - (((l2_entry >> s->csize_shift) & s->csize_mask) + 1); - qcow2_free_clusters(bs, offset, size, type); + uint64_t coffset; + int csize; + + qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize); + qcow2_free_clusters(bs, coffset, csize, type); } break; case QCOW2_CLUSTER_NORMAL: @@ -1203,7 +1206,7 @@ void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry, } } -int coroutine_fn qcow2_write_caches(BlockDriverState *bs) +int qcow2_write_caches(BlockDriverState *bs) { BDRVQcow2State *s = bs->opaque; int ret; @@ -1223,7 +1226,7 @@ int coroutine_fn qcow2_write_caches(BlockDriverState *bs) return 0; } -int coroutine_fn qcow2_flush_caches(BlockDriverState *bs) +int qcow2_flush_caches(BlockDriverState *bs) { int ret = qcow2_write_caches(bs); if (ret < 0) { @@ -1247,7 +1250,7 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs, bool l1_allocated = false; int64_t old_entry, old_l2_offset; unsigned slice, slice_size2, n_slices; - int i, j, l1_modified = 0, nb_csectors; + int i, j, l1_modified = 0; int ret; assert(addend >= -1 && addend <= 1); @@ -1271,7 +1274,7 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs, } l1_allocated = true; - ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2); + ret = bdrv_pread(bs->file, l1_table_offset, l1_size2, l1_table, 0); if (ret < 0) { goto fail; } @@ -1318,14 +1321,14 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs, switch (qcow2_get_cluster_type(bs, entry)) { case QCOW2_CLUSTER_COMPRESSED: - nb_csectors = ((entry >> s->csize_shift) & - s->csize_mask) + 1; if (addend != 0) { - uint64_t coffset = (entry & s->cluster_offset_mask) - & QCOW2_COMPRESSED_SECTOR_MASK; + uint64_t coffset; + int csize; + + qcow2_parse_compressed_l2_entry(bs, entry, + &coffset, &csize); ret = update_refcount( - bs, coffset, - nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE, + bs, coffset, csize, abs(addend), addend < 0, QCOW2_DISCARD_SNAPSHOT); if (ret < 0) { @@ -1432,8 +1435,8 @@ fail: cpu_to_be64s(&l1_table[i]); } - ret = bdrv_pwrite_sync(bs->file, l1_table_offset, - l1_table, l1_size2); + ret = bdrv_pwrite_sync(bs->file, l1_table_offset, l1_size2, l1_table, + 0); for (i = 0; i < l1_size; i++) { be64_to_cpus(&l1_table[i]); @@ -1587,6 +1590,66 @@ enum { CHECK_FRAG_INFO = 0x2, /* update BlockFragInfo counters */ }; +/* + * Fix L2 entry by making it QCOW2_CLUSTER_ZERO_PLAIN (or making all its present + * subclusters QCOW2_SUBCLUSTER_ZERO_PLAIN). + * + * This function decrements res->corruptions on success, so the caller is + * responsible to increment res->corruptions prior to the call. + * + * On failure in-memory @l2_table may be modified. 
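The fix_l2_entry_by_zero() helper added below folds the allocation half of an extended L2 bitmap into its zero half ("Allocated subclusters become zero"). A standalone sketch of that bit manipulation, assuming the extended-L2 bitmap layout from the qcow2 spec (allocation flags in the low 32 bits, reads-as-zero flags in the high 32) and a locally defined stand-in for QCOW_L2_BITMAP_ALL_ZEROES:

#include <assert.h>
#include <stdint.h>

/* Assumed layout: bits 0..31 = subcluster allocated, bits 32..63 = reads as zero */
#define L2_BITMAP_ALL_ZEROES 0xffffffff00000000ull

/*
 * Turn every allocated subcluster into a zero subcluster: copy the
 * allocation bits into the zero half, then drop the allocation bits
 * themselves, since the entry no longer references any data cluster.
 */
static uint64_t zero_allocated_subclusters(uint64_t l2_bitmap)
{
    l2_bitmap |= l2_bitmap << 32;       /* allocated -> also marked zero */
    l2_bitmap &= L2_BITMAP_ALL_ZEROES;  /* clear the allocation half */
    return l2_bitmap;
}

int main(void)
{
    /* subclusters 0 and 5 allocated, subcluster 7 already reads as zero */
    uint64_t in = (1ull << 0) | (1ull << 5) | (1ull << (32 + 7));

    assert(zero_allocated_subclusters(in) ==
           ((1ull << 32) | (1ull << 37) | (1ull << 39)));
    return 0;
}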
+ */ +static int fix_l2_entry_by_zero(BlockDriverState *bs, BdrvCheckResult *res, + uint64_t l2_offset, + uint64_t *l2_table, int l2_index, bool active, + bool *metadata_overlap) +{ + BDRVQcow2State *s = bs->opaque; + int ret; + int idx = l2_index * (l2_entry_size(s) / sizeof(uint64_t)); + uint64_t l2e_offset = l2_offset + (uint64_t)l2_index * l2_entry_size(s); + int ign = active ? QCOW2_OL_ACTIVE_L2 : QCOW2_OL_INACTIVE_L2; + + if (has_subclusters(s)) { + uint64_t l2_bitmap = get_l2_bitmap(s, l2_table, l2_index); + + /* Allocated subclusters become zero */ + l2_bitmap |= l2_bitmap << 32; + l2_bitmap &= QCOW_L2_BITMAP_ALL_ZEROES; + + set_l2_bitmap(s, l2_table, l2_index, l2_bitmap); + set_l2_entry(s, l2_table, l2_index, 0); + } else { + set_l2_entry(s, l2_table, l2_index, QCOW_OFLAG_ZERO); + } + + ret = qcow2_pre_write_overlap_check(bs, ign, l2e_offset, l2_entry_size(s), + false); + if (metadata_overlap) { + *metadata_overlap = ret < 0; + } + if (ret < 0) { + fprintf(stderr, "ERROR: Overlap check failed\n"); + goto fail; + } + + ret = bdrv_pwrite_sync(bs->file, l2e_offset, l2_entry_size(s), + &l2_table[idx], 0); + if (ret < 0) { + fprintf(stderr, "ERROR: Failed to overwrite L2 " + "table entry: %s\n", strerror(-ret)); + goto fail; + } + + res->corruptions--; + res->corruptions_fixed++; + return 0; + +fail: + res->check_errors++; + return ret; +} + /* * Increases the refcount in the given refcount table for the all clusters * referenced in the L2 table. While doing so, performs some checks on L2 @@ -1601,26 +1664,41 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res, int flags, BdrvCheckMode fix, bool active) { BDRVQcow2State *s = bs->opaque; - uint64_t *l2_table, l2_entry; + uint64_t l2_entry, l2_bitmap; uint64_t next_contiguous_offset = 0; - int i, l2_size, nb_csectors, ret; + int i, ret; + size_t l2_size_bytes = s->l2_size * l2_entry_size(s); + g_autofree uint64_t *l2_table = g_malloc(l2_size_bytes); + bool metadata_overlap; /* Read L2 table from disk */ - l2_size = s->l2_size * l2_entry_size(s); - l2_table = g_malloc(l2_size); - - ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size); + ret = bdrv_pread(bs->file, l2_offset, l2_size_bytes, l2_table, 0); if (ret < 0) { fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n"); res->check_errors++; - goto fail; + return ret; } /* Do the actual checks */ - for(i = 0; i < s->l2_size; i++) { - l2_entry = get_l2_entry(s, l2_table, i); + for (i = 0; i < s->l2_size; i++) { + uint64_t coffset; + int csize; + QCow2ClusterType type; - switch (qcow2_get_cluster_type(bs, l2_entry)) { + l2_entry = get_l2_entry(s, l2_table, i); + l2_bitmap = get_l2_bitmap(s, l2_table, i); + type = qcow2_get_cluster_type(bs, l2_entry); + + if (type != QCOW2_CLUSTER_COMPRESSED) { + /* Check reserved bits of Standard Cluster Descriptor */ + if (l2_entry & L2E_STD_RESERVED_MASK) { + fprintf(stderr, "ERROR found l2 entry with reserved bits set: " + "%" PRIx64 "\n", l2_entry); + res->corruptions++; + } + } + + switch (type) { case QCOW2_CLUSTER_COMPRESSED: /* Compressed clusters don't have QCOW_OFLAG_COPIED */ if (l2_entry & QCOW_OFLAG_COPIED) { @@ -1638,23 +1716,28 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res, break; } + if (l2_bitmap) { + fprintf(stderr, "ERROR compressed cluster %d with non-zero " + "subcluster allocation bitmap, entry=0x%" PRIx64 "\n", + i, l2_entry); + res->corruptions++; + break; + } + /* Mark cluster as used */ - nb_csectors = ((l2_entry >> s->csize_shift) & - s->csize_mask) + 1; - l2_entry &= 
s->cluster_offset_mask; + qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize); ret = qcow2_inc_refcounts_imrt( - bs, res, refcount_table, refcount_table_size, - l2_entry & QCOW2_COMPRESSED_SECTOR_MASK, - nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE); + bs, res, refcount_table, refcount_table_size, coffset, csize); if (ret < 0) { - goto fail; + return ret; } if (flags & CHECK_FRAG_INFO) { res->bfi.allocated_clusters++; res->bfi.compressed_clusters++; - /* Compressed clusters are fragmented by nature. Since they + /* + * Compressed clusters are fragmented by nature. Since they * take up sub-sector space but we only have sector granularity * I/O we need to re-read the same sectors even for adjacent * compressed clusters. @@ -1668,13 +1751,19 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res, { uint64_t offset = l2_entry & L2E_OFFSET_MASK; + if ((l2_bitmap >> 32) & l2_bitmap) { + res->corruptions++; + fprintf(stderr, "ERROR offset=%" PRIx64 ": Allocated " + "cluster has corrupted subcluster allocation bitmap\n", + offset); + } + /* Correct offsets are cluster aligned */ if (offset_into_cluster(s, offset)) { bool contains_data; res->corruptions++; if (has_subclusters(s)) { - uint64_t l2_bitmap = get_l2_bitmap(s, l2_table, i); contains_data = (l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC); } else { contains_data = !(l2_entry & QCOW_OFLAG_ZERO); @@ -1687,40 +1776,30 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res, fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR", offset); if (fix & BDRV_FIX_ERRORS) { - int idx = i * (l2_entry_size(s) / sizeof(uint64_t)); - uint64_t l2e_offset = - l2_offset + (uint64_t)i * l2_entry_size(s); - int ign = active ? QCOW2_OL_ACTIVE_L2 : - QCOW2_OL_INACTIVE_L2; - - l2_entry = has_subclusters(s) ? 0 : QCOW_OFLAG_ZERO; - set_l2_entry(s, l2_table, i, l2_entry); - ret = qcow2_pre_write_overlap_check(bs, ign, - l2e_offset, l2_entry_size(s), false); - if (ret < 0) { - fprintf(stderr, "ERROR: Overlap check failed\n"); - res->check_errors++; - /* Something is seriously wrong, so abort checking - * this L2 table */ - goto fail; + ret = fix_l2_entry_by_zero(bs, res, l2_offset, + l2_table, i, active, + &metadata_overlap); + if (metadata_overlap) { + /* + * Something is seriously wrong, so abort checking + * this L2 table. + */ + return ret; } - ret = bdrv_pwrite_sync(bs->file, l2e_offset, - &l2_table[idx], - l2_entry_size(s)); - if (ret < 0) { - fprintf(stderr, "ERROR: Failed to overwrite L2 " - "table entry: %s\n", strerror(-ret)); - res->check_errors++; - /* Do not abort, continue checking the rest of this - * L2 table's entries */ - } else { - res->corruptions--; - res->corruptions_fixed++; - /* Skip marking the cluster as used - * (it is unused now) */ + if (ret == 0) { + /* + * Skip marking the cluster as used + * (it is unused now). + */ continue; } + + /* + * Failed to fix. + * Do not abort, continue checking the rest of this + * L2 table's entries. 
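The compressed-cluster branch above now goes through qcow2_parse_compressed_l2_entry() instead of open-coding the bit fiddling. The arithmetic can be shown in isolation; this is only an illustration, with the shift/mask derivation written out the way the driver computes it from the cluster size, and a local constant standing in for QCOW2_COMPRESSED_SECTOR_SIZE (512 bytes):

#include <assert.h>
#include <stdint.h>

#define COMPRESSED_SECTOR_SIZE 512u

/*
 * Decode a compressed-cluster L2 entry into a host offset and a byte
 * count.  With cluster_bits bits per cluster, the sector count occupies
 * (cluster_bits - 8) bits starting at csize_shift, and everything below
 * that is the host offset of the compressed data.
 */
static void parse_compressed_entry(uint64_t l2_entry, unsigned cluster_bits,
                                   uint64_t *coffset, int *csize)
{
    unsigned csize_shift = 62 - (cluster_bits - 8);
    uint64_t csize_mask = (1u << (cluster_bits - 8)) - 1;
    uint64_t offset_mask = (1ull << csize_shift) - 1;
    int nb_csectors;

    *coffset = l2_entry & offset_mask;
    nb_csectors = ((l2_entry >> csize_shift) & csize_mask) + 1;
    /* the part of the first sector before coffset belongs to the previous cluster */
    *csize = nb_csectors * COMPRESSED_SECTOR_SIZE -
             (*coffset & (COMPRESSED_SECTOR_SIZE - 1));
}

int main(void)
{
    uint64_t coffset;
    int csize;

    /* 64k clusters: data at host offset 0x10200, spanning two 512-byte sectors */
    parse_compressed_entry((1ull << 54) | 0x10200, 16, &coffset, &csize);
    assert(coffset == 0x10200 && csize == 2 * 512 - 0x200);
    return 0;
}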
+ */ } } else { fprintf(stderr, "ERROR offset=%" PRIx64 ": Data cluster is " @@ -1743,14 +1822,23 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res, refcount_table_size, offset, s->cluster_size); if (ret < 0) { - goto fail; + return ret; } } break; } case QCOW2_CLUSTER_ZERO_PLAIN: + /* Impossible when image has subclusters */ + assert(!l2_bitmap); + break; + case QCOW2_CLUSTER_UNALLOCATED: + if (l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC) { + res->corruptions++; + fprintf(stderr, "ERROR: Unallocated " + "cluster has non-zero subcluster allocation map\n"); + } break; default: @@ -1758,12 +1846,7 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res, } } - g_free(l2_table); return 0; - -fail: - g_free(l2_table); - return ret; } /* @@ -1782,71 +1865,79 @@ static int check_refcounts_l1(BlockDriverState *bs, int flags, BdrvCheckMode fix, bool active) { BDRVQcow2State *s = bs->opaque; - uint64_t *l1_table = NULL, l2_offset, l1_size2; + size_t l1_size_bytes = l1_size * L1E_SIZE; + g_autofree uint64_t *l1_table = NULL; + uint64_t l2_offset; int i, ret; - l1_size2 = l1_size * L1E_SIZE; + if (!l1_size) { + return 0; + } /* Mark L1 table as used */ ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, refcount_table_size, - l1_table_offset, l1_size2); + l1_table_offset, l1_size_bytes); if (ret < 0) { - goto fail; + return ret; + } + + l1_table = g_try_malloc(l1_size_bytes); + if (l1_table == NULL) { + res->check_errors++; + return -ENOMEM; } /* Read L1 table entries from disk */ - if (l1_size2 > 0) { - l1_table = g_try_malloc(l1_size2); - if (l1_table == NULL) { - ret = -ENOMEM; - res->check_errors++; - goto fail; - } - ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2); - if (ret < 0) { - fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n"); - res->check_errors++; - goto fail; - } - for(i = 0;i < l1_size; i++) - be64_to_cpus(&l1_table[i]); + ret = bdrv_pread(bs->file, l1_table_offset, l1_size_bytes, l1_table, 0); + if (ret < 0) { + fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n"); + res->check_errors++; + return ret; + } + + for (i = 0; i < l1_size; i++) { + be64_to_cpus(&l1_table[i]); } /* Do the actual checks */ - for(i = 0; i < l1_size; i++) { - l2_offset = l1_table[i]; - if (l2_offset) { - /* Mark L2 table as used */ - l2_offset &= L1E_OFFSET_MASK; - ret = qcow2_inc_refcounts_imrt(bs, res, - refcount_table, refcount_table_size, - l2_offset, s->cluster_size); - if (ret < 0) { - goto fail; - } + for (i = 0; i < l1_size; i++) { + if (!l1_table[i]) { + continue; + } - /* L2 tables are cluster aligned */ - if (offset_into_cluster(s, l2_offset)) { - fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not " - "cluster aligned; L1 entry corrupted\n", l2_offset); - res->corruptions++; - } + if (l1_table[i] & L1E_RESERVED_MASK) { + fprintf(stderr, "ERROR found L1 entry with reserved bits set: " + "%" PRIx64 "\n", l1_table[i]); + res->corruptions++; + } - /* Process and check L2 entries */ - ret = check_refcounts_l2(bs, res, refcount_table, - refcount_table_size, l2_offset, flags, - fix, active); - if (ret < 0) { - goto fail; - } + l2_offset = l1_table[i] & L1E_OFFSET_MASK; + + /* Mark L2 table as used */ + ret = qcow2_inc_refcounts_imrt(bs, res, + refcount_table, refcount_table_size, + l2_offset, s->cluster_size); + if (ret < 0) { + return ret; + } + + /* L2 tables are cluster aligned */ + if (offset_into_cluster(s, l2_offset)) { + fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not " + "cluster aligned; L1 entry 
corrupted\n", l2_offset); + res->corruptions++; + } + + /* Process and check L2 entries */ + ret = check_refcounts_l2(bs, res, refcount_table, + refcount_table_size, l2_offset, flags, + fix, active); + if (ret < 0) { + return ret; } } - g_free(l1_table); - return 0; -fail: - g_free(l1_table); - return ret; + return 0; } /* @@ -1913,8 +2004,8 @@ static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res, } } - ret = bdrv_pread(bs->file, l2_offset, l2_table, - s->l2_size * l2_entry_size(s)); + ret = bdrv_pread(bs->file, l2_offset, s->l2_size * l2_entry_size(s), + l2_table, 0); if (ret < 0) { fprintf(stderr, "ERROR: Could not read L2 table: %s\n", strerror(-ret)); @@ -1967,8 +2058,8 @@ static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res, goto fail; } - ret = bdrv_pwrite(bs->file, l2_offset, l2_table, - s->cluster_size); + ret = bdrv_pwrite(bs->file, l2_offset, s->cluster_size, l2_table, + 0); if (ret < 0) { fprintf(stderr, "ERROR: Could not write L2 table: %s\n", strerror(-ret)); @@ -2001,9 +2092,17 @@ static int check_refblocks(BlockDriverState *bs, BdrvCheckResult *res, for(i = 0; i < s->refcount_table_size; i++) { uint64_t offset, cluster; - offset = s->refcount_table[i]; + offset = s->refcount_table[i] & REFT_OFFSET_MASK; cluster = offset >> s->cluster_bits; + if (s->refcount_table[i] & REFT_RESERVED_MASK) { + fprintf(stderr, "ERROR refcount table entry %" PRId64 " has " + "reserved bits set\n", i); + res->corruptions++; + *rebuild = true; + continue; + } + /* Refcount blocks are cluster aligned */ if (offset_into_cluster(s, offset)) { fprintf(stderr, "ERROR refcount block %" PRId64 " is not " @@ -2338,10 +2437,165 @@ static int64_t alloc_clusters_imrt(BlockDriverState *bs, return cluster << s->cluster_bits; } +/* + * Helper function for rebuild_refcount_structure(). + * + * Scan the range of clusters [first_cluster, end_cluster) for allocated + * clusters and write all corresponding refblocks to disk. The refblock + * and allocation data is taken from the in-memory refcount table + * *refcount_table[] (of size *nb_clusters), which is basically one big + * (unlimited size) refblock for the whole image. + * + * For these refblocks, clusters are allocated using said in-memory + * refcount table. Care is taken that these allocations are reflected + * in the refblocks written to disk. + * + * The refblocks' offsets are written into a reftable, which is + * *on_disk_reftable_ptr[] (of size *on_disk_reftable_entries_ptr). If + * that reftable is of insufficient size, it will be resized to fit. + * This reftable is not written to disk. + * + * (If *on_disk_reftable_ptr is not NULL, the entries within are assumed + * to point to existing valid refblocks that do not need to be allocated + * again.) + * + * Return whether the on-disk reftable array was resized (true/false), + * or -errno on error. 
+ */ +static int rebuild_refcounts_write_refblocks( + BlockDriverState *bs, void **refcount_table, int64_t *nb_clusters, + int64_t first_cluster, int64_t end_cluster, + uint64_t **on_disk_reftable_ptr, uint32_t *on_disk_reftable_entries_ptr, + Error **errp + ) +{ + BDRVQcow2State *s = bs->opaque; + int64_t cluster; + int64_t refblock_offset, refblock_start, refblock_index; + int64_t first_free_cluster = 0; + uint64_t *on_disk_reftable = *on_disk_reftable_ptr; + uint32_t on_disk_reftable_entries = *on_disk_reftable_entries_ptr; + void *on_disk_refblock; + bool reftable_grown = false; + int ret; + + for (cluster = first_cluster; cluster < end_cluster; cluster++) { + /* Check all clusters to find refblocks that contain non-zero entries */ + if (!s->get_refcount(*refcount_table, cluster)) { + continue; + } + + /* + * This cluster is allocated, so we need to create a refblock + * for it. The data we will write to disk is just the + * respective slice from *refcount_table, so it will contain + * accurate refcounts for all clusters belonging to this + * refblock. After we have written it, we will therefore skip + * all remaining clusters in this refblock. + */ + + refblock_index = cluster >> s->refcount_block_bits; + refblock_start = refblock_index << s->refcount_block_bits; + + if (on_disk_reftable_entries > refblock_index && + on_disk_reftable[refblock_index]) + { + /* + * We can get here after a `goto write_refblocks`: We have a + * reftable from a previous run, and the refblock is already + * allocated. No need to allocate it again. + */ + refblock_offset = on_disk_reftable[refblock_index]; + } else { + int64_t refblock_cluster_index; + + /* Don't allocate a cluster in a refblock already written to disk */ + if (first_free_cluster < refblock_start) { + first_free_cluster = refblock_start; + } + refblock_offset = alloc_clusters_imrt(bs, 1, refcount_table, + nb_clusters, + &first_free_cluster); + if (refblock_offset < 0) { + error_setg_errno(errp, -refblock_offset, + "ERROR allocating refblock"); + return refblock_offset; + } + + refblock_cluster_index = refblock_offset / s->cluster_size; + if (refblock_cluster_index >= end_cluster) { + /* + * We must write the refblock that holds this refblock's + * refcount + */ + end_cluster = refblock_cluster_index + 1; + } + + if (on_disk_reftable_entries <= refblock_index) { + on_disk_reftable_entries = + ROUND_UP((refblock_index + 1) * REFTABLE_ENTRY_SIZE, + s->cluster_size) / REFTABLE_ENTRY_SIZE; + on_disk_reftable = + g_try_realloc(on_disk_reftable, + on_disk_reftable_entries * + REFTABLE_ENTRY_SIZE); + if (!on_disk_reftable) { + error_setg(errp, "ERROR allocating reftable memory"); + return -ENOMEM; + } + + memset(on_disk_reftable + *on_disk_reftable_entries_ptr, 0, + (on_disk_reftable_entries - + *on_disk_reftable_entries_ptr) * + REFTABLE_ENTRY_SIZE); + + *on_disk_reftable_ptr = on_disk_reftable; + *on_disk_reftable_entries_ptr = on_disk_reftable_entries; + + reftable_grown = true; + } else { + assert(on_disk_reftable); + } + on_disk_reftable[refblock_index] = refblock_offset; + } + + /* Refblock is allocated, write it to disk */ + + ret = qcow2_pre_write_overlap_check(bs, 0, refblock_offset, + s->cluster_size, false); + if (ret < 0) { + error_setg_errno(errp, -ret, "ERROR writing refblock"); + return ret; + } + + /* + * The refblock is simply a slice of *refcount_table. + * Note that the size of *refcount_table is always aligned to + * whole clusters, so the write operation will not result in + * out-of-bounds accesses. 
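A recurring mechanical change in this patch is the block-layer I/O calling convention: the byte count now precedes the buffer and a BdrvRequestFlags argument is appended. Shown here for the refblock write that follows, with both lines taken from this hunk and placed side by side:

/* old convention: buffer before length, no flags */
ret = bdrv_pwrite(bs->file, refblock_offset, on_disk_refblock,
                  s->cluster_size);

/* new convention: length before buffer, plus BdrvRequestFlags (0 here) */
ret = bdrv_pwrite(bs->file, refblock_offset, s->cluster_size,
                  on_disk_refblock, 0);

The same reordering applies to the other helpers converted in this patch, such as bdrv_pread(), bdrv_pwrite_sync(), bdrv_co_pread() and blk_co_pwrite().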
+ */ + on_disk_refblock = (void *)((char *) *refcount_table + + refblock_index * s->cluster_size); + + ret = bdrv_pwrite(bs->file, refblock_offset, s->cluster_size, + on_disk_refblock, 0); + if (ret < 0) { + error_setg_errno(errp, -ret, "ERROR writing refblock"); + return ret; + } + + /* This refblock is done, skip to its end */ + cluster = refblock_start + s->refcount_block_size - 1; + } + + return reftable_grown; +} + /* * Creates a new refcount structure based solely on the in-memory information - * given through *refcount_table. All necessary allocations will be reflected - * in that array. + * given through *refcount_table (this in-memory information is basically just + * the concatenation of all refblocks). All necessary allocations will be + * reflected in that array. * * On success, the old refcount structure is leaked (it will be covered by the * new refcount structure). @@ -2349,15 +2603,18 @@ static int64_t alloc_clusters_imrt(BlockDriverState *bs, static int rebuild_refcount_structure(BlockDriverState *bs, BdrvCheckResult *res, void **refcount_table, - int64_t *nb_clusters) + int64_t *nb_clusters, + Error **errp) { BDRVQcow2State *s = bs->opaque; - int64_t first_free_cluster = 0, reftable_offset = -1, cluster = 0; - int64_t refblock_offset, refblock_start, refblock_index; - uint32_t reftable_size = 0; + int64_t reftable_offset = -1; + int64_t reftable_length = 0; + int64_t reftable_clusters; + int64_t refblock_index; + uint32_t on_disk_reftable_entries = 0; uint64_t *on_disk_reftable = NULL; - void *on_disk_refblock; int ret = 0; + int reftable_size_changed = 0; struct { uint64_t reftable_offset; uint32_t reftable_clusters; @@ -2365,162 +2622,145 @@ static int rebuild_refcount_structure(BlockDriverState *bs, qcow2_cache_empty(bs, s->refcount_block_cache); -write_refblocks: - for (; cluster < *nb_clusters; cluster++) { - if (!s->get_refcount(*refcount_table, cluster)) { - continue; - } + /* + * For each refblock containing entries, we try to allocate a + * cluster (in the in-memory refcount table) and write its offset + * into on_disk_reftable[]. We then write the whole refblock to + * disk (as a slice of the in-memory refcount table). + * This is done by rebuild_refcounts_write_refblocks(). + * + * Once we have scanned all clusters, we try to find space for the + * reftable. This will dirty the in-memory refcount table (i.e. + * make it differ from the refblocks we have already written), so we + * need to run rebuild_refcounts_write_refblocks() again for the + * range of clusters where the reftable has been allocated. + * + * This second run might make the reftable grow again, in which case + * we will need to allocate another space for it, which is why we + * repeat all this until the reftable stops growing. + * + * (This loop will terminate, because with every cluster the + * reftable grows, it can accomodate a multitude of more refcounts, + * so that at some point this must be able to cover the reftable + * and all refblocks describing it.) + * + * We then convert the reftable to big-endian and write it to disk. + * + * Note that we never free any reftable allocations. Doing so would + * needlessly complicate the algorithm: The eventual second check + * run we do will clean up all leaks we have caused. 
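A condensed sketch of the control flow this comment describes, with error handling dropped and a few names shortened; the real code is in the hunks that follow:

/* Pass 1: write refblocks for every cluster currently in use. */
rebuild_refcounts_write_refblocks(bs, refcount_table, nb_clusters,
                                  0, *nb_clusters,
                                  &reftable, &reftable_entries, errp);

do {
    /* Reserve clusters for the reftable at its current size... */
    reftable_clusters = size_to_clusters(s, reftable_entries * REFTABLE_ENTRY_SIZE);
    reftable_offset = alloc_clusters_imrt(bs, reftable_clusters, refcount_table,
                                          nb_clusters, &first_free_cluster);

    /* ...and bring the refblocks covering that allocation up to date. */
    grown = rebuild_refcounts_write_refblocks(bs, refcount_table, nb_clusters,
                                              reftable_offset / s->cluster_size,
                                              reftable_offset / s->cluster_size
                                                  + reftable_clusters,
                                              &reftable, &reftable_entries, errp);

    /* If the reftable grew, its allocation may be too small now: retry. */
} while (grown);

/* Fixed point reached: byte-swap the reftable and write it out. */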
+ */ - refblock_index = cluster >> s->refcount_block_bits; - refblock_start = refblock_index << s->refcount_block_bits; - - /* Don't allocate a cluster in a refblock already written to disk */ - if (first_free_cluster < refblock_start) { - first_free_cluster = refblock_start; - } - refblock_offset = alloc_clusters_imrt(bs, 1, refcount_table, - nb_clusters, &first_free_cluster); - if (refblock_offset < 0) { - fprintf(stderr, "ERROR allocating refblock: %s\n", - strerror(-refblock_offset)); - res->check_errors++; - ret = refblock_offset; - goto fail; - } - - if (reftable_size <= refblock_index) { - uint32_t old_reftable_size = reftable_size; - uint64_t *new_on_disk_reftable; - - reftable_size = ROUND_UP((refblock_index + 1) * REFTABLE_ENTRY_SIZE, - s->cluster_size) / REFTABLE_ENTRY_SIZE; - new_on_disk_reftable = g_try_realloc(on_disk_reftable, - reftable_size * - REFTABLE_ENTRY_SIZE); - if (!new_on_disk_reftable) { - res->check_errors++; - ret = -ENOMEM; - goto fail; - } - on_disk_reftable = new_on_disk_reftable; - - memset(on_disk_reftable + old_reftable_size, 0, - (reftable_size - old_reftable_size) * REFTABLE_ENTRY_SIZE); - - /* The offset we have for the reftable is now no longer valid; - * this will leak that range, but we can easily fix that by running - * a leak-fixing check after this rebuild operation */ - reftable_offset = -1; - } else { - assert(on_disk_reftable); - } - on_disk_reftable[refblock_index] = refblock_offset; - - /* If this is apparently the last refblock (for now), try to squeeze the - * reftable in */ - if (refblock_index == (*nb_clusters - 1) >> s->refcount_block_bits && - reftable_offset < 0) - { - uint64_t reftable_clusters = size_to_clusters(s, reftable_size * - REFTABLE_ENTRY_SIZE); - reftable_offset = alloc_clusters_imrt(bs, reftable_clusters, - refcount_table, nb_clusters, - &first_free_cluster); - if (reftable_offset < 0) { - fprintf(stderr, "ERROR allocating reftable: %s\n", - strerror(-reftable_offset)); - res->check_errors++; - ret = reftable_offset; - goto fail; - } - } - - ret = qcow2_pre_write_overlap_check(bs, 0, refblock_offset, - s->cluster_size, false); - if (ret < 0) { - fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret)); - goto fail; - } - - /* The size of *refcount_table is always cluster-aligned, therefore the - * write operation will not overflow */ - on_disk_refblock = (void *)((char *) *refcount_table + - refblock_index * s->cluster_size); - - ret = bdrv_pwrite(bs->file, refblock_offset, on_disk_refblock, - s->cluster_size); - if (ret < 0) { - fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret)); - goto fail; - } - - /* Go to the end of this refblock */ - cluster = refblock_start + s->refcount_block_size - 1; + reftable_size_changed = + rebuild_refcounts_write_refblocks(bs, refcount_table, nb_clusters, + 0, *nb_clusters, + &on_disk_reftable, + &on_disk_reftable_entries, errp); + if (reftable_size_changed < 0) { + res->check_errors++; + ret = reftable_size_changed; + goto fail; } - if (reftable_offset < 0) { - uint64_t post_refblock_start, reftable_clusters; + /* + * There was no reftable before, so rebuild_refcounts_write_refblocks() + * must have increased its size (from 0 to something). 
+ */ + assert(reftable_size_changed); + + do { + int64_t reftable_start_cluster, reftable_end_cluster; + int64_t first_free_cluster = 0; + + reftable_length = on_disk_reftable_entries * REFTABLE_ENTRY_SIZE; + reftable_clusters = size_to_clusters(s, reftable_length); - post_refblock_start = ROUND_UP(*nb_clusters, s->refcount_block_size); - reftable_clusters = - size_to_clusters(s, reftable_size * REFTABLE_ENTRY_SIZE); - /* Not pretty but simple */ - if (first_free_cluster < post_refblock_start) { - first_free_cluster = post_refblock_start; - } reftable_offset = alloc_clusters_imrt(bs, reftable_clusters, refcount_table, nb_clusters, &first_free_cluster); if (reftable_offset < 0) { - fprintf(stderr, "ERROR allocating reftable: %s\n", - strerror(-reftable_offset)); + error_setg_errno(errp, -reftable_offset, + "ERROR allocating reftable"); res->check_errors++; ret = reftable_offset; goto fail; } - goto write_refblocks; - } + /* + * We need to update the affected refblocks, so re-run the + * write_refblocks loop for the reftable's range of clusters. + */ + assert(offset_into_cluster(s, reftable_offset) == 0); + reftable_start_cluster = reftable_offset / s->cluster_size; + reftable_end_cluster = reftable_start_cluster + reftable_clusters; + reftable_size_changed = + rebuild_refcounts_write_refblocks(bs, refcount_table, nb_clusters, + reftable_start_cluster, + reftable_end_cluster, + &on_disk_reftable, + &on_disk_reftable_entries, errp); + if (reftable_size_changed < 0) { + res->check_errors++; + ret = reftable_size_changed; + goto fail; + } - for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) { + /* + * If the reftable size has changed, we will need to find a new + * allocation, repeating the loop. + */ + } while (reftable_size_changed); + + /* The above loop must have run at least once */ + assert(reftable_offset >= 0); + + /* + * All allocations are done, all refblocks are written, convert the + * reftable to big-endian and write it to disk. 
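The byte-swapping loops around the write below exist because all qcow2 on-disk tables are stored big-endian while the in-memory copies stay in host order: the table is converted just before the write and converted back right after. A self-contained sketch of that round trip, with local helpers standing in for cpu_to_be64s()/be64_to_cpus():

#include <assert.h>
#include <stdint.h>

/* Store a host-order value as big-endian bytes (stand-in for cpu_to_be64s()). */
static void store_be64(uint8_t *out, uint64_t v)
{
    for (int i = 0; i < 8; i++) {
        out[i] = (uint8_t)(v >> (56 - 8 * i));
    }
}

/* Load it back into host order (stand-in for be64_to_cpus()). */
static uint64_t load_be64(const uint8_t *in)
{
    uint64_t v = 0;
    for (int i = 0; i < 8; i++) {
        v = (v << 8) | in[i];
    }
    return v;
}

int main(void)
{
    uint8_t buf[8];
    uint64_t reftable_entry = 0x70000; /* e.g. a refblock at host offset 0x70000 */

    store_be64(buf, reftable_entry);
    assert(buf[5] == 0x07 && buf[7] == 0x00);
    assert(load_be64(buf) == reftable_entry);
    return 0;
}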
+ */ + + for (refblock_index = 0; refblock_index < on_disk_reftable_entries; + refblock_index++) + { cpu_to_be64s(&on_disk_reftable[refblock_index]); } - ret = qcow2_pre_write_overlap_check(bs, 0, reftable_offset, - reftable_size * REFTABLE_ENTRY_SIZE, + ret = qcow2_pre_write_overlap_check(bs, 0, reftable_offset, reftable_length, false); if (ret < 0) { - fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret)); + error_setg_errno(errp, -ret, "ERROR writing reftable"); goto fail; } - assert(reftable_size < INT_MAX / REFTABLE_ENTRY_SIZE); - ret = bdrv_pwrite(bs->file, reftable_offset, on_disk_reftable, - reftable_size * REFTABLE_ENTRY_SIZE); + assert(reftable_length < INT_MAX); + ret = bdrv_pwrite(bs->file, reftable_offset, reftable_length, + on_disk_reftable, 0); if (ret < 0) { - fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret)); + error_setg_errno(errp, -ret, "ERROR writing reftable"); goto fail; } /* Enter new reftable into the image header */ reftable_offset_and_clusters.reftable_offset = cpu_to_be64(reftable_offset); reftable_offset_and_clusters.reftable_clusters = - cpu_to_be32(size_to_clusters(s, reftable_size * REFTABLE_ENTRY_SIZE)); + cpu_to_be32(reftable_clusters); ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, refcount_table_offset), - &reftable_offset_and_clusters, - sizeof(reftable_offset_and_clusters)); + sizeof(reftable_offset_and_clusters), + &reftable_offset_and_clusters, 0); if (ret < 0) { - fprintf(stderr, "ERROR setting reftable: %s\n", strerror(-ret)); + error_setg_errno(errp, -ret, "ERROR setting reftable"); goto fail; } - for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) { + for (refblock_index = 0; refblock_index < on_disk_reftable_entries; + refblock_index++) + { be64_to_cpus(&on_disk_reftable[refblock_index]); } s->refcount_table = on_disk_reftable; s->refcount_table_offset = reftable_offset; - s->refcount_table_size = reftable_size; + s->refcount_table_size = on_disk_reftable_entries; update_max_refcount_table_index(s); return 0; @@ -2577,11 +2817,13 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res, if (rebuild && (fix & BDRV_FIX_ERRORS)) { BdrvCheckResult old_res = *res; int fresh_leaks = 0; + Error *local_err = NULL; fprintf(stderr, "Rebuilding refcount structure\n"); ret = rebuild_refcount_structure(bs, res, &refcount_table, - &nb_clusters); + &nb_clusters, &local_err); if (ret < 0) { + error_report_err(local_err); goto fail; } @@ -2767,7 +3009,7 @@ int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset, return -ENOMEM; } - ret = bdrv_pread(bs->file, l1_ofs, l1, l1_sz2); + ret = bdrv_pread(bs->file, l1_ofs, l1_sz2, l1, 0); if (ret < 0) { g_free(l1); return ret; @@ -2938,7 +3180,7 @@ static int flush_refblock(BlockDriverState *bs, uint64_t **reftable, return ret; } - ret = bdrv_pwrite(bs->file, offset, refblock, s->cluster_size); + ret = bdrv_pwrite(bs->file, offset, s->cluster_size, refblock, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to write refblock"); return ret; @@ -3210,8 +3452,9 @@ int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order, cpu_to_be64s(&new_reftable[i]); } - ret = bdrv_pwrite(bs->file, new_reftable_offset, new_reftable, - new_reftable_size * REFTABLE_ENTRY_SIZE); + ret = bdrv_pwrite(bs->file, new_reftable_offset, + new_reftable_size * REFTABLE_ENTRY_SIZE, new_reftable, + 0); for (i = 0; i < new_reftable_size; i++) { be64_to_cpus(&new_reftable[i]); @@ -3316,8 +3559,8 @@ static int64_t 
get_refblock_offset(BlockDriverState *bs, uint64_t offset) return covering_refblock_offset; } -static int qcow2_discard_refcount_block(BlockDriverState *bs, - uint64_t discard_block_offs) +static int coroutine_fn +qcow2_discard_refcount_block(BlockDriverState *bs, uint64_t discard_block_offs) { BDRVQcow2State *s = bs->opaque; int64_t refblock_offs; @@ -3373,7 +3616,7 @@ static int qcow2_discard_refcount_block(BlockDriverState *bs, return 0; } -int qcow2_shrink_reftable(BlockDriverState *bs) +int coroutine_fn qcow2_shrink_reftable(BlockDriverState *bs) { BDRVQcow2State *s = bs->opaque; uint64_t *reftable_tmp = @@ -3414,8 +3657,9 @@ int qcow2_shrink_reftable(BlockDriverState *bs) reftable_tmp[i] = unused_block ? 0 : cpu_to_be64(s->refcount_table[i]); } - ret = bdrv_pwrite_sync(bs->file, s->refcount_table_offset, reftable_tmp, - s->refcount_table_size * REFTABLE_ENTRY_SIZE); + ret = bdrv_co_pwrite_sync(bs->file, s->refcount_table_offset, + s->refcount_table_size * REFTABLE_ENTRY_SIZE, + reftable_tmp, 0); /* * If the write in the reftable failed the image may contain a partially * overwritten reftable. In this case it would be better to clear the @@ -3462,7 +3706,7 @@ int64_t qcow2_get_last_cluster(BlockDriverState *bs, int64_t size) return -EIO; } -int qcow2_detect_metadata_preallocation(BlockDriverState *bs) +int coroutine_fn qcow2_detect_metadata_preallocation(BlockDriverState *bs) { BDRVQcow2State *s = bs->opaque; int64_t i, end_cluster, cluster_count = 0, threshold; diff --git a/block/qcow2-snapshot.c b/block/qcow2-snapshot.c index 71ddb08c21..62e8a0335d 100644 --- a/block/qcow2-snapshot.c +++ b/block/qcow2-snapshot.c @@ -29,6 +29,7 @@ #include "qemu/bswap.h" #include "qemu/error-report.h" #include "qemu/cutils.h" +#include "qemu/memalign.h" static void qcow2_free_single_snapshot(BlockDriverState *bs, int i) { @@ -107,7 +108,7 @@ static int qcow2_do_read_snapshots(BlockDriverState *bs, bool repair, /* Read statically sized part of the snapshot header */ offset = ROUND_UP(offset, 8); - ret = bdrv_pread(bs->file, offset, &h, sizeof(h)); + ret = bdrv_pread(bs->file, offset, sizeof(h), &h, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to read snapshot table"); goto fail; @@ -145,8 +146,8 @@ static int qcow2_do_read_snapshots(BlockDriverState *bs, bool repair, } /* Read known extra data */ - ret = bdrv_pread(bs->file, offset, &extra, - MIN(sizeof(extra), sn->extra_data_size)); + ret = bdrv_pread(bs->file, offset, + MIN(sizeof(extra), sn->extra_data_size), &extra, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to read snapshot table"); goto fail; @@ -183,8 +184,8 @@ static int qcow2_do_read_snapshots(BlockDriverState *bs, bool repair, /* Store unknown extra data */ unknown_extra_data_size = sn->extra_data_size - sizeof(extra); sn->unknown_extra_data = g_malloc(unknown_extra_data_size); - ret = bdrv_pread(bs->file, offset, sn->unknown_extra_data, - unknown_extra_data_size); + ret = bdrv_pread(bs->file, offset, unknown_extra_data_size, + sn->unknown_extra_data, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to read snapshot table"); @@ -195,7 +196,7 @@ static int qcow2_do_read_snapshots(BlockDriverState *bs, bool repair, /* Read snapshot ID */ sn->id_str = g_malloc(id_str_size + 1); - ret = bdrv_pread(bs->file, offset, sn->id_str, id_str_size); + ret = bdrv_pread(bs->file, offset, id_str_size, sn->id_str, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to read snapshot table"); goto fail; @@ -205,7 +206,7 @@ static int qcow2_do_read_snapshots(BlockDriverState 
*bs, bool repair, /* Read snapshot name */ sn->name = g_malloc(name_size + 1); - ret = bdrv_pread(bs->file, offset, sn->name, name_size); + ret = bdrv_pread(bs->file, offset, name_size, sn->name, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to read snapshot table"); goto fail; @@ -348,13 +349,13 @@ int qcow2_write_snapshots(BlockDriverState *bs) h.name_size = cpu_to_be16(name_size); offset = ROUND_UP(offset, 8); - ret = bdrv_pwrite(bs->file, offset, &h, sizeof(h)); + ret = bdrv_pwrite(bs->file, offset, sizeof(h), &h, 0); if (ret < 0) { goto fail; } offset += sizeof(h); - ret = bdrv_pwrite(bs->file, offset, &extra, sizeof(extra)); + ret = bdrv_pwrite(bs->file, offset, sizeof(extra), &extra, 0); if (ret < 0) { goto fail; } @@ -368,21 +369,21 @@ int qcow2_write_snapshots(BlockDriverState *bs) assert(unknown_extra_data_size <= BDRV_REQUEST_MAX_BYTES); assert(sn->unknown_extra_data); - ret = bdrv_pwrite(bs->file, offset, sn->unknown_extra_data, - unknown_extra_data_size); + ret = bdrv_pwrite(bs->file, offset, unknown_extra_data_size, + sn->unknown_extra_data, 0); if (ret < 0) { goto fail; } offset += unknown_extra_data_size; } - ret = bdrv_pwrite(bs->file, offset, sn->id_str, id_str_size); + ret = bdrv_pwrite(bs->file, offset, id_str_size, sn->id_str, 0); if (ret < 0) { goto fail; } offset += id_str_size; - ret = bdrv_pwrite(bs->file, offset, sn->name, name_size); + ret = bdrv_pwrite(bs->file, offset, name_size, sn->name, 0); if (ret < 0) { goto fail; } @@ -405,7 +406,7 @@ int qcow2_write_snapshots(BlockDriverState *bs) header_data.snapshots_offset = cpu_to_be64(snapshots_offset); ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, nb_snapshots), - &header_data, sizeof(header_data)); + sizeof(header_data), &header_data, 0); if (ret < 0) { goto fail; } @@ -440,8 +441,9 @@ int coroutine_fn qcow2_check_read_snapshot_table(BlockDriverState *bs, } QEMU_PACKED snapshot_table_pointer; /* qcow2_do_open() discards this information in check mode */ - ret = bdrv_pread(bs->file, offsetof(QCowHeader, nb_snapshots), - &snapshot_table_pointer, sizeof(snapshot_table_pointer)); + ret = bdrv_co_pread(bs->file, offsetof(QCowHeader, nb_snapshots), + sizeof(snapshot_table_pointer), &snapshot_table_pointer, + 0); if (ret < 0) { result->check_errors++; fprintf(stderr, "ERROR failed to read the snapshot table pointer from " @@ -510,9 +512,9 @@ int coroutine_fn qcow2_check_read_snapshot_table(BlockDriverState *bs, assert(fix & BDRV_FIX_ERRORS); snapshot_table_pointer.nb_snapshots = cpu_to_be32(s->nb_snapshots); - ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, nb_snapshots), - &snapshot_table_pointer.nb_snapshots, - sizeof(snapshot_table_pointer.nb_snapshots)); + ret = bdrv_co_pwrite_sync(bs->file, offsetof(QCowHeader, nb_snapshots), + sizeof(snapshot_table_pointer.nb_snapshots), + &snapshot_table_pointer.nb_snapshots, 0); if (ret < 0) { result->check_errors++; fprintf(stderr, "ERROR failed to update the snapshot count in the " @@ -692,8 +694,8 @@ int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info) goto fail; } - ret = bdrv_pwrite(bs->file, sn->l1_table_offset, l1_table, - s->l1_size * L1E_SIZE); + ret = bdrv_pwrite(bs->file, sn->l1_table_offset, s->l1_size * L1E_SIZE, + l1_table, 0); if (ret < 0) { goto fail; } @@ -828,8 +830,8 @@ int qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id) goto fail; } - ret = bdrv_pread(bs->file, sn->l1_table_offset, - sn_l1_table, sn_l1_bytes); + ret = bdrv_pread(bs->file, sn->l1_table_offset, sn_l1_bytes, sn_l1_table, + 0); if 
(ret < 0) { goto fail; } @@ -847,8 +849,8 @@ int qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id) goto fail; } - ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset, sn_l1_table, - cur_l1_bytes); + ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset, cur_l1_bytes, + sn_l1_table, 0); if (ret < 0) { goto fail; } @@ -1050,8 +1052,8 @@ int qcow2_snapshot_load_tmp(BlockDriverState *bs, return -ENOMEM; } - ret = bdrv_pread(bs->file, sn->l1_table_offset, - new_l1_table, new_l1_bytes); + ret = bdrv_pread(bs->file, sn->l1_table_offset, new_l1_bytes, + new_l1_table, 0); if (ret < 0) { error_setg(errp, "Failed to read l1 table for snapshot"); qemu_vfree(new_l1_table); diff --git a/block/qcow2.c b/block/qcow2.c index 9f1b6461c8..4d6666d3ff 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -38,6 +38,7 @@ #include "qemu/option_int.h" #include "qemu/cutils.h" #include "qemu/bswap.h" +#include "qemu/memalign.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qapi-visit-block-core.h" #include "crypto.h" @@ -74,7 +75,7 @@ typedef struct { static int coroutine_fn qcow2_co_preadv_compressed(BlockDriverState *bs, - uint64_t cluster_descriptor, + uint64_t l2_entry, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, @@ -93,9 +94,9 @@ static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename) } -static ssize_t qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset, - uint8_t *buf, size_t buflen, - void *opaque, Error **errp) +static int qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset, + uint8_t *buf, size_t buflen, + void *opaque, Error **errp) { BlockDriverState *bs = opaque; BDRVQcow2State *s = bs->opaque; @@ -106,18 +107,18 @@ static ssize_t qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset, return -1; } - ret = bdrv_pread(bs->file, - s->crypto_header.offset + offset, buf, buflen); + ret = bdrv_pread(bs->file, s->crypto_header.offset + offset, buflen, buf, + 0); if (ret < 0) { error_setg_errno(errp, -ret, "Could not read encryption header"); return -1; } - return ret; + return 0; } -static ssize_t qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen, - void *opaque, Error **errp) +static int qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen, + void *opaque, Error **errp) { BlockDriverState *bs = opaque; BDRVQcow2State *s = bs->opaque; @@ -150,13 +151,13 @@ static ssize_t qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen, return -1; } - return ret; + return 0; } -static ssize_t qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset, - const uint8_t *buf, size_t buflen, - void *opaque, Error **errp) +static int qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset, + const uint8_t *buf, size_t buflen, + void *opaque, Error **errp) { BlockDriverState *bs = opaque; BDRVQcow2State *s = bs->opaque; @@ -167,13 +168,13 @@ static ssize_t qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset, return -1; } - ret = bdrv_pwrite(bs->file, - s->crypto_header.offset + offset, buf, buflen); + ret = bdrv_pwrite(bs->file, s->crypto_header.offset + offset, buflen, buf, + 0); if (ret < 0) { error_setg_errno(errp, -ret, "Could not read encryption header"); return -1; } - return ret; + return 0; } static QDict* @@ -226,7 +227,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, printf("attempting to read extended header in offset %lu\n", offset); #endif - ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext)); + ret = bdrv_pread(bs->file, offset, sizeof(ext), 
&ext, 0); if (ret < 0) { error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: " "pread fail from offset %" PRIu64, offset); @@ -254,7 +255,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, sizeof(bs->backing_format)); return 2; } - ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len); + ret = bdrv_pread(bs->file, offset, ext.len, bs->backing_format, 0); if (ret < 0) { error_setg_errno(errp, -ret, "ERROR: ext_backing_format: " "Could not read format name"); @@ -270,10 +271,11 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, case QCOW2_EXT_MAGIC_FEATURE_TABLE: if (p_feature_table != NULL) { void *feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature)); - ret = bdrv_pread(bs->file, offset , feature_table, ext.len); + ret = bdrv_pread(bs->file, offset, ext.len, feature_table, 0); if (ret < 0) { error_setg_errno(errp, -ret, "ERROR: ext_feature_table: " "Could not read table"); + g_free(feature_table); return ret; } @@ -295,7 +297,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, return -EINVAL; } - ret = bdrv_pread(bs->file, offset, &s->crypto_header, ext.len); + ret = bdrv_pread(bs->file, offset, ext.len, &s->crypto_header, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Unable to read CRYPTO header extension"); @@ -351,7 +353,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, break; } - ret = bdrv_pread(bs->file, offset, &bitmaps_ext, ext.len); + ret = bdrv_pread(bs->file, offset, ext.len, &bitmaps_ext, 0); if (ret < 0) { error_setg_errno(errp, -ret, "bitmaps_ext: " "Could not read ext header"); @@ -415,7 +417,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, case QCOW2_EXT_MAGIC_DATA_FILE: { s->image_data_file = g_malloc0(ext.len + 1); - ret = bdrv_pread(bs->file, offset, s->image_data_file, ext.len); + ret = bdrv_pread(bs->file, offset, ext.len, s->image_data_file, 0); if (ret < 0) { error_setg_errno(errp, -ret, "ERROR: Could not read data file name"); @@ -439,7 +441,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, uext->len = ext.len; QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next); - ret = bdrv_pread(bs->file, offset , uext->data, uext->len); + ret = bdrv_pread(bs->file, offset, uext->len, uext->data, 0); if (ret < 0) { error_setg_errno(errp, -ret, "ERROR: unknown extension: " "Could not read data"); @@ -515,12 +517,9 @@ int qcow2_mark_dirty(BlockDriverState *bs) } val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY); - ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features), - &val, sizeof(val)); - if (ret < 0) { - return ret; - } - ret = bdrv_flush(bs->file->bs); + ret = bdrv_pwrite_sync(bs->file, + offsetof(QCowHeader, incompatible_features), + sizeof(val), &val, 0); if (ret < 0) { return ret; } @@ -1295,7 +1294,8 @@ static int validate_compression_type(BDRVQcow2State *s, Error **errp) /* Called with s->lock held. 
*/ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options, - int flags, Error **errp) + int flags, bool open_data_file, + Error **errp) { ERRP_GUARD(); BDRVQcow2State *s = bs->opaque; @@ -1306,7 +1306,7 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options, uint64_t l1_vm_state_index; bool update_header = false; - ret = bdrv_pread(bs->file, 0, &header, sizeof(header)); + ret = bdrv_co_pread(bs->file, 0, sizeof(header), &header, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Could not read qcow2 header"); goto fail; @@ -1382,8 +1382,9 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options, if (header.header_length > sizeof(header)) { s->unknown_header_fields_size = header.header_length - sizeof(header); s->unknown_header_fields = g_malloc(s->unknown_header_fields_size); - ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields, - s->unknown_header_fields_size); + ret = bdrv_co_pread(bs->file, sizeof(header), + s->unknown_header_fields_size, + s->unknown_header_fields, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Could not read unknown qcow2 header " "fields"); @@ -1578,8 +1579,8 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options, ret = -ENOMEM; goto fail; } - ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table, - s->l1_size * L1E_SIZE); + ret = bdrv_co_pread(bs->file, s->l1_table_offset, s->l1_size * L1E_SIZE, + s->l1_table, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Could not read L1 table"); goto fail; @@ -1613,50 +1614,52 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options, goto fail; } - /* Open external data file */ - s->data_file = bdrv_open_child(NULL, options, "data-file", bs, - &child_of_bds, BDRV_CHILD_DATA, - true, errp); - if (*errp) { - ret = -EINVAL; - goto fail; - } + if (open_data_file) { + /* Open external data file */ + s->data_file = bdrv_open_child(NULL, options, "data-file", bs, + &child_of_bds, BDRV_CHILD_DATA, + true, errp); + if (*errp) { + ret = -EINVAL; + goto fail; + } - if (s->incompatible_features & QCOW2_INCOMPAT_DATA_FILE) { - if (!s->data_file && s->image_data_file) { - s->data_file = bdrv_open_child(s->image_data_file, options, - "data-file", bs, &child_of_bds, - BDRV_CHILD_DATA, false, errp); + if (s->incompatible_features & QCOW2_INCOMPAT_DATA_FILE) { + if (!s->data_file && s->image_data_file) { + s->data_file = bdrv_open_child(s->image_data_file, options, + "data-file", bs, &child_of_bds, + BDRV_CHILD_DATA, false, errp); + if (!s->data_file) { + ret = -EINVAL; + goto fail; + } + } if (!s->data_file) { + error_setg(errp, "'data-file' is required for this image"); ret = -EINVAL; goto fail; } - } - if (!s->data_file) { - error_setg(errp, "'data-file' is required for this image"); - ret = -EINVAL; - goto fail; - } - /* No data here */ - bs->file->role &= ~BDRV_CHILD_DATA; + /* No data here */ + bs->file->role &= ~BDRV_CHILD_DATA; - /* Must succeed because we have given up permissions if anything */ - bdrv_child_refresh_perms(bs, bs->file, &error_abort); - } else { - if (s->data_file) { - error_setg(errp, "'data-file' can only be set for images with an " - "external data file"); - ret = -EINVAL; - goto fail; - } + /* Must succeed because we have given up permissions if anything */ + bdrv_child_refresh_perms(bs, bs->file, &error_abort); + } else { + if (s->data_file) { + error_setg(errp, "'data-file' can only be set for images with " + "an external data file"); + ret = -EINVAL; + goto fail; + } - s->data_file = 
bs->file; + s->data_file = bs->file; - if (data_file_is_raw(bs)) { - error_setg(errp, "data-file-raw requires a data file"); - ret = -EINVAL; - goto fail; + if (data_file_is_raw(bs)) { + error_setg(errp, "data-file-raw requires a data file"); + ret = -EINVAL; + goto fail; + } } } @@ -1694,16 +1697,27 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options, ret = -EINVAL; goto fail; } - ret = bdrv_pread(bs->file, header.backing_file_offset, - bs->auto_backing_file, len); + + s->image_backing_file = g_malloc(len + 1); + ret = bdrv_co_pread(bs->file, header.backing_file_offset, len, + s->image_backing_file, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Could not read backing file name"); goto fail; } - bs->auto_backing_file[len] = '\0'; - pstrcpy(bs->backing_file, sizeof(bs->backing_file), - bs->auto_backing_file); - s->image_backing_file = g_strdup(bs->auto_backing_file); + s->image_backing_file[len] = '\0'; + + /* + * Update only when something has changed. This function is called by + * qcow2_co_invalidate_cache(), and we do not want to reset + * auto_backing_file unless necessary. + */ + if (!g_str_equal(s->image_backing_file, bs->backing_file)) { + pstrcpy(bs->backing_file, sizeof(bs->backing_file), + s->image_backing_file); + pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file), + s->image_backing_file); + } } /* @@ -1838,7 +1852,7 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options, fail: g_free(s->image_data_file); - if (has_data_file(bs)) { + if (open_data_file && has_data_file(bs)) { bdrv_unref_child(bs, s->data_file); s->data_file = NULL; } @@ -1875,7 +1889,8 @@ static void coroutine_fn qcow2_open_entry(void *opaque) BDRVQcow2State *s = qoc->bs->opaque; qemu_co_mutex_lock(&s->lock); - qoc->ret = qcow2_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp); + qoc->ret = qcow2_do_open(qoc->bs, qoc->options, qoc->flags, true, + qoc->errp); qemu_co_mutex_unlock(&s->lock); } @@ -1890,11 +1905,11 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags, .errp = errp, .ret = -EINPROGRESS }; + int ret; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } /* Initialise locks */ @@ -2205,7 +2220,7 @@ typedef struct Qcow2AioTask { BlockDriverState *bs; QCow2SubclusterType subcluster_type; /* only for read */ - uint64_t host_offset; /* or full descriptor in compressed clusters */ + uint64_t host_offset; /* or l2_entry for compressed read */ uint64_t offset; uint64_t bytes; QEMUIOVector *qiov; @@ -2310,9 +2325,10 @@ static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task) } static coroutine_fn int qcow2_co_preadv_part(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, + int64_t offset, int64_t bytes, QEMUIOVector *qiov, - size_t qiov_offset, int flags) + size_t qiov_offset, + BdrvRequestFlags flags) { BDRVQcow2State *s = bs->opaque; int ret = 0; @@ -2432,7 +2448,7 @@ static bool merge_cow(uint64_t offset, unsigned bytes, * Return 1 if the COW regions read as zeroes, 0 if not, < 0 on error. * Note that returning 0 does not guarantee non-zero data. 
*/ -static int is_zero_cow(BlockDriverState *bs, QCowL2Meta *m) +static int coroutine_fn is_zero_cow(BlockDriverState *bs, QCowL2Meta *m) { /* * This check is designed for optimization shortcut so it must be @@ -2450,7 +2466,8 @@ static int is_zero_cow(BlockDriverState *bs, QCowL2Meta *m) m->cow_end.nb_bytes); } -static int handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta) +static int coroutine_fn handle_alloc_space(BlockDriverState *bs, + QCowL2Meta *l2meta) { BDRVQcow2State *s = bs->opaque; QCowL2Meta *m; @@ -2596,8 +2613,8 @@ static coroutine_fn int qcow2_co_pwritev_task_entry(AioTask *task) } static coroutine_fn int qcow2_co_pwritev_part( - BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, size_t qiov_offset, int flags) + BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags) { BDRVQcow2State *s = bs->opaque; int offset_in_cluster; @@ -2712,7 +2729,7 @@ static int qcow2_inactivate(BlockDriverState *bs) return result; } -static void qcow2_close(BlockDriverState *bs) +static void qcow2_do_close(BlockDriverState *bs, bool close_data_file) { BDRVQcow2State *s = bs->opaque; qemu_vfree(s->l1_table); @@ -2738,7 +2755,7 @@ static void qcow2_close(BlockDriverState *bs) g_free(s->image_backing_file); g_free(s->image_backing_format); - if (has_data_file(bs)) { + if (close_data_file && has_data_file(bs)) { bdrv_unref_child(bs, s->data_file); s->data_file = NULL; } @@ -2747,11 +2764,17 @@ static void qcow2_close(BlockDriverState *bs) qcow2_free_snapshots(bs); } +static void qcow2_close(BlockDriverState *bs) +{ + qcow2_do_close(bs, true); +} + static void coroutine_fn qcow2_co_invalidate_cache(BlockDriverState *bs, Error **errp) { ERRP_GUARD(); BDRVQcow2State *s = bs->opaque; + BdrvChild *data_file; int flags = s->flags; QCryptoBlock *crypto = NULL; QDict *options; @@ -2765,14 +2788,24 @@ static void coroutine_fn qcow2_co_invalidate_cache(BlockDriverState *bs, crypto = s->crypto; s->crypto = NULL; - qcow2_close(bs); + /* + * Do not reopen s->data_file (i.e., have qcow2_do_close() not close it, + * and then prevent qcow2_do_open() from opening it), because this function + * runs in the I/O path and as such we must not invoke global-state + * functions like bdrv_unref_child() and bdrv_open_child(). 
+ */ + qcow2_do_close(bs, false); + + data_file = s->data_file; memset(s, 0, sizeof(BDRVQcow2State)); + s->data_file = data_file; + options = qdict_clone_shallow(bs->options); flags &= ~BDRV_O_INACTIVE; qemu_co_mutex_lock(&s->lock); - ret = qcow2_do_open(bs, options, flags, errp); + ret = qcow2_do_open(bs, options, flags, false, errp); qemu_co_mutex_unlock(&s->lock); qobject_unref(options); if (ret < 0) { @@ -3059,7 +3092,7 @@ int qcow2_update_header(BlockDriverState *bs) } /* Write the new header */ - ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size); + ret = bdrv_pwrite(bs->file, 0, s->cluster_size, header, 0); if (ret < 0) { goto fail; } @@ -3646,7 +3679,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) cpu_to_be64(QCOW2_INCOMPAT_EXTL2); } - ret = blk_pwrite(blk, 0, header, cluster_size, 0); + ret = blk_co_pwrite(blk, 0, cluster_size, header, 0); g_free(header); if (ret < 0) { error_setg_errno(errp, -ret, "Could not write qcow2 header"); @@ -3656,7 +3689,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) /* Write a refcount table with one refcount block */ refcount_table = g_malloc0(2 * cluster_size); refcount_table[0] = cpu_to_be64(2 * cluster_size); - ret = blk_pwrite(blk, cluster_size, refcount_table, 2 * cluster_size, 0); + ret = blk_co_pwrite(blk, cluster_size, 2 * cluster_size, refcount_table, 0); g_free(refcount_table); if (ret < 0) { @@ -3711,8 +3744,8 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) } /* Okay, now that we have a valid image, let's give it the right size */ - ret = blk_truncate(blk, qcow2_opts->size, false, qcow2_opts->preallocation, - 0, errp); + ret = blk_co_truncate(blk, qcow2_opts->size, false, + qcow2_opts->preallocation, 0, errp); if (ret < 0) { error_prepend(errp, "Could not resize image: "); goto out; @@ -3940,7 +3973,7 @@ static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes) } static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, BdrvRequestFlags flags) + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { int ret; BDRVQcow2State *s = bs->opaque; @@ -3995,7 +4028,7 @@ static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs, } static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { int ret; BDRVQcow2State *s = bs->opaque; @@ -4025,9 +4058,9 @@ static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs, static int coroutine_fn qcow2_co_copy_range_from(BlockDriverState *bs, - BdrvChild *src, uint64_t src_offset, - BdrvChild *dst, uint64_t dst_offset, - uint64_t bytes, BdrvRequestFlags read_flags, + BdrvChild *src, int64_t src_offset, + BdrvChild *dst, int64_t dst_offset, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { BDRVQcow2State *s = bs->opaque; @@ -4108,9 +4141,9 @@ out: static int coroutine_fn qcow2_co_copy_range_to(BlockDriverState *bs, - BdrvChild *src, uint64_t src_offset, - BdrvChild *dst, uint64_t dst_offset, - uint64_t bytes, BdrvRequestFlags read_flags, + BdrvChild *src, int64_t src_offset, + BdrvChild *dst, int64_t dst_offset, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { BDRVQcow2State *s = bs->opaque; @@ -4528,8 +4561,8 @@ static int coroutine_fn qcow2_co_truncate(BlockDriverState *bs, int64_t offset, /* write updated header.size */ offset = cpu_to_be64(offset); - ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size), - &offset, sizeof(offset)); 
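Annotation: the qcow2_co_invalidate_cache() hunks above close and reopen the image without detaching the external data file, because attaching or detaching a child (bdrv_unref_child()/bdrv_open_child()) is a global-state operation that must not run in the I/O path. A condensed sketch of the resulting sequence, assuming the qcow2-internal helpers introduced by this patch; error handling is omitted:

/* Condensed sketch of the sequence above, not the literal function body. */
static void coroutine_fn invalidate_cache_sequence(BlockDriverState *bs,
                                                   QDict *options, int flags,
                                                   Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    BdrvChild *data_file;

    qcow2_do_close(bs, false);            /* keep s->data_file attached */

    data_file = s->data_file;             /* must survive the reset below */
    memset(s, 0, sizeof(BDRVQcow2State));
    s->data_file = data_file;

    qemu_co_mutex_lock(&s->lock);
    qcow2_do_open(bs, options, flags, false /* open_data_file */, errp);
    qemu_co_mutex_unlock(&s->lock);
}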
+ ret = bdrv_co_pwrite_sync(bs->file, offsetof(QCowHeader, size), + sizeof(offset), &offset, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to update the image size"); goto fail; @@ -4630,7 +4663,7 @@ static coroutine_fn int qcow2_co_pwritev_compressed_task_entry(AioTask *task) */ static coroutine_fn int qcow2_co_pwritev_compressed_part(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, + int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset) { BDRVQcow2State *s = bs->opaque; @@ -4693,22 +4726,19 @@ qcow2_co_pwritev_compressed_part(BlockDriverState *bs, static int coroutine_fn qcow2_co_preadv_compressed(BlockDriverState *bs, - uint64_t cluster_descriptor, + uint64_t l2_entry, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, size_t qiov_offset) { BDRVQcow2State *s = bs->opaque; - int ret = 0, csize, nb_csectors; + int ret = 0, csize; uint64_t coffset; uint8_t *buf, *out_buf; int offset_in_cluster = offset_into_cluster(s, offset); - coffset = cluster_descriptor & s->cluster_offset_mask; - nb_csectors = ((cluster_descriptor >> s->csize_shift) & s->csize_mask) + 1; - csize = nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE - - (coffset & ~QCOW2_COMPRESSED_SECTOR_MASK); + qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize); buf = g_try_malloc(csize); if (!buf) { @@ -4809,7 +4839,7 @@ static int make_completely_empty(BlockDriverState *bs) l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size); l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1); ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset), - &l1_ofs_rt_ofs_cls, sizeof(l1_ofs_rt_ofs_cls)); + sizeof(l1_ofs_rt_ofs_cls), &l1_ofs_rt_ofs_cls, 0); if (ret < 0) { goto fail_broken_refcounts; } @@ -4840,8 +4870,8 @@ static int make_completely_empty(BlockDriverState *bs) /* Enter the first refblock into the reftable */ rt_entry = cpu_to_be64(2 * s->cluster_size); - ret = bdrv_pwrite_sync(bs->file, s->cluster_size, - &rt_entry, sizeof(rt_entry)); + ret = bdrv_pwrite_sync(bs->file, s->cluster_size, sizeof(rt_entry), + &rt_entry, 0); if (ret < 0) { goto fail_broken_refcounts; } @@ -5230,24 +5260,87 @@ static int qcow2_has_zero_init(BlockDriverState *bs) } } -static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, - int64_t pos) +/* + * Check the request to vmstate. 
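Annotation: qcow2_co_preadv_compressed() above now delegates descriptor decoding to qcow2_parse_compressed_l2_entry() (declared later in this patch in block/qcow2.h). A plausible shape of that helper, reconstructed from the inline computation removed above; the real implementation lives in qcow2-cluster.c and may differ in detail:

/*
 * Reconstruction from the removed inline code, for illustration only: the
 * compressed cluster descriptor packs the host offset in the low bits and
 * the sector count (minus one) above s->csize_shift.
 */
void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
                                     uint64_t *coffset, int *csize)
{
    BDRVQcow2State *s = bs->opaque;
    int nb_csectors;

    *coffset = l2_entry & s->cluster_offset_mask;

    nb_csectors = ((l2_entry >> s->csize_shift) & s->csize_mask) + 1;
    *csize = nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE -
             (*coffset & (QCOW2_COMPRESSED_SECTOR_SIZE - 1));
}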
On success return + * qcow2_vm_state_offset(bs) + @pos + */ +static int64_t qcow2_check_vmstate_request(BlockDriverState *bs, + QEMUIOVector *qiov, int64_t pos) { BDRVQcow2State *s = bs->opaque; + int64_t vmstate_offset = qcow2_vm_state_offset(s); + int ret; - BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); - return bs->drv->bdrv_co_pwritev_part(bs, qcow2_vm_state_offset(s) + pos, - qiov->size, qiov, 0, 0); + /* Incoming requests must be OK */ + bdrv_check_qiov_request(pos, qiov->size, qiov, 0, &error_abort); + + if (INT64_MAX - pos < vmstate_offset) { + return -EIO; + } + + pos += vmstate_offset; + ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); + if (ret < 0) { + return ret; + } + + return pos; } -static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, - int64_t pos) +static coroutine_fn int qcow2_save_vmstate(BlockDriverState *bs, + QEMUIOVector *qiov, int64_t pos) { - BDRVQcow2State *s = bs->opaque; + int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos); + if (offset < 0) { + return offset; + } + + BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); + return bs->drv->bdrv_co_pwritev_part(bs, offset, qiov->size, qiov, 0, 0); +} + +static coroutine_fn int qcow2_load_vmstate(BlockDriverState *bs, + QEMUIOVector *qiov, int64_t pos) +{ + int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos); + if (offset < 0) { + return offset; + } BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD); - return bs->drv->bdrv_co_preadv_part(bs, qcow2_vm_state_offset(s) + pos, - qiov->size, qiov, 0, 0); + return bs->drv->bdrv_co_preadv_part(bs, offset, qiov->size, qiov, 0, 0); +} + +static int qcow2_has_compressed_clusters(BlockDriverState *bs) +{ + int64_t offset = 0; + int64_t bytes = bdrv_getlength(bs); + + if (bytes < 0) { + return bytes; + } + + while (bytes != 0) { + int ret; + QCow2SubclusterType type; + unsigned int cur_bytes = MIN(INT_MAX, bytes); + uint64_t host_offset; + + ret = qcow2_get_host_offset(bs, offset, &cur_bytes, &host_offset, + &type); + if (ret < 0) { + return ret; + } + + if (type == QCOW2_SUBCLUSTER_COMPRESSED) { + return 1; + } + + offset += cur_bytes; + bytes -= cur_bytes; + } + + return 0; } /* @@ -5307,9 +5400,10 @@ static int qcow2_downgrade(BlockDriverState *bs, int target_version, * the first place; if that happens nonetheless, returning -ENOTSUP is the * best thing to do anyway */ - if (s->incompatible_features) { + if (s->incompatible_features & ~QCOW2_INCOMPAT_COMPRESSION) { error_setg(errp, "Cannot downgrade an image with incompatible features " - "%#" PRIx64 " set", s->incompatible_features); + "0x%" PRIx64 " set", + s->incompatible_features & ~QCOW2_INCOMPAT_COMPRESSION); return -ENOTSUP; } @@ -5327,6 +5421,27 @@ static int qcow2_downgrade(BlockDriverState *bs, int target_version, return ret; } + if (s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION) { + ret = qcow2_has_compressed_clusters(bs); + if (ret < 0) { + error_setg(errp, "Failed to check block status"); + return -EINVAL; + } + if (ret) { + error_setg(errp, "Cannot downgrade an image with zstd compression " + "type and existing compressed clusters"); + return -ENOTSUP; + } + /* + * No compressed clusters for now, so just chose default zlib + * compression. 
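Annotation: qcow2_check_vmstate_request() above validates the caller-supplied vmstate position before translating it into an image offset; the key step is the overflow guard on pos + vmstate_offset. A self-contained illustration of that guard (plain C, not QEMU code; the function name is made up):

#include <stdint.h>
#include <errno.h>

/*
 * Translate a position inside the vmstate area into an absolute image
 * offset, refusing positions that would overflow int64_t -- the same
 * check as "if (INT64_MAX - pos < vmstate_offset)" above.
 */
static int64_t vmstate_pos_to_image_offset(int64_t vmstate_base, int64_t pos)
{
    if (INT64_MAX - pos < vmstate_base) {
        return -EIO;                 /* e.g. pos close to INT64_MAX */
    }
    return vmstate_base + pos;       /* cannot overflow after the check */
}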
+ */ + s->incompatible_features &= ~QCOW2_INCOMPAT_COMPRESSION; + s->compression_type = QCOW2_COMPRESSION_TYPE_ZLIB; + } + + assert(s->incompatible_features == 0); + s->qcow_version = target_version; ret = qcow2_update_header(bs); if (ret < 0) { diff --git a/block/qcow2.h b/block/qcow2.h index 0fe5f74ed3..2285f18a73 100644 --- a/block/qcow2.h +++ b/block/qcow2.h @@ -110,7 +110,6 @@ /* Defined in the qcow2 spec (compressed cluster descriptor) */ #define QCOW2_COMPRESSED_SECTOR_SIZE 512U -#define QCOW2_COMPRESSED_SECTOR_MASK (~(QCOW2_COMPRESSED_SECTOR_SIZE - 1ULL)) /* Must be at least 2 to cover COW */ #define MIN_L2_CACHE_SIZE 2 /* cache entries */ @@ -587,10 +586,12 @@ typedef enum QCow2MetadataOverlap { (QCOW2_OL_CACHED | QCOW2_OL_INACTIVE_L2) #define L1E_OFFSET_MASK 0x00fffffffffffe00ULL +#define L1E_RESERVED_MASK 0x7f000000000001ffULL #define L2E_OFFSET_MASK 0x00fffffffffffe00ULL -#define L2E_COMPRESSED_OFFSET_SIZE_MASK 0x3fffffffffffffffULL +#define L2E_STD_RESERVED_MASK 0x3f000000000001feULL #define REFT_OFFSET_MASK 0xfffffffffffffe00ULL +#define REFT_RESERVED_MASK 0x1ffULL #define INV_OFFSET (-1ULL) @@ -837,7 +838,7 @@ int qcow2_update_header(BlockDriverState *bs); void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset, int64_t size, const char *message_format, ...) - GCC_FMT_ATTR(5, 6); + G_GNUC_PRINTF(5, 6); int qcow2_validate_table(BlockDriverState *bs, uint64_t offset, uint64_t entries, size_t entry_len, @@ -845,7 +846,7 @@ int qcow2_validate_table(BlockDriverState *bs, uint64_t offset, Error **errp); /* qcow2-refcount.c functions */ -int qcow2_refcount_init(BlockDriverState *bs); +int coroutine_fn qcow2_refcount_init(BlockDriverState *bs); void qcow2_refcount_close(BlockDriverState *bs); int qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index, @@ -873,8 +874,8 @@ void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry, int qcow2_update_snapshot_refcount(BlockDriverState *bs, int64_t l1_table_offset, int l1_size, int addend); -int coroutine_fn qcow2_flush_caches(BlockDriverState *bs); -int coroutine_fn qcow2_write_caches(BlockDriverState *bs); +int qcow2_flush_caches(BlockDriverState *bs); +int qcow2_write_caches(BlockDriverState *bs); int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix); @@ -892,14 +893,14 @@ int qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res, int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order, BlockDriverAmendStatusCB *status_cb, void *cb_opaque, Error **errp); -int qcow2_shrink_reftable(BlockDriverState *bs); +int coroutine_fn qcow2_shrink_reftable(BlockDriverState *bs); int64_t qcow2_get_last_cluster(BlockDriverState *bs, int64_t size); -int qcow2_detect_metadata_preallocation(BlockDriverState *bs); +int coroutine_fn qcow2_detect_metadata_preallocation(BlockDriverState *bs); /* qcow2-cluster.c functions */ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size, bool exact_size); -int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t max_size); +int coroutine_fn qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t max_size); int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index); int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num, uint8_t *buf, int nb_sectors, bool enc, Error **errp); @@ -907,21 +908,24 @@ int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num, int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset, unsigned int *bytes, uint64_t *host_offset, QCow2SubclusterType 
*subcluster_type); -int qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset, - unsigned int *bytes, uint64_t *host_offset, - QCowL2Meta **m); -int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, - uint64_t offset, - int compressed_size, - uint64_t *host_offset); +int coroutine_fn qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset, + unsigned int *bytes, + uint64_t *host_offset, QCowL2Meta **m); +int coroutine_fn qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, + uint64_t offset, + int compressed_size, + uint64_t *host_offset); +void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry, + uint64_t *coffset, int *csize); -int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m); +int coroutine_fn qcow2_alloc_cluster_link_l2(BlockDriverState *bs, + QCowL2Meta *m); void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m); int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset, uint64_t bytes, enum qcow2_discard_type type, bool full_discard); -int qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, int flags); +int coroutine_fn qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, + uint64_t bytes, int flags); int qcow2_expand_zero_clusters(BlockDriverState *bs, BlockDriverAmendStatusCB *status_cb, @@ -978,8 +982,8 @@ void qcow2_cache_discard(Qcow2Cache *c, void *table); int qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res, void **refcount_table, int64_t *refcount_table_size); -bool qcow2_load_dirty_bitmaps(BlockDriverState *bs, bool *header_updated, - Error **errp); +bool coroutine_fn qcow2_load_dirty_bitmaps(BlockDriverState *bs, + bool *header_updated, Error **errp); bool qcow2_get_bitmap_info_list(BlockDriverState *bs, Qcow2BitmapInfoList **info_list, Error **errp); int qcow2_reopen_bitmaps_rw(BlockDriverState *bs, Error **errp); @@ -987,13 +991,13 @@ int qcow2_truncate_bitmaps_check(BlockDriverState *bs, Error **errp); bool qcow2_store_persistent_dirty_bitmaps(BlockDriverState *bs, bool release_stored, Error **errp); int qcow2_reopen_bitmaps_ro(BlockDriverState *bs, Error **errp); -bool qcow2_co_can_store_new_dirty_bitmap(BlockDriverState *bs, - const char *name, - uint32_t granularity, - Error **errp); -int qcow2_co_remove_persistent_dirty_bitmap(BlockDriverState *bs, - const char *name, - Error **errp); +bool coroutine_fn qcow2_co_can_store_new_dirty_bitmap(BlockDriverState *bs, + const char *name, + uint32_t granularity, + Error **errp); +int coroutine_fn qcow2_co_remove_persistent_dirty_bitmap(BlockDriverState *bs, + const char *name, + Error **errp); bool qcow2_supports_persistent_dirty_bitmap(BlockDriverState *bs); uint64_t qcow2_get_persistent_dirty_bitmap_size(BlockDriverState *bs, uint32_t cluster_size); diff --git a/block/qed-l2-cache.c b/block/qed-l2-cache.c index b548362398..caf2c024c2 100644 --- a/block/qed-l2-cache.c +++ b/block/qed-l2-cache.c @@ -51,6 +51,7 @@ */ #include "qemu/osdep.h" +#include "qemu/memalign.h" #include "trace.h" #include "qed.h" diff --git a/block/qed-table.c b/block/qed-table.c index 405d446cbe..aa203f2627 100644 --- a/block/qed-table.c +++ b/block/qed-table.c @@ -17,6 +17,7 @@ #include "qemu/sockets.h" /* for EINPROGRESS on Windows */ #include "qed.h" #include "qemu/bswap.h" +#include "qemu/memalign.h" /* Called with table_lock held. 
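Annotation: the new *_RESERVED_MASK constants added to block/qcow2.h above name the bits of an L1 entry, standard L2 entry and refcount-table entry that the qcow2 format keeps reserved. One plausible use, illustrative only and not code from the patch, is rejecting entries with reserved bits set during consistency checks:

#include <stdbool.h>
#include <stdint.h>

#define L1E_RESERVED_MASK  0x7f000000000001ffULL  /* from block/qcow2.h above */
#define REFT_RESERVED_MASK 0x1ffULL

/* Illustrative only: a well-formed entry must not set any reserved bit. */
static bool l1_entry_is_well_formed(uint64_t l1_entry)
{
    return (l1_entry & L1E_RESERVED_MASK) == 0;
}

static bool reftable_entry_is_well_formed(uint64_t reft_entry)
{
    return (reft_entry & REFT_RESERVED_MASK) == 0;
}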
*/ static int coroutine_fn qed_read_table(BDRVQEDState *s, uint64_t offset, @@ -99,7 +100,7 @@ static int coroutine_fn qed_write_table(BDRVQEDState *s, uint64_t offset, } if (flush) { - ret = bdrv_flush(s->bs); + ret = bdrv_co_flush(s->bs); if (ret < 0) { goto out; } diff --git a/block/qed.c b/block/qed.c index f45c640513..2f36ad342c 100644 --- a/block/qed.c +++ b/block/qed.c @@ -20,6 +20,7 @@ #include "qemu/main-loop.h" #include "qemu/module.h" #include "qemu/option.h" +#include "qemu/memalign.h" #include "trace.h" #include "qed.h" #include "sysemu/block-backend.h" @@ -86,14 +87,9 @@ static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le) int qed_write_header_sync(BDRVQEDState *s) { QEDHeader le; - int ret; qed_header_cpu_to_le(&s->header, &le); - ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le)); - if (ret != sizeof(le)) { - return ret; - } - return 0; + return bdrv_pwrite(s->bs->file, 0, sizeof(le), &le, 0); } /** @@ -206,7 +202,7 @@ static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n, if (n >= buflen) { return -EINVAL; } - ret = bdrv_pread(file, offset, buf, n); + ret = bdrv_pread(file, offset, n, buf, 0); if (ret < 0) { return ret; } @@ -258,7 +254,7 @@ static CachedL2Table *qed_new_l2_table(BDRVQEDState *s) return l2_table; } -static bool qed_plug_allocating_write_reqs(BDRVQEDState *s) +static bool coroutine_fn qed_plug_allocating_write_reqs(BDRVQEDState *s) { qemu_co_mutex_lock(&s->table_lock); @@ -277,7 +273,7 @@ static bool qed_plug_allocating_write_reqs(BDRVQEDState *s) return true; } -static void qed_unplug_allocating_write_reqs(BDRVQEDState *s) +static void coroutine_fn qed_unplug_allocating_write_reqs(BDRVQEDState *s) { qemu_co_mutex_lock(&s->table_lock); assert(s->allocating_write_reqs_plugged); @@ -391,7 +387,7 @@ static int coroutine_fn bdrv_qed_do_open(BlockDriverState *bs, QDict *options, int64_t file_size; int ret; - ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header)); + ret = bdrv_co_pread(bs->file, 0, sizeof(le_header), &le_header, 0); if (ret < 0) { error_setg(errp, "Failed to read QED header"); return ret; @@ -449,6 +445,8 @@ static int coroutine_fn bdrv_qed_do_open(BlockDriverState *bs, QDict *options, } if ((s->header.features & QED_F_BACKING_FILE)) { + g_autofree char *backing_file_str = NULL; + if ((uint64_t)s->header.backing_filename_offset + s->header.backing_filename_size > s->header.cluster_size * s->header.header_size) { @@ -456,16 +454,21 @@ static int coroutine_fn bdrv_qed_do_open(BlockDriverState *bs, QDict *options, return -EINVAL; } + backing_file_str = g_malloc(sizeof(bs->backing_file)); ret = qed_read_string(bs->file, s->header.backing_filename_offset, s->header.backing_filename_size, - bs->auto_backing_file, - sizeof(bs->auto_backing_file)); + backing_file_str, sizeof(bs->backing_file)); if (ret < 0) { error_setg(errp, "Failed to read backing filename"); return ret; } - pstrcpy(bs->backing_file, sizeof(bs->backing_file), - bs->auto_backing_file); + + if (!g_str_equal(backing_file_str, bs->backing_file)) { + pstrcpy(bs->backing_file, sizeof(bs->backing_file), + backing_file_str); + pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file), + backing_file_str); + } if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) { pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw"); @@ -489,7 +492,7 @@ static int coroutine_fn bdrv_qed_do_open(BlockDriverState *bs, QDict *options, } /* From here on only known autoclear feature bits are valid */ - bdrv_flush(bs->file->bs); + bdrv_co_flush(bs->file->bs); } 
s->l1_table = qed_alloc_table(s); @@ -558,11 +561,11 @@ static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags, .errp = errp, .ret = -EINPROGRESS }; + int ret; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } bdrv_qed_init_state(bs); @@ -582,6 +585,7 @@ static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp) BDRVQEDState *s = bs->opaque; bs->bl.pwrite_zeroes_alignment = s->header.cluster_size; + bs->bl.max_pwrite_zeroes = QEMU_ALIGN_DOWN(INT_MAX, s->header.cluster_size); } /* We have nothing to do for QED reopen, stubs just return @@ -689,7 +693,7 @@ static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts, * The QED format associates file length with allocation status, * so a new file (which is empty) must have a length of 0. */ - ret = blk_truncate(blk, 0, true, PREALLOC_MODE_OFF, 0, errp); + ret = blk_co_truncate(blk, 0, true, PREALLOC_MODE_OFF, 0, errp); if (ret < 0) { goto out; } @@ -708,18 +712,18 @@ static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts, } qed_header_cpu_to_le(&header, &le_header); - ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0); + ret = blk_co_pwrite(blk, 0, sizeof(le_header), &le_header, 0); if (ret < 0) { goto out; } - ret = blk_pwrite(blk, sizeof(le_header), qed_opts->backing_file, - header.backing_filename_size, 0); + ret = blk_co_pwrite(blk, sizeof(le_header), header.backing_filename_size, + qed_opts->backing_file, 0); if (ret < 0) { goto out; } l1_table = g_malloc0(l1_size); - ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0); + ret = blk_co_pwrite(blk, header.l1_table_offset, l1_size, l1_table, 0); if (ret < 0) { goto out; } @@ -1391,13 +1395,12 @@ static int coroutine_fn bdrv_qed_co_writev(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int flags) { - assert(!flags); return qed_co_request(bs, sector_num, qiov, nb_sectors, QED_AIOCB_WRITE); } static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, - int bytes, + int64_t bytes, BdrvRequestFlags flags) { BDRVQEDState *s = bs->opaque; @@ -1408,6 +1411,12 @@ static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs, */ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes); + /* + * QED is not prepared for 63bit write-zero requests, so rely on + * max_pwrite_zeroes. 
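Annotation: the bdrv_qed_refresh_limits() hunk above, together with the assertion added in bdrv_qed_co_pwrite_zeroes() just below, relies on a simple piece of arithmetic: the advertised maximum is the largest cluster-aligned value that still fits in an int, so the block layer can never hand the driver a larger write-zeroes request. A compile-time check of the numbers, assuming an illustrative 64 KiB cluster size:

#include <limits.h>

#define EXAMPLE_CLUSTER_SIZE 65536                 /* illustrative value */
#define ALIGN_DOWN(n, m)     ((n) / (m) * (m))     /* like QEMU_ALIGN_DOWN */

/* INT_MAX = 2147483647; rounded down to a cluster multiple: 32767 * 65536 */
_Static_assert(ALIGN_DOWN(INT_MAX, EXAMPLE_CLUSTER_SIZE) == 2147418112,
               "largest cluster-aligned request for a 64 KiB cluster size");
_Static_assert(ALIGN_DOWN(INT_MAX, EXAMPLE_CLUSTER_SIZE) <= INT_MAX,
               "so assert(bytes <= INT_MAX) in the driver cannot fire");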
+ */ + assert(bytes <= INT_MAX); + /* Fall back if the request is not aligned */ if (qed_offset_into_cluster(s, offset) || qed_offset_into_cluster(s, bytes)) { @@ -1537,7 +1546,7 @@ static int bdrv_qed_change_backing_file(BlockDriverState *bs, } /* Write new header */ - ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len); + ret = bdrv_pwrite_sync(bs->file, 0, buffer_len, buffer, 0); g_free(buffer); if (ret == 0) { memcpy(&s->header, &new_header, sizeof(new_header)); diff --git a/block/quorum.c b/block/quorum.c index f2c0805000..f9e6539ceb 100644 --- a/block/quorum.c +++ b/block/quorum.c @@ -17,6 +17,7 @@ #include "qemu/cutils.h" #include "qemu/module.h" #include "qemu/option.h" +#include "qemu/memalign.h" #include "block/block_int.h" #include "block/coroutines.h" #include "block/qdict.h" @@ -160,11 +161,10 @@ static bool quorum_64bits_compare(QuorumVoteValue *a, QuorumVoteValue *b) return a->l == b->l; } -static QuorumAIOCB *quorum_aio_get(BlockDriverState *bs, - QEMUIOVector *qiov, - uint64_t offset, - uint64_t bytes, - int flags) +static QuorumAIOCB *coroutine_fn quorum_aio_get(BlockDriverState *bs, + QEMUIOVector *qiov, + uint64_t offset, uint64_t bytes, + int flags) { BDRVQuorumState *s = bs->opaque; QuorumAIOCB *acb = g_new(QuorumAIOCB, 1); @@ -232,8 +232,6 @@ static bool quorum_has_too_much_io_failed(QuorumAIOCB *acb) return false; } -static int read_fifo_child(QuorumAIOCB *acb); - static void quorum_copy_qiov(QEMUIOVector *dest, QEMUIOVector *source) { int i; @@ -272,7 +270,7 @@ static void quorum_report_bad_versions(BDRVQuorumState *s, } } -static void quorum_rewrite_entry(void *opaque) +static void coroutine_fn quorum_rewrite_entry(void *opaque) { QuorumCo *co = opaque; QuorumAIOCB *acb = co->acb; @@ -573,7 +571,7 @@ free_exit: quorum_free_vote_list(&acb->votes); } -static void read_quorum_children_entry(void *opaque) +static void coroutine_fn read_quorum_children_entry(void *opaque) { QuorumCo *co = opaque; QuorumAIOCB *acb = co->acb; @@ -601,7 +599,7 @@ static void read_quorum_children_entry(void *opaque) } } -static int read_quorum_children(QuorumAIOCB *acb) +static int coroutine_fn read_quorum_children(QuorumAIOCB *acb) { BDRVQuorumState *s = acb->bs->opaque; int i; @@ -642,7 +640,7 @@ static int read_quorum_children(QuorumAIOCB *acb) return acb->vote_ret; } -static int read_fifo_child(QuorumAIOCB *acb) +static int coroutine_fn read_fifo_child(QuorumAIOCB *acb) { BDRVQuorumState *s = acb->bs->opaque; int n, ret; @@ -663,8 +661,10 @@ static int read_fifo_child(QuorumAIOCB *acb) return ret; } -static int quorum_co_preadv(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, int flags) +static int coroutine_fn quorum_co_preadv(BlockDriverState *bs, + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { BDRVQuorumState *s = bs->opaque; QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags); @@ -683,7 +683,7 @@ static int quorum_co_preadv(BlockDriverState *bs, uint64_t offset, return ret; } -static void write_quorum_entry(void *opaque) +static void coroutine_fn write_quorum_entry(void *opaque) { QuorumCo *co = opaque; QuorumAIOCB *acb = co->acb; @@ -714,8 +714,9 @@ static void write_quorum_entry(void *opaque) } } -static int quorum_co_pwritev(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, int flags) +static int coroutine_fn quorum_co_pwritev(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { BDRVQuorumState *s = bs->opaque; QuorumAIOCB 
*acb = quorum_aio_get(bs, qiov, offset, bytes, flags); @@ -744,8 +745,9 @@ static int quorum_co_pwritev(BlockDriverState *bs, uint64_t offset, return ret; } -static int quorum_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, - int bytes, BdrvRequestFlags flags) +static int coroutine_fn quorum_co_pwrite_zeroes(BlockDriverState *bs, + int64_t offset, int64_t bytes, + BdrvRequestFlags flags) { return quorum_co_pwritev(bs, offset, bytes, NULL, diff --git a/block/raw-format.c b/block/raw-format.c index 7717578ed6..a68014ef0b 100644 --- a/block/raw-format.c +++ b/block/raw-format.c @@ -31,6 +31,7 @@ #include "qapi/error.h" #include "qemu/module.h" #include "qemu/option.h" +#include "qemu/memalign.h" typedef struct BDRVRawState { uint64_t offset; @@ -181,8 +182,8 @@ static void raw_reopen_abort(BDRVReopenState *state) } /* Check and adjust the offset, against 'offset' and 'size' options. */ -static inline int raw_adjust_offset(BlockDriverState *bs, uint64_t *offset, - uint64_t bytes, bool is_write) +static inline int raw_adjust_offset(BlockDriverState *bs, int64_t *offset, + int64_t bytes, bool is_write) { BDRVRawState *s = bs->opaque; @@ -201,9 +202,9 @@ static inline int raw_adjust_offset(BlockDriverState *bs, uint64_t *offset, return 0; } -static int coroutine_fn raw_co_preadv(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, - int flags) +static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { int ret; @@ -216,9 +217,9 @@ static int coroutine_fn raw_co_preadv(BlockDriverState *bs, uint64_t offset, return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags); } -static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, - int flags) +static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { void *buf = NULL; BlockDriver *drv; @@ -257,6 +258,8 @@ static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, uint64_t offset, qemu_iovec_add(&local_qiov, buf, 512); qemu_iovec_concat(&local_qiov, qiov, 512, qiov->size - 512); qiov = &local_qiov; + + flags &= ~BDRV_REQ_REGISTERED_BUF; } ret = raw_adjust_offset(bs, &offset, bytes, true); @@ -289,12 +292,12 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs, } static int coroutine_fn raw_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { int ret; - ret = raw_adjust_offset(bs, (uint64_t *)&offset, bytes, true); + ret = raw_adjust_offset(bs, &offset, bytes, true); if (ret) { return ret; } @@ -302,11 +305,11 @@ static int coroutine_fn raw_co_pwrite_zeroes(BlockDriverState *bs, } static int coroutine_fn raw_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { int ret; - ret = raw_adjust_offset(bs, (uint64_t *)&offset, bytes, true); + ret = raw_adjust_offset(bs, &offset, bytes, true); if (ret) { return ret; } @@ -410,7 +413,8 @@ static void raw_lock_medium(BlockDriverState *bs, bool locked) bdrv_lock_medium(bs->file->bs, locked); } -static int raw_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) +static int coroutine_fn raw_co_ioctl(BlockDriverState *bs, + unsigned long int req, void *buf) { BDRVRawState *s = bs->opaque; if (s->offset || s->has_size) { @@ -456,13 +460,13 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags, file_role = 
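Annotation: raw_adjust_offset() above now takes an int64_t offset pointer, which lets the write-zeroes and discard paths drop their (uint64_t *) casts. The helper's body is not shown in this hunk; the sketch below only illustrates the semantics described by its comment (apply the user-specified 'offset' option and bound requests by the 'size' option) and is an assumption, not the actual implementation:

#include <stdbool.h>
#include <stdint.h>
#include <errno.h>

/* Hypothetical stand-in for the 'offset'/'size' options of BDRVRawState. */
typedef struct RawOpts {
    int64_t offset;      /* start of the visible window in the file */
    bool has_size;
    int64_t size;        /* length of the visible window, if limited */
} RawOpts;

/* Illustration only: translate a request into the underlying file. */
static int adjust_offset_example(const RawOpts *o, int64_t *offset,
                                 int64_t bytes)
{
    if (o->has_size && (*offset > o->size || bytes > o->size - *offset)) {
        return -EINVAL;              /* request leaves the configured window */
    }
    if (*offset > INT64_MAX - o->offset) {
        return -EINVAL;              /* would overflow when shifted */
    }
    *offset += o->offset;            /* shift into the raw file */
    return 0;
}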
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY; } - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - file_role, false, errp); + bdrv_open_child(NULL, options, "file", bs, &child_of_bds, + file_role, false, errp); if (!bs->file) { return -EINVAL; } - bs->sg = bs->file->bs->sg; + bs->sg = bdrv_is_sg(bs->file->bs); bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED | (BDRV_REQ_FUA & bs->file->bs->supported_write_flags); bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED | @@ -488,7 +492,7 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags, return ret; } - if (bs->sg && (s->offset || s->has_size)) { + if (bdrv_is_sg(bs) && (s->offset || s->has_size)) { error_setg(errp, "Cannot use offset/size with SCSI generic devices"); return -EINVAL; } @@ -532,10 +536,10 @@ static int raw_probe_geometry(BlockDriverState *bs, HDGeometry *geo) static int coroutine_fn raw_co_copy_range_from(BlockDriverState *bs, BdrvChild *src, - uint64_t src_offset, + int64_t src_offset, BdrvChild *dst, - uint64_t dst_offset, - uint64_t bytes, + int64_t dst_offset, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { @@ -551,10 +555,10 @@ static int coroutine_fn raw_co_copy_range_from(BlockDriverState *bs, static int coroutine_fn raw_co_copy_range_to(BlockDriverState *bs, BdrvChild *src, - uint64_t src_offset, + int64_t src_offset, BdrvChild *dst, - uint64_t dst_offset, - uint64_t bytes, + int64_t dst_offset, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { @@ -580,6 +584,25 @@ static void raw_cancel_in_flight(BlockDriverState *bs) bdrv_cancel_in_flight(bs->file->bs); } +static void raw_child_perm(BlockDriverState *bs, BdrvChild *c, + BdrvChildRole role, + BlockReopenQueue *reopen_queue, + uint64_t parent_perm, uint64_t parent_shared, + uint64_t *nperm, uint64_t *nshared) +{ + bdrv_default_perms(bs, c, role, reopen_queue, parent_perm, + parent_shared, nperm, nshared); + + /* + * bdrv_default_perms() may add WRITE and/or RESIZE (see comment in + * bdrv_default_perms_for_storage() for an explanation) but we only need + * them if they are in parent_perm. Drop WRITE and RESIZE whenever possible + * to avoid permission conflicts. 
+ */ + *nperm &= ~(BLK_PERM_WRITE | BLK_PERM_RESIZE); + *nperm |= parent_perm & (BLK_PERM_WRITE | BLK_PERM_RESIZE); +} + BlockDriver bdrv_raw = { .format_name = "raw", .instance_size = sizeof(BDRVRawState), @@ -588,7 +611,7 @@ BlockDriver bdrv_raw = { .bdrv_reopen_commit = &raw_reopen_commit, .bdrv_reopen_abort = &raw_reopen_abort, .bdrv_open = &raw_open, - .bdrv_child_perm = bdrv_default_perms, + .bdrv_child_perm = raw_child_perm, .bdrv_co_create_opts = &raw_co_create_opts, .bdrv_co_preadv = &raw_co_preadv, .bdrv_co_pwritev = &raw_co_pwritev, diff --git a/block/rbd.c b/block/rbd.c index dcf82b15b8..f826410f40 100644 --- a/block/rbd.c +++ b/block/rbd.c @@ -97,6 +97,12 @@ typedef struct RBDTask { int64_t ret; } RBDTask; +typedef struct RBDDiffIterateReq { + uint64_t offs; + uint64_t bytes; + bool exists; +} RBDDiffIterateReq; + static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx, BlockdevOptionsRbd *opts, bool cache, const char *keypairs, const char *secretid, @@ -825,6 +831,26 @@ static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx, error_setg_errno(errp, -r, "error opening pool %s", opts->pool); goto failed_shutdown; } + +#ifdef HAVE_RBD_NAMESPACE_EXISTS + if (opts->has_q_namespace && strlen(opts->q_namespace) > 0) { + bool exists; + + r = rbd_namespace_exists(*io_ctx, opts->q_namespace, &exists); + if (r < 0) { + error_setg_errno(errp, -r, "error checking namespace"); + goto failed_ioctx_destroy; + } + + if (!exists) { + error_setg(errp, "namespace '%s' does not exist", + opts->q_namespace); + r = -ENOENT; + goto failed_ioctx_destroy; + } + } +#endif + /* * Set the namespace after opening the io context on the pool, * if nspace == NULL or if nspace == "", it is just as we did nothing @@ -834,6 +860,10 @@ static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx, r = 0; goto out; +#ifdef HAVE_RBD_NAMESPACE_EXISTS +failed_ioctx_destroy: + rados_ioctx_destroy(*io_ctx); +#endif failed_shutdown: rados_shutdown(*cluster); out: @@ -1101,6 +1131,20 @@ static int coroutine_fn qemu_rbd_start_co(BlockDriverState *bs, assert(!qiov || qiov->size == bytes); + if (cmd == RBD_AIO_WRITE || cmd == RBD_AIO_WRITE_ZEROES) { + /* + * RBD APIs don't allow us to write more than actual size, so in order + * to support growing images, we resize the image before write + * operations that exceed the current size. + */ + if (offset + bytes > s->image_size) { + int r = qemu_rbd_resize(bs, offset + bytes); + if (r < 0) { + return r; + } + } + } + r = rbd_aio_create_completion(&task, (rbd_callback_t) qemu_rbd_completion_cb, &c); if (r < 0) { @@ -1164,30 +1208,18 @@ static int coroutine_fn qemu_rbd_start_co(BlockDriverState *bs, } static int -coroutine_fn qemu_rbd_co_preadv(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, - int flags) +coroutine_fn qemu_rbd_co_preadv(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { return qemu_rbd_start_co(bs, offset, bytes, qiov, flags, RBD_AIO_READ); } static int -coroutine_fn qemu_rbd_co_pwritev(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, - int flags) +coroutine_fn qemu_rbd_co_pwritev(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { - BDRVRBDState *s = bs->opaque; - /* - * RBD APIs don't allow us to write more than actual size, so in order - * to support growing images, we resize the image before write - * operations that exceed the current size. 
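Annotation: raw_child_perm() above first lets bdrv_default_perms() compute the child permissions and then masks WRITE/RESIZE back out unless the parent itself was asked for them. A short runnable illustration of that masking; the permission bit values here are made up, only the bit operations mirror the two lines added above:

#include <stdint.h>

/* Illustrative stand-ins for QEMU's BLK_PERM_* bits (values made up). */
enum {
    PERM_CONSISTENT_READ = 1 << 0,
    PERM_WRITE           = 1 << 1,
    PERM_RESIZE          = 1 << 2,
};

/* The two mask lines from raw_child_perm() above, applied to an example. */
static uint64_t child_perm_example(uint64_t default_perm, uint64_t parent_perm)
{
    uint64_t nperm = default_perm;

    nperm &= ~(uint64_t)(PERM_WRITE | PERM_RESIZE);
    nperm |= parent_perm & (PERM_WRITE | PERM_RESIZE);
    return nperm;
}

/*
 * If bdrv_default_perms() asked for READ|WRITE|RESIZE but the raw node's own
 * users only need READ, the file child ends up with READ only, so it does not
 * conflict with other writers of the same underlying file.
 */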
- */ - if (offset + bytes > s->image_size) { - int r = qemu_rbd_resize(bs, offset + bytes); - if (r < 0) { - return r; - } - } return qemu_rbd_start_co(bs, offset, bytes, qiov, flags, RBD_AIO_WRITE); } @@ -1197,17 +1229,17 @@ static int coroutine_fn qemu_rbd_co_flush(BlockDriverState *bs) } static int coroutine_fn qemu_rbd_co_pdiscard(BlockDriverState *bs, - int64_t offset, int count) + int64_t offset, int64_t bytes) { - return qemu_rbd_start_co(bs, offset, count, NULL, 0, RBD_AIO_DISCARD); + return qemu_rbd_start_co(bs, offset, bytes, NULL, 0, RBD_AIO_DISCARD); } #ifdef LIBRBD_SUPPORTS_WRITE_ZEROES static int coroutine_fn qemu_rbd_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, - int count, BdrvRequestFlags flags) + int64_t bytes, BdrvRequestFlags flags) { - return qemu_rbd_start_co(bs, offset, count, NULL, flags, + return qemu_rbd_start_co(bs, offset, bytes, NULL, flags, RBD_AIO_WRITE_ZEROES); } #endif @@ -1259,6 +1291,149 @@ static ImageInfoSpecific *qemu_rbd_get_specific_info(BlockDriverState *bs, return spec_info; } +/* + * rbd_diff_iterate2 allows to interrupt the exection by returning a negative + * value in the callback routine. Choose a value that does not conflict with + * an existing exitcode and return it if we want to prematurely stop the + * execution because we detected a change in the allocation status. + */ +#define QEMU_RBD_EXIT_DIFF_ITERATE2 -9000 + +static int qemu_rbd_diff_iterate_cb(uint64_t offs, size_t len, + int exists, void *opaque) +{ + RBDDiffIterateReq *req = opaque; + + assert(req->offs + req->bytes <= offs); + + /* treat a hole like an unallocated area and bail out */ + if (!exists) { + return 0; + } + + if (!req->exists && offs > req->offs) { + /* + * we started in an unallocated area and hit the first allocated + * block. req->bytes must be set to the length of the unallocated area + * before the allocated area. stop further processing. + */ + req->bytes = offs - req->offs; + return QEMU_RBD_EXIT_DIFF_ITERATE2; + } + + if (req->exists && offs > req->offs + req->bytes) { + /* + * we started in an allocated area and jumped over an unallocated area, + * req->bytes contains the length of the allocated area before the + * unallocated area. stop further processing. + */ + return QEMU_RBD_EXIT_DIFF_ITERATE2; + } + + req->bytes += len; + req->exists = true; + + return 0; +} + +static int coroutine_fn qemu_rbd_co_block_status(BlockDriverState *bs, + bool want_zero, int64_t offset, + int64_t bytes, int64_t *pnum, + int64_t *map, + BlockDriverState **file) +{ + BDRVRBDState *s = bs->opaque; + int status, r; + RBDDiffIterateReq req = { .offs = offset }; + uint64_t features, flags; + uint64_t head = 0; + + assert(offset + bytes <= s->image_size); + + /* default to all sectors allocated */ + status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID; + *map = offset; + *file = bs; + *pnum = bytes; + + /* check if RBD image supports fast-diff */ + r = rbd_get_features(s->image, &features); + if (r < 0) { + return status; + } + if (!(features & RBD_FEATURE_FAST_DIFF)) { + return status; + } + + /* check if RBD fast-diff result is valid */ + r = rbd_get_flags(s->image, &flags); + if (r < 0) { + return status; + } + if (flags & RBD_FLAG_FAST_DIFF_INVALID) { + return status; + } + +#if LIBRBD_VERSION_CODE < LIBRBD_VERSION(1, 17, 0) + /* + * librbd had a bug until early 2022 that affected all versions of ceph that + * supported fast-diff. This bug results in reporting of incorrect offsets + * if the offset parameter to rbd_diff_iterate2 is not object aligned. 
+ * Work around this bug by rounding down the offset to object boundaries. + * This is OK because we call rbd_diff_iterate2 with whole_object = true. + * However, this workaround only works for non cloned images with default + * striping. + * + * See: https://tracker.ceph.com/issues/53784 + */ + + /* check if RBD image has non-default striping enabled */ + if (features & RBD_FEATURE_STRIPINGV2) { + return status; + } + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + /* + * check if RBD image is a clone (= has a parent). + * + * rbd_get_parent_info is deprecated from Nautilus onwards, but the + * replacement rbd_get_parent is not present in Luminous and Mimic. + */ + if (rbd_get_parent_info(s->image, NULL, 0, NULL, 0, NULL, 0) != -ENOENT) { + return status; + } +#pragma GCC diagnostic pop + + head = req.offs & (s->object_size - 1); + req.offs -= head; + bytes += head; +#endif + + r = rbd_diff_iterate2(s->image, NULL, req.offs, bytes, true, true, + qemu_rbd_diff_iterate_cb, &req); + if (r < 0 && r != QEMU_RBD_EXIT_DIFF_ITERATE2) { + return status; + } + assert(req.bytes <= bytes); + if (!req.exists) { + if (r == 0) { + /* + * rbd_diff_iterate2 does not invoke callbacks for unallocated + * areas. This here catches the case where no callback was + * invoked at all (req.bytes == 0). + */ + assert(req.bytes == 0); + req.bytes = bytes; + } + status = BDRV_BLOCK_ZERO | BDRV_BLOCK_OFFSET_VALID; + } + + assert(req.bytes > head); + *pnum = req.bytes - head; + return status; +} + static int64_t qemu_rbd_getlength(BlockDriverState *bs) { BDRVRBDState *s = bs->opaque; @@ -1494,6 +1669,7 @@ static BlockDriver bdrv_rbd = { #ifdef LIBRBD_SUPPORTS_WRITE_ZEROES .bdrv_co_pwrite_zeroes = qemu_rbd_co_pwrite_zeroes, #endif + .bdrv_co_block_status = qemu_rbd_co_block_status, .bdrv_snapshot_create = qemu_rbd_snap_create, .bdrv_snapshot_delete = qemu_rbd_snap_remove, diff --git a/block/replication.c b/block/replication.c index 32444b9a8f..f1eed25e43 100644 --- a/block/replication.c +++ b/block/replication.c @@ -88,11 +88,9 @@ static int replication_open(BlockDriverState *bs, QDict *options, const char *mode; const char *top_id; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } ret = -EINVAL; @@ -142,6 +140,7 @@ static void replication_close(BlockDriverState *bs) { BDRVReplicationState *s = bs->opaque; Job *commit_job; + GLOBAL_STATE_CODE(); if (s->stage == BLOCK_REPLICATION_RUNNING) { replication_stop(s->rs, false, NULL); @@ -149,7 +148,7 @@ static void replication_close(BlockDriverState *bs) if (s->stage == BLOCK_REPLICATION_FAILOVER) { commit_job = &s->commit_job->job; assert(commit_job->aio_context == qemu_get_current_aio_context()); - job_cancel_sync(commit_job); + job_cancel_sync(commit_job, false); } if (s->mode == REPLICATION_MODE_SECONDARY) { @@ -260,7 +259,6 @@ static coroutine_fn int replication_co_writev(BlockDriverState *bs, int ret; int64_t n; - assert(!flags); ret = replication_get_io_status(s); if (ret < 0) { goto out; @@ -726,7 +724,9 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp) * disk, secondary disk in backup_job_completed(). 
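Annotation: the LIBRBD_VERSION_CODE < 1.17.0 branch above rounds the query down to an object boundary before calling rbd_diff_iterate2() and compensates afterwards. A worked example of the 'head' bookkeeping, assuming an illustrative 4 MiB object size:

/*
 * Worked example (illustrative numbers, 4 MiB objects):
 *
 *   s->object_size = 0x400000 (4194304)
 *   caller asks about offset = 4235264 (4 MiB + 40 KiB), bytes = 65536
 *
 *   head     = 4235264 & (4194304 - 1) = 40960
 *   req.offs = 4235264 - 40960         = 4194304   (object aligned)
 *   bytes    = 65536 + 40960           = 106496    (query grows by 'head')
 *
 * If rbd_diff_iterate2() then reports req.bytes = 106496 allocated bytes
 * starting at req.offs, the result handed back to the block layer is
 *
 *   *pnum = req.bytes - head = 65536
 *
 * i.e. exactly the range the caller originally asked about.
 */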
*/ if (s->backup_job) { - job_cancel_sync(&s->backup_job->job); + aio_context_release(aio_context); + job_cancel_sync(&s->backup_job->job, true); + aio_context_acquire(aio_context); } if (!failover) { diff --git a/block/reqlist.c b/block/reqlist.c new file mode 100644 index 0000000000..08cb57cfa4 --- /dev/null +++ b/block/reqlist.c @@ -0,0 +1,85 @@ +/* + * reqlist API + * + * Copyright (C) 2013 Proxmox Server Solutions + * Copyright (c) 2021 Virtuozzo International GmbH. + * + * Authors: + * Dietmar Maurer (dietmar@proxmox.com) + * Vladimir Sementsov-Ogievskiy + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/range.h" + +#include "block/reqlist.h" + +void reqlist_init_req(BlockReqList *reqs, BlockReq *req, int64_t offset, + int64_t bytes) +{ + assert(!reqlist_find_conflict(reqs, offset, bytes)); + + *req = (BlockReq) { + .offset = offset, + .bytes = bytes, + }; + qemu_co_queue_init(&req->wait_queue); + QLIST_INSERT_HEAD(reqs, req, list); +} + +BlockReq *reqlist_find_conflict(BlockReqList *reqs, int64_t offset, + int64_t bytes) +{ + BlockReq *r; + + QLIST_FOREACH(r, reqs, list) { + if (ranges_overlap(offset, bytes, r->offset, r->bytes)) { + return r; + } + } + + return NULL; +} + +bool coroutine_fn reqlist_wait_one(BlockReqList *reqs, int64_t offset, + int64_t bytes, CoMutex *lock) +{ + BlockReq *r = reqlist_find_conflict(reqs, offset, bytes); + + if (!r) { + return false; + } + + qemu_co_queue_wait(&r->wait_queue, lock); + + return true; +} + +void coroutine_fn reqlist_wait_all(BlockReqList *reqs, int64_t offset, + int64_t bytes, CoMutex *lock) +{ + while (reqlist_wait_one(reqs, offset, bytes, lock)) { + /* continue */ + } +} + +void coroutine_fn reqlist_shrink_req(BlockReq *req, int64_t new_bytes) +{ + if (new_bytes == req->bytes) { + return; + } + + assert(new_bytes > 0 && new_bytes < req->bytes); + + req->bytes = new_bytes; + qemu_co_queue_restart_all(&req->wait_queue); +} + +void coroutine_fn reqlist_remove_req(BlockReq *req) +{ + QLIST_REMOVE(req, list); + qemu_co_queue_restart_all(&req->wait_queue); +} diff --git a/block/snapshot-access.c b/block/snapshot-access.c new file mode 100644 index 0000000000..0a30ec6cd9 --- /dev/null +++ b/block/snapshot-access.c @@ -0,0 +1,132 @@ +/* + * snapshot_access block driver + * + * Copyright (c) 2022 Virtuozzo International GmbH. + * + * Author: + * Sementsov-Ogievskiy Vladimir + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
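Annotation: the new block/reqlist.c above only provides the list primitives; how they are meant to be combined is easiest to see from a caller. The sketch below shows a plausible usage pattern for serializing overlapping requests; the State type, its fields and do_io() are hypothetical, only the reqlist_* calls come from the new API:

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "block/reqlist.h"

typedef struct State {               /* hypothetical user of the API */
    CoMutex lock;
    BlockReqList inflight;           /* QLIST of BlockReq */
} State;

static int coroutine_fn do_io(State *s, int64_t offset, int64_t bytes);

static int coroutine_fn serialized_io(State *s, int64_t offset, int64_t bytes)
{
    BlockReq req;
    int ret;

    qemu_co_mutex_lock(&s->lock);
    /* Block until nothing overlapping [offset, offset + bytes) is in flight */
    reqlist_wait_all(&s->inflight, offset, bytes, &s->lock);
    /* Publish our own request so that others wait for us in turn */
    reqlist_init_req(&s->inflight, &req, offset, bytes);
    qemu_co_mutex_unlock(&s->lock);

    ret = do_io(s, offset, bytes);   /* hypothetical actual work */

    qemu_co_mutex_lock(&s->lock);
    reqlist_remove_req(&req);        /* wakes any waiters queued on us */
    qemu_co_mutex_unlock(&s->lock);

    return ret;
}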
+ */ + +#include "qemu/osdep.h" + +#include "sysemu/block-backend.h" +#include "qemu/cutils.h" +#include "block/block_int.h" + +static coroutine_fn int +snapshot_access_co_preadv_part(BlockDriverState *bs, + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, + BdrvRequestFlags flags) +{ + if (flags) { + return -ENOTSUP; + } + + return bdrv_co_preadv_snapshot(bs->file, offset, bytes, qiov, qiov_offset); +} + +static int coroutine_fn +snapshot_access_co_block_status(BlockDriverState *bs, + bool want_zero, int64_t offset, + int64_t bytes, int64_t *pnum, + int64_t *map, BlockDriverState **file) +{ + return bdrv_co_snapshot_block_status(bs->file->bs, want_zero, offset, + bytes, pnum, map, file); +} + +static int coroutine_fn snapshot_access_co_pdiscard(BlockDriverState *bs, + int64_t offset, int64_t bytes) +{ + return bdrv_co_pdiscard_snapshot(bs->file->bs, offset, bytes); +} + +static int coroutine_fn +snapshot_access_co_pwrite_zeroes(BlockDriverState *bs, + int64_t offset, int64_t bytes, + BdrvRequestFlags flags) +{ + return -ENOTSUP; +} + +static coroutine_fn int +snapshot_access_co_pwritev_part(BlockDriverState *bs, + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, + BdrvRequestFlags flags) +{ + return -ENOTSUP; +} + + +static void snapshot_access_refresh_filename(BlockDriverState *bs) +{ + pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), + bs->file->bs->filename); +} + +static int snapshot_access_open(BlockDriverState *bs, QDict *options, int flags, + Error **errp) +{ + bdrv_open_child(NULL, options, "file", bs, &child_of_bds, + BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY, + false, errp); + if (!bs->file) { + return -EINVAL; + } + + bs->total_sectors = bs->file->bs->total_sectors; + + return 0; +} + +static void snapshot_access_child_perm(BlockDriverState *bs, BdrvChild *c, + BdrvChildRole role, + BlockReopenQueue *reopen_queue, + uint64_t perm, uint64_t shared, + uint64_t *nperm, uint64_t *nshared) +{ + /* + * Currently, we don't need any permissions. If bs->file provides + * snapshot-access API, we can use it. 
+ */ + *nperm = 0; + *nshared = BLK_PERM_ALL; +} + +BlockDriver bdrv_snapshot_access_drv = { + .format_name = "snapshot-access", + + .bdrv_open = snapshot_access_open, + + .bdrv_co_preadv_part = snapshot_access_co_preadv_part, + .bdrv_co_pwritev_part = snapshot_access_co_pwritev_part, + .bdrv_co_pwrite_zeroes = snapshot_access_co_pwrite_zeroes, + .bdrv_co_pdiscard = snapshot_access_co_pdiscard, + .bdrv_co_block_status = snapshot_access_co_block_status, + + .bdrv_refresh_filename = snapshot_access_refresh_filename, + + .bdrv_child_perm = snapshot_access_child_perm, +}; + +static void snapshot_access_init(void) +{ + bdrv_register(&bdrv_snapshot_access_drv); +} + +block_init(snapshot_access_init); diff --git a/block/snapshot.c b/block/snapshot.c index ccacda8bd5..e22ac3eac6 100644 --- a/block/snapshot.c +++ b/block/snapshot.c @@ -57,6 +57,8 @@ int bdrv_snapshot_find(BlockDriverState *bs, QEMUSnapshotInfo *sn_info, QEMUSnapshotInfo *sn_tab, *sn; int nb_sns, i, ret; + GLOBAL_STATE_CODE(); + ret = -ENOENT; nb_sns = bdrv_snapshot_list(bs, &sn_tab); if (nb_sns < 0) { @@ -105,6 +107,7 @@ bool bdrv_snapshot_find_by_id_and_name(BlockDriverState *bs, bool ret = false; assert(id || name); + GLOBAL_STATE_CODE(); nb_sns = bdrv_snapshot_list(bs, &sn_tab); if (nb_sns < 0) { @@ -148,41 +151,29 @@ bool bdrv_snapshot_find_by_id_and_name(BlockDriverState *bs, } /** - * Return a pointer to the child BDS pointer to which we can fall + * Return a pointer to child of given BDS to which we can fall * back if the given BDS does not support snapshots. * Return NULL if there is no BDS to (safely) fall back to. - * - * We need to return an indirect pointer because bdrv_snapshot_goto() - * has to modify the BdrvChild pointer. */ -static BdrvChild **bdrv_snapshot_fallback_ptr(BlockDriverState *bs) +static BdrvChild *bdrv_snapshot_fallback_child(BlockDriverState *bs) { - BdrvChild **fallback; + BdrvChild *fallback = bdrv_primary_child(bs); BdrvChild *child; - /* - * The only BdrvChild pointers that are safe to modify (and which - * we can thus return a reference to) are bs->file and - * bs->backing. - */ - fallback = &bs->file; - if (!*fallback && bs->drv && bs->drv->is_filter) { - fallback = &bs->backing; - } - - if (!*fallback) { + /* We allow fallback only to primary child */ + if (!fallback) { return NULL; } /* * Check that there are no other children that would need to be * snapshotted. If there are, it is not safe to fall back to - * *fallback. + * fallback. */ QLIST_FOREACH(child, &bs->children, next) { if (child->role & (BDRV_CHILD_DATA | BDRV_CHILD_METADATA | BDRV_CHILD_FILTERED) && - child != *fallback) + child != fallback) { return NULL; } @@ -193,13 +184,13 @@ static BdrvChild **bdrv_snapshot_fallback_ptr(BlockDriverState *bs) static BlockDriverState *bdrv_snapshot_fallback(BlockDriverState *bs) { - BdrvChild **child_ptr = bdrv_snapshot_fallback_ptr(bs); - return child_ptr ? 
(*child_ptr)->bs : NULL; + return child_bs(bdrv_snapshot_fallback_child(bs)); } int bdrv_can_snapshot(BlockDriverState *bs) { BlockDriver *drv = bs->drv; + GLOBAL_STATE_CODE(); if (!drv || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) { return 0; } @@ -220,6 +211,9 @@ int bdrv_snapshot_create(BlockDriverState *bs, { BlockDriver *drv = bs->drv; BlockDriverState *fallback_bs = bdrv_snapshot_fallback(bs); + + GLOBAL_STATE_CODE(); + if (!drv) { return -ENOMEDIUM; } @@ -237,9 +231,11 @@ int bdrv_snapshot_goto(BlockDriverState *bs, Error **errp) { BlockDriver *drv = bs->drv; - BdrvChild **fallback_ptr; + BdrvChild *fallback; int ret, open_ret; + GLOBAL_STATE_CODE(); + if (!drv) { error_setg(errp, "Block driver is closed"); return -ENOMEDIUM; @@ -258,13 +254,13 @@ int bdrv_snapshot_goto(BlockDriverState *bs, return ret; } - fallback_ptr = bdrv_snapshot_fallback_ptr(bs); - if (fallback_ptr) { + fallback = bdrv_snapshot_fallback_child(bs); + if (fallback) { QDict *options; QDict *file_options; Error *local_err = NULL; - BlockDriverState *fallback_bs = (*fallback_ptr)->bs; - char *subqdict_prefix = g_strdup_printf("%s.", (*fallback_ptr)->name); + BlockDriverState *fallback_bs = fallback->bs; + char *subqdict_prefix = g_strdup_printf("%s.", fallback->name); options = qdict_clone_shallow(bs->options); @@ -275,8 +271,8 @@ int bdrv_snapshot_goto(BlockDriverState *bs, qobject_unref(file_options); g_free(subqdict_prefix); - /* Force .bdrv_open() below to re-attach fallback_bs on *fallback_ptr */ - qdict_put_str(options, (*fallback_ptr)->name, + /* Force .bdrv_open() below to re-attach fallback_bs on fallback */ + qdict_put_str(options, fallback->name, bdrv_get_node_name(fallback_bs)); /* Now close bs, apply the snapshot on fallback_bs, and re-open bs */ @@ -285,8 +281,7 @@ int bdrv_snapshot_goto(BlockDriverState *bs, } /* .bdrv_open() will re-attach it */ - bdrv_unref_child(bs, *fallback_ptr); - *fallback_ptr = NULL; + bdrv_unref_child(bs, fallback); ret = bdrv_snapshot_goto(fallback_bs, snapshot_id, errp); open_ret = drv->bdrv_open(bs, options, bs->open_flags, &local_err); @@ -300,15 +295,12 @@ int bdrv_snapshot_goto(BlockDriverState *bs, } /* - * fallback_ptr is &bs->file or &bs->backing. *fallback_ptr - * was closed above and set to NULL, but the .bdrv_open() call - * has opened it again, because we set the respective option - * (with the qdict_put_str() call above). - * Assert that .bdrv_open() has attached some child on - * *fallback_ptr, and that it has attached the one we wanted - * it to (i.e., fallback_bs). + * fallback was a primary child. It was closed above and set to NULL, + * but the .bdrv_open() call has opened it again, because we set the + * respective option (with the qdict_put_str() call above). + * Assert that .bdrv_open() has attached the right BDS as primary child. 
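Annotation: the bdrv_snapshot_goto() changes above replace the BdrvChild ** indirection with the primary child returned by bdrv_snapshot_fallback_child(). Because the hunks are spread out, here is a condensed outline of the fallback path as it reads after the patch; only lines visible in the hunks are shown, unchanged steps are marked as elided:

/* Outline of the fallback path in bdrv_snapshot_goto() after this patch. */
    fallback = bdrv_snapshot_fallback_child(bs);
    if (fallback) {
        BlockDriverState *fallback_bs = fallback->bs;

        options = qdict_clone_shallow(bs->options);
        /* ... extract the child's sub-options (unchanged code) ... */

        /* Make .bdrv_open() re-attach fallback_bs under the same name */
        qdict_put_str(options, fallback->name, bdrv_get_node_name(fallback_bs));

        /* ... close bs (unchanged code) ... */
        bdrv_unref_child(bs, fallback);          /* .bdrv_open() re-attaches it */

        ret = bdrv_snapshot_goto(fallback_bs, snapshot_id, errp);
        open_ret = drv->bdrv_open(bs, options, bs->open_flags, &local_err);
        /* ... error handling (unchanged code) ... */

        /* .bdrv_open() must have re-attached exactly fallback_bs */
        assert(bdrv_primary_bs(bs) == fallback_bs);
        bdrv_unref(fallback_bs);
        return ret;
    }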
*/ - assert(*fallback_ptr && fallback_bs == (*fallback_ptr)->bs); + assert(bdrv_primary_bs(bs) == fallback_bs); bdrv_unref(fallback_bs); return ret; } @@ -348,6 +340,8 @@ int bdrv_snapshot_delete(BlockDriverState *bs, BlockDriverState *fallback_bs = bdrv_snapshot_fallback(bs); int ret; + GLOBAL_STATE_CODE(); + if (!drv) { error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, bdrv_get_device_name(bs)); return -ENOMEDIUM; @@ -380,6 +374,8 @@ int bdrv_snapshot_list(BlockDriverState *bs, { BlockDriver *drv = bs->drv; BlockDriverState *fallback_bs = bdrv_snapshot_fallback(bs); + + GLOBAL_STATE_CODE(); if (!drv) { return -ENOMEDIUM; } @@ -419,6 +415,8 @@ int bdrv_snapshot_load_tmp(BlockDriverState *bs, { BlockDriver *drv = bs->drv; + GLOBAL_STATE_CODE(); + if (!drv) { error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, bdrv_get_device_name(bs)); return -ENOMEDIUM; @@ -447,6 +445,8 @@ int bdrv_snapshot_load_tmp_by_id_or_name(BlockDriverState *bs, int ret; Error *local_err = NULL; + GLOBAL_STATE_CODE(); + ret = bdrv_snapshot_load_tmp(bs, id_or_name, NULL, &local_err); if (ret == -ENOENT || ret == -EINVAL) { error_free(local_err); @@ -515,6 +515,8 @@ bool bdrv_all_can_snapshot(bool has_devices, strList *devices, g_autoptr(GList) bdrvs = NULL; GList *iterbdrvs; + GLOBAL_STATE_CODE(); + if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) { return false; } @@ -549,6 +551,8 @@ int bdrv_all_delete_snapshot(const char *name, g_autoptr(GList) bdrvs = NULL; GList *iterbdrvs; + GLOBAL_STATE_CODE(); + if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) { return -1; } @@ -588,6 +592,8 @@ int bdrv_all_goto_snapshot(const char *name, g_autoptr(GList) bdrvs = NULL; GList *iterbdrvs; + GLOBAL_STATE_CODE(); + if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) { return -1; } @@ -622,6 +628,8 @@ int bdrv_all_has_snapshot(const char *name, g_autoptr(GList) bdrvs = NULL; GList *iterbdrvs; + GLOBAL_STATE_CODE(); + if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) { return -1; } @@ -663,6 +671,7 @@ int bdrv_all_create_snapshot(QEMUSnapshotInfo *sn, { g_autoptr(GList) bdrvs = NULL; GList *iterbdrvs; + GLOBAL_STATE_CODE(); if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) { return -1; @@ -703,6 +712,8 @@ BlockDriverState *bdrv_all_find_vmstate_bs(const char *vmstate_bs, g_autoptr(GList) bdrvs = NULL; GList *iterbdrvs; + GLOBAL_STATE_CODE(); + if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) { return NULL; } diff --git a/block/ssh.c b/block/ssh.c index e0fbb4934b..04726d4ecb 100644 --- a/block/ssh.c +++ b/block/ssh.c @@ -108,7 +108,7 @@ static void ssh_state_free(BDRVSSHState *s) } } -static void GCC_FMT_ATTR(3, 4) +static void G_GNUC_PRINTF(3, 4) session_error_setg(Error **errp, BDRVSSHState *s, const char *fs, ...) { va_list args; @@ -133,7 +133,7 @@ session_error_setg(Error **errp, BDRVSSHState *s, const char *fs, ...) g_free(msg); } -static void GCC_FMT_ATTR(3, 4) +static void G_GNUC_PRINTF(3, 4) sftp_error_setg(Error **errp, BDRVSSHState *s, const char *fs, ...) 
{ va_list args; @@ -386,14 +386,28 @@ static int compare_fingerprint(const unsigned char *fingerprint, size_t len, return *host_key_check - '\0'; } +static char *format_fingerprint(const unsigned char *fingerprint, size_t len) +{ + static const char *hex = "0123456789abcdef"; + char *ret = g_new0(char, (len * 2) + 1); + for (size_t i = 0; i < len; i++) { + ret[i * 2] = hex[((fingerprint[i] >> 4) & 0xf)]; + ret[(i * 2) + 1] = hex[(fingerprint[i] & 0xf)]; + } + ret[len * 2] = '\0'; + return ret; +} + static int check_host_key_hash(BDRVSSHState *s, const char *hash, - enum ssh_publickey_hash_type type, Error **errp) + enum ssh_publickey_hash_type type, const char *typestr, + Error **errp) { int r; ssh_key pubkey; unsigned char *server_hash; size_t server_hash_len; + const char *keytype; r = ssh_get_server_publickey(s->session, &pubkey); if (r != SSH_OK) { @@ -401,6 +415,8 @@ check_host_key_hash(BDRVSSHState *s, const char *hash, return -EINVAL; } + keytype = ssh_key_type_to_char(ssh_key_type(pubkey)); + r = ssh_get_publickey_hash(pubkey, type, &server_hash, &server_hash_len); ssh_key_free(pubkey); if (r != 0) { @@ -410,12 +426,16 @@ check_host_key_hash(BDRVSSHState *s, const char *hash, } r = compare_fingerprint(server_hash, server_hash_len, hash); - ssh_clean_pubkey_hash(&server_hash); if (r != 0) { - error_setg(errp, "remote host key does not match host_key_check '%s'", - hash); + g_autofree char *server_fp = format_fingerprint(server_hash, + server_hash_len); + error_setg(errp, "remote host %s key fingerprint '%s:%s' " + "does not match host_key_check '%s:%s'", + keytype, typestr, server_fp, typestr, hash); + ssh_clean_pubkey_hash(&server_hash); return -EPERM; } + ssh_clean_pubkey_hash(&server_hash); return 0; } @@ -436,13 +456,16 @@ static int check_host_key(BDRVSSHState *s, SshHostKeyCheck *hkc, Error **errp) case SSH_HOST_KEY_CHECK_MODE_HASH: if (hkc->u.hash.type == SSH_HOST_KEY_CHECK_HASH_TYPE_MD5) { return check_host_key_hash(s, hkc->u.hash.hash, - SSH_PUBLICKEY_HASH_MD5, errp); + SSH_PUBLICKEY_HASH_MD5, "md5", + errp); } else if (hkc->u.hash.type == SSH_HOST_KEY_CHECK_HASH_TYPE_SHA1) { return check_host_key_hash(s, hkc->u.hash.hash, - SSH_PUBLICKEY_HASH_SHA1, errp); + SSH_PUBLICKEY_HASH_SHA1, "sha1", + errp); } else if (hkc->u.hash.type == SSH_HOST_KEY_CHECK_HASH_TYPE_SHA256) { return check_host_key_hash(s, hkc->u.hash.hash, - SSH_PUBLICKEY_HASH_SHA256, errp); + SSH_PUBLICKEY_HASH_SHA256, "sha256", + errp); } g_assert_not_reached(); break; @@ -556,6 +579,11 @@ static bool ssh_process_legacy_options(QDict *output_opts, qdict_put_str(output_opts, "host-key-check.type", "sha1"); qdict_put_str(output_opts, "host-key-check.hash", &host_key_check[5]); + } else if (strncmp(host_key_check, "sha256:", 7) == 0) { + qdict_put_str(output_opts, "host-key-check.mode", "hash"); + qdict_put_str(output_opts, "host-key-check.type", "sha256"); + qdict_put_str(output_opts, "host-key-check.hash", + &host_key_check[7]); } else if (strcmp(host_key_check, "yes") == 0) { qdict_put_str(output_opts, "host-key-check.mode", "known_hosts"); } else { @@ -990,7 +1018,7 @@ static void restart_coroutine(void *opaque) AioContext *ctx = bdrv_get_aio_context(bs); trace_ssh_restart_coroutine(restart->co); - aio_set_fd_handler(ctx, s->sock, false, NULL, NULL, NULL, NULL); + aio_set_fd_handler(ctx, s->sock, false, NULL, NULL, NULL, NULL, NULL); aio_co_wake(restart->co); } @@ -1020,7 +1048,7 @@ static coroutine_fn void co_yield(BDRVSSHState *s, BlockDriverState *bs) trace_ssh_co_yield(s->sock, rd_handler, wr_handler); 
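Annotation: the new format_fingerprint() above hex-encodes the server's raw key hash so that a mismatch report can show both fingerprints. A small illustration of the output and of the resulting message shape; the key type string comes from libssh's ssh_key_type_to_char(), and the exact value shown here is an assumption:

/*
 * format_fingerprint((const unsigned char[]){0xde, 0xad, 0xbe, 0xef}, 4)
 *     -> "deadbeef"   (caller must g_free() the result)
 *
 * so a failed "host-key-check" now produces something along the lines of:
 *
 *   remote host ssh-rsa key fingerprint 'sha256:<server hash>' does not
 *   match host_key_check 'sha256:<configured hash>'
 *
 * instead of only echoing the configured hash back at the user.
 */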
aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock, - false, rd_handler, wr_handler, NULL, &restart); + false, rd_handler, wr_handler, NULL, NULL, &restart); qemu_coroutine_yield(); trace_ssh_co_yield_back(s->sock); } @@ -1101,9 +1129,9 @@ static coroutine_fn int ssh_co_readv(BlockDriverState *bs, return ret; } -static int ssh_write(BDRVSSHState *s, BlockDriverState *bs, - int64_t offset, size_t size, - QEMUIOVector *qiov) +static coroutine_fn int ssh_write(BDRVSSHState *s, BlockDriverState *bs, + int64_t offset, size_t size, + QEMUIOVector *qiov) { ssize_t r; size_t written; @@ -1168,7 +1196,6 @@ static coroutine_fn int ssh_co_writev(BlockDriverState *bs, BDRVSSHState *s = bs->opaque; int ret; - assert(!flags); qemu_co_mutex_lock(&s->lock); ret = ssh_write(s, bs, sector_num * BDRV_SECTOR_SIZE, nb_sectors * BDRV_SECTOR_SIZE, qiov); diff --git a/block/stream.c b/block/stream.c index 97bee482dc..694709bd25 100644 --- a/block/stream.c +++ b/block/stream.c @@ -33,6 +33,7 @@ enum { typedef struct StreamBlockJob { BlockJob common; + BlockBackend *blk; BlockDriverState *base_overlay; /* COW overlay (stream from this) */ BlockDriverState *above_base; /* Node directly above the base */ BlockDriverState *cor_filter_bs; @@ -54,8 +55,8 @@ static int stream_prepare(Job *job) { StreamBlockJob *s = container_of(job, StreamBlockJob, common.job); BlockDriverState *unfiltered_bs = bdrv_skip_filters(s->target_bs); - BlockDriverState *base = bdrv_filter_or_cow_bs(s->above_base); - BlockDriverState *unfiltered_base = bdrv_skip_filters(base); + BlockDriverState *base; + BlockDriverState *unfiltered_base; Error *local_err = NULL; int ret = 0; @@ -63,6 +64,15 @@ static int stream_prepare(Job *job) bdrv_cor_filter_drop(s->cor_filter_bs); s->cor_filter_bs = NULL; + bdrv_subtree_drained_begin(s->above_base); + + base = bdrv_filter_or_cow_bs(s->above_base); + if (base) { + bdrv_ref(base); + } + + unfiltered_base = bdrv_skip_filters(base); + if (bdrv_cow_child(unfiltered_bs)) { const char *base_id = NULL, *base_fmt = NULL; if (unfiltered_base) { @@ -71,31 +81,39 @@ static int stream_prepare(Job *job) base_fmt = unfiltered_base->drv->format_name; } } + bdrv_set_backing_hd(unfiltered_bs, base, &local_err); ret = bdrv_change_backing_file(unfiltered_bs, base_id, base_fmt, false); if (local_err) { error_report_err(local_err); - return -EPERM; + ret = -EPERM; + goto out; } } +out: + if (base) { + bdrv_unref(base); + } + bdrv_subtree_drained_end(s->above_base); return ret; } static void stream_clean(Job *job) { StreamBlockJob *s = container_of(job, StreamBlockJob, common.job); - BlockJob *bjob = &s->common; if (s->cor_filter_bs) { bdrv_cor_filter_drop(s->cor_filter_bs); s->cor_filter_bs = NULL; } + blk_unref(s->blk); + s->blk = NULL; + /* Reopen the image back in read-only mode if necessary */ if (s->bs_read_only) { /* Give up write permissions before making it read-only */ - blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort); bdrv_reopen_set_read_only(s->target_bs, true, NULL); } @@ -105,7 +123,6 @@ static void stream_clean(Job *job) static int coroutine_fn stream_run(Job *job, Error **errp) { StreamBlockJob *s = container_of(job, StreamBlockJob, common.job); - BlockBackend *blk = s->common.blk; BlockDriverState *unfiltered_bs = bdrv_skip_filters(s->target_bs); int64_t len; int64_t offset = 0; @@ -156,7 +173,7 @@ static int coroutine_fn stream_run(Job *job, Error **errp) } trace_stream_one_iteration(s, offset, n, ret); if (copy) { - ret = stream_populate(blk, offset, n); + ret = stream_populate(s->blk, offset, n); } 
if (ret < 0) { BlockErrorAction action = @@ -216,6 +233,8 @@ void stream_start(const char *job_id, BlockDriverState *bs, QDict *opts; int ret; + GLOBAL_STATE_CODE(); + assert(!(base && bottom)); assert(!(backing_file_str && bottom)); @@ -291,13 +310,24 @@ void stream_start(const char *job_id, BlockDriverState *bs, } s = block_job_create(job_id, &stream_job_driver, NULL, cor_filter_bs, - BLK_PERM_CONSISTENT_READ, - basic_flags | BLK_PERM_WRITE, + 0, BLK_PERM_ALL, speed, creation_flags, NULL, NULL, errp); if (!s) { goto fail; } + s->blk = blk_new_with_bs(cor_filter_bs, BLK_PERM_CONSISTENT_READ, + basic_flags | BLK_PERM_WRITE, errp); + if (!s->blk) { + goto fail; + } + /* + * Disable request queuing in the BlockBackend to avoid deadlocks on drain: + * The job reports that it's busy until it reaches a pause point. + */ + blk_set_disable_request_queuing(s->blk, true); + blk_set_allow_aio_context_change(s->blk, true); + /* * Prevent concurrent jobs trying to modify the graph structure here, we * already have our own plans. Also don't allow resize as the image size is diff --git a/block/throttle.c b/block/throttle.c index b685166ad4..131eba3ab4 100644 --- a/block/throttle.c +++ b/block/throttle.c @@ -78,11 +78,9 @@ static int throttle_open(BlockDriverState *bs, QDict *options, char *group; int ret; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } bs->supported_write_flags = bs->file->bs->supported_write_flags | BDRV_REQ_WRITE_UNCHANGED; @@ -112,8 +110,9 @@ static int64_t throttle_getlength(BlockDriverState *bs) } static int coroutine_fn throttle_co_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { ThrottleGroupMember *tgm = bs->opaque; @@ -123,8 +122,9 @@ static int coroutine_fn throttle_co_preadv(BlockDriverState *bs, } static int coroutine_fn throttle_co_pwritev(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { ThrottleGroupMember *tgm = bs->opaque; throttle_group_co_io_limits_intercept(tgm, bytes, true); @@ -133,7 +133,7 @@ static int coroutine_fn throttle_co_pwritev(BlockDriverState *bs, } static int coroutine_fn throttle_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { ThrottleGroupMember *tgm = bs->opaque; @@ -143,7 +143,7 @@ static int coroutine_fn throttle_co_pwrite_zeroes(BlockDriverState *bs, } static int coroutine_fn throttle_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { ThrottleGroupMember *tgm = bs->opaque; throttle_group_co_io_limits_intercept(tgm, bytes, true); @@ -152,15 +152,15 @@ static int coroutine_fn throttle_co_pdiscard(BlockDriverState *bs, } static int coroutine_fn throttle_co_pwritev_compressed(BlockDriverState *bs, - uint64_t offset, - uint64_t bytes, + int64_t offset, + int64_t bytes, QEMUIOVector *qiov) { return throttle_co_pwritev(bs, offset, bytes, qiov, BDRV_REQ_WRITE_COMPRESSED); } -static int throttle_co_flush(BlockDriverState *bs) +static int coroutine_fn throttle_co_flush(BlockDriverState *bs) { return bdrv_co_flush(bs->file->bs); } diff --git a/block/trace-events 
b/block/trace-events index b3d2b1e62c..48dbf10c66 100644 --- a/block/trace-events +++ b/block/trace-events @@ -5,8 +5,8 @@ bdrv_open_common(void *bs, const char *filename, int flags, const char *format_n bdrv_lock_medium(void *bs, bool locked) "bs %p locked %d" # block-backend.c -blk_co_preadv(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags 0x%x" -blk_co_pwritev(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags 0x%x" +blk_co_preadv(void *blk, void *bs, int64_t offset, int64_t bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %" PRId64 " flags 0x%x" +blk_co_pwritev(void *blk, void *bs, int64_t offset, int64_t bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %" PRId64 " flags 0x%x" blk_root_attach(void *child, void *blk, void *bs) "child %p blk %p bs %p" blk_root_detach(void *child, void *blk, void *bs) "child %p blk %p bs %p" @@ -75,13 +75,13 @@ luring_resubmit_short_read(void *s, void *luringcb, int nread) "LuringState %p l # qcow2.c qcow2_add_task(void *co, void *bs, void *pool, const char *action, int cluster_type, uint64_t host_offset, uint64_t offset, uint64_t bytes, void *qiov, size_t qiov_offset) "co %p bs %p pool %p: %s: cluster_type %d file_cluster_offset %" PRIu64 " offset %" PRIu64 " bytes %" PRIu64 " qiov %p qiov_offset %zu" -qcow2_writev_start_req(void *co, int64_t offset, int bytes) "co %p offset 0x%" PRIx64 " bytes %d" +qcow2_writev_start_req(void *co, int64_t offset, int64_t bytes) "co %p offset 0x%" PRIx64 " bytes %" PRId64 qcow2_writev_done_req(void *co, int ret) "co %p ret %d" qcow2_writev_start_part(void *co) "co %p" qcow2_writev_done_part(void *co, int cur_bytes) "co %p cur_bytes %d" qcow2_writev_data(void *co, uint64_t offset) "co %p offset 0x%" PRIx64 -qcow2_pwrite_zeroes_start_req(void *co, int64_t offset, int count) "co %p offset 0x%" PRIx64 " count %d" -qcow2_pwrite_zeroes(void *co, int64_t offset, int count) "co %p offset 0x%" PRIx64 " count %d" +qcow2_pwrite_zeroes_start_req(void *co, int64_t offset, int64_t bytes) "co %p offset 0x%" PRIx64 " bytes %" PRId64 +qcow2_pwrite_zeroes(void *co, int64_t offset, int64_t bytes) "co %p offset 0x%" PRIx64 " bytes %" PRId64 qcow2_skip_cow(void *co, uint64_t offset, int nb_clusters) "co %p offset 0x%" PRIx64 " nb_clusters %d" # qcow2-cluster.c @@ -152,12 +152,12 @@ nvme_write_zeroes(void *s, uint64_t offset, uint64_t bytes, int flags) "s %p off nvme_qiov_unaligned(const void *qiov, int n, void *base, size_t size, int align) "qiov %p n %d base %p size 0x%zx align 0x%x" nvme_prw_buffered(void *s, uint64_t offset, uint64_t bytes, int niov, int is_write) "s %p offset 0x%"PRIx64" bytes %"PRId64" niov %d is_write %d" nvme_rw_done(void *s, int is_write, uint64_t offset, uint64_t bytes, int ret) "s %p is_write %d offset 0x%"PRIx64" bytes %"PRId64" ret %d" -nvme_dsm(void *s, uint64_t offset, uint64_t bytes) "s %p offset 0x%"PRIx64" bytes %"PRId64"" -nvme_dsm_done(void *s, uint64_t offset, uint64_t bytes, int ret) "s %p offset 0x%"PRIx64" bytes %"PRId64" ret %d" +nvme_dsm(void *s, int64_t offset, int64_t bytes) "s %p offset 0x%"PRIx64" bytes %"PRId64"" +nvme_dsm_done(void *s, int64_t offset, int64_t bytes, int ret) "s %p offset 0x%"PRIx64" bytes %"PRId64" ret %d" nvme_dma_map_flush(void *s) "s %p" nvme_free_req_queue_wait(void *s, unsigned q_index) "s %p q #%u" -nvme_create_queue_pair(unsigned q_index, void *q, unsigned size, void *aio_context, int fd) "index %u q %p size %u aioctx %p fd %d" 
-nvme_free_queue_pair(unsigned q_index, void *q) "index %u q %p" +nvme_create_queue_pair(unsigned q_index, void *q, size_t size, void *aio_context, int fd) "index %u q %p size %zu aioctx %p fd %d" +nvme_free_queue_pair(unsigned q_index, void *q, void *cq, void *sq) "index %u q %p cq %p sq %p" nvme_cmd_map_qiov(void *s, void *cmd, void *req, void *qiov, int entries) "s %p cmd %p req %p qiov %p entries %d" nvme_cmd_map_qiov_pages(void *s, int i, uint64_t page) "s %p page[%d] 0x%"PRIx64 nvme_cmd_map_qiov_iov(void *s, int i, void *page, int pages) "s %p iov[%d] %p pages %d" @@ -172,6 +172,8 @@ nbd_read_reply_entry_fail(int ret, const char *err) "ret = %d, err: %s" nbd_co_request_fail(uint64_t from, uint32_t len, uint64_t handle, uint16_t flags, uint16_t type, const char *name, int ret, const char *err) "Request failed { .from = %" PRIu64", .len = %" PRIu32 ", .handle = %" PRIu64 ", .flags = 0x%" PRIx16 ", .type = %" PRIu16 " (%s) } ret = %d, err: %s" nbd_client_handshake(const char *export_name) "export '%s'" nbd_client_handshake_success(const char *export_name) "export '%s'" +nbd_reconnect_attempt(unsigned in_flight) "in_flight %u" +nbd_reconnect_attempt_result(int ret, unsigned in_flight) "ret %d in_flight %u" # ssh.c ssh_restart_coroutine(void *co) "co=%p" diff --git a/block/vdi.c b/block/vdi.c index 548f8a057b..c0c111c4b9 100644 --- a/block/vdi.c +++ b/block/vdi.c @@ -64,6 +64,7 @@ #include "qemu/coroutine.h" #include "qemu/cutils.h" #include "qemu/uuid.h" +#include "qemu/memalign.h" /* Code configuration options. */ @@ -376,15 +377,14 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags, int ret; QemuUUID uuid_link, uuid_parent; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } logout("\n"); - ret = bdrv_pread(bs->file, 0, &header, sizeof(header)); + ret = bdrv_pread(bs->file, 0, sizeof(header), &header, 0); if (ret < 0) { goto fail; } @@ -484,8 +484,8 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags, goto fail; } - ret = bdrv_pread(bs->file, header.offset_bmap, s->bmap, - bmap_size * SECTOR_SIZE); + ret = bdrv_pread(bs->file, header.offset_bmap, bmap_size * SECTOR_SIZE, + s->bmap, 0); if (ret < 0) { goto fail_free_bmap; } @@ -544,8 +544,8 @@ static int coroutine_fn vdi_co_block_status(BlockDriverState *bs, } static int coroutine_fn -vdi_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +vdi_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BDRVVdiState *s = bs->opaque; QEMUIOVector local_qiov; @@ -600,8 +600,8 @@ vdi_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, } static int coroutine_fn -vdi_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +vdi_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BDRVVdiState *s = bs->opaque; QEMUIOVector local_qiov; @@ -663,7 +663,8 @@ vdi_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, * so this full-cluster write does not overlap a partial write * of the same cluster, issued from the "else" branch. 
*/ - ret = bdrv_pwrite(bs->file, data_offset, block, s->block_size); + ret = bdrv_co_pwrite(bs->file, data_offset, s->block_size, block, + 0); qemu_co_rwlock_unlock(&s->bmap_lock); } else { nonallocating_write: @@ -708,7 +709,7 @@ nonallocating_write: assert(VDI_IS_ALLOCATED(bmap_first)); *header = s->header; vdi_header_to_le(header); - ret = bdrv_pwrite(bs->file, 0, header, sizeof(*header)); + ret = bdrv_co_pwrite(bs->file, 0, sizeof(*header), header, 0); g_free(header); if (ret < 0) { @@ -725,11 +726,11 @@ nonallocating_write: base = ((uint8_t *)&s->bmap[0]) + bmap_first * SECTOR_SIZE; logout("will write %u block map sectors starting from entry %u\n", n_sectors, bmap_first); - ret = bdrv_pwrite(bs->file, offset * SECTOR_SIZE, base, - n_sectors * SECTOR_SIZE); + ret = bdrv_co_pwrite(bs->file, offset * SECTOR_SIZE, + n_sectors * SECTOR_SIZE, base, 0); } - return ret < 0 ? ret : 0; + return ret; } static int coroutine_fn vdi_co_do_create(BlockdevCreateOptions *create_options, @@ -844,7 +845,7 @@ static int coroutine_fn vdi_co_do_create(BlockdevCreateOptions *create_options, vdi_header_print(&header); } vdi_header_to_le(&header); - ret = blk_pwrite(blk, offset, &header, sizeof(header), 0); + ret = blk_co_pwrite(blk, offset, sizeof(header), &header, 0); if (ret < 0) { error_setg(errp, "Error writing header"); goto exit; @@ -865,7 +866,7 @@ static int coroutine_fn vdi_co_do_create(BlockdevCreateOptions *create_options, bmap[i] = VDI_UNALLOCATED; } } - ret = blk_pwrite(blk, offset, bmap, bmap_size, 0); + ret = blk_co_pwrite(blk, offset, bmap_size, bmap, 0); if (ret < 0) { error_setg(errp, "Error writing bmap"); goto exit; @@ -874,8 +875,8 @@ static int coroutine_fn vdi_co_do_create(BlockdevCreateOptions *create_options, } if (image_type == VDI_TYPE_STATIC) { - ret = blk_truncate(blk, offset + blocks * block_size, false, - PREALLOC_MODE_OFF, 0, errp); + ret = blk_co_truncate(blk, offset + blocks * block_size, false, + PREALLOC_MODE_OFF, 0, errp); if (ret < 0) { error_prepend(errp, "Failed to statically allocate file"); goto exit; diff --git a/block/vhdx-log.c b/block/vhdx-log.c index 7672161d95..0866897a85 100644 --- a/block/vhdx-log.c +++ b/block/vhdx-log.c @@ -23,6 +23,7 @@ #include "block/block_int.h" #include "qemu/error-report.h" #include "qemu/bswap.h" +#include "qemu/memalign.h" #include "vhdx.h" @@ -83,7 +84,7 @@ static int vhdx_log_peek_hdr(BlockDriverState *bs, VHDXLogEntries *log, offset = log->offset + read; - ret = bdrv_pread(bs->file, offset, hdr, sizeof(VHDXLogEntryHeader)); + ret = bdrv_pread(bs->file, offset, sizeof(VHDXLogEntryHeader), hdr, 0); if (ret < 0) { goto exit; } @@ -143,7 +144,7 @@ static int vhdx_log_read_sectors(BlockDriverState *bs, VHDXLogEntries *log, } offset = log->offset + read; - ret = bdrv_pread(bs->file, offset, buffer, VHDX_LOG_SECTOR_SIZE); + ret = bdrv_pread(bs->file, offset, VHDX_LOG_SECTOR_SIZE, buffer, 0); if (ret < 0) { goto exit; } @@ -193,8 +194,8 @@ static int vhdx_log_write_sectors(BlockDriverState *bs, VHDXLogEntries *log, /* full */ break; } - ret = bdrv_pwrite(bs->file, offset, buffer_tmp, - VHDX_LOG_SECTOR_SIZE); + ret = bdrv_pwrite(bs->file, offset, VHDX_LOG_SECTOR_SIZE, buffer_tmp, + 0); if (ret < 0) { goto exit; } @@ -465,8 +466,8 @@ static int vhdx_log_flush_desc(BlockDriverState *bs, VHDXLogDescriptor *desc, /* count is only > 1 if we are writing zeroes */ for (i = 0; i < count; i++) { - ret = bdrv_pwrite_sync(bs->file, file_offset, buffer, - VHDX_LOG_SECTOR_SIZE); + ret = bdrv_pwrite_sync(bs->file, file_offset, VHDX_LOG_SECTOR_SIZE, + 
buffer, 0); if (ret < 0) { goto exit; } @@ -969,8 +970,8 @@ static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s, if (i == 0 && leading_length) { /* partial sector at the front of the buffer */ - ret = bdrv_pread(bs->file, file_offset, merged_sector, - VHDX_LOG_SECTOR_SIZE); + ret = bdrv_pread(bs->file, file_offset, VHDX_LOG_SECTOR_SIZE, + merged_sector, 0); if (ret < 0) { goto exit; } @@ -979,10 +980,9 @@ static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s, sector_write = merged_sector; } else if (i == sectors - 1 && trailing_length) { /* partial sector at the end of the buffer */ - ret = bdrv_pread(bs->file, - file_offset, - merged_sector + trailing_length, - VHDX_LOG_SECTOR_SIZE - trailing_length); + ret = bdrv_pread(bs->file, file_offset + trailing_length, + VHDX_LOG_SECTOR_SIZE - trailing_length, + merged_sector + trailing_length, 0); if (ret < 0) { goto exit; } diff --git a/block/vhdx.c b/block/vhdx.c index 356ec4c455..bad9ca691b 100644 --- a/block/vhdx.c +++ b/block/vhdx.c @@ -25,6 +25,7 @@ #include "qemu/crc32c.h" #include "qemu/bswap.h" #include "qemu/error-report.h" +#include "qemu/memalign.h" #include "vhdx.h" #include "migration/blocker.h" #include "qemu/uuid.h" @@ -325,7 +326,7 @@ static int vhdx_write_header(BdrvChild *file, VHDXHeader *hdr, buffer = qemu_blockalign(bs_file, VHDX_HEADER_SIZE); if (read) { /* if true, we can't assume the extra reserved bytes are 0 */ - ret = bdrv_pread(file, offset, buffer, VHDX_HEADER_SIZE); + ret = bdrv_pread(file, offset, VHDX_HEADER_SIZE, buffer, 0); if (ret < 0) { goto exit; } @@ -339,7 +340,7 @@ static int vhdx_write_header(BdrvChild *file, VHDXHeader *hdr, vhdx_header_le_export(hdr, header_le); vhdx_update_checksum(buffer, VHDX_HEADER_SIZE, offsetof(VHDXHeader, checksum)); - ret = bdrv_pwrite_sync(file, offset, header_le, sizeof(VHDXHeader)); + ret = bdrv_pwrite_sync(file, offset, sizeof(VHDXHeader), header_le, 0); exit: qemu_vfree(buffer); @@ -439,8 +440,8 @@ static void vhdx_parse_header(BlockDriverState *bs, BDRVVHDXState *s, /* We have to read the whole VHDX_HEADER_SIZE instead of * sizeof(VHDXHeader), because the checksum is over the whole * region */ - ret = bdrv_pread(bs->file, VHDX_HEADER1_OFFSET, buffer, - VHDX_HEADER_SIZE); + ret = bdrv_pread(bs->file, VHDX_HEADER1_OFFSET, VHDX_HEADER_SIZE, buffer, + 0); if (ret < 0) { goto fail; } @@ -456,8 +457,8 @@ static void vhdx_parse_header(BlockDriverState *bs, BDRVVHDXState *s, } } - ret = bdrv_pread(bs->file, VHDX_HEADER2_OFFSET, buffer, - VHDX_HEADER_SIZE); + ret = bdrv_pread(bs->file, VHDX_HEADER2_OFFSET, VHDX_HEADER_SIZE, buffer, + 0); if (ret < 0) { goto fail; } @@ -530,8 +531,8 @@ static int vhdx_open_region_tables(BlockDriverState *bs, BDRVVHDXState *s) * whole block */ buffer = qemu_blockalign(bs, VHDX_HEADER_BLOCK_SIZE); - ret = bdrv_pread(bs->file, VHDX_REGION_TABLE_OFFSET, buffer, - VHDX_HEADER_BLOCK_SIZE); + ret = bdrv_pread(bs->file, VHDX_REGION_TABLE_OFFSET, + VHDX_HEADER_BLOCK_SIZE, buffer, 0); if (ret < 0) { goto fail; } @@ -643,8 +644,8 @@ static int vhdx_parse_metadata(BlockDriverState *bs, BDRVVHDXState *s) buffer = qemu_blockalign(bs, VHDX_METADATA_TABLE_MAX_SIZE); - ret = bdrv_pread(bs->file, s->metadata_rt.file_offset, buffer, - VHDX_METADATA_TABLE_MAX_SIZE); + ret = bdrv_pread(bs->file, s->metadata_rt.file_offset, + VHDX_METADATA_TABLE_MAX_SIZE, buffer, 0); if (ret < 0) { goto exit; } @@ -749,8 +750,9 @@ static int vhdx_parse_metadata(BlockDriverState *bs, BDRVVHDXState *s) ret = bdrv_pread(bs->file, 
s->metadata_entries.file_parameters_entry.offset + s->metadata_rt.file_offset, + sizeof(s->params), &s->params, - sizeof(s->params)); + 0); if (ret < 0) { goto exit; @@ -784,24 +786,27 @@ static int vhdx_parse_metadata(BlockDriverState *bs, BDRVVHDXState *s) ret = bdrv_pread(bs->file, s->metadata_entries.virtual_disk_size_entry.offset + s->metadata_rt.file_offset, + sizeof(uint64_t), &s->virtual_disk_size, - sizeof(uint64_t)); + 0); if (ret < 0) { goto exit; } ret = bdrv_pread(bs->file, s->metadata_entries.logical_sector_size_entry.offset + s->metadata_rt.file_offset, + sizeof(uint32_t), &s->logical_sector_size, - sizeof(uint32_t)); + 0); if (ret < 0) { goto exit; } ret = bdrv_pread(bs->file, s->metadata_entries.phys_sector_size_entry.offset + s->metadata_rt.file_offset, + sizeof(uint32_t), &s->physical_sector_size, - sizeof(uint32_t)); + 0); if (ret < 0) { goto exit; } @@ -996,10 +1001,9 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags, uint64_t signature; Error *local_err = NULL; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } s->bat = NULL; @@ -1009,7 +1013,7 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags, QLIST_INIT(&s->regions); /* validate the file signature */ - ret = bdrv_pread(bs->file, 0, &signature, sizeof(uint64_t)); + ret = bdrv_pread(bs->file, 0, sizeof(uint64_t), &signature, 0); if (ret < 0) { goto fail; } @@ -1068,7 +1072,7 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags, goto fail; } - ret = bdrv_pread(bs->file, s->bat_offset, s->bat, s->bat_rt.length); + ret = bdrv_pread(bs->file, s->bat_offset, s->bat_rt.length, s->bat, 0); if (ret < 0) { goto fail; } @@ -1337,7 +1341,6 @@ static coroutine_fn int vhdx_co_writev(BlockDriverState *bs, int64_t sector_num, uint64_t bat_prior_offset = 0; bool bat_update = false; - assert(!flags); qemu_iovec_init(&hd_qiov, qiov->niov); qemu_co_mutex_lock(&s->lock); @@ -1660,13 +1663,13 @@ static int vhdx_create_new_metadata(BlockBackend *blk, VHDX_META_FLAGS_IS_VIRTUAL_DISK; vhdx_metadata_entry_le_export(&md_table_entry[4]); - ret = blk_pwrite(blk, metadata_offset, buffer, VHDX_HEADER_BLOCK_SIZE, 0); + ret = blk_pwrite(blk, metadata_offset, VHDX_HEADER_BLOCK_SIZE, buffer, 0); if (ret < 0) { goto exit; } - ret = blk_pwrite(blk, metadata_offset + (64 * KiB), entry_buffer, - VHDX_METADATA_ENTRY_BUFFER_SIZE, 0); + ret = blk_pwrite(blk, metadata_offset + (64 * KiB), + VHDX_METADATA_ENTRY_BUFFER_SIZE, entry_buffer, 0); if (ret < 0) { goto exit; } @@ -1751,7 +1754,7 @@ static int vhdx_create_bat(BlockBackend *blk, BDRVVHDXState *s, s->bat[sinfo.bat_idx] = cpu_to_le64(s->bat[sinfo.bat_idx]); sector_num += s->sectors_per_block; } - ret = blk_pwrite(blk, file_offset, s->bat, length, 0); + ret = blk_pwrite(blk, file_offset, length, s->bat, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to write the BAT"); goto exit; @@ -1855,15 +1858,15 @@ static int vhdx_create_new_region_table(BlockBackend *blk, } /* Now write out the region headers to disk */ - ret = blk_pwrite(blk, VHDX_REGION_TABLE_OFFSET, buffer, - VHDX_HEADER_BLOCK_SIZE, 0); + ret = blk_pwrite(blk, VHDX_REGION_TABLE_OFFSET, VHDX_HEADER_BLOCK_SIZE, + buffer, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to write first region table"); goto exit; } - ret = blk_pwrite(blk, VHDX_REGION_TABLE2_OFFSET, buffer, - 
VHDX_HEADER_BLOCK_SIZE, 0); + ret = blk_pwrite(blk, VHDX_REGION_TABLE2_OFFSET, VHDX_HEADER_BLOCK_SIZE, + buffer, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to write second region table"); goto exit; @@ -2007,15 +2010,15 @@ static int coroutine_fn vhdx_co_create(BlockdevCreateOptions *opts, creator = g_utf8_to_utf16("QEMU v" QEMU_VERSION, -1, NULL, &creator_items, NULL); signature = cpu_to_le64(VHDX_FILE_SIGNATURE); - ret = blk_pwrite(blk, VHDX_FILE_ID_OFFSET, &signature, sizeof(signature), - 0); + ret = blk_co_pwrite(blk, VHDX_FILE_ID_OFFSET, sizeof(signature), &signature, + 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to write file signature"); goto delete_and_exit; } if (creator) { - ret = blk_pwrite(blk, VHDX_FILE_ID_OFFSET + sizeof(signature), - creator, creator_items * sizeof(gunichar2), 0); + ret = blk_co_pwrite(blk, VHDX_FILE_ID_OFFSET + sizeof(signature), + creator_items * sizeof(gunichar2), creator, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to write creator field"); goto delete_and_exit; diff --git a/block/vmdk.c b/block/vmdk.c index 4499f136bd..26376352b9 100644 --- a/block/vmdk.c +++ b/block/vmdk.c @@ -33,6 +33,7 @@ #include "qemu/module.h" #include "qemu/option.h" #include "qemu/bswap.h" +#include "qemu/memalign.h" #include "migration/blocker.h" #include "qemu/cutils.h" #include @@ -60,6 +61,7 @@ #define VMDK_ZEROED (-3) #define BLOCK_OPT_ZEROED_GRAIN "zeroed_grain" +#define BLOCK_OPT_TOOLSVERSION "toolsversion" typedef struct { uint32_t version; @@ -176,6 +178,10 @@ typedef struct BDRVVmdkState { char *create_type; } BDRVVmdkState; +typedef struct BDRVVmdkReopenState { + bool *extents_using_bs_file; +} BDRVVmdkReopenState; + typedef struct VmdkMetaData { unsigned int l1_index; unsigned int l2_index; @@ -301,7 +307,7 @@ static int vmdk_read_cid(BlockDriverState *bs, int parent, uint32_t *pcid) int ret; desc = g_malloc0(DESC_SIZE); - ret = bdrv_pread(bs->file, s->desc_offset, desc, DESC_SIZE); + ret = bdrv_pread(bs->file, s->desc_offset, DESC_SIZE, desc, 0); if (ret < 0) { goto out; } @@ -342,7 +348,7 @@ static int vmdk_write_cid(BlockDriverState *bs, uint32_t cid) desc = g_malloc0(DESC_SIZE); tmp_desc = g_malloc0(DESC_SIZE); - ret = bdrv_pread(bs->file, s->desc_offset, desc, DESC_SIZE); + ret = bdrv_pread(bs->file, s->desc_offset, DESC_SIZE, desc, 0); if (ret < 0) { goto out; } @@ -362,7 +368,7 @@ static int vmdk_write_cid(BlockDriverState *bs, uint32_t cid) pstrcat(desc, DESC_SIZE, tmp_desc); } - ret = bdrv_pwrite_sync(bs->file, s->desc_offset, desc, DESC_SIZE); + ret = bdrv_pwrite_sync(bs->file, s->desc_offset, DESC_SIZE, desc, 0); out: g_free(desc); @@ -398,15 +404,63 @@ static int vmdk_is_cid_valid(BlockDriverState *bs) return 1; } -/* We have nothing to do for VMDK reopen, stubs just return success */ static int vmdk_reopen_prepare(BDRVReopenState *state, BlockReopenQueue *queue, Error **errp) { + BDRVVmdkState *s; + BDRVVmdkReopenState *rs; + int i; + assert(state != NULL); assert(state->bs != NULL); + assert(state->opaque == NULL); + + s = state->bs->opaque; + + rs = g_new0(BDRVVmdkReopenState, 1); + state->opaque = rs; + + /* + * Check whether there are any extents stored in bs->file; if bs->file + * changes, we will need to update their .file pointers to follow suit + */ + rs->extents_using_bs_file = g_new(bool, s->num_extents); + for (i = 0; i < s->num_extents; i++) { + rs->extents_using_bs_file[i] = s->extents[i].file == state->bs->file; + } + return 0; } +static void vmdk_reopen_clean(BDRVReopenState *state) +{ + 
BDRVVmdkReopenState *rs = state->opaque; + + g_free(rs->extents_using_bs_file); + g_free(rs); + state->opaque = NULL; +} + +static void vmdk_reopen_commit(BDRVReopenState *state) +{ + BDRVVmdkState *s = state->bs->opaque; + BDRVVmdkReopenState *rs = state->opaque; + int i; + + for (i = 0; i < s->num_extents; i++) { + if (rs->extents_using_bs_file[i]) { + s->extents[i].file = state->bs->file; + } + } + + vmdk_reopen_clean(state); +} + +static void vmdk_reopen_abort(BDRVReopenState *state) +{ + vmdk_reopen_clean(state); +} + static int vmdk_parent_open(BlockDriverState *bs) { char *p_name; @@ -415,11 +469,10 @@ static int vmdk_parent_open(BlockDriverState *bs) int ret; desc = g_malloc0(DESC_SIZE + 1); - ret = bdrv_pread(bs->file, s->desc_offset, desc, DESC_SIZE); + ret = bdrv_pread(bs->file, s->desc_offset, DESC_SIZE, desc, 0); if (ret < 0) { goto out; } - ret = 0; p_name = strstr(desc, "parentFileNameHint"); if (p_name != NULL) { @@ -535,10 +588,8 @@ static int vmdk_init_tables(BlockDriverState *bs, VmdkExtent *extent, return -ENOMEM; } - ret = bdrv_pread(extent->file, - extent->l1_table_offset, - extent->l1_table, - l1_size); + ret = bdrv_pread(extent->file, extent->l1_table_offset, l1_size, + extent->l1_table, 0); if (ret < 0) { bdrv_refresh_filename(extent->file->bs); error_setg_errno(errp, -ret, @@ -562,10 +613,8 @@ static int vmdk_init_tables(BlockDriverState *bs, VmdkExtent *extent, ret = -ENOMEM; goto fail_l1; } - ret = bdrv_pread(extent->file, - extent->l1_backup_table_offset, - extent->l1_backup_table, - l1_size); + ret = bdrv_pread(extent->file, extent->l1_backup_table_offset, + l1_size, extent->l1_backup_table, 0); if (ret < 0) { bdrv_refresh_filename(extent->file->bs); error_setg_errno(errp, -ret, @@ -597,7 +646,7 @@ static int vmdk_open_vmfs_sparse(BlockDriverState *bs, VMDK3Header header; VmdkExtent *extent = NULL; - ret = bdrv_pread(file, sizeof(magic), &header, sizeof(header)); + ret = bdrv_pread(file, sizeof(magic), sizeof(header), &header, 0); if (ret < 0) { bdrv_refresh_filename(file->bs); error_setg_errno(errp, -ret, @@ -761,7 +810,7 @@ static int vmdk_open_se_sparse(BlockDriverState *bs, assert(sizeof(const_header) == SECTOR_SIZE); - ret = bdrv_pread(file, 0, &const_header, sizeof(const_header)); + ret = bdrv_pread(file, 0, sizeof(const_header), &const_header, 0); if (ret < 0) { bdrv_refresh_filename(file->bs); error_setg_errno(errp, -ret, @@ -778,9 +827,8 @@ static int vmdk_open_se_sparse(BlockDriverState *bs, assert(sizeof(volatile_header) == SECTOR_SIZE); - ret = bdrv_pread(file, - const_header.volatile_header_offset * SECTOR_SIZE, - &volatile_header, sizeof(volatile_header)); + ret = bdrv_pread(file, const_header.volatile_header_offset * SECTOR_SIZE, + sizeof(volatile_header), &volatile_header, 0); if (ret < 0) { bdrv_refresh_filename(file->bs); error_setg_errno(errp, -ret, @@ -850,13 +898,13 @@ static char *vmdk_read_desc(BdrvChild *file, uint64_t desc_offset, Error **errp) size = MIN(size, (1 << 20) - 1); /* avoid unbounded allocation */ buf = g_malloc(size + 1); - ret = bdrv_pread(file, desc_offset, buf, size); + ret = bdrv_pread(file, desc_offset, size, buf, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Could not read from file"); g_free(buf); return NULL; } - buf[ret] = 0; + buf[size] = 0; return buf; } @@ -874,7 +922,7 @@ static int vmdk_open_vmdk4(BlockDriverState *bs, int64_t l1_backup_offset = 0; bool compressed; - ret = bdrv_pread(file, sizeof(magic), &header, sizeof(header)); + ret = bdrv_pread(file, sizeof(magic), sizeof(header), &header, 0); if (ret 
< 0) { bdrv_refresh_filename(file->bs); error_setg_errno(errp, -ret, @@ -925,9 +973,8 @@ static int vmdk_open_vmdk4(BlockDriverState *bs, } QEMU_PACKED eos_marker; } QEMU_PACKED footer; - ret = bdrv_pread(file, - bs->file->bs->total_sectors * 512 - 1536, - &footer, sizeof(footer)); + ret = bdrv_pread(file, bs->file->bs->total_sectors * 512 - 1536, + sizeof(footer), &footer, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to read footer"); return ret; @@ -1261,10 +1308,9 @@ static int vmdk_open(BlockDriverState *bs, QDict *options, int flags, BDRVVmdkState *s = bs->opaque; uint32_t magic; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } buf = vmdk_read_desc(bs->file, 0, errp); @@ -1357,13 +1403,13 @@ static void vmdk_refresh_limits(BlockDriverState *bs, Error **errp) * [@skip_start_sector, @skip_end_sector) is not copied or written, and leave * it for call to write user data in the request. */ -static int get_whole_cluster(BlockDriverState *bs, - VmdkExtent *extent, - uint64_t cluster_offset, - uint64_t offset, - uint64_t skip_start_bytes, - uint64_t skip_end_bytes, - bool zeroed) +static int coroutine_fn get_whole_cluster(BlockDriverState *bs, + VmdkExtent *extent, + uint64_t cluster_offset, + uint64_t offset, + uint64_t skip_start_bytes, + uint64_t skip_end_bytes, + bool zeroed) { int ret = VMDK_OK; int64_t cluster_bytes; @@ -1394,16 +1440,16 @@ static int get_whole_cluster(BlockDriverState *bs, if (copy_from_backing) { /* qcow2 emits this on bs->file instead of bs->backing */ BLKDBG_EVENT(extent->file, BLKDBG_COW_READ); - ret = bdrv_pread(bs->backing, offset, whole_grain, - skip_start_bytes); + ret = bdrv_co_pread(bs->backing, offset, skip_start_bytes, + whole_grain, 0); if (ret < 0) { ret = VMDK_ERROR; goto exit; } } BLKDBG_EVENT(extent->file, BLKDBG_COW_WRITE); - ret = bdrv_pwrite(extent->file, cluster_offset, whole_grain, - skip_start_bytes); + ret = bdrv_co_pwrite(extent->file, cluster_offset, skip_start_bytes, + whole_grain, 0); if (ret < 0) { ret = VMDK_ERROR; goto exit; @@ -1414,18 +1460,18 @@ static int get_whole_cluster(BlockDriverState *bs, if (copy_from_backing) { /* qcow2 emits this on bs->file instead of bs->backing */ BLKDBG_EVENT(extent->file, BLKDBG_COW_READ); - ret = bdrv_pread(bs->backing, offset + skip_end_bytes, - whole_grain + skip_end_bytes, - cluster_bytes - skip_end_bytes); + ret = bdrv_co_pread(bs->backing, offset + skip_end_bytes, + cluster_bytes - skip_end_bytes, + whole_grain + skip_end_bytes, 0); if (ret < 0) { ret = VMDK_ERROR; goto exit; } } BLKDBG_EVENT(extent->file, BLKDBG_COW_WRITE); - ret = bdrv_pwrite(extent->file, cluster_offset + skip_end_bytes, - whole_grain + skip_end_bytes, - cluster_bytes - skip_end_bytes); + ret = bdrv_co_pwrite(extent->file, cluster_offset + skip_end_bytes, + cluster_bytes - skip_end_bytes, + whole_grain + skip_end_bytes, 0); if (ret < 0) { ret = VMDK_ERROR; goto exit; @@ -1438,29 +1484,29 @@ exit: return ret; } -static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data, - uint32_t offset) +static int coroutine_fn vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data, + uint32_t offset) { offset = cpu_to_le32(offset); /* update L2 table */ BLKDBG_EVENT(extent->file, BLKDBG_L2_UPDATE); - if (bdrv_pwrite(extent->file, - ((int64_t)m_data->l2_offset * 512) - + (m_data->l2_index * sizeof(offset)), - &offset, sizeof(offset)) < 0) { 
+ if (bdrv_co_pwrite(extent->file, + ((int64_t)m_data->l2_offset * 512) + + (m_data->l2_index * sizeof(offset)), + sizeof(offset), &offset, 0) < 0) { return VMDK_ERROR; } /* update backup L2 table */ if (extent->l1_backup_table_offset != 0) { m_data->l2_offset = extent->l1_backup_table[m_data->l1_index]; - if (bdrv_pwrite(extent->file, - ((int64_t)m_data->l2_offset * 512) - + (m_data->l2_index * sizeof(offset)), - &offset, sizeof(offset)) < 0) { + if (bdrv_co_pwrite(extent->file, + ((int64_t)m_data->l2_offset * 512) + + (m_data->l2_index * sizeof(offset)), + sizeof(offset), &offset, 0) < 0) { return VMDK_ERROR; } } - if (bdrv_flush(extent->file->bs) < 0) { + if (bdrv_co_flush(extent->file->bs) < 0) { return VMDK_ERROR; } if (m_data->l2_cache_entry) { @@ -1490,14 +1536,14 @@ static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data, * VMDK_UNALLOC if cluster is not mapped and @allocate is false. * VMDK_ERROR if failed. */ -static int get_cluster_offset(BlockDriverState *bs, - VmdkExtent *extent, - VmdkMetaData *m_data, - uint64_t offset, - bool allocate, - uint64_t *cluster_offset, - uint64_t skip_start_bytes, - uint64_t skip_end_bytes) +static int coroutine_fn get_cluster_offset(BlockDriverState *bs, + VmdkExtent *extent, + VmdkMetaData *m_data, + uint64_t offset, + bool allocate, + uint64_t *cluster_offset, + uint64_t skip_start_bytes, + uint64_t skip_end_bytes) { unsigned int l1_index, l2_offset, l2_index; int min_index, i, j; @@ -1577,11 +1623,11 @@ static int get_cluster_offset(BlockDriverState *bs, } l2_table = (char *)extent->l2_cache + (min_index * l2_size_bytes); BLKDBG_EVENT(extent->file, BLKDBG_L2_LOAD); - if (bdrv_pread(extent->file, + if (bdrv_co_pread(extent->file, (int64_t)l2_offset * 512, - l2_table, - l2_size_bytes - ) != l2_size_bytes) { + l2_size_bytes, + l2_table, 0 + ) < 0) { return VMDK_ERROR; } @@ -1739,10 +1785,11 @@ static int coroutine_fn vmdk_co_block_status(BlockDriverState *bs, return ret; } -static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset, - int64_t offset_in_cluster, QEMUIOVector *qiov, - uint64_t qiov_offset, uint64_t n_bytes, - uint64_t offset) +static int coroutine_fn +vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset, + int64_t offset_in_cluster, QEMUIOVector *qiov, + uint64_t qiov_offset, uint64_t n_bytes, + uint64_t offset) { int ret; VmdkGrainMarker *data = NULL; @@ -1820,9 +1867,10 @@ static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset, return ret; } -static int vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset, - int64_t offset_in_cluster, QEMUIOVector *qiov, - int bytes) +static int coroutine_fn +vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset, + int64_t offset_in_cluster, QEMUIOVector *qiov, + int bytes) { int ret; int cluster_bytes, buf_bytes; @@ -1849,9 +1897,8 @@ static int vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset, cluster_buf = g_malloc(buf_bytes); uncomp_buf = g_malloc(cluster_bytes); BLKDBG_EVENT(extent->file, BLKDBG_READ_COMPRESSED); - ret = bdrv_pread(extent->file, - cluster_offset, - cluster_buf, buf_bytes); + ret = bdrv_co_pread(extent->file, cluster_offset, buf_bytes, cluster_buf, + 0); if (ret < 0) { goto out; } @@ -1888,8 +1935,8 @@ static int vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset, } static int coroutine_fn -vmdk_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +vmdk_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, 
BdrvRequestFlags flags) { BDRVVmdkState *s = bs->opaque; int ret; @@ -1969,9 +2016,9 @@ fail: * * Returns: error code with 0 for success. */ -static int vmdk_pwritev(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, - bool zeroed, bool zero_dry_run) +static int coroutine_fn vmdk_pwritev(BlockDriverState *bs, uint64_t offset, + uint64_t bytes, QEMUIOVector *qiov, + bool zeroed, bool zero_dry_run) { BDRVVmdkState *s = bs->opaque; VmdkExtent *extent = NULL; @@ -2068,8 +2115,8 @@ static int vmdk_pwritev(BlockDriverState *bs, uint64_t offset, } static int coroutine_fn -vmdk_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +vmdk_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { int ret; BDRVVmdkState *s = bs->opaque; @@ -2080,8 +2127,8 @@ vmdk_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, } static int coroutine_fn -vmdk_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov) +vmdk_co_pwritev_compressed(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov) { if (bytes == 0) { /* The caller will write bytes 0 to signal EOF. @@ -2096,8 +2143,8 @@ vmdk_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset, return length; } length = QEMU_ALIGN_UP(length, BDRV_SECTOR_SIZE); - ret = bdrv_truncate(s->extents[i].file, length, false, - PREALLOC_MODE_OFF, 0, NULL); + ret = bdrv_co_truncate(s->extents[i].file, length, false, + PREALLOC_MODE_OFF, 0, NULL); if (ret < 0) { return ret; } @@ -2109,7 +2156,7 @@ vmdk_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset, static int coroutine_fn vmdk_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, - int bytes, + int64_t bytes, BdrvRequestFlags flags) { int ret; @@ -2190,12 +2237,12 @@ static int vmdk_init_extent(BlockBackend *blk, header.check_bytes[3] = 0xa; /* write all the data */ - ret = blk_pwrite(blk, 0, &magic, sizeof(magic), 0); + ret = blk_pwrite(blk, 0, sizeof(magic), &magic, 0); if (ret < 0) { error_setg(errp, QERR_IO_ERROR); goto exit; } - ret = blk_pwrite(blk, sizeof(magic), &header, sizeof(header), 0); + ret = blk_pwrite(blk, sizeof(magic), sizeof(header), &header, 0); if (ret < 0) { error_setg(errp, QERR_IO_ERROR); goto exit; @@ -2215,7 +2262,7 @@ static int vmdk_init_extent(BlockBackend *blk, gd_buf[i] = cpu_to_le32(tmp); } ret = blk_pwrite(blk, le64_to_cpu(header.rgd_offset) * BDRV_SECTOR_SIZE, - gd_buf, gd_buf_size, 0); + gd_buf_size, gd_buf, 0); if (ret < 0) { error_setg(errp, QERR_IO_ERROR); goto exit; @@ -2227,7 +2274,7 @@ static int vmdk_init_extent(BlockBackend *blk, gd_buf[i] = cpu_to_le32(tmp); } ret = blk_pwrite(blk, le64_to_cpu(header.gd_offset) * BDRV_SECTOR_SIZE, - gd_buf, gd_buf_size, 0); + gd_buf_size, gd_buf, 0); if (ret < 0) { error_setg(errp, QERR_IO_ERROR); } @@ -2344,6 +2391,7 @@ static int coroutine_fn vmdk_co_do_create(int64_t size, BlockdevVmdkAdapterType adapter_type, const char *backing_file, const char *hw_version, + const char *toolsversion, bool compat6, bool zeroed_grain, vmdk_create_extent_fn extent_fn, @@ -2384,7 +2432,8 @@ static int coroutine_fn vmdk_co_do_create(int64_t size, "ddb.geometry.cylinders = \"%" PRId64 "\"\n" "ddb.geometry.heads = \"%" PRIu32 "\"\n" "ddb.geometry.sectors = \"63\"\n" - "ddb.adapterType = \"%s\"\n"; + "ddb.adapterType = \"%s\"\n" + "ddb.toolsVersion = \"%s\"\n"; ext_desc_lines = g_string_new(NULL); @@ -2401,6 +2450,9 @@ static int coroutine_fn 
vmdk_co_do_create(int64_t size, if (!hw_version) { hw_version = "4"; } + if (!toolsversion) { + toolsversion = "2147483647"; + } if (adapter_type != BLOCKDEV_VMDK_ADAPTER_TYPE_IDE) { /* that's the number of heads with which vmware operates when @@ -2525,14 +2577,15 @@ static int coroutine_fn vmdk_co_do_create(int64_t size, size / (int64_t)(63 * number_heads * BDRV_SECTOR_SIZE), number_heads, - BlockdevVmdkAdapterType_str(adapter_type)); + BlockdevVmdkAdapterType_str(adapter_type), + toolsversion); desc_len = strlen(desc); /* the descriptor offset = 0x200 */ if (!split && !flat) { desc_offset = 0x200; } - ret = blk_pwrite(blk, desc_offset, desc, desc_len, 0); + ret = blk_co_pwrite(blk, desc_offset, desc_len, desc, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Could not write description"); goto exit; @@ -2540,7 +2593,7 @@ static int coroutine_fn vmdk_co_do_create(int64_t size, /* bdrv_pwrite write padding zeros to align to sector, we don't need that * for description file */ if (desc_offset == 0) { - ret = blk_truncate(blk, desc_len, false, PREALLOC_MODE_OFF, 0, errp); + ret = blk_co_truncate(blk, desc_len, false, PREALLOC_MODE_OFF, 0, errp); if (ret < 0) { goto exit; } @@ -2617,6 +2670,7 @@ static int coroutine_fn vmdk_co_create_opts(BlockDriver *drv, BlockdevVmdkAdapterType adapter_type_enum; char *backing_file = NULL; char *hw_version = NULL; + char *toolsversion = NULL; char *fmt = NULL; BlockdevVmdkSubformat subformat; int ret = 0; @@ -2649,6 +2703,7 @@ static int coroutine_fn vmdk_co_create_opts(BlockDriver *drv, adapter_type = qemu_opt_get_del(opts, BLOCK_OPT_ADAPTER_TYPE); backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); hw_version = qemu_opt_get_del(opts, BLOCK_OPT_HWVERSION); + toolsversion = qemu_opt_get_del(opts, BLOCK_OPT_TOOLSVERSION); compat6 = qemu_opt_get_bool_del(opts, BLOCK_OPT_COMPAT6, false); if (strcmp(hw_version, "undefined") == 0) { g_free(hw_version); @@ -2692,14 +2747,15 @@ static int coroutine_fn vmdk_co_create_opts(BlockDriver *drv, .opts = opts, }; ret = vmdk_co_do_create(total_size, subformat, adapter_type_enum, - backing_file, hw_version, compat6, zeroed_grain, - vmdk_co_create_opts_cb, &data, errp); + backing_file, hw_version, toolsversion, compat6, + zeroed_grain, vmdk_co_create_opts_cb, &data, errp); exit: g_free(backing_fmt); g_free(adapter_type); g_free(backing_file); g_free(hw_version); + g_free(toolsversion); g_free(fmt); g_free(desc); g_free(path); @@ -2782,6 +2838,7 @@ static int coroutine_fn vmdk_co_create(BlockdevCreateOptions *create_options, opts->adapter_type, opts->backing_file, opts->hwversion, + opts->toolsversion, false, opts->zeroed_grain, vmdk_co_create_cb, @@ -3031,6 +3088,11 @@ static QemuOptsList vmdk_create_opts = { .help = "VMDK hardware version", .def_value_str = "undefined" }, + { + .name = BLOCK_OPT_TOOLSVERSION, + .type = QEMU_OPT_STRING, + .help = "VMware guest tools version", + }, { .name = BLOCK_OPT_SUBFMT, .type = QEMU_OPT_STRING, @@ -3055,6 +3117,8 @@ static BlockDriver bdrv_vmdk = { .bdrv_open = vmdk_open, .bdrv_co_check = vmdk_co_check, .bdrv_reopen_prepare = vmdk_reopen_prepare, + .bdrv_reopen_commit = vmdk_reopen_commit, + .bdrv_reopen_abort = vmdk_reopen_abort, .bdrv_child_perm = bdrv_default_perms, .bdrv_co_preadv = vmdk_co_preadv, .bdrv_co_pwritev = vmdk_co_pwritev, diff --git a/block/vpc.c b/block/vpc.c index 17a705b482..95841f259a 100644 --- a/block/vpc.c +++ b/block/vpc.c @@ -33,6 +33,7 @@ #include "migration/blocker.h" #include "qemu/bswap.h" #include "qemu/uuid.h" +#include "qemu/memalign.h" 
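Most of the byte-access changes in these driver files follow one mechanical pattern. Below is a minimal sketch of the call shape assumed by the converted hunks above and below; the helper name and the read it performs are invented for illustration only:

/*
 * Assumed new argument order: offset, byte count, buffer, flags. The
 * vmdk_read_desc() and vdi.c hunks above also suggest that a non-negative
 * return now simply means success rather than a byte count.
 */
static int example_read_probe(BlockDriverState *bs, void *buf, size_t len)
{
    /* old shape: bdrv_pread(bs->file, 0, buf, len);        */
    /* new shape: byte count before the buffer, flags added */
    return bdrv_pread(bs->file, 0, len, buf, 0);
}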
#include "qapi/qmp/qdict.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qapi-visit-block-core.h" @@ -232,10 +233,9 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags, int ret; int64_t bs_size; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } opts = qemu_opts_create(&vpc_runtime_opts, NULL, 0, &error_abort); @@ -251,7 +251,7 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags, goto fail; } - ret = bdrv_pread(bs->file, 0, &s->footer, sizeof(s->footer)); + ret = bdrv_pread(bs->file, 0, sizeof(s->footer), &s->footer, 0); if (ret < 0) { error_setg(errp, "Unable to read VHD header"); goto fail; @@ -271,12 +271,13 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags, } /* If a fixed disk, the footer is found only at the end of the file */ - ret = bdrv_pread(bs->file, offset - sizeof(*footer), - footer, sizeof(*footer)); + ret = bdrv_pread(bs->file, offset - sizeof(*footer), sizeof(*footer), + footer, 0); if (ret < 0) { goto fail; } - if (strncmp(footer->creator, "conectix", 8)) { + if (strncmp(footer->creator, "conectix", 8) || + be32_to_cpu(footer->type) != VHD_FIXED) { error_setg(errp, "invalid VPC image"); ret = -EINVAL; goto fail; @@ -345,7 +346,7 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags, if (disk_type == VHD_DYNAMIC) { ret = bdrv_pread(bs->file, be64_to_cpu(footer->data_offset), - &dyndisk_header, sizeof(dyndisk_header)); + sizeof(dyndisk_header), &dyndisk_header, 0); if (ret < 0) { error_setg(errp, "Error reading dynamic VHD header"); goto fail; @@ -399,8 +400,8 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags, s->bat_offset = be64_to_cpu(dyndisk_header.table_offset); - ret = bdrv_pread(bs->file, s->bat_offset, s->pagetable, - pagetable_size); + ret = bdrv_pread(bs->file, s->bat_offset, pagetable_size, + s->pagetable, 0); if (ret < 0) { error_setg(errp, "Error reading pagetable"); goto fail; @@ -514,7 +515,8 @@ static inline int64_t get_image_offset(BlockDriverState *bs, uint64_t offset, s->last_bitmap_offset = bitmap_offset; memset(bitmap, 0xff, s->bitmap_size); - r = bdrv_pwrite_sync(bs->file, bitmap_offset, bitmap, s->bitmap_size); + r = bdrv_pwrite_sync(bs->file, bitmap_offset, s->bitmap_size, bitmap, + 0); if (r < 0) { *err = r; return -2; @@ -536,7 +538,7 @@ static int rewrite_footer(BlockDriverState *bs) BDRVVPCState *s = bs->opaque; int64_t offset = s->free_data_block_offset; - ret = bdrv_pwrite_sync(bs->file, offset, &s->footer, sizeof(s->footer)); + ret = bdrv_pwrite_sync(bs->file, offset, sizeof(s->footer), &s->footer, 0); if (ret < 0) return ret; @@ -570,8 +572,8 @@ static int64_t alloc_block(BlockDriverState *bs, int64_t offset) /* Initialize the block's bitmap */ memset(bitmap, 0xff, s->bitmap_size); - ret = bdrv_pwrite_sync(bs->file, s->free_data_block_offset, bitmap, - s->bitmap_size); + ret = bdrv_pwrite_sync(bs->file, s->free_data_block_offset, + s->bitmap_size, bitmap, 0); if (ret < 0) { return ret; } @@ -585,7 +587,7 @@ static int64_t alloc_block(BlockDriverState *bs, int64_t offset) /* Write BAT entry to disk */ bat_offset = s->bat_offset + (4 * index); bat_value = cpu_to_be32(s->pagetable[index]); - ret = bdrv_pwrite_sync(bs->file, bat_offset, &bat_value, 4); + ret = bdrv_pwrite_sync(bs->file, bat_offset, 4, &bat_value, 0); if (ret < 0) goto fail; @@ -608,8 +610,8 
@@ static int vpc_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) } static int coroutine_fn -vpc_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +vpc_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BDRVVPCState *s = bs->opaque; int ret; @@ -658,8 +660,8 @@ fail: } static int coroutine_fn -vpc_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +vpc_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BDRVVPCState *s = bs->opaque; int64_t image_offset; @@ -831,13 +833,13 @@ static int create_dynamic_disk(BlockBackend *blk, VHDFooter *footer, block_size = 0x200000; num_bat_entries = DIV_ROUND_UP(total_sectors, block_size / 512); - ret = blk_pwrite(blk, offset, footer, sizeof(*footer), 0); + ret = blk_pwrite(blk, offset, sizeof(*footer), footer, 0); if (ret < 0) { goto fail; } offset = 1536 + ((num_bat_entries * 4 + 511) & ~511); - ret = blk_pwrite(blk, offset, footer, sizeof(*footer), 0); + ret = blk_pwrite(blk, offset, sizeof(*footer), footer, 0); if (ret < 0) { goto fail; } @@ -847,7 +849,7 @@ static int create_dynamic_disk(BlockBackend *blk, VHDFooter *footer, memset(bat_sector, 0xFF, 512); for (i = 0; i < DIV_ROUND_UP(num_bat_entries * 4, 512); i++) { - ret = blk_pwrite(blk, offset, bat_sector, 512, 0); + ret = blk_pwrite(blk, offset, 512, bat_sector, 0); if (ret < 0) { goto fail; } @@ -875,7 +877,7 @@ static int create_dynamic_disk(BlockBackend *blk, VHDFooter *footer, /* Write the header */ offset = 512; - ret = blk_pwrite(blk, offset, &dyndisk_header, sizeof(dyndisk_header), 0); + ret = blk_pwrite(blk, offset, sizeof(dyndisk_header), &dyndisk_header, 0); if (ret < 0) { goto fail; } @@ -898,8 +900,8 @@ static int create_fixed_disk(BlockBackend *blk, VHDFooter *footer, return ret; } - ret = blk_pwrite(blk, total_size - sizeof(*footer), - footer, sizeof(*footer), 0); + ret = blk_pwrite(blk, total_size - sizeof(*footer), sizeof(*footer), + footer, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Unable to write VHD header"); return ret; diff --git a/block/vvfat.c b/block/vvfat.c index 34bf1e3a86..723c91216e 100644 --- a/block/vvfat.c +++ b/block/vvfat.c @@ -25,6 +25,7 @@ #include "qemu/osdep.h" #include +#include #include "qapi/error.h" #include "block/block_int.h" #include "block/qdict.h" @@ -499,7 +500,7 @@ static bool valid_filename(const unsigned char *name) (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || c > 127 || - strchr("$%'-_@~`!(){}^#&.+,;=[]", c) != NULL)) + strchr(" $%'-_@~`!(){}^#&.+,;=[]", c) != NULL)) { return false; } @@ -882,7 +883,7 @@ static int read_directory(BDRVVVFATState* s, int mapping_index) return 0; } -static inline uint32_t sector2cluster(BDRVVVFATState* s,off_t sector_num) +static inline int32_t sector2cluster(BDRVVVFATState* s,off_t sector_num) { return (sector_num - s->offset_to_root_dir) / s->sectors_per_cluster; } @@ -1230,6 +1231,7 @@ static int vvfat_open(BlockDriverState *bs, QDict *options, int flags, dirname, cyls, heads, secs)); s->sector_count = cyls * heads * secs - s->offset_to_bootsector; + bs->total_sectors = cyls * heads * secs; if (qemu_opt_get_bool(opts, "rw", false)) { if (!bdrv_is_read_only(bs)) { @@ -1250,8 +1252,6 @@ static int vvfat_open(BlockDriverState *bs, QDict *options, int flags, } } - bs->total_sectors = cyls * heads * secs; - if (init_directories(s, dirname, heads, secs, errp)) { ret = -EIO; goto fail; @@ 
-1279,8 +1279,18 @@ static int vvfat_open(BlockDriverState *bs, QDict *options, int flags, qemu_co_mutex_init(&s->lock); - ret = 0; + qemu_opts_del(opts); + + return 0; + fail: + g_free(s->qcow_filename); + s->qcow_filename = NULL; + g_free(s->cluster_buffer); + s->cluster_buffer = NULL; + g_free(s->used_clusters); + s->used_clusters = NULL; + qemu_opts_del(opts); return ret; } @@ -1479,8 +1489,8 @@ static int vvfat_read(BlockDriverState *bs, int64_t sector_num, DLOG(fprintf(stderr, "sectors %" PRId64 "+%" PRId64 " allocated\n", sector_num, n >> BDRV_SECTOR_BITS)); - if (bdrv_pread(s->qcow, sector_num * BDRV_SECTOR_SIZE, - buf + i * 0x200, n) < 0) { + if (bdrv_pread(s->qcow, sector_num * BDRV_SECTOR_SIZE, n, + buf + i * 0x200, 0) < 0) { return -1; } i += (n >> BDRV_SECTOR_BITS) - 1; @@ -1522,8 +1532,8 @@ static int vvfat_read(BlockDriverState *bs, int64_t sector_num, } static int coroutine_fn -vvfat_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +vvfat_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { int ret; BDRVVVFATState *s = bs->opaque; @@ -1969,7 +1979,8 @@ static uint32_t get_cluster_count_for_direntry(BDRVVVFATState* s, return -1; } res = bdrv_pwrite(s->qcow, offset * BDRV_SECTOR_SIZE, - s->cluster_buffer, BDRV_SECTOR_SIZE); + BDRV_SECTOR_SIZE, s->cluster_buffer, + 0); if (res < 0) { return -2; } @@ -2716,13 +2727,9 @@ static int handle_renames_and_mkdirs(BDRVVVFATState* s) mapping_t* mapping; int j, parent_path_len; -#ifdef __MINGW32__ - if (mkdir(commit->path)) + if (g_mkdir(commit->path, 0755)) { return -5; -#else - if (mkdir(commit->path, 0755)) - return -5; -#endif + } mapping = insert_mapping(s, commit->param.mkdir.cluster, commit->param.mkdir.cluster + 1); @@ -2972,6 +2979,7 @@ static int vvfat_write(BlockDriverState *bs, int64_t sector_num, { BDRVVVFATState *s = bs->opaque; int i, ret; + int first_cluster, last_cluster; DLOG(checkpoint()); @@ -2982,17 +2990,52 @@ DLOG(checkpoint()); vvfat_close_current_file(s); + if (sector_num == s->offset_to_bootsector && nb_sectors == 1) { + /* + * Write on bootsector. Allow only changing the reserved1 field, + * used to mark volume dirtiness + */ + unsigned char *bootsector = s->first_sectors + + s->offset_to_bootsector * 0x200; + /* + * LATER TODO: if FAT32, this is wrong (see init_directories(), + * which always creates a FAT16 bootsector) + */ + const int reserved1_offset = offsetof(bootsector_t, u.fat16.reserved1); + + for (i = 0; i < 0x200; i++) { + if (i != reserved1_offset && bootsector[i] != buf[i]) { + fprintf(stderr, "Tried to write to protected bootsector\n"); + return -1; + } + } + + /* Update bootsector with the only updatable byte, and return success */ + bootsector[reserved1_offset] = buf[reserved1_offset]; + return 0; + } + /* * Some sanity checks: * - do not allow writing to the boot sector */ - if (sector_num < s->offset_to_fat) return -1; - for (i = sector2cluster(s, sector_num); - i <= sector2cluster(s, sector_num + nb_sectors - 1);) { - mapping_t* mapping = find_mapping_for_cluster(s, i); + /* + * Values will be negative for writes to the FAT, which is located before + * the root directory. 
+ */ + first_cluster = sector2cluster(s, sector_num); + last_cluster = sector2cluster(s, sector_num + nb_sectors - 1); + + for (i = first_cluster; i <= last_cluster;) { + mapping_t *mapping = NULL; + + if (i >= 0) { + mapping = find_mapping_for_cluster(s, i); + } + if (mapping) { if (mapping->read_only) { fprintf(stderr, "Tried to write to write-protected file %s\n", @@ -3032,25 +3075,27 @@ DLOG(checkpoint()); } } i = mapping->end; - } else + } else { i++; + } } /* * Use qcow backend. Commit later. */ DLOG(fprintf(stderr, "Write to qcow backend: %d + %d\n", (int)sector_num, nb_sectors)); - ret = bdrv_pwrite(s->qcow, sector_num * BDRV_SECTOR_SIZE, buf, - nb_sectors * BDRV_SECTOR_SIZE); + ret = bdrv_pwrite(s->qcow, sector_num * BDRV_SECTOR_SIZE, + nb_sectors * BDRV_SECTOR_SIZE, buf, 0); if (ret < 0) { fprintf(stderr, "Error writing to qcow backend\n"); return ret; } - for (i = sector2cluster(s, sector_num); - i <= sector2cluster(s, sector_num + nb_sectors - 1); i++) - if (i >= 0) + for (i = first_cluster; i <= last_cluster; i++) { + if (i >= 0) { s->used_clusters[i] |= USED_ALLOCATED; + } + } DLOG(checkpoint()); /* TODO: add timeout */ @@ -3061,8 +3106,8 @@ DLOG(checkpoint()); } static int coroutine_fn -vvfat_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +vvfat_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { int ret; BDRVVVFATState *s = bs->opaque; @@ -3118,14 +3163,13 @@ static int enable_write_target(BlockDriverState *bs, Error **errp) int size = sector2cluster(s, s->sector_count); QDict *options; - s->used_clusters = calloc(size, 1); + s->used_clusters = g_malloc0(size); array_init(&(s->commits), sizeof(commit_t)); - s->qcow_filename = g_malloc(PATH_MAX); - ret = get_tmp_filename(s->qcow_filename, PATH_MAX); - if (ret < 0) { - error_setg_errno(errp, -ret, "can't create temporary file"); + s->qcow_filename = create_tmp_file(errp); + if (!s->qcow_filename) { + ret = -ENOENT; goto err; } @@ -3137,8 +3181,8 @@ static int enable_write_target(BlockDriverState *bs, Error **errp) } opts = qemu_opts_create(bdrv_qcow->create_opts, NULL, 0, &error_abort); - qemu_opt_set_number(opts, BLOCK_OPT_SIZE, s->sector_count * 512, - &error_abort); + qemu_opt_set_number(opts, BLOCK_OPT_SIZE, + bs->total_sectors * BDRV_SECTOR_SIZE, &error_abort); qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, "fat:", &error_abort); ret = bdrv_create(bdrv_qcow, s->qcow_filename, opts, errp); @@ -3166,8 +3210,6 @@ static int enable_write_target(BlockDriverState *bs, Error **errp) return 0; err: - g_free(s->qcow_filename); - s->qcow_filename = NULL; return ret; } diff --git a/block/win32-aio.c b/block/win32-aio.c index b7221a272f..aadc7b1bc3 100644 --- a/block/win32-aio.c +++ b/block/win32-aio.c @@ -29,6 +29,7 @@ #include "block/raw-aio.h" #include "qemu/event_notifier.h" #include "qemu/iov.h" +#include "qemu/memalign.h" #include #include @@ -172,7 +173,7 @@ int win32_aio_attach(QEMUWin32AIOState *aio, HANDLE hfile) void win32_aio_detach_aio_context(QEMUWin32AIOState *aio, AioContext *old_context) { - aio_set_event_notifier(old_context, &aio->e, false, NULL, NULL); + aio_set_event_notifier(old_context, &aio->e, false, NULL, NULL, NULL); aio->aio_ctx = NULL; } @@ -181,7 +182,7 @@ void win32_aio_attach_aio_context(QEMUWin32AIOState *aio, { aio->aio_ctx = new_context; aio_set_event_notifier(new_context, &aio->e, false, - win32_aio_completion_cb, NULL); + win32_aio_completion_cb, NULL, NULL); } QEMUWin32AIOState 
*win32_aio_init(void) diff --git a/blockdev-nbd.c b/blockdev-nbd.c index bdfa7ed3a5..012256bb02 100644 --- a/blockdev-nbd.c +++ b/blockdev-nbd.c @@ -30,18 +30,23 @@ typedef struct NBDServerData { } NBDServerData; static NBDServerData *nbd_server; -static bool is_qemu_nbd; +static int qemu_nbd_connections = -1; /* Non-negative if this is qemu-nbd */ static void nbd_update_server_watch(NBDServerData *s); -void nbd_server_is_qemu_nbd(bool value) +void nbd_server_is_qemu_nbd(int max_connections) { - is_qemu_nbd = value; + qemu_nbd_connections = max_connections; } bool nbd_server_is_running(void) { - return nbd_server || is_qemu_nbd; + return nbd_server || qemu_nbd_connections >= 0; +} + +int nbd_server_max_connections(void) +{ + return nbd_server ? nbd_server->max_connections : qemu_nbd_connections; } static void nbd_blockdev_client_closed(NBDClient *client, bool ignored) @@ -148,12 +153,6 @@ void nbd_server_start(SocketAddress *addr, const char *tls_creds, if (!nbd_server->tlscreds) { goto error; } - - /* TODO SOCKET_ADDRESS_TYPE_FD where fd has AF_INET or AF_INET6 */ - if (addr->type != SOCKET_ADDRESS_TYPE_INET) { - error_setg(errp, "TLS is only supported with IPv4/IPv6"); - goto error; - } } nbd_server->tlsauthz = g_strdup(tls_authz); @@ -217,8 +216,14 @@ void qmp_nbd_server_add(NbdServerAddOptions *arg, Error **errp) QAPI_CLONE_MEMBERS(BlockExportOptionsNbdBase, &export_opts->u.nbd, qapi_NbdServerAddOptions_base(arg)); if (arg->has_bitmap) { + BlockDirtyBitmapOrStr *el = g_new(BlockDirtyBitmapOrStr, 1); + + *el = (BlockDirtyBitmapOrStr) { + .type = QTYPE_QSTRING, + .u.local = g_strdup(arg->bitmap), + }; export_opts->u.nbd.has_bitmaps = true; - QAPI_LIST_PREPEND(export_opts->u.nbd.bitmaps, g_strdup(arg->bitmap)); + QAPI_LIST_PREPEND(export_opts->u.nbd.bitmaps, el); } /* diff --git a/blockdev.c b/blockdev.c index e6d0c0f305..0f0529d330 100644 --- a/blockdev.c +++ b/blockdev.c @@ -56,7 +56,6 @@ #include "sysemu/iothread.h" #include "block/block_int.h" #include "block/trace.h" -#include "sysemu/arch_init.h" #include "sysemu/runstate.h" #include "sysemu/replay.h" #include "qemu/cutils.h" @@ -64,11 +63,13 @@ #include "qemu/main-loop.h" #include "qemu/throttle-options.h" +/* Protected by BQL */ QTAILQ_HEAD(, BlockDriverState) monitor_bdrv_states = QTAILQ_HEAD_INITIALIZER(monitor_bdrv_states); void bdrv_set_monitor_owned(BlockDriverState *bs) { + GLOBAL_STATE_CODE(); QTAILQ_INSERT_TAIL(&monitor_bdrv_states, bs, monitor_list); } @@ -112,6 +113,8 @@ void override_max_devs(BlockInterfaceType type, int max_devs) BlockBackend *blk; DriveInfo *dinfo; + GLOBAL_STATE_CODE(); + if (max_devs <= 0) { return; } @@ -141,20 +144,30 @@ void blockdev_mark_auto_del(BlockBackend *blk) DriveInfo *dinfo = blk_legacy_dinfo(blk); BlockJob *job; + GLOBAL_STATE_CODE(); + if (!dinfo) { return; } - for (job = block_job_next(NULL); job; job = block_job_next(job)) { - if (block_job_has_bdrv(job, blk_bs(blk))) { - AioContext *aio_context = job->job.aio_context; - aio_context_acquire(aio_context); + JOB_LOCK_GUARD(); - job_cancel(&job->job, false); - - aio_context_release(aio_context); + do { + job = block_job_next_locked(NULL); + while (job && (job->job.cancelled || + job->job.deferred_to_main_loop || + !block_job_has_bdrv(job, blk_bs(blk)))) + { + job = block_job_next_locked(job); } - } + if (job) { + /* + * This drops the job lock temporarily and polls, so we need to + * restart processing the list from the start after this. 
+ */ + job_cancel_locked(&job->job, false); + } + } while (job); dinfo->auto_del = 1; } @@ -162,6 +175,7 @@ void blockdev_mark_auto_del(BlockBackend *blk) void blockdev_auto_del(BlockBackend *blk) { DriveInfo *dinfo = blk_legacy_dinfo(blk); + GLOBAL_STATE_CODE(); if (dinfo && dinfo->auto_del) { monitor_remove_blk(blk); @@ -169,23 +183,6 @@ void blockdev_auto_del(BlockBackend *blk) } } -/** - * Returns the current mapping of how many units per bus - * a particular interface can support. - * - * A positive integer indicates n units per bus. - * 0 implies the mapping has not been established. - * -1 indicates an invalid BlockInterfaceType was given. - */ -int drive_get_max_devs(BlockInterfaceType type) -{ - if (type >= IF_IDE && type < IF_COUNT) { - return if_max_devs[type]; - } - - return -1; -} - static int drive_index_to_bus_id(BlockInterfaceType type, int index) { int max_devs = if_max_devs[type]; @@ -198,17 +195,14 @@ static int drive_index_to_unit_id(BlockInterfaceType type, int index) return max_devs ? index % max_devs : index; } -QemuOpts *drive_def(const char *optstr) -{ - return qemu_opts_parse_noisily(qemu_find_opts("drive"), optstr, false); -} - QemuOpts *drive_add(BlockInterfaceType type, int index, const char *file, const char *optstr) { QemuOpts *opts; - opts = drive_def(optstr); + GLOBAL_STATE_CODE(); + + opts = qemu_opts_parse_noisily(qemu_find_opts("drive"), optstr, false); if (!opts) { return NULL; } @@ -228,6 +222,8 @@ DriveInfo *drive_get(BlockInterfaceType type, int bus, int unit) BlockBackend *blk; DriveInfo *dinfo; + GLOBAL_STATE_CODE(); + for (blk = blk_next(NULL); blk; blk = blk_next(blk)) { dinfo = blk_legacy_dinfo(blk); if (dinfo && dinfo->type == type @@ -250,6 +246,8 @@ void drive_check_orphaned(void) Location loc; bool orphans = false; + GLOBAL_STATE_CODE(); + for (blk = blk_next(NULL); blk; blk = blk_next(blk)) { dinfo = blk_legacy_dinfo(blk); /* @@ -283,6 +281,7 @@ void drive_check_orphaned(void) DriveInfo *drive_get_by_index(BlockInterfaceType type, int index) { + GLOBAL_STATE_CODE(); return drive_get(type, drive_index_to_bus_id(type, index), drive_index_to_unit_id(type, index)); @@ -294,6 +293,8 @@ int drive_get_max_bus(BlockInterfaceType type) BlockBackend *blk; DriveInfo *dinfo; + GLOBAL_STATE_CODE(); + max_bus = -1; for (blk = blk_next(NULL); blk; blk = blk_next(blk)) { dinfo = blk_legacy_dinfo(blk); @@ -304,16 +305,6 @@ int drive_get_max_bus(BlockInterfaceType type) return max_bus; } -/* Get a block device. This should only be used for single-drive devices - (e.g. SD/Floppy/MTD). Multi-disk devices (scsi/ide) should use the - appropriate bus. 
*/ -DriveInfo *drive_get_next(BlockInterfaceType type) -{ - static int next_block_unit[IF_COUNT]; - - return drive_get(type, 0, next_block_unit[type]++); -} - static void bdrv_format_print(void *opaque, const char *name) { qemu_printf(" %s", name); @@ -472,6 +463,17 @@ static void extract_common_blockdev_options(QemuOpts *opts, int *bdrv_flags, } } +static OnOffAuto account_get_opt(QemuOpts *opts, const char *name) +{ + if (!qemu_opt_find(opts, name)) { + return ON_OFF_AUTO_AUTO; + } + if (qemu_opt_get_bool(opts, name, true)) { + return ON_OFF_AUTO_ON; + } + return ON_OFF_AUTO_OFF; +} + /* Takes the ownership of bs_opts */ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts, Error **errp) @@ -479,7 +481,7 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts, const char *buf; int bdrv_flags = 0; int on_read_error, on_write_error; - bool account_invalid, account_failed; + OnOffAuto account_invalid, account_failed; bool writethrough, read_only; BlockBackend *blk; BlockDriverState *bs; @@ -513,8 +515,8 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts, /* extract parameters */ snapshot = qemu_opt_get_bool(opts, "snapshot", 0); - account_invalid = qemu_opt_get_bool(opts, "stats-account-invalid", true); - account_failed = qemu_opt_get_bool(opts, "stats-account-failed", true); + account_invalid = account_get_opt(opts, "stats-account-invalid"); + account_failed = account_get_opt(opts, "stats-account-failed"); writethrough = !qemu_opt_get_bool(opts, BDRV_OPT_CACHE_WB, true); @@ -661,6 +663,7 @@ BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp) { int bdrv_flags = 0; + GLOBAL_STATE_CODE(); /* bdrv_open() defaults to the values in bdrv_flags (for compatibility * with other callers) rather than what we want as the real defaults. * Apply the defaults here instead. */ @@ -679,6 +682,7 @@ void blockdev_close_all_bdrv_states(void) { BlockDriverState *bs, *next_bs; + GLOBAL_STATE_CODE(); QTAILQ_FOREACH_SAFE(bs, &monitor_bdrv_states, monitor_list, next_bs) { AioContext *ctx = bdrv_get_aio_context(bs); @@ -691,6 +695,7 @@ void blockdev_close_all_bdrv_states(void) /* Iterates over the list of monitor-owned BlockDriverStates */ BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs) { + GLOBAL_STATE_CODE(); return bs ? QTAILQ_NEXT(bs, monitor_list) : QTAILQ_FIRST(&monitor_bdrv_states); } @@ -792,6 +797,8 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type, int i; bool locked; + GLOBAL_STATE_CODE(); + /* Change legacy command line options into QMP ones */ static const struct { const char *from; @@ -1216,6 +1223,8 @@ typedef struct BlkActionState BlkActionState; * * Only prepare() may fail. In a single transaction, only one of commit() or * abort() will be called. clean() will always be called if it is present. + * + * Always run under BQL. */ typedef struct BlkActionOps { size_t instance_size; @@ -1277,7 +1286,7 @@ static void internal_snapshot_prepare(BlkActionState *common, BlockDriverState *bs; QEMUSnapshotInfo old_sn, *sn; bool ret; - qemu_timeval tv; + int64_t rt; BlockdevSnapshotInternal *internal; InternalSnapshotState *state; AioContext *aio_context; @@ -1347,9 +1356,9 @@ static void internal_snapshot_prepare(BlkActionState *common, /* 3. 
take the snapshot */ sn = &state->sn; pstrcpy(sn->name, sizeof(sn->name), name); - qemu_gettimeofday(&tv); - sn->date_sec = tv.tv_sec; - sn->date_nsec = tv.tv_usec * 1000; + rt = g_get_real_time(); + sn->date_sec = rt / G_USEC_PER_SEC; + sn->date_nsec = (rt % G_USEC_PER_SEC) * 1000; sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); if (replay_mode != REPLAY_MODE_NONE) { sn->icount = replay_get_current_icount(); @@ -1640,8 +1649,8 @@ static void external_snapshot_abort(BlkActionState *common) aio_context_release(aio_context); aio_context_acquire(tmp_context); - ret = bdrv_try_set_aio_context(state->old_bs, - aio_context, NULL); + ret = bdrv_try_change_aio_context(state->old_bs, + aio_context, NULL, NULL); assert(ret == 0); aio_context_release(tmp_context); @@ -1802,12 +1811,12 @@ static void drive_backup_prepare(BlkActionState *common, Error **errp) goto out; } - /* Honor bdrv_try_set_aio_context() context acquisition requirements. */ + /* Honor bdrv_try_change_aio_context() context acquisition requirements. */ old_context = bdrv_get_aio_context(target_bs); aio_context_release(aio_context); aio_context_acquire(old_context); - ret = bdrv_try_set_aio_context(target_bs, aio_context, errp); + ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp); if (ret < 0) { bdrv_unref(target_bs); aio_context_release(old_context); @@ -1852,14 +1861,7 @@ static void drive_backup_abort(BlkActionState *common) DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common); if (state->job) { - AioContext *aio_context; - - aio_context = bdrv_get_aio_context(state->bs); - aio_context_acquire(aio_context); - - job_cancel_sync(&state->job->job); - - aio_context_release(aio_context); + job_cancel_sync(&state->job->job, true); } } @@ -1909,12 +1911,12 @@ static void blockdev_backup_prepare(BlkActionState *common, Error **errp) return; } - /* Honor bdrv_try_set_aio_context() context acquisition requirements. */ + /* Honor bdrv_try_change_aio_context() context acquisition requirements. */ aio_context = bdrv_get_aio_context(bs); old_context = bdrv_get_aio_context(target_bs); aio_context_acquire(old_context); - ret = bdrv_try_set_aio_context(target_bs, aio_context, errp); + ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp); if (ret < 0) { aio_context_release(old_context); return; @@ -1953,14 +1955,7 @@ static void blockdev_backup_abort(BlkActionState *common) BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common); if (state->job) { - AioContext *aio_context; - - aio_context = bdrv_get_aio_context(state->bs); - aio_context_acquire(aio_context); - - job_cancel_sync(&state->job->job); - - aio_context_release(aio_context); + job_cancel_sync(&state->job->job, true); } } @@ -2325,6 +2320,8 @@ static TransactionProperties *get_transaction_properties( /* * 'Atomic' group operations. The operations are performed as a set, and if * any fail then we roll back all operations in the group. + * + * Always run under BQL. 
*/ void qmp_transaction(TransactionActionList *dev_list, bool has_props, @@ -2336,6 +2333,8 @@ void qmp_transaction(TransactionActionList *dev_list, BlkActionState *state, *next; Error *local_err = NULL; + GLOBAL_STATE_CODE(); + QTAILQ_HEAD(, BlkActionState) snap_bdrv_states; QTAILQ_INIT(&snap_bdrv_states); @@ -2468,7 +2467,7 @@ void coroutine_fn qmp_block_resize(bool has_device, const char *device, bdrv_co_unlock(bs); old_ctx = bdrv_co_enter(bs); - blk_truncate(blk, size, false, PREALLOC_MODE_OFF, 0, errp); + blk_co_truncate(blk, size, false, PREALLOC_MODE_OFF, 0, errp); bdrv_co_leave(bs, old_ctx); bdrv_co_lock(bs); @@ -3214,12 +3213,12 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) !bdrv_has_zero_init(target_bs))); - /* Honor bdrv_try_set_aio_context() context acquisition requirements. */ + /* Honor bdrv_try_change_aio_context() context acquisition requirements. */ old_context = bdrv_get_aio_context(target_bs); aio_context_release(aio_context); aio_context_acquire(old_context); - ret = bdrv_try_set_aio_context(target_bs, aio_context, errp); + ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp); if (ret < 0) { bdrv_unref(target_bs); aio_context_release(old_context); @@ -3286,12 +3285,12 @@ void qmp_blockdev_mirror(bool has_job_id, const char *job_id, zero_target = (sync == MIRROR_SYNC_MODE_FULL); - /* Honor bdrv_try_set_aio_context() context acquisition requirements. */ + /* Honor bdrv_try_change_aio_context() context acquisition requirements. */ old_context = bdrv_get_aio_context(target_bs); aio_context = bdrv_get_aio_context(bs); aio_context_acquire(old_context); - ret = bdrv_try_set_aio_context(target_bs, aio_context, errp); + ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp); aio_context_release(old_context); aio_context_acquire(aio_context); @@ -3317,17 +3316,16 @@ out: aio_context_release(aio_context); } -/* Get a block job using its ID and acquire its AioContext */ -static BlockJob *find_block_job(const char *id, AioContext **aio_context, - Error **errp) +/* + * Get a block job using its ID. Called with job_mutex held. 
+ */ +static BlockJob *find_block_job_locked(const char *id, Error **errp) { BlockJob *job; assert(id != NULL); - *aio_context = NULL; - - job = block_job_get(id); + job = block_job_get_locked(id); if (!job) { error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE, @@ -3335,30 +3333,30 @@ static BlockJob *find_block_job(const char *id, AioContext **aio_context, return NULL; } - *aio_context = blk_get_aio_context(job->blk); - aio_context_acquire(*aio_context); - return job; } void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp) { - AioContext *aio_context; - BlockJob *job = find_block_job(device, &aio_context, errp); + BlockJob *job; + + JOB_LOCK_GUARD(); + job = find_block_job_locked(device, errp); if (!job) { return; } - block_job_set_speed(job, speed, errp); - aio_context_release(aio_context); + block_job_set_speed_locked(job, speed, errp); } void qmp_block_job_cancel(const char *device, bool has_force, bool force, Error **errp) { - AioContext *aio_context; - BlockJob *job = find_block_job(device, &aio_context, errp); + BlockJob *job; + + JOB_LOCK_GUARD(); + job = find_block_job_locked(device, errp); if (!job) { return; @@ -3368,97 +3366,94 @@ void qmp_block_job_cancel(const char *device, force = false; } - if (job_user_paused(&job->job) && !force) { + if (job_user_paused_locked(&job->job) && !force) { error_setg(errp, "The block job for device '%s' is currently paused", device); - goto out; + return; } trace_qmp_block_job_cancel(job); - job_user_cancel(&job->job, force, errp); -out: - aio_context_release(aio_context); + job_user_cancel_locked(&job->job, force, errp); } void qmp_block_job_pause(const char *device, Error **errp) { - AioContext *aio_context; - BlockJob *job = find_block_job(device, &aio_context, errp); + BlockJob *job; + + JOB_LOCK_GUARD(); + job = find_block_job_locked(device, errp); if (!job) { return; } trace_qmp_block_job_pause(job); - job_user_pause(&job->job, errp); - aio_context_release(aio_context); + job_user_pause_locked(&job->job, errp); } void qmp_block_job_resume(const char *device, Error **errp) { - AioContext *aio_context; - BlockJob *job = find_block_job(device, &aio_context, errp); + BlockJob *job; + + JOB_LOCK_GUARD(); + job = find_block_job_locked(device, errp); if (!job) { return; } trace_qmp_block_job_resume(job); - job_user_resume(&job->job, errp); - aio_context_release(aio_context); + job_user_resume_locked(&job->job, errp); } void qmp_block_job_complete(const char *device, Error **errp) { - AioContext *aio_context; - BlockJob *job = find_block_job(device, &aio_context, errp); + BlockJob *job; + + JOB_LOCK_GUARD(); + job = find_block_job_locked(device, errp); if (!job) { return; } trace_qmp_block_job_complete(job); - job_complete(&job->job, errp); - aio_context_release(aio_context); + job_complete_locked(&job->job, errp); } void qmp_block_job_finalize(const char *id, Error **errp) { - AioContext *aio_context; - BlockJob *job = find_block_job(id, &aio_context, errp); + BlockJob *job; + + JOB_LOCK_GUARD(); + job = find_block_job_locked(id, errp); if (!job) { return; } trace_qmp_block_job_finalize(job); - job_ref(&job->job); - job_finalize(&job->job, errp); + job_ref_locked(&job->job); + job_finalize_locked(&job->job, errp); - /* - * Job's context might have changed via job_finalize (and job_txn_apply - * automatically acquires the new one), so make sure we release the correct - * one. 
- */ - aio_context = blk_get_aio_context(job->blk); - job_unref(&job->job); - aio_context_release(aio_context); + job_unref_locked(&job->job); } void qmp_block_job_dismiss(const char *id, Error **errp) { - AioContext *aio_context; - BlockJob *bjob = find_block_job(id, &aio_context, errp); + BlockJob *bjob; Job *job; + JOB_LOCK_GUARD(); + bjob = find_block_job_locked(id, errp); + if (!bjob) { return; } trace_qmp_block_job_dismiss(bjob); job = &bjob->job; - job_dismiss(&job, errp); - aio_context_release(aio_context); + job_dismiss_locked(&job, errp); } void qmp_change_backing_file(const char *device, @@ -3572,6 +3567,7 @@ void qmp_blockdev_reopen(BlockdevOptionsList *reopen_list, Error **errp) { BlockReopenQueue *queue = NULL; GSList *drained = NULL; + GSList *p; /* Add each one of the BDS that we want to reopen to the queue */ for (; reopen_list != NULL; reopen_list = reopen_list->next) { @@ -3621,7 +3617,15 @@ void qmp_blockdev_reopen(BlockdevOptionsList *reopen_list, Error **errp) fail: bdrv_reopen_queue_free(queue); - g_slist_free_full(drained, (GDestroyNotify) bdrv_subtree_drained_end); + for (p = drained; p; p = p->next) { + BlockDriverState *bs = p->data; + AioContext *ctx = bdrv_get_aio_context(bs); + + aio_context_acquire(ctx); + bdrv_subtree_drained_end(bs); + aio_context_release(ctx); + } + g_slist_free(drained); } void qmp_blockdev_del(const char *node_name, Error **errp) @@ -3629,6 +3633,8 @@ void qmp_blockdev_del(const char *node_name, Error **errp) AioContext *aio_context; BlockDriverState *bs; + GLOBAL_STATE_CODE(); + bs = bdrv_find_node(node_name); if (!bs) { error_setg(errp, "Failed to find node with node-name='%s'", node_name); @@ -3724,17 +3730,16 @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp) BlockJobInfoList *head = NULL, **tail = &head; BlockJob *job; - for (job = block_job_next(NULL); job; job = block_job_next(job)) { + JOB_LOCK_GUARD(); + + for (job = block_job_next_locked(NULL); job; + job = block_job_next_locked(job)) { BlockJobInfo *value; - AioContext *aio_context; if (block_job_is_internal(job)) { continue; } - aio_context = blk_get_aio_context(job->blk); - aio_context_acquire(aio_context); - value = block_job_query(job, errp); - aio_context_release(aio_context); + value = block_job_query_locked(job, errp); if (!value) { qapi_free_BlockJobInfoList(head); return NULL; @@ -3781,7 +3786,7 @@ void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread, old_context = bdrv_get_aio_context(bs); aio_context_acquire(old_context); - bdrv_try_set_aio_context(bs, new_context, errp); + bdrv_try_change_aio_context(bs, new_context, NULL, errp); aio_context_release(old_context); } diff --git a/blockjob.c b/blockjob.c index 4bad1408cb..f51d4e18f3 100644 --- a/blockjob.c +++ b/blockjob.c @@ -36,21 +36,6 @@ #include "qemu/main-loop.h" #include "qemu/timer.h" -/* - * The block job API is composed of two categories of functions. - * - * The first includes functions used by the monitor. The monitor is - * peculiar in that it accesses the block job list with block_job_get, and - * therefore needs consistency across block_job_get and the actual operation - * (e.g. block_job_set_speed). The consistency is achieved with - * aio_context_acquire/release. These functions are declared in blockjob.h. - * - * The second includes functions used by the block job drivers and sometimes - * by the core block layer. These do not care about locking, because the - * whole coroutine runs under the AioContext lock, and are declared in - * blockjob_int.h. 
- */ - static bool is_block_job(Job *job) { return job_type(job) == JOB_TYPE_BACKUP || @@ -59,20 +44,22 @@ static bool is_block_job(Job *job) job_type(job) == JOB_TYPE_STREAM; } -BlockJob *block_job_next(BlockJob *bjob) +BlockJob *block_job_next_locked(BlockJob *bjob) { Job *job = bjob ? &bjob->job : NULL; + GLOBAL_STATE_CODE(); do { - job = job_next(job); + job = job_next_locked(job); } while (job && !is_block_job(job)); return job ? container_of(job, BlockJob, job) : NULL; } -BlockJob *block_job_get(const char *id) +BlockJob *block_job_get_locked(const char *id) { - Job *job = job_get(id); + Job *job = job_get_locked(id); + GLOBAL_STATE_CODE(); if (job && is_block_job(job)) { return container_of(job, BlockJob, job); @@ -81,12 +68,18 @@ BlockJob *block_job_get(const char *id) } } +BlockJob *block_job_get(const char *id) +{ + JOB_LOCK_GUARD(); + return block_job_get_locked(id); +} + void block_job_free(Job *job) { BlockJob *bjob = container_of(job, BlockJob, job); + GLOBAL_STATE_CODE(); block_job_remove_all_bdrv(bjob); - blk_unref(bjob->blk); ratelimit_destroy(&bjob->limit); error_free(bjob->blocker); } @@ -112,8 +105,10 @@ static bool child_job_drained_poll(BdrvChild *c) /* An inactive or completed job doesn't have any pending requests. Jobs * with !job->busy are either already paused or have a pause point after * being reentered, so no job driver code will run before they pause. */ - if (!job->busy || job_is_completed(job)) { - return false; + WITH_JOB_LOCK_GUARD() { + if (!job->busy || job_is_completed_locked(job)) { + return false; + } } /* Otherwise, assume that it isn't fully stopped yet, but allow the job to @@ -131,42 +126,55 @@ static void child_job_drained_end(BdrvChild *c, int *drained_end_counter) job_resume(&job->job); } -static bool child_job_can_set_aio_ctx(BdrvChild *c, AioContext *ctx, - GSList **ignore, Error **errp) +typedef struct BdrvStateChildJobContext { + AioContext *new_ctx; + BlockJob *job; +} BdrvStateChildJobContext; + +static void child_job_set_aio_ctx_commit(void *opaque) +{ + BdrvStateChildJobContext *s = opaque; + BlockJob *job = s->job; + + job_set_aio_context(&job->job, s->new_ctx); +} + +static TransactionActionDrv change_child_job_context = { + .commit = child_job_set_aio_ctx_commit, + .clean = g_free, +}; + +static bool child_job_change_aio_ctx(BdrvChild *c, AioContext *ctx, + GHashTable *visited, Transaction *tran, + Error **errp) { BlockJob *job = c->opaque; + BdrvStateChildJobContext *s; GSList *l; for (l = job->nodes; l; l = l->next) { BdrvChild *sibling = l->data; - if (!bdrv_child_can_set_aio_context(sibling, ctx, ignore, errp)) { + if (!bdrv_child_change_aio_context(sibling, ctx, visited, + tran, errp)) { return false; } } + + s = g_new(BdrvStateChildJobContext, 1); + *s = (BdrvStateChildJobContext) { + .new_ctx = ctx, + .job = job, + }; + + tran_add(tran, &change_child_job_context, s); return true; } -static void child_job_set_aio_ctx(BdrvChild *c, AioContext *ctx, - GSList **ignore) -{ - BlockJob *job = c->opaque; - GSList *l; - - for (l = job->nodes; l; l = l->next) { - BdrvChild *sibling = l->data; - if (g_slist_find(*ignore, sibling)) { - continue; - } - *ignore = g_slist_prepend(*ignore, sibling); - bdrv_set_aio_context_ignore(sibling->bs, ctx, ignore); - } - - job->job.aio_context = ctx; -} - static AioContext *child_job_get_parent_aio_context(BdrvChild *c) { BlockJob *job = c->opaque; + IO_CODE(); + JOB_LOCK_GUARD(); return job->job.aio_context; } @@ -176,14 +184,14 @@ static const BdrvChildClass child_job = { .drained_begin = 
child_job_drained_begin, .drained_poll = child_job_drained_poll, .drained_end = child_job_drained_end, - .can_set_aio_ctx = child_job_can_set_aio_ctx, - .set_aio_ctx = child_job_set_aio_ctx, + .change_aio_ctx = child_job_change_aio_ctx, .stay_at_node = true, .get_parent_aio_context = child_job_get_parent_aio_context, }; void block_job_remove_all_bdrv(BlockJob *job) { + GLOBAL_STATE_CODE(); /* * bdrv_root_unref_child() may reach child_job_[can_]set_aio_ctx(), * which will also traverse job->nodes, so consume the list one by @@ -206,6 +214,7 @@ void block_job_remove_all_bdrv(BlockJob *job) bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs) { GSList *el; + GLOBAL_STATE_CODE(); for (el = job->nodes; el; el = el->next) { BdrvChild *c = el->data; @@ -222,6 +231,7 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs, { BdrvChild *c; bool need_context_ops; + GLOBAL_STATE_CODE(); bdrv_ref(bs); @@ -245,7 +255,8 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs, return 0; } -static void block_job_on_idle(Notifier *n, void *opaque) +/* Called with job_mutex lock held. */ +static void block_job_on_idle_locked(Notifier *n, void *opaque) { aio_wait_kick(); } @@ -266,12 +277,14 @@ static bool job_timer_pending(Job *job) return timer_pending(&job->sleep_timer); } -bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) +bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp) { const BlockJobDriver *drv = block_job_driver(job); int64_t old_speed = job->speed; - if (job_apply_verb(&job->job, JOB_VERB_SET_SPEED, errp) < 0) { + GLOBAL_STATE_CODE(); + + if (job_apply_verb_locked(&job->job, JOB_VERB_SET_SPEED, errp) < 0) { return false; } if (speed < 0) { @@ -285,7 +298,9 @@ bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) job->speed = speed; if (drv->set_speed) { + job_unlock(); drv->set_speed(job, speed); + job_lock(); } if (speed && speed <= old_speed) { @@ -293,21 +308,30 @@ bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) } /* kick only if a timer is pending */ - job_enter_cond(&job->job, job_timer_pending); + job_enter_cond_locked(&job->job, job_timer_pending); return true; } +static bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) +{ + JOB_LOCK_GUARD(); + return block_job_set_speed_locked(job, speed, errp); +} + int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n) { + IO_CODE(); return ratelimit_calculate_delay(&job->limit, n); } -BlockJobInfo *block_job_query(BlockJob *job, Error **errp) +BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp) { BlockJobInfo *info; uint64_t progress_current, progress_total; + GLOBAL_STATE_CODE(); + if (block_job_is_internal(job)) { error_setg(errp, "Cannot query QEMU internal jobs"); return NULL; @@ -319,13 +343,13 @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp) info = g_new0(BlockJobInfo, 1); info->type = g_strdup(job_type_str(&job->job)); info->device = g_strdup(job->job.id); - info->busy = qatomic_read(&job->job.busy); + info->busy = job->job.busy; info->paused = job->job.pause_count > 0; info->offset = progress_current; info->len = progress_total; info->speed = job->speed; info->io_status = job->iostatus; - info->ready = job_is_ready(&job->job), + info->ready = job_is_ready_locked(&job->job), info->status = job->job.status; info->auto_finalize = job->job.auto_finalize; info->auto_dismiss = job->job.auto_dismiss; @@ -338,7 +362,8 @@ BlockJobInfo *block_job_query(BlockJob 
*job, Error **errp) return info; } -static void block_job_iostatus_set_err(BlockJob *job, int error) +/* Called with job lock held */ +static void block_job_iostatus_set_err_locked(BlockJob *job, int error) { if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE : @@ -346,7 +371,8 @@ static void block_job_iostatus_set_err(BlockJob *job, int error) } } -static void block_job_event_cancelled(Notifier *n, void *opaque) +/* Called with job_mutex lock held. */ +static void block_job_event_cancelled_locked(Notifier *n, void *opaque) { BlockJob *job = opaque; uint64_t progress_current, progress_total; @@ -365,7 +391,8 @@ static void block_job_event_cancelled(Notifier *n, void *opaque) job->speed); } -static void block_job_event_completed(Notifier *n, void *opaque) +/* Called with job_mutex lock held. */ +static void block_job_event_completed_locked(Notifier *n, void *opaque) { BlockJob *job = opaque; const char *msg = NULL; @@ -391,7 +418,8 @@ static void block_job_event_completed(Notifier *n, void *opaque) msg); } -static void block_job_event_pending(Notifier *n, void *opaque) +/* Called with job_mutex lock held. */ +static void block_job_event_pending_locked(Notifier *n, void *opaque) { BlockJob *job = opaque; @@ -403,7 +431,8 @@ static void block_job_event_pending(Notifier *n, void *opaque) job->job.id); } -static void block_job_event_ready(Notifier *n, void *opaque) +/* Called with job_mutex lock held. */ +static void block_job_event_ready_locked(Notifier *n, void *opaque) { BlockJob *job = opaque; uint64_t progress_current, progress_total; @@ -423,32 +452,22 @@ static void block_job_event_ready(Notifier *n, void *opaque) } -/* - * API for block job drivers and the block layer. These functions are - * declared in blockjob_int.h. 
- */ - void *block_job_create(const char *job_id, const BlockJobDriver *driver, JobTxn *txn, BlockDriverState *bs, uint64_t perm, uint64_t shared_perm, int64_t speed, int flags, BlockCompletionFunc *cb, void *opaque, Error **errp) { - BlockBackend *blk; BlockJob *job; + int ret; + GLOBAL_STATE_CODE(); if (job_id == NULL && !(flags & JOB_INTERNAL)) { job_id = bdrv_get_device_name(bs); } - blk = blk_new_with_bs(bs, perm, shared_perm, errp); - if (!blk) { - return NULL; - } - - job = job_create(job_id, &driver->job_driver, txn, blk_get_aio_context(blk), + job = job_create(job_id, &driver->job_driver, txn, bdrv_get_aio_context(bs), flags, cb, opaque, errp); if (job == NULL) { - blk_unref(blk); return NULL; } @@ -458,43 +477,46 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, ratelimit_init(&job->limit); - job->blk = blk; + job->finalize_cancelled_notifier.notify = block_job_event_cancelled_locked; + job->finalize_completed_notifier.notify = block_job_event_completed_locked; + job->pending_notifier.notify = block_job_event_pending_locked; + job->ready_notifier.notify = block_job_event_ready_locked; + job->idle_notifier.notify = block_job_on_idle_locked; - job->finalize_cancelled_notifier.notify = block_job_event_cancelled; - job->finalize_completed_notifier.notify = block_job_event_completed; - job->pending_notifier.notify = block_job_event_pending; - job->ready_notifier.notify = block_job_event_ready; - job->idle_notifier.notify = block_job_on_idle; - - notifier_list_add(&job->job.on_finalize_cancelled, - &job->finalize_cancelled_notifier); - notifier_list_add(&job->job.on_finalize_completed, - &job->finalize_completed_notifier); - notifier_list_add(&job->job.on_pending, &job->pending_notifier); - notifier_list_add(&job->job.on_ready, &job->ready_notifier); - notifier_list_add(&job->job.on_idle, &job->idle_notifier); + WITH_JOB_LOCK_GUARD() { + notifier_list_add(&job->job.on_finalize_cancelled, + &job->finalize_cancelled_notifier); + notifier_list_add(&job->job.on_finalize_completed, + &job->finalize_completed_notifier); + notifier_list_add(&job->job.on_pending, &job->pending_notifier); + notifier_list_add(&job->job.on_ready, &job->ready_notifier); + notifier_list_add(&job->job.on_idle, &job->idle_notifier); + } error_setg(&job->blocker, "block device is in use by block job: %s", job_type_str(&job->job)); - block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort); + + ret = block_job_add_bdrv(job, "main node", bs, perm, shared_perm, errp); + if (ret < 0) { + goto fail; + } bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker); - /* Disable request queuing in the BlockBackend to avoid deadlocks on drain: - * The job reports that it's busy until it reaches a pause point. 
*/ - blk_set_disable_request_queuing(blk, true); - blk_set_allow_aio_context_change(blk, true); - if (!block_job_set_speed(job, speed, errp)) { - job_early_fail(&job->job); - return NULL; + goto fail; } return job; + +fail: + job_early_fail(&job->job); + return NULL; } -void block_job_iostatus_reset(BlockJob *job) +void block_job_iostatus_reset_locked(BlockJob *job) { + GLOBAL_STATE_CODE(); if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { return; } @@ -502,9 +524,16 @@ void block_job_iostatus_reset(BlockJob *job) job->iostatus = BLOCK_DEVICE_IO_STATUS_OK; } +static void block_job_iostatus_reset(BlockJob *job) +{ + JOB_LOCK_GUARD(); + block_job_iostatus_reset_locked(job); +} + void block_job_user_resume(Job *job) { BlockJob *bjob = container_of(job, BlockJob, job); + GLOBAL_STATE_CODE(); block_job_iostatus_reset(bjob); } @@ -512,6 +541,7 @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err, int is_read, int error) { BlockErrorAction action; + IO_CODE(); switch (on_err) { case BLOCKDEV_ON_ERROR_ENOSPC: @@ -538,12 +568,23 @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err, action); } if (action == BLOCK_ERROR_ACTION_STOP) { - if (!job->job.user_paused) { - job_pause(&job->job); - /* make the pause user visible, which will be resumed from QMP. */ - job->job.user_paused = true; + WITH_JOB_LOCK_GUARD() { + if (!job->job.user_paused) { + job_pause_locked(&job->job); + /* + * make the pause user visible, which will be + * resumed from QMP. + */ + job->job.user_paused = true; + } + block_job_iostatus_set_err_locked(job, error); } - block_job_iostatus_set_err(job, error); } return action; } + +AioContext *block_job_get_aio_context(BlockJob *job) +{ + GLOBAL_STATE_CODE(); + return job->job.aio_context; +} diff --git a/bsd-user/arm/signal.c b/bsd-user/arm/signal.c new file mode 100644 index 0000000000..2b1dd745d1 --- /dev/null +++ b/bsd-user/arm/signal.c @@ -0,0 +1,221 @@ +/* + * arm signal functions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu.h" + +/* + * Compare to arm/arm/machdep.c sendsig() + * Assumes that target stack frame memory is locked. 
+ */ +abi_long set_sigtramp_args(CPUARMState *env, int sig, + struct target_sigframe *frame, + abi_ulong frame_addr, + struct target_sigaction *ka) +{ + /* + * Arguments to signal handler: + * r0 = signal number + * r1 = siginfo pointer + * r2 = ucontext pointer + * r5 = ucontext pointer + * pc = signal handler pointer + * sp = sigframe struct pointer + * lr = sigtramp at base of user stack + */ + + env->regs[0] = sig; + env->regs[1] = frame_addr + + offsetof(struct target_sigframe, sf_si); + env->regs[2] = frame_addr + + offsetof(struct target_sigframe, sf_uc); + + /* the trampoline uses r5 as the uc address */ + env->regs[5] = frame_addr + + offsetof(struct target_sigframe, sf_uc); + env->regs[TARGET_REG_PC] = ka->_sa_handler & ~1; + env->regs[TARGET_REG_SP] = frame_addr; + env->regs[TARGET_REG_LR] = TARGET_PS_STRINGS - TARGET_SZSIGCODE; + /* + * Low bit indicates whether or not we're entering thumb mode. + */ + cpsr_write(env, (ka->_sa_handler & 1) * CPSR_T, CPSR_T, CPSRWriteByInstr); + + return 0; +} + +static abi_long get_vfpcontext(CPUARMState *env, abi_ulong frame_addr, + struct target_sigframe *frame) +{ + /* see sendsig and get_vfpcontext in sys/arm/arm/exec_machdep.c */ + target_mcontext_vfp_t *vfp = &frame->sf_vfp; + target_mcontext_t *mcp = &frame->sf_uc.uc_mcontext; + + /* Assumes that mcp and vfp are locked */ + for (int i = 0; i < 32; i++) { + vfp->mcv_reg[i] = tswap64(*aa32_vfp_dreg(env, i)); + } + vfp->mcv_fpscr = tswap32(vfp_get_fpscr(env)); + mcp->mc_vfp_size = tswap32(sizeof(*vfp)); + mcp->mc_vfp_ptr = tswap32(frame_addr + ((uintptr_t)vfp - (uintptr_t)frame)); + return 0; +} + +/* + * Compare to arm/arm/exec_machdep.c get_mcontext() + * Assumes that the memory is locked if mcp points to user memory. + */ +abi_long get_mcontext(CPUARMState *env, target_mcontext_t *mcp, int flags) +{ + uint32_t *gr = mcp->__gregs; + + gr[TARGET_REG_CPSR] = tswap32(cpsr_read(env)); + if (flags & TARGET_MC_GET_CLEAR_RET) { + gr[TARGET_REG_R0] = 0; + gr[TARGET_REG_CPSR] &= ~CPSR_C; + } else { + gr[TARGET_REG_R0] = tswap32(env->regs[0]); + } + + gr[TARGET_REG_R1] = tswap32(env->regs[1]); + gr[TARGET_REG_R2] = tswap32(env->regs[2]); + gr[TARGET_REG_R3] = tswap32(env->regs[3]); + gr[TARGET_REG_R4] = tswap32(env->regs[4]); + gr[TARGET_REG_R5] = tswap32(env->regs[5]); + gr[TARGET_REG_R6] = tswap32(env->regs[6]); + gr[TARGET_REG_R7] = tswap32(env->regs[7]); + gr[TARGET_REG_R8] = tswap32(env->regs[8]); + gr[TARGET_REG_R9] = tswap32(env->regs[9]); + gr[TARGET_REG_R10] = tswap32(env->regs[10]); + gr[TARGET_REG_R11] = tswap32(env->regs[11]); + gr[TARGET_REG_R12] = tswap32(env->regs[12]); + + gr[TARGET_REG_SP] = tswap32(env->regs[13]); + gr[TARGET_REG_LR] = tswap32(env->regs[14]); + gr[TARGET_REG_PC] = tswap32(env->regs[15]); + + /* + * FreeBSD's get_mcontext doesn't save VFP info, but sets the pointer and + * size to zero. Applications that need the VFP state use + * sysarch(ARM_GET_VFPSTATE) and are expected to adjust mcontext after that. + */ + mcp->mc_vfp_size = 0; + mcp->mc_vfp_ptr = 0; + memset(&mcp->mc_spare, 0, sizeof(mcp->mc_spare)); + + return 0; +} + +/* + * Compare to arm/arm/exec_machdep.c sendsig() + * Assumes that the memory is locked if frame points to user memory. 
+ */ +abi_long setup_sigframe_arch(CPUARMState *env, abi_ulong frame_addr, + struct target_sigframe *frame, int flags) +{ + target_mcontext_t *mcp = &frame->sf_uc.uc_mcontext; + + get_mcontext(env, mcp, flags); + get_vfpcontext(env, frame_addr, frame); + return 0; +} + +/* Compare to arm/arm/exec_machdep.c set_mcontext() */ +abi_long set_mcontext(CPUARMState *env, target_mcontext_t *mcp, int srflag) +{ + int err = 0; + const uint32_t *gr = mcp->__gregs; + uint32_t cpsr, ccpsr = cpsr_read(env); + uint32_t fpscr, mask; + + cpsr = tswap32(gr[TARGET_REG_CPSR]); + /* + * Only allow certain bits to change, reject attempted changes to non-user + * bits. In addition, make sure we're headed for user mode and none of the + * interrupt bits are set. + */ + if ((ccpsr & ~CPSR_USER) != (cpsr & ~CPSR_USER)) { + return -TARGET_EINVAL; + } + if ((cpsr & CPSR_M) != ARM_CPU_MODE_USR || + (cpsr & (CPSR_I | CPSR_F)) != 0) { + return -TARGET_EINVAL; + } + + /* + * The movs pc,lr instruction that implements the return to userland masks + * these bits out. + */ + mask = cpsr & CPSR_T ? 0x1 : 0x3; + + /* + * Make sure that we either have no vfp, or it's the correct size. + * FreeBSD just ignores it, though, so maybe we'll need to adjust + * things below instead. + */ + if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_size != sizeof(target_mcontext_vfp_t)) { + return -TARGET_EINVAL; + } + + env->regs[0] = tswap32(gr[TARGET_REG_R0]); + env->regs[1] = tswap32(gr[TARGET_REG_R1]); + env->regs[2] = tswap32(gr[TARGET_REG_R2]); + env->regs[3] = tswap32(gr[TARGET_REG_R3]); + env->regs[4] = tswap32(gr[TARGET_REG_R4]); + env->regs[5] = tswap32(gr[TARGET_REG_R5]); + env->regs[6] = tswap32(gr[TARGET_REG_R6]); + env->regs[7] = tswap32(gr[TARGET_REG_R7]); + env->regs[8] = tswap32(gr[TARGET_REG_R8]); + env->regs[9] = tswap32(gr[TARGET_REG_R9]); + env->regs[10] = tswap32(gr[TARGET_REG_R10]); + env->regs[11] = tswap32(gr[TARGET_REG_R11]); + env->regs[12] = tswap32(gr[TARGET_REG_R12]); + + env->regs[13] = tswap32(gr[TARGET_REG_SP]); + env->regs[14] = tswap32(gr[TARGET_REG_LR]); + env->regs[15] = tswap32(gr[TARGET_REG_PC] & ~mask); + if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_ptr != 0) { + /* see set_vfpcontext in sys/arm/arm/exec_machdep.c */ + target_mcontext_vfp_t *vfp; + + vfp = lock_user(VERIFY_READ, mcp->mc_vfp_ptr, sizeof(*vfp), 1); + for (int i = 0; i < 32; i++) { + __get_user(*aa32_vfp_dreg(env, i), &vfp->mcv_reg[i]); + } + __get_user(fpscr, &vfp->mcv_fpscr); + vfp_set_fpscr(env, fpscr); + unlock_user(vfp, mcp->mc_vfp_ptr, sizeof(target_ucontext_t)); + + /* + * linux-user sets fpexc, fpinst and fpinst2, but these aren't in + * FreeBSD's mcontext, what to do? + */ + } + cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr); + + return err; +} + +/* Compare to arm/arm/machdep.c sys_sigreturn() */ +abi_long get_ucontext_sigreturn(CPUARMState *env, abi_ulong target_sf, + abi_ulong *target_uc) +{ + *target_uc = target_sf; + + return 0; +} diff --git a/bsd-user/arm/target.h b/bsd-user/arm/target.h new file mode 100644 index 0000000000..7c423ec575 --- /dev/null +++ b/bsd-user/arm/target.h @@ -0,0 +1,21 @@ +/* + * Intel general target stuff that's common to all i386 details + * + * Copyright (c) 2022 M. Warner Losh + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef TARGET_H +#define TARGET_H + +/* + * arm EABI 'lumps' the registers for 64-bit args. 
+ */ +static inline bool regpairs_aligned(void *cpu_env) +{ + return true; +} + +#endif /* TARGET_H */ + diff --git a/bsd-user/sparc/target_syscall.h b/bsd-user/arm/target_arch.h similarity index 60% rename from bsd-user/sparc/target_syscall.h rename to bsd-user/arm/target_arch.h index 151284754b..561934bbd2 100644 --- a/bsd-user/sparc/target_syscall.h +++ b/bsd-user/arm/target_arch.h @@ -1,6 +1,7 @@ /* - * sparc dependent system call definitions + * ARM 32-bit specific prototypes for bsd-user * + * Copyright (c) 2013 Stacey D. Son * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -15,22 +16,13 @@ * You should have received a copy of the GNU General Public License * along with this program; if not, see . */ -#ifndef TARGET_SYSCALL_H -#define TARGET_SYSCALL_H -struct target_pt_regs { - abi_ulong psr; - abi_ulong pc; - abi_ulong npc; - abi_ulong y; - abi_ulong u_regs[16]; -}; +#ifndef TARGET_ARCH_H +#define TARGET_ARCH_H -#define UNAME_MACHINE "sun4" -#define TARGET_HW_MACHINE "sparc" -#define TARGET_HW_MACHINE_ARCH "sparc" +#include "qemu.h" -#define TARGET_SPARC_UTRAP_INSTALL 1 -#define TARGET_SPARC_SIGTRAMP_INSTALL 2 +void target_cpu_set_tls(CPUARMState *env, target_ulong newtls); +target_ulong target_cpu_get_tls(CPUARMState *env); -#endif /* TARGET_SYSCALL_H */ +#endif /* TARGET_ARCH_H */ diff --git a/bsd-user/arm/target_arch_cpu.c b/bsd-user/arm/target_arch_cpu.c new file mode 100644 index 0000000000..02bf9149d5 --- /dev/null +++ b/bsd-user/arm/target_arch_cpu.c @@ -0,0 +1,39 @@ +/* + * arm cpu related code + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ +#include "target_arch.h" + +void target_cpu_set_tls(CPUARMState *env, target_ulong newtls) +{ + if (access_secure_reg(env)) { + env->cp15.tpidrurw_s = newtls; + env->cp15.tpidruro_s = newtls; + return; + } + + env->cp15.tpidr_el[0] = newtls; + env->cp15.tpidrro_el[0] = newtls; +} + +target_ulong target_cpu_get_tls(CPUARMState *env) +{ + if (access_secure_reg(env)) { + return env->cp15.tpidruro_s; + } + return env->cp15.tpidrro_el[0]; +} diff --git a/bsd-user/arm/target_arch_cpu.h b/bsd-user/arm/target_arch_cpu.h new file mode 100644 index 0000000000..517d008764 --- /dev/null +++ b/bsd-user/arm/target_arch_cpu.h @@ -0,0 +1,213 @@ +/* + * arm cpu init and loop + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_CPU_H +#define TARGET_ARCH_CPU_H + +#include "target_arch.h" +#include "signal-common.h" + +#define TARGET_DEFAULT_CPU_MODEL "any" + +static inline void target_cpu_init(CPUARMState *env, + struct target_pt_regs *regs) +{ + int i; + + cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC, + CPSRWriteByInstr); + for (i = 0; i < 16; i++) { + env->regs[i] = regs->uregs[i]; + } +} + +static inline void target_cpu_loop(CPUARMState *env) +{ + int trapnr, si_signo, si_code; + CPUState *cs = env_cpu(env); + + for (;;) { + cpu_exec_start(cs); + trapnr = cpu_exec(cs); + cpu_exec_end(cs); + process_queued_cpu_work(cs); + switch (trapnr) { + case EXCP_UDEF: + case EXCP_NOCP: + case EXCP_INVSTATE: + /* + * See arm/arm/undefined.c undefinedinstruction(); + * + * A number of details aren't emulated (they likely don't matter): + * o Misaligned PC generates ILL_ILLADR (these can't come from qemu) + * o Thumb-2 instructions generate ILLADR + * o Both modes implement coprocessor instructions, which we don't + * do here. FreeBSD just implements them for the VFP coprocessor + * and special kernel breakpoints, trace points, dtrace, etc. + */ + force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->regs[15]); + break; + case EXCP_SWI: + { + int ret; + abi_ulong params = get_sp_from_cpustate(env); + int32_t syscall_nr = env->regs[7]; + int32_t arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8; + + /* See arm/arm/syscall.c cpu_fetch_syscall_args() */ + if (syscall_nr == TARGET_FREEBSD_NR_syscall) { + syscall_nr = env->regs[0]; + arg1 = env->regs[1]; + arg2 = env->regs[2]; + arg3 = env->regs[3]; + get_user_s32(arg4, params); + params += sizeof(int32_t); + get_user_s32(arg5, params); + params += sizeof(int32_t); + get_user_s32(arg6, params); + params += sizeof(int32_t); + get_user_s32(arg7, params); + arg8 = 0; + } else if (syscall_nr == TARGET_FREEBSD_NR___syscall) { + syscall_nr = env->regs[0]; + arg1 = env->regs[2]; + arg2 = env->regs[3]; + get_user_s32(arg3, params); + params += sizeof(int32_t); + get_user_s32(arg4, params); + params += sizeof(int32_t); + get_user_s32(arg5, params); + params += sizeof(int32_t); + get_user_s32(arg6, params); + arg7 = 0; + arg8 = 0; + } else { + arg1 = env->regs[0]; + arg2 = env->regs[1]; + arg3 = env->regs[2]; + arg4 = env->regs[3]; + get_user_s32(arg5, params); + params += sizeof(int32_t); + get_user_s32(arg6, params); + params += sizeof(int32_t); + get_user_s32(arg7, params); + params += sizeof(int32_t); + get_user_s32(arg8, params); + } + ret = do_freebsd_syscall(env, syscall_nr, arg1, arg2, arg3, + arg4, arg5, arg6, arg7, arg8); + /* + * Compare to arm/arm/vm_machdep.c + * cpu_set_syscall_retval() + */ + if (-TARGET_EJUSTRETURN == ret) { + /* + * Returning from a successful sigreturn syscall. + * Avoid clobbering register state. + */ + break; + } + if (-TARGET_ERESTART == ret) { + env->regs[15] -= env->thumb ? 2 : 4; + break; + } + if ((unsigned int)ret >= (unsigned int)(-515)) { + ret = -ret; + cpsr_write(env, CPSR_C, CPSR_C, CPSRWriteByInstr); + env->regs[0] = ret; + } else { + cpsr_write(env, 0, CPSR_C, CPSRWriteByInstr); + env->regs[0] = ret; /* XXX need to handle lseek()? 
*/ + /* env->regs[1] = 0; */ + } + } + break; + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + case EXCP_PREFETCH_ABORT: + case EXCP_DATA_ABORT: + /* + * See arm/arm/trap-v6.c prefetch_abort_handler() and + * data_abort_handler() + * + * However, FreeBSD maps these to a generic value and then uses that + * to maybe fault in pages in vm/vm_fault.c:vm_fault_trap(). I + * believe that the indirection maps the same as Linux, but haven't + * chased down every single possible indirection. + */ + + /* For user-only we don't set TTBCR_EAE, so look at the FSR. */ + switch (env->exception.fsr & 0x1f) { + case 0x1: /* Alignment */ + si_signo = TARGET_SIGBUS; + si_code = TARGET_BUS_ADRALN; + break; + case 0x3: /* Access flag fault, level 1 */ + case 0x6: /* Access flag fault, level 2 */ + case 0x9: /* Domain fault, level 1 */ + case 0xb: /* Domain fault, level 2 */ + case 0xd: /* Permission fault, level 1 */ + case 0xf: /* Permission fault, level 2 */ + si_signo = TARGET_SIGSEGV; + si_code = TARGET_SEGV_ACCERR; + break; + case 0x5: /* Translation fault, level 1 */ + case 0x7: /* Translation fault, level 2 */ + si_signo = TARGET_SIGSEGV; + si_code = TARGET_SEGV_MAPERR; + break; + default: + g_assert_not_reached(); + } + force_sig_fault(si_signo, si_code, env->exception.vaddress); + break; + case EXCP_DEBUG: + case EXCP_BKPT: + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->regs[15]); + break; + case EXCP_YIELD: + /* nothing to do here for user-mode, just resume guest code */ + break; + case EXCP_ATOMIC: + cpu_exec_step_atomic(cs); + break; + default: + fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", + trapnr); + cpu_dump_state(cs, stderr, 0); + abort(); + } /* switch() */ + process_pending_signals(env); + } /* for (;;) */ +} + +static inline void target_cpu_clone_regs(CPUARMState *env, target_ulong newsp) +{ + if (newsp) { + env->regs[13] = newsp; + } + env->regs[0] = 0; +} + +static inline void target_cpu_reset(CPUArchState *env) +{ +} + +#endif /* TARGET_ARCH_CPU_H */ diff --git a/bsd-user/arm/target_arch_elf.h b/bsd-user/arm/target_arch_elf.h new file mode 100644 index 0000000000..935bce347f --- /dev/null +++ b/bsd-user/arm/target_arch_elf.h @@ -0,0 +1,129 @@ +/* + * arm ELF definitions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#ifndef TARGET_ARCH_ELF_H +#define TARGET_ARCH_ELF_H + +#define ELF_START_MMAP 0x80000000 +#define ELF_ET_DYN_LOAD_ADDR 0x500000 + +#define elf_check_arch(x) ((x) == EM_ARM) + +#define ELF_CLASS ELFCLASS32 +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_ARM + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE 4096 + +#define ELF_HWCAP get_elf_hwcap() +#define ELF_HWCAP2 get_elf_hwcap2() + +#define GET_FEATURE(feat, hwcap) \ + do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0) + +#define GET_FEATURE_ID(feat, hwcap) \ + do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0) + +enum { + ARM_HWCAP_ARM_SWP = 1 << 0, + ARM_HWCAP_ARM_HALF = 1 << 1, + ARM_HWCAP_ARM_THUMB = 1 << 2, + ARM_HWCAP_ARM_26BIT = 1 << 3, + ARM_HWCAP_ARM_FAST_MULT = 1 << 4, + ARM_HWCAP_ARM_FPA = 1 << 5, + ARM_HWCAP_ARM_VFP = 1 << 6, + ARM_HWCAP_ARM_EDSP = 1 << 7, + ARM_HWCAP_ARM_JAVA = 1 << 8, + ARM_HWCAP_ARM_IWMMXT = 1 << 9, + ARM_HWCAP_ARM_CRUNCH = 1 << 10, + ARM_HWCAP_ARM_THUMBEE = 1 << 11, + ARM_HWCAP_ARM_NEON = 1 << 12, + ARM_HWCAP_ARM_VFPv3 = 1 << 13, + ARM_HWCAP_ARM_VFPv3D16 = 1 << 14, + ARM_HWCAP_ARM_TLS = 1 << 15, + ARM_HWCAP_ARM_VFPv4 = 1 << 16, + ARM_HWCAP_ARM_IDIVA = 1 << 17, + ARM_HWCAP_ARM_IDIVT = 1 << 18, + ARM_HWCAP_ARM_VFPD32 = 1 << 19, + ARM_HWCAP_ARM_LPAE = 1 << 20, + ARM_HWCAP_ARM_EVTSTRM = 1 << 21, +}; + +enum { + ARM_HWCAP2_ARM_AES = 1 << 0, + ARM_HWCAP2_ARM_PMULL = 1 << 1, + ARM_HWCAP2_ARM_SHA1 = 1 << 2, + ARM_HWCAP2_ARM_SHA2 = 1 << 3, + ARM_HWCAP2_ARM_CRC32 = 1 << 4, +}; + +static uint32_t get_elf_hwcap(void) +{ + ARMCPU *cpu = ARM_CPU(thread_cpu); + uint32_t hwcaps = 0; + + hwcaps |= ARM_HWCAP_ARM_SWP; + hwcaps |= ARM_HWCAP_ARM_HALF; + hwcaps |= ARM_HWCAP_ARM_THUMB; + hwcaps |= ARM_HWCAP_ARM_FAST_MULT; + + /* probe for the extra features */ + /* EDSP is in v5TE and above */ + GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP); + GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT); + GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE); + GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON); + GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS); + GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE); + GET_FEATURE_ID(aa32_arm_div, ARM_HWCAP_ARM_IDIVA); + GET_FEATURE_ID(aa32_thumb_div, ARM_HWCAP_ARM_IDIVT); + GET_FEATURE_ID(aa32_vfp, ARM_HWCAP_ARM_VFP); + + if (cpu_isar_feature(aa32_fpsp_v3, cpu) || + cpu_isar_feature(aa32_fpdp_v3, cpu)) { + hwcaps |= ARM_HWCAP_ARM_VFPv3; + if (cpu_isar_feature(aa32_simd_r32, cpu)) { + hwcaps |= ARM_HWCAP_ARM_VFPD32; + } else { + hwcaps |= ARM_HWCAP_ARM_VFPv3D16; + } + } + GET_FEATURE_ID(aa32_simdfmac, ARM_HWCAP_ARM_VFPv4); + + return hwcaps; +} + +static uint32_t get_elf_hwcap2(void) +{ + ARMCPU *cpu = ARM_CPU(thread_cpu); + uint32_t hwcaps = 0; + + GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES); + GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL); + GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1); + GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2); + GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32); + return hwcaps; +} + +#undef GET_FEATURE +#undef GET_FEATURE_ID + +#endif /* TARGET_ARCH_ELF_H */ diff --git a/bsd-user/arm/target_arch_reg.h b/bsd-user/arm/target_arch_reg.h new file mode 100644 index 0000000000..070fa24da1 --- /dev/null +++ b/bsd-user/arm/target_arch_reg.h @@ -0,0 +1,60 @@ +/* + * FreeBSD arm register structures + * + * Copyright (c) 2015 Stacey Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free 
Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_REG_H +#define TARGET_ARCH_REG_H + +/* See sys/arm/include/reg.h */ +typedef struct target_reg { + uint32_t r[13]; + uint32_t r_sp; + uint32_t r_lr; + uint32_t r_pc; + uint32_t r_cpsr; +} target_reg_t; + +typedef struct target_fp_reg { + uint32_t fp_exponent; + uint32_t fp_mantissa_hi; + u_int32_t fp_mantissa_lo; +} target_fp_reg_t; + +typedef struct target_fpreg { + uint32_t fpr_fpsr; + target_fp_reg_t fpr[8]; +} target_fpreg_t; + +#define tswapreg(ptr) tswapal(ptr) + +static inline void target_copy_regs(target_reg_t *regs, const CPUARMState *env) +{ + int i; + + for (i = 0; i < 13; i++) { + regs->r[i] = tswapreg(env->regs[i + 1]); + } + regs->r_sp = tswapreg(env->regs[13]); + regs->r_lr = tswapreg(env->regs[14]); + regs->r_pc = tswapreg(env->regs[15]); + regs->r_cpsr = tswapreg(cpsr_read((CPUARMState *)env)); +} + +#undef tswapreg + +#endif /* TARGET_ARCH_REG_H */ diff --git a/bsd-user/arm/target_arch_signal.h b/bsd-user/arm/target_arch_signal.h new file mode 100644 index 0000000000..02b2b33e07 --- /dev/null +++ b/bsd-user/arm/target_arch_signal.h @@ -0,0 +1,89 @@ +/* + * arm signal definitions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_SIGNAL_H +#define TARGET_ARCH_SIGNAL_H + +#include "cpu.h" + +#define TARGET_REG_R0 0 +#define TARGET_REG_R1 1 +#define TARGET_REG_R2 2 +#define TARGET_REG_R3 3 +#define TARGET_REG_R4 4 +#define TARGET_REG_R5 5 +#define TARGET_REG_R6 6 +#define TARGET_REG_R7 7 +#define TARGET_REG_R8 8 +#define TARGET_REG_R9 9 +#define TARGET_REG_R10 10 +#define TARGET_REG_R11 11 +#define TARGET_REG_R12 12 +#define TARGET_REG_R13 13 +#define TARGET_REG_R14 14 +#define TARGET_REG_R15 15 +#define TARGET_REG_CPSR 16 +#define TARGET__NGREG 17 +/* Convenience synonyms */ +#define TARGET_REG_FP TARGET_REG_R11 +#define TARGET_REG_SP TARGET_REG_R13 +#define TARGET_REG_LR TARGET_REG_R14 +#define TARGET_REG_PC TARGET_REG_R15 + +#define TARGET_INSN_SIZE 4 /* arm instruction size */ + +/* Size of the signal trampolin code. See _sigtramp(). 
*/ +#define TARGET_SZSIGCODE ((abi_ulong)(9 * TARGET_INSN_SIZE)) + +/* compare to arm/include/_limits.h */ +#define TARGET_MINSIGSTKSZ (1024 * 4) /* min sig stack size */ +#define TARGET_SIGSTKSZ (TARGET_MINSIGSTKSZ + 32768) /* recommended size */ + +/* + * Floating point register state + */ +typedef struct target_mcontext_vfp { + abi_ullong mcv_reg[32]; + abi_ulong mcv_fpscr; +} target_mcontext_vfp_t; + +typedef struct target_mcontext { + abi_uint __gregs[TARGET__NGREG]; + + /* + * Originally, rest of this structure was named __fpu, 35 * 4 bytes + * long, never accessed from kernel. + */ + abi_ulong mc_vfp_size; + abi_ptr mc_vfp_ptr; + abi_int mc_spare[33]; +} target_mcontext_t; + +#define TARGET_MCONTEXT_SIZE 208 +#define TARGET_UCONTEXT_SIZE 260 + +#include "target_os_ucontext.h" + +struct target_sigframe { + target_siginfo_t sf_si; /* saved siginfo */ + target_ucontext_t sf_uc; /* saved ucontext */ + target_mcontext_vfp_t sf_vfp; /* actual saved VFP context */ +}; + +#endif /* TARGET_ARCH_SIGNAL_H */ diff --git a/bsd-user/arm/target_arch_sigtramp.h b/bsd-user/arm/target_arch_sigtramp.h new file mode 100644 index 0000000000..06198045ed --- /dev/null +++ b/bsd-user/arm/target_arch_sigtramp.h @@ -0,0 +1,49 @@ +/* + * arm sysarch() system call emulation + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_SIGTRAMP_H +#define TARGET_ARCH_SIGTRAMP_H + +/* Compare to arm/arm/locore.S ENTRY_NP(sigcode) */ +static inline abi_long setup_sigtramp(abi_ulong offset, unsigned sigf_uc, + unsigned sys_sigreturn) +{ + int i; + uint32_t sys_exit = TARGET_FREEBSD_NR_exit; + uint32_t sigtramp_code[] = { + /* 1 */ 0xE1A0000D, /* mov r0, sp */ + /* 2 */ 0xE2800000 + sigf_uc, /* add r0, r0, #SIGF_UC */ + /* 3 */ 0xE59F700C, /* ldr r7, [pc, #12] */ + /* 4 */ 0xEF000000 + sys_sigreturn, /* swi (SYS_sigreturn) */ + /* 5 */ 0xE59F7008, /* ldr r7, [pc, #8] */ + /* 6 */ 0xEF000000 + sys_exit, /* swi (SYS_exit)*/ + /* 7 */ 0xEAFFFFFA, /* b . -16 */ + /* 8 */ sys_sigreturn, + /* 9 */ sys_exit + }; + + G_STATIC_ASSERT(sizeof(sigtramp_code) == TARGET_SZSIGCODE); + + for (i = 0; i < 9; i++) { + tswap32s(&sigtramp_code[i]); + } + + return memcpy_to_target(offset, sigtramp_code, TARGET_SZSIGCODE); +} +#endif /* TARGET_ARCH_SIGTRAMP_H */ diff --git a/bsd-user/arm/target_arch_sysarch.h b/bsd-user/arm/target_arch_sysarch.h index 632a5cd453..5cb7864197 100644 --- a/bsd-user/arm/target_arch_sysarch.h +++ b/bsd-user/arm/target_arch_sysarch.h @@ -17,8 +17,8 @@ * along with this program; if not, see . 
*/ -#ifndef BSD_USER_ARCH_SYSARCH_H_ -#define BSD_USER_ARCH_SYSARCH_H_ +#ifndef TARGET_ARCH_SYSARCH_H +#define TARGET_ARCH_SYSARCH_H #include "target_syscall.h" #include "target_arch.h" @@ -75,4 +75,4 @@ static inline void do_freebsd_arch_print_sysarch( } } -#endif /*!BSD_USER_ARCH_SYSARCH_H_ */ +#endif /* TARGET_ARCH_SYSARCH_H */ diff --git a/bsd-user/arm/target_arch_thread.h b/bsd-user/arm/target_arch_thread.h new file mode 100644 index 0000000000..fd257f313d --- /dev/null +++ b/bsd-user/arm/target_arch_thread.h @@ -0,0 +1,81 @@ +/* + * arm thread support + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_THREAD_H +#define TARGET_ARCH_THREAD_H + +/* Compare to arm/arm/vm_machdep.c cpu_set_upcall_kse() */ +static inline void target_thread_set_upcall(CPUARMState *env, abi_ulong entry, + abi_ulong arg, abi_ulong stack_base, abi_ulong stack_size) +{ + abi_ulong sp; + + /* + * Make sure the stack is properly aligned. + * arm/include/param.h (STACKALIGN() macro) + */ + sp = (u_int)(stack_base + stack_size) & ~0x7; + + /* sp = stack base */ + env->regs[13] = sp; + /* pc = start function entry */ + env->regs[15] = entry & 0xfffffffe; + /* r0 = arg */ + env->regs[0] = arg; + env->spsr = ARM_CPU_MODE_USR; + /* + * Thumb mode is encoded by the low bit in the entry point (since ARM can't + * execute at odd addresses). When it's set, set the Thumb bit (T) in the + * CPSR. + */ + cpsr_write(env, (entry & 1) * CPSR_T, CPSR_T, CPSRWriteByInstr); +} + +static inline void target_thread_init(struct target_pt_regs *regs, + struct image_info *infop) +{ + abi_long stack = infop->start_stack; + memset(regs, 0, sizeof(*regs)); + regs->ARM_cpsr = ARM_CPU_MODE_USR; + /* + * Thumb mode is encoded by the low bit in the entry point (since ARM can't + * execute at odd addresses). When it's set, set the Thumb bit (T) in the + * CPSR. + */ + if (infop->entry & 1) { + regs->ARM_cpsr |= CPSR_T; + } + regs->ARM_pc = infop->entry & 0xfffffffe; + regs->ARM_sp = stack; + regs->ARM_lr = infop->entry & 0xfffffffe; + /* + * FreeBSD kernel passes the ps_strings pointer in r0. This is used by some + * programs to set status messages that we see in ps. bsd-user doesn't + * support that functionality, so it's ignored. When set to 0, FreeBSD's csu + * code ignores it. For the static case, r1 and r2 are effectively ignored + * by the csu __startup() routine. For the dynamic case, rtld saves r0 but + * generates r1 and r2 and passes them into the csu _startup.
+ * + * r0 ps_strings 0 passed since ps arg setting not supported + * r1 obj_main ignored by _start(), so 0 passed + * r2 cleanup generated by rtld or ignored by _start(), so 0 passed + */ +} + +#endif /* TARGET_ARCH_THREAD_H */ diff --git a/bsd-user/arm/target_arch_vmparam.h b/bsd-user/arm/target_arch_vmparam.h new file mode 100644 index 0000000000..3fb69aff51 --- /dev/null +++ b/bsd-user/arm/target_arch_vmparam.h @@ -0,0 +1,49 @@ +/* + * arm VM parameters definitions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_VMPARAM_H +#define TARGET_ARCH_VMPARAM_H + +#include "cpu.h" + +/* compare to sys/arm/include/vmparam.h */ +#define TARGET_MAXTSIZ (64 * MiB) /* max text size */ +#define TARGET_DFLDSIZ (128 * MiB) /* initial data size limit */ +#define TARGET_MAXDSIZ (512 * MiB) /* max data size */ +#define TARGET_DFLSSIZ (4 * MiB) /* initial stack size limit */ +#define TARGET_MAXSSIZ (64 * MiB) /* max stack size */ +#define TARGET_SGROWSIZ (128 * KiB) /* amount to grow stack */ + +#define TARGET_RESERVED_VA 0xf7000000 + + /* KERNBASE - 512 MB */ +#define TARGET_VM_MAXUSER_ADDRESS (0xc0000000 - (512 * MiB)) +#define TARGET_USRSTACK TARGET_VM_MAXUSER_ADDRESS + +static inline abi_ulong get_sp_from_cpustate(CPUARMState *state) +{ + return state->regs[13]; /* sp */ +} + +static inline void set_second_rval(CPUARMState *state, abi_ulong retval2) +{ + state->regs[1] = retval2; +} + +#endif /* TARGET_ARCH_VMPARAM_H */ diff --git a/bsd-user/arm/target_syscall.h b/bsd-user/arm/target_syscall.h index ef4b37f017..5804a53541 100644 --- a/bsd-user/arm/target_syscall.h +++ b/bsd-user/arm/target_syscall.h @@ -1,5 +1,24 @@ -#ifndef BSD_USER_ARCH_SYSCALL_H_ -#define BSD_USER_ARCH_SYSCALL_H_ +/* + * arm cpu system call stubs + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#ifndef ARM_TARGET_SYSCALL_H +#define ARM_TARGET_SYSCALL_H struct target_pt_regs { abi_long uregs[17]; @@ -31,6 +50,6 @@ struct target_pt_regs { #define TARGET_FREEBSD_ARM_GET_TP 3 #define TARGET_HW_MACHINE "arm" -#define TARGET_HW_MACHINE_ARCH "armv6" +#define TARGET_HW_MACHINE_ARCH "armv7" -#endif /* !BSD_USER_ARCH_SYSCALL_H_ */ +#endif /* ARM_TARGET_SYSCALL_H */ diff --git a/bsd-user/bsd-file.h b/bsd-user/bsd-file.h new file mode 100644 index 0000000000..588e0c50d4 --- /dev/null +++ b/bsd-user/bsd-file.h @@ -0,0 +1,942 @@ +/* + * file related system call shims and definitions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef BSD_FILE_H +#define BSD_FILE_H + +#include "qemu/path.h" + +#define LOCK_PATH(p, arg) \ +do { \ + (p) = lock_user_string(arg); \ + if ((p) == NULL) { \ + return -TARGET_EFAULT; \ + } \ +} while (0) + +#define UNLOCK_PATH(p, arg) unlock_user(p, arg, 0) + +#define LOCK_PATH2(p1, arg1, p2, arg2) \ +do { \ + (p1) = lock_user_string(arg1); \ + if ((p1) == NULL) { \ + return -TARGET_EFAULT; \ + } \ + (p2) = lock_user_string(arg2); \ + if ((p2) == NULL) { \ + unlock_user(p1, arg1, 0); \ + return -TARGET_EFAULT; \ + } \ +} while (0) + +#define UNLOCK_PATH2(p1, arg1, p2, arg2) \ +do { \ + unlock_user(p2, arg2, 0); \ + unlock_user(p1, arg1, 0); \ +} while (0) + +extern struct iovec *lock_iovec(int type, abi_ulong target_addr, int count, + int copy); +extern void unlock_iovec(struct iovec *vec, abi_ulong target_addr, int count, + int copy); + +int safe_open(const char *path, int flags, mode_t mode); +int safe_openat(int fd, const char *path, int flags, mode_t mode); + +ssize_t safe_read(int fd, void *buf, size_t nbytes); +ssize_t safe_pread(int fd, void *buf, size_t nbytes, off_t offset); +ssize_t safe_readv(int fd, const struct iovec *iov, int iovcnt); +ssize_t safe_preadv(int fd, const struct iovec *iov, int iovcnt, off_t offset); + +ssize_t safe_write(int fd, void *buf, size_t nbytes); +ssize_t safe_pwrite(int fd, void *buf, size_t nbytes, off_t offset); +ssize_t safe_writev(int fd, const struct iovec *iov, int iovcnt); +ssize_t safe_pwritev(int fd, const struct iovec *iov, int iovcnt, off_t offset); + +/* read(2) */ +static abi_long do_bsd_read(abi_long arg1, abi_long arg2, abi_long arg3) +{ + abi_long ret; + void *p; + + p = lock_user(VERIFY_WRITE, arg2, arg3, 0); + if (p == NULL) { + return -TARGET_EFAULT; + } + ret = get_errno(safe_read(arg1, p, arg3)); + unlock_user(p, arg2, ret); + + return ret; +} + +/* pread(2) */ +static abi_long do_bsd_pread(void *cpu_env, abi_long arg1, + abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6) +{ + abi_long ret; + void *p; + + p = lock_user(VERIFY_WRITE, arg2, arg3, 0); + if (p == NULL) { + return -TARGET_EFAULT; + } + if (regpairs_aligned(cpu_env) != 0) { + arg4 = arg5; + arg5 = arg6; + } + ret = get_errno(safe_pread(arg1, p, arg3, target_arg64(arg4, arg5))); + unlock_user(p, 
arg2, ret); + + return ret; +} + +/* readv(2) */ +static abi_long do_bsd_readv(abi_long arg1, abi_long arg2, abi_long arg3) +{ + abi_long ret; + struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); + + if (vec != NULL) { + ret = get_errno(safe_readv(arg1, vec, arg3)); + unlock_iovec(vec, arg2, arg3, 1); + } else { + ret = -host_to_target_errno(errno); + } + + return ret; +} + +/* preadv(2) */ +static abi_long do_bsd_preadv(void *cpu_env, abi_long arg1, + abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6) +{ + abi_long ret; + struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 1); + + if (vec != NULL) { + if (regpairs_aligned(cpu_env) != 0) { + arg4 = arg5; + arg5 = arg6; + } + ret = get_errno(safe_preadv(arg1, vec, arg3, target_arg64(arg4, arg5))); + unlock_iovec(vec, arg2, arg3, 0); + } else { + ret = -host_to_target_errno(errno); + } + + return ret; +} + +/* write(2) */ +static abi_long do_bsd_write(abi_long arg1, abi_long arg2, abi_long arg3) +{ + abi_long nbytes, ret; + void *p; + + /* nbytes < 0 implies that it was larger than SIZE_MAX. */ + nbytes = arg3; + if (nbytes < 0) { + return -TARGET_EINVAL; + } + p = lock_user(VERIFY_READ, arg2, nbytes, 1); + if (p == NULL) { + return -TARGET_EFAULT; + } + ret = get_errno(safe_write(arg1, p, arg3)); + unlock_user(p, arg2, 0); + + return ret; +} + +/* pwrite(2) */ +static abi_long do_bsd_pwrite(void *cpu_env, abi_long arg1, + abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6) +{ + abi_long ret; + void *p; + + p = lock_user(VERIFY_READ, arg2, arg3, 1); + if (p == NULL) { + return -TARGET_EFAULT; + } + if (regpairs_aligned(cpu_env) != 0) { + arg4 = arg5; + arg5 = arg6; + } + ret = get_errno(safe_pwrite(arg1, p, arg3, target_arg64(arg4, arg5))); + unlock_user(p, arg2, 0); + + return ret; +} + +/* writev(2) */ +static abi_long do_bsd_writev(abi_long arg1, abi_long arg2, abi_long arg3) +{ + abi_long ret; + struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); + + if (vec != NULL) { + ret = get_errno(safe_writev(arg1, vec, arg3)); + unlock_iovec(vec, arg2, arg3, 0); + } else { + ret = -host_to_target_errno(errno); + } + + return ret; +} + +/* pwritev(2) */ +static abi_long do_bsd_pwritev(void *cpu_env, abi_long arg1, + abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6) +{ + abi_long ret; + struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); + + if (vec != NULL) { + if (regpairs_aligned(cpu_env) != 0) { + arg4 = arg5; + arg5 = arg6; + } + ret = get_errno(safe_pwritev(arg1, vec, arg3, target_arg64(arg4, arg5))); + unlock_iovec(vec, arg2, arg3, 0); + } else { + ret = -host_to_target_errno(errno); + } + + return ret; +} + +/* open(2) */ +static abi_long do_bsd_open(abi_long arg1, abi_long arg2, abi_long arg3) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(safe_open(path(p), target_to_host_bitmask(arg2, + fcntl_flags_tbl), arg3)); + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* openat(2) */ +static abi_long do_bsd_openat(abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg2); + ret = get_errno(safe_openat(arg1, path(p), + target_to_host_bitmask(arg3, fcntl_flags_tbl), arg4)); + UNLOCK_PATH(p, arg2); + + return ret; +} + +/* close(2) */ +static abi_long do_bsd_close(abi_long arg1) +{ + return get_errno(close(arg1)); +} + +/* fdatasync(2) */ +static abi_long do_bsd_fdatasync(abi_long arg1) +{ + return get_errno(fdatasync(arg1)); +} + +/* fsync(2) */ +static abi_long 
do_bsd_fsync(abi_long arg1) +{ + return get_errno(fsync(arg1)); +} + +/* closefrom(2) */ +static abi_long do_bsd_closefrom(abi_long arg1) +{ + closefrom(arg1); /* returns void */ + return get_errno(0); +} + +/* revoke(2) */ +static abi_long do_bsd_revoke(abi_long arg1) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(revoke(p)); /* XXX path(p)? */ + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* access(2) */ +static abi_long do_bsd_access(abi_long arg1, abi_long arg2) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(access(path(p), arg2)); + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* eaccess(2) */ +static abi_long do_bsd_eaccess(abi_long arg1, abi_long arg2) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(eaccess(path(p), arg2)); + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* faccessat(2) */ +static abi_long do_bsd_faccessat(abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg2); + ret = get_errno(faccessat(arg1, p, arg3, arg4)); /* XXX path(p)? */ + UNLOCK_PATH(p, arg2); + + return ret; +} + +/* chdir(2) */ +static abi_long do_bsd_chdir(abi_long arg1) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(chdir(p)); /* XXX path(p)? */ + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* fchdir(2) */ +static abi_long do_bsd_fchdir(abi_long arg1) +{ + return get_errno(fchdir(arg1)); +} + +/* rename(2) */ +static abi_long do_bsd_rename(abi_long arg1, abi_long arg2) +{ + abi_long ret; + void *p1, *p2; + + LOCK_PATH2(p1, arg1, p2, arg2); + ret = get_errno(rename(p1, p2)); /* XXX path(p1), path(p2) */ + UNLOCK_PATH2(p1, arg1, p2, arg2); + + return ret; +} + +/* renameat(2) */ +static abi_long do_bsd_renameat(abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4) +{ + abi_long ret; + void *p1, *p2; + + LOCK_PATH2(p1, arg2, p2, arg4); + ret = get_errno(renameat(arg1, p1, arg3, p2)); + UNLOCK_PATH2(p1, arg2, p2, arg4); + + return ret; +} + +/* link(2) */ +static abi_long do_bsd_link(abi_long arg1, abi_long arg2) +{ + abi_long ret; + void *p1, *p2; + + LOCK_PATH2(p1, arg1, p2, arg2); + ret = get_errno(link(p1, p2)); /* XXX path(p1), path(p2) */ + UNLOCK_PATH2(p1, arg1, p2, arg2); + + return ret; +} + +/* linkat(2) */ +static abi_long do_bsd_linkat(abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4, abi_long arg5) +{ + abi_long ret; + void *p1, *p2; + + LOCK_PATH2(p1, arg2, p2, arg4); + ret = get_errno(linkat(arg1, p1, arg3, p2, arg5)); + UNLOCK_PATH2(p1, arg2, p2, arg4); + + return ret; +} + +/* unlink(2) */ +static abi_long do_bsd_unlink(abi_long arg1) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(unlink(p)); /* XXX path(p) */ + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* unlinkat(2) */ +static abi_long do_bsd_unlinkat(abi_long arg1, abi_long arg2, + abi_long arg3) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg2); + ret = get_errno(unlinkat(arg1, p, arg3)); /* XXX path(p) */ + UNLOCK_PATH(p, arg2); + + return ret; +} + +/* mkdir(2) */ +static abi_long do_bsd_mkdir(abi_long arg1, abi_long arg2) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(mkdir(p, arg2)); /* XXX path(p) */ + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* mkdirat(2) */ +static abi_long do_bsd_mkdirat(abi_long arg1, abi_long arg2, + abi_long arg3) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg2); + ret = get_errno(mkdirat(arg1, p, arg3)); + UNLOCK_PATH(p, arg2); + + return ret; +} + +/* rmdir(2) */ +static 
abi_long do_bsd_rmdir(abi_long arg1) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(rmdir(p)); /* XXX path(p)? */ + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* undocumented __getcwd(char *buf, size_t len) system call */ +static abi_long do_bsd___getcwd(abi_long arg1, abi_long arg2) +{ + abi_long ret; + void *p; + + p = lock_user(VERIFY_WRITE, arg1, arg2, 0); + if (p == NULL) { + return -TARGET_EFAULT; + } + ret = safe_syscall(SYS___getcwd, p, arg2); + unlock_user(p, arg1, ret == 0 ? strlen(p) + 1 : 0); + + return get_errno(ret); +} + +/* dup(2) */ +static abi_long do_bsd_dup(abi_long arg1) +{ + return get_errno(dup(arg1)); +} + +/* dup2(2) */ +static abi_long do_bsd_dup2(abi_long arg1, abi_long arg2) +{ + return get_errno(dup2(arg1, arg2)); +} + +/* truncate(2) */ +static abi_long do_bsd_truncate(void *cpu_env, abi_long arg1, + abi_long arg2, abi_long arg3, abi_long arg4) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + if (regpairs_aligned(cpu_env) != 0) { + arg2 = arg3; + arg3 = arg4; + } + ret = get_errno(truncate(p, target_arg64(arg2, arg3))); + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* ftruncate(2) */ +static abi_long do_bsd_ftruncate(void *cpu_env, abi_long arg1, + abi_long arg2, abi_long arg3, abi_long arg4) +{ + if (regpairs_aligned(cpu_env) != 0) { + arg2 = arg3; + arg3 = arg4; + } + return get_errno(ftruncate(arg1, target_arg64(arg2, arg3))); +} + +/* acct(2) */ +static abi_long do_bsd_acct(abi_long arg1) +{ + abi_long ret; + void *p; + + if (arg1 == 0) { + ret = get_errno(acct(NULL)); + } else { + LOCK_PATH(p, arg1); + ret = get_errno(acct(path(p))); + UNLOCK_PATH(p, arg1); + } + return ret; +} + +/* sync(2) */ +static abi_long do_bsd_sync(void) +{ + sync(); + return 0; +} + +/* mount(2) */ +static abi_long do_bsd_mount(abi_long arg1, abi_long arg2, abi_long arg3, + abi_long arg4) +{ + abi_long ret; + void *p1, *p2; + + LOCK_PATH2(p1, arg1, p2, arg2); + /* + * XXX arg4 should be locked, but it isn't clear how to do that since it may + * not be a NULL-terminated string. + */ + if (arg4 == 0) { + ret = get_errno(mount(p1, p2, arg3, NULL)); /* XXX path(p2)? */ + } else { + ret = get_errno(mount(p1, p2, arg3, g2h_untagged(arg4))); /* XXX path(p2)? */ + } + UNLOCK_PATH2(p1, arg1, p2, arg2); + + return ret; +} + +/* unmount(2) */ +static abi_long do_bsd_unmount(abi_long arg1, abi_long arg2) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(unmount(p, arg2)); /* XXX path(p)?
*/ + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* nmount(2) */ +static abi_long do_bsd_nmount(abi_long arg1, abi_long count, + abi_long flags) +{ + abi_long ret; + struct iovec *vec = lock_iovec(VERIFY_READ, arg1, count, 1); + + if (vec != NULL) { + ret = get_errno(nmount(vec, count, flags)); + unlock_iovec(vec, arg1, count, 0); + } else { + return -TARGET_EFAULT; + } + + return ret; +} + +/* symlink(2) */ +static abi_long do_bsd_symlink(abi_long arg1, abi_long arg2) +{ + abi_long ret; + void *p1, *p2; + + LOCK_PATH2(p1, arg1, p2, arg2); + ret = get_errno(symlink(p1, p2)); /* XXX path(p1), path(p2) */ + UNLOCK_PATH2(p1, arg1, p2, arg2); + + return ret; +} + +/* symlinkat(2) */ +static abi_long do_bsd_symlinkat(abi_long arg1, abi_long arg2, + abi_long arg3) +{ + abi_long ret; + void *p1, *p2; + + LOCK_PATH2(p1, arg1, p2, arg3); + ret = get_errno(symlinkat(p1, arg2, p2)); /* XXX path(p1), path(p2) */ + UNLOCK_PATH2(p1, arg1, p2, arg3); + + return ret; +} + +/* readlink(2) */ +static abi_long do_bsd_readlink(CPUArchState *env, abi_long arg1, + abi_long arg2, abi_long arg3) +{ + abi_long ret; + void *p1, *p2; + + LOCK_PATH(p1, arg1); + p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); + if (p2 == NULL) { + UNLOCK_PATH(p1, arg1); + return -TARGET_EFAULT; + } + if (strcmp(p1, "/proc/curproc/file") == 0) { + CPUState *cpu = env_cpu(env); + TaskState *ts = (TaskState *)cpu->opaque; + strncpy(p2, ts->bprm->fullpath, arg3); + ret = MIN((abi_long)strlen(ts->bprm->fullpath), arg3); + } else { + ret = get_errno(readlink(path(p1), p2, arg3)); + } + unlock_user(p2, arg2, ret); + UNLOCK_PATH(p1, arg1); + + return ret; +} + +/* readlinkat(2) */ +static abi_long do_bsd_readlinkat(abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4) +{ + abi_long ret; + void *p1, *p2; + + LOCK_PATH(p1, arg2); + p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); + if (p2 == NULL) { + UNLOCK_PATH(p1, arg2); + return -TARGET_EFAULT; + } + ret = get_errno(readlinkat(arg1, p1, p2, arg4)); + unlock_user(p2, arg3, ret); + UNLOCK_PATH(p1, arg2); + + return ret; +} + +/* chmod(2) */ +static abi_long do_bsd_chmod(abi_long arg1, abi_long arg2) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(chmod(p, arg2)); /* XXX path(p)? */ + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* fchmod(2) */ +static abi_long do_bsd_fchmod(abi_long arg1, abi_long arg2) +{ + return get_errno(fchmod(arg1, arg2)); +} + +/* lchmod(2) */ +static abi_long do_bsd_lchmod(abi_long arg1, abi_long arg2) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(lchmod(p, arg2)); /* XXX path(p)? 
*/ + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* fchmodat(2) */ +static abi_long do_bsd_fchmodat(abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg2); + ret = get_errno(fchmodat(arg1, p, arg3, arg4)); + UNLOCK_PATH(p, arg2); + + return ret; +} + +/* pre-ino64 mknod(2) */ +static abi_long do_bsd_freebsd11_mknod(abi_long arg1, abi_long arg2, abi_long arg3) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(syscall(SYS_freebsd11_mknod, p, arg2, arg3)); + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* pre-ino64 mknodat(2) */ +static abi_long do_bsd_freebsd11_mknodat(abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg2); + ret = get_errno(syscall(SYS_freebsd11_mknodat, arg1, p, arg3, arg4)); + UNLOCK_PATH(p, arg2); + + return ret; +} + +/* post-ino64 mknodat(2) */ +static abi_long do_bsd_mknodat(void *cpu_env, abi_long arg1, + abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, + abi_long arg6) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg2); + /* 32-bit arches use two 32-bit registers for the 64-bit dev argument */ + if (regpairs_aligned(cpu_env) != 0) { + ret = get_errno(mknodat(arg1, p, arg3, target_arg64(arg5, arg6))); + } else { + ret = get_errno(mknodat(arg1, p, arg3, target_arg64(arg4, arg5))); + } + UNLOCK_PATH(p, arg2); + + return ret; +} + +/* chown(2) */ +static abi_long do_bsd_chown(abi_long arg1, abi_long arg2, abi_long arg3) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(chown(p, arg2, arg3)); /* XXX path(p)? */ + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* fchown(2) */ +static abi_long do_bsd_fchown(abi_long arg1, abi_long arg2, + abi_long arg3) +{ + return get_errno(fchown(arg1, arg2, arg3)); +} + +/* lchown(2) */ +static abi_long do_bsd_lchown(abi_long arg1, abi_long arg2, + abi_long arg3) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(lchown(p, arg2, arg3)); /* XXX path(p)? */ + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* fchownat(2) */ +static abi_long do_bsd_fchownat(abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4, abi_long arg5) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg2); + ret = get_errno(fchownat(arg1, p, arg3, arg4, arg5)); /* XXX path(p)? */ + UNLOCK_PATH(p, arg2); + + return ret; +} + +/* chflags(2) */ +static abi_long do_bsd_chflags(abi_long arg1, abi_long arg2) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(chflags(p, arg2)); /* XXX path(p)? */ + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* lchflags(2) */ +static abi_long do_bsd_lchflags(abi_long arg1, abi_long arg2) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(lchflags(p, arg2)); /* XXX path(p)? */ + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* fchflags(2) */ +static abi_long do_bsd_fchflags(abi_long arg1, abi_long arg2) +{ + return get_errno(fchflags(arg1, arg2)); +} + +/* chroot(2) */ +static abi_long do_bsd_chroot(abi_long arg1) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(chroot(p)); /* XXX path(p)? */ + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* flock(2) */ +static abi_long do_bsd_flock(abi_long arg1, abi_long arg2) +{ + return get_errno(flock(arg1, arg2)); +} + +/* mkfifo(2) */ +static abi_long do_bsd_mkfifo(abi_long arg1, abi_long arg2) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(mkfifo(p, arg2)); /* XXX path(p)?
*/ + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* mkfifoat(2) */ +static abi_long do_bsd_mkfifoat(abi_long arg1, abi_long arg2, + abi_long arg3) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg2); + ret = get_errno(mkfifoat(arg1, p, arg3)); + UNLOCK_PATH(p, arg2); + + return ret; +} + +/* pathconf(2) */ +static abi_long do_bsd_pathconf(abi_long arg1, abi_long arg2) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(pathconf(p, arg2)); /* XXX path(p)? */ + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* lpathconf(2) */ +static abi_long do_bsd_lpathconf(abi_long arg1, abi_long arg2) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(lpathconf(p, arg2)); /* XXX path(p)? */ + UNLOCK_PATH(p, arg1); + + return ret; +} + +/* fpathconf(2) */ +static abi_long do_bsd_fpathconf(abi_long arg1, abi_long arg2) +{ + return get_errno(fpathconf(arg1, arg2)); +} + +/* undelete(2) */ +static abi_long do_bsd_undelete(abi_long arg1) +{ + abi_long ret; + void *p; + + LOCK_PATH(p, arg1); + ret = get_errno(undelete(p)); /* XXX path(p)? */ + UNLOCK_PATH(p, arg1); + + return ret; +} + +#endif /* BSD_FILE_H */ diff --git a/bsd-user/bsd-mman.h b/bsd-user/bsd-mman.h deleted file mode 100644 index 910e8c1921..0000000000 --- a/bsd-user/bsd-mman.h +++ /dev/null @@ -1,121 +0,0 @@ -/*- - * Copyright (c) 1982, 1986, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * @(#)mman.h 8.2 (Berkeley) 1/9/95 - * $FreeBSD: src/sys/sys/mman.h,v 1.42 2008/03/28 04:29:27 ps Exp $ - */ - -#define TARGET_FREEBSD_MAP_RESERVED0080 0x0080 /* previously misimplemented MAP_INHERIT */ -#define TARGET_FREEBSD_MAP_RESERVED0100 0x0100 /* previously unimplemented MAP_NOEXTEND */ -#define TARGET_FREEBSD_MAP_STACK 0x0400 /* region grows down, like a stack */ -#define TARGET_FREEBSD_MAP_NOSYNC 0x0800 /* page to but do not sync underlying file */ - -#define TARGET_FREEBSD_MAP_FLAGMASK 0x1ff7 - -/* $NetBSD: mman.h,v 1.42 2008/11/18 22:13:49 ad Exp $ */ - -/*- - * Copyright (c) 1982, 1986, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * @(#)mman.h 8.2 (Berkeley) 1/9/95 - */ -#define TARGET_NETBSD_MAP_INHERIT 0x0080 /* region is retained after exec */ -#define TARGET_NETBSD_MAP_TRYFIXED 0x0400 /* attempt hint address, even within break */ -#define TARGET_NETBSD_MAP_WIRED 0x0800 /* mlock() mapping when it is established */ - -#define TARGET_NETBSD_MAP_STACK 0x2000 /* allocated from memory, swap space (stack) */ - -#define TARGET_NETBSD_MAP_FLAGMASK 0x3ff7 - -/* $OpenBSD: mman.h,v 1.18 2003/07/21 22:52:19 tedu Exp $ */ -/* $NetBSD: mman.h,v 1.11 1995/03/26 20:24:23 jtc Exp $ */ - -/*- - * Copyright (c) 1982, 1986, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * @(#)mman.h 8.1 (Berkeley) 6/2/93 - */ - -#define TARGET_OPENBSD_MAP_INHERIT 0x0080 /* region is retained after exec */ -#define TARGET_OPENBSD_MAP_NOEXTEND 0x0100 /* for MAP_FILE, don't change file size */ -#define TARGET_OPENBSD_MAP_TRYFIXED 0x0400 /* attempt hint address, even within heap */ - -#define TARGET_OPENBSD_MAP_FLAGMASK 0x17f7 - -// XXX -#define TARGET_BSD_MAP_FLAGMASK 0x3ff7 diff --git a/bsd-user/bsd-proc.h b/bsd-user/bsd-proc.h new file mode 100644 index 0000000000..68b66e571d --- /dev/null +++ b/bsd-user/bsd-proc.h @@ -0,0 +1,42 @@ +/* + * process related system call shims and definitions + * + * Copyright (c) 2013-2014 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef BSD_PROC_H_ +#define BSD_PROC_H_ + +#include +#include +#include +#include +#include + +/* exit(2) */ +static inline abi_long do_bsd_exit(void *cpu_env, abi_long arg1) +{ +#ifdef TARGET_GPROF + _mcleanup(); +#endif + gdb_exit(arg1); + qemu_plugin_user_exit(); + _exit(arg1); + + return 0; +} + +#endif /* !BSD_PROC_H_ */ diff --git a/bsd-user/bsdload.c b/bsd-user/bsdload.c index 8d83f21eda..5b3c061a45 100644 --- a/bsd-user/bsdload.c +++ b/bsd-user/bsdload.c @@ -1,11 +1,24 @@ -/* Code for loading BSD executables. Mostly linux kernel code. */ +/* + * Load BSD executables. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ #include "qemu/osdep.h" #include "qemu.h" -#define TARGET_NGROUPS 32 - /* ??? This should really be somewhere else. */ abi_long memcpy_to_target(abi_ulong dest, const void *src, unsigned long len) @@ -83,7 +96,7 @@ static int prepare_binprm(struct bsd_binprm *bprm) /* Construct the envp and argv tables on the target stack. */ abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp, - abi_ulong stringp, int push_ptr) + abi_ulong stringp) { int n = sizeof(abi_ulong); abi_ulong envp; @@ -93,13 +106,6 @@ abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp, envp = sp; sp -= (argc + 1) * n; argv = sp; - if (push_ptr) { - /* FIXME - handle put_user() failures */ - sp -= n; - put_user_ual(envp, sp); - sp -= n; - put_user_ual(argv, sp); - } sp -= n; /* FIXME - handle put_user() failures */ put_user_ual(argc, sp); @@ -124,36 +130,70 @@ abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp, return sp; } -int loader_exec(const char *filename, char **argv, char **envp, - struct target_pt_regs *regs, struct image_info *infop) +static bool is_there(const char *candidate) { - struct bsd_binprm bprm; - int retval; - int i; + struct stat fin; - bprm.p = TARGET_PAGE_SIZE * MAX_ARG_PAGES - sizeof(unsigned int); - for (i = 0 ; i < MAX_ARG_PAGES ; i++) { /* clear page-table */ - bprm.page[i] = NULL; + /* XXX work around access(2) false positives for superuser */ + if (access(candidate, X_OK) == 0 && stat(candidate, &fin) == 0 && + S_ISREG(fin.st_mode) && (getuid() != 0 || + (fin.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0)) { + return true; } - retval = open(filename, O_RDONLY); + + return false; +} + +int loader_exec(const char *filename, char **argv, char **envp, + struct target_pt_regs *regs, struct image_info *infop, + struct bsd_binprm *bprm) +{ + char *path, fullpath[PATH_MAX]; + int retval, i; + + bprm->p = TARGET_PAGE_SIZE * MAX_ARG_PAGES; + for (i = 0; i < MAX_ARG_PAGES; i++) { /* clear page-table */ + bprm->page[i] = NULL; + } + + if (strchr(filename, '/') != NULL) { + path = realpath(filename, fullpath); + if (path == NULL) { + /* Failed to resolve. 
*/ + return -1; + } + if (!is_there(path)) { + return -1; + } + } else { + path = g_find_program_in_path(filename); + if (path == NULL) { + return -1; + } + } + + retval = open(path, O_RDONLY); if (retval < 0) { + g_free(path); return retval; } - bprm.fd = retval; - bprm.filename = (char *)filename; - bprm.argc = count(argv); - bprm.argv = argv; - bprm.envc = count(envp); - bprm.envp = envp; - retval = prepare_binprm(&bprm); + bprm->fullpath = path; + bprm->fd = retval; + bprm->filename = (char *)filename; + bprm->argc = count(argv); + bprm->argv = argv; + bprm->envc = count(envp); + bprm->envp = envp; + + retval = prepare_binprm(bprm); if (retval >= 0) { - if (bprm.buf[0] == 0x7f - && bprm.buf[1] == 'E' - && bprm.buf[2] == 'L' - && bprm.buf[3] == 'F') { - retval = load_elf_binary(&bprm, regs, infop); + if (bprm->buf[0] == 0x7f + && bprm->buf[1] == 'E' + && bprm->buf[2] == 'L' + && bprm->buf[3] == 'F') { + retval = load_elf_binary(bprm, regs, infop); } else { fprintf(stderr, "Unknown binary format\n"); return -1; @@ -168,7 +208,7 @@ int loader_exec(const char *filename, char **argv, char **envp, /* Something went wrong, return the inode and free the argument pages*/ for (i = 0 ; i < MAX_ARG_PAGES ; i++) { - g_free(bprm.page[i]); + g_free(bprm->page[i]); } return retval; } diff --git a/bsd-user/elfcore.c b/bsd-user/elfcore.c new file mode 100644 index 0000000000..c49d9280e2 --- /dev/null +++ b/bsd-user/elfcore.c @@ -0,0 +1,10 @@ +/* Stubbed out version of core dump support, explicitly in public domain */ + +static int elf_core_dump(int signr, CPUArchState *env) +{ + struct elf_note en = { 0 }; + + bswap_note(&en); + + return 0; +} diff --git a/bsd-user/elfload.c b/bsd-user/elfload.c index 6edceb3ea6..f8edb22f2a 100644 --- a/bsd-user/elfload.c +++ b/bsd-user/elfload.c @@ -1,4 +1,21 @@ -/* This is the Linux kernel elf-loading code, ported into user space */ +/* + * ELF loading code + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ #include "qemu/osdep.h" @@ -6,558 +23,31 @@ #include "disas/disas.h" #include "qemu/path.h" -#ifdef _ARCH_PPC64 -#undef ARCH_DLINFO -#undef ELF_PLATFORM -#undef ELF_HWCAP -#undef ELF_CLASS -#undef ELF_DATA -#undef ELF_ARCH -#endif +static abi_ulong target_auxents; /* Where the AUX entries are in target */ +static size_t target_auxents_sz; /* Size of AUX entries including AT_NULL */ -/* from personality.h */ +#include "target_arch_reg.h" +#include "target_os_elf.h" +#include "target_os_stack.h" +#include "target_os_thread.h" +#include "target_os_user.h" -/* - * Flags for bug emulation. - * - * These occupy the top three bytes. 
- */ -enum { - ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */ - FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors - * (signal handling) - */ - MMAP_PAGE_ZERO = 0x0100000, - ADDR_COMPAT_LAYOUT = 0x0200000, - READ_IMPLIES_EXEC = 0x0400000, - ADDR_LIMIT_32BIT = 0x0800000, - SHORT_INODE = 0x1000000, - WHOLE_SECONDS = 0x2000000, - STICKY_TIMEOUTS = 0x4000000, - ADDR_LIMIT_3GB = 0x8000000, -}; +abi_ulong target_stksiz; +abi_ulong target_stkbas; -/* - * Personality types. - * - * These go in the low byte. Avoid using the top bit, it will - * conflict with error returns. - */ -enum { - PER_LINUX = 0x0000, - PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT, - PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS, - PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, - PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE, - PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS | - WHOLE_SECONDS | SHORT_INODE, - PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS, - PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE, - PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS, - PER_BSD = 0x0006, - PER_SUNOS = 0x0006 | STICKY_TIMEOUTS, - PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE, - PER_LINUX32 = 0x0008, - PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB, - PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */ - PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */ - PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */ - PER_RISCOS = 0x000c, - PER_SOLARIS = 0x000d | STICKY_TIMEOUTS, - PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, - PER_OSF4 = 0x000f, /* OSF/1 v4 */ - PER_HPUX = 0x0010, - PER_MASK = 0x00ff, -}; - -/* - * Return the base personality without flags. - */ -#define personality(pers) (pers & PER_MASK) - -/* this flag is uneffective under linux too, should be deleted */ -#ifndef MAP_DENYWRITE -#define MAP_DENYWRITE 0 -#endif - -/* should probably go in elf.h */ -#ifndef ELIBBAD -#define ELIBBAD 80 -#endif - -#ifdef TARGET_I386 - -#define ELF_PLATFORM get_elf_platform() - -static const char *get_elf_platform(void) -{ - static char elf_platform[] = "i386"; - int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL); - if (family > 6) - family = 6; - if (family >= 3) - elf_platform[1] = '0' + family; - return elf_platform; -} - -#define ELF_HWCAP get_elf_hwcap() - -static uint32_t get_elf_hwcap(void) -{ - X86CPU *cpu = X86_CPU(thread_cpu); - - return cpu->env.features[FEAT_1_EDX]; -} - -#ifdef TARGET_X86_64 -#define ELF_START_MMAP 0x2aaaaab000ULL -#define elf_check_arch(x) (((x) == ELF_ARCH)) - -#define ELF_CLASS ELFCLASS64 -#define ELF_DATA ELFDATA2LSB -#define ELF_ARCH EM_X86_64 - -static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) -{ - regs->rax = 0; - regs->rsp = infop->start_stack; - regs->rip = infop->entry; - if (bsd_type == target_freebsd) { - regs->rdi = infop->start_stack; - } -} - -#else - -#define ELF_START_MMAP 0x80000000 - -/* - * This is used to ensure we don't load something for the wrong architecture. - */ -#define elf_check_arch(x) (((x) == EM_386) || ((x) == EM_486)) - -/* - * These are used to set parameters in the core dumps. 
- */ -#define ELF_CLASS ELFCLASS32 -#define ELF_DATA ELFDATA2LSB -#define ELF_ARCH EM_386 - -static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) -{ - regs->esp = infop->start_stack; - regs->eip = infop->entry; - - /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program - starts %edx contains a pointer to a function which might be - registered using `atexit'. This provides a mean for the - dynamic linker to call DT_FINI functions for shared libraries - that have been loaded before the code runs. - - A value of 0 tells we have no such handler. */ - regs->edx = 0; -} -#endif - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 4096 - -#endif - -#ifdef TARGET_ARM - -#define ELF_START_MMAP 0x80000000 - -#define elf_check_arch(x) ((x) == EM_ARM) - -#define ELF_CLASS ELFCLASS32 -#ifdef TARGET_WORDS_BIGENDIAN -#define ELF_DATA ELFDATA2MSB -#else -#define ELF_DATA ELFDATA2LSB -#endif -#define ELF_ARCH EM_ARM - -static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) -{ - abi_long stack = infop->start_stack; - memset(regs, 0, sizeof(*regs)); - regs->ARM_cpsr = 0x10; - if (infop->entry & 1) - regs->ARM_cpsr |= CPSR_T; - regs->ARM_pc = infop->entry & 0xfffffffe; - regs->ARM_sp = infop->start_stack; - /* FIXME - what to for failure of get_user()? */ - get_user_ual(regs->ARM_r2, stack + 8); /* envp */ - get_user_ual(regs->ARM_r1, stack + 4); /* envp */ - /* XXX: it seems that r0 is zeroed after ! */ - regs->ARM_r0 = 0; - /* For uClinux PIC binaries. */ - /* XXX: Linux does this only on ARM with no MMU (do we care ?) */ - regs->ARM_r10 = infop->start_data; -} - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 4096 - -enum -{ - ARM_HWCAP_ARM_SWP = 1 << 0, - ARM_HWCAP_ARM_HALF = 1 << 1, - ARM_HWCAP_ARM_THUMB = 1 << 2, - ARM_HWCAP_ARM_26BIT = 1 << 3, - ARM_HWCAP_ARM_FAST_MULT = 1 << 4, - ARM_HWCAP_ARM_FPA = 1 << 5, - ARM_HWCAP_ARM_VFP = 1 << 6, - ARM_HWCAP_ARM_EDSP = 1 << 7, -}; - -#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \ - | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \ - | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP) - -#endif - -#ifdef TARGET_SPARC -#ifdef TARGET_SPARC64 - -#define ELF_START_MMAP 0x80000000 - -#ifndef TARGET_ABI32 -#define elf_check_arch(x) ((x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS) -#else -#define elf_check_arch(x) ((x) == EM_SPARC32PLUS || (x) == EM_SPARC) -#endif - -#define ELF_CLASS ELFCLASS64 -#define ELF_DATA ELFDATA2MSB -#define ELF_ARCH EM_SPARCV9 - -#define STACK_BIAS 2047 - -static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) -{ -#ifndef TARGET_ABI32 - regs->tstate = 0; -#endif - regs->pc = infop->entry; - regs->npc = regs->pc + 4; - regs->y = 0; -#ifdef TARGET_ABI32 - regs->u_regs[14] = infop->start_stack - 16 * 4; -#else - if (personality(infop->personality) == PER_LINUX32) - regs->u_regs[14] = infop->start_stack - 16 * 4; - else { - regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS; - if (bsd_type == target_freebsd) { - regs->u_regs[8] = infop->start_stack; - regs->u_regs[11] = infop->start_stack; - } - } -#endif -} - -#else -#define ELF_START_MMAP 0x80000000 - -#define elf_check_arch(x) ((x) == EM_SPARC) - -#define ELF_CLASS ELFCLASS32 -#define ELF_DATA ELFDATA2MSB -#define ELF_ARCH EM_SPARC - -static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) -{ - regs->psr = 0; - regs->pc = infop->entry; - regs->npc = regs->pc + 4; - regs->y = 0; - regs->u_regs[14] = infop->start_stack - 16 * 4; -} - -#endif -#endif 
- -#ifdef TARGET_PPC - -#define ELF_START_MMAP 0x80000000 - -#if defined(TARGET_PPC64) && !defined(TARGET_ABI32) - -#define elf_check_arch(x) ((x) == EM_PPC64) - -#define ELF_CLASS ELFCLASS64 - -#else - -#define elf_check_arch(x) ((x) == EM_PPC) - -#define ELF_CLASS ELFCLASS32 - -#endif - -#ifdef TARGET_WORDS_BIGENDIAN -#define ELF_DATA ELFDATA2MSB -#else -#define ELF_DATA ELFDATA2LSB -#endif -#define ELF_ARCH EM_PPC - -/* - * We need to put in some extra aux table entries to tell glibc what - * the cache block size is, so it can use the dcbz instruction safely. - */ -#define AT_DCACHEBSIZE 19 -#define AT_ICACHEBSIZE 20 -#define AT_UCACHEBSIZE 21 -/* A special ignored type value for PPC, for glibc compatibility. */ -#define AT_IGNOREPPC 22 -/* - * The requirements here are: - * - keep the final alignment of sp (sp & 0xf) - * - make sure the 32-bit value at the first 16 byte aligned position of - * AUXV is greater than 16 for glibc compatibility. - * AT_IGNOREPPC is used for that. - * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC, - * even if DLINFO_ARCH_ITEMS goes to zero or is undefined. - */ -#define DLINFO_ARCH_ITEMS 5 -#define ARCH_DLINFO \ -do { \ - NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \ - NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \ - NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \ - /* \ - * Now handle glibc compatibility. \ - */ \ - NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ - NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ - } while (0) - -static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop) -{ - abi_ulong pos = infop->start_stack; - abi_ulong tmp; -#if defined(TARGET_PPC64) && !defined(TARGET_ABI32) - abi_ulong entry, toc; -#endif - - _regs->gpr[1] = infop->start_stack; -#if defined(TARGET_PPC64) && !defined(TARGET_ABI32) - get_user_u64(entry, infop->entry); - entry += infop->load_addr; - get_user_u64(toc, infop->entry + 8); - toc += infop->load_addr; - _regs->gpr[2] = toc; - infop->entry = entry; -#endif - _regs->nip = infop->entry; - /* Note that isn't exactly what regular kernel does - * but this is what the ABI wants and is needed to allow - * execution of PPC BSD programs. - */ - /* FIXME - what to for failure of get_user()? 
*/ - get_user_ual(_regs->gpr[3], pos); - pos += sizeof(abi_ulong); - _regs->gpr[4] = pos; - for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong)) { - get_user_ual(tmp, pos); - } - _regs->gpr[5] = pos; -} - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 4096 - -#endif - -#ifdef TARGET_MIPS - -#define ELF_START_MMAP 0x80000000 - -#define elf_check_arch(x) ((x) == EM_MIPS) - -#ifdef TARGET_MIPS64 -#define ELF_CLASS ELFCLASS64 -#else -#define ELF_CLASS ELFCLASS32 -#endif -#ifdef TARGET_WORDS_BIGENDIAN -#define ELF_DATA ELFDATA2MSB -#else -#define ELF_DATA ELFDATA2LSB -#endif -#define ELF_ARCH EM_MIPS - -static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) -{ - regs->cp0_status = 2 << CP0St_KSU; - regs->cp0_epc = infop->entry; - regs->regs[29] = infop->start_stack; -} - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 4096 - -#endif /* TARGET_MIPS */ - -#ifdef TARGET_SH4 - -#define ELF_START_MMAP 0x80000000 - -#define elf_check_arch(x) ((x) == EM_SH) - -#define ELF_CLASS ELFCLASS32 -#define ELF_DATA ELFDATA2LSB -#define ELF_ARCH EM_SH - -static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) -{ - /* Check other registers XXXXX */ - regs->pc = infop->entry; - regs->regs[15] = infop->start_stack; -} - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 4096 - -#endif - -#ifdef TARGET_CRIS - -#define ELF_START_MMAP 0x80000000 - -#define elf_check_arch(x) ((x) == EM_CRIS) - -#define ELF_CLASS ELFCLASS32 -#define ELF_DATA ELFDATA2LSB -#define ELF_ARCH EM_CRIS - -static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) -{ - regs->erp = infop->entry; -} - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 8192 - -#endif - -#ifdef TARGET_M68K - -#define ELF_START_MMAP 0x80000000 - -#define elf_check_arch(x) ((x) == EM_68K) - -#define ELF_CLASS ELFCLASS32 -#define ELF_DATA ELFDATA2MSB -#define ELF_ARCH EM_68K - -/* ??? Does this need to do anything? -#define ELF_PLAT_INIT(_r) */ - -static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) -{ - regs->usp = infop->start_stack; - regs->sr = 0; - regs->pc = infop->entry; -} - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 8192 - -#endif - -#ifdef TARGET_ALPHA - -#define ELF_START_MMAP (0x30000000000ULL) - -#define elf_check_arch(x) ((x) == ELF_ARCH) - -#define ELF_CLASS ELFCLASS64 -#define ELF_DATA ELFDATA2MSB -#define ELF_ARCH EM_ALPHA - -static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) -{ - regs->pc = infop->entry; - regs->ps = 8; - regs->usp = infop->start_stack; - regs->unique = infop->start_data; /* ? 
*/ - printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", - regs->unique, infop->start_data); -} - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 8192 - -#endif /* TARGET_ALPHA */ - -#ifndef ELF_PLATFORM -#define ELF_PLATFORM (NULL) -#endif - -#ifndef ELF_HWCAP -#define ELF_HWCAP 0 -#endif - -#ifdef TARGET_ABI32 -#undef ELF_CLASS -#define ELF_CLASS ELFCLASS32 -#undef bswaptls -#define bswaptls(ptr) bswap32s(ptr) -#endif - -#include "elf.h" - -struct exec -{ - unsigned int a_info; /* Use macros N_MAGIC, etc for access */ - unsigned int a_text; /* length of text, in bytes */ - unsigned int a_data; /* length of data, in bytes */ - unsigned int a_bss; /* length of uninitialized data area, in bytes */ - unsigned int a_syms; /* length of symbol table data in file, in bytes */ - unsigned int a_entry; /* start address */ - unsigned int a_trsize; /* length of relocation info for text, in bytes */ - unsigned int a_drsize; /* length of relocation info for data, in bytes */ -}; - - -#define N_MAGIC(exec) ((exec).a_info & 0xffff) -#define OMAGIC 0407 -#define NMAGIC 0410 -#define ZMAGIC 0413 -#define QMAGIC 0314 - -/* max code+data+bss space allocated to elf interpreter */ -#define INTERP_MAP_SIZE (32 * 1024 * 1024) - -/* max code+data+bss+brk space allocated to ET_DYN executables */ -#define ET_DYN_MAP_SIZE (128 * 1024 * 1024) - -/* Necessary parameters */ -#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE -#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE - 1)) -#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE - 1)) - -#define INTERPRETER_NONE 0 -#define INTERPRETER_AOUT 1 -#define INTERPRETER_ELF 2 - -#define DLINFO_ITEMS 12 +static int elf_core_dump(int signr, CPUArchState *env); +static int load_elf_sections(const struct elfhdr *hdr, struct elf_phdr *phdr, + int fd, abi_ulong rbase, abi_ulong *baddrp); static inline void memcpy_fromfs(void *to, const void *from, unsigned long n) { - memcpy(to, from, n); + memcpy(to, from, n); } -static int load_aout_interp(void *exptr, int interp_fd); - #ifdef BSWAP_NEEDED static void bswap_ehdr(struct elfhdr *ehdr) { - bswap16s(&ehdr->e_type); /* Object file type */ + bswap16s(&ehdr->e_type); /* Object file type */ bswap16s(&ehdr->e_machine); /* Architecture */ bswap32s(&ehdr->e_version); /* Object file version */ bswaptls(&ehdr->e_entry); /* Entry point virtual address */ @@ -565,37 +55,45 @@ static void bswap_ehdr(struct elfhdr *ehdr) bswaptls(&ehdr->e_shoff); /* Section header table file offset */ bswap32s(&ehdr->e_flags); /* Processor-specific flags */ bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */ - bswap16s(&ehdr->e_phentsize); /* Program header table entry size */ + bswap16s(&ehdr->e_phentsize); /* Program header table entry size */ bswap16s(&ehdr->e_phnum); /* Program header table entry count */ - bswap16s(&ehdr->e_shentsize); /* Section header table entry size */ + bswap16s(&ehdr->e_shentsize); /* Section header table entry size */ bswap16s(&ehdr->e_shnum); /* Section header table entry count */ - bswap16s(&ehdr->e_shstrndx); /* Section header string table index */ + bswap16s(&ehdr->e_shstrndx); /* Section header string table index */ } -static void bswap_phdr(struct elf_phdr *phdr) +static void bswap_phdr(struct elf_phdr *phdr, int phnum) { - bswap32s(&phdr->p_type); /* Segment type */ - bswaptls(&phdr->p_offset); /* Segment file offset */ - bswaptls(&phdr->p_vaddr); /* Segment virtual address */ - bswaptls(&phdr->p_paddr); /* Segment physical address */ - 
bswaptls(&phdr->p_filesz); /* Segment size in file */ - bswaptls(&phdr->p_memsz); /* Segment size in memory */ - bswap32s(&phdr->p_flags); /* Segment flags */ - bswaptls(&phdr->p_align); /* Segment alignment */ + int i; + + for (i = 0; i < phnum; i++, phdr++) { + bswap32s(&phdr->p_type); /* Segment type */ + bswap32s(&phdr->p_flags); /* Segment flags */ + bswaptls(&phdr->p_offset); /* Segment file offset */ + bswaptls(&phdr->p_vaddr); /* Segment virtual address */ + bswaptls(&phdr->p_paddr); /* Segment physical address */ + bswaptls(&phdr->p_filesz); /* Segment size in file */ + bswaptls(&phdr->p_memsz); /* Segment size in memory */ + bswaptls(&phdr->p_align); /* Segment alignment */ + } } -static void bswap_shdr(struct elf_shdr *shdr) +static void bswap_shdr(struct elf_shdr *shdr, int shnum) { - bswap32s(&shdr->sh_name); - bswap32s(&shdr->sh_type); - bswaptls(&shdr->sh_flags); - bswaptls(&shdr->sh_addr); - bswaptls(&shdr->sh_offset); - bswaptls(&shdr->sh_size); - bswap32s(&shdr->sh_link); - bswap32s(&shdr->sh_info); - bswaptls(&shdr->sh_addralign); - bswaptls(&shdr->sh_entsize); + int i; + + for (i = 0; i < shnum; i++, shdr++) { + bswap32s(&shdr->sh_name); + bswap32s(&shdr->sh_type); + bswaptls(&shdr->sh_flags); + bswaptls(&shdr->sh_addr); + bswaptls(&shdr->sh_offset); + bswaptls(&shdr->sh_size); + bswap32s(&shdr->sh_link); + bswap32s(&shdr->sh_info); + bswaptls(&shdr->sh_addralign); + bswaptls(&shdr->sh_entsize); + } } static void bswap_sym(struct elf_sym *sym) @@ -605,7 +103,25 @@ static void bswap_sym(struct elf_sym *sym) bswaptls(&sym->st_size); bswap16s(&sym->st_shndx); } -#endif + +static void bswap_note(struct elf_note *en) +{ + bswap32s(&en->n_namesz); + bswap32s(&en->n_descsz); + bswap32s(&en->n_type); +} + +#else /* ! BSWAP_NEEDED */ + +static void bswap_ehdr(struct elfhdr *ehdr) { } +static void bswap_phdr(struct elf_phdr *phdr, int phnum) { } +static void bswap_shdr(struct elf_shdr *shdr, int shnum) { } +static void bswap_sym(struct elf_sym *sym) { } +static void bswap_note(struct elf_note *en) { } + +#endif /* ! BSWAP_NEEDED */ + +#include "elfcore.c" /* * 'copy_elf_strings()' copies argument/envelope strings from user @@ -629,10 +145,12 @@ static abi_ulong copy_elf_strings(int argc, char **argv, void **page, exit(-1); } tmp1 = tmp; - while (*tmp++); + while (*tmp++) { + continue; + } len = tmp - tmp1; if (p < len) { /* this shouldn't happen - 128kB */ - return 0; + return 0; } while (len) { --p; --tmp; --len; @@ -642,14 +160,14 @@ static abi_ulong copy_elf_strings(int argc, char **argv, void **page, if (!pag) { pag = g_try_malloc0(TARGET_PAGE_SIZE); page[p / TARGET_PAGE_SIZE] = pag; - if (!pag) + if (!pag) { return 0; + } } } if (len == 0 || offset == 0) { *(pag + offset) = *tmp; - } - else { + } else { int bytes_to_copy = (len > offset) ? offset : len; tmp -= bytes_to_copy; p -= bytes_to_copy; @@ -662,216 +180,124 @@ static abi_ulong copy_elf_strings(int argc, char **argv, void **page, return p; } -static abi_ulong setup_arg_pages(abi_ulong p, struct bsd_binprm *bprm, - struct image_info *info) +static void setup_arg_pages(struct bsd_binprm *bprm, struct image_info *info, + abi_ulong *stackp, abi_ulong *stringp) { - abi_ulong stack_base, size, error; - int i; + abi_ulong stack_base, size; + abi_long addr; - /* Create enough stack to hold everything. If we don't use - * it for args, we'll use it for something else... + /* + * Create enough stack to hold everything. If we don't use it for args, + * we'll use it for something else... 
*/ - size = x86_stack_size; - if (size < MAX_ARG_PAGES * TARGET_PAGE_SIZE) - size = MAX_ARG_PAGES * TARGET_PAGE_SIZE; - error = target_mmap(0, - size + qemu_host_page_size, - PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANON, - -1, 0); - if (error == -1) { + size = target_dflssiz; + stack_base = TARGET_USRSTACK - size; + addr = target_mmap(stack_base , size + qemu_host_page_size, + PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (addr == -1) { perror("stk mmap"); exit(-1); } /* we reserve one extra page at the top of the stack as guard */ - target_mprotect(error + size, qemu_host_page_size, PROT_NONE); + target_mprotect(addr + size, qemu_host_page_size, PROT_NONE); - stack_base = error + size - MAX_ARG_PAGES * TARGET_PAGE_SIZE; - p += stack_base; + target_stksiz = size; + target_stkbas = addr; - for (i = 0 ; i < MAX_ARG_PAGES ; i++) { - if (bprm->page[i]) { - info->rss++; - /* FIXME - check return value of memcpy_to_target() for failure */ - memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE); - g_free(bprm->page[i]); - } - stack_base += TARGET_PAGE_SIZE; + if (setup_initial_stack(bprm, stackp, stringp) != 0) { + perror("stk setup"); + exit(-1); } - return p; } static void set_brk(abi_ulong start, abi_ulong end) { - /* page-align the start and end addresses... */ - start = HOST_PAGE_ALIGN(start); - end = HOST_PAGE_ALIGN(end); - if (end <= start) - return; - if (target_mmap(start, end - start, - PROT_READ | PROT_WRITE | PROT_EXEC, - MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) { - perror("cannot mmap brk"); - exit(-1); - } + /* page-align the start and end addresses... */ + start = HOST_PAGE_ALIGN(start); + end = HOST_PAGE_ALIGN(end); + if (end <= start) { + return; + } + if (target_mmap(start, end - start, PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) { + perror("cannot mmap brk"); + exit(-1); + } } -/* We need to explicitly zero any fractional pages after the data - section (i.e. bss). This would contain the junk from the file that - should not be in memory. */ +/* + * We need to explicitly zero any fractional pages after the data + * section (i.e. bss). This would contain the junk from the file that + * should not be in memory. + */ static void padzero(abi_ulong elf_bss, abi_ulong last_bss) { - abi_ulong nbyte; + abi_ulong nbyte; - if (elf_bss >= last_bss) - return; + if (elf_bss >= last_bss) { + return; + } - /* XXX: this is really a hack : if the real host page size is - smaller than the target page size, some pages after the end - of the file may not be mapped. A better fix would be to - patch target_mmap(), but it is more complicated as the file - size must be known */ - if (qemu_real_host_page_size < qemu_host_page_size) { - abi_ulong end_addr, end_addr1; - end_addr1 = REAL_HOST_PAGE_ALIGN(elf_bss); - end_addr = HOST_PAGE_ALIGN(elf_bss); - if (end_addr1 < end_addr) { - mmap((void *)g2h_untagged(end_addr1), end_addr - end_addr1, - PROT_READ | PROT_WRITE | PROT_EXEC, - MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0); - } + /* + * XXX: this is really a hack : if the real host page size is + * smaller than the target page size, some pages after the end + * of the file may not be mapped. A better fix would be to + * patch target_mmap(), but it is more complicated as the file + * size must be known. 
+ */ + if (qemu_real_host_page_size() < qemu_host_page_size) { + abi_ulong end_addr, end_addr1; + end_addr1 = REAL_HOST_PAGE_ALIGN(elf_bss); + end_addr = HOST_PAGE_ALIGN(elf_bss); + if (end_addr1 < end_addr) { + mmap((void *)g2h_untagged(end_addr1), end_addr - end_addr1, + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0); } + } - nbyte = elf_bss & (qemu_host_page_size - 1); - if (nbyte) { - nbyte = qemu_host_page_size - nbyte; - do { - /* FIXME - what to do if put_user() fails? */ - put_user_u8(0, elf_bss); - elf_bss++; - } while (--nbyte); - } + nbyte = elf_bss & (qemu_host_page_size - 1); + if (nbyte) { + nbyte = qemu_host_page_size - nbyte; + do { + /* FIXME - what to do if put_user() fails? */ + put_user_u8(0, elf_bss); + elf_bss++; + } while (--nbyte); + } } - -static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, - struct elfhdr * exec, - abi_ulong load_addr, - abi_ulong load_bias, - abi_ulong interp_load_addr, int ibcs, - struct image_info *info) -{ - abi_ulong sp; - int size; - abi_ulong u_platform; - const char *k_platform; - const int n = sizeof(elf_addr_t); - - sp = p; - u_platform = 0; - k_platform = ELF_PLATFORM; - if (k_platform) { - size_t len = strlen(k_platform) + 1; - sp -= (len + n - 1) & ~(n - 1); - u_platform = sp; - /* FIXME - check return value of memcpy_to_target() for failure */ - memcpy_to_target(sp, k_platform, len); - } - /* - * Force 16 byte _final_ alignment here for generality. - */ - sp = sp & ~(abi_ulong)15; - size = (DLINFO_ITEMS + 1) * 2; - if (k_platform) - size += 2; -#ifdef DLINFO_ARCH_ITEMS - size += DLINFO_ARCH_ITEMS * 2; -#endif - size += envc + argc + 2; - size += (!ibcs ? 3 : 1); /* argc itself */ - size *= n; - if (size & 15) - sp -= 16 - (size & 15); - - /* This is correct because Linux defines - * elf_addr_t as Elf32_Off / Elf64_Off - */ -#define NEW_AUX_ENT(id, val) do { \ - sp -= n; put_user_ual(val, sp); \ - sp -= n; put_user_ual(id, sp); \ - } while (0) - - NEW_AUX_ENT(AT_NULL, 0); - - /* There must be exactly DLINFO_ITEMS entries here. */ - NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff)); - NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof(struct elf_phdr))); - NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum)); - NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE)); - NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr)); - NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0); - NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry); - NEW_AUX_ENT(AT_UID, (abi_ulong) getuid()); - NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid()); - NEW_AUX_ENT(AT_GID, (abi_ulong) getgid()); - NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid()); - NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP); - NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK)); - if (k_platform) - NEW_AUX_ENT(AT_PLATFORM, u_platform); -#ifdef ARCH_DLINFO - /* - * ARCH_DLINFO must come last so platform specific code can enforce - * special alignment requirements on the AUXV if necessary (eg. PPC). 
- */ - ARCH_DLINFO; -#endif -#undef NEW_AUX_ENT - - sp = loader_build_argptr(envc, argc, sp, p, !ibcs); - return sp; -} - - static abi_ulong load_elf_interp(struct elfhdr *interp_elf_ex, int interpreter_fd, abi_ulong *interp_load_addr) { struct elf_phdr *elf_phdata = NULL; - struct elf_phdr *eppnt; - abi_ulong load_addr = 0; - int load_addr_set = 0; + abi_ulong rbase; int retval; - abi_ulong last_bss, elf_bss; - abi_ulong error; - int i; + abi_ulong baddr, error; - elf_bss = 0; - last_bss = 0; error = 0; -#ifdef BSWAP_NEEDED bswap_ehdr(interp_elf_ex); -#endif /* First of all, some simple consistency checks */ - if ((interp_elf_ex->e_type != ET_EXEC && - interp_elf_ex->e_type != ET_DYN) || - !elf_check_arch(interp_elf_ex->e_machine)) { + if ((interp_elf_ex->e_type != ET_EXEC && interp_elf_ex->e_type != ET_DYN) || + !elf_check_arch(interp_elf_ex->e_machine)) { return ~((abi_ulong)0UL); } /* Now read in all of the header information */ - - if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE) + if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE) { return ~(abi_ulong)0UL; + } - elf_phdata = (struct elf_phdr *) - malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum); + elf_phdata = (struct elf_phdr *) malloc(sizeof(struct elf_phdr) * + interp_elf_ex->e_phnum); - if (!elf_phdata) + if (!elf_phdata) { return ~((abi_ulong)0UL); + } /* * If the size of this structure has changed, then punt, since @@ -884,109 +310,44 @@ static abi_ulong load_elf_interp(struct elfhdr *interp_elf_ex, retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET); if (retval >= 0) { - retval = read(interpreter_fd, - (char *) elf_phdata, - sizeof(struct elf_phdr) * interp_elf_ex->e_phnum); + retval = read(interpreter_fd, (char *) elf_phdata, + sizeof(struct elf_phdr) * interp_elf_ex->e_phnum); } if (retval < 0) { perror("load_elf_interp"); exit(-1); - free (elf_phdata); + free(elf_phdata); return retval; } -#ifdef BSWAP_NEEDED - eppnt = elf_phdata; - for (i = 0; ie_phnum; i++, eppnt++) { - bswap_phdr(eppnt); - } -#endif + bswap_phdr(elf_phdata, interp_elf_ex->e_phnum); + rbase = 0; if (interp_elf_ex->e_type == ET_DYN) { - /* in order to avoid hardcoding the interpreter load - address in qemu, we allocate a big enough memory zone */ - error = target_mmap(0, INTERP_MAP_SIZE, - PROT_NONE, MAP_PRIVATE | MAP_ANON, - -1, 0); - if (error == -1) { + /* + * In order to avoid hardcoding the interpreter load + * address in qemu, we allocate a big enough memory zone. 
+ */ + rbase = target_mmap(0, INTERP_MAP_SIZE, PROT_NONE, + MAP_PRIVATE | MAP_ANON, -1, 0); + if (rbase == -1) { perror("mmap"); exit(-1); } - load_addr = error; - load_addr_set = 1; } - eppnt = elf_phdata; - for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) - if (eppnt->p_type == PT_LOAD) { - int elf_type = MAP_PRIVATE | MAP_DENYWRITE; - int elf_prot = 0; - abi_ulong vaddr = 0; - abi_ulong k; - - if (eppnt->p_flags & PF_R) elf_prot = PROT_READ; - if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE; - if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC; - if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) { - elf_type |= MAP_FIXED; - vaddr = eppnt->p_vaddr; - } - error = target_mmap(load_addr + TARGET_ELF_PAGESTART(vaddr), - eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr), - elf_prot, - elf_type, - interpreter_fd, - eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr)); - - if (error == -1) { - /* Real error */ - close(interpreter_fd); - free(elf_phdata); - return ~((abi_ulong)0UL); - } - - if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) { - load_addr = error; - load_addr_set = 1; - } - - /* - * Find the end of the file mapping for this phdr, and keep - * track of the largest address we see for this. - */ - k = load_addr + eppnt->p_vaddr + eppnt->p_filesz; - if (k > elf_bss) elf_bss = k; - - /* - * Do the same thing for the memory mapping - between - * elf_bss and last_bss is the bss section. - */ - k = load_addr + eppnt->p_memsz + eppnt->p_vaddr; - if (k > last_bss) last_bss = k; - } + error = load_elf_sections(interp_elf_ex, elf_phdata, interpreter_fd, rbase, + &baddr); + if (error != 0) { + perror("load_elf_sections"); + exit(-1); + } /* Now use mmap to map the library into memory. */ - close(interpreter_fd); - - /* - * Now fill out the bss section. First pad the last page up - * to the page boundary, and then perform a mmap to make sure - * that there are zeromapped pages up to and including the last - * bss page. - */ - padzero(elf_bss, last_bss); - elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */ - - /* Map the last of the bss segment */ - if (last_bss > elf_bss) { - target_mmap(elf_bss, last_bss - elf_bss, - PROT_READ | PROT_WRITE | PROT_EXEC, - MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0); - } free(elf_phdata); - *interp_load_addr = load_addr; - return ((abi_ulong) interp_elf_ex->e_entry) + load_addr; + *interp_load_addr = baddr; + return ((abi_ulong) interp_elf_ex->e_entry) + rbase; } static int symfind(const void *s0, const void *s1) @@ -1010,7 +371,7 @@ static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr) struct elf_sym *syms = s->disas_symtab.elf64; #endif - // binary search + /* binary search */ struct elf_sym *sym; sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind); @@ -1026,9 +387,8 @@ static int symcmp(const void *s0, const void *s1) { struct elf_sym *sym0 = (struct elf_sym *)s0; struct elf_sym *sym1 = (struct elf_sym *)s1; - return (sym0->st_value < sym1->st_value) - ? -1 - : ((sym0->st_value > sym1->st_value) ? 1 : 0); + return (sym0->st_value < sym1->st_value) ? -1 : + ((sym0->st_value > sym1->st_value) ? 1 : 0); } /* Best attempt to load symbols from this ELF object. 
*/ @@ -1042,27 +402,24 @@ static void load_symbols(struct elfhdr *hdr, int fd) lseek(fd, hdr->e_shoff, SEEK_SET); for (i = 0; i < hdr->e_shnum; i++) { - if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr)) + if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr)) { return; -#ifdef BSWAP_NEEDED - bswap_shdr(&sechdr); -#endif + } + bswap_shdr(&sechdr, 1); if (sechdr.sh_type == SHT_SYMTAB) { symtab = sechdr; - lseek(fd, hdr->e_shoff - + sizeof(sechdr) * sechdr.sh_link, SEEK_SET); - if (read(fd, &strtab, sizeof(strtab)) - != sizeof(strtab)) + lseek(fd, hdr->e_shoff + sizeof(sechdr) * sechdr.sh_link, + SEEK_SET); + if (read(fd, &strtab, sizeof(strtab)) != sizeof(strtab)) { return; -#ifdef BSWAP_NEEDED - bswap_shdr(&strtab); -#endif + } + bswap_shdr(&strtab, 1); goto found; } } return; /* Shouldn't happen... */ - found: +found: /* Now know where the strtab and symtab are. Snarf them. */ s = malloc(sizeof(*s)); syms = malloc(symtab.sh_size); @@ -1089,10 +446,8 @@ static void load_symbols(struct elfhdr *hdr, int fd) i = 0; while (i < nsyms) { -#ifdef BSWAP_NEEDED bswap_sym(syms + i); -#endif - // Throw away entries which we do not need. + /* Throw away entries which we do not need. */ if (syms[i].st_shndx == SHN_UNDEF || syms[i].st_shndx >= SHN_LORESERVE || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) { @@ -1109,10 +464,12 @@ static void load_symbols(struct elfhdr *hdr, int fd) i++; } - /* Attempt to free the storage associated with the local symbols - that we threw away. Whether or not this has any effect on the - memory allocation depends on the malloc implementation and how - many symbols we managed to discard. */ + /* + * Attempt to free the storage associated with the local symbols + * that we threw away. Whether or not this has any effect on the + * memory allocation depends on the malloc implementation and how + * many symbols we managed to discard. + */ new_syms = realloc(syms, nsyms * sizeof(*syms)); if (new_syms == NULL) { free(s); @@ -1143,54 +500,137 @@ static void load_symbols(struct elfhdr *hdr, int fd) syminfos = s; } +/* Check the elf header and see if this a target elf binary. */ +int is_target_elf_binary(int fd) +{ + uint8_t buf[128]; + struct elfhdr elf_ex; + + if (lseek(fd, 0L, SEEK_SET) < 0) { + return 0; + } + if (read(fd, buf, sizeof(buf)) < 0) { + return 0; + } + + elf_ex = *((struct elfhdr *)buf); + bswap_ehdr(&elf_ex); + + if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) || + (!elf_check_arch(elf_ex.e_machine))) { + return 0; + } else { + return 1; + } +} + +static int +load_elf_sections(const struct elfhdr *hdr, struct elf_phdr *phdr, int fd, + abi_ulong rbase, abi_ulong *baddrp) +{ + struct elf_phdr *elf_ppnt; + abi_ulong baddr; + int i; + bool first; + + /* + * Now we do a little grungy work by mmaping the ELF image into + * the correct location in memory. At this point, we assume that + * the image should be loaded at fixed address, not at a variable + * address. + */ + first = true; + for (i = 0, elf_ppnt = phdr; i < hdr->e_phnum; i++, elf_ppnt++) { + int elf_prot = 0; + abi_ulong error; + + /* XXX Skip memsz == 0. 
*/ + if (elf_ppnt->p_type != PT_LOAD) { + continue; + } + + if (elf_ppnt->p_flags & PF_R) { + elf_prot |= PROT_READ; + } + if (elf_ppnt->p_flags & PF_W) { + elf_prot |= PROT_WRITE; + } + if (elf_ppnt->p_flags & PF_X) { + elf_prot |= PROT_EXEC; + } + + error = target_mmap(TARGET_ELF_PAGESTART(rbase + elf_ppnt->p_vaddr), + (elf_ppnt->p_filesz + + TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)), + elf_prot, + (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE), + fd, + (elf_ppnt->p_offset - + TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr))); + if (error == -1) { + perror("mmap"); + exit(-1); + } else if (elf_ppnt->p_memsz != elf_ppnt->p_filesz) { + abi_ulong start_bss, end_bss; + + start_bss = rbase + elf_ppnt->p_vaddr + elf_ppnt->p_filesz; + end_bss = rbase + elf_ppnt->p_vaddr + elf_ppnt->p_memsz; + + /* + * Calling set_brk effectively mmaps the pages that we need for the + * bss and break sections. + */ + set_brk(start_bss, end_bss); + padzero(start_bss, end_bss); + } + + if (first) { + baddr = TARGET_ELF_PAGESTART(rbase + elf_ppnt->p_vaddr); + first = false; + } + } + + if (baddrp != NULL) { + *baddrp = baddr; + } + return 0; +} + int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs, struct image_info *info) { struct elfhdr elf_ex; struct elfhdr interp_elf_ex; - struct exec interp_ex; int interpreter_fd = -1; /* avoid warning */ - abi_ulong load_addr, load_bias; - int load_addr_set = 0; - unsigned int interpreter_type = INTERPRETER_NONE; - unsigned char ibcs2_interpreter; + abi_ulong load_addr; int i; - struct elf_phdr * elf_ppnt; + struct elf_phdr *elf_ppnt; struct elf_phdr *elf_phdata; - abi_ulong elf_bss, k, elf_brk; - int retval; - char * elf_interpreter; - abi_ulong elf_entry, interp_load_addr = 0; - abi_ulong start_code, end_code, start_data, end_data; + abi_ulong elf_brk; + int error, retval; + char *elf_interpreter; + abi_ulong baddr, elf_entry, et_dyn_addr, interp_load_addr = 0; abi_ulong reloc_func_desc = 0; -#ifdef LOW_ELF_STACK - abi_ulong elf_stack = ~((abi_ulong)0UL); -#endif - char passed_fileno[6]; - ibcs2_interpreter = 0; load_addr = 0; - load_bias = 0; elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */ -#ifdef BSWAP_NEEDED bswap_ehdr(&elf_ex); -#endif /* First of all, some simple consistency checks */ if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) || - (!elf_check_arch(elf_ex.e_machine))) { + (!elf_check_arch(elf_ex.e_machine))) { return -ENOEXEC; } bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p); - bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page,bprm->p); - bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page,bprm->p); + bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p); + bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p); if (!bprm->p) { retval = -E2BIG; } /* Now read in all of the header information */ - elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum); + elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize * elf_ex.e_phnum); if (elf_phdata == NULL) { return -ENOMEM; } @@ -1208,42 +648,23 @@ int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs, return -errno; } -#ifdef BSWAP_NEEDED - elf_ppnt = elf_phdata; - for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++) { - bswap_phdr(elf_ppnt); - } -#endif + bswap_phdr(elf_phdata, elf_ex.e_phnum); elf_ppnt = elf_phdata; - elf_bss = 0; elf_brk = 0; elf_interpreter = NULL; - start_code = ~((abi_ulong)0UL); - end_code = 0; - start_data = 0; - end_data = 0; - interp_ex.a_info = 0; - - 
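
load_elf_sections above maps each PT_LOAD segment after rounding its virtual address down to an ELF page boundary and widening the mapping length and file offset by the same in-page slack. A worked sketch of that arithmetic with made-up numbers, assuming a 4 KiB TARGET_ELF_EXEC_PAGESIZE and a page-aligned rbase; the helper is illustrative only and not part of the patch:

    static void show_pt_load_rounding(abi_ulong rbase, const struct elf_phdr *p)
    {
        abi_ulong start = TARGET_ELF_PAGESTART(rbase + p->p_vaddr);
        abi_ulong slack = TARGET_ELF_PAGEOFFSET(p->p_vaddr);

        /*
         * Example: p_vaddr 0x201a0, p_offset 0x101a0, p_filesz 0x300 gives
         * start = rbase + 0x20000, length 0x4a0, file offset 0x10000.  The
         * offset subtraction only works because a well-formed PT_LOAD keeps
         * p_vaddr and p_offset congruent modulo the page size.
         */
        printf("map %#lx len %#lx from file offset %#lx\n",
               (unsigned long)start,
               (unsigned long)(p->p_filesz + slack),
               (unsigned long)(p->p_offset - slack));
    }
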
for (i = 0;i < elf_ex.e_phnum; i++) { + for (i = 0; i < elf_ex.e_phnum; i++) { if (elf_ppnt->p_type == PT_INTERP) { - if (elf_interpreter != NULL) - { + if (elf_interpreter != NULL) { free(elf_phdata); free(elf_interpreter); close(bprm->fd); return -EINVAL; } - /* This is the program interpreter used for - * shared libraries - for now assume that this - * is an a.out format binary - */ - elf_interpreter = (char *)malloc(elf_ppnt->p_filesz); - if (elf_interpreter == NULL) { free(elf_phdata); close(bprm->fd); @@ -1259,26 +680,11 @@ int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs, exit(-1); } - /* If the program interpreter is one of these two, - then assume an iBCS2 image. Otherwise assume - a native linux image. */ - - /* JRP - Need to add X86 lib dir stuff here... */ - - if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 || - strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) { - ibcs2_interpreter = 1; - } - -#if 0 - printf("Using ELF interpreter %s\n", path(elf_interpreter)); -#endif if (retval >= 0) { retval = open(path(elf_interpreter), O_RDONLY); if (retval >= 0) { interpreter_fd = retval; - } - else { + } else { perror(elf_interpreter); exit(-1); /* retval = -errno; */ @@ -1292,8 +698,7 @@ int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs, } } if (retval >= 0) { - interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */ - interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */ + interp_elf_ex = *((struct elfhdr *) bprm->buf); } if (retval < 0) { perror("load_elf_binary3"); @@ -1309,20 +714,8 @@ int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs, /* Some simple consistency checks for the interpreter */ if (elf_interpreter) { - interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT; - - /* Now figure out which format our binary is */ - if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) && - (N_MAGIC(interp_ex) != QMAGIC)) { - interpreter_type = INTERPRETER_ELF; - } - if (interp_elf_ex.e_ident[0] != 0x7f || - strncmp((char *)&interp_elf_ex.e_ident[1], "ELF", 3) != 0) { - interpreter_type &= ~INTERPRETER_ELF; - } - - if (!interpreter_type) { + strncmp((char *)&interp_elf_ex.e_ident[1], "ELF", 3) != 0) { free(elf_interpreter); free(elf_phdata); close(bprm->fd); @@ -1330,27 +723,15 @@ int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs, } } - /* OK, we are done with that, now set up the arg stuff, - and then start this sucker up */ - - { - char *passed_p; - - if (interpreter_type == INTERPRETER_AOUT) { - snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd); - passed_p = passed_fileno; - - if (elf_interpreter) { - bprm->p = copy_elf_strings(1, &passed_p, bprm->page, bprm->p); - bprm->argc++; - } - } - if (!bprm->p) { - free(elf_interpreter); - free(elf_phdata); - close(bprm->fd); - return -E2BIG; - } + /* + * OK, we are done with that, now set up the arg stuff, and then start this + * sucker up + */ + if (!bprm->p) { + free(elf_interpreter); + free(elf_phdata); + close(bprm->fd); + return -E2BIG; } /* OK, This is the point of no return */ @@ -1360,129 +741,49 @@ int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs, info->mmap = 0; elf_entry = (abi_ulong) elf_ex.e_entry; - /* - * In case where user has not explicitly set the guest_base, we - * probe here that should we set it automatically. 
- */ - if (!have_guest_base) { - /* - * Go through ELF program header table and find out whether - * any of the segments drop below our current mmap_min_addr and - * in that case set guest_base to corresponding address. - */ - for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; - i++, elf_ppnt++) { - if (elf_ppnt->p_type != PT_LOAD) - continue; - if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) { - guest_base = HOST_PAGE_ALIGN(mmap_min_addr); - break; - } + /* XXX Join this with PT_INTERP search? */ + baddr = 0; + for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) { + if (elf_ppnt->p_type != PT_LOAD) { + continue; } + baddr = elf_ppnt->p_vaddr; + break; } - /* Do this so that we can load the interpreter, if need be. We will - change some of these later */ + et_dyn_addr = 0; + if (elf_ex.e_type == ET_DYN && baddr == 0) { + et_dyn_addr = ELF_ET_DYN_LOAD_ADDR; + } + + /* + * Do this so that we can load the interpreter, if need be. We will + * change some of these later + */ info->rss = 0; - bprm->p = setup_arg_pages(bprm->p, bprm, info); + setup_arg_pages(bprm, info, &bprm->p, &bprm->stringp); info->start_stack = bprm->p; - /* Now we do a little grungy work by mmaping the ELF image into - * the correct location in memory. At this point, we assume that - * the image should be loaded at fixed address, not at a variable - * address. - */ + info->elf_flags = elf_ex.e_flags; + error = load_elf_sections(&elf_ex, elf_phdata, bprm->fd, et_dyn_addr, + &load_addr); for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) { - int elf_prot = 0; - int elf_flags = 0; - abi_ulong error; - - if (elf_ppnt->p_type != PT_LOAD) + if (elf_ppnt->p_type != PT_LOAD) { continue; - - if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ; - if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE; - if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC; - elf_flags = MAP_PRIVATE | MAP_DENYWRITE; - if (elf_ex.e_type == ET_EXEC || load_addr_set) { - elf_flags |= MAP_FIXED; - } else if (elf_ex.e_type == ET_DYN) { - /* Try and get dynamic programs out of the way of the default mmap - base, as well as whatever program they might try to exec. This - is because the brk will follow the loader, and is not movable. 
*/ - /* NOTE: for qemu, we do a big mmap to get enough space - without hardcoding any address */ - error = target_mmap(0, ET_DYN_MAP_SIZE, - PROT_NONE, MAP_PRIVATE | MAP_ANON, - -1, 0); - if (error == -1) { - perror("mmap"); - exit(-1); - } - load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr); } - - error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr), - (elf_ppnt->p_filesz + - TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)), - elf_prot, - (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE), - bprm->fd, - (elf_ppnt->p_offset - - TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr))); - if (error == -1) { - perror("mmap"); - exit(-1); - } - -#ifdef LOW_ELF_STACK - if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack) - elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr); -#endif - - if (!load_addr_set) { - load_addr_set = 1; - load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset; - if (elf_ex.e_type == ET_DYN) { - load_bias += error - - TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr); - load_addr += load_bias; - reloc_func_desc = load_bias; - } - } - k = elf_ppnt->p_vaddr; - if (k < start_code) - start_code = k; - if (start_data < k) - start_data = k; - k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz; - if (k > elf_bss) - elf_bss = k; - if ((elf_ppnt->p_flags & PF_X) && end_code < k) - end_code = k; - if (end_data < k) - end_data = k; - k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz; - if (k > elf_brk) elf_brk = k; + if (elf_ppnt->p_memsz > elf_ppnt->p_filesz) + elf_brk = MAX(elf_brk, et_dyn_addr + elf_ppnt->p_vaddr + + elf_ppnt->p_memsz); + } + if (error != 0) { + perror("load_elf_sections"); + exit(-1); } - elf_entry += load_bias; - elf_bss += load_bias; - elf_brk += load_bias; - start_code += load_bias; - end_code += load_bias; - start_data += load_bias; - end_data += load_bias; - if (elf_interpreter) { - if (interpreter_type & 1) { - elf_entry = load_aout_interp(&interp_ex, interpreter_fd); - } - else if (interpreter_type & 2) { - elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd, - &interp_load_addr); - } + elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd, + &interp_load_addr); reloc_func_desc = interp_load_addr; close(interpreter_fd); @@ -1494,72 +795,40 @@ int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs, exit(-1); return 0; } + } else { + interp_load_addr = et_dyn_addr; + elf_entry += interp_load_addr; } free(elf_phdata); - if (qemu_log_enabled()) + if (qemu_log_enabled()) { load_symbols(&elf_ex, bprm->fd); - - if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd); - info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX); - -#ifdef LOW_ELF_STACK - info->start_stack = bprm->p = elf_stack - 4; -#endif - bprm->p = create_elf_tables(bprm->p, - bprm->argc, - bprm->envc, - &elf_ex, - load_addr, load_bias, - interp_load_addr, - (interpreter_type == INTERPRETER_AOUT ? 
0 : 1), - info); - info->load_addr = reloc_func_desc; - info->start_brk = info->brk = elf_brk; - info->end_code = end_code; - info->start_code = start_code; - info->start_data = start_data; - info->end_data = end_data; - info->start_stack = bprm->p; - - /* Calling set_brk effectively mmaps the pages that we need for the bss and break - sections */ - set_brk(elf_bss, elf_brk); - - padzero(elf_bss, elf_brk); - -#if 0 - printf("(start_brk) %x\n" , info->start_brk); - printf("(end_code) %x\n" , info->end_code); - printf("(start_code) %x\n" , info->start_code); - printf("(end_data) %x\n" , info->end_data); - printf("(start_stack) %x\n" , info->start_stack); - printf("(brk) %x\n" , info->brk); -#endif - - if (info->personality == PER_SVR4) - { - /* Why this, you ask??? Well SVr4 maps page 0 as read-only, - and some applications "depend" upon this behavior. - Since we do not have the power to recompile these, we - emulate the SVr4 behavior. Sigh. */ - target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC, - MAP_FIXED | MAP_PRIVATE, -1, 0); } + close(bprm->fd); + + bprm->p = target_create_elf_tables(bprm->p, bprm->argc, bprm->envc, + bprm->stringp, &elf_ex, load_addr, + et_dyn_addr, interp_load_addr, info); + info->load_addr = reloc_func_desc; + info->start_brk = info->brk = elf_brk; + info->start_stack = bprm->p; + info->load_bias = 0; + info->entry = elf_entry; +#ifdef USE_ELF_CORE_DUMP + bprm->core_dump = &elf_core_dump; +#else + bprm->core_dump = NULL; +#endif + return 0; } -static int load_aout_interp(void *exptr, int interp_fd) -{ - printf("a.out interpreter not yet supported\n"); - return(0); -} - void do_init_thread(struct target_pt_regs *regs, struct image_info *infop) { - init_thread(regs, infop); + + target_thread_init(regs, infop); } diff --git a/bsd-user/errno_defs.h b/bsd-user/errno_defs.h index 1efa502a12..f3e8ac3488 100644 --- a/bsd-user/errno_defs.h +++ b/bsd-user/errno_defs.h @@ -1,6 +1,3 @@ -/* $OpenBSD: errno.h,v 1.20 2007/09/03 14:37:52 millert Exp $ */ -/* $NetBSD: errno.h,v 1.10 1996/01/20 01:33:53 jtc Exp $ */ - /* * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. @@ -37,6 +34,9 @@ * @(#)errno.h 8.5 (Berkeley) 1/21/94 */ +#ifndef ERRNO_DEFS_H +#define ERRNO_DEFS_H + #define TARGET_EPERM 1 /* Operation not permitted */ #define TARGET_ENOENT 2 /* No such file or directory */ #define TARGET_ESRCH 3 /* No such process */ @@ -147,3 +147,14 @@ #define TARGET_EIDRM 89 /* Identifier removed */ #define TARGET_ENOMSG 90 /* No message of desired type */ #define TARGET_ELAST 90 /* Must be equal largest errno */ + +/* Internal errors: */ +#define TARGET_EJUSTRETURN 254 /* Just return without modifing regs */ +#define TARGET_ERESTART 255 /* Restart syscall */ + +#include "special-errno.h" + +_Static_assert(TARGET_ERESTART == QEMU_ERESTARTSYS, + "TARGET_ERESTART and QEMU_ERESTARTSYS expected to match"); + +#endif /* ERRNO_DEFS_H */ diff --git a/bsd-user/freebsd/host-os.h b/bsd-user/freebsd/host-os.h new file mode 100644 index 0000000000..40cae72ec9 --- /dev/null +++ b/bsd-user/freebsd/host-os.h @@ -0,0 +1,25 @@ +/* + * FreeBSD host dependent code and definitions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef HOST_OS_H +#define HOST_OS_H + +#define HOST_DEFAULT_BSD_TYPE target_freebsd + +#endif /* HOST_OS_H */ diff --git a/bsd-user/freebsd/meson.build b/bsd-user/freebsd/meson.build new file mode 100644 index 0000000000..f87c788e84 --- /dev/null +++ b/bsd-user/freebsd/meson.build @@ -0,0 +1,4 @@ +bsd_user_ss.add(files( + 'os-sys.c', + 'os-syscall.c', +)) diff --git a/bsd-user/freebsd/os-sys.c b/bsd-user/freebsd/os-sys.c new file mode 100644 index 0000000000..309e27b9d6 --- /dev/null +++ b/bsd-user/freebsd/os-sys.c @@ -0,0 +1,27 @@ +/* + * FreeBSD sysctl() and sysarch() system call emulation + * + * Copyright (c) 2013-15 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu.h" +#include "target_arch_sysarch.h" + +/* sysarch() is architecture dependent. */ +abi_long do_freebsd_sysarch(void *cpu_env, abi_long arg1, abi_long arg2) +{ + return do_freebsd_arch_sysarch(cpu_env, arg1, arg2); +} diff --git a/bsd-user/freebsd/os-syscall.c b/bsd-user/freebsd/os-syscall.c new file mode 100644 index 0000000000..57996cad8a --- /dev/null +++ b/bsd-user/freebsd/os-syscall.c @@ -0,0 +1,534 @@ +/* + * BSD syscalls + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2013-2014 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +/* + * We need the FreeBSD "legacy" definitions. Rust needs the FreeBSD 11 system + * calls since it doesn't use libc at all, so we have to emulate that despite + * FreeBSD 11 being EOL'd. 
+ */ +#define _WANT_FREEBSD11_STAT +#define _WANT_FREEBSD11_STATFS +#define _WANT_FREEBSD11_DIRENT +#define _WANT_KERNEL_ERRNO +#define _WANT_SEMUN +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "qemu/path.h" +#include +#include +#include +#include +#include +#include + +#include "qemu.h" +#include "signal-common.h" +#include "user/syscall-trace.h" + +#include "bsd-file.h" +#include "bsd-proc.h" + +/* I/O */ +safe_syscall3(int, open, const char *, path, int, flags, mode_t, mode); +safe_syscall4(int, openat, int, fd, const char *, path, int, flags, mode_t, + mode); + +safe_syscall3(ssize_t, read, int, fd, void *, buf, size_t, nbytes); +safe_syscall4(ssize_t, pread, int, fd, void *, buf, size_t, nbytes, off_t, + offset); +safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt); +safe_syscall4(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt, + off_t, offset); + +safe_syscall3(ssize_t, write, int, fd, void *, buf, size_t, nbytes); +safe_syscall4(ssize_t, pwrite, int, fd, void *, buf, size_t, nbytes, off_t, + offset); +safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt); +safe_syscall4(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt, + off_t, offset); + +void target_set_brk(abi_ulong new_brk) +{ +} + +/* + * errno conversion. + */ +abi_long get_errno(abi_long ret) +{ + if (ret == -1) { + return -host_to_target_errno(errno); + } else { + return ret; + } +} + +int host_to_target_errno(int err) +{ + /* + * All the BSDs have the property that the error numbers are uniform across + * all architectures for a given BSD, though they may vary between different + * BSDs. + */ + return err; +} + +bool is_error(abi_long ret) +{ + return (abi_ulong)ret >= (abi_ulong)(-4096); +} + +/* + * Unlocks a iovec. Unlike unlock_iovec, it assumes the tvec array itself is + * already locked from target_addr. It will be unlocked as well as all the iovec + * elements. + */ +static void helper_unlock_iovec(struct target_iovec *target_vec, + abi_ulong target_addr, struct iovec *vec, + int count, int copy) +{ + for (int i = 0; i < count; i++) { + abi_ulong base = tswapal(target_vec[i].iov_base); + + if (vec[i].iov_base) { + unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0); + } + } + unlock_user(target_vec, target_addr, 0); +} + +struct iovec *lock_iovec(int type, abi_ulong target_addr, + int count, int copy) +{ + struct target_iovec *target_vec; + struct iovec *vec; + abi_ulong total_len, max_len; + int i; + int err = 0; + + if (count == 0) { + errno = 0; + return NULL; + } + if (count < 0 || count > IOV_MAX) { + errno = EINVAL; + return NULL; + } + + vec = g_try_new0(struct iovec, count); + if (vec == NULL) { + errno = ENOMEM; + return NULL; + } + + target_vec = lock_user(VERIFY_READ, target_addr, + count * sizeof(struct target_iovec), 1); + if (target_vec == NULL) { + err = EFAULT; + goto fail2; + } + + max_len = 0x7fffffff & MIN(TARGET_PAGE_MASK, PAGE_MASK); + total_len = 0; + + for (i = 0; i < count; i++) { + abi_ulong base = tswapal(target_vec[i].iov_base); + abi_long len = tswapal(target_vec[i].iov_len); + + if (len < 0) { + err = EINVAL; + goto fail; + } else if (len == 0) { + /* Zero length pointer is ignored. */ + vec[i].iov_base = 0; + } else { + vec[i].iov_base = lock_user(type, base, len, copy); + /* + * If the first buffer pointer is bad, this is a fault. But + * subsequent bad buffers will result in a partial write; this is + * realized by filling the vector with null pointers and zero + * lengths. 
+ */ + if (!vec[i].iov_base) { + if (i == 0) { + err = EFAULT; + goto fail; + } else { + /* + * Fail all the subsequent addresses, they are already + * zero'd. + */ + goto out; + } + } + if (len > max_len - total_len) { + len = max_len - total_len; + } + } + vec[i].iov_len = len; + total_len += len; + } +out: + unlock_user(target_vec, target_addr, 0); + return vec; + +fail: + helper_unlock_iovec(target_vec, target_addr, vec, i, copy); +fail2: + g_free(vec); + errno = err; + return NULL; +} + +void unlock_iovec(struct iovec *vec, abi_ulong target_addr, + int count, int copy) +{ + struct target_iovec *target_vec; + + target_vec = lock_user(VERIFY_READ, target_addr, + count * sizeof(struct target_iovec), 1); + if (target_vec) { + helper_unlock_iovec(target_vec, target_addr, vec, count, copy); + } + + g_free(vec); +} + +/* + * All errnos that freebsd_syscall() returns must be -TARGET_. + */ +static abi_long freebsd_syscall(void *cpu_env, int num, abi_long arg1, + abi_long arg2, abi_long arg3, abi_long arg4, + abi_long arg5, abi_long arg6, abi_long arg7, + abi_long arg8) +{ + abi_long ret; + + switch (num) { + /* + * process system calls + */ + case TARGET_FREEBSD_NR_exit: /* exit(2) */ + ret = do_bsd_exit(cpu_env, arg1); + break; + + /* + * File system calls. + */ + case TARGET_FREEBSD_NR_read: /* read(2) */ + ret = do_bsd_read(arg1, arg2, arg3); + break; + + case TARGET_FREEBSD_NR_pread: /* pread(2) */ + ret = do_bsd_pread(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); + break; + + case TARGET_FREEBSD_NR_readv: /* readv(2) */ + ret = do_bsd_readv(arg1, arg2, arg3); + break; + + case TARGET_FREEBSD_NR_preadv: /* preadv(2) */ + ret = do_bsd_preadv(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); + + case TARGET_FREEBSD_NR_write: /* write(2) */ + ret = do_bsd_write(arg1, arg2, arg3); + break; + + case TARGET_FREEBSD_NR_pwrite: /* pwrite(2) */ + ret = do_bsd_pwrite(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); + break; + + case TARGET_FREEBSD_NR_writev: /* writev(2) */ + ret = do_bsd_writev(arg1, arg2, arg3); + break; + + case TARGET_FREEBSD_NR_pwritev: /* pwritev(2) */ + ret = do_bsd_pwritev(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); + break; + + case TARGET_FREEBSD_NR_open: /* open(2) */ + ret = do_bsd_open(arg1, arg2, arg3); + break; + + case TARGET_FREEBSD_NR_openat: /* openat(2) */ + ret = do_bsd_openat(arg1, arg2, arg3, arg4); + break; + + case TARGET_FREEBSD_NR_close: /* close(2) */ + ret = do_bsd_close(arg1); + break; + + case TARGET_FREEBSD_NR_fdatasync: /* fdatasync(2) */ + ret = do_bsd_fdatasync(arg1); + break; + + case TARGET_FREEBSD_NR_fsync: /* fsync(2) */ + ret = do_bsd_fsync(arg1); + break; + + case TARGET_FREEBSD_NR_freebsd12_closefrom: /* closefrom(2) */ + ret = do_bsd_closefrom(arg1); + break; + + case TARGET_FREEBSD_NR_revoke: /* revoke(2) */ + ret = do_bsd_revoke(arg1); + break; + + case TARGET_FREEBSD_NR_access: /* access(2) */ + ret = do_bsd_access(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_eaccess: /* eaccess(2) */ + ret = do_bsd_eaccess(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_faccessat: /* faccessat(2) */ + ret = do_bsd_faccessat(arg1, arg2, arg3, arg4); + break; + + case TARGET_FREEBSD_NR_chdir: /* chdir(2) */ + ret = do_bsd_chdir(arg1); + break; + + case TARGET_FREEBSD_NR_fchdir: /* fchdir(2) */ + ret = do_bsd_fchdir(arg1); + break; + + case TARGET_FREEBSD_NR_rename: /* rename(2) */ + ret = do_bsd_rename(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_renameat: /* renameat(2) */ + ret = do_bsd_renameat(arg1, arg2, arg3, arg4); + break; + + case 
TARGET_FREEBSD_NR_link: /* link(2) */ + ret = do_bsd_link(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_linkat: /* linkat(2) */ + ret = do_bsd_linkat(arg1, arg2, arg3, arg4, arg5); + break; + + case TARGET_FREEBSD_NR_unlink: /* unlink(2) */ + ret = do_bsd_unlink(arg1); + break; + + case TARGET_FREEBSD_NR_unlinkat: /* unlinkat(2) */ + ret = do_bsd_unlinkat(arg1, arg2, arg3); + break; + + case TARGET_FREEBSD_NR_mkdir: /* mkdir(2) */ + ret = do_bsd_mkdir(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_mkdirat: /* mkdirat(2) */ + ret = do_bsd_mkdirat(arg1, arg2, arg3); + break; + + case TARGET_FREEBSD_NR_rmdir: /* rmdir(2) (XXX no rmdirat()?) */ + ret = do_bsd_rmdir(arg1); + break; + + case TARGET_FREEBSD_NR___getcwd: /* undocumented __getcwd() */ + ret = do_bsd___getcwd(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_dup: /* dup(2) */ + ret = do_bsd_dup(arg1); + break; + + case TARGET_FREEBSD_NR_dup2: /* dup2(2) */ + ret = do_bsd_dup2(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_truncate: /* truncate(2) */ + ret = do_bsd_truncate(cpu_env, arg1, arg2, arg3, arg4); + break; + + case TARGET_FREEBSD_NR_ftruncate: /* ftruncate(2) */ + ret = do_bsd_ftruncate(cpu_env, arg1, arg2, arg3, arg4); + break; + + case TARGET_FREEBSD_NR_acct: /* acct(2) */ + ret = do_bsd_acct(arg1); + break; + + case TARGET_FREEBSD_NR_sync: /* sync(2) */ + ret = do_bsd_sync(); + break; + + case TARGET_FREEBSD_NR_mount: /* mount(2) */ + ret = do_bsd_mount(arg1, arg2, arg3, arg4); + break; + + case TARGET_FREEBSD_NR_unmount: /* unmount(2) */ + ret = do_bsd_unmount(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_nmount: /* nmount(2) */ + ret = do_bsd_nmount(arg1, arg2, arg3); + break; + + case TARGET_FREEBSD_NR_symlink: /* symlink(2) */ + ret = do_bsd_symlink(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_symlinkat: /* symlinkat(2) */ + ret = do_bsd_symlinkat(arg1, arg2, arg3); + break; + + case TARGET_FREEBSD_NR_readlink: /* readlink(2) */ + ret = do_bsd_readlink(cpu_env, arg1, arg2, arg3); + break; + + case TARGET_FREEBSD_NR_readlinkat: /* readlinkat(2) */ + ret = do_bsd_readlinkat(arg1, arg2, arg3, arg4); + break; + + case TARGET_FREEBSD_NR_chmod: /* chmod(2) */ + ret = do_bsd_chmod(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_fchmod: /* fchmod(2) */ + ret = do_bsd_fchmod(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_lchmod: /* lchmod(2) */ + ret = do_bsd_lchmod(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_fchmodat: /* fchmodat(2) */ + ret = do_bsd_fchmodat(arg1, arg2, arg3, arg4); + break; + + case TARGET_FREEBSD_NR_freebsd11_mknod: /* mknod(2) */ + ret = do_bsd_freebsd11_mknod(arg1, arg2, arg3); + break; + + case TARGET_FREEBSD_NR_freebsd11_mknodat: /* mknodat(2) */ + ret = do_bsd_freebsd11_mknodat(arg1, arg2, arg3, arg4); + break; + + case TARGET_FREEBSD_NR_mknodat: /* mknodat(2) */ + ret = do_bsd_mknodat(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); + break; + + case TARGET_FREEBSD_NR_chown: /* chown(2) */ + ret = do_bsd_chown(arg1, arg2, arg3); + break; + + case TARGET_FREEBSD_NR_fchown: /* fchown(2) */ + ret = do_bsd_fchown(arg1, arg2, arg3); + break; + + case TARGET_FREEBSD_NR_lchown: /* lchown(2) */ + ret = do_bsd_lchown(arg1, arg2, arg3); + break; + + case TARGET_FREEBSD_NR_fchownat: /* fchownat(2) */ + ret = do_bsd_fchownat(arg1, arg2, arg3, arg4, arg5); + break; + + case TARGET_FREEBSD_NR_chflags: /* chflags(2) */ + ret = do_bsd_chflags(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_lchflags: /* lchflags(2) */ + ret = do_bsd_lchflags(arg1, arg2); + break; + + case 
TARGET_FREEBSD_NR_fchflags: /* fchflags(2) */ + ret = do_bsd_fchflags(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_chroot: /* chroot(2) */ + ret = do_bsd_chroot(arg1); + break; + + case TARGET_FREEBSD_NR_flock: /* flock(2) */ + ret = do_bsd_flock(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_mkfifo: /* mkfifo(2) */ + ret = do_bsd_mkfifo(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_mkfifoat: /* mkfifoat(2) */ + ret = do_bsd_mkfifoat(arg1, arg2, arg3); + break; + + case TARGET_FREEBSD_NR_pathconf: /* pathconf(2) */ + ret = do_bsd_pathconf(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_lpathconf: /* lpathconf(2) */ + ret = do_bsd_lpathconf(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_fpathconf: /* fpathconf(2) */ + ret = do_bsd_fpathconf(arg1, arg2); + break; + + case TARGET_FREEBSD_NR_undelete: /* undelete(2) */ + ret = do_bsd_undelete(arg1); + break; + + default: + qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num); + ret = -TARGET_ENOSYS; + break; + } + + return ret; +} + +/* + * do_freebsd_syscall() should always have a single exit point at the end so + * that actions, such as logging of syscall results, can be performed. This + * as a wrapper around freebsd_syscall() so that actually happens. Since + * that is a singleton, modern compilers will inline it anyway... + */ +abi_long do_freebsd_syscall(void *cpu_env, int num, abi_long arg1, + abi_long arg2, abi_long arg3, abi_long arg4, + abi_long arg5, abi_long arg6, abi_long arg7, + abi_long arg8) +{ + CPUState *cpu = env_cpu(cpu_env); + int ret; + + trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); + if (do_strace) { + print_freebsd_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); + } + + ret = freebsd_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6, + arg7, arg8); + if (do_strace) { + print_freebsd_syscall_ret(num, ret); + } + trace_guest_user_syscall_ret(cpu, num, ret); + + return ret; +} + +void syscall_init(void) +{ +} diff --git a/bsd-user/freebsd/strace.list b/bsd-user/freebsd/strace.list index b01b5f36e8..275d2dbe27 100644 --- a/bsd-user/freebsd/strace.list +++ b/bsd-user/freebsd/strace.list @@ -33,10 +33,6 @@ { TARGET_FREEBSD_NR___syscall, "__syscall", NULL, NULL, NULL }, { TARGET_FREEBSD_NR___sysctl, "__sysctl", NULL, print_sysctl, NULL }, { TARGET_FREEBSD_NR__umtx_op, "_umtx_op", "%s(%#x, %d, %d, %#x, %#x)", NULL, NULL }, -#if defined(__FreeBSD_version) && __FreeBSD_version < 1000000 -{ TARGET_FREEBSD_NR__umtx_lock, "__umtx_lock", NULL, NULL, NULL }, -{ TARGET_FREEBSD_NR__umtx_unlock, "__umtx_unlock", NULL, NULL, NULL }, -#endif { TARGET_FREEBSD_NR_accept, "accept", "%s(%d,%#x,%#x)", NULL, NULL }, { TARGET_FREEBSD_NR_accept4, "accept4", "%s(%d,%d,%#x,%#x)", NULL, NULL }, { TARGET_FREEBSD_NR_access, "access", "%s(\"%s\",%#o)", NULL, NULL }, @@ -49,10 +45,6 @@ { TARGET_FREEBSD_NR_cap_fcntls_get, "cap_fcntls_get", NULL, NULL, NULL }, { TARGET_FREEBSD_NR_cap_fcntls_limit, "cap_fcntls_limit", NULL, NULL, NULL }, { TARGET_FREEBSD_NR_cap_getmode, "cap_getmode", NULL, NULL, NULL }, -#if defined(__FreeBSD_version) && __FreeBSD_version < 1000000 -{ TARGET_FREEBSD_NR_cap_getrights, "cap_getrights", NULL, NULL, NULL }, -{ TARGET_FREEBSD_NR_cap_new, "cap_new", NULL, NULL, NULL }, -#endif { TARGET_FREEBSD_NR_cap_ioctls_get, "cap_ioctls_get", NULL, NULL, NULL }, { TARGET_FREEBSD_NR_cap_ioctls_limit, "cap_ioctls_limit", NULL, NULL, NULL }, { TARGET_FREEBSD_NR_cap_rights_limit, "cap_rights_limit", NULL, NULL, NULL }, @@ -146,9 +138,6 @@ { TARGET_FREEBSD_NR_freebsd11_kevent, 
"freebsd11_kevent", NULL, NULL, NULL }, { TARGET_FREEBSD_NR_kevent, "kevent", NULL, NULL, NULL }, { TARGET_FREEBSD_NR_kill, "kill", NULL, NULL, NULL }, -#if defined(__FreeBSD_version) && __FreeBSD_version < 1000000 -{ TARGET_FREEBSD_NR_killpg, "killpg", NULL, NULL, NULL }, -#endif { TARGET_FREEBSD_NR_kqueue, "kqueue", NULL, NULL, NULL }, { TARGET_FREEBSD_NR_ktrace, "ktrace", NULL, NULL, NULL }, { TARGET_FREEBSD_NR_lchown, "lchown", NULL, NULL, NULL }, diff --git a/bsd-user/freebsd/target_os_elf.h b/bsd-user/freebsd/target_os_elf.h new file mode 100644 index 0000000000..9df17d56d8 --- /dev/null +++ b/bsd-user/freebsd/target_os_elf.h @@ -0,0 +1,138 @@ +/* + * freebsd ELF definitions + * + * Copyright (c) 2013-15 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_OS_ELF_H +#define TARGET_OS_ELF_H + +#include "target_arch_elf.h" +#include "elf.h" + +#define bsd_get_ncpu() 1 /* until we pull in bsd-proc.[hc] */ + +/* this flag is uneffective under linux too, should be deleted */ +#ifndef MAP_DENYWRITE +#define MAP_DENYWRITE 0 +#endif + +/* should probably go in elf.h */ +#ifndef ELIBBAD +#define ELIBBAD 80 +#endif + +#ifndef ELF_PLATFORM +#define ELF_PLATFORM (NULL) +#endif + +/* XXX Look at the other conflicting AT_* values. */ +#define FREEBSD_AT_NCPUS 19 +#define FREEBSD_AT_HWCAP 25 +#define FREEBSD_AT_HWCAP2 26 + +#ifdef TARGET_ABI32 +#undef ELF_CLASS +#define ELF_CLASS ELFCLASS32 +#undef bswaptls +#define bswaptls(ptr) bswap32s(ptr) +#endif + +/* max code+data+bss space allocated to elf interpreter */ +#define INTERP_MAP_SIZE (32 * 1024 * 1024) + +/* max code+data+bss+brk space allocated to ET_DYN executables */ +#define ET_DYN_MAP_SIZE (128 * 1024 * 1024) + +/* Necessary parameters */ +#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE +#define TARGET_ELF_PAGESTART(_v) ((_v) & \ + ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE - 1)) +#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE - 1)) + +#define DLINFO_ITEMS 14 + +static abi_ulong target_create_elf_tables(abi_ulong p, int argc, int envc, + abi_ulong stringp, + struct elfhdr *exec, + abi_ulong load_addr, + abi_ulong load_bias, + abi_ulong interp_load_addr, + struct image_info *info) +{ + abi_ulong features, sp; + int size; + const int n = sizeof(elf_addr_t); + + target_auxents_sz = 0; + sp = p; + /* + * Force 16 byte _final_ alignment here for generality. + */ + sp = sp & ~(abi_ulong)15; + size = (DLINFO_ITEMS + 1) * 2; + size += envc + argc + 2; + size += 1; /* argc itself */ + size *= n; + if (size & 15) { + sp -= 16 - (size & 15); + } + + /* + * FreeBSD defines elf_addr_t as Elf32_Off / Elf64_Off + */ +#define NEW_AUX_ENT(id, val) do { \ + sp -= n; put_user_ual(val, sp); \ + sp -= n; put_user_ual(id, sp); \ + target_auxents_sz += 2 * n; \ + } while (0) + + NEW_AUX_ENT(AT_NULL, 0); + + /* There must be exactly DLINFO_ITEMS entries here. 
*/ + NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff)); + NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof(struct elf_phdr))); + NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum)); + NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE)); + NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr)); + NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0); + NEW_AUX_ENT(FREEBSD_AT_NCPUS, (abi_ulong)bsd_get_ncpu()); + NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry); + features = ELF_HWCAP; + NEW_AUX_ENT(FREEBSD_AT_HWCAP, features); +#ifdef ELF_HWCAP2 + features = ELF_HWCAP2; + NEW_AUX_ENT(FREEBSD_AT_HWCAP2, features); +#endif + NEW_AUX_ENT(AT_UID, (abi_ulong)getuid()); + NEW_AUX_ENT(AT_EUID, (abi_ulong)geteuid()); + NEW_AUX_ENT(AT_GID, (abi_ulong)getgid()); + NEW_AUX_ENT(AT_EGID, (abi_ulong)getegid()); + target_auxents = sp; /* Note where the aux entries are in the target */ +#ifdef ARCH_DLINFO + /* + * ARCH_DLINFO must come last so platform specific code can enforce + * special alignment requirements on the AUXV if necessary (eg. PPC). + */ + ARCH_DLINFO; +#endif +#undef NEW_AUX_ENT + + sp = loader_build_argptr(envc, argc, sp, stringp); + return sp; +} + +#endif /* TARGET_OS_ELF_H */ diff --git a/bsd-user/freebsd/target_os_siginfo.h b/bsd-user/freebsd/target_os_siginfo.h new file mode 100644 index 0000000000..4573738752 --- /dev/null +++ b/bsd-user/freebsd/target_os_siginfo.h @@ -0,0 +1,159 @@ +/* + * FreeBSD siginfo related definitions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_OS_SIGINFO_H +#define TARGET_OS_SIGINFO_H + +#define TARGET_NSIG 128 +#define TARGET_NSIG_BPW (sizeof(uint32_t) * 8) +#define TARGET_NSIG_WORDS (TARGET_NSIG / TARGET_NSIG_BPW) + +/* this struct defines a stack used during syscall handling */ +typedef struct target_sigaltstack { + abi_long ss_sp; + abi_ulong ss_size; + abi_long ss_flags; +} target_stack_t; + +typedef struct { + uint32_t __bits[TARGET_NSIG_WORDS]; +} target_sigset_t; + +struct target_sigaction { + abi_ulong _sa_handler; + int32_t sa_flags; + target_sigset_t sa_mask; +}; + +typedef union target_sigval { + int32_t sival_int; + abi_ulong sival_ptr; + int32_t sigval_int; + abi_ulong sigval_ptr; +} target_sigval_t; + +typedef struct target_siginfo { + int32_t si_signo; /* signal number */ + int32_t si_errno; /* errno association */ + int32_t si_code; /* signal code */ + int32_t si_pid; /* sending process */ + int32_t si_uid; /* sender's ruid */ + int32_t si_status; /* exit value */ + abi_ulong si_addr; /* faulting instruction */ + union target_sigval si_value; /* signal value */ + union { + struct { + int32_t _trapno; /* machine specific trap code */ + } _fault; + + /* POSIX.1b timers */ + struct { + int32_t _timerid; + int32_t _overrun; + } _timer; + + struct { + int32_t _mqd; + } _mesgp; + + /* SIGPOLL -- Not really generated in FreeBSD ??? 
*/ + struct { + int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ + } _poll; + + struct { + int _mqd; + } _mesgq; + + struct { + /* + * Syscall number for signals delivered as a result of system calls + * denied by Capsicum. + */ + int _syscall; + } _capsicum; + + /* Spare for future growth */ + struct { + abi_long __spare1__; + int32_t __spare2_[7]; + } __spare__; + } _reason; +} target_siginfo_t; + +struct target_sigevent { + abi_int sigev_notify; + abi_int sigev_signo; + target_sigval_t sigev_value; + union { + abi_int _threadid; + + /* + * The kernel (and thus QEMU) never looks at these; + * they're only used as part of the ABI between a + * userspace program and libc. + */ + struct { + abi_ulong _function; + abi_ulong _attribute; + } _sigev_thread; + abi_ushort _kevent_flags; + abi_long _pad[8]; + } _sigev_un; +}; + +#define target_si_signo si_signo +#define target_si_code si_code +#define target_si_errno si_errno +#define target_si_addr si_addr + +/* SIGILL si_codes */ +#define TARGET_ILL_ILLOPC (1) /* Illegal opcode. */ +#define TARGET_ILL_ILLOPN (2) /* Illegal operand. */ +#define TARGET_ILL_ILLADR (3) /* Illegal addressing mode. */ +#define TARGET_ILL_ILLTRP (4) /* Illegal trap. */ +#define TARGET_ILL_PRVOPC (5) /* Privileged opcode. */ +#define TARGET_ILL_PRVREG (6) /* Privileged register. */ +#define TARGET_ILL_COPROC (7) /* Coprocessor error. */ +#define TARGET_ILL_BADSTK (8) /* Internal stack error. */ + +/* SIGSEGV si_codes */ +#define TARGET_SEGV_MAPERR (1) /* address not mapped to object */ +#define TARGET_SEGV_ACCERR (2) /* invalid permissions for mapped object */ + +/* SIGTRAP si_codes */ +#define TARGET_TRAP_BRKPT (1) /* process breakpoint */ +#define TARGET_TRAP_TRACE (2) /* process trace trap */ + +/* SIGBUS si_codes */ +#define TARGET_BUS_ADRALN (1) +#define TARGET_BUS_ADRERR (2) +#define TARGET_BUS_OBJERR (3) + +/* SIGFPE codes */ +#define TARGET_FPE_INTOVF (1) /* Integer overflow. */ +#define TARGET_FPE_INTDIV (2) /* Integer divide by zero. */ +#define TARGET_FPE_FLTDIV (3) /* Floating point divide by zero. */ +#define TARGET_FPE_FLTOVF (4) /* Floating point overflow. */ +#define TARGET_FPE_FLTUND (5) /* Floating point underflow. */ +#define TARGET_FPE_FLTRES (6) /* Floating point inexact result. */ +#define TARGET_FPE_FLTINV (7) /* Invalid floating point operation. */ +#define TARGET_FPE_FLTSUB (8) /* Subscript out of range. 
*/ + +#endif /* TARGET_OS_SIGINFO_H */ diff --git a/bsd-user/freebsd/target_os_signal.h b/bsd-user/freebsd/target_os_signal.h new file mode 100644 index 0000000000..5030abb52b --- /dev/null +++ b/bsd-user/freebsd/target_os_signal.h @@ -0,0 +1,81 @@ +#ifndef TARGET_OS_SIGNAL_H +#define TARGET_OS_SIGNAL_H + +#include "target_os_siginfo.h" +#include "target_arch_signal.h" + +abi_long setup_sigframe_arch(CPUArchState *env, abi_ulong frame_addr, + struct target_sigframe *frame, int flags); + +/* Compare to sys/signal.h */ +#define TARGET_SIGHUP 1 /* hangup */ +#define TARGET_SIGINT 2 /* interrupt */ +#define TARGET_SIGQUIT 3 /* quit */ +#define TARGET_SIGILL 4 /* illegal instruction (not reset when caught) */ +#define TARGET_SIGTRAP 5 /* trace trap (not reset when caught) */ +#define TARGET_SIGABRT 6 /* abort() */ +#define TARGET_SIGIOT SIGABRT /* compatibility */ +#define TARGET_SIGEMT 7 /* EMT instruction */ +#define TARGET_SIGFPE 8 /* floating point exception */ +#define TARGET_SIGKILL 9 /* kill (cannot be caught or ignored) */ +#define TARGET_SIGBUS 10 /* bus error */ +#define TARGET_SIGSEGV 11 /* segmentation violation */ +#define TARGET_SIGSYS 12 /* bad argument to system call */ +#define TARGET_SIGPIPE 13 /* write on a pipe with no one to read it */ +#define TARGET_SIGALRM 14 /* alarm clock */ +#define TARGET_SIGTERM 15 /* software termination signal from kill */ +#define TARGET_SIGURG 16 /* urgent condition on IO channel */ +#define TARGET_SIGSTOP 17 /* sendable stop signal not from tty */ +#define TARGET_SIGTSTP 18 /* stop signal from tty */ +#define TARGET_SIGCONT 19 /* continue a stopped process */ +#define TARGET_SIGCHLD 20 /* to parent on child stop or exit */ +#define TARGET_SIGTTIN 21 /* to readers pgrp upon background tty read */ +#define TARGET_SIGTTOU 22 /* like TTIN for output if(tp->t_local<OSTOP)*/ +#define TARGET_SIGIO 23 /* input/output possible signal */ +#define TARGET_SIGXCPU 24 /* exceeded CPU time limit */ +#define TARGET_SIGXFSZ 25 /* exceeded file size limit */ +#define TARGET_SIGVTALRM 26 /* virtual time alarm */ +#define TARGET_SIGPROF 27 /* profiling time alarm */ +#define TARGET_SIGWINCH 28 /* window size changes */ +#define TARGET_SIGINFO 29 /* information request */ +#define TARGET_SIGUSR1 30 /* user defined signal 1 */ +#define TARGET_SIGUSR2 31 /* user defined signal 2 */ +#define TARGET_SIGTHR 32 /* reserved by thread library */ +#define TARGET_SIGLWP SIGTHR /* compatibility */ +#define TARGET_SIGLIBRT 33 /* reserved by the real-time library */ +#define TARGET_SIGRTMIN 65 +#define TARGET_SIGRTMAX 126 + +/* + * Language spec says we must list exactly one parameter, even though we + * actually supply three. Ugh! 
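+ * (That is, the guest handler type is declared with a single int argument, + * while a handler installed with SA_SIGINFO is actually invoked with three: + * the signal number, a pointer to the siginfo and a pointer to the ucontext.)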
+ */ +#define TARGET_SIG_DFL ((abi_long)0) /* default signal handling */ +#define TARGET_SIG_IGN ((abi_long)1) /* ignore signal */ +#define TARGET_SIG_ERR ((abi_long)-1) /* error return from signal */ + +#define TARGET_SA_ONSTACK 0x0001 /* take signal on signal stack */ +#define TARGET_SA_RESTART 0x0002 /* restart system on signal return */ +#define TARGET_SA_RESETHAND 0x0004 /* reset to SIG_DFL when taking signal */ +#define TARGET_SA_NODEFER 0x0010 /* don't mask the signal we're delivering */ +#define TARGET_SA_NOCLDWAIT 0x0020 /* don't create zombies (assign to pid 1) */ +#define TARGET_SA_USERTRAMP 0x0100 /* do not bounce off kernel's sigtramp */ +#define TARGET_SA_NOCLDSTOP 0x0008 /* do not generate SIGCHLD on child stop */ +#define TARGET_SA_SIGINFO 0x0040 /* generate siginfo_t */ + +/* + * Flags for sigprocmask: + */ +#define TARGET_SIG_BLOCK 1 /* block specified signal set */ +#define TARGET_SIG_UNBLOCK 2 /* unblock specified signal set */ +#define TARGET_SIG_SETMASK 3 /* set specified signal set */ + +#define TARGET_BADSIG SIG_ERR + +/* + * sigaltstack control + */ +#define TARGET_SS_ONSTACK 0x0001 /* take signals on alternate stack */ +#define TARGET_SS_DISABLE 0x0004 /* disable taking signals on alternate stack*/ + +#endif /* TARGET_OS_SIGNAL_H */ diff --git a/bsd-user/freebsd/target_os_stack.h b/bsd-user/freebsd/target_os_stack.h new file mode 100644 index 0000000000..0590133291 --- /dev/null +++ b/bsd-user/freebsd/target_os_stack.h @@ -0,0 +1,181 @@ +/* + * FreeBSD setup_initial_stack() implementation. + * + * Copyright (c) 2013-14 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_OS_STACK_H +#define TARGET_OS_STACK_H + +#include +#include "target_arch_sigtramp.h" +#include "qemu/guest-random.h" + +/* + * The initial FreeBSD stack is as follows: + * (see kern/kern_exec.c exec_copyout_strings() ) + * + * Hi Address -> char **ps_argvstr (struct ps_strings for ps, w, etc.) + * unsigned ps_nargvstr + * char **ps_envstr + * PS_STRINGS -> unsigned ps_nenvstr + * + * machine dependent sigcode (sv_sigcode of size + * sv_szsigcode) + * + * execpath (absolute image path for rtld) + * + * SSP Canary (sizeof(long) * 8) + * + * page sizes array (usually sizeof(u_long) ) + * + * "destp" -> argv, env strings (up to 262144 bytes) + */ +static inline int setup_initial_stack(struct bsd_binprm *bprm, + abi_ulong *ret_addr, abi_ulong *stringp) +{ + int i; + abi_ulong stack_hi_addr; + size_t execpath_len, stringspace; + abi_ulong destp, argvp, envp, p; + struct target_ps_strings ps_strs; + char canary[sizeof(abi_long) * 8]; + + stack_hi_addr = p = target_stkbas + target_stksiz; + + /* Save some space for ps_strings. */ + p -= sizeof(struct target_ps_strings); + + /* Add machine dependent sigcode. 
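(Reserve TARGET_SZSIGCODE bytes here; the arch-dependent setup_sigtramp() call below then installs the signal trampoline at this address.)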
*/ + p -= TARGET_SZSIGCODE; + if (setup_sigtramp(p, (unsigned)offsetof(struct target_sigframe, sf_uc), + TARGET_FREEBSD_NR_sigreturn)) { + errno = EFAULT; + return -1; + } + if (bprm->fullpath) { + execpath_len = strlen(bprm->fullpath) + 1; + p -= roundup(execpath_len, sizeof(abi_ulong)); + if (memcpy_to_target(p, bprm->fullpath, execpath_len)) { + errno = EFAULT; + return -1; + } + } + /* Add canary for SSP. */ + qemu_guest_getrandom_nofail(canary, sizeof(canary)); + p -= roundup(sizeof(canary), sizeof(abi_ulong)); + if (memcpy_to_target(p, canary, sizeof(canary))) { + errno = EFAULT; + return -1; + } + /* Add page sizes array. */ + p -= sizeof(abi_ulong); + if (put_user_ual(TARGET_PAGE_SIZE, p)) { + errno = EFAULT; + return -1; + } + /* + * Deviate from FreeBSD stack layout: force stack to new page here + * so that signal trampoline is not sharing the page with user stack + * frames. This is actively harmful in qemu as it marks pages with + * code it translated as read-only, which is somewhat problematic + * for user trying to use the stack as intended. + */ + p = rounddown(p, TARGET_PAGE_SIZE); + + /* Calculate the string space needed */ + stringspace = 0; + for (i = 0; i < bprm->argc; ++i) { + stringspace += strlen(bprm->argv[i]) + 1; + } + for (i = 0; i < bprm->envc; ++i) { + stringspace += strlen(bprm->envp[i]) + 1; + } + if (stringspace > TARGET_ARG_MAX) { + errno = ENOMEM; + return -1; + } + /* Make room for the argv and envp strings */ + destp = rounddown(p - stringspace, sizeof(abi_ulong)); + p = argvp = destp - (bprm->argc + bprm->envc + 2) * sizeof(abi_ulong); + /* Remember the strings pointer */ + if (stringp) { + *stringp = destp; + } + /* + * Add argv strings. Note that the argv[] vectors are added by + * loader_build_argptr() + */ + /* XXX need to make room for auxargs */ + ps_strs.ps_argvstr = tswapl(argvp); + ps_strs.ps_nargvstr = tswap32(bprm->argc); + for (i = 0; i < bprm->argc; ++i) { + size_t len = strlen(bprm->argv[i]) + 1; + + if (memcpy_to_target(destp, bprm->argv[i], len)) { + errno = EFAULT; + return -1; + } + if (put_user_ual(destp, argvp)) { + errno = EFAULT; + return -1; + } + argvp += sizeof(abi_ulong); + destp += len; + } + if (put_user_ual(0, argvp)) { + errno = EFAULT; + return -1; + } + /* + * Add env strings. Note that the envp[] vectors are added by + * loader_build_argptr(). + */ + envp = argvp + sizeof(abi_ulong); + ps_strs.ps_envstr = tswapl(envp); + ps_strs.ps_nenvstr = tswap32(bprm->envc); + for (i = 0; i < bprm->envc; ++i) { + size_t len = strlen(bprm->envp[i]) + 1; + + if (memcpy_to_target(destp, bprm->envp[i], len)) { + errno = EFAULT; + return -1; + } + if (put_user_ual(destp, envp)) { + errno = EFAULT; + return -1; + } + envp += sizeof(abi_ulong); + destp += len; + } + if (put_user_ual(0, envp)) { + errno = EFAULT; + return -1; + } + if (memcpy_to_target(stack_hi_addr - sizeof(ps_strs), &ps_strs, + sizeof(ps_strs))) { + errno = EFAULT; + return -1; + } + + if (ret_addr) { + *ret_addr = p; + } + + return 0; + } + +#endif /* TARGET_OS_STACK_H */ diff --git a/bsd-user/freebsd/target_os_thread.h b/bsd-user/freebsd/target_os_thread.h new file mode 100644 index 0000000000..1b32cebd26 --- /dev/null +++ b/bsd-user/freebsd/target_os_thread.h @@ -0,0 +1,25 @@ +/* + * FreeBSD thread dependent code and definitions + * + * Copyright (c) 2013 Stacey D. 
Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_OS_THREAD_H +#define TARGET_OS_THREAD_H + +#include "target_arch_thread.h" + +#endif /* TARGET_OS_THREAD_H */ diff --git a/bsd-user/freebsd/target_os_ucontext.h b/bsd-user/freebsd/target_os_ucontext.h new file mode 100644 index 0000000000..b196b1c629 --- /dev/null +++ b/bsd-user/freebsd/target_os_ucontext.h @@ -0,0 +1,44 @@ +/* + * FreeBSD has a common ucontext definition for all architectures. + * + * Copyright 2021 Warner Losh + * + * SPDX-License-Identifier: GPL-2.0-or-later OR BSD-3-Clause + */ +#ifndef TARGET_OS_UCONTEXT_H +#define TARGET_OS_UCONTEXT_H + +/* + * Defines the common bits for all of FreeBSD's architectures. Has to be + * included AFTER the MD target_mcontext_t is defined, however, so can't + * be in the grab-bag that is target_os_signal.h. + */ + +/* See FreeBSD's sys/ucontext.h */ +#define TARGET_MC_GET_CLEAR_RET 0x0001 + +/* FreeBSD's sys/_ucontext.h structures */ +typedef struct target_ucontext { + target_sigset_t uc_sigmask; + target_mcontext_t uc_mcontext; + abi_ulong uc_link; + target_stack_t uc_stack; + int32_t uc_flags; + int32_t __spare__[4]; +} target_ucontext_t; + +G_STATIC_ASSERT(TARGET_MCONTEXT_SIZE == sizeof(target_mcontext_t)); +G_STATIC_ASSERT(TARGET_UCONTEXT_SIZE == sizeof(target_ucontext_t)); + +struct target_sigframe; + +abi_long set_sigtramp_args(CPUArchState *env, int sig, + struct target_sigframe *frame, + abi_ulong frame_addr, + struct target_sigaction *ka); +abi_long get_mcontext(CPUArchState *env, target_mcontext_t *mcp, int flags); +abi_long set_mcontext(CPUArchState *env, target_mcontext_t *mcp, int srflag); +abi_long get_ucontext_sigreturn(CPUArchState *env, abi_ulong target_sf, + abi_ulong *target_uc); + +#endif /* TARGET_OS_UCONTEXT_H */ diff --git a/bsd-user/freebsd/target_os_user.h b/bsd-user/freebsd/target_os_user.h new file mode 100644 index 0000000000..f036a32343 --- /dev/null +++ b/bsd-user/freebsd/target_os_user.h @@ -0,0 +1,329 @@ +/* + * sys/user.h definitions + * + * Copyright (c) 2015 Stacey D. Son (sson at FreeBSD) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_OS_USER_H +#define TARGET_OS_USER_H + +/* + * from sys/priority.h + */ +struct target_priority { + uint8_t pri_class; /* Scheduling class. */ + uint8_t pri_level; /* Normal priority level. 
*/ + uint8_t pri_native; /* Priority before propogation. */ + uint8_t pri_user; /* User priority based on p_cpu and p_nice. */ +}; + +/* + * sys/caprights.h + */ +#define TARGET_CAP_RIGHTS_VERSION 0 + +typedef struct target_cap_rights { + uint64_t cr_rights[TARGET_CAP_RIGHTS_VERSION + 2]; +} target_cap_rights_t; + +/* + * From sys/_socketaddr_storage.h + * + */ +#define TARGET_SS_MAXSIZE 128U +#define TARGET_SS_ALIGNSIZE (sizeof(__int64_t)) +#define TARGET_SS_PAD1SIZE (TARGET_SS_ALIGNSIZE - sizeof(unsigned char) - \ + sizeof(uint8_t)) +#define TARGET_SS_PAD2SIZE (TARGET_SS_MAXSIZE - sizeof(unsigned char) - \ + sizeof(uint8_t) - TARGET_SS_PAD1SIZE - TARGET_SS_ALIGNSIZE) + +struct target_sockaddr_storage { + unsigned char ss_len; /* address length */ + uint8_t ss_family; /* address family */ + char __ss_pad1[TARGET_SS_PAD1SIZE]; + __int64_t __ss_align; /* force desired struct alignment */ + char __ss_pad2[TARGET_SS_PAD2SIZE]; +}; + +/* + * from sys/user.h + */ +#define TARGET_KI_NSPARE_INT 2 +#define TARGET_KI_NSPARE_LONG 12 +#define TARGET_KI_NSPARE_PTR 6 + +#define TARGET_WMESGLEN 8 +#define TARGET_LOCKNAMELEN 8 +#define TARGET_TDNAMLEN 16 +#define TARGET_COMMLEN 19 +#define TARGET_KI_EMULNAMELEN 16 +#define TARGET_KI_NGROUPS 16 +#define TARGET_LOGNAMELEN 17 +#define TARGET_LOGINCLASSLEN 17 + +#define TARGET_KF_TYPE_NONE 0 +#define TARGET_KF_TYPE_VNODE 1 +#define TARGET_KF_TYPE_SOCKET 2 +#define TARGET_KF_TYPE_PIPE 3 +#define TARGET_KF_TYPE_FIFO 4 +#define TARGET_KF_TYPE_KQUEUE 5 +#define TARGET_KF_TYPE_CRYPTO 6 +#define TARGET_KF_TYPE_MQUEUE 7 +#define TARGET_KF_TYPE_SHM 8 +#define TARGET_KF_TYPE_SEM 9 +#define TARGET_KF_TYPE_PTS 10 +#define TARGET_KF_TYPE_PROCDESC 11 +#define TARGET_KF_TYPE_DEV 12 +#define TARGET_KF_TYPE_UNKNOWN 255 + +struct target_kinfo_proc { + int32_t ki_structsize; /* size of this structure */ + int32_t ki_layout; /* reserved: layout identifier */ + abi_ulong ki_args; /* address of command arguments */ + abi_ulong ki_paddr; /* address of proc */ + abi_ulong ki_addr; /* kernel virtual addr of u-area */ + abi_ulong ki_tracep; /* pointer to trace file */ + abi_ulong ki_textvp; /* pointer to executable file */ + abi_ulong ki_fd; /* pointer to open file info */ + abi_ulong ki_vmspace; /* pointer to kernel vmspace struct */ + abi_ulong ki_wchan; /* sleep address */ + int32_t ki_pid; /* Process identifier */ + int32_t ki_ppid; /* parent process id */ + int32_t ki_pgid; /* process group id */ + int32_t ki_tpgid; /* tty process group id */ + int32_t ki_sid; /* Process session ID */ + int32_t ki_tsid; /* Terminal session ID */ + int16_t ki_jobc; /* job control counter */ + int16_t ki_spare_short1; /* unused (just here for alignment) */ + int32_t ki_tdev__freebsd11; /* controlling tty dev */ + target_sigset_t ki_siglist; /* Signals arrived but not delivered */ + target_sigset_t ki_sigmask; /* Current signal mask */ + target_sigset_t ki_sigignore; /* Signals being ignored */ + target_sigset_t ki_sigcatch; /* Signals being caught by user */ + + int32_t ki_uid; /* effective user id */ + int32_t ki_ruid; /* Real user id */ + int32_t ki_svuid; /* Saved effective user id */ + int32_t ki_rgid; /* Real group id */ + int32_t ki_svgid; /* Saved effective group id */ + int16_t ki_ngroups; /* number of groups */ + int16_t ki_spare_short2; /* unused (just here for alignment) */ + int32_t ki_groups[TARGET_KI_NGROUPS]; /* groups */ + + abi_long ki_size; /* virtual size */ + + abi_long ki_rssize; /* current resident set size in pages */ + abi_long ki_swrss; /* resident set size before last 
swap */ + abi_long ki_tsize; /* text size (pages) XXX */ + abi_long ki_dsize; /* data size (pages) XXX */ + abi_long ki_ssize; /* stack size (pages) */ + + uint16_t ki_xstat; /* Exit status for wait & stop signal */ + uint16_t ki_acflag; /* Accounting flags */ + + uint32_t ki_pctcpu; /* %cpu for process during ki_swtime */ + + uint32_t ki_estcpu; /* Time averaged value of ki_cpticks */ + uint32_t ki_slptime; /* Time since last blocked */ + uint32_t ki_swtime; /* Time swapped in or out */ + uint32_t ki_cow; /* number of copy-on-write faults */ + uint64_t ki_runtime; /* Real time in microsec */ + + struct target_freebsd_timeval ki_start; /* starting time */ + struct target_freebsd_timeval ki_childtime; /* time used by process children */ + + abi_long ki_flag; /* P_* flags */ + abi_long ki_kiflag; /* KI_* flags (below) */ + int32_t ki_traceflag; /* Kernel trace points */ + char ki_stat; /* S* process status */ + int8_t ki_nice; /* Process "nice" value */ + char ki_lock; /* Process lock (prevent swap) count */ + char ki_rqindex; /* Run queue index */ + u_char ki_oncpu_old; /* Which cpu we are on (legacy) */ + u_char ki_lastcpu_old; /* Last cpu we were on (legacy) */ + char ki_tdname[TARGET_TDNAMLEN + 1]; /* thread name */ + char ki_wmesg[TARGET_WMESGLEN + 1]; /* wchan message */ + char ki_login[TARGET_LOGNAMELEN + 1]; /* setlogin name */ + char ki_lockname[TARGET_LOCKNAMELEN + 1]; /* lock name */ + char ki_comm[TARGET_COMMLEN + 1]; /* command name */ + char ki_emul[TARGET_KI_EMULNAMELEN + 1]; /* emulation name */ + char ki_loginclass[TARGET_LOGINCLASSLEN + 1]; /* login class */ + + char ki_sparestrings[50]; /* spare string space */ + int32_t ki_spareints[TARGET_KI_NSPARE_INT]; /* spare room for growth */ + uint64_t ki_tdev; /* controlling tty dev */ + int32_t ki_oncpu; /* Which cpu we are on */ + int32_t ki_lastcpu; /* Last cpu we were on */ + int32_t ki_tracer; /* Pid of tracing process */ + int32_t ki_flag2; /* P2_* flags */ + int32_t ki_fibnum; /* Default FIB number */ + uint32_t ki_cr_flags; /* Credential flags */ + int32_t ki_jid; /* Process jail ID */ + int32_t ki_numthreads; /* XXXKSE number of threads in total */ + + int32_t ki_tid; /* XXXKSE thread id */ + + struct target_priority ki_pri; /* process priority */ + struct target_freebsd_rusage ki_rusage; /* process rusage statistics */ + /* XXX - most fields in ki_rusage_ch are not (yet) filled in */ + struct target_freebsd_rusage ki_rusage_ch; /* rusage of children processes */ + + + abi_ulong ki_pcb; /* kernel virtual addr of pcb */ + abi_ulong ki_kstack; /* kernel virtual addr of stack */ + abi_ulong ki_udata; /* User convenience pointer */ + abi_ulong ki_tdaddr; /* address of thread */ + + abi_ulong ki_spareptrs[TARGET_KI_NSPARE_PTR]; /* spare room for growth */ + abi_long ki_sparelongs[TARGET_KI_NSPARE_LONG];/* spare room for growth */ + abi_long ki_sflag; /* PS_* flags */ + abi_long ki_tdflags; /* XXXKSE kthread flag */ +}; + +struct target_kinfo_file { + int32_t kf_structsize; /* Variable size of record. */ + int32_t kf_type; /* Descriptor type. */ + int32_t kf_fd; /* Array index. */ + int32_t kf_ref_count; /* Reference count. */ + int32_t kf_flags; /* Flags. */ + int32_t kf_pad0; /* Round to 64 bit alignment. */ + int64_t kf_offset; /* Seek location. */ + union { + struct { + uint32_t kf_spareint; + /* Socket domain. */ + int kf_sock_domain0; + /* Socket type. */ + int kf_sock_type0; + /* Socket protocol. */ + int kf_sock_protocol0; + /* Socket address. */ + struct sockaddr_storage kf_sa_local; + /* Peer address. 
*/ + struct sockaddr_storage kf_sa_peer; + /* Address of so_pcb. */ + uint64_t kf_sock_pcb; + /* Address of inp_ppcb. */ + uint64_t kf_sock_inpcb; + /* Address of unp_conn. */ + uint64_t kf_sock_unpconn; + /* Send buffer state. */ + uint16_t kf_sock_snd_sb_state; + /* Receive buffer state. */ + uint16_t kf_sock_rcv_sb_state; + /* Round to 64 bit alignment. */ + uint32_t kf_sock_pad0; + } kf_sock; + struct { + /* Vnode type. */ + int kf_file_type; + /* Space for future use */ + int kf_spareint[3]; + uint64_t kf_spareint64[30]; + /* Vnode filesystem id. */ + uint64_t kf_file_fsid; + /* File device. */ + uint64_t kf_file_rdev; + /* Global file id. */ + uint64_t kf_file_fileid; + /* File size. */ + uint64_t kf_file_size; + /* Vnode filesystem id, FreeBSD 11 compat. */ + uint32_t kf_file_fsid_freebsd11; + /* File device, FreeBSD 11 compat. */ + uint32_t kf_file_rdev_freebsd11; + /* File mode. */ + uint16_t kf_file_mode; + /* Round to 64 bit alignment. */ + uint16_t kf_file_pad0; + uint32_t kf_file_pad1; + } kf_file; + struct { + uint32_t kf_spareint[4]; + uint64_t kf_spareint64[32]; + uint32_t kf_sem_value; + uint16_t kf_sem_mode; + } kf_sem; + struct { + uint32_t kf_spareint[4]; + uint64_t kf_spareint64[32]; + uint64_t kf_pipe_addr; + uint64_t kf_pipe_peer; + uint32_t kf_pipe_buffer_cnt; + /* Round to 64 bit alignment. */ + uint32_t kf_pipe_pad0[3]; + } kf_pipe; + struct { + uint32_t kf_spareint[4]; + uint64_t kf_spareint64[32]; + uint32_t kf_pts_dev_freebsd11; + uint32_t kf_pts_pad0; + uint64_t kf_pts_dev; + /* Round to 64 bit alignment. */ + uint32_t kf_pts_pad1[4]; + } kf_pts; + struct { + uint32_t kf_spareint[4]; + uint64_t kf_spareint64[32]; + int32_t kf_pid; + } kf_proc; + } kf_un; + uint16_t kf_status; /* Status flags. */ + uint16_t kf_pad1; /* Round to 32 bit alignment. */ + int32_t _kf_ispare0; /* Space for more stuff. */ + target_cap_rights_t kf_cap_rights; /* Capability rights. */ + uint64_t _kf_cap_spare; /* Space for future cap_rights_t. */ + /* Truncated before copyout in sysctl */ + char kf_path[PATH_MAX]; /* Path to file, if any. */ +}; + +struct target_kinfo_vmentry { + int32_t kve_structsize; /* Variable size of record. */ + int32_t kve_type; /* Type of map entry. */ + uint64_t kve_start; /* Starting address. */ + uint64_t kve_end; /* Finishing address. */ + uint64_t kve_offset; /* Mapping offset in object */ + uint64_t kve_vn_fileid; /* inode number if vnode */ + uint32_t kve_vn_fsid_freebsd11; /* dev_t of vnode location */ + int32_t kve_flags; /* Flags on map entry. */ + int32_t kve_resident; /* Number of resident pages. */ + int32_t kve_private_resident; /* Number of private pages. */ + int32_t kve_protection; /* Protection bitmask. */ + int32_t kve_ref_count; /* VM obj ref count. */ + int32_t kve_shadow_count; /* VM obj shadow count. */ + int32_t kve_vn_type; /* Vnode type. */ + uint64_t kve_vn_size; /* File size. */ + uint32_t kve_vn_rdev_freebsd11; /* Device id if device. */ + uint16_t kve_vn_mode; /* File mode. */ + uint16_t kve_status; /* Status flags. */ +#if (__FreeBSD_version >= 1300501 && __FreeBSD_version < 1400000) || \ + __FreeBSD_version >= 1400009 + union { + uint64_t _kve_vn_fsid; /* dev_t of vnode location */ + uint64_t _kve_obj; /* handle of anon obj */ + } kve_type_spec; +#define kve_vn_fsid kve_type_spec._kve_vn_fsid +#define kve_obj kve_type_spec._kve_obj +#else + uint64_t kve_vn_fsid; /* dev_t of vnode location */ +#endif + uint64_t kve_vn_rdev; /* Device id if device. */ + int _kve_ispare[8]; /* Space for more stuff. 
*/ + /* Truncated before copyout in sysctl */ + char kve_path[PATH_MAX]; /* Path to VM obj, if any. */ +}; + +#endif /* TARGET_OS_USER_H */ diff --git a/bsd-user/freebsd/target_os_vmparam.h b/bsd-user/freebsd/target_os_vmparam.h new file mode 100644 index 0000000000..8457dd3913 --- /dev/null +++ b/bsd-user/freebsd/target_os_vmparam.h @@ -0,0 +1,39 @@ +/* + * FreeBSD VM parameters definitions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_OS_VMPARAM_H +#define TARGET_OS_VMPARAM_H + +#include "target_arch_vmparam.h" + +/* Compare to sys/exec.h */ +struct target_ps_strings { + abi_ulong ps_argvstr; + uint32_t ps_nargvstr; + abi_ulong ps_envstr; + uint32_t ps_nenvstr; +}; + +extern abi_ulong target_stkbas; +extern abi_ulong target_stksiz; + +#define TARGET_PS_STRINGS ((target_stkbas + target_stksiz) - \ + sizeof(struct target_ps_strings)) + +#endif /* TARGET_OS_VMPARAM_H */ diff --git a/bsd-user/host/arm/host-signal.h b/bsd-user/host/arm/host-signal.h new file mode 100644 index 0000000000..56679bd699 --- /dev/null +++ b/bsd-user/host/arm/host-signal.h @@ -0,0 +1,35 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2021 Warner Losh + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef ARM_HOST_SIGNAL_H +#define ARM_HOST_SIGNAL_H + +#include + +static inline uintptr_t host_signal_pc(ucontext_t *uc) +{ + return uc->uc_mcontext.__gregs[_REG_PC]; +} + +static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc) +{ + uc->uc_mcontext.__gregs[_REG_PC] = pc; +} + +static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc) +{ + /* + * In the FSR, bit 11 is WnR. FreeBSD returns this as part of the + * si_info.si_trapno. + */ + uint32_t fsr = info->si_trapno; + + return extract32(fsr, 11, 1); +} + +#endif diff --git a/bsd-user/host/i386/host-signal.h b/bsd-user/host/i386/host-signal.h new file mode 100644 index 0000000000..ffdfaba534 --- /dev/null +++ b/bsd-user/host/i386/host-signal.h @@ -0,0 +1,38 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2021 Warner Losh + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef I386_HOST_SIGNAL_H +#define I386_HOST_SIGNAL_H + +#include +#include +#include +#include +#include + +static inline uintptr_t host_signal_pc(ucontext_t *uc) +{ + return uc->uc_mcontext.mc_eip; +} + +static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc) +{ + uc->uc_mcontext.mc_eip = pc; +} + +static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc) +{ + /* + * Look in sys/i386/i386/trap.c. NOTE: mc_err == tr_err due to type punning + * between a trapframe and mcontext on FreeBSD/i386. 
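+ * A write access fault therefore shows up as a T_PAGEFLT trap with the + * PGEX_W bit set in mc_err, which is exactly what the check below tests.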
+ */ + return uc->uc_mcontext.mc_trapno == T_PAGEFLT && + uc->uc_mcontext.mc_err & PGEX_W; +} + +#endif diff --git a/bsd-user/host/x86_64/host-signal.h b/bsd-user/host/x86_64/host-signal.h new file mode 100644 index 0000000000..32ac4e4180 --- /dev/null +++ b/bsd-user/host/x86_64/host-signal.h @@ -0,0 +1,38 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2021 Warner Losh + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef X86_64_HOST_SIGNAL_H +#define X86_64_HOST_SIGNAL_H + +#include +#include +#include +#include +#include + +static inline uintptr_t host_signal_pc(ucontext_t *uc) +{ + return uc->uc_mcontext.mc_rip; +} + +static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc) +{ + uc->uc_mcontext.mc_rip = pc; +} + +static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc) +{ + /* + * Look in sys/amd64/amd64/trap.c. NOTE: mc_err == tr_err due to type + * punning between a trapframe and mcontext on FreeBSD/amd64. + */ + return uc->uc_mcontext.mc_trapno == T_PAGEFLT && + uc->uc_mcontext.mc_err & PGEX_W; +} + +#endif diff --git a/bsd-user/i386/signal.c b/bsd-user/i386/signal.c new file mode 100644 index 0000000000..5dd975ce56 --- /dev/null +++ b/bsd-user/i386/signal.c @@ -0,0 +1,68 @@ +/* + * i386 dependent signal definitions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu.h" + +/* + * Compare to i386/i386/machdep.c sendsig() + * Assumes that target stack frame memory is locked. + */ +abi_long set_sigtramp_args(CPUX86State *env, int sig, + struct target_sigframe *frame, + abi_ulong frame_addr, + struct target_sigaction *ka) +{ + /* XXX return -TARGET_EOPNOTSUPP; */ + return 0; +} + +/* + * Compare to i386/i386/exec_machdep.c sendsig() + * Assumes that the memory is locked if frame points to user memory. + */ +abi_long setup_sigframe_arch(CPUX86State *env, abi_ulong frame_addr, + struct target_sigframe *frame, int flags) +{ + target_mcontext_t *mcp = &frame->sf_uc.uc_mcontext; + + get_mcontext(env, mcp, flags); + return 0; +} + +/* Compare to i386/i386/machdep.c get_mcontext() */ +abi_long get_mcontext(CPUX86State *regs, target_mcontext_t *mcp, int flags) +{ + /* XXX */ + return -TARGET_EOPNOTSUPP; +} + +/* Compare to i386/i386/machdep.c set_mcontext() */ +abi_long set_mcontext(CPUX86State *regs, target_mcontext_t *mcp, int srflag) +{ + /* XXX */ + return -TARGET_EOPNOTSUPP; +} + +abi_long get_ucontext_sigreturn(CPUX86State *regs, abi_ulong target_sf, + abi_ulong *target_uc) +{ + /* XXX */ + *target_uc = 0; + return -TARGET_EOPNOTSUPP; +} diff --git a/bsd-user/i386/target.h b/bsd-user/i386/target.h new file mode 100644 index 0000000000..ddd3b8ec08 --- /dev/null +++ b/bsd-user/i386/target.h @@ -0,0 +1,20 @@ +/* + * Intel general target stuff that's common to all i386 details + * + * Copyright (c) 2022 M. 
Warner Losh + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef TARGET_H +#define TARGET_H + +/* + * i386 doesn't 'lump' the registers for 64-bit args. + */ +static inline bool regpairs_aligned(void *cpu_env) +{ + return false; +} + +#endif /* TARGET_H */ diff --git a/bsd-user/sparc64/target_syscall.h b/bsd-user/i386/target_arch.h similarity index 59% rename from bsd-user/sparc64/target_syscall.h rename to bsd-user/i386/target_arch.h index b7d986a76d..9595e60f09 100644 --- a/bsd-user/sparc64/target_syscall.h +++ b/bsd-user/i386/target_arch.h @@ -1,6 +1,7 @@ /* - * sparc64 dependent system call definitions + * Intel x86 specific prototypes for bsd-user * + * Copyright (c) 2013 Stacey D. Son * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -15,23 +16,16 @@ * You should have received a copy of the GNU General Public License * along with this program; if not, see . */ -#ifndef TARGET_SYSCALL_H -#define TARGET_SYSCALL_H -struct target_pt_regs { - abi_ulong u_regs[16]; - abi_ulong tstate; - abi_ulong pc; - abi_ulong npc; - abi_ulong y; - abi_ulong fprs; -}; +#ifndef TARGET_ARCH_H +#define TARGET_ARCH_H -#define UNAME_MACHINE "sun4u" -#define TARGET_HW_MACHINE "sparc" -#define TARGET_HW_MACHINE_ARCH "sparc64" +/* target_arch_cpu.c */ +void bsd_i386_write_dt(void *ptr, unsigned long addr, unsigned long limit, + int flags); +void bsd_i386_set_idt(int n, unsigned int dpl); +void bsd_i386_set_idt_base(uint64_t base); -#define TARGET_SPARC_UTRAP_INSTALL 1 -#define TARGET_SPARC_SIGTRAMP_INSTALL 2 +#define target_cpu_set_tls(env, newtls) -#endif /* TARGET_SYSCALL_H */ +#endif /* TARGET_ARCH_H */ diff --git a/bsd-user/i386/target_arch_cpu.c b/bsd-user/i386/target_arch_cpu.c new file mode 100644 index 0000000000..d349e45299 --- /dev/null +++ b/bsd-user/i386/target_arch_cpu.c @@ -0,0 +1,71 @@ +/* + * i386 cpu related code + * + * Copyright (c) 2013 Stacey Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#include + +#include "qemu/osdep.h" +#include "cpu.h" +#include "qemu.h" +#include "qemu/timer.h" + +#include "target_arch.h" + +static uint64_t *idt_table; + +uint64_t cpu_get_tsc(CPUX86State *env) +{ + return cpu_get_host_ticks(); +} + +void bsd_i386_write_dt(void *ptr, unsigned long addr, unsigned long limit, + int flags) +{ + unsigned int e1, e2; + uint32_t *p; + e1 = (addr << 16) | (limit & 0xffff); + e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000); + e2 |= flags; + p = ptr; + p[0] = tswap32(e1); + p[1] = tswap32(e2); +} + + +static void set_gate(void *ptr, unsigned int type, unsigned int dpl, + uint32_t addr, unsigned int sel) +{ + uint32_t *p, e1, e2; + e1 = (addr & 0xffff) | (sel << 16); + e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8); + p = ptr; + p[0] = tswap32(e1); + p[1] = tswap32(e2); +} + +/* only dpl matters as we do only user space emulation */ +void bsd_i386_set_idt(int n, unsigned int dpl) +{ + set_gate(idt_table + n, 0, dpl, 0, 0); +} + +void bsd_i386_set_idt_base(uint64_t base) +{ + idt_table = g2h_untagged(base); +} + diff --git a/bsd-user/i386/target_arch_cpu.h b/bsd-user/i386/target_arch_cpu.h new file mode 100644 index 0000000000..d792dc720f --- /dev/null +++ b/bsd-user/i386/target_arch_cpu.h @@ -0,0 +1,198 @@ +/* + * i386 cpu init and loop + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#ifndef TARGET_ARCH_CPU_H +#define TARGET_ARCH_CPU_H + +#include "target_arch.h" +#include "signal-common.h" + +#define TARGET_DEFAULT_CPU_MODEL "qemu32" + +static inline void target_cpu_init(CPUX86State *env, + struct target_pt_regs *regs) +{ + uint64_t *gdt_table; + + env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK; + env->hflags |= HF_PE_MASK | HF_CPL_MASK; + if (env->features[FEAT_1_EDX] & CPUID_SSE) { + env->cr[4] |= CR4_OSFXSR_MASK; + env->hflags |= HF_OSFXSR_MASK; + } + + /* flags setup : we activate the IRQs by default as in user mode */ + env->eflags |= IF_MASK; + + /* register setup */ + env->regs[R_EAX] = regs->eax; + env->regs[R_EBX] = regs->ebx; + env->regs[R_ECX] = regs->ecx; + env->regs[R_EDX] = regs->edx; + env->regs[R_ESI] = regs->esi; + env->regs[R_EDI] = regs->edi; + env->regs[R_EBP] = regs->ebp; + env->regs[R_ESP] = regs->esp; + env->eip = regs->eip; + + /* interrupt setup */ + env->idt.limit = 255; + + env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1), + PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + bsd_i386_set_idt_base(env->idt.base); + bsd_i386_set_idt(0, 0); + bsd_i386_set_idt(1, 0); + bsd_i386_set_idt(2, 0); + bsd_i386_set_idt(3, 3); + bsd_i386_set_idt(4, 3); + bsd_i386_set_idt(5, 0); + bsd_i386_set_idt(6, 0); + bsd_i386_set_idt(7, 0); + bsd_i386_set_idt(8, 0); + bsd_i386_set_idt(9, 0); + bsd_i386_set_idt(10, 0); + bsd_i386_set_idt(11, 0); + bsd_i386_set_idt(12, 0); + bsd_i386_set_idt(13, 0); + bsd_i386_set_idt(14, 0); + bsd_i386_set_idt(15, 0); + bsd_i386_set_idt(16, 0); + bsd_i386_set_idt(17, 0); + bsd_i386_set_idt(18, 0); + bsd_i386_set_idt(19, 0); + bsd_i386_set_idt(0x80, 3); + + /* segment setup */ + env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES, + PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1; + gdt_table = g2h_untagged(env->gdt.base); + + bsd_i386_write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | + (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT)); + + bsd_i386_write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | + (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT)); + + cpu_x86_load_seg(env, R_CS, __USER_CS); + cpu_x86_load_seg(env, R_SS, __USER_DS); + cpu_x86_load_seg(env, R_DS, __USER_DS); + cpu_x86_load_seg(env, R_ES, __USER_DS); + cpu_x86_load_seg(env, R_FS, __USER_DS); + cpu_x86_load_seg(env, R_GS, __USER_DS); + /* This hack makes Wine work... 
*/ + env->segs[R_FS].selector = 0; +} + +static inline void target_cpu_loop(CPUX86State *env) +{ + CPUState *cs = env_cpu(env); + int trapnr; + abi_ulong pc; + /* target_siginfo_t info; */ + + for (;;) { + cpu_exec_start(cs); + trapnr = cpu_exec(cs); + cpu_exec_end(cs); + process_queued_cpu_work(cs); + + switch (trapnr) { + case 0x80: { + /* syscall from int $0x80 */ + abi_ulong params = (abi_ulong) env->regs[R_ESP] + + sizeof(int32_t); + int32_t syscall_nr = env->regs[R_EAX]; + int32_t arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8; + + if (syscall_nr == TARGET_FREEBSD_NR_syscall) { + get_user_s32(syscall_nr, params); + params += sizeof(int32_t); + } else if (syscall_nr == TARGET_FREEBSD_NR___syscall) { + get_user_s32(syscall_nr, params); + params += sizeof(int64_t); + } + get_user_s32(arg1, params); + params += sizeof(int32_t); + get_user_s32(arg2, params); + params += sizeof(int32_t); + get_user_s32(arg3, params); + params += sizeof(int32_t); + get_user_s32(arg4, params); + params += sizeof(int32_t); + get_user_s32(arg5, params); + params += sizeof(int32_t); + get_user_s32(arg6, params); + params += sizeof(int32_t); + get_user_s32(arg7, params); + params += sizeof(int32_t); + get_user_s32(arg8, params); + env->regs[R_EAX] = do_freebsd_syscall(env, + syscall_nr, + arg1, + arg2, + arg3, + arg4, + arg5, + arg6, + arg7, + arg8); + } + if (((abi_ulong)env->regs[R_EAX]) >= (abi_ulong)(-515)) { + env->regs[R_EAX] = -env->regs[R_EAX]; + env->eflags |= CC_C; + } else { + env->eflags &= ~CC_C; + } + break; + + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + + case EXCP_ATOMIC: + cpu_exec_step_atomic(cs); + break; + + default: + pc = env->segs[R_CS].base + env->eip; + fprintf(stderr, "qemu: 0x%08lx: unhandled CPU exception 0x%x - " + "aborting\n", (long)pc, trapnr); + abort(); + } + process_pending_signals(env); + } +} + +static inline void target_cpu_clone_regs(CPUX86State *env, target_ulong newsp) +{ + if (newsp) { + env->regs[R_ESP] = newsp; + } + env->regs[R_EAX] = 0; +} + +static inline void target_cpu_reset(CPUArchState *env) +{ + cpu_reset(env_cpu(env)); +} + +#endif /* TARGET_ARCH_CPU_H */ diff --git a/bsd-user/i386/target_arch_elf.h b/bsd-user/i386/target_arch_elf.h new file mode 100644 index 0000000000..cbcd1f08e2 --- /dev/null +++ b/bsd-user/i386/target_arch_elf.h @@ -0,0 +1,36 @@ +/* + * i386 ELF definitions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#ifndef TARGET_ARCH_ELF_H +#define TARGET_ARCH_ELF_H + +#define ELF_START_MMAP 0x80000000 +#define ELF_ET_DYN_LOAD_ADDR 0x01001000 +#define elf_check_arch(x) (((x) == EM_386) || ((x) == EM_486)) + +#define ELF_HWCAP 0 /* FreeBSD doesn't do AT_HWCAP{,2} on x86 */ + +#define ELF_CLASS ELFCLASS32 +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_386 + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE 4096 + +#endif /* TARGET_ARCH_ELF_H */ diff --git a/bsd-user/i386/target_arch_reg.h b/bsd-user/i386/target_arch_reg.h new file mode 100644 index 0000000000..8123109697 --- /dev/null +++ b/bsd-user/i386/target_arch_reg.h @@ -0,0 +1,82 @@ +/* + * FreeBSD i386 register structures + * + * Copyright (c) 2015 Stacey Son + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_REG_H +#define TARGET_ARCH_REG_H + +/* See sys/i386/include/reg.h */ +typedef struct target_reg { + uint32_t r_fs; + uint32_t r_es; + uint32_t r_ds; + uint32_t r_edi; + uint32_t r_esi; + uint32_t r_ebp; + uint32_t r_isp; + uint32_t r_ebx; + uint32_t r_edx; + uint32_t r_ecx; + uint32_t r_eax; + uint32_t r_trapno; + uint32_t r_err; + uint32_t r_eip; + uint32_t r_cs; + uint32_t r_eflags; + uint32_t r_esp; + uint32_t r_ss; + uint32_t r_gs; +} target_reg_t; + +typedef struct target_fpreg { + uint32_t fpr_env[7]; + uint8_t fpr_acc[8][10]; + uint32_t fpr_ex_sw; + uint8_t fpr_pad[64]; +} target_fpreg_t; + +static inline void target_copy_regs(target_reg_t *regs, const CPUX86State *env) +{ + + regs->r_fs = env->segs[R_FS].selector & 0xffff; + regs->r_es = env->segs[R_ES].selector & 0xffff; + regs->r_ds = env->segs[R_DS].selector & 0xffff; + + regs->r_edi = env->regs[R_EDI]; + regs->r_esi = env->regs[R_ESI]; + regs->r_ebp = env->regs[R_EBP]; + /* regs->r_isp = env->regs[R_ISP]; XXX */ + regs->r_ebx = env->regs[R_EBX]; + regs->r_edx = env->regs[R_EDX]; + regs->r_ecx = env->regs[R_ECX]; + regs->r_eax = env->regs[R_EAX]; + /* regs->r_trapno = env->regs[R_TRAPNO]; XXX */ + regs->r_err = env->error_code; /* XXX ? */ + regs->r_eip = env->eip; + + regs->r_cs = env->segs[R_CS].selector & 0xffff; + + regs->r_eflags = env->eflags; + regs->r_esp = env->regs[R_ESP]; + + regs->r_ss = env->segs[R_SS].selector & 0xffff; + regs->r_gs = env->segs[R_GS].selector & 0xffff; +} + +#endif /* TARGET_ARCH_REG_H */ diff --git a/bsd-user/i386/target_arch_signal.h b/bsd-user/i386/target_arch_signal.h new file mode 100644 index 0000000000..279dadc22c --- /dev/null +++ b/bsd-user/i386/target_arch_signal.h @@ -0,0 +1,91 @@ +/* + * i386 dependent signal definitions + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ +#ifndef TARGET_ARCH_SIGNAL_H +#define TARGET_ARCH_SIGNAL_H + +#include "cpu.h" + +/* Size of the signal trampolin code placed on the stack. */ +#define TARGET_SZSIGCODE 0 + +/* compare to x86/include/_limits.h */ +#define TARGET_MINSIGSTKSZ (512 * 4) /* min sig stack size */ +#define TARGET_SIGSTKSZ (MINSIGSTKSZ + 32768) /* recommended size */ + +typedef struct target_mcontext { + abi_ulong mc_onstack; /* XXX - sigcontext compat. */ + abi_ulong mc_gs; /* machine state (struct trapframe) */ + abi_ulong mc_fs; + abi_ulong mc_es; + abi_ulong mc_ds; + abi_ulong mc_edi; + abi_ulong mc_esi; + abi_ulong mc_ebp; + abi_ulong mc_isp; + abi_ulong mc_ebx; + abi_ulong mc_edx; + abi_ulong mc_ecx; + abi_ulong mc_eax; + abi_ulong mc_trapno; + abi_ulong mc_err; + abi_ulong mc_eip; + abi_ulong mc_cs; + abi_ulong mc_eflags; + abi_ulong mc_esp; + abi_ulong mc_ss; + + int32_t mc_len; /* sizeof(mcontext_t) */ +#define _MC_FPFMT_NODEV 0x10000 /* device not present or configured */ +#define _MC_FPFMT_387 0x10001 +#define _MC_FPFMT_XMM 0x10002 + int32_t mc_fpformat; +#define _MC_FPOWNED_NONE 0x20000 /* FP state not used */ +#define _MC_FPOWNED_FPU 0x20001 /* FP state came from FPU */ +#define _MC_FPOWNED_PCB 0x20002 /* FP state came from PCB */ + int32_t mc_ownedfp; + abi_ulong mc_flags; + /* + * See for the internals of mc_fpstate[]. + */ + int32_t mc_fpstate[128] __aligned(16); + + abi_ulong mc_fsbase; + abi_ulong mc_gsbase; + + abi_ulong mc_xfpustate; + abi_ulong mc_xfpustate_len; + + int32_t mc_spare2[4]; +} target_mcontext_t; + +#define TARGET_MCONTEXT_SIZE 640 +#define TARGET_UCONTEXT_SIZE 704 + +#include "target_os_ucontext.h" + +struct target_sigframe { + abi_ulong sf_signum; + abi_ulong sf_siginfo; /* code or pointer to sf_si */ + abi_ulong sf_ucontext; /* points to sf_uc */ + abi_ulong sf_addr; /* undocumented 4th arg */ + target_ucontext_t sf_uc; /* = *sf_uncontext */ + target_siginfo_t sf_si; /* = *sf_siginfo (SA_SIGINFO case)*/ + uint32_t __spare__[2]; +}; + +#endif /* TARGET_ARCH_SIGNAL_H */ diff --git a/bsd-user/i386/target_arch_sigtramp.h b/bsd-user/i386/target_arch_sigtramp.h new file mode 100644 index 0000000000..ef94cc864f --- /dev/null +++ b/bsd-user/i386/target_arch_sigtramp.h @@ -0,0 +1,29 @@ +/* + * Intel i386 sigcode for bsd-user + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#ifndef TARGET_ARCH_SIGTRAMP_H +#define TARGET_ARCH_SIGTRAMP_H + +static inline abi_long setup_sigtramp(abi_ulong offset, unsigned sigf_uc, + unsigned sys_sigreturn) +{ + + return 0; +} +#endif /* TARGET_ARCH_SIGTRAMP_H */ diff --git a/bsd-user/i386/target_arch_sysarch.h b/bsd-user/i386/target_arch_sysarch.h index e9ab98ec32..db8fee6380 100644 --- a/bsd-user/i386/target_arch_sysarch.h +++ b/bsd-user/i386/target_arch_sysarch.h @@ -17,8 +17,8 @@ * along with this program; if not, see . */ -#ifndef BSD_USER_ARCH_SYSARCH_H_ -#define BSD_USER_ARCH_SYSARCH_H_ +#ifndef TARGET_ARCH_SYSARCH_H +#define TARGET_ARCH_SYSARCH_H #include "target_syscall.h" @@ -74,4 +74,4 @@ static inline void do_freebsd_arch_print_sysarch( TARGET_ABI_FMT_lx ")", name->name, (int)arg1, arg2, arg3, arg4); } -#endif /* !BSD_USER_ARCH_SYSARCH_H_ */ +#endif /* TARGET_ARCH_SYSARCH_H */ diff --git a/bsd-user/i386/target_arch_thread.h b/bsd-user/i386/target_arch_thread.h new file mode 100644 index 0000000000..cee2148d94 --- /dev/null +++ b/bsd-user/i386/target_arch_thread.h @@ -0,0 +1,48 @@ +/* + * i386 thread support + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_THREAD_H +#define TARGET_ARCH_THREAD_H + +/* Compare to vm_machdep.c cpu_set_upcall_kse() */ +static inline void target_thread_set_upcall(CPUX86State *regs, abi_ulong entry, + abi_ulong arg, abi_ulong stack_base, abi_ulong stack_size) +{ + /* XXX */ +} + +static inline void target_thread_init(struct target_pt_regs *regs, + struct image_info *infop) +{ + regs->esp = infop->start_stack; + regs->eip = infop->entry; + + /* + * SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx + * contains a pointer to a function which might be registered using + * `atexit'. This provides a mean for the dynamic linker to call DT_FINI + * functions for shared libraries that have been loaded before the code + * runs. + * + * A value of 0 tells we have no such handler. + */ + regs->edx = 0; +} + +#endif /* TARGET_ARCH_THREAD_H */ diff --git a/bsd-user/i386/target_arch_vmparam.h b/bsd-user/i386/target_arch_vmparam.h new file mode 100644 index 0000000000..79db420e59 --- /dev/null +++ b/bsd-user/i386/target_arch_vmparam.h @@ -0,0 +1,47 @@ +/* + * i386 VM parameters definitions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_VMPARAM_H +#define TARGET_ARCH_VMPARAM_H + +#include "cpu.h" + +/* compare to i386/include/vmparam.h */ +#define TARGET_MAXTSIZ (128 * MiB) /* max text size */ +#define TARGET_DFLDSIZ (128 * MiB) /* initial data size limit */ +#define TARGET_MAXDSIZ (512 * MiB) /* max data size */ +#define TARGET_DFLSSIZ (8 * MiB) /* initial stack size limit */ +#define TARGET_MAXSSIZ (64 * MiB) /* max stack size */ +#define TARGET_SGROWSIZ (128 * KiB) /* amount to grow stack */ + +#define TARGET_RESERVED_VA 0xf7000000 + +#define TARGET_USRSTACK (0xbfc00000) + +static inline abi_ulong get_sp_from_cpustate(CPUX86State *state) +{ + return state->regs[R_ESP]; +} + +static inline void set_second_rval(CPUX86State *state, abi_ulong retval2) +{ + state->regs[R_EDX] = retval2; +} + +#endif /* TARGET_ARCH_VMPARAM_H */ diff --git a/bsd-user/include/special-errno.h b/bsd-user/include/special-errno.h new file mode 100644 index 0000000000..03599d9b5a --- /dev/null +++ b/bsd-user/include/special-errno.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* + * QEMU internal errno values for implementing user-only POSIX. + * + * Copyright (c) 2021 Linaro, Ltd. + */ + +#ifndef SPECIAL_ERRNO_H +#define SPECIAL_ERRNO_H + +/* + * All of these are QEMU internal, not visible to the guest. + * They should be chosen so as to not overlap with any host + * or guest errno. + */ + +/* + * This is returned when a system call should be restarted, to tell the + * main loop that it should wind the guest PC backwards so it will + * re-execute the syscall after handling any pending signals. + */ +#define QEMU_ERESTARTSYS 255 + +#endif /* SPECIAL_ERRNO_H */ diff --git a/bsd-user/main.c b/bsd-user/main.c index fe66204b6b..6f09180d65 100644 --- a/bsd-user/main.c +++ b/bsd-user/main.c @@ -1,7 +1,8 @@ /* - * qemu user main + * qemu bsd user main * * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2013-14 Stacey Son * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -17,11 +18,15 @@ * along with this program; if not, see . */ +#include +#include +#include +#include + #include "qemu/osdep.h" -#include "qemu-common.h" +#include "qemu/help-texts.h" #include "qemu/units.h" #include "qemu/accel.h" -#include "sysemu/tcg.h" #include "qemu-version.h" #include @@ -39,496 +44,107 @@ #include "qemu/cutils.h" #include "exec/log.h" #include "trace/control.h" +#include "crypto/init.h" +#include "qemu/guest-random.h" + +#include "host-os.h" +#include "target_arch_cpu.h" int singlestep; -unsigned long mmap_min_addr; uintptr_t guest_base; bool have_guest_base; +/* + * When running 32-on-64 we should make sure we can fit all of the possible + * guest address space into a contiguous chunk of virtual host memory. + * + * This way we will never overlap with our own libraries or binaries or stack + * or anything else that QEMU maps. + * + * Many cpus reserve the high bit (or more than one for some 64-bit cpus) + * of the address for the kernel. Some cpus rely on this and user space + * uses the high bit(s) for pointer tagging and the like. For them, we + * must preserve the expected address space. 
+ */ +#ifndef MAX_RESERVED_VA +# if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS +# if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \ + (TARGET_LONG_BITS == 32 || defined(TARGET_ABI32)) +/* + * There are a number of places where we assign reserved_va to a variable + * of type abi_ulong and expect it to fit. Avoid the last page. + */ +# define MAX_RESERVED_VA (0xfffffffful & TARGET_PAGE_MASK) +# else +# define MAX_RESERVED_VA (1ul << TARGET_VIRT_ADDR_SPACE_BITS) +# endif +# else +# define MAX_RESERVED_VA 0 +# endif +#endif + +/* + * That said, reserving *too* much vm space via mmap can run into problems + * with rlimits, oom due to page table creation, etc. We will still try it, + * if directed by the command-line option, but not by default. + */ +#if HOST_LONG_BITS == 64 && TARGET_VIRT_ADDR_SPACE_BITS <= 32 +unsigned long reserved_va = MAX_RESERVED_VA; +#else unsigned long reserved_va; +#endif static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX; const char *qemu_uname_release; -enum BSDType bsd_type; +char qemu_proc_pathname[PATH_MAX]; /* full path to executable */ -/* - * XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so - * we allocate a bigger stack. Need a better solution, for example - * by remapping the process stack directly at the right place - */ -unsigned long x86_stack_size = 512 * 1024; +unsigned long target_maxtsiz = TARGET_MAXTSIZ; /* max text size */ +unsigned long target_dfldsiz = TARGET_DFLDSIZ; /* initial data size limit */ +unsigned long target_maxdsiz = TARGET_MAXDSIZ; /* max data size */ +unsigned long target_dflssiz = TARGET_DFLSSIZ; /* initial stack size limit */ +unsigned long target_maxssiz = TARGET_MAXSSIZ; /* max stack size */ +unsigned long target_sgrowsiz = TARGET_SGROWSIZ; /* amount to grow stack */ -void gemu_log(const char *fmt, ...) -{ - va_list ap; - - va_start(ap, fmt); - vfprintf(stderr, fmt, ap); - va_end(ap); -} - -#if defined(TARGET_I386) -int cpu_get_pic_interrupt(CPUX86State *env) -{ - return -1; -} -#endif +/* Helper routines for implementing atomic operations. */ void fork_start(void) { + start_exclusive(); + cpu_list_lock(); + mmap_fork_start(); } void fork_end(int child) { if (child) { + CPUState *cpu, *next_cpu; + /* + * Child processes created by fork() only have a single thread. Discard + * information about the parent threads. + */ + CPU_FOREACH_SAFE(cpu, next_cpu) { + if (cpu != thread_cpu) { + QTAILQ_REMOVE_RCU(&cpus, cpu, node); + } + } + mmap_fork_end(child); + /* + * qemu_init_cpu_list() takes care of reinitializing the exclusive + * state, so we don't need to end_exclusive() here. 
+ */ + qemu_init_cpu_list(); gdbserver_fork(thread_cpu); + } else { + mmap_fork_end(child); + cpu_list_unlock(); + end_exclusive(); } } -#ifdef TARGET_I386 -/***********************************************************/ -/* CPUX86 core interface */ - -uint64_t cpu_get_tsc(CPUX86State *env) +void cpu_loop(CPUArchState *env) { - return cpu_get_host_ticks(); + target_cpu_loop(env); } -static void write_dt(void *ptr, unsigned long addr, unsigned long limit, - int flags) -{ - unsigned int e1, e2; - uint32_t *p; - e1 = (addr << 16) | (limit & 0xffff); - e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000); - e2 |= flags; - p = ptr; - p[0] = tswap32(e1); - p[1] = tswap32(e2); -} - -static uint64_t *idt_table; -#ifdef TARGET_X86_64 -static void set_gate64(void *ptr, unsigned int type, unsigned int dpl, - uint64_t addr, unsigned int sel) -{ - uint32_t *p, e1, e2; - e1 = (addr & 0xffff) | (sel << 16); - e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8); - p = ptr; - p[0] = tswap32(e1); - p[1] = tswap32(e2); - p[2] = tswap32(addr >> 32); - p[3] = 0; -} -/* only dpl matters as we do only user space emulation */ -static void set_idt(int n, unsigned int dpl) -{ - set_gate64(idt_table + n * 2, 0, dpl, 0, 0); -} -#else -static void set_gate(void *ptr, unsigned int type, unsigned int dpl, - uint32_t addr, unsigned int sel) -{ - uint32_t *p, e1, e2; - e1 = (addr & 0xffff) | (sel << 16); - e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8); - p = ptr; - p[0] = tswap32(e1); - p[1] = tswap32(e2); -} - -/* only dpl matters as we do only user space emulation */ -static void set_idt(int n, unsigned int dpl) -{ - set_gate(idt_table + n, 0, dpl, 0, 0); -} -#endif - -void cpu_loop(CPUX86State *env) -{ - CPUState *cs = env_cpu(env); - int trapnr; - abi_ulong pc; - /* target_siginfo_t info; */ - - for (;;) { - cpu_exec_start(cs); - trapnr = cpu_exec(cs); - cpu_exec_end(cs); - process_queued_cpu_work(cs); - - switch (trapnr) { - case 0x80: - /* syscall from int $0x80 */ - if (bsd_type == target_freebsd) { - abi_ulong params = (abi_ulong) env->regs[R_ESP] + - sizeof(int32_t); - int32_t syscall_nr = env->regs[R_EAX]; - int32_t arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8; - - if (syscall_nr == TARGET_FREEBSD_NR_syscall) { - get_user_s32(syscall_nr, params); - params += sizeof(int32_t); - } else if (syscall_nr == TARGET_FREEBSD_NR___syscall) { - get_user_s32(syscall_nr, params); - params += sizeof(int64_t); - } - get_user_s32(arg1, params); - params += sizeof(int32_t); - get_user_s32(arg2, params); - params += sizeof(int32_t); - get_user_s32(arg3, params); - params += sizeof(int32_t); - get_user_s32(arg4, params); - params += sizeof(int32_t); - get_user_s32(arg5, params); - params += sizeof(int32_t); - get_user_s32(arg6, params); - params += sizeof(int32_t); - get_user_s32(arg7, params); - params += sizeof(int32_t); - get_user_s32(arg8, params); - env->regs[R_EAX] = do_freebsd_syscall(env, - syscall_nr, - arg1, - arg2, - arg3, - arg4, - arg5, - arg6, - arg7, - arg8); - } else { /* if (bsd_type == target_openbsd) */ - env->regs[R_EAX] = do_openbsd_syscall(env, - env->regs[R_EAX], - env->regs[R_EBX], - env->regs[R_ECX], - env->regs[R_EDX], - env->regs[R_ESI], - env->regs[R_EDI], - env->regs[R_EBP]); - } - if (((abi_ulong)env->regs[R_EAX]) >= (abi_ulong)(-515)) { - env->regs[R_EAX] = -env->regs[R_EAX]; - env->eflags |= CC_C; - } else { - env->eflags &= ~CC_C; - } - break; -#ifndef TARGET_ABI32 - case EXCP_SYSCALL: - /* syscall from syscall instruction */ - if (bsd_type == 
target_freebsd) { - env->regs[R_EAX] = do_freebsd_syscall(env, - env->regs[R_EAX], - env->regs[R_EDI], - env->regs[R_ESI], - env->regs[R_EDX], - env->regs[R_ECX], - env->regs[8], - env->regs[9], 0, 0); - } else { /* if (bsd_type == target_openbsd) */ - env->regs[R_EAX] = do_openbsd_syscall(env, - env->regs[R_EAX], - env->regs[R_EDI], - env->regs[R_ESI], - env->regs[R_EDX], - env->regs[10], - env->regs[8], - env->regs[9]); - } - env->eip = env->exception_next_eip; - if (((abi_ulong)env->regs[R_EAX]) >= (abi_ulong)(-515)) { - env->regs[R_EAX] = -env->regs[R_EAX]; - env->eflags |= CC_C; - } else { - env->eflags &= ~CC_C; - } - break; -#endif - case EXCP_INTERRUPT: - /* just indicate that signals should be handled asap */ - break; - default: - pc = env->segs[R_CS].base + env->eip; - fprintf(stderr, - "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n", - (long)pc, trapnr); - abort(); - } - process_pending_signals(env); - } -} -#endif - -#ifdef TARGET_SPARC -#define SPARC64_STACK_BIAS 2047 - -/* #define DEBUG_WIN */ -/* - * WARNING: dealing with register windows _is_ complicated. More info - * can be found at http://www.sics.se/~psm/sparcstack.html - */ -static inline int get_reg_index(CPUSPARCState *env, int cwp, int index) -{ - index = (index + cwp * 16) % (16 * env->nwindows); - /* - * wrap handling : if cwp is on the last window, then we use the - * registers 'after' the end - */ - if (index < 8 && env->cwp == env->nwindows - 1) { - index += 16 * env->nwindows; - } - return index; -} - -/* save the register window 'cwp1' */ -static inline void save_window_offset(CPUSPARCState *env, int cwp1) -{ - unsigned int i; - abi_ulong sp_ptr; - - sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)]; -#ifdef TARGET_SPARC64 - if (sp_ptr & 3) { - sp_ptr += SPARC64_STACK_BIAS; - } -#endif -#if defined(DEBUG_WIN) - printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n", - sp_ptr, cwp1); -#endif - for (i = 0; i < 16; i++) { - /* FIXME - what to do if put_user() fails? */ - put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr); - sp_ptr += sizeof(abi_ulong); - } -} - -static void save_window(CPUSPARCState *env) -{ -#ifndef TARGET_SPARC64 - unsigned int new_wim; - new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) & - ((1LL << env->nwindows) - 1); - save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2)); - env->wim = new_wim; -#else - /* - * cansave is zero if the spill trap handler is triggered by `save` and - * nonzero if triggered by a `flushw` - */ - save_window_offset(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2)); - env->cansave++; - env->canrestore--; -#endif -} - -static void restore_window(CPUSPARCState *env) -{ -#ifndef TARGET_SPARC64 - unsigned int new_wim; -#endif - unsigned int i, cwp1; - abi_ulong sp_ptr; - -#ifndef TARGET_SPARC64 - new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) & - ((1LL << env->nwindows) - 1); -#endif - - /* restore the invalid window */ - cwp1 = cpu_cwp_inc(env, env->cwp + 1); - sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)]; -#ifdef TARGET_SPARC64 - if (sp_ptr & 3) { - sp_ptr += SPARC64_STACK_BIAS; - } -#endif -#if defined(DEBUG_WIN) - printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n", - sp_ptr, cwp1); -#endif - for (i = 0; i < 16; i++) { - /* FIXME - what to do if get_user() fails? 
*/ - get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr); - sp_ptr += sizeof(abi_ulong); - } -#ifdef TARGET_SPARC64 - env->canrestore++; - if (env->cleanwin < env->nwindows - 1) { - env->cleanwin++; - } - env->cansave--; -#else - env->wim = new_wim; -#endif -} - -static void flush_windows(CPUSPARCState *env) -{ - int offset, cwp1; - - offset = 1; - for (;;) { - /* if restore would invoke restore_window(), then we can stop */ - cwp1 = cpu_cwp_inc(env, env->cwp + offset); -#ifndef TARGET_SPARC64 - if (env->wim & (1 << cwp1)) { - break; - } -#else - if (env->canrestore == 0) { - break; - } - env->cansave++; - env->canrestore--; -#endif - save_window_offset(env, cwp1); - offset++; - } - cwp1 = cpu_cwp_inc(env, env->cwp + 1); -#ifndef TARGET_SPARC64 - /* set wim so that restore will reload the registers */ - env->wim = 1 << cwp1; -#endif -#if defined(DEBUG_WIN) - printf("flush_windows: nb=%d\n", offset - 1); -#endif -} - -void cpu_loop(CPUSPARCState *env) -{ - CPUState *cs = env_cpu(env); - int trapnr, ret, syscall_nr; - /* target_siginfo_t info; */ - - while (1) { - cpu_exec_start(cs); - trapnr = cpu_exec(cs); - cpu_exec_end(cs); - process_queued_cpu_work(cs); - - switch (trapnr) { -#ifndef TARGET_SPARC64 - case 0x80: -#else - /* FreeBSD uses 0x141 for syscalls too */ - case 0x141: - if (bsd_type != target_freebsd) { - goto badtrap; - } - /* fallthrough */ - case 0x100: -#endif - syscall_nr = env->gregs[1]; - if (bsd_type == target_freebsd) - ret = do_freebsd_syscall(env, syscall_nr, - env->regwptr[0], env->regwptr[1], - env->regwptr[2], env->regwptr[3], - env->regwptr[4], env->regwptr[5], - 0, 0); - else if (bsd_type == target_netbsd) - ret = do_netbsd_syscall(env, syscall_nr, - env->regwptr[0], env->regwptr[1], - env->regwptr[2], env->regwptr[3], - env->regwptr[4], env->regwptr[5]); - else { /* if (bsd_type == target_openbsd) */ -#if defined(TARGET_SPARC64) - syscall_nr &= ~(TARGET_OPENBSD_SYSCALL_G7RFLAG | - TARGET_OPENBSD_SYSCALL_G2RFLAG); -#endif - ret = do_openbsd_syscall(env, syscall_nr, - env->regwptr[0], env->regwptr[1], - env->regwptr[2], env->regwptr[3], - env->regwptr[4], env->regwptr[5]); - } - if ((unsigned int)ret >= (unsigned int)(-515)) { - ret = -ret; -#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) - env->xcc |= PSR_CARRY; -#else - env->psr |= PSR_CARRY; -#endif - } else { -#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) - env->xcc &= ~PSR_CARRY; -#else - env->psr &= ~PSR_CARRY; -#endif - } - env->regwptr[0] = ret; - /* next instruction */ -#if defined(TARGET_SPARC64) - if (bsd_type == target_openbsd && - env->gregs[1] & TARGET_OPENBSD_SYSCALL_G2RFLAG) { - env->pc = env->gregs[2]; - env->npc = env->pc + 4; - } else if (bsd_type == target_openbsd && - env->gregs[1] & TARGET_OPENBSD_SYSCALL_G7RFLAG) { - env->pc = env->gregs[7]; - env->npc = env->pc + 4; - } else { - env->pc = env->npc; - env->npc = env->npc + 4; - } -#else - env->pc = env->npc; - env->npc = env->npc + 4; -#endif - break; - case 0x83: /* flush windows */ -#ifdef TARGET_ABI32 - case 0x103: -#endif - flush_windows(env); - /* next instruction */ - env->pc = env->npc; - env->npc = env->npc + 4; - break; -#ifndef TARGET_SPARC64 - case TT_WIN_OVF: /* window overflow */ - save_window(env); - break; - case TT_WIN_UNF: /* window underflow */ - restore_window(env); - break; - case TT_TFAULT: - case TT_DFAULT: - break; -#else - case TT_SPILL: /* window overflow */ - save_window(env); - break; - case TT_FILL: /* window underflow */ - restore_window(env); - break; - case TT_TFAULT: - case 
TT_DFAULT: - break; -#endif - case EXCP_INTERRUPT: - /* just indicate that signals should be handled asap */ - break; - case EXCP_DEBUG: - { - gdb_handlesig(cs, TARGET_SIGTRAP); - } - break; - default: -#ifdef TARGET_SPARC64 - badtrap: -#endif - printf("Unhandled trap: 0x%x\n", trapnr); - cpu_dump_state(cs, stderr, 0); - exit(1); - } - process_pending_signals(env); - } -} - -#endif - static void usage(void) { printf("qemu-" TARGET_NAME " version " QEMU_FULL_VERSION @@ -546,13 +162,11 @@ static void usage(void) "-E var=value sets/modifies targets environment variable(s)\n" "-U var unsets targets environment variable(s)\n" "-B address set guest_base address to address\n" - "-bsd type select emulated BSD type FreeBSD/NetBSD/OpenBSD (default)\n" "\n" "Debug options:\n" "-d item1[,...] enable logging of specified items\n" " (use '-d help' for a list of log items)\n" "-D logfile write logs to 'logfile' (default stderr)\n" - "-p pagesize set the host page size to 'pagesize'\n" "-singlestep always run in singlestep mode\n" "-strace log system calls\n" "-trace [[enable=]][,events=][,file=]\n" @@ -572,11 +186,20 @@ static void usage(void) , TARGET_NAME, interp_prefix, - x86_stack_size); + target_dflssiz); exit(1); } -THREAD CPUState *thread_cpu; +__thread CPUState *thread_cpu; + +void stop_all_tasks(void) +{ + /* + * We trust when using NPTL (pthreads) start_exclusive() handles thread + * stopping correctly. + */ + start_exclusive(); +} bool qemu_cpu_is_self(CPUState *cpu) { @@ -589,16 +212,55 @@ void qemu_cpu_kick(CPUState *cpu) } /* Assumes contents are already zeroed. */ -void init_task_state(TaskState *ts) +static void init_task_state(TaskState *ts) { - int i; + ts->sigaltstack_used = (struct target_sigaltstack) { + .ss_sp = 0, + .ss_size = 0, + .ss_flags = TARGET_SS_DISABLE, + }; +} - ts->used = 1; - ts->first_free = ts->sigqueue_table; - for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) { - ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1]; +void gemu_log(const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + va_end(ap); +} + +static void +adjust_ssize(void) +{ + struct rlimit rl; + + if (getrlimit(RLIMIT_STACK, &rl) != 0) { + return; + } + + target_maxssiz = MIN(target_maxssiz, rl.rlim_max); + target_dflssiz = MIN(MAX(target_dflssiz, rl.rlim_cur), target_maxssiz); + + rl.rlim_max = target_maxssiz; + rl.rlim_cur = target_dflssiz; + setrlimit(RLIMIT_STACK, &rl); +} + +static void save_proc_pathname(char *argv0) +{ + int mib[4]; + size_t len; + + mib[0] = CTL_KERN; + mib[1] = KERN_PROC; + mib[2] = KERN_PROC_PATHNAME; + mib[3] = -1; + + len = sizeof(qemu_proc_pathname); + if (sysctl(mib, 4, qemu_proc_pathname, &len, NULL, 0)) { + perror("sysctl"); } - ts->sigqueue_table[i].next = NULL; } int main(int argc, char **argv) @@ -608,9 +270,11 @@ int main(int argc, char **argv) const char *cpu_type; const char *log_file = NULL; const char *log_mask = NULL; + const char *seed_optarg = NULL; struct target_pt_regs regs1, *regs = ®s1; struct image_info info1, *info = &info1; - TaskState ts1, *ts = &ts1; + struct bsd_binprm bprm; + TaskState *ts; CPUArchState *env; CPUState *cpu; int optind, rv; @@ -618,12 +282,16 @@ int main(int argc, char **argv) const char *gdbstub = NULL; char **target_environ, **wrk; envlist_t *envlist = NULL; - bsd_type = target_openbsd; + char *argv0 = NULL; + + adjust_ssize(); if (argc <= 1) { usage(); } + save_proc_pathname(argv[0]); + error_init(argv[0]); module_call_init(MODULE_INIT_TRACE); qemu_init_cpu_list(); @@ -678,14 +346,17 @@ int main(int argc, char **argv) } } else if (!strcmp(r, "s")) { r = argv[optind++]; - rv = qemu_strtoul(r, &r, 0, &x86_stack_size); - if (rv < 0 || x86_stack_size <= 0) { + rv = qemu_strtoul(r, &r, 0, &target_dflssiz); + if (rv < 0 || target_dflssiz <= 0) { usage(); } if (*r == 'M') { - x86_stack_size *= MiB; + target_dflssiz *= 1024 * 1024; } else if (*r == 'k' || *r == 'K') { - x86_stack_size *= KiB; + target_dflssiz *= 1024; + } + if (target_dflssiz > target_maxssiz) { + usage(); } } else if (!strcmp(r, "L")) { interp_prefix = argv[optind++]; @@ -717,46 +388,41 @@ int main(int argc, char **argv) have_guest_base = true; } else if (!strcmp(r, "drop-ld-preload")) { (void) envlist_unsetenv(envlist, "LD_PRELOAD"); - } else if (!strcmp(r, "bsd")) { - if (!strcasecmp(argv[optind], "freebsd")) { - bsd_type = target_freebsd; - } else if (!strcasecmp(argv[optind], "netbsd")) { - bsd_type = target_netbsd; - } else if (!strcasecmp(argv[optind], "openbsd")) { - bsd_type = target_openbsd; - } else { - usage(); - } - optind++; + } else if (!strcmp(r, "seed")) { + seed_optarg = optarg; } else if (!strcmp(r, "singlestep")) { singlestep = 1; } else if (!strcmp(r, "strace")) { do_strace = 1; } else if (!strcmp(r, "trace")) { trace_opt_parse(optarg); + } else if (!strcmp(r, "0")) { + argv0 = argv[optind++]; } else { usage(); } } /* init debug */ - qemu_log_needs_buffers(); - qemu_set_log_filename(log_file, &error_fatal); - if (log_mask) { - int mask; - - mask = qemu_str_to_log_mask(log_mask); - if (!mask) { - qemu_print_log_usage(stdout); - exit(1); + { + int mask = 0; + if (log_mask) { + mask = qemu_str_to_log_mask(log_mask); + if (!mask) { + qemu_print_log_usage(stdout); + exit(1); + } } - qemu_set_log(mask); + qemu_set_log_filename_flags(log_file, mask, &error_fatal); } if (optind >= argc) { usage(); } filename = argv[optind]; + if (argv0) { + argv[optind] = argv0; + } if (!trace_init_backends()) { exit(1); @@ -766,6 +432,9 @@ int main(int argc, char **argv) /* Zero out regs */ memset(regs, 0, 
sizeof(struct target_pt_regs)); + /* Zero bsd params */ + memset(&bprm, 0, sizeof(bprm)); + /* Zero out image_info */ memset(info, 0, sizeof(struct image_info)); @@ -773,24 +442,11 @@ int main(int argc, char **argv) init_paths(interp_prefix); if (cpu_model == NULL) { -#if defined(TARGET_I386) -#ifdef TARGET_X86_64 - cpu_model = "qemu64"; -#else - cpu_model = "qemu32"; -#endif -#elif defined(TARGET_SPARC) -#ifdef TARGET_SPARC64 - cpu_model = "TI UltraSparc II"; -#else - cpu_model = "Fujitsu MB86904"; -#endif -#else - cpu_model = "any"; -#endif + cpu_model = TARGET_DEFAULT_CPU_MODEL; } cpu_type = parse_cpu_option(cpu_model); + /* init tcg before creating CPUs and to get qemu_host_page_size */ { AccelClass *ac = ACCEL_GET_CLASS(current_accel()); @@ -800,9 +456,7 @@ int main(int argc, char **argv) } cpu = cpu_create(cpu_type); env = cpu->env_ptr; -#if defined(TARGET_SPARC) || defined(TARGET_PPC) cpu_reset(cpu); -#endif thread_cpu = cpu; if (getenv("QEMU_STRACE")) { @@ -812,36 +466,31 @@ int main(int argc, char **argv) target_environ = envlist_to_environ(envlist, NULL); envlist_free(envlist); + if (reserved_va) { + mmap_next_start = reserved_va; + } + + { + Error *err = NULL; + if (seed_optarg != NULL) { + qemu_guest_random_seed_main(seed_optarg, &err); + } else { + qcrypto_init(&err); + } + if (err) { + error_reportf_err(err, "cannot initialize crypto: "); + exit(1); + } + } + /* * Now that page sizes are configured we can do * proper page alignment for guest_base. */ guest_base = HOST_PAGE_ALIGN(guest_base); - /* - * Read in mmap_min_addr kernel parameter. This value is used - * When loading the ELF image to determine whether guest_base - * is needed. - * - * When user has explicitly set the quest base, we skip this - * test. - */ - if (!have_guest_base) { - FILE *fp; - - fp = fopen("/proc/sys/vm/mmap_min_addr", "r"); - if (fp != NULL) { - unsigned long tmp; - if (fscanf(fp, "%lu", &tmp) == 1) { - mmap_min_addr = tmp; - qemu_log_mask(CPU_LOG_PAGE, "host mmap_min_addr=0x%lx\n", - mmap_min_addr); - } - fclose(fp); - } - } - - if (loader_exec(filename, argv + optind, target_environ, regs, info) != 0) { + if (loader_exec(filename, argv + optind, target_environ, regs, info, + &bprm) != 0) { printf("Error loading %s\n", filename); _exit(1); } @@ -853,22 +502,38 @@ int main(int argc, char **argv) g_free(target_environ); if (qemu_loglevel_mask(CPU_LOG_PAGE)) { - qemu_log("guest_base %p\n", (void *)guest_base); - log_page_dump("binary load"); + FILE *f = qemu_log_trylock(); + if (f) { + fprintf(f, "guest_base %p\n", (void *)guest_base); + fprintf(f, "page layout changed following binary load\n"); + page_dump(f); - qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk); - qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code); - qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n", - info->start_code); - qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n", - info->start_data); - qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data); - qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n", - info->start_stack); - qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk); - qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry); + fprintf(f, "start_brk 0x" TARGET_ABI_FMT_lx "\n", + info->start_brk); + fprintf(f, "end_code 0x" TARGET_ABI_FMT_lx "\n", + info->end_code); + fprintf(f, "start_code 0x" TARGET_ABI_FMT_lx "\n", + info->start_code); + fprintf(f, "start_data 0x" TARGET_ABI_FMT_lx "\n", + info->start_data); + fprintf(f, "end_data 0x" TARGET_ABI_FMT_lx "\n", + info->end_data); + fprintf(f, 
"start_stack 0x" TARGET_ABI_FMT_lx "\n", + info->start_stack); + fprintf(f, "brk 0x" TARGET_ABI_FMT_lx "\n", info->brk); + fprintf(f, "entry 0x" TARGET_ABI_FMT_lx "\n", info->entry); + + qemu_log_unlock(f); + } } + /* build Task State */ + ts = g_new0(TaskState, 1); + init_task_state(ts); + ts->info = info; + ts->bprm = &bprm; + cpu->opaque = ts; + target_set_brk(info->brk); syscall_init(); signal_init(); @@ -880,143 +545,7 @@ int main(int argc, char **argv) */ tcg_prologue_init(tcg_ctx); - /* build Task State */ - memset(ts, 0, sizeof(TaskState)); - init_task_state(ts); - ts->info = info; - cpu->opaque = ts; - -#if defined(TARGET_I386) - env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK; - env->hflags |= HF_PE_MASK | HF_CPL_MASK; - if (env->features[FEAT_1_EDX] & CPUID_SSE) { - env->cr[4] |= CR4_OSFXSR_MASK; - env->hflags |= HF_OSFXSR_MASK; - } -#ifndef TARGET_ABI32 - /* enable 64 bit mode if possible */ - if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) { - fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n"); - exit(1); - } - env->cr[4] |= CR4_PAE_MASK; - env->efer |= MSR_EFER_LMA | MSR_EFER_LME; - env->hflags |= HF_LMA_MASK; -#endif - - /* flags setup : we activate the IRQs by default as in user mode */ - env->eflags |= IF_MASK; - - /* linux register setup */ -#ifndef TARGET_ABI32 - env->regs[R_EAX] = regs->rax; - env->regs[R_EBX] = regs->rbx; - env->regs[R_ECX] = regs->rcx; - env->regs[R_EDX] = regs->rdx; - env->regs[R_ESI] = regs->rsi; - env->regs[R_EDI] = regs->rdi; - env->regs[R_EBP] = regs->rbp; - env->regs[R_ESP] = regs->rsp; - env->eip = regs->rip; -#else - env->regs[R_EAX] = regs->eax; - env->regs[R_EBX] = regs->ebx; - env->regs[R_ECX] = regs->ecx; - env->regs[R_EDX] = regs->edx; - env->regs[R_ESI] = regs->esi; - env->regs[R_EDI] = regs->edi; - env->regs[R_EBP] = regs->ebp; - env->regs[R_ESP] = regs->esp; - env->eip = regs->eip; -#endif - - /* linux interrupt setup */ -#ifndef TARGET_ABI32 - env->idt.limit = 511; -#else - env->idt.limit = 255; -#endif - env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1), - PROT_READ | PROT_WRITE, - MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); - idt_table = g2h_untagged(env->idt.base); - set_idt(0, 0); - set_idt(1, 0); - set_idt(2, 0); - set_idt(3, 3); - set_idt(4, 3); - set_idt(5, 0); - set_idt(6, 0); - set_idt(7, 0); - set_idt(8, 0); - set_idt(9, 0); - set_idt(10, 0); - set_idt(11, 0); - set_idt(12, 0); - set_idt(13, 0); - set_idt(14, 0); - set_idt(15, 0); - set_idt(16, 0); - set_idt(17, 0); - set_idt(18, 0); - set_idt(19, 0); - set_idt(0x80, 3); - - /* linux segment setup */ - { - uint64_t *gdt_table; - env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES, - PROT_READ | PROT_WRITE, - MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); - env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1; - gdt_table = g2h_untagged(env->gdt.base); -#ifdef TARGET_ABI32 - write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | - (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT)); -#else - /* 64 bit code segment */ - write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | - DESC_L_MASK | - (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT)); -#endif - write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | - (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT)); - } - - cpu_x86_load_seg(env, R_CS, __USER_CS); - cpu_x86_load_seg(env, R_SS, __USER_DS); -#ifdef 
TARGET_ABI32 - cpu_x86_load_seg(env, R_DS, __USER_DS); - cpu_x86_load_seg(env, R_ES, __USER_DS); - cpu_x86_load_seg(env, R_FS, __USER_DS); - cpu_x86_load_seg(env, R_GS, __USER_DS); - /* This hack makes Wine work... */ - env->segs[R_FS].selector = 0; -#else - cpu_x86_load_seg(env, R_DS, 0); - cpu_x86_load_seg(env, R_ES, 0); - cpu_x86_load_seg(env, R_FS, 0); - cpu_x86_load_seg(env, R_GS, 0); -#endif -#elif defined(TARGET_SPARC) - { - int i; - env->pc = regs->pc; - env->npc = regs->npc; - env->y = regs->y; - for (i = 0; i < 8; i++) { - env->gregs[i] = regs->u_regs[i]; - } - for (i = 0; i < 8; i++) { - env->regwptr[i] = regs->u_regs[i + 8]; - } - } -#else -#error unsupported target CPU -#endif + target_cpu_init(env, regs); if (gdbstub) { gdbserver_start(gdbstub); diff --git a/bsd-user/meson.build b/bsd-user/meson.build index 0369549340..5243122fc5 100644 --- a/bsd-user/meson.build +++ b/bsd-user/meson.build @@ -1,3 +1,11 @@ +if not have_bsd_user + subdir_done() +endif + +bsd_user_ss = ss.source_set() + +common_user_inc += include_directories('include') + bsd_user_ss.add(files( 'bsdload.c', 'elfload.c', @@ -5,6 +13,10 @@ bsd_user_ss.add(files( 'mmap.c', 'signal.c', 'strace.c', - 'syscall.c', 'uaccess.c', )) + +# Pull in the OS-specific build glue, if any +subdir(targetos) + +specific_ss.add_all(when: 'CONFIG_BSD_USER', if_true: bsd_user_ss) diff --git a/bsd-user/mips/target_arch_sysarch.h b/bsd-user/mips/target_arch_sysarch.h deleted file mode 100644 index 6da803a408..0000000000 --- a/bsd-user/mips/target_arch_sysarch.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * mips sysarch() system call emulation - * - * Copyright (c) 2013 Stacey D. Son - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . 
- */ - -#ifndef BSD_USER_ARCH_SYSARCH_H_ -#define BSD_USER_ARCH_SYSARCH_H_ - -#include "target_syscall.h" -#include "target_arch.h" - -static inline abi_long do_freebsd_arch_sysarch(CPUMIPSState *env, int op, - abi_ulong parms) -{ - int ret = 0; - - switch (op) { - case TARGET_MIPS_SET_TLS: - target_cpu_set_tls(env, parms); - break; - - case TARGET_MIPS_GET_TLS: - if (put_user(target_cpu_get_tls(env), parms, abi_ulong)) { - ret = -TARGET_EFAULT; - } - break; - - default: - ret = -TARGET_EINVAL; - break; - } - - return ret; -} - -static inline void do_freebsd_arch_print_sysarch( - const struct syscallname *name, abi_long arg1, abi_long arg2, - abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6) -{ - - switch (arg1) { - case TARGET_MIPS_SET_TLS: - gemu_log("%s(SET_TLS, 0x" TARGET_ABI_FMT_lx ")", name->name, arg2); - break; - - case TARGET_MIPS_GET_TLS: - gemu_log("%s(GET_TLS, 0x" TARGET_ABI_FMT_lx ")", name->name, arg2); - break; - - default: - gemu_log("UNKNOWN OP: %d, " TARGET_ABI_FMT_lx ")", (int)arg1, arg2); - } -} - -#endif /*!BSD_USER_ARCH_SYSARCH_H_ */ diff --git a/bsd-user/mips/target_syscall.h b/bsd-user/mips/target_syscall.h deleted file mode 100644 index aacc6ddf9f..0000000000 --- a/bsd-user/mips/target_syscall.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * mips system call definitions - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - */ -#ifndef _MIPS_SYSCALL_H_ -#define _MIPS_SYSCALL_H_ - -/* - * struct target_pt_regs defines the way the registers are stored on the stack - * during a system call. - */ - -struct target_pt_regs { - /* Saved main processor registers. */ - abi_ulong regs[32]; - - /* Saved special registers. */ - abi_ulong cp0_status; - abi_ulong lo; - abi_ulong hi; - abi_ulong cp0_badvaddr; - abi_ulong cp0_cause; - abi_ulong cp0_epc; -}; - -#if defined(TARGET_WORDS_BIGENDIAN) -#define UNAME_MACHINE "mips" -#else -#define UNAME_MACHINE "mipsel" -#endif - -#define TARGET_HW_MACHINE "mips" -#define TARGET_HW_MACHINE_ARCH UNAME_MACHINE - -/* sysarch() commands */ -#define TARGET_MIPS_SET_TLS 1 -#define TARGET_MIPS_GET_TLS 2 - -#endif /* !_MIPS_SYSCALL_H_ */ diff --git a/bsd-user/mips64/target_arch_sysarch.h b/bsd-user/mips64/target_arch_sysarch.h deleted file mode 100644 index e6f9c00d5f..0000000000 --- a/bsd-user/mips64/target_arch_sysarch.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * mips64 sysarch() system call emulation - * - * Copyright (c) 2013 Stacey D. Son - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - */ - -#ifndef BSD_USER_ARCH_SYSARCH_H_ -#define BSD_USER_ARCH_SYSARCH_H_ - -#include "target_syscall.h" -#include "target_arch.h" - -static inline abi_long do_freebsd_arch_sysarch(CPUMIPSState *env, int op, - abi_ulong parms) -{ - int ret = 0; - - switch (op) { - case TARGET_MIPS_SET_TLS: - target_cpu_set_tls(env, parms); - break; - - case TARGET_MIPS_GET_TLS: - if (put_user(target_cpu_get_tls(env), parms, abi_ulong)) { - ret = -TARGET_EFAULT; - } - break; - - default: - ret = -TARGET_EINVAL; - break; - } - - return ret; -} - -static inline void do_freebsd_arch_print_sysarch( - const struct syscallname *name, abi_long arg1, abi_long arg2, - abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6) -{ - - switch (arg1) { - case TARGET_MIPS_SET_TLS: - gemu_log("%s(SET_TLS, 0x" TARGET_ABI_FMT_lx ")", name->name, arg2); - break; - - case TARGET_MIPS_GET_TLS: - gemu_log("%s(GET_TLS, 0x" TARGET_ABI_FMT_lx ")", name->name, arg2); - break; - - default: - gemu_log("UNKNOWN OP: %d, " TARGET_ABI_FMT_lx ")", (int)arg1, arg2); - } -} - -#endif /*!BSD_USER_ARCH_SYSARCH_H_ */ diff --git a/bsd-user/mips64/target_syscall.h b/bsd-user/mips64/target_syscall.h deleted file mode 100644 index bf4c598b13..0000000000 --- a/bsd-user/mips64/target_syscall.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * mips64 system call definitions - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - */ -#ifndef _MIPS64_SYSCALL_H_ -#define _MIPS64_SYSCALL_H_ - -/* - * struct target_pt_regs defines the way the registers are stored on the stack - * during a system call. - */ - -struct target_pt_regs { - /* Saved main processor registers. */ - abi_ulong regs[32]; - - /* Saved special registers. */ - abi_ulong cp0_status; - abi_ulong lo; - abi_ulong hi; - abi_ulong cp0_badvaddr; - abi_ulong cp0_cause; - abi_ulong cp0_epc; -}; - - -#if defined(TARGET_WORDS_BIGENDIAN) -#define UNAME_MACHINE "mips64" -#else -#define UNAME_MACHINE "mips64el" -#endif - -#define TARGET_HW_MACHINE "mips" -#define TARGET_HW_MACHINE_ARCH UNAME_MACHINE - -/* sysarch() commands */ -#define TARGET_MIPS_SET_TLS 1 -#define TARGET_MIPS_GET_TLS 2 - -#endif /* !_MIPS64_SYSCALL_H_ */ diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c index 0ac1b92706..d6c5a344c9 100644 --- a/bsd-user/mmap.c +++ b/bsd-user/mmap.c @@ -19,11 +19,6 @@ #include "qemu/osdep.h" #include "qemu.h" -#include "qemu-common.h" -#include "bsd-mman.h" -#include "exec/exec-all.h" - -//#define DEBUG_MMAP static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER; static __thread int mmap_lock_count; @@ -69,14 +64,11 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot) abi_ulong end, host_start, host_end, addr; int prot1, ret; -#ifdef DEBUG_MMAP - printf("mprotect: start=0x" TARGET_FMT_lx - " len=0x" TARGET_FMT_lx " prot=%c%c%c\n", start, len, - prot & PROT_READ ? 'r' : '-', - prot & PROT_WRITE ? 
'w' : '-', - prot & PROT_EXEC ? 'x' : '-'); -#endif - + qemu_log_mask(CPU_LOG_PAGE, "mprotect: start=0x" TARGET_ABI_FMT_lx + " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len, + prot & PROT_READ ? 'r' : '-', + prot & PROT_WRITE ? 'w' : '-', + prot & PROT_EXEC ? 'x' : '-'); if ((start & ~TARGET_PAGE_MASK) != 0) return -EINVAL; len = TARGET_PAGE_ALIGN(len); @@ -134,7 +126,27 @@ error: return ret; } -/* map an incomplete host page */ +/* + * map an incomplete host page + * + * mmap_frag can be called with a valid fd, if flags doesn't contain one of + * MAP_ANON, MAP_STACK, MAP_GUARD. If we need to map a page in those cases, we + * pass fd == -1. However, if flags contains MAP_GUARD then MAP_ANON cannot be + * added. + * + * * If fd is valid (not -1) we want to map the pages with MAP_ANON. + * * If flags contains MAP_GUARD we don't want to add MAP_ANON because it + * will be rejected. See kern_mmap's enforcing of constraints for MAP_GUARD + * in sys/vm/vm_mmap.c. + * * If flags contains MAP_ANON it doesn't matter if we add it or not. + * * If flags contains MAP_STACK, mmap adds MAP_ANON when called so doesn't + * matter if we add it or not either. See enforcing of constraints for + * MAP_STACK in kern_mmap. + * + * Don't add MAP_ANON for the flags that use fd == -1 without specifying the + * flags directly, with the assumption that future flags that require fd == -1 + * will also not require MAP_ANON. + */ static int mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong end, int prot, int flags, int fd, abi_ulong offset) @@ -154,9 +166,9 @@ static int mmap_frag(abi_ulong real_start, } if (prot1 == 0) { - /* no page was there, so we allocate one */ + /* no page was there, so we allocate one. See also above. */ void *p = mmap(host_start, qemu_host_page_size, prot, - flags | MAP_ANON, -1, 0); + flags | ((fd != -1) ? MAP_ANON : 0), -1, 0); if (p == MAP_FAILED) return -1; prot1 = prot; @@ -164,7 +176,7 @@ static int mmap_frag(abi_ulong real_start, prot1 &= PAGE_BITS; prot_new = prot | prot1; - if (!(flags & MAP_ANON)) { + if (fd != -1) { /* msync() won't work here, so we return an error if write is possible while it is a shared mapping */ if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED && @@ -176,144 +188,379 @@ static int mmap_frag(abi_ulong real_start, mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE); /* read the corresponding file data */ - pread(fd, g2h_untagged(start), end - start, offset); + if (pread(fd, g2h_untagged(start), end - start, offset) == -1) { + return -1; + } /* put final protection */ if (prot_new != (prot1 | PROT_WRITE)) mprotect(host_start, qemu_host_page_size, prot_new); } else { - /* just update the protection */ if (prot_new != prot1) { mprotect(host_start, qemu_host_page_size, prot_new); } + if (prot_new & PROT_WRITE) { + memset(g2h_untagged(start), 0, end - start); + } } return 0; } -static abi_ulong mmap_next_start = 0x40000000; +#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64 +# define TASK_UNMAPPED_BASE (1ul << 38) +#else +# define TASK_UNMAPPED_BASE 0x40000000 +#endif +abi_ulong mmap_next_start = TASK_UNMAPPED_BASE; unsigned long last_brk; -/* find a free memory area of size 'size'. The search starts at - 'start'. If 'start' == 0, then a default start address is used. - Return -1 if error. -*/ -/* page_init() marks pages used by the host as reserved to be sure not - to use them. 
*/ -static abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size) +/* + * Subroutine of mmap_find_vma, used when we have pre-allocated a chunk of guest + * address space. + */ +static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size, + abi_ulong alignment) { - abi_ulong addr, addr1, addr_start; + abi_ulong addr; + abi_ulong end_addr; int prot; - unsigned long new_brk; + int looped = 0; - new_brk = (unsigned long)sbrk(0); - if (last_brk && last_brk < new_brk && last_brk == (target_ulong)last_brk) { - /* This is a hack to catch the host allocating memory with brk(). - If it uses mmap then we loose. - FIXME: We really want to avoid the host allocating memory in - the first place, and maybe leave some slack to avoid switching - to mmap. */ - page_set_flags(last_brk & TARGET_PAGE_MASK, - TARGET_PAGE_ALIGN(new_brk), - PAGE_RESERVED); + if (size > reserved_va) { + return (abi_ulong)-1; + } + + size = HOST_PAGE_ALIGN(size) + alignment; + end_addr = start + size; + if (end_addr > reserved_va) { + end_addr = reserved_va; + } + addr = end_addr - qemu_host_page_size; + + while (1) { + if (addr > end_addr) { + if (looped) { + return (abi_ulong)-1; + } + end_addr = reserved_va; + addr = end_addr - qemu_host_page_size; + looped = 1; + continue; + } + prot = page_get_flags(addr); + if (prot) { + end_addr = addr; + } + if (end_addr - addr >= size) { + break; + } + addr -= qemu_host_page_size; + } + + if (start == mmap_next_start) { + mmap_next_start = addr; + } + /* addr is sufficiently low to align it up */ + if (alignment != 0) { + addr = (addr + alignment) & ~(alignment - 1); + } + return addr; +} + +/* + * Find and reserve a free memory area of size 'size'. The search + * starts at 'start'. + * It must be called with mmap_lock() held. + * Return -1 if error. + */ +static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size, + abi_ulong alignment) +{ + void *ptr, *prev; + abi_ulong addr; + int flags; + int wrapped, repeat; + + /* If 'start' == 0, then a default start address is used. */ + if (start == 0) { + start = mmap_next_start; + } else { + start &= qemu_host_page_mask; } - last_brk = new_brk; size = HOST_PAGE_ALIGN(size); - start = start & qemu_host_page_mask; - addr = start; - if (addr == 0) - addr = mmap_next_start; - addr_start = addr; - for (;;) { - prot = 0; - for (addr1 = addr; addr1 < (addr + size); addr1 += TARGET_PAGE_SIZE) { - prot |= page_get_flags(addr1); - } - if (prot == 0) - break; - addr += qemu_host_page_size; - /* we found nothing */ - if (addr == addr_start) - return (abi_ulong)-1; + + if (reserved_va) { + return mmap_find_vma_reserved(start, size, + (alignment != 0 ? 1 << alignment : 0)); } - if (start == 0) - mmap_next_start = addr + size; - return addr; + + addr = start; + wrapped = repeat = 0; + prev = 0; + flags = MAP_ANON | MAP_PRIVATE; + if (alignment != 0) { + flags |= MAP_ALIGNED(alignment); + } + + for (;; prev = ptr) { + /* + * Reserve needed memory area to avoid a race. + * It should be discarded using: + * - mmap() with MAP_FIXED flag + * - mremap() with MREMAP_FIXED flag + * - shmat() with SHM_REMAP flag + */ + ptr = mmap(g2h_untagged(addr), size, PROT_NONE, + flags, -1, 0); + + /* ENOMEM, if host address space has no memory */ + if (ptr == MAP_FAILED) { + return (abi_ulong)-1; + } + + /* + * Count the number of sequential returns of the same address. + * This is used to modify the search algorithm below. + */ + repeat = (ptr == prev ? 
repeat + 1 : 0); + + if (h2g_valid(ptr + size - 1)) { + addr = h2g(ptr); + + if ((addr & ~TARGET_PAGE_MASK) == 0) { + /* Success. */ + if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) { + mmap_next_start = addr + size; + } + return addr; + } + + /* The address is not properly aligned for the target. */ + switch (repeat) { + case 0: + /* + * Assume the result that the kernel gave us is the + * first with enough free space, so start again at the + * next higher target page. + */ + addr = TARGET_PAGE_ALIGN(addr); + break; + case 1: + /* + * Sometimes the kernel decides to perform the allocation + * at the top end of memory instead. + */ + addr &= TARGET_PAGE_MASK; + break; + case 2: + /* Start over at low memory. */ + addr = 0; + break; + default: + /* Fail. This unaligned block must the last. */ + addr = -1; + break; + } + } else { + /* + * Since the result the kernel gave didn't fit, start + * again at low memory. If any repetition, fail. + */ + addr = (repeat ? -1 : 0); + } + + /* Unmap and try again. */ + munmap(ptr, size); + + /* ENOMEM if we checked the whole of the target address space. */ + if (addr == (abi_ulong)-1) { + return (abi_ulong)-1; + } else if (addr == 0) { + if (wrapped) { + return (abi_ulong)-1; + } + wrapped = 1; + /* + * Don't actually use 0 when wrapping, instead indicate + * that we'd truly like an allocation in low memory. + */ + addr = TARGET_PAGE_SIZE; + } else if (wrapped && addr >= start) { + return (abi_ulong)-1; + } + } +} + +abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size) +{ + return mmap_find_vma_aligned(start, size, 0); } /* NOTE: all the constants are the HOST ones */ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, - int flags, int fd, abi_ulong offset) + int flags, int fd, off_t offset) { abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len; - unsigned long host_start; mmap_lock(); -#ifdef DEBUG_MMAP - { - printf("mmap: start=0x" TARGET_FMT_lx - " len=0x" TARGET_FMT_lx " prot=%c%c%c flags=", - start, len, - prot & PROT_READ ? 'r' : '-', - prot & PROT_WRITE ? 'w' : '-', - prot & PROT_EXEC ? 'x' : '-'); - if (flags & MAP_FIXED) - printf("MAP_FIXED "); - if (flags & MAP_ANON) - printf("MAP_ANON "); - switch (flags & TARGET_BSD_MAP_FLAGMASK) { - case MAP_PRIVATE: - printf("MAP_PRIVATE "); - break; - case MAP_SHARED: - printf("MAP_SHARED "); - break; - default: - printf("[MAP_FLAGMASK=0x%x] ", flags & TARGET_BSD_MAP_FLAGMASK); - break; + if (qemu_loglevel_mask(CPU_LOG_PAGE)) { + qemu_log("mmap: start=0x" TARGET_ABI_FMT_lx + " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=", + start, len, + prot & PROT_READ ? 'r' : '-', + prot & PROT_WRITE ? 'w' : '-', + prot & PROT_EXEC ? 
'x' : '-'); + if (flags & MAP_ALIGNMENT_MASK) { + qemu_log("MAP_ALIGNED(%u) ", + (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT); } - printf("fd=%d offset=" TARGET_FMT_lx "\n", fd, offset); + if (flags & MAP_GUARD) { + qemu_log("MAP_GUARD "); + } + if (flags & MAP_FIXED) { + qemu_log("MAP_FIXED "); + } + if (flags & MAP_ANON) { + qemu_log("MAP_ANON "); + } + if (flags & MAP_EXCL) { + qemu_log("MAP_EXCL "); + } + if (flags & MAP_PRIVATE) { + qemu_log("MAP_PRIVATE "); + } + if (flags & MAP_SHARED) { + qemu_log("MAP_SHARED "); + } + if (flags & MAP_NOCORE) { + qemu_log("MAP_NOCORE "); + } + if (flags & MAP_STACK) { + qemu_log("MAP_STACK "); + } + qemu_log("fd=%d offset=0x%lx\n", fd, offset); + } + + if ((flags & MAP_ANON) && fd != -1) { + errno = EINVAL; + goto fail; + } + if (flags & MAP_STACK) { + if ((fd != -1) || ((prot & (PROT_READ | PROT_WRITE)) != + (PROT_READ | PROT_WRITE))) { + errno = EINVAL; + goto fail; + } + } + if ((flags & MAP_GUARD) && (prot != PROT_NONE || fd != -1 || + offset != 0 || (flags & (MAP_SHARED | MAP_PRIVATE | + /* MAP_PREFAULT | */ /* MAP_PREFAULT not in mman.h */ + MAP_PREFAULT_READ | MAP_ANON | MAP_STACK)) != 0)) { + errno = EINVAL; + goto fail; } -#endif if (offset & ~TARGET_PAGE_MASK) { errno = EINVAL; goto fail; } - len = TARGET_PAGE_ALIGN(len); - if (len == 0) - goto the_end; - real_start = start & qemu_host_page_mask; + if (len == 0) { + errno = EINVAL; + goto fail; + } + /* Check for overflows */ + len = TARGET_PAGE_ALIGN(len); + if (len == 0) { + errno = ENOMEM; + goto fail; + } + + real_start = start & qemu_host_page_mask; + host_offset = offset & qemu_host_page_mask; + + /* + * If the user is asking for the kernel to find a location, do that + * before we truncate the length for mapping files below. + */ if (!(flags & MAP_FIXED)) { - abi_ulong mmap_start; - void *p; - host_offset = offset & qemu_host_page_mask; host_len = len + offset - host_offset; host_len = HOST_PAGE_ALIGN(host_len); - mmap_start = mmap_find_vma(real_start, host_len); - if (mmap_start == (abi_ulong)-1) { + if ((flags & MAP_ALIGNMENT_MASK) != 0) + start = mmap_find_vma_aligned(real_start, host_len, + (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT); + else + start = mmap_find_vma(real_start, host_len); + if (start == (abi_ulong)-1) { errno = ENOMEM; goto fail; } - /* Note: we prefer to control the mapping address. It is - especially important if qemu_host_page_size > - qemu_real_host_page_size */ - p = mmap(g2h_untagged(mmap_start), - host_len, prot, flags | MAP_FIXED, fd, host_offset); + } + + /* + * When mapping files into a memory area larger than the file, accesses + * to pages beyond the file size will cause a SIGBUS. + * + * For example, if mmaping a file of 100 bytes on a host with 4K pages + * emulating a target with 8K pages, the target expects to be able to + * access the first 8K. But the host will trap us on any access beyond + * 4K. + * + * When emulating a target with a larger page-size than the hosts, we + * may need to truncate file maps at EOF and add extra anonymous pages + * up to the targets page boundary. + */ + + if ((qemu_real_host_page_size() < qemu_host_page_size) && fd != -1) { + struct stat sb; + + if (fstat(fd, &sb) == -1) { + goto fail; + } + + /* Are we trying to create a map beyond EOF?. */ + if (offset + len > sb.st_size) { + /* + * If so, truncate the file map at eof aligned with + * the hosts real pagesize. Additional anonymous maps + * will be created beyond EOF. 
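+ *
+ * Worked example (editorial, assuming 4 KiB real host pages): with
+ * sb.st_size == 100 and offset == 0, REAL_HOST_PAGE_ALIGN(100) below
+ * yields 4096, so the file mapping covers exactly the one host page the
+ * host can back and the rest of the target page is left to the
+ * anonymous mappings mentioned above.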
+ */ + len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset); + } + } + + if (!(flags & MAP_FIXED)) { + unsigned long host_start; + void *p; + + host_len = len + offset - host_offset; + host_len = HOST_PAGE_ALIGN(host_len); + + /* + * Note: we prefer to control the mapping address. It is + * especially important if qemu_host_page_size > + * qemu_real_host_page_size + */ + p = mmap(g2h_untagged(start), host_len, prot, + flags | MAP_FIXED | ((fd != -1) ? MAP_ANON : 0), -1, 0); if (p == MAP_FAILED) goto fail; /* update start so that it points to the file position at 'offset' */ host_start = (unsigned long)p; - if (!(flags & MAP_ANON)) + if (fd != -1) { + p = mmap(g2h_untagged(start), len, prot, + flags | MAP_FIXED, fd, host_offset); + if (p == MAP_FAILED) { + munmap(g2h_untagged(start), host_len); + goto fail; + } host_start += offset - host_offset; + } start = h2g(host_start); } else { - int flg; - target_ulong addr; - if (start & ~TARGET_PAGE_MASK) { errno = EINVAL; goto fail; @@ -321,20 +568,26 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, end = start + len; real_end = HOST_PAGE_ALIGN(end); - for (addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) { - flg = page_get_flags(addr); - if (flg & PAGE_RESERVED) { - errno = ENXIO; - goto fail; - } + /* + * Test if requested memory area fits target address space + * It can fail only on 64-bit host with 32-bit target. + * On any other target/host host mmap() handles this error correctly. + */ + if (!guest_range_valid_untagged(start, len)) { + errno = EINVAL; + goto fail; } - /* worst case: we cannot map the file because the offset is not - aligned, so we read it */ - if (!(flags & MAP_ANON) && + /* + * worst case: we cannot map the file because the offset is not + * aligned, so we read it + */ + if (fd != -1 && (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) { - /* msync() won't work here, so we return an error if write is - possible while it is a shared mapping */ + /* + * msync() won't work here, so we return an error if write is + * possible while it is a shared mapping + */ if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED && (prot & PROT_WRITE)) { errno = EINVAL; @@ -345,17 +598,22 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, -1, 0); if (retaddr == -1) goto fail; - pread(fd, g2h_untagged(start), len, offset); + if (pread(fd, g2h_untagged(start), len, offset) == -1) { + goto fail; + } if (!(prot & PROT_WRITE)) { ret = target_mprotect(start, len, prot); - if (ret != 0) { - start = ret; - goto the_end; - } + assert(ret == 0); } goto the_end; } + /* Reject the mapping if any page within the range is mapped */ + if ((flags & MAP_EXCL) && page_check_range(start, len, 0) < 0) { + errno = EINVAL; + goto fail; + } + /* handle the start of the mapping */ if (start > real_start) { if (real_end == real_start + qemu_host_page_size) { @@ -375,7 +633,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, /* handle the end of the mapping */ if (end < real_end) { ret = mmap_frag(real_end - qemu_host_page_size, - real_end - qemu_host_page_size, real_end, + real_end - qemu_host_page_size, end, prot, flags, fd, offset + real_end - qemu_host_page_size - start); if (ret == -1) @@ -401,7 +659,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, page_set_flags(start, start + len, prot | PAGE_VALID); the_end: #ifdef DEBUG_MMAP - printf("ret=0x" TARGET_FMT_lx "\n", start); + printf("ret=0x" TARGET_ABI_FMT_lx "\n", start); page_dump(stdout); printf("\n"); #endif @@ -412,13 
+670,57 @@ fail: return -1; } +static void mmap_reserve(abi_ulong start, abi_ulong size) +{ + abi_ulong real_start; + abi_ulong real_end; + abi_ulong addr; + abi_ulong end; + int prot; + + real_start = start & qemu_host_page_mask; + real_end = HOST_PAGE_ALIGN(start + size); + end = start + size; + if (start > real_start) { + /* handle host page containing start */ + prot = 0; + for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) { + prot |= page_get_flags(addr); + } + if (real_end == real_start + qemu_host_page_size) { + for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) { + prot |= page_get_flags(addr); + } + end = real_end; + } + if (prot != 0) { + real_start += qemu_host_page_size; + } + } + if (end < real_end) { + prot = 0; + for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) { + prot |= page_get_flags(addr); + } + if (prot != 0) { + real_end -= qemu_host_page_size; + } + } + if (real_start != real_end) { + mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE, + MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + } +} + int target_munmap(abi_ulong start, abi_ulong len) { abi_ulong end, real_start, real_end, addr; int prot, ret; #ifdef DEBUG_MMAP - printf("munmap: start=0x%lx len=0x%lx\n", start, len); + printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x" + TARGET_ABI_FMT_lx "\n", + start, len); #endif if (start & ~TARGET_PAGE_MASK) return -EINVAL; @@ -457,11 +759,16 @@ int target_munmap(abi_ulong start, abi_ulong len) ret = 0; /* unmap what we can */ if (real_start < real_end) { - ret = munmap(g2h_untagged(real_start), real_end - real_start); + if (reserved_va) { + mmap_reserve(real_start, real_end - real_start); + } else { + ret = munmap(g2h_untagged(real_start), real_end - real_start); + } } - if (ret == 0) + if (ret == 0) { page_set_flags(start, start + len, 0); + } mmap_unlock(); return ret; } diff --git a/bsd-user/netbsd/host-os.h b/bsd-user/netbsd/host-os.h new file mode 100644 index 0000000000..7c14b1ea78 --- /dev/null +++ b/bsd-user/netbsd/host-os.h @@ -0,0 +1,25 @@ +/* + * NetBSD host dependent code and definitions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef HOST_OS_H +#define HOST_OS_H + +#define HOST_DEFAULT_BSD_TYPE target_netbsd + +#endif /* HOST_OS_H */ diff --git a/bsd-user/netbsd/target_os_elf.h b/bsd-user/netbsd/target_os_elf.h new file mode 100644 index 0000000000..2f3cb20871 --- /dev/null +++ b/bsd-user/netbsd/target_os_elf.h @@ -0,0 +1,147 @@ +/* + * netbsd ELF definitions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_OS_ELF_H +#define TARGET_OS_ELF_H + +#include "target_arch_elf.h" +#include "elf.h" + +/* this flag is uneffective under linux too, should be deleted */ +#ifndef MAP_DENYWRITE +#define MAP_DENYWRITE 0 +#endif + +/* should probably go in elf.h */ +#ifndef ELIBBAD +#define ELIBBAD 80 +#endif + +#ifndef ELF_PLATFORM +#define ELF_PLATFORM (NULL) +#endif + +#ifndef ELF_HWCAP +#define ELF_HWCAP 0 +#endif + +#ifdef TARGET_ABI32 +#undef ELF_CLASS +#define ELF_CLASS ELFCLASS32 +#undef bswaptls +#define bswaptls(ptr) bswap32s(ptr) +#endif + +/* max code+data+bss space allocated to elf interpreter */ +#define INTERP_MAP_SIZE (32 * 1024 * 1024) + +/* max code+data+bss+brk space allocated to ET_DYN executables */ +#define ET_DYN_MAP_SIZE (128 * 1024 * 1024) + +/* Necessary parameters */ +#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE +#define TARGET_ELF_PAGESTART(_v) ((_v) & \ + ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE - 1)) +#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE - 1)) + +#define DLINFO_ITEMS 12 + +static abi_ulong target_create_elf_tables(abi_ulong p, int argc, int envc, + abi_ulong stringp, + struct elfhdr *exec, + abi_ulong load_addr, + abi_ulong load_bias, + abi_ulong interp_load_addr, + struct image_info *info) +{ + abi_ulong sp; + int size; + abi_ulong u_platform; + const char *k_platform; + const int n = sizeof(elf_addr_t); + + sp = p; + u_platform = 0; + k_platform = ELF_PLATFORM; + if (k_platform) { + size_t len = strlen(k_platform) + 1; + sp -= (len + n - 1) & ~(n - 1); + u_platform = sp; + /* FIXME - check return value of memcpy_to_target() for failure */ + memcpy_to_target(sp, k_platform, len); + } + /* + * Force 16 byte _final_ alignment here for generality. + */ + sp = sp & ~(abi_ulong)15; + size = (DLINFO_ITEMS + 1) * 2; + if (k_platform) { + size += 2; + } +#ifdef DLINFO_ARCH_ITEMS + size += DLINFO_ARCH_ITEMS * 2; +#endif + size += envc + argc + 2; + size += 1; /* argc itself */ + size *= n; + if (size & 15) { + sp -= 16 - (size & 15); + } + + /* + * NetBSD defines elf_addr_t as Elf32_Off / Elf64_Off + */ +#define NEW_AUX_ENT(id, val) do { \ + sp -= n; put_user_ual(val, sp); \ + sp -= n; put_user_ual(id, sp); \ + } while (0) + + NEW_AUX_ENT(AT_NULL, 0); + + /* There must be exactly DLINFO_ITEMS entries here. 
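+     * (Editorial note:) DLINFO_ITEMS also feeds the size computation
+     * above (size = (DLINFO_ITEMS + 1) * 2, in elf_addr_t units), so a
+     * NEW_AUX_ENT() added or removed below without updating it would
+     * throw off the 16-byte alignment pre-computed for the final sp.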
*/ + NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff)); + NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof(struct elf_phdr))); + NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum)); + NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE)); + NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr)); + NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0); + NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry); + NEW_AUX_ENT(AT_UID, (abi_ulong)getuid()); + NEW_AUX_ENT(AT_EUID, (abi_ulong)geteuid()); + NEW_AUX_ENT(AT_GID, (abi_ulong)getgid()); + NEW_AUX_ENT(AT_EGID, (abi_ulong)getegid()); + NEW_AUX_ENT(AT_HWCAP, (abi_ulong)ELF_HWCAP); + NEW_AUX_ENT(AT_CLKTCK, (abi_ulong)sysconf(_SC_CLK_TCK)); + if (k_platform) { + NEW_AUX_ENT(AT_PLATFORM, u_platform); + } +#ifdef ARCH_DLINFO + /* + * ARCH_DLINFO must come last so platform specific code can enforce + * special alignment requirements on the AUXV if necessary (eg. PPC). + */ + ARCH_DLINFO; +#endif +#undef NEW_AUX_ENT + + sp = loader_build_argptr(envc, argc, sp, stringp); + return sp; +} + +#endif /* TARGET_OS_ELF_H */ diff --git a/bsd-user/netbsd/target_os_siginfo.h b/bsd-user/netbsd/target_os_siginfo.h new file mode 100644 index 0000000000..eb57e0a309 --- /dev/null +++ b/bsd-user/netbsd/target_os_siginfo.h @@ -0,0 +1,82 @@ +#ifndef TARGET_OS_SIGINFO_H +#define TARGET_OS_SIGINFO_H + +#define TARGET_NSIG 32 /* counting 0; could be 33 (mask is 1-32) */ +#define TARGET_NSIG_BPW (sizeof(uint32_t) * 8) +#define TARGET_NSIG_WORDS (TARGET_NSIG / TARGET_NSIG_BPW) + +/* this struct defines a stack used during syscall handling */ +typedef struct target_sigaltstack { + abi_long ss_sp; + abi_ulong ss_size; + abi_long ss_flags; +} target_stack_t; + +typedef struct { + uint32_t __bits[TARGET_NSIG_WORDS]; +} target_sigset_t + +struct target_sigaction { + abi_ulong _sa_handler; + int32_t sa_flags; + target_sigset_t sa_mask; +}; + +/* Compare to sys/siginfo.h */ +typedef union target_sigval { + int sival_int; + abi_ulong sival_ptr; +} target_sigval_t; + +struct target_ksiginfo { + int32_t _signo; + int32_t _code; + int32_t _errno; +#if TARGET_ABI_BITS == 64 + int32_t _pad; +#endif + union { + struct { + int32_t _pid; + int32_t _uid; + target_sigval_t _value; + } _rt; + + struct { + int32_t _pid; + int32_t _uid; + int32_t _struct; + /* clock_t _utime; */ + /* clock_t _stime; */ + } _child; + + struct { + abi_ulong _addr; + int32_t _trap; + } _fault; + + struct { + long _band; + int _fd; + } _poll; + } _reason; +}; + +typedef union target_siginfo { + int8_t si_pad[128]; + struct target_ksiginfo _info; +} target_siginfo_t; + +#define target_si_signo _info._signo +#define target_si_code _info._code +#define target_si_errno _info._errno +#define target_si_addr _info._reason._fault._addr + +#define TARGET_SEGV_MAPERR 1 +#define TARGET_SEGV_ACCERR 2 + +#define TARGET_TRAP_BRKPT 1 +#define TARGET_TRAP_TRACE 2 + + +#endif /* TARGET_OS_SIGINFO_H */ diff --git a/bsd-user/netbsd/target_os_signal.h b/bsd-user/netbsd/target_os_signal.h new file mode 100644 index 0000000000..4ee4f768e0 --- /dev/null +++ b/bsd-user/netbsd/target_os_signal.h @@ -0,0 +1,69 @@ +#ifndef TARGET_OS_SIGNAL_H +#define TARGET_OS_SIGNAL_H + +#include "target_os_siginfo.h" +#include "target_arch_signal.h" + +#define TARGET_SIGHUP 1 /* hangup */ +#define TARGET_SIGINT 2 /* interrupt */ +#define TARGET_SIGQUIT 3 /* quit */ +#define TARGET_SIGILL 4 /* illegal instruction (not reset when caught) */ +#define TARGET_SIGTRAP 5 /* trace trap (not reset when caught) */ +#define TARGET_SIGABRT 6 /* abort() */ +#define TARGET_SIGIOT SIGABRT 
/* compatibility */ +#define TARGET_SIGEMT 7 /* EMT instruction */ +#define TARGET_SIGFPE 8 /* floating point exception */ +#define TARGET_SIGKILL 9 /* kill (cannot be caught or ignored) */ +#define TARGET_SIGBUS 10 /* bus error */ +#define TARGET_SIGSEGV 11 /* segmentation violation */ +#define TARGET_SIGSYS 12 /* bad argument to system call */ +#define TARGET_SIGPIPE 13 /* write on a pipe with no one to read it */ +#define TARGET_SIGALRM 14 /* alarm clock */ +#define TARGET_SIGTERM 15 /* software termination signal from kill */ +#define TARGET_SIGURG 16 /* urgent condition on IO channel */ +#define TARGET_SIGSTOP 17 /* sendable stop signal not from tty */ +#define TARGET_SIGTSTP 18 /* stop signal from tty */ +#define TARGET_SIGCONT 19 /* continue a stopped process */ +#define TARGET_SIGCHLD 20 /* to parent on child stop or exit */ +#define TARGET_SIGTTIN 21 /* to readers pgrp upon background tty read */ +#define TARGET_SIGTTOU 22 /* like TTIN for out if (tp->t_local<OSTOP) */ +#define TARGET_SIGIO 23 /* input/output possible signal */ +#define TARGET_SIGXCPU 24 /* exceeded CPU time limit */ +#define TARGET_SIGXFSZ 25 /* exceeded file size limit */ +#define TARGET_SIGVTALRM 26 /* virtual time alarm */ +#define TARGET_SIGPROF 27 /* profiling time alarm */ +#define TARGET_SIGWINCH 28 /* window size changes */ +#define TARGET_SIGINFO 29 /* information request */ +#define TARGET_SIGUSR1 30 /* user defined signal 1 */ +#define TARGET_SIGUSR2 31 /* user defined signal 2 */ + +/* + * Language spec says we must list exactly one parameter, even though we + * actually supply three. Ugh! + */ +#define TARGET_SIG_DFL ((void (*)(int))0) +#define TARGET_SIG_IGN ((void (*)(int))1) +#define TARGET_SIG_ERR ((void (*)(int))-1) + +#define TARGET_SA_ONSTACK 0x0001 /* take signal on signal stack */ +#define TARGET_SA_RESTART 0x0002 /* restart system on signal return */ +#define TARGET_SA_RESETHAND 0x0004 /* reset to SIG_DFL when taking signal */ +#define TARGET_SA_NODEFER 0x0010 /* don't mask the signal we're delivering */ +#define TARGET_SA_NOCLDWAIT 0x0020 /* don't create zombies (assign to pid 1) */ +#define TARGET_SA_USERTRAMP 0x0100 /* do not bounce off kernel's sigtramp */ +#define TARGET_SA_NOCLDSTOP 0x0008 /* do not generate SIGCHLD on child stop */ +#define TARGET_SA_SIGINFO 0x0040 /* generate siginfo_t */ + +/* + * Flags for sigprocmask: + */ +#define TARGET_SIG_BLOCK 1 /* block specified signal set */ +#define TARGET_SIG_UNBLOCK 2 /* unblock specified signal set */ +#define TARGET_SIG_SETMASK 3 /* set specified signal set */ + +#define TARGET_BADSIG SIG_ERR + +#define TARGET_SS_ONSTACK 0x0001 /* take signals on alternate stack */ +#define TARGET_SS_DISABLE 0x0004 /* disable taking signals on alternate stack */ + +#endif /* TARGET_OS_SIGNAL_H */ diff --git a/bsd-user/netbsd/target_os_stack.h b/bsd-user/netbsd/target_os_stack.h new file mode 100644 index 0000000000..8349e9149b --- /dev/null +++ b/bsd-user/netbsd/target_os_stack.h @@ -0,0 +1,56 @@ +/* + * NetBSD setup_initial_stack() implementation. + * + * Copyright (c) 2013-14 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_OS_STACK_H +#define TARGET_OS_STACK_H + +#include "target_arch_sigtramp.h" + +static inline int setup_initial_stack(struct bsd_binprm *bprm, abi_ulong *p, + abi_ulong *stringp) +{ + int i; + abi_ulong stack_base; + + stack_base = (target_stkbas + target_stksiz) - + MAX_ARG_PAGES * TARGET_PAGE_SIZE; + if (p) { + *p = stack_base; + } + if (stringp) { + *stringp = stack_base; + } + + for (i = 0; i < MAX_ARG_PAGES; i++) { + if (bprm->page[i]) { + info->rss++; + if (!memcpy_to_target(stack_base, bprm->page[i], + TARGET_PAGE_SIZE)) { + errno = EFAULT; + return -1; + } + g_free(bprm->page[i]); + } + stack_base += TARGET_PAGE_SIZE; + } + + return 0; +} + +#endif /* TARGET_OS_STACK_H */ diff --git a/bsd-user/netbsd/target_os_thread.h b/bsd-user/netbsd/target_os_thread.h new file mode 100644 index 0000000000..8ccfa16e4b --- /dev/null +++ b/bsd-user/netbsd/target_os_thread.h @@ -0,0 +1,25 @@ +/* + * NetBSD thread dependent code and definitions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_OS_THREAD_H +#define TARGET_OS_THREAD_H + +#include "target_arch_thread.h" + +#endif /* TARGET_OS_THREAD_H */ diff --git a/bsd-user/openbsd/host-os.h b/bsd-user/openbsd/host-os.h new file mode 100644 index 0000000000..b9222335d4 --- /dev/null +++ b/bsd-user/openbsd/host-os.h @@ -0,0 +1,25 @@ +/* + * OpenBSD host dependent code and definitions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef HOST_OS_H +#define HOST_OS_H + +#define HOST_DEFAULT_BSD_TYPE target_openbsd + +#endif /* HOST_OS_H */ diff --git a/bsd-user/openbsd/target_os_elf.h b/bsd-user/openbsd/target_os_elf.h new file mode 100644 index 0000000000..6dca9c5a85 --- /dev/null +++ b/bsd-user/openbsd/target_os_elf.h @@ -0,0 +1,147 @@ +/* + * openbsd ELF definitions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_OS_ELF_H +#define TARGET_OS_ELF_H + +#include "target_arch_elf.h" +#include "elf.h" + +/* this flag is uneffective under linux too, should be deleted */ +#ifndef MAP_DENYWRITE +#define MAP_DENYWRITE 0 +#endif + +/* should probably go in elf.h */ +#ifndef ELIBBAD +#define ELIBBAD 80 +#endif + +#ifndef ELF_PLATFORM +#define ELF_PLATFORM (NULL) +#endif + +#ifndef ELF_HWCAP +#define ELF_HWCAP 0 +#endif + +#ifdef TARGET_ABI32 +#undef ELF_CLASS +#define ELF_CLASS ELFCLASS32 +#undef bswaptls +#define bswaptls(ptr) bswap32s(ptr) +#endif + +/* max code+data+bss space allocated to elf interpreter */ +#define INTERP_MAP_SIZE (32 * 1024 * 1024) + +/* max code+data+bss+brk space allocated to ET_DYN executables */ +#define ET_DYN_MAP_SIZE (128 * 1024 * 1024) + +/* Necessary parameters */ +#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE +#define TARGET_ELF_PAGESTART(_v) ((_v) & \ + ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE - 1)) +#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE - 1)) + +#define DLINFO_ITEMS 12 + +static abi_ulong target_create_elf_tables(abi_ulong p, int argc, int envc, + abi_ulong stringp, + struct elfhdr *exec, + abi_ulong load_addr, + abi_ulong load_bias, + abi_ulong interp_load_addr, + struct image_info *info) +{ + abi_ulong sp; + int size; + abi_ulong u_platform; + const char *k_platform; + const int n = sizeof(elf_addr_t); + + sp = p; + u_platform = 0; + k_platform = ELF_PLATFORM; + if (k_platform) { + size_t len = strlen(k_platform) + 1; + sp -= (len + n - 1) & ~(n - 1); + u_platform = sp; + /* FIXME - check return value of memcpy_to_target() for failure */ + memcpy_to_target(sp, k_platform, len); + } + /* + * Force 16 byte _final_ alignment here for generality. + */ + sp = sp & ~(abi_ulong)15; + size = (DLINFO_ITEMS + 1) * 2; + if (k_platform) { + size += 2; + } +#ifdef DLINFO_ARCH_ITEMS + size += DLINFO_ARCH_ITEMS * 2; +#endif + size += envc + argc + 2; + size += 1; /* argc itself */ + size *= n; + if (size & 15) { + sp -= 16 - (size & 15); + } + + /* + * OpenBSD defines elf_addr_t as Elf32_Off / Elf64_Off + */ +#define NEW_AUX_ENT(id, val) do { \ + sp -= n; put_user_ual(val, sp); \ + sp -= n; put_user_ual(id, sp); \ + } while (0) + + NEW_AUX_ENT(AT_NULL, 0); + + /* There must be exactly DLINFO_ITEMS entries here. 
*/ + NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff)); + NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof(struct elf_phdr))); + NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum)); + NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE)); + NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr)); + NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0); + NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry); + NEW_AUX_ENT(AT_UID, (abi_ulong)getuid()); + NEW_AUX_ENT(AT_EUID, (abi_ulong)geteuid()); + NEW_AUX_ENT(AT_GID, (abi_ulong)getgid()); + NEW_AUX_ENT(AT_EGID, (abi_ulong)getegid()); + NEW_AUX_ENT(AT_HWCAP, (abi_ulong)ELF_HWCAP); + NEW_AUX_ENT(AT_CLKTCK, (abi_ulong)sysconf(_SC_CLK_TCK)); + if (k_platform) { + NEW_AUX_ENT(AT_PLATFORM, u_platform); + } +#ifdef ARCH_DLINFO + /* + * ARCH_DLINFO must come last so platform specific code can enforce + * special alignment requirements on the AUXV if necessary (eg. PPC). + */ + ARCH_DLINFO; +#endif +#undef NEW_AUX_ENT + + sp = loader_build_argptr(envc, argc, sp, stringp); + return sp; +} + +#endif /* TARGET_OS_ELF_H */ diff --git a/bsd-user/openbsd/target_os_siginfo.h b/bsd-user/openbsd/target_os_siginfo.h new file mode 100644 index 0000000000..732009a820 --- /dev/null +++ b/bsd-user/openbsd/target_os_siginfo.h @@ -0,0 +1,82 @@ +#ifndef TARGET_OS_SIGINFO_H +#define TARGET_OS_SIGINFO_H + +#define TARGET_NSIG 32 /* counting 0; could be 33 (mask is 1-32) */ +#define TARGET_NSIG_BPW (sizeof(uint32_t) * 8) +#define TARGET_NSIG_WORDS (TARGET_NSIG / TARGET_NSIG_BPW) + +/* this struct defines a stack used during syscall handling */ +typedef struct target_sigaltstack { + abi_long ss_sp; + abi_ulong ss_size; + abi_long ss_flags; +} target_stack_t; + +typedef struct { + uint32_t __bits[TARGET_NSIG_WORDS]; +} target_sigset_t + +struct target_sigaction { + abi_ulong _sa_handler; + int32_t sa_flags; + target_sigset_t sa_mask; +}; + +/* Compare to sys/siginfo.h */ +typedef union target_sigval { + int sival_int; + abi_ulong sival_ptr; +} target_sigval_t; + +struct target_ksiginfo { + int32_t _signo; + int32_t _code; + int32_t _errno; +#if TARGET_ABI_BITS == 64 + int32_t _pad; +#endif + union { + struct { + int32_t _pid; + int32_t _uid; + target_sigval_t _value; + } _rt; + + struct { + int32_t _pid; + int32_t _uid; + int32_t _struct; + /* clock_t _utime; */ + /* clock_t _stime; */ + } _child; + + struct { + abi_ulong _addr; + int32_t _trap; + } _fault; + + struct { + long _band; + int _fd; + } _poll; + } _reason; +}; + +typedef union target_siginfo { + int8_t si_pad[128]; + struct target_ksiginfo _info; +} target_siginfo_t; + +#define target_si_signo _info._signo +#define target_si_code _info._code +#define target_si_errno _info._errno +#define target_si_addr _info._reason._fault._addr + +#define TARGET_SEGV_MAPERR 1 +#define TARGET_SEGV_ACCERR 2 + +#define TARGET_TRAP_BRKPT 1 +#define TARGET_TRAP_TRACE 2 + + +#endif /* TARGET_OS_SIGINFO_H */ diff --git a/bsd-user/openbsd/target_os_signal.h b/bsd-user/openbsd/target_os_signal.h new file mode 100644 index 0000000000..4ee4f768e0 --- /dev/null +++ b/bsd-user/openbsd/target_os_signal.h @@ -0,0 +1,69 @@ +#ifndef TARGET_OS_SIGNAL_H +#define TARGET_OS_SIGNAL_H + +#include "target_os_siginfo.h" +#include "target_arch_signal.h" + +#define TARGET_SIGHUP 1 /* hangup */ +#define TARGET_SIGINT 2 /* interrupt */ +#define TARGET_SIGQUIT 3 /* quit */ +#define TARGET_SIGILL 4 /* illegal instruction (not reset when caught) */ +#define TARGET_SIGTRAP 5 /* trace trap (not reset when caught) */ +#define TARGET_SIGABRT 6 /* abort() */ +#define TARGET_SIGIOT 
SIGABRT /* compatibility */ +#define TARGET_SIGEMT 7 /* EMT instruction */ +#define TARGET_SIGFPE 8 /* floating point exception */ +#define TARGET_SIGKILL 9 /* kill (cannot be caught or ignored) */ +#define TARGET_SIGBUS 10 /* bus error */ +#define TARGET_SIGSEGV 11 /* segmentation violation */ +#define TARGET_SIGSYS 12 /* bad argument to system call */ +#define TARGET_SIGPIPE 13 /* write on a pipe with no one to read it */ +#define TARGET_SIGALRM 14 /* alarm clock */ +#define TARGET_SIGTERM 15 /* software termination signal from kill */ +#define TARGET_SIGURG 16 /* urgent condition on IO channel */ +#define TARGET_SIGSTOP 17 /* sendable stop signal not from tty */ +#define TARGET_SIGTSTP 18 /* stop signal from tty */ +#define TARGET_SIGCONT 19 /* continue a stopped process */ +#define TARGET_SIGCHLD 20 /* to parent on child stop or exit */ +#define TARGET_SIGTTIN 21 /* to readers pgrp upon background tty read */ +#define TARGET_SIGTTOU 22 /* like TTIN for out if (tp->t_local<OSTOP) */ +#define TARGET_SIGIO 23 /* input/output possible signal */ +#define TARGET_SIGXCPU 24 /* exceeded CPU time limit */ +#define TARGET_SIGXFSZ 25 /* exceeded file size limit */ +#define TARGET_SIGVTALRM 26 /* virtual time alarm */ +#define TARGET_SIGPROF 27 /* profiling time alarm */ +#define TARGET_SIGWINCH 28 /* window size changes */ +#define TARGET_SIGINFO 29 /* information request */ +#define TARGET_SIGUSR1 30 /* user defined signal 1 */ +#define TARGET_SIGUSR2 31 /* user defined signal 2 */ + +/* + * Language spec says we must list exactly one parameter, even though we + * actually supply three. Ugh! + */ +#define TARGET_SIG_DFL ((void (*)(int))0) +#define TARGET_SIG_IGN ((void (*)(int))1) +#define TARGET_SIG_ERR ((void (*)(int))-1) + +#define TARGET_SA_ONSTACK 0x0001 /* take signal on signal stack */ +#define TARGET_SA_RESTART 0x0002 /* restart system on signal return */ +#define TARGET_SA_RESETHAND 0x0004 /* reset to SIG_DFL when taking signal */ +#define TARGET_SA_NODEFER 0x0010 /* don't mask the signal we're delivering */ +#define TARGET_SA_NOCLDWAIT 0x0020 /* don't create zombies (assign to pid 1) */ +#define TARGET_SA_USERTRAMP 0x0100 /* do not bounce off kernel's sigtramp */ +#define TARGET_SA_NOCLDSTOP 0x0008 /* do not generate SIGCHLD on child stop */ +#define TARGET_SA_SIGINFO 0x0040 /* generate siginfo_t */ + +/* + * Flags for sigprocmask: + */ +#define TARGET_SIG_BLOCK 1 /* block specified signal set */ +#define TARGET_SIG_UNBLOCK 2 /* unblock specified signal set */ +#define TARGET_SIG_SETMASK 3 /* set specified signal set */ + +#define TARGET_BADSIG SIG_ERR + +#define TARGET_SS_ONSTACK 0x0001 /* take signals on alternate stack */ +#define TARGET_SS_DISABLE 0x0004 /* disable taking signals on alternate stack */ + +#endif /* TARGET_OS_SIGNAL_H */ diff --git a/bsd-user/openbsd/target_os_stack.h b/bsd-user/openbsd/target_os_stack.h new file mode 100644 index 0000000000..264a658608 --- /dev/null +++ b/bsd-user/openbsd/target_os_stack.h @@ -0,0 +1,56 @@ +/* + * OpenBSD setup_initial_stack() implementation. + * + * Copyright (c) 2013-14 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_OS_STACK_H +#define TARGET_OS_STACK_H + +#include "target_arch_sigtramp.h" + +static inline int setup_initial_stack(struct bsd_binprm *bprm, abi_ulong *p, + abi_ulong *stringp) +{ + int i; + abi_ulong stack_base; + + stack_base = (target_stkbas + target_stksiz) - + MAX_ARG_PAGES * TARGET_PAGE_SIZE; + if (p) { + *p = stack_base; + } + if (stringp) { + *stringp = stack_base; + } + + for (i = 0; i < MAX_ARG_PAGES; i++) { + if (bprm->page[i]) { + info->rss++; + if (!memcpy_to_target(stack_base, bprm->page[i], + TARGET_PAGE_SIZE)) { + errno = EFAULT; + return -1; + } + g_free(bprm->page[i]); + } + stack_base += TARGET_PAGE_SIZE; + } + + return 0; +} + +#endif /* TARGET_OS_STACK_H */ diff --git a/bsd-user/openbsd/target_os_thread.h b/bsd-user/openbsd/target_os_thread.h new file mode 100644 index 0000000000..c3adc6712f --- /dev/null +++ b/bsd-user/openbsd/target_os_thread.h @@ -0,0 +1,25 @@ +/* + * OpenBSD thread dependent code and definitions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_OS_THREAD_H +#define TARGET_OS_THREAD_H + +#include "target_arch_thread.h" + +#endif /* TARGET_OS_THREAD_H */ diff --git a/bsd-user/qemu.h b/bsd-user/qemu.h index c02e8a5ca1..be6105385e 100644 --- a/bsd-user/qemu.h +++ b/bsd-user/qemu.h @@ -17,41 +17,34 @@ #ifndef QEMU_H #define QEMU_H - +#include "qemu/osdep.h" #include "cpu.h" +#include "qemu/units.h" #include "exec/cpu_ldst.h" +#include "exec/exec-all.h" #undef DEBUG_REMAP -#ifdef DEBUG_REMAP -#endif /* DEBUG_REMAP */ #include "exec/user/abitypes.h" extern char **environ; -enum BSDType { - target_freebsd, - target_netbsd, - target_openbsd, -}; -extern enum BSDType bsd_type; - +#include "exec/user/thunk.h" +#include "target_arch.h" #include "syscall_defs.h" #include "target_syscall.h" +#include "target_os_vmparam.h" +#include "target_os_signal.h" +#include "target.h" #include "exec/gdbstub.h" -#if defined(CONFIG_USE_NPTL) -#define THREAD __thread -#else -#define THREAD -#endif - /* * This struct is used to hold certain information about the image. 
Basically, * it replicates in user space what would be certain task_struct fields in the * kernel */ struct image_info { + abi_ulong load_bias; abi_ulong load_addr; abi_ulong start_code; abi_ulong end_code; @@ -66,20 +59,14 @@ struct image_info { abi_ulong entry; abi_ulong code_offset; abi_ulong data_offset; - int personality; -}; - -#define MAX_SIGQUEUE_SIZE 1024 - -struct sigqueue { - struct sigqueue *next; + abi_ulong arg_start; + abi_ulong arg_end; + uint32_t elf_flags; }; struct emulated_sigtable { int pending; /* true if signal is pending */ - struct sigqueue *first; - /* in order to always have memory for the first signal, we put it here */ - struct sigqueue info; + target_siginfo_t info; }; /* @@ -89,27 +76,57 @@ typedef struct TaskState { pid_t ts_tid; /* tid (or pid) of this task */ struct TaskState *next; - int used; /* non zero if used */ + struct bsd_binprm *bprm; struct image_info *info; + struct emulated_sigtable sync_signal; + /* + * TODO: Since we block all signals while returning to the main CPU + * loop, this needn't be an array + */ struct emulated_sigtable sigtab[TARGET_NSIG]; - struct sigqueue sigqueue_table[MAX_SIGQUEUE_SIZE]; /* siginfo queue */ - struct sigqueue *first_free; /* first free siginfo queue entry */ - int signal_pending; /* non zero if a signal may be pending */ + /* + * Nonzero if process_pending_signals() needs to do something (either + * handle a pending signal or unblock signals). + * This flag is written from a signal handler so should be accessed via + * the qatomic_read() and qatomic_set() functions. (It is not accessed + * from multiple threads.) + */ + int signal_pending; + /* True if we're leaving a sigsuspend and sigsuspend_mask is valid. */ + bool in_sigsuspend; + /* + * This thread's signal mask, as requested by the guest program. + * The actual signal mask of this thread may differ: + * + we don't let SIGSEGV and SIGBUS be blocked while running guest code + * + sometimes we block all signals to avoid races + */ + sigset_t signal_mask; + /* + * The signal mask imposed by a guest sigsuspend syscall, if we are + * currently in the middle of such a syscall + */ + sigset_t sigsuspend_mask; - uint8_t stack[]; + /* This thread's sigaltstack, if it has one */ + struct target_sigaltstack sigaltstack_used; } __attribute__((aligned(16))) TaskState; -void init_task_state(TaskState *ts); +void stop_all_tasks(void); extern const char *qemu_uname_release; -extern unsigned long mmap_min_addr; /* - * MAX_ARG_PAGES defines the number of pages allocated for arguments - * and envelope for the new program. 32 should suffice, this gives - * a maximum env+arg of 128kB w/4KB pages! + * TARGET_ARG_MAX defines the number of bytes allocated for arguments + * and envelope for the new program. 256k should suffice for a reasonable + * maxiumum env+arg in 32-bit environments, bump it up to 512k for !ILP32 + * platforms. 
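+ * Assuming the usual 4 KiB target page size, this works out to a
+ * MAX_ARG_PAGES of 64 (256 KiB / 4 KiB) on 32-bit ABIs and 128 otherwise;
+ * targets with larger pages get proportionally fewer argument pages.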
*/ -#define MAX_ARG_PAGES 32 +#if TARGET_ABI_BITS > 32 +#define TARGET_ARG_MAX (512 * KiB) +#else +#define TARGET_ARG_MAX (256 * KiB) +#endif +#define MAX_ARG_PAGES (TARGET_ARG_MAX / TARGET_PAGE_SIZE) /* * This structure is used to hold the arguments that are @@ -119,24 +136,29 @@ struct bsd_binprm { char buf[128]; void *page[MAX_ARG_PAGES]; abi_ulong p; + abi_ulong stringp; int fd; int e_uid, e_gid; int argc, envc; char **argv; char **envp; - char *filename; /* Name of binary */ + char *filename; /* (Given) Name of binary */ + char *fullpath; /* Full path of binary */ + int (*core_dump)(int, CPUArchState *); }; void do_init_thread(struct target_pt_regs *regs, struct image_info *infop); abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp, - abi_ulong stringp, int push_ptr); + abi_ulong stringp); int loader_exec(const char *filename, char **argv, char **envp, - struct target_pt_regs *regs, struct image_info *infop); + struct target_pt_regs *regs, struct image_info *infop, + struct bsd_binprm *bprm); int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs, struct image_info *info); int load_flt_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs, struct image_info *info); +int is_target_elf_binary(int fd); abi_long memcpy_to_target(abi_ulong dest, const void *src, unsigned long len); @@ -153,8 +175,8 @@ abi_long do_netbsd_syscall(void *cpu_env, int num, abi_long arg1, abi_long do_openbsd_syscall(void *cpu_env, int num, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6); -void gemu_log(const char *fmt, ...) GCC_FMT_ATTR(1, 2); -extern THREAD CPUState *thread_cpu; +void gemu_log(const char *fmt, ...) G_GNUC_PRINTF(1, 2); +extern __thread CPUState *thread_cpu; void cpu_loop(CPUArchState *env); char *target_strerror(int err); int get_osversion(void); @@ -189,30 +211,49 @@ print_openbsd_syscall(int num, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6); void print_openbsd_syscall_ret(int num, abi_long ret); +/** + * print_taken_signal: + * @target_signum: target signal being taken + * @tinfo: target_siginfo_t which will be passed to the guest for the signal + * + * Print strace output indicating that this signal is being taken by the guest, + * in a format similar to: + * --- SIGSEGV {si_signo=SIGSEGV, si_code=SI_KERNEL, si_addr=0} --- + */ +void print_taken_signal(int target_signum, const target_siginfo_t *tinfo); extern int do_strace; -/* signal.c */ -void process_pending_signals(CPUArchState *cpu_env); -void signal_init(void); -long do_sigreturn(CPUArchState *env); -long do_rt_sigreturn(CPUArchState *env); -abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp); - /* mmap.c */ int target_mprotect(abi_ulong start, abi_ulong len, int prot); abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, - int flags, int fd, abi_ulong offset); + int flags, int fd, off_t offset); int target_munmap(abi_ulong start, abi_ulong len); abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size, abi_ulong new_size, unsigned long flags, abi_ulong new_addr); int target_msync(abi_ulong start, abi_ulong len, int flags); extern unsigned long last_brk; +extern abi_ulong mmap_next_start; +abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size); void mmap_fork_start(void); void mmap_fork_end(int child); /* main.c */ -extern unsigned long x86_stack_size; +extern char qemu_proc_pathname[]; +extern unsigned long target_maxtsiz; +extern unsigned long target_dfldsiz; +extern 
unsigned long target_maxdsiz; +extern unsigned long target_dflssiz; +extern unsigned long target_maxssiz; +extern unsigned long target_sgrowsiz; + +/* os-syscall.c */ +abi_long get_errno(abi_long ret); +bool is_error(abi_long ret); +int host_to_target_errno(int err); + +/* os-sys.c */ +abi_long do_freebsd_sysarch(void *cpu_env, abi_long arg1, abi_long arg2); /* user access */ @@ -421,8 +462,21 @@ static inline void *lock_user_string(abi_ulong guest_addr) #define unlock_user_struct(host_ptr, guest_addr, copy) \ unlock_user(host_ptr, guest_addr, (copy) ? sizeof(*host_ptr) : 0) -#if defined(CONFIG_USE_NPTL) -#include +static inline uint64_t target_arg64(uint32_t word0, uint32_t word1) +{ +#if TARGET_ABI_BITS == 32 +#if TARGET_BIG_ENDIAN + return ((uint64_t)word0 << 32) | word1; +#else + return ((uint64_t)word1 << 32) | word0; #endif +#else /* TARGET_ABI_BITS != 32 */ + return word0; +#endif /* TARGET_ABI_BITS != 32 */ +} + +#include + +#include "user/safe-syscall.h" #endif /* QEMU_H */ diff --git a/bsd-user/signal-common.h b/bsd-user/signal-common.h new file mode 100644 index 0000000000..6f90345bb2 --- /dev/null +++ b/bsd-user/signal-common.h @@ -0,0 +1,75 @@ +/* + * Emulation of BSD signals + * + * Copyright (c) 2013 Stacey Son + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef SIGNAL_COMMON_H +#define SIGNAL_COMMON_H + +/** + * block_signals: block all signals while handling this guest syscall + * + * Block all signals, and arrange that the signal mask is returned to + * its correct value for the guest before we resume execution of guest code. + * If this function returns non-zero, then the caller should immediately + * return -TARGET_ERESTARTSYS to the main loop, which will take the pending + * signal and restart execution of the syscall. + * If block_signals() returns zero, then the caller can continue with + * emulation of the system call knowing that no signals can be taken + * (and therefore that no race conditions will result). + * This should only be called once, because if it is called a second time + * it will always return non-zero. (Think of it like a mutex that can't + * be recursively locked.) + * Signals will be unblocked again by process_pending_signals(). + * + * Return value: non-zero if there was a pending signal, zero if not. + */ +int block_signals(void); /* Returns non zero if signal pending */ + +long do_rt_sigreturn(CPUArchState *env); +int do_sigaction(int sig, const struct target_sigaction *act, + struct target_sigaction *oact); +abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp); +long do_sigreturn(CPUArchState *env, abi_ulong addr); +void force_sig_fault(int sig, int code, abi_ulong addr); +int host_to_target_signal(int sig); +void host_to_target_sigset(target_sigset_t *d, const sigset_t *s); +void process_pending_signals(CPUArchState *env); +void queue_signal(CPUArchState *env, int sig, int si_type, + target_siginfo_t *info); +void signal_init(void); +int target_to_host_signal(int sig); +void target_to_host_sigset(sigset_t *d, const target_sigset_t *s); + +/* + * Within QEMU the top 8 bits of si_code indicate which of the parts of the + * union in target_siginfo is valid. This only applies between + * host_to_target_siginfo_noswap() and tswap_siginfo(); it does not appear + * either within host siginfo_t or in target_siginfo structures which we get + * from the guest userspace program. Linux kenrels use this internally, but BSD + * kernels don't do this, but its a useful abstraction. 
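+ * Concretely, queue_signal() and host_to_target_siginfo_noswap() record the
+ * type with deposit32(si_code, 24, 8, si_type), and tswap_siginfo() strips
+ * it again with sextract32(si_code, 0, 24) before the siginfo reaches the
+ * guest.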
+ * + * The linux-user version of this uses the top 16 bits, but FreeBSD's SI_USER + * and other signal indepenent SI_ codes have bit 16 set, so we only use the top + * byte instead. + * + * For FreeBSD, we have si_pid, si_uid, si_status, and si_addr always. Linux and + * {Open,Net}BSD have a different approach (where their reason field is larger, + * but whose siginfo has fewer fields always). + * + * QEMU_SI_CAPSICUM is currently only FreeBSD 14 current only, so only define + * it where _capsicum is available. + */ +#define QEMU_SI_NOINFO 0 /* nothing other than si_signo valid */ +#define QEMU_SI_FAULT 1 /* _fault is valid in _reason */ +#define QEMU_SI_TIMER 2 /* _timer is valid in _reason */ +#define QEMU_SI_MESGQ 3 /* _mesgq is valid in _reason */ +#define QEMU_SI_POLL 4 /* _poll is valid in _reason */ +#if defined(__FreeBSD_version) && __FreeBSD_version >= 1400026 +#define QEMU_SI_CAPSICUM 5 /* _capsicum is valid in _reason */ +#endif + +#endif diff --git a/bsd-user/signal.c b/bsd-user/signal.c index ad6d935569..58a5386395 100644 --- a/bsd-user/signal.c +++ b/bsd-user/signal.c @@ -2,6 +2,7 @@ * Emulation of BSD signals * * Copyright (c) 2003 - 2008 Fabrice Bellard + * Copyright (c) 2013 Stacey Son * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -16,14 +17,1036 @@ * You should have received a copy of the GNU General Public License * along with this program; if not, see . */ -#include "qemu/osdep.h" +#include "qemu/osdep.h" +#include "qemu/log.h" #include "qemu.h" +#include "signal-common.h" +#include "trace.h" +#include "hw/core/tcg-cpu-ops.h" +#include "host-signal.h" + +static struct target_sigaction sigact_table[TARGET_NSIG]; +static void host_signal_handler(int host_sig, siginfo_t *info, void *puc); +static void target_to_host_sigset_internal(sigset_t *d, + const target_sigset_t *s); + +static inline int on_sig_stack(TaskState *ts, unsigned long sp) +{ + return sp - ts->sigaltstack_used.ss_sp < ts->sigaltstack_used.ss_size; +} + +static inline int sas_ss_flags(TaskState *ts, unsigned long sp) +{ + return ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE : + on_sig_stack(ts, sp) ? SS_ONSTACK : 0; +} + +/* + * The BSD ABIs use the same singal numbers across all the CPU architectures, so + * (unlike Linux) these functions are just the identity mapping. This might not + * be true for XyzBSD running on AbcBSD, which doesn't currently work. 
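+ * (Contrast with linux-user, which keeps per-target translation tables
+ * because guest and host signal numbers can differ there.)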
+ */ +int host_to_target_signal(int sig) +{ + return sig; +} + +int target_to_host_signal(int sig) +{ + return sig; +} + +static inline void target_sigemptyset(target_sigset_t *set) +{ + memset(set, 0, sizeof(*set)); +} + +static inline void target_sigaddset(target_sigset_t *set, int signum) +{ + signum--; + uint32_t mask = (uint32_t)1 << (signum % TARGET_NSIG_BPW); + set->__bits[signum / TARGET_NSIG_BPW] |= mask; +} + +static inline int target_sigismember(const target_sigset_t *set, int signum) +{ + signum--; + abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW); + return (set->__bits[signum / TARGET_NSIG_BPW] & mask) != 0; +} + +/* Adjust the signal context to rewind out of safe-syscall if we're in it */ +static inline void rewind_if_in_safe_syscall(void *puc) +{ + ucontext_t *uc = (ucontext_t *)puc; + uintptr_t pcreg = host_signal_pc(uc); + + if (pcreg > (uintptr_t)safe_syscall_start + && pcreg < (uintptr_t)safe_syscall_end) { + host_signal_set_pc(uc, (uintptr_t)safe_syscall_start); + } +} + +/* + * Note: The following take advantage of the BSD signal property that all + * signals are available on all architectures. + */ +static void host_to_target_sigset_internal(target_sigset_t *d, + const sigset_t *s) +{ + int i; + + target_sigemptyset(d); + for (i = 1; i <= NSIG; i++) { + if (sigismember(s, i)) { + target_sigaddset(d, host_to_target_signal(i)); + } + } +} + +void host_to_target_sigset(target_sigset_t *d, const sigset_t *s) +{ + target_sigset_t d1; + int i; + + host_to_target_sigset_internal(&d1, s); + for (i = 0; i < _SIG_WORDS; i++) { + d->__bits[i] = tswap32(d1.__bits[i]); + } +} + +static void target_to_host_sigset_internal(sigset_t *d, + const target_sigset_t *s) +{ + int i; + + sigemptyset(d); + for (i = 1; i <= TARGET_NSIG; i++) { + if (target_sigismember(s, i)) { + sigaddset(d, target_to_host_signal(i)); + } + } +} + +void target_to_host_sigset(sigset_t *d, const target_sigset_t *s) +{ + target_sigset_t s1; + int i; + + for (i = 0; i < TARGET_NSIG_WORDS; i++) { + s1.__bits[i] = tswap32(s->__bits[i]); + } + target_to_host_sigset_internal(d, &s1); +} + +static bool has_trapno(int tsig) +{ + return tsig == TARGET_SIGILL || + tsig == TARGET_SIGFPE || + tsig == TARGET_SIGSEGV || + tsig == TARGET_SIGBUS || + tsig == TARGET_SIGTRAP; +} + +/* Siginfo conversion. */ + +/* + * Populate tinfo w/o swapping based on guessing which fields are valid. + */ +static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo, + const siginfo_t *info) +{ + int sig = host_to_target_signal(info->si_signo); + int si_code = info->si_code; + int si_type; + + /* + * Make sure we that the variable portion of the target siginfo is zeroed + * out so we don't leak anything into that. + */ + memset(&tinfo->_reason, 0, sizeof(tinfo->_reason)); + + /* + * This is awkward, because we have to use a combination of the si_code and + * si_signo to figure out which of the union's members are valid.o We + * therefore make our best guess. + * + * Once we have made our guess, we record it in the top 16 bits of + * the si_code, so that tswap_siginfo() later can use it. + * tswap_siginfo() will strip these top bits out before writing + * si_code to the guest (sign-extending the lower bits). + */ + tinfo->si_signo = sig; + tinfo->si_errno = info->si_errno; + tinfo->si_code = info->si_code; + tinfo->si_pid = info->si_pid; + tinfo->si_uid = info->si_uid; + tinfo->si_status = info->si_status; + tinfo->si_addr = (abi_ulong)(unsigned long)info->si_addr; + /* + * si_value is opaque to kernel. 
On all FreeBSD platforms, + * sizeof(sival_ptr) >= sizeof(sival_int) so the following + * always will copy the larger element. + */ + tinfo->si_value.sival_ptr = + (abi_ulong)(unsigned long)info->si_value.sival_ptr; + + switch (si_code) { + /* + * All the SI_xxx codes that are defined here are global to + * all the signals (they have values that none of the other, + * more specific signal info will set). + */ + case SI_USER: + case SI_LWP: + case SI_KERNEL: + case SI_QUEUE: + case SI_ASYNCIO: + /* + * Only the fixed parts are valid (though FreeBSD doesn't always + * set all the fields to non-zero values. + */ + si_type = QEMU_SI_NOINFO; + break; + case SI_TIMER: + tinfo->_reason._timer._timerid = info->_reason._timer._timerid; + tinfo->_reason._timer._overrun = info->_reason._timer._overrun; + si_type = QEMU_SI_TIMER; + break; + case SI_MESGQ: + tinfo->_reason._mesgq._mqd = info->_reason._mesgq._mqd; + si_type = QEMU_SI_MESGQ; + break; + default: + /* + * We have to go based on the signal number now to figure out + * what's valid. + */ + si_type = QEMU_SI_NOINFO; + if (has_trapno(sig)) { + tinfo->_reason._fault._trapno = info->_reason._fault._trapno; + si_type = QEMU_SI_FAULT; + } +#ifdef TARGET_SIGPOLL + /* + * FreeBSD never had SIGPOLL, but emulates it for Linux so there's + * a chance it may popup in the future. + */ + if (sig == TARGET_SIGPOLL) { + tinfo->_reason._poll._band = info->_reason._poll._band; + si_type = QEMU_SI_POLL; + } +#endif + /* + * Unsure that this can actually be generated, and our support for + * capsicum is somewhere between weak and non-existant, but if we get + * one, then we know what to save. + */ +#ifdef QEMU_SI_CAPSICUM + if (sig == TARGET_SIGTRAP) { + tinfo->_reason._capsicum._syscall = + info->_reason._capsicum._syscall; + si_type = QEMU_SI_CAPSICUM; + } +#endif + break; + } + tinfo->si_code = deposit32(si_code, 24, 8, si_type); +} + +static void tswap_siginfo(target_siginfo_t *tinfo, const target_siginfo_t *info) +{ + int si_type = extract32(info->si_code, 24, 8); + int si_code = sextract32(info->si_code, 0, 24); + + __put_user(info->si_signo, &tinfo->si_signo); + __put_user(info->si_errno, &tinfo->si_errno); + __put_user(si_code, &tinfo->si_code); /* Zero out si_type, it's internal */ + __put_user(info->si_pid, &tinfo->si_pid); + __put_user(info->si_uid, &tinfo->si_uid); + __put_user(info->si_status, &tinfo->si_status); + __put_user(info->si_addr, &tinfo->si_addr); + /* + * Unswapped, because we passed it through mostly untouched. si_value is + * opaque to the kernel, so we didn't bother with potentially wasting cycles + * to swap it into host byte order. + */ + tinfo->si_value.sival_ptr = info->si_value.sival_ptr; + + /* + * We can use our internal marker of which fields in the structure + * are valid, rather than duplicating the guesswork of + * host_to_target_siginfo_noswap() here. 
+ */ + switch (si_type) { + case QEMU_SI_NOINFO: /* No additional info */ + break; + case QEMU_SI_FAULT: + __put_user(info->_reason._fault._trapno, + &tinfo->_reason._fault._trapno); + break; + case QEMU_SI_TIMER: + __put_user(info->_reason._timer._timerid, + &tinfo->_reason._timer._timerid); + __put_user(info->_reason._timer._overrun, + &tinfo->_reason._timer._overrun); + break; + case QEMU_SI_MESGQ: + __put_user(info->_reason._mesgq._mqd, &tinfo->_reason._mesgq._mqd); + break; + case QEMU_SI_POLL: + /* Note: Not generated on FreeBSD */ + __put_user(info->_reason._poll._band, &tinfo->_reason._poll._band); + break; +#ifdef QEMU_SI_CAPSICUM + case QEMU_SI_CAPSICUM: + __put_user(info->_reason._capsicum._syscall, + &tinfo->_reason._capsicum._syscall); + break; +#endif + default: + g_assert_not_reached(); + } +} + +int block_signals(void) +{ + TaskState *ts = (TaskState *)thread_cpu->opaque; + sigset_t set; + + /* + * It's OK to block everything including SIGSEGV, because we won't run any + * further guest code before unblocking signals in + * process_pending_signals(). We depend on the FreeBSD behaivor here where + * this will only affect this thread's signal mask. We don't use + * pthread_sigmask which might seem more correct because that routine also + * does odd things with SIGCANCEL to implement pthread_cancel(). + */ + sigfillset(&set); + sigprocmask(SIG_SETMASK, &set, 0); + + return qatomic_xchg(&ts->signal_pending, 1); +} + +/* Returns 1 if given signal should dump core if not handled. */ +static int core_dump_signal(int sig) +{ + switch (sig) { + case TARGET_SIGABRT: + case TARGET_SIGFPE: + case TARGET_SIGILL: + case TARGET_SIGQUIT: + case TARGET_SIGSEGV: + case TARGET_SIGTRAP: + case TARGET_SIGBUS: + return 1; + default: + return 0; + } +} + +/* Abort execution with signal. */ +static G_NORETURN +void dump_core_and_abort(int target_sig) +{ + CPUArchState *env = thread_cpu->env_ptr; + CPUState *cpu = env_cpu(env); + TaskState *ts = cpu->opaque; + int core_dumped = 0; + int host_sig; + struct sigaction act; + + host_sig = target_to_host_signal(target_sig); + gdb_signalled(env, target_sig); + + /* Dump core if supported by target binary format */ + if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) { + stop_all_tasks(); + core_dumped = + ((*ts->bprm->core_dump)(target_sig, env) == 0); + } + if (core_dumped) { + struct rlimit nodump; + + /* + * We already dumped the core of target process, we don't want + * a coredump of qemu itself. + */ + getrlimit(RLIMIT_CORE, &nodump); + nodump.rlim_cur = 0; + setrlimit(RLIMIT_CORE, &nodump); + (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) " + "- %s\n", target_sig, strsignal(host_sig), "core dumped"); + } + + /* + * The proper exit code for dying from an uncaught signal is + * -. The kernel doesn't allow exit() or _exit() to pass + * a negative value. To get the proper exit code we need to + * actually die from an uncaught signal. Here the default signal + * handler is installed, we send ourself a signal and we wait for + * it to arrive. + */ + memset(&act, 0, sizeof(act)); + sigfillset(&act.sa_mask); + act.sa_handler = SIG_DFL; + sigaction(host_sig, &act, NULL); + + kill(getpid(), host_sig); + + /* + * Make sure the signal isn't masked (just reuse the mask inside + * of act). + */ + sigdelset(&act.sa_mask, host_sig); + sigsuspend(&act.sa_mask); + + /* unreachable */ + abort(); +} + +/* + * Queue a signal so that it will be send to the virtual CPU as soon as + * possible. 
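+ * This is the synchronous path: the signal is recorded in ts->sync_signal
+ * (for example by force_sig_fault()) and delivered the next time the main
+ * loop calls process_pending_signals(). Asynchronous host signals instead
+ * arrive via host_signal_handler() and are recorded in ts->sigtab[].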
+ */ +void queue_signal(CPUArchState *env, int sig, int si_type, + target_siginfo_t *info) +{ + CPUState *cpu = env_cpu(env); + TaskState *ts = cpu->opaque; + + trace_user_queue_signal(env, sig); + + info->si_code = deposit32(info->si_code, 24, 8, si_type); + + ts->sync_signal.info = *info; + ts->sync_signal.pending = sig; + /* Signal that a new signal is pending. */ + qatomic_set(&ts->signal_pending, 1); + return; +} + +static int fatal_signal(int sig) +{ + + switch (sig) { + case TARGET_SIGCHLD: + case TARGET_SIGURG: + case TARGET_SIGWINCH: + case TARGET_SIGINFO: + /* Ignored by default. */ + return 0; + case TARGET_SIGCONT: + case TARGET_SIGSTOP: + case TARGET_SIGTSTP: + case TARGET_SIGTTIN: + case TARGET_SIGTTOU: + /* Job control signals. */ + return 0; + default: + return 1; + } +} + +/* + * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the + * 'force' part is handled in process_pending_signals(). + */ +void force_sig_fault(int sig, int code, abi_ulong addr) +{ + CPUState *cpu = thread_cpu; + CPUArchState *env = cpu->env_ptr; + target_siginfo_t info = {}; + + info.si_signo = sig; + info.si_errno = 0; + info.si_code = code; + info.si_addr = addr; + queue_signal(env, sig, QEMU_SI_FAULT, &info); +} + +static void host_signal_handler(int host_sig, siginfo_t *info, void *puc) +{ + CPUArchState *env = thread_cpu->env_ptr; + CPUState *cpu = env_cpu(env); + TaskState *ts = cpu->opaque; + target_siginfo_t tinfo; + ucontext_t *uc = puc; + struct emulated_sigtable *k; + int guest_sig; + uintptr_t pc = 0; + bool sync_sig = false; + + /* + * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special + * handling wrt signal blocking and unwinding. + */ + if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) { + MMUAccessType access_type; + uintptr_t host_addr; + abi_ptr guest_addr; + bool is_write; + + host_addr = (uintptr_t)info->si_addr; + + /* + * Convert forcefully to guest address space: addresses outside + * reserved_va are still valid to report via SEGV_MAPERR. + */ + guest_addr = h2g_nocheck(host_addr); + + pc = host_signal_pc(uc); + is_write = host_signal_write(info, uc); + access_type = adjust_signal_pc(&pc, is_write); + + if (host_sig == SIGSEGV) { + bool maperr = true; + + if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) { + /* If this was a write to a TB protected page, restart. */ + if (is_write && + handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask, + pc, guest_addr)) { + return; + } + + /* + * With reserved_va, the whole address space is PROT_NONE, + * which means that we may get ACCERR when we want MAPERR. + */ + if (page_get_flags(guest_addr) & PAGE_VALID) { + maperr = false; + } else { + info->si_code = SEGV_MAPERR; + } + } + + sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL); + cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc); + } else { + sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL); + if (info->si_code == BUS_ADRALN) { + cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc); + } + } + + sync_sig = true; + } + + /* Get the target signal number. 
*/ + guest_sig = host_to_target_signal(host_sig); + if (guest_sig < 1 || guest_sig > TARGET_NSIG) { + return; + } + trace_user_host_signal(cpu, host_sig, guest_sig); + + host_to_target_siginfo_noswap(&tinfo, info); + + k = &ts->sigtab[guest_sig - 1]; + k->info = tinfo; + k->pending = guest_sig; + ts->signal_pending = 1; + + /* + * For synchronous signals, unwind the cpu state to the faulting + * insn and then exit back to the main loop so that the signal + * is delivered immediately. + */ + if (sync_sig) { + cpu->exception_index = EXCP_INTERRUPT; + cpu_loop_exit_restore(cpu, pc); + } + + rewind_if_in_safe_syscall(puc); + + /* + * Block host signals until target signal handler entered. We + * can't block SIGSEGV or SIGBUS while we're executing guest + * code in case the guest code provokes one in the window between + * now and it getting out to the main loop. Signals will be + * unblocked again in process_pending_signals(). + */ + sigfillset(&uc->uc_sigmask); + sigdelset(&uc->uc_sigmask, SIGSEGV); + sigdelset(&uc->uc_sigmask, SIGBUS); + + /* Interrupt the virtual CPU as soon as possible. */ + cpu_exit(thread_cpu); +} + +/* do_sigaltstack() returns target values and errnos. */ +/* compare to kern/kern_sig.c sys_sigaltstack() and kern_sigaltstack() */ +abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp) +{ + TaskState *ts = (TaskState *)thread_cpu->opaque; + int ret; + target_stack_t oss; + + if (uoss_addr) { + /* Save current signal stack params */ + oss.ss_sp = tswapl(ts->sigaltstack_used.ss_sp); + oss.ss_size = tswapl(ts->sigaltstack_used.ss_size); + oss.ss_flags = tswapl(sas_ss_flags(ts, sp)); + } + + if (uss_addr) { + target_stack_t *uss; + target_stack_t ss; + size_t minstacksize = TARGET_MINSIGSTKSZ; + + ret = -TARGET_EFAULT; + if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) { + goto out; + } + __get_user(ss.ss_sp, &uss->ss_sp); + __get_user(ss.ss_size, &uss->ss_size); + __get_user(ss.ss_flags, &uss->ss_flags); + unlock_user_struct(uss, uss_addr, 0); + + ret = -TARGET_EPERM; + if (on_sig_stack(ts, sp)) { + goto out; + } + + ret = -TARGET_EINVAL; + if (ss.ss_flags != TARGET_SS_DISABLE + && ss.ss_flags != TARGET_SS_ONSTACK + && ss.ss_flags != 0) { + goto out; + } + + if (ss.ss_flags == TARGET_SS_DISABLE) { + ss.ss_size = 0; + ss.ss_sp = 0; + } else { + ret = -TARGET_ENOMEM; + if (ss.ss_size < minstacksize) { + goto out; + } + } + + ts->sigaltstack_used.ss_sp = ss.ss_sp; + ts->sigaltstack_used.ss_size = ss.ss_size; + } + + if (uoss_addr) { + ret = -TARGET_EFAULT; + if (copy_to_user(uoss_addr, &oss, sizeof(oss))) { + goto out; + } + } + + ret = 0; +out: + return ret; +} + +/* do_sigaction() return host values and errnos */ +int do_sigaction(int sig, const struct target_sigaction *act, + struct target_sigaction *oact) +{ + struct target_sigaction *k; + struct sigaction act1; + int host_sig; + int ret = 0; + + if (sig < 1 || sig > TARGET_NSIG) { + return -TARGET_EINVAL; + } + + if ((sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) && + act != NULL && act->_sa_handler != TARGET_SIG_DFL) { + return -TARGET_EINVAL; + } + + if (block_signals()) { + return -TARGET_ERESTART; + } + + k = &sigact_table[sig - 1]; + if (oact) { + oact->_sa_handler = tswapal(k->_sa_handler); + oact->sa_flags = tswap32(k->sa_flags); + oact->sa_mask = k->sa_mask; + } + if (act) { + k->_sa_handler = tswapal(act->_sa_handler); + k->sa_flags = tswap32(act->sa_flags); + k->sa_mask = act->sa_mask; + + /* Update the host signal state. 
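+         * SIGSEGV and SIGBUS are deliberately left alone below: QEMU keeps
+         * its own handler installed for those so guest faults can still be
+         * detected (see signal_init()).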
*/ + host_sig = target_to_host_signal(sig); + if (host_sig != SIGSEGV && host_sig != SIGBUS) { + memset(&act1, 0, sizeof(struct sigaction)); + sigfillset(&act1.sa_mask); + act1.sa_flags = SA_SIGINFO; + if (k->sa_flags & TARGET_SA_RESTART) { + act1.sa_flags |= SA_RESTART; + } + /* + * Note: It is important to update the host kernel signal mask to + * avoid getting unexpected interrupted system calls. + */ + if (k->_sa_handler == TARGET_SIG_IGN) { + act1.sa_sigaction = (void *)SIG_IGN; + } else if (k->_sa_handler == TARGET_SIG_DFL) { + if (fatal_signal(sig)) { + act1.sa_sigaction = host_signal_handler; + } else { + act1.sa_sigaction = (void *)SIG_DFL; + } + } else { + act1.sa_sigaction = host_signal_handler; + } + ret = sigaction(host_sig, &act1, NULL); + } + } + return ret; +} + +static inline abi_ulong get_sigframe(struct target_sigaction *ka, + CPUArchState *env, size_t frame_size) +{ + TaskState *ts = (TaskState *)thread_cpu->opaque; + abi_ulong sp; + + /* Use default user stack */ + sp = get_sp_from_cpustate(env); + + if ((ka->sa_flags & TARGET_SA_ONSTACK) && sas_ss_flags(ts, sp) == 0) { + sp = ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size; + } + +/* TODO: make this a target_arch function / define */ +#if defined(TARGET_ARM) + return (sp - frame_size) & ~7; +#elif defined(TARGET_AARCH64) + return (sp - frame_size) & ~15; +#else + return sp - frame_size; +#endif +} + +/* compare to $M/$M/exec_machdep.c sendsig and sys/kern/kern_sig.c sigexit */ + +static void setup_frame(int sig, int code, struct target_sigaction *ka, + target_sigset_t *set, target_siginfo_t *tinfo, CPUArchState *env) +{ + struct target_sigframe *frame; + abi_ulong frame_addr; + int i; + + frame_addr = get_sigframe(ka, env, sizeof(*frame)); + trace_user_setup_frame(env, frame_addr); + if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { + unlock_user_struct(frame, frame_addr, 1); + dump_core_and_abort(TARGET_SIGILL); + return; + } + + memset(frame, 0, sizeof(*frame)); + setup_sigframe_arch(env, frame_addr, frame, 0); + + for (i = 0; i < TARGET_NSIG_WORDS; i++) { + __put_user(set->__bits[i], &frame->sf_uc.uc_sigmask.__bits[i]); + } + + if (tinfo) { + frame->sf_si.si_signo = tinfo->si_signo; + frame->sf_si.si_errno = tinfo->si_errno; + frame->sf_si.si_code = tinfo->si_code; + frame->sf_si.si_pid = tinfo->si_pid; + frame->sf_si.si_uid = tinfo->si_uid; + frame->sf_si.si_status = tinfo->si_status; + frame->sf_si.si_addr = tinfo->si_addr; + /* see host_to_target_siginfo_noswap() for more details */ + frame->sf_si.si_value.sival_ptr = tinfo->si_value.sival_ptr; + /* + * At this point, whatever is in the _reason union is complete + * and in target order, so just copy the whole thing over, even + * if it's too large for this specific signal. + * host_to_target_siginfo_noswap() and tswap_siginfo() have ensured + * that's so. 
+ */ + memcpy(&frame->sf_si._reason, &tinfo->_reason, + sizeof(tinfo->_reason)); + } + + set_sigtramp_args(env, sig, frame, frame_addr, ka); + + unlock_user_struct(frame, frame_addr, 1); +} + +static int reset_signal_mask(target_ucontext_t *ucontext) +{ + int i; + sigset_t blocked; + target_sigset_t target_set; + TaskState *ts = (TaskState *)thread_cpu->opaque; + + for (i = 0; i < TARGET_NSIG_WORDS; i++) { + if (__get_user(target_set.__bits[i], + &ucontext->uc_sigmask.__bits[i])) { + return -TARGET_EFAULT; + } + } + target_to_host_sigset_internal(&blocked, &target_set); + ts->signal_mask = blocked; + + return 0; +} + +/* See sys/$M/$M/exec_machdep.c sigreturn() */ +long do_sigreturn(CPUArchState *env, abi_ulong addr) +{ + long ret; + abi_ulong target_ucontext; + target_ucontext_t *ucontext = NULL; + + /* Get the target ucontext address from the stack frame */ + ret = get_ucontext_sigreturn(env, addr, &target_ucontext); + if (is_error(ret)) { + return ret; + } + trace_user_do_sigreturn(env, addr); + if (!lock_user_struct(VERIFY_READ, ucontext, target_ucontext, 0)) { + goto badframe; + } + + /* Set the register state back to before the signal. */ + if (set_mcontext(env, &ucontext->uc_mcontext, 1)) { + goto badframe; + } + + /* And reset the signal mask. */ + if (reset_signal_mask(ucontext)) { + goto badframe; + } + + unlock_user_struct(ucontext, target_ucontext, 0); + return -TARGET_EJUSTRETURN; + +badframe: + if (ucontext != NULL) { + unlock_user_struct(ucontext, target_ucontext, 0); + } + return -TARGET_EFAULT; +} void signal_init(void) { + TaskState *ts = (TaskState *)thread_cpu->opaque; + struct sigaction act; + struct sigaction oact; + int i; + int host_sig; + + /* Set the signal mask from the host mask. */ + sigprocmask(0, 0, &ts->signal_mask); + + sigfillset(&act.sa_mask); + act.sa_sigaction = host_signal_handler; + act.sa_flags = SA_SIGINFO; + + for (i = 1; i <= TARGET_NSIG; i++) { +#ifdef CONFIG_GPROF + if (i == TARGET_SIGPROF) { + continue; + } +#endif + host_sig = target_to_host_signal(i); + sigaction(host_sig, NULL, &oact); + if (oact.sa_sigaction == (void *)SIG_IGN) { + sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN; + } else if (oact.sa_sigaction == (void *)SIG_DFL) { + sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL; + } + /* + * If there's already a handler installed then something has + * gone horribly wrong, so don't even try to handle that case. + * Install some handlers for our own use. We need at least + * SIGSEGV and SIGBUS, to detect exceptions. We can not just + * trap all signals because it affects syscall interrupt + * behavior. But do trap all default-fatal signals. + */ + if (fatal_signal(i)) { + sigaction(host_sig, &act, NULL); + } + } } -void process_pending_signals(CPUArchState *cpu_env) +static void handle_pending_signal(CPUArchState *env, int sig, + struct emulated_sigtable *k) { + CPUState *cpu = env_cpu(env); + TaskState *ts = cpu->opaque; + struct target_sigaction *sa; + int code; + sigset_t set; + abi_ulong handler; + target_siginfo_t tinfo; + target_sigset_t target_old_set; + + trace_user_handle_signal(env, sig); + + k->pending = 0; + + sig = gdb_handlesig(cpu, sig); + if (!sig) { + sa = NULL; + handler = TARGET_SIG_IGN; + } else { + sa = &sigact_table[sig - 1]; + handler = sa->_sa_handler; + } + + if (do_strace) { + print_taken_signal(sig, &k->info); + } + + if (handler == TARGET_SIG_DFL) { + /* + * default handler : ignore some signal. The other are job + * control or fatal. 
+ */ + if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || + sig == TARGET_SIGTTOU) { + kill(getpid(), SIGSTOP); + } else if (sig != TARGET_SIGCHLD && sig != TARGET_SIGURG && + sig != TARGET_SIGINFO && sig != TARGET_SIGWINCH && + sig != TARGET_SIGCONT) { + dump_core_and_abort(sig); + } + } else if (handler == TARGET_SIG_IGN) { + /* ignore sig */ + } else if (handler == TARGET_SIG_ERR) { + dump_core_and_abort(sig); + } else { + /* compute the blocked signals during the handler execution */ + sigset_t *blocked_set; + + target_to_host_sigset(&set, &sa->sa_mask); + /* + * SA_NODEFER indicates that the current signal should not be + * blocked during the handler. + */ + if (!(sa->sa_flags & TARGET_SA_NODEFER)) { + sigaddset(&set, target_to_host_signal(sig)); + } + + /* + * Save the previous blocked signal state to restore it at the + * end of the signal execution (see do_sigreturn). + */ + host_to_target_sigset_internal(&target_old_set, &ts->signal_mask); + + blocked_set = ts->in_sigsuspend ? + &ts->sigsuspend_mask : &ts->signal_mask; + sigorset(&ts->signal_mask, blocked_set, &set); + ts->in_sigsuspend = false; + sigprocmask(SIG_SETMASK, &ts->signal_mask, NULL); + + /* XXX VM86 on x86 ??? */ + + code = k->info.si_code; /* From host, so no si_type */ + /* prepare the stack frame of the virtual CPU */ + if (sa->sa_flags & TARGET_SA_SIGINFO) { + tswap_siginfo(&tinfo, &k->info); + setup_frame(sig, code, sa, &target_old_set, &tinfo, env); + } else { + setup_frame(sig, code, sa, &target_old_set, NULL, env); + } + if (sa->sa_flags & TARGET_SA_RESETHAND) { + sa->_sa_handler = TARGET_SIG_DFL; + } + } +} + +void process_pending_signals(CPUArchState *env) +{ + CPUState *cpu = env_cpu(env); + int sig; + sigset_t *blocked_set, set; + struct emulated_sigtable *k; + TaskState *ts = cpu->opaque; + + while (qatomic_read(&ts->signal_pending)) { + sigfillset(&set); + sigprocmask(SIG_SETMASK, &set, 0); + + restart_scan: + sig = ts->sync_signal.pending; + if (sig) { + /* + * Synchronous signals are forced by the emulated CPU in some way. + * If they are set to ignore, restore the default handler (see + * sys/kern_sig.c trapsignal() and execsigs() for this behavior) + * though maybe this is done only when forcing exit for non SIGCHLD. + */ + if (sigismember(&ts->signal_mask, target_to_host_signal(sig)) || + sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) { + sigdelset(&ts->signal_mask, target_to_host_signal(sig)); + sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL; + } + handle_pending_signal(env, sig, &ts->sync_signal); + } + + k = ts->sigtab; + for (sig = 1; sig <= TARGET_NSIG; sig++, k++) { + blocked_set = ts->in_sigsuspend ? + &ts->sigsuspend_mask : &ts->signal_mask; + if (k->pending && + !sigismember(blocked_set, target_to_host_signal(sig))) { + handle_pending_signal(env, sig, k); + /* + * Restart scan from the beginning, as handle_pending_signal + * might have resulted in a new synchronous signal (eg SIGSEGV). + */ + goto restart_scan; + } + } + + /* + * Unblock signals and check one more time. Unblocking signals may cause + * us to take another host signal, which will set signal_pending again. 
+ */ + qatomic_set(&ts->signal_pending, 0); + ts->in_sigsuspend = false; + set = ts->signal_mask; + sigdelset(&set, SIGSEGV); + sigdelset(&set, SIGBUS); + sigprocmask(SIG_SETMASK, &set, 0); + } + ts->in_sigsuspend = false; +} + +void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr, + MMUAccessType access_type, bool maperr, uintptr_t ra) +{ + const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; + + if (tcg_ops->record_sigsegv) { + tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra); + } + + force_sig_fault(TARGET_SIGSEGV, + maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR, + addr); + cpu->exception_index = EXCP_INTERRUPT; + cpu_loop_exit_restore(cpu, ra); +} + +void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr, + MMUAccessType access_type, uintptr_t ra) +{ + const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; + + if (tcg_ops->record_sigbus) { + tcg_ops->record_sigbus(cpu, addr, access_type, ra); + } + + force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr); + cpu->exception_index = EXCP_INTERRUPT; + cpu_loop_exit_restore(cpu, ra); } diff --git a/bsd-user/sparc/target_arch_sysarch.h b/bsd-user/sparc/target_arch_sysarch.h deleted file mode 100644 index d0b85ef6bb..0000000000 --- a/bsd-user/sparc/target_arch_sysarch.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * SPARC sysarch() system call emulation - * - * Copyright (c) 2013 Stacey D. Son - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - */ - -#ifndef BSD_USER_ARCH_SYSARCH_H_ -#define BSD_USER_ARCH_SYSARCH_H_ - -#include "target_syscall.h" - -static inline abi_long do_freebsd_arch_sysarch(void *env, int op, - abi_ulong parms) -{ - int ret = 0; - - switch (op) { - case TARGET_SPARC_SIGTRAMP_INSTALL: - /* XXX not currently handled */ - case TARGET_SPARC_UTRAP_INSTALL: - /* XXX not currently handled */ - default: - ret = -TARGET_EINVAL; - break; - } - - return ret; -} - -static inline void do_freebsd_arch_print_sysarch( - const struct syscallname *name, abi_long arg1, abi_long arg2, - abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6) -{ - - gemu_log("%s(%d, " TARGET_ABI_FMT_lx ", " TARGET_ABI_FMT_lx ", " - TARGET_ABI_FMT_lx ")", name->name, (int)arg1, arg2, arg3, arg4); -} - -#endif /*!BSD_USER_ARCH_SYSARCH_H_ */ diff --git a/bsd-user/sparc64/target_arch_sysarch.h b/bsd-user/sparc64/target_arch_sysarch.h deleted file mode 100644 index e6f17c1504..0000000000 --- a/bsd-user/sparc64/target_arch_sysarch.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * SPARC64 sysarch() system call emulation - * - * Copyright (c) 2013 Stacey D. Son - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
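An illustrative aside, not part of the patch: cpu_loop_exit_sigsegv() above is the hook a TCG front end calls when a guest memory access faults; the maperr flag selects SEGV_MAPERR versus SEGV_ACCERR before the signal is queued and control unwinds back to the CPU loop via cpu_loop_exit_restore(). A hedged usage sketch; report_bad_store, cs, addr and retaddr are hypothetical names, while MMU_DATA_STORE is QEMU's MMUAccessType value for a data write:

    /* Hypothetical helper: report a guest store to an unmapped address. */
    static void report_bad_store(CPUState *cs, target_ulong addr, uintptr_t retaddr)
    {
        /* true => no mapping at addr, so the guest sees SEGV_MAPERR */
        cpu_loop_exit_sigsegv(cs, addr, MMU_DATA_STORE, true, retaddr);
    }

The call does not return: force_sig_fault() queues the synchronous signal and cpu_loop_exit_restore() longjmps out of the translated code.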
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - */ - -#ifndef BSD_USER_ARCH_SYSARCH_H_ -#define BSD_USER_ARCH_SYSARCH_H_ - -#include "target_syscall.h" - -static inline abi_long do_freebsd_arch_sysarch(void *env, int op, - abi_ulong parms) -{ - int ret = 0; - - switch (op) { - case TARGET_SPARC_SIGTRAMP_INSTALL: - /* XXX not currently handled */ - case TARGET_SPARC_UTRAP_INSTALL: - /* XXX not currently handled */ - default: - ret = -TARGET_EINVAL; - break; - } - - return ret; -} - -static inline void do_freebsd_arch_print_sysarch( - const struct syscallname *name, abi_long arg1, abi_long arg2, - abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6) -{ - - gemu_log("%s(%d, " TARGET_ABI_FMT_lx ", " TARGET_ABI_FMT_lx ", " - TARGET_ABI_FMT_lx ")", name->name, (int)arg1, arg2, arg3, arg4); -} - -#endif /*!BSD_USER_ARCH_SYSARCH_H_ */ diff --git a/bsd-user/strace.c b/bsd-user/strace.c index be40b8a20c..a77d10dd6b 100644 --- a/bsd-user/strace.c +++ b/bsd-user/strace.c @@ -31,6 +31,24 @@ int do_strace; /* * Utility functions */ +static const char * +get_comma(int last) +{ + return (last) ? "" : ","; +} + +/* + * Prints out raw parameter using given format. Caller needs + * to do byte swapping if needed. + */ +static void +print_raw_param(const char *fmt, abi_long param, int last) +{ + char format[64]; + + (void)snprintf(format, sizeof(format), "%s%s", fmt, get_comma(last)); + gemu_log(format, param); +} static void print_sysctl(const struct syscallname *name, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, @@ -239,3 +257,82 @@ void print_openbsd_syscall_ret(int num, abi_long ret) print_syscall_ret(num, ret, openbsd_scnames, ARRAY_SIZE(openbsd_scnames)); } + +static void +print_signal(abi_ulong arg, int last) +{ + const char *signal_name = NULL; + switch (arg) { + case TARGET_SIGHUP: + signal_name = "SIGHUP"; + break; + case TARGET_SIGINT: + signal_name = "SIGINT"; + break; + case TARGET_SIGQUIT: + signal_name = "SIGQUIT"; + break; + case TARGET_SIGILL: + signal_name = "SIGILL"; + break; + case TARGET_SIGABRT: + signal_name = "SIGABRT"; + break; + case TARGET_SIGFPE: + signal_name = "SIGFPE"; + break; + case TARGET_SIGKILL: + signal_name = "SIGKILL"; + break; + case TARGET_SIGSEGV: + signal_name = "SIGSEGV"; + break; + case TARGET_SIGPIPE: + signal_name = "SIGPIPE"; + break; + case TARGET_SIGALRM: + signal_name = "SIGALRM"; + break; + case TARGET_SIGTERM: + signal_name = "SIGTERM"; + break; + case TARGET_SIGUSR1: + signal_name = "SIGUSR1"; + break; + case TARGET_SIGUSR2: + signal_name = "SIGUSR2"; + break; + case TARGET_SIGCHLD: + signal_name = "SIGCHLD"; + break; + case TARGET_SIGCONT: + signal_name = "SIGCONT"; + break; + case TARGET_SIGSTOP: + signal_name = "SIGSTOP"; + break; + case TARGET_SIGTTIN: + signal_name = "SIGTTIN"; + break; + case TARGET_SIGTTOU: + signal_name = "SIGTTOU"; + break; + } + if (signal_name == NULL) { + print_raw_param("%ld", arg, last); + return; + } + gemu_log("%s%s", signal_name, get_comma(last)); +} + +void print_taken_signal(int target_signum, const target_siginfo_t *tinfo) +{ + /* + * Print the strace output for a signal being taken: + * --- SIGSEGV {si_signo=SIGSEGV, si_code=SI_KERNEL, si_addr=0} --- + */ + 
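/*
 * Illustrative aside (editorial, not part of the original patch): as the
 * code below stands, only the signal name is printed -- the si_signo /
 * si_code / si_addr details shown in the comment above are not yet emitted.
 * For example, in a process with pid 1234:
 *
 *     print_taken_signal(TARGET_SIGSEGV, &tinfo);   =>   "1234 --- SIGSEGV ---"
 *
 * and a signal number missing from print_signal()'s switch falls back to
 * print_raw_param("%ld", ...), producing e.g. "1234 --- 33 ---".
 */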
gemu_log("%d ", getpid()); + gemu_log("--- "); + print_signal(target_signum, 1); + gemu_log(" ---\n"); +} diff --git a/bsd-user/syscall.c b/bsd-user/syscall.c deleted file mode 100644 index 3f44311396..0000000000 --- a/bsd-user/syscall.c +++ /dev/null @@ -1,577 +0,0 @@ -/* - * BSD syscalls - * - * Copyright (c) 2003 - 2008 Fabrice Bellard - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - */ -#include "qemu/osdep.h" -#include "qemu/cutils.h" -#include "qemu/path.h" -#include -#include -#include -#include - -#include "qemu.h" -#include "qemu-common.h" -#include "user/syscall-trace.h" - -//#define DEBUG - -static abi_ulong target_brk; -static abi_ulong target_original_brk; - -static inline abi_long get_errno(abi_long ret) -{ - if (ret == -1) - /* XXX need to translate host -> target errnos here */ - return -(errno); - else - return ret; -} - -#define target_to_host_bitmask(x, tbl) (x) - -static inline int is_error(abi_long ret) -{ - return (abi_ulong)ret >= (abi_ulong)(-4096); -} - -void target_set_brk(abi_ulong new_brk) -{ - target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk); -} - -/* do_obreak() must return target errnos. */ -static abi_long do_obreak(abi_ulong new_brk) -{ - abi_ulong brk_page; - abi_long mapped_addr; - int new_alloc_size; - - if (!new_brk) - return 0; - if (new_brk < target_original_brk) - return -TARGET_EINVAL; - - brk_page = HOST_PAGE_ALIGN(target_brk); - - /* If the new brk is less than this, set it and we're done... */ - if (new_brk < brk_page) { - target_brk = new_brk; - return 0; - } - - /* We need to allocate more memory after the brk... */ - new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1); - mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size, - PROT_READ|PROT_WRITE, - MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0)); - - if (!is_error(mapped_addr)) - target_brk = new_brk; - else - return mapped_addr; - - return 0; -} - -#if defined(TARGET_I386) -static abi_long do_freebsd_sysarch(CPUX86State *env, int op, abi_ulong parms) -{ - abi_long ret = 0; - abi_ulong val; - int idx; - - switch (op) { -#ifdef TARGET_ABI32 - case TARGET_FREEBSD_I386_SET_GSBASE: - case TARGET_FREEBSD_I386_SET_FSBASE: - if (op == TARGET_FREEBSD_I386_SET_GSBASE) -#else - case TARGET_FREEBSD_AMD64_SET_GSBASE: - case TARGET_FREEBSD_AMD64_SET_FSBASE: - if (op == TARGET_FREEBSD_AMD64_SET_GSBASE) -#endif - idx = R_GS; - else - idx = R_FS; - if (get_user(val, parms, abi_ulong)) - return -TARGET_EFAULT; - cpu_x86_load_seg(env, idx, 0); - env->segs[idx].base = val; - break; -#ifdef TARGET_ABI32 - case TARGET_FREEBSD_I386_GET_GSBASE: - case TARGET_FREEBSD_I386_GET_FSBASE: - if (op == TARGET_FREEBSD_I386_GET_GSBASE) -#else - case TARGET_FREEBSD_AMD64_GET_GSBASE: - case TARGET_FREEBSD_AMD64_GET_FSBASE: - if (op == TARGET_FREEBSD_AMD64_GET_GSBASE) -#endif - idx = R_GS; - else - idx = R_FS; - val = env->segs[idx].base; - if (put_user(val, parms, abi_ulong)) - return -TARGET_EFAULT; - break; - /* XXX handle the others... 
*/ - default: - ret = -TARGET_EINVAL; - break; - } - return ret; -} -#endif - -#ifdef TARGET_SPARC -static abi_long do_freebsd_sysarch(void *env, int op, abi_ulong parms) -{ - /* XXX handle - * TARGET_FREEBSD_SPARC_UTRAP_INSTALL, - * TARGET_FREEBSD_SPARC_SIGTRAMP_INSTALL - */ - return -TARGET_EINVAL; -} -#endif - -#ifdef __FreeBSD__ -/* - * XXX this uses the undocumented oidfmt interface to find the kind of - * a requested sysctl, see /sys/kern/kern_sysctl.c:sysctl_sysctl_oidfmt() - * (this is mostly copied from src/sbin/sysctl/sysctl.c) - */ -static int -oidfmt(int *oid, int len, char *fmt, uint32_t *kind) -{ - int qoid[CTL_MAXNAME+2]; - uint8_t buf[BUFSIZ]; - int i; - size_t j; - - qoid[0] = 0; - qoid[1] = 4; - memcpy(qoid + 2, oid, len * sizeof(int)); - - j = sizeof(buf); - i = sysctl(qoid, len + 2, buf, &j, 0, 0); - if (i) - return i; - - if (kind) - *kind = *(uint32_t *)buf; - - if (fmt) - strcpy(fmt, (char *)(buf + sizeof(uint32_t))); - return (0); -} - -/* - * try and convert sysctl return data for the target. - * XXX doesn't handle CTLTYPE_OPAQUE and CTLTYPE_STRUCT. - */ -static int sysctl_oldcvt(void *holdp, size_t holdlen, uint32_t kind) -{ - switch (kind & CTLTYPE) { - case CTLTYPE_INT: - case CTLTYPE_UINT: - *(uint32_t *)holdp = tswap32(*(uint32_t *)holdp); - break; -#ifdef TARGET_ABI32 - case CTLTYPE_LONG: - case CTLTYPE_ULONG: - *(uint32_t *)holdp = tswap32(*(long *)holdp); - break; -#else - case CTLTYPE_LONG: - *(uint64_t *)holdp = tswap64(*(long *)holdp); - break; - case CTLTYPE_ULONG: - *(uint64_t *)holdp = tswap64(*(unsigned long *)holdp); - break; -#endif -#ifdef CTLTYPE_U64 - case CTLTYPE_S64: - case CTLTYPE_U64: -#else - case CTLTYPE_QUAD: -#endif - *(uint64_t *)holdp = tswap64(*(uint64_t *)holdp); - break; - case CTLTYPE_STRING: - break; - default: - /* XXX unhandled */ - return -1; - } - return 0; -} - -/* XXX this needs to be emulated on non-FreeBSD hosts... */ -static abi_long do_freebsd_sysctl(abi_ulong namep, int32_t namelen, abi_ulong oldp, - abi_ulong oldlenp, abi_ulong newp, abi_ulong newlen) -{ - abi_long ret; - void *hnamep, *holdp, *hnewp = NULL; - size_t holdlen; - abi_ulong oldlen = 0; - int32_t *snamep = g_malloc(sizeof(int32_t) * namelen), *p, *q, i; - uint32_t kind = 0; - - if (oldlenp) - get_user_ual(oldlen, oldlenp); - if (!(hnamep = lock_user(VERIFY_READ, namep, namelen, 1))) - return -TARGET_EFAULT; - if (newp && !(hnewp = lock_user(VERIFY_READ, newp, newlen, 1))) - return -TARGET_EFAULT; - if (!(holdp = lock_user(VERIFY_WRITE, oldp, oldlen, 0))) - return -TARGET_EFAULT; - holdlen = oldlen; - for (p = hnamep, q = snamep, i = 0; i < namelen; p++, i++) - *q++ = tswap32(*p); - oidfmt(snamep, namelen, NULL, &kind); - /* XXX swap hnewp */ - ret = get_errno(sysctl(snamep, namelen, holdp, &holdlen, hnewp, newlen)); - if (!ret) - sysctl_oldcvt(holdp, holdlen, kind); - put_user_ual(holdlen, oldlenp); - unlock_user(hnamep, namep, 0); - unlock_user(holdp, oldp, holdlen); - if (hnewp) - unlock_user(hnewp, newp, 0); - g_free(snamep); - return ret; -} -#endif - -/* FIXME - * lock_iovec()/unlock_iovec() have a return code of 0 for success where - * other lock functions have a return code of 0 for failure. 
- */ -static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr, - int count, int copy) -{ - struct target_iovec *target_vec; - abi_ulong base; - int i; - - target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1); - if (!target_vec) - return -TARGET_EFAULT; - for (i = 0;i < count; i++) { - base = tswapl(target_vec[i].iov_base); - vec[i].iov_len = tswapl(target_vec[i].iov_len); - if (vec[i].iov_len != 0) { - vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy); - /* Don't check lock_user return value. We must call writev even - if a element has invalid base address. */ - } else { - /* zero length pointer is ignored */ - vec[i].iov_base = NULL; - } - } - unlock_user (target_vec, target_addr, 0); - return 0; -} - -static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr, - int count, int copy) -{ - struct target_iovec *target_vec; - abi_ulong base; - int i; - - target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1); - if (!target_vec) - return -TARGET_EFAULT; - for (i = 0;i < count; i++) { - if (target_vec[i].iov_base) { - base = tswapl(target_vec[i].iov_base); - unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0); - } - } - unlock_user (target_vec, target_addr, 0); - - return 0; -} - -/* do_syscall() should always have a single exit point at the end so - that actions, such as logging of syscall results, can be performed. - All errnos that do_syscall() returns must be -TARGET_. */ -abi_long do_freebsd_syscall(void *cpu_env, int num, abi_long arg1, - abi_long arg2, abi_long arg3, abi_long arg4, - abi_long arg5, abi_long arg6, abi_long arg7, - abi_long arg8) -{ - CPUState *cpu = env_cpu(cpu_env); - abi_long ret; - void *p; - -#ifdef DEBUG - gemu_log("freebsd syscall %d\n", num); -#endif - record_syscall_start(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, 0, 0); - - if (do_strace) - print_freebsd_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); - - switch (num) { - case TARGET_FREEBSD_NR_exit: -#ifdef CONFIG_GPROF - _mcleanup(); -#endif - gdb_exit(arg1); - qemu_plugin_user_exit(); - /* XXX: should free thread stack and CPU env */ - _exit(arg1); - ret = 0; /* avoid warning */ - break; - case TARGET_FREEBSD_NR_read: - if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) - goto efault; - ret = get_errno(read(arg1, p, arg3)); - unlock_user(p, arg2, ret); - break; - case TARGET_FREEBSD_NR_write: - if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) - goto efault; - ret = get_errno(write(arg1, p, arg3)); - unlock_user(p, arg2, 0); - break; - case TARGET_FREEBSD_NR_writev: - { - int count = arg3; - struct iovec *vec; - - vec = alloca(count * sizeof(struct iovec)); - if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0) - goto efault; - ret = get_errno(writev(arg1, vec, count)); - unlock_iovec(vec, arg2, count, 0); - } - break; - case TARGET_FREEBSD_NR_open: - if (!(p = lock_user_string(arg1))) - goto efault; - ret = get_errno(open(path(p), - target_to_host_bitmask(arg2, fcntl_flags_tbl), - arg3)); - unlock_user(p, arg1, 0); - break; - case TARGET_FREEBSD_NR_mmap: - ret = get_errno(target_mmap(arg1, arg2, arg3, - target_to_host_bitmask(arg4, mmap_flags_tbl), - arg5, - arg6)); - break; - case TARGET_FREEBSD_NR_mprotect: - ret = get_errno(target_mprotect(arg1, arg2, arg3)); - break; - case TARGET_FREEBSD_NR_break: - ret = do_obreak(arg1); - break; -#ifdef __FreeBSD__ - case TARGET_FREEBSD_NR___sysctl: - ret = do_freebsd_sysctl(arg1, arg2, arg3, arg4, arg5, arg6); - break; -#endif - case 
TARGET_FREEBSD_NR_sysarch: - ret = do_freebsd_sysarch(cpu_env, arg1, arg2); - break; - case TARGET_FREEBSD_NR_syscall: - case TARGET_FREEBSD_NR___syscall: - ret = do_freebsd_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,arg7,arg8,0); - break; - default: - ret = get_errno(syscall(num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8)); - break; - } - fail: -#ifdef DEBUG - gemu_log(" = %ld\n", ret); -#endif - if (do_strace) - print_freebsd_syscall_ret(num, ret); - - record_syscall_return(cpu, num, ret); - return ret; - efault: - ret = -TARGET_EFAULT; - goto fail; -} - -abi_long do_netbsd_syscall(void *cpu_env, int num, abi_long arg1, - abi_long arg2, abi_long arg3, abi_long arg4, - abi_long arg5, abi_long arg6) -{ - CPUState *cpu = env_cpu(cpu_env); - abi_long ret; - void *p; - -#ifdef DEBUG - gemu_log("netbsd syscall %d\n", num); -#endif - - record_syscall_start(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, 0, 0); - - if (do_strace) - print_netbsd_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); - - switch (num) { - case TARGET_NETBSD_NR_exit: -#ifdef CONFIG_GPROF - _mcleanup(); -#endif - gdb_exit(arg1); - qemu_plugin_user_exit(); - /* XXX: should free thread stack and CPU env */ - _exit(arg1); - ret = 0; /* avoid warning */ - break; - case TARGET_NETBSD_NR_read: - if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) - goto efault; - ret = get_errno(read(arg1, p, arg3)); - unlock_user(p, arg2, ret); - break; - case TARGET_NETBSD_NR_write: - if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) - goto efault; - ret = get_errno(write(arg1, p, arg3)); - unlock_user(p, arg2, 0); - break; - case TARGET_NETBSD_NR_open: - if (!(p = lock_user_string(arg1))) - goto efault; - ret = get_errno(open(path(p), - target_to_host_bitmask(arg2, fcntl_flags_tbl), - arg3)); - unlock_user(p, arg1, 0); - break; - case TARGET_NETBSD_NR_mmap: - ret = get_errno(target_mmap(arg1, arg2, arg3, - target_to_host_bitmask(arg4, mmap_flags_tbl), - arg5, - arg6)); - break; - case TARGET_NETBSD_NR_mprotect: - ret = get_errno(target_mprotect(arg1, arg2, arg3)); - break; - case TARGET_NETBSD_NR_syscall: - case TARGET_NETBSD_NR___syscall: - ret = do_netbsd_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0); - break; - default: - ret = syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); - break; - } - fail: -#ifdef DEBUG - gemu_log(" = %ld\n", ret); -#endif - if (do_strace) - print_netbsd_syscall_ret(num, ret); - - record_syscall_return(cpu, num, ret); - return ret; - efault: - ret = -TARGET_EFAULT; - goto fail; -} - -abi_long do_openbsd_syscall(void *cpu_env, int num, abi_long arg1, - abi_long arg2, abi_long arg3, abi_long arg4, - abi_long arg5, abi_long arg6) -{ - CPUState *cpu = env_cpu(cpu_env); - abi_long ret; - void *p; - -#ifdef DEBUG - gemu_log("openbsd syscall %d\n", num); -#endif - - record_syscall_start(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, 0, 0); - - if (do_strace) - print_openbsd_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); - - switch (num) { - case TARGET_OPENBSD_NR_exit: -#ifdef CONFIG_GPROF - _mcleanup(); -#endif - gdb_exit(arg1); - qemu_plugin_user_exit(); - /* XXX: should free thread stack and CPU env */ - _exit(arg1); - ret = 0; /* avoid warning */ - break; - case TARGET_OPENBSD_NR_read: - if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) - goto efault; - ret = get_errno(read(arg1, p, arg3)); - unlock_user(p, arg2, ret); - break; - case TARGET_OPENBSD_NR_write: - if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) - goto efault; - ret = get_errno(write(arg1, p, arg3)); - unlock_user(p, arg2, 
0); - break; - case TARGET_OPENBSD_NR_open: - if (!(p = lock_user_string(arg1))) - goto efault; - ret = get_errno(open(path(p), - target_to_host_bitmask(arg2, fcntl_flags_tbl), - arg3)); - unlock_user(p, arg1, 0); - break; - case TARGET_OPENBSD_NR_mmap: - ret = get_errno(target_mmap(arg1, arg2, arg3, - target_to_host_bitmask(arg4, mmap_flags_tbl), - arg5, - arg6)); - break; - case TARGET_OPENBSD_NR_mprotect: - ret = get_errno(target_mprotect(arg1, arg2, arg3)); - break; - case TARGET_OPENBSD_NR_syscall: - case TARGET_OPENBSD_NR___syscall: - ret = do_openbsd_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0); - break; - default: - ret = syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); - break; - } - fail: -#ifdef DEBUG - gemu_log(" = %ld\n", ret); -#endif - if (do_strace) - print_openbsd_syscall_ret(num, ret); - - record_syscall_return(cpu, num, ret); - return ret; - efault: - ret = -TARGET_EFAULT; - goto fail; -} - -void syscall_init(void) -{ -} diff --git a/bsd-user/syscall_defs.h b/bsd-user/syscall_defs.h index 207ddeecbf..b6d113d24a 100644 --- a/bsd-user/syscall_defs.h +++ b/bsd-user/syscall_defs.h @@ -1,105 +1,27 @@ -/* $OpenBSD: signal.h,v 1.19 2006/01/08 14:20:16 millert Exp $ */ -/* $NetBSD: signal.h,v 1.21 1996/02/09 18:25:32 christos Exp $ */ - /* - * Copyright (c) 1982, 1986, 1989, 1991, 1993 - * The Regents of the University of California. All rights reserved. - * (c) UNIX System Laboratories, Inc. - * All or some portions of this file are derived from material licensed - * to the University of California by American Telephone and Telegraph - * Co. or Unix System Laboratories, Inc. and are reproduced herein with - * the permission of UNIX System Laboratories, Inc. + * System call related declarations * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. + * Copyright (c) 2013-15 Stacey D. Son (sson at FreeBSD) * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
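An illustrative aside, not part of the patch: the read/write cases in the removed dispatchers above all follow the same guest-buffer pattern — translate and lock the guest address, perform the host syscall, convert errno, then unlock (copying data back to the guest only when the host wrote into the buffer). A hedged sketch of that pattern; do_guest_write is a hypothetical name, while lock_user(), unlock_user() and get_errno() are the helpers used by the removed code:

    /* Hypothetical wrapper showing the lock/act/unlock pattern. */
    static abi_long do_guest_write(int fd, abi_ulong guest_buf, abi_ulong len)
    {
        abi_long ret;
        void *p = lock_user(VERIFY_READ, guest_buf, len, 1);   /* copy guest -> host */

        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(write(fd, p, len));
        unlock_user(p, guest_buf, 0);                          /* nothing to copy back */
        return ret;
    }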
+ * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. * - * @(#)signal.h 8.2 (Berkeley) 1/21/94 + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . */ -#define TARGET_NSIG 32 /* counting 0; could be 33 (mask is 1-32) */ +#ifndef SYSCALL_DEFS_H +#define SYSCALL_DEFS_H -#define TARGET_SIGHUP 1 /* hangup */ -#define TARGET_SIGINT 2 /* interrupt */ -#define TARGET_SIGQUIT 3 /* quit */ -#define TARGET_SIGILL 4 /* illegal instruction (not reset when caught) */ -#define TARGET_SIGTRAP 5 /* trace trap (not reset when caught) */ -#define TARGET_SIGABRT 6 /* abort() */ -#define TARGET_SIGIOT SIGABRT /* compatibility */ -#define TARGET_SIGEMT 7 /* EMT instruction */ -#define TARGET_SIGFPE 8 /* floating point exception */ -#define TARGET_SIGKILL 9 /* kill (cannot be caught or ignored) */ -#define TARGET_SIGBUS 10 /* bus error */ -#define TARGET_SIGSEGV 11 /* segmentation violation */ -#define TARGET_SIGSYS 12 /* bad argument to system call */ -#define TARGET_SIGPIPE 13 /* write on a pipe with no one to read it */ -#define TARGET_SIGALRM 14 /* alarm clock */ -#define TARGET_SIGTERM 15 /* software termination signal from kill */ -#define TARGET_SIGURG 16 /* urgent condition on IO channel */ -#define TARGET_SIGSTOP 17 /* sendable stop signal not from tty */ -#define TARGET_SIGTSTP 18 /* stop signal from tty */ -#define TARGET_SIGCONT 19 /* continue a stopped process */ -#define TARGET_SIGCHLD 20 /* to parent on child stop or exit */ -#define TARGET_SIGTTIN 21 /* to readers pgrp upon background tty read */ -#define TARGET_SIGTTOU 22 /* like TTIN for output if (tp->t_local<OSTOP) */ -#define TARGET_SIGIO 23 /* input/output possible signal */ -#define TARGET_SIGXCPU 24 /* exceeded CPU time limit */ -#define TARGET_SIGXFSZ 25 /* exceeded file size limit */ -#define TARGET_SIGVTALRM 26 /* virtual time alarm */ -#define TARGET_SIGPROF 27 /* profiling time alarm */ -#define TARGET_SIGWINCH 28 /* window size changes */ -#define TARGET_SIGINFO 29 /* information request */ -#define TARGET_SIGUSR1 30 /* user defined signal 1 */ -#define TARGET_SIGUSR2 31 /* user defined signal 2 */ - -/* - * Language spec says we must list exactly one parameter, even though we - * actually supply three. Ugh! 
- */ -#define TARGET_SIG_DFL (void (*)(int))0 -#define TARGET_SIG_IGN (void (*)(int))1 -#define TARGET_SIG_ERR (void (*)(int))-1 - -#define TARGET_SA_ONSTACK 0x0001 /* take signal on signal stack */ -#define TARGET_SA_RESTART 0x0002 /* restart system on signal return */ -#define TARGET_SA_RESETHAND 0x0004 /* reset to SIG_DFL when taking signal */ -#define TARGET_SA_NODEFER 0x0010 /* don't mask the signal we're delivering */ -#define TARGET_SA_NOCLDWAIT 0x0020 /* don't create zombies (assign to pid 1) */ -#define TARGET_SA_USERTRAMP 0x0100 /* do not bounce off kernel's sigtramp */ -#define TARGET_SA_NOCLDSTOP 0x0008 /* do not generate SIGCHLD on child stop */ -#define TARGET_SA_SIGINFO 0x0040 /* generate siginfo_t */ - -/* - * Flags for sigprocmask: - */ -#define TARGET_SIG_BLOCK 1 /* block specified signal set */ -#define TARGET_SIG_UNBLOCK 2 /* unblock specified signal set */ -#define TARGET_SIG_SETMASK 3 /* set specified signal set */ - -#define TARGET_BADSIG SIG_ERR - -#define TARGET_SS_ONSTACK 0x0001 /* take signals on alternate stack */ -#define TARGET_SS_DISABLE 0x0004 /* disable taking signals on alternate stack */ +#include +#include #include "errno_defs.h" @@ -107,8 +29,205 @@ #include "netbsd/syscall_nr.h" #include "openbsd/syscall_nr.h" +/* + * machine/_types.h + * or x86/_types.h + */ + +/* + * time_t seems to be very inconsistly defined for the different *BSD's... + * + * FreeBSD uses a 64bits time_t except on i386 + * so we have to add a special case here. + * + * On NetBSD time_t is always defined as an int64_t. On OpenBSD time_t + * is always defined as an int. + * + */ +#if (!defined(TARGET_I386)) +typedef int64_t target_freebsd_time_t; +#else +typedef int32_t target_freebsd_time_t; +#endif + struct target_iovec { abi_long iov_base; /* Starting address */ abi_long iov_len; /* Number of bytes */ }; +/* + * sys/mman.h + */ +#define TARGET_FREEBSD_MAP_RESERVED0080 0x0080 /* previously misimplemented */ + /* MAP_INHERIT */ +#define TARGET_FREEBSD_MAP_RESERVED0100 0x0100 /* previously unimplemented */ + /* MAP_NOEXTEND */ +#define TARGET_FREEBSD_MAP_STACK 0x0400 /* region grows down, like a */ + /* stack */ +#define TARGET_FREEBSD_MAP_NOSYNC 0x0800 /* page to but do not sync */ + /* underlying file */ + +#define TARGET_FREEBSD_MAP_FLAGMASK 0x1ff7 + +#define TARGET_NETBSD_MAP_INHERIT 0x0080 /* region is retained after */ + /* exec */ +#define TARGET_NETBSD_MAP_TRYFIXED 0x0400 /* attempt hint address, even */ + /* within break */ +#define TARGET_NETBSD_MAP_WIRED 0x0800 /* mlock() mapping when it is */ + /* established */ + +#define TARGET_NETBSD_MAP_STACK 0x2000 /* allocated from memory, */ + /* swap space (stack) */ + +#define TARGET_NETBSD_MAP_FLAGMASK 0x3ff7 + +#define TARGET_OPENBSD_MAP_INHERIT 0x0080 /* region is retained after */ + /* exec */ +#define TARGET_OPENBSD_MAP_NOEXTEND 0x0100 /* for MAP_FILE, don't change */ + /* file size */ +#define TARGET_OPENBSD_MAP_TRYFIXED 0x0400 /* attempt hint address, */ + /* even within heap */ + +#define TARGET_OPENBSD_MAP_FLAGMASK 0x17f7 + +/* XXX */ +#define TARGET_BSD_MAP_FLAGMASK 0x3ff7 + +/* + * sys/time.h + * sys/timex.h + */ + +typedef abi_long target_freebsd_suseconds_t; + +/* compare to sys/timespec.h */ +struct target_freebsd_timespec { + target_freebsd_time_t tv_sec; /* seconds */ + abi_long tv_nsec; /* and nanoseconds */ +#if !defined(TARGET_I386) && TARGET_ABI_BITS == 32 + abi_long _pad; +#endif +}; + +#define TARGET_CPUCLOCK_WHICH_PID 0 +#define TARGET_CPUCLOCK_WHICH_TID 1 + +/* sys/umtx.h */ +struct 
target_freebsd__umtx_time { + struct target_freebsd_timespec _timeout; + uint32_t _flags; + uint32_t _clockid; +}; + +struct target_freebsd_timeval { + target_freebsd_time_t tv_sec; /* seconds */ + target_freebsd_suseconds_t tv_usec;/* and microseconds */ +#if !defined(TARGET_I386) && TARGET_ABI_BITS == 32 + abi_long _pad; +#endif +}; + +/* + * sys/resource.h + */ +#if defined(__FreeBSD__) +#define TARGET_RLIM_INFINITY RLIM_INFINITY +#else +#define TARGET_RLIM_INFINITY ((abi_ulong)-1) +#endif + +#define TARGET_RLIMIT_CPU 0 +#define TARGET_RLIMIT_FSIZE 1 +#define TARGET_RLIMIT_DATA 2 +#define TARGET_RLIMIT_STACK 3 +#define TARGET_RLIMIT_CORE 4 +#define TARGET_RLIMIT_RSS 5 +#define TARGET_RLIMIT_MEMLOCK 6 +#define TARGET_RLIMIT_NPROC 7 +#define TARGET_RLIMIT_NOFILE 8 +#define TARGET_RLIMIT_SBSIZE 9 +#define TARGET_RLIMIT_AS 10 +#define TARGET_RLIMIT_NPTS 11 +#define TARGET_RLIMIT_SWAP 12 + +struct target_rlimit { + uint64_t rlim_cur; + uint64_t rlim_max; +}; + +struct target_freebsd_rusage { + struct target_freebsd_timeval ru_utime; /* user time used */ + struct target_freebsd_timeval ru_stime; /* system time used */ + abi_long ru_maxrss; /* maximum resident set size */ + abi_long ru_ixrss; /* integral shared memory size */ + abi_long ru_idrss; /* integral unshared data size */ + abi_long ru_isrss; /* integral unshared stack size */ + abi_long ru_minflt; /* page reclaims */ + abi_long ru_majflt; /* page faults */ + abi_long ru_nswap; /* swaps */ + abi_long ru_inblock; /* block input operations */ + abi_long ru_oublock; /* block output operations */ + abi_long ru_msgsnd; /* messages sent */ + abi_long ru_msgrcv; /* messages received */ + abi_long ru_nsignals; /* signals received */ + abi_long ru_nvcsw; /* voluntary context switches */ + abi_long ru_nivcsw; /* involuntary context switches */ +}; + +struct target_freebsd__wrusage { + struct target_freebsd_rusage wru_self; + struct target_freebsd_rusage wru_children; +}; + +#define safe_syscall0(type, name) \ +type safe_##name(void) \ +{ \ + return safe_syscall(SYS_##name); \ +} + +#define safe_syscall1(type, name, type1, arg1) \ +type safe_##name(type1 arg1) \ +{ \ + return safe_syscall(SYS_##name, arg1); \ +} + +#define safe_syscall2(type, name, type1, arg1, type2, arg2) \ +type safe_##name(type1 arg1, type2 arg2) \ +{ \ + return safe_syscall(SYS_##name, arg1, arg2); \ +} + +#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ +type safe_##name(type1 arg1, type2 arg2, type3 arg3) \ +{ \ + return safe_syscall(SYS_##name, arg1, arg2, arg3); \ +} + +#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \ + type4, arg4) \ +type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ +{ \ + return safe_syscall(SYS_##name, arg1, arg2, arg3, arg4); \ +} + +#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \ + type4, arg4, type5, arg5) \ +type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ + type5 arg5) \ +{ \ + return safe_syscall(SYS_##name, arg1, arg2, arg3, arg4, arg5); \ +} + +#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \ + type4, arg4, type5, arg5, type6, arg6) \ +type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ + type5 arg5, type6 arg6) \ +{ \ + return safe_syscall(SYS_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ +} + +/* So far all target and host bitmasks are the same */ +#define target_to_host_bitmask(x, tbl) (x) +#define host_to_target_bitmask(x, tbl) (x) + +#endif /* SYSCALL_DEFS_H */ diff --git 
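An illustrative aside, not part of the patch: the safe_syscallN macros defined above stamp out thin wrappers that funnel every call through safe_syscall(), the mechanism bsd-user provides elsewhere so that host syscalls can be interrupted and restarted cleanly when a guest signal arrives. Roughly what one instantiation expands to; the read example is hypothetical, and SYS_read comes from <sys/syscall.h>:

    /* safe_syscall3(ssize_t, read, int, fd, void *, buf, size_t, nbytes)
     * expands to approximately: */
    ssize_t safe_read(int fd, void *buf, size_t nbytes)
    {
        return safe_syscall(SYS_read, fd, buf, nbytes);
    }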
a/bsd-user/trace-events b/bsd-user/trace-events new file mode 100644 index 0000000000..843896f627 --- /dev/null +++ b/bsd-user/trace-events @@ -0,0 +1,11 @@ +# See docs/tracing.txt for syntax documentation. + +# bsd-user/signal.c +user_setup_frame(void *env, uint64_t frame_addr) "env=%p frame_addr=0x%"PRIx64 +user_setup_rt_frame(void *env, uint64_t frame_addr) "env=%p frame_addr=0x%"PRIx64 +user_do_rt_sigreturn(void *env, uint64_t frame_addr) "env=%p frame_addr=0x%"PRIx64 +user_do_sigreturn(void *env, uint64_t frame_addr) "env=%p frame_addr=0x%"PRIx64 +user_dump_core_and_abort(void *env, int target_sig, int host_sig) "env=%p signal %d (host %d)" +user_handle_signal(void *env, int target_sig) "env=%p signal %d" +user_host_signal(void *env, int host_sig, int target_sig) "env=%p signal %d (target %d(" +user_queue_signal(void *env, int target_sig) "env=%p signal %d" diff --git a/bsd-user/trace.h b/bsd-user/trace.h new file mode 100644 index 0000000000..593c0204ad --- /dev/null +++ b/bsd-user/trace.h @@ -0,0 +1 @@ +#include "trace/trace-bsd_user.h" diff --git a/bsd-user/x86_64/signal.c b/bsd-user/x86_64/signal.c new file mode 100644 index 0000000000..c3875bc4c6 --- /dev/null +++ b/bsd-user/x86_64/signal.c @@ -0,0 +1,68 @@ +/* + * x86_64 signal definitions + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu.h" + +/* + * Compare to amd64/amd64/machdep.c sendsig() + * Assumes that target stack frame memory is locked. + */ +abi_long set_sigtramp_args(CPUX86State *regs, + int sig, struct target_sigframe *frame, abi_ulong frame_addr, + struct target_sigaction *ka) +{ + /* XXX return -TARGET_EOPNOTSUPP; */ + return 0; +} + +/* + * Compare to amd64/amd64/exec_machdep.c sendsig() + * Assumes that the memory is locked if frame points to user memory. + */ +abi_long setup_sigframe_arch(CPUX86State *env, abi_ulong frame_addr, + struct target_sigframe *frame, int flags) +{ + target_mcontext_t *mcp = &frame->sf_uc.uc_mcontext; + + get_mcontext(env, mcp, flags); + return 0; +} + +/* Compare to amd64/amd64/machdep.c get_mcontext() */ +abi_long get_mcontext(CPUX86State *regs, + target_mcontext_t *mcp, int flags) +{ + /* XXX */ + return -TARGET_EOPNOTSUPP; +} + +/* Compare to amd64/amd64/machdep.c set_mcontext() */ +abi_long set_mcontext(CPUX86State *regs, + target_mcontext_t *mcp, int srflag) +{ + /* XXX */ + return -TARGET_EOPNOTSUPP; +} + +abi_long get_ucontext_sigreturn(CPUX86State *regs, + abi_ulong target_sf, abi_ulong *target_uc) +{ + /* XXX */ + *target_uc = 0; + return -TARGET_EOPNOTSUPP; +} diff --git a/bsd-user/x86_64/target.h b/bsd-user/x86_64/target.h new file mode 100644 index 0000000000..0cf0e2a14a --- /dev/null +++ b/bsd-user/x86_64/target.h @@ -0,0 +1,21 @@ +/* + * Intel general target stuff that's common to all x86_64 details + * + * Copyright (c) 2022 M. 
Warner Losh + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef TARGET_H +#define TARGET_H + +/* + * x86 doesn't 'lump' the registers for 64-bit args, all args are 64 bits. + */ +static inline bool regpairs_aligned(void *cpu_env) +{ + return false; +} + +#endif /* TARGET_H */ + diff --git a/bsd-user/x86_64/target_arch.h b/bsd-user/x86_64/target_arch.h new file mode 100644 index 0000000000..09bd974889 --- /dev/null +++ b/bsd-user/x86_64/target_arch.h @@ -0,0 +1,31 @@ +/* + * Intel x86_64 specific prototypes for bsd-user + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_H +#define TARGET_ARCH_H + +/* target_arch_cpu.c */ +void bsd_x86_64_write_dt(void *ptr, unsigned long addr, unsigned long limit, + int flags); +void bsd_x86_64_set_idt(int n, unsigned int dpl); +void bsd_x86_64_set_idt_base(uint64_t base); + +#define target_cpu_set_tls(env, newtls) + +#endif /* TARGET_ARCH_H */ diff --git a/bsd-user/x86_64/target_arch_cpu.c b/bsd-user/x86_64/target_arch_cpu.c new file mode 100644 index 0000000000..be7bd10720 --- /dev/null +++ b/bsd-user/x86_64/target_arch_cpu.c @@ -0,0 +1,71 @@ +/* + * x86_64 cpu related code + * + * Copyright (c) 2013 Stacey Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#include + +#include "qemu/osdep.h" +#include "cpu.h" +#include "qemu.h" +#include "qemu/timer.h" + +#include "target_arch.h" + +static uint64_t *idt_table; + +uint64_t cpu_get_tsc(CPUX86State *env) +{ + return cpu_get_host_ticks(); +} + +void bsd_x86_64_write_dt(void *ptr, unsigned long addr, + unsigned long limit, int flags) +{ + unsigned int e1, e2; + uint32_t *p; + e1 = (addr << 16) | (limit & 0xffff); + e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000); + e2 |= flags; + p = ptr; + p[0] = tswap32(e1); + p[1] = tswap32(e2); +} + +static void set_gate64(void *ptr, unsigned int type, unsigned int dpl, + uint64_t addr, unsigned int sel) +{ + uint32_t *p, e1, e2; + e1 = (addr & 0xffff) | (sel << 16); + e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8); + p = ptr; + p[0] = tswap32(e1); + p[1] = tswap32(e2); + p[2] = tswap32(addr >> 32); + p[3] = 0; +} + +/* only dpl matters as we do only user space emulation */ +void bsd_x86_64_set_idt(int n, unsigned int dpl) +{ + set_gate64(idt_table + n * 2, 0, dpl, 0, 0); +} + +void bsd_x86_64_set_idt_base(uint64_t base) +{ + idt_table = g2h_untagged(base); +} diff --git a/bsd-user/x86_64/target_arch_cpu.h b/bsd-user/x86_64/target_arch_cpu.h new file mode 100644 index 0000000000..4094d61da1 --- /dev/null +++ b/bsd-user/x86_64/target_arch_cpu.h @@ -0,0 +1,177 @@ +/* + * x86_64 cpu init and loop + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
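An illustrative aside, not part of the patch: bsd_x86_64_write_dt() above packs a segment descriptor's base, limit and attribute bits into the two 32-bit words the hardware expects. A small standalone sketch of that packing; the flag constant below is a hand-assembled example (present, DPL 0, long-mode code, 4 KiB granularity), not one of QEMU's DESC_* masks:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long addr = 0;            /* flat segment base */
        unsigned long limit = 0xfffff;     /* 4 GiB with 4 KiB granularity */
        unsigned int flags = 0x00a09b00;   /* example attribute bits */
        uint32_t e1 = (addr << 16) | (limit & 0xffff);
        uint32_t e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) |
                      (limit & 0x000f0000) | flags;

        /* Prints "descriptor words: 0x0000ffff 0x00af9b00" for these inputs. */
        printf("descriptor words: 0x%08x 0x%08x\n", (unsigned)e1, (unsigned)e2);
        return 0;
    }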
+ */ + +#ifndef TARGET_ARCH_CPU_H +#define TARGET_ARCH_CPU_H + +#include "target_arch.h" +#include "signal-common.h" + +#define TARGET_DEFAULT_CPU_MODEL "qemu64" + +static inline void target_cpu_init(CPUX86State *env, + struct target_pt_regs *regs) +{ + uint64_t *gdt_table; + + env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK; + env->hflags |= HF_PE_MASK | HF_CPL_MASK; + if (env->features[FEAT_1_EDX] & CPUID_SSE) { + env->cr[4] |= CR4_OSFXSR_MASK; + env->hflags |= HF_OSFXSR_MASK; + } + + /* enable 64 bit mode if possible */ + if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) { + fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n"); + exit(1); + } + env->cr[4] |= CR4_PAE_MASK; + env->efer |= MSR_EFER_LMA | MSR_EFER_LME; + env->hflags |= HF_LMA_MASK; + + /* flags setup : we activate the IRQs by default as in user mode */ + env->eflags |= IF_MASK; + + /* register setup */ + env->regs[R_EAX] = regs->rax; + env->regs[R_EBX] = regs->rbx; + env->regs[R_ECX] = regs->rcx; + env->regs[R_EDX] = regs->rdx; + env->regs[R_ESI] = regs->rsi; + env->regs[R_EDI] = regs->rdi; + env->regs[R_EBP] = regs->rbp; + env->regs[R_ESP] = regs->rsp; + env->eip = regs->rip; + + /* interrupt setup */ + env->idt.limit = 511; + + env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1), + PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + bsd_x86_64_set_idt_base(env->idt.base); + bsd_x86_64_set_idt(0, 0); + bsd_x86_64_set_idt(1, 0); + bsd_x86_64_set_idt(2, 0); + bsd_x86_64_set_idt(3, 3); + bsd_x86_64_set_idt(4, 3); + bsd_x86_64_set_idt(5, 0); + bsd_x86_64_set_idt(6, 0); + bsd_x86_64_set_idt(7, 0); + bsd_x86_64_set_idt(8, 0); + bsd_x86_64_set_idt(9, 0); + bsd_x86_64_set_idt(10, 0); + bsd_x86_64_set_idt(11, 0); + bsd_x86_64_set_idt(12, 0); + bsd_x86_64_set_idt(13, 0); + bsd_x86_64_set_idt(14, 0); + bsd_x86_64_set_idt(15, 0); + bsd_x86_64_set_idt(16, 0); + bsd_x86_64_set_idt(17, 0); + bsd_x86_64_set_idt(18, 0); + bsd_x86_64_set_idt(19, 0); + bsd_x86_64_set_idt(0x80, 3); + + /* segment setup */ + env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES, + PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1; + gdt_table = g2h_untagged(env->gdt.base); + + /* 64 bit code segment */ + bsd_x86_64_write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | DESC_L_MASK + | (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT)); + + bsd_x86_64_write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | + (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT)); + + cpu_x86_load_seg(env, R_CS, __USER_CS); + cpu_x86_load_seg(env, R_SS, __USER_DS); + cpu_x86_load_seg(env, R_DS, 0); + cpu_x86_load_seg(env, R_ES, 0); + cpu_x86_load_seg(env, R_FS, 0); + cpu_x86_load_seg(env, R_GS, 0); +} + +static inline void target_cpu_loop(CPUX86State *env) +{ + CPUState *cs = env_cpu(env); + int trapnr; + abi_ulong pc; + /* target_siginfo_t info; */ + + for (;;) { + cpu_exec_start(cs); + trapnr = cpu_exec(cs); + cpu_exec_end(cs); + process_queued_cpu_work(cs); + + switch (trapnr) { + case EXCP_SYSCALL: + /* syscall from syscall instruction */ + env->regs[R_EAX] = do_freebsd_syscall(env, + env->regs[R_EAX], + env->regs[R_EDI], + env->regs[R_ESI], + env->regs[R_EDX], + env->regs[R_ECX], + env->regs[8], + env->regs[9], 0, 0); + env->eip = env->exception_next_eip; + if (((abi_ulong)env->regs[R_EAX]) >= (abi_ulong)(-515)) { + env->regs[R_EAX] 
= -env->regs[R_EAX]; + env->eflags |= CC_C; + } else { + env->eflags &= ~CC_C; + } + break; + + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + + case EXCP_ATOMIC: + cpu_exec_step_atomic(cs); + break; + + default: + pc = env->segs[R_CS].base + env->eip; + fprintf(stderr, "qemu: 0x%08lx: unhandled CPU exception 0x%x - " + "aborting\n", (long)pc, trapnr); + abort(); + } + process_pending_signals(env); + } +} + +static inline void target_cpu_clone_regs(CPUX86State *env, target_ulong newsp) +{ + if (newsp) { + env->regs[R_ESP] = newsp; + } + env->regs[R_EAX] = 0; +} + +static inline void target_cpu_reset(CPUArchState *env) +{ + cpu_reset(env_cpu(env)); +} + +#endif /* TARGET_ARCH_CPU_H */ diff --git a/bsd-user/x86_64/target_arch_elf.h b/bsd-user/x86_64/target_arch_elf.h new file mode 100644 index 0000000000..b244711888 --- /dev/null +++ b/bsd-user/x86_64/target_arch_elf.h @@ -0,0 +1,36 @@ +/* + * x86_64 ELF definitions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_ELF_H +#define TARGET_ARCH_ELF_H + +#define ELF_START_MMAP 0x2aaaaab000ULL +#define ELF_ET_DYN_LOAD_ADDR 0x01021000 +#define elf_check_arch(x) (((x) == ELF_ARCH)) + +#define ELF_HWCAP 0 /* FreeBSD doesn't do AT_HWCAP{,2} on x86 */ + +#define ELF_CLASS ELFCLASS64 +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_X86_64 + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE 4096 + +#endif /* TARGET_ARCH_ELF_H */ diff --git a/bsd-user/x86_64/target_arch_reg.h b/bsd-user/x86_64/target_arch_reg.h new file mode 100644 index 0000000000..7a766de918 --- /dev/null +++ b/bsd-user/x86_64/target_arch_reg.h @@ -0,0 +1,92 @@ +/* + * FreeBSD amd64 register structures + * + * Copyright (c) 2015 Stacey Son + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
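An illustrative aside, not part of the patch: target_cpu_loop() above follows the FreeBSD convention that a failing syscall returns with the carry flag set and a positive errno in RAX. Because do_freebsd_syscall() hands back -TARGET_Exxx values, the loop negates anything falling in the error window and raises CC_C. A standalone sketch of that check; the -2 is only a stand-in for a -TARGET_Exxx value:

    #include <stdio.h>

    int main(void)
    {
        long rax = -2;       /* pretend do_freebsd_syscall() returned -TARGET_ENOENT */
        int carry;

        if ((unsigned long)rax >= (unsigned long)-515) {  /* same window as the loop */
            rax = -rax;      /* guest sees a positive errno ... */
            carry = 1;       /* ... with CF set to flag the failure */
        } else {
            carry = 0;       /* success: CF cleared, RAX holds the result */
        }
        printf("rax=%ld carry=%d\n", rax, carry);   /* prints "rax=2 carry=1" */
        return 0;
    }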
+ */ + +#ifndef TARGET_ARCH_REG_H +#define TARGET_ARCH_REG_H + +/* See sys/amd64/include/reg.h */ +typedef struct target_reg { + uint64_t r_r15; + uint64_t r_r14; + uint64_t r_r13; + uint64_t r_r12; + uint64_t r_r11; + uint64_t r_r10; + uint64_t r_r9; + uint64_t r_r8; + uint64_t r_rdi; + uint64_t r_rsi; + uint64_t r_rbp; + uint64_t r_rbx; + uint64_t r_rdx; + uint64_t r_rcx; + uint64_t r_rax; + uint32_t r_trapno; + uint16_t r_fs; + uint16_t r_gs; + uint32_t r_err; + uint16_t r_es; + uint16_t r_ds; + uint64_t r_rip; + uint64_t r_cs; + uint64_t r_rflags; + uint64_t r_rsp; + uint64_t r_ss; +} target_reg_t; + +typedef struct target_fpreg { + uint64_t fpr_env[4]; + uint8_t fpr_acc[8][16]; + uint8_t fpr_xacc[16][16]; + uint64_t fpr_spare[12]; +} target_fpreg_t; + +static inline void target_copy_regs(target_reg_t *regs, const CPUX86State *env) +{ + + regs->r_r15 = env->regs[15]; + regs->r_r14 = env->regs[14]; + regs->r_r13 = env->regs[13]; + regs->r_r12 = env->regs[12]; + regs->r_r11 = env->regs[11]; + regs->r_r10 = env->regs[10]; + regs->r_r9 = env->regs[9]; + regs->r_r8 = env->regs[8]; + regs->r_rdi = env->regs[R_EDI]; + regs->r_rsi = env->regs[R_ESI]; + regs->r_rbp = env->regs[R_EBP]; + regs->r_rbx = env->regs[R_EBX]; + regs->r_rdx = env->regs[R_EDX]; + regs->r_rcx = env->regs[R_ECX]; + regs->r_rax = env->regs[R_EAX]; + /* regs->r_trapno = env->regs[R_TRAPNO]; XXX */ + regs->r_fs = env->segs[R_FS].selector & 0xffff; + regs->r_gs = env->segs[R_GS].selector & 0xffff; + regs->r_err = env->error_code; /* XXX ? */ + regs->r_es = env->segs[R_ES].selector & 0xffff; + regs->r_ds = env->segs[R_DS].selector & 0xffff; + regs->r_rip = env->eip; + regs->r_cs = env->segs[R_CS].selector & 0xffff; + regs->r_rflags = env->eflags; + regs->r_rsp = env->regs[R_ESP]; + regs->r_ss = env->segs[R_SS].selector & 0xffff; +} + +#endif /* TARGET_ARCH_REG_H */ diff --git a/bsd-user/x86_64/target_arch_signal.h b/bsd-user/x86_64/target_arch_signal.h new file mode 100644 index 0000000000..ca24bf1e7f --- /dev/null +++ b/bsd-user/x86_64/target_arch_signal.h @@ -0,0 +1,100 @@ +/* + * x86_64 signal definitions + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_SIGNAL_H +#define TARGET_ARCH_SIGNAL_H + +#include "cpu.h" + +/* Size of the signal trampolin code placed on the stack. */ +#define TARGET_SZSIGCODE 0 + +/* compare to x86/include/_limits.h */ +#define TARGET_MINSIGSTKSZ (512 * 4) /* min sig stack size */ +#define TARGET_SIGSTKSZ (MINSIGSTKSZ + 32768) /* recommended size */ + +typedef struct target_mcontext { + abi_ulong mc_onstack; /* XXX - sigcontext compat. 
*/ + abi_ulong mc_rdi; /* machine state (struct trapframe) */ + abi_ulong mc_rsi; + abi_ulong mc_rdx; + abi_ulong mc_rcx; + abi_ulong mc_r8; + abi_ulong mc_r9; + abi_ulong mc_rax; + abi_ulong mc_rbx; + abi_ulong mc_rbp; + abi_ulong mc_r10; + abi_ulong mc_r11; + abi_ulong mc_r12; + abi_ulong mc_r13; + abi_ulong mc_r14; + abi_ulong mc_r15; + uint32_t mc_trapno; + uint16_t mc_fs; + uint16_t mc_gs; + abi_ulong mc_addr; + uint32_t mc_flags; + uint16_t mc_es; + uint16_t mc_ds; + abi_ulong mc_err; + abi_ulong mc_rip; + abi_ulong mc_cs; + abi_ulong mc_rflags; + abi_ulong mc_rsp; + abi_ulong mc_ss; + + abi_long mc_len; /* sizeof(mcontext_t) */ + +#define _MC_FPFMT_NODEV 0x10000 /* device not present or configured */ +#define _MC_FPFMT_XMM 0x10002 + abi_long mc_fpformat; +#define _MC_FPOWNED_NONE 0x20000 /* FP state not used */ +#define _MC_FPOWNED_FPU 0x20001 /* FP state came from FPU */ +#define _MC_FPOWNED_PCB 0x20002 /* FP state came from PCB */ + abi_long mc_ownedfp; + /* + * See for the internals of mc_fpstate[]. + */ + abi_long mc_fpstate[64] __aligned(16); + + abi_ulong mc_fsbase; + abi_ulong mc_gsbase; + + abi_ulong mc_xfpustate; + abi_ulong mc_xfpustate_len; + + abi_long mc_spare[4]; +} target_mcontext_t; + +#define TARGET_MCONTEXT_SIZE 800 +#define TARGET_UCONTEXT_SIZE 880 + +#include "target_os_ucontext.h" + +struct target_sigframe { + abi_ulong sf_signum; + abi_ulong sf_siginfo; /* code or pointer to sf_si */ + abi_ulong sf_ucontext; /* points to sf_uc */ + abi_ulong sf_addr; /* undocumented 4th arg */ + target_ucontext_t sf_uc; /* = *sf_uncontext */ + target_siginfo_t sf_si; /* = *sf_siginfo (SA_SIGINFO case)*/ + uint32_t __spare__[2]; +}; + +#endif /* TARGET_ARCH_SIGNAL_H */ diff --git a/bsd-user/x86_64/target_arch_sigtramp.h b/bsd-user/x86_64/target_arch_sigtramp.h new file mode 100644 index 0000000000..01da614098 --- /dev/null +++ b/bsd-user/x86_64/target_arch_sigtramp.h @@ -0,0 +1,29 @@ +/* + * Intel x86_64 sigcode for bsd-user + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_SIGTRAMP_H +#define TARGET_ARCH_SIGTRAMP_H + +static inline abi_long setup_sigtramp(abi_ulong offset, unsigned sigf_uc, + unsigned sys_sigreturn) +{ + + return 0; +} +#endif /* TARGET_ARCH_SIGTRAMP_H */ diff --git a/bsd-user/x86_64/target_arch_sysarch.h b/bsd-user/x86_64/target_arch_sysarch.h index 5c36fc0752..152cb8bcb8 100644 --- a/bsd-user/x86_64/target_arch_sysarch.h +++ b/bsd-user/x86_64/target_arch_sysarch.h @@ -16,8 +16,8 @@ * along with this program; if not, see . */ -#ifndef BSD_USER_ARCH_SYSARCH_H_ -#define BSD_USER_ARCH_SYSARCH_H_ +#ifndef TARGET_ARCH_SYSARCH_H +#define TARGET_ARCH_SYSARCH_H #include "target_syscall.h" @@ -73,4 +73,4 @@ static inline void do_freebsd_arch_print_sysarch( TARGET_ABI_FMT_lx ")", name->name, (int)arg1, arg2, arg3, arg4); } -#endif /*! 
BSD_USER_ARCH_SYSARCH_H_ */ +#endif /* TARGET_ARCH_SYSARCH_H */ diff --git a/bsd-user/x86_64/target_arch_thread.h b/bsd-user/x86_64/target_arch_thread.h new file mode 100644 index 0000000000..52c28906d6 --- /dev/null +++ b/bsd-user/x86_64/target_arch_thread.h @@ -0,0 +1,39 @@ +/* + * x86_64 thread support + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_THREAD_H +#define TARGET_ARCH_THREAD_H + +/* Compare to vm_machdep.c cpu_set_upcall_kse() */ +static inline void target_thread_set_upcall(CPUX86State *regs, abi_ulong entry, + abi_ulong arg, abi_ulong stack_base, abi_ulong stack_size) +{ + /* XXX */ +} + +static inline void target_thread_init(struct target_pt_regs *regs, + struct image_info *infop) +{ + regs->rax = 0; + regs->rsp = infop->start_stack; + regs->rip = infop->entry; + regs->rdi = infop->start_stack; +} + +#endif /* TARGET_ARCH_THREAD_H */ diff --git a/bsd-user/x86_64/target_arch_vmparam.h b/bsd-user/x86_64/target_arch_vmparam.h new file mode 100644 index 0000000000..6797623a6b --- /dev/null +++ b/bsd-user/x86_64/target_arch_vmparam.h @@ -0,0 +1,47 @@ +/* + * Intel x86_64 VM parameters definitions + * + * Copyright (c) 2013 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#ifndef TARGET_ARCH_VMPARAM_H +#define TARGET_ARCH_VMPARAM_H + +#include "cpu.h" + +/* compare to amd64/include/vmparam.h */ +#define TARGET_MAXTSIZ (128 * MiB) /* max text size */ +#define TARGET_DFLDSIZ (32 * GiB) /* initial data size limit */ +#define TARGET_MAXDSIZ (32 * GiB) /* max data size */ +#define TARGET_DFLSSIZ (8 * MiB) /* initial stack size limit */ +#define TARGET_MAXSSIZ (512 * MiB) /* max stack size */ +#define TARGET_SGROWSIZ (128 * KiB) /* amount to grow stack */ + +#define TARGET_VM_MAXUSER_ADDRESS (0x00007fffff000000UL) + +#define TARGET_USRSTACK (TARGET_VM_MAXUSER_ADDRESS - TARGET_PAGE_SIZE) + +static inline abi_ulong get_sp_from_cpustate(CPUX86State *state) +{ + return state->regs[R_ESP]; +} + +static inline void set_second_rval(CPUX86State *state, abi_ulong retval2) +{ + state->regs[R_EDX] = retval2; +} + +#endif /* TARGET_ARCH_VMPARAM_H */ diff --git a/capstone b/capstone deleted file mode 160000 index f8b1b83301..0000000000 --- a/capstone +++ /dev/null @@ -1 +0,0 @@ -Subproject commit f8b1b833015a4ae47110ed068e0deb7106ced66d diff --git a/chardev/baum.c b/chardev/baum.c index 79d618e350..0a0d12661a 100644 --- a/chardev/baum.c +++ b/chardev/baum.c @@ -87,6 +87,9 @@ #define BUF_SIZE 256 +#define X_MAX 84 +#define Y_MAX 1 + struct BaumChardev { Chardev parent; @@ -244,11 +247,11 @@ static int baum_deferred_init(BaumChardev *baum) brlapi_perror("baum: brlapi__getDisplaySize"); return 0; } - if (baum->y > 1) { - baum->y = 1; + if (baum->y > Y_MAX) { + baum->y = Y_MAX; } - if (baum->x > 84) { - baum->x = 84; + if (baum->x > X_MAX) { + baum->x = X_MAX; } con = qemu_console_lookup_by_index(0); @@ -296,7 +299,8 @@ static void baum_chr_accept_input(struct Chardev *chr) static void baum_write_packet(BaumChardev *baum, const uint8_t *buf, int len) { Chardev *chr = CHARDEV(baum); - uint8_t io_buf[1 + 2 * len], *cur = io_buf; + g_autofree uint8_t *io_buf = g_malloc(1 + 2 * len); + uint8_t *cur = io_buf; int room; *cur++ = ESC; while (len--) @@ -380,9 +384,9 @@ static int baum_eat_packet(BaumChardev *baum, const uint8_t *buf, int len) switch (req) { case BAUM_REQ_DisplayData: { - uint8_t cells[baum->x * baum->y], c; - uint8_t text[baum->x * baum->y]; - uint8_t zero[baum->x * baum->y]; + uint8_t cells[X_MAX * Y_MAX], c; + uint8_t text[X_MAX * Y_MAX]; + uint8_t zero[X_MAX * Y_MAX]; int cursor = BRLAPI_CURSOR_OFF; int i; @@ -405,7 +409,7 @@ static int baum_eat_packet(BaumChardev *baum, const uint8_t *buf, int len) } timer_del(baum->cellCount_timer); - memset(zero, 0, sizeof(zero)); + memset(zero, 0, baum->x * baum->y); brlapi_writeArguments_t wa = { .displayNumber = BRLAPI_DISPLAY_DEFAULT, diff --git a/chardev/char-fd.c b/chardev/char-fd.c index 93c56913b4..cf78454841 100644 --- a/chardev/char-fd.c +++ b/chardev/char-fd.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/module.h" #include "qemu/sockets.h" #include "qapi/error.h" @@ -213,8 +212,8 @@ void qemu_chr_open_fd(Chardev *chr, FDChardev *s = FD_CHARDEV(chr); g_autofree char *name = NULL; - if (fd_out >= 0) { - qemu_set_nonblock(fd_out); + if (fd_out >= 0 && !g_unix_set_fd_nonblocking(fd_out, true, NULL)) { + assert(!"Failed to set FD nonblocking"); } if (fd_out == fd_in && fd_in >= 0) { diff --git a/chardev/char-io.c b/chardev/char-io.c index 8ced184160..4451128cba 100644 --- a/chardev/char-io.c +++ b/chardev/char-io.c @@ -122,7 +122,7 @@ int io_channel_send_full(QIOChannel *ioc, ret = qio_channel_writev_full( ioc, &iov, 1, - fds, nfds, NULL); + fds, nfds, 0, NULL); if (ret == 
QIO_CHANNEL_ERR_BLOCK) { if (offset) { return offset; diff --git a/chardev/char-mux.c b/chardev/char-mux.c index 5baf419010..ee2d47b20d 100644 --- a/chardev/char-mux.c +++ b/chardev/char-mux.c @@ -28,6 +28,7 @@ #include "qemu/option.h" #include "chardev/char.h" #include "sysemu/block-backend.h" +#include "qapi/qapi-commands-control.h" #include "chardev-internal.h" /* MUX driver for serial I/O splitting */ @@ -157,7 +158,7 @@ static int mux_proc_byte(Chardev *chr, MuxChardev *d, int ch) { const char *term = "QEMU: Terminated\n\r"; qemu_chr_write_all(chr, (uint8_t *)term, strlen(term)); - exit(0); + qmp_quit(NULL); break; } case 's': @@ -386,10 +387,9 @@ void suspend_mux_open(void) static int chardev_options_parsed_cb(Object *child, void *opaque) { Chardev *chr = (Chardev *)child; - ChardevClass *class = CHARDEV_GET_CLASS(chr); - if (!chr->be_open && class->chr_options_parsed) { - class->chr_options_parsed(chr); + if (!chr->be_open && CHARDEV_IS_MUX(chr)) { + open_muxes(chr); } return 0; @@ -412,7 +412,6 @@ static void char_mux_class_init(ObjectClass *oc, void *data) cc->chr_accept_input = mux_chr_accept_input; cc->chr_add_watch = mux_chr_add_watch; cc->chr_be_event = mux_chr_be_event; - cc->chr_options_parsed = open_muxes; cc->chr_update_read_handler = mux_chr_update_read_handlers; } diff --git a/chardev/char-pipe.c b/chardev/char-pipe.c index 7eca5d9a56..66d3b85091 100644 --- a/chardev/char-pipe.c +++ b/chardev/char-pipe.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/error.h" #include "qemu/main-loop.h" #include "qemu/module.h" diff --git a/chardev/char-pty.c b/chardev/char-pty.c index a2d1e7c985..53f25c6bbd 100644 --- a/chardev/char-pty.c +++ b/chardev/char-pty.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/error.h" #include "chardev/char.h" #include "io/channel-file.h" @@ -197,6 +196,117 @@ static void char_pty_finalize(Object *obj) qemu_chr_be_event(chr, CHR_EVENT_CLOSED); } +#if defined HAVE_PTY_H +# include +#elif defined CONFIG_BSD +# include +# if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__) +# include +# else +# include +# endif +#elif defined CONFIG_SOLARIS +# include +# include +#else +# include +#endif + +#ifdef __sun__ + +#if !defined(HAVE_OPENPTY) +/* Once illumos has openpty(), this is going to be removed. 
*/ +static int openpty(int *amaster, int *aslave, char *name, + struct termios *termp, struct winsize *winp) +{ + const char *slave; + int mfd = -1, sfd = -1; + + *amaster = *aslave = -1; + + mfd = open("/dev/ptmx", O_RDWR | O_NOCTTY); + if (mfd < 0) { + goto err; + } + + if (grantpt(mfd) == -1 || unlockpt(mfd) == -1) { + goto err; + } + + if ((slave = ptsname(mfd)) == NULL) { + goto err; + } + + if ((sfd = open(slave, O_RDONLY | O_NOCTTY)) == -1) { + goto err; + } + + if (ioctl(sfd, I_PUSH, "ptem") == -1 || + (termp != NULL && tcgetattr(sfd, termp) < 0)) { + goto err; + } + + *amaster = mfd; + *aslave = sfd; + + if (winp) { + ioctl(sfd, TIOCSWINSZ, winp); + } + + return 0; + +err: + if (sfd != -1) { + close(sfd); + } + close(mfd); + return -1; +} +#endif + +static void cfmakeraw (struct termios *termios_p) +{ + termios_p->c_iflag &= + ~(IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON); + termios_p->c_oflag &= ~OPOST; + termios_p->c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN); + termios_p->c_cflag &= ~(CSIZE | PARENB); + termios_p->c_cflag |= CS8; + + termios_p->c_cc[VMIN] = 0; + termios_p->c_cc[VTIME] = 0; +} +#endif + +/* like openpty() but also makes it raw; return master fd */ +static int qemu_openpty_raw(int *aslave, char *pty_name) +{ + int amaster; + struct termios tty; +#if defined(__OpenBSD__) || defined(__DragonFly__) + char pty_buf[PATH_MAX]; +#define q_ptsname(x) pty_buf +#else + char *pty_buf = NULL; +#define q_ptsname(x) ptsname(x) +#endif + + if (openpty(&amaster, aslave, pty_buf, NULL, NULL) < 0) { + return -1; + } + + /* Set raw attributes on the pty. */ + tcgetattr(*aslave, &tty); + cfmakeraw(&tty); + tcsetattr(*aslave, TCSAFLUSH, &tty); + + if (pty_name) { + strcpy(pty_name, q_ptsname(amaster)); + } + + return amaster; +} + static void char_pty_open(Chardev *chr, ChardevBackend *backend, bool *be_opened, @@ -214,7 +324,10 @@ static void char_pty_open(Chardev *chr, } close(slave_fd); - qemu_set_nonblock(master_fd); + if (!g_unix_set_fd_nonblocking(master_fd, true, NULL)) { + error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + return; + } chr->filename = g_strdup_printf("pty:%s", pty_name); qemu_printf("char device redirected to %s (label %s)\n", diff --git a/chardev/char-serial.c b/chardev/char-serial.c index 7c3d84ae24..4b0b83d5b4 100644 --- a/chardev/char-serial.c +++ b/chardev/char-serial.c @@ -271,7 +271,10 @@ static void qmp_chardev_open_serial(Chardev *chr, if (fd < 0) { return; } - qemu_set_nonblock(fd); + if (!g_unix_set_fd_nonblocking(fd, true, NULL)) { + error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + return; + } tty_serial_init(fd, 115200, 'N', 8, 1); qemu_chr_open_fd(chr, fd, fd); diff --git a/chardev/char-socket.c b/chardev/char-socket.c index c43668cc15..b00efb1482 100644 --- a/chardev/char-socket.c +++ b/chardev/char-socket.c @@ -25,9 +25,7 @@ #include "qemu/osdep.h" #include "chardev/char.h" #include "io/channel-socket.h" -#include "io/channel-tls.h" #include "io/channel-websock.h" -#include "io/net-listener.h" #include "qemu/error-report.h" #include "qemu/module.h" #include "qemu/option.h" @@ -37,61 +35,7 @@ #include "qemu/yank.h" #include "chardev/char-io.h" -#include "qom/object.h" - -/***********************************************************/ -/* TCP Net console */ - -#define TCP_MAX_FDS 16 - -typedef struct { - char buf[21]; - size_t buflen; -} TCPChardevTelnetInit; - -typedef enum { - TCP_CHARDEV_STATE_DISCONNECTED, - TCP_CHARDEV_STATE_CONNECTING, - TCP_CHARDEV_STATE_CONNECTED, -} 
TCPChardevState; - -struct SocketChardev { - Chardev parent; - QIOChannel *ioc; /* Client I/O channel */ - QIOChannelSocket *sioc; /* Client master channel */ - QIONetListener *listener; - GSource *hup_source; - QCryptoTLSCreds *tls_creds; - char *tls_authz; - TCPChardevState state; - int max_size; - int do_telnetopt; - int do_nodelay; - int *read_msgfds; - size_t read_msgfds_num; - int *write_msgfds; - size_t write_msgfds_num; - bool registered_yank; - - SocketAddress *addr; - bool is_listen; - bool is_telnet; - bool is_tn3270; - GSource *telnet_source; - TCPChardevTelnetInit *telnet_init; - - bool is_websock; - - GSource *reconnect_timer; - int64_t reconnect_time; - bool connect_err_reported; - - QIOTask *connect_task; -}; -typedef struct SocketChardev SocketChardev; - -DECLARE_INSTANCE_CHECKER(SocketChardev, SOCKET_CHARDEV, - TYPE_CHARDEV_SOCKET) +#include "chardev/char-socket.h" static gboolean socket_reconnect_timeout(gpointer opaque); static void tcp_chr_telnet_init(Chardev *chr); @@ -346,13 +290,6 @@ static ssize_t tcp_chr_recv(Chardev *chr, char *buf, size_t len) NULL); } - if (ret == QIO_CHANNEL_ERR_BLOCK) { - errno = EAGAIN; - ret = -1; - } else if (ret == -1) { - errno = EIO; - } - if (msgfds_num) { /* close and clean read_msgfds */ for (i = 0; i < s->read_msgfds_num; i++) { @@ -374,13 +311,20 @@ static ssize_t tcp_chr_recv(Chardev *chr, char *buf, size_t len) } /* O_NONBLOCK is preserved across SCM_RIGHTS so reset it */ - qemu_set_block(fd); + qemu_socket_set_block(fd); #ifndef MSG_CMSG_CLOEXEC qemu_set_cloexec(fd); #endif } + if (ret == QIO_CHANNEL_ERR_BLOCK) { + errno = EAGAIN; + ret = -1; + } else if (ret == -1) { + errno = EIO; + } + return ret; } @@ -581,6 +525,7 @@ static int tcp_chr_sync_read(Chardev *chr, const uint8_t *buf, int len) { SocketChardev *s = SOCKET_CHARDEV(chr); int size; + int saved_errno; if (s->state != TCP_CHARDEV_STATE_CONNECTED) { return 0; @@ -588,6 +533,7 @@ static int tcp_chr_sync_read(Chardev *chr, const uint8_t *buf, int len) qio_channel_set_blocking(s->ioc, true, NULL); size = tcp_chr_recv(chr, (void *) buf, len); + saved_errno = errno; if (s->state != TCP_CHARDEV_STATE_DISCONNECTED) { qio_channel_set_blocking(s->ioc, false, NULL); } @@ -596,6 +542,7 @@ static int tcp_chr_sync_read(Chardev *chr, const uint8_t *buf, int len) tcp_chr_disconnect(chr); } + errno = saved_errno; return size; } @@ -610,12 +557,10 @@ static char *qemu_chr_compute_filename(SocketChardev *s) const char *left = "", *right = ""; switch (ss->ss_family) { -#ifndef _WIN32 case AF_UNIX: return g_strdup_printf("unix:%s%s", ((struct sockaddr_un *)(ss))->sun_path, s->is_listen ? 
",server=on" : ""); -#endif case AF_INET6: left = "["; right = "]"; @@ -1120,6 +1065,7 @@ static void char_socket_finalize(Object *obj) qio_net_listener_set_client_func_full(s->listener, NULL, NULL, NULL, chr->gcontext); object_unref(OBJECT(s->listener)); + s->listener = NULL; } if (s->tls_creds) { object_unref(OBJECT(s->tls_creds)); @@ -1248,6 +1194,10 @@ static int qmp_chardev_open_socket_server(Chardev *chr, qio_net_listener_set_name(s->listener, name); g_free(name); + if (s->addr->type == SOCKET_ADDRESS_TYPE_FD && !*s->addr->u.fd.str) { + goto skip_listen; + } + if (qio_net_listener_open_sync(s->listener, s->addr, 1, errp) < 0) { object_unref(OBJECT(s->listener)); s->listener = NULL; @@ -1256,6 +1206,8 @@ static int qmp_chardev_open_socket_server(Chardev *chr, qapi_free_SocketAddress(s->addr); s->addr = socket_local_address(s->listener->sioc[0]->fd, errp); + +skip_listen: update_disconnected_filename(s); if (is_waitconnect) { @@ -1419,10 +1371,12 @@ static void qmp_chardev_open_socket(Chardev *chr, } qemu_chr_set_feature(chr, QEMU_CHAR_FEATURE_RECONNECTABLE); +#ifndef _WIN32 /* TODO SOCKET_ADDRESS_FD where fd has AF_UNIX */ if (addr->type == SOCKET_ADDRESS_TYPE_UNIX) { qemu_chr_set_feature(chr, QEMU_CHAR_FEATURE_FD_PASS); } +#endif /* * In the chardev-change special-case, we shouldn't register a new yank @@ -1466,9 +1420,9 @@ static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend, SocketAddressLegacy *addr; ChardevSocket *sock; - if ((!!path + !!fd + !!host) != 1) { + if ((!!path + !!fd + !!host) > 1) { error_setg(errp, - "Exactly one of 'path', 'fd' or 'host' required"); + "None or one of 'path', 'fd' or 'host' option required."); return; } @@ -1520,7 +1474,7 @@ static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend, addr = g_new0(SocketAddressLegacy, 1); if (path) { UnixSocketAddress *q_unix; - addr->type = SOCKET_ADDRESS_LEGACY_KIND_UNIX; + addr->type = SOCKET_ADDRESS_TYPE_UNIX; q_unix = addr->u.q_unix.data = g_new0(UnixSocketAddress, 1); q_unix->path = g_strdup(path); #ifdef CONFIG_LINUX @@ -1530,7 +1484,7 @@ static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend, q_unix->abstract = abstract; #endif } else if (host) { - addr->type = SOCKET_ADDRESS_LEGACY_KIND_INET; + addr->type = SOCKET_ADDRESS_TYPE_INET; addr->u.inet.data = g_new(InetSocketAddress, 1); *addr->u.inet.data = (InetSocketAddress) { .host = g_strdup(host), @@ -1542,12 +1496,10 @@ static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend, .has_ipv6 = qemu_opt_get(opts, "ipv6"), .ipv6 = qemu_opt_get_bool(opts, "ipv6", 0), }; - } else if (fd) { - addr->type = SOCKET_ADDRESS_LEGACY_KIND_FD; + } else { + addr->type = SOCKET_ADDRESS_TYPE_FD; addr->u.fd.data = g_new(String, 1); addr->u.fd.data->str = g_strdup(fd); - } else { - g_assert_not_reached(); } sock->addr = addr; } diff --git a/chardev/char-stdio.c b/chardev/char-stdio.c index 403da308c9..3c648678ab 100644 --- a/chardev/char-stdio.c +++ b/chardev/char-stdio.c @@ -103,7 +103,10 @@ static void qemu_chr_open_stdio(Chardev *chr, stdio_in_use = true; old_fd0_flags = fcntl(0, F_GETFL); tcgetattr(0, &oldtty); - qemu_set_nonblock(0); + if (!g_unix_set_fd_nonblocking(0, true, NULL)) { + error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + return; + } atexit(term_exit); memset(&act, 0, sizeof(act)); diff --git a/chardev/char-udp.c b/chardev/char-udp.c index 16b5dbce58..6756e69924 100644 --- a/chardev/char-udp.c +++ b/chardev/char-udp.c @@ -165,7 +165,7 @@ static void qemu_chr_parse_udp(QemuOpts 
*opts, ChardevBackend *backend, qemu_chr_parse_common(opts, qapi_ChardevUdp_base(udp)); addr = g_new0(SocketAddressLegacy, 1); - addr->type = SOCKET_ADDRESS_LEGACY_KIND_INET; + addr->type = SOCKET_ADDRESS_TYPE_INET; addr->u.inet.data = g_new(InetSocketAddress, 1); *addr->u.inet.data = (InetSocketAddress) { .host = g_strdup(host), @@ -180,7 +180,7 @@ static void qemu_chr_parse_udp(QemuOpts *opts, ChardevBackend *backend, if (has_local) { udp->has_local = true; addr = g_new0(SocketAddressLegacy, 1); - addr->type = SOCKET_ADDRESS_LEGACY_KIND_INET; + addr->type = SOCKET_ADDRESS_TYPE_INET; addr->u.inet.data = g_new(InetSocketAddress, 1); *addr->u.inet.data = (InetSocketAddress) { .host = g_strdup(localaddr), diff --git a/chardev/char-win-stdio.c b/chardev/char-win-stdio.c index a4771ab82e..eb830eabd9 100644 --- a/chardev/char-win-stdio.c +++ b/chardev/char-win-stdio.c @@ -146,6 +146,8 @@ static void qemu_chr_open_stdio(Chardev *chr, bool *be_opened, Error **errp) { + ChardevStdio *opts = backend->u.stdio.data; + bool stdio_allow_signal = !opts->has_signal || opts->signal; WinStdioChardev *stdio = WIN_STDIO_CHARDEV(chr); DWORD dwMode; int is_console = 0; @@ -193,7 +195,11 @@ static void qemu_chr_open_stdio(Chardev *chr, if (is_console) { /* set the terminal in raw mode */ /* ENABLE_QUICK_EDIT_MODE | ENABLE_EXTENDED_FLAGS */ - dwMode |= ENABLE_PROCESSED_INPUT; + if (stdio_allow_signal) { + dwMode |= ENABLE_PROCESSED_INPUT; + } else { + dwMode &= ~ENABLE_PROCESSED_INPUT; + } } SetConsoleMode(stdio->hStdIn, dwMode); diff --git a/chardev/char.c b/chardev/char.c index 225b84e094..852d9d5f1b 100644 --- a/chardev/char.c +++ b/chardev/char.c @@ -197,7 +197,7 @@ int qemu_chr_be_can_write(Chardev *s) return be->chr_can_read(be->opaque); } -void qemu_chr_be_write_impl(Chardev *s, uint8_t *buf, int len) +void qemu_chr_be_write_impl(Chardev *s, const uint8_t *buf, int len) { CharBackend *be = s->be; @@ -206,7 +206,7 @@ void qemu_chr_be_write_impl(Chardev *s, uint8_t *buf, int len) } } -void qemu_chr_be_write(Chardev *s, uint8_t *buf, int len) +void qemu_chr_be_write(Chardev *s, const uint8_t *buf, int len) { if (qemu_chr_replay(s)) { if (replay_mode == REPLAY_MODE_PLAY) { @@ -255,18 +255,15 @@ static void qemu_char_open(Chardev *chr, ChardevBackend *backend, ChardevCommon *common = backend ? backend->u.null.data : NULL; if (common && common->has_logfile) { - int flags = O_WRONLY | O_CREAT; + int flags = O_WRONLY; if (common->has_logappend && common->logappend) { flags |= O_APPEND; } else { flags |= O_TRUNC; } - chr->logfd = qemu_open_old(common->logfile, flags, 0666); + chr->logfd = qemu_create(common->logfile, flags, 0666, errp); if (chr->logfd < 0) { - error_setg_errno(errp, errno, - "Unable to open logfile %s", - common->logfile); return; } } diff --git a/chardev/chardev-internal.h b/chardev/chardev-internal.h index aba0240759..4e03af3147 100644 --- a/chardev/chardev-internal.h +++ b/chardev/chardev-internal.h @@ -21,6 +21,7 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ + #ifndef CHARDEV_INTERNAL_H #define CHARDEV_INTERNAL_H @@ -64,4 +65,4 @@ void mux_chr_send_all_event(Chardev *chr, QEMUChrEvent event); Object *get_chardevs_root(void); -#endif /* CHAR_MUX_H */ +#endif /* CHARDEV_INTERNAL_H */ diff --git a/chardev/meson.build b/chardev/meson.build index 32377af383..664f77b887 100644 --- a/chardev/meson.build +++ b/chardev/meson.build @@ -12,11 +12,11 @@ chardev_ss.add(files( 'char-udp.c', 'char.c', )) -chardev_ss.add(when: 'CONFIG_POSIX', if_true: files( +chardev_ss.add(when: 'CONFIG_POSIX', if_true: [files( 'char-fd.c', 'char-parallel.c', 'char-pty.c', -)) +), util]) chardev_ss.add(when: 'CONFIG_WIN32', if_true: files( 'char-console.c', 'char-win-stdio.c', @@ -35,7 +35,7 @@ if brlapi.found() chardev_modules += { 'baum': module_ss } endif -if config_host.has_key('CONFIG_SPICE') +if spice.found() module_ss = ss.source_set() module_ss.add(when: [spice], if_true: files('spice.c')) chardev_modules += { 'spice': module_ss } diff --git a/chardev/msmouse.c b/chardev/msmouse.c index eb9231dcdb..ab8fe981d6 100644 --- a/chardev/msmouse.c +++ b/chardev/msmouse.c @@ -24,23 +24,45 @@ #include "qemu/osdep.h" #include "qemu/module.h" +#include "qemu/fifo8.h" #include "chardev/char.h" +#include "chardev/char-serial.h" #include "ui/console.h" #include "ui/input.h" #include "qom/object.h" -#define MSMOUSE_LO6(n) ((n) & 0x3f) -#define MSMOUSE_HI2(n) (((n) & 0xc0) >> 6) +#define MSMOUSE_LO6(n) ((n) & 0x3f) +#define MSMOUSE_HI2(n) (((n) & 0xc0) >> 6) +#define MSMOUSE_PWR(cm) (cm & (CHR_TIOCM_RTS | CHR_TIOCM_DTR)) + +/* Serial PnP for 6 bit devices/mice sends all ASCII chars - 0x20 */ +#define M(c) (c - 0x20) +/* Serial fifo size. */ +#define MSMOUSE_BUF_SZ 64 + +/* Mouse ID: Send "M3" cause we behave like a 3 button logitech mouse. */ +const uint8_t mouse_id[] = {'M', '3'}; +/* + * PnP start "(", PnP version (1.0), vendor ID, product ID, '\\', + * serial ID (omitted), '\\', MS class name, '\\', driver ID (omitted), '\\', + * product description, checksum, ")" + * Missing parts are inserted later. + */ +const uint8_t pnp_data[] = {M('('), 1, '$', M('Q'), M('M'), M('U'), + M('0'), M('0'), M('0'), M('1'), + M('\\'), M('\\'), + M('M'), M('O'), M('U'), M('S'), M('E'), + M('\\'), M('\\')}; struct MouseChardev { Chardev parent; QemuInputHandlerState *hs; + int tiocm; int axis[INPUT_AXIS__MAX]; bool btns[INPUT_BUTTON__MAX]; bool btnc[INPUT_BUTTON__MAX]; - uint8_t outbuf[32]; - int outlen; + Fifo8 outbuf; }; typedef struct MouseChardev MouseChardev; @@ -51,20 +73,18 @@ DECLARE_INSTANCE_CHECKER(MouseChardev, MOUSE_CHARDEV, static void msmouse_chr_accept_input(Chardev *chr) { MouseChardev *mouse = MOUSE_CHARDEV(chr); - int len; + uint32_t len, avail; len = qemu_chr_be_can_write(chr); - if (len > mouse->outlen) { - len = mouse->outlen; - } - if (!len) { - return; - } + avail = fifo8_num_used(&mouse->outbuf); + while (len > 0 && avail > 0) { + const uint8_t *buf; + uint32_t size; - qemu_chr_be_write(chr, mouse->outbuf, len); - mouse->outlen -= len; - if (mouse->outlen) { - memmove(mouse->outbuf, mouse->outbuf + len, mouse->outlen); + buf = fifo8_pop_buf(&mouse->outbuf, MIN(len, avail), &size); + qemu_chr_be_write(chr, buf, size); + len = qemu_chr_be_can_write(chr); + avail -= size; } } @@ -91,12 +111,11 @@ static void msmouse_queue_event(MouseChardev *mouse) mouse->btnc[INPUT_BUTTON_MIDDLE]) { bytes[3] |= (mouse->btns[INPUT_BUTTON_MIDDLE] ? 
0x20 : 0x00); mouse->btnc[INPUT_BUTTON_MIDDLE] = false; - count = 4; + count++; } - if (mouse->outlen <= sizeof(mouse->outbuf) - count) { - memcpy(mouse->outbuf + mouse->outlen, bytes, count); - mouse->outlen += count; + if (fifo8_num_free(&mouse->outbuf) >= count) { + fifo8_push_all(&mouse->outbuf, bytes, count); } else { /* queue full -> drop event */ } @@ -109,6 +128,11 @@ static void msmouse_input_event(DeviceState *dev, QemuConsole *src, InputMoveEvent *move; InputBtnEvent *btn; + /* Ignore events if serial mouse powered down. */ + if (!MSMOUSE_PWR(mouse->tiocm)) { + return; + } + switch (evt->type) { case INPUT_EVENT_KIND_REL: move = evt->u.rel.data; @@ -132,6 +156,11 @@ static void msmouse_input_sync(DeviceState *dev) MouseChardev *mouse = MOUSE_CHARDEV(dev); Chardev *chr = CHARDEV(dev); + /* Ignore events if serial mouse powered down. */ + if (!MSMOUSE_PWR(mouse->tiocm)) { + return; + } + msmouse_queue_event(mouse); msmouse_chr_accept_input(chr); } @@ -142,13 +171,6 @@ static int msmouse_chr_write(struct Chardev *s, const uint8_t *buf, int len) return len; } -static void char_msmouse_finalize(Object *obj) -{ - MouseChardev *mouse = MOUSE_CHARDEV(obj); - - qemu_input_handler_unregister(mouse->hs); -} - static QemuInputHandler msmouse_handler = { .name = "QEMU Microsoft Mouse", .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL, @@ -156,6 +178,81 @@ static QemuInputHandler msmouse_handler = { .sync = msmouse_input_sync, }; +static int msmouse_ioctl(Chardev *chr, int cmd, void *arg) +{ + MouseChardev *mouse = MOUSE_CHARDEV(chr); + int c, i, j; + uint8_t bytes[MSMOUSE_BUF_SZ / 2]; + int *targ = (int *)arg; + const uint8_t hexchr[16] = {M('0'), M('1'), M('2'), M('3'), M('4'), M('5'), + M('6'), M('7'), M('8'), M('9'), M('A'), M('B'), + M('C'), M('D'), M('E'), M('F')}; + + switch (cmd) { + case CHR_IOCTL_SERIAL_SET_TIOCM: + c = mouse->tiocm; + mouse->tiocm = *(int *)arg; + if (MSMOUSE_PWR(mouse->tiocm)) { + if (!MSMOUSE_PWR(c)) { + /* + * Power on after reset: Send ID and PnP data + * No need to check fifo space as it is empty at this point. + */ + fifo8_push_all(&mouse->outbuf, mouse_id, sizeof(mouse_id)); + /* Add PnP data: */ + fifo8_push_all(&mouse->outbuf, pnp_data, sizeof(pnp_data)); + /* + * Add device description from qemu handler name. + * Make sure this all fits into the queue beforehand! + */ + c = M(')'); + for (i = 0; msmouse_handler.name[i]; i++) { + bytes[i] = M(msmouse_handler.name[i]); + c += bytes[i]; + } + /* Calc more of checksum */ + for (j = 0; j < sizeof(pnp_data); j++) { + c += pnp_data[j]; + } + c &= 0xff; + bytes[i++] = hexchr[c >> 4]; + bytes[i++] = hexchr[c & 0x0f]; + bytes[i++] = M(')'); + fifo8_push_all(&mouse->outbuf, bytes, i); + /* Start sending data to serial. */ + msmouse_chr_accept_input(chr); + } + break; + } + /* + * Reset mouse buffers on power down. + * Mouse won't send anything without power. + */ + fifo8_reset(&mouse->outbuf); + memset(mouse->axis, 0, sizeof(mouse->axis)); + memset(mouse->btns, false, sizeof(mouse->btns)); + memset(mouse->btnc, false, sizeof(mouse->btns)); + break; + case CHR_IOCTL_SERIAL_GET_TIOCM: + /* Remember line control status. 
*/ + *targ = mouse->tiocm; + break; + default: + return -ENOTSUP; + } + return 0; +} + +static void char_msmouse_finalize(Object *obj) +{ + MouseChardev *mouse = MOUSE_CHARDEV(obj); + + if (mouse->hs) { + qemu_input_handler_unregister(mouse->hs); + } + fifo8_destroy(&mouse->outbuf); +} + static void msmouse_chr_open(Chardev *chr, ChardevBackend *backend, bool *be_opened, @@ -166,6 +263,8 @@ static void msmouse_chr_open(Chardev *chr, *be_opened = false; mouse->hs = qemu_input_handler_register((DeviceState *)mouse, &msmouse_handler); + mouse->tiocm = 0; + fifo8_create(&mouse->outbuf, MSMOUSE_BUF_SZ); } static void char_msmouse_class_init(ObjectClass *oc, void *data) @@ -175,6 +274,7 @@ static void char_msmouse_class_init(ObjectClass *oc, void *data) cc->open = msmouse_chr_open; cc->chr_write = msmouse_chr_write; cc->chr_accept_input = msmouse_chr_accept_input; + cc->chr_ioctl = msmouse_ioctl; } static const TypeInfo char_msmouse_type_info = { diff --git a/chardev/wctablet.c b/chardev/wctablet.c index 95e005f5a5..43bdf6b608 100644 --- a/chardev/wctablet.c +++ b/chardev/wctablet.c @@ -319,8 +319,9 @@ static void wctablet_chr_finalize(Object *obj) { TabletChardev *tablet = WCTABLET_CHARDEV(obj); - qemu_input_handler_unregister(tablet->hs); - g_free(tablet); + if (tablet->hs) { + qemu_input_handler_unregister(tablet->hs); + } } static void wctablet_chr_open(Chardev *chr, diff --git a/common-user/host/aarch64/safe-syscall.inc.S b/common-user/host/aarch64/safe-syscall.inc.S new file mode 100644 index 0000000000..b8fd5b553e --- /dev/null +++ b/common-user/host/aarch64/safe-syscall.inc.S @@ -0,0 +1,88 @@ +/* + * safe-syscall.inc.S : host-specific assembly fragment + * to handle signals occurring at the same time as system calls. + * This is intended to be included by common-user/safe-syscall.S + * + * Written by Richard Henderson + * Copyright (C) 2016 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + + .global safe_syscall_base + .global safe_syscall_start + .global safe_syscall_end + .type safe_syscall_base, #function + .type safe_syscall_start, #function + .type safe_syscall_end, #function + + /* This is the entry point for making a system call. The calling + * convention here is that of a C varargs function with the + * first argument an 'int *' to the signal_pending flag, the + * second one the system call number (as a 'long'), and all further + * arguments being syscall arguments (also 'long'). + */ +safe_syscall_base: + .cfi_startproc + /* The syscall calling convention isn't the same as the + * C one: + * we enter with x0 == &signal_pending + * x1 == syscall number + * x2 ... x7, (stack) == syscall arguments + * and return the result in x0 + * and the syscall instruction needs + * x8 == syscall number + * x0 ... x6 == syscall arguments + * and returns the result in x0 + * Shuffle everything around appropriately. + */ + mov x9, x0 /* signal_pending pointer */ + mov x8, x1 /* syscall number */ + mov x0, x2 /* syscall arguments */ + mov x1, x3 + mov x2, x4 + mov x3, x5 + mov x4, x6 + mov x5, x7 + ldr x6, [sp] + + /* This next sequence of code works in conjunction with the + * rewind_if_safe_syscall_function(). If a signal is taken + * and the interrupted PC is anywhere between 'safe_syscall_start' + * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. 
+ * The code sequence must therefore be able to cope with this, and + * the syscall instruction must be the final one in the sequence. + */ +safe_syscall_start: + /* if signal_pending is non-zero, don't do the call */ + ldr w10, [x9] + cbnz w10, 2f + svc 0x0 +safe_syscall_end: + + /* code path for having successfully executed the syscall */ +#if defined(__linux__) + /* Linux kernel returns (small) negative errno. */ + cmp x0, #-4096 + b.hi 0f +#elif defined(__FreeBSD__) + /* FreeBSD kernel returns positive errno and C bit set. */ + b.cs 1f +#else +#error "unsupported os" +#endif + ret + +#if defined(__linux__) + /* code path setting errno */ +0: neg w0, w0 + b safe_syscall_set_errno_tail +#endif + + /* code path when we didn't execute the syscall */ +2: mov w0, #QEMU_ERESTARTSYS +1: b safe_syscall_set_errno_tail + + .cfi_endproc + .size safe_syscall_base, .-safe_syscall_base diff --git a/common-user/host/arm/safe-syscall.inc.S b/common-user/host/arm/safe-syscall.inc.S new file mode 100644 index 0000000000..bbfb89634e --- /dev/null +++ b/common-user/host/arm/safe-syscall.inc.S @@ -0,0 +1,108 @@ +/* + * safe-syscall.inc.S : host-specific assembly fragment + * to handle signals occurring at the same time as system calls. + * This is intended to be included by common-user/safe-syscall.S + * + * Written by Richard Henderson + * Copyright (C) 2016 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + + .global safe_syscall_base + .global safe_syscall_start + .global safe_syscall_end + .type safe_syscall_base, %function + + .cfi_sections .debug_frame + + .text + .syntax unified + .arm + .align 2 + + /* This is the entry point for making a system call. The calling + * convention here is that of a C varargs function with the + * first argument an 'int *' to the signal_pending flag, the + * second one the system call number (as a 'long'), and all further + * arguments being syscall arguments (also 'long'). + */ +safe_syscall_base: + .fnstart + .cfi_startproc + mov r12, sp /* save entry stack */ + push { r4, r5, r6, r7, r8, lr } + .save { r4, r5, r6, r7, r8, lr } + .cfi_adjust_cfa_offset 24 + .cfi_rel_offset r4, 0 + .cfi_rel_offset r5, 4 + .cfi_rel_offset r6, 8 + .cfi_rel_offset r7, 12 + .cfi_rel_offset r8, 16 + .cfi_rel_offset lr, 20 + + /* The syscall calling convention isn't the same as the C one: + * we enter with r0 == &signal_pending + * r1 == syscall number + * r2, r3, [sp+0] ... [sp+12] == syscall arguments + * and return the result in r0 + * and the syscall instruction needs + * r7 == syscall number + * r0 ... r6 == syscall arguments + * and returns the result in r0 + * Shuffle everything around appropriately. + * Note the 16 bytes that we pushed to save registers. + */ + mov r8, r0 /* copy signal_pending */ + mov r7, r1 /* syscall number */ + mov r0, r2 /* syscall args */ + mov r1, r3 + ldm r12, { r2, r3, r4, r5, r6 } + + /* This next sequence of code works in conjunction with the + * rewind_if_safe_syscall_function(). If a signal is taken + * and the interrupted PC is anywhere between 'safe_syscall_start' + * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. + * The code sequence must therefore be able to cope with this, and + * the syscall instruction must be the final one in the sequence. 
+ */ +safe_syscall_start: + /* if signal_pending is non-zero, don't do the call */ + ldr r12, [r8] /* signal_pending */ + tst r12, r12 + bne 2f + swi 0 +safe_syscall_end: + + /* code path for having successfully executed the syscall */ +#if defined(__linux__) + /* Linux kernel returns (small) negative errno. */ + cmp r0, #-4096 + neghi r0, r0 + bhi 1f +#elif defined(__FreeBSD__) + /* FreeBSD kernel returns positive errno and C bit set. */ + bcs 1f +#else +#error "unsupported os" +#endif + pop { r4, r5, r6, r7, r8, pc } + + /* code path when we didn't execute the syscall */ +2: mov r0, #QEMU_ERESTARTSYS + + /* code path setting errno */ +1: pop { r4, r5, r6, r7, r8, lr } + .cfi_adjust_cfa_offset -24 + .cfi_restore r4 + .cfi_restore r5 + .cfi_restore r6 + .cfi_restore r7 + .cfi_restore r8 + .cfi_restore lr + b safe_syscall_set_errno_tail + + .fnend + .cfi_endproc + .size safe_syscall_base, .-safe_syscall_base diff --git a/common-user/host/i386/safe-syscall.inc.S b/common-user/host/i386/safe-syscall.inc.S new file mode 100644 index 0000000000..db2ed09839 --- /dev/null +++ b/common-user/host/i386/safe-syscall.inc.S @@ -0,0 +1,127 @@ +/* + * safe-syscall.inc.S : host-specific assembly fragment + * to handle signals occurring at the same time as system calls. + * This is intended to be included by common-user/safe-syscall.S + * + * Written by Richard Henderson + * Copyright (C) 2016 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + + .global safe_syscall_base + .global safe_syscall_start + .global safe_syscall_end + .type safe_syscall_base, @function + + /* This is the entry point for making a system call. The calling + * convention here is that of a C varargs function with the + * first argument an 'int *' to the signal_pending flag, the + * second one the system call number (as a 'long'), and all further + * arguments being syscall arguments (also 'long'). + */ +safe_syscall_base: + .cfi_startproc + push %ebp + .cfi_adjust_cfa_offset 4 + .cfi_rel_offset ebp, 0 + push %esi + .cfi_adjust_cfa_offset 4 + .cfi_rel_offset esi, 0 + push %edi + .cfi_adjust_cfa_offset 4 + .cfi_rel_offset edi, 0 + push %ebx + .cfi_adjust_cfa_offset 4 + .cfi_rel_offset ebx, 0 + + /* The syscall calling convention isn't the same as the C one: + * we enter with 0(%esp) == return address + * 4(%esp) == &signal_pending + * 8(%esp) == syscall number + * 12(%esp) ... 32(%esp) == syscall arguments + * and return the result in eax + * and the syscall instruction needs + * eax == syscall number + * ebx, ecx, edx, esi, edi, ebp == syscall arguments + * and returns the result in eax + * Shuffle everything around appropriately. + * Note the 16 bytes that we pushed to save registers. + */ + mov 12+16(%esp), %ebx /* the syscall arguments */ + mov 16+16(%esp), %ecx + mov 20+16(%esp), %edx + mov 24+16(%esp), %esi + mov 28+16(%esp), %edi + mov 32+16(%esp), %ebp + + /* This next sequence of code works in conjunction with the + * rewind_if_safe_syscall_function(). If a signal is taken + * and the interrupted PC is anywhere between 'safe_syscall_start' + * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. + * The code sequence must therefore be able to cope with this, and + * the syscall instruction must be the final one in the sequence. 
+ */ +safe_syscall_start: + /* if signal_pending is non-zero, don't do the call */ + mov 4+16(%esp), %eax /* signal_pending */ + cmpl $0, (%eax) + jnz 2f + mov 8+16(%esp), %eax /* syscall number */ + int $0x80 +safe_syscall_end: + + /* code path for having successfully executed the syscall */ +#if defined(__linux__) + /* Linux kernel returns (small) negative errno. */ + cmp $-4095, %eax + jae 0f +#elif defined(__FreeBSD__) + /* FreeBSD kernel returns positive errno and C bit set. */ + jc 1f +#else +#error "unsupported os" +#endif + pop %ebx + .cfi_remember_state + .cfi_adjust_cfa_offset -4 + .cfi_restore ebx + pop %edi + .cfi_adjust_cfa_offset -4 + .cfi_restore edi + pop %esi + .cfi_adjust_cfa_offset -4 + .cfi_restore esi + pop %ebp + .cfi_adjust_cfa_offset -4 + .cfi_restore ebp + ret + .cfi_restore_state + +#if defined(__linux__) +0: neg %eax + jmp 1f +#endif + + /* code path when we didn't execute the syscall */ +2: mov $QEMU_ERESTARTSYS, %eax + + /* code path setting errno */ +1: pop %ebx + .cfi_adjust_cfa_offset -4 + .cfi_restore ebx + pop %edi + .cfi_adjust_cfa_offset -4 + .cfi_restore edi + pop %esi + .cfi_adjust_cfa_offset -4 + .cfi_restore esi + pop %ebp + .cfi_adjust_cfa_offset -4 + .cfi_restore ebp + mov %eax, 4(%esp) + jmp safe_syscall_set_errno_tail + + .cfi_endproc + .size safe_syscall_base, .-safe_syscall_base diff --git a/common-user/host/loongarch64/safe-syscall.inc.S b/common-user/host/loongarch64/safe-syscall.inc.S new file mode 100644 index 0000000000..b88a069c45 --- /dev/null +++ b/common-user/host/loongarch64/safe-syscall.inc.S @@ -0,0 +1,90 @@ +/* + * safe-syscall.inc.S : host-specific assembly fragment + * to handle signals occurring at the same time as system calls. + * This is intended to be included by common-user/safe-syscall.S + * + * Ported to LoongArch by WANG Xuerui + * + * Based on safe-syscall.inc.S code for RISC-V, + * originally written by Richard Henderson + * Copyright (C) 2018 Linaro, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + + .global safe_syscall_base + .global safe_syscall_start + .global safe_syscall_end + .type safe_syscall_base, @function + .type safe_syscall_start, @function + .type safe_syscall_end, @function + + /* + * This is the entry point for making a system call. The calling + * convention here is that of a C varargs function with the + * first argument an 'int *' to the signal_pending flag, the + * second one the system call number (as a 'long'), and all further + * arguments being syscall arguments (also 'long'). + */ +safe_syscall_base: + .cfi_startproc + /* + * The syscall calling convention is nearly the same as C: + * we enter with a0 == &signal_pending + * a1 == syscall number + * a2 ... a7 == syscall arguments + * and return the result in a0 + * and the syscall instruction needs + * a7 == syscall number + * a0 ... a5 == syscall arguments + * and returns the result in a0 + * Shuffle everything around appropriately. 
+ */ + move $t0, $a0 /* signal_pending pointer */ + move $t1, $a1 /* syscall number */ + move $a0, $a2 /* syscall arguments */ + move $a1, $a3 + move $a2, $a4 + move $a3, $a5 + move $a4, $a6 + move $a5, $a7 + move $a7, $t1 + + /* + * We need to preserve the signal_pending pointer but t0 is + * clobbered by syscalls on LoongArch, so we need to move it + * somewhere else, ideally both preserved across syscalls and + * clobbered by procedure calls so we don't have to allocate a + * stack frame; a6 is just the register we want here. + */ + move $a6, $t0 + + /* + * This next sequence of code works in conjunction with the + * rewind_if_safe_syscall_function(). If a signal is taken + * and the interrupted PC is anywhere between 'safe_syscall_start' + * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. + * The code sequence must therefore be able to cope with this, and + * the syscall instruction must be the final one in the sequence. + */ +safe_syscall_start: + /* If signal_pending is non-zero, don't do the call */ + ld.w $t1, $a6, 0 + bnez $t1, 2f + syscall 0 +safe_syscall_end: + /* code path for having successfully executed the syscall */ + li.w $t2, -4096 + bgtu $a0, $t2, 0f + jr $ra + + /* code path setting errno */ +0: sub.d $a0, $zero, $a0 + b safe_syscall_set_errno_tail + + /* code path when we didn't execute the syscall */ +2: li.w $a0, QEMU_ERESTARTSYS + b safe_syscall_set_errno_tail + .cfi_endproc + .size safe_syscall_base, .-safe_syscall_base diff --git a/common-user/host/mips/safe-syscall.inc.S b/common-user/host/mips/safe-syscall.inc.S new file mode 100644 index 0000000000..6a44614970 --- /dev/null +++ b/common-user/host/mips/safe-syscall.inc.S @@ -0,0 +1,149 @@ +/* + * safe-syscall.inc.S : host-specific assembly fragment + * to handle signals occurring at the same time as system calls. + * This is intended to be included by common-user/safe-syscall.S + * + * Written by Richard Henderson + * Copyright (C) 2021 Linaro, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "sys/regdef.h" +#include "sys/asm.h" + + .text + .set nomips16 + .set reorder + + .global safe_syscall_start + .global safe_syscall_end + .type safe_syscall_start, @function + .type safe_syscall_end, @function + + /* + * This is the entry point for making a system call. The calling + * convention here is that of a C varargs function with the + * first argument an 'int *' to the signal_pending flag, the + * second one the system call number (as a 'long'), and all further + * arguments being syscall arguments (also 'long'). + */ + +#if _MIPS_SIM == _ABIO32 +/* 8 * 4 = 32 for outgoing parameters; 1 * 4 for s0 save; 1 * 4 for align. */ +#define FRAME 40 +#define OFS_S0 32 +#else +/* 1 * 8 for s0 save; 1 * 8 for align. */ +#define FRAME 16 +#define OFS_S0 0 +#endif + + +NESTED(safe_syscall_base, FRAME, ra) + .cfi_startproc + PTR_ADDIU sp, sp, -FRAME + .cfi_adjust_cfa_offset FRAME + REG_S s0, OFS_S0(sp) + .cfi_rel_offset s0, OFS_S0 +#if _MIPS_SIM == _ABIO32 + /* + * The syscall calling convention is nearly the same as C: + * we enter with a0 == &signal_pending + * a1 == syscall number + * a2, a3, stack == syscall arguments + * and return the result in a0 + * and the syscall instruction needs + * v0 == syscall number + * a0 ... a3, stack == syscall arguments + * and returns the result in v0 + * Shuffle everything around appropriately. 
+ */ + move s0, a0 /* signal_pending pointer */ + move v0, a1 /* syscall number */ + move a0, a2 /* syscall arguments */ + move a1, a3 + lw a2, FRAME+16(sp) + lw a3, FRAME+20(sp) + lw t4, FRAME+24(sp) + lw t5, FRAME+28(sp) + lw t6, FRAME+32(sp) + lw t7, FRAME+40(sp) + sw t4, 16(sp) + sw t5, 20(sp) + sw t6, 24(sp) + sw t7, 28(sp) +#else + /* + * The syscall calling convention is nearly the same as C: + * we enter with a0 == &signal_pending + * a1 == syscall number + * a2 ... a7 == syscall arguments + * and return the result in a0 + * and the syscall instruction needs + * v0 == syscall number + * a0 ... a5 == syscall arguments + * and returns the result in v0 + * Shuffle everything around appropriately. + */ + move s0, a0 /* signal_pending pointer */ + move v0, a1 /* syscall number */ + move a0, a2 /* syscall arguments */ + move a1, a3 + move a2, a4 + move a3, a5 + move a4, a6 + move a5, a7 +#endif + + /* + * This next sequence of code works in conjunction with the + * rewind_if_safe_syscall_function(). If a signal is taken + * and the interrupted PC is anywhere between 'safe_syscall_start' + * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. + * The code sequence must therefore be able to cope with this, and + * the syscall instruction must be the final one in the sequence. + */ +safe_syscall_start: + /* If signal_pending is non-zero, don't do the call */ + lw t1, 0(s0) + bnez t1, 2f + syscall +safe_syscall_end: + + /* code path for having successfully executed the syscall */ + REG_L s0, OFS_S0(sp) + PTR_ADDIU sp, sp, FRAME + .cfi_remember_state + .cfi_adjust_cfa_offset -FRAME + .cfi_restore s0 + bnez a3, 1f + jr ra + .cfi_restore_state + + /* code path when we didn't execute the syscall */ +2: REG_L s0, OFS_S0(sp) + PTR_ADDIU sp, sp, FRAME + .cfi_adjust_cfa_offset -FRAME + .cfi_restore s0 + li v0, QEMU_ERESTARTSYS + + /* code path setting errno */ + /* + * We didn't setup GP on entry, optimistic of the syscall success. + * We must do so now to load the address of the helper, as required + * by the ABI, into t9. + * + * Note that SETUP_GPX and SETUP_GPX64 are themselves conditional, + * so we can simply let the one that's not empty succeed. + */ +1: USE_ALT_CP(t0) + SETUP_GPX(t1) + SETUP_GPX64(t0, t1) + move a0, v0 + PTR_LA t9, safe_syscall_set_errno_tail + jr t9 + + .cfi_endproc +END(safe_syscall_base) diff --git a/common-user/host/ppc64/safe-syscall.inc.S b/common-user/host/ppc64/safe-syscall.inc.S new file mode 100644 index 0000000000..947a850dfd --- /dev/null +++ b/common-user/host/ppc64/safe-syscall.inc.S @@ -0,0 +1,94 @@ +/* + * safe-syscall.inc.S : host-specific assembly fragment + * to handle signals occurring at the same time as system calls. + * This is intended to be included by common-user/safe-syscall.S + * + * Written by Richard Henderson + * Copyright (C) 2016 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + + .global safe_syscall_base + .global safe_syscall_start + .global safe_syscall_end + .type safe_syscall_base, @function + + .text + + /* This is the entry point for making a system call. The calling + * convention here is that of a C varargs function with the + * first argument an 'int *' to the signal_pending flag, the + * second one the system call number (as a 'long'), and all further + * arguments being syscall arguments (also 'long'). 
+ */ +#if _CALL_ELF == 2 +safe_syscall_base: + .cfi_startproc + .localentry safe_syscall_base,0 +#else + .section ".opd","aw" + .align 3 +safe_syscall_base: + .quad .L.safe_syscall_base,.TOC.@tocbase,0 + .previous +.L.safe_syscall_base: + .cfi_startproc +#endif + /* We enter with r3 == &signal_pending + * r4 == syscall number + * r5 ... r10 == syscall arguments + * and return the result in r3 + * and the syscall instruction needs + * r0 == syscall number + * r3 ... r8 == syscall arguments + * and returns the result in r3 + * Shuffle everything around appropriately. + */ + std 14, 16(1) /* Preserve r14 in SP+16 */ + .cfi_offset 14, 16 + mr 14, 3 /* signal_pending */ + mr 0, 4 /* syscall number */ + mr 3, 5 /* syscall arguments */ + mr 4, 6 + mr 5, 7 + mr 6, 8 + mr 7, 9 + mr 8, 10 + + /* This next sequence of code works in conjunction with the + * rewind_if_safe_syscall_function(). If a signal is taken + * and the interrupted PC is anywhere between 'safe_syscall_start' + * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. + * The code sequence must therefore be able to cope with this, and + * the syscall instruction must be the final one in the sequence. + */ +safe_syscall_start: + /* if signal_pending is non-zero, don't do the call */ + lwz 12, 0(14) + cmpwi 0, 12, 0 + bne- 2f + sc +safe_syscall_end: + /* code path when we did execute the syscall */ + ld 14, 16(1) /* restore r14 */ + bso- 1f + blr + + /* code path when we didn't execute the syscall */ +2: ld 14, 16(1) /* restore r14 */ + addi 3, 0, QEMU_ERESTARTSYS + + /* code path setting errno */ +1: b safe_syscall_set_errno_tail + nop /* per abi, for the linker to modify */ + + .cfi_endproc + +#if _CALL_ELF == 2 + .size safe_syscall_base, .-safe_syscall_base +#else + .size safe_syscall_base, .-.L.safe_syscall_base + .size .L.safe_syscall_base, .-.L.safe_syscall_base +#endif diff --git a/common-user/host/riscv/safe-syscall.inc.S b/common-user/host/riscv/safe-syscall.inc.S new file mode 100644 index 0000000000..dfe83c300e --- /dev/null +++ b/common-user/host/riscv/safe-syscall.inc.S @@ -0,0 +1,79 @@ +/* + * safe-syscall.inc.S : host-specific assembly fragment + * to handle signals occurring at the same time as system calls. + * This is intended to be included by common-user/safe-syscall.S + * + * Written by Richard Henderson + * Copyright (C) 2018 Linaro, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + + .global safe_syscall_base + .global safe_syscall_start + .global safe_syscall_end + .type safe_syscall_base, @function + .type safe_syscall_start, @function + .type safe_syscall_end, @function + + /* + * This is the entry point for making a system call. The calling + * convention here is that of a C varargs function with the + * first argument an 'int *' to the signal_pending flag, the + * second one the system call number (as a 'long'), and all further + * arguments being syscall arguments (also 'long'). + */ +safe_syscall_base: + .cfi_startproc + /* + * The syscall calling convention is nearly the same as C: + * we enter with a0 == &signal_pending + * a1 == syscall number + * a2 ... a7 == syscall arguments + * and return the result in a0 + * and the syscall instruction needs + * a7 == syscall number + * a0 ... a5 == syscall arguments + * and returns the result in a0 + * Shuffle everything around appropriately. 
+ */ + mv t0, a0 /* signal_pending pointer */ + mv t1, a1 /* syscall number */ + mv a0, a2 /* syscall arguments */ + mv a1, a3 + mv a2, a4 + mv a3, a5 + mv a4, a6 + mv a5, a7 + mv a7, t1 + + /* + * This next sequence of code works in conjunction with the + * rewind_if_safe_syscall_function(). If a signal is taken + * and the interrupted PC is anywhere between 'safe_syscall_start' + * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. + * The code sequence must therefore be able to cope with this, and + * the syscall instruction must be the final one in the sequence. + */ +safe_syscall_start: + /* If signal_pending is non-zero, don't do the call */ + lw t1, 0(t0) + bnez t1, 2f + scall +safe_syscall_end: + /* code path for having successfully executed the syscall */ + li t2, -4096 + bgtu a0, t2, 0f + ret + + /* code path setting errno */ +0: neg a0, a0 + j safe_syscall_set_errno_tail + + /* code path when we didn't execute the syscall */ +2: li a0, QEMU_ERESTARTSYS + j safe_syscall_set_errno_tail + + .cfi_endproc + .size safe_syscall_base, .-safe_syscall_base diff --git a/common-user/host/s390x/safe-syscall.inc.S b/common-user/host/s390x/safe-syscall.inc.S new file mode 100644 index 0000000000..2ccbaa2402 --- /dev/null +++ b/common-user/host/s390x/safe-syscall.inc.S @@ -0,0 +1,98 @@ +/* + * safe-syscall.inc.S : host-specific assembly fragment + * to handle signals occurring at the same time as system calls. + * This is intended to be included by common-user/safe-syscall.S + * + * Written by Richard Henderson + * Copyright (C) 2016 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + + .global safe_syscall_base + .global safe_syscall_start + .global safe_syscall_end + .type safe_syscall_base, @function + + /* This is the entry point for making a system call. The calling + * convention here is that of a C varargs function with the + * first argument an 'int *' to the signal_pending flag, the + * second one the system call number (as a 'long'), and all further + * arguments being syscall arguments (also 'long'). + */ +safe_syscall_base: + .cfi_startproc + stmg %r6,%r15,48(%r15) /* save all call-saved registers */ + .cfi_offset %r15,-40 + .cfi_offset %r14,-48 + .cfi_offset %r13,-56 + .cfi_offset %r12,-64 + .cfi_offset %r11,-72 + .cfi_offset %r10,-80 + .cfi_offset %r9,-88 + .cfi_offset %r8,-96 + .cfi_offset %r7,-104 + .cfi_offset %r6,-112 + lgr %r1,%r15 + lg %r0,8(%r15) /* load eos */ + aghi %r15,-160 + .cfi_adjust_cfa_offset 160 + stg %r1,0(%r15) /* store back chain */ + stg %r0,8(%r15) /* store eos */ + + /* + * The syscall calling convention isn't the same as the C one: + * we enter with r2 == &signal_pending + * r3 == syscall number + * r4, r5, r6, (stack) == syscall arguments + * and return the result in r2 + * and the syscall instruction needs + * r1 == syscall number + * r2 ... r7 == syscall arguments + * and returns the result in r2 + * Shuffle everything around appropriately. + */ + lgr %r8,%r2 /* signal_pending pointer */ + lgr %r1,%r3 /* syscall number */ + lgr %r2,%r4 /* syscall args */ + lgr %r3,%r5 + lgr %r4,%r6 + lmg %r5,%r7,320(%r15) + + /* This next sequence of code works in conjunction with the + * rewind_if_safe_syscall_function(). If a signal is taken + * and the interrupted PC is anywhere between 'safe_syscall_start' + * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. 
+ * The code sequence must therefore be able to cope with this, and + * the syscall instruction must be the final one in the sequence. + */ +safe_syscall_start: + /* if signal_pending is non-zero, don't do the call */ + icm %r0,15,0(%r8) + jne 2f + svc 0 +safe_syscall_end: + + /* code path for having successfully executed the syscall */ + lg %r15,0(%r15) /* load back chain */ + .cfi_remember_state + .cfi_adjust_cfa_offset -160 + lmg %r6,%r15,48(%r15) /* load saved registers */ + + lghi %r0, -4095 /* check for syscall error */ + clgr %r2, %r0 + blr %r14 /* return on success */ + lcr %r2, %r2 /* create positive errno */ + jg safe_syscall_set_errno_tail + .cfi_restore_state + + /* code path when we didn't execute the syscall */ +2: lg %r15,0(%r15) /* load back chain */ + .cfi_adjust_cfa_offset -160 + lmg %r6,%r15,48(%r15) /* load saved registers */ + lghi %r2, QEMU_ERESTARTSYS + jg safe_syscall_set_errno_tail + + .cfi_endproc + .size safe_syscall_base, .-safe_syscall_base diff --git a/common-user/host/sparc64/safe-syscall.inc.S b/common-user/host/sparc64/safe-syscall.inc.S new file mode 100644 index 0000000000..c7be8f2d25 --- /dev/null +++ b/common-user/host/sparc64/safe-syscall.inc.S @@ -0,0 +1,90 @@ +/* + * safe-syscall.inc.S : host-specific assembly fragment + * to handle signals occurring at the same time as system calls. + * This is intended to be included by common-user/safe-syscall.S + * + * Written by Richard Henderson + * Copyright (C) 2021 Linaro, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + + .text + .balign 4 + + .register %g2, #scratch + .register %g3, #scratch + + .global safe_syscall_base + .global safe_syscall_start + .global safe_syscall_end + .type safe_syscall_base, @function + .type safe_syscall_start, @function + .type safe_syscall_end, @function + +#define STACK_BIAS 2047 +#define WINDOW_SIZE 16 * 8 +#define PARAM(N) STACK_BIAS + WINDOW_SIZE + N * 8 + + /* + * This is the entry point for making a system call. The calling + * convention here is that of a C varargs function with the + * first argument an 'int *' to the signal_pending flag, the + * second one the system call number (as a 'long'), and all further + * arguments being syscall arguments (also 'long'). + */ +safe_syscall_base: + .cfi_startproc + /* + * The syscall calling convention isn't the same as the C one: + * we enter with o0 == &signal_pending + * o1 == syscall number + * o2 ... o5, (stack) == syscall arguments + * and return the result in x0 + * and the syscall instruction needs + * g1 == syscall number + * o0 ... o5 == syscall arguments + * and returns the result in o0 + * Shuffle everything around appropriately. + */ + mov %o0, %g2 /* signal_pending pointer */ + mov %o1, %g1 /* syscall number */ + mov %o2, %o0 /* syscall arguments */ + mov %o3, %o1 + mov %o4, %o2 + mov %o5, %o3 + ldx [%sp + PARAM(6)], %o4 + ldx [%sp + PARAM(7)], %o5 + + /* + * This next sequence of code works in conjunction with the + * rewind_if_safe_syscall_function(). If a signal is taken + * and the interrupted PC is anywhere between 'safe_syscall_start' + * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. + * The code sequence must therefore be able to cope with this, and + * the syscall instruction must be the final one in the sequence. 
+ */ +safe_syscall_start: + /* if signal_pending is non-zero, don't do the call */ + lduw [%g2], %g3 + brnz,pn %g3, 2f + nop + ta 0x6d +safe_syscall_end: + /* code path for having successfully executed the syscall */ + bcs,pn %xcc, 1f + nop + retl + nop + + /* code path when we didn't execute the syscall */ +2: set QEMU_ERESTARTSYS, %o0 + + /* code path setting errno */ +1: mov %o7, %g1 + call safe_syscall_set_errno_tail + mov %g1, %o7 + + .cfi_endproc + .size safe_syscall_base, .-safe_syscall_base diff --git a/linux-user/host/x86_64/safe-syscall.inc.S b/common-user/host/x86_64/safe-syscall.inc.S similarity index 80% rename from linux-user/host/x86_64/safe-syscall.inc.S rename to common-user/host/x86_64/safe-syscall.inc.S index f36992daa3..d1a67a303a 100644 --- a/linux-user/host/x86_64/safe-syscall.inc.S +++ b/common-user/host/x86_64/safe-syscall.inc.S @@ -1,7 +1,7 @@ /* * safe-syscall.inc.S : host-specific assembly fragment * to handle signals occurring at the same time as system calls. - * This is intended to be included by linux-user/safe-syscall.S + * This is intended to be included by common-user/safe-syscall.S * * Copyright (C) 2015 Timothy Edward Baldwin * @@ -19,9 +19,6 @@ * first argument an 'int *' to the signal_pending flag, the * second one the system call number (as a 'long'), and all further * arguments being syscall arguments (also 'long'). - * We return a long which is the syscall's return value, which - * may be negative-errno on failure. Conversion to the - * -1-and-errno-set convention is done by the calling wrapper. */ safe_syscall_base: .cfi_startproc @@ -35,9 +32,9 @@ safe_syscall_base: .cfi_adjust_cfa_offset 8 .cfi_rel_offset rbp, 0 - /* The syscall calling convention isn't the same as the - * C one: - * we enter with rdi == *signal_pending + /* + * The syscall calling convention isn't the same as the C one: + * we enter with rdi == &signal_pending * rsi == syscall number * rdx, rcx, r8, r9, (stack), (stack) == syscall arguments * and return the result in rax @@ -67,25 +64,43 @@ safe_syscall_base: */ safe_syscall_start: /* if signal_pending is non-zero, don't do the call */ - cmpl $0, (%rbp) - jnz 1f + cmpl $0, (%rbp) + jnz 2f syscall safe_syscall_end: + /* code path for having successfully executed the syscall */ +#if defined(__linux__) + /* Linux kernel returns (small) negative errno. */ + cmp $-4095, %rax + jae 0f +#elif defined(__FreeBSD__) + /* FreeBSD kernel returns positive errno and C bit set. 
*/ + jc 1f +#else +#error "unsupported os" +#endif pop %rbp .cfi_remember_state .cfi_def_cfa_offset 8 .cfi_restore rbp ret - -1: - /* code path when we didn't execute the syscall */ .cfi_restore_state - mov $-TARGET_ERESTARTSYS, %rax - pop %rbp + +#if defined(__linux__) +0: neg %eax + jmp 1f +#endif + + /* code path when we didn't execute the syscall */ +2: mov $QEMU_ERESTARTSYS, %eax + + /* code path setting errno */ +1: pop %rbp .cfi_def_cfa_offset 8 .cfi_restore rbp - ret + mov %eax, %edi + jmp safe_syscall_set_errno_tail .cfi_endproc .size safe_syscall_base, .-safe_syscall_base diff --git a/common-user/meson.build b/common-user/meson.build new file mode 100644 index 0000000000..ac9de5b9e3 --- /dev/null +++ b/common-user/meson.build @@ -0,0 +1,10 @@ +if not have_user + subdir_done() +endif + +common_user_inc += include_directories('host/' / host_arch) + +user_ss.add(files( + 'safe-syscall.S', + 'safe-syscall-error.c', +)) diff --git a/common-user/safe-syscall-error.c b/common-user/safe-syscall-error.c new file mode 100644 index 0000000000..cf74b504f8 --- /dev/null +++ b/common-user/safe-syscall-error.c @@ -0,0 +1,25 @@ +/* + * safe-syscall-error.c: errno setting fragment + * This is intended to be invoked by safe-syscall.S + * + * Written by Richard Henderson + * Copyright (C) 2021 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "user/safe-syscall.h" + +/* + * This is intended to be invoked via tail-call on the error path + * from the assembly in host/arch/safe-syscall.inc.S. This takes + * care of the host specific addressing of errno. + * Return -1 to finalize the return value for safe_syscall_base. + */ +long safe_syscall_set_errno_tail(int value) +{ + errno = value; + return -1; +} diff --git a/linux-user/safe-syscall.S b/common-user/safe-syscall.S similarity index 91% rename from linux-user/safe-syscall.S rename to common-user/safe-syscall.S index 42ea7c40ba..74f7e35694 100644 --- a/linux-user/safe-syscall.S +++ b/common-user/safe-syscall.S @@ -10,15 +10,12 @@ * See the COPYING file in the top-level directory. */ -#include "hostdep.h" -#include "target_errno_defs.h" +#include "special-errno.h" /* We have the correct host directory on our include path * so that this will pull in the right fragment for the architecture. 
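The host fragments above and safe-syscall-error.c pin down a single error convention: on any failure path the assembly tail-calls safe_syscall_set_errno_tail(), which stores the value into errno and returns -1, and a signal that arrives before the syscall instruction is reported as errno == QEMU_ERESTARTSYS. A minimal caller-side sketch of that convention follows; it is illustrative only (the wrapper name emulated_read and the conversion back to negative errno are assumptions, not code from this patch).

#include <errno.h>
#include <sys/syscall.h>

/* Entry point provided by common-user/safe-syscall.S, per the comments above. */
extern long safe_syscall_base(int *signal_pending, long number, ...);

static long emulated_read(int *signal_pending, int fd, void *buf, long len)
{
    long ret = safe_syscall_base(signal_pending, SYS_read, fd, buf, len);

    if (ret == -1) {
        /*
         * errno is either the host errno that the assembly handed to
         * safe_syscall_set_errno_tail(), or QEMU_ERESTARTSYS if a signal
         * arrived before the syscall instruction ran and the call should
         * be restarted after the signal is delivered.
         */
        return -errno;
    }
    return ret;
}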
*/ -#ifdef HAVE_SAFE_SYSCALL #include "safe-syscall.inc.S" -#endif /* We must specifically say that we're happy for the stack to not be * executable, otherwise the toolchain will default to assuming our diff --git a/configs/devices/hppa-softmmu/default.mak b/configs/devices/hppa-softmmu/default.mak index b64c5eb3ff..b0364bb88f 100644 --- a/configs/devices/hppa-softmmu/default.mak +++ b/configs/devices/hppa-softmmu/default.mak @@ -6,4 +6,4 @@ # Boards: # -CONFIG_DINO=y +CONFIG_HPPA_B160L=y diff --git a/configs/devices/i386-softmmu/default.mak b/configs/devices/i386-softmmu/default.mak index 94e947653c..b4c920b531 100644 --- a/configs/devices/i386-softmmu/default.mak +++ b/configs/devices/i386-softmmu/default.mak @@ -22,6 +22,7 @@ #CONFIG_TPM_CRB=n #CONFIG_TPM_TIS_ISA=n #CONFIG_VTD=n +#CONFIG_SGX=n # Boards: # diff --git a/configs/devices/loongarch64-softmmu/default.mak b/configs/devices/loongarch64-softmmu/default.mak new file mode 100644 index 0000000000..928bc117ef --- /dev/null +++ b/configs/devices/loongarch64-softmmu/default.mak @@ -0,0 +1,3 @@ +# Default configuration for loongarch64-softmmu + +CONFIG_LOONGARCH_VIRT=y diff --git a/configs/devices/mips-softmmu/common.mak b/configs/devices/mips-softmmu/common.mak index ea78fe7275..416161f833 100644 --- a/configs/devices/mips-softmmu/common.mak +++ b/configs/devices/mips-softmmu/common.mak @@ -7,7 +7,7 @@ CONFIG_ISA_BUS=y CONFIG_PCI=y CONFIG_PCI_DEVICES=y CONFIG_VGA_ISA=y -CONFIG_VGA_ISA_MM=y +CONFIG_VGA_MMIO=y CONFIG_VGA_CIRRUS=y CONFIG_VMWARE_VGA=y CONFIG_SERIAL=y @@ -18,15 +18,11 @@ CONFIG_PCSPK=y CONFIG_PCKBD=y CONFIG_FDC=y CONFIG_ACPI=y -CONFIG_ACPI_X86=y -CONFIG_ACPI_MEMORY_HOTPLUG=y -CONFIG_ACPI_NVDIMM=y -CONFIG_ACPI_CPU_HOTPLUG=y +CONFIG_ACPI_PIIX4=y CONFIG_APM=y CONFIG_I8257=y CONFIG_PIIX4=y CONFIG_IDE_ISA=y -CONFIG_IDE_PIIX=y CONFIG_PFLASH_CFI01=y CONFIG_I8259=y CONFIG_MC146818RTC=y diff --git a/configs/devices/mips64el-softmmu/default.mak b/configs/devices/mips64el-softmmu/default.mak index c610749ac1..d5188f7ea5 100644 --- a/configs/devices/mips64el-softmmu/default.mak +++ b/configs/devices/mips64el-softmmu/default.mak @@ -1,7 +1,6 @@ # Default configuration for mips64el-softmmu include ../mips-softmmu/common.mak -CONFIG_IDE_VIA=y CONFIG_FULOONG=y CONFIG_LOONGSON3V=y CONFIG_ATI_VGA=y diff --git a/configs/devices/or1k-softmmu/default.mak b/configs/devices/or1k-softmmu/default.mak index 168101c39a..89c39e3123 100644 --- a/configs/devices/or1k-softmmu/default.mak +++ b/configs/devices/or1k-softmmu/default.mak @@ -3,3 +3,4 @@ # Boards: # CONFIG_OR1K_SIM=y +CONFIG_OR1K_VIRT=y diff --git a/configs/devices/ppc-softmmu/default.mak b/configs/devices/ppc-softmmu/default.mak index 658a454426..a887f5438b 100644 --- a/configs/devices/ppc-softmmu/default.mak +++ b/configs/devices/ppc-softmmu/default.mak @@ -1,7 +1,8 @@ # Default configuration for ppc-softmmu # For embedded PPCs: -CONFIG_E500=y +CONFIG_E500PLAT=y +CONFIG_MPC8544DS=y CONFIG_PPC405=y CONFIG_PPC440=y CONFIG_VIRTEX=y diff --git a/configs/targets/aarch64-linux-user.mak b/configs/targets/aarch64-linux-user.mak index 4713253709..db552f1839 100644 --- a/configs/targets/aarch64-linux-user.mak +++ b/configs/targets/aarch64-linux-user.mak @@ -1,5 +1,6 @@ TARGET_ARCH=aarch64 TARGET_BASE_ARCH=arm -TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml +TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml TARGET_HAS_BFLT=y +CONFIG_SEMIHOSTING=y 
CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y diff --git a/configs/targets/aarch64-softmmu.mak b/configs/targets/aarch64-softmmu.mak index 7703127674..d489e6da83 100644 --- a/configs/targets/aarch64-softmmu.mak +++ b/configs/targets/aarch64-softmmu.mak @@ -1,5 +1,5 @@ TARGET_ARCH=aarch64 TARGET_BASE_ARCH=arm TARGET_SUPPORTS_MTTCG=y -TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml +TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml TARGET_NEED_FDT=y diff --git a/configs/targets/aarch64_be-linux-user.mak b/configs/targets/aarch64_be-linux-user.mak index fae831558d..dc78044fb1 100644 --- a/configs/targets/aarch64_be-linux-user.mak +++ b/configs/targets/aarch64_be-linux-user.mak @@ -1,6 +1,7 @@ TARGET_ARCH=aarch64 TARGET_BASE_ARCH=arm -TARGET_WORDS_BIGENDIAN=y -TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml +TARGET_BIG_ENDIAN=y +TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml TARGET_HAS_BFLT=y +CONFIG_SEMIHOSTING=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y diff --git a/configs/targets/arm-bsd-user.mak b/configs/targets/arm-bsd-user.mak new file mode 100644 index 0000000000..cb143e6426 --- /dev/null +++ b/configs/targets/arm-bsd-user.mak @@ -0,0 +1,2 @@ +TARGET_ARCH=arm +TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml diff --git a/configs/targets/arm-linux-user.mak b/configs/targets/arm-linux-user.mak index e741ffd4d3..7f5d65794c 100644 --- a/configs/targets/arm-linux-user.mak +++ b/configs/targets/arm-linux-user.mak @@ -1,6 +1,7 @@ TARGET_ARCH=arm TARGET_SYSTBL_ABI=common,oabi TARGET_SYSTBL=syscall.tbl -TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml +TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml TARGET_HAS_BFLT=y +CONFIG_SEMIHOSTING=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y diff --git a/configs/targets/arm-softmmu.mak b/configs/targets/arm-softmmu.mak index 84a98f4818..92c8349b96 100644 --- a/configs/targets/arm-softmmu.mak +++ b/configs/targets/arm-softmmu.mak @@ -1,4 +1,4 @@ TARGET_ARCH=arm TARGET_SUPPORTS_MTTCG=y -TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml +TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml TARGET_NEED_FDT=y diff --git a/configs/targets/armeb-linux-user.mak b/configs/targets/armeb-linux-user.mak index 255e44e8b0..943d0d87bf 100644 --- a/configs/targets/armeb-linux-user.mak +++ b/configs/targets/armeb-linux-user.mak @@ -1,7 +1,8 @@ TARGET_ARCH=arm TARGET_SYSTBL_ABI=common,oabi TARGET_SYSTBL=syscall.tbl -TARGET_WORDS_BIGENDIAN=y -TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml +TARGET_BIG_ENDIAN=y 
+TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml TARGET_HAS_BFLT=y +CONFIG_SEMIHOSTING=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y diff --git a/configs/targets/hppa-linux-user.mak b/configs/targets/hppa-linux-user.mak index f01e0a7b9e..db873a8796 100644 --- a/configs/targets/hppa-linux-user.mak +++ b/configs/targets/hppa-linux-user.mak @@ -2,4 +2,4 @@ TARGET_ARCH=hppa TARGET_SYSTBL_ABI=common,32 TARGET_SYSTBL=syscall.tbl TARGET_ALIGNED_ONLY=y -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y diff --git a/configs/targets/hppa-softmmu.mak b/configs/targets/hppa-softmmu.mak index e3e71eb21b..44f07b0332 100644 --- a/configs/targets/hppa-softmmu.mak +++ b/configs/targets/hppa-softmmu.mak @@ -1,4 +1,4 @@ TARGET_ARCH=hppa TARGET_ALIGNED_ONLY=y -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y TARGET_SUPPORTS_MTTCG=y diff --git a/configs/targets/i386-softmmu.mak b/configs/targets/i386-softmmu.mak index 5babf71895..6b3c99fc86 100644 --- a/configs/targets/i386-softmmu.mak +++ b/configs/targets/i386-softmmu.mak @@ -1,3 +1,4 @@ TARGET_ARCH=i386 TARGET_SUPPORTS_MTTCG=y +TARGET_NEED_FDT=y TARGET_XML_FILES= gdb-xml/i386-32bit.xml diff --git a/configs/targets/loongarch64-linux-user.mak b/configs/targets/loongarch64-linux-user.mak new file mode 100644 index 0000000000..7d1b964020 --- /dev/null +++ b/configs/targets/loongarch64-linux-user.mak @@ -0,0 +1,3 @@ +# Default configuration for loongarch64-linux-user +TARGET_ARCH=loongarch64 +TARGET_BASE_ARCH=loongarch diff --git a/configs/targets/loongarch64-softmmu.mak b/configs/targets/loongarch64-softmmu.mak new file mode 100644 index 0000000000..9abc99056f --- /dev/null +++ b/configs/targets/loongarch64-softmmu.mak @@ -0,0 +1,5 @@ +TARGET_ARCH=loongarch64 +TARGET_BASE_ARCH=loongarch +TARGET_SUPPORTS_MTTCG=y +TARGET_XML_FILES= gdb-xml/loongarch-base64.xml gdb-xml/loongarch-fpu.xml +TARGET_NEED_FDT=y diff --git a/configs/targets/m68k-linux-user.mak b/configs/targets/m68k-linux-user.mak index 805d16c6ab..579b5d299c 100644 --- a/configs/targets/m68k-linux-user.mak +++ b/configs/targets/m68k-linux-user.mak @@ -1,6 +1,6 @@ TARGET_ARCH=m68k TARGET_SYSTBL_ABI=common TARGET_SYSTBL=syscall.tbl -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y TARGET_XML_FILES= gdb-xml/cf-core.xml gdb-xml/cf-fp.xml gdb-xml/m68k-core.xml gdb-xml/m68k-fp.xml TARGET_HAS_BFLT=y diff --git a/configs/targets/m68k-softmmu.mak b/configs/targets/m68k-softmmu.mak index 5df1a2b7d7..bbcd0bada6 100644 --- a/configs/targets/m68k-softmmu.mak +++ b/configs/targets/m68k-softmmu.mak @@ -1,3 +1,3 @@ TARGET_ARCH=m68k -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y TARGET_XML_FILES= gdb-xml/cf-core.xml gdb-xml/cf-fp.xml gdb-xml/m68k-core.xml gdb-xml/m68k-fp.xml diff --git a/configs/targets/microblaze-linux-user.mak b/configs/targets/microblaze-linux-user.mak index 2a25bf2fa3..4249a37f65 100644 --- a/configs/targets/microblaze-linux-user.mak +++ b/configs/targets/microblaze-linux-user.mak @@ -1,5 +1,5 @@ TARGET_ARCH=microblaze TARGET_SYSTBL_ABI=common TARGET_SYSTBL=syscall.tbl -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y TARGET_HAS_BFLT=y diff --git a/configs/targets/microblaze-softmmu.mak b/configs/targets/microblaze-softmmu.mak index 33f2a00402..8385e2d333 100644 --- a/configs/targets/microblaze-softmmu.mak +++ b/configs/targets/microblaze-softmmu.mak @@ -1,4 +1,4 @@ TARGET_ARCH=microblaze -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y TARGET_SUPPORTS_MTTCG=y TARGET_NEED_FDT=y diff 
--git a/configs/targets/mips-linux-user.mak b/configs/targets/mips-linux-user.mak index 19f5779831..71fa77d464 100644 --- a/configs/targets/mips-linux-user.mak +++ b/configs/targets/mips-linux-user.mak @@ -3,4 +3,4 @@ TARGET_ABI_MIPSO32=y TARGET_SYSTBL_ABI=o32 TARGET_SYSTBL=syscall_o32.tbl TARGET_ALIGNED_ONLY=y -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y diff --git a/configs/targets/mips-softmmu.mak b/configs/targets/mips-softmmu.mak index 8a49999a47..7787a4d94c 100644 --- a/configs/targets/mips-softmmu.mak +++ b/configs/targets/mips-softmmu.mak @@ -1,4 +1,4 @@ TARGET_ARCH=mips TARGET_ALIGNED_ONLY=y -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y TARGET_SUPPORTS_MTTCG=y diff --git a/configs/targets/mips64-linux-user.mak b/configs/targets/mips64-linux-user.mak index 32fd1acdf2..5a4771f22d 100644 --- a/configs/targets/mips64-linux-user.mak +++ b/configs/targets/mips64-linux-user.mak @@ -4,4 +4,4 @@ TARGET_BASE_ARCH=mips TARGET_SYSTBL_ABI=n64 TARGET_SYSTBL=syscall_n64.tbl TARGET_ALIGNED_ONLY=y -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y diff --git a/configs/targets/mips64-softmmu.mak b/configs/targets/mips64-softmmu.mak index ece25b9624..568d66650c 100644 --- a/configs/targets/mips64-softmmu.mak +++ b/configs/targets/mips64-softmmu.mak @@ -1,4 +1,4 @@ TARGET_ARCH=mips64 TARGET_BASE_ARCH=mips TARGET_ALIGNED_ONLY=y -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y diff --git a/configs/targets/mipsn32-linux-user.mak b/configs/targets/mipsn32-linux-user.mak index b8c2441ad0..1e80b302fc 100644 --- a/configs/targets/mipsn32-linux-user.mak +++ b/configs/targets/mipsn32-linux-user.mak @@ -5,4 +5,4 @@ TARGET_BASE_ARCH=mips TARGET_SYSTBL_ABI=n32 TARGET_SYSTBL=syscall_n32.tbl TARGET_ALIGNED_ONLY=y -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y diff --git a/configs/targets/nios2-softmmu.mak b/configs/targets/nios2-softmmu.mak index 9a372f0717..1e93b54cd1 100644 --- a/configs/targets/nios2-softmmu.mak +++ b/configs/targets/nios2-softmmu.mak @@ -1 +1,2 @@ TARGET_ARCH=nios2 +TARGET_ALIGNED_ONLY=y diff --git a/configs/targets/or1k-linux-user.mak b/configs/targets/or1k-linux-user.mak index 1dfb93e46d..39558f77ec 100644 --- a/configs/targets/or1k-linux-user.mak +++ b/configs/targets/or1k-linux-user.mak @@ -1,2 +1,2 @@ TARGET_ARCH=openrisc -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y diff --git a/configs/targets/or1k-softmmu.mak b/configs/targets/or1k-softmmu.mak index 1dfb93e46d..432f855a30 100644 --- a/configs/targets/or1k-softmmu.mak +++ b/configs/targets/or1k-softmmu.mak @@ -1,2 +1,4 @@ TARGET_ARCH=openrisc -TARGET_WORDS_BIGENDIAN=y +TARGET_SUPPORTS_MTTCG=y +TARGET_BIG_ENDIAN=y +TARGET_NEED_FDT=y diff --git a/configs/targets/ppc-linux-user.mak b/configs/targets/ppc-linux-user.mak index ca4187e4aa..cc0439a528 100644 --- a/configs/targets/ppc-linux-user.mak +++ b/configs/targets/ppc-linux-user.mak @@ -1,5 +1,5 @@ TARGET_ARCH=ppc TARGET_SYSTBL_ABI=common,nospu,32 TARGET_SYSTBL=syscall.tbl -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y TARGET_XML_FILES= gdb-xml/power-core.xml gdb-xml/power-fpu.xml gdb-xml/power-altivec.xml gdb-xml/power-spe.xml diff --git a/configs/targets/ppc-softmmu.mak b/configs/targets/ppc-softmmu.mak index f4eef1819a..774440108f 100644 --- a/configs/targets/ppc-softmmu.mak +++ b/configs/targets/ppc-softmmu.mak @@ -1,4 +1,4 @@ TARGET_ARCH=ppc -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y TARGET_XML_FILES= gdb-xml/power-core.xml gdb-xml/power-fpu.xml gdb-xml/power-altivec.xml gdb-xml/power-spe.xml TARGET_NEED_FDT=y diff --git a/configs/targets/ppc64-linux-user.mak 
b/configs/targets/ppc64-linux-user.mak index 3133346676..4d81969f4a 100644 --- a/configs/targets/ppc64-linux-user.mak +++ b/configs/targets/ppc64-linux-user.mak @@ -3,5 +3,5 @@ TARGET_BASE_ARCH=ppc TARGET_ABI_DIR=ppc TARGET_SYSTBL_ABI=common,nospu,64 TARGET_SYSTBL=syscall.tbl -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y TARGET_XML_FILES= gdb-xml/power64-core.xml gdb-xml/power-fpu.xml gdb-xml/power-altivec.xml gdb-xml/power-spe.xml gdb-xml/power-vsx.xml diff --git a/configs/targets/ppc64-softmmu.mak b/configs/targets/ppc64-softmmu.mak index 84fbf46be9..ddf0c39617 100644 --- a/configs/targets/ppc64-softmmu.mak +++ b/configs/targets/ppc64-softmmu.mak @@ -1,6 +1,6 @@ TARGET_ARCH=ppc64 TARGET_BASE_ARCH=ppc -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y TARGET_SUPPORTS_MTTCG=y TARGET_XML_FILES= gdb-xml/power64-core.xml gdb-xml/power-fpu.xml gdb-xml/power-altivec.xml gdb-xml/power-spe.xml gdb-xml/power-vsx.xml TARGET_NEED_FDT=y diff --git a/configs/targets/ppc64abi32-linux-user.mak b/configs/targets/ppc64abi32-linux-user.mak deleted file mode 100644 index 0945451081..0000000000 --- a/configs/targets/ppc64abi32-linux-user.mak +++ /dev/null @@ -1,8 +0,0 @@ -TARGET_ARCH=ppc64 -TARGET_ABI32=y -TARGET_BASE_ARCH=ppc -TARGET_ABI_DIR=ppc -TARGET_SYSTBL_ABI=common,nospu,32 -TARGET_SYSTBL=syscall.tbl -TARGET_WORDS_BIGENDIAN=y -TARGET_XML_FILES= gdb-xml/power64-core.xml gdb-xml/power-fpu.xml gdb-xml/power-altivec.xml gdb-xml/power-spe.xml gdb-xml/power-vsx.xml diff --git a/configs/targets/riscv32-linux-user.mak b/configs/targets/riscv32-linux-user.mak index bd2f1fd497..9761618e67 100644 --- a/configs/targets/riscv32-linux-user.mak +++ b/configs/targets/riscv32-linux-user.mak @@ -2,4 +2,5 @@ TARGET_ARCH=riscv32 TARGET_BASE_ARCH=riscv TARGET_ABI_DIR=riscv TARGET_XML_FILES= gdb-xml/riscv-32bit-cpu.xml gdb-xml/riscv-32bit-fpu.xml gdb-xml/riscv-64bit-fpu.xml gdb-xml/riscv-32bit-virtual.xml +CONFIG_SEMIHOSTING=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y diff --git a/configs/targets/riscv64-linux-user.mak b/configs/targets/riscv64-linux-user.mak index 4aca7662ce..cfd1fd382f 100644 --- a/configs/targets/riscv64-linux-user.mak +++ b/configs/targets/riscv64-linux-user.mak @@ -2,4 +2,5 @@ TARGET_ARCH=riscv64 TARGET_BASE_ARCH=riscv TARGET_ABI_DIR=riscv TARGET_XML_FILES= gdb-xml/riscv-64bit-cpu.xml gdb-xml/riscv-32bit-fpu.xml gdb-xml/riscv-64bit-fpu.xml gdb-xml/riscv-64bit-virtual.xml +CONFIG_SEMIHOSTING=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y diff --git a/configs/targets/s390x-linux-user.mak b/configs/targets/s390x-linux-user.mak index 9e31ce6457..e2978248ed 100644 --- a/configs/targets/s390x-linux-user.mak +++ b/configs/targets/s390x-linux-user.mak @@ -1,5 +1,5 @@ TARGET_ARCH=s390x TARGET_SYSTBL_ABI=common,64 TARGET_SYSTBL=syscall.tbl -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y TARGET_XML_FILES= gdb-xml/s390x-core64.xml gdb-xml/s390-acr.xml gdb-xml/s390-fpr.xml gdb-xml/s390-vx.xml gdb-xml/s390-cr.xml gdb-xml/s390-virt.xml gdb-xml/s390-gs.xml diff --git a/configs/targets/s390x-softmmu.mak b/configs/targets/s390x-softmmu.mak index fd9fbd870d..258b4cf358 100644 --- a/configs/targets/s390x-softmmu.mak +++ b/configs/targets/s390x-softmmu.mak @@ -1,4 +1,4 @@ TARGET_ARCH=s390x -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y TARGET_SUPPORTS_MTTCG=y TARGET_XML_FILES= gdb-xml/s390x-core64.xml gdb-xml/s390-acr.xml gdb-xml/s390-fpr.xml gdb-xml/s390-vx.xml gdb-xml/s390-cr.xml gdb-xml/s390-virt.xml gdb-xml/s390-gs.xml diff --git a/configs/targets/sh4eb-linux-user.mak b/configs/targets/sh4eb-linux-user.mak index 9b6fb4c1bb..6724165efe 
100644 --- a/configs/targets/sh4eb-linux-user.mak +++ b/configs/targets/sh4eb-linux-user.mak @@ -2,5 +2,5 @@ TARGET_ARCH=sh4 TARGET_SYSTBL_ABI=common TARGET_SYSTBL=syscall.tbl TARGET_ALIGNED_ONLY=y -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y TARGET_HAS_BFLT=y diff --git a/configs/targets/sh4eb-softmmu.mak b/configs/targets/sh4eb-softmmu.mak index 382e9a80f8..dc8b30bf7a 100644 --- a/configs/targets/sh4eb-softmmu.mak +++ b/configs/targets/sh4eb-softmmu.mak @@ -1,3 +1,3 @@ TARGET_ARCH=sh4 TARGET_ALIGNED_ONLY=y -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y diff --git a/configs/targets/sparc-linux-user.mak b/configs/targets/sparc-linux-user.mak index 53dc7aaed5..00e7bc1f07 100644 --- a/configs/targets/sparc-linux-user.mak +++ b/configs/targets/sparc-linux-user.mak @@ -2,4 +2,4 @@ TARGET_ARCH=sparc TARGET_SYSTBL_ABI=common,32 TARGET_SYSTBL=syscall.tbl TARGET_ALIGNED_ONLY=y -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y diff --git a/configs/targets/sparc-softmmu.mak b/configs/targets/sparc-softmmu.mak index 9ba3d7b07f..a849190f01 100644 --- a/configs/targets/sparc-softmmu.mak +++ b/configs/targets/sparc-softmmu.mak @@ -1,3 +1,3 @@ TARGET_ARCH=sparc TARGET_ALIGNED_ONLY=y -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y diff --git a/configs/targets/sparc32plus-linux-user.mak b/configs/targets/sparc32plus-linux-user.mak index e4c51df3dc..a65c0951a1 100644 --- a/configs/targets/sparc32plus-linux-user.mak +++ b/configs/targets/sparc32plus-linux-user.mak @@ -5,4 +5,4 @@ TARGET_ABI_DIR=sparc TARGET_SYSTBL_ABI=common,32 TARGET_SYSTBL=syscall.tbl TARGET_ALIGNED_ONLY=y -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y diff --git a/configs/targets/sparc64-linux-user.mak b/configs/targets/sparc64-linux-user.mak index 9d23ab4a26..20fcb93fa4 100644 --- a/configs/targets/sparc64-linux-user.mak +++ b/configs/targets/sparc64-linux-user.mak @@ -4,4 +4,4 @@ TARGET_ABI_DIR=sparc TARGET_SYSTBL_ABI=common,64 TARGET_SYSTBL=syscall.tbl TARGET_ALIGNED_ONLY=y -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y diff --git a/configs/targets/sparc64-softmmu.mak b/configs/targets/sparc64-softmmu.mak index 8dd3217800..c626ac3eae 100644 --- a/configs/targets/sparc64-softmmu.mak +++ b/configs/targets/sparc64-softmmu.mak @@ -1,4 +1,4 @@ TARGET_ARCH=sparc64 TARGET_BASE_ARCH=sparc TARGET_ALIGNED_ONLY=y -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y diff --git a/configs/targets/x86_64-softmmu.mak b/configs/targets/x86_64-softmmu.mak index 75e42bc840..197817c943 100644 --- a/configs/targets/x86_64-softmmu.mak +++ b/configs/targets/x86_64-softmmu.mak @@ -1,4 +1,5 @@ TARGET_ARCH=x86_64 TARGET_BASE_ARCH=i386 TARGET_SUPPORTS_MTTCG=y +TARGET_NEED_FDT=y TARGET_XML_FILES= gdb-xml/i386-64bit.xml diff --git a/configs/targets/xtensaeb-linux-user.mak b/configs/targets/xtensaeb-linux-user.mak index 1ea0f1ba91..bce2d1d65d 100644 --- a/configs/targets/xtensaeb-linux-user.mak +++ b/configs/targets/xtensaeb-linux-user.mak @@ -1,5 +1,5 @@ TARGET_ARCH=xtensa TARGET_SYSTBL_ABI=common TARGET_SYSTBL=syscall.tbl -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y TARGET_HAS_BFLT=y diff --git a/configs/targets/xtensaeb-softmmu.mak b/configs/targets/xtensaeb-softmmu.mak index 405cf5acbb..b02e11b820 100644 --- a/configs/targets/xtensaeb-softmmu.mak +++ b/configs/targets/xtensaeb-softmmu.mak @@ -1,3 +1,3 @@ TARGET_ARCH=xtensa -TARGET_WORDS_BIGENDIAN=y +TARGET_BIG_ENDIAN=y TARGET_SUPPORTS_MTTCG=y diff --git a/configure b/configure index 9a4cd7f789..706200c198 100755 --- a/configure +++ b/configure @@ -57,7 +57,7 @@ GNUmakefile: ; EOF cd build - exec 
$source_path/configure "$@" + exec "$source_path/configure" "$@" fi # Temporary directory used for files created while @@ -67,8 +67,7 @@ fi # it when configure exits.) TMPDIR1="config-temp" rm -rf "${TMPDIR1}" -mkdir -p "${TMPDIR1}" -if [ $? -ne 0 ]; then +if ! mkdir -p "${TMPDIR1}"; then echo "ERROR: failed to create temporary directory" exit 1 fi @@ -76,9 +75,8 @@ fi TMPB="qemu-conf" TMPC="${TMPDIR1}/${TMPB}.c" TMPO="${TMPDIR1}/${TMPB}.o" -TMPCXX="${TMPDIR1}/${TMPB}.cxx" +TMPM="${TMPDIR1}/${TMPB}.m" TMPE="${TMPDIR1}/${TMPB}.exe" -TMPTXT="${TMPDIR1}/${TMPB}.txt" rm -f config.log @@ -109,6 +107,20 @@ error_exit() { } do_compiler() { + # Run the compiler, capturing its output to the log. First argument + # is compiler binary to execute. + compiler="$1" + shift + if test -n "$BASH_VERSION"; then eval ' + echo >>config.log " +funcs: ${FUNCNAME[*]} +lines: ${BASH_LINENO[*]}" + '; fi + echo $compiler "$@" >> config.log + $compiler "$@" >> config.log 2>&1 || return $? +} + +do_compiler_werror() { # Run the compiler, capturing its output to the log. First argument # is compiler binary to execute. compiler="$1" @@ -142,11 +154,11 @@ lines: ${BASH_LINENO[*]}" } do_cc() { - do_compiler "$cc" "$@" + do_compiler_werror "$cc" $CPU_CFLAGS "$@" } -do_cxx() { - do_compiler "$cxx" "$@" +do_objc() { + do_compiler_werror "$objcc" $CPU_CFLAGS "$@" } # Append $2 to the variable named $1, with space separation @@ -154,34 +166,16 @@ add_to() { eval $1=\${$1:+\"\$$1 \"}\$2 } -update_cxxflags() { - # Set QEMU_CXXFLAGS from QEMU_CFLAGS by filtering out those - # options which some versions of GCC's C++ compiler complain about - # because they only make sense for C programs. - QEMU_CXXFLAGS="$QEMU_CXXFLAGS -D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS" - CONFIGURE_CXXFLAGS=$(echo "$CONFIGURE_CFLAGS" | sed s/-std=gnu11//) - for arg in $QEMU_CFLAGS; do - case $arg in - -Wstrict-prototypes|-Wmissing-prototypes|-Wnested-externs|\ - -Wold-style-declaration|-Wold-style-definition|-Wredundant-decls) - ;; - *) - QEMU_CXXFLAGS=${QEMU_CXXFLAGS:+$QEMU_CXXFLAGS }$arg - ;; - esac - done -} - compile_object() { local_cflags="$1" - do_cc $CFLAGS $CONFIGURE_CFLAGS $QEMU_CFLAGS $local_cflags -c -o $TMPO $TMPC + do_cc $CFLAGS $EXTRA_CFLAGS $CONFIGURE_CFLAGS $QEMU_CFLAGS $local_cflags -c -o $TMPO $TMPC } compile_prog() { local_cflags="$1" local_ldflags="$2" - do_cc $CFLAGS $CONFIGURE_CFLAGS $QEMU_CFLAGS $local_cflags -o $TMPE $TMPC \ - $LDFLAGS $CONFIGURE_LDFLAGS $QEMU_LDFLAGS $local_ldflags + do_cc $CFLAGS $EXTRA_CFLAGS $CONFIGURE_CFLAGS $QEMU_CFLAGS $local_cflags -o $TMPE $TMPC \ + $LDFLAGS $EXTRA_LDFLAGS $CONFIGURE_LDFLAGS $QEMU_LDFLAGS $local_ldflags } # symbolically link $1 to $2. Portable version of "ln -sf". 
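With $CPU_CFLAGS and the new $EXTRA_CFLAGS/$EXTRA_LDFLAGS threaded through do_cc, compile_object and compile_prog, every configure probe is now built with the same flags the real build will use. The probes themselves are throwaway C programs written to $TMPC; the snippet below is a sketch of that style of probe (the epoll_create1 check is an invented example, not one of the probes in this script), where a successful compile_prog run means the corresponding feature can be enabled.

#include <sys/epoll.h>

int main(void)
{
    /* If this compiles and links, the host provides epoll_create1(). */
    return epoll_create1(EPOLL_CLOEXEC) < 0;
}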
@@ -216,18 +210,10 @@ version_ge () { done } -have_backend () { - echo "$trace_backends" | grep "$1" >/dev/null -} - glob() { eval test -z '"${1#'"$2"'}"' } -ld_has() { - $ld --help 2>/dev/null | grep ".$1" >/dev/null 2>&1 -} - if printf %s\\n "$source_path" "$PWD" | grep -q "[[:space:]:]"; then error_exit "main directory cannot contain spaces nor colons" @@ -235,20 +221,10 @@ fi # default parameters cpu="" -iasl="iasl" -interp_prefix="/usr/gnemul/qemu-%M" static="no" cross_compile="no" cross_prefix="" -audio_drv_list="" -block_drv_rw_whitelist="" -block_drv_ro_whitelist="" -block_drv_whitelist_tools="no" host_cc="cc" -audio_win_int="" -libs_qga="" -debug_info="yes" -lto="false" stack_protector="" safe_stack="" use_containers="yes" @@ -291,166 +267,40 @@ for opt do esac done -auto="disabled" +EXTRA_CFLAGS="" +EXTRA_CXXFLAGS="" +EXTRA_OBJCFLAGS="" +EXTRA_LDFLAGS="" -brlapi="$auto" -curl="$auto" -iconv="$auto" -curses="$auto" -docs="$auto" -fdt="$auto" -netmap="no" -sdl="enabled" -sdl_image="$auto" -coreaudio="auto" -virtiofsd="$auto" -virtfs="$auto" -libudev="$auto" -mpath="$auto" -vnc="disabled" -sparse="$auto" -vde="$default_feature" -vnc_sasl="$auto" -vnc_jpeg="$auto" -vnc_png="$auto" -xkbcommon="$auto" -xen="$default_feature" -xen_ctrl_version="$default_feature" -xen_pci_passthrough="$auto" -linux_aio="$default_feature" -linux_io_uring="$auto" -cap_ng="$auto" -attr="$auto" -xfs="$default_feature" -tcg="enabled" -membarrier="$default_feature" -vhost_kernel="$default_feature" -vhost_net="$default_feature" -vhost_crypto="$default_feature" -vhost_scsi="$default_feature" -vhost_vsock="$default_feature" -vhost_user="no" -vhost_user_blk_server="$auto" -vhost_user_fs="$default_feature" -vhost_vdpa="$default_feature" -bpf="$auto" -kvm="$auto" -hax="$auto" -hvf="$auto" -whpx="$auto" -nvmm="$auto" -rdma="$default_feature" -pvrdma="$default_feature" -gprof="no" debug_tcg="no" -debug="no" -renderdoc="$auto" sanitizers="no" tsan="no" -fortify_source="$default_feature" -strip_opt="yes" -tcg_interpreter="false" -bigendian="no" -mingw32="no" -gcov="no" +fortify_source="yes" EXESUF="" -HOST_DSOSUF=".so" modules="no" -module_upgrades="no" prefix="/usr/local" qemu_suffix="qemu" -slirp="internal" -oss_lib="" -bsd="no" -linux="no" -solaris="no" -profiler="no" -cocoa="$auto" softmmu="yes" -linux_user="no" -bsd_user="no" -blobs="true" -pkgversion="" +linux_user="" +bsd_user="" pie="" -qom_cast_debug="yes" -trace_backends="nop" -trace_file="trace" -spice="$default_feature" -spice_protocol="$auto" -rbd="$auto" -smartcard="$auto" -u2f="$auto" -libusb="$auto" -usb_redir="$auto" -opengl="yes" -cpuid_h="no" -avx2_opt="$default_feature" -capstone="$auto" -lzo="$auto" -snappy="$auto" -bzip2="$auto" -lzfse="$auto" -zstd="$auto" -guest_agent="$default_feature" -guest_agent_with_vss="no" -guest_agent_ntddscsi="no" -guest_agent_msi="$auto" -vss_win32_sdk="$default_feature" -win_sdk="no" -want_tools="$default_feature" -libiscsi="$auto" -libnfs="$auto" coroutine="" -coroutine_pool="yes" -debug_stack_usage="no" -crypto_afalg="no" -cfi="false" -cfi_debug="false" -seccomp="$auto" -glusterfs="$auto" -gtk="$auto" -tls_priority="NORMAL" -gnutls="auto" -nettle="auto" -gcrypt="auto" -auth_pam="auto" -vte="auto" -virglrenderer="auto" -tpm="$default_feature" -libssh="$default_feature" -live_block_migration=${default_feature:-yes} -numa="$default_feature" -tcmalloc="no" -jemalloc="no" -replication=${default_feature:-yes} -bochs=${default_feature:-yes} -cloop=${default_feature:-yes} -dmg=${default_feature:-yes} 
-qcow1=${default_feature:-yes} -vdi=${default_feature:-yes} -vvfat=${default_feature:-yes} -qed=${default_feature:-yes} -parallels=${default_feature:-yes} -libxml2="$auto" -debug_mutex="no" -libpmem="$auto" -default_devices="true" plugins="$default_feature" -fuzzing="no" -rng_none="no" -secret_keyring="$default_feature" -libdaxctl="$auto" meson="" ninja="" +bindir="bin" skip_meson=no -gettext="$auto" -fuse="$auto" -fuse_lseek="$auto" -multiprocess="$auto" -slirp_smbd="$default_feature" +vfio_user_server="disabled" -malloc_trim="auto" -gio="$default_feature" +# The following Meson options are handled manually (still they +# are included in the automatically generated help message) + +# 1. Track which submodules are needed +fdt="auto" + +# 2. Automatically enable/disable other options +tcg="auto" +cfi="false" # parse CC options second for opt do @@ -465,28 +315,29 @@ for opt do ;; --cpu=*) cpu="$optarg" ;; - --extra-cflags=*) QEMU_CFLAGS="$QEMU_CFLAGS $optarg" - QEMU_LDFLAGS="$QEMU_LDFLAGS $optarg" + --extra-cflags=*) + EXTRA_CFLAGS="$EXTRA_CFLAGS $optarg" + EXTRA_CXXFLAGS="$EXTRA_CXXFLAGS $optarg" + EXTRA_OBJCFLAGS="$EXTRA_OBJCFLAGS $optarg" + ;; + --extra-cxxflags=*) EXTRA_CXXFLAGS="$EXTRA_CXXFLAGS $optarg" ;; - --extra-cxxflags=*) QEMU_CXXFLAGS="$QEMU_CXXFLAGS $optarg" + --extra-objcflags=*) EXTRA_OBJCFLAGS="$EXTRA_OBJCFLAGS $optarg" ;; - --extra-ldflags=*) QEMU_LDFLAGS="$QEMU_LDFLAGS $optarg" - EXTRA_LDFLAGS="$optarg" - ;; - --enable-debug-info) debug_info="yes" - ;; - --disable-debug-info) debug_info="no" + --extra-ldflags=*) EXTRA_LDFLAGS="$EXTRA_LDFLAGS $optarg" ;; --cross-cc-*[!a-zA-Z0-9_-]*=*) error_exit "Passed bad --cross-cc-FOO option" ;; - --cross-cc-cflags-*) cc_arch=${opt#--cross-cc-flags-}; cc_arch=${cc_arch%%=*} + --cross-cc-cflags-*) cc_arch=${opt#--cross-cc-cflags-}; cc_arch=${cc_arch%%=*} eval "cross_cc_cflags_${cc_arch}=\$optarg" - cross_cc_vars="$cross_cc_vars cross_cc_cflags_${cc_arch}" ;; --cross-cc-*) cc_arch=${opt#--cross-cc-}; cc_arch=${cc_arch%%=*} - cc_archs="$cc_archs $cc_arch" eval "cross_cc_${cc_arch}=\$optarg" - cross_cc_vars="$cross_cc_vars cross_cc_${cc_arch}" + ;; + --cross-prefix-*[!a-zA-Z0-9_-]*=*) error_exit "Passed bad --cross-prefix-FOO option" + ;; + --cross-prefix-*) cc_arch=${opt#--cross-prefix-}; cc_arch=${cc_arch%%=*} + eval "cross_prefix_${cc_arch}=\$optarg" ;; esac done @@ -513,12 +364,13 @@ fi ar="${AR-${cross_prefix}ar}" as="${AS-${cross_prefix}as}" ccas="${CCAS-$cc}" -cpp="${CPP-$cc -E}" objcopy="${OBJCOPY-${cross_prefix}objcopy}" ld="${LD-${cross_prefix}ld}" ranlib="${RANLIB-${cross_prefix}ranlib}" nm="${NM-${cross_prefix}nm}" +smbd="$SMBD" strip="${STRIP-${cross_prefix}strip}" +widl="${WIDL-${cross_prefix}widl}" windres="${WINDRES-${cross_prefix}windres}" pkg_config_exe="${PKG_CONFIG-${cross_prefix}pkg-config}" query_pkg_config() { @@ -532,11 +384,13 @@ sdl2_config="${SDL2_CONFIG-${cross_prefix}sdl2-config}" # left shift of signed integers is well defined and has the expected # 2s-complement style results. (Both clang and gcc agree that it # provides these semantics.) 
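The comment above is about the -fwrapv kept in the QEMU_CFLAGS assignment that follows: it asks for a C dialect in which signed shifts behave as plain two's-complement operations instead of undefined behaviour. A tiny standalone illustration of the guarantee being relied on (not code from the tree):

#include <stdio.h>

int main(void)
{
    int x = -1;

    /*
     * Shifting a negative value left is undefined in strict ISO C, but per
     * the comment above both gcc and clang define it under -fwrapv as a
     * two's-complement shift, so this reliably prints -16.
     */
    printf("%d\n", x << 4);
    return 0;
}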
-QEMU_CFLAGS="-fno-strict-aliasing -fno-common -fwrapv $QEMU_CFLAGS" +QEMU_CFLAGS="-fno-strict-aliasing -fno-common -fwrapv" QEMU_CFLAGS="-Wundef -Wwrite-strings -Wmissing-prototypes $QEMU_CFLAGS" QEMU_CFLAGS="-Wstrict-prototypes -Wredundant-decls $QEMU_CFLAGS" QEMU_CFLAGS="-D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE $QEMU_CFLAGS" +QEMU_LDFLAGS= + # Flags that are needed during configure but later taken care of by Meson CONFIGURE_CFLAGS="-std=gnu11 -Wall" CONFIGURE_LDFLAGS= @@ -566,55 +420,94 @@ int main(void) { return 0; } EOF } -write_c_fuzzer_skeleton() { - cat > $TMPC < -#include -int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); -int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { return 0; } -EOF -} - if check_define __linux__ ; then - targetos="Linux" + targetos=linux elif check_define _WIN32 ; then - targetos='MINGW32' + targetos=windows elif check_define __OpenBSD__ ; then - targetos='OpenBSD' + targetos=openbsd elif check_define __sun__ ; then - targetos='SunOS' + targetos=sunos elif check_define __HAIKU__ ; then - targetos='Haiku' + targetos=haiku elif check_define __FreeBSD__ ; then - targetos='FreeBSD' + targetos=freebsd elif check_define __FreeBSD_kernel__ && check_define __GLIBC__; then - targetos='GNU/kFreeBSD' + targetos=gnu/kfreebsd elif check_define __DragonFly__ ; then - targetos='DragonFly' + targetos=dragonfly elif check_define __NetBSD__; then - targetos='NetBSD' + targetos=netbsd elif check_define __APPLE__; then - targetos='Darwin' + targetos=darwin else # This is a fatal error, but don't report it yet, because we # might be going to just print the --help text, or it might # be the result of a missing compiler. - targetos='bogus' + targetos=bogus fi -# Some host OSes need non-standard checks for which CPU to use. -# Note that these checks are broken for cross-compilation: if you're -# cross-compiling to one of these OSes then you'll need to specify -# the correct CPU with the --cpu option. +# OS specific + +mingw32="no" +bsd="no" +linux="no" +solaris="no" case $targetos in -Darwin) - HOST_DSOSUF=".dylib" - ;; -SunOS) +windows) + mingw32="yes" + plugins="no" + pie="no" +;; +gnu/kfreebsd) + bsd="yes" +;; +freebsd) + bsd="yes" + make="${MAKE-gmake}" + # needed for kinfo_getvmmap(3) in libutil.h +;; +dragonfly) + bsd="yes" + make="${MAKE-gmake}" +;; +netbsd) + bsd="yes" + make="${MAKE-gmake}" +;; +openbsd) + bsd="yes" + make="${MAKE-gmake}" +;; +darwin) + bsd="yes" + darwin="yes" + # Disable attempts to use ObjectiveC features in os/object.h since they + # won't work when we're compiling with gcc as a C compiler. + QEMU_CFLAGS="-DOS_OBJECT_USE_OBJC=0 $QEMU_CFLAGS" +;; +sunos) + solaris="yes" + make="${MAKE-gmake}" +# needed for CMSG_ macros in sys/socket.h + QEMU_CFLAGS="-D_XOPEN_SOURCE=600 $QEMU_CFLAGS" +# needed for TIOCWIN* defines in termios.h + QEMU_CFLAGS="-D__EXTENSIONS__ $QEMU_CFLAGS" # $(uname -m) returns i86pc even on an x86_64 box, so default based on isainfo + # Note that this check is broken for cross-compilation: if you're + # cross-compiling to one of these OSes then you'll need to specify + # the correct CPU with the --cpu option. if test -z "$cpu" && test "$(isainfo -k)" = "amd64"; then cpu="x86_64" fi +;; +haiku) + pie="no" + QEMU_CFLAGS="-DB_USE_POSITIVE_POSIX_ERRORS -D_BSD_SOURCE -fPIC $QEMU_CFLAGS" +;; +linux) + linux="yes" +;; esac if test ! 
-z "$cpu" ; then @@ -653,143 +546,58 @@ elif check_define __s390__ ; then cpu="s390" fi elif check_define __riscv ; then - if check_define _LP64 ; then - cpu="riscv64" - else - cpu="riscv32" - fi + cpu="riscv" elif check_define __arm__ ; then cpu="arm" elif check_define __aarch64__ ; then cpu="aarch64" +elif check_define __loongarch64 ; then + cpu="loongarch64" else cpu=$(uname -m) fi -ARCH= -# Normalise host CPU name and set ARCH. +# Normalise host CPU name, set multilib cflags # Note that this case should only have supported host CPUs, not guests. case "$cpu" in - ppc|ppc64|s390x|sparc64|x32|riscv32|riscv64) - ;; - ppc64le) - ARCH="ppc64" - ;; + armv*b|armv*l|arm) + cpu="arm" ;; + i386|i486|i586|i686|i86pc|BePC) cpu="i386" - ;; + CPU_CFLAGS="-m32" ;; + x32) + cpu="x86_64" + CPU_CFLAGS="-mx32" ;; x86_64|amd64) cpu="x86_64" - ;; - armv*b|armv*l|arm) - cpu="arm" - ;; - aarch64) - cpu="aarch64" - ;; + # ??? Only extremely old AMD cpus do not have cmpxchg16b. + # If we truly care, we should simply detect this case at + # runtime and generate the fallback to serial emulation. + CPU_CFLAGS="-m64 -mcx16" ;; + mips*) - cpu="mips" - ;; + cpu="mips" ;; + + ppc) + CPU_CFLAGS="-m32" ;; + ppc64) + CPU_CFLAGS="-m64 -mbig-endian" ;; + ppc64le) + cpu="ppc64" + CPU_CFLAGS="-m64 -mlittle-endian" ;; + + s390) + CPU_CFLAGS="-m31" ;; + s390x) + CPU_CFLAGS="-m64" ;; + sparc|sun4[cdmuv]) cpu="sparc" - ;; - *) - # This will result in either an error or falling back to TCI later - ARCH=unknown - ;; + CPU_CFLAGS="-m32 -mv8plus -mcpu=ultrasparc" ;; + sparc64) + CPU_CFLAGS="-m64 -mcpu=ultrasparc" ;; esac -if test -z "$ARCH"; then - ARCH="$cpu" -fi - -# OS specific - -case $targetos in -MINGW32*) - mingw32="yes" - audio_possible_drivers="dsound sdl" - if check_include dsound.h; then - audio_drv_list="dsound" - else - audio_drv_list="" - fi - supported_os="yes" - plugins="no" - pie="no" -;; -GNU/kFreeBSD) - bsd="yes" - audio_drv_list="oss try-sdl" - audio_possible_drivers="oss sdl pa" -;; -FreeBSD) - bsd="yes" - make="${MAKE-gmake}" - audio_drv_list="oss try-sdl" - audio_possible_drivers="oss sdl pa" - # needed for kinfo_getvmmap(3) in libutil.h - netmap="" # enable netmap autodetect -;; -DragonFly) - bsd="yes" - make="${MAKE-gmake}" - audio_drv_list="oss try-sdl" - audio_possible_drivers="oss sdl pa" -;; -NetBSD) - bsd="yes" - make="${MAKE-gmake}" - audio_drv_list="oss try-sdl" - audio_possible_drivers="oss sdl" - oss_lib="-lossaudio" -;; -OpenBSD) - bsd="yes" - make="${MAKE-gmake}" - audio_drv_list="try-sdl" - audio_possible_drivers="sdl" -;; -Darwin) - bsd="yes" - darwin="yes" - audio_drv_list="try-coreaudio try-sdl" - audio_possible_drivers="coreaudio sdl" - # Disable attempts to use ObjectiveC features in os/object.h since they - # won't work when we're compiling with gcc as a C compiler. 
- QEMU_CFLAGS="-DOS_OBJECT_USE_OBJC=0 $QEMU_CFLAGS" -;; -SunOS) - solaris="yes" - make="${MAKE-gmake}" - smbd="${SMBD-/usr/sfw/sbin/smbd}" - if test -f /usr/include/sys/soundcard.h ; then - audio_drv_list="oss try-sdl" - fi - audio_possible_drivers="oss sdl" -# needed for CMSG_ macros in sys/socket.h - QEMU_CFLAGS="-D_XOPEN_SOURCE=600 $QEMU_CFLAGS" -# needed for TIOCWIN* defines in termios.h - QEMU_CFLAGS="-D__EXTENSIONS__ $QEMU_CFLAGS" -;; -Haiku) - haiku="yes" - pie="no" - QEMU_CFLAGS="-DB_USE_POSITIVE_POSIX_ERRORS -D_BSD_SOURCE -fPIC $QEMU_CFLAGS" -;; -Linux) - audio_drv_list="try-pa oss" - audio_possible_drivers="oss alsa sdl pa" - linux="yes" - linux_user="yes" - vhost_user=${default_feature:-yes} -;; -esac - -if [ "$bsd" = "yes" ] ; then - if [ "$darwin" != "yes" ] ; then - bsd_user="yes" - fi -fi : ${make=${MAKE-make}} @@ -828,17 +636,47 @@ fi if test "$mingw32" = "yes" ; then EXESUF=".exe" - HOST_DSOSUF=".dll" # MinGW needs -mthreads for TLS and macro _MT. CONFIGURE_CFLAGS="-mthreads $CONFIGURE_CFLAGS" write_c_skeleton; prefix="/qemu" + bindir="" qemu_suffix="" - libs_qga="-lws2_32 -lwinmm -lpowrprof -lwtsapi32 -lwininet -liphlpapi -lnetapi32 $libs_qga" fi werror="" +meson_option_build_array() { + printf '[' + (if test "$targetos" = windows; then + IFS=\; + else + IFS=: + fi + for e in $1; do + printf '"""' + # backslash escape any '\' and '"' characters + printf "%s" "$e" | sed -e 's/\([\"]\)/\\\1/g' + printf '""",' + done) + printf ']\n' +} + +. "$source_path/scripts/meson-buildoptions.sh" + +meson_options= +meson_option_add() { + meson_options="$meson_options $(quote_sh "$1")" +} +meson_option_parse() { + meson_options="$meson_options $(_meson_option_parse "$@")" + if test $? -eq 1; then + echo "ERROR: unknown option $1" + echo "Try '$0 --help' for more information" + exit 1 + fi +} + for opt do optarg=$(expr "x$opt" : 'x[^=]*=\(.*\)') case "$opt" in @@ -848,8 +686,6 @@ for opt do ;; --prefix=*) prefix="$optarg" ;; - --interp-prefix=*) interp_prefix="$optarg" - ;; --cross-prefix=*) ;; --cc=*) @@ -858,8 +694,6 @@ for opt do ;; --cxx=*) ;; - --iasl=*) iasl="$optarg" - ;; --objcc=*) objcc="$optarg" ;; --make=*) make="$optarg" @@ -868,8 +702,6 @@ for opt do ;; --python=*) python="$optarg" ; explicit_python=yes ;; - --sphinx-build=*) sphinx_build="$optarg" - ;; --skip-meson) skip_meson=yes ;; --meson=*) meson="$optarg" @@ -882,24 +714,24 @@ for opt do ;; --extra-cxxflags=*) ;; + --extra-objcflags=*) + ;; --extra-ldflags=*) ;; - --enable-debug-info) - ;; - --disable-debug-info) - ;; --cross-cc-*) ;; + --cross-prefix-*) + ;; + --enable-debug-info) meson_option_add -Ddebug=true + ;; + --disable-debug-info) meson_option_add -Ddebug=false + ;; --enable-modules) modules="yes" ;; --disable-modules) modules="no" ;; - --disable-module-upgrades) module_upgrades="no" - ;; - --enable-module-upgrades) module_upgrades="yes" - ;; --cpu=*) ;; --target-list=*) target_list="$optarg" @@ -912,16 +744,9 @@ for opt do error_exit "Can't mix --target-list-exclude with --target-list" fi ;; - --enable-trace-backends=*) trace_backends="$optarg" + --with-default-devices) meson_option_add -Ddefault_devices=true ;; - # XXX: backwards compatibility - --enable-trace-backend=*) trace_backends="$optarg" - ;; - --with-trace-file=*) trace_file="$optarg" - ;; - --with-default-devices) default_devices="true" - ;; - --without-default-devices) default_devices="false" + --without-default-devices) meson_option_add -Ddefault_devices=false ;; --with-devices-*[!a-zA-Z0-9_-]*=*) error_exit "Passed bad --with-devices-FOO option" 
;; @@ -937,38 +762,14 @@ for opt do ;; --without-default-features) # processed above ;; - --enable-gprof) gprof="yes" - ;; - --enable-gcov) gcov="yes" - ;; --static) static="yes" QEMU_PKG_CONFIG_FLAGS="--static $QEMU_PKG_CONFIG_FLAGS" ;; - --mandir=*) mandir="$optarg" - ;; --bindir=*) bindir="$optarg" ;; - --libdir=*) libdir="$optarg" - ;; - --libexecdir=*) libexecdir="$optarg" - ;; - --includedir=*) includedir="$optarg" - ;; - --datadir=*) datadir="$optarg" - ;; --with-suffix=*) qemu_suffix="$optarg" ;; - --docdir=*) docdir="$optarg" - ;; - --localedir=*) localedir="$optarg" - ;; - --sysconfdir=*) sysconfdir="$optarg" - ;; - --localstatedir=*) local_statedir="$optarg" - ;; - --firmwarepath=*) firmwarepath="$optarg" - ;; --host=*|--build=*|\ --disable-dependency-tracking|\ --sbindir=*|--sharedstatedir=*|\ @@ -979,54 +780,6 @@ for opt do # configure to be used by RPM and similar macros that set # lots of directory switches by default. ;; - --disable-sdl) sdl="disabled" - ;; - --enable-sdl) sdl="enabled" - ;; - --disable-sdl-image) sdl_image="disabled" - ;; - --enable-sdl-image) sdl_image="enabled" - ;; - --disable-qom-cast-debug) qom_cast_debug="no" - ;; - --enable-qom-cast-debug) qom_cast_debug="yes" - ;; - --disable-virtfs) virtfs="disabled" - ;; - --enable-virtfs) virtfs="enabled" - ;; - --disable-libudev) libudev="disabled" - ;; - --enable-libudev) libudev="enabled" - ;; - --disable-virtiofsd) virtiofsd="disabled" - ;; - --enable-virtiofsd) virtiofsd="enabled" - ;; - --disable-mpath) mpath="disabled" - ;; - --enable-mpath) mpath="enabled" - ;; - --disable-vnc) vnc="disabled" - ;; - --enable-vnc) vnc="enabled" - ;; - --disable-gettext) gettext="disabled" - ;; - --enable-gettext) gettext="enabled" - ;; - --oss-lib=*) oss_lib="$optarg" - ;; - --audio-drv-list=*) audio_drv_list="$optarg" - ;; - --block-drv-rw-whitelist=*|--block-drv-whitelist=*) block_drv_rw_whitelist=$(echo "$optarg" | sed -e 's/,/ /g') - ;; - --block-drv-ro-whitelist=*) block_drv_ro_whitelist=$(echo "$optarg" | sed -e 's/,/ /g') - ;; - --enable-block-drv-whitelist-in-tools) block_drv_whitelist_tools="yes" - ;; - --disable-block-drv-whitelist-in-tools) block_drv_whitelist_tools="no" - ;; --enable-debug-tcg) debug_tcg="yes" ;; --disable-debug-tcg) debug_tcg="no" @@ -1034,9 +787,8 @@ for opt do --enable-debug) # Enable debugging options that aren't excessively noisy debug_tcg="yes" - debug_mutex="yes" - debug="yes" - strip_opt="no" + meson_option_parse --enable-debug-mutex "" + meson_option_add -Doptimization=0 fortify_source="no" ;; --enable-sanitizers) sanitizers="yes" @@ -1047,123 +799,15 @@ for opt do ;; --disable-tsan) tsan="no" ;; - --enable-sparse) sparse="enabled" - ;; - --disable-sparse) sparse="disabled" - ;; --disable-fortify-source) fortify_source="no" ;; --enable-fortify-source) fortify_source="yes" ;; - --disable-strip) strip_opt="no" - ;; - --disable-vnc-sasl) vnc_sasl="disabled" - ;; - --enable-vnc-sasl) vnc_sasl="enabled" - ;; - --disable-vnc-jpeg) vnc_jpeg="disabled" - ;; - --enable-vnc-jpeg) vnc_jpeg="enabled" - ;; - --disable-vnc-png) vnc_png="disabled" - ;; - --enable-vnc-png) vnc_png="enabled" - ;; - --disable-slirp) slirp="disabled" - ;; - --enable-slirp) slirp="enabled" - ;; - --enable-slirp=git) slirp="internal" - ;; - --enable-slirp=system) slirp="system" - ;; - --disable-vde) vde="no" - ;; - --enable-vde) vde="yes" - ;; - --disable-netmap) netmap="no" - ;; - --enable-netmap) netmap="yes" - ;; - --disable-xen) xen="disabled" - ;; - --enable-xen) xen="enabled" - ;; - --disable-xen-pci-passthrough) 
xen_pci_passthrough="disabled" - ;; - --enable-xen-pci-passthrough) xen_pci_passthrough="enabled" - ;; - --disable-brlapi) brlapi="disabled" - ;; - --enable-brlapi) brlapi="enabled" - ;; - --disable-kvm) kvm="disabled" - ;; - --enable-kvm) kvm="enabled" - ;; - --disable-hax) hax="disabled" - ;; - --enable-hax) hax="enabled" - ;; - --disable-hvf) hvf="disabled" - ;; - --enable-hvf) hvf="enabled" - ;; - --disable-nvmm) nvmm="disabled" - ;; - --enable-nvmm) nvmm="enabled" - ;; - --disable-renderdoc) renderdoc="disabled" - ;; - --enable-renderdoc) renderdoc="enabled" - ;; - --disable-whpx) whpx="disabled" - ;; - --enable-whpx) whpx="enabled" - ;; - --disable-tcg-interpreter) tcg_interpreter="false" - ;; - --enable-tcg-interpreter) tcg_interpreter="true" - ;; - --disable-cap-ng) cap_ng="disabled" - ;; - --enable-cap-ng) cap_ng="enabled" - ;; --disable-tcg) tcg="disabled" plugins="no" ;; --enable-tcg) tcg="enabled" ;; - --disable-malloc-trim) malloc_trim="disabled" - ;; - --enable-malloc-trim) malloc_trim="enabled" - ;; - --disable-spice) spice="no" - ;; - --enable-spice) - spice_protocol="yes" - spice="yes" - ;; - --disable-spice-protocol) - spice_protocol="no" - spice="no" - ;; - --enable-spice-protocol) spice_protocol="yes" - ;; - --disable-libiscsi) libiscsi="disabled" - ;; - --enable-libiscsi) libiscsi="enabled" - ;; - --disable-libnfs) libnfs="disabled" - ;; - --enable-libnfs) libnfs="enabled" - ;; - --enable-profiler) profiler="yes" - ;; - --disable-cocoa) cocoa="disabled" - ;; - --enable-cocoa) cocoa="enabled" - ;; --disable-system) softmmu="no" ;; --enable-system) softmmu="yes" @@ -1189,10 +833,6 @@ for opt do ;; --disable-werror) werror="no" ;; - --enable-lto) lto="true" - ;; - --disable-lto) lto="false" - ;; --enable-stack-protector) stack_protector="yes" ;; --disable-stack-protector) stack_protector="no" @@ -1203,189 +843,22 @@ for opt do ;; --enable-cfi) cfi="true"; - lto="true"; + meson_option_add -Db_lto=true ;; --disable-cfi) cfi="false" ;; - --enable-cfi-debug) cfi_debug="true" - ;; - --disable-cfi-debug) cfi_debug="false" - ;; - --disable-curses) curses="disabled" - ;; - --enable-curses) curses="enabled" - ;; - --disable-iconv) iconv="disabled" - ;; - --enable-iconv) iconv="enabled" - ;; - --disable-curl) curl="disabled" - ;; - --enable-curl) curl="enabled" - ;; --disable-fdt) fdt="disabled" ;; --enable-fdt) fdt="enabled" ;; --enable-fdt=git) fdt="internal" ;; - --enable-fdt=system) fdt="system" - ;; - --disable-linux-aio) linux_aio="no" - ;; - --enable-linux-aio) linux_aio="yes" - ;; - --disable-linux-io-uring) linux_io_uring="disabled" - ;; - --enable-linux-io-uring) linux_io_uring="enabled" - ;; - --disable-attr) attr="disabled" - ;; - --enable-attr) attr="enabled" - ;; - --disable-membarrier) membarrier="no" - ;; - --enable-membarrier) membarrier="yes" - ;; - --disable-bpf) bpf="disabled" - ;; - --enable-bpf) bpf="enabled" - ;; - --disable-blobs) blobs="false" - ;; - --with-pkgversion=*) pkgversion="$optarg" + --enable-fdt=*) fdt="$optarg" ;; --with-coroutine=*) coroutine="$optarg" ;; - --disable-coroutine-pool) coroutine_pool="no" - ;; - --enable-coroutine-pool) coroutine_pool="yes" - ;; - --enable-debug-stack-usage) debug_stack_usage="yes" - ;; - --enable-crypto-afalg) crypto_afalg="yes" - ;; - --disable-crypto-afalg) crypto_afalg="no" - ;; - --disable-docs) docs="disabled" - ;; - --enable-docs) docs="enabled" - ;; - --disable-vhost-net) vhost_net="no" - ;; - --enable-vhost-net) vhost_net="yes" - ;; - --disable-vhost-crypto) vhost_crypto="no" - ;; - --enable-vhost-crypto) 
vhost_crypto="yes" - ;; - --disable-vhost-scsi) vhost_scsi="no" - ;; - --enable-vhost-scsi) vhost_scsi="yes" - ;; - --disable-vhost-vsock) vhost_vsock="no" - ;; - --enable-vhost-vsock) vhost_vsock="yes" - ;; - --disable-vhost-user-blk-server) vhost_user_blk_server="disabled" - ;; - --enable-vhost-user-blk-server) vhost_user_blk_server="enabled" - ;; - --disable-vhost-user-fs) vhost_user_fs="no" - ;; - --enable-vhost-user-fs) vhost_user_fs="yes" - ;; - --disable-opengl) opengl="no" - ;; - --enable-opengl) opengl="yes" - ;; - --disable-rbd) rbd="disabled" - ;; - --enable-rbd) rbd="enabled" - ;; - --disable-xfsctl) xfs="no" - ;; - --enable-xfsctl) xfs="yes" - ;; - --disable-smartcard) smartcard="disabled" - ;; - --enable-smartcard) smartcard="enabled" - ;; - --disable-u2f) u2f="disabled" - ;; - --enable-u2f) u2f="enabled" - ;; - --disable-libusb) libusb="disabled" - ;; - --enable-libusb) libusb="enabled" - ;; - --disable-usb-redir) usb_redir="disabled" - ;; - --enable-usb-redir) usb_redir="enabled" - ;; --disable-zlib-test) ;; - --disable-lzo) lzo="disabled" - ;; - --enable-lzo) lzo="enabled" - ;; - --disable-snappy) snappy="disabled" - ;; - --enable-snappy) snappy="enabled" - ;; - --disable-bzip2) bzip2="disabled" - ;; - --enable-bzip2) bzip2="enabled" - ;; - --enable-lzfse) lzfse="enabled" - ;; - --disable-lzfse) lzfse="disabled" - ;; - --disable-zstd) zstd="disabled" - ;; - --enable-zstd) zstd="enabled" - ;; - --enable-guest-agent) guest_agent="yes" - ;; - --disable-guest-agent) guest_agent="no" - ;; - --enable-guest-agent-msi) guest_agent_msi="enabled" - ;; - --disable-guest-agent-msi) guest_agent_msi="disabled" - ;; - --with-vss-sdk) vss_win32_sdk="" - ;; - --with-vss-sdk=*) vss_win32_sdk="$optarg" - ;; - --without-vss-sdk) vss_win32_sdk="no" - ;; - --with-win-sdk) win_sdk="" - ;; - --with-win-sdk=*) win_sdk="$optarg" - ;; - --without-win-sdk) win_sdk="no" - ;; - --enable-tools) want_tools="yes" - ;; - --disable-tools) want_tools="no" - ;; - --enable-seccomp) seccomp="enabled" - ;; - --disable-seccomp) seccomp="disabled" - ;; - --disable-glusterfs) glusterfs="disabled" - ;; - --disable-avx2) avx2_opt="no" - ;; - --enable-avx2) avx2_opt="yes" - ;; - --disable-avx512f) avx512f_opt="no" - ;; - --enable-avx512f) avx512f_opt="yes" - ;; - - --enable-glusterfs) glusterfs="enabled" - ;; --disable-virtio-blk-data-plane|--enable-virtio-blk-data-plane) echo "$0: $opt is obsolete, virtio-blk data-plane is always on" >&2 ;; @@ -1395,153 +868,11 @@ for opt do --enable-uuid|--disable-uuid) echo "$0: $opt is obsolete, UUID support is always built" >&2 ;; - --disable-gtk) gtk="disabled" - ;; - --enable-gtk) gtk="enabled" - ;; - --tls-priority=*) tls_priority="$optarg" - ;; - --disable-gnutls) gnutls="disabled" - ;; - --enable-gnutls) gnutls="enabled" - ;; - --disable-nettle) nettle="disabled" - ;; - --enable-nettle) nettle="enabled" - ;; - --disable-gcrypt) gcrypt="disabled" - ;; - --enable-gcrypt) gcrypt="enabled" - ;; - --disable-auth-pam) auth_pam="disabled" - ;; - --enable-auth-pam) auth_pam="enabled" - ;; - --enable-rdma) rdma="yes" - ;; - --disable-rdma) rdma="no" - ;; - --enable-pvrdma) pvrdma="yes" - ;; - --disable-pvrdma) pvrdma="no" - ;; - --disable-vte) vte="disabled" - ;; - --enable-vte) vte="enabled" - ;; - --disable-virglrenderer) virglrenderer="disabled" - ;; - --enable-virglrenderer) virglrenderer="enabled" - ;; - --disable-tpm) tpm="no" - ;; - --enable-tpm) tpm="yes" - ;; - --disable-libssh) libssh="no" - ;; - --enable-libssh) libssh="yes" - ;; - --disable-live-block-migration) 
live_block_migration="no" - ;; - --enable-live-block-migration) live_block_migration="yes" - ;; - --disable-numa) numa="no" - ;; - --enable-numa) numa="yes" - ;; - --disable-libxml2) libxml2="disabled" - ;; - --enable-libxml2) libxml2="enabled" - ;; - --disable-tcmalloc) tcmalloc="no" - ;; - --enable-tcmalloc) tcmalloc="yes" - ;; - --disable-jemalloc) jemalloc="no" - ;; - --enable-jemalloc) jemalloc="yes" - ;; - --disable-replication) replication="no" - ;; - --enable-replication) replication="yes" - ;; - --disable-bochs) bochs="no" - ;; - --enable-bochs) bochs="yes" - ;; - --disable-cloop) cloop="no" - ;; - --enable-cloop) cloop="yes" - ;; - --disable-dmg) dmg="no" - ;; - --enable-dmg) dmg="yes" - ;; - --disable-qcow1) qcow1="no" - ;; - --enable-qcow1) qcow1="yes" - ;; - --disable-vdi) vdi="no" - ;; - --enable-vdi) vdi="yes" - ;; - --disable-vvfat) vvfat="no" - ;; - --enable-vvfat) vvfat="yes" - ;; - --disable-qed) qed="no" - ;; - --enable-qed) qed="yes" - ;; - --disable-parallels) parallels="no" - ;; - --enable-parallels) parallels="yes" - ;; - --disable-vhost-user) vhost_user="no" - ;; - --enable-vhost-user) vhost_user="yes" - ;; - --disable-vhost-vdpa) vhost_vdpa="no" - ;; - --enable-vhost-vdpa) vhost_vdpa="yes" - ;; - --disable-vhost-kernel) vhost_kernel="no" - ;; - --enable-vhost-kernel) vhost_kernel="yes" - ;; - --disable-capstone) capstone="disabled" - ;; - --enable-capstone) capstone="enabled" - ;; - --enable-capstone=git) capstone="internal" - ;; - --enable-capstone=system) capstone="system" - ;; --with-git=*) git="$optarg" ;; - --enable-git-update) - git_submodules_action="update" - echo "--enable-git-update deprecated, use --with-git-submodules=update" - ;; - --disable-git-update) - git_submodules_action="validate" - echo "--disable-git-update deprecated, use --with-git-submodules=validate" - ;; --with-git-submodules=*) git_submodules_action="$optarg" ;; - --enable-debug-mutex) debug_mutex=yes - ;; - --disable-debug-mutex) debug_mutex=no - ;; - --enable-libpmem) libpmem="enabled" - ;; - --disable-libpmem) libpmem="disabled" - ;; - --enable-xkbcommon) xkbcommon="enabled" - ;; - --disable-xkbcommon) xkbcommon="disabled" - ;; --enable-plugins) if test "$mingw32" = "yes"; then error_exit "TCG plugins not currently supported on Windows platforms" else @@ -1554,48 +885,23 @@ for opt do ;; --disable-containers) use_containers="no" ;; - --enable-fuzzing) fuzzing=yes - ;; - --disable-fuzzing) fuzzing=no - ;; --gdb=*) gdb_bin="$optarg" ;; - --enable-rng-none) rng_none=yes + # backwards compatibility options + --enable-trace-backend=*) meson_option_parse "--enable-trace-backends=$optarg" "$optarg" ;; - --disable-rng-none) rng_none=no + --disable-blobs) meson_option_parse --disable-install-blobs "" ;; - --enable-keyring) secret_keyring="yes" + --enable-vfio-user-server) vfio_user_server="enabled" ;; - --disable-keyring) secret_keyring="no" + --disable-vfio-user-server) vfio_user_server="disabled" ;; - --enable-libdaxctl) libdaxctl="enabled" + --enable-tcmalloc) meson_option_parse --enable-malloc=tcmalloc tcmalloc ;; - --disable-libdaxctl) libdaxctl="disabled" + --enable-jemalloc) meson_option_parse --enable-malloc=jemalloc jemalloc ;; - --enable-fuse) fuse="enabled" - ;; - --disable-fuse) fuse="disabled" - ;; - --enable-fuse-lseek) fuse_lseek="enabled" - ;; - --disable-fuse-lseek) fuse_lseek="disabled" - ;; - --enable-multiprocess) multiprocess="enabled" - ;; - --disable-multiprocess) multiprocess="disabled" - ;; - --enable-gio) gio=yes - ;; - --disable-gio) gio=no - ;; - 
--enable-slirp-smbd) slirp_smbd=yes - ;; - --disable-slirp-smbd) slirp_smbd=no - ;; - *) - echo "ERROR: unknown option $opt" - echo "Try '$0 --help' for more information" - exit 1 + # everything else has the same name in configure and meson + --*) meson_option_parse "$opt" "$optarg" ;; esac done @@ -1642,82 +948,24 @@ case $git_submodules_action in ;; esac -libdir="${libdir:-$prefix/lib}" -libexecdir="${libexecdir:-$prefix/libexec}" -includedir="${includedir:-$prefix/include}" - -if test "$mingw32" = "yes" ; then - bindir="${bindir:-$prefix}" -else - bindir="${bindir:-$prefix/bin}" -fi -mandir="${mandir:-$prefix/share/man}" -datadir="${datadir:-$prefix/share}" -docdir="${docdir:-$prefix/share/doc}" -sysconfdir="${sysconfdir:-$prefix/etc}" -local_statedir="${local_statedir:-$prefix/var}" -firmwarepath="${firmwarepath:-$datadir/qemu-firmware}" -localedir="${localedir:-$datadir/locale}" - -case "$cpu" in - ppc) - CPU_CFLAGS="-m32" - QEMU_LDFLAGS="-m32 $QEMU_LDFLAGS" - ;; - ppc64) - CPU_CFLAGS="-m64" - QEMU_LDFLAGS="-m64 $QEMU_LDFLAGS" - ;; - sparc) - CPU_CFLAGS="-m32 -mv8plus -mcpu=ultrasparc" - QEMU_LDFLAGS="-m32 -mv8plus $QEMU_LDFLAGS" - ;; - sparc64) - CPU_CFLAGS="-m64 -mcpu=ultrasparc" - QEMU_LDFLAGS="-m64 $QEMU_LDFLAGS" - ;; - s390) - CPU_CFLAGS="-m31" - QEMU_LDFLAGS="-m31 $QEMU_LDFLAGS" - ;; - s390x) - CPU_CFLAGS="-m64" - QEMU_LDFLAGS="-m64 $QEMU_LDFLAGS" - ;; - i386) - CPU_CFLAGS="-m32" - QEMU_LDFLAGS="-m32 $QEMU_LDFLAGS" - ;; - x86_64) - # ??? Only extremely old AMD cpus do not have cmpxchg16b. - # If we truly care, we should simply detect this case at - # runtime and generate the fallback to serial emulation. - CPU_CFLAGS="-m64 -mcx16" - QEMU_LDFLAGS="-m64 $QEMU_LDFLAGS" - ;; - x32) - CPU_CFLAGS="-mx32" - QEMU_LDFLAGS="-mx32 $QEMU_LDFLAGS" - ;; - # No special flags required for other host CPUs -esac - -eval "cross_cc_${cpu}=\$cc" -cross_cc_vars="$cross_cc_vars cross_cc_${cpu}" -QEMU_CFLAGS="$CPU_CFLAGS $QEMU_CFLAGS" - -# For user-mode emulation the host arch has to be one we explicitly -# support, even if we're using TCI. -if [ "$ARCH" = "unknown" ]; then - bsd_user="no" - linux_user="no" -fi - default_target_list="" -deprecated_targets_list=ppc64abi32-linux-user -deprecated_features="" mak_wilds="" +if [ "$linux_user" != no ]; then + if [ "$targetos" = linux ] && [ -d "$source_path/linux-user/include/host/$cpu" ]; then + linux_user=yes + elif [ "$linux_user" = yes ]; then + error_exit "linux-user not supported on this architecture" + fi +fi +if [ "$bsd_user" != no ]; then + if [ "$bsd_user" = "" ]; then + test $targetos = freebsd && bsd_user=yes + fi + if [ "$bsd_user" = yes ] && ! [ -d "$source_path/bsd-user/$targetos" ]; then + error_exit "bsd-user not supported on this host OS" + fi +fi if [ "$softmmu" = "yes" ]; then mak_wilds="${mak_wilds} $source_path/configs/targets/*-softmmu.mak" fi @@ -1728,16 +976,6 @@ if [ "$bsd_user" = "yes" ]; then mak_wilds="${mak_wilds} $source_path/configs/targets/*-bsd-user.mak" fi -# If the user doesn't explicitly specify a deprecated target we will -# skip it. 
-if test -z "$target_list"; then - if test -z "$target_list_exclude"; then - target_list_exclude="$deprecated_targets_list" - else - target_list_exclude="$target_list_exclude,$deprecated_targets_list" - fi -fi - for config in $mak_wilds; do target="$(basename "$config" .mak)" if echo "$target_list_exclude" | grep -vq "$target"; then @@ -1745,9 +983,6 @@ for config in $mak_wilds; do fi done -# Enumerate public trace backends for --help output -trace_backend_list=$(echo $(grep -le '^PUBLIC = True$' "$source_path"/scripts/tracetool/backend/*.py | sed -e 's/^.*\/\(.*\)\.py$/\1/')) - if test x"$show_help" = x"yes" ; then cat << EOF @@ -1757,31 +992,27 @@ Options: [defaults in brackets after descriptions] Standard options: --help print this message --prefix=PREFIX install in PREFIX [$prefix] - --interp-prefix=PREFIX where to find shared libraries, etc. - use %M for cpu name [$interp_prefix] - --target-list=LIST set target list (default: build all non-deprecated) + --target-list=LIST set target list (default: build all) $(echo Available targets: $default_target_list | \ fold -s -w 53 | sed -e 's/^/ /') -$(echo Deprecated targets: $deprecated_targets_list | \ - fold -s -w 53 | sed -e 's/^/ /') --target-list-exclude=LIST exclude a set of targets from the default target-list Advanced options (experts only): --cross-prefix=PREFIX use PREFIX for compile tools, PREFIX can be blank [$cross_prefix] --cc=CC use C compiler CC [$cc] - --iasl=IASL use ACPI compiler IASL [$iasl] --host-cc=CC use C compiler CC [$host_cc] for code run at build time --cxx=CXX use C++ compiler CXX [$cxx] --objcc=OBJCC use Objective-C compiler OBJCC [$objcc] - --extra-cflags=CFLAGS append extra C compiler flags QEMU_CFLAGS - --extra-cxxflags=CXXFLAGS append extra C++ compiler flags QEMU_CXXFLAGS + --extra-cflags=CFLAGS append extra C compiler flags CFLAGS + --extra-cxxflags=CXXFLAGS append extra C++ compiler flags CXXFLAGS + --extra-objcflags=OBJCFLAGS append extra Objective C compiler flags OBJCFLAGS --extra-ldflags=LDFLAGS append extra linker flags LDFLAGS --cross-cc-ARCH=CC use compiler when building ARCH guest test cases - --cross-cc-flags-ARCH= use compiler flags when building ARCH guest tests + --cross-cc-cflags-ARCH= use compiler flags when building ARCH guest tests + --cross-prefix-ARCH=PREFIX cross compiler prefix when building ARCH guest test cases --make=MAKE use specified make [$make] --python=PYTHON use specified python [$python] - --sphinx-build=SPHINX use specified sphinx-build [$sphinx_build] --meson=MESON use specified meson [$meson] --ninja=NINJA use specified ninja [$ninja] --smbd=SMBD use specified smbd [$smbd] @@ -1790,19 +1021,8 @@ Advanced options (experts only): --with-git-submodules=validate fail if git submodules are not up to date --with-git-submodules=ignore do not update or check git submodules (default if no .git dir) --static enable static build [$static] - --mandir=PATH install man pages in PATH - --datadir=PATH install firmware in PATH/$qemu_suffix - --localedir=PATH install translation in PATH/$qemu_suffix - --docdir=PATH install documentation in PATH/$qemu_suffix --bindir=PATH install binaries in PATH - --libdir=PATH install libraries in PATH - --libexecdir=PATH install helper binaries in PATH - --sysconfdir=PATH install config in PATH/$qemu_suffix - --localstatedir=PATH install local state in PATH (set at runtime on win32) - --firmwarepath=PATH search PATH for firmware files - --efi-aarch64=PATH PATH of efi file to use for aarch64 VMs. 
--with-suffix=SUFFIX suffix for QEMU data inside datadir/libdir/sysconfdir/docdir [$qemu_suffix] - --with-pkgversion=VERS use specified string as sub-version of the package --without-default-features default all --enable-* options to "disabled" --without-default-devices do not include any device that is not needed to start the emulator (only use if you are including @@ -1811,175 +1031,28 @@ Advanced options (experts only): --enable-debug enable common debug build options --enable-sanitizers enable default sanitizers --enable-tsan enable thread sanitizer - --disable-strip disable stripping binaries --disable-werror disable compilation abort on warning --disable-stack-protector disable compiler-provided stack protection - --audio-drv-list=LIST set audio drivers list: - Available drivers: $audio_possible_drivers - --block-drv-whitelist=L Same as --block-drv-rw-whitelist=L - --block-drv-rw-whitelist=L - set block driver read-write whitelist - (by default affects only QEMU, not tools like qemu-img) - --block-drv-ro-whitelist=L - set block driver read-only whitelist - (by default affects only QEMU, not tools like qemu-img) - --enable-block-drv-whitelist-in-tools - use block whitelist also in tools instead of only QEMU - --enable-trace-backends=B Set trace backend - Available backends: $trace_backend_list - --with-trace-file=NAME Full PATH,NAME of file to store traces - Default:trace- - --disable-slirp disable SLIRP userspace network connectivity - --enable-tcg-interpreter enable TCI (TCG with bytecode interpreter, experimental and slow) - --enable-malloc-trim enable libc malloc_trim() for memory optimization - --oss-lib path to OSS library --cpu=CPU Build for host CPU [$cpu] --with-coroutine=BACKEND coroutine backend. Supported options: ucontext, sigaltstack, windows - --enable-gcov enable test coverage analysis with gcov - --disable-blobs disable installing provided firmware blobs - --with-vss-sdk=SDK-path enable Windows VSS support in QEMU Guest Agent - --with-win-sdk=SDK-path path to Windows Platform SDK (to build VSS .tlb) - --tls-priority default TLS protocol/cipher priority string - --enable-gprof QEMU profiling with gprof - --enable-profiler profiler support - --enable-debug-stack-usage - track the maximum stack usage of stacks created by qemu_alloc_stack --enable-plugins enable plugins via shared library loading --disable-containers don't use containers for cross-building --gdb=GDB-path gdb to use for gdbstub tests [$gdb_bin] - -Optional features, enabled with --enable-FEATURE and -disabled with --disable-FEATURE, default is enabled if available -(unless built with --without-default-features): - +EOF + meson_options_help +cat << EOF system all system emulation targets user supported user emulation targets linux-user all linux usermode emulation targets bsd-user all BSD usermode emulation targets - docs build documentation - guest-agent build the QEMU Guest Agent - guest-agent-msi build guest agent Windows MSI installation package pie Position Independent Executables modules modules support (non-Windows) - module-upgrades try to load modules from alternate paths for upgrades debug-tcg TCG debugging (default is disabled) debug-info debugging information - lto Enable Link-Time Optimization. - sparse sparse checker safe-stack SafeStack Stack Smash Protection. Depends on clang/llvm >= 3.7 and requires coroutine backend ucontext. - cfi Enable Control-Flow Integrity for indirect function calls. 
- In case of a cfi violation, QEMU is terminated with SIGILL - Depends on lto and is incompatible with modules - Automatically enables Link-Time Optimization (lto) - cfi-debug In case of a cfi violation, a message containing the line that - triggered the error is written to stderr. After the error, - QEMU is still terminated with SIGILL - gnutls GNUTLS cryptography support - nettle nettle cryptography support - gcrypt libgcrypt cryptography support - auth-pam PAM access control - sdl SDL UI - sdl-image SDL Image support for icons - gtk gtk UI - vte vte support for the gtk UI - curses curses UI - iconv font glyph conversion support - vnc VNC UI support - vnc-sasl SASL encryption for VNC server - vnc-jpeg JPEG lossy compression for VNC server - vnc-png PNG compression for VNC server - cocoa Cocoa UI (Mac OS X only) - virtfs VirtFS - virtiofsd build virtiofs daemon (virtiofsd) - libudev Use libudev to enumerate host devices - mpath Multipath persistent reservation passthrough - xen xen backend driver support - xen-pci-passthrough PCI passthrough support for Xen - brlapi BrlAPI (Braile) - curl curl connectivity - membarrier membarrier system call (for Linux 4.14+ or Windows) - fdt fdt device tree - kvm KVM acceleration support - hax HAX acceleration support - hvf Hypervisor.framework acceleration support - nvmm NVMM acceleration support - whpx Windows Hypervisor Platform acceleration support - rdma Enable RDMA-based migration - pvrdma Enable PVRDMA support - vde support for vde network - netmap support for netmap network - linux-aio Linux AIO support - linux-io-uring Linux io_uring support - cap-ng libcap-ng support - attr attr and xattr support - vhost-net vhost-net kernel acceleration support - vhost-vsock virtio sockets device support - vhost-scsi vhost-scsi kernel target support - vhost-crypto vhost-user-crypto backend support - vhost-kernel vhost kernel backend support - vhost-user vhost-user backend support - vhost-user-blk-server vhost-user-blk server support - vhost-vdpa vhost-vdpa kernel backend support - bpf BPF kernel support - spice spice - spice-protocol spice-protocol - rbd rados block device (rbd) - libiscsi iscsi support - libnfs nfs support - smartcard smartcard support (libcacard) - u2f U2F support (u2f-emu) - libusb libusb (for usb passthrough) - live-block-migration Block migration in the main migration stream - usb-redir usb network redirection support - lzo support of lzo compression library - snappy support of snappy compression library - bzip2 support of bzip2 compression library - (for reading bzip2-compressed dmg images) - lzfse support of lzfse compression library - (for reading lzfse-compressed dmg images) - zstd support for zstd compression library - (for migration compression and qcow2 cluster compression) - seccomp seccomp support - coroutine-pool coroutine freelist (better performance) - glusterfs GlusterFS backend - tpm TPM support - libssh ssh block device support - numa libnuma support - libxml2 for Parallels image format - tcmalloc tcmalloc support - jemalloc jemalloc support - avx2 AVX2 optimization support - avx512f AVX512F optimization support - replication replication support - opengl opengl support - virglrenderer virgl rendering support - xfsctl xfsctl support - qom-cast-debug cast debugging support - tools build qemu-io, qemu-nbd and qemu-img tools - bochs bochs image format support - cloop cloop image format support - dmg dmg image format support - qcow1 qcow v1 image format support - vdi vdi image format support - vvfat vvfat image format support - 
qed qed image format support - parallels parallels image format support - crypto-afalg Linux AF_ALG crypto backend driver - capstone capstone disassembler support - debug-mutex mutex debugging support - libpmem libpmem support - xkbcommon xkbcommon support - rng-none dummy RNG, avoid using /dev/(u)random and getrandom() - libdaxctl libdaxctl support - fuse FUSE block device export - fuse-lseek SEEK_HOLE/SEEK_DATA support for FUSE exports - multiprocess Out of process device emulation support - gio libgio support - slirp-smbd use smbd (at path --smbd=*) in slirp networking - renderdoc improved RenderDoc frame capture support NOTE: The object files are built at the place where configure is launched EOF @@ -1987,7 +1060,7 @@ exit 0 fi # Remove old dependency files to make sure that they get properly regenerated -rm -f */config-devices.mak.d +rm -f ./*/config-devices.mak.d if test -z "$python" then @@ -2005,16 +1078,13 @@ if ! $python -c 'import sys; sys.exit(sys.version_info < (3,6))'; then "Use --python=/path/to/python to specify a supported Python." fi -# Preserve python version since some functionality is dependent on it -python_version=$($python -c 'import sys; print("%d.%d.%d" % (sys.version_info[0], sys.version_info[1], sys.version_info[2]))' 2>/dev/null) - # Suppress writing compiled files python="$python -B" if test -z "$meson"; then - if test "$explicit_python" = no && has meson && version_ge "$(meson --version)" 0.55.3; then + if test "$explicit_python" = no && has meson && version_ge "$(meson --version)" 0.61.5; then meson=meson - elif test $git_submodules_action != 'ignore' ; then + elif test "$git_submodules_action" != 'ignore' ; then meson=git elif test -e "${source_path}/meson/meson.py" ; then meson=internal @@ -2111,8 +1181,8 @@ cat > $TMPC << EOF # endif # endif #elif defined(__GNUC__) && defined(__GNUC_MINOR__) -# if __GNUC__ < 7 || (__GNUC__ == 7 && __GNUC_MINOR__ < 5) -# error You need at least GCC v7.5.0 to compile QEMU +# if __GNUC__ < 7 || (__GNUC__ == 7 && __GNUC_MINOR__ < 4) +# error You need at least GCC v7.4.0 to compile QEMU # endif #else # error You either need GCC or Clang to compiler QEMU @@ -2120,7 +1190,7 @@ cat > $TMPC << EOF int main (void) { return 0; } EOF if ! compile_prog "" "" ; then - error_exit "You need at least GCC v7.5 or Clang v6.0 (or XCode Clang v10.0)" + error_exit "You need at least GCC v7.4 or Clang v6.0 (or XCode Clang v10.0)" fi # Accumulate -Wfoo and -Wno-bar separately. 
@@ -2151,6 +1221,7 @@ add_to nowarn_flags -Wno-string-plus-int add_to nowarn_flags -Wno-typedef-redefinition add_to nowarn_flags -Wno-tautological-type-limit-compare add_to nowarn_flags -Wno-psabi +add_to nowarn_flags -Wno-gnu-variable-sized-type-not-at-end gcc_flags="$warn_flags $nowarn_flags" @@ -2164,17 +1235,34 @@ cc_has_warning_flag() { compile_prog "-Werror $optflag" "" } +objcc_has_warning_flag() { + cat > $TMPM < $TMPC << EOF int main(int argc, char *argv[]) { - char arr[64], *p = arr, *c = argv[0]; + char arr[64], *p = arr, *c = argv[argc - 1]; while (*c) { *p++ = *c++; } @@ -2219,11 +1307,6 @@ if test "$modules" = "yes" && test "$mingw32" = "yes" ; then error_exit "Modules are not available for Windows" fi -# module_upgrades is only reasonable if modules are enabled -if test "$modules" = "no" && test "$module_upgrades" = "yes" ; then - error_exit "Can't enable module-upgrades as Modules are not enabled" -fi - # Static linking is not possible with plugins, modules or PIE if test "$static" = "yes" ; then if test "$modules" = "yes" ; then @@ -2235,17 +1318,7 @@ if test "$static" = "yes" ; then plugins="no" fi fi - -# Unconditional check for compiler __thread support - cat > $TMPC << EOF -static __thread int tls_var; -int main(void) { return tls_var; } -EOF - -if ! compile_prog "-Werror" "" ; then - error_exit "Your compiler does not support the __thread specifier for " \ - "Thread-Local Storage (TLS). Please upgrade to a version that does." -fi +test "$plugins" = "" && plugins=yes cat > $TMPC << EOF @@ -2258,24 +1331,24 @@ static THREAD int tls_var; int main(void) { return tls_var; } EOF -# Check we support --no-pie first; we will need this for building ROMs. -if compile_prog "-Werror -fno-pie" "-no-pie"; then - CFLAGS_NOPIE="-fno-pie" -fi - +# Meson currently only handles pie as a boolean for now so if we have +# explicitly disabled PIE we need to extend our cflags because it wont. if test "$static" = "yes"; then if test "$pie" != "no" && compile_prog "-Werror -fPIE -DPIE" "-static-pie"; then CONFIGURE_CFLAGS="-fPIE -DPIE $CONFIGURE_CFLAGS" - QEMU_LDFLAGS="-static-pie $QEMU_LDFLAGS" pie="yes" elif test "$pie" = "yes"; then error_exit "-static-pie not available due to missing toolchain support" else - QEMU_LDFLAGS="-static $QEMU_LDFLAGS" pie="no" + QEMU_CFLAGS="-fno-pie -no-pie $QEMU_CFLAGS" fi elif test "$pie" = "no"; then - CONFIGURE_CFLAGS="$CFLAGS_NOPIE $CONFIGURE_CFLAGS" + if compile_prog "-Werror -fno-pie" "-no-pie"; then + CONFIGURE_CFLAGS="-fno-pie $CONFIGURE_CFLAGS" + CONFIGURE_LDFLAGS="-no-pie $CONFIGURE_LDFLAGS" + QEMU_CFLAGS="-fno-pie -no-pie $QEMU_CFLAGS" + fi elif compile_prog "-Werror -fPIE -DPIE" "-pie"; then CONFIGURE_CFLAGS="-fPIE -DPIE $CONFIGURE_CFLAGS" CONFIGURE_LDFLAGS="-pie $CONFIGURE_LDFLAGS" @@ -2287,12 +1360,6 @@ else pie="no" fi -# Detect support for PT_GNU_RELRO + DT_BIND_NOW. -# The combination is known as "full relro", because .got.plt is read-only too. -if compile_prog "" "-Wl,-z,relro -Wl,-z,now" ; then - QEMU_LDFLAGS="-Wl,-z,relro -Wl,-z,now $QEMU_LDFLAGS" -fi - ########################################## # __sync_fetch_and_and requires at least -march=i486. 
Many toolchains # use i686 as default anyway, but for those that don't, an explicit @@ -2318,26 +1385,6 @@ EOF fi fi -######################################### -# Solaris specific configure tool chain decisions - -if test "$solaris" = "yes" ; then - if has ar; then - : - else - if test -f /usr/ccs/bin/ar ; then - error_exit "No path includes ar" \ - "Add /usr/ccs/bin to your path and rerun configure" - fi - error_exit "No path includes ar" - fi -fi - -if test "$tcg" = "enabled"; then - git_submodules="$git_submodules tests/fp/berkeley-testfloat-3" - git_submodules="$git_submodules tests/fp/berkeley-softfloat-3" -fi - if test -z "${target_list+xxx}" ; then default_targets=yes for target in $default_target_list; do @@ -2360,13 +1407,6 @@ else done fi -for target in $target_list; do - # if a deprecated target is enabled we note it here - if echo "$deprecated_targets_list" | grep -q "$target"; then - add_to deprecated_features $target - fi -done - # see if system emulation was really requested case " $target_list " in *"-softmmu "*) softmmu=yes @@ -2375,133 +1415,44 @@ case " $target_list " in ;; esac -feature_not_found() { - feature=$1 - remedy=$2 +if test "$tcg" = "auto"; then + if test -z "$target_list"; then + tcg="disabled" + else + tcg="enabled" + fi +fi - error_exit "User requested feature $feature" \ - "configure was not able to find it." \ - "$remedy" -} +if test "$tcg" = "enabled"; then + git_submodules="$git_submodules tests/fp/berkeley-testfloat-3" + git_submodules="$git_submodules tests/fp/berkeley-softfloat-3" +fi -# --- +########################################## # big/little endian test cat > $TMPC << EOF -#include -short big_endian[] = { 0x4269, 0x4765, 0x4e64, 0x4961, 0x4e00, 0, }; -short little_endian[] = { 0x694c, 0x7454, 0x654c, 0x6e45, 0x6944, 0x6e41, 0, }; -int main(int argc, char *argv[]) -{ - return printf("%s %s\n", (char *)big_endian, (char *)little_endian); -} +#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +# error LITTLE +#endif +int main(void) { return 0; } EOF -if compile_prog ; then - if strings -a $TMPE | grep -q BiGeNdIaN ; then - bigendian="yes" - elif strings -a $TMPE | grep -q LiTtLeEnDiAn ; then - bigendian="no" - else - echo big/little test failed - exit 1 - fi +if ! compile_prog ; then + bigendian="no" else + cat > $TMPC << EOF +#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +# error BIG +#endif +int main(void) { return 0; } +EOF + + if ! 
compile_prog ; then + bigendian="yes" + else echo big/little test failed exit 1 -fi - -########################################## -# system tools -if test -z "$want_tools"; then - if test "$softmmu" = "no"; then - want_tools=no - else - want_tools=yes - fi -fi - -########################################## -# Disable features only meaningful for system-mode emulation -if test "$softmmu" = "no"; then - audio_drv_list="" -fi - -########################################## -# L2TPV3 probe - -cat > $TMPC < -#include -int main(void) { return sizeof(struct mmsghdr); } -EOF -if compile_prog "" "" ; then - l2tpv3=yes -else - l2tpv3=no -fi - -cat > $TMPC < -int main(int argc, char *argv[]) { - return mlockall(MCL_FUTURE); -} -EOF -if compile_prog "" "" ; then - have_mlockall=yes -else - have_mlockall=no -fi - -######################################### -# vhost interdependencies and host support - -# vhost backends -if test "$vhost_user" = "yes" && test "$linux" != "yes"; then - error_exit "vhost-user is only available on Linux" -fi -test "$vhost_vdpa" = "" && vhost_vdpa=$linux -if test "$vhost_vdpa" = "yes" && test "$linux" != "yes"; then - error_exit "vhost-vdpa is only available on Linux" -fi -test "$vhost_kernel" = "" && vhost_kernel=$linux -if test "$vhost_kernel" = "yes" && test "$linux" != "yes"; then - error_exit "vhost-kernel is only available on Linux" -fi - -# vhost-kernel devices -test "$vhost_scsi" = "" && vhost_scsi=$vhost_kernel -if test "$vhost_scsi" = "yes" && test "$vhost_kernel" != "yes"; then - error_exit "--enable-vhost-scsi requires --enable-vhost-kernel" -fi -test "$vhost_vsock" = "" && vhost_vsock=$vhost_kernel -if test "$vhost_vsock" = "yes" && test "$vhost_kernel" != "yes"; then - error_exit "--enable-vhost-vsock requires --enable-vhost-kernel" -fi - -# vhost-user backends -test "$vhost_net_user" = "" && vhost_net_user=$vhost_user -if test "$vhost_net_user" = "yes" && test "$vhost_user" = "no"; then - error_exit "--enable-vhost-net-user requires --enable-vhost-user" -fi -test "$vhost_crypto" = "" && vhost_crypto=$vhost_user -if test "$vhost_crypto" = "yes" && test "$vhost_user" = "no"; then - error_exit "--enable-vhost-crypto requires --enable-vhost-user" -fi -test "$vhost_user_fs" = "" && vhost_user_fs=$vhost_user -if test "$vhost_user_fs" = "yes" && test "$vhost_user" = "no"; then - error_exit "--enable-vhost-user-fs requires --enable-vhost-user" -fi -#vhost-vdpa backends -test "$vhost_net_vdpa" = "" && vhost_net_vdpa=$vhost_vdpa -if test "$vhost_net_vdpa" = "yes" && test "$vhost_vdpa" = "no"; then - error_exit "--enable-vhost-net-vdpa requires --enable-vhost-vdpa" -fi - -# OR the vhost-kernel, vhost-vdpa and vhost-user values for simplicity -if test "$vhost_net" = ""; then - test "$vhost_net_user" = "yes" && vhost_net=yes - test "$vhost_net_vdpa" = "yes" && vhost_net=yes - test "$vhost_kernel" = "yes" && vhost_net=yes + fi fi ########################################## @@ -2511,697 +1462,17 @@ if ! has "$pkg_config_exe"; then error_exit "pkg-config binary '$pkg_config_exe' not found" fi -########################################## -# NPTL probe - -if test "$linux_user" = "yes"; then - cat > $TMPC < -#include -int main(void) { -#if !defined(CLONE_SETTLS) || !defined(FUTEX_WAIT) -#error bork -#endif - return 0; -} -EOF - if ! compile_object ; then - feature_not_found "nptl" "Install glibc and linux kernel headers." 
- fi -fi - -########################################## -# xen probe - -if test "$xen" != "disabled" ; then - # Check whether Xen library path is specified via --extra-ldflags to avoid - # overriding this setting with pkg-config output. If not, try pkg-config - # to obtain all needed flags. - - if ! echo $EXTRA_LDFLAGS | grep tools/libxc > /dev/null && \ - $pkg_config --exists xencontrol ; then - xen_ctrl_version="$(printf '%d%02d%02d' \ - $($pkg_config --modversion xencontrol | sed 's/\./ /g') )" - xen=enabled - xen_pc="xencontrol xenstore xenforeignmemory xengnttab" - xen_pc="$xen_pc xenevtchn xendevicemodel" - if $pkg_config --exists xentoolcore; then - xen_pc="$xen_pc xentoolcore" - fi - xen_cflags="$($pkg_config --cflags $xen_pc)" - xen_libs="$($pkg_config --libs $xen_pc)" - else - - xen_libs="-lxenstore -lxenctrl" - xen_stable_libs="-lxenforeignmemory -lxengnttab -lxenevtchn" - - # First we test whether Xen headers and libraries are available. - # If no, we are done and there is no Xen support. - # If yes, more tests are run to detect the Xen version. - - # Xen (any) - cat > $TMPC < -int main(void) { - return 0; -} -EOF - if ! compile_prog "" "$xen_libs" ; then - # Xen not found - if test "$xen" = "enabled" ; then - feature_not_found "xen" "Install xen devel" - fi - xen=disabled - - # Xen unstable - elif - cat > $TMPC < -#include -int main(void) { - xendevicemodel_handle *xd; - xenforeignmemory_handle *xfmem; - - xd = xendevicemodel_open(0, 0); - xendevicemodel_pin_memory_cacheattr(xd, 0, 0, 0, 0); - - xfmem = xenforeignmemory_open(0, 0); - xenforeignmemory_map_resource(xfmem, 0, 0, 0, 0, 0, NULL, 0, 0); - - return 0; -} -EOF - compile_prog "" "$xen_libs -lxendevicemodel $xen_stable_libs -lxentoolcore" - then - xen_stable_libs="-lxendevicemodel $xen_stable_libs -lxentoolcore" - xen_ctrl_version=41100 - xen=enabled - elif - cat > $TMPC < -#include -int main(void) { - xenforeignmemory_handle *xfmem; - - xfmem = xenforeignmemory_open(0, 0); - xenforeignmemory_map2(xfmem, 0, 0, 0, 0, 0, 0, 0); - xentoolcore_restrict_all(0); - - return 0; -} -EOF - compile_prog "" "$xen_libs -lxendevicemodel $xen_stable_libs -lxentoolcore" - then - xen_stable_libs="-lxendevicemodel $xen_stable_libs -lxentoolcore" - xen_ctrl_version=41000 - xen=enabled - elif - cat > $TMPC < -int main(void) { - xendevicemodel_handle *xd; - - xd = xendevicemodel_open(0, 0); - xendevicemodel_close(xd); - - return 0; -} -EOF - compile_prog "" "$xen_libs -lxendevicemodel $xen_stable_libs" - then - xen_stable_libs="-lxendevicemodel $xen_stable_libs" - xen_ctrl_version=40900 - xen=enabled - elif - cat > $TMPC < -#include -#include -#include -#include -#include -#include -#if !defined(HVM_MAX_VCPUS) -# error HVM_MAX_VCPUS not defined -#endif -int main(void) { - xc_interface *xc = NULL; - xenforeignmemory_handle *xfmem; - xenevtchn_handle *xe; - xengnttab_handle *xg; - xengnttab_grant_copy_segment_t* seg = NULL; - - xs_daemon_open(); - - xc = xc_interface_open(0, 0, 0); - xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0); - xc_domain_add_to_physmap(0, 0, XENMAPSPACE_gmfn, 0, 0); - xc_hvm_inject_msi(xc, 0, 0xf0000000, 0x00000000); - xc_hvm_create_ioreq_server(xc, 0, HVM_IOREQSRV_BUFIOREQ_ATOMIC, NULL); - - xfmem = xenforeignmemory_open(0, 0); - xenforeignmemory_map(xfmem, 0, 0, 0, 0, 0); - - xe = xenevtchn_open(0, 0); - xenevtchn_fd(xe); - - xg = xengnttab_open(0, 0); - xengnttab_grant_copy(xg, 0, seg); - - return 0; -} -EOF - compile_prog "" "$xen_libs $xen_stable_libs" - then - xen_ctrl_version=40800 - xen=enabled - elif - cat > 
$TMPC < -#include -#include -#include -#include -#include -#include -#if !defined(HVM_MAX_VCPUS) -# error HVM_MAX_VCPUS not defined -#endif -int main(void) { - xc_interface *xc = NULL; - xenforeignmemory_handle *xfmem; - xenevtchn_handle *xe; - xengnttab_handle *xg; - - xs_daemon_open(); - - xc = xc_interface_open(0, 0, 0); - xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0); - xc_domain_add_to_physmap(0, 0, XENMAPSPACE_gmfn, 0, 0); - xc_hvm_inject_msi(xc, 0, 0xf0000000, 0x00000000); - xc_hvm_create_ioreq_server(xc, 0, HVM_IOREQSRV_BUFIOREQ_ATOMIC, NULL); - - xfmem = xenforeignmemory_open(0, 0); - xenforeignmemory_map(xfmem, 0, 0, 0, 0, 0); - - xe = xenevtchn_open(0, 0); - xenevtchn_fd(xe); - - xg = xengnttab_open(0, 0); - xengnttab_map_grant_ref(xg, 0, 0, 0); - - return 0; -} -EOF - compile_prog "" "$xen_libs $xen_stable_libs" - then - xen_ctrl_version=40701 - xen=enabled - - # Xen 4.6 - elif - cat > $TMPC < -#include -#include -#include -#if !defined(HVM_MAX_VCPUS) -# error HVM_MAX_VCPUS not defined -#endif -int main(void) { - xc_interface *xc; - xs_daemon_open(); - xc = xc_interface_open(0, 0, 0); - xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0); - xc_gnttab_open(NULL, 0); - xc_domain_add_to_physmap(0, 0, XENMAPSPACE_gmfn, 0, 0); - xc_hvm_inject_msi(xc, 0, 0xf0000000, 0x00000000); - xc_hvm_create_ioreq_server(xc, 0, HVM_IOREQSRV_BUFIOREQ_ATOMIC, NULL); - xc_reserved_device_memory_map(xc, 0, 0, 0, 0, NULL, 0); - return 0; -} -EOF - compile_prog "" "$xen_libs" - then - xen_ctrl_version=40600 - xen=enabled - - # Xen 4.5 - elif - cat > $TMPC < -#include -#include -#include -#if !defined(HVM_MAX_VCPUS) -# error HVM_MAX_VCPUS not defined -#endif -int main(void) { - xc_interface *xc; - xs_daemon_open(); - xc = xc_interface_open(0, 0, 0); - xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0); - xc_gnttab_open(NULL, 0); - xc_domain_add_to_physmap(0, 0, XENMAPSPACE_gmfn, 0, 0); - xc_hvm_inject_msi(xc, 0, 0xf0000000, 0x00000000); - xc_hvm_create_ioreq_server(xc, 0, 0, NULL); - return 0; -} -EOF - compile_prog "" "$xen_libs" - then - xen_ctrl_version=40500 - xen=enabled - - elif - cat > $TMPC < -#include -#include -#include -#if !defined(HVM_MAX_VCPUS) -# error HVM_MAX_VCPUS not defined -#endif -int main(void) { - xc_interface *xc; - xs_daemon_open(); - xc = xc_interface_open(0, 0, 0); - xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0); - xc_gnttab_open(NULL, 0); - xc_domain_add_to_physmap(0, 0, XENMAPSPACE_gmfn, 0, 0); - xc_hvm_inject_msi(xc, 0, 0xf0000000, 0x00000000); - return 0; -} -EOF - compile_prog "" "$xen_libs" - then - xen_ctrl_version=40200 - xen=enabled - - else - if test "$xen" = "enabled" ; then - feature_not_found "xen (unsupported version)" \ - "Install a supported xen (xen 4.2 or newer)" - fi - xen=disabled - fi - - if test "$xen" = enabled; then - if test $xen_ctrl_version -ge 40701 ; then - xen_libs="$xen_libs $xen_stable_libs " - fi - fi - fi -fi - -########################################## -# RDMA needs OpenFabrics libraries -if test "$rdma" != "no" ; then - cat > $TMPC < -int main(void) { return 0; } -EOF - rdma_libs="-lrdmacm -libverbs -libumad" - if compile_prog "" "$rdma_libs" ; then - rdma="yes" - else - if test "$rdma" = "yes" ; then - error_exit \ - " OpenFabrics librdmacm/libibverbs/libibumad not present." \ - " Your options:" \ - " (1) Fast: Install infiniband packages (devel) from your distro." 
\ - " (2) Cleanest: Install libraries from www.openfabrics.org" \ - " (3) Also: Install softiwarp if you don't have RDMA hardware" - fi - rdma="no" - fi -fi - -########################################## -# PVRDMA detection - -cat > $TMPC < - -int -main(void) -{ - char buf = 0; - void *addr = &buf; - addr = mremap(addr, 0, 1, MREMAP_MAYMOVE | MREMAP_FIXED); - - return 0; -} -EOF - -if test "$rdma" = "yes" ; then - case "$pvrdma" in - "") - if compile_prog "" ""; then - pvrdma="yes" - else - pvrdma="no" - fi - ;; - "yes") - if ! compile_prog "" ""; then - error_exit "PVRDMA is not supported since mremap is not implemented" - fi - pvrdma="yes" - ;; - "no") - pvrdma="no" - ;; - esac -else - if test "$pvrdma" = "yes" ; then - error_exit "PVRDMA requires rdma suppport" - fi - pvrdma="no" -fi - -# Let's see if enhanced reg_mr is supported -if test "$pvrdma" = "yes" ; then - -cat > $TMPC < - -int -main(void) -{ - struct ibv_mr *mr; - struct ibv_pd *pd = NULL; - size_t length = 10; - uint64_t iova = 0; - int access = 0; - void *addr = NULL; - - mr = ibv_reg_mr_iova(pd, addr, length, iova, access); - - ibv_dereg_mr(mr); - - return 0; -} -EOF - if ! compile_prog "" "-libverbs"; then - QEMU_CFLAGS="$QEMU_CFLAGS -DLEGACY_RDMA_REG_MR" - fi -fi - -########################################## -# xfsctl() probe, used for file-posix.c -if test "$xfs" != "no" ; then - cat > $TMPC << EOF -#include /* NULL */ -#include -int main(void) -{ - xfsctl(NULL, 0, 0, NULL); - return 0; -} -EOF - if compile_prog "" "" ; then - xfs="yes" - else - if test "$xfs" = "yes" ; then - feature_not_found "xfs" "Install xfsprogs/xfslibs devel" - fi - xfs=no - fi -fi - -########################################## -# vde libraries probe -if test "$vde" != "no" ; then - vde_libs="-lvdeplug" - cat > $TMPC << EOF -#include -int main(void) -{ - struct vde_open_args a = {0, 0, 0}; - char s[] = ""; - vde_open(s, s, &a); - return 0; -} -EOF - if compile_prog "" "$vde_libs" ; then - vde=yes - else - if test "$vde" = "yes" ; then - feature_not_found "vde" "Install vde (Virtual Distributed Ethernet) devel" - fi - vde=no - fi -fi - -########################################## -# netmap support probe -# Apart from looking for netmap headers, we make sure that the host API version -# supports the netmap backend (>=11). The upper bound (15) is meant to simulate -# a minor/major version number. Minor new features will be marked with values up -# to 15, and if something happens that requires a change to the backend we will -# move above 15, submit the backend fixes and modify this two bounds. 
-if test "$netmap" != "no" ; then - cat > $TMPC << EOF -#include -#include -#include -#include -#if (NETMAP_API < 11) || (NETMAP_API > 15) -#error -#endif -int main(void) { return 0; } -EOF - if compile_prog "" "" ; then - netmap=yes - else - if test "$netmap" = "yes" ; then - feature_not_found "netmap" - fi - netmap=no - fi -fi - -########################################## -# detect CoreAudio -if test "$coreaudio" != "no" ; then - coreaudio_libs="-framework CoreAudio" - cat > $TMPC << EOF -#include -int main(void) -{ - return (int)AudioGetCurrentHostTime(); -} -EOF - if compile_prog "" "$coreaudio_libs" ; then - coreaudio=yes - else - coreaudio=no - fi -fi - -########################################## -# Sound support libraries probe - -audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/,/ /g') -for drv in $audio_drv_list; do - case $drv in - alsa | try-alsa) - if $pkg_config alsa --exists; then - alsa_libs=$($pkg_config alsa --libs) - alsa_cflags=$($pkg_config alsa --cflags) - alsa=yes - if test "$drv" = "try-alsa"; then - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-alsa/alsa/') - fi - else - if test "$drv" = "try-alsa"; then - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-alsa//') - else - error_exit "$drv check failed" \ - "Make sure to have the $drv libs and headers installed." - fi - fi - ;; - - pa | try-pa) - if $pkg_config libpulse --exists; then - libpulse=yes - pulse_libs=$($pkg_config libpulse --libs) - pulse_cflags=$($pkg_config libpulse --cflags) - if test "$drv" = "try-pa"; then - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-pa/pa/') - fi - else - if test "$drv" = "try-pa"; then - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-pa//') - else - error_exit "$drv check failed" \ - "Make sure to have the $drv libs and headers installed." - fi - fi - ;; - - sdl) - if test "$sdl" = "no"; then - error_exit "sdl not found or disabled, can not use sdl audio driver" - fi - ;; - - try-sdl) - if test "$sdl" = "no"; then - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-sdl//') - else - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-sdl/sdl/') - fi - ;; - - coreaudio | try-coreaudio) - if test "$coreaudio" = "no"; then - if test "$drv" = "try-coreaudio"; then - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-coreaudio//') - else - error_exit "$drv check failed" \ - "Make sure to have the $drv is available." - fi - else - coreaudio_libs="-framework CoreAudio" - if test "$drv" = "try-coreaudio"; then - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-coreaudio/coreaudio/') - fi - fi - ;; - - dsound) - dsound_libs="-lole32 -ldxguid" - audio_win_int="yes" - ;; - - oss) - oss_libs="$oss_lib" - ;; - - jack | try-jack) - if $pkg_config jack --exists; then - libjack=yes - jack_libs=$($pkg_config jack --libs) - if test "$drv" = "try-jack"; then - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-jack/jack/') - fi - else - if test "$drv" = "try-jack"; then - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-jack//') - else - error_exit "$drv check failed" \ - "Make sure to have the $drv libs and headers installed." 
- fi - fi - ;; - - *) - echo "$audio_possible_drivers" | grep -q "\<$drv\>" || { - error_exit "Unknown driver '$drv' selected" \ - "Possible drivers are: $audio_possible_drivers" - } - ;; - esac -done - -########################################## -# plugin linker support probe - -if test "$plugins" != "no"; then - - ######################################### - # See if --dynamic-list is supported by the linker - - ld_dynamic_list="no" - cat > $TMPTXT < $TMPC < -void foo(void); - -void foo(void) -{ - printf("foo\n"); -} - -int main(void) -{ - foo(); - return 0; -} -EOF - - if compile_prog "" "-Wl,--dynamic-list=$TMPTXT" ; then - ld_dynamic_list="yes" - fi - - ######################################### - # See if -exported_symbols_list is supported by the linker - - ld_exported_symbols_list="no" - cat > $TMPTXT < $TMPC < -int main(void) -{ - g_dbus_proxy_new_sync(0, 0, 0, 0, 0, 0, 0, 0); - return 0; -} -EOF - if compile_prog "$gio_cflags" "$gio_libs" ; then - pass=yes - else - pass=no - fi - - if test "$pass" = "yes" && - $pkg_config --atleast-version=$glib_req_ver gio-unix-2.0; then - gio_cflags="$gio_cflags $($pkg_config --cflags gio-unix-2.0)" - gio_libs="$gio_libs $($pkg_config --libs gio-unix-2.0)" - fi - fi - - if test "$pass" = "no"; then - if test "$gio" = "yes"; then - feature_not_found "gio" "Install libgio >= 2.0" - else - gio=no - fi - else - gio=yes - fi -fi - # Sanity check that the current size_t matches the # size that glib thinks it should be. This catches # problems on multi-arch where people try to build @@ -3298,7 +1530,7 @@ static void foo_free(Foo *f) { g_free(f); } -G_DEFINE_AUTOPTR_CLEANUP_FUNC(Foo, foo_free); +G_DEFINE_AUTOPTR_CLEANUP_FUNC(Foo, foo_free) int main(void) { return 0; } EOF if ! compile_prog "$glib_cflags -Werror" "$glib_libs" ; then @@ -3308,149 +1540,6 @@ if ! compile_prog "$glib_cflags -Werror" "$glib_libs" ; then fi fi -########################################## -# SHA command probe for modules -if test "$modules" = yes; then - shacmd_probe="sha1sum sha1 shasum" - for c in $shacmd_probe; do - if has $c; then - shacmd="$c" - break - fi - done - if test "$shacmd" = ""; then - error_exit "one of the checksum commands is required to enable modules: $shacmd_probe" - fi -fi - -########################################## -# pthread probe -PTHREADLIBS_LIST="-pthread -lpthread -lpthreadGC2" - -pthread=no -cat > $TMPC << EOF -#include -static void *f(void *p) { return NULL; } -int main(void) { - pthread_t thread; - pthread_create(&thread, 0, f, 0); - return 0; -} -EOF -if compile_prog "" "" ; then - pthread=yes -else - for pthread_lib in $PTHREADLIBS_LIST; do - if compile_prog "" "$pthread_lib" ; then - pthread=yes - break - fi - done -fi - -if test "$mingw32" != yes && test "$pthread" = no; then - error_exit "pthread check failed" \ - "Make sure to have the pthread libs and headers installed." 
-fi - -# check for pthread_setname_np with thread id -pthread_setname_np_w_tid=no -cat > $TMPC << EOF -#include - -static void *f(void *p) { return NULL; } -int main(void) -{ - pthread_t thread; - pthread_create(&thread, 0, f, 0); - pthread_setname_np(thread, "QEMU"); - return 0; -} -EOF -if compile_prog "" "$pthread_lib" ; then - pthread_setname_np_w_tid=yes -fi - -# check for pthread_setname_np without thread id -pthread_setname_np_wo_tid=no -cat > $TMPC << EOF -#include - -static void *f(void *p) { pthread_setname_np("QEMU"); return NULL; } -int main(void) -{ - pthread_t thread; - pthread_create(&thread, 0, f, 0); - return 0; -} -EOF -if compile_prog "" "$pthread_lib" ; then - pthread_setname_np_wo_tid=yes -fi - -########################################## -# libssh probe -if test "$libssh" != "no" ; then - if $pkg_config --exists "libssh >= 0.8.7"; then - libssh_cflags=$($pkg_config libssh --cflags) - libssh_libs=$($pkg_config libssh --libs) - libssh=yes - else - if test "$libssh" = "yes" ; then - error_exit "libssh required for --enable-libssh" - fi - libssh=no - fi -fi - -########################################## -# linux-aio probe - -if test "$linux_aio" != "no" ; then - cat > $TMPC < -#include -#include -int main(void) { io_setup(0, NULL); io_set_eventfd(NULL, 0); eventfd(0, 0); return 0; } -EOF - if compile_prog "" "-laio" ; then - linux_aio=yes - else - if test "$linux_aio" = "yes" ; then - feature_not_found "linux AIO" "Install libaio devel" - fi - linux_aio=no - fi -fi - -########################################## -# TPM emulation is only on POSIX - -if test "$tpm" = ""; then - if test "$mingw32" = "yes"; then - tpm=no - else - tpm=yes - fi -elif test "$tpm" = "yes"; then - if test "$mingw32" = "yes" ; then - error_exit "TPM emulation only available on POSIX systems" - fi -fi - -########################################## -# iovec probe -cat > $TMPC < -#include -#include -int main(void) { return sizeof(struct iovec); } -EOF -iovec=no -if compile_prog "" "" ; then - iovec=yes -fi - ########################################## # fdt probe @@ -3461,355 +1550,14 @@ case "$fdt" in ;; esac -########################################## -# opengl probe (for sdl2, gtk) - -gbm="no" -if $pkg_config gbm; then - gbm_cflags="$($pkg_config --cflags gbm)" - gbm_libs="$($pkg_config --libs gbm)" - gbm="yes" -fi - -# XBOX: Just skip this, don't need it rn -if false ; then - -if test "$opengl" != "no" ; then - epoxy=no - if $pkg_config epoxy; then - cat > $TMPC << EOF -#include -int main(void) { return 0; } -EOF - if compile_prog "" "" ; then - epoxy=yes - fi - fi - - if test "$epoxy" = "yes" ; then - opengl_cflags="$($pkg_config --cflags epoxy)" - opengl_libs="$($pkg_config --libs epoxy)" - opengl=yes - else - if test "$opengl" = "yes" ; then - feature_not_found "opengl" "Please install epoxy with EGL" - fi - opengl_cflags="" - opengl_libs="" - opengl=no - fi -fi - -fi # XBOX - -# XBOX -if test "$opengl" != "no" ; then - if test "$darwin" = "yes" ; then - opengl_libs="$opengl_libs -framework OpenGL" - elif test "$mingw32" = "yes" ; then - opengl_libs="$opengl_libs -lopengl32 -lgdi32" - elif test "$linux" = "yes" ; then - opengl_libs="$opengl_libs -lGLU -lGL" - fi - opengl_libs="$opengl_libs $($pkg_config --libs epoxy)" - opengl_cflags="$opengl_cflags $($pkg_config --cflags epoxy)" - QEMU_CFLAGS="$QEMU_CFLAGS $opengl_cflags" -fi # test "$opengl" != "no" - ########################################## # epoxy probe - -# if $pkg_config --exists epoxy ; then -# epoxy_libs=$($pkg_config --libs epoxy) -# 
epoxy_flags=$($pkg_config --cflags epoxy) -# else -# error_exit "epoxy not present." \ -# "Please install the epoxy devel package." -# fi -# LIBS="$LIBS $epoxy_libs" -# QEMU_CFLAGS="$QEMU_CFLAGS $epoxy_flags" - - -########################################## -# gtk probe - -# Depend on GTK for native file dialog boxes -# FIXME: Add fallback when GTK is unavailable -xemu_gtk="no" -if test "$linux" = "yes" ; then - xemu_gtk="yes" -fi - -# FIXME: Remove duplication with existing GTK detection -if test "$xemu_gtk" != "no"; then - gtkpackage="gtk+-3.0" - gtkversion="3.14.0" - if $pkg_config --exists "$gtkpackage >= $gtkversion"; then - xemu_gtk_cflags=$($pkg_config --cflags $gtkpackage) - xemu_gtk_libs=$($pkg_config --libs $gtkpackage) - xemu_gtk_version=$($pkg_config --modversion $gtkpackage) - elif test "$xemu_gtk" = "yes"; then - feature_not_found "gtk" "Install gtk3-devel" - fi -fi - -########################################## -# openssl probe -if $pkg_config --exists openssl; then - openssl_cflags=$($pkg_config --cflags openssl) - openssl_libs=$($pkg_config --libs openssl) +if $pkg_config --libs --silence-errors epoxy > /dev/null 2>&1 ; then + epoxy_libs=$($pkg_config --libs --silence-errors epoxy) + epoxy_cflags=$($pkg_config --cflags --silence-errors epoxy) else - feature_not_found "openssl" "Install openssl-dev" -fi - -########################################## -# libpcap probe -if test "$mingw32" = "yes"; then - libpcap_cflags="-I$source_path/winpcap-loader/include" - libpcap_libs="-lws2_32" -else - if $pkg_config --exists "libpcap"; then - libpcap_cflags=$($pkg_config --cflags libpcap) - libpcap_libs=$($pkg_config --libs libpcap) - else - feature_not_found "libpcap" "Install libpcap" - fi -fi - -########################################## -# libsamplerate probe -if $pkg_config --exists samplerate; then - libsamplerate_cflags=$($pkg_config --cflags samplerate) - libsamplerate_libs=$($pkg_config --libs samplerate) -else - feature_not_found "samplerate" "Install libsamplerate0-dev" -fi - -########################################## -# libnuma probe - -if test "$numa" != "no" ; then - cat > $TMPC << EOF -#include -int main(void) { return numa_available(); } -EOF - - if compile_prog "" "-lnuma" ; then - numa=yes - numa_libs="-lnuma" - else - if test "$numa" = "yes" ; then - feature_not_found "numa" "install numactl devel" - fi - numa=no - fi -fi - -malloc=system -if test "$tcmalloc" = "yes" && test "$jemalloc" = "yes" ; then - echo "ERROR: tcmalloc && jemalloc can't be used at the same time" - exit 1 -elif test "$tcmalloc" = "yes" ; then - malloc=tcmalloc -elif test "$jemalloc" = "yes" ; then - malloc=jemalloc -fi - -# check for usbfs -have_usbfs=no -if test "$linux_user" = "yes"; then - cat > $TMPC << EOF -#include - -#ifndef USBDEVFS_GET_CAPABILITIES -#error "USBDEVFS_GET_CAPABILITIES undefined" -#endif - -#ifndef USBDEVFS_DISCONNECT_CLAIM -#error "USBDEVFS_DISCONNECT_CLAIM undefined" -#endif - -int main(void) -{ - return 0; -} -EOF - if compile_prog "" ""; then - have_usbfs=yes - fi -fi - -########################################## -# spice probe -if test "$spice_protocol" != "no" ; then - spice_protocol_cflags=$($pkg_config --cflags spice-protocol 2>/dev/null) - if $pkg_config --atleast-version=0.12.3 spice-protocol; then - spice_protocol="yes" - else - if test "$spice_protocol" = "yes" ; then - feature_not_found "spice_protocol" \ - "Install spice-protocol(>=0.12.3) devel" - fi - spice_protocol="no" - fi -fi - -if test "$spice" != "no" ; then - cat > $TMPC << EOF -#include -int main(void) 
{ spice_server_new(); return 0; } -EOF - spice_cflags=$($pkg_config --cflags spice-protocol spice-server 2>/dev/null) - spice_libs=$($pkg_config --libs spice-protocol spice-server 2>/dev/null) - if $pkg_config --atleast-version=0.12.5 spice-server && \ - test "$spice_protocol" = "yes" && \ - compile_prog "$spice_cflags" "$spice_libs" ; then - spice="yes" - else - if test "$spice" = "yes" ; then - feature_not_found "spice" \ - "Install spice-server(>=0.12.5) devel" - fi - spice="no" - fi -fi - -########################################## -# check if we have VSS SDK headers for win - -if test "$mingw32" = "yes" && test "$guest_agent" != "no" && \ - test "$vss_win32_sdk" != "no" ; then - case "$vss_win32_sdk" in - "") vss_win32_include="-isystem $source_path" ;; - *\ *) # The SDK is installed in "Program Files" by default, but we cannot - # handle path with spaces. So we symlink the headers into ".sdk/vss". - vss_win32_include="-isystem $source_path/.sdk/vss" - symlink "$vss_win32_sdk/inc" "$source_path/.sdk/vss/inc" - ;; - *) vss_win32_include="-isystem $vss_win32_sdk" - esac - cat > $TMPC << EOF -#define __MIDL_user_allocate_free_DEFINED__ -#include -int main(void) { return VSS_CTX_BACKUP; } -EOF - if compile_prog "$vss_win32_include" "" ; then - guest_agent_with_vss="yes" - QEMU_CFLAGS="$QEMU_CFLAGS $vss_win32_include" - libs_qga="-lole32 -loleaut32 -lshlwapi -lstdc++ -Wl,--enable-stdcall-fixup $libs_qga" - qga_vss_provider="qga/vss-win32/qga-vss.dll qga/vss-win32/qga-vss.tlb" - else - if test "$vss_win32_sdk" != "" ; then - echo "ERROR: Please download and install Microsoft VSS SDK:" - echo "ERROR: http://www.microsoft.com/en-us/download/details.aspx?id=23490" - echo "ERROR: On POSIX-systems, you can extract the SDK headers by:" - echo "ERROR: scripts/extract-vsssdk-headers setup.exe" - echo "ERROR: The headers are extracted in the directory \`inc'." - feature_not_found "VSS support" - fi - guest_agent_with_vss="no" - fi -fi - -########################################## -# lookup Windows platform SDK (if not specified) -# The SDK is needed only to build .tlb (type library) file of guest agent -# VSS provider from the source. It is usually unnecessary because the -# pre-compiled .tlb file is included. - -if test "$mingw32" = "yes" && test "$guest_agent" != "no" && \ - test "$guest_agent_with_vss" = "yes" ; then - if test -z "$win_sdk"; then - programfiles="$PROGRAMFILES" - test -n "$PROGRAMW6432" && programfiles="$PROGRAMW6432" - if test -n "$programfiles"; then - win_sdk=$(ls -d "$programfiles/Microsoft SDKs/Windows/v"* | tail -1) 2>/dev/null - else - feature_not_found "Windows SDK" - fi - elif test "$win_sdk" = "no"; then - win_sdk="" - fi -fi - -########################################## -# check if mingw environment provides a recent ntddscsi.h -if test "$mingw32" = "yes" && test "$guest_agent" != "no"; then - cat > $TMPC << EOF -#include -#include -int main(void) { -#if !defined(IOCTL_SCSI_GET_ADDRESS) -#error Missing required ioctl definitions -#endif - SCSI_ADDRESS addr = { .Lun = 0, .TargetId = 0, .PathId = 0 }; - return addr.Lun; -} -EOF - if compile_prog "" "" ; then - guest_agent_ntddscsi=yes - libs_qga="-lsetupapi -lcfgmgr32 $libs_qga" - fi -fi - -########################################## -# capstone - -case "$capstone" in - auto | enabled | internal) - # Simpler to always update submodule, even if not needed. 
- git_submodules="${git_submodules} capstone" - ;; -esac - -########################################## -# check if we have posix_syslog - -posix_syslog=no -cat > $TMPC << EOF -#include -int main(void) { openlog("qemu", LOG_PID, LOG_DAEMON); syslog(LOG_INFO, "configure"); return 0; } -EOF -if compile_prog "" "" ; then - posix_syslog=yes -fi - -########################################## -# check if trace backend exists - -$python "$source_path/scripts/tracetool.py" "--backends=$trace_backends" --check-backends > /dev/null 2> /dev/null -if test "$?" -ne 0 ; then - error_exit "invalid trace backends" \ - "Please choose supported trace backends." -fi - -########################################## -# For 'ust' backend, test if ust headers are present -if have_backend "ust"; then - if $pkg_config lttng-ust --exists; then - lttng_ust_libs=$($pkg_config --libs lttng-ust) - else - error_exit "Trace backend 'ust' missing lttng-ust header files" - fi -fi - -########################################## -# For 'dtrace' backend, test if 'dtrace' command is present -if have_backend "dtrace"; then - if ! has 'dtrace' ; then - error_exit "dtrace command is not found in PATH $PATH" - fi - trace_backend_stap="no" - if has 'stap' ; then - trace_backend_stap="yes" - - # Workaround to avoid dtrace(1) producing a file with 'hidden' symbol - # visibility. Define STAP_SDT_V2 to produce 'default' symbol visibility - # instead. QEMU --enable-modules depends on this because the SystemTap - # semaphores are linked into the main binary and not the module's shared - # object. - QEMU_CFLAGS="$QEMU_CFLAGS -DSTAP_SDT_V2" - fi + error_exit "epoxy not present." \ + "Please install the epoxy devel package." fi ########################################## @@ -3853,7 +1601,7 @@ else ;; ucontext) if test "$ucontext_works" != "yes"; then - feature_not_found "ucontext" + error_exit "'ucontext' backend requested but makecontext not available" fi ;; sigaltstack) @@ -3867,24 +1615,13 @@ else esac fi -if test "$coroutine_pool" = ""; then - coroutine_pool=yes -fi - -if test "$debug_stack_usage" = "yes"; then - if test "$coroutine_pool" = "yes"; then - echo "WARN: disabling coroutine pool for stack usage debugging" - coroutine_pool=no - fi -fi - ################################################## # SafeStack if test "$safe_stack" = "yes"; then cat > $TMPC << EOF -int main(int argc, char *argv[]) +int main(void) { #if ! __has_feature(safe_stack) #error SafeStack Disabled @@ -3906,7 +1643,7 @@ EOF fi else cat > $TMPC << EOF -int main(int argc, char *argv[]) +int main(void) { #if defined(__has_feature) #if __has_feature(safe_stack) @@ -3940,174 +1677,6 @@ else # "$safe_stack" = "" fi fi -######################################## -# check if cpuid.h is usable. - -cat > $TMPC << EOF -#include -int main(void) { - unsigned a, b, c, d; - int max = __get_cpuid_max(0, 0); - - if (max >= 1) { - __cpuid(1, a, b, c, d); - } - - if (max >= 7) { - __cpuid_count(7, 0, a, b, c, d); - } - - return 0; -} -EOF -if compile_prog "" "" ; then - cpuid_h=yes -fi - -########################################## -# avx2 optimization requirement check -# -# There is no point enabling this if cpuid.h is not usable, -# since we won't be able to select the new routines. 
- -if test "$cpuid_h" = "yes" && test "$avx2_opt" != "no"; then - cat > $TMPC << EOF -#pragma GCC push_options -#pragma GCC target("avx2") -#include -#include -static int bar(void *a) { - __m256i x = *(__m256i *)a; - return _mm256_testz_si256(x, x); -} -int main(int argc, char *argv[]) { return bar(argv[0]); } -EOF - if compile_object "-Werror" ; then - avx2_opt="yes" - else - avx2_opt="no" - fi -fi - -########################################## -# avx512f optimization requirement check -# -# There is no point enabling this if cpuid.h is not usable, -# since we won't be able to select the new routines. -# by default, it is turned off. -# if user explicitly want to enable it, check environment - -if test "$cpuid_h" = "yes" && test "$avx512f_opt" = "yes"; then - cat > $TMPC << EOF -#pragma GCC push_options -#pragma GCC target("avx512f") -#include -#include -static int bar(void *a) { - __m512i x = *(__m512i *)a; - return _mm512_test_epi64_mask(x, x); -} -int main(int argc, char *argv[]) -{ - return bar(argv[0]); -} -EOF - if ! compile_object "-Werror" ; then - avx512f_opt="no" - fi -else - avx512f_opt="no" -fi - -######################################## -# check if __[u]int128_t is usable. - -int128=no -cat > $TMPC << EOF -__int128_t a; -__uint128_t b; -int main (void) { - a = a + b; - b = a * b; - a = a * a; - return 0; -} -EOF -if compile_prog "" "" ; then - int128=yes -fi - -######################################### -# See if 128-bit atomic operations are supported. - -atomic128=no -if test "$int128" = "yes"; then - cat > $TMPC << EOF -int main(void) -{ - unsigned __int128 x = 0, y = 0; - y = __atomic_load(&x, 0); - __atomic_store(&x, y, 0); - __atomic_compare_exchange(&x, &y, x, 0, 0, 0); - return 0; -} -EOF - if compile_prog "" "" ; then - atomic128=yes - fi -fi - -cmpxchg128=no -if test "$int128" = yes && test "$atomic128" = no; then - cat > $TMPC << EOF -int main(void) -{ - unsigned __int128 x = 0, y = 0; - __sync_val_compare_and_swap_16(&x, y, x); - return 0; -} -EOF - if compile_prog "" "" ; then - cmpxchg128=yes - fi -fi - -######################################### -# See if 64-bit atomic operations are supported. -# Note that without __atomic builtins, we can only -# assume atomic loads/stores max at pointer size. - -cat > $TMPC << EOF -#include -int main(void) -{ - uint64_t x = 0, y = 0; - y = __atomic_load_n(&x, __ATOMIC_RELAXED); - __atomic_store_n(&x, y, __ATOMIC_RELAXED); - __atomic_compare_exchange_n(&x, &y, x, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); - __atomic_exchange_n(&x, y, __ATOMIC_RELAXED); - __atomic_fetch_add(&x, y, __ATOMIC_RELAXED); - return 0; -} -EOF -if compile_prog "" "" ; then - atomic64=yes -fi - -######################################## -# check if getauxval is available. 
- -getauxval=no -cat > $TMPC << EOF -#include -int main(void) { - return getauxval(AT_HWCAP) == 0; -} -EOF -if compile_prog "" "" ; then - getauxval=yes -fi - ######################################## # check if ccache is interfering with # semantic analysis of macros @@ -4120,7 +1689,7 @@ static const int Z = 1; #define TAUT(X) ((X) == Z) #define PAREN(X, Y) (X == Y) #define ID(X) (X) -int main(int argc, char *argv[]) +int main(void) { int x = 0, y = 0; x = ID(x); @@ -4150,105 +1719,6 @@ if test "$fortify_source" != "no"; then fi fi -########################################## -# check if struct fsxattr is available via linux/fs.h - -have_fsxattr=no -cat > $TMPC << EOF -#include -struct fsxattr foo; -int main(void) { - return 0; -} -EOF -if compile_prog "" "" ; then - have_fsxattr=yes -fi - -########################################## -# check for usable membarrier system call -if test "$membarrier" = "yes"; then - have_membarrier=no - if test "$mingw32" = "yes" ; then - have_membarrier=yes - elif test "$linux" = "yes" ; then - cat > $TMPC << EOF - #include - #include - #include - #include - int main(void) { - syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0); - syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0); - exit(0); - } -EOF - if compile_prog "" "" ; then - have_membarrier=yes - fi - fi - if test "$have_membarrier" = "no"; then - feature_not_found "membarrier" "membarrier system call not available" - fi -else - # Do not enable it by default even for Mingw32, because it doesn't - # work on Wine. - membarrier=no -fi - -########################################## -# check for usable AF_VSOCK environment -have_af_vsock=no -cat > $TMPC << EOF -#include -#include -#include -#if !defined(AF_VSOCK) -# error missing AF_VSOCK flag -#endif -#include -int main(void) { - int sock, ret; - struct sockaddr_vm svm; - socklen_t len = sizeof(svm); - sock = socket(AF_VSOCK, SOCK_STREAM, 0); - ret = getpeername(sock, (struct sockaddr *)&svm, &len); - if ((ret == -1) && (errno == ENOTCONN)) { - return 0; - } - return -1; -} -EOF -if compile_prog "" "" ; then - have_af_vsock=yes -fi - -########################################## -# check for usable AF_ALG environment -have_afalg=no -cat > $TMPC << EOF -#include -#include -#include -#include -int main(void) { - int sock; - sock = socket(AF_ALG, SOCK_SEQPACKET, 0); - return sock; -} -EOF -if compile_prog "" "" ; then - have_afalg=yes -fi -if test "$crypto_afalg" = "yes" -then - if test "$have_afalg" != "yes" - then - error_exit "AF_ALG requested but could not be detected" - fi -fi - - ########################################## # checks for sanitizers @@ -4296,18 +1766,6 @@ EOF fi fi -########################################## -# checks for fuzzer -if test "$fuzzing" = "yes" && test -z "${LIB_FUZZING_ENGINE+xxx}"; then - write_c_fuzzer_skeleton - if compile_prog "$CPU_CFLAGS -Werror -fsanitize=fuzzer" ""; then - have_fuzzer=yes - else - error_exit "Your compiler doesn't support -fsanitize=fuzzer" - exit 1 - fi -fi - # Thread sanitizer is, for now, much noisier than the other sanitizers; # keep it separate until that is not the case. if test "$tsan" = "yes" && test "$sanitizers" = "yes"; then @@ -4333,62 +1791,429 @@ EOF fi ########################################## -# check for slirp +# functions to probe cross compilers -case "$slirp" in - auto | enabled | internal) - # Simpler to always update submodule, even if not needed. 
- git_submodules="${git_submodules} slirp" - ;; -esac - -# Check for slirp smbd dupport -: ${smbd=${SMBD-/usr/sbin/smbd}} -if test "$slirp_smbd" != "no" ; then - if test "$mingw32" = "yes" ; then - if test "$slirp_smbd" = "yes" ; then - error_exit "Host smbd not supported on this platform." +container="no" +if test $use_containers = "yes" && (has "docker" || has "podman"); then + case $($python "$source_path"/tests/docker/docker.py probe) in + *docker) container=docker ;; + podman) container=podman ;; + no) container=no ;; + esac + if test "$container" != "no"; then + docker_py="$python $source_path/tests/docker/docker.py --engine $container" fi - slirp_smbd=no - else - slirp_smbd=yes - fi fi +# cross compilers defaults, can be overridden with --cross-cc-ARCH +: ${cross_prefix_aarch64="aarch64-linux-gnu-"} +: ${cross_prefix_aarch64_be="$cross_prefix_aarch64"} +: ${cross_prefix_alpha="alpha-linux-gnu-"} +: ${cross_prefix_arm="arm-linux-gnueabihf-"} +: ${cross_prefix_armeb="$cross_prefix_arm"} +: ${cross_prefix_hexagon="hexagon-unknown-linux-musl-"} +: ${cross_prefix_loongarch64="loongarch64-unknown-linux-gnu-"} +: ${cross_prefix_hppa="hppa-linux-gnu-"} +: ${cross_prefix_i386="i686-linux-gnu-"} +: ${cross_prefix_m68k="m68k-linux-gnu-"} +: ${cross_prefix_microblaze="microblaze-linux-musl-"} +: ${cross_prefix_mips64el="mips64el-linux-gnuabi64-"} +: ${cross_prefix_mips64="mips64-linux-gnuabi64-"} +: ${cross_prefix_mipsel="mipsel-linux-gnu-"} +: ${cross_prefix_mips="mips-linux-gnu-"} +: ${cross_prefix_nios2="nios2-linux-gnu-"} +: ${cross_prefix_ppc="powerpc-linux-gnu-"} +: ${cross_prefix_ppc64="powerpc64-linux-gnu-"} +: ${cross_prefix_ppc64le="$cross_prefix_ppc64"} +: ${cross_prefix_riscv64="riscv64-linux-gnu-"} +: ${cross_prefix_s390x="s390x-linux-gnu-"} +: ${cross_prefix_sh4="sh4-linux-gnu-"} +: ${cross_prefix_sparc64="sparc64-linux-gnu-"} +: ${cross_prefix_sparc="$cross_prefix_sparc64"} +: ${cross_prefix_x86_64="x86_64-linux-gnu-"} + +: ${cross_cc_aarch64_be="$cross_cc_aarch64"} +: ${cross_cc_cflags_aarch64_be="-mbig-endian"} +: ${cross_cc_armeb="$cross_cc_arm"} +: ${cross_cc_cflags_armeb="-mbig-endian"} +: ${cross_cc_hexagon="hexagon-unknown-linux-musl-clang"} +: ${cross_cc_cflags_hexagon="-mv67 -O2 -static"} +: ${cross_cc_cflags_i386="-m32"} +: ${cross_cc_cflags_ppc="-m32 -mbig-endian"} +: ${cross_cc_cflags_ppc64="-m64 -mbig-endian"} +: ${cross_cc_ppc64le="$cross_cc_ppc64"} +: ${cross_cc_cflags_ppc64le="-m64 -mlittle-endian"} +: ${cross_cc_cflags_sparc64="-m64 -mcpu=ultrasparc"} +: ${cross_cc_sparc="$cross_cc_sparc64"} +: ${cross_cc_cflags_sparc="-m32 -mcpu=supersparc"} +: ${cross_cc_cflags_x86_64="-m64"} + +compute_target_variable() { + eval "$2=" + if eval test -n "\"\${cross_prefix_$1}\""; then + if eval has "\"\${cross_prefix_$1}\$3\""; then + eval "$2=\"\${cross_prefix_$1}\$3\"" + fi + fi +} + +have_target() { + for i; do + case " $target_list " in + *" $i "*) return 0;; + *) ;; + esac + done + return 1 +} + +# probe_target_compiler TARGET +# +# Look for a compiler for the given target, either native or cross. +# Set variables target_* if a compiler is found, and container_cross_* +# if a Docker-based cross-compiler image is known for the target. +# Set got_cross_cc to yes/no depending on whether a non-container-based +# compiler was found. +# +# If TARGET is a user-mode emulation target, also set build_static to +# "y" if static linking is possible. 
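+#
+# Illustrative call site (a sketch only; the firmware and tests/tcg
+# sections below follow this same pattern):
+#
+#   if probe_target_compiler i386-softmmu; then
+#       write_target_makefile >> pc-bios/optionrom/config.mak
+#   fi
+#
+# i.e. probe first, then let write_target_makefile emit the CC/CCAS/AR/...
+# assignments (or the docker cc wrappers) into the per-target config.mak.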
+# +probe_target_compiler() { + # reset all output variables + got_cross_cc=no + container_image= + container_hosts= + container_cross_cc= + container_cross_ar= + container_cross_as= + container_cross_ld= + container_cross_nm= + container_cross_objcopy= + container_cross_ranlib= + container_cross_strip= + + # We shall skip configuring the target compiler if the user didn't + # bother enabling an appropriate guest. This avoids building + # extraneous firmware images and tests. + if test "${target_list#*$1}" != "$1"; then + break; + else + return 1 + fi + + target_arch=${1%%-*} + case $target_arch in + aarch64) container_hosts="x86_64 aarch64" ;; + alpha) container_hosts=x86_64 ;; + arm) container_hosts="x86_64 aarch64" ;; + cris) container_hosts=x86_64 ;; + hexagon) container_hosts=x86_64 ;; + hppa) container_hosts=x86_64 ;; + i386) container_hosts=x86_64 ;; + loongarch64) container_hosts=x86_64 ;; + m68k) container_hosts=x86_64 ;; + microblaze) container_hosts=x86_64 ;; + mips64el) container_hosts=x86_64 ;; + mips64) container_hosts=x86_64 ;; + mipsel) container_hosts=x86_64 ;; + mips) container_hosts=x86_64 ;; + nios2) container_hosts=x86_64 ;; + ppc) container_hosts=x86_64 ;; + ppc64|ppc64le) container_hosts=x86_64 ;; + riscv64) container_hosts=x86_64 ;; + s390x) container_hosts=x86_64 ;; + sh4) container_hosts=x86_64 ;; + sparc64) container_hosts=x86_64 ;; + tricore) container_hosts=x86_64 ;; + x86_64) container_hosts="aarch64 ppc64el x86_64" ;; + xtensa*) container_hosts=x86_64 ;; + esac + + for host in $container_hosts; do + test "$container" != no || continue + test "$host" = "$cpu" || continue + case $target_arch in + aarch64) + # We don't have any bigendian build tools so we only use this for AArch64 + container_image=debian-arm64-cross + container_cross_prefix=aarch64-linux-gnu- + container_cross_cc=${container_cross_prefix}gcc-10 + ;; + alpha) + container_image=debian-alpha-cross + container_cross_prefix=alpha-linux-gnu- + ;; + arm) + # We don't have any bigendian build tools so we only use this for ARM + container_image=debian-armhf-cross + container_cross_prefix=arm-linux-gnueabihf- + ;; + cris) + container_image=fedora-cris-cross + container_cross_prefix=cris-linux-gnu- + ;; + hexagon) + container_image=debian-hexagon-cross + container_cross_prefix=hexagon-unknown-linux-musl- + container_cross_cc=${container_cross_prefix}clang + ;; + hppa) + container_image=debian-hppa-cross + container_cross_prefix=hppa-linux-gnu- + ;; + i386) + container_image=fedora-i386-cross + container_cross_prefix= + ;; + loongarch64) + container_image=debian-loongarch-cross + container_cross_prefix=loongarch64-unknown-linux-gnu- + ;; + m68k) + container_image=debian-m68k-cross + container_cross_prefix=m68k-linux-gnu- + ;; + microblaze) + container_image=debian-microblaze-cross + container_cross_prefix=microblaze-linux-musl- + ;; + mips64el) + container_image=debian-mips64el-cross + container_cross_prefix=mips64el-linux-gnuabi64- + ;; + mips64) + container_image=debian-mips64-cross + container_cross_prefix=mips64-linux-gnuabi64- + ;; + mipsel) + container_image=debian-mipsel-cross + container_cross_prefix=mipsel-linux-gnu- + ;; + mips) + container_image=debian-mips-cross + container_cross_prefix=mips-linux-gnu- + ;; + nios2) + container_image=debian-nios2-cross + container_cross_prefix=nios2-linux-gnu- + ;; + ppc) + container_image=debian-powerpc-test-cross + container_cross_prefix=powerpc-linux-gnu- + container_cross_cc=${container_cross_prefix}gcc-10 + ;; + ppc64|ppc64le) + 
container_image=debian-powerpc-test-cross + container_cross_prefix=powerpc${target_arch#ppc}-linux-gnu- + container_cross_cc=${container_cross_prefix}gcc-10 + ;; + riscv64) + container_image=debian-riscv64-test-cross + container_cross_prefix=riscv64-linux-gnu- + ;; + s390x) + container_image=debian-s390x-cross + container_cross_prefix=s390x-linux-gnu- + ;; + sh4) + container_image=debian-sh4-cross + container_cross_prefix=sh4-linux-gnu- + ;; + sparc64) + container_image=debian-sparc64-cross + container_cross_prefix=sparc64-linux-gnu- + ;; + tricore) + container_image=debian-tricore-cross + container_cross_prefix=tricore- + container_cross_as=tricore-as + container_cross_ld=tricore-ld + break + ;; + x86_64) + container_image=debian-amd64-cross + container_cross_prefix=x86_64-linux-gnu- + ;; + xtensa*) + container_hosts=x86_64 + container_image=debian-xtensa-cross + + # default to the dc232b cpu + container_cross_prefix=/opt/2020.07/xtensa-dc232b-elf/bin/xtensa-dc232b-elf- + ;; + esac + : ${container_cross_cc:=${container_cross_prefix}gcc} + : ${container_cross_ar:=${container_cross_prefix}ar} + : ${container_cross_as:=${container_cross_prefix}as} + : ${container_cross_ld:=${container_cross_prefix}ld} + : ${container_cross_nm:=${container_cross_prefix}nm} + : ${container_cross_objcopy:=${container_cross_prefix}objcopy} + : ${container_cross_ranlib:=${container_cross_prefix}ranlib} + : ${container_cross_strip:=${container_cross_prefix}strip} + done + + try=cross + case "$target_arch:$cpu" in + aarch64_be:aarch64 | \ + armeb:arm | \ + i386:x86_64 | \ + mips*:mips64 | \ + ppc*:ppc64 | \ + sparc:sparc64 | \ + "$cpu:$cpu") + try='native cross' ;; + esac + eval "target_cflags=\${cross_cc_cflags_$target_arch}" + for thistry in $try; do + case $thistry in + native) + target_cc=$cc + target_ccas=$ccas + target_ar=$ar + target_as=$as + target_ld=$ld + target_nm=$nm + target_objcopy=$objcopy + target_ranlib=$ranlib + target_strip=$strip + ;; + cross) + target_cc= + if eval test -n "\"\${cross_cc_$target_arch}\""; then + if eval has "\"\${cross_cc_$target_arch}\""; then + eval "target_cc=\"\${cross_cc_$target_arch}\"" + fi + else + compute_target_variable $target_arch target_cc gcc + fi + target_ccas=$target_cc + compute_target_variable $target_arch target_ar ar + compute_target_variable $target_arch target_as as + compute_target_variable $target_arch target_ld ld + compute_target_variable $target_arch target_nm nm + compute_target_variable $target_arch target_objcopy objcopy + compute_target_variable $target_arch target_ranlib ranlib + compute_target_variable $target_arch target_strip strip + ;; + esac + + if test -n "$target_cc"; then + case $target_arch in + i386|x86_64) + if $target_cc --version | grep -qi "clang"; then + continue + fi + ;; + esac + elif test -n "$target_as" && test -n "$target_ld"; then + # Special handling for assembler only targets + case $target in + tricore-softmmu) + build_static= + got_cross_cc=yes + break + ;; + *) + continue + ;; + esac + else + continue + fi + + write_c_skeleton + case $1 in + *-softmmu) + if do_compiler "$target_cc" $target_cflags -o $TMPO -c $TMPC && + do_compiler "$target_cc" $target_cflags -r -nostdlib -o "${TMPDIR1}/${TMPB}2.o" "$TMPO" -lgcc; then + got_cross_cc=yes + break + fi + ;; + *) + if do_compiler "$target_cc" $target_cflags -o $TMPE $TMPC -static ; then + build_static=y + got_cross_cc=yes + break + fi + if do_compiler "$target_cc" $target_cflags -o $TMPE $TMPC ; then + build_static= + got_cross_cc=yes + break + fi + ;; + esac + done + if test 
$got_cross_cc != yes; then + build_static= + target_cc= + target_ccas= + target_ar= + target_as= + target_ld= + target_nm= + target_objcopy= + target_ranlib= + target_strip= + fi + test -n "$target_cc" +} + +write_target_makefile() { + echo "EXTRA_CFLAGS=$target_cflags" + if test -z "$target_cc" && test -z "$target_as"; then + test -z "$container_image" && error_exit "Internal error: could not find cross compiler for $1?" + echo "$1: docker-image-$container_image" >> Makefile.prereqs + if test -n "$container_cross_cc"; then + echo "CC=$docker_py cc --cc $container_cross_cc -i qemu/$container_image -s $source_path --" + echo "CCAS=$docker_py cc --cc $container_cross_cc -i qemu/$container_image -s $source_path --" + fi + echo "AR=$docker_py cc --cc $container_cross_ar -i qemu/$container_image -s $source_path --" + echo "AS=$docker_py cc --cc $container_cross_as -i qemu/$container_image -s $source_path --" + echo "LD=$docker_py cc --cc $container_cross_ld -i qemu/$container_image -s $source_path --" + echo "NM=$docker_py cc --cc $container_cross_nm -i qemu/$container_image -s $source_path --" + echo "OBJCOPY=$docker_py cc --cc $container_cross_objcopy -i qemu/$container_image -s $source_path --" + echo "RANLIB=$docker_py cc --cc $container_cross_ranlib -i qemu/$container_image -s $source_path --" + echo "STRIP=$docker_py cc --cc $container_cross_strip -i qemu/$container_image -s $source_path --" + else + if test -n "$target_cc"; then + echo "CC=$target_cc" + echo "CCAS=$target_ccas" + fi + if test -n "$target_ar"; then + echo "AR=$target_ar" + fi + if test -n "$target_as"; then + echo "AS=$target_as" + fi + if test -n "$target_ld"; then + echo "LD=$target_ld" + fi + if test -n "$target_nm"; then + echo "NM=$target_nm" + fi + if test -n "$target_objcopy"; then + echo "OBJCOPY=$target_objcopy" + fi + if test -n "$target_ranlib"; then + echo "RANLIB=$target_ranlib" + fi + if test -n "$target_strip"; then + echo "STRIP=$target_strip" + fi + fi +} + ########################################## -# check for usable __NR_keyctl syscall +# check for vfio_user_server -if test "$linux" = "yes" ; then - - have_keyring=no - cat > $TMPC << EOF -#include -#include -#include -#include -int main(void) { - return syscall(__NR_keyctl, KEYCTL_READ, 0, NULL, NULL, 0); -} -EOF - if compile_prog "" "" ; then - have_keyring=yes +case "$vfio_user_server" in + enabled ) + if test "$git_submodules_action" != "ignore"; then + git_submodules="${git_submodules} subprojects/libvfio-user" fi -fi -if test "$secret_keyring" != "no" -then - if test "$have_keyring" = "yes" - then - secret_keyring=yes - else - if test "$secret_keyring" = "yes" - then - error_exit "syscall __NR_keyctl requested, \ -but not implemented on your system" - else - secret_keyring=no - fi - fi -fi + ;; +esac ########################################## # End of CC checks @@ -4396,23 +2221,8 @@ fi write_c_skeleton -if test "$gcov" = "yes" ; then - : -elif test "$fortify_source" = "yes" ; then +if test "$fortify_source" = "yes" ; then QEMU_CFLAGS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 $QEMU_CFLAGS" - debug=no -fi - -case "$ARCH" in -alpha) - # Ensure there's only a single GP - QEMU_CFLAGS="-msmall-data $QEMU_CFLAGS" -;; -esac - -if test "$gprof" = "yes" ; then - QEMU_CFLAGS="-p $QEMU_CFLAGS" - QEMU_LDFLAGS="-p $QEMU_LDFLAGS" fi if test "$have_asan" = "yes"; then @@ -4442,44 +2252,6 @@ if test "$have_ubsan" = "yes"; then fi ########################################## - -# Exclude --warn-common with TSan to suppress warnings from the TSan libraries. 
-if test "$solaris" = "no" && test "$tsan" = "no"; then - if $ld --version 2>/dev/null | grep "GNU ld" >/dev/null 2>/dev/null ; then - QEMU_LDFLAGS="-Wl,--warn-common $QEMU_LDFLAGS" - fi -fi - -# Use ASLR, no-SEH and DEP if available -if test "$mingw32" = "yes" ; then - flags="--no-seh --nxcompat" - - # Disable ASLR for debug builds to allow debugging with gdb - if test "$debug" = "no" ; then - flags="--dynamicbase $flags" - fi - - for flag in $flags; do - if ld_has $flag ; then - QEMU_LDFLAGS="-Wl,$flag $QEMU_LDFLAGS" - fi - done -fi - -# Probe for guest agent support/options - -if [ "$guest_agent" != "no" ]; then - if [ "$softmmu" = no -a "$want_tools" = no ] ; then - guest_agent=no - elif [ "$linux" = "yes" -o "$bsd" = "yes" -o "$solaris" = "yes" -o "$mingw32" = "yes" ] ; then - guest_agent=yes - elif [ "$guest_agent" != yes ]; then - guest_agent=no - else - error_exit "Guest agent is not supported on this platform" - fi -fi - # Guest agent Windows MSI package if test "$QEMU_GA_MANUFACTURER" = ""; then @@ -4492,76 +2264,84 @@ if test "$QEMU_GA_VERSION" = ""; then QEMU_GA_VERSION=$(cat $source_path/QEMU_VERSION) fi -QEMU_GA_MSI_MINGW_DLL_PATH="$($pkg_config --variable=prefix glib-2.0)/bin" + +####################################### +# cross-compiled firmware targets + +# Set up build tree symlinks that point back into the source tree +# (these can be both files and directories). +# Caution: avoid adding files or directories here using wildcards. This +# will result in problems later if a new file matching the wildcard is +# added to the source tree -- nothing will cause configure to be rerun +# so the build tree will be missing the link back to the new file, and +# tests might fail. Prefer to keep the relevant files in their own +# directory and symlink the directory instead. +LINKS="Makefile" +LINKS="$LINKS pc-bios/optionrom/Makefile" +LINKS="$LINKS pc-bios/s390-ccw/Makefile" +LINKS="$LINKS pc-bios/vof/Makefile" +LINKS="$LINKS .gdbinit scripts" # scripts needed by relative path in .gdbinit +LINKS="$LINKS tests/avocado tests/data" +LINKS="$LINKS tests/qemu-iotests/check" +LINKS="$LINKS python" +LINKS="$LINKS contrib/plugins/Makefile " +for f in $LINKS ; do + if [ -e "$source_path/$f" ]; then + mkdir -p "$(dirname ./"$f")" + symlink "$source_path/$f" "$f" + fi +done + +echo "# Automatically generated by configure - do not modify" > Makefile.prereqs # Mac OS X ships with a broken assembler roms= -if { test "$cpu" = "i386" || test "$cpu" = "x86_64"; } && \ - test "$targetos" != "Darwin" && test "$targetos" != "SunOS" && \ - test "$targetos" != "Haiku" && test "$softmmu" = yes ; then - # Different host OS linkers have different ideas about the name of the ELF - # emulation. Linux and OpenBSD/amd64 use 'elf_i386'; FreeBSD uses the _fbsd - # variant; OpenBSD/i386 uses the _obsd variant; and Windows uses i386pe. 
- for emu in elf_i386 elf_i386_fbsd elf_i386_obsd i386pe; do - if "$ld" -verbose 2>&1 | grep -q "^[[:space:]]*$emu[[:space:]]*$"; then - ld_i386_emulation="$emu" - roms="optionrom" - break - fi - done +if have_target i386-softmmu x86_64-softmmu && \ + test "$targetos" != "darwin" && test "$targetos" != "sunos" && \ + test "$targetos" != "haiku" && \ + probe_target_compiler i386-softmmu; then + roms="pc-bios/optionrom" + config_mak=pc-bios/optionrom/config.mak + echo "# Automatically generated by configure - do not modify" > $config_mak + echo "TOPSRC_DIR=$source_path" >> $config_mak + write_target_makefile >> $config_mak fi -# Only build s390-ccw bios if we're on s390x and the compiler has -march=z900 -# or -march=z10 (which is the lowest architecture level that Clang supports) -if test "$cpu" = "s390x" ; then +if have_target ppc-softmmu ppc64-softmmu && \ + probe_target_compiler ppc-softmmu; then + roms="$roms pc-bios/vof" + config_mak=pc-bios/vof/config.mak + echo "# Automatically generated by configure - do not modify" > $config_mak + echo "SRC_DIR=$source_path/pc-bios/vof" >> $config_mak + write_target_makefile >> $config_mak +fi + +# Only build s390-ccw bios if the compiler has -march=z900 or -march=z10 +# (which is the lowest architecture level that Clang supports) +if have_target s390x-softmmu && probe_target_compiler s390x-softmmu; then write_c_skeleton - compile_prog "-march=z900" "" + do_compiler "$target_cc" $target_cc_cflags -march=z900 -o $TMPO -c $TMPC has_z900=$? - if [ $has_z900 = 0 ] || compile_object "-march=z10 -msoft-float -Werror"; then + if [ $has_z900 = 0 ] || do_compiler "$target_cc" $target_cc_cflags -march=z10 -msoft-float -Werror -o $TMPO -c $TMPC; then if [ $has_z900 != 0 ]; then echo "WARNING: Your compiler does not support the z900!" echo " The s390-ccw bios will only work with guest CPUs >= z10." fi - roms="$roms s390-ccw" + roms="$roms pc-bios/s390-ccw" + config_mak=pc-bios/s390-ccw/config-host.mak + echo "# Automatically generated by configure - do not modify" > $config_mak + echo "SRC_PATH=$source_path/pc-bios/s390-ccw" >> $config_mak + write_target_makefile >> $config_mak # SLOF is required for building the s390-ccw firmware on s390x, # since it is using the libnet code from SLOF for network booting. git_submodules="${git_submodules} roms/SLOF" fi fi -# Check that the C++ compiler exists and works with the C compiler. -# All the QEMU_CXXFLAGS are based on QEMU_CFLAGS. Keep this at the end to don't miss any other that could be added. 
-if has $cxx; then - cat > $TMPC < $TMPCXX <> $config_host_mak echo "GIT_SUBMODULES=$git_submodules" >> $config_host_mak echo "GIT_SUBMODULES_ACTION=$git_submodules_action" >> $config_host_mak -echo "ARCH=$ARCH" >> $config_host_mak - if test "$debug_tcg" = "yes" ; then echo "CONFIG_DEBUG_TCG=y" >> $config_host_mak fi -if test "$strip_opt" = "yes" ; then - echo "STRIP=${strip}" >> $config_host_mak -fi -if test "$bigendian" = "yes" ; then - echo "HOST_WORDS_BIGENDIAN=y" >> $config_host_mak -fi if test "$mingw32" = "yes" ; then echo "CONFIG_WIN32=y" >> $config_host_mak - if test "$guest_agent_with_vss" = "yes" ; then - echo "CONFIG_QGA_VSS=y" >> $config_host_mak - echo "QGA_VSS_PROVIDER=$qga_vss_provider" >> $config_host_mak - echo "WIN_SDK=\"$win_sdk\"" >> $config_host_mak - fi - if test "$guest_agent_ntddscsi" = "yes" ; then - echo "CONFIG_QGA_NTDDSCSI=y" >> $config_host_mak - fi - echo "QEMU_GA_MSI_MINGW_DLL_PATH=${QEMU_GA_MSI_MINGW_DLL_PATH}" >> $config_host_mak echo "QEMU_GA_MANUFACTURER=${QEMU_GA_MANUFACTURER}" >> $config_host_mak echo "QEMU_GA_DISTRO=${QEMU_GA_DISTRO}" >> $config_host_mak echo "QEMU_GA_VERSION=${QEMU_GA_VERSION}" >> $config_host_mak @@ -4615,210 +2378,21 @@ fi if test "$solaris" = "yes" ; then echo "CONFIG_SOLARIS=y" >> $config_host_mak fi -if test "$haiku" = "yes" ; then - echo "CONFIG_HAIKU=y" >> $config_host_mak -fi if test "$static" = "yes" ; then echo "CONFIG_STATIC=y" >> $config_host_mak fi -if test "$profiler" = "yes" ; then - echo "CONFIG_PROFILER=y" >> $config_host_mak -fi -if test "$want_tools" = "yes" ; then - echo "CONFIG_TOOLS=y" >> $config_host_mak -fi -if test "$guest_agent" = "yes" ; then - echo "CONFIG_GUEST_AGENT=y" >> $config_host_mak -fi -if test "$slirp_smbd" = "yes" ; then - echo "CONFIG_SLIRP_SMBD=y" >> $config_host_mak - echo "CONFIG_SMBD_COMMAND=\"$smbd\"" >> $config_host_mak -fi -if test "$vde" = "yes" ; then - echo "CONFIG_VDE=y" >> $config_host_mak - echo "VDE_LIBS=$vde_libs" >> $config_host_mak -fi -if test "$netmap" = "yes" ; then - echo "CONFIG_NETMAP=y" >> $config_host_mak -fi -if test "$l2tpv3" = "yes" ; then - echo "CONFIG_L2TPV3=y" >> $config_host_mak -fi -if test "$gprof" = "yes" ; then - echo "CONFIG_GPROF=y" >> $config_host_mak -fi -echo "CONFIG_AUDIO_DRIVERS=$audio_drv_list" >> $config_host_mak -for drv in $audio_drv_list; do - def=CONFIG_AUDIO_$(echo $drv | LC_ALL=C tr '[a-z]' '[A-Z]') - echo "$def=y" >> $config_host_mak -done -if test "$alsa" = "yes" ; then - echo "CONFIG_ALSA=y" >> $config_host_mak -fi -echo "ALSA_LIBS=$alsa_libs" >> $config_host_mak -echo "ALSA_CFLAGS=$alsa_cflags" >> $config_host_mak -if test "$libpulse" = "yes" ; then - echo "CONFIG_LIBPULSE=y" >> $config_host_mak -fi -echo "PULSE_LIBS=$pulse_libs" >> $config_host_mak -echo "PULSE_CFLAGS=$pulse_cflags" >> $config_host_mak -echo "COREAUDIO_LIBS=$coreaudio_libs" >> $config_host_mak -echo "DSOUND_LIBS=$dsound_libs" >> $config_host_mak -echo "OSS_LIBS=$oss_libs" >> $config_host_mak -if test "$libjack" = "yes" ; then - echo "CONFIG_LIBJACK=y" >> $config_host_mak -fi -echo "JACK_LIBS=$jack_libs" >> $config_host_mak -if test "$audio_win_int" = "yes" ; then - echo "CONFIG_AUDIO_WIN_INT=y" >> $config_host_mak -fi -echo "CONFIG_BDRV_RW_WHITELIST=$block_drv_rw_whitelist" >> $config_host_mak -echo "CONFIG_BDRV_RO_WHITELIST=$block_drv_ro_whitelist" >> $config_host_mak -if test "$block_drv_whitelist_tools" = "yes" ; then - echo "CONFIG_BDRV_WHITELIST_TOOLS=y" >> $config_host_mak -fi -if test "$xfs" = "yes" ; then - echo "CONFIG_XFS=y" >> $config_host_mak -fi 
-qemu_version=$(head $source_path/QEMU_VERSION) -echo "PKGVERSION=$pkgversion" >>$config_host_mak echo "SRC_PATH=$source_path" >> $config_host_mak echo "TARGET_DIRS=$target_list" >> $config_host_mak if test "$modules" = "yes"; then - # $shacmd can generate a hash started with digit, which the compiler doesn't - # like as an symbol. So prefix it with an underscore - echo "CONFIG_STAMP=_$( (echo $qemu_version; echo $pkgversion; cat $0) | $shacmd - | cut -f1 -d\ )" >> $config_host_mak echo "CONFIG_MODULES=y" >> $config_host_mak fi -if test "$module_upgrades" = "yes"; then - echo "CONFIG_MODULE_UPGRADES=y" >> $config_host_mak -fi -if test "$have_usbfs" = "yes" ; then - echo "CONFIG_USBFS=y" >> $config_host_mak -fi -if test "$gio" = "yes" ; then - echo "CONFIG_GIO=y" >> $config_host_mak - echo "GIO_CFLAGS=$gio_cflags" >> $config_host_mak - echo "GIO_LIBS=$gio_libs" >> $config_host_mak -fi -if test "$gdbus_codegen" != "" ; then - echo "GDBUS_CODEGEN=$gdbus_codegen" >> $config_host_mak -fi -echo "CONFIG_TLS_PRIORITY=\"$tls_priority\"" >> $config_host_mak - -# Work around a system header bug with some kernel/XFS header -# versions where they both try to define 'struct fsxattr': -# xfs headers will not try to redefine structs from linux headers -# if this macro is set. -if test "$have_fsxattr" = "yes" ; then - echo "HAVE_FSXATTR=y" >> $config_host_mak -fi -if test "$xen" = "enabled" ; then - echo "CONFIG_XEN_BACKEND=y" >> $config_host_mak - echo "CONFIG_XEN_CTRL_INTERFACE_VERSION=$xen_ctrl_version" >> $config_host_mak - echo "XEN_CFLAGS=$xen_cflags" >> $config_host_mak - echo "XEN_LIBS=$xen_libs" >> $config_host_mak -fi -if test "$linux_aio" = "yes" ; then - echo "CONFIG_LINUX_AIO=y" >> $config_host_mak -fi -if test "$vhost_scsi" = "yes" ; then - echo "CONFIG_VHOST_SCSI=y" >> $config_host_mak -fi -if test "$vhost_net" = "yes" ; then - echo "CONFIG_VHOST_NET=y" >> $config_host_mak -fi -if test "$vhost_net_user" = "yes" ; then - echo "CONFIG_VHOST_NET_USER=y" >> $config_host_mak -fi -if test "$vhost_net_vdpa" = "yes" ; then - echo "CONFIG_VHOST_NET_VDPA=y" >> $config_host_mak -fi -if test "$vhost_crypto" = "yes" ; then - echo "CONFIG_VHOST_CRYPTO=y" >> $config_host_mak -fi -if test "$vhost_vsock" = "yes" ; then - echo "CONFIG_VHOST_VSOCK=y" >> $config_host_mak - if test "$vhost_user" = "yes" ; then - echo "CONFIG_VHOST_USER_VSOCK=y" >> $config_host_mak - fi -fi -if test "$vhost_kernel" = "yes" ; then - echo "CONFIG_VHOST_KERNEL=y" >> $config_host_mak -fi -if test "$vhost_user" = "yes" ; then - echo "CONFIG_VHOST_USER=y" >> $config_host_mak -fi -if test "$vhost_vdpa" = "yes" ; then - echo "CONFIG_VHOST_VDPA=y" >> $config_host_mak -fi -if test "$vhost_user_fs" = "yes" ; then - echo "CONFIG_VHOST_USER_FS=y" >> $config_host_mak -fi -if test "$iovec" = "yes" ; then - echo "CONFIG_IOVEC=y" >> $config_host_mak -fi -if test "$membarrier" = "yes" ; then - echo "CONFIG_MEMBARRIER=y" >> $config_host_mak -fi -if test "$tcg" = "enabled" -a "$tcg_interpreter" = "true" ; then - echo "CONFIG_TCG_INTERPRETER=y" >> $config_host_mak -fi - -if test "$spice_protocol" = "yes" ; then - echo "CONFIG_SPICE_PROTOCOL=y" >> $config_host_mak - echo "SPICE_PROTOCOL_CFLAGS=$spice_protocol_cflags" >> $config_host_mak -fi -if test "$spice" = "yes" ; then - echo "CONFIG_SPICE=y" >> $config_host_mak - echo "SPICE_CFLAGS=$spice_cflags $spice_protocol_cflags" >> $config_host_mak - echo "SPICE_LIBS=$spice_libs" >> $config_host_mak -fi - -if test "$opengl" = "yes" ; then - echo "CONFIG_OPENGL=y" >> $config_host_mak - echo 
"OPENGL_CFLAGS=$opengl_cflags" >> $config_host_mak - echo "OPENGL_LIBS=$opengl_libs" >> $config_host_mak -fi - -if test "$gbm" = "yes" ; then - echo "CONFIG_GBM=y" >> $config_host_mak - echo "GBM_LIBS=$gbm_libs" >> $config_host_mak - echo "GBM_CFLAGS=$gbm_cflags" >> $config_host_mak -fi - - -if test "$avx2_opt" = "yes" ; then - echo "CONFIG_AVX2_OPT=y" >> $config_host_mak -fi - -if test "$avx512f_opt" = "yes" ; then - echo "CONFIG_AVX512F_OPT=y" >> $config_host_mak -fi # XXX: suppress that if [ "$bsd" = "yes" ] ; then echo "CONFIG_BSD=y" >> $config_host_mak fi -if test "$qom_cast_debug" = "yes" ; then - echo "CONFIG_QOM_CAST_DEBUG=y" >> $config_host_mak -fi - echo "CONFIG_COROUTINE_BACKEND=$coroutine" >> $config_host_mak -if test "$coroutine_pool" = "yes" ; then - echo "CONFIG_COROUTINE_POOL=1" >> $config_host_mak -else - echo "CONFIG_COROUTINE_POOL=0" >> $config_host_mak -fi - -if test "$debug_stack_usage" = "yes" ; then - echo "CONFIG_DEBUG_STACK_USAGE=y" >> $config_host_mak -fi - -if test "$crypto_afalg" = "yes" ; then - echo "CONFIG_AF_ALG=y" >> $config_host_mak -fi if test "$have_asan_iface_fiber" = "yes" ; then echo "CONFIG_ASAN_IFACE_FIBER=y" >> $config_host_mak @@ -4828,207 +2402,22 @@ if test "$have_tsan" = "yes" && test "$have_tsan_iface_fiber" = "yes" ; then echo "CONFIG_TSAN=y" >> $config_host_mak fi -if test "$cpuid_h" = "yes" ; then - echo "CONFIG_CPUID_H=y" >> $config_host_mak -fi - -if test "$int128" = "yes" ; then - echo "CONFIG_INT128=y" >> $config_host_mak -fi - -if test "$atomic128" = "yes" ; then - echo "CONFIG_ATOMIC128=y" >> $config_host_mak -fi - -if test "$cmpxchg128" = "yes" ; then - echo "CONFIG_CMPXCHG128=y" >> $config_host_mak -fi - -if test "$atomic64" = "yes" ; then - echo "CONFIG_ATOMIC64=y" >> $config_host_mak -fi - -if test "$getauxval" = "yes" ; then - echo "CONFIG_GETAUXVAL=y" >> $config_host_mak -fi - -if test "$libssh" = "yes" ; then - echo "CONFIG_LIBSSH=y" >> $config_host_mak - echo "LIBSSH_CFLAGS=$libssh_cflags" >> $config_host_mak - echo "LIBSSH_LIBS=$libssh_libs" >> $config_host_mak -fi - -if test "$live_block_migration" = "yes" ; then - echo "CONFIG_LIVE_BLOCK_MIGRATION=y" >> $config_host_mak -fi - -if test "$tpm" = "yes"; then - echo 'CONFIG_TPM=y' >> $config_host_mak -fi - -echo "TRACE_BACKENDS=$trace_backends" >> $config_host_mak -if have_backend "nop"; then - echo "CONFIG_TRACE_NOP=y" >> $config_host_mak -fi -if have_backend "simple"; then - echo "CONFIG_TRACE_SIMPLE=y" >> $config_host_mak - # Set the appropriate trace file. 
- trace_file="\"$trace_file-\" FMT_pid" -fi -if have_backend "log"; then - echo "CONFIG_TRACE_LOG=y" >> $config_host_mak -fi -if have_backend "ust"; then - echo "CONFIG_TRACE_UST=y" >> $config_host_mak - echo "LTTNG_UST_LIBS=$lttng_ust_libs" >> $config_host_mak -fi -if have_backend "dtrace"; then - echo "CONFIG_TRACE_DTRACE=y" >> $config_host_mak - if test "$trace_backend_stap" = "yes" ; then - echo "CONFIG_TRACE_SYSTEMTAP=y" >> $config_host_mak - fi -fi -if have_backend "ftrace"; then - if test "$linux" = "yes" ; then - echo "CONFIG_TRACE_FTRACE=y" >> $config_host_mak - else - feature_not_found "ftrace(trace backend)" "ftrace requires Linux" - fi -fi -if have_backend "syslog"; then - if test "$posix_syslog" = "yes" ; then - echo "CONFIG_TRACE_SYSLOG=y" >> $config_host_mak - else - feature_not_found "syslog(trace backend)" "syslog not available" - fi -fi -echo "CONFIG_TRACE_FILE=$trace_file" >> $config_host_mak - -if test "$rdma" = "yes" ; then - echo "CONFIG_RDMA=y" >> $config_host_mak - echo "RDMA_LIBS=$rdma_libs" >> $config_host_mak -fi - -if test "$pvrdma" = "yes" ; then - echo "CONFIG_PVRDMA=y" >> $config_host_mak -fi - -echo "XEMU_GTK_CFLAGS=$xemu_gtk_cflags" >> $config_host_mak -echo "XEMU_GTK_LIBS=$xemu_gtk_libs" >> $config_host_mak - -echo "OPENSSL_CFLAGS=$openssl_cflags" >> $config_host_mak -echo "OPENSSL_LIBS=$openssl_libs" >> $config_host_mak - -echo "LIBPCAP_CFLAGS=$libpcap_cflags" >> $config_host_mak -echo "LIBPCAP_LIBS=$libpcap_libs" >> $config_host_mak - -echo "LIBSAMPLERATE_CFLAGS=$libsamplerate_cflags" >> $config_host_mak -echo "LIBSAMPLERATE_LIBS=$libsamplerate_libs" >> $config_host_mak - -if test "$replication" = "yes" ; then - echo "CONFIG_REPLICATION=y" >> $config_host_mak -fi - -if test "$have_af_vsock" = "yes" ; then - echo "CONFIG_AF_VSOCK=y" >> $config_host_mak -fi - -if test "$debug_mutex" = "yes" ; then - echo "CONFIG_DEBUG_MUTEX=y" >> $config_host_mak -fi - -# Hold two types of flag: -# CONFIG_THREAD_SETNAME_BYTHREAD - we've got a way of setting the name on -# a thread we have a handle to -# CONFIG_PTHREAD_SETNAME_NP_W_TID - A way of doing it on a particular -# platform -if test "$pthread_setname_np_w_tid" = "yes" ; then - echo "CONFIG_THREAD_SETNAME_BYTHREAD=y" >> $config_host_mak - echo "CONFIG_PTHREAD_SETNAME_NP_W_TID=y" >> $config_host_mak -elif test "$pthread_setname_np_wo_tid" = "yes" ; then - echo "CONFIG_THREAD_SETNAME_BYTHREAD=y" >> $config_host_mak - echo "CONFIG_PTHREAD_SETNAME_NP_WO_TID=y" >> $config_host_mak -fi - -if test "$bochs" = "yes" ; then - echo "CONFIG_BOCHS=y" >> $config_host_mak -fi -if test "$cloop" = "yes" ; then - echo "CONFIG_CLOOP=y" >> $config_host_mak -fi -if test "$dmg" = "yes" ; then - echo "CONFIG_DMG=y" >> $config_host_mak -fi -if test "$qcow1" = "yes" ; then - echo "CONFIG_QCOW1=y" >> $config_host_mak -fi -if test "$vdi" = "yes" ; then - echo "CONFIG_VDI=y" >> $config_host_mak -fi -if test "$vvfat" = "yes" ; then - echo "CONFIG_VVFAT=y" >> $config_host_mak -fi -if test "$qed" = "yes" ; then - echo "CONFIG_QED=y" >> $config_host_mak -fi -if test "$parallels" = "yes" ; then - echo "CONFIG_PARALLELS=y" >> $config_host_mak -fi -if test "$have_mlockall" = "yes" ; then - echo "HAVE_MLOCKALL=y" >> $config_host_mak -fi -if test "$fuzzing" = "yes" ; then - # If LIB_FUZZING_ENGINE is set, assume we are running on OSS-Fuzz, and the - # needed CFLAGS have already been provided - if test -z "${LIB_FUZZING_ENGINE+xxx}" ; then - # Add CFLAGS to tell clang to add fuzzer-related instrumentation to all the - # compiled code. 
- QEMU_CFLAGS="$QEMU_CFLAGS -fsanitize=fuzzer-no-link" - # To build non-fuzzer binaries with --enable-fuzzing, link everything with - # fsanitize=fuzzer-no-link. Otherwise, the linker will be unable to bind - # the fuzzer-related callbacks added by instrumentation. - QEMU_LDFLAGS="$QEMU_LDFLAGS -fsanitize=fuzzer-no-link" - # For the actual fuzzer binaries, we need to link against the libfuzzer - # library. Provide the flags for doing this in FUZZ_EXE_LDFLAGS. The meson - # rule for the fuzzer adds these to the link_args. They need to be - # configurable, to support OSS-Fuzz - FUZZ_EXE_LDFLAGS="-fsanitize=fuzzer" - else - FUZZ_EXE_LDFLAGS="$LIB_FUZZING_ENGINE" - fi -fi - if test "$plugins" = "yes" ; then echo "CONFIG_PLUGIN=y" >> $config_host_mak - # Copy the export object list to the build dir - if test "$ld_dynamic_list" = "yes" ; then - echo "CONFIG_HAS_LD_DYNAMIC_LIST=yes" >> $config_host_mak - ld_symbols=qemu-plugins-ld.symbols - cp "$source_path/plugins/qemu-plugins.symbols" $ld_symbols - elif test "$ld_exported_symbols_list" = "yes" ; then - echo "CONFIG_HAS_LD_EXPORTED_SYMBOLS_LIST=yes" >> $config_host_mak - ld64_symbols=qemu-plugins-ld64.symbols - echo "# Automatically generated by configure - do not modify" > $ld64_symbols - grep 'qemu_' "$source_path/plugins/qemu-plugins.symbols" | sed 's/;//g' | \ - sed -E 's/^[[:space:]]*(.*)/_\1/' >> $ld64_symbols - else - error_exit \ - "If \$plugins=yes, either \$ld_dynamic_list or " \ - "\$ld_exported_symbols_list should have been set to 'yes'." - fi fi if test -n "$gdb_bin"; then gdb_version=$($gdb_bin --version | head -n 1) if version_ge ${gdb_version##* } 9.1; then echo "HAVE_GDB_BIN=$gdb_bin" >> $config_host_mak + else + gdb_bin="" fi fi -if test "$secret_keyring" = "yes" ; then - echo "CONFIG_SECRET_KEYRING=y" >> $config_host_mak +if test "$container" != no; then + echo "ENGINE=$container" >> $config_host_mak fi - echo "ROMS=$roms" >> $config_host_mak echo "MAKE=$make" >> $config_host_mak echo "PYTHON=$python" >> $config_host_mak @@ -5036,47 +2425,27 @@ echo "GENISOIMAGE=$genisoimage" >> $config_host_mak echo "MESON=$meson" >> $config_host_mak echo "NINJA=$ninja" >> $config_host_mak echo "CC=$cc" >> $config_host_mak -echo "HOST_CC=$host_cc" >> $config_host_mak -if $iasl -h > /dev/null 2>&1; then - echo "CONFIG_IASL=$iasl" >> $config_host_mak -fi -echo "AR=$ar" >> $config_host_mak -echo "AS=$as" >> $config_host_mak -echo "CCAS=$ccas" >> $config_host_mak -echo "CPP=$cpp" >> $config_host_mak -echo "OBJCOPY=$objcopy" >> $config_host_mak -echo "LD=$ld" >> $config_host_mak -echo "CFLAGS_NOPIE=$CFLAGS_NOPIE" >> $config_host_mak echo "QEMU_CFLAGS=$QEMU_CFLAGS" >> $config_host_mak -echo "QEMU_CXXFLAGS=$QEMU_CXXFLAGS" >> $config_host_mak +echo "QEMU_OBJCFLAGS=$QEMU_OBJCFLAGS" >> $config_host_mak echo "GLIB_CFLAGS=$glib_cflags" >> $config_host_mak echo "GLIB_LIBS=$glib_libs" >> $config_host_mak +echo "GLIB_BINDIR=$glib_bindir" >> $config_host_mak +echo "GLIB_VERSION=$($pkg_config --modversion glib-2.0)" >> $config_host_mak echo "QEMU_LDFLAGS=$QEMU_LDFLAGS" >> $config_host_mak -echo "LD_I386_EMULATION=$ld_i386_emulation" >> $config_host_mak echo "EXESUF=$EXESUF" >> $config_host_mak -echo "HOST_DSOSUF=$HOST_DSOSUF" >> $config_host_mak -echo "LIBS_QGA=$libs_qga" >> $config_host_mak -if test "$gcov" = "yes" ; then - echo "CONFIG_GCOV=y" >> $config_host_mak -fi -if test "$fuzzing" != "no"; then - echo "CONFIG_FUZZ=y" >> $config_host_mak -fi -echo "FUZZ_EXE_LDFLAGS=$FUZZ_EXE_LDFLAGS" >> $config_host_mak - -if test "$rng_none" = "yes"; then - echo 
"CONFIG_RNG_NONE=y" >> $config_host_mak -fi +# FIXME: Use meson +echo "EPOXY_CFLAGS=$epoxy_cflags" >> $config_host_mak +echo "EPOXY_LIBS=$epoxy_libs" >> $config_host_mak # use included Linux headers if test "$linux" = "yes" ; then mkdir -p linux-headers case "$cpu" in - i386|x86_64|x32) + i386|x86_64) linux_arch=x86 ;; - ppc|ppc64|ppc64le) + ppc|ppc64) linux_arch=powerpc ;; s390x) @@ -5085,6 +2454,9 @@ if test "$linux" = "yes" ; then aarch64) linux_arch=arm64 ;; + loongarch*) + linux_arch=loongarch + ;; mips64) linux_arch=mips ;; @@ -5101,24 +2473,18 @@ fi for target in $target_list; do target_dir="$target" - target_name=$(echo $target | cut -d '-' -f 1) - mkdir -p $target_dir + target_name=$(echo $target | cut -d '-' -f 1)$EXESUF + mkdir -p "$target_dir" case $target in *-user) symlink "../qemu-$target_name" "$target_dir/qemu-$target_name" ;; *) symlink "../qemu-system-$target_name" "$target_dir/qemu-system-$target_name" ;; esac done -echo "CONFIG_QEMU_INTERP_PREFIX=$interp_prefix" | sed 's/%M/@0@/' >> $config_host_mak if test "$default_targets" = "yes"; then echo "CONFIG_DEFAULT_TARGETS=y" >> $config_host_mak fi -if test "$numa" = "yes"; then - echo "CONFIG_NUMA=y" >> $config_host_mak - echo "NUMA_LIBS=$numa_libs" >> $config_host_mak -fi - if test "$ccache_cpp2" = "yes"; then echo "export CCACHE_CPP2=y" >> $config_host_mak fi @@ -5127,88 +2493,60 @@ if test "$safe_stack" = "yes"; then echo "CONFIG_SAFESTACK=y" >> $config_host_mak fi -# If we're using a separate build tree, set it up now. -# DIRS are directories which we simply mkdir in the build tree; -# LINKS are things to symlink back into the source tree -# (these can be both files and directories). -# Caution: do not add files or directories here using wildcards. This -# will result in problems later if a new file matching the wildcard is -# added to the source tree -- nothing will cause configure to be rerun -# so the build tree will be missing the link back to the new file, and -# tests might fail. Prefer to keep the relevant files in their own -# directory and symlink the directory instead. -# UNLINK is used to remove symlinks from older development versions -# that might get into the way when doing "git update" without doing -# a "make distclean" in between. 
-DIRS="tests tests/tcg tests/qapi-schema tests/qtest/libqos" -DIRS="$DIRS tests/qtest tests/qemu-iotests tests/vm tests/fp tests/qgraph" -DIRS="$DIRS docs docs/interop fsdev scsi" -DIRS="$DIRS pc-bios/optionrom pc-bios/s390-ccw" -DIRS="$DIRS roms/seabios" -DIRS="$DIRS contrib/plugins/" -LINKS="Makefile" -LINKS="$LINKS tests/tcg/Makefile.target" -LINKS="$LINKS pc-bios/optionrom/Makefile" -LINKS="$LINKS pc-bios/s390-ccw/Makefile" -LINKS="$LINKS roms/seabios/Makefile" -LINKS="$LINKS pc-bios/qemu-icon.bmp" -LINKS="$LINKS .gdbinit scripts" # scripts needed by relative path in .gdbinit -LINKS="$LINKS tests/acceptance tests/data" -LINKS="$LINKS tests/qemu-iotests/check" -LINKS="$LINKS python" -LINKS="$LINKS contrib/plugins/Makefile " -UNLINK="pc-bios/keymaps" -for bios_file in \ - $source_path/pc-bios/*.bin \ - $source_path/pc-bios/*.elf \ - $source_path/pc-bios/*.lid \ - $source_path/pc-bios/*.rom \ - $source_path/pc-bios/*.dtb \ - $source_path/pc-bios/*.img \ - $source_path/pc-bios/openbios-* \ - $source_path/pc-bios/u-boot.* \ - $source_path/pc-bios/edk2-*.fd.bz2 \ - $source_path/pc-bios/palcode-* -do - LINKS="$LINKS pc-bios/$(basename $bios_file)" -done -mkdir -p $DIRS -for f in $LINKS ; do - if [ -e "$source_path/$f" ]; then - symlink "$source_path/$f" "$f" - fi -done -for f in $UNLINK ; do - if [ -L "$f" ]; then - rm -f "$f" - fi -done +# tests/tcg configuration +(config_host_mak=tests/tcg/config-host.mak +mkdir -p tests/tcg +echo "# Automatically generated by configure - do not modify" > $config_host_mak +echo "SRC_PATH=$source_path" >> $config_host_mak +echo "HOST_CC=$host_cc" >> $config_host_mak -(for i in $cross_cc_vars; do - export $i -done -export target_list source_path use_containers ARCH -$source_path/tests/tcg/configure.sh) +# versioned checked in the main config_host.mak above +if test -n "$gdb_bin"; then + echo "HAVE_GDB_BIN=$gdb_bin" >> $config_host_mak +fi +if test "$plugins" = "yes" ; then + echo "CONFIG_PLUGIN=y" >> $config_host_mak +fi -# temporary config to build submodules -for rom in seabios; do - config_mak=roms/$rom/config.mak - echo "# Automatically generated by configure - do not modify" > $config_mak - echo "SRC_PATH=$source_path/roms/$rom" >> $config_mak - echo "AS=$as" >> $config_mak - echo "CCAS=$ccas" >> $config_mak - echo "CC=$cc" >> $config_mak - echo "BCC=bcc" >> $config_mak - echo "CPP=$cpp" >> $config_mak - echo "OBJCOPY=objcopy" >> $config_mak - echo "IASL=$iasl" >> $config_mak - echo "LD=$ld" >> $config_mak - echo "RANLIB=$ranlib" >> $config_mak +tcg_tests_targets= +for target in $target_list; do + arch=${target%%-*} + + case $target in + xtensa*-linux-user) + # the toolchain is not complete with headers, only build softmmu tests + continue + ;; + *-softmmu) + test -f "$source_path/tests/tcg/$arch/Makefile.softmmu-target" || continue + qemu="qemu-system-$arch" + ;; + *-linux-user|*-bsd-user) + qemu="qemu-$arch" + ;; + esac + + if probe_target_compiler $target || test -n "$container_image"; then + test -n "$container_image" && build_static=y + mkdir -p "tests/tcg/$target" + config_target_mak=tests/tcg/$target/config-target.mak + ln -sf "$source_path/tests/tcg/Makefile.target" "tests/tcg/$target/Makefile" + echo "# Automatically generated by configure - do not modify" > "$config_target_mak" + echo "TARGET_NAME=$arch" >> "$config_target_mak" + echo "TARGET=$target" >> "$config_target_mak" + write_target_makefile "build-tcg-tests-$target" >> "$config_target_mak" + echo "BUILD_STATIC=$build_static" >> "$config_target_mak" + echo "QEMU=$PWD/$qemu" >> 
"$config_target_mak" + echo "run-tcg-tests-$target: $qemu\$(EXESUF)" >> Makefile.prereqs + tcg_tests_targets="$tcg_tests_targets $target" + fi done +echo "TCG_TESTS_TARGETS=$tcg_tests_targets" >> config-host.mak) if test "$skip_meson" = no; then cross="config-meson.cross.new" meson_quote() { + test $# = 0 && return echo "'$(echo $* | sed "s/ /','/g")'" } @@ -5221,16 +2559,16 @@ if test "$skip_meson" = no; then echo "${a}-softmmu = '$c'" >> $cross done - test -z "$cxx" && echo "link_language = 'c'" >> $cross echo "[built-in options]" >> $cross - echo "c_args = [${CFLAGS:+$(meson_quote $CFLAGS)}]" >> $cross - echo "cpp_args = [${CXXFLAGS:+$(meson_quote $CXXFLAGS)}]" >> $cross - echo "c_link_args = [${LDFLAGS:+$(meson_quote $LDFLAGS)}]" >> $cross - echo "cpp_link_args = [${LDFLAGS:+$(meson_quote $LDFLAGS)}]" >> $cross + echo "c_args = [$(meson_quote $CFLAGS $EXTRA_CFLAGS)]" >> $cross + echo "cpp_args = [$(meson_quote $CXXFLAGS $EXTRA_CXXFLAGS)]" >> $cross + test -n "$objcc" && echo "objc_args = [$(meson_quote $OBJCFLAGS $EXTRA_OBJCFLAGS)]" >> $cross + echo "c_link_args = [$(meson_quote $CFLAGS $LDFLAGS $EXTRA_CFLAGS $EXTRA_LDFLAGS)]" >> $cross + echo "cpp_link_args = [$(meson_quote $CXXFLAGS $LDFLAGS $EXTRA_CXXFLAGS $EXTRA_LDFLAGS)]" >> $cross echo "[binaries]" >> $cross - echo "c = [$(meson_quote $cc)]" >> $cross - test -n "$cxx" && echo "cpp = [$(meson_quote $cxx)]" >> $cross - test -n "$objcc" && echo "objc = [$(meson_quote $objcc)]" >> $cross + echo "c = [$(meson_quote $cc $CPU_CFLAGS)]" >> $cross + test -n "$cxx" && echo "cpp = [$(meson_quote $cxx $CPU_CFLAGS)]" >> $cross + test -n "$objcc" && echo "objc = [$(meson_quote $objcc $CPU_CFLAGS)]" >> $cross echo "ar = [$(meson_quote $ar)]" >> $cross echo "nm = [$(meson_quote $nm)]" >> $cross echo "pkgconfig = [$(meson_quote $pkg_config_exe)]" >> $cross @@ -5239,31 +2577,18 @@ if test "$skip_meson" = no; then echo "sdl2-config = [$(meson_quote $sdl2_config)]" >> $cross fi echo "strip = [$(meson_quote $strip)]" >> $cross + echo "widl = [$(meson_quote $widl)]" >> $cross echo "windres = [$(meson_quote $windres)]" >> $cross if test "$cross_compile" = "yes"; then cross_arg="--cross-file config-meson.cross" echo "[host_machine]" >> $cross - if test "$mingw32" = "yes" ; then - echo "system = 'windows'" >> $cross - fi - if test "$linux" = "yes" ; then - echo "system = 'linux'" >> $cross - fi - if test "$darwin" = "yes" ; then - echo "system = 'darwin'" >> $cross - fi - case "$ARCH" in + echo "system = '$targetos'" >> $cross + case "$cpu" in i386) echo "cpu_family = 'x86'" >> $cross ;; - x86_64|x32) - echo "cpu_family = 'x86_64'" >> $cross - ;; - ppc64le) - echo "cpu_family = 'ppc64'" >> $cross - ;; *) - echo "cpu_family = '$ARCH'" >> $cross + echo "cpu_family = '$cpu'" >> $cross ;; esac echo "cpu = '$cpu'" >> $cross @@ -5278,56 +2603,25 @@ if test "$skip_meson" = no; then mv $cross config-meson.cross rm -rf meson-private meson-info meson-logs - unset staticpic - if ! 
version_ge "$($meson --version)" 0.56.0; then - staticpic=$(if test "$pie" = yes; then echo true; else echo false; fi) - fi - NINJA=$ninja $meson setup \ - --prefix "$prefix" \ - --libdir "$libdir" \ - --libexecdir "$libexecdir" \ - --bindir "$bindir" \ - --includedir "$includedir" \ - --datadir "$datadir" \ - --mandir "$mandir" \ - --sysconfdir "$sysconfdir" \ - --localedir "$localedir" \ - --localstatedir "$local_statedir" \ - -Ddocdir="$docdir" \ - -Dqemu_firmwarepath="$firmwarepath" \ - -Dqemu_suffix="$qemu_suffix" \ - -Doptimization=$(if test "$debug" = yes; then echo g; else echo 3; fi) \ - -Ddebug=$(if test "$debug_info" = yes; then echo true; else echo false; fi) \ - -Dwerror=$(if test "$werror" = yes; then echo true; else echo false; fi) \ - -Dstrip=$(if test "$strip_opt" = yes; then echo true; else echo false; fi) \ - -Db_pie=$(if test "$pie" = yes; then echo true; else echo false; fi) \ - ${staticpic:+-Db_staticpic=$staticpic} \ - -Db_coverage=$(if test "$gcov" = yes; then echo true; else echo false; fi) \ - -Db_lto=$lto -Dcfi=$cfi -Dcfi_debug=$cfi_debug \ - -Dmalloc=$malloc -Dmalloc_trim=$malloc_trim -Dsparse=$sparse \ - -Dkvm=$kvm -Dhax=$hax -Dwhpx=$whpx -Dhvf=$hvf -Dnvmm=$nvmm \ - -Dxen=$xen -Dxen_pci_passthrough=$xen_pci_passthrough -Dtcg=$tcg \ - -Dcocoa=$cocoa -Dgtk=$gtk -Dmpath=$mpath -Dsdl=$sdl -Dsdl_image=$sdl_image \ - -Dlibusb=$libusb -Dsmartcard=$smartcard -Dusb_redir=$usb_redir -Dvte=$vte \ - -Dvnc=$vnc -Dvnc_sasl=$vnc_sasl -Dvnc_jpeg=$vnc_jpeg -Dvnc_png=$vnc_png \ - -Dgettext=$gettext -Dxkbcommon=$xkbcommon -Du2f=$u2f -Dvirtiofsd=$virtiofsd \ - -Dcapstone=$capstone -Dslirp=$slirp -Dfdt=$fdt -Dbrlapi=$brlapi \ - -Dcurl=$curl -Dglusterfs=$glusterfs -Dbzip2=$bzip2 -Dlibiscsi=$libiscsi \ - -Dlibnfs=$libnfs -Diconv=$iconv -Dcurses=$curses -Dlibudev=$libudev\ - -Drbd=$rbd -Dlzo=$lzo -Dsnappy=$snappy -Dlzfse=$lzfse -Dlibxml2=$libxml2 \ - -Dlibdaxctl=$libdaxctl -Dlibpmem=$libpmem -Dlinux_io_uring=$linux_io_uring \ - -Dgnutls=$gnutls -Dnettle=$nettle -Dgcrypt=$gcrypt -Dauth_pam=$auth_pam \ - -Dzstd=$zstd -Dseccomp=$seccomp -Dvirtfs=$virtfs -Dcap_ng=$cap_ng \ - -Dattr=$attr -Ddefault_devices=$default_devices -Dvirglrenderer=$virglrenderer \ - -Ddocs=$docs -Dsphinx_build=$sphinx_build -Dinstall_blobs=$blobs \ - -Dvhost_user_blk_server=$vhost_user_blk_server -Dmultiprocess=$multiprocess \ - -Dfuse=$fuse -Dfuse_lseek=$fuse_lseek -Dguest_agent_msi=$guest_agent_msi -Dbpf=$bpf \ - -Drenderdoc=$renderdoc \ - $(if test "$default_feature" = no; then echo "-Dauto_features=disabled"; fi) \ - -Dtcg_interpreter=$tcg_interpreter \ - $cross_arg \ - "$PWD" "$source_path" + # Built-in options + test "$bindir" != "bin" && meson_option_add "-Dbindir=$bindir" + test "$default_feature" = no && meson_option_add -Dauto_features=disabled + test "$pie" = no && meson_option_add -Db_pie=false + test "$werror" = yes && meson_option_add -Dwerror=true + + # QEMU options + test "$cfi" != false && meson_option_add "-Dcfi=$cfi" + test "$fdt" != auto && meson_option_add "-Dfdt=$fdt" + test -n "${LIB_FUZZING_ENGINE+xxx}" && meson_option_add "-Dfuzzing_engine=$LIB_FUZZING_ENGINE" + test "$qemu_suffix" != qemu && meson_option_add "-Dqemu_suffix=$qemu_suffix" + test "$smbd" != '' && meson_option_add "-Dsmbd=$smbd" + test "$tcg" != enabled && meson_option_add "-Dtcg=$tcg" + test "$vfio_user_server" != auto && meson_option_add "-Dvfio_user_server=$vfio_user_server" + run_meson() { + NINJA=$ninja $meson setup --prefix "$prefix" "$@" $cross_arg "$PWD" "$source_path" + } + eval run_meson $meson_options if test "$?" 
-ne 0 ; then error_exit "meson setup failed" fi @@ -5338,27 +2632,11 @@ else perl -i -ne ' s/^gettext = true$/gettext = auto/; s/^gettext = false$/gettext = disabled/; + /^b_staticpic/ && next; print;' meson-private/cmd_line.txt fi fi -if test -n "${deprecated_features}"; then - echo "Warning, deprecated features enabled." - echo "Please see docs/about/deprecated.rst" - echo " features: ${deprecated_features}" -fi - -# Create list of config switches that should be poisoned in common code... -# but filter out CONFIG_TCG and CONFIG_USER_ONLY which are special. -target_configs_h=$(ls *-config-devices.h *-config-target.h 2>/dev/null) -if test -n "$target_configs_h" ; then - sed -n -e '/CONFIG_TCG/d' -e '/CONFIG_USER_ONLY/d' \ - -e '/^#define / { s///; s/ .*//; s/^/#pragma GCC poison /p; }' \ - $target_configs_h | sort -u > config-poison.h -else - :> config-poison.h -fi - # Save the configure command line for later reuse. cat <config.status #!/bin/sh @@ -5387,14 +2665,15 @@ preserve_env() { preserve_env AR preserve_env AS preserve_env CC -preserve_env CPP +preserve_env CFLAGS preserve_env CXX -preserve_env INSTALL +preserve_env CXXFLAGS preserve_env LD +preserve_env LDFLAGS preserve_env LD_LIBRARY_PATH -preserve_env LIBTOOL preserve_env MAKE preserve_env NM +preserve_env OBJCFLAGS preserve_env OBJCOPY preserve_env PATH preserve_env PKG_CONFIG @@ -5404,6 +2683,7 @@ preserve_env PYTHON preserve_env SDL2_CONFIG preserve_env SMBD preserve_env STRIP +preserve_env WIDL preserve_env WINDRES printf "exec" >>config.status diff --git a/contrib/elf2dmp/download.c b/contrib/elf2dmp/download.c index d09e607431..bd7650a7a2 100644 --- a/contrib/elf2dmp/download.c +++ b/contrib/elf2dmp/download.c @@ -25,21 +25,19 @@ int download_url(const char *name, const char *url) goto out_curl; } - curl_easy_setopt(curl, CURLOPT_URL, url); - curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, NULL); - curl_easy_setopt(curl, CURLOPT_WRITEDATA, file); - curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1); - curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0); - - if (curl_easy_perform(curl) != CURLE_OK) { - err = 1; - fclose(file); + if (curl_easy_setopt(curl, CURLOPT_URL, url) != CURLE_OK + || curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, NULL) != CURLE_OK + || curl_easy_setopt(curl, CURLOPT_WRITEDATA, file) != CURLE_OK + || curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1) != CURLE_OK + || curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0) != CURLE_OK + || curl_easy_perform(curl) != CURLE_OK) { unlink(name); - goto out_curl; + fclose(file); + err = 1; + } else { + err = fclose(file); } - err = fclose(file); - out_curl: curl_easy_cleanup(curl); diff --git a/contrib/elf2dmp/main.c b/contrib/elf2dmp/main.c index 20b477d582..d77b8f98f7 100644 --- a/contrib/elf2dmp/main.c +++ b/contrib/elf2dmp/main.c @@ -125,6 +125,7 @@ static KDDEBUGGER_DATA64 *get_kdbg(uint64_t KernBase, struct pdb_reader *pdb, if (va_space_rw(vs, KdDebuggerDataBlock, kdbg, kdbg_hdr.Size, 0)) { eprintf("Failed to extract entire KDBG\n"); + free(kdbg); return NULL; } @@ -141,10 +142,10 @@ static KDDEBUGGER_DATA64 *get_kdbg(uint64_t KernBase, struct pdb_reader *pdb, return kdbg; } -static void win_context_init_from_qemu_cpu_state(WinContext *ctx, +static void win_context_init_from_qemu_cpu_state(WinContext64 *ctx, QEMUCPUState *s) { - WinContext win_ctx = (WinContext){ + WinContext64 win_ctx = (WinContext64){ .ContextFlags = WIN_CTX_X64 | WIN_CTX_INT | WIN_CTX_SEG | WIN_CTX_CTL, .MxCsr = INITIAL_MXCSR, @@ -302,7 +303,7 @@ static int fill_context(KDDEBUGGER_DATA64 *kdbg, for (i = 
0; i < qe->state_nr; i++) { uint64_t Prcb; uint64_t Context; - WinContext ctx; + WinContext64 ctx; QEMUCPUState *s = qe->state[i]; if (va_space_rw(vs, kdbg->KiProcessorBlock + sizeof(Prcb) * i, diff --git a/contrib/elf2dmp/meson.build b/contrib/elf2dmp/meson.build index 4d86cb390a..6707d43c4f 100644 --- a/contrib/elf2dmp/meson.build +++ b/contrib/elf2dmp/meson.build @@ -1,5 +1,5 @@ if curl.found() - executable('elf2dmp', files('main.c', 'addrspace.c', 'download.c', 'pdb.c', 'qemu_elf.c'), + executable('elf2dmp', files('main.c', 'addrspace.c', 'download.c', 'pdb.c', 'qemu_elf.c'), genh, dependencies: [glib, curl], install: true) endif diff --git a/contrib/elf2dmp/pdb.c b/contrib/elf2dmp/pdb.c index b3a6547068..adcfa7e154 100644 --- a/contrib/elf2dmp/pdb.c +++ b/contrib/elf2dmp/pdb.c @@ -215,6 +215,10 @@ out_symbols: static int pdb_reader_ds_init(struct pdb_reader *r, PDB_DS_HEADER *hdr) { + if (hdr->block_size == 0) { + return 1; + } + memset(r->file_used, 0, sizeof(r->file_used)); r->ds.header = hdr; r->ds.toc = pdb_ds_read(hdr, (uint32_t *)((uint8_t *)hdr + diff --git a/contrib/elf2dmp/qemu_elf.c b/contrib/elf2dmp/qemu_elf.c index b601b6d7ba..ebda60dcb8 100644 --- a/contrib/elf2dmp/qemu_elf.c +++ b/contrib/elf2dmp/qemu_elf.c @@ -118,6 +118,53 @@ static void exit_states(QEMU_Elf *qe) free(qe->state); } +static bool check_ehdr(QEMU_Elf *qe) +{ + Elf64_Ehdr *ehdr = qe->map; + + if (sizeof(Elf64_Ehdr) > qe->size) { + eprintf("Invalid input dump file size\n"); + return false; + } + + if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) { + eprintf("Invalid ELF signature, input file is not ELF\n"); + return false; + } + + if (ehdr->e_ident[EI_CLASS] != ELFCLASS64 || + ehdr->e_ident[EI_DATA] != ELFDATA2LSB) { + eprintf("Invalid ELF class or byte order, must be 64-bit LE\n"); + return false; + } + + if (ehdr->e_ident[EI_VERSION] != EV_CURRENT) { + eprintf("Invalid ELF version\n"); + return false; + } + + if (ehdr->e_machine != EM_X86_64) { + eprintf("Invalid input dump architecture, only x86_64 is supported\n"); + return false; + } + + if (ehdr->e_type != ET_CORE) { + eprintf("Invalid ELF type, must be core file\n"); + return false; + } + + /* + * ELF dump file must contain one PT_NOTE and at least one PT_LOAD to + * restore physical address space. 
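+     *
+     * (The PT_NOTE segment carries the QEMU CPU state notes that
+     * init_states() extracts, and the PT_LOAD segments map the guest
+     * physical address space, hence the e_phnum >= 2 check below.)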
+ */ + if (ehdr->e_phnum < 2) { + eprintf("Invalid number of ELF program headers\n"); + return false; + } + + return true; +} + int QEMU_Elf_init(QEMU_Elf *qe, const char *filename) { GError *gerr = NULL; @@ -133,6 +180,12 @@ int QEMU_Elf_init(QEMU_Elf *qe, const char *filename) qe->map = g_mapped_file_get_contents(qe->gmf); qe->size = g_mapped_file_get_length(qe->gmf); + if (!check_ehdr(qe)) { + eprintf("Input file has the wrong format\n"); + err = 1; + goto out_unmap; + } + if (init_states(qe)) { eprintf("Failed to extract QEMU CPU states\n"); err = 1; diff --git a/contrib/gitdm/aliases b/contrib/gitdm/aliases index 4792413ce7..e26b00a71d 100644 --- a/contrib/gitdm/aliases +++ b/contrib/gitdm/aliases @@ -34,8 +34,10 @@ malc@c046a42c-6fe2-441c-8c8c-71466251a162 av1474@comtv.ru # canonical emails liq3ea@163.com liq3ea@gmail.com -# some broken tags +# some broken DCO tags yuval.shaia.ml.gmail.com yuval.shaia.ml@gmail.com +jasowang jasowang@redhat.com +nicta.com.au peter.chubb@nicta.com.au # There is also a: # (no author) <(no author)@c046a42c-6fe2-441c-8c8c-71466251a162> diff --git a/contrib/gitdm/domain-map b/contrib/gitdm/domain-map index 2800d9f986..3727918641 100644 --- a/contrib/gitdm/domain-map +++ b/contrib/gitdm/domain-map @@ -10,6 +10,7 @@ bytedance.com ByteDance cmss.chinamobile.com China Mobile citrix.com Citrix crudebyte.com Crudebyte +chinatelecom.cn China Telecom eldorado.org.br Instituto de Pesquisas Eldorado fujitsu.com Fujitsu google.com Google @@ -19,6 +20,7 @@ ibm.com IBM igalia.com Igalia intel.com Intel linaro.org Linaro +loongson.cn Loongson Technology lwn.net LWN microsoft.com Microsoft mvista.com MontaVista diff --git a/contrib/gitdm/group-map-academics b/contrib/gitdm/group-map-academics index 44745ca85b..082458e1bd 100644 --- a/contrib/gitdm/group-map-academics +++ b/contrib/gitdm/group-map-academics @@ -19,3 +19,9 @@ edu.cn # Boston University bu.edu + +# Institute of Software Chinese Academy of Sciences +iscas.ac.cn + +# Université Grenoble Alpes +univ-grenoble-alpes.fr diff --git a/contrib/gitdm/group-map-individuals b/contrib/gitdm/group-map-individuals index f816aa8770..53883cc526 100644 --- a/contrib/gitdm/group-map-individuals +++ b/contrib/gitdm/group-map-individuals @@ -34,3 +34,6 @@ bmeng.cn@gmail.com liq3ea@gmail.com chetan4windows@gmail.com akihiko.odaki@gmail.com +paul@nowt.org +git@xen0n.name +simon@simonsafar.com diff --git a/contrib/ivshmem-client/meson.build b/contrib/ivshmem-client/meson.build index 1b171efb4f..ce8dcca84d 100644 --- a/contrib/ivshmem-client/meson.build +++ b/contrib/ivshmem-client/meson.build @@ -1,4 +1,4 @@ -executable('ivshmem-client', files('ivshmem-client.c', 'main.c'), +executable('ivshmem-client', files('ivshmem-client.c', 'main.c'), genh, dependencies: glib, build_by_default: targetos == 'linux', install: false) diff --git a/contrib/ivshmem-server/ivshmem-server.c b/contrib/ivshmem-server/ivshmem-server.c index 39a6ffdb5d..2f3c7320a6 100644 --- a/contrib/ivshmem-server/ivshmem-server.c +++ b/contrib/ivshmem-server/ivshmem-server.c @@ -146,7 +146,7 @@ ivshmem_server_handle_new_conn(IvshmemServer *server) return -1; } - qemu_set_nonblock(newfd); + qemu_socket_set_nonblock(newfd); IVSHMEM_SERVER_DEBUG(server, "accept()=%d\n", newfd); /* allocate new structure for this peer */ diff --git a/contrib/ivshmem-server/meson.build b/contrib/ivshmem-server/meson.build index 3a53942201..c6c3c82e89 100644 --- a/contrib/ivshmem-server/meson.build +++ b/contrib/ivshmem-server/meson.build @@ -1,4 +1,4 @@ -executable('ivshmem-server', 
files('ivshmem-server.c', 'main.c'), +executable('ivshmem-server', files('ivshmem-server.c', 'main.c'), genh, dependencies: [qemuutil, rt], build_by_default: targetos == 'linux', install: false) diff --git a/contrib/plugins/Makefile b/contrib/plugins/Makefile index 54ac5ccd9f..23e0396687 100644 --- a/contrib/plugins/Makefile +++ b/contrib/plugins/Makefile @@ -20,6 +20,7 @@ NAMES += howvec NAMES += lockstep NAMES += hwprofile NAMES += cache +NAMES += drcov SONAMES := $(addsuffix .so,$(addprefix lib,$(NAMES))) @@ -28,6 +29,7 @@ SONAMES := $(addsuffix .so,$(addprefix lib,$(NAMES))) CFLAGS = $(GLIB_CFLAGS) CFLAGS += -fPIC -Wall $(filter -W%, $(QEMU_CFLAGS)) CFLAGS += $(if $(findstring no-psabi,$(QEMU_CFLAGS)),-Wpsabi) +CFLAGS += $(if $(CONFIG_DEBUG_TCG), -ggdb -O0) CFLAGS += -I$(SRC_PATH)/include/qemu all: $(SONAMES) diff --git a/contrib/plugins/cache.c b/contrib/plugins/cache.c index 066ea6d8ec..ac1510aaa1 100644 --- a/contrib/plugins/cache.c +++ b/contrib/plugins/cache.c @@ -11,24 +11,20 @@ #include +#define STRTOLL(x) g_ascii_strtoll(x, NULL, 10) + QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; static enum qemu_plugin_mem_rw rw = QEMU_PLUGIN_MEM_RW; static GHashTable *miss_ht; -static GMutex mtx; +static GMutex hashtable_lock; static GRand *rng; static int limit; static bool sys; -static uint64_t dmem_accesses; -static uint64_t dmisses; - -static uint64_t imem_accesses; -static uint64_t imisses; - enum EvictionPolicy { LRU, FIFO, @@ -42,7 +38,7 @@ enum EvictionPolicy policy; * put in any of the blocks inside the set. The number of block per set is * called the associativity (assoc). * - * Each block contains the the stored tag and a valid bit. Since this is not + * Each block contains the stored tag and a valid bit. Since this is not * a functional simulator, the data itself is not stored. We only identify * whether a block is in the cache or not by searching for its tag. * @@ -80,14 +76,17 @@ typedef struct { int blksize_shift; uint64_t set_mask; uint64_t tag_mask; + uint64_t accesses; + uint64_t misses; } Cache; typedef struct { char *disas_str; const char *symbol; uint64_t addr; - uint64_t dmisses; - uint64_t imisses; + uint64_t l1_dmisses; + uint64_t l1_imisses; + uint64_t l2_misses; } InsnData; void (*update_hit)(Cache *cache, int set, int blk); @@ -96,7 +95,23 @@ void (*update_miss)(Cache *cache, int set, int blk); void (*metadata_init)(Cache *cache); void (*metadata_destroy)(Cache *cache); -Cache *dcache, *icache; +static int cores; +static Cache **l1_dcaches, **l1_icaches; + +static bool use_l2; +static Cache **l2_ucaches; + +static GMutex *l1_dcache_locks; +static GMutex *l1_icache_locks; +static GMutex *l2_ucache_locks; + +static uint64_t l1_dmem_accesses; +static uint64_t l1_imem_accesses; +static uint64_t l1_imisses; +static uint64_t l1_dmisses; + +static uint64_t l2_mem_accesses; +static uint64_t l2_misses; static int pow_of_two(int num) { @@ -233,20 +248,24 @@ static bool bad_cache_params(int blksize, int assoc, int cachesize) static Cache *cache_init(int blksize, int assoc, int cachesize) { - if (bad_cache_params(blksize, assoc, cachesize)) { - return NULL; - } - Cache *cache; int i; uint64_t blk_mask; + /* + * This function shall not be called directly, and hence expects suitable + * parameters. 
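 * (Callers go through caches_init() below, which validates the geometry
 * with bad_cache_params() before any per-core cache is allocated.)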
+ */ + g_assert(!bad_cache_params(blksize, assoc, cachesize)); + cache = g_new(Cache, 1); cache->assoc = assoc; cache->cachesize = cachesize; cache->num_sets = cachesize / (blksize * assoc); cache->sets = g_new(CacheSet, cache->num_sets); cache->blksize_shift = pow_of_two(blksize); + cache->accesses = 0; + cache->misses = 0; for (i = 0; i < cache->num_sets; i++) { cache->sets[i].blocks = g_new0(CacheBlock, assoc); @@ -263,6 +282,24 @@ static Cache *cache_init(int blksize, int assoc, int cachesize) return cache; } +static Cache **caches_init(int blksize, int assoc, int cachesize) +{ + Cache **caches; + int i; + + if (bad_cache_params(blksize, assoc, cachesize)) { + return NULL; + } + + caches = g_new(Cache *, cores); + + for (i = 0; i < cores; i++) { + caches[i] = cache_init(blksize, assoc, cachesize); + } + + return caches; +} + static int get_invalid_block(Cache *cache, uint64_t set) { int i; @@ -353,7 +390,9 @@ static void vcpu_mem_access(unsigned int vcpu_index, qemu_plugin_meminfo_t info, { uint64_t effective_addr; struct qemu_plugin_hwaddr *hwaddr; + int cache_idx; InsnData *insn; + bool hit_in_l1; hwaddr = qemu_plugin_get_hwaddr(info, vaddr); if (hwaddr && qemu_plugin_hwaddr_is_io(hwaddr)) { @@ -361,32 +400,66 @@ static void vcpu_mem_access(unsigned int vcpu_index, qemu_plugin_meminfo_t info, } effective_addr = hwaddr ? qemu_plugin_hwaddr_phys_addr(hwaddr) : vaddr; + cache_idx = vcpu_index % cores; - g_mutex_lock(&mtx); - if (!access_cache(dcache, effective_addr)) { + g_mutex_lock(&l1_dcache_locks[cache_idx]); + hit_in_l1 = access_cache(l1_dcaches[cache_idx], effective_addr); + if (!hit_in_l1) { insn = (InsnData *) userdata; - insn->dmisses++; - dmisses++; + __atomic_fetch_add(&insn->l1_dmisses, 1, __ATOMIC_SEQ_CST); + l1_dcaches[cache_idx]->misses++; } - dmem_accesses++; - g_mutex_unlock(&mtx); + l1_dcaches[cache_idx]->accesses++; + g_mutex_unlock(&l1_dcache_locks[cache_idx]); + + if (hit_in_l1 || !use_l2) { + /* No need to access L2 */ + return; + } + + g_mutex_lock(&l2_ucache_locks[cache_idx]); + if (!access_cache(l2_ucaches[cache_idx], effective_addr)) { + insn = (InsnData *) userdata; + __atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST); + l2_ucaches[cache_idx]->misses++; + } + l2_ucaches[cache_idx]->accesses++; + g_mutex_unlock(&l2_ucache_locks[cache_idx]); } static void vcpu_insn_exec(unsigned int vcpu_index, void *userdata) { uint64_t insn_addr; InsnData *insn; + int cache_idx; + bool hit_in_l1; - g_mutex_lock(&mtx); insn_addr = ((InsnData *) userdata)->addr; - if (!access_cache(icache, insn_addr)) { + cache_idx = vcpu_index % cores; + g_mutex_lock(&l1_icache_locks[cache_idx]); + hit_in_l1 = access_cache(l1_icaches[cache_idx], insn_addr); + if (!hit_in_l1) { insn = (InsnData *) userdata; - insn->imisses++; - imisses++; + __atomic_fetch_add(&insn->l1_imisses, 1, __ATOMIC_SEQ_CST); + l1_icaches[cache_idx]->misses++; } - imem_accesses++; - g_mutex_unlock(&mtx); + l1_icaches[cache_idx]->accesses++; + g_mutex_unlock(&l1_icache_locks[cache_idx]); + + if (hit_in_l1 || !use_l2) { + /* No need to access L2 */ + return; + } + + g_mutex_lock(&l2_ucache_locks[cache_idx]); + if (!access_cache(l2_ucaches[cache_idx], insn_addr)) { + insn = (InsnData *) userdata; + __atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST); + l2_ucaches[cache_idx]->misses++; + } + l2_ucaches[cache_idx]->accesses++; + g_mutex_unlock(&l2_ucache_locks[cache_idx]); } static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) @@ -411,7 +484,7 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, 
struct qemu_plugin_tb *tb) * new entries for those instructions. Instead, we fetch the same * entry from the hash table and register it for the callback again. */ - g_mutex_lock(&mtx); + g_mutex_lock(&hashtable_lock); data = g_hash_table_lookup(miss_ht, GUINT_TO_POINTER(effective_addr)); if (data == NULL) { data = g_new0(InsnData, 1); @@ -421,7 +494,7 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) g_hash_table_insert(miss_ht, GUINT_TO_POINTER(effective_addr), (gpointer) data); } - g_mutex_unlock(&mtx); + g_mutex_unlock(&hashtable_lock); qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem_access, QEMU_PLUGIN_CB_NO_REGS, @@ -453,12 +526,69 @@ static void cache_free(Cache *cache) g_free(cache); } +static void caches_free(Cache **caches) +{ + int i; + + for (i = 0; i < cores; i++) { + cache_free(caches[i]); + } +} + +static void append_stats_line(GString *line, uint64_t l1_daccess, + uint64_t l1_dmisses, uint64_t l1_iaccess, + uint64_t l1_imisses, uint64_t l2_access, + uint64_t l2_misses) +{ + double l1_dmiss_rate, l1_imiss_rate, l2_miss_rate; + + l1_dmiss_rate = ((double) l1_dmisses) / (l1_daccess) * 100.0; + l1_imiss_rate = ((double) l1_imisses) / (l1_iaccess) * 100.0; + + g_string_append_printf(line, "%-14lu %-12lu %9.4lf%% %-14lu %-12lu" + " %9.4lf%%", + l1_daccess, + l1_dmisses, + l1_daccess ? l1_dmiss_rate : 0.0, + l1_iaccess, + l1_imisses, + l1_iaccess ? l1_imiss_rate : 0.0); + + if (use_l2) { + l2_miss_rate = ((double) l2_misses) / (l2_access) * 100.0; + g_string_append_printf(line, " %-12lu %-11lu %10.4lf%%", + l2_access, + l2_misses, + l2_access ? l2_miss_rate : 0.0); + } + + g_string_append(line, "\n"); +} + +static void sum_stats(void) +{ + int i; + + g_assert(cores > 1); + for (i = 0; i < cores; i++) { + l1_imisses += l1_icaches[i]->misses; + l1_dmisses += l1_dcaches[i]->misses; + l1_imem_accesses += l1_icaches[i]->accesses; + l1_dmem_accesses += l1_dcaches[i]->accesses; + + if (use_l2) { + l2_misses += l2_ucaches[i]->misses; + l2_mem_accesses += l2_ucaches[i]->accesses; + } + } +} + static int dcmp(gconstpointer a, gconstpointer b) { InsnData *insn_a = (InsnData *) a; InsnData *insn_b = (InsnData *) b; - return insn_a->dmisses < insn_b->dmisses ? 1 : -1; + return insn_a->l1_dmisses < insn_b->l1_dmisses ? 1 : -1; } static int icmp(gconstpointer a, gconstpointer b) @@ -466,24 +596,52 @@ static int icmp(gconstpointer a, gconstpointer b) InsnData *insn_a = (InsnData *) a; InsnData *insn_b = (InsnData *) b; - return insn_a->imisses < insn_b->imisses ? 1 : -1; + return insn_a->l1_imisses < insn_b->l1_imisses ? 1 : -1; +} + +static int l2_cmp(gconstpointer a, gconstpointer b) +{ + InsnData *insn_a = (InsnData *) a; + InsnData *insn_b = (InsnData *) b; + + return insn_a->l2_misses < insn_b->l2_misses ? 
1 : -1; } static void log_stats(void) { - g_autoptr(GString) rep = g_string_new(""); - g_string_append_printf(rep, - "Data accesses: %lu, Misses: %lu\nMiss rate: %lf%%\n\n", - dmem_accesses, - dmisses, - ((double) dmisses / (double) dmem_accesses) * 100.0); + int i; + Cache *icache, *dcache, *l2_cache; - g_string_append_printf(rep, - "Instruction accesses: %lu, Misses: %lu\nMiss rate: %lf%%\n\n", - imem_accesses, - imisses, - ((double) imisses / (double) imem_accesses) * 100.0); + g_autoptr(GString) rep = g_string_new("core #, data accesses, data misses," + " dmiss rate, insn accesses," + " insn misses, imiss rate"); + if (use_l2) { + g_string_append(rep, ", l2 accesses, l2 misses, l2 miss rate"); + } + + g_string_append(rep, "\n"); + + for (i = 0; i < cores; i++) { + g_string_append_printf(rep, "%-8d", i); + dcache = l1_dcaches[i]; + icache = l1_icaches[i]; + l2_cache = use_l2 ? l2_ucaches[i] : NULL; + append_stats_line(rep, dcache->accesses, dcache->misses, + icache->accesses, icache->misses, + l2_cache ? l2_cache->accesses : 0, + l2_cache ? l2_cache->misses : 0); + } + + if (cores > 1) { + sum_stats(); + g_string_append_printf(rep, "%-8s", "sum"); + append_stats_line(rep, l1_dmem_accesses, l1_dmisses, + l1_imem_accesses, l1_imisses, + l2_cache ? l2_mem_accesses : 0, l2_cache ? l2_misses : 0); + } + + g_string_append(rep, "\n"); qemu_plugin_outs(rep->str); } @@ -504,7 +662,7 @@ static void log_top_insns(void) if (insn->symbol) { g_string_append_printf(rep, " (%s)", insn->symbol); } - g_string_append_printf(rep, ", %ld, %s\n", insn->dmisses, + g_string_append_printf(rep, ", %ld, %s\n", insn->l1_dmisses, insn->disas_str); } @@ -517,10 +675,28 @@ static void log_top_insns(void) if (insn->symbol) { g_string_append_printf(rep, " (%s)", insn->symbol); } - g_string_append_printf(rep, ", %ld, %s\n", insn->imisses, + g_string_append_printf(rep, ", %ld, %s\n", insn->l1_imisses, insn->disas_str); } + if (!use_l2) { + goto finish; + } + + miss_insns = g_list_sort(miss_insns, l2_cmp); + g_string_append_printf(rep, "%s", "\naddress, L2 misses, instruction\n"); + + for (curr = miss_insns, i = 0; curr && i < limit; i++, curr = curr->next) { + insn = (InsnData *) curr->data; + g_string_append_printf(rep, "0x%" PRIx64, insn->addr); + if (insn->symbol) { + g_string_append_printf(rep, " (%s)", insn->symbol); + } + g_string_append_printf(rep, ", %ld, %s\n", insn->l2_misses, + insn->disas_str); + } + +finish: qemu_plugin_outs(rep->str); g_list_free(miss_insns); } @@ -530,8 +706,16 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) log_stats(); log_top_insns(); - cache_free(dcache); - cache_free(icache); + caches_free(l1_dcaches); + caches_free(l1_icaches); + + g_free(l1_dcache_locks); + g_free(l1_icache_locks); + + if (use_l2) { + caches_free(l2_ucaches); + g_free(l2_ucache_locks); + } g_hash_table_destroy(miss_ht); } @@ -563,45 +747,69 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, int argc, char **argv) { int i; - int iassoc, iblksize, icachesize; - int dassoc, dblksize, dcachesize; + int l1_iassoc, l1_iblksize, l1_icachesize; + int l1_dassoc, l1_dblksize, l1_dcachesize; + int l2_assoc, l2_blksize, l2_cachesize; limit = 32; sys = info->system_emulation; - dassoc = 8; - dblksize = 64; - dcachesize = dblksize * dassoc * 32; + l1_dassoc = 8; + l1_dblksize = 64; + l1_dcachesize = l1_dblksize * l1_dassoc * 32; - iassoc = 8; - iblksize = 64; - icachesize = iblksize * iassoc * 32; + l1_iassoc = 8; + l1_iblksize = 64; + l1_icachesize = l1_iblksize * l1_iassoc * 32; + + l2_assoc = 16; 
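    /*
     * With these defaults each core models 16 KiB L1 caches
     * (64-byte blocks * 8 ways * 32 sets) and, when L2 is enabled, a
     * 2 MiB unified L2 (64-byte blocks * 16 ways * 2048 sets, per the
     * l2_blksize/l2_cachesize values below); cache_init() then derives
     * num_sets = cachesize / (blksize * assoc).
     */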
+ l2_blksize = 64; + l2_cachesize = l2_assoc * l2_blksize * 2048; policy = LRU; + cores = sys ? qemu_plugin_n_vcpus() : 1; + for (i = 0; i < argc; i++) { char *opt = argv[i]; - if (g_str_has_prefix(opt, "iblksize=")) { - iblksize = g_ascii_strtoll(opt + 9, NULL, 10); - } else if (g_str_has_prefix(opt, "iassoc=")) { - iassoc = g_ascii_strtoll(opt + 7, NULL, 10); - } else if (g_str_has_prefix(opt, "icachesize=")) { - icachesize = g_ascii_strtoll(opt + 11, NULL, 10); - } else if (g_str_has_prefix(opt, "dblksize=")) { - dblksize = g_ascii_strtoll(opt + 9, NULL, 10); - } else if (g_str_has_prefix(opt, "dassoc=")) { - dassoc = g_ascii_strtoll(opt + 7, NULL, 10); - } else if (g_str_has_prefix(opt, "dcachesize=")) { - dcachesize = g_ascii_strtoll(opt + 11, NULL, 10); - } else if (g_str_has_prefix(opt, "limit=")) { - limit = g_ascii_strtoll(opt + 6, NULL, 10); - } else if (g_str_has_prefix(opt, "evict=")) { - gchar *p = opt + 6; - if (g_strcmp0(p, "rand") == 0) { + g_autofree char **tokens = g_strsplit(opt, "=", 2); + + if (g_strcmp0(tokens[0], "iblksize") == 0) { + l1_iblksize = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "iassoc") == 0) { + l1_iassoc = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "icachesize") == 0) { + l1_icachesize = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "dblksize") == 0) { + l1_dblksize = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "dassoc") == 0) { + l1_dassoc = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "dcachesize") == 0) { + l1_dcachesize = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "limit") == 0) { + limit = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "cores") == 0) { + cores = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "l2cachesize") == 0) { + use_l2 = true; + l2_cachesize = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "l2blksize") == 0) { + use_l2 = true; + l2_blksize = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "l2assoc") == 0) { + use_l2 = true; + l2_assoc = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "l2") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &use_l2)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); + return -1; + } + } else if (g_strcmp0(tokens[0], "evict") == 0) { + if (g_strcmp0(tokens[1], "rand") == 0) { policy = RAND; - } else if (g_strcmp0(p, "lru") == 0) { + } else if (g_strcmp0(tokens[1], "lru") == 0) { policy = LRU; - } else if (g_strcmp0(p, "fifo") == 0) { + } else if (g_strcmp0(tokens[1], "fifo") == 0) { policy = FIFO; } else { fprintf(stderr, "invalid eviction policy: %s\n", opt); @@ -615,22 +823,34 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, policy_init(); - dcache = cache_init(dblksize, dassoc, dcachesize); - if (!dcache) { - const char *err = cache_config_error(dblksize, dassoc, dcachesize); + l1_dcaches = caches_init(l1_dblksize, l1_dassoc, l1_dcachesize); + if (!l1_dcaches) { + const char *err = cache_config_error(l1_dblksize, l1_dassoc, l1_dcachesize); fprintf(stderr, "dcache cannot be constructed from given parameters\n"); fprintf(stderr, "%s\n", err); return -1; } - icache = cache_init(iblksize, iassoc, icachesize); - if (!icache) { - const char *err = cache_config_error(iblksize, iassoc, icachesize); + l1_icaches = caches_init(l1_iblksize, l1_iassoc, l1_icachesize); + if (!l1_icaches) { + const char *err = cache_config_error(l1_iblksize, l1_iassoc, l1_icachesize); fprintf(stderr, "icache cannot be constructed from given parameters\n"); fprintf(stderr, "%s\n", 
err); return -1; } + l2_ucaches = use_l2 ? caches_init(l2_blksize, l2_assoc, l2_cachesize) : NULL; + if (!l2_ucaches && use_l2) { + const char *err = cache_config_error(l2_blksize, l2_assoc, l2_cachesize); + fprintf(stderr, "L2 cache cannot be constructed from given parameters\n"); + fprintf(stderr, "%s\n", err); + return -1; + } + + l1_dcache_locks = g_new0(GMutex, cores); + l1_icache_locks = g_new0(GMutex, cores); + l2_ucache_locks = use_l2 ? g_new0(GMutex, cores) : NULL; + qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); diff --git a/contrib/plugins/drcov.c b/contrib/plugins/drcov.c new file mode 100644 index 0000000000..b4a855adaf --- /dev/null +++ b/contrib/plugins/drcov.c @@ -0,0 +1,163 @@ +/* + * Copyright (C) 2021, Ivanov Arkady + * + * Drcov - a DynamoRIO-based tool that collects coverage information + * from a binary. Primary goal this script is to have coverage log + * files that work in Lighthouse. + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; + +static char header[] = "DRCOV VERSION: 2\n" + "DRCOV FLAVOR: drcov-64\n" + "Module Table: version 2, count 1\n" + "Columns: id, base, end, entry, path\n"; + +static FILE *fp; +static const char *file_name = "file.drcov.trace"; +static GMutex lock; + +typedef struct { + uint32_t start; + uint16_t size; + uint16_t mod_id; + bool exec; +} bb_entry_t; + +/* Translated blocks */ +static GPtrArray *blocks; + +static void printf_header(unsigned long count) +{ + fprintf(fp, "%s", header); + const char *path = qemu_plugin_path_to_binary(); + uint64_t start_code = qemu_plugin_start_code(); + uint64_t end_code = qemu_plugin_end_code(); + uint64_t entry = qemu_plugin_entry_code(); + fprintf(fp, "0, 0x%lx, 0x%lx, 0x%lx, %s\n", + start_code, end_code, entry, path); + fprintf(fp, "BB Table: %ld bbs\n", count); +} + +static void printf_char_array32(uint32_t data) +{ + const uint8_t *bytes = (const uint8_t *)(&data); + fwrite(bytes, sizeof(char), sizeof(data), fp); +} + +static void printf_char_array16(uint16_t data) +{ + const uint8_t *bytes = (const uint8_t *)(&data); + fwrite(bytes, sizeof(char), sizeof(data), fp); +} + + +static void printf_el(gpointer data, gpointer user_data) +{ + bb_entry_t *bb = (bb_entry_t *)data; + if (bb->exec) { + printf_char_array32(bb->start); + printf_char_array16(bb->size); + printf_char_array16(bb->mod_id); + } + g_free(bb); +} + +static void count_block(gpointer data, gpointer user_data) +{ + unsigned long *count = (unsigned long *) user_data; + bb_entry_t *bb = (bb_entry_t *)data; + if (bb->exec) { + *count = *count + 1; + } +} + +static void plugin_exit(qemu_plugin_id_t id, void *p) +{ + unsigned long count = 0; + g_mutex_lock(&lock); + g_ptr_array_foreach(blocks, count_block, &count); + + /* Print function */ + printf_header(count); + g_ptr_array_foreach(blocks, printf_el, NULL); + + /* Clear */ + g_ptr_array_free(blocks, true); + + fclose(fp); + + g_mutex_unlock(&lock); +} + +static void plugin_init(void) +{ + fp = fopen(file_name, "wb"); + blocks = g_ptr_array_sized_new(128); +} + +static void vcpu_tb_exec(unsigned int cpu_index, void *udata) +{ + bb_entry_t *bb = (bb_entry_t *) udata; + + g_mutex_lock(&lock); + bb->exec = true; + g_mutex_unlock(&lock); +} + +static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) 
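/*
 * (One bb_entry_t is allocated per translated block, its size summed from
 * the block's instructions, and vcpu_tb_exec() is registered so the entry
 * is flagged as executed when the block actually runs.)
 */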
+{ + uint64_t pc = qemu_plugin_tb_vaddr(tb); + size_t n = qemu_plugin_tb_n_insns(tb); + + g_mutex_lock(&lock); + + bb_entry_t *bb = g_new0(bb_entry_t, 1); + for (int i = 0; i < n; i++) { + bb->size += qemu_plugin_insn_size(qemu_plugin_tb_get_insn(tb, i)); + } + + bb->start = pc; + bb->mod_id = 0; + bb->exec = false; + g_ptr_array_add(blocks, bb); + + g_mutex_unlock(&lock); + qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec, + QEMU_PLUGIN_CB_NO_REGS, + (void *)bb); + +} + +QEMU_PLUGIN_EXPORT +int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, + int argc, char **argv) +{ + for (int i = 0; i < argc; i++) { + g_autofree char **tokens = g_strsplit(argv[i], "=", 2); + if (g_strcmp0(tokens[0], "filename") == 0) { + file_name = g_strdup(tokens[1]); + } + } + + plugin_init(); + + qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); + qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); + + return 0; +} diff --git a/contrib/plugins/execlog.c b/contrib/plugins/execlog.c index 2de9f0d7d4..e255bd21fd 100644 --- a/contrib/plugins/execlog.c +++ b/contrib/plugins/execlog.c @@ -18,7 +18,29 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; /* Store last executed instruction on each vCPU as a GString */ -GArray *last_exec; +static GPtrArray *last_exec; +static GMutex expand_array_lock; + +static GPtrArray *imatches; +static GArray *amatches; + +/* + * Expand last_exec array. + * + * As we could have multiple threads trying to do this we need to + * serialise the expansion under a lock. Threads accessing already + * created entries can continue without issue even if the ptr array + * gets reallocated during resize. + */ +static void expand_last_exec(int cpu_index) +{ + g_mutex_lock(&expand_array_lock); + while (cpu_index >= last_exec->len) { + GString *s = g_string_new(NULL); + g_ptr_array_add(last_exec, s); + } + g_mutex_unlock(&expand_array_lock); +} /** * Add memory read or write information to current instruction log @@ -30,7 +52,7 @@ static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t info, /* Find vCPU in array */ g_assert(cpu_index < last_exec->len); - s = g_array_index(last_exec, GString *, cpu_index); + s = g_ptr_array_index(last_exec, cpu_index); /* Indicate type of memory access */ if (qemu_plugin_mem_is_store(info)) { @@ -58,16 +80,15 @@ static void vcpu_insn_exec(unsigned int cpu_index, void *udata) GString *s; /* Find or create vCPU in array */ - while (cpu_index >= last_exec->len) { - s = g_string_new(NULL); - g_array_append_val(last_exec, s); + if (cpu_index >= last_exec->len) { + expand_last_exec(cpu_index); } - s = g_array_index(last_exec, GString *, cpu_index); + s = g_ptr_array_index(last_exec, cpu_index); /* Print previous instruction in cache */ if (s->len) { qemu_plugin_outs(s->str); - qemu_plugin_outs("s\n"); + qemu_plugin_outs("\n"); } /* Store new instruction in cache */ @@ -85,12 +106,13 @@ static void vcpu_insn_exec(unsigned int cpu_index, void *udata) static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) { struct qemu_plugin_insn *insn; - uint64_t insn_vaddr; - uint32_t insn_opcode; - char *insn_disas; + bool skip = (imatches || amatches); size_t n = qemu_plugin_tb_n_insns(tb); for (size_t i = 0; i < n; i++) { + char *insn_disas; + uint64_t insn_vaddr; + /* * `insn` is shared between translations in QEMU, copy needed data here. 
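         * (The disassembly is fetched up front so the new ifilter matching
         * can run on it, and it is freed again for instructions that end up
         * skipped.)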
* `output` is never freed as it might be used multiple times during @@ -99,20 +121,55 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) * a limitation for CISC architectures. */ insn = qemu_plugin_tb_get_insn(tb, i); - insn_vaddr = qemu_plugin_insn_vaddr(insn); - insn_opcode = *((uint32_t *)qemu_plugin_insn_data(insn)); insn_disas = qemu_plugin_insn_disas(insn); - char *output = g_strdup_printf("0x%"PRIx64", 0x%"PRIx32", \"%s\"", - insn_vaddr, insn_opcode, insn_disas); + insn_vaddr = qemu_plugin_insn_vaddr(insn); - /* Register callback on memory read or write */ - qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem, - QEMU_PLUGIN_CB_NO_REGS, - QEMU_PLUGIN_MEM_RW, NULL); + /* + * If we are filtering we better check out if we have any + * hits. The skip "latches" so we can track memory accesses + * after the instruction we care about. + */ + if (skip && imatches) { + int j; + for (j = 0; j < imatches->len && skip; j++) { + char *m = g_ptr_array_index(imatches, j); + if (g_str_has_prefix(insn_disas, m)) { + skip = false; + } + } + } + + if (skip && amatches) { + int j; + for (j = 0; j < amatches->len && skip; j++) { + uint64_t v = g_array_index(amatches, uint64_t, j); + if (v == insn_vaddr) { + skip = false; + } + } + } + + if (skip) { + g_free(insn_disas); + } else { + uint32_t insn_opcode; + insn_opcode = *((uint32_t *)qemu_plugin_insn_data(insn)); + char *output = g_strdup_printf("0x%"PRIx64", 0x%"PRIx32", \"%s\"", + insn_vaddr, insn_opcode, insn_disas); + + /* Register callback on memory read or write */ + qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem, + QEMU_PLUGIN_CB_NO_REGS, + QEMU_PLUGIN_MEM_RW, NULL); + + /* Register callback on instruction */ + qemu_plugin_register_vcpu_insn_exec_cb(insn, vcpu_insn_exec, + QEMU_PLUGIN_CB_NO_REGS, output); + + /* reset skip */ + skip = (imatches || amatches); + } - /* Register callback on instruction */ - qemu_plugin_register_vcpu_insn_exec_cb(insn, vcpu_insn_exec, - QEMU_PLUGIN_CB_NO_REGS, output); } } @@ -124,7 +181,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) guint i; GString *s; for (i = 0; i < last_exec->len; i++) { - s = g_array_index(last_exec, GString *, i); + s = g_ptr_array_index(last_exec, i); if (s->str) { qemu_plugin_outs(s->str); qemu_plugin_outs("\n"); @@ -132,6 +189,25 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) } } +/* Add a match to the array of matches */ +static void parse_insn_match(char *match) +{ + if (!imatches) { + imatches = g_ptr_array_new(); + } + g_ptr_array_add(imatches, match); +} + +static void parse_vaddr_match(char *match) +{ + uint64_t v = g_ascii_strtoull(match, NULL, 16); + + if (!amatches) { + amatches = g_array_new(false, true, sizeof(uint64_t)); + } + g_array_append_val(amatches, v); +} + /** * Install the plugin */ @@ -143,7 +219,24 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, * Initialize dynamic array to cache vCPU instruction. In user mode * we don't know the size before emulation. 
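     * In system-emulation mode the array can be pre-sized from
     * info->system.max_vcpus, as the code below does.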
*/ - last_exec = g_array_new(FALSE, FALSE, sizeof(GString *)); + if (info->system_emulation) { + last_exec = g_ptr_array_sized_new(info->system.max_vcpus); + } else { + last_exec = g_ptr_array_new(); + } + + for (int i = 0; i < argc; i++) { + char *opt = argv[i]; + g_autofree char **tokens = g_strsplit(opt, "=", 2); + if (g_strcmp0(tokens[0], "ifilter") == 0) { + parse_insn_match(tokens[1]); + } else if (g_strcmp0(tokens[0], "afilter") == 0) { + parse_vaddr_match(tokens[1]); + } else { + fprintf(stderr, "option parsing failed: %s\n", opt); + return -1; + } + } /* Register translation block and exit callbacks */ qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); diff --git a/contrib/plugins/hotblocks.c b/contrib/plugins/hotblocks.c index 4b08340143..062200a7a4 100644 --- a/contrib/plugins/hotblocks.c +++ b/contrib/plugins/hotblocks.c @@ -133,8 +133,18 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, int argc, char **argv) { - if (argc && strcmp(argv[0], "inline") == 0) { - do_inline = true; + for (int i = 0; i < argc; i++) { + char *opt = argv[i]; + g_autofree char **tokens = g_strsplit(opt, "=", 2); + if (g_strcmp0(tokens[0], "inline") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_inline)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); + return -1; + } + } else { + fprintf(stderr, "option parsing failed: %s\n", opt); + return -1; + } } plugin_init(); diff --git a/contrib/plugins/hotpages.c b/contrib/plugins/hotpages.c index bf53267532..0d12910af6 100644 --- a/contrib/plugins/hotpages.c +++ b/contrib/plugins/hotpages.c @@ -169,16 +169,26 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, for (i = 0; i < argc; i++) { char *opt = argv[i]; - if (g_strcmp0(opt, "reads") == 0) { - sort_by = SORT_R; - } else if (g_strcmp0(opt, "writes") == 0) { - sort_by = SORT_W; - } else if (g_strcmp0(opt, "address") == 0) { - sort_by = SORT_A; - } else if (g_strcmp0(opt, "io") == 0) { - track_io = true; - } else if (g_str_has_prefix(opt, "pagesize=")) { - page_size = g_ascii_strtoull(opt + 9, NULL, 10); + g_autofree char **tokens = g_strsplit(opt, "=", -1); + + if (g_strcmp0(tokens[0], "sortby") == 0) { + if (g_strcmp0(tokens[1], "reads") == 0) { + sort_by = SORT_R; + } else if (g_strcmp0(tokens[1], "writes") == 0) { + sort_by = SORT_W; + } else if (g_strcmp0(tokens[1], "address") == 0) { + sort_by = SORT_A; + } else { + fprintf(stderr, "invalid value to sortby: %s\n", tokens[1]); + return -1; + } + } else if (g_strcmp0(tokens[0], "io") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &track_io)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); + return -1; + } + } else if (g_strcmp0(tokens[0], "pagesize") == 0) { + page_size = g_ascii_strtoull(tokens[1], NULL, 10); } else { fprintf(stderr, "option parsing failed: %s\n", opt); return -1; diff --git a/contrib/plugins/howvec.c b/contrib/plugins/howvec.c index 600f7facc1..4a5ec3d936 100644 --- a/contrib/plugins/howvec.c +++ b/contrib/plugins/howvec.c @@ -333,23 +333,34 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, for (i = 0; i < argc; i++) { char *p = argv[i]; - if (strcmp(p, "inline") == 0) { - do_inline = true; - } else if (strcmp(p, "verbose") == 0) { - verbose = true; - } else { + g_autofree char **tokens = g_strsplit(p, "=", -1); + if (g_strcmp0(tokens[0], "inline") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_inline)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", 
p); + return -1; + } + } else if (g_strcmp0(tokens[0], "verbose") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &verbose)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", p); + return -1; + } + } else if (g_strcmp0(tokens[0], "count") == 0) { + char *value = tokens[1]; int j; CountType type = COUNT_INDIVIDUAL; - if (*p == '!') { + if (*value == '!') { type = COUNT_NONE; - p++; + value++; } for (j = 0; j < class_table_sz; j++) { - if (strcmp(p, class_table[j].opt) == 0) { + if (strcmp(value, class_table[j].opt) == 0) { class_table[j].what = type; break; } } + } else { + fprintf(stderr, "option parsing failed: %s\n", p); + return -1; } } diff --git a/contrib/plugins/hwprofile.c b/contrib/plugins/hwprofile.c index faf216ac00..691d4edb0c 100644 --- a/contrib/plugins/hwprofile.c +++ b/contrib/plugins/hwprofile.c @@ -259,27 +259,42 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, int argc, char **argv) { int i; + g_autoptr(GString) matches_raw = g_string_new(""); for (i = 0; i < argc; i++) { char *opt = argv[i]; - if (g_strcmp0(opt, "read") == 0) { - rw = QEMU_PLUGIN_MEM_R; - } else if (g_strcmp0(opt, "write") == 0) { - rw = QEMU_PLUGIN_MEM_W; - } else if (g_strcmp0(opt, "pattern") == 0) { - pattern = true; - } else if (g_strcmp0(opt, "source") == 0) { - source = true; - } else if (g_str_has_prefix(opt, "match")) { - gchar **parts = g_strsplit(opt, "=", 2); + g_autofree char **tokens = g_strsplit(opt, "=", 2); + + if (g_strcmp0(tokens[0], "track") == 0) { + if (g_strcmp0(tokens[1], "read") == 0) { + rw = QEMU_PLUGIN_MEM_R; + } else if (g_strcmp0(tokens[1], "write") == 0) { + rw = QEMU_PLUGIN_MEM_W; + } else { + fprintf(stderr, "invalid value for track: %s\n", tokens[1]); + return -1; + } + } else if (g_strcmp0(tokens[0], "pattern") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &pattern)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); + return -1; + } + } else if (g_strcmp0(tokens[0], "source") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &source)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); + return -1; + } + } else if (g_strcmp0(tokens[0], "match") == 0) { check_match = true; - matches = g_strsplit(parts[1], ",", -1); - g_strfreev(parts); + g_string_append_printf(matches_raw, "%s,", tokens[1]); } else { fprintf(stderr, "option parsing failed: %s\n", opt); return -1; } } + if (check_match) { + matches = g_strsplit(matches_raw->str, ",", -1); + } if (source && pattern) { fprintf(stderr, "can only currently track either source or pattern.\n"); diff --git a/contrib/plugins/lockstep.c b/contrib/plugins/lockstep.c index 7fd35eb669..a41ffe83fa 100644 --- a/contrib/plugins/lockstep.c +++ b/contrib/plugins/lockstep.c @@ -319,22 +319,35 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, int argc, char **argv) { int i; - - if (!argc || !argv[0]) { - qemu_plugin_outs("Need a socket path to talk to other instance."); - return -1; - } + g_autofree char *sock_path = NULL; for (i = 0; i < argc; i++) { char *p = argv[i]; - if (strcmp(p, "verbose") == 0) { - verbose = true; - } else if (!setup_unix_socket(argv[0])) { - qemu_plugin_outs("Failed to setup socket for communications."); + g_autofree char **tokens = g_strsplit(p, "=", 2); + + if (g_strcmp0(tokens[0], "verbose") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &verbose)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", p); + return -1; + } + } else if (g_strcmp0(tokens[0], "sockpath") == 0) { 
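            /* remember the path; the socket itself is opened via
             * setup_unix_socket() only after all options are parsed */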
+ sock_path = tokens[1]; + } else { + fprintf(stderr, "option parsing failed: %s\n", p); return -1; } } + if (sock_path == NULL) { + fprintf(stderr, "Need a socket path to talk to other instance.\n"); + return -1; + } + + if (!setup_unix_socket(sock_path)) { + fprintf(stderr, "Failed to setup socket for communications.\n"); + return -1; + } + our_id = id; qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); diff --git a/contrib/rdmacm-mux/meson.build b/contrib/rdmacm-mux/meson.build index 6cc5016747..36c9c89630 100644 --- a/contrib/rdmacm-mux/meson.build +++ b/contrib/rdmacm-mux/meson.build @@ -1,8 +1,6 @@ -if 'CONFIG_PVRDMA' in config_host - # if not found, CONFIG_PVRDMA should not be set +if have_pvrdma # FIXME: broken on big endian architectures - libumad = cc.find_library('ibumad', required: true) - executable('rdmacm-mux', files('main.c'), + executable('rdmacm-mux', files('main.c'), genh, dependencies: [glib, libumad], build_by_default: false, install: false) diff --git a/contrib/vhost-user-blk/meson.build b/contrib/vhost-user-blk/meson.build index 601ea15ef5..dcb9e2ffcd 100644 --- a/contrib/vhost-user-blk/meson.build +++ b/contrib/vhost-user-blk/meson.build @@ -1,5 +1,4 @@ -# FIXME: broken on 32-bit architectures executable('vhost-user-blk', files('vhost-user-blk.c'), dependencies: [qemuutil, vhost_user], - build_by_default: false, + build_by_default: targetos == 'linux', install: false) diff --git a/contrib/vhost-user-blk/vhost-user-blk.c b/contrib/vhost-user-blk/vhost-user-blk.c index d14b2896bf..d6932a2645 100644 --- a/contrib/vhost-user-blk/vhost-user-blk.c +++ b/contrib/vhost-user-blk/vhost-user-blk.c @@ -106,10 +106,7 @@ static void vub_req_complete(VubReq *req) req->size + 1); vu_queue_notify(vu_dev, req->vq); - if (req->elem) { - free(req->elem); - } - + g_free(req->elem); g_free(req); } @@ -146,7 +143,7 @@ vub_readv(VubReq *req, struct iovec *iov, uint32_t iovcnt) req->size = vub_iov_size(iov, iovcnt); rc = preadv(vdev_blk->blk_fd, iov, iovcnt, req->sector_num * 512); if (rc < 0) { - fprintf(stderr, "%s, Sector %"PRIu64", Size %lu failed with %s\n", + fprintf(stderr, "%s, Sector %"PRIu64", Size %zu failed with %s\n", vdev_blk->blk_name, req->sector_num, req->size, strerror(errno)); return -1; @@ -169,7 +166,7 @@ vub_writev(VubReq *req, struct iovec *iov, uint32_t iovcnt) req->size = vub_iov_size(iov, iovcnt); rc = pwritev(vdev_blk->blk_fd, iov, iovcnt, req->sector_num * 512); if (rc < 0) { - fprintf(stderr, "%s, Sector %"PRIu64", Size %lu failed with %s\n", + fprintf(stderr, "%s, Sector %"PRIu64", Size %zu failed with %s\n", vdev_blk->blk_name, req->sector_num, req->size, strerror(errno)); return -1; @@ -188,7 +185,7 @@ vub_discard_write_zeroes(VubReq *req, struct iovec *iov, uint32_t iovcnt, size = vub_iov_size(iov, iovcnt); if (size != sizeof(*desc)) { - fprintf(stderr, "Invalid size %ld, expect %ld\n", size, sizeof(*desc)); + fprintf(stderr, "Invalid size %zd, expect %zd\n", size, sizeof(*desc)); return -1; } buf = g_new0(char, size); @@ -243,7 +240,7 @@ static int vub_virtio_process_req(VubDev *vdev_blk, /* refer to hw/block/virtio_blk.c */ if (elem->out_num < 1 || elem->in_num < 1) { fprintf(stderr, "virtio-blk request missing headers\n"); - free(elem); + g_free(elem); return -1; } @@ -325,7 +322,7 @@ static int vub_virtio_process_req(VubDev *vdev_blk, return 0; err: - free(elem); + g_free(elem); g_free(req); return -1; } @@ -593,7 +590,8 @@ static GOptionEntry entries[] = { {"blk-file", 'b', 0, G_OPTION_ARG_FILENAME, &opt_blk_file, "block device or file path", 
"PATH"}, { "read-only", 'r', 0, G_OPTION_ARG_NONE, &opt_read_only, - "Enable read-only", NULL } + "Enable read-only", NULL }, + { NULL, }, }; int main(int argc, char **argv) diff --git a/contrib/vhost-user-gpu/meson.build b/contrib/vhost-user-gpu/meson.build index 4cb52a91d7..c8883c2d8e 100644 --- a/contrib/vhost-user-gpu/meson.build +++ b/contrib/vhost-user-gpu/meson.build @@ -1,6 +1,4 @@ -if 'CONFIG_TOOLS' in config_host and virgl.found() \ - and 'CONFIG_GBM' in config_host and 'CONFIG_LINUX' in config_host \ - and pixman.found() +if have_vhost_user_gpu executable('vhost-user-gpu', files('vhost-user-gpu.c', 'virgl.c', 'vugbm.c'), dependencies: [qemuutil, pixman, gbm, virgl, vhost_user, opengl], install: true, diff --git a/contrib/vhost-user-gpu/vhost-user-gpu.c b/contrib/vhost-user-gpu/vhost-user-gpu.c index 611360e6b4..bfb8d93cf8 100644 --- a/contrib/vhost-user-gpu/vhost-user-gpu.c +++ b/contrib/vhost-user-gpu/vhost-user-gpu.c @@ -455,7 +455,7 @@ vg_create_mapping_iov(VuGpu *g, return -1; } - *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries); + *iov = g_new0(struct iovec, ab->nr_entries); for (i = 0; i < ab->nr_entries; i++) { uint64_t len = ents[i].length; (*iov)[i].iov_len = ents[i].length; diff --git a/contrib/vhost-user-gpu/vugbm.c b/contrib/vhost-user-gpu/vugbm.c index fb15d0372c..503d0a4566 100644 --- a/contrib/vhost-user-gpu/vugbm.c +++ b/contrib/vhost-user-gpu/vugbm.c @@ -53,7 +53,7 @@ struct udmabuf_create { static size_t udmabuf_get_size(struct vugbm_buffer *buf) { - return ROUND_UP(buf->width * buf->height * 4, qemu_real_host_page_size); + return ROUND_UP(buf->width * buf->height * 4, qemu_real_host_page_size()); } static bool diff --git a/contrib/vhost-user-scsi/vhost-user-scsi.c b/contrib/vhost-user-scsi/vhost-user-scsi.c index 4f6e3e2a24..9ef61cf5a7 100644 --- a/contrib/vhost-user-scsi/vhost-user-scsi.c +++ b/contrib/vhost-user-scsi/vhost-user-scsi.c @@ -351,34 +351,59 @@ fail: /** vhost-user-scsi **/ +static int opt_fdnum = -1; +static char *opt_socket_path; +static gboolean opt_print_caps; +static char *iscsi_uri; + +static GOptionEntry entries[] = { + { "print-capabilities", 'c', 0, G_OPTION_ARG_NONE, &opt_print_caps, + "Print capabilities", NULL }, + { "fd", 'f', 0, G_OPTION_ARG_INT, &opt_fdnum, + "Use inherited fd socket", "FDNUM" }, + { "iscsi-uri", 'i', 0, G_OPTION_ARG_FILENAME, &iscsi_uri, + "iSCSI URI to connect to", "FDNUM" }, + { "socket-path", 's', 0, G_OPTION_ARG_FILENAME, &opt_socket_path, + "Use UNIX socket path", "PATH" }, + { NULL, } +}; + int main(int argc, char **argv) { VusDev *vdev_scsi = NULL; - char *unix_fn = NULL; - char *iscsi_uri = NULL; - int lsock = -1, csock = -1, opt, err = EXIT_SUCCESS; + int lsock = -1, csock = -1, err = EXIT_SUCCESS; - while ((opt = getopt(argc, argv, "u:i:")) != -1) { - switch (opt) { - case 'h': - goto help; - case 'u': - unix_fn = g_strdup(optarg); - break; - case 'i': - iscsi_uri = g_strdup(optarg); - break; - default: - goto help; - } + GError *error = NULL; + GOptionContext *context; + + context = g_option_context_new(NULL); + g_option_context_add_main_entries(context, entries, NULL); + if (!g_option_context_parse(context, &argc, &argv, &error)) { + g_printerr("Option parsing failed: %s\n", error->message); + exit(EXIT_FAILURE); } - if (!unix_fn || !iscsi_uri) { + + if (opt_print_caps) { + g_print("{\n"); + g_print(" \"type\": \"scsi\"\n"); + g_print("}\n"); + goto out; + } + + if (!iscsi_uri) { goto help; } - lsock = unix_sock_new(unix_fn); - if (lsock < 0) { - goto err; + if (opt_socket_path) { + lsock = 
unix_sock_new(opt_socket_path); + if (lsock < 0) { + exit(EXIT_FAILURE); + } + } else if (opt_fdnum < 0) { + g_print("%s\n", g_option_context_get_help(context, true, NULL)); + exit(EXIT_FAILURE); + } else { + lsock = opt_fdnum; } csock = accept(lsock, NULL, NULL); @@ -408,15 +433,18 @@ out: if (vdev_scsi) { g_main_loop_unref(vdev_scsi->loop); g_free(vdev_scsi); - unlink(unix_fn); } if (csock >= 0) { close(csock); } if (lsock >= 0) { close(lsock); + + if (opt_socket_path) { + unlink(opt_socket_path); + } } - g_free(unix_fn); + g_free(opt_socket_path); g_free(iscsi_uri); return err; @@ -426,10 +454,12 @@ err: goto out; help: - fprintf(stderr, "Usage: %s [ -u unix_sock_path -i iscsi_uri ] | [ -h ]\n", + fprintf(stderr, "Usage: %s [ -s socket-path -i iscsi-uri -f fd -p print-capabilities ] | [ -h ]\n", argv[0]); - fprintf(stderr, " -u path to unix socket\n"); - fprintf(stderr, " -i iscsi uri for lun 0\n"); + fprintf(stderr, " -s, --socket-path=SOCKET_PATH path to unix socket\n"); + fprintf(stderr, " -i, --iscsi-uri=ISCSI_URI iscsi uri for lun 0\n"); + fprintf(stderr, " -f, --fd=FILE_DESCRIPTOR file-descriptor\n"); + fprintf(stderr, " -p, --print-capabilities=PRINT_CAPABILITIES denotes print-capabilities\n"); fprintf(stderr, " -h print help and quit\n"); goto err; diff --git a/cpu.c b/cpu.c index e1799a15bc..4a7d865427 100644 --- a/cpu.c +++ b/cpu.c @@ -18,7 +18,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/error.h" #include "exec/target_page.h" @@ -35,10 +34,13 @@ #include "sysemu/tcg.h" #include "sysemu/kvm.h" #include "sysemu/replay.h" +#include "exec/cpu-common.h" +#include "exec/exec-all.h" #include "exec/translate-all.h" #include "exec/log.h" #include "hw/core/accel-cpu.h" #include "trace/trace-root.h" +#include "qemu/accel.h" uintptr_t qemu_host_page_size; intptr_t qemu_host_page_mask; @@ -129,20 +131,25 @@ const VMStateDescription vmstate_cpu_common = { void cpu_exec_realizefn(CPUState *cpu, Error **errp) { -#ifndef CONFIG_USER_ONLY - CPUClass *cc = CPU_GET_CLASS(cpu); -#endif + /* cache the cpu class for the hotpath */ + cpu->cc = CPU_GET_CLASS(cpu); - cpu_list_add(cpu); if (!accel_cpu_realizefn(cpu, errp)) { return; } -#ifdef CONFIG_TCG + /* NB: errp parameter is unused currently */ if (tcg_enabled()) { tcg_exec_realizefn(cpu, errp); } -#endif /* CONFIG_TCG */ + + /* Wait until cpu initialization complete before exposing cpu. */ + cpu_list_add(cpu); + + /* Plugin initialization must wait until cpu_index assigned. */ + if (tcg_enabled()) { + qemu_plugin_vcpu_init_hook(cpu); + } #ifdef CONFIG_USER_ONLY assert(qdev_get_vmsd(DEVICE(cpu)) == NULL || @@ -151,8 +158,8 @@ void cpu_exec_realizefn(CPUState *cpu, Error **errp) if (qdev_get_vmsd(DEVICE(cpu)) == NULL) { vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu); } - if (cc->sysemu_ops->legacy_vmsd != NULL) { - vmstate_register(NULL, cpu->cpu_index, cc->sysemu_ops->legacy_vmsd, cpu); + if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) { + vmstate_register(NULL, cpu->cpu_index, cpu->cc->sysemu_ops->legacy_vmsd, cpu); } #endif /* CONFIG_USER_ONLY */ } @@ -169,16 +176,64 @@ void cpu_exec_unrealizefn(CPUState *cpu) vmstate_unregister(NULL, &vmstate_cpu_common, cpu); } #endif -#ifdef CONFIG_TCG - /* NB: errp parameter is unused currently */ if (tcg_enabled()) { tcg_exec_unrealizefn(cpu); } -#endif /* CONFIG_TCG */ cpu_list_remove(cpu); } +/* + * This can't go in hw/core/cpu.c because that file is compiled only + * once for both user-mode and system builds. 
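 * Here, by contrast, the property list may differ between user-mode and
 * softmmu builds, as the CONFIG_USER_ONLY conditional below shows.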
+ */ +static Property cpu_common_props[] = { +#ifdef CONFIG_USER_ONLY + /* + * Create a property for the user-only object, so users can + * adjust prctl(PR_SET_UNALIGN) from the command-line. + * Has no effect if the target does not support the feature. + */ + DEFINE_PROP_BOOL("prctl-unalign-sigbus", CPUState, + prctl_unalign_sigbus, false), +#else + /* + * Create a memory property for softmmu CPU object, so users can + * wire up its memory. The default if no link is set up is to use + * the system address space. + */ + DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION, + MemoryRegion *), +#endif + DEFINE_PROP_END_OF_LIST(), +}; + +static bool cpu_get_start_powered_off(Object *obj, Error **errp) +{ + CPUState *cpu = CPU(obj); + return cpu->start_powered_off; +} + +static void cpu_set_start_powered_off(Object *obj, bool value, Error **errp) +{ + CPUState *cpu = CPU(obj); + cpu->start_powered_off = value; +} + +void cpu_class_init_props(DeviceClass *dc) +{ + ObjectClass *oc = OBJECT_CLASS(dc); + + device_class_set_props(dc, cpu_common_props); + /* + * We can't use DEFINE_PROP_BOOL in the Property array for this + * property, because we want this to be settable after realize. + */ + object_class_property_add_bool(oc, "start-powered-off", + cpu_get_start_powered_off, + cpu_set_start_powered_off); +} + void cpu_exec_initfn(CPUState *cpu) { cpu->as = NULL; @@ -218,11 +273,19 @@ const char *parse_cpu_option(const char *cpu_option) return cpu_type; } +void list_cpus(const char *optarg) +{ + /* XXX: implement xxx_cpu_list for targets that still miss it */ +#if defined(cpu_list) + cpu_list(); +#endif +} + #if defined(CONFIG_USER_ONLY) void tb_invalidate_phys_addr(target_ulong addr) { mmap_lock(); - tb_invalidate_phys_page_range(addr, addr + 1); + tb_invalidate_phys_page(addr); mmap_unlock(); } #else @@ -243,7 +306,7 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs) return; } ram_addr = memory_region_get_ram_addr(mr) + addr; - tb_invalidate_phys_page_range(ram_addr, ram_addr + 1); + tb_invalidate_phys_page(ram_addr); } #endif @@ -343,14 +406,14 @@ void cpu_abort(CPUState *cpu, const char *fmt, ...) fprintf(stderr, "\n"); cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP); if (qemu_log_separate()) { - FILE *logfile = qemu_log_lock(); - qemu_log("qemu: fatal: "); - qemu_log_vprintf(fmt, ap2); - qemu_log("\n"); - log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP); - qemu_log_flush(); - qemu_log_unlock(logfile); - qemu_log_close(); + FILE *logfile = qemu_log_trylock(); + if (logfile) { + fprintf(logfile, "qemu: fatal: "); + vfprintf(logfile, fmt, ap2); + fprintf(logfile, "\n"); + cpu_dump_state(cpu, logfile, CPU_DUMP_FPU | CPU_DUMP_CCOP); + qemu_log_unlock(logfile); + } } va_end(ap2); va_end(ap); @@ -369,11 +432,11 @@ void cpu_abort(CPUState *cpu, const char *fmt, ...) 
/* physical memory access (slow version, mainly for debug) */ #if defined(CONFIG_USER_ONLY) -int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, - void *ptr, target_ulong len, bool is_write) +int cpu_memory_rw_debug(CPUState *cpu, vaddr addr, + void *ptr, size_t len, bool is_write) { int flags; - target_ulong l, page; + vaddr l, page; void * p; uint8_t *buf = ptr; @@ -412,7 +475,7 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, bool target_words_bigendian(void) { -#if defined(TARGET_WORDS_BIGENDIAN) +#if TARGET_BIG_ENDIAN return true; #else return false; @@ -424,7 +487,7 @@ void page_size_init(void) /* NOTE: we can always suppose that qemu_host_page_size >= TARGET_PAGE_SIZE */ if (qemu_host_page_size == 0) { - qemu_host_page_size = qemu_real_host_page_size; + qemu_host_page_size = qemu_real_host_page_size(); } if (qemu_host_page_size < TARGET_PAGE_SIZE) { qemu_host_page_size = TARGET_PAGE_SIZE; diff --git a/cpus-common.c b/cpus-common.c index 6e73d3e58d..793364dc0e 100644 --- a/cpus-common.c +++ b/cpus-common.c @@ -73,6 +73,12 @@ static int cpu_get_free_index(void) } CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus); +static unsigned int cpu_list_generation_id; + +unsigned int cpu_list_generation_id_get(void) +{ + return cpu_list_generation_id; +} void cpu_list_add(CPUState *cpu) { @@ -84,6 +90,7 @@ void cpu_list_add(CPUState *cpu) assert(!cpu_index_auto_assigned); } QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node); + cpu_list_generation_id++; } void cpu_list_remove(CPUState *cpu) @@ -96,6 +103,7 @@ void cpu_list_remove(CPUState *cpu) QTAILQ_REMOVE_RCU(&cpus, cpu, node); cpu->cpu_index = UNASSIGNED_CPU_INDEX; + cpu_list_generation_id++; } CPUState *qemu_get_cpu(int index) @@ -160,7 +168,7 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data) { struct qemu_work_item *wi; - wi = g_malloc0(sizeof(struct qemu_work_item)); + wi = g_new0(struct qemu_work_item, 1); wi->func = func; wi->data = data; wi->free = true; @@ -305,7 +313,7 @@ void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, { struct qemu_work_item *wi; - wi = g_malloc0(sizeof(struct qemu_work_item)); + wi = g_new0(struct qemu_work_item, 1); wi->func = func; wi->data = data; wi->free = true; diff --git a/crypto/akcipher-gcrypt.c.inc b/crypto/akcipher-gcrypt.c.inc new file mode 100644 index 0000000000..abb1fb272e --- /dev/null +++ b/crypto/akcipher-gcrypt.c.inc @@ -0,0 +1,595 @@ +/* + * QEMU Crypto akcipher algorithms + * + * Copyright (c) 2022 Bytedance + * Author: lei he + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ * + */ + +#include + +#include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "crypto/akcipher.h" +#include "crypto/random.h" +#include "qapi/error.h" +#include "sysemu/cryptodev.h" +#include "rsakey.h" + +typedef struct QCryptoGcryptRSA { + QCryptoAkCipher akcipher; + gcry_sexp_t key; + QCryptoRSAPaddingAlgorithm padding_alg; + QCryptoHashAlgorithm hash_alg; +} QCryptoGcryptRSA; + +static void qcrypto_gcrypt_rsa_free(QCryptoAkCipher *akcipher) +{ + QCryptoGcryptRSA *rsa = (QCryptoGcryptRSA *)akcipher; + if (!rsa) { + return; + } + + gcry_sexp_release(rsa->key); + g_free(rsa); +} + +static QCryptoGcryptRSA *qcrypto_gcrypt_rsa_new( + const QCryptoAkCipherOptionsRSA *opt, + QCryptoAkCipherKeyType type, + const uint8_t *key, size_t keylen, + Error **errp); + +QCryptoAkCipher *qcrypto_akcipher_new(const QCryptoAkCipherOptions *opts, + QCryptoAkCipherKeyType type, + const uint8_t *key, size_t keylen, + Error **errp) +{ + switch (opts->alg) { + case QCRYPTO_AKCIPHER_ALG_RSA: + return (QCryptoAkCipher *)qcrypto_gcrypt_rsa_new( + &opts->u.rsa, type, key, keylen, errp); + + default: + error_setg(errp, "Unsupported algorithm: %u", opts->alg); + return NULL; + } + + return NULL; +} + +static void qcrypto_gcrypt_set_rsa_size(QCryptoAkCipher *akcipher, gcry_mpi_t n) +{ + size_t key_size = (gcry_mpi_get_nbits(n) + 7) / 8; + akcipher->max_plaintext_len = key_size; + akcipher->max_ciphertext_len = key_size; + akcipher->max_dgst_len = key_size; + akcipher->max_signature_len = key_size; +} + +static int qcrypto_gcrypt_parse_rsa_private_key( + QCryptoGcryptRSA *rsa, + const uint8_t *key, size_t keylen, Error **errp) +{ + g_autoptr(QCryptoAkCipherRSAKey) rsa_key = qcrypto_akcipher_rsakey_parse( + QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, key, keylen, errp); + gcry_mpi_t n = NULL, e = NULL, d = NULL, p = NULL, q = NULL, u = NULL; + bool compute_mul_inv = false; + int ret = -1; + gcry_error_t err; + + if (!rsa_key) { + return ret; + } + + err = gcry_mpi_scan(&n, GCRYMPI_FMT_STD, + rsa_key->n.data, rsa_key->n.len, NULL); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to parse RSA parameter n: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + + err = gcry_mpi_scan(&e, GCRYMPI_FMT_STD, + rsa_key->e.data, rsa_key->e.len, NULL); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to parse RSA parameter e: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + + err = gcry_mpi_scan(&d, GCRYMPI_FMT_STD, + rsa_key->d.data, rsa_key->d.len, NULL); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to parse RSA parameter d: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + + err = gcry_mpi_scan(&p, GCRYMPI_FMT_STD, + rsa_key->p.data, rsa_key->p.len, NULL); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to parse RSA parameter p: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + + err = gcry_mpi_scan(&q, GCRYMPI_FMT_STD, + rsa_key->q.data, rsa_key->q.len, NULL); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to parse RSA parameter q: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + + if (gcry_mpi_cmp_ui(p, 0) > 0 && gcry_mpi_cmp_ui(q, 0) > 0) { + compute_mul_inv = true; + + u = gcry_mpi_new(0); + if (gcry_mpi_cmp(p, q) > 0) { + gcry_mpi_swap(p, q); + } + gcry_mpi_invm(u, p, q); + } + + if (compute_mul_inv) { + err = gcry_sexp_build(&rsa->key, NULL, + "(private-key (rsa (n %m) (e %m) (d %m) (p %m) (q %m) (u %m)))", + n, e, d, p, q, u); + } else { + err 
= gcry_sexp_build(&rsa->key, NULL, + "(private-key (rsa (n %m) (e %m) (d %m)))", n, e, d); + } + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to build RSA private key: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + qcrypto_gcrypt_set_rsa_size((QCryptoAkCipher *)rsa, n); + ret = 0; + +cleanup: + gcry_mpi_release(n); + gcry_mpi_release(e); + gcry_mpi_release(d); + gcry_mpi_release(p); + gcry_mpi_release(q); + gcry_mpi_release(u); + return ret; +} + +static int qcrypto_gcrypt_parse_rsa_public_key(QCryptoGcryptRSA *rsa, + const uint8_t *key, + size_t keylen, + Error **errp) +{ + + g_autoptr(QCryptoAkCipherRSAKey) rsa_key = qcrypto_akcipher_rsakey_parse( + QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, key, keylen, errp); + gcry_mpi_t n = NULL, e = NULL; + int ret = -1; + gcry_error_t err; + + if (!rsa_key) { + return ret; + } + + err = gcry_mpi_scan(&n, GCRYMPI_FMT_STD, + rsa_key->n.data, rsa_key->n.len, NULL); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to parse RSA parameter n: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + + err = gcry_mpi_scan(&e, GCRYMPI_FMT_STD, + rsa_key->e.data, rsa_key->e.len, NULL); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to parse RSA parameter e: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + + err = gcry_sexp_build(&rsa->key, NULL, + "(public-key (rsa (n %m) (e %m)))", n, e); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to build RSA public key: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + qcrypto_gcrypt_set_rsa_size((QCryptoAkCipher *)rsa, n); + ret = 0; + +cleanup: + gcry_mpi_release(n); + gcry_mpi_release(e); + return ret; +} + +static int qcrypto_gcrypt_rsa_encrypt(QCryptoAkCipher *akcipher, + const void *in, size_t in_len, + void *out, size_t out_len, + Error **errp) +{ + QCryptoGcryptRSA *rsa = (QCryptoGcryptRSA *)akcipher; + int ret = -1; + gcry_sexp_t data_sexp = NULL, cipher_sexp = NULL; + gcry_sexp_t cipher_sexp_item = NULL; + gcry_mpi_t cipher_mpi = NULL; + const char *result; + gcry_error_t err; + size_t actual_len; + + if (in_len > akcipher->max_plaintext_len) { + error_setg(errp, "Plaintext length is greater than key size: %d", + akcipher->max_plaintext_len); + return ret; + } + + err = gcry_sexp_build(&data_sexp, NULL, + "(data (flags %s) (value %b))", + QCryptoRSAPaddingAlgorithm_str(rsa->padding_alg), + in_len, in); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to build plaintext: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + + err = gcry_pk_encrypt(&cipher_sexp, data_sexp, rsa->key); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to encrypt: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + + /* S-expression of cipher: (enc-val (rsa (a a-mpi))) */ + cipher_sexp_item = gcry_sexp_find_token(cipher_sexp, "a", 0); + if (!cipher_sexp_item || gcry_sexp_length(cipher_sexp_item) != 2) { + error_setg(errp, "Invalid ciphertext result"); + goto cleanup; + } + + if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALG_RAW) { + cipher_mpi = gcry_sexp_nth_mpi(cipher_sexp_item, 1, GCRYMPI_FMT_USG); + if (!cipher_mpi) { + error_setg(errp, "Invalid ciphertext result"); + goto cleanup; + } + err = gcry_mpi_print(GCRYMPI_FMT_USG, out, out_len, + &actual_len, cipher_mpi); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to print MPI: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + + if (actual_len > 
out_len) { + error_setg(errp, "Ciphertext buffer length is too small"); + goto cleanup; + } + + /* We always padding leading-zeros for RSA-RAW */ + if (actual_len < out_len) { + memmove((uint8_t *)out + (out_len - actual_len), out, actual_len); + memset(out, 0, out_len - actual_len); + } + ret = out_len; + + } else { + result = gcry_sexp_nth_data(cipher_sexp_item, 1, &actual_len); + if (!result) { + error_setg(errp, "Invalid ciphertext result"); + goto cleanup; + } + if (actual_len > out_len) { + error_setg(errp, "Ciphertext buffer length is too small"); + goto cleanup; + } + memcpy(out, result, actual_len); + ret = actual_len; + } + +cleanup: + gcry_sexp_release(data_sexp); + gcry_sexp_release(cipher_sexp); + gcry_sexp_release(cipher_sexp_item); + gcry_mpi_release(cipher_mpi); + return ret; +} + +static int qcrypto_gcrypt_rsa_decrypt(QCryptoAkCipher *akcipher, + const void *in, size_t in_len, + void *out, size_t out_len, + Error **errp) +{ + QCryptoGcryptRSA *rsa = (QCryptoGcryptRSA *)akcipher; + int ret = -1; + gcry_sexp_t data_sexp = NULL, cipher_sexp = NULL; + gcry_mpi_t data_mpi = NULL; + gcry_error_t err; + size_t actual_len; + const char *result; + + if (in_len > akcipher->max_ciphertext_len) { + error_setg(errp, "Ciphertext length is greater than key size: %d", + akcipher->max_ciphertext_len); + return ret; + } + + err = gcry_sexp_build(&cipher_sexp, NULL, + "(enc-val (flags %s) (rsa (a %b) ))", + QCryptoRSAPaddingAlgorithm_str(rsa->padding_alg), + in_len, in); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to build ciphertext: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + + err = gcry_pk_decrypt(&data_sexp, cipher_sexp, rsa->key); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to decrypt: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + + /* S-expression of plaintext: (value plaintext) */ + if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALG_RAW) { + data_mpi = gcry_sexp_nth_mpi(data_sexp, 1, GCRYMPI_FMT_USG); + if (!data_mpi) { + error_setg(errp, "Invalid plaintext result"); + goto cleanup; + } + err = gcry_mpi_print(GCRYMPI_FMT_USG, out, out_len, + &actual_len, data_mpi); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to print MPI: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + if (actual_len > out_len) { + error_setg(errp, "Plaintext buffer length is too small"); + goto cleanup; + } + /* We always padding leading-zeros for RSA-RAW */ + if (actual_len < out_len) { + memmove((uint8_t *)out + (out_len - actual_len), out, actual_len); + memset(out, 0, out_len - actual_len); + } + ret = out_len; + } else { + result = gcry_sexp_nth_data(data_sexp, 1, &actual_len); + if (!result) { + error_setg(errp, "Invalid plaintext result"); + goto cleanup; + } + if (actual_len > out_len) { + error_setg(errp, "Plaintext buffer length is too small"); + goto cleanup; + } + memcpy(out, result, actual_len); + ret = actual_len; + } + +cleanup: + gcry_sexp_release(cipher_sexp); + gcry_sexp_release(data_sexp); + gcry_mpi_release(data_mpi); + return ret; +} + +static int qcrypto_gcrypt_rsa_sign(QCryptoAkCipher *akcipher, + const void *in, size_t in_len, + void *out, size_t out_len, Error **errp) +{ + QCryptoGcryptRSA *rsa = (QCryptoGcryptRSA *)akcipher; + int ret = -1; + gcry_sexp_t dgst_sexp = NULL, sig_sexp = NULL; + gcry_sexp_t sig_sexp_item = NULL; + const char *result; + gcry_error_t err; + size_t actual_len; + + if (in_len > akcipher->max_dgst_len) { + error_setg(errp, "Data length is 
greater than key size: %d", + akcipher->max_dgst_len); + return ret; + } + + if (rsa->padding_alg != QCRYPTO_RSA_PADDING_ALG_PKCS1) { + error_setg(errp, "Invalid padding %u", rsa->padding_alg); + return ret; + } + + err = gcry_sexp_build(&dgst_sexp, NULL, + "(data (flags pkcs1) (hash %s %b))", + QCryptoHashAlgorithm_str(rsa->hash_alg), + in_len, in); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to build dgst: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + + err = gcry_pk_sign(&sig_sexp, dgst_sexp, rsa->key); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to make signature: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + + /* S-expression of signature: (sig-val (rsa (s s-mpi))) */ + sig_sexp_item = gcry_sexp_find_token(sig_sexp, "s", 0); + if (!sig_sexp_item || gcry_sexp_length(sig_sexp_item) != 2) { + error_setg(errp, "Invalid signature result"); + goto cleanup; + } + + result = gcry_sexp_nth_data(sig_sexp_item, 1, &actual_len); + if (!result) { + error_setg(errp, "Invalid signature result"); + goto cleanup; + } + + if (actual_len > out_len) { + error_setg(errp, "Signature buffer length is too small"); + goto cleanup; + } + memcpy(out, result, actual_len); + ret = actual_len; + +cleanup: + gcry_sexp_release(dgst_sexp); + gcry_sexp_release(sig_sexp); + gcry_sexp_release(sig_sexp_item); + + return ret; +} + +static int qcrypto_gcrypt_rsa_verify(QCryptoAkCipher *akcipher, + const void *in, size_t in_len, + const void *in2, size_t in2_len, + Error **errp) +{ + QCryptoGcryptRSA *rsa = (QCryptoGcryptRSA *)akcipher; + int ret = -1; + gcry_sexp_t sig_sexp = NULL, dgst_sexp = NULL; + gcry_error_t err; + + if (in_len > akcipher->max_signature_len) { + error_setg(errp, "Signature length is greater than key size: %d", + akcipher->max_signature_len); + return ret; + } + + if (in2_len > akcipher->max_dgst_len) { + error_setg(errp, "Data length is greater than key size: %d", + akcipher->max_dgst_len); + return ret; + } + + if (rsa->padding_alg != QCRYPTO_RSA_PADDING_ALG_PKCS1) { + error_setg(errp, "Invalid padding %u", rsa->padding_alg); + return ret; + } + + err = gcry_sexp_build(&sig_sexp, NULL, + "(sig-val (rsa (s %b)))", in_len, in); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to build signature: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + + err = gcry_sexp_build(&dgst_sexp, NULL, + "(data (flags pkcs1) (hash %s %b))", + QCryptoHashAlgorithm_str(rsa->hash_alg), + in2_len, in2); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to build dgst: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + + err = gcry_pk_verify(sig_sexp, dgst_sexp, rsa->key); + if (gcry_err_code(err) != 0) { + error_setg(errp, "Failed to verify signature: %s/%s", + gcry_strsource(err), gcry_strerror(err)); + goto cleanup; + } + ret = 0; + +cleanup: + gcry_sexp_release(dgst_sexp); + gcry_sexp_release(sig_sexp); + + return ret; +} + +QCryptoAkCipherDriver gcrypt_rsa = { + .encrypt = qcrypto_gcrypt_rsa_encrypt, + .decrypt = qcrypto_gcrypt_rsa_decrypt, + .sign = qcrypto_gcrypt_rsa_sign, + .verify = qcrypto_gcrypt_rsa_verify, + .free = qcrypto_gcrypt_rsa_free, +}; + +static QCryptoGcryptRSA *qcrypto_gcrypt_rsa_new( + const QCryptoAkCipherOptionsRSA *opt, + QCryptoAkCipherKeyType type, + const uint8_t *key, size_t keylen, + Error **errp) +{ + QCryptoGcryptRSA *rsa = g_new0(QCryptoGcryptRSA, 1); + rsa->padding_alg = opt->padding_alg; + rsa->hash_alg = opt->hash_alg; + 
rsa->akcipher.driver = &gcrypt_rsa; + + switch (type) { + case QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE: + if (qcrypto_gcrypt_parse_rsa_private_key(rsa, key, keylen, errp) != 0) { + goto error; + } + break; + + case QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC: + if (qcrypto_gcrypt_parse_rsa_public_key(rsa, key, keylen, errp) != 0) { + goto error; + } + break; + + default: + error_setg(errp, "Unknown akcipher key type %d", type); + goto error; + } + + return rsa; + +error: + qcrypto_gcrypt_rsa_free((QCryptoAkCipher *)rsa); + return NULL; +} + + +bool qcrypto_akcipher_supports(QCryptoAkCipherOptions *opts) +{ + switch (opts->alg) { + case QCRYPTO_AKCIPHER_ALG_RSA: + switch (opts->u.rsa.padding_alg) { + case QCRYPTO_RSA_PADDING_ALG_RAW: + return true; + + case QCRYPTO_RSA_PADDING_ALG_PKCS1: + switch (opts->u.rsa.hash_alg) { + case QCRYPTO_HASH_ALG_MD5: + case QCRYPTO_HASH_ALG_SHA1: + case QCRYPTO_HASH_ALG_SHA256: + case QCRYPTO_HASH_ALG_SHA512: + return true; + + default: + return false; + } + + default: + return false; + } + + default: + return true; + } +} diff --git a/crypto/akcipher-nettle.c.inc b/crypto/akcipher-nettle.c.inc new file mode 100644 index 0000000000..02699e6e6d --- /dev/null +++ b/crypto/akcipher-nettle.c.inc @@ -0,0 +1,451 @@ +/* + * QEMU Crypto akcipher algorithms + * + * Copyright (c) 2022 Bytedance + * Author: lei he + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ * + */ + +#include + +#include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "crypto/akcipher.h" +#include "crypto/random.h" +#include "qapi/error.h" +#include "sysemu/cryptodev.h" +#include "rsakey.h" + +typedef struct QCryptoNettleRSA { + QCryptoAkCipher akcipher; + struct rsa_public_key pub; + struct rsa_private_key priv; + QCryptoRSAPaddingAlgorithm padding_alg; + QCryptoHashAlgorithm hash_alg; +} QCryptoNettleRSA; + +static void qcrypto_nettle_rsa_free(QCryptoAkCipher *akcipher) +{ + QCryptoNettleRSA *rsa = (QCryptoNettleRSA *)akcipher; + if (!rsa) { + return; + } + + rsa_public_key_clear(&rsa->pub); + rsa_private_key_clear(&rsa->priv); + g_free(rsa); +} + +static QCryptoAkCipher *qcrypto_nettle_rsa_new( + const QCryptoAkCipherOptionsRSA *opt, + QCryptoAkCipherKeyType type, + const uint8_t *key, size_t keylen, + Error **errp); + +QCryptoAkCipher *qcrypto_akcipher_new(const QCryptoAkCipherOptions *opts, + QCryptoAkCipherKeyType type, + const uint8_t *key, size_t keylen, + Error **errp) +{ + switch (opts->alg) { + case QCRYPTO_AKCIPHER_ALG_RSA: + return qcrypto_nettle_rsa_new(&opts->u.rsa, type, key, keylen, errp); + + default: + error_setg(errp, "Unsupported algorithm: %u", opts->alg); + return NULL; + } + + return NULL; +} + +static void qcrypto_nettle_rsa_set_akcipher_size(QCryptoAkCipher *akcipher, + int key_size) +{ + akcipher->max_plaintext_len = key_size; + akcipher->max_ciphertext_len = key_size; + akcipher->max_signature_len = key_size; + akcipher->max_dgst_len = key_size; +} + +static int qcrypt_nettle_parse_rsa_private_key(QCryptoNettleRSA *rsa, + const uint8_t *key, + size_t keylen, + Error **errp) +{ + g_autoptr(QCryptoAkCipherRSAKey) rsa_key = qcrypto_akcipher_rsakey_parse( + QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, key, keylen, errp); + + if (!rsa_key) { + return -1; + } + + nettle_mpz_init_set_str_256_u(rsa->pub.n, rsa_key->n.len, rsa_key->n.data); + nettle_mpz_init_set_str_256_u(rsa->pub.e, rsa_key->e.len, rsa_key->e.data); + nettle_mpz_init_set_str_256_u(rsa->priv.d, rsa_key->d.len, rsa_key->d.data); + nettle_mpz_init_set_str_256_u(rsa->priv.p, rsa_key->p.len, rsa_key->p.data); + nettle_mpz_init_set_str_256_u(rsa->priv.q, rsa_key->q.len, rsa_key->q.data); + nettle_mpz_init_set_str_256_u(rsa->priv.a, rsa_key->dp.len, + rsa_key->dp.data); + nettle_mpz_init_set_str_256_u(rsa->priv.b, rsa_key->dq.len, + rsa_key->dq.data); + nettle_mpz_init_set_str_256_u(rsa->priv.c, rsa_key->u.len, rsa_key->u.data); + + if (!rsa_public_key_prepare(&rsa->pub)) { + error_setg(errp, "Failed to check RSA key"); + return -1; + } + + /** + * Since in the kernel's unit test, the p, q, a, b, c of some + * private keys is 0, only the simplest length check is done here + */ + if (rsa_key->p.len > 1 && + rsa_key->q.len > 1 && + rsa_key->dp.len > 1 && + rsa_key->dq.len > 1 && + rsa_key->u.len > 1) { + if (!rsa_private_key_prepare(&rsa->priv)) { + error_setg(errp, "Failed to check RSA key"); + return -1; + } + } else { + rsa->priv.size = rsa->pub.size; + } + qcrypto_nettle_rsa_set_akcipher_size( + (QCryptoAkCipher *)rsa, rsa->priv.size); + + return 0; +} + +static int qcrypt_nettle_parse_rsa_public_key(QCryptoNettleRSA *rsa, + const uint8_t *key, + size_t keylen, + Error **errp) +{ + g_autoptr(QCryptoAkCipherRSAKey) rsa_key = qcrypto_akcipher_rsakey_parse( + QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, key, keylen, errp); + + if (!rsa_key) { + return -1; + } + nettle_mpz_init_set_str_256_u(rsa->pub.n, rsa_key->n.len, rsa_key->n.data); + nettle_mpz_init_set_str_256_u(rsa->pub.e, rsa_key->e.len, 
rsa_key->e.data); + + if (!rsa_public_key_prepare(&rsa->pub)) { + error_setg(errp, "Failed to check RSA key"); + return -1; + } + qcrypto_nettle_rsa_set_akcipher_size( + (QCryptoAkCipher *)rsa, rsa->pub.size); + + return 0; +} + +static void wrap_nettle_random_func(void *ctx, size_t len, uint8_t *out) +{ + qcrypto_random_bytes(out, len, &error_abort); +} + +static int qcrypto_nettle_rsa_encrypt(QCryptoAkCipher *akcipher, + const void *data, size_t data_len, + void *enc, size_t enc_len, + Error **errp) +{ + + QCryptoNettleRSA *rsa = (QCryptoNettleRSA *)akcipher; + mpz_t c; + int ret = -1; + + if (data_len > rsa->pub.size) { + error_setg(errp, "Plaintext length %zu is greater than key size: %zu", + data_len, rsa->pub.size); + return ret; + } + + if (enc_len < rsa->pub.size) { + error_setg(errp, "Ciphertext buffer length %zu is less than " + "key size: %zu", enc_len, rsa->pub.size); + return ret; + } + + /* Nettle do not support RSA encryption without any padding */ + switch (rsa->padding_alg) { + case QCRYPTO_RSA_PADDING_ALG_RAW: + error_setg(errp, "RSA with raw padding is not supported"); + break; + + case QCRYPTO_RSA_PADDING_ALG_PKCS1: + mpz_init(c); + if (rsa_encrypt(&rsa->pub, NULL, wrap_nettle_random_func, + data_len, (uint8_t *)data, c) != 1) { + error_setg(errp, "Failed to encrypt"); + } else { + nettle_mpz_get_str_256(enc_len, (uint8_t *)enc, c); + ret = nettle_mpz_sizeinbase_256_u(c); + } + mpz_clear(c); + break; + + default: + error_setg(errp, "Unknown padding"); + } + + return ret; +} + +static int qcrypto_nettle_rsa_decrypt(QCryptoAkCipher *akcipher, + const void *enc, size_t enc_len, + void *data, size_t data_len, + Error **errp) +{ + QCryptoNettleRSA *rsa = (QCryptoNettleRSA *)akcipher; + mpz_t c; + int ret = -1; + + if (enc_len > rsa->priv.size) { + error_setg(errp, "Ciphertext length %zu is greater than key size: %zu", + enc_len, rsa->priv.size); + return ret; + } + + switch (rsa->padding_alg) { + case QCRYPTO_RSA_PADDING_ALG_RAW: + error_setg(errp, "RSA with raw padding is not supported"); + break; + + case QCRYPTO_RSA_PADDING_ALG_PKCS1: + nettle_mpz_init_set_str_256_u(c, enc_len, enc); + if (!rsa_decrypt(&rsa->priv, &data_len, (uint8_t *)data, c)) { + error_setg(errp, "Failed to decrypt"); + } else { + ret = data_len; + } + + mpz_clear(c); + break; + + default: + error_setg(errp, "Unknown padding algorithm: %d", rsa->padding_alg); + } + + return ret; +} + +static int qcrypto_nettle_rsa_sign(QCryptoAkCipher *akcipher, + const void *data, size_t data_len, + void *sig, size_t sig_len, Error **errp) +{ + QCryptoNettleRSA *rsa = (QCryptoNettleRSA *)akcipher; + int ret = -1, rv; + mpz_t s; + + /** + * The RSA algorithm cannot be used for signature/verification + * without padding. 
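/*
 * Editorial illustration (not part of this patch): the PKCS#1 signing path
 * above is normally reached through the generic qcrypto_akcipher_* wrappers
 * added later in this series in crypto/akcipher.c.  A minimal caller-side
 * sketch, assuming "priv_der" holds a DER-encoded RSA private key and
 * "digest" points at a precomputed 32-byte SHA-256 digest; the helper name
 * is invented for illustration.
 */
static int example_rsa_sign_sha256_digest(const uint8_t *priv_der,
                                          size_t priv_der_len,
                                          const uint8_t *digest,
                                          uint8_t *sig, size_t sig_len,
                                          Error **errp)
{
    QCryptoAkCipherOptions opts = {
        .alg = QCRYPTO_AKCIPHER_ALG_RSA,
        .u.rsa = {
            .padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1,
            .hash_alg = QCRYPTO_HASH_ALG_SHA256,
        },
    };
    QCryptoAkCipher *ak;
    int ret;

    ak = qcrypto_akcipher_new(&opts, QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE,
                              priv_der, priv_der_len, errp);
    if (!ak) {
        return -1;
    }
    /*
     * in = digest, out = signature buffer; sig_len should be at least
     * qcrypto_akcipher_max_signature_len(ak).  Returns the signature
     * length on success, -1 on error.
     */
    ret = qcrypto_akcipher_sign(ak, digest, 32, sig, sig_len, errp);
    qcrypto_akcipher_free(ak);
    return ret;
}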
+ */ + if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALG_RAW) { + error_setg(errp, "Try to make signature without padding"); + return ret; + } + + if (data_len > rsa->priv.size) { + error_setg(errp, "Data length %zu is greater than key size: %zu", + data_len, rsa->priv.size); + return ret; + } + + if (sig_len < rsa->priv.size) { + error_setg(errp, "Signature buffer length %zu is less than " + "key size: %zu", sig_len, rsa->priv.size); + return ret; + } + + mpz_init(s); + switch (rsa->hash_alg) { + case QCRYPTO_HASH_ALG_MD5: + rv = rsa_md5_sign_digest(&rsa->priv, data, s); + break; + + case QCRYPTO_HASH_ALG_SHA1: + rv = rsa_sha1_sign_digest(&rsa->priv, data, s); + break; + + case QCRYPTO_HASH_ALG_SHA256: + rv = rsa_sha256_sign_digest(&rsa->priv, data, s); + break; + + case QCRYPTO_HASH_ALG_SHA512: + rv = rsa_sha512_sign_digest(&rsa->priv, data, s); + break; + + default: + error_setg(errp, "Unknown hash algorithm: %d", rsa->hash_alg); + goto cleanup; + } + + if (rv != 1) { + error_setg(errp, "Failed to make signature"); + goto cleanup; + } + nettle_mpz_get_str_256(sig_len, (uint8_t *)sig, s); + ret = nettle_mpz_sizeinbase_256_u(s); + +cleanup: + mpz_clear(s); + + return ret; +} + +static int qcrypto_nettle_rsa_verify(QCryptoAkCipher *akcipher, + const void *sig, size_t sig_len, + const void *data, size_t data_len, + Error **errp) +{ + QCryptoNettleRSA *rsa = (QCryptoNettleRSA *)akcipher; + + int ret = -1, rv; + mpz_t s; + + /** + * The RSA algorithm cannot be used for signature/verification + * without padding. + */ + if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALG_RAW) { + error_setg(errp, "Try to verify signature without padding"); + return ret; + } + if (data_len > rsa->pub.size) { + error_setg(errp, "Data length %zu is greater than key size: %zu", + data_len, rsa->pub.size); + return ret; + } + if (sig_len < rsa->pub.size) { + error_setg(errp, "Signature length %zu is greater than key size: %zu", + sig_len, rsa->pub.size); + return ret; + } + + nettle_mpz_init_set_str_256_u(s, sig_len, sig); + switch (rsa->hash_alg) { + case QCRYPTO_HASH_ALG_MD5: + rv = rsa_md5_verify_digest(&rsa->pub, data, s); + break; + + case QCRYPTO_HASH_ALG_SHA1: + rv = rsa_sha1_verify_digest(&rsa->pub, data, s); + break; + + case QCRYPTO_HASH_ALG_SHA256: + rv = rsa_sha256_verify_digest(&rsa->pub, data, s); + break; + + case QCRYPTO_HASH_ALG_SHA512: + rv = rsa_sha512_verify_digest(&rsa->pub, data, s); + break; + + default: + error_setg(errp, "Unsupported hash algorithm: %d", rsa->hash_alg); + goto cleanup; + } + + if (rv != 1) { + error_setg(errp, "Failed to verify signature"); + goto cleanup; + } + ret = 0; + +cleanup: + mpz_clear(s); + + return ret; +} + +QCryptoAkCipherDriver nettle_rsa = { + .encrypt = qcrypto_nettle_rsa_encrypt, + .decrypt = qcrypto_nettle_rsa_decrypt, + .sign = qcrypto_nettle_rsa_sign, + .verify = qcrypto_nettle_rsa_verify, + .free = qcrypto_nettle_rsa_free, +}; + +static QCryptoAkCipher *qcrypto_nettle_rsa_new( + const QCryptoAkCipherOptionsRSA *opt, + QCryptoAkCipherKeyType type, + const uint8_t *key, size_t keylen, + Error **errp) +{ + QCryptoNettleRSA *rsa = g_new0(QCryptoNettleRSA, 1); + + rsa->padding_alg = opt->padding_alg; + rsa->hash_alg = opt->hash_alg; + rsa->akcipher.driver = &nettle_rsa; + rsa_public_key_init(&rsa->pub); + rsa_private_key_init(&rsa->priv); + + switch (type) { + case QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE: + if (qcrypt_nettle_parse_rsa_private_key(rsa, key, keylen, errp) != 0) { + goto error; + } + break; + + case QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC: + if 
(qcrypt_nettle_parse_rsa_public_key(rsa, key, keylen, errp) != 0) { + goto error; + } + break; + + default: + error_setg(errp, "Unknown akcipher key type %d", type); + goto error; + } + + return (QCryptoAkCipher *)rsa; + +error: + qcrypto_nettle_rsa_free((QCryptoAkCipher *)rsa); + return NULL; +} + + +bool qcrypto_akcipher_supports(QCryptoAkCipherOptions *opts) +{ + switch (opts->alg) { + case QCRYPTO_AKCIPHER_ALG_RSA: + switch (opts->u.rsa.padding_alg) { + case QCRYPTO_RSA_PADDING_ALG_PKCS1: + switch (opts->u.rsa.hash_alg) { + case QCRYPTO_HASH_ALG_MD5: + case QCRYPTO_HASH_ALG_SHA1: + case QCRYPTO_HASH_ALG_SHA256: + case QCRYPTO_HASH_ALG_SHA512: + return true; + + default: + return false; + } + + case QCRYPTO_RSA_PADDING_ALG_RAW: + default: + return false; + } + break; + + default: + return false; + } +} diff --git a/crypto/akcipher.c b/crypto/akcipher.c new file mode 100644 index 0000000000..e4bbc6e5f1 --- /dev/null +++ b/crypto/akcipher.c @@ -0,0 +1,126 @@ +/* + * QEMU Crypto akcipher algorithms + * + * Copyright (c) 2022 Bytedance + * Author: zhenwei pi + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +#include "qemu/osdep.h" +#include "crypto/akcipher.h" +#include "akcipherpriv.h" +#include "der.h" +#include "rsakey.h" + +#if defined(CONFIG_GCRYPT) +#include "akcipher-gcrypt.c.inc" +#elif defined(CONFIG_NETTLE) && defined(CONFIG_HOGWEED) +#include "akcipher-nettle.c.inc" +#else +QCryptoAkCipher *qcrypto_akcipher_new(const QCryptoAkCipherOptions *opts, + QCryptoAkCipherKeyType type, + const uint8_t *key, size_t keylen, + Error **errp) +{ + QCryptoAkCipher *akcipher = NULL; + + return akcipher; +} + +bool qcrypto_akcipher_supports(QCryptoAkCipherOptions *opts) +{ + return false; +} +#endif + +int qcrypto_akcipher_encrypt(QCryptoAkCipher *akcipher, + const void *in, size_t in_len, + void *out, size_t out_len, Error **errp) +{ + const QCryptoAkCipherDriver *drv = akcipher->driver; + + return drv->encrypt(akcipher, in, in_len, out, out_len, errp); +} + +int qcrypto_akcipher_decrypt(QCryptoAkCipher *akcipher, + const void *in, size_t in_len, + void *out, size_t out_len, Error **errp) +{ + const QCryptoAkCipherDriver *drv = akcipher->driver; + + return drv->decrypt(akcipher, in, in_len, out, out_len, errp); +} + +int qcrypto_akcipher_sign(QCryptoAkCipher *akcipher, + const void *in, size_t in_len, + void *out, size_t out_len, Error **errp) +{ + const QCryptoAkCipherDriver *drv = akcipher->driver; + + return drv->sign(akcipher, in, in_len, out, out_len, errp); +} + +int qcrypto_akcipher_verify(QCryptoAkCipher *akcipher, + const void *in, size_t in_len, + const void *in2, size_t in2_len, Error **errp) +{ + const QCryptoAkCipherDriver *drv = akcipher->driver; + + return drv->verify(akcipher, in, in_len, in2, in2_len, errp); +} + +int qcrypto_akcipher_max_plaintext_len(QCryptoAkCipher *akcipher) +{ + return akcipher->max_plaintext_len; +} + +int 
qcrypto_akcipher_max_ciphertext_len(QCryptoAkCipher *akcipher) +{ + return akcipher->max_ciphertext_len; +} + +int qcrypto_akcipher_max_signature_len(QCryptoAkCipher *akcipher) +{ + return akcipher->max_signature_len; +} + +int qcrypto_akcipher_max_dgst_len(QCryptoAkCipher *akcipher) +{ + return akcipher->max_dgst_len; +} + +void qcrypto_akcipher_free(QCryptoAkCipher *akcipher) +{ + const QCryptoAkCipherDriver *drv = akcipher->driver; + + drv->free(akcipher); +} + +int qcrypto_akcipher_export_p8info(const QCryptoAkCipherOptions *opts, + uint8_t *key, size_t keylen, + uint8_t **dst, size_t *dst_len, + Error **errp) +{ + switch (opts->alg) { + case QCRYPTO_AKCIPHER_ALG_RSA: + qcrypto_akcipher_rsakey_export_p8info(key, keylen, dst, dst_len); + return 0; + + default: + error_setg(errp, "Unsupported algorithm: %u", opts->alg); + return -1; + } +} diff --git a/crypto/akcipherpriv.h b/crypto/akcipherpriv.h new file mode 100644 index 0000000000..739f639bcf --- /dev/null +++ b/crypto/akcipherpriv.h @@ -0,0 +1,55 @@ +/* + * QEMU Crypto asymmetric algorithms + * + * Copyright (c) 2022 Bytedance + * Author: zhenwei pi + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +#ifndef QCRYPTO_AKCIPHERPRIV_H +#define QCRYPTO_AKCIPHERPRIV_H + +#include "qapi/qapi-types-crypto.h" + +typedef struct QCryptoAkCipherDriver QCryptoAkCipherDriver; + +struct QCryptoAkCipher { + QCryptoAkCipherAlgorithm alg; + QCryptoAkCipherKeyType type; + int max_plaintext_len; + int max_ciphertext_len; + int max_signature_len; + int max_dgst_len; + QCryptoAkCipherDriver *driver; +}; + +struct QCryptoAkCipherDriver { + int (*encrypt)(QCryptoAkCipher *akcipher, + const void *in, size_t in_len, + void *out, size_t out_len, Error **errp); + int (*decrypt)(QCryptoAkCipher *akcipher, + const void *out, size_t out_len, + void *in, size_t in_len, Error **errp); + int (*sign)(QCryptoAkCipher *akcipher, + const void *in, size_t in_len, + void *out, size_t out_len, Error **errp); + int (*verify)(QCryptoAkCipher *akcipher, + const void *in, size_t in_len, + const void *in2, size_t in2_len, Error **errp); + void (*free)(QCryptoAkCipher *akcipher); +}; + +#endif /* QCRYPTO_AKCIPHER_H */ diff --git a/crypto/block-luks-priv.h b/crypto/block-luks-priv.h new file mode 100644 index 0000000000..90a20d432b --- /dev/null +++ b/crypto/block-luks-priv.h @@ -0,0 +1,143 @@ +/* + * QEMU Crypto block device encryption LUKS format + * + * Copyright (c) 2015-2016 Red Hat, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/bswap.h" + +#include "block-luks.h" + +#include "crypto/hash.h" +#include "crypto/afsplit.h" +#include "crypto/pbkdf.h" +#include "crypto/secret.h" +#include "crypto/random.h" +#include "qemu/uuid.h" + +#include "qemu/coroutine.h" +#include "qemu/bitmap.h" + +/* + * Reference for the LUKS format implemented here is + * + * docs/on-disk-format.pdf + * + * in 'cryptsetup' package source code + * + * This file implements the 1.2.1 specification, dated + * Oct 16, 2011. + */ + +typedef struct QCryptoBlockLUKSHeader QCryptoBlockLUKSHeader; +typedef struct QCryptoBlockLUKSKeySlot QCryptoBlockLUKSKeySlot; + + +/* The following constants are all defined by the LUKS spec */ +#define QCRYPTO_BLOCK_LUKS_VERSION 1 + +#define QCRYPTO_BLOCK_LUKS_MAGIC_LEN 6 +#define QCRYPTO_BLOCK_LUKS_CIPHER_NAME_LEN 32 +#define QCRYPTO_BLOCK_LUKS_CIPHER_MODE_LEN 32 +#define QCRYPTO_BLOCK_LUKS_HASH_SPEC_LEN 32 +#define QCRYPTO_BLOCK_LUKS_DIGEST_LEN 20 +#define QCRYPTO_BLOCK_LUKS_SALT_LEN 32 +#define QCRYPTO_BLOCK_LUKS_UUID_LEN 40 +#define QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS 8 +#define QCRYPTO_BLOCK_LUKS_STRIPES 4000 +#define QCRYPTO_BLOCK_LUKS_MIN_SLOT_KEY_ITERS 1000 +#define QCRYPTO_BLOCK_LUKS_MIN_MASTER_KEY_ITERS 1000 +#define QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET 4096 + +#define QCRYPTO_BLOCK_LUKS_KEY_SLOT_DISABLED 0x0000DEAD +#define QCRYPTO_BLOCK_LUKS_KEY_SLOT_ENABLED 0x00AC71F3 + +#define QCRYPTO_BLOCK_LUKS_SECTOR_SIZE 512LL + +#define QCRYPTO_BLOCK_LUKS_DEFAULT_ITER_TIME_MS 2000 +#define QCRYPTO_BLOCK_LUKS_ERASE_ITERATIONS 40 + +static const char qcrypto_block_luks_magic[QCRYPTO_BLOCK_LUKS_MAGIC_LEN] = { + 'L', 'U', 'K', 'S', 0xBA, 0xBE +}; + +/* + * This struct is written to disk in big-endian format, + * but operated upon in native-endian format. + */ +struct QCryptoBlockLUKSKeySlot { + /* state of keyslot, enabled/disable */ + uint32_t active; + /* iterations for PBKDF2 */ + uint32_t iterations; + /* salt for PBKDF2 */ + uint8_t salt[QCRYPTO_BLOCK_LUKS_SALT_LEN]; + /* start sector of key material */ + uint32_t key_offset_sector; + /* number of anti-forensic stripes */ + uint32_t stripes; +}; + +/* + * This struct is written to disk in big-endian format, + * but operated upon in native-endian format. 
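/*
 * Editorial illustration (not part of this patch): a reader of this header
 * converts the on-disk big-endian fields with the helper declared at the end
 * of this file (implemented in block-luks.c) before inspecting them.  The
 * function name below is invented; the real validation lives in
 * qcrypto_block_luks_check_header().
 */
static bool example_luks_header_looks_valid(QCryptoBlockLUKSHeader *hdr)
{
    /* hdr was read raw from offset 0; flip to native endianness in place */
    qcrypto_block_luks_from_disk_endian(hdr);

    return memcmp(hdr->magic, qcrypto_block_luks_magic,
                  QCRYPTO_BLOCK_LUKS_MAGIC_LEN) == 0 &&
           hdr->version == QCRYPTO_BLOCK_LUKS_VERSION;
}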
+ */ +struct QCryptoBlockLUKSHeader { + /* 'L', 'U', 'K', 'S', '0xBA', '0xBE' */ + char magic[QCRYPTO_BLOCK_LUKS_MAGIC_LEN]; + + /* LUKS version, currently 1 */ + uint16_t version; + + /* cipher name specification (aes, etc) */ + char cipher_name[QCRYPTO_BLOCK_LUKS_CIPHER_NAME_LEN]; + + /* cipher mode specification (cbc-plain, xts-essiv:sha256, etc) */ + char cipher_mode[QCRYPTO_BLOCK_LUKS_CIPHER_MODE_LEN]; + + /* hash specification (sha256, etc) */ + char hash_spec[QCRYPTO_BLOCK_LUKS_HASH_SPEC_LEN]; + + /* start offset of the volume data (in 512 byte sectors) */ + uint32_t payload_offset_sector; + + /* Number of key bytes */ + uint32_t master_key_len; + + /* master key checksum after PBKDF2 */ + uint8_t master_key_digest[QCRYPTO_BLOCK_LUKS_DIGEST_LEN]; + + /* salt for master key PBKDF2 */ + uint8_t master_key_salt[QCRYPTO_BLOCK_LUKS_SALT_LEN]; + + /* iterations for master key PBKDF2 */ + uint32_t master_key_iterations; + + /* UUID of the partition in standard ASCII representation */ + uint8_t uuid[QCRYPTO_BLOCK_LUKS_UUID_LEN]; + + /* key slots */ + QCryptoBlockLUKSKeySlot key_slots[QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS]; +}; + + +void +qcrypto_block_luks_to_disk_endian(QCryptoBlockLUKSHeader *hdr); +void +qcrypto_block_luks_from_disk_endian(QCryptoBlockLUKSHeader *hdr); diff --git a/crypto/block-luks.c b/crypto/block-luks.c index fe8f04ffb2..df2b4105d6 100644 --- a/crypto/block-luks.c +++ b/crypto/block-luks.c @@ -23,6 +23,7 @@ #include "qemu/bswap.h" #include "block-luks.h" +#include "block-luks-priv.h" #include "crypto/hash.h" #include "crypto/afsplit.h" @@ -46,37 +47,6 @@ */ typedef struct QCryptoBlockLUKS QCryptoBlockLUKS; -typedef struct QCryptoBlockLUKSHeader QCryptoBlockLUKSHeader; -typedef struct QCryptoBlockLUKSKeySlot QCryptoBlockLUKSKeySlot; - - -/* The following constants are all defined by the LUKS spec */ -#define QCRYPTO_BLOCK_LUKS_VERSION 1 - -#define QCRYPTO_BLOCK_LUKS_MAGIC_LEN 6 -#define QCRYPTO_BLOCK_LUKS_CIPHER_NAME_LEN 32 -#define QCRYPTO_BLOCK_LUKS_CIPHER_MODE_LEN 32 -#define QCRYPTO_BLOCK_LUKS_HASH_SPEC_LEN 32 -#define QCRYPTO_BLOCK_LUKS_DIGEST_LEN 20 -#define QCRYPTO_BLOCK_LUKS_SALT_LEN 32 -#define QCRYPTO_BLOCK_LUKS_UUID_LEN 40 -#define QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS 8 -#define QCRYPTO_BLOCK_LUKS_STRIPES 4000 -#define QCRYPTO_BLOCK_LUKS_MIN_SLOT_KEY_ITERS 1000 -#define QCRYPTO_BLOCK_LUKS_MIN_MASTER_KEY_ITERS 1000 -#define QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET 4096 - -#define QCRYPTO_BLOCK_LUKS_KEY_SLOT_DISABLED 0x0000DEAD -#define QCRYPTO_BLOCK_LUKS_KEY_SLOT_ENABLED 0x00AC71F3 - -#define QCRYPTO_BLOCK_LUKS_SECTOR_SIZE 512LL - -#define QCRYPTO_BLOCK_LUKS_DEFAULT_ITER_TIME_MS 2000 -#define QCRYPTO_BLOCK_LUKS_ERASE_ITERATIONS 40 - -static const char qcrypto_block_luks_magic[QCRYPTO_BLOCK_LUKS_MAGIC_LEN] = { - 'L', 'U', 'K', 'S', 0xBA, 0xBE -}; typedef struct QCryptoBlockLUKSNameMap QCryptoBlockLUKSNameMap; struct QCryptoBlockLUKSNameMap { @@ -134,69 +104,7 @@ qcrypto_block_luks_cipher_name_map[] = { { "twofish", qcrypto_block_luks_cipher_size_map_twofish }, }; - -/* - * This struct is written to disk in big-endian format, - * but operated upon in native-endian format. 
- */ -struct QCryptoBlockLUKSKeySlot { - /* state of keyslot, enabled/disable */ - uint32_t active; - /* iterations for PBKDF2 */ - uint32_t iterations; - /* salt for PBKDF2 */ - uint8_t salt[QCRYPTO_BLOCK_LUKS_SALT_LEN]; - /* start sector of key material */ - uint32_t key_offset_sector; - /* number of anti-forensic stripes */ - uint32_t stripes; -}; - QEMU_BUILD_BUG_ON(sizeof(struct QCryptoBlockLUKSKeySlot) != 48); - - -/* - * This struct is written to disk in big-endian format, - * but operated upon in native-endian format. - */ -struct QCryptoBlockLUKSHeader { - /* 'L', 'U', 'K', 'S', '0xBA', '0xBE' */ - char magic[QCRYPTO_BLOCK_LUKS_MAGIC_LEN]; - - /* LUKS version, currently 1 */ - uint16_t version; - - /* cipher name specification (aes, etc) */ - char cipher_name[QCRYPTO_BLOCK_LUKS_CIPHER_NAME_LEN]; - - /* cipher mode specification (cbc-plain, xts-essiv:sha256, etc) */ - char cipher_mode[QCRYPTO_BLOCK_LUKS_CIPHER_MODE_LEN]; - - /* hash specification (sha256, etc) */ - char hash_spec[QCRYPTO_BLOCK_LUKS_HASH_SPEC_LEN]; - - /* start offset of the volume data (in 512 byte sectors) */ - uint32_t payload_offset_sector; - - /* Number of key bytes */ - uint32_t master_key_len; - - /* master key checksum after PBKDF2 */ - uint8_t master_key_digest[QCRYPTO_BLOCK_LUKS_DIGEST_LEN]; - - /* salt for master key PBKDF2 */ - uint8_t master_key_salt[QCRYPTO_BLOCK_LUKS_SALT_LEN]; - - /* iterations for master key PBKDF2 */ - uint32_t master_key_iterations; - - /* UUID of the partition in standard ASCII representation */ - uint8_t uuid[QCRYPTO_BLOCK_LUKS_UUID_LEN]; - - /* key slots */ - QCryptoBlockLUKSKeySlot key_slots[QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS]; -}; - QEMU_BUILD_BUG_ON(sizeof(struct QCryptoBlockLUKSHeader) != 592); @@ -254,7 +162,7 @@ static int qcrypto_block_luks_cipher_name_lookup(const char *name, } } - error_setg(errp, "Algorithm %s with key size %d bytes not supported", + error_setg(errp, "Algorithm '%s' with key size %d bytes not supported", name, key_bytes); return 0; } @@ -290,7 +198,7 @@ static int qcrypto_block_luks_name_lookup(const char *name, int ret = qapi_enum_parse(map, name, -1, NULL); if (ret < 0) { - error_setg(errp, "%s %s not supported", type, name); + error_setg(errp, "%s '%s' not supported", type, name); return 0; } return ret; @@ -440,6 +348,51 @@ qcrypto_block_luks_splitkeylen_sectors(const QCryptoBlockLUKS *luks, return ROUND_UP(splitkeylen_sectors, header_sectors); } + +void +qcrypto_block_luks_to_disk_endian(QCryptoBlockLUKSHeader *hdr) +{ + size_t i; + + /* + * Everything on disk uses Big Endian (tm), so flip header fields + * before writing them + */ + cpu_to_be16s(&hdr->version); + cpu_to_be32s(&hdr->payload_offset_sector); + cpu_to_be32s(&hdr->master_key_len); + cpu_to_be32s(&hdr->master_key_iterations); + + for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { + cpu_to_be32s(&hdr->key_slots[i].active); + cpu_to_be32s(&hdr->key_slots[i].iterations); + cpu_to_be32s(&hdr->key_slots[i].key_offset_sector); + cpu_to_be32s(&hdr->key_slots[i].stripes); + } +} + +void +qcrypto_block_luks_from_disk_endian(QCryptoBlockLUKSHeader *hdr) +{ + size_t i; + + /* + * The header is always stored in big-endian format, so + * convert everything to native + */ + be16_to_cpus(&hdr->version); + be32_to_cpus(&hdr->payload_offset_sector); + be32_to_cpus(&hdr->master_key_len); + be32_to_cpus(&hdr->master_key_iterations); + + for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { + be32_to_cpus(&hdr->key_slots[i].active); + be32_to_cpus(&hdr->key_slots[i].iterations); + 
be32_to_cpus(&hdr->key_slots[i].key_offset_sector); + be32_to_cpus(&hdr->key_slots[i].stripes); + } +} + /* * Stores the main LUKS header, taking care of endianess */ @@ -451,28 +404,13 @@ qcrypto_block_luks_store_header(QCryptoBlock *block, { const QCryptoBlockLUKS *luks = block->opaque; Error *local_err = NULL; - size_t i; g_autofree QCryptoBlockLUKSHeader *hdr_copy = NULL; /* Create a copy of the header */ hdr_copy = g_new0(QCryptoBlockLUKSHeader, 1); memcpy(hdr_copy, &luks->header, sizeof(QCryptoBlockLUKSHeader)); - /* - * Everything on disk uses Big Endian (tm), so flip header fields - * before writing them - */ - cpu_to_be16s(&hdr_copy->version); - cpu_to_be32s(&hdr_copy->payload_offset_sector); - cpu_to_be32s(&hdr_copy->master_key_len); - cpu_to_be32s(&hdr_copy->master_key_iterations); - - for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { - cpu_to_be32s(&hdr_copy->key_slots[i].active); - cpu_to_be32s(&hdr_copy->key_slots[i].iterations); - cpu_to_be32s(&hdr_copy->key_slots[i].key_offset_sector); - cpu_to_be32s(&hdr_copy->key_slots[i].stripes); - } + qcrypto_block_luks_to_disk_endian(hdr_copy); /* Write out the partition header and key slot headers */ writefunc(block, 0, (const uint8_t *)hdr_copy, sizeof(*hdr_copy), @@ -495,8 +433,7 @@ qcrypto_block_luks_load_header(QCryptoBlock *block, void *opaque, Error **errp) { - ssize_t rv; - size_t i; + int rv; QCryptoBlockLUKS *luks = block->opaque; /* @@ -512,21 +449,7 @@ qcrypto_block_luks_load_header(QCryptoBlock *block, return rv; } - /* - * The header is always stored in big-endian format, so - * convert everything to native - */ - be16_to_cpus(&luks->header.version); - be32_to_cpus(&luks->header.payload_offset_sector); - be32_to_cpus(&luks->header.master_key_len); - be32_to_cpus(&luks->header.master_key_iterations); - - for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { - be32_to_cpus(&luks->header.key_slots[i].active); - be32_to_cpus(&luks->header.key_slots[i].iterations); - be32_to_cpus(&luks->header.key_slots[i].key_offset_sector); - be32_to_cpus(&luks->header.key_slots[i].stripes); - } + qcrypto_block_luks_from_disk_endian(&luks->header); return 0; } @@ -554,6 +477,36 @@ qcrypto_block_luks_check_header(const QCryptoBlockLUKS *luks, Error **errp) return -1; } + if (!memchr(luks->header.cipher_name, '\0', + sizeof(luks->header.cipher_name))) { + error_setg(errp, "LUKS header cipher name is not NUL terminated"); + return -1; + } + + if (!memchr(luks->header.cipher_mode, '\0', + sizeof(luks->header.cipher_mode))) { + error_setg(errp, "LUKS header cipher mode is not NUL terminated"); + return -1; + } + + if (!memchr(luks->header.hash_spec, '\0', + sizeof(luks->header.hash_spec))) { + error_setg(errp, "LUKS header hash spec is not NUL terminated"); + return -1; + } + + if (luks->header.payload_offset_sector < + DIV_ROUND_UP(QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET, + QCRYPTO_BLOCK_LUKS_SECTOR_SIZE)) { + error_setg(errp, "LUKS payload is overlapping with the header"); + return -1; + } + + if (luks->header.master_key_iterations == 0) { + error_setg(errp, "LUKS key iteration count is zero"); + return -1; + } + /* Check all keyslots for corruption */ for (i = 0 ; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS ; i++) { @@ -564,8 +517,9 @@ qcrypto_block_luks_check_header(const QCryptoBlockLUKS *luks, Error **errp) header_sectors, slot1->stripes); - if (slot1->stripes == 0) { - error_setg(errp, "Keyslot %zu is corrupted (stripes == 0)", i); + if (slot1->stripes != QCRYPTO_BLOCK_LUKS_STRIPES) { + error_setg(errp, "Keyslot %zu is corrupted (stripes %d 
!= %d)", + i, slot1->stripes, QCRYPTO_BLOCK_LUKS_STRIPES); return -1; } @@ -576,6 +530,20 @@ qcrypto_block_luks_check_header(const QCryptoBlockLUKS *luks, Error **errp) return -1; } + if (slot1->active == QCRYPTO_BLOCK_LUKS_KEY_SLOT_ENABLED && + slot1->iterations == 0) { + error_setg(errp, "Keyslot %zu iteration count is zero", i); + return -1; + } + + if (start1 < DIV_ROUND_UP(QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET, + QCRYPTO_BLOCK_LUKS_SECTOR_SIZE)) { + error_setg(errp, + "Keyslot %zu is overlapping with the LUKS header", + i); + return -1; + } + if (start1 + len1 > luks->header.payload_offset_sector) { error_setg(errp, "Keyslot %zu is overlapping with the encrypted payload", @@ -624,7 +592,7 @@ qcrypto_block_luks_parse_header(QCryptoBlockLUKS *luks, Error **errp) */ ivgen_name = strchr(cipher_mode, '-'); if (!ivgen_name) { - error_setg(errp, "Unexpected cipher mode string format %s", + error_setg(errp, "Unexpected cipher mode string format '%s'", luks->header.cipher_mode); return -1; } @@ -856,7 +824,7 @@ qcrypto_block_luks_store_key(QCryptoBlock *block, QCRYPTO_BLOCK_LUKS_SECTOR_SIZE, splitkey, splitkeylen, opaque, - errp) != splitkeylen) { + errp) < 0) { goto cleanup; } @@ -903,7 +871,7 @@ qcrypto_block_luks_load_key(QCryptoBlock *block, g_autofree uint8_t *splitkey = NULL; size_t splitkeylen; g_autofree uint8_t *possiblekey = NULL; - ssize_t rv; + int rv; g_autoptr(QCryptoCipher) cipher = NULL; uint8_t keydigest[QCRYPTO_BLOCK_LUKS_DIGEST_LEN]; g_autoptr(QCryptoIVGen) ivgen = NULL; @@ -1193,7 +1161,7 @@ qcrypto_block_luks_erase_key(QCryptoBlock *block, garbagesplitkey, splitkeylen, opaque, - &local_err) != splitkeylen) { + &local_err) < 0) { error_propagate(errp, local_err); return -1; } diff --git a/crypto/block.c b/crypto/block.c index eb057948b5..7bb4b74a37 100644 --- a/crypto/block.c +++ b/crypto/block.c @@ -115,7 +115,7 @@ QCryptoBlock *qcrypto_block_create(QCryptoBlockCreateOptions *options, } -static ssize_t qcrypto_block_headerlen_hdr_init_func(QCryptoBlock *block, +static int qcrypto_block_headerlen_hdr_init_func(QCryptoBlock *block, size_t headerlen, void *opaque, Error **errp) { size_t *headerlenp = opaque; @@ -126,12 +126,12 @@ static ssize_t qcrypto_block_headerlen_hdr_init_func(QCryptoBlock *block, } -static ssize_t qcrypto_block_headerlen_hdr_write_func(QCryptoBlock *block, +static int qcrypto_block_headerlen_hdr_write_func(QCryptoBlock *block, size_t offset, const uint8_t *buf, size_t buflen, void *opaque, Error **errp) { /* Discard the bytes, we're not actually writing to an image */ - return buflen; + return 0; } diff --git a/crypto/cipher-afalg.c b/crypto/cipher-afalg.c index 052355a8a9..3df8fc54c0 100644 --- a/crypto/cipher-afalg.c +++ b/crypto/cipher-afalg.c @@ -12,7 +12,6 @@ */ #include "qemu/osdep.h" #include "qemu/sockets.h" -#include "qemu-common.h" #include "qapi/error.h" #include "crypto/cipher.h" #include "cipherpriv.h" @@ -84,8 +83,8 @@ qcrypto_afalg_cipher_ctx_new(QCryptoCipherAlgorithm alg, g_free(name); /* setkey */ - if (qemu_setsockopt(afalg->tfmfd, SOL_ALG, ALG_SET_KEY, key, - nkey) != 0) { + if (setsockopt(afalg->tfmfd, SOL_ALG, ALG_SET_KEY, key, + nkey) != 0) { error_setg_errno(errp, errno, "Set key failed"); qcrypto_afalg_comm_free(afalg); return NULL; diff --git a/crypto/der.c b/crypto/der.c new file mode 100644 index 0000000000..dab3fe4f24 --- /dev/null +++ b/crypto/der.c @@ -0,0 +1,452 @@ +/* + * QEMU Crypto ASN.1 DER decoder + * + * Copyright (c) 2022 Bytedance + * Author: lei he + * + * This library is free software; you can redistribute it 
and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +#include "qemu/osdep.h" +#include "crypto/der.h" + +typedef struct QCryptoDerEncodeNode { + uint8_t tag; + struct QCryptoDerEncodeNode *parent; + struct QCryptoDerEncodeNode *next; + /* for constructed type, data is null */ + const uint8_t *data; + size_t dlen; +} QCryptoDerEncodeNode; + +typedef struct QCryptoEncodeContext { + QCryptoDerEncodeNode root; + QCryptoDerEncodeNode *current_parent; + QCryptoDerEncodeNode *tail; +} QCryptoEncodeContext; + +enum QCryptoDERTypeTag { + QCRYPTO_DER_TYPE_TAG_BOOL = 0x1, + QCRYPTO_DER_TYPE_TAG_INT = 0x2, + QCRYPTO_DER_TYPE_TAG_BIT_STR = 0x3, + QCRYPTO_DER_TYPE_TAG_OCT_STR = 0x4, + QCRYPTO_DER_TYPE_TAG_NULL = 0x5, + QCRYPTO_DER_TYPE_TAG_OID = 0x6, + QCRYPTO_DER_TYPE_TAG_SEQ = 0x10, + QCRYPTO_DER_TYPE_TAG_SET = 0x11, +}; + +enum QCryptoDERTagClass { + QCRYPTO_DER_TAG_CLASS_UNIV = 0x0, + QCRYPTO_DER_TAG_CLASS_APPL = 0x1, + QCRYPTO_DER_TAG_CLASS_CONT = 0x2, + QCRYPTO_DER_TAG_CLASS_PRIV = 0x3, +}; + +enum QCryptoDERTagEnc { + QCRYPTO_DER_TAG_ENC_PRIM = 0x0, + QCRYPTO_DER_TAG_ENC_CONS = 0x1, +}; + +#define QCRYPTO_DER_TAG_ENC_MASK 0x20 +#define QCRYPTO_DER_TAG_ENC_SHIFT 5 + +#define QCRYPTO_DER_TAG_CLASS_MASK 0xc0 +#define QCRYPTO_DER_TAG_CLASS_SHIFT 6 + +#define QCRYPTO_DER_TAG_VAL_MASK 0x1f +#define QCRYPTO_DER_SHORT_LEN_MASK 0x80 + +#define QCRYPTO_DER_TAG(class, enc, val) \ + (((class) << QCRYPTO_DER_TAG_CLASS_SHIFT) | \ + ((enc) << QCRYPTO_DER_TAG_ENC_SHIFT) | (val)) + +/** + * qcrypto_der_encode_length: + * @src_len: the length of source data + * @dst: distination to save the encoded 'length', if dst is NULL, only compute + * the expected buffer size in bytes. + * @dst_len: output parameter, indicates how many bytes wrote. + * + * Encode the 'length' part of TLV tuple. 
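/*
 * Worked examples (editorial, derived from the logic below):
 *   src_len = 0x45  -> 45         (short form: one byte, value < 0x80)
 *   src_len = 0x80  -> 81 80      (long form: 0x81 = one following length byte)
 *   src_len = 0x1f2 -> 82 01 f2   (long form: 0x82 = two following length bytes)
 */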
+ */ +static void qcrypto_der_encode_length(size_t src_len, + uint8_t *dst, size_t *dst_len) +{ + size_t max_length = 0xFF; + uint8_t length_bytes = 0, header_byte; + + if (src_len < QCRYPTO_DER_SHORT_LEN_MASK) { + header_byte = src_len; + *dst_len = 1; + } else { + for (length_bytes = 1; max_length < src_len; length_bytes++) { + max_length = (max_length << 8) + max_length; + } + header_byte = length_bytes; + header_byte |= QCRYPTO_DER_SHORT_LEN_MASK; + *dst_len = length_bytes + 1; + } + if (!dst) { + return; + } + *dst++ = header_byte; + /* Bigendian length bytes */ + for (; length_bytes > 0; length_bytes--) { + *dst++ = ((src_len >> (length_bytes - 1) * 8) & 0xFF); + } +} + +static uint8_t qcrypto_der_peek_byte(const uint8_t **data, size_t *dlen) +{ + return **data; +} + +static void qcrypto_der_cut_nbytes(const uint8_t **data, + size_t *dlen, + size_t nbytes) +{ + *data += nbytes; + *dlen -= nbytes; +} + +static uint8_t qcrypto_der_cut_byte(const uint8_t **data, size_t *dlen) +{ + uint8_t val = qcrypto_der_peek_byte(data, dlen); + + qcrypto_der_cut_nbytes(data, dlen, 1); + + return val; +} + +static int qcrypto_der_invoke_callback(QCryptoDERDecodeCb cb, void *ctx, + const uint8_t *value, size_t vlen, + Error **errp) +{ + if (!cb) { + return 0; + } + + return cb(ctx, value, vlen, errp); +} + +static int qcrypto_der_extract_definite_data(const uint8_t **data, size_t *dlen, + QCryptoDERDecodeCb cb, void *ctx, + Error **errp) +{ + const uint8_t *value; + size_t vlen = 0; + uint8_t byte_count = qcrypto_der_cut_byte(data, dlen); + + /* short format of definite-length */ + if (!(byte_count & QCRYPTO_DER_SHORT_LEN_MASK)) { + if (byte_count > *dlen) { + error_setg(errp, "Invalid content length: %u", byte_count); + return -1; + } + + value = *data; + vlen = byte_count; + qcrypto_der_cut_nbytes(data, dlen, vlen); + + if (qcrypto_der_invoke_callback(cb, ctx, value, vlen, errp) != 0) { + return -1; + } + return vlen; + } + + /* Ignore highest bit */ + byte_count &= ~QCRYPTO_DER_SHORT_LEN_MASK; + + /* + * size_t is enough to store the value of length, although the DER + * encoding standard supports larger length. 
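/*
 * Worked example (editorial): a long-form length field of 82 01 f2 leaves
 * byte_count = 2 after the mask above, and the accumulation loop below
 * computes vlen = (0x01 << 8) + 0xf2 = 0x1f2, i.e. 498 content bytes.
 */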
+ */ + if (byte_count > sizeof(size_t)) { + error_setg(errp, "Invalid byte count of content length: %u", + byte_count); + return -1; + } + + if (byte_count > *dlen) { + error_setg(errp, "Invalid content length: %u", byte_count); + return -1; + } + while (byte_count--) { + vlen <<= 8; + vlen += qcrypto_der_cut_byte(data, dlen); + } + + if (vlen > *dlen) { + error_setg(errp, "Invalid content length: %zu", vlen); + return -1; + } + + value = *data; + qcrypto_der_cut_nbytes(data, dlen, vlen); + + if (qcrypto_der_invoke_callback(cb, ctx, value, vlen, errp) != 0) { + return -1; + } + return vlen; +} + +static int qcrypto_der_extract_data(const uint8_t **data, size_t *dlen, + QCryptoDERDecodeCb cb, void *ctx, + Error **errp) +{ + uint8_t val; + if (*dlen < 1) { + error_setg(errp, "Need more data"); + return -1; + } + val = qcrypto_der_peek_byte(data, dlen); + + /* must use definite length format */ + if (val == QCRYPTO_DER_SHORT_LEN_MASK) { + error_setg(errp, "Only definite length format is allowed"); + return -1; + } + + return qcrypto_der_extract_definite_data(data, dlen, cb, ctx, errp); +} + +static int qcrypto_der_decode_tlv(const uint8_t expected_tag, + const uint8_t **data, size_t *dlen, + QCryptoDERDecodeCb cb, + void *ctx, Error **errp) +{ + const uint8_t *saved_data = *data; + size_t saved_dlen = *dlen; + uint8_t tag; + int data_length; + + if (*dlen < 1) { + error_setg(errp, "Need more data"); + return -1; + } + tag = qcrypto_der_cut_byte(data, dlen); + if (tag != expected_tag) { + error_setg(errp, "Unexpected tag: expected: %u, actual: %u", + expected_tag, tag); + goto error; + } + + data_length = qcrypto_der_extract_data(data, dlen, cb, ctx, errp); + if (data_length < 0) { + goto error; + } + return data_length; + +error: + *data = saved_data; + *dlen = saved_dlen; + return -1; +} + +int qcrypto_der_decode_int(const uint8_t **data, size_t *dlen, + QCryptoDERDecodeCb cb, void *ctx, Error **errp) +{ + const uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV, + QCRYPTO_DER_TAG_ENC_PRIM, + QCRYPTO_DER_TYPE_TAG_INT); + return qcrypto_der_decode_tlv(tag, data, dlen, cb, ctx, errp); +} + +int qcrypto_der_decode_seq(const uint8_t **data, size_t *dlen, + QCryptoDERDecodeCb cb, void *ctx, Error **errp) +{ + uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV, + QCRYPTO_DER_TAG_ENC_CONS, + QCRYPTO_DER_TYPE_TAG_SEQ); + return qcrypto_der_decode_tlv(tag, data, dlen, cb, ctx, errp); +} + +int qcrypto_der_decode_octet_str(const uint8_t **data, size_t *dlen, + QCryptoDERDecodeCb cb, void *ctx, Error **errp) +{ + uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV, + QCRYPTO_DER_TAG_ENC_PRIM, + QCRYPTO_DER_TYPE_TAG_OCT_STR); + return qcrypto_der_decode_tlv(tag, data, dlen, cb, ctx, errp); +} + +int qcrypto_der_decode_bit_str(const uint8_t **data, size_t *dlen, + QCryptoDERDecodeCb cb, void *ctx, Error **errp) +{ + uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV, + QCRYPTO_DER_TAG_ENC_PRIM, + QCRYPTO_DER_TYPE_TAG_BIT_STR); + return qcrypto_der_decode_tlv(tag, data, dlen, cb, ctx, errp); +} + +int qcrypto_der_decode_oid(const uint8_t **data, size_t *dlen, + QCryptoDERDecodeCb cb, void *ctx, Error **errp) +{ + uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV, + QCRYPTO_DER_TAG_ENC_PRIM, + QCRYPTO_DER_TYPE_TAG_OID); + return qcrypto_der_decode_tlv(tag, data, dlen, cb, ctx, errp); +} + +int qcrypto_der_decode_ctx_tag(const uint8_t **data, size_t *dlen, int tag_id, + QCryptoDERDecodeCb cb, void *ctx, Error **errp) +{ + uint8_t tag = 
QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_CONT, + QCRYPTO_DER_TAG_ENC_CONS, + tag_id); + return qcrypto_der_decode_tlv(tag, data, dlen, cb, ctx, errp); +} + +static void qcrypto_der_encode_prim(QCryptoEncodeContext *ctx, uint8_t tag, + const uint8_t *data, size_t dlen) +{ + QCryptoDerEncodeNode *node = g_new0(QCryptoDerEncodeNode, 1); + size_t nbytes_len; + + node->tag = tag; + node->data = data; + node->dlen = dlen; + node->parent = ctx->current_parent; + + qcrypto_der_encode_length(dlen, NULL, &nbytes_len); + /* 1 byte for Tag, nbyte_len for Length, and dlen for Value */ + node->parent->dlen += 1 + nbytes_len + dlen; + + ctx->tail->next = node; + ctx->tail = node; +} + +QCryptoEncodeContext *qcrypto_der_encode_ctx_new(void) +{ + QCryptoEncodeContext *ctx = g_new0(QCryptoEncodeContext, 1); + ctx->current_parent = &ctx->root; + ctx->tail = &ctx->root; + return ctx; +} + +static void qcrypto_der_encode_cons_begin(QCryptoEncodeContext *ctx, + uint8_t tag) +{ + QCryptoDerEncodeNode *node = g_new0(QCryptoDerEncodeNode, 1); + + node->tag = tag; + node->parent = ctx->current_parent; + ctx->current_parent = node; + ctx->tail->next = node; + ctx->tail = node; +} + +static void qcrypto_der_encode_cons_end(QCryptoEncodeContext *ctx) +{ + QCryptoDerEncodeNode *cons_node = ctx->current_parent; + size_t nbytes_len; + + qcrypto_der_encode_length(cons_node->dlen, NULL, &nbytes_len); + /* 1 byte for Tag, nbyte_len for Length, and dlen for Value */ + cons_node->parent->dlen += 1 + nbytes_len + cons_node->dlen; + ctx->current_parent = cons_node->parent; +} + +void qcrypto_der_encode_seq_begin(QCryptoEncodeContext *ctx) +{ + uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV, + QCRYPTO_DER_TAG_ENC_CONS, + QCRYPTO_DER_TYPE_TAG_SEQ); + qcrypto_der_encode_cons_begin(ctx, tag); +} + +void qcrypto_der_encode_seq_end(QCryptoEncodeContext *ctx) +{ + qcrypto_der_encode_cons_end(ctx); +} + +void qcrypto_der_encode_oid(QCryptoEncodeContext *ctx, + const uint8_t *src, size_t src_len) +{ + uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV, + QCRYPTO_DER_TAG_ENC_PRIM, + QCRYPTO_DER_TYPE_TAG_OID); + qcrypto_der_encode_prim(ctx, tag, src, src_len); +} + +void qcrypto_der_encode_int(QCryptoEncodeContext *ctx, + const uint8_t *src, size_t src_len) +{ + uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV, + QCRYPTO_DER_TAG_ENC_PRIM, + QCRYPTO_DER_TYPE_TAG_INT); + qcrypto_der_encode_prim(ctx, tag, src, src_len); +} + +void qcrypto_der_encode_null(QCryptoEncodeContext *ctx) +{ + uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV, + QCRYPTO_DER_TAG_ENC_PRIM, + QCRYPTO_DER_TYPE_TAG_NULL); + qcrypto_der_encode_prim(ctx, tag, NULL, 0); +} + +void qcrypto_der_encode_octet_str(QCryptoEncodeContext *ctx, + const uint8_t *src, size_t src_len) +{ + uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV, + QCRYPTO_DER_TAG_ENC_PRIM, + QCRYPTO_DER_TYPE_TAG_OCT_STR); + qcrypto_der_encode_prim(ctx, tag, src, src_len); +} + +void qcrypto_der_encode_octet_str_begin(QCryptoEncodeContext *ctx) +{ + uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV, + QCRYPTO_DER_TAG_ENC_PRIM, + QCRYPTO_DER_TYPE_TAG_OCT_STR); + qcrypto_der_encode_cons_begin(ctx, tag); +} + +void qcrypto_der_encode_octet_str_end(QCryptoEncodeContext *ctx) +{ + qcrypto_der_encode_cons_end(ctx); +} + +size_t qcrypto_der_encode_ctx_buffer_len(QCryptoEncodeContext *ctx) +{ + return ctx->root.dlen; +} + +void qcrypto_der_encode_ctx_flush_and_free(QCryptoEncodeContext *ctx, + uint8_t *dst) +{ + QCryptoDerEncodeNode *node, *prev; + size_t len; + + for (prev = 
&ctx->root; + (node = prev->next) && (prev->next = node->next, 1);) { + /* Tag */ + *dst++ = node->tag; + + /* Length */ + qcrypto_der_encode_length(node->dlen, dst, &len); + dst += len; + + /* Value */ + if (node->data) { + memcpy(dst, node->data, node->dlen); + dst += node->dlen; + } + g_free(node); + } + g_free(ctx); +} diff --git a/crypto/der.h b/crypto/der.h new file mode 100644 index 0000000000..0e895bbeec --- /dev/null +++ b/crypto/der.h @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2022 Bytedance + * Author: lei he + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +#ifndef QCRYPTO_ASN1_DECODER_H +#define QCRYPTO_ASN1_DECODER_H + +#include "qapi/error.h" + +typedef struct QCryptoEncodeContext QCryptoEncodeContext; + +/* rsaEncryption: 1.2.840.113549.1.1.1 */ +#define QCRYPTO_OID_rsaEncryption "\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01" + +/* Simple decoder used to parse DER encoded rsa keys. */ + +/** + * @opaque: user context. + * @value: the starting address of |value| part of 'Tag-Length-Value' pattern. + * @vlen: length of the |value|. + * Returns: 0 for success, any other value is considered an error. + */ +typedef int (*QCryptoDERDecodeCb) (void *opaque, const uint8_t *value, + size_t vlen, Error **errp); + +/** + * qcrypto_der_decode_int: + * @data: pointer to address of input data + * @dlen: pointer to length of input data + * @cb: callback invoked when decode succeed, if cb equals NULL, no + * callback will be invoked + * @opaque: parameter passed to cb + * + * Decode integer from DER-encoded data. + * + * Returns: On success, *data points to rest data, and *dlen + * will be set to the rest length of data, if cb is not NULL, must + * return 0 to make decode success, at last, the length of the data + * part of the decoded INTEGER will be returned. Otherwise, -1 is + * returned and the valued of *data and *dlen keep unchanged. + */ +int qcrypto_der_decode_int(const uint8_t **data, + size_t *dlen, + QCryptoDERDecodeCb cb, + void *opaque, + Error **errp); +/** + * qcrypto_der_decode_seq: + * + * Decode sequence from DER-encoded data, similar with der_decode_int. + * + * @data: pointer to address of input data + * @dlen: pointer to length of input data + * @cb: callback invoked when decode succeed, if cb equals NULL, no + * callback will be invoked + * @opaque: parameter passed to cb + * + * Returns: On success, *data points to rest data, and *dlen + * will be set to the rest length of data, if cb is not NULL, must + * return 0 to make decode success, at last, the length of the data + * part of the decoded SEQUENCE will be returned. Otherwise, -1 is + * returned and the valued of *data and *dlen keep unchanged. + */ +int qcrypto_der_decode_seq(const uint8_t **data, + size_t *dlen, + QCryptoDERDecodeCb cb, + void *opaque, + Error **errp); + +/** + * qcrypto_der_decode_oid: + * + * Decode OID from DER-encoded data, similar with der_decode_int. 
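As background for the flush loop above: qcrypto_der_encode_length() (defined earlier in der.c, not shown in this hunk) is taken here to emit standard X.690 definite-form lengths, which is what makes the two-pass scheme work — the first pass passes dst == NULL just to learn the byte count and accumulate each parent's dlen, and the flush pass writes the real bytes. A minimal illustrative sketch of that length rule only (the helper name and body are hypothetical, not the QEMU implementation):

#include <stddef.h>
#include <stdint.h>

/* Illustrative only: emit a DER definite-form length.
 * If dst is NULL, just report how many bytes the length field needs. */
static void der_encode_length_sketch(size_t dlen, uint8_t *dst, size_t *nbytes)
{
    if (dlen < 0x80) {
        /* Short form: a single byte 0x00..0x7f holds the length itself. */
        *nbytes = 1;
        if (dst) {
            dst[0] = (uint8_t)dlen;
        }
        return;
    }
    /* Long form: 0x80 | n, followed by n big-endian length octets. */
    size_t n = 0, tmp = dlen;
    while (tmp) {
        n++;
        tmp >>= 8;
    }
    *nbytes = 1 + n;
    if (dst) {
        dst[0] = 0x80 | (uint8_t)n;
        for (size_t i = 0; i < n; i++) {
            dst[1 + i] = (uint8_t)(dlen >> (8 * (n - 1 - i)));
        }
    }
}

A SEQUENCE holding a 5-byte payload therefore starts with the two bytes 0x30 0x05; past 127 bytes the long form applies, e.g. a 300-byte payload starts with 0x30 0x82 0x01 0x2c.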
+ * + * @data: pointer to address of input data + * @dlen: pointer to length of input data + * @cb: callback invoked when decode succeed, if cb equals NULL, no + * callback will be invoked + * @opaque: parameter passed to cb + * + * Returns: On success, *data points to rest data, and *dlen + * will be set to the rest length of data, if cb is not NULL, must + * return 0 to make decode success, at last, the length of the data + * part of the decoded OID will be returned. Otherwise, -1 is + * returned and the valued of *data and *dlen keep unchanged. + */ +int qcrypto_der_decode_oid(const uint8_t **data, + size_t *dlen, + QCryptoDERDecodeCb cb, + void *opaque, + Error **errp); + +/** + * qcrypto_der_decode_octet_str: + * + * Decode OCTET STRING from DER-encoded data, similar with der_decode_int. + * + * @data: pointer to address of input data + * @dlen: pointer to length of input data + * @cb: callback invoked when decode succeed, if cb equals NULL, no + * callback will be invoked + * @opaque: parameter passed to cb + * + * Returns: On success, *data points to rest data, and *dlen + * will be set to the rest length of data, if cb is not NULL, must + * return 0 to make decode success, at last, the length of the data + * part of the decoded OCTET STRING will be returned. Otherwise, -1 is + * returned and the valued of *data and *dlen keep unchanged. + */ +int qcrypto_der_decode_octet_str(const uint8_t **data, + size_t *dlen, + QCryptoDERDecodeCb cb, + void *opaque, + Error **errp); + +/** + * qcrypto_der_decode_bit_str: + * + * Decode BIT STRING from DER-encoded data, similar with der_decode_int. + * + * @data: pointer to address of input data + * @dlen: pointer to length of input data + * @cb: callback invoked when decode succeed, if cb equals NULL, no + * callback will be invoked + * @opaque: parameter passed to cb + * + * Returns: On success, *data points to rest data, and *dlen + * will be set to the rest length of data, if cb is not NULL, must + * return 0 to make decode success, at last, the length of the data + * part of the decoded BIT STRING will be returned. Otherwise, -1 is + * returned and the valued of *data and *dlen keep unchanged. + */ +int qcrypto_der_decode_bit_str(const uint8_t **data, + size_t *dlen, + QCryptoDERDecodeCb cb, + void *opaque, + Error **errp); + + +/** + * qcrypto_der_decode_ctx_tag: + * + * Decode context specific tag + * + * @data: pointer to address of input data + * @dlen: pointer to length of input data + * @tag: expected value of context specific tag + * @cb: callback invoked when decode succeed, if cb equals NULL, no + * callback will be invoked + * @opaque: parameter passed to cb + * + * Returns: On success, *data points to rest data, and *dlen + * will be set to the rest length of data, if cb is not NULL, must + * return 0 to make decode success, at last, the length of the data + * part of the decoded BIT STRING will be returned. Otherwise, -1 is + * returned and the valued of *data and *dlen keep unchanged. + */ +int qcrypto_der_decode_ctx_tag(const uint8_t **data, + size_t *dlen, int tag_id, + QCryptoDERDecodeCb cb, + void *opaque, + Error **errp); + +/** + * qcrypto_der_encode_ctx_new: + * + * Allocate a context used for der encoding. + */ +QCryptoEncodeContext *qcrypto_der_encode_ctx_new(void); + +/** + * qcrypto_der_encode_seq_begin: + * @ctx: the encode context. + * + * Start encoding a SEQUENCE for ctx. + * + */ +void qcrypto_der_encode_seq_begin(QCryptoEncodeContext *ctx); + +/** + * qcrypto_der_encode_seq_begin: + * @ctx: the encode context. 
+ * + * Finish encoding a SEQUENCE for ctx. + * + */ +void qcrypto_der_encode_seq_end(QCryptoEncodeContext *ctx); + + +/** + * qcrypto_der_encode_oid: + * @ctx: the encode context. + * @src: the source data of the OID; note it should already be encoded, this + * function only adds the tag and length parts for it. + * + * Encode an OID into ctx. + */ +void qcrypto_der_encode_oid(QCryptoEncodeContext *ctx, + const uint8_t *src, size_t src_len); + +/** + * qcrypto_der_encode_int: + * @ctx: the encode context. + * @src: the source data of the integer; note it should already be encoded, this + * function only adds the tag and length parts for it. + * + * Encode an integer into ctx. + */ +void qcrypto_der_encode_int(QCryptoEncodeContext *ctx, + const uint8_t *src, size_t src_len); + +/** + * qcrypto_der_encode_null: + * @ctx: the encode context. + * + * Encode a NULL into ctx. + */ +void qcrypto_der_encode_null(QCryptoEncodeContext *ctx); + +/** + * qcrypto_der_encode_octet_str: + * @ctx: the encode context. + * @src: the source data of the octet string. + * + * Encode an octet string into ctx. + */ +void qcrypto_der_encode_octet_str(QCryptoEncodeContext *ctx, + const uint8_t *src, size_t src_len); + +/** + * qcrypto_der_encode_octet_str_begin: + * @ctx: the encode context. + * + * Start encoding an octet string. All fields between + * qcrypto_der_encode_octet_str_begin and qcrypto_der_encode_octet_str_end + * are encoded as an octet string. This is useful when we need to encode an + * already-encoded SEQUENCE as an OCTET STRING. + */ +void qcrypto_der_encode_octet_str_begin(QCryptoEncodeContext *ctx); + +/** + * qcrypto_der_encode_octet_str_end: + * @ctx: the encode context. + * + * Finish encoding an octet string. All fields between + * qcrypto_der_encode_octet_str_begin and qcrypto_der_encode_octet_str_end + * are encoded as an octet string. This is useful when we need to encode an + * already-encoded SEQUENCE as an OCTET STRING. + */ +void qcrypto_der_encode_octet_str_end(QCryptoEncodeContext *ctx); + +/** + * qcrypto_der_encode_ctx_buffer_len: + * @ctx: the encode context. + * + * Compute the buffer size needed to hold all of the encoded data. + */ +size_t qcrypto_der_encode_ctx_buffer_len(QCryptoEncodeContext *ctx); + +/** + * qcrypto_der_encode_ctx_flush_and_free: + * @ctx: the encode context. + * @dst: the destination for the encoded data; the length of dst should + * be no less than qcrypto_der_encode_ctx_buffer_len() + * + * Flush all encoded data into dst, then free ctx. + */ +void qcrypto_der_encode_ctx_flush_and_free(QCryptoEncodeContext *ctx, + uint8_t *dst); + +#endif /* QCRYPTO_ASN1_DECODER_H */ diff --git a/crypto/hash-afalg.c b/crypto/hash-afalg.c index cf34c694af..3ebea39292 100644 --- a/crypto/hash-afalg.c +++ b/crypto/hash-afalg.c @@ -13,7 +13,6 @@ #include "qemu/osdep.h" #include "qemu/iov.h" #include "qemu/sockets.h" -#include "qemu-common.h" #include "qapi/error.h" #include "crypto/hash.h" #include "crypto/hmac.h" @@ -88,8 +87,8 @@ qcrypto_afalg_hash_hmac_ctx_new(QCryptoHashAlgorithm alg, /* HMAC needs setkey */ if (is_hmac) { - if (qemu_setsockopt(afalg->tfmfd, SOL_ALG, ALG_SET_KEY, - key, nkey) != 0) { + if (setsockopt(afalg->tfmfd, SOL_ALG, ALG_SET_KEY, + key, nkey) != 0) { error_setg_errno(errp, errno, "Set hmac key failed"); qcrypto_afalg_comm_free(afalg); return NULL; diff --git a/crypto/ivgen-plain.h b/crypto/ivgen-plain.h index 43db898809..ebae6acd04 100644 --- a/crypto/ivgen-plain.h +++ b/crypto/ivgen-plain.h @@ -17,11 +17,11 @@ * License along with this library; if not, see . 
*/ -#ifndef QCRYPTO_IVGEN_PLAIN_H__ -#define QCRYPTO_IVGEN_PLAIN_H__ +#ifndef QCRYPTO_IVGEN_PLAIN_H +#define QCRYPTO_IVGEN_PLAIN_H #include "ivgenpriv.h" extern struct QCryptoIVGenDriver qcrypto_ivgen_plain; -#endif /* QCRYPTO_IVGEN_PLAIN_H__ */ +#endif /* QCRYPTO_IVGEN_PLAIN_H */ diff --git a/crypto/meson.build b/crypto/meson.build index 95a6a83504..5f03a30d34 100644 --- a/crypto/meson.build +++ b/crypto/meson.build @@ -1,10 +1,12 @@ crypto_ss.add(genh) crypto_ss.add(files( 'afsplit.c', + 'akcipher.c', 'block-luks.c', 'block-qcow.c', 'block.c', 'cipher.c', + 'der.c', 'hash.c', 'hmac.c', 'ivgen-essiv.c', @@ -19,10 +21,14 @@ crypto_ss.add(files( 'tlscredspsk.c', 'tlscredsx509.c', 'tlssession.c', + 'rsakey.c', )) if nettle.found() crypto_ss.add(nettle, files('hash-nettle.c', 'hmac-nettle.c', 'pbkdf-nettle.c')) + if hogweed.found() + crypto_ss.add(gmp, hogweed) + endif if xts == 'private' crypto_ss.add(files('xts.c')) endif @@ -34,10 +40,15 @@ else crypto_ss.add(files('hash-glib.c', 'hmac-glib.c', 'pbkdf-stub.c')) endif -crypto_ss.add(when: 'CONFIG_SECRET_KEYRING', if_true: files('secret_keyring.c')) -crypto_ss.add(when: 'CONFIG_AF_ALG', if_true: files('afalg.c', 'cipher-afalg.c', 'hash-afalg.c')) +if have_keyring + crypto_ss.add(files('secret_keyring.c')) +endif +if have_afalg + crypto_ss.add(if_true: files('afalg.c', 'cipher-afalg.c', 'hash-afalg.c')) +endif crypto_ss.add(when: gnutls, if_true: files('tls-cipher-suites.c')) +util_ss.add(files('sm4.c')) util_ss.add(files('aes.c')) util_ss.add(files('init.c')) if gnutls.found() @@ -48,7 +59,7 @@ if gcrypt.found() util_ss.add(gcrypt, files('random-gcrypt.c')) elif gnutls.found() util_ss.add(gnutls, files('random-gnutls.c')) -elif 'CONFIG_RNG_NONE' in config_host +elif get_option('rng_none') util_ss.add(files('random-none.c')) else util_ss.add(files('random-platform.c')) diff --git a/crypto/pbkdf.c b/crypto/pbkdf.c index 3775ddc6c5..8d198c152c 100644 --- a/crypto/pbkdf.c +++ b/crypto/pbkdf.c @@ -24,6 +24,11 @@ #ifndef _WIN32 #include #endif +#ifdef CONFIG_DARWIN +#include +#include +#include +#endif static int qcrypto_pbkdf2_get_thread_cpu(unsigned long long *val_ms, @@ -45,6 +50,24 @@ static int qcrypto_pbkdf2_get_thread_cpu(unsigned long long *val_ms, /* QuadPart is units of 100ns and we want ms as unit */ *val_ms = thread_time.QuadPart / 10000ll; return 0; +#elif defined(CONFIG_DARWIN) + mach_port_t thread; + kern_return_t kr; + mach_msg_type_number_t count; + thread_basic_info_data_t info; + + thread = mach_thread_self(); + count = THREAD_BASIC_INFO_COUNT; + kr = thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count); + mach_port_deallocate(mach_task_self(), thread); + if (kr != KERN_SUCCESS || (info.flags & TH_FLAGS_IDLE) != 0) { + error_setg_errno(errp, errno, "Unable to get thread CPU usage"); + return -1; + } + + *val_ms = ((info.user_time.seconds * 1000ll) + + (info.user_time.microseconds / 1000)); + return 0; #elif defined(RUSAGE_THREAD) struct rusage ru; if (getrusage(RUSAGE_THREAD, &ru) < 0) { diff --git a/crypto/rsakey-builtin.c.inc b/crypto/rsakey-builtin.c.inc new file mode 100644 index 0000000000..aeeacc8f9b --- /dev/null +++ b/crypto/rsakey-builtin.c.inc @@ -0,0 +1,200 @@ +/* + * QEMU Crypto akcipher algorithms + * + * Copyright (c) 2022 Bytedance + * Author: lei he + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) 
any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +#include "der.h" +#include "rsakey.h" + +static int extract_mpi(void *ctx, const uint8_t *value, + size_t vlen, Error **errp) +{ + QCryptoAkCipherMPI *mpi = (QCryptoAkCipherMPI *)ctx; + if (vlen == 0) { + error_setg(errp, "Empty mpi field"); + return -1; + } + mpi->data = g_memdup2(value, vlen); + mpi->len = vlen; + return 0; +} + +static int extract_version(void *ctx, const uint8_t *value, + size_t vlen, Error **errp) +{ + uint8_t *version = (uint8_t *)ctx; + if (vlen != 1 || *value > 1) { + error_setg(errp, "Invalid rsakey version"); + return -1; + } + *version = *value; + return 0; +} + +static int extract_seq_content(void *ctx, const uint8_t *value, + size_t vlen, Error **errp) +{ + const uint8_t **content = (const uint8_t **)ctx; + if (vlen == 0) { + error_setg(errp, "Empty sequence"); + return -1; + } + *content = value; + return 0; +} + +/** + * + * RsaPubKey ::= SEQUENCE { + * n INTEGER + * e INTEGER + * } + */ +static QCryptoAkCipherRSAKey *qcrypto_builtin_rsa_public_key_parse( + const uint8_t *key, size_t keylen, Error **errp) +{ + QCryptoAkCipherRSAKey *rsa = g_new0(QCryptoAkCipherRSAKey, 1); + const uint8_t *seq; + size_t seq_length; + int decode_ret; + + decode_ret = qcrypto_der_decode_seq(&key, &keylen, + extract_seq_content, &seq, errp); + if (decode_ret < 0 || keylen != 0) { + goto error; + } + seq_length = decode_ret; + + if (qcrypto_der_decode_int(&seq, &seq_length, extract_mpi, + &rsa->n, errp) < 0 || + qcrypto_der_decode_int(&seq, &seq_length, extract_mpi, + &rsa->e, errp) < 0) { + goto error; + } + if (seq_length != 0) { + goto error; + } + + return rsa; + +error: + if (errp && !*errp) { + error_setg(errp, "Invalid RSA public key"); + } + qcrypto_akcipher_rsakey_free(rsa); + return NULL; +} + +/** + * RsaPrivKey ::= SEQUENCE { + * version INTEGER + * n INTEGER + * e INTEGER + * d INTEGER + * p INTEGER + * q INTEGER + * dp INTEGER + * dq INTEGER + * u INTEGER + * otherPrimeInfos OtherPrimeInfos OPTIONAL + * } + */ +static QCryptoAkCipherRSAKey *qcrypto_builtin_rsa_private_key_parse( + const uint8_t *key, size_t keylen, Error **errp) +{ + QCryptoAkCipherRSAKey *rsa = g_new0(QCryptoAkCipherRSAKey, 1); + uint8_t version; + const uint8_t *seq; + int decode_ret; + size_t seq_length; + + decode_ret = qcrypto_der_decode_seq(&key, &keylen, extract_seq_content, + &seq, errp); + if (decode_ret < 0 || keylen != 0) { + goto error; + } + seq_length = decode_ret; + + decode_ret = qcrypto_der_decode_int(&seq, &seq_length, extract_version, + &version, errp); + + if (qcrypto_der_decode_int(&seq, &seq_length, extract_mpi, + &rsa->n, errp) < 0 || + qcrypto_der_decode_int(&seq, &seq_length, extract_mpi, + &rsa->e, errp) < 0 || + qcrypto_der_decode_int(&seq, &seq_length, extract_mpi, + &rsa->d, errp) < 0 || + qcrypto_der_decode_int(&seq, &seq_length, extract_mpi, &rsa->p, + errp) < 0 || + qcrypto_der_decode_int(&seq, &seq_length, extract_mpi, &rsa->q, + errp) < 0 || + qcrypto_der_decode_int(&seq, &seq_length, extract_mpi, &rsa->dp, + errp) < 0 || + qcrypto_der_decode_int(&seq, &seq_length, extract_mpi, &rsa->dq, + errp) < 0 || + qcrypto_der_decode_int(&seq, &seq_length, extract_mpi, 
&rsa->u, + errp) < 0) { + goto error; + } + + /** + * According to the standard, otherPrimeInfos must be present for version 1. + * There is no strict verification here, this is to be compatible with + * the unit test of the kernel. TODO: remove this until linux kernel's + * unit-test is fixed. + */ + if (version == 1 && seq_length != 0) { + if (qcrypto_der_decode_seq(&seq, &seq_length, NULL, NULL, errp) < 0) { + goto error; + } + if (seq_length != 0) { + goto error; + } + return rsa; + } + if (seq_length != 0) { + goto error; + } + + return rsa; + +error: + if (errp && !*errp) { + error_setg(errp, "Invalid RSA private key"); + } + qcrypto_akcipher_rsakey_free(rsa); + return NULL; +} + +QCryptoAkCipherRSAKey *qcrypto_akcipher_rsakey_parse( + QCryptoAkCipherKeyType type, const uint8_t *key, + size_t keylen, Error **errp) +{ + switch (type) { + case QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE: + return qcrypto_builtin_rsa_private_key_parse(key, keylen, errp); + + case QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC: + return qcrypto_builtin_rsa_public_key_parse(key, keylen, errp); + + default: + error_setg(errp, "Unknown key type: %d", type); + return NULL; + } +} diff --git a/crypto/rsakey-nettle.c.inc b/crypto/rsakey-nettle.c.inc new file mode 100644 index 0000000000..cc49872e78 --- /dev/null +++ b/crypto/rsakey-nettle.c.inc @@ -0,0 +1,158 @@ +/* + * QEMU Crypto akcipher algorithms + * + * Copyright (c) 2022 Bytedance + * Author: lei he + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ * + */ + +#include + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "rsakey.h" + +static bool DumpMPI(struct asn1_der_iterator *i, QCryptoAkCipherMPI *mpi) +{ + mpi->data = g_memdup2(i->data, i->length); + mpi->len = i->length; + return true; +} + +static bool GetMPI(struct asn1_der_iterator *i, QCryptoAkCipherMPI *mpi) +{ + if (asn1_der_iterator_next(i) != ASN1_ITERATOR_PRIMITIVE || + i->type != ASN1_INTEGER) { + return false; + } + return DumpMPI(i, mpi); +} + +/** + * RsaPrivKey ::= SEQUENCE { + * version INTEGER + * n INTEGER + * e INTEGER + * d INTEGER + * p INTEGER + * q INTEGER + * dp INTEGER + * dq INTEGER + * u INTEGER + * otherPrimeInfos OtherPrimeInfos OPTIONAL + * } + */ +static QCryptoAkCipherRSAKey *qcrypto_nettle_rsa_private_key_parse( + const uint8_t *key, size_t keylen, Error **errp) +{ + QCryptoAkCipherRSAKey *rsa = g_new0(QCryptoAkCipherRSAKey, 1); + struct asn1_der_iterator i; + uint32_t version; + int tag; + + /* Parse entire struct */ + if (asn1_der_iterator_first(&i, keylen, key) != ASN1_ITERATOR_CONSTRUCTED || + i.type != ASN1_SEQUENCE || + asn1_der_decode_constructed_last(&i) != ASN1_ITERATOR_PRIMITIVE || + i.type != ASN1_INTEGER || + !asn1_der_get_uint32(&i, &version) || + version > 1 || + !GetMPI(&i, &rsa->n) || + !GetMPI(&i, &rsa->e) || + !GetMPI(&i, &rsa->d) || + !GetMPI(&i, &rsa->p) || + !GetMPI(&i, &rsa->q) || + !GetMPI(&i, &rsa->dp) || + !GetMPI(&i, &rsa->dq) || + !GetMPI(&i, &rsa->u)) { + goto error; + } + + if (version == 1) { + tag = asn1_der_iterator_next(&i); + /** + * According to the standard otherPrimeInfos must be present for + * version 1. There is no strict verification here, this is to be + * compatible with the unit test of the kernel. TODO: remove this + * until linux-kernel's unit-test is fixed; + */ + if (tag == ASN1_ITERATOR_END) { + return rsa; + } + if (tag != ASN1_ITERATOR_CONSTRUCTED || + i.type != ASN1_SEQUENCE) { + goto error; + } + } + + if (asn1_der_iterator_next(&i) != ASN1_ITERATOR_END) { + goto error; + } + + return rsa; + +error: + error_setg(errp, "Failed to parse RSA private key"); + qcrypto_akcipher_rsakey_free(rsa); + return NULL; +} + +/** + * RsaPubKey ::= SEQUENCE { + * n INTEGER + * e INTEGER + * } + */ +static QCryptoAkCipherRSAKey *qcrypto_nettle_rsa_public_key_parse( + const uint8_t *key, size_t keylen, Error **errp) +{ + + QCryptoAkCipherRSAKey *rsa = g_new0(QCryptoAkCipherRSAKey, 1); + struct asn1_der_iterator i; + + if (asn1_der_iterator_first(&i, keylen, key) != ASN1_ITERATOR_CONSTRUCTED || + i.type != ASN1_SEQUENCE || + asn1_der_decode_constructed_last(&i) != ASN1_ITERATOR_PRIMITIVE || + !DumpMPI(&i, &rsa->n) || + !GetMPI(&i, &rsa->e) || + asn1_der_iterator_next(&i) != ASN1_ITERATOR_END) { + goto error; + } + + return rsa; + +error: + error_setg(errp, "Failed to parse RSA public key"); + qcrypto_akcipher_rsakey_free(rsa); + return NULL; +} + +QCryptoAkCipherRSAKey *qcrypto_akcipher_rsakey_parse( + QCryptoAkCipherKeyType type, const uint8_t *key, + size_t keylen, Error **errp) +{ + switch (type) { + case QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE: + return qcrypto_nettle_rsa_private_key_parse(key, keylen, errp); + + case QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC: + return qcrypto_nettle_rsa_public_key_parse(key, keylen, errp); + + default: + error_setg(errp, "Unknown key type: %d", type); + return NULL; + } +} diff --git a/crypto/rsakey.c b/crypto/rsakey.c new file mode 100644 index 0000000000..7d6f273aef --- /dev/null +++ b/crypto/rsakey.c @@ -0,0 +1,86 @@ +/* + * QEMU Crypto RSA key parser + * + * Copyright (c) 2022 
Bytedance + * Author: lei he + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +#include "qemu/osdep.h" +#include "der.h" +#include "rsakey.h" + +void qcrypto_akcipher_rsakey_free(QCryptoAkCipherRSAKey *rsa_key) +{ + if (!rsa_key) { + return; + } + g_free(rsa_key->n.data); + g_free(rsa_key->e.data); + g_free(rsa_key->d.data); + g_free(rsa_key->p.data); + g_free(rsa_key->q.data); + g_free(rsa_key->dp.data); + g_free(rsa_key->dq.data); + g_free(rsa_key->u.data); + g_free(rsa_key); +} + +/** + * PKCS#8 private key info for RSA + * + * PrivateKeyInfo ::= SEQUENCE { + * version INTEGER, + * privateKeyAlgorithm PrivateKeyAlgorithmIdentifier, + * privateKey OCTET STRING, + * attributes [0] IMPLICIT Attributes OPTIONAL + * } + */ +void qcrypto_akcipher_rsakey_export_p8info(const uint8_t *key, + size_t keylen, + uint8_t **dst, + size_t *dlen) +{ + QCryptoEncodeContext *ctx = qcrypto_der_encode_ctx_new(); + uint8_t version = 0; + + qcrypto_der_encode_seq_begin(ctx); + + /* version */ + qcrypto_der_encode_int(ctx, &version, sizeof(version)); + + /* algorithm identifier */ + qcrypto_der_encode_seq_begin(ctx); + qcrypto_der_encode_oid(ctx, (uint8_t *)QCRYPTO_OID_rsaEncryption, + sizeof(QCRYPTO_OID_rsaEncryption) - 1); + qcrypto_der_encode_null(ctx); + qcrypto_der_encode_seq_end(ctx); + + /* RSA private key */ + qcrypto_der_encode_octet_str(ctx, key, keylen); + + qcrypto_der_encode_seq_end(ctx); + + *dlen = qcrypto_der_encode_ctx_buffer_len(ctx); + *dst = g_malloc(*dlen); + qcrypto_der_encode_ctx_flush_and_free(ctx, *dst); +} + +#if defined(CONFIG_NETTLE) && defined(CONFIG_HOGWEED) +#include "rsakey-nettle.c.inc" +#else +#include "rsakey-builtin.c.inc" +#endif diff --git a/crypto/rsakey.h b/crypto/rsakey.h new file mode 100644 index 0000000000..00b3eccec7 --- /dev/null +++ b/crypto/rsakey.h @@ -0,0 +1,101 @@ +/* + * QEMU Crypto RSA key parser + * + * Copyright (c) 2022 Bytedance + * Author: lei he + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +#ifndef QCRYPTO_RSAKEY_H +#define QCRYPTO_RSAKEY_H + +#include "qemu/host-utils.h" +#include "crypto/akcipher.h" + +typedef struct QCryptoAkCipherRSAKey QCryptoAkCipherRSAKey; +typedef struct QCryptoAkCipherMPI QCryptoAkCipherMPI; + +/** + * Multiple precious integer, encoded as two' complement, + * copied directly from DER encoded ASN.1 structures. 
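For orientation on what qcrypto_akcipher_rsakey_export_p8info() above actually emits: assuming the caller passes a valid PKCS#1 RSAPrivateKey in DER form, the result is the standard PKCS#8 wrapper sketched below (LL stands for lengths the encode context computes at flush time; only the rsaEncryption AlgorithmIdentifier bytes are fixed). The pkcs1_der/pkcs1_len names in the usage snippet are hypothetical caller variables, not QEMU identifiers.

/*
 * 30 LL                                     SEQUENCE     (PrivateKeyInfo)
 *    02 01 00                               INTEGER 0    (version)
 *    30 0d                                  SEQUENCE     (AlgorithmIdentifier)
 *       06 09 2a 86 48 86 f7 0d 01 01 01    OID 1.2.840.113549.1.1.1 (rsaEncryption)
 *       05 00                               NULL         (parameters)
 *    04 LL <PKCS#1 RSAPrivateKey DER>       OCTET STRING (privateKey)
 */

/* Hypothetical caller, with pkcs1_der/pkcs1_len already holding PKCS#1 DER: */
uint8_t *p8info = NULL;
size_t p8len = 0;

qcrypto_akcipher_rsakey_export_p8info(pkcs1_der, pkcs1_len, &p8info, &p8len);
/* ... hand p8info/p8len to the backend ... */
g_free(p8info);   /* the buffer is g_malloc()ed by the helper */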
+ */ +struct QCryptoAkCipherMPI { + uint8_t *data; + size_t len; +}; + +/* See rfc2437: https://datatracker.ietf.org/doc/html/rfc2437 */ +struct QCryptoAkCipherRSAKey { + /* The modulus */ + QCryptoAkCipherMPI n; + /* The public exponent */ + QCryptoAkCipherMPI e; + /* The private exponent */ + QCryptoAkCipherMPI d; + /* The first factor */ + QCryptoAkCipherMPI p; + /* The second factor */ + QCryptoAkCipherMPI q; + /* The first factor's exponent */ + QCryptoAkCipherMPI dp; + /* The second factor's exponent */ + QCryptoAkCipherMPI dq; + /* The CRT coefficient */ + QCryptoAkCipherMPI u; +}; + +/** + * Parse DER encoded ASN.1 RSA keys, expected ASN.1 schemas: + * RsaPrivKey ::= SEQUENCE { + * version INTEGER + * n INTEGER + * e INTEGER + * d INTEGER + * p INTEGER + * q INTEGER + * dp INTEGER + * dq INTEGER + * u INTEGER + * otherPrimeInfos OtherPrimeInfos OPTIONAL + * } + * + * RsaPubKey ::= SEQUENCE { + * n INTEGER + * e INTEGER + * } + * + * Returns: On success QCryptoAkCipherRSAKey is returned, otherwise returns NULL + */ +QCryptoAkCipherRSAKey *qcrypto_akcipher_rsakey_parse( + QCryptoAkCipherKeyType type, + const uint8_t *key, size_t keylen, Error **errp); + +/** + * qcrypto_akcipher_rsakey_export_as_p8info: + * + * Export RSA private key to PKCS#8 private key info. + */ +void qcrypto_akcipher_rsakey_export_p8info(const uint8_t *key, + size_t keylen, + uint8_t **dst, + size_t *dlen); + +void qcrypto_akcipher_rsakey_free(QCryptoAkCipherRSAKey *key); + +G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoAkCipherRSAKey, + qcrypto_akcipher_rsakey_free); + +#endif diff --git a/crypto/secret_common.c b/crypto/secret_common.c index 714a15d5e5..3441c44ca8 100644 --- a/crypto/secret_common.c +++ b/crypto/secret_common.c @@ -138,36 +138,44 @@ static void qcrypto_secret_decode(const uint8_t *input, static void -qcrypto_secret_prop_set_loaded(Object *obj, - bool value, - Error **errp) +qcrypto_secret_complete(UserCreatable *uc, Error **errp) { - QCryptoSecretCommon *secret = QCRYPTO_SECRET_COMMON(obj); + QCryptoSecretCommon *secret = QCRYPTO_SECRET_COMMON(uc); QCryptoSecretCommonClass *sec_class - = QCRYPTO_SECRET_COMMON_GET_CLASS(obj); + = QCRYPTO_SECRET_COMMON_GET_CLASS(uc); - if (value) { - Error *local_err = NULL; - uint8_t *input = NULL; - size_t inputlen = 0; - uint8_t *output = NULL; - size_t outputlen = 0; + Error *local_err = NULL; + uint8_t *input = NULL; + size_t inputlen = 0; + uint8_t *output = NULL; + size_t outputlen = 0; - if (sec_class->load_data) { - sec_class->load_data(secret, &input, &inputlen, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - } else { - error_setg(errp, "%s provides no 'load_data' method'", - object_get_typename(obj)); + if (sec_class->load_data) { + sec_class->load_data(secret, &input, &inputlen, &local_err); + if (local_err) { + error_propagate(errp, local_err); return; } + } else { + error_setg(errp, "%s provides no 'load_data' method'", + object_get_typename(OBJECT(uc))); + return; + } - if (secret->keyid) { - qcrypto_secret_decrypt(secret, input, inputlen, - &output, &outputlen, &local_err); + if (secret->keyid) { + qcrypto_secret_decrypt(secret, input, inputlen, + &output, &outputlen, &local_err); + g_free(input); + if (local_err) { + error_propagate(errp, local_err); + return; + } + input = output; + inputlen = outputlen; + } else { + if (secret->format == QCRYPTO_SECRET_FORMAT_BASE64) { + qcrypto_secret_decode(input, inputlen, + &output, &outputlen, &local_err); g_free(input); if (local_err) { error_propagate(errp, local_err); @@ 
-175,26 +183,11 @@ qcrypto_secret_prop_set_loaded(Object *obj, } input = output; inputlen = outputlen; - } else { - if (secret->format == QCRYPTO_SECRET_FORMAT_BASE64) { - qcrypto_secret_decode(input, inputlen, - &output, &outputlen, &local_err); - g_free(input); - if (local_err) { - error_propagate(errp, local_err); - return; - } - input = output; - inputlen = outputlen; - } } - - secret->rawdata = input; - secret->rawlen = inputlen; - } else if (secret->rawdata) { - error_setg(errp, "Cannot unload secret"); - return; } + + secret->rawdata = input; + secret->rawlen = inputlen; } @@ -268,13 +261,6 @@ qcrypto_secret_prop_get_keyid(Object *obj, } -static void -qcrypto_secret_complete(UserCreatable *uc, Error **errp) -{ - object_property_set_bool(OBJECT(uc), "loaded", true, errp); -} - - static void qcrypto_secret_finalize(Object *obj) { @@ -294,7 +280,7 @@ qcrypto_secret_class_init(ObjectClass *oc, void *data) object_class_property_add_bool(oc, "loaded", qcrypto_secret_prop_get_loaded, - qcrypto_secret_prop_set_loaded); + NULL); object_class_property_add_enum(oc, "format", "QCryptoSecretFormat", &QCryptoSecretFormat_lookup, diff --git a/crypto/sm4.c b/crypto/sm4.c new file mode 100644 index 0000000000..9f0cd452c7 --- /dev/null +++ b/crypto/sm4.c @@ -0,0 +1,49 @@ +/* + * QEMU crypto sm4 support + * + * Copyright (C) 2013 - 2018 Linaro Ltd + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "crypto/sm4.h" + +uint8_t const sm4_sbox[] = { + 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, + 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05, + 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, + 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99, + 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, + 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62, + 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, + 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6, + 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, + 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8, + 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, + 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35, + 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, + 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87, + 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, + 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e, + 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, + 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1, + 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, + 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3, + 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, + 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f, + 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, + 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51, + 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, + 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8, + 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, + 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0, + 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, + 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84, + 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, + 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48, +}; + diff --git a/crypto/tlscredsanon.c b/crypto/tlscredsanon.c index 6fb83639ec..c0d23a0ef3 100644 --- a/crypto/tlscredsanon.c +++ b/crypto/tlscredsanon.c @@ -119,16 +119,11 @@ qcrypto_tls_creds_anon_unload(QCryptoTLSCredsAnon *creds 
G_GNUC_UNUSED) static void -qcrypto_tls_creds_anon_prop_set_loaded(Object *obj, - bool value, - Error **errp) +qcrypto_tls_creds_anon_complete(UserCreatable *uc, Error **errp) { - QCryptoTLSCredsAnon *creds = QCRYPTO_TLS_CREDS_ANON(obj); + QCryptoTLSCredsAnon *creds = QCRYPTO_TLS_CREDS_ANON(uc); - qcrypto_tls_creds_anon_unload(creds); - if (value) { - qcrypto_tls_creds_anon_load(creds, errp); - } + qcrypto_tls_creds_anon_load(creds, errp); } @@ -163,13 +158,6 @@ qcrypto_tls_creds_anon_prop_get_loaded(Object *obj G_GNUC_UNUSED, #endif /* ! CONFIG_GNUTLS */ -static void -qcrypto_tls_creds_anon_complete(UserCreatable *uc, Error **errp) -{ - object_property_set_bool(OBJECT(uc), "loaded", true, errp); -} - - static void qcrypto_tls_creds_anon_finalize(Object *obj) { @@ -188,7 +176,7 @@ qcrypto_tls_creds_anon_class_init(ObjectClass *oc, void *data) object_class_property_add_bool(oc, "loaded", qcrypto_tls_creds_anon_prop_get_loaded, - qcrypto_tls_creds_anon_prop_set_loaded); + NULL); } diff --git a/crypto/tlscredspsk.c b/crypto/tlscredspsk.c index 752f2d92be..546cad1c5a 100644 --- a/crypto/tlscredspsk.c +++ b/crypto/tlscredspsk.c @@ -109,7 +109,12 @@ qcrypto_tls_creds_psk_load(QCryptoTLSCredsPSK *creds, goto cleanup; } - gnutls_psk_set_server_credentials_file(creds->data.server, pskfile); + ret = gnutls_psk_set_server_credentials_file(creds->data.server, pskfile); + if (ret < 0) { + error_setg(errp, "Cannot set PSK server credentials: %s", + gnutls_strerror(ret)); + goto cleanup; + } gnutls_psk_set_server_dh_params(creds->data.server, creds->parent_obj.dh_params); } else { @@ -135,8 +140,13 @@ qcrypto_tls_creds_psk_load(QCryptoTLSCredsPSK *creds, goto cleanup; } - gnutls_psk_set_client_credentials(creds->data.client, - username, &key, GNUTLS_PSK_KEY_HEX); + ret = gnutls_psk_set_client_credentials(creds->data.client, + username, &key, GNUTLS_PSK_KEY_HEX); + if (ret < 0) { + error_setg(errp, "Cannot set PSK client credentials: %s", + gnutls_strerror(ret)); + goto cleanup; + } } rv = 0; @@ -188,16 +198,11 @@ qcrypto_tls_creds_psk_unload(QCryptoTLSCredsPSK *creds G_GNUC_UNUSED) static void -qcrypto_tls_creds_psk_prop_set_loaded(Object *obj, - bool value, - Error **errp) +qcrypto_tls_creds_psk_complete(UserCreatable *uc, Error **errp) { - QCryptoTLSCredsPSK *creds = QCRYPTO_TLS_CREDS_PSK(obj); + QCryptoTLSCredsPSK *creds = QCRYPTO_TLS_CREDS_PSK(uc); - qcrypto_tls_creds_psk_unload(creds); - if (value) { - qcrypto_tls_creds_psk_load(creds, errp); - } + qcrypto_tls_creds_psk_load(creds, errp); } @@ -232,13 +237,6 @@ qcrypto_tls_creds_psk_prop_get_loaded(Object *obj G_GNUC_UNUSED, #endif /* ! 
CONFIG_GNUTLS */ -static void -qcrypto_tls_creds_psk_complete(UserCreatable *uc, Error **errp) -{ - object_property_set_bool(OBJECT(uc), "loaded", true, errp); -} - - static void qcrypto_tls_creds_psk_finalize(Object *obj) { @@ -276,7 +274,7 @@ qcrypto_tls_creds_psk_class_init(ObjectClass *oc, void *data) object_class_property_add_bool(oc, "loaded", qcrypto_tls_creds_psk_prop_get_loaded, - qcrypto_tls_creds_psk_prop_set_loaded); + NULL); object_class_property_add_str(oc, "username", qcrypto_tls_creds_psk_prop_get_username, qcrypto_tls_creds_psk_prop_set_username); diff --git a/crypto/tlscredsx509.c b/crypto/tlscredsx509.c index 32948a6bdc..d14313925d 100644 --- a/crypto/tlscredsx509.c +++ b/crypto/tlscredsx509.c @@ -687,16 +687,11 @@ qcrypto_tls_creds_x509_unload(QCryptoTLSCredsX509 *creds G_GNUC_UNUSED) static void -qcrypto_tls_creds_x509_prop_set_loaded(Object *obj, - bool value, - Error **errp) +qcrypto_tls_creds_x509_complete(UserCreatable *uc, Error **errp) { - QCryptoTLSCredsX509 *creds = QCRYPTO_TLS_CREDS_X509(obj); + QCryptoTLSCredsX509 *creds = QCRYPTO_TLS_CREDS_X509(uc); - qcrypto_tls_creds_x509_unload(creds); - if (value) { - qcrypto_tls_creds_x509_load(creds, errp); - } + qcrypto_tls_creds_x509_load(creds, errp); } @@ -814,13 +809,6 @@ qcrypto_tls_creds_x509_reload(QCryptoTLSCreds *creds, Error **errp) #endif /* ! CONFIG_GNUTLS */ -static void -qcrypto_tls_creds_x509_complete(UserCreatable *uc, Error **errp) -{ - object_property_set_bool(OBJECT(uc), "loaded", true, errp); -} - - static void qcrypto_tls_creds_x509_init(Object *obj) { @@ -852,7 +840,7 @@ qcrypto_tls_creds_x509_class_init(ObjectClass *oc, void *data) object_class_property_add_bool(oc, "loaded", qcrypto_tls_creds_x509_prop_get_loaded, - qcrypto_tls_creds_x509_prop_set_loaded); + NULL); object_class_property_add_bool(oc, "sanity-check", qcrypto_tls_creds_x509_prop_get_sanity, qcrypto_tls_creds_x509_prop_set_sanity); diff --git a/crypto/tlssession.c b/crypto/tlssession.c index a8db8c76d1..b302d835d2 100644 --- a/crypto/tlssession.c +++ b/crypto/tlssession.c @@ -373,6 +373,12 @@ qcrypto_tls_session_check_certificate(QCryptoTLSSession *session, session->hostname); goto error; } + } else { + if (session->creds->endpoint == + QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT) { + error_setg(errp, "No hostname for certificate validation"); + goto error; + } } } diff --git a/disas.c b/disas.c index 3dab4482d1..94d3b45042 100644 --- a/disas.c +++ b/disas.c @@ -83,18 +83,18 @@ static int print_insn_objdump(bfd_vma pc, disassemble_info *info, const char *prefix) { int i, n = info->buffer_length; - uint8_t *buf = g_malloc(n); + g_autofree uint8_t *buf = g_malloc(n); - info->read_memory_func(pc, buf, n, info); - - for (i = 0; i < n; ++i) { - if (i % 32 == 0) { - info->fprintf_func(info->stream, "\n%s: ", prefix); + if (info->read_memory_func(pc, buf, n, info) == 0) { + for (i = 0; i < n; ++i) { + if (i % 32 == 0) { + info->fprintf_func(info->stream, "\n%s: ", prefix); + } + info->fprintf_func(info->stream, "%02x", buf[i]); } - info->fprintf_func(info->stream, "%02x", buf[i]); + } else { + info->fprintf_func(info->stream, "unable to read memory"); } - - g_free(buf); return n; } @@ -126,7 +126,7 @@ static void initialize_debug_target(CPUDebug *s, CPUState *cpu) s->cpu = cpu; s->info.read_memory_func = target_read_memory; s->info.print_address_func = print_address; -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN s->info.endian = BFD_ENDIAN_BIG; #else s->info.endian = BFD_ENDIAN_LITTLE; @@ -144,7 +144,7 @@ static void 
initialize_debug_host(CPUDebug *s) s->info.read_memory_func = host_read_memory; s->info.print_address_func = host_print_address; -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN s->info.endian = BFD_ENDIAN_BIG; #else s->info.endian = BFD_ENDIAN_LITTLE; @@ -153,21 +153,17 @@ static void initialize_debug_host(CPUDebug *s) s->info.print_insn = print_insn_tci; #elif defined(__i386__) s->info.mach = bfd_mach_i386_i386; - s->info.print_insn = print_insn_i386; s->info.cap_arch = CS_ARCH_X86; s->info.cap_mode = CS_MODE_32; s->info.cap_insn_unit = 1; s->info.cap_insn_split = 8; #elif defined(__x86_64__) s->info.mach = bfd_mach_x86_64; - s->info.print_insn = print_insn_i386; s->info.cap_arch = CS_ARCH_X86; s->info.cap_mode = CS_MODE_64; s->info.cap_insn_unit = 1; s->info.cap_insn_split = 8; #elif defined(_ARCH_PPC) - s->info.disassembler_options = (char *)"any"; - s->info.print_insn = print_insn_ppc; s->info.cap_arch = CS_ARCH_PPC; # ifdef _ARCH_PPC64 s->info.cap_mode = CS_MODE_64; @@ -182,9 +178,6 @@ static void initialize_debug_host(CPUDebug *s) #endif #elif defined(__aarch64__) s->info.cap_arch = CS_ARCH_ARM64; -# ifdef CONFIG_ARM_A64_DIS - s->info.print_insn = print_insn_arm_a64; -# endif #elif defined(__alpha__) s->info.print_insn = print_insn_alpha; #elif defined(__sparc__) @@ -192,7 +185,6 @@ static void initialize_debug_host(CPUDebug *s) s->info.mach = bfd_mach_sparc_v9b; #elif defined(__arm__) /* TCG only generates code for arm mode. */ - s->info.print_insn = print_insn_arm; s->info.cap_arch = CS_ARCH_ARM; #elif defined(__MIPSEB__) s->info.print_insn = print_insn_big_mips; @@ -201,7 +193,6 @@ static void initialize_debug_host(CPUDebug *s) #elif defined(__m68k__) s->info.print_insn = print_insn_m68k; #elif defined(__s390__) - s->info.print_insn = print_insn_s390; s->info.cap_arch = CS_ARCH_SYSZ; s->info.cap_insn_unit = 2; s->info.cap_insn_split = 6; @@ -248,7 +239,7 @@ void target_disas(FILE *out, CPUState *cpu, target_ulong code, } } -static int plugin_printf(FILE *stream, const char *fmt, ...) +static int gstring_printf(FILE *stream, const char *fmt, ...) { /* We abuse the FILE parameter to pass a GString. 
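The cast that follows is safe only because the disassembler core never dereferences the FILE pointer itself; it simply hands it back to whatever fprintf_function was registered. A minimal self-contained illustration of the same FILE-as-GString trick (the function and variable names here are illustrative, not QEMU APIs):

#include <glib.h>
#include <stdarg.h>
#include <stdio.h>

/* An fprintf()-compatible callback that appends to a GString instead of a
 * real stdio stream.  The "stream" argument is only ever a smuggled GString. */
static int append_to_gstring(FILE *stream, const char *fmt, ...)
{
    GString *s = (GString *)stream;   /* never used as a FILE */
    gsize old_len = s->len;
    va_list ap;

    va_start(ap, fmt);
    g_string_append_vprintf(s, fmt, ap);
    va_end(ap);

    return (int)(s->len - old_len);   /* mimic fprintf's return value */
}

int main(void)
{
    GString *out = g_string_new(NULL);

    append_to_gstring((FILE *)out, "0x%08x: %s\n", 0x1000u, "nop");
    g_print("%s", out->str);
    g_string_free(out, TRUE);
    return 0;
}

The monitor_disas() change further down relies on exactly this: output accumulates in a GString and is emitted once via monitor_puts().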
*/ GString *s = (GString *)stream; @@ -279,7 +270,7 @@ char *plugin_disas(CPUState *cpu, uint64_t addr, size_t size) GString *ds = g_string_new(NULL); initialize_debug_target(&s, cpu); - s.info.fprintf_func = plugin_printf; + s.info.fprintf_func = gstring_printf; s.info.stream = (FILE *)ds; /* abuse this slot */ s.info.buffer_vma = addr; s.info.buffer_length = size; @@ -367,15 +358,19 @@ void monitor_disas(Monitor *mon, CPUState *cpu, { int count, i; CPUDebug s; + g_autoptr(GString) ds = g_string_new(""); initialize_debug_target(&s, cpu); - s.info.fprintf_func = qemu_fprintf; + s.info.fprintf_func = gstring_printf; + s.info.stream = (FILE *)ds; /* abuse this slot */ + if (is_physical) { s.info.read_memory_func = physical_read_memory; } s.info.buffer_vma = pc; if (s.info.cap_arch >= 0 && cap_disas_monitor(&s.info, pc, nb_insn)) { + monitor_puts(mon, ds->str); return; } @@ -385,13 +380,16 @@ void monitor_disas(Monitor *mon, CPUState *cpu, return; } - for(i = 0; i < nb_insn; i++) { - monitor_printf(mon, "0x" TARGET_FMT_lx ": ", pc); + for (i = 0; i < nb_insn; i++) { + g_string_append_printf(ds, "0x" TARGET_FMT_lx ": ", pc); count = s.info.print_insn(pc, &s.info); - monitor_printf(mon, "\n"); - if (count < 0) - break; + g_string_append_c(ds, '\n'); + if (count < 0) { + break; + } pc += count; } + + monitor_puts(mon, ds->str); } #endif diff --git a/disas/arm-a64.cc b/disas/arm-a64.cc deleted file mode 100644 index a1402a2e07..0000000000 --- a/disas/arm-a64.cc +++ /dev/null @@ -1,101 +0,0 @@ -/* - * ARM A64 disassembly output wrapper to libvixl - * Copyright (c) 2013 Linaro Limited - * Written by Claudio Fontana - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include "qemu/osdep.h" -#include "disas/dis-asm.h" - -#include "vixl/a64/disasm-a64.h" - -using namespace vixl; - -static Decoder *vixl_decoder = NULL; -static Disassembler *vixl_disasm = NULL; - -/* We don't use libvixl's PrintDisassembler because its output - * is a little unhelpful (trailing newlines, for example). - * Instead we use our own very similar variant so we have - * control over the format. - */ -class QEMUDisassembler : public Disassembler { -public: - QEMUDisassembler() : printf_(NULL), stream_(NULL) { } - ~QEMUDisassembler() { } - - void SetStream(FILE *stream) { - stream_ = stream; - } - - void SetPrintf(fprintf_function printf_fn) { - printf_ = printf_fn; - } - -protected: - virtual void ProcessOutput(const Instruction *instr) { - printf_(stream_, "%08" PRIx32 " %s", - instr->InstructionBits(), GetOutput()); - } - -private: - fprintf_function printf_; - FILE *stream_; -}; - -static int vixl_is_initialized(void) -{ - return vixl_decoder != NULL; -} - -static void vixl_init() { - vixl_decoder = new Decoder(); - vixl_disasm = new QEMUDisassembler(); - vixl_decoder->AppendVisitor(vixl_disasm); -} - -#define INSN_SIZE 4 - -/* Disassemble ARM A64 instruction. This is our only entry - * point from QEMU's C code. 
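The vixl wrapper being deleted here, and the hand-written opcode tables in disas/arm.c below, are superseded by the Capstone-based path selected through the cap_arch/cap_mode assignments earlier in disas.c (CS_ARCH_*/CS_MODE_* are Capstone constants). QEMU's own cap_disas_* plumbing is not part of this hunk; purely to illustrate the library those fields select, a stand-alone Capstone example (not QEMU code):

#include <stdio.h>
#include <inttypes.h>
#include <capstone/capstone.h>

int main(void)
{
    /* 48 c7 c0 2a 00 00 00 = "mov rax, 0x2a" on x86-64 */
    const uint8_t code[] = { 0x48, 0xc7, 0xc0, 0x2a, 0x00, 0x00, 0x00 };
    csh handle;
    cs_insn *insn;
    size_t count;

    if (cs_open(CS_ARCH_X86, CS_MODE_64, &handle) != CS_ERR_OK) {
        return 1;
    }
    count = cs_disasm(handle, code, sizeof(code), 0x1000, 0, &insn);
    for (size_t i = 0; i < count; i++) {
        printf("0x%" PRIx64 ":\t%s\t%s\n",
               insn[i].address, insn[i].mnemonic, insn[i].op_str);
    }
    cs_free(insn, count);
    cs_close(&handle);
    return 0;
}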
- */ -int print_insn_arm_a64(uint64_t addr, disassemble_info *info) -{ - uint8_t bytes[INSN_SIZE]; - uint32_t instrval; - const Instruction *instr; - int status; - - status = info->read_memory_func(addr, bytes, INSN_SIZE, info); - if (status != 0) { - info->memory_error_func(status, addr, info); - return -1; - } - - if (!vixl_is_initialized()) { - vixl_init(); - } - - ((QEMUDisassembler *)vixl_disasm)->SetPrintf(info->fprintf_func); - ((QEMUDisassembler *)vixl_disasm)->SetStream(info->stream); - - instrval = bytes[0] | bytes[1] << 8 | bytes[2] << 16 | bytes[3] << 24; - instr = reinterpret_cast(&instrval); - vixl_disasm->MapCodeAddress(addr, instr); - vixl_decoder->Decode(instr); - - return INSN_SIZE; -} diff --git a/disas/arm.c b/disas/arm.c deleted file mode 100644 index 7d940f2396..0000000000 --- a/disas/arm.c +++ /dev/null @@ -1,4012 +0,0 @@ -/* Instruction printing code for the ARM - Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 - 2007, Free Software Foundation, Inc. - Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org) - Modification by James G. Smith (jsmith@cygnus.co.uk) - - This file is part of libopcodes. - - This program is free software; you can redistribute it and/or modify it under - the terms of the GNU General Public License as published by the Free - Software Foundation; either version 2 of the License, or (at your option) - any later version. - - This program is distributed in the hope that it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, see . */ - -/* Start of qemu specific additions. Mostly this is stub definitions - for things we don't care about. */ - -#include "qemu/osdep.h" -#include "disas/dis-asm.h" - -#define ARM_EXT_V1 0 -#define ARM_EXT_V2 0 -#define ARM_EXT_V2S 0 -#define ARM_EXT_V3 0 -#define ARM_EXT_V3M 0 -#define ARM_EXT_V4 0 -#define ARM_EXT_V4T 0 -#define ARM_EXT_V5 0 -#define ARM_EXT_V5T 0 -#define ARM_EXT_V5ExP 0 -#define ARM_EXT_V5E 0 -#define ARM_EXT_V5J 0 -#define ARM_EXT_V6 0 -#define ARM_EXT_V6K 0 -#define ARM_EXT_V6Z 0 -#define ARM_EXT_V6T2 0 -#define ARM_EXT_V7 0 -#define ARM_EXT_DIV 0 - -/* Co-processor space extensions. */ -#define ARM_CEXT_XSCALE 0 -#define ARM_CEXT_MAVERICK 0 -#define ARM_CEXT_IWMMXT 0 - -#define FPU_FPA_EXT_V1 0 -#define FPU_FPA_EXT_V2 0 -#define FPU_VFP_EXT_NONE 0 -#define FPU_VFP_EXT_V1xD 0 -#define FPU_VFP_EXT_V1 0 -#define FPU_VFP_EXT_V2 0 -#define FPU_MAVERICK 0 -#define FPU_VFP_EXT_V3 0 -#define FPU_NEON_EXT_V1 0 - -/* Assume host uses ieee float. */ -static void floatformat_to_double (unsigned char *data, double *dest) -{ - union { - uint32_t i; - float f; - } u; - u.i = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24); - *dest = u.f; -} - -static int arm_read_memory(bfd_vma memaddr, bfd_byte *b, int length, - struct disassemble_info *info) -{ - assert((info->flags & INSN_ARM_BE32) == 0 || length == 2 || length == 4); - - if ((info->flags & INSN_ARM_BE32) != 0 && length == 2) { - memaddr ^= 2; - } - return info->read_memory_func(memaddr, b, length, info); -} - -/* End of qemu specific additions. */ - -struct opcode32 -{ - unsigned long arch; /* Architecture defining this insn. */ - unsigned long value, mask; /* Recognise insn if (op&mask)==value. */ - const char *assembler; /* How to disassemble this insn. 
*/ -}; - -struct opcode16 -{ - unsigned long arch; /* Architecture defining this insn. */ - unsigned short value, mask; /* Recognise insn if (op&mask)==value. */ - const char *assembler; /* How to disassemble this insn. */ -}; - -/* print_insn_coprocessor recognizes the following format control codes: - - %% % - - %c print condition code (always bits 28-31 in ARM mode) - %q print shifter argument - %u print condition code (unconditional in ARM mode) - %A print address for ldc/stc/ldf/stf instruction - %B print vstm/vldm register list - %C print vstr/vldr address operand - %I print cirrus signed shift immediate: bits 0..3|4..6 - %F print the COUNT field of a LFM/SFM instruction. - %P print floating point precision in arithmetic insn - %Q print floating point precision in ldf/stf insn - %R print floating point rounding mode - - %r print as an ARM register - %d print the bitfield in decimal - %k print immediate for VFPv3 conversion instruction - %x print the bitfield in hex - %X print the bitfield as 1 hex digit without leading "0x" - %f print a floating point constant if >7 else a - floating point register - %w print as an iWMMXt width field - [bhwd]ss/us - %g print as an iWMMXt 64-bit register - %G print as an iWMMXt general purpose or control register - %D print as a NEON D register - %Q print as a NEON Q register - - %y print a single precision VFP reg. - Codes: 0=>Sm, 1=>Sd, 2=>Sn, 3=>multi-list, 4=>Sm pair - %z print a double precision VFP reg - Codes: 0=>Dm, 1=>Dd, 2=>Dn, 3=>multi-list - - %'c print specified char iff bitfield is all ones - %`c print specified char iff bitfield is all zeroes - %?ab... select from array of values in big endian order - - %L print as an iWMMXt N/M width field. - %Z print the Immediate of a WSHUFH instruction. - %l like 'A' except use byte offsets for 'B' & 'H' - versions. - %i print 5-bit immediate in bits 8,3..0 - (print "32" when 0) - %r print register offset address for wldt/wstr instruction -*/ - -/* Common coprocessor opcodes shared between Arm and Thumb-2. */ - -static const struct opcode32 coprocessor_opcodes[] = -{ - /* XScale instructions. */ - {ARM_CEXT_XSCALE, 0x0e200010, 0x0fff0ff0, "mia%c\tacc0, %0-3r, %12-15r"}, - {ARM_CEXT_XSCALE, 0x0e280010, 0x0fff0ff0, "miaph%c\tacc0, %0-3r, %12-15r"}, - {ARM_CEXT_XSCALE, 0x0e2c0010, 0x0ffc0ff0, "mia%17'T%17`B%16'T%16`B%c\tacc0, %0-3r, %12-15r"}, - {ARM_CEXT_XSCALE, 0x0c400000, 0x0ff00fff, "mar%c\tacc0, %12-15r, %16-19r"}, - {ARM_CEXT_XSCALE, 0x0c500000, 0x0ff00fff, "mra%c\t%12-15r, %16-19r, acc0"}, - - /* Intel Wireless MMX technology instructions. 
*/ -#define FIRST_IWMMXT_INSN 0x0e130130 -#define IWMMXT_INSN_COUNT 73 - {ARM_CEXT_IWMMXT, 0x0e130130, 0x0f3f0fff, "tandc%22-23w%c\t%12-15r"}, - {ARM_CEXT_XSCALE, 0x0e400010, 0x0ff00f3f, "tbcst%6-7w%c\t%16-19g, %12-15r"}, - {ARM_CEXT_XSCALE, 0x0e130170, 0x0f3f0ff8, "textrc%22-23w%c\t%12-15r, #%0-2d"}, - {ARM_CEXT_XSCALE, 0x0e100070, 0x0f300ff0, "textrm%3?su%22-23w%c\t%12-15r, %16-19g, #%0-2d"}, - {ARM_CEXT_XSCALE, 0x0e600010, 0x0ff00f38, "tinsr%6-7w%c\t%16-19g, %12-15r, #%0-2d"}, - {ARM_CEXT_XSCALE, 0x0e000110, 0x0ff00fff, "tmcr%c\t%16-19G, %12-15r"}, - {ARM_CEXT_XSCALE, 0x0c400000, 0x0ff00ff0, "tmcrr%c\t%0-3g, %12-15r, %16-19r"}, - {ARM_CEXT_XSCALE, 0x0e2c0010, 0x0ffc0e10, "tmia%17?tb%16?tb%c\t%5-8g, %0-3r, %12-15r"}, - {ARM_CEXT_XSCALE, 0x0e200010, 0x0fff0e10, "tmia%c\t%5-8g, %0-3r, %12-15r"}, - {ARM_CEXT_XSCALE, 0x0e280010, 0x0fff0e10, "tmiaph%c\t%5-8g, %0-3r, %12-15r"}, - {ARM_CEXT_XSCALE, 0x0e100030, 0x0f300fff, "tmovmsk%22-23w%c\t%12-15r, %16-19g"}, - {ARM_CEXT_XSCALE, 0x0e100110, 0x0ff00ff0, "tmrc%c\t%12-15r, %16-19G"}, - {ARM_CEXT_XSCALE, 0x0c500000, 0x0ff00ff0, "tmrrc%c\t%12-15r, %16-19r, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e130150, 0x0f3f0fff, "torc%22-23w%c\t%12-15r"}, - {ARM_CEXT_XSCALE, 0x0e130190, 0x0f3f0fff, "torvsc%22-23w%c\t%12-15r"}, - {ARM_CEXT_XSCALE, 0x0e2001c0, 0x0f300fff, "wabs%22-23w%c\t%12-15g, %16-19g"}, - {ARM_CEXT_XSCALE, 0x0e0001c0, 0x0f300fff, "wacc%22-23w%c\t%12-15g, %16-19g"}, - {ARM_CEXT_XSCALE, 0x0e000180, 0x0f000ff0, "wadd%20-23w%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e2001a0, 0x0f300ff0, "waddbhus%22?ml%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0ea001a0, 0x0ff00ff0, "waddsubhx%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e000020, 0x0f800ff0, "waligni%c\t%12-15g, %16-19g, %0-3g, #%20-22d"}, - {ARM_CEXT_XSCALE, 0x0e800020, 0x0fc00ff0, "walignr%20-21d%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e200000, 0x0fe00ff0, "wand%20'n%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e800000, 0x0fa00ff0, "wavg2%22?hb%20'r%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e400000, 0x0fe00ff0, "wavg4%20'r%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e000060, 0x0f300ff0, "wcmpeq%22-23w%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e100060, 0x0f100ff0, "wcmpgt%21?su%22-23w%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0xfc500100, 0xfe500f00, "wldrd\t%12-15g, %r"}, - {ARM_CEXT_XSCALE, 0xfc100100, 0xfe500f00, "wldrw\t%12-15G, %A"}, - {ARM_CEXT_XSCALE, 0x0c100000, 0x0e100e00, "wldr%L%c\t%12-15g, %l"}, - {ARM_CEXT_XSCALE, 0x0e400100, 0x0fc00ff0, "wmac%21?su%20'z%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e800100, 0x0fc00ff0, "wmadd%21?su%20'x%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0ec00100, 0x0fd00ff0, "wmadd%21?sun%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e000160, 0x0f100ff0, "wmax%21?su%22-23w%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e000080, 0x0f100fe0, "wmerge%c\t%12-15g, %16-19g, %0-3g, #%21-23d"}, - {ARM_CEXT_XSCALE, 0x0e0000a0, 0x0f800ff0, "wmia%21?tb%20?tb%22'n%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e800120, 0x0f800ff0, "wmiaw%21?tb%20?tb%22'n%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e100160, 0x0f100ff0, "wmin%21?su%22-23w%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e000100, 0x0fc00ff0, "wmul%21?su%20?ml%23'r%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0ed00100, 0x0fd00ff0, "wmul%21?sumr%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0ee000c0, 0x0fe00ff0, "wmulwsm%20`r%c\t%12-15g, %16-19g, %0-3g"}, - 
{ARM_CEXT_XSCALE, 0x0ec000c0, 0x0fe00ff0, "wmulwum%20`r%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0eb000c0, 0x0ff00ff0, "wmulwl%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e8000a0, 0x0f800ff0, "wqmia%21?tb%20?tb%22'n%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e100080, 0x0fd00ff0, "wqmulm%21'r%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0ec000e0, 0x0fd00ff0, "wqmulwm%21'r%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e000000, 0x0ff00ff0, "wor%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e000080, 0x0f000ff0, "wpack%20-23w%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0xfe300040, 0xff300ef0, "wror%22-23w\t%12-15g, %16-19g, #%i"}, - {ARM_CEXT_XSCALE, 0x0e300040, 0x0f300ff0, "wror%22-23w%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e300140, 0x0f300ff0, "wror%22-23wg%c\t%12-15g, %16-19g, %0-3G"}, - {ARM_CEXT_XSCALE, 0x0e000120, 0x0fa00ff0, "wsad%22?hb%20'z%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e0001e0, 0x0f000ff0, "wshufh%c\t%12-15g, %16-19g, #%Z"}, - {ARM_CEXT_XSCALE, 0xfe100040, 0xff300ef0, "wsll%22-23w\t%12-15g, %16-19g, #%i"}, - {ARM_CEXT_XSCALE, 0x0e100040, 0x0f300ff0, "wsll%22-23w%8'g%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e100148, 0x0f300ffc, "wsll%22-23w%8'g%c\t%12-15g, %16-19g, %0-3G"}, - {ARM_CEXT_XSCALE, 0xfe000040, 0xff300ef0, "wsra%22-23w\t%12-15g, %16-19g, #%i"}, - {ARM_CEXT_XSCALE, 0x0e000040, 0x0f300ff0, "wsra%22-23w%8'g%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e000148, 0x0f300ffc, "wsra%22-23w%8'g%c\t%12-15g, %16-19g, %0-3G"}, - {ARM_CEXT_XSCALE, 0xfe200040, 0xff300ef0, "wsrl%22-23w\t%12-15g, %16-19g, #%i"}, - {ARM_CEXT_XSCALE, 0x0e200040, 0x0f300ff0, "wsrl%22-23w%8'g%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e200148, 0x0f300ffc, "wsrl%22-23w%8'g%c\t%12-15g, %16-19g, %0-3G"}, - {ARM_CEXT_XSCALE, 0xfc400100, 0xfe500f00, "wstrd\t%12-15g, %r"}, - {ARM_CEXT_XSCALE, 0xfc000100, 0xfe500f00, "wstrw\t%12-15G, %A"}, - {ARM_CEXT_XSCALE, 0x0c000000, 0x0e100e00, "wstr%L%c\t%12-15g, %l"}, - {ARM_CEXT_XSCALE, 0x0e0001a0, 0x0f000ff0, "wsub%20-23w%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0ed001c0, 0x0ff00ff0, "wsubaddhx%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e1001c0, 0x0f300ff0, "wabsdiff%22-23w%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e0000c0, 0x0fd00fff, "wunpckeh%21?sub%c\t%12-15g, %16-19g"}, - {ARM_CEXT_XSCALE, 0x0e4000c0, 0x0fd00fff, "wunpckeh%21?suh%c\t%12-15g, %16-19g"}, - {ARM_CEXT_XSCALE, 0x0e8000c0, 0x0fd00fff, "wunpckeh%21?suw%c\t%12-15g, %16-19g"}, - {ARM_CEXT_XSCALE, 0x0e0000e0, 0x0f100fff, "wunpckel%21?su%22-23w%c\t%12-15g, %16-19g"}, - {ARM_CEXT_XSCALE, 0x0e1000c0, 0x0f300ff0, "wunpckih%22-23w%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e1000e0, 0x0f300ff0, "wunpckil%22-23w%c\t%12-15g, %16-19g, %0-3g"}, - {ARM_CEXT_XSCALE, 0x0e100000, 0x0ff00ff0, "wxor%c\t%12-15g, %16-19g, %0-3g"}, - - /* Floating point coprocessor (FPA) instructions */ - {FPU_FPA_EXT_V1, 0x0e000100, 0x0ff08f10, "adf%c%P%R\t%12-14f, %16-18f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e100100, 0x0ff08f10, "muf%c%P%R\t%12-14f, %16-18f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e200100, 0x0ff08f10, "suf%c%P%R\t%12-14f, %16-18f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e300100, 0x0ff08f10, "rsf%c%P%R\t%12-14f, %16-18f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e400100, 0x0ff08f10, "dvf%c%P%R\t%12-14f, %16-18f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e500100, 0x0ff08f10, "rdf%c%P%R\t%12-14f, %16-18f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e600100, 0x0ff08f10, "pow%c%P%R\t%12-14f, %16-18f, %0-3f"}, - {FPU_FPA_EXT_V1, 
0x0e700100, 0x0ff08f10, "rpw%c%P%R\t%12-14f, %16-18f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e800100, 0x0ff08f10, "rmf%c%P%R\t%12-14f, %16-18f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e900100, 0x0ff08f10, "fml%c%P%R\t%12-14f, %16-18f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0ea00100, 0x0ff08f10, "fdv%c%P%R\t%12-14f, %16-18f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0eb00100, 0x0ff08f10, "frd%c%P%R\t%12-14f, %16-18f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0ec00100, 0x0ff08f10, "pol%c%P%R\t%12-14f, %16-18f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e008100, 0x0ff08f10, "mvf%c%P%R\t%12-14f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e108100, 0x0ff08f10, "mnf%c%P%R\t%12-14f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e208100, 0x0ff08f10, "abs%c%P%R\t%12-14f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e308100, 0x0ff08f10, "rnd%c%P%R\t%12-14f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e408100, 0x0ff08f10, "sqt%c%P%R\t%12-14f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e508100, 0x0ff08f10, "log%c%P%R\t%12-14f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e608100, 0x0ff08f10, "lgn%c%P%R\t%12-14f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e708100, 0x0ff08f10, "exp%c%P%R\t%12-14f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e808100, 0x0ff08f10, "sin%c%P%R\t%12-14f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e908100, 0x0ff08f10, "cos%c%P%R\t%12-14f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0ea08100, 0x0ff08f10, "tan%c%P%R\t%12-14f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0eb08100, 0x0ff08f10, "asn%c%P%R\t%12-14f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0ec08100, 0x0ff08f10, "acs%c%P%R\t%12-14f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0ed08100, 0x0ff08f10, "atn%c%P%R\t%12-14f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0ee08100, 0x0ff08f10, "urd%c%P%R\t%12-14f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0ef08100, 0x0ff08f10, "nrm%c%P%R\t%12-14f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0e000110, 0x0ff00f1f, "flt%c%P%R\t%16-18f, %12-15r"}, - {FPU_FPA_EXT_V1, 0x0e100110, 0x0fff0f98, "fix%c%R\t%12-15r, %0-2f"}, - {FPU_FPA_EXT_V1, 0x0e200110, 0x0fff0fff, "wfs%c\t%12-15r"}, - {FPU_FPA_EXT_V1, 0x0e300110, 0x0fff0fff, "rfs%c\t%12-15r"}, - {FPU_FPA_EXT_V1, 0x0e400110, 0x0fff0fff, "wfc%c\t%12-15r"}, - {FPU_FPA_EXT_V1, 0x0e500110, 0x0fff0fff, "rfc%c\t%12-15r"}, - {FPU_FPA_EXT_V1, 0x0e90f110, 0x0ff8fff0, "cmf%c\t%16-18f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0eb0f110, 0x0ff8fff0, "cnf%c\t%16-18f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0ed0f110, 0x0ff8fff0, "cmfe%c\t%16-18f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0ef0f110, 0x0ff8fff0, "cnfe%c\t%16-18f, %0-3f"}, - {FPU_FPA_EXT_V1, 0x0c000100, 0x0e100f00, "stf%c%Q\t%12-14f, %A"}, - {FPU_FPA_EXT_V1, 0x0c100100, 0x0e100f00, "ldf%c%Q\t%12-14f, %A"}, - {FPU_FPA_EXT_V2, 0x0c000200, 0x0e100f00, "sfm%c\t%12-14f, %F, %A"}, - {FPU_FPA_EXT_V2, 0x0c100200, 0x0e100f00, "lfm%c\t%12-14f, %F, %A"}, - - /* Register load/store */ - {FPU_NEON_EXT_V1, 0x0d200b00, 0x0fb00f01, "vstmdb%c\t%16-19r%21'!, %B"}, - {FPU_NEON_EXT_V1, 0x0d300b00, 0x0fb00f01, "vldmdb%c\t%16-19r%21'!, %B"}, - {FPU_NEON_EXT_V1, 0x0c800b00, 0x0f900f01, "vstmia%c\t%16-19r%21'!, %B"}, - {FPU_NEON_EXT_V1, 0x0c900b00, 0x0f900f01, "vldmia%c\t%16-19r%21'!, %B"}, - {FPU_NEON_EXT_V1, 0x0d000b00, 0x0f300f00, "vstr%c\t%12-15,22D, %C"}, - {FPU_NEON_EXT_V1, 0x0d100b00, 0x0f300f00, "vldr%c\t%12-15,22D, %C"}, - - /* Data transfer between ARM and NEON registers */ - {FPU_NEON_EXT_V1, 0x0e800b10, 0x0ff00f70, "vdup%c.32\t%16-19,7D, %12-15r"}, - {FPU_NEON_EXT_V1, 0x0e800b30, 0x0ff00f70, "vdup%c.16\t%16-19,7D, %12-15r"}, - {FPU_NEON_EXT_V1, 0x0ea00b10, 0x0ff00f70, "vdup%c.32\t%16-19,7Q, %12-15r"}, - {FPU_NEON_EXT_V1, 0x0ea00b30, 0x0ff00f70, "vdup%c.16\t%16-19,7Q, %12-15r"}, - {FPU_NEON_EXT_V1, 0x0ec00b10, 0x0ff00f70, "vdup%c.8\t%16-19,7D, %12-15r"}, - {FPU_NEON_EXT_V1, 0x0ee00b10, 0x0ff00f70, 
"vdup%c.8\t%16-19,7Q, %12-15r"}, - {FPU_NEON_EXT_V1, 0x0c400b10, 0x0ff00fd0, "vmov%c\t%0-3,5D, %12-15r, %16-19r"}, - {FPU_NEON_EXT_V1, 0x0c500b10, 0x0ff00fd0, "vmov%c\t%12-15r, %16-19r, %0-3,5D"}, - {FPU_NEON_EXT_V1, 0x0e000b10, 0x0fd00f70, "vmov%c.32\t%16-19,7D[%21d], %12-15r"}, - {FPU_NEON_EXT_V1, 0x0e100b10, 0x0f500f70, "vmov%c.32\t%12-15r, %16-19,7D[%21d]"}, - {FPU_NEON_EXT_V1, 0x0e000b30, 0x0fd00f30, "vmov%c.16\t%16-19,7D[%6,21d], %12-15r"}, - {FPU_NEON_EXT_V1, 0x0e100b30, 0x0f500f30, "vmov%c.%23?us16\t%12-15r, %16-19,7D[%6,21d]"}, - {FPU_NEON_EXT_V1, 0x0e400b10, 0x0fd00f10, "vmov%c.8\t%16-19,7D[%5,6,21d], %12-15r"}, - {FPU_NEON_EXT_V1, 0x0e500b10, 0x0f500f10, "vmov%c.%23?us8\t%12-15r, %16-19,7D[%5,6,21d]"}, - - /* Floating point coprocessor (VFP) instructions */ - {FPU_VFP_EXT_V1xD, 0x0ef1fa10, 0x0fffffff, "fmstat%c"}, - {FPU_VFP_EXT_V1xD, 0x0ee00a10, 0x0fff0fff, "fmxr%c\tfpsid, %12-15r"}, - {FPU_VFP_EXT_V1xD, 0x0ee10a10, 0x0fff0fff, "fmxr%c\tfpscr, %12-15r"}, - {FPU_VFP_EXT_V1xD, 0x0ee60a10, 0x0fff0fff, "fmxr%c\tmvfr1, %12-15r"}, - {FPU_VFP_EXT_V1xD, 0x0ee70a10, 0x0fff0fff, "fmxr%c\tmvfr0, %12-15r"}, - {FPU_VFP_EXT_V1xD, 0x0ee80a10, 0x0fff0fff, "fmxr%c\tfpexc, %12-15r"}, - {FPU_VFP_EXT_V1xD, 0x0ee90a10, 0x0fff0fff, "fmxr%c\tfpinst, %12-15r\t@ Impl def"}, - {FPU_VFP_EXT_V1xD, 0x0eea0a10, 0x0fff0fff, "fmxr%c\tfpinst2, %12-15r\t@ Impl def"}, - {FPU_VFP_EXT_V1xD, 0x0ef00a10, 0x0fff0fff, "fmrx%c\t%12-15r, fpsid"}, - {FPU_VFP_EXT_V1xD, 0x0ef10a10, 0x0fff0fff, "fmrx%c\t%12-15r, fpscr"}, - {FPU_VFP_EXT_V1xD, 0x0ef60a10, 0x0fff0fff, "fmrx%c\t%12-15r, mvfr1"}, - {FPU_VFP_EXT_V1xD, 0x0ef70a10, 0x0fff0fff, "fmrx%c\t%12-15r, mvfr0"}, - {FPU_VFP_EXT_V1xD, 0x0ef80a10, 0x0fff0fff, "fmrx%c\t%12-15r, fpexc"}, - {FPU_VFP_EXT_V1xD, 0x0ef90a10, 0x0fff0fff, "fmrx%c\t%12-15r, fpinst\t@ Impl def"}, - {FPU_VFP_EXT_V1xD, 0x0efa0a10, 0x0fff0fff, "fmrx%c\t%12-15r, fpinst2\t@ Impl def"}, - {FPU_VFP_EXT_V1, 0x0e000b10, 0x0ff00fff, "fmdlr%c\t%z2, %12-15r"}, - {FPU_VFP_EXT_V1, 0x0e100b10, 0x0ff00fff, "fmrdl%c\t%12-15r, %z2"}, - {FPU_VFP_EXT_V1, 0x0e200b10, 0x0ff00fff, "fmdhr%c\t%z2, %12-15r"}, - {FPU_VFP_EXT_V1, 0x0e300b10, 0x0ff00fff, "fmrdh%c\t%12-15r, %z2"}, - {FPU_VFP_EXT_V1xD, 0x0ee00a10, 0x0ff00fff, "fmxr%c\t, %12-15r"}, - {FPU_VFP_EXT_V1xD, 0x0ef00a10, 0x0ff00fff, "fmrx%c\t%12-15r, "}, - {FPU_VFP_EXT_V1xD, 0x0e000a10, 0x0ff00f7f, "fmsr%c\t%y2, %12-15r"}, - {FPU_VFP_EXT_V1xD, 0x0e100a10, 0x0ff00f7f, "fmrs%c\t%12-15r, %y2"}, - {FPU_VFP_EXT_V1xD, 0x0eb50a40, 0x0fbf0f70, "fcmp%7'ezs%c\t%y1"}, - {FPU_VFP_EXT_V1, 0x0eb50b40, 0x0fbf0f70, "fcmp%7'ezd%c\t%z1"}, - {FPU_VFP_EXT_V1xD, 0x0eb00a40, 0x0fbf0fd0, "fcpys%c\t%y1, %y0"}, - {FPU_VFP_EXT_V1xD, 0x0eb00ac0, 0x0fbf0fd0, "fabss%c\t%y1, %y0"}, - {FPU_VFP_EXT_V1, 0x0eb00b40, 0x0fbf0fd0, "fcpyd%c\t%z1, %z0"}, - {FPU_VFP_EXT_V1, 0x0eb00bc0, 0x0fbf0fd0, "fabsd%c\t%z1, %z0"}, - {FPU_VFP_EXT_V1xD, 0x0eb10a40, 0x0fbf0fd0, "fnegs%c\t%y1, %y0"}, - {FPU_VFP_EXT_V1xD, 0x0eb10ac0, 0x0fbf0fd0, "fsqrts%c\t%y1, %y0"}, - {FPU_VFP_EXT_V1, 0x0eb10b40, 0x0fbf0fd0, "fnegd%c\t%z1, %z0"}, - {FPU_VFP_EXT_V1, 0x0eb10bc0, 0x0fbf0fd0, "fsqrtd%c\t%z1, %z0"}, - {FPU_VFP_EXT_V1, 0x0eb70ac0, 0x0fbf0fd0, "fcvtds%c\t%z1, %y0"}, - {FPU_VFP_EXT_V1, 0x0eb70bc0, 0x0fbf0fd0, "fcvtsd%c\t%y1, %z0"}, - {FPU_VFP_EXT_V1xD, 0x0eb80a40, 0x0fbf0fd0, "fuitos%c\t%y1, %y0"}, - {FPU_VFP_EXT_V1xD, 0x0eb80ac0, 0x0fbf0fd0, "fsitos%c\t%y1, %y0"}, - {FPU_VFP_EXT_V1, 0x0eb80b40, 0x0fbf0fd0, "fuitod%c\t%z1, %y0"}, - {FPU_VFP_EXT_V1, 0x0eb80bc0, 0x0fbf0fd0, "fsitod%c\t%z1, %y0"}, - {FPU_VFP_EXT_V1xD, 0x0eb40a40, 
0x0fbf0f50, "fcmp%7'es%c\t%y1, %y0"}, - {FPU_VFP_EXT_V1, 0x0eb40b40, 0x0fbf0f50, "fcmp%7'ed%c\t%z1, %z0"}, - {FPU_VFP_EXT_V3, 0x0eba0a40, 0x0fbe0f50, "f%16?us%7?lhtos%c\t%y1, #%5,0-3k"}, - {FPU_VFP_EXT_V3, 0x0eba0b40, 0x0fbe0f50, "f%16?us%7?lhtod%c\t%z1, #%5,0-3k"}, - {FPU_VFP_EXT_V1xD, 0x0ebc0a40, 0x0fbe0f50, "fto%16?sui%7'zs%c\t%y1, %y0"}, - {FPU_VFP_EXT_V1, 0x0ebc0b40, 0x0fbe0f50, "fto%16?sui%7'zd%c\t%y1, %z0"}, - {FPU_VFP_EXT_V3, 0x0ebe0a40, 0x0fbe0f50, "fto%16?us%7?lhs%c\t%y1, #%5,0-3k"}, - {FPU_VFP_EXT_V3, 0x0ebe0b40, 0x0fbe0f50, "fto%16?us%7?lhd%c\t%z1, #%5,0-3k"}, - {FPU_VFP_EXT_V1, 0x0c500b10, 0x0fb00ff0, "fmrrd%c\t%12-15r, %16-19r, %z0"}, - {FPU_VFP_EXT_V3, 0x0eb00a00, 0x0fb00ff0, "fconsts%c\t%y1, #%0-3,16-19d"}, - {FPU_VFP_EXT_V3, 0x0eb00b00, 0x0fb00ff0, "fconstd%c\t%z1, #%0-3,16-19d"}, - {FPU_VFP_EXT_V2, 0x0c400a10, 0x0ff00fd0, "fmsrr%c\t%y4, %12-15r, %16-19r"}, - {FPU_VFP_EXT_V2, 0x0c400b10, 0x0ff00fd0, "fmdrr%c\t%z0, %12-15r, %16-19r"}, - {FPU_VFP_EXT_V2, 0x0c500a10, 0x0ff00fd0, "fmrrs%c\t%12-15r, %16-19r, %y4"}, - {FPU_VFP_EXT_V1xD, 0x0e000a00, 0x0fb00f50, "fmacs%c\t%y1, %y2, %y0"}, - {FPU_VFP_EXT_V1xD, 0x0e000a40, 0x0fb00f50, "fnmacs%c\t%y1, %y2, %y0"}, - {FPU_VFP_EXT_V1, 0x0e000b00, 0x0fb00f50, "fmacd%c\t%z1, %z2, %z0"}, - {FPU_VFP_EXT_V1, 0x0e000b40, 0x0fb00f50, "fnmacd%c\t%z1, %z2, %z0"}, - {FPU_VFP_EXT_V1xD, 0x0e100a00, 0x0fb00f50, "fmscs%c\t%y1, %y2, %y0"}, - {FPU_VFP_EXT_V1xD, 0x0e100a40, 0x0fb00f50, "fnmscs%c\t%y1, %y2, %y0"}, - {FPU_VFP_EXT_V1, 0x0e100b00, 0x0fb00f50, "fmscd%c\t%z1, %z2, %z0"}, - {FPU_VFP_EXT_V1, 0x0e100b40, 0x0fb00f50, "fnmscd%c\t%z1, %z2, %z0"}, - {FPU_VFP_EXT_V1xD, 0x0e200a00, 0x0fb00f50, "fmuls%c\t%y1, %y2, %y0"}, - {FPU_VFP_EXT_V1xD, 0x0e200a40, 0x0fb00f50, "fnmuls%c\t%y1, %y2, %y0"}, - {FPU_VFP_EXT_V1, 0x0e200b00, 0x0fb00f50, "fmuld%c\t%z1, %z2, %z0"}, - {FPU_VFP_EXT_V1, 0x0e200b40, 0x0fb00f50, "fnmuld%c\t%z1, %z2, %z0"}, - {FPU_VFP_EXT_V1xD, 0x0e300a00, 0x0fb00f50, "fadds%c\t%y1, %y2, %y0"}, - {FPU_VFP_EXT_V1xD, 0x0e300a40, 0x0fb00f50, "fsubs%c\t%y1, %y2, %y0"}, - {FPU_VFP_EXT_V1, 0x0e300b00, 0x0fb00f50, "faddd%c\t%z1, %z2, %z0"}, - {FPU_VFP_EXT_V1, 0x0e300b40, 0x0fb00f50, "fsubd%c\t%z1, %z2, %z0"}, - {FPU_VFP_EXT_V1xD, 0x0e800a00, 0x0fb00f50, "fdivs%c\t%y1, %y2, %y0"}, - {FPU_VFP_EXT_V1, 0x0e800b00, 0x0fb00f50, "fdivd%c\t%z1, %z2, %z0"}, - {FPU_VFP_EXT_V1xD, 0x0d200a00, 0x0fb00f00, "fstmdbs%c\t%16-19r!, %y3"}, - {FPU_VFP_EXT_V1xD, 0x0d200b00, 0x0fb00f00, "fstmdb%0?xd%c\t%16-19r!, %z3"}, - {FPU_VFP_EXT_V1xD, 0x0d300a00, 0x0fb00f00, "fldmdbs%c\t%16-19r!, %y3"}, - {FPU_VFP_EXT_V1xD, 0x0d300b00, 0x0fb00f00, "fldmdb%0?xd%c\t%16-19r!, %z3"}, - {FPU_VFP_EXT_V1xD, 0x0d000a00, 0x0f300f00, "fsts%c\t%y1, %A"}, - {FPU_VFP_EXT_V1, 0x0d000b00, 0x0f300f00, "fstd%c\t%z1, %A"}, - {FPU_VFP_EXT_V1xD, 0x0d100a00, 0x0f300f00, "flds%c\t%y1, %A"}, - {FPU_VFP_EXT_V1, 0x0d100b00, 0x0f300f00, "fldd%c\t%z1, %A"}, - {FPU_VFP_EXT_V1xD, 0x0c800a00, 0x0f900f00, "fstmias%c\t%16-19r%21'!, %y3"}, - {FPU_VFP_EXT_V1xD, 0x0c800b00, 0x0f900f00, "fstmia%0?xd%c\t%16-19r%21'!, %z3"}, - {FPU_VFP_EXT_V1xD, 0x0c900a00, 0x0f900f00, "fldmias%c\t%16-19r%21'!, %y3"}, - {FPU_VFP_EXT_V1xD, 0x0c900b00, 0x0f900f00, "fldmia%0?xd%c\t%16-19r%21'!, %z3"}, - - /* Cirrus coprocessor instructions. 
*/ - {ARM_CEXT_MAVERICK, 0x0d100400, 0x0f500f00, "cfldrs%c\tmvf%12-15d, %A"}, - {ARM_CEXT_MAVERICK, 0x0c100400, 0x0f500f00, "cfldrs%c\tmvf%12-15d, %A"}, - {ARM_CEXT_MAVERICK, 0x0d500400, 0x0f500f00, "cfldrd%c\tmvd%12-15d, %A"}, - {ARM_CEXT_MAVERICK, 0x0c500400, 0x0f500f00, "cfldrd%c\tmvd%12-15d, %A"}, - {ARM_CEXT_MAVERICK, 0x0d100500, 0x0f500f00, "cfldr32%c\tmvfx%12-15d, %A"}, - {ARM_CEXT_MAVERICK, 0x0c100500, 0x0f500f00, "cfldr32%c\tmvfx%12-15d, %A"}, - {ARM_CEXT_MAVERICK, 0x0d500500, 0x0f500f00, "cfldr64%c\tmvdx%12-15d, %A"}, - {ARM_CEXT_MAVERICK, 0x0c500500, 0x0f500f00, "cfldr64%c\tmvdx%12-15d, %A"}, - {ARM_CEXT_MAVERICK, 0x0d000400, 0x0f500f00, "cfstrs%c\tmvf%12-15d, %A"}, - {ARM_CEXT_MAVERICK, 0x0c000400, 0x0f500f00, "cfstrs%c\tmvf%12-15d, %A"}, - {ARM_CEXT_MAVERICK, 0x0d400400, 0x0f500f00, "cfstrd%c\tmvd%12-15d, %A"}, - {ARM_CEXT_MAVERICK, 0x0c400400, 0x0f500f00, "cfstrd%c\tmvd%12-15d, %A"}, - {ARM_CEXT_MAVERICK, 0x0d000500, 0x0f500f00, "cfstr32%c\tmvfx%12-15d, %A"}, - {ARM_CEXT_MAVERICK, 0x0c000500, 0x0f500f00, "cfstr32%c\tmvfx%12-15d, %A"}, - {ARM_CEXT_MAVERICK, 0x0d400500, 0x0f500f00, "cfstr64%c\tmvdx%12-15d, %A"}, - {ARM_CEXT_MAVERICK, 0x0c400500, 0x0f500f00, "cfstr64%c\tmvdx%12-15d, %A"}, - {ARM_CEXT_MAVERICK, 0x0e000450, 0x0ff00ff0, "cfmvsr%c\tmvf%16-19d, %12-15r"}, - {ARM_CEXT_MAVERICK, 0x0e100450, 0x0ff00ff0, "cfmvrs%c\t%12-15r, mvf%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e000410, 0x0ff00ff0, "cfmvdlr%c\tmvd%16-19d, %12-15r"}, - {ARM_CEXT_MAVERICK, 0x0e100410, 0x0ff00ff0, "cfmvrdl%c\t%12-15r, mvd%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e000430, 0x0ff00ff0, "cfmvdhr%c\tmvd%16-19d, %12-15r"}, - {ARM_CEXT_MAVERICK, 0x0e100430, 0x0ff00fff, "cfmvrdh%c\t%12-15r, mvd%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e000510, 0x0ff00fff, "cfmv64lr%c\tmvdx%16-19d, %12-15r"}, - {ARM_CEXT_MAVERICK, 0x0e100510, 0x0ff00fff, "cfmvr64l%c\t%12-15r, mvdx%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e000530, 0x0ff00fff, "cfmv64hr%c\tmvdx%16-19d, %12-15r"}, - {ARM_CEXT_MAVERICK, 0x0e100530, 0x0ff00fff, "cfmvr64h%c\t%12-15r, mvdx%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e200440, 0x0ff00fff, "cfmval32%c\tmvax%12-15d, mvfx%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e100440, 0x0ff00fff, "cfmv32al%c\tmvfx%12-15d, mvax%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e200460, 0x0ff00fff, "cfmvam32%c\tmvax%12-15d, mvfx%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e100460, 0x0ff00fff, "cfmv32am%c\tmvfx%12-15d, mvax%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e200480, 0x0ff00fff, "cfmvah32%c\tmvax%12-15d, mvfx%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e100480, 0x0ff00fff, "cfmv32ah%c\tmvfx%12-15d, mvax%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e2004a0, 0x0ff00fff, "cfmva32%c\tmvax%12-15d, mvfx%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e1004a0, 0x0ff00fff, "cfmv32a%c\tmvfx%12-15d, mvax%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e2004c0, 0x0ff00fff, "cfmva64%c\tmvax%12-15d, mvdx%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e1004c0, 0x0ff00fff, "cfmv64a%c\tmvdx%12-15d, mvax%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e2004e0, 0x0fff0fff, "cfmvsc32%c\tdspsc, mvdx%12-15d"}, - {ARM_CEXT_MAVERICK, 0x0e1004e0, 0x0fff0fff, "cfmv32sc%c\tmvdx%12-15d, dspsc"}, - {ARM_CEXT_MAVERICK, 0x0e000400, 0x0ff00fff, "cfcpys%c\tmvf%12-15d, mvf%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e000420, 0x0ff00fff, "cfcpyd%c\tmvd%12-15d, mvd%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e000460, 0x0ff00fff, "cfcvtsd%c\tmvd%12-15d, mvf%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e000440, 0x0ff00fff, "cfcvtds%c\tmvf%12-15d, mvd%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e000480, 0x0ff00fff, "cfcvt32s%c\tmvf%12-15d, mvfx%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e0004a0, 0x0ff00fff, "cfcvt32d%c\tmvd%12-15d, 
mvfx%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e0004c0, 0x0ff00fff, "cfcvt64s%c\tmvf%12-15d, mvdx%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e0004e0, 0x0ff00fff, "cfcvt64d%c\tmvd%12-15d, mvdx%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e100580, 0x0ff00fff, "cfcvts32%c\tmvfx%12-15d, mvf%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e1005a0, 0x0ff00fff, "cfcvtd32%c\tmvfx%12-15d, mvd%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e1005c0, 0x0ff00fff, "cftruncs32%c\tmvfx%12-15d, mvf%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e1005e0, 0x0ff00fff, "cftruncd32%c\tmvfx%12-15d, mvd%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e000550, 0x0ff00ff0, "cfrshl32%c\tmvfx%16-19d, mvfx%0-3d, %12-15r"}, - {ARM_CEXT_MAVERICK, 0x0e000570, 0x0ff00ff0, "cfrshl64%c\tmvdx%16-19d, mvdx%0-3d, %12-15r"}, - {ARM_CEXT_MAVERICK, 0x0e000500, 0x0ff00f10, "cfsh32%c\tmvfx%12-15d, mvfx%16-19d, #%I"}, - {ARM_CEXT_MAVERICK, 0x0e200500, 0x0ff00f10, "cfsh64%c\tmvdx%12-15d, mvdx%16-19d, #%I"}, - {ARM_CEXT_MAVERICK, 0x0e100490, 0x0ff00ff0, "cfcmps%c\t%12-15r, mvf%16-19d, mvf%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e1004b0, 0x0ff00ff0, "cfcmpd%c\t%12-15r, mvd%16-19d, mvd%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e100590, 0x0ff00ff0, "cfcmp32%c\t%12-15r, mvfx%16-19d, mvfx%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e1005b0, 0x0ff00ff0, "cfcmp64%c\t%12-15r, mvdx%16-19d, mvdx%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e300400, 0x0ff00fff, "cfabss%c\tmvf%12-15d, mvf%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e300420, 0x0ff00fff, "cfabsd%c\tmvd%12-15d, mvd%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e300440, 0x0ff00fff, "cfnegs%c\tmvf%12-15d, mvf%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e300460, 0x0ff00fff, "cfnegd%c\tmvd%12-15d, mvd%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e300480, 0x0ff00ff0, "cfadds%c\tmvf%12-15d, mvf%16-19d, mvf%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e3004a0, 0x0ff00ff0, "cfaddd%c\tmvd%12-15d, mvd%16-19d, mvd%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e3004c0, 0x0ff00ff0, "cfsubs%c\tmvf%12-15d, mvf%16-19d, mvf%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e3004e0, 0x0ff00ff0, "cfsubd%c\tmvd%12-15d, mvd%16-19d, mvd%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e100400, 0x0ff00ff0, "cfmuls%c\tmvf%12-15d, mvf%16-19d, mvf%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e100420, 0x0ff00ff0, "cfmuld%c\tmvd%12-15d, mvd%16-19d, mvd%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e300500, 0x0ff00fff, "cfabs32%c\tmvfx%12-15d, mvfx%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e300520, 0x0ff00fff, "cfabs64%c\tmvdx%12-15d, mvdx%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e300540, 0x0ff00fff, "cfneg32%c\tmvfx%12-15d, mvfx%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e300560, 0x0ff00fff, "cfneg64%c\tmvdx%12-15d, mvdx%16-19d"}, - {ARM_CEXT_MAVERICK, 0x0e300580, 0x0ff00ff0, "cfadd32%c\tmvfx%12-15d, mvfx%16-19d, mvfx%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e3005a0, 0x0ff00ff0, "cfadd64%c\tmvdx%12-15d, mvdx%16-19d, mvdx%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e3005c0, 0x0ff00ff0, "cfsub32%c\tmvfx%12-15d, mvfx%16-19d, mvfx%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e3005e0, 0x0ff00ff0, "cfsub64%c\tmvdx%12-15d, mvdx%16-19d, mvdx%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e100500, 0x0ff00ff0, "cfmul32%c\tmvfx%12-15d, mvfx%16-19d, mvfx%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e100520, 0x0ff00ff0, "cfmul64%c\tmvdx%12-15d, mvdx%16-19d, mvdx%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e100540, 0x0ff00ff0, "cfmac32%c\tmvfx%12-15d, mvfx%16-19d, mvfx%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e100560, 0x0ff00ff0, "cfmsc32%c\tmvfx%12-15d, mvfx%16-19d, mvfx%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e000600, 0x0ff00f10, "cfmadd32%c\tmvax%5-7d, mvfx%12-15d, mvfx%16-19d, mvfx%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e100600, 0x0ff00f10, "cfmsub32%c\tmvax%5-7d, mvfx%12-15d, mvfx%16-19d, mvfx%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e200600, 0x0ff00f10, 
"cfmadda32%c\tmvax%5-7d, mvax%12-15d, mvfx%16-19d, mvfx%0-3d"}, - {ARM_CEXT_MAVERICK, 0x0e300600, 0x0ff00f10, "cfmsuba32%c\tmvax%5-7d, mvax%12-15d, mvfx%16-19d, mvfx%0-3d"}, - - /* Generic coprocessor instructions */ - {ARM_EXT_V2, 0x0c400000, 0x0ff00000, "mcrr%c\t%8-11d, %4-7d, %12-15r, %16-19r, cr%0-3d"}, - {ARM_EXT_V2, 0x0c500000, 0x0ff00000, "mrrc%c\t%8-11d, %4-7d, %12-15r, %16-19r, cr%0-3d"}, - {ARM_EXT_V2, 0x0e000000, 0x0f000010, "cdp%c\t%8-11d, %20-23d, cr%12-15d, cr%16-19d, cr%0-3d, {%5-7d}"}, - {ARM_EXT_V2, 0x0e100010, 0x0f100010, "mrc%c\t%8-11d, %21-23d, %12-15r, cr%16-19d, cr%0-3d, {%5-7d}"}, - {ARM_EXT_V2, 0x0e000010, 0x0f100010, "mcr%c\t%8-11d, %21-23d, %12-15r, cr%16-19d, cr%0-3d, {%5-7d}"}, - {ARM_EXT_V2, 0x0c000000, 0x0e100000, "stc%22'l%c\t%8-11d, cr%12-15d, %A"}, - {ARM_EXT_V2, 0x0c100000, 0x0e100000, "ldc%22'l%c\t%8-11d, cr%12-15d, %A"}, - - /* V6 coprocessor instructions */ - {ARM_EXT_V6, 0xfc500000, 0xfff00000, "mrrc2%c\t%8-11d, %4-7d, %12-15r, %16-19r, cr%0-3d"}, - {ARM_EXT_V6, 0xfc400000, 0xfff00000, "mcrr2%c\t%8-11d, %4-7d, %12-15r, %16-19r, cr%0-3d"}, - - /* V5 coprocessor instructions */ - {ARM_EXT_V5, 0xfc100000, 0xfe100000, "ldc2%22'l%c\t%8-11d, cr%12-15d, %A"}, - {ARM_EXT_V5, 0xfc000000, 0xfe100000, "stc2%22'l%c\t%8-11d, cr%12-15d, %A"}, - {ARM_EXT_V5, 0xfe000000, 0xff000010, "cdp2%c\t%8-11d, %20-23d, cr%12-15d, cr%16-19d, cr%0-3d, {%5-7d}"}, - {ARM_EXT_V5, 0xfe000010, 0xff100010, "mcr2%c\t%8-11d, %21-23d, %12-15r, cr%16-19d, cr%0-3d, {%5-7d}"}, - {ARM_EXT_V5, 0xfe100010, 0xff100010, "mrc2%c\t%8-11d, %21-23d, %12-15r, cr%16-19d, cr%0-3d, {%5-7d}"}, - - {0, 0, 0, 0} -}; - -/* Neon opcode table: This does not encode the top byte -- that is - checked by the print_insn_neon routine, as it depends on whether we are - doing thumb32 or arm32 disassembly. */ - -/* print_insn_neon recognizes the following format control codes: - - %% % - - %c print condition code - %A print v{st,ld}[1234] operands - %B print v{st,ld}[1234] any one operands - %C print v{st,ld}[1234] single->all operands - %D print scalar - %E print vmov, vmvn, vorr, vbic encoded constant - %F print vtbl,vtbx register list - - %r print as an ARM register - %d print the bitfield in decimal - %e print the 2^N - bitfield in decimal - %D print as a NEON D register - %Q print as a NEON Q register - %R print as a NEON D or Q register - %Sn print byte scaled width limited by n - %Tn print short scaled width limited by n - %Un print long scaled width limited by n - - %'c print specified char iff bitfield is all ones - %`c print specified char iff bitfield is all zeroes - %?ab... 
select from array of values in big endian order */ - -static const struct opcode32 neon_opcodes[] = -{ - /* Extract */ - {FPU_NEON_EXT_V1, 0xf2b00840, 0xffb00850, "vext%c.8\t%12-15,22R, %16-19,7R, %0-3,5R, #%8-11d"}, - {FPU_NEON_EXT_V1, 0xf2b00000, 0xffb00810, "vext%c.8\t%12-15,22R, %16-19,7R, %0-3,5R, #%8-11d"}, - - /* Move data element to all lanes */ - {FPU_NEON_EXT_V1, 0xf3b40c00, 0xffb70f90, "vdup%c.32\t%12-15,22R, %0-3,5D[%19d]"}, - {FPU_NEON_EXT_V1, 0xf3b20c00, 0xffb30f90, "vdup%c.16\t%12-15,22R, %0-3,5D[%18-19d]"}, - {FPU_NEON_EXT_V1, 0xf3b10c00, 0xffb10f90, "vdup%c.8\t%12-15,22R, %0-3,5D[%17-19d]"}, - - /* Table lookup */ - {FPU_NEON_EXT_V1, 0xf3b00800, 0xffb00c50, "vtbl%c.8\t%12-15,22D, %F, %0-3,5D"}, - {FPU_NEON_EXT_V1, 0xf3b00840, 0xffb00c50, "vtbx%c.8\t%12-15,22D, %F, %0-3,5D"}, - - /* Two registers, miscellaneous */ - {FPU_NEON_EXT_V1, 0xf2880a10, 0xfebf0fd0, "vmovl%c.%24?us8\t%12-15,22Q, %0-3,5D"}, - {FPU_NEON_EXT_V1, 0xf2900a10, 0xfebf0fd0, "vmovl%c.%24?us16\t%12-15,22Q, %0-3,5D"}, - {FPU_NEON_EXT_V1, 0xf2a00a10, 0xfebf0fd0, "vmovl%c.%24?us32\t%12-15,22Q, %0-3,5D"}, - {FPU_NEON_EXT_V1, 0xf3b00500, 0xffbf0f90, "vcnt%c.8\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3b00580, 0xffbf0f90, "vmvn%c\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3b20000, 0xffbf0f90, "vswp%c\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3b20200, 0xffb30fd0, "vmovn%c.i%18-19T2\t%12-15,22D, %0-3,5Q"}, - {FPU_NEON_EXT_V1, 0xf3b20240, 0xffb30fd0, "vqmovun%c.s%18-19T2\t%12-15,22D, %0-3,5Q"}, - {FPU_NEON_EXT_V1, 0xf3b20280, 0xffb30fd0, "vqmovn%c.s%18-19T2\t%12-15,22D, %0-3,5Q"}, - {FPU_NEON_EXT_V1, 0xf3b202c0, 0xffb30fd0, "vqmovn%c.u%18-19T2\t%12-15,22D, %0-3,5Q"}, - {FPU_NEON_EXT_V1, 0xf3b20300, 0xffb30fd0, "vshll%c.i%18-19S2\t%12-15,22Q, %0-3,5D, #%18-19S2"}, - {FPU_NEON_EXT_V1, 0xf3bb0400, 0xffbf0e90, "vrecpe%c.%8?fu%18-19S2\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3bb0480, 0xffbf0e90, "vrsqrte%c.%8?fu%18-19S2\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3b00000, 0xffb30f90, "vrev64%c.%18-19S2\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3b00080, 0xffb30f90, "vrev32%c.%18-19S2\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3b00100, 0xffb30f90, "vrev16%c.%18-19S2\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3b00400, 0xffb30f90, "vcls%c.s%18-19S2\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3b00480, 0xffb30f90, "vclz%c.i%18-19S2\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3b00700, 0xffb30f90, "vqabs%c.s%18-19S2\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3b00780, 0xffb30f90, "vqneg%c.s%18-19S2\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3b20080, 0xffb30f90, "vtrn%c.%18-19S2\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3b20100, 0xffb30f90, "vuzp%c.%18-19S2\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3b20180, 0xffb30f90, "vzip%c.%18-19S2\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3b10000, 0xffb30b90, "vcgt%c.%10?fs%18-19S2\t%12-15,22R, %0-3,5R, #0"}, - {FPU_NEON_EXT_V1, 0xf3b10080, 0xffb30b90, "vcge%c.%10?fs%18-19S2\t%12-15,22R, %0-3,5R, #0"}, - {FPU_NEON_EXT_V1, 0xf3b10100, 0xffb30b90, "vceq%c.%10?fi%18-19S2\t%12-15,22R, %0-3,5R, #0"}, - {FPU_NEON_EXT_V1, 0xf3b10180, 0xffb30b90, "vcle%c.%10?fs%18-19S2\t%12-15,22R, %0-3,5R, #0"}, - {FPU_NEON_EXT_V1, 0xf3b10200, 0xffb30b90, "vclt%c.%10?fs%18-19S2\t%12-15,22R, %0-3,5R, #0"}, - {FPU_NEON_EXT_V1, 0xf3b10300, 0xffb30b90, "vabs%c.%10?fs%18-19S2\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3b10380, 0xffb30b90, "vneg%c.%10?fs%18-19S2\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3b00200, 0xffb30f10, 
"vpaddl%c.%7?us%18-19S2\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3b00600, 0xffb30f10, "vpadal%c.%7?us%18-19S2\t%12-15,22R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3b30600, 0xffb30e10, "vcvt%c.%7-8?usff%18-19Sa.%7-8?ffus%18-19Sa\t%12-15,22R, %0-3,5R"}, - - /* Three registers of the same length */ - {FPU_NEON_EXT_V1, 0xf2000110, 0xffb00f10, "vand%c\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2100110, 0xffb00f10, "vbic%c\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2200110, 0xffb00f10, "vorr%c\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2300110, 0xffb00f10, "vorn%c\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3000110, 0xffb00f10, "veor%c\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3100110, 0xffb00f10, "vbsl%c\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3200110, 0xffb00f10, "vbit%c\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3300110, 0xffb00f10, "vbif%c\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000d00, 0xffa00f10, "vadd%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000d10, 0xffa00f10, "vmla%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000e00, 0xffa00f10, "vceq%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000f00, 0xffa00f10, "vmax%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000f10, 0xffa00f10, "vrecps%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2200d00, 0xffa00f10, "vsub%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2200d10, 0xffa00f10, "vmls%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2200f00, 0xffa00f10, "vmin%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2200f10, 0xffa00f10, "vrsqrts%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3000d00, 0xffa00f10, "vpadd%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3000d10, 0xffa00f10, "vmul%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3000e00, 0xffa00f10, "vcge%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3000e10, 0xffa00f10, "vacge%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3000f00, 0xffa00f10, "vpmax%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3200d00, 0xffa00f10, "vabd%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3200e00, 0xffa00f10, "vcgt%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3200e10, 0xffa00f10, "vacgt%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3200f00, 0xffa00f10, "vpmin%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000800, 0xff800f10, "vadd%c.i%20-21S3\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000810, 0xff800f10, "vtst%c.%20-21S2\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000900, 0xff800f10, "vmla%c.i%20-21S2\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000b00, 0xff800f10, "vqdmulh%c.s%20-21S6\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000b10, 0xff800f10, "vpadd%c.i%20-21S2\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3000800, 0xff800f10, "vsub%c.i%20-21S3\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3000810, 0xff800f10, "vceq%c.i%20-21S2\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3000900, 0xff800f10, "vmls%c.i%20-21S2\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf3000b00, 0xff800f10, "vqrdmulh%c.s%20-21S6\t%12-15,22R, 
%16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000000, 0xfe800f10, "vhadd%c.%24?us%20-21S2\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000010, 0xfe800f10, "vqadd%c.%24?us%20-21S3\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000100, 0xfe800f10, "vrhadd%c.%24?us%20-21S2\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000200, 0xfe800f10, "vhsub%c.%24?us%20-21S2\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000210, 0xfe800f10, "vqsub%c.%24?us%20-21S3\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000300, 0xfe800f10, "vcgt%c.%24?us%20-21S2\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000310, 0xfe800f10, "vcge%c.%24?us%20-21S2\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000400, 0xfe800f10, "vshl%c.%24?us%20-21S3\t%12-15,22R, %0-3,5R, %16-19,7R"}, - {FPU_NEON_EXT_V1, 0xf2000410, 0xfe800f10, "vqshl%c.%24?us%20-21S3\t%12-15,22R, %0-3,5R, %16-19,7R"}, - {FPU_NEON_EXT_V1, 0xf2000500, 0xfe800f10, "vrshl%c.%24?us%20-21S3\t%12-15,22R, %0-3,5R, %16-19,7R"}, - {FPU_NEON_EXT_V1, 0xf2000510, 0xfe800f10, "vqrshl%c.%24?us%20-21S3\t%12-15,22R, %0-3,5R, %16-19,7R"}, - {FPU_NEON_EXT_V1, 0xf2000600, 0xfe800f10, "vmax%c.%24?us%20-21S2\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000610, 0xfe800f10, "vmin%c.%24?us%20-21S2\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000700, 0xfe800f10, "vabd%c.%24?us%20-21S2\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000710, 0xfe800f10, "vaba%c.%24?us%20-21S2\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000910, 0xfe800f10, "vmul%c.%24?pi%20-21S2\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000a00, 0xfe800f10, "vpmax%c.%24?us%20-21S2\t%12-15,22R, %16-19,7R, %0-3,5R"}, - {FPU_NEON_EXT_V1, 0xf2000a10, 0xfe800f10, "vpmin%c.%24?us%20-21S2\t%12-15,22R, %16-19,7R, %0-3,5R"}, - - /* One register and an immediate value */ - {FPU_NEON_EXT_V1, 0xf2800e10, 0xfeb80fb0, "vmov%c.i8\t%12-15,22R, %E"}, - {FPU_NEON_EXT_V1, 0xf2800e30, 0xfeb80fb0, "vmov%c.i64\t%12-15,22R, %E"}, - {FPU_NEON_EXT_V1, 0xf2800f10, 0xfeb80fb0, "vmov%c.f32\t%12-15,22R, %E"}, - {FPU_NEON_EXT_V1, 0xf2800810, 0xfeb80db0, "vmov%c.i16\t%12-15,22R, %E"}, - {FPU_NEON_EXT_V1, 0xf2800830, 0xfeb80db0, "vmvn%c.i16\t%12-15,22R, %E"}, - {FPU_NEON_EXT_V1, 0xf2800910, 0xfeb80db0, "vorr%c.i16\t%12-15,22R, %E"}, - {FPU_NEON_EXT_V1, 0xf2800930, 0xfeb80db0, "vbic%c.i16\t%12-15,22R, %E"}, - {FPU_NEON_EXT_V1, 0xf2800c10, 0xfeb80eb0, "vmov%c.i32\t%12-15,22R, %E"}, - {FPU_NEON_EXT_V1, 0xf2800c30, 0xfeb80eb0, "vmvn%c.i32\t%12-15,22R, %E"}, - {FPU_NEON_EXT_V1, 0xf2800110, 0xfeb809b0, "vorr%c.i32\t%12-15,22R, %E"}, - {FPU_NEON_EXT_V1, 0xf2800130, 0xfeb809b0, "vbic%c.i32\t%12-15,22R, %E"}, - {FPU_NEON_EXT_V1, 0xf2800010, 0xfeb808b0, "vmov%c.i32\t%12-15,22R, %E"}, - {FPU_NEON_EXT_V1, 0xf2800030, 0xfeb808b0, "vmvn%c.i32\t%12-15,22R, %E"}, - - /* Two registers and a shift amount */ - {FPU_NEON_EXT_V1, 0xf2880810, 0xffb80fd0, "vshrn%c.i16\t%12-15,22D, %0-3,5Q, #%16-18e"}, - {FPU_NEON_EXT_V1, 0xf2880850, 0xffb80fd0, "vrshrn%c.i16\t%12-15,22D, %0-3,5Q, #%16-18e"}, - {FPU_NEON_EXT_V1, 0xf2880810, 0xfeb80fd0, "vqshrun%c.s16\t%12-15,22D, %0-3,5Q, #%16-18e"}, - {FPU_NEON_EXT_V1, 0xf2880850, 0xfeb80fd0, "vqrshrun%c.s16\t%12-15,22D, %0-3,5Q, #%16-18e"}, - {FPU_NEON_EXT_V1, 0xf2880910, 0xfeb80fd0, "vqshrn%c.%24?us16\t%12-15,22D, %0-3,5Q, #%16-18e"}, - {FPU_NEON_EXT_V1, 0xf2880950, 0xfeb80fd0, "vqrshrn%c.%24?us16\t%12-15,22D, %0-3,5Q, #%16-18e"}, - {FPU_NEON_EXT_V1, 0xf2880a10, 0xfeb80fd0, 
"vshll%c.%24?us8\t%12-15,22D, %0-3,5Q, #%16-18d"}, - {FPU_NEON_EXT_V1, 0xf2900810, 0xffb00fd0, "vshrn%c.i32\t%12-15,22D, %0-3,5Q, #%16-19e"}, - {FPU_NEON_EXT_V1, 0xf2900850, 0xffb00fd0, "vrshrn%c.i32\t%12-15,22D, %0-3,5Q, #%16-19e"}, - {FPU_NEON_EXT_V1, 0xf2880510, 0xffb80f90, "vshl%c.%24?us8\t%12-15,22R, %0-3,5R, #%16-18d"}, - {FPU_NEON_EXT_V1, 0xf3880410, 0xffb80f90, "vsri%c.8\t%12-15,22R, %0-3,5R, #%16-18e"}, - {FPU_NEON_EXT_V1, 0xf3880510, 0xffb80f90, "vsli%c.8\t%12-15,22R, %0-3,5R, #%16-18d"}, - {FPU_NEON_EXT_V1, 0xf3880610, 0xffb80f90, "vqshlu%c.s8\t%12-15,22R, %0-3,5R, #%16-18d"}, - {FPU_NEON_EXT_V1, 0xf2900810, 0xfeb00fd0, "vqshrun%c.s32\t%12-15,22D, %0-3,5Q, #%16-19e"}, - {FPU_NEON_EXT_V1, 0xf2900850, 0xfeb00fd0, "vqrshrun%c.s32\t%12-15,22D, %0-3,5Q, #%16-19e"}, - {FPU_NEON_EXT_V1, 0xf2900910, 0xfeb00fd0, "vqshrn%c.%24?us32\t%12-15,22D, %0-3,5Q, #%16-19e"}, - {FPU_NEON_EXT_V1, 0xf2900950, 0xfeb00fd0, "vqrshrn%c.%24?us32\t%12-15,22D, %0-3,5Q, #%16-19e"}, - {FPU_NEON_EXT_V1, 0xf2900a10, 0xfeb00fd0, "vshll%c.%24?us16\t%12-15,22D, %0-3,5Q, #%16-19d"}, - {FPU_NEON_EXT_V1, 0xf2880010, 0xfeb80f90, "vshr%c.%24?us8\t%12-15,22R, %0-3,5R, #%16-18e"}, - {FPU_NEON_EXT_V1, 0xf2880110, 0xfeb80f90, "vsra%c.%24?us8\t%12-15,22R, %0-3,5R, #%16-18e"}, - {FPU_NEON_EXT_V1, 0xf2880210, 0xfeb80f90, "vrshr%c.%24?us8\t%12-15,22R, %0-3,5R, #%16-18e"}, - {FPU_NEON_EXT_V1, 0xf2880310, 0xfeb80f90, "vrsra%c.%24?us8\t%12-15,22R, %0-3,5R, #%16-18e"}, - {FPU_NEON_EXT_V1, 0xf2880710, 0xfeb80f90, "vqshl%c.%24?us8\t%12-15,22R, %0-3,5R, #%16-18d"}, - {FPU_NEON_EXT_V1, 0xf2a00810, 0xffa00fd0, "vshrn%c.i64\t%12-15,22D, %0-3,5Q, #%16-20e"}, - {FPU_NEON_EXT_V1, 0xf2a00850, 0xffa00fd0, "vrshrn%c.i64\t%12-15,22D, %0-3,5Q, #%16-20e"}, - {FPU_NEON_EXT_V1, 0xf2900510, 0xffb00f90, "vshl%c.%24?us16\t%12-15,22R, %0-3,5R, #%16-19d"}, - {FPU_NEON_EXT_V1, 0xf3900410, 0xffb00f90, "vsri%c.16\t%12-15,22R, %0-3,5R, #%16-19e"}, - {FPU_NEON_EXT_V1, 0xf3900510, 0xffb00f90, "vsli%c.16\t%12-15,22R, %0-3,5R, #%16-19d"}, - {FPU_NEON_EXT_V1, 0xf3900610, 0xffb00f90, "vqshlu%c.s16\t%12-15,22R, %0-3,5R, #%16-19d"}, - {FPU_NEON_EXT_V1, 0xf2a00a10, 0xfea00fd0, "vshll%c.%24?us32\t%12-15,22D, %0-3,5Q, #%16-20d"}, - {FPU_NEON_EXT_V1, 0xf2900010, 0xfeb00f90, "vshr%c.%24?us16\t%12-15,22R, %0-3,5R, #%16-19e"}, - {FPU_NEON_EXT_V1, 0xf2900110, 0xfeb00f90, "vsra%c.%24?us16\t%12-15,22R, %0-3,5R, #%16-19e"}, - {FPU_NEON_EXT_V1, 0xf2900210, 0xfeb00f90, "vrshr%c.%24?us16\t%12-15,22R, %0-3,5R, #%16-19e"}, - {FPU_NEON_EXT_V1, 0xf2900310, 0xfeb00f90, "vrsra%c.%24?us16\t%12-15,22R, %0-3,5R, #%16-19e"}, - {FPU_NEON_EXT_V1, 0xf2900710, 0xfeb00f90, "vqshl%c.%24?us16\t%12-15,22R, %0-3,5R, #%16-19d"}, - {FPU_NEON_EXT_V1, 0xf2800810, 0xfec00fd0, "vqshrun%c.s64\t%12-15,22D, %0-3,5Q, #%16-20e"}, - {FPU_NEON_EXT_V1, 0xf2800850, 0xfec00fd0, "vqrshrun%c.s64\t%12-15,22D, %0-3,5Q, #%16-20e"}, - {FPU_NEON_EXT_V1, 0xf2800910, 0xfec00fd0, "vqshrn%c.%24?us64\t%12-15,22D, %0-3,5Q, #%16-20e"}, - {FPU_NEON_EXT_V1, 0xf2800950, 0xfec00fd0, "vqrshrn%c.%24?us64\t%12-15,22D, %0-3,5Q, #%16-20e"}, - {FPU_NEON_EXT_V1, 0xf2a00510, 0xffa00f90, "vshl%c.%24?us32\t%12-15,22R, %0-3,5R, #%16-20d"}, - {FPU_NEON_EXT_V1, 0xf3a00410, 0xffa00f90, "vsri%c.32\t%12-15,22R, %0-3,5R, #%16-20e"}, - {FPU_NEON_EXT_V1, 0xf3a00510, 0xffa00f90, "vsli%c.32\t%12-15,22R, %0-3,5R, #%16-20d"}, - {FPU_NEON_EXT_V1, 0xf3a00610, 0xffa00f90, "vqshlu%c.s32\t%12-15,22R, %0-3,5R, #%16-20d"}, - {FPU_NEON_EXT_V1, 0xf2a00010, 0xfea00f90, "vshr%c.%24?us32\t%12-15,22R, %0-3,5R, #%16-20e"}, - {FPU_NEON_EXT_V1, 0xf2a00110, 0xfea00f90, 
"vsra%c.%24?us32\t%12-15,22R, %0-3,5R, #%16-20e"}, - {FPU_NEON_EXT_V1, 0xf2a00210, 0xfea00f90, "vrshr%c.%24?us32\t%12-15,22R, %0-3,5R, #%16-20e"}, - {FPU_NEON_EXT_V1, 0xf2a00310, 0xfea00f90, "vrsra%c.%24?us32\t%12-15,22R, %0-3,5R, #%16-20e"}, - {FPU_NEON_EXT_V1, 0xf2a00710, 0xfea00f90, "vqshl%c.%24?us32\t%12-15,22R, %0-3,5R, #%16-20d"}, - {FPU_NEON_EXT_V1, 0xf2800590, 0xff800f90, "vshl%c.%24?us64\t%12-15,22R, %0-3,5R, #%16-21d"}, - {FPU_NEON_EXT_V1, 0xf3800490, 0xff800f90, "vsri%c.64\t%12-15,22R, %0-3,5R, #%16-21e"}, - {FPU_NEON_EXT_V1, 0xf3800590, 0xff800f90, "vsli%c.64\t%12-15,22R, %0-3,5R, #%16-21d"}, - {FPU_NEON_EXT_V1, 0xf3800690, 0xff800f90, "vqshlu%c.s64\t%12-15,22R, %0-3,5R, #%16-21d"}, - {FPU_NEON_EXT_V1, 0xf2800090, 0xfe800f90, "vshr%c.%24?us64\t%12-15,22R, %0-3,5R, #%16-21e"}, - {FPU_NEON_EXT_V1, 0xf2800190, 0xfe800f90, "vsra%c.%24?us64\t%12-15,22R, %0-3,5R, #%16-21e"}, - {FPU_NEON_EXT_V1, 0xf2800290, 0xfe800f90, "vrshr%c.%24?us64\t%12-15,22R, %0-3,5R, #%16-21e"}, - {FPU_NEON_EXT_V1, 0xf2800390, 0xfe800f90, "vrsra%c.%24?us64\t%12-15,22R, %0-3,5R, #%16-21e"}, - {FPU_NEON_EXT_V1, 0xf2800790, 0xfe800f90, "vqshl%c.%24?us64\t%12-15,22R, %0-3,5R, #%16-21d"}, - {FPU_NEON_EXT_V1, 0xf2a00e10, 0xfea00e90, "vcvt%c.%24,8?usff32.%24,8?ffus32\t%12-15,22R, %0-3,5R, #%16-20e"}, - - /* Three registers of different lengths */ - {FPU_NEON_EXT_V1, 0xf2800e00, 0xfea00f50, "vmull%c.p%20S0\t%12-15,22Q, %16-19,7D, %0-3,5D"}, - {FPU_NEON_EXT_V1, 0xf2800400, 0xff800f50, "vaddhn%c.i%20-21T2\t%12-15,22D, %16-19,7Q, %0-3,5Q"}, - {FPU_NEON_EXT_V1, 0xf2800600, 0xff800f50, "vsubhn%c.i%20-21T2\t%12-15,22D, %16-19,7Q, %0-3,5Q"}, - {FPU_NEON_EXT_V1, 0xf2800900, 0xff800f50, "vqdmlal%c.s%20-21S6\t%12-15,22Q, %16-19,7D, %0-3,5D"}, - {FPU_NEON_EXT_V1, 0xf2800b00, 0xff800f50, "vqdmlsl%c.s%20-21S6\t%12-15,22Q, %16-19,7D, %0-3,5D"}, - {FPU_NEON_EXT_V1, 0xf2800d00, 0xff800f50, "vqdmull%c.s%20-21S6\t%12-15,22Q, %16-19,7D, %0-3,5D"}, - {FPU_NEON_EXT_V1, 0xf3800400, 0xff800f50, "vraddhn%c.i%20-21T2\t%12-15,22D, %16-19,7Q, %0-3,5Q"}, - {FPU_NEON_EXT_V1, 0xf3800600, 0xff800f50, "vrsubhn%c.i%20-21T2\t%12-15,22D, %16-19,7Q, %0-3,5Q"}, - {FPU_NEON_EXT_V1, 0xf2800000, 0xfe800f50, "vaddl%c.%24?us%20-21S2\t%12-15,22Q, %16-19,7D, %0-3,5D"}, - {FPU_NEON_EXT_V1, 0xf2800100, 0xfe800f50, "vaddw%c.%24?us%20-21S2\t%12-15,22Q, %16-19,7Q, %0-3,5D"}, - {FPU_NEON_EXT_V1, 0xf2800200, 0xfe800f50, "vsubl%c.%24?us%20-21S2\t%12-15,22Q, %16-19,7D, %0-3,5D"}, - {FPU_NEON_EXT_V1, 0xf2800300, 0xfe800f50, "vsubw%c.%24?us%20-21S2\t%12-15,22Q, %16-19,7Q, %0-3,5D"}, - {FPU_NEON_EXT_V1, 0xf2800500, 0xfe800f50, "vabal%c.%24?us%20-21S2\t%12-15,22Q, %16-19,7D, %0-3,5D"}, - {FPU_NEON_EXT_V1, 0xf2800700, 0xfe800f50, "vabdl%c.%24?us%20-21S2\t%12-15,22Q, %16-19,7D, %0-3,5D"}, - {FPU_NEON_EXT_V1, 0xf2800800, 0xfe800f50, "vmlal%c.%24?us%20-21S2\t%12-15,22Q, %16-19,7D, %0-3,5D"}, - {FPU_NEON_EXT_V1, 0xf2800a00, 0xfe800f50, "vmlsl%c.%24?us%20-21S2\t%12-15,22Q, %16-19,7D, %0-3,5D"}, - {FPU_NEON_EXT_V1, 0xf2800c00, 0xfe800f50, "vmull%c.%24?us%20-21S2\t%12-15,22Q, %16-19,7D, %0-3,5D"}, - - /* Two registers and a scalar */ - {FPU_NEON_EXT_V1, 0xf2800040, 0xff800f50, "vmla%c.i%20-21S6\t%12-15,22D, %16-19,7D, %D"}, - {FPU_NEON_EXT_V1, 0xf2800140, 0xff800f50, "vmla%c.f%20-21Sa\t%12-15,22D, %16-19,7D, %D"}, - {FPU_NEON_EXT_V1, 0xf2800340, 0xff800f50, "vqdmlal%c.s%20-21S6\t%12-15,22Q, %16-19,7D, %D"}, - {FPU_NEON_EXT_V1, 0xf2800440, 0xff800f50, "vmls%c.i%20-21S6\t%12-15,22D, %16-19,7D, %D"}, - {FPU_NEON_EXT_V1, 0xf2800540, 0xff800f50, "vmls%c.f%20-21S6\t%12-15,22D, %16-19,7D, 
%D"}, - {FPU_NEON_EXT_V1, 0xf2800740, 0xff800f50, "vqdmlsl%c.s%20-21S6\t%12-15,22Q, %16-19,7D, %D"}, - {FPU_NEON_EXT_V1, 0xf2800840, 0xff800f50, "vmul%c.i%20-21S6\t%12-15,22D, %16-19,7D, %D"}, - {FPU_NEON_EXT_V1, 0xf2800940, 0xff800f50, "vmul%c.f%20-21Sa\t%12-15,22D, %16-19,7D, %D"}, - {FPU_NEON_EXT_V1, 0xf2800b40, 0xff800f50, "vqdmull%c.s%20-21S6\t%12-15,22Q, %16-19,7D, %D"}, - {FPU_NEON_EXT_V1, 0xf2800c40, 0xff800f50, "vqdmulh%c.s%20-21S6\t%12-15,22D, %16-19,7D, %D"}, - {FPU_NEON_EXT_V1, 0xf2800d40, 0xff800f50, "vqrdmulh%c.s%20-21S6\t%12-15,22D, %16-19,7D, %D"}, - {FPU_NEON_EXT_V1, 0xf3800040, 0xff800f50, "vmla%c.i%20-21S6\t%12-15,22Q, %16-19,7Q, %D"}, - {FPU_NEON_EXT_V1, 0xf3800140, 0xff800f50, "vmla%c.f%20-21Sa\t%12-15,22Q, %16-19,7Q, %D"}, - {FPU_NEON_EXT_V1, 0xf3800440, 0xff800f50, "vmls%c.i%20-21S6\t%12-15,22Q, %16-19,7Q, %D"}, - {FPU_NEON_EXT_V1, 0xf3800540, 0xff800f50, "vmls%c.f%20-21Sa\t%12-15,22Q, %16-19,7Q, %D"}, - {FPU_NEON_EXT_V1, 0xf3800840, 0xff800f50, "vmul%c.i%20-21S6\t%12-15,22Q, %16-19,7Q, %D"}, - {FPU_NEON_EXT_V1, 0xf3800940, 0xff800f50, "vmul%c.f%20-21Sa\t%12-15,22Q, %16-19,7Q, %D"}, - {FPU_NEON_EXT_V1, 0xf3800c40, 0xff800f50, "vqdmulh%c.s%20-21S6\t%12-15,22Q, %16-19,7Q, %D"}, - {FPU_NEON_EXT_V1, 0xf3800d40, 0xff800f50, "vqrdmulh%c.s%20-21S6\t%12-15,22Q, %16-19,7Q, %D"}, - {FPU_NEON_EXT_V1, 0xf2800240, 0xfe800f50, "vmlal%c.%24?us%20-21S6\t%12-15,22Q, %16-19,7D, %D"}, - {FPU_NEON_EXT_V1, 0xf2800640, 0xfe800f50, "vmlsl%c.%24?us%20-21S6\t%12-15,22Q, %16-19,7D, %D"}, - {FPU_NEON_EXT_V1, 0xf2800a40, 0xfe800f50, "vmull%c.%24?us%20-21S6\t%12-15,22Q, %16-19,7D, %D"}, - - /* Element and structure load/store */ - {FPU_NEON_EXT_V1, 0xf4a00fc0, 0xffb00fc0, "vld4%c.32\t%C"}, - {FPU_NEON_EXT_V1, 0xf4a00c00, 0xffb00f00, "vld1%c.%6-7S2\t%C"}, - {FPU_NEON_EXT_V1, 0xf4a00d00, 0xffb00f00, "vld2%c.%6-7S2\t%C"}, - {FPU_NEON_EXT_V1, 0xf4a00e00, 0xffb00f00, "vld3%c.%6-7S2\t%C"}, - {FPU_NEON_EXT_V1, 0xf4a00f00, 0xffb00f00, "vld4%c.%6-7S2\t%C"}, - {FPU_NEON_EXT_V1, 0xf4000200, 0xff900f00, "v%21?ls%21?dt1%c.%6-7S3\t%A"}, - {FPU_NEON_EXT_V1, 0xf4000300, 0xff900f00, "v%21?ls%21?dt2%c.%6-7S2\t%A"}, - {FPU_NEON_EXT_V1, 0xf4000400, 0xff900f00, "v%21?ls%21?dt3%c.%6-7S2\t%A"}, - {FPU_NEON_EXT_V1, 0xf4000500, 0xff900f00, "v%21?ls%21?dt3%c.%6-7S2\t%A"}, - {FPU_NEON_EXT_V1, 0xf4000600, 0xff900f00, "v%21?ls%21?dt1%c.%6-7S3\t%A"}, - {FPU_NEON_EXT_V1, 0xf4000700, 0xff900f00, "v%21?ls%21?dt1%c.%6-7S3\t%A"}, - {FPU_NEON_EXT_V1, 0xf4000800, 0xff900f00, "v%21?ls%21?dt2%c.%6-7S2\t%A"}, - {FPU_NEON_EXT_V1, 0xf4000900, 0xff900f00, "v%21?ls%21?dt2%c.%6-7S2\t%A"}, - {FPU_NEON_EXT_V1, 0xf4000a00, 0xff900f00, "v%21?ls%21?dt1%c.%6-7S3\t%A"}, - {FPU_NEON_EXT_V1, 0xf4000000, 0xff900e00, "v%21?ls%21?dt4%c.%6-7S2\t%A"}, - {FPU_NEON_EXT_V1, 0xf4800000, 0xff900300, "v%21?ls%21?dt1%c.%10-11S2\t%B"}, - {FPU_NEON_EXT_V1, 0xf4800100, 0xff900300, "v%21?ls%21?dt2%c.%10-11S2\t%B"}, - {FPU_NEON_EXT_V1, 0xf4800200, 0xff900300, "v%21?ls%21?dt3%c.%10-11S2\t%B"}, - {FPU_NEON_EXT_V1, 0xf4800300, 0xff900300, "v%21?ls%21?dt4%c.%10-11S2\t%B"}, - - {0,0 ,0, 0} -}; - -/* Opcode tables: ARM, 16-bit Thumb, 32-bit Thumb. All three are partially - ordered: they must be searched linearly from the top to obtain a correct - match. 
*/
-
-/* print_insn_arm recognizes the following format control codes:
-
- %% %
-
- %a print address for ldr/str instruction
- %s print address for ldr/str halfword/signextend instruction
- %b print branch destination
- %c print condition code (always bits 28-31)
- %m print register mask for ldm/stm instruction
- %o print operand2 (immediate or register + shift)
- %p print 'p' iff bits 12-15 are 15
- %t print 't' iff bit 21 set and bit 24 clear
- %B print arm BLX(1) destination
- %C print the PSR sub type.
- %U print barrier type.
- %P print address for pli instruction.
-
- %r print as an ARM register
- %d print the bitfield in decimal
- %W print the bitfield plus one in decimal
- %x print the bitfield in hex
- %X print the bitfield as 1 hex digit without leading "0x"
-
- %'c print specified char iff bitfield is all ones
- %`c print specified char iff bitfield is all zeroes
- %?ab... select from array of values in big endian order
-
- %e print arm SMI operand (bits 0..7,8..19).
- %E print the LSB and WIDTH fields of a BFI or BFC instruction.
- %V print the 16-bit immediate field of a MOVT or MOVW instruction. */
-
-static const struct opcode32 arm_opcodes[] =
-{
- /* ARM instructions. */
- {ARM_EXT_V1, 0xe1a00000, 0xffffffff, "nop\t\t\t(mov r0,r0)"},
- {ARM_EXT_V4T | ARM_EXT_V5, 0x012FFF10, 0x0ffffff0, "bx%c\t%0-3r"},
- {ARM_EXT_V2, 0x00000090, 0x0fe000f0, "mul%20's%c\t%16-19r, %0-3r, %8-11r"},
- {ARM_EXT_V2, 0x00200090, 0x0fe000f0, "mla%20's%c\t%16-19r, %0-3r, %8-11r, %12-15r"},
- {ARM_EXT_V2S, 0x01000090, 0x0fb00ff0, "swp%22'b%c\t%12-15r, %0-3r, [%16-19r]"},
- {ARM_EXT_V3M, 0x00800090, 0x0fa000f0, "%22?sumull%20's%c\t%12-15r, %16-19r, %0-3r, %8-11r"},
- {ARM_EXT_V3M, 0x00a00090, 0x0fa000f0, "%22?sumlal%20's%c\t%12-15r, %16-19r, %0-3r, %8-11r"},
-
- /* IDIV instructions. */
- {ARM_EXT_DIV, 0x0710f010, 0x0ff0f0f0, "sdiv%c\t%16-19r, %0-3r, %8-11r"},
- {ARM_EXT_DIV, 0x0730f010, 0x0ff0f0f0, "udiv%c\t%16-19r, %0-3r, %8-11r"},
-
- /* V7 instructions. */
- {ARM_EXT_V7, 0xf450f000, 0xfd70f000, "pli\t%P"},
- {ARM_EXT_V7, 0x0320f0f0, 0x0ffffff0, "dbg%c\t#%0-3d"},
- {ARM_EXT_V7, 0xf57ff050, 0xfffffff0, "dmb\t%U"},
- {ARM_EXT_V7, 0xf57ff040, 0xfffffff0, "dsb\t%U"},
- {ARM_EXT_V7, 0xf57ff060, 0xfffffff0, "isb\t%U"},
-
- /* ARM V6T2 instructions. */
- {ARM_EXT_V6T2, 0x07c0001f, 0x0fe0007f, "bfc%c\t%12-15r, %E"},
- {ARM_EXT_V6T2, 0x07c00010, 0x0fe00070, "bfi%c\t%12-15r, %0-3r, %E"},
- {ARM_EXT_V6T2, 0x00600090, 0x0ff000f0, "mls%c\t%16-19r, %0-3r, %8-11r, %12-15r"},
- {ARM_EXT_V6T2, 0x006000b0, 0x0f7000f0, "strht%c\t%12-15r, %s"},
- {ARM_EXT_V6T2, 0x00300090, 0x0f300090, "ldr%6's%5?hbt%c\t%12-15r, %s"},
- {ARM_EXT_V6T2, 0x03000000, 0x0ff00000, "movw%c\t%12-15r, %V"},
- {ARM_EXT_V6T2, 0x03400000, 0x0ff00000, "movt%c\t%12-15r, %V"},
- {ARM_EXT_V6T2, 0x06ff0f30, 0x0fff0ff0, "rbit%c\t%12-15r, %0-3r"},
- {ARM_EXT_V6T2, 0x07a00050, 0x0fa00070, "%22?usbfx%c\t%12-15r, %0-3r, #%7-11d, #%16-20W"},
-
- /* ARM V6Z instructions. */
- {ARM_EXT_V6Z, 0x01600070, 0x0ff000f0, "smc%c\t%e"},
-
- /* ARM V6K instructions.
*/ - {ARM_EXT_V6K, 0xf57ff01f, 0xffffffff, "clrex"}, - {ARM_EXT_V6K, 0x01d00f9f, 0x0ff00fff, "ldrexb%c\t%12-15r, [%16-19r]"}, - {ARM_EXT_V6K, 0x01b00f9f, 0x0ff00fff, "ldrexd%c\t%12-15r, [%16-19r]"}, - {ARM_EXT_V6K, 0x01f00f9f, 0x0ff00fff, "ldrexh%c\t%12-15r, [%16-19r]"}, - {ARM_EXT_V6K, 0x01c00f90, 0x0ff00ff0, "strexb%c\t%12-15r, %0-3r, [%16-19r]"}, - {ARM_EXT_V6K, 0x01a00f90, 0x0ff00ff0, "strexd%c\t%12-15r, %0-3r, [%16-19r]"}, - {ARM_EXT_V6K, 0x01e00f90, 0x0ff00ff0, "strexh%c\t%12-15r, %0-3r, [%16-19r]"}, - - /* ARM V6K NOP hints. */ - {ARM_EXT_V6K, 0x0320f001, 0x0fffffff, "yield%c"}, - {ARM_EXT_V6K, 0x0320f002, 0x0fffffff, "wfe%c"}, - {ARM_EXT_V6K, 0x0320f003, 0x0fffffff, "wfi%c"}, - {ARM_EXT_V6K, 0x0320f004, 0x0fffffff, "sev%c"}, - {ARM_EXT_V6K, 0x0320f000, 0x0fffff00, "nop%c\t{%0-7d}"}, - - /* ARM V6 instructions. */ - {ARM_EXT_V6, 0xf1080000, 0xfffffe3f, "cpsie\t%8'a%7'i%6'f"}, - {ARM_EXT_V6, 0xf10a0000, 0xfffffe20, "cpsie\t%8'a%7'i%6'f,#%0-4d"}, - {ARM_EXT_V6, 0xf10C0000, 0xfffffe3f, "cpsid\t%8'a%7'i%6'f"}, - {ARM_EXT_V6, 0xf10e0000, 0xfffffe20, "cpsid\t%8'a%7'i%6'f,#%0-4d"}, - {ARM_EXT_V6, 0xf1000000, 0xfff1fe20, "cps\t#%0-4d"}, - {ARM_EXT_V6, 0x06800010, 0x0ff00ff0, "pkhbt%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06800010, 0x0ff00070, "pkhbt%c\t%12-15r, %16-19r, %0-3r, lsl #%7-11d"}, - {ARM_EXT_V6, 0x06800050, 0x0ff00ff0, "pkhtb%c\t%12-15r, %16-19r, %0-3r, asr #32"}, - {ARM_EXT_V6, 0x06800050, 0x0ff00070, "pkhtb%c\t%12-15r, %16-19r, %0-3r, asr #%7-11d"}, - {ARM_EXT_V6, 0x01900f9f, 0x0ff00fff, "ldrex%c\tr%12-15d, [%16-19r]"}, - {ARM_EXT_V6, 0x06200f10, 0x0ff00ff0, "qadd16%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06200f90, 0x0ff00ff0, "qadd8%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06200f30, 0x0ff00ff0, "qaddsubx%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06200f70, 0x0ff00ff0, "qsub16%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06200ff0, 0x0ff00ff0, "qsub8%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06200f50, 0x0ff00ff0, "qsubaddx%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06100f10, 0x0ff00ff0, "sadd16%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06100f90, 0x0ff00ff0, "sadd8%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06100f30, 0x0ff00ff0, "saddaddx%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06300f10, 0x0ff00ff0, "shadd16%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06300f90, 0x0ff00ff0, "shadd8%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06300f30, 0x0ff00ff0, "shaddsubx%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06300f70, 0x0ff00ff0, "shsub16%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06300ff0, 0x0ff00ff0, "shsub8%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06300f50, 0x0ff00ff0, "shsubaddx%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06100f70, 0x0ff00ff0, "ssub16%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06100ff0, 0x0ff00ff0, "ssub8%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06100f50, 0x0ff00ff0, "ssubaddx%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06500f10, 0x0ff00ff0, "uadd16%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06500f90, 0x0ff00ff0, "uadd8%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06500f30, 0x0ff00ff0, "uaddsubx%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06700f10, 0x0ff00ff0, "uhadd16%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06700f90, 0x0ff00ff0, "uhadd8%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06700f30, 0x0ff00ff0, "uhaddsubx%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06700f70, 0x0ff00ff0, "uhsub16%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06700ff0, 0x0ff00ff0, 
"uhsub8%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06700f50, 0x0ff00ff0, "uhsubaddx%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06600f10, 0x0ff00ff0, "uqadd16%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06600f90, 0x0ff00ff0, "uqadd8%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06600f30, 0x0ff00ff0, "uqaddsubx%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06600f70, 0x0ff00ff0, "uqsub16%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06600ff0, 0x0ff00ff0, "uqsub8%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06600f50, 0x0ff00ff0, "uqsubaddx%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06500f70, 0x0ff00ff0, "usub16%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06500ff0, 0x0ff00ff0, "usub8%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06500f50, 0x0ff00ff0, "usubaddx%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06bf0f30, 0x0fff0ff0, "rev%c\t\%12-15r, %0-3r"}, - {ARM_EXT_V6, 0x06bf0fb0, 0x0fff0ff0, "rev16%c\t\%12-15r, %0-3r"}, - {ARM_EXT_V6, 0x06ff0fb0, 0x0fff0ff0, "revsh%c\t\%12-15r, %0-3r"}, - {ARM_EXT_V6, 0xf8100a00, 0xfe50ffff, "rfe%23?id%24?ba\t\%16-19r%21'!"}, - {ARM_EXT_V6, 0x06bf0070, 0x0fff0ff0, "sxth%c\t%12-15r, %0-3r"}, - {ARM_EXT_V6, 0x06bf0470, 0x0fff0ff0, "sxth%c\t%12-15r, %0-3r, ror #8"}, - {ARM_EXT_V6, 0x06bf0870, 0x0fff0ff0, "sxth%c\t%12-15r, %0-3r, ror #16"}, - {ARM_EXT_V6, 0x06bf0c70, 0x0fff0ff0, "sxth%c\t%12-15r, %0-3r, ror #24"}, - {ARM_EXT_V6, 0x068f0070, 0x0fff0ff0, "sxtb16%c\t%12-15r, %0-3r"}, - {ARM_EXT_V6, 0x068f0470, 0x0fff0ff0, "sxtb16%c\t%12-15r, %0-3r, ror #8"}, - {ARM_EXT_V6, 0x068f0870, 0x0fff0ff0, "sxtb16%c\t%12-15r, %0-3r, ror #16"}, - {ARM_EXT_V6, 0x068f0c70, 0x0fff0ff0, "sxtb16%c\t%12-15r, %0-3r, ror #24"}, - {ARM_EXT_V6, 0x06af0070, 0x0fff0ff0, "sxtb%c\t%12-15r, %0-3r"}, - {ARM_EXT_V6, 0x06af0470, 0x0fff0ff0, "sxtb%c\t%12-15r, %0-3r, ror #8"}, - {ARM_EXT_V6, 0x06af0870, 0x0fff0ff0, "sxtb%c\t%12-15r, %0-3r, ror #16"}, - {ARM_EXT_V6, 0x06af0c70, 0x0fff0ff0, "sxtb%c\t%12-15r, %0-3r, ror #24"}, - {ARM_EXT_V6, 0x06ff0070, 0x0fff0ff0, "uxth%c\t%12-15r, %0-3r"}, - {ARM_EXT_V6, 0x06ff0470, 0x0fff0ff0, "uxth%c\t%12-15r, %0-3r, ror #8"}, - {ARM_EXT_V6, 0x06ff0870, 0x0fff0ff0, "uxth%c\t%12-15r, %0-3r, ror #16"}, - {ARM_EXT_V6, 0x06ff0c70, 0x0fff0ff0, "uxth%c\t%12-15r, %0-3r, ror #24"}, - {ARM_EXT_V6, 0x06cf0070, 0x0fff0ff0, "uxtb16%c\t%12-15r, %0-3r"}, - {ARM_EXT_V6, 0x06cf0470, 0x0fff0ff0, "uxtb16%c\t%12-15r, %0-3r, ror #8"}, - {ARM_EXT_V6, 0x06cf0870, 0x0fff0ff0, "uxtb16%c\t%12-15r, %0-3r, ror #16"}, - {ARM_EXT_V6, 0x06cf0c70, 0x0fff0ff0, "uxtb16%c\t%12-15r, %0-3r, ror #24"}, - {ARM_EXT_V6, 0x06ef0070, 0x0fff0ff0, "uxtb%c\t%12-15r, %0-3r"}, - {ARM_EXT_V6, 0x06ef0470, 0x0fff0ff0, "uxtb%c\t%12-15r, %0-3r, ror #8"}, - {ARM_EXT_V6, 0x06ef0870, 0x0fff0ff0, "uxtb%c\t%12-15r, %0-3r, ror #16"}, - {ARM_EXT_V6, 0x06ef0c70, 0x0fff0ff0, "uxtb%c\t%12-15r, %0-3r, ror #24"}, - {ARM_EXT_V6, 0x06b00070, 0x0ff00ff0, "sxtah%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06b00470, 0x0ff00ff0, "sxtah%c\t%12-15r, %16-19r, %0-3r, ror #8"}, - {ARM_EXT_V6, 0x06b00870, 0x0ff00ff0, "sxtah%c\t%12-15r, %16-19r, %0-3r, ror #16"}, - {ARM_EXT_V6, 0x06b00c70, 0x0ff00ff0, "sxtah%c\t%12-15r, %16-19r, %0-3r, ror #24"}, - {ARM_EXT_V6, 0x06800070, 0x0ff00ff0, "sxtab16%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06800470, 0x0ff00ff0, "sxtab16%c\t%12-15r, %16-19r, %0-3r, ror #8"}, - {ARM_EXT_V6, 0x06800870, 0x0ff00ff0, "sxtab16%c\t%12-15r, %16-19r, %0-3r, ror #16"}, - {ARM_EXT_V6, 0x06800c70, 0x0ff00ff0, "sxtab16%c\t%12-15r, %16-19r, %0-3r, ror #24"}, - {ARM_EXT_V6, 0x06a00070, 
0x0ff00ff0, "sxtab%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06a00470, 0x0ff00ff0, "sxtab%c\t%12-15r, %16-19r, %0-3r, ror #8"}, - {ARM_EXT_V6, 0x06a00870, 0x0ff00ff0, "sxtab%c\t%12-15r, %16-19r, %0-3r, ror #16"}, - {ARM_EXT_V6, 0x06a00c70, 0x0ff00ff0, "sxtab%c\t%12-15r, %16-19r, %0-3r, ror #24"}, - {ARM_EXT_V6, 0x06f00070, 0x0ff00ff0, "uxtah%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06f00470, 0x0ff00ff0, "uxtah%c\t%12-15r, %16-19r, %0-3r, ror #8"}, - {ARM_EXT_V6, 0x06f00870, 0x0ff00ff0, "uxtah%c\t%12-15r, %16-19r, %0-3r, ror #16"}, - {ARM_EXT_V6, 0x06f00c70, 0x0ff00ff0, "uxtah%c\t%12-15r, %16-19r, %0-3r, ror #24"}, - {ARM_EXT_V6, 0x06c00070, 0x0ff00ff0, "uxtab16%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06c00470, 0x0ff00ff0, "uxtab16%c\t%12-15r, %16-19r, %0-3r, ror #8"}, - {ARM_EXT_V6, 0x06c00870, 0x0ff00ff0, "uxtab16%c\t%12-15r, %16-19r, %0-3r, ror #16"}, - {ARM_EXT_V6, 0x06c00c70, 0x0ff00ff0, "uxtab16%c\t%12-15r, %16-19r, %0-3r, ROR #24"}, - {ARM_EXT_V6, 0x06e00070, 0x0ff00ff0, "uxtab%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0x06e00470, 0x0ff00ff0, "uxtab%c\t%12-15r, %16-19r, %0-3r, ror #8"}, - {ARM_EXT_V6, 0x06e00870, 0x0ff00ff0, "uxtab%c\t%12-15r, %16-19r, %0-3r, ror #16"}, - {ARM_EXT_V6, 0x06e00c70, 0x0ff00ff0, "uxtab%c\t%12-15r, %16-19r, %0-3r, ror #24"}, - {ARM_EXT_V6, 0x06800fb0, 0x0ff00ff0, "sel%c\t%12-15r, %16-19r, %0-3r"}, - {ARM_EXT_V6, 0xf1010000, 0xfffffc00, "setend\t%9?ble"}, - {ARM_EXT_V6, 0x0700f010, 0x0ff0f0d0, "smuad%5'x%c\t%16-19r, %0-3r, %8-11r"}, - {ARM_EXT_V6, 0x0700f050, 0x0ff0f0d0, "smusd%5'x%c\t%16-19r, %0-3r, %8-11r"}, - {ARM_EXT_V6, 0x07000010, 0x0ff000d0, "smlad%5'x%c\t%16-19r, %0-3r, %8-11r, %12-15r"}, - {ARM_EXT_V6, 0x07400010, 0x0ff000d0, "smlald%5'x%c\t%12-15r, %16-19r, %0-3r, %8-11r"}, - {ARM_EXT_V6, 0x07000050, 0x0ff000d0, "smlsd%5'x%c\t%16-19r, %0-3r, %8-11r, %12-15r"}, - {ARM_EXT_V6, 0x07400050, 0x0ff000d0, "smlsld%5'x%c\t%12-15r, %16-19r, %0-3r, %8-11r"}, - {ARM_EXT_V6, 0x0750f010, 0x0ff0f0d0, "smmul%5'r%c\t%16-19r, %0-3r, %8-11r"}, - {ARM_EXT_V6, 0x07500010, 0x0ff000d0, "smmla%5'r%c\t%16-19r, %0-3r, %8-11r, %12-15r"}, - {ARM_EXT_V6, 0x075000d0, 0x0ff000d0, "smmls%5'r%c\t%16-19r, %0-3r, %8-11r, %12-15r"}, - {ARM_EXT_V6, 0xf84d0500, 0xfe5fffe0, "srs%23?id%24?ba\t%16-19r%21'!, #%0-4d"}, - {ARM_EXT_V6, 0x06a00010, 0x0fe00ff0, "ssat%c\t%12-15r, #%16-20W, %0-3r"}, - {ARM_EXT_V6, 0x06a00010, 0x0fe00070, "ssat%c\t%12-15r, #%16-20W, %0-3r, lsl #%7-11d"}, - {ARM_EXT_V6, 0x06a00050, 0x0fe00070, "ssat%c\t%12-15r, #%16-20W, %0-3r, asr #%7-11d"}, - {ARM_EXT_V6, 0x06a00f30, 0x0ff00ff0, "ssat16%c\t%12-15r, #%16-19W, %0-3r"}, - {ARM_EXT_V6, 0x01800f90, 0x0ff00ff0, "strex%c\t%12-15r, %0-3r, [%16-19r]"}, - {ARM_EXT_V6, 0x00400090, 0x0ff000f0, "umaal%c\t%12-15r, %16-19r, %0-3r, %8-11r"}, - {ARM_EXT_V6, 0x0780f010, 0x0ff0f0f0, "usad8%c\t%16-19r, %0-3r, %8-11r"}, - {ARM_EXT_V6, 0x07800010, 0x0ff000f0, "usada8%c\t%16-19r, %0-3r, %8-11r, %12-15r"}, - {ARM_EXT_V6, 0x06e00010, 0x0fe00ff0, "usat%c\t%12-15r, #%16-20d, %0-3r"}, - {ARM_EXT_V6, 0x06e00010, 0x0fe00070, "usat%c\t%12-15r, #%16-20d, %0-3r, lsl #%7-11d"}, - {ARM_EXT_V6, 0x06e00050, 0x0fe00070, "usat%c\t%12-15r, #%16-20d, %0-3r, asr #%7-11d"}, - {ARM_EXT_V6, 0x06e00f30, 0x0ff00ff0, "usat16%c\t%12-15r, #%16-19d, %0-3r"}, - - /* V5J instruction. */ - {ARM_EXT_V5J, 0x012fff20, 0x0ffffff0, "bxj%c\t%0-3r"}, - - /* V5 Instructions. 
*/ - {ARM_EXT_V5, 0xe1200070, 0xfff000f0, "bkpt\t0x%16-19X%12-15X%8-11X%0-3X"}, - {ARM_EXT_V5, 0xfa000000, 0xfe000000, "blx\t%B"}, - {ARM_EXT_V5, 0x012fff30, 0x0ffffff0, "blx%c\t%0-3r"}, - {ARM_EXT_V5, 0x016f0f10, 0x0fff0ff0, "clz%c\t%12-15r, %0-3r"}, - - /* V5E "El Segundo" Instructions. */ - {ARM_EXT_V5E, 0x000000d0, 0x0e1000f0, "ldrd%c\t%12-15r, %s"}, - {ARM_EXT_V5E, 0x000000f0, 0x0e1000f0, "strd%c\t%12-15r, %s"}, - {ARM_EXT_V5E, 0xf450f000, 0xfc70f000, "pld\t%a"}, - {ARM_EXT_V5ExP, 0x01000080, 0x0ff000f0, "smlabb%c\t%16-19r, %0-3r, %8-11r, %12-15r"}, - {ARM_EXT_V5ExP, 0x010000a0, 0x0ff000f0, "smlatb%c\t%16-19r, %0-3r, %8-11r, %12-15r"}, - {ARM_EXT_V5ExP, 0x010000c0, 0x0ff000f0, "smlabt%c\t%16-19r, %0-3r, %8-11r, %12-15r"}, - {ARM_EXT_V5ExP, 0x010000e0, 0x0ff000f0, "smlatt%c\t%16-19r, %0-3r, %8-11r, %12-15r"}, - - {ARM_EXT_V5ExP, 0x01200080, 0x0ff000f0, "smlawb%c\t%16-19r, %0-3r, %8-11r, %12-15r"}, - {ARM_EXT_V5ExP, 0x012000c0, 0x0ff000f0, "smlawt%c\t%16-19r, %0-3r, %8-11r, %12-15r"}, - - {ARM_EXT_V5ExP, 0x01400080, 0x0ff000f0, "smlalbb%c\t%12-15r, %16-19r, %0-3r, %8-11r"}, - {ARM_EXT_V5ExP, 0x014000a0, 0x0ff000f0, "smlaltb%c\t%12-15r, %16-19r, %0-3r, %8-11r"}, - {ARM_EXT_V5ExP, 0x014000c0, 0x0ff000f0, "smlalbt%c\t%12-15r, %16-19r, %0-3r, %8-11r"}, - {ARM_EXT_V5ExP, 0x014000e0, 0x0ff000f0, "smlaltt%c\t%12-15r, %16-19r, %0-3r, %8-11r"}, - - {ARM_EXT_V5ExP, 0x01600080, 0x0ff0f0f0, "smulbb%c\t%16-19r, %0-3r, %8-11r"}, - {ARM_EXT_V5ExP, 0x016000a0, 0x0ff0f0f0, "smultb%c\t%16-19r, %0-3r, %8-11r"}, - {ARM_EXT_V5ExP, 0x016000c0, 0x0ff0f0f0, "smulbt%c\t%16-19r, %0-3r, %8-11r"}, - {ARM_EXT_V5ExP, 0x016000e0, 0x0ff0f0f0, "smultt%c\t%16-19r, %0-3r, %8-11r"}, - - {ARM_EXT_V5ExP, 0x012000a0, 0x0ff0f0f0, "smulwb%c\t%16-19r, %0-3r, %8-11r"}, - {ARM_EXT_V5ExP, 0x012000e0, 0x0ff0f0f0, "smulwt%c\t%16-19r, %0-3r, %8-11r"}, - - {ARM_EXT_V5ExP, 0x01000050, 0x0ff00ff0, "qadd%c\t%12-15r, %0-3r, %16-19r"}, - {ARM_EXT_V5ExP, 0x01400050, 0x0ff00ff0, "qdadd%c\t%12-15r, %0-3r, %16-19r"}, - {ARM_EXT_V5ExP, 0x01200050, 0x0ff00ff0, "qsub%c\t%12-15r, %0-3r, %16-19r"}, - {ARM_EXT_V5ExP, 0x01600050, 0x0ff00ff0, "qdsub%c\t%12-15r, %0-3r, %16-19r"}, - - /* ARM Instructions. 
*/ - {ARM_EXT_V1, 0x00000090, 0x0e100090, "str%6's%5?hb%c\t%12-15r, %s"}, - {ARM_EXT_V1, 0x00100090, 0x0e100090, "ldr%6's%5?hb%c\t%12-15r, %s"}, - {ARM_EXT_V1, 0x00000000, 0x0de00000, "and%20's%c\t%12-15r, %16-19r, %o"}, - {ARM_EXT_V1, 0x00200000, 0x0de00000, "eor%20's%c\t%12-15r, %16-19r, %o"}, - {ARM_EXT_V1, 0x00400000, 0x0de00000, "sub%20's%c\t%12-15r, %16-19r, %o"}, - {ARM_EXT_V1, 0x00600000, 0x0de00000, "rsb%20's%c\t%12-15r, %16-19r, %o"}, - {ARM_EXT_V1, 0x00800000, 0x0de00000, "add%20's%c\t%12-15r, %16-19r, %o"}, - {ARM_EXT_V1, 0x00a00000, 0x0de00000, "adc%20's%c\t%12-15r, %16-19r, %o"}, - {ARM_EXT_V1, 0x00c00000, 0x0de00000, "sbc%20's%c\t%12-15r, %16-19r, %o"}, - {ARM_EXT_V1, 0x00e00000, 0x0de00000, "rsc%20's%c\t%12-15r, %16-19r, %o"}, - {ARM_EXT_V3, 0x0120f000, 0x0db0f000, "msr%c\t%22?SCPSR%C, %o"}, - {ARM_EXT_V3, 0x010f0000, 0x0fbf0fff, "mrs%c\t%12-15r, %22?SCPSR"}, - {ARM_EXT_V1, 0x01000000, 0x0de00000, "tst%p%c\t%16-19r, %o"}, - {ARM_EXT_V1, 0x01200000, 0x0de00000, "teq%p%c\t%16-19r, %o"}, - {ARM_EXT_V1, 0x01400000, 0x0de00000, "cmp%p%c\t%16-19r, %o"}, - {ARM_EXT_V1, 0x01600000, 0x0de00000, "cmn%p%c\t%16-19r, %o"}, - {ARM_EXT_V1, 0x01800000, 0x0de00000, "orr%20's%c\t%12-15r, %16-19r, %o"}, - {ARM_EXT_V1, 0x03a00000, 0x0fef0000, "mov%20's%c\t%12-15r, %o"}, - {ARM_EXT_V1, 0x01a00000, 0x0def0ff0, "mov%20's%c\t%12-15r, %0-3r"}, - {ARM_EXT_V1, 0x01a00000, 0x0def0060, "lsl%20's%c\t%12-15r, %q"}, - {ARM_EXT_V1, 0x01a00020, 0x0def0060, "lsr%20's%c\t%12-15r, %q"}, - {ARM_EXT_V1, 0x01a00040, 0x0def0060, "asr%20's%c\t%12-15r, %q"}, - {ARM_EXT_V1, 0x01a00060, 0x0def0ff0, "rrx%20's%c\t%12-15r, %0-3r"}, - {ARM_EXT_V1, 0x01a00060, 0x0def0060, "ror%20's%c\t%12-15r, %q"}, - {ARM_EXT_V1, 0x01c00000, 0x0de00000, "bic%20's%c\t%12-15r, %16-19r, %o"}, - {ARM_EXT_V1, 0x01e00000, 0x0de00000, "mvn%20's%c\t%12-15r, %o"}, - {ARM_EXT_V1, 0x052d0004, 0x0fff0fff, "push%c\t{%12-15r}\t\t; (str%c %12-15r, %a)"}, - {ARM_EXT_V1, 0x04000000, 0x0e100000, "str%22'b%t%c\t%12-15r, %a"}, - {ARM_EXT_V1, 0x06000000, 0x0e100ff0, "str%22'b%t%c\t%12-15r, %a"}, - {ARM_EXT_V1, 0x04000000, 0x0c100010, "str%22'b%t%c\t%12-15r, %a"}, - {ARM_EXT_V1, 0x06000010, 0x0e000010, "undefined"}, - {ARM_EXT_V1, 0x049d0004, 0x0fff0fff, "pop%c\t{%12-15r}\t\t; (ldr%c %12-15r, %a)"}, - {ARM_EXT_V1, 0x04100000, 0x0c100000, "ldr%22'b%t%c\t%12-15r, %a"}, - {ARM_EXT_V1, 0x092d0000, 0x0fff0000, "push%c\t%m"}, - {ARM_EXT_V1, 0x08800000, 0x0ff00000, "stm%c\t%16-19r%21'!, %m%22'^"}, - {ARM_EXT_V1, 0x08000000, 0x0e100000, "stm%23?id%24?ba%c\t%16-19r%21'!, %m%22'^"}, - {ARM_EXT_V1, 0x08bd0000, 0x0fff0000, "pop%c\t%m"}, - {ARM_EXT_V1, 0x08900000, 0x0f900000, "ldm%c\t%16-19r%21'!, %m%22'^"}, - {ARM_EXT_V1, 0x08100000, 0x0e100000, "ldm%23?id%24?ba%c\t%16-19r%21'!, %m%22'^"}, - {ARM_EXT_V1, 0x0a000000, 0x0e000000, "b%24'l%c\t%b"}, - {ARM_EXT_V1, 0x0f000000, 0x0f000000, "svc%c\t%0-23x"}, - - /* The rest. */ - {ARM_EXT_V1, 0x00000000, 0x00000000, "undefined instruction %0-31x"}, - {0, 0x00000000, 0x00000000, 0} -}; - -/* print_insn_thumb16 recognizes the following format control codes: - - %S print Thumb register (bits 3..5 as high number if bit 6 set) - %D print Thumb register (bits 0..2 as high number if bit 7 set) - %I print bitfield as a signed decimal - (top bit of range being the sign bit) - %N print Thumb register mask (with LR) - %O print Thumb register mask (with PC) - %M print Thumb register mask - %b print CZB's 6-bit unsigned branch destination - %s print Thumb right-shift immediate (6..10; 0 == 32). 
- %c print the condition code - %C print the condition code, or "s" if not conditional - %x print warning if conditional an not at end of IT block" - %X print "\t; unpredictable " if conditional - %I print IT instruction suffix and operands - %r print bitfield as an ARM register - %d print bitfield as a decimal - %H print (bitfield * 2) as a decimal - %W print (bitfield * 4) as a decimal - %a print (bitfield * 4) as a pc-rel offset + decoded symbol - %B print Thumb branch destination (signed displacement) - %c print bitfield as a condition code - %'c print specified char iff bit is one - %?ab print a if bit is one else print b. */ - -static const struct opcode16 thumb_opcodes[] = -{ - /* Thumb instructions. */ - - /* ARM V6K no-argument instructions. */ - {ARM_EXT_V6K, 0xbf00, 0xffff, "nop%c"}, - {ARM_EXT_V6K, 0xbf10, 0xffff, "yield%c"}, - {ARM_EXT_V6K, 0xbf20, 0xffff, "wfe%c"}, - {ARM_EXT_V6K, 0xbf30, 0xffff, "wfi%c"}, - {ARM_EXT_V6K, 0xbf40, 0xffff, "sev%c"}, - {ARM_EXT_V6K, 0xbf00, 0xff0f, "nop%c\t{%4-7d}"}, - - /* ARM V6T2 instructions. */ - {ARM_EXT_V6T2, 0xb900, 0xfd00, "cbnz\t%0-2r, %b%X"}, - {ARM_EXT_V6T2, 0xb100, 0xfd00, "cbz\t%0-2r, %b%X"}, - {ARM_EXT_V6T2, 0xbf00, 0xff00, "it%I%X"}, - - /* ARM V6. */ - {ARM_EXT_V6, 0xb660, 0xfff8, "cpsie\t%2'a%1'i%0'f%X"}, - {ARM_EXT_V6, 0xb670, 0xfff8, "cpsid\t%2'a%1'i%0'f%X"}, - {ARM_EXT_V6, 0x4600, 0xffc0, "mov%c\t%0-2r, %3-5r"}, - {ARM_EXT_V6, 0xba00, 0xffc0, "rev%c\t%0-2r, %3-5r"}, - {ARM_EXT_V6, 0xba40, 0xffc0, "rev16%c\t%0-2r, %3-5r"}, - {ARM_EXT_V6, 0xbac0, 0xffc0, "revsh%c\t%0-2r, %3-5r"}, - {ARM_EXT_V6, 0xb650, 0xfff7, "setend\t%3?ble%X"}, - {ARM_EXT_V6, 0xb200, 0xffc0, "sxth%c\t%0-2r, %3-5r"}, - {ARM_EXT_V6, 0xb240, 0xffc0, "sxtb%c\t%0-2r, %3-5r"}, - {ARM_EXT_V6, 0xb280, 0xffc0, "uxth%c\t%0-2r, %3-5r"}, - {ARM_EXT_V6, 0xb2c0, 0xffc0, "uxtb%c\t%0-2r, %3-5r"}, - - /* ARM V5 ISA extends Thumb. */ - {ARM_EXT_V5T, 0xbe00, 0xff00, "bkpt\t%0-7x"}, /* Is always unconditional. */ - /* This is BLX(2). BLX(1) is a 32-bit instruction. */ - {ARM_EXT_V5T, 0x4780, 0xff87, "blx%c\t%3-6r%x"}, /* note: 4 bit register number. */ - /* ARM V4T ISA (Thumb v1). */ - {ARM_EXT_V4T, 0x46C0, 0xFFFF, "nop%c\t\t\t(mov r8, r8)"}, - /* Format 4. 
*/ - {ARM_EXT_V4T, 0x4000, 0xFFC0, "and%C\t%0-2r, %3-5r"}, - {ARM_EXT_V4T, 0x4040, 0xFFC0, "eor%C\t%0-2r, %3-5r"}, - {ARM_EXT_V4T, 0x4080, 0xFFC0, "lsl%C\t%0-2r, %3-5r"}, - {ARM_EXT_V4T, 0x40C0, 0xFFC0, "lsr%C\t%0-2r, %3-5r"}, - {ARM_EXT_V4T, 0x4100, 0xFFC0, "asr%C\t%0-2r, %3-5r"}, - {ARM_EXT_V4T, 0x4140, 0xFFC0, "adc%C\t%0-2r, %3-5r"}, - {ARM_EXT_V4T, 0x4180, 0xFFC0, "sbc%C\t%0-2r, %3-5r"}, - {ARM_EXT_V4T, 0x41C0, 0xFFC0, "ror%C\t%0-2r, %3-5r"}, - {ARM_EXT_V4T, 0x4200, 0xFFC0, "tst%c\t%0-2r, %3-5r"}, - {ARM_EXT_V4T, 0x4240, 0xFFC0, "neg%C\t%0-2r, %3-5r"}, - {ARM_EXT_V4T, 0x4280, 0xFFC0, "cmp%c\t%0-2r, %3-5r"}, - {ARM_EXT_V4T, 0x42C0, 0xFFC0, "cmn%c\t%0-2r, %3-5r"}, - {ARM_EXT_V4T, 0x4300, 0xFFC0, "orr%C\t%0-2r, %3-5r"}, - {ARM_EXT_V4T, 0x4340, 0xFFC0, "mul%C\t%0-2r, %3-5r"}, - {ARM_EXT_V4T, 0x4380, 0xFFC0, "bic%C\t%0-2r, %3-5r"}, - {ARM_EXT_V4T, 0x43C0, 0xFFC0, "mvn%C\t%0-2r, %3-5r"}, - /* format 13 */ - {ARM_EXT_V4T, 0xB000, 0xFF80, "add%c\tsp, #%0-6W"}, - {ARM_EXT_V4T, 0xB080, 0xFF80, "sub%c\tsp, #%0-6W"}, - /* format 5 */ - {ARM_EXT_V4T, 0x4700, 0xFF80, "bx%c\t%S%x"}, - {ARM_EXT_V4T, 0x4400, 0xFF00, "add%c\t%D, %S"}, - {ARM_EXT_V4T, 0x4500, 0xFF00, "cmp%c\t%D, %S"}, - {ARM_EXT_V4T, 0x4600, 0xFF00, "mov%c\t%D, %S"}, - /* format 14 */ - {ARM_EXT_V4T, 0xB400, 0xFE00, "push%c\t%N"}, - {ARM_EXT_V4T, 0xBC00, 0xFE00, "pop%c\t%O"}, - /* format 2 */ - {ARM_EXT_V4T, 0x1800, 0xFE00, "add%C\t%0-2r, %3-5r, %6-8r"}, - {ARM_EXT_V4T, 0x1A00, 0xFE00, "sub%C\t%0-2r, %3-5r, %6-8r"}, - {ARM_EXT_V4T, 0x1C00, 0xFE00, "add%C\t%0-2r, %3-5r, #%6-8d"}, - {ARM_EXT_V4T, 0x1E00, 0xFE00, "sub%C\t%0-2r, %3-5r, #%6-8d"}, - /* format 8 */ - {ARM_EXT_V4T, 0x5200, 0xFE00, "strh%c\t%0-2r, [%3-5r, %6-8r]"}, - {ARM_EXT_V4T, 0x5A00, 0xFE00, "ldrh%c\t%0-2r, [%3-5r, %6-8r]"}, - {ARM_EXT_V4T, 0x5600, 0xF600, "ldrs%11?hb%c\t%0-2r, [%3-5r, %6-8r]"}, - /* format 7 */ - {ARM_EXT_V4T, 0x5000, 0xFA00, "str%10'b%c\t%0-2r, [%3-5r, %6-8r]"}, - {ARM_EXT_V4T, 0x5800, 0xFA00, "ldr%10'b%c\t%0-2r, [%3-5r, %6-8r]"}, - /* format 1 */ - {ARM_EXT_V4T, 0x0000, 0xF800, "lsl%C\t%0-2r, %3-5r, #%6-10d"}, - {ARM_EXT_V4T, 0x0800, 0xF800, "lsr%C\t%0-2r, %3-5r, %s"}, - {ARM_EXT_V4T, 0x1000, 0xF800, "asr%C\t%0-2r, %3-5r, %s"}, - /* format 3 */ - {ARM_EXT_V4T, 0x2000, 0xF800, "mov%C\t%8-10r, #%0-7d"}, - {ARM_EXT_V4T, 0x2800, 0xF800, "cmp%c\t%8-10r, #%0-7d"}, - {ARM_EXT_V4T, 0x3000, 0xF800, "add%C\t%8-10r, #%0-7d"}, - {ARM_EXT_V4T, 0x3800, 0xF800, "sub%C\t%8-10r, #%0-7d"}, - /* format 6 */ - {ARM_EXT_V4T, 0x4800, 0xF800, "ldr%c\t%8-10r, [pc, #%0-7W]\t(%0-7a)"}, /* TODO: Disassemble PC relative "LDR rD,=" */ - /* format 9 */ - {ARM_EXT_V4T, 0x6000, 0xF800, "str%c\t%0-2r, [%3-5r, #%6-10W]"}, - {ARM_EXT_V4T, 0x6800, 0xF800, "ldr%c\t%0-2r, [%3-5r, #%6-10W]"}, - {ARM_EXT_V4T, 0x7000, 0xF800, "strb%c\t%0-2r, [%3-5r, #%6-10d]"}, - {ARM_EXT_V4T, 0x7800, 0xF800, "ldrb%c\t%0-2r, [%3-5r, #%6-10d]"}, - /* format 10 */ - {ARM_EXT_V4T, 0x8000, 0xF800, "strh%c\t%0-2r, [%3-5r, #%6-10H]"}, - {ARM_EXT_V4T, 0x8800, 0xF800, "ldrh%c\t%0-2r, [%3-5r, #%6-10H]"}, - /* format 11 */ - {ARM_EXT_V4T, 0x9000, 0xF800, "str%c\t%8-10r, [sp, #%0-7W]"}, - {ARM_EXT_V4T, 0x9800, 0xF800, "ldr%c\t%8-10r, [sp, #%0-7W]"}, - /* format 12 */ - {ARM_EXT_V4T, 0xA000, 0xF800, "add%c\t%8-10r, pc, #%0-7W\t(adr %8-10r, %0-7a)"}, - {ARM_EXT_V4T, 0xA800, 0xF800, "add%c\t%8-10r, sp, #%0-7W"}, - /* format 15 */ - {ARM_EXT_V4T, 0xC000, 0xF800, "stmia%c\t%8-10r!, %M"}, - {ARM_EXT_V4T, 0xC800, 0xF800, "ldmia%c\t%8-10r!, %M"}, - /* format 17 */ - {ARM_EXT_V4T, 0xDF00, 0xFF00, "svc%c\t%0-7d"}, - /* format 16 */ 
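The 16-bit entries above and below only do something in combination with the first-match scan in print_insn_thumb16 further down in this file: each halfword is compared against the {value, mask} pairs in table order, and the winning assembler string is expanded, with codes such as %8-10r and %0-7d pulling bit ranges out of the encoding. A minimal sketch of that expansion for one format 3 encoding, 0x3105, which matches the "add%C\t%8-10r, #%0-7d" pattern (decode_field and the hard-coded comparison are illustrative only, not part of the removed code):

    #include <stdio.h>

    /* Extract bits lo..hi (inclusive) of a 16-bit Thumb encoding. */
    static unsigned decode_field(unsigned insn, int lo, int hi)
    {
        return (insn >> lo) & ((1u << (hi - lo + 1)) - 1);
    }

    int main(void)
    {
        unsigned insn = 0x3105;              /* value 0x3000, mask 0xF800 */

        if ((insn & 0xF800) == 0x3000) {
            /* "add%C\t%8-10r, #%0-7d": %C prints "s" outside an IT block,
               %8-10r is the low destination register, %0-7d the immediate. */
            printf("adds\tr%u, #%u\n",
                   decode_field(insn, 8, 10),   /* -> 1 */
                   decode_field(insn, 0, 7));   /* -> 5 */
        }
        return 0;
    }

The same first-match rule is why the fixed patterns (the nop hints, bkpt, the "mov r8, r8" alias) sit ahead of the looser data-processing masks that would otherwise swallow them.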
- {ARM_EXT_V4T, 0xDE00, 0xFE00, "undefined"}, - {ARM_EXT_V4T, 0xD000, 0xF000, "b%8-11c.n\t%0-7B%X"}, - /* format 18 */ - {ARM_EXT_V4T, 0xE000, 0xF800, "b%c.n\t%0-10B%x"}, - - /* The E800 .. FFFF range is unconditionally redirected to the - 32-bit table, because even in pre-V6T2 ISAs, BL and BLX(1) pairs - are processed via that table. Thus, we can never encounter a - bare "second half of BL/BLX(1)" instruction here. */ - {ARM_EXT_V1, 0x0000, 0x0000, "undefined"}, - {0, 0, 0, 0} -}; - -/* Thumb32 opcodes use the same table structure as the ARM opcodes. - We adopt the convention that hw1 is the high 16 bits of .value and - .mask, hw2 the low 16 bits. - - print_insn_thumb32 recognizes the following format control codes: - - %% % - - %I print a 12-bit immediate from hw1[10],hw2[14:12,7:0] - %M print a modified 12-bit immediate (same location) - %J print a 16-bit immediate from hw1[3:0,10],hw2[14:12,7:0] - %K print a 16-bit immediate from hw2[3:0],hw1[3:0],hw2[11:4] - %S print a possibly-shifted Rm - - %a print the address of a plain load/store - %w print the width and signedness of a core load/store - %m print register mask for ldm/stm - - %E print the lsb and width fields of a bfc/bfi instruction - %F print the lsb and width fields of a sbfx/ubfx instruction - %b print a conditional branch offset - %B print an unconditional branch offset - %s print the shift field of an SSAT instruction - %R print the rotation field of an SXT instruction - %U print barrier type. - %P print address for pli instruction. - %c print the condition code - %x print warning if conditional an not at end of IT block" - %X print "\t; unpredictable " if conditional - - %d print bitfield in decimal - %W print bitfield*4 in decimal - %r print bitfield as an ARM register - %c print bitfield as a condition code - - %'c print specified char iff bitfield is all ones - %`c print specified char iff bitfield is all zeroes - %?ab... select from array of values in big endian order - - With one exception at the bottom (done because BL and BLX(1) need - to come dead last), this table was machine-sorted first in - decreasing order of number of bits set in the mask, then in - increasing numeric order of mask, then in increasing numeric order - of opcode. This order is not the clearest for a human reader, but - is guaranteed never to catch a special-case bit pattern with a more - general mask, which is important, because this instruction encoding - makes heavy use of special-case bit patterns. */ -static const struct opcode32 thumb32_opcodes[] = -{ - /* V7 instructions. */ - {ARM_EXT_V7, 0xf910f000, 0xff70f000, "pli%c\t%a"}, - {ARM_EXT_V7, 0xf3af80f0, 0xfffffff0, "dbg%c\t#%0-3d"}, - {ARM_EXT_V7, 0xf3bf8f50, 0xfffffff0, "dmb%c\t%U"}, - {ARM_EXT_V7, 0xf3bf8f40, 0xfffffff0, "dsb%c\t%U"}, - {ARM_EXT_V7, 0xf3bf8f60, 0xfffffff0, "isb%c\t%U"}, - {ARM_EXT_DIV, 0xfb90f0f0, 0xfff0f0f0, "sdiv%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_DIV, 0xfbb0f0f0, 0xfff0f0f0, "udiv%c\t%8-11r, %16-19r, %0-3r"}, - - /* Instructions defined in the basic V6T2 set. 
*/ - {ARM_EXT_V6T2, 0xf3af8000, 0xffffffff, "nop%c.w"}, - {ARM_EXT_V6T2, 0xf3af8001, 0xffffffff, "yield%c.w"}, - {ARM_EXT_V6T2, 0xf3af8002, 0xffffffff, "wfe%c.w"}, - {ARM_EXT_V6T2, 0xf3af8003, 0xffffffff, "wfi%c.w"}, - {ARM_EXT_V6T2, 0xf3af9004, 0xffffffff, "sev%c.w"}, - {ARM_EXT_V6T2, 0xf3af8000, 0xffffff00, "nop%c.w\t{%0-7d}"}, - - {ARM_EXT_V6T2, 0xf3bf8f2f, 0xffffffff, "clrex%c"}, - {ARM_EXT_V6T2, 0xf3af8400, 0xffffff1f, "cpsie.w\t%7'a%6'i%5'f%X"}, - {ARM_EXT_V6T2, 0xf3af8600, 0xffffff1f, "cpsid.w\t%7'a%6'i%5'f%X"}, - {ARM_EXT_V6T2, 0xf3c08f00, 0xfff0ffff, "bxj%c\t%16-19r%x"}, - {ARM_EXT_V6T2, 0xe810c000, 0xffd0ffff, "rfedb%c\t%16-19r%21'!"}, - {ARM_EXT_V6T2, 0xe990c000, 0xffd0ffff, "rfeia%c\t%16-19r%21'!"}, - {ARM_EXT_V6T2, 0xf3ef8000, 0xffeff000, "mrs%c\t%8-11r, %D"}, - {ARM_EXT_V6T2, 0xf3af8100, 0xffffffe0, "cps\t#%0-4d%X"}, - {ARM_EXT_V6T2, 0xe8d0f000, 0xfff0fff0, "tbb%c\t[%16-19r, %0-3r]%x"}, - {ARM_EXT_V6T2, 0xe8d0f010, 0xfff0fff0, "tbh%c\t[%16-19r, %0-3r, lsl #1]%x"}, - {ARM_EXT_V6T2, 0xf3af8500, 0xffffff00, "cpsie\t%7'a%6'i%5'f, #%0-4d%X"}, - {ARM_EXT_V6T2, 0xf3af8700, 0xffffff00, "cpsid\t%7'a%6'i%5'f, #%0-4d%X"}, - {ARM_EXT_V6T2, 0xf3de8f00, 0xffffff00, "subs%c\tpc, lr, #%0-7d"}, - {ARM_EXT_V6T2, 0xf3808000, 0xffe0f000, "msr%c\t%C, %16-19r"}, - {ARM_EXT_V6T2, 0xe8500f00, 0xfff00fff, "ldrex%c\t%12-15r, [%16-19r]"}, - {ARM_EXT_V6T2, 0xe8d00f4f, 0xfff00fef, "ldrex%4?hb%c\t%12-15r, [%16-19r]"}, - {ARM_EXT_V6T2, 0xe800c000, 0xffd0ffe0, "srsdb%c\t%16-19r%21'!, #%0-4d"}, - {ARM_EXT_V6T2, 0xe980c000, 0xffd0ffe0, "srsia%c\t%16-19r%21'!, #%0-4d"}, - {ARM_EXT_V6T2, 0xfa0ff080, 0xfffff0c0, "sxth%c.w\t%8-11r, %0-3r%R"}, - {ARM_EXT_V6T2, 0xfa1ff080, 0xfffff0c0, "uxth%c.w\t%8-11r, %0-3r%R"}, - {ARM_EXT_V6T2, 0xfa2ff080, 0xfffff0c0, "sxtb16%c\t%8-11r, %0-3r%R"}, - {ARM_EXT_V6T2, 0xfa3ff080, 0xfffff0c0, "uxtb16%c\t%8-11r, %0-3r%R"}, - {ARM_EXT_V6T2, 0xfa4ff080, 0xfffff0c0, "sxtb%c.w\t%8-11r, %0-3r%R"}, - {ARM_EXT_V6T2, 0xfa5ff080, 0xfffff0c0, "uxtb%c.w\t%8-11r, %0-3r%R"}, - {ARM_EXT_V6T2, 0xe8400000, 0xfff000ff, "strex%c\t%8-11r, %12-15r, [%16-19r]"}, - {ARM_EXT_V6T2, 0xe8d0007f, 0xfff000ff, "ldrexd%c\t%12-15r, %8-11r, [%16-19r]"}, - {ARM_EXT_V6T2, 0xfa80f000, 0xfff0f0f0, "sadd8%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfa80f010, 0xfff0f0f0, "qadd8%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfa80f020, 0xfff0f0f0, "shadd8%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfa80f040, 0xfff0f0f0, "uadd8%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfa80f050, 0xfff0f0f0, "uqadd8%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfa80f060, 0xfff0f0f0, "uhadd8%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfa80f080, 0xfff0f0f0, "qadd%c\t%8-11r, %0-3r, %16-19r"}, - {ARM_EXT_V6T2, 0xfa80f090, 0xfff0f0f0, "qdadd%c\t%8-11r, %0-3r, %16-19r"}, - {ARM_EXT_V6T2, 0xfa80f0a0, 0xfff0f0f0, "qsub%c\t%8-11r, %0-3r, %16-19r"}, - {ARM_EXT_V6T2, 0xfa80f0b0, 0xfff0f0f0, "qdsub%c\t%8-11r, %0-3r, %16-19r"}, - {ARM_EXT_V6T2, 0xfa90f000, 0xfff0f0f0, "sadd16%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfa90f010, 0xfff0f0f0, "qadd16%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfa90f020, 0xfff0f0f0, "shadd16%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfa90f040, 0xfff0f0f0, "uadd16%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfa90f050, 0xfff0f0f0, "uqadd16%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfa90f060, 0xfff0f0f0, "uhadd16%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfa90f080, 0xfff0f0f0, "rev%c.w\t%8-11r, %16-19r"}, - {ARM_EXT_V6T2, 0xfa90f090, 0xfff0f0f0, "rev16%c.w\t%8-11r, %16-19r"}, - 
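As the comment above this table notes, each 32-bit Thumb entry packs hw1 into the high half of .value/.mask and hw2 into the low half, so the two halfwords have to be combined into a single 32-bit word before the scan. A minimal sketch of that convention, using the clrex entry just above (thumb32_word and the hard-coded comparison are illustrative stand-ins for the real read_memory_func plumbing):

    #include <stdint.h>
    #include <stdio.h>

    /* hw1 (the first halfword) goes in the high 16 bits, hw2 in the low 16,
       matching the .value/.mask layout used by thumb32_opcodes. */
    static uint32_t thumb32_word(uint16_t hw1, uint16_t hw2)
    {
        return ((uint32_t)hw1 << 16) | hw2;
    }

    int main(void)
    {
        uint32_t given = thumb32_word(0xf3bf, 0x8f2f);   /* clrex */

        /* First match wins, so this all-ones-mask entry has to sit ahead
           of any looser pattern that would also cover the encoding. */
        if ((given & 0xffffffffu) == 0xf3bf8f2fu)
            printf("clrex\n");
        return 0;
    }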
{ARM_EXT_V6T2, 0xfa90f0a0, 0xfff0f0f0, "rbit%c\t%8-11r, %16-19r"}, - {ARM_EXT_V6T2, 0xfa90f0b0, 0xfff0f0f0, "revsh%c.w\t%8-11r, %16-19r"}, - {ARM_EXT_V6T2, 0xfaa0f000, 0xfff0f0f0, "saddsubx%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfaa0f010, 0xfff0f0f0, "qaddsubx%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfaa0f020, 0xfff0f0f0, "shaddsubx%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfaa0f040, 0xfff0f0f0, "uaddsubx%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfaa0f050, 0xfff0f0f0, "uqaddsubx%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfaa0f060, 0xfff0f0f0, "uhaddsubx%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfaa0f080, 0xfff0f0f0, "sel%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfab0f080, 0xfff0f0f0, "clz%c\t%8-11r, %16-19r"}, - {ARM_EXT_V6T2, 0xfac0f000, 0xfff0f0f0, "ssub8%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfac0f010, 0xfff0f0f0, "qsub8%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfac0f020, 0xfff0f0f0, "shsub8%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfac0f040, 0xfff0f0f0, "usub8%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfac0f050, 0xfff0f0f0, "uqsub8%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfac0f060, 0xfff0f0f0, "uhsub8%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfad0f000, 0xfff0f0f0, "ssub16%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfad0f010, 0xfff0f0f0, "qsub16%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfad0f020, 0xfff0f0f0, "shsub16%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfad0f040, 0xfff0f0f0, "usub16%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfad0f050, 0xfff0f0f0, "uqsub16%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfad0f060, 0xfff0f0f0, "uhsub16%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfae0f000, 0xfff0f0f0, "ssubaddx%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfae0f010, 0xfff0f0f0, "qsubaddx%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfae0f020, 0xfff0f0f0, "shsubaddx%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfae0f040, 0xfff0f0f0, "usubaddx%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfae0f050, 0xfff0f0f0, "uqsubaddx%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfae0f060, 0xfff0f0f0, "uhsubaddx%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfb00f000, 0xfff0f0f0, "mul%c.w\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfb70f000, 0xfff0f0f0, "usad8%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfa00f000, 0xffe0f0f0, "lsl%20's%c.w\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfa20f000, 0xffe0f0f0, "lsr%20's%c.w\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfa40f000, 0xffe0f0f0, "asr%20's%c.w\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfa60f000, 0xffe0f0f0, "ror%20's%c.w\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xe8c00f40, 0xfff00fe0, "strex%4?hb%c\t%0-3r, %12-15r, [%16-19r]"}, - {ARM_EXT_V6T2, 0xf3200000, 0xfff0f0e0, "ssat16%c\t%8-11r, #%0-4d, %16-19r"}, - {ARM_EXT_V6T2, 0xf3a00000, 0xfff0f0e0, "usat16%c\t%8-11r, #%0-4d, %16-19r"}, - {ARM_EXT_V6T2, 0xfb20f000, 0xfff0f0e0, "smuad%4'x%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfb30f000, 0xfff0f0e0, "smulw%4?tb%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfb40f000, 0xfff0f0e0, "smusd%4'x%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfb50f000, 0xfff0f0e0, "smmul%4'r%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfa00f080, 0xfff0f0c0, "sxtah%c\t%8-11r, %16-19r, %0-3r%R"}, - {ARM_EXT_V6T2, 0xfa10f080, 0xfff0f0c0, "uxtah%c\t%8-11r, %16-19r, %0-3r%R"}, - {ARM_EXT_V6T2, 0xfa20f080, 0xfff0f0c0, "sxtab16%c\t%8-11r, %16-19r, %0-3r%R"}, - {ARM_EXT_V6T2, 0xfa30f080, 0xfff0f0c0, "uxtab16%c\t%8-11r, %16-19r, %0-3r%R"}, 
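Several of the surrounding strings rely on the single-character conditionals from the legend: %20's emits "s" only when the S bit (bit 20) is set, %21'! emits the writeback "!", %23`- emits "-" when bit 23 is clear, and %4?tb picks "t" or "b" from bit 4. A stripped-down sketch of the ' and ` cases, applied to the lsl%20's entry above (emit_flag_char is invented for the illustration; the removed code does this inside the format interpreter after arm_decode_bitfield):

    #include <stdio.h>

    /* kind '\'' prints ch iff the bit is set, kind '`' iff it is clear,
       mirroring the %N'c and %N`c codes in the opcode strings. */
    static void emit_flag_char(unsigned long insn, int bit, char kind, char ch)
    {
        int set = (insn >> bit) & 1;

        if ((kind == '\'' && set) || (kind == '`' && !set))
            putchar(ch);
    }

    int main(void)
    {
        unsigned long given = 0xfa11f102;   /* lsls.w r1, r1, r2 */

        fputs("lsl", stdout);
        emit_flag_char(given, 20, '\'', 's');   /* %20's -> "s" */
        printf(".w\tr%lu, r%lu, r%lu\n",
               (given >> 8) & 0xf,    /* %8-11r  -> r1 */
               (given >> 16) & 0xf,   /* %16-19r -> r1 */
               given & 0xf);          /* %0-3r   -> r2 */
        return 0;
    }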
- {ARM_EXT_V6T2, 0xfa40f080, 0xfff0f0c0, "sxtab%c\t%8-11r, %16-19r, %0-3r%R"}, - {ARM_EXT_V6T2, 0xfa50f080, 0xfff0f0c0, "uxtab%c\t%8-11r, %16-19r, %0-3r%R"}, - {ARM_EXT_V6T2, 0xfb10f000, 0xfff0f0c0, "smul%5?tb%4?tb%c\t%8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xf36f0000, 0xffff8020, "bfc%c\t%8-11r, %E"}, - {ARM_EXT_V6T2, 0xea100f00, 0xfff08f00, "tst%c.w\t%16-19r, %S"}, - {ARM_EXT_V6T2, 0xea900f00, 0xfff08f00, "teq%c\t%16-19r, %S"}, - {ARM_EXT_V6T2, 0xeb100f00, 0xfff08f00, "cmn%c.w\t%16-19r, %S"}, - {ARM_EXT_V6T2, 0xebb00f00, 0xfff08f00, "cmp%c.w\t%16-19r, %S"}, - {ARM_EXT_V6T2, 0xf0100f00, 0xfbf08f00, "tst%c.w\t%16-19r, %M"}, - {ARM_EXT_V6T2, 0xf0900f00, 0xfbf08f00, "teq%c\t%16-19r, %M"}, - {ARM_EXT_V6T2, 0xf1100f00, 0xfbf08f00, "cmn%c.w\t%16-19r, %M"}, - {ARM_EXT_V6T2, 0xf1b00f00, 0xfbf08f00, "cmp%c.w\t%16-19r, %M"}, - {ARM_EXT_V6T2, 0xea4f0000, 0xffef8000, "mov%20's%c.w\t%8-11r, %S"}, - {ARM_EXT_V6T2, 0xea6f0000, 0xffef8000, "mvn%20's%c.w\t%8-11r, %S"}, - {ARM_EXT_V6T2, 0xe8c00070, 0xfff000f0, "strexd%c\t%0-3r, %12-15r, %8-11r, [%16-19r]"}, - {ARM_EXT_V6T2, 0xfb000000, 0xfff000f0, "mla%c\t%8-11r, %16-19r, %0-3r, %12-15r"}, - {ARM_EXT_V6T2, 0xfb000010, 0xfff000f0, "mls%c\t%8-11r, %16-19r, %0-3r, %12-15r"}, - {ARM_EXT_V6T2, 0xfb700000, 0xfff000f0, "usada8%c\t%8-11r, %16-19r, %0-3r, %12-15r"}, - {ARM_EXT_V6T2, 0xfb800000, 0xfff000f0, "smull%c\t%12-15r, %8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfba00000, 0xfff000f0, "umull%c\t%12-15r, %8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfbc00000, 0xfff000f0, "smlal%c\t%12-15r, %8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfbe00000, 0xfff000f0, "umlal%c\t%12-15r, %8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfbe00060, 0xfff000f0, "umaal%c\t%12-15r, %8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xe8500f00, 0xfff00f00, "ldrex%c\t%12-15r, [%16-19r, #%0-7W]"}, - {ARM_EXT_V6T2, 0xf7f08000, 0xfff0f000, "smc%c\t%K"}, - {ARM_EXT_V6T2, 0xf04f0000, 0xfbef8000, "mov%20's%c.w\t%8-11r, %M"}, - {ARM_EXT_V6T2, 0xf06f0000, 0xfbef8000, "mvn%20's%c.w\t%8-11r, %M"}, - {ARM_EXT_V6T2, 0xf810f000, 0xff70f000, "pld%c\t%a"}, - {ARM_EXT_V6T2, 0xfb200000, 0xfff000e0, "smlad%4'x%c\t%8-11r, %16-19r, %0-3r, %12-15r"}, - {ARM_EXT_V6T2, 0xfb300000, 0xfff000e0, "smlaw%4?tb%c\t%8-11r, %16-19r, %0-3r, %12-15r"}, - {ARM_EXT_V6T2, 0xfb400000, 0xfff000e0, "smlsd%4'x%c\t%8-11r, %16-19r, %0-3r, %12-15r"}, - {ARM_EXT_V6T2, 0xfb500000, 0xfff000e0, "smmla%4'r%c\t%8-11r, %16-19r, %0-3r, %12-15r"}, - {ARM_EXT_V6T2, 0xfb600000, 0xfff000e0, "smmls%4'r%c\t%8-11r, %16-19r, %0-3r, %12-15r"}, - {ARM_EXT_V6T2, 0xfbc000c0, 0xfff000e0, "smlald%4'x%c\t%12-15r, %8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xfbd000c0, 0xfff000e0, "smlsld%4'x%c\t%12-15r, %8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xeac00000, 0xfff08030, "pkhbt%c\t%8-11r, %16-19r, %S"}, - {ARM_EXT_V6T2, 0xeac00020, 0xfff08030, "pkhtb%c\t%8-11r, %16-19r, %S"}, - {ARM_EXT_V6T2, 0xf3400000, 0xfff08020, "sbfx%c\t%8-11r, %16-19r, %F"}, - {ARM_EXT_V6T2, 0xf3c00000, 0xfff08020, "ubfx%c\t%8-11r, %16-19r, %F"}, - {ARM_EXT_V6T2, 0xf8000e00, 0xff900f00, "str%wt%c\t%12-15r, %a"}, - {ARM_EXT_V6T2, 0xfb100000, 0xfff000c0, "smla%5?tb%4?tb%c\t%8-11r, %16-19r, %0-3r, %12-15r"}, - {ARM_EXT_V6T2, 0xfbc00080, 0xfff000c0, "smlal%5?tb%4?tb%c\t%12-15r, %8-11r, %16-19r, %0-3r"}, - {ARM_EXT_V6T2, 0xf3600000, 0xfff08020, "bfi%c\t%8-11r, %16-19r, %E"}, - {ARM_EXT_V6T2, 0xf8100e00, 0xfe900f00, "ldr%wt%c\t%12-15r, %a"}, - {ARM_EXT_V6T2, 0xf3000000, 0xffd08020, "ssat%c\t%8-11r, #%0-4d, %16-19r%s"}, - {ARM_EXT_V6T2, 0xf3800000, 0xffd08020, "usat%c\t%8-11r, #%0-4d, 
%16-19r%s"}, - {ARM_EXT_V6T2, 0xf2000000, 0xfbf08000, "addw%c\t%8-11r, %16-19r, %I"}, - {ARM_EXT_V6T2, 0xf2400000, 0xfbf08000, "movw%c\t%8-11r, %J"}, - {ARM_EXT_V6T2, 0xf2a00000, 0xfbf08000, "subw%c\t%8-11r, %16-19r, %I"}, - {ARM_EXT_V6T2, 0xf2c00000, 0xfbf08000, "movt%c\t%8-11r, %J"}, - {ARM_EXT_V6T2, 0xea000000, 0xffe08000, "and%20's%c.w\t%8-11r, %16-19r, %S"}, - {ARM_EXT_V6T2, 0xea200000, 0xffe08000, "bic%20's%c.w\t%8-11r, %16-19r, %S"}, - {ARM_EXT_V6T2, 0xea400000, 0xffe08000, "orr%20's%c.w\t%8-11r, %16-19r, %S"}, - {ARM_EXT_V6T2, 0xea600000, 0xffe08000, "orn%20's%c\t%8-11r, %16-19r, %S"}, - {ARM_EXT_V6T2, 0xea800000, 0xffe08000, "eor%20's%c.w\t%8-11r, %16-19r, %S"}, - {ARM_EXT_V6T2, 0xeb000000, 0xffe08000, "add%20's%c.w\t%8-11r, %16-19r, %S"}, - {ARM_EXT_V6T2, 0xeb400000, 0xffe08000, "adc%20's%c.w\t%8-11r, %16-19r, %S"}, - {ARM_EXT_V6T2, 0xeb600000, 0xffe08000, "sbc%20's%c.w\t%8-11r, %16-19r, %S"}, - {ARM_EXT_V6T2, 0xeba00000, 0xffe08000, "sub%20's%c.w\t%8-11r, %16-19r, %S"}, - {ARM_EXT_V6T2, 0xebc00000, 0xffe08000, "rsb%20's%c\t%8-11r, %16-19r, %S"}, - {ARM_EXT_V6T2, 0xe8400000, 0xfff00000, "strex%c\t%8-11r, %12-15r, [%16-19r, #%0-7W]"}, - {ARM_EXT_V6T2, 0xf0000000, 0xfbe08000, "and%20's%c.w\t%8-11r, %16-19r, %M"}, - {ARM_EXT_V6T2, 0xf0200000, 0xfbe08000, "bic%20's%c.w\t%8-11r, %16-19r, %M"}, - {ARM_EXT_V6T2, 0xf0400000, 0xfbe08000, "orr%20's%c.w\t%8-11r, %16-19r, %M"}, - {ARM_EXT_V6T2, 0xf0600000, 0xfbe08000, "orn%20's%c\t%8-11r, %16-19r, %M"}, - {ARM_EXT_V6T2, 0xf0800000, 0xfbe08000, "eor%20's%c.w\t%8-11r, %16-19r, %M"}, - {ARM_EXT_V6T2, 0xf1000000, 0xfbe08000, "add%20's%c.w\t%8-11r, %16-19r, %M"}, - {ARM_EXT_V6T2, 0xf1400000, 0xfbe08000, "adc%20's%c.w\t%8-11r, %16-19r, %M"}, - {ARM_EXT_V6T2, 0xf1600000, 0xfbe08000, "sbc%20's%c.w\t%8-11r, %16-19r, %M"}, - {ARM_EXT_V6T2, 0xf1a00000, 0xfbe08000, "sub%20's%c.w\t%8-11r, %16-19r, %M"}, - {ARM_EXT_V6T2, 0xf1c00000, 0xfbe08000, "rsb%20's%c\t%8-11r, %16-19r, %M"}, - {ARM_EXT_V6T2, 0xe8800000, 0xffd00000, "stmia%c.w\t%16-19r%21'!, %m"}, - {ARM_EXT_V6T2, 0xe8900000, 0xffd00000, "ldmia%c.w\t%16-19r%21'!, %m"}, - {ARM_EXT_V6T2, 0xe9000000, 0xffd00000, "stmdb%c\t%16-19r%21'!, %m"}, - {ARM_EXT_V6T2, 0xe9100000, 0xffd00000, "ldmdb%c\t%16-19r%21'!, %m"}, - {ARM_EXT_V6T2, 0xe9c00000, 0xffd000ff, "strd%c\t%12-15r, %8-11r, [%16-19r]"}, - {ARM_EXT_V6T2, 0xe9d00000, 0xffd000ff, "ldrd%c\t%12-15r, %8-11r, [%16-19r]"}, - {ARM_EXT_V6T2, 0xe9400000, 0xff500000, "strd%c\t%12-15r, %8-11r, [%16-19r, #%23`-%0-7W]%21'!"}, - {ARM_EXT_V6T2, 0xe9500000, 0xff500000, "ldrd%c\t%12-15r, %8-11r, [%16-19r, #%23`-%0-7W]%21'!"}, - {ARM_EXT_V6T2, 0xe8600000, 0xff700000, "strd%c\t%12-15r, %8-11r, [%16-19r], #%23`-%0-7W"}, - {ARM_EXT_V6T2, 0xe8700000, 0xff700000, "ldrd%c\t%12-15r, %8-11r, [%16-19r], #%23`-%0-7W"}, - {ARM_EXT_V6T2, 0xf8000000, 0xff100000, "str%w%c.w\t%12-15r, %a"}, - {ARM_EXT_V6T2, 0xf8100000, 0xfe100000, "ldr%w%c.w\t%12-15r, %a"}, - - /* Filter out Bcc with cond=E or F, which are used for other instructions. */ - {ARM_EXT_V6T2, 0xf3c08000, 0xfbc0d000, "undefined (bcc, cond=0xF)"}, - {ARM_EXT_V6T2, 0xf3808000, 0xfbc0d000, "undefined (bcc, cond=0xE)"}, - {ARM_EXT_V6T2, 0xf0008000, 0xf800d000, "b%22-25c.w\t%b%X"}, - {ARM_EXT_V6T2, 0xf0009000, 0xf800d000, "b%c.w\t%B%x"}, - - /* These have been 32-bit since the invention of Thumb. */ - {ARM_EXT_V4T, 0xf000c000, 0xf800d000, "blx%c\t%B%x"}, - {ARM_EXT_V4T, 0xf000d000, 0xf800d000, "bl%c\t%B%x"}, - - /* Fallback. 
*/ - {ARM_EXT_V1, 0x00000000, 0x00000000, "undefined"}, - {0, 0, 0, 0} -}; - -static const char *const arm_conditional[] = -{"eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc", - "hi", "ls", "ge", "lt", "gt", "le", "al", "", ""}; - -static const char *const arm_fp_const[] = -{"0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0"}; - -static const char *const arm_shift[] = -{"lsl", "lsr", "asr", "ror"}; - -typedef struct -{ - const char *name; - const char *description; - const char *reg_names[16]; -} -arm_regname; - -static const arm_regname regnames[] = -{ - { "raw" , "Select raw register names", - { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"}}, - { "gcc", "Select register names used by GCC", - { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc" }}, - { "std", "Select register names used in ARM's ISA documentation", - { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "sp", "lr", "pc" }}, - { "apcs", "Select register names used in the APCS", - { "a1", "a2", "a3", "a4", "v1", "v2", "v3", "v4", "v5", "v6", "sl", "fp", "ip", "sp", "lr", "pc" }}, - { "atpcs", "Select register names used in the ATPCS", - { "a1", "a2", "a3", "a4", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "IP", "SP", "LR", "PC" }}, - { "special-atpcs", "Select special register names used in the ATPCS", - { "a1", "a2", "a3", "a4", "v1", "v2", "v3", "WR", "v5", "SB", "SL", "FP", "IP", "SP", "LR", "PC" }}, -}; - -static const char *const iwmmxt_wwnames[] = -{"b", "h", "w", "d"}; - -static const char *const iwmmxt_wwssnames[] = -{"b", "bus", "bc", "bss", - "h", "hus", "hc", "hss", - "w", "wus", "wc", "wss", - "d", "dus", "dc", "dss" -}; - -static const char *const iwmmxt_regnames[] = -{ "wr0", "wr1", "wr2", "wr3", "wr4", "wr5", "wr6", "wr7", - "wr8", "wr9", "wr10", "wr11", "wr12", "wr13", "wr14", "wr15" -}; - -static const char *const iwmmxt_cregnames[] = -{ "wcid", "wcon", "wcssf", "wcasf", "reserved", "reserved", "reserved", "reserved", - "wcgr0", "wcgr1", "wcgr2", "wcgr3", "reserved", "reserved", "reserved", "reserved" -}; - -/* Default to GCC register name set. */ -static unsigned int regname_selected = 1; - -#define arm_regnames regnames[regname_selected].reg_names - -static bfd_boolean force_thumb = false; - -/* Current IT instruction state. This contains the same state as the IT - bits in the CPSR. */ -static unsigned int ifthen_state; -/* IT state for the next instruction. */ -static unsigned int ifthen_next_state; -/* The address of the insn for which the IT state is valid. */ -static bfd_vma ifthen_address; -#define IFTHEN_COND ((ifthen_state >> 4) & 0xf) - -/* Cached mapping symbol state. */ -enum map_type { - MAP_ARM, - MAP_THUMB, - MAP_DATA -}; - -/* Decode a bitfield of the form matching regexp (N(-N)?,)*N(-N)?. - Returns pointer to following character of the format string and - fills in *VALUEP and *WIDTHP with the extracted value and number of - bits extracted. WIDTHP can be NULL. 
*/ - -static const char * -arm_decode_bitfield (const char *ptr, unsigned long insn, - unsigned long *valuep, int *widthp) -{ - unsigned long value = 0; - int width = 0; - - do - { - int start, end; - int bits; - - for (start = 0; *ptr >= '0' && *ptr <= '9'; ptr++) - start = start * 10 + *ptr - '0'; - if (*ptr == '-') - for (end = 0, ptr++; *ptr >= '0' && *ptr <= '9'; ptr++) - end = end * 10 + *ptr - '0'; - else - end = start; - bits = end - start; - if (bits < 0) - abort (); - value |= ((insn >> start) & ((2ul << bits) - 1)) << width; - width += bits + 1; - } - while (*ptr++ == ','); - *valuep = value; - if (widthp) - *widthp = width; - return ptr - 1; -} - -static void -arm_decode_shift (long given, fprintf_function func, void *stream, - int print_shift) -{ - func (stream, "%s", arm_regnames[given & 0xf]); - - if ((given & 0xff0) != 0) - { - if ((given & 0x10) == 0) - { - int amount = (given & 0xf80) >> 7; - int shift = (given & 0x60) >> 5; - - if (amount == 0) - { - if (shift == 3) - { - func (stream, ", rrx"); - return; - } - - amount = 32; - } - - if (print_shift) - func (stream, ", %s #%d", arm_shift[shift], amount); - else - func (stream, ", #%d", amount); - } - else if (print_shift) - func (stream, ", %s %s", arm_shift[(given & 0x60) >> 5], - arm_regnames[(given & 0xf00) >> 8]); - else - func (stream, ", %s", arm_regnames[(given & 0xf00) >> 8]); - } -} - -/* Print one coprocessor instruction on INFO->STREAM. - Return true if the instruction matched, false if this is not a - recognised coprocessor instruction. */ - -static bfd_boolean -print_insn_coprocessor (bfd_vma pc, struct disassemble_info *info, long given, - bfd_boolean thumb) -{ - const struct opcode32 *insn; - void *stream = info->stream; - fprintf_function func = info->fprintf_func; - unsigned long mask; - unsigned long value; - int cond; - - for (insn = coprocessor_opcodes; insn->assembler; insn++) - { - if (insn->value == FIRST_IWMMXT_INSN - && info->mach != bfd_mach_arm_XScale - && info->mach != bfd_mach_arm_iWMMXt - && info->mach != bfd_mach_arm_iWMMXt2) - insn = insn + IWMMXT_INSN_COUNT; - - mask = insn->mask; - value = insn->value; - if (thumb) - { - /* The high 4 bits are 0xe for Arm conditional instructions, and - 0xe for arm unconditional instructions. The rest of the - encoding is the same. */ - mask |= 0xf0000000; - value |= 0xe0000000; - if (ifthen_state) - cond = IFTHEN_COND; - else - cond = 16; - } - else - { - /* Only match unconditional instructions against unconditional - patterns. */ - if ((given & 0xf0000000) == 0xf0000000) - { - mask |= 0xf0000000; - cond = 16; - } - else - { - cond = (given >> 28) & 0xf; - if (cond == 0xe) - cond = 16; - } - } - if ((given & mask) == value) - { - const char *c; - - for (c = insn->assembler; *c; c++) - { - if (*c == '%') - { - switch (*++c) - { - case '%': - func (stream, "%%"); - break; - - case 'A': - func (stream, "[%s", arm_regnames [(given >> 16) & 0xf]); - - if ((given & (1 << 24)) != 0) - { - int offset = given & 0xff; - - if (offset) - func (stream, ", #%s%d]%s", - ((given & 0x00800000) == 0 ? "-" : ""), - offset * 4, - ((given & 0x00200000) != 0 ? "!" : "")); - else - func (stream, "]"); - } - else - { - int offset = given & 0xff; - - func (stream, "]"); - - if (given & (1 << 21)) - { - if (offset) - func (stream, ", #%s%d", - ((given & 0x00800000) == 0 ? 
"-" : ""), - offset * 4); - } - else - func (stream, ", {%d}", offset); - } - break; - - case 'B': - { - int regno = ((given >> 12) & 0xf) | ((given >> (22 - 4)) & 0x10); - int offset = (given >> 1) & 0x3f; - - if (offset == 1) - func (stream, "{d%d}", regno); - else if (regno + offset > 32) - func (stream, "{d%d-}", regno, regno + offset - 1); - else - func (stream, "{d%d-d%d}", regno, regno + offset - 1); - } - break; - - case 'C': - { - int rn = (given >> 16) & 0xf; - int offset = (given & 0xff) * 4; - int add = (given >> 23) & 1; - - func (stream, "[%s", arm_regnames[rn]); - - if (offset) - { - if (!add) - offset = -offset; - func (stream, ", #%d", offset); - } - func (stream, "]"); - if (rn == 15) - { - func (stream, "\t; "); - /* FIXME: Unsure if info->bytes_per_chunk is the - right thing to use here. */ - info->print_address_func (offset + pc - + info->bytes_per_chunk * 2, info); - } - } - break; - - case 'c': - func (stream, "%s", arm_conditional[cond]); - break; - - case 'I': - /* Print a Cirrus/DSP shift immediate. */ - /* Immediates are 7bit signed ints with bits 0..3 in - bits 0..3 of opcode and bits 4..6 in bits 5..7 - of opcode. */ - { - int imm; - - imm = (given & 0xf) | ((given & 0xe0) >> 1); - - /* Is ``imm'' a negative number? */ - if (imm & 0x40) - imm |= (~0u << 7); - - func (stream, "%d", imm); - } - - break; - - case 'F': - switch (given & 0x00408000) - { - case 0: - func (stream, "4"); - break; - case 0x8000: - func (stream, "1"); - break; - case 0x00400000: - func (stream, "2"); - break; - default: - func (stream, "3"); - } - break; - - case 'P': - switch (given & 0x00080080) - { - case 0: - func (stream, "s"); - break; - case 0x80: - func (stream, "d"); - break; - case 0x00080000: - func (stream, "e"); - break; - default: - func (stream, ""); - break; - } - break; - case 'Q': - switch (given & 0x00408000) - { - case 0: - func (stream, "s"); - break; - case 0x8000: - func (stream, "d"); - break; - case 0x00400000: - func (stream, "e"); - break; - default: - func (stream, "p"); - break; - } - break; - case 'R': - switch (given & 0x60) - { - case 0: - break; - case 0x20: - func (stream, "p"); - break; - case 0x40: - func (stream, "m"); - break; - default: - func (stream, "z"); - break; - } - break; - - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': - { - int width; - unsigned long value; - - c = arm_decode_bitfield (c, given, &value, &width); - - switch (*c) - { - case 'r': - func (stream, "%s", arm_regnames[value]); - break; - case 'D': - func (stream, "d%ld", value); - break; - case 'Q': - if (value & 1) - func (stream, "", value >> 1); - else - func (stream, "q%ld", value >> 1); - break; - case 'd': - func (stream, "%ld", value); - break; - case 'k': - { - int from = (given & (1 << 7)) ? 
32 : 16; - func (stream, "%ld", from - value); - } - break; - - case 'f': - if (value > 7) - func (stream, "#%s", arm_fp_const[value & 7]); - else - func (stream, "f%ld", value); - break; - - case 'w': - if (width == 2) - func (stream, "%s", iwmmxt_wwnames[value]); - else - func (stream, "%s", iwmmxt_wwssnames[value]); - break; - - case 'g': - func (stream, "%s", iwmmxt_regnames[value]); - break; - case 'G': - func (stream, "%s", iwmmxt_cregnames[value]); - break; - - case 'x': - func (stream, "0x%lx", value); - break; - - case '`': - c++; - if (value == 0) - func (stream, "%c", *c); - break; - case '\'': - c++; - if (value == ((1ul << width) - 1)) - func (stream, "%c", *c); - break; - case '?': - func (stream, "%c", c[(1 << width) - (int)value]); - c += 1 << width; - break; - default: - abort (); - } - break; - - case 'y': - case 'z': - { - int single = *c++ == 'y'; - int regno; - - switch (*c) - { - case '4': /* Sm pair */ - func (stream, "{"); - /* Fall through. */ - case '0': /* Sm, Dm */ - regno = given & 0x0000000f; - if (single) - { - regno <<= 1; - regno += (given >> 5) & 1; - } - else - regno += ((given >> 5) & 1) << 4; - break; - - case '1': /* Sd, Dd */ - regno = (given >> 12) & 0x0000000f; - if (single) - { - regno <<= 1; - regno += (given >> 22) & 1; - } - else - regno += ((given >> 22) & 1) << 4; - break; - - case '2': /* Sn, Dn */ - regno = (given >> 16) & 0x0000000f; - if (single) - { - regno <<= 1; - regno += (given >> 7) & 1; - } - else - regno += ((given >> 7) & 1) << 4; - break; - - case '3': /* List */ - func (stream, "{"); - regno = (given >> 12) & 0x0000000f; - if (single) - { - regno <<= 1; - regno += (given >> 22) & 1; - } - else - regno += ((given >> 22) & 1) << 4; - break; - - default: - abort (); - } - - func (stream, "%c%d", single ? 's' : 'd', regno); - - if (*c == '3') - { - int count = given & 0xff; - - if (single == 0) - count >>= 1; - - if (--count) - { - func (stream, "-%c%d", - single ? 's' : 'd', - regno + count); - } - - func (stream, "}"); - } - else if (*c == '4') - func (stream, ", %c%d}", single ? 's' : 'd', - regno + 1); - } - break; - - case 'L': - switch (given & 0x00400100) - { - case 0x00000000: func (stream, "b"); break; - case 0x00400000: func (stream, "h"); break; - case 0x00000100: func (stream, "w"); break; - case 0x00400100: func (stream, "d"); break; - default: - break; - } - break; - - case 'Z': - { - int value; - /* given (20, 23) | given (0, 3) */ - value = ((given >> 16) & 0xf0) | (given & 0xf); - func (stream, "%d", value); - } - break; - - case 'l': - /* This is like the 'A' operator, except that if - the width field "M" is zero, then the offset is - *not* multiplied by four. */ - { - int offset = given & 0xff; - int multiplier = (given & 0x00000100) ? 4 : 1; - - func (stream, "[%s", arm_regnames [(given >> 16) & 0xf]); - - if (offset) - { - if ((given & 0x01000000) != 0) - func (stream, ", #%s%d]%s", - ((given & 0x00800000) == 0 ? "-" : ""), - offset * multiplier, - ((given & 0x00200000) != 0 ? "!" : "")); - else - func (stream, "], #%s%d", - ((given & 0x00800000) == 0 ? "-" : ""), - offset * multiplier); - } - else - func (stream, "]"); - } - break; - - case 'r': - { - int imm4 = (given >> 4) & 0xf; - int puw_bits = ((given >> 22) & 6) | ((given >> 21) & 1); - int ubit = (given >> 23) & 1; - const char *rm = arm_regnames [given & 0xf]; - const char *rn = arm_regnames [(given >> 16) & 0xf]; - - switch (puw_bits) - { - case 1: - /* fall through */ - case 3: - func (stream, "[%s], %c%s", rn, ubit ? 
'+' : '-', rm); - if (imm4) - func (stream, ", lsl #%d", imm4); - break; - - case 4: - /* fall through */ - case 5: - /* fall through */ - case 6: - /* fall through */ - case 7: - func (stream, "[%s, %c%s", rn, ubit ? '+' : '-', rm); - if (imm4 > 0) - func (stream, ", lsl #%d", imm4); - func (stream, "]"); - if (puw_bits == 5 || puw_bits == 7) - func (stream, "!"); - break; - - default: - func (stream, "INVALID"); - } - } - break; - - case 'i': - { - long imm5; - imm5 = ((given & 0x100) >> 4) | (given & 0xf); - func (stream, "%ld", (imm5 == 0) ? 32 : imm5); - } - break; - - default: - abort (); - } - } - } - else - func (stream, "%c", *c); - } - return true; - } - } - return false; -} - -static void -print_arm_address (bfd_vma pc, struct disassemble_info *info, long given) -{ - void *stream = info->stream; - fprintf_function func = info->fprintf_func; - - if (((given & 0x000f0000) == 0x000f0000) - && ((given & 0x02000000) == 0)) - { - int offset = given & 0xfff; - - func (stream, "[pc"); - - if (given & 0x01000000) - { - if ((given & 0x00800000) == 0) - offset = - offset; - - /* Pre-indexed. */ - func (stream, ", #%d]", offset); - - offset += pc + 8; - - /* Cope with the possibility of write-back - being used. Probably a very dangerous thing - for the programmer to do, but who are we to - argue ? */ - if (given & 0x00200000) - func (stream, "!"); - } - else - { - /* Post indexed. */ - func (stream, "], #%d", offset); - - /* ie ignore the offset. */ - offset = pc + 8; - } - - func (stream, "\t; "); - info->print_address_func (offset, info); - } - else - { - func (stream, "[%s", - arm_regnames[(given >> 16) & 0xf]); - if ((given & 0x01000000) != 0) - { - if ((given & 0x02000000) == 0) - { - int offset = given & 0xfff; - if (offset) - func (stream, ", #%s%d", - (((given & 0x00800000) == 0) - ? "-" : ""), offset); - } - else - { - func (stream, ", %s", - (((given & 0x00800000) == 0) - ? "-" : "")); - arm_decode_shift (given, func, stream, 1); - } - - func (stream, "]%s", - ((given & 0x00200000) != 0) ? "!" : ""); - } - else - { - if ((given & 0x02000000) == 0) - { - int offset = given & 0xfff; - if (offset) - func (stream, "], #%s%d", - (((given & 0x00800000) == 0) - ? "-" : ""), offset); - else - func (stream, "]"); - } - else - { - func (stream, "], %s", - (((given & 0x00800000) == 0) - ? "-" : "")); - arm_decode_shift (given, func, stream, 1); - } - } - } -} - -/* Print one neon instruction on INFO->STREAM. - Return true if the instruction matched, false if this is not a - recognised neon instruction. */ - -static bfd_boolean -print_insn_neon (struct disassemble_info *info, long given, bfd_boolean thumb) -{ - const struct opcode32 *insn; - void *stream = info->stream; - fprintf_function func = info->fprintf_func; - - if (thumb) - { - if ((given & 0xef000000) == 0xef000000) - { - /* move bit 28 to bit 24 to translate Thumb2 to ARM encoding. 
*/ - unsigned long bit28 = given & (1 << 28); - - given &= 0x00ffffff; - if (bit28) - given |= 0xf3000000; - else - given |= 0xf2000000; - } - else if ((given & 0xff000000) == 0xf9000000) - given ^= 0xf9000000 ^ 0xf4000000; - else - return false; - } - - for (insn = neon_opcodes; insn->assembler; insn++) - { - if ((given & insn->mask) == insn->value) - { - const char *c; - - for (c = insn->assembler; *c; c++) - { - if (*c == '%') - { - switch (*++c) - { - case '%': - func (stream, "%%"); - break; - - case 'c': - if (thumb && ifthen_state) - func (stream, "%s", arm_conditional[IFTHEN_COND]); - break; - - case 'A': - { - static const unsigned char enc[16] = - { - 0x4, 0x14, /* st4 0,1 */ - 0x4, /* st1 2 */ - 0x4, /* st2 3 */ - 0x3, /* st3 4 */ - 0x13, /* st3 5 */ - 0x3, /* st1 6 */ - 0x1, /* st1 7 */ - 0x2, /* st2 8 */ - 0x12, /* st2 9 */ - 0x2, /* st1 10 */ - 0, 0, 0, 0, 0 - }; - int rd = ((given >> 12) & 0xf) | (((given >> 22) & 1) << 4); - int rn = ((given >> 16) & 0xf); - int rm = ((given >> 0) & 0xf); - int align = ((given >> 4) & 0x3); - int type = ((given >> 8) & 0xf); - int n = enc[type] & 0xf; - int stride = (enc[type] >> 4) + 1; - int ix; - - func (stream, "{"); - if (stride > 1) - for (ix = 0; ix != n; ix++) - func (stream, "%sd%d", ix ? "," : "", rd + ix * stride); - else if (n == 1) - func (stream, "d%d", rd); - else - func (stream, "d%d-d%d", rd, rd + n - 1); - func (stream, "}, [%s", arm_regnames[rn]); - if (align) - func (stream, ", :%d", 32 << align); - func (stream, "]"); - if (rm == 0xd) - func (stream, "!"); - else if (rm != 0xf) - func (stream, ", %s", arm_regnames[rm]); - } - break; - - case 'B': - { - int rd = ((given >> 12) & 0xf) | (((given >> 22) & 1) << 4); - int rn = ((given >> 16) & 0xf); - int rm = ((given >> 0) & 0xf); - int idx_align = ((given >> 4) & 0xf); - int align = 0; - int size = ((given >> 10) & 0x3); - int idx = idx_align >> (size + 1); - int length = ((given >> 8) & 3) + 1; - int stride = 1; - int i; - - if (length > 1 && size > 0) - stride = (idx_align & (1 << size)) ? 2 : 1; - - switch (length) - { - case 1: - { - int amask = (1 << size) - 1; - if ((idx_align & (1 << size)) != 0) - return false; - if (size > 0) - { - if ((idx_align & amask) == amask) - align = 8 << size; - else if ((idx_align & amask) != 0) - return false; - } - } - break; - - case 2: - if (size == 2 && (idx_align & 2) != 0) - return false; - align = (idx_align & 1) ? 16 << size : 0; - break; - - case 3: - if ((size == 2 && (idx_align & 3) != 0) - || (idx_align & 1) != 0) - return false; - break; - - case 4: - if (size == 2) - { - if ((idx_align & 3) == 3) - return false; - align = (idx_align & 3) * 64; - } - else - align = (idx_align & 1) ? 32 << size : 0; - break; - - default: - abort (); - } - - func (stream, "{"); - for (i = 0; i < length; i++) - func (stream, "%sd%d[%d]", (i == 0) ? 
"" : ",", - rd + i * stride, idx); - func (stream, "}, [%s", arm_regnames[rn]); - if (align) - func (stream, ", :%d", align); - func (stream, "]"); - if (rm == 0xd) - func (stream, "!"); - else if (rm != 0xf) - func (stream, ", %s", arm_regnames[rm]); - } - break; - - case 'C': - { - int rd = ((given >> 12) & 0xf) | (((given >> 22) & 1) << 4); - int rn = ((given >> 16) & 0xf); - int rm = ((given >> 0) & 0xf); - int align = ((given >> 4) & 0x1); - int size = ((given >> 6) & 0x3); - int type = ((given >> 8) & 0x3); - int n = type + 1; - int stride = ((given >> 5) & 0x1); - int ix; - - if (stride && (n == 1)) - n++; - else - stride++; - - func (stream, "{"); - if (stride > 1) - for (ix = 0; ix != n; ix++) - func (stream, "%sd%d[]", ix ? "," : "", rd + ix * stride); - else if (n == 1) - func (stream, "d%d[]", rd); - else - func (stream, "d%d[]-d%d[]", rd, rd + n - 1); - func (stream, "}, [%s", arm_regnames[rn]); - if (align) - { - int align = (8 * (type + 1)) << size; - if (type == 3) - align = (size > 1) ? align >> 1 : align; - if (type == 2 || (type == 0 && !size)) - func (stream, ", :", align); - else - func (stream, ", :%d", align); - } - func (stream, "]"); - if (rm == 0xd) - func (stream, "!"); - else if (rm != 0xf) - func (stream, ", %s", arm_regnames[rm]); - } - break; - - case 'D': - { - int raw_reg = (given & 0xf) | ((given >> 1) & 0x10); - int size = (given >> 20) & 3; - int reg = raw_reg & ((4 << size) - 1); - int ix = raw_reg >> size >> 2; - - func (stream, "d%d[%d]", reg, ix); - } - break; - - case 'E': - /* Neon encoded constant for mov, mvn, vorr, vbic */ - { - int bits = 0; - int cmode = (given >> 8) & 0xf; - int op = (given >> 5) & 0x1; - unsigned long value = 0, hival = 0; - unsigned shift; - int size = 0; - int isfloat = 0; - - bits |= ((given >> 24) & 1) << 7; - bits |= ((given >> 16) & 7) << 4; - bits |= ((given >> 0) & 15) << 0; - - if (cmode < 8) - { - shift = (cmode >> 1) & 3; - value = (unsigned long)bits << (8 * shift); - size = 32; - } - else if (cmode < 12) - { - shift = (cmode >> 1) & 1; - value = (unsigned long)bits << (8 * shift); - size = 16; - } - else if (cmode < 14) - { - shift = (cmode & 1) + 1; - value = (unsigned long)bits << (8 * shift); - value |= (1ul << (8 * shift)) - 1; - size = 32; - } - else if (cmode == 14) - { - if (op) - { - /* bit replication into bytes */ - int ix; - unsigned long mask; - - value = 0; - hival = 0; - for (ix = 7; ix >= 0; ix--) - { - mask = ((bits >> ix) & 1) ? 0xff : 0; - if (ix <= 3) - value = (value << 8) | mask; - else - hival = (hival << 8) | mask; - } - size = 64; - } - else - { - /* byte replication */ - value = (unsigned long)bits; - size = 8; - } - } - else if (!op) - { - /* floating point encoding */ - int tmp; - - value = (unsigned long)(bits & 0x7f) << 19; - value |= (unsigned long)(bits & 0x80) << 24; - tmp = bits & 0x40 ? 0x3c : 0x40; - value |= (unsigned long)tmp << 24; - size = 32; - isfloat = 1; - } - else - { - func (stream, "", - bits, cmode, op); - break; - } - switch (size) - { - case 8: - func (stream, "#%ld\t; 0x%.2lx", value, value); - break; - - case 16: - func (stream, "#%ld\t; 0x%.4lx", value, value); - break; - - case 32: - if (isfloat) - { - unsigned char valbytes[4]; - double fvalue; - - /* Do this a byte at a time so we don't have to - worry about the host's endianness. 
*/ - valbytes[0] = value & 0xff; - valbytes[1] = (value >> 8) & 0xff; - valbytes[2] = (value >> 16) & 0xff; - valbytes[3] = (value >> 24) & 0xff; - - floatformat_to_double (valbytes, &fvalue); - - func (stream, "#%.7g\t; 0x%.8lx", fvalue, - value); - } - else - func (stream, "#%ld\t; 0x%.8lx", - (long) ((value & 0x80000000) - ? value | ~0xffffffffl : value), value); - break; - - case 64: - func (stream, "#0x%.8lx%.8lx", hival, value); - break; - - default: - abort (); - } - } - break; - - case 'F': - { - int regno = ((given >> 16) & 0xf) | ((given >> (7 - 4)) & 0x10); - int num = (given >> 8) & 0x3; - - if (!num) - func (stream, "{d%d}", regno); - else if (num + regno >= 32) - func (stream, "{d%d-= '0' && *c <= '9') - limit = *c - '0'; - else if (*c >= 'a' && *c <= 'f') - limit = *c - 'a' + 10; - else - abort (); - low = limit >> 2; - high = limit & 3; - - if (value < low || value > high) - func (stream, "", base << value); - else - func (stream, "%d", base << value); - } - break; - case 'R': - if (given & (1 << 6)) - goto Q; - /* FALLTHROUGH */ - case 'D': - func (stream, "d%ld", value); - break; - case 'Q': - Q: - if (value & 1) - func (stream, "", value >> 1); - else - func (stream, "q%ld", value >> 1); - break; - - case '`': - c++; - if (value == 0) - func (stream, "%c", *c); - break; - case '\'': - c++; - if (value == ((1ul << width) - 1)) - func (stream, "%c", *c); - break; - case '?': - func (stream, "%c", c[(1 << width) - (int)value]); - c += 1 << width; - break; - default: - abort (); - } - break; - - default: - abort (); - } - } - } - else - func (stream, "%c", *c); - } - return true; - } - } - return false; -} - -/* Print one ARM instruction from PC on INFO->STREAM. */ - -static void -print_insn_arm_internal (bfd_vma pc, struct disassemble_info *info, long given) -{ - const struct opcode32 *insn; - void *stream = info->stream; - fprintf_function func = info->fprintf_func; - - if (print_insn_coprocessor (pc, info, given, false)) - return; - - if (print_insn_neon (info, given, false)) - return; - - for (insn = arm_opcodes; insn->assembler; insn++) - { - if (insn->value == FIRST_IWMMXT_INSN - && info->mach != bfd_mach_arm_XScale - && info->mach != bfd_mach_arm_iWMMXt) - insn = insn + IWMMXT_INSN_COUNT; - - if ((given & insn->mask) == insn->value - /* Special case: an instruction with all bits set in the condition field - (0xFnnn_nnnn) is only matched if all those bits are set in insn->mask, - or by the catchall at the end of the table. */ - && ((given & 0xF0000000) != 0xF0000000 - || (insn->mask & 0xF0000000) == 0xF0000000 - || (insn->mask == 0 && insn->value == 0))) - { - const char *c; - - for (c = insn->assembler; *c; c++) - { - if (*c == '%') - { - switch (*++c) - { - case '%': - func (stream, "%%"); - break; - - case 'a': - print_arm_address (pc, info, given); - break; - - case 'P': - /* Set P address bit and use normal address - printing routine. */ - print_arm_address (pc, info, given | (1 << 24)); - break; - - case 's': - if ((given & 0x004f0000) == 0x004f0000) - { - /* PC relative with immediate offset. */ - int offset = ((given & 0xf00) >> 4) | (given & 0xf); - - if ((given & 0x00800000) == 0) - offset = -offset; - - func (stream, "[pc, #%d]\t; ", offset); - info->print_address_func (offset + pc + 8, info); - } - else - { - func (stream, "[%s", - arm_regnames[(given >> 16) & 0xf]); - if ((given & 0x01000000) != 0) - { - /* Pre-indexed. */ - if ((given & 0x00400000) == 0x00400000) - { - /* Immediate. 
*/ - int offset = ((given & 0xf00) >> 4) | (given & 0xf); - if (offset) - func (stream, ", #%s%d", - (((given & 0x00800000) == 0) - ? "-" : ""), offset); - } - else - { - /* Register. */ - func (stream, ", %s%s", - (((given & 0x00800000) == 0) - ? "-" : ""), - arm_regnames[given & 0xf]); - } - - func (stream, "]%s", - ((given & 0x00200000) != 0) ? "!" : ""); - } - else - { - /* Post-indexed. */ - if ((given & 0x00400000) == 0x00400000) - { - /* Immediate. */ - int offset = ((given & 0xf00) >> 4) | (given & 0xf); - if (offset) - func (stream, "], #%s%d", - (((given & 0x00800000) == 0) - ? "-" : ""), offset); - else - func (stream, "]"); - } - else - { - /* Register. */ - func (stream, "], %s%s", - (((given & 0x00800000) == 0) - ? "-" : ""), - arm_regnames[given & 0xf]); - } - } - } - break; - - case 'b': - { - int disp = (((given & 0xffffff) ^ 0x800000) - 0x800000); - info->print_address_func (disp*4 + pc + 8, info); - } - break; - - case 'c': - if (((given >> 28) & 0xf) != 0xe) - func (stream, "%s", - arm_conditional [(given >> 28) & 0xf]); - break; - - case 'm': - { - int started = 0; - int reg; - - func (stream, "{"); - for (reg = 0; reg < 16; reg++) - if ((given & (1 << reg)) != 0) - { - if (started) - func (stream, ", "); - started = 1; - func (stream, "%s", arm_regnames[reg]); - } - func (stream, "}"); - } - break; - - case 'q': - arm_decode_shift (given, func, stream, 0); - break; - - case 'o': - if ((given & 0x02000000) != 0) - { - int rotate = (given & 0xf00) >> 7; - int immed = (given & 0xff); - immed = (((immed << (32 - rotate)) - | (immed >> rotate)) & 0xffffffff); - func (stream, "#%d\t; 0x%x", immed, immed); - } - else - arm_decode_shift (given, func, stream, 1); - break; - - case 'p': - if ((given & 0x0000f000) == 0x0000f000) - func (stream, "p"); - break; - - case 't': - if ((given & 0x01200000) == 0x00200000) - func (stream, "t"); - break; - - case 'A': - func (stream, "[%s", arm_regnames [(given >> 16) & 0xf]); - - if ((given & (1 << 24)) != 0) - { - int offset = given & 0xff; - - if (offset) - func (stream, ", #%s%d]%s", - ((given & 0x00800000) == 0 ? "-" : ""), - offset * 4, - ((given & 0x00200000) != 0 ? "!" : "")); - else - func (stream, "]"); - } - else - { - int offset = given & 0xff; - - func (stream, "]"); - - if (given & (1 << 21)) - { - if (offset) - func (stream, ", #%s%d", - ((given & 0x00800000) == 0 ? "-" : ""), - offset * 4); - } - else - func (stream, ", {%d}", offset); - } - break; - - case 'B': - /* Print ARM V5 BLX(1) address: pc+25 bits. */ - { - bfd_vma address; - bfd_vma offset = 0; - - if (given & 0x00800000) - /* Is signed, hi bits should be ones. */ - offset = (-1) ^ 0x00ffffff; - - /* Offset is (SignExtend(offset field)<<2). */ - offset += given & 0x00ffffff; - offset <<= 2; - address = offset + pc + 8; - - if (given & 0x01000000) - /* H bit allows addressing to 2-byte boundaries. 
*/ - address += 2; - - info->print_address_func (address, info); - } - break; - - case 'C': - func (stream, "_"); - if (given & 0x80000) - func (stream, "f"); - if (given & 0x40000) - func (stream, "s"); - if (given & 0x20000) - func (stream, "x"); - if (given & 0x10000) - func (stream, "c"); - break; - - case 'U': - switch (given & 0xf) - { - case 0xf: func(stream, "sy"); break; - case 0x7: func(stream, "un"); break; - case 0xe: func(stream, "st"); break; - case 0x6: func(stream, "unst"); break; - default: - func(stream, "#%d", (int)given & 0xf); - break; - } - break; - - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': - { - int width; - unsigned long value; - - c = arm_decode_bitfield (c, given, &value, &width); - - switch (*c) - { - case 'r': - func (stream, "%s", arm_regnames[value]); - break; - case 'd': - func (stream, "%ld", value); - break; - case 'b': - func (stream, "%ld", value * 8); - break; - case 'W': - func (stream, "%ld", value + 1); - break; - case 'x': - func (stream, "0x%08lx", value); - - /* Some SWI instructions have special - meanings. */ - if ((given & 0x0fffffff) == 0x0FF00000) - func (stream, "\t; IMB"); - else if ((given & 0x0fffffff) == 0x0FF00001) - func (stream, "\t; IMBRange"); - break; - case 'X': - func (stream, "%01lx", value & 0xf); - break; - case '`': - c++; - if (value == 0) - func (stream, "%c", *c); - break; - case '\'': - c++; - if (value == ((1ul << width) - 1)) - func (stream, "%c", *c); - break; - case '?': - func (stream, "%c", c[(1 << width) - (int)value]); - c += 1 << width; - break; - default: - abort (); - } - break; - - case 'e': - { - int imm; - - imm = (given & 0xf) | ((given & 0xfff00) >> 4); - func (stream, "%d", imm); - } - break; - - case 'E': - /* LSB and WIDTH fields of BFI or BFC. The machine- - language instruction encodes LSB and MSB. */ - { - long msb = (given & 0x001f0000) >> 16; - long lsb = (given & 0x00000f80) >> 7; - - long width = msb - lsb + 1; - if (width > 0) - func (stream, "#%lu, #%lu", lsb, width); - else - func (stream, "(invalid: %lu:%lu)", lsb, msb); - } - break; - - case 'V': - /* 16-bit unsigned immediate from a MOVT or MOVW - instruction, encoded in bits 0:11 and 15:19. */ - { - long hi = (given & 0x000f0000) >> 4; - long lo = (given & 0x00000fff); - long imm16 = hi | lo; - func (stream, "#%lu\t; 0x%lx", imm16, imm16); - } - break; - - default: - abort (); - } - } - } - else - func (stream, "%c", *c); - } - return; - } - } - abort (); -} - -/* Print one 16-bit Thumb instruction from PC on INFO->STREAM. */ - -static void -print_insn_thumb16 (bfd_vma pc, struct disassemble_info *info, long given) -{ - const struct opcode16 *insn; - void *stream = info->stream; - fprintf_function func = info->fprintf_func; - - for (insn = thumb_opcodes; insn->assembler; insn++) - if ((given & insn->mask) == insn->value) - { - const char *c = insn->assembler; - for (; *c; c++) - { - int domaskpc = 0; - int domasklr = 0; - - if (*c != '%') - { - func (stream, "%c", *c); - continue; - } - - switch (*++c) - { - case '%': - func (stream, "%%"); - break; - - case 'c': - if (ifthen_state) - func (stream, "%s", arm_conditional[IFTHEN_COND]); - break; - - case 'C': - if (ifthen_state) - func (stream, "%s", arm_conditional[IFTHEN_COND]); - else - func (stream, "s"); - break; - - case 'I': - { - unsigned int tmp; - - ifthen_next_state = given & 0xff; - for (tmp = given << 1; tmp & 0xf; tmp <<= 1) - func (stream, ((given ^ tmp) & 0x10) ? 
"e" : "t"); - func (stream, "\t%s", arm_conditional[(given >> 4) & 0xf]); - } - break; - - case 'x': - if (ifthen_next_state) - func (stream, "\t; unpredictable branch in IT block\n"); - break; - - case 'X': - if (ifthen_state) - func (stream, "\t; unpredictable ", - arm_conditional[IFTHEN_COND]); - break; - - case 'S': - { - long reg; - - reg = (given >> 3) & 0x7; - if (given & (1 << 6)) - reg += 8; - - func (stream, "%s", arm_regnames[reg]); - } - break; - - case 'D': - { - long reg; - - reg = given & 0x7; - if (given & (1 << 7)) - reg += 8; - - func (stream, "%s", arm_regnames[reg]); - } - break; - - case 'N': - if (given & (1 << 8)) - domasklr = 1; - /* Fall through. */ - case 'O': - if (*c == 'O' && (given & (1 << 8))) - domaskpc = 1; - /* Fall through. */ - case 'M': - { - int started = 0; - int reg; - - func (stream, "{"); - - /* It would be nice if we could spot - ranges, and generate the rS-rE format: */ - for (reg = 0; (reg < 8); reg++) - if ((given & (1 << reg)) != 0) - { - if (started) - func (stream, ", "); - started = 1; - func (stream, "%s", arm_regnames[reg]); - } - - if (domasklr) - { - if (started) - func (stream, ", "); - started = 1; - func (stream, "%s", arm_regnames[14] /* "lr" */); - } - - if (domaskpc) - { - if (started) - func (stream, ", "); - func (stream, "%s", arm_regnames[15] /* "pc" */); - } - - func (stream, "}"); - } - break; - - case 'b': - /* Print ARM V6T2 CZB address: pc+4+6 bits. */ - { - bfd_vma address = (pc + 4 - + ((given & 0x00f8) >> 2) - + ((given & 0x0200) >> 3)); - info->print_address_func (address, info); - } - break; - - case 's': - /* Right shift immediate -- bits 6..10; 1-31 print - as themselves, 0 prints as 32. */ - { - long imm = (given & 0x07c0) >> 6; - if (imm == 0) - imm = 32; - func (stream, "#%ld", imm); - } - break; - - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': - { - int bitstart = *c++ - '0'; - int bitend = 0; - - while (*c >= '0' && *c <= '9') - bitstart = (bitstart * 10) + *c++ - '0'; - - switch (*c) - { - case '-': - { - long reg; - - c++; - while (*c >= '0' && *c <= '9') - bitend = (bitend * 10) + *c++ - '0'; - if (!bitend) - abort (); - reg = given >> bitstart; - reg &= (2 << (bitend - bitstart)) - 1; - switch (*c) - { - case 'r': - func (stream, "%s", arm_regnames[reg]); - break; - - case 'd': - func (stream, "%ld", reg); - break; - - case 'H': - func (stream, "%ld", reg << 1); - break; - - case 'W': - func (stream, "%ld", reg << 2); - break; - - case 'a': - /* PC-relative address -- the bottom two - bits of the address are dropped - before the calculation. */ - info->print_address_func - (((pc + 4) & ~3) + (reg << 2), info); - break; - - case 'x': - func (stream, "0x%04lx", reg); - break; - - case 'B': - reg = ((reg ^ (1 << bitend)) - (1 << bitend)); - info->print_address_func (reg * 2 + pc + 4, info); - break; - - case 'c': - func (stream, "%s", arm_conditional [reg]); - break; - - default: - abort (); - } - } - break; - - case '\'': - c++; - if ((given & (1 << bitstart)) != 0) - func (stream, "%c", *c); - break; - - case '?': - ++c; - if ((given & (1 << bitstart)) != 0) - func (stream, "%c", *c++); - else - func (stream, "%c", *++c); - break; - - default: - abort (); - } - } - break; - - default: - abort (); - } - } - return; - } - - /* No match. */ - abort (); -} - -/* Return the name of an V7M special register. 
*/ -static const char * -psr_name (int regno) -{ - switch (regno) - { - case 0: return "APSR"; - case 1: return "IAPSR"; - case 2: return "EAPSR"; - case 3: return "PSR"; - case 5: return "IPSR"; - case 6: return "EPSR"; - case 7: return "IEPSR"; - case 8: return "MSP"; - case 9: return "PSP"; - case 16: return "PRIMASK"; - case 17: return "BASEPRI"; - case 18: return "BASEPRI_MASK"; - case 19: return "FAULTMASK"; - case 20: return "CONTROL"; - default: return ""; - } -} - -/* Print one 32-bit Thumb instruction from PC on INFO->STREAM. */ - -static void -print_insn_thumb32 (bfd_vma pc, struct disassemble_info *info, long given) -{ - const struct opcode32 *insn; - void *stream = info->stream; - fprintf_function func = info->fprintf_func; - - if (print_insn_coprocessor (pc, info, given, true)) - return; - - if (print_insn_neon (info, given, true)) - return; - - for (insn = thumb32_opcodes; insn->assembler; insn++) - if ((given & insn->mask) == insn->value) - { - const char *c = insn->assembler; - for (; *c; c++) - { - if (*c != '%') - { - func (stream, "%c", *c); - continue; - } - - switch (*++c) - { - case '%': - func (stream, "%%"); - break; - - case 'c': - if (ifthen_state) - func (stream, "%s", arm_conditional[IFTHEN_COND]); - break; - - case 'x': - if (ifthen_next_state) - func (stream, "\t; unpredictable branch in IT block\n"); - break; - - case 'X': - if (ifthen_state) - func (stream, "\t; unpredictable ", - arm_conditional[IFTHEN_COND]); - break; - - case 'I': - { - unsigned int imm12 = 0; - imm12 |= (given & 0x000000ffu); - imm12 |= (given & 0x00007000u) >> 4; - imm12 |= (given & 0x04000000u) >> 15; - func (stream, "#%u\t; 0x%x", imm12, imm12); - } - break; - - case 'M': - { - unsigned int bits = 0, imm, imm8, mod; - bits |= (given & 0x000000ffu); - bits |= (given & 0x00007000u) >> 4; - bits |= (given & 0x04000000u) >> 15; - imm8 = (bits & 0x0ff); - mod = (bits & 0xf00) >> 8; - switch (mod) - { - case 0: imm = imm8; break; - case 1: imm = ((imm8<<16) | imm8); break; - case 2: imm = ((imm8<<24) | (imm8 << 8)); break; - case 3: imm = ((imm8<<24) | (imm8 << 16) | (imm8 << 8) | imm8); break; - default: - mod = (bits & 0xf80) >> 7; - imm8 = (bits & 0x07f) | 0x80; - imm = (((imm8 << (32 - mod)) | (imm8 >> mod)) & 0xffffffff); - } - func (stream, "#%u\t; 0x%x", imm, imm); - } - break; - - case 'J': - { - unsigned int imm = 0; - imm |= (given & 0x000000ffu); - imm |= (given & 0x00007000u) >> 4; - imm |= (given & 0x04000000u) >> 15; - imm |= (given & 0x000f0000u) >> 4; - func (stream, "#%u\t; 0x%x", imm, imm); - } - break; - - case 'K': - { - unsigned int imm = 0; - imm |= (given & 0x000f0000u) >> 16; - imm |= (given & 0x00000ff0u) >> 0; - imm |= (given & 0x0000000fu) << 12; - func (stream, "#%u\t; 0x%x", imm, imm); - } - break; - - case 'S': - { - unsigned int reg = (given & 0x0000000fu); - unsigned int stp = (given & 0x00000030u) >> 4; - unsigned int imm = 0; - imm |= (given & 0x000000c0u) >> 6; - imm |= (given & 0x00007000u) >> 10; - - func (stream, "%s", arm_regnames[reg]); - switch (stp) - { - case 0: - if (imm > 0) - func (stream, ", lsl #%u", imm); - break; - - case 1: - if (imm == 0) - imm = 32; - func (stream, ", lsr #%u", imm); - break; - - case 2: - if (imm == 0) - imm = 32; - func (stream, ", asr #%u", imm); - break; - - case 3: - if (imm == 0) - func (stream, ", rrx"); - else - func (stream, ", ror #%u", imm); - } - } - break; - - case 'a': - { - unsigned int Rn = (given & 0x000f0000) >> 16; - unsigned int U = (given & 0x00800000) >> 23; - unsigned int op = (given & 0x00000f00) 
>> 8; - unsigned int i12 = (given & 0x00000fff); - unsigned int i8 = (given & 0x000000ff); - bfd_boolean writeback = false, postind = false; - int offset = 0; - - func (stream, "[%s", arm_regnames[Rn]); - if (U) /* 12-bit positive immediate offset */ - offset = i12; - else if (Rn == 15) /* 12-bit negative immediate offset */ - offset = -(int)i12; - else if (op == 0x0) /* shifted register offset */ - { - unsigned int Rm = (i8 & 0x0f); - unsigned int sh = (i8 & 0x30) >> 4; - func (stream, ", %s", arm_regnames[Rm]); - if (sh) - func (stream, ", lsl #%u", sh); - func (stream, "]"); - break; - } - else switch (op) - { - case 0xE: /* 8-bit positive immediate offset */ - offset = i8; - break; - - case 0xC: /* 8-bit negative immediate offset */ - offset = -i8; - break; - - case 0xF: /* 8-bit + preindex with wb */ - offset = i8; - writeback = true; - break; - - case 0xD: /* 8-bit - preindex with wb */ - offset = -i8; - writeback = true; - break; - - case 0xB: /* 8-bit + postindex */ - offset = i8; - postind = true; - break; - - case 0x9: /* 8-bit - postindex */ - offset = -i8; - postind = true; - break; - - default: - func (stream, ", ]"); - goto skip; - } - - if (postind) - func (stream, "], #%d", offset); - else - { - if (offset) - func (stream, ", #%d", offset); - func (stream, writeback ? "]!" : "]"); - } - - if (Rn == 15) - { - func (stream, "\t; "); - info->print_address_func (((pc + 4) & ~3) + offset, info); - } - } - skip: - break; - - case 'A': - { - unsigned int P = (given & 0x01000000) >> 24; - unsigned int U = (given & 0x00800000) >> 23; - unsigned int W = (given & 0x00400000) >> 21; - unsigned int Rn = (given & 0x000f0000) >> 16; - unsigned int off = (given & 0x000000ff); - - func (stream, "[%s", arm_regnames[Rn]); - if (P) - { - if (off || !U) - func (stream, ", #%c%u", U ? '+' : '-', off * 4); - func (stream, "]"); - if (W) - func (stream, "!"); - } - else - { - func (stream, "], "); - if (W) - func (stream, "#%c%u", U ? '+' : '-', off * 4); - else - func (stream, "{%u}", off); - } - } - break; - - case 'w': - { - unsigned int Sbit = (given & 0x01000000) >> 24; - unsigned int type = (given & 0x00600000) >> 21; - switch (type) - { - case 0: func (stream, Sbit ? "sb" : "b"); break; - case 1: func (stream, Sbit ? 
"sh" : "h"); break; - case 2: - if (Sbit) - func (stream, "??"); - break; - case 3: - func (stream, "??"); - break; - } - } - break; - - case 'm': - { - int started = 0; - int reg; - - func (stream, "{"); - for (reg = 0; reg < 16; reg++) - if ((given & (1 << reg)) != 0) - { - if (started) - func (stream, ", "); - started = 1; - func (stream, "%s", arm_regnames[reg]); - } - func (stream, "}"); - } - break; - - case 'E': - { - unsigned int msb = (given & 0x0000001f); - unsigned int lsb = 0; - lsb |= (given & 0x000000c0u) >> 6; - lsb |= (given & 0x00007000u) >> 10; - func (stream, "#%u, #%u", lsb, msb - lsb + 1); - } - break; - - case 'F': - { - unsigned int width = (given & 0x0000001f) + 1; - unsigned int lsb = 0; - lsb |= (given & 0x000000c0u) >> 6; - lsb |= (given & 0x00007000u) >> 10; - func (stream, "#%u, #%u", lsb, width); - } - break; - - case 'b': - { - unsigned int S = (given & 0x04000000u) >> 26; - unsigned int J1 = (given & 0x00002000u) >> 13; - unsigned int J2 = (given & 0x00000800u) >> 11; - int offset = 0; - - offset |= !S << 20; - offset |= J2 << 19; - offset |= J1 << 18; - offset |= (given & 0x003f0000) >> 4; - offset |= (given & 0x000007ff) << 1; - offset -= (1 << 20); - - info->print_address_func (pc + 4 + offset, info); - } - break; - - case 'B': - { - unsigned int S = (given & 0x04000000u) >> 26; - unsigned int I1 = (given & 0x00002000u) >> 13; - unsigned int I2 = (given & 0x00000800u) >> 11; - int offset = 0; - - offset |= !S << 24; - offset |= !(I1 ^ S) << 23; - offset |= !(I2 ^ S) << 22; - offset |= (given & 0x03ff0000u) >> 4; - offset |= (given & 0x000007ffu) << 1; - offset -= (1 << 24); - offset += pc + 4; - - /* BLX target addresses are always word aligned. */ - if ((given & 0x00001000u) == 0) - offset &= ~2u; - - info->print_address_func (offset, info); - } - break; - - case 's': - { - unsigned int shift = 0; - shift |= (given & 0x000000c0u) >> 6; - shift |= (given & 0x00007000u) >> 10; - if (given & 0x00200000u) - func (stream, ", asr #%u", shift); - else if (shift) - func (stream, ", lsl #%u", shift); - /* else print nothing - lsl #0 */ - } - break; - - case 'R': - { - unsigned int rot = (given & 0x00000030) >> 4; - if (rot) - func (stream, ", ror #%u", rot * 8); - } - break; - - case 'U': - switch (given & 0xf) - { - case 0xf: func(stream, "sy"); break; - case 0x7: func(stream, "un"); break; - case 0xe: func(stream, "st"); break; - case 0x6: func(stream, "unst"); break; - default: - func(stream, "#%d", (int)given & 0xf); - break; - } - break; - - case 'C': - if ((given & 0xff) == 0) - { - func (stream, "%cPSR_", (given & 0x100000) ? 'S' : 'C'); - if (given & 0x800) - func (stream, "f"); - if (given & 0x400) - func (stream, "s"); - if (given & 0x200) - func (stream, "x"); - if (given & 0x100) - func (stream, "c"); - } - else - { - func (stream, "%s", psr_name (given & 0xff)); - } - break; - - case 'D': - if ((given & 0xff) == 0) - func (stream, "%cPSR", (given & 0x100000) ? 
'S' : 'C'); - else - func (stream, "%s", psr_name (given & 0xff)); - break; - - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': - { - int width; - unsigned long val; - - c = arm_decode_bitfield (c, given, &val, &width); - - switch (*c) - { - case 'd': func (stream, "%lu", val); break; - case 'W': func (stream, "%lu", val * 4); break; - case 'r': func (stream, "%s", arm_regnames[val]); break; - - case 'c': - func (stream, "%s", arm_conditional[val]); - break; - - case '\'': - c++; - if (val == ((1ul << width) - 1)) - func (stream, "%c", *c); - break; - - case '`': - c++; - if (val == 0) - func (stream, "%c", *c); - break; - - case '?': - func (stream, "%c", c[(1 << width) - (int)val]); - c += 1 << width; - break; - - default: - abort (); - } - } - break; - - default: - abort (); - } - } - return; - } - - /* No match. */ - abort (); -} - -/* Print data bytes on INFO->STREAM. */ - -static void -print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED, struct disassemble_info *info, - long given) -{ - switch (info->bytes_per_chunk) - { - case 1: - info->fprintf_func (info->stream, ".byte\t0x%02lx", given); - break; - case 2: - info->fprintf_func (info->stream, ".short\t0x%04lx", given); - break; - case 4: - info->fprintf_func (info->stream, ".word\t0x%08lx", given); - break; - default: - abort (); - } -} - -/* Search back through the insn stream to determine if this instruction is - conditionally executed. */ -static void -find_ifthen_state (bfd_vma pc, struct disassemble_info *info, - bfd_boolean little) -{ - unsigned char b[2]; - unsigned int insn; - int status; - /* COUNT is twice the number of instructions seen. It will be odd if we - just crossed an instruction boundary. */ - int count; - int it_count; - unsigned int seen_it; - bfd_vma addr; - - ifthen_address = pc; - ifthen_state = 0; - - addr = pc; - count = 1; - it_count = 0; - seen_it = 0; - /* Scan backwards looking for IT instructions, keeping track of where - instruction boundaries are. We don't know if something is actually an - IT instruction until we find a definite instruction boundary. */ - for (;;) - { - if (addr == 0 || info->symbol_at_address_func(addr, info)) - { - /* A symbol must be on an instruction boundary, and will not - be within an IT block. */ - if (seen_it && (count & 1)) - break; - - return; - } - addr -= 2; - status = arm_read_memory (addr, (bfd_byte *)b, 2, info); - if (status) - return; - - if (little) - insn = (b[0]) | (b[1] << 8); - else - insn = (b[1]) | (b[0] << 8); - if (seen_it) - { - if ((insn & 0xf800) < 0xe800) - { - /* Addr + 2 is an instruction boundary. See if this matches - the expected boundary based on the position of the last - IT candidate. */ - if (count & 1) - break; - seen_it = 0; - } - } - if ((insn & 0xff00) == 0xbf00 && (insn & 0xf) != 0) - { - /* This could be an IT instruction. */ - seen_it = insn; - it_count = count >> 1; - } - if ((insn & 0xf800) >= 0xe800) - count++; - else - count = (count + 2) | 1; - /* IT blocks contain at most 4 instructions. */ - if (count >= 8 && !seen_it) - return; - } - /* We found an IT instruction. */ - ifthen_state = (seen_it & 0xe0) | ((seen_it << it_count) & 0x1f); - if ((ifthen_state & 0xf) == 0) - ifthen_state = 0; -} - -/* NOTE: There are no checks in these routines that - the relevant number of data bytes exist. 
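
Reviewer note (illustrative sketch, not part of the patch): find_ifthen_state() above rebuilds the architectural ITSTATE byte -- condition in the high nibble, remaining mask in the low nibble -- from the most recent IT halfword it finds while scanning backwards. A minimal sketch for the simplest case, an "it eq" (0xbf08) immediately before the current instruction, i.e. it_count == 0:

#include <stdio.h>

int main(void)
{
    unsigned int seen_it = 0xbf08 & 0xff;   /* low byte of the IT halfword */
    int it_count = 0;                       /* instructions of the block already consumed */
    unsigned int state = (seen_it & 0xe0) | ((seen_it << it_count) & 0x1f);

    printf("cond=%u mask=0x%x\n", state >> 4, state & 0xf);   /* cond=0 (eq), mask=0x8 */
    /* A low nibble of 0x8 marks the last instruction of the IT block,
       so the state is cleared once that instruction has been printed. */
    return 0;
}
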
*/ - -int -print_insn_arm (bfd_vma pc, struct disassemble_info *info) -{ - unsigned char b[4]; - long given; - int status; - int is_thumb = false; - int is_data = false; - unsigned int size = 4; - void (*printer) (bfd_vma, struct disassemble_info *, long); - int little; - - little = (info->endian == BFD_ENDIAN_LITTLE); - is_thumb |= (pc & 1); - pc &= ~(bfd_vma)1; - - if (force_thumb) - is_thumb = true; - - info->bytes_per_line = 4; - - if (is_data) - { - int i; - - /* size was already set above. */ - info->bytes_per_chunk = size; - printer = print_insn_data; - - status = arm_read_memory (pc, (bfd_byte *)b, size, info); - given = 0; - if (little) - for (i = size - 1; i >= 0; i--) - given = b[i] | (given << 8); - else - for (i = 0; i < (int) size; i++) - given = b[i] | (given << 8); - } - else if (!is_thumb) - { - /* In ARM mode endianness is a straightforward issue: the instruction - is four bytes long and is either ordered 0123 or 3210. */ - printer = print_insn_arm_internal; - info->bytes_per_chunk = 4; - size = 4; - - status = arm_read_memory (pc, (bfd_byte *)b, 4, info); - if (little) - given = (b[0]) | (b[1] << 8) | (b[2] << 16) | ((unsigned)b[3] << 24); - else - given = (b[3]) | (b[2] << 8) | (b[1] << 16) | ((unsigned)b[0] << 24); - } - else - { - /* In Thumb mode we have the additional wrinkle of two - instruction lengths. Fortunately, the bits that determine - the length of the current instruction are always to be found - in the first two bytes. */ - printer = print_insn_thumb16; - info->bytes_per_chunk = 2; - size = 2; - - status = arm_read_memory (pc, (bfd_byte *)b, 2, info); - if (little) - given = (b[0]) | (b[1] << 8); - else - given = (b[1]) | (b[0] << 8); - - if (!status) - { - /* These bit patterns signal a four-byte Thumb - instruction. */ - if ((given & 0xF800) == 0xF800 - || (given & 0xF800) == 0xF000 - || (given & 0xF800) == 0xE800) - { - status = arm_read_memory (pc + 2, (bfd_byte *)b, 2, info); - if (little) - given = (b[0]) | (b[1] << 8) | (given << 16); - else - given = (b[1]) | (b[0] << 8) | (given << 16); - - printer = print_insn_thumb32; - size = 4; - } - } - - if (ifthen_address != pc) - find_ifthen_state(pc, info, little); - - if (ifthen_state) - { - if ((ifthen_state & 0xf) == 0x8) - ifthen_next_state = 0; - else - ifthen_next_state = (ifthen_state & 0xe0) - | ((ifthen_state & 0xf) << 1); - } - } - - if (status) - { - info->memory_error_func (status, pc, info); - return -1; - } - if (info->flags & INSN_HAS_RELOC) - /* If the instruction has a reloc associated with it, then - the offset field in the instruction will actually be the - addend for the reloc. (We are using REL type relocs). - In such cases, we can ignore the pc when computing - addresses, since the addend is not currently pc-relative. */ - pc = 0; - - /* We include the hexdump of the instruction. The format here - matches that used by objdump and the ARM ARM (in particular, - 32 bit Thumb instructions are displayed as pairs of halfwords, - not as a single word.) 
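
Reviewer note (illustrative sketch, not part of the patch): the three masked patterns checked above are equivalent to "top five bits of the first halfword >= 0b11101", which is exactly the set of first halfwords of 32-bit Thumb instructions. A quick self-contained check of that predicate:

#include <stdio.h>

static int is_thumb32_first_half(unsigned int hw)
{
    return (hw & 0xF800) == 0xF800
        || (hw & 0xF800) == 0xF000
        || (hw & 0xF800) == 0xE800;
}

int main(void)
{
    printf("%d\n", is_thumb32_first_half(0xf000));  /* 1: first half of bl/b.w */
    printf("%d\n", is_thumb32_first_half(0xe92d));  /* 1: first half of push.w {...} */
    printf("%d\n", is_thumb32_first_half(0x4770));  /* 0: bx lr, a 16-bit insn */
    return 0;
}
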
*/ - if (is_thumb) - { - if (size == 2) - { - info->fprintf_func(info->stream, "%04lx ", - ((unsigned long)given) & 0xffff); - } - else - { - info->fprintf_func(info->stream, "%04lx %04lx ", - (((unsigned long)given) >> 16) & 0xffff, - ((unsigned long)given) & 0xffff); - } - } - else - { - info->fprintf_func(info->stream, "%08lx ", - ((unsigned long)given) & 0xffffffff); - } - - printer (pc, info, given); - - if (is_thumb) - { - ifthen_state = ifthen_next_state; - ifthen_address += size; - } - return size; -} diff --git a/disas/capstone.c b/disas/capstone.c index 20bc8f9669..fe3efb0d3c 100644 --- a/disas/capstone.c +++ b/disas/capstone.c @@ -191,37 +191,43 @@ bool cap_disas_target(disassemble_info *info, uint64_t pc, size_t size) size_t tsize = MIN(sizeof(cap_buf) - csize, size); const uint8_t *cbuf = cap_buf; - info->read_memory_func(pc + csize, cap_buf + csize, tsize, info); - csize += tsize; - size -= tsize; + if (info->read_memory_func(pc + csize, cap_buf + csize, tsize, info) == 0) { + csize += tsize; + size -= tsize; - while (cs_disasm_iter(handle, &cbuf, &csize, &pc, insn)) { - cap_dump_insn(info, insn); - } + while (cs_disasm_iter(handle, &cbuf, &csize, &pc, insn)) { + cap_dump_insn(info, insn); + } + + /* If the target memory is not consumed, go back for more... */ + if (size != 0) { + /* + * ... taking care to move any remaining fractional insn + * to the beginning of the buffer. + */ + if (csize != 0) { + memmove(cap_buf, cbuf, csize); + } + continue; + } - /* If the target memory is not consumed, go back for more... */ - if (size != 0) { /* - * ... taking care to move any remaining fractional insn - * to the beginning of the buffer. + * Since the target memory is consumed, we should not have + * a remaining fractional insn. */ if (csize != 0) { - memmove(cap_buf, cbuf, csize); + info->fprintf_func(info->stream, + "Disassembler disagrees with translator " + "over instruction decoding\n" + "Please report this to qemu-devel@nongnu.org\n"); } - continue; - } + break; - /* - * Since the target memory is consumed, we should not have - * a remaining fractional insn. - */ - if (csize != 0) { + } else { info->fprintf_func(info->stream, - "Disassembler disagrees with translator " - "over instruction decoding\n" - "Please report this to qemu-devel@nongnu.org\n"); + "0x%08" PRIx64 ": unable to read memory\n", pc); + break; } - break; } cs_close(&handle); @@ -286,16 +292,23 @@ bool cap_disas_monitor(disassemble_info *info, uint64_t pc, int count) /* Make certain that we can make progress. */ assert(tsize != 0); - info->read_memory_func(pc + csize, cap_buf + csize, tsize, info); - csize += tsize; + if (info->read_memory_func(pc + csize, cap_buf + csize, + tsize, info) == 0) + { + csize += tsize; - if (cs_disasm_iter(handle, &cbuf, &csize, &pc, insn)) { - cap_dump_insn(info, insn); - if (--count <= 0) { - break; + if (cs_disasm_iter(handle, &cbuf, &csize, &pc, insn)) { + cap_dump_insn(info, insn); + if (--count <= 0) { + break; + } } + memmove(cap_buf, cbuf, csize); + } else { + info->fprintf_func(info->stream, + "0x%08" PRIx64 ": unable to read memory\n", pc); + break; } - memmove(cap_buf, cbuf, csize); } cs_close(&handle); diff --git a/disas/i386.c b/disas/i386.c deleted file mode 100644 index 06c835236e..0000000000 --- a/disas/i386.c +++ /dev/null @@ -1,6771 +0,0 @@ -/* opcodes/i386-dis.c r1.126 */ -/* Print i386 instructions for GDB, the GNU debugger. - Copyright 1988, 1989, 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, - 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. 
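
Reviewer note (illustrative sketch, not part of the patch): the disas/capstone.c hunks above change the buffering loops so that csize/size are only advanced when read_memory_func() returns 0, and a failed read prints the faulting address and stops. A self-contained sketch of that pattern; fill_buffer(), read_fn and dummy_read() are invented names, and the cs_disasm_iter() step is elided:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for info->read_memory_func: returns 0 on success. */
typedef int (*read_fn)(uint64_t addr, uint8_t *buf, size_t len);

static void fill_buffer(uint64_t pc, size_t size, read_fn read_memory)
{
    uint8_t cap_buf[1024];
    size_t csize = 0;

    while (size != 0) {
        size_t avail = sizeof(cap_buf) - csize;
        size_t tsize = size < avail ? size : avail;

        if (read_memory(pc + csize, cap_buf + csize, tsize) == 0) {
            /* Only account for the bytes once the read has succeeded. */
            csize += tsize;
            size -= tsize;
            /* ... the real loop runs cs_disasm_iter() over cap_buf here ... */
        } else {
            printf("0x%08" PRIx64 ": unable to read memory\n", pc);
            break;
        }
    }
}

static int dummy_read(uint64_t addr, uint8_t *buf, size_t len)
{
    (void)addr; (void)buf; (void)len;
    return -1;                        /* simulate an unreadable guest address */
}

int main(void)
{
    fill_buffer(0x1000, 32, dummy_read);
    return 0;
}
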
- - This file is part of GDB. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, see . */ - -/* 80386 instruction printer by Pace Willisson (pace@prep.ai.mit.edu) - July 1988 - modified by John Hassey (hassey@dg-rtp.dg.com) - x86-64 support added by Jan Hubicka (jh@suse.cz) - VIA PadLock support by Michal Ludvig (mludvig@suse.cz). */ - -/* The main tables describing the instructions is essentially a copy - of the "Opcode Map" chapter (Appendix A) of the Intel 80386 - Programmers Manual. Usually, there is a capital letter, followed - by a small letter. The capital letter tell the addressing mode, - and the small letter tells about the operand size. Refer to - the Intel manual for details. */ - -#include "qemu/osdep.h" -#include "disas/dis-asm.h" -#include "qemu/cutils.h" - -/* include/opcode/i386.h r1.78 */ - -/* opcode/i386.h -- Intel 80386 opcode macros - Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, - 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 - Free Software Foundation, Inc. - - This file is part of GAS, the GNU Assembler, and GDB, the GNU Debugger. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, see . */ - -/* The SystemV/386 SVR3.2 assembler, and probably all AT&T derived - ix86 Unix assemblers, generate floating point instructions with - reversed source and destination registers in certain cases. - Unfortunately, gcc and possibly many other programs use this - reversed syntax, so we're stuck with it. - - eg. `fsub %st(3),%st' results in st = st - st(3) as expected, but - `fsub %st,%st(3)' results in st(3) = st - st(3), rather than - the expected st(3) = st(3) - st - - This happens with all the non-commutative arithmetic floating point - operations with two register operands, where the source register is - %st, and destination register is %st(i). - - The affected opcode map is dceX, dcfX, deeX, defX. */ - -#ifndef SYSV386_COMPAT -/* Set non-zero for broken, compatible instructions. Set to zero for - non-broken opcodes at your peril. gcc generates SystemV/386 - compatible instructions. */ -#define SYSV386_COMPAT 1 -#endif -#ifndef OLDGCC_COMPAT -/* Set non-zero to cater for old (<= 2.8.1) versions of gcc that could - generate nonsense fsubp, fsubrp, fdivp and fdivrp with operands - reversed. 
*/ -#define OLDGCC_COMPAT SYSV386_COMPAT -#endif - -#define MOV_AX_DISP32 0xa0 -#define POP_SEG_SHORT 0x07 -#define JUMP_PC_RELATIVE 0xeb -#define INT_OPCODE 0xcd -#define INT3_OPCODE 0xcc -/* The opcode for the fwait instruction, which disassembler treats as a - prefix when it can. */ -#define FWAIT_OPCODE 0x9b -#define ADDR_PREFIX_OPCODE 0x67 -#define DATA_PREFIX_OPCODE 0x66 -#define LOCK_PREFIX_OPCODE 0xf0 -#define CS_PREFIX_OPCODE 0x2e -#define DS_PREFIX_OPCODE 0x3e -#define ES_PREFIX_OPCODE 0x26 -#define FS_PREFIX_OPCODE 0x64 -#define GS_PREFIX_OPCODE 0x65 -#define SS_PREFIX_OPCODE 0x36 -#define REPNE_PREFIX_OPCODE 0xf2 -#define REPE_PREFIX_OPCODE 0xf3 - -#define TWO_BYTE_OPCODE_ESCAPE 0x0f -#define NOP_OPCODE (char) 0x90 - -/* register numbers */ -#define EBP_REG_NUM 5 -#define ESP_REG_NUM 4 - -/* modrm_byte.regmem for twobyte escape */ -#define ESCAPE_TO_TWO_BYTE_ADDRESSING ESP_REG_NUM -/* index_base_byte.index for no index register addressing */ -#define NO_INDEX_REGISTER ESP_REG_NUM -/* index_base_byte.base for no base register addressing */ -#define NO_BASE_REGISTER EBP_REG_NUM -#define NO_BASE_REGISTER_16 6 - -/* modrm.mode = REGMEM_FIELD_HAS_REG when a register is in there */ -#define REGMEM_FIELD_HAS_REG 0x3/* always = 0x3 */ -#define REGMEM_FIELD_HAS_MEM (~REGMEM_FIELD_HAS_REG) - -/* x86-64 extension prefix. */ -#define REX_OPCODE 0x40 - -/* Indicates 64 bit operand size. */ -#define REX_W 8 -/* High extension to reg field of modrm byte. */ -#define REX_R 4 -/* High extension to SIB index field. */ -#define REX_X 2 -/* High extension to base field of modrm or SIB, or reg field of opcode. */ -#define REX_B 1 - -/* max operands per insn */ -#define MAX_OPERANDS 4 - -/* max immediates per insn (lcall, ljmp, insertq, extrq) */ -#define MAX_IMMEDIATE_OPERANDS 2 - -/* max memory refs per insn (string ops) */ -#define MAX_MEMORY_OPERANDS 2 - -/* max size of insn mnemonics. */ -#define MAX_MNEM_SIZE 16 - -/* max size of register name in insn mnemonics. 
*/ -#define MAX_REG_NAME_SIZE 8 - -/* opcodes/i386-dis.c r1.126 */ - -static int fetch_data2(struct disassemble_info *, bfd_byte *); -static int fetch_data(struct disassemble_info *, bfd_byte *); -static void ckprefix (void); -static const char *prefix_name (int, int); -static int print_insn (bfd_vma, disassemble_info *); -static void dofloat (int); -static void OP_ST (int, int); -static void OP_STi (int, int); -static int putop (const char *, int); -static void oappend (const char *); -static void append_seg (void); -static void OP_indirE (int, int); -static void print_operand_value (char *buf, size_t bufsize, int hex, bfd_vma disp); -static void print_displacement (char *, bfd_vma); -static void OP_E (int, int); -static void OP_G (int, int); -static void OP_vvvv (int, int); -static bfd_vma get64 (void); -static bfd_signed_vma get32 (void); -static bfd_signed_vma get32s (void); -static int get16 (void); -static void set_op (bfd_vma, int); -static void OP_REG (int, int); -static void OP_IMREG (int, int); -static void OP_I (int, int); -static void OP_I64 (int, int); -static void OP_sI (int, int); -static void OP_J (int, int); -static void OP_SEG (int, int); -static void OP_DIR (int, int); -static void OP_OFF (int, int); -static void OP_OFF64 (int, int); -static void ptr_reg (int, int); -static void OP_ESreg (int, int); -static void OP_DSreg (int, int); -static void OP_C (int, int); -static void OP_D (int, int); -static void OP_T (int, int); -static void OP_R (int, int); -static void OP_MMX (int, int); -static void OP_XMM (int, int); -static void OP_EM (int, int); -static void OP_EX (int, int); -static void OP_EMC (int,int); -static void OP_MXC (int,int); -static void OP_MS (int, int); -static void OP_XS (int, int); -static void OP_M (int, int); -static void OP_VMX (int, int); -static void OP_0fae (int, int); -static void OP_0f07 (int, int); -static void NOP_Fixup1 (int, int); -static void NOP_Fixup2 (int, int); -static void OP_3DNowSuffix (int, int); -static void OP_SIMD_Suffix (int, int); -static void SIMD_Fixup (int, int); -static void PNI_Fixup (int, int); -static void SVME_Fixup (int, int); -static void INVLPG_Fixup (int, int); -static void BadOp (void); -static void VMX_Fixup (int, int); -static void REP_Fixup (int, int); -static void CMPXCHG8B_Fixup (int, int); -static void XMM_Fixup (int, int); -static void CRC32_Fixup (int, int); - -struct dis_private { - /* Points to first byte not fetched. */ - bfd_byte *max_fetched; - bfd_byte the_buffer[MAX_MNEM_SIZE]; - bfd_vma insn_start; - int orig_sizeflag; - sigjmp_buf bailout; -}; - -enum address_mode -{ - mode_16bit, - mode_32bit, - mode_64bit -}; - -static enum address_mode address_mode; - -/* Flags for the prefixes for the current instruction. See below. */ -static int prefixes; - -/* REX prefix the current instruction. See below. */ -static int rex; -/* Bits of REX we've already used. */ -static int rex_used; -/* Mark parts used in the REX prefix. When we are testing for - empty prefix (for 8bit register REX extension), just mask it - out. Otherwise test for REX bit is excuse for existence of REX - only in case value is nonzero. */ -#define USED_REX(value) \ - { \ - if (value) \ - { \ - if ((rex & value)) \ - rex_used |= (value) | REX_OPCODE; \ - } \ - else \ - rex_used |= REX_OPCODE; \ - } - -/* Flags for prefixes which we somehow handled when printing the - current instruction. */ -static int used_prefixes; - -/* The VEX.vvvv register, unencoded. */ -static int vex_reg; - -/* Flags stored in PREFIXES. 
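
Reviewer note (illustrative sketch, not part of the patch): the REX_* bit definitions above (the prefix byte is 0100WRXB) and the USED_REX() bookkeeping boil down to testing four bits of the prefix. For example the ubiquitous 0x48 prefix sets only REX_W, so the instruction that follows uses 64-bit operands:

#include <stdio.h>

#define REX_W 8   /* 64-bit operand size */
#define REX_R 4   /* extends the ModR/M reg field */
#define REX_X 2   /* extends the SIB index field */
#define REX_B 1   /* extends ModR/M r/m, SIB base, or opcode reg */

int main(void)
{
    int rex = 0x48 & 0x0f;            /* low nibble of the REX prefix byte */
    printf("W=%d R=%d X=%d B=%d\n",
           !!(rex & REX_W), !!(rex & REX_R), !!(rex & REX_X), !!(rex & REX_B));
    /* Prints "W=1 R=0 X=0 B=0": a following "mov" becomes a 64-bit move. */
    return 0;
}
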
*/ -#define PREFIX_REPZ 1 -#define PREFIX_REPNZ 2 -#define PREFIX_LOCK 4 -#define PREFIX_CS 8 -#define PREFIX_SS 0x10 -#define PREFIX_DS 0x20 -#define PREFIX_ES 0x40 -#define PREFIX_FS 0x80 -#define PREFIX_GS 0x100 -#define PREFIX_DATA 0x200 -#define PREFIX_ADDR 0x400 -#define PREFIX_FWAIT 0x800 - -#define PREFIX_VEX_0F 0x1000 -#define PREFIX_VEX_0F38 0x2000 -#define PREFIX_VEX_0F3A 0x4000 - -/* Make sure that bytes from INFO->PRIVATE_DATA->BUFFER (inclusive) - to ADDR (exclusive) are valid. Returns 1 for success, longjmps - on error. */ -static int -fetch_data2(struct disassemble_info *info, bfd_byte *addr) -{ - int status; - struct dis_private *priv = (struct dis_private *) info->private_data; - bfd_vma start = priv->insn_start + (priv->max_fetched - priv->the_buffer); - - if (addr <= priv->the_buffer + MAX_MNEM_SIZE) - status = (*info->read_memory_func) (start, - priv->max_fetched, - addr - priv->max_fetched, - info); - else - status = -1; - if (status != 0) - { - /* If we did manage to read at least one byte, then - print_insn_i386 will do something sensible. Otherwise, print - an error. We do that here because this is where we know - STATUS. */ - if (priv->max_fetched == priv->the_buffer) - (*info->memory_error_func) (status, start, info); - siglongjmp(priv->bailout, 1); - } - else - priv->max_fetched = addr; - return 1; -} - -static int -fetch_data(struct disassemble_info *info, bfd_byte *addr) -{ - if (addr <= ((struct dis_private *) (info->private_data))->max_fetched) { - return 1; - } else { - return fetch_data2(info, addr); - } -} - - -#define XX { NULL, 0 } - -#define Bv { OP_vvvv, v_mode } -#define Eb { OP_E, b_mode } -#define Ev { OP_E, v_mode } -#define Ed { OP_E, d_mode } -#define Edq { OP_E, dq_mode } -#define Edqw { OP_E, dqw_mode } -#define Edqb { OP_E, dqb_mode } -#define Edqd { OP_E, dqd_mode } -#define indirEv { OP_indirE, stack_v_mode } -#define indirEp { OP_indirE, f_mode } -#define stackEv { OP_E, stack_v_mode } -#define Em { OP_E, m_mode } -#define Ew { OP_E, w_mode } -#define M { OP_M, 0 } /* lea, lgdt, etc. 
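
Reviewer note (illustrative sketch, not part of the patch): fetch_data()/fetch_data2() above top up a small per-instruction buffer on demand and escape through siglongjmp() when bytes cannot be read. A stripped-down model of that control flow only -- the nop fill is fabricated for the example, and the real code calls read_memory_func() and memory_error_func() instead:

#include <setjmp.h>
#include <stdio.h>
#include <string.h>

#define MAX_MNEM_SIZE 16

static sigjmp_buf bailout;
/* One spare byte so the deliberately-too-far request below is still a valid pointer. */
static unsigned char the_buffer[MAX_MNEM_SIZE + 1];
static unsigned char *max_fetched = the_buffer;

static int fetch_bytes(unsigned char *addr)
{
    if (addr <= max_fetched) {
        return 1;                               /* already buffered */
    }
    if (addr > the_buffer + MAX_MNEM_SIZE) {
        siglongjmp(bailout, 1);                 /* the real code reports a memory error first */
    }
    memset(max_fetched, 0x90, addr - max_fetched);   /* pretend we read nop bytes */
    max_fetched = addr;
    return 1;
}

int main(void)
{
    if (sigsetjmp(bailout, 0) == 0) {
        fetch_bytes(the_buffer + 4);
        printf("fetched %ld bytes\n", (long)(max_fetched - the_buffer));
        fetch_bytes(the_buffer + MAX_MNEM_SIZE + 1);   /* too far: triggers the bailout */
        printf("not reached\n");
    } else {
        printf("bailed out of decoding this instruction\n");
    }
    return 0;
}
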
*/ -#define Ma { OP_M, v_mode } -#define Mp { OP_M, f_mode } /* 32 or 48 bit memory operand for LDS, LES etc */ -#define Mq { OP_M, q_mode } -#define Gb { OP_G, b_mode } -#define Gv { OP_G, v_mode } -#define Gd { OP_G, d_mode } -#define Gdq { OP_G, dq_mode } -#define Gm { OP_G, m_mode } -#define Gw { OP_G, w_mode } -#define Rd { OP_R, d_mode } -#define Rm { OP_R, m_mode } -#define Ib { OP_I, b_mode } -#define sIb { OP_sI, b_mode } /* sign extended byte */ -#define Iv { OP_I, v_mode } -#define Iq { OP_I, q_mode } -#define Iv64 { OP_I64, v_mode } -#define Iw { OP_I, w_mode } -#define I1 { OP_I, const_1_mode } -#define Jb { OP_J, b_mode } -#define Jv { OP_J, v_mode } -#define Cm { OP_C, m_mode } -#define Dm { OP_D, m_mode } -#define Td { OP_T, d_mode } - -#define RMeAX { OP_REG, eAX_reg } -#define RMeBX { OP_REG, eBX_reg } -#define RMeCX { OP_REG, eCX_reg } -#define RMeDX { OP_REG, eDX_reg } -#define RMeSP { OP_REG, eSP_reg } -#define RMeBP { OP_REG, eBP_reg } -#define RMeSI { OP_REG, eSI_reg } -#define RMeDI { OP_REG, eDI_reg } -#define RMrAX { OP_REG, rAX_reg } -#define RMrBX { OP_REG, rBX_reg } -#define RMrCX { OP_REG, rCX_reg } -#define RMrDX { OP_REG, rDX_reg } -#define RMrSP { OP_REG, rSP_reg } -#define RMrBP { OP_REG, rBP_reg } -#define RMrSI { OP_REG, rSI_reg } -#define RMrDI { OP_REG, rDI_reg } -#define RMAL { OP_REG, al_reg } -#define RMAL { OP_REG, al_reg } -#define RMCL { OP_REG, cl_reg } -#define RMDL { OP_REG, dl_reg } -#define RMBL { OP_REG, bl_reg } -#define RMAH { OP_REG, ah_reg } -#define RMCH { OP_REG, ch_reg } -#define RMDH { OP_REG, dh_reg } -#define RMBH { OP_REG, bh_reg } -#define RMAX { OP_REG, ax_reg } -#define RMDX { OP_REG, dx_reg } - -#define eAX { OP_IMREG, eAX_reg } -#define eBX { OP_IMREG, eBX_reg } -#define eCX { OP_IMREG, eCX_reg } -#define eDX { OP_IMREG, eDX_reg } -#define eSP { OP_IMREG, eSP_reg } -#define eBP { OP_IMREG, eBP_reg } -#define eSI { OP_IMREG, eSI_reg } -#define eDI { OP_IMREG, eDI_reg } -#define AL { OP_IMREG, al_reg } -#define CL { OP_IMREG, cl_reg } -#define DL { OP_IMREG, dl_reg } -#define BL { OP_IMREG, bl_reg } -#define AH { OP_IMREG, ah_reg } -#define CH { OP_IMREG, ch_reg } -#define DH { OP_IMREG, dh_reg } -#define BH { OP_IMREG, bh_reg } -#define AX { OP_IMREG, ax_reg } -#define DX { OP_IMREG, dx_reg } -#define zAX { OP_IMREG, z_mode_ax_reg } -#define indirDX { OP_IMREG, indir_dx_reg } - -#define Sw { OP_SEG, w_mode } -#define Sv { OP_SEG, v_mode } -#define Ap { OP_DIR, 0 } -#define Ob { OP_OFF64, b_mode } -#define Ov { OP_OFF64, v_mode } -#define Xb { OP_DSreg, eSI_reg } -#define Xv { OP_DSreg, eSI_reg } -#define Xz { OP_DSreg, eSI_reg } -#define Yb { OP_ESreg, eDI_reg } -#define Yv { OP_ESreg, eDI_reg } -#define DSBX { OP_DSreg, eBX_reg } - -#define es { OP_REG, es_reg } -#define ss { OP_REG, ss_reg } -#define cs { OP_REG, cs_reg } -#define ds { OP_REG, ds_reg } -#define fs { OP_REG, fs_reg } -#define gs { OP_REG, gs_reg } - -#define MX { OP_MMX, 0 } -#define XM { OP_XMM, 0 } -#define EM { OP_EM, v_mode } -#define EMd { OP_EM, d_mode } -#define EMq { OP_EM, q_mode } -#define EXd { OP_EX, d_mode } -#define EXq { OP_EX, q_mode } -#define EXx { OP_EX, x_mode } -#define MS { OP_MS, v_mode } -#define XS { OP_XS, v_mode } -#define EMC { OP_EMC, v_mode } -#define MXC { OP_MXC, 0 } -#define VM { OP_VMX, q_mode } -#define OPSUF { OP_3DNowSuffix, 0 } -#define OPSIMD { OP_SIMD_Suffix, 0 } -#define XMM0 { XMM_Fixup, 0 } - -/* Used handle "rep" prefix for string instructions. 
*/ -#define Xbr { REP_Fixup, eSI_reg } -#define Xvr { REP_Fixup, eSI_reg } -#define Ybr { REP_Fixup, eDI_reg } -#define Yvr { REP_Fixup, eDI_reg } -#define Yzr { REP_Fixup, eDI_reg } -#define indirDXr { REP_Fixup, indir_dx_reg } -#define ALr { REP_Fixup, al_reg } -#define eAXr { REP_Fixup, eAX_reg } - -#define cond_jump_flag { NULL, cond_jump_mode } -#define loop_jcxz_flag { NULL, loop_jcxz_mode } - -/* bits in sizeflag */ -#define SUFFIX_ALWAYS 4 -#define AFLAG 2 -#define DFLAG 1 - -#define b_mode 1 /* byte operand */ -#define v_mode 2 /* operand size depends on prefixes */ -#define w_mode 3 /* word operand */ -#define d_mode 4 /* double word operand */ -#define q_mode 5 /* quad word operand */ -#define t_mode 6 /* ten-byte operand */ -#define x_mode 7 /* 16-byte XMM operand */ -#define m_mode 8 /* d_mode in 32bit, q_mode in 64bit mode. */ -#define cond_jump_mode 9 -#define loop_jcxz_mode 10 -#define dq_mode 11 /* operand size depends on REX prefixes. */ -#define dqw_mode 12 /* registers like dq_mode, memory like w_mode. */ -#define f_mode 13 /* 4- or 6-byte pointer operand */ -#define const_1_mode 14 -#define stack_v_mode 15 /* v_mode for stack-related opcodes. */ -#define z_mode 16 /* non-quad operand size depends on prefixes */ -#define o_mode 17 /* 16-byte operand */ -#define dqb_mode 18 /* registers like dq_mode, memory like b_mode. */ -#define dqd_mode 19 /* registers like dq_mode, memory like d_mode. */ - -#define es_reg 100 -#define cs_reg 101 -#define ss_reg 102 -#define ds_reg 103 -#define fs_reg 104 -#define gs_reg 105 - -#define eAX_reg 108 -#define eCX_reg 109 -#define eDX_reg 110 -#define eBX_reg 111 -#define eSP_reg 112 -#define eBP_reg 113 -#define eSI_reg 114 -#define eDI_reg 115 - -#define al_reg 116 -#define cl_reg 117 -#define dl_reg 118 -#define bl_reg 119 -#define ah_reg 120 -#define ch_reg 121 -#define dh_reg 122 -#define bh_reg 123 - -#define ax_reg 124 -#define cx_reg 125 -#define dx_reg 126 -#define bx_reg 127 -#define sp_reg 128 -#define bp_reg 129 -#define si_reg 130 -#define di_reg 131 - -#define rAX_reg 132 -#define rCX_reg 133 -#define rDX_reg 134 -#define rBX_reg 135 -#define rSP_reg 136 -#define rBP_reg 137 -#define rSI_reg 138 -#define rDI_reg 139 - -#define z_mode_ax_reg 149 -#define indir_dx_reg 150 - -#define FLOATCODE 1 -#define USE_GROUPS 2 -#define USE_PREFIX_USER_TABLE 3 -#define X86_64_SPECIAL 4 -#define IS_3BYTE_OPCODE 5 - -#define FLOAT NULL, { { NULL, FLOATCODE } } - -#define GRP1a NULL, { { NULL, USE_GROUPS }, { NULL, 0 } } -#define GRP1b NULL, { { NULL, USE_GROUPS }, { NULL, 1 } } -#define GRP1S NULL, { { NULL, USE_GROUPS }, { NULL, 2 } } -#define GRP1Ss NULL, { { NULL, USE_GROUPS }, { NULL, 3 } } -#define GRP2b NULL, { { NULL, USE_GROUPS }, { NULL, 4 } } -#define GRP2S NULL, { { NULL, USE_GROUPS }, { NULL, 5 } } -#define GRP2b_one NULL, { { NULL, USE_GROUPS }, { NULL, 6 } } -#define GRP2S_one NULL, { { NULL, USE_GROUPS }, { NULL, 7 } } -#define GRP2b_cl NULL, { { NULL, USE_GROUPS }, { NULL, 8 } } -#define GRP2S_cl NULL, { { NULL, USE_GROUPS }, { NULL, 9 } } -#define GRP3b NULL, { { NULL, USE_GROUPS }, { NULL, 10 } } -#define GRP3S NULL, { { NULL, USE_GROUPS }, { NULL, 11 } } -#define GRP4 NULL, { { NULL, USE_GROUPS }, { NULL, 12 } } -#define GRP5 NULL, { { NULL, USE_GROUPS }, { NULL, 13 } } -#define GRP6 NULL, { { NULL, USE_GROUPS }, { NULL, 14 } } -#define GRP7 NULL, { { NULL, USE_GROUPS }, { NULL, 15 } } -#define GRP8 NULL, { { NULL, USE_GROUPS }, { NULL, 16 } } -#define GRP9 NULL, { { NULL, USE_GROUPS }, { NULL, 17 } } -#define GRP11_C6 
NULL, { { NULL, USE_GROUPS }, { NULL, 18 } } -#define GRP11_C7 NULL, { { NULL, USE_GROUPS }, { NULL, 19 } } -#define GRP12 NULL, { { NULL, USE_GROUPS }, { NULL, 20 } } -#define GRP13 NULL, { { NULL, USE_GROUPS }, { NULL, 21 } } -#define GRP14 NULL, { { NULL, USE_GROUPS }, { NULL, 22 } } -#define GRP15 NULL, { { NULL, USE_GROUPS }, { NULL, 23 } } -#define GRP16 NULL, { { NULL, USE_GROUPS }, { NULL, 24 } } -#define GRPAMD NULL, { { NULL, USE_GROUPS }, { NULL, 25 } } -#define GRPPADLCK1 NULL, { { NULL, USE_GROUPS }, { NULL, 26 } } -#define GRPPADLCK2 NULL, { { NULL, USE_GROUPS }, { NULL, 27 } } - -#define PREGRP0 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 0 } } -#define PREGRP1 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 1 } } -#define PREGRP2 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 2 } } -#define PREGRP3 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 3 } } -#define PREGRP4 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 4 } } -#define PREGRP5 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 5 } } -#define PREGRP6 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 6 } } -#define PREGRP7 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 7 } } -#define PREGRP8 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 8 } } -#define PREGRP9 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 9 } } -#define PREGRP10 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 10 } } -#define PREGRP11 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 11 } } -#define PREGRP12 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 12 } } -#define PREGRP13 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 13 } } -#define PREGRP14 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 14 } } -#define PREGRP15 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 15 } } -#define PREGRP16 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 16 } } -#define PREGRP17 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 17 } } -#define PREGRP18 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 18 } } -#define PREGRP19 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 19 } } -#define PREGRP20 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 20 } } -#define PREGRP21 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 21 } } -#define PREGRP22 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 22 } } -#define PREGRP23 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 23 } } -#define PREGRP24 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 24 } } -#define PREGRP25 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 25 } } -#define PREGRP26 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 26 } } -#define PREGRP27 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 27 } } -#define PREGRP28 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 28 } } -#define PREGRP29 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 29 } } -#define PREGRP30 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 30 } } -#define PREGRP31 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 31 } } -#define PREGRP32 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 32 } } -#define PREGRP33 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 33 } } -#define PREGRP34 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 34 } } -#define PREGRP35 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 35 } } -#define PREGRP36 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 36 } } -#define PREGRP37 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 37 } } -#define PREGRP38 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 38 } } -#define PREGRP39 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 39 } } -#define 
PREGRP40 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 40 } } -#define PREGRP41 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 41 } } -#define PREGRP42 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 42 } } -#define PREGRP43 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 43 } } -#define PREGRP44 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 44 } } -#define PREGRP45 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 45 } } -#define PREGRP46 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 46 } } -#define PREGRP47 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 47 } } -#define PREGRP48 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 48 } } -#define PREGRP49 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 49 } } -#define PREGRP50 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 50 } } -#define PREGRP51 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 51 } } -#define PREGRP52 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 52 } } -#define PREGRP53 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 53 } } -#define PREGRP54 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 54 } } -#define PREGRP55 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 55 } } -#define PREGRP56 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 56 } } -#define PREGRP57 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 57 } } -#define PREGRP58 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 58 } } -#define PREGRP59 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 59 } } -#define PREGRP60 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 60 } } -#define PREGRP61 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 61 } } -#define PREGRP62 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 62 } } -#define PREGRP63 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 63 } } -#define PREGRP64 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 64 } } -#define PREGRP65 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 65 } } -#define PREGRP66 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 66 } } -#define PREGRP67 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 67 } } -#define PREGRP68 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 68 } } -#define PREGRP69 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 69 } } -#define PREGRP70 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 70 } } -#define PREGRP71 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 71 } } -#define PREGRP72 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 72 } } -#define PREGRP73 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 73 } } -#define PREGRP74 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 74 } } -#define PREGRP75 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 75 } } -#define PREGRP76 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 76 } } -#define PREGRP77 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 77 } } -#define PREGRP78 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 78 } } -#define PREGRP79 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 79 } } -#define PREGRP80 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 80 } } -#define PREGRP81 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 81 } } -#define PREGRP82 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 82 } } -#define PREGRP83 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 83 } } -#define PREGRP84 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 84 } } -#define PREGRP85 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 85 } } -#define PREGRP86 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 86 } } -#define PREGRP87 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 87 } } -#define 
PREGRP88 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 88 } } -#define PREGRP89 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 89 } } -#define PREGRP90 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 90 } } -#define PREGRP91 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 91 } } -#define PREGRP92 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 92 } } -#define PREGRP93 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 93 } } -#define PREGRP94 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 94 } } -#define PREGRP95 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 95 } } -#define PREGRP96 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 96 } } -#define PREGRP97 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 97 } } -#define PREGRP98 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 98 } } -#define PREGRP99 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 99 } } -#define PREGRP100 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 100 } } -#define PREGRP101 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 101 } } -#define PREGRP102 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 102 } } -#define PREGRP103 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 103 } } -#define PREGRP104 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 104 } } -#define PREGRP105 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 105 } } -#define PREGRP106 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 106 } } -#define PREGRP107 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 107 } } -#define PREGRP108 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 108 } } -#define PREGRP109 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 109 } } - -#define X86_64_0 NULL, { { NULL, X86_64_SPECIAL }, { NULL, 0 } } -#define X86_64_1 NULL, { { NULL, X86_64_SPECIAL }, { NULL, 1 } } -#define X86_64_2 NULL, { { NULL, X86_64_SPECIAL }, { NULL, 2 } } -#define X86_64_3 NULL, { { NULL, X86_64_SPECIAL }, { NULL, 3 } } - -#define THREE_BYTE_0 NULL, { { NULL, IS_3BYTE_OPCODE }, { NULL, 0 } } -#define THREE_BYTE_1 NULL, { { NULL, IS_3BYTE_OPCODE }, { NULL, 1 } } - -typedef void (*op_rtn) (int bytemode, int sizeflag); - -struct dis386 { - const char *name; - struct - { - op_rtn rtn; - int bytemode; - } op[MAX_OPERANDS]; -}; - -/* Upper case letters in the instruction names here are macros. - 'A' => print 'b' if no register operands or suffix_always is true - 'B' => print 'b' if suffix_always is true - 'C' => print 's' or 'l' ('w' or 'd' in Intel mode) depending on operand - . size prefix - 'D' => print 'w' if no register operands or 'w', 'l' or 'q', if - . suffix_always is true - 'E' => print 'e' if 32-bit form of jcxz - 'F' => print 'w' or 'l' depending on address size prefix (loop insns) - 'G' => print 'w' or 'l' depending on operand size prefix (i/o insns) - 'H' => print ",pt" or ",pn" branch hint - 'I' => honor following macro letter even in Intel mode (implemented only - . for some of the macro letters) - 'J' => print 'l' - 'K' => print 'd' or 'q' if rex prefix is present. - 'L' => print 'l' if suffix_always is true - 'N' => print 'n' if instruction has no wait "prefix" - 'O' => print 'd' or 'o' (or 'q' in Intel mode) - 'P' => print 'w', 'l' or 'q' if instruction has an operand size prefix, - . or suffix_always is true. print 'q' if rex prefix is present. - 'Q' => print 'w', 'l' or 'q' if no register operands or suffix_always - . 
is true - 'R' => print 'w', 'l' or 'q' ('d' for 'l' and 'e' in Intel mode) - 'S' => print 'w', 'l' or 'q' if suffix_always is true - 'T' => print 'q' in 64bit mode and behave as 'P' otherwise - 'U' => print 'q' in 64bit mode and behave as 'Q' otherwise - 'V' => print 'q' in 64bit mode and behave as 'S' otherwise - 'W' => print 'b', 'w' or 'l' ('d' in Intel mode) - 'X' => print 's', 'd' depending on data16 prefix (for XMM) - 'Y' => 'q' if instruction has an REX 64bit overwrite prefix - 'Z' => print 'q' in 64bit mode and behave as 'L' otherwise - - Many of the above letters print nothing in Intel mode. See "putop" - for the details. - - Braces '{' and '}', and vertical bars '|', indicate alternative - mnemonic strings for AT&T, Intel, X86_64 AT&T, and X86_64 Intel - modes. In cases where there are only two alternatives, the X86_64 - instruction is reserved, and "(bad)" is printed. -*/ - -static const struct dis386 dis386[] = { - /* 00 */ - { "addB", { Eb, Gb } }, - { "addS", { Ev, Gv } }, - { "addB", { Gb, Eb } }, - { "addS", { Gv, Ev } }, - { "addB", { AL, Ib } }, - { "addS", { eAX, Iv } }, - { "push{T|}", { es } }, - { "pop{T|}", { es } }, - /* 08 */ - { "orB", { Eb, Gb } }, - { "orS", { Ev, Gv } }, - { "orB", { Gb, Eb } }, - { "orS", { Gv, Ev } }, - { "orB", { AL, Ib } }, - { "orS", { eAX, Iv } }, - { "push{T|}", { cs } }, - { "(bad)", { XX } }, /* 0x0f extended opcode escape */ - /* 10 */ - { "adcB", { Eb, Gb } }, - { "adcS", { Ev, Gv } }, - { "adcB", { Gb, Eb } }, - { "adcS", { Gv, Ev } }, - { "adcB", { AL, Ib } }, - { "adcS", { eAX, Iv } }, - { "push{T|}", { ss } }, - { "pop{T|}", { ss } }, - /* 18 */ - { "sbbB", { Eb, Gb } }, - { "sbbS", { Ev, Gv } }, - { "sbbB", { Gb, Eb } }, - { "sbbS", { Gv, Ev } }, - { "sbbB", { AL, Ib } }, - { "sbbS", { eAX, Iv } }, - { "push{T|}", { ds } }, - { "pop{T|}", { ds } }, - /* 20 */ - { "andB", { Eb, Gb } }, - { "andS", { Ev, Gv } }, - { "andB", { Gb, Eb } }, - { "andS", { Gv, Ev } }, - { "andB", { AL, Ib } }, - { "andS", { eAX, Iv } }, - { "(bad)", { XX } }, /* SEG ES prefix */ - { "daa{|}", { XX } }, - /* 28 */ - { "subB", { Eb, Gb } }, - { "subS", { Ev, Gv } }, - { "subB", { Gb, Eb } }, - { "subS", { Gv, Ev } }, - { "subB", { AL, Ib } }, - { "subS", { eAX, Iv } }, - { "(bad)", { XX } }, /* SEG CS prefix */ - { "das{|}", { XX } }, - /* 30 */ - { "xorB", { Eb, Gb } }, - { "xorS", { Ev, Gv } }, - { "xorB", { Gb, Eb } }, - { "xorS", { Gv, Ev } }, - { "xorB", { AL, Ib } }, - { "xorS", { eAX, Iv } }, - { "(bad)", { XX } }, /* SEG SS prefix */ - { "aaa{|}", { XX } }, - /* 38 */ - { "cmpB", { Eb, Gb } }, - { "cmpS", { Ev, Gv } }, - { "cmpB", { Gb, Eb } }, - { "cmpS", { Gv, Ev } }, - { "cmpB", { AL, Ib } }, - { "cmpS", { eAX, Iv } }, - { "(bad)", { XX } }, /* SEG DS prefix */ - { "aas{|}", { XX } }, - /* 40 */ - { "inc{S|}", { RMeAX } }, - { "inc{S|}", { RMeCX } }, - { "inc{S|}", { RMeDX } }, - { "inc{S|}", { RMeBX } }, - { "inc{S|}", { RMeSP } }, - { "inc{S|}", { RMeBP } }, - { "inc{S|}", { RMeSI } }, - { "inc{S|}", { RMeDI } }, - /* 48 */ - { "dec{S|}", { RMeAX } }, - { "dec{S|}", { RMeCX } }, - { "dec{S|}", { RMeDX } }, - { "dec{S|}", { RMeBX } }, - { "dec{S|}", { RMeSP } }, - { "dec{S|}", { RMeBP } }, - { "dec{S|}", { RMeSI } }, - { "dec{S|}", { RMeDI } }, - /* 50 */ - { "pushV", { RMrAX } }, - { "pushV", { RMrCX } }, - { "pushV", { RMrDX } }, - { "pushV", { RMrBX } }, - { "pushV", { RMrSP } }, - { "pushV", { RMrBP } }, - { "pushV", { RMrSI } }, - { "pushV", { RMrDI } }, - /* 58 */ - { "popV", { RMrAX } }, - { "popV", { RMrCX } }, - { "popV", { RMrDX } 
}, - { "popV", { RMrBX } }, - { "popV", { RMrSP } }, - { "popV", { RMrBP } }, - { "popV", { RMrSI } }, - { "popV", { RMrDI } }, - /* 60 */ - { X86_64_0 }, - { X86_64_1 }, - { X86_64_2 }, - { X86_64_3 }, - { "(bad)", { XX } }, /* seg fs */ - { "(bad)", { XX } }, /* seg gs */ - { "(bad)", { XX } }, /* op size prefix */ - { "(bad)", { XX } }, /* adr size prefix */ - /* 68 */ - { "pushT", { Iq } }, - { "imulS", { Gv, Ev, Iv } }, - { "pushT", { sIb } }, - { "imulS", { Gv, Ev, sIb } }, - { "ins{b||b|}", { Ybr, indirDX } }, - { "ins{R||G|}", { Yzr, indirDX } }, - { "outs{b||b|}", { indirDXr, Xb } }, - { "outs{R||G|}", { indirDXr, Xz } }, - /* 70 */ - { "joH", { Jb, XX, cond_jump_flag } }, - { "jnoH", { Jb, XX, cond_jump_flag } }, - { "jbH", { Jb, XX, cond_jump_flag } }, - { "jaeH", { Jb, XX, cond_jump_flag } }, - { "jeH", { Jb, XX, cond_jump_flag } }, - { "jneH", { Jb, XX, cond_jump_flag } }, - { "jbeH", { Jb, XX, cond_jump_flag } }, - { "jaH", { Jb, XX, cond_jump_flag } }, - /* 78 */ - { "jsH", { Jb, XX, cond_jump_flag } }, - { "jnsH", { Jb, XX, cond_jump_flag } }, - { "jpH", { Jb, XX, cond_jump_flag } }, - { "jnpH", { Jb, XX, cond_jump_flag } }, - { "jlH", { Jb, XX, cond_jump_flag } }, - { "jgeH", { Jb, XX, cond_jump_flag } }, - { "jleH", { Jb, XX, cond_jump_flag } }, - { "jgH", { Jb, XX, cond_jump_flag } }, - /* 80 */ - { GRP1b }, - { GRP1S }, - { "(bad)", { XX } }, - { GRP1Ss }, - { "testB", { Eb, Gb } }, - { "testS", { Ev, Gv } }, - { "xchgB", { Eb, Gb } }, - { "xchgS", { Ev, Gv } }, - /* 88 */ - { "movB", { Eb, Gb } }, - { "movS", { Ev, Gv } }, - { "movB", { Gb, Eb } }, - { "movS", { Gv, Ev } }, - { "movD", { Sv, Sw } }, - { "leaS", { Gv, M } }, - { "movD", { Sw, Sv } }, - { GRP1a }, - /* 90 */ - { PREGRP38 }, - { "xchgS", { RMeCX, eAX } }, - { "xchgS", { RMeDX, eAX } }, - { "xchgS", { RMeBX, eAX } }, - { "xchgS", { RMeSP, eAX } }, - { "xchgS", { RMeBP, eAX } }, - { "xchgS", { RMeSI, eAX } }, - { "xchgS", { RMeDI, eAX } }, - /* 98 */ - { "cW{t||t|}R", { XX } }, - { "cR{t||t|}O", { XX } }, - { "Jcall{T|}", { Ap } }, - { "(bad)", { XX } }, /* fwait */ - { "pushfT", { XX } }, - { "popfT", { XX } }, - { "sahf{|}", { XX } }, - { "lahf{|}", { XX } }, - /* a0 */ - { "movB", { AL, Ob } }, - { "movS", { eAX, Ov } }, - { "movB", { Ob, AL } }, - { "movS", { Ov, eAX } }, - { "movs{b||b|}", { Ybr, Xb } }, - { "movs{R||R|}", { Yvr, Xv } }, - { "cmps{b||b|}", { Xb, Yb } }, - { "cmps{R||R|}", { Xv, Yv } }, - /* a8 */ - { "testB", { AL, Ib } }, - { "testS", { eAX, Iv } }, - { "stosB", { Ybr, AL } }, - { "stosS", { Yvr, eAX } }, - { "lodsB", { ALr, Xb } }, - { "lodsS", { eAXr, Xv } }, - { "scasB", { AL, Yb } }, - { "scasS", { eAX, Yv } }, - /* b0 */ - { "movB", { RMAL, Ib } }, - { "movB", { RMCL, Ib } }, - { "movB", { RMDL, Ib } }, - { "movB", { RMBL, Ib } }, - { "movB", { RMAH, Ib } }, - { "movB", { RMCH, Ib } }, - { "movB", { RMDH, Ib } }, - { "movB", { RMBH, Ib } }, - /* b8 */ - { "movS", { RMeAX, Iv64 } }, - { "movS", { RMeCX, Iv64 } }, - { "movS", { RMeDX, Iv64 } }, - { "movS", { RMeBX, Iv64 } }, - { "movS", { RMeSP, Iv64 } }, - { "movS", { RMeBP, Iv64 } }, - { "movS", { RMeSI, Iv64 } }, - { "movS", { RMeDI, Iv64 } }, - /* c0 */ - { GRP2b }, - { GRP2S }, - { "retT", { Iw } }, - { "retT", { XX } }, - { "les{S|}", { Gv, Mp } }, - { "ldsS", { Gv, Mp } }, - { GRP11_C6 }, - { GRP11_C7 }, - /* c8 */ - { "enterT", { Iw, Ib } }, - { "leaveT", { XX } }, - { "lretP", { Iw } }, - { "lretP", { XX } }, - { "int3", { XX } }, - { "int", { Ib } }, - { "into{|}", { XX } }, - { "iretP", { XX } }, - /* d0 */ - { GRP2b_one 
}, - { GRP2S_one }, - { GRP2b_cl }, - { GRP2S_cl }, - { "aam{|}", { sIb } }, - { "aad{|}", { sIb } }, - { "(bad)", { XX } }, - { "xlat", { DSBX } }, - /* d8 */ - { FLOAT }, - { FLOAT }, - { FLOAT }, - { FLOAT }, - { FLOAT }, - { FLOAT }, - { FLOAT }, - { FLOAT }, - /* e0 */ - { "loopneFH", { Jb, XX, loop_jcxz_flag } }, - { "loopeFH", { Jb, XX, loop_jcxz_flag } }, - { "loopFH", { Jb, XX, loop_jcxz_flag } }, - { "jEcxzH", { Jb, XX, loop_jcxz_flag } }, - { "inB", { AL, Ib } }, - { "inG", { zAX, Ib } }, - { "outB", { Ib, AL } }, - { "outG", { Ib, zAX } }, - /* e8 */ - { "callT", { Jv } }, - { "jmpT", { Jv } }, - { "Jjmp{T|}", { Ap } }, - { "jmp", { Jb } }, - { "inB", { AL, indirDX } }, - { "inG", { zAX, indirDX } }, - { "outB", { indirDX, AL } }, - { "outG", { indirDX, zAX } }, - /* f0 */ - { "(bad)", { XX } }, /* lock prefix */ - { "icebp", { XX } }, - { "(bad)", { XX } }, /* repne */ - { "(bad)", { XX } }, /* repz */ - { "hlt", { XX } }, - { "cmc", { XX } }, - { GRP3b }, - { GRP3S }, - /* f8 */ - { "clc", { XX } }, - { "stc", { XX } }, - { "cli", { XX } }, - { "sti", { XX } }, - { "cld", { XX } }, - { "std", { XX } }, - { GRP4 }, - { GRP5 }, -}; - -static const struct dis386 dis386_twobyte[] = { - /* 00 */ - { GRP6 }, - { GRP7 }, - { "larS", { Gv, Ew } }, - { "lslS", { Gv, Ew } }, - { "(bad)", { XX } }, - { "syscall", { XX } }, - { "clts", { XX } }, - { "sysretP", { XX } }, - /* 08 */ - { "invd", { XX } }, - { "wbinvd", { XX } }, - { "(bad)", { XX } }, - { "ud2a", { XX } }, - { "(bad)", { XX } }, - { GRPAMD }, - { "femms", { XX } }, - { "", { MX, EM, OPSUF } }, /* See OP_3DNowSuffix. */ - /* 10 */ - { PREGRP8 }, - { PREGRP9 }, - { PREGRP30 }, - { "movlpX", { EXq, XM, { SIMD_Fixup, 'h' } } }, - { "unpcklpX", { XM, EXq } }, - { "unpckhpX", { XM, EXq } }, - { PREGRP31 }, - { "movhpX", { EXq, XM, { SIMD_Fixup, 'l' } } }, - /* 18 */ - { GRP16 }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "nopQ", { Ev } }, - /* 20 */ - { "movZ", { Rm, Cm } }, - { "movZ", { Rm, Dm } }, - { "movZ", { Cm, Rm } }, - { "movZ", { Dm, Rm } }, - { "movL", { Rd, Td } }, - { "(bad)", { XX } }, - { "movL", { Td, Rd } }, - { "(bad)", { XX } }, - /* 28 */ - { "movapX", { XM, EXx } }, - { "movapX", { EXx, XM } }, - { PREGRP2 }, - { PREGRP33 }, - { PREGRP4 }, - { PREGRP3 }, - { PREGRP93 }, - { PREGRP94 }, - /* 30 */ - { "wrmsr", { XX } }, - { "rdtsc", { XX } }, - { "rdmsr", { XX } }, - { "rdpmc", { XX } }, - { "sysenter", { XX } }, - { "sysexit", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 38 */ - { THREE_BYTE_0 }, - { "(bad)", { XX } }, - { THREE_BYTE_1 }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 40 */ - { "cmovo", { Gv, Ev } }, - { "cmovno", { Gv, Ev } }, - { "cmovb", { Gv, Ev } }, - { "cmovae", { Gv, Ev } }, - { "cmove", { Gv, Ev } }, - { "cmovne", { Gv, Ev } }, - { "cmovbe", { Gv, Ev } }, - { "cmova", { Gv, Ev } }, - /* 48 */ - { "cmovs", { Gv, Ev } }, - { "cmovns", { Gv, Ev } }, - { "cmovp", { Gv, Ev } }, - { "cmovnp", { Gv, Ev } }, - { "cmovl", { Gv, Ev } }, - { "cmovge", { Gv, Ev } }, - { "cmovle", { Gv, Ev } }, - { "cmovg", { Gv, Ev } }, - /* 50 */ - { "movmskpX", { Gdq, XS } }, - { PREGRP13 }, - { PREGRP12 }, - { PREGRP11 }, - { "andpX", { XM, EXx } }, - { "andnpX", { XM, EXx } }, - { "orpX", { XM, EXx } }, - { "xorpX", { XM, EXx } }, - /* 58 */ - { PREGRP0 }, - { PREGRP10 }, - { PREGRP17 }, - { PREGRP16 }, - { PREGRP14 }, - { 
PREGRP7 }, - { PREGRP5 }, - { PREGRP6 }, - /* 60 */ - { PREGRP95 }, - { PREGRP96 }, - { PREGRP97 }, - { "packsswb", { MX, EM } }, - { "pcmpgtb", { MX, EM } }, - { "pcmpgtw", { MX, EM } }, - { "pcmpgtd", { MX, EM } }, - { "packuswb", { MX, EM } }, - /* 68 */ - { "punpckhbw", { MX, EM } }, - { "punpckhwd", { MX, EM } }, - { "punpckhdq", { MX, EM } }, - { "packssdw", { MX, EM } }, - { PREGRP26 }, - { PREGRP24 }, - { "movd", { MX, Edq } }, - { PREGRP19 }, - /* 70 */ - { PREGRP22 }, - { GRP12 }, - { GRP13 }, - { GRP14 }, - { "pcmpeqb", { MX, EM } }, - { "pcmpeqw", { MX, EM } }, - { "pcmpeqd", { MX, EM } }, - { "emms", { XX } }, - /* 78 */ - { PREGRP34 }, - { PREGRP35 }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { PREGRP28 }, - { PREGRP29 }, - { PREGRP23 }, - { PREGRP20 }, - /* 80 */ - { "joH", { Jv, XX, cond_jump_flag } }, - { "jnoH", { Jv, XX, cond_jump_flag } }, - { "jbH", { Jv, XX, cond_jump_flag } }, - { "jaeH", { Jv, XX, cond_jump_flag } }, - { "jeH", { Jv, XX, cond_jump_flag } }, - { "jneH", { Jv, XX, cond_jump_flag } }, - { "jbeH", { Jv, XX, cond_jump_flag } }, - { "jaH", { Jv, XX, cond_jump_flag } }, - /* 88 */ - { "jsH", { Jv, XX, cond_jump_flag } }, - { "jnsH", { Jv, XX, cond_jump_flag } }, - { "jpH", { Jv, XX, cond_jump_flag } }, - { "jnpH", { Jv, XX, cond_jump_flag } }, - { "jlH", { Jv, XX, cond_jump_flag } }, - { "jgeH", { Jv, XX, cond_jump_flag } }, - { "jleH", { Jv, XX, cond_jump_flag } }, - { "jgH", { Jv, XX, cond_jump_flag } }, - /* 90 */ - { "seto", { Eb } }, - { "setno", { Eb } }, - { "setb", { Eb } }, - { "setae", { Eb } }, - { "sete", { Eb } }, - { "setne", { Eb } }, - { "setbe", { Eb } }, - { "seta", { Eb } }, - /* 98 */ - { "sets", { Eb } }, - { "setns", { Eb } }, - { "setp", { Eb } }, - { "setnp", { Eb } }, - { "setl", { Eb } }, - { "setge", { Eb } }, - { "setle", { Eb } }, - { "setg", { Eb } }, - /* a0 */ - { "pushT", { fs } }, - { "popT", { fs } }, - { "cpuid", { XX } }, - { "btS", { Ev, Gv } }, - { "shldS", { Ev, Gv, Ib } }, - { "shldS", { Ev, Gv, CL } }, - { GRPPADLCK2 }, - { GRPPADLCK1 }, - /* a8 */ - { "pushT", { gs } }, - { "popT", { gs } }, - { "rsm", { XX } }, - { "btsS", { Ev, Gv } }, - { "shrdS", { Ev, Gv, Ib } }, - { "shrdS", { Ev, Gv, CL } }, - { GRP15 }, - { "imulS", { Gv, Ev } }, - /* b0 */ - { "cmpxchgB", { Eb, Gb } }, - { "cmpxchgS", { Ev, Gv } }, - { "lssS", { Gv, Mp } }, - { "btrS", { Ev, Gv } }, - { "lfsS", { Gv, Mp } }, - { "lgsS", { Gv, Mp } }, - { "movz{bR|x|bR|x}", { Gv, Eb } }, - { "movz{wR|x|wR|x}", { Gv, Ew } }, /* yes, there really is movzww ! */ - /* b8 */ - { PREGRP37 }, - { "ud2b", { XX } }, - { GRP8 }, - { "btcS", { Ev, Gv } }, - { PREGRP107 }, - { PREGRP36 }, - { "movs{bR|x|bR|x}", { Gv, Eb } }, - { "movs{wR|x|wR|x}", { Gv, Ew } }, /* yes, there really is movsww ! 
*/ - /* c0 */ - { "xaddB", { Eb, Gb } }, - { "xaddS", { Ev, Gv } }, - { PREGRP1 }, - { "movntiS", { Ev, Gv } }, - { "pinsrw", { MX, Edqw, Ib } }, - { "pextrw", { Gdq, MS, Ib } }, - { "shufpX", { XM, EXx, Ib } }, - { GRP9 }, - /* c8 */ - { "bswap", { RMeAX } }, - { "bswap", { RMeCX } }, - { "bswap", { RMeDX } }, - { "bswap", { RMeBX } }, - { "bswap", { RMeSP } }, - { "bswap", { RMeBP } }, - { "bswap", { RMeSI } }, - { "bswap", { RMeDI } }, - /* d0 */ - { PREGRP27 }, - { "psrlw", { MX, EM } }, - { "psrld", { MX, EM } }, - { "psrlq", { MX, EM } }, - { "paddq", { MX, EM } }, - { "pmullw", { MX, EM } }, - { PREGRP21 }, - { "pmovmskb", { Gdq, MS } }, - /* d8 */ - { "psubusb", { MX, EM } }, - { "psubusw", { MX, EM } }, - { "pminub", { MX, EM } }, - { "pand", { MX, EM } }, - { "paddusb", { MX, EM } }, - { "paddusw", { MX, EM } }, - { "pmaxub", { MX, EM } }, - { "pandn", { MX, EM } }, - /* e0 */ - { "pavgb", { MX, EM } }, - { "psraw", { MX, EM } }, - { "psrad", { MX, EM } }, - { "pavgw", { MX, EM } }, - { "pmulhuw", { MX, EM } }, - { "pmulhw", { MX, EM } }, - { PREGRP15 }, - { PREGRP25 }, - /* e8 */ - { "psubsb", { MX, EM } }, - { "psubsw", { MX, EM } }, - { "pminsw", { MX, EM } }, - { "por", { MX, EM } }, - { "paddsb", { MX, EM } }, - { "paddsw", { MX, EM } }, - { "pmaxsw", { MX, EM } }, - { "pxor", { MX, EM } }, - /* f0 */ - { PREGRP32 }, - { "psllw", { MX, EM } }, - { "pslld", { MX, EM } }, - { "psllq", { MX, EM } }, - { "pmuludq", { MX, EM } }, - { "pmaddwd", { MX, EM } }, - { "psadbw", { MX, EM } }, - { PREGRP18 }, - /* f8 */ - { "psubb", { MX, EM } }, - { "psubw", { MX, EM } }, - { "psubd", { MX, EM } }, - { "psubq", { MX, EM } }, - { "paddb", { MX, EM } }, - { "paddw", { MX, EM } }, - { "paddd", { MX, EM } }, - { "(bad)", { XX } }, -}; - -static const unsigned char onebyte_has_modrm[256] = { - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ - /* ------------------------------- */ - /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */ - /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */ - /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */ - /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */ - /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */ - /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */ - /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */ - /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */ - /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */ - /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */ - /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */ - /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */ - /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */ - /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */ - /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */ - /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */ - /* ------------------------------- */ - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -}; - -static const unsigned char twobyte_has_modrm[256] = { - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ - /* ------------------------------- */ - /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */ - /* 10 */ 1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,1, /* 1f */ - /* 20 */ 1,1,1,1,1,0,1,0,1,1,1,1,1,1,1,1, /* 2f */ - /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */ - /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */ - /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */ - /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */ - /* 70 */ 1,1,1,1,1,1,1,0,1,1,0,0,1,1,1,1, /* 7f */ - /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */ - /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */ - /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */ - 
/* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */ - /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */ - /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */ - /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */ - /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */ - /* ------------------------------- */ - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -}; - -static const unsigned char twobyte_uses_DATA_prefix[256] = { - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ - /* ------------------------------- */ - /* 00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */ - /* 10 */ 1,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0, /* 1f */ - /* 20 */ 0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0, /* 2f */ - /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */ - /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */ - /* 50 */ 0,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* 5f */ - /* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1, /* 6f */ - /* 70 */ 1,0,0,0,0,0,0,0,1,1,0,0,1,1,1,1, /* 7f */ - /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */ - /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */ - /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */ - /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */ - /* c0 */ 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */ - /* d0 */ 1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* df */ - /* e0 */ 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* ef */ - /* f0 */ 1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0 /* ff */ - /* ------------------------------- */ - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -}; - -static const unsigned char twobyte_uses_REPNZ_prefix[256] = { - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ - /* ------------------------------- */ - /* 00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */ - /* 10 */ 1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 1f */ - /* 20 */ 0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0, /* 2f */ - /* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */ - /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */ - /* 50 */ 0,1,0,0,0,0,0,0,1,1,1,0,1,1,1,1, /* 5f */ - /* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 6f */ - /* 70 */ 1,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0, /* 7f */ - /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */ - /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */ - /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */ - /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */ - /* c0 */ 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */ - /* d0 */ 1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* df */ - /* e0 */ 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* ef */ - /* f0 */ 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ff */ - /* ------------------------------- */ - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -}; - -static const unsigned char twobyte_uses_REPZ_prefix[256] = { - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ - /* ------------------------------- */ - /* 00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */ - /* 10 */ 1,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0, /* 1f */ - /* 20 */ 0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0, /* 2f */ - /* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */ - /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */ - /* 50 */ 0,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* 5f */ - /* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, /* 6f */ - /* 70 */ 1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1, /* 7f */ - /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */ - /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */ - /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */ - /* b0 */ 0,0,0,0,0,0,0,0,1,0,0,0,1,1,0,0, /* bf */ - /* c0 */ 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */ - /* d0 */ 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* df */ - /* e0 */ 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* ef */ - /* f0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ff */ - /* ------------------------------- */ - /* 0 1 2 3 4 5 6 7 8 9 
a b c d e f */ -}; - -/* This is used to determine if opcode 0f 38 XX uses DATA prefix. */ -static const unsigned char threebyte_0x38_uses_DATA_prefix[256] = { - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ - /* ------------------------------- */ - /* 00 */ 1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0, /* 0f */ - /* 10 */ 1,0,0,0,1,1,0,1,0,0,0,0,1,1,1,0, /* 1f */ - /* 20 */ 1,1,1,1,1,1,0,0,1,1,1,1,0,0,0,0, /* 2f */ - /* 30 */ 1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1, /* 3f */ - /* 40 */ 1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */ - /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 5f */ - /* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 6f */ - /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 7f */ - /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */ - /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */ - /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */ - /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */ - /* c0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */ - /* d0 */ 0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1, /* df */ - /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ef */ - /* f0 */ 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, /* ff */ - /* ------------------------------- */ - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -}; - -/* This is used to determine if opcode 0f 38 XX uses REPNZ prefix. */ -static const unsigned char threebyte_0x38_uses_REPNZ_prefix[256] = { - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ - /* ------------------------------- */ - /* 00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */ - /* 10 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 1f */ - /* 20 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 2f */ - /* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */ - /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */ - /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 5f */ - /* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 6f */ - /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 7f */ - /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */ - /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */ - /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */ - /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */ - /* c0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */ - /* d0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* df */ - /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ef */ - /* f0 */ 1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0, /* ff */ - /* ------------------------------- */ - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -}; - -/* This is used to determine if opcode 0f 38 XX uses REPZ prefix. */ -static const unsigned char threebyte_0x38_uses_REPZ_prefix[256] = { - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ - /* ------------------------------- */ - /* 00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */ - /* 10 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 1f */ - /* 20 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 2f */ - /* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */ - /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */ - /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 5f */ - /* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 6f */ - /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 7f */ - /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */ - /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */ - /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */ - /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */ - /* c0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */ - /* d0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* df */ - /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ef */ - /* f0 */ 0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0, /* ff */ - /* ------------------------------- */ - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -}; - -/* This is used to determine if opcode 0f 3a XX uses DATA prefix. 
*/ -static const unsigned char threebyte_0x3a_uses_DATA_prefix[256] = { - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ - /* ------------------------------- */ - /* 00 */ 0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1, /* 0f */ - /* 10 */ 0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* 1f */ - /* 20 */ 1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 2f */ - /* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */ - /* 40 */ 1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0, /* 4f */ - /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 5f */ - /* 60 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0, /* 6f */ - /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 7f */ - /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */ - /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */ - /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */ - /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */ - /* c0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */ - /* d0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, /* df */ - /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ef */ - /* f0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ff */ - /* ------------------------------- */ - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -}; - -/* This is used to determine if opcode 0f 3a XX uses REPNZ prefix. */ -static const unsigned char threebyte_0x3a_uses_REPNZ_prefix[256] = { - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ - /* ------------------------------- */ - /* 00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */ - /* 10 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 1f */ - /* 20 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 2f */ - /* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */ - /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */ - /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 5f */ - /* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 6f */ - /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 7f */ - /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */ - /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */ - /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */ - /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */ - /* c0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */ - /* d0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* df */ - /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ef */ - /* f0 */ 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ff */ - /* ------------------------------- */ - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -}; - -/* This is used to determine if opcode 0f 3a XX uses REPZ prefix. 
*/ -static const unsigned char threebyte_0x3a_uses_REPZ_prefix[256] = { - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ - /* ------------------------------- */ - /* 00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */ - /* 10 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 1f */ - /* 20 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 2f */ - /* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */ - /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */ - /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 5f */ - /* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 6f */ - /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 7f */ - /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */ - /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */ - /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */ - /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */ - /* c0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */ - /* d0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* df */ - /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ef */ - /* f0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ff */ - /* ------------------------------- */ - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -}; - -static char obuf[100]; -static char *obufp; -static char scratchbuf[100]; -static unsigned char *start_codep; -static unsigned char *insn_codep; -static unsigned char *codep; -static disassemble_info *the_info; -static struct - { - int mod; - int reg; - int rm; - } -modrm; -static unsigned char need_modrm; - -/* If we are accessing mod/rm/reg without need_modrm set, then the - values are stale. Hitting this abort likely indicates that you - need to update onebyte_has_modrm or twobyte_has_modrm. */ -#define MODRM_CHECK if (!need_modrm) abort () - -static const char * const *names64; -static const char * const *names32; -static const char * const *names16; -static const char * const *names8; -static const char * const *names8rex; -static const char * const *names_seg; -static const char * const *index16; - -static const char * const intel_names64[] = { - "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", - "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" -}; -static const char * const intel_names32[] = { - "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi", - "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d" -}; -static const char * const intel_names16[] = { - "ax", "cx", "dx", "bx", "sp", "bp", "si", "di", - "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w" -}; -static const char * const intel_names8[] = { - "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh", -}; -static const char * const intel_names8rex[] = { - "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil", - "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" -}; -static const char * const intel_names_seg[] = { - "es", "cs", "ss", "ds", "fs", "gs", "?", "?", -}; -static const char * const intel_index16[] = { - "bx+si", "bx+di", "bp+si", "bp+di", "si", "di", "bp", "bx" -}; - -static const char * const att_names64[] = { - "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi", - "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15" -}; -static const char * const att_names32[] = { - "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi", - "%r8d", "%r9d", "%r10d", "%r11d", "%r12d", "%r13d", "%r14d", "%r15d" -}; -static const char * const att_names16[] = { - "%ax", "%cx", "%dx", "%bx", "%sp", "%bp", "%si", "%di", - "%r8w", "%r9w", "%r10w", "%r11w", "%r12w", "%r13w", "%r14w", "%r15w" -}; -static const char * const att_names8[] = { - "%al", "%cl", "%dl", "%bl", "%ah", "%ch", "%dh", "%bh", -}; -static const char 
* const att_names8rex[] = { - "%al", "%cl", "%dl", "%bl", "%spl", "%bpl", "%sil", "%dil", - "%r8b", "%r9b", "%r10b", "%r11b", "%r12b", "%r13b", "%r14b", "%r15b" -}; -static const char * const att_names_seg[] = { - "%es", "%cs", "%ss", "%ds", "%fs", "%gs", "%?", "%?", -}; -static const char * const att_index16[] = { - "%bx,%si", "%bx,%di", "%bp,%si", "%bp,%di", "%si", "%di", "%bp", "%bx" -}; - -static const struct dis386 grps[][8] = { - /* GRP1a */ - { - { "popU", { stackEv } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - }, - /* GRP1b */ - { - { "addA", { Eb, Ib } }, - { "orA", { Eb, Ib } }, - { "adcA", { Eb, Ib } }, - { "sbbA", { Eb, Ib } }, - { "andA", { Eb, Ib } }, - { "subA", { Eb, Ib } }, - { "xorA", { Eb, Ib } }, - { "cmpA", { Eb, Ib } }, - }, - /* GRP1S */ - { - { "addQ", { Ev, Iv } }, - { "orQ", { Ev, Iv } }, - { "adcQ", { Ev, Iv } }, - { "sbbQ", { Ev, Iv } }, - { "andQ", { Ev, Iv } }, - { "subQ", { Ev, Iv } }, - { "xorQ", { Ev, Iv } }, - { "cmpQ", { Ev, Iv } }, - }, - /* GRP1Ss */ - { - { "addQ", { Ev, sIb } }, - { "orQ", { Ev, sIb } }, - { "adcQ", { Ev, sIb } }, - { "sbbQ", { Ev, sIb } }, - { "andQ", { Ev, sIb } }, - { "subQ", { Ev, sIb } }, - { "xorQ", { Ev, sIb } }, - { "cmpQ", { Ev, sIb } }, - }, - /* GRP2b */ - { - { "rolA", { Eb, Ib } }, - { "rorA", { Eb, Ib } }, - { "rclA", { Eb, Ib } }, - { "rcrA", { Eb, Ib } }, - { "shlA", { Eb, Ib } }, - { "shrA", { Eb, Ib } }, - { "(bad)", { XX } }, - { "sarA", { Eb, Ib } }, - }, - /* GRP2S */ - { - { "rolQ", { Ev, Ib } }, - { "rorQ", { Ev, Ib } }, - { "rclQ", { Ev, Ib } }, - { "rcrQ", { Ev, Ib } }, - { "shlQ", { Ev, Ib } }, - { "shrQ", { Ev, Ib } }, - { "(bad)", { XX } }, - { "sarQ", { Ev, Ib } }, - }, - /* GRP2b_one */ - { - { "rolA", { Eb, I1 } }, - { "rorA", { Eb, I1 } }, - { "rclA", { Eb, I1 } }, - { "rcrA", { Eb, I1 } }, - { "shlA", { Eb, I1 } }, - { "shrA", { Eb, I1 } }, - { "(bad)", { XX } }, - { "sarA", { Eb, I1 } }, - }, - /* GRP2S_one */ - { - { "rolQ", { Ev, I1 } }, - { "rorQ", { Ev, I1 } }, - { "rclQ", { Ev, I1 } }, - { "rcrQ", { Ev, I1 } }, - { "shlQ", { Ev, I1 } }, - { "shrQ", { Ev, I1 } }, - { "(bad)", { XX } }, - { "sarQ", { Ev, I1 } }, - }, - /* GRP2b_cl */ - { - { "rolA", { Eb, CL } }, - { "rorA", { Eb, CL } }, - { "rclA", { Eb, CL } }, - { "rcrA", { Eb, CL } }, - { "shlA", { Eb, CL } }, - { "shrA", { Eb, CL } }, - { "(bad)", { XX } }, - { "sarA", { Eb, CL } }, - }, - /* GRP2S_cl */ - { - { "rolQ", { Ev, CL } }, - { "rorQ", { Ev, CL } }, - { "rclQ", { Ev, CL } }, - { "rcrQ", { Ev, CL } }, - { "shlQ", { Ev, CL } }, - { "shrQ", { Ev, CL } }, - { "(bad)", { XX } }, - { "sarQ", { Ev, CL } }, - }, - /* GRP3b */ - { - { "testA", { Eb, Ib } }, - { "(bad)", { Eb } }, - { "notA", { Eb } }, - { "negA", { Eb } }, - { "mulA", { Eb } }, /* Don't print the implicit %al register, */ - { "imulA", { Eb } }, /* to distinguish these opcodes from other */ - { "divA", { Eb } }, /* mul/imul opcodes. Do the same for div */ - { "idivA", { Eb } }, /* and idiv for consistency. */ - }, - /* GRP3S */ - { - { "testQ", { Ev, Iv } }, - { "(bad)", { XX } }, - { "notQ", { Ev } }, - { "negQ", { Ev } }, - { "mulQ", { Ev } }, /* Don't print the implicit register. 
*/ - { "imulQ", { Ev } }, - { "divQ", { Ev } }, - { "idivQ", { Ev } }, - }, - /* GRP4 */ - { - { "incA", { Eb } }, - { "decA", { Eb } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - }, - /* GRP5 */ - { - { "incQ", { Ev } }, - { "decQ", { Ev } }, - { "callT", { indirEv } }, - { "JcallT", { indirEp } }, - { "jmpT", { indirEv } }, - { "JjmpT", { indirEp } }, - { "pushU", { stackEv } }, - { "(bad)", { XX } }, - }, - /* GRP6 */ - { - { "sldtD", { Sv } }, - { "strD", { Sv } }, - { "lldt", { Ew } }, - { "ltr", { Ew } }, - { "verr", { Ew } }, - { "verw", { Ew } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - }, - /* GRP7 */ - { - { "sgdt{Q|IQ||}", { { VMX_Fixup, 0 } } }, - { "sidt{Q|IQ||}", { { PNI_Fixup, 0 } } }, - { "lgdt{Q|Q||}", { M } }, - { "lidt{Q|Q||}", { { SVME_Fixup, 0 } } }, - { "smswD", { Sv } }, - { "(bad)", { XX } }, - { "lmsw", { Ew } }, - { "invlpg", { { INVLPG_Fixup, w_mode } } }, - }, - /* GRP8 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "btQ", { Ev, Ib } }, - { "btsQ", { Ev, Ib } }, - { "btrQ", { Ev, Ib } }, - { "btcQ", { Ev, Ib } }, - }, - /* GRP9 */ - { - { "(bad)", { XX } }, - { "cmpxchg8b", { { CMPXCHG8B_Fixup, q_mode } } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "", { VM } }, /* See OP_VMX. */ - { "vmptrst", { Mq } }, - }, - /* GRP11_C6 */ - { - { "movA", { Eb, Ib } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - }, - /* GRP11_C7 */ - { - { "movQ", { Ev, Iv } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - }, - /* GRP12 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "psrlw", { MS, Ib } }, - { "(bad)", { XX } }, - { "psraw", { MS, Ib } }, - { "(bad)", { XX } }, - { "psllw", { MS, Ib } }, - { "(bad)", { XX } }, - }, - /* GRP13 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "psrld", { MS, Ib } }, - { "(bad)", { XX } }, - { "psrad", { MS, Ib } }, - { "(bad)", { XX } }, - { "pslld", { MS, Ib } }, - { "(bad)", { XX } }, - }, - /* GRP14 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "psrlq", { MS, Ib } }, - { "psrldq", { MS, Ib } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "psllq", { MS, Ib } }, - { "pslldq", { MS, Ib } }, - }, - /* GRP15 */ - { - { "fxsave", { Ev } }, - { "fxrstor", { Ev } }, - { "ldmxcsr", { Ev } }, - { "stmxcsr", { Ev } }, - { "(bad)", { XX } }, - { "lfence", { { OP_0fae, 0 } } }, - { "mfence", { { OP_0fae, 0 } } }, - { "clflush", { { OP_0fae, 0 } } }, - }, - /* GRP16 */ - { - { "prefetchnta", { Ev } }, - { "prefetcht0", { Ev } }, - { "prefetcht1", { Ev } }, - { "prefetcht2", { Ev } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - }, - /* GRPAMD */ - { - { "prefetch", { Eb } }, - { "prefetchw", { Eb } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - }, - /* GRPPADLCK1 */ - { - { "xstore-rng", { { OP_0f07, 0 } } }, - { "xcrypt-ecb", { { OP_0f07, 0 } } }, - { "xcrypt-cbc", { { OP_0f07, 0 } } }, - { "xcrypt-ctr", { { OP_0f07, 0 } } }, - { "xcrypt-cfb", { { OP_0f07, 0 } } }, - { "xcrypt-ofb", { { OP_0f07, 0 } } }, - { "(bad)", { { OP_0f07, 0 } } 
}, - { "(bad)", { { OP_0f07, 0 } } }, - }, - /* GRPPADLCK2 */ - { - { "montmul", { { OP_0f07, 0 } } }, - { "xsha1", { { OP_0f07, 0 } } }, - { "xsha256", { { OP_0f07, 0 } } }, - { "(bad)", { { OP_0f07, 0 } } }, - { "(bad)", { { OP_0f07, 0 } } }, - { "(bad)", { { OP_0f07, 0 } } }, - { "(bad)", { { OP_0f07, 0 } } }, - { "(bad)", { { OP_0f07, 0 } } }, - } -}; - -static const struct dis386 prefix_user_table[][4] = { - /* PREGRP0 */ - { - { "addps", { XM, EXx } }, - { "addss", { XM, EXd } }, - { "addpd", { XM, EXx } }, - { "addsd", { XM, EXq } }, - }, - /* PREGRP1 */ - { - { "", { XM, EXx, OPSIMD } }, /* See OP_SIMD_SUFFIX. */ - { "", { XM, EXx, OPSIMD } }, - { "", { XM, EXx, OPSIMD } }, - { "", { XM, EXx, OPSIMD } }, - }, - /* PREGRP2 */ - { - { "cvtpi2ps", { XM, EMC } }, - { "cvtsi2ssY", { XM, Ev } }, - { "cvtpi2pd", { XM, EMC } }, - { "cvtsi2sdY", { XM, Ev } }, - }, - /* PREGRP3 */ - { - { "cvtps2pi", { MXC, EXx } }, - { "cvtss2siY", { Gv, EXx } }, - { "cvtpd2pi", { MXC, EXx } }, - { "cvtsd2siY", { Gv, EXx } }, - }, - /* PREGRP4 */ - { - { "cvttps2pi", { MXC, EXx } }, - { "cvttss2siY", { Gv, EXx } }, - { "cvttpd2pi", { MXC, EXx } }, - { "cvttsd2siY", { Gv, EXx } }, - }, - /* PREGRP5 */ - { - { "divps", { XM, EXx } }, - { "divss", { XM, EXx } }, - { "divpd", { XM, EXx } }, - { "divsd", { XM, EXx } }, - }, - /* PREGRP6 */ - { - { "maxps", { XM, EXx } }, - { "maxss", { XM, EXx } }, - { "maxpd", { XM, EXx } }, - { "maxsd", { XM, EXx } }, - }, - /* PREGRP7 */ - { - { "minps", { XM, EXx } }, - { "minss", { XM, EXx } }, - { "minpd", { XM, EXx } }, - { "minsd", { XM, EXx } }, - }, - /* PREGRP8 */ - { - { "movups", { XM, EXx } }, - { "movss", { XM, EXx } }, - { "movupd", { XM, EXx } }, - { "movsd", { XM, EXx } }, - }, - /* PREGRP9 */ - { - { "movups", { EXx, XM } }, - { "movss", { EXx, XM } }, - { "movupd", { EXx, XM } }, - { "movsd", { EXx, XM } }, - }, - /* PREGRP10 */ - { - { "mulps", { XM, EXx } }, - { "mulss", { XM, EXx } }, - { "mulpd", { XM, EXx } }, - { "mulsd", { XM, EXx } }, - }, - /* PREGRP11 */ - { - { "rcpps", { XM, EXx } }, - { "rcpss", { XM, EXx } }, - { "(bad)", { XM, EXx } }, - { "(bad)", { XM, EXx } }, - }, - /* PREGRP12 */ - { - { "rsqrtps",{ XM, EXx } }, - { "rsqrtss",{ XM, EXx } }, - { "(bad)", { XM, EXx } }, - { "(bad)", { XM, EXx } }, - }, - /* PREGRP13 */ - { - { "sqrtps", { XM, EXx } }, - { "sqrtss", { XM, EXx } }, - { "sqrtpd", { XM, EXx } }, - { "sqrtsd", { XM, EXx } }, - }, - /* PREGRP14 */ - { - { "subps", { XM, EXx } }, - { "subss", { XM, EXx } }, - { "subpd", { XM, EXx } }, - { "subsd", { XM, EXx } }, - }, - /* PREGRP15 */ - { - { "(bad)", { XM, EXx } }, - { "cvtdq2pd", { XM, EXq } }, - { "cvttpd2dq", { XM, EXx } }, - { "cvtpd2dq", { XM, EXx } }, - }, - /* PREGRP16 */ - { - { "cvtdq2ps", { XM, EXx } }, - { "cvttps2dq", { XM, EXx } }, - { "cvtps2dq", { XM, EXx } }, - { "(bad)", { XM, EXx } }, - }, - /* PREGRP17 */ - { - { "cvtps2pd", { XM, EXq } }, - { "cvtss2sd", { XM, EXx } }, - { "cvtpd2ps", { XM, EXx } }, - { "cvtsd2ss", { XM, EXx } }, - }, - /* PREGRP18 */ - { - { "maskmovq", { MX, MS } }, - { "(bad)", { XM, EXx } }, - { "maskmovdqu", { XM, XS } }, - { "(bad)", { XM, EXx } }, - }, - /* PREGRP19 */ - { - { "movq", { MX, EM } }, - { "movdqu", { XM, EXx } }, - { "movdqa", { XM, EXx } }, - { "(bad)", { XM, EXx } }, - }, - /* PREGRP20 */ - { - { "movq", { EM, MX } }, - { "movdqu", { EXx, XM } }, - { "movdqa", { EXx, XM } }, - { "(bad)", { EXx, XM } }, - }, - /* PREGRP21 */ - { - { "(bad)", { EXx, XM } }, - { "movq2dq",{ XM, MS } }, - { "movq", { EXx, XM } }, - { 
"movdq2q",{ MX, XS } }, - }, - /* PREGRP22 */ - { - { "pshufw", { MX, EM, Ib } }, - { "pshufhw",{ XM, EXx, Ib } }, - { "pshufd", { XM, EXx, Ib } }, - { "pshuflw",{ XM, EXx, Ib } }, - }, - /* PREGRP23 */ - { - { "movd", { Edq, MX } }, - { "movq", { XM, EXx } }, - { "movd", { Edq, XM } }, - { "(bad)", { Ed, XM } }, - }, - /* PREGRP24 */ - { - { "(bad)", { MX, EXx } }, - { "(bad)", { XM, EXx } }, - { "punpckhqdq", { XM, EXx } }, - { "(bad)", { XM, EXx } }, - }, - /* PREGRP25 */ - { - { "movntq", { EM, MX } }, - { "(bad)", { EM, XM } }, - { "movntdq",{ EM, XM } }, - { "(bad)", { EM, XM } }, - }, - /* PREGRP26 */ - { - { "(bad)", { MX, EXx } }, - { "(bad)", { XM, EXx } }, - { "punpcklqdq", { XM, EXx } }, - { "(bad)", { XM, EXx } }, - }, - /* PREGRP27 */ - { - { "(bad)", { MX, EXx } }, - { "(bad)", { XM, EXx } }, - { "addsubpd", { XM, EXx } }, - { "addsubps", { XM, EXx } }, - }, - /* PREGRP28 */ - { - { "(bad)", { MX, EXx } }, - { "(bad)", { XM, EXx } }, - { "haddpd", { XM, EXx } }, - { "haddps", { XM, EXx } }, - }, - /* PREGRP29 */ - { - { "(bad)", { MX, EXx } }, - { "(bad)", { XM, EXx } }, - { "hsubpd", { XM, EXx } }, - { "hsubps", { XM, EXx } }, - }, - /* PREGRP30 */ - { - { "movlpX", { XM, EXq, { SIMD_Fixup, 'h' } } }, /* really only 2 operands */ - { "movsldup", { XM, EXx } }, - { "movlpd", { XM, EXq } }, - { "movddup", { XM, EXq } }, - }, - /* PREGRP31 */ - { - { "movhpX", { XM, EXq, { SIMD_Fixup, 'l' } } }, - { "movshdup", { XM, EXx } }, - { "movhpd", { XM, EXq } }, - { "(bad)", { XM, EXq } }, - }, - /* PREGRP32 */ - { - { "(bad)", { XM, EXx } }, - { "(bad)", { XM, EXx } }, - { "(bad)", { XM, EXx } }, - { "lddqu", { XM, M } }, - }, - /* PREGRP33 */ - { - {"movntps", { Ev, XM } }, - {"movntss", { Ev, XM } }, - {"movntpd", { Ev, XM } }, - {"movntsd", { Ev, XM } }, - }, - - /* PREGRP34 */ - { - {"vmread", { Em, Gm } }, - {"(bad)", { XX } }, - {"extrq", { XS, Ib, Ib } }, - {"insertq", { XM, XS, Ib, Ib } }, - }, - - /* PREGRP35 */ - { - {"vmwrite", { Gm, Em } }, - {"(bad)", { XX } }, - {"extrq", { XM, XS } }, - {"insertq", { XM, XS } }, - }, - - /* PREGRP36 */ - { - { "bsrS", { Gv, Ev } }, - { "lzcntS", { Gv, Ev } }, - { "bsrS", { Gv, Ev } }, - { "(bad)", { XX } }, - }, - - /* PREGRP37 */ - { - { "(bad)", { XX } }, - { "popcntS", { Gv, Ev } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - }, - - /* PREGRP38 */ - { - { "xchgS", { { NOP_Fixup1, eAX_reg }, { NOP_Fixup2, eAX_reg } } }, - { "pause", { XX } }, - { "xchgS", { { NOP_Fixup1, eAX_reg }, { NOP_Fixup2, eAX_reg } } }, - { "(bad)", { XX } }, - }, - - /* PREGRP39 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pblendvb", {XM, EXx, XMM0 } }, - { "(bad)", { XX } }, - }, - - /* PREGRP40 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "blendvps", {XM, EXx, XMM0 } }, - { "(bad)", { XX } }, - }, - - /* PREGRP41 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "blendvpd", { XM, EXx, XMM0 } }, - { "(bad)", { XX } }, - }, - - /* PREGRP42 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "ptest", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP43 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pmovsxbw", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP44 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pmovsxbd", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP45 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pmovsxbq", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP46 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pmovsxwd", { XM, EXx } }, - 
{ "(bad)", { XX } }, - }, - - /* PREGRP47 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pmovsxwq", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP48 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pmovsxdq", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP49 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pmuldq", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP50 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pcmpeqq", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP51 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "movntdqa", { XM, EM } }, - { "(bad)", { XX } }, - }, - - /* PREGRP52 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "packusdw", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP53 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pmovzxbw", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP54 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pmovzxbd", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP55 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pmovzxbq", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP56 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pmovzxwd", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP57 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pmovzxwq", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP58 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pmovzxdq", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP59 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pminsb", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP60 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pminsd", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP61 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pminuw", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP62 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pminud", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP63 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pmaxsb", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP64 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pmaxsd", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP65 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pmaxuw", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP66 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pmaxud", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP67 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pmulld", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP68 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "phminposuw", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP69 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "roundps", { XM, EXx, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP70 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "roundpd", { XM, EXx, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP71 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "roundss", { XM, EXx, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP72 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "roundsd", { XM, EXx, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP73 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "blendps", { XM, EXx, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP74 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { 
"blendpd", { XM, EXx, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP75 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pblendw", { XM, EXx, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP76 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pextrb", { Edqb, XM, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP77 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pextrw", { Edqw, XM, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP78 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pextrK", { Edq, XM, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP79 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "extractps", { Edqd, XM, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP80 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pinsrb", { XM, Edqb, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP81 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "insertps", { XM, EXx, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP82 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pinsrK", { XM, Edq, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP83 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "dpps", { XM, EXx, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP84 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "dppd", { XM, EXx, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP85 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "mpsadbw", { XM, EXx, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP86 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pcmpgtq", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP87 */ - { - { "movbe", { Gv, Ev } }, - { "(bad)", { XX } }, - { "movbe", { Gv, Ev } }, - { "crc32", { Gdq, { CRC32_Fixup, b_mode } } }, - }, - - /* PREGRP88 */ - { - { "movbe", { Ev, Gv } }, - { "(bad)", { XX } }, - { "movbe", { Ev, Gv } }, - { "crc32", { Gdq, { CRC32_Fixup, v_mode } } }, - }, - - /* PREGRP89 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pcmpestrm", { XM, EXx, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP90 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pcmpestri", { XM, EXx, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP91 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pcmpistrm", { XM, EXx, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP92 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pcmpistri", { XM, EXx, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP93 */ - { - { "ucomiss",{ XM, EXd } }, - { "(bad)", { XX } }, - { "ucomisd",{ XM, EXq } }, - { "(bad)", { XX } }, - }, - - /* PREGRP94 */ - { - { "comiss", { XM, EXd } }, - { "(bad)", { XX } }, - { "comisd", { XM, EXq } }, - { "(bad)", { XX } }, - }, - - /* PREGRP95 */ - { - { "punpcklbw",{ MX, EMd } }, - { "(bad)", { XX } }, - { "punpcklbw",{ MX, EMq } }, - { "(bad)", { XX } }, - }, - - /* PREGRP96 */ - { - { "punpcklwd",{ MX, EMd } }, - { "(bad)", { XX } }, - { "punpcklwd",{ MX, EMq } }, - { "(bad)", { XX } }, - }, - - /* PREGRP97 */ - { - { "punpckldq",{ MX, EMd } }, - { "(bad)", { XX } }, - { "punpckldq",{ MX, EMq } }, - { "(bad)", { XX } }, - }, - - /* PREGRP98 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pclmulqdq", { XM, EXx, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP99 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "aesimc", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP100 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "aesenc", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP101 */ - { - { 
"(bad)", { XX } }, - { "(bad)", { XX } }, - { "aesenclast", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP102 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "aesdec", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP103 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "aesdeclast", { XM, EXx } }, - { "(bad)", { XX } }, - }, - - /* PREGRP104 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "aeskeygenassist", { XM, EXx, Ib } }, - { "(bad)", { XX } }, - }, - - /* PREGRP105 */ - { - { "andnS", { Gv, Bv, Ev } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - }, - - /* PREGRP106 */ - { - { "bextrS", { Gv, Ev, Bv } }, - { "sarxS", { Gv, Ev, Bv } }, - { "shlxS", { Gv, Ev, Bv } }, - { "shrxS", { Gv, Ev, Bv } }, - }, - - /* PREGRP107 */ - { - { "bsfS", { Gv, Ev } }, - { "tzcntS", { Gv, Ev } }, - { "bsfS", { Gv, Ev } }, - { "(bad)", { XX } }, - }, - - /* PREGRP108 */ - { - { "bzhi", { Gv, Ev, Bv } }, - { "pext", { Gv, Bv, Ev } }, - { "(bad)", { XX } }, - { "pdep", { Gv, Bv, Ev } }, - }, - - /* PREGRP109 */ - { - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "rorx", { Gv, Ev, Ib } }, - }, -}; - -static const struct dis386 x86_64_table[][2] = { - { - { "pusha{P|}", { XX } }, - { "(bad)", { XX } }, - }, - { - { "popa{P|}", { XX } }, - { "(bad)", { XX } }, - }, - { - { "bound{S|}", { Gv, Ma } }, - { "(bad)", { XX } }, - }, - { - { "arpl", { Ew, Gw } }, - { "movs{||lq|xd}", { Gv, Ed } }, - }, -}; - -static const struct dis386 three_byte_table[][256] = { - /* THREE_BYTE_0 */ - { - /* 00 */ - { "pshufb", { MX, EM } }, - { "phaddw", { MX, EM } }, - { "phaddd", { MX, EM } }, - { "phaddsw", { MX, EM } }, - { "pmaddubsw", { MX, EM } }, - { "phsubw", { MX, EM } }, - { "phsubd", { MX, EM } }, - { "phsubsw", { MX, EM } }, - /* 08 */ - { "psignb", { MX, EM } }, - { "psignw", { MX, EM } }, - { "psignd", { MX, EM } }, - { "pmulhrsw", { MX, EM } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 10 */ - { PREGRP39 }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { PREGRP40 }, - { PREGRP41 }, - { "(bad)", { XX } }, - { PREGRP42 }, - /* 18 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "pabsb", { MX, EM } }, - { "pabsw", { MX, EM } }, - { "pabsd", { MX, EM } }, - { "(bad)", { XX } }, - /* 20 */ - { PREGRP43 }, - { PREGRP44 }, - { PREGRP45 }, - { PREGRP46 }, - { PREGRP47 }, - { PREGRP48 }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 28 */ - { PREGRP49 }, - { PREGRP50 }, - { PREGRP51 }, - { PREGRP52 }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 30 */ - { PREGRP53 }, - { PREGRP54 }, - { PREGRP55 }, - { PREGRP56 }, - { PREGRP57 }, - { PREGRP58 }, - { "(bad)", { XX } }, - { PREGRP86 }, - /* 38 */ - { PREGRP59 }, - { PREGRP60 }, - { PREGRP61 }, - { PREGRP62 }, - { PREGRP63 }, - { PREGRP64 }, - { PREGRP65 }, - { PREGRP66 }, - /* 40 */ - { PREGRP67 }, - { PREGRP68 }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 48 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 50 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { 
XX } }, - { "(bad)", { XX } }, - /* 58 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 60 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 68 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 70 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 78 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 80 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 88 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 90 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 98 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* a0 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* a8 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* b0 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* b8 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* c0 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* c8 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* d0 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* d8 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { PREGRP99 }, - { PREGRP100 }, - { PREGRP101 }, - { PREGRP102 }, - { PREGRP103 }, - /* e0 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* e8 */ - { "(bad)", { XX } }, - { 
"(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* f0 */ - { PREGRP87 }, - { PREGRP88 }, - { PREGRP105 }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { PREGRP108 }, - { "(bad)", { XX } }, - { PREGRP106 }, - /* f8 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - }, - /* THREE_BYTE_1 */ - { - /* 00 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 08 */ - { PREGRP69 }, - { PREGRP70 }, - { PREGRP71 }, - { PREGRP72 }, - { PREGRP73 }, - { PREGRP74 }, - { PREGRP75 }, - { "palignr", { MX, EM, Ib } }, - /* 10 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { PREGRP76 }, - { PREGRP77 }, - { PREGRP78 }, - { PREGRP79 }, - /* 18 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 20 */ - { PREGRP80 }, - { PREGRP81 }, - { PREGRP82 }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 28 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 30 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 38 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 40 */ - { PREGRP83 }, - { PREGRP84 }, - { PREGRP85 }, - { "(bad)", { XX } }, - { PREGRP98 }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 48 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 50 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 58 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 60 */ - { PREGRP89 }, - { PREGRP90 }, - { PREGRP91 }, - { PREGRP92 }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 68 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 70 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 78 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 80 */ - { "(bad)", { XX } }, 
- { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 88 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 90 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* 98 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* a0 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* a8 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* b0 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* b8 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* c0 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* c8 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* d0 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* d8 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { PREGRP104 }, - /* e0 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* e8 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* f0 */ - { PREGRP109 }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - /* f8 */ - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - } -}; - -#define INTERNAL_DISASSEMBLER_ERROR "" - -static void -ckprefix (void) -{ - int newrex; - rex = 0; - prefixes = 0; - used_prefixes = 0; - rex_used = 0; - while (1) - { - fetch_data(the_info, codep + 1); - newrex = 0; - switch (*codep) - { - /* REX prefixes family. 
*/ - case 0x40: - case 0x41: - case 0x42: - case 0x43: - case 0x44: - case 0x45: - case 0x46: - case 0x47: - case 0x48: - case 0x49: - case 0x4a: - case 0x4b: - case 0x4c: - case 0x4d: - case 0x4e: - case 0x4f: - if (address_mode == mode_64bit) - newrex = *codep; - else - return; - break; - case 0xf3: - prefixes |= PREFIX_REPZ; - break; - case 0xf2: - prefixes |= PREFIX_REPNZ; - break; - case 0xf0: - prefixes |= PREFIX_LOCK; - break; - case 0x2e: - prefixes |= PREFIX_CS; - break; - case 0x36: - prefixes |= PREFIX_SS; - break; - case 0x3e: - prefixes |= PREFIX_DS; - break; - case 0x26: - prefixes |= PREFIX_ES; - break; - case 0x64: - prefixes |= PREFIX_FS; - break; - case 0x65: - prefixes |= PREFIX_GS; - break; - case 0x66: - prefixes |= PREFIX_DATA; - break; - case 0x67: - prefixes |= PREFIX_ADDR; - break; - case FWAIT_OPCODE: - /* fwait is really an instruction. If there are prefixes - before the fwait, they belong to the fwait, *not* to the - following instruction. */ - if (prefixes || rex) - { - prefixes |= PREFIX_FWAIT; - codep++; - return; - } - prefixes = PREFIX_FWAIT; - break; - default: - return; - } - /* Rex is ignored when followed by another prefix. */ - if (rex) - { - rex_used = rex; - return; - } - rex = newrex; - codep++; - } -} - -static void -ckvexprefix (void) -{ - int op, vex2, vex3, newrex = 0, newpfx = prefixes; - - if (address_mode == mode_16bit) { - return; - } - - fetch_data(the_info, codep + 1); - op = *codep; - - if (op != 0xc4 && op != 0xc5) { - return; - } - - fetch_data(the_info, codep + 2); - vex2 = codep[1]; - - if (address_mode == mode_32bit && (vex2 & 0xc0) != 0xc0) { - return; - } - - if (op == 0xc4) { - /* Three byte VEX prefix. */ - fetch_data(the_info, codep + 3); - vex3 = codep[2]; - - newrex |= (vex2 & 0x80 ? 0 : REX_R); - newrex |= (vex2 & 0x40 ? 0 : REX_X); - newrex |= (vex2 & 0x20 ? 0 : REX_B); - newrex |= (vex3 & 0x80 ? REX_W : 0); - switch (vex2 & 0x1f) { /* VEX.m-mmmm */ - case 1: - newpfx |= PREFIX_VEX_0F; - break; - case 2: - newpfx |= PREFIX_VEX_0F | PREFIX_VEX_0F38; - break; - case 3: - newpfx |= PREFIX_VEX_0F | PREFIX_VEX_0F3A; - break; - } - vex2 = vex3; - codep += 3; - } else { - /* Two byte VEX prefix. */ - newrex |= (vex2 & 0x80 ? 0 : REX_R); - newpfx |= PREFIX_VEX_0F; - codep += 2; - } - - vex_reg = (~vex2 >> 3) & 15; /* VEX.vvvv */ - switch (vex2 & 3) { /* VEX.pp */ - case 1: - newpfx |= PREFIX_DATA; /* 0x66 */ - break; - case 2: - newpfx |= PREFIX_REPZ; /* 0xf3 */ - break; - case 3: - newpfx |= PREFIX_REPNZ; /* 0xf2 */ - break; - } - - rex = newrex; - prefixes = newpfx; -} - -/* Return the name of the prefix byte PREF, or NULL if PREF is not a - prefix byte. */ - -static const char * -prefix_name (int pref, int sizeflag) -{ - static const char * const rexes [16] = - { - "rex", /* 0x40 */ - "rex.B", /* 0x41 */ - "rex.X", /* 0x42 */ - "rex.XB", /* 0x43 */ - "rex.R", /* 0x44 */ - "rex.RB", /* 0x45 */ - "rex.RX", /* 0x46 */ - "rex.RXB", /* 0x47 */ - "rex.W", /* 0x48 */ - "rex.WB", /* 0x49 */ - "rex.WX", /* 0x4a */ - "rex.WXB", /* 0x4b */ - "rex.WR", /* 0x4c */ - "rex.WRB", /* 0x4d */ - "rex.WRX", /* 0x4e */ - "rex.WRXB", /* 0x4f */ - }; - - switch (pref) - { - /* REX prefixes family. 
*/ - case 0x40: - case 0x41: - case 0x42: - case 0x43: - case 0x44: - case 0x45: - case 0x46: - case 0x47: - case 0x48: - case 0x49: - case 0x4a: - case 0x4b: - case 0x4c: - case 0x4d: - case 0x4e: - case 0x4f: - return rexes [pref - 0x40]; - case 0xf3: - return "repz"; - case 0xf2: - return "repnz"; - case 0xf0: - return "lock"; - case 0x2e: - return "cs"; - case 0x36: - return "ss"; - case 0x3e: - return "ds"; - case 0x26: - return "es"; - case 0x64: - return "fs"; - case 0x65: - return "gs"; - case 0x66: - return (sizeflag & DFLAG) ? "data16" : "data32"; - case 0x67: - if (address_mode == mode_64bit) - return (sizeflag & AFLAG) ? "addr32" : "addr64"; - else - return (sizeflag & AFLAG) ? "addr16" : "addr32"; - case FWAIT_OPCODE: - return "fwait"; - default: - return NULL; - } -} - -static char op_out[MAX_OPERANDS][100]; -static int op_ad, op_index[MAX_OPERANDS]; -static int two_source_ops; -static bfd_vma op_address[MAX_OPERANDS]; -static bfd_vma op_riprel[MAX_OPERANDS]; -static bfd_vma start_pc; - -/* - * On the 386's of 1988, the maximum length of an instruction is 15 bytes. - * (see topic "Redundant prefixes" in the "Differences from 8086" - * section of the "Virtual 8086 Mode" chapter.) - * 'pc' should be the address of this instruction, it will - * be used to print the target address if this is a relative jump or call - * The function returns the length of this instruction in bytes. - */ - -static char intel_syntax; -static char open_char; -static char close_char; -static char separator_char; -static char scale_char; - -int -print_insn_i386 (bfd_vma pc, disassemble_info *info) -{ - intel_syntax = -1; - - return print_insn (pc, info); -} - -static int -print_insn (bfd_vma pc, disassemble_info *info) -{ - const struct dis386 *dp; - int i; - char *op_txt[MAX_OPERANDS]; - int needcomma; - unsigned char uses_DATA_prefix, uses_LOCK_prefix; - unsigned char uses_REPNZ_prefix, uses_REPZ_prefix; - int sizeflag; - const char *p; - struct dis_private priv; - unsigned char op; - unsigned char threebyte; - - if (info->mach == bfd_mach_x86_64_intel_syntax - || info->mach == bfd_mach_x86_64) - address_mode = mode_64bit; - else - address_mode = mode_32bit; - - if (intel_syntax == (char) -1) - intel_syntax = (info->mach == bfd_mach_i386_i386_intel_syntax - || info->mach == bfd_mach_x86_64_intel_syntax); - - if (info->mach == bfd_mach_i386_i386 - || info->mach == bfd_mach_x86_64 - || info->mach == bfd_mach_i386_i386_intel_syntax - || info->mach == bfd_mach_x86_64_intel_syntax) - priv.orig_sizeflag = AFLAG | DFLAG; - else if (info->mach == bfd_mach_i386_i8086) - priv.orig_sizeflag = 0; - else - abort (); - - for (p = info->disassembler_options; p != NULL; ) - { - if (strncmp (p, "x86-64", 6) == 0) - { - address_mode = mode_64bit; - priv.orig_sizeflag = AFLAG | DFLAG; - } - else if (strncmp (p, "i386", 4) == 0) - { - address_mode = mode_32bit; - priv.orig_sizeflag = AFLAG | DFLAG; - } - else if (strncmp (p, "i8086", 5) == 0) - { - address_mode = mode_16bit; - priv.orig_sizeflag = 0; - } - else if (strncmp (p, "intel", 5) == 0) - { - intel_syntax = 1; - } - else if (strncmp (p, "att", 3) == 0) - { - intel_syntax = 0; - } - else if (strncmp (p, "addr", 4) == 0) - { - if (address_mode == mode_64bit) - { - if (p[4] == '3' && p[5] == '2') - priv.orig_sizeflag &= ~AFLAG; - else if (p[4] == '6' && p[5] == '4') - priv.orig_sizeflag |= AFLAG; - } - else - { - if (p[4] == '1' && p[5] == '6') - priv.orig_sizeflag &= ~AFLAG; - else if (p[4] == '3' && p[5] == '2') - priv.orig_sizeflag |= AFLAG; - } - } - else if 
(strncmp (p, "data", 4) == 0) - { - if (p[4] == '1' && p[5] == '6') - priv.orig_sizeflag &= ~DFLAG; - else if (p[4] == '3' && p[5] == '2') - priv.orig_sizeflag |= DFLAG; - } - else if (strncmp (p, "suffix", 6) == 0) - priv.orig_sizeflag |= SUFFIX_ALWAYS; - - p = strchr (p, ','); - if (p != NULL) - p++; - } - - if (intel_syntax) - { - names64 = intel_names64; - names32 = intel_names32; - names16 = intel_names16; - names8 = intel_names8; - names8rex = intel_names8rex; - names_seg = intel_names_seg; - index16 = intel_index16; - open_char = '['; - close_char = ']'; - separator_char = '+'; - scale_char = '*'; - } - else - { - names64 = att_names64; - names32 = att_names32; - names16 = att_names16; - names8 = att_names8; - names8rex = att_names8rex; - names_seg = att_names_seg; - index16 = att_index16; - open_char = '('; - close_char = ')'; - separator_char = ','; - scale_char = ','; - } - - /* The output looks better if we put 7 bytes on a line, since that - puts most long word instructions on a single line. */ - info->bytes_per_line = 7; - - info->private_data = &priv; - priv.max_fetched = priv.the_buffer; - priv.insn_start = pc; - - obuf[0] = 0; - for (i = 0; i < MAX_OPERANDS; ++i) - { - op_out[i][0] = 0; - op_index[i] = -1; - } - - the_info = info; - start_pc = pc; - start_codep = priv.the_buffer; - codep = priv.the_buffer; - - if (sigsetjmp(priv.bailout, 0) != 0) - { - const char *name; - - /* Getting here means we tried for data but didn't get it. That - means we have an incomplete instruction of some sort. Just - print the first byte as a prefix or a .byte pseudo-op. */ - if (codep > priv.the_buffer) - { - name = prefix_name (priv.the_buffer[0], priv.orig_sizeflag); - if (name != NULL) - (*info->fprintf_func) (info->stream, "%s", name); - else - { - /* Just print the first byte as a .byte instruction. */ - (*info->fprintf_func) (info->stream, ".byte 0x%x", - (unsigned int) priv.the_buffer[0]); - } - - return 1; - } - - return -1; - } - - obufp = obuf; - ckprefix (); - ckvexprefix (); - - insn_codep = codep; - sizeflag = priv.orig_sizeflag; - - fetch_data(info, codep + 1); - two_source_ops = (*codep == 0x62) || (*codep == 0xc8); - - if (((prefixes & PREFIX_FWAIT) - && ((*codep < 0xd8) || (*codep > 0xdf))) - || (rex && rex_used)) - { - const char *name; - - /* fwait not followed by floating point instruction, or rex followed - by other prefixes. Print the first prefix. 
*/ - name = prefix_name (priv.the_buffer[0], priv.orig_sizeflag); - if (name == NULL) - name = INTERNAL_DISASSEMBLER_ERROR; - (*info->fprintf_func) (info->stream, "%s", name); - return 1; - } - - op = 0; - if (prefixes & PREFIX_VEX_0F) - { - used_prefixes |= PREFIX_VEX_0F | PREFIX_VEX_0F38 | PREFIX_VEX_0F3A; - if (prefixes & PREFIX_VEX_0F38) - threebyte = 0x38; - else if (prefixes & PREFIX_VEX_0F3A) - threebyte = 0x3a; - else - threebyte = *codep++; - goto vex_opcode; - } - if (*codep == 0x0f) - { - fetch_data(info, codep + 2); - threebyte = codep[1]; - codep += 2; - vex_opcode: - dp = &dis386_twobyte[threebyte]; - need_modrm = twobyte_has_modrm[threebyte]; - uses_DATA_prefix = twobyte_uses_DATA_prefix[threebyte]; - uses_REPNZ_prefix = twobyte_uses_REPNZ_prefix[threebyte]; - uses_REPZ_prefix = twobyte_uses_REPZ_prefix[threebyte]; - uses_LOCK_prefix = (threebyte & ~0x02) == 0x20; - if (dp->name == NULL && dp->op[0].bytemode == IS_3BYTE_OPCODE) - { - fetch_data(info, codep + 2); - op = *codep++; - switch (threebyte) - { - case 0x38: - uses_DATA_prefix = threebyte_0x38_uses_DATA_prefix[op]; - uses_REPNZ_prefix = threebyte_0x38_uses_REPNZ_prefix[op]; - uses_REPZ_prefix = threebyte_0x38_uses_REPZ_prefix[op]; - break; - case 0x3a: - uses_DATA_prefix = threebyte_0x3a_uses_DATA_prefix[op]; - uses_REPNZ_prefix = threebyte_0x3a_uses_REPNZ_prefix[op]; - uses_REPZ_prefix = threebyte_0x3a_uses_REPZ_prefix[op]; - break; - default: - break; - } - } - } - else - { - dp = &dis386[*codep]; - need_modrm = onebyte_has_modrm[*codep]; - uses_DATA_prefix = 0; - uses_REPNZ_prefix = 0; - /* pause is 0xf3 0x90. */ - uses_REPZ_prefix = *codep == 0x90; - uses_LOCK_prefix = 0; - codep++; - } - - if (!uses_REPZ_prefix && (prefixes & PREFIX_REPZ)) - { - oappend ("repz "); - used_prefixes |= PREFIX_REPZ; - } - if (!uses_REPNZ_prefix && (prefixes & PREFIX_REPNZ)) - { - oappend ("repnz "); - used_prefixes |= PREFIX_REPNZ; - } - - if (!uses_LOCK_prefix && (prefixes & PREFIX_LOCK)) - { - oappend ("lock "); - used_prefixes |= PREFIX_LOCK; - } - - if (prefixes & PREFIX_ADDR) - { - sizeflag ^= AFLAG; - if (dp->op[2].bytemode != loop_jcxz_mode || intel_syntax) - { - if ((sizeflag & AFLAG) || address_mode == mode_64bit) - oappend ("addr32 "); - else - oappend ("addr16 "); - used_prefixes |= PREFIX_ADDR; - } - } - - if (!uses_DATA_prefix && (prefixes & PREFIX_DATA)) - { - sizeflag ^= DFLAG; - if (dp->op[2].bytemode == cond_jump_mode - && dp->op[0].bytemode == v_mode - && !intel_syntax) - { - if (sizeflag & DFLAG) - oappend ("data32 "); - else - oappend ("data16 "); - used_prefixes |= PREFIX_DATA; - } - } - - if (dp->name == NULL && dp->op[0].bytemode == IS_3BYTE_OPCODE) - { - dp = &three_byte_table[dp->op[1].bytemode][op]; - modrm.mod = (*codep >> 6) & 3; - modrm.reg = (*codep >> 3) & 7; - modrm.rm = *codep & 7; - } - else if (need_modrm) - { - fetch_data(info, codep + 1); - modrm.mod = (*codep >> 6) & 3; - modrm.reg = (*codep >> 3) & 7; - modrm.rm = *codep & 7; - } - - if (dp->name == NULL && dp->op[0].bytemode == FLOATCODE) - { - dofloat (sizeflag); - } - else - { - int index; - if (dp->name == NULL) - { - switch (dp->op[0].bytemode) - { - case USE_GROUPS: - dp = &grps[dp->op[1].bytemode][modrm.reg]; - break; - - case USE_PREFIX_USER_TABLE: - index = 0; - used_prefixes |= (prefixes & PREFIX_REPZ); - if (prefixes & PREFIX_REPZ) - index = 1; - else - { - /* We should check PREFIX_REPNZ and PREFIX_REPZ - before PREFIX_DATA. 
*/ - used_prefixes |= (prefixes & PREFIX_REPNZ); - if (prefixes & PREFIX_REPNZ) - index = 3; - else - { - used_prefixes |= (prefixes & PREFIX_DATA); - if (prefixes & PREFIX_DATA) - index = 2; - } - } - dp = &prefix_user_table[dp->op[1].bytemode][index]; - break; - - case X86_64_SPECIAL: - index = address_mode == mode_64bit ? 1 : 0; - dp = &x86_64_table[dp->op[1].bytemode][index]; - break; - - default: - oappend (INTERNAL_DISASSEMBLER_ERROR); - break; - } - } - - if (dp->name != NULL && putop (dp->name, sizeflag) == 0) - { - for (i = 0; i < MAX_OPERANDS; ++i) - { - obufp = op_out[i]; - op_ad = MAX_OPERANDS - 1 - i; - if (dp->op[i].rtn) - (*dp->op[i].rtn) (dp->op[i].bytemode, sizeflag); - } - } - } - - /* See if any prefixes were not used. If so, print the first one - separately. If we don't do this, we'll wind up printing an - instruction stream which does not precisely correspond to the - bytes we are disassembling. */ - if ((prefixes & ~used_prefixes) != 0) - { - const char *name; - - name = prefix_name (priv.the_buffer[0], priv.orig_sizeflag); - if (name == NULL) - name = INTERNAL_DISASSEMBLER_ERROR; - (*info->fprintf_func) (info->stream, "%s", name); - return 1; - } - if (rex & ~rex_used) - { - const char *name; - name = prefix_name (rex | 0x40, priv.orig_sizeflag); - if (name == NULL) - name = INTERNAL_DISASSEMBLER_ERROR; - (*info->fprintf_func) (info->stream, "%s ", name); - } - - obufp = obuf + strlen (obuf); - for (i = strlen (obuf); i < 6; i++) - oappend (" "); - oappend (" "); - (*info->fprintf_func) (info->stream, "%s", obuf); - - /* The enter and bound instructions are printed with operands in the same - order as the intel book; everything else is printed in reverse order. */ - if (intel_syntax || two_source_ops) - { - bfd_vma riprel; - - for (i = 0; i < MAX_OPERANDS; ++i) - op_txt[i] = op_out[i]; - - for (i = 0; i < (MAX_OPERANDS >> 1); ++i) - { - op_ad = op_index[i]; - op_index[i] = op_index[MAX_OPERANDS - 1 - i]; - op_index[MAX_OPERANDS - 1 - i] = op_ad; - riprel = op_riprel[i]; - op_riprel[i] = op_riprel [MAX_OPERANDS - 1 - i]; - op_riprel[MAX_OPERANDS - 1 - i] = riprel; - } - } - else - { - for (i = 0; i < MAX_OPERANDS; ++i) - op_txt[MAX_OPERANDS - 1 - i] = op_out[i]; - } - - needcomma = 0; - for (i = 0; i < MAX_OPERANDS; ++i) - if (*op_txt[i]) - { - if (needcomma) - (*info->fprintf_func) (info->stream, ","); - if (op_index[i] != -1 && !op_riprel[i]) - (*info->print_address_func) ((bfd_vma) op_address[op_index[i]], info); - else - (*info->fprintf_func) (info->stream, "%s", op_txt[i]); - needcomma = 1; - } - - for (i = 0; i < MAX_OPERANDS; i++) - if (op_index[i] != -1 && op_riprel[i]) - { - (*info->fprintf_func) (info->stream, " # "); - (*info->print_address_func) ((bfd_vma) (start_pc + codep - start_codep - + op_address[op_index[i]]), info); - break; - } - return codep - priv.the_buffer; -} - -static const char *float_mem[] = { - /* d8 */ - "fadd{s||s|}", - "fmul{s||s|}", - "fcom{s||s|}", - "fcomp{s||s|}", - "fsub{s||s|}", - "fsubr{s||s|}", - "fdiv{s||s|}", - "fdivr{s||s|}", - /* d9 */ - "fld{s||s|}", - "(bad)", - "fst{s||s|}", - "fstp{s||s|}", - "fldenvIC", - "fldcw", - "fNstenvIC", - "fNstcw", - /* da */ - "fiadd{l||l|}", - "fimul{l||l|}", - "ficom{l||l|}", - "ficomp{l||l|}", - "fisub{l||l|}", - "fisubr{l||l|}", - "fidiv{l||l|}", - "fidivr{l||l|}", - /* db */ - "fild{l||l|}", - "fisttp{l||l|}", - "fist{l||l|}", - "fistp{l||l|}", - "(bad)", - "fld{t||t|}", - "(bad)", - "fstp{t||t|}", - /* dc */ - "fadd{l||l|}", - "fmul{l||l|}", - "fcom{l||l|}", - "fcomp{l||l|}", - 
"fsub{l||l|}", - "fsubr{l||l|}", - "fdiv{l||l|}", - "fdivr{l||l|}", - /* dd */ - "fld{l||l|}", - "fisttp{ll||ll|}", - "fst{l||l|}", - "fstp{l||l|}", - "frstorIC", - "(bad)", - "fNsaveIC", - "fNstsw", - /* de */ - "fiadd", - "fimul", - "ficom", - "ficomp", - "fisub", - "fisubr", - "fidiv", - "fidivr", - /* df */ - "fild", - "fisttp", - "fist", - "fistp", - "fbld", - "fild{ll||ll|}", - "fbstp", - "fistp{ll||ll|}", -}; - -static const unsigned char float_mem_mode[] = { - /* d8 */ - d_mode, - d_mode, - d_mode, - d_mode, - d_mode, - d_mode, - d_mode, - d_mode, - /* d9 */ - d_mode, - 0, - d_mode, - d_mode, - 0, - w_mode, - 0, - w_mode, - /* da */ - d_mode, - d_mode, - d_mode, - d_mode, - d_mode, - d_mode, - d_mode, - d_mode, - /* db */ - d_mode, - d_mode, - d_mode, - d_mode, - 0, - t_mode, - 0, - t_mode, - /* dc */ - q_mode, - q_mode, - q_mode, - q_mode, - q_mode, - q_mode, - q_mode, - q_mode, - /* dd */ - q_mode, - q_mode, - q_mode, - q_mode, - 0, - 0, - 0, - w_mode, - /* de */ - w_mode, - w_mode, - w_mode, - w_mode, - w_mode, - w_mode, - w_mode, - w_mode, - /* df */ - w_mode, - w_mode, - w_mode, - w_mode, - t_mode, - q_mode, - t_mode, - q_mode -}; - -#define ST { OP_ST, 0 } -#define STi { OP_STi, 0 } - -#define FGRPd9_2 NULL, { { NULL, 0 } } -#define FGRPd9_4 NULL, { { NULL, 1 } } -#define FGRPd9_5 NULL, { { NULL, 2 } } -#define FGRPd9_6 NULL, { { NULL, 3 } } -#define FGRPd9_7 NULL, { { NULL, 4 } } -#define FGRPda_5 NULL, { { NULL, 5 } } -#define FGRPdb_4 NULL, { { NULL, 6 } } -#define FGRPde_3 NULL, { { NULL, 7 } } -#define FGRPdf_4 NULL, { { NULL, 8 } } - -static const struct dis386 float_reg[][8] = { - /* d8 */ - { - { "fadd", { ST, STi } }, - { "fmul", { ST, STi } }, - { "fcom", { STi } }, - { "fcomp", { STi } }, - { "fsub", { ST, STi } }, - { "fsubr", { ST, STi } }, - { "fdiv", { ST, STi } }, - { "fdivr", { ST, STi } }, - }, - /* d9 */ - { - { "fld", { STi } }, - { "fxch", { STi } }, - { FGRPd9_2 }, - { "(bad)", { XX } }, - { FGRPd9_4 }, - { FGRPd9_5 }, - { FGRPd9_6 }, - { FGRPd9_7 }, - }, - /* da */ - { - { "fcmovb", { ST, STi } }, - { "fcmove", { ST, STi } }, - { "fcmovbe",{ ST, STi } }, - { "fcmovu", { ST, STi } }, - { "(bad)", { XX } }, - { FGRPda_5 }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - }, - /* db */ - { - { "fcmovnb",{ ST, STi } }, - { "fcmovne",{ ST, STi } }, - { "fcmovnbe",{ ST, STi } }, - { "fcmovnu",{ ST, STi } }, - { FGRPdb_4 }, - { "fucomi", { ST, STi } }, - { "fcomi", { ST, STi } }, - { "(bad)", { XX } }, - }, - /* dc */ - { - { "fadd", { STi, ST } }, - { "fmul", { STi, ST } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, -#if SYSV386_COMPAT - { "fsub", { STi, ST } }, - { "fsubr", { STi, ST } }, - { "fdiv", { STi, ST } }, - { "fdivr", { STi, ST } }, -#else - { "fsubr", { STi, ST } }, - { "fsub", { STi, ST } }, - { "fdivr", { STi, ST } }, - { "fdiv", { STi, ST } }, -#endif - }, - /* dd */ - { - { "ffree", { STi } }, - { "(bad)", { XX } }, - { "fst", { STi } }, - { "fstp", { STi } }, - { "fucom", { STi } }, - { "fucomp", { STi } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - }, - /* de */ - { - { "faddp", { STi, ST } }, - { "fmulp", { STi, ST } }, - { "(bad)", { XX } }, - { FGRPde_3 }, -#if SYSV386_COMPAT - { "fsubp", { STi, ST } }, - { "fsubrp", { STi, ST } }, - { "fdivp", { STi, ST } }, - { "fdivrp", { STi, ST } }, -#else - { "fsubrp", { STi, ST } }, - { "fsubp", { STi, ST } }, - { "fdivrp", { STi, ST } }, - { "fdivp", { STi, ST } }, -#endif - }, - /* df */ - { - { "ffreep", { STi } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { "(bad)", { XX } }, - { 
FGRPdf_4 }, - { "fucomip", { ST, STi } }, - { "fcomip", { ST, STi } }, - { "(bad)", { XX } }, - }, -}; - -static const char *fgrps[][8] = { - /* d9_2 0 */ - { - "fnop","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)", - }, - - /* d9_4 1 */ - { - "fchs","fabs","(bad)","(bad)","ftst","fxam","(bad)","(bad)", - }, - - /* d9_5 2 */ - { - "fld1","fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz","(bad)", - }, - - /* d9_6 3 */ - { - "f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp","fincstp", - }, - - /* d9_7 4 */ - { - "fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos", - }, - - /* da_5 5 */ - { - "(bad)","fucompp","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)", - }, - - /* db_4 6 */ - { - "feni(287 only)","fdisi(287 only)","fNclex","fNinit", - "fNsetpm(287 only)","(bad)","(bad)","(bad)", - }, - - /* de_3 7 */ - { - "(bad)","fcompp","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)", - }, - - /* df_4 8 */ - { - "fNstsw","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)", - }, -}; - -static void -dofloat (int sizeflag) -{ - const struct dis386 *dp; - unsigned char floatop; - - floatop = codep[-1]; - - if (modrm.mod != 3) - { - int fp_indx = (floatop - 0xd8) * 8 + modrm.reg; - - putop (float_mem[fp_indx], sizeflag); - obufp = op_out[0]; - op_ad = 2; - OP_E (float_mem_mode[fp_indx], sizeflag); - return; - } - /* Skip mod/rm byte. */ - MODRM_CHECK; - codep++; - - dp = &float_reg[floatop - 0xd8][modrm.reg]; - if (dp->name == NULL) - { - putop (fgrps[dp->op[0].bytemode][modrm.rm], sizeflag); - - /* Instruction fnstsw is only one with strange arg. */ - if (floatop == 0xdf && codep[-1] == 0xe0) - pstrcpy (op_out[0], sizeof(op_out[0]), names16[0]); - } - else - { - putop (dp->name, sizeflag); - - obufp = op_out[0]; - op_ad = 2; - if (dp->op[0].rtn) - (*dp->op[0].rtn) (dp->op[0].bytemode, sizeflag); - - obufp = op_out[1]; - op_ad = 1; - if (dp->op[1].rtn) - (*dp->op[1].rtn) (dp->op[1].bytemode, sizeflag); - } -} - -static void -OP_ST (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -{ - oappend ("%st" + intel_syntax); -} - -static void -OP_STi (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -{ - snprintf (scratchbuf, sizeof(scratchbuf), "%%st(%d)", modrm.rm); - oappend (scratchbuf + intel_syntax); -} - -/* Capital letters in template are macros. */ -static int -putop (const char *template, int sizeflag) -{ - const char *p; - int alt = 0; - - for (p = template; *p; p++) - { - switch (*p) - { - default: - *obufp++ = *p; - break; - case '{': - alt = 0; - if (intel_syntax) - alt += 1; - if (address_mode == mode_64bit) - alt += 2; - while (alt != 0) - { - while (*++p != '|') - { - if (*p == '}') - { - /* Alternative not valid. */ - pstrcpy (obuf, sizeof(obuf), "(bad)"); - obufp = obuf + 5; - return 1; - } - else if (*p == '\0') - abort (); - } - alt--; - } - /* Fall through. */ - case 'I': - alt = 1; - continue; - case '|': - while (*++p != '}') - { - if (*p == '\0') - abort (); - } - break; - case '}': - break; - case 'A': - if (intel_syntax) - break; - if (modrm.mod != 3 || (sizeflag & SUFFIX_ALWAYS)) - *obufp++ = 'b'; - break; - case 'B': - if (intel_syntax) - break; - if (sizeflag & SUFFIX_ALWAYS) - *obufp++ = 'b'; - break; - case 'C': - if (intel_syntax && !alt) - break; - if ((prefixes & PREFIX_DATA) || (sizeflag & SUFFIX_ALWAYS)) - { - if (sizeflag & DFLAG) - *obufp++ = intel_syntax ? 'd' : 'l'; - else - *obufp++ = intel_syntax ? 
'w' : 's'; - used_prefixes |= (prefixes & PREFIX_DATA); - } - break; - case 'D': - if (intel_syntax || !(sizeflag & SUFFIX_ALWAYS)) - break; - USED_REX (REX_W); - if (modrm.mod == 3) - { - if (rex & REX_W) - *obufp++ = 'q'; - else if (sizeflag & DFLAG) - *obufp++ = intel_syntax ? 'd' : 'l'; - else - *obufp++ = 'w'; - used_prefixes |= (prefixes & PREFIX_DATA); - } - else - *obufp++ = 'w'; - break; - case 'E': /* For jcxz/jecxz */ - if (address_mode == mode_64bit) - { - if (sizeflag & AFLAG) - *obufp++ = 'r'; - else - *obufp++ = 'e'; - } - else - if (sizeflag & AFLAG) - *obufp++ = 'e'; - used_prefixes |= (prefixes & PREFIX_ADDR); - break; - case 'F': - if (intel_syntax) - break; - if ((prefixes & PREFIX_ADDR) || (sizeflag & SUFFIX_ALWAYS)) - { - if (sizeflag & AFLAG) - *obufp++ = address_mode == mode_64bit ? 'q' : 'l'; - else - *obufp++ = address_mode == mode_64bit ? 'l' : 'w'; - used_prefixes |= (prefixes & PREFIX_ADDR); - } - break; - case 'G': - if (intel_syntax || (obufp[-1] != 's' && !(sizeflag & SUFFIX_ALWAYS))) - break; - if ((rex & REX_W) || (sizeflag & DFLAG)) - *obufp++ = 'l'; - else - *obufp++ = 'w'; - if (!(rex & REX_W)) - used_prefixes |= (prefixes & PREFIX_DATA); - break; - case 'H': - if (intel_syntax) - break; - if ((prefixes & (PREFIX_CS | PREFIX_DS)) == PREFIX_CS - || (prefixes & (PREFIX_CS | PREFIX_DS)) == PREFIX_DS) - { - used_prefixes |= prefixes & (PREFIX_CS | PREFIX_DS); - *obufp++ = ','; - *obufp++ = 'p'; - if (prefixes & PREFIX_DS) - *obufp++ = 't'; - else - *obufp++ = 'n'; - } - break; - case 'J': - if (intel_syntax) - break; - *obufp++ = 'l'; - break; - case 'K': - USED_REX (REX_W); - if (rex & REX_W) - *obufp++ = 'q'; - else - *obufp++ = 'd'; - break; - case 'Z': - if (intel_syntax) - break; - if (address_mode == mode_64bit && (sizeflag & SUFFIX_ALWAYS)) - { - *obufp++ = 'q'; - break; - } - /* Fall through. */ - case 'L': - if (intel_syntax) - break; - if (sizeflag & SUFFIX_ALWAYS) - *obufp++ = 'l'; - break; - case 'N': - if ((prefixes & PREFIX_FWAIT) == 0) - *obufp++ = 'n'; - else - used_prefixes |= PREFIX_FWAIT; - break; - case 'O': - USED_REX (REX_W); - if (rex & REX_W) - *obufp++ = 'o'; - else if (intel_syntax && (sizeflag & DFLAG)) - *obufp++ = 'q'; - else - *obufp++ = 'd'; - if (!(rex & REX_W)) - used_prefixes |= (prefixes & PREFIX_DATA); - break; - case 'T': - if (intel_syntax) - break; - if (address_mode == mode_64bit && (sizeflag & DFLAG)) - { - *obufp++ = 'q'; - break; - } - /* Fall through. */ - case 'P': - if (intel_syntax) - break; - if ((prefixes & PREFIX_DATA) - || (rex & REX_W) - || (sizeflag & SUFFIX_ALWAYS)) - { - USED_REX (REX_W); - if (rex & REX_W) - *obufp++ = 'q'; - else - { - if (sizeflag & DFLAG) - *obufp++ = 'l'; - else - *obufp++ = 'w'; - } - used_prefixes |= (prefixes & PREFIX_DATA); - } - break; - case 'U': - if (intel_syntax) - break; - if (address_mode == mode_64bit && (sizeflag & DFLAG)) - { - if (modrm.mod != 3 || (sizeflag & SUFFIX_ALWAYS)) - *obufp++ = 'q'; - break; - } - /* Fall through. */ - case 'Q': - if (intel_syntax && !alt) - break; - USED_REX (REX_W); - if (modrm.mod != 3 || (sizeflag & SUFFIX_ALWAYS)) - { - if (rex & REX_W) - *obufp++ = 'q'; - else - { - if (sizeflag & DFLAG) - *obufp++ = intel_syntax ? 
'd' : 'l'; - else - *obufp++ = 'w'; - } - used_prefixes |= (prefixes & PREFIX_DATA); - } - break; - case 'R': - USED_REX (REX_W); - if (rex & REX_W) - *obufp++ = 'q'; - else if (sizeflag & DFLAG) - { - if (intel_syntax) - *obufp++ = 'd'; - else - *obufp++ = 'l'; - } - else - *obufp++ = 'w'; - if (intel_syntax && !p[1] - && ((rex & REX_W) || (sizeflag & DFLAG))) - *obufp++ = 'e'; - if (!(rex & REX_W)) - used_prefixes |= (prefixes & PREFIX_DATA); - break; - case 'V': - if (intel_syntax) - break; - if (address_mode == mode_64bit && (sizeflag & DFLAG)) - { - if (sizeflag & SUFFIX_ALWAYS) - *obufp++ = 'q'; - break; - } - /* Fall through. */ - case 'S': - if (intel_syntax) - break; - if (sizeflag & SUFFIX_ALWAYS) - { - if (rex & REX_W) - *obufp++ = 'q'; - else - { - if (sizeflag & DFLAG) - *obufp++ = 'l'; - else - *obufp++ = 'w'; - used_prefixes |= (prefixes & PREFIX_DATA); - } - } - break; - case 'X': - if (prefixes & PREFIX_DATA) - *obufp++ = 'd'; - else - *obufp++ = 's'; - used_prefixes |= (prefixes & PREFIX_DATA); - break; - case 'Y': - if (intel_syntax) - break; - if (rex & REX_W) - { - USED_REX (REX_W); - *obufp++ = 'q'; - } - break; - /* implicit operand size 'l' for i386 or 'q' for x86-64 */ - case 'W': - /* operand size flag for cwtl, cbtw */ - USED_REX (REX_W); - if (rex & REX_W) - { - if (intel_syntax) - *obufp++ = 'd'; - else - *obufp++ = 'l'; - } - else if (sizeflag & DFLAG) - *obufp++ = 'w'; - else - *obufp++ = 'b'; - if (!(rex & REX_W)) - used_prefixes |= (prefixes & PREFIX_DATA); - break; - } - alt = 0; - } - *obufp = 0; - return 0; -} - -static void -oappend (const char *s) -{ - strcpy (obufp, s); - obufp += strlen (s); -} - -static void -append_seg (void) -{ - if (prefixes & PREFIX_CS) - { - used_prefixes |= PREFIX_CS; - oappend ("%cs:" + intel_syntax); - } - if (prefixes & PREFIX_DS) - { - used_prefixes |= PREFIX_DS; - oappend ("%ds:" + intel_syntax); - } - if (prefixes & PREFIX_SS) - { - used_prefixes |= PREFIX_SS; - oappend ("%ss:" + intel_syntax); - } - if (prefixes & PREFIX_ES) - { - used_prefixes |= PREFIX_ES; - oappend ("%es:" + intel_syntax); - } - if (prefixes & PREFIX_FS) - { - used_prefixes |= PREFIX_FS; - oappend ("%fs:" + intel_syntax); - } - if (prefixes & PREFIX_GS) - { - used_prefixes |= PREFIX_GS; - oappend ("%gs:" + intel_syntax); - } -} - -static void -OP_indirE (int bytemode, int sizeflag) -{ - if (!intel_syntax) - oappend ("*"); - OP_E (bytemode, sizeflag); -} - -static void -print_operand_value (char *buf, size_t bufsize, int hex, bfd_vma disp) -{ - if (address_mode == mode_64bit) - { - if (hex) - { - char tmp[30]; - int i; - buf[0] = '0'; - buf[1] = 'x'; - snprintf_vma (tmp, sizeof(tmp), disp); - for (i = 0; tmp[i] == '0' && tmp[i + 1]; i++) { - } - pstrcpy (buf + 2, bufsize - 2, tmp + i); - } - else - { - bfd_signed_vma v = disp; - char tmp[30]; - int i; - if (v < 0) - { - *(buf++) = '-'; - v = -disp; - /* Check for possible overflow on 0x8000000000000000. */ - if (v < 0) - { - pstrcpy (buf, bufsize, "9223372036854775808"); - return; - } - } - if (!v) - { - pstrcpy (buf, bufsize, "0"); - return; - } - - i = 0; - tmp[29] = 0; - while (v) - { - tmp[28 - i] = (v % 10) + '0'; - v /= 10; - i++; - } - pstrcpy (buf, bufsize, tmp + 29 - i); - } - } - else - { - if (hex) - snprintf (buf, bufsize, "0x%x", (unsigned int) disp); - else - snprintf (buf, bufsize, "%d", (int) disp); - } -} - -/* Put DISP in BUF as signed hex number. 
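   For instance, a one-byte displacement of 0xf8 (sign-extended to -8 by
   the caller) comes out as -0x8.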
*/ - -static void -print_displacement (char *buf, bfd_vma disp) -{ - bfd_signed_vma val = disp; - char tmp[30]; - int i, j = 0; - - if (val < 0) - { - buf[j++] = '-'; - val = -disp; - - /* Check for possible overflow. */ - if (val < 0) - { - switch (address_mode) - { - case mode_64bit: - strcpy (buf + j, "0x8000000000000000"); - break; - case mode_32bit: - strcpy (buf + j, "0x80000000"); - break; - case mode_16bit: - strcpy (buf + j, "0x8000"); - break; - } - return; - } - } - - buf[j++] = '0'; - buf[j++] = 'x'; - - snprintf_vma (tmp, sizeof(tmp), val); - for (i = 0; tmp[i] == '0'; i++) - continue; - if (tmp[i] == '\0') - i--; - strcpy (buf + j, tmp + i); -} - -static void -intel_operand_size (int bytemode, int sizeflag) -{ - switch (bytemode) - { - case b_mode: - case dqb_mode: - oappend ("BYTE PTR "); - break; - case w_mode: - case dqw_mode: - oappend ("WORD PTR "); - break; - case stack_v_mode: - if (address_mode == mode_64bit && (sizeflag & DFLAG)) - { - oappend ("QWORD PTR "); - used_prefixes |= (prefixes & PREFIX_DATA); - break; - } - /* FALLTHRU */ - case v_mode: - case dq_mode: - USED_REX (REX_W); - if (rex & REX_W) - oappend ("QWORD PTR "); - else if ((sizeflag & DFLAG) || bytemode == dq_mode) - oappend ("DWORD PTR "); - else - oappend ("WORD PTR "); - used_prefixes |= (prefixes & PREFIX_DATA); - break; - case z_mode: - if ((rex & REX_W) || (sizeflag & DFLAG)) - *obufp++ = 'D'; - oappend ("WORD PTR "); - if (!(rex & REX_W)) - used_prefixes |= (prefixes & PREFIX_DATA); - break; - case d_mode: - case dqd_mode: - oappend ("DWORD PTR "); - break; - case q_mode: - oappend ("QWORD PTR "); - break; - case m_mode: - if (address_mode == mode_64bit) - oappend ("QWORD PTR "); - else - oappend ("DWORD PTR "); - break; - case f_mode: - if (sizeflag & DFLAG) - oappend ("FWORD PTR "); - else - oappend ("DWORD PTR "); - used_prefixes |= (prefixes & PREFIX_DATA); - break; - case t_mode: - oappend ("TBYTE PTR "); - break; - case x_mode: - oappend ("XMMWORD PTR "); - break; - case o_mode: - oappend ("OWORD PTR "); - break; - default: - break; - } -} - -static void -OP_E (int bytemode, int sizeflag) -{ - bfd_vma disp; - int add = 0; - int riprel = 0; - USED_REX (REX_B); - if (rex & REX_B) - add += 8; - - /* Skip mod/rm byte. 
*/ - MODRM_CHECK; - codep++; - - if (modrm.mod == 3) - { - switch (bytemode) - { - case b_mode: - USED_REX (0); - if (rex) - oappend (names8rex[modrm.rm + add]); - else - oappend (names8[modrm.rm + add]); - break; - case w_mode: - oappend (names16[modrm.rm + add]); - break; - case d_mode: - oappend (names32[modrm.rm + add]); - break; - case q_mode: - oappend (names64[modrm.rm + add]); - break; - case m_mode: - if (address_mode == mode_64bit) - oappend (names64[modrm.rm + add]); - else - oappend (names32[modrm.rm + add]); - break; - case stack_v_mode: - if (address_mode == mode_64bit && (sizeflag & DFLAG)) - { - oappend (names64[modrm.rm + add]); - used_prefixes |= (prefixes & PREFIX_DATA); - break; - } - bytemode = v_mode; - /* FALLTHRU */ - case v_mode: - case dq_mode: - case dqb_mode: - case dqd_mode: - case dqw_mode: - USED_REX (REX_W); - if (rex & REX_W) - oappend (names64[modrm.rm + add]); - else if ((sizeflag & DFLAG) || bytemode != v_mode) - oappend (names32[modrm.rm + add]); - else - oappend (names16[modrm.rm + add]); - used_prefixes |= (prefixes & PREFIX_DATA); - break; - case 0: - break; - default: - oappend (INTERNAL_DISASSEMBLER_ERROR); - break; - } - return; - } - - disp = 0; - if (intel_syntax) - intel_operand_size (bytemode, sizeflag); - append_seg (); - - if ((sizeflag & AFLAG) || address_mode == mode_64bit) - { - /* 32/64 bit address mode */ - int havedisp; - int havesib; - int havebase; - int base; - int index = 0; - int scale = 0; - - havesib = 0; - havebase = 1; - base = modrm.rm; - - if (base == 4) - { - havesib = 1; - fetch_data(the_info, codep + 1); - index = (*codep >> 3) & 7; - if (address_mode == mode_64bit || index != 0x4) - /* When INDEX == 0x4 in 32 bit mode, SCALE is ignored. */ - scale = (*codep >> 6) & 3; - base = *codep & 7; - USED_REX (REX_X); - if (rex & REX_X) - index += 8; - codep++; - } - base += add; - - switch (modrm.mod) - { - case 0: - if ((base & 7) == 5) - { - havebase = 0; - if (address_mode == mode_64bit && !havesib) - riprel = 1; - disp = get32s (); - } - break; - case 1: - fetch_data (the_info, codep + 1); - disp = *codep++; - if ((disp & 0x80) != 0) - disp -= 0x100; - break; - case 2: - disp = get32s (); - break; - } - - havedisp = havebase || (havesib && (index != 4 || scale != 0)); - - if (!intel_syntax) - if (modrm.mod != 0 || (base & 7) == 5) - { - if (havedisp || riprel) - print_displacement (scratchbuf, disp); - else - print_operand_value (scratchbuf, sizeof(scratchbuf), 1, disp); - oappend (scratchbuf); - if (riprel) - { - set_op (disp, 1); - oappend ("(%rip)"); - } - } - - if (havedisp || (intel_syntax && riprel)) - { - *obufp++ = open_char; - if (intel_syntax && riprel) - { - set_op (disp, 1); - oappend ("rip"); - } - *obufp = '\0'; - if (havebase) - oappend (address_mode == mode_64bit && (sizeflag & AFLAG) - ? names64[base] : names32[base]); - if (havesib) - { - if (index != 4) - { - if (!intel_syntax || havebase) - { - *obufp++ = separator_char; - *obufp = '\0'; - } - oappend (address_mode == mode_64bit && (sizeflag & AFLAG) - ? 
names64[index] : names32[index]); - } - if (scale != 0 || (!intel_syntax && index != 4)) - { - *obufp++ = scale_char; - *obufp = '\0'; - snprintf (scratchbuf, sizeof(scratchbuf), "%d", 1 << scale); - oappend (scratchbuf); - } - } - if (intel_syntax - && (disp || modrm.mod != 0 || (base & 7) == 5)) - { - if ((bfd_signed_vma) disp >= 0) - { - *obufp++ = '+'; - *obufp = '\0'; - } - else if (modrm.mod != 1) - { - *obufp++ = '-'; - *obufp = '\0'; - disp = - (bfd_signed_vma) disp; - } - - print_displacement (scratchbuf, disp); - oappend (scratchbuf); - } - - *obufp++ = close_char; - *obufp = '\0'; - } - else if (intel_syntax) - { - if (modrm.mod != 0 || (base & 7) == 5) - { - if (prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS - | PREFIX_ES | PREFIX_FS | PREFIX_GS)) - ; - else - { - oappend (names_seg[ds_reg - es_reg]); - oappend (":"); - } - print_operand_value (scratchbuf, sizeof(scratchbuf), 1, disp); - oappend (scratchbuf); - } - } - } - else - { /* 16 bit address mode */ - switch (modrm.mod) - { - case 0: - if (modrm.rm == 6) - { - disp = get16 (); - if ((disp & 0x8000) != 0) - disp -= 0x10000; - } - break; - case 1: - fetch_data(the_info, codep + 1); - disp = *codep++; - if ((disp & 0x80) != 0) - disp -= 0x100; - break; - case 2: - disp = get16 (); - if ((disp & 0x8000) != 0) - disp -= 0x10000; - break; - } - - if (!intel_syntax) - if (modrm.mod != 0 || modrm.rm == 6) - { - print_displacement (scratchbuf, disp); - oappend (scratchbuf); - } - - if (modrm.mod != 0 || modrm.rm != 6) - { - *obufp++ = open_char; - *obufp = '\0'; - oappend (index16[modrm.rm]); - if (intel_syntax - && (disp || modrm.mod != 0 || modrm.rm == 6)) - { - if ((bfd_signed_vma) disp >= 0) - { - *obufp++ = '+'; - *obufp = '\0'; - } - else if (modrm.mod != 1) - { - *obufp++ = '-'; - *obufp = '\0'; - disp = - (bfd_signed_vma) disp; - } - - print_displacement (scratchbuf, disp); - oappend (scratchbuf); - } - - *obufp++ = close_char; - *obufp = '\0'; - } - else if (intel_syntax) - { - if (prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS - | PREFIX_ES | PREFIX_FS | PREFIX_GS)) - ; - else - { - oappend (names_seg[ds_reg - es_reg]); - oappend (":"); - } - print_operand_value (scratchbuf, sizeof(scratchbuf), 1, - disp & 0xffff); - oappend (scratchbuf); - } - } -} - -static void -OP_G (int bytemode, int sizeflag) -{ - int add = 0; - USED_REX (REX_R); - if (rex & REX_R) - add += 8; - switch (bytemode) - { - case b_mode: - USED_REX (0); - if (rex) - oappend (names8rex[modrm.reg + add]); - else - oappend (names8[modrm.reg + add]); - break; - case w_mode: - oappend (names16[modrm.reg + add]); - break; - case d_mode: - oappend (names32[modrm.reg + add]); - break; - case q_mode: - oappend (names64[modrm.reg + add]); - break; - case v_mode: - case dq_mode: - case dqb_mode: - case dqd_mode: - case dqw_mode: - USED_REX (REX_W); - if (rex & REX_W) - oappend (names64[modrm.reg + add]); - else if ((sizeflag & DFLAG) || bytemode != v_mode) - oappend (names32[modrm.reg + add]); - else - oappend (names16[modrm.reg + add]); - used_prefixes |= (prefixes & PREFIX_DATA); - break; - case m_mode: - if (address_mode == mode_64bit) - oappend (names64[modrm.reg + add]); - else - oappend (names32[modrm.reg + add]); - break; - default: - oappend (INTERNAL_DISASSEMBLER_ERROR); - break; - } -} - -static void -OP_vvvv (int bytemode, int sizeflags) -{ - USED_REX (REX_W); - if (rex & REX_W) { - oappend(names64[vex_reg]); - } else { - oappend(names32[vex_reg]); - } -} - -static bfd_vma -get64 (void) -{ - bfd_vma x; -#ifdef BFD64 - unsigned int a; - unsigned int b; - 
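/*
 * Standalone sketch, not part of the original file: the little-endian
 * reconstruction performed by the code just below, expressed with plain
 * stdint types.  The helper name and buffer layout are made up for
 * illustration only.
 */
#include <stdint.h>

static uint64_t le64_from_bytes(const uint8_t *p)
{
    /* Low and high 32-bit halves, least significant byte first. */
    uint32_t lo = p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
    uint32_t hi = p[4] | (p[5] << 8) | (p[6] << 16) | ((uint32_t)p[7] << 24);
    return (uint64_t)lo | ((uint64_t)hi << 32);
}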
- fetch_data(the_info, codep + 8); - a = *codep++ & 0xff; - a |= (*codep++ & 0xff) << 8; - a |= (*codep++ & 0xff) << 16; - a |= (*codep++ & 0xff) << 24; - b = *codep++ & 0xff; - b |= (*codep++ & 0xff) << 8; - b |= (*codep++ & 0xff) << 16; - b |= (*codep++ & 0xff) << 24; - x = a + ((bfd_vma) b << 32); -#else - abort (); - x = 0; -#endif - return x; -} - -static bfd_signed_vma -get32 (void) -{ - bfd_signed_vma x = 0; - - fetch_data(the_info, codep + 4); - x = *codep++ & (bfd_signed_vma) 0xff; - x |= (*codep++ & (bfd_signed_vma) 0xff) << 8; - x |= (*codep++ & (bfd_signed_vma) 0xff) << 16; - x |= (*codep++ & (bfd_signed_vma) 0xff) << 24; - return x; -} - -static bfd_signed_vma -get32s (void) -{ - bfd_signed_vma x = 0; - - fetch_data(the_info, codep + 4); - x = *codep++ & (bfd_signed_vma) 0xff; - x |= (*codep++ & (bfd_signed_vma) 0xff) << 8; - x |= (*codep++ & (bfd_signed_vma) 0xff) << 16; - x |= (*codep++ & (bfd_signed_vma) 0xff) << 24; - - x = (x ^ ((bfd_signed_vma) 1 << 31)) - ((bfd_signed_vma) 1 << 31); - - return x; -} - -static int -get16 (void) -{ - int x = 0; - - fetch_data(the_info, codep + 2); - x = *codep++ & 0xff; - x |= (*codep++ & 0xff) << 8; - return x; -} - -static void -set_op (bfd_vma op, int riprel) -{ - op_index[op_ad] = op_ad; - if (address_mode == mode_64bit) - { - op_address[op_ad] = op; - op_riprel[op_ad] = riprel; - } - else - { - /* Mask to get a 32-bit address. */ - op_address[op_ad] = op & 0xffffffff; - op_riprel[op_ad] = riprel & 0xffffffff; - } -} - -static void -OP_REG (int code, int sizeflag) -{ - const char *s; - int add = 0; - USED_REX (REX_B); - if (rex & REX_B) - add = 8; - - switch (code) - { - case ax_reg: case cx_reg: case dx_reg: case bx_reg: - case sp_reg: case bp_reg: case si_reg: case di_reg: - s = names16[code - ax_reg + add]; - break; - case es_reg: case ss_reg: case cs_reg: - case ds_reg: case fs_reg: case gs_reg: - s = names_seg[code - es_reg + add]; - break; - case al_reg: case ah_reg: case cl_reg: case ch_reg: - case dl_reg: case dh_reg: case bl_reg: case bh_reg: - USED_REX (0); - if (rex) - s = names8rex[code - al_reg + add]; - else - s = names8[code - al_reg]; - break; - case rAX_reg: case rCX_reg: case rDX_reg: case rBX_reg: - case rSP_reg: case rBP_reg: case rSI_reg: case rDI_reg: - if (address_mode == mode_64bit && (sizeflag & DFLAG)) - { - s = names64[code - rAX_reg + add]; - break; - } - code += eAX_reg - rAX_reg; - /* Fall through. 
*/ - case eAX_reg: case eCX_reg: case eDX_reg: case eBX_reg: - case eSP_reg: case eBP_reg: case eSI_reg: case eDI_reg: - USED_REX (REX_W); - if (rex & REX_W) - s = names64[code - eAX_reg + add]; - else if (sizeflag & DFLAG) - s = names32[code - eAX_reg + add]; - else - s = names16[code - eAX_reg + add]; - used_prefixes |= (prefixes & PREFIX_DATA); - break; - default: - s = INTERNAL_DISASSEMBLER_ERROR; - break; - } - oappend (s); -} - -static void -OP_IMREG (int code, int sizeflag) -{ - const char *s; - - switch (code) - { - case indir_dx_reg: - if (intel_syntax) - s = "dx"; - else - s = "(%dx)"; - break; - case ax_reg: case cx_reg: case dx_reg: case bx_reg: - case sp_reg: case bp_reg: case si_reg: case di_reg: - s = names16[code - ax_reg]; - break; - case es_reg: case ss_reg: case cs_reg: - case ds_reg: case fs_reg: case gs_reg: - s = names_seg[code - es_reg]; - break; - case al_reg: case ah_reg: case cl_reg: case ch_reg: - case dl_reg: case dh_reg: case bl_reg: case bh_reg: - USED_REX (0); - if (rex) - s = names8rex[code - al_reg]; - else - s = names8[code - al_reg]; - break; - case eAX_reg: case eCX_reg: case eDX_reg: case eBX_reg: - case eSP_reg: case eBP_reg: case eSI_reg: case eDI_reg: - USED_REX (REX_W); - if (rex & REX_W) - s = names64[code - eAX_reg]; - else if (sizeflag & DFLAG) - s = names32[code - eAX_reg]; - else - s = names16[code - eAX_reg]; - used_prefixes |= (prefixes & PREFIX_DATA); - break; - case z_mode_ax_reg: - if ((rex & REX_W) || (sizeflag & DFLAG)) - s = *names32; - else - s = *names16; - if (!(rex & REX_W)) - used_prefixes |= (prefixes & PREFIX_DATA); - break; - default: - s = INTERNAL_DISASSEMBLER_ERROR; - break; - } - oappend (s); -} - -static void -OP_I (int bytemode, int sizeflag) -{ - bfd_signed_vma op; - bfd_signed_vma mask = -1; - - switch (bytemode) - { - case b_mode: - fetch_data(the_info, codep + 1); - op = *codep++; - mask = 0xff; - break; - case q_mode: - if (address_mode == mode_64bit) - { - op = get32s (); - break; - } - /* Fall through. 
*/ - case v_mode: - USED_REX (REX_W); - if (rex & REX_W) - op = get32s (); - else if (sizeflag & DFLAG) - { - op = get32 (); - mask = 0xffffffff; - } - else - { - op = get16 (); - mask = 0xfffff; - } - used_prefixes |= (prefixes & PREFIX_DATA); - break; - case w_mode: - mask = 0xfffff; - op = get16 (); - break; - case const_1_mode: - if (intel_syntax) - oappend ("1"); - return; - default: - oappend (INTERNAL_DISASSEMBLER_ERROR); - return; - } - - op &= mask; - scratchbuf[0] = '$'; - print_operand_value (scratchbuf + 1, sizeof(scratchbuf) - 1, 1, op); - oappend (scratchbuf + intel_syntax); - scratchbuf[0] = '\0'; -} - -static void -OP_I64 (int bytemode, int sizeflag) -{ - bfd_signed_vma op; - bfd_signed_vma mask = -1; - - if (address_mode != mode_64bit) - { - OP_I (bytemode, sizeflag); - return; - } - - switch (bytemode) - { - case b_mode: - fetch_data(the_info, codep + 1); - op = *codep++; - mask = 0xff; - break; - case v_mode: - USED_REX (REX_W); - if (rex & REX_W) - op = get64 (); - else if (sizeflag & DFLAG) - { - op = get32 (); - mask = 0xffffffff; - } - else - { - op = get16 (); - mask = 0xfffff; - } - used_prefixes |= (prefixes & PREFIX_DATA); - break; - case w_mode: - mask = 0xfffff; - op = get16 (); - break; - default: - oappend (INTERNAL_DISASSEMBLER_ERROR); - return; - } - - op &= mask; - scratchbuf[0] = '$'; - print_operand_value (scratchbuf + 1, sizeof(scratchbuf) - 1, 1, op); - oappend (scratchbuf + intel_syntax); - scratchbuf[0] = '\0'; -} - -static void -OP_sI (int bytemode, int sizeflag) -{ - bfd_signed_vma op; - - switch (bytemode) - { - case b_mode: - fetch_data(the_info, codep + 1); - op = *codep++; - if ((op & 0x80) != 0) - op -= 0x100; - break; - case v_mode: - USED_REX (REX_W); - if (rex & REX_W) - op = get32s (); - else if (sizeflag & DFLAG) - { - op = get32s (); - } - else - { - op = get16 (); - if ((op & 0x8000) != 0) - op -= 0x10000; - } - used_prefixes |= (prefixes & PREFIX_DATA); - break; - case w_mode: - op = get16 (); - if ((op & 0x8000) != 0) - op -= 0x10000; - break; - default: - oappend (INTERNAL_DISASSEMBLER_ERROR); - return; - } - - scratchbuf[0] = '$'; - print_operand_value (scratchbuf + 1, sizeof(scratchbuf) - 1, 1, op); - oappend (scratchbuf + intel_syntax); -} - -static void -OP_J (int bytemode, int sizeflag) -{ - bfd_vma disp; - bfd_vma mask = -1; - bfd_vma segment = 0; - - switch (bytemode) - { - case b_mode: - fetch_data(the_info, codep + 1); - disp = *codep++; - if ((disp & 0x80) != 0) - disp -= 0x100; - break; - case v_mode: - if ((sizeflag & DFLAG) || (rex & REX_W)) - disp = get32s (); - else - { - disp = get16 (); - if ((disp & 0x8000) != 0) - disp -= 0x10000; - /* In 16bit mode, address is wrapped around at 64k within - the same segment. Otherwise, a data16 prefix on a jump - instruction means that the pc is masked to 16 bits after - the displacement is added! */ - mask = 0xffff; - if ((prefixes & PREFIX_DATA) == 0) - segment = ((start_pc + codep - start_codep) - & ~((bfd_vma) 0xffff)); - } - used_prefixes |= (prefixes & PREFIX_DATA); - break; - default: - oappend (INTERNAL_DISASSEMBLER_ERROR); - return; - } - disp = ((start_pc + codep - start_codep + disp) & mask) | segment; - set_op (disp, 0); - print_operand_value (scratchbuf, sizeof(scratchbuf), 1, disp); - oappend (scratchbuf); -} - -static void -OP_SEG (int bytemode, int sizeflag) -{ - if (bytemode == w_mode) - oappend (names_seg[modrm.reg]); - else - OP_E (modrm.mod == 3 ? 
bytemode : w_mode, sizeflag); -} - -static void -OP_DIR (int dummy ATTRIBUTE_UNUSED, int sizeflag) -{ - int seg, offset; - - if (sizeflag & DFLAG) - { - offset = get32 (); - seg = get16 (); - } - else - { - offset = get16 (); - seg = get16 (); - } - used_prefixes |= (prefixes & PREFIX_DATA); - if (intel_syntax) - snprintf (scratchbuf, sizeof(scratchbuf), "0x%x:0x%x", seg, offset); - else - snprintf (scratchbuf, sizeof(scratchbuf), "$0x%x,$0x%x", seg, offset); - oappend (scratchbuf); -} - -static void -OP_OFF (int bytemode, int sizeflag) -{ - bfd_vma off; - - if (intel_syntax && (sizeflag & SUFFIX_ALWAYS)) - intel_operand_size (bytemode, sizeflag); - append_seg (); - - if ((sizeflag & AFLAG) || address_mode == mode_64bit) - off = get32 (); - else - off = get16 (); - - if (intel_syntax) - { - if (!(prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS - | PREFIX_ES | PREFIX_FS | PREFIX_GS))) - { - oappend (names_seg[ds_reg - es_reg]); - oappend (":"); - } - } - print_operand_value (scratchbuf, sizeof(scratchbuf), 1, off); - oappend (scratchbuf); -} - -static void -OP_OFF64 (int bytemode, int sizeflag) -{ - bfd_vma off; - - if (address_mode != mode_64bit - || (prefixes & PREFIX_ADDR)) - { - OP_OFF (bytemode, sizeflag); - return; - } - - if (intel_syntax && (sizeflag & SUFFIX_ALWAYS)) - intel_operand_size (bytemode, sizeflag); - append_seg (); - - off = get64 (); - - if (intel_syntax) - { - if (!(prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS - | PREFIX_ES | PREFIX_FS | PREFIX_GS))) - { - oappend (names_seg[ds_reg - es_reg]); - oappend (":"); - } - } - print_operand_value (scratchbuf, sizeof(scratchbuf), 1, off); - oappend (scratchbuf); -} - -static void -ptr_reg (int code, int sizeflag) -{ - const char *s; - - *obufp++ = open_char; - used_prefixes |= (prefixes & PREFIX_ADDR); - if (address_mode == mode_64bit) - { - if (!(sizeflag & AFLAG)) - s = names32[code - eAX_reg]; - else - s = names64[code - eAX_reg]; - } - else if (sizeflag & AFLAG) - s = names32[code - eAX_reg]; - else - s = names16[code - eAX_reg]; - oappend (s); - *obufp++ = close_char; - *obufp = 0; -} - -static void -OP_ESreg (int code, int sizeflag) -{ - if (intel_syntax) - { - switch (codep[-1]) - { - case 0x6d: /* insw/insl */ - intel_operand_size (z_mode, sizeflag); - break; - case 0xa5: /* movsw/movsl/movsq */ - case 0xa7: /* cmpsw/cmpsl/cmpsq */ - case 0xab: /* stosw/stosl */ - case 0xaf: /* scasw/scasl */ - intel_operand_size (v_mode, sizeflag); - break; - default: - intel_operand_size (b_mode, sizeflag); - } - } - oappend ("%es:" + intel_syntax); - ptr_reg (code, sizeflag); -} - -static void -OP_DSreg (int code, int sizeflag) -{ - if (intel_syntax) - { - switch (codep[-1]) - { - case 0x6f: /* outsw/outsl */ - intel_operand_size (z_mode, sizeflag); - break; - case 0xa5: /* movsw/movsl/movsq */ - case 0xa7: /* cmpsw/cmpsl/cmpsq */ - case 0xad: /* lodsw/lodsl/lodsq */ - intel_operand_size (v_mode, sizeflag); - break; - default: - intel_operand_size (b_mode, sizeflag); - } - } - if ((prefixes - & (PREFIX_CS - | PREFIX_DS - | PREFIX_SS - | PREFIX_ES - | PREFIX_FS - | PREFIX_GS)) == 0) - prefixes |= PREFIX_DS; - append_seg (); - ptr_reg (code, sizeflag); -} - -static void -OP_C (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -{ - int add = 0; - if (rex & REX_R) - { - USED_REX (REX_R); - add = 8; - } - else if (address_mode != mode_64bit && (prefixes & PREFIX_LOCK)) - { - used_prefixes |= PREFIX_LOCK; - add = 8; - } - snprintf (scratchbuf, sizeof(scratchbuf), "%%cr%d", modrm.reg + add); - oappend (scratchbuf + intel_syntax); 
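/*
 * Aside (illustrative sketch, not from the original source): the
 * "scratchbuf + intel_syntax" idiom used just above relies on
 * intel_syntax being 0 or 1, so Intel-mode output simply starts one
 * character past the AT&T '%' sigil.  Names below are made up.
 */
#include <stdio.h>

static void show_reg(int intel_syntax_flag)
{
    static const char name[] = "%cr0";
    /* Prints "%cr0" for AT&T syntax, "cr0" for Intel syntax. */
    printf("%s\n", name + (intel_syntax_flag ? 1 : 0));
}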
-} - -static void -OP_D (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -{ - int add = 0; - USED_REX (REX_R); - if (rex & REX_R) - add = 8; - if (intel_syntax) - snprintf (scratchbuf, sizeof(scratchbuf), "db%d", modrm.reg + add); - else - snprintf (scratchbuf, sizeof(scratchbuf), "%%db%d", modrm.reg + add); - oappend (scratchbuf); -} - -static void -OP_T (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -{ - snprintf (scratchbuf, sizeof(scratchbuf), "%%tr%d", modrm.reg); - oappend (scratchbuf + intel_syntax); -} - -static void -OP_R (int bytemode, int sizeflag) -{ - if (modrm.mod == 3) - OP_E (bytemode, sizeflag); - else - BadOp (); -} - -static void -OP_MMX (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -{ - used_prefixes |= (prefixes & PREFIX_DATA); - if (prefixes & PREFIX_DATA) - { - int add = 0; - USED_REX (REX_R); - if (rex & REX_R) - add = 8; - snprintf (scratchbuf, sizeof(scratchbuf), "%%xmm%d", modrm.reg + add); - } - else - snprintf (scratchbuf, sizeof(scratchbuf), "%%mm%d", modrm.reg); - oappend (scratchbuf + intel_syntax); -} - -static void -OP_XMM (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -{ - int add = 0; - USED_REX (REX_R); - if (rex & REX_R) - add = 8; - snprintf (scratchbuf, sizeof(scratchbuf), "%%xmm%d", modrm.reg + add); - oappend (scratchbuf + intel_syntax); -} - -static void -OP_EM (int bytemode, int sizeflag) -{ - if (modrm.mod != 3) - { - if (intel_syntax && bytemode == v_mode) - { - bytemode = (prefixes & PREFIX_DATA) ? x_mode : q_mode; - used_prefixes |= (prefixes & PREFIX_DATA); - } - OP_E (bytemode, sizeflag); - return; - } - - /* Skip mod/rm byte. */ - MODRM_CHECK; - codep++; - used_prefixes |= (prefixes & PREFIX_DATA); - if (prefixes & PREFIX_DATA) - { - int add = 0; - - USED_REX (REX_B); - if (rex & REX_B) - add = 8; - snprintf (scratchbuf, sizeof(scratchbuf), "%%xmm%d", modrm.rm + add); - } - else - snprintf (scratchbuf, sizeof(scratchbuf), "%%mm%d", modrm.rm); - oappend (scratchbuf + intel_syntax); -} - -/* cvt* are the only instructions in sse2 which have - both SSE and MMX operands and also have 0x66 prefix - in their opcode. 0x66 was originally used to differentiate - between SSE and MMX instruction(operands). So we have to handle the - cvt* separately using OP_EMC and OP_MXC */ -static void -OP_EMC (int bytemode, int sizeflag) -{ - if (modrm.mod != 3) - { - if (intel_syntax && bytemode == v_mode) - { - bytemode = (prefixes & PREFIX_DATA) ? x_mode : q_mode; - used_prefixes |= (prefixes & PREFIX_DATA); - } - OP_E (bytemode, sizeflag); - return; - } - - /* Skip mod/rm byte. */ - MODRM_CHECK; - codep++; - used_prefixes |= (prefixes & PREFIX_DATA); - snprintf (scratchbuf, sizeof(scratchbuf), "%%mm%d", modrm.rm); - oappend (scratchbuf + intel_syntax); -} - -static void -OP_MXC (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -{ - used_prefixes |= (prefixes & PREFIX_DATA); - snprintf (scratchbuf, sizeof(scratchbuf), "%%mm%d", modrm.reg); - oappend (scratchbuf + intel_syntax); -} - -static void -OP_EX (int bytemode, int sizeflag) -{ - int add = 0; - if (modrm.mod != 3) - { - OP_E (bytemode, sizeflag); - return; - } - USED_REX (REX_B); - if (rex & REX_B) - add = 8; - - /* Skip mod/rm byte. 
*/ - MODRM_CHECK; - codep++; - snprintf (scratchbuf, sizeof(scratchbuf), "%%xmm%d", modrm.rm + add); - oappend (scratchbuf + intel_syntax); -} - -static void -OP_MS (int bytemode, int sizeflag) -{ - if (modrm.mod == 3) - OP_EM (bytemode, sizeflag); - else - BadOp (); -} - -static void -OP_XS (int bytemode, int sizeflag) -{ - if (modrm.mod == 3) - OP_EX (bytemode, sizeflag); - else - BadOp (); -} - -static void -OP_M (int bytemode, int sizeflag) -{ - if (modrm.mod == 3) - /* bad bound,lea,lds,les,lfs,lgs,lss,cmpxchg8b,vmptrst modrm */ - BadOp (); - else - OP_E (bytemode, sizeflag); -} - -static void -OP_0f07 (int bytemode, int sizeflag) -{ - if (modrm.mod != 3 || modrm.rm != 0) - BadOp (); - else - OP_E (bytemode, sizeflag); -} - -static void -OP_0fae (int bytemode, int sizeflag) -{ - if (modrm.mod == 3) - { - if (modrm.reg == 7) - strcpy (obuf + strlen (obuf) - sizeof ("clflush") + 1, "sfence"); - - if (modrm.reg < 5 || modrm.rm != 0) - { - BadOp (); /* bad sfence, mfence, or lfence */ - return; - } - } - else if (modrm.reg != 7) - { - BadOp (); /* bad clflush */ - return; - } - - OP_E (bytemode, sizeflag); -} - -/* NOP is an alias of "xchg %ax,%ax" in 16bit mode, "xchg %eax,%eax" in - 32bit mode and "xchg %rax,%rax" in 64bit mode. */ - -static void -NOP_Fixup1 (int bytemode, int sizeflag) -{ - if ((prefixes & PREFIX_DATA) != 0 - || (rex != 0 - && rex != 0x48 - && address_mode == mode_64bit)) - OP_REG (bytemode, sizeflag); - else - strcpy (obuf, "nop"); -} - -static void -NOP_Fixup2 (int bytemode, int sizeflag) -{ - if ((prefixes & PREFIX_DATA) != 0 - || (rex != 0 - && rex != 0x48 - && address_mode == mode_64bit)) - OP_IMREG (bytemode, sizeflag); -} - -static const char *Suffix3DNow[] = { -/* 00 */ NULL, NULL, NULL, NULL, -/* 04 */ NULL, NULL, NULL, NULL, -/* 08 */ NULL, NULL, NULL, NULL, -/* 0C */ "pi2fw", "pi2fd", NULL, NULL, -/* 10 */ NULL, NULL, NULL, NULL, -/* 14 */ NULL, NULL, NULL, NULL, -/* 18 */ NULL, NULL, NULL, NULL, -/* 1C */ "pf2iw", "pf2id", NULL, NULL, -/* 20 */ NULL, NULL, NULL, NULL, -/* 24 */ NULL, NULL, NULL, NULL, -/* 28 */ NULL, NULL, NULL, NULL, -/* 2C */ NULL, NULL, NULL, NULL, -/* 30 */ NULL, NULL, NULL, NULL, -/* 34 */ NULL, NULL, NULL, NULL, -/* 38 */ NULL, NULL, NULL, NULL, -/* 3C */ NULL, NULL, NULL, NULL, -/* 40 */ NULL, NULL, NULL, NULL, -/* 44 */ NULL, NULL, NULL, NULL, -/* 48 */ NULL, NULL, NULL, NULL, -/* 4C */ NULL, NULL, NULL, NULL, -/* 50 */ NULL, NULL, NULL, NULL, -/* 54 */ NULL, NULL, NULL, NULL, -/* 58 */ NULL, NULL, NULL, NULL, -/* 5C */ NULL, NULL, NULL, NULL, -/* 60 */ NULL, NULL, NULL, NULL, -/* 64 */ NULL, NULL, NULL, NULL, -/* 68 */ NULL, NULL, NULL, NULL, -/* 6C */ NULL, NULL, NULL, NULL, -/* 70 */ NULL, NULL, NULL, NULL, -/* 74 */ NULL, NULL, NULL, NULL, -/* 78 */ NULL, NULL, NULL, NULL, -/* 7C */ NULL, NULL, NULL, NULL, -/* 80 */ NULL, NULL, NULL, NULL, -/* 84 */ NULL, NULL, NULL, NULL, -/* 88 */ NULL, NULL, "pfnacc", NULL, -/* 8C */ NULL, NULL, "pfpnacc", NULL, -/* 90 */ "pfcmpge", NULL, NULL, NULL, -/* 94 */ "pfmin", NULL, "pfrcp", "pfrsqrt", -/* 98 */ NULL, NULL, "pfsub", NULL, -/* 9C */ NULL, NULL, "pfadd", NULL, -/* A0 */ "pfcmpgt", NULL, NULL, NULL, -/* A4 */ "pfmax", NULL, "pfrcpit1", "pfrsqit1", -/* A8 */ NULL, NULL, "pfsubr", NULL, -/* AC */ NULL, NULL, "pfacc", NULL, -/* B0 */ "pfcmpeq", NULL, NULL, NULL, -/* B4 */ "pfmul", NULL, "pfrcpit2", "pmulhrw", -/* B8 */ NULL, NULL, NULL, "pswapd", -/* BC */ NULL, NULL, NULL, "pavgusb", -/* C0 */ NULL, NULL, NULL, NULL, -/* C4 */ NULL, NULL, NULL, NULL, -/* C8 */ NULL, NULL, NULL, NULL, -/* 
CC */ NULL, NULL, NULL, NULL, -/* D0 */ NULL, NULL, NULL, NULL, -/* D4 */ NULL, NULL, NULL, NULL, -/* D8 */ NULL, NULL, NULL, NULL, -/* DC */ NULL, NULL, NULL, NULL, -/* E0 */ NULL, NULL, NULL, NULL, -/* E4 */ NULL, NULL, NULL, NULL, -/* E8 */ NULL, NULL, NULL, NULL, -/* EC */ NULL, NULL, NULL, NULL, -/* F0 */ NULL, NULL, NULL, NULL, -/* F4 */ NULL, NULL, NULL, NULL, -/* F8 */ NULL, NULL, NULL, NULL, -/* FC */ NULL, NULL, NULL, NULL, -}; - -static void -OP_3DNowSuffix (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -{ - const char *mnemonic; - - fetch_data(the_info, codep + 1); - /* AMD 3DNow! instructions are specified by an opcode suffix in the - place where an 8-bit immediate would normally go. ie. the last - byte of the instruction. */ - obufp = obuf + strlen (obuf); - mnemonic = Suffix3DNow[*codep++ & 0xff]; - if (mnemonic) - oappend (mnemonic); - else - { - /* Since a variable sized modrm/sib chunk is between the start - of the opcode (0x0f0f) and the opcode suffix, we need to do - all the modrm processing first, and don't know until now that - we have a bad opcode. This necessitates some cleaning up. */ - op_out[0][0] = '\0'; - op_out[1][0] = '\0'; - BadOp (); - } -} - -static const char *simd_cmp_op[] = { - "eq", - "lt", - "le", - "unord", - "neq", - "nlt", - "nle", - "ord" -}; - -static void -OP_SIMD_Suffix (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -{ - unsigned int cmp_type; - - fetch_data(the_info, codep + 1); - obufp = obuf + strlen (obuf); - cmp_type = *codep++ & 0xff; - if (cmp_type < 8) - { - char suffix1 = 'p', suffix2 = 's'; - used_prefixes |= (prefixes & PREFIX_REPZ); - if (prefixes & PREFIX_REPZ) - suffix1 = 's'; - else - { - used_prefixes |= (prefixes & PREFIX_DATA); - if (prefixes & PREFIX_DATA) - suffix2 = 'd'; - else - { - used_prefixes |= (prefixes & PREFIX_REPNZ); - if (prefixes & PREFIX_REPNZ) - suffix1 = 's', suffix2 = 'd'; - } - } - snprintf (scratchbuf, sizeof(scratchbuf), "cmp%s%c%c", - simd_cmp_op[cmp_type], suffix1, suffix2); - used_prefixes |= (prefixes & PREFIX_REPZ); - oappend (scratchbuf); - } - else - { - /* We have a bad extension byte. Clean up. */ - op_out[0][0] = '\0'; - op_out[1][0] = '\0'; - BadOp (); - } -} - -static void -SIMD_Fixup (int extrachar, int sizeflag ATTRIBUTE_UNUSED) -{ - /* Change movlps/movhps to movhlps/movlhps for 2 register operand - forms of these instructions. */ - if (modrm.mod == 3) - { - char *p = obuf + strlen (obuf); - *(p + 1) = '\0'; - *p = *(p - 1); - *(p - 1) = *(p - 2); - *(p - 2) = *(p - 3); - *(p - 3) = extrachar; - } -} - -static void -PNI_Fixup (int extrachar ATTRIBUTE_UNUSED, int sizeflag) -{ - if (modrm.mod == 3 && modrm.reg == 1 && modrm.rm <= 1) - { - /* Override "sidt". */ - size_t olen = strlen (obuf); - char *p = obuf + olen - 4; - const char * const *names = (address_mode == mode_64bit - ? names64 : names32); - - /* We might have a suffix when disassembling with -Msuffix. */ - if (*p == 'i') - --p; - - /* Remove "addr16/addr32" if we aren't in Intel mode. 
*/ - if (!intel_syntax - && (prefixes & PREFIX_ADDR) - && olen >= (4 + 7) - && *(p - 1) == ' ' - && strncmp (p - 7, "addr", 4) == 0 - && (strncmp (p - 3, "16", 2) == 0 - || strncmp (p - 3, "32", 2) == 0)) - p -= 7; - - if (modrm.rm) - { - /* mwait %eax,%ecx */ - strcpy (p, "mwait"); - if (!intel_syntax) - strcpy (op_out[0], names[0]); - } - else - { - /* monitor %eax,%ecx,%edx" */ - strcpy (p, "monitor"); - if (!intel_syntax) - { - const char * const *op1_names; - if (!(prefixes & PREFIX_ADDR)) - op1_names = (address_mode == mode_16bit - ? names16 : names); - else - { - op1_names = (address_mode != mode_32bit - ? names32 : names16); - used_prefixes |= PREFIX_ADDR; - } - strcpy (op_out[0], op1_names[0]); - strcpy (op_out[2], names[2]); - } - } - if (!intel_syntax) - { - strcpy (op_out[1], names[1]); - two_source_ops = 1; - } - - codep++; - } - else - OP_M (0, sizeflag); -} - -static void -SVME_Fixup (int bytemode, int sizeflag) -{ - const char *alt; - char *p; - - switch (*codep) - { - case 0xd8: - alt = "vmrun"; - break; - case 0xd9: - alt = "vmmcall"; - break; - case 0xda: - alt = "vmload"; - break; - case 0xdb: - alt = "vmsave"; - break; - case 0xdc: - alt = "stgi"; - break; - case 0xdd: - alt = "clgi"; - break; - case 0xde: - alt = "skinit"; - break; - case 0xdf: - alt = "invlpga"; - break; - default: - OP_M (bytemode, sizeflag); - return; - } - /* Override "lidt". */ - p = obuf + strlen (obuf) - 4; - /* We might have a suffix. */ - if (*p == 'i') - --p; - strcpy (p, alt); - if (!(prefixes & PREFIX_ADDR)) - { - ++codep; - return; - } - used_prefixes |= PREFIX_ADDR; - switch (*codep++) - { - case 0xdf: - strcpy (op_out[1], names32[1]); - two_source_ops = 1; - /* Fall through. */ - case 0xd8: - case 0xda: - case 0xdb: - *obufp++ = open_char; - if (address_mode == mode_64bit || (sizeflag & AFLAG)) - alt = names32[0]; - else - alt = names16[0]; - strcpy (obufp, alt); - obufp += strlen (alt); - *obufp++ = close_char; - *obufp = '\0'; - break; - } -} - -static void -INVLPG_Fixup (int bytemode, int sizeflag) -{ - const char *alt; - - switch (*codep) - { - case 0xf8: - alt = "swapgs"; - break; - case 0xf9: - alt = "rdtscp"; - break; - default: - OP_M (bytemode, sizeflag); - return; - } - /* Override "invlpg". */ - strcpy (obuf + strlen (obuf) - 6, alt); - codep++; -} - -static void -BadOp (void) -{ - /* Throw away prefixes and 1st. opcode byte. */ - codep = insn_codep + 1; - oappend ("(bad)"); -} - -static void -VMX_Fixup (int extrachar ATTRIBUTE_UNUSED, int sizeflag) -{ - if (modrm.mod == 3 - && modrm.reg == 0 - && modrm.rm >=1 - && modrm.rm <= 4) - { - /* Override "sgdt". */ - char *p = obuf + strlen (obuf) - 4; - - /* We might have a suffix when disassembling with -Msuffix. */ - if (*p == 'g') - --p; - - switch (modrm.rm) - { - case 1: - strcpy (p, "vmcall"); - break; - case 2: - strcpy (p, "vmlaunch"); - break; - case 3: - strcpy (p, "vmresume"); - break; - case 4: - strcpy (p, "vmxoff"); - break; - } - - codep++; - } - else - OP_E (0, sizeflag); -} - -static void -OP_VMX (int bytemode, int sizeflag) -{ - used_prefixes |= (prefixes & (PREFIX_DATA | PREFIX_REPZ)); - if (prefixes & PREFIX_DATA) - strcpy (obuf, "vmclear"); - else if (prefixes & PREFIX_REPZ) - strcpy (obuf, "vmxon"); - else - strcpy (obuf, "vmptrld"); - OP_E (bytemode, sizeflag); -} - -static void -REP_Fixup (int bytemode, int sizeflag) -{ - /* The 0xf3 prefix should be displayed as "rep" for ins, outs, movs, - lods and stos. 
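     So f3 a4, for instance, ends up displayed as rep movsb rather than
     repz movsb.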
*/ - size_t ilen = 0; - - if (prefixes & PREFIX_REPZ) - switch (*insn_codep) - { - case 0x6e: /* outsb */ - case 0x6f: /* outsw/outsl */ - case 0xa4: /* movsb */ - case 0xa5: /* movsw/movsl/movsq */ - if (!intel_syntax) - ilen = 5; - else - ilen = 4; - break; - case 0xaa: /* stosb */ - case 0xab: /* stosw/stosl/stosq */ - case 0xac: /* lodsb */ - case 0xad: /* lodsw/lodsl/lodsq */ - if (!intel_syntax && (sizeflag & SUFFIX_ALWAYS)) - ilen = 5; - else - ilen = 4; - break; - case 0x6c: /* insb */ - case 0x6d: /* insl/insw */ - if (!intel_syntax) - ilen = 4; - else - ilen = 3; - break; - default: - abort (); - break; - } - - if (ilen != 0) - { - size_t olen; - char *p; - - olen = strlen (obuf); - p = obuf + olen - ilen - 1 - 4; - /* Handle "repz [addr16|addr32]". */ - if ((prefixes & PREFIX_ADDR)) - p -= 1 + 6; - - memmove (p + 3, p + 4, olen - (p + 3 - obuf)); - } - - switch (bytemode) - { - case al_reg: - case eAX_reg: - case indir_dx_reg: - OP_IMREG (bytemode, sizeflag); - break; - case eDI_reg: - OP_ESreg (bytemode, sizeflag); - break; - case eSI_reg: - OP_DSreg (bytemode, sizeflag); - break; - default: - abort (); - break; - } -} - -static void -CMPXCHG8B_Fixup (int bytemode, int sizeflag) -{ - USED_REX (REX_W); - if (rex & REX_W) - { - /* Change cmpxchg8b to cmpxchg16b. */ - char *p = obuf + strlen (obuf) - 2; - strcpy (p, "16b"); - bytemode = o_mode; - } - OP_M (bytemode, sizeflag); -} - -static void -XMM_Fixup (int reg, int sizeflag ATTRIBUTE_UNUSED) -{ - snprintf (scratchbuf, sizeof(scratchbuf), "%%xmm%d", reg); - oappend (scratchbuf + intel_syntax); -} - -static void -CRC32_Fixup (int bytemode, int sizeflag) -{ - /* Add proper suffix to "crc32". */ - char *p = obuf + strlen (obuf); - - switch (bytemode) - { - case b_mode: - if (intel_syntax) - break; - - *p++ = 'b'; - break; - case v_mode: - if (intel_syntax) - break; - - USED_REX (REX_W); - if (rex & REX_W) - *p++ = 'q'; - else if (sizeflag & DFLAG) - *p++ = 'l'; - else - *p++ = 'w'; - used_prefixes |= (prefixes & PREFIX_DATA); - break; - default: - oappend (INTERNAL_DISASSEMBLER_ERROR); - break; - } - *p = '\0'; - - if (modrm.mod == 3) - { - int add; - - /* Skip mod/rm byte. */ - MODRM_CHECK; - codep++; - - USED_REX (REX_B); - add = (rex & REX_B) ? 8 : 0; - if (bytemode == b_mode) - { - USED_REX (0); - if (rex) - oappend (names8rex[modrm.rm + add]); - else - oappend (names8[modrm.rm + add]); - } - else - { - USED_REX (REX_W); - if (rex & REX_W) - oappend (names64[modrm.rm + add]); - else if ((prefixes & PREFIX_DATA)) - oappend (names16[modrm.rm + add]); - else - oappend (names32[modrm.rm + add]); - } - } - else - OP_E (bytemode, sizeflag); -} diff --git a/disas/libvixl/LICENCE b/disas/libvixl/LICENCE deleted file mode 100644 index b7e160a3f5..0000000000 --- a/disas/libvixl/LICENCE +++ /dev/null @@ -1,30 +0,0 @@ -LICENCE -======= - -The software in this repository is covered by the following licence. - -// Copyright 2013, ARM Limited -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. 
-// * Neither the name of ARM Limited nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/disas/libvixl/README b/disas/libvixl/README
deleted file mode 100644
index 932a41adf7..0000000000
--- a/disas/libvixl/README
+++ /dev/null
@@ -1,11 +0,0 @@
-
-The code in this directory is a subset of libvixl:
- https://github.com/armvixl/vixl
-(specifically, it is the set of files needed for disassembly only,
-taken from libvixl 1.12).
-Bugfixes should preferably be sent upstream initially.
-
-The disassembler does not currently support the entire A64 instruction
-set. Notably:
- * Limited support for system instructions.
- * A few miscellaneous integer and floating point instructions are missing.
diff --git a/disas/libvixl/meson.build b/disas/libvixl/meson.build
deleted file mode 100644
index 5e2eb33e8e..0000000000
--- a/disas/libvixl/meson.build
+++ /dev/null
@@ -1,7 +0,0 @@
-libvixl_ss.add(files(
- 'vixl/a64/decoder-a64.cc',
- 'vixl/a64/disasm-a64.cc',
- 'vixl/a64/instructions-a64.cc',
- 'vixl/compiler-intrinsics.cc',
- 'vixl/utils.cc',
-))
diff --git a/disas/libvixl/vixl/a64/assembler-a64.h b/disas/libvixl/vixl/a64/assembler-a64.h
deleted file mode 100644
index fda5ccc6c7..0000000000
--- a/disas/libvixl/vixl/a64/assembler-a64.h
+++ /dev/null
@@ -1,4624 +0,0 @@
-// Copyright 2015, ARM Limited
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of ARM Limited nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef VIXL_A64_ASSEMBLER_A64_H_ -#define VIXL_A64_ASSEMBLER_A64_H_ - - -#include "vixl/globals.h" -#include "vixl/invalset.h" -#include "vixl/utils.h" -#include "vixl/code-buffer.h" -#include "vixl/a64/instructions-a64.h" - -namespace vixl { - -typedef uint64_t RegList; -static const int kRegListSizeInBits = sizeof(RegList) * 8; - - -// Registers. - -// Some CPURegister methods can return Register or VRegister types, so we need -// to declare them in advance. -class Register; -class VRegister; - -class CPURegister { - public: - enum RegisterType { - // The kInvalid value is used to detect uninitialized static instances, - // which are always zero-initialized before any constructors are called. - kInvalid = 0, - kRegister, - kVRegister, - kFPRegister = kVRegister, - kNoRegister - }; - - CPURegister() : code_(0), size_(0), type_(kNoRegister) { - VIXL_ASSERT(!IsValid()); - VIXL_ASSERT(IsNone()); - } - - CPURegister(unsigned code, unsigned size, RegisterType type) - : code_(code), size_(size), type_(type) { - VIXL_ASSERT(IsValidOrNone()); - } - - unsigned code() const { - VIXL_ASSERT(IsValid()); - return code_; - } - - RegisterType type() const { - VIXL_ASSERT(IsValidOrNone()); - return type_; - } - - RegList Bit() const { - VIXL_ASSERT(code_ < (sizeof(RegList) * 8)); - return IsValid() ? (static_cast(1) << code_) : 0; - } - - unsigned size() const { - VIXL_ASSERT(IsValid()); - return size_; - } - - int SizeInBytes() const { - VIXL_ASSERT(IsValid()); - VIXL_ASSERT(size() % 8 == 0); - return size_ / 8; - } - - int SizeInBits() const { - VIXL_ASSERT(IsValid()); - return size_; - } - - bool Is8Bits() const { - VIXL_ASSERT(IsValid()); - return size_ == 8; - } - - bool Is16Bits() const { - VIXL_ASSERT(IsValid()); - return size_ == 16; - } - - bool Is32Bits() const { - VIXL_ASSERT(IsValid()); - return size_ == 32; - } - - bool Is64Bits() const { - VIXL_ASSERT(IsValid()); - return size_ == 64; - } - - bool Is128Bits() const { - VIXL_ASSERT(IsValid()); - return size_ == 128; - } - - bool IsValid() const { - if (IsValidRegister() || IsValidVRegister()) { - VIXL_ASSERT(!IsNone()); - return true; - } else { - // This assert is hit when the register has not been properly initialized. - // One cause for this can be an initialisation order fiasco. See - // https://isocpp.org/wiki/faq/ctors#static-init-order for some details. - VIXL_ASSERT(IsNone()); - return false; - } - } - - bool IsValidRegister() const { - return IsRegister() && - ((size_ == kWRegSize) || (size_ == kXRegSize)) && - ((code_ < kNumberOfRegisters) || (code_ == kSPRegInternalCode)); - } - - bool IsValidVRegister() const { - return IsVRegister() && - ((size_ == kBRegSize) || (size_ == kHRegSize) || - (size_ == kSRegSize) || (size_ == kDRegSize) || - (size_ == kQRegSize)) && - (code_ < kNumberOfVRegisters); - } - - bool IsValidFPRegister() const { - return IsFPRegister() && (code_ < kNumberOfVRegisters); - } - - bool IsNone() const { - // kNoRegister types should always have size 0 and code 0. 
- VIXL_ASSERT((type_ != kNoRegister) || (code_ == 0)); - VIXL_ASSERT((type_ != kNoRegister) || (size_ == 0)); - - return type_ == kNoRegister; - } - - bool Aliases(const CPURegister& other) const { - VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone()); - return (code_ == other.code_) && (type_ == other.type_); - } - - bool Is(const CPURegister& other) const { - VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone()); - return Aliases(other) && (size_ == other.size_); - } - - bool IsZero() const { - VIXL_ASSERT(IsValid()); - return IsRegister() && (code_ == kZeroRegCode); - } - - bool IsSP() const { - VIXL_ASSERT(IsValid()); - return IsRegister() && (code_ == kSPRegInternalCode); - } - - bool IsRegister() const { - return type_ == kRegister; - } - - bool IsVRegister() const { - return type_ == kVRegister; - } - - bool IsFPRegister() const { - return IsS() || IsD(); - } - - bool IsW() const { return IsValidRegister() && Is32Bits(); } - bool IsX() const { return IsValidRegister() && Is64Bits(); } - - // These assertions ensure that the size and type of the register are as - // described. They do not consider the number of lanes that make up a vector. - // So, for example, Is8B() implies IsD(), and Is1D() implies IsD, but IsD() - // does not imply Is1D() or Is8B(). - // Check the number of lanes, ie. the format of the vector, using methods such - // as Is8B(), Is1D(), etc. in the VRegister class. - bool IsV() const { return IsVRegister(); } - bool IsB() const { return IsV() && Is8Bits(); } - bool IsH() const { return IsV() && Is16Bits(); } - bool IsS() const { return IsV() && Is32Bits(); } - bool IsD() const { return IsV() && Is64Bits(); } - bool IsQ() const { return IsV() && Is128Bits(); } - - const Register& W() const; - const Register& X() const; - const VRegister& V() const; - const VRegister& B() const; - const VRegister& H() const; - const VRegister& S() const; - const VRegister& D() const; - const VRegister& Q() const; - - bool IsSameSizeAndType(const CPURegister& other) const { - return (size_ == other.size_) && (type_ == other.type_); - } - - protected: - unsigned code_; - unsigned size_; - RegisterType type_; - - private: - bool IsValidOrNone() const { - return IsValid() || IsNone(); - } -}; - - -class Register : public CPURegister { - public: - Register() : CPURegister() {} - explicit Register(const CPURegister& other) - : CPURegister(other.code(), other.size(), other.type()) { - VIXL_ASSERT(IsValidRegister()); - } - Register(unsigned code, unsigned size) - : CPURegister(code, size, kRegister) {} - - bool IsValid() const { - VIXL_ASSERT(IsRegister() || IsNone()); - return IsValidRegister(); - } - - static const Register& WRegFromCode(unsigned code); - static const Register& XRegFromCode(unsigned code); - - private: - static const Register wregisters[]; - static const Register xregisters[]; -}; - - -class VRegister : public CPURegister { - public: - VRegister() : CPURegister(), lanes_(1) {} - explicit VRegister(const CPURegister& other) - : CPURegister(other.code(), other.size(), other.type()), lanes_(1) { - VIXL_ASSERT(IsValidVRegister()); - VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16)); - } - VRegister(unsigned code, unsigned size, unsigned lanes = 1) - : CPURegister(code, size, kVRegister), lanes_(lanes) { - VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16)); - } - VRegister(unsigned code, VectorFormat format) - : CPURegister(code, RegisterSizeInBitsFromFormat(format), kVRegister), - lanes_(IsVectorFormat(format) ? 
LaneCountFromFormat(format) : 1) { - VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16)); - } - - bool IsValid() const { - VIXL_ASSERT(IsVRegister() || IsNone()); - return IsValidVRegister(); - } - - static const VRegister& BRegFromCode(unsigned code); - static const VRegister& HRegFromCode(unsigned code); - static const VRegister& SRegFromCode(unsigned code); - static const VRegister& DRegFromCode(unsigned code); - static const VRegister& QRegFromCode(unsigned code); - static const VRegister& VRegFromCode(unsigned code); - - VRegister V8B() const { return VRegister(code_, kDRegSize, 8); } - VRegister V16B() const { return VRegister(code_, kQRegSize, 16); } - VRegister V4H() const { return VRegister(code_, kDRegSize, 4); } - VRegister V8H() const { return VRegister(code_, kQRegSize, 8); } - VRegister V2S() const { return VRegister(code_, kDRegSize, 2); } - VRegister V4S() const { return VRegister(code_, kQRegSize, 4); } - VRegister V2D() const { return VRegister(code_, kQRegSize, 2); } - VRegister V1D() const { return VRegister(code_, kDRegSize, 1); } - - bool Is8B() const { return (Is64Bits() && (lanes_ == 8)); } - bool Is16B() const { return (Is128Bits() && (lanes_ == 16)); } - bool Is4H() const { return (Is64Bits() && (lanes_ == 4)); } - bool Is8H() const { return (Is128Bits() && (lanes_ == 8)); } - bool Is2S() const { return (Is64Bits() && (lanes_ == 2)); } - bool Is4S() const { return (Is128Bits() && (lanes_ == 4)); } - bool Is1D() const { return (Is64Bits() && (lanes_ == 1)); } - bool Is2D() const { return (Is128Bits() && (lanes_ == 2)); } - - // For consistency, we assert the number of lanes of these scalar registers, - // even though there are no vectors of equivalent total size with which they - // could alias. - bool Is1B() const { - VIXL_ASSERT(!(Is8Bits() && IsVector())); - return Is8Bits(); - } - bool Is1H() const { - VIXL_ASSERT(!(Is16Bits() && IsVector())); - return Is16Bits(); - } - bool Is1S() const { - VIXL_ASSERT(!(Is32Bits() && IsVector())); - return Is32Bits(); - } - - bool IsLaneSizeB() const { return LaneSizeInBits() == kBRegSize; } - bool IsLaneSizeH() const { return LaneSizeInBits() == kHRegSize; } - bool IsLaneSizeS() const { return LaneSizeInBits() == kSRegSize; } - bool IsLaneSizeD() const { return LaneSizeInBits() == kDRegSize; } - - int lanes() const { - return lanes_; - } - - bool IsScalar() const { - return lanes_ == 1; - } - - bool IsVector() const { - return lanes_ > 1; - } - - bool IsSameFormat(const VRegister& other) const { - return (size_ == other.size_) && (lanes_ == other.lanes_); - } - - unsigned LaneSizeInBytes() const { - return SizeInBytes() / lanes_; - } - - unsigned LaneSizeInBits() const { - return LaneSizeInBytes() * 8; - } - - private: - static const VRegister bregisters[]; - static const VRegister hregisters[]; - static const VRegister sregisters[]; - static const VRegister dregisters[]; - static const VRegister qregisters[]; - static const VRegister vregisters[]; - int lanes_; -}; - - -// Backward compatibility for FPRegisters. -typedef VRegister FPRegister; - -// No*Reg is used to indicate an unused argument, or an error case. Note that -// these all compare equal (using the Is() method). The Register and VRegister -// variants are provided for convenience. -const Register NoReg; -const VRegister NoVReg; -const FPRegister NoFPReg; // For backward compatibility. 
-const CPURegister NoCPUReg; - - -#define DEFINE_REGISTERS(N) \ -const Register w##N(N, kWRegSize); \ -const Register x##N(N, kXRegSize); -REGISTER_CODE_LIST(DEFINE_REGISTERS) -#undef DEFINE_REGISTERS -const Register wsp(kSPRegInternalCode, kWRegSize); -const Register sp(kSPRegInternalCode, kXRegSize); - - -#define DEFINE_VREGISTERS(N) \ -const VRegister b##N(N, kBRegSize); \ -const VRegister h##N(N, kHRegSize); \ -const VRegister s##N(N, kSRegSize); \ -const VRegister d##N(N, kDRegSize); \ -const VRegister q##N(N, kQRegSize); \ -const VRegister v##N(N, kQRegSize); -REGISTER_CODE_LIST(DEFINE_VREGISTERS) -#undef DEFINE_VREGISTERS - - -// Registers aliases. -const Register ip0 = x16; -const Register ip1 = x17; -const Register lr = x30; -const Register xzr = x31; -const Register wzr = w31; - - -// AreAliased returns true if any of the named registers overlap. Arguments -// set to NoReg are ignored. The system stack pointer may be specified. -bool AreAliased(const CPURegister& reg1, - const CPURegister& reg2, - const CPURegister& reg3 = NoReg, - const CPURegister& reg4 = NoReg, - const CPURegister& reg5 = NoReg, - const CPURegister& reg6 = NoReg, - const CPURegister& reg7 = NoReg, - const CPURegister& reg8 = NoReg); - - -// AreSameSizeAndType returns true if all of the specified registers have the -// same size, and are of the same type. The system stack pointer may be -// specified. Arguments set to NoReg are ignored, as are any subsequent -// arguments. At least one argument (reg1) must be valid (not NoCPUReg). -bool AreSameSizeAndType(const CPURegister& reg1, - const CPURegister& reg2, - const CPURegister& reg3 = NoCPUReg, - const CPURegister& reg4 = NoCPUReg, - const CPURegister& reg5 = NoCPUReg, - const CPURegister& reg6 = NoCPUReg, - const CPURegister& reg7 = NoCPUReg, - const CPURegister& reg8 = NoCPUReg); - - -// AreSameFormat returns true if all of the specified VRegisters have the same -// vector format. Arguments set to NoReg are ignored, as are any subsequent -// arguments. At least one argument (reg1) must be valid (not NoVReg). -bool AreSameFormat(const VRegister& reg1, - const VRegister& reg2, - const VRegister& reg3 = NoVReg, - const VRegister& reg4 = NoVReg); - - -// AreConsecutive returns true if all of the specified VRegisters are -// consecutive in the register file. Arguments set to NoReg are ignored, as are -// any subsequent arguments. At least one argument (reg1) must be valid -// (not NoVReg). -bool AreConsecutive(const VRegister& reg1, - const VRegister& reg2, - const VRegister& reg3 = NoVReg, - const VRegister& reg4 = NoVReg); - - -// Lists of registers. 
-class CPURegList { - public: - explicit CPURegList(CPURegister reg1, - CPURegister reg2 = NoCPUReg, - CPURegister reg3 = NoCPUReg, - CPURegister reg4 = NoCPUReg) - : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()), - size_(reg1.size()), type_(reg1.type()) { - VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4)); - VIXL_ASSERT(IsValid()); - } - - CPURegList(CPURegister::RegisterType type, unsigned size, RegList list) - : list_(list), size_(size), type_(type) { - VIXL_ASSERT(IsValid()); - } - - CPURegList(CPURegister::RegisterType type, unsigned size, - unsigned first_reg, unsigned last_reg) - : size_(size), type_(type) { - VIXL_ASSERT(((type == CPURegister::kRegister) && - (last_reg < kNumberOfRegisters)) || - ((type == CPURegister::kVRegister) && - (last_reg < kNumberOfVRegisters))); - VIXL_ASSERT(last_reg >= first_reg); - list_ = (UINT64_C(1) << (last_reg + 1)) - 1; - list_ &= ~((UINT64_C(1) << first_reg) - 1); - VIXL_ASSERT(IsValid()); - } - - CPURegister::RegisterType type() const { - VIXL_ASSERT(IsValid()); - return type_; - } - - // Combine another CPURegList into this one. Registers that already exist in - // this list are left unchanged. The type and size of the registers in the - // 'other' list must match those in this list. - void Combine(const CPURegList& other) { - VIXL_ASSERT(IsValid()); - VIXL_ASSERT(other.type() == type_); - VIXL_ASSERT(other.RegisterSizeInBits() == size_); - list_ |= other.list(); - } - - // Remove every register in the other CPURegList from this one. Registers that - // do not exist in this list are ignored. The type and size of the registers - // in the 'other' list must match those in this list. - void Remove(const CPURegList& other) { - VIXL_ASSERT(IsValid()); - VIXL_ASSERT(other.type() == type_); - VIXL_ASSERT(other.RegisterSizeInBits() == size_); - list_ &= ~other.list(); - } - - // Variants of Combine and Remove which take a single register. - void Combine(const CPURegister& other) { - VIXL_ASSERT(other.type() == type_); - VIXL_ASSERT(other.size() == size_); - Combine(other.code()); - } - - void Remove(const CPURegister& other) { - VIXL_ASSERT(other.type() == type_); - VIXL_ASSERT(other.size() == size_); - Remove(other.code()); - } - - // Variants of Combine and Remove which take a single register by its code; - // the type and size of the register is inferred from this list. 
- void Combine(int code) { - VIXL_ASSERT(IsValid()); - VIXL_ASSERT(CPURegister(code, size_, type_).IsValid()); - list_ |= (UINT64_C(1) << code); - } - - void Remove(int code) { - VIXL_ASSERT(IsValid()); - VIXL_ASSERT(CPURegister(code, size_, type_).IsValid()); - list_ &= ~(UINT64_C(1) << code); - } - - static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) { - VIXL_ASSERT(list_1.type_ == list_2.type_); - VIXL_ASSERT(list_1.size_ == list_2.size_); - return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_); - } - static CPURegList Union(const CPURegList& list_1, - const CPURegList& list_2, - const CPURegList& list_3); - static CPURegList Union(const CPURegList& list_1, - const CPURegList& list_2, - const CPURegList& list_3, - const CPURegList& list_4); - - static CPURegList Intersection(const CPURegList& list_1, - const CPURegList& list_2) { - VIXL_ASSERT(list_1.type_ == list_2.type_); - VIXL_ASSERT(list_1.size_ == list_2.size_); - return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_); - } - static CPURegList Intersection(const CPURegList& list_1, - const CPURegList& list_2, - const CPURegList& list_3); - static CPURegList Intersection(const CPURegList& list_1, - const CPURegList& list_2, - const CPURegList& list_3, - const CPURegList& list_4); - - bool Overlaps(const CPURegList& other) const { - return (type_ == other.type_) && ((list_ & other.list_) != 0); - } - - RegList list() const { - VIXL_ASSERT(IsValid()); - return list_; - } - - void set_list(RegList new_list) { - VIXL_ASSERT(IsValid()); - list_ = new_list; - } - - // Remove all callee-saved registers from the list. This can be useful when - // preparing registers for an AAPCS64 function call, for example. - void RemoveCalleeSaved(); - - CPURegister PopLowestIndex(); - CPURegister PopHighestIndex(); - - // AAPCS64 callee-saved registers. - static CPURegList GetCalleeSaved(unsigned size = kXRegSize); - static CPURegList GetCalleeSavedV(unsigned size = kDRegSize); - - // AAPCS64 caller-saved registers. Note that this includes lr. - // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top - // 64-bits being caller-saved. - static CPURegList GetCallerSaved(unsigned size = kXRegSize); - static CPURegList GetCallerSavedV(unsigned size = kDRegSize); - - bool IsEmpty() const { - VIXL_ASSERT(IsValid()); - return list_ == 0; - } - - bool IncludesAliasOf(const CPURegister& other) const { - VIXL_ASSERT(IsValid()); - return (type_ == other.type()) && ((other.Bit() & list_) != 0); - } - - bool IncludesAliasOf(int code) const { - VIXL_ASSERT(IsValid()); - return ((code & list_) != 0); - } - - int Count() const { - VIXL_ASSERT(IsValid()); - return CountSetBits(list_); - } - - unsigned RegisterSizeInBits() const { - VIXL_ASSERT(IsValid()); - return size_; - } - - unsigned RegisterSizeInBytes() const { - int size_in_bits = RegisterSizeInBits(); - VIXL_ASSERT((size_in_bits % 8) == 0); - return size_in_bits / 8; - } - - unsigned TotalSizeInBytes() const { - VIXL_ASSERT(IsValid()); - return RegisterSizeInBytes() * Count(); - } - - private: - RegList list_; - unsigned size_; - CPURegister::RegisterType type_; - - bool IsValid() const; -}; - - -// AAPCS64 callee-saved registers. -extern const CPURegList kCalleeSaved; -extern const CPURegList kCalleeSavedV; - - -// AAPCS64 caller-saved registers. Note that this includes lr. -extern const CPURegList kCallerSaved; -extern const CPURegList kCallerSavedV; - - -// Operand. -class Operand { - public: - // # - // where is int64_t. 
- // This is allowed to be an implicit constructor because Operand is - // a wrapper class that doesn't normally perform any type conversion. - Operand(int64_t immediate = 0); // NOLINT(runtime/explicit) - - // rm, { #} - // where is one of {LSL, LSR, ASR, ROR}. - // is uint6_t. - // This is allowed to be an implicit constructor because Operand is - // a wrapper class that doesn't normally perform any type conversion. - Operand(Register reg, - Shift shift = LSL, - unsigned shift_amount = 0); // NOLINT(runtime/explicit) - - // rm, { {#}} - // where is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}. - // is uint2_t. - explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0); - - bool IsImmediate() const; - bool IsShiftedRegister() const; - bool IsExtendedRegister() const; - bool IsZero() const; - - // This returns an LSL shift (<= 4) operand as an equivalent extend operand, - // which helps in the encoding of instructions that use the stack pointer. - Operand ToExtendedRegister() const; - - int64_t immediate() const { - VIXL_ASSERT(IsImmediate()); - return immediate_; - } - - Register reg() const { - VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister()); - return reg_; - } - - Shift shift() const { - VIXL_ASSERT(IsShiftedRegister()); - return shift_; - } - - Extend extend() const { - VIXL_ASSERT(IsExtendedRegister()); - return extend_; - } - - unsigned shift_amount() const { - VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister()); - return shift_amount_; - } - - private: - int64_t immediate_; - Register reg_; - Shift shift_; - Extend extend_; - unsigned shift_amount_; -}; - - -// MemOperand represents the addressing mode of a load or store instruction. -class MemOperand { - public: - explicit MemOperand(Register base, - int64_t offset = 0, - AddrMode addrmode = Offset); - MemOperand(Register base, - Register regoffset, - Shift shift = LSL, - unsigned shift_amount = 0); - MemOperand(Register base, - Register regoffset, - Extend extend, - unsigned shift_amount = 0); - MemOperand(Register base, - const Operand& offset, - AddrMode addrmode = Offset); - - const Register& base() const { return base_; } - const Register& regoffset() const { return regoffset_; } - int64_t offset() const { return offset_; } - AddrMode addrmode() const { return addrmode_; } - Shift shift() const { return shift_; } - Extend extend() const { return extend_; } - unsigned shift_amount() const { return shift_amount_; } - bool IsImmediateOffset() const; - bool IsRegisterOffset() const; - bool IsPreIndex() const; - bool IsPostIndex() const; - - void AddOffset(int64_t offset); - - private: - Register base_; - Register regoffset_; - int64_t offset_; - AddrMode addrmode_; - Shift shift_; - Extend extend_; - unsigned shift_amount_; -}; - - -class LabelTestHelper; // Forward declaration. - - -class Label { - public: - Label() : location_(kLocationUnbound) {} - ~Label() { - // If the label has been linked to, it needs to be bound to a target. 
- VIXL_ASSERT(!IsLinked() || IsBound()); - } - - bool IsBound() const { return location_ >= 0; } - bool IsLinked() const { return !links_.empty(); } - - ptrdiff_t location() const { return location_; } - - static const int kNPreallocatedLinks = 4; - static const ptrdiff_t kInvalidLinkKey = PTRDIFF_MAX; - static const size_t kReclaimFrom = 512; - static const size_t kReclaimFactor = 2; - - typedef InvalSet LinksSetBase; - typedef InvalSetIterator LabelLinksIteratorBase; - - private: - class LinksSet : public LinksSetBase { - public: - LinksSet() : LinksSetBase() {} - }; - - // Allows iterating over the links of a label. The behaviour is undefined if - // the list of links is modified in any way while iterating. - class LabelLinksIterator : public LabelLinksIteratorBase { - public: - explicit LabelLinksIterator(Label* label) - : LabelLinksIteratorBase(&label->links_) {} - }; - - void Bind(ptrdiff_t location) { - // Labels can only be bound once. - VIXL_ASSERT(!IsBound()); - location_ = location; - } - - void AddLink(ptrdiff_t instruction) { - // If a label is bound, the assembler already has the information it needs - // to write the instruction, so there is no need to add it to links_. - VIXL_ASSERT(!IsBound()); - links_.insert(instruction); - } - - void DeleteLink(ptrdiff_t instruction) { - links_.erase(instruction); - } - - void ClearAllLinks() { - links_.clear(); - } - - // TODO: The comment below considers average case complexity for our - // usual use-cases. The elements of interest are: - // - Branches to a label are emitted in order: branch instructions to a label - // are generated at an offset in the code generation buffer greater than any - // other branch to that same label already generated. As an example, this can - // be broken when an instruction is patched to become a branch. Note that the - // code will still work, but the complexity considerations below may locally - // not apply any more. - // - Veneers are generated in order: for multiple branches of the same type - // branching to the same unbound label going out of range, veneers are - // generated in growing order of the branch instruction offset from the start - // of the buffer. - // - // When creating a veneer for a branch going out of range, the link for this - // branch needs to be removed from this `links_`. Since all branches are - // tracked in one underlying InvalSet, the complexity for this deletion is the - // same as for finding the element, ie. O(n), where n is the number of links - // in the set. - // This could be reduced to O(1) by using the same trick as used when tracking - // branch information for veneers: split the container to use one set per type - // of branch. With that setup, when a veneer is created and the link needs to - // be deleted, if the two points above hold, it must be the minimum element of - // the set for its type of branch, and that minimum element will be accessible - // in O(1). - - // The offsets of the instructions that have linked to this label. - LinksSet links_; - // The label location. - ptrdiff_t location_; - - static const ptrdiff_t kLocationUnbound = -1; - - // It is not safe to copy labels, so disable the copy constructor and operator - // by declaring them private (without an implementation). - Label(const Label&); - void operator=(const Label&); - - // The Assembler class is responsible for binding and linking labels, since - // the stored offsets need to be consistent with the Assembler's buffer. 
- friend class Assembler; - // The MacroAssembler and VeneerPool handle resolution of branches to distant - // targets. - friend class MacroAssembler; - friend class VeneerPool; -}; - - -// Required InvalSet template specialisations. -#define INVAL_SET_TEMPLATE_PARAMETERS \ - ptrdiff_t, \ - Label::kNPreallocatedLinks, \ - ptrdiff_t, \ - Label::kInvalidLinkKey, \ - Label::kReclaimFrom, \ - Label::kReclaimFactor -template<> -inline ptrdiff_t InvalSet::Key( - const ptrdiff_t& element) { - return element; -} -template<> -inline void InvalSet::SetKey( - ptrdiff_t* element, ptrdiff_t key) { - *element = key; -} -#undef INVAL_SET_TEMPLATE_PARAMETERS - - -class Assembler; -class LiteralPool; - -// A literal is a 32-bit or 64-bit piece of data stored in the instruction -// stream and loaded through a pc relative load. The same literal can be -// referred to by multiple instructions but a literal can only reside at one -// place in memory. A literal can be used by a load before or after being -// placed in memory. -// -// Internally an offset of 0 is associated with a literal which has been -// neither used nor placed. Then two possibilities arise: -// 1) the label is placed, the offset (stored as offset + 1) is used to -// resolve any subsequent load using the label. -// 2) the label is not placed and offset is the offset of the last load using -// the literal (stored as -offset -1). If multiple loads refer to this -// literal then the last load holds the offset of the preceding load and -// all loads form a chain. Once the offset is placed all the loads in the -// chain are resolved and future loads fall back to possibility 1. -class RawLiteral { - public: - enum DeletionPolicy { - kDeletedOnPlacementByPool, - kDeletedOnPoolDestruction, - kManuallyDeleted - }; - - RawLiteral(size_t size, - LiteralPool* literal_pool, - DeletionPolicy deletion_policy = kManuallyDeleted); - - // The literal pool only sees and deletes `RawLiteral*` pointers, but they are - // actually pointing to `Literal` objects. 
- virtual ~RawLiteral() {} - - size_t size() { - VIXL_STATIC_ASSERT(kDRegSizeInBytes == kXRegSizeInBytes); - VIXL_STATIC_ASSERT(kSRegSizeInBytes == kWRegSizeInBytes); - VIXL_ASSERT((size_ == kXRegSizeInBytes) || - (size_ == kWRegSizeInBytes) || - (size_ == kQRegSizeInBytes)); - return size_; - } - uint64_t raw_value128_low64() { - VIXL_ASSERT(size_ == kQRegSizeInBytes); - return low64_; - } - uint64_t raw_value128_high64() { - VIXL_ASSERT(size_ == kQRegSizeInBytes); - return high64_; - } - uint64_t raw_value64() { - VIXL_ASSERT(size_ == kXRegSizeInBytes); - VIXL_ASSERT(high64_ == 0); - return low64_; - } - uint32_t raw_value32() { - VIXL_ASSERT(size_ == kWRegSizeInBytes); - VIXL_ASSERT(high64_ == 0); - VIXL_ASSERT(is_uint32(low64_) || is_int32(low64_)); - return static_cast(low64_); - } - bool IsUsed() { return offset_ < 0; } - bool IsPlaced() { return offset_ > 0; } - - LiteralPool* GetLiteralPool() const { - return literal_pool_; - } - - ptrdiff_t offset() { - VIXL_ASSERT(IsPlaced()); - return offset_ - 1; - } - - protected: - void set_offset(ptrdiff_t offset) { - VIXL_ASSERT(offset >= 0); - VIXL_ASSERT(IsWordAligned(offset)); - VIXL_ASSERT(!IsPlaced()); - offset_ = offset + 1; - } - ptrdiff_t last_use() { - VIXL_ASSERT(IsUsed()); - return -offset_ - 1; - } - void set_last_use(ptrdiff_t offset) { - VIXL_ASSERT(offset >= 0); - VIXL_ASSERT(IsWordAligned(offset)); - VIXL_ASSERT(!IsPlaced()); - offset_ = -offset - 1; - } - - size_t size_; - ptrdiff_t offset_; - uint64_t low64_; - uint64_t high64_; - - private: - LiteralPool* literal_pool_; - DeletionPolicy deletion_policy_; - - friend class Assembler; - friend class LiteralPool; -}; - - -template -class Literal : public RawLiteral { - public: - explicit Literal(T value, - LiteralPool* literal_pool = NULL, - RawLiteral::DeletionPolicy ownership = kManuallyDeleted) - : RawLiteral(sizeof(value), literal_pool, ownership) { - VIXL_STATIC_ASSERT(sizeof(value) <= kXRegSizeInBytes); - UpdateValue(value); - } - - Literal(T high64, T low64, - LiteralPool* literal_pool = NULL, - RawLiteral::DeletionPolicy ownership = kManuallyDeleted) - : RawLiteral(kQRegSizeInBytes, literal_pool, ownership) { - VIXL_STATIC_ASSERT(sizeof(low64) == (kQRegSizeInBytes / 2)); - UpdateValue(high64, low64); - } - - virtual ~Literal() {} - - // Update the value of this literal, if necessary by rewriting the value in - // the pool. - // If the literal has already been placed in a literal pool, the address of - // the start of the code buffer must be provided, as the literal only knows it - // offset from there. This also allows patching the value after the code has - // been moved in memory. 
- void UpdateValue(T new_value, uint8_t* code_buffer = NULL) { - VIXL_ASSERT(sizeof(new_value) == size_); - memcpy(&low64_, &new_value, sizeof(new_value)); - if (IsPlaced()) { - VIXL_ASSERT(code_buffer != NULL); - RewriteValueInCode(code_buffer); - } - } - - void UpdateValue(T high64, T low64, uint8_t* code_buffer = NULL) { - VIXL_ASSERT(sizeof(low64) == size_ / 2); - memcpy(&low64_, &low64, sizeof(low64)); - memcpy(&high64_, &high64, sizeof(high64)); - if (IsPlaced()) { - VIXL_ASSERT(code_buffer != NULL); - RewriteValueInCode(code_buffer); - } - } - - void UpdateValue(T new_value, const Assembler* assembler); - void UpdateValue(T high64, T low64, const Assembler* assembler); - - private: - void RewriteValueInCode(uint8_t* code_buffer) { - VIXL_ASSERT(IsPlaced()); - VIXL_STATIC_ASSERT(sizeof(T) <= kXRegSizeInBytes); - switch (size()) { - case kSRegSizeInBytes: - *reinterpret_cast(code_buffer + offset()) = raw_value32(); - break; - case kDRegSizeInBytes: - *reinterpret_cast(code_buffer + offset()) = raw_value64(); - break; - default: - VIXL_ASSERT(size() == kQRegSizeInBytes); - uint64_t* base_address = - reinterpret_cast(code_buffer + offset()); - *base_address = raw_value128_low64(); - *(base_address + 1) = raw_value128_high64(); - } - } -}; - - -// Control whether or not position-independent code should be emitted. -enum PositionIndependentCodeOption { - // All code generated will be position-independent; all branches and - // references to labels generated with the Label class will use PC-relative - // addressing. - PositionIndependentCode, - - // Allow VIXL to generate code that refers to absolute addresses. With this - // option, it will not be possible to copy the code buffer and run it from a - // different address; code must be generated in its final location. - PositionDependentCode, - - // Allow VIXL to assume that the bottom 12 bits of the address will be - // constant, but that the top 48 bits may change. This allows `adrp` to - // function in systems which copy code between pages, but otherwise maintain - // 4KB page alignment. - PageOffsetDependentCode -}; - - -// Control how scaled- and unscaled-offset loads and stores are generated. -enum LoadStoreScalingOption { - // Prefer scaled-immediate-offset instructions, but emit unscaled-offset, - // register-offset, pre-index or post-index instructions if necessary. - PreferScaledOffset, - - // Prefer unscaled-immediate-offset instructions, but emit scaled-offset, - // register-offset, pre-index or post-index instructions if necessary. - PreferUnscaledOffset, - - // Require scaled-immediate-offset instructions. - RequireScaledOffset, - - // Require unscaled-immediate-offset instructions. - RequireUnscaledOffset -}; - - -// Assembler. -class Assembler { - public: - Assembler(size_t capacity, - PositionIndependentCodeOption pic = PositionIndependentCode); - Assembler(byte* buffer, size_t capacity, - PositionIndependentCodeOption pic = PositionIndependentCode); - - // The destructor asserts that one of the following is true: - // * The Assembler object has not been used. - // * Nothing has been emitted since the last Reset() call. - // * Nothing has been emitted since the last FinalizeCode() call. - ~Assembler(); - - // System functions. - - // Start generating code from the beginning of the buffer, discarding any code - // and data that has already been emitted into the buffer. - void Reset(); - - // Finalize a code buffer of generated instructions. This function must be - // called before executing or copying code from the buffer. 
- void FinalizeCode(); - - // Label. - // Bind a label to the current PC. - void bind(Label* label); - - // Bind a label to a specified offset from the start of the buffer. - void BindToOffset(Label* label, ptrdiff_t offset); - - // Place a literal at the current PC. - void place(RawLiteral* literal); - - ptrdiff_t CursorOffset() const { - return buffer_->CursorOffset(); - } - - ptrdiff_t BufferEndOffset() const { - return static_cast(buffer_->capacity()); - } - - // Return the address of an offset in the buffer. - template - T GetOffsetAddress(ptrdiff_t offset) const { - VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t)); - return buffer_->GetOffsetAddress(offset); - } - - // Return the address of a bound label. - template - T GetLabelAddress(const Label * label) const { - VIXL_ASSERT(label->IsBound()); - VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t)); - return GetOffsetAddress(label->location()); - } - - // Return the address of the cursor. - template - T GetCursorAddress() const { - VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t)); - return GetOffsetAddress(CursorOffset()); - } - - // Return the address of the start of the buffer. - template - T GetStartAddress() const { - VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t)); - return GetOffsetAddress(0); - } - - Instruction* InstructionAt(ptrdiff_t instruction_offset) { - return GetOffsetAddress(instruction_offset); - } - - ptrdiff_t InstructionOffset(Instruction* instruction) { - VIXL_STATIC_ASSERT(sizeof(*instruction) == 1); - ptrdiff_t offset = instruction - GetStartAddress(); - VIXL_ASSERT((0 <= offset) && - (offset < static_cast(BufferCapacity()))); - return offset; - } - - // Instruction set functions. - - // Branch / Jump instructions. - // Branch to register. - void br(const Register& xn); - - // Branch with link to register. - void blr(const Register& xn); - - // Branch to register with return hint. - void ret(const Register& xn = lr); - - // Unconditional branch to label. - void b(Label* label); - - // Conditional branch to label. - void b(Label* label, Condition cond); - - // Unconditional branch to PC offset. - void b(int imm26); - - // Conditional branch to PC offset. - void b(int imm19, Condition cond); - - // Branch with link to label. - void bl(Label* label); - - // Branch with link to PC offset. - void bl(int imm26); - - // Compare and branch to label if zero. - void cbz(const Register& rt, Label* label); - - // Compare and branch to PC offset if zero. - void cbz(const Register& rt, int imm19); - - // Compare and branch to label if not zero. - void cbnz(const Register& rt, Label* label); - - // Compare and branch to PC offset if not zero. - void cbnz(const Register& rt, int imm19); - - // Table lookup from one register. - void tbl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Table lookup from two registers. - void tbl(const VRegister& vd, - const VRegister& vn, - const VRegister& vn2, - const VRegister& vm); - - // Table lookup from three registers. - void tbl(const VRegister& vd, - const VRegister& vn, - const VRegister& vn2, - const VRegister& vn3, - const VRegister& vm); - - // Table lookup from four registers. - void tbl(const VRegister& vd, - const VRegister& vn, - const VRegister& vn2, - const VRegister& vn3, - const VRegister& vn4, - const VRegister& vm); - - // Table lookup extension from one register. - void tbx(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Table lookup extension from two registers. 
- void tbx(const VRegister& vd, - const VRegister& vn, - const VRegister& vn2, - const VRegister& vm); - - // Table lookup extension from three registers. - void tbx(const VRegister& vd, - const VRegister& vn, - const VRegister& vn2, - const VRegister& vn3, - const VRegister& vm); - - // Table lookup extension from four registers. - void tbx(const VRegister& vd, - const VRegister& vn, - const VRegister& vn2, - const VRegister& vn3, - const VRegister& vn4, - const VRegister& vm); - - // Test bit and branch to label if zero. - void tbz(const Register& rt, unsigned bit_pos, Label* label); - - // Test bit and branch to PC offset if zero. - void tbz(const Register& rt, unsigned bit_pos, int imm14); - - // Test bit and branch to label if not zero. - void tbnz(const Register& rt, unsigned bit_pos, Label* label); - - // Test bit and branch to PC offset if not zero. - void tbnz(const Register& rt, unsigned bit_pos, int imm14); - - // Address calculation instructions. - // Calculate a PC-relative address. Unlike for branches the offset in adr is - // unscaled (i.e. the result can be unaligned). - - // Calculate the address of a label. - void adr(const Register& rd, Label* label); - - // Calculate the address of a PC offset. - void adr(const Register& rd, int imm21); - - // Calculate the page address of a label. - void adrp(const Register& rd, Label* label); - - // Calculate the page address of a PC offset. - void adrp(const Register& rd, int imm21); - - // Data Processing instructions. - // Add. - void add(const Register& rd, - const Register& rn, - const Operand& operand); - - // Add and update status flags. - void adds(const Register& rd, - const Register& rn, - const Operand& operand); - - // Compare negative. - void cmn(const Register& rn, const Operand& operand); - - // Subtract. - void sub(const Register& rd, - const Register& rn, - const Operand& operand); - - // Subtract and update status flags. - void subs(const Register& rd, - const Register& rn, - const Operand& operand); - - // Compare. - void cmp(const Register& rn, const Operand& operand); - - // Negate. - void neg(const Register& rd, - const Operand& operand); - - // Negate and update status flags. - void negs(const Register& rd, - const Operand& operand); - - // Add with carry bit. - void adc(const Register& rd, - const Register& rn, - const Operand& operand); - - // Add with carry bit and update status flags. - void adcs(const Register& rd, - const Register& rn, - const Operand& operand); - - // Subtract with carry bit. - void sbc(const Register& rd, - const Register& rn, - const Operand& operand); - - // Subtract with carry bit and update status flags. - void sbcs(const Register& rd, - const Register& rn, - const Operand& operand); - - // Negate with carry bit. - void ngc(const Register& rd, - const Operand& operand); - - // Negate with carry bit and update status flags. - void ngcs(const Register& rd, - const Operand& operand); - - // Logical instructions. - // Bitwise and (A & B). - void and_(const Register& rd, - const Register& rn, - const Operand& operand); - - // Bitwise and (A & B) and update status flags. - void ands(const Register& rd, - const Register& rn, - const Operand& operand); - - // Bit test and set flags. - void tst(const Register& rn, const Operand& operand); - - // Bit clear (A & ~B). - void bic(const Register& rd, - const Register& rn, - const Operand& operand); - - // Bit clear (A & ~B) and update status flags. 
- void bics(const Register& rd, - const Register& rn, - const Operand& operand); - - // Bitwise or (A | B). - void orr(const Register& rd, const Register& rn, const Operand& operand); - - // Bitwise nor (A | ~B). - void orn(const Register& rd, const Register& rn, const Operand& operand); - - // Bitwise eor/xor (A ^ B). - void eor(const Register& rd, const Register& rn, const Operand& operand); - - // Bitwise enor/xnor (A ^ ~B). - void eon(const Register& rd, const Register& rn, const Operand& operand); - - // Logical shift left by variable. - void lslv(const Register& rd, const Register& rn, const Register& rm); - - // Logical shift right by variable. - void lsrv(const Register& rd, const Register& rn, const Register& rm); - - // Arithmetic shift right by variable. - void asrv(const Register& rd, const Register& rn, const Register& rm); - - // Rotate right by variable. - void rorv(const Register& rd, const Register& rn, const Register& rm); - - // Bitfield instructions. - // Bitfield move. - void bfm(const Register& rd, - const Register& rn, - unsigned immr, - unsigned imms); - - // Signed bitfield move. - void sbfm(const Register& rd, - const Register& rn, - unsigned immr, - unsigned imms); - - // Unsigned bitfield move. - void ubfm(const Register& rd, - const Register& rn, - unsigned immr, - unsigned imms); - - // Bfm aliases. - // Bitfield insert. - void bfi(const Register& rd, - const Register& rn, - unsigned lsb, - unsigned width) { - VIXL_ASSERT(width >= 1); - VIXL_ASSERT(lsb + width <= rn.size()); - bfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1); - } - - // Bitfield extract and insert low. - void bfxil(const Register& rd, - const Register& rn, - unsigned lsb, - unsigned width) { - VIXL_ASSERT(width >= 1); - VIXL_ASSERT(lsb + width <= rn.size()); - bfm(rd, rn, lsb, lsb + width - 1); - } - - // Sbfm aliases. - // Arithmetic shift right. - void asr(const Register& rd, const Register& rn, unsigned shift) { - VIXL_ASSERT(shift < rd.size()); - sbfm(rd, rn, shift, rd.size() - 1); - } - - // Signed bitfield insert with zero at right. - void sbfiz(const Register& rd, - const Register& rn, - unsigned lsb, - unsigned width) { - VIXL_ASSERT(width >= 1); - VIXL_ASSERT(lsb + width <= rn.size()); - sbfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1); - } - - // Signed bitfield extract. - void sbfx(const Register& rd, - const Register& rn, - unsigned lsb, - unsigned width) { - VIXL_ASSERT(width >= 1); - VIXL_ASSERT(lsb + width <= rn.size()); - sbfm(rd, rn, lsb, lsb + width - 1); - } - - // Signed extend byte. - void sxtb(const Register& rd, const Register& rn) { - sbfm(rd, rn, 0, 7); - } - - // Signed extend halfword. - void sxth(const Register& rd, const Register& rn) { - sbfm(rd, rn, 0, 15); - } - - // Signed extend word. - void sxtw(const Register& rd, const Register& rn) { - sbfm(rd, rn, 0, 31); - } - - // Ubfm aliases. - // Logical shift left. - void lsl(const Register& rd, const Register& rn, unsigned shift) { - unsigned reg_size = rd.size(); - VIXL_ASSERT(shift < reg_size); - ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1); - } - - // Logical shift right. - void lsr(const Register& rd, const Register& rn, unsigned shift) { - VIXL_ASSERT(shift < rd.size()); - ubfm(rd, rn, shift, rd.size() - 1); - } - - // Unsigned bitfield insert with zero at right. 
- void ubfiz(const Register& rd, - const Register& rn, - unsigned lsb, - unsigned width) { - VIXL_ASSERT(width >= 1); - VIXL_ASSERT(lsb + width <= rn.size()); - ubfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1); - } - - // Unsigned bitfield extract. - void ubfx(const Register& rd, - const Register& rn, - unsigned lsb, - unsigned width) { - VIXL_ASSERT(width >= 1); - VIXL_ASSERT(lsb + width <= rn.size()); - ubfm(rd, rn, lsb, lsb + width - 1); - } - - // Unsigned extend byte. - void uxtb(const Register& rd, const Register& rn) { - ubfm(rd, rn, 0, 7); - } - - // Unsigned extend halfword. - void uxth(const Register& rd, const Register& rn) { - ubfm(rd, rn, 0, 15); - } - - // Unsigned extend word. - void uxtw(const Register& rd, const Register& rn) { - ubfm(rd, rn, 0, 31); - } - - // Extract. - void extr(const Register& rd, - const Register& rn, - const Register& rm, - unsigned lsb); - - // Conditional select: rd = cond ? rn : rm. - void csel(const Register& rd, - const Register& rn, - const Register& rm, - Condition cond); - - // Conditional select increment: rd = cond ? rn : rm + 1. - void csinc(const Register& rd, - const Register& rn, - const Register& rm, - Condition cond); - - // Conditional select inversion: rd = cond ? rn : ~rm. - void csinv(const Register& rd, - const Register& rn, - const Register& rm, - Condition cond); - - // Conditional select negation: rd = cond ? rn : -rm. - void csneg(const Register& rd, - const Register& rn, - const Register& rm, - Condition cond); - - // Conditional set: rd = cond ? 1 : 0. - void cset(const Register& rd, Condition cond); - - // Conditional set mask: rd = cond ? -1 : 0. - void csetm(const Register& rd, Condition cond); - - // Conditional increment: rd = cond ? rn + 1 : rn. - void cinc(const Register& rd, const Register& rn, Condition cond); - - // Conditional invert: rd = cond ? ~rn : rn. - void cinv(const Register& rd, const Register& rn, Condition cond); - - // Conditional negate: rd = cond ? -rn : rn. - void cneg(const Register& rd, const Register& rn, Condition cond); - - // Rotate right. - void ror(const Register& rd, const Register& rs, unsigned shift) { - extr(rd, rs, rs, shift); - } - - // Conditional comparison. - // Conditional compare negative. - void ccmn(const Register& rn, - const Operand& operand, - StatusFlags nzcv, - Condition cond); - - // Conditional compare. - void ccmp(const Register& rn, - const Operand& operand, - StatusFlags nzcv, - Condition cond); - - // CRC-32 checksum from byte. - void crc32b(const Register& rd, - const Register& rn, - const Register& rm); - - // CRC-32 checksum from half-word. - void crc32h(const Register& rd, - const Register& rn, - const Register& rm); - - // CRC-32 checksum from word. - void crc32w(const Register& rd, - const Register& rn, - const Register& rm); - - // CRC-32 checksum from double word. - void crc32x(const Register& rd, - const Register& rn, - const Register& rm); - - // CRC-32 C checksum from byte. - void crc32cb(const Register& rd, - const Register& rn, - const Register& rm); - - // CRC-32 C checksum from half-word. - void crc32ch(const Register& rd, - const Register& rn, - const Register& rm); - - // CRC-32 C checksum from word. - void crc32cw(const Register& rd, - const Register& rn, - const Register& rm); - - // CRC-32C checksum from double word. - void crc32cx(const Register& rd, - const Register& rn, - const Register& rm); - - // Multiply. - void mul(const Register& rd, const Register& rn, const Register& rm); - - // Negated multiply. 
- void mneg(const Register& rd, const Register& rn, const Register& rm); - - // Signed long multiply: 32 x 32 -> 64-bit. - void smull(const Register& rd, const Register& rn, const Register& rm); - - // Signed multiply high: 64 x 64 -> 64-bit <127:64>. - void smulh(const Register& xd, const Register& xn, const Register& xm); - - // Multiply and accumulate. - void madd(const Register& rd, - const Register& rn, - const Register& rm, - const Register& ra); - - // Multiply and subtract. - void msub(const Register& rd, - const Register& rn, - const Register& rm, - const Register& ra); - - // Signed long multiply and accumulate: 32 x 32 + 64 -> 64-bit. - void smaddl(const Register& rd, - const Register& rn, - const Register& rm, - const Register& ra); - - // Unsigned long multiply and accumulate: 32 x 32 + 64 -> 64-bit. - void umaddl(const Register& rd, - const Register& rn, - const Register& rm, - const Register& ra); - - // Unsigned long multiply: 32 x 32 -> 64-bit. - void umull(const Register& rd, - const Register& rn, - const Register& rm) { - umaddl(rd, rn, rm, xzr); - } - - // Unsigned multiply high: 64 x 64 -> 64-bit <127:64>. - void umulh(const Register& xd, - const Register& xn, - const Register& xm); - - // Signed long multiply and subtract: 64 - (32 x 32) -> 64-bit. - void smsubl(const Register& rd, - const Register& rn, - const Register& rm, - const Register& ra); - - // Unsigned long multiply and subtract: 64 - (32 x 32) -> 64-bit. - void umsubl(const Register& rd, - const Register& rn, - const Register& rm, - const Register& ra); - - // Signed integer divide. - void sdiv(const Register& rd, const Register& rn, const Register& rm); - - // Unsigned integer divide. - void udiv(const Register& rd, const Register& rn, const Register& rm); - - // Bit reverse. - void rbit(const Register& rd, const Register& rn); - - // Reverse bytes in 16-bit half words. - void rev16(const Register& rd, const Register& rn); - - // Reverse bytes in 32-bit words. - void rev32(const Register& rd, const Register& rn); - - // Reverse bytes. - void rev(const Register& rd, const Register& rn); - - // Count leading zeroes. - void clz(const Register& rd, const Register& rn); - - // Count leading sign bits. - void cls(const Register& rd, const Register& rn); - - // Memory instructions. - // Load integer or FP register. - void ldr(const CPURegister& rt, const MemOperand& src, - LoadStoreScalingOption option = PreferScaledOffset); - - // Store integer or FP register. - void str(const CPURegister& rt, const MemOperand& dst, - LoadStoreScalingOption option = PreferScaledOffset); - - // Load word with sign extension. - void ldrsw(const Register& rt, const MemOperand& src, - LoadStoreScalingOption option = PreferScaledOffset); - - // Load byte. - void ldrb(const Register& rt, const MemOperand& src, - LoadStoreScalingOption option = PreferScaledOffset); - - // Store byte. - void strb(const Register& rt, const MemOperand& dst, - LoadStoreScalingOption option = PreferScaledOffset); - - // Load byte with sign extension. - void ldrsb(const Register& rt, const MemOperand& src, - LoadStoreScalingOption option = PreferScaledOffset); - - // Load half-word. - void ldrh(const Register& rt, const MemOperand& src, - LoadStoreScalingOption option = PreferScaledOffset); - - // Store half-word. - void strh(const Register& rt, const MemOperand& dst, - LoadStoreScalingOption option = PreferScaledOffset); - - // Load half-word with sign extension. 
- void ldrsh(const Register& rt, const MemOperand& src, - LoadStoreScalingOption option = PreferScaledOffset); - - // Load integer or FP register (with unscaled offset). - void ldur(const CPURegister& rt, const MemOperand& src, - LoadStoreScalingOption option = PreferUnscaledOffset); - - // Store integer or FP register (with unscaled offset). - void stur(const CPURegister& rt, const MemOperand& src, - LoadStoreScalingOption option = PreferUnscaledOffset); - - // Load word with sign extension. - void ldursw(const Register& rt, const MemOperand& src, - LoadStoreScalingOption option = PreferUnscaledOffset); - - // Load byte (with unscaled offset). - void ldurb(const Register& rt, const MemOperand& src, - LoadStoreScalingOption option = PreferUnscaledOffset); - - // Store byte (with unscaled offset). - void sturb(const Register& rt, const MemOperand& dst, - LoadStoreScalingOption option = PreferUnscaledOffset); - - // Load byte with sign extension (and unscaled offset). - void ldursb(const Register& rt, const MemOperand& src, - LoadStoreScalingOption option = PreferUnscaledOffset); - - // Load half-word (with unscaled offset). - void ldurh(const Register& rt, const MemOperand& src, - LoadStoreScalingOption option = PreferUnscaledOffset); - - // Store half-word (with unscaled offset). - void sturh(const Register& rt, const MemOperand& dst, - LoadStoreScalingOption option = PreferUnscaledOffset); - - // Load half-word with sign extension (and unscaled offset). - void ldursh(const Register& rt, const MemOperand& src, - LoadStoreScalingOption option = PreferUnscaledOffset); - - // Load integer or FP register pair. - void ldp(const CPURegister& rt, const CPURegister& rt2, - const MemOperand& src); - - // Store integer or FP register pair. - void stp(const CPURegister& rt, const CPURegister& rt2, - const MemOperand& dst); - - // Load word pair with sign extension. - void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src); - - // Load integer or FP register pair, non-temporal. - void ldnp(const CPURegister& rt, const CPURegister& rt2, - const MemOperand& src); - - // Store integer or FP register pair, non-temporal. - void stnp(const CPURegister& rt, const CPURegister& rt2, - const MemOperand& dst); - - // Load integer or FP register from literal pool. - void ldr(const CPURegister& rt, RawLiteral* literal); - - // Load word with sign extension from literal pool. - void ldrsw(const Register& rt, RawLiteral* literal); - - // Load integer or FP register from pc + imm19 << 2. - void ldr(const CPURegister& rt, int imm19); - - // Load word with sign extension from pc + imm19 << 2. - void ldrsw(const Register& rt, int imm19); - - // Store exclusive byte. - void stxrb(const Register& rs, const Register& rt, const MemOperand& dst); - - // Store exclusive half-word. - void stxrh(const Register& rs, const Register& rt, const MemOperand& dst); - - // Store exclusive register. - void stxr(const Register& rs, const Register& rt, const MemOperand& dst); - - // Load exclusive byte. - void ldxrb(const Register& rt, const MemOperand& src); - - // Load exclusive half-word. - void ldxrh(const Register& rt, const MemOperand& src); - - // Load exclusive register. - void ldxr(const Register& rt, const MemOperand& src); - - // Store exclusive register pair. - void stxp(const Register& rs, - const Register& rt, - const Register& rt2, - const MemOperand& dst); - - // Load exclusive register pair. - void ldxp(const Register& rt, const Register& rt2, const MemOperand& src); - - // Store-release exclusive byte. 
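The ldur/stur family above exists because A64 has two immediate-offset encodings: ldr/str prefer a scaled unsigned 12-bit offset (a multiple of the access size), while ldur/stur take an unscaled signed 9-bit byte offset. Judging by the option names, LoadStoreScalingOption only expresses which form to prefer when either would fit. A hedged sketch of the two reachability rules:

```cpp
#include <cstdint>

// ldr/str style: unsigned 12-bit offset, scaled by the access size (1 << size_log2).
constexpr bool IsScaledOffset(int64_t offset, unsigned size_log2) {
    return offset >= 0 &&
           (offset & ((int64_t(1) << size_log2) - 1)) == 0 &&
           (offset >> size_log2) < (int64_t(1) << 12);
}

// ldur/stur style: any signed 9-bit byte offset.
constexpr bool IsUnscaledOffset(int64_t offset) {
    return offset >= -256 && offset <= 255;
}

static_assert(IsScaledOffset(4096, 3) && !IsUnscaledOffset(4096));  // only ldr reaches it
static_assert(!IsScaledOffset(-8, 3) && IsUnscaledOffset(-8));      // only ldur reaches it
```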
- void stlxrb(const Register& rs, const Register& rt, const MemOperand& dst); - - // Store-release exclusive half-word. - void stlxrh(const Register& rs, const Register& rt, const MemOperand& dst); - - // Store-release exclusive register. - void stlxr(const Register& rs, const Register& rt, const MemOperand& dst); - - // Load-acquire exclusive byte. - void ldaxrb(const Register& rt, const MemOperand& src); - - // Load-acquire exclusive half-word. - void ldaxrh(const Register& rt, const MemOperand& src); - - // Load-acquire exclusive register. - void ldaxr(const Register& rt, const MemOperand& src); - - // Store-release exclusive register pair. - void stlxp(const Register& rs, - const Register& rt, - const Register& rt2, - const MemOperand& dst); - - // Load-acquire exclusive register pair. - void ldaxp(const Register& rt, const Register& rt2, const MemOperand& src); - - // Store-release byte. - void stlrb(const Register& rt, const MemOperand& dst); - - // Store-release half-word. - void stlrh(const Register& rt, const MemOperand& dst); - - // Store-release register. - void stlr(const Register& rt, const MemOperand& dst); - - // Load-acquire byte. - void ldarb(const Register& rt, const MemOperand& src); - - // Load-acquire half-word. - void ldarh(const Register& rt, const MemOperand& src); - - // Load-acquire register. - void ldar(const Register& rt, const MemOperand& src); - - // Prefetch memory. - void prfm(PrefetchOperation op, const MemOperand& addr, - LoadStoreScalingOption option = PreferScaledOffset); - - // Prefetch memory (with unscaled offset). - void prfum(PrefetchOperation op, const MemOperand& addr, - LoadStoreScalingOption option = PreferUnscaledOffset); - - // Prefetch memory in the literal pool. - void prfm(PrefetchOperation op, RawLiteral* literal); - - // Prefetch from pc + imm19 << 2. - void prfm(PrefetchOperation op, int imm19); - - // Move instructions. The default shift of -1 indicates that the move - // instruction will calculate an appropriate 16-bit immediate and left shift - // that is equal to the 64-bit immediate argument. If an explicit left shift - // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value. - // - // For movk, an explicit shift can be used to indicate which half word should - // be overwritten, eg. movk(x0, 0, 0) will overwrite the least-significant - // half word with zero, whereas movk(x0, 0, 48) will overwrite the - // most-significant. - - // Move immediate and keep. - void movk(const Register& rd, uint64_t imm, int shift = -1) { - MoveWide(rd, imm, shift, MOVK); - } - - // Move inverted immediate. - void movn(const Register& rd, uint64_t imm, int shift = -1) { - MoveWide(rd, imm, shift, MOVN); - } - - // Move immediate. - void movz(const Register& rd, uint64_t imm, int shift = -1) { - MoveWide(rd, imm, shift, MOVZ); - } - - // Misc instructions. - // Monitor debug-mode breakpoint. - void brk(int code); - - // Halting debug-mode breakpoint. - void hlt(int code); - - // Generate exception targeting EL1. - void svc(int code); - - // Move register to register. - void mov(const Register& rd, const Register& rn); - - // Move inverted operand to register. - void mvn(const Register& rd, const Operand& operand); - - // System instructions. - // Move to register from system register. - void mrs(const Register& rt, SystemRegister sysreg); - - // Move from register to system register. - void msr(SystemRegister sysreg, const Register& rt); - - // System instruction. 
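The shift convention documented above is how a full 64-bit constant is normally materialized: one movz for the first non-zero half-word, then up to three movk to patch in the others. A small illustrative sketch of that decomposition (one reasonable plan; a real macro assembler may also use movn for mostly-ones values):

```cpp
#include <cstdint>
#include <cstdio>

// Print a movz/movk sequence for a 64-bit constant: movz writes one half-word
// and zeroes the rest, each movk then overwrites one further half-word.
void print_mov_sequence(uint64_t value) {
    bool emitted = false;
    for (int shift = 0; shift < 64; shift += 16) {
        unsigned half = static_cast<uint16_t>(value >> shift);
        if (half == 0) continue;  // nothing to patch into this half-word
        std::printf("%s x0, #0x%x, lsl #%d\n",
                    emitted ? "movk" : "movz", half, shift);
        emitted = true;
    }
    if (!emitted) std::printf("movz x0, #0\n");  // the constant is zero
}

int main() {
    print_mov_sequence(0x123400005678ULL);  // movz #0x5678; movk #0x1234, lsl #32
}
```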
- void sys(int op1, int crn, int crm, int op2, const Register& rt = xzr); - - // System instruction with pre-encoded op (op1:crn:crm:op2). - void sys(int op, const Register& rt = xzr); - - // System data cache operation. - void dc(DataCacheOp op, const Register& rt); - - // System instruction cache operation. - void ic(InstructionCacheOp op, const Register& rt); - - // System hint. - void hint(SystemHint code); - - // Clear exclusive monitor. - void clrex(int imm4 = 0xf); - - // Data memory barrier. - void dmb(BarrierDomain domain, BarrierType type); - - // Data synchronization barrier. - void dsb(BarrierDomain domain, BarrierType type); - - // Instruction synchronization barrier. - void isb(); - - // Alias for system instructions. - // No-op. - void nop() { - hint(NOP); - } - - // FP and NEON instructions. - // Move double precision immediate to FP register. - void fmov(const VRegister& vd, double imm); - - // Move single precision immediate to FP register. - void fmov(const VRegister& vd, float imm); - - // Move FP register to register. - void fmov(const Register& rd, const VRegister& fn); - - // Move register to FP register. - void fmov(const VRegister& vd, const Register& rn); - - // Move FP register to FP register. - void fmov(const VRegister& vd, const VRegister& fn); - - // Move 64-bit register to top half of 128-bit FP register. - void fmov(const VRegister& vd, int index, const Register& rn); - - // Move top half of 128-bit FP register to 64-bit register. - void fmov(const Register& rd, const VRegister& vn, int index); - - // FP add. - void fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); - - // FP subtract. - void fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm); - - // FP multiply. - void fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm); - - // FP fused multiply-add. - void fmadd(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - const VRegister& va); - - // FP fused multiply-subtract. - void fmsub(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - const VRegister& va); - - // FP fused multiply-add and negate. - void fnmadd(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - const VRegister& va); - - // FP fused multiply-subtract and negate. - void fnmsub(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - const VRegister& va); - - // FP multiply-negate scalar. - void fnmul(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // FP reciprocal exponent scalar. - void frecpx(const VRegister& vd, - const VRegister& vn); - - // FP divide. - void fdiv(const VRegister& vd, const VRegister& fn, const VRegister& vm); - - // FP maximum. - void fmax(const VRegister& vd, const VRegister& fn, const VRegister& vm); - - // FP minimum. - void fmin(const VRegister& vd, const VRegister& fn, const VRegister& vm); - - // FP maximum number. - void fmaxnm(const VRegister& vd, const VRegister& fn, const VRegister& vm); - - // FP minimum number. - void fminnm(const VRegister& vd, const VRegister& fn, const VRegister& vm); - - // FP absolute. - void fabs(const VRegister& vd, const VRegister& vn); - - // FP negate. - void fneg(const VRegister& vd, const VRegister& vn); - - // FP square root. - void fsqrt(const VRegister& vd, const VRegister& vn); - - // FP round to integer, nearest with ties to away. - void frinta(const VRegister& vd, const VRegister& vn); - - // FP round to integer, implicit rounding. 
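The fused forms above perform the multiply and the add as a single rounding step, following the A64 operand convention in which the addend is the last argument. A rough scalar mapping onto the C++ standard library, glossing over NaN and flag details:

```cpp
#include <cmath>

// FMADD fd, fn, fm, fa computes fa + fn*fm with a single final rounding.
double fmadd_model(double fn, double fm, double fa) { return std::fma(fn, fm, fa); }

// FMAXNM follows the IEEE maxNum rule: a quiet NaN loses to a number,
// which is also how std::fmax treats a single NaN operand.
double fmaxnm_model(double a, double b) { return std::fmax(a, b); }
```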
- void frinti(const VRegister& vd, const VRegister& vn); - - // FP round to integer, toward minus infinity. - void frintm(const VRegister& vd, const VRegister& vn); - - // FP round to integer, nearest with ties to even. - void frintn(const VRegister& vd, const VRegister& vn); - - // FP round to integer, toward plus infinity. - void frintp(const VRegister& vd, const VRegister& vn); - - // FP round to integer, exact, implicit rounding. - void frintx(const VRegister& vd, const VRegister& vn); - - // FP round to integer, towards zero. - void frintz(const VRegister& vd, const VRegister& vn); - - void FPCompareMacro(const VRegister& vn, - double value, - FPTrapFlags trap); - - void FPCompareMacro(const VRegister& vn, - const VRegister& vm, - FPTrapFlags trap); - - // FP compare registers. - void fcmp(const VRegister& vn, const VRegister& vm); - - // FP compare immediate. - void fcmp(const VRegister& vn, double value); - - void FPCCompareMacro(const VRegister& vn, - const VRegister& vm, - StatusFlags nzcv, - Condition cond, - FPTrapFlags trap); - - // FP conditional compare. - void fccmp(const VRegister& vn, - const VRegister& vm, - StatusFlags nzcv, - Condition cond); - - // FP signaling compare registers. - void fcmpe(const VRegister& vn, const VRegister& vm); - - // FP signaling compare immediate. - void fcmpe(const VRegister& vn, double value); - - // FP conditional signaling compare. - void fccmpe(const VRegister& vn, - const VRegister& vm, - StatusFlags nzcv, - Condition cond); - - // FP conditional select. - void fcsel(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - Condition cond); - - // Common FP Convert functions. - void NEONFPConvertToInt(const Register& rd, - const VRegister& vn, - Instr op); - void NEONFPConvertToInt(const VRegister& vd, - const VRegister& vn, - Instr op); - - // FP convert between precisions. - void fcvt(const VRegister& vd, const VRegister& vn); - - // FP convert to higher precision. - void fcvtl(const VRegister& vd, const VRegister& vn); - - // FP convert to higher precision (second part). - void fcvtl2(const VRegister& vd, const VRegister& vn); - - // FP convert to lower precision. - void fcvtn(const VRegister& vd, const VRegister& vn); - - // FP convert to lower prevision (second part). - void fcvtn2(const VRegister& vd, const VRegister& vn); - - // FP convert to lower precision, rounding to odd. - void fcvtxn(const VRegister& vd, const VRegister& vn); - - // FP convert to lower precision, rounding to odd (second part). - void fcvtxn2(const VRegister& vd, const VRegister& vn); - - // FP convert to signed integer, nearest with ties to away. - void fcvtas(const Register& rd, const VRegister& vn); - - // FP convert to unsigned integer, nearest with ties to away. - void fcvtau(const Register& rd, const VRegister& vn); - - // FP convert to signed integer, nearest with ties to away. - void fcvtas(const VRegister& vd, const VRegister& vn); - - // FP convert to unsigned integer, nearest with ties to away. - void fcvtau(const VRegister& vd, const VRegister& vn); - - // FP convert to signed integer, round towards -infinity. - void fcvtms(const Register& rd, const VRegister& vn); - - // FP convert to unsigned integer, round towards -infinity. - void fcvtmu(const Register& rd, const VRegister& vn); - - // FP convert to signed integer, round towards -infinity. - void fcvtms(const VRegister& vd, const VRegister& vn); - - // FP convert to unsigned integer, round towards -infinity. 
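The frint* variants above differ only in rounding mode (frinti and frintx use the ambient FPCR mode and are not modelled here). A hedged mapping onto standard C++, ignoring exception flags:

```cpp
#include <cmath>

double frintz_model(double v) { return std::trunc(v); }  // toward zero
double frintm_model(double v) { return std::floor(v); }  // toward minus infinity
double frintp_model(double v) { return std::ceil(v);  }  // toward plus infinity
double frinta_model(double v) { return std::round(v); }  // nearest, ties away from zero
// Nearest with ties to even: std::nearbyint under the default FE_TONEAREST mode.
double frintn_model(double v) { return std::nearbyint(v); }
```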
- void fcvtmu(const VRegister& vd, const VRegister& vn); - - // FP convert to signed integer, nearest with ties to even. - void fcvtns(const Register& rd, const VRegister& vn); - - // FP convert to unsigned integer, nearest with ties to even. - void fcvtnu(const Register& rd, const VRegister& vn); - - // FP convert to signed integer, nearest with ties to even. - void fcvtns(const VRegister& rd, const VRegister& vn); - - // FP convert to unsigned integer, nearest with ties to even. - void fcvtnu(const VRegister& rd, const VRegister& vn); - - // FP convert to signed integer or fixed-point, round towards zero. - void fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0); - - // FP convert to unsigned integer or fixed-point, round towards zero. - void fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0); - - // FP convert to signed integer or fixed-point, round towards zero. - void fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0); - - // FP convert to unsigned integer or fixed-point, round towards zero. - void fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0); - - // FP convert to signed integer, round towards +infinity. - void fcvtps(const Register& rd, const VRegister& vn); - - // FP convert to unsigned integer, round towards +infinity. - void fcvtpu(const Register& rd, const VRegister& vn); - - // FP convert to signed integer, round towards +infinity. - void fcvtps(const VRegister& vd, const VRegister& vn); - - // FP convert to unsigned integer, round towards +infinity. - void fcvtpu(const VRegister& vd, const VRegister& vn); - - // Convert signed integer or fixed point to FP. - void scvtf(const VRegister& fd, const Register& rn, int fbits = 0); - - // Convert unsigned integer or fixed point to FP. - void ucvtf(const VRegister& fd, const Register& rn, int fbits = 0); - - // Convert signed integer or fixed-point to FP. - void scvtf(const VRegister& fd, const VRegister& vn, int fbits = 0); - - // Convert unsigned integer or fixed-point to FP. - void ucvtf(const VRegister& fd, const VRegister& vn, int fbits = 0); - - // Unsigned absolute difference. - void uabd(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed absolute difference. - void sabd(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned absolute difference and accumulate. - void uaba(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed absolute difference and accumulate. - void saba(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Add. - void add(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Subtract. - void sub(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned halving add. - void uhadd(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed halving add. - void shadd(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned rounding halving add. - void urhadd(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed rounding halving add. - void srhadd(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned halving sub. - void uhsub(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed halving sub. - void shsub(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned saturating add. 
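A non-zero fbits in the scvtf/ucvtf overloads above selects the fixed-point form: the integer source is treated as having fbits fractional bits. A minimal illustrative model (assuming 0 < fbits < 64):

```cpp
#include <cstdint>

// Model of SCVTF with a fixed-point source: value / 2^fbits.
double scvtf_fixed_model(int64_t fixed, int fbits) {
    return static_cast<double>(fixed) / static_cast<double>(UINT64_C(1) << fbits);
}

// Example: 0x18000 in Q16 (fbits = 16) is 1.5.
// scvtf_fixed_model(0x18000, 16) == 1.5
```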
- void uqadd(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed saturating add. - void sqadd(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned saturating subtract. - void uqsub(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed saturating subtract. - void sqsub(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Add pairwise. - void addp(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Add pair of elements scalar. - void addp(const VRegister& vd, - const VRegister& vn); - - // Multiply-add to accumulator. - void mla(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Multiply-subtract to accumulator. - void mls(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Multiply. - void mul(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Multiply by scalar element. - void mul(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Multiply-add by scalar element. - void mla(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Multiply-subtract by scalar element. - void mls(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Signed long multiply-add by scalar element. - void smlal(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Signed long multiply-add by scalar element (second part). - void smlal2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Unsigned long multiply-add by scalar element. - void umlal(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Unsigned long multiply-add by scalar element (second part). - void umlal2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Signed long multiply-sub by scalar element. - void smlsl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Signed long multiply-sub by scalar element (second part). - void smlsl2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Unsigned long multiply-sub by scalar element. - void umlsl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Unsigned long multiply-sub by scalar element (second part). - void umlsl2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Signed long multiply by scalar element. - void smull(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Signed long multiply by scalar element (second part). - void smull2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Unsigned long multiply by scalar element. - void umull(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Unsigned long multiply by scalar element (second part). - void umull2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Signed saturating double long multiply by element. - void sqdmull(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Signed saturating double long multiply by element (second part). 
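The uqadd/sqadd/uqsub/sqsub group near the top of this block saturates per lane instead of wrapping. An illustrative model for 8-bit lanes (lane size chosen only for brevity):

```cpp
#include <algorithm>
#include <cstdint>

// Unsigned saturating add: clamp to 255 instead of wrapping.
uint8_t uqadd8_model(uint8_t a, uint8_t b) {
    unsigned sum = unsigned(a) + unsigned(b);
    return static_cast<uint8_t>(std::min(sum, 255u));
}

// Signed saturating subtract: clamp to [-128, 127].
int8_t sqsub8_model(int8_t a, int8_t b) {
    int diff = int(a) - int(b);
    return static_cast<int8_t>(std::clamp(diff, -128, 127));
}
```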
- void sqdmull2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Signed saturating doubling long multiply-add by element. - void sqdmlal(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Signed saturating doubling long multiply-add by element (second part). - void sqdmlal2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Signed saturating doubling long multiply-sub by element. - void sqdmlsl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Signed saturating doubling long multiply-sub by element (second part). - void sqdmlsl2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Compare equal. - void cmeq(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Compare signed greater than or equal. - void cmge(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Compare signed greater than. - void cmgt(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Compare unsigned higher. - void cmhi(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Compare unsigned higher or same. - void cmhs(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Compare bitwise test bits nonzero. - void cmtst(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Compare bitwise to zero. - void cmeq(const VRegister& vd, - const VRegister& vn, - int value); - - // Compare signed greater than or equal to zero. - void cmge(const VRegister& vd, - const VRegister& vn, - int value); - - // Compare signed greater than zero. - void cmgt(const VRegister& vd, - const VRegister& vn, - int value); - - // Compare signed less than or equal to zero. - void cmle(const VRegister& vd, - const VRegister& vn, - int value); - - // Compare signed less than zero. - void cmlt(const VRegister& vd, - const VRegister& vn, - int value); - - // Signed shift left by register. - void sshl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned shift left by register. - void ushl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed saturating shift left by register. - void sqshl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned saturating shift left by register. - void uqshl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed rounding shift left by register. - void srshl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned rounding shift left by register. - void urshl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed saturating rounding shift left by register. - void sqrshl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned saturating rounding shift left by register. - void uqrshl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Bitwise and. - void and_(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Bitwise or. - void orr(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Bitwise or immediate. - void orr(const VRegister& vd, - const int imm8, - const int left_shift = 0); - - // Move register to register. - void mov(const VRegister& vd, - const VRegister& vn); - - // Bitwise orn. 
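Unlike the scalar compares, the SIMD cmeq/cmgt/cmhi family above sets no flags; each lane is filled with all ones when the comparison holds and all zeros otherwise, so the results feed directly into the bitwise operations that follow. Illustrative 32-bit-lane model:

```cpp
#include <cstdint>

// Per-lane compare results are masks, not booleans.
uint32_t cmeq32_model(uint32_t a, uint32_t b)  { return a == b ? 0xffffffffu : 0u; }
uint32_t cmhi32_model(uint32_t a, uint32_t b)  { return a > b ? 0xffffffffu : 0u; }   // unsigned higher
uint32_t cmgt32_model(int32_t a, int32_t b)    { return a > b ? 0xffffffffu : 0u; }   // signed greater than
uint32_t cmtst32_model(uint32_t a, uint32_t b) { return (a & b) != 0 ? 0xffffffffu : 0u; }
```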
- void orn(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Bitwise eor. - void eor(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Bit clear immediate. - void bic(const VRegister& vd, - const int imm8, - const int left_shift = 0); - - // Bit clear. - void bic(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Bitwise insert if false. - void bif(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Bitwise insert if true. - void bit(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Bitwise select. - void bsl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Polynomial multiply. - void pmul(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Vector move immediate. - void movi(const VRegister& vd, - const uint64_t imm, - Shift shift = LSL, - const int shift_amount = 0); - - // Bitwise not. - void mvn(const VRegister& vd, - const VRegister& vn); - - // Vector move inverted immediate. - void mvni(const VRegister& vd, - const int imm8, - Shift shift = LSL, - const int shift_amount = 0); - - // Signed saturating accumulate of unsigned value. - void suqadd(const VRegister& vd, - const VRegister& vn); - - // Unsigned saturating accumulate of signed value. - void usqadd(const VRegister& vd, - const VRegister& vn); - - // Absolute value. - void abs(const VRegister& vd, - const VRegister& vn); - - // Signed saturating absolute value. - void sqabs(const VRegister& vd, - const VRegister& vn); - - // Negate. - void neg(const VRegister& vd, - const VRegister& vn); - - // Signed saturating negate. - void sqneg(const VRegister& vd, - const VRegister& vn); - - // Bitwise not. - void not_(const VRegister& vd, - const VRegister& vn); - - // Extract narrow. - void xtn(const VRegister& vd, - const VRegister& vn); - - // Extract narrow (second part). - void xtn2(const VRegister& vd, - const VRegister& vn); - - // Signed saturating extract narrow. - void sqxtn(const VRegister& vd, - const VRegister& vn); - - // Signed saturating extract narrow (second part). - void sqxtn2(const VRegister& vd, - const VRegister& vn); - - // Unsigned saturating extract narrow. - void uqxtn(const VRegister& vd, - const VRegister& vn); - - // Unsigned saturating extract narrow (second part). - void uqxtn2(const VRegister& vd, - const VRegister& vn); - - // Signed saturating extract unsigned narrow. - void sqxtun(const VRegister& vd, - const VRegister& vn); - - // Signed saturating extract unsigned narrow (second part). - void sqxtun2(const VRegister& vd, - const VRegister& vn); - - // Extract vector from pair of vectors. - void ext(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int index); - - // Duplicate vector element to vector or scalar. - void dup(const VRegister& vd, - const VRegister& vn, - int vn_index); - - // Move vector element to scalar. - void mov(const VRegister& vd, - const VRegister& vn, - int vn_index); - - // Duplicate general-purpose register to vector. - void dup(const VRegister& vd, - const Register& rn); - - // Insert vector element from another vector element. - void ins(const VRegister& vd, - int vd_index, - const VRegister& vn, - int vn_index); - - // Move vector element to another vector element. - void mov(const VRegister& vd, - int vd_index, - const VRegister& vn, - int vn_index); - - // Insert vector element from general-purpose register. 
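bsl above is the bitwise select that pairs with those compare masks: each destination bit chooses between the two sources. A one-line model (bit and bif are the related insert-if-true/false forms and are not shown):

```cpp
#include <cstdint>

// BSL: where the current vd (selector) bit is 1 take vn, otherwise take vm.
uint64_t bsl_model(uint64_t vd_selector, uint64_t vn, uint64_t vm) {
    return (vn & vd_selector) | (vm & ~vd_selector);
}
// Example use: bsl_model(cmgt_mask, a, b) yields a per-lane max of a and b.
```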
- void ins(const VRegister& vd, - int vd_index, - const Register& rn); - - // Move general-purpose register to a vector element. - void mov(const VRegister& vd, - int vd_index, - const Register& rn); - - // Unsigned move vector element to general-purpose register. - void umov(const Register& rd, - const VRegister& vn, - int vn_index); - - // Move vector element to general-purpose register. - void mov(const Register& rd, - const VRegister& vn, - int vn_index); - - // Signed move vector element to general-purpose register. - void smov(const Register& rd, - const VRegister& vn, - int vn_index); - - // One-element structure load to one register. - void ld1(const VRegister& vt, - const MemOperand& src); - - // One-element structure load to two registers. - void ld1(const VRegister& vt, - const VRegister& vt2, - const MemOperand& src); - - // One-element structure load to three registers. - void ld1(const VRegister& vt, - const VRegister& vt2, - const VRegister& vt3, - const MemOperand& src); - - // One-element structure load to four registers. - void ld1(const VRegister& vt, - const VRegister& vt2, - const VRegister& vt3, - const VRegister& vt4, - const MemOperand& src); - - // One-element single structure load to one lane. - void ld1(const VRegister& vt, - int lane, - const MemOperand& src); - - // One-element single structure load to all lanes. - void ld1r(const VRegister& vt, - const MemOperand& src); - - // Two-element structure load. - void ld2(const VRegister& vt, - const VRegister& vt2, - const MemOperand& src); - - // Two-element single structure load to one lane. - void ld2(const VRegister& vt, - const VRegister& vt2, - int lane, - const MemOperand& src); - - // Two-element single structure load to all lanes. - void ld2r(const VRegister& vt, - const VRegister& vt2, - const MemOperand& src); - - // Three-element structure load. - void ld3(const VRegister& vt, - const VRegister& vt2, - const VRegister& vt3, - const MemOperand& src); - - // Three-element single structure load to one lane. - void ld3(const VRegister& vt, - const VRegister& vt2, - const VRegister& vt3, - int lane, - const MemOperand& src); - - // Three-element single structure load to all lanes. - void ld3r(const VRegister& vt, - const VRegister& vt2, - const VRegister& vt3, - const MemOperand& src); - - // Four-element structure load. - void ld4(const VRegister& vt, - const VRegister& vt2, - const VRegister& vt3, - const VRegister& vt4, - const MemOperand& src); - - // Four-element single structure load to one lane. - void ld4(const VRegister& vt, - const VRegister& vt2, - const VRegister& vt3, - const VRegister& vt4, - int lane, - const MemOperand& src); - - // Four-element single structure load to all lanes. - void ld4r(const VRegister& vt, - const VRegister& vt2, - const VRegister& vt3, - const VRegister& vt4, - const MemOperand& src); - - // Count leading sign bits. - void cls(const VRegister& vd, - const VRegister& vn); - - // Count leading zero bits (vector). - void clz(const VRegister& vd, - const VRegister& vn); - - // Population count per byte. - void cnt(const VRegister& vd, - const VRegister& vn); - - // Reverse bit order. - void rbit(const VRegister& vd, - const VRegister& vn); - - // Reverse elements in 16-bit halfwords. - void rev16(const VRegister& vd, - const VRegister& vn); - - // Reverse elements in 32-bit words. - void rev32(const VRegister& vd, - const VRegister& vn); - - // Reverse elements in 64-bit doublewords. 
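The multi-register ld2/ld3/ld4 forms above de-interleave as they load: consecutive structure elements in memory are distributed across the destination registers. An illustrative model of the ld3 pattern for byte data such as packed RGB:

```cpp
#include <cstddef>
#include <cstdint>

// Model of LD3 {vt.8b, vt2.8b, vt3.8b}, [src]: read 24 interleaved bytes and
// split them into three 8-byte lane sets.
void ld3_model(const uint8_t* src, uint8_t r[8], uint8_t g[8], uint8_t b[8]) {
    for (size_t i = 0; i < 8; ++i) {
        r[i] = src[3 * i + 0];
        g[i] = src[3 * i + 1];
        b[i] = src[3 * i + 2];
    }
}
```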
- void rev64(const VRegister& vd, - const VRegister& vn); - - // Unsigned reciprocal square root estimate. - void ursqrte(const VRegister& vd, - const VRegister& vn); - - // Unsigned reciprocal estimate. - void urecpe(const VRegister& vd, - const VRegister& vn); - - // Signed pairwise long add. - void saddlp(const VRegister& vd, - const VRegister& vn); - - // Unsigned pairwise long add. - void uaddlp(const VRegister& vd, - const VRegister& vn); - - // Signed pairwise long add and accumulate. - void sadalp(const VRegister& vd, - const VRegister& vn); - - // Unsigned pairwise long add and accumulate. - void uadalp(const VRegister& vd, - const VRegister& vn); - - // Shift left by immediate. - void shl(const VRegister& vd, - const VRegister& vn, - int shift); - - // Signed saturating shift left by immediate. - void sqshl(const VRegister& vd, - const VRegister& vn, - int shift); - - // Signed saturating shift left unsigned by immediate. - void sqshlu(const VRegister& vd, - const VRegister& vn, - int shift); - - // Unsigned saturating shift left by immediate. - void uqshl(const VRegister& vd, - const VRegister& vn, - int shift); - - // Signed shift left long by immediate. - void sshll(const VRegister& vd, - const VRegister& vn, - int shift); - - // Signed shift left long by immediate (second part). - void sshll2(const VRegister& vd, - const VRegister& vn, - int shift); - - // Signed extend long. - void sxtl(const VRegister& vd, - const VRegister& vn); - - // Signed extend long (second part). - void sxtl2(const VRegister& vd, - const VRegister& vn); - - // Unsigned shift left long by immediate. - void ushll(const VRegister& vd, - const VRegister& vn, - int shift); - - // Unsigned shift left long by immediate (second part). - void ushll2(const VRegister& vd, - const VRegister& vn, - int shift); - - // Shift left long by element size. - void shll(const VRegister& vd, - const VRegister& vn, - int shift); - - // Shift left long by element size (second part). - void shll2(const VRegister& vd, - const VRegister& vn, - int shift); - - // Unsigned extend long. - void uxtl(const VRegister& vd, - const VRegister& vn); - - // Unsigned extend long (second part). - void uxtl2(const VRegister& vd, - const VRegister& vn); - - // Shift left by immediate and insert. - void sli(const VRegister& vd, - const VRegister& vn, - int shift); - - // Shift right by immediate and insert. - void sri(const VRegister& vd, - const VRegister& vn, - int shift); - - // Signed maximum. - void smax(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed pairwise maximum. - void smaxp(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Add across vector. - void addv(const VRegister& vd, - const VRegister& vn); - - // Signed add long across vector. - void saddlv(const VRegister& vd, - const VRegister& vn); - - // Unsigned add long across vector. - void uaddlv(const VRegister& vd, - const VRegister& vn); - - // FP maximum number across vector. - void fmaxnmv(const VRegister& vd, - const VRegister& vn); - - // FP maximum across vector. - void fmaxv(const VRegister& vd, - const VRegister& vn); - - // FP minimum number across vector. - void fminnmv(const VRegister& vd, - const VRegister& vn); - - // FP minimum across vector. - void fminv(const VRegister& vd, - const VRegister& vn); - - // Signed maximum across vector. - void smaxv(const VRegister& vd, - const VRegister& vn); - - // Signed minimum. 
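sshll/ushll above widen every lane while shifting left, and sxtl/uxtl are the shift-by-zero special case. Illustrative model widening 8-bit lanes to 16 bits:

```cpp
#include <cstddef>
#include <cstdint>

// Model of USHLL vd.8h, vn.8b, #shift: zero-extend each byte, then shift left.
void ushll_model(const uint8_t in[8], uint16_t out[8], int shift) {
    for (size_t i = 0; i < 8; ++i)
        out[i] = static_cast<uint16_t>(static_cast<uint16_t>(in[i]) << shift);
}

// UXTL is the same operation with a shift of zero.
void uxtl_model(const uint8_t in[8], uint16_t out[8]) { ushll_model(in, out, 0); }
```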
- void smin(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed minimum pairwise. - void sminp(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed minimum across vector. - void sminv(const VRegister& vd, - const VRegister& vn); - - // One-element structure store from one register. - void st1(const VRegister& vt, - const MemOperand& src); - - // One-element structure store from two registers. - void st1(const VRegister& vt, - const VRegister& vt2, - const MemOperand& src); - - // One-element structure store from three registers. - void st1(const VRegister& vt, - const VRegister& vt2, - const VRegister& vt3, - const MemOperand& src); - - // One-element structure store from four registers. - void st1(const VRegister& vt, - const VRegister& vt2, - const VRegister& vt3, - const VRegister& vt4, - const MemOperand& src); - - // One-element single structure store from one lane. - void st1(const VRegister& vt, - int lane, - const MemOperand& src); - - // Two-element structure store from two registers. - void st2(const VRegister& vt, - const VRegister& vt2, - const MemOperand& src); - - // Two-element single structure store from two lanes. - void st2(const VRegister& vt, - const VRegister& vt2, - int lane, - const MemOperand& src); - - // Three-element structure store from three registers. - void st3(const VRegister& vt, - const VRegister& vt2, - const VRegister& vt3, - const MemOperand& src); - - // Three-element single structure store from three lanes. - void st3(const VRegister& vt, - const VRegister& vt2, - const VRegister& vt3, - int lane, - const MemOperand& src); - - // Four-element structure store from four registers. - void st4(const VRegister& vt, - const VRegister& vt2, - const VRegister& vt3, - const VRegister& vt4, - const MemOperand& src); - - // Four-element single structure store from four lanes. - void st4(const VRegister& vt, - const VRegister& vt2, - const VRegister& vt3, - const VRegister& vt4, - int lane, - const MemOperand& src); - - // Unsigned add long. - void uaddl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned add long (second part). - void uaddl2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned add wide. - void uaddw(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned add wide (second part). - void uaddw2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed add long. - void saddl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed add long (second part). - void saddl2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed add wide. - void saddw(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed add wide (second part). - void saddw2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned subtract long. - void usubl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned subtract long (second part). - void usubl2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned subtract wide. - void usubw(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned subtract wide (second part). - void usubw2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed subtract long. 
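The add/subtract forms above come in a "long" flavour (two narrow sources, wide result) and a "wide" flavour (one wide source plus one narrow source). A per-lane illustration with 8-bit and 16-bit lanes:

```cpp
#include <cstdint>

// UADDL: 8-bit + 8-bit producing a 16-bit lane, so no overflow is possible.
uint16_t uaddl_lane_model(uint8_t a, uint8_t b) {
    return static_cast<uint16_t>(static_cast<uint16_t>(a) + b);
}

// UADDW: 16-bit + zero-extended 8-bit; the result stays 16 bits wide
// and wraps like the instruction does.
uint16_t uaddw_lane_model(uint16_t a, uint8_t b) {
    return static_cast<uint16_t>(a + b);
}
```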
- void ssubl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed subtract long (second part). - void ssubl2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed integer subtract wide. - void ssubw(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed integer subtract wide (second part). - void ssubw2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned maximum. - void umax(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned pairwise maximum. - void umaxp(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned maximum across vector. - void umaxv(const VRegister& vd, - const VRegister& vn); - - // Unsigned minimum. - void umin(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned pairwise minimum. - void uminp(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned minimum across vector. - void uminv(const VRegister& vd, - const VRegister& vn); - - // Transpose vectors (primary). - void trn1(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Transpose vectors (secondary). - void trn2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unzip vectors (primary). - void uzp1(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unzip vectors (secondary). - void uzp2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Zip vectors (primary). - void zip1(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Zip vectors (secondary). - void zip2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed shift right by immediate. - void sshr(const VRegister& vd, - const VRegister& vn, - int shift); - - // Unsigned shift right by immediate. - void ushr(const VRegister& vd, - const VRegister& vn, - int shift); - - // Signed rounding shift right by immediate. - void srshr(const VRegister& vd, - const VRegister& vn, - int shift); - - // Unsigned rounding shift right by immediate. - void urshr(const VRegister& vd, - const VRegister& vn, - int shift); - - // Signed shift right by immediate and accumulate. - void ssra(const VRegister& vd, - const VRegister& vn, - int shift); - - // Unsigned shift right by immediate and accumulate. - void usra(const VRegister& vd, - const VRegister& vn, - int shift); - - // Signed rounding shift right by immediate and accumulate. - void srsra(const VRegister& vd, - const VRegister& vn, - int shift); - - // Unsigned rounding shift right by immediate and accumulate. - void ursra(const VRegister& vd, - const VRegister& vn, - int shift); - - // Shift right narrow by immediate. - void shrn(const VRegister& vd, - const VRegister& vn, - int shift); - - // Shift right narrow by immediate (second part). - void shrn2(const VRegister& vd, - const VRegister& vn, - int shift); - - // Rounding shift right narrow by immediate. - void rshrn(const VRegister& vd, - const VRegister& vn, - int shift); - - // Rounding shift right narrow by immediate (second part). - void rshrn2(const VRegister& vd, - const VRegister& vn, - int shift); - - // Unsigned saturating shift right narrow by immediate. - void uqshrn(const VRegister& vd, - const VRegister& vn, - int shift); - - // Unsigned saturating shift right narrow by immediate (second part). 
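The narrowing right shifts above combine up to three steps: optional rounding, the shift itself, and a narrowing that either truncates or saturates. Illustrative 16-bit to 8-bit models (assuming 1 <= shift <= 8, as the encodings require):

```cpp
#include <algorithm>
#include <cstdint>

// RSHRN: add the rounding constant, shift right, keep the low 8 bits.
uint8_t rshrn_model(uint16_t x, int shift) {
    uint32_t rounded = (static_cast<uint32_t>(x) + (1u << (shift - 1))) >> shift;
    return static_cast<uint8_t>(rounded);  // plain truncating narrow
}

// UQSHRN: shift right, then saturate to the narrow range instead of truncating.
uint8_t uqshrn_model(uint16_t x, int shift) {
    uint32_t shifted = static_cast<uint32_t>(x) >> shift;
    return static_cast<uint8_t>(std::min(shifted, 255u));
}
```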
- void uqshrn2(const VRegister& vd, - const VRegister& vn, - int shift); - - // Unsigned saturating rounding shift right narrow by immediate. - void uqrshrn(const VRegister& vd, - const VRegister& vn, - int shift); - - // Unsigned saturating rounding shift right narrow by immediate (second part). - void uqrshrn2(const VRegister& vd, - const VRegister& vn, - int shift); - - // Signed saturating shift right narrow by immediate. - void sqshrn(const VRegister& vd, - const VRegister& vn, - int shift); - - // Signed saturating shift right narrow by immediate (second part). - void sqshrn2(const VRegister& vd, - const VRegister& vn, - int shift); - - // Signed saturating rounded shift right narrow by immediate. - void sqrshrn(const VRegister& vd, - const VRegister& vn, - int shift); - - // Signed saturating rounded shift right narrow by immediate (second part). - void sqrshrn2(const VRegister& vd, - const VRegister& vn, - int shift); - - // Signed saturating shift right unsigned narrow by immediate. - void sqshrun(const VRegister& vd, - const VRegister& vn, - int shift); - - // Signed saturating shift right unsigned narrow by immediate (second part). - void sqshrun2(const VRegister& vd, - const VRegister& vn, - int shift); - - // Signed sat rounded shift right unsigned narrow by immediate. - void sqrshrun(const VRegister& vd, - const VRegister& vn, - int shift); - - // Signed sat rounded shift right unsigned narrow by immediate (second part). - void sqrshrun2(const VRegister& vd, - const VRegister& vn, - int shift); - - // FP reciprocal step. - void frecps(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // FP reciprocal estimate. - void frecpe(const VRegister& vd, - const VRegister& vn); - - // FP reciprocal square root estimate. - void frsqrte(const VRegister& vd, - const VRegister& vn); - - // FP reciprocal square root step. - void frsqrts(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed absolute difference and accumulate long. - void sabal(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed absolute difference and accumulate long (second part). - void sabal2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned absolute difference and accumulate long. - void uabal(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned absolute difference and accumulate long (second part). - void uabal2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed absolute difference long. - void sabdl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed absolute difference long (second part). - void sabdl2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned absolute difference long. - void uabdl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned absolute difference long (second part). - void uabdl2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Polynomial multiply long. - void pmull(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Polynomial multiply long (second part). - void pmull2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed long multiply-add. - void smlal(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed long multiply-add (second part). 
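frecpe/frecps above are the usual estimate-and-refine pair: frecpe yields a low-precision 1/x and each frecps result, 2 - a*b, is the Newton-Raphson correction factor. A scalar sketch of one refinement round:

```cpp
// FRECPS computes (2.0 - a*b); multiplying the current estimate by this
// value is one Newton-Raphson step toward 1/a.
double frecps_model(double a, double b) { return 2.0 - a * b; }

// One refinement round: est1 = est0 * (2 - a*est0).
double refine_reciprocal(double a, double est0) { return est0 * frecps_model(a, est0); }

// Example: with a = 3.0 and a crude estimate of 0.3,
// refine_reciprocal(3.0, 0.3) == 0.33, and another round gives ~0.3333.
```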
- void smlal2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned long multiply-add. - void umlal(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned long multiply-add (second part). - void umlal2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed long multiply-sub. - void smlsl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed long multiply-sub (second part). - void smlsl2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned long multiply-sub. - void umlsl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned long multiply-sub (second part). - void umlsl2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed long multiply. - void smull(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed long multiply (second part). - void smull2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed saturating doubling long multiply-add. - void sqdmlal(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed saturating doubling long multiply-add (second part). - void sqdmlal2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed saturating doubling long multiply-subtract. - void sqdmlsl(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed saturating doubling long multiply-subtract (second part). - void sqdmlsl2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed saturating doubling long multiply. - void sqdmull(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed saturating doubling long multiply (second part). - void sqdmull2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed saturating doubling multiply returning high half. - void sqdmulh(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed saturating rounding doubling multiply returning high half. - void sqrdmulh(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Signed saturating doubling multiply element returning high half. - void sqdmulh(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Signed saturating rounding doubling multiply element returning high half. - void sqrdmulh(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // Unsigned long multiply long. - void umull(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Unsigned long multiply (second part). - void umull2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Add narrow returning high half. - void addhn(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Add narrow returning high half (second part). - void addhn2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Rounding add narrow returning high half. - void raddhn(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Rounding add narrow returning high half (second part). - void raddhn2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Subtract narrow returning high half. - void subhn(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Subtract narrow returning high half (second part). 
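sqdmulh/sqrdmulh above are the classic Q15/Q31 fixed-point multiplies: double the product, keep the high half, and saturate the single case that overflows (both inputs equal to the most negative value). A 16-bit lane model, assuming arithmetic right shift of negative values (true on mainstream compilers and guaranteed since C++20):

```cpp
#include <cstdint>

// SQDMULH on 16-bit lanes: saturate((2*a*b) >> 16).
int16_t sqdmulh_model(int16_t a, int16_t b) {
    if (a == INT16_MIN && b == INT16_MIN) return INT16_MAX;  // would be +1.0 in Q15
    int32_t doubled = 2 * static_cast<int32_t>(a) * static_cast<int32_t>(b);
    return static_cast<int16_t>(doubled >> 16);
}

// SQRDMULH adds a rounding constant before taking the high half.
int16_t sqrdmulh_model(int16_t a, int16_t b) {
    if (a == INT16_MIN && b == INT16_MIN) return INT16_MAX;
    int32_t doubled = 2 * static_cast<int32_t>(a) * static_cast<int32_t>(b);
    return static_cast<int16_t>((doubled + (1 << 15)) >> 16);
}
```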
- void subhn2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Rounding subtract narrow returning high half. - void rsubhn(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // Rounding subtract narrow returning high half (second part). - void rsubhn2(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // FP vector multiply accumulate. - void fmla(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // FP vector multiply subtract. - void fmls(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // FP vector multiply extended. - void fmulx(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // FP absolute greater than or equal. - void facge(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // FP absolute greater than. - void facgt(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // FP multiply by element. - void fmul(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // FP fused multiply-add to accumulator by element. - void fmla(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // FP fused multiply-sub from accumulator by element. - void fmls(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // FP multiply extended by element. - void fmulx(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index); - - // FP compare equal. - void fcmeq(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // FP greater than. - void fcmgt(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // FP greater than or equal. - void fcmge(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // FP compare equal to zero. - void fcmeq(const VRegister& vd, - const VRegister& vn, - double imm); - - // FP greater than zero. - void fcmgt(const VRegister& vd, - const VRegister& vn, - double imm); - - // FP greater than or equal to zero. - void fcmge(const VRegister& vd, - const VRegister& vn, - double imm); - - // FP less than or equal to zero. - void fcmle(const VRegister& vd, - const VRegister& vn, - double imm); - - // FP less than to zero. - void fcmlt(const VRegister& vd, - const VRegister& vn, - double imm); - - // FP absolute difference. - void fabd(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // FP pairwise add vector. - void faddp(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // FP pairwise add scalar. - void faddp(const VRegister& vd, - const VRegister& vn); - - // FP pairwise maximum vector. - void fmaxp(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // FP pairwise maximum scalar. - void fmaxp(const VRegister& vd, - const VRegister& vn); - - // FP pairwise minimum vector. - void fminp(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // FP pairwise minimum scalar. - void fminp(const VRegister& vd, - const VRegister& vn); - - // FP pairwise maximum number vector. - void fmaxnmp(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // FP pairwise maximum number scalar. - void fmaxnmp(const VRegister& vd, - const VRegister& vn); - - // FP pairwise minimum number vector. - void fminnmp(const VRegister& vd, - const VRegister& vn, - const VRegister& vm); - - // FP pairwise minimum number scalar. 
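The pairwise forms above (faddp, fmaxp, fminp and their "number" variants) combine adjacent lanes of the two sources rather than matching lanes, with the first source filling the lower half of the result. Illustrative four-lane single-precision model:

```cpp
// Model of FADDP vd.4s, vn.4s, vm.4s: adjacent pairs, vn first then vm.
void faddp_model(const float vn[4], const float vm[4], float vd[4]) {
    vd[0] = vn[0] + vn[1];
    vd[1] = vn[2] + vn[3];
    vd[2] = vm[0] + vm[1];
    vd[3] = vm[2] + vm[3];
}
```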
- void fminnmp(const VRegister& vd, - const VRegister& vn); - - // Emit generic instructions. - // Emit raw instructions into the instruction stream. - void dci(Instr raw_inst) { Emit(raw_inst); } - - // Emit 32 bits of data into the instruction stream. - void dc32(uint32_t data) { - VIXL_ASSERT(buffer_monitor_ > 0); - buffer_->Emit32(data); - } - - // Emit 64 bits of data into the instruction stream. - void dc64(uint64_t data) { - VIXL_ASSERT(buffer_monitor_ > 0); - buffer_->Emit64(data); - } - - // Copy a string into the instruction stream, including the terminating NULL - // character. The instruction pointer is then aligned correctly for - // subsequent instructions. - void EmitString(const char * string) { - VIXL_ASSERT(string != NULL); - VIXL_ASSERT(buffer_monitor_ > 0); - - buffer_->EmitString(string); - buffer_->Align(); - } - - // Code generation helpers. - - // Register encoding. - static Instr Rd(CPURegister rd) { - VIXL_ASSERT(rd.code() != kSPRegInternalCode); - return rd.code() << Rd_offset; - } - - static Instr Rn(CPURegister rn) { - VIXL_ASSERT(rn.code() != kSPRegInternalCode); - return rn.code() << Rn_offset; - } - - static Instr Rm(CPURegister rm) { - VIXL_ASSERT(rm.code() != kSPRegInternalCode); - return rm.code() << Rm_offset; - } - - static Instr RmNot31(CPURegister rm) { - VIXL_ASSERT(rm.code() != kSPRegInternalCode); - VIXL_ASSERT(!rm.IsZero()); - return Rm(rm); - } - - static Instr Ra(CPURegister ra) { - VIXL_ASSERT(ra.code() != kSPRegInternalCode); - return ra.code() << Ra_offset; - } - - static Instr Rt(CPURegister rt) { - VIXL_ASSERT(rt.code() != kSPRegInternalCode); - return rt.code() << Rt_offset; - } - - static Instr Rt2(CPURegister rt2) { - VIXL_ASSERT(rt2.code() != kSPRegInternalCode); - return rt2.code() << Rt2_offset; - } - - static Instr Rs(CPURegister rs) { - VIXL_ASSERT(rs.code() != kSPRegInternalCode); - return rs.code() << Rs_offset; - } - - // These encoding functions allow the stack pointer to be encoded, and - // disallow the zero register. - static Instr RdSP(Register rd) { - VIXL_ASSERT(!rd.IsZero()); - return (rd.code() & kRegCodeMask) << Rd_offset; - } - - static Instr RnSP(Register rn) { - VIXL_ASSERT(!rn.IsZero()); - return (rn.code() & kRegCodeMask) << Rn_offset; - } - - // Flags encoding. - static Instr Flags(FlagsUpdate S) { - if (S == SetFlags) { - return 1 << FlagsUpdate_offset; - } else if (S == LeaveFlags) { - return 0 << FlagsUpdate_offset; - } - VIXL_UNREACHABLE(); - return 0; - } - - static Instr Cond(Condition cond) { - return cond << Condition_offset; - } - - // PC-relative address encoding. - static Instr ImmPCRelAddress(int imm21) { - VIXL_ASSERT(is_int21(imm21)); - Instr imm = static_cast(truncate_to_int21(imm21)); - Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset; - Instr immlo = imm << ImmPCRelLo_offset; - return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask); - } - - // Branch encoding. 
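The Rd/Rn/Rm helpers above each shift a 5-bit register code into its field, and an instruction word is then just an opcode constant OR-ed with those fields. A self-contained sketch of the same composition pattern; the field offsets and the ADD opcode below are quoted from the A64 architecture for illustration, not taken from this header:

```cpp
#include <cstdint>

using Instr = uint32_t;

// Standard A64 register field positions: Rd at bit 0, Rn at bit 5, Rm at bit 16.
constexpr int kRdOffset = 0, kRnOffset = 5, kRmOffset = 16;

constexpr Instr RdField(unsigned code) { return static_cast<Instr>(code) << kRdOffset; }
constexpr Instr RnField(unsigned code) { return static_cast<Instr>(code) << kRnOffset; }
constexpr Instr RmField(unsigned code) { return static_cast<Instr>(code) << kRmOffset; }

// Compose "add x0, x1, x2": 0x8b000000 is ADD (shifted register, 64-bit).
constexpr Instr kAddX0X1X2 = 0x8b000000u | RdField(0) | RnField(1) | RmField(2);
static_assert(kAddX0X1X2 == 0x8b020020u, "add x0, x1, x2");
```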
- static Instr ImmUncondBranch(int imm26) { - VIXL_ASSERT(is_int26(imm26)); - return truncate_to_int26(imm26) << ImmUncondBranch_offset; - } - - static Instr ImmCondBranch(int imm19) { - VIXL_ASSERT(is_int19(imm19)); - return truncate_to_int19(imm19) << ImmCondBranch_offset; - } - - static Instr ImmCmpBranch(int imm19) { - VIXL_ASSERT(is_int19(imm19)); - return truncate_to_int19(imm19) << ImmCmpBranch_offset; - } - - static Instr ImmTestBranch(int imm14) { - VIXL_ASSERT(is_int14(imm14)); - return truncate_to_int14(imm14) << ImmTestBranch_offset; - } - - static Instr ImmTestBranchBit(unsigned bit_pos) { - VIXL_ASSERT(is_uint6(bit_pos)); - // Subtract five from the shift offset, as we need bit 5 from bit_pos. - unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5); - unsigned b40 = bit_pos << ImmTestBranchBit40_offset; - b5 &= ImmTestBranchBit5_mask; - b40 &= ImmTestBranchBit40_mask; - return b5 | b40; - } - - // Data Processing encoding. - static Instr SF(Register rd) { - return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits; - } - - static Instr ImmAddSub(int imm) { - VIXL_ASSERT(IsImmAddSub(imm)); - if (is_uint12(imm)) { // No shift required. - imm <<= ImmAddSub_offset; - } else { - imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset); - } - return imm; - } - - static Instr ImmS(unsigned imms, unsigned reg_size) { - VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) || - ((reg_size == kWRegSize) && is_uint5(imms))); - USE(reg_size); - return imms << ImmS_offset; - } - - static Instr ImmR(unsigned immr, unsigned reg_size) { - VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) || - ((reg_size == kWRegSize) && is_uint5(immr))); - USE(reg_size); - VIXL_ASSERT(is_uint6(immr)); - return immr << ImmR_offset; - } - - static Instr ImmSetBits(unsigned imms, unsigned reg_size) { - VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); - VIXL_ASSERT(is_uint6(imms)); - VIXL_ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3)); - USE(reg_size); - return imms << ImmSetBits_offset; - } - - static Instr ImmRotate(unsigned immr, unsigned reg_size) { - VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); - VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) || - ((reg_size == kWRegSize) && is_uint5(immr))); - USE(reg_size); - return immr << ImmRotate_offset; - } - - static Instr ImmLLiteral(int imm19) { - VIXL_ASSERT(is_int19(imm19)); - return truncate_to_int19(imm19) << ImmLLiteral_offset; - } - - static Instr BitN(unsigned bitn, unsigned reg_size) { - VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); - VIXL_ASSERT((reg_size == kXRegSize) || (bitn == 0)); - USE(reg_size); - return bitn << BitN_offset; - } - - static Instr ShiftDP(Shift shift) { - VIXL_ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR); - return shift << ShiftDP_offset; - } - - static Instr ImmDPShift(unsigned amount) { - VIXL_ASSERT(is_uint6(amount)); - return amount << ImmDPShift_offset; - } - - static Instr ExtendMode(Extend extend) { - return extend << ExtendMode_offset; - } - - static Instr ImmExtendShift(unsigned left_shift) { - VIXL_ASSERT(left_shift <= 4); - return left_shift << ImmExtendShift_offset; - } - - static Instr ImmCondCmp(unsigned imm) { - VIXL_ASSERT(is_uint5(imm)); - return imm << ImmCondCmp_offset; - } - - static Instr Nzcv(StatusFlags nzcv) { - return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset; - } - - // MemOperand offset encoding. 
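ImmAddSub above reflects the add/sub immediate format: a 12-bit value used as-is, or a 12-bit value shifted left by 12. A small predicate that mirrors the rule as written above (the library's own IsImmAddSub checker is only declared further down):

```cpp
#include <cstdint>

constexpr bool IsUint12(int64_t v) { return v >= 0 && v < (int64_t(1) << 12); }

// Accept either a plain 12-bit value or a 12-bit value with the low 12 bits clear.
constexpr bool IsAddSubImmediate(int64_t v) {
    return IsUint12(v) || (IsUint12(v >> 12) && (v & 0xfff) == 0);
}

static_assert(IsAddSubImmediate(0xfff));       // fits directly
static_assert(IsAddSubImmediate(0x123000));    // 0x123 with LSL #12
static_assert(!IsAddSubImmediate(0x1234));     // 13 significant bits with low bits set
static_assert(!IsAddSubImmediate(0x1001000));  // more than 12 significant bits even shifted
```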
- static Instr ImmLSUnsigned(int imm12) { - VIXL_ASSERT(is_uint12(imm12)); - return imm12 << ImmLSUnsigned_offset; - } - - static Instr ImmLS(int imm9) { - VIXL_ASSERT(is_int9(imm9)); - return truncate_to_int9(imm9) << ImmLS_offset; - } - - static Instr ImmLSPair(int imm7, unsigned access_size) { - VIXL_ASSERT(((imm7 >> access_size) << access_size) == imm7); - int scaled_imm7 = imm7 >> access_size; - VIXL_ASSERT(is_int7(scaled_imm7)); - return truncate_to_int7(scaled_imm7) << ImmLSPair_offset; - } - - static Instr ImmShiftLS(unsigned shift_amount) { - VIXL_ASSERT(is_uint1(shift_amount)); - return shift_amount << ImmShiftLS_offset; - } - - static Instr ImmPrefetchOperation(int imm5) { - VIXL_ASSERT(is_uint5(imm5)); - return imm5 << ImmPrefetchOperation_offset; - } - - static Instr ImmException(int imm16) { - VIXL_ASSERT(is_uint16(imm16)); - return imm16 << ImmException_offset; - } - - static Instr ImmSystemRegister(int imm15) { - VIXL_ASSERT(is_uint15(imm15)); - return imm15 << ImmSystemRegister_offset; - } - - static Instr ImmHint(int imm7) { - VIXL_ASSERT(is_uint7(imm7)); - return imm7 << ImmHint_offset; - } - - static Instr CRm(int imm4) { - VIXL_ASSERT(is_uint4(imm4)); - return imm4 << CRm_offset; - } - - static Instr CRn(int imm4) { - VIXL_ASSERT(is_uint4(imm4)); - return imm4 << CRn_offset; - } - - static Instr SysOp(int imm14) { - VIXL_ASSERT(is_uint14(imm14)); - return imm14 << SysOp_offset; - } - - static Instr ImmSysOp1(int imm3) { - VIXL_ASSERT(is_uint3(imm3)); - return imm3 << SysOp1_offset; - } - - static Instr ImmSysOp2(int imm3) { - VIXL_ASSERT(is_uint3(imm3)); - return imm3 << SysOp2_offset; - } - - static Instr ImmBarrierDomain(int imm2) { - VIXL_ASSERT(is_uint2(imm2)); - return imm2 << ImmBarrierDomain_offset; - } - - static Instr ImmBarrierType(int imm2) { - VIXL_ASSERT(is_uint2(imm2)); - return imm2 << ImmBarrierType_offset; - } - - // Move immediates encoding. - static Instr ImmMoveWide(uint64_t imm) { - VIXL_ASSERT(is_uint16(imm)); - return static_cast(imm << ImmMoveWide_offset); - } - - static Instr ShiftMoveWide(int64_t shift) { - VIXL_ASSERT(is_uint2(shift)); - return static_cast(shift << ShiftMoveWide_offset); - } - - // FP Immediates. - static Instr ImmFP32(float imm); - static Instr ImmFP64(double imm); - - // FP register type. - static Instr FPType(FPRegister fd) { - return fd.Is64Bits() ? FP64 : FP32; - } - - static Instr FPScale(unsigned scale) { - VIXL_ASSERT(is_uint6(scale)); - return scale << FPScale_offset; - } - - // Immediate field checking helpers. - static bool IsImmAddSub(int64_t immediate); - static bool IsImmConditionalCompare(int64_t immediate); - static bool IsImmFP32(float imm); - static bool IsImmFP64(double imm); - static bool IsImmLogical(uint64_t value, - unsigned width, - unsigned* n = NULL, - unsigned* imm_s = NULL, - unsigned* imm_r = NULL); - static bool IsImmLSPair(int64_t offset, unsigned access_size); - static bool IsImmLSScaled(int64_t offset, unsigned access_size); - static bool IsImmLSUnscaled(int64_t offset); - static bool IsImmMovn(uint64_t imm, unsigned reg_size); - static bool IsImmMovz(uint64_t imm, unsigned reg_size); - - // Instruction bits for vector format in data processing operations. 
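ImmLSPair above stores load/store-pair offsets scaled by the access size, so only aligned offsets within a narrow signed range are encodable. A standalone sketch of that scaling for an X-register pair (access_size == 3, i.e. 8-byte elements):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      int64_t offset = 16;       // byte offset, e.g. STP x0, x1, [sp, #16]
      unsigned access_size = 3;  // log2(8): X registers
      // The byte offset must be a multiple of the access size...
      assert(((offset >> access_size) << access_size) == offset);
      int scaled_imm7 = static_cast<int>(offset >> access_size);
      // ...and the scaled value must fit the signed 7-bit ImmLSPair field.
      assert(scaled_imm7 >= -64 && scaled_imm7 <= 63);
      printf("byte offset %lld -> scaled imm7 %d\n",
             static_cast<long long>(offset), scaled_imm7);
      return 0;
    }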
- static Instr VFormat(VRegister vd) { - if (vd.Is64Bits()) { - switch (vd.lanes()) { - case 2: return NEON_2S; - case 4: return NEON_4H; - case 8: return NEON_8B; - default: return 0xffffffff; - } - } else { - VIXL_ASSERT(vd.Is128Bits()); - switch (vd.lanes()) { - case 2: return NEON_2D; - case 4: return NEON_4S; - case 8: return NEON_8H; - case 16: return NEON_16B; - default: return 0xffffffff; - } - } - } - - // Instruction bits for vector format in floating point data processing - // operations. - static Instr FPFormat(VRegister vd) { - if (vd.lanes() == 1) { - // Floating point scalar formats. - VIXL_ASSERT(vd.Is32Bits() || vd.Is64Bits()); - return vd.Is64Bits() ? FP64 : FP32; - } - - // Two lane floating point vector formats. - if (vd.lanes() == 2) { - VIXL_ASSERT(vd.Is64Bits() || vd.Is128Bits()); - return vd.Is128Bits() ? NEON_FP_2D : NEON_FP_2S; - } - - // Four lane floating point vector format. - VIXL_ASSERT((vd.lanes() == 4) && vd.Is128Bits()); - return NEON_FP_4S; - } - - // Instruction bits for vector format in load and store operations. - static Instr LSVFormat(VRegister vd) { - if (vd.Is64Bits()) { - switch (vd.lanes()) { - case 1: return LS_NEON_1D; - case 2: return LS_NEON_2S; - case 4: return LS_NEON_4H; - case 8: return LS_NEON_8B; - default: return 0xffffffff; - } - } else { - VIXL_ASSERT(vd.Is128Bits()); - switch (vd.lanes()) { - case 2: return LS_NEON_2D; - case 4: return LS_NEON_4S; - case 8: return LS_NEON_8H; - case 16: return LS_NEON_16B; - default: return 0xffffffff; - } - } - } - - // Instruction bits for scalar format in data processing operations. - static Instr SFormat(VRegister vd) { - VIXL_ASSERT(vd.lanes() == 1); - switch (vd.SizeInBytes()) { - case 1: return NEON_B; - case 2: return NEON_H; - case 4: return NEON_S; - case 8: return NEON_D; - default: return 0xffffffff; - } - } - - static Instr ImmNEONHLM(int index, int num_bits) { - int h, l, m; - if (num_bits == 3) { - VIXL_ASSERT(is_uint3(index)); - h = (index >> 2) & 1; - l = (index >> 1) & 1; - m = (index >> 0) & 1; - } else if (num_bits == 2) { - VIXL_ASSERT(is_uint2(index)); - h = (index >> 1) & 1; - l = (index >> 0) & 1; - m = 0; - } else { - VIXL_ASSERT(is_uint1(index) && (num_bits == 1)); - h = (index >> 0) & 1; - l = 0; - m = 0; - } - return (h << NEONH_offset) | (l << NEONL_offset) | (m << NEONM_offset); - } - - static Instr ImmNEONExt(int imm4) { - VIXL_ASSERT(is_uint4(imm4)); - return imm4 << ImmNEONExt_offset; - } - - static Instr ImmNEON5(Instr format, int index) { - VIXL_ASSERT(is_uint4(index)); - int s = LaneSizeInBytesLog2FromFormat(static_cast(format)); - int imm5 = (index << (s + 1)) | (1 << s); - return imm5 << ImmNEON5_offset; - } - - static Instr ImmNEON4(Instr format, int index) { - VIXL_ASSERT(is_uint4(index)); - int s = LaneSizeInBytesLog2FromFormat(static_cast(format)); - int imm4 = index << s; - return imm4 << ImmNEON4_offset; - } - - static Instr ImmNEONabcdefgh(int imm8) { - VIXL_ASSERT(is_uint8(imm8)); - Instr instr; - instr = ((imm8 >> 5) & 7) << ImmNEONabc_offset; - instr |= (imm8 & 0x1f) << ImmNEONdefgh_offset; - return instr; - } - - static Instr NEONCmode(int cmode) { - VIXL_ASSERT(is_uint4(cmode)); - return cmode << NEONCmode_offset; - } - - static Instr NEONModImmOp(int op) { - VIXL_ASSERT(is_uint1(op)); - return op << NEONModImmOp_offset; - } - - // Size of the code generated since label to the current position. 
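ImmNEON5 above packs a NEON lane index and lane size into a single 5-bit field: a marker bit sits at position log2(lane size in bytes) and the index is shifted in above it. A standalone sketch of that packing:

    #include <cstdio>

    // s = log2(lane size in bytes): 0 = B, 1 = H, 2 = S, 3 = D.
    int EncodeImmNeon5(int s, int index) {
      return (index << (s + 1)) | (1 << s);
    }

    int main() {
      printf("S lane, index 3: imm5 = 0x%02x\n", EncodeImmNeon5(2, 3));  // 0x1c
      printf("B lane, index 7: imm5 = 0x%02x\n", EncodeImmNeon5(0, 7));  // 0x0f
      return 0;
    }

The result is then shifted to the ImmNEON5 field (bits 20:16), exactly as the helper does.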
- size_t SizeOfCodeGeneratedSince(Label* label) const { - VIXL_ASSERT(label->IsBound()); - return buffer_->OffsetFrom(label->location()); - } - - size_t SizeOfCodeGenerated() const { - return buffer_->CursorOffset(); - } - - size_t BufferCapacity() const { return buffer_->capacity(); } - - size_t RemainingBufferSpace() const { return buffer_->RemainingBytes(); } - - void EnsureSpaceFor(size_t amount) { - if (buffer_->RemainingBytes() < amount) { - size_t capacity = buffer_->capacity(); - size_t size = buffer_->CursorOffset(); - do { - // TODO(all): refine. - capacity *= 2; - } while ((capacity - size) < amount); - buffer_->Grow(capacity); - } - } - -#ifdef VIXL_DEBUG - void AcquireBuffer() { - VIXL_ASSERT(buffer_monitor_ >= 0); - buffer_monitor_++; - } - - void ReleaseBuffer() { - buffer_monitor_--; - VIXL_ASSERT(buffer_monitor_ >= 0); - } -#endif - - PositionIndependentCodeOption pic() const { - return pic_; - } - - bool AllowPageOffsetDependentCode() const { - return (pic() == PageOffsetDependentCode) || - (pic() == PositionDependentCode); - } - - static const Register& AppropriateZeroRegFor(const CPURegister& reg) { - return reg.Is64Bits() ? xzr : wzr; - } - - - protected: - void LoadStore(const CPURegister& rt, - const MemOperand& addr, - LoadStoreOp op, - LoadStoreScalingOption option = PreferScaledOffset); - - void LoadStorePair(const CPURegister& rt, - const CPURegister& rt2, - const MemOperand& addr, - LoadStorePairOp op); - void LoadStoreStruct(const VRegister& vt, - const MemOperand& addr, - NEONLoadStoreMultiStructOp op); - void LoadStoreStruct1(const VRegister& vt, - int reg_count, - const MemOperand& addr); - void LoadStoreStructSingle(const VRegister& vt, - uint32_t lane, - const MemOperand& addr, - NEONLoadStoreSingleStructOp op); - void LoadStoreStructSingleAllLanes(const VRegister& vt, - const MemOperand& addr, - NEONLoadStoreSingleStructOp op); - void LoadStoreStructVerify(const VRegister& vt, - const MemOperand& addr, - Instr op); - - void Prefetch(PrefetchOperation op, - const MemOperand& addr, - LoadStoreScalingOption option = PreferScaledOffset); - - // TODO(all): The third parameter should be passed by reference but gcc 4.8.2 - // reports a bogus uninitialised warning then. - void Logical(const Register& rd, - const Register& rn, - const Operand operand, - LogicalOp op); - void LogicalImmediate(const Register& rd, - const Register& rn, - unsigned n, - unsigned imm_s, - unsigned imm_r, - LogicalOp op); - - void ConditionalCompare(const Register& rn, - const Operand& operand, - StatusFlags nzcv, - Condition cond, - ConditionalCompareOp op); - - void AddSubWithCarry(const Register& rd, - const Register& rn, - const Operand& operand, - FlagsUpdate S, - AddSubWithCarryOp op); - - - // Functions for emulating operands not directly supported by the instruction - // set. - void EmitShift(const Register& rd, - const Register& rn, - Shift shift, - unsigned amount); - void EmitExtendShift(const Register& rd, - const Register& rn, - Extend extend, - unsigned left_shift); - - void AddSub(const Register& rd, - const Register& rn, - const Operand& operand, - FlagsUpdate S, - AddSubOp op); - - void NEONTable(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - NEONTableOp op); - - // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified - // registers. Only simple loads are supported; sign- and zero-extension (such - // as in LDPSW_x or LDRB_w) are not supported. 
- static LoadStoreOp LoadOpFor(const CPURegister& rt); - static LoadStorePairOp LoadPairOpFor(const CPURegister& rt, - const CPURegister& rt2); - static LoadStoreOp StoreOpFor(const CPURegister& rt); - static LoadStorePairOp StorePairOpFor(const CPURegister& rt, - const CPURegister& rt2); - static LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor( - const CPURegister& rt, const CPURegister& rt2); - static LoadStorePairNonTemporalOp StorePairNonTemporalOpFor( - const CPURegister& rt, const CPURegister& rt2); - static LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt); - - - private: - static uint32_t FP32ToImm8(float imm); - static uint32_t FP64ToImm8(double imm); - - // Instruction helpers. - void MoveWide(const Register& rd, - uint64_t imm, - int shift, - MoveWideImmediateOp mov_op); - void DataProcShiftedRegister(const Register& rd, - const Register& rn, - const Operand& operand, - FlagsUpdate S, - Instr op); - void DataProcExtendedRegister(const Register& rd, - const Register& rn, - const Operand& operand, - FlagsUpdate S, - Instr op); - void LoadStorePairNonTemporal(const CPURegister& rt, - const CPURegister& rt2, - const MemOperand& addr, - LoadStorePairNonTemporalOp op); - void LoadLiteral(const CPURegister& rt, uint64_t imm, LoadLiteralOp op); - void ConditionalSelect(const Register& rd, - const Register& rn, - const Register& rm, - Condition cond, - ConditionalSelectOp op); - void DataProcessing1Source(const Register& rd, - const Register& rn, - DataProcessing1SourceOp op); - void DataProcessing3Source(const Register& rd, - const Register& rn, - const Register& rm, - const Register& ra, - DataProcessing3SourceOp op); - void FPDataProcessing1Source(const VRegister& fd, - const VRegister& fn, - FPDataProcessing1SourceOp op); - void FPDataProcessing3Source(const VRegister& fd, - const VRegister& fn, - const VRegister& fm, - const VRegister& fa, - FPDataProcessing3SourceOp op); - void NEONAcrossLanesL(const VRegister& vd, - const VRegister& vn, - NEONAcrossLanesOp op); - void NEONAcrossLanes(const VRegister& vd, - const VRegister& vn, - NEONAcrossLanesOp op); - void NEONModifiedImmShiftLsl(const VRegister& vd, - const int imm8, - const int left_shift, - NEONModifiedImmediateOp op); - void NEONModifiedImmShiftMsl(const VRegister& vd, - const int imm8, - const int shift_amount, - NEONModifiedImmediateOp op); - void NEONFP2Same(const VRegister& vd, - const VRegister& vn, - Instr vop); - void NEON3Same(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - NEON3SameOp vop); - void NEONFP3Same(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - Instr op); - void NEON3DifferentL(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - NEON3DifferentOp vop); - void NEON3DifferentW(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - NEON3DifferentOp vop); - void NEON3DifferentHN(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - NEON3DifferentOp vop); - void NEONFP2RegMisc(const VRegister& vd, - const VRegister& vn, - NEON2RegMiscOp vop, - double value = 0.0); - void NEON2RegMisc(const VRegister& vd, - const VRegister& vn, - NEON2RegMiscOp vop, - int value = 0); - void NEONFP2RegMisc(const VRegister& vd, - const VRegister& vn, - Instr op); - void NEONAddlp(const VRegister& vd, - const VRegister& vn, - NEON2RegMiscOp op); - void NEONPerm(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - NEONPermOp op); - void NEONFPByElement(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - 
int vm_index, - NEONByIndexedElementOp op); - void NEONByElement(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index, - NEONByIndexedElementOp op); - void NEONByElementL(const VRegister& vd, - const VRegister& vn, - const VRegister& vm, - int vm_index, - NEONByIndexedElementOp op); - void NEONShiftImmediate(const VRegister& vd, - const VRegister& vn, - NEONShiftImmediateOp op, - int immh_immb); - void NEONShiftLeftImmediate(const VRegister& vd, - const VRegister& vn, - int shift, - NEONShiftImmediateOp op); - void NEONShiftRightImmediate(const VRegister& vd, - const VRegister& vn, - int shift, - NEONShiftImmediateOp op); - void NEONShiftImmediateL(const VRegister& vd, - const VRegister& vn, - int shift, - NEONShiftImmediateOp op); - void NEONShiftImmediateN(const VRegister& vd, - const VRegister& vn, - int shift, - NEONShiftImmediateOp op); - void NEONXtn(const VRegister& vd, - const VRegister& vn, - NEON2RegMiscOp vop); - - Instr LoadStoreStructAddrModeField(const MemOperand& addr); - - // Encode the specified MemOperand for the specified access size and scaling - // preference. - Instr LoadStoreMemOperand(const MemOperand& addr, - unsigned access_size, - LoadStoreScalingOption option); - - // Link the current (not-yet-emitted) instruction to the specified label, then - // return an offset to be encoded in the instruction. If the label is not yet - // bound, an offset of 0 is returned. - ptrdiff_t LinkAndGetByteOffsetTo(Label * label); - ptrdiff_t LinkAndGetInstructionOffsetTo(Label * label); - ptrdiff_t LinkAndGetPageOffsetTo(Label * label); - - // A common implementation for the LinkAndGetOffsetTo helpers. - template - ptrdiff_t LinkAndGetOffsetTo(Label* label); - - // Literal load offset are in words (32-bit). - ptrdiff_t LinkAndGetWordOffsetTo(RawLiteral* literal); - - // Emit the instruction in buffer_. - void Emit(Instr instruction) { - VIXL_STATIC_ASSERT(sizeof(instruction) == kInstructionSize); - VIXL_ASSERT(buffer_monitor_ > 0); - buffer_->Emit32(instruction); - } - - // Buffer where the code is emitted. - CodeBuffer* buffer_; - PositionIndependentCodeOption pic_; - -#ifdef VIXL_DEBUG - int64_t buffer_monitor_; -#endif -}; - - -// All Assembler emits MUST acquire/release the underlying code buffer. The -// helper scope below will do so and optionally ensure the buffer is big enough -// to receive the emit. It is possible to request the scope not to perform any -// checks (kNoCheck) if for example it is known in advance the buffer size is -// adequate or there is some other size checking mechanism in place. -class CodeBufferCheckScope { - public: - // Tell whether or not the scope needs to ensure the associated CodeBuffer - // has enough space for the requested size. - enum CheckPolicy { - kNoCheck, - kCheck - }; - - // Tell whether or not the scope should assert the amount of code emitted - // within the scope is consistent with the requested amount. - enum AssertPolicy { - kNoAssert, // No assert required. - kExactSize, // The code emitted must be exactly size bytes. - kMaximumSize // The code emitted must be at most size bytes. 
-  };
-
-  CodeBufferCheckScope(Assembler* assm,
-                       size_t size,
-                       CheckPolicy check_policy = kCheck,
-                       AssertPolicy assert_policy = kMaximumSize)
-      : assm_(assm) {
-    if (check_policy == kCheck) assm->EnsureSpaceFor(size);
-#ifdef VIXL_DEBUG
-    assm->bind(&start_);
-    size_ = size;
-    assert_policy_ = assert_policy;
-    assm->AcquireBuffer();
-#else
-    USE(assert_policy);
-#endif
-  }
-
-  // This is a shortcut for CodeBufferCheckScope(assm, 0, kNoCheck, kNoAssert).
-  explicit CodeBufferCheckScope(Assembler* assm) : assm_(assm) {
-#ifdef VIXL_DEBUG
-    size_ = 0;
-    assert_policy_ = kNoAssert;
-    assm->AcquireBuffer();
-#endif
-  }
-
-  ~CodeBufferCheckScope() {
-#ifdef VIXL_DEBUG
-    assm_->ReleaseBuffer();
-    switch (assert_policy_) {
-      case kNoAssert: break;
-      case kExactSize:
-        VIXL_ASSERT(assm_->SizeOfCodeGeneratedSince(&start_) == size_);
-        break;
-      case kMaximumSize:
-        VIXL_ASSERT(assm_->SizeOfCodeGeneratedSince(&start_) <= size_);
-        break;
-      default:
-        VIXL_UNREACHABLE();
-    }
-#endif
-  }
-
- protected:
-  Assembler* assm_;
-#ifdef VIXL_DEBUG
-  Label start_;
-  size_t size_;
-  AssertPolicy assert_policy_;
-#endif
-};
-
-
-template <typename T>
-void Literal<T>::UpdateValue(T new_value, const Assembler* assembler) {
-  return UpdateValue(new_value, assembler->GetStartAddress<uint8_t*>());
-}
-
-
-template <typename T>
-void Literal<T>::UpdateValue(T high64, T low64, const Assembler* assembler) {
-  return UpdateValue(high64, low64, assembler->GetStartAddress<uint8_t*>());
-}
-
-
-}  // namespace vixl
-
-#endif  // VIXL_A64_ASSEMBLER_A64_H_
diff --git a/disas/libvixl/vixl/a64/constants-a64.h b/disas/libvixl/vixl/a64/constants-a64.h
deleted file mode 100644
index 2caa73af87..0000000000
--- a/disas/libvixl/vixl/a64/constants-a64.h
+++ /dev/null
@@ -1,2116 +0,0 @@
-// Copyright 2015, ARM Limited
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-//   * Redistributions of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//   * Redistributions in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//   * Neither the name of ARM Limited nor the names of its contributors may be
-//     used to endorse or promote products derived from this software without
-//     specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
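Before the constants header proper, a distilled illustration of the CodeBufferCheckScope pattern that closed out assembler-a64.h above: reserve space up front, bracket the emits with acquire/release, and (in debug builds) check that the code actually emitted fits the promised size. This is a simplified standalone sketch, not vixl's class; FakeAssembler and ScopedEmitCheck exist only for the example:

    #include <cassert>
    #include <cstddef>

    struct FakeAssembler {            // stand-in for vixl::Assembler
      size_t cursor = 0;
      int monitor = 0;                // mirrors buffer_monitor_
      void EnsureSpaceFor(size_t) {}  // would grow the CodeBuffer
      void Emit32() { assert(monitor > 0); cursor += 4; }
    };

    class ScopedEmitCheck {           // distilled CodeBufferCheckScope
     public:
      ScopedEmitCheck(FakeAssembler* assm, size_t max_size)
          : assm_(assm), start_(assm->cursor), max_size_(max_size) {
        assm_->EnsureSpaceFor(max_size_);
        assm_->monitor++;             // AcquireBuffer()
      }
      ~ScopedEmitCheck() {
        assm_->monitor--;             // ReleaseBuffer()
        assert(assm_->cursor - start_ <= max_size_);  // kMaximumSize policy
      }
     private:
      FakeAssembler* assm_;
      size_t start_;
      size_t max_size_;
    };

    int main() {
      FakeAssembler assm;
      {
        ScopedEmitCheck scope(&assm, 2 * 4);  // room for at most two instructions
        assm.Emit32();
        assm.Emit32();
      }                                       // destructor checks the size here
      return 0;
    }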
- -#ifndef VIXL_A64_CONSTANTS_A64_H_ -#define VIXL_A64_CONSTANTS_A64_H_ - -namespace vixl { - -const unsigned kNumberOfRegisters = 32; -const unsigned kNumberOfVRegisters = 32; -const unsigned kNumberOfFPRegisters = kNumberOfVRegisters; -// Callee saved registers are x21-x30(lr). -const int kNumberOfCalleeSavedRegisters = 10; -const int kFirstCalleeSavedRegisterIndex = 21; -// Callee saved FP registers are d8-d15. -const int kNumberOfCalleeSavedFPRegisters = 8; -const int kFirstCalleeSavedFPRegisterIndex = 8; - -#define REGISTER_CODE_LIST(R) \ -R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \ -R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \ -R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \ -R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31) - -#define INSTRUCTION_FIELDS_LIST(V_) \ -/* Register fields */ \ -V_(Rd, 4, 0, Bits) /* Destination register. */ \ -V_(Rn, 9, 5, Bits) /* First source register. */ \ -V_(Rm, 20, 16, Bits) /* Second source register. */ \ -V_(Ra, 14, 10, Bits) /* Third source register. */ \ -V_(Rt, 4, 0, Bits) /* Load/store register. */ \ -V_(Rt2, 14, 10, Bits) /* Load/store second register. */ \ -V_(Rs, 20, 16, Bits) /* Exclusive access status. */ \ - \ -/* Common bits */ \ -V_(SixtyFourBits, 31, 31, Bits) \ -V_(FlagsUpdate, 29, 29, Bits) \ - \ -/* PC relative addressing */ \ -V_(ImmPCRelHi, 23, 5, SignedBits) \ -V_(ImmPCRelLo, 30, 29, Bits) \ - \ -/* Add/subtract/logical shift register */ \ -V_(ShiftDP, 23, 22, Bits) \ -V_(ImmDPShift, 15, 10, Bits) \ - \ -/* Add/subtract immediate */ \ -V_(ImmAddSub, 21, 10, Bits) \ -V_(ShiftAddSub, 23, 22, Bits) \ - \ -/* Add/substract extend */ \ -V_(ImmExtendShift, 12, 10, Bits) \ -V_(ExtendMode, 15, 13, Bits) \ - \ -/* Move wide */ \ -V_(ImmMoveWide, 20, 5, Bits) \ -V_(ShiftMoveWide, 22, 21, Bits) \ - \ -/* Logical immediate, bitfield and extract */ \ -V_(BitN, 22, 22, Bits) \ -V_(ImmRotate, 21, 16, Bits) \ -V_(ImmSetBits, 15, 10, Bits) \ -V_(ImmR, 21, 16, Bits) \ -V_(ImmS, 15, 10, Bits) \ - \ -/* Test and branch immediate */ \ -V_(ImmTestBranch, 18, 5, SignedBits) \ -V_(ImmTestBranchBit40, 23, 19, Bits) \ -V_(ImmTestBranchBit5, 31, 31, Bits) \ - \ -/* Conditionals */ \ -V_(Condition, 15, 12, Bits) \ -V_(ConditionBranch, 3, 0, Bits) \ -V_(Nzcv, 3, 0, Bits) \ -V_(ImmCondCmp, 20, 16, Bits) \ -V_(ImmCondBranch, 23, 5, SignedBits) \ - \ -/* Floating point */ \ -V_(FPType, 23, 22, Bits) \ -V_(ImmFP, 20, 13, Bits) \ -V_(FPScale, 15, 10, Bits) \ - \ -/* Load Store */ \ -V_(ImmLS, 20, 12, SignedBits) \ -V_(ImmLSUnsigned, 21, 10, Bits) \ -V_(ImmLSPair, 21, 15, SignedBits) \ -V_(ImmShiftLS, 12, 12, Bits) \ -V_(LSOpc, 23, 22, Bits) \ -V_(LSVector, 26, 26, Bits) \ -V_(LSSize, 31, 30, Bits) \ -V_(ImmPrefetchOperation, 4, 0, Bits) \ -V_(PrefetchHint, 4, 3, Bits) \ -V_(PrefetchTarget, 2, 1, Bits) \ -V_(PrefetchStream, 0, 0, Bits) \ - \ -/* Other immediates */ \ -V_(ImmUncondBranch, 25, 0, SignedBits) \ -V_(ImmCmpBranch, 23, 5, SignedBits) \ -V_(ImmLLiteral, 23, 5, SignedBits) \ -V_(ImmException, 20, 5, Bits) \ -V_(ImmHint, 11, 5, Bits) \ -V_(ImmBarrierDomain, 11, 10, Bits) \ -V_(ImmBarrierType, 9, 8, Bits) \ - \ -/* System (MRS, MSR, SYS) */ \ -V_(ImmSystemRegister, 19, 5, Bits) \ -V_(SysO0, 19, 19, Bits) \ -V_(SysOp, 18, 5, Bits) \ -V_(SysOp1, 18, 16, Bits) \ -V_(SysOp2, 7, 5, Bits) \ -V_(CRn, 15, 12, Bits) \ -V_(CRm, 11, 8, Bits) \ - \ -/* Load-/store-exclusive */ \ -V_(LdStXLoad, 22, 22, Bits) \ -V_(LdStXNotExclusive, 23, 23, Bits) \ -V_(LdStXAcquireRelease, 15, 15, Bits) \ -V_(LdStXSizeLog2, 31, 30, Bits) \ -V_(LdStXPair, 21, 21, Bits) \ - \ -/* NEON 
generic fields */ \ -V_(NEONQ, 30, 30, Bits) \ -V_(NEONSize, 23, 22, Bits) \ -V_(NEONLSSize, 11, 10, Bits) \ -V_(NEONS, 12, 12, Bits) \ -V_(NEONL, 21, 21, Bits) \ -V_(NEONM, 20, 20, Bits) \ -V_(NEONH, 11, 11, Bits) \ -V_(ImmNEONExt, 14, 11, Bits) \ -V_(ImmNEON5, 20, 16, Bits) \ -V_(ImmNEON4, 14, 11, Bits) \ - \ -/* NEON Modified Immediate fields */ \ -V_(ImmNEONabc, 18, 16, Bits) \ -V_(ImmNEONdefgh, 9, 5, Bits) \ -V_(NEONModImmOp, 29, 29, Bits) \ -V_(NEONCmode, 15, 12, Bits) \ - \ -/* NEON Shift Immediate fields */ \ -V_(ImmNEONImmhImmb, 22, 16, Bits) \ -V_(ImmNEONImmh, 22, 19, Bits) \ -V_(ImmNEONImmb, 18, 16, Bits) - -#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \ -/* NZCV */ \ -V_(Flags, 31, 28, Bits) \ -V_(N, 31, 31, Bits) \ -V_(Z, 30, 30, Bits) \ -V_(C, 29, 29, Bits) \ -V_(V, 28, 28, Bits) \ -M_(NZCV, Flags_mask) \ -/* FPCR */ \ -V_(AHP, 26, 26, Bits) \ -V_(DN, 25, 25, Bits) \ -V_(FZ, 24, 24, Bits) \ -V_(RMode, 23, 22, Bits) \ -M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask) - -// Fields offsets. -#define DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, X) \ -const int Name##_offset = LowBit; \ -const int Name##_width = HighBit - LowBit + 1; \ -const uint32_t Name##_mask = ((1 << Name##_width) - 1) << LowBit; -#define NOTHING(A, B) -INSTRUCTION_FIELDS_LIST(DECLARE_FIELDS_OFFSETS) -SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING) -#undef NOTHING -#undef DECLARE_FIELDS_BITS - -// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), formed -// from ImmPCRelLo and ImmPCRelHi. -const int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask; - -// Condition codes. -enum Condition { - eq = 0, // Z set Equal. - ne = 1, // Z clear Not equal. - cs = 2, // C set Carry set. - cc = 3, // C clear Carry clear. - mi = 4, // N set Negative. - pl = 5, // N clear Positive or zero. - vs = 6, // V set Overflow. - vc = 7, // V clear No overflow. - hi = 8, // C set, Z clear Unsigned higher. - ls = 9, // C clear or Z set Unsigned lower or same. - ge = 10, // N == V Greater or equal. - lt = 11, // N != V Less than. - gt = 12, // Z clear, N == V Greater than. - le = 13, // Z set or N != V Less then or equal - al = 14, // Always. - nv = 15, // Behaves as always/al. - - // Aliases. - hs = cs, // C set Unsigned higher or same. - lo = cc // C clear Unsigned lower. -}; - -inline Condition InvertCondition(Condition cond) { - // Conditions al and nv behave identically, as "always true". They can't be - // inverted, because there is no "always false" condition. - VIXL_ASSERT((cond != al) && (cond != nv)); - return static_cast(cond ^ 1); -} - -enum FPTrapFlags { - EnableTrap = 1, - DisableTrap = 0 -}; - -enum FlagsUpdate { - SetFlags = 1, - LeaveFlags = 0 -}; - -enum StatusFlags { - NoFlag = 0, - - // Derive the flag combinations from the system register bit descriptions. - NFlag = N_mask, - ZFlag = Z_mask, - CFlag = C_mask, - VFlag = V_mask, - NZFlag = NFlag | ZFlag, - NCFlag = NFlag | CFlag, - NVFlag = NFlag | VFlag, - ZCFlag = ZFlag | CFlag, - ZVFlag = ZFlag | VFlag, - CVFlag = CFlag | VFlag, - NZCFlag = NFlag | ZFlag | CFlag, - NZVFlag = NFlag | ZFlag | VFlag, - NCVFlag = NFlag | CFlag | VFlag, - ZCVFlag = ZFlag | CFlag | VFlag, - NZCVFlag = NFlag | ZFlag | CFlag | VFlag, - - // Floating-point comparison results. 
- FPEqualFlag = ZCFlag, - FPLessThanFlag = NFlag, - FPGreaterThanFlag = CFlag, - FPUnorderedFlag = CVFlag -}; - -enum Shift { - NO_SHIFT = -1, - LSL = 0x0, - LSR = 0x1, - ASR = 0x2, - ROR = 0x3, - MSL = 0x4 -}; - -enum Extend { - NO_EXTEND = -1, - UXTB = 0, - UXTH = 1, - UXTW = 2, - UXTX = 3, - SXTB = 4, - SXTH = 5, - SXTW = 6, - SXTX = 7 -}; - -enum SystemHint { - NOP = 0, - YIELD = 1, - WFE = 2, - WFI = 3, - SEV = 4, - SEVL = 5 -}; - -enum BarrierDomain { - OuterShareable = 0, - NonShareable = 1, - InnerShareable = 2, - FullSystem = 3 -}; - -enum BarrierType { - BarrierOther = 0, - BarrierReads = 1, - BarrierWrites = 2, - BarrierAll = 3 -}; - -enum PrefetchOperation { - PLDL1KEEP = 0x00, - PLDL1STRM = 0x01, - PLDL2KEEP = 0x02, - PLDL2STRM = 0x03, - PLDL3KEEP = 0x04, - PLDL3STRM = 0x05, - - PLIL1KEEP = 0x08, - PLIL1STRM = 0x09, - PLIL2KEEP = 0x0a, - PLIL2STRM = 0x0b, - PLIL3KEEP = 0x0c, - PLIL3STRM = 0x0d, - - PSTL1KEEP = 0x10, - PSTL1STRM = 0x11, - PSTL2KEEP = 0x12, - PSTL2STRM = 0x13, - PSTL3KEEP = 0x14, - PSTL3STRM = 0x15 -}; - -// System/special register names. -// This information is not encoded as one field but as the concatenation of -// multiple fields (Op0<0>, Op1, Crn, Crm, Op2). -enum SystemRegister { - NZCV = ((0x1 << SysO0_offset) | - (0x3 << SysOp1_offset) | - (0x4 << CRn_offset) | - (0x2 << CRm_offset) | - (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset, - FPCR = ((0x1 << SysO0_offset) | - (0x3 << SysOp1_offset) | - (0x4 << CRn_offset) | - (0x4 << CRm_offset) | - (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset -}; - -enum InstructionCacheOp { - IVAU = ((0x3 << SysOp1_offset) | - (0x7 << CRn_offset) | - (0x5 << CRm_offset) | - (0x1 << SysOp2_offset)) >> SysOp_offset -}; - -enum DataCacheOp { - CVAC = ((0x3 << SysOp1_offset) | - (0x7 << CRn_offset) | - (0xa << CRm_offset) | - (0x1 << SysOp2_offset)) >> SysOp_offset, - CVAU = ((0x3 << SysOp1_offset) | - (0x7 << CRn_offset) | - (0xb << CRm_offset) | - (0x1 << SysOp2_offset)) >> SysOp_offset, - CIVAC = ((0x3 << SysOp1_offset) | - (0x7 << CRn_offset) | - (0xe << CRm_offset) | - (0x1 << SysOp2_offset)) >> SysOp_offset, - ZVA = ((0x3 << SysOp1_offset) | - (0x7 << CRn_offset) | - (0x4 << CRm_offset) | - (0x1 << SysOp2_offset)) >> SysOp_offset -}; - -// Instruction enumerations. -// -// These are the masks that define a class of instructions, and the list of -// instructions within each class. Each enumeration has a Fixed, FMask and -// Mask value. -// -// Fixed: The fixed bits in this instruction class. -// FMask: The mask used to extract the fixed bits in the class. -// Mask: The mask used to identify the instructions within a class. -// -// The enumerations can be used like this: -// -// VIXL_ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed); -// switch(instr->Mask(PCRelAddressingMask)) { -// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break; -// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break; -// default: printf("Unknown instruction\n"); -// } - - -// Generic fields. 
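The Fixed/FMask/Mask convention described in the comment above is how every decoder pass over these enums works: AND with FMask and compare against Fixed to pick the class, then AND with Mask to pick the instruction. A standalone sketch using the PCRelAddressingOp values defined a little further down (Fixed 0x10000000, FMask 0x1F000000, Mask 0x9F000000, ADRP = Fixed | 0x80000000):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t PCRelAddressingFixed = 0x10000000;
      const uint32_t PCRelAddressingFMask = 0x1F000000;
      const uint32_t PCRelAddressingMask  = 0x9F000000;
      const uint32_t ADR  = PCRelAddressingFixed | 0x00000000;
      const uint32_t ADRP = PCRelAddressingFixed | 0x80000000;

      uint32_t instr = 0x90000001;  // adrp x1, #0
      if ((instr & PCRelAddressingFMask) == PCRelAddressingFixed) {
        switch (instr & PCRelAddressingMask) {
          case ADR:  printf("adr\n");  break;
          case ADRP: printf("adrp\n"); break;
          default:   printf("unallocated\n"); break;
        }
      }
      return 0;
    }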
-enum GenericInstrField { - SixtyFourBits = 0x80000000, - ThirtyTwoBits = 0x00000000, - FP32 = 0x00000000, - FP64 = 0x00400000 -}; - -enum NEONFormatField { - NEONFormatFieldMask = 0x40C00000, - NEON_Q = 0x40000000, - NEON_8B = 0x00000000, - NEON_16B = NEON_8B | NEON_Q, - NEON_4H = 0x00400000, - NEON_8H = NEON_4H | NEON_Q, - NEON_2S = 0x00800000, - NEON_4S = NEON_2S | NEON_Q, - NEON_1D = 0x00C00000, - NEON_2D = 0x00C00000 | NEON_Q -}; - -enum NEONFPFormatField { - NEONFPFormatFieldMask = 0x40400000, - NEON_FP_2S = FP32, - NEON_FP_4S = FP32 | NEON_Q, - NEON_FP_2D = FP64 | NEON_Q -}; - -enum NEONLSFormatField { - NEONLSFormatFieldMask = 0x40000C00, - LS_NEON_8B = 0x00000000, - LS_NEON_16B = LS_NEON_8B | NEON_Q, - LS_NEON_4H = 0x00000400, - LS_NEON_8H = LS_NEON_4H | NEON_Q, - LS_NEON_2S = 0x00000800, - LS_NEON_4S = LS_NEON_2S | NEON_Q, - LS_NEON_1D = 0x00000C00, - LS_NEON_2D = LS_NEON_1D | NEON_Q -}; - -enum NEONScalarFormatField { - NEONScalarFormatFieldMask = 0x00C00000, - NEONScalar = 0x10000000, - NEON_B = 0x00000000, - NEON_H = 0x00400000, - NEON_S = 0x00800000, - NEON_D = 0x00C00000 -}; - -// PC relative addressing. -enum PCRelAddressingOp { - PCRelAddressingFixed = 0x10000000, - PCRelAddressingFMask = 0x1F000000, - PCRelAddressingMask = 0x9F000000, - ADR = PCRelAddressingFixed | 0x00000000, - ADRP = PCRelAddressingFixed | 0x80000000 -}; - -// Add/sub (immediate, shifted and extended.) -const int kSFOffset = 31; -enum AddSubOp { - AddSubOpMask = 0x60000000, - AddSubSetFlagsBit = 0x20000000, - ADD = 0x00000000, - ADDS = ADD | AddSubSetFlagsBit, - SUB = 0x40000000, - SUBS = SUB | AddSubSetFlagsBit -}; - -#define ADD_SUB_OP_LIST(V) \ - V(ADD), \ - V(ADDS), \ - V(SUB), \ - V(SUBS) - -enum AddSubImmediateOp { - AddSubImmediateFixed = 0x11000000, - AddSubImmediateFMask = 0x1F000000, - AddSubImmediateMask = 0xFF000000, - #define ADD_SUB_IMMEDIATE(A) \ - A##_w_imm = AddSubImmediateFixed | A, \ - A##_x_imm = AddSubImmediateFixed | A | SixtyFourBits - ADD_SUB_OP_LIST(ADD_SUB_IMMEDIATE) - #undef ADD_SUB_IMMEDIATE -}; - -enum AddSubShiftedOp { - AddSubShiftedFixed = 0x0B000000, - AddSubShiftedFMask = 0x1F200000, - AddSubShiftedMask = 0xFF200000, - #define ADD_SUB_SHIFTED(A) \ - A##_w_shift = AddSubShiftedFixed | A, \ - A##_x_shift = AddSubShiftedFixed | A | SixtyFourBits - ADD_SUB_OP_LIST(ADD_SUB_SHIFTED) - #undef ADD_SUB_SHIFTED -}; - -enum AddSubExtendedOp { - AddSubExtendedFixed = 0x0B200000, - AddSubExtendedFMask = 0x1F200000, - AddSubExtendedMask = 0xFFE00000, - #define ADD_SUB_EXTENDED(A) \ - A##_w_ext = AddSubExtendedFixed | A, \ - A##_x_ext = AddSubExtendedFixed | A | SixtyFourBits - ADD_SUB_OP_LIST(ADD_SUB_EXTENDED) - #undef ADD_SUB_EXTENDED -}; - -// Add/sub with carry. -enum AddSubWithCarryOp { - AddSubWithCarryFixed = 0x1A000000, - AddSubWithCarryFMask = 0x1FE00000, - AddSubWithCarryMask = 0xFFE0FC00, - ADC_w = AddSubWithCarryFixed | ADD, - ADC_x = AddSubWithCarryFixed | ADD | SixtyFourBits, - ADC = ADC_w, - ADCS_w = AddSubWithCarryFixed | ADDS, - ADCS_x = AddSubWithCarryFixed | ADDS | SixtyFourBits, - SBC_w = AddSubWithCarryFixed | SUB, - SBC_x = AddSubWithCarryFixed | SUB | SixtyFourBits, - SBC = SBC_w, - SBCS_w = AddSubWithCarryFixed | SUBS, - SBCS_x = AddSubWithCarryFixed | SUBS | SixtyFourBits -}; - - -// Logical (immediate and shifted register). 
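Each concrete opcode in these enums is simply the class's Fixed value ORed with the operation bits and, for X-register forms, SixtyFourBits. As a quick sanity check, composing ADD_x_imm and SUBS_x_imm from the values above reproduces the familiar 0x91000000 (add Xd, Xn, #imm) and 0xF1000000 (subs Xd, Xn, #imm) base encodings:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t SixtyFourBits        = 0x80000000;
      const uint32_t AddSubSetFlagsBit    = 0x20000000;
      const uint32_t ADD                  = 0x00000000;
      const uint32_t SUB                  = 0x40000000;
      const uint32_t AddSubImmediateFixed = 0x11000000;

      uint32_t ADD_x_imm  = AddSubImmediateFixed | ADD | SixtyFourBits;
      uint32_t SUBS_x_imm =
          AddSubImmediateFixed | SUB | AddSubSetFlagsBit | SixtyFourBits;
      printf("ADD_x_imm  = 0x%08x\n", ADD_x_imm);   // 0x91000000
      printf("SUBS_x_imm = 0x%08x\n", SUBS_x_imm);  // 0xf1000000
      return 0;
    }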
-enum LogicalOp { - LogicalOpMask = 0x60200000, - NOT = 0x00200000, - AND = 0x00000000, - BIC = AND | NOT, - ORR = 0x20000000, - ORN = ORR | NOT, - EOR = 0x40000000, - EON = EOR | NOT, - ANDS = 0x60000000, - BICS = ANDS | NOT -}; - -// Logical immediate. -enum LogicalImmediateOp { - LogicalImmediateFixed = 0x12000000, - LogicalImmediateFMask = 0x1F800000, - LogicalImmediateMask = 0xFF800000, - AND_w_imm = LogicalImmediateFixed | AND, - AND_x_imm = LogicalImmediateFixed | AND | SixtyFourBits, - ORR_w_imm = LogicalImmediateFixed | ORR, - ORR_x_imm = LogicalImmediateFixed | ORR | SixtyFourBits, - EOR_w_imm = LogicalImmediateFixed | EOR, - EOR_x_imm = LogicalImmediateFixed | EOR | SixtyFourBits, - ANDS_w_imm = LogicalImmediateFixed | ANDS, - ANDS_x_imm = LogicalImmediateFixed | ANDS | SixtyFourBits -}; - -// Logical shifted register. -enum LogicalShiftedOp { - LogicalShiftedFixed = 0x0A000000, - LogicalShiftedFMask = 0x1F000000, - LogicalShiftedMask = 0xFF200000, - AND_w = LogicalShiftedFixed | AND, - AND_x = LogicalShiftedFixed | AND | SixtyFourBits, - AND_shift = AND_w, - BIC_w = LogicalShiftedFixed | BIC, - BIC_x = LogicalShiftedFixed | BIC | SixtyFourBits, - BIC_shift = BIC_w, - ORR_w = LogicalShiftedFixed | ORR, - ORR_x = LogicalShiftedFixed | ORR | SixtyFourBits, - ORR_shift = ORR_w, - ORN_w = LogicalShiftedFixed | ORN, - ORN_x = LogicalShiftedFixed | ORN | SixtyFourBits, - ORN_shift = ORN_w, - EOR_w = LogicalShiftedFixed | EOR, - EOR_x = LogicalShiftedFixed | EOR | SixtyFourBits, - EOR_shift = EOR_w, - EON_w = LogicalShiftedFixed | EON, - EON_x = LogicalShiftedFixed | EON | SixtyFourBits, - EON_shift = EON_w, - ANDS_w = LogicalShiftedFixed | ANDS, - ANDS_x = LogicalShiftedFixed | ANDS | SixtyFourBits, - ANDS_shift = ANDS_w, - BICS_w = LogicalShiftedFixed | BICS, - BICS_x = LogicalShiftedFixed | BICS | SixtyFourBits, - BICS_shift = BICS_w -}; - -// Move wide immediate. -enum MoveWideImmediateOp { - MoveWideImmediateFixed = 0x12800000, - MoveWideImmediateFMask = 0x1F800000, - MoveWideImmediateMask = 0xFF800000, - MOVN = 0x00000000, - MOVZ = 0x40000000, - MOVK = 0x60000000, - MOVN_w = MoveWideImmediateFixed | MOVN, - MOVN_x = MoveWideImmediateFixed | MOVN | SixtyFourBits, - MOVZ_w = MoveWideImmediateFixed | MOVZ, - MOVZ_x = MoveWideImmediateFixed | MOVZ | SixtyFourBits, - MOVK_w = MoveWideImmediateFixed | MOVK, - MOVK_x = MoveWideImmediateFixed | MOVK | SixtyFourBits -}; - -// Bitfield. -const int kBitfieldNOffset = 22; -enum BitfieldOp { - BitfieldFixed = 0x13000000, - BitfieldFMask = 0x1F800000, - BitfieldMask = 0xFF800000, - SBFM_w = BitfieldFixed | 0x00000000, - SBFM_x = BitfieldFixed | 0x80000000, - SBFM = SBFM_w, - BFM_w = BitfieldFixed | 0x20000000, - BFM_x = BitfieldFixed | 0xA0000000, - BFM = BFM_w, - UBFM_w = BitfieldFixed | 0x40000000, - UBFM_x = BitfieldFixed | 0xC0000000, - UBFM = UBFM_w - // Bitfield N field. -}; - -// Extract. -enum ExtractOp { - ExtractFixed = 0x13800000, - ExtractFMask = 0x1F800000, - ExtractMask = 0xFFA00000, - EXTR_w = ExtractFixed | 0x00000000, - EXTR_x = ExtractFixed | 0x80000000, - EXTR = EXTR_w -}; - -// Unconditional branch. -enum UnconditionalBranchOp { - UnconditionalBranchFixed = 0x14000000, - UnconditionalBranchFMask = 0x7C000000, - UnconditionalBranchMask = 0xFC000000, - B = UnconditionalBranchFixed | 0x00000000, - BL = UnconditionalBranchFixed | 0x80000000 -}; - -// Unconditional branch to register. 
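MOVN/MOVZ/MOVK above, together with the ImmMoveWide (bits 20:5) and ShiftMoveWide (bits 22:21) fields, are what a 64-bit constant is typically broken into: one MOVZ plus up to three MOVKs, each carrying a 16-bit chunk at shift 0, 16, 32 or 48. A conceptual standalone sketch that builds such a sequence for x0; it is only an illustration of the field layout, not vixl's MacroAssembler logic, which also knows how to skip zero chunks and prefer MOVN:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t MoveWideImmediateFixed = 0x12800000;
      const uint32_t MOVZ = 0x40000000, MOVK = 0x60000000;
      const uint32_t SixtyFourBits = 0x80000000;
      const unsigned kImmMoveWideOffset = 5;     // ImmMoveWide: bits 20:5
      const unsigned kShiftMoveWideOffset = 21;  // ShiftMoveWide: bits 22:21
      const uint32_t rd = 0;                     // x0 (Rd: bits 4:0)

      uint64_t value = 0x123456789ABCDEF0ull;
      for (int chunk = 0; chunk < 4; chunk++) {
        uint32_t op = (chunk == 0) ? MOVZ : MOVK;
        uint32_t imm16 = (value >> (16 * chunk)) & 0xFFFF;
        uint32_t instr = MoveWideImmediateFixed | op | SixtyFourBits |
                         (imm16 << kImmMoveWideOffset) |
                         (static_cast<uint32_t>(chunk) << kShiftMoveWideOffset) | rd;
        printf("chunk %d: 0x%08x\n", chunk, instr);  // movz/movk x0, #imm16, lsl #(16*chunk)
      }
      return 0;
    }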
-enum UnconditionalBranchToRegisterOp { - UnconditionalBranchToRegisterFixed = 0xD6000000, - UnconditionalBranchToRegisterFMask = 0xFE000000, - UnconditionalBranchToRegisterMask = 0xFFFFFC1F, - BR = UnconditionalBranchToRegisterFixed | 0x001F0000, - BLR = UnconditionalBranchToRegisterFixed | 0x003F0000, - RET = UnconditionalBranchToRegisterFixed | 0x005F0000 -}; - -// Compare and branch. -enum CompareBranchOp { - CompareBranchFixed = 0x34000000, - CompareBranchFMask = 0x7E000000, - CompareBranchMask = 0xFF000000, - CBZ_w = CompareBranchFixed | 0x00000000, - CBZ_x = CompareBranchFixed | 0x80000000, - CBZ = CBZ_w, - CBNZ_w = CompareBranchFixed | 0x01000000, - CBNZ_x = CompareBranchFixed | 0x81000000, - CBNZ = CBNZ_w -}; - -// Test and branch. -enum TestBranchOp { - TestBranchFixed = 0x36000000, - TestBranchFMask = 0x7E000000, - TestBranchMask = 0x7F000000, - TBZ = TestBranchFixed | 0x00000000, - TBNZ = TestBranchFixed | 0x01000000 -}; - -// Conditional branch. -enum ConditionalBranchOp { - ConditionalBranchFixed = 0x54000000, - ConditionalBranchFMask = 0xFE000000, - ConditionalBranchMask = 0xFF000010, - B_cond = ConditionalBranchFixed | 0x00000000 -}; - -// System. -// System instruction encoding is complicated because some instructions use op -// and CR fields to encode parameters. To handle this cleanly, the system -// instructions are split into more than one enum. - -enum SystemOp { - SystemFixed = 0xD5000000, - SystemFMask = 0xFFC00000 -}; - -enum SystemSysRegOp { - SystemSysRegFixed = 0xD5100000, - SystemSysRegFMask = 0xFFD00000, - SystemSysRegMask = 0xFFF00000, - MRS = SystemSysRegFixed | 0x00200000, - MSR = SystemSysRegFixed | 0x00000000 -}; - -enum SystemHintOp { - SystemHintFixed = 0xD503201F, - SystemHintFMask = 0xFFFFF01F, - SystemHintMask = 0xFFFFF01F, - HINT = SystemHintFixed | 0x00000000 -}; - -enum SystemSysOp { - SystemSysFixed = 0xD5080000, - SystemSysFMask = 0xFFF80000, - SystemSysMask = 0xFFF80000, - SYS = SystemSysFixed | 0x00000000 -}; - -// Exception. -enum ExceptionOp { - ExceptionFixed = 0xD4000000, - ExceptionFMask = 0xFF000000, - ExceptionMask = 0xFFE0001F, - HLT = ExceptionFixed | 0x00400000, - BRK = ExceptionFixed | 0x00200000, - SVC = ExceptionFixed | 0x00000001, - HVC = ExceptionFixed | 0x00000002, - SMC = ExceptionFixed | 0x00000003, - DCPS1 = ExceptionFixed | 0x00A00001, - DCPS2 = ExceptionFixed | 0x00A00002, - DCPS3 = ExceptionFixed | 0x00A00003 -}; - -enum MemBarrierOp { - MemBarrierFixed = 0xD503309F, - MemBarrierFMask = 0xFFFFF09F, - MemBarrierMask = 0xFFFFF0FF, - DSB = MemBarrierFixed | 0x00000000, - DMB = MemBarrierFixed | 0x00000020, - ISB = MemBarrierFixed | 0x00000040 -}; - -enum SystemExclusiveMonitorOp { - SystemExclusiveMonitorFixed = 0xD503305F, - SystemExclusiveMonitorFMask = 0xFFFFF0FF, - SystemExclusiveMonitorMask = 0xFFFFF0FF, - CLREX = SystemExclusiveMonitorFixed -}; - -// Any load or store. -enum LoadStoreAnyOp { - LoadStoreAnyFMask = 0x0a000000, - LoadStoreAnyFixed = 0x08000000 -}; - -// Any load pair or store pair. -enum LoadStorePairAnyOp { - LoadStorePairAnyFMask = 0x3a000000, - LoadStorePairAnyFixed = 0x28000000 -}; - -#define LOAD_STORE_PAIR_OP_LIST(V) \ - V(STP, w, 0x00000000), \ - V(LDP, w, 0x00400000), \ - V(LDPSW, x, 0x40400000), \ - V(STP, x, 0x80000000), \ - V(LDP, x, 0x80400000), \ - V(STP, s, 0x04000000), \ - V(LDP, s, 0x04400000), \ - V(STP, d, 0x44000000), \ - V(LDP, d, 0x44400000), \ - V(STP, q, 0x84000000), \ - V(LDP, q, 0x84400000) - -// Load/store pair (post, pre and offset.) 
-enum LoadStorePairOp { - LoadStorePairMask = 0xC4400000, - LoadStorePairLBit = 1 << 22, - #define LOAD_STORE_PAIR(A, B, C) \ - A##_##B = C - LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR) - #undef LOAD_STORE_PAIR -}; - -enum LoadStorePairPostIndexOp { - LoadStorePairPostIndexFixed = 0x28800000, - LoadStorePairPostIndexFMask = 0x3B800000, - LoadStorePairPostIndexMask = 0xFFC00000, - #define LOAD_STORE_PAIR_POST_INDEX(A, B, C) \ - A##_##B##_post = LoadStorePairPostIndexFixed | A##_##B - LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_POST_INDEX) - #undef LOAD_STORE_PAIR_POST_INDEX -}; - -enum LoadStorePairPreIndexOp { - LoadStorePairPreIndexFixed = 0x29800000, - LoadStorePairPreIndexFMask = 0x3B800000, - LoadStorePairPreIndexMask = 0xFFC00000, - #define LOAD_STORE_PAIR_PRE_INDEX(A, B, C) \ - A##_##B##_pre = LoadStorePairPreIndexFixed | A##_##B - LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_PRE_INDEX) - #undef LOAD_STORE_PAIR_PRE_INDEX -}; - -enum LoadStorePairOffsetOp { - LoadStorePairOffsetFixed = 0x29000000, - LoadStorePairOffsetFMask = 0x3B800000, - LoadStorePairOffsetMask = 0xFFC00000, - #define LOAD_STORE_PAIR_OFFSET(A, B, C) \ - A##_##B##_off = LoadStorePairOffsetFixed | A##_##B - LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_OFFSET) - #undef LOAD_STORE_PAIR_OFFSET -}; - -enum LoadStorePairNonTemporalOp { - LoadStorePairNonTemporalFixed = 0x28000000, - LoadStorePairNonTemporalFMask = 0x3B800000, - LoadStorePairNonTemporalMask = 0xFFC00000, - LoadStorePairNonTemporalLBit = 1 << 22, - STNP_w = LoadStorePairNonTemporalFixed | STP_w, - LDNP_w = LoadStorePairNonTemporalFixed | LDP_w, - STNP_x = LoadStorePairNonTemporalFixed | STP_x, - LDNP_x = LoadStorePairNonTemporalFixed | LDP_x, - STNP_s = LoadStorePairNonTemporalFixed | STP_s, - LDNP_s = LoadStorePairNonTemporalFixed | LDP_s, - STNP_d = LoadStorePairNonTemporalFixed | STP_d, - LDNP_d = LoadStorePairNonTemporalFixed | LDP_d, - STNP_q = LoadStorePairNonTemporalFixed | STP_q, - LDNP_q = LoadStorePairNonTemporalFixed | LDP_q -}; - -// Load literal. -enum LoadLiteralOp { - LoadLiteralFixed = 0x18000000, - LoadLiteralFMask = 0x3B000000, - LoadLiteralMask = 0xFF000000, - LDR_w_lit = LoadLiteralFixed | 0x00000000, - LDR_x_lit = LoadLiteralFixed | 0x40000000, - LDRSW_x_lit = LoadLiteralFixed | 0x80000000, - PRFM_lit = LoadLiteralFixed | 0xC0000000, - LDR_s_lit = LoadLiteralFixed | 0x04000000, - LDR_d_lit = LoadLiteralFixed | 0x44000000, - LDR_q_lit = LoadLiteralFixed | 0x84000000 -}; - -#define LOAD_STORE_OP_LIST(V) \ - V(ST, RB, w, 0x00000000), \ - V(ST, RH, w, 0x40000000), \ - V(ST, R, w, 0x80000000), \ - V(ST, R, x, 0xC0000000), \ - V(LD, RB, w, 0x00400000), \ - V(LD, RH, w, 0x40400000), \ - V(LD, R, w, 0x80400000), \ - V(LD, R, x, 0xC0400000), \ - V(LD, RSB, x, 0x00800000), \ - V(LD, RSH, x, 0x40800000), \ - V(LD, RSW, x, 0x80800000), \ - V(LD, RSB, w, 0x00C00000), \ - V(LD, RSH, w, 0x40C00000), \ - V(ST, R, b, 0x04000000), \ - V(ST, R, h, 0x44000000), \ - V(ST, R, s, 0x84000000), \ - V(ST, R, d, 0xC4000000), \ - V(ST, R, q, 0x04800000), \ - V(LD, R, b, 0x04400000), \ - V(LD, R, h, 0x44400000), \ - V(LD, R, s, 0x84400000), \ - V(LD, R, d, 0xC4400000), \ - V(LD, R, q, 0x04C00000) - -// Load/store (post, pre, offset and unsigned.) -enum LoadStoreOp { - LoadStoreMask = 0xC4C00000, - LoadStoreVMask = 0x04000000, - #define LOAD_STORE(A, B, C, D) \ - A##B##_##C = D - LOAD_STORE_OP_LIST(LOAD_STORE), - #undef LOAD_STORE - PRFM = 0xC0800000 -}; - -// Load/store unscaled offset. 
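The addressing-mode enums above are generated from LOAD_STORE_PAIR_OP_LIST with small "X-macro" adapters: each variant ORs its Fixed pattern onto the base STP_x/LDP_x/... values. A reduced standalone sketch of the same technique, using the STP_x, LDP_x and post-index Fixed values from above (0x80000000, 0x80400000 and 0x28800000):

    #include <cstdio>

    // Base values, X-register pair only (from LOAD_STORE_PAIR_OP_LIST).
    #define PAIR_OP_LIST(V) \
      V(STP, x, 0x80000000), \
      V(LDP, x, 0x80400000)

    enum LoadStorePairOp {
      #define PAIR(A, B, C) A##_##B = C
      PAIR_OP_LIST(PAIR)
      #undef PAIR
    };

    enum LoadStorePairPostIndexOp {
      LoadStorePairPostIndexFixed = 0x28800000,
      #define PAIR_POST(A, B, C) A##_##B##_post = LoadStorePairPostIndexFixed | A##_##B
      PAIR_OP_LIST(PAIR_POST)
      #undef PAIR_POST
    };

    int main() {
      printf("STP_x_post = 0x%08x\n", static_cast<unsigned>(STP_x_post));  // 0xa8800000
      printf("LDP_x_post = 0x%08x\n", static_cast<unsigned>(LDP_x_post));  // 0xa8c00000
      return 0;
    }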
-enum LoadStoreUnscaledOffsetOp { - LoadStoreUnscaledOffsetFixed = 0x38000000, - LoadStoreUnscaledOffsetFMask = 0x3B200C00, - LoadStoreUnscaledOffsetMask = 0xFFE00C00, - PRFUM = LoadStoreUnscaledOffsetFixed | PRFM, - #define LOAD_STORE_UNSCALED(A, B, C, D) \ - A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D - LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED) - #undef LOAD_STORE_UNSCALED -}; - -// Load/store post index. -enum LoadStorePostIndex { - LoadStorePostIndexFixed = 0x38000400, - LoadStorePostIndexFMask = 0x3B200C00, - LoadStorePostIndexMask = 0xFFE00C00, - #define LOAD_STORE_POST_INDEX(A, B, C, D) \ - A##B##_##C##_post = LoadStorePostIndexFixed | D - LOAD_STORE_OP_LIST(LOAD_STORE_POST_INDEX) - #undef LOAD_STORE_POST_INDEX -}; - -// Load/store pre index. -enum LoadStorePreIndex { - LoadStorePreIndexFixed = 0x38000C00, - LoadStorePreIndexFMask = 0x3B200C00, - LoadStorePreIndexMask = 0xFFE00C00, - #define LOAD_STORE_PRE_INDEX(A, B, C, D) \ - A##B##_##C##_pre = LoadStorePreIndexFixed | D - LOAD_STORE_OP_LIST(LOAD_STORE_PRE_INDEX) - #undef LOAD_STORE_PRE_INDEX -}; - -// Load/store unsigned offset. -enum LoadStoreUnsignedOffset { - LoadStoreUnsignedOffsetFixed = 0x39000000, - LoadStoreUnsignedOffsetFMask = 0x3B000000, - LoadStoreUnsignedOffsetMask = 0xFFC00000, - PRFM_unsigned = LoadStoreUnsignedOffsetFixed | PRFM, - #define LOAD_STORE_UNSIGNED_OFFSET(A, B, C, D) \ - A##B##_##C##_unsigned = LoadStoreUnsignedOffsetFixed | D - LOAD_STORE_OP_LIST(LOAD_STORE_UNSIGNED_OFFSET) - #undef LOAD_STORE_UNSIGNED_OFFSET -}; - -// Load/store register offset. -enum LoadStoreRegisterOffset { - LoadStoreRegisterOffsetFixed = 0x38200800, - LoadStoreRegisterOffsetFMask = 0x3B200C00, - LoadStoreRegisterOffsetMask = 0xFFE00C00, - PRFM_reg = LoadStoreRegisterOffsetFixed | PRFM, - #define LOAD_STORE_REGISTER_OFFSET(A, B, C, D) \ - A##B##_##C##_reg = LoadStoreRegisterOffsetFixed | D - LOAD_STORE_OP_LIST(LOAD_STORE_REGISTER_OFFSET) - #undef LOAD_STORE_REGISTER_OFFSET -}; - -enum LoadStoreExclusive { - LoadStoreExclusiveFixed = 0x08000000, - LoadStoreExclusiveFMask = 0x3F000000, - LoadStoreExclusiveMask = 0xFFE08000, - STXRB_w = LoadStoreExclusiveFixed | 0x00000000, - STXRH_w = LoadStoreExclusiveFixed | 0x40000000, - STXR_w = LoadStoreExclusiveFixed | 0x80000000, - STXR_x = LoadStoreExclusiveFixed | 0xC0000000, - LDXRB_w = LoadStoreExclusiveFixed | 0x00400000, - LDXRH_w = LoadStoreExclusiveFixed | 0x40400000, - LDXR_w = LoadStoreExclusiveFixed | 0x80400000, - LDXR_x = LoadStoreExclusiveFixed | 0xC0400000, - STXP_w = LoadStoreExclusiveFixed | 0x80200000, - STXP_x = LoadStoreExclusiveFixed | 0xC0200000, - LDXP_w = LoadStoreExclusiveFixed | 0x80600000, - LDXP_x = LoadStoreExclusiveFixed | 0xC0600000, - STLXRB_w = LoadStoreExclusiveFixed | 0x00008000, - STLXRH_w = LoadStoreExclusiveFixed | 0x40008000, - STLXR_w = LoadStoreExclusiveFixed | 0x80008000, - STLXR_x = LoadStoreExclusiveFixed | 0xC0008000, - LDAXRB_w = LoadStoreExclusiveFixed | 0x00408000, - LDAXRH_w = LoadStoreExclusiveFixed | 0x40408000, - LDAXR_w = LoadStoreExclusiveFixed | 0x80408000, - LDAXR_x = LoadStoreExclusiveFixed | 0xC0408000, - STLXP_w = LoadStoreExclusiveFixed | 0x80208000, - STLXP_x = LoadStoreExclusiveFixed | 0xC0208000, - LDAXP_w = LoadStoreExclusiveFixed | 0x80608000, - LDAXP_x = LoadStoreExclusiveFixed | 0xC0608000, - STLRB_w = LoadStoreExclusiveFixed | 0x00808000, - STLRH_w = LoadStoreExclusiveFixed | 0x40808000, - STLR_w = LoadStoreExclusiveFixed | 0x80808000, - STLR_x = LoadStoreExclusiveFixed | 0xC0808000, - LDARB_w = LoadStoreExclusiveFixed | 
0x00C08000, - LDARH_w = LoadStoreExclusiveFixed | 0x40C08000, - LDAR_w = LoadStoreExclusiveFixed | 0x80C08000, - LDAR_x = LoadStoreExclusiveFixed | 0xC0C08000 -}; - -// Conditional compare. -enum ConditionalCompareOp { - ConditionalCompareMask = 0x60000000, - CCMN = 0x20000000, - CCMP = 0x60000000 -}; - -// Conditional compare register. -enum ConditionalCompareRegisterOp { - ConditionalCompareRegisterFixed = 0x1A400000, - ConditionalCompareRegisterFMask = 0x1FE00800, - ConditionalCompareRegisterMask = 0xFFE00C10, - CCMN_w = ConditionalCompareRegisterFixed | CCMN, - CCMN_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMN, - CCMP_w = ConditionalCompareRegisterFixed | CCMP, - CCMP_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMP -}; - -// Conditional compare immediate. -enum ConditionalCompareImmediateOp { - ConditionalCompareImmediateFixed = 0x1A400800, - ConditionalCompareImmediateFMask = 0x1FE00800, - ConditionalCompareImmediateMask = 0xFFE00C10, - CCMN_w_imm = ConditionalCompareImmediateFixed | CCMN, - CCMN_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMN, - CCMP_w_imm = ConditionalCompareImmediateFixed | CCMP, - CCMP_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMP -}; - -// Conditional select. -enum ConditionalSelectOp { - ConditionalSelectFixed = 0x1A800000, - ConditionalSelectFMask = 0x1FE00000, - ConditionalSelectMask = 0xFFE00C00, - CSEL_w = ConditionalSelectFixed | 0x00000000, - CSEL_x = ConditionalSelectFixed | 0x80000000, - CSEL = CSEL_w, - CSINC_w = ConditionalSelectFixed | 0x00000400, - CSINC_x = ConditionalSelectFixed | 0x80000400, - CSINC = CSINC_w, - CSINV_w = ConditionalSelectFixed | 0x40000000, - CSINV_x = ConditionalSelectFixed | 0xC0000000, - CSINV = CSINV_w, - CSNEG_w = ConditionalSelectFixed | 0x40000400, - CSNEG_x = ConditionalSelectFixed | 0xC0000400, - CSNEG = CSNEG_w -}; - -// Data processing 1 source. -enum DataProcessing1SourceOp { - DataProcessing1SourceFixed = 0x5AC00000, - DataProcessing1SourceFMask = 0x5FE00000, - DataProcessing1SourceMask = 0xFFFFFC00, - RBIT = DataProcessing1SourceFixed | 0x00000000, - RBIT_w = RBIT, - RBIT_x = RBIT | SixtyFourBits, - REV16 = DataProcessing1SourceFixed | 0x00000400, - REV16_w = REV16, - REV16_x = REV16 | SixtyFourBits, - REV = DataProcessing1SourceFixed | 0x00000800, - REV_w = REV, - REV32_x = REV | SixtyFourBits, - REV_x = DataProcessing1SourceFixed | SixtyFourBits | 0x00000C00, - CLZ = DataProcessing1SourceFixed | 0x00001000, - CLZ_w = CLZ, - CLZ_x = CLZ | SixtyFourBits, - CLS = DataProcessing1SourceFixed | 0x00001400, - CLS_w = CLS, - CLS_x = CLS | SixtyFourBits -}; - -// Data processing 2 source. 
-enum DataProcessing2SourceOp { - DataProcessing2SourceFixed = 0x1AC00000, - DataProcessing2SourceFMask = 0x5FE00000, - DataProcessing2SourceMask = 0xFFE0FC00, - UDIV_w = DataProcessing2SourceFixed | 0x00000800, - UDIV_x = DataProcessing2SourceFixed | 0x80000800, - UDIV = UDIV_w, - SDIV_w = DataProcessing2SourceFixed | 0x00000C00, - SDIV_x = DataProcessing2SourceFixed | 0x80000C00, - SDIV = SDIV_w, - LSLV_w = DataProcessing2SourceFixed | 0x00002000, - LSLV_x = DataProcessing2SourceFixed | 0x80002000, - LSLV = LSLV_w, - LSRV_w = DataProcessing2SourceFixed | 0x00002400, - LSRV_x = DataProcessing2SourceFixed | 0x80002400, - LSRV = LSRV_w, - ASRV_w = DataProcessing2SourceFixed | 0x00002800, - ASRV_x = DataProcessing2SourceFixed | 0x80002800, - ASRV = ASRV_w, - RORV_w = DataProcessing2SourceFixed | 0x00002C00, - RORV_x = DataProcessing2SourceFixed | 0x80002C00, - RORV = RORV_w, - CRC32B = DataProcessing2SourceFixed | 0x00004000, - CRC32H = DataProcessing2SourceFixed | 0x00004400, - CRC32W = DataProcessing2SourceFixed | 0x00004800, - CRC32X = DataProcessing2SourceFixed | SixtyFourBits | 0x00004C00, - CRC32CB = DataProcessing2SourceFixed | 0x00005000, - CRC32CH = DataProcessing2SourceFixed | 0x00005400, - CRC32CW = DataProcessing2SourceFixed | 0x00005800, - CRC32CX = DataProcessing2SourceFixed | SixtyFourBits | 0x00005C00 -}; - -// Data processing 3 source. -enum DataProcessing3SourceOp { - DataProcessing3SourceFixed = 0x1B000000, - DataProcessing3SourceFMask = 0x1F000000, - DataProcessing3SourceMask = 0xFFE08000, - MADD_w = DataProcessing3SourceFixed | 0x00000000, - MADD_x = DataProcessing3SourceFixed | 0x80000000, - MADD = MADD_w, - MSUB_w = DataProcessing3SourceFixed | 0x00008000, - MSUB_x = DataProcessing3SourceFixed | 0x80008000, - MSUB = MSUB_w, - SMADDL_x = DataProcessing3SourceFixed | 0x80200000, - SMSUBL_x = DataProcessing3SourceFixed | 0x80208000, - SMULH_x = DataProcessing3SourceFixed | 0x80400000, - UMADDL_x = DataProcessing3SourceFixed | 0x80A00000, - UMSUBL_x = DataProcessing3SourceFixed | 0x80A08000, - UMULH_x = DataProcessing3SourceFixed | 0x80C00000 -}; - -// Floating point compare. -enum FPCompareOp { - FPCompareFixed = 0x1E202000, - FPCompareFMask = 0x5F203C00, - FPCompareMask = 0xFFE0FC1F, - FCMP_s = FPCompareFixed | 0x00000000, - FCMP_d = FPCompareFixed | FP64 | 0x00000000, - FCMP = FCMP_s, - FCMP_s_zero = FPCompareFixed | 0x00000008, - FCMP_d_zero = FPCompareFixed | FP64 | 0x00000008, - FCMP_zero = FCMP_s_zero, - FCMPE_s = FPCompareFixed | 0x00000010, - FCMPE_d = FPCompareFixed | FP64 | 0x00000010, - FCMPE = FCMPE_s, - FCMPE_s_zero = FPCompareFixed | 0x00000018, - FCMPE_d_zero = FPCompareFixed | FP64 | 0x00000018, - FCMPE_zero = FCMPE_s_zero -}; - -// Floating point conditional compare. -enum FPConditionalCompareOp { - FPConditionalCompareFixed = 0x1E200400, - FPConditionalCompareFMask = 0x5F200C00, - FPConditionalCompareMask = 0xFFE00C10, - FCCMP_s = FPConditionalCompareFixed | 0x00000000, - FCCMP_d = FPConditionalCompareFixed | FP64 | 0x00000000, - FCCMP = FCCMP_s, - FCCMPE_s = FPConditionalCompareFixed | 0x00000010, - FCCMPE_d = FPConditionalCompareFixed | FP64 | 0x00000010, - FCCMPE = FCCMPE_s -}; - -// Floating point conditional select. -enum FPConditionalSelectOp { - FPConditionalSelectFixed = 0x1E200C00, - FPConditionalSelectFMask = 0x5F200C00, - FPConditionalSelectMask = 0xFFE00C00, - FCSEL_s = FPConditionalSelectFixed | 0x00000000, - FCSEL_d = FPConditionalSelectFixed | FP64 | 0x00000000, - FCSEL = FCSEL_s -}; - -// Floating point immediate. 
-enum FPImmediateOp { - FPImmediateFixed = 0x1E201000, - FPImmediateFMask = 0x5F201C00, - FPImmediateMask = 0xFFE01C00, - FMOV_s_imm = FPImmediateFixed | 0x00000000, - FMOV_d_imm = FPImmediateFixed | FP64 | 0x00000000 -}; - -// Floating point data processing 1 source. -enum FPDataProcessing1SourceOp { - FPDataProcessing1SourceFixed = 0x1E204000, - FPDataProcessing1SourceFMask = 0x5F207C00, - FPDataProcessing1SourceMask = 0xFFFFFC00, - FMOV_s = FPDataProcessing1SourceFixed | 0x00000000, - FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000, - FMOV = FMOV_s, - FABS_s = FPDataProcessing1SourceFixed | 0x00008000, - FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000, - FABS = FABS_s, - FNEG_s = FPDataProcessing1SourceFixed | 0x00010000, - FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000, - FNEG = FNEG_s, - FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000, - FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000, - FSQRT = FSQRT_s, - FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000, - FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000, - FCVT_hs = FPDataProcessing1SourceFixed | 0x00038000, - FCVT_hd = FPDataProcessing1SourceFixed | FP64 | 0x00038000, - FCVT_sh = FPDataProcessing1SourceFixed | 0x00C20000, - FCVT_dh = FPDataProcessing1SourceFixed | 0x00C28000, - FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000, - FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000, - FRINTN = FRINTN_s, - FRINTP_s = FPDataProcessing1SourceFixed | 0x00048000, - FRINTP_d = FPDataProcessing1SourceFixed | FP64 | 0x00048000, - FRINTP = FRINTP_s, - FRINTM_s = FPDataProcessing1SourceFixed | 0x00050000, - FRINTM_d = FPDataProcessing1SourceFixed | FP64 | 0x00050000, - FRINTM = FRINTM_s, - FRINTZ_s = FPDataProcessing1SourceFixed | 0x00058000, - FRINTZ_d = FPDataProcessing1SourceFixed | FP64 | 0x00058000, - FRINTZ = FRINTZ_s, - FRINTA_s = FPDataProcessing1SourceFixed | 0x00060000, - FRINTA_d = FPDataProcessing1SourceFixed | FP64 | 0x00060000, - FRINTA = FRINTA_s, - FRINTX_s = FPDataProcessing1SourceFixed | 0x00070000, - FRINTX_d = FPDataProcessing1SourceFixed | FP64 | 0x00070000, - FRINTX = FRINTX_s, - FRINTI_s = FPDataProcessing1SourceFixed | 0x00078000, - FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000, - FRINTI = FRINTI_s -}; - -// Floating point data processing 2 source. -enum FPDataProcessing2SourceOp { - FPDataProcessing2SourceFixed = 0x1E200800, - FPDataProcessing2SourceFMask = 0x5F200C00, - FPDataProcessing2SourceMask = 0xFFE0FC00, - FMUL = FPDataProcessing2SourceFixed | 0x00000000, - FMUL_s = FMUL, - FMUL_d = FMUL | FP64, - FDIV = FPDataProcessing2SourceFixed | 0x00001000, - FDIV_s = FDIV, - FDIV_d = FDIV | FP64, - FADD = FPDataProcessing2SourceFixed | 0x00002000, - FADD_s = FADD, - FADD_d = FADD | FP64, - FSUB = FPDataProcessing2SourceFixed | 0x00003000, - FSUB_s = FSUB, - FSUB_d = FSUB | FP64, - FMAX = FPDataProcessing2SourceFixed | 0x00004000, - FMAX_s = FMAX, - FMAX_d = FMAX | FP64, - FMIN = FPDataProcessing2SourceFixed | 0x00005000, - FMIN_s = FMIN, - FMIN_d = FMIN | FP64, - FMAXNM = FPDataProcessing2SourceFixed | 0x00006000, - FMAXNM_s = FMAXNM, - FMAXNM_d = FMAXNM | FP64, - FMINNM = FPDataProcessing2SourceFixed | 0x00007000, - FMINNM_s = FMINNM, - FMINNM_d = FMINNM | FP64, - FNMUL = FPDataProcessing2SourceFixed | 0x00008000, - FNMUL_s = FNMUL, - FNMUL_d = FNMUL | FP64 -}; - -// Floating point data processing 3 source. 
-enum FPDataProcessing3SourceOp { - FPDataProcessing3SourceFixed = 0x1F000000, - FPDataProcessing3SourceFMask = 0x5F000000, - FPDataProcessing3SourceMask = 0xFFE08000, - FMADD_s = FPDataProcessing3SourceFixed | 0x00000000, - FMSUB_s = FPDataProcessing3SourceFixed | 0x00008000, - FNMADD_s = FPDataProcessing3SourceFixed | 0x00200000, - FNMSUB_s = FPDataProcessing3SourceFixed | 0x00208000, - FMADD_d = FPDataProcessing3SourceFixed | 0x00400000, - FMSUB_d = FPDataProcessing3SourceFixed | 0x00408000, - FNMADD_d = FPDataProcessing3SourceFixed | 0x00600000, - FNMSUB_d = FPDataProcessing3SourceFixed | 0x00608000 -}; - -// Conversion between floating point and integer. -enum FPIntegerConvertOp { - FPIntegerConvertFixed = 0x1E200000, - FPIntegerConvertFMask = 0x5F20FC00, - FPIntegerConvertMask = 0xFFFFFC00, - FCVTNS = FPIntegerConvertFixed | 0x00000000, - FCVTNS_ws = FCVTNS, - FCVTNS_xs = FCVTNS | SixtyFourBits, - FCVTNS_wd = FCVTNS | FP64, - FCVTNS_xd = FCVTNS | SixtyFourBits | FP64, - FCVTNU = FPIntegerConvertFixed | 0x00010000, - FCVTNU_ws = FCVTNU, - FCVTNU_xs = FCVTNU | SixtyFourBits, - FCVTNU_wd = FCVTNU | FP64, - FCVTNU_xd = FCVTNU | SixtyFourBits | FP64, - FCVTPS = FPIntegerConvertFixed | 0x00080000, - FCVTPS_ws = FCVTPS, - FCVTPS_xs = FCVTPS | SixtyFourBits, - FCVTPS_wd = FCVTPS | FP64, - FCVTPS_xd = FCVTPS | SixtyFourBits | FP64, - FCVTPU = FPIntegerConvertFixed | 0x00090000, - FCVTPU_ws = FCVTPU, - FCVTPU_xs = FCVTPU | SixtyFourBits, - FCVTPU_wd = FCVTPU | FP64, - FCVTPU_xd = FCVTPU | SixtyFourBits | FP64, - FCVTMS = FPIntegerConvertFixed | 0x00100000, - FCVTMS_ws = FCVTMS, - FCVTMS_xs = FCVTMS | SixtyFourBits, - FCVTMS_wd = FCVTMS | FP64, - FCVTMS_xd = FCVTMS | SixtyFourBits | FP64, - FCVTMU = FPIntegerConvertFixed | 0x00110000, - FCVTMU_ws = FCVTMU, - FCVTMU_xs = FCVTMU | SixtyFourBits, - FCVTMU_wd = FCVTMU | FP64, - FCVTMU_xd = FCVTMU | SixtyFourBits | FP64, - FCVTZS = FPIntegerConvertFixed | 0x00180000, - FCVTZS_ws = FCVTZS, - FCVTZS_xs = FCVTZS | SixtyFourBits, - FCVTZS_wd = FCVTZS | FP64, - FCVTZS_xd = FCVTZS | SixtyFourBits | FP64, - FCVTZU = FPIntegerConvertFixed | 0x00190000, - FCVTZU_ws = FCVTZU, - FCVTZU_xs = FCVTZU | SixtyFourBits, - FCVTZU_wd = FCVTZU | FP64, - FCVTZU_xd = FCVTZU | SixtyFourBits | FP64, - SCVTF = FPIntegerConvertFixed | 0x00020000, - SCVTF_sw = SCVTF, - SCVTF_sx = SCVTF | SixtyFourBits, - SCVTF_dw = SCVTF | FP64, - SCVTF_dx = SCVTF | SixtyFourBits | FP64, - UCVTF = FPIntegerConvertFixed | 0x00030000, - UCVTF_sw = UCVTF, - UCVTF_sx = UCVTF | SixtyFourBits, - UCVTF_dw = UCVTF | FP64, - UCVTF_dx = UCVTF | SixtyFourBits | FP64, - FCVTAS = FPIntegerConvertFixed | 0x00040000, - FCVTAS_ws = FCVTAS, - FCVTAS_xs = FCVTAS | SixtyFourBits, - FCVTAS_wd = FCVTAS | FP64, - FCVTAS_xd = FCVTAS | SixtyFourBits | FP64, - FCVTAU = FPIntegerConvertFixed | 0x00050000, - FCVTAU_ws = FCVTAU, - FCVTAU_xs = FCVTAU | SixtyFourBits, - FCVTAU_wd = FCVTAU | FP64, - FCVTAU_xd = FCVTAU | SixtyFourBits | FP64, - FMOV_ws = FPIntegerConvertFixed | 0x00060000, - FMOV_sw = FPIntegerConvertFixed | 0x00070000, - FMOV_xd = FMOV_ws | SixtyFourBits | FP64, - FMOV_dx = FMOV_sw | SixtyFourBits | FP64, - FMOV_d1_x = FPIntegerConvertFixed | SixtyFourBits | 0x008F0000, - FMOV_x_d1 = FPIntegerConvertFixed | SixtyFourBits | 0x008E0000 -}; - -// Conversion between fixed point and floating point. 
-enum FPFixedPointConvertOp { - FPFixedPointConvertFixed = 0x1E000000, - FPFixedPointConvertFMask = 0x5F200000, - FPFixedPointConvertMask = 0xFFFF0000, - FCVTZS_fixed = FPFixedPointConvertFixed | 0x00180000, - FCVTZS_ws_fixed = FCVTZS_fixed, - FCVTZS_xs_fixed = FCVTZS_fixed | SixtyFourBits, - FCVTZS_wd_fixed = FCVTZS_fixed | FP64, - FCVTZS_xd_fixed = FCVTZS_fixed | SixtyFourBits | FP64, - FCVTZU_fixed = FPFixedPointConvertFixed | 0x00190000, - FCVTZU_ws_fixed = FCVTZU_fixed, - FCVTZU_xs_fixed = FCVTZU_fixed | SixtyFourBits, - FCVTZU_wd_fixed = FCVTZU_fixed | FP64, - FCVTZU_xd_fixed = FCVTZU_fixed | SixtyFourBits | FP64, - SCVTF_fixed = FPFixedPointConvertFixed | 0x00020000, - SCVTF_sw_fixed = SCVTF_fixed, - SCVTF_sx_fixed = SCVTF_fixed | SixtyFourBits, - SCVTF_dw_fixed = SCVTF_fixed | FP64, - SCVTF_dx_fixed = SCVTF_fixed | SixtyFourBits | FP64, - UCVTF_fixed = FPFixedPointConvertFixed | 0x00030000, - UCVTF_sw_fixed = UCVTF_fixed, - UCVTF_sx_fixed = UCVTF_fixed | SixtyFourBits, - UCVTF_dw_fixed = UCVTF_fixed | FP64, - UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64 -}; - -// Crypto - two register SHA. -enum Crypto2RegSHAOp { - Crypto2RegSHAFixed = 0x5E280800, - Crypto2RegSHAFMask = 0xFF3E0C00 -}; - -// Crypto - three register SHA. -enum Crypto3RegSHAOp { - Crypto3RegSHAFixed = 0x5E000000, - Crypto3RegSHAFMask = 0xFF208C00 -}; - -// Crypto - AES. -enum CryptoAESOp { - CryptoAESFixed = 0x4E280800, - CryptoAESFMask = 0xFF3E0C00 -}; - -// NEON instructions with two register operands. -enum NEON2RegMiscOp { - NEON2RegMiscFixed = 0x0E200800, - NEON2RegMiscFMask = 0x9F3E0C00, - NEON2RegMiscMask = 0xBF3FFC00, - NEON2RegMiscUBit = 0x20000000, - NEON_REV64 = NEON2RegMiscFixed | 0x00000000, - NEON_REV32 = NEON2RegMiscFixed | 0x20000000, - NEON_REV16 = NEON2RegMiscFixed | 0x00001000, - NEON_SADDLP = NEON2RegMiscFixed | 0x00002000, - NEON_UADDLP = NEON_SADDLP | NEON2RegMiscUBit, - NEON_SUQADD = NEON2RegMiscFixed | 0x00003000, - NEON_USQADD = NEON_SUQADD | NEON2RegMiscUBit, - NEON_CLS = NEON2RegMiscFixed | 0x00004000, - NEON_CLZ = NEON2RegMiscFixed | 0x20004000, - NEON_CNT = NEON2RegMiscFixed | 0x00005000, - NEON_RBIT_NOT = NEON2RegMiscFixed | 0x20005000, - NEON_SADALP = NEON2RegMiscFixed | 0x00006000, - NEON_UADALP = NEON_SADALP | NEON2RegMiscUBit, - NEON_SQABS = NEON2RegMiscFixed | 0x00007000, - NEON_SQNEG = NEON2RegMiscFixed | 0x20007000, - NEON_CMGT_zero = NEON2RegMiscFixed | 0x00008000, - NEON_CMGE_zero = NEON2RegMiscFixed | 0x20008000, - NEON_CMEQ_zero = NEON2RegMiscFixed | 0x00009000, - NEON_CMLE_zero = NEON2RegMiscFixed | 0x20009000, - NEON_CMLT_zero = NEON2RegMiscFixed | 0x0000A000, - NEON_ABS = NEON2RegMiscFixed | 0x0000B000, - NEON_NEG = NEON2RegMiscFixed | 0x2000B000, - NEON_XTN = NEON2RegMiscFixed | 0x00012000, - NEON_SQXTUN = NEON2RegMiscFixed | 0x20012000, - NEON_SHLL = NEON2RegMiscFixed | 0x20013000, - NEON_SQXTN = NEON2RegMiscFixed | 0x00014000, - NEON_UQXTN = NEON_SQXTN | NEON2RegMiscUBit, - - NEON2RegMiscOpcode = 0x0001F000, - NEON_RBIT_NOT_opcode = NEON_RBIT_NOT & NEON2RegMiscOpcode, - NEON_NEG_opcode = NEON_NEG & NEON2RegMiscOpcode, - NEON_XTN_opcode = NEON_XTN & NEON2RegMiscOpcode, - NEON_UQXTN_opcode = NEON_UQXTN & NEON2RegMiscOpcode, - - // These instructions use only one bit of the size field. The other bit is - // used to distinguish between instructions. 
- NEON2RegMiscFPMask = NEON2RegMiscMask | 0x00800000, - NEON_FABS = NEON2RegMiscFixed | 0x0080F000, - NEON_FNEG = NEON2RegMiscFixed | 0x2080F000, - NEON_FCVTN = NEON2RegMiscFixed | 0x00016000, - NEON_FCVTXN = NEON2RegMiscFixed | 0x20016000, - NEON_FCVTL = NEON2RegMiscFixed | 0x00017000, - NEON_FRINTN = NEON2RegMiscFixed | 0x00018000, - NEON_FRINTA = NEON2RegMiscFixed | 0x20018000, - NEON_FRINTP = NEON2RegMiscFixed | 0x00818000, - NEON_FRINTM = NEON2RegMiscFixed | 0x00019000, - NEON_FRINTX = NEON2RegMiscFixed | 0x20019000, - NEON_FRINTZ = NEON2RegMiscFixed | 0x00819000, - NEON_FRINTI = NEON2RegMiscFixed | 0x20819000, - NEON_FCVTNS = NEON2RegMiscFixed | 0x0001A000, - NEON_FCVTNU = NEON_FCVTNS | NEON2RegMiscUBit, - NEON_FCVTPS = NEON2RegMiscFixed | 0x0081A000, - NEON_FCVTPU = NEON_FCVTPS | NEON2RegMiscUBit, - NEON_FCVTMS = NEON2RegMiscFixed | 0x0001B000, - NEON_FCVTMU = NEON_FCVTMS | NEON2RegMiscUBit, - NEON_FCVTZS = NEON2RegMiscFixed | 0x0081B000, - NEON_FCVTZU = NEON_FCVTZS | NEON2RegMiscUBit, - NEON_FCVTAS = NEON2RegMiscFixed | 0x0001C000, - NEON_FCVTAU = NEON_FCVTAS | NEON2RegMiscUBit, - NEON_FSQRT = NEON2RegMiscFixed | 0x2081F000, - NEON_SCVTF = NEON2RegMiscFixed | 0x0001D000, - NEON_UCVTF = NEON_SCVTF | NEON2RegMiscUBit, - NEON_URSQRTE = NEON2RegMiscFixed | 0x2081C000, - NEON_URECPE = NEON2RegMiscFixed | 0x0081C000, - NEON_FRSQRTE = NEON2RegMiscFixed | 0x2081D000, - NEON_FRECPE = NEON2RegMiscFixed | 0x0081D000, - NEON_FCMGT_zero = NEON2RegMiscFixed | 0x0080C000, - NEON_FCMGE_zero = NEON2RegMiscFixed | 0x2080C000, - NEON_FCMEQ_zero = NEON2RegMiscFixed | 0x0080D000, - NEON_FCMLE_zero = NEON2RegMiscFixed | 0x2080D000, - NEON_FCMLT_zero = NEON2RegMiscFixed | 0x0080E000, - - NEON_FCVTL_opcode = NEON_FCVTL & NEON2RegMiscOpcode, - NEON_FCVTN_opcode = NEON_FCVTN & NEON2RegMiscOpcode -}; - -// NEON instructions with three same-type operands. 
-enum NEON3SameOp { - NEON3SameFixed = 0x0E200400, - NEON3SameFMask = 0x9F200400, - NEON3SameMask = 0xBF20FC00, - NEON3SameUBit = 0x20000000, - NEON_ADD = NEON3SameFixed | 0x00008000, - NEON_ADDP = NEON3SameFixed | 0x0000B800, - NEON_SHADD = NEON3SameFixed | 0x00000000, - NEON_SHSUB = NEON3SameFixed | 0x00002000, - NEON_SRHADD = NEON3SameFixed | 0x00001000, - NEON_CMEQ = NEON3SameFixed | NEON3SameUBit | 0x00008800, - NEON_CMGE = NEON3SameFixed | 0x00003800, - NEON_CMGT = NEON3SameFixed | 0x00003000, - NEON_CMHI = NEON3SameFixed | NEON3SameUBit | NEON_CMGT, - NEON_CMHS = NEON3SameFixed | NEON3SameUBit | NEON_CMGE, - NEON_CMTST = NEON3SameFixed | 0x00008800, - NEON_MLA = NEON3SameFixed | 0x00009000, - NEON_MLS = NEON3SameFixed | 0x20009000, - NEON_MUL = NEON3SameFixed | 0x00009800, - NEON_PMUL = NEON3SameFixed | 0x20009800, - NEON_SRSHL = NEON3SameFixed | 0x00005000, - NEON_SQSHL = NEON3SameFixed | 0x00004800, - NEON_SQRSHL = NEON3SameFixed | 0x00005800, - NEON_SSHL = NEON3SameFixed | 0x00004000, - NEON_SMAX = NEON3SameFixed | 0x00006000, - NEON_SMAXP = NEON3SameFixed | 0x0000A000, - NEON_SMIN = NEON3SameFixed | 0x00006800, - NEON_SMINP = NEON3SameFixed | 0x0000A800, - NEON_SABD = NEON3SameFixed | 0x00007000, - NEON_SABA = NEON3SameFixed | 0x00007800, - NEON_UABD = NEON3SameFixed | NEON3SameUBit | NEON_SABD, - NEON_UABA = NEON3SameFixed | NEON3SameUBit | NEON_SABA, - NEON_SQADD = NEON3SameFixed | 0x00000800, - NEON_SQSUB = NEON3SameFixed | 0x00002800, - NEON_SUB = NEON3SameFixed | NEON3SameUBit | 0x00008000, - NEON_UHADD = NEON3SameFixed | NEON3SameUBit | NEON_SHADD, - NEON_UHSUB = NEON3SameFixed | NEON3SameUBit | NEON_SHSUB, - NEON_URHADD = NEON3SameFixed | NEON3SameUBit | NEON_SRHADD, - NEON_UMAX = NEON3SameFixed | NEON3SameUBit | NEON_SMAX, - NEON_UMAXP = NEON3SameFixed | NEON3SameUBit | NEON_SMAXP, - NEON_UMIN = NEON3SameFixed | NEON3SameUBit | NEON_SMIN, - NEON_UMINP = NEON3SameFixed | NEON3SameUBit | NEON_SMINP, - NEON_URSHL = NEON3SameFixed | NEON3SameUBit | NEON_SRSHL, - NEON_UQADD = NEON3SameFixed | NEON3SameUBit | NEON_SQADD, - NEON_UQRSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQRSHL, - NEON_UQSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQSHL, - NEON_UQSUB = NEON3SameFixed | NEON3SameUBit | NEON_SQSUB, - NEON_USHL = NEON3SameFixed | NEON3SameUBit | NEON_SSHL, - NEON_SQDMULH = NEON3SameFixed | 0x0000B000, - NEON_SQRDMULH = NEON3SameFixed | 0x2000B000, - - // NEON floating point instructions with three same-type operands. 
- NEON3SameFPFixed = NEON3SameFixed | 0x0000C000, - NEON3SameFPFMask = NEON3SameFMask | 0x0000C000, - NEON3SameFPMask = NEON3SameMask | 0x00800000, - NEON_FADD = NEON3SameFixed | 0x0000D000, - NEON_FSUB = NEON3SameFixed | 0x0080D000, - NEON_FMUL = NEON3SameFixed | 0x2000D800, - NEON_FDIV = NEON3SameFixed | 0x2000F800, - NEON_FMAX = NEON3SameFixed | 0x0000F000, - NEON_FMAXNM = NEON3SameFixed | 0x0000C000, - NEON_FMAXP = NEON3SameFixed | 0x2000F000, - NEON_FMAXNMP = NEON3SameFixed | 0x2000C000, - NEON_FMIN = NEON3SameFixed | 0x0080F000, - NEON_FMINNM = NEON3SameFixed | 0x0080C000, - NEON_FMINP = NEON3SameFixed | 0x2080F000, - NEON_FMINNMP = NEON3SameFixed | 0x2080C000, - NEON_FMLA = NEON3SameFixed | 0x0000C800, - NEON_FMLS = NEON3SameFixed | 0x0080C800, - NEON_FMULX = NEON3SameFixed | 0x0000D800, - NEON_FRECPS = NEON3SameFixed | 0x0000F800, - NEON_FRSQRTS = NEON3SameFixed | 0x0080F800, - NEON_FABD = NEON3SameFixed | 0x2080D000, - NEON_FADDP = NEON3SameFixed | 0x2000D000, - NEON_FCMEQ = NEON3SameFixed | 0x0000E000, - NEON_FCMGE = NEON3SameFixed | 0x2000E000, - NEON_FCMGT = NEON3SameFixed | 0x2080E000, - NEON_FACGE = NEON3SameFixed | 0x2000E800, - NEON_FACGT = NEON3SameFixed | 0x2080E800, - - // NEON logical instructions with three same-type operands. - NEON3SameLogicalFixed = NEON3SameFixed | 0x00001800, - NEON3SameLogicalFMask = NEON3SameFMask | 0x0000F800, - NEON3SameLogicalMask = 0xBFE0FC00, - NEON3SameLogicalFormatMask = NEON_Q, - NEON_AND = NEON3SameLogicalFixed | 0x00000000, - NEON_ORR = NEON3SameLogicalFixed | 0x00A00000, - NEON_ORN = NEON3SameLogicalFixed | 0x00C00000, - NEON_EOR = NEON3SameLogicalFixed | 0x20000000, - NEON_BIC = NEON3SameLogicalFixed | 0x00400000, - NEON_BIF = NEON3SameLogicalFixed | 0x20C00000, - NEON_BIT = NEON3SameLogicalFixed | 0x20800000, - NEON_BSL = NEON3SameLogicalFixed | 0x20400000 -}; - -// NEON instructions with three different-type operands. 
-enum NEON3DifferentOp { - NEON3DifferentFixed = 0x0E200000, - NEON3DifferentFMask = 0x9F200C00, - NEON3DifferentMask = 0xFF20FC00, - NEON_ADDHN = NEON3DifferentFixed | 0x00004000, - NEON_ADDHN2 = NEON_ADDHN | NEON_Q, - NEON_PMULL = NEON3DifferentFixed | 0x0000E000, - NEON_PMULL2 = NEON_PMULL | NEON_Q, - NEON_RADDHN = NEON3DifferentFixed | 0x20004000, - NEON_RADDHN2 = NEON_RADDHN | NEON_Q, - NEON_RSUBHN = NEON3DifferentFixed | 0x20006000, - NEON_RSUBHN2 = NEON_RSUBHN | NEON_Q, - NEON_SABAL = NEON3DifferentFixed | 0x00005000, - NEON_SABAL2 = NEON_SABAL | NEON_Q, - NEON_SABDL = NEON3DifferentFixed | 0x00007000, - NEON_SABDL2 = NEON_SABDL | NEON_Q, - NEON_SADDL = NEON3DifferentFixed | 0x00000000, - NEON_SADDL2 = NEON_SADDL | NEON_Q, - NEON_SADDW = NEON3DifferentFixed | 0x00001000, - NEON_SADDW2 = NEON_SADDW | NEON_Q, - NEON_SMLAL = NEON3DifferentFixed | 0x00008000, - NEON_SMLAL2 = NEON_SMLAL | NEON_Q, - NEON_SMLSL = NEON3DifferentFixed | 0x0000A000, - NEON_SMLSL2 = NEON_SMLSL | NEON_Q, - NEON_SMULL = NEON3DifferentFixed | 0x0000C000, - NEON_SMULL2 = NEON_SMULL | NEON_Q, - NEON_SSUBL = NEON3DifferentFixed | 0x00002000, - NEON_SSUBL2 = NEON_SSUBL | NEON_Q, - NEON_SSUBW = NEON3DifferentFixed | 0x00003000, - NEON_SSUBW2 = NEON_SSUBW | NEON_Q, - NEON_SQDMLAL = NEON3DifferentFixed | 0x00009000, - NEON_SQDMLAL2 = NEON_SQDMLAL | NEON_Q, - NEON_SQDMLSL = NEON3DifferentFixed | 0x0000B000, - NEON_SQDMLSL2 = NEON_SQDMLSL | NEON_Q, - NEON_SQDMULL = NEON3DifferentFixed | 0x0000D000, - NEON_SQDMULL2 = NEON_SQDMULL | NEON_Q, - NEON_SUBHN = NEON3DifferentFixed | 0x00006000, - NEON_SUBHN2 = NEON_SUBHN | NEON_Q, - NEON_UABAL = NEON_SABAL | NEON3SameUBit, - NEON_UABAL2 = NEON_UABAL | NEON_Q, - NEON_UABDL = NEON_SABDL | NEON3SameUBit, - NEON_UABDL2 = NEON_UABDL | NEON_Q, - NEON_UADDL = NEON_SADDL | NEON3SameUBit, - NEON_UADDL2 = NEON_UADDL | NEON_Q, - NEON_UADDW = NEON_SADDW | NEON3SameUBit, - NEON_UADDW2 = NEON_UADDW | NEON_Q, - NEON_UMLAL = NEON_SMLAL | NEON3SameUBit, - NEON_UMLAL2 = NEON_UMLAL | NEON_Q, - NEON_UMLSL = NEON_SMLSL | NEON3SameUBit, - NEON_UMLSL2 = NEON_UMLSL | NEON_Q, - NEON_UMULL = NEON_SMULL | NEON3SameUBit, - NEON_UMULL2 = NEON_UMULL | NEON_Q, - NEON_USUBL = NEON_SSUBL | NEON3SameUBit, - NEON_USUBL2 = NEON_USUBL | NEON_Q, - NEON_USUBW = NEON_SSUBW | NEON3SameUBit, - NEON_USUBW2 = NEON_USUBW | NEON_Q -}; - -// NEON instructions operating across vectors. -enum NEONAcrossLanesOp { - NEONAcrossLanesFixed = 0x0E300800, - NEONAcrossLanesFMask = 0x9F3E0C00, - NEONAcrossLanesMask = 0xBF3FFC00, - NEON_ADDV = NEONAcrossLanesFixed | 0x0001B000, - NEON_SADDLV = NEONAcrossLanesFixed | 0x00003000, - NEON_UADDLV = NEONAcrossLanesFixed | 0x20003000, - NEON_SMAXV = NEONAcrossLanesFixed | 0x0000A000, - NEON_SMINV = NEONAcrossLanesFixed | 0x0001A000, - NEON_UMAXV = NEONAcrossLanesFixed | 0x2000A000, - NEON_UMINV = NEONAcrossLanesFixed | 0x2001A000, - - // NEON floating point across instructions. - NEONAcrossLanesFPFixed = NEONAcrossLanesFixed | 0x0000C000, - NEONAcrossLanesFPFMask = NEONAcrossLanesFMask | 0x0000C000, - NEONAcrossLanesFPMask = NEONAcrossLanesMask | 0x00800000, - - NEON_FMAXV = NEONAcrossLanesFPFixed | 0x2000F000, - NEON_FMINV = NEONAcrossLanesFPFixed | 0x2080F000, - NEON_FMAXNMV = NEONAcrossLanesFPFixed | 0x2000C000, - NEON_FMINNMV = NEONAcrossLanesFPFixed | 0x2080C000 -}; - -// NEON instructions with indexed element operand. 
-enum NEONByIndexedElementOp { - NEONByIndexedElementFixed = 0x0F000000, - NEONByIndexedElementFMask = 0x9F000400, - NEONByIndexedElementMask = 0xBF00F400, - NEON_MUL_byelement = NEONByIndexedElementFixed | 0x00008000, - NEON_MLA_byelement = NEONByIndexedElementFixed | 0x20000000, - NEON_MLS_byelement = NEONByIndexedElementFixed | 0x20004000, - NEON_SMULL_byelement = NEONByIndexedElementFixed | 0x0000A000, - NEON_SMLAL_byelement = NEONByIndexedElementFixed | 0x00002000, - NEON_SMLSL_byelement = NEONByIndexedElementFixed | 0x00006000, - NEON_UMULL_byelement = NEONByIndexedElementFixed | 0x2000A000, - NEON_UMLAL_byelement = NEONByIndexedElementFixed | 0x20002000, - NEON_UMLSL_byelement = NEONByIndexedElementFixed | 0x20006000, - NEON_SQDMULL_byelement = NEONByIndexedElementFixed | 0x0000B000, - NEON_SQDMLAL_byelement = NEONByIndexedElementFixed | 0x00003000, - NEON_SQDMLSL_byelement = NEONByIndexedElementFixed | 0x00007000, - NEON_SQDMULH_byelement = NEONByIndexedElementFixed | 0x0000C000, - NEON_SQRDMULH_byelement = NEONByIndexedElementFixed | 0x0000D000, - - // Floating point instructions. - NEONByIndexedElementFPFixed = NEONByIndexedElementFixed | 0x00800000, - NEONByIndexedElementFPMask = NEONByIndexedElementMask | 0x00800000, - NEON_FMLA_byelement = NEONByIndexedElementFPFixed | 0x00001000, - NEON_FMLS_byelement = NEONByIndexedElementFPFixed | 0x00005000, - NEON_FMUL_byelement = NEONByIndexedElementFPFixed | 0x00009000, - NEON_FMULX_byelement = NEONByIndexedElementFPFixed | 0x20009000 -}; - -// NEON register copy. -enum NEONCopyOp { - NEONCopyFixed = 0x0E000400, - NEONCopyFMask = 0x9FE08400, - NEONCopyMask = 0x3FE08400, - NEONCopyInsElementMask = NEONCopyMask | 0x40000000, - NEONCopyInsGeneralMask = NEONCopyMask | 0x40007800, - NEONCopyDupElementMask = NEONCopyMask | 0x20007800, - NEONCopyDupGeneralMask = NEONCopyDupElementMask, - NEONCopyUmovMask = NEONCopyMask | 0x20007800, - NEONCopySmovMask = NEONCopyMask | 0x20007800, - NEON_INS_ELEMENT = NEONCopyFixed | 0x60000000, - NEON_INS_GENERAL = NEONCopyFixed | 0x40001800, - NEON_DUP_ELEMENT = NEONCopyFixed | 0x00000000, - NEON_DUP_GENERAL = NEONCopyFixed | 0x00000800, - NEON_SMOV = NEONCopyFixed | 0x00002800, - NEON_UMOV = NEONCopyFixed | 0x00003800 -}; - -// NEON extract. -enum NEONExtractOp { - NEONExtractFixed = 0x2E000000, - NEONExtractFMask = 0xBF208400, - NEONExtractMask = 0xBFE08400, - NEON_EXT = NEONExtractFixed | 0x00000000 -}; - -enum NEONLoadStoreMultiOp { - NEONLoadStoreMultiL = 0x00400000, - NEONLoadStoreMulti1_1v = 0x00007000, - NEONLoadStoreMulti1_2v = 0x0000A000, - NEONLoadStoreMulti1_3v = 0x00006000, - NEONLoadStoreMulti1_4v = 0x00002000, - NEONLoadStoreMulti2 = 0x00008000, - NEONLoadStoreMulti3 = 0x00004000, - NEONLoadStoreMulti4 = 0x00000000 -}; - -// NEON load/store multiple structures. 
-enum NEONLoadStoreMultiStructOp { - NEONLoadStoreMultiStructFixed = 0x0C000000, - NEONLoadStoreMultiStructFMask = 0xBFBF0000, - NEONLoadStoreMultiStructMask = 0xBFFFF000, - NEONLoadStoreMultiStructStore = NEONLoadStoreMultiStructFixed, - NEONLoadStoreMultiStructLoad = NEONLoadStoreMultiStructFixed | - NEONLoadStoreMultiL, - NEON_LD1_1v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_1v, - NEON_LD1_2v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_2v, - NEON_LD1_3v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_3v, - NEON_LD1_4v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_4v, - NEON_LD2 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti2, - NEON_LD3 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti3, - NEON_LD4 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti4, - NEON_ST1_1v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_1v, - NEON_ST1_2v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_2v, - NEON_ST1_3v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_3v, - NEON_ST1_4v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_4v, - NEON_ST2 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti2, - NEON_ST3 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti3, - NEON_ST4 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti4 -}; - -// NEON load/store multiple structures with post-index addressing. -enum NEONLoadStoreMultiStructPostIndexOp { - NEONLoadStoreMultiStructPostIndexFixed = 0x0C800000, - NEONLoadStoreMultiStructPostIndexFMask = 0xBFA00000, - NEONLoadStoreMultiStructPostIndexMask = 0xBFE0F000, - NEONLoadStoreMultiStructPostIndex = 0x00800000, - NEON_LD1_1v_post = NEON_LD1_1v | NEONLoadStoreMultiStructPostIndex, - NEON_LD1_2v_post = NEON_LD1_2v | NEONLoadStoreMultiStructPostIndex, - NEON_LD1_3v_post = NEON_LD1_3v | NEONLoadStoreMultiStructPostIndex, - NEON_LD1_4v_post = NEON_LD1_4v | NEONLoadStoreMultiStructPostIndex, - NEON_LD2_post = NEON_LD2 | NEONLoadStoreMultiStructPostIndex, - NEON_LD3_post = NEON_LD3 | NEONLoadStoreMultiStructPostIndex, - NEON_LD4_post = NEON_LD4 | NEONLoadStoreMultiStructPostIndex, - NEON_ST1_1v_post = NEON_ST1_1v | NEONLoadStoreMultiStructPostIndex, - NEON_ST1_2v_post = NEON_ST1_2v | NEONLoadStoreMultiStructPostIndex, - NEON_ST1_3v_post = NEON_ST1_3v | NEONLoadStoreMultiStructPostIndex, - NEON_ST1_4v_post = NEON_ST1_4v | NEONLoadStoreMultiStructPostIndex, - NEON_ST2_post = NEON_ST2 | NEONLoadStoreMultiStructPostIndex, - NEON_ST3_post = NEON_ST3 | NEONLoadStoreMultiStructPostIndex, - NEON_ST4_post = NEON_ST4 | NEONLoadStoreMultiStructPostIndex -}; - -enum NEONLoadStoreSingleOp { - NEONLoadStoreSingle1 = 0x00000000, - NEONLoadStoreSingle2 = 0x00200000, - NEONLoadStoreSingle3 = 0x00002000, - NEONLoadStoreSingle4 = 0x00202000, - NEONLoadStoreSingleL = 0x00400000, - NEONLoadStoreSingle_b = 0x00000000, - NEONLoadStoreSingle_h = 0x00004000, - NEONLoadStoreSingle_s = 0x00008000, - NEONLoadStoreSingle_d = 0x00008400, - NEONLoadStoreSingleAllLanes = 0x0000C000, - NEONLoadStoreSingleLenMask = 0x00202000 -}; - -// NEON load/store single structure. 
-enum NEONLoadStoreSingleStructOp { - NEONLoadStoreSingleStructFixed = 0x0D000000, - NEONLoadStoreSingleStructFMask = 0xBF9F0000, - NEONLoadStoreSingleStructMask = 0xBFFFE000, - NEONLoadStoreSingleStructStore = NEONLoadStoreSingleStructFixed, - NEONLoadStoreSingleStructLoad = NEONLoadStoreSingleStructFixed | - NEONLoadStoreSingleL, - NEONLoadStoreSingleStructLoad1 = NEONLoadStoreSingle1 | - NEONLoadStoreSingleStructLoad, - NEONLoadStoreSingleStructLoad2 = NEONLoadStoreSingle2 | - NEONLoadStoreSingleStructLoad, - NEONLoadStoreSingleStructLoad3 = NEONLoadStoreSingle3 | - NEONLoadStoreSingleStructLoad, - NEONLoadStoreSingleStructLoad4 = NEONLoadStoreSingle4 | - NEONLoadStoreSingleStructLoad, - NEONLoadStoreSingleStructStore1 = NEONLoadStoreSingle1 | - NEONLoadStoreSingleStructFixed, - NEONLoadStoreSingleStructStore2 = NEONLoadStoreSingle2 | - NEONLoadStoreSingleStructFixed, - NEONLoadStoreSingleStructStore3 = NEONLoadStoreSingle3 | - NEONLoadStoreSingleStructFixed, - NEONLoadStoreSingleStructStore4 = NEONLoadStoreSingle4 | - NEONLoadStoreSingleStructFixed, - NEON_LD1_b = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_b, - NEON_LD1_h = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_h, - NEON_LD1_s = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_s, - NEON_LD1_d = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_d, - NEON_LD1R = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingleAllLanes, - NEON_ST1_b = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_b, - NEON_ST1_h = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_h, - NEON_ST1_s = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_s, - NEON_ST1_d = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_d, - - NEON_LD2_b = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_b, - NEON_LD2_h = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_h, - NEON_LD2_s = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_s, - NEON_LD2_d = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_d, - NEON_LD2R = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingleAllLanes, - NEON_ST2_b = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_b, - NEON_ST2_h = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_h, - NEON_ST2_s = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_s, - NEON_ST2_d = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_d, - - NEON_LD3_b = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_b, - NEON_LD3_h = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_h, - NEON_LD3_s = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_s, - NEON_LD3_d = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_d, - NEON_LD3R = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingleAllLanes, - NEON_ST3_b = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_b, - NEON_ST3_h = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_h, - NEON_ST3_s = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_s, - NEON_ST3_d = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_d, - - NEON_LD4_b = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_b, - NEON_LD4_h = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_h, - NEON_LD4_s = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_s, - NEON_LD4_d = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_d, - NEON_LD4R = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingleAllLanes, - NEON_ST4_b = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_b, - NEON_ST4_h = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_h, - NEON_ST4_s = 
NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_s, - NEON_ST4_d = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_d -}; - -// NEON load/store single structure with post-index addressing. -enum NEONLoadStoreSingleStructPostIndexOp { - NEONLoadStoreSingleStructPostIndexFixed = 0x0D800000, - NEONLoadStoreSingleStructPostIndexFMask = 0xBF800000, - NEONLoadStoreSingleStructPostIndexMask = 0xBFE0E000, - NEONLoadStoreSingleStructPostIndex = 0x00800000, - NEON_LD1_b_post = NEON_LD1_b | NEONLoadStoreSingleStructPostIndex, - NEON_LD1_h_post = NEON_LD1_h | NEONLoadStoreSingleStructPostIndex, - NEON_LD1_s_post = NEON_LD1_s | NEONLoadStoreSingleStructPostIndex, - NEON_LD1_d_post = NEON_LD1_d | NEONLoadStoreSingleStructPostIndex, - NEON_LD1R_post = NEON_LD1R | NEONLoadStoreSingleStructPostIndex, - NEON_ST1_b_post = NEON_ST1_b | NEONLoadStoreSingleStructPostIndex, - NEON_ST1_h_post = NEON_ST1_h | NEONLoadStoreSingleStructPostIndex, - NEON_ST1_s_post = NEON_ST1_s | NEONLoadStoreSingleStructPostIndex, - NEON_ST1_d_post = NEON_ST1_d | NEONLoadStoreSingleStructPostIndex, - - NEON_LD2_b_post = NEON_LD2_b | NEONLoadStoreSingleStructPostIndex, - NEON_LD2_h_post = NEON_LD2_h | NEONLoadStoreSingleStructPostIndex, - NEON_LD2_s_post = NEON_LD2_s | NEONLoadStoreSingleStructPostIndex, - NEON_LD2_d_post = NEON_LD2_d | NEONLoadStoreSingleStructPostIndex, - NEON_LD2R_post = NEON_LD2R | NEONLoadStoreSingleStructPostIndex, - NEON_ST2_b_post = NEON_ST2_b | NEONLoadStoreSingleStructPostIndex, - NEON_ST2_h_post = NEON_ST2_h | NEONLoadStoreSingleStructPostIndex, - NEON_ST2_s_post = NEON_ST2_s | NEONLoadStoreSingleStructPostIndex, - NEON_ST2_d_post = NEON_ST2_d | NEONLoadStoreSingleStructPostIndex, - - NEON_LD3_b_post = NEON_LD3_b | NEONLoadStoreSingleStructPostIndex, - NEON_LD3_h_post = NEON_LD3_h | NEONLoadStoreSingleStructPostIndex, - NEON_LD3_s_post = NEON_LD3_s | NEONLoadStoreSingleStructPostIndex, - NEON_LD3_d_post = NEON_LD3_d | NEONLoadStoreSingleStructPostIndex, - NEON_LD3R_post = NEON_LD3R | NEONLoadStoreSingleStructPostIndex, - NEON_ST3_b_post = NEON_ST3_b | NEONLoadStoreSingleStructPostIndex, - NEON_ST3_h_post = NEON_ST3_h | NEONLoadStoreSingleStructPostIndex, - NEON_ST3_s_post = NEON_ST3_s | NEONLoadStoreSingleStructPostIndex, - NEON_ST3_d_post = NEON_ST3_d | NEONLoadStoreSingleStructPostIndex, - - NEON_LD4_b_post = NEON_LD4_b | NEONLoadStoreSingleStructPostIndex, - NEON_LD4_h_post = NEON_LD4_h | NEONLoadStoreSingleStructPostIndex, - NEON_LD4_s_post = NEON_LD4_s | NEONLoadStoreSingleStructPostIndex, - NEON_LD4_d_post = NEON_LD4_d | NEONLoadStoreSingleStructPostIndex, - NEON_LD4R_post = NEON_LD4R | NEONLoadStoreSingleStructPostIndex, - NEON_ST4_b_post = NEON_ST4_b | NEONLoadStoreSingleStructPostIndex, - NEON_ST4_h_post = NEON_ST4_h | NEONLoadStoreSingleStructPostIndex, - NEON_ST4_s_post = NEON_ST4_s | NEONLoadStoreSingleStructPostIndex, - NEON_ST4_d_post = NEON_ST4_d | NEONLoadStoreSingleStructPostIndex -}; - -// NEON modified immediate. -enum NEONModifiedImmediateOp { - NEONModifiedImmediateFixed = 0x0F000400, - NEONModifiedImmediateFMask = 0x9FF80400, - NEONModifiedImmediateOpBit = 0x20000000, - NEONModifiedImmediate_MOVI = NEONModifiedImmediateFixed | 0x00000000, - NEONModifiedImmediate_MVNI = NEONModifiedImmediateFixed | 0x20000000, - NEONModifiedImmediate_ORR = NEONModifiedImmediateFixed | 0x00001000, - NEONModifiedImmediate_BIC = NEONModifiedImmediateFixed | 0x20001000 -}; - -// NEON shift immediate. 
-enum NEONShiftImmediateOp { - NEONShiftImmediateFixed = 0x0F000400, - NEONShiftImmediateFMask = 0x9F800400, - NEONShiftImmediateMask = 0xBF80FC00, - NEONShiftImmediateUBit = 0x20000000, - NEON_SHL = NEONShiftImmediateFixed | 0x00005000, - NEON_SSHLL = NEONShiftImmediateFixed | 0x0000A000, - NEON_USHLL = NEONShiftImmediateFixed | 0x2000A000, - NEON_SLI = NEONShiftImmediateFixed | 0x20005000, - NEON_SRI = NEONShiftImmediateFixed | 0x20004000, - NEON_SHRN = NEONShiftImmediateFixed | 0x00008000, - NEON_RSHRN = NEONShiftImmediateFixed | 0x00008800, - NEON_UQSHRN = NEONShiftImmediateFixed | 0x20009000, - NEON_UQRSHRN = NEONShiftImmediateFixed | 0x20009800, - NEON_SQSHRN = NEONShiftImmediateFixed | 0x00009000, - NEON_SQRSHRN = NEONShiftImmediateFixed | 0x00009800, - NEON_SQSHRUN = NEONShiftImmediateFixed | 0x20008000, - NEON_SQRSHRUN = NEONShiftImmediateFixed | 0x20008800, - NEON_SSHR = NEONShiftImmediateFixed | 0x00000000, - NEON_SRSHR = NEONShiftImmediateFixed | 0x00002000, - NEON_USHR = NEONShiftImmediateFixed | 0x20000000, - NEON_URSHR = NEONShiftImmediateFixed | 0x20002000, - NEON_SSRA = NEONShiftImmediateFixed | 0x00001000, - NEON_SRSRA = NEONShiftImmediateFixed | 0x00003000, - NEON_USRA = NEONShiftImmediateFixed | 0x20001000, - NEON_URSRA = NEONShiftImmediateFixed | 0x20003000, - NEON_SQSHLU = NEONShiftImmediateFixed | 0x20006000, - NEON_SCVTF_imm = NEONShiftImmediateFixed | 0x0000E000, - NEON_UCVTF_imm = NEONShiftImmediateFixed | 0x2000E000, - NEON_FCVTZS_imm = NEONShiftImmediateFixed | 0x0000F800, - NEON_FCVTZU_imm = NEONShiftImmediateFixed | 0x2000F800, - NEON_SQSHL_imm = NEONShiftImmediateFixed | 0x00007000, - NEON_UQSHL_imm = NEONShiftImmediateFixed | 0x20007000 -}; - -// NEON table. -enum NEONTableOp { - NEONTableFixed = 0x0E000000, - NEONTableFMask = 0xBF208C00, - NEONTableExt = 0x00001000, - NEONTableMask = 0xBF20FC00, - NEON_TBL_1v = NEONTableFixed | 0x00000000, - NEON_TBL_2v = NEONTableFixed | 0x00002000, - NEON_TBL_3v = NEONTableFixed | 0x00004000, - NEON_TBL_4v = NEONTableFixed | 0x00006000, - NEON_TBX_1v = NEON_TBL_1v | NEONTableExt, - NEON_TBX_2v = NEON_TBL_2v | NEONTableExt, - NEON_TBX_3v = NEON_TBL_3v | NEONTableExt, - NEON_TBX_4v = NEON_TBL_4v | NEONTableExt -}; - -// NEON perm. -enum NEONPermOp { - NEONPermFixed = 0x0E000800, - NEONPermFMask = 0xBF208C00, - NEONPermMask = 0x3F20FC00, - NEON_UZP1 = NEONPermFixed | 0x00001000, - NEON_TRN1 = NEONPermFixed | 0x00002000, - NEON_ZIP1 = NEONPermFixed | 0x00003000, - NEON_UZP2 = NEONPermFixed | 0x00005000, - NEON_TRN2 = NEONPermFixed | 0x00006000, - NEON_ZIP2 = NEONPermFixed | 0x00007000 -}; - -// NEON scalar instructions with two register operands. 
-enum NEONScalar2RegMiscOp { - NEONScalar2RegMiscFixed = 0x5E200800, - NEONScalar2RegMiscFMask = 0xDF3E0C00, - NEONScalar2RegMiscMask = NEON_Q | NEONScalar | NEON2RegMiscMask, - NEON_CMGT_zero_scalar = NEON_Q | NEONScalar | NEON_CMGT_zero, - NEON_CMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_CMEQ_zero, - NEON_CMLT_zero_scalar = NEON_Q | NEONScalar | NEON_CMLT_zero, - NEON_CMGE_zero_scalar = NEON_Q | NEONScalar | NEON_CMGE_zero, - NEON_CMLE_zero_scalar = NEON_Q | NEONScalar | NEON_CMLE_zero, - NEON_ABS_scalar = NEON_Q | NEONScalar | NEON_ABS, - NEON_SQABS_scalar = NEON_Q | NEONScalar | NEON_SQABS, - NEON_NEG_scalar = NEON_Q | NEONScalar | NEON_NEG, - NEON_SQNEG_scalar = NEON_Q | NEONScalar | NEON_SQNEG, - NEON_SQXTN_scalar = NEON_Q | NEONScalar | NEON_SQXTN, - NEON_UQXTN_scalar = NEON_Q | NEONScalar | NEON_UQXTN, - NEON_SQXTUN_scalar = NEON_Q | NEONScalar | NEON_SQXTUN, - NEON_SUQADD_scalar = NEON_Q | NEONScalar | NEON_SUQADD, - NEON_USQADD_scalar = NEON_Q | NEONScalar | NEON_USQADD, - - NEONScalar2RegMiscOpcode = NEON2RegMiscOpcode, - NEON_NEG_scalar_opcode = NEON_NEG_scalar & NEONScalar2RegMiscOpcode, - - NEONScalar2RegMiscFPMask = NEONScalar2RegMiscMask | 0x00800000, - NEON_FRSQRTE_scalar = NEON_Q | NEONScalar | NEON_FRSQRTE, - NEON_FRECPE_scalar = NEON_Q | NEONScalar | NEON_FRECPE, - NEON_SCVTF_scalar = NEON_Q | NEONScalar | NEON_SCVTF, - NEON_UCVTF_scalar = NEON_Q | NEONScalar | NEON_UCVTF, - NEON_FCMGT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGT_zero, - NEON_FCMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_FCMEQ_zero, - NEON_FCMLT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLT_zero, - NEON_FCMGE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGE_zero, - NEON_FCMLE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLE_zero, - NEON_FRECPX_scalar = NEONScalar2RegMiscFixed | 0x0081F000, - NEON_FCVTNS_scalar = NEON_Q | NEONScalar | NEON_FCVTNS, - NEON_FCVTNU_scalar = NEON_Q | NEONScalar | NEON_FCVTNU, - NEON_FCVTPS_scalar = NEON_Q | NEONScalar | NEON_FCVTPS, - NEON_FCVTPU_scalar = NEON_Q | NEONScalar | NEON_FCVTPU, - NEON_FCVTMS_scalar = NEON_Q | NEONScalar | NEON_FCVTMS, - NEON_FCVTMU_scalar = NEON_Q | NEONScalar | NEON_FCVTMU, - NEON_FCVTZS_scalar = NEON_Q | NEONScalar | NEON_FCVTZS, - NEON_FCVTZU_scalar = NEON_Q | NEONScalar | NEON_FCVTZU, - NEON_FCVTAS_scalar = NEON_Q | NEONScalar | NEON_FCVTAS, - NEON_FCVTAU_scalar = NEON_Q | NEONScalar | NEON_FCVTAU, - NEON_FCVTXN_scalar = NEON_Q | NEONScalar | NEON_FCVTXN -}; - -// NEON scalar instructions with three same-type operands. 
-enum NEONScalar3SameOp { - NEONScalar3SameFixed = 0x5E200400, - NEONScalar3SameFMask = 0xDF200400, - NEONScalar3SameMask = 0xFF20FC00, - NEON_ADD_scalar = NEON_Q | NEONScalar | NEON_ADD, - NEON_CMEQ_scalar = NEON_Q | NEONScalar | NEON_CMEQ, - NEON_CMGE_scalar = NEON_Q | NEONScalar | NEON_CMGE, - NEON_CMGT_scalar = NEON_Q | NEONScalar | NEON_CMGT, - NEON_CMHI_scalar = NEON_Q | NEONScalar | NEON_CMHI, - NEON_CMHS_scalar = NEON_Q | NEONScalar | NEON_CMHS, - NEON_CMTST_scalar = NEON_Q | NEONScalar | NEON_CMTST, - NEON_SUB_scalar = NEON_Q | NEONScalar | NEON_SUB, - NEON_UQADD_scalar = NEON_Q | NEONScalar | NEON_UQADD, - NEON_SQADD_scalar = NEON_Q | NEONScalar | NEON_SQADD, - NEON_UQSUB_scalar = NEON_Q | NEONScalar | NEON_UQSUB, - NEON_SQSUB_scalar = NEON_Q | NEONScalar | NEON_SQSUB, - NEON_USHL_scalar = NEON_Q | NEONScalar | NEON_USHL, - NEON_SSHL_scalar = NEON_Q | NEONScalar | NEON_SSHL, - NEON_UQSHL_scalar = NEON_Q | NEONScalar | NEON_UQSHL, - NEON_SQSHL_scalar = NEON_Q | NEONScalar | NEON_SQSHL, - NEON_URSHL_scalar = NEON_Q | NEONScalar | NEON_URSHL, - NEON_SRSHL_scalar = NEON_Q | NEONScalar | NEON_SRSHL, - NEON_UQRSHL_scalar = NEON_Q | NEONScalar | NEON_UQRSHL, - NEON_SQRSHL_scalar = NEON_Q | NEONScalar | NEON_SQRSHL, - NEON_SQDMULH_scalar = NEON_Q | NEONScalar | NEON_SQDMULH, - NEON_SQRDMULH_scalar = NEON_Q | NEONScalar | NEON_SQRDMULH, - - // NEON floating point scalar instructions with three same-type operands. - NEONScalar3SameFPFixed = NEONScalar3SameFixed | 0x0000C000, - NEONScalar3SameFPFMask = NEONScalar3SameFMask | 0x0000C000, - NEONScalar3SameFPMask = NEONScalar3SameMask | 0x00800000, - NEON_FACGE_scalar = NEON_Q | NEONScalar | NEON_FACGE, - NEON_FACGT_scalar = NEON_Q | NEONScalar | NEON_FACGT, - NEON_FCMEQ_scalar = NEON_Q | NEONScalar | NEON_FCMEQ, - NEON_FCMGE_scalar = NEON_Q | NEONScalar | NEON_FCMGE, - NEON_FCMGT_scalar = NEON_Q | NEONScalar | NEON_FCMGT, - NEON_FMULX_scalar = NEON_Q | NEONScalar | NEON_FMULX, - NEON_FRECPS_scalar = NEON_Q | NEONScalar | NEON_FRECPS, - NEON_FRSQRTS_scalar = NEON_Q | NEONScalar | NEON_FRSQRTS, - NEON_FABD_scalar = NEON_Q | NEONScalar | NEON_FABD -}; - -// NEON scalar instructions with three different-type operands. -enum NEONScalar3DiffOp { - NEONScalar3DiffFixed = 0x5E200000, - NEONScalar3DiffFMask = 0xDF200C00, - NEONScalar3DiffMask = NEON_Q | NEONScalar | NEON3DifferentMask, - NEON_SQDMLAL_scalar = NEON_Q | NEONScalar | NEON_SQDMLAL, - NEON_SQDMLSL_scalar = NEON_Q | NEONScalar | NEON_SQDMLSL, - NEON_SQDMULL_scalar = NEON_Q | NEONScalar | NEON_SQDMULL -}; - -// NEON scalar instructions with indexed element operand. -enum NEONScalarByIndexedElementOp { - NEONScalarByIndexedElementFixed = 0x5F000000, - NEONScalarByIndexedElementFMask = 0xDF000400, - NEONScalarByIndexedElementMask = 0xFF00F400, - NEON_SQDMLAL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMLAL_byelement, - NEON_SQDMLSL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMLSL_byelement, - NEON_SQDMULL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMULL_byelement, - NEON_SQDMULH_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMULH_byelement, - NEON_SQRDMULH_byelement_scalar - = NEON_Q | NEONScalar | NEON_SQRDMULH_byelement, - - // Floating point instructions. 
- NEONScalarByIndexedElementFPFixed - = NEONScalarByIndexedElementFixed | 0x00800000, - NEONScalarByIndexedElementFPMask - = NEONScalarByIndexedElementMask | 0x00800000, - NEON_FMLA_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLA_byelement, - NEON_FMLS_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLS_byelement, - NEON_FMUL_byelement_scalar = NEON_Q | NEONScalar | NEON_FMUL_byelement, - NEON_FMULX_byelement_scalar = NEON_Q | NEONScalar | NEON_FMULX_byelement -}; - -// NEON scalar register copy. -enum NEONScalarCopyOp { - NEONScalarCopyFixed = 0x5E000400, - NEONScalarCopyFMask = 0xDFE08400, - NEONScalarCopyMask = 0xFFE0FC00, - NEON_DUP_ELEMENT_scalar = NEON_Q | NEONScalar | NEON_DUP_ELEMENT -}; - -// NEON scalar pairwise instructions. -enum NEONScalarPairwiseOp { - NEONScalarPairwiseFixed = 0x5E300800, - NEONScalarPairwiseFMask = 0xDF3E0C00, - NEONScalarPairwiseMask = 0xFFB1F800, - NEON_ADDP_scalar = NEONScalarPairwiseFixed | 0x0081B000, - NEON_FMAXNMP_scalar = NEONScalarPairwiseFixed | 0x2000C000, - NEON_FMINNMP_scalar = NEONScalarPairwiseFixed | 0x2080C000, - NEON_FADDP_scalar = NEONScalarPairwiseFixed | 0x2000D000, - NEON_FMAXP_scalar = NEONScalarPairwiseFixed | 0x2000F000, - NEON_FMINP_scalar = NEONScalarPairwiseFixed | 0x2080F000 -}; - -// NEON scalar shift immediate. -enum NEONScalarShiftImmediateOp { - NEONScalarShiftImmediateFixed = 0x5F000400, - NEONScalarShiftImmediateFMask = 0xDF800400, - NEONScalarShiftImmediateMask = 0xFF80FC00, - NEON_SHL_scalar = NEON_Q | NEONScalar | NEON_SHL, - NEON_SLI_scalar = NEON_Q | NEONScalar | NEON_SLI, - NEON_SRI_scalar = NEON_Q | NEONScalar | NEON_SRI, - NEON_SSHR_scalar = NEON_Q | NEONScalar | NEON_SSHR, - NEON_USHR_scalar = NEON_Q | NEONScalar | NEON_USHR, - NEON_SRSHR_scalar = NEON_Q | NEONScalar | NEON_SRSHR, - NEON_URSHR_scalar = NEON_Q | NEONScalar | NEON_URSHR, - NEON_SSRA_scalar = NEON_Q | NEONScalar | NEON_SSRA, - NEON_USRA_scalar = NEON_Q | NEONScalar | NEON_USRA, - NEON_SRSRA_scalar = NEON_Q | NEONScalar | NEON_SRSRA, - NEON_URSRA_scalar = NEON_Q | NEONScalar | NEON_URSRA, - NEON_UQSHRN_scalar = NEON_Q | NEONScalar | NEON_UQSHRN, - NEON_UQRSHRN_scalar = NEON_Q | NEONScalar | NEON_UQRSHRN, - NEON_SQSHRN_scalar = NEON_Q | NEONScalar | NEON_SQSHRN, - NEON_SQRSHRN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRN, - NEON_SQSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQSHRUN, - NEON_SQRSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRUN, - NEON_SQSHLU_scalar = NEON_Q | NEONScalar | NEON_SQSHLU, - NEON_SQSHL_imm_scalar = NEON_Q | NEONScalar | NEON_SQSHL_imm, - NEON_UQSHL_imm_scalar = NEON_Q | NEONScalar | NEON_UQSHL_imm, - NEON_SCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_SCVTF_imm, - NEON_UCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_UCVTF_imm, - NEON_FCVTZS_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZS_imm, - NEON_FCVTZU_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZU_imm -}; - -// Unimplemented and unallocated instructions. These are defined to make fixed -// bit assertion easier. -enum UnimplementedOp { - UnimplementedFixed = 0x00000000, - UnimplementedFMask = 0x00000000 -}; - -enum UnallocatedOp { - UnallocatedFixed = 0x00000000, - UnallocatedFMask = 0x00000000 -}; - -} // namespace vixl - -#endif // VIXL_A64_CONSTANTS_A64_H_ diff --git a/disas/libvixl/vixl/a64/cpu-a64.h b/disas/libvixl/vixl/a64/cpu-a64.h deleted file mode 100644 index cdf09a6af1..0000000000 --- a/disas/libvixl/vixl/a64/cpu-a64.h +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2014, ARM Limited -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of ARM Limited nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef VIXL_CPU_A64_H -#define VIXL_CPU_A64_H - -#include "vixl/globals.h" -#include "vixl/a64/instructions-a64.h" - -namespace vixl { - -class CPU { - public: - // Initialise CPU support. - static void SetUp(); - - // Ensures the data at a given address and with a given size is the same for - // the I and D caches. I and D caches are not automatically coherent on ARM - // so this operation is required before any dynamically generated code can - // safely run. - static void EnsureIAndDCacheCoherency(void *address, size_t length); - - // Handle tagged pointers. - template - static T SetPointerTag(T pointer, uint64_t tag) { - VIXL_ASSERT(is_uintn(kAddressTagWidth, tag)); - - // Use C-style casts to get static_cast behaviour for integral types (T), - // and reinterpret_cast behaviour for other types. - - uint64_t raw = (uint64_t)pointer; - VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw)); - - raw = (raw & ~kAddressTagMask) | (tag << kAddressTagOffset); - return (T)raw; - } - - template - static uint64_t GetPointerTag(T pointer) { - // Use C-style casts to get static_cast behaviour for integral types (T), - // and reinterpret_cast behaviour for other types. - - uint64_t raw = (uint64_t)pointer; - VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw)); - - return (raw & kAddressTagMask) >> kAddressTagOffset; - } - - private: - // Return the content of the cache type register. - static uint32_t GetCacheType(); - - // I and D cache line size in bytes. - static unsigned icache_line_size_; - static unsigned dcache_line_size_; -}; - -} // namespace vixl - -#endif // VIXL_CPU_A64_H diff --git a/disas/libvixl/vixl/a64/decoder-a64.cc b/disas/libvixl/vixl/a64/decoder-a64.cc deleted file mode 100644 index 5ba2d3ce04..0000000000 --- a/disas/libvixl/vixl/a64/decoder-a64.cc +++ /dev/null @@ -1,877 +0,0 @@ -// Copyright 2014, ARM Limited -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of ARM Limited nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "vixl/globals.h" -#include "vixl/utils.h" -#include "vixl/a64/decoder-a64.h" - -namespace vixl { - -void Decoder::DecodeInstruction(const Instruction *instr) { - if (instr->Bits(28, 27) == 0) { - VisitUnallocated(instr); - } else { - switch (instr->Bits(27, 24)) { - // 0: PC relative addressing. - case 0x0: DecodePCRelAddressing(instr); break; - - // 1: Add/sub immediate. - case 0x1: DecodeAddSubImmediate(instr); break; - - // A: Logical shifted register. - // Add/sub with carry. - // Conditional compare register. - // Conditional compare immediate. - // Conditional select. - // Data processing 1 source. - // Data processing 2 source. - // B: Add/sub shifted register. - // Add/sub extended register. - // Data processing 3 source. - case 0xA: - case 0xB: DecodeDataProcessing(instr); break; - - // 2: Logical immediate. - // Move wide immediate. - case 0x2: DecodeLogical(instr); break; - - // 3: Bitfield. - // Extract. - case 0x3: DecodeBitfieldExtract(instr); break; - - // 4: Unconditional branch immediate. - // Exception generation. - // Compare and branch immediate. - // 5: Compare and branch immediate. - // Conditional branch. - // System. - // 6,7: Unconditional branch. - // Test and branch immediate. - case 0x4: - case 0x5: - case 0x6: - case 0x7: DecodeBranchSystemException(instr); break; - - // 8,9: Load/store register pair post-index. - // Load register literal. - // Load/store register unscaled immediate. - // Load/store register immediate post-index. - // Load/store register immediate pre-index. - // Load/store register offset. - // Load/store exclusive. - // C,D: Load/store register pair offset. - // Load/store register pair pre-index. - // Load/store register unsigned immediate. - // Advanced SIMD. - case 0x8: - case 0x9: - case 0xC: - case 0xD: DecodeLoadStore(instr); break; - - // E: FP fixed point conversion. - // FP integer conversion. - // FP data processing 1 source. - // FP compare. - // FP immediate. - // FP data processing 2 source. - // FP conditional compare. - // FP conditional select. 
- // Advanced SIMD. - // F: FP data processing 3 source. - // Advanced SIMD. - case 0xE: - case 0xF: DecodeFP(instr); break; - } - } -} - -void Decoder::AppendVisitor(DecoderVisitor* new_visitor) { - visitors_.push_back(new_visitor); -} - - -void Decoder::PrependVisitor(DecoderVisitor* new_visitor) { - visitors_.push_front(new_visitor); -} - - -void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor, - DecoderVisitor* registered_visitor) { - std::list::iterator it; - for (it = visitors_.begin(); it != visitors_.end(); it++) { - if (*it == registered_visitor) { - visitors_.insert(it, new_visitor); - return; - } - } - // We reached the end of the list. The last element must be - // registered_visitor. - VIXL_ASSERT(*it == registered_visitor); - visitors_.insert(it, new_visitor); -} - - -void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor, - DecoderVisitor* registered_visitor) { - std::list::iterator it; - for (it = visitors_.begin(); it != visitors_.end(); it++) { - if (*it == registered_visitor) { - it++; - visitors_.insert(it, new_visitor); - return; - } - } - // We reached the end of the list. The last element must be - // registered_visitor. - VIXL_ASSERT(*it == registered_visitor); - visitors_.push_back(new_visitor); -} - - -void Decoder::RemoveVisitor(DecoderVisitor* visitor) { - visitors_.remove(visitor); -} - - -void Decoder::DecodePCRelAddressing(const Instruction* instr) { - VIXL_ASSERT(instr->Bits(27, 24) == 0x0); - // We know bit 28 is set, as = 0 is filtered out at the top level - // decode. - VIXL_ASSERT(instr->Bit(28) == 0x1); - VisitPCRelAddressing(instr); -} - - -void Decoder::DecodeBranchSystemException(const Instruction* instr) { - VIXL_ASSERT((instr->Bits(27, 24) == 0x4) || - (instr->Bits(27, 24) == 0x5) || - (instr->Bits(27, 24) == 0x6) || - (instr->Bits(27, 24) == 0x7) ); - - switch (instr->Bits(31, 29)) { - case 0: - case 4: { - VisitUnconditionalBranch(instr); - break; - } - case 1: - case 5: { - if (instr->Bit(25) == 0) { - VisitCompareBranch(instr); - } else { - VisitTestBranch(instr); - } - break; - } - case 2: { - if (instr->Bit(25) == 0) { - if ((instr->Bit(24) == 0x1) || - (instr->Mask(0x01000010) == 0x00000010)) { - VisitUnallocated(instr); - } else { - VisitConditionalBranch(instr); - } - } else { - VisitUnallocated(instr); - } - break; - } - case 6: { - if (instr->Bit(25) == 0) { - if (instr->Bit(24) == 0) { - if ((instr->Bits(4, 2) != 0) || - (instr->Mask(0x00E0001D) == 0x00200001) || - (instr->Mask(0x00E0001D) == 0x00400001) || - (instr->Mask(0x00E0001E) == 0x00200002) || - (instr->Mask(0x00E0001E) == 0x00400002) || - (instr->Mask(0x00E0001C) == 0x00600000) || - (instr->Mask(0x00E0001C) == 0x00800000) || - (instr->Mask(0x00E0001F) == 0x00A00000) || - (instr->Mask(0x00C0001C) == 0x00C00000)) { - VisitUnallocated(instr); - } else { - VisitException(instr); - } - } else { - if (instr->Bits(23, 22) == 0) { - const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0); - if ((instr->Bits(21, 19) == 0x4) || - (masked_003FF0E0 == 0x00033000) || - (masked_003FF0E0 == 0x003FF020) || - (masked_003FF0E0 == 0x003FF060) || - (masked_003FF0E0 == 0x003FF0E0) || - (instr->Mask(0x00388000) == 0x00008000) || - (instr->Mask(0x0038E000) == 0x00000000) || - (instr->Mask(0x0039E000) == 0x00002000) || - (instr->Mask(0x003AE000) == 0x00002000) || - (instr->Mask(0x003CE000) == 0x00042000) || - (instr->Mask(0x003FFFC0) == 0x000320C0) || - (instr->Mask(0x003FF100) == 0x00032100) || - (instr->Mask(0x003FF200) == 0x00032200) || - (instr->Mask(0x003FF400) == 0x00032400) || - 
(instr->Mask(0x003FF800) == 0x00032800) || - (instr->Mask(0x0038F000) == 0x00005000) || - (instr->Mask(0x0038E000) == 0x00006000)) { - VisitUnallocated(instr); - } else { - VisitSystem(instr); - } - } else { - VisitUnallocated(instr); - } - } - } else { - if ((instr->Bit(24) == 0x1) || - (instr->Bits(20, 16) != 0x1F) || - (instr->Bits(15, 10) != 0) || - (instr->Bits(4, 0) != 0) || - (instr->Bits(24, 21) == 0x3) || - (instr->Bits(24, 22) == 0x3)) { - VisitUnallocated(instr); - } else { - VisitUnconditionalBranchToRegister(instr); - } - } - break; - } - case 3: - case 7: { - VisitUnallocated(instr); - break; - } - } -} - - -void Decoder::DecodeLoadStore(const Instruction* instr) { - VIXL_ASSERT((instr->Bits(27, 24) == 0x8) || - (instr->Bits(27, 24) == 0x9) || - (instr->Bits(27, 24) == 0xC) || - (instr->Bits(27, 24) == 0xD) ); - // TODO(all): rearrange the tree to integrate this branch. - if ((instr->Bit(28) == 0) && (instr->Bit(29) == 0) && (instr->Bit(26) == 1)) { - DecodeNEONLoadStore(instr); - return; - } - - if (instr->Bit(24) == 0) { - if (instr->Bit(28) == 0) { - if (instr->Bit(29) == 0) { - if (instr->Bit(26) == 0) { - VisitLoadStoreExclusive(instr); - } else { - VIXL_UNREACHABLE(); - } - } else { - if ((instr->Bits(31, 30) == 0x3) || - (instr->Mask(0xC4400000) == 0x40000000)) { - VisitUnallocated(instr); - } else { - if (instr->Bit(23) == 0) { - if (instr->Mask(0xC4400000) == 0xC0400000) { - VisitUnallocated(instr); - } else { - VisitLoadStorePairNonTemporal(instr); - } - } else { - VisitLoadStorePairPostIndex(instr); - } - } - } - } else { - if (instr->Bit(29) == 0) { - if (instr->Mask(0xC4000000) == 0xC4000000) { - VisitUnallocated(instr); - } else { - VisitLoadLiteral(instr); - } - } else { - if ((instr->Mask(0x84C00000) == 0x80C00000) || - (instr->Mask(0x44800000) == 0x44800000) || - (instr->Mask(0x84800000) == 0x84800000)) { - VisitUnallocated(instr); - } else { - if (instr->Bit(21) == 0) { - switch (instr->Bits(11, 10)) { - case 0: { - VisitLoadStoreUnscaledOffset(instr); - break; - } - case 1: { - if (instr->Mask(0xC4C00000) == 0xC0800000) { - VisitUnallocated(instr); - } else { - VisitLoadStorePostIndex(instr); - } - break; - } - case 2: { - // TODO: VisitLoadStoreRegisterOffsetUnpriv. 
- VisitUnimplemented(instr); - break; - } - case 3: { - if (instr->Mask(0xC4C00000) == 0xC0800000) { - VisitUnallocated(instr); - } else { - VisitLoadStorePreIndex(instr); - } - break; - } - } - } else { - if (instr->Bits(11, 10) == 0x2) { - if (instr->Bit(14) == 0) { - VisitUnallocated(instr); - } else { - VisitLoadStoreRegisterOffset(instr); - } - } else { - VisitUnallocated(instr); - } - } - } - } - } - } else { - if (instr->Bit(28) == 0) { - if (instr->Bit(29) == 0) { - VisitUnallocated(instr); - } else { - if ((instr->Bits(31, 30) == 0x3) || - (instr->Mask(0xC4400000) == 0x40000000)) { - VisitUnallocated(instr); - } else { - if (instr->Bit(23) == 0) { - VisitLoadStorePairOffset(instr); - } else { - VisitLoadStorePairPreIndex(instr); - } - } - } - } else { - if (instr->Bit(29) == 0) { - VisitUnallocated(instr); - } else { - if ((instr->Mask(0x84C00000) == 0x80C00000) || - (instr->Mask(0x44800000) == 0x44800000) || - (instr->Mask(0x84800000) == 0x84800000)) { - VisitUnallocated(instr); - } else { - VisitLoadStoreUnsignedOffset(instr); - } - } - } - } -} - - -void Decoder::DecodeLogical(const Instruction* instr) { - VIXL_ASSERT(instr->Bits(27, 24) == 0x2); - - if (instr->Mask(0x80400000) == 0x00400000) { - VisitUnallocated(instr); - } else { - if (instr->Bit(23) == 0) { - VisitLogicalImmediate(instr); - } else { - if (instr->Bits(30, 29) == 0x1) { - VisitUnallocated(instr); - } else { - VisitMoveWideImmediate(instr); - } - } - } -} - - -void Decoder::DecodeBitfieldExtract(const Instruction* instr) { - VIXL_ASSERT(instr->Bits(27, 24) == 0x3); - - if ((instr->Mask(0x80400000) == 0x80000000) || - (instr->Mask(0x80400000) == 0x00400000) || - (instr->Mask(0x80008000) == 0x00008000)) { - VisitUnallocated(instr); - } else if (instr->Bit(23) == 0) { - if ((instr->Mask(0x80200000) == 0x00200000) || - (instr->Mask(0x60000000) == 0x60000000)) { - VisitUnallocated(instr); - } else { - VisitBitfield(instr); - } - } else { - if ((instr->Mask(0x60200000) == 0x00200000) || - (instr->Mask(0x60000000) != 0x00000000)) { - VisitUnallocated(instr); - } else { - VisitExtract(instr); - } - } -} - - -void Decoder::DecodeAddSubImmediate(const Instruction* instr) { - VIXL_ASSERT(instr->Bits(27, 24) == 0x1); - if (instr->Bit(23) == 1) { - VisitUnallocated(instr); - } else { - VisitAddSubImmediate(instr); - } -} - - -void Decoder::DecodeDataProcessing(const Instruction* instr) { - VIXL_ASSERT((instr->Bits(27, 24) == 0xA) || - (instr->Bits(27, 24) == 0xB)); - - if (instr->Bit(24) == 0) { - if (instr->Bit(28) == 0) { - if (instr->Mask(0x80008000) == 0x00008000) { - VisitUnallocated(instr); - } else { - VisitLogicalShifted(instr); - } - } else { - switch (instr->Bits(23, 21)) { - case 0: { - if (instr->Mask(0x0000FC00) != 0) { - VisitUnallocated(instr); - } else { - VisitAddSubWithCarry(instr); - } - break; - } - case 2: { - if ((instr->Bit(29) == 0) || - (instr->Mask(0x00000410) != 0)) { - VisitUnallocated(instr); - } else { - if (instr->Bit(11) == 0) { - VisitConditionalCompareRegister(instr); - } else { - VisitConditionalCompareImmediate(instr); - } - } - break; - } - case 4: { - if (instr->Mask(0x20000800) != 0x00000000) { - VisitUnallocated(instr); - } else { - VisitConditionalSelect(instr); - } - break; - } - case 6: { - if (instr->Bit(29) == 0x1) { - VisitUnallocated(instr); - VIXL_FALLTHROUGH(); - } else { - if (instr->Bit(30) == 0) { - if ((instr->Bit(15) == 0x1) || - (instr->Bits(15, 11) == 0) || - (instr->Bits(15, 12) == 0x1) || - (instr->Bits(15, 12) == 0x3) || - (instr->Bits(15, 13) == 0x3) || - 
(instr->Mask(0x8000EC00) == 0x00004C00) || - (instr->Mask(0x8000E800) == 0x80004000) || - (instr->Mask(0x8000E400) == 0x80004000)) { - VisitUnallocated(instr); - } else { - VisitDataProcessing2Source(instr); - } - } else { - if ((instr->Bit(13) == 1) || - (instr->Bits(20, 16) != 0) || - (instr->Bits(15, 14) != 0) || - (instr->Mask(0xA01FFC00) == 0x00000C00) || - (instr->Mask(0x201FF800) == 0x00001800)) { - VisitUnallocated(instr); - } else { - VisitDataProcessing1Source(instr); - } - } - break; - } - } - case 1: - case 3: - case 5: - case 7: VisitUnallocated(instr); break; - } - } - } else { - if (instr->Bit(28) == 0) { - if (instr->Bit(21) == 0) { - if ((instr->Bits(23, 22) == 0x3) || - (instr->Mask(0x80008000) == 0x00008000)) { - VisitUnallocated(instr); - } else { - VisitAddSubShifted(instr); - } - } else { - if ((instr->Mask(0x00C00000) != 0x00000000) || - (instr->Mask(0x00001400) == 0x00001400) || - (instr->Mask(0x00001800) == 0x00001800)) { - VisitUnallocated(instr); - } else { - VisitAddSubExtended(instr); - } - } - } else { - if ((instr->Bit(30) == 0x1) || - (instr->Bits(30, 29) == 0x1) || - (instr->Mask(0xE0600000) == 0x00200000) || - (instr->Mask(0xE0608000) == 0x00400000) || - (instr->Mask(0x60608000) == 0x00408000) || - (instr->Mask(0x60E00000) == 0x00E00000) || - (instr->Mask(0x60E00000) == 0x00800000) || - (instr->Mask(0x60E00000) == 0x00600000)) { - VisitUnallocated(instr); - } else { - VisitDataProcessing3Source(instr); - } - } - } -} - - -void Decoder::DecodeFP(const Instruction* instr) { - VIXL_ASSERT((instr->Bits(27, 24) == 0xE) || - (instr->Bits(27, 24) == 0xF)); - if (instr->Bit(28) == 0) { - DecodeNEONVectorDataProcessing(instr); - } else { - if (instr->Bits(31, 30) == 0x3) { - VisitUnallocated(instr); - } else if (instr->Bits(31, 30) == 0x1) { - DecodeNEONScalarDataProcessing(instr); - } else { - if (instr->Bit(29) == 0) { - if (instr->Bit(24) == 0) { - if (instr->Bit(21) == 0) { - if ((instr->Bit(23) == 1) || - (instr->Bit(18) == 1) || - (instr->Mask(0x80008000) == 0x00000000) || - (instr->Mask(0x000E0000) == 0x00000000) || - (instr->Mask(0x000E0000) == 0x000A0000) || - (instr->Mask(0x00160000) == 0x00000000) || - (instr->Mask(0x00160000) == 0x00120000)) { - VisitUnallocated(instr); - } else { - VisitFPFixedPointConvert(instr); - } - } else { - if (instr->Bits(15, 10) == 32) { - VisitUnallocated(instr); - } else if (instr->Bits(15, 10) == 0) { - if ((instr->Bits(23, 22) == 0x3) || - (instr->Mask(0x000E0000) == 0x000A0000) || - (instr->Mask(0x000E0000) == 0x000C0000) || - (instr->Mask(0x00160000) == 0x00120000) || - (instr->Mask(0x00160000) == 0x00140000) || - (instr->Mask(0x20C40000) == 0x00800000) || - (instr->Mask(0x20C60000) == 0x00840000) || - (instr->Mask(0xA0C60000) == 0x80060000) || - (instr->Mask(0xA0C60000) == 0x00860000) || - (instr->Mask(0xA0C60000) == 0x00460000) || - (instr->Mask(0xA0CE0000) == 0x80860000) || - (instr->Mask(0xA0CE0000) == 0x804E0000) || - (instr->Mask(0xA0CE0000) == 0x000E0000) || - (instr->Mask(0xA0D60000) == 0x00160000) || - (instr->Mask(0xA0D60000) == 0x80560000) || - (instr->Mask(0xA0D60000) == 0x80960000)) { - VisitUnallocated(instr); - } else { - VisitFPIntegerConvert(instr); - } - } else if (instr->Bits(14, 10) == 16) { - const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000); - if ((instr->Mask(0x80180000) != 0) || - (masked_A0DF8000 == 0x00020000) || - (masked_A0DF8000 == 0x00030000) || - (masked_A0DF8000 == 0x00068000) || - (masked_A0DF8000 == 0x00428000) || - (masked_A0DF8000 == 0x00430000) || - (masked_A0DF8000 == 0x00468000) 
|| - (instr->Mask(0xA0D80000) == 0x00800000) || - (instr->Mask(0xA0DE0000) == 0x00C00000) || - (instr->Mask(0xA0DF0000) == 0x00C30000) || - (instr->Mask(0xA0DC0000) == 0x00C40000)) { - VisitUnallocated(instr); - } else { - VisitFPDataProcessing1Source(instr); - } - } else if (instr->Bits(13, 10) == 8) { - if ((instr->Bits(15, 14) != 0) || - (instr->Bits(2, 0) != 0) || - (instr->Mask(0x80800000) != 0x00000000)) { - VisitUnallocated(instr); - } else { - VisitFPCompare(instr); - } - } else if (instr->Bits(12, 10) == 4) { - if ((instr->Bits(9, 5) != 0) || - (instr->Mask(0x80800000) != 0x00000000)) { - VisitUnallocated(instr); - } else { - VisitFPImmediate(instr); - } - } else { - if (instr->Mask(0x80800000) != 0x00000000) { - VisitUnallocated(instr); - } else { - switch (instr->Bits(11, 10)) { - case 1: { - VisitFPConditionalCompare(instr); - break; - } - case 2: { - if ((instr->Bits(15, 14) == 0x3) || - (instr->Mask(0x00009000) == 0x00009000) || - (instr->Mask(0x0000A000) == 0x0000A000)) { - VisitUnallocated(instr); - } else { - VisitFPDataProcessing2Source(instr); - } - break; - } - case 3: { - VisitFPConditionalSelect(instr); - break; - } - default: VIXL_UNREACHABLE(); - } - } - } - } - } else { - // Bit 30 == 1 has been handled earlier. - VIXL_ASSERT(instr->Bit(30) == 0); - if (instr->Mask(0xA0800000) != 0) { - VisitUnallocated(instr); - } else { - VisitFPDataProcessing3Source(instr); - } - } - } else { - VisitUnallocated(instr); - } - } - } -} - - -void Decoder::DecodeNEONLoadStore(const Instruction* instr) { - VIXL_ASSERT(instr->Bits(29, 25) == 0x6); - if (instr->Bit(31) == 0) { - if ((instr->Bit(24) == 0) && (instr->Bit(21) == 1)) { - VisitUnallocated(instr); - return; - } - - if (instr->Bit(23) == 0) { - if (instr->Bits(20, 16) == 0) { - if (instr->Bit(24) == 0) { - VisitNEONLoadStoreMultiStruct(instr); - } else { - VisitNEONLoadStoreSingleStruct(instr); - } - } else { - VisitUnallocated(instr); - } - } else { - if (instr->Bit(24) == 0) { - VisitNEONLoadStoreMultiStructPostIndex(instr); - } else { - VisitNEONLoadStoreSingleStructPostIndex(instr); - } - } - } else { - VisitUnallocated(instr); - } -} - - -void Decoder::DecodeNEONVectorDataProcessing(const Instruction* instr) { - VIXL_ASSERT(instr->Bits(28, 25) == 0x7); - if (instr->Bit(31) == 0) { - if (instr->Bit(24) == 0) { - if (instr->Bit(21) == 0) { - if (instr->Bit(15) == 0) { - if (instr->Bit(10) == 0) { - if (instr->Bit(29) == 0) { - if (instr->Bit(11) == 0) { - VisitNEONTable(instr); - } else { - VisitNEONPerm(instr); - } - } else { - VisitNEONExtract(instr); - } - } else { - if (instr->Bits(23, 22) == 0) { - VisitNEONCopy(instr); - } else { - VisitUnallocated(instr); - } - } - } else { - VisitUnallocated(instr); - } - } else { - if (instr->Bit(10) == 0) { - if (instr->Bit(11) == 0) { - VisitNEON3Different(instr); - } else { - if (instr->Bits(18, 17) == 0) { - if (instr->Bit(20) == 0) { - if (instr->Bit(19) == 0) { - VisitNEON2RegMisc(instr); - } else { - if (instr->Bits(30, 29) == 0x2) { - VisitCryptoAES(instr); - } else { - VisitUnallocated(instr); - } - } - } else { - if (instr->Bit(19) == 0) { - VisitNEONAcrossLanes(instr); - } else { - VisitUnallocated(instr); - } - } - } else { - VisitUnallocated(instr); - } - } - } else { - VisitNEON3Same(instr); - } - } - } else { - if (instr->Bit(10) == 0) { - VisitNEONByIndexedElement(instr); - } else { - if (instr->Bit(23) == 0) { - if (instr->Bits(22, 19) == 0) { - VisitNEONModifiedImmediate(instr); - } else { - VisitNEONShiftImmediate(instr); - } - } else { - 
VisitUnallocated(instr); - } - } - } - } else { - VisitUnallocated(instr); - } -} - - -void Decoder::DecodeNEONScalarDataProcessing(const Instruction* instr) { - VIXL_ASSERT(instr->Bits(28, 25) == 0xF); - if (instr->Bit(24) == 0) { - if (instr->Bit(21) == 0) { - if (instr->Bit(15) == 0) { - if (instr->Bit(10) == 0) { - if (instr->Bit(29) == 0) { - if (instr->Bit(11) == 0) { - VisitCrypto3RegSHA(instr); - } else { - VisitUnallocated(instr); - } - } else { - VisitUnallocated(instr); - } - } else { - if (instr->Bits(23, 22) == 0) { - VisitNEONScalarCopy(instr); - } else { - VisitUnallocated(instr); - } - } - } else { - VisitUnallocated(instr); - } - } else { - if (instr->Bit(10) == 0) { - if (instr->Bit(11) == 0) { - VisitNEONScalar3Diff(instr); - } else { - if (instr->Bits(18, 17) == 0) { - if (instr->Bit(20) == 0) { - if (instr->Bit(19) == 0) { - VisitNEONScalar2RegMisc(instr); - } else { - if (instr->Bit(29) == 0) { - VisitCrypto2RegSHA(instr); - } else { - VisitUnallocated(instr); - } - } - } else { - if (instr->Bit(19) == 0) { - VisitNEONScalarPairwise(instr); - } else { - VisitUnallocated(instr); - } - } - } else { - VisitUnallocated(instr); - } - } - } else { - VisitNEONScalar3Same(instr); - } - } - } else { - if (instr->Bit(10) == 0) { - VisitNEONScalarByIndexedElement(instr); - } else { - if (instr->Bit(23) == 0) { - VisitNEONScalarShiftImmediate(instr); - } else { - VisitUnallocated(instr); - } - } - } -} - - -#define DEFINE_VISITOR_CALLERS(A) \ - void Decoder::Visit##A(const Instruction *instr) { \ - VIXL_ASSERT(instr->Mask(A##FMask) == A##Fixed); \ - std::list::iterator it; \ - for (it = visitors_.begin(); it != visitors_.end(); it++) { \ - (*it)->Visit##A(instr); \ - } \ - } -VISITOR_LIST(DEFINE_VISITOR_CALLERS) -#undef DEFINE_VISITOR_CALLERS -} // namespace vixl diff --git a/disas/libvixl/vixl/a64/decoder-a64.h b/disas/libvixl/vixl/a64/decoder-a64.h deleted file mode 100644 index b3f04f68fc..0000000000 --- a/disas/libvixl/vixl/a64/decoder-a64.h +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2014, ARM Limited -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of ARM Limited nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef VIXL_A64_DECODER_A64_H_ -#define VIXL_A64_DECODER_A64_H_ - -#include - -#include "vixl/globals.h" -#include "vixl/a64/instructions-a64.h" - - -// List macro containing all visitors needed by the decoder class. - -#define VISITOR_LIST_THAT_RETURN(V) \ - V(PCRelAddressing) \ - V(AddSubImmediate) \ - V(LogicalImmediate) \ - V(MoveWideImmediate) \ - V(Bitfield) \ - V(Extract) \ - V(UnconditionalBranch) \ - V(UnconditionalBranchToRegister) \ - V(CompareBranch) \ - V(TestBranch) \ - V(ConditionalBranch) \ - V(System) \ - V(Exception) \ - V(LoadStorePairPostIndex) \ - V(LoadStorePairOffset) \ - V(LoadStorePairPreIndex) \ - V(LoadStorePairNonTemporal) \ - V(LoadLiteral) \ - V(LoadStoreUnscaledOffset) \ - V(LoadStorePostIndex) \ - V(LoadStorePreIndex) \ - V(LoadStoreRegisterOffset) \ - V(LoadStoreUnsignedOffset) \ - V(LoadStoreExclusive) \ - V(LogicalShifted) \ - V(AddSubShifted) \ - V(AddSubExtended) \ - V(AddSubWithCarry) \ - V(ConditionalCompareRegister) \ - V(ConditionalCompareImmediate) \ - V(ConditionalSelect) \ - V(DataProcessing1Source) \ - V(DataProcessing2Source) \ - V(DataProcessing3Source) \ - V(FPCompare) \ - V(FPConditionalCompare) \ - V(FPConditionalSelect) \ - V(FPImmediate) \ - V(FPDataProcessing1Source) \ - V(FPDataProcessing2Source) \ - V(FPDataProcessing3Source) \ - V(FPIntegerConvert) \ - V(FPFixedPointConvert) \ - V(Crypto2RegSHA) \ - V(Crypto3RegSHA) \ - V(CryptoAES) \ - V(NEON2RegMisc) \ - V(NEON3Different) \ - V(NEON3Same) \ - V(NEONAcrossLanes) \ - V(NEONByIndexedElement) \ - V(NEONCopy) \ - V(NEONExtract) \ - V(NEONLoadStoreMultiStruct) \ - V(NEONLoadStoreMultiStructPostIndex) \ - V(NEONLoadStoreSingleStruct) \ - V(NEONLoadStoreSingleStructPostIndex) \ - V(NEONModifiedImmediate) \ - V(NEONScalar2RegMisc) \ - V(NEONScalar3Diff) \ - V(NEONScalar3Same) \ - V(NEONScalarByIndexedElement) \ - V(NEONScalarCopy) \ - V(NEONScalarPairwise) \ - V(NEONScalarShiftImmediate) \ - V(NEONShiftImmediate) \ - V(NEONTable) \ - V(NEONPerm) \ - -#define VISITOR_LIST_THAT_DONT_RETURN(V) \ - V(Unallocated) \ - V(Unimplemented) \ - -#define VISITOR_LIST(V) \ - VISITOR_LIST_THAT_RETURN(V) \ - VISITOR_LIST_THAT_DONT_RETURN(V) \ - -namespace vixl { - -// The Visitor interface. Disassembler and simulator (and other tools) -// must provide implementations for all of these functions. 
-class DecoderVisitor { - public: - enum VisitorConstness { - kConstVisitor, - kNonConstVisitor - }; - explicit DecoderVisitor(VisitorConstness constness = kConstVisitor) - : constness_(constness) {} - - virtual ~DecoderVisitor() {} - - #define DECLARE(A) virtual void Visit##A(const Instruction* instr) = 0; - VISITOR_LIST(DECLARE) - #undef DECLARE - - bool IsConstVisitor() const { return constness_ == kConstVisitor; } - Instruction* MutableInstruction(const Instruction* instr) { - VIXL_ASSERT(!IsConstVisitor()); - return const_cast(instr); - } - - private: - const VisitorConstness constness_; -}; - - -class Decoder { - public: - Decoder() {} - - // Top-level wrappers around the actual decoding function. - void Decode(const Instruction* instr) { - std::list::iterator it; - for (it = visitors_.begin(); it != visitors_.end(); it++) { - VIXL_ASSERT((*it)->IsConstVisitor()); - } - DecodeInstruction(instr); - } - void Decode(Instruction* instr) { - DecodeInstruction(const_cast(instr)); - } - - // Register a new visitor class with the decoder. - // Decode() will call the corresponding visitor method from all registered - // visitor classes when decoding reaches the leaf node of the instruction - // decode tree. - // Visitors are called in order. - // A visitor can be registered multiple times. - // - // d.AppendVisitor(V1); - // d.AppendVisitor(V2); - // d.PrependVisitor(V2); - // d.AppendVisitor(V3); - // - // d.Decode(i); - // - // will call in order visitor methods in V2, V1, V2, V3. - void AppendVisitor(DecoderVisitor* visitor); - void PrependVisitor(DecoderVisitor* visitor); - // These helpers register `new_visitor` before or after the first instance of - // `registered_visiter` in the list. - // So if - // V1, V2, V1, V2 - // are registered in this order in the decoder, calls to - // d.InsertVisitorAfter(V3, V1); - // d.InsertVisitorBefore(V4, V2); - // will yield the order - // V1, V3, V4, V2, V1, V2 - // - // For more complex modifications of the order of registered visitors, one can - // directly access and modify the list of visitors via the `visitors()' - // accessor. - void InsertVisitorBefore(DecoderVisitor* new_visitor, - DecoderVisitor* registered_visitor); - void InsertVisitorAfter(DecoderVisitor* new_visitor, - DecoderVisitor* registered_visitor); - - // Remove all instances of a previously registered visitor class from the list - // of visitors stored by the decoder. - void RemoveVisitor(DecoderVisitor* visitor); - - #define DECLARE(A) void Visit##A(const Instruction* instr); - VISITOR_LIST(DECLARE) - #undef DECLARE - - - std::list* visitors() { return &visitors_; } - - private: - // Decodes an instruction and calls the visitor functions registered with the - // Decoder class. - void DecodeInstruction(const Instruction* instr); - - // Decode the PC relative addressing instruction, and call the corresponding - // visitors. - // On entry, instruction bits 27:24 = 0x0. - void DecodePCRelAddressing(const Instruction* instr); - - // Decode the add/subtract immediate instruction, and call the correspoding - // visitors. - // On entry, instruction bits 27:24 = 0x1. - void DecodeAddSubImmediate(const Instruction* instr); - - // Decode the branch, system command, and exception generation parts of - // the instruction tree, and call the corresponding visitors. - // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}. - void DecodeBranchSystemException(const Instruction* instr); - - // Decode the load and store parts of the instruction tree, and call - // the corresponding visitors. 
- // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}. - void DecodeLoadStore(const Instruction* instr); - - // Decode the logical immediate and move wide immediate parts of the - // instruction tree, and call the corresponding visitors. - // On entry, instruction bits 27:24 = 0x2. - void DecodeLogical(const Instruction* instr); - - // Decode the bitfield and extraction parts of the instruction tree, - // and call the corresponding visitors. - // On entry, instruction bits 27:24 = 0x3. - void DecodeBitfieldExtract(const Instruction* instr); - - // Decode the data processing parts of the instruction tree, and call the - // corresponding visitors. - // On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}. - void DecodeDataProcessing(const Instruction* instr); - - // Decode the floating point parts of the instruction tree, and call the - // corresponding visitors. - // On entry, instruction bits 27:24 = {0xE, 0xF}. - void DecodeFP(const Instruction* instr); - - // Decode the Advanced SIMD (NEON) load/store part of the instruction tree, - // and call the corresponding visitors. - // On entry, instruction bits 29:25 = 0x6. - void DecodeNEONLoadStore(const Instruction* instr); - - // Decode the Advanced SIMD (NEON) vector data processing part of the - // instruction tree, and call the corresponding visitors. - // On entry, instruction bits 28:25 = 0x7. - void DecodeNEONVectorDataProcessing(const Instruction* instr); - - // Decode the Advanced SIMD (NEON) scalar data processing part of the - // instruction tree, and call the corresponding visitors. - // On entry, instruction bits 28:25 = 0xF. - void DecodeNEONScalarDataProcessing(const Instruction* instr); - - private: - // Visitors are registered in a list. - std::list visitors_; -}; - -} // namespace vixl - -#endif // VIXL_A64_DECODER_A64_H_ diff --git a/disas/libvixl/vixl/a64/disasm-a64.cc b/disas/libvixl/vixl/a64/disasm-a64.cc deleted file mode 100644 index f34d1d68da..0000000000 --- a/disas/libvixl/vixl/a64/disasm-a64.cc +++ /dev/null @@ -1,3495 +0,0 @@ -// Copyright 2015, ARM Limited -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of ARM Limited nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include -#include "vixl/a64/disasm-a64.h" - -namespace vixl { - -Disassembler::Disassembler() { - buffer_size_ = 256; - buffer_ = reinterpret_cast(malloc(buffer_size_)); - buffer_pos_ = 0; - own_buffer_ = true; - code_address_offset_ = 0; -} - - -Disassembler::Disassembler(char* text_buffer, int buffer_size) { - buffer_size_ = buffer_size; - buffer_ = text_buffer; - buffer_pos_ = 0; - own_buffer_ = false; - code_address_offset_ = 0; -} - - -Disassembler::~Disassembler() { - if (own_buffer_) { - free(buffer_); - } -} - - -char* Disassembler::GetOutput() { - return buffer_; -} - - -void Disassembler::VisitAddSubImmediate(const Instruction* instr) { - bool rd_is_zr = RdIsZROrSP(instr); - bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) && - (instr->ImmAddSub() == 0) ? true : false; - const char *mnemonic = ""; - const char *form = "'Rds, 'Rns, 'IAddSub"; - const char *form_cmp = "'Rns, 'IAddSub"; - const char *form_mov = "'Rds, 'Rns"; - - switch (instr->Mask(AddSubImmediateMask)) { - case ADD_w_imm: - case ADD_x_imm: { - mnemonic = "add"; - if (stack_op) { - mnemonic = "mov"; - form = form_mov; - } - break; - } - case ADDS_w_imm: - case ADDS_x_imm: { - mnemonic = "adds"; - if (rd_is_zr) { - mnemonic = "cmn"; - form = form_cmp; - } - break; - } - case SUB_w_imm: - case SUB_x_imm: mnemonic = "sub"; break; - case SUBS_w_imm: - case SUBS_x_imm: { - mnemonic = "subs"; - if (rd_is_zr) { - mnemonic = "cmp"; - form = form_cmp; - } - break; - } - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitAddSubShifted(const Instruction* instr) { - bool rd_is_zr = RdIsZROrSP(instr); - bool rn_is_zr = RnIsZROrSP(instr); - const char *mnemonic = ""; - const char *form = "'Rd, 'Rn, 'Rm'NDP"; - const char *form_cmp = "'Rn, 'Rm'NDP"; - const char *form_neg = "'Rd, 'Rm'NDP"; - - switch (instr->Mask(AddSubShiftedMask)) { - case ADD_w_shift: - case ADD_x_shift: mnemonic = "add"; break; - case ADDS_w_shift: - case ADDS_x_shift: { - mnemonic = "adds"; - if (rd_is_zr) { - mnemonic = "cmn"; - form = form_cmp; - } - break; - } - case SUB_w_shift: - case SUB_x_shift: { - mnemonic = "sub"; - if (rn_is_zr) { - mnemonic = "neg"; - form = form_neg; - } - break; - } - case SUBS_w_shift: - case SUBS_x_shift: { - mnemonic = "subs"; - if (rd_is_zr) { - mnemonic = "cmp"; - form = form_cmp; - } else if (rn_is_zr) { - mnemonic = "negs"; - form = form_neg; - } - break; - } - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitAddSubExtended(const Instruction* instr) { - bool rd_is_zr = RdIsZROrSP(instr); - const char *mnemonic = ""; - Extend mode = static_cast(instr->ExtendMode()); - const char *form = ((mode == UXTX) || (mode == SXTX)) ? - "'Rds, 'Rns, 'Xm'Ext" : "'Rds, 'Rns, 'Wm'Ext"; - const char *form_cmp = ((mode == UXTX) || (mode == SXTX)) ? 
- "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext"; - - switch (instr->Mask(AddSubExtendedMask)) { - case ADD_w_ext: - case ADD_x_ext: mnemonic = "add"; break; - case ADDS_w_ext: - case ADDS_x_ext: { - mnemonic = "adds"; - if (rd_is_zr) { - mnemonic = "cmn"; - form = form_cmp; - } - break; - } - case SUB_w_ext: - case SUB_x_ext: mnemonic = "sub"; break; - case SUBS_w_ext: - case SUBS_x_ext: { - mnemonic = "subs"; - if (rd_is_zr) { - mnemonic = "cmp"; - form = form_cmp; - } - break; - } - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitAddSubWithCarry(const Instruction* instr) { - bool rn_is_zr = RnIsZROrSP(instr); - const char *mnemonic = ""; - const char *form = "'Rd, 'Rn, 'Rm"; - const char *form_neg = "'Rd, 'Rm"; - - switch (instr->Mask(AddSubWithCarryMask)) { - case ADC_w: - case ADC_x: mnemonic = "adc"; break; - case ADCS_w: - case ADCS_x: mnemonic = "adcs"; break; - case SBC_w: - case SBC_x: { - mnemonic = "sbc"; - if (rn_is_zr) { - mnemonic = "ngc"; - form = form_neg; - } - break; - } - case SBCS_w: - case SBCS_x: { - mnemonic = "sbcs"; - if (rn_is_zr) { - mnemonic = "ngcs"; - form = form_neg; - } - break; - } - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitLogicalImmediate(const Instruction* instr) { - bool rd_is_zr = RdIsZROrSP(instr); - bool rn_is_zr = RnIsZROrSP(instr); - const char *mnemonic = ""; - const char *form = "'Rds, 'Rn, 'ITri"; - - if (instr->ImmLogical() == 0) { - // The immediate encoded in the instruction is not in the expected format. - Format(instr, "unallocated", "(LogicalImmediate)"); - return; - } - - switch (instr->Mask(LogicalImmediateMask)) { - case AND_w_imm: - case AND_x_imm: mnemonic = "and"; break; - case ORR_w_imm: - case ORR_x_imm: { - mnemonic = "orr"; - unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize - : kWRegSize; - if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->ImmLogical())) { - mnemonic = "mov"; - form = "'Rds, 'ITri"; - } - break; - } - case EOR_w_imm: - case EOR_x_imm: mnemonic = "eor"; break; - case ANDS_w_imm: - case ANDS_x_imm: { - mnemonic = "ands"; - if (rd_is_zr) { - mnemonic = "tst"; - form = "'Rn, 'ITri"; - } - break; - } - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) { - VIXL_ASSERT((reg_size == kXRegSize) || - ((reg_size == kWRegSize) && (value <= 0xffffffff))); - - // Test for movz: 16 bits set at positions 0, 16, 32 or 48. - if (((value & UINT64_C(0xffffffffffff0000)) == 0) || - ((value & UINT64_C(0xffffffff0000ffff)) == 0) || - ((value & UINT64_C(0xffff0000ffffffff)) == 0) || - ((value & UINT64_C(0x0000ffffffffffff)) == 0)) { - return true; - } - - // Test for movn: NOT(16 bits set at positions 0, 16, 32 or 48). 
- if ((reg_size == kXRegSize) && - (((~value & UINT64_C(0xffffffffffff0000)) == 0) || - ((~value & UINT64_C(0xffffffff0000ffff)) == 0) || - ((~value & UINT64_C(0xffff0000ffffffff)) == 0) || - ((~value & UINT64_C(0x0000ffffffffffff)) == 0))) { - return true; - } - if ((reg_size == kWRegSize) && - (((value & 0xffff0000) == 0xffff0000) || - ((value & 0x0000ffff) == 0x0000ffff))) { - return true; - } - return false; -} - - -void Disassembler::VisitLogicalShifted(const Instruction* instr) { - bool rd_is_zr = RdIsZROrSP(instr); - bool rn_is_zr = RnIsZROrSP(instr); - const char *mnemonic = ""; - const char *form = "'Rd, 'Rn, 'Rm'NLo"; - - switch (instr->Mask(LogicalShiftedMask)) { - case AND_w: - case AND_x: mnemonic = "and"; break; - case BIC_w: - case BIC_x: mnemonic = "bic"; break; - case EOR_w: - case EOR_x: mnemonic = "eor"; break; - case EON_w: - case EON_x: mnemonic = "eon"; break; - case BICS_w: - case BICS_x: mnemonic = "bics"; break; - case ANDS_w: - case ANDS_x: { - mnemonic = "ands"; - if (rd_is_zr) { - mnemonic = "tst"; - form = "'Rn, 'Rm'NLo"; - } - break; - } - case ORR_w: - case ORR_x: { - mnemonic = "orr"; - if (rn_is_zr && (instr->ImmDPShift() == 0) && (instr->ShiftDP() == LSL)) { - mnemonic = "mov"; - form = "'Rd, 'Rm"; - } - break; - } - case ORN_w: - case ORN_x: { - mnemonic = "orn"; - if (rn_is_zr) { - mnemonic = "mvn"; - form = "'Rd, 'Rm'NLo"; - } - break; - } - default: VIXL_UNREACHABLE(); - } - - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitConditionalCompareRegister(const Instruction* instr) { - const char *mnemonic = ""; - const char *form = "'Rn, 'Rm, 'INzcv, 'Cond"; - - switch (instr->Mask(ConditionalCompareRegisterMask)) { - case CCMN_w: - case CCMN_x: mnemonic = "ccmn"; break; - case CCMP_w: - case CCMP_x: mnemonic = "ccmp"; break; - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitConditionalCompareImmediate(const Instruction* instr) { - const char *mnemonic = ""; - const char *form = "'Rn, 'IP, 'INzcv, 'Cond"; - - switch (instr->Mask(ConditionalCompareImmediateMask)) { - case CCMN_w_imm: - case CCMN_x_imm: mnemonic = "ccmn"; break; - case CCMP_w_imm: - case CCMP_x_imm: mnemonic = "ccmp"; break; - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitConditionalSelect(const Instruction* instr) { - bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr)); - bool rn_is_rm = (instr->Rn() == instr->Rm()); - const char *mnemonic = ""; - const char *form = "'Rd, 'Rn, 'Rm, 'Cond"; - const char *form_test = "'Rd, 'CInv"; - const char *form_update = "'Rd, 'Rn, 'CInv"; - - Condition cond = static_cast(instr->Condition()); - bool invertible_cond = (cond != al) && (cond != nv); - - switch (instr->Mask(ConditionalSelectMask)) { - case CSEL_w: - case CSEL_x: mnemonic = "csel"; break; - case CSINC_w: - case CSINC_x: { - mnemonic = "csinc"; - if (rnm_is_zr && invertible_cond) { - mnemonic = "cset"; - form = form_test; - } else if (rn_is_rm && invertible_cond) { - mnemonic = "cinc"; - form = form_update; - } - break; - } - case CSINV_w: - case CSINV_x: { - mnemonic = "csinv"; - if (rnm_is_zr && invertible_cond) { - mnemonic = "csetm"; - form = form_test; - } else if (rn_is_rm && invertible_cond) { - mnemonic = "cinv"; - form = form_update; - } - break; - } - case CSNEG_w: - case CSNEG_x: { - mnemonic = "csneg"; - if (rn_is_rm && invertible_cond) { - mnemonic = "cneg"; - form = form_update; - } - break; - } - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, 
form); -} - - -void Disassembler::VisitBitfield(const Instruction* instr) { - unsigned s = instr->ImmS(); - unsigned r = instr->ImmR(); - unsigned rd_size_minus_1 = - ((instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize) - 1; - const char *mnemonic = ""; - const char *form = ""; - const char *form_shift_right = "'Rd, 'Rn, 'IBr"; - const char *form_extend = "'Rd, 'Wn"; - const char *form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1"; - const char *form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1"; - const char *form_lsl = "'Rd, 'Rn, 'IBZ-r"; - - switch (instr->Mask(BitfieldMask)) { - case SBFM_w: - case SBFM_x: { - mnemonic = "sbfx"; - form = form_bfx; - if (r == 0) { - form = form_extend; - if (s == 7) { - mnemonic = "sxtb"; - } else if (s == 15) { - mnemonic = "sxth"; - } else if ((s == 31) && (instr->SixtyFourBits() == 1)) { - mnemonic = "sxtw"; - } else { - form = form_bfx; - } - } else if (s == rd_size_minus_1) { - mnemonic = "asr"; - form = form_shift_right; - } else if (s < r) { - mnemonic = "sbfiz"; - form = form_bfiz; - } - break; - } - case UBFM_w: - case UBFM_x: { - mnemonic = "ubfx"; - form = form_bfx; - if (r == 0) { - form = form_extend; - if (s == 7) { - mnemonic = "uxtb"; - } else if (s == 15) { - mnemonic = "uxth"; - } else { - form = form_bfx; - } - } - if (s == rd_size_minus_1) { - mnemonic = "lsr"; - form = form_shift_right; - } else if (r == s + 1) { - mnemonic = "lsl"; - form = form_lsl; - } else if (s < r) { - mnemonic = "ubfiz"; - form = form_bfiz; - } - break; - } - case BFM_w: - case BFM_x: { - mnemonic = "bfxil"; - form = form_bfx; - if (s < r) { - mnemonic = "bfi"; - form = form_bfiz; - } - } - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitExtract(const Instruction* instr) { - const char *mnemonic = ""; - const char *form = "'Rd, 'Rn, 'Rm, 'IExtract"; - - switch (instr->Mask(ExtractMask)) { - case EXTR_w: - case EXTR_x: { - if (instr->Rn() == instr->Rm()) { - mnemonic = "ror"; - form = "'Rd, 'Rn, 'IExtract"; - } else { - mnemonic = "extr"; - } - break; - } - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitPCRelAddressing(const Instruction* instr) { - switch (instr->Mask(PCRelAddressingMask)) { - case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break; - case ADRP: Format(instr, "adrp", "'Xd, 'AddrPCRelPage"); break; - default: Format(instr, "unimplemented", "(PCRelAddressing)"); - } -} - - -void Disassembler::VisitConditionalBranch(const Instruction* instr) { - switch (instr->Mask(ConditionalBranchMask)) { - case B_cond: Format(instr, "b.'CBrn", "'TImmCond"); break; - default: VIXL_UNREACHABLE(); - } -} - - -void Disassembler::VisitUnconditionalBranchToRegister( - const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "'Xn"; - - switch (instr->Mask(UnconditionalBranchToRegisterMask)) { - case BR: mnemonic = "br"; break; - case BLR: mnemonic = "blr"; break; - case RET: { - mnemonic = "ret"; - if (instr->Rn() == kLinkRegCode) { - form = NULL; - } - break; - } - default: form = "(UnconditionalBranchToRegister)"; - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitUnconditionalBranch(const Instruction* instr) { - const char *mnemonic = ""; - const char *form = "'TImmUncn"; - - switch (instr->Mask(UnconditionalBranchMask)) { - case B: mnemonic = "b"; break; - case BL: mnemonic = "bl"; break; - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitDataProcessing1Source(const Instruction* instr) { - const char *mnemonic = 
""; - const char *form = "'Rd, 'Rn"; - - switch (instr->Mask(DataProcessing1SourceMask)) { - #define FORMAT(A, B) \ - case A##_w: \ - case A##_x: mnemonic = B; break; - FORMAT(RBIT, "rbit"); - FORMAT(REV16, "rev16"); - FORMAT(REV, "rev"); - FORMAT(CLZ, "clz"); - FORMAT(CLS, "cls"); - #undef FORMAT - case REV32_x: mnemonic = "rev32"; break; - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitDataProcessing2Source(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "'Rd, 'Rn, 'Rm"; - const char *form_wwx = "'Wd, 'Wn, 'Xm"; - - switch (instr->Mask(DataProcessing2SourceMask)) { - #define FORMAT(A, B) \ - case A##_w: \ - case A##_x: mnemonic = B; break; - FORMAT(UDIV, "udiv"); - FORMAT(SDIV, "sdiv"); - FORMAT(LSLV, "lsl"); - FORMAT(LSRV, "lsr"); - FORMAT(ASRV, "asr"); - FORMAT(RORV, "ror"); - #undef FORMAT - case CRC32B: mnemonic = "crc32b"; break; - case CRC32H: mnemonic = "crc32h"; break; - case CRC32W: mnemonic = "crc32w"; break; - case CRC32X: mnemonic = "crc32x"; form = form_wwx; break; - case CRC32CB: mnemonic = "crc32cb"; break; - case CRC32CH: mnemonic = "crc32ch"; break; - case CRC32CW: mnemonic = "crc32cw"; break; - case CRC32CX: mnemonic = "crc32cx"; form = form_wwx; break; - default: form = "(DataProcessing2Source)"; - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitDataProcessing3Source(const Instruction* instr) { - bool ra_is_zr = RaIsZROrSP(instr); - const char *mnemonic = ""; - const char *form = "'Xd, 'Wn, 'Wm, 'Xa"; - const char *form_rrr = "'Rd, 'Rn, 'Rm"; - const char *form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra"; - const char *form_xww = "'Xd, 'Wn, 'Wm"; - const char *form_xxx = "'Xd, 'Xn, 'Xm"; - - switch (instr->Mask(DataProcessing3SourceMask)) { - case MADD_w: - case MADD_x: { - mnemonic = "madd"; - form = form_rrrr; - if (ra_is_zr) { - mnemonic = "mul"; - form = form_rrr; - } - break; - } - case MSUB_w: - case MSUB_x: { - mnemonic = "msub"; - form = form_rrrr; - if (ra_is_zr) { - mnemonic = "mneg"; - form = form_rrr; - } - break; - } - case SMADDL_x: { - mnemonic = "smaddl"; - if (ra_is_zr) { - mnemonic = "smull"; - form = form_xww; - } - break; - } - case SMSUBL_x: { - mnemonic = "smsubl"; - if (ra_is_zr) { - mnemonic = "smnegl"; - form = form_xww; - } - break; - } - case UMADDL_x: { - mnemonic = "umaddl"; - if (ra_is_zr) { - mnemonic = "umull"; - form = form_xww; - } - break; - } - case UMSUBL_x: { - mnemonic = "umsubl"; - if (ra_is_zr) { - mnemonic = "umnegl"; - form = form_xww; - } - break; - } - case SMULH_x: { - mnemonic = "smulh"; - form = form_xxx; - break; - } - case UMULH_x: { - mnemonic = "umulh"; - form = form_xxx; - break; - } - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitCompareBranch(const Instruction* instr) { - const char *mnemonic = ""; - const char *form = "'Rt, 'TImmCmpa"; - - switch (instr->Mask(CompareBranchMask)) { - case CBZ_w: - case CBZ_x: mnemonic = "cbz"; break; - case CBNZ_w: - case CBNZ_x: mnemonic = "cbnz"; break; - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitTestBranch(const Instruction* instr) { - const char *mnemonic = ""; - // If the top bit of the immediate is clear, the tested register is - // disassembled as Wt, otherwise Xt. As the top bit of the immediate is - // encoded in bit 31 of the instruction, we can reuse the Rt form, which - // uses bit 31 (normally "sf") to choose the register size. 
- const char *form = "'Rt, 'IS, 'TImmTest"; - - switch (instr->Mask(TestBranchMask)) { - case TBZ: mnemonic = "tbz"; break; - case TBNZ: mnemonic = "tbnz"; break; - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitMoveWideImmediate(const Instruction* instr) { - const char *mnemonic = ""; - const char *form = "'Rd, 'IMoveImm"; - - // Print the shift separately for movk, to make it clear which half word will - // be overwritten. Movn and movz print the computed immediate, which includes - // shift calculation. - switch (instr->Mask(MoveWideImmediateMask)) { - case MOVN_w: - case MOVN_x: - if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0)) { - if ((instr->SixtyFourBits() == 0) && (instr->ImmMoveWide() == 0xffff)) { - mnemonic = "movn"; - } else { - mnemonic = "mov"; - form = "'Rd, 'IMoveNeg"; - } - } else { - mnemonic = "movn"; - } - break; - case MOVZ_w: - case MOVZ_x: - if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0)) - mnemonic = "mov"; - else - mnemonic = "movz"; - break; - case MOVK_w: - case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break; - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -#define LOAD_STORE_LIST(V) \ - V(STRB_w, "strb", "'Wt") \ - V(STRH_w, "strh", "'Wt") \ - V(STR_w, "str", "'Wt") \ - V(STR_x, "str", "'Xt") \ - V(LDRB_w, "ldrb", "'Wt") \ - V(LDRH_w, "ldrh", "'Wt") \ - V(LDR_w, "ldr", "'Wt") \ - V(LDR_x, "ldr", "'Xt") \ - V(LDRSB_x, "ldrsb", "'Xt") \ - V(LDRSH_x, "ldrsh", "'Xt") \ - V(LDRSW_x, "ldrsw", "'Xt") \ - V(LDRSB_w, "ldrsb", "'Wt") \ - V(LDRSH_w, "ldrsh", "'Wt") \ - V(STR_b, "str", "'Bt") \ - V(STR_h, "str", "'Ht") \ - V(STR_s, "str", "'St") \ - V(STR_d, "str", "'Dt") \ - V(LDR_b, "ldr", "'Bt") \ - V(LDR_h, "ldr", "'Ht") \ - V(LDR_s, "ldr", "'St") \ - V(LDR_d, "ldr", "'Dt") \ - V(STR_q, "str", "'Qt") \ - V(LDR_q, "ldr", "'Qt") - -void Disassembler::VisitLoadStorePreIndex(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "(LoadStorePreIndex)"; - - switch (instr->Mask(LoadStorePreIndexMask)) { - #define LS_PREINDEX(A, B, C) \ - case A##_pre: mnemonic = B; form = C ", ['Xns'ILS]!"; break; - LOAD_STORE_LIST(LS_PREINDEX) - #undef LS_PREINDEX - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitLoadStorePostIndex(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "(LoadStorePostIndex)"; - - switch (instr->Mask(LoadStorePostIndexMask)) { - #define LS_POSTINDEX(A, B, C) \ - case A##_post: mnemonic = B; form = C ", ['Xns]'ILS"; break; - LOAD_STORE_LIST(LS_POSTINDEX) - #undef LS_POSTINDEX - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitLoadStoreUnsignedOffset(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "(LoadStoreUnsignedOffset)"; - - switch (instr->Mask(LoadStoreUnsignedOffsetMask)) { - #define LS_UNSIGNEDOFFSET(A, B, C) \ - case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break; - LOAD_STORE_LIST(LS_UNSIGNEDOFFSET) - #undef LS_UNSIGNEDOFFSET - case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xns'ILU]"; - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitLoadStoreRegisterOffset(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "(LoadStoreRegisterOffset)"; - - switch (instr->Mask(LoadStoreRegisterOffsetMask)) { - #define LS_REGISTEROFFSET(A, B, C) \ - case A##_reg: mnemonic = B; form = C ", ['Xns, 'Offsetreg]"; break; - 
LOAD_STORE_LIST(LS_REGISTEROFFSET) - #undef LS_REGISTEROFFSET - case PRFM_reg: mnemonic = "prfm"; form = "'PrefOp, ['Xns, 'Offsetreg]"; - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitLoadStoreUnscaledOffset(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "'Wt, ['Xns'ILS]"; - const char *form_x = "'Xt, ['Xns'ILS]"; - const char *form_b = "'Bt, ['Xns'ILS]"; - const char *form_h = "'Ht, ['Xns'ILS]"; - const char *form_s = "'St, ['Xns'ILS]"; - const char *form_d = "'Dt, ['Xns'ILS]"; - const char *form_q = "'Qt, ['Xns'ILS]"; - const char *form_prefetch = "'PrefOp, ['Xns'ILS]"; - - switch (instr->Mask(LoadStoreUnscaledOffsetMask)) { - case STURB_w: mnemonic = "sturb"; break; - case STURH_w: mnemonic = "sturh"; break; - case STUR_w: mnemonic = "stur"; break; - case STUR_x: mnemonic = "stur"; form = form_x; break; - case STUR_b: mnemonic = "stur"; form = form_b; break; - case STUR_h: mnemonic = "stur"; form = form_h; break; - case STUR_s: mnemonic = "stur"; form = form_s; break; - case STUR_d: mnemonic = "stur"; form = form_d; break; - case STUR_q: mnemonic = "stur"; form = form_q; break; - case LDURB_w: mnemonic = "ldurb"; break; - case LDURH_w: mnemonic = "ldurh"; break; - case LDUR_w: mnemonic = "ldur"; break; - case LDUR_x: mnemonic = "ldur"; form = form_x; break; - case LDUR_b: mnemonic = "ldur"; form = form_b; break; - case LDUR_h: mnemonic = "ldur"; form = form_h; break; - case LDUR_s: mnemonic = "ldur"; form = form_s; break; - case LDUR_d: mnemonic = "ldur"; form = form_d; break; - case LDUR_q: mnemonic = "ldur"; form = form_q; break; - case LDURSB_x: form = form_x; VIXL_FALLTHROUGH(); - case LDURSB_w: mnemonic = "ldursb"; break; - case LDURSH_x: form = form_x; VIXL_FALLTHROUGH(); - case LDURSH_w: mnemonic = "ldursh"; break; - case LDURSW_x: mnemonic = "ldursw"; form = form_x; break; - case PRFUM: mnemonic = "prfum"; form = form_prefetch; break; - default: form = "(LoadStoreUnscaledOffset)"; - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitLoadLiteral(const Instruction* instr) { - const char *mnemonic = "ldr"; - const char *form = "(LoadLiteral)"; - - switch (instr->Mask(LoadLiteralMask)) { - case LDR_w_lit: form = "'Wt, 'ILLiteral 'LValue"; break; - case LDR_x_lit: form = "'Xt, 'ILLiteral 'LValue"; break; - case LDR_s_lit: form = "'St, 'ILLiteral 'LValue"; break; - case LDR_d_lit: form = "'Dt, 'ILLiteral 'LValue"; break; - case LDR_q_lit: form = "'Qt, 'ILLiteral 'LValue"; break; - case LDRSW_x_lit: { - mnemonic = "ldrsw"; - form = "'Xt, 'ILLiteral 'LValue"; - break; - } - case PRFM_lit: { - mnemonic = "prfm"; - form = "'PrefOp, 'ILLiteral 'LValue"; - break; - } - default: mnemonic = "unimplemented"; - } - Format(instr, mnemonic, form); -} - - -#define LOAD_STORE_PAIR_LIST(V) \ - V(STP_w, "stp", "'Wt, 'Wt2", "2") \ - V(LDP_w, "ldp", "'Wt, 'Wt2", "2") \ - V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "2") \ - V(STP_x, "stp", "'Xt, 'Xt2", "3") \ - V(LDP_x, "ldp", "'Xt, 'Xt2", "3") \ - V(STP_s, "stp", "'St, 'St2", "2") \ - V(LDP_s, "ldp", "'St, 'St2", "2") \ - V(STP_d, "stp", "'Dt, 'Dt2", "3") \ - V(LDP_d, "ldp", "'Dt, 'Dt2", "3") \ - V(LDP_q, "ldp", "'Qt, 'Qt2", "4") \ - V(STP_q, "stp", "'Qt, 'Qt2", "4") - -void Disassembler::VisitLoadStorePairPostIndex(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "(LoadStorePairPostIndex)"; - - switch (instr->Mask(LoadStorePairPostIndexMask)) { - #define LSP_POSTINDEX(A, B, C, D) \ - case A##_post: mnemonic = B; form = C ", ['Xns]'ILP" D; 
break; - LOAD_STORE_PAIR_LIST(LSP_POSTINDEX) - #undef LSP_POSTINDEX - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitLoadStorePairPreIndex(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "(LoadStorePairPreIndex)"; - - switch (instr->Mask(LoadStorePairPreIndexMask)) { - #define LSP_PREINDEX(A, B, C, D) \ - case A##_pre: mnemonic = B; form = C ", ['Xns'ILP" D "]!"; break; - LOAD_STORE_PAIR_LIST(LSP_PREINDEX) - #undef LSP_PREINDEX - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitLoadStorePairOffset(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "(LoadStorePairOffset)"; - - switch (instr->Mask(LoadStorePairOffsetMask)) { - #define LSP_OFFSET(A, B, C, D) \ - case A##_off: mnemonic = B; form = C ", ['Xns'ILP" D "]"; break; - LOAD_STORE_PAIR_LIST(LSP_OFFSET) - #undef LSP_OFFSET - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitLoadStorePairNonTemporal(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form; - - switch (instr->Mask(LoadStorePairNonTemporalMask)) { - case STNP_w: mnemonic = "stnp"; form = "'Wt, 'Wt2, ['Xns'ILP2]"; break; - case LDNP_w: mnemonic = "ldnp"; form = "'Wt, 'Wt2, ['Xns'ILP2]"; break; - case STNP_x: mnemonic = "stnp"; form = "'Xt, 'Xt2, ['Xns'ILP3]"; break; - case LDNP_x: mnemonic = "ldnp"; form = "'Xt, 'Xt2, ['Xns'ILP3]"; break; - case STNP_s: mnemonic = "stnp"; form = "'St, 'St2, ['Xns'ILP2]"; break; - case LDNP_s: mnemonic = "ldnp"; form = "'St, 'St2, ['Xns'ILP2]"; break; - case STNP_d: mnemonic = "stnp"; form = "'Dt, 'Dt2, ['Xns'ILP3]"; break; - case LDNP_d: mnemonic = "ldnp"; form = "'Dt, 'Dt2, ['Xns'ILP3]"; break; - case STNP_q: mnemonic = "stnp"; form = "'Qt, 'Qt2, ['Xns'ILP4]"; break; - case LDNP_q: mnemonic = "ldnp"; form = "'Qt, 'Qt2, ['Xns'ILP4]"; break; - default: form = "(LoadStorePairNonTemporal)"; - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitLoadStoreExclusive(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form; - - switch (instr->Mask(LoadStoreExclusiveMask)) { - case STXRB_w: mnemonic = "stxrb"; form = "'Ws, 'Wt, ['Xns]"; break; - case STXRH_w: mnemonic = "stxrh"; form = "'Ws, 'Wt, ['Xns]"; break; - case STXR_w: mnemonic = "stxr"; form = "'Ws, 'Wt, ['Xns]"; break; - case STXR_x: mnemonic = "stxr"; form = "'Ws, 'Xt, ['Xns]"; break; - case LDXRB_w: mnemonic = "ldxrb"; form = "'Wt, ['Xns]"; break; - case LDXRH_w: mnemonic = "ldxrh"; form = "'Wt, ['Xns]"; break; - case LDXR_w: mnemonic = "ldxr"; form = "'Wt, ['Xns]"; break; - case LDXR_x: mnemonic = "ldxr"; form = "'Xt, ['Xns]"; break; - case STXP_w: mnemonic = "stxp"; form = "'Ws, 'Wt, 'Wt2, ['Xns]"; break; - case STXP_x: mnemonic = "stxp"; form = "'Ws, 'Xt, 'Xt2, ['Xns]"; break; - case LDXP_w: mnemonic = "ldxp"; form = "'Wt, 'Wt2, ['Xns]"; break; - case LDXP_x: mnemonic = "ldxp"; form = "'Xt, 'Xt2, ['Xns]"; break; - case STLXRB_w: mnemonic = "stlxrb"; form = "'Ws, 'Wt, ['Xns]"; break; - case STLXRH_w: mnemonic = "stlxrh"; form = "'Ws, 'Wt, ['Xns]"; break; - case STLXR_w: mnemonic = "stlxr"; form = "'Ws, 'Wt, ['Xns]"; break; - case STLXR_x: mnemonic = "stlxr"; form = "'Ws, 'Xt, ['Xns]"; break; - case LDAXRB_w: mnemonic = "ldaxrb"; form = "'Wt, ['Xns]"; break; - case LDAXRH_w: mnemonic = "ldaxrh"; form = "'Wt, ['Xns]"; break; - case LDAXR_w: mnemonic = "ldaxr"; form = "'Wt, ['Xns]"; break; - case LDAXR_x: mnemonic = "ldaxr"; form = "'Xt, ['Xns]"; break; - case STLXP_w: 
mnemonic = "stlxp"; form = "'Ws, 'Wt, 'Wt2, ['Xns]"; break; - case STLXP_x: mnemonic = "stlxp"; form = "'Ws, 'Xt, 'Xt2, ['Xns]"; break; - case LDAXP_w: mnemonic = "ldaxp"; form = "'Wt, 'Wt2, ['Xns]"; break; - case LDAXP_x: mnemonic = "ldaxp"; form = "'Xt, 'Xt2, ['Xns]"; break; - case STLRB_w: mnemonic = "stlrb"; form = "'Wt, ['Xns]"; break; - case STLRH_w: mnemonic = "stlrh"; form = "'Wt, ['Xns]"; break; - case STLR_w: mnemonic = "stlr"; form = "'Wt, ['Xns]"; break; - case STLR_x: mnemonic = "stlr"; form = "'Xt, ['Xns]"; break; - case LDARB_w: mnemonic = "ldarb"; form = "'Wt, ['Xns]"; break; - case LDARH_w: mnemonic = "ldarh"; form = "'Wt, ['Xns]"; break; - case LDAR_w: mnemonic = "ldar"; form = "'Wt, ['Xns]"; break; - case LDAR_x: mnemonic = "ldar"; form = "'Xt, ['Xns]"; break; - default: form = "(LoadStoreExclusive)"; - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitFPCompare(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "'Fn, 'Fm"; - const char *form_zero = "'Fn, #0.0"; - - switch (instr->Mask(FPCompareMask)) { - case FCMP_s_zero: - case FCMP_d_zero: form = form_zero; VIXL_FALLTHROUGH(); - case FCMP_s: - case FCMP_d: mnemonic = "fcmp"; break; - case FCMPE_s_zero: - case FCMPE_d_zero: form = form_zero; VIXL_FALLTHROUGH(); - case FCMPE_s: - case FCMPE_d: mnemonic = "fcmpe"; break; - default: form = "(FPCompare)"; - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitFPConditionalCompare(const Instruction* instr) { - const char *mnemonic = "unmplemented"; - const char *form = "'Fn, 'Fm, 'INzcv, 'Cond"; - - switch (instr->Mask(FPConditionalCompareMask)) { - case FCCMP_s: - case FCCMP_d: mnemonic = "fccmp"; break; - case FCCMPE_s: - case FCCMPE_d: mnemonic = "fccmpe"; break; - default: form = "(FPConditionalCompare)"; - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitFPConditionalSelect(const Instruction* instr) { - const char *mnemonic = ""; - const char *form = "'Fd, 'Fn, 'Fm, 'Cond"; - - switch (instr->Mask(FPConditionalSelectMask)) { - case FCSEL_s: - case FCSEL_d: mnemonic = "fcsel"; break; - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitFPDataProcessing1Source(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "'Fd, 'Fn"; - - switch (instr->Mask(FPDataProcessing1SourceMask)) { - #define FORMAT(A, B) \ - case A##_s: \ - case A##_d: mnemonic = B; break; - FORMAT(FMOV, "fmov"); - FORMAT(FABS, "fabs"); - FORMAT(FNEG, "fneg"); - FORMAT(FSQRT, "fsqrt"); - FORMAT(FRINTN, "frintn"); - FORMAT(FRINTP, "frintp"); - FORMAT(FRINTM, "frintm"); - FORMAT(FRINTZ, "frintz"); - FORMAT(FRINTA, "frinta"); - FORMAT(FRINTX, "frintx"); - FORMAT(FRINTI, "frinti"); - #undef FORMAT - case FCVT_ds: mnemonic = "fcvt"; form = "'Dd, 'Sn"; break; - case FCVT_sd: mnemonic = "fcvt"; form = "'Sd, 'Dn"; break; - case FCVT_hs: mnemonic = "fcvt"; form = "'Hd, 'Sn"; break; - case FCVT_sh: mnemonic = "fcvt"; form = "'Sd, 'Hn"; break; - case FCVT_dh: mnemonic = "fcvt"; form = "'Dd, 'Hn"; break; - case FCVT_hd: mnemonic = "fcvt"; form = "'Hd, 'Dn"; break; - default: form = "(FPDataProcessing1Source)"; - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitFPDataProcessing2Source(const Instruction* instr) { - const char *mnemonic = ""; - const char *form = "'Fd, 'Fn, 'Fm"; - - switch (instr->Mask(FPDataProcessing2SourceMask)) { - #define FORMAT(A, B) \ - case A##_s: \ - case A##_d: mnemonic = B; break; - FORMAT(FMUL, "fmul"); 
- FORMAT(FDIV, "fdiv"); - FORMAT(FADD, "fadd"); - FORMAT(FSUB, "fsub"); - FORMAT(FMAX, "fmax"); - FORMAT(FMIN, "fmin"); - FORMAT(FMAXNM, "fmaxnm"); - FORMAT(FMINNM, "fminnm"); - FORMAT(FNMUL, "fnmul"); - #undef FORMAT - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitFPDataProcessing3Source(const Instruction* instr) { - const char *mnemonic = ""; - const char *form = "'Fd, 'Fn, 'Fm, 'Fa"; - - switch (instr->Mask(FPDataProcessing3SourceMask)) { - #define FORMAT(A, B) \ - case A##_s: \ - case A##_d: mnemonic = B; break; - FORMAT(FMADD, "fmadd"); - FORMAT(FMSUB, "fmsub"); - FORMAT(FNMADD, "fnmadd"); - FORMAT(FNMSUB, "fnmsub"); - #undef FORMAT - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitFPImmediate(const Instruction* instr) { - const char *mnemonic = ""; - const char *form = "(FPImmediate)"; - - switch (instr->Mask(FPImmediateMask)) { - case FMOV_s_imm: mnemonic = "fmov"; form = "'Sd, 'IFPSingle"; break; - case FMOV_d_imm: mnemonic = "fmov"; form = "'Dd, 'IFPDouble"; break; - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitFPIntegerConvert(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "(FPIntegerConvert)"; - const char *form_rf = "'Rd, 'Fn"; - const char *form_fr = "'Fd, 'Rn"; - - switch (instr->Mask(FPIntegerConvertMask)) { - case FMOV_ws: - case FMOV_xd: mnemonic = "fmov"; form = form_rf; break; - case FMOV_sw: - case FMOV_dx: mnemonic = "fmov"; form = form_fr; break; - case FMOV_d1_x: mnemonic = "fmov"; form = "'Vd.D[1], 'Rn"; break; - case FMOV_x_d1: mnemonic = "fmov"; form = "'Rd, 'Vn.D[1]"; break; - case FCVTAS_ws: - case FCVTAS_xs: - case FCVTAS_wd: - case FCVTAS_xd: mnemonic = "fcvtas"; form = form_rf; break; - case FCVTAU_ws: - case FCVTAU_xs: - case FCVTAU_wd: - case FCVTAU_xd: mnemonic = "fcvtau"; form = form_rf; break; - case FCVTMS_ws: - case FCVTMS_xs: - case FCVTMS_wd: - case FCVTMS_xd: mnemonic = "fcvtms"; form = form_rf; break; - case FCVTMU_ws: - case FCVTMU_xs: - case FCVTMU_wd: - case FCVTMU_xd: mnemonic = "fcvtmu"; form = form_rf; break; - case FCVTNS_ws: - case FCVTNS_xs: - case FCVTNS_wd: - case FCVTNS_xd: mnemonic = "fcvtns"; form = form_rf; break; - case FCVTNU_ws: - case FCVTNU_xs: - case FCVTNU_wd: - case FCVTNU_xd: mnemonic = "fcvtnu"; form = form_rf; break; - case FCVTZU_xd: - case FCVTZU_ws: - case FCVTZU_wd: - case FCVTZU_xs: mnemonic = "fcvtzu"; form = form_rf; break; - case FCVTZS_xd: - case FCVTZS_wd: - case FCVTZS_xs: - case FCVTZS_ws: mnemonic = "fcvtzs"; form = form_rf; break; - case FCVTPU_xd: - case FCVTPU_ws: - case FCVTPU_wd: - case FCVTPU_xs: mnemonic = "fcvtpu"; form = form_rf; break; - case FCVTPS_xd: - case FCVTPS_wd: - case FCVTPS_xs: - case FCVTPS_ws: mnemonic = "fcvtps"; form = form_rf; break; - case SCVTF_sw: - case SCVTF_sx: - case SCVTF_dw: - case SCVTF_dx: mnemonic = "scvtf"; form = form_fr; break; - case UCVTF_sw: - case UCVTF_sx: - case UCVTF_dw: - case UCVTF_dx: mnemonic = "ucvtf"; form = form_fr; break; - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitFPFixedPointConvert(const Instruction* instr) { - const char *mnemonic = ""; - const char *form = "'Rd, 'Fn, 'IFPFBits"; - const char *form_fr = "'Fd, 'Rn, 'IFPFBits"; - - switch (instr->Mask(FPFixedPointConvertMask)) { - case FCVTZS_ws_fixed: - case FCVTZS_xs_fixed: - case FCVTZS_wd_fixed: - case FCVTZS_xd_fixed: mnemonic = "fcvtzs"; break; - case FCVTZU_ws_fixed: - case 
FCVTZU_xs_fixed: - case FCVTZU_wd_fixed: - case FCVTZU_xd_fixed: mnemonic = "fcvtzu"; break; - case SCVTF_sw_fixed: - case SCVTF_sx_fixed: - case SCVTF_dw_fixed: - case SCVTF_dx_fixed: mnemonic = "scvtf"; form = form_fr; break; - case UCVTF_sw_fixed: - case UCVTF_sx_fixed: - case UCVTF_dw_fixed: - case UCVTF_dx_fixed: mnemonic = "ucvtf"; form = form_fr; break; - default: VIXL_UNREACHABLE(); - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitSystem(const Instruction* instr) { - // Some system instructions hijack their Op and Cp fields to represent a - // range of immediates instead of indicating a different instruction. This - // makes the decoding tricky. - const char *mnemonic = "unimplemented"; - const char *form = "(System)"; - - if (instr->Mask(SystemExclusiveMonitorFMask) == SystemExclusiveMonitorFixed) { - switch (instr->Mask(SystemExclusiveMonitorMask)) { - case CLREX: { - mnemonic = "clrex"; - form = (instr->CRm() == 0xf) ? NULL : "'IX"; - break; - } - } - } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) { - switch (instr->Mask(SystemSysRegMask)) { - case MRS: { - mnemonic = "mrs"; - switch (instr->ImmSystemRegister()) { - case NZCV: form = "'Xt, nzcv"; break; - case FPCR: form = "'Xt, fpcr"; break; - default: form = "'Xt, (unknown)"; break; - } - break; - } - case MSR: { - mnemonic = "msr"; - switch (instr->ImmSystemRegister()) { - case NZCV: form = "nzcv, 'Xt"; break; - case FPCR: form = "fpcr, 'Xt"; break; - default: form = "(unknown), 'Xt"; break; - } - break; - } - } - } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) { - switch (instr->ImmHint()) { - case NOP: { - mnemonic = "nop"; - form = NULL; - break; - } - } - } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) { - switch (instr->Mask(MemBarrierMask)) { - case DMB: { - mnemonic = "dmb"; - form = "'M"; - break; - } - case DSB: { - mnemonic = "dsb"; - form = "'M"; - break; - } - case ISB: { - mnemonic = "isb"; - form = NULL; - break; - } - } - } else if (instr->Mask(SystemSysFMask) == SystemSysFixed) { - switch (instr->SysOp()) { - case IVAU: - mnemonic = "ic"; - form = "ivau, 'Xt"; - break; - case CVAC: - mnemonic = "dc"; - form = "cvac, 'Xt"; - break; - case CVAU: - mnemonic = "dc"; - form = "cvau, 'Xt"; - break; - case CIVAC: - mnemonic = "dc"; - form = "civac, 'Xt"; - break; - case ZVA: - mnemonic = "dc"; - form = "zva, 'Xt"; - break; - default: - mnemonic = "sys"; - if (instr->Rt() == 31) { - form = "'G1, 'Kn, 'Km, 'G2"; - } else { - form = "'G1, 'Kn, 'Km, 'G2, 'Xt"; - } - break; - } - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitException(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "'IDebug"; - - switch (instr->Mask(ExceptionMask)) { - case HLT: mnemonic = "hlt"; break; - case BRK: mnemonic = "brk"; break; - case SVC: mnemonic = "svc"; break; - case HVC: mnemonic = "hvc"; break; - case SMC: mnemonic = "smc"; break; - case DCPS1: mnemonic = "dcps1"; form = "{'IDebug}"; break; - case DCPS2: mnemonic = "dcps2"; form = "{'IDebug}"; break; - case DCPS3: mnemonic = "dcps3"; form = "{'IDebug}"; break; - default: form = "(Exception)"; - } - Format(instr, mnemonic, form); -} - - -void Disassembler::VisitCrypto2RegSHA(const Instruction* instr) { - VisitUnimplemented(instr); -} - - -void Disassembler::VisitCrypto3RegSHA(const Instruction* instr) { - VisitUnimplemented(instr); -} - - -void Disassembler::VisitCryptoAES(const Instruction* instr) { - VisitUnimplemented(instr); -} - - -void 
Disassembler::VisitNEON2RegMisc(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "'Vd.%s, 'Vn.%s"; - const char *form_cmp_zero = "'Vd.%s, 'Vn.%s, #0"; - const char *form_fcmp_zero = "'Vd.%s, 'Vn.%s, #0.0"; - NEONFormatDecoder nfd(instr); - - static const NEONFormatMap map_lp_ta = { - {23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D} - }; - - static const NEONFormatMap map_cvt_ta = { - {22}, {NF_4S, NF_2D} - }; - - static const NEONFormatMap map_cvt_tb = { - {22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S} - }; - - if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) { - // These instructions all use a two bit size field, except NOT and RBIT, - // which use the field to encode the operation. - switch (instr->Mask(NEON2RegMiscMask)) { - case NEON_REV64: mnemonic = "rev64"; break; - case NEON_REV32: mnemonic = "rev32"; break; - case NEON_REV16: mnemonic = "rev16"; break; - case NEON_SADDLP: - mnemonic = "saddlp"; - nfd.SetFormatMap(0, &map_lp_ta); - break; - case NEON_UADDLP: - mnemonic = "uaddlp"; - nfd.SetFormatMap(0, &map_lp_ta); - break; - case NEON_SUQADD: mnemonic = "suqadd"; break; - case NEON_USQADD: mnemonic = "usqadd"; break; - case NEON_CLS: mnemonic = "cls"; break; - case NEON_CLZ: mnemonic = "clz"; break; - case NEON_CNT: mnemonic = "cnt"; break; - case NEON_SADALP: - mnemonic = "sadalp"; - nfd.SetFormatMap(0, &map_lp_ta); - break; - case NEON_UADALP: - mnemonic = "uadalp"; - nfd.SetFormatMap(0, &map_lp_ta); - break; - case NEON_SQABS: mnemonic = "sqabs"; break; - case NEON_SQNEG: mnemonic = "sqneg"; break; - case NEON_CMGT_zero: mnemonic = "cmgt"; form = form_cmp_zero; break; - case NEON_CMGE_zero: mnemonic = "cmge"; form = form_cmp_zero; break; - case NEON_CMEQ_zero: mnemonic = "cmeq"; form = form_cmp_zero; break; - case NEON_CMLE_zero: mnemonic = "cmle"; form = form_cmp_zero; break; - case NEON_CMLT_zero: mnemonic = "cmlt"; form = form_cmp_zero; break; - case NEON_ABS: mnemonic = "abs"; break; - case NEON_NEG: mnemonic = "neg"; break; - case NEON_RBIT_NOT: - switch (instr->FPType()) { - case 0: mnemonic = "mvn"; break; - case 1: mnemonic = "rbit"; break; - default: form = "(NEON2RegMisc)"; - } - nfd.SetFormatMaps(nfd.LogicalFormatMap()); - break; - } - } else { - // These instructions all use a one bit size field, except XTN, SQXTUN, - // SHLL, SQXTN and UQXTN, which use a two bit size field. - nfd.SetFormatMaps(nfd.FPFormatMap()); - switch (instr->Mask(NEON2RegMiscFPMask)) { - case NEON_FABS: mnemonic = "fabs"; break; - case NEON_FNEG: mnemonic = "fneg"; break; - case NEON_FCVTN: - mnemonic = instr->Mask(NEON_Q) ? "fcvtn2" : "fcvtn"; - nfd.SetFormatMap(0, &map_cvt_tb); - nfd.SetFormatMap(1, &map_cvt_ta); - break; - case NEON_FCVTXN: - mnemonic = instr->Mask(NEON_Q) ? "fcvtxn2" : "fcvtxn"; - nfd.SetFormatMap(0, &map_cvt_tb); - nfd.SetFormatMap(1, &map_cvt_ta); - break; - case NEON_FCVTL: - mnemonic = instr->Mask(NEON_Q) ? 
"fcvtl2" : "fcvtl"; - nfd.SetFormatMap(0, &map_cvt_ta); - nfd.SetFormatMap(1, &map_cvt_tb); - break; - case NEON_FRINTN: mnemonic = "frintn"; break; - case NEON_FRINTA: mnemonic = "frinta"; break; - case NEON_FRINTP: mnemonic = "frintp"; break; - case NEON_FRINTM: mnemonic = "frintm"; break; - case NEON_FRINTX: mnemonic = "frintx"; break; - case NEON_FRINTZ: mnemonic = "frintz"; break; - case NEON_FRINTI: mnemonic = "frinti"; break; - case NEON_FCVTNS: mnemonic = "fcvtns"; break; - case NEON_FCVTNU: mnemonic = "fcvtnu"; break; - case NEON_FCVTPS: mnemonic = "fcvtps"; break; - case NEON_FCVTPU: mnemonic = "fcvtpu"; break; - case NEON_FCVTMS: mnemonic = "fcvtms"; break; - case NEON_FCVTMU: mnemonic = "fcvtmu"; break; - case NEON_FCVTZS: mnemonic = "fcvtzs"; break; - case NEON_FCVTZU: mnemonic = "fcvtzu"; break; - case NEON_FCVTAS: mnemonic = "fcvtas"; break; - case NEON_FCVTAU: mnemonic = "fcvtau"; break; - case NEON_FSQRT: mnemonic = "fsqrt"; break; - case NEON_SCVTF: mnemonic = "scvtf"; break; - case NEON_UCVTF: mnemonic = "ucvtf"; break; - case NEON_URSQRTE: mnemonic = "ursqrte"; break; - case NEON_URECPE: mnemonic = "urecpe"; break; - case NEON_FRSQRTE: mnemonic = "frsqrte"; break; - case NEON_FRECPE: mnemonic = "frecpe"; break; - case NEON_FCMGT_zero: mnemonic = "fcmgt"; form = form_fcmp_zero; break; - case NEON_FCMGE_zero: mnemonic = "fcmge"; form = form_fcmp_zero; break; - case NEON_FCMEQ_zero: mnemonic = "fcmeq"; form = form_fcmp_zero; break; - case NEON_FCMLE_zero: mnemonic = "fcmle"; form = form_fcmp_zero; break; - case NEON_FCMLT_zero: mnemonic = "fcmlt"; form = form_fcmp_zero; break; - default: - if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) && - (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) { - nfd.SetFormatMap(0, nfd.IntegerFormatMap()); - nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); - - switch (instr->Mask(NEON2RegMiscMask)) { - case NEON_XTN: mnemonic = "xtn"; break; - case NEON_SQXTN: mnemonic = "sqxtn"; break; - case NEON_UQXTN: mnemonic = "uqxtn"; break; - case NEON_SQXTUN: mnemonic = "sqxtun"; break; - case NEON_SHLL: - mnemonic = "shll"; - nfd.SetFormatMap(0, nfd.LongIntegerFormatMap()); - nfd.SetFormatMap(1, nfd.IntegerFormatMap()); - switch (instr->NEONSize()) { - case 0: form = "'Vd.%s, 'Vn.%s, #8"; break; - case 1: form = "'Vd.%s, 'Vn.%s, #16"; break; - case 2: form = "'Vd.%s, 'Vn.%s, #32"; break; - default: form = "(NEON2RegMisc)"; - } - } - Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form)); - return; - } else { - form = "(NEON2RegMisc)"; - } - } - } - Format(instr, mnemonic, nfd.Substitute(form)); -} - - -void Disassembler::VisitNEON3Same(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; - NEONFormatDecoder nfd(instr); - - if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) { - switch (instr->Mask(NEON3SameLogicalMask)) { - case NEON_AND: mnemonic = "and"; break; - case NEON_ORR: - mnemonic = "orr"; - if (instr->Rm() == instr->Rn()) { - mnemonic = "mov"; - form = "'Vd.%s, 'Vn.%s"; - } - break; - case NEON_ORN: mnemonic = "orn"; break; - case NEON_EOR: mnemonic = "eor"; break; - case NEON_BIC: mnemonic = "bic"; break; - case NEON_BIF: mnemonic = "bif"; break; - case NEON_BIT: mnemonic = "bit"; break; - case NEON_BSL: mnemonic = "bsl"; break; - default: form = "(NEON3Same)"; - } - nfd.SetFormatMaps(nfd.LogicalFormatMap()); - } else { - static const char *mnemonics[] = { - "shadd", "uhadd", "shadd", "uhadd", - "sqadd", "uqadd", "sqadd", "uqadd", - 
"srhadd", "urhadd", "srhadd", "urhadd", - NULL, NULL, NULL, NULL, // Handled by logical cases above. - "shsub", "uhsub", "shsub", "uhsub", - "sqsub", "uqsub", "sqsub", "uqsub", - "cmgt", "cmhi", "cmgt", "cmhi", - "cmge", "cmhs", "cmge", "cmhs", - "sshl", "ushl", "sshl", "ushl", - "sqshl", "uqshl", "sqshl", "uqshl", - "srshl", "urshl", "srshl", "urshl", - "sqrshl", "uqrshl", "sqrshl", "uqrshl", - "smax", "umax", "smax", "umax", - "smin", "umin", "smin", "umin", - "sabd", "uabd", "sabd", "uabd", - "saba", "uaba", "saba", "uaba", - "add", "sub", "add", "sub", - "cmtst", "cmeq", "cmtst", "cmeq", - "mla", "mls", "mla", "mls", - "mul", "pmul", "mul", "pmul", - "smaxp", "umaxp", "smaxp", "umaxp", - "sminp", "uminp", "sminp", "uminp", - "sqdmulh", "sqrdmulh", "sqdmulh", "sqrdmulh", - "addp", "unallocated", "addp", "unallocated", - "fmaxnm", "fmaxnmp", "fminnm", "fminnmp", - "fmla", "unallocated", "fmls", "unallocated", - "fadd", "faddp", "fsub", "fabd", - "fmulx", "fmul", "unallocated", "unallocated", - "fcmeq", "fcmge", "unallocated", "fcmgt", - "unallocated", "facge", "unallocated", "facgt", - "fmax", "fmaxp", "fmin", "fminp", - "frecps", "fdiv", "frsqrts", "unallocated"}; - - // Operation is determined by the opcode bits (15-11), the top bit of - // size (23) and the U bit (29). - unsigned index = (instr->Bits(15, 11) << 2) | (instr->Bit(23) << 1) | - instr->Bit(29); - VIXL_ASSERT(index < (sizeof(mnemonics) / sizeof(mnemonics[0]))); - mnemonic = mnemonics[index]; - // Assert that index is not one of the previously handled logical - // instructions. - VIXL_ASSERT(mnemonic != NULL); - - if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) { - nfd.SetFormatMaps(nfd.FPFormatMap()); - } - } - Format(instr, mnemonic, nfd.Substitute(form)); -} - - -void Disassembler::VisitNEON3Different(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; - - NEONFormatDecoder nfd(instr); - nfd.SetFormatMap(0, nfd.LongIntegerFormatMap()); - - // Ignore the Q bit. Appending a "2" suffix is handled later. 
- switch (instr->Mask(NEON3DifferentMask) & ~NEON_Q) { - case NEON_PMULL: mnemonic = "pmull"; break; - case NEON_SABAL: mnemonic = "sabal"; break; - case NEON_SABDL: mnemonic = "sabdl"; break; - case NEON_SADDL: mnemonic = "saddl"; break; - case NEON_SMLAL: mnemonic = "smlal"; break; - case NEON_SMLSL: mnemonic = "smlsl"; break; - case NEON_SMULL: mnemonic = "smull"; break; - case NEON_SSUBL: mnemonic = "ssubl"; break; - case NEON_SQDMLAL: mnemonic = "sqdmlal"; break; - case NEON_SQDMLSL: mnemonic = "sqdmlsl"; break; - case NEON_SQDMULL: mnemonic = "sqdmull"; break; - case NEON_UABAL: mnemonic = "uabal"; break; - case NEON_UABDL: mnemonic = "uabdl"; break; - case NEON_UADDL: mnemonic = "uaddl"; break; - case NEON_UMLAL: mnemonic = "umlal"; break; - case NEON_UMLSL: mnemonic = "umlsl"; break; - case NEON_UMULL: mnemonic = "umull"; break; - case NEON_USUBL: mnemonic = "usubl"; break; - case NEON_SADDW: - mnemonic = "saddw"; - nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); - break; - case NEON_SSUBW: - mnemonic = "ssubw"; - nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); - break; - case NEON_UADDW: - mnemonic = "uaddw"; - nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); - break; - case NEON_USUBW: - mnemonic = "usubw"; - nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); - break; - case NEON_ADDHN: - mnemonic = "addhn"; - nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); - nfd.SetFormatMap(0, nfd.IntegerFormatMap()); - break; - case NEON_RADDHN: - mnemonic = "raddhn"; - nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); - nfd.SetFormatMap(0, nfd.IntegerFormatMap()); - break; - case NEON_RSUBHN: - mnemonic = "rsubhn"; - nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); - nfd.SetFormatMap(0, nfd.IntegerFormatMap()); - break; - case NEON_SUBHN: - mnemonic = "subhn"; - nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); - nfd.SetFormatMap(0, nfd.IntegerFormatMap()); - break; - default: form = "(NEON3Different)"; - } - Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form)); -} - - -void Disassembler::VisitNEONAcrossLanes(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "%sd, 'Vn.%s"; - - NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap(), - NEONFormatDecoder::IntegerFormatMap()); - - if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) { - nfd.SetFormatMap(0, nfd.FPScalarFormatMap()); - nfd.SetFormatMap(1, nfd.FPFormatMap()); - switch (instr->Mask(NEONAcrossLanesFPMask)) { - case NEON_FMAXV: mnemonic = "fmaxv"; break; - case NEON_FMINV: mnemonic = "fminv"; break; - case NEON_FMAXNMV: mnemonic = "fmaxnmv"; break; - case NEON_FMINNMV: mnemonic = "fminnmv"; break; - default: form = "(NEONAcrossLanes)"; break; - } - } else if (instr->Mask(NEONAcrossLanesFMask) == NEONAcrossLanesFixed) { - switch (instr->Mask(NEONAcrossLanesMask)) { - case NEON_ADDV: mnemonic = "addv"; break; - case NEON_SMAXV: mnemonic = "smaxv"; break; - case NEON_SMINV: mnemonic = "sminv"; break; - case NEON_UMAXV: mnemonic = "umaxv"; break; - case NEON_UMINV: mnemonic = "uminv"; break; - case NEON_SADDLV: - mnemonic = "saddlv"; - nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); - break; - case NEON_UADDLV: - mnemonic = "uaddlv"; - nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); - break; - default: form = "(NEONAcrossLanes)"; break; - } - } - Format(instr, mnemonic, nfd.Substitute(form, - NEONFormatDecoder::kPlaceholder, NEONFormatDecoder::kFormat)); -} - - -void Disassembler::VisitNEONByIndexedElement(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - 
bool l_instr = false; - bool fp_instr = false; - - const char *form = "'Vd.%s, 'Vn.%s, 'Ve.%s['IVByElemIndex]"; - - static const NEONFormatMap map_ta = { - {23, 22}, {NF_UNDEF, NF_4S, NF_2D} - }; - NEONFormatDecoder nfd(instr, &map_ta, - NEONFormatDecoder::IntegerFormatMap(), - NEONFormatDecoder::ScalarFormatMap()); - - switch (instr->Mask(NEONByIndexedElementMask)) { - case NEON_SMULL_byelement: mnemonic = "smull"; l_instr = true; break; - case NEON_UMULL_byelement: mnemonic = "umull"; l_instr = true; break; - case NEON_SMLAL_byelement: mnemonic = "smlal"; l_instr = true; break; - case NEON_UMLAL_byelement: mnemonic = "umlal"; l_instr = true; break; - case NEON_SMLSL_byelement: mnemonic = "smlsl"; l_instr = true; break; - case NEON_UMLSL_byelement: mnemonic = "umlsl"; l_instr = true; break; - case NEON_SQDMULL_byelement: mnemonic = "sqdmull"; l_instr = true; break; - case NEON_SQDMLAL_byelement: mnemonic = "sqdmlal"; l_instr = true; break; - case NEON_SQDMLSL_byelement: mnemonic = "sqdmlsl"; l_instr = true; break; - case NEON_MUL_byelement: mnemonic = "mul"; break; - case NEON_MLA_byelement: mnemonic = "mla"; break; - case NEON_MLS_byelement: mnemonic = "mls"; break; - case NEON_SQDMULH_byelement: mnemonic = "sqdmulh"; break; - case NEON_SQRDMULH_byelement: mnemonic = "sqrdmulh"; break; - default: - switch (instr->Mask(NEONByIndexedElementFPMask)) { - case NEON_FMUL_byelement: mnemonic = "fmul"; fp_instr = true; break; - case NEON_FMLA_byelement: mnemonic = "fmla"; fp_instr = true; break; - case NEON_FMLS_byelement: mnemonic = "fmls"; fp_instr = true; break; - case NEON_FMULX_byelement: mnemonic = "fmulx"; fp_instr = true; break; - } - } - - if (l_instr) { - Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form)); - } else if (fp_instr) { - nfd.SetFormatMap(0, nfd.FPFormatMap()); - Format(instr, mnemonic, nfd.Substitute(form)); - } else { - nfd.SetFormatMap(0, nfd.IntegerFormatMap()); - Format(instr, mnemonic, nfd.Substitute(form)); - } -} - - -void Disassembler::VisitNEONCopy(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "(NEONCopy)"; - - NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap(), - NEONFormatDecoder::TriangularScalarFormatMap()); - - if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) { - mnemonic = "mov"; - nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); - form = "'Vd.%s['IVInsIndex1], 'Vn.%s['IVInsIndex2]"; - } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) { - mnemonic = "mov"; - nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); - if (nfd.GetVectorFormat() == kFormatD) { - form = "'Vd.%s['IVInsIndex1], 'Xn"; - } else { - form = "'Vd.%s['IVInsIndex1], 'Wn"; - } - } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) { - if (instr->Mask(NEON_Q) || ((instr->ImmNEON5() & 7) == 4)) { - mnemonic = "mov"; - } else { - mnemonic = "umov"; - } - nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); - if (nfd.GetVectorFormat() == kFormatD) { - form = "'Xd, 'Vn.%s['IVInsIndex1]"; - } else { - form = "'Wd, 'Vn.%s['IVInsIndex1]"; - } - } else if (instr->Mask(NEONCopySmovMask) == NEON_SMOV) { - mnemonic = "smov"; - nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); - form = "'Rdq, 'Vn.%s['IVInsIndex1]"; - } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) { - mnemonic = "dup"; - form = "'Vd.%s, 'Vn.%s['IVInsIndex1]"; - } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) { - mnemonic = "dup"; - if (nfd.GetVectorFormat() == kFormat2D) { - form = 
"'Vd.%s, 'Xn"; - } else { - form = "'Vd.%s, 'Wn"; - } - } - Format(instr, mnemonic, nfd.Substitute(form)); -} - - -void Disassembler::VisitNEONExtract(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "(NEONExtract)"; - NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap()); - if (instr->Mask(NEONExtractMask) == NEON_EXT) { - mnemonic = "ext"; - form = "'Vd.%s, 'Vn.%s, 'Vm.%s, 'IVExtract"; - } - Format(instr, mnemonic, nfd.Substitute(form)); -} - - -void Disassembler::VisitNEONLoadStoreMultiStruct(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "(NEONLoadStoreMultiStruct)"; - const char *form_1v = "{'Vt.%1$s}, ['Xns]"; - const char *form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns]"; - const char *form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns]"; - const char *form_4v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]"; - NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); - - switch (instr->Mask(NEONLoadStoreMultiStructMask)) { - case NEON_LD1_1v: mnemonic = "ld1"; form = form_1v; break; - case NEON_LD1_2v: mnemonic = "ld1"; form = form_2v; break; - case NEON_LD1_3v: mnemonic = "ld1"; form = form_3v; break; - case NEON_LD1_4v: mnemonic = "ld1"; form = form_4v; break; - case NEON_LD2: mnemonic = "ld2"; form = form_2v; break; - case NEON_LD3: mnemonic = "ld3"; form = form_3v; break; - case NEON_LD4: mnemonic = "ld4"; form = form_4v; break; - case NEON_ST1_1v: mnemonic = "st1"; form = form_1v; break; - case NEON_ST1_2v: mnemonic = "st1"; form = form_2v; break; - case NEON_ST1_3v: mnemonic = "st1"; form = form_3v; break; - case NEON_ST1_4v: mnemonic = "st1"; form = form_4v; break; - case NEON_ST2: mnemonic = "st2"; form = form_2v; break; - case NEON_ST3: mnemonic = "st3"; form = form_3v; break; - case NEON_ST4: mnemonic = "st4"; form = form_4v; break; - default: break; - } - - Format(instr, mnemonic, nfd.Substitute(form)); -} - - -void Disassembler::VisitNEONLoadStoreMultiStructPostIndex( - const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "(NEONLoadStoreMultiStructPostIndex)"; - const char *form_1v = "{'Vt.%1$s}, ['Xns], 'Xmr1"; - const char *form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns], 'Xmr2"; - const char *form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns], 'Xmr3"; - const char *form_4v = - "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmr4"; - NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); - - switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) { - case NEON_LD1_1v_post: mnemonic = "ld1"; form = form_1v; break; - case NEON_LD1_2v_post: mnemonic = "ld1"; form = form_2v; break; - case NEON_LD1_3v_post: mnemonic = "ld1"; form = form_3v; break; - case NEON_LD1_4v_post: mnemonic = "ld1"; form = form_4v; break; - case NEON_LD2_post: mnemonic = "ld2"; form = form_2v; break; - case NEON_LD3_post: mnemonic = "ld3"; form = form_3v; break; - case NEON_LD4_post: mnemonic = "ld4"; form = form_4v; break; - case NEON_ST1_1v_post: mnemonic = "st1"; form = form_1v; break; - case NEON_ST1_2v_post: mnemonic = "st1"; form = form_2v; break; - case NEON_ST1_3v_post: mnemonic = "st1"; form = form_3v; break; - case NEON_ST1_4v_post: mnemonic = "st1"; form = form_4v; break; - case NEON_ST2_post: mnemonic = "st2"; form = form_2v; break; - case NEON_ST3_post: mnemonic = "st3"; form = form_3v; break; - case NEON_ST4_post: mnemonic = "st4"; form = form_4v; break; - default: break; - } - - Format(instr, mnemonic, 
nfd.Substitute(form)); -} - - -void Disassembler::VisitNEONLoadStoreSingleStruct(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "(NEONLoadStoreSingleStruct)"; - - const char *form_1b = "{'Vt.b}['IVLSLane0], ['Xns]"; - const char *form_1h = "{'Vt.h}['IVLSLane1], ['Xns]"; - const char *form_1s = "{'Vt.s}['IVLSLane2], ['Xns]"; - const char *form_1d = "{'Vt.d}['IVLSLane3], ['Xns]"; - NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); - - switch (instr->Mask(NEONLoadStoreSingleStructMask)) { - case NEON_LD1_b: mnemonic = "ld1"; form = form_1b; break; - case NEON_LD1_h: mnemonic = "ld1"; form = form_1h; break; - case NEON_LD1_s: - mnemonic = "ld1"; - VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d); - form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d; - break; - case NEON_ST1_b: mnemonic = "st1"; form = form_1b; break; - case NEON_ST1_h: mnemonic = "st1"; form = form_1h; break; - case NEON_ST1_s: - mnemonic = "st1"; - VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d); - form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d; - break; - case NEON_LD1R: - mnemonic = "ld1r"; - form = "{'Vt.%s}, ['Xns]"; - break; - case NEON_LD2_b: - case NEON_ST2_b: - mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2"; - form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns]"; - break; - case NEON_LD2_h: - case NEON_ST2_h: - mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2"; - form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns]"; - break; - case NEON_LD2_s: - case NEON_ST2_s: - VIXL_STATIC_ASSERT((NEON_ST2_s | (1 << NEONLSSize_offset)) == NEON_ST2_d); - VIXL_STATIC_ASSERT((NEON_LD2_s | (1 << NEONLSSize_offset)) == NEON_LD2_d); - mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2"; - if ((instr->NEONLSSize() & 1) == 0) - form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns]"; - else - form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns]"; - break; - case NEON_LD2R: - mnemonic = "ld2r"; - form = "{'Vt.%s, 'Vt2.%s}, ['Xns]"; - break; - case NEON_LD3_b: - case NEON_ST3_b: - mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3"; - form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns]"; - break; - case NEON_LD3_h: - case NEON_ST3_h: - mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3"; - form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns]"; - break; - case NEON_LD3_s: - case NEON_ST3_s: - mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3"; - if ((instr->NEONLSSize() & 1) == 0) - form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns]"; - else - form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns]"; - break; - case NEON_LD3R: - mnemonic = "ld3r"; - form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns]"; - break; - case NEON_LD4_b: - case NEON_ST4_b: - mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4"; - form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns]"; - break; - case NEON_LD4_h: - case NEON_ST4_h: - mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4"; - form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns]"; - break; - case NEON_LD4_s: - case NEON_ST4_s: - VIXL_STATIC_ASSERT((NEON_LD4_s | (1 << NEONLSSize_offset)) == NEON_LD4_d); - VIXL_STATIC_ASSERT((NEON_ST4_s | (1 << NEONLSSize_offset)) == NEON_ST4_d); - mnemonic = (instr->LdStXLoad() == 1) ? 
"ld4" : "st4"; - if ((instr->NEONLSSize() & 1) == 0) - form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns]"; - else - form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns]"; - break; - case NEON_LD4R: - mnemonic = "ld4r"; - form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]"; - break; - default: break; - } - - Format(instr, mnemonic, nfd.Substitute(form)); -} - - -void Disassembler::VisitNEONLoadStoreSingleStructPostIndex( - const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "(NEONLoadStoreSingleStructPostIndex)"; - - const char *form_1b = "{'Vt.b}['IVLSLane0], ['Xns], 'Xmb1"; - const char *form_1h = "{'Vt.h}['IVLSLane1], ['Xns], 'Xmb2"; - const char *form_1s = "{'Vt.s}['IVLSLane2], ['Xns], 'Xmb4"; - const char *form_1d = "{'Vt.d}['IVLSLane3], ['Xns], 'Xmb8"; - NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); - - switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) { - case NEON_LD1_b_post: mnemonic = "ld1"; form = form_1b; break; - case NEON_LD1_h_post: mnemonic = "ld1"; form = form_1h; break; - case NEON_LD1_s_post: - mnemonic = "ld1"; - VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d); - form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d; - break; - case NEON_ST1_b_post: mnemonic = "st1"; form = form_1b; break; - case NEON_ST1_h_post: mnemonic = "st1"; form = form_1h; break; - case NEON_ST1_s_post: - mnemonic = "st1"; - VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d); - form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d; - break; - case NEON_LD1R_post: - mnemonic = "ld1r"; - form = "{'Vt.%s}, ['Xns], 'Xmz1"; - break; - case NEON_LD2_b_post: - case NEON_ST2_b_post: - mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2"; - form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns], 'Xmb2"; - break; - case NEON_ST2_h_post: - case NEON_LD2_h_post: - mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2"; - form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns], 'Xmb4"; - break; - case NEON_LD2_s_post: - case NEON_ST2_s_post: - mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2"; - if ((instr->NEONLSSize() & 1) == 0) - form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns], 'Xmb8"; - else - form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns], 'Xmb16"; - break; - case NEON_LD2R_post: - mnemonic = "ld2r"; - form = "{'Vt.%s, 'Vt2.%s}, ['Xns], 'Xmz2"; - break; - case NEON_LD3_b_post: - case NEON_ST3_b_post: - mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3"; - form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns], 'Xmb3"; - break; - case NEON_LD3_h_post: - case NEON_ST3_h_post: - mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3"; - form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns], 'Xmb6"; - break; - case NEON_LD3_s_post: - case NEON_ST3_s_post: - mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3"; - if ((instr->NEONLSSize() & 1) == 0) - form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns], 'Xmb12"; - else - form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns], 'Xmr3"; - break; - case NEON_LD3R_post: - mnemonic = "ld3r"; - form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns], 'Xmz3"; - break; - case NEON_LD4_b_post: - case NEON_ST4_b_post: - mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4"; - form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns], 'Xmb4"; - break; - case NEON_LD4_h_post: - case NEON_ST4_h_post: - mnemonic = (instr->LdStXLoad()) == 1 ? 
"ld4" : "st4"; - form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns], 'Xmb8"; - break; - case NEON_LD4_s_post: - case NEON_ST4_s_post: - mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4"; - if ((instr->NEONLSSize() & 1) == 0) - form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns], 'Xmb16"; - else - form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns], 'Xmb32"; - break; - case NEON_LD4R_post: - mnemonic = "ld4r"; - form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmz4"; - break; - default: break; - } - - Format(instr, mnemonic, nfd.Substitute(form)); -} - - -void Disassembler::VisitNEONModifiedImmediate(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "'Vt.%s, 'IVMIImm8, lsl 'IVMIShiftAmt1"; - - int cmode = instr->NEONCmode(); - int cmode_3 = (cmode >> 3) & 1; - int cmode_2 = (cmode >> 2) & 1; - int cmode_1 = (cmode >> 1) & 1; - int cmode_0 = cmode & 1; - int q = instr->NEONQ(); - int op = instr->NEONModImmOp(); - - static const NEONFormatMap map_b = { {30}, {NF_8B, NF_16B} }; - static const NEONFormatMap map_h = { {30}, {NF_4H, NF_8H} }; - static const NEONFormatMap map_s = { {30}, {NF_2S, NF_4S} }; - NEONFormatDecoder nfd(instr, &map_b); - - if (cmode_3 == 0) { - if (cmode_0 == 0) { - mnemonic = (op == 1) ? "mvni" : "movi"; - } else { // cmode<0> == '1'. - mnemonic = (op == 1) ? "bic" : "orr"; - } - nfd.SetFormatMap(0, &map_s); - } else { // cmode<3> == '1'. - if (cmode_2 == 0) { - if (cmode_0 == 0) { - mnemonic = (op == 1) ? "mvni" : "movi"; - } else { // cmode<0> == '1'. - mnemonic = (op == 1) ? "bic" : "orr"; - } - nfd.SetFormatMap(0, &map_h); - } else { // cmode<2> == '1'. - if (cmode_1 == 0) { - mnemonic = (op == 1) ? "mvni" : "movi"; - form = "'Vt.%s, 'IVMIImm8, msl 'IVMIShiftAmt2"; - nfd.SetFormatMap(0, &map_s); - } else { // cmode<1> == '1'. - if (cmode_0 == 0) { - mnemonic = "movi"; - if (op == 0) { - form = "'Vt.%s, 'IVMIImm8"; - } else { - form = (q == 0) ? "'Dd, 'IVMIImm" : "'Vt.2d, 'IVMIImm"; - } - } else { // cmode<0> == '1' - mnemonic = "fmov"; - if (op == 0) { - form = "'Vt.%s, 'IVMIImmFPSingle"; - nfd.SetFormatMap(0, &map_s); - } else { - if (q == 1) { - form = "'Vt.2d, 'IVMIImmFPDouble"; - } - } - } - } - } - } - Format(instr, mnemonic, nfd.Substitute(form)); -} - - -void Disassembler::VisitNEONScalar2RegMisc(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "%sd, %sn"; - const char *form_0 = "%sd, %sn, #0"; - const char *form_fp0 = "%sd, %sn, #0.0"; - - NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); - - if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) { - // These instructions all use a two bit size field, except NOT and RBIT, - // which use the field to encode the operation. 
- switch (instr->Mask(NEONScalar2RegMiscMask)) { - case NEON_CMGT_zero_scalar: mnemonic = "cmgt"; form = form_0; break; - case NEON_CMGE_zero_scalar: mnemonic = "cmge"; form = form_0; break; - case NEON_CMLE_zero_scalar: mnemonic = "cmle"; form = form_0; break; - case NEON_CMLT_zero_scalar: mnemonic = "cmlt"; form = form_0; break; - case NEON_CMEQ_zero_scalar: mnemonic = "cmeq"; form = form_0; break; - case NEON_NEG_scalar: mnemonic = "neg"; break; - case NEON_SQNEG_scalar: mnemonic = "sqneg"; break; - case NEON_ABS_scalar: mnemonic = "abs"; break; - case NEON_SQABS_scalar: mnemonic = "sqabs"; break; - case NEON_SUQADD_scalar: mnemonic = "suqadd"; break; - case NEON_USQADD_scalar: mnemonic = "usqadd"; break; - default: form = "(NEONScalar2RegMisc)"; - } - } else { - // These instructions all use a one bit size field, except SQXTUN, SQXTN - // and UQXTN, which use a two bit size field. - nfd.SetFormatMaps(nfd.FPScalarFormatMap()); - switch (instr->Mask(NEONScalar2RegMiscFPMask)) { - case NEON_FRSQRTE_scalar: mnemonic = "frsqrte"; break; - case NEON_FRECPE_scalar: mnemonic = "frecpe"; break; - case NEON_SCVTF_scalar: mnemonic = "scvtf"; break; - case NEON_UCVTF_scalar: mnemonic = "ucvtf"; break; - case NEON_FCMGT_zero_scalar: mnemonic = "fcmgt"; form = form_fp0; break; - case NEON_FCMGE_zero_scalar: mnemonic = "fcmge"; form = form_fp0; break; - case NEON_FCMLE_zero_scalar: mnemonic = "fcmle"; form = form_fp0; break; - case NEON_FCMLT_zero_scalar: mnemonic = "fcmlt"; form = form_fp0; break; - case NEON_FCMEQ_zero_scalar: mnemonic = "fcmeq"; form = form_fp0; break; - case NEON_FRECPX_scalar: mnemonic = "frecpx"; break; - case NEON_FCVTNS_scalar: mnemonic = "fcvtns"; break; - case NEON_FCVTNU_scalar: mnemonic = "fcvtnu"; break; - case NEON_FCVTPS_scalar: mnemonic = "fcvtps"; break; - case NEON_FCVTPU_scalar: mnemonic = "fcvtpu"; break; - case NEON_FCVTMS_scalar: mnemonic = "fcvtms"; break; - case NEON_FCVTMU_scalar: mnemonic = "fcvtmu"; break; - case NEON_FCVTZS_scalar: mnemonic = "fcvtzs"; break; - case NEON_FCVTZU_scalar: mnemonic = "fcvtzu"; break; - case NEON_FCVTAS_scalar: mnemonic = "fcvtas"; break; - case NEON_FCVTAU_scalar: mnemonic = "fcvtau"; break; - case NEON_FCVTXN_scalar: - nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); - mnemonic = "fcvtxn"; - break; - default: - nfd.SetFormatMap(0, nfd.ScalarFormatMap()); - nfd.SetFormatMap(1, nfd.LongScalarFormatMap()); - switch (instr->Mask(NEONScalar2RegMiscMask)) { - case NEON_SQXTN_scalar: mnemonic = "sqxtn"; break; - case NEON_UQXTN_scalar: mnemonic = "uqxtn"; break; - case NEON_SQXTUN_scalar: mnemonic = "sqxtun"; break; - default: form = "(NEONScalar2RegMisc)"; - } - } - } - Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); -} - - -void Disassembler::VisitNEONScalar3Diff(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "%sd, %sn, %sm"; - NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap(), - NEONFormatDecoder::ScalarFormatMap()); - - switch (instr->Mask(NEONScalar3DiffMask)) { - case NEON_SQDMLAL_scalar : mnemonic = "sqdmlal"; break; - case NEON_SQDMLSL_scalar : mnemonic = "sqdmlsl"; break; - case NEON_SQDMULL_scalar : mnemonic = "sqdmull"; break; - default: form = "(NEONScalar3Diff)"; - } - Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); -} - - -void Disassembler::VisitNEONScalar3Same(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "%sd, %sn, %sm"; - NEONFormatDecoder nfd(instr, 
NEONFormatDecoder::ScalarFormatMap()); - - if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) { - nfd.SetFormatMaps(nfd.FPScalarFormatMap()); - switch (instr->Mask(NEONScalar3SameFPMask)) { - case NEON_FACGE_scalar: mnemonic = "facge"; break; - case NEON_FACGT_scalar: mnemonic = "facgt"; break; - case NEON_FCMEQ_scalar: mnemonic = "fcmeq"; break; - case NEON_FCMGE_scalar: mnemonic = "fcmge"; break; - case NEON_FCMGT_scalar: mnemonic = "fcmgt"; break; - case NEON_FMULX_scalar: mnemonic = "fmulx"; break; - case NEON_FRECPS_scalar: mnemonic = "frecps"; break; - case NEON_FRSQRTS_scalar: mnemonic = "frsqrts"; break; - case NEON_FABD_scalar: mnemonic = "fabd"; break; - default: form = "(NEONScalar3Same)"; - } - } else { - switch (instr->Mask(NEONScalar3SameMask)) { - case NEON_ADD_scalar: mnemonic = "add"; break; - case NEON_SUB_scalar: mnemonic = "sub"; break; - case NEON_CMEQ_scalar: mnemonic = "cmeq"; break; - case NEON_CMGE_scalar: mnemonic = "cmge"; break; - case NEON_CMGT_scalar: mnemonic = "cmgt"; break; - case NEON_CMHI_scalar: mnemonic = "cmhi"; break; - case NEON_CMHS_scalar: mnemonic = "cmhs"; break; - case NEON_CMTST_scalar: mnemonic = "cmtst"; break; - case NEON_UQADD_scalar: mnemonic = "uqadd"; break; - case NEON_SQADD_scalar: mnemonic = "sqadd"; break; - case NEON_UQSUB_scalar: mnemonic = "uqsub"; break; - case NEON_SQSUB_scalar: mnemonic = "sqsub"; break; - case NEON_USHL_scalar: mnemonic = "ushl"; break; - case NEON_SSHL_scalar: mnemonic = "sshl"; break; - case NEON_UQSHL_scalar: mnemonic = "uqshl"; break; - case NEON_SQSHL_scalar: mnemonic = "sqshl"; break; - case NEON_URSHL_scalar: mnemonic = "urshl"; break; - case NEON_SRSHL_scalar: mnemonic = "srshl"; break; - case NEON_UQRSHL_scalar: mnemonic = "uqrshl"; break; - case NEON_SQRSHL_scalar: mnemonic = "sqrshl"; break; - case NEON_SQDMULH_scalar: mnemonic = "sqdmulh"; break; - case NEON_SQRDMULH_scalar: mnemonic = "sqrdmulh"; break; - default: form = "(NEONScalar3Same)"; - } - } - Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); -} - - -void Disassembler::VisitNEONScalarByIndexedElement(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "%sd, %sn, 'Ve.%s['IVByElemIndex]"; - NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); - bool long_instr = false; - - switch (instr->Mask(NEONScalarByIndexedElementMask)) { - case NEON_SQDMULL_byelement_scalar: - mnemonic = "sqdmull"; - long_instr = true; - break; - case NEON_SQDMLAL_byelement_scalar: - mnemonic = "sqdmlal"; - long_instr = true; - break; - case NEON_SQDMLSL_byelement_scalar: - mnemonic = "sqdmlsl"; - long_instr = true; - break; - case NEON_SQDMULH_byelement_scalar: - mnemonic = "sqdmulh"; - break; - case NEON_SQRDMULH_byelement_scalar: - mnemonic = "sqrdmulh"; - break; - default: - nfd.SetFormatMap(0, nfd.FPScalarFormatMap()); - switch (instr->Mask(NEONScalarByIndexedElementFPMask)) { - case NEON_FMUL_byelement_scalar: mnemonic = "fmul"; break; - case NEON_FMLA_byelement_scalar: mnemonic = "fmla"; break; - case NEON_FMLS_byelement_scalar: mnemonic = "fmls"; break; - case NEON_FMULX_byelement_scalar: mnemonic = "fmulx"; break; - default: form = "(NEONScalarByIndexedElement)"; - } - } - - if (long_instr) { - nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); - } - - Format(instr, mnemonic, nfd.Substitute( - form, nfd.kPlaceholder, nfd.kPlaceholder, nfd.kFormat)); -} - - -void Disassembler::VisitNEONScalarCopy(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = 
"(NEONScalarCopy)"; - - NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap()); - - if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) { - mnemonic = "mov"; - form = "%sd, 'Vn.%s['IVInsIndex1]"; - } - - Format(instr, mnemonic, nfd.Substitute(form, nfd.kPlaceholder, nfd.kFormat)); -} - - -void Disassembler::VisitNEONScalarPairwise(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "%sd, 'Vn.%s"; - NEONFormatMap map = { {22}, {NF_2S, NF_2D} }; - NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarFormatMap(), &map); - - switch (instr->Mask(NEONScalarPairwiseMask)) { - case NEON_ADDP_scalar: mnemonic = "addp"; break; - case NEON_FADDP_scalar: mnemonic = "faddp"; break; - case NEON_FMAXP_scalar: mnemonic = "fmaxp"; break; - case NEON_FMAXNMP_scalar: mnemonic = "fmaxnmp"; break; - case NEON_FMINP_scalar: mnemonic = "fminp"; break; - case NEON_FMINNMP_scalar: mnemonic = "fminnmp"; break; - default: form = "(NEONScalarPairwise)"; - } - Format(instr, mnemonic, nfd.Substitute(form, - NEONFormatDecoder::kPlaceholder, NEONFormatDecoder::kFormat)); -} - - -void Disassembler::VisitNEONScalarShiftImmediate(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "%sd, %sn, 'Is1"; - const char *form_2 = "%sd, %sn, 'Is2"; - - static const NEONFormatMap map_shift = { - {22, 21, 20, 19}, - {NF_UNDEF, NF_B, NF_H, NF_H, NF_S, NF_S, NF_S, NF_S, - NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D} - }; - static const NEONFormatMap map_shift_narrow = { - {21, 20, 19}, - {NF_UNDEF, NF_H, NF_S, NF_S, NF_D, NF_D, NF_D, NF_D} - }; - NEONFormatDecoder nfd(instr, &map_shift); - - if (instr->ImmNEONImmh()) { // immh has to be non-zero. - switch (instr->Mask(NEONScalarShiftImmediateMask)) { - case NEON_FCVTZU_imm_scalar: mnemonic = "fcvtzu"; break; - case NEON_FCVTZS_imm_scalar: mnemonic = "fcvtzs"; break; - case NEON_SCVTF_imm_scalar: mnemonic = "scvtf"; break; - case NEON_UCVTF_imm_scalar: mnemonic = "ucvtf"; break; - case NEON_SRI_scalar: mnemonic = "sri"; break; - case NEON_SSHR_scalar: mnemonic = "sshr"; break; - case NEON_USHR_scalar: mnemonic = "ushr"; break; - case NEON_SRSHR_scalar: mnemonic = "srshr"; break; - case NEON_URSHR_scalar: mnemonic = "urshr"; break; - case NEON_SSRA_scalar: mnemonic = "ssra"; break; - case NEON_USRA_scalar: mnemonic = "usra"; break; - case NEON_SRSRA_scalar: mnemonic = "srsra"; break; - case NEON_URSRA_scalar: mnemonic = "ursra"; break; - case NEON_SHL_scalar: mnemonic = "shl"; form = form_2; break; - case NEON_SLI_scalar: mnemonic = "sli"; form = form_2; break; - case NEON_SQSHLU_scalar: mnemonic = "sqshlu"; form = form_2; break; - case NEON_SQSHL_imm_scalar: mnemonic = "sqshl"; form = form_2; break; - case NEON_UQSHL_imm_scalar: mnemonic = "uqshl"; form = form_2; break; - case NEON_UQSHRN_scalar: - mnemonic = "uqshrn"; - nfd.SetFormatMap(1, &map_shift_narrow); - break; - case NEON_UQRSHRN_scalar: - mnemonic = "uqrshrn"; - nfd.SetFormatMap(1, &map_shift_narrow); - break; - case NEON_SQSHRN_scalar: - mnemonic = "sqshrn"; - nfd.SetFormatMap(1, &map_shift_narrow); - break; - case NEON_SQRSHRN_scalar: - mnemonic = "sqrshrn"; - nfd.SetFormatMap(1, &map_shift_narrow); - break; - case NEON_SQSHRUN_scalar: - mnemonic = "sqshrun"; - nfd.SetFormatMap(1, &map_shift_narrow); - break; - case NEON_SQRSHRUN_scalar: - mnemonic = "sqrshrun"; - nfd.SetFormatMap(1, &map_shift_narrow); - break; - default: - form = "(NEONScalarShiftImmediate)"; - } - } else { - form = 
"(NEONScalarShiftImmediate)"; - } - Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); -} - - -void Disassembler::VisitNEONShiftImmediate(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "'Vd.%s, 'Vn.%s, 'Is1"; - const char *form_shift_2 = "'Vd.%s, 'Vn.%s, 'Is2"; - const char *form_xtl = "'Vd.%s, 'Vn.%s"; - - // 0001->8H, 001x->4S, 01xx->2D, all others undefined. - static const NEONFormatMap map_shift_ta = { - {22, 21, 20, 19}, - {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D} - }; - - // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H, - // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined. - static const NEONFormatMap map_shift_tb = { - {22, 21, 20, 19, 30}, - {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_4H, NF_8H, - NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, - NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, - NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D} - }; - - NEONFormatDecoder nfd(instr, &map_shift_tb); - - if (instr->ImmNEONImmh()) { // immh has to be non-zero. - switch (instr->Mask(NEONShiftImmediateMask)) { - case NEON_SQSHLU: mnemonic = "sqshlu"; form = form_shift_2; break; - case NEON_SQSHL_imm: mnemonic = "sqshl"; form = form_shift_2; break; - case NEON_UQSHL_imm: mnemonic = "uqshl"; form = form_shift_2; break; - case NEON_SHL: mnemonic = "shl"; form = form_shift_2; break; - case NEON_SLI: mnemonic = "sli"; form = form_shift_2; break; - case NEON_SCVTF_imm: mnemonic = "scvtf"; break; - case NEON_UCVTF_imm: mnemonic = "ucvtf"; break; - case NEON_FCVTZU_imm: mnemonic = "fcvtzu"; break; - case NEON_FCVTZS_imm: mnemonic = "fcvtzs"; break; - case NEON_SRI: mnemonic = "sri"; break; - case NEON_SSHR: mnemonic = "sshr"; break; - case NEON_USHR: mnemonic = "ushr"; break; - case NEON_SRSHR: mnemonic = "srshr"; break; - case NEON_URSHR: mnemonic = "urshr"; break; - case NEON_SSRA: mnemonic = "ssra"; break; - case NEON_USRA: mnemonic = "usra"; break; - case NEON_SRSRA: mnemonic = "srsra"; break; - case NEON_URSRA: mnemonic = "ursra"; break; - case NEON_SHRN: - mnemonic = instr->Mask(NEON_Q) ? "shrn2" : "shrn"; - nfd.SetFormatMap(1, &map_shift_ta); - break; - case NEON_RSHRN: - mnemonic = instr->Mask(NEON_Q) ? "rshrn2" : "rshrn"; - nfd.SetFormatMap(1, &map_shift_ta); - break; - case NEON_UQSHRN: - mnemonic = instr->Mask(NEON_Q) ? "uqshrn2" : "uqshrn"; - nfd.SetFormatMap(1, &map_shift_ta); - break; - case NEON_UQRSHRN: - mnemonic = instr->Mask(NEON_Q) ? "uqrshrn2" : "uqrshrn"; - nfd.SetFormatMap(1, &map_shift_ta); - break; - case NEON_SQSHRN: - mnemonic = instr->Mask(NEON_Q) ? "sqshrn2" : "sqshrn"; - nfd.SetFormatMap(1, &map_shift_ta); - break; - case NEON_SQRSHRN: - mnemonic = instr->Mask(NEON_Q) ? "sqrshrn2" : "sqrshrn"; - nfd.SetFormatMap(1, &map_shift_ta); - break; - case NEON_SQSHRUN: - mnemonic = instr->Mask(NEON_Q) ? "sqshrun2" : "sqshrun"; - nfd.SetFormatMap(1, &map_shift_ta); - break; - case NEON_SQRSHRUN: - mnemonic = instr->Mask(NEON_Q) ? "sqrshrun2" : "sqrshrun"; - nfd.SetFormatMap(1, &map_shift_ta); - break; - case NEON_SSHLL: - nfd.SetFormatMap(0, &map_shift_ta); - if (instr->ImmNEONImmb() == 0 && - CountSetBits(instr->ImmNEONImmh(), 32) == 1) { // sxtl variant. - form = form_xtl; - mnemonic = instr->Mask(NEON_Q) ? "sxtl2" : "sxtl"; - } else { // sshll variant. - form = form_shift_2; - mnemonic = instr->Mask(NEON_Q) ? 
"sshll2" : "sshll"; - } - break; - case NEON_USHLL: - nfd.SetFormatMap(0, &map_shift_ta); - if (instr->ImmNEONImmb() == 0 && - CountSetBits(instr->ImmNEONImmh(), 32) == 1) { // uxtl variant. - form = form_xtl; - mnemonic = instr->Mask(NEON_Q) ? "uxtl2" : "uxtl"; - } else { // ushll variant. - form = form_shift_2; - mnemonic = instr->Mask(NEON_Q) ? "ushll2" : "ushll"; - } - break; - default: form = "(NEONShiftImmediate)"; - } - } else { - form = "(NEONShiftImmediate)"; - } - Format(instr, mnemonic, nfd.Substitute(form)); -} - - -void Disassembler::VisitNEONTable(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "(NEONTable)"; - const char form_1v[] = "'Vd.%%s, {'Vn.16b}, 'Vm.%%s"; - const char form_2v[] = "'Vd.%%s, {'Vn.16b, v%d.16b}, 'Vm.%%s"; - const char form_3v[] = "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b}, 'Vm.%%s"; - const char form_4v[] = - "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b, v%d.16b}, 'Vm.%%s"; - static const NEONFormatMap map_b = { {30}, {NF_8B, NF_16B} }; - NEONFormatDecoder nfd(instr, &map_b); - - switch (instr->Mask(NEONTableMask)) { - case NEON_TBL_1v: mnemonic = "tbl"; form = form_1v; break; - case NEON_TBL_2v: mnemonic = "tbl"; form = form_2v; break; - case NEON_TBL_3v: mnemonic = "tbl"; form = form_3v; break; - case NEON_TBL_4v: mnemonic = "tbl"; form = form_4v; break; - case NEON_TBX_1v: mnemonic = "tbx"; form = form_1v; break; - case NEON_TBX_2v: mnemonic = "tbx"; form = form_2v; break; - case NEON_TBX_3v: mnemonic = "tbx"; form = form_3v; break; - case NEON_TBX_4v: mnemonic = "tbx"; form = form_4v; break; - default: break; - } - - char re_form[sizeof(form_4v) + 6]; - int reg_num = instr->Rn(); - snprintf(re_form, sizeof(re_form), form, - (reg_num + 1) % kNumberOfVRegisters, - (reg_num + 2) % kNumberOfVRegisters, - (reg_num + 3) % kNumberOfVRegisters); - - Format(instr, mnemonic, nfd.Substitute(re_form)); -} - - -void Disassembler::VisitNEONPerm(const Instruction* instr) { - const char *mnemonic = "unimplemented"; - const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; - NEONFormatDecoder nfd(instr); - - switch (instr->Mask(NEONPermMask)) { - case NEON_TRN1: mnemonic = "trn1"; break; - case NEON_TRN2: mnemonic = "trn2"; break; - case NEON_UZP1: mnemonic = "uzp1"; break; - case NEON_UZP2: mnemonic = "uzp2"; break; - case NEON_ZIP1: mnemonic = "zip1"; break; - case NEON_ZIP2: mnemonic = "zip2"; break; - default: form = "(NEONPerm)"; - } - Format(instr, mnemonic, nfd.Substitute(form)); -} - - -void Disassembler::VisitUnimplemented(const Instruction* instr) { - Format(instr, "unimplemented", "(Unimplemented)"); -} - - -void Disassembler::VisitUnallocated(const Instruction* instr) { - Format(instr, "unallocated", "(Unallocated)"); -} - - -void Disassembler::ProcessOutput(const Instruction* /*instr*/) { - // The base disasm does nothing more than disassembling into a buffer. -} - - -void Disassembler::AppendRegisterNameToOutput(const Instruction* instr, - const CPURegister& reg) { - USE(instr); - VIXL_ASSERT(reg.IsValid()); - char reg_char; - - if (reg.IsRegister()) { - reg_char = reg.Is64Bits() ? 'x' : 'w'; - } else { - VIXL_ASSERT(reg.IsVRegister()); - switch (reg.SizeInBits()) { - case kBRegSize: reg_char = 'b'; break; - case kHRegSize: reg_char = 'h'; break; - case kSRegSize: reg_char = 's'; break; - case kDRegSize: reg_char = 'd'; break; - default: - VIXL_ASSERT(reg.Is128Bits()); - reg_char = 'q'; - } - } - - if (reg.IsVRegister() || !(reg.Aliases(sp) || reg.Aliases(xzr))) { - // A core or scalar/vector register: [wx]0 - 30, [bhsdq]0 - 31. 
- AppendToOutput("%c%d", reg_char, reg.code()); - } else if (reg.Aliases(sp)) { - // Disassemble w31/x31 as stack pointer wsp/sp. - AppendToOutput("%s", reg.Is64Bits() ? "sp" : "wsp"); - } else { - // Disassemble w31/x31 as zero register wzr/xzr. - AppendToOutput("%czr", reg_char); - } -} - - -void Disassembler::AppendPCRelativeOffsetToOutput(const Instruction* instr, - int64_t offset) { - USE(instr); - uint64_t abs_offset = offset; - char sign = (offset < 0) ? '-' : '+'; - if (offset < 0) { - abs_offset = -abs_offset; - } - AppendToOutput("#%c0x%" PRIx64, sign, abs_offset); -} - - -void Disassembler::AppendAddressToOutput(const Instruction* instr, - const void* addr) { - USE(instr); - AppendToOutput("(addr 0x%" PRIxPTR ")", reinterpret_cast(addr)); -} - - -void Disassembler::AppendCodeAddressToOutput(const Instruction* instr, - const void* addr) { - AppendAddressToOutput(instr, addr); -} - - -void Disassembler::AppendDataAddressToOutput(const Instruction* instr, - const void* addr) { - AppendAddressToOutput(instr, addr); -} - - -void Disassembler::AppendCodeRelativeAddressToOutput(const Instruction* instr, - const void* addr) { - USE(instr); - int64_t rel_addr = CodeRelativeAddress(addr); - if (rel_addr >= 0) { - AppendToOutput("(addr 0x%" PRIx64 ")", rel_addr); - } else { - AppendToOutput("(addr -0x%" PRIx64 ")", -rel_addr); - } -} - - -void Disassembler::AppendCodeRelativeCodeAddressToOutput( - const Instruction* instr, const void* addr) { - AppendCodeRelativeAddressToOutput(instr, addr); -} - - -void Disassembler::AppendCodeRelativeDataAddressToOutput( - const Instruction* instr, const void* addr) { - AppendCodeRelativeAddressToOutput(instr, addr); -} - - -void Disassembler::MapCodeAddress(int64_t base_address, - const Instruction* instr_address) { - set_code_address_offset( - base_address - reinterpret_cast(instr_address)); -} -int64_t Disassembler::CodeRelativeAddress(const void* addr) { - return reinterpret_cast(addr) + code_address_offset(); -} - - -void Disassembler::Format(const Instruction* instr, const char* mnemonic, - const char* format) { - VIXL_ASSERT(mnemonic != NULL); - ResetOutput(); - Substitute(instr, mnemonic); - if (format != NULL) { - VIXL_ASSERT(buffer_pos_ < buffer_size_); - buffer_[buffer_pos_++] = ' '; - Substitute(instr, format); - } - VIXL_ASSERT(buffer_pos_ < buffer_size_); - buffer_[buffer_pos_] = 0; - ProcessOutput(instr); -} - - -void Disassembler::Substitute(const Instruction* instr, const char* string) { - char chr = *string++; - while (chr != '\0') { - if (chr == '\'') { - string += SubstituteField(instr, string); - } else { - VIXL_ASSERT(buffer_pos_ < buffer_size_); - buffer_[buffer_pos_++] = chr; - } - chr = *string++; - } -} - - -int Disassembler::SubstituteField(const Instruction* instr, - const char* format) { - switch (format[0]) { - // NB. The remaining substitution prefix characters are: GJKUZ. - case 'R': // Register. X or W, selected by sf bit. - case 'F': // FP register. S or D, selected by type field. - case 'V': // Vector register, V, vector format. 
- case 'W': - case 'X': - case 'B': - case 'H': - case 'S': - case 'D': - case 'Q': return SubstituteRegisterField(instr, format); - case 'I': return SubstituteImmediateField(instr, format); - case 'L': return SubstituteLiteralField(instr, format); - case 'N': return SubstituteShiftField(instr, format); - case 'P': return SubstitutePrefetchField(instr, format); - case 'C': return SubstituteConditionField(instr, format); - case 'E': return SubstituteExtendField(instr, format); - case 'A': return SubstitutePCRelAddressField(instr, format); - case 'T': return SubstituteBranchTargetField(instr, format); - case 'O': return SubstituteLSRegOffsetField(instr, format); - case 'M': return SubstituteBarrierField(instr, format); - case 'K': return SubstituteCrField(instr, format); - case 'G': return SubstituteSysOpField(instr, format); - default: { - VIXL_UNREACHABLE(); - return 1; - } - } -} - - -int Disassembler::SubstituteRegisterField(const Instruction* instr, - const char* format) { - char reg_prefix = format[0]; - unsigned reg_num = 0; - unsigned field_len = 2; - - switch (format[1]) { - case 'd': - reg_num = instr->Rd(); - if (format[2] == 'q') { - reg_prefix = instr->NEONQ() ? 'X' : 'W'; - field_len = 3; - } - break; - case 'n': reg_num = instr->Rn(); break; - case 'm': - reg_num = instr->Rm(); - switch (format[2]) { - // Handle registers tagged with b (bytes), z (instruction), or - // r (registers), used for address updates in - // NEON load/store instructions. - case 'r': - case 'b': - case 'z': { - field_len = 3; - char* eimm; - int imm = static_cast(strtol(&format[3], &eimm, 10)); - field_len += eimm - &format[3]; - if (reg_num == 31) { - switch (format[2]) { - case 'z': - imm *= (1 << instr->NEONLSSize()); - break; - case 'r': - imm *= (instr->NEONQ() == 0) ? kDRegSizeInBytes - : kQRegSizeInBytes; - break; - case 'b': - break; - } - AppendToOutput("#%d", imm); - return field_len; - } - break; - } - } - break; - case 'e': - // This is register Rm, but using a 4-bit specifier. Used in NEON - // by-element instructions. - reg_num = (instr->Rm() & 0xf); - break; - case 'a': reg_num = instr->Ra(); break; - case 's': reg_num = instr->Rs(); break; - case 't': - reg_num = instr->Rt(); - if (format[0] == 'V') { - if ((format[2] >= '2') && (format[2] <= '4')) { - // Handle consecutive vector register specifiers Vt2, Vt3 and Vt4. - reg_num = (reg_num + format[2] - '1') % 32; - field_len = 3; - } - } else { - if (format[2] == '2') { - // Handle register specifier Rt2. - reg_num = instr->Rt2(); - field_len = 3; - } - } - break; - default: VIXL_UNREACHABLE(); - } - - // Increase field length for registers tagged as stack. - if (format[2] == 's') { - field_len = 3; - } - - CPURegister::RegisterType reg_type = CPURegister::kRegister; - unsigned reg_size = kXRegSize; - - if (reg_prefix == 'R') { - reg_prefix = instr->SixtyFourBits() ? 'X' : 'W'; - } else if (reg_prefix == 'F') { - reg_prefix = ((instr->FPType() & 1) == 0) ? 
'S' : 'D'; - } - - switch (reg_prefix) { - case 'W': - reg_type = CPURegister::kRegister; reg_size = kWRegSize; break; - case 'X': - reg_type = CPURegister::kRegister; reg_size = kXRegSize; break; - case 'B': - reg_type = CPURegister::kVRegister; reg_size = kBRegSize; break; - case 'H': - reg_type = CPURegister::kVRegister; reg_size = kHRegSize; break; - case 'S': - reg_type = CPURegister::kVRegister; reg_size = kSRegSize; break; - case 'D': - reg_type = CPURegister::kVRegister; reg_size = kDRegSize; break; - case 'Q': - reg_type = CPURegister::kVRegister; reg_size = kQRegSize; break; - case 'V': - AppendToOutput("v%d", reg_num); - return field_len; - default: - VIXL_UNREACHABLE(); - } - - if ((reg_type == CPURegister::kRegister) && - (reg_num == kZeroRegCode) && (format[2] == 's')) { - reg_num = kSPRegInternalCode; - } - - AppendRegisterNameToOutput(instr, CPURegister(reg_num, reg_size, reg_type)); - - return field_len; -} - - -int Disassembler::SubstituteImmediateField(const Instruction* instr, - const char* format) { - VIXL_ASSERT(format[0] == 'I'); - - switch (format[1]) { - case 'M': { // IMoveImm, IMoveNeg or IMoveLSL. - if (format[5] == 'L') { - AppendToOutput("#0x%" PRIx32, instr->ImmMoveWide()); - if (instr->ShiftMoveWide() > 0) { - AppendToOutput(", lsl #%" PRId32, 16 * instr->ShiftMoveWide()); - } - } else { - VIXL_ASSERT((format[5] == 'I') || (format[5] == 'N')); - uint64_t imm = static_cast(instr->ImmMoveWide()) << - (16 * instr->ShiftMoveWide()); - if (format[5] == 'N') - imm = ~imm; - if (!instr->SixtyFourBits()) - imm &= UINT64_C(0xffffffff); - AppendToOutput("#0x%" PRIx64, imm); - } - return 8; - } - case 'L': { - switch (format[2]) { - case 'L': { // ILLiteral - Immediate Load Literal. - AppendToOutput("pc%+" PRId32, - instr->ImmLLiteral() << kLiteralEntrySizeLog2); - return 9; - } - case 'S': { // ILS - Immediate Load/Store. - if (instr->ImmLS() != 0) { - AppendToOutput(", #%" PRId32, instr->ImmLS()); - } - return 3; - } - case 'P': { // ILPx - Immediate Load/Store Pair, x = access size. - if (instr->ImmLSPair() != 0) { - // format[3] is the scale value. Convert to a number. - int scale = 1 << (format[3] - '0'); - AppendToOutput(", #%" PRId32, instr->ImmLSPair() * scale); - } - return 4; - } - case 'U': { // ILU - Immediate Load/Store Unsigned. - if (instr->ImmLSUnsigned() != 0) { - int shift = instr->SizeLS(); - AppendToOutput(", #%" PRId32, instr->ImmLSUnsigned() << shift); - } - return 3; - } - default: { - VIXL_UNIMPLEMENTED(); - return 0; - } - } - } - case 'C': { // ICondB - Immediate Conditional Branch. - int64_t offset = instr->ImmCondBranch() << 2; - AppendPCRelativeOffsetToOutput(instr, offset); - return 6; - } - case 'A': { // IAddSub. - VIXL_ASSERT(instr->ShiftAddSub() <= 1); - int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub()); - AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm); - return 7; - } - case 'F': { // IFPSingle, IFPDouble or IFPFBits. - if (format[3] == 'F') { // IFPFbits. - AppendToOutput("#%" PRId32, 64 - instr->FPScale()); - return 8; - } else { - AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmFP(), - format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64()); - return 9; - } - } - case 'T': { // ITri - Immediate Triangular Encoded. - AppendToOutput("#0x%" PRIx64, instr->ImmLogical()); - return 4; - } - case 'N': { // INzcv. - int nzcv = (instr->Nzcv() << Flags_offset); - AppendToOutput("#%c%c%c%c", ((nzcv & NFlag) == 0) ? 'n' : 'N', - ((nzcv & ZFlag) == 0) ? 'z' : 'Z', - ((nzcv & CFlag) == 0) ? 
'c' : 'C', - ((nzcv & VFlag) == 0) ? 'v' : 'V'); - return 5; - } - case 'P': { // IP - Conditional compare. - AppendToOutput("#%" PRId32, instr->ImmCondCmp()); - return 2; - } - case 'B': { // Bitfields. - return SubstituteBitfieldImmediateField(instr, format); - } - case 'E': { // IExtract. - AppendToOutput("#%" PRId32, instr->ImmS()); - return 8; - } - case 'S': { // IS - Test and branch bit. - AppendToOutput("#%" PRId32, (instr->ImmTestBranchBit5() << 5) | - instr->ImmTestBranchBit40()); - return 2; - } - case 's': { // Is - Shift (immediate). - switch (format[2]) { - case '1': { // Is1 - SSHR. - int shift = 16 << HighestSetBitPosition(instr->ImmNEONImmh()); - shift -= instr->ImmNEONImmhImmb(); - AppendToOutput("#%d", shift); - return 3; - } - case '2': { // Is2 - SLI. - int shift = instr->ImmNEONImmhImmb(); - shift -= 8 << HighestSetBitPosition(instr->ImmNEONImmh()); - AppendToOutput("#%d", shift); - return 3; - } - default: { - VIXL_UNIMPLEMENTED(); - return 0; - } - } - } - case 'D': { // IDebug - HLT and BRK instructions. - AppendToOutput("#0x%" PRIx32, instr->ImmException()); - return 6; - } - case 'V': { // Immediate Vector. - switch (format[2]) { - case 'E': { // IVExtract. - AppendToOutput("#%" PRId32, instr->ImmNEONExt()); - return 9; - } - case 'B': { // IVByElemIndex. - int vm_index = (instr->NEONH() << 1) | instr->NEONL(); - if (instr->NEONSize() == 1) { - vm_index = (vm_index << 1) | instr->NEONM(); - } - AppendToOutput("%d", vm_index); - return strlen("IVByElemIndex"); - } - case 'I': { // INS element. - if (strncmp(format, "IVInsIndex", strlen("IVInsIndex")) == 0) { - int rd_index, rn_index; - int imm5 = instr->ImmNEON5(); - int imm4 = instr->ImmNEON4(); - int tz = CountTrailingZeros(imm5, 32); - rd_index = imm5 >> (tz + 1); - rn_index = imm4 >> tz; - if (strncmp(format, "IVInsIndex1", strlen("IVInsIndex1")) == 0) { - AppendToOutput("%d", rd_index); - return strlen("IVInsIndex1"); - } else if (strncmp(format, "IVInsIndex2", - strlen("IVInsIndex2")) == 0) { - AppendToOutput("%d", rn_index); - return strlen("IVInsIndex2"); - } else { - VIXL_UNIMPLEMENTED(); - return 0; - } - } - VIXL_FALLTHROUGH(); - } - case 'L': { // IVLSLane[0123] - suffix indicates access size shift. - AppendToOutput("%d", instr->NEONLSIndex(format[8] - '0')); - return 9; - } - case 'M': { // Modified Immediate cases. 
- if (strncmp(format, - "IVMIImmFPSingle", - strlen("IVMIImmFPSingle")) == 0) { - AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(), - instr->ImmNEONFP32()); - return strlen("IVMIImmFPSingle"); - } else if (strncmp(format, - "IVMIImmFPDouble", - strlen("IVMIImmFPDouble")) == 0) { - AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(), - instr->ImmNEONFP64()); - return strlen("IVMIImmFPDouble"); - } else if (strncmp(format, "IVMIImm8", strlen("IVMIImm8")) == 0) { - uint64_t imm8 = instr->ImmNEONabcdefgh(); - AppendToOutput("#0x%" PRIx64, imm8); - return strlen("IVMIImm8"); - } else if (strncmp(format, "IVMIImm", strlen("IVMIImm")) == 0) { - uint64_t imm8 = instr->ImmNEONabcdefgh(); - uint64_t imm = 0; - for (int i = 0; i < 8; ++i) { - if (imm8 & (1 << i)) { - imm |= (UINT64_C(0xff) << (8 * i)); - } - } - AppendToOutput("#0x%" PRIx64, imm); - return strlen("IVMIImm"); - } else if (strncmp(format, "IVMIShiftAmt1", - strlen("IVMIShiftAmt1")) == 0) { - int cmode = instr->NEONCmode(); - int shift_amount = 8 * ((cmode >> 1) & 3); - AppendToOutput("#%d", shift_amount); - return strlen("IVMIShiftAmt1"); - } else if (strncmp(format, "IVMIShiftAmt2", - strlen("IVMIShiftAmt2")) == 0) { - int cmode = instr->NEONCmode(); - int shift_amount = 8 << (cmode & 1); - AppendToOutput("#%d", shift_amount); - return strlen("IVMIShiftAmt2"); - } else { - VIXL_UNIMPLEMENTED(); - return 0; - } - } - default: { - VIXL_UNIMPLEMENTED(); - return 0; - } - } - } - case 'X': { // IX - CLREX instruction. - AppendToOutput("#0x%" PRIx32, instr->CRm()); - return 2; - } - default: { - VIXL_UNIMPLEMENTED(); - return 0; - } - } -} - - -int Disassembler::SubstituteBitfieldImmediateField(const Instruction* instr, - const char* format) { - VIXL_ASSERT((format[0] == 'I') && (format[1] == 'B')); - unsigned r = instr->ImmR(); - unsigned s = instr->ImmS(); - - switch (format[2]) { - case 'r': { // IBr. - AppendToOutput("#%d", r); - return 3; - } - case 's': { // IBs+1 or IBs-r+1. - if (format[3] == '+') { - AppendToOutput("#%d", s + 1); - return 5; - } else { - VIXL_ASSERT(format[3] == '-'); - AppendToOutput("#%d", s - r + 1); - return 7; - } - } - case 'Z': { // IBZ-r. - VIXL_ASSERT((format[3] == '-') && (format[4] == 'r')); - unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize; - AppendToOutput("#%d", reg_size - r); - return 5; - } - default: { - VIXL_UNREACHABLE(); - return 0; - } - } -} - - -int Disassembler::SubstituteLiteralField(const Instruction* instr, - const char* format) { - VIXL_ASSERT(strncmp(format, "LValue", 6) == 0); - USE(format); - - const void * address = instr->LiteralAddress(); - switch (instr->Mask(LoadLiteralMask)) { - case LDR_w_lit: - case LDR_x_lit: - case LDRSW_x_lit: - case LDR_s_lit: - case LDR_d_lit: - case LDR_q_lit: - AppendCodeRelativeDataAddressToOutput(instr, address); - break; - case PRFM_lit: { - // Use the prefetch hint to decide how to print the address. - switch (instr->PrefetchHint()) { - case 0x0: // PLD: prefetch for load. - case 0x2: // PST: prepare for store. - AppendCodeRelativeDataAddressToOutput(instr, address); - break; - case 0x1: // PLI: preload instructions. - AppendCodeRelativeCodeAddressToOutput(instr, address); - break; - case 0x3: // Unallocated hint. 
- AppendCodeRelativeAddressToOutput(instr, address); - break; - } - break; - } - default: - VIXL_UNREACHABLE(); - } - - return 6; -} - - -int Disassembler::SubstituteShiftField(const Instruction* instr, - const char* format) { - VIXL_ASSERT(format[0] == 'N'); - VIXL_ASSERT(instr->ShiftDP() <= 0x3); - - switch (format[1]) { - case 'D': { // HDP. - VIXL_ASSERT(instr->ShiftDP() != ROR); - VIXL_FALLTHROUGH(); - } - case 'L': { // HLo. - if (instr->ImmDPShift() != 0) { - const char* shift_type[] = {"lsl", "lsr", "asr", "ror"}; - AppendToOutput(", %s #%" PRId32, shift_type[instr->ShiftDP()], - instr->ImmDPShift()); - } - return 3; - } - default: - VIXL_UNIMPLEMENTED(); - return 0; - } -} - - -int Disassembler::SubstituteConditionField(const Instruction* instr, - const char* format) { - VIXL_ASSERT(format[0] == 'C'); - const char* condition_code[] = { "eq", "ne", "hs", "lo", - "mi", "pl", "vs", "vc", - "hi", "ls", "ge", "lt", - "gt", "le", "al", "nv" }; - int cond; - switch (format[1]) { - case 'B': cond = instr->ConditionBranch(); break; - case 'I': { - cond = InvertCondition(static_cast(instr->Condition())); - break; - } - default: cond = instr->Condition(); - } - AppendToOutput("%s", condition_code[cond]); - return 4; -} - - -int Disassembler::SubstitutePCRelAddressField(const Instruction* instr, - const char* format) { - VIXL_ASSERT((strcmp(format, "AddrPCRelByte") == 0) || // Used by `adr`. - (strcmp(format, "AddrPCRelPage") == 0)); // Used by `adrp`. - - int64_t offset = instr->ImmPCRel(); - - // Compute the target address based on the effective address (after applying - // code_address_offset). This is required for correct behaviour of adrp. - const Instruction* base = instr + code_address_offset(); - if (format[9] == 'P') { - offset *= kPageSize; - base = AlignDown(base, kPageSize); - } - // Strip code_address_offset before printing, so we can use the - // semantically-correct AppendCodeRelativeAddressToOutput. - const void* target = - reinterpret_cast(base + offset - code_address_offset()); - - AppendPCRelativeOffsetToOutput(instr, offset); - AppendToOutput(" "); - AppendCodeRelativeAddressToOutput(instr, target); - return 13; -} - - -int Disassembler::SubstituteBranchTargetField(const Instruction* instr, - const char* format) { - VIXL_ASSERT(strncmp(format, "TImm", 4) == 0); - - int64_t offset = 0; - switch (format[5]) { - // BImmUncn - unconditional branch immediate. - case 'n': offset = instr->ImmUncondBranch(); break; - // BImmCond - conditional branch immediate. - case 'o': offset = instr->ImmCondBranch(); break; - // BImmCmpa - compare and branch immediate. - case 'm': offset = instr->ImmCmpBranch(); break; - // BImmTest - test and branch immediate. - case 'e': offset = instr->ImmTestBranch(); break; - default: VIXL_UNIMPLEMENTED(); - } - offset <<= kInstructionSizeLog2; - const void* target_address = reinterpret_cast(instr + offset); - VIXL_STATIC_ASSERT(sizeof(*instr) == 1); - - AppendPCRelativeOffsetToOutput(instr, offset); - AppendToOutput(" "); - AppendCodeRelativeCodeAddressToOutput(instr, target_address); - - return 8; -} - - -int Disassembler::SubstituteExtendField(const Instruction* instr, - const char* format) { - VIXL_ASSERT(strncmp(format, "Ext", 3) == 0); - VIXL_ASSERT(instr->ExtendMode() <= 7); - USE(format); - - const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx", - "sxtb", "sxth", "sxtw", "sxtx" }; - - // If rd or rn is SP, uxtw on 32-bit registers and uxtx on 64-bit - // registers becomes lsl. 
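For reference, a standalone C++ sketch of the page-relative computation performed above for adrp (AdrpTarget is a hypothetical helper, not part of the deleted VIXL sources): the base is aligned down to the 4 KB page and the immediate is scaled by the page size.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical helper mirroring the AlignDown + "offset *= kPageSize" logic above.
    static uint64_t AdrpTarget(uint64_t pc, int64_t imm21) {
      const uint64_t kPageSize = 4096;
      uint64_t base = pc & ~(kPageSize - 1);  // AlignDown(pc, kPageSize)
      return base + imm21 * kPageSize;        // page-scaled offset
    }

    int main() {
      // An adrp at 0x401234 with an immediate of +2 pages resolves to 0x403000.
      std::printf("0x%llx\n", (unsigned long long)AdrpTarget(0x401234, 2));
      return 0;
    }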
- if (((instr->Rd() == kZeroRegCode) || (instr->Rn() == kZeroRegCode)) && - (((instr->ExtendMode() == UXTW) && (instr->SixtyFourBits() == 0)) || - (instr->ExtendMode() == UXTX))) { - if (instr->ImmExtendShift() > 0) { - AppendToOutput(", lsl #%" PRId32, instr->ImmExtendShift()); - } - } else { - AppendToOutput(", %s", extend_mode[instr->ExtendMode()]); - if (instr->ImmExtendShift() > 0) { - AppendToOutput(" #%" PRId32, instr->ImmExtendShift()); - } - } - return 3; -} - - -int Disassembler::SubstituteLSRegOffsetField(const Instruction* instr, - const char* format) { - VIXL_ASSERT(strncmp(format, "Offsetreg", 9) == 0); - const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl", - "undefined", "undefined", "sxtw", "sxtx" }; - USE(format); - - unsigned shift = instr->ImmShiftLS(); - Extend ext = static_cast(instr->ExtendMode()); - char reg_type = ((ext == UXTW) || (ext == SXTW)) ? 'w' : 'x'; - - unsigned rm = instr->Rm(); - if (rm == kZeroRegCode) { - AppendToOutput("%czr", reg_type); - } else { - AppendToOutput("%c%d", reg_type, rm); - } - - // Extend mode UXTX is an alias for shift mode LSL here. - if (!((ext == UXTX) && (shift == 0))) { - AppendToOutput(", %s", extend_mode[ext]); - if (shift != 0) { - AppendToOutput(" #%d", instr->SizeLS()); - } - } - return 9; -} - - -int Disassembler::SubstitutePrefetchField(const Instruction* instr, - const char* format) { - VIXL_ASSERT(format[0] == 'P'); - USE(format); - - static const char* hints[] = {"ld", "li", "st"}; - static const char* stream_options[] = {"keep", "strm"}; - - unsigned hint = instr->PrefetchHint(); - unsigned target = instr->PrefetchTarget() + 1; - unsigned stream = instr->PrefetchStream(); - - if ((hint >= (sizeof(hints) / sizeof(hints[0]))) || (target > 3)) { - // Unallocated prefetch operations. - int prefetch_mode = instr->ImmPrefetchOperation(); - AppendToOutput("#0b%c%c%c%c%c", - (prefetch_mode & (1 << 4)) ? '1' : '0', - (prefetch_mode & (1 << 3)) ? '1' : '0', - (prefetch_mode & (1 << 2)) ? '1' : '0', - (prefetch_mode & (1 << 1)) ? '1' : '0', - (prefetch_mode & (1 << 0)) ? 
'1' : '0'); - } else { - VIXL_ASSERT(stream < (sizeof(stream_options) / sizeof(stream_options[0]))); - AppendToOutput("p%sl%d%s", hints[hint], target, stream_options[stream]); - } - return 6; -} - -int Disassembler::SubstituteBarrierField(const Instruction* instr, - const char* format) { - VIXL_ASSERT(format[0] == 'M'); - USE(format); - - static const char* options[4][4] = { - { "sy (0b0000)", "oshld", "oshst", "osh" }, - { "sy (0b0100)", "nshld", "nshst", "nsh" }, - { "sy (0b1000)", "ishld", "ishst", "ish" }, - { "sy (0b1100)", "ld", "st", "sy" } - }; - int domain = instr->ImmBarrierDomain(); - int type = instr->ImmBarrierType(); - - AppendToOutput("%s", options[domain][type]); - return 1; -} - -int Disassembler::SubstituteSysOpField(const Instruction* instr, - const char* format) { - VIXL_ASSERT(format[0] == 'G'); - int op = -1; - switch (format[1]) { - case '1': op = instr->SysOp1(); break; - case '2': op = instr->SysOp2(); break; - default: - VIXL_UNREACHABLE(); - } - AppendToOutput("#%d", op); - return 2; -} - -int Disassembler::SubstituteCrField(const Instruction* instr, - const char* format) { - VIXL_ASSERT(format[0] == 'K'); - int cr = -1; - switch (format[1]) { - case 'n': cr = instr->CRn(); break; - case 'm': cr = instr->CRm(); break; - default: - VIXL_UNREACHABLE(); - } - AppendToOutput("C%d", cr); - return 2; -} - -void Disassembler::ResetOutput() { - buffer_pos_ = 0; - buffer_[buffer_pos_] = 0; -} - - -void Disassembler::AppendToOutput(const char* format, ...) { - va_list args; - va_start(args, format); - buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], buffer_size_ - buffer_pos_, - format, args); - va_end(args); -} - - -void PrintDisassembler::ProcessOutput(const Instruction* instr) { - fprintf(stream_, "0x%016" PRIx64 " %08" PRIx32 "\t\t%s\n", - reinterpret_cast(instr), - instr->InstructionBits(), - GetOutput()); -} - -} // namespace vixl diff --git a/disas/libvixl/vixl/a64/disasm-a64.h b/disas/libvixl/vixl/a64/disasm-a64.h deleted file mode 100644 index 930df6ea6a..0000000000 --- a/disas/libvixl/vixl/a64/disasm-a64.h +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2015, ARM Limited -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of ARM Limited nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef VIXL_A64_DISASM_A64_H -#define VIXL_A64_DISASM_A64_H - -#include "vixl/globals.h" -#include "vixl/utils.h" -#include "vixl/a64/instructions-a64.h" -#include "vixl/a64/decoder-a64.h" -#include "vixl/a64/assembler-a64.h" - -namespace vixl { - -class Disassembler: public DecoderVisitor { - public: - Disassembler(); - Disassembler(char* text_buffer, int buffer_size); - virtual ~Disassembler(); - char* GetOutput(); - - // Declare all Visitor functions. - #define DECLARE(A) virtual void Visit##A(const Instruction* instr); - VISITOR_LIST(DECLARE) - #undef DECLARE - - protected: - virtual void ProcessOutput(const Instruction* instr); - - // Default output functions. The functions below implement a default way of - // printing elements in the disassembly. A sub-class can override these to - // customize the disassembly output. - - // Prints the name of a register. - // TODO: This currently doesn't allow renaming of V registers. - virtual void AppendRegisterNameToOutput(const Instruction* instr, - const CPURegister& reg); - - // Prints a PC-relative offset. This is used for example when disassembling - // branches to immediate offsets. - virtual void AppendPCRelativeOffsetToOutput(const Instruction* instr, - int64_t offset); - - // Prints an address, in the general case. It can be code or data. This is - // used for example to print the target address of an ADR instruction. - virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr, - const void* addr); - - // Prints the address of some code. - // This is used for example to print the target address of a branch to an - // immediate offset. - // A sub-class can for example override this method to lookup the address and - // print an appropriate name. - virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr, - const void* addr); - - // Prints the address of some data. - // This is used for example to print the source address of a load literal - // instruction. - virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr, - const void* addr); - - // Same as the above, but for addresses that are not relative to the code - // buffer. They are currently not used by VIXL. - virtual void AppendAddressToOutput(const Instruction* instr, - const void* addr); - virtual void AppendCodeAddressToOutput(const Instruction* instr, - const void* addr); - virtual void AppendDataAddressToOutput(const Instruction* instr, - const void* addr); - - public: - // Get/Set the offset that should be added to code addresses when printing - // code-relative addresses in the AppendCodeRelativeAddressToOutput() - // helpers. - // Below is an example of how a branch immediate instruction in memory at - // address 0xb010200 would disassemble with different offsets. 
- // Base address | Disassembly - // 0x0 | 0xb010200: b #+0xcc (addr 0xb0102cc) - // 0x10000 | 0xb000200: b #+0xcc (addr 0xb0002cc) - // 0xb010200 | 0x0: b #+0xcc (addr 0xcc) - void MapCodeAddress(int64_t base_address, const Instruction* instr_address); - int64_t CodeRelativeAddress(const void* instr); - - private: - void Format( - const Instruction* instr, const char* mnemonic, const char* format); - void Substitute(const Instruction* instr, const char* string); - int SubstituteField(const Instruction* instr, const char* format); - int SubstituteRegisterField(const Instruction* instr, const char* format); - int SubstituteImmediateField(const Instruction* instr, const char* format); - int SubstituteLiteralField(const Instruction* instr, const char* format); - int SubstituteBitfieldImmediateField( - const Instruction* instr, const char* format); - int SubstituteShiftField(const Instruction* instr, const char* format); - int SubstituteExtendField(const Instruction* instr, const char* format); - int SubstituteConditionField(const Instruction* instr, const char* format); - int SubstitutePCRelAddressField(const Instruction* instr, const char* format); - int SubstituteBranchTargetField(const Instruction* instr, const char* format); - int SubstituteLSRegOffsetField(const Instruction* instr, const char* format); - int SubstitutePrefetchField(const Instruction* instr, const char* format); - int SubstituteBarrierField(const Instruction* instr, const char* format); - int SubstituteSysOpField(const Instruction* instr, const char* format); - int SubstituteCrField(const Instruction* instr, const char* format); - bool RdIsZROrSP(const Instruction* instr) const { - return (instr->Rd() == kZeroRegCode); - } - - bool RnIsZROrSP(const Instruction* instr) const { - return (instr->Rn() == kZeroRegCode); - } - - bool RmIsZROrSP(const Instruction* instr) const { - return (instr->Rm() == kZeroRegCode); - } - - bool RaIsZROrSP(const Instruction* instr) const { - return (instr->Ra() == kZeroRegCode); - } - - bool IsMovzMovnImm(unsigned reg_size, uint64_t value); - - int64_t code_address_offset() const { return code_address_offset_; } - - protected: - void ResetOutput(); - void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3); - - void set_code_address_offset(int64_t code_address_offset) { - code_address_offset_ = code_address_offset; - } - - char* buffer_; - uint32_t buffer_pos_; - uint32_t buffer_size_; - bool own_buffer_; - - int64_t code_address_offset_; -}; - - -class PrintDisassembler: public Disassembler { - public: - explicit PrintDisassembler(FILE* stream) : stream_(stream) { } - - protected: - virtual void ProcessOutput(const Instruction* instr); - - private: - FILE *stream_; -}; -} // namespace vixl - -#endif // VIXL_A64_DISASM_A64_H diff --git a/disas/libvixl/vixl/a64/instructions-a64.cc b/disas/libvixl/vixl/a64/instructions-a64.cc deleted file mode 100644 index 33992f88a4..0000000000 --- a/disas/libvixl/vixl/a64/instructions-a64.cc +++ /dev/null @@ -1,622 +0,0 @@ -// Copyright 2015, ARM Limited -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of ARM Limited nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "vixl/a64/instructions-a64.h" -#include "vixl/a64/assembler-a64.h" - -namespace vixl { - - -// Floating-point infinity values. -const float16 kFP16PositiveInfinity = 0x7c00; -const float16 kFP16NegativeInfinity = 0xfc00; -const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000); -const float kFP32NegativeInfinity = rawbits_to_float(0xff800000); -const double kFP64PositiveInfinity = - rawbits_to_double(UINT64_C(0x7ff0000000000000)); -const double kFP64NegativeInfinity = - rawbits_to_double(UINT64_C(0xfff0000000000000)); - - -// The default NaN values (for FPCR.DN=1). 
-const double kFP64DefaultNaN = rawbits_to_double(UINT64_C(0x7ff8000000000000)); -const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000); -const float16 kFP16DefaultNaN = 0x7e00; - - -static uint64_t RotateRight(uint64_t value, - unsigned int rotate, - unsigned int width) { - VIXL_ASSERT(width <= 64); - rotate &= 63; - return ((value & ((UINT64_C(1) << rotate) - 1)) << - (width - rotate)) | (value >> rotate); -} - - -static uint64_t RepeatBitsAcrossReg(unsigned reg_size, - uint64_t value, - unsigned width) { - VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) || - (width == 32)); - VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); - uint64_t result = value & ((UINT64_C(1) << width) - 1); - for (unsigned i = width; i < reg_size; i *= 2) { - result |= (result << i); - } - return result; -} - - -bool Instruction::IsLoad() const { - if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) { - return false; - } - - if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) { - return Mask(LoadStorePairLBit) != 0; - } else { - LoadStoreOp op = static_cast(Mask(LoadStoreMask)); - switch (op) { - case LDRB_w: - case LDRH_w: - case LDR_w: - case LDR_x: - case LDRSB_w: - case LDRSB_x: - case LDRSH_w: - case LDRSH_x: - case LDRSW_x: - case LDR_b: - case LDR_h: - case LDR_s: - case LDR_d: - case LDR_q: return true; - default: return false; - } - } -} - - -bool Instruction::IsStore() const { - if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) { - return false; - } - - if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) { - return Mask(LoadStorePairLBit) == 0; - } else { - LoadStoreOp op = static_cast(Mask(LoadStoreMask)); - switch (op) { - case STRB_w: - case STRH_w: - case STR_w: - case STR_x: - case STR_b: - case STR_h: - case STR_s: - case STR_d: - case STR_q: return true; - default: return false; - } - } -} - - -// Logical immediates can't encode zero, so a return value of zero is used to -// indicate a failure case. Specifically, where the constraints on imm_s are -// not met. -uint64_t Instruction::ImmLogical() const { - unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize; - int32_t n = BitN(); - int32_t imm_s = ImmSetBits(); - int32_t imm_r = ImmRotate(); - - // An integer is constructed from the n, imm_s and imm_r bits according to - // the following table: - // - // N imms immr size S R - // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr) - // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr) - // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr) - // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr) - // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr) - // 0 11110s xxxxxr 2 UInt(s) UInt(r) - // (s bits must not be all set) - // - // A pattern is constructed of size bits, where the least significant S+1 - // bits are set. The pattern is rotated right by R, and repeated across a - // 32 or 64-bit value, depending on destination register width. 
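To make the encoding table above concrete, here is a standalone sketch (simplified restatements of the RotateRight and RepeatBitsAcrossReg helpers defined just above, not the originals) decoding one representative logical immediate, N=0, imms=0b111100, immr=0: a 2-bit pattern with a single set bit, replicated across a 64-bit register, giving 0x5555555555555555.

    #include <cstdint>
    #include <cstdio>

    // Simplified local copies of the helpers used by ImmLogical() above.
    static uint64_t RotateRight(uint64_t value, unsigned rotate, unsigned width) {
      rotate &= 63;
      if (rotate == 0) return value;
      return ((value & ((UINT64_C(1) << rotate) - 1)) << (width - rotate)) |
             (value >> rotate);
    }

    static uint64_t RepeatBitsAcrossReg(unsigned reg_size, uint64_t value,
                                        unsigned width) {
      uint64_t result = value & ((UINT64_C(1) << width) - 1);
      for (unsigned i = width; i < reg_size; i *= 2) result |= (result << i);
      return result;
    }

    int main() {
      // N=0, imms=0b111100, immr=0: size = 2, S = 0, so the pattern is 0b01.
      uint64_t pattern = (UINT64_C(1) << 1) - 1;
      uint64_t imm = RepeatBitsAcrossReg(64, RotateRight(pattern, 0, 2), 2);
      std::printf("0x%016llx\n", (unsigned long long)imm);  // 0x5555555555555555
      return 0;
    }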
- // - - if (n == 1) { - if (imm_s == 0x3f) { - return 0; - } - uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1; - return RotateRight(bits, imm_r, 64); - } else { - if ((imm_s >> 1) == 0x1f) { - return 0; - } - for (int width = 0x20; width >= 0x2; width >>= 1) { - if ((imm_s & width) == 0) { - int mask = width - 1; - if ((imm_s & mask) == mask) { - return 0; - } - uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1; - return RepeatBitsAcrossReg(reg_size, - RotateRight(bits, imm_r & mask, width), - width); - } - } - } - VIXL_UNREACHABLE(); - return 0; -} - - -uint32_t Instruction::ImmNEONabcdefgh() const { - return ImmNEONabc() << 5 | ImmNEONdefgh(); -} - - -float Instruction::Imm8ToFP32(uint32_t imm8) { - // Imm8: abcdefgh (8 bits) - // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits) - // where B is b ^ 1 - uint32_t bits = imm8; - uint32_t bit7 = (bits >> 7) & 0x1; - uint32_t bit6 = (bits >> 6) & 0x1; - uint32_t bit5_to_0 = bits & 0x3f; - uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19); - - return rawbits_to_float(result); -} - - -float Instruction::ImmFP32() const { - return Imm8ToFP32(ImmFP()); -} - - -double Instruction::Imm8ToFP64(uint32_t imm8) { - // Imm8: abcdefgh (8 bits) - // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 - // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits) - // where B is b ^ 1 - uint32_t bits = imm8; - uint64_t bit7 = (bits >> 7) & 0x1; - uint64_t bit6 = (bits >> 6) & 0x1; - uint64_t bit5_to_0 = bits & 0x3f; - uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48); - - return rawbits_to_double(result); -} - - -double Instruction::ImmFP64() const { - return Imm8ToFP64(ImmFP()); -} - - -float Instruction::ImmNEONFP32() const { - return Imm8ToFP32(ImmNEONabcdefgh()); -} - - -double Instruction::ImmNEONFP64() const { - return Imm8ToFP64(ImmNEONabcdefgh()); -} - - -unsigned CalcLSDataSize(LoadStoreOp op) { - VIXL_ASSERT((LSSize_offset + LSSize_width) == (kInstructionSize * 8)); - unsigned size = static_cast(op) >> LSSize_offset; - if ((op & LSVector_mask) != 0) { - // Vector register memory operations encode the access size in the "size" - // and "opc" fields. 
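As a quick check of the abcdefgh expansion above, a standalone sketch (it uses memcpy in place of VIXL's rawbits_to_float and is not part of the deleted sources) reproducing the FP32 case: imm8 = 0x70 expands to 0x3f800000, which is 1.0f.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Local restatement of the Imm8ToFP32() expansion shown above.
    static float Imm8ToFP32(uint32_t imm8) {
      uint32_t bit7 = (imm8 >> 7) & 0x1;
      uint32_t bit6 = (imm8 >> 6) & 0x1;
      uint32_t bit5_to_0 = imm8 & 0x3f;
      uint32_t bits = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
      float result;
      std::memcpy(&result, &bits, sizeof(result));  // rawbits_to_float equivalent
      return result;
    }

    int main() {
      std::printf("%f\n", Imm8ToFP32(0x70));  // prints 1.000000
      return 0;
    }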
- if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) { - size = kQRegSizeInBytesLog2; - } - } - return size; -} - - -unsigned CalcLSPairDataSize(LoadStorePairOp op) { - VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes); - VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes); - switch (op) { - case STP_q: - case LDP_q: return kQRegSizeInBytesLog2; - case STP_x: - case LDP_x: - case STP_d: - case LDP_d: return kXRegSizeInBytesLog2; - default: return kWRegSizeInBytesLog2; - } -} - - -int Instruction::ImmBranchRangeBitwidth(ImmBranchType branch_type) { - switch (branch_type) { - case UncondBranchType: - return ImmUncondBranch_width; - case CondBranchType: - return ImmCondBranch_width; - case CompareBranchType: - return ImmCmpBranch_width; - case TestBranchType: - return ImmTestBranch_width; - default: - VIXL_UNREACHABLE(); - return 0; - } -} - - -int32_t Instruction::ImmBranchForwardRange(ImmBranchType branch_type) { - int32_t encoded_max = 1 << (ImmBranchRangeBitwidth(branch_type) - 1); - return encoded_max * kInstructionSize; -} - - -bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type, - int64_t offset) { - return is_intn(ImmBranchRangeBitwidth(branch_type), offset); -} - - -const Instruction* Instruction::ImmPCOffsetTarget() const { - const Instruction * base = this; - ptrdiff_t offset; - if (IsPCRelAddressing()) { - // ADR and ADRP. - offset = ImmPCRel(); - if (Mask(PCRelAddressingMask) == ADRP) { - base = AlignDown(base, kPageSize); - offset *= kPageSize; - } else { - VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR); - } - } else { - // All PC-relative branches. - VIXL_ASSERT(BranchType() != UnknownBranchType); - // Relative branch offsets are instruction-size-aligned. - offset = ImmBranch() << kInstructionSizeLog2; - } - return base + offset; -} - - -int Instruction::ImmBranch() const { - switch (BranchType()) { - case CondBranchType: return ImmCondBranch(); - case UncondBranchType: return ImmUncondBranch(); - case CompareBranchType: return ImmCmpBranch(); - case TestBranchType: return ImmTestBranch(); - default: VIXL_UNREACHABLE(); - } - return 0; -} - - -void Instruction::SetImmPCOffsetTarget(const Instruction* target) { - if (IsPCRelAddressing()) { - SetPCRelImmTarget(target); - } else { - SetBranchImmTarget(target); - } -} - - -void Instruction::SetPCRelImmTarget(const Instruction* target) { - ptrdiff_t imm21; - if ((Mask(PCRelAddressingMask) == ADR)) { - imm21 = target - this; - } else { - VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP); - uintptr_t this_page = reinterpret_cast(this) / kPageSize; - uintptr_t target_page = reinterpret_cast(target) / kPageSize; - imm21 = target_page - this_page; - } - Instr imm = Assembler::ImmPCRelAddress(static_cast(imm21)); - - SetInstructionBits(Mask(~ImmPCRel_mask) | imm); -} - - -void Instruction::SetBranchImmTarget(const Instruction* target) { - VIXL_ASSERT(((target - this) & 3) == 0); - Instr branch_imm = 0; - uint32_t imm_mask = 0; - int offset = static_cast((target - this) >> kInstructionSizeLog2); - switch (BranchType()) { - case CondBranchType: { - branch_imm = Assembler::ImmCondBranch(offset); - imm_mask = ImmCondBranch_mask; - break; - } - case UncondBranchType: { - branch_imm = Assembler::ImmUncondBranch(offset); - imm_mask = ImmUncondBranch_mask; - break; - } - case CompareBranchType: { - branch_imm = Assembler::ImmCmpBranch(offset); - imm_mask = ImmCmpBranch_mask; - break; - } - case TestBranchType: { - branch_imm = Assembler::ImmTestBranch(offset); - imm_mask = ImmTestBranch_mask; - break; - } - default: 
VIXL_UNREACHABLE(); - } - SetInstructionBits(Mask(~imm_mask) | branch_imm); -} - - -void Instruction::SetImmLLiteral(const Instruction* source) { - VIXL_ASSERT(IsWordAligned(source)); - ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2; - Instr imm = Assembler::ImmLLiteral(static_cast(offset)); - Instr mask = ImmLLiteral_mask; - - SetInstructionBits(Mask(~mask) | imm); -} - - -VectorFormat VectorFormatHalfWidth(const VectorFormat vform) { - VIXL_ASSERT(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D || - vform == kFormatH || vform == kFormatS || vform == kFormatD); - switch (vform) { - case kFormat8H: return kFormat8B; - case kFormat4S: return kFormat4H; - case kFormat2D: return kFormat2S; - case kFormatH: return kFormatB; - case kFormatS: return kFormatH; - case kFormatD: return kFormatS; - default: VIXL_UNREACHABLE(); return kFormatUndefined; - } -} - - -VectorFormat VectorFormatDoubleWidth(const VectorFormat vform) { - VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S || - vform == kFormatB || vform == kFormatH || vform == kFormatS); - switch (vform) { - case kFormat8B: return kFormat8H; - case kFormat4H: return kFormat4S; - case kFormat2S: return kFormat2D; - case kFormatB: return kFormatH; - case kFormatH: return kFormatS; - case kFormatS: return kFormatD; - default: VIXL_UNREACHABLE(); return kFormatUndefined; - } -} - - -VectorFormat VectorFormatFillQ(const VectorFormat vform) { - switch (vform) { - case kFormatB: - case kFormat8B: - case kFormat16B: return kFormat16B; - case kFormatH: - case kFormat4H: - case kFormat8H: return kFormat8H; - case kFormatS: - case kFormat2S: - case kFormat4S: return kFormat4S; - case kFormatD: - case kFormat1D: - case kFormat2D: return kFormat2D; - default: VIXL_UNREACHABLE(); return kFormatUndefined; - } -} - -VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform) { - switch (vform) { - case kFormat4H: return kFormat8B; - case kFormat8H: return kFormat16B; - case kFormat2S: return kFormat4H; - case kFormat4S: return kFormat8H; - case kFormat1D: return kFormat2S; - case kFormat2D: return kFormat4S; - default: VIXL_UNREACHABLE(); return kFormatUndefined; - } -} - -VectorFormat VectorFormatDoubleLanes(const VectorFormat vform) { - VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S); - switch (vform) { - case kFormat8B: return kFormat16B; - case kFormat4H: return kFormat8H; - case kFormat2S: return kFormat4S; - default: VIXL_UNREACHABLE(); return kFormatUndefined; - } -} - - -VectorFormat VectorFormatHalfLanes(const VectorFormat vform) { - VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S); - switch (vform) { - case kFormat16B: return kFormat8B; - case kFormat8H: return kFormat4H; - case kFormat4S: return kFormat2S; - default: VIXL_UNREACHABLE(); return kFormatUndefined; - } -} - - -VectorFormat ScalarFormatFromLaneSize(int laneSize) { - switch (laneSize) { - case 8: return kFormatB; - case 16: return kFormatH; - case 32: return kFormatS; - case 64: return kFormatD; - default: VIXL_UNREACHABLE(); return kFormatUndefined; - } -} - - -unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) { - VIXL_ASSERT(vform != kFormatUndefined); - switch (vform) { - case kFormatB: return kBRegSize; - case kFormatH: return kHRegSize; - case kFormatS: return kSRegSize; - case kFormatD: return kDRegSize; - case kFormat8B: - case kFormat4H: - case kFormat2S: - case kFormat1D: return kDRegSize; - default: return kQRegSize; - } -} - - -unsigned 
RegisterSizeInBytesFromFormat(VectorFormat vform) { - return RegisterSizeInBitsFromFormat(vform) / 8; -} - - -unsigned LaneSizeInBitsFromFormat(VectorFormat vform) { - VIXL_ASSERT(vform != kFormatUndefined); - switch (vform) { - case kFormatB: - case kFormat8B: - case kFormat16B: return 8; - case kFormatH: - case kFormat4H: - case kFormat8H: return 16; - case kFormatS: - case kFormat2S: - case kFormat4S: return 32; - case kFormatD: - case kFormat1D: - case kFormat2D: return 64; - default: VIXL_UNREACHABLE(); return 0; - } -} - - -int LaneSizeInBytesFromFormat(VectorFormat vform) { - return LaneSizeInBitsFromFormat(vform) / 8; -} - - -int LaneSizeInBytesLog2FromFormat(VectorFormat vform) { - VIXL_ASSERT(vform != kFormatUndefined); - switch (vform) { - case kFormatB: - case kFormat8B: - case kFormat16B: return 0; - case kFormatH: - case kFormat4H: - case kFormat8H: return 1; - case kFormatS: - case kFormat2S: - case kFormat4S: return 2; - case kFormatD: - case kFormat1D: - case kFormat2D: return 3; - default: VIXL_UNREACHABLE(); return 0; - } -} - - -int LaneCountFromFormat(VectorFormat vform) { - VIXL_ASSERT(vform != kFormatUndefined); - switch (vform) { - case kFormat16B: return 16; - case kFormat8B: - case kFormat8H: return 8; - case kFormat4H: - case kFormat4S: return 4; - case kFormat2S: - case kFormat2D: return 2; - case kFormat1D: - case kFormatB: - case kFormatH: - case kFormatS: - case kFormatD: return 1; - default: VIXL_UNREACHABLE(); return 0; - } -} - - -int MaxLaneCountFromFormat(VectorFormat vform) { - VIXL_ASSERT(vform != kFormatUndefined); - switch (vform) { - case kFormatB: - case kFormat8B: - case kFormat16B: return 16; - case kFormatH: - case kFormat4H: - case kFormat8H: return 8; - case kFormatS: - case kFormat2S: - case kFormat4S: return 4; - case kFormatD: - case kFormat1D: - case kFormat2D: return 2; - default: VIXL_UNREACHABLE(); return 0; - } -} - - -// Does 'vform' indicate a vector format or a scalar format? -bool IsVectorFormat(VectorFormat vform) { - VIXL_ASSERT(vform != kFormatUndefined); - switch (vform) { - case kFormatB: - case kFormatH: - case kFormatS: - case kFormatD: return false; - default: return true; - } -} - - -int64_t MaxIntFromFormat(VectorFormat vform) { - return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform)); -} - - -int64_t MinIntFromFormat(VectorFormat vform) { - return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform)); -} - - -uint64_t MaxUintFromFormat(VectorFormat vform) { - return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform)); -} -} // namespace vixl - diff --git a/disas/libvixl/vixl/a64/instructions-a64.h b/disas/libvixl/vixl/a64/instructions-a64.h deleted file mode 100644 index 7e0dbae36a..0000000000 --- a/disas/libvixl/vixl/a64/instructions-a64.h +++ /dev/null @@ -1,757 +0,0 @@ -// Copyright 2015, ARM Limited -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of ARM Limited nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef VIXL_A64_INSTRUCTIONS_A64_H_ -#define VIXL_A64_INSTRUCTIONS_A64_H_ - -#include "vixl/globals.h" -#include "vixl/utils.h" -#include "vixl/a64/constants-a64.h" - -namespace vixl { -// ISA constants. -------------------------------------------------------------- - -typedef uint32_t Instr; -const unsigned kInstructionSize = 4; -const unsigned kInstructionSizeLog2 = 2; -const unsigned kLiteralEntrySize = 4; -const unsigned kLiteralEntrySizeLog2 = 2; -const unsigned kMaxLoadLiteralRange = 1 * MBytes; - -// This is the nominal page size (as used by the adrp instruction); the actual -// size of the memory pages allocated by the kernel is likely to differ. -const unsigned kPageSize = 4 * KBytes; -const unsigned kPageSizeLog2 = 12; - -const unsigned kBRegSize = 8; -const unsigned kBRegSizeLog2 = 3; -const unsigned kBRegSizeInBytes = kBRegSize / 8; -const unsigned kBRegSizeInBytesLog2 = kBRegSizeLog2 - 3; -const unsigned kHRegSize = 16; -const unsigned kHRegSizeLog2 = 4; -const unsigned kHRegSizeInBytes = kHRegSize / 8; -const unsigned kHRegSizeInBytesLog2 = kHRegSizeLog2 - 3; -const unsigned kWRegSize = 32; -const unsigned kWRegSizeLog2 = 5; -const unsigned kWRegSizeInBytes = kWRegSize / 8; -const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3; -const unsigned kXRegSize = 64; -const unsigned kXRegSizeLog2 = 6; -const unsigned kXRegSizeInBytes = kXRegSize / 8; -const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3; -const unsigned kSRegSize = 32; -const unsigned kSRegSizeLog2 = 5; -const unsigned kSRegSizeInBytes = kSRegSize / 8; -const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3; -const unsigned kDRegSize = 64; -const unsigned kDRegSizeLog2 = 6; -const unsigned kDRegSizeInBytes = kDRegSize / 8; -const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3; -const unsigned kQRegSize = 128; -const unsigned kQRegSizeLog2 = 7; -const unsigned kQRegSizeInBytes = kQRegSize / 8; -const unsigned kQRegSizeInBytesLog2 = kQRegSizeLog2 - 3; -const uint64_t kWRegMask = UINT64_C(0xffffffff); -const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff); -const uint64_t kSRegMask = UINT64_C(0xffffffff); -const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff); -const uint64_t kSSignMask = UINT64_C(0x80000000); -const uint64_t kDSignMask = UINT64_C(0x8000000000000000); -const uint64_t kWSignMask = UINT64_C(0x80000000); -const uint64_t kXSignMask = UINT64_C(0x8000000000000000); -const uint64_t kByteMask = UINT64_C(0xff); -const uint64_t kHalfWordMask = UINT64_C(0xffff); -const uint64_t kWordMask = UINT64_C(0xffffffff); -const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff); -const uint64_t kWMaxUInt = UINT64_C(0xffffffff); -const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff); -const int64_t kXMinInt = 
INT64_C(0x8000000000000000); -const int32_t kWMaxInt = INT32_C(0x7fffffff); -const int32_t kWMinInt = INT32_C(0x80000000); -const unsigned kLinkRegCode = 30; -const unsigned kZeroRegCode = 31; -const unsigned kSPRegInternalCode = 63; -const unsigned kRegCodeMask = 0x1f; - -const unsigned kAddressTagOffset = 56; -const unsigned kAddressTagWidth = 8; -const uint64_t kAddressTagMask = - ((UINT64_C(1) << kAddressTagWidth) - 1) << kAddressTagOffset; -VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000)); - -// AArch64 floating-point specifics. These match IEEE-754. -const unsigned kDoubleMantissaBits = 52; -const unsigned kDoubleExponentBits = 11; -const unsigned kFloatMantissaBits = 23; -const unsigned kFloatExponentBits = 8; -const unsigned kFloat16MantissaBits = 10; -const unsigned kFloat16ExponentBits = 5; - -// Floating-point infinity values. -extern const float16 kFP16PositiveInfinity; -extern const float16 kFP16NegativeInfinity; -extern const float kFP32PositiveInfinity; -extern const float kFP32NegativeInfinity; -extern const double kFP64PositiveInfinity; -extern const double kFP64NegativeInfinity; - -// The default NaN values (for FPCR.DN=1). -extern const float16 kFP16DefaultNaN; -extern const float kFP32DefaultNaN; -extern const double kFP64DefaultNaN; - -unsigned CalcLSDataSize(LoadStoreOp op); -unsigned CalcLSPairDataSize(LoadStorePairOp op); - -enum ImmBranchType { - UnknownBranchType = 0, - CondBranchType = 1, - UncondBranchType = 2, - CompareBranchType = 3, - TestBranchType = 4 -}; - -enum AddrMode { - Offset, - PreIndex, - PostIndex -}; - -enum FPRounding { - // The first four values are encodable directly by FPCR. - FPTieEven = 0x0, - FPPositiveInfinity = 0x1, - FPNegativeInfinity = 0x2, - FPZero = 0x3, - - // The final rounding modes are only available when explicitly specified by - // the instruction (such as with fcvta). It cannot be set in FPCR. - FPTieAway, - FPRoundOdd -}; - -enum Reg31Mode { - Reg31IsStackPointer, - Reg31IsZeroRegister -}; - -// Instructions. --------------------------------------------------------------- - -class Instruction { - public: - Instr InstructionBits() const { - return *(reinterpret_cast(this)); - } - - void SetInstructionBits(Instr new_instr) { - *(reinterpret_cast(this)) = new_instr; - } - - int Bit(int pos) const { - return (InstructionBits() >> pos) & 1; - } - - uint32_t Bits(int msb, int lsb) const { - return unsigned_bitextract_32(msb, lsb, InstructionBits()); - } - - int32_t SignedBits(int msb, int lsb) const { - int32_t bits = *(reinterpret_cast(this)); - return signed_bitextract_32(msb, lsb, bits); - } - - Instr Mask(uint32_t mask) const { - return InstructionBits() & mask; - } - - #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \ - int32_t Name() const { return Func(HighBit, LowBit); } - INSTRUCTION_FIELDS_LIST(DEFINE_GETTER) - #undef DEFINE_GETTER - - // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), - // formed from ImmPCRelLo and ImmPCRelHi. 
- int ImmPCRel() const { - int offset = - static_cast((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo()); - int width = ImmPCRelLo_width + ImmPCRelHi_width; - return signed_bitextract_32(width - 1, 0, offset); - } - - uint64_t ImmLogical() const; - unsigned ImmNEONabcdefgh() const; - float ImmFP32() const; - double ImmFP64() const; - float ImmNEONFP32() const; - double ImmNEONFP64() const; - - unsigned SizeLS() const { - return CalcLSDataSize(static_cast(Mask(LoadStoreMask))); - } - - unsigned SizeLSPair() const { - return CalcLSPairDataSize( - static_cast(Mask(LoadStorePairMask))); - } - - int NEONLSIndex(int access_size_shift) const { - int64_t q = NEONQ(); - int64_t s = NEONS(); - int64_t size = NEONLSSize(); - int64_t index = (q << 3) | (s << 2) | size; - return static_cast(index >> access_size_shift); - } - - // Helpers. - bool IsCondBranchImm() const { - return Mask(ConditionalBranchFMask) == ConditionalBranchFixed; - } - - bool IsUncondBranchImm() const { - return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed; - } - - bool IsCompareBranch() const { - return Mask(CompareBranchFMask) == CompareBranchFixed; - } - - bool IsTestBranch() const { - return Mask(TestBranchFMask) == TestBranchFixed; - } - - bool IsImmBranch() const { - return BranchType() != UnknownBranchType; - } - - bool IsPCRelAddressing() const { - return Mask(PCRelAddressingFMask) == PCRelAddressingFixed; - } - - bool IsLogicalImmediate() const { - return Mask(LogicalImmediateFMask) == LogicalImmediateFixed; - } - - bool IsAddSubImmediate() const { - return Mask(AddSubImmediateFMask) == AddSubImmediateFixed; - } - - bool IsAddSubExtended() const { - return Mask(AddSubExtendedFMask) == AddSubExtendedFixed; - } - - bool IsLoadOrStore() const { - return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed; - } - - bool IsLoad() const; - bool IsStore() const; - - bool IsLoadLiteral() const { - // This includes PRFM_lit. - return Mask(LoadLiteralFMask) == LoadLiteralFixed; - } - - bool IsMovn() const { - return (Mask(MoveWideImmediateMask) == MOVN_x) || - (Mask(MoveWideImmediateMask) == MOVN_w); - } - - static int ImmBranchRangeBitwidth(ImmBranchType branch_type); - static int32_t ImmBranchForwardRange(ImmBranchType branch_type); - static bool IsValidImmPCOffset(ImmBranchType branch_type, int64_t offset); - - // Indicate whether Rd can be the stack pointer or the zero register. This - // does not check that the instruction actually has an Rd field. - Reg31Mode RdMode() const { - // The following instructions use sp or wsp as Rd: - // Add/sub (immediate) when not setting the flags. - // Add/sub (extended) when not setting the flags. - // Logical (immediate) when not setting the flags. - // Otherwise, r31 is the zero register. - if (IsAddSubImmediate() || IsAddSubExtended()) { - if (Mask(AddSubSetFlagsBit)) { - return Reg31IsZeroRegister; - } else { - return Reg31IsStackPointer; - } - } - if (IsLogicalImmediate()) { - // Of the logical (immediate) instructions, only ANDS (and its aliases) - // can set the flags. The others can all write into sp. - // Note that some logical operations are not available to - // immediate-operand instructions, so we have to combine two masks here. - if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) { - return Reg31IsZeroRegister; - } else { - return Reg31IsStackPointer; - } - } - return Reg31IsZeroRegister; - } - - // Indicate whether Rn can be the stack pointer or the zero register. This - // does not check that the instruction actually has an Rn field. 
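A standalone sketch of the ImmPCRel() composition above (CombineImmPCRel is a hypothetical helper, not part of the deleted sources): the 19-bit ImmPCRelHi field and the 2-bit ImmPCRelLo field are concatenated and the 21-bit result is sign-extended.

    #include <cstdint>
    #include <cstdio>

    // Concatenate immhi:immlo and sign-extend from bit 20, as ImmPCRel() does.
    static int32_t CombineImmPCRel(uint32_t immhi19, uint32_t immlo2) {
      uint32_t raw = (immhi19 << 2) | immlo2;  // 21-bit composite value
      return (raw & (1u << 20)) ? (int32_t)raw - (1 << 21)  // negative offsets
                                : (int32_t)raw;
    }

    int main() {
      // All-ones fields encode an offset of -1 byte for adr.
      std::printf("%d\n", CombineImmPCRel(0x7ffff, 0x3));  // prints -1
      return 0;
    }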
- Reg31Mode RnMode() const { - // The following instructions use sp or wsp as Rn: - // All loads and stores. - // Add/sub (immediate). - // Add/sub (extended). - // Otherwise, r31 is the zero register. - if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) { - return Reg31IsStackPointer; - } - return Reg31IsZeroRegister; - } - - ImmBranchType BranchType() const { - if (IsCondBranchImm()) { - return CondBranchType; - } else if (IsUncondBranchImm()) { - return UncondBranchType; - } else if (IsCompareBranch()) { - return CompareBranchType; - } else if (IsTestBranch()) { - return TestBranchType; - } else { - return UnknownBranchType; - } - } - - // Find the target of this instruction. 'this' may be a branch or a - // PC-relative addressing instruction. - const Instruction* ImmPCOffsetTarget() const; - - // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or - // a PC-relative addressing instruction. - void SetImmPCOffsetTarget(const Instruction* target); - // Patch a literal load instruction to load from 'source'. - void SetImmLLiteral(const Instruction* source); - - // The range of a load literal instruction, expressed as 'instr +- range'. - // The range is actually the 'positive' range; the branch instruction can - // target [instr - range - kInstructionSize, instr + range]. - static const int kLoadLiteralImmBitwidth = 19; - static const int kLoadLiteralRange = - (1 << kLoadLiteralImmBitwidth) / 2 - kInstructionSize; - - // Calculate the address of a literal referred to by a load-literal - // instruction, and return it as the specified type. - // - // The literal itself is safely mutable only if the backing buffer is safely - // mutable. - template - T LiteralAddress() const { - uint64_t base_raw = reinterpret_cast(this); - int64_t offset = ImmLLiteral() << kLiteralEntrySizeLog2; - uint64_t address_raw = base_raw + offset; - - // Cast the address using a C-style cast. A reinterpret_cast would be - // appropriate, but it can't cast one integral type to another. - T address = (T)(address_raw); - - // Assert that the address can be represented by the specified type. - VIXL_ASSERT((uint64_t)(address) == address_raw); - - return address; - } - - uint32_t Literal32() const { - uint32_t literal; - memcpy(&literal, LiteralAddress(), sizeof(literal)); - return literal; - } - - uint64_t Literal64() const { - uint64_t literal; - memcpy(&literal, LiteralAddress(), sizeof(literal)); - return literal; - } - - float LiteralFP32() const { - return rawbits_to_float(Literal32()); - } - - double LiteralFP64() const { - return rawbits_to_double(Literal64()); - } - - const Instruction* NextInstruction() const { - return this + kInstructionSize; - } - - const Instruction* InstructionAtOffset(int64_t offset) const { - VIXL_ASSERT(IsWordAligned(this + offset)); - return this + offset; - } - - template static Instruction* Cast(T src) { - return reinterpret_cast(src); - } - - template static const Instruction* CastConst(T src) { - return reinterpret_cast(src); - } - - private: - int ImmBranch() const; - - static float Imm8ToFP32(uint32_t imm8); - static double Imm8ToFP64(uint32_t imm8); - - void SetPCRelImmTarget(const Instruction* target); - void SetBranchImmTarget(const Instruction* target); -}; - - -// Functions for handling NEON vector format information. 
-enum VectorFormat { - kFormatUndefined = 0xffffffff, - kFormat8B = NEON_8B, - kFormat16B = NEON_16B, - kFormat4H = NEON_4H, - kFormat8H = NEON_8H, - kFormat2S = NEON_2S, - kFormat4S = NEON_4S, - kFormat1D = NEON_1D, - kFormat2D = NEON_2D, - - // Scalar formats. We add the scalar bit to distinguish between scalar and - // vector enumerations; the bit is always set in the encoding of scalar ops - // and always clear for vector ops. Although kFormatD and kFormat1D appear - // to be the same, their meaning is subtly different. The first is a scalar - // operation, the second a vector operation that only affects one lane. - kFormatB = NEON_B | NEONScalar, - kFormatH = NEON_H | NEONScalar, - kFormatS = NEON_S | NEONScalar, - kFormatD = NEON_D | NEONScalar -}; - -VectorFormat VectorFormatHalfWidth(const VectorFormat vform); -VectorFormat VectorFormatDoubleWidth(const VectorFormat vform); -VectorFormat VectorFormatDoubleLanes(const VectorFormat vform); -VectorFormat VectorFormatHalfLanes(const VectorFormat vform); -VectorFormat ScalarFormatFromLaneSize(int lanesize); -VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform); -VectorFormat VectorFormatFillQ(const VectorFormat vform); -unsigned RegisterSizeInBitsFromFormat(VectorFormat vform); -unsigned RegisterSizeInBytesFromFormat(VectorFormat vform); -// TODO: Make the return types of these functions consistent. -unsigned LaneSizeInBitsFromFormat(VectorFormat vform); -int LaneSizeInBytesFromFormat(VectorFormat vform); -int LaneSizeInBytesLog2FromFormat(VectorFormat vform); -int LaneCountFromFormat(VectorFormat vform); -int MaxLaneCountFromFormat(VectorFormat vform); -bool IsVectorFormat(VectorFormat vform); -int64_t MaxIntFromFormat(VectorFormat vform); -int64_t MinIntFromFormat(VectorFormat vform); -uint64_t MaxUintFromFormat(VectorFormat vform); - - -enum NEONFormat { - NF_UNDEF = 0, - NF_8B = 1, - NF_16B = 2, - NF_4H = 3, - NF_8H = 4, - NF_2S = 5, - NF_4S = 6, - NF_1D = 7, - NF_2D = 8, - NF_B = 9, - NF_H = 10, - NF_S = 11, - NF_D = 12 -}; - -static const unsigned kNEONFormatMaxBits = 6; - -struct NEONFormatMap { - // The bit positions in the instruction to consider. - uint8_t bits[kNEONFormatMaxBits]; - - // Mapping from concatenated bits to format. - NEONFormat map[1 << kNEONFormatMaxBits]; -}; - -class NEONFormatDecoder { - public: - enum SubstitutionMode { - kPlaceholder, - kFormat - }; - - // Construct a format decoder with increasingly specific format maps for each - // subsitution. If no format map is specified, the default is the integer - // format map. - explicit NEONFormatDecoder(const Instruction* instr) { - instrbits_ = instr->InstructionBits(); - SetFormatMaps(IntegerFormatMap()); - } - NEONFormatDecoder(const Instruction* instr, - const NEONFormatMap* format) { - instrbits_ = instr->InstructionBits(); - SetFormatMaps(format); - } - NEONFormatDecoder(const Instruction* instr, - const NEONFormatMap* format0, - const NEONFormatMap* format1) { - instrbits_ = instr->InstructionBits(); - SetFormatMaps(format0, format1); - } - NEONFormatDecoder(const Instruction* instr, - const NEONFormatMap* format0, - const NEONFormatMap* format1, - const NEONFormatMap* format2) { - instrbits_ = instr->InstructionBits(); - SetFormatMaps(format0, format1, format2); - } - - // Set the format mapping for all or individual substitutions. 
- void SetFormatMaps(const NEONFormatMap* format0, - const NEONFormatMap* format1 = NULL, - const NEONFormatMap* format2 = NULL) { - VIXL_ASSERT(format0 != NULL); - formats_[0] = format0; - formats_[1] = (format1 == NULL) ? formats_[0] : format1; - formats_[2] = (format2 == NULL) ? formats_[1] : format2; - } - void SetFormatMap(unsigned index, const NEONFormatMap* format) { - VIXL_ASSERT(index <= (sizeof(formats_) / sizeof(formats_[0]))); - VIXL_ASSERT(format != NULL); - formats_[index] = format; - } - - // Substitute %s in the input string with the placeholder string for each - // register, ie. "'B", "'H", etc. - const char* SubstitutePlaceholders(const char* string) { - return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder); - } - - // Substitute %s in the input string with a new string based on the - // substitution mode. - const char* Substitute(const char* string, - SubstitutionMode mode0 = kFormat, - SubstitutionMode mode1 = kFormat, - SubstitutionMode mode2 = kFormat) { - snprintf(form_buffer_, sizeof(form_buffer_), string, - GetSubstitute(0, mode0), - GetSubstitute(1, mode1), - GetSubstitute(2, mode2)); - return form_buffer_; - } - - // Append a "2" to a mnemonic string based of the state of the Q bit. - const char* Mnemonic(const char* mnemonic) { - if ((instrbits_ & NEON_Q) != 0) { - snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic); - return mne_buffer_; - } - return mnemonic; - } - - VectorFormat GetVectorFormat(int format_index = 0) { - return GetVectorFormat(formats_[format_index]); - } - - VectorFormat GetVectorFormat(const NEONFormatMap* format_map) { - static const VectorFormat vform[] = { - kFormatUndefined, - kFormat8B, kFormat16B, kFormat4H, kFormat8H, - kFormat2S, kFormat4S, kFormat1D, kFormat2D, - kFormatB, kFormatH, kFormatS, kFormatD - }; - VIXL_ASSERT(GetNEONFormat(format_map) < (sizeof(vform) / sizeof(vform[0]))); - return vform[GetNEONFormat(format_map)]; - } - - // Built in mappings for common cases. - - // The integer format map uses three bits (Q, size<1:0>) to encode the - // "standard" set of NEON integer vector formats. - static const NEONFormatMap* IntegerFormatMap() { - static const NEONFormatMap map = { - {23, 22, 30}, - {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D} - }; - return ↦ - } - - // The long integer format map uses two bits (size<1:0>) to encode the - // long set of NEON integer vector formats. These are used in narrow, wide - // and long operations. - static const NEONFormatMap* LongIntegerFormatMap() { - static const NEONFormatMap map = { - {23, 22}, {NF_8H, NF_4S, NF_2D} - }; - return ↦ - } - - // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector - // formats: NF_2S, NF_4S, NF_2D. - static const NEONFormatMap* FPFormatMap() { - // The FP format map assumes two bits (Q, size<0>) are used to encode the - // NEON FP vector formats: NF_2S, NF_4S, NF_2D. - static const NEONFormatMap map = { - {22, 30}, {NF_2S, NF_4S, NF_UNDEF, NF_2D} - }; - return ↦ - } - - // The load/store format map uses three bits (Q, 11, 10) to encode the - // set of NEON vector formats. - static const NEONFormatMap* LoadStoreFormatMap() { - static const NEONFormatMap map = { - {11, 10, 30}, - {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D} - }; - return ↦ - } - - // The logical format map uses one bit (Q) to encode the NEON vector format: - // NF_8B, NF_16B. 
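To illustrate the IntegerFormatMap above, a standalone sketch (kIntegerFormats is a local table mirroring the map's entries, not a VIXL name): bits 23, 22 and 30 are concatenated in that order into a table index, so size = 01 with Q = 1 selects the "8h" format.

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Same ordering as the {23, 22, 30} map above; NF_UNDEF shown as "undef".
      static const char* kIntegerFormats[8] =
          {"8b", "16b", "4h", "8h", "2s", "4s", "undef", "2d"};
      uint32_t instr = (1u << 22) | (1u << 30);  // size = 01, Q = 1
      unsigned index = (((instr >> 23) & 1) << 2) |
                       (((instr >> 22) & 1) << 1) |
                       ((instr >> 30) & 1);
      std::printf("%s\n", kIntegerFormats[index]);  // prints "8h"
      return 0;
    }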
- static const NEONFormatMap* LogicalFormatMap() { - static const NEONFormatMap map = { - {30}, {NF_8B, NF_16B} - }; - return ↦ - } - - // The triangular format map uses between two and five bits to encode the NEON - // vector format: - // xxx10->8B, xxx11->16B, xx100->4H, xx101->8H - // x1000->2S, x1001->4S, 10001->2D, all others undefined. - static const NEONFormatMap* TriangularFormatMap() { - static const NEONFormatMap map = { - {19, 18, 17, 16, 30}, - {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_2S, - NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_UNDEF, NF_2D, - NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_2S, NF_4S, NF_8B, NF_16B, - NF_4H, NF_8H, NF_8B, NF_16B} - }; - return ↦ - } - - // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar - // formats: NF_B, NF_H, NF_S, NF_D. - static const NEONFormatMap* ScalarFormatMap() { - static const NEONFormatMap map = { - {23, 22}, {NF_B, NF_H, NF_S, NF_D} - }; - return ↦ - } - - // The long scalar format map uses two bits (size<1:0>) to encode the longer - // NEON scalar formats: NF_H, NF_S, NF_D. - static const NEONFormatMap* LongScalarFormatMap() { - static const NEONFormatMap map = { - {23, 22}, {NF_H, NF_S, NF_D} - }; - return ↦ - } - - // The FP scalar format map assumes one bit (size<0>) is used to encode the - // NEON FP scalar formats: NF_S, NF_D. - static const NEONFormatMap* FPScalarFormatMap() { - static const NEONFormatMap map = { - {22}, {NF_S, NF_D} - }; - return ↦ - } - - // The triangular scalar format map uses between one and four bits to encode - // the NEON FP scalar formats: - // xxx1->B, xx10->H, x100->S, 1000->D, all others undefined. - static const NEONFormatMap* TriangularScalarFormatMap() { - static const NEONFormatMap map = { - {19, 18, 17, 16}, - {NF_UNDEF, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B, - NF_D, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B} - }; - return ↦ - } - - private: - // Get a pointer to a string that represents the format or placeholder for - // the specified substitution index, based on the format map and instruction. - const char* GetSubstitute(int index, SubstitutionMode mode) { - if (mode == kFormat) { - return NEONFormatAsString(GetNEONFormat(formats_[index])); - } - VIXL_ASSERT(mode == kPlaceholder); - return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index])); - } - - // Get the NEONFormat enumerated value for bits obtained from the - // instruction based on the specified format mapping. - NEONFormat GetNEONFormat(const NEONFormatMap* format_map) { - return format_map->map[PickBits(format_map->bits)]; - } - - // Convert a NEONFormat into a string. - static const char* NEONFormatAsString(NEONFormat format) { - static const char* formats[] = { - "undefined", - "8b", "16b", "4h", "8h", "2s", "4s", "1d", "2d", - "b", "h", "s", "d" - }; - VIXL_ASSERT(format < (sizeof(formats) / sizeof(formats[0]))); - return formats[format]; - } - - // Convert a NEONFormat into a register placeholder string. - static const char* NEONFormatAsPlaceholder(NEONFormat format) { - VIXL_ASSERT((format == NF_B) || (format == NF_H) || - (format == NF_S) || (format == NF_D) || - (format == NF_UNDEF)); - static const char* formats[] = { - "undefined", - "undefined", "undefined", "undefined", "undefined", - "undefined", "undefined", "undefined", "undefined", - "'B", "'H", "'S", "'D" - }; - return formats[format]; - } - - // Select bits from instrbits_ defined by the bits array, concatenate them, - // and return the value. 
- uint8_t PickBits(const uint8_t bits[]) { - uint8_t result = 0; - for (unsigned b = 0; b < kNEONFormatMaxBits; b++) { - if (bits[b] == 0) break; - result <<= 1; - result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1; - } - return result; - } - - Instr instrbits_; - const NEONFormatMap* formats_[3]; - char form_buffer_[64]; - char mne_buffer_[16]; -}; -} // namespace vixl - -#endif // VIXL_A64_INSTRUCTIONS_A64_H_ diff --git a/disas/libvixl/vixl/code-buffer.h b/disas/libvixl/vixl/code-buffer.h deleted file mode 100644 index b95babbdee..0000000000 --- a/disas/libvixl/vixl/code-buffer.h +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2014, ARM Limited -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of ARM Limited nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef VIXL_CODE_BUFFER_H -#define VIXL_CODE_BUFFER_H - -#include -#include "vixl/globals.h" - -namespace vixl { - -class CodeBuffer { - public: - explicit CodeBuffer(size_t capacity = 4 * KBytes); - CodeBuffer(void* buffer, size_t capacity); - ~CodeBuffer(); - - void Reset(); - - ptrdiff_t OffsetFrom(ptrdiff_t offset) const { - ptrdiff_t cursor_offset = cursor_ - buffer_; - VIXL_ASSERT((offset >= 0) && (offset <= cursor_offset)); - return cursor_offset - offset; - } - - ptrdiff_t CursorOffset() const { - return OffsetFrom(0); - } - - template - T GetOffsetAddress(ptrdiff_t offset) const { - VIXL_ASSERT((offset >= 0) && (offset <= (cursor_ - buffer_))); - return reinterpret_cast(buffer_ + offset); - } - - size_t RemainingBytes() const { - VIXL_ASSERT((cursor_ >= buffer_) && (cursor_ <= (buffer_ + capacity_))); - return (buffer_ + capacity_) - cursor_; - } - - // A code buffer can emit: - // * 32-bit data: instruction and constant. - // * 64-bit data: constant. - // * string: debug info. - void Emit32(uint32_t data) { Emit(data); } - - void Emit64(uint64_t data) { Emit(data); } - - void EmitString(const char* string); - - // Align to kInstructionSize. 
- void Align(); - - size_t capacity() const { return capacity_; } - - bool IsManaged() const { return managed_; } - - void Grow(size_t new_capacity); - - bool IsDirty() const { return dirty_; } - - void SetClean() { dirty_ = false; } - - private: - template - void Emit(T value) { - VIXL_ASSERT(RemainingBytes() >= sizeof(value)); - dirty_ = true; - memcpy(cursor_, &value, sizeof(value)); - cursor_ += sizeof(value); - } - - // Backing store of the buffer. - byte* buffer_; - // If true the backing store is allocated and deallocated by the buffer. The - // backing store can then grow on demand. If false the backing store is - // provided by the user and cannot be resized internally. - bool managed_; - // Pointer to the next location to be written. - byte* cursor_; - // True if there has been any write since the buffer was created or cleaned. - bool dirty_; - // Capacity in bytes of the backing store. - size_t capacity_; -}; - -} // namespace vixl - -#endif // VIXL_CODE_BUFFER_H - diff --git a/disas/libvixl/vixl/compiler-intrinsics.cc b/disas/libvixl/vixl/compiler-intrinsics.cc deleted file mode 100644 index fd551faeb1..0000000000 --- a/disas/libvixl/vixl/compiler-intrinsics.cc +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2015, ARM Limited -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of ARM Limited nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#include "compiler-intrinsics.h" - -namespace vixl { - - -int CountLeadingSignBitsFallBack(int64_t value, int width) { - VIXL_ASSERT(IsPowerOf2(width) && (width <= 64)); - if (value >= 0) { - return CountLeadingZeros(value, width) - 1; - } else { - return CountLeadingZeros(~value, width) - 1; - } -} - - -int CountLeadingZerosFallBack(uint64_t value, int width) { - VIXL_ASSERT(IsPowerOf2(width) && (width <= 64)); - if (value == 0) { - return width; - } - int count = 0; - value = value << (64 - width); - if ((value & UINT64_C(0xffffffff00000000)) == 0) { - count += 32; - value = value << 32; - } - if ((value & UINT64_C(0xffff000000000000)) == 0) { - count += 16; - value = value << 16; - } - if ((value & UINT64_C(0xff00000000000000)) == 0) { - count += 8; - value = value << 8; - } - if ((value & UINT64_C(0xf000000000000000)) == 0) { - count += 4; - value = value << 4; - } - if ((value & UINT64_C(0xc000000000000000)) == 0) { - count += 2; - value = value << 2; - } - if ((value & UINT64_C(0x8000000000000000)) == 0) { - count += 1; - } - count += (value == 0); - return count; -} - - -int CountSetBitsFallBack(uint64_t value, int width) { - VIXL_ASSERT(IsPowerOf2(width) && (width <= 64)); - - // Mask out unused bits to ensure that they are not counted. - value &= (UINT64_C(0xffffffffffffffff) >> (64 - width)); - - // Add up the set bits. - // The algorithm works by adding pairs of bit fields together iteratively, - // where the size of each bit field doubles each time. - // An example for an 8-bit value: - // Bits: h g f e d c b a - // \ | \ | \ | \ | - // value = h+g f+e d+c b+a - // \ | \ | - // value = h+g+f+e d+c+b+a - // \ | - // value = h+g+f+e+d+c+b+a - const uint64_t kMasks[] = { - UINT64_C(0x5555555555555555), - UINT64_C(0x3333333333333333), - UINT64_C(0x0f0f0f0f0f0f0f0f), - UINT64_C(0x00ff00ff00ff00ff), - UINT64_C(0x0000ffff0000ffff), - UINT64_C(0x00000000ffffffff), - }; - - for (unsigned i = 0; i < (sizeof(kMasks) / sizeof(kMasks[0])); i++) { - int shift = 1 << i; - value = ((value >> shift) & kMasks[i]) + (value & kMasks[i]); - } - - return static_cast(value); -} - - -int CountTrailingZerosFallBack(uint64_t value, int width) { - VIXL_ASSERT(IsPowerOf2(width) && (width <= 64)); - int count = 0; - value = value << (64 - width); - if ((value & UINT64_C(0xffffffff)) == 0) { - count += 32; - value = value >> 32; - } - if ((value & 0xffff) == 0) { - count += 16; - value = value >> 16; - } - if ((value & 0xff) == 0) { - count += 8; - value = value >> 8; - } - if ((value & 0xf) == 0) { - count += 4; - value = value >> 4; - } - if ((value & 0x3) == 0) { - count += 2; - value = value >> 2; - } - if ((value & 0x1) == 0) { - count += 1; - } - count += (value == 0); - return count - (64 - width); -} - - -} // namespace vixl diff --git a/disas/libvixl/vixl/compiler-intrinsics.h b/disas/libvixl/vixl/compiler-intrinsics.h deleted file mode 100644 index 9431beddb9..0000000000 --- a/disas/libvixl/vixl/compiler-intrinsics.h +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2015, ARM Limited -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. 
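CountSetBitsFallBack() above uses the pairwise-add population count that its comment diagrams: each pass sums adjacent bit fields and doubles the field width. A self-contained C version of the same technique, fixed at 64 bits for simplicity (an illustration, not the VIXL code itself):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Sum adjacent bit fields in place; after six passes the word holds the
 * population count of the original 64-bit value. */
static int popcount64(uint64_t v)
{
    static const uint64_t masks[] = {
        UINT64_C(0x5555555555555555),   /* 1-bit fields  */
        UINT64_C(0x3333333333333333),   /* 2-bit fields  */
        UINT64_C(0x0f0f0f0f0f0f0f0f),   /* 4-bit fields  */
        UINT64_C(0x00ff00ff00ff00ff),   /* 8-bit fields  */
        UINT64_C(0x0000ffff0000ffff),   /* 16-bit fields */
        UINT64_C(0x00000000ffffffff),   /* 32-bit fields */
    };
    for (unsigned i = 0; i < sizeof(masks) / sizeof(masks[0]); i++) {
        unsigned shift = 1u << i;
        v = ((v >> shift) & masks[i]) + (v & masks[i]);
    }
    return (int)v;
}

int main(void)
{
    assert(popcount64(0) == 0);
    assert(popcount64(UINT64_C(0xffffffffffffffff)) == 64);
    assert(popcount64(UINT64_C(0xf0f0)) == 8);
    printf("all popcount checks passed\n");
    return 0;
}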
-// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of ARM Limited nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -#ifndef VIXL_COMPILER_INTRINSICS_H -#define VIXL_COMPILER_INTRINSICS_H - -#include "globals.h" - -namespace vixl { - -// Helper to check whether the version of GCC used is greater than the specified -// requirement. -#define MAJOR 1000000 -#define MINOR 1000 -#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) -#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \ - ((__GNUC__ * MAJOR + __GNUC_MINOR__ * MINOR + __GNUC_PATCHLEVEL__) >= \ - ((major) * MAJOR + (minor) * MINOR + (patchlevel))) -#elif defined(__GNUC__) && defined(__GNUC_MINOR__) -#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \ - ((__GNUC__ * MAJOR + __GNUC_MINOR__ * MINOR) >= \ - ((major) * MAJOR + (minor) * MINOR + (patchlevel))) -#else -#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) 0 -#endif - - -#if defined(__clang__) && !defined(VIXL_NO_COMPILER_BUILTINS) - -#define COMPILER_HAS_BUILTIN_CLRSB (__has_builtin(__builtin_clrsb)) -#define COMPILER_HAS_BUILTIN_CLZ (__has_builtin(__builtin_clz)) -#define COMPILER_HAS_BUILTIN_CTZ (__has_builtin(__builtin_ctz)) -#define COMPILER_HAS_BUILTIN_FFS (__has_builtin(__builtin_ffs)) -#define COMPILER_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount)) - -#elif defined(__GNUC__) && !defined(VIXL_NO_COMPILER_BUILTINS) -// The documentation for these builtins is available at: -// https://gcc.gnu.org/onlinedocs/gcc-$MAJOR.$MINOR.$PATCHLEVEL/gcc//Other-Builtins.html - -# define COMPILER_HAS_BUILTIN_CLRSB (GCC_VERSION_OR_NEWER(4, 7, 0)) -# define COMPILER_HAS_BUILTIN_CLZ (GCC_VERSION_OR_NEWER(3, 4, 0)) -# define COMPILER_HAS_BUILTIN_CTZ (GCC_VERSION_OR_NEWER(3, 4, 0)) -# define COMPILER_HAS_BUILTIN_FFS (GCC_VERSION_OR_NEWER(3, 4, 0)) -# define COMPILER_HAS_BUILTIN_POPCOUNT (GCC_VERSION_OR_NEWER(3, 4, 0)) - -#else -// One can define VIXL_NO_COMPILER_BUILTINS to force using the manually -// implemented C++ methods. - -#define COMPILER_HAS_BUILTIN_BSWAP false -#define COMPILER_HAS_BUILTIN_CLRSB false -#define COMPILER_HAS_BUILTIN_CLZ false -#define COMPILER_HAS_BUILTIN_CTZ false -#define COMPILER_HAS_BUILTIN_FFS false -#define COMPILER_HAS_BUILTIN_POPCOUNT false - -#endif - - -template -inline bool IsPowerOf2(V value) { - return (value != 0) && ((value & (value - 1)) == 0); -} - - -// Declaration of fallback functions. 
-int CountLeadingSignBitsFallBack(int64_t value, int width); -int CountLeadingZerosFallBack(uint64_t value, int width); -int CountSetBitsFallBack(uint64_t value, int width); -int CountTrailingZerosFallBack(uint64_t value, int width); - - -// Implementation of intrinsics functions. -// TODO: The implementations could be improved for sizes different from 32bit -// and 64bit: we could mask the values and call the appropriate builtin. - -template -inline int CountLeadingSignBits(V value, int width = (sizeof(V) * 8)) { -#if COMPILER_HAS_BUILTIN_CLRSB - if (width == 32) { - return __builtin_clrsb(value); - } else if (width == 64) { - return __builtin_clrsbll(value); - } -#endif - return CountLeadingSignBitsFallBack(value, width); -} - - -template -inline int CountLeadingZeros(V value, int width = (sizeof(V) * 8)) { -#if COMPILER_HAS_BUILTIN_CLZ - if (width == 32) { - return (value == 0) ? 32 : __builtin_clz(static_cast(value)); - } else if (width == 64) { - return (value == 0) ? 64 : __builtin_clzll(value); - } -#endif - return CountLeadingZerosFallBack(value, width); -} - - -template -inline int CountSetBits(V value, int width = (sizeof(V) * 8)) { -#if COMPILER_HAS_BUILTIN_POPCOUNT - if (width == 32) { - return __builtin_popcount(static_cast(value)); - } else if (width == 64) { - return __builtin_popcountll(value); - } -#endif - return CountSetBitsFallBack(value, width); -} - - -template -inline int CountTrailingZeros(V value, int width = (sizeof(V) * 8)) { -#if COMPILER_HAS_BUILTIN_CTZ - if (width == 32) { - return (value == 0) ? 32 : __builtin_ctz(static_cast(value)); - } else if (width == 64) { - return (value == 0) ? 64 : __builtin_ctzll(value); - } -#endif - return CountTrailingZerosFallBack(value, width); -} - -} // namespace vixl - -#endif // VIXL_COMPILER_INTRINSICS_H - diff --git a/disas/libvixl/vixl/globals.h b/disas/libvixl/vixl/globals.h deleted file mode 100644 index 3a71942f1e..0000000000 --- a/disas/libvixl/vixl/globals.h +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2015, ARM Limited -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of ARM Limited nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
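The CountLeadingZeros() wrapper deleted above checks for value == 0 before dispatching to the builtin, because __builtin_clz() and __builtin_clzll() are undefined for a zero argument. A small C illustration of that guard; the function name and the shift-loop fallback are ours:

#include <stdint.h>
#include <stdio.h>

/* __builtin_clzll(0) is undefined behaviour, so handle zero explicitly
 * and use the builtin only for non-zero inputs. */
static int clz64(uint64_t value)
{
#if defined(__GNUC__) || defined(__clang__)
    return value == 0 ? 64 : __builtin_clzll(value);
#else
    int count = 0;
    while (count < 64 && !(value & (UINT64_C(1) << 63))) {
        value <<= 1;
        count++;
    }
    return count;
#endif
}

int main(void)
{
    printf("clz64(0)       = %d\n", clz64(0));                    /* 64 */
    printf("clz64(1)       = %d\n", clz64(1));                    /* 63 */
    printf("clz64(1 << 40) = %d\n", clz64(UINT64_C(1) << 40));    /* 23 */
    return 0;
}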
- -#ifndef VIXL_GLOBALS_H -#define VIXL_GLOBALS_H - -// Get standard C99 macros for integer types. -#ifndef __STDC_CONSTANT_MACROS -#define __STDC_CONSTANT_MACROS -#endif - -#ifndef __STDC_LIMIT_MACROS -#define __STDC_LIMIT_MACROS -#endif - -#ifndef __STDC_FORMAT_MACROS -#define __STDC_FORMAT_MACROS -#endif - -extern "C" { -#include -#include -} - -#include -#include -#include -#include -#include - -#include "vixl/platform.h" - - -typedef uint8_t byte; - -// Type for half-precision (16 bit) floating point numbers. -typedef uint16_t float16; - -const int KBytes = 1024; -const int MBytes = 1024 * KBytes; - -#define VIXL_ABORT() \ - do { printf("in %s, line %i", __FILE__, __LINE__); abort(); } while (false) -#ifdef VIXL_DEBUG - #define VIXL_ASSERT(condition) assert(condition) - #define VIXL_CHECK(condition) VIXL_ASSERT(condition) - #define VIXL_UNIMPLEMENTED() \ - do { fprintf(stderr, "UNIMPLEMENTED\t"); VIXL_ABORT(); } while (false) - #define VIXL_UNREACHABLE() \ - do { fprintf(stderr, "UNREACHABLE\t"); VIXL_ABORT(); } while (false) -#else - #define VIXL_ASSERT(condition) ((void) 0) - #define VIXL_CHECK(condition) assert(condition) - #define VIXL_UNIMPLEMENTED() ((void) 0) - #define VIXL_UNREACHABLE() ((void) 0) -#endif -// This is not as powerful as template based assertions, but it is simple. -// It assumes that the descriptions are unique. If this starts being a problem, -// we can switch to a different implemention. -#define VIXL_CONCAT(a, b) a##b -#define VIXL_STATIC_ASSERT_LINE(line, condition) \ - typedef char VIXL_CONCAT(STATIC_ASSERT_LINE_, line)[(condition) ? 1 : -1] \ - __attribute__((unused)) -#define VIXL_STATIC_ASSERT(condition) \ - VIXL_STATIC_ASSERT_LINE(__LINE__, condition) - -template -inline void USE(T1) {} - -template -inline void USE(T1, T2) {} - -template -inline void USE(T1, T2, T3) {} - -template -inline void USE(T1, T2, T3, T4) {} - -#define VIXL_ALIGNMENT_EXCEPTION() \ - do { fprintf(stderr, "ALIGNMENT EXCEPTION\t"); VIXL_ABORT(); } while (0) - -// The clang::fallthrough attribute is used along with the Wimplicit-fallthrough -// argument to annotate intentional fall-through between switch labels. -// For more information please refer to: -// http://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough -#ifndef __has_warning - #define __has_warning(x) 0 -#endif - -// Fallthrough annotation for Clang and C++11(201103L). -#if __has_warning("-Wimplicit-fallthrough") && __cplusplus >= 201103L - #define VIXL_FALLTHROUGH() [[clang::fallthrough]] //NOLINT -// Fallthrough annotation for GCC >= 7. -#elif __GNUC__ >= 7 - #define VIXL_FALLTHROUGH() __attribute__((fallthrough)) -#else - #define VIXL_FALLTHROUGH() do {} while (0) -#endif - -#if __cplusplus >= 201103L - #define VIXL_NO_RETURN [[noreturn]] //NOLINT -#else - #define VIXL_NO_RETURN __attribute__((noreturn)) -#endif - -// Some functions might only be marked as "noreturn" for the DEBUG build. This -// macro should be used for such cases (for more details see what -// VIXL_UNREACHABLE expands to). -#ifdef VIXL_DEBUG - #define VIXL_DEBUG_NO_RETURN VIXL_NO_RETURN -#else - #define VIXL_DEBUG_NO_RETURN -#endif - -#ifdef VIXL_INCLUDE_SIMULATOR -#ifndef VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE - #define VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE 1 -#endif -#else -#ifndef VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE - #define VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE 0 -#endif -#if VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE - #warning "Generating Simulator instructions without Simulator support." 
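VIXL_STATIC_ASSERT above gets a compile-time check out of a plain typedef: the array size evaluates to -1 when the condition fails, which no compiler accepts. The same trick sketched in C, with our own macro names (C11's _Static_assert would replace it today):

#include <stdint.h>

/* Expand to an unused typedef whose array size is -1 (a compile error)
 * when the condition is false, and 1 (harmless) when it is true. */
#define MY_CONCAT_(a, b) a##b
#define MY_CONCAT(a, b) MY_CONCAT_(a, b)
#define MY_STATIC_ASSERT(cond) \
    typedef char MY_CONCAT(static_assert_line_, __LINE__)[(cond) ? 1 : -1]

MY_STATIC_ASSERT(sizeof(uint32_t) == 4);      /* compiles */
/* MY_STATIC_ASSERT(sizeof(uint32_t) == 8);      would fail to compile */

int main(void)
{
    return 0;
}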
-#endif -#endif - -#ifdef USE_SIMULATOR - #error "Please see the release notes for USE_SIMULATOR." -#endif - -#endif // VIXL_GLOBALS_H diff --git a/disas/libvixl/vixl/invalset.h b/disas/libvixl/vixl/invalset.h deleted file mode 100644 index 2e0871f8c3..0000000000 --- a/disas/libvixl/vixl/invalset.h +++ /dev/null @@ -1,775 +0,0 @@ -// Copyright 2015, ARM Limited -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of ARM Limited nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef VIXL_INVALSET_H_ -#define VIXL_INVALSET_H_ - -#include - -#include -#include - -#include "vixl/globals.h" - -namespace vixl { - -// We define a custom data structure template and its iterator as `std` -// containers do not fit the performance requirements for some of our use cases. -// -// The structure behaves like an iterable unordered set with special properties -// and restrictions. "InvalSet" stands for "Invalidatable Set". -// -// Restrictions and requirements: -// - Adding an element already present in the set is illegal. In debug mode, -// this is checked at insertion time. -// - The templated class `ElementType` must provide comparison operators so that -// `std::sort()` can be used. -// - A key must be available to represent invalid elements. -// - Elements with an invalid key must compare higher or equal to any other -// element. -// -// Use cases and performance considerations: -// Our use cases present two specificities that allow us to design this -// structure to provide fast insertion *and* fast search and deletion -// operations: -// - Elements are (generally) inserted in order (sorted according to their key). -// - A key is available to mark elements as invalid (deleted). -// The backing `std::vector` allows for fast insertions. When -// searching for an element we ensure the elements are sorted (this is generally -// the case) and perform a binary search. When deleting an element we do not -// free the associated memory immediately. Instead, an element to be deleted is -// marked with the 'invalid' key. Other methods of the container take care of -// ignoring entries marked as invalid. 
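The InvalSet comments above describe deletion by tombstone: an erased element is merely overwritten with the invalid key, and lookups skip such entries until memory is reclaimed later. A toy C sketch of that pattern over a small sorted array, with an illustrative INVALID_KEY value and a linear scan standing in for the container's binary search:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define INVALID_KEY INT64_MAX   /* sorts after every valid key */

typedef struct {
    int64_t keys[8];
    size_t  used;    /* slots in use, including tombstones */
    size_t  valid;   /* live elements */
} TinySet;

/* Mark the element invalid instead of shifting the array. */
static void tiny_erase(TinySet *s, size_t index)
{
    if (index < s->used && s->keys[index] != INVALID_KEY) {
        s->keys[index] = INVALID_KEY;
        s->valid--;
    }
}

/* Scan that ignores tombstones (the real container uses a binary
 * search with the same skip rule). */
static int tiny_contains(const TinySet *s, int64_t key)
{
    for (size_t i = 0; i < s->used; i++) {
        if (s->keys[i] != INVALID_KEY && s->keys[i] == key) {
            return 1;
        }
    }
    return 0;
}

int main(void)
{
    TinySet s = { .keys = { 1, 3, 5, 7 }, .used = 4, .valid = 4 };

    tiny_erase(&s, 1);                                   /* tombstone key 3 */
    printf("contains 3: %d\n", tiny_contains(&s, 3));    /* 0 */
    printf("contains 5: %d\n", tiny_contains(&s, 5));    /* 1 */
    printf("live elements: %zu of %zu slots\n", s.valid, s.used);
    return 0;
}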
-// To avoid the overhead of the `std::vector` container when only few entries -// are used, a number of elements are preallocated. - -// 'ElementType' and 'KeyType' are respectively the types of the elements and -// their key. The structure only reclaims memory when safe to do so, if the -// number of elements that can be reclaimed is greater than `RECLAIM_FROM` and -// greater than ` / RECLAIM_FACTOR. -#define TEMPLATE_INVALSET_P_DECL \ - class ElementType, \ - unsigned N_PREALLOCATED_ELEMENTS, \ - class KeyType, \ - KeyType INVALID_KEY, \ - size_t RECLAIM_FROM, \ - unsigned RECLAIM_FACTOR - -#define TEMPLATE_INVALSET_P_DEF \ -ElementType, N_PREALLOCATED_ELEMENTS, \ -KeyType, INVALID_KEY, RECLAIM_FROM, RECLAIM_FACTOR - -template class InvalSetIterator; // Forward declaration. - -template class InvalSet { - public: - InvalSet(); - ~InvalSet(); - - static const size_t kNPreallocatedElements = N_PREALLOCATED_ELEMENTS; - static const KeyType kInvalidKey = INVALID_KEY; - - // It is illegal to insert an element already present in the set. - void insert(const ElementType& element); - - // Looks for the specified element in the set and - if found - deletes it. - void erase(const ElementType& element); - - // This indicates the number of (valid) elements stored in this set. - size_t size() const; - - // Returns true if no elements are stored in the set. - // Note that this does not mean the the backing storage is empty: it can still - // contain invalid elements. - bool empty() const; - - void clear(); - - const ElementType min_element(); - - // This returns the key of the minimum element in the set. - KeyType min_element_key(); - - static bool IsValid(const ElementType& element); - static KeyType Key(const ElementType& element); - static void SetKey(ElementType* element, KeyType key); - - protected: - // Returns a pointer to the element in vector_ if it was found, or NULL - // otherwise. - ElementType* Search(const ElementType& element); - - // The argument *must* point to an element stored in *this* set. - // This function is not allowed to move elements in the backing vector - // storage. - void EraseInternal(ElementType* element); - - // The elements in the range searched must be sorted. - ElementType* BinarySearch(const ElementType& element, - ElementType* start, - ElementType* end) const; - - // Sort the elements. - enum SortType { - // The 'hard' version guarantees that invalid elements are moved to the end - // of the container. - kHardSort, - // The 'soft' version only guarantees that the elements will be sorted. - // Invalid elements may still be present anywhere in the set. - kSoftSort - }; - void Sort(SortType sort_type); - - // Delete the elements that have an invalid key. The complexity is linear - // with the size of the vector. - void Clean(); - - const ElementType Front() const; - const ElementType Back() const; - - // Delete invalid trailing elements and return the last valid element in the - // set. - const ElementType CleanBack(); - - // Returns a pointer to the start or end of the backing storage. - const ElementType* StorageBegin() const; - const ElementType* StorageEnd() const; - ElementType* StorageBegin(); - ElementType* StorageEnd(); - - // Returns the index of the element within the backing storage. The element - // must belong to the backing storage. - size_t ElementIndex(const ElementType* element) const; - - // Returns the element at the specified index in the backing storage. 
- const ElementType* ElementAt(size_t index) const; - ElementType* ElementAt(size_t index); - - static const ElementType* FirstValidElement(const ElementType* from, - const ElementType* end); - - void CacheMinElement(); - const ElementType CachedMinElement() const; - - bool ShouldReclaimMemory() const; - void ReclaimMemory(); - - bool IsUsingVector() const { return vector_ != NULL; } - void set_sorted(bool sorted) { sorted_ = sorted; } - - // We cache some data commonly required by users to improve performance. - // We cannot cache pointers to elements as we do not control the backing - // storage. - bool valid_cached_min_; - size_t cached_min_index_; // Valid iff `valid_cached_min_` is true. - KeyType cached_min_key_; // Valid iff `valid_cached_min_` is true. - - // Indicates whether the elements are sorted. - bool sorted_; - - // This represents the number of (valid) elements in this set. - size_t size_; - - // The backing storage is either the array of preallocated elements or the - // vector. The structure starts by using the preallocated elements, and - // transitions (permanently) to using the vector once more than - // kNPreallocatedElements are used. - // Elements are only invalidated when using the vector. The preallocated - // storage always only contains valid elements. - ElementType preallocated_[kNPreallocatedElements]; - std::vector* vector_; - -#ifdef VIXL_DEBUG - // Iterators acquire and release this monitor. While a set is acquired, - // certain operations are illegal to ensure that the iterator will - // correctly iterate over the elements in the set. - int monitor_; - int monitor() const { return monitor_; } - void Acquire() { monitor_++; } - void Release() { - monitor_--; - VIXL_ASSERT(monitor_ >= 0); - } -#endif - - friend class InvalSetIterator >; - typedef ElementType _ElementType; - typedef KeyType _KeyType; -}; - - -template class InvalSetIterator { - private: - // Redefine types to mirror the associated set types. - typedef typename S::_ElementType ElementType; - typedef typename S::_KeyType KeyType; - - public: - explicit InvalSetIterator(S* inval_set); - ~InvalSetIterator(); - - ElementType* Current() const; - void Advance(); - bool Done() const; - - // Mark this iterator as 'done'. - void Finish(); - - // Delete the current element and advance the iterator to point to the next - // element. - void DeleteCurrentAndAdvance(); - - static bool IsValid(const ElementType& element); - static KeyType Key(const ElementType& element); - - protected: - void MoveToValidElement(); - - // Indicates if the iterator is looking at the vector or at the preallocated - // elements. - const bool using_vector_; - // Used when looking at the preallocated elements, or in debug mode when using - // the vector to track how many times the iterator has advanced. 
- size_t index_; - typename std::vector::iterator iterator_; - S* inval_set_; -}; - - -template -InvalSet::InvalSet() - : valid_cached_min_(false), - sorted_(true), size_(0), vector_(NULL) { -#ifdef VIXL_DEBUG - monitor_ = 0; -#endif -} - - -template -InvalSet::~InvalSet() { - VIXL_ASSERT(monitor_ == 0); - delete vector_; -} - - -template -void InvalSet::insert(const ElementType& element) { - VIXL_ASSERT(monitor() == 0); - VIXL_ASSERT(IsValid(element)); - VIXL_ASSERT(Search(element) == NULL); - set_sorted(empty() || (sorted_ && (element > CleanBack()))); - if (IsUsingVector()) { - vector_->push_back(element); - } else { - if (size_ < kNPreallocatedElements) { - preallocated_[size_] = element; - } else { - // Transition to using the vector. - vector_ = new std::vector(preallocated_, - preallocated_ + size_); - vector_->push_back(element); - } - } - size_++; - - if (valid_cached_min_ && (element < min_element())) { - cached_min_index_ = IsUsingVector() ? vector_->size() - 1 : size_ - 1; - cached_min_key_ = Key(element); - valid_cached_min_ = true; - } - - if (ShouldReclaimMemory()) { - ReclaimMemory(); - } -} - - -template -void InvalSet::erase(const ElementType& element) { - VIXL_ASSERT(monitor() == 0); - VIXL_ASSERT(IsValid(element)); - ElementType* local_element = Search(element); - if (local_element != NULL) { - EraseInternal(local_element); - } -} - - -template -ElementType* InvalSet::Search( - const ElementType& element) { - VIXL_ASSERT(monitor() == 0); - if (empty()) { - return NULL; - } - if (ShouldReclaimMemory()) { - ReclaimMemory(); - } - if (!sorted_) { - Sort(kHardSort); - } - if (!valid_cached_min_) { - CacheMinElement(); - } - return BinarySearch(element, ElementAt(cached_min_index_), StorageEnd()); -} - - -template -size_t InvalSet::size() const { - return size_; -} - - -template -bool InvalSet::empty() const { - return size_ == 0; -} - - -template -void InvalSet::clear() { - VIXL_ASSERT(monitor() == 0); - size_ = 0; - if (IsUsingVector()) { - vector_->clear(); - } - set_sorted(true); - valid_cached_min_ = false; -} - - -template -const ElementType InvalSet::min_element() { - VIXL_ASSERT(monitor() == 0); - VIXL_ASSERT(!empty()); - CacheMinElement(); - return *ElementAt(cached_min_index_); -} - - -template -KeyType InvalSet::min_element_key() { - VIXL_ASSERT(monitor() == 0); - if (valid_cached_min_) { - return cached_min_key_; - } else { - return Key(min_element()); - } -} - - -template -bool InvalSet::IsValid(const ElementType& element) { - return Key(element) != kInvalidKey; -} - - -template -void InvalSet::EraseInternal(ElementType* element) { - // Note that this function must be safe even while an iterator has acquired - // this set. 
- VIXL_ASSERT(element != NULL); - size_t deleted_index = ElementIndex(element); - if (IsUsingVector()) { - VIXL_ASSERT((&(vector_->front()) <= element) && - (element <= &(vector_->back()))); - SetKey(element, kInvalidKey); - } else { - VIXL_ASSERT((preallocated_ <= element) && - (element < (preallocated_ + kNPreallocatedElements))); - ElementType* end = preallocated_ + kNPreallocatedElements; - size_t copy_size = sizeof(*element) * (end - element - 1); - memmove(element, element + 1, copy_size); - } - size_--; - - if (valid_cached_min_ && - (deleted_index == cached_min_index_)) { - if (sorted_ && !empty()) { - const ElementType* min = FirstValidElement(element, StorageEnd()); - cached_min_index_ = ElementIndex(min); - cached_min_key_ = Key(*min); - valid_cached_min_ = true; - } else { - valid_cached_min_ = false; - } - } -} - - -template -ElementType* InvalSet::BinarySearch( - const ElementType& element, ElementType* start, ElementType* end) const { - if (start == end) { - return NULL; - } - VIXL_ASSERT(sorted_); - VIXL_ASSERT(start < end); - VIXL_ASSERT(!empty()); - - // Perform a binary search through the elements while ignoring invalid - // elements. - ElementType* elements = start; - size_t low = 0; - size_t high = (end - start) - 1; - while (low < high) { - // Find valid bounds. - while (!IsValid(elements[low]) && (low < high)) ++low; - while (!IsValid(elements[high]) && (low < high)) --high; - VIXL_ASSERT(low <= high); - // Avoid overflow when computing the middle index. - size_t middle = low / 2 + high / 2 + (low & high & 1); - if ((middle == low) || (middle == high)) { - break; - } - while (!IsValid(elements[middle]) && (middle < high - 1)) ++middle; - while (!IsValid(elements[middle]) && (low + 1 < middle)) --middle; - if (!IsValid(elements[middle])) { - break; - } - if (elements[middle] < element) { - low = middle; - } else { - high = middle; - } - } - - if (elements[low] == element) return &elements[low]; - if (elements[high] == element) return &elements[high]; - return NULL; -} - - -template -void InvalSet::Sort(SortType sort_type) { - VIXL_ASSERT(monitor() == 0); - if (sort_type == kSoftSort) { - if (sorted_) { - return; - } - } - if (empty()) { - return; - } - - Clean(); - std::sort(StorageBegin(), StorageEnd()); - - set_sorted(true); - cached_min_index_ = 0; - cached_min_key_ = Key(Front()); - valid_cached_min_ = true; -} - - -template -void InvalSet::Clean() { - VIXL_ASSERT(monitor() == 0); - if (empty() || !IsUsingVector()) { - return; - } - // Manually iterate through the vector storage to discard invalid elements. - ElementType* start = &(vector_->front()); - ElementType* end = start + vector_->size(); - ElementType* c = start; - ElementType* first_invalid; - ElementType* first_valid; - ElementType* next_invalid; - - while (c < end && IsValid(*c)) { c++; } - first_invalid = c; - - while (c < end) { - while (c < end && !IsValid(*c)) { c++; } - first_valid = c; - while (c < end && IsValid(*c)) { c++; } - next_invalid = c; - - ptrdiff_t n_moved_elements = (next_invalid - first_valid); - memmove(first_invalid, first_valid, n_moved_elements * sizeof(*c)); - first_invalid = first_invalid + n_moved_elements; - c = next_invalid; - } - - // Delete the trailing invalid elements. 
- vector_->erase(vector_->begin() + (first_invalid - start), vector_->end()); - VIXL_ASSERT(vector_->size() == size_); - - if (sorted_) { - valid_cached_min_ = true; - cached_min_index_ = 0; - cached_min_key_ = Key(*ElementAt(0)); - } else { - valid_cached_min_ = false; - } -} - - -template -const ElementType InvalSet::Front() const { - VIXL_ASSERT(!empty()); - return IsUsingVector() ? vector_->front() : preallocated_[0]; -} - - -template -const ElementType InvalSet::Back() const { - VIXL_ASSERT(!empty()); - return IsUsingVector() ? vector_->back() : preallocated_[size_ - 1]; -} - - -template -const ElementType InvalSet::CleanBack() { - VIXL_ASSERT(monitor() == 0); - if (IsUsingVector()) { - // Delete the invalid trailing elements. - typename std::vector::reverse_iterator it = vector_->rbegin(); - while (!IsValid(*it)) { - it++; - } - vector_->erase(it.base(), vector_->end()); - } - return Back(); -} - - -template -const ElementType* InvalSet::StorageBegin() const { - return IsUsingVector() ? &(vector_->front()) : preallocated_; -} - - -template -const ElementType* InvalSet::StorageEnd() const { - return IsUsingVector() ? &(vector_->back()) + 1 : preallocated_ + size_; -} - - -template -ElementType* InvalSet::StorageBegin() { - return IsUsingVector() ? &(vector_->front()) : preallocated_; -} - - -template -ElementType* InvalSet::StorageEnd() { - return IsUsingVector() ? &(vector_->back()) + 1 : preallocated_ + size_; -} - - -template -size_t InvalSet::ElementIndex( - const ElementType* element) const { - VIXL_ASSERT((StorageBegin() <= element) && (element < StorageEnd())); - return element - StorageBegin(); -} - - -template -const ElementType* InvalSet::ElementAt( - size_t index) const { - VIXL_ASSERT( - (IsUsingVector() && (index < vector_->size())) || (index < size_)); - return StorageBegin() + index; -} - -template -ElementType* InvalSet::ElementAt(size_t index) { - VIXL_ASSERT( - (IsUsingVector() && (index < vector_->size())) || (index < size_)); - return StorageBegin() + index; -} - -template -const ElementType* InvalSet::FirstValidElement( - const ElementType* from, const ElementType* end) { - while ((from < end) && !IsValid(*from)) { - from++; - } - return from; -} - - -template -void InvalSet::CacheMinElement() { - VIXL_ASSERT(monitor() == 0); - VIXL_ASSERT(!empty()); - - if (valid_cached_min_) { - return; - } - - if (sorted_) { - const ElementType* min = FirstValidElement(StorageBegin(), StorageEnd()); - cached_min_index_ = ElementIndex(min); - cached_min_key_ = Key(*min); - valid_cached_min_ = true; - } else { - Sort(kHardSort); - } - VIXL_ASSERT(valid_cached_min_); -} - - -template -bool InvalSet::ShouldReclaimMemory() const { - if (!IsUsingVector()) { - return false; - } - size_t n_invalid_elements = vector_->size() - size_; - return (n_invalid_elements > RECLAIM_FROM) && - (n_invalid_elements > vector_->size() / RECLAIM_FACTOR); -} - - -template -void InvalSet::ReclaimMemory() { - VIXL_ASSERT(monitor() == 0); - Clean(); -} - - -template -InvalSetIterator::InvalSetIterator(S* inval_set) - : using_vector_((inval_set != NULL) && inval_set->IsUsingVector()), - index_(0), - inval_set_(inval_set) { - if (inval_set != NULL) { - inval_set->Sort(S::kSoftSort); -#ifdef VIXL_DEBUG - inval_set->Acquire(); -#endif - if (using_vector_) { - iterator_ = typename std::vector::iterator( - inval_set_->vector_->begin()); - } - MoveToValidElement(); - } -} - - -template -InvalSetIterator::~InvalSetIterator() { -#ifdef VIXL_DEBUG - if (inval_set_ != NULL) { - inval_set_->Release(); - } -#endif -} - 
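BinarySearch() in the removed invalset.h computes the midpoint as low / 2 + high / 2 + (low & high & 1) instead of (low + high) / 2, so the addition can never overflow while still rounding down identically. A quick standalone C check of that identity:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Midpoint without risk of overflowing the addition. */
static size_t safe_mid(size_t low, size_t high)
{
    return low / 2 + high / 2 + (low & high & 1);
}

int main(void)
{
    /* Matches the naive formula wherever the naive formula is safe... */
    for (size_t low = 0; low < 100; low++) {
        for (size_t high = low; high < 100; high++) {
            assert(safe_mid(low, high) == (low + high) / 2);
        }
    }
    /* ...and still works where low + high would wrap around. */
    size_t big = SIZE_MAX - 1;
    assert(safe_mid(big, SIZE_MAX) == SIZE_MAX - 1);
    printf("midpoint identity holds\n");
    return 0;
}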
- -template -typename S::_ElementType* InvalSetIterator::Current() const { - VIXL_ASSERT(!Done()); - if (using_vector_) { - return &(*iterator_); - } else { - return &(inval_set_->preallocated_[index_]); - } -} - - -template -void InvalSetIterator::Advance() { - VIXL_ASSERT(!Done()); - if (using_vector_) { - iterator_++; -#ifdef VIXL_DEBUG - index_++; -#endif - MoveToValidElement(); - } else { - index_++; - } -} - - -template -bool InvalSetIterator::Done() const { - if (using_vector_) { - bool done = (iterator_ == inval_set_->vector_->end()); - VIXL_ASSERT(done == (index_ == inval_set_->size())); - return done; - } else { - return index_ == inval_set_->size(); - } -} - - -template -void InvalSetIterator::Finish() { - VIXL_ASSERT(inval_set_->sorted_); - if (using_vector_) { - iterator_ = inval_set_->vector_->end(); - } - index_ = inval_set_->size(); -} - - -template -void InvalSetIterator::DeleteCurrentAndAdvance() { - if (using_vector_) { - inval_set_->EraseInternal(&(*iterator_)); - MoveToValidElement(); - } else { - inval_set_->EraseInternal(inval_set_->preallocated_ + index_); - } -} - - -template -bool InvalSetIterator::IsValid(const ElementType& element) { - return S::IsValid(element); -} - - -template -typename S::_KeyType InvalSetIterator::Key(const ElementType& element) { - return S::Key(element); -} - - -template -void InvalSetIterator::MoveToValidElement() { - if (using_vector_) { - while ((iterator_ != inval_set_->vector_->end()) && !IsValid(*iterator_)) { - iterator_++; - } - } else { - VIXL_ASSERT(inval_set_->empty() || IsValid(inval_set_->preallocated_[0])); - // Nothing to do. - } -} - -#undef TEMPLATE_INVALSET_P_DECL -#undef TEMPLATE_INVALSET_P_DEF - -} // namespace vixl - -#endif // VIXL_INVALSET_H_ diff --git a/disas/libvixl/vixl/platform.h b/disas/libvixl/vixl/platform.h deleted file mode 100644 index 26a74de81b..0000000000 --- a/disas/libvixl/vixl/platform.h +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2014, ARM Limited -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of ARM Limited nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#ifndef PLATFORM_H -#define PLATFORM_H - -// Define platform specific functionalities. -extern "C" { -#include -} - -namespace vixl { -inline void HostBreakpoint() { raise(SIGINT); } -} // namespace vixl - -#endif diff --git a/disas/libvixl/vixl/utils.cc b/disas/libvixl/vixl/utils.cc deleted file mode 100644 index 69304d266d..0000000000 --- a/disas/libvixl/vixl/utils.cc +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2015, ARM Limited -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of ARM Limited nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#include "vixl/utils.h" -#include - -namespace vixl { - -uint32_t float_to_rawbits(float value) { - uint32_t bits = 0; - memcpy(&bits, &value, 4); - return bits; -} - - -uint64_t double_to_rawbits(double value) { - uint64_t bits = 0; - memcpy(&bits, &value, 8); - return bits; -} - - -float rawbits_to_float(uint32_t bits) { - float value = 0.0; - memcpy(&value, &bits, 4); - return value; -} - - -double rawbits_to_double(uint64_t bits) { - double value = 0.0; - memcpy(&value, &bits, 8); - return value; -} - - -uint32_t float_sign(float val) { - uint32_t rawbits = float_to_rawbits(val); - return unsigned_bitextract_32(31, 31, rawbits); -} - - -uint32_t float_exp(float val) { - uint32_t rawbits = float_to_rawbits(val); - return unsigned_bitextract_32(30, 23, rawbits); -} - - -uint32_t float_mantissa(float val) { - uint32_t rawbits = float_to_rawbits(val); - return unsigned_bitextract_32(22, 0, rawbits); -} - - -uint32_t double_sign(double val) { - uint64_t rawbits = double_to_rawbits(val); - return static_cast(unsigned_bitextract_64(63, 63, rawbits)); -} - - -uint32_t double_exp(double val) { - uint64_t rawbits = double_to_rawbits(val); - return static_cast(unsigned_bitextract_64(62, 52, rawbits)); -} - - -uint64_t double_mantissa(double val) { - uint64_t rawbits = double_to_rawbits(val); - return unsigned_bitextract_64(51, 0, rawbits); -} - - -float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa) { - uint32_t bits = (sign << 31) | (exp << 23) | mantissa; - return rawbits_to_float(bits); -} - - -double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa) { - uint64_t bits = (sign << 63) | (exp << 52) | mantissa; - return rawbits_to_double(bits); -} - - -int float16classify(float16 value) { - uint16_t exponent_max = (1 << 5) - 1; - uint16_t exponent_mask = exponent_max << 10; - uint16_t mantissa_mask = (1 << 10) - 1; - - uint16_t exponent = (value & exponent_mask) >> 10; - uint16_t mantissa = value & mantissa_mask; - if (exponent == 0) { - if (mantissa == 0) { - return FP_ZERO; - } - return FP_SUBNORMAL; - } else if (exponent == exponent_max) { - if (mantissa == 0) { - return FP_INFINITE; - } - return FP_NAN; - } - return FP_NORMAL; -} - - -unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) { - VIXL_ASSERT((reg_size % 8) == 0); - int count = 0; - for (unsigned i = 0; i < (reg_size / 16); i++) { - if ((imm & 0xffff) == 0) { - count++; - } - imm >>= 16; - } - return count; -} - -} // namespace vixl diff --git a/disas/libvixl/vixl/utils.h b/disas/libvixl/vixl/utils.h deleted file mode 100644 index ecb0f1014a..0000000000 --- a/disas/libvixl/vixl/utils.h +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright 2015, ARM Limited -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of ARM Limited nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. 
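float16classify() above splits a half-precision bit pattern into a 5-bit exponent and 10-bit mantissa: exponent 0 gives zero or subnormal, an all-ones exponent gives infinity or NaN, anything else is normal. A short C sketch applying the same split to a few well-known binary16 encodings (the helper mirrors the deleted function but is not taken from it verbatim):

#include <stdint.h>
#include <stdio.h>
#include <math.h>

/* Classify a raw IEEE binary16 bit pattern using the same
 * exponent/mantissa split as the removed float16classify(). */
static int f16_classify(uint16_t value)
{
    uint16_t exponent = (value >> 10) & 0x1f;
    uint16_t mantissa = value & 0x3ff;

    if (exponent == 0) {
        return mantissa == 0 ? FP_ZERO : FP_SUBNORMAL;
    }
    if (exponent == 0x1f) {
        return mantissa == 0 ? FP_INFINITE : FP_NAN;
    }
    return FP_NORMAL;
}

int main(void)
{
    printf("0x0000 -> %s\n", f16_classify(0x0000) == FP_ZERO ? "zero" : "?");
    printf("0x0001 -> %s\n", f16_classify(0x0001) == FP_SUBNORMAL ? "subnormal" : "?");
    printf("0x3c00 -> %s\n", f16_classify(0x3c00) == FP_NORMAL ? "normal (1.0)" : "?");
    printf("0x7c00 -> %s\n", f16_classify(0x7c00) == FP_INFINITE ? "+inf" : "?");
    printf("0x7e00 -> %s\n", f16_classify(0x7e00) == FP_NAN ? "nan" : "?");
    return 0;
}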
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef VIXL_UTILS_H -#define VIXL_UTILS_H - -#include -#include -#include "vixl/globals.h" -#include "vixl/compiler-intrinsics.h" - -namespace vixl { - -// Macros for compile-time format checking. -#if GCC_VERSION_OR_NEWER(4, 4, 0) -#define PRINTF_CHECK(format_index, varargs_index) \ - __attribute__((format(gnu_printf, format_index, varargs_index))) -#else -#define PRINTF_CHECK(format_index, varargs_index) -#endif - -// Check number width. -inline bool is_intn(unsigned n, int64_t x) { - VIXL_ASSERT((0 < n) && (n < 64)); - int64_t limit = INT64_C(1) << (n - 1); - return (-limit <= x) && (x < limit); -} - -inline bool is_uintn(unsigned n, int64_t x) { - VIXL_ASSERT((0 < n) && (n < 64)); - return !(x >> n); -} - -inline uint32_t truncate_to_intn(unsigned n, int64_t x) { - VIXL_ASSERT((0 < n) && (n < 64)); - return static_cast(x & ((INT64_C(1) << n) - 1)); -} - -#define INT_1_TO_63_LIST(V) \ -V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \ -V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \ -V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \ -V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32) \ -V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \ -V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \ -V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \ -V(57) V(58) V(59) V(60) V(61) V(62) V(63) - -#define DECLARE_IS_INT_N(N) \ -inline bool is_int##N(int64_t x) { return is_intn(N, x); } -#define DECLARE_IS_UINT_N(N) \ -inline bool is_uint##N(int64_t x) { return is_uintn(N, x); } -#define DECLARE_TRUNCATE_TO_INT_N(N) \ -inline uint32_t truncate_to_int##N(int x) { return truncate_to_intn(N, x); } -INT_1_TO_63_LIST(DECLARE_IS_INT_N) -INT_1_TO_63_LIST(DECLARE_IS_UINT_N) -INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N) -#undef DECLARE_IS_INT_N -#undef DECLARE_IS_UINT_N -#undef DECLARE_TRUNCATE_TO_INT_N - -// Bit field extraction. -inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) { - return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1); -} - -inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) { - return (x >> lsb) & ((static_cast(1) << (1 + msb - lsb)) - 1); -} - -inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) { - return (x << (31 - msb)) >> (lsb + 31 - msb); -} - -inline int64_t signed_bitextract_64(int msb, int lsb, int64_t x) { - return (x << (63 - msb)) >> (lsb + 63 - msb); -} - -// Floating point representation. 
-uint32_t float_to_rawbits(float value); -uint64_t double_to_rawbits(double value); -float rawbits_to_float(uint32_t bits); -double rawbits_to_double(uint64_t bits); - -uint32_t float_sign(float val); -uint32_t float_exp(float val); -uint32_t float_mantissa(float val); -uint32_t double_sign(double val); -uint32_t double_exp(double val); -uint64_t double_mantissa(double val); - -float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa); -double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa); - -// An fpclassify() function for 16-bit half-precision floats. -int float16classify(float16 value); - -// NaN tests. -inline bool IsSignallingNaN(double num) { - const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000); - uint64_t raw = double_to_rawbits(num); - if (std::isnan(num) && ((raw & kFP64QuietNaNMask) == 0)) { - return true; - } - return false; -} - - -inline bool IsSignallingNaN(float num) { - const uint32_t kFP32QuietNaNMask = 0x00400000; - uint32_t raw = float_to_rawbits(num); - if (std::isnan(num) && ((raw & kFP32QuietNaNMask) == 0)) { - return true; - } - return false; -} - - -inline bool IsSignallingNaN(float16 num) { - const uint16_t kFP16QuietNaNMask = 0x0200; - return (float16classify(num) == FP_NAN) && - ((num & kFP16QuietNaNMask) == 0); -} - - -template -inline bool IsQuietNaN(T num) { - return std::isnan(num) && !IsSignallingNaN(num); -} - - -// Convert the NaN in 'num' to a quiet NaN. -inline double ToQuietNaN(double num) { - const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000); - VIXL_ASSERT(std::isnan(num)); - return rawbits_to_double(double_to_rawbits(num) | kFP64QuietNaNMask); -} - - -inline float ToQuietNaN(float num) { - const uint32_t kFP32QuietNaNMask = 0x00400000; - VIXL_ASSERT(std::isnan(num)); - return rawbits_to_float(float_to_rawbits(num) | kFP32QuietNaNMask); -} - - -// Fused multiply-add. -inline double FusedMultiplyAdd(double op1, double op2, double a) { - return fma(op1, op2, a); -} - - -inline float FusedMultiplyAdd(float op1, float op2, float a) { - return fmaf(op1, op2, a); -} - - -inline uint64_t LowestSetBit(uint64_t value) { - return value & -value; -} - - -template -inline int HighestSetBitPosition(T value) { - VIXL_ASSERT(value != 0); - return (sizeof(value) * 8 - 1) - CountLeadingZeros(value); -} - - -template -inline int WhichPowerOf2(V value) { - VIXL_ASSERT(IsPowerOf2(value)); - return CountTrailingZeros(value); -} - - -unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size); - - -template -T ReverseBits(T value) { - VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) || - (sizeof(value) == 4) || (sizeof(value) == 8)); - T result = 0; - for (unsigned i = 0; i < (sizeof(value) * 8); i++) { - result = (result << 1) | (value & 1); - value >>= 1; - } - return result; -} - - -template -T ReverseBytes(T value, int block_bytes_log2) { - VIXL_ASSERT((sizeof(value) == 4) || (sizeof(value) == 8)); - VIXL_ASSERT((1U << block_bytes_log2) <= sizeof(value)); - // Split the 64-bit value into an 8-bit array, where b[0] is the least - // significant byte, and b[7] is the most significant. - uint8_t bytes[8]; - uint64_t mask = UINT64_C(0xff00000000000000); - for (int i = 7; i >= 0; i--) { - bytes[i] = (static_cast(value) & mask) >> (i * 8); - mask >>= 8; - } - - // Permutation tables for REV instructions. 
- // permute_table[0] is used by REV16_x, REV16_w - // permute_table[1] is used by REV32_x, REV_w - // permute_table[2] is used by REV_x - VIXL_ASSERT((0 < block_bytes_log2) && (block_bytes_log2 < 4)); - static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1}, - {4, 5, 6, 7, 0, 1, 2, 3}, - {0, 1, 2, 3, 4, 5, 6, 7} }; - T result = 0; - for (int i = 0; i < 8; i++) { - result <<= 8; - result |= bytes[permute_table[block_bytes_log2 - 1][i]]; - } - return result; -} - - -// Pointer alignment -// TODO: rename/refactor to make it specific to instructions. -template -bool IsWordAligned(T pointer) { - VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t)); // NOLINT(runtime/sizeof) - return ((intptr_t)(pointer) & 3) == 0; -} - -// Increment a pointer (up to 64 bits) until it has the specified alignment. -template -T AlignUp(T pointer, size_t alignment) { - // Use C-style casts to get static_cast behaviour for integral types (T), and - // reinterpret_cast behaviour for other types. - - uint64_t pointer_raw = (uint64_t)pointer; - VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw)); - - size_t align_step = (alignment - pointer_raw) % alignment; - VIXL_ASSERT((pointer_raw + align_step) % alignment == 0); - - return (T)(pointer_raw + align_step); -} - -// Decrement a pointer (up to 64 bits) until it has the specified alignment. -template -T AlignDown(T pointer, size_t alignment) { - // Use C-style casts to get static_cast behaviour for integral types (T), and - // reinterpret_cast behaviour for other types. - - uint64_t pointer_raw = (uint64_t)pointer; - VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw)); - - size_t align_step = pointer_raw % alignment; - VIXL_ASSERT((pointer_raw - align_step) % alignment == 0); - - return (T)(pointer_raw - align_step); -} - -} // namespace vixl - -#endif // VIXL_UTILS_H diff --git a/disas/meson.build b/disas/meson.build index 449f99e1de..1977f5cd92 100644 --- a/disas/meson.build +++ b/disas/meson.build @@ -1,22 +1,13 @@ -libvixl_ss = ss.source_set() -subdir('libvixl') - common_ss.add(when: 'CONFIG_ALPHA_DIS', if_true: files('alpha.c')) -common_ss.add(when: 'CONFIG_ARM_A64_DIS', if_true: files('arm-a64.cc')) -common_ss.add_all(when: 'CONFIG_ARM_A64_DIS', if_true: libvixl_ss) -common_ss.add(when: 'CONFIG_ARM_DIS', if_true: files('arm.c')) common_ss.add(when: 'CONFIG_CRIS_DIS', if_true: files('cris.c')) common_ss.add(when: 'CONFIG_HEXAGON_DIS', if_true: files('hexagon.c')) common_ss.add(when: 'CONFIG_HPPA_DIS', if_true: files('hppa.c')) -common_ss.add(when: 'CONFIG_I386_DIS', if_true: files('i386.c')) common_ss.add(when: 'CONFIG_M68K_DIS', if_true: files('m68k.c')) common_ss.add(when: 'CONFIG_MICROBLAZE_DIS', if_true: files('microblaze.c')) common_ss.add(when: 'CONFIG_MIPS_DIS', if_true: files('mips.c')) -common_ss.add(when: 'CONFIG_NANOMIPS_DIS', if_true: files('nanomips.cpp')) +common_ss.add(when: 'CONFIG_NANOMIPS_DIS', if_true: files('nanomips.c')) common_ss.add(when: 'CONFIG_NIOS2_DIS', if_true: files('nios2.c')) -common_ss.add(when: 'CONFIG_PPC_DIS', if_true: files('ppc.c')) common_ss.add(when: 'CONFIG_RISCV_DIS', if_true: files('riscv.c')) -common_ss.add(when: 'CONFIG_S390_DIS', if_true: files('s390.c')) common_ss.add(when: 'CONFIG_SH4_DIS', if_true: files('sh4.c')) common_ss.add(when: 'CONFIG_SPARC_DIS', if_true: files('sparc.c')) common_ss.add(when: 'CONFIG_XTENSA_DIS', if_true: files('xtensa.c')) diff --git a/disas/mips.c b/disas/mips.c index b9a5204304..5aacacb2c8 100644 --- a/disas/mips.c +++ b/disas/mips.c @@ -20,6 +20,7 @@ You should 
have received a copy of the GNU General Public License along with this program; if not, see . */ #include "qemu/osdep.h" +#include "qemu/bitops.h" #include "disas/dis-asm.h" /* mips.h. Mips opcode list for GDB, the GNU debugger. @@ -1334,9 +1335,9 @@ const struct mips_opcode mips_builtin_opcodes[] = {"balc", "+p", 0xe8000000, 0xfc000000, UBD|WR_31, 0, I32R6}, {"bc", "+p", 0xc8000000, 0xfc000000, UBD|WR_31, 0, I32R6}, {"jic", "t,o", 0xd8000000, 0xffe00000, UBD|RD_t, 0, I32R6}, -{"beqzc", "s,+p", 0xd8000000, 0xfc000000, CBD|RD_s, 0, I32R6}, +{"beqzc", "s,+q", 0xd8000000, 0xfc000000, CBD|RD_s, 0, I32R6}, {"jialc", "t,o", 0xf8000000, 0xffe00000, UBD|RD_t, 0, I32R6}, -{"bnezc", "s,+p", 0xf8000000, 0xfc000000, CBD|RD_s, 0, I32R6}, +{"bnezc", "s,+q", 0xf8000000, 0xfc000000, CBD|RD_s, 0, I32R6}, {"beqzalc", "s,t,p", 0x20000000, 0xffe00000, CBD|RD_s|RD_t, 0, I32R6}, {"bovc", "s,t,p", 0x20000000, 0xfc000000, CBD|RD_s|RD_t, 0, I32R6}, {"beqc", "s,t,p", 0x20000000, 0xfc000000, CBD|RD_s|RD_t, 0, I32R6}, @@ -4462,6 +4463,13 @@ print_insn_args (const char *d, (*info->print_address_func) (info->target, info); break; + case 'q': + /* Sign extend the displacement with 21 bits. */ + delta = sextract32(l, OP_SH_DELTA, 21); + info->target = (delta << 2) + pc + INSNLEN; + (*info->print_address_func) (info->target, info); + break; + case 't': /* Coprocessor 0 reg name */ (*info->fprintf_func) (info->stream, "%s", mips_cp0_names[(l >> OP_SH_RT) & diff --git a/disas/nanomips.cpp b/disas/nanomips.c similarity index 72% rename from disas/nanomips.cpp rename to disas/nanomips.c index 9be8df75dd..a0253598dd 100644 --- a/disas/nanomips.cpp +++ b/disas/nanomips.c @@ -30,274 +30,112 @@ #include "qemu/osdep.h" #include "disas/dis-asm.h" -#include -#include -#include -#include -#include +typedef int64_t int64; +typedef uint64_t uint64; +typedef uint32_t uint32; +typedef uint16_t uint16; +typedef uint64_t img_address; -#include "nanomips.h" +typedef enum { + instruction, + call_instruction, + branch_instruction, + return_instruction, + reserved_block, + pool, +} TABLE_ENTRY_TYPE; + +typedef enum { + MIPS64_ = 0x00000001, + XNP_ = 0x00000002, + XMMS_ = 0x00000004, + EVA_ = 0x00000008, + DSP_ = 0x00000010, + MT_ = 0x00000020, + EJTAG_ = 0x00000040, + TLBINV_ = 0x00000080, + CP0_ = 0x00000100, + CP1_ = 0x00000200, + CP2_ = 0x00000400, + UDI_ = 0x00000800, + MCU_ = 0x00001000, + VZ_ = 0x00002000, + TLB_ = 0x00004000, + MVH_ = 0x00008000, + ALL_ATTRIBUTES = 0xffffffffull, +} TABLE_ATTRIBUTE_TYPE; + +typedef struct Dis_info { + img_address m_pc; + fprintf_function fprintf_func; + FILE *stream; + sigjmp_buf buf; +} Dis_info; + +typedef bool (*conditional_function)(uint64 instruction); +typedef char * (*disassembly_function)(uint64 instruction, + Dis_info *info); + +typedef struct Pool { + TABLE_ENTRY_TYPE type; + const struct Pool *next_table; + int next_table_size; + int instructions_size; + uint64 mask; + uint64 value; + disassembly_function disassembly; + conditional_function condition; + uint64 attributes; +} Pool; #define IMGASSERTONCE(test) -int nanomips_dis(char *buf, - unsigned address, - unsigned short one, - unsigned short two, - unsigned short three) +static char * G_GNUC_PRINTF(1, 2) img_format(const char *format, ...) 
{ - std::string disasm; - uint16 bits[3] = {one, two, three}; - - NMD::TABLE_ENTRY_TYPE type; - NMD d(address, NMD::ALL_ATTRIBUTES); - int size = d.Disassemble(bits, disasm, type); - - strcpy(buf, disasm.c_str()); - return size; -} - -int print_insn_nanomips(bfd_vma memaddr, struct disassemble_info *info) -{ - int status; - bfd_byte buffer[2]; - uint16_t insn1 = 0, insn2 = 0, insn3 = 0; - char buf[200]; - - info->bytes_per_chunk = 2; - info->display_endian = info->endian; - info->insn_info_valid = 1; - info->branch_delay_insns = 0; - info->data_size = 0; - info->insn_type = dis_nonbranch; - info->target = 0; - info->target2 = 0; - - status = (*info->read_memory_func)(memaddr, buffer, 2, info); - if (status != 0) { - (*info->memory_error_func)(status, memaddr, info); - return -1; - } - - if (info->endian == BFD_ENDIAN_BIG) { - insn1 = bfd_getb16(buffer); - } else { - insn1 = bfd_getl16(buffer); - } - (*info->fprintf_func)(info->stream, "%04x ", insn1); - - /* Handle 32-bit opcodes. */ - if ((insn1 & 0x1000) == 0) { - status = (*info->read_memory_func)(memaddr + 2, buffer, 2, info); - if (status != 0) { - (*info->memory_error_func)(status, memaddr + 2, info); - return -1; - } - - if (info->endian == BFD_ENDIAN_BIG) { - insn2 = bfd_getb16(buffer); - } else { - insn2 = bfd_getl16(buffer); - } - (*info->fprintf_func)(info->stream, "%04x ", insn2); - } else { - (*info->fprintf_func)(info->stream, " "); - } - /* Handle 48-bit opcodes. */ - if ((insn1 >> 10) == 0x18) { - status = (*info->read_memory_func)(memaddr + 4, buffer, 2, info); - if (status != 0) { - (*info->memory_error_func)(status, memaddr + 4, info); - return -1; - } - - if (info->endian == BFD_ENDIAN_BIG) { - insn3 = bfd_getb16(buffer); - } else { - insn3 = bfd_getl16(buffer); - } - (*info->fprintf_func)(info->stream, "%04x ", insn3); - } else { - (*info->fprintf_func)(info->stream, " "); - } - - int length = nanomips_dis(buf, memaddr, insn1, insn2, insn3); - - /* FIXME: Should probably use a hash table on the major opcode here. */ - - (*info->fprintf_func) (info->stream, "%s", buf); - if (length > 0) { - return length / 8; - } - - info->insn_type = dis_noninsn; - - return insn3 ? 6 : insn2 ? 4 : 2; -} - - -namespace img -{ - address addr32(address a) - { - return a; - } - - std::string format(const char *format, ...) 
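The removed print_insn_nanomips() above also records the nanoMIPS length rules that the C rewrite has to preserve: bit 12 clear in the first halfword selects a 32-bit encoding, and a 0x18 major opcode (top six bits) selects a 48-bit one. A standalone sketch of that size check follows, with hypothetical sample halfwords; the new dispatcher itself is outside this hunk.

#include <stdint.h>
#include <stdio.h>

/* Decide how many 16-bit halfwords a nanoMIPS instruction occupies,
 * using the tests from print_insn_nanomips() above: a 0x18 major opcode
 * means a 48-bit encoding, otherwise bit 12 clear means 32-bit. */
static int nanomips_insn_halfwords(uint16_t first)
{
    if ((first >> 10) == 0x18) {
        return 3;                /* 48-bit encoding */
    }
    if ((first & 0x1000) == 0) {
        return 2;                /* 32-bit encoding */
    }
    return 1;                    /* 16-bit encoding */
}

int main(void)
{
    /* hypothetical first halfwords, one per size class */
    uint16_t samples[3] = { 0x1000, 0x0000, 0x6000 };

    for (int i = 0; i < 3; i++) {
        printf("0x%04x -> %d halfword(s)\n", samples[i],
               nanomips_insn_halfwords(samples[i]));
    }
    return 0;
}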
- { - char buffer[256]; - va_list args; - va_start(args, format); - int err = vsprintf(buffer, format, args); - if (err < 0) { - perror(buffer); - } - va_end(args); - return buffer; - } - - std::string format(const char *format, - std::string s) - { - char buffer[256]; - - sprintf(buffer, format, s.c_str()); - - return buffer; - } - - std::string format(const char *format, - std::string s1, - std::string s2) - { - char buffer[256]; - - sprintf(buffer, format, s1.c_str(), s2.c_str()); - - return buffer; - } - - std::string format(const char *format, - std::string s1, - std::string s2, - std::string s3) - { - char buffer[256]; - - sprintf(buffer, format, s1.c_str(), s2.c_str(), s3.c_str()); - - return buffer; - } - - std::string format(const char *format, - std::string s1, - std::string s2, - std::string s3, - std::string s4) - { - char buffer[256]; - - sprintf(buffer, format, s1.c_str(), s2.c_str(), s3.c_str(), - s4.c_str()); - - return buffer; - } - - std::string format(const char *format, - std::string s1, - std::string s2, - std::string s3, - std::string s4, - std::string s5) - { - char buffer[256]; - - sprintf(buffer, format, s1.c_str(), s2.c_str(), s3.c_str(), - s4.c_str(), s5.c_str()); - - return buffer; - } - - std::string format(const char *format, - uint64 d, - std::string s2) - { - char buffer[256]; - - sprintf(buffer, format, d, s2.c_str()); - - return buffer; - } - - std::string format(const char *format, - std::string s1, - uint64 d, - std::string s2) - { - char buffer[256]; - - sprintf(buffer, format, s1.c_str(), d, s2.c_str()); - - return buffer; - } - - std::string format(const char *format, - std::string s1, - std::string s2, - uint64 d) - { - char buffer[256]; - - sprintf(buffer, format, s1.c_str(), s2.c_str(), d); - - return buffer; - } - - char as_char(int c) - { - return static_cast(c); - } -}; - - -std::string to_string(img::address a) -{ - char buffer[256]; - sprintf(buffer, "0x%" PRIx64, a); + char *buffer; + va_list args; + va_start(args, format); + buffer = g_strdup_vprintf(format, args); + va_end(args); return buffer; } -uint64 extract_bits(uint64 data, uint32 bit_offset, uint32 bit_size) +static char *to_string(img_address a) +{ + return g_strdup_printf("0x%" PRIx64, a); +} + + +static uint64 extract_bits(uint64 data, uint32 bit_offset, uint32 bit_size) { return (data << (64 - (bit_size + bit_offset))) >> (64 - bit_size); } -int64 sign_extend(int64 data, int msb) +static int64 sign_extend(int64 data, int msb) { uint64 shift = 63 - msb; return (data << shift) >> shift; } -uint64 NMD::renumber_registers(uint64 index, uint64 *register_list, - size_t register_list_size) +static uint64 renumber_registers(uint64 index, uint64 *register_list, + size_t register_list_size, Dis_info *info) { if (index < register_list_size) { return register_list[index]; } - throw std::runtime_error(img::format( - "Invalid register mapping index %" PRIu64 - ", size of list = %zu", - index, register_list_size)); + info->fprintf_func(info->stream, "Invalid register mapping index %" PRIu64 + ", size of list = %zu", index, register_list_size); + siglongjmp(info->buf, 1); } /* - * NMD::decode_gpr_gpr4() - decoder for 'gpr4' gpr encoding type + * decode_gpr_gpr4() - decoder for 'gpr4' gpr encoding type * * Map a 4-bit code to the 5-bit register space according to this pattern: * @@ -322,17 +160,17 @@ uint64 NMD::renumber_registers(uint64 index, uint64 *register_list, * - MUL[4X4] * - SW[4X4] */ -uint64 NMD::decode_gpr_gpr4(uint64 d) +static uint64 decode_gpr_gpr4(uint64 d, Dis_info *info) { 
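All of the std::string formatting overloads collapse into img_format()/to_string(), which return GLib-allocated strings; every caller now owns the result and releases it with g_free() or g_autofree, as the handler hunks further down do. A small sketch of that ownership model, using a hypothetical wrapper with the same shape:

#include <glib.h>
#include <stdarg.h>
#include <stdio.h>

/* Hypothetical wrapper with the same shape as img_format(): allocate the
 * formatted string with GLib and hand ownership to the caller. */
static char *fmt_example(const char *format, ...)
{
    char *buffer;
    va_list args;

    va_start(args, format);
    buffer = g_strdup_vprintf(format, args);
    va_end(args);
    return buffer;
}

int main(void)
{
    /* Explicit release with g_free()... */
    char *s = fmt_example("ADDIU %s, %s, 0x%x", "a0", "a1", 16);
    puts(s);
    g_free(s);

    /* ...or scope-based release with g_autofree (GCC/clang cleanup
     * attribute), which the later handler hunks rely on. */
    g_autofree char *t = g_strdup_printf("0x%08x", 0x1000);
    puts(t);
    return 0;
}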
static uint64 register_list[] = { 8, 9, 10, 11, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 }; return renumber_registers(d, register_list, - sizeof(register_list) / sizeof(register_list[0])); + sizeof(register_list) / sizeof(register_list[0]), info); } /* - * NMD::decode_gpr_gpr4_zero() - decoder for 'gpr4.zero' gpr encoding type + * decode_gpr_gpr4_zero() - decoder for 'gpr4.zero' gpr encoding type * * Map a 4-bit code to the 5-bit register space according to this pattern: * @@ -358,17 +196,17 @@ uint64 NMD::decode_gpr_gpr4(uint64 d) * - MOVEP * - SW[4X4] */ -uint64 NMD::decode_gpr_gpr4_zero(uint64 d) +static uint64 decode_gpr_gpr4_zero(uint64 d, Dis_info *info) { static uint64 register_list[] = { 8, 9, 10, 0, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 }; return renumber_registers(d, register_list, - sizeof(register_list) / sizeof(register_list[0])); + sizeof(register_list) / sizeof(register_list[0]), info); } /* - * NMD::decode_gpr_gpr3() - decoder for 'gpr3' gpr encoding type + * decode_gpr_gpr3() - decoder for 'gpr3' gpr encoding type * * Map a 3-bit code to the 5-bit register space according to this pattern: * @@ -417,16 +255,16 @@ uint64 NMD::decode_gpr_gpr4_zero(uint64 d) * - SW[16] * - XOR[16] */ -uint64 NMD::decode_gpr_gpr3(uint64 d) +static uint64 decode_gpr_gpr3(uint64 d, Dis_info *info) { static uint64 register_list[] = { 16, 17, 18, 19, 4, 5, 6, 7 }; return renumber_registers(d, register_list, - sizeof(register_list) / sizeof(register_list[0])); + sizeof(register_list) / sizeof(register_list[0]), info); } /* - * NMD::decode_gpr_gpr3_src_store() - decoder for 'gpr3.src.store' gpr encoding + * decode_gpr_gpr3_src_store() - decoder for 'gpr3.src.store' gpr encoding * type * * Map a 3-bit code to the 5-bit register space according to this pattern: @@ -457,16 +295,16 @@ uint64 NMD::decode_gpr_gpr3(uint64 d) * - SW[16] * - SW[GP16] */ -uint64 NMD::decode_gpr_gpr3_src_store(uint64 d) +static uint64 decode_gpr_gpr3_src_store(uint64 d, Dis_info *info) { static uint64 register_list[] = { 0, 17, 18, 19, 4, 5, 6, 7 }; return renumber_registers(d, register_list, - sizeof(register_list) / sizeof(register_list[0])); + sizeof(register_list) / sizeof(register_list[0]), info); } /* - * NMD::decode_gpr_gpr2_reg1() - decoder for 'gpr2.reg1' gpr encoding type + * decode_gpr_gpr2_reg1() - decoder for 'gpr2.reg1' gpr encoding type * * Map a 2-bit code to the 5-bit register space according to this pattern: * @@ -487,16 +325,16 @@ uint64 NMD::decode_gpr_gpr3_src_store(uint64 d) * - MOVEP * - MOVEP[REV] */ -uint64 NMD::decode_gpr_gpr2_reg1(uint64 d) +static uint64 decode_gpr_gpr2_reg1(uint64 d, Dis_info *info) { static uint64 register_list[] = { 4, 5, 6, 7 }; return renumber_registers(d, register_list, - sizeof(register_list) / sizeof(register_list[0])); + sizeof(register_list) / sizeof(register_list[0]), info); } /* - * NMD::decode_gpr_gpr2_reg2() - decoder for 'gpr2.reg2' gpr encoding type + * decode_gpr_gpr2_reg2() - decoder for 'gpr2.reg2' gpr encoding type * * Map a 2-bit code to the 5-bit register space according to this pattern: * @@ -517,16 +355,16 @@ uint64 NMD::decode_gpr_gpr2_reg1(uint64 d) * - MOVEP * - MOVEP[REV] */ -uint64 NMD::decode_gpr_gpr2_reg2(uint64 d) +static uint64 decode_gpr_gpr2_reg2(uint64 d, Dis_info *info) { static uint64 register_list[] = { 5, 6, 7, 8 }; return renumber_registers(d, register_list, - sizeof(register_list) / sizeof(register_list[0])); + sizeof(register_list) / sizeof(register_list[0]), info); } /* - * NMD::decode_gpr_gpr1() - decoder for 'gpr1' gpr encoding 
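decode_gpr_gpr4() and the related decoders are plain table lookups that widen a compressed register field to the full 5-bit GPR number, bailing out through the Dis_info jump buffer on an out-of-range index. A standalone sketch of the gpr3 variant's mapping, with a simple error return standing in for the siglongjmp() path:

#include <stdint.h>
#include <stdio.h>

/* gpr3 encoding from the table above: 3-bit code -> 5-bit GPR number. */
static const uint8_t gpr3_map[8] = { 16, 17, 18, 19, 4, 5, 6, 7 };

/* Returns the architectural register number, or -1 for a bad index
 * (the real code reports the error and siglongjmp()s instead). */
static int decode_gpr3(unsigned code)
{
    if (code >= 8) {
        return -1;
    }
    return gpr3_map[code];
}

int main(void)
{
    for (unsigned code = 0; code < 8; code++) {
        printf("gpr3 %u -> $%d\n", code, decode_gpr3(code));
    }
    return 0;
}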
type + * decode_gpr_gpr1() - decoder for 'gpr1' gpr encoding type * * Map a 1-bit code to the 5-bit register space according to this pattern: * @@ -546,81 +384,28 @@ uint64 NMD::decode_gpr_gpr2_reg2(uint64 d) * * - MOVE.BALC */ -uint64 NMD::decode_gpr_gpr1(uint64 d) +static uint64 decode_gpr_gpr1(uint64 d, Dis_info *info) { static uint64 register_list[] = { 4, 5 }; return renumber_registers(d, register_list, - sizeof(register_list) / sizeof(register_list[0])); + sizeof(register_list) / sizeof(register_list[0]), info); } -uint64 NMD::copy(uint64 d) -{ - return d; -} - - -int64 NMD::copy(int64 d) -{ - return d; -} - - -int64 NMD::neg_copy(uint64 d) +static int64 neg_copy(uint64 d) { return 0ll - d; } -int64 NMD::neg_copy(int64 d) -{ - return -d; -} - - -/* strange wrapper around gpr3 */ -uint64 NMD::encode_rs3_and_check_rs3_ge_rt3(uint64 d) -{ -return decode_gpr_gpr3(d); -} - - -/* strange wrapper around gpr3 */ -uint64 NMD::encode_rs3_and_check_rs3_lt_rt3(uint64 d) -{ - return decode_gpr_gpr3(d); -} - - -/* nop - done by extraction function */ -uint64 NMD::encode_s_from_address(uint64 d) -{ - return d; -} - - -/* nop - done by extraction function */ -uint64 NMD::encode_u_from_address(uint64 d) -{ - return d; -} - - -/* nop - done by extraction function */ -uint64 NMD::encode_s_from_s_hi(uint64 d) -{ - return d; -} - - -uint64 NMD::encode_count3_from_count(uint64 d) +static uint64 encode_count3_from_count(uint64 d) { IMGASSERTONCE(d < 8); return d == 0ull ? 8ull : d; } -uint64 NMD::encode_shift3_from_shift(uint64 d) +static uint64 encode_shift3_from_shift(uint64 d) { IMGASSERTONCE(d < 8); return d == 0ull ? 8ull : d; @@ -628,21 +413,21 @@ uint64 NMD::encode_shift3_from_shift(uint64 d) /* special value for load literal */ -int64 NMD::encode_eu_from_s_li16(uint64 d) +static int64 encode_eu_from_s_li16(uint64 d) { IMGASSERTONCE(d < 128); return d == 127 ? -1 : (int64)d; } -uint64 NMD::encode_msbd_from_size(uint64 d) +static uint64 encode_msbd_from_size(uint64 d) { IMGASSERTONCE(d < 32); return d + 1; } -uint64 NMD::encode_eu_from_u_andi16(uint64 d) +static uint64 encode_eu_from_u_andi16(uint64 d) { IMGASSERTONCE(d < 16); if (d == 12) { @@ -655,42 +440,14 @@ uint64 NMD::encode_eu_from_u_andi16(uint64 d) } -uint64 NMD::encode_msbd_from_pos_and_size(uint64 d) -{ - IMGASSERTONCE(0); - return d; -} - - /* save16 / restore16 ???? */ -uint64 NMD::encode_rt1_from_rt(uint64 d) +static uint64 encode_rt1_from_rt(uint64 d) { return d ? 31 : 30; } -/* ? */ -uint64 NMD::encode_lsb_from_pos_and_size(uint64 d) -{ - return d; -} - - -std::string NMD::save_restore_list(uint64 rt, uint64 count, uint64 gp) -{ - std::string str; - - for (uint64 counter = 0; counter != count; counter++) { - bool use_gp = gp && (counter == count - 1); - uint64 this_rt = use_gp ? 
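Where the C++ code threw std::runtime_error for an invalid register index, the C helpers print the message and siglongjmp() through Dis_info::buf. The matching sigsetjmp() has to sit in the top-level entry point, which is not part of this hunk, so the sketch below only assumes that overall shape; the helper and variable names are stand-ins.

#include <setjmp.h>
#include <stdio.h>

static sigjmp_buf decode_env;

/* Stands in for a decoder helper: on bad input it reports the problem
 * and unwinds straight back to the entry point, like GPR() does. */
static int lookup_register(int index)
{
    if (index < 0 || index > 31) {
        fprintf(stderr, "Invalid GPR register index %d\n", index);
        siglongjmp(decode_env, 1);
    }
    return index;
}

int main(void)
{
    /* The entry point establishes the recovery point once per decode;
     * this mirrors the assumed sigsetjmp() site, not code in this hunk. */
    if (sigsetjmp(decode_env, 0) == 0) {
        printf("reg %d\n", lookup_register(5));
        printf("reg %d\n", lookup_register(99));   /* triggers the bailout */
        printf("not reached\n");
    } else {
        printf("decode aborted\n");
    }
    return 0;
}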
28 : ((rt & 0x10) | (rt + counter)) & 0x1f; - str += img::format(",%s", GPR(this_rt)); - } - - return str; -} - - -std::string NMD::GPR(uint64 reg) +static const char *GPR(uint64 reg, Dis_info *info) { static const char *gpr_reg[32] = { "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3", @@ -703,12 +460,32 @@ std::string NMD::GPR(uint64 reg) return gpr_reg[reg]; } - throw std::runtime_error(img::format("Invalid GPR register index %" PRIu64, - reg)); + info->fprintf_func(info->stream, "Invalid GPR register index %" PRIu64, + reg); + siglongjmp(info->buf, 1); } -std::string NMD::FPR(uint64 reg) +static char *save_restore_list(uint64 rt, uint64 count, uint64 gp, + Dis_info *info) +{ + char *reg_list[34]; + reg_list[0] = (char *)""; + + assert(count <= 32); + for (uint64 counter = 0; counter != count; counter++) { + bool use_gp = gp && (counter == count - 1); + uint64 this_rt = use_gp ? 28 : ((rt & 0x10) | (rt + counter)) & 0x1f; + /* glib usage below requires casting away const */ + reg_list[counter + 1] = (char *)GPR(this_rt, info); + } + reg_list[count + 1] = NULL; + + return g_strjoinv(",", reg_list); +} + + +static const char *FPR(uint64 reg, Dis_info *info) { static const char *fpr_reg[32] = { "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", @@ -721,12 +498,13 @@ std::string NMD::FPR(uint64 reg) return fpr_reg[reg]; } - throw std::runtime_error(img::format("Invalid FPR register index %" PRIu64, - reg)); + info->fprintf_func(info->stream, "Invalid FPR register index %" PRIu64, + reg); + siglongjmp(info->buf, 1); } -std::string NMD::AC(uint64 reg) +static const char *AC(uint64 reg, Dis_info *info) { static const char *ac_reg[4] = { "ac0", "ac1", "ac2", "ac3" @@ -736,42 +514,22 @@ std::string NMD::AC(uint64 reg) return ac_reg[reg]; } - throw std::runtime_error(img::format("Invalid AC register index %" PRIu64, - reg)); + info->fprintf_func(info->stream, "Invalid AC register index %" PRIu64, + reg); + siglongjmp(info->buf, 1); } -std::string NMD::IMMEDIATE(uint64 value) -{ - return img::format("0x%" PRIx64, value); -} - - -std::string NMD::IMMEDIATE(int64 value) -{ - return img::format("%" PRId64, value); -} - - -std::string NMD::CPR(uint64 reg) -{ - /* needs more work */ - return img::format("CP%" PRIu64, reg); -} - - -std::string NMD::ADDRESS(uint64 value, int instruction_size) +static char *ADDRESS(uint64 value, int instruction_size, Dis_info *info) { /* token for string replace */ - /* const char TOKEN_REPLACE = (char)0xa2; */ - img::address address = m_pc + value + instruction_size; + img_address address = info->m_pc + value + instruction_size; /* symbol replacement */ - /* return img::as_char(TOKEN_REPLACE) + to_string(address); */ return to_string(address); } -uint64 NMD::extract_op_code_value(const uint16 * data, int size) +static uint64 extract_op_code_value(const uint16 *data, int size) { switch (size) { case 16: @@ -786,13 +544,6 @@ uint64 NMD::extract_op_code_value(const uint16 * data, int size) } -int NMD::Disassemble(const uint16 * data, std::string & dis, - NMD::TABLE_ENTRY_TYPE & type) -{ - return Disassemble(data, dis, type, MAJOR, 2); -} - - /* * Recurse through tables until the instruction is found then return * the string and size @@ -804,74 +555,48 @@ int NMD::Disassemble(const uint16 * data, std::string & dis, * instruction size - negative is error * disassembly string - on error will constain error string */ -int NMD::Disassemble(const uint16 * data, std::string & dis, - NMD::TABLE_ENTRY_TYPE & type, const Pool *table, - int table_size) +static int Disassemble(const uint16 
*data, char **dis, + TABLE_ENTRY_TYPE *type, const Pool *table, + int table_size, Dis_info *info) { - try - { - for (int i = 0; i < table_size; i++) { - uint64 op_code = extract_op_code_value(data, - table[i].instructions_size); - if ((op_code & table[i].mask) == table[i].value) { - /* possible match */ - conditional_function cond = table[i].condition; - if ((cond == 0) || (this->*cond)(op_code)) { - try - { - if (table[i].type == pool) { - return Disassemble(data, dis, type, - table[i].next_table, - table[i].next_table_size); - } else if ((table[i].type == instruction) || - (table[i].type == call_instruction) || - (table[i].type == branch_instruction) || - (table[i].type == return_instruction)) { - if ((table[i].attributes != 0) && - (m_requested_instruction_categories & - table[i].attributes) == 0) { - /* - * failed due to instruction having - * an ASE attribute and the requested version - * not having that attribute - */ - dis = "ASE attribute mismatch"; - return -5; - } - disassembly_function dis_fn = table[i].disassembly; - if (dis_fn == 0) { - dis = "disassembler failure - bad table entry"; - return -6; - } - type = table[i].type; - dis = (this->*dis_fn)(op_code); - return table[i].instructions_size; - } else { - dis = "reserved instruction"; - return -2; - } - } - catch (std::runtime_error & e) - { - dis = e.what(); - return -3; /* runtime error */ + for (int i = 0; i < table_size; i++) { + uint64 op_code = extract_op_code_value(data, + table[i].instructions_size); + if ((op_code & table[i].mask) == table[i].value) { + /* possible match */ + conditional_function cond = table[i].condition; + if ((cond == NULL) || cond(op_code)) { + if (table[i].type == pool) { + return Disassemble(data, dis, type, + table[i].next_table, + table[i].next_table_size, + info); + } else if ((table[i].type == instruction) || + (table[i].type == call_instruction) || + (table[i].type == branch_instruction) || + (table[i].type == return_instruction)) { + disassembly_function dis_fn = table[i].disassembly; + if (dis_fn == 0) { + *dis = g_strdup( + "disassembler failure - bad table entry"); + return -6; } + *type = table[i].type; + *dis = dis_fn(op_code, info); + return table[i].instructions_size; + } else { + *dis = g_strdup("reserved instruction"); + return -2; } } } } - catch (std::exception & e) - { - dis = e.what(); - return -4; /* runtime error */ - } - - dis = "failed to disassemble"; + *dis = g_strdup("failed to disassemble"); return -1; /* failed to disassemble */ } -uint64 NMD::extract_code_18_to_0(uint64 instruction) +static uint64 extract_code_18_to_0(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 19); @@ -879,7 +604,7 @@ uint64 NMD::extract_code_18_to_0(uint64 instruction) } -uint64 NMD::extract_shift3_2_1_0(uint64 instruction) +static uint64 extract_shift3_2_1_0(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 3); @@ -887,7 +612,7 @@ uint64 NMD::extract_shift3_2_1_0(uint64 instruction) } -uint64 NMD::extract_u_11_10_9_8_7_6_5_4_3__s3(uint64 instruction) +static uint64 extract_u_11_10_9_8_7_6_5_4_3__s3(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 3, 9) << 3; @@ -895,7 +620,7 @@ uint64 NMD::extract_u_11_10_9_8_7_6_5_4_3__s3(uint64 instruction) } -uint64 NMD::extract_count_3_2_1_0(uint64 instruction) +static uint64 extract_count_3_2_1_0(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 4); @@ -903,7 +628,7 @@ uint64 NMD::extract_count_3_2_1_0(uint64 instruction) } -uint64 
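The de-C++'d Disassemble() above is a straightforward recursive walk over Pool tables: a mask/value pair selects a row, an optional condition callback disambiguates overlapping encodings, and a row either points at another table (type == pool) or at a handler that formats the text. A much-reduced sketch of that dispatch with made-up opcodes, table contents and handler names:

#include <stdint.h>
#include <stdio.h>

typedef const char *(*dis_fn)(uint32_t insn);

typedef struct Row {
    uint32_t mask, value;            /* opcode match */
    const struct Row *next_table;    /* non-NULL: recurse (a "pool" row) */
    int next_table_size;
    dis_fn handler;                  /* NULL in pool rows */
} Row;

static const char *dis_nop(uint32_t insn)  { (void)insn; return "NOP"; }
static const char *dis_addu(uint32_t insn) { (void)insn; return "ADDU ..."; }

/* Hypothetical inner table selected when the top byte is 0x20. */
static const Row inner[2] = {
    { 0x000000ff, 0x00, NULL, 0, dis_nop  },
    { 0x000000ff, 0x01, NULL, 0, dis_addu },
};

static const Row major[1] = {
    { 0xff000000, 0x20000000, inner, 2, NULL },
};

static const char *dis(uint32_t insn, const Row *table, int n)
{
    for (int i = 0; i < n; i++) {
        if ((insn & table[i].mask) == table[i].value) {
            if (table[i].next_table) {
                return dis(insn, table[i].next_table,
                           table[i].next_table_size);
            }
            return table[i].handler(insn);
        }
    }
    return "failed to disassemble";
}

int main(void)
{
    printf("%s\n", dis(0x20000001, major, 1));   /* prints: ADDU ... */
    return 0;
}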
NMD::extract_rtz3_9_8_7(uint64 instruction) +static uint64 extract_rtz3_9_8_7(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 7, 3); @@ -911,7 +636,7 @@ uint64 NMD::extract_rtz3_9_8_7(uint64 instruction) } -uint64 NMD::extract_u_17_to_1__s1(uint64 instruction) +static uint64 extract_u_17_to_1__s1(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 1, 17) << 1; @@ -919,7 +644,7 @@ uint64 NMD::extract_u_17_to_1__s1(uint64 instruction) } -int64 NMD::extract_s__se9_20_19_18_17_16_15_14_13_12_11(uint64 instruction) +static int64 extract_s__se9_20_19_18_17_16_15_14_13_12_11(uint64 instruction) { int64 value = 0; value |= extract_bits(instruction, 11, 10); @@ -928,7 +653,7 @@ int64 NMD::extract_s__se9_20_19_18_17_16_15_14_13_12_11(uint64 instruction) } -int64 NMD::extract_s__se11_0_10_9_8_7_6_5_4_3_2_1_0_s1(uint64 instruction) +static int64 extract_s__se11_0_10_9_8_7_6_5_4_3_2_1_0_s1(uint64 instruction) { int64 value = 0; value |= extract_bits(instruction, 0, 1) << 11; @@ -938,7 +663,7 @@ int64 NMD::extract_s__se11_0_10_9_8_7_6_5_4_3_2_1_0_s1(uint64 instruction) } -uint64 NMD::extract_u_10(uint64 instruction) +static uint64 extract_u_10(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 10, 1); @@ -946,7 +671,7 @@ uint64 NMD::extract_u_10(uint64 instruction) } -uint64 NMD::extract_rtz4_27_26_25_23_22_21(uint64 instruction) +static uint64 extract_rtz4_27_26_25_23_22_21(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 21, 3); @@ -955,7 +680,7 @@ uint64 NMD::extract_rtz4_27_26_25_23_22_21(uint64 instruction) } -uint64 NMD::extract_sa_15_14_13_12_11(uint64 instruction) +static uint64 extract_sa_15_14_13_12_11(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 11, 5); @@ -963,7 +688,7 @@ uint64 NMD::extract_sa_15_14_13_12_11(uint64 instruction) } -uint64 NMD::extract_shift_4_3_2_1_0(uint64 instruction) +static uint64 extract_shift_4_3_2_1_0(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 5); @@ -971,7 +696,7 @@ uint64 NMD::extract_shift_4_3_2_1_0(uint64 instruction) } -uint64 NMD::extract_shiftx_10_9_8_7__s1(uint64 instruction) +static uint64 extract_shiftx_10_9_8_7__s1(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 7, 4) << 1; @@ -979,7 +704,7 @@ uint64 NMD::extract_shiftx_10_9_8_7__s1(uint64 instruction) } -uint64 NMD::extract_hint_25_24_23_22_21(uint64 instruction) +static uint64 extract_hint_25_24_23_22_21(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 21, 5); @@ -987,7 +712,7 @@ uint64 NMD::extract_hint_25_24_23_22_21(uint64 instruction) } -uint64 NMD::extract_count3_14_13_12(uint64 instruction) +static uint64 extract_count3_14_13_12(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 12, 3); @@ -995,7 +720,7 @@ uint64 NMD::extract_count3_14_13_12(uint64 instruction) } -int64 NMD::extract_s__se31_0_11_to_2_20_to_12_s12(uint64 instruction) +static int64 extract_s__se31_0_11_to_2_20_to_12_s12(uint64 instruction) { int64 value = 0; value |= extract_bits(instruction, 0, 1) << 31; @@ -1006,7 +731,7 @@ int64 NMD::extract_s__se31_0_11_to_2_20_to_12_s12(uint64 instruction) } -int64 NMD::extract_s__se7_0_6_5_4_3_2_1_s1(uint64 instruction) +static int64 extract_s__se7_0_6_5_4_3_2_1_s1(uint64 instruction) { int64 value = 0; value |= extract_bits(instruction, 0, 1) << 7; @@ -1016,7 +741,7 @@ int64 NMD::extract_s__se7_0_6_5_4_3_2_1_s1(uint64 instruction) } -uint64 
NMD::extract_u2_10_9(uint64 instruction) +static uint64 extract_u2_10_9(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 9, 2); @@ -1024,7 +749,7 @@ uint64 NMD::extract_u2_10_9(uint64 instruction) } -uint64 NMD::extract_code_25_24_23_22_21_20_19_18_17_16(uint64 instruction) +static uint64 extract_code_25_24_23_22_21_20_19_18_17_16(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 16, 10); @@ -1032,7 +757,7 @@ uint64 NMD::extract_code_25_24_23_22_21_20_19_18_17_16(uint64 instruction) } -uint64 NMD::extract_rs_20_19_18_17_16(uint64 instruction) +static uint64 extract_rs_20_19_18_17_16(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 16, 5); @@ -1040,7 +765,7 @@ uint64 NMD::extract_rs_20_19_18_17_16(uint64 instruction) } -uint64 NMD::extract_u_2_1__s1(uint64 instruction) +static uint64 extract_u_2_1__s1(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 1, 2) << 1; @@ -1048,7 +773,7 @@ uint64 NMD::extract_u_2_1__s1(uint64 instruction) } -uint64 NMD::extract_stripe_6(uint64 instruction) +static uint64 extract_stripe_6(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 6, 1); @@ -1056,7 +781,7 @@ uint64 NMD::extract_stripe_6(uint64 instruction) } -uint64 NMD::extract_ac_15_14(uint64 instruction) +static uint64 extract_ac_15_14(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 14, 2); @@ -1064,7 +789,7 @@ uint64 NMD::extract_ac_15_14(uint64 instruction) } -uint64 NMD::extract_shift_20_19_18_17_16(uint64 instruction) +static uint64 extract_shift_20_19_18_17_16(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 16, 5); @@ -1072,7 +797,7 @@ uint64 NMD::extract_shift_20_19_18_17_16(uint64 instruction) } -uint64 NMD::extract_rdl_25_24(uint64 instruction) +static uint64 extract_rdl_25_24(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 24, 1); @@ -1080,7 +805,7 @@ uint64 NMD::extract_rdl_25_24(uint64 instruction) } -int64 NMD::extract_s__se10_0_9_8_7_6_5_4_3_2_1_s1(uint64 instruction) +static int64 extract_s__se10_0_9_8_7_6_5_4_3_2_1_s1(uint64 instruction) { int64 value = 0; value |= extract_bits(instruction, 0, 1) << 10; @@ -1090,7 +815,7 @@ int64 NMD::extract_s__se10_0_9_8_7_6_5_4_3_2_1_s1(uint64 instruction) } -uint64 NMD::extract_eu_6_5_4_3_2_1_0(uint64 instruction) +static uint64 extract_eu_6_5_4_3_2_1_0(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 7); @@ -1098,7 +823,7 @@ uint64 NMD::extract_eu_6_5_4_3_2_1_0(uint64 instruction) } -uint64 NMD::extract_shift_5_4_3_2_1_0(uint64 instruction) +static uint64 extract_shift_5_4_3_2_1_0(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 6); @@ -1106,7 +831,7 @@ uint64 NMD::extract_shift_5_4_3_2_1_0(uint64 instruction) } -uint64 NMD::extract_count_19_18_17_16(uint64 instruction) +static uint64 extract_count_19_18_17_16(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 16, 4); @@ -1114,7 +839,7 @@ uint64 NMD::extract_count_19_18_17_16(uint64 instruction) } -uint64 NMD::extract_code_2_1_0(uint64 instruction) +static uint64 extract_code_2_1_0(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 3); @@ -1122,7 +847,7 @@ uint64 NMD::extract_code_2_1_0(uint64 instruction) } -uint64 NMD::extract_u_11_10_9_8_7_6_5_4_3_2_1_0(uint64 instruction) +static uint64 extract_u_11_10_9_8_7_6_5_4_3_2_1_0(uint64 instruction) { uint64 value = 0; value |= 
extract_bits(instruction, 0, 12); @@ -1130,7 +855,7 @@ uint64 NMD::extract_u_11_10_9_8_7_6_5_4_3_2_1_0(uint64 instruction) } -uint64 NMD::extract_rs_4_3_2_1_0(uint64 instruction) +static uint64 extract_rs_4_3_2_1_0(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 5); @@ -1138,7 +863,7 @@ uint64 NMD::extract_rs_4_3_2_1_0(uint64 instruction) } -uint64 NMD::extract_u_20_to_3__s3(uint64 instruction) +static uint64 extract_u_20_to_3__s3(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 3, 18) << 3; @@ -1146,7 +871,7 @@ uint64 NMD::extract_u_20_to_3__s3(uint64 instruction) } -uint64 NMD::extract_u_3_2_1_0__s2(uint64 instruction) +static uint64 extract_u_3_2_1_0__s2(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 4) << 2; @@ -1154,7 +879,7 @@ uint64 NMD::extract_u_3_2_1_0__s2(uint64 instruction) } -uint64 NMD::extract_cofun_25_24_23(uint64 instruction) +static uint64 extract_cofun_25_24_23(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 3, 23); @@ -1162,7 +887,7 @@ uint64 NMD::extract_cofun_25_24_23(uint64 instruction) } -uint64 NMD::extract_u_2_1_0__s2(uint64 instruction) +static uint64 extract_u_2_1_0__s2(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 3) << 2; @@ -1170,7 +895,7 @@ uint64 NMD::extract_u_2_1_0__s2(uint64 instruction) } -uint64 NMD::extract_rd3_3_2_1(uint64 instruction) +static uint64 extract_rd3_3_2_1(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 1, 3); @@ -1178,7 +903,7 @@ uint64 NMD::extract_rd3_3_2_1(uint64 instruction) } -uint64 NMD::extract_sa_15_14_13_12(uint64 instruction) +static uint64 extract_sa_15_14_13_12(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 12, 4); @@ -1186,7 +911,7 @@ uint64 NMD::extract_sa_15_14_13_12(uint64 instruction) } -uint64 NMD::extract_rt_25_24_23_22_21(uint64 instruction) +static uint64 extract_rt_25_24_23_22_21(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 21, 5); @@ -1194,7 +919,7 @@ uint64 NMD::extract_rt_25_24_23_22_21(uint64 instruction) } -uint64 NMD::extract_ru_7_6_5_4_3(uint64 instruction) +static uint64 extract_ru_7_6_5_4_3(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 3, 5); @@ -1202,7 +927,7 @@ uint64 NMD::extract_ru_7_6_5_4_3(uint64 instruction) } -uint64 NMD::extract_u_17_to_0(uint64 instruction) +static uint64 extract_u_17_to_0(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 18); @@ -1210,7 +935,7 @@ uint64 NMD::extract_u_17_to_0(uint64 instruction) } -uint64 NMD::extract_rsz4_4_2_1_0(uint64 instruction) +static uint64 extract_rsz4_4_2_1_0(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 3); @@ -1219,7 +944,7 @@ uint64 NMD::extract_rsz4_4_2_1_0(uint64 instruction) } -int64 NMD::extract_s__se21_0_20_to_1_s1(uint64 instruction) +static int64 extract_s__se21_0_20_to_1_s1(uint64 instruction) { int64 value = 0; value |= extract_bits(instruction, 0, 1) << 21; @@ -1229,7 +954,7 @@ int64 NMD::extract_s__se21_0_20_to_1_s1(uint64 instruction) } -uint64 NMD::extract_op_25_to_3(uint64 instruction) +static uint64 extract_op_25_to_3(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 3, 23); @@ -1237,7 +962,7 @@ uint64 NMD::extract_op_25_to_3(uint64 instruction) } -uint64 NMD::extract_rs4_4_2_1_0(uint64 instruction) +static uint64 extract_rs4_4_2_1_0(uint64 instruction) { uint64 value = 0; value |= 
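Each of these extract_* helpers is a composition of extract_bits(), which isolates a field by shifting it to the top of a 64-bit word and back down, plus an optional re-scale (the __sN suffixes) and, for signed operands, a final sign_extend(). A worked, self-contained example of both steps; the field positions here are illustrative rather than copied from any one extractor.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same definitions as extract_bits() and sign_extend() above. */
static uint64_t extract_bits(uint64_t data, uint32_t offset, uint32_t size)
{
    return (data << (64 - (size + offset))) >> (64 - size);
}

static int64_t sign_extend(int64_t data, int msb)
{
    uint64_t shift = 63 - msb;
    return (int64_t)((uint64_t)data << shift) >> shift;
}

int main(void)
{
    uint64_t insn = 0xcafe;                      /* hypothetical encoding */

    /* Unsigned field: 9 bits at bit 3, re-scaled by 8 ("__s3"). */
    uint64_t u = extract_bits(insn, 3, 9) << 3;

    /* Signed field: 9 bits at bit 11, sign bit in the field's MSB. */
    int64_t s = sign_extend((int64_t)extract_bits(insn, 11, 9), 8);

    printf("u=0x%" PRIx64 " s=%" PRId64 "\n", u, s);
    return 0;
}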
extract_bits(instruction, 0, 3); @@ -1246,7 +971,7 @@ uint64 NMD::extract_rs4_4_2_1_0(uint64 instruction) } -uint64 NMD::extract_bit_23_22_21(uint64 instruction) +static uint64 extract_bit_23_22_21(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 21, 3); @@ -1254,7 +979,7 @@ uint64 NMD::extract_bit_23_22_21(uint64 instruction) } -uint64 NMD::extract_rt_41_40_39_38_37(uint64 instruction) +static uint64 extract_rt_41_40_39_38_37(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 37, 5); @@ -1262,7 +987,7 @@ uint64 NMD::extract_rt_41_40_39_38_37(uint64 instruction) } -int64 NMD::extract_shift__se5_21_20_19_18_17_16(uint64 instruction) +static int64 extract_shift__se5_21_20_19_18_17_16(uint64 instruction) { int64 value = 0; value |= extract_bits(instruction, 16, 6); @@ -1271,7 +996,7 @@ int64 NMD::extract_shift__se5_21_20_19_18_17_16(uint64 instruction) } -uint64 NMD::extract_rd2_3_8(uint64 instruction) +static uint64 extract_rd2_3_8(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 3, 1) << 1; @@ -1280,7 +1005,7 @@ uint64 NMD::extract_rd2_3_8(uint64 instruction) } -uint64 NMD::extract_code_17_to_0(uint64 instruction) +static uint64 extract_code_17_to_0(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 18); @@ -1288,7 +1013,7 @@ uint64 NMD::extract_code_17_to_0(uint64 instruction) } -uint64 NMD::extract_size_20_19_18_17_16(uint64 instruction) +static uint64 extract_size_20_19_18_17_16(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 16, 5); @@ -1296,7 +1021,7 @@ uint64 NMD::extract_size_20_19_18_17_16(uint64 instruction) } -int64 NMD::extract_s__se8_15_7_6_5_4_3_2_s2(uint64 instruction) +static int64 extract_s__se8_15_7_6_5_4_3_2_s2(uint64 instruction) { int64 value = 0; value |= extract_bits(instruction, 2, 6) << 2; @@ -1306,7 +1031,7 @@ int64 NMD::extract_s__se8_15_7_6_5_4_3_2_s2(uint64 instruction) } -uint64 NMD::extract_u_15_to_0(uint64 instruction) +static uint64 extract_u_15_to_0(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 16); @@ -1314,7 +1039,7 @@ uint64 NMD::extract_u_15_to_0(uint64 instruction) } -uint64 NMD::extract_fs_20_19_18_17_16(uint64 instruction) +static uint64 extract_fs_20_19_18_17_16(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 16, 5); @@ -1322,7 +1047,7 @@ uint64 NMD::extract_fs_20_19_18_17_16(uint64 instruction) } -int64 NMD::extract_s__se8_15_7_6_5_4_3_2_1_0(uint64 instruction) +static int64 extract_s__se8_15_7_6_5_4_3_2_1_0(uint64 instruction) { int64 value = 0; value |= extract_bits(instruction, 0, 8); @@ -1332,7 +1057,7 @@ int64 NMD::extract_s__se8_15_7_6_5_4_3_2_1_0(uint64 instruction) } -uint64 NMD::extract_stype_20_19_18_17_16(uint64 instruction) +static uint64 extract_stype_20_19_18_17_16(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 16, 5); @@ -1340,7 +1065,7 @@ uint64 NMD::extract_stype_20_19_18_17_16(uint64 instruction) } -uint64 NMD::extract_rtl_11(uint64 instruction) +static uint64 extract_rtl_11(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 9, 1); @@ -1348,7 +1073,7 @@ uint64 NMD::extract_rtl_11(uint64 instruction) } -uint64 NMD::extract_hs_20_19_18_17_16(uint64 instruction) +static uint64 extract_hs_20_19_18_17_16(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 16, 5); @@ -1356,7 +1081,7 @@ uint64 NMD::extract_hs_20_19_18_17_16(uint64 instruction) } -uint64 
NMD::extract_sel_13_12_11(uint64 instruction) +static uint64 extract_sel_13_12_11(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 11, 3); @@ -1364,7 +1089,7 @@ uint64 NMD::extract_sel_13_12_11(uint64 instruction) } -uint64 NMD::extract_lsb_4_3_2_1_0(uint64 instruction) +static uint64 extract_lsb_4_3_2_1_0(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 5); @@ -1372,7 +1097,7 @@ uint64 NMD::extract_lsb_4_3_2_1_0(uint64 instruction) } -uint64 NMD::extract_gp_2(uint64 instruction) +static uint64 extract_gp_2(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 2, 1); @@ -1380,7 +1105,7 @@ uint64 NMD::extract_gp_2(uint64 instruction) } -uint64 NMD::extract_rt3_9_8_7(uint64 instruction) +static uint64 extract_rt3_9_8_7(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 7, 3); @@ -1388,7 +1113,7 @@ uint64 NMD::extract_rt3_9_8_7(uint64 instruction) } -uint64 NMD::extract_ft_25_24_23_22_21(uint64 instruction) +static uint64 extract_ft_25_24_23_22_21(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 21, 5); @@ -1396,7 +1121,7 @@ uint64 NMD::extract_ft_25_24_23_22_21(uint64 instruction) } -uint64 NMD::extract_u_17_16_15_14_13_12_11(uint64 instruction) +static uint64 extract_u_17_16_15_14_13_12_11(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 11, 7); @@ -1404,7 +1129,7 @@ uint64 NMD::extract_u_17_16_15_14_13_12_11(uint64 instruction) } -uint64 NMD::extract_cs_20_19_18_17_16(uint64 instruction) +static uint64 extract_cs_20_19_18_17_16(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 16, 5); @@ -1412,7 +1137,7 @@ uint64 NMD::extract_cs_20_19_18_17_16(uint64 instruction) } -uint64 NMD::extract_rt4_9_7_6_5(uint64 instruction) +static uint64 extract_rt4_9_7_6_5(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 5, 3); @@ -1421,7 +1146,7 @@ uint64 NMD::extract_rt4_9_7_6_5(uint64 instruction) } -uint64 NMD::extract_msbt_10_9_8_7_6(uint64 instruction) +static uint64 extract_msbt_10_9_8_7_6(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 6, 5); @@ -1429,7 +1154,7 @@ uint64 NMD::extract_msbt_10_9_8_7_6(uint64 instruction) } -uint64 NMD::extract_u_5_4_3_2_1_0__s2(uint64 instruction) +static uint64 extract_u_5_4_3_2_1_0__s2(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 6) << 2; @@ -1437,7 +1162,7 @@ uint64 NMD::extract_u_5_4_3_2_1_0__s2(uint64 instruction) } -uint64 NMD::extract_sa_15_14_13(uint64 instruction) +static uint64 extract_sa_15_14_13(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 13, 3); @@ -1445,7 +1170,7 @@ uint64 NMD::extract_sa_15_14_13(uint64 instruction) } -int64 NMD::extract_s__se14_0_13_to_1_s1(uint64 instruction) +static int64 extract_s__se14_0_13_to_1_s1(uint64 instruction) { int64 value = 0; value |= extract_bits(instruction, 0, 1) << 14; @@ -1455,7 +1180,7 @@ int64 NMD::extract_s__se14_0_13_to_1_s1(uint64 instruction) } -uint64 NMD::extract_rs3_6_5_4(uint64 instruction) +static uint64 extract_rs3_6_5_4(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 4, 3); @@ -1463,7 +1188,7 @@ uint64 NMD::extract_rs3_6_5_4(uint64 instruction) } -uint64 NMD::extract_u_31_to_0__s32(uint64 instruction) +static uint64 extract_u_31_to_0__s32(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 32) << 32; @@ -1471,7 +1196,7 @@ uint64 NMD::extract_u_31_to_0__s32(uint64 
instruction) } -uint64 NMD::extract_shift_10_9_8_7_6(uint64 instruction) +static uint64 extract_shift_10_9_8_7_6(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 6, 5); @@ -1479,7 +1204,7 @@ uint64 NMD::extract_shift_10_9_8_7_6(uint64 instruction) } -uint64 NMD::extract_cs_25_24_23_22_21(uint64 instruction) +static uint64 extract_cs_25_24_23_22_21(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 21, 5); @@ -1487,7 +1212,7 @@ uint64 NMD::extract_cs_25_24_23_22_21(uint64 instruction) } -uint64 NMD::extract_shiftx_11_10_9_8_7_6(uint64 instruction) +static uint64 extract_shiftx_11_10_9_8_7_6(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 6, 6); @@ -1495,7 +1220,7 @@ uint64 NMD::extract_shiftx_11_10_9_8_7_6(uint64 instruction) } -uint64 NMD::extract_rt_9_8_7_6_5(uint64 instruction) +static uint64 extract_rt_9_8_7_6_5(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 5, 5); @@ -1503,7 +1228,7 @@ uint64 NMD::extract_rt_9_8_7_6_5(uint64 instruction) } -uint64 NMD::extract_op_25_24_23_22_21(uint64 instruction) +static uint64 extract_op_25_24_23_22_21(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 21, 5); @@ -1511,7 +1236,7 @@ uint64 NMD::extract_op_25_24_23_22_21(uint64 instruction) } -uint64 NMD::extract_u_6_5_4_3_2_1_0__s2(uint64 instruction) +static uint64 extract_u_6_5_4_3_2_1_0__s2(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 7) << 2; @@ -1519,7 +1244,7 @@ uint64 NMD::extract_u_6_5_4_3_2_1_0__s2(uint64 instruction) } -uint64 NMD::extract_bit_16_15_14_13_12_11(uint64 instruction) +static uint64 extract_bit_16_15_14_13_12_11(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 11, 6); @@ -1527,7 +1252,7 @@ uint64 NMD::extract_bit_16_15_14_13_12_11(uint64 instruction) } -uint64 NMD::extract_mask_20_19_18_17_16_15_14(uint64 instruction) +static uint64 extract_mask_20_19_18_17_16_15_14(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 14, 7); @@ -1535,7 +1260,7 @@ uint64 NMD::extract_mask_20_19_18_17_16_15_14(uint64 instruction) } -uint64 NMD::extract_eu_3_2_1_0(uint64 instruction) +static uint64 extract_eu_3_2_1_0(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 4); @@ -1543,7 +1268,7 @@ uint64 NMD::extract_eu_3_2_1_0(uint64 instruction) } -uint64 NMD::extract_u_7_6_5_4__s4(uint64 instruction) +static uint64 extract_u_7_6_5_4__s4(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 4, 4) << 4; @@ -1551,7 +1276,7 @@ uint64 NMD::extract_u_7_6_5_4__s4(uint64 instruction) } -int64 NMD::extract_s__se8_15_7_6_5_4_3_s3(uint64 instruction) +static int64 extract_s__se8_15_7_6_5_4_3_s3(uint64 instruction) { int64 value = 0; value |= extract_bits(instruction, 3, 5) << 3; @@ -1561,7 +1286,7 @@ int64 NMD::extract_s__se8_15_7_6_5_4_3_s3(uint64 instruction) } -uint64 NMD::extract_ft_15_14_13_12_11(uint64 instruction) +static uint64 extract_ft_15_14_13_12_11(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 11, 5); @@ -1569,7 +1294,7 @@ uint64 NMD::extract_ft_15_14_13_12_11(uint64 instruction) } -int64 NMD::extract_s__se31_15_to_0_31_to_16(uint64 instruction) +static int64 extract_s__se31_15_to_0_31_to_16(uint64 instruction) { int64 value = 0; value |= extract_bits(instruction, 0, 16) << 16; @@ -1579,7 +1304,7 @@ int64 NMD::extract_s__se31_15_to_0_31_to_16(uint64 instruction) } -uint64 
NMD::extract_u_20_19_18_17_16_15_14_13(uint64 instruction) +static uint64 extract_u_20_19_18_17_16_15_14_13(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 13, 8); @@ -1587,7 +1312,7 @@ uint64 NMD::extract_u_20_19_18_17_16_15_14_13(uint64 instruction) } -uint64 NMD::extract_u_17_to_2__s2(uint64 instruction) +static uint64 extract_u_17_to_2__s2(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 2, 16) << 2; @@ -1595,7 +1320,7 @@ uint64 NMD::extract_u_17_to_2__s2(uint64 instruction) } -uint64 NMD::extract_rd_15_14_13_12_11(uint64 instruction) +static uint64 extract_rd_15_14_13_12_11(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 11, 5); @@ -1603,7 +1328,7 @@ uint64 NMD::extract_rd_15_14_13_12_11(uint64 instruction) } -uint64 NMD::extract_c0s_20_19_18_17_16(uint64 instruction) +static uint64 extract_c0s_20_19_18_17_16(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 16, 5); @@ -1611,7 +1336,7 @@ uint64 NMD::extract_c0s_20_19_18_17_16(uint64 instruction) } -uint64 NMD::extract_code_1_0(uint64 instruction) +static uint64 extract_code_1_0(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 2); @@ -1619,7 +1344,7 @@ uint64 NMD::extract_code_1_0(uint64 instruction) } -int64 NMD::extract_s__se25_0_24_to_1_s1(uint64 instruction) +static int64 extract_s__se25_0_24_to_1_s1(uint64 instruction) { int64 value = 0; value |= extract_bits(instruction, 0, 1) << 25; @@ -1629,7 +1354,7 @@ int64 NMD::extract_s__se25_0_24_to_1_s1(uint64 instruction) } -uint64 NMD::extract_u_1_0(uint64 instruction) +static uint64 extract_u_1_0(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 2); @@ -1637,7 +1362,7 @@ uint64 NMD::extract_u_1_0(uint64 instruction) } -uint64 NMD::extract_u_3_8__s2(uint64 instruction) +static uint64 extract_u_3_8__s2(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 3, 1) << 3; @@ -1646,7 +1371,7 @@ uint64 NMD::extract_u_3_8__s2(uint64 instruction) } -uint64 NMD::extract_fd_15_14_13_12_11(uint64 instruction) +static uint64 extract_fd_15_14_13_12_11(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 11, 5); @@ -1654,7 +1379,7 @@ uint64 NMD::extract_fd_15_14_13_12_11(uint64 instruction) } -uint64 NMD::extract_u_4_3_2_1_0__s2(uint64 instruction) +static uint64 extract_u_4_3_2_1_0__s2(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 5) << 2; @@ -1662,7 +1387,7 @@ uint64 NMD::extract_u_4_3_2_1_0__s2(uint64 instruction) } -uint64 NMD::extract_rtz4_9_7_6_5(uint64 instruction) +static uint64 extract_rtz4_9_7_6_5(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 5, 3); @@ -1671,7 +1396,7 @@ uint64 NMD::extract_rtz4_9_7_6_5(uint64 instruction) } -uint64 NMD::extract_sel_15_14_13_12_11(uint64 instruction) +static uint64 extract_sel_15_14_13_12_11(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 11, 5); @@ -1679,7 +1404,7 @@ uint64 NMD::extract_sel_15_14_13_12_11(uint64 instruction) } -uint64 NMD::extract_ct_25_24_23_22_21(uint64 instruction) +static uint64 extract_ct_25_24_23_22_21(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 21, 5); @@ -1687,7 +1412,7 @@ uint64 NMD::extract_ct_25_24_23_22_21(uint64 instruction) } -uint64 NMD::extract_u_20_to_2__s2(uint64 instruction) +static uint64 extract_u_20_to_2__s2(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 2, 19) << 2; @@ 
-1695,7 +1420,7 @@ uint64 NMD::extract_u_20_to_2__s2(uint64 instruction) } -int64 NMD::extract_s__se3_4_2_1_0(uint64 instruction) +static int64 extract_s__se3_4_2_1_0(uint64 instruction) { int64 value = 0; value |= extract_bits(instruction, 0, 3); @@ -1705,7 +1430,7 @@ int64 NMD::extract_s__se3_4_2_1_0(uint64 instruction) } -uint64 NMD::extract_u_3_2_1_0__s1(uint64 instruction) +static uint64 extract_u_3_2_1_0__s1(uint64 instruction) { uint64 value = 0; value |= extract_bits(instruction, 0, 4) << 1; @@ -1714,28 +1439,28 @@ uint64 NMD::extract_u_3_2_1_0__s1(uint64 instruction) -bool NMD::ADDIU_32__cond(uint64 instruction) +static bool ADDIU_32__cond(uint64 instruction) { uint64 rt = extract_rt_25_24_23_22_21(instruction); return rt != 0; } -bool NMD::ADDIU_RS5__cond(uint64 instruction) +static bool ADDIU_RS5__cond(uint64 instruction) { uint64 rt = extract_rt_9_8_7_6_5(instruction); return rt != 0; } -bool NMD::BALRSC_cond(uint64 instruction) +static bool BALRSC_cond(uint64 instruction) { uint64 rt = extract_rt_25_24_23_22_21(instruction); return rt != 0; } -bool NMD::BEQC_16__cond(uint64 instruction) +static bool BEQC_16__cond(uint64 instruction) { uint64 rs3 = extract_rs3_6_5_4(instruction); uint64 rt3 = extract_rt3_9_8_7(instruction); @@ -1744,7 +1469,7 @@ bool NMD::BEQC_16__cond(uint64 instruction) } -bool NMD::BNEC_16__cond(uint64 instruction) +static bool BNEC_16__cond(uint64 instruction) { uint64 rs3 = extract_rs3_6_5_4(instruction); uint64 rt3 = extract_rt3_9_8_7(instruction); @@ -1753,35 +1478,35 @@ bool NMD::BNEC_16__cond(uint64 instruction) } -bool NMD::MOVE_cond(uint64 instruction) +static bool MOVE_cond(uint64 instruction) { uint64 rt = extract_rt_9_8_7_6_5(instruction); return rt != 0; } -bool NMD::P16_BR1_cond(uint64 instruction) +static bool P16_BR1_cond(uint64 instruction) { uint64 u = extract_u_3_2_1_0__s1(instruction); return u != 0; } -bool NMD::PREF_S9__cond(uint64 instruction) +static bool PREF_S9__cond(uint64 instruction) { uint64 hint = extract_hint_25_24_23_22_21(instruction); return hint != 31; } -bool NMD::PREFE_cond(uint64 instruction) +static bool PREFE_cond(uint64 instruction) { uint64 hint = extract_hint_25_24_23_22_21(instruction); return hint != 31; } -bool NMD::SLTU_cond(uint64 instruction) +static bool SLTU_cond(uint64 instruction) { uint64 rd = extract_rd_15_14_13_12_11(instruction); return rd != 0; @@ -1799,15 +1524,15 @@ bool NMD::SLTU_cond(uint64 instruction) * fs ----- * fd ----- */ -std::string NMD::ABS_D(uint64 instruction) +static char *ABS_D(uint64 instruction, Dis_info *info) { uint64 fd_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string fs = FPR(copy(fs_value)); - std::string fd = FPR(copy(fd_value)); + const char *fs = FPR(fs_value, info); + const char *fd = FPR(fd_value, info); - return img::format("ABS.D %s, %s", fd, fs); + return img_format("ABS.D %s, %s", fd, fs); } @@ -1821,15 +1546,15 @@ std::string NMD::ABS_D(uint64 instruction) * fd ----- * fs ----- */ -std::string NMD::ABS_S(uint64 instruction) +static char *ABS_S(uint64 instruction, Dis_info *info) { uint64 fd_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string fs = FPR(copy(fs_value)); - std::string fd = FPR(copy(fd_value)); + const char *fs = FPR(fs_value, info); + const char *fd = FPR(fd_value, info); - return img::format("ABS.S %s, %s", fd, fs); + return img_format("ABS.S %s, %s", fd, fs); } @@ -1843,15 +1568,15 @@ std::string NMD::ABS_S(uint64 
instruction) * rt ----- * rs ----- */ -std::string NMD::ABSQ_S_PH(uint64 instruction) +static char *ABSQ_S_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("ABSQ_S.PH %s, %s", rt, rs); + return img_format("ABSQ_S.PH %s, %s", rt, rs); } @@ -1865,15 +1590,15 @@ std::string NMD::ABSQ_S_PH(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::ABSQ_S_QB(uint64 instruction) +static char *ABSQ_S_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("ABSQ_S.QB %s, %s", rt, rs); + return img_format("ABSQ_S.QB %s, %s", rt, rs); } @@ -1887,15 +1612,15 @@ std::string NMD::ABSQ_S_QB(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::ABSQ_S_W(uint64 instruction) +static char *ABSQ_S_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("ABSQ_S.W %s, %s", rt, rs); + return img_format("ABSQ_S.W %s, %s", rt, rs); } @@ -1908,17 +1633,16 @@ std::string NMD::ABSQ_S_W(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::ACLR(uint64 instruction) +static char *ACLR(uint64 instruction, Dis_info *info) { uint64 bit_value = extract_bit_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string bit = IMMEDIATE(copy(bit_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rs = GPR(rs_value, info); - return img::format("ACLR %s, %s(%s)", bit, s, rs); + return img_format("ACLR 0x%" PRIx64 ", %" PRId64 "(%s)", + bit_value, s_value, rs); } @@ -1931,17 +1655,17 @@ std::string NMD::ACLR(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::ADD(uint64 instruction) +static char *ADD(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("ADD %s, %s, %s", rd, rs, rt); + return img_format("ADD %s, %s, %s", rd, rs, rt); } @@ -1956,17 +1680,17 @@ std::string NMD::ADD(uint64 instruction) * fs ----- * fd ----- */ -std::string NMD::ADD_D(uint64 instruction) +static char *ADD_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); - std::string fd = 
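Every instruction handler now has the shape visible in ADD above: pull the operand fields out of the encoding (the rt_25_24_23_22_21 / rs_20_19_18_17_16 / rd_15_14_13_12_11 bit ranges), map them to register names, and return a GLib-allocated string built with img_format(). A condensed sketch of that shape; the helper name and the numeric register rendering are stand-ins, since the real handlers go through GPR().

#include <glib.h>
#include <stdio.h>

/* Condensed handler in the style of ADD(): the field positions match the
 * rt (25..21), rs (20..16) and rd (15..11) extractors it uses. */
static char *dis_add(guint64 insn)
{
    guint64 rt = (insn >> 21) & 0x1f;
    guint64 rs = (insn >> 16) & 0x1f;
    guint64 rd = (insn >> 11) & 0x1f;

    /* The real handler maps these through GPR() to names such as "a0";
     * plain register numbers keep this sketch self-contained. */
    return g_strdup_printf("ADD $%" G_GUINT64_FORMAT ", $%" G_GUINT64_FORMAT
                           ", $%" G_GUINT64_FORMAT, rd, rs, rt);
}

int main(void)
{
    g_autofree char *text = dis_add((2ull << 21) | (3ull << 16) | (4ull << 11));
    puts(text);   /* prints: ADD $4, $3, $2 */
    return 0;
}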
FPR(copy(fd_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); + const char *fd = FPR(fd_value, info); - return img::format("ADD.D %s, %s, %s", fd, fs, ft); + return img_format("ADD.D %s, %s, %s", fd, fs, ft); } @@ -1981,17 +1705,17 @@ std::string NMD::ADD_D(uint64 instruction) * fs ----- * fd ----- */ -std::string NMD::ADD_S(uint64 instruction) +static char *ADD_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); - std::string fd = FPR(copy(fd_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); + const char *fd = FPR(fd_value, info); - return img::format("ADD.S %s, %s, %s", fd, fs, ft); + return img_format("ADD.S %s, %s, %s", fd, fs, ft); } @@ -2004,17 +1728,16 @@ std::string NMD::ADD_S(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::ADDIU_32_(uint64 instruction) +static char *ADDIU_32_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_15_to_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("ADDIU %s, %s, %s", rt, rs, u); + return img_format("ADDIU %s, %s, 0x%" PRIx64, rt, rs, u_value); } @@ -2027,15 +1750,14 @@ std::string NMD::ADDIU_32_(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::ADDIU_48_(uint64 instruction) +static char *ADDIU_48_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_41_40_39_38_37(instruction); int64 s_value = extract_s__se31_15_to_0_31_to_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); + const char *rt = GPR(rt_value, info); - return img::format("ADDIU %s, %s", rt, s); + return img_format("ADDIU %s, %" PRId64, rt, s_value); } @@ -2048,15 +1770,14 @@ std::string NMD::ADDIU_48_(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::ADDIU_GP48_(uint64 instruction) +static char *ADDIU_GP48_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_41_40_39_38_37(instruction); int64 s_value = extract_s__se31_15_to_0_31_to_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); + const char *rt = GPR(rt_value, info); - return img::format("ADDIU %s, $%d, %s", rt, 28, s); + return img_format("ADDIU %s, $%d, %" PRId64, rt, 28, s_value); } @@ -2069,15 +1790,14 @@ std::string NMD::ADDIU_GP48_(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::ADDIU_GP_B_(uint64 instruction) +static char *ADDIU_GP_B_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_17_to_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); - return img::format("ADDIU %s, $%d, %s", rt, 28, u); + return img_format("ADDIU %s, $%d, 0x%" PRIx64, rt, 28, u_value); } @@ -2090,15 +1810,14 @@ std::string NMD::ADDIU_GP_B_(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::ADDIU_GP_W_(uint64 instruction) +static char *ADDIU_GP_W_(uint64 instruction, 
Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_20_to_2__s2(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); - return img::format("ADDIU %s, $%d, %s", rt, 28, u); + return img_format("ADDIU %s, $%d, 0x%" PRIx64, rt, 28, u_value); } @@ -2111,17 +1830,17 @@ std::string NMD::ADDIU_GP_W_(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::ADDIU_NEG_(uint64 instruction) +static char *ADDIU_NEG_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string u = IMMEDIATE(neg_copy(u_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); + int64 u = neg_copy(u_value); - return img::format("ADDIU %s, %s, %s", rt, rs, u); + return img_format("ADDIU %s, %s, %" PRId64, rt, rs, u); } @@ -2134,15 +1853,14 @@ std::string NMD::ADDIU_NEG_(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::ADDIU_R1_SP_(uint64 instruction) +static char *ADDIU_R1_SP_(uint64 instruction, Dis_info *info) { uint64 u_value = extract_u_5_4_3_2_1_0__s2(instruction); uint64 rt3_value = extract_rt3_9_8_7(instruction); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); - return img::format("ADDIU %s, $%d, %s", rt3, 29, u); + return img_format("ADDIU %s, $%d, 0x%" PRIx64, rt3, 29, u_value); } @@ -2155,17 +1873,16 @@ std::string NMD::ADDIU_R1_SP_(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::ADDIU_R2_(uint64 instruction) +static char *ADDIU_R2_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); uint64 u_value = extract_u_2_1_0__s2(instruction); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); - return img::format("ADDIU %s, %s, %s", rt3, rs3, u); + return img_format("ADDIU %s, %s, 0x%" PRIx64, rt3, rs3, u_value); } @@ -2177,15 +1894,14 @@ std::string NMD::ADDIU_R2_(uint64 instruction) * rt ----- * s - --- */ -std::string NMD::ADDIU_RS5_(uint64 instruction) +static char *ADDIU_RS5_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_9_8_7_6_5(instruction); int64 s_value = extract_s__se3_4_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); + const char *rt = GPR(rt_value, info); - return img::format("ADDIU %s, %s", rt, s); + return img_format("ADDIU %s, %" PRId64, rt, s_value); } @@ -2199,15 +1915,15 @@ std::string NMD::ADDIU_RS5_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDIUPC_32_(uint64 instruction) +static char *ADDIUPC_32_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); int64 s_value = extract_s__se21_0_20_to_1_s1(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 4, 
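ADDIUPC and the other PC-relative handlers keep the ADDRESS() result in a g_autofree pointer so the temporary target string is released when the handler returns, while the img_format() result itself is handed to the caller. ADDRESS() simply prints m_pc + offset + encoding size as hex; a small sketch of that computation with a hypothetical helper name:

#include <glib.h>
#include <stdio.h>

/* Same computation as ADDRESS() above: PC-relative operands are printed
 * as the absolute target, relative to the *next* instruction
 * (current PC plus the encoding size in bytes). */
static char *address_str(guint64 pc, gint64 offset, int insn_bytes)
{
    return g_strdup_printf("0x%" G_GINT64_MODIFIER "x",
                           pc + offset + insn_bytes);
}

int main(void)
{
    /* hypothetical ADDIUPC with a 4-byte encoding and offset -8 */
    g_autofree char *target = address_str(0x80001000, -8, 4);
    printf("ADDIUPC a0, %s\n", target);   /* prints 0x80000ffc */
    return 0;
}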
info); - return img::format("ADDIUPC %s, %s", rt, s); + return img_format("ADDIUPC %s, %s", rt, s); } @@ -2221,15 +1937,15 @@ std::string NMD::ADDIUPC_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDIUPC_48_(uint64 instruction) +static char *ADDIUPC_48_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_41_40_39_38_37(instruction); int64 s_value = extract_s__se31_15_to_0_31_to_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 6); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 6, info); - return img::format("ADDIUPC %s, %s", rt, s); + return img_format("ADDIUPC %s, %s", rt, s); } @@ -2243,17 +1959,17 @@ std::string NMD::ADDIUPC_48_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDQ_PH(uint64 instruction) +static char *ADDQ_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("ADDQ.PH %s, %s, %s", rd, rs, rt); + return img_format("ADDQ.PH %s, %s, %s", rd, rs, rt); } @@ -2268,17 +1984,17 @@ std::string NMD::ADDQ_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDQ_S_PH(uint64 instruction) +static char *ADDQ_S_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("ADDQ_S.PH %s, %s, %s", rd, rs, rt); + return img_format("ADDQ_S.PH %s, %s, %s", rd, rs, rt); } @@ -2292,17 +2008,17 @@ std::string NMD::ADDQ_S_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDQ_S_W(uint64 instruction) +static char *ADDQ_S_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("ADDQ_S.W %s, %s, %s", rd, rs, rt); + return img_format("ADDQ_S.W %s, %s, %s", rd, rs, rt); } @@ -2317,17 +2033,17 @@ std::string NMD::ADDQ_S_W(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDQH_PH(uint64 instruction) +static char *ADDQH_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return 
img::format("ADDQH.PH %s, %s, %s", rd, rs, rt); + return img_format("ADDQH.PH %s, %s, %s", rd, rs, rt); } @@ -2342,17 +2058,17 @@ std::string NMD::ADDQH_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDQH_R_PH(uint64 instruction) +static char *ADDQH_R_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("ADDQH_R.PH %s, %s, %s", rd, rs, rt); + return img_format("ADDQH_R.PH %s, %s, %s", rd, rs, rt); } @@ -2367,17 +2083,17 @@ std::string NMD::ADDQH_R_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDQH_R_W(uint64 instruction) +static char *ADDQH_R_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("ADDQH_R.W %s, %s, %s", rd, rs, rt); + return img_format("ADDQH_R.W %s, %s, %s", rd, rs, rt); } @@ -2392,17 +2108,17 @@ std::string NMD::ADDQH_R_W(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDQH_W(uint64 instruction) +static char *ADDQH_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("ADDQH.W %s, %s, %s", rd, rs, rt); + return img_format("ADDQH.W %s, %s, %s", rd, rs, rt); } @@ -2416,17 +2132,17 @@ std::string NMD::ADDQH_W(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDSC(uint64 instruction) +static char *ADDSC(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("ADDSC %s, %s, %s", rd, rs, rt); + return img_format("ADDSC %s, %s, %s", rd, rs, rt); } @@ -2439,17 +2155,17 @@ std::string NMD::ADDSC(uint64 instruction) * rs3 --- * rd3 --- */ -std::string NMD::ADDU_16_(uint64 instruction) +static char *ADDU_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); uint64 rd3_value = extract_rd3_3_2_1(instruction); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); - std::string rd3 = GPR(decode_gpr_gpr3(rd3_value)); + const char 
*rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); + const char *rd3 = GPR(decode_gpr_gpr3(rd3_value, info), info); - return img::format("ADDU %s, %s, %s", rd3, rs3, rt3); + return img_format("ADDU %s, %s, %s", rd3, rs3, rt3); } @@ -2463,17 +2179,17 @@ std::string NMD::ADDU_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDU_32_(uint64 instruction) +static char *ADDU_32_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("ADDU %s, %s, %s", rd, rs, rt); + return img_format("ADDU %s, %s, %s", rd, rs, rt); } @@ -2487,15 +2203,15 @@ std::string NMD::ADDU_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDU_4X4_(uint64 instruction) +static char *ADDU_4X4_(uint64 instruction, Dis_info *info) { uint64 rt4_value = extract_rt4_9_7_6_5(instruction); uint64 rs4_value = extract_rs4_4_2_1_0(instruction); - std::string rs4 = GPR(decode_gpr_gpr4(rs4_value)); - std::string rt4 = GPR(decode_gpr_gpr4(rt4_value)); + const char *rs4 = GPR(decode_gpr_gpr4(rs4_value, info), info); + const char *rt4 = GPR(decode_gpr_gpr4(rt4_value, info), info); - return img::format("ADDU %s, %s", rs4, rt4); + return img_format("ADDU %s, %s", rs4, rt4); } @@ -2509,17 +2225,17 @@ std::string NMD::ADDU_4X4_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDU_PH(uint64 instruction) +static char *ADDU_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("ADDU.PH %s, %s, %s", rd, rs, rt); + return img_format("ADDU.PH %s, %s, %s", rd, rs, rt); } @@ -2533,17 +2249,17 @@ std::string NMD::ADDU_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDU_QB(uint64 instruction) +static char *ADDU_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("ADDU.QB %s, %s, %s", rd, rs, rt); + return img_format("ADDU.QB %s, %s, %s", rd, rs, rt); } @@ -2558,17 +2274,17 @@ std::string NMD::ADDU_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDU_S_PH(uint64 instruction) +static char *ADDU_S_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = 
GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("ADDU_S.PH %s, %s, %s", rd, rs, rt); + return img_format("ADDU_S.PH %s, %s, %s", rd, rs, rt); } @@ -2582,17 +2298,17 @@ std::string NMD::ADDU_S_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDU_S_QB(uint64 instruction) +static char *ADDU_S_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("ADDU_S.QB %s, %s, %s", rd, rs, rt); + return img_format("ADDU_S.QB %s, %s, %s", rd, rs, rt); } @@ -2607,17 +2323,17 @@ std::string NMD::ADDU_S_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDUH_QB(uint64 instruction) +static char *ADDUH_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("ADDUH.QB %s, %s, %s", rd, rs, rt); + return img_format("ADDUH.QB %s, %s, %s", rd, rs, rt); } @@ -2632,17 +2348,17 @@ std::string NMD::ADDUH_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDUH_R_QB(uint64 instruction) +static char *ADDUH_R_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("ADDUH_R.QB %s, %s, %s", rd, rs, rt); + return img_format("ADDUH_R.QB %s, %s, %s", rd, rs, rt); } /* @@ -2655,17 +2371,17 @@ std::string NMD::ADDUH_R_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ADDWC(uint64 instruction) +static char *ADDWC(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("ADDWC %s, %s, %s", rd, rs, rt); + return img_format("ADDWC %s, %s, %s", rd, rs, rt); } @@ -2679,15 +2395,15 @@ std::string NMD::ADDWC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ALUIPC(uint64 instruction) +static char *ALUIPC(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); int64 s_value = 
extract_s__se31_0_11_to_2_20_to_12_s12(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("ALUIPC %s, %%pcrel_hi(%s)", rt, s); + return img_format("ALUIPC %s, %%pcrel_hi(%s)", rt, s); } @@ -2700,15 +2416,15 @@ std::string NMD::ALUIPC(uint64 instruction) * rs3 --- * eu ---- */ -std::string NMD::AND_16_(uint64 instruction) +static char *AND_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); - return img::format("AND %s, %s", rs3, rt3); + return img_format("AND %s, %s", rs3, rt3); } @@ -2722,17 +2438,17 @@ std::string NMD::AND_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::AND_32_(uint64 instruction) +static char *AND_32_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("AND %s, %s, %s", rd, rs, rt); + return img_format("AND %s, %s, %s", rd, rs, rt); } @@ -2745,17 +2461,17 @@ std::string NMD::AND_32_(uint64 instruction) * rs3 --- * eu ---- */ -std::string NMD::ANDI_16_(uint64 instruction) +static char *ANDI_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); uint64 eu_value = extract_eu_3_2_1_0(instruction); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); - std::string eu = IMMEDIATE(encode_eu_from_u_andi16(eu_value)); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); + uint64 eu = encode_eu_from_u_andi16(eu_value); - return img::format("ANDI %s, %s, %s", rt3, rs3, eu); + return img_format("ANDI %s, %s, 0x%" PRIx64, rt3, rs3, eu); } @@ -2769,17 +2485,16 @@ std::string NMD::ANDI_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ANDI_32_(uint64 instruction) +static char *ANDI_32_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("ANDI %s, %s, %s", rt, rs, u); + return img_format("ANDI %s, %s, 0x%" PRIx64, rt, rs, u_value); } @@ -2793,17 +2508,16 @@ std::string NMD::ANDI_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::APPEND(uint64 instruction) +static char *APPEND(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); 
uint64 sa_value = extract_sa_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string sa = IMMEDIATE(copy(sa_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("APPEND %s, %s, %s", rt, rs, sa); + return img_format("APPEND %s, %s, 0x%" PRIx64, rt, rs, sa_value); } @@ -2817,17 +2531,16 @@ std::string NMD::APPEND(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ASET(uint64 instruction) +static char *ASET(uint64 instruction, Dis_info *info) { uint64 bit_value = extract_bit_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string bit = IMMEDIATE(copy(bit_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rs = GPR(rs_value, info); - return img::format("ASET %s, %s(%s)", bit, s, rs); + return img_format("ASET 0x%" PRIx64 ", %" PRId64 "(%s)", + bit_value, s_value, rs); } @@ -2841,13 +2554,13 @@ std::string NMD::ASET(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BALC_16_(uint64 instruction) +static char *BALC_16_(uint64 instruction, Dis_info *info) { int64 s_value = extract_s__se10_0_9_8_7_6_5_4_3_2_1_s1(instruction); - std::string s = ADDRESS(encode_s_from_address(s_value), 2); + g_autofree char *s = ADDRESS(s_value, 2, info); - return img::format("BALC %s", s); + return img_format("BALC %s", s); } @@ -2861,13 +2574,13 @@ std::string NMD::BALC_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BALC_32_(uint64 instruction) +static char *BALC_32_(uint64 instruction, Dis_info *info) { int64 s_value = extract_s__se25_0_24_to_1_s1(instruction); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BALC %s", s); + return img_format("BALC %s", s); } @@ -2881,15 +2594,15 @@ std::string NMD::BALC_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BALRSC(uint64 instruction) +static char *BALRSC(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("BALRSC %s, %s", rt, rs); + return img_format("BALRSC %s, %s", rt, rs); } @@ -2903,17 +2616,16 @@ std::string NMD::BALRSC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BBEQZC(uint64 instruction) +static char *BBEQZC(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 bit_value = extract_bit_16_15_14_13_12_11(instruction); int64 s_value = extract_s__se11_0_10_9_8_7_6_5_4_3_2_1_0_s1(instruction); - std::string rt = GPR(copy(rt_value)); - std::string bit = IMMEDIATE(copy(bit_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BBEQZC %s, %s, %s", rt, bit, s); + return img_format("BBEQZC %s, 0x%" PRIx64 ", %s", rt, bit_value, s); } @@ -2927,17 +2639,16 @@ std::string NMD::BBEQZC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BBNEZC(uint64 instruction) +static char *BBNEZC(uint64 instruction, Dis_info *info) { uint64 rt_value = 
extract_rt_25_24_23_22_21(instruction); uint64 bit_value = extract_bit_16_15_14_13_12_11(instruction); int64 s_value = extract_s__se11_0_10_9_8_7_6_5_4_3_2_1_0_s1(instruction); - std::string rt = GPR(copy(rt_value)); - std::string bit = IMMEDIATE(copy(bit_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BBNEZC %s, %s, %s", rt, bit, s); + return img_format("BBNEZC %s, 0x%" PRIx64 ", %s", rt, bit_value, s); } @@ -2951,13 +2662,13 @@ std::string NMD::BBNEZC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BC_16_(uint64 instruction) +static char *BC_16_(uint64 instruction, Dis_info *info) { int64 s_value = extract_s__se10_0_9_8_7_6_5_4_3_2_1_s1(instruction); - std::string s = ADDRESS(encode_s_from_address(s_value), 2); + g_autofree char *s = ADDRESS(s_value, 2, info); - return img::format("BC %s", s); + return img_format("BC %s", s); } @@ -2971,13 +2682,13 @@ std::string NMD::BC_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BC_32_(uint64 instruction) +static char *BC_32_(uint64 instruction, Dis_info *info) { int64 s_value = extract_s__se25_0_24_to_1_s1(instruction); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BC %s", s); + return img_format("BC %s", s); } @@ -2991,15 +2702,15 @@ std::string NMD::BC_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BC1EQZC(uint64 instruction) +static char *BC1EQZC(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); int64 s_value = extract_s__se14_0_13_to_1_s1(instruction); - std::string ft = FPR(copy(ft_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *ft = FPR(ft_value, info); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BC1EQZC %s, %s", ft, s); + return img_format("BC1EQZC %s, %s", ft, s); } @@ -3013,15 +2724,15 @@ std::string NMD::BC1EQZC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BC1NEZC(uint64 instruction) +static char *BC1NEZC(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); int64 s_value = extract_s__se14_0_13_to_1_s1(instruction); - std::string ft = FPR(copy(ft_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *ft = FPR(ft_value, info); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BC1NEZC %s, %s", ft, s); + return img_format("BC1NEZC %s, %s", ft, s); } @@ -3035,15 +2746,14 @@ std::string NMD::BC1NEZC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BC2EQZC(uint64 instruction) +static char *BC2EQZC(uint64 instruction, Dis_info *info) { uint64 ct_value = extract_ct_25_24_23_22_21(instruction); int64 s_value = extract_s__se14_0_13_to_1_s1(instruction); - std::string ct = CPR(copy(ct_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BC2EQZC %s, %s", ct, s); + return img_format("BC2EQZC CP%" PRIu64 ", %s", ct_value, s); } @@ -3057,15 +2767,14 @@ std::string NMD::BC2EQZC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BC2NEZC(uint64 instruction) +static char *BC2NEZC(uint64 instruction, Dis_info *info) { uint64 ct_value = extract_ct_25_24_23_22_21(instruction); int64 s_value = extract_s__se14_0_13_to_1_s1(instruction); - std::string ct = 
CPR(copy(ct_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BC2NEZC %s, %s", ct, s); + return img_format("BC2NEZC CP%" PRIu64 ", %s", ct_value, s); } @@ -3079,17 +2788,17 @@ std::string NMD::BC2NEZC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BEQC_16_(uint64 instruction) +static char *BEQC_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); uint64 u_value = extract_u_3_2_1_0__s1(instruction); - std::string rs3 = GPR(encode_rs3_and_check_rs3_lt_rt3(rs3_value)); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - std::string u = ADDRESS(encode_u_from_address(u_value), 2); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); + g_autofree char *u = ADDRESS(u_value, 2, info); - return img::format("BEQC %s, %s, %s", rs3, rt3, u); + return img_format("BEQC %s, %s, %s", rs3, rt3, u); } @@ -3103,17 +2812,17 @@ std::string NMD::BEQC_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BEQC_32_(uint64 instruction) +static char *BEQC_32_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se14_0_13_to_1_s1(instruction); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BEQC %s, %s, %s", rs, rt, s); + return img_format("BEQC %s, %s, %s", rs, rt, s); } @@ -3127,17 +2836,16 @@ std::string NMD::BEQC_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BEQIC(uint64 instruction) +static char *BEQIC(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_17_16_15_14_13_12_11(instruction); int64 s_value = extract_s__se11_0_10_9_8_7_6_5_4_3_2_1_0_s1(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BEQIC %s, %s, %s", rt, u, s); + return img_format("BEQIC %s, 0x%" PRIx64 ", %s", rt, u_value, s); } @@ -3151,15 +2859,15 @@ std::string NMD::BEQIC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BEQZC_16_(uint64 instruction) +static char *BEQZC_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); int64 s_value = extract_s__se7_0_6_5_4_3_2_1_s1(instruction); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 2); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); + g_autofree char *s = ADDRESS(s_value, 2, info); - return img::format("BEQZC %s, %s", rt3, s); + return img_format("BEQZC %s, %s", rt3, s); } @@ -3173,17 +2881,17 @@ std::string NMD::BEQZC_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BGEC(uint64 instruction) +static char *BGEC(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = 
extract_s__se14_0_13_to_1_s1(instruction); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BGEC %s, %s, %s", rs, rt, s); + return img_format("BGEC %s, %s, %s", rs, rt, s); } @@ -3197,17 +2905,16 @@ std::string NMD::BGEC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BGEIC(uint64 instruction) +static char *BGEIC(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_17_16_15_14_13_12_11(instruction); int64 s_value = extract_s__se11_0_10_9_8_7_6_5_4_3_2_1_0_s1(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BGEIC %s, %s, %s", rt, u, s); + return img_format("BGEIC %s, 0x%" PRIx64 ", %s", rt, u_value, s); } @@ -3221,17 +2928,16 @@ std::string NMD::BGEIC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BGEIUC(uint64 instruction) +static char *BGEIUC(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_17_16_15_14_13_12_11(instruction); int64 s_value = extract_s__se11_0_10_9_8_7_6_5_4_3_2_1_0_s1(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BGEIUC %s, %s, %s", rt, u, s); + return img_format("BGEIUC %s, 0x%" PRIx64 ", %s", rt, u_value, s); } @@ -3245,17 +2951,17 @@ std::string NMD::BGEIUC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BGEUC(uint64 instruction) +static char *BGEUC(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se14_0_13_to_1_s1(instruction); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BGEUC %s, %s, %s", rs, rt, s); + return img_format("BGEUC %s, %s, %s", rs, rt, s); } @@ -3269,17 +2975,17 @@ std::string NMD::BGEUC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BLTC(uint64 instruction) +static char *BLTC(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se14_0_13_to_1_s1(instruction); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BLTC %s, %s, %s", rs, rt, s); + return img_format("BLTC %s, %s, %s", rs, rt, s); } @@ -3293,17 +2999,16 @@ std::string NMD::BLTC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BLTIC(uint64 instruction) +static 
char *BLTIC(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_17_16_15_14_13_12_11(instruction); int64 s_value = extract_s__se11_0_10_9_8_7_6_5_4_3_2_1_0_s1(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BLTIC %s, %s, %s", rt, u, s); + return img_format("BLTIC %s, 0x%" PRIx64 ", %s", rt, u_value, s); } @@ -3317,17 +3022,16 @@ std::string NMD::BLTIC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BLTIUC(uint64 instruction) +static char *BLTIUC(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_17_16_15_14_13_12_11(instruction); int64 s_value = extract_s__se11_0_10_9_8_7_6_5_4_3_2_1_0_s1(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BLTIUC %s, %s, %s", rt, u, s); + return img_format("BLTIUC %s, 0x%" PRIx64 ", %s", rt, u_value, s); } @@ -3341,17 +3045,17 @@ std::string NMD::BLTIUC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BLTUC(uint64 instruction) +static char *BLTUC(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se14_0_13_to_1_s1(instruction); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BLTUC %s, %s, %s", rs, rt, s); + return img_format("BLTUC %s, %s, %s", rs, rt, s); } @@ -3365,17 +3069,17 @@ std::string NMD::BLTUC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BNEC_16_(uint64 instruction) +static char *BNEC_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); uint64 u_value = extract_u_3_2_1_0__s1(instruction); - std::string rs3 = GPR(encode_rs3_and_check_rs3_ge_rt3(rs3_value)); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - std::string u = ADDRESS(encode_u_from_address(u_value), 2); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); + g_autofree char *u = ADDRESS(u_value, 2, info); - return img::format("BNEC %s, %s, %s", rs3, rt3, u); + return img_format("BNEC %s, %s, %s", rs3, rt3, u); } @@ -3389,17 +3093,17 @@ std::string NMD::BNEC_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BNEC_32_(uint64 instruction) +static char *BNEC_32_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se14_0_13_to_1_s1(instruction); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); + g_autofree char *s = 
ADDRESS(s_value, 4, info); - return img::format("BNEC %s, %s, %s", rs, rt, s); + return img_format("BNEC %s, %s, %s", rs, rt, s); } @@ -3413,17 +3117,16 @@ std::string NMD::BNEC_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BNEIC(uint64 instruction) +static char *BNEIC(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_17_16_15_14_13_12_11(instruction); int64 s_value = extract_s__se11_0_10_9_8_7_6_5_4_3_2_1_0_s1(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BNEIC %s, %s, %s", rt, u, s); + return img_format("BNEIC %s, 0x%" PRIx64 ", %s", rt, u_value, s); } @@ -3437,15 +3140,15 @@ std::string NMD::BNEIC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BNEZC_16_(uint64 instruction) +static char *BNEZC_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); int64 s_value = extract_s__se7_0_6_5_4_3_2_1_s1(instruction); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 2); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); + g_autofree char *s = ADDRESS(s_value, 2, info); - return img::format("BNEZC %s, %s", rt3, s); + return img_format("BNEZC %s, %s", rt3, s); } @@ -3459,13 +3162,13 @@ std::string NMD::BNEZC_16_(uint64 instruction) * s[13:1] ------------- * s[14] - */ -std::string NMD::BPOSGE32C(uint64 instruction) +static char *BPOSGE32C(uint64 instruction, Dis_info *info) { int64 s_value = extract_s__se14_0_13_to_1_s1(instruction); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("BPOSGE32C %s", s); + return img_format("BPOSGE32C %s", s); } @@ -3479,13 +3182,12 @@ std::string NMD::BPOSGE32C(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BREAK_16_(uint64 instruction) +static char *BREAK_16_(uint64 instruction, Dis_info *info) { uint64 code_value = extract_code_2_1_0(instruction); - std::string code = IMMEDIATE(copy(code_value)); - return img::format("BREAK %s", code); + return img_format("BREAK 0x%" PRIx64, code_value); } @@ -3499,13 +3201,12 @@ std::string NMD::BREAK_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BREAK_32_(uint64 instruction) +static char *BREAK_32_(uint64 instruction, Dis_info *info) { uint64 code_value = extract_code_18_to_0(instruction); - std::string code = IMMEDIATE(copy(code_value)); - return img::format("BREAK %s", code); + return img_format("BREAK 0x%" PRIx64, code_value); } @@ -3519,13 +3220,13 @@ std::string NMD::BREAK_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::BRSC(uint64 instruction) +static char *BRSC(uint64 instruction, Dis_info *info) { uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rs = GPR(copy(rs_value)); + const char *rs = GPR(rs_value, info); - return img::format("BRSC %s", rs); + return img_format("BRSC %s", rs); } @@ -3539,17 +3240,16 @@ std::string NMD::BRSC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CACHE(uint64 instruction) +static char *CACHE(uint64 instruction, Dis_info *info) { uint64 op_value = extract_op_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = 
extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string op = IMMEDIATE(copy(op_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rs = GPR(rs_value, info); - return img::format("CACHE %s, %s(%s)", op, s, rs); + return img_format("CACHE 0x%" PRIx64 ", %" PRId64 "(%s)", + op_value, s_value, rs); } @@ -3563,17 +3263,16 @@ std::string NMD::CACHE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CACHEE(uint64 instruction) +static char *CACHEE(uint64 instruction, Dis_info *info) { uint64 op_value = extract_op_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string op = IMMEDIATE(copy(op_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rs = GPR(rs_value, info); - return img::format("CACHEE %s, %s(%s)", op, s, rs); + return img_format("CACHEE 0x%" PRIx64 ", %" PRId64 "(%s)", + op_value, s_value, rs); } @@ -3587,15 +3286,15 @@ std::string NMD::CACHEE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CEIL_L_D(uint64 instruction) +static char *CEIL_L_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("CEIL.L.D %s, %s", ft, fs); + return img_format("CEIL.L.D %s, %s", ft, fs); } @@ -3609,15 +3308,15 @@ std::string NMD::CEIL_L_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CEIL_L_S(uint64 instruction) +static char *CEIL_L_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("CEIL.L.S %s, %s", ft, fs); + return img_format("CEIL.L.S %s, %s", ft, fs); } @@ -3631,15 +3330,15 @@ std::string NMD::CEIL_L_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CEIL_W_D(uint64 instruction) +static char *CEIL_W_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("CEIL.W.D %s, %s", ft, fs); + return img_format("CEIL.W.D %s, %s", ft, fs); } @@ -3653,15 +3352,15 @@ std::string NMD::CEIL_W_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CEIL_W_S(uint64 instruction) +static char *CEIL_W_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("CEIL.W.S %s, %s", ft, fs); + return img_format("CEIL.W.S %s, %s", ft, fs); } @@ -3675,15 +3374,14 @@ std::string NMD::CEIL_W_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CFC1(uint64 instruction) +static char *CFC1(uint64 instruction, Dis_info *info) { 
uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 cs_value = extract_cs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string cs = CPR(copy(cs_value)); + const char *rt = GPR(rt_value, info); - return img::format("CFC1 %s, %s", rt, cs); + return img_format("CFC1 %s, CP%" PRIu64, rt, cs_value); } @@ -3697,15 +3395,14 @@ std::string NMD::CFC1(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CFC2(uint64 instruction) +static char *CFC2(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 cs_value = extract_cs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string cs = CPR(copy(cs_value)); + const char *rt = GPR(rt_value, info); - return img::format("CFC2 %s, %s", rt, cs); + return img_format("CFC2 %s, CP%" PRIu64, rt, cs_value); } @@ -3719,15 +3416,15 @@ std::string NMD::CFC2(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CLASS_D(uint64 instruction) +static char *CLASS_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("CLASS.D %s, %s", ft, fs); + return img_format("CLASS.D %s, %s", ft, fs); } @@ -3741,15 +3438,15 @@ std::string NMD::CLASS_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CLASS_S(uint64 instruction) +static char *CLASS_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("CLASS.S %s, %s", ft, fs); + return img_format("CLASS.S %s, %s", ft, fs); } @@ -3763,15 +3460,15 @@ std::string NMD::CLASS_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CLO(uint64 instruction) +static char *CLO(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("CLO %s, %s", rt, rs); + return img_format("CLO %s, %s", rt, rs); } @@ -3785,15 +3482,15 @@ std::string NMD::CLO(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CLZ(uint64 instruction) +static char *CLZ(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("CLZ %s, %s", rt, rs); + return img_format("CLZ %s, %s", rt, rs); } @@ -3807,17 +3504,17 @@ std::string NMD::CLZ(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_AF_D(uint64 instruction) +static char *CMP_AF_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs 
= FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.AF.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.AF.D %s, %s, %s", fd, fs, ft); } @@ -3831,17 +3528,17 @@ std::string NMD::CMP_AF_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_AF_S(uint64 instruction) +static char *CMP_AF_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.AF.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.AF.S %s, %s, %s", fd, fs, ft); } @@ -3855,17 +3552,17 @@ std::string NMD::CMP_AF_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_EQ_D(uint64 instruction) +static char *CMP_EQ_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.EQ.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.EQ.D %s, %s, %s", fd, fs, ft); } @@ -3878,15 +3575,15 @@ std::string NMD::CMP_EQ_D(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::CMP_EQ_PH(uint64 instruction) +static char *CMP_EQ_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("CMP.EQ.PH %s, %s", rs, rt); + return img_format("CMP.EQ.PH %s, %s", rs, rt); } @@ -3900,17 +3597,17 @@ std::string NMD::CMP_EQ_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_EQ_S(uint64 instruction) +static char *CMP_EQ_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.EQ.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.EQ.S %s, %s, %s", fd, fs, ft); } @@ -3924,17 +3621,17 @@ std::string NMD::CMP_EQ_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_LE_D(uint64 instruction) +static char *CMP_LE_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = 
FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.LE.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.LE.D %s, %s, %s", fd, fs, ft); } @@ -3947,15 +3644,15 @@ std::string NMD::CMP_LE_D(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::CMP_LE_PH(uint64 instruction) +static char *CMP_LE_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("CMP.LE.PH %s, %s", rs, rt); + return img_format("CMP.LE.PH %s, %s", rs, rt); } @@ -3969,17 +3666,17 @@ std::string NMD::CMP_LE_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_LE_S(uint64 instruction) +static char *CMP_LE_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.LE.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.LE.S %s, %s, %s", fd, fs, ft); } @@ -3993,17 +3690,17 @@ std::string NMD::CMP_LE_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_LT_D(uint64 instruction) +static char *CMP_LT_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.LT.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.LT.D %s, %s, %s", fd, fs, ft); } @@ -4016,15 +3713,15 @@ std::string NMD::CMP_LT_D(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::CMP_LT_PH(uint64 instruction) +static char *CMP_LT_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("CMP.LT.PH %s, %s", rs, rt); + return img_format("CMP.LT.PH %s, %s", rs, rt); } @@ -4038,17 +3735,17 @@ std::string NMD::CMP_LT_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_LT_S(uint64 instruction) +static char *CMP_LT_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.LT.S %s, %s, %s", fd, fs, 
ft); + return img_format("CMP.LT.S %s, %s, %s", fd, fs, ft); } @@ -4062,17 +3759,17 @@ std::string NMD::CMP_LT_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_NE_D(uint64 instruction) +static char *CMP_NE_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.NE.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.NE.D %s, %s, %s", fd, fs, ft); } @@ -4086,17 +3783,17 @@ std::string NMD::CMP_NE_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_NE_S(uint64 instruction) +static char *CMP_NE_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.NE.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.NE.S %s, %s, %s", fd, fs, ft); } @@ -4110,17 +3807,17 @@ std::string NMD::CMP_NE_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_OR_D(uint64 instruction) +static char *CMP_OR_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.OR.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.OR.D %s, %s, %s", fd, fs, ft); } @@ -4134,17 +3831,17 @@ std::string NMD::CMP_OR_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_OR_S(uint64 instruction) +static char *CMP_OR_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.OR.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.OR.S %s, %s, %s", fd, fs, ft); } @@ -4158,17 +3855,17 @@ std::string NMD::CMP_OR_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SAF_D(uint64 instruction) +static char *CMP_SAF_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = 
FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SAF.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.SAF.D %s, %s, %s", fd, fs, ft); } @@ -4182,17 +3879,17 @@ std::string NMD::CMP_SAF_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SAF_S(uint64 instruction) +static char *CMP_SAF_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SAF.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.SAF.S %s, %s, %s", fd, fs, ft); } @@ -4206,17 +3903,17 @@ std::string NMD::CMP_SAF_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SEQ_D(uint64 instruction) +static char *CMP_SEQ_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SEQ.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.SEQ.D %s, %s, %s", fd, fs, ft); } @@ -4230,17 +3927,17 @@ std::string NMD::CMP_SEQ_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SEQ_S(uint64 instruction) +static char *CMP_SEQ_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SEQ.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.SEQ.S %s, %s, %s", fd, fs, ft); } @@ -4254,17 +3951,17 @@ std::string NMD::CMP_SEQ_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SLE_D(uint64 instruction) +static char *CMP_SLE_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SLE.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.SLE.D %s, %s, %s", fd, fs, ft); } @@ -4278,17 +3975,17 @@ std::string NMD::CMP_SLE_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SLE_S(uint64 instruction) +static char *CMP_SLE_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - 
std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SLE.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.SLE.S %s, %s, %s", fd, fs, ft); } @@ -4302,17 +3999,17 @@ std::string NMD::CMP_SLE_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SLT_D(uint64 instruction) +static char *CMP_SLT_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SLT.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.SLT.D %s, %s, %s", fd, fs, ft); } @@ -4326,17 +4023,17 @@ std::string NMD::CMP_SLT_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SLT_S(uint64 instruction) +static char *CMP_SLT_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SLT.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.SLT.S %s, %s, %s", fd, fs, ft); } @@ -4350,17 +4047,17 @@ std::string NMD::CMP_SLT_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SNE_D(uint64 instruction) +static char *CMP_SNE_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SNE.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.SNE.D %s, %s, %s", fd, fs, ft); } @@ -4374,17 +4071,17 @@ std::string NMD::CMP_SNE_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SNE_S(uint64 instruction) +static char *CMP_SNE_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SNE.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.SNE.S %s, %s, %s", fd, fs, ft); } @@ -4398,17 +4095,17 @@ std::string NMD::CMP_SNE_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SOR_D(uint64 instruction) +static char *CMP_SOR_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = 
extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SOR.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.SOR.D %s, %s, %s", fd, fs, ft); } @@ -4422,17 +4119,17 @@ std::string NMD::CMP_SOR_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SOR_S(uint64 instruction) +static char *CMP_SOR_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SOR.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.SOR.S %s, %s, %s", fd, fs, ft); } @@ -4446,17 +4143,17 @@ std::string NMD::CMP_SOR_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SUEQ_D(uint64 instruction) +static char *CMP_SUEQ_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SUEQ.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.SUEQ.D %s, %s, %s", fd, fs, ft); } @@ -4470,17 +4167,17 @@ std::string NMD::CMP_SUEQ_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SUEQ_S(uint64 instruction) +static char *CMP_SUEQ_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SUEQ.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.SUEQ.S %s, %s, %s", fd, fs, ft); } @@ -4494,17 +4191,17 @@ std::string NMD::CMP_SUEQ_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SULE_D(uint64 instruction) +static char *CMP_SULE_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SULE.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.SULE.D %s, %s, %s", fd, fs, ft); } @@ -4518,17 +4215,17 @@ std::string NMD::CMP_SULE_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SULE_S(uint64 
instruction) +static char *CMP_SULE_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SULE.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.SULE.S %s, %s, %s", fd, fs, ft); } @@ -4542,17 +4239,17 @@ std::string NMD::CMP_SULE_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SULT_D(uint64 instruction) +static char *CMP_SULT_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SULT.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.SULT.D %s, %s, %s", fd, fs, ft); } @@ -4566,17 +4263,17 @@ std::string NMD::CMP_SULT_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SULT_S(uint64 instruction) +static char *CMP_SULT_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SULT.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.SULT.S %s, %s, %s", fd, fs, ft); } @@ -4590,17 +4287,17 @@ std::string NMD::CMP_SULT_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SUN_D(uint64 instruction) +static char *CMP_SUN_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SUN.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.SUN.D %s, %s, %s", fd, fs, ft); } @@ -4614,17 +4311,17 @@ std::string NMD::CMP_SUN_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SUNE_D(uint64 instruction) +static char *CMP_SUNE_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SUNE.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.SUNE.D %s, %s, 
%s", fd, fs, ft); } @@ -4638,17 +4335,17 @@ std::string NMD::CMP_SUNE_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SUNE_S(uint64 instruction) +static char *CMP_SUNE_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SUNE.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.SUNE.S %s, %s, %s", fd, fs, ft); } @@ -4662,17 +4359,17 @@ std::string NMD::CMP_SUNE_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_SUN_S(uint64 instruction) +static char *CMP_SUN_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.SUN.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.SUN.S %s, %s, %s", fd, fs, ft); } @@ -4686,17 +4383,17 @@ std::string NMD::CMP_SUN_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_UEQ_D(uint64 instruction) +static char *CMP_UEQ_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.UEQ.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.UEQ.D %s, %s, %s", fd, fs, ft); } @@ -4710,17 +4407,17 @@ std::string NMD::CMP_UEQ_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_UEQ_S(uint64 instruction) +static char *CMP_UEQ_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.UEQ.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.UEQ.S %s, %s, %s", fd, fs, ft); } @@ -4734,17 +4431,17 @@ std::string NMD::CMP_UEQ_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_ULE_D(uint64 instruction) +static char *CMP_ULE_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); 
+ const char *ft = FPR(ft_value, info); - return img::format("CMP.ULE.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.ULE.D %s, %s, %s", fd, fs, ft); } @@ -4758,17 +4455,17 @@ std::string NMD::CMP_ULE_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_ULE_S(uint64 instruction) +static char *CMP_ULE_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.ULE.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.ULE.S %s, %s, %s", fd, fs, ft); } @@ -4782,17 +4479,17 @@ std::string NMD::CMP_ULE_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_ULT_D(uint64 instruction) +static char *CMP_ULT_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.ULT.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.ULT.D %s, %s, %s", fd, fs, ft); } @@ -4806,17 +4503,17 @@ std::string NMD::CMP_ULT_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_ULT_S(uint64 instruction) +static char *CMP_ULT_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.ULT.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.ULT.S %s, %s, %s", fd, fs, ft); } @@ -4830,17 +4527,17 @@ std::string NMD::CMP_ULT_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_UN_D(uint64 instruction) +static char *CMP_UN_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.UN.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.UN.D %s, %s, %s", fd, fs, ft); } @@ -4854,17 +4551,17 @@ std::string NMD::CMP_UN_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_UNE_D(uint64 instruction) +static char *CMP_UNE_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = 
FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.UNE.D %s, %s, %s", fd, fs, ft); + return img_format("CMP.UNE.D %s, %s, %s", fd, fs, ft); } @@ -4878,17 +4575,17 @@ std::string NMD::CMP_UNE_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_UNE_S(uint64 instruction) +static char *CMP_UNE_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.UNE.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.UNE.S %s, %s, %s", fd, fs, ft); } @@ -4902,17 +4599,17 @@ std::string NMD::CMP_UNE_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMP_UN_S(uint64 instruction) +static char *CMP_UN_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("CMP.UN.S %s, %s, %s", fd, fs, ft); + return img_format("CMP.UN.S %s, %s, %s", fd, fs, ft); } @@ -4927,17 +4624,17 @@ std::string NMD::CMP_UN_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMPGDU_EQ_QB(uint64 instruction) +static char *CMPGDU_EQ_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("CMPGDU.EQ.QB %s, %s, %s", rd, rs, rt); + return img_format("CMPGDU.EQ.QB %s, %s, %s", rd, rs, rt); } @@ -4952,17 +4649,17 @@ std::string NMD::CMPGDU_EQ_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMPGDU_LE_QB(uint64 instruction) +static char *CMPGDU_LE_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("CMPGDU.LE.QB %s, %s, %s", rd, rs, rt); + return img_format("CMPGDU.LE.QB %s, %s, %s", rd, rs, rt); } @@ -4977,17 +4674,17 @@ std::string NMD::CMPGDU_LE_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMPGDU_LT_QB(uint64 instruction) +static char *CMPGDU_LT_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 
rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("CMPGDU.LT.QB %s, %s, %s", rd, rs, rt); + return img_format("CMPGDU.LT.QB %s, %s, %s", rd, rs, rt); } @@ -5002,17 +4699,17 @@ std::string NMD::CMPGDU_LT_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMPGU_EQ_QB(uint64 instruction) +static char *CMPGU_EQ_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("CMPGU.EQ.QB %s, %s, %s", rd, rs, rt); + return img_format("CMPGU.EQ.QB %s, %s, %s", rd, rs, rt); } @@ -5027,17 +4724,17 @@ std::string NMD::CMPGU_EQ_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMPGU_LE_QB(uint64 instruction) +static char *CMPGU_LE_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("CMPGU.LE.QB %s, %s, %s", rd, rs, rt); + return img_format("CMPGU.LE.QB %s, %s, %s", rd, rs, rt); } @@ -5052,17 +4749,17 @@ std::string NMD::CMPGU_LE_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CMPGU_LT_QB(uint64 instruction) +static char *CMPGU_LT_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("CMPGU.LT.QB %s, %s, %s", rd, rs, rt); + return img_format("CMPGU.LT.QB %s, %s, %s", rd, rs, rt); } @@ -5076,15 +4773,15 @@ std::string NMD::CMPGU_LT_QB(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::CMPU_EQ_QB(uint64 instruction) +static char *CMPU_EQ_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("CMPU.EQ.QB %s, %s", rs, rt); + return img_format("CMPU.EQ.QB %s, %s", rs, rt); } @@ -5098,15 +4795,15 @@ std::string NMD::CMPU_EQ_QB(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::CMPU_LE_QB(uint64 instruction) +static char *CMPU_LE_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = 
extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("CMPU.LE.QB %s, %s", rs, rt); + return img_format("CMPU.LE.QB %s, %s", rs, rt); } @@ -5120,15 +4817,15 @@ std::string NMD::CMPU_LE_QB(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::CMPU_LT_QB(uint64 instruction) +static char *CMPU_LT_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("CMPU.LT.QB %s, %s", rs, rt); + return img_format("CMPU.LT.QB %s, %s", rs, rt); } @@ -5142,13 +4839,12 @@ std::string NMD::CMPU_LT_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::COP2_1(uint64 instruction) +static char *COP2_1(uint64 instruction, Dis_info *info) { uint64 cofun_value = extract_cofun_25_24_23(instruction); - std::string cofun = IMMEDIATE(copy(cofun_value)); - return img::format("COP2_1 %s", cofun); + return img_format("COP2_1 0x%" PRIx64, cofun_value); } @@ -5162,15 +4858,14 @@ std::string NMD::COP2_1(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CTC1(uint64 instruction) +static char *CTC1(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 cs_value = extract_cs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string cs = CPR(copy(cs_value)); + const char *rt = GPR(rt_value, info); - return img::format("CTC1 %s, %s", rt, cs); + return img_format("CTC1 %s, CP%" PRIu64, rt, cs_value); } @@ -5184,15 +4879,14 @@ std::string NMD::CTC1(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CTC2(uint64 instruction) +static char *CTC2(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 cs_value = extract_cs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string cs = CPR(copy(cs_value)); + const char *rt = GPR(rt_value, info); - return img::format("CTC2 %s, %s", rt, cs); + return img_format("CTC2 %s, CP%" PRIu64, rt, cs_value); } @@ -5206,15 +4900,15 @@ std::string NMD::CTC2(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CVT_D_L(uint64 instruction) +static char *CVT_D_L(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("CVT.D.L %s, %s", ft, fs); + return img_format("CVT.D.L %s, %s", ft, fs); } @@ -5228,15 +4922,15 @@ std::string NMD::CVT_D_L(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CVT_D_S(uint64 instruction) +static char *CVT_D_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("CVT.D.S %s, %s", ft, fs); + return img_format("CVT.D.S %s, 
%s", ft, fs); } @@ -5250,15 +4944,15 @@ std::string NMD::CVT_D_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CVT_D_W(uint64 instruction) +static char *CVT_D_W(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("CVT.D.W %s, %s", ft, fs); + return img_format("CVT.D.W %s, %s", ft, fs); } @@ -5272,15 +4966,15 @@ std::string NMD::CVT_D_W(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CVT_L_D(uint64 instruction) +static char *CVT_L_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("CVT.L.D %s, %s", ft, fs); + return img_format("CVT.L.D %s, %s", ft, fs); } @@ -5294,15 +4988,15 @@ std::string NMD::CVT_L_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CVT_L_S(uint64 instruction) +static char *CVT_L_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("CVT.L.S %s, %s", ft, fs); + return img_format("CVT.L.S %s, %s", ft, fs); } @@ -5316,15 +5010,15 @@ std::string NMD::CVT_L_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CVT_S_D(uint64 instruction) +static char *CVT_S_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("CVT.S.D %s, %s", ft, fs); + return img_format("CVT.S.D %s, %s", ft, fs); } @@ -5338,15 +5032,15 @@ std::string NMD::CVT_S_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CVT_S_L(uint64 instruction) +static char *CVT_S_L(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("CVT.S.L %s, %s", ft, fs); + return img_format("CVT.S.L %s, %s", ft, fs); } @@ -5360,15 +5054,15 @@ std::string NMD::CVT_S_L(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CVT_S_PL(uint64 instruction) +static char *CVT_S_PL(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("CVT.S.PL %s, %s", ft, fs); + return img_format("CVT.S.PL %s, %s", ft, fs); } @@ -5382,15 +5076,15 @@ std::string NMD::CVT_S_PL(uint64 instruction) * rs ----- * rd ----- */ 
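/*
 * [Editor's illustrative aside -- not part of the patch.]  Every hunk in this
 * range applies the same mechanical C++-to-C conversion: the std::string
 * returning NMD:: member functions become static C functions returning a
 * heap-allocated char *, img::format() becomes img_format(), and bare string
 * literals (e.g. "DERET ") are wrapped in g_strdup() so every return path
 * hands the caller memory it releases with g_free().  The sketch below shows
 * one plausible shape for such a formatting helper, assuming it is a thin
 * wrapper around GLib's g_strdup_vprintf(); the definition the file actually
 * uses lives outside these hunks and may differ in detail.
 */
#include <glib.h>
#include <stdarg.h>

static char *img_format(const char *format, ...) G_GNUC_PRINTF(1, 2);

static char *img_format(const char *format, ...)
{
    char *buffer;
    va_list args;

    va_start(args, format);
    buffer = g_strdup_vprintf(format, args);    /* heap copy, caller frees */
    va_end(args);

    return buffer;
}

/*
 * Hedged usage sketch: a dispatcher would consume the result roughly like
 * this (the surrounding decode machinery is assumed, not shown in this hunk).
 *
 *     char *s = CVT_D_S(instruction, info);
 *     ...emit s through the disassembler's output callback...
 *     g_free(s);
 */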
-std::string NMD::CVT_S_PU(uint64 instruction) +static char *CVT_S_PU(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("CVT.S.PU %s, %s", ft, fs); + return img_format("CVT.S.PU %s, %s", ft, fs); } @@ -5404,15 +5098,15 @@ std::string NMD::CVT_S_PU(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CVT_S_W(uint64 instruction) +static char *CVT_S_W(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("CVT.S.W %s, %s", ft, fs); + return img_format("CVT.S.W %s, %s", ft, fs); } @@ -5426,15 +5120,15 @@ std::string NMD::CVT_S_W(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CVT_W_D(uint64 instruction) +static char *CVT_W_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("CVT.W.D %s, %s", ft, fs); + return img_format("CVT.W.D %s, %s", ft, fs); } @@ -5448,15 +5142,15 @@ std::string NMD::CVT_W_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::CVT_W_S(uint64 instruction) +static char *CVT_W_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("CVT.W.S %s, %s", ft, fs); + return img_format("CVT.W.S %s, %s", ft, fs); } @@ -5470,15 +5164,14 @@ std::string NMD::CVT_W_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DADDIU_48_(uint64 instruction) +static char *DADDIU_48_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_41_40_39_38_37(instruction); int64 s_value = extract_s__se31_15_to_0_31_to_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); + const char *rt = GPR(rt_value, info); - return img::format("DADDIU %s, %s", rt, s); + return img_format("DADDIU %s, %" PRId64, rt, s_value); } @@ -5492,17 +5185,17 @@ std::string NMD::DADDIU_48_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DADDIU_NEG_(uint64 instruction) +static char *DADDIU_NEG_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string u = IMMEDIATE(neg_copy(u_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); + int64 u = neg_copy(u_value); - return img::format("DADDIU %s, %s, %s", rt, rs, u); + return img_format("DADDIU %s, %s, %" PRId64, rt, rs, u); } @@ -5516,17 +5209,16 @@ std::string 
NMD::DADDIU_NEG_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DADDIU_U12_(uint64 instruction) +static char *DADDIU_U12_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("DADDIU %s, %s, %s", rt, rs, u); + return img_format("DADDIU %s, %s, 0x%" PRIx64, rt, rs, u_value); } @@ -5540,17 +5232,17 @@ std::string NMD::DADDIU_U12_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DADD(uint64 instruction) +static char *DADD(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DADD %s, %s, %s", rd, rs, rt); + return img_format("DADD %s, %s, %s", rd, rs, rt); } @@ -5564,17 +5256,17 @@ std::string NMD::DADD(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DADDU(uint64 instruction) +static char *DADDU(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DADDU %s, %s, %s", rd, rs, rt); + return img_format("DADDU %s, %s, %s", rd, rs, rt); } @@ -5588,15 +5280,15 @@ std::string NMD::DADDU(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DCLO(uint64 instruction) +static char *DCLO(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("DCLO %s, %s", rt, rs); + return img_format("DCLO %s, %s", rt, rs); } @@ -5610,15 +5302,15 @@ std::string NMD::DCLO(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DCLZ(uint64 instruction) +static char *DCLZ(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("DCLZ %s, %s", rt, rs); + return img_format("DCLZ %s, %s", rt, rs); } @@ -5632,17 +5324,17 @@ std::string NMD::DCLZ(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DDIV(uint64 instruction) +static char *DDIV(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 
rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DDIV %s, %s, %s", rd, rs, rt); + return img_format("DDIV %s, %s, %s", rd, rs, rt); } @@ -5656,17 +5348,17 @@ std::string NMD::DDIV(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DDIVU(uint64 instruction) +static char *DDIVU(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DDIVU %s, %s, %s", rd, rs, rt); + return img_format("DDIVU %s, %s, %s", rd, rs, rt); } @@ -5680,11 +5372,11 @@ std::string NMD::DDIVU(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DERET(uint64 instruction) +static char *DERET(uint64 instruction, Dis_info *info) { (void)instruction; - return "DERET "; + return g_strdup("DERET "); } @@ -5698,19 +5390,19 @@ std::string NMD::DERET(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DEXTM(uint64 instruction) +static char *DEXTM(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 msbd_value = extract_msbt_10_9_8_7_6(instruction); uint64 lsb_value = extract_lsb_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string lsb = IMMEDIATE(copy(lsb_value)); - std::string msbd = IMMEDIATE(encode_msbd_from_size(msbd_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); + uint64 msbd = encode_msbd_from_size(msbd_value); - return img::format("DEXTM %s, %s, %s, %s", rt, rs, lsb, msbd); + return img_format("DEXTM %s, %s, 0x%" PRIx64 ", 0x%" PRIx64, + rt, rs, lsb_value, msbd); } @@ -5724,19 +5416,19 @@ std::string NMD::DEXTM(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DEXT(uint64 instruction) +static char *DEXT(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 msbd_value = extract_msbt_10_9_8_7_6(instruction); uint64 lsb_value = extract_lsb_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string lsb = IMMEDIATE(copy(lsb_value)); - std::string msbd = IMMEDIATE(encode_msbd_from_size(msbd_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); + uint64 msbd = encode_msbd_from_size(msbd_value); - return img::format("DEXT %s, %s, %s, %s", rt, rs, lsb, msbd); + return img_format("DEXT %s, %s, 0x%" PRIx64 ", 0x%" PRIx64, + rt, rs, lsb_value, msbd); } @@ -5750,19 +5442,19 @@ std::string NMD::DEXT(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DEXTU(uint64 instruction) +static char *DEXTU(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 msbd_value = extract_msbt_10_9_8_7_6(instruction); 
uint64 lsb_value = extract_lsb_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string lsb = IMMEDIATE(copy(lsb_value)); - std::string msbd = IMMEDIATE(encode_msbd_from_size(msbd_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); + uint64 msbd = encode_msbd_from_size(msbd_value); - return img::format("DEXTU %s, %s, %s, %s", rt, rs, lsb, msbd); + return img_format("DEXTU %s, %s, 0x%" PRIx64 ", 0x%" PRIx64, + rt, rs, lsb_value, msbd); } @@ -5776,20 +5468,19 @@ std::string NMD::DEXTU(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DINSM(uint64 instruction) +static char *DINSM(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 msbd_value = extract_msbt_10_9_8_7_6(instruction); uint64 lsb_value = extract_lsb_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string pos = IMMEDIATE(encode_lsb_from_pos_and_size(lsb_value)); - std::string size = IMMEDIATE(encode_lsb_from_pos_and_size(msbd_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); /* !!!!!!!!!! - no conversion function */ - return img::format("DINSM %s, %s, %s, %s", rt, rs, pos, size); + return img_format("DINSM %s, %s, 0x%" PRIx64 ", 0x%" PRIx64, + rt, rs, lsb_value, msbd_value); /* hand edited */ } @@ -5804,20 +5495,19 @@ std::string NMD::DINSM(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DINS(uint64 instruction) +static char *DINS(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 msbd_value = extract_msbt_10_9_8_7_6(instruction); uint64 lsb_value = extract_lsb_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string pos = IMMEDIATE(encode_lsb_from_pos_and_size(lsb_value)); - std::string size = IMMEDIATE(encode_lsb_from_pos_and_size(msbd_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); /* !!!!!!!!!! - no conversion function */ - return img::format("DINS %s, %s, %s, %s", rt, rs, pos, size); + return img_format("DINS %s, %s, 0x%" PRIx64 ", 0x%" PRIx64, + rt, rs, lsb_value, msbd_value); /* hand edited */ } @@ -5832,20 +5522,19 @@ std::string NMD::DINS(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DINSU(uint64 instruction) +static char *DINSU(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 msbd_value = extract_msbt_10_9_8_7_6(instruction); uint64 lsb_value = extract_lsb_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string pos = IMMEDIATE(encode_lsb_from_pos_and_size(lsb_value)); - std::string size = IMMEDIATE(encode_lsb_from_pos_and_size(msbd_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); /* !!!!!!!!!! 
- no conversion function */ - return img::format("DINSU %s, %s, %s, %s", rt, rs, pos, size); + return img_format("DINSU %s, %s, 0x%" PRIx64 ", 0x%" PRIx64, + rt, rs, lsb_value, msbd_value); /* hand edited */ } @@ -5860,13 +5549,13 @@ std::string NMD::DINSU(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DI(uint64 instruction) +static char *DI(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); - std::string rt = GPR(copy(rt_value)); + const char *rt = GPR(rt_value, info); - return img::format("DI %s", rt); + return img_format("DI %s", rt); } @@ -5880,17 +5569,17 @@ std::string NMD::DI(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DIV(uint64 instruction) +static char *DIV(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DIV %s, %s, %s", rd, rs, rt); + return img_format("DIV %s, %s, %s", rd, rs, rt); } @@ -5904,17 +5593,17 @@ std::string NMD::DIV(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DIV_D(uint64 instruction) +static char *DIV_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("DIV.D %s, %s, %s", fd, fs, ft); + return img_format("DIV.D %s, %s, %s", fd, fs, ft); } @@ -5928,17 +5617,17 @@ std::string NMD::DIV_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DIV_S(uint64 instruction) +static char *DIV_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("DIV.S %s, %s, %s", fd, fs, ft); + return img_format("DIV.S %s, %s, %s", fd, fs, ft); } @@ -5952,17 +5641,17 @@ std::string NMD::DIV_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DIVU(uint64 instruction) +static char *DIVU(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DIVU %s, %s, %s", rd, rs, rt); + return img_format("DIVU %s, %s, %s", rd, rs, rt); } @@ -5976,19 +5665,18 @@ std::string NMD::DIVU(uint64 instruction) * rs ----- * rd ----- */ 
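/*
 * [Editor's illustrative aside -- not part of the patch.]  Two further
 * patterns recur throughout these hunks: register-name helpers such as
 * GPR()/FPR() now take the extracted field plus a Dis_info * and return a
 * const char * instead of a std::string, and immediates are no longer
 * wrapped in IMMEDIATE() strings but printed directly with PRIx64/PRId64
 * (with helpers such as neg_copy() or encode_msbd_from_size() returning
 * plain integers).  The sketch below shows one plausible shape for such a
 * lookup helper; the table contents, the opaque struct and the error path
 * are assumptions, not the file's actual definitions.
 */
#include <stdint.h>

typedef uint64_t uint64;            /* mirrors the file's integer typedef */
typedef struct Dis_info Dis_info;   /* opaque here; defined elsewhere */

static const char *GPR(uint64 reg, Dis_info *info)
{
    /* Illustrative ABI names for the 32 general-purpose registers. */
    static const char *const gpr_names[32] = {
        "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
        "a4",   "a5", "a6", "a7", "t0", "t1", "t2", "t3",
        "s0",   "s1", "s2", "s3", "s4", "s5", "s6", "s7",
        "t8",   "t9", "k0", "k1", "gp", "sp", "fp", "ra",
    };

    (void)info;         /* the real helper may report bad fields via info */
    return (reg < 32) ? gpr_names[reg] : "";
}

/*
 * Hedged sketch of the sign-negating immediate helper implied by hunks such
 * as DADDIU_NEG_: an unsigned 12-bit field is emitted as a negative decimal.
 */
static int64_t neg_copy(uint64 value)
{
    return -(int64_t)value;
}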
-std::string NMD::DLSA(uint64 instruction) +static char *DLSA(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); uint64 u2_value = extract_u2_10_9(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); - std::string u2 = IMMEDIATE(copy(u2_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DLSA %s, %s, %s, %s", rd, rs, rt, u2); + return img_format("DLSA %s, %s, %s, 0x%" PRIx64, rd, rs, rt, u2_value); } @@ -6002,15 +5690,14 @@ std::string NMD::DLSA(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DLUI_48_(uint64 instruction) +static char *DLUI_48_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_41_40_39_38_37(instruction); uint64 u_value = extract_u_31_to_0__s32(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); - return img::format("DLUI %s, %s", rt, u); + return img_format("DLUI %s, 0x%" PRIx64, rt, u_value); } @@ -6024,17 +5711,16 @@ std::string NMD::DLUI_48_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DMFC0(uint64 instruction) +static char *DMFC0(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 c0s_value = extract_c0s_20_19_18_17_16(instruction); uint64 sel_value = extract_sel_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string c0s = CPR(copy(c0s_value)); - std::string sel = IMMEDIATE(copy(sel_value)); + const char *rt = GPR(rt_value, info); - return img::format("DMFC0 %s, %s, %s", rt, c0s, sel); + return img_format("DMFC0 %s, CP%" PRIu64 ", 0x%" PRIx64, + rt, c0s_value, sel_value); } @@ -6048,15 +5734,15 @@ std::string NMD::DMFC0(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DMFC1(uint64 instruction) +static char *DMFC1(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string fs = FPR(copy(fs_value)); + const char *rt = GPR(rt_value, info); + const char *fs = FPR(fs_value, info); - return img::format("DMFC1 %s, %s", rt, fs); + return img_format("DMFC1 %s, %s", rt, fs); } @@ -6070,15 +5756,14 @@ std::string NMD::DMFC1(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DMFC2(uint64 instruction) +static char *DMFC2(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 cs_value = extract_cs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string cs = CPR(copy(cs_value)); + const char *rt = GPR(rt_value, info); - return img::format("DMFC2 %s, %s", rt, cs); + return img_format("DMFC2 %s, CP%" PRIu64, rt, cs_value); } @@ -6092,17 +5777,16 @@ std::string NMD::DMFC2(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DMFGC0(uint64 instruction) +static char *DMFGC0(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 c0s_value = extract_c0s_20_19_18_17_16(instruction); uint64 sel_value = extract_sel_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string c0s = CPR(copy(c0s_value)); - std::string sel = 
IMMEDIATE(copy(sel_value)); + const char *rt = GPR(rt_value, info); - return img::format("DMFGC0 %s, %s, %s", rt, c0s, sel); + return img_format("DMFGC0 %s, CP%" PRIu64 ", 0x%" PRIx64, + rt, c0s_value, sel_value); } @@ -6116,17 +5800,17 @@ std::string NMD::DMFGC0(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DMOD(uint64 instruction) +static char *DMOD(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DMOD %s, %s, %s", rd, rs, rt); + return img_format("DMOD %s, %s, %s", rd, rs, rt); } @@ -6140,17 +5824,17 @@ std::string NMD::DMOD(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DMODU(uint64 instruction) +static char *DMODU(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DMODU %s, %s, %s", rd, rs, rt); + return img_format("DMODU %s, %s, %s", rd, rs, rt); } @@ -6164,17 +5848,16 @@ std::string NMD::DMODU(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DMTC0(uint64 instruction) +static char *DMTC0(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 c0s_value = extract_c0s_20_19_18_17_16(instruction); uint64 sel_value = extract_sel_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string c0s = CPR(copy(c0s_value)); - std::string sel = IMMEDIATE(copy(sel_value)); + const char *rt = GPR(rt_value, info); - return img::format("DMTC0 %s, %s, %s", rt, c0s, sel); + return img_format("DMTC0 %s, CP%" PRIu64 ", 0x%" PRIx64, + rt, c0s_value, sel_value); } @@ -6188,15 +5871,15 @@ std::string NMD::DMTC0(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DMTC1(uint64 instruction) +static char *DMTC1(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string fs = FPR(copy(fs_value)); + const char *rt = GPR(rt_value, info); + const char *fs = FPR(fs_value, info); - return img::format("DMTC1 %s, %s", rt, fs); + return img_format("DMTC1 %s, %s", rt, fs); } @@ -6210,15 +5893,14 @@ std::string NMD::DMTC1(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DMTC2(uint64 instruction) +static char *DMTC2(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 cs_value = extract_cs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string cs = CPR(copy(cs_value)); + const char *rt = GPR(rt_value, info); - return img::format("DMTC2 %s, %s", rt, cs); + return img_format("DMTC2 %s, CP%" PRIu64, rt, cs_value); } @@ -6232,17 +5914,16 @@ std::string NMD::DMTC2(uint64 instruction) * rs ----- * rd ----- */ -std::string 
NMD::DMTGC0(uint64 instruction) +static char *DMTGC0(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 c0s_value = extract_c0s_20_19_18_17_16(instruction); uint64 sel_value = extract_sel_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string c0s = CPR(copy(c0s_value)); - std::string sel = IMMEDIATE(copy(sel_value)); + const char *rt = GPR(rt_value, info); - return img::format("DMTGC0 %s, %s, %s", rt, c0s, sel); + return img_format("DMTGC0 %s, CP%" PRIu64 ", 0x%" PRIx64, + rt, c0s_value, sel_value); } @@ -6256,13 +5937,13 @@ std::string NMD::DMTGC0(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DMT(uint64 instruction) +static char *DMT(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); - std::string rt = GPR(copy(rt_value)); + const char *rt = GPR(rt_value, info); - return img::format("DMT %s", rt); + return img_format("DMT %s", rt); } @@ -6276,17 +5957,17 @@ std::string NMD::DMT(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DMUH(uint64 instruction) +static char *DMUH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DMUH %s, %s, %s", rd, rs, rt); + return img_format("DMUH %s, %s, %s", rd, rs, rt); } @@ -6300,17 +5981,17 @@ std::string NMD::DMUH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DMUHU(uint64 instruction) +static char *DMUHU(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DMUHU %s, %s, %s", rd, rs, rt); + return img_format("DMUHU %s, %s, %s", rd, rs, rt); } @@ -6324,17 +6005,17 @@ std::string NMD::DMUHU(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DMUL(uint64 instruction) +static char *DMUL(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DMUL %s, %s, %s", rd, rs, rt); + return img_format("DMUL %s, %s, %s", rd, rs, rt); } @@ -6348,17 +6029,17 @@ std::string NMD::DMUL(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DMULU(uint64 instruction) +static char *DMULU(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = 
GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DMULU %s, %s, %s", rd, rs, rt); + return img_format("DMULU %s, %s, %s", rd, rs, rt); } @@ -6373,17 +6054,17 @@ std::string NMD::DMULU(uint64 instruction) * rs ----- * ac -- */ -std::string NMD::DPA_W_PH(uint64 instruction) +static char *DPA_W_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DPA.W.PH %s, %s, %s", ac, rs, rt); + return img_format("DPA.W.PH %s, %s, %s", ac, rs, rt); } @@ -6397,17 +6078,17 @@ std::string NMD::DPA_W_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DPAQ_SA_L_W(uint64 instruction) +static char *DPAQ_SA_L_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DPAQ_SA.L.W %s, %s, %s", ac, rs, rt); + return img_format("DPAQ_SA.L.W %s, %s, %s", ac, rs, rt); } @@ -6421,17 +6102,17 @@ std::string NMD::DPAQ_SA_L_W(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DPAQ_S_W_PH(uint64 instruction) +static char *DPAQ_S_W_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DPAQ_S.W.PH %s, %s, %s", ac, rs, rt); + return img_format("DPAQ_S.W.PH %s, %s, %s", ac, rs, rt); } @@ -6445,17 +6126,17 @@ std::string NMD::DPAQ_S_W_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DPAQX_SA_W_PH(uint64 instruction) +static char *DPAQX_SA_W_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DPAQX_SA.W.PH %s, %s, %s", ac, rs, rt); + return img_format("DPAQX_SA.W.PH %s, %s, %s", ac, rs, rt); } @@ -6469,17 +6150,17 @@ std::string NMD::DPAQX_SA_W_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DPAQX_S_W_PH(uint64 instruction) +static char *DPAQX_S_W_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); 
uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DPAQX_S.W.PH %s, %s, %s", ac, rs, rt); + return img_format("DPAQX_S.W.PH %s, %s, %s", ac, rs, rt); } @@ -6493,17 +6174,17 @@ std::string NMD::DPAQX_S_W_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DPAU_H_QBL(uint64 instruction) +static char *DPAU_H_QBL(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DPAU.H.QBL %s, %s, %s", ac, rs, rt); + return img_format("DPAU.H.QBL %s, %s, %s", ac, rs, rt); } @@ -6517,17 +6198,17 @@ std::string NMD::DPAU_H_QBL(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DPAU_H_QBR(uint64 instruction) +static char *DPAU_H_QBR(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DPAU.H.QBR %s, %s, %s", ac, rs, rt); + return img_format("DPAU.H.QBR %s, %s, %s", ac, rs, rt); } @@ -6541,17 +6222,17 @@ std::string NMD::DPAU_H_QBR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DPAX_W_PH(uint64 instruction) +static char *DPAX_W_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DPAX.W.PH %s, %s, %s", ac, rs, rt); + return img_format("DPAX.W.PH %s, %s, %s", ac, rs, rt); } @@ -6565,17 +6246,17 @@ std::string NMD::DPAX_W_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DPS_W_PH(uint64 instruction) +static char *DPS_W_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DPS.W.PH %s, %s, %s", ac, rs, rt); + return img_format("DPS.W.PH %s, %s, %s", ac, rs, rt); } @@ -6589,17 +6270,17 @@ std::string NMD::DPS_W_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DPSQ_SA_L_W(uint64 instruction) +static char *DPSQ_SA_L_W(uint64 instruction, Dis_info *info) { uint64 rt_value = 
extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DPSQ_SA.L.W %s, %s, %s", ac, rs, rt); + return img_format("DPSQ_SA.L.W %s, %s, %s", ac, rs, rt); } @@ -6613,17 +6294,17 @@ std::string NMD::DPSQ_SA_L_W(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DPSQ_S_W_PH(uint64 instruction) +static char *DPSQ_S_W_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DPSQ_S.W.PH %s, %s, %s", ac, rs, rt); + return img_format("DPSQ_S.W.PH %s, %s, %s", ac, rs, rt); } @@ -6637,17 +6318,17 @@ std::string NMD::DPSQ_S_W_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DPSQX_SA_W_PH(uint64 instruction) +static char *DPSQX_SA_W_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DPSQX_SA.W.PH %s, %s, %s", ac, rs, rt); + return img_format("DPSQX_SA.W.PH %s, %s, %s", ac, rs, rt); } @@ -6661,17 +6342,17 @@ std::string NMD::DPSQX_SA_W_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DPSQX_S_W_PH(uint64 instruction) +static char *DPSQX_S_W_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DPSQX_S.W.PH %s, %s, %s", ac, rs, rt); + return img_format("DPSQX_S.W.PH %s, %s, %s", ac, rs, rt); } @@ -6685,17 +6366,17 @@ std::string NMD::DPSQX_S_W_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DPSU_H_QBL(uint64 instruction) +static char *DPSU_H_QBL(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DPSU.H.QBL %s, %s, %s", ac, rs, rt); + return img_format("DPSU.H.QBL %s, %s, %s", ac, rs, rt); } @@ -6709,17 +6390,17 @@ std::string NMD::DPSU_H_QBL(uint64 instruction) * rs ----- * rd ----- 
*/ -std::string NMD::DPSU_H_QBR(uint64 instruction) +static char *DPSU_H_QBR(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DPSU.H.QBR %s, %s, %s", ac, rs, rt); + return img_format("DPSU.H.QBR %s, %s, %s", ac, rs, rt); } @@ -6733,17 +6414,17 @@ std::string NMD::DPSU_H_QBR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DPSX_W_PH(uint64 instruction) +static char *DPSX_W_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DPSX.W.PH %s, %s, %s", ac, rs, rt); + return img_format("DPSX.W.PH %s, %s, %s", ac, rs, rt); } @@ -6757,17 +6438,16 @@ std::string NMD::DPSX_W_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DROTR(uint64 instruction) +static char *DROTR(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 shift_value = extract_shift_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string shift = IMMEDIATE(copy(shift_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("DROTR %s, %s, %s", rt, rs, shift); + return img_format("DROTR %s, %s, 0x%" PRIx64, rt, rs, shift_value); } @@ -6781,17 +6461,16 @@ std::string NMD::DROTR(uint64 instruction) * rs ----- * shift ----- */ -std::string NMD::DROTR32(uint64 instruction) +static char *DROTR32(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 shift_value = extract_shift_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string shift = IMMEDIATE(copy(shift_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("DROTR32 %s, %s, %s", rt, rs, shift); + return img_format("DROTR32 %s, %s, 0x%" PRIx64, rt, rs, shift_value); } @@ -6805,17 +6484,17 @@ std::string NMD::DROTR32(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DROTRV(uint64 instruction) +static char *DROTRV(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DROTRV %s, %s, %s", rd, rs, rt); + return img_format("DROTRV %s, %s, %s", rd, rs, rt); } @@ -6829,19 +6508,18 @@ 
std::string NMD::DROTRV(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DROTX(uint64 instruction) +static char *DROTX(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 shiftx_value = extract_shiftx_11_10_9_8_7_6(instruction); uint64 shift_value = extract_shift_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string shift = IMMEDIATE(copy(shift_value)); - std::string shiftx = IMMEDIATE(copy(shiftx_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("DROTX %s, %s, %s, %s", rt, rs, shift, shiftx); + return img_format("DROTX %s, %s, 0x%" PRIx64 ", 0x%" PRIx64, + rt, rs, shift_value, shiftx_value); } @@ -6855,17 +6533,16 @@ std::string NMD::DROTX(uint64 instruction) * rs ----- * shift ----- */ -std::string NMD::DSLL(uint64 instruction) +static char *DSLL(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 shift_value = extract_shift_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string shift = IMMEDIATE(copy(shift_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("DSLL %s, %s, %s", rt, rs, shift); + return img_format("DSLL %s, %s, 0x%" PRIx64, rt, rs, shift_value); } @@ -6879,17 +6556,16 @@ std::string NMD::DSLL(uint64 instruction) * rs ----- * shift ----- */ -std::string NMD::DSLL32(uint64 instruction) +static char *DSLL32(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 shift_value = extract_shift_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string shift = IMMEDIATE(copy(shift_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("DSLL32 %s, %s, %s", rt, rs, shift); + return img_format("DSLL32 %s, %s, 0x%" PRIx64, rt, rs, shift_value); } @@ -6903,17 +6579,17 @@ std::string NMD::DSLL32(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DSLLV(uint64 instruction) +static char *DSLLV(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DSLLV %s, %s, %s", rd, rs, rt); + return img_format("DSLLV %s, %s, %s", rd, rs, rt); } @@ -6927,17 +6603,16 @@ std::string NMD::DSLLV(uint64 instruction) * rs ----- * shift ----- */ -std::string NMD::DSRA(uint64 instruction) +static char *DSRA(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 shift_value = extract_shift_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string shift = IMMEDIATE(copy(shift_value)); + const char *rt = GPR(rt_value, info); + const char *rs = 
GPR(rs_value, info); - return img::format("DSRA %s, %s, %s", rt, rs, shift); + return img_format("DSRA %s, %s, 0x%" PRIx64, rt, rs, shift_value); } @@ -6951,17 +6626,16 @@ std::string NMD::DSRA(uint64 instruction) * rs ----- * shift ----- */ -std::string NMD::DSRA32(uint64 instruction) +static char *DSRA32(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 shift_value = extract_shift_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string shift = IMMEDIATE(copy(shift_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("DSRA32 %s, %s, %s", rt, rs, shift); + return img_format("DSRA32 %s, %s, 0x%" PRIx64, rt, rs, shift_value); } @@ -6975,17 +6649,17 @@ std::string NMD::DSRA32(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DSRAV(uint64 instruction) +static char *DSRAV(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DSRAV %s, %s, %s", rd, rs, rt); + return img_format("DSRAV %s, %s, %s", rd, rs, rt); } @@ -6999,17 +6673,16 @@ std::string NMD::DSRAV(uint64 instruction) * rs ----- * shift ----- */ -std::string NMD::DSRL(uint64 instruction) +static char *DSRL(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 shift_value = extract_shift_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string shift = IMMEDIATE(copy(shift_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("DSRL %s, %s, %s", rt, rs, shift); + return img_format("DSRL %s, %s, 0x%" PRIx64, rt, rs, shift_value); } @@ -7023,17 +6696,16 @@ std::string NMD::DSRL(uint64 instruction) * rs ----- * shift ----- */ -std::string NMD::DSRL32(uint64 instruction) +static char *DSRL32(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 shift_value = extract_shift_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string shift = IMMEDIATE(copy(shift_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("DSRL32 %s, %s, %s", rt, rs, shift); + return img_format("DSRL32 %s, %s, 0x%" PRIx64, rt, rs, shift_value); } @@ -7047,17 +6719,17 @@ std::string NMD::DSRL32(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DSRLV(uint64 instruction) +static char *DSRLV(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = 
GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DSRLV %s, %s, %s", rd, rs, rt); + return img_format("DSRLV %s, %s, %s", rd, rs, rt); } @@ -7071,17 +6743,17 @@ std::string NMD::DSRLV(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DSUB(uint64 instruction) +static char *DSUB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DSUB %s, %s, %s", rd, rs, rt); + return img_format("DSUB %s, %s, %s", rd, rs, rt); } @@ -7095,17 +6767,17 @@ std::string NMD::DSUB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DSUBU(uint64 instruction) +static char *DSUBU(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("DSUBU %s, %s, %s", rd, rs, rt); + return img_format("DSUBU %s, %s, %s", rd, rs, rt); } @@ -7119,13 +6791,13 @@ std::string NMD::DSUBU(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DVPE(uint64 instruction) +static char *DVPE(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); - std::string rt = GPR(copy(rt_value)); + const char *rt = GPR(rt_value, info); - return img::format("DVPE %s", rt); + return img_format("DVPE %s", rt); } @@ -7139,13 +6811,13 @@ std::string NMD::DVPE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::DVP(uint64 instruction) +static char *DVP(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); - std::string rt = GPR(copy(rt_value)); + const char *rt = GPR(rt_value, info); - return img::format("DVP %s", rt); + return img_format("DVP %s", rt); } @@ -7159,11 +6831,11 @@ std::string NMD::DVP(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::EHB(uint64 instruction) +static char *EHB(uint64 instruction, Dis_info *info) { (void)instruction; - return "EHB "; + return g_strdup("EHB "); } @@ -7177,13 +6849,13 @@ std::string NMD::EHB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::EI(uint64 instruction) +static char *EI(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); - std::string rt = GPR(copy(rt_value)); + const char *rt = GPR(rt_value, info); - return img::format("EI %s", rt); + return img_format("EI %s", rt); } @@ -7197,13 +6869,13 @@ std::string NMD::EI(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::EMT(uint64 instruction) +static char *EMT(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); - std::string rt = GPR(copy(rt_value)); + const char *rt = GPR(rt_value, info); - return img::format("EMT %s", rt); + return img_format("EMT %s", rt); } @@ -7217,11 +6889,11 @@ std::string NMD::EMT(uint64 instruction) * rs ----- * rd ----- */ -std::string 
NMD::ERET(uint64 instruction) +static char *ERET(uint64 instruction, Dis_info *info) { (void)instruction; - return "ERET "; + return g_strdup("ERET "); } @@ -7235,11 +6907,11 @@ std::string NMD::ERET(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ERETNC(uint64 instruction) +static char *ERETNC(uint64 instruction, Dis_info *info) { (void)instruction; - return "ERETNC "; + return g_strdup("ERETNC "); } @@ -7253,13 +6925,13 @@ std::string NMD::ERETNC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::EVP(uint64 instruction) +static char *EVP(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); - std::string rt = GPR(copy(rt_value)); + const char *rt = GPR(rt_value, info); - return img::format("EVP %s", rt); + return img_format("EVP %s", rt); } @@ -7273,13 +6945,13 @@ std::string NMD::EVP(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::EVPE(uint64 instruction) +static char *EVPE(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); - std::string rt = GPR(copy(rt_value)); + const char *rt = GPR(rt_value, info); - return img::format("EVPE %s", rt); + return img_format("EVPE %s", rt); } @@ -7293,19 +6965,19 @@ std::string NMD::EVPE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::EXT(uint64 instruction) +static char *EXT(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 msbd_value = extract_msbt_10_9_8_7_6(instruction); uint64 lsb_value = extract_lsb_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string lsb = IMMEDIATE(copy(lsb_value)); - std::string msbd = IMMEDIATE(encode_msbd_from_size(msbd_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); + uint64 msbd = encode_msbd_from_size(msbd_value); - return img::format("EXT %s, %s, %s, %s", rt, rs, lsb, msbd); + return img_format("EXT %s, %s, 0x%" PRIx64 ", 0x%" PRIx64, + rt, rs, lsb_value, msbd); } @@ -7319,19 +6991,18 @@ std::string NMD::EXT(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::EXTD(uint64 instruction) +static char *EXTD(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); uint64 shift_value = extract_shift_10_9_8_7_6(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); - std::string shift = IMMEDIATE(copy(shift_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("EXTD %s, %s, %s, %s", rd, rs, rt, shift); + return img_format("EXTD %s, %s, %s, 0x%" PRIx64, rd, rs, rt, shift_value); } @@ -7345,19 +7016,18 @@ std::string NMD::EXTD(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::EXTD32(uint64 instruction) +static char *EXTD32(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); uint64 shift_value = extract_shift_10_9_8_7_6(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); - std::string shift = 
IMMEDIATE(copy(shift_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("EXTD32 %s, %s, %s, %s", rd, rs, rt, shift); + return img_format("EXTD32 %s, %s, %s, 0x%" PRIx64, rd, rs, rt, shift_value); } @@ -7371,17 +7041,16 @@ std::string NMD::EXTD32(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::EXTPDP(uint64 instruction) +static char *EXTPDP(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 size_value = extract_size_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ac = AC(copy(ac_value)); - std::string size = IMMEDIATE(copy(size_value)); + const char *rt = GPR(rt_value, info); + const char *ac = AC(ac_value, info); - return img::format("EXTPDP %s, %s, %s", rt, ac, size); + return img_format("EXTPDP %s, %s, 0x%" PRIx64, rt, ac, size_value); } @@ -7395,17 +7064,17 @@ std::string NMD::EXTPDP(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::EXTPDPV(uint64 instruction) +static char *EXTPDPV(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); - return img::format("EXTPDPV %s, %s, %s", rt, ac, rs); + return img_format("EXTPDPV %s, %s, %s", rt, ac, rs); } @@ -7419,17 +7088,16 @@ std::string NMD::EXTPDPV(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::EXTP(uint64 instruction) +static char *EXTP(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 size_value = extract_size_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ac = AC(copy(ac_value)); - std::string size = IMMEDIATE(copy(size_value)); + const char *rt = GPR(rt_value, info); + const char *ac = AC(ac_value, info); - return img::format("EXTP %s, %s, %s", rt, ac, size); + return img_format("EXTP %s, %s, 0x%" PRIx64, rt, ac, size_value); } @@ -7443,17 +7111,17 @@ std::string NMD::EXTP(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::EXTPV(uint64 instruction) +static char *EXTPV(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); - return img::format("EXTPV %s, %s, %s", rt, ac, rs); + return img_format("EXTPV %s, %s, %s", rt, ac, rs); } @@ -7468,17 +7136,16 @@ std::string NMD::EXTPV(uint64 instruction) * shift ----- * ac -- */ -std::string NMD::EXTR_RS_W(uint64 instruction) +static char *EXTR_RS_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 shift_value = extract_shift_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ac = 
AC(copy(ac_value)); - std::string shift = IMMEDIATE(copy(shift_value)); + const char *rt = GPR(rt_value, info); + const char *ac = AC(ac_value, info); - return img::format("EXTR_RS.W %s, %s, %s", rt, ac, shift); + return img_format("EXTR_RS.W %s, %s, 0x%" PRIx64, rt, ac, shift_value); } @@ -7493,17 +7160,16 @@ std::string NMD::EXTR_RS_W(uint64 instruction) * shift ----- * ac -- */ -std::string NMD::EXTR_R_W(uint64 instruction) +static char *EXTR_R_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 shift_value = extract_shift_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ac = AC(copy(ac_value)); - std::string shift = IMMEDIATE(copy(shift_value)); + const char *rt = GPR(rt_value, info); + const char *ac = AC(ac_value, info); - return img::format("EXTR_R.W %s, %s, %s", rt, ac, shift); + return img_format("EXTR_R.W %s, %s, 0x%" PRIx64, rt, ac, shift_value); } @@ -7518,17 +7184,16 @@ std::string NMD::EXTR_R_W(uint64 instruction) * shift ----- * ac -- */ -std::string NMD::EXTR_S_H(uint64 instruction) +static char *EXTR_S_H(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 shift_value = extract_shift_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ac = AC(copy(ac_value)); - std::string shift = IMMEDIATE(copy(shift_value)); + const char *rt = GPR(rt_value, info); + const char *ac = AC(ac_value, info); - return img::format("EXTR_S.H %s, %s, %s", rt, ac, shift); + return img_format("EXTR_S.H %s, %s, 0x%" PRIx64, rt, ac, shift_value); } @@ -7543,17 +7208,16 @@ std::string NMD::EXTR_S_H(uint64 instruction) * shift ----- * ac -- */ -std::string NMD::EXTR_W(uint64 instruction) +static char *EXTR_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 shift_value = extract_shift_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ac = AC(copy(ac_value)); - std::string shift = IMMEDIATE(copy(shift_value)); + const char *rt = GPR(rt_value, info); + const char *ac = AC(ac_value, info); - return img::format("EXTR.W %s, %s, %s", rt, ac, shift); + return img_format("EXTR.W %s, %s, 0x%" PRIx64, rt, ac, shift_value); } @@ -7568,17 +7232,17 @@ std::string NMD::EXTR_W(uint64 instruction) * rs ----- * ac -- */ -std::string NMD::EXTRV_RS_W(uint64 instruction) +static char *EXTRV_RS_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); - return img::format("EXTRV_RS.W %s, %s, %s", rt, ac, rs); + return img_format("EXTRV_RS.W %s, %s, %s", rt, ac, rs); } @@ -7593,17 +7257,17 @@ std::string NMD::EXTRV_RS_W(uint64 instruction) * rs ----- * ac -- */ -std::string NMD::EXTRV_R_W(uint64 instruction) +static char *EXTRV_R_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string rt = 
GPR(copy(rt_value)); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); - return img::format("EXTRV_R.W %s, %s, %s", rt, ac, rs); + return img_format("EXTRV_R.W %s, %s, %s", rt, ac, rs); } @@ -7618,17 +7282,17 @@ std::string NMD::EXTRV_R_W(uint64 instruction) * rs ----- * ac -- */ -std::string NMD::EXTRV_S_H(uint64 instruction) +static char *EXTRV_S_H(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); - return img::format("EXTRV_S.H %s, %s, %s", rt, ac, rs); + return img_format("EXTRV_S.H %s, %s, %s", rt, ac, rs); } @@ -7643,17 +7307,17 @@ std::string NMD::EXTRV_S_H(uint64 instruction) * rs ----- * ac -- */ -std::string NMD::EXTRV_W(uint64 instruction) +static char *EXTRV_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); - return img::format("EXTRV.W %s, %s, %s", rt, ac, rs); + return img_format("EXTRV.W %s, %s, %s", rt, ac, rs); } @@ -7668,19 +7332,18 @@ std::string NMD::EXTRV_W(uint64 instruction) * rd ----- * shift ----- */ -std::string NMD::EXTW(uint64 instruction) +static char *EXTW(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); uint64 shift_value = extract_shift_10_9_8_7_6(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); - std::string shift = IMMEDIATE(copy(shift_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("EXTW %s, %s, %s, %s", rd, rs, rt, shift); + return img_format("EXTW %s, %s, %s, 0x%" PRIx64, rd, rs, rt, shift_value); } @@ -7694,15 +7357,15 @@ std::string NMD::EXTW(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::FLOOR_L_D(uint64 instruction) +static char *FLOOR_L_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("FLOOR.L.D %s, %s", ft, fs); + return img_format("FLOOR.L.D %s, %s", ft, fs); } @@ -7716,15 +7379,15 @@ std::string NMD::FLOOR_L_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::FLOOR_L_S(uint64 instruction) +static char *FLOOR_L_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string 
ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("FLOOR.L.S %s, %s", ft, fs); + return img_format("FLOOR.L.S %s, %s", ft, fs); } @@ -7738,15 +7401,15 @@ std::string NMD::FLOOR_L_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::FLOOR_W_D(uint64 instruction) +static char *FLOOR_W_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("FLOOR.W.D %s, %s", ft, fs); + return img_format("FLOOR.W.D %s, %s", ft, fs); } @@ -7760,15 +7423,15 @@ std::string NMD::FLOOR_W_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::FLOOR_W_S(uint64 instruction) +static char *FLOOR_W_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("FLOOR.W.S %s, %s", ft, fs); + return img_format("FLOOR.W.S %s, %s", ft, fs); } @@ -7782,17 +7445,17 @@ std::string NMD::FLOOR_W_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::FORK(uint64 instruction) +static char *FORK(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("FORK %s, %s, %s", rd, rs, rt); + return img_format("FORK %s, %s, %s", rd, rs, rt); } @@ -7806,13 +7469,12 @@ std::string NMD::FORK(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::HYPCALL(uint64 instruction) +static char *HYPCALL(uint64 instruction, Dis_info *info) { uint64 code_value = extract_code_17_to_0(instruction); - std::string code = IMMEDIATE(copy(code_value)); - return img::format("HYPCALL %s", code); + return img_format("HYPCALL 0x%" PRIx64, code_value); } @@ -7826,13 +7488,12 @@ std::string NMD::HYPCALL(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::HYPCALL_16_(uint64 instruction) +static char *HYPCALL_16_(uint64 instruction, Dis_info *info) { uint64 code_value = extract_code_1_0(instruction); - std::string code = IMMEDIATE(copy(code_value)); - return img::format("HYPCALL %s", code); + return img_format("HYPCALL 0x%" PRIx64, code_value); } @@ -7846,20 +7507,19 @@ std::string NMD::HYPCALL_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::INS(uint64 instruction) +static char *INS(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 msbd_value = extract_msbt_10_9_8_7_6(instruction); uint64 lsb_value = extract_lsb_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string pos = IMMEDIATE(encode_lsb_from_pos_and_size(lsb_value)); - std::string size = 
IMMEDIATE(encode_lsb_from_pos_and_size(msbd_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); /* !!!!!!!!!! - no conversion function */ - return img::format("INS %s, %s, %s, %s", rt, rs, pos, size); + return img_format("INS %s, %s, 0x%" PRIx64 ", 0x%" PRIx64, + rt, rs, lsb_value, msbd_value); /* hand edited */ } @@ -7873,15 +7533,15 @@ std::string NMD::INS(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::INSV(uint64 instruction) +static char *INSV(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("INSV %s, %s", rt, rs); + return img_format("INSV %s, %s", rt, rs); } @@ -7895,11 +7555,11 @@ std::string NMD::INSV(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::IRET(uint64 instruction) +static char *IRET(uint64 instruction, Dis_info *info) { (void)instruction; - return "IRET "; + return g_strdup("IRET "); } @@ -7913,13 +7573,13 @@ std::string NMD::IRET(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::JALRC_16_(uint64 instruction) +static char *JALRC_16_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_9_8_7_6_5(instruction); - std::string rt = GPR(copy(rt_value)); + const char *rt = GPR(rt_value, info); - return img::format("JALRC $%d, %s", 31, rt); + return img_format("JALRC $%d, %s", 31, rt); } @@ -7933,15 +7593,15 @@ std::string NMD::JALRC_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::JALRC_32_(uint64 instruction) +static char *JALRC_32_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("JALRC %s, %s", rt, rs); + return img_format("JALRC %s, %s", rt, rs); } @@ -7955,15 +7615,15 @@ std::string NMD::JALRC_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::JALRC_HB(uint64 instruction) +static char *JALRC_HB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("JALRC.HB %s, %s", rt, rs); + return img_format("JALRC.HB %s, %s", rt, rs); } @@ -7977,13 +7637,13 @@ std::string NMD::JALRC_HB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::JRC(uint64 instruction) +static char *JRC(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_9_8_7_6_5(instruction); - std::string rt = GPR(copy(rt_value)); + const char *rt = GPR(rt_value, info); - return img::format("JRC %s", rt); + return img_format("JRC %s", rt); } @@ -7997,17 +7657,16 @@ std::string NMD::JRC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LB_16_(uint64 instruction) +static char *LB_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); uint64 u_value = extract_u_1_0(instruction); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - 
std::string u = IMMEDIATE(copy(u_value)); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); - return img::format("LB %s, %s(%s)", rt3, u, rs3); + return img_format("LB %s, 0x%" PRIx64 "(%s)", rt3, u_value, rs3); } @@ -8021,15 +7680,14 @@ std::string NMD::LB_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LB_GP_(uint64 instruction) +static char *LB_GP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_17_to_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); - return img::format("LB %s, %s($%d)", rt, u, 28); + return img_format("LB %s, 0x%" PRIx64 "($%d)", rt, u_value, 28); } @@ -8043,17 +7701,16 @@ std::string NMD::LB_GP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LB_S9_(uint64 instruction) +static char *LB_S9_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LB %s, %s(%s)", rt, s, rs); + return img_format("LB %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -8067,17 +7724,16 @@ std::string NMD::LB_S9_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LB_U12_(uint64 instruction) +static char *LB_U12_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LB %s, %s(%s)", rt, u, rs); + return img_format("LB %s, 0x%" PRIx64 "(%s)", rt, u_value, rs); } @@ -8091,17 +7747,16 @@ std::string NMD::LB_U12_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LBE(uint64 instruction) +static char *LBE(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LBE %s, %s(%s)", rt, s, rs); + return img_format("LBE %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -8115,17 +7770,16 @@ std::string NMD::LBE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LBU_16_(uint64 instruction) +static char *LBU_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); uint64 u_value = extract_u_1_0(instruction); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); + 
const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); - return img::format("LBU %s, %s(%s)", rt3, u, rs3); + return img_format("LBU %s, 0x%" PRIx64 "(%s)", rt3, u_value, rs3); } @@ -8139,15 +7793,14 @@ std::string NMD::LBU_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LBU_GP_(uint64 instruction) +static char *LBU_GP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_17_to_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); - return img::format("LBU %s, %s($%d)", rt, u, 28); + return img_format("LBU %s, 0x%" PRIx64 "($%d)", rt, u_value, 28); } @@ -8161,17 +7814,16 @@ std::string NMD::LBU_GP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LBU_S9_(uint64 instruction) +static char *LBU_S9_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LBU %s, %s(%s)", rt, s, rs); + return img_format("LBU %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -8185,17 +7837,16 @@ std::string NMD::LBU_S9_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LBU_U12_(uint64 instruction) +static char *LBU_U12_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LBU %s, %s(%s)", rt, u, rs); + return img_format("LBU %s, 0x%" PRIx64 "(%s)", rt, u_value, rs); } @@ -8209,17 +7860,16 @@ std::string NMD::LBU_U12_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LBUE(uint64 instruction) +static char *LBUE(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LBUE %s, %s(%s)", rt, s, rs); + return img_format("LBUE %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -8233,17 +7883,17 @@ std::string NMD::LBUE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LBUX(uint64 instruction) +static char *LBUX(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("LBUX %s, %s(%s)", rd, rs, rt); + return img_format("LBUX %s, %s(%s)", rd, rs, 
rt); } @@ -8257,17 +7907,17 @@ std::string NMD::LBUX(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LBX(uint64 instruction) +static char *LBX(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("LBX %s, %s(%s)", rd, rs, rt); + return img_format("LBX %s, %s(%s)", rd, rs, rt); } @@ -8281,15 +7931,14 @@ std::string NMD::LBX(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LD_GP_(uint64 instruction) +static char *LD_GP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_20_to_3__s3(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); - return img::format("LD %s, %s($%d)", rt, u, 28); + return img_format("LD %s, 0x%" PRIx64 "($%d)", rt, u_value, 28); } @@ -8303,17 +7952,16 @@ std::string NMD::LD_GP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LD_S9_(uint64 instruction) +static char *LD_S9_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LD %s, %s(%s)", rt, s, rs); + return img_format("LD %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -8327,17 +7975,16 @@ std::string NMD::LD_S9_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LD_U12_(uint64 instruction) +static char *LD_U12_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LD %s, %s(%s)", rt, u, rs); + return img_format("LD %s, 0x%" PRIx64 "(%s)", rt, u_value, rs); } @@ -8351,15 +7998,14 @@ std::string NMD::LD_U12_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LDC1_GP_(uint64 instruction) +static char *LDC1_GP_(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 u_value = extract_u_17_to_2__s2(instruction); - std::string ft = FPR(copy(ft_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *ft = FPR(ft_value, info); - return img::format("LDC1 %s, %s($%d)", ft, u, 28); + return img_format("LDC1 %s, 0x%" PRIx64 "($%d)", ft, u_value, 28); } @@ -8373,17 +8019,16 @@ std::string NMD::LDC1_GP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LDC1_S9_(uint64 instruction) +static char *LDC1_S9_(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 
s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string ft = FPR(copy(ft_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *ft = FPR(ft_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LDC1 %s, %s(%s)", ft, s, rs); + return img_format("LDC1 %s, %" PRId64 "(%s)", ft, s_value, rs); } @@ -8397,17 +8042,16 @@ std::string NMD::LDC1_S9_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LDC1_U12_(uint64 instruction) +static char *LDC1_U12_(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string ft = FPR(copy(ft_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs = GPR(copy(rs_value)); + const char *ft = FPR(ft_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LDC1 %s, %s(%s)", ft, u, rs); + return img_format("LDC1 %s, 0x%" PRIx64 "(%s)", ft, u_value, rs); } @@ -8421,17 +8065,17 @@ std::string NMD::LDC1_U12_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LDC1XS(uint64 instruction) +static char *LDC1XS(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ft_value = extract_ft_15_14_13_12_11(instruction); - std::string ft = FPR(copy(ft_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ft = FPR(ft_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("LDC1XS %s, %s(%s)", ft, rs, rt); + return img_format("LDC1XS %s, %s(%s)", ft, rs, rt); } @@ -8445,17 +8089,17 @@ std::string NMD::LDC1XS(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LDC1X(uint64 instruction) +static char *LDC1X(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ft_value = extract_ft_15_14_13_12_11(instruction); - std::string ft = FPR(copy(ft_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ft = FPR(ft_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("LDC1X %s, %s(%s)", ft, rs, rt); + return img_format("LDC1X %s, %s(%s)", ft, rs, rt); } @@ -8469,17 +8113,16 @@ std::string NMD::LDC1X(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LDC2(uint64 instruction) +static char *LDC2(uint64 instruction, Dis_info *info) { uint64 ct_value = extract_ct_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string ct = CPR(copy(ct_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rs = GPR(rs_value, info); - return img::format("LDC2 %s, %s(%s)", ct, s, rs); + return img_format("LDC2 CP%" PRIu64 ", %" PRId64 "(%s)", + ct_value, s_value, rs); } @@ -8493,19 +8136,19 @@ std::string NMD::LDC2(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LDM(uint64 instruction) +static char *LDM(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = 
extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); uint64 count3_value = extract_count3_14_13_12(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); - std::string count3 = IMMEDIATE(encode_count3_from_count(count3_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); + uint64 count3 = encode_count3_from_count(count3_value); - return img::format("LDM %s, %s(%s), %s", rt, s, rs, count3); + return img_format("LDM %s, %" PRId64 "(%s), 0x%" PRIx64, + rt, s_value, rs, count3); } @@ -8519,15 +8162,15 @@ std::string NMD::LDM(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LDPC_48_(uint64 instruction) +static char *LDPC_48_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_41_40_39_38_37(instruction); int64 s_value = extract_s__se31_15_to_0_31_to_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 6); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 6, info); - return img::format("LDPC %s, %s", rt, s); + return img_format("LDPC %s, %s", rt, s); } @@ -8541,17 +8184,17 @@ std::string NMD::LDPC_48_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LDX(uint64 instruction) +static char *LDX(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("LDX %s, %s(%s)", rd, rs, rt); + return img_format("LDX %s, %s(%s)", rd, rs, rt); } @@ -8565,17 +8208,17 @@ std::string NMD::LDX(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LDXS(uint64 instruction) +static char *LDXS(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("LDXS %s, %s(%s)", rd, rs, rt); + return img_format("LDXS %s, %s(%s)", rd, rs, rt); } @@ -8589,17 +8232,16 @@ std::string NMD::LDXS(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LH_16_(uint64 instruction) +static char *LH_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); uint64 u_value = extract_u_2_1__s1(instruction); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); - return img::format("LH %s, %s(%s)", rt3, u, rs3); + return img_format("LH %s, 0x%" PRIx64 "(%s)", rt3, u_value, rs3); } @@ -8613,15 +8255,14 @@ std::string NMD::LH_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LH_GP_(uint64 instruction) +static char *LH_GP_(uint64 instruction, Dis_info 
*info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_17_to_1__s1(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); - return img::format("LH %s, %s($%d)", rt, u, 28); + return img_format("LH %s, 0x%" PRIx64 "($%d)", rt, u_value, 28); } @@ -8635,17 +8276,16 @@ std::string NMD::LH_GP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LH_S9_(uint64 instruction) +static char *LH_S9_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LH %s, %s(%s)", rt, s, rs); + return img_format("LH %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -8659,17 +8299,16 @@ std::string NMD::LH_S9_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LH_U12_(uint64 instruction) +static char *LH_U12_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LH %s, %s(%s)", rt, u, rs); + return img_format("LH %s, 0x%" PRIx64 "(%s)", rt, u_value, rs); } @@ -8683,17 +8322,16 @@ std::string NMD::LH_U12_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LHE(uint64 instruction) +static char *LHE(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LHE %s, %s(%s)", rt, s, rs); + return img_format("LHE %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -8707,17 +8345,16 @@ std::string NMD::LHE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LHU_16_(uint64 instruction) +static char *LHU_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); uint64 u_value = extract_u_2_1__s1(instruction); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); - return img::format("LHU %s, %s(%s)", rt3, u, rs3); + return img_format("LHU %s, 0x%" PRIx64 "(%s)", rt3, u_value, rs3); } @@ -8731,15 +8368,14 @@ std::string NMD::LHU_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LHU_GP_(uint64 instruction) +static char *LHU_GP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_17_to_1__s1(instruction); - std::string rt = 
GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); - return img::format("LHU %s, %s($%d)", rt, u, 28); + return img_format("LHU %s, 0x%" PRIx64 "($%d)", rt, u_value, 28); } @@ -8753,17 +8389,16 @@ std::string NMD::LHU_GP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LHU_S9_(uint64 instruction) +static char *LHU_S9_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LHU %s, %s(%s)", rt, s, rs); + return img_format("LHU %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -8777,17 +8412,16 @@ std::string NMD::LHU_S9_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LHU_U12_(uint64 instruction) +static char *LHU_U12_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LHU %s, %s(%s)", rt, u, rs); + return img_format("LHU %s, 0x%" PRIx64 "(%s)", rt, u_value, rs); } @@ -8801,17 +8435,16 @@ std::string NMD::LHU_U12_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LHUE(uint64 instruction) +static char *LHUE(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LHUE %s, %s(%s)", rt, s, rs); + return img_format("LHUE %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -8825,17 +8458,17 @@ std::string NMD::LHUE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LHUX(uint64 instruction) +static char *LHUX(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("LHUX %s, %s(%s)", rd, rs, rt); + return img_format("LHUX %s, %s(%s)", rd, rs, rt); } @@ -8849,17 +8482,17 @@ std::string NMD::LHUX(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LHUXS(uint64 instruction) +static char *LHUXS(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = 
GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("LHUXS %s, %s(%s)", rd, rs, rt); + return img_format("LHUXS %s, %s(%s)", rd, rs, rt); } @@ -8873,17 +8506,17 @@ std::string NMD::LHUXS(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LHXS(uint64 instruction) +static char *LHXS(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("LHXS %s, %s(%s)", rd, rs, rt); + return img_format("LHXS %s, %s(%s)", rd, rs, rt); } @@ -8897,17 +8530,17 @@ std::string NMD::LHXS(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LHX(uint64 instruction) +static char *LHX(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("LHX %s, %s(%s)", rd, rs, rt); + return img_format("LHX %s, %s(%s)", rd, rs, rt); } @@ -8921,15 +8554,15 @@ std::string NMD::LHX(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LI_16_(uint64 instruction) +static char *LI_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 eu_value = extract_eu_6_5_4_3_2_1_0(instruction); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - std::string eu = IMMEDIATE(encode_eu_from_s_li16(eu_value)); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); + int64 eu = encode_eu_from_s_li16(eu_value); - return img::format("LI %s, %s", rt3, eu); + return img_format("LI %s, %" PRId64, rt3, eu); } @@ -8943,15 +8576,14 @@ std::string NMD::LI_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LI_48_(uint64 instruction) +static char *LI_48_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_41_40_39_38_37(instruction); int64 s_value = extract_s__se31_15_to_0_31_to_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); + const char *rt = GPR(rt_value, info); - return img::format("LI %s, %s", rt, s); + return img_format("LI %s, %" PRId64, rt, s_value); } @@ -8965,17 +8597,16 @@ std::string NMD::LI_48_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LL(uint64 instruction) +static char *LL(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_s2(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LL %s, %s(%s)", rt, s, rs); + return img_format("LL %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -8989,17 +8620,16 @@ std::string NMD::LL(uint64 instruction) * rs 
----- * rd ----- */ -std::string NMD::LLD(uint64 instruction) +static char *LLD(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_s3(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LLD %s, %s(%s)", rt, s, rs); + return img_format("LLD %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -9013,17 +8643,17 @@ std::string NMD::LLD(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LLDP(uint64 instruction) +static char *LLDP(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ru_value = extract_ru_7_6_5_4_3(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ru = GPR(copy(ru_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *ru = GPR(ru_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LLDP %s, %s, (%s)", rt, ru, rs); + return img_format("LLDP %s, %s, (%s)", rt, ru, rs); } @@ -9037,17 +8667,16 @@ std::string NMD::LLDP(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LLE(uint64 instruction) +static char *LLE(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_s2(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LLE %s, %s(%s)", rt, s, rs); + return img_format("LLE %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -9061,17 +8690,17 @@ std::string NMD::LLE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LLWP(uint64 instruction) +static char *LLWP(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ru_value = extract_ru_7_6_5_4_3(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ru = GPR(copy(ru_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *ru = GPR(ru_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LLWP %s, %s, (%s)", rt, ru, rs); + return img_format("LLWP %s, %s, (%s)", rt, ru, rs); } @@ -9085,17 +8714,17 @@ std::string NMD::LLWP(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LLWPE(uint64 instruction) +static char *LLWPE(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ru_value = extract_ru_7_6_5_4_3(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ru = GPR(copy(ru_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *ru = GPR(ru_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LLWPE %s, %s, (%s)", rt, ru, rs); + return img_format("LLWPE %s, %s, (%s)", rt, ru, rs); } @@ -9109,19 +8738,18 @@ std::string NMD::LLWPE(uint64 instruction) * rs ----- * rd ----- */ -std::string 
NMD::LSA(uint64 instruction) +static char *LSA(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); uint64 u2_value = extract_u2_10_9(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); - std::string u2 = IMMEDIATE(copy(u2_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("LSA %s, %s, %s, %s", rd, rs, rt, u2); + return img_format("LSA %s, %s, %s, 0x%" PRIx64, rd, rs, rt, u2_value); } @@ -9135,15 +8763,14 @@ std::string NMD::LSA(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LUI(uint64 instruction) +static char *LUI(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); int64 s_value = extract_s__se31_0_11_to_2_20_to_12_s12(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); + const char *rt = GPR(rt_value, info); - return img::format("LUI %s, %%hi(%s)", rt, s); + return img_format("LUI %s, %%hi(%" PRId64 ")", rt, s_value); } @@ -9157,17 +8784,16 @@ std::string NMD::LUI(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LW_16_(uint64 instruction) +static char *LW_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); uint64 u_value = extract_u_3_2_1_0__s2(instruction); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); - return img::format("LW %s, %s(%s)", rt3, u, rs3); + return img_format("LW %s, 0x%" PRIx64 "(%s)", rt3, u_value, rs3); } @@ -9181,17 +8807,16 @@ std::string NMD::LW_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LW_4X4_(uint64 instruction) +static char *LW_4X4_(uint64 instruction, Dis_info *info) { uint64 rt4_value = extract_rt4_9_7_6_5(instruction); uint64 rs4_value = extract_rs4_4_2_1_0(instruction); uint64 u_value = extract_u_3_8__s2(instruction); - std::string rt4 = GPR(decode_gpr_gpr4(rt4_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs4 = GPR(decode_gpr_gpr4(rs4_value)); + const char *rt4 = GPR(decode_gpr_gpr4(rt4_value, info), info); + const char *rs4 = GPR(decode_gpr_gpr4(rs4_value, info), info); - return img::format("LW %s, %s(%s)", rt4, u, rs4); + return img_format("LW %s, 0x%" PRIx64 "(%s)", rt4, u_value, rs4); } @@ -9205,15 +8830,14 @@ std::string NMD::LW_4X4_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LW_GP_(uint64 instruction) +static char *LW_GP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_20_to_2__s2(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); - return img::format("LW %s, %s($%d)", rt, u, 28); + return img_format("LW %s, 0x%" PRIx64 "($%d)", rt, u_value, 28); } @@ -9227,15 +8851,14 @@ std::string NMD::LW_GP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LW_GP16_(uint64 instruction) +static char *LW_GP16_(uint64 instruction, Dis_info *info) { uint64 
rt3_value = extract_rt3_9_8_7(instruction); uint64 u_value = extract_u_6_5_4_3_2_1_0__s2(instruction); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); - return img::format("LW %s, %s($%d)", rt3, u, 28); + return img_format("LW %s, 0x%" PRIx64 "($%d)", rt3, u_value, 28); } @@ -9249,17 +8872,16 @@ std::string NMD::LW_GP16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LW_S9_(uint64 instruction) +static char *LW_S9_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LW %s, %s(%s)", rt, s, rs); + return img_format("LW %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -9273,15 +8895,14 @@ std::string NMD::LW_S9_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LW_SP_(uint64 instruction) +static char *LW_SP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_9_8_7_6_5(instruction); uint64 u_value = extract_u_4_3_2_1_0__s2(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); - return img::format("LW %s, %s($%d)", rt, u, 29); + return img_format("LW %s, 0x%" PRIx64 "($%d)", rt, u_value, 29); } @@ -9295,17 +8916,16 @@ std::string NMD::LW_SP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LW_U12_(uint64 instruction) +static char *LW_U12_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LW %s, %s(%s)", rt, u, rs); + return img_format("LW %s, 0x%" PRIx64 "(%s)", rt, u_value, rs); } @@ -9319,15 +8939,14 @@ std::string NMD::LW_U12_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LWC1_GP_(uint64 instruction) +static char *LWC1_GP_(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 u_value = extract_u_17_to_2__s2(instruction); - std::string ft = FPR(copy(ft_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *ft = FPR(ft_value, info); - return img::format("LWC1 %s, %s($%d)", ft, u, 28); + return img_format("LWC1 %s, 0x%" PRIx64 "($%d)", ft, u_value, 28); } @@ -9341,17 +8960,16 @@ std::string NMD::LWC1_GP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LWC1_S9_(uint64 instruction) +static char *LWC1_S9_(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string ft = FPR(copy(ft_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *ft = FPR(ft_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LWC1 %s, %s(%s)", ft, s, rs); + return 
img_format("LWC1 %s, %" PRId64 "(%s)", ft, s_value, rs); } @@ -9365,17 +8983,16 @@ std::string NMD::LWC1_S9_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LWC1_U12_(uint64 instruction) +static char *LWC1_U12_(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string ft = FPR(copy(ft_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs = GPR(copy(rs_value)); + const char *ft = FPR(ft_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LWC1 %s, %s(%s)", ft, u, rs); + return img_format("LWC1 %s, 0x%" PRIx64 "(%s)", ft, u_value, rs); } @@ -9389,17 +9006,17 @@ std::string NMD::LWC1_U12_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LWC1X(uint64 instruction) +static char *LWC1X(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ft_value = extract_ft_15_14_13_12_11(instruction); - std::string ft = FPR(copy(ft_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ft = FPR(ft_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("LWC1X %s, %s(%s)", ft, rs, rt); + return img_format("LWC1X %s, %s(%s)", ft, rs, rt); } @@ -9413,17 +9030,17 @@ std::string NMD::LWC1X(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LWC1XS(uint64 instruction) +static char *LWC1XS(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ft_value = extract_ft_15_14_13_12_11(instruction); - std::string ft = FPR(copy(ft_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ft = FPR(ft_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("LWC1XS %s, %s(%s)", ft, rs, rt); + return img_format("LWC1XS %s, %s(%s)", ft, rs, rt); } @@ -9437,17 +9054,16 @@ std::string NMD::LWC1XS(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LWC2(uint64 instruction) +static char *LWC2(uint64 instruction, Dis_info *info) { uint64 ct_value = extract_ct_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string ct = CPR(copy(ct_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rs = GPR(rs_value, info); - return img::format("LWC2 %s, %s(%s)", ct, s, rs); + return img_format("LWC2 CP%" PRIu64 ", %" PRId64 "(%s)", + ct_value, s_value, rs); } @@ -9461,17 +9077,16 @@ std::string NMD::LWC2(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LWE(uint64 instruction) +static char *LWE(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LWE %s, %s(%s)", rt, s, rs); + return img_format("LWE %s, %" PRId64 
"(%s)", rt, s_value, rs); } @@ -9485,19 +9100,19 @@ std::string NMD::LWE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LWM(uint64 instruction) +static char *LWM(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); uint64 count3_value = extract_count3_14_13_12(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); - std::string count3 = IMMEDIATE(encode_count3_from_count(count3_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); + uint64 count3 = encode_count3_from_count(count3_value); - return img::format("LWM %s, %s(%s), %s", rt, s, rs, count3); + return img_format("LWM %s, %" PRId64 "(%s), 0x%" PRIx64, + rt, s_value, rs, count3); } @@ -9511,15 +9126,15 @@ std::string NMD::LWM(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LWPC_48_(uint64 instruction) +static char *LWPC_48_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_41_40_39_38_37(instruction); int64 s_value = extract_s__se31_15_to_0_31_to_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 6); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 6, info); - return img::format("LWPC %s, %s", rt, s); + return img_format("LWPC %s, %s", rt, s); } @@ -9533,15 +9148,14 @@ std::string NMD::LWPC_48_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LWU_GP_(uint64 instruction) +static char *LWU_GP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_17_to_2__s2(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); - return img::format("LWU %s, %s($%d)", rt, u, 28); + return img_format("LWU %s, 0x%" PRIx64 "($%d)", rt, u_value, 28); } @@ -9555,17 +9169,16 @@ std::string NMD::LWU_GP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LWU_S9_(uint64 instruction) +static char *LWU_S9_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LWU %s, %s(%s)", rt, s, rs); + return img_format("LWU %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -9579,17 +9192,16 @@ std::string NMD::LWU_S9_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LWU_U12_(uint64 instruction) +static char *LWU_U12_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("LWU %s, %s(%s)", rt, u, rs); + return img_format("LWU %s, 0x%" PRIx64 "(%s)", rt, u_value, rs); } @@ -9603,17 +9215,17 @@ std::string 
NMD::LWU_U12_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LWUX(uint64 instruction) +static char *LWUX(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("LWUX %s, %s(%s)", rd, rs, rt); + return img_format("LWUX %s, %s(%s)", rd, rs, rt); } @@ -9627,17 +9239,17 @@ std::string NMD::LWUX(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LWUXS(uint64 instruction) +static char *LWUXS(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("LWUXS %s, %s(%s)", rd, rs, rt); + return img_format("LWUXS %s, %s(%s)", rd, rs, rt); } @@ -9651,17 +9263,17 @@ std::string NMD::LWUXS(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LWX(uint64 instruction) +static char *LWX(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("LWX %s, %s(%s)", rd, rs, rt); + return img_format("LWX %s, %s(%s)", rd, rs, rt); } @@ -9675,17 +9287,17 @@ std::string NMD::LWX(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LWXS_16_(uint64 instruction) +static char *LWXS_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); uint64 rd3_value = extract_rd3_3_2_1(instruction); - std::string rd3 = GPR(decode_gpr_gpr3(rd3_value)); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); - std::string rt3 = IMMEDIATE(decode_gpr_gpr3(rt3_value)); + const char *rd3 = GPR(decode_gpr_gpr3(rd3_value, info), info); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); + uint64 rt3 = decode_gpr_gpr3(rt3_value, info); - return img::format("LWXS %s, %s(%s)", rd3, rs3, rt3); + return img_format("LWXS %s, %s(0x%" PRIx64 ")", rd3, rs3, rt3); } @@ -9699,17 +9311,17 @@ std::string NMD::LWXS_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::LWXS_32_(uint64 instruction) +static char *LWXS_32_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return 
img::format("LWXS %s, %s(%s)", rd, rs, rt); + return img_format("LWXS %s, %s(%s)", rd, rs, rt); } @@ -9724,17 +9336,17 @@ std::string NMD::LWXS_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MADD_DSP_(uint64 instruction) +static char *MADD_DSP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MADD %s, %s, %s", ac, rs, rt); + return img_format("MADD %s, %s, %s", ac, rs, rt); } @@ -9748,17 +9360,17 @@ std::string NMD::MADD_DSP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MADDF_D(uint64 instruction) +static char *MADDF_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("MADDF.D %s, %s, %s", fd, fs, ft); + return img_format("MADDF.D %s, %s, %s", fd, fs, ft); } @@ -9772,17 +9384,17 @@ std::string NMD::MADDF_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MADDF_S(uint64 instruction) +static char *MADDF_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("MADDF.S %s, %s, %s", fd, fs, ft); + return img_format("MADDF.S %s, %s, %s", fd, fs, ft); } @@ -9797,17 +9409,17 @@ std::string NMD::MADDF_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MADDU_DSP_(uint64 instruction) +static char *MADDU_DSP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MADDU %s, %s, %s", ac, rs, rt); + return img_format("MADDU %s, %s, %s", ac, rs, rt); } @@ -9822,17 +9434,17 @@ std::string NMD::MADDU_DSP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MAQ_S_W_PHL(uint64 instruction) +static char *MAQ_S_W_PHL(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, 
info); + const char *rt = GPR(rt_value, info); - return img::format("MAQ_S.W.PHL %s, %s, %s", ac, rs, rt); + return img_format("MAQ_S.W.PHL %s, %s, %s", ac, rs, rt); } @@ -9847,17 +9459,17 @@ std::string NMD::MAQ_S_W_PHL(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MAQ_S_W_PHR(uint64 instruction) +static char *MAQ_S_W_PHR(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MAQ_S.W.PHR %s, %s, %s", ac, rs, rt); + return img_format("MAQ_S.W.PHR %s, %s, %s", ac, rs, rt); } @@ -9872,17 +9484,17 @@ std::string NMD::MAQ_S_W_PHR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MAQ_SA_W_PHL(uint64 instruction) +static char *MAQ_SA_W_PHL(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MAQ_SA.W.PHL %s, %s, %s", ac, rs, rt); + return img_format("MAQ_SA.W.PHL %s, %s, %s", ac, rs, rt); } @@ -9897,17 +9509,17 @@ std::string NMD::MAQ_SA_W_PHL(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MAQ_SA_W_PHR(uint64 instruction) +static char *MAQ_SA_W_PHR(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MAQ_SA.W.PHR %s, %s, %s", ac, rs, rt); + return img_format("MAQ_SA.W.PHR %s, %s, %s", ac, rs, rt); } @@ -9921,17 +9533,17 @@ std::string NMD::MAQ_SA_W_PHR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MAX_D(uint64 instruction) +static char *MAX_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("MAX.D %s, %s, %s", fd, fs, ft); + return img_format("MAX.D %s, %s, %s", fd, fs, ft); } @@ -9945,17 +9557,17 @@ std::string NMD::MAX_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MAX_S(uint64 instruction) +static char *MAX_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = 
FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("MAX.S %s, %s, %s", fd, fs, ft); + return img_format("MAX.S %s, %s, %s", fd, fs, ft); } @@ -9969,17 +9581,17 @@ std::string NMD::MAX_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MAXA_D(uint64 instruction) +static char *MAXA_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("MAXA.D %s, %s, %s", fd, fs, ft); + return img_format("MAXA.D %s, %s, %s", fd, fs, ft); } @@ -9993,17 +9605,17 @@ std::string NMD::MAXA_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MAXA_S(uint64 instruction) +static char *MAXA_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("MAXA.S %s, %s, %s", fd, fs, ft); + return img_format("MAXA.S %s, %s, %s", fd, fs, ft); } @@ -10017,17 +9629,16 @@ std::string NMD::MAXA_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MFC0(uint64 instruction) +static char *MFC0(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 c0s_value = extract_c0s_20_19_18_17_16(instruction); uint64 sel_value = extract_sel_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string c0s = CPR(copy(c0s_value)); - std::string sel = IMMEDIATE(copy(sel_value)); + const char *rt = GPR(rt_value, info); - return img::format("MFC0 %s, %s, %s", rt, c0s, sel); + return img_format("MFC0 %s, CP%" PRIu64 ", 0x%" PRIx64, + rt, c0s_value, sel_value); } @@ -10041,15 +9652,15 @@ std::string NMD::MFC0(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MFC1(uint64 instruction) +static char *MFC1(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string fs = FPR(copy(fs_value)); + const char *rt = GPR(rt_value, info); + const char *fs = FPR(fs_value, info); - return img::format("MFC1 %s, %s", rt, fs); + return img_format("MFC1 %s, %s", rt, fs); } @@ -10063,15 +9674,14 @@ std::string NMD::MFC1(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MFC2(uint64 instruction) +static char *MFC2(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 cs_value = extract_cs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string cs = CPR(copy(cs_value)); + const char *rt = GPR(rt_value, info); - return img::format("MFC2 %s, %s", rt, cs); + return img_format("MFC2 %s, CP%" PRIu64, rt, cs_value); } @@ -10085,17 +9695,16 @@ std::string 
NMD::MFC2(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MFGC0(uint64 instruction) +static char *MFGC0(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 c0s_value = extract_c0s_20_19_18_17_16(instruction); uint64 sel_value = extract_sel_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string c0s = CPR(copy(c0s_value)); - std::string sel = IMMEDIATE(copy(sel_value)); + const char *rt = GPR(rt_value, info); - return img::format("MFGC0 %s, %s, %s", rt, c0s, sel); + return img_format("MFGC0 %s, CP%" PRIu64 ", 0x%" PRIx64, + rt, c0s_value, sel_value); } @@ -10109,17 +9718,16 @@ std::string NMD::MFGC0(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MFHC0(uint64 instruction) +static char *MFHC0(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 c0s_value = extract_c0s_20_19_18_17_16(instruction); uint64 sel_value = extract_sel_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string c0s = CPR(copy(c0s_value)); - std::string sel = IMMEDIATE(copy(sel_value)); + const char *rt = GPR(rt_value, info); - return img::format("MFHC0 %s, %s, %s", rt, c0s, sel); + return img_format("MFHC0 %s, CP%" PRIu64 ", 0x%" PRIx64, + rt, c0s_value, sel_value); } @@ -10133,15 +9741,15 @@ std::string NMD::MFHC0(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MFHC1(uint64 instruction) +static char *MFHC1(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string fs = FPR(copy(fs_value)); + const char *rt = GPR(rt_value, info); + const char *fs = FPR(fs_value, info); - return img::format("MFHC1 %s, %s", rt, fs); + return img_format("MFHC1 %s, %s", rt, fs); } @@ -10155,15 +9763,14 @@ std::string NMD::MFHC1(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MFHC2(uint64 instruction) +static char *MFHC2(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 cs_value = extract_cs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string cs = CPR(copy(cs_value)); + const char *rt = GPR(rt_value, info); - return img::format("MFHC2 %s, %s", rt, cs); + return img_format("MFHC2 %s, CP%" PRIu64, rt, cs_value); } @@ -10177,17 +9784,16 @@ std::string NMD::MFHC2(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MFHGC0(uint64 instruction) +static char *MFHGC0(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 c0s_value = extract_c0s_20_19_18_17_16(instruction); uint64 sel_value = extract_sel_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string c0s = CPR(copy(c0s_value)); - std::string sel = IMMEDIATE(copy(sel_value)); + const char *rt = GPR(rt_value, info); - return img::format("MFHGC0 %s, %s, %s", rt, c0s, sel); + return img_format("MFHGC0 %s, CP%" PRIu64 ", 0x%" PRIx64, + rt, c0s_value, sel_value); } @@ -10200,15 +9806,15 @@ std::string NMD::MFHGC0(uint64 instruction) * rt ----- * ac -- */ -std::string NMD::MFHI_DSP_(uint64 instruction) +static char *MFHI_DSP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ac = AC(copy(ac_value)); + const char *rt = GPR(rt_value, 
info); + const char *ac = AC(ac_value, info); - return img::format("MFHI %s, %s", rt, ac); + return img_format("MFHI %s, %s", rt, ac); } @@ -10222,19 +9828,17 @@ std::string NMD::MFHI_DSP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MFHTR(uint64 instruction) +static char *MFHTR(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 c0s_value = extract_c0s_20_19_18_17_16(instruction); uint64 sel_value = extract_sel_15_14_13_12_11(instruction); uint64 u_value = extract_u_10(instruction); - std::string rt = GPR(copy(rt_value)); - std::string c0s = IMMEDIATE(copy(c0s_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string sel = IMMEDIATE(copy(sel_value)); + const char *rt = GPR(rt_value, info); - return img::format("MFHTR %s, %s, %s, %s", rt, c0s, u, sel); + return img_format("MFHTR %s, 0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64, + rt, c0s_value, u_value, sel_value); } @@ -10247,15 +9851,15 @@ std::string NMD::MFHTR(uint64 instruction) * rt ----- * ac -- */ -std::string NMD::MFLO_DSP_(uint64 instruction) +static char *MFLO_DSP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ac = AC(copy(ac_value)); + const char *rt = GPR(rt_value, info); + const char *ac = AC(ac_value, info); - return img::format("MFLO %s, %s", rt, ac); + return img_format("MFLO %s, %s", rt, ac); } @@ -10269,19 +9873,17 @@ std::string NMD::MFLO_DSP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MFTR(uint64 instruction) +static char *MFTR(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 c0s_value = extract_c0s_20_19_18_17_16(instruction); uint64 sel_value = extract_sel_15_14_13_12_11(instruction); uint64 u_value = extract_u_10(instruction); - std::string rt = GPR(copy(rt_value)); - std::string c0s = IMMEDIATE(copy(c0s_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string sel = IMMEDIATE(copy(sel_value)); + const char *rt = GPR(rt_value, info); - return img::format("MFTR %s, %s, %s, %s", rt, c0s, u, sel); + return img_format("MFTR %s, 0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64, + rt, c0s_value, u_value, sel_value); } @@ -10295,17 +9897,17 @@ std::string NMD::MFTR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MIN_D(uint64 instruction) +static char *MIN_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("MIN.D %s, %s, %s", fd, fs, ft); + return img_format("MIN.D %s, %s, %s", fd, fs, ft); } @@ -10319,17 +9921,17 @@ std::string NMD::MIN_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MIN_S(uint64 instruction) +static char *MIN_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = 
FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("MIN.S %s, %s, %s", fd, fs, ft); + return img_format("MIN.S %s, %s, %s", fd, fs, ft); } @@ -10343,17 +9945,17 @@ std::string NMD::MIN_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MINA_D(uint64 instruction) +static char *MINA_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("MINA.D %s, %s, %s", fd, fs, ft); + return img_format("MINA.D %s, %s, %s", fd, fs, ft); } @@ -10367,17 +9969,17 @@ std::string NMD::MINA_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MINA_S(uint64 instruction) +static char *MINA_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("MINA.S %s, %s, %s", fd, fs, ft); + return img_format("MINA.S %s, %s, %s", fd, fs, ft); } @@ -10391,17 +9993,17 @@ std::string NMD::MINA_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MOD(uint64 instruction) +static char *MOD(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MOD %s, %s, %s", rd, rs, rt); + return img_format("MOD %s, %s, %s", rd, rs, rt); } @@ -10415,17 +10017,17 @@ std::string NMD::MOD(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MODSUB(uint64 instruction) +static char *MODSUB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MODSUB %s, %s, %s", rd, rs, rt); + return img_format("MODSUB %s, %s, %s", rd, rs, rt); } @@ -10439,17 +10041,17 @@ std::string NMD::MODSUB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MODU(uint64 instruction) +static char *MODU(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = 
GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MODU %s, %s, %s", rd, rs, rt); + return img_format("MODU %s, %s, %s", rd, rs, rt); } @@ -10463,15 +10065,15 @@ std::string NMD::MODU(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MOV_D(uint64 instruction) +static char *MOV_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("MOV.D %s, %s", ft, fs); + return img_format("MOV.D %s, %s", ft, fs); } @@ -10485,15 +10087,15 @@ std::string NMD::MOV_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MOV_S(uint64 instruction) +static char *MOV_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("MOV.S %s, %s", ft, fs); + return img_format("MOV.S %s, %s", ft, fs); } @@ -10507,17 +10109,17 @@ std::string NMD::MOV_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MOVE_BALC(uint64 instruction) +static char *MOVE_BALC(uint64 instruction, Dis_info *info) { uint64 rtz4_value = extract_rtz4_27_26_25_23_22_21(instruction); uint64 rd1_value = extract_rdl_25_24(instruction); int64 s_value = extract_s__se21_0_20_to_1_s1(instruction); - std::string rd1 = GPR(decode_gpr_gpr1(rd1_value)); - std::string rtz4 = GPR(decode_gpr_gpr4_zero(rtz4_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 4); + const char *rd1 = GPR(decode_gpr_gpr1(rd1_value, info), info); + const char *rtz4 = GPR(decode_gpr_gpr4_zero(rtz4_value, info), info); + g_autofree char *s = ADDRESS(s_value, 4, info); - return img::format("MOVE.BALC %s, %s, %s", rd1, rtz4, s); + return img_format("MOVE.BALC %s, %s, %s", rd1, rtz4, s); } @@ -10531,19 +10133,19 @@ std::string NMD::MOVE_BALC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MOVEP(uint64 instruction) +static char *MOVEP(uint64 instruction, Dis_info *info) { uint64 rtz4_value = extract_rtz4_9_7_6_5(instruction); uint64 rd2_value = extract_rd2_3_8(instruction); uint64 rsz4_value = extract_rsz4_4_2_1_0(instruction); - std::string rd2 = GPR(decode_gpr_gpr2_reg1(rd2_value)); - std::string re2 = GPR(decode_gpr_gpr2_reg2(rd2_value)); + const char *rd2 = GPR(decode_gpr_gpr2_reg1(rd2_value, info), info); + const char *re2 = GPR(decode_gpr_gpr2_reg2(rd2_value, info), info); /* !!!!!!!!!! 
- no conversion function */ - std::string rsz4 = GPR(decode_gpr_gpr4_zero(rsz4_value)); - std::string rtz4 = GPR(decode_gpr_gpr4_zero(rtz4_value)); + const char *rsz4 = GPR(decode_gpr_gpr4_zero(rsz4_value, info), info); + const char *rtz4 = GPR(decode_gpr_gpr4_zero(rtz4_value, info), info); - return img::format("MOVEP %s, %s, %s, %s", rd2, re2, rsz4, rtz4); + return img_format("MOVEP %s, %s, %s, %s", rd2, re2, rsz4, rtz4); /* hand edited */ } @@ -10558,19 +10160,19 @@ std::string NMD::MOVEP(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MOVEP_REV_(uint64 instruction) +static char *MOVEP_REV_(uint64 instruction, Dis_info *info) { uint64 rt4_value = extract_rt4_9_7_6_5(instruction); uint64 rd2_value = extract_rd2_3_8(instruction); uint64 rs4_value = extract_rs4_4_2_1_0(instruction); - std::string rs4 = GPR(decode_gpr_gpr4(rs4_value)); - std::string rt4 = GPR(decode_gpr_gpr4(rt4_value)); - std::string rd2 = GPR(decode_gpr_gpr2_reg1(rd2_value)); - std::string rs2 = GPR(decode_gpr_gpr2_reg2(rd2_value)); + const char *rs4 = GPR(decode_gpr_gpr4(rs4_value, info), info); + const char *rt4 = GPR(decode_gpr_gpr4(rt4_value, info), info); + const char *rd2 = GPR(decode_gpr_gpr2_reg1(rd2_value, info), info); + const char *rs2 = GPR(decode_gpr_gpr2_reg2(rd2_value, info), info); /* !!!!!!!!!! - no conversion function */ - return img::format("MOVEP %s, %s, %s, %s", rs4, rt4, rd2, rs2); + return img_format("MOVEP %s, %s, %s, %s", rs4, rt4, rd2, rs2); /* hand edited */ } @@ -10585,15 +10187,15 @@ std::string NMD::MOVEP_REV_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MOVE(uint64 instruction) +static char *MOVE(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_9_8_7_6_5(instruction); uint64 rs_value = extract_rs_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("MOVE %s, %s", rt, rs); + return img_format("MOVE %s, %s", rt, rs); } @@ -10607,17 +10209,17 @@ std::string NMD::MOVE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MOVN(uint64 instruction) +static char *MOVN(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MOVN %s, %s, %s", rd, rs, rt); + return img_format("MOVN %s, %s, %s", rd, rs, rt); } @@ -10631,17 +10233,17 @@ std::string NMD::MOVN(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MOVZ(uint64 instruction) +static char *MOVZ(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MOVZ %s, %s, %s", rd, rs, rt); + return img_format("MOVZ %s, %s, %s", rd, rs, rt); } @@ -10655,17 +10257,17 @@ std::string NMD::MOVZ(uint64 instruction) * rs 
----- * ac -- */ -std::string NMD::MSUB_DSP_(uint64 instruction) +static char *MSUB_DSP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MSUB %s, %s, %s", ac, rs, rt); + return img_format("MSUB %s, %s, %s", ac, rs, rt); } @@ -10679,17 +10281,17 @@ std::string NMD::MSUB_DSP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MSUBF_D(uint64 instruction) +static char *MSUBF_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("MSUBF.D %s, %s, %s", fd, fs, ft); + return img_format("MSUBF.D %s, %s, %s", fd, fs, ft); } @@ -10703,17 +10305,17 @@ std::string NMD::MSUBF_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MSUBF_S(uint64 instruction) +static char *MSUBF_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("MSUBF.S %s, %s, %s", fd, fs, ft); + return img_format("MSUBF.S %s, %s, %s", fd, fs, ft); } @@ -10727,17 +10329,17 @@ std::string NMD::MSUBF_S(uint64 instruction) * rs ----- * ac -- */ -std::string NMD::MSUBU_DSP_(uint64 instruction) +static char *MSUBU_DSP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MSUBU %s, %s, %s", ac, rs, rt); + return img_format("MSUBU %s, %s, %s", ac, rs, rt); } @@ -10751,17 +10353,16 @@ std::string NMD::MSUBU_DSP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MTC0(uint64 instruction) +static char *MTC0(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 c0s_value = extract_c0s_20_19_18_17_16(instruction); uint64 sel_value = extract_sel_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string c0s = CPR(copy(c0s_value)); - std::string sel = IMMEDIATE(copy(sel_value)); + const char *rt = GPR(rt_value, info); - return img::format("MTC0 %s, %s, %s", rt, c0s, sel); + return img_format("MTC0 %s, CP%" PRIu64 ", 0x%" PRIx64, + rt, c0s_value, sel_value); } @@ -10775,15 +10376,15 @@ std::string 
NMD::MTC0(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MTC1(uint64 instruction) +static char *MTC1(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string fs = FPR(copy(fs_value)); + const char *rt = GPR(rt_value, info); + const char *fs = FPR(fs_value, info); - return img::format("MTC1 %s, %s", rt, fs); + return img_format("MTC1 %s, %s", rt, fs); } @@ -10797,15 +10398,14 @@ std::string NMD::MTC1(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MTC2(uint64 instruction) +static char *MTC2(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 cs_value = extract_cs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string cs = CPR(copy(cs_value)); + const char *rt = GPR(rt_value, info); - return img::format("MTC2 %s, %s", rt, cs); + return img_format("MTC2 %s, CP%" PRIu64, rt, cs_value); } @@ -10819,17 +10419,16 @@ std::string NMD::MTC2(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MTGC0(uint64 instruction) +static char *MTGC0(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 c0s_value = extract_c0s_20_19_18_17_16(instruction); uint64 sel_value = extract_sel_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string c0s = CPR(copy(c0s_value)); - std::string sel = IMMEDIATE(copy(sel_value)); + const char *rt = GPR(rt_value, info); - return img::format("MTGC0 %s, %s, %s", rt, c0s, sel); + return img_format("MTGC0 %s, CP%" PRIu64 ", 0x%" PRIx64, + rt, c0s_value, sel_value); } @@ -10843,17 +10442,16 @@ std::string NMD::MTGC0(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MTHC0(uint64 instruction) +static char *MTHC0(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 c0s_value = extract_c0s_20_19_18_17_16(instruction); uint64 sel_value = extract_sel_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string c0s = CPR(copy(c0s_value)); - std::string sel = IMMEDIATE(copy(sel_value)); + const char *rt = GPR(rt_value, info); - return img::format("MTHC0 %s, %s, %s", rt, c0s, sel); + return img_format("MTHC0 %s, CP%" PRIu64 ", 0x%" PRIx64, + rt, c0s_value, sel_value); } @@ -10867,15 +10465,15 @@ std::string NMD::MTHC0(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MTHC1(uint64 instruction) +static char *MTHC1(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string fs = FPR(copy(fs_value)); + const char *rt = GPR(rt_value, info); + const char *fs = FPR(fs_value, info); - return img::format("MTHC1 %s, %s", rt, fs); + return img_format("MTHC1 %s, %s", rt, fs); } @@ -10889,15 +10487,14 @@ std::string NMD::MTHC1(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MTHC2(uint64 instruction) +static char *MTHC2(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 cs_value = extract_cs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string cs = CPR(copy(cs_value)); + const char *rt = GPR(rt_value, info); - return img::format("MTHC2 %s, %s", rt, cs); + return img_format("MTHC2 %s, CP%" PRIu64, rt, cs_value); } @@ -10911,17 
+10508,16 @@ std::string NMD::MTHC2(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MTHGC0(uint64 instruction) +static char *MTHGC0(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 c0s_value = extract_c0s_20_19_18_17_16(instruction); uint64 sel_value = extract_sel_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string c0s = CPR(copy(c0s_value)); - std::string sel = IMMEDIATE(copy(sel_value)); + const char *rt = GPR(rt_value, info); - return img::format("MTHGC0 %s, %s, %s", rt, c0s, sel); + return img_format("MTHGC0 %s, CP%" PRIu64 ", 0x%" PRIx64, + rt, c0s_value, sel_value); } @@ -10934,15 +10530,15 @@ std::string NMD::MTHGC0(uint64 instruction) * rs ----- * ac -- */ -std::string NMD::MTHI_DSP_(uint64 instruction) +static char *MTHI_DSP_(uint64 instruction, Dis_info *info) { uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string rs = GPR(copy(rs_value)); - std::string ac = AC(copy(ac_value)); + const char *rs = GPR(rs_value, info); + const char *ac = AC(ac_value, info); - return img::format("MTHI %s, %s", rs, ac); + return img_format("MTHI %s, %s", rs, ac); } @@ -10955,15 +10551,15 @@ std::string NMD::MTHI_DSP_(uint64 instruction) * rs ----- * ac -- */ -std::string NMD::MTHLIP(uint64 instruction) +static char *MTHLIP(uint64 instruction, Dis_info *info) { uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string rs = GPR(copy(rs_value)); - std::string ac = AC(copy(ac_value)); + const char *rs = GPR(rs_value, info); + const char *ac = AC(ac_value, info); - return img::format("MTHLIP %s, %s", rs, ac); + return img_format("MTHLIP %s, %s", rs, ac); } @@ -10977,19 +10573,17 @@ std::string NMD::MTHLIP(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MTHTR(uint64 instruction) +static char *MTHTR(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 c0s_value = extract_c0s_20_19_18_17_16(instruction); uint64 sel_value = extract_sel_15_14_13_12_11(instruction); uint64 u_value = extract_u_10(instruction); - std::string rt = GPR(copy(rt_value)); - std::string c0s = IMMEDIATE(copy(c0s_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string sel = IMMEDIATE(copy(sel_value)); + const char *rt = GPR(rt_value, info); - return img::format("MTHTR %s, %s, %s, %s", rt, c0s, u, sel); + return img_format("MTHTR %s, 0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64, + rt, c0s_value, u_value, sel_value); } @@ -11002,15 +10596,15 @@ std::string NMD::MTHTR(uint64 instruction) * rs ----- * ac -- */ -std::string NMD::MTLO_DSP_(uint64 instruction) +static char *MTLO_DSP_(uint64 instruction, Dis_info *info) { uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string rs = GPR(copy(rs_value)); - std::string ac = AC(copy(ac_value)); + const char *rs = GPR(rs_value, info); + const char *ac = AC(ac_value, info); - return img::format("MTLO %s, %s", rs, ac); + return img_format("MTLO %s, %s", rs, ac); } @@ -11024,19 +10618,17 @@ std::string NMD::MTLO_DSP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MTTR(uint64 instruction) +static char *MTTR(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 c0s_value = extract_c0s_20_19_18_17_16(instruction); uint64 sel_value = extract_sel_15_14_13_12_11(instruction); uint64 
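
In the MTC0-family hunks in this stretch (MTC0, MTGC0, MTHC0, MTHGC0, MTHTR, MTTR) the old CPR() and IMMEDIATE() string helpers disappear entirely: the coprocessor register index and selector are folded straight into the format string with PRIu64/PRIx64. A reduced, self-contained restatement of that idiom, with g_strdup_printf standing in for img_format and mtc0_demo as an invented name:

    #include <glib.h>
    #include <inttypes.h>
    #include <stdint.h>

    static char *mtc0_demo(const char *rt, uint64_t c0s_value, uint64_t sel_value)
    {
        /* register index in decimal ("CP12"), selector in hex ("0x1") */
        return g_strdup_printf("MTC0 %s, CP%" PRIu64 ", 0x%" PRIx64,
                               rt, c0s_value, sel_value);
    }
    /* mtc0_demo("a0", 12, 1) -> "MTC0 a0, CP12, 0x1" */
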
u_value = extract_u_10(instruction); - std::string rt = GPR(copy(rt_value)); - std::string c0s = IMMEDIATE(copy(c0s_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string sel = IMMEDIATE(copy(sel_value)); + const char *rt = GPR(rt_value, info); - return img::format("MTTR %s, %s, %s, %s", rt, c0s, u, sel); + return img_format("MTTR %s, 0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64, + rt, c0s_value, u_value, sel_value); } @@ -11050,17 +10642,17 @@ std::string NMD::MTTR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MUH(uint64 instruction) +static char *MUH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MUH %s, %s, %s", rd, rs, rt); + return img_format("MUH %s, %s, %s", rd, rs, rt); } @@ -11074,17 +10666,17 @@ std::string NMD::MUH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MUHU(uint64 instruction) +static char *MUHU(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MUHU %s, %s, %s", rd, rs, rt); + return img_format("MUHU %s, %s, %s", rd, rs, rt); } @@ -11098,17 +10690,17 @@ std::string NMD::MUHU(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MUL_32_(uint64 instruction) +static char *MUL_32_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MUL %s, %s, %s", rd, rs, rt); + return img_format("MUL %s, %s, %s", rd, rs, rt); } @@ -11122,15 +10714,15 @@ std::string NMD::MUL_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MUL_4X4_(uint64 instruction) +static char *MUL_4X4_(uint64 instruction, Dis_info *info) { uint64 rt4_value = extract_rt4_9_7_6_5(instruction); uint64 rs4_value = extract_rs4_4_2_1_0(instruction); - std::string rs4 = GPR(decode_gpr_gpr4(rs4_value)); - std::string rt4 = GPR(decode_gpr_gpr4(rt4_value)); + const char *rs4 = GPR(decode_gpr_gpr4(rs4_value, info), info); + const char *rt4 = GPR(decode_gpr_gpr4(rt4_value, info), info); - return img::format("MUL %s, %s", rs4, rt4); + return img_format("MUL %s, %s", rs4, rt4); } @@ -11144,17 +10736,17 @@ std::string NMD::MUL_4X4_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MUL_D(uint64 instruction) +static char *MUL_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = 
extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("MUL.D %s, %s, %s", fd, fs, ft); + return img_format("MUL.D %s, %s, %s", fd, fs, ft); } @@ -11169,17 +10761,17 @@ std::string NMD::MUL_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MUL_PH(uint64 instruction) +static char *MUL_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MUL.PH %s, %s, %s", rd, rs, rt); + return img_format("MUL.PH %s, %s, %s", rd, rs, rt); } @@ -11194,17 +10786,17 @@ std::string NMD::MUL_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MUL_S_PH(uint64 instruction) +static char *MUL_S_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MUL_S.PH %s, %s, %s", rd, rs, rt); + return img_format("MUL_S.PH %s, %s, %s", rd, rs, rt); } @@ -11218,17 +10810,17 @@ std::string NMD::MUL_S_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MUL_S(uint64 instruction) +static char *MUL_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("MUL.S %s, %s, %s", fd, fs, ft); + return img_format("MUL.S %s, %s, %s", fd, fs, ft); } @@ -11243,17 +10835,17 @@ std::string NMD::MUL_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MULEQ_S_W_PHL(uint64 instruction) +static char *MULEQ_S_W_PHL(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MULEQ_S.W.PHL %s, %s, %s", rd, rs, rt); + return img_format("MULEQ_S.W.PHL %s, %s, %s", rd, rs, rt); } @@ -11268,17 +10860,17 @@ std::string NMD::MULEQ_S_W_PHL(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MULEQ_S_W_PHR(uint64 instruction) +static char *MULEQ_S_W_PHR(uint64 instruction, Dis_info *info) { uint64 rt_value = 
extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MULEQ_S.W.PHR %s, %s, %s", rd, rs, rt); + return img_format("MULEQ_S.W.PHR %s, %s, %s", rd, rs, rt); } @@ -11293,17 +10885,17 @@ std::string NMD::MULEQ_S_W_PHR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MULEU_S_PH_QBL(uint64 instruction) +static char *MULEU_S_PH_QBL(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MULEU_S.PH.QBL %s, %s, %s", rd, rs, rt); + return img_format("MULEU_S.PH.QBL %s, %s, %s", rd, rs, rt); } @@ -11318,17 +10910,17 @@ std::string NMD::MULEU_S_PH_QBL(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MULEU_S_PH_QBR(uint64 instruction) +static char *MULEU_S_PH_QBR(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MULEU_S.PH.QBR %s, %s, %s", rd, rs, rt); + return img_format("MULEU_S.PH.QBR %s, %s, %s", rd, rs, rt); } @@ -11343,17 +10935,17 @@ std::string NMD::MULEU_S_PH_QBR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MULQ_RS_PH(uint64 instruction) +static char *MULQ_RS_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MULQ_RS.PH %s, %s, %s", rd, rs, rt); + return img_format("MULQ_RS.PH %s, %s, %s", rd, rs, rt); } @@ -11368,17 +10960,17 @@ std::string NMD::MULQ_RS_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MULQ_RS_W(uint64 instruction) +static char *MULQ_RS_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MULQ_RS.W %s, %s, %s", rd, rs, rt); + return img_format("MULQ_RS.W %s, %s, %s", rd, rs, rt); } @@ -11393,17 
+10985,17 @@ std::string NMD::MULQ_RS_W(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MULQ_S_PH(uint64 instruction) +static char *MULQ_S_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MULQ_S.PH %s, %s, %s", rd, rs, rt); + return img_format("MULQ_S.PH %s, %s, %s", rd, rs, rt); } @@ -11418,17 +11010,17 @@ std::string NMD::MULQ_S_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MULQ_S_W(uint64 instruction) +static char *MULQ_S_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MULQ_S.W %s, %s, %s", rd, rs, rt); + return img_format("MULQ_S.W %s, %s, %s", rd, rs, rt); } @@ -11443,17 +11035,17 @@ std::string NMD::MULQ_S_W(uint64 instruction) * rs ----- * ac -- */ -std::string NMD::MULSA_W_PH(uint64 instruction) +static char *MULSA_W_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MULSA.W.PH %s, %s, %s", ac, rs, rt); + return img_format("MULSA.W.PH %s, %s, %s", ac, rs, rt); } @@ -11468,17 +11060,17 @@ std::string NMD::MULSA_W_PH(uint64 instruction) * rs ----- * ac -- */ -std::string NMD::MULSAQ_S_W_PH(uint64 instruction) +static char *MULSAQ_S_W_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MULSAQ_S.W.PH %s, %s, %s", ac, rs, rt); + return img_format("MULSAQ_S.W.PH %s, %s, %s", ac, rs, rt); } @@ -11492,17 +11084,17 @@ std::string NMD::MULSAQ_S_W_PH(uint64 instruction) * rs ----- * ac -- */ -std::string NMD::MULT_DSP_(uint64 instruction) +static char *MULT_DSP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return 
img::format("MULT %s, %s, %s", ac, rs, rt); + return img_format("MULT %s, %s, %s", ac, rs, rt); } @@ -11516,17 +11108,17 @@ std::string NMD::MULT_DSP_(uint64 instruction) * rs ----- * ac -- */ -std::string NMD::MULTU_DSP_(uint64 instruction) +static char *MULTU_DSP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string ac = AC(copy(ac_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ac = AC(ac_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MULTU %s, %s, %s", ac, rs, rt); + return img_format("MULTU %s, %s, %s", ac, rs, rt); } @@ -11540,17 +11132,17 @@ std::string NMD::MULTU_DSP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::MULU(uint64 instruction) +static char *MULU(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("MULU %s, %s, %s", rd, rs, rt); + return img_format("MULU %s, %s, %s", rd, rs, rt); } @@ -11564,15 +11156,15 @@ std::string NMD::MULU(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::NEG_D(uint64 instruction) +static char *NEG_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("NEG.D %s, %s", ft, fs); + return img_format("NEG.D %s, %s", ft, fs); } @@ -11586,15 +11178,15 @@ std::string NMD::NEG_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::NEG_S(uint64 instruction) +static char *NEG_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("NEG.S %s, %s", ft, fs); + return img_format("NEG.S %s, %s", ft, fs); } @@ -11608,11 +11200,11 @@ std::string NMD::NEG_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::NOP_16_(uint64 instruction) +static char *NOP_16_(uint64 instruction, Dis_info *info) { (void)instruction; - return "NOP "; + return g_strdup("NOP "); } @@ -11626,11 +11218,11 @@ std::string NMD::NOP_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::NOP_32_(uint64 instruction) +static char *NOP_32_(uint64 instruction, Dis_info *info) { (void)instruction; - return "NOP "; + return g_strdup("NOP "); } @@ -11644,17 +11236,17 @@ std::string NMD::NOP_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::NOR(uint64 instruction) +static char *NOR(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = 
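
NOP (and, further down, PAUSE) now return g_strdup("NOP ") rather than a string literal. The point appears to be uniform ownership: every decoder helper hands back memory that the caller may free, whichever path produced it. A minimal sketch of that contract (nop_demo and emit_demo are invented names; the real caller is elsewhere in the file):

    #include <glib.h>

    static char *nop_demo(void)
    {
        return g_strdup("NOP ");   /* heap copy, so g_free() is always legal */
    }

    static void emit_demo(char *(*disasm)(void))
    {
        char *text = disasm();
        g_print("%s\n", text);
        g_free(text);              /* same cleanup for g_strdup and g_strdup_printf results */
    }
    /* emit_demo(nop_demo) prints "NOP " and leaks nothing */
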
extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("NOR %s, %s, %s", rd, rs, rt); + return img_format("NOR %s, %s, %s", rd, rs, rt); } @@ -11668,15 +11260,15 @@ std::string NMD::NOR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::NOT_16_(uint64 instruction) +static char *NOT_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); - return img::format("NOT %s, %s", rt3, rs3); + return img_format("NOT %s, %s", rt3, rs3); } @@ -11690,15 +11282,15 @@ std::string NMD::NOT_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::OR_16_(uint64 instruction) +static char *OR_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); - return img::format("OR %s, %s", rs3, rt3); + return img_format("OR %s, %s", rs3, rt3); } @@ -11712,17 +11304,17 @@ std::string NMD::OR_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::OR_32_(uint64 instruction) +static char *OR_32_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("OR %s, %s, %s", rd, rs, rt); + return img_format("OR %s, %s, %s", rd, rs, rt); } @@ -11736,17 +11328,16 @@ std::string NMD::OR_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ORI(uint64 instruction) +static char *ORI(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("ORI %s, %s, %s", rt, rs, u); + return img_format("ORI %s, %s, 0x%" PRIx64, rt, rs, u_value); } @@ -11761,17 +11352,17 @@ std::string NMD::ORI(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PACKRL_PH(uint64 instruction) +static char *PACKRL_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + 
const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("PACKRL.PH %s, %s, %s", rd, rs, rt); + return img_format("PACKRL.PH %s, %s, %s", rd, rs, rt); } @@ -11785,11 +11376,11 @@ std::string NMD::PACKRL_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PAUSE(uint64 instruction) +static char *PAUSE(uint64 instruction, Dis_info *info) { (void)instruction; - return "PAUSE "; + return g_strdup("PAUSE "); } @@ -11804,17 +11395,17 @@ std::string NMD::PAUSE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PICK_PH(uint64 instruction) +static char *PICK_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("PICK.PH %s, %s, %s", rd, rs, rt); + return img_format("PICK.PH %s, %s, %s", rd, rs, rt); } @@ -11829,17 +11420,17 @@ std::string NMD::PICK_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PICK_QB(uint64 instruction) +static char *PICK_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("PICK.QB %s, %s, %s", rd, rs, rt); + return img_format("PICK.QB %s, %s, %s", rd, rs, rt); } @@ -11854,15 +11445,15 @@ std::string NMD::PICK_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PRECEQ_W_PHL(uint64 instruction) +static char *PRECEQ_W_PHL(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("PRECEQ.W.PHL %s, %s", rt, rs); + return img_format("PRECEQ.W.PHL %s, %s", rt, rs); } @@ -11877,15 +11468,15 @@ std::string NMD::PRECEQ_W_PHL(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PRECEQ_W_PHR(uint64 instruction) +static char *PRECEQ_W_PHR(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("PRECEQ.W.PHR %s, %s", rt, rs); + return img_format("PRECEQ.W.PHR %s, %s", rt, rs); } @@ -11900,15 +11491,15 @@ std::string NMD::PRECEQ_W_PHR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PRECEQU_PH_QBLA(uint64 instruction) +static char *PRECEQU_PH_QBLA(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = 
GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("PRECEQU.PH.QBLA %s, %s", rt, rs); + return img_format("PRECEQU.PH.QBLA %s, %s", rt, rs); } @@ -11923,15 +11514,15 @@ std::string NMD::PRECEQU_PH_QBLA(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PRECEQU_PH_QBL(uint64 instruction) +static char *PRECEQU_PH_QBL(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("PRECEQU.PH.QBL %s, %s", rt, rs); + return img_format("PRECEQU.PH.QBL %s, %s", rt, rs); } @@ -11946,15 +11537,15 @@ std::string NMD::PRECEQU_PH_QBL(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PRECEQU_PH_QBRA(uint64 instruction) +static char *PRECEQU_PH_QBRA(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("PRECEQU.PH.QBRA %s, %s", rt, rs); + return img_format("PRECEQU.PH.QBRA %s, %s", rt, rs); } @@ -11969,15 +11560,15 @@ std::string NMD::PRECEQU_PH_QBRA(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PRECEQU_PH_QBR(uint64 instruction) +static char *PRECEQU_PH_QBR(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("PRECEQU.PH.QBR %s, %s", rt, rs); + return img_format("PRECEQU.PH.QBR %s, %s", rt, rs); } @@ -11993,15 +11584,15 @@ std::string NMD::PRECEQU_PH_QBR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PRECEU_PH_QBLA(uint64 instruction) +static char *PRECEU_PH_QBLA(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("PRECEU.PH.QBLA %s, %s", rt, rs); + return img_format("PRECEU.PH.QBLA %s, %s", rt, rs); } @@ -12016,15 +11607,15 @@ std::string NMD::PRECEU_PH_QBLA(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PRECEU_PH_QBL(uint64 instruction) +static char *PRECEU_PH_QBL(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("PRECEU.PH.QBL %s, %s", rt, rs); + return img_format("PRECEU.PH.QBL %s, %s", rt, rs); } @@ -12040,15 +11631,15 @@ std::string NMD::PRECEU_PH_QBL(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PRECEU_PH_QBRA(uint64 instruction) +static char *PRECEU_PH_QBRA(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 
rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("PRECEU.PH.QBRA %s, %s", rt, rs); + return img_format("PRECEU.PH.QBRA %s, %s", rt, rs); } @@ -12063,15 +11654,15 @@ std::string NMD::PRECEU_PH_QBRA(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PRECEU_PH_QBR(uint64 instruction) +static char *PRECEU_PH_QBR(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("PRECEU.PH.QBR %s, %s", rt, rs); + return img_format("PRECEU.PH.QBR %s, %s", rt, rs); } @@ -12086,17 +11677,17 @@ std::string NMD::PRECEU_PH_QBR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PRECR_QB_PH(uint64 instruction) +static char *PRECR_QB_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("PRECR.QB.PH %s, %s, %s", rd, rs, rt); + return img_format("PRECR.QB.PH %s, %s, %s", rd, rs, rt); } @@ -12111,17 +11702,16 @@ std::string NMD::PRECR_QB_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PRECR_SRA_PH_W(uint64 instruction) +static char *PRECR_SRA_PH_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 sa_value = extract_sa_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string sa = IMMEDIATE(copy(sa_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("PRECR_SRA.PH.W %s, %s, %s", rt, rs, sa); + return img_format("PRECR_SRA.PH.W %s, %s, 0x%" PRIx64, rt, rs, sa_value); } @@ -12136,17 +11726,16 @@ std::string NMD::PRECR_SRA_PH_W(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PRECR_SRA_R_PH_W(uint64 instruction) +static char *PRECR_SRA_R_PH_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 sa_value = extract_sa_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string sa = IMMEDIATE(copy(sa_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("PRECR_SRA_R.PH.W %s, %s, %s", rt, rs, sa); + return img_format("PRECR_SRA_R.PH.W %s, %s, 0x%" PRIx64, rt, rs, sa_value); } @@ -12161,17 +11750,17 @@ std::string NMD::PRECR_SRA_R_PH_W(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PRECRQ_PH_W(uint64 instruction) +static char *PRECRQ_PH_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = 
extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("PRECRQ.PH.W %s, %s, %s", rd, rs, rt); + return img_format("PRECRQ.PH.W %s, %s, %s", rd, rs, rt); } @@ -12186,17 +11775,17 @@ std::string NMD::PRECRQ_PH_W(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PRECRQ_QB_PH(uint64 instruction) +static char *PRECRQ_QB_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("PRECRQ.QB.PH %s, %s, %s", rd, rs, rt); + return img_format("PRECRQ.QB.PH %s, %s, %s", rd, rs, rt); } @@ -12211,17 +11800,17 @@ std::string NMD::PRECRQ_QB_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PRECRQ_RS_PH_W(uint64 instruction) +static char *PRECRQ_RS_PH_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("PRECRQ_RS.PH.W %s, %s, %s", rd, rs, rt); + return img_format("PRECRQ_RS.PH.W %s, %s, %s", rd, rs, rt); } @@ -12236,17 +11825,17 @@ std::string NMD::PRECRQ_RS_PH_W(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PRECRQU_S_QB_PH(uint64 instruction) +static char *PRECRQU_S_QB_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("PRECRQU_S.QB.PH %s, %s, %s", rd, rs, rt); + return img_format("PRECRQU_S.QB.PH %s, %s, %s", rd, rs, rt); } @@ -12260,17 +11849,16 @@ std::string NMD::PRECRQU_S_QB_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PREF_S9_(uint64 instruction) +static char *PREF_S9_(uint64 instruction, Dis_info *info) { uint64 hint_value = extract_hint_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string hint = IMMEDIATE(copy(hint_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rs = GPR(rs_value, info); - return img::format("PREF %s, %s(%s)", hint, s, rs); + return img_format("PREF 0x%" PRIx64 ", %" PRId64 "(%s)", + hint_value, s_value, rs); } @@ -12284,17 +11872,16 @@ std::string NMD::PREF_S9_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PREF_U12_(uint64 instruction) +static char 
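
PREF with the signed 9-bit offset above and the unsigned 12-bit form just below show the immediate-formatting convention used throughout the conversion: sign-extended offsets (int64 s_value) print in decimal with %PRId64, while unsigned fields (u_value, hints, shift amounts) print as hex with 0x%PRIx64. Condensed into one hypothetical helper (pref_demo is an invented name, g_strdup_printf stands in for img_format):

    #include <glib.h>
    #include <inttypes.h>
    #include <stdint.h>

    static char *pref_demo(uint64_t hint, int64_t s_offset, const char *rs)
    {
        return g_strdup_printf("PREF 0x%" PRIx64 ", %" PRId64 "(%s)",
                               hint, s_offset, rs);
    }
    /* pref_demo(2, -64, "a1") -> "PREF 0x2, -64(a1)" */
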
*PREF_U12_(uint64 instruction, Dis_info *info) { uint64 hint_value = extract_hint_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string hint = IMMEDIATE(copy(hint_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs = GPR(copy(rs_value)); + const char *rs = GPR(rs_value, info); - return img::format("PREF %s, %s(%s)", hint, u, rs); + return img_format("PREF 0x%" PRIx64 ", 0x%" PRIx64 "(%s)", + hint_value, u_value, rs); } @@ -12308,17 +11895,16 @@ std::string NMD::PREF_U12_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PREFE(uint64 instruction) +static char *PREFE(uint64 instruction, Dis_info *info) { uint64 hint_value = extract_hint_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string hint = IMMEDIATE(copy(hint_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rs = GPR(rs_value, info); - return img::format("PREFE %s, %s(%s)", hint, s, rs); + return img_format("PREFE 0x%" PRIx64 ", %" PRId64 "(%s)", + hint_value, s_value, rs); } @@ -12332,17 +11918,16 @@ std::string NMD::PREFE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::PREPEND(uint64 instruction) +static char *PREPEND(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 sa_value = extract_sa_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string sa = IMMEDIATE(copy(sa_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("PREPEND %s, %s, %s", rt, rs, sa); + return img_format("PREPEND %s, %s, 0x%" PRIx64, rt, rs, sa_value); } @@ -12355,15 +11940,15 @@ std::string NMD::PREPEND(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::RADDU_W_QB(uint64 instruction) +static char *RADDU_W_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("RADDU.W.QB %s, %s", rt, rs); + return img_format("RADDU.W.QB %s, %s", rt, rs); } @@ -12376,15 +11961,14 @@ std::string NMD::RADDU_W_QB(uint64 instruction) * rt ----- * mask ------- */ -std::string NMD::RDDSP(uint64 instruction) +static char *RDDSP(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 mask_value = extract_mask_20_19_18_17_16_15_14(instruction); - std::string rt = GPR(copy(rt_value)); - std::string mask = IMMEDIATE(copy(mask_value)); + const char *rt = GPR(rt_value, info); - return img::format("RDDSP %s, %s", rt, mask); + return img_format("RDDSP %s, 0x%" PRIx64, rt, mask_value); } @@ -12398,17 +11982,16 @@ std::string NMD::RDDSP(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::RDHWR(uint64 instruction) +static char *RDHWR(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 hs_value = extract_hs_20_19_18_17_16(instruction); uint64 sel_value = extract_sel_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string hs = 
CPR(copy(hs_value)); - std::string sel = IMMEDIATE(copy(sel_value)); + const char *rt = GPR(rt_value, info); - return img::format("RDHWR %s, %s, %s", rt, hs, sel); + return img_format("RDHWR %s, CP%" PRIu64 ", 0x%" PRIx64, + rt, hs_value, sel_value); } @@ -12422,15 +12005,15 @@ std::string NMD::RDHWR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::RDPGPR(uint64 instruction) +static char *RDPGPR(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("RDPGPR %s, %s", rt, rs); + return img_format("RDPGPR %s, %s", rt, rs); } @@ -12444,15 +12027,15 @@ std::string NMD::RDPGPR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::RECIP_D(uint64 instruction) +static char *RECIP_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("RECIP.D %s, %s", ft, fs); + return img_format("RECIP.D %s, %s", ft, fs); } @@ -12466,15 +12049,15 @@ std::string NMD::RECIP_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::RECIP_S(uint64 instruction) +static char *RECIP_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("RECIP.S %s, %s", ft, fs); + return img_format("RECIP.S %s, %s", ft, fs); } @@ -12488,15 +12071,14 @@ std::string NMD::RECIP_S(uint64 instruction) * rt ----- * s ---------- */ -std::string NMD::REPL_PH(uint64 instruction) +static char *REPL_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); int64 s_value = extract_s__se9_20_19_18_17_16_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); + const char *rt = GPR(rt_value, info); - return img::format("REPL.PH %s, %s", rt, s); + return img_format("REPL.PH %s, %" PRId64, rt, s_value); } @@ -12510,15 +12092,14 @@ std::string NMD::REPL_PH(uint64 instruction) * rt ----- * u -------- */ -std::string NMD::REPL_QB(uint64 instruction) +static char *REPL_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_20_19_18_17_16_15_14_13(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); - return img::format("REPL.QB %s, %s", rt, u); + return img_format("REPL.QB %s, 0x%" PRIx64, rt, u_value); } @@ -12532,15 +12113,15 @@ std::string NMD::REPL_QB(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::REPLV_PH(uint64 instruction) +static char *REPLV_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char 
*rs = GPR(rs_value, info); - return img::format("REPLV.PH %s, %s", rt, rs); + return img_format("REPLV.PH %s, %s", rt, rs); } @@ -12553,15 +12134,15 @@ std::string NMD::REPLV_PH(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::REPLV_QB(uint64 instruction) +static char *REPLV_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("REPLV.QB %s, %s", rt, rs); + return img_format("REPLV.QB %s, %s", rt, rs); } @@ -12575,16 +12156,16 @@ std::string NMD::REPLV_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::RESTORE_32_(uint64 instruction) +static char *RESTORE_32_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 count_value = extract_count_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3__s3(instruction); uint64 gp_value = extract_gp_2(instruction); - std::string u = IMMEDIATE(copy(u_value)); - return img::format("RESTORE %s%s", u, - save_restore_list(rt_value, count_value, gp_value)); + g_autofree char *save_restore_str = save_restore_list( + rt_value, count_value, gp_value, info); + return img_format("RESTORE 0x%" PRIx64 "%s", u_value, save_restore_str); } @@ -12598,15 +12179,15 @@ std::string NMD::RESTORE_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::RESTORE_JRC_16_(uint64 instruction) +static char *RESTORE_JRC_16_(uint64 instruction, Dis_info *info) { uint64 rt1_value = extract_rtl_11(instruction); uint64 u_value = extract_u_7_6_5_4__s4(instruction); uint64 count_value = extract_count_3_2_1_0(instruction); - std::string u = IMMEDIATE(copy(u_value)); - return img::format("RESTORE.JRC %s%s", u, - save_restore_list(encode_rt1_from_rt(rt1_value), count_value, 0)); + g_autofree char *save_restore_str = save_restore_list( + encode_rt1_from_rt(rt1_value), count_value, 0, info); + return img_format("RESTORE.JRC 0x%" PRIx64 "%s", u_value, save_restore_str); } @@ -12620,16 +12201,17 @@ std::string NMD::RESTORE_JRC_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::RESTORE_JRC_32_(uint64 instruction) +static char *RESTORE_JRC_32_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 count_value = extract_count_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3__s3(instruction); uint64 gp_value = extract_gp_2(instruction); - std::string u = IMMEDIATE(copy(u_value)); - return img::format("RESTORE.JRC %s%s", u, - save_restore_list(rt_value, count_value, gp_value)); + g_autofree char *save_restore_str = save_restore_list( + rt_value, count_value, gp_value, info); + return img_format("RESTORE.JRC 0x%" PRIx64 "%s", u_value, + save_restore_str); } @@ -12643,15 +12225,14 @@ std::string NMD::RESTORE_JRC_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::RESTOREF(uint64 instruction) +static char *RESTOREF(uint64 instruction, Dis_info *info) { uint64 count_value = extract_count_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3__s3(instruction); - std::string u = IMMEDIATE(copy(u_value)); - std::string count = IMMEDIATE(copy(count_value)); - return img::format("RESTOREF %s, %s", u, count); + return img_format("RESTOREF 0x%" PRIx64 ", 0x%" PRIx64, + u_value, count_value); } @@ -12665,15 
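
The RESTORE/RESTORE.JRC hunks above keep save_restore_list()'s result in a g_autofree pointer because, unlike the register-name lookups, that helper builds its ", reg, reg, ..." suffix at run time. Its definition is outside this excerpt, so the following is only a hypothetical sketch of assembling such a suffix with GString (reg_list_demo and restore_demo are invented names):

    #include <glib.h>
    #include <inttypes.h>
    #include <stdint.h>

    static char *reg_list_demo(const char * const *names, unsigned count)
    {
        GString *s = g_string_new("");

        for (unsigned i = 0; i < count; i++) {
            g_string_append_printf(s, ", %s", names[i]);
        }
        return g_string_free(s, FALSE);   /* FALSE: hand the buffer to the caller */
    }

    static char *restore_demo(uint64_t u_value, const char * const *names,
                              unsigned count)
    {
        g_autofree char *list = reg_list_demo(names, count);

        return g_strdup_printf("RESTORE 0x%" PRIx64 "%s", u_value, list);
    }
    /* with names = {"ra", "s0"}, count = 2: "RESTORE 0x20, ra, s0" */
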
+12246,15 @@ std::string NMD::RESTOREF(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::RINT_D(uint64 instruction) +static char *RINT_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("RINT.D %s, %s", ft, fs); + return img_format("RINT.D %s, %s", ft, fs); } @@ -12687,15 +12268,15 @@ std::string NMD::RINT_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::RINT_S(uint64 instruction) +static char *RINT_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("RINT.S %s, %s", ft, fs); + return img_format("RINT.S %s, %s", ft, fs); } @@ -12709,17 +12290,16 @@ std::string NMD::RINT_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ROTR(uint64 instruction) +static char *ROTR(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 shift_value = extract_shift_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string shift = IMMEDIATE(copy(shift_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("ROTR %s, %s, %s", rt, rs, shift); + return img_format("ROTR %s, %s, 0x%" PRIx64, rt, rs, shift_value); } @@ -12733,17 +12313,17 @@ std::string NMD::ROTR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ROTRV(uint64 instruction) +static char *ROTRV(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("ROTRV %s, %s, %s", rd, rs, rt); + return img_format("ROTRV %s, %s, %s", rd, rs, rt); } @@ -12757,7 +12337,7 @@ std::string NMD::ROTRV(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ROTX(uint64 instruction) +static char *ROTX(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); @@ -12765,14 +12345,11 @@ std::string NMD::ROTX(uint64 instruction) uint64 stripe_value = extract_stripe_6(instruction); uint64 shift_value = extract_shift_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string shift = IMMEDIATE(copy(shift_value)); - std::string shiftx = IMMEDIATE(copy(shiftx_value)); - std::string stripe = IMMEDIATE(copy(stripe_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("ROTX %s, %s, %s, %s, %s", - rt, rs, shift, shiftx, stripe); + return img_format("ROTX %s, %s, 0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64, + rt, rs, shift_value, 
shiftx_value, stripe_value); } @@ -12786,15 +12363,15 @@ std::string NMD::ROTX(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ROUND_L_D(uint64 instruction) +static char *ROUND_L_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("ROUND.L.D %s, %s", ft, fs); + return img_format("ROUND.L.D %s, %s", ft, fs); } @@ -12808,15 +12385,15 @@ std::string NMD::ROUND_L_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ROUND_L_S(uint64 instruction) +static char *ROUND_L_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("ROUND.L.S %s, %s", ft, fs); + return img_format("ROUND.L.S %s, %s", ft, fs); } @@ -12830,15 +12407,15 @@ std::string NMD::ROUND_L_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ROUND_W_D(uint64 instruction) +static char *ROUND_W_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("ROUND.W.D %s, %s", ft, fs); + return img_format("ROUND.W.D %s, %s", ft, fs); } @@ -12852,15 +12429,15 @@ std::string NMD::ROUND_W_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::ROUND_W_S(uint64 instruction) +static char *ROUND_W_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("ROUND.W.S %s, %s", ft, fs); + return img_format("ROUND.W.S %s, %s", ft, fs); } @@ -12874,15 +12451,15 @@ std::string NMD::ROUND_W_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::RSQRT_D(uint64 instruction) +static char *RSQRT_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("RSQRT.D %s, %s", ft, fs); + return img_format("RSQRT.D %s, %s", ft, fs); } @@ -12896,15 +12473,15 @@ std::string NMD::RSQRT_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::RSQRT_S(uint64 instruction) +static char *RSQRT_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("RSQRT.S %s, %s", ft, fs); + return img_format("RSQRT.S %s, %s", ft, fs); } @@ -12918,15 +12495,15 @@ 
std::string NMD::RSQRT_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SAVE_16_(uint64 instruction) +static char *SAVE_16_(uint64 instruction, Dis_info *info) { uint64 rt1_value = extract_rtl_11(instruction); uint64 u_value = extract_u_7_6_5_4__s4(instruction); uint64 count_value = extract_count_3_2_1_0(instruction); - std::string u = IMMEDIATE(copy(u_value)); - return img::format("SAVE %s%s", u, - save_restore_list(encode_rt1_from_rt(rt1_value), count_value, 0)); + g_autofree char *save_restore_str = save_restore_list( + encode_rt1_from_rt(rt1_value), count_value, 0, info); + return img_format("SAVE 0x%" PRIx64 "%s", u_value, save_restore_str); } @@ -12940,16 +12517,16 @@ std::string NMD::SAVE_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SAVE_32_(uint64 instruction) +static char *SAVE_32_(uint64 instruction, Dis_info *info) { uint64 count_value = extract_count_19_18_17_16(instruction); uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3__s3(instruction); uint64 gp_value = extract_gp_2(instruction); - std::string u = IMMEDIATE(copy(u_value)); - return img::format("SAVE %s%s", u, - save_restore_list(rt_value, count_value, gp_value)); + g_autofree char *save_restore_str = save_restore_list( + rt_value, count_value, gp_value, info); + return img_format("SAVE 0x%" PRIx64 "%s", u_value, save_restore_str); } @@ -12963,15 +12540,13 @@ std::string NMD::SAVE_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SAVEF(uint64 instruction) +static char *SAVEF(uint64 instruction, Dis_info *info) { uint64 count_value = extract_count_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3__s3(instruction); - std::string u = IMMEDIATE(copy(u_value)); - std::string count = IMMEDIATE(copy(count_value)); - return img::format("SAVEF %s, %s", u, count); + return img_format("SAVEF 0x%" PRIx64 ", 0x%" PRIx64, u_value, count_value); } @@ -12985,17 +12560,16 @@ std::string NMD::SAVEF(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SB_16_(uint64 instruction) +static char *SB_16_(uint64 instruction, Dis_info *info) { uint64 rtz3_value = extract_rtz3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); uint64 u_value = extract_u_1_0(instruction); - std::string rtz3 = GPR(decode_gpr_gpr3_src_store(rtz3_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); + const char *rtz3 = GPR(decode_gpr_gpr3_src_store(rtz3_value, info), info); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); - return img::format("SB %s, %s(%s)", rtz3, u, rs3); + return img_format("SB %s, 0x%" PRIx64 "(%s)", rtz3, u_value, rs3); } @@ -13009,15 +12583,14 @@ std::string NMD::SB_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SB_GP_(uint64 instruction) +static char *SB_GP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_17_to_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); - return img::format("SB %s, %s($%d)", rt, u, 28); + return img_format("SB %s, 0x%" PRIx64 "($%d)", rt, u_value, 28); } @@ -13031,17 +12604,16 @@ std::string NMD::SB_GP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SB_S9_(uint64 instruction) +static char *SB_S9_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 
rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SB %s, %s(%s)", rt, s, rs); + return img_format("SB %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -13055,17 +12627,16 @@ std::string NMD::SB_S9_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SB_U12_(uint64 instruction) +static char *SB_U12_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SB %s, %s(%s)", rt, u, rs); + return img_format("SB %s, 0x%" PRIx64 "(%s)", rt, u_value, rs); } @@ -13079,17 +12650,16 @@ std::string NMD::SB_U12_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SBE(uint64 instruction) +static char *SBE(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SBE %s, %s(%s)", rt, s, rs); + return img_format("SBE %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -13103,17 +12673,17 @@ std::string NMD::SBE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SBX(uint64 instruction) +static char *SBX(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SBX %s, %s(%s)", rd, rs, rt); + return img_format("SBX %s, %s(%s)", rd, rs, rt); } @@ -13127,17 +12697,16 @@ std::string NMD::SBX(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SC(uint64 instruction) +static char *SC(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_s2(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SC %s, %s(%s)", rt, s, rs); + return img_format("SC %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -13151,17 +12720,16 @@ std::string NMD::SC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SCD(uint64 instruction) +static char *SCD(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = 
extract_s__se8_15_7_6_5_4_3_s3(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SCD %s, %s(%s)", rt, s, rs); + return img_format("SCD %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -13175,17 +12743,17 @@ std::string NMD::SCD(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SCDP(uint64 instruction) +static char *SCDP(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ru_value = extract_ru_7_6_5_4_3(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ru = GPR(copy(ru_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *ru = GPR(ru_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SCDP %s, %s, (%s)", rt, ru, rs); + return img_format("SCDP %s, %s, (%s)", rt, ru, rs); } @@ -13199,17 +12767,16 @@ std::string NMD::SCDP(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SCE(uint64 instruction) +static char *SCE(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_s2(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SCE %s, %s(%s)", rt, s, rs); + return img_format("SCE %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -13223,17 +12790,17 @@ std::string NMD::SCE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SCWP(uint64 instruction) +static char *SCWP(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ru_value = extract_ru_7_6_5_4_3(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ru = GPR(copy(ru_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *ru = GPR(ru_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SCWP %s, %s, (%s)", rt, ru, rs); + return img_format("SCWP %s, %s, (%s)", rt, ru, rs); } @@ -13247,17 +12814,17 @@ std::string NMD::SCWP(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SCWPE(uint64 instruction) +static char *SCWPE(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ru_value = extract_ru_7_6_5_4_3(instruction); - std::string rt = GPR(copy(rt_value)); - std::string ru = GPR(copy(ru_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *ru = GPR(ru_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SCWPE %s, %s, (%s)", rt, ru, rs); + return img_format("SCWPE %s, %s, (%s)", rt, ru, rs); } @@ -13271,15 +12838,14 @@ std::string NMD::SCWPE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SD_GP_(uint64 instruction) +static char *SD_GP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_20_to_3__s3(instruction); - std::string rt = GPR(copy(rt_value)); - 
std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); - return img::format("SD %s, %s($%d)", rt, u, 28); + return img_format("SD %s, 0x%" PRIx64 "($%d)", rt, u_value, 28); } @@ -13293,17 +12859,16 @@ std::string NMD::SD_GP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SD_S9_(uint64 instruction) +static char *SD_S9_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SD %s, %s(%s)", rt, s, rs); + return img_format("SD %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -13317,17 +12882,16 @@ std::string NMD::SD_S9_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SD_U12_(uint64 instruction) +static char *SD_U12_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SD %s, %s(%s)", rt, u, rs); + return img_format("SD %s, 0x%" PRIx64 "(%s)", rt, u_value, rs); } @@ -13341,13 +12905,12 @@ std::string NMD::SD_U12_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SDBBP_16_(uint64 instruction) +static char *SDBBP_16_(uint64 instruction, Dis_info *info) { uint64 code_value = extract_code_2_1_0(instruction); - std::string code = IMMEDIATE(copy(code_value)); - return img::format("SDBBP %s", code); + return img_format("SDBBP 0x%" PRIx64, code_value); } @@ -13361,13 +12924,12 @@ std::string NMD::SDBBP_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SDBBP_32_(uint64 instruction) +static char *SDBBP_32_(uint64 instruction, Dis_info *info) { uint64 code_value = extract_code_18_to_0(instruction); - std::string code = IMMEDIATE(copy(code_value)); - return img::format("SDBBP %s", code); + return img_format("SDBBP 0x%" PRIx64, code_value); } @@ -13381,15 +12943,14 @@ std::string NMD::SDBBP_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SDC1_GP_(uint64 instruction) +static char *SDC1_GP_(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 u_value = extract_u_17_to_2__s2(instruction); - std::string ft = FPR(copy(ft_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *ft = FPR(ft_value, info); - return img::format("SDC1 %s, %s($%d)", ft, u, 28); + return img_format("SDC1 %s, 0x%" PRIx64 "($%d)", ft, u_value, 28); } @@ -13403,17 +12964,16 @@ std::string NMD::SDC1_GP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SDC1_S9_(uint64 instruction) +static char *SDC1_S9_(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string ft = FPR(copy(ft_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *ft = FPR(ft_value, info); + const char *rs = 
GPR(rs_value, info); - return img::format("SDC1 %s, %s(%s)", ft, s, rs); + return img_format("SDC1 %s, %" PRId64 "(%s)", ft, s_value, rs); } @@ -13427,17 +12987,16 @@ std::string NMD::SDC1_S9_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SDC1_U12_(uint64 instruction) +static char *SDC1_U12_(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string ft = FPR(copy(ft_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs = GPR(copy(rs_value)); + const char *ft = FPR(ft_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SDC1 %s, %s(%s)", ft, u, rs); + return img_format("SDC1 %s, 0x%" PRIx64 "(%s)", ft, u_value, rs); } @@ -13451,17 +13010,17 @@ std::string NMD::SDC1_U12_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SDC1X(uint64 instruction) +static char *SDC1X(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ft_value = extract_ft_15_14_13_12_11(instruction); - std::string ft = FPR(copy(ft_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ft = FPR(ft_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SDC1X %s, %s(%s)", ft, rs, rt); + return img_format("SDC1X %s, %s(%s)", ft, rs, rt); } @@ -13475,17 +13034,17 @@ std::string NMD::SDC1X(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SDC1XS(uint64 instruction) +static char *SDC1XS(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ft_value = extract_ft_15_14_13_12_11(instruction); - std::string ft = FPR(copy(ft_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ft = FPR(ft_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SDC1XS %s, %s(%s)", ft, rs, rt); + return img_format("SDC1XS %s, %s(%s)", ft, rs, rt); } @@ -13499,17 +13058,16 @@ std::string NMD::SDC1XS(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SDC2(uint64 instruction) +static char *SDC2(uint64 instruction, Dis_info *info) { uint64 cs_value = extract_cs_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string cs = CPR(copy(cs_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rs = GPR(rs_value, info); - return img::format("SDC2 %s, %s(%s)", cs, s, rs); + return img_format("SDC2 CP%" PRIu64 ", %" PRId64 "(%s)", + cs_value, s_value, rs); } @@ -13523,19 +13081,19 @@ std::string NMD::SDC2(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SDM(uint64 instruction) +static char *SDM(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); uint64 count3_value = extract_count3_14_13_12(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); - std::string 
count3 = IMMEDIATE(encode_count3_from_count(count3_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); + uint64 count3 = encode_count3_from_count(count3_value); - return img::format("SDM %s, %s(%s), %s", rt, s, rs, count3); + return img_format("SDM %s, %" PRId64 "(%s), 0x%" PRIx64, + rt, s_value, rs, count3); } @@ -13549,15 +13107,15 @@ std::string NMD::SDM(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SDPC_48_(uint64 instruction) +static char *SDPC_48_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_41_40_39_38_37(instruction); int64 s_value = extract_s__se31_15_to_0_31_to_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 6); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 6, info); - return img::format("SDPC %s, %s", rt, s); + return img_format("SDPC %s, %s", rt, s); } @@ -13571,17 +13129,17 @@ std::string NMD::SDPC_48_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SDXS(uint64 instruction) +static char *SDXS(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SDXS %s, %s(%s)", rd, rs, rt); + return img_format("SDXS %s, %s(%s)", rd, rs, rt); } @@ -13595,17 +13153,17 @@ std::string NMD::SDXS(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SDX(uint64 instruction) +static char *SDX(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SDX %s, %s(%s)", rd, rs, rt); + return img_format("SDX %s, %s(%s)", rd, rs, rt); } @@ -13619,15 +13177,15 @@ std::string NMD::SDX(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SEB(uint64 instruction) +static char *SEB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SEB %s, %s", rt, rs); + return img_format("SEB %s, %s", rt, rs); } @@ -13641,15 +13199,15 @@ std::string NMD::SEB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SEH(uint64 instruction) +static char *SEH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SEH %s, %s", rt, rs); + return img_format("SEH %s, %s", rt, rs); } @@ -13663,17 +13221,17 @@ std::string NMD::SEH(uint64 
instruction) * rs ----- * rd ----- */ -std::string NMD::SEL_D(uint64 instruction) +static char *SEL_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("SEL.D %s, %s, %s", fd, fs, ft); + return img_format("SEL.D %s, %s, %s", fd, fs, ft); } @@ -13687,17 +13245,17 @@ std::string NMD::SEL_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SEL_S(uint64 instruction) +static char *SEL_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("SEL.S %s, %s, %s", fd, fs, ft); + return img_format("SEL.S %s, %s, %s", fd, fs, ft); } @@ -13711,17 +13269,17 @@ std::string NMD::SEL_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SELEQZ_D(uint64 instruction) +static char *SELEQZ_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("SELEQZ.D %s, %s, %s", fd, fs, ft); + return img_format("SELEQZ.D %s, %s, %s", fd, fs, ft); } @@ -13735,17 +13293,17 @@ std::string NMD::SELEQZ_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SELEQZ_S(uint64 instruction) +static char *SELEQZ_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("SELEQZ.S %s, %s, %s", fd, fs, ft); + return img_format("SELEQZ.S %s, %s, %s", fd, fs, ft); } @@ -13759,17 +13317,17 @@ std::string NMD::SELEQZ_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SELNEZ_D(uint64 instruction) +static char *SELNEZ_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("SELNEZ.D %s, %s, %s", fd, fs, ft); + return img_format("SELNEZ.D 
%s, %s, %s", fd, fs, ft); } @@ -13783,17 +13341,17 @@ std::string NMD::SELNEZ_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SELNEZ_S(uint64 instruction) +static char *SELNEZ_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("SELNEZ.S %s, %s, %s", fd, fs, ft); + return img_format("SELNEZ.S %s, %s, %s", fd, fs, ft); } @@ -13807,17 +13365,16 @@ std::string NMD::SELNEZ_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SEQI(uint64 instruction) +static char *SEQI(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SEQI %s, %s, %s", rt, rs, u); + return img_format("SEQI %s, %s, 0x%" PRIx64, rt, rs, u_value); } @@ -13831,17 +13388,16 @@ std::string NMD::SEQI(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SH_16_(uint64 instruction) +static char *SH_16_(uint64 instruction, Dis_info *info) { uint64 rtz3_value = extract_rtz3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); uint64 u_value = extract_u_2_1__s1(instruction); - std::string rtz3 = GPR(decode_gpr_gpr3_src_store(rtz3_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); + const char *rtz3 = GPR(decode_gpr_gpr3_src_store(rtz3_value, info), info); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); - return img::format("SH %s, %s(%s)", rtz3, u, rs3); + return img_format("SH %s, 0x%" PRIx64 "(%s)", rtz3, u_value, rs3); } @@ -13855,15 +13411,14 @@ std::string NMD::SH_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SH_GP_(uint64 instruction) +static char *SH_GP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_17_to_1__s1(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); - return img::format("SH %s, %s($%d)", rt, u, 28); + return img_format("SH %s, 0x%" PRIx64 "($%d)", rt, u_value, 28); } @@ -13877,17 +13432,16 @@ std::string NMD::SH_GP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SH_S9_(uint64 instruction) +static char *SH_S9_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SH %s, %s(%s)", rt, s, rs); + return img_format("SH %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -13901,17 +13455,16 @@ std::string NMD::SH_S9_(uint64 
instruction) * rs ----- * rd ----- */ -std::string NMD::SH_U12_(uint64 instruction) +static char *SH_U12_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SH %s, %s(%s)", rt, u, rs); + return img_format("SH %s, 0x%" PRIx64 "(%s)", rt, u_value, rs); } @@ -13925,17 +13478,16 @@ std::string NMD::SH_U12_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SHE(uint64 instruction) +static char *SHE(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHE %s, %s(%s)", rt, s, rs); + return img_format("SHE %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -13949,15 +13501,14 @@ std::string NMD::SHE(uint64 instruction) * shift ------ * ac -- */ -std::string NMD::SHILO(uint64 instruction) +static char *SHILO(uint64 instruction, Dis_info *info) { int64 shift_value = extract_shift__se5_21_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string shift = IMMEDIATE(copy(shift_value)); - std::string ac = AC(copy(ac_value)); + const char *ac = AC(ac_value, info); - return img::format("SHILO %s, %s", ac, shift); + return img_format("SHILO %s, 0x%" PRIx64, ac, shift_value); } @@ -13971,15 +13522,15 @@ std::string NMD::SHILO(uint64 instruction) * rs ----- * ac -- */ -std::string NMD::SHILOV(uint64 instruction) +static char *SHILOV(uint64 instruction, Dis_info *info) { uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ac_value = extract_ac_15_14(instruction); - std::string rs = GPR(copy(rs_value)); - std::string ac = AC(copy(ac_value)); + const char *rs = GPR(rs_value, info); + const char *ac = AC(ac_value, info); - return img::format("SHILOV %s, %s", ac, rs); + return img_format("SHILOV %s, %s", ac, rs); } @@ -13993,17 +13544,16 @@ std::string NMD::SHILOV(uint64 instruction) * rs ----- * sa ---- */ -std::string NMD::SHLL_PH(uint64 instruction) +static char *SHLL_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 sa_value = extract_sa_15_14_13_12(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string sa = IMMEDIATE(copy(sa_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHLL.PH %s, %s, %s", rt, rs, sa); + return img_format("SHLL.PH %s, %s, 0x%" PRIx64, rt, rs, sa_value); } @@ -14017,17 +13567,16 @@ std::string NMD::SHLL_PH(uint64 instruction) * rs ----- * sa --- */ -std::string NMD::SHLL_QB(uint64 instruction) +static char *SHLL_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 sa_value = extract_sa_15_14_13(instruction); - std::string rt = 
GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string sa = IMMEDIATE(copy(sa_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHLL.QB %s, %s, %s", rt, rs, sa); + return img_format("SHLL.QB %s, %s, 0x%" PRIx64, rt, rs, sa_value); } @@ -14042,17 +13591,16 @@ std::string NMD::SHLL_QB(uint64 instruction) * rs ----- * sa ---- */ -std::string NMD::SHLL_S_PH(uint64 instruction) +static char *SHLL_S_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 sa_value = extract_sa_15_14_13_12(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string sa = IMMEDIATE(copy(sa_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHLL_S.PH %s, %s, %s", rt, rs, sa); + return img_format("SHLL_S.PH %s, %s, 0x%" PRIx64, rt, rs, sa_value); } @@ -14066,17 +13614,16 @@ std::string NMD::SHLL_S_PH(uint64 instruction) * rs ----- * sa ----- */ -std::string NMD::SHLL_S_W(uint64 instruction) +static char *SHLL_S_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 sa_value = extract_sa_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string sa = IMMEDIATE(copy(sa_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHLL_S.W %s, %s, %s", rt, rs, sa); + return img_format("SHLL_S.W %s, %s, 0x%" PRIx64, rt, rs, sa_value); } @@ -14091,17 +13638,17 @@ std::string NMD::SHLL_S_W(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SHLLV_PH(uint64 instruction) +static char *SHLLV_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rd = GPR(rd_value, info); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHLLV.PH %s, %s, %s", rd, rt, rs); + return img_format("SHLLV.PH %s, %s, %s", rd, rt, rs); } @@ -14115,17 +13662,17 @@ std::string NMD::SHLLV_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SHLLV_QB(uint64 instruction) +static char *SHLLV_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rd = GPR(rd_value, info); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHLLV.QB %s, %s, %s", rd, rt, rs); + return img_format("SHLLV.QB %s, %s, %s", rd, rt, rs); } @@ -14140,17 +13687,17 @@ std::string NMD::SHLLV_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SHLLV_S_PH(uint64 instruction) +static char *SHLLV_S_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = 
extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rd = GPR(rd_value, info); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHLLV_S.PH %s, %s, %s", rd, rt, rs); + return img_format("SHLLV_S.PH %s, %s, %s", rd, rt, rs); } @@ -14164,17 +13711,17 @@ std::string NMD::SHLLV_S_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SHLLV_S_W(uint64 instruction) +static char *SHLLV_S_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rd = GPR(rd_value, info); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHLLV_S.W %s, %s, %s", rd, rt, rs); + return img_format("SHLLV_S.W %s, %s, %s", rd, rt, rs); } @@ -14188,17 +13735,16 @@ std::string NMD::SHLLV_S_W(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SHRA_PH(uint64 instruction) +static char *SHRA_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 sa_value = extract_sa_15_14_13_12(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string sa = IMMEDIATE(copy(sa_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHRA.PH %s, %s, %s", rt, rs, sa); + return img_format("SHRA.PH %s, %s, 0x%" PRIx64, rt, rs, sa_value); } @@ -14212,17 +13758,16 @@ std::string NMD::SHRA_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SHRA_QB(uint64 instruction) +static char *SHRA_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 sa_value = extract_sa_15_14_13(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string sa = IMMEDIATE(copy(sa_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHRA.QB %s, %s, %s", rt, rs, sa); + return img_format("SHRA.QB %s, %s, 0x%" PRIx64, rt, rs, sa_value); } @@ -14236,17 +13781,16 @@ std::string NMD::SHRA_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SHRA_R_PH(uint64 instruction) +static char *SHRA_R_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 sa_value = extract_sa_15_14_13_12(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string sa = IMMEDIATE(copy(sa_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHRA_R.PH %s, %s, %s", rt, rs, sa); + return img_format("SHRA_R.PH %s, %s, 0x%" PRIx64, rt, rs, sa_value); } @@ -14260,17 +13804,16 @@ std::string NMD::SHRA_R_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SHRA_R_QB(uint64 instruction) +static char *SHRA_R_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = 
extract_rs_20_19_18_17_16(instruction); uint64 sa_value = extract_sa_15_14_13(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string sa = IMMEDIATE(copy(sa_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHRA_R.QB %s, %s, %s", rt, rs, sa); + return img_format("SHRA_R.QB %s, %s, 0x%" PRIx64, rt, rs, sa_value); } @@ -14284,17 +13827,16 @@ std::string NMD::SHRA_R_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SHRA_R_W(uint64 instruction) +static char *SHRA_R_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 sa_value = extract_sa_15_14_13_12_11(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string sa = IMMEDIATE(copy(sa_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHRA_R.W %s, %s, %s", rt, rs, sa); + return img_format("SHRA_R.W %s, %s, 0x%" PRIx64, rt, rs, sa_value); } @@ -14308,17 +13850,17 @@ std::string NMD::SHRA_R_W(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SHRAV_PH(uint64 instruction) +static char *SHRAV_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rd = GPR(rd_value, info); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHRAV.PH %s, %s, %s", rd, rt, rs); + return img_format("SHRAV.PH %s, %s, %s", rd, rt, rs); } @@ -14332,17 +13874,17 @@ std::string NMD::SHRAV_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SHRAV_QB(uint64 instruction) +static char *SHRAV_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rd = GPR(rd_value, info); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHRAV.QB %s, %s, %s", rd, rt, rs); + return img_format("SHRAV.QB %s, %s, %s", rd, rt, rs); } @@ -14356,17 +13898,17 @@ std::string NMD::SHRAV_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SHRAV_R_PH(uint64 instruction) +static char *SHRAV_R_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rd = GPR(rd_value, info); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHRAV_R.PH %s, %s, %s", rd, rt, rs); + return img_format("SHRAV_R.PH %s, %s, %s", rd, rt, rs); } @@ -14380,17 +13922,17 @@ std::string NMD::SHRAV_R_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SHRAV_R_QB(uint64 instruction) +static char *SHRAV_R_QB(uint64 instruction, 
Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rd = GPR(rd_value, info); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHRAV_R.QB %s, %s, %s", rd, rt, rs); + return img_format("SHRAV_R.QB %s, %s, %s", rd, rt, rs); } @@ -14404,17 +13946,17 @@ std::string NMD::SHRAV_R_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SHRAV_R_W(uint64 instruction) +static char *SHRAV_R_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rd = GPR(rd_value, info); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHRAV_R.W %s, %s, %s", rd, rt, rs); + return img_format("SHRAV_R.W %s, %s, %s", rd, rt, rs); } @@ -14428,17 +13970,16 @@ std::string NMD::SHRAV_R_W(uint64 instruction) * rs ----- * sa ---- */ -std::string NMD::SHRL_PH(uint64 instruction) +static char *SHRL_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 sa_value = extract_sa_15_14_13_12(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string sa = IMMEDIATE(copy(sa_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHRL.PH %s, %s, %s", rt, rs, sa); + return img_format("SHRL.PH %s, %s, 0x%" PRIx64, rt, rs, sa_value); } @@ -14452,17 +13993,16 @@ std::string NMD::SHRL_PH(uint64 instruction) * rs ----- * sa --- */ -std::string NMD::SHRL_QB(uint64 instruction) +static char *SHRL_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 sa_value = extract_sa_15_14_13(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string sa = IMMEDIATE(copy(sa_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHRL.QB %s, %s, %s", rt, rs, sa); + return img_format("SHRL.QB %s, %s, 0x%" PRIx64, rt, rs, sa_value); } @@ -14477,17 +14017,17 @@ std::string NMD::SHRL_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SHRLV_PH(uint64 instruction) +static char *SHRLV_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rd = GPR(rd_value, info); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHRLV.PH %s, %s, %s", rd, rt, rs); + return img_format("SHRLV.PH %s, %s, %s", rd, rt, rs); } @@ -14501,17 +14041,17 @@ std::string NMD::SHRLV_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string 
NMD::SHRLV_QB(uint64 instruction) +static char *SHRLV_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rd = GPR(rd_value, info); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SHRLV.QB %s, %s, %s", rd, rt, rs); + return img_format("SHRLV.QB %s, %s, %s", rd, rt, rs); } @@ -14525,17 +14065,17 @@ std::string NMD::SHRLV_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SHX(uint64 instruction) +static char *SHX(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SHX %s, %s(%s)", rd, rs, rt); + return img_format("SHX %s, %s(%s)", rd, rs, rt); } @@ -14549,17 +14089,17 @@ std::string NMD::SHX(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SHXS(uint64 instruction) +static char *SHXS(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SHXS %s, %s(%s)", rd, rs, rt); + return img_format("SHXS %s, %s(%s)", rd, rs, rt); } @@ -14573,13 +14113,12 @@ std::string NMD::SHXS(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SIGRIE(uint64 instruction) +static char *SIGRIE(uint64 instruction, Dis_info *info) { uint64 code_value = extract_code_18_to_0(instruction); - std::string code = IMMEDIATE(copy(code_value)); - return img::format("SIGRIE %s", code); + return img_format("SIGRIE 0x%" PRIx64, code_value); } @@ -14593,17 +14132,17 @@ std::string NMD::SIGRIE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SLL_16_(uint64 instruction) +static char *SLL_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); uint64 shift3_value = extract_shift3_2_1_0(instruction); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); - std::string shift3 = IMMEDIATE(encode_shift3_from_shift(shift3_value)); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); + uint64 shift3 = encode_shift3_from_shift(shift3_value); - return img::format("SLL %s, %s, %s", rt3, rs3, shift3); + return img_format("SLL %s, %s, 0x%" PRIx64, rt3, rs3, shift3); } @@ -14617,17 +14156,16 @@ std::string NMD::SLL_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SLL_32_(uint64 instruction) +static char *SLL_32_(uint64 instruction, Dis_info *info) { uint64 rt_value = 
extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 shift_value = extract_shift_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string shift = IMMEDIATE(copy(shift_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SLL %s, %s, %s", rt, rs, shift); + return img_format("SLL %s, %s, 0x%" PRIx64, rt, rs, shift_value); } @@ -14641,17 +14179,17 @@ std::string NMD::SLL_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SLLV(uint64 instruction) +static char *SLLV(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SLLV %s, %s, %s", rd, rs, rt); + return img_format("SLLV %s, %s, %s", rd, rs, rt); } @@ -14665,17 +14203,17 @@ std::string NMD::SLLV(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SLT(uint64 instruction) +static char *SLT(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SLT %s, %s, %s", rd, rs, rt); + return img_format("SLT %s, %s, %s", rd, rs, rt); } @@ -14689,17 +14227,16 @@ std::string NMD::SLT(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SLTI(uint64 instruction) +static char *SLTI(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SLTI %s, %s, %s", rt, rs, u); + return img_format("SLTI %s, %s, 0x%" PRIx64, rt, rs, u_value); } @@ -14713,17 +14250,16 @@ std::string NMD::SLTI(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SLTIU(uint64 instruction) +static char *SLTIU(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SLTIU %s, %s, %s", rt, rs, u); + return img_format("SLTIU %s, %s, 0x%" PRIx64, rt, rs, u_value); } @@ -14737,17 +14273,17 @@ std::string NMD::SLTIU(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SLTU(uint64 instruction) +static char *SLTU(uint64 instruction, Dis_info *info) { uint64 rt_value = 
extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SLTU %s, %s, %s", rd, rs, rt); + return img_format("SLTU %s, %s, %s", rd, rs, rt); } @@ -14761,17 +14297,17 @@ std::string NMD::SLTU(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SOV(uint64 instruction) +static char *SOV(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SOV %s, %s, %s", rd, rs, rt); + return img_format("SOV %s, %s, %s", rd, rs, rt); } @@ -14785,13 +14321,12 @@ std::string NMD::SOV(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SPECIAL2(uint64 instruction) +static char *SPECIAL2(uint64 instruction, Dis_info *info) { uint64 op_value = extract_op_25_to_3(instruction); - std::string op = IMMEDIATE(copy(op_value)); - return img::format("SPECIAL2 %s", op); + return img_format("SPECIAL2 0x%" PRIx64, op_value); } @@ -14805,15 +14340,15 @@ std::string NMD::SPECIAL2(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SQRT_D(uint64 instruction) +static char *SQRT_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("SQRT.D %s, %s", ft, fs); + return img_format("SQRT.D %s, %s", ft, fs); } @@ -14827,15 +14362,15 @@ std::string NMD::SQRT_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SQRT_S(uint64 instruction) +static char *SQRT_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("SQRT.S %s, %s", ft, fs); + return img_format("SQRT.S %s, %s", ft, fs); } @@ -14849,17 +14384,16 @@ std::string NMD::SQRT_S(uint64 instruction) * rd ----- * sa ----- */ -std::string NMD::SRA(uint64 instruction) +static char *SRA(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 shift_value = extract_shift_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string shift = IMMEDIATE(copy(shift_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SRA %s, %s, %s", rt, rs, shift); + return img_format("SRA %s, %s, 0x%" PRIx64, rt, rs, shift_value); } @@ -14873,17 +14407,17 @@ std::string NMD::SRA(uint64 instruction) * rt ----- * rd ----- */ 
-std::string NMD::SRAV(uint64 instruction) +static char *SRAV(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SRAV %s, %s, %s", rd, rs, rt); + return img_format("SRAV %s, %s, %s", rd, rs, rt); } @@ -14897,17 +14431,17 @@ std::string NMD::SRAV(uint64 instruction) * rt ----- * rd ----- */ -std::string NMD::SRL_16_(uint64 instruction) +static char *SRL_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); uint64 shift3_value = extract_shift3_2_1_0(instruction); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); - std::string shift3 = IMMEDIATE(encode_shift3_from_shift(shift3_value)); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); + uint64 shift3 = encode_shift3_from_shift(shift3_value); - return img::format("SRL %s, %s, %s", rt3, rs3, shift3); + return img_format("SRL %s, %s, 0x%" PRIx64, rt3, rs3, shift3); } @@ -14921,17 +14455,16 @@ std::string NMD::SRL_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SRL_32_(uint64 instruction) +static char *SRL_32_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 shift_value = extract_shift_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string shift = IMMEDIATE(copy(shift_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SRL %s, %s, %s", rt, rs, shift); + return img_format("SRL %s, %s, 0x%" PRIx64, rt, rs, shift_value); } @@ -14945,17 +14478,17 @@ std::string NMD::SRL_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SRLV(uint64 instruction) +static char *SRLV(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SRLV %s, %s, %s", rd, rs, rt); + return img_format("SRLV %s, %s, %s", rd, rs, rt); } @@ -14969,17 +14502,17 @@ std::string NMD::SRLV(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SUB(uint64 instruction) +static char *SUB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SUB %s, %s, %s", rd, rs, 
rt); + return img_format("SUB %s, %s, %s", rd, rs, rt); } @@ -14993,17 +14526,17 @@ std::string NMD::SUB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SUB_D(uint64 instruction) +static char *SUB_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("SUB.D %s, %s, %s", fd, fs, ft); + return img_format("SUB.D %s, %s, %s", fd, fs, ft); } @@ -15017,17 +14550,17 @@ std::string NMD::SUB_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SUB_S(uint64 instruction) +static char *SUB_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); uint64 fd_value = extract_fd_15_14_13_12_11(instruction); - std::string fd = FPR(copy(fd_value)); - std::string fs = FPR(copy(fs_value)); - std::string ft = FPR(copy(ft_value)); + const char *fd = FPR(fd_value, info); + const char *fs = FPR(fs_value, info); + const char *ft = FPR(ft_value, info); - return img::format("SUB.S %s, %s, %s", fd, fs, ft); + return img_format("SUB.S %s, %s, %s", fd, fs, ft); } @@ -15041,17 +14574,17 @@ std::string NMD::SUB_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SUBQ_PH(uint64 instruction) +static char *SUBQ_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SUBQ.PH %s, %s, %s", rd, rs, rt); + return img_format("SUBQ.PH %s, %s, %s", rd, rs, rt); } @@ -15066,17 +14599,17 @@ std::string NMD::SUBQ_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SUBQ_S_PH(uint64 instruction) +static char *SUBQ_S_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SUBQ_S.PH %s, %s, %s", rd, rs, rt); + return img_format("SUBQ_S.PH %s, %s, %s", rd, rs, rt); } @@ -15091,17 +14624,17 @@ std::string NMD::SUBQ_S_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SUBQ_S_W(uint64 instruction) +static char *SUBQ_S_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt 
= GPR(rt_value, info); - return img::format("SUBQ_S.W %s, %s, %s", rd, rs, rt); + return img_format("SUBQ_S.W %s, %s, %s", rd, rs, rt); } @@ -15116,17 +14649,17 @@ std::string NMD::SUBQ_S_W(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SUBQH_PH(uint64 instruction) +static char *SUBQH_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SUBQH.PH %s, %s, %s", rd, rs, rt); + return img_format("SUBQH.PH %s, %s, %s", rd, rs, rt); } @@ -15141,17 +14674,17 @@ std::string NMD::SUBQH_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SUBQH_R_PH(uint64 instruction) +static char *SUBQH_R_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SUBQH_R.PH %s, %s, %s", rd, rs, rt); + return img_format("SUBQH_R.PH %s, %s, %s", rd, rs, rt); } @@ -15166,17 +14699,17 @@ std::string NMD::SUBQH_R_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SUBQH_R_W(uint64 instruction) +static char *SUBQH_R_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SUBQH_R.W %s, %s, %s", rd, rs, rt); + return img_format("SUBQH_R.W %s, %s, %s", rd, rs, rt); } @@ -15191,17 +14724,17 @@ std::string NMD::SUBQH_R_W(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SUBQH_W(uint64 instruction) +static char *SUBQH_W(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SUBQH.W %s, %s, %s", rd, rs, rt); + return img_format("SUBQH.W %s, %s, %s", rd, rs, rt); } @@ -15215,17 +14748,17 @@ std::string NMD::SUBQH_W(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SUBU_16_(uint64 instruction) +static char *SUBU_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); uint64 rd3_value = extract_rd3_3_2_1(instruction); - std::string rd3 = GPR(decode_gpr_gpr3(rd3_value)); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); - 
std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); + const char *rd3 = GPR(decode_gpr_gpr3(rd3_value, info), info); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); - return img::format("SUBU %s, %s, %s", rd3, rs3, rt3); + return img_format("SUBU %s, %s, %s", rd3, rs3, rt3); } @@ -15239,17 +14772,17 @@ std::string NMD::SUBU_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SUBU_32_(uint64 instruction) +static char *SUBU_32_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SUBU %s, %s, %s", rd, rs, rt); + return img_format("SUBU %s, %s, %s", rd, rs, rt); } @@ -15263,17 +14796,17 @@ std::string NMD::SUBU_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SUBU_PH(uint64 instruction) +static char *SUBU_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SUBU.PH %s, %s, %s", rd, rs, rt); + return img_format("SUBU.PH %s, %s, %s", rd, rs, rt); } @@ -15287,17 +14820,17 @@ std::string NMD::SUBU_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SUBU_QB(uint64 instruction) +static char *SUBU_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SUBU.QB %s, %s, %s", rd, rs, rt); + return img_format("SUBU.QB %s, %s, %s", rd, rs, rt); } @@ -15312,17 +14845,17 @@ std::string NMD::SUBU_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SUBU_S_PH(uint64 instruction) +static char *SUBU_S_PH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SUBU_S.PH %s, %s, %s", rd, rs, rt); + return img_format("SUBU_S.PH %s, %s, %s", rd, rs, rt); } @@ -15337,17 +14870,17 @@ std::string NMD::SUBU_S_PH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SUBU_S_QB(uint64 instruction) +static char *SUBU_S_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 
rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SUBU_S.QB %s, %s, %s", rd, rs, rt); + return img_format("SUBU_S.QB %s, %s, %s", rd, rs, rt); } @@ -15362,17 +14895,17 @@ std::string NMD::SUBU_S_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SUBUH_QB(uint64 instruction) +static char *SUBUH_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SUBUH.QB %s, %s, %s", rd, rs, rt); + return img_format("SUBUH.QB %s, %s, %s", rd, rs, rt); } @@ -15387,17 +14920,17 @@ std::string NMD::SUBUH_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SUBUH_R_QB(uint64 instruction) +static char *SUBUH_R_QB(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SUBUH_R.QB %s, %s, %s", rd, rs, rt); + return img_format("SUBUH_R.QB %s, %s, %s", rd, rs, rt); } @@ -15411,17 +14944,16 @@ std::string NMD::SUBUH_R_QB(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SW_16_(uint64 instruction) +static char *SW_16_(uint64 instruction, Dis_info *info) { uint64 rtz3_value = extract_rtz3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); uint64 u_value = extract_u_3_2_1_0__s2(instruction); - std::string rtz3 = GPR(decode_gpr_gpr3_src_store(rtz3_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); + const char *rtz3 = GPR(decode_gpr_gpr3_src_store(rtz3_value, info), info); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); - return img::format("SW %s, %s(%s)", rtz3, u, rs3); + return img_format("SW %s, 0x%" PRIx64 "(%s)", rtz3, u_value, rs3); } @@ -15435,17 +14967,16 @@ std::string NMD::SW_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SW_4X4_(uint64 instruction) +static char *SW_4X4_(uint64 instruction, Dis_info *info) { uint64 rtz4_value = extract_rtz4_9_7_6_5(instruction); uint64 rs4_value = extract_rs4_4_2_1_0(instruction); uint64 u_value = extract_u_3_8__s2(instruction); - std::string rtz4 = GPR(decode_gpr_gpr4_zero(rtz4_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs4 = GPR(decode_gpr_gpr4(rs4_value)); + const char *rtz4 = GPR(decode_gpr_gpr4_zero(rtz4_value, info), info); + const char *rs4 = GPR(decode_gpr_gpr4(rs4_value, info), info); - return img::format("SW %s, %s(%s)", rtz4, u, rs4); + return img_format("SW %s, 0x%" PRIx64 "(%s)", rtz4, u_value, rs4); } @@ -15459,15 +14990,14 @@ std::string 
NMD::SW_4X4_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SW_GP16_(uint64 instruction) +static char *SW_GP16_(uint64 instruction, Dis_info *info) { uint64 u_value = extract_u_6_5_4_3_2_1_0__s2(instruction); uint64 rtz3_value = extract_rtz3_9_8_7(instruction); - std::string rtz3 = GPR(decode_gpr_gpr3_src_store(rtz3_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rtz3 = GPR(decode_gpr_gpr3_src_store(rtz3_value, info), info); - return img::format("SW %s, %s($%d)", rtz3, u, 28); + return img_format("SW %s, 0x%" PRIx64 "($%d)", rtz3, u_value, 28); } @@ -15481,15 +15011,14 @@ std::string NMD::SW_GP16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SW_GP_(uint64 instruction) +static char *SW_GP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 u_value = extract_u_20_to_2__s2(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); - return img::format("SW %s, %s($%d)", rt, u, 28); + return img_format("SW %s, 0x%" PRIx64 "($%d)", rt, u_value, 28); } @@ -15503,17 +15032,16 @@ std::string NMD::SW_GP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SW_S9_(uint64 instruction) +static char *SW_S9_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SW %s, %s(%s)", rt, s, rs); + return img_format("SW %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -15527,15 +15055,14 @@ std::string NMD::SW_S9_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SW_SP_(uint64 instruction) +static char *SW_SP_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_9_8_7_6_5(instruction); uint64 u_value = extract_u_4_3_2_1_0__s2(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); - return img::format("SW %s, %s($%d)", rt, u, 29); + return img_format("SW %s, 0x%" PRIx64 "($%d)", rt, u_value, 29); } @@ -15549,17 +15076,16 @@ std::string NMD::SW_SP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SW_U12_(uint64 instruction) +static char *SW_U12_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SW %s, %s(%s)", rt, u, rs); + return img_format("SW %s, 0x%" PRIx64 "(%s)", rt, u_value, rs); } @@ -15573,15 +15099,14 @@ std::string NMD::SW_U12_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SWC1_GP_(uint64 instruction) +static char *SWC1_GP_(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 u_value = extract_u_17_to_2__s2(instruction); - std::string ft = FPR(copy(ft_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *ft = FPR(ft_value, info); - return 
img::format("SWC1 %s, %s($%d)", ft, u, 28); + return img_format("SWC1 %s, 0x%" PRIx64 "($%d)", ft, u_value, 28); } @@ -15595,17 +15120,16 @@ std::string NMD::SWC1_GP_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SWC1_S9_(uint64 instruction) +static char *SWC1_S9_(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string ft = FPR(copy(ft_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *ft = FPR(ft_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SWC1 %s, %s(%s)", ft, s, rs); + return img_format("SWC1 %s, %" PRId64 "(%s)", ft, s_value, rs); } @@ -15619,17 +15143,16 @@ std::string NMD::SWC1_S9_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SWC1_U12_(uint64 instruction) +static char *SWC1_U12_(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string ft = FPR(copy(ft_value)); - std::string u = IMMEDIATE(copy(u_value)); - std::string rs = GPR(copy(rs_value)); + const char *ft = FPR(ft_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SWC1 %s, %s(%s)", ft, u, rs); + return img_format("SWC1 %s, 0x%" PRIx64 "(%s)", ft, u_value, rs); } @@ -15643,17 +15166,17 @@ std::string NMD::SWC1_U12_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SWC1X(uint64 instruction) +static char *SWC1X(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ft_value = extract_ft_15_14_13_12_11(instruction); - std::string ft = FPR(copy(ft_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ft = FPR(ft_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SWC1X %s, %s(%s)", ft, rs, rt); + return img_format("SWC1X %s, %s(%s)", ft, rs, rt); } @@ -15667,17 +15190,17 @@ std::string NMD::SWC1X(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SWC1XS(uint64 instruction) +static char *SWC1XS(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 ft_value = extract_ft_15_14_13_12_11(instruction); - std::string ft = FPR(copy(ft_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *ft = FPR(ft_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SWC1XS %s, %s(%s)", ft, rs, rt); + return img_format("SWC1XS %s, %s(%s)", ft, rs, rt); } @@ -15691,17 +15214,16 @@ std::string NMD::SWC1XS(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SWC2(uint64 instruction) +static char *SWC2(uint64 instruction, Dis_info *info) { uint64 cs_value = extract_cs_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string cs = CPR(copy(cs_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rs = GPR(rs_value, info); - return img::format("SWC2 
%s, %s(%s)", cs, s, rs); + return img_format("SWC2 CP%" PRIu64 ", %" PRId64 "(%s)", + cs_value, s_value, rs); } @@ -15715,17 +15237,16 @@ std::string NMD::SWC2(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SWE(uint64 instruction) +static char *SWE(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("SWE %s, %s(%s)", rt, s, rs); + return img_format("SWE %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -15739,19 +15260,19 @@ std::string NMD::SWE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SWM(uint64 instruction) +static char *SWM(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); uint64 count3_value = extract_count3_14_13_12(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); - std::string count3 = IMMEDIATE(encode_count3_from_count(count3_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); + uint64 count3 = encode_count3_from_count(count3_value); - return img::format("SWM %s, %s(%s), %s", rt, s, rs, count3); + return img_format("SWM %s, %" PRId64 "(%s), 0x%" PRIx64, + rt, s_value, rs, count3); } @@ -15765,15 +15286,15 @@ std::string NMD::SWM(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SWPC_48_(uint64 instruction) +static char *SWPC_48_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_41_40_39_38_37(instruction); int64 s_value = extract_s__se31_15_to_0_31_to_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = ADDRESS(encode_s_from_address(s_value), 6); + const char *rt = GPR(rt_value, info); + g_autofree char *s = ADDRESS(s_value, 6, info); - return img::format("SWPC %s, %s", rt, s); + return img_format("SWPC %s, %s", rt, s); } @@ -15787,17 +15308,17 @@ std::string NMD::SWPC_48_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SWX(uint64 instruction) +static char *SWX(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SWX %s, %s(%s)", rd, rs, rt); + return img_format("SWX %s, %s(%s)", rd, rs, rt); } @@ -15811,17 +15332,17 @@ std::string NMD::SWX(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SWXS(uint64 instruction) +static char *SWXS(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = 
GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("SWXS %s, %s(%s)", rd, rs, rt); + return img_format("SWXS %s, %s(%s)", rd, rs, rt); } @@ -15835,13 +15356,12 @@ std::string NMD::SWXS(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SYNC(uint64 instruction) +static char *SYNC(uint64 instruction, Dis_info *info) { uint64 stype_value = extract_stype_20_19_18_17_16(instruction); - std::string stype = IMMEDIATE(copy(stype_value)); - return img::format("SYNC %s", stype); + return img_format("SYNC 0x%" PRIx64, stype_value); } @@ -15855,15 +15375,14 @@ std::string NMD::SYNC(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SYNCI(uint64 instruction) +static char *SYNCI(uint64 instruction, Dis_info *info) { uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rs = GPR(rs_value, info); - return img::format("SYNCI %s(%s)", s, rs); + return img_format("SYNCI %" PRId64 "(%s)", s_value, rs); } @@ -15877,15 +15396,14 @@ std::string NMD::SYNCI(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SYNCIE(uint64 instruction) +static char *SYNCIE(uint64 instruction, Dis_info *info) { uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rs = GPR(rs_value, info); - return img::format("SYNCIE %s(%s)", s, rs); + return img_format("SYNCIE %" PRId64 "(%s)", s_value, rs); } @@ -15899,13 +15417,12 @@ std::string NMD::SYNCIE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::SYSCALL_16_(uint64 instruction) +static char *SYSCALL_16_(uint64 instruction, Dis_info *info) { uint64 code_value = extract_code_1_0(instruction); - std::string code = IMMEDIATE(copy(code_value)); - return img::format("SYSCALL %s", code); + return img_format("SYSCALL 0x%" PRIx64, code_value); } @@ -15917,13 +15434,12 @@ std::string NMD::SYSCALL_16_(uint64 instruction) * 00000000000010 * code ------------------ */ -std::string NMD::SYSCALL_32_(uint64 instruction) +static char *SYSCALL_32_(uint64 instruction, Dis_info *info) { uint64 code_value = extract_code_17_to_0(instruction); - std::string code = IMMEDIATE(copy(code_value)); - return img::format("SYSCALL %s", code); + return img_format("SYSCALL 0x%" PRIx64, code_value); } @@ -15937,15 +15453,15 @@ std::string NMD::SYSCALL_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::TEQ(uint64 instruction) +static char *TEQ(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("TEQ %s, %s", rs, rt); + return img_format("TEQ %s, %s", rs, rt); } @@ -15959,11 +15475,11 @@ std::string NMD::TEQ(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::TLBGINV(uint64 instruction) +static char *TLBGINV(uint64 instruction, Dis_info *info) { (void)instruction; - return "TLBGINV "; + return g_strdup("TLBGINV "); } @@ -15977,11 +15493,11 @@ std::string NMD::TLBGINV(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::TLBGINVF(uint64 instruction) +static char 
*TLBGINVF(uint64 instruction, Dis_info *info) { (void)instruction; - return "TLBGINVF "; + return g_strdup("TLBGINVF "); } @@ -15995,11 +15511,11 @@ std::string NMD::TLBGINVF(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::TLBGP(uint64 instruction) +static char *TLBGP(uint64 instruction, Dis_info *info) { (void)instruction; - return "TLBGP "; + return g_strdup("TLBGP "); } @@ -16013,11 +15529,11 @@ std::string NMD::TLBGP(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::TLBGR(uint64 instruction) +static char *TLBGR(uint64 instruction, Dis_info *info) { (void)instruction; - return "TLBGR "; + return g_strdup("TLBGR "); } @@ -16031,11 +15547,11 @@ std::string NMD::TLBGR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::TLBGWI(uint64 instruction) +static char *TLBGWI(uint64 instruction, Dis_info *info) { (void)instruction; - return "TLBGWI "; + return g_strdup("TLBGWI "); } @@ -16049,11 +15565,11 @@ std::string NMD::TLBGWI(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::TLBGWR(uint64 instruction) +static char *TLBGWR(uint64 instruction, Dis_info *info) { (void)instruction; - return "TLBGWR "; + return g_strdup("TLBGWR "); } @@ -16067,11 +15583,11 @@ std::string NMD::TLBGWR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::TLBINV(uint64 instruction) +static char *TLBINV(uint64 instruction, Dis_info *info) { (void)instruction; - return "TLBINV "; + return g_strdup("TLBINV "); } @@ -16085,11 +15601,11 @@ std::string NMD::TLBINV(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::TLBINVF(uint64 instruction) +static char *TLBINVF(uint64 instruction, Dis_info *info) { (void)instruction; - return "TLBINVF "; + return g_strdup("TLBINVF "); } @@ -16103,11 +15619,11 @@ std::string NMD::TLBINVF(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::TLBP(uint64 instruction) +static char *TLBP(uint64 instruction, Dis_info *info) { (void)instruction; - return "TLBP "; + return g_strdup("TLBP "); } @@ -16121,11 +15637,11 @@ std::string NMD::TLBP(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::TLBR(uint64 instruction) +static char *TLBR(uint64 instruction, Dis_info *info) { (void)instruction; - return "TLBR "; + return g_strdup("TLBR "); } @@ -16139,11 +15655,11 @@ std::string NMD::TLBR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::TLBWI(uint64 instruction) +static char *TLBWI(uint64 instruction, Dis_info *info) { (void)instruction; - return "TLBWI "; + return g_strdup("TLBWI "); } @@ -16157,11 +15673,11 @@ std::string NMD::TLBWI(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::TLBWR(uint64 instruction) +static char *TLBWR(uint64 instruction, Dis_info *info) { (void)instruction; - return "TLBWR "; + return g_strdup("TLBWR "); } @@ -16175,15 +15691,15 @@ std::string NMD::TLBWR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::TNE(uint64 instruction) +static char *TNE(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("TNE %s, %s", rs, rt); + return img_format("TNE %s, %s", rs, rt); } @@ -16197,15 +15713,15 @@ std::string NMD::TNE(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::TRUNC_L_D(uint64 instruction) +static char *TRUNC_L_D(uint64 instruction, Dis_info *info) { 
uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("TRUNC.L.D %s, %s", ft, fs); + return img_format("TRUNC.L.D %s, %s", ft, fs); } @@ -16219,15 +15735,15 @@ std::string NMD::TRUNC_L_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::TRUNC_L_S(uint64 instruction) +static char *TRUNC_L_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("TRUNC.L.S %s, %s", ft, fs); + return img_format("TRUNC.L.S %s, %s", ft, fs); } @@ -16241,15 +15757,15 @@ std::string NMD::TRUNC_L_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::TRUNC_W_D(uint64 instruction) +static char *TRUNC_W_D(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("TRUNC.W.D %s, %s", ft, fs); + return img_format("TRUNC.W.D %s, %s", ft, fs); } @@ -16263,15 +15779,15 @@ std::string NMD::TRUNC_W_D(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::TRUNC_W_S(uint64 instruction) +static char *TRUNC_W_S(uint64 instruction, Dis_info *info) { uint64 ft_value = extract_ft_25_24_23_22_21(instruction); uint64 fs_value = extract_fs_20_19_18_17_16(instruction); - std::string ft = FPR(copy(ft_value)); - std::string fs = FPR(copy(fs_value)); + const char *ft = FPR(ft_value, info); + const char *fs = FPR(fs_value, info); - return img::format("TRUNC.W.S %s, %s", ft, fs); + return img_format("TRUNC.W.S %s, %s", ft, fs); } @@ -16285,19 +15801,19 @@ std::string NMD::TRUNC_W_S(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::UALDM(uint64 instruction) +static char *UALDM(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); uint64 count3_value = extract_count3_14_13_12(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); - std::string count3 = IMMEDIATE(encode_count3_from_count(count3_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); + uint64 count3 = encode_count3_from_count(count3_value); - return img::format("UALDM %s, %s(%s), %s", rt, s, rs, count3); + return img_format("UALDM %s, %" PRId64 "(%s), 0x%" PRIx64, + rt, s_value, rs, count3); } @@ -16311,17 +15827,16 @@ std::string NMD::UALDM(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::UALH(uint64 instruction) +static char *UALH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = 
GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("UALH %s, %s(%s)", rt, s, rs); + return img_format("UALH %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -16335,19 +15850,19 @@ std::string NMD::UALH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::UALWM(uint64 instruction) +static char *UALWM(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); uint64 count3_value = extract_count3_14_13_12(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); - std::string count3 = IMMEDIATE(encode_count3_from_count(count3_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); + uint64 count3 = encode_count3_from_count(count3_value); - return img::format("UALWM %s, %s(%s), %s", rt, s, rs, count3); + return img_format("UALWM %s, %" PRId64 "(%s), 0x%" PRIx64, + rt, s_value, rs, count3); } @@ -16361,19 +15876,19 @@ std::string NMD::UALWM(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::UASDM(uint64 instruction) +static char *UASDM(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); uint64 count3_value = extract_count3_14_13_12(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); - std::string count3 = IMMEDIATE(encode_count3_from_count(count3_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); + uint64 count3 = encode_count3_from_count(count3_value); - return img::format("UASDM %s, %s(%s), %s", rt, s, rs, count3); + return img_format("UASDM %s, %" PRId64 "(%s), 0x%" PRIx64, + rt, s_value, rs, count3); } @@ -16387,17 +15902,16 @@ std::string NMD::UASDM(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::UASH(uint64 instruction) +static char *UASH(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("UASH %s, %s(%s)", rt, s, rs); + return img_format("UASH %s, %" PRId64 "(%s)", rt, s_value, rs); } @@ -16411,19 +15925,19 @@ std::string NMD::UASH(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::UASWM(uint64 instruction) +static char *UASWM(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); int64 s_value = extract_s__se8_15_7_6_5_4_3_2_1_0(instruction); uint64 count3_value = extract_count3_14_13_12(instruction); - std::string rt = GPR(copy(rt_value)); - std::string s = IMMEDIATE(copy(s_value)); - std::string rs = GPR(copy(rs_value)); - std::string count3 = IMMEDIATE(encode_count3_from_count(count3_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); + uint64 count3 = encode_count3_from_count(count3_value); - return 
img::format("UASWM %s, %s(%s), %s", rt, s, rs, count3); + return img_format("UASWM %s, %" PRId64 "(%s), 0x%" PRIx64, + rt, s_value, rs, count3); } @@ -16437,13 +15951,12 @@ std::string NMD::UASWM(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::UDI(uint64 instruction) +static char *UDI(uint64 instruction, Dis_info *info) { uint64 op_value = extract_op_25_to_3(instruction); - std::string op = IMMEDIATE(copy(op_value)); - return img::format("UDI %s", op); + return img_format("UDI 0x%" PRIx64, op_value); } @@ -16455,13 +15968,12 @@ std::string NMD::UDI(uint64 instruction) * 001000 1100001101111111 * code ---------- */ -std::string NMD::WAIT(uint64 instruction) +static char *WAIT(uint64 instruction, Dis_info *info) { uint64 code_value = extract_code_25_24_23_22_21_20_19_18_17_16(instruction); - std::string code = IMMEDIATE(copy(code_value)); - return img::format("WAIT %s", code); + return img_format("WAIT 0x%" PRIx64, code_value); } @@ -16475,15 +15987,14 @@ std::string NMD::WAIT(uint64 instruction) * rt ----- * mask ------- */ -std::string NMD::WRDSP(uint64 instruction) +static char *WRDSP(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 mask_value = extract_mask_20_19_18_17_16_15_14(instruction); - std::string rt = GPR(copy(rt_value)); - std::string mask = IMMEDIATE(copy(mask_value)); + const char *rt = GPR(rt_value, info); - return img::format("WRDSP %s, %s", rt, mask); + return img_format("WRDSP %s, 0x%" PRIx64, rt, mask_value); } @@ -16497,15 +16008,15 @@ std::string NMD::WRDSP(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::WRPGPR(uint64 instruction) +static char *WRPGPR(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("WRPGPR %s, %s", rt, rs); + return img_format("WRPGPR %s, %s", rt, rs); } @@ -16519,15 +16030,15 @@ std::string NMD::WRPGPR(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::XOR_16_(uint64 instruction) +static char *XOR_16_(uint64 instruction, Dis_info *info) { uint64 rt3_value = extract_rt3_9_8_7(instruction); uint64 rs3_value = extract_rs3_6_5_4(instruction); - std::string rs3 = GPR(decode_gpr_gpr3(rs3_value)); - std::string rt3 = GPR(decode_gpr_gpr3(rt3_value)); + const char *rs3 = GPR(decode_gpr_gpr3(rs3_value, info), info); + const char *rt3 = GPR(decode_gpr_gpr3(rt3_value, info), info); - return img::format("XOR %s, %s", rs3, rt3); + return img_format("XOR %s, %s", rs3, rt3); } @@ -16541,17 +16052,17 @@ std::string NMD::XOR_16_(uint64 instruction) * rs ----- * rd ----- */ -std::string NMD::XOR_32_(uint64 instruction) +static char *XOR_32_(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 rd_value = extract_rd_15_14_13_12_11(instruction); - std::string rd = GPR(copy(rd_value)); - std::string rs = GPR(copy(rs_value)); - std::string rt = GPR(copy(rt_value)); + const char *rd = GPR(rd_value, info); + const char *rs = GPR(rs_value, info); + const char *rt = GPR(rt_value, info); - return img::format("XOR %s, %s, %s", rd, rs, rt); + return img_format("XOR %s, %s, %s", rd, rs, rt); } @@ -16565,17 +16076,16 @@ std::string NMD::XOR_32_(uint64 instruction) * rs ----- * rd ----- */ -std::string 
NMD::XORI(uint64 instruction) +static char *XORI(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); uint64 u_value = extract_u_11_10_9_8_7_6_5_4_3_2_1_0(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); - std::string u = IMMEDIATE(copy(u_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("XORI %s, %s, %s", rt, rs, u); + return img_format("XORI %s, %s, 0x%" PRIx64, rt, rs, u_value); } @@ -16588,15 +16098,15 @@ std::string NMD::XORI(uint64 instruction) * rt ----- * rs ----- */ -std::string NMD::YIELD(uint64 instruction) +static char *YIELD(uint64 instruction, Dis_info *info) { uint64 rt_value = extract_rt_25_24_23_22_21(instruction); uint64 rs_value = extract_rs_20_19_18_17_16(instruction); - std::string rt = GPR(copy(rt_value)); - std::string rs = GPR(copy(rs_value)); + const char *rt = GPR(rt_value, info); + const char *rs = GPR(rs_value, info); - return img::format("YIELD %s, %s", rt, rs); + return img_format("YIELD %s, %s", rt, rs); } @@ -16703,83 +16213,83 @@ std::string NMD::YIELD(uint64 instruction) * */ -NMD::Pool NMD::P_SYSCALL[2] = { +static const Pool P_SYSCALL[2] = { { instruction , 0 , 0 , 32, - 0xfffc0000, 0x00080000, &NMD::SYSCALL_32_ , 0, + 0xfffc0000, 0x00080000, &SYSCALL_32_ , 0, 0x0 }, /* SYSCALL[32] */ { instruction , 0 , 0 , 32, - 0xfffc0000, 0x000c0000, &NMD::HYPCALL , 0, + 0xfffc0000, 0x000c0000, &HYPCALL , 0, CP0_ | VZ_ }, /* HYPCALL */ }; -NMD::Pool NMD::P_RI[4] = { +static const Pool P_RI[4] = { { instruction , 0 , 0 , 32, - 0xfff80000, 0x00000000, &NMD::SIGRIE , 0, + 0xfff80000, 0x00000000, &SIGRIE , 0, 0x0 }, /* SIGRIE */ { pool , P_SYSCALL , 2 , 32, 0xfff80000, 0x00080000, 0 , 0, 0x0 }, /* P.SYSCALL */ { instruction , 0 , 0 , 32, - 0xfff80000, 0x00100000, &NMD::BREAK_32_ , 0, + 0xfff80000, 0x00100000, &BREAK_32_ , 0, 0x0 }, /* BREAK[32] */ { instruction , 0 , 0 , 32, - 0xfff80000, 0x00180000, &NMD::SDBBP_32_ , 0, + 0xfff80000, 0x00180000, &SDBBP_32_ , 0, EJTAG_ }, /* SDBBP[32] */ }; -NMD::Pool NMD::P_ADDIU[2] = { +static const Pool P_ADDIU[2] = { { pool , P_RI , 4 , 32, 0xffe00000, 0x00000000, 0 , 0, 0x0 }, /* P.RI */ { instruction , 0 , 0 , 32, - 0xfc000000, 0x00000000, &NMD::ADDIU_32_ , &NMD::ADDIU_32__cond , + 0xfc000000, 0x00000000, &ADDIU_32_ , &ADDIU_32__cond , 0x0 }, /* ADDIU[32] */ }; -NMD::Pool NMD::P_TRAP[2] = { +static const Pool P_TRAP[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000000, &NMD::TEQ , 0, + 0xfc0007ff, 0x20000000, &TEQ , 0, XMMS_ }, /* TEQ */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000400, &NMD::TNE , 0, + 0xfc0007ff, 0x20000400, &TNE , 0, XMMS_ }, /* TNE */ }; -NMD::Pool NMD::P_CMOVE[2] = { +static const Pool P_CMOVE[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000210, &NMD::MOVZ , 0, + 0xfc0007ff, 0x20000210, &MOVZ , 0, 0x0 }, /* MOVZ */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000610, &NMD::MOVN , 0, + 0xfc0007ff, 0x20000610, &MOVN , 0, 0x0 }, /* MOVN */ }; -NMD::Pool NMD::P_D_MT_VPE[2] = { +static const Pool P_D_MT_VPE[2] = { { instruction , 0 , 0 , 32, - 0xfc1f3fff, 0x20010ab0, &NMD::DMT , 0, + 0xfc1f3fff, 0x20010ab0, &DMT , 0, MT_ }, /* DMT */ { instruction , 0 , 0 , 32, - 0xfc1f3fff, 0x20000ab0, &NMD::DVPE , 0, + 0xfc1f3fff, 0x20000ab0, &DVPE , 0, MT_ }, /* DVPE */ }; -NMD::Pool NMD::P_E_MT_VPE[2] = { +static const Pool P_E_MT_VPE[2] = { { instruction , 0 , 0 , 32, - 0xfc1f3fff, 0x20010eb0, &NMD::EMT , 
0, + 0xfc1f3fff, 0x20010eb0, &EMT , 0, MT_ }, /* EMT */ { instruction , 0 , 0 , 32, - 0xfc1f3fff, 0x20000eb0, &NMD::EVPE , 0, + 0xfc1f3fff, 0x20000eb0, &EVPE , 0, MT_ }, /* EVPE */ }; -NMD::Pool NMD::_P_MT_VPE[2] = { +static const Pool _P_MT_VPE[2] = { { pool , P_D_MT_VPE , 2 , 32, 0xfc003fff, 0x20000ab0, 0 , 0, 0x0 }, /* P.D_MT_VPE */ @@ -16789,7 +16299,7 @@ NMD::Pool NMD::_P_MT_VPE[2] = { }; -NMD::Pool NMD::P_MT_VPE[8] = { +static const Pool P_MT_VPE[8] = { { reserved_block , 0 , 0 , 32, 0xfc003bff, 0x200002b0, 0 , 0, 0x0 }, /* P.MT_VPE~*(0) */ @@ -16817,38 +16327,38 @@ NMD::Pool NMD::P_MT_VPE[8] = { }; -NMD::Pool NMD::P_DVP[2] = { +static const Pool P_DVP[2] = { { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x20000390, &NMD::DVP , 0, + 0xfc00ffff, 0x20000390, &DVP , 0, 0x0 }, /* DVP */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x20000790, &NMD::EVP , 0, + 0xfc00ffff, 0x20000790, &EVP , 0, 0x0 }, /* EVP */ }; -NMD::Pool NMD::P_SLTU[2] = { +static const Pool P_SLTU[2] = { { pool , P_DVP , 2 , 32, 0xfc00fbff, 0x20000390, 0 , 0, 0x0 }, /* P.DVP */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000390, &NMD::SLTU , &NMD::SLTU_cond , + 0xfc0003ff, 0x20000390, &SLTU , &SLTU_cond , 0x0 }, /* SLTU */ }; -NMD::Pool NMD::_POOL32A0[128] = { +static const Pool _POOL32A0[128] = { { pool , P_TRAP , 2 , 32, 0xfc0003ff, 0x20000000, 0 , 0, 0x0 }, /* P.TRAP */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000008, &NMD::SEB , 0, + 0xfc0003ff, 0x20000008, &SEB , 0, XMMS_ }, /* SEB */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000010, &NMD::SLLV , 0, + 0xfc0003ff, 0x20000010, &SLLV , 0, 0x0 }, /* SLLV */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000018, &NMD::MUL_32_ , 0, + 0xfc0003ff, 0x20000018, &MUL_32_ , 0, 0x0 }, /* MUL[32] */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000020, 0 , 0, @@ -16857,22 +16367,22 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x20000028, 0 , 0, 0x0 }, /* _POOL32A0~*(5) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000030, &NMD::MFC0 , 0, + 0xfc0003ff, 0x20000030, &MFC0 , 0, 0x0 }, /* MFC0 */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000038, &NMD::MFHC0 , 0, + 0xfc0003ff, 0x20000038, &MFHC0 , 0, CP0_ | MVH_ }, /* MFHC0 */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000040, 0 , 0, 0x0 }, /* _POOL32A0~*(8) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000048, &NMD::SEH , 0, + 0xfc0003ff, 0x20000048, &SEH , 0, 0x0 }, /* SEH */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000050, &NMD::SRLV , 0, + 0xfc0003ff, 0x20000050, &SRLV , 0, 0x0 }, /* SRLV */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000058, &NMD::MUH , 0, + 0xfc0003ff, 0x20000058, &MUH , 0, 0x0 }, /* MUH */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000060, 0 , 0, @@ -16881,10 +16391,10 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x20000068, 0 , 0, 0x0 }, /* _POOL32A0~*(13) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000070, &NMD::MTC0 , 0, + 0xfc0003ff, 0x20000070, &MTC0 , 0, CP0_ }, /* MTC0 */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000078, &NMD::MTHC0 , 0, + 0xfc0003ff, 0x20000078, &MTHC0 , 0, CP0_ | MVH_ }, /* MTHC0 */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000080, 0 , 0, @@ -16893,10 +16403,10 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x20000088, 0 , 0, 0x0 }, /* _POOL32A0~*(17) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000090, &NMD::SRAV , 0, + 0xfc0003ff, 0x20000090, &SRAV , 0, 0x0 }, /* SRAV */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000098, &NMD::MULU , 0, + 0xfc0003ff, 0x20000098, &MULU , 0, 0x0 }, /* MULU */ { reserved_block , 0 , 0 , 32, 
0xfc0003ff, 0x200000a0, 0 , 0, @@ -16905,10 +16415,10 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x200000a8, 0 , 0, 0x0 }, /* _POOL32A0~*(21) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200000b0, &NMD::MFGC0 , 0, + 0xfc0003ff, 0x200000b0, &MFGC0 , 0, CP0_ | VZ_ }, /* MFGC0 */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200000b8, &NMD::MFHGC0 , 0, + 0xfc0003ff, 0x200000b8, &MFHGC0 , 0, CP0_ | VZ_ | MVH_ }, /* MFHGC0 */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200000c0, 0 , 0, @@ -16917,10 +16427,10 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x200000c8, 0 , 0, 0x0 }, /* _POOL32A0~*(25) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200000d0, &NMD::ROTRV , 0, + 0xfc0003ff, 0x200000d0, &ROTRV , 0, 0x0 }, /* ROTRV */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200000d8, &NMD::MUHU , 0, + 0xfc0003ff, 0x200000d8, &MUHU , 0, 0x0 }, /* MUHU */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200000e0, 0 , 0, @@ -16929,10 +16439,10 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x200000e8, 0 , 0, 0x0 }, /* _POOL32A0~*(29) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200000f0, &NMD::MTGC0 , 0, + 0xfc0003ff, 0x200000f0, &MTGC0 , 0, CP0_ | VZ_ }, /* MTGC0 */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200000f8, &NMD::MTHGC0 , 0, + 0xfc0003ff, 0x200000f8, &MTHGC0 , 0, CP0_ | VZ_ | MVH_ }, /* MTHGC0 */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000100, 0 , 0, @@ -16941,10 +16451,10 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x20000108, 0 , 0, 0x0 }, /* _POOL32A0~*(33) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000110, &NMD::ADD , 0, + 0xfc0003ff, 0x20000110, &ADD , 0, XMMS_ }, /* ADD */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000118, &NMD::DIV , 0, + 0xfc0003ff, 0x20000118, &DIV , 0, 0x0 }, /* DIV */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000120, 0 , 0, @@ -16953,7 +16463,7 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x20000128, 0 , 0, 0x0 }, /* _POOL32A0~*(37) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000130, &NMD::DMFC0 , 0, + 0xfc0003ff, 0x20000130, &DMFC0 , 0, CP0_ | MIPS64_ }, /* DMFC0 */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000138, 0 , 0, @@ -16965,10 +16475,10 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x20000148, 0 , 0, 0x0 }, /* _POOL32A0~*(41) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000150, &NMD::ADDU_32_ , 0, + 0xfc0003ff, 0x20000150, &ADDU_32_ , 0, 0x0 }, /* ADDU[32] */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000158, &NMD::MOD , 0, + 0xfc0003ff, 0x20000158, &MOD , 0, 0x0 }, /* MOD */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000160, 0 , 0, @@ -16977,7 +16487,7 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x20000168, 0 , 0, 0x0 }, /* _POOL32A0~*(45) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000170, &NMD::DMTC0 , 0, + 0xfc0003ff, 0x20000170, &DMTC0 , 0, CP0_ | MIPS64_ }, /* DMTC0 */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000178, 0 , 0, @@ -16989,10 +16499,10 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x20000188, 0 , 0, 0x0 }, /* _POOL32A0~*(49) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000190, &NMD::SUB , 0, + 0xfc0003ff, 0x20000190, &SUB , 0, XMMS_ }, /* SUB */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000198, &NMD::DIVU , 0, + 0xfc0003ff, 0x20000198, &DIVU , 0, 0x0 }, /* DIVU */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200001a0, 0 , 0, @@ -17001,22 +16511,22 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x200001a8, 0 , 0, 0x0 }, /* _POOL32A0~*(53) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200001b0, &NMD::DMFGC0 , 0, + 0xfc0003ff, 0x200001b0, &DMFGC0 
, 0, CP0_ | MIPS64_ | VZ_}, /* DMFGC0 */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200001b8, 0 , 0, 0x0 }, /* _POOL32A0~*(55) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200001c0, &NMD::RDHWR , 0, + 0xfc0003ff, 0x200001c0, &RDHWR , 0, XMMS_ }, /* RDHWR */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200001c8, 0 , 0, 0x0 }, /* _POOL32A0~*(57) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200001d0, &NMD::SUBU_32_ , 0, + 0xfc0003ff, 0x200001d0, &SUBU_32_ , 0, 0x0 }, /* SUBU[32] */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200001d8, &NMD::MODU , 0, + 0xfc0003ff, 0x200001d8, &MODU , 0, 0x0 }, /* MODU */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200001e0, 0 , 0, @@ -17025,7 +16535,7 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x200001e8, 0 , 0, 0x0 }, /* _POOL32A0~*(61) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200001f0, &NMD::DMTGC0 , 0, + 0xfc0003ff, 0x200001f0, &DMTGC0 , 0, CP0_ | MIPS64_ | VZ_}, /* DMTGC0 */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200001f8, 0 , 0, @@ -17046,13 +16556,13 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x20000220, 0 , 0, 0x0 }, /* _POOL32A0~*(68) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000228, &NMD::FORK , 0, + 0xfc0003ff, 0x20000228, &FORK , 0, MT_ }, /* FORK */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000230, &NMD::MFTR , 0, + 0xfc0003ff, 0x20000230, &MFTR , 0, MT_ }, /* MFTR */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000238, &NMD::MFHTR , 0, + 0xfc0003ff, 0x20000238, &MFHTR , 0, MT_ }, /* MFHTR */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000240, 0 , 0, @@ -17061,7 +16571,7 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x20000248, 0 , 0, 0x0 }, /* _POOL32A0~*(73) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000250, &NMD::AND_32_ , 0, + 0xfc0003ff, 0x20000250, &AND_32_ , 0, 0x0 }, /* AND[32] */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000258, 0 , 0, @@ -17070,13 +16580,13 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x20000260, 0 , 0, 0x0 }, /* _POOL32A0~*(76) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000268, &NMD::YIELD , 0, + 0xfc0003ff, 0x20000268, &YIELD , 0, MT_ }, /* YIELD */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000270, &NMD::MTTR , 0, + 0xfc0003ff, 0x20000270, &MTTR , 0, MT_ }, /* MTTR */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000278, &NMD::MTHTR , 0, + 0xfc0003ff, 0x20000278, &MTHTR , 0, MT_ }, /* MTHTR */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000280, 0 , 0, @@ -17085,7 +16595,7 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x20000288, 0 , 0, 0x0 }, /* _POOL32A0~*(81) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000290, &NMD::OR_32_ , 0, + 0xfc0003ff, 0x20000290, &OR_32_ , 0, 0x0 }, /* OR[32] */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000298, 0 , 0, @@ -17109,7 +16619,7 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x200002c8, 0 , 0, 0x0 }, /* _POOL32A0~*(89) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200002d0, &NMD::NOR , 0, + 0xfc0003ff, 0x200002d0, &NOR , 0, 0x0 }, /* NOR */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200002d8, 0 , 0, @@ -17133,7 +16643,7 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x20000308, 0 , 0, 0x0 }, /* _POOL32A0~*(97) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000310, &NMD::XOR_32_ , 0, + 0xfc0003ff, 0x20000310, &XOR_32_ , 0, 0x0 }, /* XOR[32] */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000318, 0 , 0, @@ -17157,7 +16667,7 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x20000348, 0 , 0, 0x0 }, /* _POOL32A0~*(105) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000350, &NMD::SLT , 
0, + 0xfc0003ff, 0x20000350, &SLT , 0, 0x0 }, /* SLT */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000358, 0 , 0, @@ -17205,7 +16715,7 @@ NMD::Pool NMD::_POOL32A0[128] = { 0xfc0003ff, 0x200003c8, 0 , 0, 0x0 }, /* _POOL32A0~*(121) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200003d0, &NMD::SOV , 0, + 0xfc0003ff, 0x200003d0, &SOV , 0, 0x0 }, /* SOV */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200003d8, 0 , 0, @@ -17225,185 +16735,185 @@ NMD::Pool NMD::_POOL32A0[128] = { }; -NMD::Pool NMD::ADDQ__S__PH[2] = { +static const Pool ADDQ__S__PH[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000000d, &NMD::ADDQ_PH , 0, + 0xfc0007ff, 0x2000000d, &ADDQ_PH , 0, DSP_ }, /* ADDQ.PH */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000040d, &NMD::ADDQ_S_PH , 0, + 0xfc0007ff, 0x2000040d, &ADDQ_S_PH , 0, DSP_ }, /* ADDQ_S.PH */ }; -NMD::Pool NMD::MUL__S__PH[2] = { +static const Pool MUL__S__PH[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000002d, &NMD::MUL_PH , 0, + 0xfc0007ff, 0x2000002d, &MUL_PH , 0, DSP_ }, /* MUL.PH */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000042d, &NMD::MUL_S_PH , 0, + 0xfc0007ff, 0x2000042d, &MUL_S_PH , 0, DSP_ }, /* MUL_S.PH */ }; -NMD::Pool NMD::ADDQH__R__PH[2] = { +static const Pool ADDQH__R__PH[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000004d, &NMD::ADDQH_PH , 0, + 0xfc0007ff, 0x2000004d, &ADDQH_PH , 0, DSP_ }, /* ADDQH.PH */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000044d, &NMD::ADDQH_R_PH , 0, + 0xfc0007ff, 0x2000044d, &ADDQH_R_PH , 0, DSP_ }, /* ADDQH_R.PH */ }; -NMD::Pool NMD::ADDQH__R__W[2] = { +static const Pool ADDQH__R__W[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000008d, &NMD::ADDQH_W , 0, + 0xfc0007ff, 0x2000008d, &ADDQH_W , 0, DSP_ }, /* ADDQH.W */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000048d, &NMD::ADDQH_R_W , 0, + 0xfc0007ff, 0x2000048d, &ADDQH_R_W , 0, DSP_ }, /* ADDQH_R.W */ }; -NMD::Pool NMD::ADDU__S__QB[2] = { +static const Pool ADDU__S__QB[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x200000cd, &NMD::ADDU_QB , 0, + 0xfc0007ff, 0x200000cd, &ADDU_QB , 0, DSP_ }, /* ADDU.QB */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x200004cd, &NMD::ADDU_S_QB , 0, + 0xfc0007ff, 0x200004cd, &ADDU_S_QB , 0, DSP_ }, /* ADDU_S.QB */ }; -NMD::Pool NMD::ADDU__S__PH[2] = { +static const Pool ADDU__S__PH[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000010d, &NMD::ADDU_PH , 0, + 0xfc0007ff, 0x2000010d, &ADDU_PH , 0, DSP_ }, /* ADDU.PH */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000050d, &NMD::ADDU_S_PH , 0, + 0xfc0007ff, 0x2000050d, &ADDU_S_PH , 0, DSP_ }, /* ADDU_S.PH */ }; -NMD::Pool NMD::ADDUH__R__QB[2] = { +static const Pool ADDUH__R__QB[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000014d, &NMD::ADDUH_QB , 0, + 0xfc0007ff, 0x2000014d, &ADDUH_QB , 0, DSP_ }, /* ADDUH.QB */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000054d, &NMD::ADDUH_R_QB , 0, + 0xfc0007ff, 0x2000054d, &ADDUH_R_QB , 0, DSP_ }, /* ADDUH_R.QB */ }; -NMD::Pool NMD::SHRAV__R__PH[2] = { +static const Pool SHRAV__R__PH[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000018d, &NMD::SHRAV_PH , 0, + 0xfc0007ff, 0x2000018d, &SHRAV_PH , 0, DSP_ }, /* SHRAV.PH */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000058d, &NMD::SHRAV_R_PH , 0, + 0xfc0007ff, 0x2000058d, &SHRAV_R_PH , 0, DSP_ }, /* SHRAV_R.PH */ }; -NMD::Pool NMD::SHRAV__R__QB[2] = { +static const Pool SHRAV__R__QB[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x200001cd, &NMD::SHRAV_QB , 0, + 0xfc0007ff, 0x200001cd, &SHRAV_QB , 0, DSP_ }, /* SHRAV.QB */ { instruction , 0 , 0 , 
32, - 0xfc0007ff, 0x200005cd, &NMD::SHRAV_R_QB , 0, + 0xfc0007ff, 0x200005cd, &SHRAV_R_QB , 0, DSP_ }, /* SHRAV_R.QB */ }; -NMD::Pool NMD::SUBQ__S__PH[2] = { +static const Pool SUBQ__S__PH[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000020d, &NMD::SUBQ_PH , 0, + 0xfc0007ff, 0x2000020d, &SUBQ_PH , 0, DSP_ }, /* SUBQ.PH */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000060d, &NMD::SUBQ_S_PH , 0, + 0xfc0007ff, 0x2000060d, &SUBQ_S_PH , 0, DSP_ }, /* SUBQ_S.PH */ }; -NMD::Pool NMD::SUBQH__R__PH[2] = { +static const Pool SUBQH__R__PH[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000024d, &NMD::SUBQH_PH , 0, + 0xfc0007ff, 0x2000024d, &SUBQH_PH , 0, DSP_ }, /* SUBQH.PH */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000064d, &NMD::SUBQH_R_PH , 0, + 0xfc0007ff, 0x2000064d, &SUBQH_R_PH , 0, DSP_ }, /* SUBQH_R.PH */ }; -NMD::Pool NMD::SUBQH__R__W[2] = { +static const Pool SUBQH__R__W[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000028d, &NMD::SUBQH_W , 0, + 0xfc0007ff, 0x2000028d, &SUBQH_W , 0, DSP_ }, /* SUBQH.W */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000068d, &NMD::SUBQH_R_W , 0, + 0xfc0007ff, 0x2000068d, &SUBQH_R_W , 0, DSP_ }, /* SUBQH_R.W */ }; -NMD::Pool NMD::SUBU__S__QB[2] = { +static const Pool SUBU__S__QB[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x200002cd, &NMD::SUBU_QB , 0, + 0xfc0007ff, 0x200002cd, &SUBU_QB , 0, DSP_ }, /* SUBU.QB */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x200006cd, &NMD::SUBU_S_QB , 0, + 0xfc0007ff, 0x200006cd, &SUBU_S_QB , 0, DSP_ }, /* SUBU_S.QB */ }; -NMD::Pool NMD::SUBU__S__PH[2] = { +static const Pool SUBU__S__PH[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000030d, &NMD::SUBU_PH , 0, + 0xfc0007ff, 0x2000030d, &SUBU_PH , 0, DSP_ }, /* SUBU.PH */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000070d, &NMD::SUBU_S_PH , 0, + 0xfc0007ff, 0x2000070d, &SUBU_S_PH , 0, DSP_ }, /* SUBU_S.PH */ }; -NMD::Pool NMD::SHRA__R__PH[2] = { +static const Pool SHRA__R__PH[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000335, &NMD::SHRA_PH , 0, + 0xfc0007ff, 0x20000335, &SHRA_PH , 0, DSP_ }, /* SHRA.PH */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000735, &NMD::SHRA_R_PH , 0, + 0xfc0007ff, 0x20000735, &SHRA_R_PH , 0, DSP_ }, /* SHRA_R.PH */ }; -NMD::Pool NMD::SUBUH__R__QB[2] = { +static const Pool SUBUH__R__QB[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000034d, &NMD::SUBUH_QB , 0, + 0xfc0007ff, 0x2000034d, &SUBUH_QB , 0, DSP_ }, /* SUBUH.QB */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000074d, &NMD::SUBUH_R_QB , 0, + 0xfc0007ff, 0x2000074d, &SUBUH_R_QB , 0, DSP_ }, /* SUBUH_R.QB */ }; -NMD::Pool NMD::SHLLV__S__PH[2] = { +static const Pool SHLLV__S__PH[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000038d, &NMD::SHLLV_PH , 0, + 0xfc0007ff, 0x2000038d, &SHLLV_PH , 0, DSP_ }, /* SHLLV.PH */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x2000078d, &NMD::SHLLV_S_PH , 0, + 0xfc0007ff, 0x2000078d, &SHLLV_S_PH , 0, DSP_ }, /* SHLLV_S.PH */ }; -NMD::Pool NMD::SHLL__S__PH[4] = { +static const Pool SHLL__S__PH[4] = { { instruction , 0 , 0 , 32, - 0xfc000fff, 0x200003b5, &NMD::SHLL_PH , 0, + 0xfc000fff, 0x200003b5, &SHLL_PH , 0, DSP_ }, /* SHLL.PH */ { reserved_block , 0 , 0 , 32, 0xfc000fff, 0x200007b5, 0 , 0, 0x0 }, /* SHLL[_S].PH~*(1) */ { instruction , 0 , 0 , 32, - 0xfc000fff, 0x20000bb5, &NMD::SHLL_S_PH , 0, + 0xfc000fff, 0x20000bb5, &SHLL_S_PH , 0, DSP_ }, /* SHLL_S.PH */ { reserved_block , 0 , 0 , 32, 0xfc000fff, 0x20000fb5, 0 , 0, @@ -17411,19 +16921,19 @@ NMD::Pool NMD::SHLL__S__PH[4] = { }; -NMD::Pool 
NMD::PRECR_SRA__R__PH_W[2] = { +static const Pool PRECR_SRA__R__PH_W[2] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x200003cd, &NMD::PRECR_SRA_PH_W , 0, + 0xfc0007ff, 0x200003cd, &PRECR_SRA_PH_W , 0, DSP_ }, /* PRECR_SRA.PH.W */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x200007cd, &NMD::PRECR_SRA_R_PH_W , 0, + 0xfc0007ff, 0x200007cd, &PRECR_SRA_R_PH_W , 0, DSP_ }, /* PRECR_SRA_R.PH.W */ }; -NMD::Pool NMD::_POOL32A5[128] = { +static const Pool _POOL32A5[128] = { { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000005, &NMD::CMP_EQ_PH , 0, + 0xfc0003ff, 0x20000005, &CMP_EQ_PH , 0, DSP_ }, /* CMP.EQ.PH */ { pool , ADDQ__S__PH , 2 , 32, 0xfc0003ff, 0x2000000d, 0 , 0, @@ -17432,10 +16942,10 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x20000015, 0 , 0, 0x0 }, /* _POOL32A5~*(2) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x2000001d, &NMD::SHILO , 0, + 0xfc0003ff, 0x2000001d, &SHILO , 0, DSP_ }, /* SHILO */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000025, &NMD::MULEQ_S_W_PHL , 0, + 0xfc0003ff, 0x20000025, &MULEQ_S_W_PHL , 0, DSP_ }, /* MULEQ_S.W.PHL */ { pool , MUL__S__PH , 2 , 32, 0xfc0003ff, 0x2000002d, 0 , 0, @@ -17444,10 +16954,10 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x20000035, 0 , 0, 0x0 }, /* _POOL32A5~*(6) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x2000003d, &NMD::REPL_PH , 0, + 0xfc0003ff, 0x2000003d, &REPL_PH , 0, DSP_ }, /* REPL.PH */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000045, &NMD::CMP_LT_PH , 0, + 0xfc0003ff, 0x20000045, &CMP_LT_PH , 0, DSP_ }, /* CMP.LT.PH */ { pool , ADDQH__R__PH , 2 , 32, 0xfc0003ff, 0x2000004d, 0 , 0, @@ -17459,10 +16969,10 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x2000005d, 0 , 0, 0x0 }, /* _POOL32A5~*(11) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000065, &NMD::MULEQ_S_W_PHR , 0, + 0xfc0003ff, 0x20000065, &MULEQ_S_W_PHR , 0, DSP_ }, /* MULEQ_S.W.PHR */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x2000006d, &NMD::PRECR_QB_PH , 0, + 0xfc0003ff, 0x2000006d, &PRECR_QB_PH , 0, DSP_ }, /* PRECR.QB.PH */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000075, 0 , 0, @@ -17471,13 +16981,13 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x2000007d, 0 , 0, 0x0 }, /* _POOL32A5~*(15) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000085, &NMD::CMP_LE_PH , 0, + 0xfc0003ff, 0x20000085, &CMP_LE_PH , 0, DSP_ }, /* CMP.LE.PH */ { pool , ADDQH__R__W , 2 , 32, 0xfc0003ff, 0x2000008d, 0 , 0, 0x0 }, /* ADDQH[_R].W */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000095, &NMD::MULEU_S_PH_QBL , 0, + 0xfc0003ff, 0x20000095, &MULEU_S_PH_QBL , 0, DSP_ }, /* MULEU_S.PH.QBL */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x2000009d, 0 , 0, @@ -17486,7 +16996,7 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x200000a5, 0 , 0, 0x0 }, /* _POOL32A5~*(20) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200000ad, &NMD::PRECRQ_QB_PH , 0, + 0xfc0003ff, 0x200000ad, &PRECRQ_QB_PH , 0, DSP_ }, /* PRECRQ.QB.PH */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200000b5, 0 , 0, @@ -17495,13 +17005,13 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x200000bd, 0 , 0, 0x0 }, /* _POOL32A5~*(23) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200000c5, &NMD::CMPGU_EQ_QB , 0, + 0xfc0003ff, 0x200000c5, &CMPGU_EQ_QB , 0, DSP_ }, /* CMPGU.EQ.QB */ { pool , ADDU__S__QB , 2 , 32, 0xfc0003ff, 0x200000cd, 0 , 0, 0x0 }, /* ADDU[_S].QB */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200000d5, &NMD::MULEU_S_PH_QBR , 0, + 0xfc0003ff, 0x200000d5, &MULEU_S_PH_QBR , 0, DSP_ }, /* MULEU_S.PH.QBR */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200000dd, 0 , 0, @@ -17510,7 
+17020,7 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x200000e5, 0 , 0, 0x0 }, /* _POOL32A5~*(28) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200000ed, &NMD::PRECRQ_PH_W , 0, + 0xfc0003ff, 0x200000ed, &PRECRQ_PH_W , 0, DSP_ }, /* PRECRQ.PH.W */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200000f5, 0 , 0, @@ -17519,13 +17029,13 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x200000fd, 0 , 0, 0x0 }, /* _POOL32A5~*(31) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000105, &NMD::CMPGU_LT_QB , 0, + 0xfc0003ff, 0x20000105, &CMPGU_LT_QB , 0, DSP_ }, /* CMPGU.LT.QB */ { pool , ADDU__S__PH , 2 , 32, 0xfc0003ff, 0x2000010d, 0 , 0, 0x0 }, /* ADDU[_S].PH */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000115, &NMD::MULQ_RS_PH , 0, + 0xfc0003ff, 0x20000115, &MULQ_RS_PH , 0, DSP_ }, /* MULQ_RS.PH */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x2000011d, 0 , 0, @@ -17534,7 +17044,7 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x20000125, 0 , 0, 0x0 }, /* _POOL32A5~*(36) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x2000012d, &NMD::PRECRQ_RS_PH_W , 0, + 0xfc0003ff, 0x2000012d, &PRECRQ_RS_PH_W , 0, DSP_ }, /* PRECRQ_RS.PH.W */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000135, 0 , 0, @@ -17543,13 +17053,13 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x2000013d, 0 , 0, 0x0 }, /* _POOL32A5~*(39) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000145, &NMD::CMPGU_LE_QB , 0, + 0xfc0003ff, 0x20000145, &CMPGU_LE_QB , 0, DSP_ }, /* CMPGU.LE.QB */ { pool , ADDUH__R__QB , 2 , 32, 0xfc0003ff, 0x2000014d, 0 , 0, 0x0 }, /* ADDUH[_R].QB */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000155, &NMD::MULQ_S_PH , 0, + 0xfc0003ff, 0x20000155, &MULQ_S_PH , 0, DSP_ }, /* MULQ_S.PH */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x2000015d, 0 , 0, @@ -17558,7 +17068,7 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x20000165, 0 , 0, 0x0 }, /* _POOL32A5~*(44) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x2000016d, &NMD::PRECRQU_S_QB_PH , 0, + 0xfc0003ff, 0x2000016d, &PRECRQU_S_QB_PH , 0, DSP_ }, /* PRECRQU_S.QB.PH */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000175, 0 , 0, @@ -17567,13 +17077,13 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x2000017d, 0 , 0, 0x0 }, /* _POOL32A5~*(47) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000185, &NMD::CMPGDU_EQ_QB , 0, + 0xfc0003ff, 0x20000185, &CMPGDU_EQ_QB , 0, DSP_ }, /* CMPGDU.EQ.QB */ { pool , SHRAV__R__PH , 2 , 32, 0xfc0003ff, 0x2000018d, 0 , 0, 0x0 }, /* SHRAV[_R].PH */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000195, &NMD::MULQ_RS_W , 0, + 0xfc0003ff, 0x20000195, &MULQ_RS_W , 0, DSP_ }, /* MULQ_RS.W */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x2000019d, 0 , 0, @@ -17582,7 +17092,7 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x200001a5, 0 , 0, 0x0 }, /* _POOL32A5~*(52) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200001ad, &NMD::PACKRL_PH , 0, + 0xfc0003ff, 0x200001ad, &PACKRL_PH , 0, DSP_ }, /* PACKRL.PH */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200001b5, 0 , 0, @@ -17591,13 +17101,13 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x200001bd, 0 , 0, 0x0 }, /* _POOL32A5~*(55) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200001c5, &NMD::CMPGDU_LT_QB , 0, + 0xfc0003ff, 0x200001c5, &CMPGDU_LT_QB , 0, DSP_ }, /* CMPGDU.LT.QB */ { pool , SHRAV__R__QB , 2 , 32, 0xfc0003ff, 0x200001cd, 0 , 0, 0x0 }, /* SHRAV[_R].QB */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200001d5, &NMD::MULQ_S_W , 0, + 0xfc0003ff, 0x200001d5, &MULQ_S_W , 0, DSP_ }, /* MULQ_S.W */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200001dd, 0 , 0, @@ 
-17606,7 +17116,7 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x200001e5, 0 , 0, 0x0 }, /* _POOL32A5~*(60) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200001ed, &NMD::PICK_QB , 0, + 0xfc0003ff, 0x200001ed, &PICK_QB , 0, DSP_ }, /* PICK.QB */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200001f5, 0 , 0, @@ -17615,13 +17125,13 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x200001fd, 0 , 0, 0x0 }, /* _POOL32A5~*(63) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000205, &NMD::CMPGDU_LE_QB , 0, + 0xfc0003ff, 0x20000205, &CMPGDU_LE_QB , 0, DSP_ }, /* CMPGDU.LE.QB */ { pool , SUBQ__S__PH , 2 , 32, 0xfc0003ff, 0x2000020d, 0 , 0, 0x0 }, /* SUBQ[_S].PH */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000215, &NMD::APPEND , 0, + 0xfc0003ff, 0x20000215, &APPEND , 0, DSP_ }, /* APPEND */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x2000021d, 0 , 0, @@ -17630,7 +17140,7 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x20000225, 0 , 0, 0x0 }, /* _POOL32A5~*(68) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x2000022d, &NMD::PICK_PH , 0, + 0xfc0003ff, 0x2000022d, &PICK_PH , 0, DSP_ }, /* PICK.PH */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x20000235, 0 , 0, @@ -17639,13 +17149,13 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x2000023d, 0 , 0, 0x0 }, /* _POOL32A5~*(71) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000245, &NMD::CMPU_EQ_QB , 0, + 0xfc0003ff, 0x20000245, &CMPU_EQ_QB , 0, DSP_ }, /* CMPU.EQ.QB */ { pool , SUBQH__R__PH , 2 , 32, 0xfc0003ff, 0x2000024d, 0 , 0, 0x0 }, /* SUBQH[_R].PH */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000255, &NMD::PREPEND , 0, + 0xfc0003ff, 0x20000255, &PREPEND , 0, DSP_ }, /* PREPEND */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x2000025d, 0 , 0, @@ -17663,13 +17173,13 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x2000027d, 0 , 0, 0x0 }, /* _POOL32A5~*(79) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000285, &NMD::CMPU_LT_QB , 0, + 0xfc0003ff, 0x20000285, &CMPU_LT_QB , 0, DSP_ }, /* CMPU.LT.QB */ { pool , SUBQH__R__W , 2 , 32, 0xfc0003ff, 0x2000028d, 0 , 0, 0x0 }, /* SUBQH[_R].W */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000295, &NMD::MODSUB , 0, + 0xfc0003ff, 0x20000295, &MODSUB , 0, DSP_ }, /* MODSUB */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x2000029d, 0 , 0, @@ -17687,13 +17197,13 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x200002bd, 0 , 0, 0x0 }, /* _POOL32A5~*(87) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200002c5, &NMD::CMPU_LE_QB , 0, + 0xfc0003ff, 0x200002c5, &CMPU_LE_QB , 0, DSP_ }, /* CMPU.LE.QB */ { pool , SUBU__S__QB , 2 , 32, 0xfc0003ff, 0x200002cd, 0 , 0, 0x0 }, /* SUBU[_S].QB */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200002d5, &NMD::SHRAV_R_W , 0, + 0xfc0003ff, 0x200002d5, &SHRAV_R_W , 0, DSP_ }, /* SHRAV_R.W */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200002dd, 0 , 0, @@ -17705,19 +17215,19 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x200002ed, 0 , 0, 0x0 }, /* _POOL32A5~*(93) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200002f5, &NMD::SHRA_R_W , 0, + 0xfc0003ff, 0x200002f5, &SHRA_R_W , 0, DSP_ }, /* SHRA_R.W */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200002fd, 0 , 0, 0x0 }, /* _POOL32A5~*(95) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000305, &NMD::ADDQ_S_W , 0, + 0xfc0003ff, 0x20000305, &ADDQ_S_W , 0, DSP_ }, /* ADDQ_S.W */ { pool , SUBU__S__PH , 2 , 32, 0xfc0003ff, 0x2000030d, 0 , 0, 0x0 }, /* SUBU[_S].PH */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000315, &NMD::SHRLV_PH , 0, + 0xfc0003ff, 0x20000315, &SHRLV_PH , 0, DSP_ }, /* SHRLV.PH */ { 
reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x2000031d, 0 , 0, @@ -17735,13 +17245,13 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x2000033d, 0 , 0, 0x0 }, /* _POOL32A5~*(103) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000345, &NMD::SUBQ_S_W , 0, + 0xfc0003ff, 0x20000345, &SUBQ_S_W , 0, DSP_ }, /* SUBQ_S.W */ { pool , SUBUH__R__QB , 2 , 32, 0xfc0003ff, 0x2000034d, 0 , 0, 0x0 }, /* SUBUH[_R].QB */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000355, &NMD::SHRLV_QB , 0, + 0xfc0003ff, 0x20000355, &SHRLV_QB , 0, DSP_ }, /* SHRLV.QB */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x2000035d, 0 , 0, @@ -17759,13 +17269,13 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x2000037d, 0 , 0, 0x0 }, /* _POOL32A5~*(111) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000385, &NMD::ADDSC , 0, + 0xfc0003ff, 0x20000385, &ADDSC , 0, DSP_ }, /* ADDSC */ { pool , SHLLV__S__PH , 2 , 32, 0xfc0003ff, 0x2000038d, 0 , 0, 0x0 }, /* SHLLV[_S].PH */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x20000395, &NMD::SHLLV_QB , 0, + 0xfc0003ff, 0x20000395, &SHLLV_QB , 0, DSP_ }, /* SHLLV.QB */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x2000039d, 0 , 0, @@ -17783,13 +17293,13 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x200003bd, 0 , 0, 0x0 }, /* _POOL32A5~*(119) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200003c5, &NMD::ADDWC , 0, + 0xfc0003ff, 0x200003c5, &ADDWC , 0, DSP_ }, /* ADDWC */ { pool , PRECR_SRA__R__PH_W , 2 , 32, 0xfc0003ff, 0x200003cd, 0 , 0, 0x0 }, /* PRECR_SRA[_R].PH.W */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200003d5, &NMD::SHLLV_S_W , 0, + 0xfc0003ff, 0x200003d5, &SHLLV_S_W , 0, DSP_ }, /* SHLLV_S.W */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200003dd, 0 , 0, @@ -17801,7 +17311,7 @@ NMD::Pool NMD::_POOL32A5[128] = { 0xfc0003ff, 0x200003ed, 0 , 0, 0x0 }, /* _POOL32A5~*(125) */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0x200003f5, &NMD::SHLL_S_W , 0, + 0xfc0003ff, 0x200003f5, &SHLL_S_W , 0, DSP_ }, /* SHLL_S.W */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0x200003fd, 0 , 0, @@ -17809,59 +17319,59 @@ NMD::Pool NMD::_POOL32A5[128] = { }; -NMD::Pool NMD::PP_LSX[16] = { +static const Pool PP_LSX[16] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000007, &NMD::LBX , 0, + 0xfc0007ff, 0x20000007, &LBX , 0, 0x0 }, /* LBX */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000087, &NMD::SBX , 0, + 0xfc0007ff, 0x20000087, &SBX , 0, XMMS_ }, /* SBX */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000107, &NMD::LBUX , 0, + 0xfc0007ff, 0x20000107, &LBUX , 0, 0x0 }, /* LBUX */ { reserved_block , 0 , 0 , 32, 0xfc0007ff, 0x20000187, 0 , 0, 0x0 }, /* PP.LSX~*(3) */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000207, &NMD::LHX , 0, + 0xfc0007ff, 0x20000207, &LHX , 0, 0x0 }, /* LHX */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000287, &NMD::SHX , 0, + 0xfc0007ff, 0x20000287, &SHX , 0, XMMS_ }, /* SHX */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000307, &NMD::LHUX , 0, + 0xfc0007ff, 0x20000307, &LHUX , 0, 0x0 }, /* LHUX */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000387, &NMD::LWUX , 0, + 0xfc0007ff, 0x20000387, &LWUX , 0, MIPS64_ }, /* LWUX */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000407, &NMD::LWX , 0, + 0xfc0007ff, 0x20000407, &LWX , 0, 0x0 }, /* LWX */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000487, &NMD::SWX , 0, + 0xfc0007ff, 0x20000487, &SWX , 0, XMMS_ }, /* SWX */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000507, &NMD::LWC1X , 0, + 0xfc0007ff, 0x20000507, &LWC1X , 0, CP1_ }, /* LWC1X */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000587, &NMD::SWC1X , 0, + 
0xfc0007ff, 0x20000587, &SWC1X , 0, CP1_ }, /* SWC1X */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000607, &NMD::LDX , 0, + 0xfc0007ff, 0x20000607, &LDX , 0, MIPS64_ }, /* LDX */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000687, &NMD::SDX , 0, + 0xfc0007ff, 0x20000687, &SDX , 0, MIPS64_ }, /* SDX */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000707, &NMD::LDC1X , 0, + 0xfc0007ff, 0x20000707, &LDC1X , 0, CP1_ }, /* LDC1X */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000787, &NMD::SDC1X , 0, + 0xfc0007ff, 0x20000787, &SDC1X , 0, CP1_ }, /* SDC1X */ }; -NMD::Pool NMD::PP_LSXS[16] = { +static const Pool PP_LSXS[16] = { { reserved_block , 0 , 0 , 32, 0xfc0007ff, 0x20000047, 0 , 0, 0x0 }, /* PP.LSXS~*(0) */ @@ -17875,45 +17385,45 @@ NMD::Pool NMD::PP_LSXS[16] = { 0xfc0007ff, 0x200001c7, 0 , 0, 0x0 }, /* PP.LSXS~*(3) */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000247, &NMD::LHXS , 0, + 0xfc0007ff, 0x20000247, &LHXS , 0, 0x0 }, /* LHXS */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x200002c7, &NMD::SHXS , 0, + 0xfc0007ff, 0x200002c7, &SHXS , 0, XMMS_ }, /* SHXS */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000347, &NMD::LHUXS , 0, + 0xfc0007ff, 0x20000347, &LHUXS , 0, 0x0 }, /* LHUXS */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x200003c7, &NMD::LWUXS , 0, + 0xfc0007ff, 0x200003c7, &LWUXS , 0, MIPS64_ }, /* LWUXS */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000447, &NMD::LWXS_32_ , 0, + 0xfc0007ff, 0x20000447, &LWXS_32_ , 0, 0x0 }, /* LWXS[32] */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x200004c7, &NMD::SWXS , 0, + 0xfc0007ff, 0x200004c7, &SWXS , 0, XMMS_ }, /* SWXS */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000547, &NMD::LWC1XS , 0, + 0xfc0007ff, 0x20000547, &LWC1XS , 0, CP1_ }, /* LWC1XS */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x200005c7, &NMD::SWC1XS , 0, + 0xfc0007ff, 0x200005c7, &SWC1XS , 0, CP1_ }, /* SWC1XS */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000647, &NMD::LDXS , 0, + 0xfc0007ff, 0x20000647, &LDXS , 0, MIPS64_ }, /* LDXS */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x200006c7, &NMD::SDXS , 0, + 0xfc0007ff, 0x200006c7, &SDXS , 0, MIPS64_ }, /* SDXS */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x20000747, &NMD::LDC1XS , 0, + 0xfc0007ff, 0x20000747, &LDC1XS , 0, CP1_ }, /* LDC1XS */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0x200007c7, &NMD::SDC1XS , 0, + 0xfc0007ff, 0x200007c7, &SDC1XS , 0, CP1_ }, /* SDC1XS */ }; -NMD::Pool NMD::P_LSX[2] = { +static const Pool P_LSX[2] = { { pool , PP_LSX , 16 , 32, 0xfc00007f, 0x20000007, 0 , 0, 0x0 }, /* PP.LSX */ @@ -17923,28 +17433,28 @@ NMD::Pool NMD::P_LSX[2] = { }; -NMD::Pool NMD::POOL32Axf_1_0[4] = { +static const Pool POOL32Axf_1_0[4] = { { instruction , 0 , 0 , 32, - 0xfc003fff, 0x2000007f, &NMD::MFHI_DSP_ , 0, + 0xfc003fff, 0x2000007f, &MFHI_DSP_ , 0, DSP_ }, /* MFHI[DSP] */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x2000107f, &NMD::MFLO_DSP_ , 0, + 0xfc003fff, 0x2000107f, &MFLO_DSP_ , 0, DSP_ }, /* MFLO[DSP] */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x2000207f, &NMD::MTHI_DSP_ , 0, + 0xfc003fff, 0x2000207f, &MTHI_DSP_ , 0, DSP_ }, /* MTHI[DSP] */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x2000307f, &NMD::MTLO_DSP_ , 0, + 0xfc003fff, 0x2000307f, &MTLO_DSP_ , 0, DSP_ }, /* MTLO[DSP] */ }; -NMD::Pool NMD::POOL32Axf_1_1[4] = { +static const Pool POOL32Axf_1_1[4] = { { instruction , 0 , 0 , 32, - 0xfc003fff, 0x2000027f, &NMD::MTHLIP , 0, + 0xfc003fff, 0x2000027f, &MTHLIP , 0, DSP_ }, /* MTHLIP */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x2000127f, &NMD::SHILOV , 0, + 0xfc003fff, 0x2000127f, 
&SHILOV , 0, DSP_ }, /* SHILOV */ { reserved_block , 0 , 0 , 32, 0xfc003fff, 0x2000227f, 0 , 0, @@ -17955,53 +17465,53 @@ NMD::Pool NMD::POOL32Axf_1_1[4] = { }; -NMD::Pool NMD::POOL32Axf_1_3[4] = { +static const Pool POOL32Axf_1_3[4] = { { instruction , 0 , 0 , 32, - 0xfc003fff, 0x2000067f, &NMD::RDDSP , 0, + 0xfc003fff, 0x2000067f, &RDDSP , 0, DSP_ }, /* RDDSP */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x2000167f, &NMD::WRDSP , 0, + 0xfc003fff, 0x2000167f, &WRDSP , 0, DSP_ }, /* WRDSP */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x2000267f, &NMD::EXTP , 0, + 0xfc003fff, 0x2000267f, &EXTP , 0, DSP_ }, /* EXTP */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x2000367f, &NMD::EXTPDP , 0, + 0xfc003fff, 0x2000367f, &EXTPDP , 0, DSP_ }, /* EXTPDP */ }; -NMD::Pool NMD::POOL32Axf_1_4[2] = { +static const Pool POOL32Axf_1_4[2] = { { instruction , 0 , 0 , 32, - 0xfc001fff, 0x2000087f, &NMD::SHLL_QB , 0, + 0xfc001fff, 0x2000087f, &SHLL_QB , 0, DSP_ }, /* SHLL.QB */ { instruction , 0 , 0 , 32, - 0xfc001fff, 0x2000187f, &NMD::SHRL_QB , 0, + 0xfc001fff, 0x2000187f, &SHRL_QB , 0, DSP_ }, /* SHRL.QB */ }; -NMD::Pool NMD::MAQ_S_A__W_PHR[2] = { +static const Pool MAQ_S_A__W_PHR[2] = { { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20000a7f, &NMD::MAQ_S_W_PHR , 0, + 0xfc003fff, 0x20000a7f, &MAQ_S_W_PHR , 0, DSP_ }, /* MAQ_S.W.PHR */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20002a7f, &NMD::MAQ_SA_W_PHR , 0, + 0xfc003fff, 0x20002a7f, &MAQ_SA_W_PHR , 0, DSP_ }, /* MAQ_SA.W.PHR */ }; -NMD::Pool NMD::MAQ_S_A__W_PHL[2] = { +static const Pool MAQ_S_A__W_PHL[2] = { { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20001a7f, &NMD::MAQ_S_W_PHL , 0, + 0xfc003fff, 0x20001a7f, &MAQ_S_W_PHL , 0, DSP_ }, /* MAQ_S.W.PHL */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20003a7f, &NMD::MAQ_SA_W_PHL , 0, + 0xfc003fff, 0x20003a7f, &MAQ_SA_W_PHL , 0, DSP_ }, /* MAQ_SA.W.PHL */ }; -NMD::Pool NMD::POOL32Axf_1_5[2] = { +static const Pool POOL32Axf_1_5[2] = { { pool , MAQ_S_A__W_PHR , 2 , 32, 0xfc001fff, 0x20000a7f, 0 , 0, 0x0 }, /* MAQ_S[A].W.PHR */ @@ -18011,23 +17521,23 @@ NMD::Pool NMD::POOL32Axf_1_5[2] = { }; -NMD::Pool NMD::POOL32Axf_1_7[4] = { +static const Pool POOL32Axf_1_7[4] = { { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20000e7f, &NMD::EXTR_W , 0, + 0xfc003fff, 0x20000e7f, &EXTR_W , 0, DSP_ }, /* EXTR.W */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20001e7f, &NMD::EXTR_R_W , 0, + 0xfc003fff, 0x20001e7f, &EXTR_R_W , 0, DSP_ }, /* EXTR_R.W */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20002e7f, &NMD::EXTR_RS_W , 0, + 0xfc003fff, 0x20002e7f, &EXTR_RS_W , 0, DSP_ }, /* EXTR_RS.W */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20003e7f, &NMD::EXTR_S_H , 0, + 0xfc003fff, 0x20003e7f, &EXTR_S_H , 0, DSP_ }, /* EXTR_S.H */ }; -NMD::Pool NMD::POOL32Axf_1[8] = { +static const Pool POOL32Axf_1[8] = { { pool , POOL32Axf_1_0 , 4 , 32, 0xfc000fff, 0x2000007f, 0 , 0, 0x0 }, /* POOL32Axf_1_0 */ @@ -18055,119 +17565,119 @@ NMD::Pool NMD::POOL32Axf_1[8] = { }; -NMD::Pool NMD::POOL32Axf_2_DSP__0_7[8] = { +static const Pool POOL32Axf_2_DSP__0_7[8] = { { instruction , 0 , 0 , 32, - 0xfc003fff, 0x200000bf, &NMD::DPA_W_PH , 0, + 0xfc003fff, 0x200000bf, &DPA_W_PH , 0, DSP_ }, /* DPA.W.PH */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x200002bf, &NMD::DPAQ_S_W_PH , 0, + 0xfc003fff, 0x200002bf, &DPAQ_S_W_PH , 0, DSP_ }, /* DPAQ_S.W.PH */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x200004bf, &NMD::DPS_W_PH , 0, + 0xfc003fff, 0x200004bf, &DPS_W_PH , 0, DSP_ }, /* DPS.W.PH */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x200006bf, &NMD::DPSQ_S_W_PH , 0, + 
0xfc003fff, 0x200006bf, &DPSQ_S_W_PH , 0, DSP_ }, /* DPSQ_S.W.PH */ { reserved_block , 0 , 0 , 32, 0xfc003fff, 0x200008bf, 0 , 0, 0x0 }, /* POOL32Axf_2(DSP)_0_7~*(4) */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20000abf, &NMD::MADD_DSP_ , 0, + 0xfc003fff, 0x20000abf, &MADD_DSP_ , 0, DSP_ }, /* MADD[DSP] */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20000cbf, &NMD::MULT_DSP_ , 0, + 0xfc003fff, 0x20000cbf, &MULT_DSP_ , 0, DSP_ }, /* MULT[DSP] */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20000ebf, &NMD::EXTRV_W , 0, + 0xfc003fff, 0x20000ebf, &EXTRV_W , 0, DSP_ }, /* EXTRV.W */ }; -NMD::Pool NMD::POOL32Axf_2_DSP__8_15[8] = { +static const Pool POOL32Axf_2_DSP__8_15[8] = { { instruction , 0 , 0 , 32, - 0xfc003fff, 0x200010bf, &NMD::DPAX_W_PH , 0, + 0xfc003fff, 0x200010bf, &DPAX_W_PH , 0, DSP_ }, /* DPAX.W.PH */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x200012bf, &NMD::DPAQ_SA_L_W , 0, + 0xfc003fff, 0x200012bf, &DPAQ_SA_L_W , 0, DSP_ }, /* DPAQ_SA.L.W */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x200014bf, &NMD::DPSX_W_PH , 0, + 0xfc003fff, 0x200014bf, &DPSX_W_PH , 0, DSP_ }, /* DPSX.W.PH */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x200016bf, &NMD::DPSQ_SA_L_W , 0, + 0xfc003fff, 0x200016bf, &DPSQ_SA_L_W , 0, DSP_ }, /* DPSQ_SA.L.W */ { reserved_block , 0 , 0 , 32, 0xfc003fff, 0x200018bf, 0 , 0, 0x0 }, /* POOL32Axf_2(DSP)_8_15~*(4) */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20001abf, &NMD::MADDU_DSP_ , 0, + 0xfc003fff, 0x20001abf, &MADDU_DSP_ , 0, DSP_ }, /* MADDU[DSP] */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20001cbf, &NMD::MULTU_DSP_ , 0, + 0xfc003fff, 0x20001cbf, &MULTU_DSP_ , 0, DSP_ }, /* MULTU[DSP] */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20001ebf, &NMD::EXTRV_R_W , 0, + 0xfc003fff, 0x20001ebf, &EXTRV_R_W , 0, DSP_ }, /* EXTRV_R.W */ }; -NMD::Pool NMD::POOL32Axf_2_DSP__16_23[8] = { +static const Pool POOL32Axf_2_DSP__16_23[8] = { { instruction , 0 , 0 , 32, - 0xfc003fff, 0x200020bf, &NMD::DPAU_H_QBL , 0, + 0xfc003fff, 0x200020bf, &DPAU_H_QBL , 0, DSP_ }, /* DPAU.H.QBL */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x200022bf, &NMD::DPAQX_S_W_PH , 0, + 0xfc003fff, 0x200022bf, &DPAQX_S_W_PH , 0, DSP_ }, /* DPAQX_S.W.PH */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x200024bf, &NMD::DPSU_H_QBL , 0, + 0xfc003fff, 0x200024bf, &DPSU_H_QBL , 0, DSP_ }, /* DPSU.H.QBL */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x200026bf, &NMD::DPSQX_S_W_PH , 0, + 0xfc003fff, 0x200026bf, &DPSQX_S_W_PH , 0, DSP_ }, /* DPSQX_S.W.PH */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x200028bf, &NMD::EXTPV , 0, + 0xfc003fff, 0x200028bf, &EXTPV , 0, DSP_ }, /* EXTPV */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20002abf, &NMD::MSUB_DSP_ , 0, + 0xfc003fff, 0x20002abf, &MSUB_DSP_ , 0, DSP_ }, /* MSUB[DSP] */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20002cbf, &NMD::MULSA_W_PH , 0, + 0xfc003fff, 0x20002cbf, &MULSA_W_PH , 0, DSP_ }, /* MULSA.W.PH */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20002ebf, &NMD::EXTRV_RS_W , 0, + 0xfc003fff, 0x20002ebf, &EXTRV_RS_W , 0, DSP_ }, /* EXTRV_RS.W */ }; -NMD::Pool NMD::POOL32Axf_2_DSP__24_31[8] = { +static const Pool POOL32Axf_2_DSP__24_31[8] = { { instruction , 0 , 0 , 32, - 0xfc003fff, 0x200030bf, &NMD::DPAU_H_QBR , 0, + 0xfc003fff, 0x200030bf, &DPAU_H_QBR , 0, DSP_ }, /* DPAU.H.QBR */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x200032bf, &NMD::DPAQX_SA_W_PH , 0, + 0xfc003fff, 0x200032bf, &DPAQX_SA_W_PH , 0, DSP_ }, /* DPAQX_SA.W.PH */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x200034bf, &NMD::DPSU_H_QBR , 0, + 0xfc003fff, 0x200034bf, &DPSU_H_QBR , 0, DSP_ }, 
/* DPSU.H.QBR */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x200036bf, &NMD::DPSQX_SA_W_PH , 0, + 0xfc003fff, 0x200036bf, &DPSQX_SA_W_PH , 0, DSP_ }, /* DPSQX_SA.W.PH */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x200038bf, &NMD::EXTPDPV , 0, + 0xfc003fff, 0x200038bf, &EXTPDPV , 0, DSP_ }, /* EXTPDPV */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20003abf, &NMD::MSUBU_DSP_ , 0, + 0xfc003fff, 0x20003abf, &MSUBU_DSP_ , 0, DSP_ }, /* MSUBU[DSP] */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20003cbf, &NMD::MULSAQ_S_W_PH , 0, + 0xfc003fff, 0x20003cbf, &MULSAQ_S_W_PH , 0, DSP_ }, /* MULSAQ_S.W.PH */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0x20003ebf, &NMD::EXTRV_S_H , 0, + 0xfc003fff, 0x20003ebf, &EXTRV_S_H , 0, DSP_ }, /* EXTRV_S.H */ }; -NMD::Pool NMD::POOL32Axf_2[4] = { +static const Pool POOL32Axf_2[4] = { { pool , POOL32Axf_2_DSP__0_7, 8 , 32, 0xfc0031ff, 0x200000bf, 0 , 0, 0x0 }, /* POOL32Axf_2(DSP)_0_7 */ @@ -18183,12 +17693,12 @@ NMD::Pool NMD::POOL32Axf_2[4] = { }; -NMD::Pool NMD::POOL32Axf_4[128] = { +static const Pool POOL32Axf_4[128] = { { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000013f, &NMD::ABSQ_S_QB , 0, + 0xfc00ffff, 0x2000013f, &ABSQ_S_QB , 0, DSP_ }, /* ABSQ_S.QB */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000033f, &NMD::REPLV_PH , 0, + 0xfc00ffff, 0x2000033f, &REPLV_PH , 0, DSP_ }, /* REPLV.PH */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000053f, 0 , 0, @@ -18209,10 +17719,10 @@ NMD::Pool NMD::POOL32Axf_4[128] = { 0xfc00ffff, 0x20000f3f, 0 , 0, 0x0 }, /* POOL32Axf_4~*(7) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000113f, &NMD::ABSQ_S_PH , 0, + 0xfc00ffff, 0x2000113f, &ABSQ_S_PH , 0, DSP_ }, /* ABSQ_S.PH */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000133f, &NMD::REPLV_QB , 0, + 0xfc00ffff, 0x2000133f, &REPLV_QB , 0, DSP_ }, /* REPLV.QB */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000153f, 0 , 0, @@ -18233,7 +17743,7 @@ NMD::Pool NMD::POOL32Axf_4[128] = { 0xfc00ffff, 0x20001f3f, 0 , 0, 0x0 }, /* POOL32Axf_4~*(15) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000213f, &NMD::ABSQ_S_W , 0, + 0xfc00ffff, 0x2000213f, &ABSQ_S_W , 0, DSP_ }, /* ABSQ_S.W */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000233f, 0 , 0, @@ -18281,7 +17791,7 @@ NMD::Pool NMD::POOL32Axf_4[128] = { 0xfc00ffff, 0x20003f3f, 0 , 0, 0x0 }, /* POOL32Axf_4~*(31) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000413f, &NMD::INSV , 0, + 0xfc00ffff, 0x2000413f, &INSV , 0, DSP_ }, /* INSV */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000433f, 0 , 0, @@ -18296,16 +17806,16 @@ NMD::Pool NMD::POOL32Axf_4[128] = { 0xfc00ffff, 0x2000493f, 0 , 0, 0x0 }, /* POOL32Axf_4~*(36) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x20004b3f, &NMD::CLO , 0, + 0xfc00ffff, 0x20004b3f, &CLO , 0, XMMS_ }, /* CLO */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x20004d3f, &NMD::MFC2 , 0, + 0xfc00ffff, 0x20004d3f, &MFC2 , 0, CP2_ }, /* MFC2 */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x20004f3f, 0 , 0, 0x0 }, /* POOL32Axf_4~*(39) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000513f, &NMD::PRECEQ_W_PHL , 0, + 0xfc00ffff, 0x2000513f, &PRECEQ_W_PHL , 0, DSP_ }, /* PRECEQ.W.PHL */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000533f, 0 , 0, @@ -18320,16 +17830,16 @@ NMD::Pool NMD::POOL32Axf_4[128] = { 0xfc00ffff, 0x2000593f, 0 , 0, 0x0 }, /* POOL32Axf_4~*(44) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x20005b3f, &NMD::CLZ , 0, + 0xfc00ffff, 0x20005b3f, &CLZ , 0, XMMS_ }, /* CLZ */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x20005d3f, &NMD::MTC2 , 0, + 0xfc00ffff, 0x20005d3f, &MTC2 , 0, CP2_ }, /* MTC2 */ { 
reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x20005f3f, 0 , 0, 0x0 }, /* POOL32Axf_4~*(47) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000613f, &NMD::PRECEQ_W_PHR , 0, + 0xfc00ffff, 0x2000613f, &PRECEQ_W_PHR , 0, DSP_ }, /* PRECEQ.W.PHR */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000633f, 0 , 0, @@ -18347,16 +17857,16 @@ NMD::Pool NMD::POOL32Axf_4[128] = { 0xfc00ffff, 0x20006b3f, 0 , 0, 0x0 }, /* POOL32Axf_4~*(53) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x20006d3f, &NMD::DMFC2 , 0, + 0xfc00ffff, 0x20006d3f, &DMFC2 , 0, CP2_ }, /* DMFC2 */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x20006f3f, 0 , 0, 0x0 }, /* POOL32Axf_4~*(55) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000713f, &NMD::PRECEQU_PH_QBL , 0, + 0xfc00ffff, 0x2000713f, &PRECEQU_PH_QBL , 0, DSP_ }, /* PRECEQU.PH.QBL */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000733f, &NMD::PRECEQU_PH_QBLA , 0, + 0xfc00ffff, 0x2000733f, &PRECEQU_PH_QBLA , 0, DSP_ }, /* PRECEQU.PH.QBLA */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000753f, 0 , 0, @@ -18371,7 +17881,7 @@ NMD::Pool NMD::POOL32Axf_4[128] = { 0xfc00ffff, 0x20007b3f, 0 , 0, 0x0 }, /* POOL32Axf_4~*(61) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x20007d3f, &NMD::DMTC2 , 0, + 0xfc00ffff, 0x20007d3f, &DMTC2 , 0, CP2_ }, /* DMTC2 */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x20007f3f, 0 , 0, @@ -18395,16 +17905,16 @@ NMD::Pool NMD::POOL32Axf_4[128] = { 0xfc00ffff, 0x20008b3f, 0 , 0, 0x0 }, /* POOL32Axf_4~*(69) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x20008d3f, &NMD::MFHC2 , 0, + 0xfc00ffff, 0x20008d3f, &MFHC2 , 0, CP2_ }, /* MFHC2 */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x20008f3f, 0 , 0, 0x0 }, /* POOL32Axf_4~*(71) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000913f, &NMD::PRECEQU_PH_QBR , 0, + 0xfc00ffff, 0x2000913f, &PRECEQU_PH_QBR , 0, DSP_ }, /* PRECEQU.PH.QBR */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000933f, &NMD::PRECEQU_PH_QBRA , 0, + 0xfc00ffff, 0x2000933f, &PRECEQU_PH_QBRA , 0, DSP_ }, /* PRECEQU.PH.QBRA */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000953f, 0 , 0, @@ -18419,7 +17929,7 @@ NMD::Pool NMD::POOL32Axf_4[128] = { 0xfc00ffff, 0x20009b3f, 0 , 0, 0x0 }, /* POOL32Axf_4~*(77) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x20009d3f, &NMD::MTHC2 , 0, + 0xfc00ffff, 0x20009d3f, &MTHC2 , 0, CP2_ }, /* MTHC2 */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x20009f3f, 0 , 0, @@ -18449,10 +17959,10 @@ NMD::Pool NMD::POOL32Axf_4[128] = { 0xfc00ffff, 0x2000af3f, 0 , 0, 0x0 }, /* POOL32Axf_4~*(87) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000b13f, &NMD::PRECEU_PH_QBL , 0, + 0xfc00ffff, 0x2000b13f, &PRECEU_PH_QBL , 0, DSP_ }, /* PRECEU.PH.QBL */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000b33f, &NMD::PRECEU_PH_QBLA , 0, + 0xfc00ffff, 0x2000b33f, &PRECEU_PH_QBLA , 0, DSP_ }, /* PRECEU.PH.QBLA */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000b53f, 0 , 0, @@ -18491,16 +18001,16 @@ NMD::Pool NMD::POOL32Axf_4[128] = { 0xfc00ffff, 0x2000cb3f, 0 , 0, 0x0 }, /* POOL32Axf_4~*(101) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000cd3f, &NMD::CFC2 , 0, + 0xfc00ffff, 0x2000cd3f, &CFC2 , 0, CP2_ }, /* CFC2 */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000cf3f, 0 , 0, 0x0 }, /* POOL32Axf_4~*(103) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000d13f, &NMD::PRECEU_PH_QBR , 0, + 0xfc00ffff, 0x2000d13f, &PRECEU_PH_QBR , 0, DSP_ }, /* PRECEU.PH.QBR */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000d33f, &NMD::PRECEU_PH_QBRA , 0, + 0xfc00ffff, 0x2000d33f, &PRECEU_PH_QBRA , 0, DSP_ }, /* PRECEU.PH.QBRA */ { reserved_block , 0 , 0 , 32, 
0xfc00ffff, 0x2000d53f, 0 , 0, @@ -18515,7 +18025,7 @@ NMD::Pool NMD::POOL32Axf_4[128] = { 0xfc00ffff, 0x2000db3f, 0 , 0, 0x0 }, /* POOL32Axf_4~*(109) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000dd3f, &NMD::CTC2 , 0, + 0xfc00ffff, 0x2000dd3f, &CTC2 , 0, CP2_ }, /* CTC2 */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000df3f, 0 , 0, @@ -18545,7 +18055,7 @@ NMD::Pool NMD::POOL32Axf_4[128] = { 0xfc00ffff, 0x2000ef3f, 0 , 0, 0x0 }, /* POOL32Axf_4~*(119) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000f13f, &NMD::RADDU_W_QB , 0, + 0xfc00ffff, 0x2000f13f, &RADDU_W_QB , 0, DSP_ }, /* RADDU.W.QB */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000f33f, 0 , 0, @@ -18571,18 +18081,18 @@ NMD::Pool NMD::POOL32Axf_4[128] = { }; -NMD::Pool NMD::POOL32Axf_5_group0[32] = { +static const Pool POOL32Axf_5_group0[32] = { { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000017f, &NMD::TLBGP , 0, + 0xfc00ffff, 0x2000017f, &TLBGP , 0, CP0_ | VZ_ | TLB_ }, /* TLBGP */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000037f, &NMD::TLBP , 0, + 0xfc00ffff, 0x2000037f, &TLBP , 0, CP0_ | TLB_ }, /* TLBP */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000057f, &NMD::TLBGINV , 0, + 0xfc00ffff, 0x2000057f, &TLBGINV , 0, CP0_ | VZ_ | TLB_ | TLBINV_}, /* TLBGINV */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000077f, &NMD::TLBINV , 0, + 0xfc00ffff, 0x2000077f, &TLBINV , 0, CP0_ | TLB_ | TLBINV_}, /* TLBINV */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000097f, 0 , 0, @@ -18597,16 +18107,16 @@ NMD::Pool NMD::POOL32Axf_5_group0[32] = { 0xfc00ffff, 0x20000f7f, 0 , 0, 0x0 }, /* POOL32Axf_5_group0~*(7) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000117f, &NMD::TLBGR , 0, + 0xfc00ffff, 0x2000117f, &TLBGR , 0, CP0_ | VZ_ | TLB_ }, /* TLBGR */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000137f, &NMD::TLBR , 0, + 0xfc00ffff, 0x2000137f, &TLBR , 0, CP0_ | TLB_ }, /* TLBR */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000157f, &NMD::TLBGINVF , 0, + 0xfc00ffff, 0x2000157f, &TLBGINVF , 0, CP0_ | VZ_ | TLB_ | TLBINV_}, /* TLBGINVF */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000177f, &NMD::TLBINVF , 0, + 0xfc00ffff, 0x2000177f, &TLBINVF , 0, CP0_ | TLB_ | TLBINV_}, /* TLBINVF */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000197f, 0 , 0, @@ -18621,10 +18131,10 @@ NMD::Pool NMD::POOL32Axf_5_group0[32] = { 0xfc00ffff, 0x20001f7f, 0 , 0, 0x0 }, /* POOL32Axf_5_group0~*(15) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000217f, &NMD::TLBGWI , 0, + 0xfc00ffff, 0x2000217f, &TLBGWI , 0, CP0_ | VZ_ | TLB_ }, /* TLBGWI */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000237f, &NMD::TLBWI , 0, + 0xfc00ffff, 0x2000237f, &TLBWI , 0, CP0_ | TLB_ }, /* TLBWI */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000257f, 0 , 0, @@ -18645,10 +18155,10 @@ NMD::Pool NMD::POOL32Axf_5_group0[32] = { 0xfc00ffff, 0x20002f7f, 0 , 0, 0x0 }, /* POOL32Axf_5_group0~*(23) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000317f, &NMD::TLBGWR , 0, + 0xfc00ffff, 0x2000317f, &TLBGWR , 0, CP0_ | VZ_ | TLB_ }, /* TLBGWR */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000337f, &NMD::TLBWR , 0, + 0xfc00ffff, 0x2000337f, &TLBWR , 0, CP0_ | TLB_ }, /* TLBWR */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000357f, 0 , 0, @@ -18671,7 +18181,7 @@ NMD::Pool NMD::POOL32Axf_5_group0[32] = { }; -NMD::Pool NMD::POOL32Axf_5_group1[32] = { +static const Pool POOL32Axf_5_group1[32] = { { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000417f, 0 , 0, 0x0 }, /* POOL32Axf_5_group1~*(0) */ @@ -18682,7 +18192,7 @@ NMD::Pool NMD::POOL32Axf_5_group1[32] = { 0xfc00ffff, 
0x2000457f, 0 , 0, 0x0 }, /* POOL32Axf_5_group1~*(2) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000477f, &NMD::DI , 0, + 0xfc00ffff, 0x2000477f, &DI , 0, 0x0 }, /* DI */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000497f, 0 , 0, @@ -18706,7 +18216,7 @@ NMD::Pool NMD::POOL32Axf_5_group1[32] = { 0xfc00ffff, 0x2000557f, 0 , 0, 0x0 }, /* POOL32Axf_5_group1~*(10) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000577f, &NMD::EI , 0, + 0xfc00ffff, 0x2000577f, &EI , 0, 0x0 }, /* EI */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000597f, 0 , 0, @@ -18771,22 +18281,22 @@ NMD::Pool NMD::POOL32Axf_5_group1[32] = { }; -NMD::Pool NMD::ERETx[2] = { +static const Pool ERETx[2] = { { instruction , 0 , 0 , 32, - 0xfc01ffff, 0x2000f37f, &NMD::ERET , 0, + 0xfc01ffff, 0x2000f37f, &ERET , 0, 0x0 }, /* ERET */ { instruction , 0 , 0 , 32, - 0xfc01ffff, 0x2001f37f, &NMD::ERETNC , 0, + 0xfc01ffff, 0x2001f37f, &ERETNC , 0, 0x0 }, /* ERETNC */ }; -NMD::Pool NMD::POOL32Axf_5_group3[32] = { +static const Pool POOL32Axf_5_group3[32] = { { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000c17f, 0 , 0, 0x0 }, /* POOL32Axf_5_group3~*(0) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000c37f, &NMD::WAIT , 0, + 0xfc00ffff, 0x2000c37f, &WAIT , 0, 0x0 }, /* WAIT */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000c57f, 0 , 0, @@ -18810,7 +18320,7 @@ NMD::Pool NMD::POOL32Axf_5_group3[32] = { 0xfc00ffff, 0x2000d17f, 0 , 0, 0x0 }, /* POOL32Axf_5_group3~*(8) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000d37f, &NMD::IRET , 0, + 0xfc00ffff, 0x2000d37f, &IRET , 0, MCU_ }, /* IRET */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000d57f, 0 , 0, @@ -18831,10 +18341,10 @@ NMD::Pool NMD::POOL32Axf_5_group3[32] = { 0xfc00ffff, 0x2000df7f, 0 , 0, 0x0 }, /* POOL32Axf_5_group3~*(15) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000e17f, &NMD::RDPGPR , 0, + 0xfc00ffff, 0x2000e17f, &RDPGPR , 0, CP0_ }, /* RDPGPR */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000e37f, &NMD::DERET , 0, + 0xfc00ffff, 0x2000e37f, &DERET , 0, EJTAG_ }, /* DERET */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0x2000e57f, 0 , 0, @@ -18855,7 +18365,7 @@ NMD::Pool NMD::POOL32Axf_5_group3[32] = { 0xfc00ffff, 0x2000ef7f, 0 , 0, 0x0 }, /* POOL32Axf_5_group3~*(23) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0x2000f17f, &NMD::WRPGPR , 0, + 0xfc00ffff, 0x2000f17f, &WRPGPR , 0, CP0_ }, /* WRPGPR */ { pool , ERETx , 2 , 32, 0xfc00ffff, 0x2000f37f, 0 , 0, @@ -18881,7 +18391,7 @@ NMD::Pool NMD::POOL32Axf_5_group3[32] = { }; -NMD::Pool NMD::POOL32Axf_5[4] = { +static const Pool POOL32Axf_5[4] = { { pool , POOL32Axf_5_group0 , 32 , 32, 0xfc00c1ff, 0x2000017f, 0 , 0, 0x0 }, /* POOL32Axf_5_group0 */ @@ -18897,25 +18407,25 @@ NMD::Pool NMD::POOL32Axf_5[4] = { }; -NMD::Pool NMD::SHRA__R__QB[2] = { +static const Pool SHRA__R__QB[2] = { { instruction , 0 , 0 , 32, - 0xfc001fff, 0x200001ff, &NMD::SHRA_QB , 0, + 0xfc001fff, 0x200001ff, &SHRA_QB , 0, DSP_ }, /* SHRA.QB */ { instruction , 0 , 0 , 32, - 0xfc001fff, 0x200011ff, &NMD::SHRA_R_QB , 0, + 0xfc001fff, 0x200011ff, &SHRA_R_QB , 0, DSP_ }, /* SHRA_R.QB */ }; -NMD::Pool NMD::POOL32Axf_7[8] = { +static const Pool POOL32Axf_7[8] = { { pool , SHRA__R__QB , 2 , 32, 0xfc000fff, 0x200001ff, 0 , 0, 0x0 }, /* SHRA[_R].QB */ { instruction , 0 , 0 , 32, - 0xfc000fff, 0x200003ff, &NMD::SHRL_PH , 0, + 0xfc000fff, 0x200003ff, &SHRL_PH , 0, DSP_ }, /* SHRL.PH */ { instruction , 0 , 0 , 32, - 0xfc000fff, 0x200005ff, &NMD::REPL_QB , 0, + 0xfc000fff, 0x200005ff, &REPL_QB , 0, DSP_ }, /* REPL.QB */ { reserved_block , 0 , 0 , 32, 
0xfc000fff, 0x200007ff, 0 , 0, @@ -18935,7 +18445,7 @@ NMD::Pool NMD::POOL32Axf_7[8] = { }; -NMD::Pool NMD::POOL32Axf[8] = { +static const Pool POOL32Axf[8] = { { reserved_block , 0 , 0 , 32, 0xfc0001ff, 0x2000003f, 0 , 0, 0x0 }, /* POOL32Axf~*(0) */ @@ -18963,18 +18473,18 @@ NMD::Pool NMD::POOL32Axf[8] = { }; -NMD::Pool NMD::_POOL32A7[8] = { +static const Pool _POOL32A7[8] = { { pool , P_LSX , 2 , 32, 0xfc00003f, 0x20000007, 0 , 0, 0x0 }, /* P.LSX */ { instruction , 0 , 0 , 32, - 0xfc00003f, 0x2000000f, &NMD::LSA , 0, + 0xfc00003f, 0x2000000f, &LSA , 0, 0x0 }, /* LSA */ { reserved_block , 0 , 0 , 32, 0xfc00003f, 0x20000017, 0 , 0, 0x0 }, /* _POOL32A7~*(2) */ { instruction , 0 , 0 , 32, - 0xfc00003f, 0x2000001f, &NMD::EXTW , 0, + 0xfc00003f, 0x2000001f, &EXTW , 0, 0x0 }, /* EXTW */ { reserved_block , 0 , 0 , 32, 0xfc00003f, 0x20000027, 0 , 0, @@ -18991,18 +18501,18 @@ NMD::Pool NMD::_POOL32A7[8] = { }; -NMD::Pool NMD::P32A[8] = { +static const Pool P32A[8] = { { pool , _POOL32A0 , 128 , 32, 0xfc000007, 0x20000000, 0 , 0, 0x0 }, /* _POOL32A0 */ { instruction , 0 , 0 , 32, - 0xfc000007, 0x20000001, &NMD::SPECIAL2 , 0, + 0xfc000007, 0x20000001, &SPECIAL2 , 0, UDI_ }, /* SPECIAL2 */ { instruction , 0 , 0 , 32, - 0xfc000007, 0x20000002, &NMD::COP2_1 , 0, + 0xfc000007, 0x20000002, &COP2_1 , 0, CP2_ }, /* COP2_1 */ { instruction , 0 , 0 , 32, - 0xfc000007, 0x20000003, &NMD::UDI , 0, + 0xfc000007, 0x20000003, &UDI , 0, UDI_ }, /* UDI */ { reserved_block , 0 , 0 , 32, 0xfc000007, 0x20000004, 0 , 0, @@ -19019,44 +18529,44 @@ NMD::Pool NMD::P32A[8] = { }; -NMD::Pool NMD::P_GP_D[2] = { +static const Pool P_GP_D[2] = { { instruction , 0 , 0 , 32, - 0xfc000007, 0x40000001, &NMD::LD_GP_ , 0, + 0xfc000007, 0x40000001, &LD_GP_ , 0, MIPS64_ }, /* LD[GP] */ { instruction , 0 , 0 , 32, - 0xfc000007, 0x40000005, &NMD::SD_GP_ , 0, + 0xfc000007, 0x40000005, &SD_GP_ , 0, MIPS64_ }, /* SD[GP] */ }; -NMD::Pool NMD::P_GP_W[4] = { +static const Pool P_GP_W[4] = { { instruction , 0 , 0 , 32, - 0xfc000003, 0x40000000, &NMD::ADDIU_GP_W_ , 0, + 0xfc000003, 0x40000000, &ADDIU_GP_W_ , 0, 0x0 }, /* ADDIU[GP.W] */ { pool , P_GP_D , 2 , 32, 0xfc000003, 0x40000001, 0 , 0, 0x0 }, /* P.GP.D */ { instruction , 0 , 0 , 32, - 0xfc000003, 0x40000002, &NMD::LW_GP_ , 0, + 0xfc000003, 0x40000002, &LW_GP_ , 0, 0x0 }, /* LW[GP] */ { instruction , 0 , 0 , 32, - 0xfc000003, 0x40000003, &NMD::SW_GP_ , 0, + 0xfc000003, 0x40000003, &SW_GP_ , 0, 0x0 }, /* SW[GP] */ }; -NMD::Pool NMD::POOL48I[32] = { +static const Pool POOL48I[32] = { { instruction , 0 , 0 , 48, - 0xfc1f00000000ull, 0x600000000000ull, &NMD::LI_48_ , 0, + 0xfc1f00000000ull, 0x600000000000ull, &LI_48_ , 0, XMMS_ }, /* LI[48] */ { instruction , 0 , 0 , 48, - 0xfc1f00000000ull, 0x600100000000ull, &NMD::ADDIU_48_ , 0, + 0xfc1f00000000ull, 0x600100000000ull, &ADDIU_48_ , 0, XMMS_ }, /* ADDIU[48] */ { instruction , 0 , 0 , 48, - 0xfc1f00000000ull, 0x600200000000ull, &NMD::ADDIU_GP48_ , 0, + 0xfc1f00000000ull, 0x600200000000ull, &ADDIU_GP48_ , 0, XMMS_ }, /* ADDIU[GP48] */ { instruction , 0 , 0 , 48, - 0xfc1f00000000ull, 0x600300000000ull, &NMD::ADDIUPC_48_ , 0, + 0xfc1f00000000ull, 0x600300000000ull, &ADDIUPC_48_ , 0, XMMS_ }, /* ADDIUPC[48] */ { reserved_block , 0 , 0 , 48, 0xfc1f00000000ull, 0x600400000000ull, 0 , 0, @@ -19080,7 +18590,7 @@ NMD::Pool NMD::POOL48I[32] = { 0xfc1f00000000ull, 0x600a00000000ull, 0 , 0, 0x0 }, /* POOL48I~*(10) */ { instruction , 0 , 0 , 48, - 0xfc1f00000000ull, 0x600b00000000ull, &NMD::LWPC_48_ , 0, + 0xfc1f00000000ull, 0x600b00000000ull, &LWPC_48_ , 0, 
XMMS_ }, /* LWPC[48] */ { reserved_block , 0 , 0 , 48, 0xfc1f00000000ull, 0x600c00000000ull, 0 , 0, @@ -19092,13 +18602,13 @@ NMD::Pool NMD::POOL48I[32] = { 0xfc1f00000000ull, 0x600e00000000ull, 0 , 0, 0x0 }, /* POOL48I~*(14) */ { instruction , 0 , 0 , 48, - 0xfc1f00000000ull, 0x600f00000000ull, &NMD::SWPC_48_ , 0, + 0xfc1f00000000ull, 0x600f00000000ull, &SWPC_48_ , 0, XMMS_ }, /* SWPC[48] */ { reserved_block , 0 , 0 , 48, 0xfc1f00000000ull, 0x601000000000ull, 0 , 0, 0x0 }, /* POOL48I~*(16) */ { instruction , 0 , 0 , 48, - 0xfc1f00000000ull, 0x601100000000ull, &NMD::DADDIU_48_ , 0, + 0xfc1f00000000ull, 0x601100000000ull, &DADDIU_48_ , 0, MIPS64_ }, /* DADDIU[48] */ { reserved_block , 0 , 0 , 48, 0xfc1f00000000ull, 0x601200000000ull, 0 , 0, @@ -19107,7 +18617,7 @@ NMD::Pool NMD::POOL48I[32] = { 0xfc1f00000000ull, 0x601300000000ull, 0 , 0, 0x0 }, /* POOL48I~*(19) */ { instruction , 0 , 0 , 48, - 0xfc1f00000000ull, 0x601400000000ull, &NMD::DLUI_48_ , 0, + 0xfc1f00000000ull, 0x601400000000ull, &DLUI_48_ , 0, MIPS64_ }, /* DLUI[48] */ { reserved_block , 0 , 0 , 48, 0xfc1f00000000ull, 0x601500000000ull, 0 , 0, @@ -19128,7 +18638,7 @@ NMD::Pool NMD::POOL48I[32] = { 0xfc1f00000000ull, 0x601a00000000ull, 0 , 0, 0x0 }, /* POOL48I~*(26) */ { instruction , 0 , 0 , 48, - 0xfc1f00000000ull, 0x601b00000000ull, &NMD::LDPC_48_ , 0, + 0xfc1f00000000ull, 0x601b00000000ull, &LDPC_48_ , 0, MIPS64_ }, /* LDPC[48] */ { reserved_block , 0 , 0 , 48, 0xfc1f00000000ull, 0x601c00000000ull, 0 , 0, @@ -19140,33 +18650,33 @@ NMD::Pool NMD::POOL48I[32] = { 0xfc1f00000000ull, 0x601e00000000ull, 0 , 0, 0x0 }, /* POOL48I~*(30) */ { instruction , 0 , 0 , 48, - 0xfc1f00000000ull, 0x601f00000000ull, &NMD::SDPC_48_ , 0, + 0xfc1f00000000ull, 0x601f00000000ull, &SDPC_48_ , 0, MIPS64_ }, /* SDPC[48] */ }; -NMD::Pool NMD::PP_SR[4] = { +static const Pool PP_SR[4] = { { instruction , 0 , 0 , 32, - 0xfc10f003, 0x80003000, &NMD::SAVE_32_ , 0, + 0xfc10f003, 0x80003000, &SAVE_32_ , 0, 0x0 }, /* SAVE[32] */ { reserved_block , 0 , 0 , 32, 0xfc10f003, 0x80003001, 0 , 0, 0x0 }, /* PP.SR~*(1) */ { instruction , 0 , 0 , 32, - 0xfc10f003, 0x80003002, &NMD::RESTORE_32_ , 0, + 0xfc10f003, 0x80003002, &RESTORE_32_ , 0, 0x0 }, /* RESTORE[32] */ { return_instruction , 0 , 0 , 32, - 0xfc10f003, 0x80003003, &NMD::RESTORE_JRC_32_ , 0, + 0xfc10f003, 0x80003003, &RESTORE_JRC_32_ , 0, 0x0 }, /* RESTORE.JRC[32] */ }; -NMD::Pool NMD::P_SR_F[8] = { +static const Pool P_SR_F[8] = { { instruction , 0 , 0 , 32, - 0xfc10f007, 0x80103000, &NMD::SAVEF , 0, + 0xfc10f007, 0x80103000, &SAVEF , 0, CP1_ }, /* SAVEF */ { instruction , 0 , 0 , 32, - 0xfc10f007, 0x80103001, &NMD::RESTOREF , 0, + 0xfc10f007, 0x80103001, &RESTOREF , 0, CP1_ }, /* RESTOREF */ { reserved_block , 0 , 0 , 32, 0xfc10f007, 0x80103002, 0 , 0, @@ -19189,7 +18699,7 @@ NMD::Pool NMD::P_SR_F[8] = { }; -NMD::Pool NMD::P_SR[2] = { +static const Pool P_SR[2] = { { pool , PP_SR , 4 , 32, 0xfc10f000, 0x80003000, 0 , 0, 0x0 }, /* PP.SR */ @@ -19199,26 +18709,26 @@ NMD::Pool NMD::P_SR[2] = { }; -NMD::Pool NMD::P_SLL[5] = { +static const Pool P_SLL[5] = { { instruction , 0 , 0 , 32, - 0xffe0f1ff, 0x8000c000, &NMD::NOP_32_ , 0, + 0xffe0f1ff, 0x8000c000, &NOP_32_ , 0, 0x0 }, /* NOP[32] */ { instruction , 0 , 0 , 32, - 0xffe0f1ff, 0x8000c003, &NMD::EHB , 0, + 0xffe0f1ff, 0x8000c003, &EHB , 0, 0x0 }, /* EHB */ { instruction , 0 , 0 , 32, - 0xffe0f1ff, 0x8000c005, &NMD::PAUSE , 0, + 0xffe0f1ff, 0x8000c005, &PAUSE , 0, 0x0 }, /* PAUSE */ { instruction , 0 , 0 , 32, - 0xffe0f1ff, 0x8000c006, &NMD::SYNC , 0, + 
0xffe0f1ff, 0x8000c006, &SYNC , 0, 0x0 }, /* SYNC */ { instruction , 0 , 0 , 32, - 0xfc00f1e0, 0x8000c000, &NMD::SLL_32_ , 0, + 0xfc00f1e0, 0x8000c000, &SLL_32_ , 0, 0x0 }, /* SLL[32] */ }; -NMD::Pool NMD::P_SHIFT[16] = { +static const Pool P_SHIFT[16] = { { pool , P_SLL , 5 , 32, 0xfc00f1e0, 0x8000c000, 0 , 0, 0x0 }, /* P.SLL */ @@ -19226,53 +18736,53 @@ NMD::Pool NMD::P_SHIFT[16] = { 0xfc00f1e0, 0x8000c020, 0 , 0, 0x0 }, /* P.SHIFT~*(1) */ { instruction , 0 , 0 , 32, - 0xfc00f1e0, 0x8000c040, &NMD::SRL_32_ , 0, + 0xfc00f1e0, 0x8000c040, &SRL_32_ , 0, 0x0 }, /* SRL[32] */ { reserved_block , 0 , 0 , 32, 0xfc00f1e0, 0x8000c060, 0 , 0, 0x0 }, /* P.SHIFT~*(3) */ { instruction , 0 , 0 , 32, - 0xfc00f1e0, 0x8000c080, &NMD::SRA , 0, + 0xfc00f1e0, 0x8000c080, &SRA , 0, 0x0 }, /* SRA */ { reserved_block , 0 , 0 , 32, 0xfc00f1e0, 0x8000c0a0, 0 , 0, 0x0 }, /* P.SHIFT~*(5) */ { instruction , 0 , 0 , 32, - 0xfc00f1e0, 0x8000c0c0, &NMD::ROTR , 0, + 0xfc00f1e0, 0x8000c0c0, &ROTR , 0, 0x0 }, /* ROTR */ { reserved_block , 0 , 0 , 32, 0xfc00f1e0, 0x8000c0e0, 0 , 0, 0x0 }, /* P.SHIFT~*(7) */ { instruction , 0 , 0 , 32, - 0xfc00f1e0, 0x8000c100, &NMD::DSLL , 0, + 0xfc00f1e0, 0x8000c100, &DSLL , 0, MIPS64_ }, /* DSLL */ { instruction , 0 , 0 , 32, - 0xfc00f1e0, 0x8000c120, &NMD::DSLL32 , 0, + 0xfc00f1e0, 0x8000c120, &DSLL32 , 0, MIPS64_ }, /* DSLL32 */ { instruction , 0 , 0 , 32, - 0xfc00f1e0, 0x8000c140, &NMD::DSRL , 0, + 0xfc00f1e0, 0x8000c140, &DSRL , 0, MIPS64_ }, /* DSRL */ { instruction , 0 , 0 , 32, - 0xfc00f1e0, 0x8000c160, &NMD::DSRL32 , 0, + 0xfc00f1e0, 0x8000c160, &DSRL32 , 0, MIPS64_ }, /* DSRL32 */ { instruction , 0 , 0 , 32, - 0xfc00f1e0, 0x8000c180, &NMD::DSRA , 0, + 0xfc00f1e0, 0x8000c180, &DSRA , 0, MIPS64_ }, /* DSRA */ { instruction , 0 , 0 , 32, - 0xfc00f1e0, 0x8000c1a0, &NMD::DSRA32 , 0, + 0xfc00f1e0, 0x8000c1a0, &DSRA32 , 0, MIPS64_ }, /* DSRA32 */ { instruction , 0 , 0 , 32, - 0xfc00f1e0, 0x8000c1c0, &NMD::DROTR , 0, + 0xfc00f1e0, 0x8000c1c0, &DROTR , 0, MIPS64_ }, /* DROTR */ { instruction , 0 , 0 , 32, - 0xfc00f1e0, 0x8000c1e0, &NMD::DROTR32 , 0, + 0xfc00f1e0, 0x8000c1e0, &DROTR32 , 0, MIPS64_ }, /* DROTR32 */ }; -NMD::Pool NMD::P_ROTX[4] = { +static const Pool P_ROTX[4] = { { instruction , 0 , 0 , 32, - 0xfc00f820, 0x8000d000, &NMD::ROTX , 0, + 0xfc00f820, 0x8000d000, &ROTX , 0, XMMS_ }, /* ROTX */ { reserved_block , 0 , 0 , 32, 0xfc00f820, 0x8000d020, 0 , 0, @@ -19286,74 +18796,74 @@ NMD::Pool NMD::P_ROTX[4] = { }; -NMD::Pool NMD::P_INS[4] = { +static const Pool P_INS[4] = { { instruction , 0 , 0 , 32, - 0xfc00f820, 0x8000e000, &NMD::INS , 0, + 0xfc00f820, 0x8000e000, &INS , 0, XMMS_ }, /* INS */ { instruction , 0 , 0 , 32, - 0xfc00f820, 0x8000e020, &NMD::DINSU , 0, + 0xfc00f820, 0x8000e020, &DINSU , 0, MIPS64_ }, /* DINSU */ { instruction , 0 , 0 , 32, - 0xfc00f820, 0x8000e800, &NMD::DINSM , 0, + 0xfc00f820, 0x8000e800, &DINSM , 0, MIPS64_ }, /* DINSM */ { instruction , 0 , 0 , 32, - 0xfc00f820, 0x8000e820, &NMD::DINS , 0, + 0xfc00f820, 0x8000e820, &DINS , 0, MIPS64_ }, /* DINS */ }; -NMD::Pool NMD::P_EXT[4] = { +static const Pool P_EXT[4] = { { instruction , 0 , 0 , 32, - 0xfc00f820, 0x8000f000, &NMD::EXT , 0, + 0xfc00f820, 0x8000f000, &EXT , 0, XMMS_ }, /* EXT */ { instruction , 0 , 0 , 32, - 0xfc00f820, 0x8000f020, &NMD::DEXTU , 0, + 0xfc00f820, 0x8000f020, &DEXTU , 0, MIPS64_ }, /* DEXTU */ { instruction , 0 , 0 , 32, - 0xfc00f820, 0x8000f800, &NMD::DEXTM , 0, + 0xfc00f820, 0x8000f800, &DEXTM , 0, MIPS64_ }, /* DEXTM */ { instruction , 0 , 0 , 32, - 0xfc00f820, 0x8000f820, 
&NMD::DEXT , 0, + 0xfc00f820, 0x8000f820, &DEXT , 0, MIPS64_ }, /* DEXT */ }; -NMD::Pool NMD::P_U12[16] = { +static const Pool P_U12[16] = { { instruction , 0 , 0 , 32, - 0xfc00f000, 0x80000000, &NMD::ORI , 0, + 0xfc00f000, 0x80000000, &ORI , 0, 0x0 }, /* ORI */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x80001000, &NMD::XORI , 0, + 0xfc00f000, 0x80001000, &XORI , 0, 0x0 }, /* XORI */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x80002000, &NMD::ANDI_32_ , 0, + 0xfc00f000, 0x80002000, &ANDI_32_ , 0, 0x0 }, /* ANDI[32] */ { pool , P_SR , 2 , 32, 0xfc00f000, 0x80003000, 0 , 0, 0x0 }, /* P.SR */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x80004000, &NMD::SLTI , 0, + 0xfc00f000, 0x80004000, &SLTI , 0, 0x0 }, /* SLTI */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x80005000, &NMD::SLTIU , 0, + 0xfc00f000, 0x80005000, &SLTIU , 0, 0x0 }, /* SLTIU */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x80006000, &NMD::SEQI , 0, + 0xfc00f000, 0x80006000, &SEQI , 0, 0x0 }, /* SEQI */ { reserved_block , 0 , 0 , 32, 0xfc00f000, 0x80007000, 0 , 0, 0x0 }, /* P.U12~*(7) */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x80008000, &NMD::ADDIU_NEG_ , 0, + 0xfc00f000, 0x80008000, &ADDIU_NEG_ , 0, 0x0 }, /* ADDIU[NEG] */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x80009000, &NMD::DADDIU_U12_ , 0, + 0xfc00f000, 0x80009000, &DADDIU_U12_ , 0, MIPS64_ }, /* DADDIU[U12] */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x8000a000, &NMD::DADDIU_NEG_ , 0, + 0xfc00f000, 0x8000a000, &DADDIU_NEG_ , 0, MIPS64_ }, /* DADDIU[NEG] */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x8000b000, &NMD::DROTX , 0, + 0xfc00f000, 0x8000b000, &DROTX , 0, MIPS64_ }, /* DROTX */ { pool , P_SHIFT , 16 , 32, 0xfc00f000, 0x8000c000, 0 , 0, @@ -19370,19 +18880,19 @@ NMD::Pool NMD::P_U12[16] = { }; -NMD::Pool NMD::RINT_fmt[2] = { +static const Pool RINT_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa0000020, &NMD::RINT_S , 0, + 0xfc0003ff, 0xa0000020, &RINT_S , 0, CP1_ }, /* RINT.S */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa0000220, &NMD::RINT_D , 0, + 0xfc0003ff, 0xa0000220, &RINT_D , 0, CP1_ }, /* RINT.D */ }; -NMD::Pool NMD::ADD_fmt0[2] = { +static const Pool ADD_fmt0[2] = { { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa0000030, &NMD::ADD_S , 0, + 0xfc0003ff, 0xa0000030, &ADD_S , 0, CP1_ }, /* ADD.S */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0xa0000230, 0 , 0, @@ -19390,29 +18900,29 @@ NMD::Pool NMD::ADD_fmt0[2] = { }; -NMD::Pool NMD::SELEQZ_fmt[2] = { +static const Pool SELEQZ_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa0000038, &NMD::SELEQZ_S , 0, + 0xfc0003ff, 0xa0000038, &SELEQZ_S , 0, CP1_ }, /* SELEQZ.S */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa0000238, &NMD::SELEQZ_D , 0, + 0xfc0003ff, 0xa0000238, &SELEQZ_D , 0, CP1_ }, /* SELEQZ.D */ }; -NMD::Pool NMD::CLASS_fmt[2] = { +static const Pool CLASS_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa0000060, &NMD::CLASS_S , 0, + 0xfc0003ff, 0xa0000060, &CLASS_S , 0, CP1_ }, /* CLASS.S */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa0000260, &NMD::CLASS_D , 0, + 0xfc0003ff, 0xa0000260, &CLASS_D , 0, CP1_ }, /* CLASS.D */ }; -NMD::Pool NMD::SUB_fmt0[2] = { +static const Pool SUB_fmt0[2] = { { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa0000070, &NMD::SUB_S , 0, + 0xfc0003ff, 0xa0000070, &SUB_S , 0, CP1_ }, /* SUB.S */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0xa0000270, 0 , 0, @@ -19420,19 +18930,19 @@ NMD::Pool NMD::SUB_fmt0[2] = { }; -NMD::Pool NMD::SELNEZ_fmt[2] = { +static const Pool SELNEZ_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa0000078, 
&NMD::SELNEZ_S , 0, + 0xfc0003ff, 0xa0000078, &SELNEZ_S , 0, CP1_ }, /* SELNEZ.S */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa0000278, &NMD::SELNEZ_D , 0, + 0xfc0003ff, 0xa0000278, &SELNEZ_D , 0, CP1_ }, /* SELNEZ.D */ }; -NMD::Pool NMD::MUL_fmt0[2] = { +static const Pool MUL_fmt0[2] = { { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa00000b0, &NMD::MUL_S , 0, + 0xfc0003ff, 0xa00000b0, &MUL_S , 0, CP1_ }, /* MUL.S */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0xa00002b0, 0 , 0, @@ -19440,19 +18950,19 @@ NMD::Pool NMD::MUL_fmt0[2] = { }; -NMD::Pool NMD::SEL_fmt[2] = { +static const Pool SEL_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa00000b8, &NMD::SEL_S , 0, + 0xfc0003ff, 0xa00000b8, &SEL_S , 0, CP1_ }, /* SEL.S */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa00002b8, &NMD::SEL_D , 0, + 0xfc0003ff, 0xa00002b8, &SEL_D , 0, CP1_ }, /* SEL.D */ }; -NMD::Pool NMD::DIV_fmt0[2] = { +static const Pool DIV_fmt0[2] = { { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa00000f0, &NMD::DIV_S , 0, + 0xfc0003ff, 0xa00000f0, &DIV_S , 0, CP1_ }, /* DIV.S */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0xa00002f0, 0 , 0, @@ -19460,9 +18970,9 @@ NMD::Pool NMD::DIV_fmt0[2] = { }; -NMD::Pool NMD::ADD_fmt1[2] = { +static const Pool ADD_fmt1[2] = { { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa0000130, &NMD::ADD_D , 0, + 0xfc0003ff, 0xa0000130, &ADD_D , 0, CP1_ }, /* ADD.D */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0xa0000330, 0 , 0, @@ -19470,9 +18980,9 @@ NMD::Pool NMD::ADD_fmt1[2] = { }; -NMD::Pool NMD::SUB_fmt1[2] = { +static const Pool SUB_fmt1[2] = { { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa0000170, &NMD::SUB_D , 0, + 0xfc0003ff, 0xa0000170, &SUB_D , 0, CP1_ }, /* SUB.D */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0xa0000370, 0 , 0, @@ -19480,9 +18990,9 @@ NMD::Pool NMD::SUB_fmt1[2] = { }; -NMD::Pool NMD::MUL_fmt1[2] = { +static const Pool MUL_fmt1[2] = { { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa00001b0, &NMD::MUL_D , 0, + 0xfc0003ff, 0xa00001b0, &MUL_D , 0, CP1_ }, /* MUL.D */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0xa00003b0, 0 , 0, @@ -19490,19 +19000,19 @@ NMD::Pool NMD::MUL_fmt1[2] = { }; -NMD::Pool NMD::MADDF_fmt[2] = { +static const Pool MADDF_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa00001b8, &NMD::MADDF_S , 0, + 0xfc0003ff, 0xa00001b8, &MADDF_S , 0, CP1_ }, /* MADDF.S */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa00003b8, &NMD::MADDF_D , 0, + 0xfc0003ff, 0xa00003b8, &MADDF_D , 0, CP1_ }, /* MADDF.D */ }; -NMD::Pool NMD::DIV_fmt1[2] = { +static const Pool DIV_fmt1[2] = { { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa00001f0, &NMD::DIV_D , 0, + 0xfc0003ff, 0xa00001f0, &DIV_D , 0, CP1_ }, /* DIV.D */ { reserved_block , 0 , 0 , 32, 0xfc0003ff, 0xa00003f0, 0 , 0, @@ -19510,17 +19020,17 @@ NMD::Pool NMD::DIV_fmt1[2] = { }; -NMD::Pool NMD::MSUBF_fmt[2] = { +static const Pool MSUBF_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa00001f8, &NMD::MSUBF_S , 0, + 0xfc0003ff, 0xa00001f8, &MSUBF_S , 0, CP1_ }, /* MSUBF.S */ { instruction , 0 , 0 , 32, - 0xfc0003ff, 0xa00003f8, &NMD::MSUBF_D , 0, + 0xfc0003ff, 0xa00003f8, &MSUBF_D , 0, CP1_ }, /* MSUBF.D */ }; -NMD::Pool NMD::POOL32F_0[64] = { +static const Pool POOL32F_0[64] = { { reserved_block , 0 , 0 , 32, 0xfc0001ff, 0xa0000000, 0 , 0, CP1_ }, /* POOL32F_0~*(0) */ @@ -19716,177 +19226,177 @@ NMD::Pool NMD::POOL32F_0[64] = { }; -NMD::Pool NMD::MIN_fmt[2] = { +static const Pool MIN_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc00023f, 0xa0000003, &NMD::MIN_S , 0, + 0xfc00023f, 0xa0000003, &MIN_S , 0, CP1_ }, /* MIN.S 
*/ { instruction , 0 , 0 , 32, - 0xfc00023f, 0xa0000203, &NMD::MIN_D , 0, + 0xfc00023f, 0xa0000203, &MIN_D , 0, CP1_ }, /* MIN.D */ }; -NMD::Pool NMD::MAX_fmt[2] = { +static const Pool MAX_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc00023f, 0xa000000b, &NMD::MAX_S , 0, + 0xfc00023f, 0xa000000b, &MAX_S , 0, CP1_ }, /* MAX.S */ { instruction , 0 , 0 , 32, - 0xfc00023f, 0xa000020b, &NMD::MAX_D , 0, + 0xfc00023f, 0xa000020b, &MAX_D , 0, CP1_ }, /* MAX.D */ }; -NMD::Pool NMD::MINA_fmt[2] = { +static const Pool MINA_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc00023f, 0xa0000023, &NMD::MINA_S , 0, + 0xfc00023f, 0xa0000023, &MINA_S , 0, CP1_ }, /* MINA.S */ { instruction , 0 , 0 , 32, - 0xfc00023f, 0xa0000223, &NMD::MINA_D , 0, + 0xfc00023f, 0xa0000223, &MINA_D , 0, CP1_ }, /* MINA.D */ }; -NMD::Pool NMD::MAXA_fmt[2] = { +static const Pool MAXA_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc00023f, 0xa000002b, &NMD::MAXA_S , 0, + 0xfc00023f, 0xa000002b, &MAXA_S , 0, CP1_ }, /* MAXA.S */ { instruction , 0 , 0 , 32, - 0xfc00023f, 0xa000022b, &NMD::MAXA_D , 0, + 0xfc00023f, 0xa000022b, &MAXA_D , 0, CP1_ }, /* MAXA.D */ }; -NMD::Pool NMD::CVT_L_fmt[2] = { +static const Pool CVT_L_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000013b, &NMD::CVT_L_S , 0, + 0xfc007fff, 0xa000013b, &CVT_L_S , 0, CP1_ }, /* CVT.L.S */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000413b, &NMD::CVT_L_D , 0, + 0xfc007fff, 0xa000413b, &CVT_L_D , 0, CP1_ }, /* CVT.L.D */ }; -NMD::Pool NMD::RSQRT_fmt[2] = { +static const Pool RSQRT_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000023b, &NMD::RSQRT_S , 0, + 0xfc007fff, 0xa000023b, &RSQRT_S , 0, CP1_ }, /* RSQRT.S */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000423b, &NMD::RSQRT_D , 0, + 0xfc007fff, 0xa000423b, &RSQRT_D , 0, CP1_ }, /* RSQRT.D */ }; -NMD::Pool NMD::FLOOR_L_fmt[2] = { +static const Pool FLOOR_L_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000033b, &NMD::FLOOR_L_S , 0, + 0xfc007fff, 0xa000033b, &FLOOR_L_S , 0, CP1_ }, /* FLOOR.L.S */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000433b, &NMD::FLOOR_L_D , 0, + 0xfc007fff, 0xa000433b, &FLOOR_L_D , 0, CP1_ }, /* FLOOR.L.D */ }; -NMD::Pool NMD::CVT_W_fmt[2] = { +static const Pool CVT_W_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000093b, &NMD::CVT_W_S , 0, + 0xfc007fff, 0xa000093b, &CVT_W_S , 0, CP1_ }, /* CVT.W.S */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000493b, &NMD::CVT_W_D , 0, + 0xfc007fff, 0xa000493b, &CVT_W_D , 0, CP1_ }, /* CVT.W.D */ }; -NMD::Pool NMD::SQRT_fmt[2] = { +static const Pool SQRT_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa0000a3b, &NMD::SQRT_S , 0, + 0xfc007fff, 0xa0000a3b, &SQRT_S , 0, CP1_ }, /* SQRT.S */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa0004a3b, &NMD::SQRT_D , 0, + 0xfc007fff, 0xa0004a3b, &SQRT_D , 0, CP1_ }, /* SQRT.D */ }; -NMD::Pool NMD::FLOOR_W_fmt[2] = { +static const Pool FLOOR_W_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa0000b3b, &NMD::FLOOR_W_S , 0, + 0xfc007fff, 0xa0000b3b, &FLOOR_W_S , 0, CP1_ }, /* FLOOR.W.S */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa0004b3b, &NMD::FLOOR_W_D , 0, + 0xfc007fff, 0xa0004b3b, &FLOOR_W_D , 0, CP1_ }, /* FLOOR.W.D */ }; -NMD::Pool NMD::RECIP_fmt[2] = { +static const Pool RECIP_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000123b, &NMD::RECIP_S , 0, + 0xfc007fff, 0xa000123b, &RECIP_S , 0, CP1_ }, /* RECIP.S */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000523b, &NMD::RECIP_D , 0, + 0xfc007fff, 0xa000523b, &RECIP_D , 0, CP1_ }, /* RECIP.D */ }; 
-NMD::Pool NMD::CEIL_L_fmt[2] = { +static const Pool CEIL_L_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000133b, &NMD::CEIL_L_S , 0, + 0xfc007fff, 0xa000133b, &CEIL_L_S , 0, CP1_ }, /* CEIL.L.S */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000533b, &NMD::CEIL_L_D , 0, + 0xfc007fff, 0xa000533b, &CEIL_L_D , 0, CP1_ }, /* CEIL.L.D */ }; -NMD::Pool NMD::CEIL_W_fmt[2] = { +static const Pool CEIL_W_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa0001b3b, &NMD::CEIL_W_S , 0, + 0xfc007fff, 0xa0001b3b, &CEIL_W_S , 0, CP1_ }, /* CEIL.W.S */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa0005b3b, &NMD::CEIL_W_D , 0, + 0xfc007fff, 0xa0005b3b, &CEIL_W_D , 0, CP1_ }, /* CEIL.W.D */ }; -NMD::Pool NMD::TRUNC_L_fmt[2] = { +static const Pool TRUNC_L_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000233b, &NMD::TRUNC_L_S , 0, + 0xfc007fff, 0xa000233b, &TRUNC_L_S , 0, CP1_ }, /* TRUNC.L.S */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000633b, &NMD::TRUNC_L_D , 0, + 0xfc007fff, 0xa000633b, &TRUNC_L_D , 0, CP1_ }, /* TRUNC.L.D */ }; -NMD::Pool NMD::TRUNC_W_fmt[2] = { +static const Pool TRUNC_W_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa0002b3b, &NMD::TRUNC_W_S , 0, + 0xfc007fff, 0xa0002b3b, &TRUNC_W_S , 0, CP1_ }, /* TRUNC.W.S */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa0006b3b, &NMD::TRUNC_W_D , 0, + 0xfc007fff, 0xa0006b3b, &TRUNC_W_D , 0, CP1_ }, /* TRUNC.W.D */ }; -NMD::Pool NMD::ROUND_L_fmt[2] = { +static const Pool ROUND_L_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000333b, &NMD::ROUND_L_S , 0, + 0xfc007fff, 0xa000333b, &ROUND_L_S , 0, CP1_ }, /* ROUND.L.S */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000733b, &NMD::ROUND_L_D , 0, + 0xfc007fff, 0xa000733b, &ROUND_L_D , 0, CP1_ }, /* ROUND.L.D */ }; -NMD::Pool NMD::ROUND_W_fmt[2] = { +static const Pool ROUND_W_fmt[2] = { { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa0003b3b, &NMD::ROUND_W_S , 0, + 0xfc007fff, 0xa0003b3b, &ROUND_W_S , 0, CP1_ }, /* ROUND.W.S */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa0007b3b, &NMD::ROUND_W_D , 0, + 0xfc007fff, 0xa0007b3b, &ROUND_W_D , 0, CP1_ }, /* ROUND.W.D */ }; -NMD::Pool NMD::POOL32Fxf_0[64] = { +static const Pool POOL32Fxf_0[64] = { { reserved_block , 0 , 0 , 32, 0xfc003fff, 0xa000003b, 0 , 0, CP1_ }, /* POOL32Fxf_0~*(0) */ @@ -19936,7 +19446,7 @@ NMD::Pool NMD::POOL32Fxf_0[64] = { 0xfc003fff, 0xa0000f3b, 0 , 0, CP1_ }, /* POOL32Fxf_0~*(15) */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0xa000103b, &NMD::CFC1 , 0, + 0xfc003fff, 0xa000103b, &CFC1 , 0, CP1_ }, /* CFC1 */ { reserved_block , 0 , 0 , 32, 0xfc003fff, 0xa000113b, 0 , 0, @@ -19960,7 +19470,7 @@ NMD::Pool NMD::POOL32Fxf_0[64] = { 0xfc003fff, 0xa000173b, 0 , 0, CP1_ }, /* POOL32Fxf_0~*(23) */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0xa000183b, &NMD::CTC1 , 0, + 0xfc003fff, 0xa000183b, &CTC1 , 0, CP1_ }, /* CTC1 */ { reserved_block , 0 , 0 , 32, 0xfc003fff, 0xa000193b, 0 , 0, @@ -19984,10 +19494,10 @@ NMD::Pool NMD::POOL32Fxf_0[64] = { 0xfc003fff, 0xa0001f3b, 0 , 0, CP1_ }, /* POOL32Fxf_0~*(31) */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0xa000203b, &NMD::MFC1 , 0, + 0xfc003fff, 0xa000203b, &MFC1 , 0, CP1_ }, /* MFC1 */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0xa000213b, &NMD::CVT_S_PL , 0, + 0xfc003fff, 0xa000213b, &CVT_S_PL , 0, CP1_ }, /* CVT.S.PL */ { reserved_block , 0 , 0 , 32, 0xfc003fff, 0xa000223b, 0 , 0, @@ -19996,7 +19506,7 @@ NMD::Pool NMD::POOL32Fxf_0[64] = { 0xfc003fff, 0xa000233b, 0 , 0, CP1_ }, /* TRUNC.L.fmt */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0xa000243b, &NMD::DMFC1 , 
0, + 0xfc003fff, 0xa000243b, &DMFC1 , 0, CP1_ | MIPS64_ }, /* DMFC1 */ { reserved_block , 0 , 0 , 32, 0xfc003fff, 0xa000253b, 0 , 0, @@ -20008,10 +19518,10 @@ NMD::Pool NMD::POOL32Fxf_0[64] = { 0xfc003fff, 0xa000273b, 0 , 0, CP1_ }, /* POOL32Fxf_0~*(39) */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0xa000283b, &NMD::MTC1 , 0, + 0xfc003fff, 0xa000283b, &MTC1 , 0, CP1_ }, /* MTC1 */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0xa000293b, &NMD::CVT_S_PU , 0, + 0xfc003fff, 0xa000293b, &CVT_S_PU , 0, CP1_ }, /* CVT.S.PU */ { reserved_block , 0 , 0 , 32, 0xfc003fff, 0xa0002a3b, 0 , 0, @@ -20020,7 +19530,7 @@ NMD::Pool NMD::POOL32Fxf_0[64] = { 0xfc003fff, 0xa0002b3b, 0 , 0, CP1_ }, /* TRUNC.W.fmt */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0xa0002c3b, &NMD::DMTC1 , 0, + 0xfc003fff, 0xa0002c3b, &DMTC1 , 0, CP1_ | MIPS64_ }, /* DMTC1 */ { reserved_block , 0 , 0 , 32, 0xfc003fff, 0xa0002d3b, 0 , 0, @@ -20032,7 +19542,7 @@ NMD::Pool NMD::POOL32Fxf_0[64] = { 0xfc003fff, 0xa0002f3b, 0 , 0, CP1_ }, /* POOL32Fxf_0~*(47) */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0xa000303b, &NMD::MFHC1 , 0, + 0xfc003fff, 0xa000303b, &MFHC1 , 0, CP1_ }, /* MFHC1 */ { reserved_block , 0 , 0 , 32, 0xfc003fff, 0xa000313b, 0 , 0, @@ -20056,7 +19566,7 @@ NMD::Pool NMD::POOL32Fxf_0[64] = { 0xfc003fff, 0xa000373b, 0 , 0, CP1_ }, /* POOL32Fxf_0~*(55) */ { instruction , 0 , 0 , 32, - 0xfc003fff, 0xa000383b, &NMD::MTHC1 , 0, + 0xfc003fff, 0xa000383b, &MTHC1 , 0, CP1_ }, /* MTHC1 */ { reserved_block , 0 , 0 , 32, 0xfc003fff, 0xa000393b, 0 , 0, @@ -20082,12 +19592,12 @@ NMD::Pool NMD::POOL32Fxf_0[64] = { }; -NMD::Pool NMD::MOV_fmt[4] = { +static const Pool MOV_fmt[4] = { { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000007b, &NMD::MOV_S , 0, + 0xfc007fff, 0xa000007b, &MOV_S , 0, CP1_ }, /* MOV.S */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000207b, &NMD::MOV_D , 0, + 0xfc007fff, 0xa000207b, &MOV_D , 0, CP1_ }, /* MOV.D */ { reserved_block , 0 , 0 , 32, 0xfc007fff, 0xa000407b, 0 , 0, @@ -20098,12 +19608,12 @@ NMD::Pool NMD::MOV_fmt[4] = { }; -NMD::Pool NMD::ABS_fmt[4] = { +static const Pool ABS_fmt[4] = { { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000037b, &NMD::ABS_S , 0, + 0xfc007fff, 0xa000037b, &ABS_S , 0, CP1_ }, /* ABS.S */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000237b, &NMD::ABS_D , 0, + 0xfc007fff, 0xa000237b, &ABS_D , 0, CP1_ }, /* ABS.D */ { reserved_block , 0 , 0 , 32, 0xfc007fff, 0xa000437b, 0 , 0, @@ -20114,12 +19624,12 @@ NMD::Pool NMD::ABS_fmt[4] = { }; -NMD::Pool NMD::NEG_fmt[4] = { +static const Pool NEG_fmt[4] = { { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa0000b7b, &NMD::NEG_S , 0, + 0xfc007fff, 0xa0000b7b, &NEG_S , 0, CP1_ }, /* NEG.S */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa0002b7b, &NMD::NEG_D , 0, + 0xfc007fff, 0xa0002b7b, &NEG_D , 0, CP1_ }, /* NEG.D */ { reserved_block , 0 , 0 , 32, 0xfc007fff, 0xa0004b7b, 0 , 0, @@ -20130,15 +19640,15 @@ NMD::Pool NMD::NEG_fmt[4] = { }; -NMD::Pool NMD::CVT_D_fmt[4] = { +static const Pool CVT_D_fmt[4] = { { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000137b, &NMD::CVT_D_S , 0, + 0xfc007fff, 0xa000137b, &CVT_D_S , 0, CP1_ }, /* CVT.D.S */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000337b, &NMD::CVT_D_W , 0, + 0xfc007fff, 0xa000337b, &CVT_D_W , 0, CP1_ }, /* CVT.D.W */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa000537b, &NMD::CVT_D_L , 0, + 0xfc007fff, 0xa000537b, &CVT_D_L , 0, CP1_ }, /* CVT.D.L */ { reserved_block , 0 , 0 , 32, 0xfc007fff, 0xa000737b, 0 , 0, @@ -20146,15 +19656,15 @@ NMD::Pool NMD::CVT_D_fmt[4] = { }; -NMD::Pool NMD::CVT_S_fmt[4] = { 
+static const Pool CVT_S_fmt[4] = { { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa0001b7b, &NMD::CVT_S_D , 0, + 0xfc007fff, 0xa0001b7b, &CVT_S_D , 0, CP1_ }, /* CVT.S.D */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa0003b7b, &NMD::CVT_S_W , 0, + 0xfc007fff, 0xa0003b7b, &CVT_S_W , 0, CP1_ }, /* CVT.S.W */ { instruction , 0 , 0 , 32, - 0xfc007fff, 0xa0005b7b, &NMD::CVT_S_L , 0, + 0xfc007fff, 0xa0005b7b, &CVT_S_L , 0, CP1_ }, /* CVT.S.L */ { reserved_block , 0 , 0 , 32, 0xfc007fff, 0xa0007b7b, 0 , 0, @@ -20162,7 +19672,7 @@ NMD::Pool NMD::CVT_S_fmt[4] = { }; -NMD::Pool NMD::POOL32Fxf_1[32] = { +static const Pool POOL32Fxf_1[32] = { { pool , MOV_fmt , 4 , 32, 0xfc001fff, 0xa000007b, 0 , 0, CP1_ }, /* MOV.fmt */ @@ -20262,7 +19772,7 @@ NMD::Pool NMD::POOL32Fxf_1[32] = { }; -NMD::Pool NMD::POOL32Fxf[4] = { +static const Pool POOL32Fxf[4] = { { pool , POOL32Fxf_0 , 64 , 32, 0xfc0000ff, 0xa000003b, 0 , 0, CP1_ }, /* POOL32Fxf_0 */ @@ -20278,7 +19788,7 @@ NMD::Pool NMD::POOL32Fxf[4] = { }; -NMD::Pool NMD::POOL32F_3[8] = { +static const Pool POOL32F_3[8] = { { pool , MIN_fmt , 2 , 32, 0xfc00003f, 0xa0000003, 0 , 0, CP1_ }, /* MIN.fmt */ @@ -20306,66 +19816,66 @@ NMD::Pool NMD::POOL32F_3[8] = { }; -NMD::Pool NMD::CMP_condn_S[32] = { +static const Pool CMP_condn_S[32] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000005, &NMD::CMP_AF_S , 0, + 0xfc0007ff, 0xa0000005, &CMP_AF_S , 0, CP1_ }, /* CMP.AF.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000045, &NMD::CMP_UN_S , 0, + 0xfc0007ff, 0xa0000045, &CMP_UN_S , 0, CP1_ }, /* CMP.UN.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000085, &NMD::CMP_EQ_S , 0, + 0xfc0007ff, 0xa0000085, &CMP_EQ_S , 0, CP1_ }, /* CMP.EQ.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa00000c5, &NMD::CMP_UEQ_S , 0, + 0xfc0007ff, 0xa00000c5, &CMP_UEQ_S , 0, CP1_ }, /* CMP.UEQ.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000105, &NMD::CMP_LT_S , 0, + 0xfc0007ff, 0xa0000105, &CMP_LT_S , 0, CP1_ }, /* CMP.LT.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000145, &NMD::CMP_ULT_S , 0, + 0xfc0007ff, 0xa0000145, &CMP_ULT_S , 0, CP1_ }, /* CMP.ULT.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000185, &NMD::CMP_LE_S , 0, + 0xfc0007ff, 0xa0000185, &CMP_LE_S , 0, CP1_ }, /* CMP.LE.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa00001c5, &NMD::CMP_ULE_S , 0, + 0xfc0007ff, 0xa00001c5, &CMP_ULE_S , 0, CP1_ }, /* CMP.ULE.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000205, &NMD::CMP_SAF_S , 0, + 0xfc0007ff, 0xa0000205, &CMP_SAF_S , 0, CP1_ }, /* CMP.SAF.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000245, &NMD::CMP_SUN_S , 0, + 0xfc0007ff, 0xa0000245, &CMP_SUN_S , 0, CP1_ }, /* CMP.SUN.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000285, &NMD::CMP_SEQ_S , 0, + 0xfc0007ff, 0xa0000285, &CMP_SEQ_S , 0, CP1_ }, /* CMP.SEQ.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa00002c5, &NMD::CMP_SUEQ_S , 0, + 0xfc0007ff, 0xa00002c5, &CMP_SUEQ_S , 0, CP1_ }, /* CMP.SUEQ.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000305, &NMD::CMP_SLT_S , 0, + 0xfc0007ff, 0xa0000305, &CMP_SLT_S , 0, CP1_ }, /* CMP.SLT.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000345, &NMD::CMP_SULT_S , 0, + 0xfc0007ff, 0xa0000345, &CMP_SULT_S , 0, CP1_ }, /* CMP.SULT.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000385, &NMD::CMP_SLE_S , 0, + 0xfc0007ff, 0xa0000385, &CMP_SLE_S , 0, CP1_ }, /* CMP.SLE.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa00003c5, &NMD::CMP_SULE_S , 0, + 0xfc0007ff, 0xa00003c5, &CMP_SULE_S , 0, CP1_ }, /* CMP.SULE.S */ { reserved_block , 0 , 
0 , 32, 0xfc0007ff, 0xa0000405, 0 , 0, CP1_ }, /* CMP.condn.S~*(16) */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000445, &NMD::CMP_OR_S , 0, + 0xfc0007ff, 0xa0000445, &CMP_OR_S , 0, CP1_ }, /* CMP.OR.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000485, &NMD::CMP_UNE_S , 0, + 0xfc0007ff, 0xa0000485, &CMP_UNE_S , 0, CP1_ }, /* CMP.UNE.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa00004c5, &NMD::CMP_NE_S , 0, + 0xfc0007ff, 0xa00004c5, &CMP_NE_S , 0, CP1_ }, /* CMP.NE.S */ { reserved_block , 0 , 0 , 32, 0xfc0007ff, 0xa0000505, 0 , 0, @@ -20383,13 +19893,13 @@ NMD::Pool NMD::CMP_condn_S[32] = { 0xfc0007ff, 0xa0000605, 0 , 0, CP1_ }, /* CMP.condn.S~*(24) */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000645, &NMD::CMP_SOR_S , 0, + 0xfc0007ff, 0xa0000645, &CMP_SOR_S , 0, CP1_ }, /* CMP.SOR.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000685, &NMD::CMP_SUNE_S , 0, + 0xfc0007ff, 0xa0000685, &CMP_SUNE_S , 0, CP1_ }, /* CMP.SUNE.S */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa00006c5, &NMD::CMP_SNE_S , 0, + 0xfc0007ff, 0xa00006c5, &CMP_SNE_S , 0, CP1_ }, /* CMP.SNE.S */ { reserved_block , 0 , 0 , 32, 0xfc0007ff, 0xa0000705, 0 , 0, @@ -20406,66 +19916,66 @@ NMD::Pool NMD::CMP_condn_S[32] = { }; -NMD::Pool NMD::CMP_condn_D[32] = { +static const Pool CMP_condn_D[32] = { { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000015, &NMD::CMP_AF_D , 0, + 0xfc0007ff, 0xa0000015, &CMP_AF_D , 0, CP1_ }, /* CMP.AF.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000055, &NMD::CMP_UN_D , 0, + 0xfc0007ff, 0xa0000055, &CMP_UN_D , 0, CP1_ }, /* CMP.UN.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000095, &NMD::CMP_EQ_D , 0, + 0xfc0007ff, 0xa0000095, &CMP_EQ_D , 0, CP1_ }, /* CMP.EQ.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa00000d5, &NMD::CMP_UEQ_D , 0, + 0xfc0007ff, 0xa00000d5, &CMP_UEQ_D , 0, CP1_ }, /* CMP.UEQ.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000115, &NMD::CMP_LT_D , 0, + 0xfc0007ff, 0xa0000115, &CMP_LT_D , 0, CP1_ }, /* CMP.LT.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000155, &NMD::CMP_ULT_D , 0, + 0xfc0007ff, 0xa0000155, &CMP_ULT_D , 0, CP1_ }, /* CMP.ULT.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000195, &NMD::CMP_LE_D , 0, + 0xfc0007ff, 0xa0000195, &CMP_LE_D , 0, CP1_ }, /* CMP.LE.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa00001d5, &NMD::CMP_ULE_D , 0, + 0xfc0007ff, 0xa00001d5, &CMP_ULE_D , 0, CP1_ }, /* CMP.ULE.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000215, &NMD::CMP_SAF_D , 0, + 0xfc0007ff, 0xa0000215, &CMP_SAF_D , 0, CP1_ }, /* CMP.SAF.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000255, &NMD::CMP_SUN_D , 0, + 0xfc0007ff, 0xa0000255, &CMP_SUN_D , 0, CP1_ }, /* CMP.SUN.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000295, &NMD::CMP_SEQ_D , 0, + 0xfc0007ff, 0xa0000295, &CMP_SEQ_D , 0, CP1_ }, /* CMP.SEQ.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa00002d5, &NMD::CMP_SUEQ_D , 0, + 0xfc0007ff, 0xa00002d5, &CMP_SUEQ_D , 0, CP1_ }, /* CMP.SUEQ.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000315, &NMD::CMP_SLT_D , 0, + 0xfc0007ff, 0xa0000315, &CMP_SLT_D , 0, CP1_ }, /* CMP.SLT.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000355, &NMD::CMP_SULT_D , 0, + 0xfc0007ff, 0xa0000355, &CMP_SULT_D , 0, CP1_ }, /* CMP.SULT.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000395, &NMD::CMP_SLE_D , 0, + 0xfc0007ff, 0xa0000395, &CMP_SLE_D , 0, CP1_ }, /* CMP.SLE.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa00003d5, &NMD::CMP_SULE_D , 0, + 0xfc0007ff, 0xa00003d5, &CMP_SULE_D , 0, CP1_ }, /* CMP.SULE.D */ { 
reserved_block , 0 , 0 , 32, 0xfc0007ff, 0xa0000415, 0 , 0, CP1_ }, /* CMP.condn.D~*(16) */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000455, &NMD::CMP_OR_D , 0, + 0xfc0007ff, 0xa0000455, &CMP_OR_D , 0, CP1_ }, /* CMP.OR.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000495, &NMD::CMP_UNE_D , 0, + 0xfc0007ff, 0xa0000495, &CMP_UNE_D , 0, CP1_ }, /* CMP.UNE.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa00004d5, &NMD::CMP_NE_D , 0, + 0xfc0007ff, 0xa00004d5, &CMP_NE_D , 0, CP1_ }, /* CMP.NE.D */ { reserved_block , 0 , 0 , 32, 0xfc0007ff, 0xa0000515, 0 , 0, @@ -20483,13 +19993,13 @@ NMD::Pool NMD::CMP_condn_D[32] = { 0xfc0007ff, 0xa0000615, 0 , 0, CP1_ }, /* CMP.condn.D~*(24) */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000655, &NMD::CMP_SOR_D , 0, + 0xfc0007ff, 0xa0000655, &CMP_SOR_D , 0, CP1_ }, /* CMP.SOR.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa0000695, &NMD::CMP_SUNE_D , 0, + 0xfc0007ff, 0xa0000695, &CMP_SUNE_D , 0, CP1_ }, /* CMP.SUNE.D */ { instruction , 0 , 0 , 32, - 0xfc0007ff, 0xa00006d5, &NMD::CMP_SNE_D , 0, + 0xfc0007ff, 0xa00006d5, &CMP_SNE_D , 0, CP1_ }, /* CMP.SNE.D */ { reserved_block , 0 , 0 , 32, 0xfc0007ff, 0xa0000715, 0 , 0, @@ -20506,7 +20016,7 @@ NMD::Pool NMD::CMP_condn_D[32] = { }; -NMD::Pool NMD::POOL32F_5[8] = { +static const Pool POOL32F_5[8] = { { pool , CMP_condn_S , 32 , 32, 0xfc00003f, 0xa0000005, 0 , 0, CP1_ }, /* CMP.condn.S */ @@ -20534,7 +20044,7 @@ NMD::Pool NMD::POOL32F_5[8] = { }; -NMD::Pool NMD::POOL32F[8] = { +static const Pool POOL32F[8] = { { pool , POOL32F_0 , 64 , 32, 0xfc000007, 0xa0000000, 0 , 0, CP1_ }, /* POOL32F_0 */ @@ -20562,18 +20072,18 @@ NMD::Pool NMD::POOL32F[8] = { }; -NMD::Pool NMD::POOL32S_0[64] = { +static const Pool POOL32S_0[64] = { { reserved_block , 0 , 0 , 32, 0xfc0001ff, 0xc0000000, 0 , 0, 0x0 }, /* POOL32S_0~*(0) */ { instruction , 0 , 0 , 32, - 0xfc0001ff, 0xc0000008, &NMD::DLSA , 0, + 0xfc0001ff, 0xc0000008, &DLSA , 0, MIPS64_ }, /* DLSA */ { instruction , 0 , 0 , 32, - 0xfc0001ff, 0xc0000010, &NMD::DSLLV , 0, + 0xfc0001ff, 0xc0000010, &DSLLV , 0, MIPS64_ }, /* DSLLV */ { instruction , 0 , 0 , 32, - 0xfc0001ff, 0xc0000018, &NMD::DMUL , 0, + 0xfc0001ff, 0xc0000018, &DMUL , 0, MIPS64_ }, /* DMUL */ { reserved_block , 0 , 0 , 32, 0xfc0001ff, 0xc0000020, 0 , 0, @@ -20594,10 +20104,10 @@ NMD::Pool NMD::POOL32S_0[64] = { 0xfc0001ff, 0xc0000048, 0 , 0, 0x0 }, /* POOL32S_0~*(9) */ { instruction , 0 , 0 , 32, - 0xfc0001ff, 0xc0000050, &NMD::DSRLV , 0, + 0xfc0001ff, 0xc0000050, &DSRLV , 0, MIPS64_ }, /* DSRLV */ { instruction , 0 , 0 , 32, - 0xfc0001ff, 0xc0000058, &NMD::DMUH , 0, + 0xfc0001ff, 0xc0000058, &DMUH , 0, MIPS64_ }, /* DMUH */ { reserved_block , 0 , 0 , 32, 0xfc0001ff, 0xc0000060, 0 , 0, @@ -20618,10 +20128,10 @@ NMD::Pool NMD::POOL32S_0[64] = { 0xfc0001ff, 0xc0000088, 0 , 0, 0x0 }, /* POOL32S_0~*(17) */ { instruction , 0 , 0 , 32, - 0xfc0001ff, 0xc0000090, &NMD::DSRAV , 0, + 0xfc0001ff, 0xc0000090, &DSRAV , 0, MIPS64_ }, /* DSRAV */ { instruction , 0 , 0 , 32, - 0xfc0001ff, 0xc0000098, &NMD::DMULU , 0, + 0xfc0001ff, 0xc0000098, &DMULU , 0, MIPS64_ }, /* DMULU */ { reserved_block , 0 , 0 , 32, 0xfc0001ff, 0xc00000a0, 0 , 0, @@ -20642,10 +20152,10 @@ NMD::Pool NMD::POOL32S_0[64] = { 0xfc0001ff, 0xc00000c8, 0 , 0, 0x0 }, /* POOL32S_0~*(25) */ { instruction , 0 , 0 , 32, - 0xfc0001ff, 0xc00000d0, &NMD::DROTRV , 0, + 0xfc0001ff, 0xc00000d0, &DROTRV , 0, MIPS64_ }, /* DROTRV */ { instruction , 0 , 0 , 32, - 0xfc0001ff, 0xc00000d8, &NMD::DMUHU , 0, + 0xfc0001ff, 0xc00000d8, &DMUHU , 0, MIPS64_ }, /* DMUHU 
*/ { reserved_block , 0 , 0 , 32, 0xfc0001ff, 0xc00000e0, 0 , 0, @@ -20666,10 +20176,10 @@ NMD::Pool NMD::POOL32S_0[64] = { 0xfc0001ff, 0xc0000108, 0 , 0, 0x0 }, /* POOL32S_0~*(33) */ { instruction , 0 , 0 , 32, - 0xfc0001ff, 0xc0000110, &NMD::DADD , 0, + 0xfc0001ff, 0xc0000110, &DADD , 0, MIPS64_ }, /* DADD */ { instruction , 0 , 0 , 32, - 0xfc0001ff, 0xc0000118, &NMD::DDIV , 0, + 0xfc0001ff, 0xc0000118, &DDIV , 0, MIPS64_ }, /* DDIV */ { reserved_block , 0 , 0 , 32, 0xfc0001ff, 0xc0000120, 0 , 0, @@ -20690,10 +20200,10 @@ NMD::Pool NMD::POOL32S_0[64] = { 0xfc0001ff, 0xc0000148, 0 , 0, 0x0 }, /* POOL32S_0~*(41) */ { instruction , 0 , 0 , 32, - 0xfc0001ff, 0xc0000150, &NMD::DADDU , 0, + 0xfc0001ff, 0xc0000150, &DADDU , 0, MIPS64_ }, /* DADDU */ { instruction , 0 , 0 , 32, - 0xfc0001ff, 0xc0000158, &NMD::DMOD , 0, + 0xfc0001ff, 0xc0000158, &DMOD , 0, MIPS64_ }, /* DMOD */ { reserved_block , 0 , 0 , 32, 0xfc0001ff, 0xc0000160, 0 , 0, @@ -20714,10 +20224,10 @@ NMD::Pool NMD::POOL32S_0[64] = { 0xfc0001ff, 0xc0000188, 0 , 0, 0x0 }, /* POOL32S_0~*(49) */ { instruction , 0 , 0 , 32, - 0xfc0001ff, 0xc0000190, &NMD::DSUB , 0, + 0xfc0001ff, 0xc0000190, &DSUB , 0, MIPS64_ }, /* DSUB */ { instruction , 0 , 0 , 32, - 0xfc0001ff, 0xc0000198, &NMD::DDIVU , 0, + 0xfc0001ff, 0xc0000198, &DDIVU , 0, MIPS64_ }, /* DDIVU */ { reserved_block , 0 , 0 , 32, 0xfc0001ff, 0xc00001a0, 0 , 0, @@ -20738,10 +20248,10 @@ NMD::Pool NMD::POOL32S_0[64] = { 0xfc0001ff, 0xc00001c8, 0 , 0, 0x0 }, /* POOL32S_0~*(57) */ { instruction , 0 , 0 , 32, - 0xfc0001ff, 0xc00001d0, &NMD::DSUBU , 0, + 0xfc0001ff, 0xc00001d0, &DSUBU , 0, MIPS64_ }, /* DSUBU */ { instruction , 0 , 0 , 32, - 0xfc0001ff, 0xc00001d8, &NMD::DMODU , 0, + 0xfc0001ff, 0xc00001d8, &DMODU , 0, MIPS64_ }, /* DMODU */ { reserved_block , 0 , 0 , 32, 0xfc0001ff, 0xc00001e0, 0 , 0, @@ -20758,7 +20268,7 @@ NMD::Pool NMD::POOL32S_0[64] = { }; -NMD::Pool NMD::POOL32Sxf_4[128] = { +static const Pool POOL32Sxf_4[128] = { { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0xc000013c, 0 , 0, 0x0 }, /* POOL32Sxf_4~*(0) */ @@ -20871,7 +20381,7 @@ NMD::Pool NMD::POOL32Sxf_4[128] = { 0xfc00ffff, 0xc000493c, 0 , 0, 0x0 }, /* POOL32Sxf_4~*(36) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0xc0004b3c, &NMD::DCLO , 0, + 0xfc00ffff, 0xc0004b3c, &DCLO , 0, MIPS64_ }, /* DCLO */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0xc0004d3c, 0 , 0, @@ -20895,7 +20405,7 @@ NMD::Pool NMD::POOL32Sxf_4[128] = { 0xfc00ffff, 0xc000593c, 0 , 0, 0x0 }, /* POOL32Sxf_4~*(44) */ { instruction , 0 , 0 , 32, - 0xfc00ffff, 0xc0005b3c, &NMD::DCLZ , 0, + 0xfc00ffff, 0xc0005b3c, &DCLZ , 0, MIPS64_ }, /* DCLZ */ { reserved_block , 0 , 0 , 32, 0xfc00ffff, 0xc0005d3c, 0 , 0, @@ -21146,7 +20656,7 @@ NMD::Pool NMD::POOL32Sxf_4[128] = { }; -NMD::Pool NMD::POOL32Sxf[8] = { +static const Pool POOL32Sxf[8] = { { reserved_block , 0 , 0 , 32, 0xfc0001ff, 0xc000003c, 0 , 0, 0x0 }, /* POOL32Sxf~*(0) */ @@ -21174,12 +20684,12 @@ NMD::Pool NMD::POOL32Sxf[8] = { }; -NMD::Pool NMD::POOL32S_4[8] = { +static const Pool POOL32S_4[8] = { { instruction , 0 , 0 , 32, - 0xfc00003f, 0xc0000004, &NMD::EXTD , 0, + 0xfc00003f, 0xc0000004, &EXTD , 0, MIPS64_ }, /* EXTD */ { instruction , 0 , 0 , 32, - 0xfc00003f, 0xc000000c, &NMD::EXTD32 , 0, + 0xfc00003f, 0xc000000c, &EXTD32 , 0, MIPS64_ }, /* EXTD32 */ { reserved_block , 0 , 0 , 32, 0xfc00003f, 0xc0000014, 0 , 0, @@ -21202,7 +20712,7 @@ NMD::Pool NMD::POOL32S_4[8] = { }; -NMD::Pool NMD::POOL32S[8] = { +static const Pool POOL32S[8] = { { pool , POOL32S_0 , 64 , 32, 0xfc000007, 0xc0000000, 0 , 0, 0x0 
}, /* POOL32S_0 */ @@ -21230,29 +20740,29 @@ NMD::Pool NMD::POOL32S[8] = { }; -NMD::Pool NMD::P_LUI[2] = { +static const Pool P_LUI[2] = { { instruction , 0 , 0 , 32, - 0xfc000002, 0xe0000000, &NMD::LUI , 0, + 0xfc000002, 0xe0000000, &LUI , 0, 0x0 }, /* LUI */ { instruction , 0 , 0 , 32, - 0xfc000002, 0xe0000002, &NMD::ALUIPC , 0, + 0xfc000002, 0xe0000002, &ALUIPC , 0, 0x0 }, /* ALUIPC */ }; -NMD::Pool NMD::P_GP_LH[2] = { +static const Pool P_GP_LH[2] = { { instruction , 0 , 0 , 32, - 0xfc1c0001, 0x44100000, &NMD::LH_GP_ , 0, + 0xfc1c0001, 0x44100000, &LH_GP_ , 0, 0x0 }, /* LH[GP] */ { instruction , 0 , 0 , 32, - 0xfc1c0001, 0x44100001, &NMD::LHU_GP_ , 0, + 0xfc1c0001, 0x44100001, &LHU_GP_ , 0, 0x0 }, /* LHU[GP] */ }; -NMD::Pool NMD::P_GP_SH[2] = { +static const Pool P_GP_SH[2] = { { instruction , 0 , 0 , 32, - 0xfc1c0001, 0x44140000, &NMD::SH_GP_ , 0, + 0xfc1c0001, 0x44140000, &SH_GP_ , 0, 0x0 }, /* SH[GP] */ { reserved_block , 0 , 0 , 32, 0xfc1c0001, 0x44140001, 0 , 0, @@ -21260,25 +20770,25 @@ NMD::Pool NMD::P_GP_SH[2] = { }; -NMD::Pool NMD::P_GP_CP1[4] = { +static const Pool P_GP_CP1[4] = { { instruction , 0 , 0 , 32, - 0xfc1c0003, 0x44180000, &NMD::LWC1_GP_ , 0, + 0xfc1c0003, 0x44180000, &LWC1_GP_ , 0, CP1_ }, /* LWC1[GP] */ { instruction , 0 , 0 , 32, - 0xfc1c0003, 0x44180001, &NMD::SWC1_GP_ , 0, + 0xfc1c0003, 0x44180001, &SWC1_GP_ , 0, CP1_ }, /* SWC1[GP] */ { instruction , 0 , 0 , 32, - 0xfc1c0003, 0x44180002, &NMD::LDC1_GP_ , 0, + 0xfc1c0003, 0x44180002, &LDC1_GP_ , 0, CP1_ }, /* LDC1[GP] */ { instruction , 0 , 0 , 32, - 0xfc1c0003, 0x44180003, &NMD::SDC1_GP_ , 0, + 0xfc1c0003, 0x44180003, &SDC1_GP_ , 0, CP1_ }, /* SDC1[GP] */ }; -NMD::Pool NMD::P_GP_M64[4] = { +static const Pool P_GP_M64[4] = { { instruction , 0 , 0 , 32, - 0xfc1c0003, 0x441c0000, &NMD::LWU_GP_ , 0, + 0xfc1c0003, 0x441c0000, &LWU_GP_ , 0, MIPS64_ }, /* LWU[GP] */ { reserved_block , 0 , 0 , 32, 0xfc1c0003, 0x441c0001, 0 , 0, @@ -21292,18 +20802,18 @@ NMD::Pool NMD::P_GP_M64[4] = { }; -NMD::Pool NMD::P_GP_BH[8] = { +static const Pool P_GP_BH[8] = { { instruction , 0 , 0 , 32, - 0xfc1c0000, 0x44000000, &NMD::LB_GP_ , 0, + 0xfc1c0000, 0x44000000, &LB_GP_ , 0, 0x0 }, /* LB[GP] */ { instruction , 0 , 0 , 32, - 0xfc1c0000, 0x44040000, &NMD::SB_GP_ , 0, + 0xfc1c0000, 0x44040000, &SB_GP_ , 0, 0x0 }, /* SB[GP] */ { instruction , 0 , 0 , 32, - 0xfc1c0000, 0x44080000, &NMD::LBU_GP_ , 0, + 0xfc1c0000, 0x44080000, &LBU_GP_ , 0, 0x0 }, /* LBU[GP] */ { instruction , 0 , 0 , 32, - 0xfc1c0000, 0x440c0000, &NMD::ADDIU_GP_B_ , 0, + 0xfc1c0000, 0x440c0000, &ADDIU_GP_B_ , 0, 0x0 }, /* ADDIU[GP.B] */ { pool , P_GP_LH , 2 , 32, 0xfc1c0000, 0x44100000, 0 , 0, @@ -21320,136 +20830,136 @@ NMD::Pool NMD::P_GP_BH[8] = { }; -NMD::Pool NMD::P_LS_U12[16] = { +static const Pool P_LS_U12[16] = { { instruction , 0 , 0 , 32, - 0xfc00f000, 0x84000000, &NMD::LB_U12_ , 0, + 0xfc00f000, 0x84000000, &LB_U12_ , 0, 0x0 }, /* LB[U12] */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x84001000, &NMD::SB_U12_ , 0, + 0xfc00f000, 0x84001000, &SB_U12_ , 0, 0x0 }, /* SB[U12] */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x84002000, &NMD::LBU_U12_ , 0, + 0xfc00f000, 0x84002000, &LBU_U12_ , 0, 0x0 }, /* LBU[U12] */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x84003000, &NMD::PREF_U12_ , 0, + 0xfc00f000, 0x84003000, &PREF_U12_ , 0, 0x0 }, /* PREF[U12] */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x84004000, &NMD::LH_U12_ , 0, + 0xfc00f000, 0x84004000, &LH_U12_ , 0, 0x0 }, /* LH[U12] */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x84005000, &NMD::SH_U12_ , 0, + 0xfc00f000, 
0x84005000, &SH_U12_ , 0, 0x0 }, /* SH[U12] */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x84006000, &NMD::LHU_U12_ , 0, + 0xfc00f000, 0x84006000, &LHU_U12_ , 0, 0x0 }, /* LHU[U12] */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x84007000, &NMD::LWU_U12_ , 0, + 0xfc00f000, 0x84007000, &LWU_U12_ , 0, MIPS64_ }, /* LWU[U12] */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x84008000, &NMD::LW_U12_ , 0, + 0xfc00f000, 0x84008000, &LW_U12_ , 0, 0x0 }, /* LW[U12] */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x84009000, &NMD::SW_U12_ , 0, + 0xfc00f000, 0x84009000, &SW_U12_ , 0, 0x0 }, /* SW[U12] */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x8400a000, &NMD::LWC1_U12_ , 0, + 0xfc00f000, 0x8400a000, &LWC1_U12_ , 0, CP1_ }, /* LWC1[U12] */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x8400b000, &NMD::SWC1_U12_ , 0, + 0xfc00f000, 0x8400b000, &SWC1_U12_ , 0, CP1_ }, /* SWC1[U12] */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x8400c000, &NMD::LD_U12_ , 0, + 0xfc00f000, 0x8400c000, &LD_U12_ , 0, MIPS64_ }, /* LD[U12] */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x8400d000, &NMD::SD_U12_ , 0, + 0xfc00f000, 0x8400d000, &SD_U12_ , 0, MIPS64_ }, /* SD[U12] */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x8400e000, &NMD::LDC1_U12_ , 0, + 0xfc00f000, 0x8400e000, &LDC1_U12_ , 0, CP1_ }, /* LDC1[U12] */ { instruction , 0 , 0 , 32, - 0xfc00f000, 0x8400f000, &NMD::SDC1_U12_ , 0, + 0xfc00f000, 0x8400f000, &SDC1_U12_ , 0, CP1_ }, /* SDC1[U12] */ }; -NMD::Pool NMD::P_PREF_S9_[2] = { +static const Pool P_PREF_S9_[2] = { { instruction , 0 , 0 , 32, - 0xffe07f00, 0xa7e01800, &NMD::SYNCI , 0, + 0xffe07f00, 0xa7e01800, &SYNCI , 0, 0x0 }, /* SYNCI */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4001800, &NMD::PREF_S9_ , &NMD::PREF_S9__cond , + 0xfc007f00, 0xa4001800, &PREF_S9_ , &PREF_S9__cond , 0x0 }, /* PREF[S9] */ }; -NMD::Pool NMD::P_LS_S0[16] = { +static const Pool P_LS_S0[16] = { { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4000000, &NMD::LB_S9_ , 0, + 0xfc007f00, 0xa4000000, &LB_S9_ , 0, 0x0 }, /* LB[S9] */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4000800, &NMD::SB_S9_ , 0, + 0xfc007f00, 0xa4000800, &SB_S9_ , 0, 0x0 }, /* SB[S9] */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4001000, &NMD::LBU_S9_ , 0, + 0xfc007f00, 0xa4001000, &LBU_S9_ , 0, 0x0 }, /* LBU[S9] */ { pool , P_PREF_S9_ , 2 , 32, 0xfc007f00, 0xa4001800, 0 , 0, 0x0 }, /* P.PREF[S9] */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4002000, &NMD::LH_S9_ , 0, + 0xfc007f00, 0xa4002000, &LH_S9_ , 0, 0x0 }, /* LH[S9] */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4002800, &NMD::SH_S9_ , 0, + 0xfc007f00, 0xa4002800, &SH_S9_ , 0, 0x0 }, /* SH[S9] */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4003000, &NMD::LHU_S9_ , 0, + 0xfc007f00, 0xa4003000, &LHU_S9_ , 0, 0x0 }, /* LHU[S9] */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4003800, &NMD::LWU_S9_ , 0, + 0xfc007f00, 0xa4003800, &LWU_S9_ , 0, MIPS64_ }, /* LWU[S9] */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4004000, &NMD::LW_S9_ , 0, + 0xfc007f00, 0xa4004000, &LW_S9_ , 0, 0x0 }, /* LW[S9] */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4004800, &NMD::SW_S9_ , 0, + 0xfc007f00, 0xa4004800, &SW_S9_ , 0, 0x0 }, /* SW[S9] */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4005000, &NMD::LWC1_S9_ , 0, + 0xfc007f00, 0xa4005000, &LWC1_S9_ , 0, CP1_ }, /* LWC1[S9] */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4005800, &NMD::SWC1_S9_ , 0, + 0xfc007f00, 0xa4005800, &SWC1_S9_ , 0, CP1_ }, /* SWC1[S9] */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4006000, &NMD::LD_S9_ , 0, + 0xfc007f00, 0xa4006000, &LD_S9_ , 0, MIPS64_ }, /* 
LD[S9] */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4006800, &NMD::SD_S9_ , 0, + 0xfc007f00, 0xa4006800, &SD_S9_ , 0, MIPS64_ }, /* SD[S9] */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4007000, &NMD::LDC1_S9_ , 0, + 0xfc007f00, 0xa4007000, &LDC1_S9_ , 0, CP1_ }, /* LDC1[S9] */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4007800, &NMD::SDC1_S9_ , 0, + 0xfc007f00, 0xa4007800, &SDC1_S9_ , 0, CP1_ }, /* SDC1[S9] */ }; -NMD::Pool NMD::ASET_ACLR[2] = { +static const Pool ASET_ACLR[2] = { { instruction , 0 , 0 , 32, - 0xfe007f00, 0xa4001100, &NMD::ASET , 0, + 0xfe007f00, 0xa4001100, &ASET , 0, MCU_ }, /* ASET */ { instruction , 0 , 0 , 32, - 0xfe007f00, 0xa6001100, &NMD::ACLR , 0, + 0xfe007f00, 0xa6001100, &ACLR , 0, MCU_ }, /* ACLR */ }; -NMD::Pool NMD::P_LL[4] = { +static const Pool P_LL[4] = { { instruction , 0 , 0 , 32, - 0xfc007f03, 0xa4005100, &NMD::LL , 0, + 0xfc007f03, 0xa4005100, &LL , 0, 0x0 }, /* LL */ { instruction , 0 , 0 , 32, - 0xfc007f03, 0xa4005101, &NMD::LLWP , 0, + 0xfc007f03, 0xa4005101, &LLWP , 0, XNP_ }, /* LLWP */ { reserved_block , 0 , 0 , 32, 0xfc007f03, 0xa4005102, 0 , 0, @@ -21460,12 +20970,12 @@ NMD::Pool NMD::P_LL[4] = { }; -NMD::Pool NMD::P_SC[4] = { +static const Pool P_SC[4] = { { instruction , 0 , 0 , 32, - 0xfc007f03, 0xa4005900, &NMD::SC , 0, + 0xfc007f03, 0xa4005900, &SC , 0, 0x0 }, /* SC */ { instruction , 0 , 0 , 32, - 0xfc007f03, 0xa4005901, &NMD::SCWP , 0, + 0xfc007f03, 0xa4005901, &SCWP , 0, XNP_ }, /* SCWP */ { reserved_block , 0 , 0 , 32, 0xfc007f03, 0xa4005902, 0 , 0, @@ -21476,12 +20986,12 @@ NMD::Pool NMD::P_SC[4] = { }; -NMD::Pool NMD::P_LLD[8] = { +static const Pool P_LLD[8] = { { instruction , 0 , 0 , 32, - 0xfc007f07, 0xa4007100, &NMD::LLD , 0, + 0xfc007f07, 0xa4007100, &LLD , 0, MIPS64_ }, /* LLD */ { instruction , 0 , 0 , 32, - 0xfc007f07, 0xa4007101, &NMD::LLDP , 0, + 0xfc007f07, 0xa4007101, &LLDP , 0, MIPS64_ }, /* LLDP */ { reserved_block , 0 , 0 , 32, 0xfc007f07, 0xa4007102, 0 , 0, @@ -21504,12 +21014,12 @@ NMD::Pool NMD::P_LLD[8] = { }; -NMD::Pool NMD::P_SCD[8] = { +static const Pool P_SCD[8] = { { instruction , 0 , 0 , 32, - 0xfc007f07, 0xa4007900, &NMD::SCD , 0, + 0xfc007f07, 0xa4007900, &SCD , 0, MIPS64_ }, /* SCD */ { instruction , 0 , 0 , 32, - 0xfc007f07, 0xa4007901, &NMD::SCDP , 0, + 0xfc007f07, 0xa4007901, &SCDP , 0, MIPS64_ }, /* SCDP */ { reserved_block , 0 , 0 , 32, 0xfc007f07, 0xa4007902, 0 , 0, @@ -21532,7 +21042,7 @@ NMD::Pool NMD::P_SCD[8] = { }; -NMD::Pool NMD::P_LS_S1[16] = { +static const Pool P_LS_S1[16] = { { reserved_block , 0 , 0 , 32, 0xfc007f00, 0xa4000100, 0 , 0, 0x0 }, /* P.LS.S1~*(0) */ @@ -21546,22 +21056,22 @@ NMD::Pool NMD::P_LS_S1[16] = { 0xfc007f00, 0xa4001900, 0 , 0, 0x0 }, /* P.LS.S1~*(3) */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4002100, &NMD::UALH , 0, + 0xfc007f00, 0xa4002100, &UALH , 0, XMMS_ }, /* UALH */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4002900, &NMD::UASH , 0, + 0xfc007f00, 0xa4002900, &UASH , 0, XMMS_ }, /* UASH */ { reserved_block , 0 , 0 , 32, 0xfc007f00, 0xa4003100, 0 , 0, 0x0 }, /* P.LS.S1~*(6) */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4003900, &NMD::CACHE , 0, + 0xfc007f00, 0xa4003900, &CACHE , 0, CP0_ }, /* CACHE */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4004100, &NMD::LWC2 , 0, + 0xfc007f00, 0xa4004100, &LWC2 , 0, CP2_ }, /* LWC2 */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4004900, &NMD::SWC2 , 0, + 0xfc007f00, 0xa4004900, &SWC2 , 0, CP2_ }, /* SWC2 */ { pool , P_LL , 4 , 32, 0xfc007f00, 0xa4005100, 0 , 0, @@ -21570,10 +21080,10 @@ NMD::Pool 
NMD::P_LS_S1[16] = { 0xfc007f00, 0xa4005900, 0 , 0, 0x0 }, /* P.SC */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4006100, &NMD::LDC2 , 0, + 0xfc007f00, 0xa4006100, &LDC2 , 0, CP2_ }, /* LDC2 */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4006900, &NMD::SDC2 , 0, + 0xfc007f00, 0xa4006900, &SDC2 , 0, CP2_ }, /* SDC2 */ { pool , P_LLD , 8 , 32, 0xfc007f00, 0xa4007100, 0 , 0, @@ -21584,22 +21094,22 @@ NMD::Pool NMD::P_LS_S1[16] = { }; -NMD::Pool NMD::P_PREFE[2] = { +static const Pool P_PREFE[2] = { { instruction , 0 , 0 , 32, - 0xffe07f00, 0xa7e01a00, &NMD::SYNCIE , 0, + 0xffe07f00, 0xa7e01a00, &SYNCIE , 0, CP0_ | EVA_ }, /* SYNCIE */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4001a00, &NMD::PREFE , &NMD::PREFE_cond , + 0xfc007f00, 0xa4001a00, &PREFE , &PREFE_cond , CP0_ | EVA_ }, /* PREFE */ }; -NMD::Pool NMD::P_LLE[4] = { +static const Pool P_LLE[4] = { { instruction , 0 , 0 , 32, - 0xfc007f03, 0xa4005200, &NMD::LLE , 0, + 0xfc007f03, 0xa4005200, &LLE , 0, CP0_ | EVA_ }, /* LLE */ { instruction , 0 , 0 , 32, - 0xfc007f03, 0xa4005201, &NMD::LLWPE , 0, + 0xfc007f03, 0xa4005201, &LLWPE , 0, CP0_ | EVA_ }, /* LLWPE */ { reserved_block , 0 , 0 , 32, 0xfc007f03, 0xa4005202, 0 , 0, @@ -21610,12 +21120,12 @@ NMD::Pool NMD::P_LLE[4] = { }; -NMD::Pool NMD::P_SCE[4] = { +static const Pool P_SCE[4] = { { instruction , 0 , 0 , 32, - 0xfc007f03, 0xa4005a00, &NMD::SCE , 0, + 0xfc007f03, 0xa4005a00, &SCE , 0, CP0_ | EVA_ }, /* SCE */ { instruction , 0 , 0 , 32, - 0xfc007f03, 0xa4005a01, &NMD::SCWPE , 0, + 0xfc007f03, 0xa4005a01, &SCWPE , 0, CP0_ | EVA_ }, /* SCWPE */ { reserved_block , 0 , 0 , 32, 0xfc007f03, 0xa4005a02, 0 , 0, @@ -21626,36 +21136,36 @@ NMD::Pool NMD::P_SCE[4] = { }; -NMD::Pool NMD::P_LS_E0[16] = { +static const Pool P_LS_E0[16] = { { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4000200, &NMD::LBE , 0, + 0xfc007f00, 0xa4000200, &LBE , 0, CP0_ | EVA_ }, /* LBE */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4000a00, &NMD::SBE , 0, + 0xfc007f00, 0xa4000a00, &SBE , 0, CP0_ | EVA_ }, /* SBE */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4001200, &NMD::LBUE , 0, + 0xfc007f00, 0xa4001200, &LBUE , 0, CP0_ | EVA_ }, /* LBUE */ { pool , P_PREFE , 2 , 32, 0xfc007f00, 0xa4001a00, 0 , 0, 0x0 }, /* P.PREFE */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4002200, &NMD::LHE , 0, + 0xfc007f00, 0xa4002200, &LHE , 0, CP0_ | EVA_ }, /* LHE */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4002a00, &NMD::SHE , 0, + 0xfc007f00, 0xa4002a00, &SHE , 0, CP0_ | EVA_ }, /* SHE */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4003200, &NMD::LHUE , 0, + 0xfc007f00, 0xa4003200, &LHUE , 0, CP0_ | EVA_ }, /* LHUE */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4003a00, &NMD::CACHEE , 0, + 0xfc007f00, 0xa4003a00, &CACHEE , 0, CP0_ | EVA_ }, /* CACHEE */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4004200, &NMD::LWE , 0, + 0xfc007f00, 0xa4004200, &LWE , 0, CP0_ | EVA_ }, /* LWE */ { instruction , 0 , 0 , 32, - 0xfc007f00, 0xa4004a00, &NMD::SWE , 0, + 0xfc007f00, 0xa4004a00, &SWE , 0, CP0_ | EVA_ }, /* SWE */ { pool , P_LLE , 4 , 32, 0xfc007f00, 0xa4005200, 0 , 0, @@ -21678,47 +21188,47 @@ NMD::Pool NMD::P_LS_E0[16] = { }; -NMD::Pool NMD::P_LS_WM[2] = { +static const Pool P_LS_WM[2] = { { instruction , 0 , 0 , 32, - 0xfc000f00, 0xa4000400, &NMD::LWM , 0, + 0xfc000f00, 0xa4000400, &LWM , 0, XMMS_ }, /* LWM */ { instruction , 0 , 0 , 32, - 0xfc000f00, 0xa4000c00, &NMD::SWM , 0, + 0xfc000f00, 0xa4000c00, &SWM , 0, XMMS_ }, /* SWM */ }; -NMD::Pool NMD::P_LS_UAWM[2] = { +static const Pool P_LS_UAWM[2] = { { instruction , 0 , 0 
, 32, - 0xfc000f00, 0xa4000500, &NMD::UALWM , 0, + 0xfc000f00, 0xa4000500, &UALWM , 0, XMMS_ }, /* UALWM */ { instruction , 0 , 0 , 32, - 0xfc000f00, 0xa4000d00, &NMD::UASWM , 0, + 0xfc000f00, 0xa4000d00, &UASWM , 0, XMMS_ }, /* UASWM */ }; -NMD::Pool NMD::P_LS_DM[2] = { +static const Pool P_LS_DM[2] = { { instruction , 0 , 0 , 32, - 0xfc000f00, 0xa4000600, &NMD::LDM , 0, + 0xfc000f00, 0xa4000600, &LDM , 0, MIPS64_ }, /* LDM */ { instruction , 0 , 0 , 32, - 0xfc000f00, 0xa4000e00, &NMD::SDM , 0, + 0xfc000f00, 0xa4000e00, &SDM , 0, MIPS64_ }, /* SDM */ }; -NMD::Pool NMD::P_LS_UADM[2] = { +static const Pool P_LS_UADM[2] = { { instruction , 0 , 0 , 32, - 0xfc000f00, 0xa4000700, &NMD::UALDM , 0, + 0xfc000f00, 0xa4000700, &UALDM , 0, MIPS64_ }, /* UALDM */ { instruction , 0 , 0 , 32, - 0xfc000f00, 0xa4000f00, &NMD::UASDM , 0, + 0xfc000f00, 0xa4000f00, &UASDM , 0, MIPS64_ }, /* UASDM */ }; -NMD::Pool NMD::P_LS_S9[8] = { +static const Pool P_LS_S9[8] = { { pool , P_LS_S0 , 16 , 32, 0xfc000700, 0xa4000000, 0 , 0, 0x0 }, /* P.LS.S0 */ @@ -21746,32 +21256,32 @@ NMD::Pool NMD::P_LS_S9[8] = { }; -NMD::Pool NMD::P_BAL[2] = { +static const Pool P_BAL[2] = { { branch_instruction , 0 , 0 , 32, - 0xfe000000, 0x28000000, &NMD::BC_32_ , 0, + 0xfe000000, 0x28000000, &BC_32_ , 0, 0x0 }, /* BC[32] */ { call_instruction , 0 , 0 , 32, - 0xfe000000, 0x2a000000, &NMD::BALC_32_ , 0, + 0xfe000000, 0x2a000000, &BALC_32_ , 0, 0x0 }, /* BALC[32] */ }; -NMD::Pool NMD::P_BALRSC[2] = { +static const Pool P_BALRSC[2] = { { branch_instruction , 0 , 0 , 32, - 0xffe0f000, 0x48008000, &NMD::BRSC , 0, + 0xffe0f000, 0x48008000, &BRSC , 0, 0x0 }, /* BRSC */ { call_instruction , 0 , 0 , 32, - 0xfc00f000, 0x48008000, &NMD::BALRSC , &NMD::BALRSC_cond , + 0xfc00f000, 0x48008000, &BALRSC , &BALRSC_cond , 0x0 }, /* BALRSC */ }; -NMD::Pool NMD::P_J[16] = { +static const Pool P_J[16] = { { call_instruction , 0 , 0 , 32, - 0xfc00f000, 0x48000000, &NMD::JALRC_32_ , 0, + 0xfc00f000, 0x48000000, &JALRC_32_ , 0, 0x0 }, /* JALRC[32] */ { call_instruction , 0 , 0 , 32, - 0xfc00f000, 0x48001000, &NMD::JALRC_HB , 0, + 0xfc00f000, 0x48001000, &JALRC_HB , 0, 0x0 }, /* JALRC.HB */ { reserved_block , 0 , 0 , 32, 0xfc00f000, 0x48002000, 0 , 0, @@ -21818,21 +21328,21 @@ NMD::Pool NMD::P_J[16] = { }; -NMD::Pool NMD::P_BR3A[32] = { +static const Pool P_BR3A[32] = { { branch_instruction , 0 , 0 , 32, - 0xfc1fc000, 0x88004000, &NMD::BC1EQZC , 0, + 0xfc1fc000, 0x88004000, &BC1EQZC , 0, CP1_ }, /* BC1EQZC */ { branch_instruction , 0 , 0 , 32, - 0xfc1fc000, 0x88014000, &NMD::BC1NEZC , 0, + 0xfc1fc000, 0x88014000, &BC1NEZC , 0, CP1_ }, /* BC1NEZC */ { branch_instruction , 0 , 0 , 32, - 0xfc1fc000, 0x88024000, &NMD::BC2EQZC , 0, + 0xfc1fc000, 0x88024000, &BC2EQZC , 0, CP2_ }, /* BC2EQZC */ { branch_instruction , 0 , 0 , 32, - 0xfc1fc000, 0x88034000, &NMD::BC2NEZC , 0, + 0xfc1fc000, 0x88034000, &BC2NEZC , 0, CP2_ }, /* BC2NEZC */ { branch_instruction , 0 , 0 , 32, - 0xfc1fc000, 0x88044000, &NMD::BPOSGE32C , 0, + 0xfc1fc000, 0x88044000, &BPOSGE32C , 0, DSP_ }, /* BPOSGE32C */ { reserved_block , 0 , 0 , 32, 0xfc1fc000, 0x88054000, 0 , 0, @@ -21918,67 +21428,67 @@ NMD::Pool NMD::P_BR3A[32] = { }; -NMD::Pool NMD::P_BR1[4] = { +static const Pool P_BR1[4] = { { branch_instruction , 0 , 0 , 32, - 0xfc00c000, 0x88000000, &NMD::BEQC_32_ , 0, + 0xfc00c000, 0x88000000, &BEQC_32_ , 0, 0x0 }, /* BEQC[32] */ { pool , P_BR3A , 32 , 32, 0xfc00c000, 0x88004000, 0 , 0, 0x0 }, /* P.BR3A */ { branch_instruction , 0 , 0 , 32, - 0xfc00c000, 0x88008000, &NMD::BGEC , 0, + 0xfc00c000, 
0x88008000, &BGEC , 0, 0x0 }, /* BGEC */ { branch_instruction , 0 , 0 , 32, - 0xfc00c000, 0x8800c000, &NMD::BGEUC , 0, + 0xfc00c000, 0x8800c000, &BGEUC , 0, 0x0 }, /* BGEUC */ }; -NMD::Pool NMD::P_BR2[4] = { +static const Pool P_BR2[4] = { { branch_instruction , 0 , 0 , 32, - 0xfc00c000, 0xa8000000, &NMD::BNEC_32_ , 0, + 0xfc00c000, 0xa8000000, &BNEC_32_ , 0, 0x0 }, /* BNEC[32] */ { reserved_block , 0 , 0 , 32, 0xfc00c000, 0xa8004000, 0 , 0, 0x0 }, /* P.BR2~*(1) */ { branch_instruction , 0 , 0 , 32, - 0xfc00c000, 0xa8008000, &NMD::BLTC , 0, + 0xfc00c000, 0xa8008000, &BLTC , 0, 0x0 }, /* BLTC */ { branch_instruction , 0 , 0 , 32, - 0xfc00c000, 0xa800c000, &NMD::BLTUC , 0, + 0xfc00c000, 0xa800c000, &BLTUC , 0, 0x0 }, /* BLTUC */ }; -NMD::Pool NMD::P_BRI[8] = { +static const Pool P_BRI[8] = { { branch_instruction , 0 , 0 , 32, - 0xfc1c0000, 0xc8000000, &NMD::BEQIC , 0, + 0xfc1c0000, 0xc8000000, &BEQIC , 0, 0x0 }, /* BEQIC */ { branch_instruction , 0 , 0 , 32, - 0xfc1c0000, 0xc8040000, &NMD::BBEQZC , 0, + 0xfc1c0000, 0xc8040000, &BBEQZC , 0, XMMS_ }, /* BBEQZC */ { branch_instruction , 0 , 0 , 32, - 0xfc1c0000, 0xc8080000, &NMD::BGEIC , 0, + 0xfc1c0000, 0xc8080000, &BGEIC , 0, 0x0 }, /* BGEIC */ { branch_instruction , 0 , 0 , 32, - 0xfc1c0000, 0xc80c0000, &NMD::BGEIUC , 0, + 0xfc1c0000, 0xc80c0000, &BGEIUC , 0, 0x0 }, /* BGEIUC */ { branch_instruction , 0 , 0 , 32, - 0xfc1c0000, 0xc8100000, &NMD::BNEIC , 0, + 0xfc1c0000, 0xc8100000, &BNEIC , 0, 0x0 }, /* BNEIC */ { branch_instruction , 0 , 0 , 32, - 0xfc1c0000, 0xc8140000, &NMD::BBNEZC , 0, + 0xfc1c0000, 0xc8140000, &BBNEZC , 0, XMMS_ }, /* BBNEZC */ { branch_instruction , 0 , 0 , 32, - 0xfc1c0000, 0xc8180000, &NMD::BLTIC , 0, + 0xfc1c0000, 0xc8180000, &BLTIC , 0, 0x0 }, /* BLTIC */ { branch_instruction , 0 , 0 , 32, - 0xfc1c0000, 0xc81c0000, &NMD::BLTIUC , 0, + 0xfc1c0000, 0xc81c0000, &BLTIUC , 0, 0x0 }, /* BLTIUC */ }; -NMD::Pool NMD::P32[32] = { +static const Pool P32[32] = { { pool , P_ADDIU , 2 , 32, 0xfc000000, 0x00000000, 0 , 0, 0x0 }, /* P.ADDIU */ @@ -22004,7 +21514,7 @@ NMD::Pool NMD::P32[32] = { 0xfc000000, 0xe0000000, 0 , 0, 0x0 }, /* P.LUI */ { instruction , 0 , 0 , 32, - 0xfc000000, 0x04000000, &NMD::ADDIUPC_32_ , 0, + 0xfc000000, 0x04000000, &ADDIUPC_32_ , 0, 0x0 }, /* ADDIUPC[32] */ { reserved_block , 0 , 0 , 32, 0xfc000000, 0x24000000, 0 , 0, @@ -22028,7 +21538,7 @@ NMD::Pool NMD::P32[32] = { 0xfc000000, 0xe4000000, 0 , 0, 0x0 }, /* P32~*(29) */ { call_instruction , 0 , 0 , 32, - 0xfc000000, 0x08000000, &NMD::MOVE_BALC , 0, + 0xfc000000, 0x08000000, &MOVE_BALC , 0, XMMS_ }, /* MOVE.BALC */ { pool , P_BAL , 2 , 32, 0xfc000000, 0x28000000, 0 , 0, @@ -22078,17 +21588,17 @@ NMD::Pool NMD::P32[32] = { }; -NMD::Pool NMD::P16_SYSCALL[2] = { +static const Pool P16_SYSCALL[2] = { { instruction , 0 , 0 , 16, - 0xfffc , 0x1008 , &NMD::SYSCALL_16_ , 0, + 0xfffc , 0x1008 , &SYSCALL_16_ , 0, 0x0 }, /* SYSCALL[16] */ { instruction , 0 , 0 , 16, - 0xfffc , 0x100c , &NMD::HYPCALL_16_ , 0, + 0xfffc , 0x100c , &HYPCALL_16_ , 0, CP0_ | VZ_ }, /* HYPCALL[16] */ }; -NMD::Pool NMD::P16_RI[4] = { +static const Pool P16_RI[4] = { { reserved_block , 0 , 0 , 16, 0xfff8 , 0x1000 , 0 , 0, 0x0 }, /* P16.RI~*(0) */ @@ -22096,51 +21606,51 @@ NMD::Pool NMD::P16_RI[4] = { 0xfff8 , 0x1008 , 0 , 0, 0x0 }, /* P16.SYSCALL */ { instruction , 0 , 0 , 16, - 0xfff8 , 0x1010 , &NMD::BREAK_16_ , 0, + 0xfff8 , 0x1010 , &BREAK_16_ , 0, 0x0 }, /* BREAK[16] */ { instruction , 0 , 0 , 16, - 0xfff8 , 0x1018 , &NMD::SDBBP_16_ , 0, + 0xfff8 , 0x1018 , &SDBBP_16_ , 0, EJTAG_ }, 
/* SDBBP[16] */ }; -NMD::Pool NMD::P16_MV[2] = { +static const Pool P16_MV[2] = { { pool , P16_RI , 4 , 16, 0xffe0 , 0x1000 , 0 , 0, 0x0 }, /* P16.RI */ { instruction , 0 , 0 , 16, - 0xfc00 , 0x1000 , &NMD::MOVE , &NMD::MOVE_cond , + 0xfc00 , 0x1000 , &MOVE , &MOVE_cond , 0x0 }, /* MOVE */ }; -NMD::Pool NMD::P16_SHIFT[2] = { +static const Pool P16_SHIFT[2] = { { instruction , 0 , 0 , 16, - 0xfc08 , 0x3000 , &NMD::SLL_16_ , 0, + 0xfc08 , 0x3000 , &SLL_16_ , 0, 0x0 }, /* SLL[16] */ { instruction , 0 , 0 , 16, - 0xfc08 , 0x3008 , &NMD::SRL_16_ , 0, + 0xfc08 , 0x3008 , &SRL_16_ , 0, 0x0 }, /* SRL[16] */ }; -NMD::Pool NMD::POOL16C_00[4] = { +static const Pool POOL16C_00[4] = { { instruction , 0 , 0 , 16, - 0xfc0f , 0x5000 , &NMD::NOT_16_ , 0, + 0xfc0f , 0x5000 , &NOT_16_ , 0, 0x0 }, /* NOT[16] */ { instruction , 0 , 0 , 16, - 0xfc0f , 0x5004 , &NMD::XOR_16_ , 0, + 0xfc0f , 0x5004 , &XOR_16_ , 0, 0x0 }, /* XOR[16] */ { instruction , 0 , 0 , 16, - 0xfc0f , 0x5008 , &NMD::AND_16_ , 0, + 0xfc0f , 0x5008 , &AND_16_ , 0, 0x0 }, /* AND[16] */ { instruction , 0 , 0 , 16, - 0xfc0f , 0x500c , &NMD::OR_16_ , 0, + 0xfc0f , 0x500c , &OR_16_ , 0, 0x0 }, /* OR[16] */ }; -NMD::Pool NMD::POOL16C_0[2] = { +static const Pool POOL16C_0[2] = { { pool , POOL16C_00 , 4 , 16, 0xfc03 , 0x5000 , 0 , 0, 0x0 }, /* POOL16C_00 */ @@ -22150,39 +21660,39 @@ NMD::Pool NMD::POOL16C_0[2] = { }; -NMD::Pool NMD::P16C[2] = { +static const Pool P16C[2] = { { pool , POOL16C_0 , 2 , 16, 0xfc01 , 0x5000 , 0 , 0, 0x0 }, /* POOL16C_0 */ { instruction , 0 , 0 , 16, - 0xfc01 , 0x5001 , &NMD::LWXS_16_ , 0, + 0xfc01 , 0x5001 , &LWXS_16_ , 0, 0x0 }, /* LWXS[16] */ }; -NMD::Pool NMD::P16_A1[2] = { +static const Pool P16_A1[2] = { { reserved_block , 0 , 0 , 16, 0xfc40 , 0x7000 , 0 , 0, 0x0 }, /* P16.A1~*(0) */ { instruction , 0 , 0 , 16, - 0xfc40 , 0x7040 , &NMD::ADDIU_R1_SP_ , 0, + 0xfc40 , 0x7040 , &ADDIU_R1_SP_ , 0, 0x0 }, /* ADDIU[R1.SP] */ }; -NMD::Pool NMD::P_ADDIU_RS5_[2] = { +static const Pool P_ADDIU_RS5_[2] = { { instruction , 0 , 0 , 16, - 0xffe8 , 0x9008 , &NMD::NOP_16_ , 0, + 0xffe8 , 0x9008 , &NOP_16_ , 0, 0x0 }, /* NOP[16] */ { instruction , 0 , 0 , 16, - 0xfc08 , 0x9008 , &NMD::ADDIU_RS5_ , &NMD::ADDIU_RS5__cond , + 0xfc08 , 0x9008 , &ADDIU_RS5_ , &ADDIU_RS5__cond , 0x0 }, /* ADDIU[RS5] */ }; -NMD::Pool NMD::P16_A2[2] = { +static const Pool P16_A2[2] = { { instruction , 0 , 0 , 16, - 0xfc08 , 0x9000 , &NMD::ADDIU_R2_ , 0, + 0xfc08 , 0x9000 , &ADDIU_R2_ , 0, 0x0 }, /* ADDIU[R2] */ { pool , P_ADDIU_RS5_ , 2 , 16, 0xfc08 , 0x9008 , 0 , 0, @@ -22190,62 +21700,62 @@ NMD::Pool NMD::P16_A2[2] = { }; -NMD::Pool NMD::P16_ADDU[2] = { +static const Pool P16_ADDU[2] = { { instruction , 0 , 0 , 16, - 0xfc01 , 0xb000 , &NMD::ADDU_16_ , 0, + 0xfc01 , 0xb000 , &ADDU_16_ , 0, 0x0 }, /* ADDU[16] */ { instruction , 0 , 0 , 16, - 0xfc01 , 0xb001 , &NMD::SUBU_16_ , 0, + 0xfc01 , 0xb001 , &SUBU_16_ , 0, 0x0 }, /* SUBU[16] */ }; -NMD::Pool NMD::P16_JRC[2] = { +static const Pool P16_JRC[2] = { { branch_instruction , 0 , 0 , 16, - 0xfc1f , 0xd800 , &NMD::JRC , 0, + 0xfc1f , 0xd800 , &JRC , 0, 0x0 }, /* JRC */ { call_instruction , 0 , 0 , 16, - 0xfc1f , 0xd810 , &NMD::JALRC_16_ , 0, + 0xfc1f , 0xd810 , &JALRC_16_ , 0, 0x0 }, /* JALRC[16] */ }; -NMD::Pool NMD::P16_BR1[2] = { +static const Pool P16_BR1[2] = { { branch_instruction , 0 , 0 , 16, - 0xfc00 , 0xd800 , &NMD::BEQC_16_ , &NMD::BEQC_16__cond , + 0xfc00 , 0xd800 , &BEQC_16_ , &BEQC_16__cond , XMMS_ }, /* BEQC[16] */ { branch_instruction , 0 , 0 , 16, - 0xfc00 , 0xd800 , &NMD::BNEC_16_ , 
&NMD::BNEC_16__cond , + 0xfc00 , 0xd800 , &BNEC_16_ , &BNEC_16__cond , XMMS_ }, /* BNEC[16] */ }; -NMD::Pool NMD::P16_BR[2] = { +static const Pool P16_BR[2] = { { pool , P16_JRC , 2 , 16, 0xfc0f , 0xd800 , 0 , 0, 0x0 }, /* P16.JRC */ { pool , P16_BR1 , 2 , 16, - 0xfc00 , 0xd800 , 0 , &NMD::P16_BR1_cond , + 0xfc00 , 0xd800 , 0 , &P16_BR1_cond , 0x0 }, /* P16.BR1 */ }; -NMD::Pool NMD::P16_SR[2] = { +static const Pool P16_SR[2] = { { instruction , 0 , 0 , 16, - 0xfd00 , 0x1c00 , &NMD::SAVE_16_ , 0, + 0xfd00 , 0x1c00 , &SAVE_16_ , 0, 0x0 }, /* SAVE[16] */ { return_instruction , 0 , 0 , 16, - 0xfd00 , 0x1d00 , &NMD::RESTORE_JRC_16_ , 0, + 0xfd00 , 0x1d00 , &RESTORE_JRC_16_ , 0, 0x0 }, /* RESTORE.JRC[16] */ }; -NMD::Pool NMD::P16_4X4[4] = { +static const Pool P16_4X4[4] = { { instruction , 0 , 0 , 16, - 0xfd08 , 0x3c00 , &NMD::ADDU_4X4_ , 0, + 0xfd08 , 0x3c00 , &ADDU_4X4_ , 0, XMMS_ }, /* ADDU[4X4] */ { instruction , 0 , 0 , 16, - 0xfd08 , 0x3c08 , &NMD::MUL_4X4_ , 0, + 0xfd08 , 0x3c08 , &MUL_4X4_ , 0, XMMS_ }, /* MUL[4X4] */ { reserved_block , 0 , 0 , 16, 0xfd08 , 0x3d00 , 0 , 0, @@ -22256,15 +21766,15 @@ NMD::Pool NMD::P16_4X4[4] = { }; -NMD::Pool NMD::P16_LB[4] = { +static const Pool P16_LB[4] = { { instruction , 0 , 0 , 16, - 0xfc0c , 0x5c00 , &NMD::LB_16_ , 0, + 0xfc0c , 0x5c00 , &LB_16_ , 0, 0x0 }, /* LB[16] */ { instruction , 0 , 0 , 16, - 0xfc0c , 0x5c04 , &NMD::SB_16_ , 0, + 0xfc0c , 0x5c04 , &SB_16_ , 0, 0x0 }, /* SB[16] */ { instruction , 0 , 0 , 16, - 0xfc0c , 0x5c08 , &NMD::LBU_16_ , 0, + 0xfc0c , 0x5c08 , &LBU_16_ , 0, 0x0 }, /* LBU[16] */ { reserved_block , 0 , 0 , 16, 0xfc0c , 0x5c0c , 0 , 0, @@ -22272,15 +21782,15 @@ NMD::Pool NMD::P16_LB[4] = { }; -NMD::Pool NMD::P16_LH[4] = { +static const Pool P16_LH[4] = { { instruction , 0 , 0 , 16, - 0xfc09 , 0x7c00 , &NMD::LH_16_ , 0, + 0xfc09 , 0x7c00 , &LH_16_ , 0, 0x0 }, /* LH[16] */ { instruction , 0 , 0 , 16, - 0xfc09 , 0x7c01 , &NMD::SH_16_ , 0, + 0xfc09 , 0x7c01 , &SH_16_ , 0, 0x0 }, /* SH[16] */ { instruction , 0 , 0 , 16, - 0xfc09 , 0x7c08 , &NMD::LHU_16_ , 0, + 0xfc09 , 0x7c08 , &LHU_16_ , 0, 0x0 }, /* LHU[16] */ { reserved_block , 0 , 0 , 16, 0xfc09 , 0x7c09 , 0 , 0, @@ -22288,7 +21798,7 @@ NMD::Pool NMD::P16_LH[4] = { }; -NMD::Pool NMD::P16[32] = { +static const Pool P16[32] = { { pool , P16_MV , 2 , 16, 0xfc00 , 0x1000 , 0 , 0, 0x0 }, /* P16.MV */ @@ -22308,40 +21818,40 @@ NMD::Pool NMD::P16[32] = { 0xfc00 , 0xb000 , 0 , 0, 0x0 }, /* P16.ADDU */ { instruction , 0 , 0 , 16, - 0xfc00 , 0xd000 , &NMD::LI_16_ , 0, + 0xfc00 , 0xd000 , &LI_16_ , 0, 0x0 }, /* LI[16] */ { instruction , 0 , 0 , 16, - 0xfc00 , 0xf000 , &NMD::ANDI_16_ , 0, + 0xfc00 , 0xf000 , &ANDI_16_ , 0, 0x0 }, /* ANDI[16] */ { instruction , 0 , 0 , 16, - 0xfc00 , 0x1400 , &NMD::LW_16_ , 0, + 0xfc00 , 0x1400 , &LW_16_ , 0, 0x0 }, /* LW[16] */ { instruction , 0 , 0 , 16, - 0xfc00 , 0x3400 , &NMD::LW_SP_ , 0, + 0xfc00 , 0x3400 , &LW_SP_ , 0, 0x0 }, /* LW[SP] */ { instruction , 0 , 0 , 16, - 0xfc00 , 0x5400 , &NMD::LW_GP16_ , 0, + 0xfc00 , 0x5400 , &LW_GP16_ , 0, 0x0 }, /* LW[GP16] */ { instruction , 0 , 0 , 16, - 0xfc00 , 0x7400 , &NMD::LW_4X4_ , 0, + 0xfc00 , 0x7400 , &LW_4X4_ , 0, XMMS_ }, /* LW[4X4] */ { instruction , 0 , 0 , 16, - 0xfc00 , 0x9400 , &NMD::SW_16_ , 0, + 0xfc00 , 0x9400 , &SW_16_ , 0, 0x0 }, /* SW[16] */ { instruction , 0 , 0 , 16, - 0xfc00 , 0xb400 , &NMD::SW_SP_ , 0, + 0xfc00 , 0xb400 , &SW_SP_ , 0, 0x0 }, /* SW[SP] */ { instruction , 0 , 0 , 16, - 0xfc00 , 0xd400 , &NMD::SW_GP16_ , 0, + 0xfc00 , 0xd400 , &SW_GP16_ , 0, 0x0 }, /* SW[GP16] */ { 
instruction , 0 , 0 , 16, - 0xfc00 , 0xf400 , &NMD::SW_4X4_ , 0, + 0xfc00 , 0xf400 , &SW_4X4_ , 0, XMMS_ }, /* SW[4X4] */ { branch_instruction , 0 , 0 , 16, - 0xfc00 , 0x1800 , &NMD::BC_16_ , 0, + 0xfc00 , 0x1800 , &BC_16_ , 0, 0x0 }, /* BC[16] */ { call_instruction , 0 , 0 , 16, - 0xfc00 , 0x3800 , &NMD::BALC_16_ , 0, + 0xfc00 , 0x3800 , &BALC_16_ , 0, 0x0 }, /* BALC[16] */ { reserved_block , 0 , 0 , 16, 0xfc00 , 0x5800 , 0 , 0, @@ -22350,10 +21860,10 @@ NMD::Pool NMD::P16[32] = { 0xfc00 , 0x7800 , 0 , 0, 0x0 }, /* P16~*(14) */ { branch_instruction , 0 , 0 , 16, - 0xfc00 , 0x9800 , &NMD::BEQZC_16_ , 0, + 0xfc00 , 0x9800 , &BEQZC_16_ , 0, 0x0 }, /* BEQZC[16] */ { branch_instruction , 0 , 0 , 16, - 0xfc00 , 0xb800 , &NMD::BNEZC_16_ , 0, + 0xfc00 , 0xb800 , &BNEZC_16_ , 0, 0x0 }, /* BNEZC[16] */ { pool , P16_BR , 2 , 16, 0xfc00 , 0xd800 , 0 , 0, @@ -22377,18 +21887,18 @@ NMD::Pool NMD::P16[32] = { 0xfc00 , 0x9c00 , 0 , 0, 0x0 }, /* P16~*(19) */ { instruction , 0 , 0 , 16, - 0xfc00 , 0xbc00 , &NMD::MOVEP , 0, + 0xfc00 , 0xbc00 , &MOVEP , 0, XMMS_ }, /* MOVEP */ { reserved_block , 0 , 0 , 16, 0xfc00 , 0xdc00 , 0 , 0, 0x0 }, /* P16~*(27) */ { instruction , 0 , 0 , 16, - 0xfc00 , 0xfc00 , &NMD::MOVEP_REV_ , 0, + 0xfc00 , 0xfc00 , &MOVEP_REV_ , 0, XMMS_ }, /* MOVEP[REV] */ }; -NMD::Pool NMD::MAJOR[2] = { +static const Pool MAJOR[2] = { { pool , P32 , 32 , 32, 0x10000000, 0x00000000, 0 , 0, 0x0 }, /* P32 */ @@ -22396,3 +21906,85 @@ NMD::Pool NMD::MAJOR[2] = { 0x1000 , 0x1000 , 0 , 0, 0x0 }, /* P16 */ }; + +static bool nanomips_dis(const uint16_t *data, char **buf, Dis_info *info) +{ + TABLE_ENTRY_TYPE type; + + /* Handle runtime errors. */ + if (unlikely(sigsetjmp(info->buf, 0) != 0)) { + return false; + } + return Disassemble(data, buf, &type, MAJOR, ARRAY_SIZE(MAJOR), info) >= 0; +} + +static bool read_u16(uint16_t *ret, bfd_vma memaddr, + struct disassemble_info *info) +{ + int status = (*info->read_memory_func)(memaddr, (bfd_byte *)ret, 2, info); + if (status != 0) { + (*info->memory_error_func)(status, memaddr, info); + return false; + } + + if ((info->endian == BFD_ENDIAN_BIG) != HOST_BIG_ENDIAN) { + bswap16s(ret); + } + return true; +} + +int print_insn_nanomips(bfd_vma memaddr, struct disassemble_info *info) +{ + int length; + uint16_t words[3] = { }; + g_autofree char *buf = NULL; + + info->bytes_per_chunk = 2; + info->display_endian = info->endian; + info->insn_info_valid = 1; + info->branch_delay_insns = 0; + info->data_size = 0; + info->insn_type = dis_nonbranch; + info->target = 0; + info->target2 = 0; + + Dis_info disassm_info; + disassm_info.m_pc = memaddr; + disassm_info.fprintf_func = info->fprintf_func; + disassm_info.stream = info->stream; + + if (!read_u16(&words[0], memaddr, info)) { + return -1; + } + length = 2; + + /* Handle 32-bit opcodes. */ + if ((words[0] & 0x1000) == 0) { + if (!read_u16(&words[1], memaddr + 2, info)) { + return -1; + } + length = 4; + + /* Handle 48-bit opcodes. 
*/ + if ((words[0] >> 10) == 0x18) { + if (!read_u16(&words[2], memaddr + 4, info)) { + return -1; + } + length = 6; + } + } + + for (int i = 0; i < ARRAY_SIZE(words); i++) { + if (i * 2 < length) { + (*info->fprintf_func)(info->stream, "%04x ", words[i]); + } else { + (*info->fprintf_func)(info->stream, " "); + } + } + + if (nanomips_dis(words, &buf, &disassm_info)) { + (*info->fprintf_func) (info->stream, "%s", buf); + } + + return length; +} diff --git a/disas/nanomips.h b/disas/nanomips.h deleted file mode 100644 index a0a2225301..0000000000 --- a/disas/nanomips.h +++ /dev/null @@ -1,1076 +0,0 @@ -/* - * Header file for nanoMIPS disassembler component of QEMU - * - * Copyright (C) 2018 Wave Computing, Inc. - * Copyright (C) 2018 Matthew Fortune - * Copyright (C) 2018 Aleksandar Markovic - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - */ - -#ifndef DISAS_NANOMIPS_H -#define DISAS_NANOMIPS_H - -#include - -typedef int64_t int64; -typedef uint64_t uint64; -typedef uint32_t uint32; -typedef uint16_t uint16; - -namespace img -{ - typedef uint64_t address; -} - - -class NMD -{ -public: - - enum TABLE_ENTRY_TYPE { - instruction, - call_instruction, - branch_instruction, - return_instruction, - reserved_block, - pool, - }; - - enum TABLE_ATTRIBUTE_TYPE { - MIPS64_ = 0x00000001, - XNP_ = 0x00000002, - XMMS_ = 0x00000004, - EVA_ = 0x00000008, - DSP_ = 0x00000010, - MT_ = 0x00000020, - EJTAG_ = 0x00000040, - TLBINV_ = 0x00000080, - CP0_ = 0x00000100, - CP1_ = 0x00000200, - CP2_ = 0x00000400, - UDI_ = 0x00000800, - MCU_ = 0x00001000, - VZ_ = 0x00002000, - TLB_ = 0x00004000, - MVH_ = 0x00008000, - ALL_ATTRIBUTES = 0xffffffffull, - }; - - - NMD(img::address pc, TABLE_ATTRIBUTE_TYPE requested_instruction_categories) - : m_pc(pc) - , m_requested_instruction_categories(requested_instruction_categories) - { - } - - int Disassemble(const uint16 *data, std::string & dis, - TABLE_ENTRY_TYPE & type); - -private: - - img::address m_pc; - TABLE_ATTRIBUTE_TYPE m_requested_instruction_categories; - - typedef std::string(NMD:: *disassembly_function)(uint64 instruction); - typedef bool(NMD:: *conditional_function)(uint64 instruction); - - struct Pool { - TABLE_ENTRY_TYPE type; - struct Pool *next_table; - int next_table_size; - int instructions_size; - uint64 mask; - uint64 value; - disassembly_function disassembly; - conditional_function condition; - uint64 attributes; - }; - - uint64 extract_op_code_value(const uint16 *data, int size); - int Disassemble(const uint16 *data, std::string & dis, - TABLE_ENTRY_TYPE & type, const Pool *table, int table_size); - - uint64 renumber_registers(uint64 index, uint64 *register_list, - size_t register_list_size); - - uint64 decode_gpr_gpr4(uint64 d); - uint64 decode_gpr_gpr4_zero(uint64 d); - uint64 decode_gpr_gpr3(uint64 d); - uint64 decode_gpr_gpr3_src_store(uint64 d); - uint64 decode_gpr_gpr2_reg1(uint64 d); - uint64 decode_gpr_gpr2_reg2(uint64 d); - uint64 decode_gpr_gpr1(uint64 d); - - uint64 
copy(uint64 d); - int64 copy(int64 d); - int64 neg_copy(uint64 d); - int64 neg_copy(int64 d); - uint64 encode_rs3_and_check_rs3_ge_rt3(uint64 d); - uint64 encode_rs3_and_check_rs3_lt_rt3(uint64 d); - uint64 encode_s_from_address(uint64 d); - uint64 encode_u_from_address(uint64 d); - uint64 encode_s_from_s_hi(uint64 d); - uint64 encode_count3_from_count(uint64 d); - uint64 encode_shift3_from_shift(uint64 d); - int64 encode_eu_from_s_li16(uint64 d); - uint64 encode_msbd_from_size(uint64 d); - uint64 encode_eu_from_u_andi16(uint64 d); - - uint64 encode_msbd_from_pos_and_size(uint64 d); - - uint64 encode_rt1_from_rt(uint64 d); - uint64 encode_lsb_from_pos_and_size(uint64 d); - - std::string save_restore_list(uint64 rt, uint64 count, uint64 gp); - - std::string GPR(uint64 reg); - std::string FPR(uint64 reg); - std::string AC(uint64 reg); - std::string IMMEDIATE(uint64 value); - std::string IMMEDIATE(int64 value); - std::string CPR(uint64 reg); - std::string ADDRESS(uint64 value, int instruction_size); - - int64 extract_s__se3_4_2_1_0(uint64 instruction); - int64 extract_s__se7_0_6_5_4_3_2_1_s1(uint64 instruction); - int64 extract_s__se8_15_7_6_5_4_3_s3(uint64 instruction); - int64 extract_s__se8_15_7_6_5_4_3_2_s2(uint64 instruction); - int64 extract_s__se8_15_7_6_5_4_3_2_1_0(uint64 instruction); - int64 extract_s__se9_20_19_18_17_16_15_14_13_12_11(uint64 instruction); - int64 extract_s__se10_0_9_8_7_6_5_4_3_2_1_s1(uint64 instruction); - int64 extract_s__se11_0_10_9_8_7_6_5_4_3_2_1_0_s1(uint64 instruction); - int64 extract_s__se14_0_13_to_1_s1(uint64 instruction); - int64 extract_s__se21_0_20_to_1_s1(uint64 instruction); - int64 extract_s__se25_0_24_to_1_s1(uint64 instruction); - int64 extract_s__se31_15_to_0_31_to_16(uint64 instruction); - int64 extract_s__se31_0_11_to_2_20_to_12_s12(uint64 instruction); - int64 extract_shift__se5_21_20_19_18_17_16(uint64 instruction); - - uint64 extract_ac_15_14(uint64 instruction); - uint64 extract_bit_16_15_14_13_12_11(uint64 instruction); - uint64 extract_bit_23_22_21(uint64 instruction); - uint64 extract_c0s_20_19_18_17_16(uint64 instruction); - uint64 extract_code_17_to_0(uint64 instruction); - uint64 extract_code_18_to_0(uint64 instruction); - uint64 extract_code_1_0(uint64 instruction); - uint64 extract_code_2_1_0(uint64 instruction); - uint64 extract_code_25_24_23_22_21_20_19_18_17_16(uint64 instruction); - uint64 extract_cofun_25_24_23(uint64 instruction); - uint64 extract_count3_14_13_12(uint64 instruction); - uint64 extract_count_3_2_1_0(uint64 instruction); - uint64 extract_count_19_18_17_16(uint64 instruction); - uint64 extract_cs_20_19_18_17_16(uint64 instruction); - uint64 extract_cs_25_24_23_22_21(uint64 instruction); - uint64 extract_ct_25_24_23_22_21(uint64 instruction); - uint64 extract_eu_3_2_1_0(uint64 instruction); - uint64 extract_eu_6_5_4_3_2_1_0(uint64 instruction); - uint64 extract_fd_15_14_13_12_11(uint64 instruction); - uint64 extract_fs_20_19_18_17_16(uint64 instruction); - uint64 extract_ft_15_14_13_12_11(uint64 instruction); - uint64 extract_ft_25_24_23_22_21(uint64 instruction); - uint64 extract_gp_2(uint64 instruction); - uint64 extract_hint_25_24_23_22_21(uint64 instruction); - uint64 extract_hs_20_19_18_17_16(uint64 instruction); - uint64 extract_lsb_4_3_2_1_0(uint64 instruction); - uint64 extract_mask_20_19_18_17_16_15_14(uint64 instruction); - uint64 extract_msbt_10_9_8_7_6(uint64 instruction); - uint64 extract_op_25_24_23_22_21(uint64 instruction); - uint64 extract_op_25_to_3(uint64 instruction); - uint64 
extract_rdl_25_24(uint64 instruction); - uint64 extract_rd2_3_8(uint64 instruction); - uint64 extract_rd3_3_2_1(uint64 instruction); - uint64 extract_rd_15_14_13_12_11(uint64 instruction); - uint64 extract_rs3_6_5_4(uint64 instruction); - uint64 extract_rs4_4_2_1_0(uint64 instruction); - uint64 extract_rs_4_3_2_1_0(uint64 instruction); - uint64 extract_rs_20_19_18_17_16(uint64 instruction); - uint64 extract_rsz4_4_2_1_0(uint64 instruction); - uint64 extract_rtl_11(uint64 instruction); - uint64 extract_rt3_9_8_7(uint64 instruction); - uint64 extract_rt4_9_7_6_5(uint64 instruction); - uint64 extract_rt_25_24_23_22_21(uint64 instruction); - uint64 extract_rt_41_40_39_38_37(uint64 instruction); - uint64 extract_rt_9_8_7_6_5(uint64 instruction); - uint64 extract_rtz3_9_8_7(uint64 instruction); - uint64 extract_rtz4_27_26_25_23_22_21(uint64 instruction); - uint64 extract_rtz4_9_7_6_5(uint64 instruction); - uint64 extract_ru_7_6_5_4_3(uint64 instruction); - uint64 extract_sa_15_14_13_12_11(uint64 instruction); - uint64 extract_sa_15_14_13_12(uint64 instruction); - uint64 extract_sa_15_14_13(uint64 instruction); - uint64 extract_sel_13_12_11(uint64 instruction); - uint64 extract_sel_15_14_13_12_11(uint64 instruction); - uint64 extract_shift3_2_1_0(uint64 instruction); - uint64 extract_shift_4_3_2_1_0(uint64 instruction); - uint64 extract_shift_5_4_3_2_1_0(uint64 instruction); - uint64 extract_shift_20_19_18_17_16(uint64 instruction); - uint64 extract_shift_10_9_8_7_6(uint64 instruction); - uint64 extract_shiftx_11_10_9_8_7_6(uint64 instruction); - uint64 extract_shiftx_10_9_8_7__s1(uint64 instruction); - uint64 extract_size_20_19_18_17_16(uint64 instruction); - uint64 extract_stripe_6(uint64 instruction); - uint64 extract_stype_20_19_18_17_16(uint64 instruction); - uint64 extract_u2_10_9(uint64 instruction); - uint64 extract_u_11_10_9_8_7_6_5_4_3_2_1_0(uint64 instruction); - uint64 extract_u_15_to_0(uint64 instruction); - uint64 extract_u_17_to_0(uint64 instruction); - uint64 extract_u_1_0(uint64 instruction); - uint64 extract_u_3_2_1_0__s1(uint64 instruction); - uint64 extract_u_2_1_0__s2(uint64 instruction); - uint64 extract_u_3_2_1_0__s2(uint64 instruction); - uint64 extract_u_4_3_2_1_0__s2(uint64 instruction); - uint64 extract_u_5_4_3_2_1_0__s2(uint64 instruction); - uint64 extract_u_6_5_4_3_2_1_0__s2(uint64 instruction); - uint64 extract_u_31_to_0__s32(uint64 instruction); - uint64 extract_u_10(uint64 instruction); - uint64 extract_u_17_16_15_14_13_12_11(uint64 instruction); - uint64 extract_u_20_19_18_17_16_15_14_13(uint64 instruction); - uint64 extract_u_17_to_1__s1(uint64 instruction); - uint64 extract_u_2_1__s1(uint64 instruction); - uint64 extract_u_17_to_2__s2(uint64 instruction); - uint64 extract_u_20_to_2__s2(uint64 instruction); - uint64 extract_u_20_to_3__s3(uint64 instruction); - uint64 extract_u_3_8__s2(uint64 instruction); - uint64 extract_u_11_10_9_8_7_6_5_4_3__s3(uint64 instruction); - uint64 extract_u_7_6_5_4__s4(uint64 instruction); - - bool ADDIU_32__cond(uint64 instruction); - bool ADDIU_RS5__cond(uint64 instruction); - bool BALRSC_cond(uint64 instruction); - bool BEQC_16__cond(uint64 instruction); - bool BNEC_16__cond(uint64 instruction); - bool MOVE_cond(uint64 instruction); - bool P16_BR1_cond(uint64 instruction); - bool PREF_S9__cond(uint64 instruction); - bool PREFE_cond(uint64 instruction); - bool SLTU_cond(uint64 instruction); - - std::string ABS_D(uint64 instruction); - std::string ABS_S(uint64 instruction); - std::string ABSQ_S_PH(uint64 instruction); - std::string 
ABSQ_S_QB(uint64 instruction); - std::string ABSQ_S_W(uint64 instruction); - std::string ACLR(uint64 instruction); - std::string ADD(uint64 instruction); - std::string ADD_D(uint64 instruction); - std::string ADD_S(uint64 instruction); - std::string ADDIU_32_(uint64 instruction); - std::string ADDIU_48_(uint64 instruction); - std::string ADDIU_GP48_(uint64 instruction); - std::string ADDIU_GP_B_(uint64 instruction); - std::string ADDIU_GP_W_(uint64 instruction); - std::string ADDIU_NEG_(uint64 instruction); - std::string ADDIU_R1_SP_(uint64 instruction); - std::string ADDIU_R2_(uint64 instruction); - std::string ADDIU_RS5_(uint64 instruction); - std::string ADDIUPC_32_(uint64 instruction); - std::string ADDIUPC_48_(uint64 instruction); - std::string ADDQ_PH(uint64 instruction); - std::string ADDQ_S_PH(uint64 instruction); - std::string ADDQ_S_W(uint64 instruction); - std::string ADDQH_PH(uint64 instruction); - std::string ADDQH_R_PH(uint64 instruction); - std::string ADDQH_R_W(uint64 instruction); - std::string ADDQH_W(uint64 instruction); - std::string ADDSC(uint64 instruction); - std::string ADDU_16_(uint64 instruction); - std::string ADDU_32_(uint64 instruction); - std::string ADDU_4X4_(uint64 instruction); - std::string ADDU_PH(uint64 instruction); - std::string ADDU_QB(uint64 instruction); - std::string ADDU_S_PH(uint64 instruction); - std::string ADDU_S_QB(uint64 instruction); - std::string ADDUH_QB(uint64 instruction); - std::string ADDUH_R_QB(uint64 instruction); - std::string ADDWC(uint64 instruction); - std::string ALUIPC(uint64 instruction); - std::string AND_16_(uint64 instruction); - std::string AND_32_(uint64 instruction); - std::string ANDI_16_(uint64 instruction); - std::string ANDI_32_(uint64 instruction); - std::string APPEND(uint64 instruction); - std::string ASET(uint64 instruction); - std::string BALC_16_(uint64 instruction); - std::string BALC_32_(uint64 instruction); - std::string BALRSC(uint64 instruction); - std::string BBEQZC(uint64 instruction); - std::string BBNEZC(uint64 instruction); - std::string BC_16_(uint64 instruction); - std::string BC_32_(uint64 instruction); - std::string BC1EQZC(uint64 instruction); - std::string BC1NEZC(uint64 instruction); - std::string BC2EQZC(uint64 instruction); - std::string BC2NEZC(uint64 instruction); - std::string BEQC_16_(uint64 instruction); - std::string BEQC_32_(uint64 instruction); - std::string BEQIC(uint64 instruction); - std::string BEQZC_16_(uint64 instruction); - std::string BGEC(uint64 instruction); - std::string BGEIC(uint64 instruction); - std::string BGEIUC(uint64 instruction); - std::string BGEUC(uint64 instruction); - std::string BLTC(uint64 instruction); - std::string BLTIC(uint64 instruction); - std::string BLTIUC(uint64 instruction); - std::string BLTUC(uint64 instruction); - std::string BNEC_16_(uint64 instruction); - std::string BNEC_32_(uint64 instruction); - std::string BNEIC(uint64 instruction); - std::string BNEZC_16_(uint64 instruction); - std::string BPOSGE32C(uint64 instruction); - std::string BREAK_16_(uint64 instruction); - std::string BREAK_32_(uint64 instruction); - std::string BRSC(uint64 instruction); - std::string CACHE(uint64 instruction); - std::string CACHEE(uint64 instruction); - std::string CEIL_L_D(uint64 instruction); - std::string CEIL_L_S(uint64 instruction); - std::string CEIL_W_D(uint64 instruction); - std::string CEIL_W_S(uint64 instruction); - std::string CFC1(uint64 instruction); - std::string CFC2(uint64 instruction); - std::string CLASS_D(uint64 instruction); - std::string 
CLASS_S(uint64 instruction); - std::string CLO(uint64 instruction); - std::string CLZ(uint64 instruction); - std::string CMP_AF_D(uint64 instruction); - std::string CMP_AF_S(uint64 instruction); - std::string CMP_EQ_D(uint64 instruction); - std::string CMP_EQ_PH(uint64 instruction); - std::string CMP_EQ_S(uint64 instruction); - std::string CMP_LE_D(uint64 instruction); - std::string CMP_LE_PH(uint64 instruction); - std::string CMP_LE_S(uint64 instruction); - std::string CMP_LT_D(uint64 instruction); - std::string CMP_LT_PH(uint64 instruction); - std::string CMP_LT_S(uint64 instruction); - std::string CMP_NE_D(uint64 instruction); - std::string CMP_NE_S(uint64 instruction); - std::string CMP_OR_D(uint64 instruction); - std::string CMP_OR_S(uint64 instruction); - std::string CMP_SAF_D(uint64 instruction); - std::string CMP_SAF_S(uint64 instruction); - std::string CMP_SEQ_D(uint64 instruction); - std::string CMP_SEQ_S(uint64 instruction); - std::string CMP_SLE_D(uint64 instruction); - std::string CMP_SLE_S(uint64 instruction); - std::string CMP_SLT_D(uint64 instruction); - std::string CMP_SLT_S(uint64 instruction); - std::string CMP_SNE_D(uint64 instruction); - std::string CMP_SNE_S(uint64 instruction); - std::string CMP_SOR_D(uint64 instruction); - std::string CMP_SOR_S(uint64 instruction); - std::string CMP_SUEQ_D(uint64 instruction); - std::string CMP_SUEQ_S(uint64 instruction); - std::string CMP_SULE_D(uint64 instruction); - std::string CMP_SULE_S(uint64 instruction); - std::string CMP_SULT_D(uint64 instruction); - std::string CMP_SULT_S(uint64 instruction); - std::string CMP_SUN_D(uint64 instruction); - std::string CMP_SUN_S(uint64 instruction); - std::string CMP_SUNE_D(uint64 instruction); - std::string CMP_SUNE_S(uint64 instruction); - std::string CMP_UEQ_D(uint64 instruction); - std::string CMP_UEQ_S(uint64 instruction); - std::string CMP_ULE_D(uint64 instruction); - std::string CMP_ULE_S(uint64 instruction); - std::string CMP_ULT_D(uint64 instruction); - std::string CMP_ULT_S(uint64 instruction); - std::string CMP_UN_D(uint64 instruction); - std::string CMP_UN_S(uint64 instruction); - std::string CMP_UNE_D(uint64 instruction); - std::string CMP_UNE_S(uint64 instruction); - std::string CMPGDU_EQ_QB(uint64 instruction); - std::string CMPGDU_LE_QB(uint64 instruction); - std::string CMPGDU_LT_QB(uint64 instruction); - std::string CMPGU_EQ_QB(uint64 instruction); - std::string CMPGU_LE_QB(uint64 instruction); - std::string CMPGU_LT_QB(uint64 instruction); - std::string CMPU_EQ_QB(uint64 instruction); - std::string CMPU_LE_QB(uint64 instruction); - std::string CMPU_LT_QB(uint64 instruction); - std::string COP2_1(uint64 instruction); - std::string CTC1(uint64 instruction); - std::string CTC2(uint64 instruction); - std::string CVT_D_L(uint64 instruction); - std::string CVT_D_S(uint64 instruction); - std::string CVT_D_W(uint64 instruction); - std::string CVT_L_D(uint64 instruction); - std::string CVT_L_S(uint64 instruction); - std::string CVT_S_D(uint64 instruction); - std::string CVT_S_L(uint64 instruction); - std::string CVT_S_PL(uint64 instruction); - std::string CVT_S_PU(uint64 instruction); - std::string CVT_S_W(uint64 instruction); - std::string CVT_W_D(uint64 instruction); - std::string CVT_W_S(uint64 instruction); - std::string DADDIU_48_(uint64 instruction); - std::string DADDIU_NEG_(uint64 instruction); - std::string DADDIU_U12_(uint64 instruction); - std::string DADD(uint64 instruction); - std::string DADDU(uint64 instruction); - std::string DCLO(uint64 instruction); - std::string 
DCLZ(uint64 instruction); - std::string DDIV(uint64 instruction); - std::string DDIVU(uint64 instruction); - std::string DERET(uint64 instruction); - std::string DEXTM(uint64 instruction); - std::string DEXT(uint64 instruction); - std::string DEXTU(uint64 instruction); - std::string DINSM(uint64 instruction); - std::string DINS(uint64 instruction); - std::string DINSU(uint64 instruction); - std::string DI(uint64 instruction); - std::string DIV(uint64 instruction); - std::string DIV_D(uint64 instruction); - std::string DIV_S(uint64 instruction); - std::string DIVU(uint64 instruction); - std::string DLSA(uint64 instruction); - std::string DLUI_48_(uint64 instruction); - std::string DMFC0(uint64 instruction); - std::string DMFC1(uint64 instruction); - std::string DMFC2(uint64 instruction); - std::string DMFGC0(uint64 instruction); - std::string DMOD(uint64 instruction); - std::string DMODU(uint64 instruction); - std::string DMTC0(uint64 instruction); - std::string DMTC1(uint64 instruction); - std::string DMTC2(uint64 instruction); - std::string DMTGC0(uint64 instruction); - std::string DMT(uint64 instruction); - std::string DMUH(uint64 instruction); - std::string DMUHU(uint64 instruction); - std::string DMUL(uint64 instruction); - std::string DMULU(uint64 instruction); - std::string DPAQ_S_W_PH(uint64 instruction); - std::string DPAQ_SA_L_W(uint64 instruction); - std::string DPAQX_S_W_PH(uint64 instruction); - std::string DPAQX_SA_W_PH(uint64 instruction); - std::string DPAU_H_QBL(uint64 instruction); - std::string DPAU_H_QBR(uint64 instruction); - std::string DPA_W_PH(uint64 instruction); - std::string DPAX_W_PH(uint64 instruction); - std::string DPS_W_PH(uint64 instruction); - std::string DPSQ_SA_L_W(uint64 instruction); - std::string DPSQ_S_W_PH(uint64 instruction); - std::string DPSQX_SA_W_PH(uint64 instruction); - std::string DPSQX_S_W_PH(uint64 instruction); - std::string DPSU_H_QBL(uint64 instruction); - std::string DPSU_H_QBR(uint64 instruction); - std::string DPSX_W_PH(uint64 instruction); - std::string DROTR(uint64 instruction); - std::string DROTR32(uint64 instruction); - std::string DROTRV(uint64 instruction); - std::string DROTX(uint64 instruction); - std::string DSLL(uint64 instruction); - std::string DSLL32(uint64 instruction); - std::string DSLLV(uint64 instruction); - std::string DSRA(uint64 instruction); - std::string DSRA32(uint64 instruction); - std::string DSRAV(uint64 instruction); - std::string DSRL32(uint64 instruction); - std::string DSRL(uint64 instruction); - std::string DSRLV(uint64 instruction); - std::string DSUB(uint64 instruction); - std::string DSUBU(uint64 instruction); - std::string DVP(uint64 instruction); - std::string DVPE(uint64 instruction); - std::string EHB(uint64 instruction); - std::string EI(uint64 instruction); - std::string EMT(uint64 instruction); - std::string ERET(uint64 instruction); - std::string ERETNC(uint64 instruction); - std::string EVP(uint64 instruction); - std::string EVPE(uint64 instruction); - std::string EXT(uint64 instruction); - std::string EXTD(uint64 instruction); - std::string EXTD32(uint64 instruction); - std::string EXTP(uint64 instruction); - std::string EXTPDP(uint64 instruction); - std::string EXTPDPV(uint64 instruction); - std::string EXTPV(uint64 instruction); - std::string EXTR_RS_W(uint64 instruction); - std::string EXTR_R_W(uint64 instruction); - std::string EXTR_S_H(uint64 instruction); - std::string EXTR_W(uint64 instruction); - std::string EXTRV_R_W(uint64 instruction); - std::string EXTRV_RS_W(uint64 
instruction); - std::string EXTRV_S_H(uint64 instruction); - std::string EXTRV_W(uint64 instruction); - std::string EXTW(uint64 instruction); - std::string FLOOR_L_D(uint64 instruction); - std::string FLOOR_L_S(uint64 instruction); - std::string FLOOR_W_D(uint64 instruction); - std::string FLOOR_W_S(uint64 instruction); - std::string FORK(uint64 instruction); - std::string HYPCALL(uint64 instruction); - std::string HYPCALL_16_(uint64 instruction); - std::string INS(uint64 instruction); - std::string INSV(uint64 instruction); - std::string IRET(uint64 instruction); - std::string JALRC_16_(uint64 instruction); - std::string JALRC_32_(uint64 instruction); - std::string JALRC_HB(uint64 instruction); - std::string JRC(uint64 instruction); - std::string LB_16_(uint64 instruction); - std::string LB_GP_(uint64 instruction); - std::string LB_S9_(uint64 instruction); - std::string LB_U12_(uint64 instruction); - std::string LBE(uint64 instruction); - std::string LBU_16_(uint64 instruction); - std::string LBU_GP_(uint64 instruction); - std::string LBU_S9_(uint64 instruction); - std::string LBU_U12_(uint64 instruction); - std::string LBUE(uint64 instruction); - std::string LBUX(uint64 instruction); - std::string LBX(uint64 instruction); - std::string LD_GP_(uint64 instruction); - std::string LD_S9_(uint64 instruction); - std::string LD_U12_(uint64 instruction); - std::string LDC1_GP_(uint64 instruction); - std::string LDC1_S9_(uint64 instruction); - std::string LDC1_U12_(uint64 instruction); - std::string LDC1X(uint64 instruction); - std::string LDC1XS(uint64 instruction); - std::string LDC2(uint64 instruction); - std::string LDM(uint64 instruction); - std::string LDPC_48_(uint64 instruction); - std::string LDX(uint64 instruction); - std::string LDXS(uint64 instruction); - std::string LH_16_(uint64 instruction); - std::string LH_GP_(uint64 instruction); - std::string LH_S9_(uint64 instruction); - std::string LH_U12_(uint64 instruction); - std::string LHE(uint64 instruction); - std::string LHU_16_(uint64 instruction); - std::string LHU_GP_(uint64 instruction); - std::string LHU_S9_(uint64 instruction); - std::string LHU_U12_(uint64 instruction); - std::string LHUE(uint64 instruction); - std::string LHUX(uint64 instruction); - std::string LHUXS(uint64 instruction); - std::string LHX(uint64 instruction); - std::string LHXS(uint64 instruction); - std::string LI_16_(uint64 instruction); - std::string LI_48_(uint64 instruction); - std::string LL(uint64 instruction); - std::string LLD(uint64 instruction); - std::string LLDP(uint64 instruction); - std::string LLE(uint64 instruction); - std::string LLWP(uint64 instruction); - std::string LLWPE(uint64 instruction); - std::string LSA(uint64 instruction); - std::string LUI(uint64 instruction); - std::string LW_16_(uint64 instruction); - std::string LW_4X4_(uint64 instruction); - std::string LWC1_GP_(uint64 instruction); - std::string LWC1_S9_(uint64 instruction); - std::string LWC1_U12_(uint64 instruction); - std::string LWC1X(uint64 instruction); - std::string LWC1XS(uint64 instruction); - std::string LWC2(uint64 instruction); - std::string LWE(uint64 instruction); - std::string LW_GP_(uint64 instruction); - std::string LW_GP16_(uint64 instruction); - std::string LWM(uint64 instruction); - std::string LWPC_48_(uint64 instruction); - std::string LW_S9_(uint64 instruction); - std::string LW_SP_(uint64 instruction); - std::string LW_U12_(uint64 instruction); - std::string LWU_GP_(uint64 instruction); - std::string LWU_S9_(uint64 instruction); - std::string 
LWU_U12_(uint64 instruction); - std::string LWUX(uint64 instruction); - std::string LWUXS(uint64 instruction); - std::string LWX(uint64 instruction); - std::string LWXS_16_(uint64 instruction); - std::string LWXS_32_(uint64 instruction); - std::string MADD_DSP_(uint64 instruction); - std::string MADDF_D(uint64 instruction); - std::string MADDF_S(uint64 instruction); - std::string MADDU_DSP_(uint64 instruction); - std::string MAQ_S_W_PHL(uint64 instruction); - std::string MAQ_S_W_PHR(uint64 instruction); - std::string MAQ_SA_W_PHL(uint64 instruction); - std::string MAQ_SA_W_PHR(uint64 instruction); - std::string MAX_D(uint64 instruction); - std::string MAX_S(uint64 instruction); - std::string MAXA_D(uint64 instruction); - std::string MAXA_S(uint64 instruction); - std::string MFC0(uint64 instruction); - std::string MFC1(uint64 instruction); - std::string MFC2(uint64 instruction); - std::string MFGC0(uint64 instruction); - std::string MFHC0(uint64 instruction); - std::string MFHC1(uint64 instruction); - std::string MFHC2(uint64 instruction); - std::string MFHGC0(uint64 instruction); - std::string MFHI_DSP_(uint64 instruction); - std::string MFHTR(uint64 instruction); - std::string MFLO_DSP_(uint64 instruction); - std::string MFTR(uint64 instruction); - std::string MIN_D(uint64 instruction); - std::string MIN_S(uint64 instruction); - std::string MINA_D(uint64 instruction); - std::string MINA_S(uint64 instruction); - std::string MOD(uint64 instruction); - std::string MODSUB(uint64 instruction); - std::string MODU(uint64 instruction); - std::string MOV_D(uint64 instruction); - std::string MOV_S(uint64 instruction); - std::string MOVE_BALC(uint64 instruction); - std::string MOVEP(uint64 instruction); - std::string MOVEP_REV_(uint64 instruction); - std::string MOVE(uint64 instruction); - std::string MOVN(uint64 instruction); - std::string MOVZ(uint64 instruction); - std::string MSUB_DSP_(uint64 instruction); - std::string MSUBF_D(uint64 instruction); - std::string MSUBF_S(uint64 instruction); - std::string MSUBU_DSP_(uint64 instruction); - std::string MTC0(uint64 instruction); - std::string MTC1(uint64 instruction); - std::string MTC2(uint64 instruction); - std::string MTGC0(uint64 instruction); - std::string MTHC0(uint64 instruction); - std::string MTHC1(uint64 instruction); - std::string MTHC2(uint64 instruction); - std::string MTHGC0(uint64 instruction); - std::string MTHI_DSP_(uint64 instruction); - std::string MTHLIP(uint64 instruction); - std::string MTHTR(uint64 instruction); - std::string MTLO_DSP_(uint64 instruction); - std::string MTTR(uint64 instruction); - std::string MUH(uint64 instruction); - std::string MUHU(uint64 instruction); - std::string MUL_32_(uint64 instruction); - std::string MUL_4X4_(uint64 instruction); - std::string MUL_D(uint64 instruction); - std::string MUL_PH(uint64 instruction); - std::string MUL_S(uint64 instruction); - std::string MUL_S_PH(uint64 instruction); - std::string MULEQ_S_W_PHL(uint64 instruction); - std::string MULEQ_S_W_PHR(uint64 instruction); - std::string MULEU_S_PH_QBL(uint64 instruction); - std::string MULEU_S_PH_QBR(uint64 instruction); - std::string MULQ_RS_PH(uint64 instruction); - std::string MULQ_RS_W(uint64 instruction); - std::string MULQ_S_PH(uint64 instruction); - std::string MULQ_S_W(uint64 instruction); - std::string MULSA_W_PH(uint64 instruction); - std::string MULSAQ_S_W_PH(uint64 instruction); - std::string MULT_DSP_(uint64 instruction); - std::string MULTU_DSP_(uint64 instruction); - std::string MULU(uint64 instruction); - 
std::string NEG_D(uint64 instruction); - std::string NEG_S(uint64 instruction); - std::string NOP_16_(uint64 instruction); - std::string NOP_32_(uint64 instruction); - std::string NOR(uint64 instruction); - std::string NOT_16_(uint64 instruction); - std::string OR_16_(uint64 instruction); - std::string OR_32_(uint64 instruction); - std::string ORI(uint64 instruction); - std::string PACKRL_PH(uint64 instruction); - std::string PAUSE(uint64 instruction); - std::string PICK_PH(uint64 instruction); - std::string PICK_QB(uint64 instruction); - std::string PRECEQ_W_PHL(uint64 instruction); - std::string PRECEQ_W_PHR(uint64 instruction); - std::string PRECEQU_PH_QBL(uint64 instruction); - std::string PRECEQU_PH_QBLA(uint64 instruction); - std::string PRECEQU_PH_QBR(uint64 instruction); - std::string PRECEQU_PH_QBRA(uint64 instruction); - std::string PRECEU_PH_QBL(uint64 instruction); - std::string PRECEU_PH_QBLA(uint64 instruction); - std::string PRECEU_PH_QBR(uint64 instruction); - std::string PRECEU_PH_QBRA(uint64 instruction); - std::string PRECR_QB_PH(uint64 instruction); - std::string PRECR_SRA_PH_W(uint64 instruction); - std::string PRECR_SRA_R_PH_W(uint64 instruction); - std::string PRECRQ_PH_W(uint64 instruction); - std::string PRECRQ_QB_PH(uint64 instruction); - std::string PRECRQ_RS_PH_W(uint64 instruction); - std::string PRECRQU_S_QB_PH(uint64 instruction); - std::string PREF_S9_(uint64 instruction); - std::string PREF_U12_(uint64 instruction); - std::string PREFE(uint64 instruction); - std::string PREPEND(uint64 instruction); - std::string RADDU_W_QB(uint64 instruction); - std::string RDDSP(uint64 instruction); - std::string RDHWR(uint64 instruction); - std::string RDPGPR(uint64 instruction); - std::string RECIP_D(uint64 instruction); - std::string RECIP_S(uint64 instruction); - std::string REPL_PH(uint64 instruction); - std::string REPL_QB(uint64 instruction); - std::string REPLV_PH(uint64 instruction); - std::string REPLV_QB(uint64 instruction); - std::string RESTORE_32_(uint64 instruction); - std::string RESTORE_JRC_16_(uint64 instruction); - std::string RESTORE_JRC_32_(uint64 instruction); - std::string RESTOREF(uint64 instruction); - std::string RINT_D(uint64 instruction); - std::string RINT_S(uint64 instruction); - std::string ROTR(uint64 instruction); - std::string ROTRV(uint64 instruction); - std::string ROTX(uint64 instruction); - std::string ROUND_L_D(uint64 instruction); - std::string ROUND_L_S(uint64 instruction); - std::string ROUND_W_D(uint64 instruction); - std::string ROUND_W_S(uint64 instruction); - std::string RSQRT_D(uint64 instruction); - std::string RSQRT_S(uint64 instruction); - std::string SAVE_16_(uint64 instruction); - std::string SAVE_32_(uint64 instruction); - std::string SAVEF(uint64 instruction); - std::string SB_16_(uint64 instruction); - std::string SB_GP_(uint64 instruction); - std::string SB_S9_(uint64 instruction); - std::string SB_U12_(uint64 instruction); - std::string SBE(uint64 instruction); - std::string SBX(uint64 instruction); - std::string SC(uint64 instruction); - std::string SCD(uint64 instruction); - std::string SCDP(uint64 instruction); - std::string SCE(uint64 instruction); - std::string SCWP(uint64 instruction); - std::string SCWPE(uint64 instruction); - std::string SD_GP_(uint64 instruction); - std::string SD_S9_(uint64 instruction); - std::string SD_U12_(uint64 instruction); - std::string SDBBP_16_(uint64 instruction); - std::string SDBBP_32_(uint64 instruction); - std::string SDC1_GP_(uint64 instruction); - std::string SDC1_S9_(uint64 
instruction); - std::string SDC1_U12_(uint64 instruction); - std::string SDC1X(uint64 instruction); - std::string SDC1XS(uint64 instruction); - std::string SDC2(uint64 instruction); - std::string SDM(uint64 instruction); - std::string SDPC_48_(uint64 instruction); - std::string SDX(uint64 instruction); - std::string SDXS(uint64 instruction); - std::string SEB(uint64 instruction); - std::string SEH(uint64 instruction); - std::string SEL_D(uint64 instruction); - std::string SEL_S(uint64 instruction); - std::string SELEQZ_D(uint64 instruction); - std::string SELEQZ_S(uint64 instruction); - std::string SELNEZ_D(uint64 instruction); - std::string SELNEZ_S(uint64 instruction); - std::string SEQI(uint64 instruction); - std::string SH_16_(uint64 instruction); - std::string SH_GP_(uint64 instruction); - std::string SH_S9_(uint64 instruction); - std::string SH_U12_(uint64 instruction); - std::string SHE(uint64 instruction); - std::string SHILO(uint64 instruction); - std::string SHILOV(uint64 instruction); - std::string SHLL_PH(uint64 instruction); - std::string SHLL_QB(uint64 instruction); - std::string SHLL_S_PH(uint64 instruction); - std::string SHLL_S_W(uint64 instruction); - std::string SHLLV_PH(uint64 instruction); - std::string SHLLV_QB(uint64 instruction); - std::string SHLLV_S_PH(uint64 instruction); - std::string SHLLV_S_W(uint64 instruction); - std::string SHRA_PH(uint64 instruction); - std::string SHRA_QB(uint64 instruction); - std::string SHRA_R_PH(uint64 instruction); - std::string SHRA_R_QB(uint64 instruction); - std::string SHRA_R_W(uint64 instruction); - std::string SHRAV_PH(uint64 instruction); - std::string SHRAV_QB(uint64 instruction); - std::string SHRAV_R_PH(uint64 instruction); - std::string SHRAV_R_QB(uint64 instruction); - std::string SHRAV_R_W(uint64 instruction); - std::string SHRL_PH(uint64 instruction); - std::string SHRL_QB(uint64 instruction); - std::string SHRLV_PH(uint64 instruction); - std::string SHRLV_QB(uint64 instruction); - std::string SHX(uint64 instruction); - std::string SHXS(uint64 instruction); - std::string SIGRIE(uint64 instruction); - std::string SLL_16_(uint64 instruction); - std::string SLL_32_(uint64 instruction); - std::string SLLV(uint64 instruction); - std::string SLT(uint64 instruction); - std::string SLTI(uint64 instruction); - std::string SLTIU(uint64 instruction); - std::string SLTU(uint64 instruction); - std::string SOV(uint64 instruction); - std::string SPECIAL2(uint64 instruction); - std::string SQRT_D(uint64 instruction); - std::string SQRT_S(uint64 instruction); - std::string SRA(uint64 instruction); - std::string SRAV(uint64 instruction); - std::string SRL_16_(uint64 instruction); - std::string SRL_32_(uint64 instruction); - std::string SRLV(uint64 instruction); - std::string SUB(uint64 instruction); - std::string SUB_D(uint64 instruction); - std::string SUB_S(uint64 instruction); - std::string SUBQ_PH(uint64 instruction); - std::string SUBQ_S_PH(uint64 instruction); - std::string SUBQ_S_W(uint64 instruction); - std::string SUBQH_PH(uint64 instruction); - std::string SUBQH_R_PH(uint64 instruction); - std::string SUBQH_R_W(uint64 instruction); - std::string SUBQH_W(uint64 instruction); - std::string SUBU_16_(uint64 instruction); - std::string SUBU_32_(uint64 instruction); - std::string SUBU_PH(uint64 instruction); - std::string SUBU_QB(uint64 instruction); - std::string SUBU_S_PH(uint64 instruction); - std::string SUBU_S_QB(uint64 instruction); - std::string SUBUH_QB(uint64 instruction); - std::string SUBUH_R_QB(uint64 instruction); - 
std::string SW_16_(uint64 instruction); - std::string SW_4X4_(uint64 instruction); - std::string SW_GP16_(uint64 instruction); - std::string SW_GP_(uint64 instruction); - std::string SW_S9_(uint64 instruction); - std::string SW_SP_(uint64 instruction); - std::string SW_U12_(uint64 instruction); - std::string SWC1_GP_(uint64 instruction); - std::string SWC1_S9_(uint64 instruction); - std::string SWC1_U12_(uint64 instruction); - std::string SWC1X(uint64 instruction); - std::string SWC1XS(uint64 instruction); - std::string SWC2(uint64 instruction); - std::string SWE(uint64 instruction); - std::string SWM(uint64 instruction); - std::string SWPC_48_(uint64 instruction); - std::string SWX(uint64 instruction); - std::string SWXS(uint64 instruction); - std::string SYNC(uint64 instruction); - std::string SYNCI(uint64 instruction); - std::string SYNCIE(uint64 instruction); - std::string SYSCALL_16_(uint64 instruction); - std::string SYSCALL_32_(uint64 instruction); - std::string TEQ(uint64 instruction); - std::string TLBGINV(uint64 instruction); - std::string TLBGINVF(uint64 instruction); - std::string TLBGP(uint64 instruction); - std::string TLBGR(uint64 instruction); - std::string TLBGWI(uint64 instruction); - std::string TLBGWR(uint64 instruction); - std::string TLBINV(uint64 instruction); - std::string TLBINVF(uint64 instruction); - std::string TLBP(uint64 instruction); - std::string TLBR(uint64 instruction); - std::string TLBWI(uint64 instruction); - std::string TLBWR(uint64 instruction); - std::string TNE(uint64 instruction); - std::string TRUNC_L_D(uint64 instruction); - std::string TRUNC_L_S(uint64 instruction); - std::string TRUNC_W_D(uint64 instruction); - std::string TRUNC_W_S(uint64 instruction); - std::string UALDM(uint64 instruction); - std::string UALH(uint64 instruction); - std::string UALWM(uint64 instruction); - std::string UASDM(uint64 instruction); - std::string UASH(uint64 instruction); - std::string UASWM(uint64 instruction); - std::string UDI(uint64 instruction); - std::string WAIT(uint64 instruction); - std::string WRDSP(uint64 instruction); - std::string WRPGPR(uint64 instruction); - std::string XOR_16_(uint64 instruction); - std::string XOR_32_(uint64 instruction); - std::string XORI(uint64 instruction); - std::string YIELD(uint64 instruction); - - static Pool P_SYSCALL[2]; - static Pool P_RI[4]; - static Pool P_ADDIU[2]; - static Pool P_TRAP[2]; - static Pool P_CMOVE[2]; - static Pool P_D_MT_VPE[2]; - static Pool P_E_MT_VPE[2]; - static Pool _P_MT_VPE[2]; - static Pool P_MT_VPE[8]; - static Pool P_DVP[2]; - static Pool P_SLTU[2]; - static Pool _POOL32A0[128]; - static Pool ADDQ__S__PH[2]; - static Pool MUL__S__PH[2]; - static Pool ADDQH__R__PH[2]; - static Pool ADDQH__R__W[2]; - static Pool ADDU__S__QB[2]; - static Pool ADDU__S__PH[2]; - static Pool ADDUH__R__QB[2]; - static Pool SHRAV__R__PH[2]; - static Pool SHRAV__R__QB[2]; - static Pool SUBQ__S__PH[2]; - static Pool SUBQH__R__PH[2]; - static Pool SUBQH__R__W[2]; - static Pool SUBU__S__QB[2]; - static Pool SUBU__S__PH[2]; - static Pool SHRA__R__PH[2]; - static Pool SUBUH__R__QB[2]; - static Pool SHLLV__S__PH[2]; - static Pool SHLL__S__PH[4]; - static Pool PRECR_SRA__R__PH_W[2]; - static Pool _POOL32A5[128]; - static Pool PP_LSX[16]; - static Pool PP_LSXS[16]; - static Pool P_LSX[2]; - static Pool POOL32Axf_1_0[4]; - static Pool POOL32Axf_1_1[4]; - static Pool POOL32Axf_1_3[4]; - static Pool POOL32Axf_1_4[2]; - static Pool MAQ_S_A__W_PHR[2]; - static Pool MAQ_S_A__W_PHL[2]; - static Pool POOL32Axf_1_5[2]; - static Pool 
POOL32Axf_1_7[4]; - static Pool POOL32Axf_1[8]; - static Pool POOL32Axf_2_DSP__0_7[8]; - static Pool POOL32Axf_2_DSP__8_15[8]; - static Pool POOL32Axf_2_DSP__16_23[8]; - static Pool POOL32Axf_2_DSP__24_31[8]; - static Pool POOL32Axf_2[4]; - static Pool POOL32Axf_4[128]; - static Pool POOL32Axf_5_group0[32]; - static Pool POOL32Axf_5_group1[32]; - static Pool ERETx[2]; - static Pool POOL32Axf_5_group3[32]; - static Pool POOL32Axf_5[4]; - static Pool SHRA__R__QB[2]; - static Pool POOL32Axf_7[8]; - static Pool POOL32Axf[8]; - static Pool _POOL32A7[8]; - static Pool P32A[8]; - static Pool P_GP_D[2]; - static Pool P_GP_W[4]; - static Pool POOL48I[32]; - static Pool PP_SR[4]; - static Pool P_SR_F[8]; - static Pool P_SR[2]; - static Pool P_SLL[5]; - static Pool P_SHIFT[16]; - static Pool P_ROTX[4]; - static Pool P_INS[4]; - static Pool P_EXT[4]; - static Pool P_U12[16]; - static Pool RINT_fmt[2]; - static Pool ADD_fmt0[2]; - static Pool SELEQZ_fmt[2]; - static Pool CLASS_fmt[2]; - static Pool SUB_fmt0[2]; - static Pool SELNEZ_fmt[2]; - static Pool MUL_fmt0[2]; - static Pool SEL_fmt[2]; - static Pool DIV_fmt0[2]; - static Pool ADD_fmt1[2]; - static Pool SUB_fmt1[2]; - static Pool MUL_fmt1[2]; - static Pool MADDF_fmt[2]; - static Pool DIV_fmt1[2]; - static Pool MSUBF_fmt[2]; - static Pool POOL32F_0[64]; - static Pool MIN_fmt[2]; - static Pool MAX_fmt[2]; - static Pool MINA_fmt[2]; - static Pool MAXA_fmt[2]; - static Pool CVT_L_fmt[2]; - static Pool RSQRT_fmt[2]; - static Pool FLOOR_L_fmt[2]; - static Pool CVT_W_fmt[2]; - static Pool SQRT_fmt[2]; - static Pool FLOOR_W_fmt[2]; - static Pool RECIP_fmt[2]; - static Pool CEIL_L_fmt[2]; - static Pool CEIL_W_fmt[2]; - static Pool TRUNC_L_fmt[2]; - static Pool TRUNC_W_fmt[2]; - static Pool ROUND_L_fmt[2]; - static Pool ROUND_W_fmt[2]; - static Pool POOL32Fxf_0[64]; - static Pool MOV_fmt[4]; - static Pool ABS_fmt[4]; - static Pool NEG_fmt[4]; - static Pool CVT_D_fmt[4]; - static Pool CVT_S_fmt[4]; - static Pool POOL32Fxf_1[32]; - static Pool POOL32Fxf[4]; - static Pool POOL32F_3[8]; - static Pool CMP_condn_S[32]; - static Pool CMP_condn_D[32]; - static Pool POOL32F_5[8]; - static Pool POOL32F[8]; - static Pool POOL32S_0[64]; - static Pool POOL32Sxf_4[128]; - static Pool POOL32Sxf[8]; - static Pool POOL32S_4[8]; - static Pool POOL32S[8]; - static Pool P_LUI[2]; - static Pool P_GP_LH[2]; - static Pool P_GP_SH[2]; - static Pool P_GP_CP1[4]; - static Pool P_GP_M64[4]; - static Pool P_GP_BH[8]; - static Pool P_LS_U12[16]; - static Pool P_PREF_S9_[2]; - static Pool P_LS_S0[16]; - static Pool ASET_ACLR[2]; - static Pool P_LL[4]; - static Pool P_SC[4]; - static Pool P_LLD[8]; - static Pool P_SCD[8]; - static Pool P_LS_S1[16]; - static Pool P_PREFE[2]; - static Pool P_LLE[4]; - static Pool P_SCE[4]; - static Pool P_LS_E0[16]; - static Pool P_LS_WM[2]; - static Pool P_LS_UAWM[2]; - static Pool P_LS_DM[2]; - static Pool P_LS_UADM[2]; - static Pool P_LS_S9[8]; - static Pool P_BAL[2]; - static Pool P_BALRSC[2]; - static Pool P_J[16]; - static Pool P_BR3A[32]; - static Pool P_BR1[4]; - static Pool P_BR2[4]; - static Pool P_BRI[8]; - static Pool P32[32]; - static Pool P16_SYSCALL[2]; - static Pool P16_RI[4]; - static Pool P16_MV[2]; - static Pool P16_SHIFT[2]; - static Pool POOL16C_00[4]; - static Pool POOL16C_0[2]; - static Pool P16C[2]; - static Pool P16_A1[2]; - static Pool P_ADDIU_RS5_[2]; - static Pool P16_A2[2]; - static Pool P16_ADDU[2]; - static Pool P16_JRC[2]; - static Pool P16_BR1[2]; - static Pool P16_BR[2]; - static Pool P16_SR[2]; - static Pool P16_4X4[4]; - 
static Pool P16_LB[4]; - static Pool P16_LH[4]; - static Pool P16[32]; - static Pool MAJOR[2]; - -}; - -#endif diff --git a/disas/nios2.c b/disas/nios2.c index c3e82140c7..98ac07d72e 100644 --- a/disas/nios2.c +++ b/disas/nios2.c @@ -3478,54 +3478,37 @@ nios2_disassemble (bfd_vma address, unsigned long opcode, instruction word at the address given, and prints the disassembled instruction on the stream info->stream using info->fprintf_func. */ -static int -print_insn_nios2 (bfd_vma address, disassemble_info *info, - enum bfd_endian endianness) +int print_insn_nios2(bfd_vma address, disassemble_info *info) { - bfd_byte buffer[INSNLEN]; - int status; + bfd_byte buffer[INSNLEN]; + int status; - status = (*info->read_memory_func) (address, buffer, INSNLEN, info); - if (status == 0) - { - unsigned long insn; - if (endianness == BFD_ENDIAN_BIG) - insn = (unsigned long) bfd_getb32 (buffer); - else - insn = (unsigned long) bfd_getl32 (buffer); - return nios2_disassemble (address, insn, info); + status = (*info->read_memory_func)(address, buffer, INSNLEN, info); + if (status == 0) { + unsigned long insn; + if (info->endian == BFD_ENDIAN_BIG) { + insn = (unsigned long) bfd_getb32(buffer); + } else { + insn = (unsigned long) bfd_getl32(buffer); + } + return nios2_disassemble(address, insn, info); } - /* We might have a 16-bit R2 instruction at the end of memory. Try that. */ - if (info->mach == bfd_mach_nios2r2) - { - status = (*info->read_memory_func) (address, buffer, 2, info); - if (status == 0) - { - unsigned long insn; - if (endianness == BFD_ENDIAN_BIG) - insn = (unsigned long) bfd_getb16 (buffer); - else - insn = (unsigned long) bfd_getl16 (buffer); - return nios2_disassemble (address, insn, info); - } + /* We might have a 16-bit R2 instruction at the end of memory. Try that. */ + if (info->mach == bfd_mach_nios2r2) { + status = (*info->read_memory_func)(address, buffer, 2, info); + if (status == 0) { + unsigned long insn; + if (info->endian == BFD_ENDIAN_BIG) { + insn = (unsigned long) bfd_getb16(buffer); + } else { + insn = (unsigned long) bfd_getl16(buffer); + } + return nios2_disassemble(address, insn, info); + } } - /* If we got here, we couldn't read anything. */ - (*info->memory_error_func) (status, address, info); - return -1; -} - -/* These two functions are the main entry points, accessed from - disassemble.c. */ -int -print_insn_big_nios2 (bfd_vma address, disassemble_info *info) -{ - return print_insn_nios2 (address, info, BFD_ENDIAN_BIG); -} - -int -print_insn_little_nios2 (bfd_vma address, disassemble_info *info) -{ - return print_insn_nios2 (address, info, BFD_ENDIAN_LITTLE); + /* If we got here, we couldn't read anything. */ + (*info->memory_error_func)(status, address, info); + return -1; } diff --git a/disas/ppc.c b/disas/ppc.c deleted file mode 100644 index 02be878198..0000000000 --- a/disas/ppc.c +++ /dev/null @@ -1,5435 +0,0 @@ -/* ppc-dis.c -- Disassemble PowerPC instructions - Copyright 1994, 1995, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 - Free Software Foundation, Inc. - Written by Ian Lance Taylor, Cygnus Support - -This file is part of GDB, GAS, and the GNU binutils. - -GDB, GAS, and the GNU binutils are free software; you can redistribute -them and/or modify them under the terms of the GNU General Public -License as published by the Free Software Foundation; either version -2, or (at your option) any later version. 
- -GDB, GAS, and the GNU binutils are distributed in the hope that they -will be useful, but WITHOUT ANY WARRANTY; without even the implied -warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See -the GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this file; see the file COPYING. If not, -see . */ -#include "qemu/osdep.h" -#include "disas/dis-asm.h" -#define BFD_DEFAULT_TARGET_SIZE 64 - -/* ppc.h -- Header file for PowerPC opcode table - Copyright 1994, 1995, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, - 2007 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Cygnus Support - -This file is part of GDB, GAS, and the GNU binutils. - -GDB, GAS, and the GNU binutils are free software; you can redistribute -them and/or modify them under the terms of the GNU General Public -License as published by the Free Software Foundation; either version -1, or (at your option) any later version. - -GDB, GAS, and the GNU binutils are distributed in the hope that they -will be useful, but WITHOUT ANY WARRANTY; without even the implied -warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See -the GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this file; see the file COPYING. If not, -see . */ - -/* The opcode table is an array of struct powerpc_opcode. */ - -struct powerpc_opcode -{ - /* The opcode name. */ - const char *name; - - /* The opcode itself. Those bits which will be filled in with - operands are zeroes. */ - unsigned long opcode; - - /* The opcode mask. This is used by the disassembler. This is a - mask containing ones indicating those bits which must match the - opcode field, and zeroes indicating those bits which need not - match (and are presumably filled in by operands). */ - unsigned long mask; - - /* One bit flags for the opcode. These are used to indicate which - specific processors support the instructions. The defined values - are listed below. */ - unsigned long flags; - - /* An array of operand codes. Each code is an index into the - operand table. They appear in the order which the operands must - appear in assembly code, and are terminated by a zero. */ - unsigned char operands[8]; -}; - -/* The table itself is sorted by major opcode number, and is otherwise - in the order in which the disassembler should consider - instructions. */ -extern const struct powerpc_opcode powerpc_opcodes[]; -extern const int powerpc_num_opcodes; - -/* Values defined for the flags field of a struct powerpc_opcode. */ - -/* Opcode is defined for the PowerPC architecture. */ -#define PPC_OPCODE_PPC 1 - -/* Opcode is defined for the POWER (RS/6000) architecture. */ -#define PPC_OPCODE_POWER 2 - -/* Opcode is defined for the POWER2 (Rios 2) architecture. */ -#define PPC_OPCODE_POWER2 4 - -/* Opcode is only defined on 32 bit architectures. */ -#define PPC_OPCODE_32 8 - -/* Opcode is only defined on 64 bit architectures. */ -#define PPC_OPCODE_64 0x10 - -/* Opcode is supported by the Motorola PowerPC 601 processor. The 601 - is assumed to support all PowerPC (PPC_OPCODE_PPC) instructions, - but it also supports many additional POWER instructions. */ -#define PPC_OPCODE_601 0x20 - -/* Opcode is supported in both the Power and PowerPC architectures - (ie, compiler's -mcpu=common or assembler's -mcom). 
*/ -#define PPC_OPCODE_COMMON 0x40 - -/* Opcode is supported for any Power or PowerPC platform (this is - for the assembler's -many option, and it eliminates duplicates). */ -#define PPC_OPCODE_ANY 0x80 - -/* Opcode is supported as part of the 64-bit bridge. */ -#define PPC_OPCODE_64_BRIDGE 0x100 - -/* Opcode is supported by Altivec Vector Unit */ -#define PPC_OPCODE_ALTIVEC 0x200 - -/* Opcode is supported by PowerPC 403 processor. */ -#define PPC_OPCODE_403 0x400 - -/* Opcode is supported by PowerPC BookE processor. */ -#define PPC_OPCODE_BOOKE 0x800 - -/* Opcode is only supported by 64-bit PowerPC BookE processor. */ -#define PPC_OPCODE_BOOKE64 0x1000 - -/* Opcode is supported by PowerPC 440 processor. */ -#define PPC_OPCODE_440 0x2000 - -/* Opcode is only supported by Power4 architecture. */ -#define PPC_OPCODE_POWER4 0x4000 - -/* Opcode isn't supported by Power4 architecture. */ -#define PPC_OPCODE_NOPOWER4 0x8000 - -/* Opcode is only supported by POWERPC Classic architecture. */ -#define PPC_OPCODE_CLASSIC 0x10000 - -/* Opcode is only supported by e500x2 Core. */ -#define PPC_OPCODE_SPE 0x20000 - -/* Opcode is supported by e500x2 Integer select APU. */ -#define PPC_OPCODE_ISEL 0x40000 - -/* Opcode is an e500 SPE floating point instruction. */ -#define PPC_OPCODE_EFS 0x80000 - -/* Opcode is supported by branch locking APU. */ -#define PPC_OPCODE_BRLOCK 0x100000 - -/* Opcode is supported by performance monitor APU. */ -#define PPC_OPCODE_PMR 0x200000 - -/* Opcode is supported by cache locking APU. */ -#define PPC_OPCODE_CACHELCK 0x400000 - -/* Opcode is supported by machine check APU. */ -#define PPC_OPCODE_RFMCI 0x800000 - -/* Opcode is only supported by Power5 architecture. */ -#define PPC_OPCODE_POWER5 0x1000000 - -/* Opcode is supported by PowerPC e300 family. */ -#define PPC_OPCODE_E300 0x2000000 - -/* Opcode is only supported by Power6 architecture. */ -#define PPC_OPCODE_POWER6 0x4000000 - -/* Opcode is only supported by PowerPC Cell family. */ -#define PPC_OPCODE_CELL 0x8000000 - -/* A macro to extract the major opcode from an instruction. */ -#define PPC_OP(i) (((i) >> 26) & 0x3f) - -/* The operands table is an array of struct powerpc_operand. */ - -struct powerpc_operand -{ - /* A bitmask of bits in the operand. */ - unsigned int bitm; - - /* How far the operand is left shifted in the instruction. - -1 to indicate that BITM and SHIFT cannot be used to determine - where the operand goes in the insn. */ - int shift; - - /* Insertion function. This is used by the assembler. To insert an - operand value into an instruction, check this field. - - If it is NULL, execute - i |= (op & o->bitm) << o->shift; - (i is the instruction which we are filling in, o is a pointer to - this structure, and op is the operand value). - - If this field is not NULL, then simply call it with the - instruction and the operand value. It will return the new value - of the instruction. If the ERRMSG argument is not NULL, then if - the operand value is illegal, *ERRMSG will be set to a warning - string (the operand will be inserted in any case). If the - operand value is legal, *ERRMSG will be unchanged (most operands - can accept any value). */ - unsigned long (*insert) - (unsigned long instruction, long op, int dialect, const char **errmsg); - - /* Extraction function. This is used by the disassembler. To - extract this operand type from an instruction, check this field. 
- - If it is NULL, compute - op = (i >> o->shift) & o->bitm; - if ((o->flags & PPC_OPERAND_SIGNED) != 0) - sign_extend (op); - (i is the instruction, o is a pointer to this structure, and op - is the result). - - If this field is not NULL, then simply call it with the - instruction value. It will return the value of the operand. If - the INVALID argument is not NULL, *INVALID will be set to - non-zero if this operand type can not actually be extracted from - this operand (i.e., the instruction does not match). If the - operand is valid, *INVALID will not be changed. */ - long (*extract) (unsigned long instruction, int dialect, int *invalid); - - /* One bit syntax flags. */ - unsigned long flags; -}; - -/* Elements in the table are retrieved by indexing with values from - the operands field of the powerpc_opcodes table. */ - -extern const struct powerpc_operand powerpc_operands[]; -extern const unsigned int num_powerpc_operands; - -/* Values defined for the flags field of a struct powerpc_operand. */ - -/* This operand takes signed values. */ -#define PPC_OPERAND_SIGNED (0x1) - -/* This operand takes signed values, but also accepts a full positive - range of values when running in 32 bit mode. That is, if bits is - 16, it takes any value from -0x8000 to 0xffff. In 64 bit mode, - this flag is ignored. */ -#define PPC_OPERAND_SIGNOPT (0x2) - -/* This operand does not actually exist in the assembler input. This - is used to support extended mnemonics such as mr, for which two - operands fields are identical. The assembler should call the - insert function with any op value. The disassembler should call - the extract function, ignore the return value, and check the value - placed in the valid argument. */ -#define PPC_OPERAND_FAKE (0x4) - -/* The next operand should be wrapped in parentheses rather than - separated from this one by a comma. This is used for the load and - store instructions which want their operands to look like - reg,displacement(reg) - */ -#define PPC_OPERAND_PARENS (0x8) - -/* This operand may use the symbolic names for the CR fields, which - are - lt 0 gt 1 eq 2 so 3 un 3 - cr0 0 cr1 1 cr2 2 cr3 3 - cr4 4 cr5 5 cr6 6 cr7 7 - These may be combined arithmetically, as in cr2*4+gt. These are - only supported on the PowerPC, not the POWER. */ -#define PPC_OPERAND_CR (0x10) - -/* This operand names a register. The disassembler uses this to print - register names with a leading 'r'. */ -#define PPC_OPERAND_GPR (0x20) - -/* Like PPC_OPERAND_GPR, but don't print a leading 'r' for r0. */ -#define PPC_OPERAND_GPR_0 (0x40) - -/* This operand names a floating point register. The disassembler - prints these with a leading 'f'. */ -#define PPC_OPERAND_FPR (0x80) - -/* This operand is a relative branch displacement. The disassembler - prints these symbolically if possible. */ -#define PPC_OPERAND_RELATIVE (0x100) - -/* This operand is an absolute branch address. The disassembler - prints these symbolically if possible. */ -#define PPC_OPERAND_ABSOLUTE (0x200) - -/* This operand is optional, and is zero if omitted. This is used for - example, in the optional BF field in the comparison instructions. The - assembler must count the number of operands remaining on the line, - and the number of operands remaining for the opcode, and decide - whether this operand is present or not. The disassembler should - print this operand out only if it is not zero. */ -#define PPC_OPERAND_OPTIONAL (0x400) - -/* This flag is only used with PPC_OPERAND_OPTIONAL. 
If this operand - is omitted, then for the next operand use this operand value plus - 1, ignoring the next operand field for the opcode. This wretched - hack is needed because the Power rotate instructions can take - either 4 or 5 operands. The disassembler should print this operand - out regardless of the PPC_OPERAND_OPTIONAL field. */ -#define PPC_OPERAND_NEXT (0x800) - -/* This operand should be regarded as a negative number for the - purposes of overflow checking (i.e., the normal most negative - number is disallowed and one more than the normal most positive - number is allowed). This flag will only be set for a signed - operand. */ -#define PPC_OPERAND_NEGATIVE (0x1000) - -/* This operand names a vector unit register. The disassembler - prints these with a leading 'v'. */ -#define PPC_OPERAND_VR (0x2000) - -/* This operand is for the DS field in a DS form instruction. */ -#define PPC_OPERAND_DS (0x4000) - -/* This operand is for the DQ field in a DQ form instruction. */ -#define PPC_OPERAND_DQ (0x8000) - -/* Valid range of operand is 0..n rather than 0..n-1. */ -#define PPC_OPERAND_PLUS1 (0x10000) - -/* The POWER and PowerPC assemblers use a few macros. We keep them - with the operands table for simplicity. The macro table is an - array of struct powerpc_macro. */ - -struct powerpc_macro -{ - /* The macro name. */ - const char *name; - - /* The number of operands the macro takes. */ - unsigned int operands; - - /* One bit flags for the opcode. These are used to indicate which - specific processors support the instructions. The values are the - same as those for the struct powerpc_opcode flags field. */ - unsigned long flags; - - /* A format string to turn the macro into a normal instruction. - Each %N in the string is replaced with operand number N (zero - based). */ - const char *format; -}; - -extern const struct powerpc_macro powerpc_macros[]; -extern const int powerpc_num_macros; - -/* ppc-opc.c -- PowerPC opcode list - Copyright 1994, 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004, - 2005, 2006, 2007 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Cygnus Support - - This file is part of GDB, GAS, and the GNU binutils. - - GDB, GAS, and the GNU binutils are free software; you can redistribute - them and/or modify them under the terms of the GNU General Public - License as published by the Free Software Foundation; either version - 2, or (at your option) any later version. - - GDB, GAS, and the GNU binutils are distributed in the hope that they - will be useful, but WITHOUT ANY WARRANTY; without even the implied - warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See - the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this file; see the file COPYING. - If not, see . */ - -/* This file holds the PowerPC opcode table. The opcode table - includes almost all of the extended instruction mnemonics. This - permits the disassembler to use them, and simplifies the assembler - logic, at the cost of increasing the table size. The table is - strictly constant data, so the compiler should be able to put it in - the .text section. - - This file also holds the operand table. All knowledge about - inserting operands into instructions and vice-versa is kept in this - file. */ - -/* Local insertion and extraction functions. 
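As a reading aid, the NULL insert/extract convention documented above for struct powerpc_operand amounts to the following sketch; generic_insert and generic_extract are hypothetical names rather than functions from this table, and the signed case is shown only for a plain 16-bit field (operands with shift == -1 always supply their own handlers).

static unsigned long
generic_insert (unsigned long insn, long value,
                const struct powerpc_operand *op)
{
  /* Default encoding: mask the value and shift it into place.  */
  return insn | ((value & op->bitm) << op->shift);
}

static long
generic_extract (unsigned long insn, const struct powerpc_operand *op)
{
  long value = (insn >> op->shift) & op->bitm;

  /* Signed fields are sign-extended; the 16-bit case mirrors the
     ((x ^ 0x8000) - 0x8000) idiom used by extract_bdm/extract_bdp.  */
  if ((op->flags & PPC_OPERAND_SIGNED) != 0 && op->bitm == 0xffff)
    value = (value ^ 0x8000) - 0x8000;
  return value;
}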
*/ - -static unsigned long insert_bat (unsigned long, long, int, const char **); -static long extract_bat (unsigned long, int, int *); -static unsigned long insert_bba (unsigned long, long, int, const char **); -static long extract_bba (unsigned long, int, int *); -static unsigned long insert_bdm (unsigned long, long, int, const char **); -static long extract_bdm (unsigned long, int, int *); -static unsigned long insert_bdp (unsigned long, long, int, const char **); -static long extract_bdp (unsigned long, int, int *); -static unsigned long insert_bo (unsigned long, long, int, const char **); -static long extract_bo (unsigned long, int, int *); -static unsigned long insert_boe (unsigned long, long, int, const char **); -static long extract_boe (unsigned long, int, int *); -static unsigned long insert_fxm (unsigned long, long, int, const char **); -static long extract_fxm (unsigned long, int, int *); -static unsigned long insert_mbe (unsigned long, long, int, const char **); -static long extract_mbe (unsigned long, int, int *); -static unsigned long insert_mb6 (unsigned long, long, int, const char **); -static long extract_mb6 (unsigned long, int, int *); -static long extract_nb (unsigned long, int, int *); -static unsigned long insert_nsi (unsigned long, long, int, const char **); -static long extract_nsi (unsigned long, int, int *); -static unsigned long insert_ral (unsigned long, long, int, const char **); -static unsigned long insert_ram (unsigned long, long, int, const char **); -static unsigned long insert_raq (unsigned long, long, int, const char **); -static unsigned long insert_ras (unsigned long, long, int, const char **); -static unsigned long insert_rbs (unsigned long, long, int, const char **); -static long extract_rbs (unsigned long, int, int *); -static unsigned long insert_sh6 (unsigned long, long, int, const char **); -static long extract_sh6 (unsigned long, int, int *); -static unsigned long insert_spr (unsigned long, long, int, const char **); -static long extract_spr (unsigned long, int, int *); -static unsigned long insert_sprg (unsigned long, long, int, const char **); -static long extract_sprg (unsigned long, int, int *); -static unsigned long insert_tbr (unsigned long, long, int, const char **); -static long extract_tbr (unsigned long, int, int *); - -/* The operands table. - - The fields are bitm, shift, insert, extract, flags. - - We used to put parens around the various additions, like the one - for BA just below. However, that caused trouble with feeble - compilers with a limit on depth of a parenthesized expression, like - (reportedly) the compiler in Microsoft Developer Studio 5. So we - omit the parens, since the macros are never used in a context where - the addition will be ambiguous. */ - -const struct powerpc_operand powerpc_operands[] = -{ - /* The zero index is used to indicate the end of the list of - operands. */ -#define UNUSED 0 - { 0, 0, NULL, NULL, 0 }, - - /* The BA field in an XL form instruction. */ -#define BA UNUSED + 1 - /* The BI field in a B form or XL form instruction. */ -#define BI BA -#define BI_MASK (0x1f << 16) - { 0x1f, 16, NULL, NULL, PPC_OPERAND_CR }, - - /* The BA field in an XL form instruction when it must be the same - as the BT field in the same instruction. */ -#define BAT BA + 1 - { 0x1f, 16, insert_bat, extract_bat, PPC_OPERAND_FAKE }, - - /* The BB field in an XL form instruction. 
*/ -#define BB BAT + 1 -#define BB_MASK (0x1f << 11) - { 0x1f, 11, NULL, NULL, PPC_OPERAND_CR }, - - /* The BB field in an XL form instruction when it must be the same - as the BA field in the same instruction. */ -#define BBA BB + 1 - { 0x1f, 11, insert_bba, extract_bba, PPC_OPERAND_FAKE }, - - /* The BD field in a B form instruction. The lower two bits are - forced to zero. */ -#define BD BBA + 1 - { 0xfffc, 0, NULL, NULL, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, - - /* The BD field in a B form instruction when absolute addressing is - used. */ -#define BDA BD + 1 - { 0xfffc, 0, NULL, NULL, PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED }, - - /* The BD field in a B form instruction when the - modifier is used. - This sets the y bit of the BO field appropriately. */ -#define BDM BDA + 1 - { 0xfffc, 0, insert_bdm, extract_bdm, - PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, - - /* The BD field in a B form instruction when the - modifier is used - and absolute address is used. */ -#define BDMA BDM + 1 - { 0xfffc, 0, insert_bdm, extract_bdm, - PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED }, - - /* The BD field in a B form instruction when the + modifier is used. - This sets the y bit of the BO field appropriately. */ -#define BDP BDMA + 1 - { 0xfffc, 0, insert_bdp, extract_bdp, - PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, - - /* The BD field in a B form instruction when the + modifier is used - and absolute addressing is used. */ -#define BDPA BDP + 1 - { 0xfffc, 0, insert_bdp, extract_bdp, - PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED }, - - /* The BF field in an X or XL form instruction. */ -#define BF BDPA + 1 - /* The CRFD field in an X form instruction. */ -#define CRFD BF - { 0x7, 23, NULL, NULL, PPC_OPERAND_CR }, - - /* The BF field in an X or XL form instruction. */ -#define BFF BF + 1 - { 0x7, 23, NULL, NULL, 0 }, - - /* An optional BF field. This is used for comparison instructions, - in which an omitted BF field is taken as zero. */ -#define OBF BFF + 1 - { 0x7, 23, NULL, NULL, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL }, - - /* The BFA field in an X or XL form instruction. */ -#define BFA OBF + 1 - { 0x7, 18, NULL, NULL, PPC_OPERAND_CR }, - - /* The BO field in a B form instruction. Certain values are - illegal. */ -#define BO BFA + 1 -#define BO_MASK (0x1f << 21) - { 0x1f, 21, insert_bo, extract_bo, 0 }, - - /* The BO field in a B form instruction when the + or - modifier is - used. This is like the BO field, but it must be even. */ -#define BOE BO + 1 - { 0x1e, 21, insert_boe, extract_boe, 0 }, - -#define BH BOE + 1 - { 0x3, 11, NULL, NULL, PPC_OPERAND_OPTIONAL }, - - /* The BT field in an X or XL form instruction. */ -#define BT BH + 1 - { 0x1f, 21, NULL, NULL, PPC_OPERAND_CR }, - - /* The condition register number portion of the BI field in a B form - or XL form instruction. This is used for the extended - conditional branch mnemonics, which set the lower two bits of the - BI field. This field is optional. */ -#define CR BT + 1 - { 0x7, 18, NULL, NULL, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL }, - - /* The CRB field in an X form instruction. */ -#define CRB CR + 1 - /* The MB field in an M form instruction. */ -#define MB CRB -#define MB_MASK (0x1f << 6) - { 0x1f, 6, NULL, NULL, 0 }, - - /* The CRFS field in an X form instruction. */ -#define CRFS CRB + 1 - { 0x7, 0, NULL, NULL, PPC_OPERAND_CR }, - - /* The CT field in an X form instruction. */ -#define CT CRFS + 1 - /* The MO field in an mbar instruction. 
*/ -#define MO CT - { 0x1f, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, - - /* The D field in a D form instruction. This is a displacement off - a register, and implies that the next operand is a register in - parentheses. */ -#define D CT + 1 - { 0xffff, 0, NULL, NULL, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED }, - - /* The DE field in a DE form instruction. This is like D, but is 12 - bits only. */ -#define DE D + 1 - { 0xfff, 4, NULL, NULL, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED }, - - /* The DES field in a DES form instruction. This is like DS, but is 14 - bits only (12 stored.) */ -#define DES DE + 1 - { 0x3ffc, 2, NULL, NULL, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED }, - - /* The DQ field in a DQ form instruction. This is like D, but the - lower four bits are forced to zero. */ -#define DQ DES + 1 - { 0xfff0, 0, NULL, NULL, - PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED | PPC_OPERAND_DQ }, - - /* The DS field in a DS form instruction. This is like D, but the - lower two bits are forced to zero. */ -#undef DS -#define DS DQ + 1 - { 0xfffc, 0, NULL, NULL, - PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED | PPC_OPERAND_DS }, - - /* The E field in a wrteei instruction. */ -#define E DS + 1 - { 0x1, 15, NULL, NULL, 0 }, - - /* The FL1 field in a POWER SC form instruction. */ -#define FL1 E + 1 - /* The U field in an X form instruction. */ -#define U FL1 - { 0xf, 12, NULL, NULL, 0 }, - - /* The FL2 field in a POWER SC form instruction. */ -#define FL2 FL1 + 1 - { 0x7, 2, NULL, NULL, 0 }, - - /* The FLM field in an XFL form instruction. */ -#define FLM FL2 + 1 - { 0xff, 17, NULL, NULL, 0 }, - - /* The FRA field in an X or A form instruction. */ -#define FRA FLM + 1 -#define FRA_MASK (0x1f << 16) - { 0x1f, 16, NULL, NULL, PPC_OPERAND_FPR }, - - /* The FRB field in an X or A form instruction. */ -#define FRB FRA + 1 -#define FRB_MASK (0x1f << 11) - { 0x1f, 11, NULL, NULL, PPC_OPERAND_FPR }, - - /* The FRC field in an A form instruction. */ -#define FRC FRB + 1 -#define FRC_MASK (0x1f << 6) - { 0x1f, 6, NULL, NULL, PPC_OPERAND_FPR }, - - /* The FRS field in an X form instruction or the FRT field in a D, X - or A form instruction. */ -#define FRS FRC + 1 -#define FRT FRS - { 0x1f, 21, NULL, NULL, PPC_OPERAND_FPR }, - - /* The FXM field in an XFX instruction. */ -#define FXM FRS + 1 - { 0xff, 12, insert_fxm, extract_fxm, 0 }, - - /* Power4 version for mfcr. */ -#define FXM4 FXM + 1 - { 0xff, 12, insert_fxm, extract_fxm, PPC_OPERAND_OPTIONAL }, - - /* The L field in a D or X form instruction. */ -#define L FXM4 + 1 - { 0x1, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, - - /* The LEV field in a POWER SVC form instruction. */ -#define SVC_LEV L + 1 - { 0x7f, 5, NULL, NULL, 0 }, - - /* The LEV field in an SC form instruction. */ -#define LEV SVC_LEV + 1 - { 0x7f, 5, NULL, NULL, PPC_OPERAND_OPTIONAL }, - - /* The LI field in an I form instruction. The lower two bits are - forced to zero. */ -#define LI LEV + 1 - { 0x3fffffc, 0, NULL, NULL, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, - - /* The LI field in an I form instruction when used as an absolute - address. */ -#define LIA LI + 1 - { 0x3fffffc, 0, NULL, NULL, PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED }, - - /* The LS field in an X (sync) form instruction. */ -#define LS LIA + 1 - { 0x3, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, - - /* The ME field in an M form instruction. 
*/ -#define ME LS + 1 -#define ME_MASK (0x1f << 1) - { 0x1f, 1, NULL, NULL, 0 }, - - /* The MB and ME fields in an M form instruction expressed a single - operand which is a bitmask indicating which bits to select. This - is a two operand form using PPC_OPERAND_NEXT. See the - description in opcode/ppc.h for what this means. */ -#define MBE ME + 1 - { 0x1f, 6, NULL, NULL, PPC_OPERAND_OPTIONAL | PPC_OPERAND_NEXT }, - { -1, 0, insert_mbe, extract_mbe, 0 }, - - /* The MB or ME field in an MD or MDS form instruction. The high - bit is wrapped to the low end. */ -#define MB6 MBE + 2 -#define ME6 MB6 -#define MB6_MASK (0x3f << 5) - { 0x3f, 5, insert_mb6, extract_mb6, 0 }, - - /* The NB field in an X form instruction. The value 32 is stored as - 0. */ -#define NB MB6 + 1 - { 0x1f, 11, NULL, extract_nb, PPC_OPERAND_PLUS1 }, - - /* The NSI field in a D form instruction. This is the same as the - SI field, only negated. */ -#define NSI NB + 1 - { 0xffff, 0, insert_nsi, extract_nsi, - PPC_OPERAND_NEGATIVE | PPC_OPERAND_SIGNED }, - - /* The RA field in an D, DS, DQ, X, XO, M, or MDS form instruction. */ -#define RA NSI + 1 -#define RA_MASK (0x1f << 16) - { 0x1f, 16, NULL, NULL, PPC_OPERAND_GPR }, - - /* As above, but 0 in the RA field means zero, not r0. */ -#define RA0 RA + 1 - { 0x1f, 16, NULL, NULL, PPC_OPERAND_GPR_0 }, - - /* The RA field in the DQ form lq instruction, which has special - value restrictions. */ -#define RAQ RA0 + 1 - { 0x1f, 16, insert_raq, NULL, PPC_OPERAND_GPR_0 }, - - /* The RA field in a D or X form instruction which is an updating - load, which means that the RA field may not be zero and may not - equal the RT field. */ -#define RAL RAQ + 1 - { 0x1f, 16, insert_ral, NULL, PPC_OPERAND_GPR_0 }, - - /* The RA field in an lmw instruction, which has special value - restrictions. */ -#define RAM RAL + 1 - { 0x1f, 16, insert_ram, NULL, PPC_OPERAND_GPR_0 }, - - /* The RA field in a D or X form instruction which is an updating - store or an updating floating point load, which means that the RA - field may not be zero. */ -#define RAS RAM + 1 - { 0x1f, 16, insert_ras, NULL, PPC_OPERAND_GPR_0 }, - - /* The RA field of the tlbwe instruction, which is optional. */ -#define RAOPT RAS + 1 - { 0x1f, 16, NULL, NULL, PPC_OPERAND_GPR | PPC_OPERAND_OPTIONAL }, - - /* The RB field in an X, XO, M, or MDS form instruction. */ -#define RB RAOPT + 1 -#define RB_MASK (0x1f << 11) - { 0x1f, 11, NULL, NULL, PPC_OPERAND_GPR }, - - /* The RB field in an X form instruction when it must be the same as - the RS field in the instruction. This is used for extended - mnemonics like mr. */ -#define RBS RB + 1 - { 0x1f, 11, insert_rbs, extract_rbs, PPC_OPERAND_FAKE }, - - /* The RS field in a D, DS, X, XFX, XS, M, MD or MDS form - instruction or the RT field in a D, DS, X, XFX or XO form - instruction. */ -#define RS RBS + 1 -#define RT RS -#define RT_MASK (0x1f << 21) - { 0x1f, 21, NULL, NULL, PPC_OPERAND_GPR }, - - /* The RS and RT fields of the DS form stq instruction, which have - special value restrictions. */ -#define RSQ RS + 1 -#define RTQ RSQ - { 0x1e, 21, NULL, NULL, PPC_OPERAND_GPR_0 }, - - /* The RS field of the tlbwe instruction, which is optional. */ -#define RSO RSQ + 1 -#define RTO RSO - { 0x1f, 21, NULL, NULL, PPC_OPERAND_GPR | PPC_OPERAND_OPTIONAL }, - - /* The SH field in an X or M form instruction. */ -#define SH RSO + 1 -#define SH_MASK (0x1f << 11) - /* The other UIMM field in a EVX form instruction. 
*/ -#define EVUIMM SH - { 0x1f, 11, NULL, NULL, 0 }, - - /* The SH field in an MD form instruction. This is split. */ -#define SH6 SH + 1 -#define SH6_MASK ((0x1f << 11) | (1 << 1)) - { 0x3f, -1, insert_sh6, extract_sh6, 0 }, - - /* The SH field of the tlbwe instruction, which is optional. */ -#define SHO SH6 + 1 - { 0x1f, 11, NULL, NULL, PPC_OPERAND_OPTIONAL }, - - /* The SI field in a D form instruction. */ -#define SI SHO + 1 - { 0xffff, 0, NULL, NULL, PPC_OPERAND_SIGNED }, - - /* The SI field in a D form instruction when we accept a wide range - of positive values. */ -#define SISIGNOPT SI + 1 - { 0xffff, 0, NULL, NULL, PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT }, - - /* The SPR field in an XFX form instruction. This is flipped--the - lower 5 bits are stored in the upper 5 and vice- versa. */ -#define SPR SISIGNOPT + 1 -#define PMR SPR -#define SPR_MASK (0x3ff << 11) - { 0x3ff, 11, insert_spr, extract_spr, 0 }, - - /* The BAT index number in an XFX form m[ft]ibat[lu] instruction. */ -#define SPRBAT SPR + 1 -#define SPRBAT_MASK (0x3 << 17) - { 0x3, 17, NULL, NULL, 0 }, - - /* The SPRG register number in an XFX form m[ft]sprg instruction. */ -#define SPRG SPRBAT + 1 - { 0x1f, 16, insert_sprg, extract_sprg, 0 }, - - /* The SR field in an X form instruction. */ -#define SR SPRG + 1 - { 0xf, 16, NULL, NULL, 0 }, - - /* The STRM field in an X AltiVec form instruction. */ -#define STRM SR + 1 - { 0x3, 21, NULL, NULL, 0 }, - - /* The SV field in a POWER SC form instruction. */ -#define SV STRM + 1 - { 0x3fff, 2, NULL, NULL, 0 }, - - /* The TBR field in an XFX form instruction. This is like the SPR - field, but it is optional. */ -#define TBR SV + 1 - { 0x3ff, 11, insert_tbr, extract_tbr, PPC_OPERAND_OPTIONAL }, - - /* The TO field in a D or X form instruction. */ -#define TO TBR + 1 -#define TO_MASK (0x1f << 21) - { 0x1f, 21, NULL, NULL, 0 }, - - /* The UI field in a D form instruction. */ -#define UI TO + 1 - { 0xffff, 0, NULL, NULL, 0 }, - - /* The VA field in a VA, VX or VXR form instruction. */ -#define VA UI + 1 - { 0x1f, 16, NULL, NULL, PPC_OPERAND_VR }, - - /* The VB field in a VA, VX or VXR form instruction. */ -#define VB VA + 1 - { 0x1f, 11, NULL, NULL, PPC_OPERAND_VR }, - - /* The VC field in a VA form instruction. */ -#define VC VB + 1 - { 0x1f, 6, NULL, NULL, PPC_OPERAND_VR }, - - /* The VD or VS field in a VA, VX, VXR or X form instruction. */ -#define VD VC + 1 -#define VS VD - { 0x1f, 21, NULL, NULL, PPC_OPERAND_VR }, - - /* The SIMM field in a VX form instruction. */ -#define SIMM VD + 1 - { 0x1f, 16, NULL, NULL, PPC_OPERAND_SIGNED}, - - /* The UIMM field in a VX form instruction, and TE in Z form. */ -#define UIMM SIMM + 1 -#define TE UIMM - { 0x1f, 16, NULL, NULL, 0 }, - - /* The SHB field in a VA form instruction. */ -#define SHB UIMM + 1 - { 0xf, 6, NULL, NULL, 0 }, - - /* The other UIMM field in a half word EVX form instruction. */ -#define EVUIMM_2 SHB + 1 - { 0x3e, 10, NULL, NULL, PPC_OPERAND_PARENS }, - - /* The other UIMM field in a word EVX form instruction. */ -#define EVUIMM_4 EVUIMM_2 + 1 - { 0x7c, 9, NULL, NULL, PPC_OPERAND_PARENS }, - - /* The other UIMM field in a double EVX form instruction. */ -#define EVUIMM_8 EVUIMM_4 + 1 - { 0xf8, 8, NULL, NULL, PPC_OPERAND_PARENS }, - - /* The WS field. */ -#define WS EVUIMM_8 + 1 - { 0x7, 11, NULL, NULL, 0 }, - - /* The L field in an mtmsrd or A form instruction or W in an X form. 
*/ -#define A_L WS + 1 -#define W A_L - { 0x1, 16, NULL, NULL, PPC_OPERAND_OPTIONAL }, - -#define RMC A_L + 1 - { 0x3, 9, NULL, NULL, 0 }, - -#define R RMC + 1 - { 0x1, 16, NULL, NULL, 0 }, - -#define SP R + 1 - { 0x3, 19, NULL, NULL, 0 }, - -#define S SP + 1 - { 0x1, 20, NULL, NULL, 0 }, - - /* SH field starting at bit position 16. */ -#define SH16 S + 1 - /* The DCM and DGM fields in a Z form instruction. */ -#define DCM SH16 -#define DGM DCM - { 0x3f, 10, NULL, NULL, 0 }, - - /* The EH field in larx instruction. */ -#define EH SH16 + 1 - { 0x1, 0, NULL, NULL, PPC_OPERAND_OPTIONAL }, - - /* The L field in an mtfsf or XFL form instruction. */ -#define XFL_L EH + 1 - { 0x1, 25, NULL, NULL, PPC_OPERAND_OPTIONAL}, -}; - -const unsigned int num_powerpc_operands = (sizeof (powerpc_operands) - / sizeof (powerpc_operands[0])); - -/* The functions used to insert and extract complicated operands. */ - -/* The BA field in an XL form instruction when it must be the same as - the BT field in the same instruction. This operand is marked FAKE. - The insertion function just copies the BT field into the BA field, - and the extraction function just checks that the fields are the - same. */ - -static unsigned long -insert_bat (unsigned long insn, - long value ATTRIBUTE_UNUSED, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg ATTRIBUTE_UNUSED) -{ - return insn | (((insn >> 21) & 0x1f) << 16); -} - -static long -extract_bat (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, - int *invalid) -{ - if (((insn >> 21) & 0x1f) != ((insn >> 16) & 0x1f)) - *invalid = 1; - return 0; -} - -/* The BB field in an XL form instruction when it must be the same as - the BA field in the same instruction. This operand is marked FAKE. - The insertion function just copies the BA field into the BB field, - and the extraction function just checks that the fields are the - same. */ - -static unsigned long -insert_bba (unsigned long insn, - long value ATTRIBUTE_UNUSED, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg ATTRIBUTE_UNUSED) -{ - return insn | (((insn >> 16) & 0x1f) << 11); -} - -static long -extract_bba (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, - int *invalid) -{ - if (((insn >> 16) & 0x1f) != ((insn >> 11) & 0x1f)) - *invalid = 1; - return 0; -} - -/* The BD field in a B form instruction when the - modifier is used. - This modifier means that the branch is not expected to be taken. - For chips built to versions of the architecture prior to version 2 - (ie. not Power4 compatible), we set the y bit of the BO field to 1 - if the offset is negative. When extracting, we require that the y - bit be 1 and that the offset be positive, since if the y bit is 0 - we just want to print the normal form of the instruction. - Power4 compatible targets use two bits, "a", and "t", instead of - the "y" bit. "at" == 00 => no hint, "at" == 01 => unpredictable, - "at" == 10 => not taken, "at" == 11 => taken. The "t" bit is 00001 - in BO field, the "a" bit is 00010 for branch on CR(BI) and 01000 - for branch on CTR. We only handle the taken/not-taken hint here. - Note that we don't relax the conditions tested here when - disassembling with -Many because insns using extract_bdm and - extract_bdp always occur in pairs. One or the other will always - be valid. 
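Condensed, the pre-POWER4 hint handling described above comes down to setting the y bit from the sign of the displacement; set_y_hint is a hypothetical summary of what insert_bdm (the '-' modifier, not taken) and insert_bdp (the '+' modifier, taken) do below.

static unsigned long
set_y_hint (unsigned long insn, long displacement, int expect_taken)
{
  int backward = (displacement & 0x8000) != 0;

  /* The default static prediction takes backward branches and falls
     through forward ones; y = 1 reverses that prediction.  */
  if (backward != expect_taken)
    insn |= 1 << 21;
  return insn;
}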
*/ - -static unsigned long -insert_bdm (unsigned long insn, - long value, - int dialect, - const char **errmsg ATTRIBUTE_UNUSED) -{ - if ((dialect & PPC_OPCODE_POWER4) == 0) - { - if ((value & 0x8000) != 0) - insn |= 1 << 21; - } - else - { - if ((insn & (0x14 << 21)) == (0x04 << 21)) - insn |= 0x02 << 21; - else if ((insn & (0x14 << 21)) == (0x10 << 21)) - insn |= 0x08 << 21; - } - return insn | (value & 0xfffc); -} - -static long -extract_bdm (unsigned long insn, - int dialect, - int *invalid) -{ - if ((dialect & PPC_OPCODE_POWER4) == 0) - { - if (((insn & (1 << 21)) == 0) != ((insn & (1 << 15)) == 0)) - *invalid = 1; - } - else - { - if ((insn & (0x17 << 21)) != (0x06 << 21) - && (insn & (0x1d << 21)) != (0x18 << 21)) - *invalid = 1; - } - - return ((insn & 0xfffc) ^ 0x8000) - 0x8000; -} - -/* The BD field in a B form instruction when the + modifier is used. - This is like BDM, above, except that the branch is expected to be - taken. */ - -static unsigned long -insert_bdp (unsigned long insn, - long value, - int dialect, - const char **errmsg ATTRIBUTE_UNUSED) -{ - if ((dialect & PPC_OPCODE_POWER4) == 0) - { - if ((value & 0x8000) == 0) - insn |= 1 << 21; - } - else - { - if ((insn & (0x14 << 21)) == (0x04 << 21)) - insn |= 0x03 << 21; - else if ((insn & (0x14 << 21)) == (0x10 << 21)) - insn |= 0x09 << 21; - } - return insn | (value & 0xfffc); -} - -static long -extract_bdp (unsigned long insn, - int dialect, - int *invalid) -{ - if ((dialect & PPC_OPCODE_POWER4) == 0) - { - if (((insn & (1 << 21)) == 0) == ((insn & (1 << 15)) == 0)) - *invalid = 1; - } - else - { - if ((insn & (0x17 << 21)) != (0x07 << 21) - && (insn & (0x1d << 21)) != (0x19 << 21)) - *invalid = 1; - } - - return ((insn & 0xfffc) ^ 0x8000) - 0x8000; -} - -/* Check for legal values of a BO field. */ - -static int -valid_bo (long value, int dialect, int extract) -{ - if ((dialect & PPC_OPCODE_POWER4) == 0) - { - int valid; - /* Certain encodings have bits that are required to be zero. - These are (z must be zero, y may be anything): - 001zy - 011zy - 1z00y - 1z01y - 1z1zz - */ - switch (value & 0x14) - { - default: - case 0: - valid = 1; - break; - case 0x4: - valid = (value & 0x2) == 0; - break; - case 0x10: - valid = (value & 0x8) == 0; - break; - case 0x14: - valid = value == 0x14; - break; - } - /* When disassembling with -Many, accept power4 encodings too. */ - if (valid - || (dialect & PPC_OPCODE_ANY) == 0 - || !extract) - return valid; - } - - /* Certain encodings have bits that are required to be zero. - These are (z must be zero, a & t may be anything): - 0000z - 0001z - 0100z - 0101z - 001at - 011at - 1a00t - 1a01t - 1z1zz - */ - if ((value & 0x14) == 0) - return (value & 0x1) == 0; - else if ((value & 0x14) == 0x14) - return value == 0x14; - else - return 1; -} - -/* The BO field in a B form instruction. Warn about attempts to set - the field to an illegal value. */ - -static unsigned long -insert_bo (unsigned long insn, - long value, - int dialect, - const char **errmsg) -{ - if (!valid_bo (value, dialect, 0)) - *errmsg = "invalid conditional option"; - return insn | ((value & 0x1f) << 21); -} - -static long -extract_bo (unsigned long insn, - int dialect, - int *invalid) -{ - long value; - - value = (insn >> 21) & 0x1f; - if (!valid_bo (value, dialect, 1)) - *invalid = 1; - return value; -} - -/* The BO field in a B form instruction when the + or - modifier is - used. This is like the BO field, but it must be even. When - extracting it, we force it to be even. 
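A few worked examples of the BO validity rules just listed, written as hypothetical assertions against valid_bo for a pre-POWER4 dialect:

#include <assert.h>

static void
check_bo_examples (void)
{
  assert (valid_bo (0x14, 0, 0) == 1);   /* 1z1zz: "branch always", z bits clear */
  assert (valid_bo (0x16, 0, 0) == 0);   /* same pattern with a z bit set */
  assert (valid_bo (0x0c, 0, 0) == 1);   /* 011zy: branch if condition true */
}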
*/ - -static unsigned long -insert_boe (unsigned long insn, - long value, - int dialect, - const char **errmsg) -{ - if (!valid_bo (value, dialect, 0)) - *errmsg = "invalid conditional option"; - else if ((value & 1) != 0) - *errmsg = "attempt to set y bit when using + or - modifier"; - - return insn | ((value & 0x1f) << 21); -} - -static long -extract_boe (unsigned long insn, - int dialect, - int *invalid) -{ - long value; - - value = (insn >> 21) & 0x1f; - if (!valid_bo (value, dialect, 1)) - *invalid = 1; - return value & 0x1e; -} - -/* FXM mask in mfcr and mtcrf instructions. */ - -static unsigned long -insert_fxm (unsigned long insn, - long value, - int dialect, - const char **errmsg) -{ - /* If we're handling the mfocrf and mtocrf insns ensure that exactly - one bit of the mask field is set. */ - if ((insn & (1 << 20)) != 0) - { - if (value == 0 || (value & -value) != value) - { - *errmsg = "invalid mask field"; - value = 0; - } - } - - /* If the optional field on mfcr is missing that means we want to use - the old form of the instruction that moves the whole cr. In that - case we'll have VALUE zero. There doesn't seem to be a way to - distinguish this from the case where someone writes mfcr %r3,0. */ - else if (value == 0) - ; - - /* If only one bit of the FXM field is set, we can use the new form - of the instruction, which is faster. Unlike the Power4 branch hint - encoding, this is not backward compatible. Do not generate the - new form unless -mpower4 has been given, or -many and the two - operand form of mfcr was used. */ - else if ((value & -value) == value - && ((dialect & PPC_OPCODE_POWER4) != 0 - || ((dialect & PPC_OPCODE_ANY) != 0 - && (insn & (0x3ff << 1)) == 19 << 1))) - insn |= 1 << 20; - - /* Any other value on mfcr is an error. */ - else if ((insn & (0x3ff << 1)) == 19 << 1) - { - *errmsg = "ignoring invalid mfcr mask"; - value = 0; - } - - return insn | ((value & 0xff) << 12); -} - -static long -extract_fxm (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, - int *invalid) -{ - long mask = (insn >> 12) & 0xff; - - /* Is this a Power4 insn? */ - if ((insn & (1 << 20)) != 0) - { - /* Exactly one bit of MASK should be set. */ - if (mask == 0 || (mask & -mask) != mask) - *invalid = 1; - } - - /* Check that non-power4 form of mfcr has a zero MASK. */ - else if ((insn & (0x3ff << 1)) == 19 << 1) - { - if (mask != 0) - *invalid = 1; - } - - return mask; -} - -/* The MB and ME fields in an M form instruction expressed as a single - operand which is itself a bitmask. The extraction function always - marks it as invalid, since we never want to recognize an - instruction which uses a field of this type. */ - -static unsigned long -insert_mbe (unsigned long insn, - long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg) -{ - unsigned long uval, mask; - int mb, me, mx, count, last; - - uval = value; - - if (uval == 0) - { - *errmsg = "illegal bitmask"; - return insn; - } - - mb = 0; - me = 32; - if ((uval & 1) != 0) - last = 1; - else - last = 0; - count = 0; - - /* mb: location of last 0->1 transition */ - /* me: location of last 1->0 transition */ - /* count: # transitions */ - - for (mx = 0, mask = 1L << 31; mx < 32; ++mx, mask >>= 1) - { - if ((uval & mask) && !last) - { - ++count; - mb = mx; - last = 1; - } - else if (!(uval & mask) && last) - { - ++count; - me = mx; - last = 0; - } - } - if (me == 0) - me = 32; - - if (count != 2 && (count != 0 || ! 
last)) - *errmsg = "illegal bitmask"; - - return insn | (mb << 6) | ((me - 1) << 1); -} - -static long -extract_mbe (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, - int *invalid) -{ - long ret; - int mb, me; - int i; - - *invalid = 1; - - mb = (insn >> 6) & 0x1f; - me = (insn >> 1) & 0x1f; - if (mb < me + 1) - { - ret = 0; - for (i = mb; i <= me; i++) - ret |= 1L << (31 - i); - } - else if (mb == me + 1) - ret = ~0; - else /* (mb > me + 1) */ - { - ret = ~0; - for (i = me + 1; i < mb; i++) - ret &= ~(1L << (31 - i)); - } - return ret; -} - -/* The MB or ME field in an MD or MDS form instruction. The high bit - is wrapped to the low end. */ - -static unsigned long -insert_mb6 (unsigned long insn, - long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg ATTRIBUTE_UNUSED) -{ - return insn | ((value & 0x1f) << 6) | (value & 0x20); -} - -static long -extract_mb6 (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, - int *invalid ATTRIBUTE_UNUSED) -{ - return ((insn >> 6) & 0x1f) | (insn & 0x20); -} - -/* The NB field in an X form instruction. The value 32 is stored as - 0. */ - -static long -extract_nb (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, - int *invalid ATTRIBUTE_UNUSED) -{ - long ret; - - ret = (insn >> 11) & 0x1f; - if (ret == 0) - ret = 32; - return ret; -} - -/* The NSI field in a D form instruction. This is the same as the SI - field, only negated. The extraction function always marks it as - invalid, since we never want to recognize an instruction which uses - a field of this type. */ - -static unsigned long -insert_nsi (unsigned long insn, - long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg ATTRIBUTE_UNUSED) -{ - return insn | (-value & 0xffff); -} - -static long -extract_nsi (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, - int *invalid) -{ - *invalid = 1; - return -(((insn & 0xffff) ^ 0x8000) - 0x8000); -} - -/* The RA field in a D or X form instruction which is an updating - load, which means that the RA field may not be zero and may not - equal the RT field. */ - -static unsigned long -insert_ral (unsigned long insn, - long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg) -{ - if (value == 0 - || (unsigned long) value == ((insn >> 21) & 0x1f)) - *errmsg = "invalid register operand when updating"; - return insn | ((value & 0x1f) << 16); -} - -/* The RA field in an lmw instruction, which has special value - restrictions. */ - -static unsigned long -insert_ram (unsigned long insn, - long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg) -{ - if ((unsigned long) value >= ((insn >> 21) & 0x1f)) - *errmsg = "index register in load range"; - return insn | ((value & 0x1f) << 16); -} - -/* The RA field in the DQ form lq instruction, which has special - value restrictions. */ - -static unsigned long -insert_raq (unsigned long insn, - long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg) -{ - long rtvalue = (insn & RT_MASK) >> 21; - - if (value == rtvalue) - *errmsg = "source and target register operands must be different"; - return insn | ((value & 0x1f) << 16); -} - -/* The RA field in a D or X form instruction which is an updating - store or an updating floating point load, which means that the RA - field may not be zero. 
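A worked example for the MB/ME bitmask operand handled by insert_mbe and extract_mbe above: the mask 0x00ffff00 selects bits 8..23 in big-endian numbering, so MB becomes 8 and ME becomes 23, exactly as they would be written for rlwinm. The check below is hypothetical and only illustrates the round trip (extract_mbe still flags the operand as invalid, as documented).

#include <assert.h>

static void
check_mbe_example (void)
{
  const char *errmsg = NULL;
  int invalid = 0;
  unsigned long insn = insert_mbe (0, 0x00ffff00, 0, &errmsg);

  /* MB lands at bit 6 and ME at bit 1 of the M form instruction.  */
  assert (((insn >> 6) & 0x1f) == 8);
  assert (((insn >> 1) & 0x1f) == 23);

  /* The extractor rebuilds the original mask.  */
  assert (extract_mbe (insn, 0, &invalid) == (long) 0x00ffff00);
}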
*/ - -static unsigned long -insert_ras (unsigned long insn, - long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg) -{ - if (value == 0) - *errmsg = "invalid register operand when updating"; - return insn | ((value & 0x1f) << 16); -} - -/* The RB field in an X form instruction when it must be the same as - the RS field in the instruction. This is used for extended - mnemonics like mr. This operand is marked FAKE. The insertion - function just copies the BT field into the BA field, and the - extraction function just checks that the fields are the same. */ - -static unsigned long -insert_rbs (unsigned long insn, - long value ATTRIBUTE_UNUSED, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg ATTRIBUTE_UNUSED) -{ - return insn | (((insn >> 21) & 0x1f) << 11); -} - -static long -extract_rbs (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, - int *invalid) -{ - if (((insn >> 21) & 0x1f) != ((insn >> 11) & 0x1f)) - *invalid = 1; - return 0; -} - -/* The SH field in an MD form instruction. This is split. */ - -static unsigned long -insert_sh6 (unsigned long insn, - long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg ATTRIBUTE_UNUSED) -{ - return insn | ((value & 0x1f) << 11) | ((value & 0x20) >> 4); -} - -static long -extract_sh6 (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, - int *invalid ATTRIBUTE_UNUSED) -{ - return ((insn >> 11) & 0x1f) | ((insn << 4) & 0x20); -} - -/* The SPR field in an XFX form instruction. This is flipped--the - lower 5 bits are stored in the upper 5 and vice- versa. */ - -static unsigned long -insert_spr (unsigned long insn, - long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg ATTRIBUTE_UNUSED) -{ - return insn | ((value & 0x1f) << 16) | ((value & 0x3e0) << 6); -} - -static long -extract_spr (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, - int *invalid ATTRIBUTE_UNUSED) -{ - return ((insn >> 16) & 0x1f) | ((insn >> 6) & 0x3e0); -} - -/* Some dialects have 8 SPRG registers instead of the standard 4. */ - -static unsigned long -insert_sprg (unsigned long insn, - long value, - int dialect, - const char **errmsg) -{ - /* This check uses PPC_OPCODE_403 because PPC405 is later defined - as a synonym. If ever a 405 specific dialect is added this - check should use that instead. */ - if (value > 7 - || (value > 3 - && (dialect & (PPC_OPCODE_BOOKE | PPC_OPCODE_403)) == 0)) - *errmsg = "invalid sprg number"; - - /* If this is mfsprg4..7 then use spr 260..263 which can be read in - user mode. Anything else must use spr 272..279. */ - if (value <= 3 || (insn & 0x100) != 0) - value |= 0x10; - - return insn | ((value & 0x17) << 16); -} - -static long -extract_sprg (unsigned long insn, - int dialect, - int *invalid) -{ - unsigned long val = (insn >> 16) & 0x1f; - - /* mfsprg can use 260..263 and 272..279. mtsprg only uses spr 272..279 - If not BOOKE or 405, then both use only 272..275. */ - if (val <= 3 - || (val < 0x10 && (insn & 0x100) != 0) - || (val - 0x10 > 3 - && (dialect & (PPC_OPCODE_BOOKE | PPC_OPCODE_403)) == 0)) - *invalid = 1; - return val & 7; -} - -/* The TBR field in an XFX instruction. This is just like SPR, but it - is optional. When TBR is omitted, it must be inserted as 268 (the - magic number of the TB register). These functions treat 0 - (indicating an omitted optional operand) as 268. This means that - ``mftb 4,0'' is not handled correctly. This does not matter very - much, since the architecture manual does not define mftb as - accepting any values other than 268 or 269. 
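The split SPR encoding handled by insert_spr/extract_spr above can be round-tripped as follows; the check is hypothetical and uses SPR 268 (the TB register discussed above), whose five-bit halves are 0x08 and 0x0c.

#include <assert.h>

static void
check_spr_example (void)
{
  int invalid = 0;
  unsigned long insn = insert_spr (0, 268, 0, NULL);

  /* The low five bits land at bit 16, the high five bits at bit 11,
     and extraction swaps them back.  */
  assert (extract_spr (insn, 0, &invalid) == 268);
}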
*/ - -#define TB (268) - -static unsigned long -insert_tbr (unsigned long insn, - long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg ATTRIBUTE_UNUSED) -{ - if (value == 0) - value = TB; - return insn | ((value & 0x1f) << 16) | ((value & 0x3e0) << 6); -} - -static long -extract_tbr (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, - int *invalid ATTRIBUTE_UNUSED) -{ - long ret; - - ret = ((insn >> 16) & 0x1f) | ((insn >> 6) & 0x3e0); - if (ret == TB) - ret = 0; - return ret; -} - -/* Macros used to form opcodes. */ - -/* The main opcode. */ -#define OP(x) ((((unsigned long)(x)) & 0x3f) << 26) -#define OP_MASK OP (0x3f) - -/* The main opcode combined with a trap code in the TO field of a D - form instruction. Used for extended mnemonics for the trap - instructions. */ -#define OPTO(x,to) (OP (x) | ((((unsigned long)(to)) & 0x1f) << 21)) -#define OPTO_MASK (OP_MASK | TO_MASK) - -/* The main opcode combined with a comparison size bit in the L field - of a D form or X form instruction. Used for extended mnemonics for - the comparison instructions. */ -#define OPL(x,l) (OP (x) | ((((unsigned long)(l)) & 1) << 21)) -#define OPL_MASK OPL (0x3f,1) - -/* An A form instruction. */ -#define A(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 1) | (((unsigned long)(rc)) & 1)) -#define A_MASK A (0x3f, 0x1f, 1) - -/* An A_MASK with the FRB field fixed. */ -#define AFRB_MASK (A_MASK | FRB_MASK) - -/* An A_MASK with the FRC field fixed. */ -#define AFRC_MASK (A_MASK | FRC_MASK) - -/* An A_MASK with the FRA and FRC fields fixed. */ -#define AFRAFRC_MASK (A_MASK | FRA_MASK | FRC_MASK) - -/* An AFRAFRC_MASK, but with L bit clear. */ -#define AFRALFRC_MASK (AFRAFRC_MASK & ~((unsigned long) 1 << 16)) - -/* A B form instruction. */ -#define B(op, aa, lk) (OP (op) | ((((unsigned long)(aa)) & 1) << 1) | ((lk) & 1)) -#define B_MASK B (0x3f, 1, 1) - -/* A B form instruction setting the BO field. */ -#define BBO(op, bo, aa, lk) (B ((op), (aa), (lk)) | ((((unsigned long)(bo)) & 0x1f) << 21)) -#define BBO_MASK BBO (0x3f, 0x1f, 1, 1) - -/* A BBO_MASK with the y bit of the BO field removed. This permits - matching a conditional branch regardless of the setting of the y - bit. Similarly for the 'at' bits used for power4 branch hints. */ -#define Y_MASK (((unsigned long) 1) << 21) -#define AT1_MASK (((unsigned long) 3) << 21) -#define AT2_MASK (((unsigned long) 9) << 21) -#define BBOY_MASK (BBO_MASK &~ Y_MASK) -#define BBOAT_MASK (BBO_MASK &~ AT1_MASK) - -/* A B form instruction setting the BO field and the condition bits of - the BI field. */ -#define BBOCB(op, bo, cb, aa, lk) \ - (BBO ((op), (bo), (aa), (lk)) | ((((unsigned long)(cb)) & 0x3) << 16)) -#define BBOCB_MASK BBOCB (0x3f, 0x1f, 0x3, 1, 1) - -/* A BBOCB_MASK with the y bit of the BO field removed. */ -#define BBOYCB_MASK (BBOCB_MASK &~ Y_MASK) -#define BBOATCB_MASK (BBOCB_MASK &~ AT1_MASK) -#define BBOAT2CB_MASK (BBOCB_MASK &~ AT2_MASK) - -/* A BBOYCB_MASK in which the BI field is fixed. */ -#define BBOYBI_MASK (BBOYCB_MASK | BI_MASK) -#define BBOATBI_MASK (BBOAT2CB_MASK | BI_MASK) - -/* A Context form instruction. */ -#define CTX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x7)) -#define CTX_MASK CTX(0x3f, 0x7) - -/* A User Context form instruction. */ -#define UCTX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1f)) -#define UCTX_MASK UCTX(0x3f, 0x1f) - -/* The main opcode mask with the RA field clear. */ -#define DRA_MASK (OP_MASK | RA_MASK) - -/* A DS form instruction. 
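Two hypothetical examples of how these macros compose a full instruction word (the opcode numbers are standard PowerPC values, not taken from an entry shown here): the I-form branch-and-link "bl" uses major opcode 18 with LK set, and the B-form "bdnz" uses major opcode 16 with BO = 0x10 (BODNZ).

static const unsigned long bl_base   = B (18, /* aa */ 0, /* lk */ 1);   /* 0x48000001 */
static const unsigned long bdnz_base = BBO (16, 0x10, 0, 0);             /* 0x42000000 */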
*/ -#define DSO(op, xop) (OP (op) | ((xop) & 0x3)) -#define DS_MASK DSO (0x3f, 3) - -/* A DE form instruction. */ -#define DEO(op, xop) (OP (op) | ((xop) & 0xf)) -#define DE_MASK DEO (0x3e, 0xf) - -/* An EVSEL form instruction. */ -#define EVSEL(op, xop) (OP (op) | (((unsigned long)(xop)) & 0xff) << 3) -#define EVSEL_MASK EVSEL(0x3f, 0xff) - -/* An M form instruction. */ -#define M(op, rc) (OP (op) | ((rc) & 1)) -#define M_MASK M (0x3f, 1) - -/* An M form instruction with the ME field specified. */ -#define MME(op, me, rc) (M ((op), (rc)) | ((((unsigned long)(me)) & 0x1f) << 1)) - -/* An M_MASK with the MB and ME fields fixed. */ -#define MMBME_MASK (M_MASK | MB_MASK | ME_MASK) - -/* An M_MASK with the SH and ME fields fixed. */ -#define MSHME_MASK (M_MASK | SH_MASK | ME_MASK) - -/* An MD form instruction. */ -#define MD(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x7) << 2) | ((rc) & 1)) -#define MD_MASK MD (0x3f, 0x7, 1) - -/* An MD_MASK with the MB field fixed. */ -#define MDMB_MASK (MD_MASK | MB6_MASK) - -/* An MD_MASK with the SH field fixed. */ -#define MDSH_MASK (MD_MASK | SH6_MASK) - -/* An MDS form instruction. */ -#define MDS(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0xf) << 1) | ((rc) & 1)) -#define MDS_MASK MDS (0x3f, 0xf, 1) - -/* An MDS_MASK with the MB field fixed. */ -#define MDSMB_MASK (MDS_MASK | MB6_MASK) - -/* An SC form instruction. */ -#define SC(op, sa, lk) (OP (op) | ((((unsigned long)(sa)) & 1) << 1) | ((lk) & 1)) -#define SC_MASK (OP_MASK | (((unsigned long)0x3ff) << 16) | (((unsigned long)1) << 1) | 1) - -/* A VX form instruction. */ -#define VX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x7ff)) - -/* The mask for an VX form instruction. */ -#define VX_MASK VX(0x3f, 0x7ff) - -/* A VA form instruction. */ -#define VXA(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x03f)) - -/* The mask for a VA form instruction. */ -#define VXA_MASK VXA(0x3f, 0x3f) - -/* A VXR form instruction. */ -#define VXR(op, xop, rc) (OP (op) | (((rc) & 1) << 10) | (((unsigned long)(xop)) & 0x3ff)) - -/* The mask for a VXR form instruction. */ -#define VXR_MASK VXR(0x3f, 0x3ff, 1) - -/* An X form instruction. */ -#define X(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1)) - -/* A Z form instruction. */ -#define Z(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 1)) - -/* An X form instruction with the RC bit specified. */ -#define XRC(op, xop, rc) (X ((op), (xop)) | ((rc) & 1)) - -/* A Z form instruction with the RC bit specified. */ -#define ZRC(op, xop, rc) (Z ((op), (xop)) | ((rc) & 1)) - -/* The mask for an X form instruction. */ -#define X_MASK XRC (0x3f, 0x3ff, 1) - -/* The mask for a Z form instruction. */ -#define Z_MASK ZRC (0x3f, 0x1ff, 1) -#define Z2_MASK ZRC (0x3f, 0xff, 1) - -/* An X_MASK with the RA field fixed. */ -#define XRA_MASK (X_MASK | RA_MASK) - -/* An XRA_MASK with the W field clear. */ -#define XWRA_MASK (XRA_MASK & ~((unsigned long) 1 << 16)) - -/* An X_MASK with the RB field fixed. */ -#define XRB_MASK (X_MASK | RB_MASK) - -/* An X_MASK with the RT field fixed. */ -#define XRT_MASK (X_MASK | RT_MASK) - -/* An XRT_MASK mask with the L bits clear. */ -#define XLRT_MASK (XRT_MASK & ~((unsigned long) 0x3 << 21)) - -/* An X_MASK with the RA and RB fields fixed. */ -#define XRARB_MASK (X_MASK | RA_MASK | RB_MASK) - -/* An X form instruction with the RA field fixed. */ -#define XRA(op, xop, ra) (X((op), (xop)) | (((ra) << 16) & XRA_MASK)) - -/* An XRARB_MASK, but with the L bit clear. 
*/ -#define XRLARB_MASK (XRARB_MASK & ~((unsigned long) 1 << 16)) - -/* An X_MASK with the RT and RA fields fixed. */ -#define XRTRA_MASK (X_MASK | RT_MASK | RA_MASK) - -/* An XRTRA_MASK, but with L bit clear. */ -#define XRTLRA_MASK (XRTRA_MASK & ~((unsigned long) 1 << 21)) - -/* An X form instruction with the L bit specified. */ -#define XOPL(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 1) << 21)) - -/* The mask for an X form comparison instruction. */ -#define XCMP_MASK (X_MASK | (((unsigned long)1) << 22)) - -/* The mask for an X form comparison instruction with the L field - fixed. */ -#define XCMPL_MASK (XCMP_MASK | (((unsigned long)1) << 21)) - -/* An X form trap instruction with the TO field specified. */ -#define XTO(op, xop, to) (X ((op), (xop)) | ((((unsigned long)(to)) & 0x1f) << 21)) -#define XTO_MASK (X_MASK | TO_MASK) - -/* An X form tlb instruction with the SH field specified. */ -#define XTLB(op, xop, sh) (X ((op), (xop)) | ((((unsigned long)(sh)) & 0x1f) << 11)) -#define XTLB_MASK (X_MASK | SH_MASK) - -/* An X form sync instruction. */ -#define XSYNC(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 3) << 21)) - -/* An X form sync instruction with everything filled in except the LS field. */ -#define XSYNC_MASK (0xff9fffff) - -/* An X_MASK, but with the EH bit clear. */ -#define XEH_MASK (X_MASK & ~((unsigned long )1)) - -/* An X form AltiVec dss instruction. */ -#define XDSS(op, xop, a) (X ((op), (xop)) | ((((unsigned long)(a)) & 1) << 25)) -#define XDSS_MASK XDSS(0x3f, 0x3ff, 1) - -/* An XFL form instruction. */ -#define XFL(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1) | (((unsigned long)(rc)) & 1)) -#define XFL_MASK XFL (0x3f, 0x3ff, 1) - -/* An X form isel instruction. */ -#define XISEL(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 1)) -#define XISEL_MASK XISEL(0x3f, 0x1f) - -/* An XL form instruction with the LK field set to 0. */ -#define XL(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1)) - -/* An XL form instruction which uses the LK field. */ -#define XLLK(op, xop, lk) (XL ((op), (xop)) | ((lk) & 1)) - -/* The mask for an XL form instruction. */ -#define XL_MASK XLLK (0x3f, 0x3ff, 1) - -/* An XL form instruction which explicitly sets the BO field. */ -#define XLO(op, bo, xop, lk) \ - (XLLK ((op), (xop), (lk)) | ((((unsigned long)(bo)) & 0x1f) << 21)) -#define XLO_MASK (XL_MASK | BO_MASK) - -/* An XL form instruction which explicitly sets the y bit of the BO - field. */ -#define XLYLK(op, xop, y, lk) (XLLK ((op), (xop), (lk)) | ((((unsigned long)(y)) & 1) << 21)) -#define XLYLK_MASK (XL_MASK | Y_MASK) - -/* An XL form instruction which sets the BO field and the condition - bits of the BI field. */ -#define XLOCB(op, bo, cb, xop, lk) \ - (XLO ((op), (bo), (xop), (lk)) | ((((unsigned long)(cb)) & 3) << 16)) -#define XLOCB_MASK XLOCB (0x3f, 0x1f, 0x3, 0x3ff, 1) - -/* An XL_MASK or XLYLK_MASK or XLOCB_MASK with the BB field fixed. */ -#define XLBB_MASK (XL_MASK | BB_MASK) -#define XLYBB_MASK (XLYLK_MASK | BB_MASK) -#define XLBOCBBB_MASK (XLOCB_MASK | BB_MASK) - -/* A mask for branch instructions using the BH field. */ -#define XLBH_MASK (XL_MASK | (0x1c << 11)) - -/* An XL_MASK with the BO and BB fields fixed. */ -#define XLBOBB_MASK (XL_MASK | BO_MASK | BB_MASK) - -/* An XL_MASK with the BO, BI and BB fields fixed. */ -#define XLBOBIBB_MASK (XL_MASK | BO_MASK | BI_MASK | BB_MASK) - -/* An XO form instruction. 
*/ -#define XO(op, xop, oe, rc) \ - (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 1) | ((((unsigned long)(oe)) & 1) << 10) | (((unsigned long)(rc)) & 1)) -#define XO_MASK XO (0x3f, 0x1ff, 1, 1) - -/* An XO_MASK with the RB field fixed. */ -#define XORB_MASK (XO_MASK | RB_MASK) - -/* An XS form instruction. */ -#define XS(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 2) | (((unsigned long)(rc)) & 1)) -#define XS_MASK XS (0x3f, 0x1ff, 1) - -/* A mask for the FXM version of an XFX form instruction. */ -#define XFXFXM_MASK (X_MASK | (1 << 11) | (1 << 20)) - -/* An XFX form instruction with the FXM field filled in. */ -#define XFXM(op, xop, fxm, p4) \ - (X ((op), (xop)) | ((((unsigned long)(fxm)) & 0xff) << 12) \ - | ((unsigned long)(p4) << 20)) - -/* An XFX form instruction with the SPR field filled in. */ -#define XSPR(op, xop, spr) \ - (X ((op), (xop)) | ((((unsigned long)(spr)) & 0x1f) << 16) | ((((unsigned long)(spr)) & 0x3e0) << 6)) -#define XSPR_MASK (X_MASK | SPR_MASK) - -/* An XFX form instruction with the SPR field filled in except for the - SPRBAT field. */ -#define XSPRBAT_MASK (XSPR_MASK &~ SPRBAT_MASK) - -/* An XFX form instruction with the SPR field filled in except for the - SPRG field. */ -#define XSPRG_MASK (XSPR_MASK & ~(0x1f << 16)) - -/* An X form instruction with everything filled in except the E field. */ -#define XE_MASK (0xffff7fff) - -/* An X form user context instruction. */ -#define XUC(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1f)) -#define XUC_MASK XUC(0x3f, 0x1f) - -/* The BO encodings used in extended conditional branch mnemonics. */ -#define BODNZF (0x0) -#define BODNZFP (0x1) -#define BODZF (0x2) -#define BODZFP (0x3) -#define BODNZT (0x8) -#define BODNZTP (0x9) -#define BODZT (0xa) -#define BODZTP (0xb) - -#define BOF (0x4) -#define BOFP (0x5) -#define BOFM4 (0x6) -#define BOFP4 (0x7) -#define BOT (0xc) -#define BOTP (0xd) -#define BOTM4 (0xe) -#define BOTP4 (0xf) - -#define BODNZ (0x10) -#define BODNZP (0x11) -#define BODZ (0x12) -#define BODZP (0x13) -#define BODNZM4 (0x18) -#define BODNZP4 (0x19) -#define BODZM4 (0x1a) -#define BODZP4 (0x1b) - -#define BOU (0x14) - -/* The BI condition bit encodings used in extended conditional branch - mnemonics. */ -#define CBLT (0) -#define CBGT (1) -#define CBEQ (2) -#define CBSO (3) - -/* The TO encodings used in extended trap mnemonics. */ -#define TOLGT (0x1) -#define TOLLT (0x2) -#define TOEQ (0x4) -#define TOLGE (0x5) -#define TOLNL (0x5) -#define TOLLE (0x6) -#define TOLNG (0x6) -#define TOGT (0x8) -#define TOGE (0xc) -#define TONL (0xc) -#define TOLT (0x10) -#define TOLE (0x14) -#define TONG (0x14) -#define TONE (0x18) -#define TOU (0x1f) - -/* Smaller names for the flags so each entry in the opcodes table will - fit on a single line. */ -#undef PPC -#define PPC PPC_OPCODE_PPC -#define PPCCOM PPC_OPCODE_PPC | PPC_OPCODE_COMMON -#define NOPOWER4 PPC_OPCODE_NOPOWER4 | PPCCOM -#define POWER4 PPC_OPCODE_POWER4 -#define POWER5 PPC_OPCODE_POWER5 -#define POWER6 PPC_OPCODE_POWER6 -/* Documentation purposes only; we don't actually check the isa for disas. 
*/ -#define POWER7 PPC_OPCODE_POWER6 -#define POWER9 PPC_OPCODE_POWER6 -#define CELL PPC_OPCODE_CELL -#define PPC32 PPC_OPCODE_32 | PPC_OPCODE_PPC -#define PPC64 PPC_OPCODE_64 | PPC_OPCODE_PPC -#define PPC403 PPC_OPCODE_403 -#define PPC405 PPC403 -#define PPC440 PPC_OPCODE_440 -#define PPC750 PPC -#define PPC860 PPC -#define PPCVEC PPC_OPCODE_ALTIVEC -#define POWER PPC_OPCODE_POWER -#define POWER2 PPC_OPCODE_POWER | PPC_OPCODE_POWER2 -#define PPCPWR2 PPC_OPCODE_PPC | PPC_OPCODE_POWER | PPC_OPCODE_POWER2 -#define POWER32 PPC_OPCODE_POWER | PPC_OPCODE_32 -#define COM PPC_OPCODE_POWER | PPC_OPCODE_PPC | PPC_OPCODE_COMMON -#define COM32 PPC_OPCODE_POWER | PPC_OPCODE_PPC | PPC_OPCODE_COMMON | PPC_OPCODE_32 -#define M601 PPC_OPCODE_POWER | PPC_OPCODE_601 -#define PWRCOM PPC_OPCODE_POWER | PPC_OPCODE_601 | PPC_OPCODE_COMMON -#define MFDEC1 PPC_OPCODE_POWER -#define MFDEC2 PPC_OPCODE_PPC | PPC_OPCODE_601 | PPC_OPCODE_BOOKE -#define BOOKE PPC_OPCODE_BOOKE -#define BOOKE64 PPC_OPCODE_BOOKE64 -#define CLASSIC PPC_OPCODE_CLASSIC -#define PPCE300 PPC_OPCODE_E300 -#define PPCSPE PPC_OPCODE_SPE -#define PPCISEL PPC_OPCODE_ISEL -#define PPCEFS PPC_OPCODE_EFS -#define PPCBRLK PPC_OPCODE_BRLOCK -#define PPCPMR PPC_OPCODE_PMR -#define PPCCHLK PPC_OPCODE_CACHELCK -#define PPCCHLK64 PPC_OPCODE_CACHELCK | PPC_OPCODE_BOOKE64 -#define PPCRFMCI PPC_OPCODE_RFMCI - -/* The opcode table. - - The format of the opcode table is: - - NAME OPCODE MASK FLAGS { OPERANDS } - - NAME is the name of the instruction. - OPCODE is the instruction opcode. - MASK is the opcode mask; this is used to tell the disassembler - which bits in the actual opcode must match OPCODE. - FLAGS are flags indicated what processors support the instruction. - OPERANDS is the list of operands. - - The disassembler reads the table in order and prints the first - instruction which matches, so this table is sorted to put more - specific instructions before more general instructions. It is also - sorted by major opcode. 
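A minimal sketch of the first-match lookup this layout implies; lookup_insn is a hypothetical helper, it assumes the powerpc_num_opcodes count declared alongside the table, and it omits the operand-extraction validity checks the real disassembler performs.

static const struct powerpc_opcode *
lookup_insn (unsigned long insn, unsigned long dialect)
{
  const struct powerpc_opcode *op;

  for (op = powerpc_opcodes; op < powerpc_opcodes + powerpc_num_opcodes; op++)
    {
      /* An entry matches when its fixed bits equal the instruction's and
         the dialect supports it; ordering puts more specific masks first.  */
      if ((op->flags & dialect) != 0 && (insn & op->mask) == op->opcode)
        return op;
    }
  return NULL;   /* unknown: the caller falls back to printing a raw word */
}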
*/ - -const struct powerpc_opcode powerpc_opcodes[] = { -{ "attn", X(0,256), X_MASK, POWER4, { 0 } }, -{ "tdlgti", OPTO(2,TOLGT), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdllti", OPTO(2,TOLLT), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdeqi", OPTO(2,TOEQ), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdlgei", OPTO(2,TOLGE), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdlnli", OPTO(2,TOLNL), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdllei", OPTO(2,TOLLE), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdlngi", OPTO(2,TOLNG), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdgti", OPTO(2,TOGT), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdgei", OPTO(2,TOGE), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdnli", OPTO(2,TONL), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdlti", OPTO(2,TOLT), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdlei", OPTO(2,TOLE), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdngi", OPTO(2,TONG), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdnei", OPTO(2,TONE), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdi", OP(2), OP_MASK, PPC64, { TO, RA, SI } }, - -{ "twlgti", OPTO(3,TOLGT), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tlgti", OPTO(3,TOLGT), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twllti", OPTO(3,TOLLT), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tllti", OPTO(3,TOLLT), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "tweqi", OPTO(3,TOEQ), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "teqi", OPTO(3,TOEQ), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twlgei", OPTO(3,TOLGE), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tlgei", OPTO(3,TOLGE), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twlnli", OPTO(3,TOLNL), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tlnli", OPTO(3,TOLNL), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twllei", OPTO(3,TOLLE), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tllei", OPTO(3,TOLLE), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twlngi", OPTO(3,TOLNG), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tlngi", OPTO(3,TOLNG), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twgti", OPTO(3,TOGT), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tgti", OPTO(3,TOGT), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twgei", OPTO(3,TOGE), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tgei", OPTO(3,TOGE), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twnli", OPTO(3,TONL), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tnli", OPTO(3,TONL), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twlti", OPTO(3,TOLT), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tlti", OPTO(3,TOLT), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twlei", OPTO(3,TOLE), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tlei", OPTO(3,TOLE), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twngi", OPTO(3,TONG), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tngi", OPTO(3,TONG), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twnei", OPTO(3,TONE), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tnei", OPTO(3,TONE), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twi", OP(3), OP_MASK, PPCCOM, { TO, RA, SI } }, -{ "ti", OP(3), OP_MASK, PWRCOM, { TO, RA, SI } }, - -{ "macchw", XO(4,172,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchw.", XO(4,172,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwo", XO(4,172,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwo.", XO(4,172,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchws", XO(4,236,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchws.", XO(4,236,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwso", XO(4,236,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwso.", XO(4,236,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwsu", XO(4,204,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwsu.", XO(4,204,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwsuo", XO(4,204,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwsuo.", 
XO(4,204,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwu", XO(4,140,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwu.", XO(4,140,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwuo", XO(4,140,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwuo.", XO(4,140,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhw", XO(4,44,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhw.", XO(4,44,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwo", XO(4,44,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwo.", XO(4,44,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhws", XO(4,108,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhws.", XO(4,108,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwso", XO(4,108,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwso.", XO(4,108,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwsu", XO(4,76,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwsu.", XO(4,76,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwsuo", XO(4,76,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwsuo.", XO(4,76,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwu", XO(4,12,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwu.", XO(4,12,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwuo", XO(4,12,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwuo.", XO(4,12,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhw", XO(4,428,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhw.", XO(4,428,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwo", XO(4,428,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwo.", XO(4,428,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhws", XO(4,492,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhws.", XO(4,492,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwso", XO(4,492,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwso.", XO(4,492,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwsu", XO(4,460,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwsu.", XO(4,460,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwsuo", XO(4,460,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwsuo.", XO(4,460,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwu", XO(4,396,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwu.", XO(4,396,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwuo", XO(4,396,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwuo.", XO(4,396,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mulchw", XRC(4,168,0), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mulchw.", XRC(4,168,1), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mulchwu", XRC(4,136,0), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mulchwu.", XRC(4,136,1), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mulhhw", XRC(4,40,0), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mulhhw.", XRC(4,40,1), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mulhhwu", XRC(4,8,0), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mulhhwu.", XRC(4,8,1), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mullhw", XRC(4,424,0), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mullhw.", XRC(4,424,1), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mullhwu", XRC(4,392,0), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mullhwu.", XRC(4,392,1), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmacchw", XO(4,174,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmacchw.", 
XO(4,174,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmacchwo", XO(4,174,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmacchwo.", XO(4,174,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmacchws", XO(4,238,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmacchws.", XO(4,238,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmacchwso", XO(4,238,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmacchwso.", XO(4,238,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmachhw", XO(4,46,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmachhw.", XO(4,46,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmachhwo", XO(4,46,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmachhwo.", XO(4,46,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmachhws", XO(4,110,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmachhws.", XO(4,110,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmachhwso", XO(4,110,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmachhwso.", XO(4,110,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmaclhw", XO(4,430,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmaclhw.", XO(4,430,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmaclhwo", XO(4,430,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmaclhwo.", XO(4,430,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmaclhws", XO(4,494,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmaclhws.", XO(4,494,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmaclhwso", XO(4,494,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmaclhwso.", XO(4,494,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mfvscr", VX(4, 1540), VX_MASK, PPCVEC, { VD } }, -{ "mtvscr", VX(4, 1604), VX_MASK, PPCVEC, { VB } }, - - /* Double-precision opcodes. */ - /* Some of these conflict with AltiVec, so move them before, since - PPCVEC includes the PPC_OPCODE_PPC set. 
*/ -{ "efscfd", VX(4, 719), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdabs", VX(4, 740), VX_MASK, PPCEFS, { RS, RA } }, -{ "efdnabs", VX(4, 741), VX_MASK, PPCEFS, { RS, RA } }, -{ "efdneg", VX(4, 742), VX_MASK, PPCEFS, { RS, RA } }, -{ "efdadd", VX(4, 736), VX_MASK, PPCEFS, { RS, RA, RB } }, -{ "efdsub", VX(4, 737), VX_MASK, PPCEFS, { RS, RA, RB } }, -{ "efdmul", VX(4, 744), VX_MASK, PPCEFS, { RS, RA, RB } }, -{ "efddiv", VX(4, 745), VX_MASK, PPCEFS, { RS, RA, RB } }, -{ "efdcmpgt", VX(4, 748), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efdcmplt", VX(4, 749), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efdcmpeq", VX(4, 750), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efdtstgt", VX(4, 764), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efdtstlt", VX(4, 765), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efdtsteq", VX(4, 766), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efdcfsi", VX(4, 753), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdcfsid", VX(4, 739), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdcfui", VX(4, 752), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdcfuid", VX(4, 738), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdcfsf", VX(4, 755), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdcfuf", VX(4, 754), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdctsi", VX(4, 757), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdctsidz",VX(4, 747), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdctsiz", VX(4, 762), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdctui", VX(4, 756), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdctuidz",VX(4, 746), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdctuiz", VX(4, 760), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdctsf", VX(4, 759), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdctuf", VX(4, 758), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdcfs", VX(4, 751), VX_MASK, PPCEFS, { RS, RB } }, - /* End of double-precision opcodes. */ - -{ "vaddcuw", VX(4, 384), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vaddfp", VX(4, 10), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vaddsbs", VX(4, 768), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vaddshs", VX(4, 832), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vaddsws", VX(4, 896), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vaddubm", VX(4, 0), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vaddubs", VX(4, 512), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vadduhm", VX(4, 64), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vadduhs", VX(4, 576), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vadduwm", VX(4, 128), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vadduws", VX(4, 640), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vand", VX(4, 1028), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vandc", VX(4, 1092), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vavgsb", VX(4, 1282), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vavgsh", VX(4, 1346), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vavgsw", VX(4, 1410), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vavgub", VX(4, 1026), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vavguh", VX(4, 1090), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vavguw", VX(4, 1154), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcfsx", VX(4, 842), VX_MASK, PPCVEC, { VD, VB, UIMM } }, -{ "vcfux", VX(4, 778), VX_MASK, PPCVEC, { VD, VB, UIMM } }, -{ "vcmpbfp", VXR(4, 966, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpbfp.", VXR(4, 966, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpeqfp", VXR(4, 198, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpeqfp.", VXR(4, 198, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpequb", VXR(4, 6, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpequb.", VXR(4, 6, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpequh", VXR(4, 70, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpequh.", VXR(4, 70, 1), VXR_MASK, PPCVEC, { VD, VA, VB 
} }, -{ "vcmpequw", VXR(4, 134, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpequw.", VXR(4, 134, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgefp", VXR(4, 454, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgefp.", VXR(4, 454, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtfp", VXR(4, 710, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtfp.", VXR(4, 710, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtsb", VXR(4, 774, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtsb.", VXR(4, 774, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtsh", VXR(4, 838, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtsh.", VXR(4, 838, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtsw", VXR(4, 902, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtsw.", VXR(4, 902, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtub", VXR(4, 518, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtub.", VXR(4, 518, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtuh", VXR(4, 582, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtuh.", VXR(4, 582, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtuw", VXR(4, 646, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtuw.", VXR(4, 646, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vctsxs", VX(4, 970), VX_MASK, PPCVEC, { VD, VB, UIMM } }, -{ "vctuxs", VX(4, 906), VX_MASK, PPCVEC, { VD, VB, UIMM } }, -{ "vexptefp", VX(4, 394), VX_MASK, PPCVEC, { VD, VB } }, -{ "vlogefp", VX(4, 458), VX_MASK, PPCVEC, { VD, VB } }, -{ "vmaddfp", VXA(4, 46), VXA_MASK, PPCVEC, { VD, VA, VC, VB } }, -{ "vmaxfp", VX(4, 1034), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmaxsb", VX(4, 258), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmaxsh", VX(4, 322), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmaxsw", VX(4, 386), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmaxub", VX(4, 2), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmaxuh", VX(4, 66), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmaxuw", VX(4, 130), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmhaddshs", VXA(4, 32), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vmhraddshs", VXA(4, 33), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vminfp", VX(4, 1098), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vminsb", VX(4, 770), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vminsh", VX(4, 834), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vminsw", VX(4, 898), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vminub", VX(4, 514), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vminuh", VX(4, 578), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vminuw", VX(4, 642), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmladduhm", VXA(4, 34), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vmrghb", VX(4, 12), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmrghh", VX(4, 76), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmrghw", VX(4, 140), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmrglb", VX(4, 268), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmrglh", VX(4, 332), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmrglw", VX(4, 396), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmsummbm", VXA(4, 37), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vmsumshm", VXA(4, 40), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vmsumshs", VXA(4, 41), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vmsumubm", VXA(4, 36), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vmsumuhm", VXA(4, 38), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vmsumuhs", VXA(4, 39), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vmulesb", VX(4, 776), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmulesh", VX(4, 840), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmuleub", VX(4, 520), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmuleuh", VX(4, 584), VX_MASK, PPCVEC, 
{ VD, VA, VB } }, -{ "vmulosb", VX(4, 264), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmulosh", VX(4, 328), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmuloub", VX(4, 8), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmulouh", VX(4, 72), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vnmsubfp", VXA(4, 47), VXA_MASK, PPCVEC, { VD, VA, VC, VB } }, -{ "vnor", VX(4, 1284), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vor", VX(4, 1156), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vperm", VXA(4, 43), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vpkpx", VX(4, 782), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vpkshss", VX(4, 398), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vpkshus", VX(4, 270), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vpkswss", VX(4, 462), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vpkswus", VX(4, 334), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vpkuhum", VX(4, 14), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vpkuhus", VX(4, 142), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vpkuwum", VX(4, 78), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vpkuwus", VX(4, 206), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vrefp", VX(4, 266), VX_MASK, PPCVEC, { VD, VB } }, -{ "vrfim", VX(4, 714), VX_MASK, PPCVEC, { VD, VB } }, -{ "vrfin", VX(4, 522), VX_MASK, PPCVEC, { VD, VB } }, -{ "vrfip", VX(4, 650), VX_MASK, PPCVEC, { VD, VB } }, -{ "vrfiz", VX(4, 586), VX_MASK, PPCVEC, { VD, VB } }, -{ "vrlb", VX(4, 4), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vrlh", VX(4, 68), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vrlw", VX(4, 132), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vrsqrtefp", VX(4, 330), VX_MASK, PPCVEC, { VD, VB } }, -{ "vrldmi", VX(4, 197), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vrldnm", VX(4, 453), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vrlwmi", VX(4, 133), VX_MASK, PPCVEC, { VD, VA, VB} }, -{ "vrlwnm", VX(4, 389), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsel", VXA(4, 42), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vsl", VX(4, 452), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vslb", VX(4, 260), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsldoi", VXA(4, 44), VXA_MASK, PPCVEC, { VD, VA, VB, SHB } }, -{ "vslh", VX(4, 324), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vslo", VX(4, 1036), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vslw", VX(4, 388), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vspltb", VX(4, 524), VX_MASK, PPCVEC, { VD, VB, UIMM } }, -{ "vsplth", VX(4, 588), VX_MASK, PPCVEC, { VD, VB, UIMM } }, -{ "vspltisb", VX(4, 780), VX_MASK, PPCVEC, { VD, SIMM } }, -{ "vspltish", VX(4, 844), VX_MASK, PPCVEC, { VD, SIMM } }, -{ "vspltisw", VX(4, 908), VX_MASK, PPCVEC, { VD, SIMM } }, -{ "vspltw", VX(4, 652), VX_MASK, PPCVEC, { VD, VB, UIMM } }, -{ "vsr", VX(4, 708), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsrab", VX(4, 772), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsrah", VX(4, 836), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsraw", VX(4, 900), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsrb", VX(4, 516), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsrh", VX(4, 580), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsro", VX(4, 1100), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsrw", VX(4, 644), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsubcuw", VX(4, 1408), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsubfp", VX(4, 74), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsubsbs", VX(4, 1792), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsubshs", VX(4, 1856), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsubsws", VX(4, 1920), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsububm", VX(4, 1024), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsububs", VX(4, 1536), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsubuhm", VX(4, 1088), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsubuhs", 
VX(4, 1600), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsubuwm", VX(4, 1152), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsubuws", VX(4, 1664), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsumsws", VX(4, 1928), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsum2sws", VX(4, 1672), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsum4sbs", VX(4, 1800), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsum4shs", VX(4, 1608), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsum4ubs", VX(4, 1544), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vupkhpx", VX(4, 846), VX_MASK, PPCVEC, { VD, VB } }, -{ "vupkhsb", VX(4, 526), VX_MASK, PPCVEC, { VD, VB } }, -{ "vupkhsh", VX(4, 590), VX_MASK, PPCVEC, { VD, VB } }, -{ "vupklpx", VX(4, 974), VX_MASK, PPCVEC, { VD, VB } }, -{ "vupklsb", VX(4, 654), VX_MASK, PPCVEC, { VD, VB } }, -{ "vupklsh", VX(4, 718), VX_MASK, PPCVEC, { VD, VB } }, -{ "vxor", VX(4, 1220), VX_MASK, PPCVEC, { VD, VA, VB } }, - -{ "evaddw", VX(4, 512), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evaddiw", VX(4, 514), VX_MASK, PPCSPE, { RS, RB, UIMM } }, -{ "evsubfw", VX(4, 516), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evsubw", VX(4, 516), VX_MASK, PPCSPE, { RS, RB, RA } }, -{ "evsubifw", VX(4, 518), VX_MASK, PPCSPE, { RS, UIMM, RB } }, -{ "evsubiw", VX(4, 518), VX_MASK, PPCSPE, { RS, RB, UIMM } }, -{ "evabs", VX(4, 520), VX_MASK, PPCSPE, { RS, RA } }, -{ "evneg", VX(4, 521), VX_MASK, PPCSPE, { RS, RA } }, -{ "evextsb", VX(4, 522), VX_MASK, PPCSPE, { RS, RA } }, -{ "evextsh", VX(4, 523), VX_MASK, PPCSPE, { RS, RA } }, -{ "evrndw", VX(4, 524), VX_MASK, PPCSPE, { RS, RA } }, -{ "evcntlzw", VX(4, 525), VX_MASK, PPCSPE, { RS, RA } }, -{ "evcntlsw", VX(4, 526), VX_MASK, PPCSPE, { RS, RA } }, - -{ "brinc", VX(4, 527), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evand", VX(4, 529), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evandc", VX(4, 530), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmr", VX(4, 535), VX_MASK, PPCSPE, { RS, RA, BBA } }, -{ "evor", VX(4, 535), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evorc", VX(4, 539), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evxor", VX(4, 534), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "eveqv", VX(4, 537), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evnand", VX(4, 542), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evnot", VX(4, 536), VX_MASK, PPCSPE, { RS, RA, BBA } }, -{ "evnor", VX(4, 536), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evrlw", VX(4, 552), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evrlwi", VX(4, 554), VX_MASK, PPCSPE, { RS, RA, EVUIMM } }, -{ "evslw", VX(4, 548), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evslwi", VX(4, 550), VX_MASK, PPCSPE, { RS, RA, EVUIMM } }, -{ "evsrws", VX(4, 545), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evsrwu", VX(4, 544), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evsrwis", VX(4, 547), VX_MASK, PPCSPE, { RS, RA, EVUIMM } }, -{ "evsrwiu", VX(4, 546), VX_MASK, PPCSPE, { RS, RA, EVUIMM } }, -{ "evsplati", VX(4, 553), VX_MASK, PPCSPE, { RS, SIMM } }, -{ "evsplatfi", VX(4, 555), VX_MASK, PPCSPE, { RS, SIMM } }, -{ "evmergehi", VX(4, 556), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmergelo", VX(4, 557), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmergehilo",VX(4,558), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmergelohi",VX(4,559), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evcmpgts", VX(4, 561), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evcmpgtu", VX(4, 560), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evcmplts", VX(4, 563), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evcmpltu", VX(4, 562), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evcmpeq", VX(4, 564), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evsel", EVSEL(4,79),EVSEL_MASK, PPCSPE, { RS, 
RA, RB, CRFS } }, - -{ "evldd", VX(4, 769), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } }, -{ "evlddx", VX(4, 768), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evldw", VX(4, 771), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } }, -{ "evldwx", VX(4, 770), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evldh", VX(4, 773), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } }, -{ "evldhx", VX(4, 772), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evlwhe", VX(4, 785), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } }, -{ "evlwhex", VX(4, 784), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evlwhou", VX(4, 789), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } }, -{ "evlwhoux", VX(4, 788), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evlwhos", VX(4, 791), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } }, -{ "evlwhosx", VX(4, 790), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evlwwsplat",VX(4, 793), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } }, -{ "evlwwsplatx",VX(4, 792), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evlwhsplat",VX(4, 797), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } }, -{ "evlwhsplatx",VX(4, 796), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evlhhesplat",VX(4, 777), VX_MASK, PPCSPE, { RS, EVUIMM_2, RA } }, -{ "evlhhesplatx",VX(4, 776), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evlhhousplat",VX(4, 781), VX_MASK, PPCSPE, { RS, EVUIMM_2, RA } }, -{ "evlhhousplatx",VX(4, 780), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evlhhossplat",VX(4, 783), VX_MASK, PPCSPE, { RS, EVUIMM_2, RA } }, -{ "evlhhossplatx",VX(4, 782), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evstdd", VX(4, 801), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } }, -{ "evstddx", VX(4, 800), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evstdw", VX(4, 803), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } }, -{ "evstdwx", VX(4, 802), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evstdh", VX(4, 805), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } }, -{ "evstdhx", VX(4, 804), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evstwwe", VX(4, 825), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } }, -{ "evstwwex", VX(4, 824), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evstwwo", VX(4, 829), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } }, -{ "evstwwox", VX(4, 828), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evstwhe", VX(4, 817), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } }, -{ "evstwhex", VX(4, 816), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evstwho", VX(4, 821), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } }, -{ "evstwhox", VX(4, 820), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evfsabs", VX(4, 644), VX_MASK, PPCSPE, { RS, RA } }, -{ "evfsnabs", VX(4, 645), VX_MASK, PPCSPE, { RS, RA } }, -{ "evfsneg", VX(4, 646), VX_MASK, PPCSPE, { RS, RA } }, -{ "evfsadd", VX(4, 640), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evfssub", VX(4, 641), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evfsmul", VX(4, 648), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evfsdiv", VX(4, 649), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evfscmpgt", VX(4, 652), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evfscmplt", VX(4, 653), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evfscmpeq", VX(4, 654), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evfststgt", VX(4, 668), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evfststlt", VX(4, 669), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evfststeq", VX(4, 670), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evfscfui", VX(4, 656), VX_MASK, PPCSPE, { RS, RB } }, -{ "evfsctuiz", VX(4, 664), VX_MASK, PPCSPE, { RS, RB } }, -{ "evfscfsi", VX(4, 657), VX_MASK, PPCSPE, { RS, RB } }, -{ "evfscfuf", VX(4, 658), VX_MASK, PPCSPE, { RS, RB } }, -{ "evfscfsf", VX(4, 659), VX_MASK, PPCSPE, { RS, RB } }, -{ "evfsctui", VX(4, 660), VX_MASK, PPCSPE, { RS, RB } }, -{ "evfsctsi", VX(4, 661), VX_MASK, PPCSPE, 
{ RS, RB } }, -{ "evfsctsiz", VX(4, 666), VX_MASK, PPCSPE, { RS, RB } }, -{ "evfsctuf", VX(4, 662), VX_MASK, PPCSPE, { RS, RB } }, -{ "evfsctsf", VX(4, 663), VX_MASK, PPCSPE, { RS, RB } }, - -{ "efsabs", VX(4, 708), VX_MASK, PPCEFS, { RS, RA } }, -{ "efsnabs", VX(4, 709), VX_MASK, PPCEFS, { RS, RA } }, -{ "efsneg", VX(4, 710), VX_MASK, PPCEFS, { RS, RA } }, -{ "efsadd", VX(4, 704), VX_MASK, PPCEFS, { RS, RA, RB } }, -{ "efssub", VX(4, 705), VX_MASK, PPCEFS, { RS, RA, RB } }, -{ "efsmul", VX(4, 712), VX_MASK, PPCEFS, { RS, RA, RB } }, -{ "efsdiv", VX(4, 713), VX_MASK, PPCEFS, { RS, RA, RB } }, -{ "efscmpgt", VX(4, 716), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efscmplt", VX(4, 717), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efscmpeq", VX(4, 718), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efststgt", VX(4, 732), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efststlt", VX(4, 733), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efststeq", VX(4, 734), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efscfui", VX(4, 720), VX_MASK, PPCEFS, { RS, RB } }, -{ "efsctuiz", VX(4, 728), VX_MASK, PPCEFS, { RS, RB } }, -{ "efscfsi", VX(4, 721), VX_MASK, PPCEFS, { RS, RB } }, -{ "efscfuf", VX(4, 722), VX_MASK, PPCEFS, { RS, RB } }, -{ "efscfsf", VX(4, 723), VX_MASK, PPCEFS, { RS, RB } }, -{ "efsctui", VX(4, 724), VX_MASK, PPCEFS, { RS, RB } }, -{ "efsctsi", VX(4, 725), VX_MASK, PPCEFS, { RS, RB } }, -{ "efsctsiz", VX(4, 730), VX_MASK, PPCEFS, { RS, RB } }, -{ "efsctuf", VX(4, 726), VX_MASK, PPCEFS, { RS, RB } }, -{ "efsctsf", VX(4, 727), VX_MASK, PPCEFS, { RS, RB } }, - -{ "evmhossf", VX(4, 1031), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhossfa", VX(4, 1063), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhosmf", VX(4, 1039), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhosmfa", VX(4, 1071), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhosmi", VX(4, 1037), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhosmia", VX(4, 1069), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhoumi", VX(4, 1036), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhoumia", VX(4, 1068), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhessf", VX(4, 1027), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhessfa", VX(4, 1059), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhesmf", VX(4, 1035), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhesmfa", VX(4, 1067), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhesmi", VX(4, 1033), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhesmia", VX(4, 1065), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmheumi", VX(4, 1032), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmheumia", VX(4, 1064), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmhossfaaw",VX(4, 1287), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhossiaaw",VX(4, 1285), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhosmfaaw",VX(4, 1295), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhosmiaaw",VX(4, 1293), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhousiaaw",VX(4, 1284), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhoumiaaw",VX(4, 1292), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhessfaaw",VX(4, 1283), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhessiaaw",VX(4, 1281), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhesmfaaw",VX(4, 1291), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhesmiaaw",VX(4, 1289), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmheusiaaw",VX(4, 1280), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmheumiaaw",VX(4, 1288), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmhossfanw",VX(4, 1415), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhossianw",VX(4, 1413), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhosmfanw",VX(4, 1423), VX_MASK, PPCSPE, { RS, RA, RB } }, 
-{ "evmhosmianw",VX(4, 1421), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhousianw",VX(4, 1412), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhoumianw",VX(4, 1420), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhessfanw",VX(4, 1411), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhessianw",VX(4, 1409), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhesmfanw",VX(4, 1419), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhesmianw",VX(4, 1417), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmheusianw",VX(4, 1408), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmheumianw",VX(4, 1416), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmhogsmfaa",VX(4, 1327), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhogsmiaa",VX(4, 1325), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhogumiaa",VX(4, 1324), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhegsmfaa",VX(4, 1323), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhegsmiaa",VX(4, 1321), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhegumiaa",VX(4, 1320), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmhogsmfan",VX(4, 1455), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhogsmian",VX(4, 1453), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhogumian",VX(4, 1452), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhegsmfan",VX(4, 1451), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhegsmian",VX(4, 1449), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhegumian",VX(4, 1448), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmwhssf", VX(4, 1095), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwhssfa", VX(4, 1127), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwhsmf", VX(4, 1103), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwhsmfa", VX(4, 1135), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwhsmi", VX(4, 1101), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwhsmia", VX(4, 1133), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwhumi", VX(4, 1100), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwhumia", VX(4, 1132), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmwlumi", VX(4, 1096), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwlumia", VX(4, 1128), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmwlssiaaw",VX(4, 1345), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwlsmiaaw",VX(4, 1353), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwlusiaaw",VX(4, 1344), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwlumiaaw",VX(4, 1352), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmwlssianw",VX(4, 1473), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwlsmianw",VX(4, 1481), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwlusianw",VX(4, 1472), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwlumianw",VX(4, 1480), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmwssf", VX(4, 1107), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwssfa", VX(4, 1139), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwsmf", VX(4, 1115), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwsmfa", VX(4, 1147), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwsmi", VX(4, 1113), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwsmia", VX(4, 1145), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwumi", VX(4, 1112), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwumia", VX(4, 1144), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmwssfaa", VX(4, 1363), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwsmfaa", VX(4, 1371), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwsmiaa", VX(4, 1369), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwumiaa", VX(4, 1368), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmwssfan", VX(4, 1491), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwsmfan", VX(4, 1499), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwsmian", VX(4, 1497), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwumian", VX(4, 1496), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ 
"evaddssiaaw",VX(4, 1217), VX_MASK, PPCSPE, { RS, RA } }, -{ "evaddsmiaaw",VX(4, 1225), VX_MASK, PPCSPE, { RS, RA } }, -{ "evaddusiaaw",VX(4, 1216), VX_MASK, PPCSPE, { RS, RA } }, -{ "evaddumiaaw",VX(4, 1224), VX_MASK, PPCSPE, { RS, RA } }, - -{ "evsubfssiaaw",VX(4, 1219), VX_MASK, PPCSPE, { RS, RA } }, -{ "evsubfsmiaaw",VX(4, 1227), VX_MASK, PPCSPE, { RS, RA } }, -{ "evsubfusiaaw",VX(4, 1218), VX_MASK, PPCSPE, { RS, RA } }, -{ "evsubfumiaaw",VX(4, 1226), VX_MASK, PPCSPE, { RS, RA } }, - -{ "evmra", VX(4, 1220), VX_MASK, PPCSPE, { RS, RA } }, - -{ "evdivws", VX(4, 1222), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evdivwu", VX(4, 1223), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "mulli", OP(7), OP_MASK, PPCCOM, { RT, RA, SI } }, -{ "muli", OP(7), OP_MASK, PWRCOM, { RT, RA, SI } }, - -{ "subfic", OP(8), OP_MASK, PPCCOM, { RT, RA, SI } }, -{ "sfi", OP(8), OP_MASK, PWRCOM, { RT, RA, SI } }, - -{ "dozi", OP(9), OP_MASK, M601, { RT, RA, SI } }, - -{ "bce", B(9,0,0), B_MASK, BOOKE64, { BO, BI, BD } }, -{ "bcel", B(9,0,1), B_MASK, BOOKE64, { BO, BI, BD } }, -{ "bcea", B(9,1,0), B_MASK, BOOKE64, { BO, BI, BDA } }, -{ "bcela", B(9,1,1), B_MASK, BOOKE64, { BO, BI, BDA } }, - -{ "cmplwi", OPL(10,0), OPL_MASK, PPCCOM, { OBF, RA, UI } }, -{ "cmpldi", OPL(10,1), OPL_MASK, PPC64, { OBF, RA, UI } }, -{ "cmpli", OP(10), OP_MASK, PPC, { BF, L, RA, UI } }, -{ "cmpli", OP(10), OP_MASK, PWRCOM, { BF, RA, UI } }, - -{ "cmpwi", OPL(11,0), OPL_MASK, PPCCOM, { OBF, RA, SI } }, -{ "cmpdi", OPL(11,1), OPL_MASK, PPC64, { OBF, RA, SI } }, -{ "cmpi", OP(11), OP_MASK, PPC, { BF, L, RA, SI } }, -{ "cmpi", OP(11), OP_MASK, PWRCOM, { BF, RA, SI } }, - -{ "addic", OP(12), OP_MASK, PPCCOM, { RT, RA, SI } }, -{ "ai", OP(12), OP_MASK, PWRCOM, { RT, RA, SI } }, -{ "subic", OP(12), OP_MASK, PPCCOM, { RT, RA, NSI } }, - -{ "addic.", OP(13), OP_MASK, PPCCOM, { RT, RA, SI } }, -{ "ai.", OP(13), OP_MASK, PWRCOM, { RT, RA, SI } }, -{ "subic.", OP(13), OP_MASK, PPCCOM, { RT, RA, NSI } }, - -{ "li", OP(14), DRA_MASK, PPCCOM, { RT, SI } }, -{ "lil", OP(14), DRA_MASK, PWRCOM, { RT, SI } }, -{ "addi", OP(14), OP_MASK, PPCCOM, { RT, RA0, SI } }, -{ "cal", OP(14), OP_MASK, PWRCOM, { RT, D, RA0 } }, -{ "subi", OP(14), OP_MASK, PPCCOM, { RT, RA0, NSI } }, -{ "la", OP(14), OP_MASK, PPCCOM, { RT, D, RA0 } }, - -{ "lis", OP(15), DRA_MASK, PPCCOM, { RT, SISIGNOPT } }, -{ "liu", OP(15), DRA_MASK, PWRCOM, { RT, SISIGNOPT } }, -{ "addis", OP(15), OP_MASK, PPCCOM, { RT,RA0,SISIGNOPT } }, -{ "cau", OP(15), OP_MASK, PWRCOM, { RT,RA0,SISIGNOPT } }, -{ "subis", OP(15), OP_MASK, PPCCOM, { RT, RA0, NSI } }, - -{ "bdnz-", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, { BDM } }, -{ "bdnz+", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, { BDP } }, -{ "bdnz", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, { BD } }, -{ "bdn", BBO(16,BODNZ,0,0), BBOATBI_MASK, PWRCOM, { BD } }, -{ "bdnzl-", BBO(16,BODNZ,0,1), BBOATBI_MASK, PPCCOM, { BDM } }, -{ "bdnzl+", BBO(16,BODNZ,0,1), BBOATBI_MASK, PPCCOM, { BDP } }, -{ "bdnzl", BBO(16,BODNZ,0,1), BBOATBI_MASK, PPCCOM, { BD } }, -{ "bdnl", BBO(16,BODNZ,0,1), BBOATBI_MASK, PWRCOM, { BD } }, -{ "bdnza-", BBO(16,BODNZ,1,0), BBOATBI_MASK, PPCCOM, { BDMA } }, -{ "bdnza+", BBO(16,BODNZ,1,0), BBOATBI_MASK, PPCCOM, { BDPA } }, -{ "bdnza", BBO(16,BODNZ,1,0), BBOATBI_MASK, PPCCOM, { BDA } }, -{ "bdna", BBO(16,BODNZ,1,0), BBOATBI_MASK, PWRCOM, { BDA } }, -{ "bdnzla-", BBO(16,BODNZ,1,1), BBOATBI_MASK, PPCCOM, { BDMA } }, -{ "bdnzla+", BBO(16,BODNZ,1,1), BBOATBI_MASK, PPCCOM, { BDPA } }, -{ "bdnzla", BBO(16,BODNZ,1,1), BBOATBI_MASK, PPCCOM, { BDA } }, -{ 
"bdnla", BBO(16,BODNZ,1,1), BBOATBI_MASK, PWRCOM, { BDA } }, -{ "bdz-", BBO(16,BODZ,0,0), BBOATBI_MASK, PPCCOM, { BDM } }, -{ "bdz+", BBO(16,BODZ,0,0), BBOATBI_MASK, PPCCOM, { BDP } }, -{ "bdz", BBO(16,BODZ,0,0), BBOATBI_MASK, COM, { BD } }, -{ "bdzl-", BBO(16,BODZ,0,1), BBOATBI_MASK, PPCCOM, { BDM } }, -{ "bdzl+", BBO(16,BODZ,0,1), BBOATBI_MASK, PPCCOM, { BDP } }, -{ "bdzl", BBO(16,BODZ,0,1), BBOATBI_MASK, COM, { BD } }, -{ "bdza-", BBO(16,BODZ,1,0), BBOATBI_MASK, PPCCOM, { BDMA } }, -{ "bdza+", BBO(16,BODZ,1,0), BBOATBI_MASK, PPCCOM, { BDPA } }, -{ "bdza", BBO(16,BODZ,1,0), BBOATBI_MASK, COM, { BDA } }, -{ "bdzla-", BBO(16,BODZ,1,1), BBOATBI_MASK, PPCCOM, { BDMA } }, -{ "bdzla+", BBO(16,BODZ,1,1), BBOATBI_MASK, PPCCOM, { BDPA } }, -{ "bdzla", BBO(16,BODZ,1,1), BBOATBI_MASK, COM, { BDA } }, -{ "blt-", BBOCB(16,BOT,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "blt+", BBOCB(16,BOT,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "blt", BBOCB(16,BOT,CBLT,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "bltl-", BBOCB(16,BOT,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bltl+", BBOCB(16,BOT,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bltl", BBOCB(16,BOT,CBLT,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "blta-", BBOCB(16,BOT,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "blta+", BBOCB(16,BOT,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "blta", BBOCB(16,BOT,CBLT,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bltla-", BBOCB(16,BOT,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bltla+", BBOCB(16,BOT,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bltla", BBOCB(16,BOT,CBLT,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bgt-", BBOCB(16,BOT,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bgt+", BBOCB(16,BOT,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bgt", BBOCB(16,BOT,CBGT,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "bgtl-", BBOCB(16,BOT,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bgtl+", BBOCB(16,BOT,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bgtl", BBOCB(16,BOT,CBGT,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "bgta-", BBOCB(16,BOT,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bgta+", BBOCB(16,BOT,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bgta", BBOCB(16,BOT,CBGT,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bgtla-", BBOCB(16,BOT,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bgtla+", BBOCB(16,BOT,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bgtla", BBOCB(16,BOT,CBGT,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "beq-", BBOCB(16,BOT,CBEQ,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "beq+", BBOCB(16,BOT,CBEQ,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "beq", BBOCB(16,BOT,CBEQ,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "beql-", BBOCB(16,BOT,CBEQ,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "beql+", BBOCB(16,BOT,CBEQ,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "beql", BBOCB(16,BOT,CBEQ,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "beqa-", BBOCB(16,BOT,CBEQ,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "beqa+", BBOCB(16,BOT,CBEQ,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "beqa", BBOCB(16,BOT,CBEQ,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "beqla-", BBOCB(16,BOT,CBEQ,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "beqla+", BBOCB(16,BOT,CBEQ,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "beqla", BBOCB(16,BOT,CBEQ,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bso-", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bso+", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bso", 
BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "bsol-", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bsol+", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bsol", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "bsoa-", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bsoa+", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bsoa", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bsola-", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bsola+", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bsola", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bun-", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bun+", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bun", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BD } }, -{ "bunl-", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bunl+", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bunl", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BD } }, -{ "buna-", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "buna+", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "buna", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDA } }, -{ "bunla-", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bunla+", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bunla", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDA } }, -{ "bge-", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bge+", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bge", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "bgel-", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bgel+", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bgel", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "bgea-", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bgea+", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bgea", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bgela-", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bgela+", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bgela", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bnl-", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bnl+", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bnl", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "bnll-", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bnll+", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bnll", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "bnla-", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bnla+", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bnla", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bnlla-", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bnlla+", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bnlla", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "ble-", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "ble+", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "ble", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "blel-", BBOCB(16,BOF,CBGT,0,1), 
BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "blel+", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "blel", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "blea-", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "blea+", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "blea", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "blela-", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "blela+", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "blela", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bng-", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bng+", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bng", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "bngl-", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bngl+", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bngl", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "bnga-", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bnga+", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bnga", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bngla-", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bngla+", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bngla", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bne-", BBOCB(16,BOF,CBEQ,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bne+", BBOCB(16,BOF,CBEQ,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bne", BBOCB(16,BOF,CBEQ,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "bnel-", BBOCB(16,BOF,CBEQ,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bnel+", BBOCB(16,BOF,CBEQ,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bnel", BBOCB(16,BOF,CBEQ,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "bnea-", BBOCB(16,BOF,CBEQ,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bnea+", BBOCB(16,BOF,CBEQ,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bnea", BBOCB(16,BOF,CBEQ,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bnela-", BBOCB(16,BOF,CBEQ,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bnela+", BBOCB(16,BOF,CBEQ,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bnela", BBOCB(16,BOF,CBEQ,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bns-", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bns+", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bns", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "bnsl-", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bnsl+", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bnsl", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "bnsa-", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bnsa+", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bnsa", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bnsla-", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bnsla+", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bnsla", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bnu-", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bnu+", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bnu", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BD } }, -{ "bnul-", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bnul+", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, 
BDP } }, -{ "bnul", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BD } }, -{ "bnua-", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bnua+", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bnua", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDA } }, -{ "bnula-", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bnula+", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bnula", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDA } }, -{ "bdnzt-", BBO(16,BODNZT,0,0), BBOY_MASK, NOPOWER4, { BI, BDM } }, -{ "bdnzt+", BBO(16,BODNZT,0,0), BBOY_MASK, NOPOWER4, { BI, BDP } }, -{ "bdnzt", BBO(16,BODNZT,0,0), BBOY_MASK, PPCCOM, { BI, BD } }, -{ "bdnztl-", BBO(16,BODNZT,0,1), BBOY_MASK, NOPOWER4, { BI, BDM } }, -{ "bdnztl+", BBO(16,BODNZT,0,1), BBOY_MASK, NOPOWER4, { BI, BDP } }, -{ "bdnztl", BBO(16,BODNZT,0,1), BBOY_MASK, PPCCOM, { BI, BD } }, -{ "bdnzta-", BBO(16,BODNZT,1,0), BBOY_MASK, NOPOWER4, { BI, BDMA } }, -{ "bdnzta+", BBO(16,BODNZT,1,0), BBOY_MASK, NOPOWER4, { BI, BDPA } }, -{ "bdnzta", BBO(16,BODNZT,1,0), BBOY_MASK, PPCCOM, { BI, BDA } }, -{ "bdnztla-",BBO(16,BODNZT,1,1), BBOY_MASK, NOPOWER4, { BI, BDMA } }, -{ "bdnztla+",BBO(16,BODNZT,1,1), BBOY_MASK, NOPOWER4, { BI, BDPA } }, -{ "bdnztla", BBO(16,BODNZT,1,1), BBOY_MASK, PPCCOM, { BI, BDA } }, -{ "bdnzf-", BBO(16,BODNZF,0,0), BBOY_MASK, NOPOWER4, { BI, BDM } }, -{ "bdnzf+", BBO(16,BODNZF,0,0), BBOY_MASK, NOPOWER4, { BI, BDP } }, -{ "bdnzf", BBO(16,BODNZF,0,0), BBOY_MASK, PPCCOM, { BI, BD } }, -{ "bdnzfl-", BBO(16,BODNZF,0,1), BBOY_MASK, NOPOWER4, { BI, BDM } }, -{ "bdnzfl+", BBO(16,BODNZF,0,1), BBOY_MASK, NOPOWER4, { BI, BDP } }, -{ "bdnzfl", BBO(16,BODNZF,0,1), BBOY_MASK, PPCCOM, { BI, BD } }, -{ "bdnzfa-", BBO(16,BODNZF,1,0), BBOY_MASK, NOPOWER4, { BI, BDMA } }, -{ "bdnzfa+", BBO(16,BODNZF,1,0), BBOY_MASK, NOPOWER4, { BI, BDPA } }, -{ "bdnzfa", BBO(16,BODNZF,1,0), BBOY_MASK, PPCCOM, { BI, BDA } }, -{ "bdnzfla-",BBO(16,BODNZF,1,1), BBOY_MASK, NOPOWER4, { BI, BDMA } }, -{ "bdnzfla+",BBO(16,BODNZF,1,1), BBOY_MASK, NOPOWER4, { BI, BDPA } }, -{ "bdnzfla", BBO(16,BODNZF,1,1), BBOY_MASK, PPCCOM, { BI, BDA } }, -{ "bt-", BBO(16,BOT,0,0), BBOAT_MASK, PPCCOM, { BI, BDM } }, -{ "bt+", BBO(16,BOT,0,0), BBOAT_MASK, PPCCOM, { BI, BDP } }, -{ "bt", BBO(16,BOT,0,0), BBOAT_MASK, PPCCOM, { BI, BD } }, -{ "bbt", BBO(16,BOT,0,0), BBOAT_MASK, PWRCOM, { BI, BD } }, -{ "btl-", BBO(16,BOT,0,1), BBOAT_MASK, PPCCOM, { BI, BDM } }, -{ "btl+", BBO(16,BOT,0,1), BBOAT_MASK, PPCCOM, { BI, BDP } }, -{ "btl", BBO(16,BOT,0,1), BBOAT_MASK, PPCCOM, { BI, BD } }, -{ "bbtl", BBO(16,BOT,0,1), BBOAT_MASK, PWRCOM, { BI, BD } }, -{ "bta-", BBO(16,BOT,1,0), BBOAT_MASK, PPCCOM, { BI, BDMA } }, -{ "bta+", BBO(16,BOT,1,0), BBOAT_MASK, PPCCOM, { BI, BDPA } }, -{ "bta", BBO(16,BOT,1,0), BBOAT_MASK, PPCCOM, { BI, BDA } }, -{ "bbta", BBO(16,BOT,1,0), BBOAT_MASK, PWRCOM, { BI, BDA } }, -{ "btla-", BBO(16,BOT,1,1), BBOAT_MASK, PPCCOM, { BI, BDMA } }, -{ "btla+", BBO(16,BOT,1,1), BBOAT_MASK, PPCCOM, { BI, BDPA } }, -{ "btla", BBO(16,BOT,1,1), BBOAT_MASK, PPCCOM, { BI, BDA } }, -{ "bbtla", BBO(16,BOT,1,1), BBOAT_MASK, PWRCOM, { BI, BDA } }, -{ "bf-", BBO(16,BOF,0,0), BBOAT_MASK, PPCCOM, { BI, BDM } }, -{ "bf+", BBO(16,BOF,0,0), BBOAT_MASK, PPCCOM, { BI, BDP } }, -{ "bf", BBO(16,BOF,0,0), BBOAT_MASK, PPCCOM, { BI, BD } }, -{ "bbf", BBO(16,BOF,0,0), BBOAT_MASK, PWRCOM, { BI, BD } }, -{ "bfl-", BBO(16,BOF,0,1), BBOAT_MASK, PPCCOM, { BI, BDM } }, -{ "bfl+", BBO(16,BOF,0,1), BBOAT_MASK, PPCCOM, { BI, BDP } }, 
-{ "bfl", BBO(16,BOF,0,1), BBOAT_MASK, PPCCOM, { BI, BD } }, -{ "bbfl", BBO(16,BOF,0,1), BBOAT_MASK, PWRCOM, { BI, BD } }, -{ "bfa-", BBO(16,BOF,1,0), BBOAT_MASK, PPCCOM, { BI, BDMA } }, -{ "bfa+", BBO(16,BOF,1,0), BBOAT_MASK, PPCCOM, { BI, BDPA } }, -{ "bfa", BBO(16,BOF,1,0), BBOAT_MASK, PPCCOM, { BI, BDA } }, -{ "bbfa", BBO(16,BOF,1,0), BBOAT_MASK, PWRCOM, { BI, BDA } }, -{ "bfla-", BBO(16,BOF,1,1), BBOAT_MASK, PPCCOM, { BI, BDMA } }, -{ "bfla+", BBO(16,BOF,1,1), BBOAT_MASK, PPCCOM, { BI, BDPA } }, -{ "bfla", BBO(16,BOF,1,1), BBOAT_MASK, PPCCOM, { BI, BDA } }, -{ "bbfla", BBO(16,BOF,1,1), BBOAT_MASK, PWRCOM, { BI, BDA } }, -{ "bdzt-", BBO(16,BODZT,0,0), BBOY_MASK, NOPOWER4, { BI, BDM } }, -{ "bdzt+", BBO(16,BODZT,0,0), BBOY_MASK, NOPOWER4, { BI, BDP } }, -{ "bdzt", BBO(16,BODZT,0,0), BBOY_MASK, PPCCOM, { BI, BD } }, -{ "bdztl-", BBO(16,BODZT,0,1), BBOY_MASK, NOPOWER4, { BI, BDM } }, -{ "bdztl+", BBO(16,BODZT,0,1), BBOY_MASK, NOPOWER4, { BI, BDP } }, -{ "bdztl", BBO(16,BODZT,0,1), BBOY_MASK, PPCCOM, { BI, BD } }, -{ "bdzta-", BBO(16,BODZT,1,0), BBOY_MASK, NOPOWER4, { BI, BDMA } }, -{ "bdzta+", BBO(16,BODZT,1,0), BBOY_MASK, NOPOWER4, { BI, BDPA } }, -{ "bdzta", BBO(16,BODZT,1,0), BBOY_MASK, PPCCOM, { BI, BDA } }, -{ "bdztla-", BBO(16,BODZT,1,1), BBOY_MASK, NOPOWER4, { BI, BDMA } }, -{ "bdztla+", BBO(16,BODZT,1,1), BBOY_MASK, NOPOWER4, { BI, BDPA } }, -{ "bdztla", BBO(16,BODZT,1,1), BBOY_MASK, PPCCOM, { BI, BDA } }, -{ "bdzf-", BBO(16,BODZF,0,0), BBOY_MASK, NOPOWER4, { BI, BDM } }, -{ "bdzf+", BBO(16,BODZF,0,0), BBOY_MASK, NOPOWER4, { BI, BDP } }, -{ "bdzf", BBO(16,BODZF,0,0), BBOY_MASK, PPCCOM, { BI, BD } }, -{ "bdzfl-", BBO(16,BODZF,0,1), BBOY_MASK, NOPOWER4, { BI, BDM } }, -{ "bdzfl+", BBO(16,BODZF,0,1), BBOY_MASK, NOPOWER4, { BI, BDP } }, -{ "bdzfl", BBO(16,BODZF,0,1), BBOY_MASK, PPCCOM, { BI, BD } }, -{ "bdzfa-", BBO(16,BODZF,1,0), BBOY_MASK, NOPOWER4, { BI, BDMA } }, -{ "bdzfa+", BBO(16,BODZF,1,0), BBOY_MASK, NOPOWER4, { BI, BDPA } }, -{ "bdzfa", BBO(16,BODZF,1,0), BBOY_MASK, PPCCOM, { BI, BDA } }, -{ "bdzfla-", BBO(16,BODZF,1,1), BBOY_MASK, NOPOWER4, { BI, BDMA } }, -{ "bdzfla+", BBO(16,BODZF,1,1), BBOY_MASK, NOPOWER4, { BI, BDPA } }, -{ "bdzfla", BBO(16,BODZF,1,1), BBOY_MASK, PPCCOM, { BI, BDA } }, -{ "bc-", B(16,0,0), B_MASK, PPCCOM, { BOE, BI, BDM } }, -{ "bc+", B(16,0,0), B_MASK, PPCCOM, { BOE, BI, BDP } }, -{ "bc", B(16,0,0), B_MASK, COM, { BO, BI, BD } }, -{ "bcl-", B(16,0,1), B_MASK, PPCCOM, { BOE, BI, BDM } }, -{ "bcl+", B(16,0,1), B_MASK, PPCCOM, { BOE, BI, BDP } }, -{ "bcl", B(16,0,1), B_MASK, COM, { BO, BI, BD } }, -{ "bca-", B(16,1,0), B_MASK, PPCCOM, { BOE, BI, BDMA } }, -{ "bca+", B(16,1,0), B_MASK, PPCCOM, { BOE, BI, BDPA } }, -{ "bca", B(16,1,0), B_MASK, COM, { BO, BI, BDA } }, -{ "bcla-", B(16,1,1), B_MASK, PPCCOM, { BOE, BI, BDMA } }, -{ "bcla+", B(16,1,1), B_MASK, PPCCOM, { BOE, BI, BDPA } }, -{ "bcla", B(16,1,1), B_MASK, COM, { BO, BI, BDA } }, - -{ "sc", SC(17,1,0), SC_MASK, PPC, { LEV } }, -{ "svc", SC(17,0,0), SC_MASK, POWER, { SVC_LEV, FL1, FL2 } }, -{ "svcl", SC(17,0,1), SC_MASK, POWER, { SVC_LEV, FL1, FL2 } }, -{ "svca", SC(17,1,0), SC_MASK, PWRCOM, { SV } }, -{ "svcla", SC(17,1,1), SC_MASK, POWER, { SV } }, - -{ "b", B(18,0,0), B_MASK, COM, { LI } }, -{ "bl", B(18,0,1), B_MASK, COM, { LI } }, -{ "ba", B(18,1,0), B_MASK, COM, { LIA } }, -{ "bla", B(18,1,1), B_MASK, COM, { LIA } }, - -{ "mcrf", XL(19,0), XLBB_MASK|(3 << 21)|(3 << 16), COM, { BF, BFA } }, - -{ "blr", XLO(19,BOU,16,0), XLBOBIBB_MASK, PPCCOM, { 0 } }, -{ "br", XLO(19,BOU,16,0), XLBOBIBB_MASK, 
PWRCOM, { 0 } }, -{ "blrl", XLO(19,BOU,16,1), XLBOBIBB_MASK, PPCCOM, { 0 } }, -{ "brl", XLO(19,BOU,16,1), XLBOBIBB_MASK, PWRCOM, { 0 } }, -{ "bdnzlr", XLO(19,BODNZ,16,0), XLBOBIBB_MASK, PPCCOM, { 0 } }, -{ "bdnzlr-", XLO(19,BODNZ,16,0), XLBOBIBB_MASK, NOPOWER4, { 0 } }, -{ "bdnzlr-", XLO(19,BODNZM4,16,0), XLBOBIBB_MASK, POWER4, { 0 } }, -{ "bdnzlr+", XLO(19,BODNZP,16,0), XLBOBIBB_MASK, NOPOWER4, { 0 } }, -{ "bdnzlr+", XLO(19,BODNZP4,16,0), XLBOBIBB_MASK, POWER4, { 0 } }, -{ "bdnzlrl", XLO(19,BODNZ,16,1), XLBOBIBB_MASK, PPCCOM, { 0 } }, -{ "bdnzlrl-",XLO(19,BODNZ,16,1), XLBOBIBB_MASK, NOPOWER4, { 0 } }, -{ "bdnzlrl-",XLO(19,BODNZM4,16,1), XLBOBIBB_MASK, POWER4, { 0 } }, -{ "bdnzlrl+",XLO(19,BODNZP,16,1), XLBOBIBB_MASK, NOPOWER4, { 0 } }, -{ "bdnzlrl+",XLO(19,BODNZP4,16,1), XLBOBIBB_MASK, POWER4, { 0 } }, -{ "bdzlr", XLO(19,BODZ,16,0), XLBOBIBB_MASK, PPCCOM, { 0 } }, -{ "bdzlr-", XLO(19,BODZ,16,0), XLBOBIBB_MASK, NOPOWER4, { 0 } }, -{ "bdzlr-", XLO(19,BODZM4,16,0), XLBOBIBB_MASK, POWER4, { 0 } }, -{ "bdzlr+", XLO(19,BODZP,16,0), XLBOBIBB_MASK, NOPOWER4, { 0 } }, -{ "bdzlr+", XLO(19,BODZP4,16,0), XLBOBIBB_MASK, POWER4, { 0 } }, -{ "bdzlrl", XLO(19,BODZ,16,1), XLBOBIBB_MASK, PPCCOM, { 0 } }, -{ "bdzlrl-", XLO(19,BODZ,16,1), XLBOBIBB_MASK, NOPOWER4, { 0 } }, -{ "bdzlrl-", XLO(19,BODZM4,16,1), XLBOBIBB_MASK, POWER4, { 0 } }, -{ "bdzlrl+", XLO(19,BODZP,16,1), XLBOBIBB_MASK, NOPOWER4, { 0 } }, -{ "bdzlrl+", XLO(19,BODZP4,16,1), XLBOBIBB_MASK, POWER4, { 0 } }, -{ "bltlr", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bltlr-", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bltlr-", XLOCB(19,BOTM4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bltlr+", XLOCB(19,BOTP,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bltlr+", XLOCB(19,BOTP4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bltr", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bltlrl", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bltlrl-", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bltlrl-", XLOCB(19,BOTM4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bltlrl+", XLOCB(19,BOTP,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bltlrl+", XLOCB(19,BOTP4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bltrl", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bgtlr", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bgtlr-", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgtlr-", XLOCB(19,BOTM4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgtlr+", XLOCB(19,BOTP,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgtlr+", XLOCB(19,BOTP4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgtr", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bgtlrl", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bgtlrl-", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgtlrl-", XLOCB(19,BOTM4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgtlrl+", XLOCB(19,BOTP,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgtlrl+", XLOCB(19,BOTP4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgtrl", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "beqlr", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "beqlr-", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "beqlr-", XLOCB(19,BOTM4,CBEQ,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "beqlr+", XLOCB(19,BOTP,CBEQ,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "beqlr+", XLOCB(19,BOTP4,CBEQ,16,0), 
XLBOCBBB_MASK, POWER4, { CR } }, -{ "beqr", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "beqlrl", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "beqlrl-", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "beqlrl-", XLOCB(19,BOTM4,CBEQ,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "beqlrl+", XLOCB(19,BOTP,CBEQ,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "beqlrl+", XLOCB(19,BOTP4,CBEQ,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "beqrl", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bsolr", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bsolr-", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bsolr-", XLOCB(19,BOTM4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bsolr+", XLOCB(19,BOTP,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bsolr+", XLOCB(19,BOTP4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bsor", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bsolrl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bsolrl-", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bsolrl-", XLOCB(19,BOTM4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bsolrl+", XLOCB(19,BOTP,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bsolrl+", XLOCB(19,BOTP4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bsorl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bunlr", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bunlr-", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bunlr-", XLOCB(19,BOTM4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bunlr+", XLOCB(19,BOTP,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bunlr+", XLOCB(19,BOTP4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bunlrl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bunlrl-", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bunlrl-", XLOCB(19,BOTM4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bunlrl+", XLOCB(19,BOTP,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bunlrl+", XLOCB(19,BOTP4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgelr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bgelr-", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgelr-", XLOCB(19,BOFM4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgelr+", XLOCB(19,BOFP,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgelr+", XLOCB(19,BOFP4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bger", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bgelrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bgelrl-", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgelrl-", XLOCB(19,BOFM4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgelrl+", XLOCB(19,BOFP,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgelrl+", XLOCB(19,BOFP4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgerl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bnllr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnllr-", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnllr-", XLOCB(19,BOFM4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnllr+", XLOCB(19,BOFP,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnllr+", XLOCB(19,BOFP4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnlr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bnllrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnllrl-", XLOCB(19,BOF,CBLT,16,1), 
XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnllrl-", XLOCB(19,BOFM4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnllrl+", XLOCB(19,BOFP,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnllrl+", XLOCB(19,BOFP4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnlrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "blelr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "blelr-", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "blelr-", XLOCB(19,BOFM4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "blelr+", XLOCB(19,BOFP,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "blelr+", XLOCB(19,BOFP4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bler", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "blelrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "blelrl-", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "blelrl-", XLOCB(19,BOFM4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "blelrl+", XLOCB(19,BOFP,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "blelrl+", XLOCB(19,BOFP4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "blerl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bnglr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnglr-", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnglr-", XLOCB(19,BOFM4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnglr+", XLOCB(19,BOFP,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnglr+", XLOCB(19,BOFP4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bngr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bnglrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnglrl-", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnglrl-", XLOCB(19,BOFM4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnglrl+", XLOCB(19,BOFP,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnglrl+", XLOCB(19,BOFP4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bngrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bnelr", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnelr-", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnelr-", XLOCB(19,BOFM4,CBEQ,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnelr+", XLOCB(19,BOFP,CBEQ,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnelr+", XLOCB(19,BOFP4,CBEQ,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bner", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bnelrl", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnelrl-", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnelrl-", XLOCB(19,BOFM4,CBEQ,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnelrl+", XLOCB(19,BOFP,CBEQ,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnelrl+", XLOCB(19,BOFP4,CBEQ,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnerl", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bnslr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnslr-", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnslr-", XLOCB(19,BOFM4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnslr+", XLOCB(19,BOFP,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnslr+", XLOCB(19,BOFP4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnsr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bnslrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnslrl-", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnslrl-", 
XLOCB(19,BOFM4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnslrl+", XLOCB(19,BOFP,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnslrl+", XLOCB(19,BOFP4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnsrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bnulr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnulr-", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnulr-", XLOCB(19,BOFM4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnulr+", XLOCB(19,BOFP,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnulr+", XLOCB(19,BOFP4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnulrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnulrl-", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnulrl-", XLOCB(19,BOFM4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnulrl+", XLOCB(19,BOFP,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnulrl+", XLOCB(19,BOFP4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "btlr", XLO(19,BOT,16,0), XLBOBB_MASK, PPCCOM, { BI } }, -{ "btlr-", XLO(19,BOT,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "btlr-", XLO(19,BOTM4,16,0), XLBOBB_MASK, POWER4, { BI } }, -{ "btlr+", XLO(19,BOTP,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "btlr+", XLO(19,BOTP4,16,0), XLBOBB_MASK, POWER4, { BI } }, -{ "bbtr", XLO(19,BOT,16,0), XLBOBB_MASK, PWRCOM, { BI } }, -{ "btlrl", XLO(19,BOT,16,1), XLBOBB_MASK, PPCCOM, { BI } }, -{ "btlrl-", XLO(19,BOT,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "btlrl-", XLO(19,BOTM4,16,1), XLBOBB_MASK, POWER4, { BI } }, -{ "btlrl+", XLO(19,BOTP,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "btlrl+", XLO(19,BOTP4,16,1), XLBOBB_MASK, POWER4, { BI } }, -{ "bbtrl", XLO(19,BOT,16,1), XLBOBB_MASK, PWRCOM, { BI } }, -{ "bflr", XLO(19,BOF,16,0), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bflr-", XLO(19,BOF,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bflr-", XLO(19,BOFM4,16,0), XLBOBB_MASK, POWER4, { BI } }, -{ "bflr+", XLO(19,BOFP,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bflr+", XLO(19,BOFP4,16,0), XLBOBB_MASK, POWER4, { BI } }, -{ "bbfr", XLO(19,BOF,16,0), XLBOBB_MASK, PWRCOM, { BI } }, -{ "bflrl", XLO(19,BOF,16,1), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bflrl-", XLO(19,BOF,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bflrl-", XLO(19,BOFM4,16,1), XLBOBB_MASK, POWER4, { BI } }, -{ "bflrl+", XLO(19,BOFP,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bflrl+", XLO(19,BOFP4,16,1), XLBOBB_MASK, POWER4, { BI } }, -{ "bbfrl", XLO(19,BOF,16,1), XLBOBB_MASK, PWRCOM, { BI } }, -{ "bdnztlr", XLO(19,BODNZT,16,0), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bdnztlr-",XLO(19,BODNZT,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdnztlr+",XLO(19,BODNZTP,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdnztlrl",XLO(19,BODNZT,16,1), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bdnztlrl-",XLO(19,BODNZT,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdnztlrl+",XLO(19,BODNZTP,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdnzflr", XLO(19,BODNZF,16,0), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bdnzflr-",XLO(19,BODNZF,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdnzflr+",XLO(19,BODNZFP,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdnzflrl",XLO(19,BODNZF,16,1), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bdnzflrl-",XLO(19,BODNZF,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdnzflrl+",XLO(19,BODNZFP,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdztlr", XLO(19,BODZT,16,0), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bdztlr-", XLO(19,BODZT,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdztlr+", XLO(19,BODZTP,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdztlrl", 
XLO(19,BODZT,16,1), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bdztlrl-",XLO(19,BODZT,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdztlrl+",XLO(19,BODZTP,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdzflr", XLO(19,BODZF,16,0), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bdzflr-", XLO(19,BODZF,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdzflr+", XLO(19,BODZFP,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdzflrl", XLO(19,BODZF,16,1), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bdzflrl-",XLO(19,BODZF,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdzflrl+",XLO(19,BODZFP,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bclr+", XLYLK(19,16,1,0), XLYBB_MASK, PPCCOM, { BOE, BI } }, -{ "bclrl+", XLYLK(19,16,1,1), XLYBB_MASK, PPCCOM, { BOE, BI } }, -{ "bclr-", XLYLK(19,16,0,0), XLYBB_MASK, PPCCOM, { BOE, BI } }, -{ "bclrl-", XLYLK(19,16,0,1), XLYBB_MASK, PPCCOM, { BOE, BI } }, -{ "bclr", XLLK(19,16,0), XLBH_MASK, PPCCOM, { BO, BI, BH } }, -{ "bclrl", XLLK(19,16,1), XLBH_MASK, PPCCOM, { BO, BI, BH } }, -{ "bcr", XLLK(19,16,0), XLBB_MASK, PWRCOM, { BO, BI } }, -{ "bcrl", XLLK(19,16,1), XLBB_MASK, PWRCOM, { BO, BI } }, -{ "bclre", XLLK(19,17,0), XLBB_MASK, BOOKE64, { BO, BI } }, -{ "bclrel", XLLK(19,17,1), XLBB_MASK, BOOKE64, { BO, BI } }, - -{ "rfid", XL(19,18), 0xffffffff, PPC64, { 0 } }, - -{ "crnot", XL(19,33), XL_MASK, PPCCOM, { BT, BA, BBA } }, -{ "crnor", XL(19,33), XL_MASK, COM, { BT, BA, BB } }, -{ "rfmci", X(19,38), 0xffffffff, PPCRFMCI, { 0 } }, - -{ "rfi", XL(19,50), 0xffffffff, COM, { 0 } }, -{ "rfci", XL(19,51), 0xffffffff, PPC403 | BOOKE, { 0 } }, - -{ "rfsvc", XL(19,82), 0xffffffff, POWER, { 0 } }, - -{ "crandc", XL(19,129), XL_MASK, COM, { BT, BA, BB } }, - -{ "isync", XL(19,150), 0xffffffff, PPCCOM, { 0 } }, -{ "ics", XL(19,150), 0xffffffff, PWRCOM, { 0 } }, - -{ "crclr", XL(19,193), XL_MASK, PPCCOM, { BT, BAT, BBA } }, -{ "crxor", XL(19,193), XL_MASK, COM, { BT, BA, BB } }, - -{ "crnand", XL(19,225), XL_MASK, COM, { BT, BA, BB } }, - -{ "crand", XL(19,257), XL_MASK, COM, { BT, BA, BB } }, - -{ "hrfid", XL(19,274), 0xffffffff, POWER5 | CELL, { 0 } }, - -{ "crset", XL(19,289), XL_MASK, PPCCOM, { BT, BAT, BBA } }, -{ "creqv", XL(19,289), XL_MASK, COM, { BT, BA, BB } }, - -{ "doze", XL(19,402), 0xffffffff, POWER6, { 0 } }, - -{ "crorc", XL(19,417), XL_MASK, COM, { BT, BA, BB } }, - -{ "nap", XL(19,434), 0xffffffff, POWER6, { 0 } }, - -{ "crmove", XL(19,449), XL_MASK, PPCCOM, { BT, BA, BBA } }, -{ "cror", XL(19,449), XL_MASK, COM, { BT, BA, BB } }, - -{ "sleep", XL(19,466), 0xffffffff, POWER6, { 0 } }, -{ "rvwinkle", XL(19,498), 0xffffffff, POWER6, { 0 } }, - -{ "bctr", XLO(19,BOU,528,0), XLBOBIBB_MASK, COM, { 0 } }, -{ "bctrl", XLO(19,BOU,528,1), XLBOBIBB_MASK, COM, { 0 } }, -{ "bltctr", XLOCB(19,BOT,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bltctr-", XLOCB(19,BOT,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bltctr-", XLOCB(19,BOTM4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bltctr+", XLOCB(19,BOTP,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bltctr+", XLOCB(19,BOTP4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bltctrl", XLOCB(19,BOT,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bltctrl-",XLOCB(19,BOT,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bltctrl-",XLOCB(19,BOTM4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bltctrl+",XLOCB(19,BOTP,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bltctrl+",XLOCB(19,BOTP4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgtctr", XLOCB(19,BOT,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bgtctr-", XLOCB(19,BOT,CBGT,528,0), 
XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgtctr-", XLOCB(19,BOTM4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgtctr+", XLOCB(19,BOTP,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgtctr+", XLOCB(19,BOTP4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgtctrl", XLOCB(19,BOT,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bgtctrl-",XLOCB(19,BOT,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgtctrl-",XLOCB(19,BOTM4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgtctrl+",XLOCB(19,BOTP,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgtctrl+",XLOCB(19,BOTP4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "beqctr", XLOCB(19,BOT,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "beqctr-", XLOCB(19,BOT,CBEQ,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "beqctr-", XLOCB(19,BOTM4,CBEQ,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "beqctr+", XLOCB(19,BOTP,CBEQ,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "beqctr+", XLOCB(19,BOTP4,CBEQ,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "beqctrl", XLOCB(19,BOT,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "beqctrl-",XLOCB(19,BOT,CBEQ,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "beqctrl-",XLOCB(19,BOTM4,CBEQ,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "beqctrl+",XLOCB(19,BOTP,CBEQ,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "beqctrl+",XLOCB(19,BOTP4,CBEQ,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bsoctr", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bsoctr-", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bsoctr-", XLOCB(19,BOTM4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bsoctr+", XLOCB(19,BOTP,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bsoctr+", XLOCB(19,BOTP4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bsoctrl", XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bsoctrl-",XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bsoctrl-",XLOCB(19,BOTM4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bsoctrl+",XLOCB(19,BOTP,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bsoctrl+",XLOCB(19,BOTP4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bunctr", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bunctr-", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bunctr-", XLOCB(19,BOTM4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bunctr+", XLOCB(19,BOTP,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bunctr+", XLOCB(19,BOTP4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bunctrl", XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bunctrl-",XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bunctrl-",XLOCB(19,BOTM4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bunctrl+",XLOCB(19,BOTP,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bunctrl+",XLOCB(19,BOTP4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgectr", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bgectr-", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgectr-", XLOCB(19,BOFM4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgectr+", XLOCB(19,BOFP,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgectr+", XLOCB(19,BOFP4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgectrl", XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bgectrl-",XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgectrl-",XLOCB(19,BOFM4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgectrl+",XLOCB(19,BOFP,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ 
"bgectrl+",XLOCB(19,BOFP4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnlctr", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnlctr-", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnlctr-", XLOCB(19,BOFM4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnlctr+", XLOCB(19,BOFP,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnlctr+", XLOCB(19,BOFP4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnlctrl", XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnlctrl-",XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnlctrl-",XLOCB(19,BOFM4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnlctrl+",XLOCB(19,BOFP,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnlctrl+",XLOCB(19,BOFP4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "blectr", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "blectr-", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "blectr-", XLOCB(19,BOFM4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "blectr+", XLOCB(19,BOFP,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "blectr+", XLOCB(19,BOFP4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "blectrl", XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "blectrl-",XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "blectrl-",XLOCB(19,BOFM4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "blectrl+",XLOCB(19,BOFP,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "blectrl+",XLOCB(19,BOFP4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bngctr", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bngctr-", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bngctr-", XLOCB(19,BOFM4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bngctr+", XLOCB(19,BOFP,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bngctr+", XLOCB(19,BOFP4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bngctrl", XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bngctrl-",XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bngctrl-",XLOCB(19,BOFM4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bngctrl+",XLOCB(19,BOFP,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bngctrl+",XLOCB(19,BOFP4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnectr", XLOCB(19,BOF,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnectr-", XLOCB(19,BOF,CBEQ,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnectr-", XLOCB(19,BOFM4,CBEQ,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnectr+", XLOCB(19,BOFP,CBEQ,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnectr+", XLOCB(19,BOFP4,CBEQ,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnectrl", XLOCB(19,BOF,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnectrl-",XLOCB(19,BOF,CBEQ,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnectrl-",XLOCB(19,BOFM4,CBEQ,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnectrl+",XLOCB(19,BOFP,CBEQ,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnectrl+",XLOCB(19,BOFP4,CBEQ,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnsctr", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnsctr-", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnsctr-", XLOCB(19,BOFM4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnsctr+", XLOCB(19,BOFP,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnsctr+", XLOCB(19,BOFP4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnsctrl", XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnsctrl-",XLOCB(19,BOF,CBSO,528,1), 
XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnsctrl-",XLOCB(19,BOFM4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnsctrl+",XLOCB(19,BOFP,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnsctrl+",XLOCB(19,BOFP4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnuctr", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnuctr-", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnuctr-", XLOCB(19,BOFM4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnuctr+", XLOCB(19,BOFP,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnuctr+", XLOCB(19,BOFP4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnuctrl", XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnuctrl-",XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnuctrl-",XLOCB(19,BOFM4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnuctrl+",XLOCB(19,BOFP,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnuctrl+",XLOCB(19,BOFP4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "btctr", XLO(19,BOT,528,0), XLBOBB_MASK, PPCCOM, { BI } }, -{ "btctr-", XLO(19,BOT,528,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "btctr-", XLO(19,BOTM4,528,0), XLBOBB_MASK, POWER4, { BI } }, -{ "btctr+", XLO(19,BOTP,528,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "btctr+", XLO(19,BOTP4,528,0), XLBOBB_MASK, POWER4, { BI } }, -{ "btctrl", XLO(19,BOT,528,1), XLBOBB_MASK, PPCCOM, { BI } }, -{ "btctrl-", XLO(19,BOT,528,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "btctrl-", XLO(19,BOTM4,528,1), XLBOBB_MASK, POWER4, { BI } }, -{ "btctrl+", XLO(19,BOTP,528,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "btctrl+", XLO(19,BOTP4,528,1), XLBOBB_MASK, POWER4, { BI } }, -{ "bfctr", XLO(19,BOF,528,0), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bfctr-", XLO(19,BOF,528,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bfctr-", XLO(19,BOFM4,528,0), XLBOBB_MASK, POWER4, { BI } }, -{ "bfctr+", XLO(19,BOFP,528,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bfctr+", XLO(19,BOFP4,528,0), XLBOBB_MASK, POWER4, { BI } }, -{ "bfctrl", XLO(19,BOF,528,1), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bfctrl-", XLO(19,BOF,528,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bfctrl-", XLO(19,BOFM4,528,1), XLBOBB_MASK, POWER4, { BI } }, -{ "bfctrl+", XLO(19,BOFP,528,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bfctrl+", XLO(19,BOFP4,528,1), XLBOBB_MASK, POWER4, { BI } }, -{ "bcctr-", XLYLK(19,528,0,0), XLYBB_MASK, PPCCOM, { BOE, BI } }, -{ "bcctr+", XLYLK(19,528,1,0), XLYBB_MASK, PPCCOM, { BOE, BI } }, -{ "bcctrl-", XLYLK(19,528,0,1), XLYBB_MASK, PPCCOM, { BOE, BI } }, -{ "bcctrl+", XLYLK(19,528,1,1), XLYBB_MASK, PPCCOM, { BOE, BI } }, -{ "bcctr", XLLK(19,528,0), XLBH_MASK, PPCCOM, { BO, BI, BH } }, -{ "bcctrl", XLLK(19,528,1), XLBH_MASK, PPCCOM, { BO, BI, BH } }, -{ "bcc", XLLK(19,528,0), XLBB_MASK, PWRCOM, { BO, BI } }, -{ "bccl", XLLK(19,528,1), XLBB_MASK, PWRCOM, { BO, BI } }, -{ "bcctre", XLLK(19,529,0), XLBB_MASK, BOOKE64, { BO, BI } }, -{ "bcctrel", XLLK(19,529,1), XLBB_MASK, BOOKE64, { BO, BI } }, - -{ "rlwimi", M(20,0), M_MASK, PPCCOM, { RA,RS,SH,MBE,ME } }, -{ "rlimi", M(20,0), M_MASK, PWRCOM, { RA,RS,SH,MBE,ME } }, - -{ "rlwimi.", M(20,1), M_MASK, PPCCOM, { RA,RS,SH,MBE,ME } }, -{ "rlimi.", M(20,1), M_MASK, PWRCOM, { RA,RS,SH,MBE,ME } }, - -{ "rotlwi", MME(21,31,0), MMBME_MASK, PPCCOM, { RA, RS, SH } }, -{ "clrlwi", MME(21,31,0), MSHME_MASK, PPCCOM, { RA, RS, MB } }, -{ "rlwinm", M(21,0), M_MASK, PPCCOM, { RA,RS,SH,MBE,ME } }, -{ "rlinm", M(21,0), M_MASK, PWRCOM, { RA,RS,SH,MBE,ME } }, -{ "rotlwi.", MME(21,31,1), MMBME_MASK, PPCCOM, { RA,RS,SH } }, -{ "clrlwi.", MME(21,31,1), 
MSHME_MASK, PPCCOM, { RA, RS, MB } }, -{ "rlwinm.", M(21,1), M_MASK, PPCCOM, { RA,RS,SH,MBE,ME } }, -{ "rlinm.", M(21,1), M_MASK, PWRCOM, { RA,RS,SH,MBE,ME } }, - -{ "rlmi", M(22,0), M_MASK, M601, { RA,RS,RB,MBE,ME } }, -{ "rlmi.", M(22,1), M_MASK, M601, { RA,RS,RB,MBE,ME } }, - -{ "be", B(22,0,0), B_MASK, BOOKE64, { LI } }, -{ "bel", B(22,0,1), B_MASK, BOOKE64, { LI } }, -{ "bea", B(22,1,0), B_MASK, BOOKE64, { LIA } }, -{ "bela", B(22,1,1), B_MASK, BOOKE64, { LIA } }, - -{ "rotlw", MME(23,31,0), MMBME_MASK, PPCCOM, { RA, RS, RB } }, -{ "rlwnm", M(23,0), M_MASK, PPCCOM, { RA,RS,RB,MBE,ME } }, -{ "rlnm", M(23,0), M_MASK, PWRCOM, { RA,RS,RB,MBE,ME } }, -{ "rotlw.", MME(23,31,1), MMBME_MASK, PPCCOM, { RA, RS, RB } }, -{ "rlwnm.", M(23,1), M_MASK, PPCCOM, { RA,RS,RB,MBE,ME } }, -{ "rlnm.", M(23,1), M_MASK, PWRCOM, { RA,RS,RB,MBE,ME } }, - -{ "nop", OP(24), 0xffffffff, PPCCOM, { 0 } }, -{ "ori", OP(24), OP_MASK, PPCCOM, { RA, RS, UI } }, -{ "oril", OP(24), OP_MASK, PWRCOM, { RA, RS, UI } }, - -{ "oris", OP(25), OP_MASK, PPCCOM, { RA, RS, UI } }, -{ "oriu", OP(25), OP_MASK, PWRCOM, { RA, RS, UI } }, - -{ "xori", OP(26), OP_MASK, PPCCOM, { RA, RS, UI } }, -{ "xoril", OP(26), OP_MASK, PWRCOM, { RA, RS, UI } }, - -{ "xoris", OP(27), OP_MASK, PPCCOM, { RA, RS, UI } }, -{ "xoriu", OP(27), OP_MASK, PWRCOM, { RA, RS, UI } }, - -{ "andi.", OP(28), OP_MASK, PPCCOM, { RA, RS, UI } }, -{ "andil.", OP(28), OP_MASK, PWRCOM, { RA, RS, UI } }, - -{ "andis.", OP(29), OP_MASK, PPCCOM, { RA, RS, UI } }, -{ "andiu.", OP(29), OP_MASK, PWRCOM, { RA, RS, UI } }, - -{ "rotldi", MD(30,0,0), MDMB_MASK, PPC64, { RA, RS, SH6 } }, -{ "clrldi", MD(30,0,0), MDSH_MASK, PPC64, { RA, RS, MB6 } }, -{ "rldicl", MD(30,0,0), MD_MASK, PPC64, { RA, RS, SH6, MB6 } }, -{ "rotldi.", MD(30,0,1), MDMB_MASK, PPC64, { RA, RS, SH6 } }, -{ "clrldi.", MD(30,0,1), MDSH_MASK, PPC64, { RA, RS, MB6 } }, -{ "rldicl.", MD(30,0,1), MD_MASK, PPC64, { RA, RS, SH6, MB6 } }, - -{ "rldicr", MD(30,1,0), MD_MASK, PPC64, { RA, RS, SH6, ME6 } }, -{ "rldicr.", MD(30,1,1), MD_MASK, PPC64, { RA, RS, SH6, ME6 } }, - -{ "rldic", MD(30,2,0), MD_MASK, PPC64, { RA, RS, SH6, MB6 } }, -{ "rldic.", MD(30,2,1), MD_MASK, PPC64, { RA, RS, SH6, MB6 } }, - -{ "rldimi", MD(30,3,0), MD_MASK, PPC64, { RA, RS, SH6, MB6 } }, -{ "rldimi.", MD(30,3,1), MD_MASK, PPC64, { RA, RS, SH6, MB6 } }, - -{ "rotld", MDS(30,8,0), MDSMB_MASK, PPC64, { RA, RS, RB } }, -{ "rldcl", MDS(30,8,0), MDS_MASK, PPC64, { RA, RS, RB, MB6 } }, -{ "rotld.", MDS(30,8,1), MDSMB_MASK, PPC64, { RA, RS, RB } }, -{ "rldcl.", MDS(30,8,1), MDS_MASK, PPC64, { RA, RS, RB, MB6 } }, - -{ "rldcr", MDS(30,9,0), MDS_MASK, PPC64, { RA, RS, RB, ME6 } }, -{ "rldcr.", MDS(30,9,1), MDS_MASK, PPC64, { RA, RS, RB, ME6 } }, - -{ "cmpw", XOPL(31,0,0), XCMPL_MASK, PPCCOM, { OBF, RA, RB } }, -{ "cmpd", XOPL(31,0,1), XCMPL_MASK, PPC64, { OBF, RA, RB } }, -{ "cmp", X(31,0), XCMP_MASK, PPC, { BF, L, RA, RB } }, -{ "cmp", X(31,0), XCMPL_MASK, PWRCOM, { BF, RA, RB } }, - -{ "twlgt", XTO(31,4,TOLGT), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tlgt", XTO(31,4,TOLGT), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twllt", XTO(31,4,TOLLT), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tllt", XTO(31,4,TOLLT), XTO_MASK, PWRCOM, { RA, RB } }, -{ "tweq", XTO(31,4,TOEQ), XTO_MASK, PPCCOM, { RA, RB } }, -{ "teq", XTO(31,4,TOEQ), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twlge", XTO(31,4,TOLGE), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tlge", XTO(31,4,TOLGE), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twlnl", XTO(31,4,TOLNL), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tlnl", XTO(31,4,TOLNL), XTO_MASK, 
PWRCOM, { RA, RB } }, -{ "twlle", XTO(31,4,TOLLE), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tlle", XTO(31,4,TOLLE), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twlng", XTO(31,4,TOLNG), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tlng", XTO(31,4,TOLNG), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twgt", XTO(31,4,TOGT), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tgt", XTO(31,4,TOGT), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twge", XTO(31,4,TOGE), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tge", XTO(31,4,TOGE), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twnl", XTO(31,4,TONL), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tnl", XTO(31,4,TONL), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twlt", XTO(31,4,TOLT), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tlt", XTO(31,4,TOLT), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twle", XTO(31,4,TOLE), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tle", XTO(31,4,TOLE), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twng", XTO(31,4,TONG), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tng", XTO(31,4,TONG), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twne", XTO(31,4,TONE), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tne", XTO(31,4,TONE), XTO_MASK, PWRCOM, { RA, RB } }, -{ "trap", XTO(31,4,TOU), 0xffffffff, PPCCOM, { 0 } }, -{ "tw", X(31,4), X_MASK, PPCCOM, { TO, RA, RB } }, -{ "t", X(31,4), X_MASK, PWRCOM, { TO, RA, RB } }, - -{ "subfc", XO(31,8,0,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "sf", XO(31,8,0,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "subc", XO(31,8,0,0), XO_MASK, PPC, { RT, RB, RA } }, -{ "subfc.", XO(31,8,0,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "sf.", XO(31,8,0,1), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "subc.", XO(31,8,0,1), XO_MASK, PPCCOM, { RT, RB, RA } }, -{ "subfco", XO(31,8,1,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "sfo", XO(31,8,1,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "subco", XO(31,8,1,0), XO_MASK, PPC, { RT, RB, RA } }, -{ "subfco.", XO(31,8,1,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "sfo.", XO(31,8,1,1), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "subco.", XO(31,8,1,1), XO_MASK, PPC, { RT, RB, RA } }, - -{ "mulhdu", XO(31,9,0,0), XO_MASK, PPC64, { RT, RA, RB } }, -{ "mulhdu.", XO(31,9,0,1), XO_MASK, PPC64, { RT, RA, RB } }, - -{ "addc", XO(31,10,0,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "a", XO(31,10,0,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "addc.", XO(31,10,0,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "a.", XO(31,10,0,1), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "addco", XO(31,10,1,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "ao", XO(31,10,1,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "addco.", XO(31,10,1,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "ao.", XO(31,10,1,1), XO_MASK, PWRCOM, { RT, RA, RB } }, - -{ "mulhwu", XO(31,11,0,0), XO_MASK, PPC, { RT, RA, RB } }, -{ "mulhwu.", XO(31,11,0,1), XO_MASK, PPC, { RT, RA, RB } }, - -{ "isellt", X(31,15), X_MASK, PPCISEL, { RT, RA, RB } }, -{ "iselgt", X(31,47), X_MASK, PPCISEL, { RT, RA, RB } }, -{ "iseleq", X(31,79), X_MASK, PPCISEL, { RT, RA, RB } }, -{ "isel", XISEL(31,15), XISEL_MASK, PPCISEL, { RT, RA, RB, CRB } }, - -{ "mfocrf", XFXM(31,19,0,1), XFXFXM_MASK, COM, { RT, FXM } }, -{ "mfcr", X(31,19), XRARB_MASK, NOPOWER4 | COM, { RT } }, -{ "mfcr", X(31,19), XFXFXM_MASK, POWER4, { RT, FXM4 } }, - -{ "lwarx", X(31,20), XEH_MASK, PPC, { RT, RA0, RB, EH } }, - -{ "ldx", X(31,21), X_MASK, PPC64, { RT, RA0, RB } }, - -{ "icbt", X(31,22), X_MASK, BOOKE|PPCE300, { CT, RA, RB } }, -{ "icbt", X(31,262), XRT_MASK, PPC403, { RA, RB } }, - -{ "lwzx", X(31,23), X_MASK, PPCCOM, { RT, RA0, RB } }, -{ "lx", X(31,23), X_MASK, PWRCOM, { RT, RA, RB } }, - -{ "slw", XRC(31,24,0), X_MASK, PPCCOM, { RA, RS, RB } }, -{ "sl", XRC(31,24,0), 
X_MASK, PWRCOM, { RA, RS, RB } }, -{ "slw.", XRC(31,24,1), X_MASK, PPCCOM, { RA, RS, RB } }, -{ "sl.", XRC(31,24,1), X_MASK, PWRCOM, { RA, RS, RB } }, - -{ "cntlzw", XRC(31,26,0), XRB_MASK, PPCCOM, { RA, RS } }, -{ "cntlz", XRC(31,26,0), XRB_MASK, PWRCOM, { RA, RS } }, -{ "cntlzw.", XRC(31,26,1), XRB_MASK, PPCCOM, { RA, RS } }, -{ "cntlz.", XRC(31,26,1), XRB_MASK, PWRCOM, { RA, RS } }, - -{ "sld", XRC(31,27,0), X_MASK, PPC64, { RA, RS, RB } }, -{ "sld.", XRC(31,27,1), X_MASK, PPC64, { RA, RS, RB } }, - -{ "and", XRC(31,28,0), X_MASK, COM, { RA, RS, RB } }, -{ "and.", XRC(31,28,1), X_MASK, COM, { RA, RS, RB } }, - -{ "maskg", XRC(31,29,0), X_MASK, M601, { RA, RS, RB } }, -{ "maskg.", XRC(31,29,1), X_MASK, M601, { RA, RS, RB } }, - -{ "icbte", X(31,30), X_MASK, BOOKE64, { CT, RA, RB } }, - -{ "lwzxe", X(31,31), X_MASK, BOOKE64, { RT, RA0, RB } }, - -{ "cmplw", XOPL(31,32,0), XCMPL_MASK, PPCCOM, { OBF, RA, RB } }, -{ "cmpld", XOPL(31,32,1), XCMPL_MASK, PPC64, { OBF, RA, RB } }, -{ "cmpl", X(31,32), XCMP_MASK, PPC, { BF, L, RA, RB } }, -{ "cmpl", X(31,32), XCMPL_MASK, PWRCOM, { BF, RA, RB } }, - -{ "subf", XO(31,40,0,0), XO_MASK, PPC, { RT, RA, RB } }, -{ "sub", XO(31,40,0,0), XO_MASK, PPC, { RT, RB, RA } }, -{ "subf.", XO(31,40,0,1), XO_MASK, PPC, { RT, RA, RB } }, -{ "sub.", XO(31,40,0,1), XO_MASK, PPC, { RT, RB, RA } }, -{ "subfo", XO(31,40,1,0), XO_MASK, PPC, { RT, RA, RB } }, -{ "subo", XO(31,40,1,0), XO_MASK, PPC, { RT, RB, RA } }, -{ "subfo.", XO(31,40,1,1), XO_MASK, PPC, { RT, RA, RB } }, -{ "subo.", XO(31,40,1,1), XO_MASK, PPC, { RT, RB, RA } }, - -{ "ldux", X(31,53), X_MASK, PPC64, { RT, RAL, RB } }, - -{ "dcbst", X(31,54), XRT_MASK, PPC, { RA, RB } }, - -{ "lwzux", X(31,55), X_MASK, PPCCOM, { RT, RAL, RB } }, -{ "lux", X(31,55), X_MASK, PWRCOM, { RT, RA, RB } }, - -{ "dcbste", X(31,62), XRT_MASK, BOOKE64, { RA, RB } }, - -{ "lwzuxe", X(31,63), X_MASK, BOOKE64, { RT, RAL, RB } }, - -{ "cntlzd", XRC(31,58,0), XRB_MASK, PPC64, { RA, RS } }, -{ "cntlzd.", XRC(31,58,1), XRB_MASK, PPC64, { RA, RS } }, - -{ "andc", XRC(31,60,0), X_MASK, COM, { RA, RS, RB } }, -{ "andc.", XRC(31,60,1), X_MASK, COM, { RA, RS, RB } }, - -{ "tdlgt", XTO(31,68,TOLGT), XTO_MASK, PPC64, { RA, RB } }, -{ "tdllt", XTO(31,68,TOLLT), XTO_MASK, PPC64, { RA, RB } }, -{ "tdeq", XTO(31,68,TOEQ), XTO_MASK, PPC64, { RA, RB } }, -{ "tdlge", XTO(31,68,TOLGE), XTO_MASK, PPC64, { RA, RB } }, -{ "tdlnl", XTO(31,68,TOLNL), XTO_MASK, PPC64, { RA, RB } }, -{ "tdlle", XTO(31,68,TOLLE), XTO_MASK, PPC64, { RA, RB } }, -{ "tdlng", XTO(31,68,TOLNG), XTO_MASK, PPC64, { RA, RB } }, -{ "tdgt", XTO(31,68,TOGT), XTO_MASK, PPC64, { RA, RB } }, -{ "tdge", XTO(31,68,TOGE), XTO_MASK, PPC64, { RA, RB } }, -{ "tdnl", XTO(31,68,TONL), XTO_MASK, PPC64, { RA, RB } }, -{ "tdlt", XTO(31,68,TOLT), XTO_MASK, PPC64, { RA, RB } }, -{ "tdle", XTO(31,68,TOLE), XTO_MASK, PPC64, { RA, RB } }, -{ "tdng", XTO(31,68,TONG), XTO_MASK, PPC64, { RA, RB } }, -{ "tdne", XTO(31,68,TONE), XTO_MASK, PPC64, { RA, RB } }, -{ "td", X(31,68), X_MASK, PPC64, { TO, RA, RB } }, - -{ "mulhd", XO(31,73,0,0), XO_MASK, PPC64, { RT, RA, RB } }, -{ "mulhd.", XO(31,73,0,1), XO_MASK, PPC64, { RT, RA, RB } }, - -{ "mulhw", XO(31,75,0,0), XO_MASK, PPC, { RT, RA, RB } }, -{ "mulhw.", XO(31,75,0,1), XO_MASK, PPC, { RT, RA, RB } }, - -{ "dlmzb", XRC(31,78,0), X_MASK, PPC403|PPC440, { RA, RS, RB } }, -{ "dlmzb.", XRC(31,78,1), X_MASK, PPC403|PPC440, { RA, RS, RB } }, - -{ "mtsrd", X(31,82), XRB_MASK|(1<<20), PPC64, { SR, RS } }, - -{ "mfmsr", X(31,83), XRARB_MASK, COM, { RT } }, - -{ 
"ldarx", X(31,84), XEH_MASK, PPC64, { RT, RA0, RB, EH } }, - -{ "dcbfl", XOPL(31,86,1), XRT_MASK, POWER5, { RA, RB } }, -{ "dcbf", X(31,86), XLRT_MASK, PPC, { RA, RB, L } }, - -{ "lbzx", X(31,87), X_MASK, COM, { RT, RA0, RB } }, - -{ "dcbfe", X(31,94), XRT_MASK, BOOKE64, { RA, RB } }, - -{ "lbzxe", X(31,95), X_MASK, BOOKE64, { RT, RA0, RB } }, - -{ "neg", XO(31,104,0,0), XORB_MASK, COM, { RT, RA } }, -{ "neg.", XO(31,104,0,1), XORB_MASK, COM, { RT, RA } }, -{ "nego", XO(31,104,1,0), XORB_MASK, COM, { RT, RA } }, -{ "nego.", XO(31,104,1,1), XORB_MASK, COM, { RT, RA } }, - -{ "mul", XO(31,107,0,0), XO_MASK, M601, { RT, RA, RB } }, -{ "mul.", XO(31,107,0,1), XO_MASK, M601, { RT, RA, RB } }, -{ "mulo", XO(31,107,1,0), XO_MASK, M601, { RT, RA, RB } }, -{ "mulo.", XO(31,107,1,1), XO_MASK, M601, { RT, RA, RB } }, - -{ "mtsrdin", X(31,114), XRA_MASK, PPC64, { RS, RB } }, - -{ "clf", X(31,118), XTO_MASK, POWER, { RA, RB } }, - -{ "lbzux", X(31,119), X_MASK, COM, { RT, RAL, RB } }, - -{ "popcntb", X(31,122), XRB_MASK, POWER5, { RA, RS } }, -{ "popcntw", X(31,378), XRB_MASK, POWER7, { RA, RS } }, -{ "popcntd", X(31,506), XRB_MASK, POWER7, { RA, RS } }, - -{ "cnttzw", XRC(31,538,0), XRB_MASK, POWER9, { RA, RS } }, -{ "cnttzw.", XRC(31,538,1), XRB_MASK, POWER9, { RA, RS } }, -{ "cnttzd", XRC(31,570,0), XRB_MASK, POWER9, { RA, RS } }, -{ "cnttzd.", XRC(31,570,1), XRB_MASK, POWER9, { RA, RS } }, - -{ "not", XRC(31,124,0), X_MASK, COM, { RA, RS, RBS } }, -{ "nor", XRC(31,124,0), X_MASK, COM, { RA, RS, RB } }, -{ "not.", XRC(31,124,1), X_MASK, COM, { RA, RS, RBS } }, -{ "nor.", XRC(31,124,1), X_MASK, COM, { RA, RS, RB } }, - -{ "lwarxe", X(31,126), X_MASK, BOOKE64, { RT, RA0, RB } }, - -{ "lbzuxe", X(31,127), X_MASK, BOOKE64, { RT, RAL, RB } }, - -{ "wrtee", X(31,131), XRARB_MASK, PPC403 | BOOKE, { RS } }, - -{ "dcbtstls",X(31,134), X_MASK, PPCCHLK, { CT, RA, RB }}, - -{ "subfe", XO(31,136,0,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "sfe", XO(31,136,0,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "subfe.", XO(31,136,0,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "sfe.", XO(31,136,0,1), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "subfeo", XO(31,136,1,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "sfeo", XO(31,136,1,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "subfeo.", XO(31,136,1,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "sfeo.", XO(31,136,1,1), XO_MASK, PWRCOM, { RT, RA, RB } }, - -{ "adde", XO(31,138,0,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "ae", XO(31,138,0,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "adde.", XO(31,138,0,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "ae.", XO(31,138,0,1), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "addeo", XO(31,138,1,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "aeo", XO(31,138,1,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "addeo.", XO(31,138,1,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "aeo.", XO(31,138,1,1), XO_MASK, PWRCOM, { RT, RA, RB } }, - -{ "dcbtstlse",X(31,142),X_MASK, PPCCHLK64, { CT, RA, RB }}, - -{ "mtocrf", XFXM(31,144,0,1), XFXFXM_MASK, COM, { FXM, RS } }, -{ "mtcr", XFXM(31,144,0xff,0), XRARB_MASK, COM, { RS }}, -{ "mtcrf", X(31,144), XFXFXM_MASK, COM, { FXM, RS } }, - -{ "mtmsr", X(31,146), XRARB_MASK, COM, { RS } }, - -{ "stdx", X(31,149), X_MASK, PPC64, { RS, RA0, RB } }, - -{ "stwcx.", XRC(31,150,1), X_MASK, PPC, { RS, RA0, RB } }, - -{ "stwx", X(31,151), X_MASK, PPCCOM, { RS, RA0, RB } }, -{ "stx", X(31,151), X_MASK, PWRCOM, { RS, RA, RB } }, - -{ "stwcxe.", XRC(31,158,1), X_MASK, BOOKE64, { RS, RA0, RB } }, - -{ "stwxe", X(31,159), X_MASK, BOOKE64, { RS, RA0, RB } }, - -{ "slq", 
XRC(31,152,0), X_MASK, M601, { RA, RS, RB } }, -{ "slq.", XRC(31,152,1), X_MASK, M601, { RA, RS, RB } }, - -{ "sle", XRC(31,153,0), X_MASK, M601, { RA, RS, RB } }, -{ "sle.", XRC(31,153,1), X_MASK, M601, { RA, RS, RB } }, - -{ "prtyw", X(31,154), XRB_MASK, POWER6, { RA, RS } }, - -{ "wrteei", X(31,163), XE_MASK, PPC403 | BOOKE, { E } }, - -{ "dcbtls", X(31,166), X_MASK, PPCCHLK, { CT, RA, RB }}, -{ "dcbtlse", X(31,174), X_MASK, PPCCHLK64, { CT, RA, RB }}, - -{ "mtmsrd", X(31,178), XRLARB_MASK, PPC64, { RS, A_L } }, - -{ "stdux", X(31,181), X_MASK, PPC64, { RS, RAS, RB } }, - -{ "stwux", X(31,183), X_MASK, PPCCOM, { RS, RAS, RB } }, -{ "stux", X(31,183), X_MASK, PWRCOM, { RS, RA0, RB } }, - -{ "sliq", XRC(31,184,0), X_MASK, M601, { RA, RS, SH } }, -{ "sliq.", XRC(31,184,1), X_MASK, M601, { RA, RS, SH } }, - -{ "prtyd", X(31,186), XRB_MASK, POWER6, { RA, RS } }, - -{ "stwuxe", X(31,191), X_MASK, BOOKE64, { RS, RAS, RB } }, - -{ "subfze", XO(31,200,0,0), XORB_MASK, PPCCOM, { RT, RA } }, -{ "sfze", XO(31,200,0,0), XORB_MASK, PWRCOM, { RT, RA } }, -{ "subfze.", XO(31,200,0,1), XORB_MASK, PPCCOM, { RT, RA } }, -{ "sfze.", XO(31,200,0,1), XORB_MASK, PWRCOM, { RT, RA } }, -{ "subfzeo", XO(31,200,1,0), XORB_MASK, PPCCOM, { RT, RA } }, -{ "sfzeo", XO(31,200,1,0), XORB_MASK, PWRCOM, { RT, RA } }, -{ "subfzeo.",XO(31,200,1,1), XORB_MASK, PPCCOM, { RT, RA } }, -{ "sfzeo.", XO(31,200,1,1), XORB_MASK, PWRCOM, { RT, RA } }, - -{ "addze", XO(31,202,0,0), XORB_MASK, PPCCOM, { RT, RA } }, -{ "aze", XO(31,202,0,0), XORB_MASK, PWRCOM, { RT, RA } }, -{ "addze.", XO(31,202,0,1), XORB_MASK, PPCCOM, { RT, RA } }, -{ "aze.", XO(31,202,0,1), XORB_MASK, PWRCOM, { RT, RA } }, -{ "addzeo", XO(31,202,1,0), XORB_MASK, PPCCOM, { RT, RA } }, -{ "azeo", XO(31,202,1,0), XORB_MASK, PWRCOM, { RT, RA } }, -{ "addzeo.", XO(31,202,1,1), XORB_MASK, PPCCOM, { RT, RA } }, -{ "azeo.", XO(31,202,1,1), XORB_MASK, PWRCOM, { RT, RA } }, - -{ "mtsr", X(31,210), XRB_MASK|(1<<20), COM32, { SR, RS } }, - -{ "stdcx.", XRC(31,214,1), X_MASK, PPC64, { RS, RA0, RB } }, - -{ "stbx", X(31,215), X_MASK, COM, { RS, RA0, RB } }, - -{ "sllq", XRC(31,216,0), X_MASK, M601, { RA, RS, RB } }, -{ "sllq.", XRC(31,216,1), X_MASK, M601, { RA, RS, RB } }, - -{ "sleq", XRC(31,217,0), X_MASK, M601, { RA, RS, RB } }, -{ "sleq.", XRC(31,217,1), X_MASK, M601, { RA, RS, RB } }, - -{ "stbxe", X(31,223), X_MASK, BOOKE64, { RS, RA0, RB } }, - -{ "icblc", X(31,230), X_MASK, PPCCHLK, { CT, RA, RB }}, - -{ "subfme", XO(31,232,0,0), XORB_MASK, PPCCOM, { RT, RA } }, -{ "sfme", XO(31,232,0,0), XORB_MASK, PWRCOM, { RT, RA } }, -{ "subfme.", XO(31,232,0,1), XORB_MASK, PPCCOM, { RT, RA } }, -{ "sfme.", XO(31,232,0,1), XORB_MASK, PWRCOM, { RT, RA } }, -{ "subfmeo", XO(31,232,1,0), XORB_MASK, PPCCOM, { RT, RA } }, -{ "sfmeo", XO(31,232,1,0), XORB_MASK, PWRCOM, { RT, RA } }, -{ "subfmeo.",XO(31,232,1,1), XORB_MASK, PPCCOM, { RT, RA } }, -{ "sfmeo.", XO(31,232,1,1), XORB_MASK, PWRCOM, { RT, RA } }, - -{ "mulld", XO(31,233,0,0), XO_MASK, PPC64, { RT, RA, RB } }, -{ "mulld.", XO(31,233,0,1), XO_MASK, PPC64, { RT, RA, RB } }, -{ "mulldo", XO(31,233,1,0), XO_MASK, PPC64, { RT, RA, RB } }, -{ "mulldo.", XO(31,233,1,1), XO_MASK, PPC64, { RT, RA, RB } }, - -{ "addme", XO(31,234,0,0), XORB_MASK, PPCCOM, { RT, RA } }, -{ "ame", XO(31,234,0,0), XORB_MASK, PWRCOM, { RT, RA } }, -{ "addme.", XO(31,234,0,1), XORB_MASK, PPCCOM, { RT, RA } }, -{ "ame.", XO(31,234,0,1), XORB_MASK, PWRCOM, { RT, RA } }, -{ "addmeo", XO(31,234,1,0), XORB_MASK, PPCCOM, { RT, RA } }, -{ "ameo", XO(31,234,1,0), 
XORB_MASK, PWRCOM, { RT, RA } }, -{ "addmeo.", XO(31,234,1,1), XORB_MASK, PPCCOM, { RT, RA } }, -{ "ameo.", XO(31,234,1,1), XORB_MASK, PWRCOM, { RT, RA } }, - -{ "addex", XO(31,170,0,0), XO_MASK, POWER9, { RT, RA, RB } }, - -{ "mullw", XO(31,235,0,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "muls", XO(31,235,0,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "mullw.", XO(31,235,0,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "muls.", XO(31,235,0,1), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "mullwo", XO(31,235,1,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "mulso", XO(31,235,1,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "mullwo.", XO(31,235,1,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "mulso.", XO(31,235,1,1), XO_MASK, PWRCOM, { RT, RA, RB } }, - -{ "icblce", X(31,238), X_MASK, PPCCHLK64, { CT, RA, RB }}, -{ "mtsrin", X(31,242), XRA_MASK, PPC32, { RS, RB } }, -{ "mtsri", X(31,242), XRA_MASK, POWER32, { RS, RB } }, - -{ "dcbtst", X(31,246), X_MASK, PPC, { CT, RA, RB } }, - -{ "stbux", X(31,247), X_MASK, COM, { RS, RAS, RB } }, - -{ "slliq", XRC(31,248,0), X_MASK, M601, { RA, RS, SH } }, -{ "slliq.", XRC(31,248,1), X_MASK, M601, { RA, RS, SH } }, - -{ "dcbtste", X(31,253), X_MASK, BOOKE64, { CT, RA, RB } }, - -{ "stbuxe", X(31,255), X_MASK, BOOKE64, { RS, RAS, RB } }, - -{ "mfdcrx", X(31,259), X_MASK, BOOKE, { RS, RA } }, - -{ "doz", XO(31,264,0,0), XO_MASK, M601, { RT, RA, RB } }, -{ "doz.", XO(31,264,0,1), XO_MASK, M601, { RT, RA, RB } }, -{ "dozo", XO(31,264,1,0), XO_MASK, M601, { RT, RA, RB } }, -{ "dozo.", XO(31,264,1,1), XO_MASK, M601, { RT, RA, RB } }, - -{ "add", XO(31,266,0,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "cax", XO(31,266,0,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "add.", XO(31,266,0,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "cax.", XO(31,266,0,1), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "addo", XO(31,266,1,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "caxo", XO(31,266,1,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "addo.", XO(31,266,1,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "caxo.", XO(31,266,1,1), XO_MASK, PWRCOM, { RT, RA, RB } }, - -{ "tlbiel", X(31,274), XRTLRA_MASK, POWER4, { RB, L } }, - -{ "mfapidi", X(31,275), X_MASK, BOOKE, { RT, RA } }, - -{ "lscbx", XRC(31,277,0), X_MASK, M601, { RT, RA, RB } }, -{ "lscbx.", XRC(31,277,1), X_MASK, M601, { RT, RA, RB } }, - -{ "dcbt", X(31,278), X_MASK, PPC, { CT, RA, RB } }, - -{ "lhzx", X(31,279), X_MASK, COM, { RT, RA0, RB } }, - -{ "eqv", XRC(31,284,0), X_MASK, COM, { RA, RS, RB } }, -{ "eqv.", XRC(31,284,1), X_MASK, COM, { RA, RS, RB } }, - -{ "dcbte", X(31,286), X_MASK, BOOKE64, { CT, RA, RB } }, - -{ "lhzxe", X(31,287), X_MASK, BOOKE64, { RT, RA0, RB } }, - -{ "tlbie", X(31,306), XRTLRA_MASK, PPC, { RB, L } }, -{ "tlbi", X(31,306), XRT_MASK, POWER, { RA0, RB } }, - -{ "eciwx", X(31,310), X_MASK, PPC, { RT, RA, RB } }, - -{ "lhzux", X(31,311), X_MASK, COM, { RT, RAL, RB } }, - -{ "xor", XRC(31,316,0), X_MASK, COM, { RA, RS, RB } }, -{ "xor.", XRC(31,316,1), X_MASK, COM, { RA, RS, RB } }, - -{ "lhzuxe", X(31,319), X_MASK, BOOKE64, { RT, RAL, RB } }, - -{ "mfexisr", XSPR(31,323,64), XSPR_MASK, PPC403, { RT } }, -{ "mfexier", XSPR(31,323,66), XSPR_MASK, PPC403, { RT } }, -{ "mfbr0", XSPR(31,323,128), XSPR_MASK, PPC403, { RT } }, -{ "mfbr1", XSPR(31,323,129), XSPR_MASK, PPC403, { RT } }, -{ "mfbr2", XSPR(31,323,130), XSPR_MASK, PPC403, { RT } }, -{ "mfbr3", XSPR(31,323,131), XSPR_MASK, PPC403, { RT } }, -{ "mfbr4", XSPR(31,323,132), XSPR_MASK, PPC403, { RT } }, -{ "mfbr5", XSPR(31,323,133), XSPR_MASK, PPC403, { RT } }, -{ "mfbr6", XSPR(31,323,134), 
XSPR_MASK, PPC403, { RT } }, -{ "mfbr7", XSPR(31,323,135), XSPR_MASK, PPC403, { RT } }, -{ "mfbear", XSPR(31,323,144), XSPR_MASK, PPC403, { RT } }, -{ "mfbesr", XSPR(31,323,145), XSPR_MASK, PPC403, { RT } }, -{ "mfiocr", XSPR(31,323,160), XSPR_MASK, PPC403, { RT } }, -{ "mfdmacr0", XSPR(31,323,192), XSPR_MASK, PPC403, { RT } }, -{ "mfdmact0", XSPR(31,323,193), XSPR_MASK, PPC403, { RT } }, -{ "mfdmada0", XSPR(31,323,194), XSPR_MASK, PPC403, { RT } }, -{ "mfdmasa0", XSPR(31,323,195), XSPR_MASK, PPC403, { RT } }, -{ "mfdmacc0", XSPR(31,323,196), XSPR_MASK, PPC403, { RT } }, -{ "mfdmacr1", XSPR(31,323,200), XSPR_MASK, PPC403, { RT } }, -{ "mfdmact1", XSPR(31,323,201), XSPR_MASK, PPC403, { RT } }, -{ "mfdmada1", XSPR(31,323,202), XSPR_MASK, PPC403, { RT } }, -{ "mfdmasa1", XSPR(31,323,203), XSPR_MASK, PPC403, { RT } }, -{ "mfdmacc1", XSPR(31,323,204), XSPR_MASK, PPC403, { RT } }, -{ "mfdmacr2", XSPR(31,323,208), XSPR_MASK, PPC403, { RT } }, -{ "mfdmact2", XSPR(31,323,209), XSPR_MASK, PPC403, { RT } }, -{ "mfdmada2", XSPR(31,323,210), XSPR_MASK, PPC403, { RT } }, -{ "mfdmasa2", XSPR(31,323,211), XSPR_MASK, PPC403, { RT } }, -{ "mfdmacc2", XSPR(31,323,212), XSPR_MASK, PPC403, { RT } }, -{ "mfdmacr3", XSPR(31,323,216), XSPR_MASK, PPC403, { RT } }, -{ "mfdmact3", XSPR(31,323,217), XSPR_MASK, PPC403, { RT } }, -{ "mfdmada3", XSPR(31,323,218), XSPR_MASK, PPC403, { RT } }, -{ "mfdmasa3", XSPR(31,323,219), XSPR_MASK, PPC403, { RT } }, -{ "mfdmacc3", XSPR(31,323,220), XSPR_MASK, PPC403, { RT } }, -{ "mfdmasr", XSPR(31,323,224), XSPR_MASK, PPC403, { RT } }, -{ "mfdcr", X(31,323), X_MASK, PPC403 | BOOKE, { RT, SPR } }, - -{ "div", XO(31,331,0,0), XO_MASK, M601, { RT, RA, RB } }, -{ "div.", XO(31,331,0,1), XO_MASK, M601, { RT, RA, RB } }, -{ "divo", XO(31,331,1,0), XO_MASK, M601, { RT, RA, RB } }, -{ "divo.", XO(31,331,1,1), XO_MASK, M601, { RT, RA, RB } }, - -{ "mfpmr", X(31,334), X_MASK, PPCPMR, { RT, PMR }}, - -{ "mfmq", XSPR(31,339,0), XSPR_MASK, M601, { RT } }, -{ "mfxer", XSPR(31,339,1), XSPR_MASK, COM, { RT } }, -{ "mfrtcu", XSPR(31,339,4), XSPR_MASK, COM, { RT } }, -{ "mfrtcl", XSPR(31,339,5), XSPR_MASK, COM, { RT } }, -{ "mfdec", XSPR(31,339,6), XSPR_MASK, MFDEC1, { RT } }, -{ "mfdec", XSPR(31,339,22), XSPR_MASK, MFDEC2, { RT } }, -{ "mflr", XSPR(31,339,8), XSPR_MASK, COM, { RT } }, -{ "mfctr", XSPR(31,339,9), XSPR_MASK, COM, { RT } }, -{ "mftid", XSPR(31,339,17), XSPR_MASK, POWER, { RT } }, -{ "mfdsisr", XSPR(31,339,18), XSPR_MASK, COM, { RT } }, -{ "mfdar", XSPR(31,339,19), XSPR_MASK, COM, { RT } }, -{ "mfsdr0", XSPR(31,339,24), XSPR_MASK, POWER, { RT } }, -{ "mfsdr1", XSPR(31,339,25), XSPR_MASK, COM, { RT } }, -{ "mfsrr0", XSPR(31,339,26), XSPR_MASK, COM, { RT } }, -{ "mfsrr1", XSPR(31,339,27), XSPR_MASK, COM, { RT } }, -{ "mfcfar", XSPR(31,339,28), XSPR_MASK, POWER6, { RT } }, -{ "mfpid", XSPR(31,339,48), XSPR_MASK, BOOKE, { RT } }, -{ "mfpid", XSPR(31,339,945), XSPR_MASK, PPC403, { RT } }, -{ "mfcsrr0", XSPR(31,339,58), XSPR_MASK, BOOKE, { RT } }, -{ "mfcsrr1", XSPR(31,339,59), XSPR_MASK, BOOKE, { RT } }, -{ "mfdear", XSPR(31,339,61), XSPR_MASK, BOOKE, { RT } }, -{ "mfdear", XSPR(31,339,981), XSPR_MASK, PPC403, { RT } }, -{ "mfesr", XSPR(31,339,62), XSPR_MASK, BOOKE, { RT } }, -{ "mfesr", XSPR(31,339,980), XSPR_MASK, PPC403, { RT } }, -{ "mfivpr", XSPR(31,339,63), XSPR_MASK, BOOKE, { RT } }, -{ "mfcmpa", XSPR(31,339,144), XSPR_MASK, PPC860, { RT } }, -{ "mfcmpb", XSPR(31,339,145), XSPR_MASK, PPC860, { RT } }, -{ "mfcmpc", XSPR(31,339,146), XSPR_MASK, PPC860, { RT } }, -{ "mfcmpd", 
XSPR(31,339,147), XSPR_MASK, PPC860, { RT } }, -{ "mficr", XSPR(31,339,148), XSPR_MASK, PPC860, { RT } }, -{ "mfder", XSPR(31,339,149), XSPR_MASK, PPC860, { RT } }, -{ "mfcounta", XSPR(31,339,150), XSPR_MASK, PPC860, { RT } }, -{ "mfcountb", XSPR(31,339,151), XSPR_MASK, PPC860, { RT } }, -{ "mfcmpe", XSPR(31,339,152), XSPR_MASK, PPC860, { RT } }, -{ "mfcmpf", XSPR(31,339,153), XSPR_MASK, PPC860, { RT } }, -{ "mfcmpg", XSPR(31,339,154), XSPR_MASK, PPC860, { RT } }, -{ "mfcmph", XSPR(31,339,155), XSPR_MASK, PPC860, { RT } }, -{ "mflctrl1", XSPR(31,339,156), XSPR_MASK, PPC860, { RT } }, -{ "mflctrl2", XSPR(31,339,157), XSPR_MASK, PPC860, { RT } }, -{ "mfictrl", XSPR(31,339,158), XSPR_MASK, PPC860, { RT } }, -{ "mfbar", XSPR(31,339,159), XSPR_MASK, PPC860, { RT } }, -{ "mfvrsave", XSPR(31,339,256), XSPR_MASK, PPCVEC, { RT } }, -{ "mfusprg0", XSPR(31,339,256), XSPR_MASK, BOOKE, { RT } }, -{ "mftb", X(31,371), X_MASK, CLASSIC, { RT, TBR } }, -{ "mftb", XSPR(31,339,268), XSPR_MASK, BOOKE, { RT } }, -{ "mftbl", XSPR(31,371,268), XSPR_MASK, CLASSIC, { RT } }, -{ "mftbl", XSPR(31,339,268), XSPR_MASK, BOOKE, { RT } }, -{ "mftbu", XSPR(31,371,269), XSPR_MASK, CLASSIC, { RT } }, -{ "mftbu", XSPR(31,339,269), XSPR_MASK, BOOKE, { RT } }, -{ "mfsprg", XSPR(31,339,256), XSPRG_MASK, PPC, { RT, SPRG } }, -{ "mfsprg0", XSPR(31,339,272), XSPR_MASK, PPC, { RT } }, -{ "mfsprg1", XSPR(31,339,273), XSPR_MASK, PPC, { RT } }, -{ "mfsprg2", XSPR(31,339,274), XSPR_MASK, PPC, { RT } }, -{ "mfsprg3", XSPR(31,339,275), XSPR_MASK, PPC, { RT } }, -{ "mfsprg4", XSPR(31,339,260), XSPR_MASK, PPC405 | BOOKE, { RT } }, -{ "mfsprg5", XSPR(31,339,261), XSPR_MASK, PPC405 | BOOKE, { RT } }, -{ "mfsprg6", XSPR(31,339,262), XSPR_MASK, PPC405 | BOOKE, { RT } }, -{ "mfsprg7", XSPR(31,339,263), XSPR_MASK, PPC405 | BOOKE, { RT } }, -{ "mfasr", XSPR(31,339,280), XSPR_MASK, PPC64, { RT } }, -{ "mfear", XSPR(31,339,282), XSPR_MASK, PPC, { RT } }, -{ "mfpir", XSPR(31,339,286), XSPR_MASK, BOOKE, { RT } }, -{ "mfpvr", XSPR(31,339,287), XSPR_MASK, PPC, { RT } }, -{ "mfdbsr", XSPR(31,339,304), XSPR_MASK, BOOKE, { RT } }, -{ "mfdbsr", XSPR(31,339,1008), XSPR_MASK, PPC403, { RT } }, -{ "mfdbcr0", XSPR(31,339,308), XSPR_MASK, BOOKE, { RT } }, -{ "mfdbcr0", XSPR(31,339,1010), XSPR_MASK, PPC405, { RT } }, -{ "mfdbcr1", XSPR(31,339,309), XSPR_MASK, BOOKE, { RT } }, -{ "mfdbcr1", XSPR(31,339,957), XSPR_MASK, PPC405, { RT } }, -{ "mfdbcr2", XSPR(31,339,310), XSPR_MASK, BOOKE, { RT } }, -{ "mfiac1", XSPR(31,339,312), XSPR_MASK, BOOKE, { RT } }, -{ "mfiac1", XSPR(31,339,1012), XSPR_MASK, PPC403, { RT } }, -{ "mfiac2", XSPR(31,339,313), XSPR_MASK, BOOKE, { RT } }, -{ "mfiac2", XSPR(31,339,1013), XSPR_MASK, PPC403, { RT } }, -{ "mfiac3", XSPR(31,339,314), XSPR_MASK, BOOKE, { RT } }, -{ "mfiac3", XSPR(31,339,948), XSPR_MASK, PPC405, { RT } }, -{ "mfiac4", XSPR(31,339,315), XSPR_MASK, BOOKE, { RT } }, -{ "mfiac4", XSPR(31,339,949), XSPR_MASK, PPC405, { RT } }, -{ "mfdac1", XSPR(31,339,316), XSPR_MASK, BOOKE, { RT } }, -{ "mfdac1", XSPR(31,339,1014), XSPR_MASK, PPC403, { RT } }, -{ "mfdac2", XSPR(31,339,317), XSPR_MASK, BOOKE, { RT } }, -{ "mfdac2", XSPR(31,339,1015), XSPR_MASK, PPC403, { RT } }, -{ "mfdvc1", XSPR(31,339,318), XSPR_MASK, BOOKE, { RT } }, -{ "mfdvc1", XSPR(31,339,950), XSPR_MASK, PPC405, { RT } }, -{ "mfdvc2", XSPR(31,339,319), XSPR_MASK, BOOKE, { RT } }, -{ "mfdvc2", XSPR(31,339,951), XSPR_MASK, PPC405, { RT } }, -{ "mftsr", XSPR(31,339,336), XSPR_MASK, BOOKE, { RT } }, -{ "mftsr", XSPR(31,339,984), XSPR_MASK, PPC403, { RT } }, -{ "mftcr", 
XSPR(31,339,340), XSPR_MASK, BOOKE, { RT } }, -{ "mftcr", XSPR(31,339,986), XSPR_MASK, PPC403, { RT } }, -{ "mfivor0", XSPR(31,339,400), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor1", XSPR(31,339,401), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor2", XSPR(31,339,402), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor3", XSPR(31,339,403), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor4", XSPR(31,339,404), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor5", XSPR(31,339,405), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor6", XSPR(31,339,406), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor7", XSPR(31,339,407), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor8", XSPR(31,339,408), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor9", XSPR(31,339,409), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor10", XSPR(31,339,410), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor11", XSPR(31,339,411), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor12", XSPR(31,339,412), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor13", XSPR(31,339,413), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor14", XSPR(31,339,414), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor15", XSPR(31,339,415), XSPR_MASK, BOOKE, { RT } }, -{ "mfspefscr", XSPR(31,339,512), XSPR_MASK, PPCSPE, { RT } }, -{ "mfbbear", XSPR(31,339,513), XSPR_MASK, PPCBRLK, { RT } }, -{ "mfbbtar", XSPR(31,339,514), XSPR_MASK, PPCBRLK, { RT } }, -{ "mfivor32", XSPR(31,339,528), XSPR_MASK, PPCSPE, { RT } }, -{ "mfivor33", XSPR(31,339,529), XSPR_MASK, PPCSPE, { RT } }, -{ "mfivor34", XSPR(31,339,530), XSPR_MASK, PPCSPE, { RT } }, -{ "mfivor35", XSPR(31,339,531), XSPR_MASK, PPCPMR, { RT } }, -{ "mfibatu", XSPR(31,339,528), XSPRBAT_MASK, PPC, { RT, SPRBAT } }, -{ "mfibatl", XSPR(31,339,529), XSPRBAT_MASK, PPC, { RT, SPRBAT } }, -{ "mfdbatu", XSPR(31,339,536), XSPRBAT_MASK, PPC, { RT, SPRBAT } }, -{ "mfdbatl", XSPR(31,339,537), XSPRBAT_MASK, PPC, { RT, SPRBAT } }, -{ "mfic_cst", XSPR(31,339,560), XSPR_MASK, PPC860, { RT } }, -{ "mfic_adr", XSPR(31,339,561), XSPR_MASK, PPC860, { RT } }, -{ "mfic_dat", XSPR(31,339,562), XSPR_MASK, PPC860, { RT } }, -{ "mfdc_cst", XSPR(31,339,568), XSPR_MASK, PPC860, { RT } }, -{ "mfdc_adr", XSPR(31,339,569), XSPR_MASK, PPC860, { RT } }, -{ "mfmcsrr0", XSPR(31,339,570), XSPR_MASK, PPCRFMCI, { RT } }, -{ "mfdc_dat", XSPR(31,339,570), XSPR_MASK, PPC860, { RT } }, -{ "mfmcsrr1", XSPR(31,339,571), XSPR_MASK, PPCRFMCI, { RT } }, -{ "mfmcsr", XSPR(31,339,572), XSPR_MASK, PPCRFMCI, { RT } }, -{ "mfmcar", XSPR(31,339,573), XSPR_MASK, PPCRFMCI, { RT } }, -{ "mfdpdr", XSPR(31,339,630), XSPR_MASK, PPC860, { RT } }, -{ "mfdpir", XSPR(31,339,631), XSPR_MASK, PPC860, { RT } }, -{ "mfimmr", XSPR(31,339,638), XSPR_MASK, PPC860, { RT } }, -{ "mfmi_ctr", XSPR(31,339,784), XSPR_MASK, PPC860, { RT } }, -{ "mfmi_ap", XSPR(31,339,786), XSPR_MASK, PPC860, { RT } }, -{ "mfmi_epn", XSPR(31,339,787), XSPR_MASK, PPC860, { RT } }, -{ "mfmi_twc", XSPR(31,339,789), XSPR_MASK, PPC860, { RT } }, -{ "mfmi_rpn", XSPR(31,339,790), XSPR_MASK, PPC860, { RT } }, -{ "mfmd_ctr", XSPR(31,339,792), XSPR_MASK, PPC860, { RT } }, -{ "mfm_casid", XSPR(31,339,793), XSPR_MASK, PPC860, { RT } }, -{ "mfmd_ap", XSPR(31,339,794), XSPR_MASK, PPC860, { RT } }, -{ "mfmd_epn", XSPR(31,339,795), XSPR_MASK, PPC860, { RT } }, -{ "mfmd_twb", XSPR(31,339,796), XSPR_MASK, PPC860, { RT } }, -{ "mfmd_twc", XSPR(31,339,797), XSPR_MASK, PPC860, { RT } }, -{ "mfmd_rpn", XSPR(31,339,798), XSPR_MASK, PPC860, { RT } }, -{ "mfm_tw", XSPR(31,339,799), XSPR_MASK, PPC860, { RT } }, -{ "mfmi_dbcam", XSPR(31,339,816), XSPR_MASK, PPC860, { RT } }, -{ "mfmi_dbram0",XSPR(31,339,817), XSPR_MASK, PPC860, { RT } }, -{ "mfmi_dbram1",XSPR(31,339,818), 
XSPR_MASK, PPC860, { RT } }, -{ "mfmd_dbcam", XSPR(31,339,824), XSPR_MASK, PPC860, { RT } }, -{ "mfmd_dbram0",XSPR(31,339,825), XSPR_MASK, PPC860, { RT } }, -{ "mfmd_dbram1",XSPR(31,339,826), XSPR_MASK, PPC860, { RT } }, -{ "mfummcr0", XSPR(31,339,936), XSPR_MASK, PPC750, { RT } }, -{ "mfupmc1", XSPR(31,339,937), XSPR_MASK, PPC750, { RT } }, -{ "mfupmc2", XSPR(31,339,938), XSPR_MASK, PPC750, { RT } }, -{ "mfusia", XSPR(31,339,939), XSPR_MASK, PPC750, { RT } }, -{ "mfummcr1", XSPR(31,339,940), XSPR_MASK, PPC750, { RT } }, -{ "mfupmc3", XSPR(31,339,941), XSPR_MASK, PPC750, { RT } }, -{ "mfupmc4", XSPR(31,339,942), XSPR_MASK, PPC750, { RT } }, -{ "mfzpr", XSPR(31,339,944), XSPR_MASK, PPC403, { RT } }, -{ "mfccr0", XSPR(31,339,947), XSPR_MASK, PPC405, { RT } }, -{ "mfmmcr0", XSPR(31,339,952), XSPR_MASK, PPC750, { RT } }, -{ "mfpmc1", XSPR(31,339,953), XSPR_MASK, PPC750, { RT } }, -{ "mfsgr", XSPR(31,339,953), XSPR_MASK, PPC403, { RT } }, -{ "mfpmc2", XSPR(31,339,954), XSPR_MASK, PPC750, { RT } }, -{ "mfdcwr", XSPR(31,339,954), XSPR_MASK, PPC403, { RT } }, -{ "mfsia", XSPR(31,339,955), XSPR_MASK, PPC750, { RT } }, -{ "mfsler", XSPR(31,339,955), XSPR_MASK, PPC405, { RT } }, -{ "mfmmcr1", XSPR(31,339,956), XSPR_MASK, PPC750, { RT } }, -{ "mfsu0r", XSPR(31,339,956), XSPR_MASK, PPC405, { RT } }, -{ "mfpmc3", XSPR(31,339,957), XSPR_MASK, PPC750, { RT } }, -{ "mfpmc4", XSPR(31,339,958), XSPR_MASK, PPC750, { RT } }, -{ "mficdbdr", XSPR(31,339,979), XSPR_MASK, PPC403, { RT } }, -{ "mfevpr", XSPR(31,339,982), XSPR_MASK, PPC403, { RT } }, -{ "mfcdbcr", XSPR(31,339,983), XSPR_MASK, PPC403, { RT } }, -{ "mfpit", XSPR(31,339,987), XSPR_MASK, PPC403, { RT } }, -{ "mftbhi", XSPR(31,339,988), XSPR_MASK, PPC403, { RT } }, -{ "mftblo", XSPR(31,339,989), XSPR_MASK, PPC403, { RT } }, -{ "mfsrr2", XSPR(31,339,990), XSPR_MASK, PPC403, { RT } }, -{ "mfsrr3", XSPR(31,339,991), XSPR_MASK, PPC403, { RT } }, -{ "mfl2cr", XSPR(31,339,1017), XSPR_MASK, PPC750, { RT } }, -{ "mfdccr", XSPR(31,339,1018), XSPR_MASK, PPC403, { RT } }, -{ "mficcr", XSPR(31,339,1019), XSPR_MASK, PPC403, { RT } }, -{ "mfictc", XSPR(31,339,1019), XSPR_MASK, PPC750, { RT } }, -{ "mfpbl1", XSPR(31,339,1020), XSPR_MASK, PPC403, { RT } }, -{ "mfthrm1", XSPR(31,339,1020), XSPR_MASK, PPC750, { RT } }, -{ "mfpbu1", XSPR(31,339,1021), XSPR_MASK, PPC403, { RT } }, -{ "mfthrm2", XSPR(31,339,1021), XSPR_MASK, PPC750, { RT } }, -{ "mfpbl2", XSPR(31,339,1022), XSPR_MASK, PPC403, { RT } }, -{ "mfthrm3", XSPR(31,339,1022), XSPR_MASK, PPC750, { RT } }, -{ "mfpbu2", XSPR(31,339,1023), XSPR_MASK, PPC403, { RT } }, -{ "mfspr", X(31,339), X_MASK, COM, { RT, SPR } }, - -{ "lwax", X(31,341), X_MASK, PPC64, { RT, RA0, RB } }, - -{ "dst", XDSS(31,342,0), XDSS_MASK, PPCVEC, { RA, RB, STRM } }, -{ "dstt", XDSS(31,342,1), XDSS_MASK, PPCVEC, { RA, RB, STRM } }, - -{ "lhax", X(31,343), X_MASK, COM, { RT, RA0, RB } }, - -{ "lhaxe", X(31,351), X_MASK, BOOKE64, { RT, RA0, RB } }, - -{ "dstst", XDSS(31,374,0), XDSS_MASK, PPCVEC, { RA, RB, STRM } }, -{ "dststt", XDSS(31,374,1), XDSS_MASK, PPCVEC, { RA, RB, STRM } }, - -{ "dccci", X(31,454), XRT_MASK, PPC403|PPC440, { RA, RB } }, - -{ "abs", XO(31,360,0,0), XORB_MASK, M601, { RT, RA } }, -{ "abs.", XO(31,360,0,1), XORB_MASK, M601, { RT, RA } }, -{ "abso", XO(31,360,1,0), XORB_MASK, M601, { RT, RA } }, -{ "abso.", XO(31,360,1,1), XORB_MASK, M601, { RT, RA } }, - -{ "divs", XO(31,363,0,0), XO_MASK, M601, { RT, RA, RB } }, -{ "divs.", XO(31,363,0,1), XO_MASK, M601, { RT, RA, RB } }, -{ "divso", XO(31,363,1,0), XO_MASK, M601, { RT, RA, 
RB } }, -{ "divso.", XO(31,363,1,1), XO_MASK, M601, { RT, RA, RB } }, - -{ "tlbia", X(31,370), 0xffffffff, PPC, { 0 } }, - -{ "lwaux", X(31,373), X_MASK, PPC64, { RT, RAL, RB } }, - -{ "lhaux", X(31,375), X_MASK, COM, { RT, RAL, RB } }, - -{ "lhauxe", X(31,383), X_MASK, BOOKE64, { RT, RAL, RB } }, - -{ "mtdcrx", X(31,387), X_MASK, BOOKE, { RA, RS } }, - -{ "dcblc", X(31,390), X_MASK, PPCCHLK, { CT, RA, RB }}, - -{ "subfe64", XO(31,392,0,0), XO_MASK, BOOKE64, { RT, RA, RB } }, -{ "subfe64o",XO(31,392,1,0), XO_MASK, BOOKE64, { RT, RA, RB } }, - -{ "adde64", XO(31,394,0,0), XO_MASK, BOOKE64, { RT, RA, RB } }, -{ "adde64o", XO(31,394,1,0), XO_MASK, BOOKE64, { RT, RA, RB } }, - -{ "dcblce", X(31,398), X_MASK, PPCCHLK64, { CT, RA, RB }}, - -{ "slbmte", X(31,402), XRA_MASK, PPC64, { RS, RB } }, - -{ "sthx", X(31,407), X_MASK, COM, { RS, RA0, RB } }, - -{ "cmpb", X(31,508), X_MASK, POWER6, { RA, RS, RB } }, - -{ "lfqx", X(31,791), X_MASK, POWER2, { FRT, RA, RB } }, - -{ "lfdpx", X(31,791), X_MASK, POWER6, { FRT, RA, RB } }, - -{ "lfqux", X(31,823), X_MASK, POWER2, { FRT, RA, RB } }, - -{ "stfqx", X(31,919), X_MASK, POWER2, { FRS, RA, RB } }, - -{ "stfdpx", X(31,919), X_MASK, POWER6, { FRS, RA, RB } }, - -{ "stfqux", X(31,951), X_MASK, POWER2, { FRS, RA, RB } }, - -{ "orc", XRC(31,412,0), X_MASK, COM, { RA, RS, RB } }, -{ "orc.", XRC(31,412,1), X_MASK, COM, { RA, RS, RB } }, - -{ "sradi", XS(31,413,0), XS_MASK, PPC64, { RA, RS, SH6 } }, -{ "sradi.", XS(31,413,1), XS_MASK, PPC64, { RA, RS, SH6 } }, - -{ "sthxe", X(31,415), X_MASK, BOOKE64, { RS, RA0, RB } }, - -{ "slbie", X(31,434), XRTRA_MASK, PPC64, { RB } }, - -{ "ecowx", X(31,438), X_MASK, PPC, { RT, RA, RB } }, - -{ "sthux", X(31,439), X_MASK, COM, { RS, RAS, RB } }, - -{ "sthuxe", X(31,447), X_MASK, BOOKE64, { RS, RAS, RB } }, - -{ "cctpl", 0x7c210b78, 0xffffffff, CELL, { 0 }}, -{ "cctpm", 0x7c421378, 0xffffffff, CELL, { 0 }}, -{ "cctph", 0x7c631b78, 0xffffffff, CELL, { 0 }}, -{ "db8cyc", 0x7f9ce378, 0xffffffff, CELL, { 0 }}, -{ "db10cyc", 0x7fbdeb78, 0xffffffff, CELL, { 0 }}, -{ "db12cyc", 0x7fdef378, 0xffffffff, CELL, { 0 }}, -{ "db16cyc", 0x7ffffb78, 0xffffffff, CELL, { 0 }}, -{ "mr", XRC(31,444,0), X_MASK, COM, { RA, RS, RBS } }, -{ "or", XRC(31,444,0), X_MASK, COM, { RA, RS, RB } }, -{ "mr.", XRC(31,444,1), X_MASK, COM, { RA, RS, RBS } }, -{ "or.", XRC(31,444,1), X_MASK, COM, { RA, RS, RB } }, - -{ "mtexisr", XSPR(31,451,64), XSPR_MASK, PPC403, { RS } }, -{ "mtexier", XSPR(31,451,66), XSPR_MASK, PPC403, { RS } }, -{ "mtbr0", XSPR(31,451,128), XSPR_MASK, PPC403, { RS } }, -{ "mtbr1", XSPR(31,451,129), XSPR_MASK, PPC403, { RS } }, -{ "mtbr2", XSPR(31,451,130), XSPR_MASK, PPC403, { RS } }, -{ "mtbr3", XSPR(31,451,131), XSPR_MASK, PPC403, { RS } }, -{ "mtbr4", XSPR(31,451,132), XSPR_MASK, PPC403, { RS } }, -{ "mtbr5", XSPR(31,451,133), XSPR_MASK, PPC403, { RS } }, -{ "mtbr6", XSPR(31,451,134), XSPR_MASK, PPC403, { RS } }, -{ "mtbr7", XSPR(31,451,135), XSPR_MASK, PPC403, { RS } }, -{ "mtbear", XSPR(31,451,144), XSPR_MASK, PPC403, { RS } }, -{ "mtbesr", XSPR(31,451,145), XSPR_MASK, PPC403, { RS } }, -{ "mtiocr", XSPR(31,451,160), XSPR_MASK, PPC403, { RS } }, -{ "mtdmacr0", XSPR(31,451,192), XSPR_MASK, PPC403, { RS } }, -{ "mtdmact0", XSPR(31,451,193), XSPR_MASK, PPC403, { RS } }, -{ "mtdmada0", XSPR(31,451,194), XSPR_MASK, PPC403, { RS } }, -{ "mtdmasa0", XSPR(31,451,195), XSPR_MASK, PPC403, { RS } }, -{ "mtdmacc0", XSPR(31,451,196), XSPR_MASK, PPC403, { RS } }, -{ "mtdmacr1", XSPR(31,451,200), XSPR_MASK, PPC403, { RS } }, -{ "mtdmact1", 
XSPR(31,451,201), XSPR_MASK, PPC403, { RS } }, -{ "mtdmada1", XSPR(31,451,202), XSPR_MASK, PPC403, { RS } }, -{ "mtdmasa1", XSPR(31,451,203), XSPR_MASK, PPC403, { RS } }, -{ "mtdmacc1", XSPR(31,451,204), XSPR_MASK, PPC403, { RS } }, -{ "mtdmacr2", XSPR(31,451,208), XSPR_MASK, PPC403, { RS } }, -{ "mtdmact2", XSPR(31,451,209), XSPR_MASK, PPC403, { RS } }, -{ "mtdmada2", XSPR(31,451,210), XSPR_MASK, PPC403, { RS } }, -{ "mtdmasa2", XSPR(31,451,211), XSPR_MASK, PPC403, { RS } }, -{ "mtdmacc2", XSPR(31,451,212), XSPR_MASK, PPC403, { RS } }, -{ "mtdmacr3", XSPR(31,451,216), XSPR_MASK, PPC403, { RS } }, -{ "mtdmact3", XSPR(31,451,217), XSPR_MASK, PPC403, { RS } }, -{ "mtdmada3", XSPR(31,451,218), XSPR_MASK, PPC403, { RS } }, -{ "mtdmasa3", XSPR(31,451,219), XSPR_MASK, PPC403, { RS } }, -{ "mtdmacc3", XSPR(31,451,220), XSPR_MASK, PPC403, { RS } }, -{ "mtdmasr", XSPR(31,451,224), XSPR_MASK, PPC403, { RS } }, -{ "mtdcr", X(31,451), X_MASK, PPC403 | BOOKE, { SPR, RS } }, - -{ "subfze64",XO(31,456,0,0), XORB_MASK, BOOKE64, { RT, RA } }, -{ "subfze64o",XO(31,456,1,0), XORB_MASK, BOOKE64, { RT, RA } }, - -{ "divdu", XO(31,457,0,0), XO_MASK, PPC64, { RT, RA, RB } }, -{ "divdu.", XO(31,457,0,1), XO_MASK, PPC64, { RT, RA, RB } }, -{ "divduo", XO(31,457,1,0), XO_MASK, PPC64, { RT, RA, RB } }, -{ "divduo.", XO(31,457,1,1), XO_MASK, PPC64, { RT, RA, RB } }, - -{ "addze64", XO(31,458,0,0), XORB_MASK, BOOKE64, { RT, RA } }, -{ "addze64o",XO(31,458,1,0), XORB_MASK, BOOKE64, { RT, RA } }, - -{ "divwu", XO(31,459,0,0), XO_MASK, PPC, { RT, RA, RB } }, -{ "divwu.", XO(31,459,0,1), XO_MASK, PPC, { RT, RA, RB } }, -{ "divwuo", XO(31,459,1,0), XO_MASK, PPC, { RT, RA, RB } }, -{ "divwuo.", XO(31,459,1,1), XO_MASK, PPC, { RT, RA, RB } }, - -{ "mtmq", XSPR(31,467,0), XSPR_MASK, M601, { RS } }, -{ "mtxer", XSPR(31,467,1), XSPR_MASK, COM, { RS } }, -{ "mtlr", XSPR(31,467,8), XSPR_MASK, COM, { RS } }, -{ "mtctr", XSPR(31,467,9), XSPR_MASK, COM, { RS } }, -{ "mttid", XSPR(31,467,17), XSPR_MASK, POWER, { RS } }, -{ "mtdsisr", XSPR(31,467,18), XSPR_MASK, COM, { RS } }, -{ "mtdar", XSPR(31,467,19), XSPR_MASK, COM, { RS } }, -{ "mtrtcu", XSPR(31,467,20), XSPR_MASK, COM, { RS } }, -{ "mtrtcl", XSPR(31,467,21), XSPR_MASK, COM, { RS } }, -{ "mtdec", XSPR(31,467,22), XSPR_MASK, COM, { RS } }, -{ "mtsdr0", XSPR(31,467,24), XSPR_MASK, POWER, { RS } }, -{ "mtsdr1", XSPR(31,467,25), XSPR_MASK, COM, { RS } }, -{ "mtsrr0", XSPR(31,467,26), XSPR_MASK, COM, { RS } }, -{ "mtsrr1", XSPR(31,467,27), XSPR_MASK, COM, { RS } }, -{ "mtcfar", XSPR(31,467,28), XSPR_MASK, POWER6, { RS } }, -{ "mtpid", XSPR(31,467,48), XSPR_MASK, BOOKE, { RS } }, -{ "mtpid", XSPR(31,467,945), XSPR_MASK, PPC403, { RS } }, -{ "mtdecar", XSPR(31,467,54), XSPR_MASK, BOOKE, { RS } }, -{ "mtcsrr0", XSPR(31,467,58), XSPR_MASK, BOOKE, { RS } }, -{ "mtcsrr1", XSPR(31,467,59), XSPR_MASK, BOOKE, { RS } }, -{ "mtdear", XSPR(31,467,61), XSPR_MASK, BOOKE, { RS } }, -{ "mtdear", XSPR(31,467,981), XSPR_MASK, PPC403, { RS } }, -{ "mtesr", XSPR(31,467,62), XSPR_MASK, BOOKE, { RS } }, -{ "mtesr", XSPR(31,467,980), XSPR_MASK, PPC403, { RS } }, -{ "mtivpr", XSPR(31,467,63), XSPR_MASK, BOOKE, { RS } }, -{ "mtcmpa", XSPR(31,467,144), XSPR_MASK, PPC860, { RS } }, -{ "mtcmpb", XSPR(31,467,145), XSPR_MASK, PPC860, { RS } }, -{ "mtcmpc", XSPR(31,467,146), XSPR_MASK, PPC860, { RS } }, -{ "mtcmpd", XSPR(31,467,147), XSPR_MASK, PPC860, { RS } }, -{ "mticr", XSPR(31,467,148), XSPR_MASK, PPC860, { RS } }, -{ "mtder", XSPR(31,467,149), XSPR_MASK, PPC860, { RS } }, -{ "mtcounta", XSPR(31,467,150), 
XSPR_MASK, PPC860, { RS } }, -{ "mtcountb", XSPR(31,467,151), XSPR_MASK, PPC860, { RS } }, -{ "mtcmpe", XSPR(31,467,152), XSPR_MASK, PPC860, { RS } }, -{ "mtcmpf", XSPR(31,467,153), XSPR_MASK, PPC860, { RS } }, -{ "mtcmpg", XSPR(31,467,154), XSPR_MASK, PPC860, { RS } }, -{ "mtcmph", XSPR(31,467,155), XSPR_MASK, PPC860, { RS } }, -{ "mtlctrl1", XSPR(31,467,156), XSPR_MASK, PPC860, { RS } }, -{ "mtlctrl2", XSPR(31,467,157), XSPR_MASK, PPC860, { RS } }, -{ "mtictrl", XSPR(31,467,158), XSPR_MASK, PPC860, { RS } }, -{ "mtbar", XSPR(31,467,159), XSPR_MASK, PPC860, { RS } }, -{ "mtvrsave", XSPR(31,467,256), XSPR_MASK, PPCVEC, { RS } }, -{ "mtusprg0", XSPR(31,467,256), XSPR_MASK, BOOKE, { RS } }, -{ "mtsprg", XSPR(31,467,256), XSPRG_MASK,PPC, { SPRG, RS } }, -{ "mtsprg0", XSPR(31,467,272), XSPR_MASK, PPC, { RS } }, -{ "mtsprg1", XSPR(31,467,273), XSPR_MASK, PPC, { RS } }, -{ "mtsprg2", XSPR(31,467,274), XSPR_MASK, PPC, { RS } }, -{ "mtsprg3", XSPR(31,467,275), XSPR_MASK, PPC, { RS } }, -{ "mtsprg4", XSPR(31,467,276), XSPR_MASK, PPC405 | BOOKE, { RS } }, -{ "mtsprg5", XSPR(31,467,277), XSPR_MASK, PPC405 | BOOKE, { RS } }, -{ "mtsprg6", XSPR(31,467,278), XSPR_MASK, PPC405 | BOOKE, { RS } }, -{ "mtsprg7", XSPR(31,467,279), XSPR_MASK, PPC405 | BOOKE, { RS } }, -{ "mtasr", XSPR(31,467,280), XSPR_MASK, PPC64, { RS } }, -{ "mtear", XSPR(31,467,282), XSPR_MASK, PPC, { RS } }, -{ "mttbl", XSPR(31,467,284), XSPR_MASK, PPC, { RS } }, -{ "mttbu", XSPR(31,467,285), XSPR_MASK, PPC, { RS } }, -{ "mtdbsr", XSPR(31,467,304), XSPR_MASK, BOOKE, { RS } }, -{ "mtdbsr", XSPR(31,467,1008), XSPR_MASK, PPC403, { RS } }, -{ "mtdbcr0", XSPR(31,467,308), XSPR_MASK, BOOKE, { RS } }, -{ "mtdbcr0", XSPR(31,467,1010), XSPR_MASK, PPC405, { RS } }, -{ "mtdbcr1", XSPR(31,467,309), XSPR_MASK, BOOKE, { RS } }, -{ "mtdbcr1", XSPR(31,467,957), XSPR_MASK, PPC405, { RS } }, -{ "mtdbcr2", XSPR(31,467,310), XSPR_MASK, BOOKE, { RS } }, -{ "mtiac1", XSPR(31,467,312), XSPR_MASK, BOOKE, { RS } }, -{ "mtiac1", XSPR(31,467,1012), XSPR_MASK, PPC403, { RS } }, -{ "mtiac2", XSPR(31,467,313), XSPR_MASK, BOOKE, { RS } }, -{ "mtiac2", XSPR(31,467,1013), XSPR_MASK, PPC403, { RS } }, -{ "mtiac3", XSPR(31,467,314), XSPR_MASK, BOOKE, { RS } }, -{ "mtiac3", XSPR(31,467,948), XSPR_MASK, PPC405, { RS } }, -{ "mtiac4", XSPR(31,467,315), XSPR_MASK, BOOKE, { RS } }, -{ "mtiac4", XSPR(31,467,949), XSPR_MASK, PPC405, { RS } }, -{ "mtdac1", XSPR(31,467,316), XSPR_MASK, BOOKE, { RS } }, -{ "mtdac1", XSPR(31,467,1014), XSPR_MASK, PPC403, { RS } }, -{ "mtdac2", XSPR(31,467,317), XSPR_MASK, BOOKE, { RS } }, -{ "mtdac2", XSPR(31,467,1015), XSPR_MASK, PPC403, { RS } }, -{ "mtdvc1", XSPR(31,467,318), XSPR_MASK, BOOKE, { RS } }, -{ "mtdvc1", XSPR(31,467,950), XSPR_MASK, PPC405, { RS } }, -{ "mtdvc2", XSPR(31,467,319), XSPR_MASK, BOOKE, { RS } }, -{ "mtdvc2", XSPR(31,467,951), XSPR_MASK, PPC405, { RS } }, -{ "mttsr", XSPR(31,467,336), XSPR_MASK, BOOKE, { RS } }, -{ "mttsr", XSPR(31,467,984), XSPR_MASK, PPC403, { RS } }, -{ "mttcr", XSPR(31,467,340), XSPR_MASK, BOOKE, { RS } }, -{ "mttcr", XSPR(31,467,986), XSPR_MASK, PPC403, { RS } }, -{ "mtivor0", XSPR(31,467,400), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor1", XSPR(31,467,401), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor2", XSPR(31,467,402), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor3", XSPR(31,467,403), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor4", XSPR(31,467,404), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor5", XSPR(31,467,405), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor6", XSPR(31,467,406), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor7", 
XSPR(31,467,407), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor8", XSPR(31,467,408), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor9", XSPR(31,467,409), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor10", XSPR(31,467,410), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor11", XSPR(31,467,411), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor12", XSPR(31,467,412), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor13", XSPR(31,467,413), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor14", XSPR(31,467,414), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor15", XSPR(31,467,415), XSPR_MASK, BOOKE, { RS } }, -{ "mtspefscr", XSPR(31,467,512), XSPR_MASK, PPCSPE, { RS } }, -{ "mtbbear", XSPR(31,467,513), XSPR_MASK, PPCBRLK, { RS } }, -{ "mtbbtar", XSPR(31,467,514), XSPR_MASK, PPCBRLK, { RS } }, -{ "mtivor32", XSPR(31,467,528), XSPR_MASK, PPCSPE, { RS } }, -{ "mtivor33", XSPR(31,467,529), XSPR_MASK, PPCSPE, { RS } }, -{ "mtivor34", XSPR(31,467,530), XSPR_MASK, PPCSPE, { RS } }, -{ "mtivor35", XSPR(31,467,531), XSPR_MASK, PPCPMR, { RS } }, -{ "mtibatu", XSPR(31,467,528), XSPRBAT_MASK, PPC, { SPRBAT, RS } }, -{ "mtibatl", XSPR(31,467,529), XSPRBAT_MASK, PPC, { SPRBAT, RS } }, -{ "mtdbatu", XSPR(31,467,536), XSPRBAT_MASK, PPC, { SPRBAT, RS } }, -{ "mtdbatl", XSPR(31,467,537), XSPRBAT_MASK, PPC, { SPRBAT, RS } }, -{ "mtmcsrr0", XSPR(31,467,570), XSPR_MASK, PPCRFMCI, { RS } }, -{ "mtmcsrr1", XSPR(31,467,571), XSPR_MASK, PPCRFMCI, { RS } }, -{ "mtmcsr", XSPR(31,467,572), XSPR_MASK, PPCRFMCI, { RS } }, -{ "mtummcr0", XSPR(31,467,936), XSPR_MASK, PPC750, { RS } }, -{ "mtupmc1", XSPR(31,467,937), XSPR_MASK, PPC750, { RS } }, -{ "mtupmc2", XSPR(31,467,938), XSPR_MASK, PPC750, { RS } }, -{ "mtusia", XSPR(31,467,939), XSPR_MASK, PPC750, { RS } }, -{ "mtummcr1", XSPR(31,467,940), XSPR_MASK, PPC750, { RS } }, -{ "mtupmc3", XSPR(31,467,941), XSPR_MASK, PPC750, { RS } }, -{ "mtupmc4", XSPR(31,467,942), XSPR_MASK, PPC750, { RS } }, -{ "mtzpr", XSPR(31,467,944), XSPR_MASK, PPC403, { RS } }, -{ "mtccr0", XSPR(31,467,947), XSPR_MASK, PPC405, { RS } }, -{ "mtmmcr0", XSPR(31,467,952), XSPR_MASK, PPC750, { RS } }, -{ "mtsgr", XSPR(31,467,953), XSPR_MASK, PPC403, { RS } }, -{ "mtpmc1", XSPR(31,467,953), XSPR_MASK, PPC750, { RS } }, -{ "mtdcwr", XSPR(31,467,954), XSPR_MASK, PPC403, { RS } }, -{ "mtpmc2", XSPR(31,467,954), XSPR_MASK, PPC750, { RS } }, -{ "mtsler", XSPR(31,467,955), XSPR_MASK, PPC405, { RS } }, -{ "mtsia", XSPR(31,467,955), XSPR_MASK, PPC750, { RS } }, -{ "mtsu0r", XSPR(31,467,956), XSPR_MASK, PPC405, { RS } }, -{ "mtmmcr1", XSPR(31,467,956), XSPR_MASK, PPC750, { RS } }, -{ "mtpmc3", XSPR(31,467,957), XSPR_MASK, PPC750, { RS } }, -{ "mtpmc4", XSPR(31,467,958), XSPR_MASK, PPC750, { RS } }, -{ "mticdbdr", XSPR(31,467,979), XSPR_MASK, PPC403, { RS } }, -{ "mtevpr", XSPR(31,467,982), XSPR_MASK, PPC403, { RS } }, -{ "mtcdbcr", XSPR(31,467,983), XSPR_MASK, PPC403, { RS } }, -{ "mtpit", XSPR(31,467,987), XSPR_MASK, PPC403, { RS } }, -{ "mttbhi", XSPR(31,467,988), XSPR_MASK, PPC403, { RS } }, -{ "mttblo", XSPR(31,467,989), XSPR_MASK, PPC403, { RS } }, -{ "mtsrr2", XSPR(31,467,990), XSPR_MASK, PPC403, { RS } }, -{ "mtsrr3", XSPR(31,467,991), XSPR_MASK, PPC403, { RS } }, -{ "mtl2cr", XSPR(31,467,1017), XSPR_MASK, PPC750, { RS } }, -{ "mtdccr", XSPR(31,467,1018), XSPR_MASK, PPC403, { RS } }, -{ "mticcr", XSPR(31,467,1019), XSPR_MASK, PPC403, { RS } }, -{ "mtictc", XSPR(31,467,1019), XSPR_MASK, PPC750, { RS } }, -{ "mtpbl1", XSPR(31,467,1020), XSPR_MASK, PPC403, { RS } }, -{ "mtthrm1", XSPR(31,467,1020), XSPR_MASK, PPC750, { RS } }, -{ "mtpbu1", XSPR(31,467,1021), XSPR_MASK, PPC403, { RS } }, -{ 
"mtthrm2", XSPR(31,467,1021), XSPR_MASK, PPC750, { RS } }, -{ "mtpbl2", XSPR(31,467,1022), XSPR_MASK, PPC403, { RS } }, -{ "mtthrm3", XSPR(31,467,1022), XSPR_MASK, PPC750, { RS } }, -{ "mtpbu2", XSPR(31,467,1023), XSPR_MASK, PPC403, { RS } }, -{ "mtspr", X(31,467), X_MASK, COM, { SPR, RS } }, - -{ "dcbi", X(31,470), XRT_MASK, PPC, { RA, RB } }, - -{ "nand", XRC(31,476,0), X_MASK, COM, { RA, RS, RB } }, -{ "nand.", XRC(31,476,1), X_MASK, COM, { RA, RS, RB } }, - -{ "dcbie", X(31,478), XRT_MASK, BOOKE64, { RA, RB } }, - -{ "dcread", X(31,486), X_MASK, PPC403|PPC440, { RT, RA, RB }}, - -{ "mtpmr", X(31,462), X_MASK, PPCPMR, { PMR, RS }}, - -{ "icbtls", X(31,486), X_MASK, PPCCHLK, { CT, RA, RB }}, - -{ "nabs", XO(31,488,0,0), XORB_MASK, M601, { RT, RA } }, -{ "subfme64",XO(31,488,0,0), XORB_MASK, BOOKE64, { RT, RA } }, -{ "nabs.", XO(31,488,0,1), XORB_MASK, M601, { RT, RA } }, -{ "nabso", XO(31,488,1,0), XORB_MASK, M601, { RT, RA } }, -{ "subfme64o",XO(31,488,1,0), XORB_MASK, BOOKE64, { RT, RA } }, -{ "nabso.", XO(31,488,1,1), XORB_MASK, M601, { RT, RA } }, - -{ "divd", XO(31,489,0,0), XO_MASK, PPC64, { RT, RA, RB } }, -{ "divd.", XO(31,489,0,1), XO_MASK, PPC64, { RT, RA, RB } }, -{ "divdo", XO(31,489,1,0), XO_MASK, PPC64, { RT, RA, RB } }, -{ "divdo.", XO(31,489,1,1), XO_MASK, PPC64, { RT, RA, RB } }, - -{ "addme64", XO(31,490,0,0), XORB_MASK, BOOKE64, { RT, RA } }, -{ "addme64o",XO(31,490,1,0), XORB_MASK, BOOKE64, { RT, RA } }, - -{ "divw", XO(31,491,0,0), XO_MASK, PPC, { RT, RA, RB } }, -{ "divw.", XO(31,491,0,1), XO_MASK, PPC, { RT, RA, RB } }, -{ "divwo", XO(31,491,1,0), XO_MASK, PPC, { RT, RA, RB } }, -{ "divwo.", XO(31,491,1,1), XO_MASK, PPC, { RT, RA, RB } }, - -{ "icbtlse", X(31,494), X_MASK, PPCCHLK64, { CT, RA, RB }}, - -{ "slbia", X(31,498), 0xffffffff, PPC64, { 0 } }, - -{ "cli", X(31,502), XRB_MASK, POWER, { RT, RA } }, - -{ "stdcxe.", XRC(31,511,1), X_MASK, BOOKE64, { RS, RA, RB } }, - -{ "mcrxr", X(31,512), XRARB_MASK|(3<<21), COM, { BF } }, - -{ "bblels", X(31,518), X_MASK, PPCBRLK, { 0 }}, -{ "mcrxr64", X(31,544), XRARB_MASK|(3<<21), BOOKE64, { BF } }, - -{ "clcs", X(31,531), XRB_MASK, M601, { RT, RA } }, - -{ "ldbrx", X(31,532), X_MASK, CELL, { RT, RA0, RB } }, - -{ "lswx", X(31,533), X_MASK, PPCCOM, { RT, RA0, RB } }, -{ "lsx", X(31,533), X_MASK, PWRCOM, { RT, RA, RB } }, - -{ "lwbrx", X(31,534), X_MASK, PPCCOM, { RT, RA0, RB } }, -{ "lbrx", X(31,534), X_MASK, PWRCOM, { RT, RA, RB } }, - -{ "lfsx", X(31,535), X_MASK, COM, { FRT, RA0, RB } }, - -{ "srw", XRC(31,536,0), X_MASK, PPCCOM, { RA, RS, RB } }, -{ "sr", XRC(31,536,0), X_MASK, PWRCOM, { RA, RS, RB } }, -{ "srw.", XRC(31,536,1), X_MASK, PPCCOM, { RA, RS, RB } }, -{ "sr.", XRC(31,536,1), X_MASK, PWRCOM, { RA, RS, RB } }, - -{ "rrib", XRC(31,537,0), X_MASK, M601, { RA, RS, RB } }, -{ "rrib.", XRC(31,537,1), X_MASK, M601, { RA, RS, RB } }, - -{ "srd", XRC(31,539,0), X_MASK, PPC64, { RA, RS, RB } }, -{ "srd.", XRC(31,539,1), X_MASK, PPC64, { RA, RS, RB } }, - -{ "maskir", XRC(31,541,0), X_MASK, M601, { RA, RS, RB } }, -{ "maskir.", XRC(31,541,1), X_MASK, M601, { RA, RS, RB } }, - -{ "lwbrxe", X(31,542), X_MASK, BOOKE64, { RT, RA0, RB } }, - -{ "lfsxe", X(31,543), X_MASK, BOOKE64, { FRT, RA0, RB } }, - -{ "bbelr", X(31,550), X_MASK, PPCBRLK, { 0 }}, - -{ "tlbsync", X(31,566), 0xffffffff, PPC, { 0 } }, - -{ "lfsux", X(31,567), X_MASK, COM, { FRT, RAS, RB } }, - -{ "lfsuxe", X(31,575), X_MASK, BOOKE64, { FRT, RAS, RB } }, - -{ "mfsr", X(31,595), XRB_MASK|(1<<20), COM32, { RT, SR } }, - -{ "lswi", X(31,597), X_MASK, PPCCOM, { 
RT, RA0, NB } }, -{ "lsi", X(31,597), X_MASK, PWRCOM, { RT, RA0, NB } }, - -{ "lwsync", XSYNC(31,598,1), 0xffffffff, PPC, { 0 } }, -{ "ptesync", XSYNC(31,598,2), 0xffffffff, PPC64, { 0 } }, -{ "msync", X(31,598), 0xffffffff, BOOKE, { 0 } }, -{ "sync", X(31,598), XSYNC_MASK, PPCCOM, { LS } }, -{ "dcs", X(31,598), 0xffffffff, PWRCOM, { 0 } }, - -{ "lfdx", X(31,599), X_MASK, COM, { FRT, RA0, RB } }, - -{ "lfdxe", X(31,607), X_MASK, BOOKE64, { FRT, RA0, RB } }, - -{ "mffgpr", XRC(31,607,0), XRA_MASK, POWER6, { FRT, RB } }, - -{ "mfsri", X(31,627), X_MASK, PWRCOM, { RT, RA, RB } }, - -{ "dclst", X(31,630), XRB_MASK, PWRCOM, { RS, RA } }, - -{ "lfdux", X(31,631), X_MASK, COM, { FRT, RAS, RB } }, - -{ "lfduxe", X(31,639), X_MASK, BOOKE64, { FRT, RAS, RB } }, - -{ "mfsrin", X(31,659), XRA_MASK, PPC32, { RT, RB } }, - -{ "stdbrx", X(31,660), X_MASK, CELL, { RS, RA0, RB } }, - -{ "stswx", X(31,661), X_MASK, PPCCOM, { RS, RA0, RB } }, -{ "stsx", X(31,661), X_MASK, PWRCOM, { RS, RA0, RB } }, - -{ "stwbrx", X(31,662), X_MASK, PPCCOM, { RS, RA0, RB } }, -{ "stbrx", X(31,662), X_MASK, PWRCOM, { RS, RA0, RB } }, - -{ "stfsx", X(31,663), X_MASK, COM, { FRS, RA0, RB } }, - -{ "srq", XRC(31,664,0), X_MASK, M601, { RA, RS, RB } }, -{ "srq.", XRC(31,664,1), X_MASK, M601, { RA, RS, RB } }, - -{ "sre", XRC(31,665,0), X_MASK, M601, { RA, RS, RB } }, -{ "sre.", XRC(31,665,1), X_MASK, M601, { RA, RS, RB } }, - -{ "stwbrxe", X(31,670), X_MASK, BOOKE64, { RS, RA0, RB } }, - -{ "stfsxe", X(31,671), X_MASK, BOOKE64, { FRS, RA0, RB } }, - -{ "stfsux", X(31,695), X_MASK, COM, { FRS, RAS, RB } }, - -{ "sriq", XRC(31,696,0), X_MASK, M601, { RA, RS, SH } }, -{ "sriq.", XRC(31,696,1), X_MASK, M601, { RA, RS, SH } }, - -{ "stfsuxe", X(31,703), X_MASK, BOOKE64, { FRS, RAS, RB } }, - -{ "stswi", X(31,725), X_MASK, PPCCOM, { RS, RA0, NB } }, -{ "stsi", X(31,725), X_MASK, PWRCOM, { RS, RA0, NB } }, - -{ "stfdx", X(31,727), X_MASK, COM, { FRS, RA0, RB } }, - -{ "srlq", XRC(31,728,0), X_MASK, M601, { RA, RS, RB } }, -{ "srlq.", XRC(31,728,1), X_MASK, M601, { RA, RS, RB } }, - -{ "sreq", XRC(31,729,0), X_MASK, M601, { RA, RS, RB } }, -{ "sreq.", XRC(31,729,1), X_MASK, M601, { RA, RS, RB } }, - -{ "stfdxe", X(31,735), X_MASK, BOOKE64, { FRS, RA0, RB } }, - -{ "mftgpr", XRC(31,735,0), XRA_MASK, POWER6, { RT, FRB } }, - -{ "dcba", X(31,758), XRT_MASK, PPC405 | BOOKE, { RA, RB } }, - -{ "stfdux", X(31,759), X_MASK, COM, { FRS, RAS, RB } }, - -{ "srliq", XRC(31,760,0), X_MASK, M601, { RA, RS, SH } }, -{ "srliq.", XRC(31,760,1), X_MASK, M601, { RA, RS, SH } }, - -{ "dcbae", X(31,766), XRT_MASK, BOOKE64, { RA, RB } }, - -{ "stfduxe", X(31,767), X_MASK, BOOKE64, { FRS, RAS, RB } }, - -{ "tlbivax", X(31,786), XRT_MASK, BOOKE, { RA, RB } }, -{ "tlbivaxe",X(31,787), XRT_MASK, BOOKE64, { RA, RB } }, - -{ "lwzcix", X(31,789), X_MASK, POWER6, { RT, RA0, RB } }, - -{ "lhbrx", X(31,790), X_MASK, COM, { RT, RA0, RB } }, - -{ "sraw", XRC(31,792,0), X_MASK, PPCCOM, { RA, RS, RB } }, -{ "sra", XRC(31,792,0), X_MASK, PWRCOM, { RA, RS, RB } }, -{ "sraw.", XRC(31,792,1), X_MASK, PPCCOM, { RA, RS, RB } }, -{ "sra.", XRC(31,792,1), X_MASK, PWRCOM, { RA, RS, RB } }, - -{ "srad", XRC(31,794,0), X_MASK, PPC64, { RA, RS, RB } }, -{ "srad.", XRC(31,794,1), X_MASK, PPC64, { RA, RS, RB } }, - -{ "lhbrxe", X(31,798), X_MASK, BOOKE64, { RT, RA0, RB } }, - -{ "ldxe", X(31,799), X_MASK, BOOKE64, { RT, RA0, RB } }, -{ "lduxe", X(31,831), X_MASK, BOOKE64, { RT, RA0, RB } }, - -{ "rac", X(31,818), X_MASK, PWRCOM, { RT, RA, RB } }, - -{ "lhzcix", X(31,821), X_MASK, 
POWER6, { RT, RA0, RB } }, - -{ "dss", XDSS(31,822,0), XDSS_MASK, PPCVEC, { STRM } }, -{ "dssall", XDSS(31,822,1), XDSS_MASK, PPCVEC, { 0 } }, - -{ "srawi", XRC(31,824,0), X_MASK, PPCCOM, { RA, RS, SH } }, -{ "srai", XRC(31,824,0), X_MASK, PWRCOM, { RA, RS, SH } }, -{ "srawi.", XRC(31,824,1), X_MASK, PPCCOM, { RA, RS, SH } }, -{ "srai.", XRC(31,824,1), X_MASK, PWRCOM, { RA, RS, SH } }, - -{ "slbmfev", X(31,851), XRA_MASK, PPC64, { RT, RB } }, - -{ "lbzcix", X(31,853), X_MASK, POWER6, { RT, RA0, RB } }, - -{ "mbar", X(31,854), X_MASK, BOOKE, { MO } }, -{ "eieio", X(31,854), 0xffffffff, PPC, { 0 } }, - -{ "lfiwax", X(31,855), X_MASK, POWER6, { FRT, RA0, RB } }, - -{ "ldcix", X(31,885), X_MASK, POWER6, { RT, RA0, RB } }, - -{ "tlbsx", XRC(31,914,0), X_MASK, PPC403|BOOKE, { RTO, RA, RB } }, -{ "tlbsx.", XRC(31,914,1), X_MASK, PPC403|BOOKE, { RTO, RA, RB } }, -{ "tlbsxe", XRC(31,915,0), X_MASK, BOOKE64, { RTO, RA, RB } }, -{ "tlbsxe.", XRC(31,915,1), X_MASK, BOOKE64, { RTO, RA, RB } }, - -{ "slbmfee", X(31,915), XRA_MASK, PPC64, { RT, RB } }, - -{ "stwcix", X(31,917), X_MASK, POWER6, { RS, RA0, RB } }, - -{ "sthbrx", X(31,918), X_MASK, COM, { RS, RA0, RB } }, - -{ "sraq", XRC(31,920,0), X_MASK, M601, { RA, RS, RB } }, -{ "sraq.", XRC(31,920,1), X_MASK, M601, { RA, RS, RB } }, - -{ "srea", XRC(31,921,0), X_MASK, M601, { RA, RS, RB } }, -{ "srea.", XRC(31,921,1), X_MASK, M601, { RA, RS, RB } }, - -{ "extsh", XRC(31,922,0), XRB_MASK, PPCCOM, { RA, RS } }, -{ "exts", XRC(31,922,0), XRB_MASK, PWRCOM, { RA, RS } }, -{ "extsh.", XRC(31,922,1), XRB_MASK, PPCCOM, { RA, RS } }, -{ "exts.", XRC(31,922,1), XRB_MASK, PWRCOM, { RA, RS } }, - -{ "sthbrxe", X(31,926), X_MASK, BOOKE64, { RS, RA0, RB } }, - -{ "stdxe", X(31,927), X_MASK, BOOKE64, { RS, RA0, RB } }, - -{ "tlbrehi", XTLB(31,946,0), XTLB_MASK, PPC403, { RT, RA } }, -{ "tlbrelo", XTLB(31,946,1), XTLB_MASK, PPC403, { RT, RA } }, -{ "tlbre", X(31,946), X_MASK, PPC403|BOOKE, { RSO, RAOPT, SHO } }, - -{ "sthcix", X(31,949), X_MASK, POWER6, { RS, RA0, RB } }, - -{ "sraiq", XRC(31,952,0), X_MASK, M601, { RA, RS, SH } }, -{ "sraiq.", XRC(31,952,1), X_MASK, M601, { RA, RS, SH } }, - -{ "extsb", XRC(31,954,0), XRB_MASK, PPC, { RA, RS} }, -{ "extsb.", XRC(31,954,1), XRB_MASK, PPC, { RA, RS} }, - -{ "stduxe", X(31,959), X_MASK, BOOKE64, { RS, RAS, RB } }, - -{ "iccci", X(31,966), XRT_MASK, PPC403|PPC440, { RA, RB } }, - -{ "tlbwehi", XTLB(31,978,0), XTLB_MASK, PPC403, { RT, RA } }, -{ "tlbwelo", XTLB(31,978,1), XTLB_MASK, PPC403, { RT, RA } }, -{ "tlbwe", X(31,978), X_MASK, PPC403|BOOKE, { RSO, RAOPT, SHO } }, -{ "tlbld", X(31,978), XRTRA_MASK, PPC, { RB } }, - -{ "stbcix", X(31,981), X_MASK, POWER6, { RS, RA0, RB } }, - -{ "icbi", X(31,982), XRT_MASK, PPC, { RA, RB } }, - -{ "stfiwx", X(31,983), X_MASK, PPC, { FRS, RA0, RB } }, - -{ "extsw", XRC(31,986,0), XRB_MASK, PPC64 | BOOKE64,{ RA, RS } }, -{ "extsw.", XRC(31,986,1), XRB_MASK, PPC64, { RA, RS } }, - -{ "icread", X(31,998), XRT_MASK, PPC403|PPC440, { RA, RB } }, - -{ "icbie", X(31,990), XRT_MASK, BOOKE64, { RA, RB } }, -{ "stfiwxe", X(31,991), X_MASK, BOOKE64, { FRS, RA0, RB } }, - -{ "tlbli", X(31,1010), XRTRA_MASK, PPC, { RB } }, - -{ "stdcix", X(31,1013), X_MASK, POWER6, { RS, RA0, RB } }, - -{ "dcbzl", XOPL(31,1014,1), XRT_MASK,POWER4, { RA, RB } }, -{ "dcbz", X(31,1014), XRT_MASK, PPC, { RA, RB } }, -{ "dclz", X(31,1014), XRT_MASK, PPC, { RA, RB } }, - -{ "dcbze", X(31,1022), XRT_MASK, BOOKE64, { RA, RB } }, - -{ "lvebx", X(31, 7), X_MASK, PPCVEC, { VD, RA, RB } }, -{ "lvehx", X(31, 39), X_MASK, 
PPCVEC, { VD, RA, RB } }, -{ "lvewx", X(31, 71), X_MASK, PPCVEC, { VD, RA, RB } }, -{ "lvsl", X(31, 6), X_MASK, PPCVEC, { VD, RA, RB } }, -{ "lvsr", X(31, 38), X_MASK, PPCVEC, { VD, RA, RB } }, -{ "lvx", X(31, 103), X_MASK, PPCVEC, { VD, RA, RB } }, -{ "lvxl", X(31, 359), X_MASK, PPCVEC, { VD, RA, RB } }, -{ "stvebx", X(31, 135), X_MASK, PPCVEC, { VS, RA, RB } }, -{ "stvehx", X(31, 167), X_MASK, PPCVEC, { VS, RA, RB } }, -{ "stvewx", X(31, 199), X_MASK, PPCVEC, { VS, RA, RB } }, -{ "stvx", X(31, 231), X_MASK, PPCVEC, { VS, RA, RB } }, -{ "stvxl", X(31, 487), X_MASK, PPCVEC, { VS, RA, RB } }, - -/* New load/store left/right index vector instructions that are in the Cell only. */ -{ "lvlx", X(31, 519), X_MASK, CELL, { VD, RA0, RB } }, -{ "lvlxl", X(31, 775), X_MASK, CELL, { VD, RA0, RB } }, -{ "lvrx", X(31, 551), X_MASK, CELL, { VD, RA0, RB } }, -{ "lvrxl", X(31, 807), X_MASK, CELL, { VD, RA0, RB } }, -{ "stvlx", X(31, 647), X_MASK, CELL, { VS, RA0, RB } }, -{ "stvlxl", X(31, 903), X_MASK, CELL, { VS, RA0, RB } }, -{ "stvrx", X(31, 679), X_MASK, CELL, { VS, RA0, RB } }, -{ "stvrxl", X(31, 935), X_MASK, CELL, { VS, RA0, RB } }, - -{ "lwz", OP(32), OP_MASK, PPCCOM, { RT, D, RA0 } }, -{ "l", OP(32), OP_MASK, PWRCOM, { RT, D, RA0 } }, - -{ "lwzu", OP(33), OP_MASK, PPCCOM, { RT, D, RAL } }, -{ "lu", OP(33), OP_MASK, PWRCOM, { RT, D, RA0 } }, - -{ "lbz", OP(34), OP_MASK, COM, { RT, D, RA0 } }, - -{ "lbzu", OP(35), OP_MASK, COM, { RT, D, RAL } }, - -{ "stw", OP(36), OP_MASK, PPCCOM, { RS, D, RA0 } }, -{ "st", OP(36), OP_MASK, PWRCOM, { RS, D, RA0 } }, - -{ "stwu", OP(37), OP_MASK, PPCCOM, { RS, D, RAS } }, -{ "stu", OP(37), OP_MASK, PWRCOM, { RS, D, RA0 } }, - -{ "stb", OP(38), OP_MASK, COM, { RS, D, RA0 } }, - -{ "stbu", OP(39), OP_MASK, COM, { RS, D, RAS } }, - -{ "lhz", OP(40), OP_MASK, COM, { RT, D, RA0 } }, - -{ "lhzu", OP(41), OP_MASK, COM, { RT, D, RAL } }, - -{ "lha", OP(42), OP_MASK, COM, { RT, D, RA0 } }, - -{ "lhau", OP(43), OP_MASK, COM, { RT, D, RAL } }, - -{ "sth", OP(44), OP_MASK, COM, { RS, D, RA0 } }, - -{ "sthu", OP(45), OP_MASK, COM, { RS, D, RAS } }, - -{ "lmw", OP(46), OP_MASK, PPCCOM, { RT, D, RAM } }, -{ "lm", OP(46), OP_MASK, PWRCOM, { RT, D, RA0 } }, - -{ "stmw", OP(47), OP_MASK, PPCCOM, { RS, D, RA0 } }, -{ "stm", OP(47), OP_MASK, PWRCOM, { RS, D, RA0 } }, - -{ "lfs", OP(48), OP_MASK, COM, { FRT, D, RA0 } }, - -{ "lfsu", OP(49), OP_MASK, COM, { FRT, D, RAS } }, - -{ "lfd", OP(50), OP_MASK, COM, { FRT, D, RA0 } }, - -{ "lfdu", OP(51), OP_MASK, COM, { FRT, D, RAS } }, - -{ "stfs", OP(52), OP_MASK, COM, { FRS, D, RA0 } }, - -{ "stfsu", OP(53), OP_MASK, COM, { FRS, D, RAS } }, - -{ "stfd", OP(54), OP_MASK, COM, { FRS, D, RA0 } }, - -{ "stfdu", OP(55), OP_MASK, COM, { FRS, D, RAS } }, - -{ "lq", OP(56), OP_MASK, POWER4, { RTQ, DQ, RAQ } }, - -{ "lfq", OP(56), OP_MASK, POWER2, { FRT, D, RA0 } }, - -{ "lfqu", OP(57), OP_MASK, POWER2, { FRT, D, RA0 } }, - -{ "lfdp", OP(57), OP_MASK, POWER6, { FRT, D, RA0 } }, - -{ "lbze", DEO(58,0), DE_MASK, BOOKE64, { RT, DE, RA0 } }, -{ "lbzue", DEO(58,1), DE_MASK, BOOKE64, { RT, DE, RAL } }, -{ "lhze", DEO(58,2), DE_MASK, BOOKE64, { RT, DE, RA0 } }, -{ "lhzue", DEO(58,3), DE_MASK, BOOKE64, { RT, DE, RAL } }, -{ "lhae", DEO(58,4), DE_MASK, BOOKE64, { RT, DE, RA0 } }, -{ "lhaue", DEO(58,5), DE_MASK, BOOKE64, { RT, DE, RAL } }, -{ "lwze", DEO(58,6), DE_MASK, BOOKE64, { RT, DE, RA0 } }, -{ "lwzue", DEO(58,7), DE_MASK, BOOKE64, { RT, DE, RAL } }, -{ "stbe", DEO(58,8), DE_MASK, BOOKE64, { RS, DE, RA0 } }, -{ "stbue", DEO(58,9), DE_MASK, BOOKE64, { 
RS, DE, RAS } }, -{ "sthe", DEO(58,10), DE_MASK, BOOKE64, { RS, DE, RA0 } }, -{ "sthue", DEO(58,11), DE_MASK, BOOKE64, { RS, DE, RAS } }, -{ "stwe", DEO(58,14), DE_MASK, BOOKE64, { RS, DE, RA0 } }, -{ "stwue", DEO(58,15), DE_MASK, BOOKE64, { RS, DE, RAS } }, - -{ "ld", DSO(58,0), DS_MASK, PPC64, { RT, DS, RA0 } }, - -{ "ldu", DSO(58,1), DS_MASK, PPC64, { RT, DS, RAL } }, - -{ "lwa", DSO(58,2), DS_MASK, PPC64, { RT, DS, RA0 } }, - -{ "dadd", XRC(59,2,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "dadd.", XRC(59,2,1), X_MASK, POWER6, { FRT, FRA, FRB } }, - -{ "dqua", ZRC(59,3,0), Z2_MASK, POWER6, { FRT, FRA, FRB, RMC } }, -{ "dqua.", ZRC(59,3,1), Z2_MASK, POWER6, { FRT, FRA, FRB, RMC } }, - -{ "fdivs", A(59,18,0), AFRC_MASK, PPC, { FRT, FRA, FRB } }, -{ "fdivs.", A(59,18,1), AFRC_MASK, PPC, { FRT, FRA, FRB } }, - -{ "fsubs", A(59,20,0), AFRC_MASK, PPC, { FRT, FRA, FRB } }, -{ "fsubs.", A(59,20,1), AFRC_MASK, PPC, { FRT, FRA, FRB } }, - -{ "fadds", A(59,21,0), AFRC_MASK, PPC, { FRT, FRA, FRB } }, -{ "fadds.", A(59,21,1), AFRC_MASK, PPC, { FRT, FRA, FRB } }, - -{ "fsqrts", A(59,22,0), AFRAFRC_MASK, PPC, { FRT, FRB } }, -{ "fsqrts.", A(59,22,1), AFRAFRC_MASK, PPC, { FRT, FRB } }, - -{ "fres", A(59,24,0), AFRALFRC_MASK, PPC, { FRT, FRB, A_L } }, -{ "fres.", A(59,24,1), AFRALFRC_MASK, PPC, { FRT, FRB, A_L } }, - -{ "fmuls", A(59,25,0), AFRB_MASK, PPC, { FRT, FRA, FRC } }, -{ "fmuls.", A(59,25,1), AFRB_MASK, PPC, { FRT, FRA, FRC } }, - -{ "frsqrtes", A(59,26,0), AFRALFRC_MASK,POWER5, { FRT, FRB, A_L } }, -{ "frsqrtes.",A(59,26,1), AFRALFRC_MASK,POWER5, { FRT, FRB, A_L } }, - -{ "fmsubs", A(59,28,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, -{ "fmsubs.", A(59,28,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, - -{ "fmadds", A(59,29,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, -{ "fmadds.", A(59,29,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, - -{ "fnmsubs", A(59,30,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, -{ "fnmsubs.",A(59,30,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, - -{ "fnmadds", A(59,31,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, -{ "fnmadds.",A(59,31,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, - -{ "dmul", XRC(59,34,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "dmul.", XRC(59,34,1), X_MASK, POWER6, { FRT, FRA, FRB } }, - -{ "drrnd", ZRC(59,35,0), Z2_MASK, POWER6, { FRT, FRA, FRB, RMC } }, -{ "drrnd.", ZRC(59,35,1), Z2_MASK, POWER6, { FRT, FRA, FRB, RMC } }, - -{ "dscli", ZRC(59,66,0), Z_MASK, POWER6, { FRT, FRA, SH16 } }, -{ "dscli.", ZRC(59,66,1), Z_MASK, POWER6, { FRT, FRA, SH16 } }, - -{ "dquai", ZRC(59,67,0), Z2_MASK, POWER6, { TE, FRT, FRB, RMC } }, -{ "dquai.", ZRC(59,67,1), Z2_MASK, POWER6, { TE, FRT, FRB, RMC } }, - -{ "dscri", ZRC(59,98,0), Z_MASK, POWER6, { FRT, FRA, SH16 } }, -{ "dscri.", ZRC(59,98,1), Z_MASK, POWER6, { FRT, FRA, SH16 } }, - -{ "drintx", ZRC(59,99,0), Z2_MASK, POWER6, { R, FRT, FRB, RMC } }, -{ "drintx.", ZRC(59,99,1), Z2_MASK, POWER6, { R, FRT, FRB, RMC } }, - -{ "dcmpo", X(59,130), X_MASK, POWER6, { BF, FRA, FRB } }, - -{ "dtstex", X(59,162), X_MASK, POWER6, { BF, FRA, FRB } }, -{ "dtstdc", Z(59,194), Z_MASK, POWER6, { BF, FRA, DCM } }, -{ "dtstdg", Z(59,226), Z_MASK, POWER6, { BF, FRA, DGM } }, - -{ "drintn", ZRC(59,227,0), Z2_MASK, POWER6, { R, FRT, FRB, RMC } }, -{ "drintn.", ZRC(59,227,1), Z2_MASK, POWER6, { R, FRT, FRB, RMC } }, - -{ "dctdp", XRC(59,258,0), X_MASK, POWER6, { FRT, FRB } }, -{ "dctdp.", XRC(59,258,1), X_MASK, POWER6, { FRT, FRB } }, - -{ "dctfix", XRC(59,290,0), X_MASK, POWER6, { FRT, FRB } }, -{ "dctfix.", XRC(59,290,1), X_MASK, POWER6, { FRT, FRB } }, - -{ "ddedpd", 
XRC(59,322,0), X_MASK, POWER6, { SP, FRT, FRB } }, -{ "ddedpd.", XRC(59,322,1), X_MASK, POWER6, { SP, FRT, FRB } }, - -{ "dxex", XRC(59,354,0), X_MASK, POWER6, { FRT, FRB } }, -{ "dxex.", XRC(59,354,1), X_MASK, POWER6, { FRT, FRB } }, - -{ "dsub", XRC(59,514,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "dsub.", XRC(59,514,1), X_MASK, POWER6, { FRT, FRA, FRB } }, - -{ "ddiv", XRC(59,546,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "ddiv.", XRC(59,546,1), X_MASK, POWER6, { FRT, FRA, FRB } }, - -{ "dcmpu", X(59,642), X_MASK, POWER6, { BF, FRA, FRB } }, - -{ "dtstsf", X(59,674), X_MASK, POWER6, { BF, FRA, FRB } }, - -{ "drsp", XRC(59,770,0), X_MASK, POWER6, { FRT, FRB } }, -{ "drsp.", XRC(59,770,1), X_MASK, POWER6, { FRT, FRB } }, - -{ "dcffix", XRC(59,802,0), X_MASK, POWER6, { FRT, FRB } }, -{ "dcffix.", XRC(59,802,1), X_MASK, POWER6, { FRT, FRB } }, - -{ "denbcd", XRC(59,834,0), X_MASK, POWER6, { S, FRT, FRB } }, -{ "denbcd.", XRC(59,834,1), X_MASK, POWER6, { S, FRT, FRB } }, - -{ "diex", XRC(59,866,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "diex.", XRC(59,866,1), X_MASK, POWER6, { FRT, FRA, FRB } }, - -{ "stfq", OP(60), OP_MASK, POWER2, { FRS, D, RA } }, - -{ "stfqu", OP(61), OP_MASK, POWER2, { FRS, D, RA } }, - -{ "stfdp", OP(61), OP_MASK, POWER6, { FRT, D, RA0 } }, - -{ "lde", DEO(62,0), DE_MASK, BOOKE64, { RT, DES, RA0 } }, -{ "ldue", DEO(62,1), DE_MASK, BOOKE64, { RT, DES, RA0 } }, -{ "lfse", DEO(62,4), DE_MASK, BOOKE64, { FRT, DES, RA0 } }, -{ "lfsue", DEO(62,5), DE_MASK, BOOKE64, { FRT, DES, RAS } }, -{ "lfde", DEO(62,6), DE_MASK, BOOKE64, { FRT, DES, RA0 } }, -{ "lfdue", DEO(62,7), DE_MASK, BOOKE64, { FRT, DES, RAS } }, -{ "stde", DEO(62,8), DE_MASK, BOOKE64, { RS, DES, RA0 } }, -{ "stdue", DEO(62,9), DE_MASK, BOOKE64, { RS, DES, RAS } }, -{ "stfse", DEO(62,12), DE_MASK, BOOKE64, { FRS, DES, RA0 } }, -{ "stfsue", DEO(62,13), DE_MASK, BOOKE64, { FRS, DES, RAS } }, -{ "stfde", DEO(62,14), DE_MASK, BOOKE64, { FRS, DES, RA0 } }, -{ "stfdue", DEO(62,15), DE_MASK, BOOKE64, { FRS, DES, RAS } }, - -{ "std", DSO(62,0), DS_MASK, PPC64, { RS, DS, RA0 } }, - -{ "stdu", DSO(62,1), DS_MASK, PPC64, { RS, DS, RAS } }, - -{ "stq", DSO(62,2), DS_MASK, POWER4, { RSQ, DS, RA0 } }, - -{ "fcmpu", X(63,0), X_MASK|(3<<21), COM, { BF, FRA, FRB } }, - -{ "daddq", XRC(63,2,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "daddq.", XRC(63,2,1), X_MASK, POWER6, { FRT, FRA, FRB } }, - -{ "dquaq", ZRC(63,3,0), Z2_MASK, POWER6, { FRT, FRA, FRB, RMC } }, -{ "dquaq.", ZRC(63,3,1), Z2_MASK, POWER6, { FRT, FRA, FRB, RMC } }, - -{ "fcpsgn", XRC(63,8,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "fcpsgn.", XRC(63,8,1), X_MASK, POWER6, { FRT, FRA, FRB } }, - -{ "frsp", XRC(63,12,0), XRA_MASK, COM, { FRT, FRB } }, -{ "frsp.", XRC(63,12,1), XRA_MASK, COM, { FRT, FRB } }, - -{ "fctiw", XRC(63,14,0), XRA_MASK, PPCCOM, { FRT, FRB } }, -{ "fcir", XRC(63,14,0), XRA_MASK, POWER2, { FRT, FRB } }, -{ "fctiw.", XRC(63,14,1), XRA_MASK, PPCCOM, { FRT, FRB } }, -{ "fcir.", XRC(63,14,1), XRA_MASK, POWER2, { FRT, FRB } }, - -{ "fctiwz", XRC(63,15,0), XRA_MASK, PPCCOM, { FRT, FRB } }, -{ "fcirz", XRC(63,15,0), XRA_MASK, POWER2, { FRT, FRB } }, -{ "fctiwz.", XRC(63,15,1), XRA_MASK, PPCCOM, { FRT, FRB } }, -{ "fcirz.", XRC(63,15,1), XRA_MASK, POWER2, { FRT, FRB } }, - -{ "fdiv", A(63,18,0), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } }, -{ "fd", A(63,18,0), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } }, -{ "fdiv.", A(63,18,1), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } }, -{ "fd.", A(63,18,1), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } }, - -{ "fsub", A(63,20,0), 
AFRC_MASK, PPCCOM, { FRT, FRA, FRB } }, -{ "fs", A(63,20,0), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } }, -{ "fsub.", A(63,20,1), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } }, -{ "fs.", A(63,20,1), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } }, - -{ "fadd", A(63,21,0), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } }, -{ "fa", A(63,21,0), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } }, -{ "fadd.", A(63,21,1), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } }, -{ "fa.", A(63,21,1), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } }, - -{ "fsqrt", A(63,22,0), AFRAFRC_MASK, PPCPWR2, { FRT, FRB } }, -{ "fsqrt.", A(63,22,1), AFRAFRC_MASK, PPCPWR2, { FRT, FRB } }, - -{ "fsel", A(63,23,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, -{ "fsel.", A(63,23,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, - -{ "fre", A(63,24,0), AFRALFRC_MASK, POWER5, { FRT, FRB, A_L } }, -{ "fre.", A(63,24,1), AFRALFRC_MASK, POWER5, { FRT, FRB, A_L } }, - -{ "fmul", A(63,25,0), AFRB_MASK, PPCCOM, { FRT, FRA, FRC } }, -{ "fm", A(63,25,0), AFRB_MASK, PWRCOM, { FRT, FRA, FRC } }, -{ "fmul.", A(63,25,1), AFRB_MASK, PPCCOM, { FRT, FRA, FRC } }, -{ "fm.", A(63,25,1), AFRB_MASK, PWRCOM, { FRT, FRA, FRC } }, - -{ "frsqrte", A(63,26,0), AFRALFRC_MASK, PPC, { FRT, FRB, A_L } }, -{ "frsqrte.",A(63,26,1), AFRALFRC_MASK, PPC, { FRT, FRB, A_L } }, - -{ "fmsub", A(63,28,0), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } }, -{ "fms", A(63,28,0), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } }, -{ "fmsub.", A(63,28,1), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } }, -{ "fms.", A(63,28,1), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } }, - -{ "fmadd", A(63,29,0), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } }, -{ "fma", A(63,29,0), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } }, -{ "fmadd.", A(63,29,1), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } }, -{ "fma.", A(63,29,1), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } }, - -{ "fnmsub", A(63,30,0), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } }, -{ "fnms", A(63,30,0), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } }, -{ "fnmsub.", A(63,30,1), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } }, -{ "fnms.", A(63,30,1), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } }, - -{ "fnmadd", A(63,31,0), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } }, -{ "fnma", A(63,31,0), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } }, -{ "fnmadd.", A(63,31,1), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } }, -{ "fnma.", A(63,31,1), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } }, - -{ "fcmpo", X(63,32), X_MASK|(3<<21), COM, { BF, FRA, FRB } }, - -{ "dmulq", XRC(63,34,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "dmulq.", XRC(63,34,1), X_MASK, POWER6, { FRT, FRA, FRB } }, - -{ "drrndq", ZRC(63,35,0), Z2_MASK, POWER6, { FRT, FRA, FRB, RMC } }, -{ "drrndq.", ZRC(63,35,1), Z2_MASK, POWER6, { FRT, FRA, FRB, RMC } }, - -{ "mtfsb1", XRC(63,38,0), XRARB_MASK, COM, { BT } }, -{ "mtfsb1.", XRC(63,38,1), XRARB_MASK, COM, { BT } }, - -{ "fneg", XRC(63,40,0), XRA_MASK, COM, { FRT, FRB } }, -{ "fneg.", XRC(63,40,1), XRA_MASK, COM, { FRT, FRB } }, - -{ "mcrfs", X(63,64), XRB_MASK|(3<<21)|(3<<16), COM, { BF, BFA } }, - -{ "dscliq", ZRC(63,66,0), Z_MASK, POWER6, { FRT, FRA, SH16 } }, -{ "dscliq.", ZRC(63,66,1), Z_MASK, POWER6, { FRT, FRA, SH16 } }, - -{ "dquaiq", ZRC(63,67,0), Z2_MASK, POWER6, { TE, FRT, FRB, RMC } }, -{ "dquaiq.", ZRC(63,67,1), Z2_MASK, POWER6, { FRT, FRA, FRB, RMC } }, - -{ "mtfsb0", XRC(63,70,0), XRARB_MASK, COM, { BT } }, -{ "mtfsb0.", XRC(63,70,1), XRARB_MASK, COM, { BT } }, - -{ "fmr", XRC(63,72,0), XRA_MASK, COM, { FRT, FRB } }, -{ "fmr.", XRC(63,72,1), XRA_MASK, COM, { FRT, FRB } }, - -{ "dscriq", ZRC(63,98,0), Z_MASK, POWER6, { FRT, FRA, SH16 } }, -{ "dscriq.", ZRC(63,98,1), Z_MASK, POWER6, { FRT, FRA, SH16 } }, - -{ "drintxq", 
ZRC(63,99,0), Z2_MASK, POWER6, { R, FRT, FRB, RMC } }, -{ "drintxq.",ZRC(63,99,1), Z2_MASK, POWER6, { R, FRT, FRB, RMC } }, - -{ "dcmpoq", X(63,130), X_MASK, POWER6, { BF, FRA, FRB } }, - -{ "mtfsfi", XRC(63,134,0), XWRA_MASK|(3<<21)|(1<<11), COM, { BFF, U, W } }, -{ "mtfsfi.", XRC(63,134,1), XWRA_MASK|(3<<21)|(1<<11), COM, { BFF, U, W } }, - -{ "fnabs", XRC(63,136,0), XRA_MASK, COM, { FRT, FRB } }, -{ "fnabs.", XRC(63,136,1), XRA_MASK, COM, { FRT, FRB } }, - -{ "dtstexq", X(63,162), X_MASK, POWER6, { BF, FRA, FRB } }, -{ "dtstdcq", Z(63,194), Z_MASK, POWER6, { BF, FRA, DCM } }, -{ "dtstdgq", Z(63,226), Z_MASK, POWER6, { BF, FRA, DGM } }, - -{ "drintnq", ZRC(63,227,0), Z2_MASK, POWER6, { R, FRT, FRB, RMC } }, -{ "drintnq.",ZRC(63,227,1), Z2_MASK, POWER6, { R, FRT, FRB, RMC } }, - -{ "dctqpq", XRC(63,258,0), X_MASK, POWER6, { FRT, FRB } }, -{ "dctqpq.", XRC(63,258,1), X_MASK, POWER6, { FRT, FRB } }, - -{ "fabs", XRC(63,264,0), XRA_MASK, COM, { FRT, FRB } }, -{ "fabs.", XRC(63,264,1), XRA_MASK, COM, { FRT, FRB } }, - -{ "dctfixq", XRC(63,290,0), X_MASK, POWER6, { FRT, FRB } }, -{ "dctfixq.",XRC(63,290,1), X_MASK, POWER6, { FRT, FRB } }, - -{ "ddedpdq", XRC(63,322,0), X_MASK, POWER6, { SP, FRT, FRB } }, -{ "ddedpdq.",XRC(63,322,1), X_MASK, POWER6, { SP, FRT, FRB } }, - -{ "dxexq", XRC(63,354,0), X_MASK, POWER6, { FRT, FRB } }, -{ "dxexq.", XRC(63,354,1), X_MASK, POWER6, { FRT, FRB } }, - -{ "frin", XRC(63,392,0), XRA_MASK, POWER5, { FRT, FRB } }, -{ "frin.", XRC(63,392,1), XRA_MASK, POWER5, { FRT, FRB } }, -{ "friz", XRC(63,424,0), XRA_MASK, POWER5, { FRT, FRB } }, -{ "friz.", XRC(63,424,1), XRA_MASK, POWER5, { FRT, FRB } }, -{ "frip", XRC(63,456,0), XRA_MASK, POWER5, { FRT, FRB } }, -{ "frip.", XRC(63,456,1), XRA_MASK, POWER5, { FRT, FRB } }, -{ "frim", XRC(63,488,0), XRA_MASK, POWER5, { FRT, FRB } }, -{ "frim.", XRC(63,488,1), XRA_MASK, POWER5, { FRT, FRB } }, - -{ "dsubq", XRC(63,514,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "dsubq.", XRC(63,514,1), X_MASK, POWER6, { FRT, FRA, FRB } }, - -{ "ddivq", XRC(63,546,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "ddivq.", XRC(63,546,1), X_MASK, POWER6, { FRT, FRA, FRB } }, - -{ "mffsl", XRA(63,583,12), XRARB_MASK, POWER9, { FRT } }, - -{ "mffs", XRC(63,583,0), XRARB_MASK, COM, { FRT } }, -{ "mffs.", XRC(63,583,1), XRARB_MASK, COM, { FRT } }, - -{ "dcmpuq", X(63,642), X_MASK, POWER6, { BF, FRA, FRB } }, - -{ "dtstsfq", X(63,674), X_MASK, POWER6, { BF, FRA, FRB } }, - -{ "mtfsf", XFL(63,711,0), XFL_MASK, COM, { FLM, FRB, XFL_L, W } }, -{ "mtfsf.", XFL(63,711,1), XFL_MASK, COM, { FLM, FRB, XFL_L, W } }, - -{ "drdpq", XRC(63,770,0), X_MASK, POWER6, { FRT, FRB } }, -{ "drdpq.", XRC(63,770,1), X_MASK, POWER6, { FRT, FRB } }, - -{ "dcffixq", XRC(63,802,0), X_MASK, POWER6, { FRT, FRB } }, -{ "dcffixq.",XRC(63,802,1), X_MASK, POWER6, { FRT, FRB } }, - -{ "fctid", XRC(63,814,0), XRA_MASK, PPC64, { FRT, FRB } }, -{ "fctid.", XRC(63,814,1), XRA_MASK, PPC64, { FRT, FRB } }, - -{ "fctidz", XRC(63,815,0), XRA_MASK, PPC64, { FRT, FRB } }, -{ "fctidz.", XRC(63,815,1), XRA_MASK, PPC64, { FRT, FRB } }, - -{ "denbcdq", XRC(63,834,0), X_MASK, POWER6, { S, FRT, FRB } }, -{ "denbcdq.",XRC(63,834,1), X_MASK, POWER6, { S, FRT, FRB } }, - -{ "fcfid", XRC(63,846,0), XRA_MASK, PPC64, { FRT, FRB } }, -{ "fcfid.", XRC(63,846,1), XRA_MASK, PPC64, { FRT, FRB } }, - -{ "diexq", XRC(63,866,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "diexq.", XRC(63,866,1), X_MASK, POWER6, { FRT, FRA, FRB } }, - -}; - -const int powerpc_num_opcodes = - sizeof (powerpc_opcodes) / sizeof 
(powerpc_opcodes[0]); - -/* The macro table. This is only used by the assembler. */ - -/* The expressions of the form (-x ! 31) & (x | 31) have the value 0 - when x=0; 32-x when x is between 1 and 31; are negative if x is - negative; and are 32 or more otherwise. This is what you want - when, for instance, you are emulating a right shift by a - rotate-left-and-mask, because the underlying instructions support - shifts of size 0 but not shifts of size 32. By comparison, when - extracting x bits from some word you want to use just 32-x, because - the underlying instructions don't support extracting 0 bits but do - support extracting the whole word (32 bits in this case). */ - -const struct powerpc_macro powerpc_macros[] = { -{ "extldi", 4, PPC64, "rldicr %0,%1,%3,(%2)-1" }, -{ "extldi.", 4, PPC64, "rldicr. %0,%1,%3,(%2)-1" }, -{ "extrdi", 4, PPC64, "rldicl %0,%1,(%2)+(%3),64-(%2)" }, -{ "extrdi.", 4, PPC64, "rldicl. %0,%1,(%2)+(%3),64-(%2)" }, -{ "insrdi", 4, PPC64, "rldimi %0,%1,64-((%2)+(%3)),%3" }, -{ "insrdi.", 4, PPC64, "rldimi. %0,%1,64-((%2)+(%3)),%3" }, -{ "rotrdi", 3, PPC64, "rldicl %0,%1,(-(%2)!63)&((%2)|63),0" }, -{ "rotrdi.", 3, PPC64, "rldicl. %0,%1,(-(%2)!63)&((%2)|63),0" }, -{ "sldi", 3, PPC64, "rldicr %0,%1,%2,63-(%2)" }, -{ "sldi.", 3, PPC64, "rldicr. %0,%1,%2,63-(%2)" }, -{ "srdi", 3, PPC64, "rldicl %0,%1,(-(%2)!63)&((%2)|63),%2" }, -{ "srdi.", 3, PPC64, "rldicl. %0,%1,(-(%2)!63)&((%2)|63),%2" }, -{ "clrrdi", 3, PPC64, "rldicr %0,%1,0,63-(%2)" }, -{ "clrrdi.", 3, PPC64, "rldicr. %0,%1,0,63-(%2)" }, -{ "clrlsldi",4, PPC64, "rldic %0,%1,%3,(%2)-(%3)" }, -{ "clrlsldi.",4, PPC64, "rldic. %0,%1,%3,(%2)-(%3)" }, - -{ "extlwi", 4, PPCCOM, "rlwinm %0,%1,%3,0,(%2)-1" }, -{ "extlwi.", 4, PPCCOM, "rlwinm. %0,%1,%3,0,(%2)-1" }, -{ "extrwi", 4, PPCCOM, "rlwinm %0,%1,((%2)+(%3))&((%2)+(%3)<>32),32-(%2),31" }, -{ "extrwi.", 4, PPCCOM, "rlwinm. %0,%1,((%2)+(%3))&((%2)+(%3)<>32),32-(%2),31" }, -{ "inslwi", 4, PPCCOM, "rlwimi %0,%1,(-(%3)!31)&((%3)|31),%3,(%2)+(%3)-1" }, -{ "inslwi.", 4, PPCCOM, "rlwimi. %0,%1,(-(%3)!31)&((%3)|31),%3,(%2)+(%3)-1"}, -{ "insrwi", 4, PPCCOM, "rlwimi %0,%1,32-((%2)+(%3)),%3,(%2)+(%3)-1" }, -{ "insrwi.", 4, PPCCOM, "rlwimi. %0,%1,32-((%2)+(%3)),%3,(%2)+(%3)-1"}, -{ "rotrwi", 3, PPCCOM, "rlwinm %0,%1,(-(%2)!31)&((%2)|31),0,31" }, -{ "rotrwi.", 3, PPCCOM, "rlwinm. %0,%1,(-(%2)!31)&((%2)|31),0,31" }, -{ "slwi", 3, PPCCOM, "rlwinm %0,%1,%2,0,31-(%2)" }, -{ "sli", 3, PWRCOM, "rlinm %0,%1,%2,0,31-(%2)" }, -{ "slwi.", 3, PPCCOM, "rlwinm. %0,%1,%2,0,31-(%2)" }, -{ "sli.", 3, PWRCOM, "rlinm. %0,%1,%2,0,31-(%2)" }, -{ "srwi", 3, PPCCOM, "rlwinm %0,%1,(-(%2)!31)&((%2)|31),%2,31" }, -{ "sri", 3, PWRCOM, "rlinm %0,%1,(-(%2)!31)&((%2)|31),%2,31" }, -{ "srwi.", 3, PPCCOM, "rlwinm. %0,%1,(-(%2)!31)&((%2)|31),%2,31" }, -{ "sri.", 3, PWRCOM, "rlinm. %0,%1,(-(%2)!31)&((%2)|31),%2,31" }, -{ "clrrwi", 3, PPCCOM, "rlwinm %0,%1,0,0,31-(%2)" }, -{ "clrrwi.", 3, PPCCOM, "rlwinm. %0,%1,0,0,31-(%2)" }, -{ "clrlslwi",4, PPCCOM, "rlwinm %0,%1,%3,(%2)-(%3),31-(%3)" }, -{ "clrlslwi.",4, PPCCOM, "rlwinm. %0,%1,%3,(%2)-(%3),31-(%3)" }, -}; - -const int powerpc_num_macros = - sizeof (powerpc_macros) / sizeof (powerpc_macros[0]); - - -/* This file provides several disassembler functions, all of which use - the disassembler interface defined in dis-asm.h. Several functions - are provided because this file handles disassembly for the PowerPC - in both big and little endian mode and also for the POWER (RS/6000) - chip. 
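The macro table above leans on the expression (-x ! 31) & (x | 31) (and its 64-bit twin with 63). In GAS's expression syntax '!' is the bitwise or-not operator, so in C terms this is ((-x) | ~31) & (x | 31). A minimal sketch, assuming 32-bit two's-complement arithmetic, that checks the values the comment claims and shows why the srwi macro's rotate-left-and-mask expansion really is a logical right shift (the helper below merely models rlwinm; it is not the assembler's implementation):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* GAS's infix '!' is "bitwise or not" (a ! b == a | ~b), so the macro
   expression (-x ! 31) & (x | 31) reads as follows in C. */
static int32_t shift_amount(int32_t x)
{
    return ((-x) | ~31) & (x | 31);
}

/* rlwinm-style rotate-left-then-mask; mb/me use PowerPC bit numbering
   (bit 0 is the most significant bit) and mb <= me is assumed. */
static uint32_t rotate_and_mask(uint32_t v, unsigned sh, unsigned mb, unsigned me)
{
    uint32_t rot = sh ? (v << sh) | (v >> (32 - sh)) : v;
    uint32_t mask = (0xffffffffu >> mb) & (0xffffffffu << (31 - me));
    return rot & mask;
}

int main(void)
{
    assert(shift_amount(0) == 0);              /* x = 0 gives 0, i.e. rotate by 0 */
    for (int x = 1; x <= 31; x++) {
        assert(shift_amount(x) == 32 - x);     /* 1..31 gives 32-x, as documented */
    }

    /* "srwi %0,%1,n" expands to "rlwinm %0,%1,(-(n)!31)&((n)|31),n,31". */
    uint32_t rs = 0xdeadbeefu;
    for (unsigned n = 0; n <= 31; n++) {
        assert(rotate_and_mask(rs, shift_amount(n), n, 31) == rs >> n);
    }
    printf("rotate-and-mask expansion matches a logical right shift\n");
    return 0;
}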
*/ - -static int print_insn_powerpc (bfd_vma, struct disassemble_info *, int, int); - -/* Determine which set of machines to disassemble for. PPC403/601 or - BookE. For convenience, also disassemble instructions supported - by the AltiVec vector unit. */ - -static int -powerpc_dialect (struct disassemble_info *info) -{ - int dialect = PPC_OPCODE_PPC; - - if (BFD_DEFAULT_TARGET_SIZE == 64) - dialect |= PPC_OPCODE_64; - - if (info->disassembler_options - && strstr (info->disassembler_options, "booke") != NULL) - dialect |= PPC_OPCODE_BOOKE | PPC_OPCODE_BOOKE64; - else if ((info->mach == bfd_mach_ppc_e500) - || (info->disassembler_options - && strstr (info->disassembler_options, "e500") != NULL)) - dialect |= (PPC_OPCODE_BOOKE - | PPC_OPCODE_SPE | PPC_OPCODE_ISEL - | PPC_OPCODE_EFS | PPC_OPCODE_BRLOCK - | PPC_OPCODE_PMR | PPC_OPCODE_CACHELCK - | PPC_OPCODE_RFMCI); - else if (info->disassembler_options - && strstr (info->disassembler_options, "efs") != NULL) - dialect |= PPC_OPCODE_EFS; - else if (info->disassembler_options - && strstr (info->disassembler_options, "e300") != NULL) - dialect |= PPC_OPCODE_E300 | PPC_OPCODE_CLASSIC | PPC_OPCODE_COMMON; - else if (info->disassembler_options - && strstr (info->disassembler_options, "440") != NULL) - dialect |= PPC_OPCODE_BOOKE | PPC_OPCODE_32 - | PPC_OPCODE_440 | PPC_OPCODE_ISEL | PPC_OPCODE_RFMCI; - else - dialect |= (PPC_OPCODE_403 | PPC_OPCODE_601 | PPC_OPCODE_CLASSIC - | PPC_OPCODE_COMMON | PPC_OPCODE_ALTIVEC); - - if (info->disassembler_options - && strstr (info->disassembler_options, "power4") != NULL) - dialect |= PPC_OPCODE_POWER4; - - if (info->disassembler_options - && strstr (info->disassembler_options, "power5") != NULL) - dialect |= PPC_OPCODE_POWER4 | PPC_OPCODE_POWER5; - - if (info->disassembler_options - && strstr (info->disassembler_options, "cell") != NULL) - dialect |= PPC_OPCODE_POWER4 | PPC_OPCODE_CELL | PPC_OPCODE_ALTIVEC; - - if (info->disassembler_options - && strstr (info->disassembler_options, "power6") != NULL) - dialect |= PPC_OPCODE_POWER4 | PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_ALTIVEC; - - if (info->disassembler_options - && strstr (info->disassembler_options, "any") != NULL) - dialect |= PPC_OPCODE_ANY; - - if (info->disassembler_options) - { - if (strstr (info->disassembler_options, "32") != NULL) - dialect &= ~PPC_OPCODE_64; - else if (strstr (info->disassembler_options, "64") != NULL) - dialect |= PPC_OPCODE_64; - } - - info->private_data = (char *) 0 + dialect; - return dialect; -} - -/* QEMU default */ -int -print_insn_ppc (bfd_vma memaddr, struct disassemble_info *info) -{ - int dialect = (char *) info->private_data - (char *) 0; - return print_insn_powerpc (memaddr, info, info->endian == BFD_ENDIAN_BIG, - dialect); -} - -/* Print a big endian PowerPC instruction. */ - -int -print_insn_big_powerpc (bfd_vma memaddr, struct disassemble_info *info) -{ - int dialect = (char *) info->private_data - (char *) 0; - return print_insn_powerpc (memaddr, info, 1, dialect); -} - -/* Print a little endian PowerPC instruction. */ - -int -print_insn_little_powerpc (bfd_vma memaddr, struct disassemble_info *info) -{ - int dialect = (char *) info->private_data - (char *) 0; - return print_insn_powerpc (memaddr, info, 0, dialect); -} - -/* Print a POWER (RS/6000) instruction. */ - -int -print_insn_rs6000 (bfd_vma memaddr, struct disassemble_info *info) -{ - return print_insn_powerpc (memaddr, info, 1, PPC_OPCODE_POWER); -} - -/* Extract the operand value from the PowerPC or POWER instruction. 
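Two details of the removed dialect plumbing are easy to miss: powerpc_dialect() accumulates dialect bits by substring-matching disassembler_options, and the result is then smuggled through the void *private_data field as a fake pointer offset so that print_insn_ppc() can recover it later. A minimal sketch of that round trip, using hypothetical OPT_* flag values rather than the real PPC_OPCODE_* bits:

#include <assert.h>
#include <string.h>

/* Hypothetical dialect flags, standing in for the real PPC_OPCODE_* bits. */
enum { OPT_BOOKE = 1 << 0, OPT_POWER4 = 1 << 1, OPT_64 = 1 << 2 };

static int parse_dialect(const char *options)
{
    int dialect = 0;

    if (options && strstr(options, "booke") != NULL)
        dialect |= OPT_BOOKE;
    if (options && strstr(options, "power4") != NULL)
        dialect |= OPT_POWER4;
    if (options && strstr(options, "64") != NULL)
        dialect |= OPT_64;
    return dialect;
}

int main(void)
{
    int dialect = parse_dialect("booke,64");
    void *private_data;

    assert(dialect == (OPT_BOOKE | OPT_64));

    /* Store the integer in a pointer field, then read it back, the way
       powerpc_dialect()/print_insn_ppc() do.  Arithmetic on a null pointer
       is not strictly conforming C; a uintptr_t cast would be the portable
       spelling, but this mirrors the removed code. */
    private_data = (char *) 0 + dialect;
    assert((char *) private_data - (char *) 0 == dialect);
    return 0;
}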
*/ - -static long -operand_value_powerpc (const struct powerpc_operand *operand, - unsigned long insn, int dialect) -{ - long value; - int invalid; - /* Extract the value from the instruction. */ - if (operand->extract) - value = (*operand->extract) (insn, dialect, &invalid); - else - { - value = (insn >> operand->shift) & operand->bitm; - if ((operand->flags & PPC_OPERAND_SIGNED) != 0) - { - /* BITM is always some number of zeros followed by some - number of ones, followed by some number of zeros. */ - unsigned long top = operand->bitm; - /* top & -top gives the rightmost 1 bit, so this - fills in any trailing zeros. */ - top |= (top & -top) - 1; - top &= ~(top >> 1); - value = (value ^ top) - top; - } - } - - return value; -} - -/* Determine whether the optional operand(s) should be printed. */ - -static int -skip_optional_operands (const unsigned char *opindex, - unsigned long insn, int dialect) -{ - const struct powerpc_operand *operand; - - for (; *opindex != 0; opindex++) - { - operand = &powerpc_operands[*opindex]; - if ((operand->flags & PPC_OPERAND_NEXT) != 0 - || ((operand->flags & PPC_OPERAND_OPTIONAL) != 0 - && operand_value_powerpc (operand, insn, dialect) != 0)) - return 0; - } - - return 1; -} - -/* Print a PowerPC or POWER instruction. */ - -static int -print_insn_powerpc (bfd_vma memaddr, - struct disassemble_info *info, - int bigendian, - int dialect) -{ - bfd_byte buffer[4]; - int status; - unsigned long insn; - const struct powerpc_opcode *opcode; - const struct powerpc_opcode *opcode_end; - unsigned long op; - - if (dialect == 0) - dialect = powerpc_dialect (info); - - status = (*info->read_memory_func) (memaddr, buffer, 4, info); - if (status != 0) - { - (*info->memory_error_func) (status, memaddr, info); - return -1; - } - - if (bigendian) - insn = bfd_getb32 (buffer); - else - insn = bfd_getl32 (buffer); - - /* Get the major opcode of the instruction. */ - op = PPC_OP (insn); - - /* Find the first match in the opcode table. We could speed this up - a bit by doing a binary search on the major opcode. */ - opcode_end = powerpc_opcodes + powerpc_num_opcodes; - again: - for (opcode = powerpc_opcodes; opcode < opcode_end; opcode++) - { - unsigned long table_op; - const unsigned char *opindex; - const struct powerpc_operand *operand; - int invalid; - int need_comma; - int need_paren; - int skip_optional; - - table_op = PPC_OP (opcode->opcode); - if (op < table_op) - break; - if (op > table_op) - continue; - - if ((insn & opcode->mask) != opcode->opcode - || (opcode->flags & dialect) == 0) - continue; - - /* Make two passes over the operands. First see if any of them - have extraction functions, and, if they do, make sure the - instruction is valid. */ - invalid = 0; - for (opindex = opcode->operands; *opindex != 0; opindex++) - { - operand = powerpc_operands + *opindex; - if (operand->extract) - (*operand->extract) (insn, dialect, &invalid); - } - if (invalid) - continue; - - /* The instruction is valid. */ - if (opcode->operands[0] != 0) - (*info->fprintf_func) (info->stream, "%-7s ", opcode->name); - else - (*info->fprintf_func) (info->stream, "%s", opcode->name); - - /* Now extract and print the operands. */ - need_comma = 0; - need_paren = 0; - skip_optional = -1; - for (opindex = opcode->operands; *opindex != 0; opindex++) - { - long value; - - operand = powerpc_operands + *opindex; - - /* Operands that are marked FAKE are simply ignored. We - already made sure that the extract function considered - the instruction to be valid. 
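The sign-extension dance in operand_value_powerpc() is worth spelling out: the operand's bit mask is first reduced to a single bit at the sign position, and (value ^ sign) - sign then extends the field. A small standalone sketch of the same trick, not tied to the powerpc_operand table:

#include <assert.h>
#include <stdio.h>

/* Given a field mask of the form zeros-ones-zeros, keep only its highest
   set bit (the sign position) and sign-extend the extracted value with
   (value ^ sign) - sign, as operand_value_powerpc() does. */
static long sign_extend_field(unsigned long value, unsigned long bitm)
{
    unsigned long top = bitm;

    top |= (top & -top) - 1;   /* fill in any trailing zeros of the mask */
    top &= ~(top >> 1);        /* keep only the highest mask bit */
    return (long) (value ^ top) - (long) top;
}

int main(void)
{
    /* A 16-bit signed displacement field (mask 0xffff). */
    assert(sign_extend_field(0x7fff, 0xffff) == 32767);
    assert(sign_extend_field(0x8000, 0xffff) == -32768);
    assert(sign_extend_field(0xfffc, 0xffff) == -4);

    /* A mask whose low bits are zero still resolves the right sign bit. */
    assert(sign_extend_field(0xfff0, 0xfff0) == -16);
    printf("sign extension matches\n");
    return 0;
}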
*/ - if ((operand->flags & PPC_OPERAND_FAKE) != 0) - continue; - - /* If all of the optional operands have the value zero, - then don't print any of them. */ - if ((operand->flags & PPC_OPERAND_OPTIONAL) != 0) - { - if (skip_optional < 0) - skip_optional = skip_optional_operands (opindex, insn, - dialect); - if (skip_optional) - continue; - } - - value = operand_value_powerpc (operand, insn, dialect); - - if (need_comma) - { - (*info->fprintf_func) (info->stream, ","); - need_comma = 0; - } - - /* Print the operand as directed by the flags. */ - if ((operand->flags & PPC_OPERAND_GPR) != 0 - || ((operand->flags & PPC_OPERAND_GPR_0) != 0 && value != 0)) - (*info->fprintf_func) (info->stream, "r%ld", value); - else if ((operand->flags & PPC_OPERAND_FPR) != 0) - (*info->fprintf_func) (info->stream, "f%ld", value); - else if ((operand->flags & PPC_OPERAND_VR) != 0) - (*info->fprintf_func) (info->stream, "v%ld", value); - else if ((operand->flags & PPC_OPERAND_RELATIVE) != 0) - (*info->print_address_func) (memaddr + value, info); - else if ((operand->flags & PPC_OPERAND_ABSOLUTE) != 0) - (*info->print_address_func) ((bfd_vma) value & 0xffffffff, info); - else if ((operand->flags & PPC_OPERAND_CR) == 0 - || (dialect & PPC_OPCODE_PPC) == 0) - (*info->fprintf_func) (info->stream, "%ld", value); - else - { - if (operand->bitm == 7) - (*info->fprintf_func) (info->stream, "cr%ld", value); - else - { - static const char *cbnames[4] = { "lt", "gt", "eq", "so" }; - int cr; - int cc; - - cr = value >> 2; - if (cr != 0) - (*info->fprintf_func) (info->stream, "4*cr%d+", cr); - cc = value & 3; - (*info->fprintf_func) (info->stream, "%s", cbnames[cc]); - } - } - - if (need_paren) - { - (*info->fprintf_func) (info->stream, ")"); - need_paren = 0; - } - - if ((operand->flags & PPC_OPERAND_PARENS) == 0) - need_comma = 1; - else - { - (*info->fprintf_func) (info->stream, "("); - need_paren = 1; - } - } - - /* We have found and printed an instruction; return. */ - return 4; - } - - if ((dialect & PPC_OPCODE_ANY) != 0) - { - dialect = ~PPC_OPCODE_ANY; - goto again; - } - - /* We could not find a match. 
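The lookup itself is a linear scan: the table is sorted by major opcode, so the loop can break as soon as the table's major opcode passes the one being decoded, and a candidate is accepted when the instruction bits under the entry's mask equal the entry's canonical bits and the entry's dialect flags intersect the active dialect. A stripped-down sketch of that test, with purely hypothetical entries rather than real PowerPC encodings (the PPC_OPCODE_ANY retry and the operand printing are omitted):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical opcode table, only to show the shape of the match test. */
struct opcode {
    const char *name;
    uint32_t opcode;   /* canonical instruction bits */
    uint32_t mask;     /* bits that must match */
    uint32_t flags;    /* dialects the entry belongs to */
};

enum { DIALECT_A = 1u << 0, DIALECT_B = 1u << 1 };

static const struct opcode table[] = {
    { "op.a", 0x10000000u, 0xfc000000u, DIALECT_A },
    { "op.b", 0x14000001u, 0xfc000003u, DIALECT_B },
};

static const char *match(uint32_t insn, uint32_t dialect)
{
    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
        if ((insn & table[i].mask) == table[i].opcode
            && (table[i].flags & dialect) != 0) {
            return table[i].name;
        }
    }
    return NULL;   /* caller falls back to printing ".long 0x%lx" */
}

int main(void)
{
    printf("%s\n", match(0x102384ffu, DIALECT_A));          /* "op.a" */
    printf("%d\n", match(0x14000001u, DIALECT_A) == NULL);  /* 1: dialect mismatch */
    return 0;
}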
*/ - (*info->fprintf_func) (info->stream, ".long 0x%lx", insn); - - return 4; -} diff --git a/disas/riscv.c b/disas/riscv.c index 278d9be924..d216b9c39b 100644 --- a/disas/riscv.c +++ b/disas/riscv.c @@ -156,6 +156,13 @@ typedef enum { rv_codec_css_swsp, rv_codec_css_sdsp, rv_codec_css_sqsp, + rv_codec_k_bs, + rv_codec_k_rnum, + rv_codec_v_r, + rv_codec_v_ldst, + rv_codec_v_i, + rv_codec_vsetvli, + rv_codec_vsetivli, } rv_codec; typedef enum { @@ -478,6 +485,456 @@ typedef enum { rv_op_fsflags = 316, rv_op_fsrmi = 317, rv_op_fsflagsi = 318, + rv_op_bseti = 319, + rv_op_bclri = 320, + rv_op_binvi = 321, + rv_op_bexti = 322, + rv_op_rori = 323, + rv_op_clz = 324, + rv_op_ctz = 325, + rv_op_cpop = 326, + rv_op_sext_h = 327, + rv_op_sext_b = 328, + rv_op_xnor = 329, + rv_op_orn = 330, + rv_op_andn = 331, + rv_op_rol = 332, + rv_op_ror = 333, + rv_op_sh1add = 334, + rv_op_sh2add = 335, + rv_op_sh3add = 336, + rv_op_sh1add_uw = 337, + rv_op_sh2add_uw = 338, + rv_op_sh3add_uw = 339, + rv_op_clmul = 340, + rv_op_clmulr = 341, + rv_op_clmulh = 342, + rv_op_min = 343, + rv_op_minu = 344, + rv_op_max = 345, + rv_op_maxu = 346, + rv_op_clzw = 347, + rv_op_ctzw = 348, + rv_op_cpopw = 349, + rv_op_slli_uw = 350, + rv_op_add_uw = 351, + rv_op_rolw = 352, + rv_op_rorw = 353, + rv_op_rev8 = 354, + rv_op_zext_h = 355, + rv_op_roriw = 356, + rv_op_orc_b = 357, + rv_op_bset = 358, + rv_op_bclr = 359, + rv_op_binv = 360, + rv_op_bext = 361, + rv_op_aes32esmi = 362, + rv_op_aes32esi = 363, + rv_op_aes32dsmi = 364, + rv_op_aes32dsi = 365, + rv_op_aes64ks1i = 366, + rv_op_aes64ks2 = 367, + rv_op_aes64im = 368, + rv_op_aes64esm = 369, + rv_op_aes64es = 370, + rv_op_aes64dsm = 371, + rv_op_aes64ds = 372, + rv_op_sha256sig0 = 373, + rv_op_sha256sig1 = 374, + rv_op_sha256sum0 = 375, + rv_op_sha256sum1 = 376, + rv_op_sha512sig0 = 377, + rv_op_sha512sig1 = 378, + rv_op_sha512sum0 = 379, + rv_op_sha512sum1 = 380, + rv_op_sha512sum0r = 381, + rv_op_sha512sum1r = 382, + rv_op_sha512sig0l = 383, + rv_op_sha512sig0h = 384, + rv_op_sha512sig1l = 385, + rv_op_sha512sig1h = 386, + rv_op_sm3p0 = 387, + rv_op_sm3p1 = 388, + rv_op_sm4ed = 389, + rv_op_sm4ks = 390, + rv_op_brev8 = 391, + rv_op_pack = 392, + rv_op_packh = 393, + rv_op_packw = 394, + rv_op_unzip = 395, + rv_op_zip = 396, + rv_op_xperm4 = 397, + rv_op_xperm8 = 398, + rv_op_vle8_v = 399, + rv_op_vle16_v = 400, + rv_op_vle32_v = 401, + rv_op_vle64_v = 402, + rv_op_vse8_v = 403, + rv_op_vse16_v = 404, + rv_op_vse32_v = 405, + rv_op_vse64_v = 406, + rv_op_vlm_v = 407, + rv_op_vsm_v = 408, + rv_op_vlse8_v = 409, + rv_op_vlse16_v = 410, + rv_op_vlse32_v = 411, + rv_op_vlse64_v = 412, + rv_op_vsse8_v = 413, + rv_op_vsse16_v = 414, + rv_op_vsse32_v = 415, + rv_op_vsse64_v = 416, + rv_op_vluxei8_v = 417, + rv_op_vluxei16_v = 418, + rv_op_vluxei32_v = 419, + rv_op_vluxei64_v = 420, + rv_op_vloxei8_v = 421, + rv_op_vloxei16_v = 422, + rv_op_vloxei32_v = 423, + rv_op_vloxei64_v = 424, + rv_op_vsuxei8_v = 425, + rv_op_vsuxei16_v = 426, + rv_op_vsuxei32_v = 427, + rv_op_vsuxei64_v = 428, + rv_op_vsoxei8_v = 429, + rv_op_vsoxei16_v = 430, + rv_op_vsoxei32_v = 431, + rv_op_vsoxei64_v = 432, + rv_op_vle8ff_v = 433, + rv_op_vle16ff_v = 434, + rv_op_vle32ff_v = 435, + rv_op_vle64ff_v = 436, + rv_op_vl1re8_v = 437, + rv_op_vl1re16_v = 438, + rv_op_vl1re32_v = 439, + rv_op_vl1re64_v = 440, + rv_op_vl2re8_v = 441, + rv_op_vl2re16_v = 442, + rv_op_vl2re32_v = 443, + rv_op_vl2re64_v = 444, + rv_op_vl4re8_v = 445, + rv_op_vl4re16_v = 446, + rv_op_vl4re32_v = 447, + rv_op_vl4re64_v = 448, + 
rv_op_vl8re8_v = 449, + rv_op_vl8re16_v = 450, + rv_op_vl8re32_v = 451, + rv_op_vl8re64_v = 452, + rv_op_vs1r_v = 453, + rv_op_vs2r_v = 454, + rv_op_vs4r_v = 455, + rv_op_vs8r_v = 456, + rv_op_vadd_vv = 457, + rv_op_vadd_vx = 458, + rv_op_vadd_vi = 459, + rv_op_vsub_vv = 460, + rv_op_vsub_vx = 461, + rv_op_vrsub_vx = 462, + rv_op_vrsub_vi = 463, + rv_op_vwaddu_vv = 464, + rv_op_vwaddu_vx = 465, + rv_op_vwadd_vv = 466, + rv_op_vwadd_vx = 467, + rv_op_vwsubu_vv = 468, + rv_op_vwsubu_vx = 469, + rv_op_vwsub_vv = 470, + rv_op_vwsub_vx = 471, + rv_op_vwaddu_wv = 472, + rv_op_vwaddu_wx = 473, + rv_op_vwadd_wv = 474, + rv_op_vwadd_wx = 475, + rv_op_vwsubu_wv = 476, + rv_op_vwsubu_wx = 477, + rv_op_vwsub_wv = 478, + rv_op_vwsub_wx = 479, + rv_op_vadc_vvm = 480, + rv_op_vadc_vxm = 481, + rv_op_vadc_vim = 482, + rv_op_vmadc_vvm = 483, + rv_op_vmadc_vxm = 484, + rv_op_vmadc_vim = 485, + rv_op_vsbc_vvm = 486, + rv_op_vsbc_vxm = 487, + rv_op_vmsbc_vvm = 488, + rv_op_vmsbc_vxm = 489, + rv_op_vand_vv = 490, + rv_op_vand_vx = 491, + rv_op_vand_vi = 492, + rv_op_vor_vv = 493, + rv_op_vor_vx = 494, + rv_op_vor_vi = 495, + rv_op_vxor_vv = 496, + rv_op_vxor_vx = 497, + rv_op_vxor_vi = 498, + rv_op_vsll_vv = 499, + rv_op_vsll_vx = 500, + rv_op_vsll_vi = 501, + rv_op_vsrl_vv = 502, + rv_op_vsrl_vx = 503, + rv_op_vsrl_vi = 504, + rv_op_vsra_vv = 505, + rv_op_vsra_vx = 506, + rv_op_vsra_vi = 507, + rv_op_vnsrl_wv = 508, + rv_op_vnsrl_wx = 509, + rv_op_vnsrl_wi = 510, + rv_op_vnsra_wv = 511, + rv_op_vnsra_wx = 512, + rv_op_vnsra_wi = 513, + rv_op_vmseq_vv = 514, + rv_op_vmseq_vx = 515, + rv_op_vmseq_vi = 516, + rv_op_vmsne_vv = 517, + rv_op_vmsne_vx = 518, + rv_op_vmsne_vi = 519, + rv_op_vmsltu_vv = 520, + rv_op_vmsltu_vx = 521, + rv_op_vmslt_vv = 522, + rv_op_vmslt_vx = 523, + rv_op_vmsleu_vv = 524, + rv_op_vmsleu_vx = 525, + rv_op_vmsleu_vi = 526, + rv_op_vmsle_vv = 527, + rv_op_vmsle_vx = 528, + rv_op_vmsle_vi = 529, + rv_op_vmsgtu_vx = 530, + rv_op_vmsgtu_vi = 531, + rv_op_vmsgt_vx = 532, + rv_op_vmsgt_vi = 533, + rv_op_vminu_vv = 534, + rv_op_vminu_vx = 535, + rv_op_vmin_vv = 536, + rv_op_vmin_vx = 537, + rv_op_vmaxu_vv = 538, + rv_op_vmaxu_vx = 539, + rv_op_vmax_vv = 540, + rv_op_vmax_vx = 541, + rv_op_vmul_vv = 542, + rv_op_vmul_vx = 543, + rv_op_vmulh_vv = 544, + rv_op_vmulh_vx = 545, + rv_op_vmulhu_vv = 546, + rv_op_vmulhu_vx = 547, + rv_op_vmulhsu_vv = 548, + rv_op_vmulhsu_vx = 549, + rv_op_vdivu_vv = 550, + rv_op_vdivu_vx = 551, + rv_op_vdiv_vv = 552, + rv_op_vdiv_vx = 553, + rv_op_vremu_vv = 554, + rv_op_vremu_vx = 555, + rv_op_vrem_vv = 556, + rv_op_vrem_vx = 557, + rv_op_vwmulu_vv = 558, + rv_op_vwmulu_vx = 559, + rv_op_vwmulsu_vv = 560, + rv_op_vwmulsu_vx = 561, + rv_op_vwmul_vv = 562, + rv_op_vwmul_vx = 563, + rv_op_vmacc_vv = 564, + rv_op_vmacc_vx = 565, + rv_op_vnmsac_vv = 566, + rv_op_vnmsac_vx = 567, + rv_op_vmadd_vv = 568, + rv_op_vmadd_vx = 569, + rv_op_vnmsub_vv = 570, + rv_op_vnmsub_vx = 571, + rv_op_vwmaccu_vv = 572, + rv_op_vwmaccu_vx = 573, + rv_op_vwmacc_vv = 574, + rv_op_vwmacc_vx = 575, + rv_op_vwmaccsu_vv = 576, + rv_op_vwmaccsu_vx = 577, + rv_op_vwmaccus_vx = 578, + rv_op_vmv_v_v = 579, + rv_op_vmv_v_x = 580, + rv_op_vmv_v_i = 581, + rv_op_vmerge_vvm = 582, + rv_op_vmerge_vxm = 583, + rv_op_vmerge_vim = 584, + rv_op_vsaddu_vv = 585, + rv_op_vsaddu_vx = 586, + rv_op_vsaddu_vi = 587, + rv_op_vsadd_vv = 588, + rv_op_vsadd_vx = 589, + rv_op_vsadd_vi = 590, + rv_op_vssubu_vv = 591, + rv_op_vssubu_vx = 592, + rv_op_vssub_vv = 593, + rv_op_vssub_vx = 594, + rv_op_vaadd_vv = 595, + 
rv_op_vaadd_vx = 596, + rv_op_vaaddu_vv = 597, + rv_op_vaaddu_vx = 598, + rv_op_vasub_vv = 599, + rv_op_vasub_vx = 600, + rv_op_vasubu_vv = 601, + rv_op_vasubu_vx = 602, + rv_op_vsmul_vv = 603, + rv_op_vsmul_vx = 604, + rv_op_vssrl_vv = 605, + rv_op_vssrl_vx = 606, + rv_op_vssrl_vi = 607, + rv_op_vssra_vv = 608, + rv_op_vssra_vx = 609, + rv_op_vssra_vi = 610, + rv_op_vnclipu_wv = 611, + rv_op_vnclipu_wx = 612, + rv_op_vnclipu_wi = 613, + rv_op_vnclip_wv = 614, + rv_op_vnclip_wx = 615, + rv_op_vnclip_wi = 616, + rv_op_vfadd_vv = 617, + rv_op_vfadd_vf = 618, + rv_op_vfsub_vv = 619, + rv_op_vfsub_vf = 620, + rv_op_vfrsub_vf = 621, + rv_op_vfwadd_vv = 622, + rv_op_vfwadd_vf = 623, + rv_op_vfwadd_wv = 624, + rv_op_vfwadd_wf = 625, + rv_op_vfwsub_vv = 626, + rv_op_vfwsub_vf = 627, + rv_op_vfwsub_wv = 628, + rv_op_vfwsub_wf = 629, + rv_op_vfmul_vv = 630, + rv_op_vfmul_vf = 631, + rv_op_vfdiv_vv = 632, + rv_op_vfdiv_vf = 633, + rv_op_vfrdiv_vf = 634, + rv_op_vfwmul_vv = 635, + rv_op_vfwmul_vf = 636, + rv_op_vfmacc_vv = 637, + rv_op_vfmacc_vf = 638, + rv_op_vfnmacc_vv = 639, + rv_op_vfnmacc_vf = 640, + rv_op_vfmsac_vv = 641, + rv_op_vfmsac_vf = 642, + rv_op_vfnmsac_vv = 643, + rv_op_vfnmsac_vf = 644, + rv_op_vfmadd_vv = 645, + rv_op_vfmadd_vf = 646, + rv_op_vfnmadd_vv = 647, + rv_op_vfnmadd_vf = 648, + rv_op_vfmsub_vv = 649, + rv_op_vfmsub_vf = 650, + rv_op_vfnmsub_vv = 651, + rv_op_vfnmsub_vf = 652, + rv_op_vfwmacc_vv = 653, + rv_op_vfwmacc_vf = 654, + rv_op_vfwnmacc_vv = 655, + rv_op_vfwnmacc_vf = 656, + rv_op_vfwmsac_vv = 657, + rv_op_vfwmsac_vf = 658, + rv_op_vfwnmsac_vv = 659, + rv_op_vfwnmsac_vf = 660, + rv_op_vfsqrt_v = 661, + rv_op_vfrsqrt7_v = 662, + rv_op_vfrec7_v = 663, + rv_op_vfmin_vv = 664, + rv_op_vfmin_vf = 665, + rv_op_vfmax_vv = 666, + rv_op_vfmax_vf = 667, + rv_op_vfsgnj_vv = 668, + rv_op_vfsgnj_vf = 669, + rv_op_vfsgnjn_vv = 670, + rv_op_vfsgnjn_vf = 671, + rv_op_vfsgnjx_vv = 672, + rv_op_vfsgnjx_vf = 673, + rv_op_vfslide1up_vf = 674, + rv_op_vfslide1down_vf = 675, + rv_op_vmfeq_vv = 676, + rv_op_vmfeq_vf = 677, + rv_op_vmfne_vv = 678, + rv_op_vmfne_vf = 679, + rv_op_vmflt_vv = 680, + rv_op_vmflt_vf = 681, + rv_op_vmfle_vv = 682, + rv_op_vmfle_vf = 683, + rv_op_vmfgt_vf = 684, + rv_op_vmfge_vf = 685, + rv_op_vfclass_v = 686, + rv_op_vfmerge_vfm = 687, + rv_op_vfmv_v_f = 688, + rv_op_vfcvt_xu_f_v = 689, + rv_op_vfcvt_x_f_v = 690, + rv_op_vfcvt_f_xu_v = 691, + rv_op_vfcvt_f_x_v = 692, + rv_op_vfcvt_rtz_xu_f_v = 693, + rv_op_vfcvt_rtz_x_f_v = 694, + rv_op_vfwcvt_xu_f_v = 695, + rv_op_vfwcvt_x_f_v = 696, + rv_op_vfwcvt_f_xu_v = 697, + rv_op_vfwcvt_f_x_v = 698, + rv_op_vfwcvt_f_f_v = 699, + rv_op_vfwcvt_rtz_xu_f_v = 700, + rv_op_vfwcvt_rtz_x_f_v = 701, + rv_op_vfncvt_xu_f_w = 702, + rv_op_vfncvt_x_f_w = 703, + rv_op_vfncvt_f_xu_w = 704, + rv_op_vfncvt_f_x_w = 705, + rv_op_vfncvt_f_f_w = 706, + rv_op_vfncvt_rod_f_f_w = 707, + rv_op_vfncvt_rtz_xu_f_w = 708, + rv_op_vfncvt_rtz_x_f_w = 709, + rv_op_vredsum_vs = 710, + rv_op_vredand_vs = 711, + rv_op_vredor_vs = 712, + rv_op_vredxor_vs = 713, + rv_op_vredminu_vs = 714, + rv_op_vredmin_vs = 715, + rv_op_vredmaxu_vs = 716, + rv_op_vredmax_vs = 717, + rv_op_vwredsumu_vs = 718, + rv_op_vwredsum_vs = 719, + rv_op_vfredusum_vs = 720, + rv_op_vfredosum_vs = 721, + rv_op_vfredmin_vs = 722, + rv_op_vfredmax_vs = 723, + rv_op_vfwredusum_vs = 724, + rv_op_vfwredosum_vs = 725, + rv_op_vmand_mm = 726, + rv_op_vmnand_mm = 727, + rv_op_vmandn_mm = 728, + rv_op_vmxor_mm = 729, + rv_op_vmor_mm = 730, + rv_op_vmnor_mm = 731, + rv_op_vmorn_mm = 732, + 
rv_op_vmxnor_mm = 733, + rv_op_vcpop_m = 734, + rv_op_vfirst_m = 735, + rv_op_vmsbf_m = 736, + rv_op_vmsif_m = 737, + rv_op_vmsof_m = 738, + rv_op_viota_m = 739, + rv_op_vid_v = 740, + rv_op_vmv_x_s = 741, + rv_op_vmv_s_x = 742, + rv_op_vfmv_f_s = 743, + rv_op_vfmv_s_f = 744, + rv_op_vslideup_vx = 745, + rv_op_vslideup_vi = 746, + rv_op_vslide1up_vx = 747, + rv_op_vslidedown_vx = 748, + rv_op_vslidedown_vi = 749, + rv_op_vslide1down_vx = 750, + rv_op_vrgather_vv = 751, + rv_op_vrgatherei16_vv = 752, + rv_op_vrgather_vx = 753, + rv_op_vrgather_vi = 754, + rv_op_vcompress_vm = 755, + rv_op_vmv1r_v = 756, + rv_op_vmv2r_v = 757, + rv_op_vmv4r_v = 758, + rv_op_vmv8r_v = 759, + rv_op_vzext_vf2 = 760, + rv_op_vzext_vf4 = 761, + rv_op_vzext_vf8 = 762, + rv_op_vsext_vf2 = 763, + rv_op_vsext_vf4 = 764, + rv_op_vsext_vf8 = 765, + rv_op_vsetvli = 766, + rv_op_vsetivli = 767, + rv_op_vsetvl = 768, } rv_op; /* structures */ @@ -497,6 +954,10 @@ typedef struct { uint8_t succ; uint8_t aq; uint8_t rl; + uint8_t bs; + uint8_t rnum; + uint8_t vm; + uint32_t vzimm; } rv_decode; typedef struct { @@ -535,6 +996,13 @@ static const char rv_freg_name_sym[32][5] = { "fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11", }; +static const char rv_vreg_name_sym[32][4] = { + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", + "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", + "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", + "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" +}; + /* instruction formats */ #define rv_fmt_none "O\t" @@ -572,6 +1040,36 @@ static const char rv_freg_name_sym[32][5] = { #define rv_fmt_rd_rs2 "O\t0,2" #define rv_fmt_rs1_offset "O\t1,o" #define rv_fmt_rs2_offset "O\t2,o" +#define rv_fmt_rs1_rs2_bs "O\t1,2,b" +#define rv_fmt_rd_rs1_rnum "O\t0,1,n" +#define rv_fmt_ldst_vd_rs1_vm "O\tD,(1)m" +#define rv_fmt_ldst_vd_rs1_rs2_vm "O\tD,(1),2m" +#define rv_fmt_ldst_vd_rs1_vs2_vm "O\tD,(1),Fm" +#define rv_fmt_vd_vs2_vs1 "O\tD,F,E" +#define rv_fmt_vd_vs2_vs1_vl "O\tD,F,El" +#define rv_fmt_vd_vs2_vs1_vm "O\tD,F,Em" +#define rv_fmt_vd_vs2_rs1_vl "O\tD,F,1l" +#define rv_fmt_vd_vs2_fs1_vl "O\tD,F,4l" +#define rv_fmt_vd_vs2_rs1_vm "O\tD,F,1m" +#define rv_fmt_vd_vs2_fs1_vm "O\tD,F,4m" +#define rv_fmt_vd_vs2_imm_vl "O\tD,F,il" +#define rv_fmt_vd_vs2_imm_vm "O\tD,F,im" +#define rv_fmt_vd_vs2_uimm_vm "O\tD,F,um" +#define rv_fmt_vd_vs1_vs2_vm "O\tD,E,Fm" +#define rv_fmt_vd_rs1_vs2_vm "O\tD,1,Fm" +#define rv_fmt_vd_fs1_vs2_vm "O\tD,4,Fm" +#define rv_fmt_vd_vs1 "O\tD,E" +#define rv_fmt_vd_rs1 "O\tD,1" +#define rv_fmt_vd_fs1 "O\tD,4" +#define rv_fmt_vd_imm "O\tD,i" +#define rv_fmt_vd_vs2 "O\tD,F" +#define rv_fmt_vd_vs2_vm "O\tD,Fm" +#define rv_fmt_rd_vs2_vm "O\t0,Fm" +#define rv_fmt_rd_vs2 "O\t0,F" +#define rv_fmt_fd_vs2 "O\t3,F" +#define rv_fmt_vd_vm "O\tDm" +#define rv_fmt_vsetvli "O\t0,1,v" +#define rv_fmt_vsetivli "O\t0,u,v" /* pseudo-instruction constraints */ @@ -723,6 +1221,7 @@ static const rv_comp_data rvcp_csrrw[] = { { rv_op_illegal, NULL } }; + static const rv_comp_data rvcp_csrrs[] = { { rv_op_rdcycle, rvcc_rdcycle }, { rv_op_rdtime, rvcc_rdtime }, @@ -1117,6 +1616,456 @@ const rv_opcode_data opcode_data[] = { { "fsflags", rv_codec_i_csr, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, { "fsrmi", rv_codec_i_csr, rv_fmt_rd_zimm, NULL, 0, 0, 0 }, { "fsflagsi", rv_codec_i_csr, rv_fmt_rd_zimm, NULL, 0, 0, 0 }, + { "bseti", rv_codec_i_sh7, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "bclri", rv_codec_i_sh7, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "binvi", rv_codec_i_sh7, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, 
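
An illustrative aside, not part of the patch itself: the rv_fmt_* strings added above spell operands as single characters, 0/1/2 for rd/rs1/rs2 as in the existing formats, 3 and 4 for fd and fs1, D/E/F for vd/vs1/vs2, i and u for signed and unsigned immediates, with a trailing m expected to append the ",v0.t" mask suffix when vm is clear. The actual expansion lives in the formatter elsewhere in the file; the sketch below only shows what rv_fmt_vd_vs2_rs1_vm ("O\tD,F,1m") is assumed to produce, and its struct and function names are invented for illustration (raw register numbers are printed instead of ABI names).

#include <stdio.h>

typedef struct {
    unsigned vd, vs2, rs1, vm;      /* the fields "O\tD,F,1m" consumes */
} vfields;

static void print_vadd_vx(const vfields *f)
{
    /* D -> vd, F -> vs2, 1 -> rs1, trailing m -> ",v0.t" when vm == 0 */
    printf("vadd.vx\tv%u,v%u,x%u%s\n",
           f->vd, f->vs2, f->rs1, f->vm ? "" : ",v0.t");
}

int main(void)
{
    vfields f = { .vd = 8, .vs2 = 4, .rs1 = 10, .vm = 0 };
    print_vadd_vx(&f);              /* vadd.vx v8,v4,x10,v0.t */
    return 0;
}
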
+ { "bexti", rv_codec_i_sh7, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "rori", rv_codec_i_sh7, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "clz", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "ctz", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "cpop", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "sext.h", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "sext.b", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "xnor", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "orn", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "andn", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "rol", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "ror", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sh1add", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sh2add", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sh3add", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sh1add.uw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sh2add.uw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sh3add.uw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "clmul", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "clmulr", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "clmulh", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "min", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "minu", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "max", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "maxu", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "clzw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "clzw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "cpopw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "slli.uw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "add.uw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "rolw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "rorw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "rev8", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "zext.h", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "roriw", rv_codec_i_sh5, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "orc.b", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "bset", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "bclr", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "binv", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "bext", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "aes32esmi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 }, + { "aes32esi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 }, + { "aes32dsmi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 }, + { "aes32dsi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 }, + { "aes64ks1i", rv_codec_k_rnum, rv_fmt_rd_rs1_rnum, NULL, 0, 0, 0 }, + { "aes64ks2", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "aes64im", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 }, + { "aes64esm", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "aes64es", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "aes64dsm", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "aes64ds", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sha256sig0", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 }, + { "sha256sig1", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 }, + { "sha256sum0", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 }, + { "sha256sum1", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 }, + { "sha512sig0", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sha512sig1", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sha512sum0", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { 
"sha512sum1", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sha512sum0r", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sha512sum1r", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sha512sig0l", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sha512sig0h", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sha512sig1l", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sha512sig1h", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sm3p0", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 }, + { "sm3p1", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 }, + { "sm4ed", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 }, + { "sm4ks", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 }, + { "brev8", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "pack", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "packh", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "packw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "unzip", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "zip", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "xperm4", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "xperm8", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "vle8.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vle8_v, rv_op_vle8_v, 0 }, + { "vle16.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vle16_v, rv_op_vle16_v, 0 }, + { "vle32.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vle32_v, rv_op_vle32_v, 0 }, + { "vle64.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vle64_v, rv_op_vle64_v, 0 }, + { "vse8.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vse8_v, rv_op_vse8_v, 0 }, + { "vse16.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vse16_v, rv_op_vse16_v, 0 }, + { "vse32.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vse32_v, rv_op_vse32_v, 0 }, + { "vse64.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vse64_v, rv_op_vse64_v, 0 }, + { "vlm.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vlm_v, rv_op_vlm_v, 0 }, + { "vsm.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vsm_v, rv_op_vsm_v, 0 }, + { "vlse8.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_rs2_vm, NULL, rv_op_vlse8_v, rv_op_vlse8_v, 0 }, + { "vlse16.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_rs2_vm, NULL, rv_op_vlse16_v, rv_op_vlse16_v, 0 }, + { "vlse32.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_rs2_vm, NULL, rv_op_vlse32_v, rv_op_vlse32_v, 0 }, + { "vlse64.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_rs2_vm, NULL, rv_op_vlse64_v, rv_op_vlse64_v, 0 }, + { "vsse8.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_rs2_vm, NULL, rv_op_vsse8_v, rv_op_vsse8_v, 0 }, + { "vsse16.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_rs2_vm, NULL, rv_op_vsse16_v, rv_op_vsse16_v, 0 }, + { "vsse32.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_rs2_vm, NULL, rv_op_vsse32_v, rv_op_vsse32_v, 0 }, + { "vsse64.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_rs2_vm, NULL, rv_op_vsse64_v, rv_op_vsse64_v, 0 }, + { "vluxei8.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_vs2_vm, NULL, rv_op_vluxei8_v, rv_op_vluxei8_v, 0 }, + { "vluxei16.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_vs2_vm, NULL, rv_op_vluxei16_v, rv_op_vluxei16_v, 0 }, + { "vluxei32.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_vs2_vm, NULL, rv_op_vluxei32_v, rv_op_vluxei32_v, 0 }, + { "vluxei64.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_vs2_vm, NULL, rv_op_vluxei64_v, rv_op_vluxei64_v, 0 }, + { "vloxei8.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_vs2_vm, NULL, rv_op_vloxei8_v, rv_op_vloxei8_v, 0 }, + { "vloxei16.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_vs2_vm, NULL, rv_op_vloxei16_v, rv_op_vloxei16_v, 0 }, + { "vloxei32.v", 
rv_codec_v_r, rv_fmt_ldst_vd_rs1_vs2_vm, NULL, rv_op_vloxei32_v, rv_op_vloxei32_v, 0 }, + { "vloxei64.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_vs2_vm, NULL, rv_op_vloxei64_v, rv_op_vloxei64_v, 0 }, + { "vsuxei8.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_vs2_vm, NULL, rv_op_vsuxei8_v, rv_op_vsuxei8_v, 0 }, + { "vsuxei16.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_vs2_vm, NULL, rv_op_vsuxei16_v, rv_op_vsuxei16_v, 0 }, + { "vsuxei32.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_vs2_vm, NULL, rv_op_vsuxei32_v, rv_op_vsuxei32_v, 0 }, + { "vsuxei64.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_vs2_vm, NULL, rv_op_vsuxei64_v, rv_op_vsuxei64_v, 0 }, + { "vsoxei8.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_vs2_vm, NULL, rv_op_vsoxei8_v, rv_op_vsoxei8_v, 0 }, + { "vsoxei16.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_vs2_vm, NULL, rv_op_vsoxei16_v, rv_op_vsoxei16_v, 0 }, + { "vsoxei32.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_vs2_vm, NULL, rv_op_vsoxei32_v, rv_op_vsoxei32_v, 0 }, + { "vsoxei64.v", rv_codec_v_r, rv_fmt_ldst_vd_rs1_vs2_vm, NULL, rv_op_vsoxei64_v, rv_op_vsoxei64_v, 0 }, + { "vle8ff.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vle8ff_v, rv_op_vle8ff_v, 0 }, + { "vle16ff.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vle16ff_v, rv_op_vle16ff_v, 0 }, + { "vle32ff.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vle32ff_v, rv_op_vle32ff_v, 0 }, + { "vle64ff.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vle64ff_v, rv_op_vle64ff_v, 0 }, + { "vl1re8.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vl1re8_v, rv_op_vl1re8_v, 0 }, + { "vl1re16.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vl1re16_v, rv_op_vl1re16_v, 0 }, + { "vl1re32.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vl1re32_v, rv_op_vl1re32_v, 0 }, + { "vl1re64.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vl1re64_v, rv_op_vl1re64_v, 0 }, + { "vl2re8.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vl2re8_v, rv_op_vl2re8_v, 0 }, + { "vl2re16.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vl2re16_v, rv_op_vl2re16_v, 0 }, + { "vl2re32.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vl2re32_v, rv_op_vl2re32_v, 0 }, + { "vl2re64.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vl2re64_v, rv_op_vl2re64_v, 0 }, + { "vl4re8.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vl4re8_v, rv_op_vl4re8_v, 0 }, + { "vl4re16.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vl4re16_v, rv_op_vl4re16_v, 0 }, + { "vl4re32.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vl4re32_v, rv_op_vl4re32_v, 0 }, + { "vl4re64.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vl4re64_v, rv_op_vl4re64_v, 0 }, + { "vl8re8.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vl8re8_v, rv_op_vl8re8_v, 0 }, + { "vl8re16.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vl8re16_v, rv_op_vl8re16_v, 0 }, + { "vl8re32.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vl8re32_v, rv_op_vl8re32_v, 0 }, + { "vl8re64.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vl8re64_v, rv_op_vl8re64_v, 0 }, + { "vs1r.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vs1r_v, rv_op_vs1r_v, 0 }, + { "vs2r.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vs2r_v, rv_op_vs2r_v, 0 }, + { "vs4r.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vs4r_v, rv_op_vs4r_v, 0 }, + { "vs8r.v", rv_codec_v_ldst, rv_fmt_ldst_vd_rs1_vm, NULL, rv_op_vs8r_v, rv_op_vs8r_v, 0 }, + { "vadd.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vadd_vv, rv_op_vadd_vv, 0 }, + { "vadd.vx", rv_codec_v_r, 
rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vadd_vx, rv_op_vadd_vx, 0 }, + { "vadd.vi", rv_codec_v_i, rv_fmt_vd_vs2_imm_vm, NULL, rv_op_vadd_vi, rv_op_vadd_vi, 0 }, + { "vsub.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vsub_vv, rv_op_vsub_vv, 0 }, + { "vsub.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vsub_vx, rv_op_vsub_vx, 0 }, + { "vrsub.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vrsub_vx, rv_op_vrsub_vx, 0 }, + { "vrsub.vi", rv_codec_v_i, rv_fmt_vd_vs2_imm_vm, NULL, rv_op_vrsub_vi, rv_op_vrsub_vi, 0 }, + { "vwaddu.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vwaddu_vv, rv_op_vwaddu_vv, 0 }, + { "vwaddu.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vwaddu_vx, rv_op_vwaddu_vx, 0 }, + { "vwadd.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vwadd_vv, rv_op_vwadd_vv, 0 }, + { "vwadd.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vwadd_vx, rv_op_vwadd_vx, 0 }, + { "vwsubu.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vwsubu_vv, rv_op_vwsubu_vv, 0 }, + { "vwsubu.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vwsubu_vx, rv_op_vwsubu_vx, 0 }, + { "vwsub.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vwsub_vv, rv_op_vwsub_vv, 0 }, + { "vwsub.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vwsub_vx, rv_op_vwsub_vx, 0 }, + { "vwaddu.wv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vwaddu_wv, rv_op_vwaddu_wv, 0 }, + { "vwaddu.wx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vwaddu_wx, rv_op_vwaddu_wx, 0 }, + { "vwadd.wv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vwadd_wv, rv_op_vwadd_wv, 0 }, + { "vwadd.wx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vwadd_wx, rv_op_vwadd_wx, 0 }, + { "vwsubu.wv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vwsubu_wv, rv_op_vwsubu_wv, 0 }, + { "vwsubu.wx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vwsubu_wx, rv_op_vwsubu_wx, 0 }, + { "vwsub.wv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vwsub_wv, rv_op_vwsub_wv, 0 }, + { "vwsub.wx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vwsub_wx, rv_op_vwsub_wx, 0 }, + { "vadc.vvm", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vl, NULL, rv_op_vadc_vvm, rv_op_vadc_vvm, 0 }, + { "vadc.vxm", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vl, NULL, rv_op_vadc_vxm, rv_op_vadc_vxm, 0 }, + { "vadc.vim", rv_codec_v_i, rv_fmt_vd_vs2_imm_vl, NULL, rv_op_vadc_vim, rv_op_vadc_vim, 0 }, + { "vmadc.vvm", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vl, NULL, rv_op_vmadc_vvm, rv_op_vmadc_vvm, 0 }, + { "vmadc.vxm", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vl, NULL, rv_op_vmadc_vxm, rv_op_vmadc_vxm, 0 }, + { "vmadc.vim", rv_codec_v_i, rv_fmt_vd_vs2_imm_vl, NULL, rv_op_vmadc_vim, rv_op_vmadc_vim, 0 }, + { "vsbc.vvm", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vl, NULL, rv_op_vsbc_vvm, rv_op_vsbc_vvm, 0 }, + { "vsbc.vxm", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vl, NULL, rv_op_vsbc_vxm, rv_op_vsbc_vxm, 0 }, + { "vmsbc.vvm", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vl, NULL, rv_op_vmsbc_vvm, rv_op_vmsbc_vvm, 0 }, + { "vmsbc.vxm", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vl, NULL, rv_op_vmsbc_vxm, rv_op_vmsbc_vxm, 0 }, + { "vand.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vand_vv, rv_op_vand_vv, 0 }, + { "vand.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vand_vx, rv_op_vand_vx, 0 }, + { "vand.vi", rv_codec_v_i, rv_fmt_vd_vs2_imm_vm, NULL, rv_op_vand_vi, rv_op_vand_vi, 0 }, + { "vor.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vor_vv, rv_op_vor_vv, 0 }, + { "vor.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vor_vx, rv_op_vor_vx, 0 }, + { "vor.vi", rv_codec_v_i, rv_fmt_vd_vs2_imm_vm, NULL, 
rv_op_vor_vi, rv_op_vor_vi, 0 }, + { "vxor.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vxor_vv, rv_op_vxor_vv, 0 }, + { "vxor.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vxor_vx, rv_op_vxor_vx, 0 }, + { "vxor.vi", rv_codec_v_i, rv_fmt_vd_vs2_imm_vm, NULL, rv_op_vxor_vi, rv_op_vxor_vi, 0 }, + { "vsll.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vsll_vv, rv_op_vsll_vv, 0 }, + { "vsll.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vsll_vx, rv_op_vsll_vx, 0 }, + { "vsll.vi", rv_codec_v_i, rv_fmt_vd_vs2_uimm_vm, NULL, rv_op_vsll_vi, rv_op_vsll_vi, 0 }, + { "vsrl.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vsrl_vv, rv_op_vsrl_vv, 0 }, + { "vsrl.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vsrl_vx, rv_op_vsrl_vx, 0 }, + { "vsrl.vi", rv_codec_v_i, rv_fmt_vd_vs2_uimm_vm, NULL, rv_op_vsrl_vi, rv_op_vsrl_vi, 0 }, + { "vsra.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vsra_vv, rv_op_vsra_vv, 0 }, + { "vsra.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vsra_vx, rv_op_vsra_vx, 0 }, + { "vsra.vi", rv_codec_v_i, rv_fmt_vd_vs2_uimm_vm, NULL, rv_op_vsra_vi, rv_op_vsra_vi, 0 }, + { "vnsrl.wv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vnsrl_wv, rv_op_vnsrl_wv, 0 }, + { "vnsrl.wx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vnsrl_wx, rv_op_vnsrl_wx, 0 }, + { "vnsrl.wi", rv_codec_v_i, rv_fmt_vd_vs2_uimm_vm, NULL, rv_op_vnsrl_wi, rv_op_vnsrl_wi, 0 }, + { "vnsra.wv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vnsra_wv, rv_op_vnsra_wv, 0 }, + { "vnsra.wx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vnsra_wx, rv_op_vnsra_wx, 0 }, + { "vnsra.wi", rv_codec_v_i, rv_fmt_vd_vs2_uimm_vm, NULL, rv_op_vnsra_wi, rv_op_vnsra_wi, 0 }, + { "vmseq.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmseq_vv, rv_op_vmseq_vv, 0 }, + { "vmseq.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vmseq_vx, rv_op_vmseq_vx, 0 }, + { "vmseq.vi", rv_codec_v_i, rv_fmt_vd_vs2_imm_vm, NULL, rv_op_vmseq_vi, rv_op_vmseq_vi, 0 }, + { "vmsne.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmsne_vv, rv_op_vmsne_vv, 0 }, + { "vmsne.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vmsne_vx, rv_op_vmsne_vx, 0 }, + { "vmsne.vi", rv_codec_v_i, rv_fmt_vd_vs2_imm_vm, NULL, rv_op_vmsne_vi, rv_op_vmsne_vi, 0 }, + { "vmsltu.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmsltu_vv, rv_op_vmsltu_vv, 0 }, + { "vmsltu.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vmsltu_vx, rv_op_vmsltu_vx, 0 }, + { "vmslt.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmslt_vv, rv_op_vmslt_vv, 0 }, + { "vmslt.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vmslt_vx, rv_op_vmslt_vx, 0 }, + { "vmsleu.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmsleu_vv, rv_op_vmsleu_vv, 0 }, + { "vmsleu.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vmsleu_vx, rv_op_vmsleu_vx, 0 }, + { "vmsleu.vi", rv_codec_v_i, rv_fmt_vd_vs2_imm_vm, NULL, rv_op_vmsleu_vi, rv_op_vmsleu_vi, 0 }, + { "vmsle.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmsle_vv, rv_op_vmsle_vv, 0 }, + { "vmsle.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vmsle_vx, rv_op_vmsle_vx, 0 }, + { "vmsle.vi", rv_codec_v_i, rv_fmt_vd_vs2_imm_vm, NULL, rv_op_vmsle_vi, rv_op_vmsle_vi, 0 }, + { "vmsgtu.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vmsgtu_vx, rv_op_vmsgtu_vx, 0 }, + { "vmsgtu.vi", rv_codec_v_i, rv_fmt_vd_vs2_imm_vm, NULL, rv_op_vmsgtu_vi, rv_op_vmsgtu_vi, 0 }, + { "vmsgt.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vmsgt_vx, rv_op_vmsgt_vx, 0 }, + { "vmsgt.vi", 
rv_codec_v_i, rv_fmt_vd_vs2_imm_vm, NULL, rv_op_vmsgt_vi, rv_op_vmsgt_vi, 0 }, + { "vminu.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vminu_vv, rv_op_vminu_vv, 0 }, + { "vminu.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vminu_vx, rv_op_vminu_vx, 0 }, + { "vmin.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmin_vv, rv_op_vmin_vv, 0 }, + { "vmin.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vmin_vx, rv_op_vmin_vx, 0 }, + { "vmaxu.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmaxu_vv, rv_op_vmaxu_vv, 0 }, + { "vmaxu.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vmaxu_vx, rv_op_vmaxu_vx, 0 }, + { "vmax.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmax_vv, rv_op_vmax_vv, 0 }, + { "vmax.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vmax_vx, rv_op_vmax_vx, 0 }, + { "vmul.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmul_vv, rv_op_vmul_vv, 0 }, + { "vmul.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vmul_vx, rv_op_vmul_vx, 0 }, + { "vmulh.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmulh_vv, rv_op_vmulh_vv, 0 }, + { "vmulh.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vmulh_vx, rv_op_vmulh_vx, 0 }, + { "vmulhu.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmulhu_vv, rv_op_vmulhu_vv, 0 }, + { "vmulhu.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vmulhu_vx, rv_op_vmulhu_vx, 0 }, + { "vmulhsu.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmulhsu_vv, rv_op_vmulhsu_vv, 0 }, + { "vmulhsu.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vmulhsu_vx, rv_op_vmulhsu_vx, 0 }, + { "vdivu.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vdivu_vv, rv_op_vdivu_vv, 0 }, + { "vdivu.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vdivu_vx, rv_op_vdivu_vx, 0 }, + { "vdiv.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vdiv_vv, rv_op_vdiv_vv, 0 }, + { "vdiv.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vdiv_vx, rv_op_vdiv_vx, 0 }, + { "vremu.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vremu_vv, rv_op_vremu_vv, 0 }, + { "vremu.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vremu_vx, rv_op_vremu_vx, 0 }, + { "vrem.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vrem_vv, rv_op_vrem_vv, 0 }, + { "vrem.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vrem_vx, rv_op_vrem_vx, 0 }, + { "vwmulu.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vwmulu_vv, rv_op_vwmulu_vv, 0 }, + { "vwmulu.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vwmulu_vx, rv_op_vwmulu_vx, 0 }, + { "vwmulsu.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vwmulsu_vv, rv_op_vwmulsu_vv, 0 }, + { "vwmulsu.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vwmulsu_vx, rv_op_vwmulsu_vx, 0 }, + { "vwmul.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vwmul_vv, rv_op_vwmul_vv, 0 }, + { "vwmul.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vwmul_vx, rv_op_vwmul_vx, 0 }, + { "vmacc.vv", rv_codec_v_r, rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vmacc_vv, rv_op_vmacc_vv, 0 }, + { "vmacc.vx", rv_codec_v_r, rv_fmt_vd_rs1_vs2_vm, NULL, rv_op_vmacc_vx, rv_op_vmacc_vx, 0 }, + { "vnmsac.vv", rv_codec_v_r, rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vnmsac_vv, rv_op_vnmsac_vv, 0 }, + { "vnmsac.vx", rv_codec_v_r, rv_fmt_vd_rs1_vs2_vm, NULL, rv_op_vnmsac_vx, rv_op_vnmsac_vx, 0 }, + { "vmadd.vv", rv_codec_v_r, rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vmadd_vv, rv_op_vmadd_vv, 0 }, + { "vmadd.vx", rv_codec_v_r, rv_fmt_vd_rs1_vs2_vm, NULL, rv_op_vmadd_vx, rv_op_vmadd_vx, 0 }, + { "vnmsub.vv", rv_codec_v_r, 
rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vnmsub_vv, rv_op_vnmsub_vv, 0 }, + { "vnmsub.vx", rv_codec_v_r, rv_fmt_vd_rs1_vs2_vm, NULL, rv_op_vnmsub_vx, rv_op_vnmsub_vx, 0 }, + { "vwmaccu.vv", rv_codec_v_r, rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vwmaccu_vv, rv_op_vwmaccu_vv, 0 }, + { "vwmaccu.vx", rv_codec_v_r, rv_fmt_vd_rs1_vs2_vm, NULL, rv_op_vwmaccu_vx, rv_op_vwmaccu_vx, 0 }, + { "vwmacc.vv", rv_codec_v_r, rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vwmacc_vv, rv_op_vwmacc_vv, 0 }, + { "vwmacc.vx", rv_codec_v_r, rv_fmt_vd_rs1_vs2_vm, NULL, rv_op_vwmacc_vx, rv_op_vwmacc_vx, 0 }, + { "vwmaccsu.vv", rv_codec_v_r, rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vwmaccsu_vv, rv_op_vwmaccsu_vv, 0 }, + { "vwmaccsu.vx", rv_codec_v_r, rv_fmt_vd_rs1_vs2_vm, NULL, rv_op_vwmaccsu_vx, rv_op_vwmaccsu_vx, 0 }, + { "vwmaccus.vx", rv_codec_v_r, rv_fmt_vd_rs1_vs2_vm, NULL, rv_op_vwmaccus_vx, rv_op_vwmaccus_vx, 0 }, + { "vmv.v.v", rv_codec_v_r, rv_fmt_vd_vs1, NULL, rv_op_vmv_v_v, rv_op_vmv_v_v, 0 }, + { "vmv.v.x", rv_codec_v_r, rv_fmt_vd_rs1, NULL, rv_op_vmv_v_x, rv_op_vmv_v_x, 0 }, + { "vmv.v.i", rv_codec_v_i, rv_fmt_vd_imm, NULL, rv_op_vmv_v_i, rv_op_vmv_v_i, 0 }, + { "vmerge.vvm", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vl, NULL, rv_op_vmerge_vvm, rv_op_vmerge_vvm, 0 }, + { "vmerge.vxm", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vl, NULL, rv_op_vmerge_vxm, rv_op_vmerge_vxm, 0 }, + { "vmerge.vim", rv_codec_v_i, rv_fmt_vd_vs2_imm_vl, NULL, rv_op_vmerge_vim, rv_op_vmerge_vim, 0 }, + { "vsaddu.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vsaddu_vv, rv_op_vsaddu_vv, 0 }, + { "vsaddu.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vsaddu_vx, rv_op_vsaddu_vx, 0 }, + { "vsaddu.vi", rv_codec_v_i, rv_fmt_vd_vs2_imm_vm, NULL, rv_op_vsaddu_vi, rv_op_vsaddu_vi, 0 }, + { "vsadd.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vsadd_vv, rv_op_vsadd_vv, 0 }, + { "vsadd.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vsadd_vx, rv_op_vsadd_vx, 0 }, + { "vsadd.vi", rv_codec_v_i, rv_fmt_vd_vs2_imm_vm, NULL, rv_op_vsadd_vi, rv_op_vsadd_vi, 0 }, + { "vssubu.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vssubu_vv, rv_op_vssubu_vv, 0 }, + { "vssubu.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vssubu_vx, rv_op_vssubu_vx, 0 }, + { "vssub.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vssub_vv, rv_op_vssub_vv, 0 }, + { "vssub.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vssub_vx, rv_op_vssub_vx, 0 }, + { "vaadd.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vaadd_vv, rv_op_vaadd_vv, 0 }, + { "vaadd.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vaadd_vx, rv_op_vaadd_vx, 0 }, + { "vaaddu.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vaaddu_vv, rv_op_vaaddu_vv, 0 }, + { "vaaddu.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vaaddu_vx, rv_op_vaaddu_vx, 0 }, + { "vasub.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vasub_vv, rv_op_vasub_vv, 0 }, + { "vasub.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vasub_vx, rv_op_vasub_vx, 0 }, + { "vasubu.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vasubu_vv, rv_op_vasubu_vv, 0 }, + { "vasubu.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vasubu_vx, rv_op_vasubu_vx, 0 }, + { "vsmul.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vsmul_vv, rv_op_vsmul_vv, 0 }, + { "vsmul.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vsmul_vx, rv_op_vsmul_vx, 0 }, + { "vssrl.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vssrl_vv, rv_op_vssrl_vv, 0 }, + { "vssrl.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vssrl_vx, rv_op_vssrl_vx, 0 }, + { 
"vssrl.vi", rv_codec_v_i, rv_fmt_vd_vs2_uimm_vm, NULL, rv_op_vssrl_vi, rv_op_vssrl_vi, 0 }, + { "vssra.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vssra_vv, rv_op_vssra_vv, 0 }, + { "vssra.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vssra_vx, rv_op_vssra_vx, 0 }, + { "vssra.vi", rv_codec_v_i, rv_fmt_vd_vs2_uimm_vm, NULL, rv_op_vssra_vi, rv_op_vssra_vi, 0 }, + { "vnclipu.wv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vnclipu_wv, rv_op_vnclipu_wv, 0 }, + { "vnclipu.wx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vnclipu_wx, rv_op_vnclipu_wx, 0 }, + { "vnclipu.wi", rv_codec_v_i, rv_fmt_vd_vs2_uimm_vm, NULL, rv_op_vnclipu_wi, rv_op_vnclipu_wi, 0 }, + { "vnclip.wv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vnclip_wv, rv_op_vnclip_wv, 0 }, + { "vnclip.wx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vnclip_wx, rv_op_vnclip_wx, 0 }, + { "vnclip.wi", rv_codec_v_i, rv_fmt_vd_vs2_uimm_vm, NULL, rv_op_vnclip_wi, rv_op_vnclip_wi, 0 }, + { "vfadd.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfadd_vv, rv_op_vfadd_vv, 0 }, + { "vfadd.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vfadd_vf, rv_op_vfadd_vf, 0 }, + { "vfsub.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfsub_vv, rv_op_vfsub_vv, 0 }, + { "vfsub.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vfsub_vf, rv_op_vfsub_vf, 0 }, + { "vfrsub.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vfrsub_vf, rv_op_vfrsub_vf, 0 }, + { "vfwadd.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfwadd_vv, rv_op_vfwadd_vv, 0 }, + { "vfwadd.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vfwadd_vf, rv_op_vfwadd_vf, 0 }, + { "vfwadd.wv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfwadd_wv, rv_op_vfwadd_wv, 0 }, + { "vfwadd.wf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vfwadd_wf, rv_op_vfwadd_wf, 0 }, + { "vfwsub.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfwsub_vv, rv_op_vfwsub_vv, 0 }, + { "vfwsub.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vfwsub_vf, rv_op_vfwsub_vf, 0 }, + { "vfwsub.wv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfwsub_wv, rv_op_vfwsub_wv, 0 }, + { "vfwsub.wf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vfwsub_wf, rv_op_vfwsub_wf, 0 }, + { "vfmul.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfmul_vv, rv_op_vfmul_vv, 0 }, + { "vfmul.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vfmul_vf, rv_op_vfmul_vf, 0 }, + { "vfdiv.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfdiv_vv, rv_op_vfdiv_vv, 0 }, + { "vfdiv.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vfdiv_vf, rv_op_vfdiv_vf, 0 }, + { "vfrdiv.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vfrdiv_vf, rv_op_vfrdiv_vf, 0 }, + { "vfwmul.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfwmul_vv, rv_op_vfwmul_vv, 0 }, + { "vfwmul.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vfwmul_vf, rv_op_vfwmul_vf, 0 }, + { "vfmacc.vv", rv_codec_v_r, rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vfmacc_vv, rv_op_vfmacc_vv, 0 }, + { "vfmacc.vf", rv_codec_v_r, rv_fmt_vd_fs1_vs2_vm, NULL, rv_op_vfmacc_vf, rv_op_vfmacc_vf, 0 }, + { "vfnmacc.vv", rv_codec_v_r, rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vfnmacc_vv, rv_op_vfnmacc_vv, 0 }, + { "vfnmacc.vf", rv_codec_v_r, rv_fmt_vd_fs1_vs2_vm, NULL, rv_op_vfnmacc_vf, rv_op_vfnmacc_vf, 0 }, + { "vfmsac.vv", rv_codec_v_r, rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vfmsac_vv, rv_op_vfmsac_vv, 0 }, + { "vfmsac.vf", rv_codec_v_r, rv_fmt_vd_fs1_vs2_vm, NULL, rv_op_vfmsac_vf, rv_op_vfmsac_vf, 0 }, + { "vfnmsac.vv", rv_codec_v_r, 
rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vfnmsac_vv, rv_op_vfnmsac_vv, 0 }, + { "vfnmsac.vf", rv_codec_v_r, rv_fmt_vd_fs1_vs2_vm, NULL, rv_op_vfnmsac_vf, rv_op_vfnmsac_vf, 0 }, + { "vfmadd.vv", rv_codec_v_r, rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vfmadd_vv, rv_op_vfmadd_vv, 0 }, + { "vfmadd.vf", rv_codec_v_r, rv_fmt_vd_fs1_vs2_vm, NULL, rv_op_vfmadd_vf, rv_op_vfmadd_vf, 0 }, + { "vfnmadd.vv", rv_codec_v_r, rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vfnmadd_vv, rv_op_vfnmadd_vv, 0 }, + { "vfnmadd.vf", rv_codec_v_r, rv_fmt_vd_fs1_vs2_vm, NULL, rv_op_vfnmadd_vf, rv_op_vfnmadd_vf, 0 }, + { "vfmsub.vv", rv_codec_v_r, rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vfmsub_vv, rv_op_vfmsub_vv, 0 }, + { "vfmsub.vf", rv_codec_v_r, rv_fmt_vd_fs1_vs2_vm, NULL, rv_op_vfmsub_vf, rv_op_vfmsub_vf, 0 }, + { "vfnmsub.vv", rv_codec_v_r, rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vfnmsub_vv, rv_op_vfnmsub_vv, 0 }, + { "vfnmsub.vf", rv_codec_v_r, rv_fmt_vd_fs1_vs2_vm, NULL, rv_op_vfnmsub_vf, rv_op_vfnmsub_vf, 0 }, + { "vfwmacc.vv", rv_codec_v_r, rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vfwmacc_vv, rv_op_vfwmacc_vv, 0 }, + { "vfwmacc.vf", rv_codec_v_r, rv_fmt_vd_fs1_vs2_vm, NULL, rv_op_vfwmacc_vf, rv_op_vfwmacc_vf, 0 }, + { "vfwnmacc.vv", rv_codec_v_r, rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vfwnmacc_vv, rv_op_vfwnmacc_vv, 0 }, + { "vfwnmacc.vf", rv_codec_v_r, rv_fmt_vd_fs1_vs2_vm, NULL, rv_op_vfwnmacc_vf, rv_op_vfwnmacc_vf, 0 }, + { "vfwmsac.vv", rv_codec_v_r, rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vfwmsac_vv, rv_op_vfwmsac_vv, 0 }, + { "vfwmsac.vf", rv_codec_v_r, rv_fmt_vd_fs1_vs2_vm, NULL, rv_op_vfwmsac_vf, rv_op_vfwmsac_vf, 0 }, + { "vfwnmsac.vv", rv_codec_v_r, rv_fmt_vd_vs1_vs2_vm, NULL, rv_op_vfwnmsac_vv, rv_op_vfwnmsac_vv, 0 }, + { "vfwnmsac.vf", rv_codec_v_r, rv_fmt_vd_fs1_vs2_vm, NULL, rv_op_vfwnmsac_vf, rv_op_vfwnmsac_vf, 0 }, + { "vfsqrt.v", rv_codec_v_r, rv_fmt_vd_vs2, NULL, rv_op_vfsqrt_v, rv_op_vfsqrt_v, 0 }, + { "vfrsqrt7.v", rv_codec_v_r, rv_fmt_vd_vs2, NULL, rv_op_vfrsqrt7_v, rv_op_vfrsqrt7_v, 0 }, + { "vfrec7.v", rv_codec_v_r, rv_fmt_vd_vs2, NULL, rv_op_vfrec7_v, rv_op_vfrec7_v, 0 }, + { "vfmin.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfmin_vv, rv_op_vfmin_vv, 0 }, + { "vfmin.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vfmin_vf, rv_op_vfmin_vf, 0 }, + { "vfmax.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfmax_vv, rv_op_vfmax_vv, 0 }, + { "vfmax.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vfmax_vf, rv_op_vfmax_vf, 0 }, + { "vfsgnj.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfsgnj_vv, rv_op_vfsgnj_vv, 0 }, + { "vfsgnj.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vfsgnj_vf, rv_op_vfsgnj_vf, 0 }, + { "vfsgnjn.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfsgnjn_vv, rv_op_vfsgnjn_vv, 0 }, + { "vfsgnjn.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vfsgnjn_vf, rv_op_vfsgnjn_vf, 0 }, + { "vfsgnjx.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfsgnjx_vv, rv_op_vfsgnjx_vv, 0 }, + { "vfsgnjx.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vfsgnjx_vf, rv_op_vfsgnjx_vf, 0 }, + { "vfslide1up.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vfslide1up_vf, rv_op_vfslide1up_vf, 0 }, + { "vfslide1down.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vfslide1down_vf, rv_op_vfslide1down_vf, 0 }, + { "vmfeq.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmfeq_vv, rv_op_vmfeq_vv, 0 }, + { "vmfeq.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vmfeq_vf, rv_op_vmfeq_vf, 0 }, + { "vmfne.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmfne_vv, rv_op_vmfne_vv, 0 }, + { 
"vmfne.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vmfne_vf, rv_op_vmfne_vf, 0 }, + { "vmflt.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmflt_vv, rv_op_vmflt_vv, 0 }, + { "vmflt.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vmflt_vf, rv_op_vmflt_vf, 0 }, + { "vmfle.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmfle_vv, rv_op_vmfle_vv, 0 }, + { "vmfle.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vmfle_vf, rv_op_vmfle_vf, 0 }, + { "vmfgt.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vmfgt_vf, rv_op_vmfgt_vf, 0 }, + { "vmfge.vf", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vm, NULL, rv_op_vmfge_vf, rv_op_vmfge_vf, 0 }, + { "vfclass.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfclass_v, rv_op_vfclass_v, 0 }, + { "vfmerge.vfm", rv_codec_v_r, rv_fmt_vd_vs2_fs1_vl, NULL, rv_op_vfmerge_vfm, rv_op_vfmerge_vfm, 0 }, + { "vfmv.v.f", rv_codec_v_r, rv_fmt_vd_fs1, NULL, rv_op_vfmv_v_f, rv_op_vfmv_v_f, 0 }, + { "vfcvt.xu.f.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfcvt_xu_f_v, rv_op_vfcvt_xu_f_v, 0 }, + { "vfcvt.x.f.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfcvt_x_f_v, rv_op_vfcvt_x_f_v, 0 }, + { "vfcvt.f.xu.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfcvt_f_xu_v, rv_op_vfcvt_f_xu_v, 0 }, + { "vfcvt.f.x.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfcvt_f_x_v, rv_op_vfcvt_f_x_v, 0 }, + { "vfcvt.rtz.xu.f.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfcvt_rtz_xu_f_v, rv_op_vfcvt_rtz_xu_f_v, 0 }, + { "vfcvt.rtz.x.f.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfcvt_rtz_x_f_v, rv_op_vfcvt_rtz_x_f_v, 0 }, + { "vfwcvt.xu.f.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfwcvt_xu_f_v, rv_op_vfwcvt_xu_f_v, 0 }, + { "vfwcvt.x.f.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfwcvt_x_f_v, rv_op_vfwcvt_x_f_v, 0 }, + { "vfwcvt.f.xu.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfwcvt_f_xu_v, rv_op_vfwcvt_f_xu_v, 0 }, + { "vfwcvt.f.x.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfwcvt_f_x_v, rv_op_vfwcvt_f_x_v, 0 }, + { "vfwcvt.f.f.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfwcvt_f_f_v, rv_op_vfwcvt_f_f_v, 0 }, + { "vfwcvt.rtz.xu.f.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfwcvt_rtz_xu_f_v, rv_op_vfwcvt_rtz_xu_f_v, 0 }, + { "vfwcvt.rtz.x.f.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfwcvt_rtz_x_f_v, rv_op_vfwcvt_rtz_x_f_v, 0 }, + { "vfncvt.xu.f.w", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfncvt_xu_f_w, rv_op_vfncvt_xu_f_w, 0 }, + { "vfncvt.x.f.w", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfncvt_x_f_w, rv_op_vfncvt_x_f_w, 0 }, + { "vfncvt.f.xu.w", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfncvt_f_xu_w, rv_op_vfncvt_f_xu_w, 0 }, + { "vfncvt.f.x.w", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfncvt_f_x_w, rv_op_vfncvt_f_x_w, 0 }, + { "vfncvt.f.f.w", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfncvt_f_f_w, rv_op_vfncvt_f_f_w, 0 }, + { "vfncvt.rod.f.f.w", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfncvt_rod_f_f_w, rv_op_vfncvt_rod_f_f_w, 0 }, + { "vfncvt.rtz.xu.f.w", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfncvt_rtz_xu_f_w, rv_op_vfncvt_rtz_xu_f_w, 0 }, + { "vfncvt.rtz.x.f.w", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vfncvt_rtz_x_f_w, rv_op_vfncvt_rtz_x_f_w, 0 }, + { "vredsum.vs", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vredsum_vs, rv_op_vredsum_vs, 0 }, + { "vredand.vs", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vredand_vs, rv_op_vredand_vs, 0 }, + { "vredor.vs", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vredor_vs, rv_op_vredor_vs, 0 }, + { "vredxor.vs", rv_codec_v_r, 
rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vredxor_vs, rv_op_vredxor_vs, 0 }, + { "vredminu.vs", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vredminu_vs, rv_op_vredminu_vs, 0 }, + { "vredmin.vs", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vredmin_vs, rv_op_vredmin_vs, 0 }, + { "vredmaxu.vs", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vredmaxu_vs, rv_op_vredmaxu_vs, 0 }, + { "vredmax.vs", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vredmax_vs, rv_op_vredmax_vs, 0 }, + { "vwredsumu.vs", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vwredsumu_vs, rv_op_vwredsumu_vs, 0 }, + { "vwredsum.vs", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vwredsum_vs, rv_op_vwredsum_vs, 0 }, + { "vfredusum.vs", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfredusum_vs, rv_op_vfredusum_vs, 0 }, + { "vfredosum.vs", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfredosum_vs, rv_op_vfredosum_vs, 0 }, + { "vfredmin.vs", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfredmin_vs, rv_op_vfredmin_vs, 0 }, + { "vfredmax.vs", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfredmax_vs, rv_op_vfredmax_vs, 0 }, + { "vfwredusum.vs", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfwredusum_vs, rv_op_vfwredusum_vs, 0 }, + { "vfwredosum.vs", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vfwredosum_vs, rv_op_vfwredosum_vs, 0 }, + { "vmand.mm", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmand_mm, rv_op_vmand_mm, 0 }, + { "vmnand.mm", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmnand_mm, rv_op_vmnand_mm, 0 }, + { "vmandn.mm", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmandn_mm, rv_op_vmandn_mm, 0 }, + { "vmxor.mm", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmxor_mm, rv_op_vmxor_mm, 0 }, + { "vmor.mm", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmor_mm, rv_op_vmor_mm, 0 }, + { "vmnor.mm", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmnor_mm, rv_op_vmnor_mm, 0 }, + { "vmorn.mm", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmorn_mm, rv_op_vmorn_mm, 0 }, + { "vmxnor.mm", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vmxnor_mm, rv_op_vmxnor_mm, 0 }, + { "vcpop.m", rv_codec_v_r, rv_fmt_rd_vs2_vm, NULL, rv_op_vcpop_m, rv_op_vcpop_m, 0 }, + { "vfirst.m", rv_codec_v_r, rv_fmt_rd_vs2_vm, NULL, rv_op_vfirst_m, rv_op_vfirst_m, 0 }, + { "vmsbf.m", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vmsbf_m, rv_op_vmsbf_m, 0 }, + { "vmsif.m", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vmsif_m, rv_op_vmsif_m, 0 }, + { "vmsof.m", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vmsof_m, rv_op_vmsof_m, 0 }, + { "viota.m", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_viota_m, rv_op_viota_m, 0 }, + { "vid.v", rv_codec_v_r, rv_fmt_vd_vm, NULL, rv_op_vid_v, rv_op_vid_v, 0 }, + { "vmv.x.s", rv_codec_v_r, rv_fmt_rd_vs2, NULL, rv_op_vmv_x_s, rv_op_vmv_x_s, 0 }, + { "vmv.s.x", rv_codec_v_r, rv_fmt_vd_rs1, NULL, rv_op_vmv_s_x, rv_op_vmv_s_x, 0 }, + { "vfmv.f.s", rv_codec_v_r, rv_fmt_fd_vs2, NULL, rv_op_vfmv_f_s, rv_op_vfmv_f_s, 0 }, + { "vfmv.s.f", rv_codec_v_r, rv_fmt_vd_fs1, NULL, rv_op_vfmv_s_f, rv_op_vfmv_s_f, 0 }, + { "vslideup.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vslideup_vx, rv_op_vslideup_vx, 0 }, + { "vslideup.vi", rv_codec_v_i, rv_fmt_vd_vs2_uimm_vm, NULL, rv_op_vslideup_vi, rv_op_vslideup_vi, 0 }, + { "vslide1up.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vslide1up_vx, rv_op_vslide1up_vx, 0 }, + { "vslidedown.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vslidedown_vx, rv_op_vslidedown_vx, 0 }, + { "vslidedown.vi", rv_codec_v_i, rv_fmt_vd_vs2_uimm_vm, NULL, 
rv_op_vslidedown_vi, rv_op_vslidedown_vi, 0 }, + { "vslide1down.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vslide1down_vx, rv_op_vslide1down_vx, 0 }, + { "vrgather.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vrgather_vv, rv_op_vrgather_vv, 0 }, + { "vrgatherei16.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, rv_op_vrgatherei16_vv, rv_op_vrgatherei16_vv, 0 }, + { "vrgather.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, rv_op_vrgather_vx, rv_op_vrgather_vx, 0 }, + { "vrgather.vi", rv_codec_v_i, rv_fmt_vd_vs2_uimm_vm, NULL, rv_op_vrgather_vi, rv_op_vrgather_vi, 0 }, + { "vcompress.vm", rv_codec_v_r, rv_fmt_vd_vs2_vs1, NULL, rv_op_vcompress_vm, rv_op_vcompress_vm, 0 }, + { "vmv1r.v", rv_codec_v_r, rv_fmt_vd_vs2, NULL, rv_op_vmv1r_v, rv_op_vmv1r_v, 0 }, + { "vmv2r.v", rv_codec_v_r, rv_fmt_vd_vs2, NULL, rv_op_vmv2r_v, rv_op_vmv2r_v, 0 }, + { "vmv4r.v", rv_codec_v_r, rv_fmt_vd_vs2, NULL, rv_op_vmv4r_v, rv_op_vmv4r_v, 0 }, + { "vmv8r.v", rv_codec_v_r, rv_fmt_vd_vs2, NULL, rv_op_vmv8r_v, rv_op_vmv8r_v, 0 }, + { "vzext.vf2", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vzext_vf2, rv_op_vzext_vf2, 0 }, + { "vzext.vf4", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vzext_vf4, rv_op_vzext_vf4, 0 }, + { "vzext.vf8", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vzext_vf8, rv_op_vzext_vf8, 0 }, + { "vsext.vf2", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vsext_vf2, rv_op_vsext_vf2, 0 }, + { "vsext.vf4", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vsext_vf4, rv_op_vsext_vf4, 0 }, + { "vsext.vf8", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vsext_vf8, rv_op_vsext_vf8, 0 }, + { "vsetvli", rv_codec_vsetvli, rv_fmt_vsetvli, NULL, rv_op_vsetvli, rv_op_vsetvli, 0 }, + { "vsetivli", rv_codec_vsetivli, rv_fmt_vsetivli, NULL, rv_op_vsetivli, rv_op_vsetivli, 0 }, + { "vsetvl", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, rv_op_vsetvl, rv_op_vsetvl, 0 } }; /* CSR names */ @@ -1130,14 +2079,17 @@ static const char *csr_name(int csrno) case 0x0003: return "fcsr"; case 0x0004: return "uie"; case 0x0005: return "utvec"; + case 0x0008: return "vstart"; + case 0x0009: return "vxsat"; + case 0x000a: return "vxrm"; + case 0x000f: return "vcsr"; + case 0x0015: return "seed"; case 0x0040: return "uscratch"; case 0x0041: return "uepc"; case 0x0042: return "ucause"; case 0x0043: return "utval"; case 0x0044: return "uip"; case 0x0100: return "sstatus"; - case 0x0102: return "sedeleg"; - case 0x0103: return "sideleg"; case 0x0104: return "sie"; case 0x0105: return "stvec"; case 0x0106: return "scounteren"; @@ -1303,6 +2255,9 @@ static const char *csr_name(int csrno) case 0x0c00: return "cycle"; case 0x0c01: return "time"; case 0x0c02: return "instret"; + case 0x0c20: return "vl"; + case 0x0c21: return "vtype"; + case 0x0c22: return "vlenb"; case 0x0c80: return "cycleh"; case 0x0c81: return "timeh"; case 0x0c82: return "instreth"; @@ -1490,9 +2445,86 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) break; case 1: switch (((inst >> 12) & 0b111)) { + case 0: + switch (((inst >> 20) & 0b111111111111)) { + case 40: op = rv_op_vl1re8_v; break; + case 552: op = rv_op_vl2re8_v; break; + case 1576: op = rv_op_vl4re8_v; break; + case 3624: op = rv_op_vl8re8_v; break; + } + switch (((inst >> 26) & 0b111)) { + case 0: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_vle8_v; break; + case 11: op = rv_op_vlm_v; break; + case 16: op = rv_op_vle8ff_v; break; + } + break; + case 1: op = rv_op_vluxei8_v; break; + case 2: op = rv_op_vlse8_v; break; + case 3: op = rv_op_vloxei8_v; break; + } + break; case 2: op = 
rv_op_flw; break; case 3: op = rv_op_fld; break; case 4: op = rv_op_flq; break; + case 5: + switch (((inst >> 20) & 0b111111111111)) { + case 40: op = rv_op_vl1re16_v; break; + case 552: op = rv_op_vl2re16_v; break; + case 1576: op = rv_op_vl4re16_v; break; + case 3624: op = rv_op_vl8re16_v; break; + } + switch (((inst >> 26) & 0b111)) { + case 0: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_vle16_v; break; + case 16: op = rv_op_vle16ff_v; break; + } + break; + case 1: op = rv_op_vluxei16_v; break; + case 2: op = rv_op_vlse16_v; break; + case 3: op = rv_op_vloxei16_v; break; + } + break; + case 6: + switch (((inst >> 20) & 0b111111111111)) { + case 40: op = rv_op_vl1re32_v; break; + case 552: op = rv_op_vl2re32_v; break; + case 1576: op = rv_op_vl4re32_v; break; + case 3624: op = rv_op_vl8re32_v; break; + } + switch (((inst >> 26) & 0b111)) { + case 0: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_vle32_v; break; + case 16: op = rv_op_vle32ff_v; break; + } + break; + case 1: op = rv_op_vluxei32_v; break; + case 2: op = rv_op_vlse32_v; break; + case 3: op = rv_op_vloxei32_v; break; + } + break; + case 7: + switch (((inst >> 20) & 0b111111111111)) { + case 40: op = rv_op_vl1re64_v; break; + case 552: op = rv_op_vl2re64_v; break; + case 1576: op = rv_op_vl4re64_v; break; + case 3624: op = rv_op_vl8re64_v; break; + } + switch (((inst >> 26) & 0b111)) { + case 0: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_vle64_v; break; + case 16: op = rv_op_vle64ff_v; break; + } + break; + case 1: op = rv_op_vluxei64_v; break; + case 2: op = rv_op_vlse64_v; break; + case 3: op = rv_op_vloxei64_v; break; + } + break; } break; case 3: @@ -1507,7 +2539,49 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) case 0: op = rv_op_addi; break; case 1: switch (((inst >> 27) & 0b11111)) { - case 0: op = rv_op_slli; break; + case 0b00000: op = rv_op_slli; break; + case 0b00001: + switch (((inst >> 20) & 0b1111111)) { + case 0b0001111: op = rv_op_zip; break; + } + break; + case 0b00010: + switch (((inst >> 20) & 0b1111111)) { + case 0b0000000: op = rv_op_sha256sum0; break; + case 0b0000001: op = rv_op_sha256sum1; break; + case 0b0000010: op = rv_op_sha256sig0; break; + case 0b0000011: op = rv_op_sha256sig1; break; + case 0b0000100: op = rv_op_sha512sum0; break; + case 0b0000101: op = rv_op_sha512sum1; break; + case 0b0000110: op = rv_op_sha512sig0; break; + case 0b0000111: op = rv_op_sha512sig1; break; + case 0b0001000: op = rv_op_sm3p0; break; + case 0b0001001: op = rv_op_sm3p1; break; + } + break; + case 0b00101: op = rv_op_bseti; break; + case 0b00110: + switch (((inst >> 20) & 0b1111111)) { + case 0b0000000: op = rv_op_aes64im; break; + default: + if (((inst >> 24) & 0b0111) == 0b001) { + op = rv_op_aes64ks1i; + } + break; + } + break; + case 0b01001: op = rv_op_bclri; break; + case 0b01101: op = rv_op_binvi; break; + case 0b01100: + switch (((inst >> 20) & 0b1111111)) { + case 0b0000000: op = rv_op_clz; break; + case 0b0000001: op = rv_op_ctz; break; + case 0b0000010: op = rv_op_cpop; break; + /* 0b0000011 */ + case 0b0000100: op = rv_op_sext_b; break; + case 0b0000101: op = rv_op_sext_h; break; + } + break; } break; case 2: op = rv_op_slti; break; @@ -1515,8 +2589,23 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) case 4: op = rv_op_xori; break; case 5: switch (((inst >> 27) & 0b11111)) { - case 0: op = rv_op_srli; break; - case 8: op = rv_op_srai; break; + case 0b00000: op = rv_op_srli; break; + case 0b00001: + switch (((inst >> 20) & 0b1111111)) { 
+ case 0b0001111: op = rv_op_unzip; break; + } + break; + case 0b00101: op = rv_op_orc_b; break; + case 0b01000: op = rv_op_srai; break; + case 0b01001: op = rv_op_bexti; break; + case 0b01100: op = rv_op_rori; break; + case 0b01101: + switch ((inst >> 20) & 0b1111111) { + case 0b0011000: op = rv_op_rev8; break; + case 0b0111000: op = rv_op_rev8; break; + case 0b0000111: op = rv_op_brev8; break; + } + break; } break; case 6: op = rv_op_ori; break; @@ -1530,12 +2619,21 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) case 1: switch (((inst >> 25) & 0b1111111)) { case 0: op = rv_op_slliw; break; + case 4: op = rv_op_slli_uw; break; + case 48: + switch ((inst >> 20) & 0b11111) { + case 0b00000: op = rv_op_clzw; break; + case 0b00001: op = rv_op_ctzw; break; + case 0b00010: op = rv_op_cpopw; break; + } + break; } break; case 5: switch (((inst >> 25) & 0b1111111)) { case 0: op = rv_op_srliw; break; case 32: op = rv_op_sraiw; break; + case 48: op = rv_op_roriw; break; } break; } @@ -1551,9 +2649,64 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) break; case 9: switch (((inst >> 12) & 0b111)) { + case 0: + switch (((inst >> 20) & 0b111111111111)) { + case 40: op = rv_op_vs1r_v; break; + case 552: op = rv_op_vs2r_v; break; + case 1576: op = rv_op_vs4r_v; break; + case 3624: op = rv_op_vs8r_v; break; + } + switch (((inst >> 26) & 0b111)) { + case 0: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_vse8_v; break; + case 11: op = rv_op_vsm_v; break; + } + break; + case 1: op = rv_op_vsuxei8_v; break; + case 2: op = rv_op_vsse8_v; break; + case 3: op = rv_op_vsoxei8_v; break; + } + break; case 2: op = rv_op_fsw; break; case 3: op = rv_op_fsd; break; case 4: op = rv_op_fsq; break; + case 5: + switch (((inst >> 26) & 0b111)) { + case 0: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_vse16_v; break; + } + break; + case 1: op = rv_op_vsuxei16_v; break; + case 2: op = rv_op_vsse16_v; break; + case 3: op = rv_op_vsoxei16_v; break; + } + break; + case 6: + switch (((inst >> 26) & 0b111)) { + case 0: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_vse32_v; break; + } + break; + case 1: op = rv_op_vsuxei32_v; break; + case 2: op = rv_op_vsse32_v; break; + case 3: op = rv_op_vsoxei32_v; break; + } + break; + case 7: + switch (((inst >> 26) & 0b111)) { + case 0: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_vse64_v; break; + } + break; + case 1: op = rv_op_vsuxei64_v; break; + case 2: op = rv_op_vsse64_v; break; + case 3: op = rv_op_vsoxei64_v; break; + } + break; } break; case 11: @@ -1623,8 +2776,56 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) case 13: op = rv_op_divu; break; case 14: op = rv_op_rem; break; case 15: op = rv_op_remu; break; + case 36: + switch ((inst >> 20) & 0b11111) { + case 0: op = rv_op_zext_h; break; + default: op = rv_op_pack; break; + } + break; + case 39: op = rv_op_packh; break; + + case 41: op = rv_op_clmul; break; + case 42: op = rv_op_clmulr; break; + case 43: op = rv_op_clmulh; break; + case 44: op = rv_op_min; break; + case 45: op = rv_op_minu; break; + case 46: op = rv_op_max; break; + case 47: op = rv_op_maxu; break; + case 130: op = rv_op_sh1add; break; + case 132: op = rv_op_sh2add; break; + case 134: op = rv_op_sh3add; break; + case 161: op = rv_op_bset; break; + case 162: op = rv_op_xperm4; break; + case 164: op = rv_op_xperm8; break; + case 200: op = rv_op_aes64es; break; + case 216: op = rv_op_aes64esm; break; + case 232: op = rv_op_aes64ds; break; + case 248: op = rv_op_aes64dsm; 
break; case 256: op = rv_op_sub; break; + case 260: op = rv_op_xnor; break; case 261: op = rv_op_sra; break; + case 262: op = rv_op_orn; break; + case 263: op = rv_op_andn; break; + case 289: op = rv_op_bclr; break; + case 293: op = rv_op_bext; break; + case 320: op = rv_op_sha512sum0r; break; + case 328: op = rv_op_sha512sum1r; break; + case 336: op = rv_op_sha512sig0l; break; + case 344: op = rv_op_sha512sig1l; break; + case 368: op = rv_op_sha512sig0h; break; + case 376: op = rv_op_sha512sig1h; break; + case 385: op = rv_op_rol; break; + case 389: op = rv_op_ror; break; + case 417: op = rv_op_binv; break; + case 504: op = rv_op_aes64ks2; break; + } + switch ((inst >> 25) & 0b0011111) { + case 17: op = rv_op_aes32esi; break; + case 19: op = rv_op_aes32esmi; break; + case 21: op = rv_op_aes32dsi; break; + case 23: op = rv_op_aes32dsmi; break; + case 24: op = rv_op_sm4ed; break; + case 26: op = rv_op_sm4ks; break; } break; case 13: op = rv_op_lui; break; @@ -1638,8 +2839,20 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) case 13: op = rv_op_divuw; break; case 14: op = rv_op_remw; break; case 15: op = rv_op_remuw; break; + case 32: op = rv_op_add_uw; break; + case 36: + switch ((inst >> 20) & 0b11111) { + case 0: op = rv_op_zext_h; break; + default: op = rv_op_packw; break; + } + break; + case 130: op = rv_op_sh1add_uw; break; + case 132: op = rv_op_sh2add_uw; break; + case 134: op = rv_op_sh3add_uw; break; case 256: op = rv_op_subw; break; case 261: op = rv_op_sraw; break; + case 385: op = rv_op_rolw; break; + case 389: op = rv_op_rorw; break; } break; case 16: @@ -1860,6 +3073,408 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) break; } break; + case 21: + switch (((inst >> 12) & 0b111)) { + case 0: + switch (((inst >> 26) & 0b111111)) { + case 0: op = rv_op_vadd_vv; break; + case 2: op = rv_op_vsub_vv; break; + case 4: op = rv_op_vminu_vv; break; + case 5: op = rv_op_vmin_vv; break; + case 6: op = rv_op_vmaxu_vv; break; + case 7: op = rv_op_vmax_vv; break; + case 9: op = rv_op_vand_vv; break; + case 10: op = rv_op_vor_vv; break; + case 11: op = rv_op_vxor_vv; break; + case 12: op = rv_op_vrgather_vv; break; + case 14: op = rv_op_vrgatherei16_vv; break; + case 16: if (((inst >> 25) & 1) == 0) op = rv_op_vadc_vvm; break; + case 17: op = rv_op_vmadc_vvm; break; + case 18: if (((inst >> 25) & 1) == 0) op = rv_op_vsbc_vvm; break; + case 19: op = rv_op_vmsbc_vvm; break; + case 23: + if (((inst >> 20) & 0b111111) == 32) + op = rv_op_vmv_v_v; + else if (((inst >> 25) & 1) == 0) + op = rv_op_vmerge_vvm; + break; + case 24: op = rv_op_vmseq_vv; break; + case 25: op = rv_op_vmsne_vv; break; + case 26: op = rv_op_vmsltu_vv; break; + case 27: op = rv_op_vmslt_vv; break; + case 28: op = rv_op_vmsleu_vv; break; + case 29: op = rv_op_vmsle_vv; break; + case 32: op = rv_op_vsaddu_vv; break; + case 33: op = rv_op_vsadd_vv; break; + case 34: op = rv_op_vssubu_vv; break; + case 35: op = rv_op_vssub_vv; break; + case 37: op = rv_op_vsll_vv; break; + case 39: op = rv_op_vsmul_vv; break; + case 40: op = rv_op_vsrl_vv; break; + case 41: op = rv_op_vsra_vv; break; + case 42: op = rv_op_vssrl_vv; break; + case 43: op = rv_op_vssra_vv; break; + case 44: op = rv_op_vnsrl_wv; break; + case 45: op = rv_op_vnsra_wv; break; + case 46: op = rv_op_vnclipu_wv; break; + case 47: op = rv_op_vnclip_wv; break; + case 48: op = rv_op_vwredsumu_vs; break; + case 49: op = rv_op_vwredsum_vs; break; + } + break; + case 1: + switch (((inst >> 26) & 0b111111)) { + case 0: op = rv_op_vfadd_vv; break; + case 
1: op = rv_op_vfredusum_vs; break; + case 2: op = rv_op_vfsub_vv; break; + case 3: op = rv_op_vfredosum_vs; break; + case 4: op = rv_op_vfmin_vv; break; + case 5: op = rv_op_vfredmin_vs; break; + case 6: op = rv_op_vfmax_vv; break; + case 7: op = rv_op_vfredmax_vs; break; + case 8: op = rv_op_vfsgnj_vv; break; + case 9: op = rv_op_vfsgnjn_vv; break; + case 10: op = rv_op_vfsgnjx_vv; break; + case 16: + switch (((inst >> 15) & 0b11111)) { + case 0: if ((inst >> 25) & 1) op = rv_op_vfmv_f_s; break; + } + break; + case 18: + switch (((inst >> 15) & 0b11111)) { + case 0: op = rv_op_vfcvt_xu_f_v; break; + case 1: op = rv_op_vfcvt_x_f_v; break; + case 2: op = rv_op_vfcvt_f_xu_v; break; + case 3: op = rv_op_vfcvt_f_x_v; break; + case 6: op = rv_op_vfcvt_rtz_xu_f_v; break; + case 7: op = rv_op_vfcvt_rtz_x_f_v; break; + case 8: op = rv_op_vfwcvt_xu_f_v; break; + case 9: op = rv_op_vfwcvt_x_f_v; break; + case 10: op = rv_op_vfwcvt_f_xu_v; break; + case 11: op = rv_op_vfwcvt_f_x_v; break; + case 12: op = rv_op_vfwcvt_f_f_v; break; + case 14: op = rv_op_vfwcvt_rtz_xu_f_v; break; + case 15: op = rv_op_vfwcvt_rtz_x_f_v; break; + case 16: op = rv_op_vfncvt_xu_f_w; break; + case 17: op = rv_op_vfncvt_x_f_w; break; + case 18: op = rv_op_vfncvt_f_xu_w; break; + case 19: op = rv_op_vfncvt_f_x_w; break; + case 20: op = rv_op_vfncvt_f_f_w; break; + case 21: op = rv_op_vfncvt_rod_f_f_w; break; + case 22: op = rv_op_vfncvt_rtz_xu_f_w; break; + case 23: op = rv_op_vfncvt_rtz_x_f_w; break; + } + break; + case 19: + switch (((inst >> 15) & 0b11111)) { + case 0: op = rv_op_vfsqrt_v; break; + case 4: op = rv_op_vfrsqrt7_v; break; + case 5: op = rv_op_vfrec7_v; break; + case 16: op = rv_op_vfclass_v; break; + } + break; + case 24: op = rv_op_vmfeq_vv; break; + case 25: op = rv_op_vmfle_vv; break; + case 27: op = rv_op_vmflt_vv; break; + case 28: op = rv_op_vmfne_vv; break; + case 32: op = rv_op_vfdiv_vv; break; + case 36: op = rv_op_vfmul_vv; break; + case 40: op = rv_op_vfmadd_vv; break; + case 41: op = rv_op_vfnmadd_vv; break; + case 42: op = rv_op_vfmsub_vv; break; + case 43: op = rv_op_vfnmsub_vv; break; + case 44: op = rv_op_vfmacc_vv; break; + case 45: op = rv_op_vfnmacc_vv; break; + case 46: op = rv_op_vfmsac_vv; break; + case 47: op = rv_op_vfnmsac_vv; break; + case 48: op = rv_op_vfwadd_vv; break; + case 49: op = rv_op_vfwredusum_vs; break; + case 50: op = rv_op_vfwsub_vv; break; + case 51: op = rv_op_vfwredosum_vs; break; + case 52: op = rv_op_vfwadd_wv; break; + case 54: op = rv_op_vfwsub_wv; break; + case 56: op = rv_op_vfwmul_vv; break; + case 60: op = rv_op_vfwmacc_vv; break; + case 61: op = rv_op_vfwnmacc_vv; break; + case 62: op = rv_op_vfwmsac_vv; break; + case 63: op = rv_op_vfwnmsac_vv; break; + } + break; + case 2: + switch (((inst >> 26) & 0b111111)) { + case 0: op = rv_op_vredsum_vs; break; + case 1: op = rv_op_vredand_vs; break; + case 2: op = rv_op_vredor_vs; break; + case 3: op = rv_op_vredxor_vs; break; + case 4: op = rv_op_vredminu_vs; break; + case 5: op = rv_op_vredmin_vs; break; + case 6: op = rv_op_vredmaxu_vs; break; + case 7: op = rv_op_vredmax_vs; break; + case 8: op = rv_op_vaaddu_vv; break; + case 9: op = rv_op_vaadd_vv; break; + case 10: op = rv_op_vasubu_vv; break; + case 11: op = rv_op_vasub_vv; break; + case 16: + switch (((inst >> 15) & 0b11111)) { + case 0: if ((inst >> 25) & 1) op = rv_op_vmv_x_s; break; + case 16: op = rv_op_vcpop_m; break; + case 17: op = rv_op_vfirst_m; break; + } + break; + case 18: + switch (((inst >> 15) & 0b11111)) { + case 2: op = rv_op_vzext_vf8; 
break; + case 3: op = rv_op_vsext_vf8; break; + case 4: op = rv_op_vzext_vf4; break; + case 5: op = rv_op_vsext_vf4; break; + case 6: op = rv_op_vzext_vf2; break; + case 7: op = rv_op_vsext_vf2; break; + } + break; + case 20: + switch (((inst >> 15) & 0b11111)) { + case 1: op = rv_op_vmsbf_m; break; + case 2: op = rv_op_vmsof_m; break; + case 3: op = rv_op_vmsif_m; break; + case 16: op = rv_op_viota_m; break; + case 17: if (((inst >> 20) & 0b11111) == 0) op = rv_op_vid_v; break; + } + break; + case 23: if ((inst >> 25) & 1) op = rv_op_vcompress_vm; break; + case 24: if ((inst >> 25) & 1) op = rv_op_vmandn_mm; break; + case 25: if ((inst >> 25) & 1) op = rv_op_vmand_mm; break; + case 26: if ((inst >> 25) & 1) op = rv_op_vmor_mm; break; + case 27: if ((inst >> 25) & 1) op = rv_op_vmxor_mm; break; + case 28: if ((inst >> 25) & 1) op = rv_op_vmorn_mm; break; + case 29: if ((inst >> 25) & 1) op = rv_op_vmnand_mm; break; + case 30: if ((inst >> 25) & 1) op = rv_op_vmnor_mm; break; + case 31: if ((inst >> 25) & 1) op = rv_op_vmxnor_mm; break; + case 32: op = rv_op_vdivu_vv; break; + case 33: op = rv_op_vdiv_vv; break; + case 34: op = rv_op_vremu_vv; break; + case 35: op = rv_op_vrem_vv; break; + case 36: op = rv_op_vmulhu_vv; break; + case 37: op = rv_op_vmul_vv; break; + case 38: op = rv_op_vmulhsu_vv; break; + case 39: op = rv_op_vmulh_vv; break; + case 41: op = rv_op_vmadd_vv; break; + case 43: op = rv_op_vnmsub_vv; break; + case 45: op = rv_op_vmacc_vv; break; + case 47: op = rv_op_vnmsac_vv; break; + case 48: op = rv_op_vwaddu_vv; break; + case 49: op = rv_op_vwadd_vv; break; + case 50: op = rv_op_vwsubu_vv; break; + case 51: op = rv_op_vwsub_vv; break; + case 52: op = rv_op_vwaddu_wv; break; + case 53: op = rv_op_vwadd_wv; break; + case 54: op = rv_op_vwsubu_wv; break; + case 55: op = rv_op_vwsub_wv; break; + case 56: op = rv_op_vwmulu_vv; break; + case 58: op = rv_op_vwmulsu_vv; break; + case 59: op = rv_op_vwmul_vv; break; + case 60: op = rv_op_vwmaccu_vv; break; + case 61: op = rv_op_vwmacc_vv; break; + case 63: op = rv_op_vwmaccsu_vv; break; + } + break; + case 3: + switch (((inst >> 26) & 0b111111)) { + case 0: op = rv_op_vadd_vi; break; + case 3: op = rv_op_vrsub_vi; break; + case 9: op = rv_op_vand_vi; break; + case 10: op = rv_op_vor_vi; break; + case 11: op = rv_op_vxor_vi; break; + case 12: op = rv_op_vrgather_vi; break; + case 14: op = rv_op_vslideup_vi; break; + case 15: op = rv_op_vslidedown_vi; break; + case 16: if (((inst >> 25) & 1) == 0) op = rv_op_vadc_vim; break; + case 17: op = rv_op_vmadc_vim; break; + case 23: + if (((inst >> 20) & 0b111111) == 32) + op = rv_op_vmv_v_i; + else if (((inst >> 25) & 1) == 0) + op = rv_op_vmerge_vim; + break; + case 24: op = rv_op_vmseq_vi; break; + case 25: op = rv_op_vmsne_vi; break; + case 28: op = rv_op_vmsleu_vi; break; + case 29: op = rv_op_vmsle_vi; break; + case 30: op = rv_op_vmsgtu_vi; break; + case 31: op = rv_op_vmsgt_vi; break; + case 32: op = rv_op_vsaddu_vi; break; + case 33: op = rv_op_vsadd_vi; break; + case 37: op = rv_op_vsll_vi; break; + case 39: + switch (((inst >> 15) & 0b11111)) { + case 0: op = rv_op_vmv1r_v; break; + case 1: op = rv_op_vmv2r_v; break; + case 3: op = rv_op_vmv4r_v; break; + case 7: op = rv_op_vmv8r_v; break; + } + break; + case 40: op = rv_op_vsrl_vi; break; + case 41: op = rv_op_vsra_vi; break; + case 42: op = rv_op_vssrl_vi; break; + case 43: op = rv_op_vssra_vi; break; + case 44: op = rv_op_vnsrl_wi; break; + case 45: op = rv_op_vnsra_wi; break; + case 46: op = rv_op_vnclipu_wi; break; + case 47: 
op = rv_op_vnclip_wi; break; + } + break; + case 4: + switch (((inst >> 26) & 0b111111)) { + case 0: op = rv_op_vadd_vx; break; + case 2: op = rv_op_vsub_vx; break; + case 3: op = rv_op_vrsub_vx; break; + case 4: op = rv_op_vminu_vx; break; + case 5: op = rv_op_vmin_vx; break; + case 6: op = rv_op_vmaxu_vx; break; + case 7: op = rv_op_vmax_vx; break; + case 9: op = rv_op_vand_vx; break; + case 10: op = rv_op_vor_vx; break; + case 11: op = rv_op_vxor_vx; break; + case 12: op = rv_op_vrgather_vx; break; + case 14: op = rv_op_vslideup_vx; break; + case 15: op = rv_op_vslidedown_vx; break; + case 16: if (((inst >> 25) & 1) == 0) op = rv_op_vadc_vxm; break; + case 17: op = rv_op_vmadc_vxm; break; + case 18: if (((inst >> 25) & 1) == 0) op = rv_op_vsbc_vxm; break; + case 19: op = rv_op_vmsbc_vxm; break; + case 23: + if (((inst >> 20) & 0b111111) == 32) + op = rv_op_vmv_v_x; + else if (((inst >> 25) & 1) == 0) + op = rv_op_vmerge_vxm; + break; + case 24: op = rv_op_vmseq_vx; break; + case 25: op = rv_op_vmsne_vx; break; + case 26: op = rv_op_vmsltu_vx; break; + case 27: op = rv_op_vmslt_vx; break; + case 28: op = rv_op_vmsleu_vx; break; + case 29: op = rv_op_vmsle_vx; break; + case 30: op = rv_op_vmsgtu_vx; break; + case 31: op = rv_op_vmsgt_vx; break; + case 32: op = rv_op_vsaddu_vx; break; + case 33: op = rv_op_vsadd_vx; break; + case 34: op = rv_op_vssubu_vx; break; + case 35: op = rv_op_vssub_vx; break; + case 37: op = rv_op_vsll_vx; break; + case 39: op = rv_op_vsmul_vx; break; + case 40: op = rv_op_vsrl_vx; break; + case 41: op = rv_op_vsra_vx; break; + case 42: op = rv_op_vssrl_vx; break; + case 43: op = rv_op_vssra_vx; break; + case 44: op = rv_op_vnsrl_wx; break; + case 45: op = rv_op_vnsra_wx; break; + case 46: op = rv_op_vnclipu_wx; break; + case 47: op = rv_op_vnclip_wx; break; + } + break; + case 5: + switch (((inst >> 26) & 0b111111)) { + case 0: op = rv_op_vfadd_vf; break; + case 2: op = rv_op_vfsub_vf; break; + case 4: op = rv_op_vfmin_vf; break; + case 6: op = rv_op_vfmax_vf; break; + case 8: op = rv_op_vfsgnj_vf; break; + case 9: op = rv_op_vfsgnjn_vf; break; + case 10: op = rv_op_vfsgnjx_vf; break; + case 14: op = rv_op_vfslide1up_vf; break; + case 15: op = rv_op_vfslide1down_vf; break; + case 16: + switch (((inst >> 20) & 0b11111)) { + case 0: if ((inst >> 25) & 1) op = rv_op_vfmv_s_f; break; + } + break; + case 23: + if (((inst >> 25) & 1) == 0) + op = rv_op_vfmerge_vfm; + else if (((inst >> 20) & 0b111111) == 32) + op = rv_op_vfmv_v_f; + break; + case 24: op = rv_op_vmfeq_vf; break; + case 25: op = rv_op_vmfle_vf; break; + case 27: op = rv_op_vmflt_vf; break; + case 28: op = rv_op_vmfne_vf; break; + case 29: op = rv_op_vmfgt_vf; break; + case 31: op = rv_op_vmfge_vf; break; + case 32: op = rv_op_vfdiv_vf; break; + case 33: op = rv_op_vfrdiv_vf; break; + case 36: op = rv_op_vfmul_vf; break; + case 39: op = rv_op_vfrsub_vf; break; + case 40: op = rv_op_vfmadd_vf; break; + case 41: op = rv_op_vfnmadd_vf; break; + case 42: op = rv_op_vfmsub_vf; break; + case 43: op = rv_op_vfnmsub_vf; break; + case 44: op = rv_op_vfmacc_vf; break; + case 45: op = rv_op_vfnmacc_vf; break; + case 46: op = rv_op_vfmsac_vf; break; + case 47: op = rv_op_vfnmsac_vf; break; + case 48: op = rv_op_vfwadd_vf; break; + case 50: op = rv_op_vfwsub_vf; break; + case 52: op = rv_op_vfwadd_wf; break; + case 54: op = rv_op_vfwsub_wf; break; + case 56: op = rv_op_vfwmul_vf; break; + case 60: op = rv_op_vfwmacc_vf; break; + case 61: op = rv_op_vfwnmacc_vf; break; + case 62: op = rv_op_vfwmsac_vf; break; + case 63: 
op = rv_op_vfwnmsac_vf; break; + } + break; + case 6: + switch (((inst >> 26) & 0b111111)) { + case 8: op = rv_op_vaaddu_vx; break; + case 9: op = rv_op_vaadd_vx; break; + case 10: op = rv_op_vasubu_vx; break; + case 11: op = rv_op_vasub_vx; break; + case 14: op = rv_op_vslide1up_vx; break; + case 15: op = rv_op_vslide1down_vx; break; + case 16: + switch (((inst >> 20) & 0b11111)) { + case 0: if ((inst >> 25) & 1) op = rv_op_vmv_s_x; break; + } + break; + case 32: op = rv_op_vdivu_vx; break; + case 33: op = rv_op_vdiv_vx; break; + case 34: op = rv_op_vremu_vx; break; + case 35: op = rv_op_vrem_vx; break; + case 36: op = rv_op_vmulhu_vx; break; + case 37: op = rv_op_vmul_vx; break; + case 38: op = rv_op_vmulhsu_vx; break; + case 39: op = rv_op_vmulh_vx; break; + case 41: op = rv_op_vmadd_vx; break; + case 43: op = rv_op_vnmsub_vx; break; + case 45: op = rv_op_vmacc_vx; break; + case 47: op = rv_op_vnmsac_vx; break; + case 48: op = rv_op_vwaddu_vx; break; + case 49: op = rv_op_vwadd_vx; break; + case 50: op = rv_op_vwsubu_vx; break; + case 51: op = rv_op_vwsub_vx; break; + case 52: op = rv_op_vwaddu_wx; break; + case 53: op = rv_op_vwadd_wx; break; + case 54: op = rv_op_vwsubu_wx; break; + case 55: op = rv_op_vwsub_wx; break; + case 56: op = rv_op_vwmulu_vx; break; + case 58: op = rv_op_vwmulsu_vx; break; + case 59: op = rv_op_vwmul_vx; break; + case 60: op = rv_op_vwmaccu_vx; break; + case 61: op = rv_op_vwmacc_vx; break; + case 62: op = rv_op_vwmaccus_vx; break; + case 63: op = rv_op_vwmaccsu_vx; break; + } + break; + case 7: + if (((inst >> 31) & 1) == 0) { + op = rv_op_vsetvli; + } else if ((inst >> 30) & 1) { + op = rv_op_vsetivli; + } else if (((inst >> 25) & 0b11111) == 0) { + op = rv_op_vsetvl; + } + break; + } + break; case 22: switch (((inst >> 12) & 0b111)) { case 0: op = rv_op_addid; break; @@ -2108,10 +3723,25 @@ static int32_t operand_sbimm12(rv_inst inst) ((inst << 56) >> 63) << 11; } -static uint32_t operand_cimmsh6(rv_inst inst) +static uint32_t operand_cimmshl6(rv_inst inst, rv_isa isa) { - return ((inst << 51) >> 63) << 5 | + int imm = ((inst << 51) >> 63) << 5 | (inst << 57) >> 59; + if (isa == rv128) { + imm = imm ? imm : 64; + } + return imm; +} + +static uint32_t operand_cimmshr6(rv_inst inst, rv_isa isa) +{ + int imm = ((inst << 51) >> 63) << 5 | + (inst << 57) >> 59; + if (isa == rv128) { + imm = imm | (imm & 32) << 1; + imm = imm ? 
imm : 64; + } + return imm; } static int32_t operand_cimmi(rv_inst inst) @@ -2223,9 +3853,39 @@ static uint32_t operand_cimmq(rv_inst inst) ((inst << 57) >> 62) << 6; } +static uint32_t operand_vimm(rv_inst inst) +{ + return (int64_t)(inst << 44) >> 59; +} + +static uint32_t operand_vzimm11(rv_inst inst) +{ + return (inst << 33) >> 53; +} + +static uint32_t operand_vzimm10(rv_inst inst) +{ + return (inst << 34) >> 54; +} + +static uint32_t operand_bs(rv_inst inst) +{ + return (inst << 32) >> 62; +} + +static uint32_t operand_rnum(rv_inst inst) +{ + return (inst << 40) >> 60; +} + +static uint32_t operand_vm(rv_inst inst) +{ + return (inst << 38) >> 63; +} + /* decode operands */ -static void decode_inst_operands(rv_decode *dec) +static void decode_inst_operands(rv_decode *dec, rv_isa isa) { rv_inst inst = dec->inst; dec->codec = opcode_data[dec->op].codec; @@ -2348,7 +4008,7 @@ static void decode_inst_operands(rv_decode *dec) case rv_codec_cb_sh6: dec->rd = dec->rs1 = operand_crs1rdq(inst) + 8; dec->rs2 = rv_ireg_zero; - dec->imm = operand_cimmsh6(inst); + dec->imm = operand_cimmshr6(inst, isa); break; case rv_codec_ci: dec->rd = dec->rs1 = operand_crs1rd(inst); @@ -2363,7 +4023,7 @@ static void decode_inst_operands(rv_decode *dec) case rv_codec_ci_sh6: dec->rd = dec->rs1 = operand_crs1rd(inst); dec->rs2 = rv_ireg_zero; - dec->imm = operand_cimmsh6(inst); + dec->imm = operand_cimmshl6(inst, isa); break; case rv_codec_ci_16sp: dec->rd = rv_ireg_sp; @@ -2502,6 +4162,43 @@ static void decode_inst_operands(rv_decode *dec) dec->rs2 = operand_crs2(inst); dec->imm = operand_cimmsqsp(inst); break; + case rv_codec_k_bs: + dec->rs1 = operand_rs1(inst); + dec->rs2 = operand_rs2(inst); + dec->bs = operand_bs(inst); + break; + case rv_codec_k_rnum: + dec->rd = operand_rd(inst); + dec->rs1 = operand_rs1(inst); + dec->rnum = operand_rnum(inst); + break; + case rv_codec_v_r: + dec->rd = operand_rd(inst); + dec->rs1 = operand_rs1(inst); + dec->rs2 = operand_rs2(inst); + dec->vm = operand_vm(inst); + break; + case rv_codec_v_ldst: + dec->rd = operand_rd(inst); + dec->rs1 = operand_rs1(inst); + dec->vm = operand_vm(inst); + break; + case rv_codec_v_i: + dec->rd = operand_rd(inst); + dec->rs2 = operand_rs2(inst); + dec->imm = operand_vimm(inst); + dec->vm = operand_vm(inst); + break; + case rv_codec_vsetvli: + dec->rd = operand_rd(inst); + dec->rs1 = operand_rs1(inst); + dec->vzimm = operand_vzimm11(inst); + break; + case rv_codec_vsetivli: + dec->rd = operand_rd(inst); + dec->imm = operand_vimm(inst); + dec->vzimm = operand_vzimm10(inst); + break; }; } @@ -2661,6 +4358,14 @@ static void format_inst(char *buf, size_t buflen, size_t tab, rv_decode *dec) case ')': append(buf, ")", buflen); break; + case 'b': + snprintf(tmp, sizeof(tmp), "%d", dec->bs); + append(buf, tmp, buflen); + break; + case 'n': + snprintf(tmp, sizeof(tmp), "%d", dec->rnum); + append(buf, tmp, buflen); + break; case '0': append(buf, rv_ireg_name_sym[dec->rd], buflen); break; @@ -2690,6 +4395,10 @@ static void format_inst(char *buf, size_t buflen, size_t tab, rv_decode *dec) snprintf(tmp, sizeof(tmp), "%d", dec->imm); append(buf, tmp, buflen); break; + case 'u': + snprintf(tmp, sizeof(tmp), "%u", ((uint32_t)dec->imm & 0b11111)); + append(buf, tmp, buflen); + break; case 'o': snprintf(tmp, sizeof(tmp), "%d", dec->imm); append(buf, tmp, buflen); @@ -2778,6 +4487,60 @@ static void format_inst(char *buf, size_t buflen, size_t tab, rv_decode *dec) append(buf, ".rl", buflen); } break; + case 'l': + append(buf, ",v0", buflen); + break; + case 'm': 
+ if (dec->vm == 0) { + append(buf, ",v0.t", buflen); + } + break; + case 'D': + append(buf, rv_vreg_name_sym[dec->rd], buflen); + break; + case 'E': + append(buf, rv_vreg_name_sym[dec->rs1], buflen); + break; + case 'F': + append(buf, rv_vreg_name_sym[dec->rs2], buflen); + break; + case 'G': + append(buf, rv_vreg_name_sym[dec->rs3], buflen); + break; + case 'v': { + char nbuf[32] = {0}; + const int sew = 1 << (((dec->vzimm >> 3) & 0b111) + 3); + sprintf(nbuf, "%d", sew); + const int lmul = dec->vzimm & 0b11; + const int flmul = (dec->vzimm >> 2) & 1; + const char *vta = (dec->vzimm >> 6) & 1 ? "ta" : "tu"; + const char *vma = (dec->vzimm >> 7) & 1 ? "ma" : "mu"; + append(buf, "e", buflen); + append(buf, nbuf, buflen); + append(buf, ",m", buflen); + if (flmul) { + switch (lmul) { + case 3: + sprintf(nbuf, "f2"); + break; + case 2: + sprintf(nbuf, "f4"); + break; + case 1: + sprintf(nbuf, "f8"); + break; + } + append(buf, nbuf, buflen); + } else { + sprintf(nbuf, "%d", 1 << lmul); + append(buf, nbuf, buflen); + } + append(buf, ",", buflen); + append(buf, vta, buflen); + append(buf, ",", buflen); + append(buf, vma, buflen); + break; + } default: break; } @@ -2871,10 +4634,10 @@ disasm_inst(char *buf, size_t buflen, rv_isa isa, uint64_t pc, rv_inst inst) dec.pc = pc; dec.inst = inst; decode_inst_opcode(&dec, isa); - decode_inst_operands(&dec); + decode_inst_operands(&dec, isa); decode_inst_decompress(&dec, isa); decode_inst_lift_pseudo(&dec); - format_inst(buf, buflen, 16, &dec); + format_inst(buf, buflen, 24, &dec); } #define INST_FMT_2 "%04" PRIx64 " " @@ -2939,3 +4702,8 @@ int print_insn_riscv64(bfd_vma memaddr, struct disassemble_info *info) { return print_insn_riscv(memaddr, info, rv64); } + +int print_insn_riscv128(bfd_vma memaddr, struct disassemble_info *info) +{ + return print_insn_riscv(memaddr, info, rv128); +} diff --git a/disas/s390.c b/disas/s390.c deleted file mode 100644 index a9ec8fa593..0000000000 --- a/disas/s390.c +++ /dev/null @@ -1,1892 +0,0 @@ -/* opcodes/s390-dis.c revision 1.12 */ -/* s390-dis.c -- Disassemble S390 instructions - Copyright 2000, 2001, 2002, 2003, 2005 Free Software Foundation, Inc. - Contributed by Martin Schwidefsky (schwidefsky@de.ibm.com). - - This file is part of GDB, GAS and the GNU binutils. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA - 02110-1301, USA. */ - -#include "qemu/osdep.h" -#include "disas/dis-asm.h" - -/* include/opcode/s390.h revision 1.9 */ -/* s390.h -- Header file for S390 opcode table - Copyright 2000, 2001, 2003 Free Software Foundation, Inc. - Contributed by Martin Schwidefsky (schwidefsky@de.ibm.com). - - This file is part of BFD, the Binary File Descriptor library. 
- - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA - 02110-1301, USA. */ - -#ifndef S390_H -#define S390_H - -/* List of instruction sets variations. */ - -enum s390_opcode_mode_val - { - S390_OPCODE_ESA = 0, - S390_OPCODE_ZARCH - }; - -enum s390_opcode_cpu_val - { - S390_OPCODE_G5 = 0, - S390_OPCODE_G6, - S390_OPCODE_Z900, - S390_OPCODE_Z990, - S390_OPCODE_Z9_109, - S390_OPCODE_Z9_EC, - S390_OPCODE_Z10 - }; - -/* The opcode table is an array of struct s390_opcode. */ - -struct s390_opcode - { - /* The opcode name. */ - const char * name; - - /* The opcode itself. Those bits which will be filled in with - operands are zeroes. */ - unsigned char opcode[6]; - - /* The opcode mask. This is used by the disassembler. This is a - mask containing ones indicating those bits which must match the - opcode field, and zeroes indicating those bits which need not - match (and are presumably filled in by operands). */ - unsigned char mask[6]; - - /* The opcode length in bytes. */ - int oplen; - - /* An array of operand codes. Each code is an index into the - operand table. They appear in the order which the operands must - appear in assembly code, and are terminated by a zero. */ - unsigned char operands[6]; - - /* Bitmask of execution modes this opcode is available for. */ - unsigned int modes; - - /* First cpu this opcode is available for. */ - enum s390_opcode_cpu_val min_cpu; - }; - -/* The table itself is sorted by major opcode number, and is otherwise - in the order in which the disassembler should consider - instructions. */ -/* QEMU: Mark these static. */ -static const struct s390_opcode s390_opcodes[]; -static const int s390_num_opcodes; - -/* Values defined for the flags field of a struct powerpc_opcode. */ - -/* The operands table is an array of struct s390_operand. */ - -struct s390_operand - { - /* The number of bits in the operand. */ - int bits; - - /* How far the operand is left shifted in the instruction. */ - int shift; - - /* One bit syntax flags. */ - unsigned long flags; - }; - -/* Elements in the table are retrieved by indexing with values from - the operands field of the powerpc_opcodes table. */ - -static const struct s390_operand s390_operands[]; - -/* Values defined for the flags field of a struct s390_operand. */ - -/* This operand names a register. The disassembler uses this to print - register names with a leading 'r'. */ -#define S390_OPERAND_GPR 0x1 - -/* This operand names a floating point register. The disassembler - prints these with a leading 'f'. */ -#define S390_OPERAND_FPR 0x2 - -/* This operand names an access register. The disassembler - prints these with a leading 'a'. */ -#define S390_OPERAND_AR 0x4 - -/* This operand names a control register. The disassembler - prints these with a leading 'c'. */ -#define S390_OPERAND_CR 0x8 - -/* This operand is a displacement. 
*/ -#define S390_OPERAND_DISP 0x10 - -/* This operand names a base register. */ -#define S390_OPERAND_BASE 0x20 - -/* This operand names an index register, it can be skipped. */ -#define S390_OPERAND_INDEX 0x40 - -/* This operand is a relative branch displacement. The disassembler - prints these symbolically if possible. */ -#define S390_OPERAND_PCREL 0x80 - -/* This operand takes signed values. */ -#define S390_OPERAND_SIGNED 0x100 - -/* This operand is a length. */ -#define S390_OPERAND_LENGTH 0x200 - -/* This operand is optional. Only a single operand at the end of - the instruction may be optional. */ -#define S390_OPERAND_OPTIONAL 0x400 - -/* QEMU-ADD */ -/* ??? Not quite the format the assembler takes, but easy to implement - without recourse to the table generator. */ -#define S390_OPERAND_CCODE 0x800 - -static const char s390_ccode_name[16][4] = { - "n", /* 0000 */ - "o", /* 0001 */ - "h", /* 0010 */ - "nle", /* 0011 */ - "l", /* 0100 */ - "nhe", /* 0101 */ - "lh", /* 0110 */ - "ne", /* 0111 */ - "e", /* 1000 */ - "nlh", /* 1001 */ - "he", /* 1010 */ - "nl", /* 1011 */ - "le", /* 1100 */ - "nh", /* 1101 */ - "no", /* 1110 */ - "a" /* 1111 */ -}; -/* QEMU-END */ - -#endif /* S390_H */ - -static int init_flag = 0; -static int opc_index[256]; - -/* QEMU: We've disabled the architecture check below. */ -/* static int current_arch_mask = 0; */ - -/* Set up index table for first opcode byte. */ - -static void -init_disasm (struct disassemble_info *info) -{ - int i; - - memset (opc_index, 0, sizeof (opc_index)); - - /* Reverse order, such that each opc_index ends up pointing to the - first matching entry instead of the last. */ - for (i = s390_num_opcodes; i--; ) - opc_index[s390_opcodes[i].opcode[0]] = i; - -#ifdef QEMU_DISABLE - switch (info->mach) - { - case bfd_mach_s390_31: - current_arch_mask = 1 << S390_OPCODE_ESA; - break; - case bfd_mach_s390_64: - current_arch_mask = 1 << S390_OPCODE_ZARCH; - break; - default: - abort (); - } -#endif /* QEMU_DISABLE */ - - init_flag = 1; -} - -/* Extracts an operand value from an instruction. */ - -static inline unsigned int -s390_extract_operand (unsigned char *insn, const struct s390_operand *operand) -{ - unsigned int val; - int bits; - - /* Extract fragments of the operand byte for byte. */ - insn += operand->shift / 8; - bits = (operand->shift & 7) + operand->bits; - val = 0; - do - { - val <<= 8; - val |= (unsigned int) *insn++; - bits -= 8; - } - while (bits > 0); - val >>= -bits; - val &= ((1U << (operand->bits - 1)) << 1) - 1; - - /* Check for special long displacement case. */ - if (operand->bits == 20 && operand->shift == 20) - val = (val & 0xff) << 12 | (val & 0xfff00) >> 8; - - /* Sign extend value if the operand is signed or pc relative. */ - if ((operand->flags & (S390_OPERAND_SIGNED | S390_OPERAND_PCREL)) - && (val & (1U << (operand->bits - 1)))) - val |= (-1U << (operand->bits - 1)) << 1; - - /* Double value if the operand is pc relative. */ - if (operand->flags & S390_OPERAND_PCREL) - val <<= 1; - - /* Length x in an instructions has real length x + 1. */ - if (operand->flags & S390_OPERAND_LENGTH) - val++; - return val; -} - -/* Print a S390 instruction. */ - -int -print_insn_s390 (bfd_vma memaddr, struct disassemble_info *info) -{ - bfd_byte buffer[6]; - const struct s390_opcode *opcode; - const struct s390_opcode *opcode_end; - unsigned int value; - int status, opsize, bufsize; - char separator; - - if (init_flag == 0) - init_disasm (info); - - /* The output looks better if we put 6 bytes on a line. 
*/ - info->bytes_per_line = 6; - - /* Every S390 instruction is max 6 bytes long. */ - memset (buffer, 0, 6); - status = (*info->read_memory_func) (memaddr, buffer, 6, info); - if (status != 0) - { - for (bufsize = 0; bufsize < 6; bufsize++) - if ((*info->read_memory_func) (memaddr, buffer, bufsize + 1, info) != 0) - break; - if (bufsize <= 0) - { - (*info->memory_error_func) (status, memaddr, info); - return -1; - } - /* Opsize calculation looks strange but it works - 00xxxxxx -> 2 bytes, 01xxxxxx/10xxxxxx -> 4 bytes, - 11xxxxxx -> 6 bytes. */ - opsize = ((((buffer[0] >> 6) + 1) >> 1) + 1) << 1; - status = opsize > bufsize; - } - else - { - bufsize = 6; - opsize = ((((buffer[0] >> 6) + 1) >> 1) + 1) << 1; - } - - if (status == 0) - { - /* Find the first match in the opcode table. */ - opcode_end = s390_opcodes + s390_num_opcodes; - for (opcode = s390_opcodes + opc_index[(int) buffer[0]]; - (opcode < opcode_end) && (buffer[0] == opcode->opcode[0]); - opcode++) - { - const struct s390_operand *operand; - const unsigned char *opindex; - -#ifdef QEMU_DISABLE - /* Check architecture. */ - if (!(opcode->modes & current_arch_mask)) - continue; -#endif /* QEMU_DISABLE */ - - /* Check signature of the opcode. */ - if ((buffer[1] & opcode->mask[1]) != opcode->opcode[1] - || (buffer[2] & opcode->mask[2]) != opcode->opcode[2] - || (buffer[3] & opcode->mask[3]) != opcode->opcode[3] - || (buffer[4] & opcode->mask[4]) != opcode->opcode[4] - || (buffer[5] & opcode->mask[5]) != opcode->opcode[5]) - continue; - - /* The instruction is valid. */ -/* QEMU-MOD */ - (*info->fprintf_func) (info->stream, "%s", opcode->name); - - if (s390_operands[opcode->operands[0]].flags & S390_OPERAND_CCODE) - separator = 0; - else - separator = '\t'; -/* QEMU-END */ - - /* Extract the operands. */ - for (opindex = opcode->operands; *opindex != 0; opindex++) - { - unsigned int value; - - operand = s390_operands + *opindex; - value = s390_extract_operand (buffer, operand); - - if ((operand->flags & S390_OPERAND_INDEX) && value == 0) - continue; - if ((operand->flags & S390_OPERAND_BASE) && - value == 0 && separator == '(') - { - separator = ','; - continue; - } - - if (separator) - (*info->fprintf_func) (info->stream, "%c", separator); - - if (operand->flags & S390_OPERAND_GPR) - (*info->fprintf_func) (info->stream, "%%r%i", value); - else if (operand->flags & S390_OPERAND_FPR) - (*info->fprintf_func) (info->stream, "%%f%i", value); - else if (operand->flags & S390_OPERAND_AR) - (*info->fprintf_func) (info->stream, "%%a%i", value); - else if (operand->flags & S390_OPERAND_CR) - (*info->fprintf_func) (info->stream, "%%c%i", value); - else if (operand->flags & S390_OPERAND_PCREL) - (*info->print_address_func) (memaddr + (int) value, info); - else if (operand->flags & S390_OPERAND_SIGNED) - (*info->fprintf_func) (info->stream, "%i", (int) value); -/* QEMU-ADD */ - else if (operand->flags & S390_OPERAND_CCODE) - { - (*info->fprintf_func) (info->stream, "%s", - s390_ccode_name[(int) value]); - separator = '\t'; - continue; - } -/* QEMU-END */ - else - (*info->fprintf_func) (info->stream, "%u", value); - - if (operand->flags & S390_OPERAND_DISP) - { - separator = '('; - } - else if (operand->flags & S390_OPERAND_BASE) - { - (*info->fprintf_func) (info->stream, ")"); - separator = ','; - } - else - separator = ','; - } - - /* Found instruction, printed it, return its size. */ - return opsize; - } - /* No matching instruction found, fall through to hex print. 
*/ - } - - if (bufsize >= 4) - { - value = (unsigned int) buffer[0]; - value = (value << 8) + (unsigned int) buffer[1]; - value = (value << 8) + (unsigned int) buffer[2]; - value = (value << 8) + (unsigned int) buffer[3]; - (*info->fprintf_func) (info->stream, ".long\t0x%08x", value); - return 4; - } - else if (bufsize >= 2) - { - value = (unsigned int) buffer[0]; - value = (value << 8) + (unsigned int) buffer[1]; - (*info->fprintf_func) (info->stream, ".short\t0x%04x", value); - return 2; - } - else - { - value = (unsigned int) buffer[0]; - (*info->fprintf_func) (info->stream, ".byte\t0x%02x", value); - return 1; - } -} - -/* opcodes/s390-opc.c revision 1.16 */ -/* s390-opc.c -- S390 opcode list - Copyright 2000, 2001, 2003 Free Software Foundation, Inc. - Contributed by Martin Schwidefsky (schwidefsky@de.ibm.com). - - This file is part of GDB, GAS, and the GNU binutils. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA - 02110-1301, USA. */ - -/* This file holds the S390 opcode table. The opcode table - includes almost all of the extended instruction mnemonics. This - permits the disassembler to use them, and simplifies the assembler - logic, at the cost of increasing the table size. The table is - strictly constant data, so the compiler should be able to put it in - the .text section. - - This file also holds the operand table. All knowledge about - inserting operands into instructions and vice-versa is kept in this - file. */ - -/* The operands table. - The fields are bits, shift, insert, extract, flags. */ - -static const struct s390_operand s390_operands[] = -{ -#define UNUSED 0 - { 0, 0, 0 }, /* Indicates the end of the operand list */ - -#define R_8 1 /* GPR starting at position 8 */ - { 4, 8, S390_OPERAND_GPR }, -#define R_12 2 /* GPR starting at position 12 */ - { 4, 12, S390_OPERAND_GPR }, -#define R_16 3 /* GPR starting at position 16 */ - { 4, 16, S390_OPERAND_GPR }, -#define R_20 4 /* GPR starting at position 20 */ - { 4, 20, S390_OPERAND_GPR }, -#define R_24 5 /* GPR starting at position 24 */ - { 4, 24, S390_OPERAND_GPR }, -#define R_28 6 /* GPR starting at position 28 */ - { 4, 28, S390_OPERAND_GPR }, -#define R_32 7 /* GPR starting at position 32 */ - { 4, 32, S390_OPERAND_GPR }, - -#define F_8 8 /* FPR starting at position 8 */ - { 4, 8, S390_OPERAND_FPR }, -#define F_12 9 /* FPR starting at position 12 */ - { 4, 12, S390_OPERAND_FPR }, -#define F_16 10 /* FPR starting at position 16 */ - { 4, 16, S390_OPERAND_FPR }, -#define F_20 11 /* FPR starting at position 16 */ - { 4, 16, S390_OPERAND_FPR }, -#define F_24 12 /* FPR starting at position 24 */ - { 4, 24, S390_OPERAND_FPR }, -#define F_28 13 /* FPR starting at position 28 */ - { 4, 28, S390_OPERAND_FPR }, -#define F_32 14 /* FPR starting at position 32 */ - { 4, 32, S390_OPERAND_FPR }, - -#define A_8 15 /* Access reg. 
starting at position 8 */ - { 4, 8, S390_OPERAND_AR }, -#define A_12 16 /* Access reg. starting at position 12 */ - { 4, 12, S390_OPERAND_AR }, -#define A_24 17 /* Access reg. starting at position 24 */ - { 4, 24, S390_OPERAND_AR }, -#define A_28 18 /* Access reg. starting at position 28 */ - { 4, 28, S390_OPERAND_AR }, - -#define C_8 19 /* Control reg. starting at position 8 */ - { 4, 8, S390_OPERAND_CR }, -#define C_12 20 /* Control reg. starting at position 12 */ - { 4, 12, S390_OPERAND_CR }, - -#define B_16 21 /* Base register starting at position 16 */ - { 4, 16, S390_OPERAND_BASE|S390_OPERAND_GPR }, -#define B_32 22 /* Base register starting at position 32 */ - { 4, 32, S390_OPERAND_BASE|S390_OPERAND_GPR }, - -#define X_12 23 /* Index register starting at position 12 */ - { 4, 12, S390_OPERAND_INDEX|S390_OPERAND_GPR }, - -#define D_20 24 /* Displacement starting at position 20 */ - { 12, 20, S390_OPERAND_DISP }, -#define D_36 25 /* Displacement starting at position 36 */ - { 12, 36, S390_OPERAND_DISP }, -#define D20_20 26 /* 20 bit displacement starting at 20 */ - { 20, 20, S390_OPERAND_DISP|S390_OPERAND_SIGNED }, - -#define L4_8 27 /* 4 bit length starting at position 8 */ - { 4, 8, S390_OPERAND_LENGTH }, -#define L4_12 28 /* 4 bit length starting at position 12 */ - { 4, 12, S390_OPERAND_LENGTH }, -#define L8_8 29 /* 8 bit length starting at position 8 */ - { 8, 8, S390_OPERAND_LENGTH }, - -#define U4_8 30 /* 4 bit unsigned value starting at 8 */ - { 4, 8, 0 }, -#define U4_12 31 /* 4 bit unsigned value starting at 12 */ - { 4, 12, 0 }, -#define U4_16 32 /* 4 bit unsigned value starting at 16 */ - { 4, 16, 0 }, -#define U4_20 33 /* 4 bit unsigned value starting at 20 */ - { 4, 20, 0 }, -#define U8_8 34 /* 8 bit unsigned value starting at 8 */ - { 8, 8, 0 }, -#define U8_16 35 /* 8 bit unsigned value starting at 16 */ - { 8, 16, 0 }, -#define I16_16 36 /* 16 bit signed value starting at 16 */ - { 16, 16, S390_OPERAND_SIGNED }, -#define U16_16 37 /* 16 bit unsigned value starting at 16 */ - { 16, 16, 0 }, -#define J16_16 38 /* PC relative jump offset at 16 */ - { 16, 16, S390_OPERAND_PCREL }, -#define J32_16 39 /* PC relative long offset at 16 */ - { 32, 16, S390_OPERAND_PCREL }, -#define I32_16 40 /* 32 bit signed value starting at 16 */ - { 32, 16, S390_OPERAND_SIGNED }, -#define U32_16 41 /* 32 bit unsigned value starting at 16 */ - { 32, 16, 0 }, -#define M_16 42 /* 4 bit optional mask starting at 16 */ - { 4, 16, S390_OPERAND_OPTIONAL }, -#define RO_28 43 /* optional GPR starting at position 28 */ - { 4, 28, (S390_OPERAND_GPR | S390_OPERAND_OPTIONAL) }, - -/* QEMU-ADD: */ -#define M4_12 44 /* 4-bit condition-code starting at 12 */ - { 4, 12, S390_OPERAND_CCODE }, -#define M4_32 45 /* 4-bit condition-code starting at 32 */ - { 4, 32, S390_OPERAND_CCODE }, -#define I8_32 46 /* 8 bit signed value starting at 32 */ - { 8, 32, S390_OPERAND_SIGNED }, -#define U8_24 47 /* 8 bit unsigned value starting at 24 */ - { 8, 24, 0 }, -#define U8_32 48 /* 8 bit unsigned value starting at 32 */ - { 8, 32, 0 }, -#define I16_32 49 - { 16, 32, S390_OPERAND_SIGNED }, -#define M4_16 50 /* 4-bit condition-code starting at 12 */ - { 4, 16, S390_OPERAND_CCODE }, -#define I8_16 51 - { 8, 16, S390_OPERAND_SIGNED }, -/* QEMU-END */ -}; - - -/* Macros used to form opcodes. */ - -/* 8/16/48 bit opcodes. 
*/ -#define OP8(x) { x, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define OP16(x) { x >> 8, x & 255, 0x00, 0x00, 0x00, 0x00 } -#define OP48(x) { x >> 40, (x >> 32) & 255, (x >> 24) & 255, \ - (x >> 16) & 255, (x >> 8) & 255, x & 255} - -/* The new format of the INSTR_x_y and MASK_x_y defines is based - on the following rules: - 1) the middle part of the definition (x in INSTR_x_y) is the official - names of the instruction format that you can find in the principals - of operation. - 2) the last part of the definition (y in INSTR_x_y) gives you an idea - which operands the binary representation of the instruction has. - The meanings of the letters in y are: - a - access register - c - control register - d - displacement, 12 bit - f - floating pointer register - i - signed integer, 4, 8, 16 or 32 bit - l - length, 4 or 8 bit - p - pc relative - r - general purpose register - u - unsigned integer, 4, 8, 16 or 32 bit - m - mode field, 4 bit - 0 - operand skipped. - The order of the letters reflects the layout of the format in - storage and not the order of the parameters of the instructions. - The use of the letters is not a 100% match with the PoP but it is - quite close. - - For example the instruction "mvo" is defined in the PoP as follows: - - MVO D1(L1,B1),D2(L2,B2) [SS] - - -------------------------------------- - | 'F1' | L1 | L2 | B1 | D1 | B2 | D2 | - -------------------------------------- - 0 8 12 16 20 32 36 - - The instruction format is: INSTR_SS_LLRDRD / MASK_SS_LLRDRD. */ - -#define INSTR_E 2, { 0,0,0,0,0,0 } /* e.g. pr */ -#define INSTR_RIE_RRP 6, { R_8,R_12,J16_16,0,0,0 } /* e.g. brxhg */ -#define INSTR_RIL_0P 6, { J32_16,0,0,0,0 } /* e.g. jg */ -#define INSTR_RIL_RP 6, { R_8,J32_16,0,0,0,0 } /* e.g. brasl */ -#define INSTR_RIL_UP 6, { U4_8,J32_16,0,0,0,0 } /* e.g. brcl */ -#define INSTR_RIL_RI 6, { R_8,I32_16,0,0,0,0 } /* e.g. afi */ -#define INSTR_RIL_RU 6, { R_8,U32_16,0,0,0,0 } /* e.g. alfi */ -#define INSTR_RI_0P 4, { J16_16,0,0,0,0,0 } /* e.g. j */ -#define INSTR_RI_RI 4, { R_8,I16_16,0,0,0,0 } /* e.g. ahi */ -#define INSTR_RI_RP 4, { R_8,J16_16,0,0,0,0 } /* e.g. brct */ -#define INSTR_RI_RU 4, { R_8,U16_16,0,0,0,0 } /* e.g. tml */ -#define INSTR_RI_UP 4, { U4_8,J16_16,0,0,0,0 } /* e.g. brc */ -#define INSTR_RRE_00 4, { 0,0,0,0,0,0 } /* e.g. palb */ -#define INSTR_RRE_0R 4, { R_28,0,0,0,0,0 } /* e.g. tb */ -#define INSTR_RRE_AA 4, { A_24,A_28,0,0,0,0 } /* e.g. cpya */ -#define INSTR_RRE_AR 4, { A_24,R_28,0,0,0,0 } /* e.g. sar */ -#define INSTR_RRE_F0 4, { F_24,0,0,0,0,0 } /* e.g. sqer */ -#define INSTR_RRE_FF 4, { F_24,F_28,0,0,0,0 } /* e.g. debr */ -#define INSTR_RRE_R0 4, { R_24,0,0,0,0,0 } /* e.g. ipm */ -#define INSTR_RRE_RA 4, { R_24,A_28,0,0,0,0 } /* e.g. ear */ -#define INSTR_RRE_RF 4, { R_24,F_28,0,0,0,0 } /* e.g. cefbr */ -#define INSTR_RRE_RR 4, { R_24,R_28,0,0,0,0 } /* e.g. lura */ -#define INSTR_RRE_FR 4, { F_24,R_28,0,0,0,0 } /* e.g. ldgr */ -/* Actually efpc and sfpc do not take an optional operand. - This is just a workaround for existing code e.g. glibc. */ -#define INSTR_RRE_RR_OPT 4, { R_24,RO_28,0,0,0,0 } /* efpc, sfpc */ -#define INSTR_RRF_F0FF 4, { F_16,F_24,F_28,0,0,0 } /* e.g. madbr */ -/* QEMU-MOD */ -#define INSTR_RRF_F0FF2 4, { F_24,F_28,F_16,0,0,0 } /* e.g. cpsdr */ -/* QEMU-END */ -#define INSTR_RRF_F0FR 4, { F_24,F_16,R_28,0,0,0 } /* e.g. iedtr */ -#define INSTR_RRF_FUFF 4, { F_24,F_16,F_28,U4_20,0,0 } /* e.g. didbr */ -#define INSTR_RRF_RURR 4, { R_24,R_28,R_16,U4_20,0,0 } /* e.g. .insn */ -#define INSTR_RRF_R0RR 4, { R_24,R_28,R_16,0,0,0 } /* e.g. 
idte */ -#define INSTR_RRF_U0FF 4, { F_24,U4_16,F_28,0,0,0 } /* e.g. fixr */ -#define INSTR_RRF_U0RF 4, { R_24,U4_16,F_28,0,0,0 } /* e.g. cfebr */ -#define INSTR_RRF_UUFF 4, { F_24,U4_16,F_28,U4_20,0,0 } /* e.g. fidtr */ -#define INSTR_RRF_0UFF 4, { F_24,F_28,U4_20,0,0,0 } /* e.g. ldetr */ -#define INSTR_RRF_FFFU 4, { F_24,F_16,F_28,U4_20,0,0 } /* e.g. qadtr */ -#define INSTR_RRF_M0RR 4, { R_24,R_28,M_16,0,0,0 } /* e.g. sske */ -#define INSTR_RR_0R 2, { R_12, 0,0,0,0,0 } /* e.g. br */ -#define INSTR_RR_FF 2, { F_8,F_12,0,0,0,0 } /* e.g. adr */ -#define INSTR_RR_R0 2, { R_8, 0,0,0,0,0 } /* e.g. spm */ -#define INSTR_RR_RR 2, { R_8,R_12,0,0,0,0 } /* e.g. lr */ -#define INSTR_RR_U0 2, { U8_8, 0,0,0,0,0 } /* e.g. svc */ -#define INSTR_RR_UR 2, { U4_8,R_12,0,0,0,0 } /* e.g. bcr */ -#define INSTR_RRR_F0FF 4, { F_24,F_28,F_16,0,0,0 } /* e.g. ddtr */ -#define INSTR_RSE_RRRD 6, { R_8,R_12,D_20,B_16,0,0 } /* e.g. lmh */ -#define INSTR_RSE_CCRD 6, { C_8,C_12,D_20,B_16,0,0 } /* e.g. lmh */ -#define INSTR_RSE_RURD 6, { R_8,U4_12,D_20,B_16,0,0 } /* e.g. icmh */ -#define INSTR_RSL_R0RD 6, { R_8,D_20,B_16,0,0,0 } /* e.g. tp */ -#define INSTR_RSI_RRP 4, { R_8,R_12,J16_16,0,0,0 } /* e.g. brxh */ -#define INSTR_RSY_RRRD 6, { R_8,R_12,D20_20,B_16,0,0 } /* e.g. stmy */ -#define INSTR_RSY_RURD 6, { R_8,U4_12,D20_20,B_16,0,0 } /* e.g. icmh */ -#define INSTR_RSY_AARD 6, { A_8,A_12,D20_20,B_16,0,0 } /* e.g. lamy */ -#define INSTR_RSY_CCRD 6, { C_8,C_12,D20_20,B_16,0,0 } /* e.g. lamy */ -#define INSTR_RS_AARD 4, { A_8,A_12,D_20,B_16,0,0 } /* e.g. lam */ -#define INSTR_RS_CCRD 4, { C_8,C_12,D_20,B_16,0,0 } /* e.g. lctl */ -#define INSTR_RS_R0RD 4, { R_8,D_20,B_16,0,0,0 } /* e.g. sll */ -#define INSTR_RS_RRRD 4, { R_8,R_12,D_20,B_16,0,0 } /* e.g. cs */ -#define INSTR_RS_RURD 4, { R_8,U4_12,D_20,B_16,0,0 } /* e.g. icm */ -#define INSTR_RXE_FRRD 6, { F_8,D_20,X_12,B_16,0,0 } /* e.g. axbr */ -#define INSTR_RXE_RRRD 6, { R_8,D_20,X_12,B_16,0,0 } /* e.g. lg */ -#define INSTR_RXF_FRRDF 6, { F_32,F_8,D_20,X_12,B_16,0 } /* e.g. madb */ -#define INSTR_RXF_RRRDR 6, { R_32,R_8,D_20,X_12,B_16,0 } /* e.g. .insn */ -#define INSTR_RXY_RRRD 6, { R_8,D20_20,X_12,B_16,0,0 } /* e.g. ly */ -#define INSTR_RXY_FRRD 6, { F_8,D20_20,X_12,B_16,0,0 } /* e.g. ley */ -#define INSTR_RX_0RRD 4, { D_20,X_12,B_16,0,0,0 } /* e.g. be */ -#define INSTR_RX_FRRD 4, { F_8,D_20,X_12,B_16,0,0 } /* e.g. ae */ -#define INSTR_RX_RRRD 4, { R_8,D_20,X_12,B_16,0,0 } /* e.g. l */ -#define INSTR_RX_URRD 4, { U4_8,D_20,X_12,B_16,0,0 } /* e.g. bc */ -#define INSTR_SI_URD 4, { D_20,B_16,U8_8,0,0,0 } /* e.g. cli */ -#define INSTR_SIY_URD 6, { D20_20,B_16,U8_8,0,0,0 } /* e.g. tmy */ -#define INSTR_SSE_RDRD 6, { D_20,B_16,D_36,B_32,0,0 } /* e.g. mvsdk */ -#define INSTR_SS_L0RDRD 6, { D_20,L8_8,B_16,D_36,B_32,0 } /* e.g. mvc */ -#define INSTR_SS_L2RDRD 6, { D_20,B_16,D_36,L8_8,B_32,0 } /* e.g. pka */ -#define INSTR_SS_LIRDRD 6, { D_20,L4_8,B_16,D_36,B_32,U4_12 } /* e.g. srp */ -#define INSTR_SS_LLRDRD 6, { D_20,L4_8,B_16,D_36,L4_12,B_32 } /* e.g. pack */ -#define INSTR_SS_RRRDRD 6, { D_20,R_8,B_16,D_36,B_32,R_12 } /* e.g. mvck */ -#define INSTR_SS_RRRDRD2 6, { R_8,D_20,B_16,R_12,D_36,B_32 } /* e.g. plo */ -#define INSTR_SS_RRRDRD3 6, { R_8,R_12,D_20,B_16,D_36,B_32 } /* e.g. lmd */ -#define INSTR_S_00 4, { 0,0,0,0,0,0 } /* e.g. hsch */ -#define INSTR_S_RD 4, { D_20,B_16,0,0,0,0 } /* e.g. lpsw */ -#define INSTR_SSF_RRDRD 6, { D_20,B_16,D_36,B_32,R_8,0 } /* e.g. 
mvcos */ - -#define MASK_E { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RIE_RRP { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define MASK_RIL_0P { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RIL_RP { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RIL_UP { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RIL_RI { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RIL_RU { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RI_0P { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RI_RI { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RI_RP { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RI_RU { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RI_UP { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RRE_00 { 0xff, 0xff, 0xff, 0xff, 0x00, 0x00 } -#define MASK_RRE_0R { 0xff, 0xff, 0xff, 0xf0, 0x00, 0x00 } -#define MASK_RRE_AA { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } -#define MASK_RRE_AR { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } -#define MASK_RRE_F0 { 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00 } -#define MASK_RRE_FF { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } -#define MASK_RRE_R0 { 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00 } -#define MASK_RRE_RA { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } -#define MASK_RRE_RF { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } -#define MASK_RRE_RR { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } -#define MASK_RRE_FR { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } -#define MASK_RRE_RR_OPT { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } -#define MASK_RRF_F0FF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } -#define MASK_RRF_F0FF2 { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } -#define MASK_RRF_F0FR { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } -#define MASK_RRF_FUFF { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RRF_RURR { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RRF_R0RR { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RRF_U0FF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } -#define MASK_RRF_U0RF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } -#define MASK_RRF_UUFF { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RRF_0UFF { 0xff, 0xff, 0xf0, 0x00, 0x00, 0x00 } -#define MASK_RRF_FFFU { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RRF_M0RR { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } -#define MASK_RR_0R { 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RR_FF { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RR_R0 { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RR_RR { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RR_U0 { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RR_UR { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RRR_F0FF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } -#define MASK_RSE_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define MASK_RSE_CCRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define MASK_RSE_RURD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define MASK_RSL_R0RD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define MASK_RSI_RRP { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RS_AARD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RS_CCRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RS_R0RD { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RS_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RS_RURD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RSY_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define MASK_RSY_RURD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define MASK_RSY_AARD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define MASK_RSY_CCRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define MASK_RXE_FRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define MASK_RXE_RRRD 
{ 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define MASK_RXF_FRRDF { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define MASK_RXF_RRRDR { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define MASK_RXY_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define MASK_RXY_FRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define MASK_RX_0RRD { 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RX_FRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RX_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_RX_URRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_SI_URD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_SIY_URD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define MASK_SSE_RDRD { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } -#define MASK_SS_L0RDRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_SS_L2RDRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_SS_LIRDRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_SS_LLRDRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_SS_RRRDRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_SS_RRRDRD2 { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_SS_RRRDRD3 { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define MASK_S_00 { 0xff, 0xff, 0xff, 0xff, 0x00, 0x00 } -#define MASK_S_RD { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } -#define MASK_SSF_RRDRD { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } - -/* QEMU-ADD: */ -#define INSTR_RIE_MRRP 6, { M4_32, R_8, R_12, J16_16, 0, 0 } /* e.g. crj */ -#define MASK_RIE_MRRP { 0xff, 0x00, 0x00, 0x00, 0x0f, 0xff } - -#define INSTR_RIE_MRIP 6, { M4_12, R_8, I8_32, J16_16, 0, 0 } /* e.g. cij */ -#define MASK_RIE_MRIP { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } - -#define INSTR_RIE_RRIII 6, { R_8, R_12, U8_16, U8_24, U8_32, 0 } /* risbg */ -#define MASK_RIE_RRIII { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define INSTR_RIE_MRI 6, { M4_32, R_8, I16_16, 0, 0, 0 } /* e.g. cit */ -#define MASK_RIE_MRI { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define INSTR_RIE_MRU 6, { M4_32, R_8, U16_16, 0, 0, 0 } /* e.g. clfit */ -#define MASK_RIE_MRU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -#define INSTR_RIE_RRI 6, { R_8, R_12, I16_16, 0, 0, 0 } -#define MASK_RIE_RRI { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } - -#define INSTR_RXY_URRD 6, { U8_8, D20_20, X_12, B_16, 0, 0 } -#define MASK_RXY_URRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } - -#define INSTR_SIL_DRI 6, { D_20, B_16, I16_32, 0, 0, 0 } -#define MASK_SIL_DRI { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } - -#define INSTR_RSY_MRRD 6, { M4_12, R_8, D20_20, B_16, 0, 0 } -#define MASK_SRY_MRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } - -#define INSTR_RRF_MRR 6, { M4_16, R_24, R_28, 0, 0, 0 } -#define MASK_RRF_MRR { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } - -#define INSTR_SIY_DRI 6, { D20_20, B_16, I8_16, 0, 0, 0 } -#define MASK_SIY_DRI { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } -/* QEMU-END */ - -/* include "s390-opc.tab" generated from opcodes/s390-opc.txt rev 1.17 */ -/* The opcode table. This file was generated by s390-mkopc. - - The format of the opcode table is: - - NAME OPCODE MASK OPERANDS - - Name is the name of the instruction. - OPCODE is the instruction opcode. - MASK is the opcode mask; this is used to tell the disassembler - which bits in the actual opcode must match OPCODE. - OPERANDS is the list of operands. - - The disassembler reads the table in order and prints the first - instruction which matches. 
*/ - -static const struct s390_opcode s390_opcodes[] = - { - { "dp", OP8(0xfdLL), MASK_SS_LLRDRD, INSTR_SS_LLRDRD, 3, 0}, - { "mp", OP8(0xfcLL), MASK_SS_LLRDRD, INSTR_SS_LLRDRD, 3, 0}, - { "sp", OP8(0xfbLL), MASK_SS_LLRDRD, INSTR_SS_LLRDRD, 3, 0}, - { "ap", OP8(0xfaLL), MASK_SS_LLRDRD, INSTR_SS_LLRDRD, 3, 0}, - { "cp", OP8(0xf9LL), MASK_SS_LLRDRD, INSTR_SS_LLRDRD, 3, 0}, - { "zap", OP8(0xf8LL), MASK_SS_LLRDRD, INSTR_SS_LLRDRD, 3, 0}, - { "unpk", OP8(0xf3LL), MASK_SS_LLRDRD, INSTR_SS_LLRDRD, 3, 0}, - { "pack", OP8(0xf2LL), MASK_SS_LLRDRD, INSTR_SS_LLRDRD, 3, 0}, - { "mvo", OP8(0xf1LL), MASK_SS_LLRDRD, INSTR_SS_LLRDRD, 3, 0}, - { "srp", OP8(0xf0LL), MASK_SS_LIRDRD, INSTR_SS_LIRDRD, 3, 0}, - { "lmd", OP8(0xefLL), MASK_SS_RRRDRD3, INSTR_SS_RRRDRD3, 2, 2}, - { "plo", OP8(0xeeLL), MASK_SS_RRRDRD2, INSTR_SS_RRRDRD2, 3, 0}, - { "stdy", OP48(0xed0000000067LL), MASK_RXY_FRRD, INSTR_RXY_FRRD, 2, 3}, - { "stey", OP48(0xed0000000066LL), MASK_RXY_FRRD, INSTR_RXY_FRRD, 2, 3}, - { "ldy", OP48(0xed0000000065LL), MASK_RXY_FRRD, INSTR_RXY_FRRD, 2, 3}, - { "ley", OP48(0xed0000000064LL), MASK_RXY_FRRD, INSTR_RXY_FRRD, 2, 3}, - { "tgxt", OP48(0xed0000000059LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 2, 5}, - { "tcxt", OP48(0xed0000000058LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 2, 5}, - { "tgdt", OP48(0xed0000000055LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 2, 5}, - { "tcdt", OP48(0xed0000000054LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 2, 5}, - { "tget", OP48(0xed0000000051LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 2, 5}, - { "tcet", OP48(0xed0000000050LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 2, 5}, - { "srxt", OP48(0xed0000000049LL), MASK_RXF_FRRDF, INSTR_RXF_FRRDF, 2, 5}, - { "slxt", OP48(0xed0000000048LL), MASK_RXF_FRRDF, INSTR_RXF_FRRDF, 2, 5}, - { "srdt", OP48(0xed0000000041LL), MASK_RXF_FRRDF, INSTR_RXF_FRRDF, 2, 5}, - { "sldt", OP48(0xed0000000040LL), MASK_RXF_FRRDF, INSTR_RXF_FRRDF, 2, 5}, - { "msd", OP48(0xed000000003fLL), MASK_RXF_FRRDF, INSTR_RXF_FRRDF, 3, 3}, - { "mad", OP48(0xed000000003eLL), MASK_RXF_FRRDF, INSTR_RXF_FRRDF, 3, 3}, - { "myh", OP48(0xed000000003dLL), MASK_RXF_FRRDF, INSTR_RXF_FRRDF, 2, 4}, - { "mayh", OP48(0xed000000003cLL), MASK_RXF_FRRDF, INSTR_RXF_FRRDF, 2, 4}, - { "my", OP48(0xed000000003bLL), MASK_RXF_FRRDF, INSTR_RXF_FRRDF, 2, 4}, - { "may", OP48(0xed000000003aLL), MASK_RXF_FRRDF, INSTR_RXF_FRRDF, 2, 4}, - { "myl", OP48(0xed0000000039LL), MASK_RXF_FRRDF, INSTR_RXF_FRRDF, 2, 4}, - { "mayl", OP48(0xed0000000038LL), MASK_RXF_FRRDF, INSTR_RXF_FRRDF, 2, 4}, - { "mee", OP48(0xed0000000037LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "sqe", OP48(0xed0000000034LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "mse", OP48(0xed000000002fLL), MASK_RXF_FRRDF, INSTR_RXF_FRRDF, 3, 3}, - { "mae", OP48(0xed000000002eLL), MASK_RXF_FRRDF, INSTR_RXF_FRRDF, 3, 3}, - { "lxe", OP48(0xed0000000026LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "lxd", OP48(0xed0000000025LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "lde", OP48(0xed0000000024LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "msdb", OP48(0xed000000001fLL), MASK_RXF_FRRDF, INSTR_RXF_FRRDF, 3, 0}, - { "madb", OP48(0xed000000001eLL), MASK_RXF_FRRDF, INSTR_RXF_FRRDF, 3, 0}, - { "ddb", OP48(0xed000000001dLL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "mdb", OP48(0xed000000001cLL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "sdb", OP48(0xed000000001bLL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "adb", OP48(0xed000000001aLL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "cdb", OP48(0xed0000000019LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "kdb", OP48(0xed0000000018LL), MASK_RXE_FRRD, 
INSTR_RXE_FRRD, 3, 0}, - { "meeb", OP48(0xed0000000017LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "sqdb", OP48(0xed0000000015LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "sqeb", OP48(0xed0000000014LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "tcxb", OP48(0xed0000000012LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "tcdb", OP48(0xed0000000011LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "tceb", OP48(0xed0000000010LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "mseb", OP48(0xed000000000fLL), MASK_RXF_FRRDF, INSTR_RXF_FRRDF, 3, 0}, - { "maeb", OP48(0xed000000000eLL), MASK_RXF_FRRDF, INSTR_RXF_FRRDF, 3, 0}, - { "deb", OP48(0xed000000000dLL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "mdeb", OP48(0xed000000000cLL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "seb", OP48(0xed000000000bLL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "aeb", OP48(0xed000000000aLL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "ceb", OP48(0xed0000000009LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "keb", OP48(0xed0000000008LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "mxdb", OP48(0xed0000000007LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "lxeb", OP48(0xed0000000006LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "lxdb", OP48(0xed0000000005LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "ldeb", OP48(0xed0000000004LL), MASK_RXE_FRRD, INSTR_RXE_FRRD, 3, 0}, - { "brxlg", OP48(0xec0000000045LL), MASK_RIE_RRP, INSTR_RIE_RRP, 2, 2}, - { "brxhg", OP48(0xec0000000044LL), MASK_RIE_RRP, INSTR_RIE_RRP, 2, 2}, -/* QEMU-ADD: */ - { "crj", OP48(0xec0000000076LL), MASK_RIE_MRRP, INSTR_RIE_MRRP, 3, 6}, - { "cgrj", OP48(0xec0000000064LL), MASK_RIE_MRRP, INSTR_RIE_MRRP, 3, 6}, - { "clrj", OP48(0xec0000000077LL), MASK_RIE_MRRP, INSTR_RIE_MRRP, 3, 6}, - { "clgrj", OP48(0xec0000000065LL), MASK_RIE_MRRP, INSTR_RIE_MRRP, 3, 6}, - { "cij", OP48(0xec000000007eLL), MASK_RIE_MRIP, INSTR_RIE_MRIP, 3, 6}, - { "cgij", OP48(0xec000000007cLL), MASK_RIE_MRIP, INSTR_RIE_MRIP, 3, 6}, - { "clij", OP48(0xec000000007fLL), MASK_RIE_MRIP, INSTR_RIE_MRIP, 3, 6}, - { "clgij", OP48(0xec000000007dLL), MASK_RIE_MRIP, INSTR_RIE_MRIP, 3, 6}, - { "risbg", OP48(0xec0000000055LL), MASK_RIE_RRIII, INSTR_RIE_RRIII, 3, 6}, - { "risbhg", OP48(0xec000000005dLL), MASK_RIE_RRIII, INSTR_RIE_RRIII, 3, 6}, - { "risblg", OP48(0xec0000000051LL), MASK_RIE_RRIII, INSTR_RIE_RRIII, 3, 6}, - { "rnsbg", OP48(0xec0000000054LL), MASK_RIE_RRIII, INSTR_RIE_RRIII, 3, 6}, - { "rosbg", OP48(0xec0000000056LL), MASK_RIE_RRIII, INSTR_RIE_RRIII, 3, 6}, - { "rxsbg", OP48(0xec0000000057LL), MASK_RIE_RRIII, INSTR_RIE_RRIII, 3, 6}, - { "cit", OP48(0xec0000000072LL), MASK_RIE_MRI, INSTR_RIE_MRI, 3, 6}, - { "cgit", OP48(0xec0000000070LL), MASK_RIE_MRI, INSTR_RIE_MRI, 3, 6}, - { "clfit", OP48(0xec0000000073LL), MASK_RIE_MRU, INSTR_RIE_MRU, 3, 6}, - { "clgit", OP48(0xec0000000071LL), MASK_RIE_MRU, INSTR_RIE_MRU, 3, 6}, - { "ahik", OP48(0xec00000000d8LL), MASK_RIE_RRI, INSTR_RIE_RRI, 3, 6}, - { "aghik", OP48(0xec00000000d9LL), MASK_RIE_RRI, INSTR_RIE_RRI, 3, 6}, - { "alhsik", OP48(0xec00000000daLL), MASK_RIE_RRI, INSTR_RIE_RRI, 3, 6}, - { "alghsik", OP48(0xec00000000dbLL), MASK_RIE_RRI, INSTR_RIE_RRI, 3, 6}, -/* QEMU-END */ - { "tp", OP48(0xeb00000000c0LL), MASK_RSL_R0RD, INSTR_RSL_R0RD, 3, 0}, - { "stamy", OP48(0xeb000000009bLL), MASK_RSY_AARD, INSTR_RSY_AARD, 2, 3}, - { "lamy", OP48(0xeb000000009aLL), MASK_RSY_AARD, INSTR_RSY_AARD, 2, 3}, - { "lmy", OP48(0xeb0000000098LL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "lmh", OP48(0xeb0000000096LL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "lmh", 
OP48(0xeb0000000096LL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 2, 2}, - { "stmy", OP48(0xeb0000000090LL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "clclu", OP48(0xeb000000008fLL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "mvclu", OP48(0xeb000000008eLL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 3, 3}, - { "mvclu", OP48(0xeb000000008eLL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 3, 0}, - { "icmy", OP48(0xeb0000000081LL), MASK_RSY_RURD, INSTR_RSY_RURD, 2, 3}, - { "icmh", OP48(0xeb0000000080LL), MASK_RSY_RURD, INSTR_RSY_RURD, 2, 3}, - { "icmh", OP48(0xeb0000000080LL), MASK_RSE_RURD, INSTR_RSE_RURD, 2, 2}, - { "xiy", OP48(0xeb0000000057LL), MASK_SIY_URD, INSTR_SIY_URD, 2, 3}, - { "oiy", OP48(0xeb0000000056LL), MASK_SIY_URD, INSTR_SIY_URD, 2, 3}, - { "cliy", OP48(0xeb0000000055LL), MASK_SIY_URD, INSTR_SIY_URD, 2, 3}, - { "niy", OP48(0xeb0000000054LL), MASK_SIY_URD, INSTR_SIY_URD, 2, 3}, - { "mviy", OP48(0xeb0000000052LL), MASK_SIY_URD, INSTR_SIY_URD, 2, 3}, - { "tmy", OP48(0xeb0000000051LL), MASK_SIY_URD, INSTR_SIY_URD, 2, 3}, - { "bxleg", OP48(0xeb0000000045LL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "bxleg", OP48(0xeb0000000045LL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 2, 2}, - { "bxhg", OP48(0xeb0000000044LL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "bxhg", OP48(0xeb0000000044LL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 2, 2}, - { "cdsg", OP48(0xeb000000003eLL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "cdsg", OP48(0xeb000000003eLL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 2, 2}, - { "cdsy", OP48(0xeb0000000031LL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "csg", OP48(0xeb0000000030LL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "csg", OP48(0xeb0000000030LL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 2, 2}, - { "lctlg", OP48(0xeb000000002fLL), MASK_RSY_CCRD, INSTR_RSY_CCRD, 2, 3}, - { "lctlg", OP48(0xeb000000002fLL), MASK_RSE_CCRD, INSTR_RSE_CCRD, 2, 2}, - { "stcmy", OP48(0xeb000000002dLL), MASK_RSY_RURD, INSTR_RSY_RURD, 2, 3}, - { "stcmh", OP48(0xeb000000002cLL), MASK_RSY_RURD, INSTR_RSY_RURD, 2, 3}, - { "stcmh", OP48(0xeb000000002cLL), MASK_RSE_RURD, INSTR_RSE_RURD, 2, 2}, - { "stmh", OP48(0xeb0000000026LL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "stmh", OP48(0xeb0000000026LL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 2, 2}, - { "stctg", OP48(0xeb0000000025LL), MASK_RSY_CCRD, INSTR_RSY_CCRD, 2, 3}, - { "stctg", OP48(0xeb0000000025LL), MASK_RSE_CCRD, INSTR_RSE_CCRD, 2, 2}, - { "stmg", OP48(0xeb0000000024LL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "stmg", OP48(0xeb0000000024LL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 2, 2}, - { "clmy", OP48(0xeb0000000021LL), MASK_RSY_RURD, INSTR_RSY_RURD, 2, 3}, - { "clmh", OP48(0xeb0000000020LL), MASK_RSY_RURD, INSTR_RSY_RURD, 2, 3}, - { "clmh", OP48(0xeb0000000020LL), MASK_RSE_RURD, INSTR_RSE_RURD, 2, 2}, - { "rll", OP48(0xeb000000001dLL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 3, 3}, - { "rll", OP48(0xeb000000001dLL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 3, 2}, - { "rllg", OP48(0xeb000000001cLL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "rllg", OP48(0xeb000000001cLL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 2, 2}, - { "csy", OP48(0xeb0000000014LL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "tracg", OP48(0xeb000000000fLL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "tracg", OP48(0xeb000000000fLL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 2, 2}, - { "sllg", OP48(0xeb000000000dLL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "sllg", OP48(0xeb000000000dLL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 2, 2}, - { "srlg", OP48(0xeb000000000cLL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "srlg", OP48(0xeb000000000cLL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 2, 2}, - { "slag", 
OP48(0xeb000000000bLL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "slag", OP48(0xeb000000000bLL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 2, 2}, - { "srag", OP48(0xeb000000000aLL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "srag", OP48(0xeb000000000aLL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 2, 2}, - { "lmg", OP48(0xeb0000000004LL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 2, 3}, - { "lmg", OP48(0xeb0000000004LL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 2, 2}, -/* QEMU-ADD: */ - { "loc", OP48(0xeb00000000f2LL), MASK_SRY_MRRD, INSTR_RSY_MRRD, 3, 6}, - { "locg", OP48(0xeb00000000e2LL), MASK_SRY_MRRD, INSTR_RSY_MRRD, 3, 6}, - { "stoc", OP48(0xeb00000000f3LL), MASK_SRY_MRRD, INSTR_RSY_MRRD, 3, 6}, - { "stocg", OP48(0xeb00000000e3LL), MASK_SRY_MRRD, INSTR_RSY_MRRD, 3, 6}, - { "srak", OP48(0xeb00000000dcLL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 3, 6}, - { "slak", OP48(0xeb00000000ddLL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 3, 6}, - { "srlk", OP48(0xeb00000000deLL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 3, 6}, - { "sllk", OP48(0xeb00000000dfLL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 3, 6}, - { "asi", OP48(0xeb000000006aLL), MASK_SIY_DRI, INSTR_SIY_DRI, 3, 6}, - { "alsi", OP48(0xeb000000006eLL), MASK_SIY_DRI, INSTR_SIY_DRI, 3, 6}, - { "agsi", OP48(0xeb000000007aLL), MASK_SIY_DRI, INSTR_SIY_DRI, 3, 6}, - { "algsi", OP48(0xeb000000007eLL), MASK_SIY_DRI, INSTR_SIY_DRI, 3, 6}, -/* QEMU-END */ - { "unpka", OP8(0xeaLL), MASK_SS_L0RDRD, INSTR_SS_L0RDRD, 3, 0}, - { "pka", OP8(0xe9LL), MASK_SS_L2RDRD, INSTR_SS_L2RDRD, 3, 0}, - { "mvcin", OP8(0xe8LL), MASK_SS_L0RDRD, INSTR_SS_L0RDRD, 3, 0}, - { "mvcdk", OP16(0xe50fLL), MASK_SSE_RDRD, INSTR_SSE_RDRD, 3, 0}, - { "mvcsk", OP16(0xe50eLL), MASK_SSE_RDRD, INSTR_SSE_RDRD, 3, 0}, - { "tprot", OP16(0xe501LL), MASK_SSE_RDRD, INSTR_SSE_RDRD, 3, 0}, - { "strag", OP48(0xe50000000002LL), MASK_SSE_RDRD, INSTR_SSE_RDRD, 2, 2}, - { "lasp", OP16(0xe500LL), MASK_SSE_RDRD, INSTR_SSE_RDRD, 3, 0}, -/* QEMU-ADD: */ - { "mvhhi", OP16(0xe544LL), MASK_SIL_DRI, INSTR_SIL_DRI, 3, 6}, - { "mvghi", OP16(0xe548LL), MASK_SIL_DRI, INSTR_SIL_DRI, 3, 6}, - { "mvhi", OP16(0xe54cLL), MASK_SIL_DRI, INSTR_SIL_DRI, 3, 6}, - { "chhsi", OP16(0xe554LL), MASK_SIL_DRI, INSTR_SIL_DRI, 3, 6}, - { "clhhsi", OP16(0xe555LL), MASK_SIL_DRI, INSTR_SIL_DRI, 3, 6}, - { "cghsi", OP16(0xe558LL), MASK_SIL_DRI, INSTR_SIL_DRI, 3, 6}, - { "clghsi", OP16(0xe559LL), MASK_SIL_DRI, INSTR_SIL_DRI, 3, 6}, - { "chsi", OP16(0xe55cLL), MASK_SIL_DRI, INSTR_SIL_DRI, 3, 6}, - { "clfhsi", OP16(0xe55dLL), MASK_SIL_DRI, INSTR_SIL_DRI, 3, 6}, -/* QEMU-END */ - { "slb", OP48(0xe30000000099LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 3, 3}, - { "slb", OP48(0xe30000000099LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 3, 2}, - { "alc", OP48(0xe30000000098LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 3, 3}, - { "alc", OP48(0xe30000000098LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 3, 2}, - { "dl", OP48(0xe30000000097LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 3, 3}, - { "dl", OP48(0xe30000000097LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 3, 2}, - { "ml", OP48(0xe30000000096LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 3, 3}, - { "ml", OP48(0xe30000000096LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 3, 2}, - { "llh", OP48(0xe30000000095LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 4}, - { "llc", OP48(0xe30000000094LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 4}, - { "llgh", OP48(0xe30000000091LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "llgh", OP48(0xe30000000091LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "llgc", OP48(0xe30000000090LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "llgc", OP48(0xe30000000090LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "lpq", 
OP48(0xe3000000008fLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "lpq", OP48(0xe3000000008fLL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "stpq", OP48(0xe3000000008eLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "stpq", OP48(0xe3000000008eLL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "slbg", OP48(0xe30000000089LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "slbg", OP48(0xe30000000089LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "alcg", OP48(0xe30000000088LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "alcg", OP48(0xe30000000088LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "dlg", OP48(0xe30000000087LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "dlg", OP48(0xe30000000087LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "mlg", OP48(0xe30000000086LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "mlg", OP48(0xe30000000086LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "xg", OP48(0xe30000000082LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "xg", OP48(0xe30000000082LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "og", OP48(0xe30000000081LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "og", OP48(0xe30000000081LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "ng", OP48(0xe30000000080LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "ng", OP48(0xe30000000080LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "shy", OP48(0xe3000000007bLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "ahy", OP48(0xe3000000007aLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "chy", OP48(0xe30000000079LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "lhy", OP48(0xe30000000078LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "lgb", OP48(0xe30000000077LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "lb", OP48(0xe30000000076LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "icy", OP48(0xe30000000073LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "stcy", OP48(0xe30000000072LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "lay", OP48(0xe30000000071LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "sthy", OP48(0xe30000000070LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "sly", OP48(0xe3000000005fLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "aly", OP48(0xe3000000005eLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "sy", OP48(0xe3000000005bLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "ay", OP48(0xe3000000005aLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "cy", OP48(0xe30000000059LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "ly", OP48(0xe30000000058LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "xy", OP48(0xe30000000057LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "oy", OP48(0xe30000000056LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "cly", OP48(0xe30000000055LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "ny", OP48(0xe30000000054LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "msy", OP48(0xe30000000051LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "sty", OP48(0xe30000000050LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "bctg", OP48(0xe30000000046LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "bctg", OP48(0xe30000000046LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "strvh", OP48(0xe3000000003fLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "strvh", OP48(0xe3000000003fLL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 3, 2}, - { "strv", OP48(0xe3000000003eLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 3, 3}, - { "strv", OP48(0xe3000000003eLL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 3, 2}, - { "clgf", OP48(0xe30000000031LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "clgf", OP48(0xe30000000031LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "cgf", OP48(0xe30000000030LL), MASK_RXY_RRRD, 
INSTR_RXY_RRRD, 2, 3}, - { "cgf", OP48(0xe30000000030LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "strvg", OP48(0xe3000000002fLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "strvg", OP48(0xe3000000002fLL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "cvdg", OP48(0xe3000000002eLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "cvdg", OP48(0xe3000000002eLL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "cvdy", OP48(0xe30000000026LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "stg", OP48(0xe30000000024LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "stg", OP48(0xe30000000024LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "clg", OP48(0xe30000000021LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "clg", OP48(0xe30000000021LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "cg", OP48(0xe30000000020LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "cg", OP48(0xe30000000020LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "lrvh", OP48(0xe3000000001fLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 3, 3}, - { "lrvh", OP48(0xe3000000001fLL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 3, 2}, - { "lrv", OP48(0xe3000000001eLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 3, 3}, - { "lrv", OP48(0xe3000000001eLL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 3, 2}, - { "dsgf", OP48(0xe3000000001dLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "dsgf", OP48(0xe3000000001dLL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "msgf", OP48(0xe3000000001cLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "msgf", OP48(0xe3000000001cLL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "slgf", OP48(0xe3000000001bLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "slgf", OP48(0xe3000000001bLL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "algf", OP48(0xe3000000001aLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "algf", OP48(0xe3000000001aLL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "sgf", OP48(0xe30000000019LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "sgf", OP48(0xe30000000019LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "agf", OP48(0xe30000000018LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "agf", OP48(0xe30000000018LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "llgt", OP48(0xe30000000017LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "llgt", OP48(0xe30000000017LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "llgf", OP48(0xe30000000016LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "llgf", OP48(0xe30000000016LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "lgh", OP48(0xe30000000015LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "lgh", OP48(0xe30000000015LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "lgf", OP48(0xe30000000014LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "lgf", OP48(0xe30000000014LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "lray", OP48(0xe30000000013LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "lt", OP48(0xe30000000012LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 4}, - { "lrvg", OP48(0xe3000000000fLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "lrvg", OP48(0xe3000000000fLL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "cvbg", OP48(0xe3000000000eLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "cvbg", OP48(0xe3000000000eLL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "dsg", OP48(0xe3000000000dLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "dsg", OP48(0xe3000000000dLL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "msg", OP48(0xe3000000000cLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "msg", OP48(0xe3000000000cLL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "slg", OP48(0xe3000000000bLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "slg", OP48(0xe3000000000bLL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { 
"alg", OP48(0xe3000000000aLL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "alg", OP48(0xe3000000000aLL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "sg", OP48(0xe30000000009LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "sg", OP48(0xe30000000009LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "ag", OP48(0xe30000000008LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "ag", OP48(0xe30000000008LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "cvby", OP48(0xe30000000006LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "lg", OP48(0xe30000000004LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "lg", OP48(0xe30000000004LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "lrag", OP48(0xe30000000003LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 3}, - { "lrag", OP48(0xe30000000003LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 2, 2}, - { "ltg", OP48(0xe30000000002LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 2, 4}, -/* QEMU-ADD: */ - { "pfd", OP48(0xe30000000036LL), MASK_RXY_URRD, INSTR_RXY_URRD, 3, 6}, -/* QEMU-END */ - { "unpku", OP8(0xe2LL), MASK_SS_L0RDRD, INSTR_SS_L0RDRD, 3, 0}, - { "pku", OP8(0xe1LL), MASK_SS_L0RDRD, INSTR_SS_L0RDRD, 3, 0}, - { "edmk", OP8(0xdfLL), MASK_SS_L0RDRD, INSTR_SS_L0RDRD, 3, 0}, - { "ed", OP8(0xdeLL), MASK_SS_L0RDRD, INSTR_SS_L0RDRD, 3, 0}, - { "trt", OP8(0xddLL), MASK_SS_L0RDRD, INSTR_SS_L0RDRD, 3, 0}, - { "tr", OP8(0xdcLL), MASK_SS_L0RDRD, INSTR_SS_L0RDRD, 3, 0}, - { "mvcs", OP8(0xdbLL), MASK_SS_RRRDRD, INSTR_SS_RRRDRD, 3, 0}, - { "mvcp", OP8(0xdaLL), MASK_SS_RRRDRD, INSTR_SS_RRRDRD, 3, 0}, - { "mvck", OP8(0xd9LL), MASK_SS_RRRDRD, INSTR_SS_RRRDRD, 3, 0}, - { "xc", OP8(0xd7LL), MASK_SS_L0RDRD, INSTR_SS_L0RDRD, 3, 0}, - { "oc", OP8(0xd6LL), MASK_SS_L0RDRD, INSTR_SS_L0RDRD, 3, 0}, - { "clc", OP8(0xd5LL), MASK_SS_L0RDRD, INSTR_SS_L0RDRD, 3, 0}, - { "nc", OP8(0xd4LL), MASK_SS_L0RDRD, INSTR_SS_L0RDRD, 3, 0}, - { "mvz", OP8(0xd3LL), MASK_SS_L0RDRD, INSTR_SS_L0RDRD, 3, 0}, - { "mvc", OP8(0xd2LL), MASK_SS_L0RDRD, INSTR_SS_L0RDRD, 3, 0}, - { "mvn", OP8(0xd1LL), MASK_SS_L0RDRD, INSTR_SS_L0RDRD, 3, 0}, - { "csst", OP16(0xc802LL), MASK_SSF_RRDRD, INSTR_SSF_RRDRD, 2, 5}, - { "ectg", OP16(0xc801LL), MASK_SSF_RRDRD, INSTR_SSF_RRDRD, 2, 5}, - { "mvcos", OP16(0xc800LL), MASK_SSF_RRDRD, INSTR_SSF_RRDRD, 2, 4}, -/* QEMU-ADD: */ - { "exrl", OP16(0xc600ll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "pfdrl", OP16(0xc602ll), MASK_RIL_UP, INSTR_RIL_UP, 3, 6}, - { "cghrl", OP16(0xc604ll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "chrl", OP16(0xc605ll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "clghrl", OP16(0xc606ll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "clhrl", OP16(0xc607ll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "cgrl", OP16(0xc608ll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "clgrl", OP16(0xc60all), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "cgfrl", OP16(0xc60cll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "crl", OP16(0xc60dll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "clgfrl", OP16(0xc60ell), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "clrl", OP16(0xc60fll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - - { "llhrl", OP16(0xc400ll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "lghrl", OP16(0xc404ll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "lhrl", OP16(0xc405ll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "llghrl", OP16(0xc406ll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "sthrl", OP16(0xc407ll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "lgrl", OP16(0xc408ll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "stgrl", OP16(0xc40bll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "lgfrl", OP16(0xc40cll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "lrl", OP16(0xc40dll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "llgfrl", 
OP16(0xc40ell), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, - { "strl", OP16(0xc40fll), MASK_RIL_RP, INSTR_RIL_RP, 3, 6}, -/* QEMU-END */ - { "clfi", OP16(0xc20fLL), MASK_RIL_RU, INSTR_RIL_RU, 2, 4}, - { "clgfi", OP16(0xc20eLL), MASK_RIL_RU, INSTR_RIL_RU, 2, 4}, - { "cfi", OP16(0xc20dLL), MASK_RIL_RI, INSTR_RIL_RI, 2, 4}, - { "cgfi", OP16(0xc20cLL), MASK_RIL_RI, INSTR_RIL_RI, 2, 4}, - { "alfi", OP16(0xc20bLL), MASK_RIL_RU, INSTR_RIL_RU, 2, 4}, - { "algfi", OP16(0xc20aLL), MASK_RIL_RU, INSTR_RIL_RU, 2, 4}, - { "afi", OP16(0xc209LL), MASK_RIL_RI, INSTR_RIL_RI, 2, 4}, - { "agfi", OP16(0xc208LL), MASK_RIL_RI, INSTR_RIL_RI, 2, 4}, - { "slfi", OP16(0xc205LL), MASK_RIL_RU, INSTR_RIL_RU, 2, 4}, - { "slgfi", OP16(0xc204LL), MASK_RIL_RU, INSTR_RIL_RU, 2, 4}, -/* QEMU-ADD: */ - { "msfi", OP16(0xc201ll), MASK_RIL_RI, INSTR_RIL_RI, 3, 6}, - { "msgfi", OP16(0xc200ll), MASK_RIL_RI, INSTR_RIL_RI, 3, 6}, -/* QEMU-END */ - { "jg", OP16(0xc0f4LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jgno", OP16(0xc0e4LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jgnh", OP16(0xc0d4LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jgnp", OP16(0xc0d4LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jgle", OP16(0xc0c4LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jgnl", OP16(0xc0b4LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jgnm", OP16(0xc0b4LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jghe", OP16(0xc0a4LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jgnlh", OP16(0xc094LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jge", OP16(0xc084LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jgz", OP16(0xc084LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jgne", OP16(0xc074LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jgnz", OP16(0xc074LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jglh", OP16(0xc064LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jgnhe", OP16(0xc054LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jgl", OP16(0xc044LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jgm", OP16(0xc044LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jgnle", OP16(0xc034LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jgh", OP16(0xc024LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jgp", OP16(0xc024LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "jgo", OP16(0xc014LL), MASK_RIL_0P, INSTR_RIL_0P, 3, 2}, - { "llilf", OP16(0xc00fLL), MASK_RIL_RU, INSTR_RIL_RU, 2, 4}, - { "llihf", OP16(0xc00eLL), MASK_RIL_RU, INSTR_RIL_RU, 2, 4}, - { "oilf", OP16(0xc00dLL), MASK_RIL_RU, INSTR_RIL_RU, 2, 4}, - { "oihf", OP16(0xc00cLL), MASK_RIL_RU, INSTR_RIL_RU, 2, 4}, - { "nilf", OP16(0xc00bLL), MASK_RIL_RU, INSTR_RIL_RU, 2, 4}, - { "nihf", OP16(0xc00aLL), MASK_RIL_RU, INSTR_RIL_RU, 2, 4}, - { "iilf", OP16(0xc009LL), MASK_RIL_RU, INSTR_RIL_RU, 2, 4}, - { "iihf", OP16(0xc008LL), MASK_RIL_RU, INSTR_RIL_RU, 2, 4}, - { "xilf", OP16(0xc007LL), MASK_RIL_RU, INSTR_RIL_RU, 2, 4}, - { "xihf", OP16(0xc006LL), MASK_RIL_RU, INSTR_RIL_RU, 2, 4}, - { "brasl", OP16(0xc005LL), MASK_RIL_RP, INSTR_RIL_RP, 3, 2}, - { "brcl", OP16(0xc004LL), MASK_RIL_UP, INSTR_RIL_UP, 3, 2}, - { "lgfi", OP16(0xc001LL), MASK_RIL_RI, INSTR_RIL_RI, 2, 4}, - { "larl", OP16(0xc000LL), MASK_RIL_RP, INSTR_RIL_RP, 3, 2}, - { "icm", OP8(0xbfLL), MASK_RS_RURD, INSTR_RS_RURD, 3, 0}, - { "stcm", OP8(0xbeLL), MASK_RS_RURD, INSTR_RS_RURD, 3, 0}, - { "clm", OP8(0xbdLL), MASK_RS_RURD, INSTR_RS_RURD, 3, 0}, - { "cds", OP8(0xbbLL), MASK_RS_RRRD, INSTR_RS_RRRD, 3, 0}, - { "cs", OP8(0xbaLL), MASK_RS_RRRD, INSTR_RS_RRRD, 3, 0}, - { "cu42", OP16(0xb9b3LL), MASK_RRF_M0RR, INSTR_RRF_M0RR, 2, 4}, - { "cu41", OP16(0xb9b2LL), MASK_RRF_M0RR, INSTR_RRF_M0RR, 2, 4}, - { "cu24", OP16(0xb9b1LL), MASK_RRF_M0RR, 
INSTR_RRF_M0RR, 2, 4}, - { "cu14", OP16(0xb9b0LL), MASK_RRF_M0RR, INSTR_RRF_M0RR, 2, 4}, - { "lptea", OP16(0xb9aaLL), MASK_RRF_RURR, INSTR_RRF_RURR, 2, 4}, - { "esea", OP16(0xb99dLL), MASK_RRE_R0, INSTR_RRE_R0, 2, 2}, - { "slbr", OP16(0xb999LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 2}, - { "alcr", OP16(0xb998LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 2}, - { "dlr", OP16(0xb997LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 2}, - { "mlr", OP16(0xb996LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 2}, - { "llhr", OP16(0xb995LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 4}, - { "llcr", OP16(0xb994LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 4}, - { "troo", OP16(0xb993LL), MASK_RRF_M0RR, INSTR_RRF_M0RR, 3, 4}, - { "troo", OP16(0xb993LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "trot", OP16(0xb992LL), MASK_RRF_M0RR, INSTR_RRF_M0RR, 3, 4}, - { "trot", OP16(0xb992LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "trto", OP16(0xb991LL), MASK_RRF_M0RR, INSTR_RRF_M0RR, 3, 4}, - { "trto", OP16(0xb991LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "trtt", OP16(0xb990LL), MASK_RRF_M0RR, INSTR_RRF_M0RR, 3, 4}, - { "trtt", OP16(0xb990LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "idte", OP16(0xb98eLL), MASK_RRF_R0RR, INSTR_RRF_R0RR, 2, 3}, - { "epsw", OP16(0xb98dLL), MASK_RRE_RR, INSTR_RRE_RR, 3, 2}, - { "cspg", OP16(0xb98aLL), MASK_RRE_RR, INSTR_RRE_RR, 2, 3}, - { "slbgr", OP16(0xb989LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "alcgr", OP16(0xb988LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "dlgr", OP16(0xb987LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "mlgr", OP16(0xb986LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "llghr", OP16(0xb985LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 4}, - { "llgcr", OP16(0xb984LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 4}, - { "flogr", OP16(0xb983LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 4}, - { "xgr", OP16(0xb982LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "ogr", OP16(0xb981LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "ngr", OP16(0xb980LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "bctgr", OP16(0xb946LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "klmd", OP16(0xb93fLL), MASK_RRE_RR, INSTR_RRE_RR, 3, 3}, - { "kimd", OP16(0xb93eLL), MASK_RRE_RR, INSTR_RRE_RR, 3, 3}, - { "clgfr", OP16(0xb931LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "cgfr", OP16(0xb930LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "kmc", OP16(0xb92fLL), MASK_RRE_RR, INSTR_RRE_RR, 3, 3}, - { "km", OP16(0xb92eLL), MASK_RRE_RR, INSTR_RRE_RR, 3, 3}, - { "lhr", OP16(0xb927LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 4}, - { "lbr", OP16(0xb926LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 4}, - { "sturg", OP16(0xb925LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "clgr", OP16(0xb921LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "cgr", OP16(0xb920LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "lrvr", OP16(0xb91fLL), MASK_RRE_RR, INSTR_RRE_RR, 3, 2}, - { "kmac", OP16(0xb91eLL), MASK_RRE_RR, INSTR_RRE_RR, 3, 3}, - { "dsgfr", OP16(0xb91dLL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "msgfr", OP16(0xb91cLL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "slgfr", OP16(0xb91bLL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "algfr", OP16(0xb91aLL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "sgfr", OP16(0xb919LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "agfr", OP16(0xb918LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "llgtr", OP16(0xb917LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "llgfr", OP16(0xb916LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "lgfr", OP16(0xb914LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "lcgfr", OP16(0xb913LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "ltgfr", OP16(0xb912LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "lngfr", OP16(0xb911LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "lpgfr", 
OP16(0xb910LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "lrvgr", OP16(0xb90fLL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "eregg", OP16(0xb90eLL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "dsgr", OP16(0xb90dLL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "msgr", OP16(0xb90cLL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "slgr", OP16(0xb90bLL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "algr", OP16(0xb90aLL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "sgr", OP16(0xb909LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "agr", OP16(0xb908LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "lghr", OP16(0xb907LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 4}, - { "lgbr", OP16(0xb906LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 4}, - { "lurag", OP16(0xb905LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "lgr", OP16(0xb904LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "lcgr", OP16(0xb903LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "ltgr", OP16(0xb902LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "lngr", OP16(0xb901LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "lpgr", OP16(0xb900LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, -/* QEMU-ADD: */ - { "crt", OP16(0xb972LL), MASK_RRF_M0RR, INSTR_RRF_M0RR, 3, 6}, - { "cgrt", OP16(0xb960LL), MASK_RRF_M0RR, INSTR_RRF_M0RR, 3, 6}, - { "clrt", OP16(0xb973LL), MASK_RRF_M0RR, INSTR_RRF_M0RR, 3, 6}, - { "clgrt", OP16(0xb961LL), MASK_RRF_M0RR, INSTR_RRF_M0RR, 3, 6}, - { "locr", OP16(0xb9f2LL), MASK_RRF_MRR, INSTR_RRF_MRR, 3, 6}, - { "locgr", OP16(0xb9e2LL), MASK_RRF_MRR, INSTR_RRF_MRR, 3, 6}, - { "popcnt", OP16(0xb9e1LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 6}, - { "ngrk", OP16(0xb9e4LL), MASK_RRF_R0RR, INSTR_RRF_R0RR, 3, 6}, - { "ogrk", OP16(0xb9e6LL), MASK_RRF_R0RR, INSTR_RRF_R0RR, 3, 6}, - { "xgrk", OP16(0xb9e7LL), MASK_RRF_R0RR, INSTR_RRF_R0RR, 3, 6}, - { "agrk", OP16(0xb9e8LL), MASK_RRF_R0RR, INSTR_RRF_R0RR, 3, 6}, - { "sgrk", OP16(0xb9e9LL), MASK_RRF_R0RR, INSTR_RRF_R0RR, 3, 6}, - { "algrk", OP16(0xb9eaLL), MASK_RRF_R0RR, INSTR_RRF_R0RR, 3, 6}, - { "slgrk", OP16(0xb9ebLL), MASK_RRF_R0RR, INSTR_RRF_R0RR, 3, 6}, - { "nrk", OP16(0xb9f4LL), MASK_RRF_R0RR, INSTR_RRF_R0RR, 3, 6}, - { "ork", OP16(0xb9f6LL), MASK_RRF_R0RR, INSTR_RRF_R0RR, 3, 6}, - { "xrk", OP16(0xb9f7LL), MASK_RRF_R0RR, INSTR_RRF_R0RR, 3, 6}, - { "ark", OP16(0xb9f8LL), MASK_RRF_R0RR, INSTR_RRF_R0RR, 3, 6}, - { "srk", OP16(0xb9f9LL), MASK_RRF_R0RR, INSTR_RRF_R0RR, 3, 6}, - { "alrk", OP16(0xb9faLL), MASK_RRF_R0RR, INSTR_RRF_R0RR, 3, 6}, - { "slrk", OP16(0xb9fbLL), MASK_RRF_R0RR, INSTR_RRF_R0RR, 3, 6}, -/* QEMU-END */ - { "lctl", OP8(0xb7LL), MASK_RS_CCRD, INSTR_RS_CCRD, 3, 0}, - { "stctl", OP8(0xb6LL), MASK_RS_CCRD, INSTR_RS_CCRD, 3, 0}, - { "rrxtr", OP16(0xb3ffLL), MASK_RRF_FFFU, INSTR_RRF_FFFU, 2, 5}, - { "iextr", OP16(0xb3feLL), MASK_RRF_F0FR, INSTR_RRF_F0FR, 2, 5}, - { "qaxtr", OP16(0xb3fdLL), MASK_RRF_FFFU, INSTR_RRF_FFFU, 2, 5}, - { "cextr", OP16(0xb3fcLL), MASK_RRE_FF, INSTR_RRE_FF, 2, 5}, - { "cxstr", OP16(0xb3fbLL), MASK_RRE_FR, INSTR_RRE_FR, 2, 5}, - { "cxutr", OP16(0xb3faLL), MASK_RRE_FR, INSTR_RRE_FR, 2, 5}, - { "cxgtr", OP16(0xb3f9LL), MASK_RRE_FR, INSTR_RRE_FR, 2, 5}, - { "rrdtr", OP16(0xb3f7LL), MASK_RRF_FFFU, INSTR_RRF_FFFU, 2, 5}, - { "iedtr", OP16(0xb3f6LL), MASK_RRF_F0FR, INSTR_RRF_F0FR, 2, 5}, - { "qadtr", OP16(0xb3f5LL), MASK_RRF_FFFU, INSTR_RRF_FFFU, 2, 5}, - { "cedtr", OP16(0xb3f4LL), MASK_RRE_FF, INSTR_RRE_FF, 2, 5}, - { "cdstr", OP16(0xb3f3LL), MASK_RRE_FR, INSTR_RRE_FR, 2, 5}, - { "cdutr", OP16(0xb3f2LL), MASK_RRE_FR, INSTR_RRE_FR, 2, 5}, - { "cdgtr", OP16(0xb3f1LL), MASK_RRE_FR, INSTR_RRE_FR, 2, 5}, - { "esxtr", OP16(0xb3efLL), MASK_RRE_RF, INSTR_RRE_RF, 2, 5}, - { 
"eextr", OP16(0xb3edLL), MASK_RRE_RF, INSTR_RRE_RF, 2, 5}, - { "cxtr", OP16(0xb3ecLL), MASK_RRE_FF, INSTR_RRE_FF, 2, 5}, - { "csxtr", OP16(0xb3ebLL), MASK_RRE_RF, INSTR_RRE_RF, 2, 5}, - { "cuxtr", OP16(0xb3eaLL), MASK_RRE_RF, INSTR_RRE_RF, 2, 5}, - { "cgxtr", OP16(0xb3e9LL), MASK_RRF_U0RF, INSTR_RRF_U0RF, 2, 5}, - { "kxtr", OP16(0xb3e8LL), MASK_RRE_FF, INSTR_RRE_FF, 2, 5}, - { "esdtr", OP16(0xb3e7LL), MASK_RRE_RF, INSTR_RRE_RF, 2, 5}, - { "eedtr", OP16(0xb3e5LL), MASK_RRE_RF, INSTR_RRE_RF, 2, 5}, - { "cdtr", OP16(0xb3e4LL), MASK_RRE_FF, INSTR_RRE_FF, 2, 5}, - { "csdtr", OP16(0xb3e3LL), MASK_RRE_RF, INSTR_RRE_RF, 2, 5}, - { "cudtr", OP16(0xb3e2LL), MASK_RRE_RF, INSTR_RRE_RF, 2, 5}, - { "cgdtr", OP16(0xb3e1LL), MASK_RRF_U0RF, INSTR_RRF_U0RF, 2, 5}, - { "kdtr", OP16(0xb3e0LL), MASK_RRE_FF, INSTR_RRE_FF, 2, 5}, - { "fixtr", OP16(0xb3dfLL), MASK_RRF_UUFF, INSTR_RRF_UUFF, 2, 5}, - { "ltxtr", OP16(0xb3deLL), MASK_RRE_FF, INSTR_RRE_FF, 2, 5}, - { "ldxtr", OP16(0xb3ddLL), MASK_RRF_UUFF, INSTR_RRF_UUFF, 2, 5}, - { "lxdtr", OP16(0xb3dcLL), MASK_RRF_0UFF, INSTR_RRF_0UFF, 2, 5}, - { "sxtr", OP16(0xb3dbLL), MASK_RRR_F0FF, INSTR_RRR_F0FF, 2, 5}, - { "axtr", OP16(0xb3daLL), MASK_RRR_F0FF, INSTR_RRR_F0FF, 2, 5}, - { "dxtr", OP16(0xb3d9LL), MASK_RRR_F0FF, INSTR_RRR_F0FF, 2, 5}, - { "mxtr", OP16(0xb3d8LL), MASK_RRR_F0FF, INSTR_RRR_F0FF, 2, 5}, - { "fidtr", OP16(0xb3d7LL), MASK_RRF_UUFF, INSTR_RRF_UUFF, 2, 5}, - { "ltdtr", OP16(0xb3d6LL), MASK_RRE_FF, INSTR_RRE_FF, 2, 5}, - { "ledtr", OP16(0xb3d5LL), MASK_RRF_UUFF, INSTR_RRF_UUFF, 2, 5}, - { "ldetr", OP16(0xb3d4LL), MASK_RRF_0UFF, INSTR_RRF_0UFF, 2, 5}, - { "sdtr", OP16(0xb3d3LL), MASK_RRR_F0FF, INSTR_RRR_F0FF, 2, 5}, - { "adtr", OP16(0xb3d2LL), MASK_RRR_F0FF, INSTR_RRR_F0FF, 2, 5}, - { "ddtr", OP16(0xb3d1LL), MASK_RRR_F0FF, INSTR_RRR_F0FF, 2, 5}, - { "mdtr", OP16(0xb3d0LL), MASK_RRR_F0FF, INSTR_RRR_F0FF, 2, 5}, - { "lgdr", OP16(0xb3cdLL), MASK_RRE_RF, INSTR_RRE_RF, 2, 5}, - { "cgxr", OP16(0xb3caLL), MASK_RRF_U0RF, INSTR_RRF_U0RF, 2, 2}, - { "cgdr", OP16(0xb3c9LL), MASK_RRF_U0RF, INSTR_RRF_U0RF, 2, 2}, - { "cger", OP16(0xb3c8LL), MASK_RRF_U0RF, INSTR_RRF_U0RF, 2, 2}, - { "cxgr", OP16(0xb3c6LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "cdgr", OP16(0xb3c5LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "cegr", OP16(0xb3c4LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "ldgr", OP16(0xb3c1LL), MASK_RRE_FR, INSTR_RRE_FR, 2, 5}, - { "cfxr", OP16(0xb3baLL), MASK_RRF_U0RF, INSTR_RRF_U0RF, 2, 2}, - { "cfdr", OP16(0xb3b9LL), MASK_RRF_U0RF, INSTR_RRF_U0RF, 2, 2}, - { "cfer", OP16(0xb3b8LL), MASK_RRF_U0RF, INSTR_RRF_U0RF, 2, 2}, - { "cxfr", OP16(0xb3b6LL), MASK_RRE_RF, INSTR_RRE_RF, 3, 0}, - { "cdfr", OP16(0xb3b5LL), MASK_RRE_RF, INSTR_RRE_RF, 3, 0}, - { "cefr", OP16(0xb3b4LL), MASK_RRE_RF, INSTR_RRE_RF, 3, 0}, - { "cgxbr", OP16(0xb3aaLL), MASK_RRF_U0RF, INSTR_RRF_U0RF, 2, 2}, - { "cgdbr", OP16(0xb3a9LL), MASK_RRF_U0RF, INSTR_RRF_U0RF, 2, 2}, - { "cgebr", OP16(0xb3a8LL), MASK_RRF_U0RF, INSTR_RRF_U0RF, 2, 2}, - { "cxgbr", OP16(0xb3a6LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "cdgbr", OP16(0xb3a5LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "cegbr", OP16(0xb3a4LL), MASK_RRE_RR, INSTR_RRE_RR, 2, 2}, - { "cfxbr", OP16(0xb39aLL), MASK_RRF_U0RF, INSTR_RRF_U0RF, 3, 0}, - { "cfdbr", OP16(0xb399LL), MASK_RRF_U0RF, INSTR_RRF_U0RF, 3, 0}, - { "cfebr", OP16(0xb398LL), MASK_RRF_U0RF, INSTR_RRF_U0RF, 3, 0}, - { "cxfbr", OP16(0xb396LL), MASK_RRE_RF, INSTR_RRE_RF, 3, 0}, - { "cdfbr", OP16(0xb395LL), MASK_RRE_RF, INSTR_RRE_RF, 3, 0}, - { "cefbr", OP16(0xb394LL), MASK_RRE_RF, INSTR_RRE_RF, 3, 0}, - { 
"efpc", OP16(0xb38cLL), MASK_RRE_RR_OPT, INSTR_RRE_RR_OPT, 3, 0}, - { "sfasr", OP16(0xb385LL), MASK_RRE_R0, INSTR_RRE_R0, 2, 5}, - { "sfpc", OP16(0xb384LL), MASK_RRE_RR_OPT, INSTR_RRE_RR_OPT, 3, 0}, - { "fidr", OP16(0xb37fLL), MASK_RRF_U0FF, INSTR_RRF_U0FF, 3, 0}, - { "fier", OP16(0xb377LL), MASK_RRF_U0FF, INSTR_RRF_U0FF, 3, 0}, - { "lzxr", OP16(0xb376LL), MASK_RRE_R0, INSTR_RRE_R0, 3, 0}, - { "lzdr", OP16(0xb375LL), MASK_RRE_R0, INSTR_RRE_R0, 3, 0}, - { "lzer", OP16(0xb374LL), MASK_RRE_R0, INSTR_RRE_R0, 3, 0}, - { "lcdfr", OP16(0xb373LL), MASK_RRE_FF, INSTR_RRE_FF, 2, 5}, - { "cpsdr", OP16(0xb372LL), MASK_RRF_F0FF2, INSTR_RRF_F0FF2, 2, 5}, - { "lndfr", OP16(0xb371LL), MASK_RRE_FF, INSTR_RRE_FF, 2, 5}, - { "lpdfr", OP16(0xb370LL), MASK_RRE_FF, INSTR_RRE_FF, 2, 5}, - { "cxr", OP16(0xb369LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "fixr", OP16(0xb367LL), MASK_RRF_U0FF, INSTR_RRF_U0FF, 3, 0}, - { "lexr", OP16(0xb366LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "lxr", OP16(0xb365LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "lcxr", OP16(0xb363LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "ltxr", OP16(0xb362LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "lnxr", OP16(0xb361LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "lpxr", OP16(0xb360LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "fidbr", OP16(0xb35fLL), MASK_RRF_U0FF, INSTR_RRF_U0FF, 3, 0}, - { "didbr", OP16(0xb35bLL), MASK_RRF_FUFF, INSTR_RRF_FUFF, 3, 0}, - { "thdr", OP16(0xb359LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "thder", OP16(0xb358LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "fiebr", OP16(0xb357LL), MASK_RRF_U0FF, INSTR_RRF_U0FF, 3, 0}, - { "diebr", OP16(0xb353LL), MASK_RRF_FUFF, INSTR_RRF_FUFF, 3, 0}, - { "tbdr", OP16(0xb351LL), MASK_RRF_U0FF, INSTR_RRF_U0FF, 3, 0}, - { "tbedr", OP16(0xb350LL), MASK_RRF_U0FF, INSTR_RRF_U0FF, 3, 0}, - { "dxbr", OP16(0xb34dLL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "mxbr", OP16(0xb34cLL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "sxbr", OP16(0xb34bLL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "axbr", OP16(0xb34aLL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "cxbr", OP16(0xb349LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "kxbr", OP16(0xb348LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "fixbr", OP16(0xb347LL), MASK_RRF_U0FF, INSTR_RRF_U0FF, 3, 0}, - { "lexbr", OP16(0xb346LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "ldxbr", OP16(0xb345LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "ledbr", OP16(0xb344LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "lcxbr", OP16(0xb343LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "ltxbr", OP16(0xb342LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "lnxbr", OP16(0xb341LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "lpxbr", OP16(0xb340LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "msdr", OP16(0xb33fLL), MASK_RRF_F0FF, INSTR_RRF_F0FF, 3, 3}, - { "madr", OP16(0xb33eLL), MASK_RRF_F0FF, INSTR_RRF_F0FF, 3, 3}, - { "myhr", OP16(0xb33dLL), MASK_RRF_F0FF, INSTR_RRF_F0FF, 2, 4}, - { "mayhr", OP16(0xb33cLL), MASK_RRF_F0FF, INSTR_RRF_F0FF, 2, 4}, - { "myr", OP16(0xb33bLL), MASK_RRF_F0FF, INSTR_RRF_F0FF, 2, 4}, - { "mayr", OP16(0xb33aLL), MASK_RRF_F0FF, INSTR_RRF_F0FF, 2, 4}, - { "mylr", OP16(0xb339LL), MASK_RRF_F0FF, INSTR_RRF_F0FF, 2, 4}, - { "maylr", OP16(0xb338LL), MASK_RRF_F0FF, INSTR_RRF_F0FF, 2, 4}, - { "meer", OP16(0xb337LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "sqxr", OP16(0xb336LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "mser", OP16(0xb32fLL), MASK_RRF_F0FF, INSTR_RRF_F0FF, 3, 3}, - { "maer", OP16(0xb32eLL), MASK_RRF_F0FF, INSTR_RRF_F0FF, 3, 3}, - { "lxer", OP16(0xb326LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "lxdr", OP16(0xb325LL), 
MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "lder", OP16(0xb324LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "msdbr", OP16(0xb31fLL), MASK_RRF_F0FF, INSTR_RRF_F0FF, 3, 0}, - { "madbr", OP16(0xb31eLL), MASK_RRF_F0FF, INSTR_RRF_F0FF, 3, 0}, - { "ddbr", OP16(0xb31dLL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "mdbr", OP16(0xb31cLL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "sdbr", OP16(0xb31bLL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "adbr", OP16(0xb31aLL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "cdbr", OP16(0xb319LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "kdbr", OP16(0xb318LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "meebr", OP16(0xb317LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "sqxbr", OP16(0xb316LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "sqdbr", OP16(0xb315LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "sqebr", OP16(0xb314LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "lcdbr", OP16(0xb313LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "ltdbr", OP16(0xb312LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "lndbr", OP16(0xb311LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "lpdbr", OP16(0xb310LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "msebr", OP16(0xb30fLL), MASK_RRF_F0FF, INSTR_RRF_F0FF, 3, 0}, - { "maebr", OP16(0xb30eLL), MASK_RRF_F0FF, INSTR_RRF_F0FF, 3, 0}, - { "debr", OP16(0xb30dLL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "mdebr", OP16(0xb30cLL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "sebr", OP16(0xb30bLL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "aebr", OP16(0xb30aLL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "cebr", OP16(0xb309LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "kebr", OP16(0xb308LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "mxdbr", OP16(0xb307LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "lxebr", OP16(0xb306LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "lxdbr", OP16(0xb305LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "ldebr", OP16(0xb304LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "lcebr", OP16(0xb303LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "ltebr", OP16(0xb302LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "lnebr", OP16(0xb301LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, - { "lpebr", OP16(0xb300LL), MASK_RRE_FF, INSTR_RRE_FF, 3, 0}, -/* QEMU-ADD: */ - { "clfebr", OP16(0xb39cLL), MASK_RRF_UUFF, INSTR_RRF_UUFF, 3, 6}, - { "clfdbr", OP16(0xb39dLL), MASK_RRF_UUFF, INSTR_RRF_UUFF, 3, 6}, - { "clfxbr", OP16(0xb39eLL), MASK_RRF_UUFF, INSTR_RRF_UUFF, 3, 6}, - { "clgebr", OP16(0xb3acLL), MASK_RRF_UUFF, INSTR_RRF_UUFF, 3, 6}, - { "clgdbr", OP16(0xb3adLL), MASK_RRF_UUFF, INSTR_RRF_UUFF, 3, 6}, - { "clgxbr", OP16(0xb3aeLL), MASK_RRF_UUFF, INSTR_RRF_UUFF, 3, 6}, - { "celfbr", OP16(0xb390LL), MASK_RRF_UUFF, INSTR_RRF_UUFF, 3, 6}, - { "cdlfbr", OP16(0xb391LL), MASK_RRF_UUFF, INSTR_RRF_UUFF, 3, 6}, - { "cxlfbr", OP16(0xb392LL), MASK_RRF_UUFF, INSTR_RRF_UUFF, 3, 6}, - { "celgbr", OP16(0xb3a0LL), MASK_RRF_UUFF, INSTR_RRF_UUFF, 3, 6}, - { "cdlgbr", OP16(0xb3a1LL), MASK_RRF_UUFF, INSTR_RRF_UUFF, 3, 6}, - { "cxlgbr", OP16(0xb3a2LL), MASK_RRF_UUFF, INSTR_RRF_UUFF, 3, 6}, -/* QEMU-END */ - { "trap4", OP16(0xb2ffLL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "lfas", OP16(0xb2bdLL), MASK_S_RD, INSTR_S_RD, 2, 5}, - { "srnmt", OP16(0xb2b9LL), MASK_S_RD, INSTR_S_RD, 2, 5}, - { "lpswe", OP16(0xb2b2LL), MASK_S_RD, INSTR_S_RD, 2, 2}, - { "stfl", OP16(0xb2b1LL), MASK_S_RD, INSTR_S_RD, 3, 2}, - { "stfle", OP16(0xb2b0LL), MASK_S_RD, INSTR_S_RD, 2, 4}, - { "cu12", OP16(0xb2a7LL), MASK_RRF_M0RR, INSTR_RRF_M0RR, 2, 4}, - { "cutfu", OP16(0xb2a7LL), MASK_RRF_M0RR, INSTR_RRF_M0RR, 2, 4}, - { "cutfu", OP16(0xb2a7LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "cu21", OP16(0xb2a6LL), 
MASK_RRF_M0RR, INSTR_RRF_M0RR, 2, 4}, - { "cuutf", OP16(0xb2a6LL), MASK_RRF_M0RR, INSTR_RRF_M0RR, 2, 4}, - { "cuutf", OP16(0xb2a6LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "tre", OP16(0xb2a5LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "lfpc", OP16(0xb29dLL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "stfpc", OP16(0xb29cLL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "srnm", OP16(0xb299LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "stsi", OP16(0xb27dLL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "stckf", OP16(0xb27cLL), MASK_S_RD, INSTR_S_RD, 2, 4}, - { "sacf", OP16(0xb279LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "stcke", OP16(0xb278LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "rp", OP16(0xb277LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "xsch", OP16(0xb276LL), MASK_S_00, INSTR_S_00, 3, 0}, - { "siga", OP16(0xb274LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "cmpsc", OP16(0xb263LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "cmpsc", OP16(0xb263LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "srst", OP16(0xb25eLL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "clst", OP16(0xb25dLL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "bsa", OP16(0xb25aLL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "bsg", OP16(0xb258LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "cuse", OP16(0xb257LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "mvst", OP16(0xb255LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "mvpg", OP16(0xb254LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "msr", OP16(0xb252LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "csp", OP16(0xb250LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "ear", OP16(0xb24fLL), MASK_RRE_RA, INSTR_RRE_RA, 3, 0}, - { "sar", OP16(0xb24eLL), MASK_RRE_AR, INSTR_RRE_AR, 3, 0}, - { "cpya", OP16(0xb24dLL), MASK_RRE_AA, INSTR_RRE_AA, 3, 0}, - { "tar", OP16(0xb24cLL), MASK_RRE_AR, INSTR_RRE_AR, 3, 0}, - { "lura", OP16(0xb24bLL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "esta", OP16(0xb24aLL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "ereg", OP16(0xb249LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "palb", OP16(0xb248LL), MASK_RRE_00, INSTR_RRE_00, 3, 0}, - { "msta", OP16(0xb247LL), MASK_RRE_R0, INSTR_RRE_R0, 3, 0}, - { "stura", OP16(0xb246LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "sqer", OP16(0xb245LL), MASK_RRE_F0, INSTR_RRE_F0, 3, 0}, - { "sqdr", OP16(0xb244LL), MASK_RRE_F0, INSTR_RRE_F0, 3, 0}, - { "cksm", OP16(0xb241LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "bakr", OP16(0xb240LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "schm", OP16(0xb23cLL), MASK_S_00, INSTR_S_00, 3, 0}, - { "rchp", OP16(0xb23bLL), MASK_S_00, INSTR_S_00, 3, 0}, - { "stcps", OP16(0xb23aLL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "stcrw", OP16(0xb239LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "rsch", OP16(0xb238LL), MASK_S_00, INSTR_S_00, 3, 0}, - { "sal", OP16(0xb237LL), MASK_S_00, INSTR_S_00, 3, 0}, - { "tpi", OP16(0xb236LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "tsch", OP16(0xb235LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "stsch", OP16(0xb234LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "ssch", OP16(0xb233LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "msch", OP16(0xb232LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "hsch", OP16(0xb231LL), MASK_S_00, INSTR_S_00, 3, 0}, - { "csch", OP16(0xb230LL), MASK_S_00, INSTR_S_00, 3, 0}, - { "pgout", OP16(0xb22fLL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "pgin", OP16(0xb22eLL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "dxr", OP16(0xb22dLL), MASK_RRE_F0, INSTR_RRE_F0, 3, 0}, - { "tb", OP16(0xb22cLL), MASK_RRE_0R, INSTR_RRE_0R, 3, 0}, - { "sske", OP16(0xb22bLL), MASK_RRF_M0RR, INSTR_RRF_M0RR, 2, 4}, - { "sske", OP16(0xb22bLL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "rrbe", OP16(0xb22aLL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { 
"iske", OP16(0xb229LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "pt", OP16(0xb228LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "esar", OP16(0xb227LL), MASK_RRE_R0, INSTR_RRE_R0, 3, 0}, - { "epar", OP16(0xb226LL), MASK_RRE_R0, INSTR_RRE_R0, 3, 0}, - { "ssar", OP16(0xb225LL), MASK_RRE_R0, INSTR_RRE_R0, 3, 0}, - { "iac", OP16(0xb224LL), MASK_RRE_R0, INSTR_RRE_R0, 3, 0}, - { "ivsk", OP16(0xb223LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "ipm", OP16(0xb222LL), MASK_RRE_R0, INSTR_RRE_R0, 3, 0}, - { "ipte", OP16(0xb221LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0}, - { "cfc", OP16(0xb21aLL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "sac", OP16(0xb219LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "pc", OP16(0xb218LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "sie", OP16(0xb214LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "stap", OP16(0xb212LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "stpx", OP16(0xb211LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "spx", OP16(0xb210LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "ptlb", OP16(0xb20dLL), MASK_S_00, INSTR_S_00, 3, 0}, - { "ipk", OP16(0xb20bLL), MASK_S_00, INSTR_S_00, 3, 0}, - { "spka", OP16(0xb20aLL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "stpt", OP16(0xb209LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "spt", OP16(0xb208LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "stckc", OP16(0xb207LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "sckc", OP16(0xb206LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "stck", OP16(0xb205LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "sck", OP16(0xb204LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "stidp", OP16(0xb202LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "lra", OP8(0xb1LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "mc", OP8(0xafLL), MASK_SI_URD, INSTR_SI_URD, 3, 0}, - { "sigp", OP8(0xaeLL), MASK_RS_RRRD, INSTR_RS_RRRD, 3, 0}, - { "stosm", OP8(0xadLL), MASK_SI_URD, INSTR_SI_URD, 3, 0}, - { "stnsm", OP8(0xacLL), MASK_SI_URD, INSTR_SI_URD, 3, 0}, - { "clcle", OP8(0xa9LL), MASK_RS_RRRD, INSTR_RS_RRRD, 3, 0}, - { "mvcle", OP8(0xa8LL), MASK_RS_RRRD, INSTR_RS_RRRD, 3, 0}, - { "j", OP16(0xa7f4LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jno", OP16(0xa7e4LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jnh", OP16(0xa7d4LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jnp", OP16(0xa7d4LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jle", OP16(0xa7c4LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jnl", OP16(0xa7b4LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jnm", OP16(0xa7b4LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jhe", OP16(0xa7a4LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jnlh", OP16(0xa794LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "je", OP16(0xa784LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jz", OP16(0xa784LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jne", OP16(0xa774LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jnz", OP16(0xa774LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jlh", OP16(0xa764LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jnhe", OP16(0xa754LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jl", OP16(0xa744LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jm", OP16(0xa744LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jnle", OP16(0xa734LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jh", OP16(0xa724LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jp", OP16(0xa724LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "jo", OP16(0xa714LL), MASK_RI_0P, INSTR_RI_0P, 3, 0}, - { "cghi", OP16(0xa70fLL), MASK_RI_RI, INSTR_RI_RI, 2, 2}, - { "chi", OP16(0xa70eLL), MASK_RI_RI, INSTR_RI_RI, 3, 0}, - { "mghi", OP16(0xa70dLL), MASK_RI_RI, INSTR_RI_RI, 2, 2}, - { "mhi", OP16(0xa70cLL), MASK_RI_RI, INSTR_RI_RI, 3, 0}, - { "aghi", OP16(0xa70bLL), MASK_RI_RI, INSTR_RI_RI, 2, 2}, - { "ahi", OP16(0xa70aLL), MASK_RI_RI, INSTR_RI_RI, 3, 0}, - { "lghi", 
OP16(0xa709LL), MASK_RI_RI, INSTR_RI_RI, 2, 2}, - { "lhi", OP16(0xa708LL), MASK_RI_RI, INSTR_RI_RI, 3, 0}, - { "brctg", OP16(0xa707LL), MASK_RI_RP, INSTR_RI_RP, 2, 2}, - { "brct", OP16(0xa706LL), MASK_RI_RP, INSTR_RI_RP, 3, 0}, - { "bras", OP16(0xa705LL), MASK_RI_RP, INSTR_RI_RP, 3, 0}, - { "brc", OP16(0xa704LL), MASK_RI_UP, INSTR_RI_UP, 3, 0}, - { "tmhl", OP16(0xa703LL), MASK_RI_RU, INSTR_RI_RU, 2, 2}, - { "tmhh", OP16(0xa702LL), MASK_RI_RU, INSTR_RI_RU, 2, 2}, - { "tml", OP16(0xa701LL), MASK_RI_RU, INSTR_RI_RU, 3, 0}, - { "tmll", OP16(0xa701LL), MASK_RI_RU, INSTR_RI_RU, 3, 0}, - { "tmh", OP16(0xa700LL), MASK_RI_RU, INSTR_RI_RU, 3, 0}, - { "tmlh", OP16(0xa700LL), MASK_RI_RU, INSTR_RI_RU, 3, 0}, - { "llill", OP16(0xa50fLL), MASK_RI_RU, INSTR_RI_RU, 2, 2}, - { "llilh", OP16(0xa50eLL), MASK_RI_RU, INSTR_RI_RU, 2, 2}, - { "llihl", OP16(0xa50dLL), MASK_RI_RU, INSTR_RI_RU, 2, 2}, - { "llihh", OP16(0xa50cLL), MASK_RI_RU, INSTR_RI_RU, 2, 2}, - { "oill", OP16(0xa50bLL), MASK_RI_RU, INSTR_RI_RU, 2, 2}, - { "oilh", OP16(0xa50aLL), MASK_RI_RU, INSTR_RI_RU, 2, 2}, - { "oihl", OP16(0xa509LL), MASK_RI_RU, INSTR_RI_RU, 2, 2}, - { "oihh", OP16(0xa508LL), MASK_RI_RU, INSTR_RI_RU, 2, 2}, - { "nill", OP16(0xa507LL), MASK_RI_RU, INSTR_RI_RU, 2, 2}, - { "nilh", OP16(0xa506LL), MASK_RI_RU, INSTR_RI_RU, 2, 2}, - { "nihl", OP16(0xa505LL), MASK_RI_RU, INSTR_RI_RU, 2, 2}, - { "nihh", OP16(0xa504LL), MASK_RI_RU, INSTR_RI_RU, 2, 2}, - { "iill", OP16(0xa503LL), MASK_RI_RU, INSTR_RI_RU, 2, 2}, - { "iilh", OP16(0xa502LL), MASK_RI_RU, INSTR_RI_RU, 2, 2}, - { "iihl", OP16(0xa501LL), MASK_RI_RU, INSTR_RI_RU, 2, 2}, - { "iihh", OP16(0xa500LL), MASK_RI_RU, INSTR_RI_RU, 2, 2}, - { "stam", OP8(0x9bLL), MASK_RS_AARD, INSTR_RS_AARD, 3, 0}, - { "lam", OP8(0x9aLL), MASK_RS_AARD, INSTR_RS_AARD, 3, 0}, - { "trace", OP8(0x99LL), MASK_RS_RRRD, INSTR_RS_RRRD, 3, 0}, - { "lm", OP8(0x98LL), MASK_RS_RRRD, INSTR_RS_RRRD, 3, 0}, - { "xi", OP8(0x97LL), MASK_SI_URD, INSTR_SI_URD, 3, 0}, - { "oi", OP8(0x96LL), MASK_SI_URD, INSTR_SI_URD, 3, 0}, - { "cli", OP8(0x95LL), MASK_SI_URD, INSTR_SI_URD, 3, 0}, - { "ni", OP8(0x94LL), MASK_SI_URD, INSTR_SI_URD, 3, 0}, - { "ts", OP8(0x93LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "mvi", OP8(0x92LL), MASK_SI_URD, INSTR_SI_URD, 3, 0}, - { "tm", OP8(0x91LL), MASK_SI_URD, INSTR_SI_URD, 3, 0}, - { "stm", OP8(0x90LL), MASK_RS_RRRD, INSTR_RS_RRRD, 3, 0}, - { "slda", OP8(0x8fLL), MASK_RS_R0RD, INSTR_RS_R0RD, 3, 0}, - { "srda", OP8(0x8eLL), MASK_RS_R0RD, INSTR_RS_R0RD, 3, 0}, - { "sldl", OP8(0x8dLL), MASK_RS_R0RD, INSTR_RS_R0RD, 3, 0}, - { "srdl", OP8(0x8cLL), MASK_RS_R0RD, INSTR_RS_R0RD, 3, 0}, - { "sla", OP8(0x8bLL), MASK_RS_R0RD, INSTR_RS_R0RD, 3, 0}, - { "sra", OP8(0x8aLL), MASK_RS_R0RD, INSTR_RS_R0RD, 3, 0}, - { "sll", OP8(0x89LL), MASK_RS_R0RD, INSTR_RS_R0RD, 3, 0}, - { "srl", OP8(0x88LL), MASK_RS_R0RD, INSTR_RS_R0RD, 3, 0}, - { "bxle", OP8(0x87LL), MASK_RS_RRRD, INSTR_RS_RRRD, 3, 0}, - { "bxh", OP8(0x86LL), MASK_RS_RRRD, INSTR_RS_RRRD, 3, 0}, - { "brxle", OP8(0x85LL), MASK_RSI_RRP, INSTR_RSI_RRP, 3, 0}, - { "brxh", OP8(0x84LL), MASK_RSI_RRP, INSTR_RSI_RRP, 3, 0}, - { "diag", OP8(0x83LL), MASK_RS_RRRD, INSTR_RS_RRRD, 3, 0}, - { "lpsw", OP8(0x82LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "ssm", OP8(0x80LL), MASK_S_RD, INSTR_S_RD, 3, 0}, - { "su", OP8(0x7fLL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "au", OP8(0x7eLL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "de", OP8(0x7dLL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "me", OP8(0x7cLL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "mde", OP8(0x7cLL), MASK_RX_FRRD, INSTR_RX_FRRD, 
3, 0}, - { "se", OP8(0x7bLL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "ae", OP8(0x7aLL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "ce", OP8(0x79LL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "le", OP8(0x78LL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "ms", OP8(0x71LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "ste", OP8(0x70LL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "sw", OP8(0x6fLL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "aw", OP8(0x6eLL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "dd", OP8(0x6dLL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "md", OP8(0x6cLL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "sd", OP8(0x6bLL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "ad", OP8(0x6aLL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "cd", OP8(0x69LL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "ld", OP8(0x68LL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "mxd", OP8(0x67LL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "std", OP8(0x60LL), MASK_RX_FRRD, INSTR_RX_FRRD, 3, 0}, - { "sl", OP8(0x5fLL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "al", OP8(0x5eLL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "d", OP8(0x5dLL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "m", OP8(0x5cLL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "s", OP8(0x5bLL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "a", OP8(0x5aLL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "c", OP8(0x59LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "l", OP8(0x58LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "x", OP8(0x57LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "o", OP8(0x56LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "cl", OP8(0x55LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "n", OP8(0x54LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "lae", OP8(0x51LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "st", OP8(0x50LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "cvb", OP8(0x4fLL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "cvd", OP8(0x4eLL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "bas", OP8(0x4dLL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "mh", OP8(0x4cLL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "sh", OP8(0x4bLL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "ah", OP8(0x4aLL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "ch", OP8(0x49LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "lh", OP8(0x48LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "b", OP16(0x47f0LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "bno", OP16(0x47e0LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "bnh", OP16(0x47d0LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "bnp", OP16(0x47d0LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "ble", OP16(0x47c0LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "bnl", OP16(0x47b0LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "bnm", OP16(0x47b0LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "bhe", OP16(0x47a0LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "bnlh", OP16(0x4790LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "be", OP16(0x4780LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "bz", OP16(0x4780LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "bne", OP16(0x4770LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "bnz", OP16(0x4770LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "blh", OP16(0x4760LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "bnhe", OP16(0x4750LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "bl", OP16(0x4740LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "bm", OP16(0x4740LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "bnle", OP16(0x4730LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "bh", OP16(0x4720LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "bp", OP16(0x4720LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "bo", OP16(0x4710LL), MASK_RX_0RRD, 
INSTR_RX_0RRD, 3, 0}, - { "bc", OP8(0x47LL), MASK_RX_URRD, INSTR_RX_URRD, 3, 0}, - { "nop", OP16(0x4700LL), MASK_RX_0RRD, INSTR_RX_0RRD, 3, 0}, - { "bct", OP8(0x46LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "bal", OP8(0x45LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "ex", OP8(0x44LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "ic", OP8(0x43LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "stc", OP8(0x42LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "la", OP8(0x41LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "sth", OP8(0x40LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0}, - { "sur", OP8(0x3fLL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "aur", OP8(0x3eLL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "der", OP8(0x3dLL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "mer", OP8(0x3cLL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "mder", OP8(0x3cLL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "ser", OP8(0x3bLL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "aer", OP8(0x3aLL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "cer", OP8(0x39LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "ler", OP8(0x38LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "sxr", OP8(0x37LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "axr", OP8(0x36LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "lrer", OP8(0x35LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "ledr", OP8(0x35LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "her", OP8(0x34LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "lcer", OP8(0x33LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "lter", OP8(0x32LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "lner", OP8(0x31LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "lper", OP8(0x30LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "swr", OP8(0x2fLL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "awr", OP8(0x2eLL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "ddr", OP8(0x2dLL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "mdr", OP8(0x2cLL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "sdr", OP8(0x2bLL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "adr", OP8(0x2aLL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "cdr", OP8(0x29LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "ldr", OP8(0x28LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "mxdr", OP8(0x27LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "mxr", OP8(0x26LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "lrdr", OP8(0x25LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "ldxr", OP8(0x25LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "hdr", OP8(0x24LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "lcdr", OP8(0x23LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "ltdr", OP8(0x22LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "lndr", OP8(0x21LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "lpdr", OP8(0x20LL), MASK_RR_FF, INSTR_RR_FF, 3, 0}, - { "slr", OP8(0x1fLL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "alr", OP8(0x1eLL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "dr", OP8(0x1dLL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "mr", OP8(0x1cLL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "sr", OP8(0x1bLL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "ar", OP8(0x1aLL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "cr", OP8(0x19LL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "lr", OP8(0x18LL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "xr", OP8(0x17LL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "or", OP8(0x16LL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "clr", OP8(0x15LL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "nr", OP8(0x14LL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "lcr", OP8(0x13LL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "ltr", OP8(0x12LL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "lnr", OP8(0x11LL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "lpr", OP8(0x10LL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "clcl", OP8(0x0fLL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "mvcl", OP8(0x0eLL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "basr", 
OP8(0x0dLL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "bassm", OP8(0x0cLL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "bsm", OP8(0x0bLL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "svc", OP8(0x0aLL), MASK_RR_U0, INSTR_RR_U0, 3, 0}, - { "br", OP16(0x07f0LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bnor", OP16(0x07e0LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bnhr", OP16(0x07d0LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bnpr", OP16(0x07d0LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bler", OP16(0x07c0LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bnlr", OP16(0x07b0LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bnmr", OP16(0x07b0LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bher", OP16(0x07a0LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bnlhr", OP16(0x0790LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "ber", OP16(0x0780LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bzr", OP16(0x0780LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bner", OP16(0x0770LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bnzr", OP16(0x0770LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "blhr", OP16(0x0760LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bnher", OP16(0x0750LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "blr", OP16(0x0740LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bmr", OP16(0x0740LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bnler", OP16(0x0730LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bhr", OP16(0x0720LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bpr", OP16(0x0720LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bor", OP16(0x0710LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bcr", OP8(0x07LL), MASK_RR_UR, INSTR_RR_UR, 3, 0}, - { "nopr", OP16(0x0700LL), MASK_RR_0R, INSTR_RR_0R, 3, 0}, - { "bctr", OP8(0x06LL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "balr", OP8(0x05LL), MASK_RR_RR, INSTR_RR_RR, 3, 0}, - { "spm", OP8(0x04LL), MASK_RR_R0, INSTR_RR_R0, 3, 0}, - { "trap2", OP16(0x01ffLL), MASK_E, INSTR_E, 3, 0}, - { "sam64", OP16(0x010eLL), MASK_E, INSTR_E, 2, 2}, - { "sam31", OP16(0x010dLL), MASK_E, INSTR_E, 3, 2}, - { "sam24", OP16(0x010cLL), MASK_E, INSTR_E, 3, 2}, - { "tam", OP16(0x010bLL), MASK_E, INSTR_E, 3, 2}, - { "pfpo", OP16(0x010aLL), MASK_E, INSTR_E, 2, 5}, - { "sckpf", OP16(0x0107LL), MASK_E, INSTR_E, 3, 0}, - { "upt", OP16(0x0102LL), MASK_E, INSTR_E, 3, 0}, - { "pr", OP16(0x0101LL), MASK_E, INSTR_E, 3, 0}, -}; - -static const int s390_num_opcodes = - sizeof (s390_opcodes) / sizeof (s390_opcodes[0]); diff --git a/docs/COLO-FT.txt b/docs/COLO-FT.txt index 8d6d53a5a2..8ec653f81c 100644 --- a/docs/COLO-FT.txt +++ b/docs/COLO-FT.txt @@ -209,9 +209,9 @@ children.0=childs0 \ 3. On Secondary VM's QEMU monitor, issue command -{'execute':'qmp_capabilities'} -{'execute': 'nbd-server-start', 'arguments': {'addr': {'type': 'inet', 'data': {'host': '0.0.0.0', 'port': '9999'} } } } -{'execute': 'nbd-server-add', 'arguments': {'device': 'parent0', 'writable': true } } +{"execute":"qmp_capabilities"} +{"execute": "nbd-server-start", "arguments": {"addr": {"type": "inet", "data": {"host": "0.0.0.0", "port": "9999"} } } } +{"execute": "nbd-server-add", "arguments": {"device": "parent0", "writable": true } } Note: a. The qmp command nbd-server-start and nbd-server-add must be run @@ -222,11 +222,11 @@ Note: will be merged into the parent disk on failover. 4. 
On Primary VM's QEMU monitor, issue command: -{'execute':'qmp_capabilities'} -{'execute': 'human-monitor-command', 'arguments': {'command-line': 'drive_add -n buddy driver=replication,mode=primary,file.driver=nbd,file.host=127.0.0.2,file.port=9999,file.export=parent0,node-name=replication0'}} -{'execute': 'x-blockdev-change', 'arguments':{'parent': 'colo-disk0', 'node': 'replication0' } } -{'execute': 'migrate-set-capabilities', 'arguments': {'capabilities': [ {'capability': 'x-colo', 'state': true } ] } } -{'execute': 'migrate', 'arguments': {'uri': 'tcp:127.0.0.2:9998' } } +{"execute":"qmp_capabilities"} +{"execute": "human-monitor-command", "arguments": {"command-line": "drive_add -n buddy driver=replication,mode=primary,file.driver=nbd,file.host=127.0.0.2,file.port=9999,file.export=parent0,node-name=replication0"}} +{"execute": "x-blockdev-change", "arguments":{"parent": "colo-disk0", "node": "replication0" } } +{"execute": "migrate-set-capabilities", "arguments": {"capabilities": [ {"capability": "x-colo", "state": true } ] } } +{"execute": "migrate", "arguments": {"uri": "tcp:127.0.0.2:9998" } } Note: a. There should be only one NBD Client for each primary disk. @@ -249,59 +249,59 @@ if you want to resume the replication, follow "Secondary resume replication" == Primary Failover == The Secondary died, resume on the Primary -{'execute': 'x-blockdev-change', 'arguments':{ 'parent': 'colo-disk0', 'child': 'children.1'} } -{'execute': 'human-monitor-command', 'arguments':{ 'command-line': 'drive_del replication0' } } -{'execute': 'object-del', 'arguments':{ 'id': 'comp0' } } -{'execute': 'object-del', 'arguments':{ 'id': 'iothread1' } } -{'execute': 'object-del', 'arguments':{ 'id': 'm0' } } -{'execute': 'object-del', 'arguments':{ 'id': 'redire0' } } -{'execute': 'object-del', 'arguments':{ 'id': 'redire1' } } -{'execute': 'x-colo-lost-heartbeat' } +{"execute": "x-blockdev-change", "arguments":{ "parent": "colo-disk0", "child": "children.1"} } +{"execute": "human-monitor-command", "arguments":{ "command-line": "drive_del replication0" } } +{"execute": "object-del", "arguments":{ "id": "comp0" } } +{"execute": "object-del", "arguments":{ "id": "iothread1" } } +{"execute": "object-del", "arguments":{ "id": "m0" } } +{"execute": "object-del", "arguments":{ "id": "redire0" } } +{"execute": "object-del", "arguments":{ "id": "redire1" } } +{"execute": "x-colo-lost-heartbeat" } == Secondary Failover == The Primary died, resume on the Secondary and prepare to become the new Primary -{'execute': 'nbd-server-stop'} -{'execute': 'x-colo-lost-heartbeat'} +{"execute": "nbd-server-stop"} +{"execute": "x-colo-lost-heartbeat"} -{'execute': 'object-del', 'arguments':{ 'id': 'f2' } } -{'execute': 'object-del', 'arguments':{ 'id': 'f1' } } -{'execute': 'chardev-remove', 'arguments':{ 'id': 'red1' } } -{'execute': 'chardev-remove', 'arguments':{ 'id': 'red0' } } +{"execute": "object-del", "arguments":{ "id": "f2" } } +{"execute": "object-del", "arguments":{ "id": "f1" } } +{"execute": "chardev-remove", "arguments":{ "id": "red1" } } +{"execute": "chardev-remove", "arguments":{ "id": "red0" } } -{'execute': 'chardev-add', 'arguments':{ 'id': 'mirror0', 'backend': {'type': 'socket', 'data': {'addr': { 'type': 'inet', 'data': { 'host': '0.0.0.0', 'port': '9003' } }, 'server': true } } } } -{'execute': 'chardev-add', 'arguments':{ 'id': 'compare1', 'backend': {'type': 'socket', 'data': {'addr': { 'type': 'inet', 'data': { 'host': '0.0.0.0', 'port': '9004' } }, 'server': true } } } } -{'execute': 'chardev-add', 
'arguments':{ 'id': 'compare0', 'backend': {'type': 'socket', 'data': {'addr': { 'type': 'inet', 'data': { 'host': '127.0.0.1', 'port': '9001' } }, 'server': true } } } } -{'execute': 'chardev-add', 'arguments':{ 'id': 'compare0-0', 'backend': {'type': 'socket', 'data': {'addr': { 'type': 'inet', 'data': { 'host': '127.0.0.1', 'port': '9001' } }, 'server': false } } } } -{'execute': 'chardev-add', 'arguments':{ 'id': 'compare_out', 'backend': {'type': 'socket', 'data': {'addr': { 'type': 'inet', 'data': { 'host': '127.0.0.1', 'port': '9005' } }, 'server': true } } } } -{'execute': 'chardev-add', 'arguments':{ 'id': 'compare_out0', 'backend': {'type': 'socket', 'data': {'addr': { 'type': 'inet', 'data': { 'host': '127.0.0.1', 'port': '9005' } }, 'server': false } } } } +{"execute": "chardev-add", "arguments":{ "id": "mirror0", "backend": {"type": "socket", "data": {"addr": { "type": "inet", "data": { "host": "0.0.0.0", "port": "9003" } }, "server": true } } } } +{"execute": "chardev-add", "arguments":{ "id": "compare1", "backend": {"type": "socket", "data": {"addr": { "type": "inet", "data": { "host": "0.0.0.0", "port": "9004" } }, "server": true } } } } +{"execute": "chardev-add", "arguments":{ "id": "compare0", "backend": {"type": "socket", "data": {"addr": { "type": "inet", "data": { "host": "127.0.0.1", "port": "9001" } }, "server": true } } } } +{"execute": "chardev-add", "arguments":{ "id": "compare0-0", "backend": {"type": "socket", "data": {"addr": { "type": "inet", "data": { "host": "127.0.0.1", "port": "9001" } }, "server": false } } } } +{"execute": "chardev-add", "arguments":{ "id": "compare_out", "backend": {"type": "socket", "data": {"addr": { "type": "inet", "data": { "host": "127.0.0.1", "port": "9005" } }, "server": true } } } } +{"execute": "chardev-add", "arguments":{ "id": "compare_out0", "backend": {"type": "socket", "data": {"addr": { "type": "inet", "data": { "host": "127.0.0.1", "port": "9005" } }, "server": false } } } } == Primary resume replication == Resume replication after new Secondary is up. 
Start the new Secondary (Steps 2 and 3 above), then on the Primary: -{'execute': 'drive-mirror', 'arguments':{ 'device': 'colo-disk0', 'job-id': 'resync', 'target': 'nbd://127.0.0.2:9999/parent0', 'mode': 'existing', 'format': 'raw', 'sync': 'full'} } +{"execute": "drive-mirror", "arguments":{ "device": "colo-disk0", "job-id": "resync", "target": "nbd://127.0.0.2:9999/parent0", "mode": "existing", "format": "raw", "sync": "full"} } Wait until disk is synced, then: -{'execute': 'stop'} -{'execute': 'block-job-cancel', 'arguments':{ 'device': 'resync'} } +{"execute": "stop"} +{"execute": "block-job-cancel", "arguments":{ "device": "resync"} } -{'execute': 'human-monitor-command', 'arguments':{ 'command-line': 'drive_add -n buddy driver=replication,mode=primary,file.driver=nbd,file.host=127.0.0.2,file.port=9999,file.export=parent0,node-name=replication0'}} -{'execute': 'x-blockdev-change', 'arguments':{ 'parent': 'colo-disk0', 'node': 'replication0' } } +{"execute": "human-monitor-command", "arguments":{ "command-line": "drive_add -n buddy driver=replication,mode=primary,file.driver=nbd,file.host=127.0.0.2,file.port=9999,file.export=parent0,node-name=replication0"}} +{"execute": "x-blockdev-change", "arguments":{ "parent": "colo-disk0", "node": "replication0" } } -{'execute': 'object-add', 'arguments':{ 'qom-type': 'filter-mirror', 'id': 'm0', 'props': { 'netdev': 'hn0', 'queue': 'tx', 'outdev': 'mirror0' } } } -{'execute': 'object-add', 'arguments':{ 'qom-type': 'filter-redirector', 'id': 'redire0', 'props': { 'netdev': 'hn0', 'queue': 'rx', 'indev': 'compare_out' } } } -{'execute': 'object-add', 'arguments':{ 'qom-type': 'filter-redirector', 'id': 'redire1', 'props': { 'netdev': 'hn0', 'queue': 'rx', 'outdev': 'compare0' } } } -{'execute': 'object-add', 'arguments':{ 'qom-type': 'iothread', 'id': 'iothread1' } } -{'execute': 'object-add', 'arguments':{ 'qom-type': 'colo-compare', 'id': 'comp0', 'props': { 'primary_in': 'compare0-0', 'secondary_in': 'compare1', 'outdev': 'compare_out0', 'iothread': 'iothread1' } } } +{"execute": "object-add", "arguments":{ "qom-type": "filter-mirror", "id": "m0", "netdev": "hn0", "queue": "tx", "outdev": "mirror0" } } +{"execute": "object-add", "arguments":{ "qom-type": "filter-redirector", "id": "redire0", "netdev": "hn0", "queue": "rx", "indev": "compare_out" } } +{"execute": "object-add", "arguments":{ "qom-type": "filter-redirector", "id": "redire1", "netdev": "hn0", "queue": "rx", "outdev": "compare0" } } +{"execute": "object-add", "arguments":{ "qom-type": "iothread", "id": "iothread1" } } +{"execute": "object-add", "arguments":{ "qom-type": "colo-compare", "id": "comp0", "primary_in": "compare0-0", "secondary_in": "compare1", "outdev": "compare_out0", "iothread": "iothread1" } } -{'execute': 'migrate-set-capabilities', 'arguments':{ 'capabilities': [ {'capability': 'x-colo', 'state': true } ] } } -{'execute': 'migrate', 'arguments':{ 'uri': 'tcp:127.0.0.2:9998' } } +{"execute": "migrate-set-capabilities", "arguments":{ "capabilities": [ {"capability": "x-colo", "state": true } ] } } +{"execute": "migrate", "arguments":{ "uri": "tcp:127.0.0.2:9998" } } Note: If this Primary previously was a Secondary, then we need to insert the filters before the filter-rewriter by using the -"'insert': 'before', 'position': 'id=rew0'" Options. See below. +""insert": "before", "position": "id=rew0"" Options. See below. == Secondary resume replication == Become Primary and resume replication after new Secondary is up. 
Note @@ -309,23 +309,23 @@ that now 127.0.0.1 is the Secondary and 127.0.0.2 is the Primary. Start the new Secondary (Steps 2 and 3 above, but with primary_ip=127.0.0.2), then on the old Secondary: -{'execute': 'drive-mirror', 'arguments':{ 'device': 'colo-disk0', 'job-id': 'resync', 'target': 'nbd://127.0.0.1:9999/parent0', 'mode': 'existing', 'format': 'raw', 'sync': 'full'} } +{"execute": "drive-mirror", "arguments":{ "device": "colo-disk0", "job-id": "resync", "target": "nbd://127.0.0.1:9999/parent0", "mode": "existing", "format": "raw", "sync": "full"} } Wait until disk is synced, then: -{'execute': 'stop'} -{'execute': 'block-job-cancel', 'arguments':{ 'device': 'resync' } } +{"execute": "stop"} +{"execute": "block-job-cancel", "arguments":{ "device": "resync" } } -{'execute': 'human-monitor-command', 'arguments':{ 'command-line': 'drive_add -n buddy driver=replication,mode=primary,file.driver=nbd,file.host=127.0.0.1,file.port=9999,file.export=parent0,node-name=replication0'}} -{'execute': 'x-blockdev-change', 'arguments':{ 'parent': 'colo-disk0', 'node': 'replication0' } } +{"execute": "human-monitor-command", "arguments":{ "command-line": "drive_add -n buddy driver=replication,mode=primary,file.driver=nbd,file.host=127.0.0.1,file.port=9999,file.export=parent0,node-name=replication0"}} +{"execute": "x-blockdev-change", "arguments":{ "parent": "colo-disk0", "node": "replication0" } } -{'execute': 'object-add', 'arguments':{ 'qom-type': 'filter-mirror', 'id': 'm0', 'props': { 'insert': 'before', 'position': 'id=rew0', 'netdev': 'hn0', 'queue': 'tx', 'outdev': 'mirror0' } } } -{'execute': 'object-add', 'arguments':{ 'qom-type': 'filter-redirector', 'id': 'redire0', 'props': { 'insert': 'before', 'position': 'id=rew0', 'netdev': 'hn0', 'queue': 'rx', 'indev': 'compare_out' } } } -{'execute': 'object-add', 'arguments':{ 'qom-type': 'filter-redirector', 'id': 'redire1', 'props': { 'insert': 'before', 'position': 'id=rew0', 'netdev': 'hn0', 'queue': 'rx', 'outdev': 'compare0' } } } -{'execute': 'object-add', 'arguments':{ 'qom-type': 'iothread', 'id': 'iothread1' } } -{'execute': 'object-add', 'arguments':{ 'qom-type': 'colo-compare', 'id': 'comp0', 'props': { 'primary_in': 'compare0-0', 'secondary_in': 'compare1', 'outdev': 'compare_out0', 'iothread': 'iothread1' } } } +{"execute": "object-add", "arguments":{ "qom-type": "filter-mirror", "id": "m0", "insert": "before", "position": "id=rew0", "netdev": "hn0", "queue": "tx", "outdev": "mirror0" } } +{"execute": "object-add", "arguments":{ "qom-type": "filter-redirector", "id": "redire0", "insert": "before", "position": "id=rew0", "netdev": "hn0", "queue": "rx", "indev": "compare_out" } } +{"execute": "object-add", "arguments":{ "qom-type": "filter-redirector", "id": "redire1", "insert": "before", "position": "id=rew0", "netdev": "hn0", "queue": "rx", "outdev": "compare0" } } +{"execute": "object-add", "arguments":{ "qom-type": "iothread", "id": "iothread1" } } +{"execute": "object-add", "arguments":{ "qom-type": "colo-compare", "id": "comp0", "primary_in": "compare0-0", "secondary_in": "compare1", "outdev": "compare_out0", "iothread": "iothread1" } } -{'execute': 'migrate-set-capabilities', 'arguments':{ 'capabilities': [ {'capability': 'x-colo', 'state': true } ] } } -{'execute': 'migrate', 'arguments':{ 'uri': 'tcp:127.0.0.1:9998' } } +{"execute": "migrate-set-capabilities", "arguments":{ "capabilities": [ {"capability": "x-colo", "state": true } ] } } +{"execute": "migrate", "arguments":{ "uri": "tcp:127.0.0.1:9998" } } == TODO == 1. 
Support shared storage. diff --git a/docs/about/build-platforms.rst b/docs/about/build-platforms.rst index 692323609e..1c1e7b9e11 100644 --- a/docs/about/build-platforms.rst +++ b/docs/about/build-platforms.rst @@ -29,6 +29,41 @@ The `Repology`_ site is a useful resource to identify currently shipped versions of software in various operating systems, though it does not cover all distros listed below. +Supported host architectures +---------------------------- + +Those hosts are officially supported, with various accelerators: + + .. list-table:: + :header-rows: 1 + + * - CPU Architecture + - Accelerators + * - Arm + - kvm (64 bit only), tcg, xen + * - MIPS (little endian only) + - kvm, tcg + * - PPC + - kvm, tcg + * - RISC-V + - kvm, tcg + * - s390x + - kvm, tcg + * - SPARC + - tcg + * - x86 + - hax, hvf (64 bit only), kvm, nvmm, tcg, whpx (64 bit only), xen + +Other host architectures are not supported. It is possible to build QEMU system +emulation on an unsupported host architecture using the configure +``--enable-tcg-interpreter`` option to enable the TCI support, but note that +this is very slow and is not recommended for normal use. QEMU user emulation +requires host-specific support for signal handling, therefore TCI won't help +on unsupported host architectures. + +Non-supported architectures may be removed in the future following the +:ref:`deprecation process`. + Linux OS, macOS, FreeBSD, NetBSD, OpenBSD ----------------------------------------- @@ -36,7 +71,10 @@ The project aims to support the most recent major version at all times. Support for the previous major version will be dropped 2 years after the new major version is released or when the vendor itself drops support, whichever comes first. In this context, third-party efforts to extend the lifetime of a distro -are not considered, even when they are endorsed by the vendor (eg. Debian LTS). +are not considered, even when they are endorsed by the vendor (eg. Debian LTS); +the same is true of repositories that contain packages backported from later +releases (e.g. Debian backports). Within each major release, only the most +recent minor release is considered. For the purposes of identifying supported software versions available on Linux, the project will look at CentOS, Debian, Fedora, openSUSE, RHEL, SLES and @@ -45,18 +83,30 @@ Ubuntu LTS. Other distros will be assumed to ship similar software versions. For FreeBSD and OpenBSD, decisions will be made based on the contents of the respective ports repository, while NetBSD will use the pkgsrc repository. -For macOS, `HomeBrew`_ will be used, although `MacPorts`_ is expected to carry +For macOS, `Homebrew`_ will be used, although `MacPorts`_ is expected to carry similar versions. Windows ------- -The project supports building with current versions of the MinGW toolchain, -hosted on Linux (Debian/Fedora). +The project aims to support the two most recent versions of Windows that are +still supported by the vendor. The minimum Windows API that is currently +targeted is "Windows 8", so theoretically the QEMU binaries can still be run +on older versions of Windows, too. However, such old versions of Windows are +not tested anymore, so it is recommended to use one of the latest versions of +Windows instead. -The version of the Windows API that's currently targeted is Vista / Server -2008. +The project supports building QEMU with current versions of the MinGW +toolchain, either hosted on Linux (Debian/Fedora) or via `MSYS2`_ on Windows. 
+A more recent Windows version is always preferred as it is less likely to have +problems with building via MSYS2. The building process of QEMU involves some +Python scripts that call os.symlink() which needs special attention for the +build process to successfully complete. On newer versions of Windows 10, +unprivileged accounts can create symlinks if Developer Mode is enabled. +When Developer Mode is not available/enabled, the SeCreateSymbolicLinkPrivilege +privilege is required, or the process must be run as an administrator. -.. _HomeBrew: https://brew.sh/ +.. _Homebrew: https://brew.sh/ .. _MacPorts: https://www.macports.org/ +.. _MSYS2: https://www.msys2.org/ .. _Repology: https://repology.org/ diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst index 6d438f1c8d..0b26c01da0 100644 --- a/docs/about/deprecated.rst +++ b/docs/about/deprecated.rst @@ -1,3 +1,5 @@ +.. _Deprecated features: + Deprecated features =================== @@ -37,15 +39,6 @@ should specify an ``audiodev=`` property. Additionally, when using vnc, you should specify an ``audiodev=`` property if you plan to transmit audio through the VNC protocol. -Creating sound card devices using ``-soundhw`` (since 5.1) -'''''''''''''''''''''''''''''''''''''''''''''''''''''''''' - -Sound card devices should be created using ``-device`` instead. The -names are the same for most devices. The exceptions are ``hda`` which -needs two devices (``-device intel-hda -device hda-duplex``) and -``pcspk`` which can be activated using ``-machine -pcspk-audiodev=``. - ``-chardev`` backend aliases ``tty`` and ``parport`` (since 6.0) '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' @@ -65,25 +58,6 @@ and will cause a warning. The replacement for the ``nodelay`` short-form boolean option is ``nodelay=on`` rather than ``delay=off``. -``--enable-fips`` (since 6.0) -''''''''''''''''''''''''''''' - -This option restricts usage of certain cryptographic algorithms when -the host is operating in FIPS mode. - -If FIPS compliance is required, QEMU should be built with the ``libgcrypt`` -library enabled as a cryptography provider. - -Neither the ``nettle`` library, or the built-in cryptography provider are -supported on FIPS enabled hosts. - -``-writeconfig`` (since 6.0) -''''''''''''''''''''''''''''' - -The ``-writeconfig`` option is not able to serialize the entire contents -of the QEMU command line. It is thus considered a failed experiment -and deprecated, with no current replacement. - Userspace local APIC with KVM (x86, since 6.0) '''''''''''''''''''''''''''''''''''''''''''''' @@ -107,68 +81,70 @@ the process listing. This is replaced by the new ``password-secret`` option which lets the password be securely provided on the command line using a ``secret`` object instance. -``opened`` property of ``rng-*`` objects (since 6.0.0) -'''''''''''''''''''''''''''''''''''''''''''''''''''''' +``-smp`` ("parameter=0" SMP configurations) (since 6.2) +''''''''''''''''''''''''''''''''''''''''''''''''''''''' -The only effect of specifying ``opened=on`` in the command line or QMP -``object-add`` is that the device is opened immediately, possibly before all -other options have been processed. This will either have no effect (if -``opened`` was the last option) or cause errors. The property is therefore -useless and should not be specified. +Specified CPU topology parameters must be greater than zero. 
-``loaded`` property of ``secret`` and ``secret_keyring`` objects (since 6.0.0) -'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' +In the SMP configuration, users should either provide a CPU topology +parameter with a reasonable value (greater than zero) or just omit it +and QEMU will compute the missing value. -The only effect of specifying ``loaded=on`` in the command line or QMP -``object-add`` is that the secret is loaded immediately, possibly before all -other options have been processed. This will either have no effect (if -``loaded`` was the last option) or cause options to be effectively ignored as -if they were not given. The property is therefore useless and should not be -specified. +However, historically it was implicitly allowed for users to provide +a parameter with zero value, which is meaningless and could also possibly +cause unexpected results in the -smp parsing. So support for this kind of +configurations (e.g. -smp 8,sockets=0) is deprecated since 6.2 and will +be removed in the near future, users have to ensure that all the topology +members described with -smp are greater than zero. -``-display sdl,window_close=...`` (since 6.1) -''''''''''''''''''''''''''''''''''''''''''''' +Plugin argument passing through ``arg=`` (since 6.1) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' -Use ``-display sdl,window-close=...`` instead (i.e. with a minus instead of -an underscore between "window" and "close"). +Passing TCG plugins arguments through ``arg=`` is redundant is makes the +command-line less readable, especially when the argument itself consist of a +name and a value, e.g. ``-plugin plugin_name,arg="arg_name=arg_value"``. +Therefore, the usage of ``arg`` is redundant. Single-word arguments are treated +as short-form boolean values, and passed to plugins as ``arg_name=on``. +However, short-form booleans are deprecated and full explicit ``arg_name=on`` +form is preferred. -``-no-quit`` (since 6.1) -'''''''''''''''''''''''' +``-drive if=none`` for the sifive_u OTP device (since 6.2) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''' -The ``-no-quit`` is a synonym for ``-display ...,window-close=off`` which -should be used instead. +Using ``-drive if=none`` to configure the OTP device of the sifive_u +RISC-V machine is deprecated. Use ``-drive if=pflash`` instead. QEMU Machine Protocol (QMP) commands ------------------------------------ -``blockdev-open-tray``, ``blockdev-close-tray`` argument ``device`` (since 2.8.0) -''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' +``blockdev-open-tray``, ``blockdev-close-tray`` argument ``device`` (since 2.8) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' Use argument ``id`` instead. -``eject`` argument ``device`` (since 2.8.0) -''''''''''''''''''''''''''''''''''''''''''' +``eject`` argument ``device`` (since 2.8) +''''''''''''''''''''''''''''''''''''''''' Use argument ``id`` instead. -``blockdev-change-medium`` argument ``device`` (since 2.8.0) -'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' +``blockdev-change-medium`` argument ``device`` (since 2.8) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''' Use argument ``id`` instead. -``block_set_io_throttle`` argument ``device`` (since 2.8.0) -''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' +``block_set_io_throttle`` argument ``device`` (since 2.8) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''' Use argument ``id`` instead. 
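+As an illustrative sketch only (``virtio0`` is a placeholder id and the
+limits are arbitrary), the same call using the ``id`` argument looks like
+this::
+
+  { "execute": "block_set_io_throttle",
+    "arguments": { "id": "virtio0", "bps": 1000000, "bps_rd": 0, "bps_wr": 0,
+                   "iops": 0, "iops_rd": 0, "iops_wr": 0 } }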
-``blockdev-add`` empty string argument ``backing`` (since 2.10.0) -''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' +``blockdev-add`` empty string argument ``backing`` (since 2.10) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' Use argument value ``null`` instead. -``block-commit`` arguments ``base`` and ``top`` (since 3.1.0) -''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' +``block-commit`` arguments ``base`` and ``top`` (since 3.1) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' Use arguments ``base-node`` and ``top-node`` instead. @@ -179,6 +155,50 @@ Use the more generic commands ``block-export-add`` and ``block-export-del`` instead. As part of this deprecation, where ``nbd-server-add`` used a single ``bitmap``, the new ``block-export-add`` uses a list of ``bitmaps``. +``query-qmp-schema`` return value member ``values`` (since 6.2) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +Member ``values`` in return value elements with meta-type ``enum`` is +deprecated. Use ``members`` instead. + +``drive-backup`` (since 6.2) +'''''''''''''''''''''''''''' + +Use ``blockdev-backup`` in combination with ``blockdev-add`` instead. +This change primarily separates the creation/opening process of the backup +target with explicit, separate steps. ``blockdev-backup`` uses mostly the +same arguments as ``drive-backup``, except the ``format`` and ``mode`` +options are removed in favor of using explicit ``blockdev-create`` and +``blockdev-add`` calls. See :doc:`/interop/live-block-operations` for +details. + +Incorrectly typed ``device_add`` arguments (since 6.2) +'''''''''''''''''''''''''''''''''''''''''''''''''''''' + +Due to shortcomings in the internal implementation of ``device_add``, QEMU +incorrectly accepts certain invalid arguments: Any object or list arguments are +silently ignored. Other argument types are not checked, but an implicit +conversion happens, so that e.g. string values can be assigned to integer +device properties or vice versa. + +This is a bug in QEMU that will be fixed in the future so that previously +accepted incorrect commands will return an error. Users should make sure that +all arguments passed to ``device_add`` are consistent with the documented +property types. + +``query-sgx`` return value member ``section-size`` (since 7.0) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +Member ``section-size`` in return value elements with meta-type ``uint64`` is +deprecated. Use ``sections`` instead. + + +``query-sgx-capabilities`` return value member ``section-size`` (since 7.0) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +Member ``section-size`` in return value elements with meta-type ``uint64`` is +deprecated. Use ``sections`` instead. + System accelerators ------------------- @@ -188,38 +208,46 @@ MIPS ``Trap-and-Emul`` KVM support (since 6.0) The MIPS ``Trap-and-Emul`` KVM host and guest support has been removed from Linux upstream kernel, declare it deprecated. -System emulator CPUS --------------------- +Host Architectures +------------------ -``Icelake-Client`` CPU Model (since 5.2.0) -'''''''''''''''''''''''''''''''''''''''''' +BE MIPS (since 7.2) +''''''''''''''''''' -``Icelake-Client`` CPU Models are deprecated. Use ``Icelake-Server`` CPU -Models instead. +As Debian 10 ("Buster") moved into LTS the big endian 32 bit version of +MIPS moved out of support making it hard to maintain our +cross-compilation CI tests of the architecture. 
As we no longer have +CI coverage support may bitrot away before the deprecation process +completes. The little endian variants of MIPS (both 32 and 64 bit) are +still a supported host architecture. -MIPS ``I7200`` CPU Model (since 5.2) -'''''''''''''''''''''''''''''''''''' +QEMU API (QAPI) events +---------------------- + +``MEM_UNPLUG_ERROR`` (since 6.2) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +Use the more generic event ``DEVICE_UNPLUG_GUEST_ERROR`` instead. -The ``I7200`` guest CPU relies on the nanoMIPS ISA, which is deprecated -(the ISA has never been upstreamed to a compiler toolchain). Therefore -this CPU is also deprecated. System emulator machines ------------------------ -Raspberry Pi ``raspi2`` and ``raspi3`` machines (since 5.2) -''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' +Arm ``virt`` machine ``dtb-kaslr-seed`` property (since 7.1) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' -The Raspberry Pi machines come in various models (A, A+, B, B+). To be able -to distinguish which model QEMU is implementing, the ``raspi2`` and ``raspi3`` -machines have been renamed ``raspi2b`` and ``raspi3b``. +The ``dtb-kaslr-seed`` property on the ``virt`` board has been +deprecated; use the new name ``dtb-randomness`` instead. The new name +better reflects the way this property affects all random data within +the device tree blob, not just the ``kaslr-seed`` node. -Aspeed ``swift-bmc`` machine (since 6.1) -'''''''''''''''''''''''''''''''''''''''' +``pc-i440fx-1.4`` up to ``pc-i440fx-1.7`` (since 7.0) +''''''''''''''''''''''''''''''''''''''''''''''''''''' + +These old machine types are quite neglected nowadays and thus might have +various pitfalls with regards to live migration. Use a newer machine type +instead. -This machine is deprecated because we have enough AST2500 based OpenPOWER -machines. It can be easily replaced by the ``witherspoon-bmc`` or the -``romulus-bmc`` machines. Backend options --------------- @@ -245,8 +273,8 @@ Device options Emulated device options ''''''''''''''''''''''' -``-device virtio-blk,scsi=on|off`` (since 5.0.0) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +``-device virtio-blk,scsi=on|off`` (since 5.0) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The virtio-blk SCSI passthrough feature is a legacy VIRTIO feature. VIRTIO 1.0 and later do not support it because the virtio-scsi device was introduced for @@ -255,17 +283,42 @@ full SCSI support. Use virtio-scsi instead when SCSI passthrough is required. Note this also applies to ``-device virtio-blk-pci,scsi=on|off``, which is an alias. +``-device sga`` (since 6.2) +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``sga`` device loads an option ROM for x86 targets which enables +SeaBIOS to send messages to the serial console. SeaBIOS 1.11.0 onwards +contains native support for this feature and thus use of the option +ROM approach is obsolete. The native SeaBIOS support can be activated +by using ``-machine graphics=off``. + +``-device nvme-ns,eui64-default=on|off`` (since 7.1) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In QEMU versions 6.1, 6.2 and 7.0, the ``nvme-ns`` generates an EUI-64 +identifier that is not globally unique. If an EUI-64 identifier is required, the +user must set it explicitly using the ``nvme-ns`` device parameter ``eui64``. 
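+As a sketch only (the serial, the drive id ``nvm0`` and the value are
+placeholders), an explicit identifier can be assigned on the command line
+like this::
+
+  -device nvme,serial=deadbeef -device nvme-ns,drive=nvm0,eui64=0x5254000123456789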
+ +``-device nvme,use-intel-id=on|off`` (since 7.1) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``nvme`` device originally used a PCI Vendor/Device Identifier combination +from Intel that was not properly allocated. Since version 5.2, the controller +has used a properly allocated identifier. Deprecate the ``use-intel-id`` +machine compatibility parameter. + + Block device options '''''''''''''''''''' -``"backing": ""`` (since 2.12.0) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +``"backing": ""`` (since 2.12) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In order to prevent QEMU from automatically opening an image's backing chain, use ``"backing": null`` instead. -``rbd`` keyvalue pair encoded filenames: ``""`` (since 3.1.0) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +``rbd`` keyvalue pair encoded filenames: ``""`` (since 3.1) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Options for ``rbd`` should be specified according to its runtime options, like other block drivers. Legacy parsing of keyvalue pair encoded @@ -280,31 +333,11 @@ The above, converted to the current supported format:: json:{"file.driver":"rbd", "file.pool":"rbd", "file.image":"name"} -linux-user mode CPUs --------------------- - -``ppc64abi32`` CPUs (since 5.2.0) -''''''''''''''''''''''''''''''''' - -The ``ppc64abi32`` architecture has a number of issues which regularly -trip up our CI testing and is suspected to be quite broken. For that -reason the maintainers strongly suspect no one actually uses it. - -MIPS ``I7200`` CPU (since 5.2) -'''''''''''''''''''''''''''''' - -The ``I7200`` guest CPU relies on the nanoMIPS ISA, which is deprecated -(the ISA has never been upstreamed to a compiler toolchain). Therefore -this CPU is also deprecated. - -Related binaries ----------------- - Backwards compatibility ----------------------- -Runnability guarantee of CPU models (since 4.1.0) -''''''''''''''''''''''''''''''''''''''''''''''''' +Runnability guarantee of CPU models (since 4.1) +''''''''''''''''''''''''''''''''''''''''''''''' Previous versions of QEMU never changed existing CPU models in ways that introduced additional host software or hardware @@ -329,11 +362,38 @@ versions, aliases will point to newer CPU model versions depending on the machine type, so management software must resolve CPU model aliases before starting a virtual machine. -Guest Emulator ISAs -------------------- +Tools +----- -nanoMIPS ISA -'''''''''''' +virtiofsd +''''''''' -The ``nanoMIPS`` ISA has never been upstreamed to any compiler toolchain. -As it is hard to generate binaries for it, declare it deprecated. +There is a new Rust implementation of ``virtiofsd`` at +``https://gitlab.com/virtio-fs/virtiofsd``; +since this is now marked stable, new development should be done on that +rather than the existing C version in the QEMU tree. +The C version will still accept fixes and patches that +are already in development for the moment, but will eventually +be deleted from this tree. +New deployments should use the Rust version, and existing systems +should consider moving to it. The command line and feature set +is very close and moving should be simple. + + +QEMU guest agent +---------------- + +``--blacklist`` command line option (since 7.2) +''''''''''''''''''''''''''''''''''''''''''''''' + +``--blacklist`` has been replaced by ``--block-rpcs`` (which is a better +wording for what this option does). 
The short form ``-b`` still stays +the same and thus is the preferred way for scripts that should run with +both, older and future versions of QEMU. + +``blacklist`` config file option (since 7.2) +'''''''''''''''''''''''''''''''''''''''''''' + +The ``blacklist`` config file option has been renamed to ``block-rpcs`` +(to be in sync with the renaming of the corresponding command line +option). diff --git a/docs/about/index.rst b/docs/about/index.rst index beb762aa0a..5bea653c07 100644 --- a/docs/about/index.rst +++ b/docs/about/index.rst @@ -1,5 +1,6 @@ +---------- About QEMU -========== +---------- QEMU is a generic and open source machine emulator and virtualizer. diff --git a/docs/about/removed-features.rst b/docs/about/removed-features.rst index cbfa1a8e31..63df9848fd 100644 --- a/docs/about/removed-features.rst +++ b/docs/about/removed-features.rst @@ -140,18 +140,79 @@ Use ``-rtc driftfix=slew`` instead. Replaced by ``-rtc base=date``. -``-vnc ...,tls=...``, ``-vnc ...,x509=...`` & ``-vnc ...,x509verify=...`` -''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' +``-vnc ...,tls=...``, ``-vnc ...,x509=...`` & ``-vnc ...,x509verify=...`` (removed in 3.1) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' The "tls-creds" option should be used instead to point to a "tls-creds-x509" object created using "-object". +``-mem-path`` fallback to RAM (removed in 5.0) +'''''''''''''''''''''''''''''''''''''''''''''' + +If guest RAM allocation from file pointed by ``mem-path`` failed, +QEMU was falling back to allocating from RAM, which might have resulted +in unpredictable behavior since the backing file specified by the user +as ignored. Currently, users are responsible for making sure the backing storage +specified with ``-mem-path`` can actually provide the guest RAM configured with +``-m`` and QEMU fails to start up if RAM allocation is unsuccessful. + ``-net ...,name=...`` (removed in 5.1) '''''''''''''''''''''''''''''''''''''' The ``name`` parameter of the ``-net`` option was a synonym for the ``id`` parameter, which should now be used instead. +``-numa node,mem=...`` (removed in 5.1) +''''''''''''''''''''''''''''''''''''''' + +The parameter ``mem`` of ``-numa node`` was used to assign a part of guest RAM +to a NUMA node. But when using it, it's impossible to manage a specified RAM +chunk on the host side (like bind it to a host node, setting bind policy, ...), +so the guest ends up with the fake NUMA configuration with suboptiomal +performance. +However since 2014 there is an alternative way to assign RAM to a NUMA node +using parameter ``memdev``, which does the same as ``mem`` and adds +means to actually manage node RAM on the host side. Use parameter ``memdev`` +with *memory-backend-ram* backend as replacement for parameter ``mem`` +to achieve the same fake NUMA effect or a properly configured +*memory-backend-file* backend to actually benefit from NUMA configuration. +New machine versions (since 5.1) will not accept the option but it will still +work with old machine types. User can check the QAPI schema to see if the legacy +option is supported by looking at MachineInfo::numa-mem-supported property. + +``-numa`` node (without memory specified) (removed in 5.2) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +Splitting RAM by default between NUMA nodes had the same issues as ``mem`` +parameter with the difference that the role of the user plays QEMU using +implicit generic or board specific splitting rule. 
+Use ``memdev`` with *memory-backend-ram* backend or ``mem`` (if +it's supported by used machine type) to define mapping explicitly instead. +Users of existing VMs, wishing to preserve the same RAM distribution, should +configure it explicitly using ``-numa node,memdev`` options. Current RAM +distribution can be retrieved using HMP command ``info numa`` and if separate +memory devices (pc|nv-dimm) are present use ``info memory-device`` and subtract +device memory from output of ``info numa``. + +``-smp`` (invalid topologies) (removed in 5.2) +'''''''''''''''''''''''''''''''''''''''''''''' + +CPU topology properties should describe whole machine topology including +possible CPUs. + +However, historically it was possible to start QEMU with an incorrect topology +where *n* <= *sockets* * *cores* * *threads* < *maxcpus*, +which could lead to an incorrect topology enumeration by the guest. +Support for invalid topologies is removed, the user must ensure +topologies described with -smp include all possible cpus, i.e. +*sockets* * *cores* * *threads* = *maxcpus*. + +``-machine enforce-config-section=on|off`` (removed in 5.2) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +The ``enforce-config-section`` property was replaced by the +``-global migration.send-configuration={on|off}`` option. + ``-no-kvm`` (removed in 5.2) '''''''''''''''''''''''''''' @@ -194,8 +255,8 @@ by the ``tls-authz`` and ``sasl-authz`` options. The ``pretty=on|off`` switch has no effect for HMP monitors and its use is rejected. -``-drive file=json:{...{'driver':'file'}}`` (removed 6.0) -''''''''''''''''''''''''''''''''''''''''''''''''''''''''' +``-drive file=json:{...{'driver':'file'}}`` (removed in 6.0) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' The 'file' driver for drives is no longer appropriate for character or host devices and will only accept regular files (S_IFREG). The correct driver @@ -269,11 +330,90 @@ RISC-V firmware not booted by default (removed in 5.1) QEMU 5.1 changes the default behaviour from ``-bios none`` to ``-bios default`` for the RISC-V ``virt`` machine and ``sifive_u`` machine. +``-no-quit`` (removed in 7.0) +''''''''''''''''''''''''''''' + +The ``-no-quit`` was a synonym for ``-display ...,window-close=off`` which +should be used instead. + +``--enable-fips`` (removed in 7.1) +'''''''''''''''''''''''''''''''''' + +This option restricted usage of certain cryptographic algorithms when +the host is operating in FIPS mode. + +If FIPS compliance is required, QEMU should be built with the ``libgcrypt`` +or ``gnutls`` library enabled as a cryptography provider. + +Neither the ``nettle`` library, or the built-in cryptography provider are +supported on FIPS enabled hosts. + +``-writeconfig`` (removed in 7.1) +''''''''''''''''''''''''''''''''' + +The ``-writeconfig`` option was not able to serialize the entire contents +of the QEMU command line. It is thus considered a failed experiment +and removed without a replacement. + +``loaded`` property of ``secret`` and ``secret_keyring`` objects (removed in 7.1) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +The ``loaded=on`` option in the command line or QMP ``object-add`` either had +no effect (if ``loaded`` was the last option) or caused options to be +effectively ignored as if they were not given. The property is therefore +useless and should simply be removed. 
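+As an illustrative sketch (``sec0`` and ``passwd.txt`` are placeholders), a
+secret is now simply created without that property::
+
+  -object secret,id=sec0,file=passwd.txt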
+ +``opened`` property of ``rng-*`` objects (removed in 7.1) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +The ``opened=on`` option in the command line or QMP ``object-add`` either had +no effect (if ``opened`` was the last option) or caused errors. The property +is therefore useless and should simply be removed. + +``-display sdl,window_close=...`` (removed in 7.1) +'''''''''''''''''''''''''''''''''''''''''''''''''' + +Use ``-display sdl,window-close=...`` instead (i.e. with a minus instead of +an underscore between "window" and "close"). + +``-alt-grab`` and ``-display sdl,alt_grab=on`` (removed in 7.1) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +Use ``-display sdl,grab-mod=lshift-lctrl-lalt`` instead. + +``-ctrl-grab`` and ``-display sdl,ctrl_grab=on`` (removed in 7.1) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +Use ``-display sdl,grab-mod=rctrl`` instead. + +``-sdl`` (removed in 7.1) +''''''''''''''''''''''''' + +Use ``-display sdl`` instead. + +``-curses`` (removed in 7.1) +'''''''''''''''''''''''''''' + +Use ``-display curses`` instead. + +Creating sound card devices using ``-soundhw`` (removed in 7.1) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +Sound card devices should be created using ``-device`` or ``-audio``. +The exception is ``pcspk`` which can be activated using ``-machine +pcspk-audiodev=``. + +``-watchdog`` (since 7.2) +''''''''''''''''''''''''' + +Use ``-device`` instead. + + QEMU Machine Protocol (QMP) commands ------------------------------------ -``block-dirty-bitmap-add`` "autoload" parameter (removed in 4.2.0) -'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' +``block-dirty-bitmap-add`` "autoload" parameter (removed in 4.2) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' The "autoload" parameter has been ignored since 2.12.0. All bitmaps are automatically loaded from qcow2 images. @@ -287,7 +427,8 @@ documentation of ``query-hotpluggable-cpus`` for additional details. ``change`` (removed in 6.0) ''''''''''''''''''''''''''' -Use ``blockdev-change-medium`` or ``change-vnc-password`` instead. +Use ``blockdev-change-medium`` or ``change-vnc-password`` or +``display-update`` instead. ``query-events`` (removed in 6.0) ''''''''''''''''''''''''''''''''' @@ -456,20 +597,26 @@ Nobody was using this CPU emulation in QEMU, and there were no test images available to make sure that the code is still working, so it has been removed without replacement. -``lm32`` CPUs (removed in 6.1.0) -'''''''''''''''''''''''''''''''' +``lm32`` CPUs (removed in 6.1) +'''''''''''''''''''''''''''''' The only public user of this architecture was the milkymist project, which has been dead for years; there was never an upstream Linux port. Removed without replacement. -``unicore32`` CPUs (since 6.1.0) -'''''''''''''''''''''''''''''''' +``unicore32`` CPUs (removed in 6.1) +''''''''''''''''''''''''''''''''''' Support for this CPU was removed from the upstream Linux kernel, and there is no available upstream toolchain to build binaries for it. Removed without replacement. +x86 ``Icelake-Client`` CPU (removed in 7.1) +''''''''''''''''''''''''''''''''''''''''''' + +There isn't ever Icelake Client CPU, it is some wrong and imaginary one. +Use ``Icelake-Server`` instead. + System emulator machines ------------------------ @@ -513,6 +660,24 @@ This machine has been renamed ``fuloong2e``. 
These machine types were very old and likely could not be used for live migration from old QEMU versions anymore. Use a newer machine type instead. +Raspberry Pi ``raspi2`` and ``raspi3`` machines (removed in 6.2) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +The Raspberry Pi machines come in various models (A, A+, B, B+). To be able +to distinguish which model QEMU is implementing, the ``raspi2`` and ``raspi3`` +machines have been renamed ``raspi2b`` and ``raspi3b``. + +Aspeed ``swift-bmc`` machine (removed in 7.0) +''''''''''''''''''''''''''''''''''''''''''''' + +This machine was removed because it was unused. Alternative AST2500 based +OpenPOWER machines are ``witherspoon-bmc`` and ``romulus-bmc``. + +ppc ``taihu`` machine (removed in 7.2) +''''''''''''''''''''''''''''''''''''''''''''' + +This machine was removed because it was partially emulated and 405 +machines are very similar. Use the ``ref405ep`` machine instead. linux-user mode CPUs -------------------- @@ -526,6 +691,27 @@ the upstream Linux kernel in 2018, and it has also been dropped from glibc, so there is no new Linux development taking place with this architecture. For running the old binaries, you can use older versions of QEMU. +``ppc64abi32`` CPUs (removed in 7.0) +'''''''''''''''''''''''''''''''''''' + +The ``ppc64abi32`` architecture has a number of issues which regularly +tripped up the CI testing and was suspected to be quite broken. For that +reason the maintainers strongly suspected no one actually used it. + + +TCG introspection features +-------------------------- + +TCG trace-events (since 6.2) +'''''''''''''''''''''''''''' + +The ability to add new TCG trace points had bit rotted and as the +feature can be replicated with TCG plugins it was removed. If +any user is currently using this feature and needs help with +converting to using TCG plugins they should contact the qemu-devel +mailing list. + + System emulator devices ----------------------- @@ -590,84 +776,8 @@ enforce that any failure to open the backing image (including if the backing file is missing or an incorrect format was specified) is an error when ``-u`` is not used. -Command line options --------------------- - -``-smp`` (invalid topologies) (removed 5.2) -''''''''''''''''''''''''''''''''''''''''''' - -CPU topology properties should describe whole machine topology including -possible CPUs. - -However, historically it was possible to start QEMU with an incorrect topology -where *n* <= *sockets* * *cores* * *threads* < *maxcpus*, -which could lead to an incorrect topology enumeration by the guest. -Support for invalid topologies is removed, the user must ensure -topologies described with -smp include all possible cpus, i.e. -*sockets* * *cores* * *threads* = *maxcpus*. - -``-numa`` node (without memory specified) (removed 5.2) -''''''''''''''''''''''''''''''''''''''''''''''''''''''' - -Splitting RAM by default between NUMA nodes had the same issues as ``mem`` -parameter with the difference that the role of the user plays QEMU using -implicit generic or board specific splitting rule. -Use ``memdev`` with *memory-backend-ram* backend or ``mem`` (if -it's supported by used machine type) to define mapping explicitly instead. -Users of existing VMs, wishing to preserve the same RAM distribution, should -configure it explicitly using ``-numa node,memdev`` options. 
Current RAM -distribution can be retrieved using HMP command ``info numa`` and if separate -memory devices (pc|nv-dimm) are present use ``info memory-device`` and subtract -device memory from output of ``info numa``. - -``-numa node,mem=``\ *size* (removed in 5.1) -'''''''''''''''''''''''''''''''''''''''''''' - -The parameter ``mem`` of ``-numa node`` was used to assign a part of -guest RAM to a NUMA node. But when using it, it's impossible to manage a specified -RAM chunk on the host side (like bind it to a host node, setting bind policy, ...), -so the guest ends up with the fake NUMA configuration with suboptiomal performance. -However since 2014 there is an alternative way to assign RAM to a NUMA node -using parameter ``memdev``, which does the same as ``mem`` and adds -means to actually manage node RAM on the host side. Use parameter ``memdev`` -with *memory-backend-ram* backend as replacement for parameter ``mem`` -to achieve the same fake NUMA effect or a properly configured -*memory-backend-file* backend to actually benefit from NUMA configuration. -New machine versions (since 5.1) will not accept the option but it will still -work with old machine types. User can check the QAPI schema to see if the legacy -option is supported by looking at MachineInfo::numa-mem-supported property. - -``-mem-path`` fallback to RAM (removed in 5.0) -'''''''''''''''''''''''''''''''''''''''''''''' - -If guest RAM allocation from file pointed by ``mem-path`` failed, -QEMU was falling back to allocating from RAM, which might have resulted -in unpredictable behavior since the backing file specified by the user -as ignored. Currently, users are responsible for making sure the backing storage -specified with ``-mem-path`` can actually provide the guest RAM configured with -``-m`` and QEMU fails to start up if RAM allocation is unsuccessful. - -``-smp`` (invalid topologies) (removed 5.2) -''''''''''''''''''''''''''''''''''''''''''' - -CPU topology properties should describe whole machine topology including -possible CPUs. - -However, historically it was possible to start QEMU with an incorrect topology -where *n* <= *sockets* * *cores* * *threads* < *maxcpus*, -which could lead to an incorrect topology enumeration by the guest. -Support for invalid topologies is removed, the user must ensure -topologies described with -smp include all possible cpus, i.e. -*sockets* * *cores* * *threads* = *maxcpus*. - -``-machine enforce-config-section=on|off`` (removed 5.2) -'''''''''''''''''''''''''''''''''''''''''''''''''''''''' - -The ``enforce-config-section`` property was replaced by the -``-global migration.send-configuration={on|off}`` option. - -qemu-img amend to adjust backing file (removed in 6.1) -'''''''''''''''''''''''''''''''''''''''''''''''''''''' +``qemu-img amend`` to adjust backing file (removed in 6.1) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''' The use of ``qemu-img amend`` to modify the name or format of a qcow2 backing image was never fully documented or tested, and interferes @@ -678,8 +788,8 @@ backing chain should be performed with ``qemu-img rebase -u`` either before or after the remaining changes being performed by amend, as appropriate. 
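+As a sketch of the recommended approach (file names are placeholders only)::
+
+  qemu-img rebase -u -b new-base.qcow2 -F qcow2 overlay.qcow2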
-qemu-img backing file without format (removed in 6.1) -''''''''''''''''''''''''''''''''''''''''''''''''''''' +``qemu-img`` backing file without format (removed in 6.1) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''' The use of ``qemu-img create``, ``qemu-img rebase``, or ``qemu-img convert`` to create or modify an image that depends on a backing file diff --git a/docs/amd-memory-encryption.txt b/docs/amd-memory-encryption.txt deleted file mode 100644 index ffca382b5f..0000000000 --- a/docs/amd-memory-encryption.txt +++ /dev/null @@ -1,148 +0,0 @@ -Secure Encrypted Virtualization (SEV) is a feature found on AMD processors. - -SEV is an extension to the AMD-V architecture which supports running encrypted -virtual machines (VMs) under the control of KVM. Encrypted VMs have their pages -(code and data) secured such that only the guest itself has access to the -unencrypted version. Each encrypted VM is associated with a unique encryption -key; if its data is accessed by a different entity using a different key the -encrypted guests data will be incorrectly decrypted, leading to unintelligible -data. - -Key management for this feature is handled by a separate processor known as the -AMD secure processor (AMD-SP), which is present in AMD SOCs. Firmware running -inside the AMD-SP provides commands to support a common VM lifecycle. This -includes commands for launching, snapshotting, migrating and debugging the -encrypted guest. These SEV commands can be issued via KVM_MEMORY_ENCRYPT_OP -ioctls. - -Secure Encrypted Virtualization - Encrypted State (SEV-ES) builds on the SEV -support to additionally protect the guest register state. In order to allow a -hypervisor to perform functions on behalf of a guest, there is architectural -support for notifying a guest's operating system when certain types of VMEXITs -are about to occur. This allows the guest to selectively share information with -the hypervisor to satisfy the requested function. - -Launching ---------- -Boot images (such as bios) must be encrypted before a guest can be booted. The -MEMORY_ENCRYPT_OP ioctl provides commands to encrypt the images: LAUNCH_START, -LAUNCH_UPDATE_DATA, LAUNCH_MEASURE and LAUNCH_FINISH. These four commands -together generate a fresh memory encryption key for the VM, encrypt the boot -images and provide a measurement than can be used as an attestation of a -successful launch. - -For a SEV-ES guest, the LAUNCH_UPDATE_VMSA command is also used to encrypt the -guest register state, or VM save area (VMSA), for all of the guest vCPUs. - -LAUNCH_START is called first to create a cryptographic launch context within -the firmware. To create this context, guest owner must provide a guest policy, -its public Diffie-Hellman key (PDH) and session parameters. These inputs -should be treated as a binary blob and must be passed as-is to the SEV firmware. - -The guest policy is passed as plaintext. A hypervisor may choose to read it, -but should not modify it (any modification of the policy bits will result -in bad measurement). The guest policy is a 4-byte data structure containing -several flags that restricts what can be done on a running SEV guest. -See KM Spec section 3 and 6.2 for more details. 
- -The guest policy can be provided via the 'policy' property (see below) - -# ${QEMU} \ - sev-guest,id=sev0,policy=0x1...\ - -Setting the "SEV-ES required" policy bit (bit 2) will launch the guest as a -SEV-ES guest (see below) - -# ${QEMU} \ - sev-guest,id=sev0,policy=0x5...\ - -The guest owner provided DH certificate and session parameters will be used to -establish a cryptographic session with the guest owner to negotiate keys used -for the attestation. - -The DH certificate and session blob can be provided via the 'dh-cert-file' and -'session-file' properties (see below) - -# ${QEMU} \ - sev-guest,id=sev0,dh-cert-file=,session-file= - -LAUNCH_UPDATE_DATA encrypts the memory region using the cryptographic context -created via the LAUNCH_START command. If required, this command can be called -multiple times to encrypt different memory regions. The command also calculates -the measurement of the memory contents as it encrypts. - -LAUNCH_UPDATE_VMSA encrypts all the vCPU VMSAs for a SEV-ES guest using the -cryptographic context created via the LAUNCH_START command. The command also -calculates the measurement of the VMSAs as it encrypts them. - -LAUNCH_MEASURE can be used to retrieve the measurement of encrypted memory and, -for a SEV-ES guest, encrypted VMSAs. This measurement is a signature of the -memory contents and, for a SEV-ES guest, the VMSA contents, that can be sent -to the guest owner as an attestation that the memory and VMSAs were encrypted -correctly by the firmware. The guest owner may wait to provide the guest -confidential information until it can verify the attestation measurement. -Since the guest owner knows the initial contents of the guest at boot, the -attestation measurement can be verified by comparing it to what the guest owner -expects. - -LAUNCH_FINISH finalizes the guest launch and destroys the cryptographic -context. - -See SEV KM API Spec [1] 'Launching a guest' usage flow (Appendix A) for the -complete flow chart. - -To launch a SEV guest - -# ${QEMU} \ - -machine ...,confidential-guest-support=sev0 \ - -object sev-guest,id=sev0,cbitpos=47,reduced-phys-bits=1 - -To launch a SEV-ES guest - -# ${QEMU} \ - -machine ...,confidential-guest-support=sev0 \ - -object sev-guest,id=sev0,cbitpos=47,reduced-phys-bits=1,policy=0x5 - -An SEV-ES guest has some restrictions as compared to a SEV guest. Because the -guest register state is encrypted and cannot be updated by the VMM/hypervisor, -a SEV-ES guest: - - Does not support SMM - SMM support requires updating the guest register - state. - - Does not support reboot - a system reset requires updating the guest register - state. - - Requires in-kernel irqchip - the burden is placed on the hypervisor to - manage booting APs. - -Debugging ------------ -Since the memory contents of a SEV guest are encrypted, hypervisor access to -the guest memory will return cipher text. If the guest policy allows debugging, -then a hypervisor can use the DEBUG_DECRYPT and DEBUG_ENCRYPT commands to access -the guest memory region for debug purposes. This is not supported in QEMU yet. 
- -Snapshot/Restore ------------------ -TODO - -Live Migration ----------------- -TODO - -References ------------------ - -AMD Memory Encryption whitepaper: -https://developer.amd.com/wordpress/media/2013/12/AMD_Memory_Encryption_Whitepaper_v7-Public.pdf - -Secure Encrypted Virtualization Key Management: -[1] http://developer.amd.com/wordpress/media/2017/11/55766_SEV-KM-API_Specification.pdf - -KVM Forum slides: -http://www.linux-kvm.org/images/7/74/02x08A-Thomas_Lendacky-AMDs_Virtualizatoin_Memory_Encryption_Technology.pdf -https://www.linux-kvm.org/images/9/94/Extending-Secure-Encrypted-Virtualization-with-SEV-ES-Thomas-Lendacky-AMD.pdf - -AMD64 Architecture Programmer's Manual: - http://support.amd.com/TechDocs/24593.pdf - SME is section 7.10 - SEV is section 15.34 - SEV-ES is section 15.35 diff --git a/docs/block-replication.txt b/docs/block-replication.txt index 108e9166a8..e1b28a6cc1 100644 --- a/docs/block-replication.txt +++ b/docs/block-replication.txt @@ -79,7 +79,7 @@ Primary | || Secondary disk <--------- hidden-disk 5 <--------- || | | || | | || '-------------------------' - || drive-backup sync=none 6 + || blockdev-backup sync=none 6 1) The disk on the primary is represented by a block device with two children, providing replication between a primary disk and the host that @@ -101,7 +101,7 @@ should support bdrv_make_empty() and backing file. that is modified by the primary VM. It should also start as an empty disk, and the driver supports bdrv_make_empty() and backing file. -6) The drive-backup job (sync=none) is run to allow hidden-disk to buffer +6) The blockdev-backup job (sync=none) is run to allow hidden-disk to buffer any state that would otherwise be lost by the speculative write-through of the NBD server into the secondary disk. So before block replication, the primary disk and secondary disk should contain the same data. 
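+   A minimal sketch of such a job, with placeholder device/node names:
+   { "execute": "blockdev-backup",
+     "arguments": { "device": "secondary-disk0", "target": "hidden-disk0",
+                    "sync": "none" } }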
@@ -156,15 +156,15 @@ Primary: children.0.driver=raw Run qmp command in primary qemu: - { 'execute': 'human-monitor-command', - 'arguments': { - 'command-line': 'drive_add -n buddy driver=replication,mode=primary,file.driver=nbd,file.host=xxxx,file.port=xxxx,file.export=colo1,node-name=nbd_client1' + { "execute": "human-monitor-command", + "arguments": { + "command-line": "drive_add -n buddy driver=replication,mode=primary,file.driver=nbd,file.host=xxxx,file.port=xxxx,file.export=colo1,node-name=nbd_client1" } } - { 'execute': 'x-blockdev-change', - 'arguments': { - 'parent': 'colo1', - 'node': 'nbd_client1' + { "execute": "x-blockdev-change", + "arguments": { + "parent": "colo1", + "node": "nbd_client1" } } Note: @@ -179,7 +179,7 @@ Primary: Secondary: -drive if=none,driver=raw,file.filename=1.raw,id=colo1 \ - -drive if=none,id=childs1,driver=replication,mode=secondary,top-id=childs1 + -drive if=none,id=childs1,driver=replication,mode=secondary,top-id=top-disk1 file.file.filename=active_disk.qcow2,\ file.driver=qcow2,\ file.backing.file.filename=hidden_disk.qcow2,\ @@ -189,21 +189,21 @@ Secondary: vote-threshold=1,children.0=childs1 Then run qmp command in secondary qemu: - { 'execute': 'nbd-server-start', - 'arguments': { - 'addr': { - 'type': 'inet', - 'data': { - 'host': 'xxx', - 'port': 'xxx' + { "execute": "nbd-server-start", + "arguments": { + "addr": { + "type": "inet", + "data": { + "host": "xxx", + "port": "xxx" } } } } - { 'execute': 'nbd-server-add', - 'arguments': { - 'device': 'colo1', - 'writable': true + { "execute": "nbd-server-add", + "arguments": { + "device": "colo1", + "writable": true } } @@ -223,22 +223,22 @@ After Failover: Primary: The secondary host is down, so we should run the following qmp command to remove the nbd child from the quorum: - { 'execute': 'x-blockdev-change', - 'arguments': { - 'parent': 'colo1', - 'child': 'children.1' + { "execute": "x-blockdev-change", + "arguments": { + "parent": "colo1", + "child": "children.1" } } - { 'execute': 'human-monitor-command', - 'arguments': { - 'command-line': 'drive_del xxxx' + { "execute": "human-monitor-command", + "arguments": { + "command-line": "drive_del xxxx" } } Note: there is no qmp command to remove the blockdev now Secondary: The primary host is down, so we should do the following thing: - { 'execute': 'nbd-server-stop' } + { "execute": "nbd-server-stop" } Promote Secondary to Primary: see COLO-FT.txt diff --git a/docs/ccid.txt b/docs/ccid.txt deleted file mode 100644 index 2b85b1bd42..0000000000 --- a/docs/ccid.txt +++ /dev/null @@ -1,182 +0,0 @@ -QEMU CCID Device Documentation. - -Contents -1. USB CCID device -2. Building -3. Using ccid-card-emulated with hardware -4. Using ccid-card-emulated with certificates -5. Using ccid-card-passthru with client side hardware -6. Using ccid-card-passthru with client side certificates -7. Passthrough protocol scenario -8. libcacard - -1. USB CCID device - -The USB CCID device is a USB device implementing the CCID specification, which -lets one connect smart card readers that implement the same spec. For more -information see the specification: - - Universal Serial Bus - Device Class: Smart Card - CCID - Specification for - Integrated Circuit(s) Cards Interface Devices - Revision 1.1 - April 22rd, 2005 - -Smartcards are used for authentication, single sign on, decryption in -public/private schemes and digital signatures. 
A smartcard reader on the client -cannot be used on a guest with simple usb passthrough since it will then not be -available on the client, possibly locking the computer when it is "removed". On -the other hand this device can let you use the smartcard on both the client and -the guest machine. It is also possible to have a completely virtual smart card -reader and smart card (i.e. not backed by a physical device) using this device. - -2. Building - -The cryptographic functions and access to the physical card is done via the -libcacard library, whose development package must be installed prior to -building QEMU: - -In redhat/fedora: - yum install libcacard-devel -In ubuntu: - apt-get install libcacard-dev - -Configuring and building: - ./configure --enable-smartcard && make - - -3. Using ccid-card-emulated with hardware - -Assuming you have a working smartcard on the host with the current -user, using libcacard, QEMU acts as another client using ccid-card-emulated: - - qemu -usb -device usb-ccid -device ccid-card-emulated - - -4. Using ccid-card-emulated with certificates stored in files - -You must create the CA and card certificates. This is a one time process. -We use NSS certificates: - - mkdir fake-smartcard - cd fake-smartcard - certutil -N -d sql:$PWD - certutil -S -d sql:$PWD -s "CN=Fake Smart Card CA" -x -t TC,TC,TC -n fake-smartcard-ca - certutil -S -d sql:$PWD -t ,, -s "CN=John Doe" -n id-cert -c fake-smartcard-ca - certutil -S -d sql:$PWD -t ,, -s "CN=John Doe (signing)" --nsCertType smime -n signing-cert -c fake-smartcard-ca - certutil -S -d sql:$PWD -t ,, -s "CN=John Doe (encryption)" --nsCertType sslClient -n encryption-cert -c fake-smartcard-ca - -Note: you must have exactly three certificates. - -You can use the emulated card type with the certificates backend: - - qemu -usb -device usb-ccid -device ccid-card-emulated,backend=certificates,db=sql:$PWD,cert1=id-cert,cert2=signing-cert,cert3=encryption-cert - -To use the certificates in the guest, export the CA certificate: - - certutil -L -r -d sql:$PWD -o fake-smartcard-ca.cer -n fake-smartcard-ca - -and import it in the guest: - - certutil -A -d /etc/pki/nssdb -i fake-smartcard-ca.cer -t TC,TC,TC -n fake-smartcard-ca - -In a Linux guest you can then use the CoolKey PKCS #11 module to access -the card: - - certutil -d /etc/pki/nssdb -L -h all - -It will prompt you for the PIN (which is the password you assigned to the -certificate database early on), and then show you all three certificates -together with the manually imported CA cert: - - Certificate Nickname Trust Attributes - fake-smartcard-ca CT,C,C - John Doe:CAC ID Certificate u,u,u - John Doe:CAC Email Signature Certificate u,u,u - John Doe:CAC Email Encryption Certificate u,u,u - -If this does not happen, CoolKey is not installed or not registered with -NSS. Registration can be done from Firefox or the command line: - - modutil -dbdir /etc/pki/nssdb -add "CAC Module" -libfile /usr/lib64/pkcs11/libcoolkeypk11.so - modutil -dbdir /etc/pki/nssdb -list - - -5. Using ccid-card-passthru with client side hardware - -on the host specify the ccid-card-passthru device with a suitable chardev: - - qemu -chardev socket,server=on,host=0.0.0.0,port=2001,id=ccid,wait=off \ - -usb -device usb-ccid -device ccid-card-passthru,chardev=ccid - -on the client run vscclient, built when you built QEMU: - - vscclient 2001 - - -6. 
Using ccid-card-passthru with client side certificates - -This case is not particularly useful, but you can use it to debug -your setup if #4 works but #5 does not. - -Follow instructions as per #4, except run QEMU and vscclient as follows: -Run qemu as per #5, and run vscclient from the "fake-smartcard" -directory as follows: - - qemu -chardev socket,server=on,host=0.0.0.0,port=2001,id=ccid,wait=off \ - -usb -device usb-ccid -device ccid-card-passthru,chardev=ccid - vscclient -e "db=\"sql:$PWD\" use_hw=no soft=(,Test,CAC,,id-cert,signing-cert,encryption-cert)" 2001 - - -7. Passthrough protocol scenario - -This is a typical interchange of messages when using the passthru card device. -usb-ccid is a usb device. It defaults to an unattached usb device on startup. -usb-ccid expects a chardev and expects the protocol defined in -cac_card/vscard_common.h to be passed over that. -The usb-ccid device can be in one of three modes: - * detached - * attached with no card - * attached with card - -A typical interchange is: (the arrow shows who started each exchange, it can be client -originated or guest originated) - -client event | vscclient | passthru | usb-ccid | guest event ----------------------------------------------------------------------------------------------- - | VSC_Init | | | - | VSC_ReaderAdd | | attach | - | | | | sees new usb device. -card inserted -> | | | | - | VSC_ATR | insert | insert | see new card - | | | | - | VSC_APDU | VSC_APDU | | <- guest sends APDU -client<->physical | | | | -card APDU exchange| | | | -client response ->| VSC_APDU | VSC_APDU | | receive APDU response - ... - [APDU<->APDU repeats several times] - ... -card removed -> | | | | - | VSC_CardRemove | remove | remove | card removed - ... - [(card insert, apdu's, card remove) repeat] - ... -kill/quit | | | | - vscclient | | | | - | VSC_ReaderRemove | | detach | - | | | | usb device removed. - - -8. libcacard - -Both ccid-card-emulated and vscclient use libcacard as the card emulator. -libcacard implements a completely virtual CAC (DoD standard for smart -cards) compliant card and uses NSS to retrieve certificates and do -any encryption. The backend can then be a real reader and card, or -certificates stored in files. - -For documentation of the library see docs/libcacard.txt. - diff --git a/docs/conf.py b/docs/conf.py index ff6e92c6e2..e33cf3d381 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -73,8 +73,14 @@ needs_sphinx = '1.6' # ones. extensions = ['kerneldoc', 'qmp_lexer', 'hxtool', 'depfile', 'qapidoc'] +if sphinx.version_info[:3] > (4, 0, 0): + tags.add('sphinx4') + extensions += ['dbusdoc'] +else: + extensions += ['fakedbusdoc'] + # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = [os.path.join(qemu_docdir, '_templates')] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: @@ -85,9 +91,14 @@ source_suffix = '.rst' # The master toctree document. master_doc = 'index' +# Interpret `single-backticks` to be a cross-reference to any kind of +# referenceable object. Unresolvable or ambiguous references will emit a +# warning at build time. +default_role = 'any' + # General information about the project. 
project = u'QEMU' -copyright = u'2021, The QEMU Project Developers' +copyright = u'2022, The QEMU Project Developers' author = u'The QEMU Project Developers' # The version info for the project you're documenting, acts as replacement for @@ -115,7 +126,7 @@ finally: # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. @@ -166,6 +177,7 @@ html_theme = 'sphinx_rtd_theme' if LooseVersion(sphinx_rtd_theme.__version__) >= LooseVersion("0.4.3"): html_theme_options = { "style_nav_header_background": "#802400", + "navigation_with_keys": True, } html_logo = os.path.join(qemu_docdir, "../ui/icons/qemu_128x128.png") @@ -181,6 +193,10 @@ html_css_files = [ 'theme_overrides.css', ] +html_js_files = [ + 'custom.js', +] + html_context = { "display_gitlab": True, "gitlab_user": "qemu-project", @@ -301,3 +317,5 @@ kerneldoc_bin = ['perl', os.path.join(qemu_docdir, '../scripts/kernel-doc')] kerneldoc_srctree = os.path.join(qemu_docdir, '..') hxtool_srctree = os.path.join(qemu_docdir, '..') qapidoc_srctree = os.path.join(qemu_docdir, '..') +dbusdoc_srctree = os.path.join(qemu_docdir, '..') +dbus_index_common_prefix = ["org.qemu."] diff --git a/docs/devel/acpi-bits.rst b/docs/devel/acpi-bits.rst new file mode 100644 index 0000000000..4a94c7d83d --- /dev/null +++ b/docs/devel/acpi-bits.rst @@ -0,0 +1,141 @@ +============================================================================= +ACPI/SMBIOS avocado tests using biosbits +============================================================================= + +Biosbits is software written by Josh Triplett that can be downloaded +from https://biosbits.org/. The github codebase can be found +`here `__. It is software that executes +the bios components such as acpi and smbios tables directly through the acpica +bios interpreter (a freely available C based library written by Intel, +downloadable from https://acpica.org/ and included with biosbits) without an +operating system getting involved in between. +There are several advantages to directly testing the bios in a real physical +machine or VM as opposed to indirectly discovering bios issues through the +operating system. For one thing, the OSes tend to hide bios problems from the +end user. The other is that we have more control over what we want to test +and how, by directly using the acpica interpreter on top of the bios on a running +system. More details on the inspiration for developing biosbits and its real +life uses can be found in [#a]_ and [#b]_. +For QEMU, we maintain a fork of bios bits in gitlab along with all the +dependent submodules here: https://gitlab.com/qemu-project/biosbits-bits +This fork contains numerous fixes, a newer acpica and changes specific to +running these avocado QEMU tests using bits. The author of this document +is the sole maintainer of the QEMU fork of the bios bits repo. + +Under the directory ``tests/avocado/``, ``acpi-bits.py`` is a QEMU avocado +test that drives all this. + +A brief description of the various test files follows.
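For orientation, the following is a minimal sketch of the general shape of an avocado-based QEMU test. It is not the real ``acpi-bits.py``; the ``avocado_qemu.QemuSystemTest`` helper and the exact method names are assumptions based on the in-tree test support code, so treat it as illustrative only::

    # Hypothetical avocado test sketch, not the real acpi-bits.py.
    # Assumes the in-tree avocado_qemu helpers (tests/avocado/avocado_qemu).
    from avocado_qemu import QemuSystemTest


    class SketchTest(QemuSystemTest):
        """
        :avocado: tags=acpi
        """

        def test_vm_boots(self):
            # Arrange: build the QEMU command line for the test VM.
            self.vm.add_args('-m', '512')
            # Act: start the VM managed by the helper class.
            self.vm.launch()
            # Assert: query the monitor and check the VM is running.
            resp = self.vm.qmp('query-status')
            self.assertEqual(resp['return']['status'], 'running')
            self.vm.shutdown()

The real test additionally builds a biosbits ISO, boots it inside QEMU and parses the collected log, but the overall arrange/act/assert shape is the same.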
+ +Under ``tests/avocado/`` as the root we have: + +:: + + ├── acpi-bits + │ ├── bits-config + │ │ └── bits-cfg.txt + │ ├── bits-tests + │ ├── smbios.py2 + │ ├── testacpi.py2 + │ └── testcpuid.py2 + ├── acpi-bits.py + +* ``tests/avocado``: + + ``acpi-bits.py``: + This is the main python avocado test script that generates a + biosbits iso. It then spawns a QEMU VM with it, collects the log and reports + test failures. This is the script to look at if you want + to add or change some component of the log parsing, add a new command line + to alter how QEMU is spawned, etc. Test writers typically would not need to + modify this script unless they wanted to enhance or change the log parsing + for their tests. In order to enable debugging, you can set the **V=1** + environment variable. This enables verbose mode for the test and also dumps + the entire log from bios bits and more information in case a failure happens. + + In order to run this test, please perform the following steps from the QEMU + build directory: + :: + + $ make check-venv (needed only the first time to create the venv) + $ ./tests/venv/bin/avocado run -t acpi tests/avocado + + The above will run all acpi avocado tests including this one. + In order to run the individual tests, perform the following: + :: + + $ ./tests/venv/bin/avocado run tests/avocado/acpi-bits.py --tap - + + The above will produce output in tap format. You can omit "--tap -" at the + end and it will produce output like the following: + :: + + $ ./tests/venv/bin/avocado run tests/avocado/acpi-bits.py + Fetching asset from tests/avocado/acpi-bits.py:AcpiBitsTest.test_acpi_smbios_bits + JOB ID : eab225724da7b64c012c65705dc2fa14ab1defef + JOB LOG : /home/anisinha/avocado/job-results/job-2022-10-10T17.58-eab2257/job.log + (1/1) tests/avocado/acpi-bits.py:AcpiBitsTest.test_acpi_smbios_bits: PASS (33.09 s) + RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 0 + JOB TIME : 39.22 s + + You can inspect the log file for more information about the run or in order + to diagnose issues. If you pass V=1 in the environment, more diagnostic logs + will be found in the test log. + +* ``tests/avocado/acpi-bits/bits-config``: + + This location contains biosbits configuration files that determine how the + software runs the tests. + + ``bits-config.txt``: + This is the biosbits config file that determines what tests + or actions are performed by bits. The description of the config options is + provided in the file itself. + +* ``tests/avocado/acpi-bits/bits-tests``: + + This directory contains biosbits python based tests that are run from within + the biosbits environment in the spawned VM. New additions of test cases can + be made in the appropriate test file. For example, new acpi tests can go + into testacpi.py2 and one would call testsuite.add_test() to register the new + test so that it gets executed as a part of the ACPI tests. + It might be occasionally necessary to disable some subtests or add a new + test that belongs to a test suite not already present in this directory. To + do this, please clone the bits source from + https://gitlab.com/qemu-project/biosbits-bits/-/tree/qemu-bits. + Note that this is the "qemu-bits" branch and not the "bits" branch of the + repository. "qemu-bits" is the branch where we have made all the QEMU + specific enhancements and we must use the source from this branch only.
+ Copy the test suite/script that needs modification (addition of new tests + or disabling them) from the python directory into this directory. For + example, in order to change cpuid related tests, copy the following + file into this directory and rename it with a .py2 extension: + https://gitlab.com/qemu-project/biosbits-bits/-/blob/qemu-bits/python/testcpuid.py + Then make your additions and changes here. Therefore, the steps are: + + (a) Copy unmodified test script to this directory from bits source. + (b) Add a SPDX license header. + (c) Perform modifications to the test. + + Changes (a), (b) and (c) should go into separate commits so that the original + test script and the changes we have made are kept separate and clear. + + The test framework will then use your modified test script to run the test. + No further changes would be needed. Please check the logs to make sure that + appropriate changes have taken effect. + + The tests have an extension .py2 in order to indicate that: + + (a) They are python2.7 based scripts and not python 3 scripts. + (b) They are run from within the bios bits VM and are not subject to QEMU + build/test python script maintenance and dependency resolution. + (c) They need not be loaded by the avocado framework when running tests. + + +Author: Ani Sinha + +References: +----------- +.. [#a] https://blog.linuxplumbersconf.org/2011/ocw/system/presentations/867/original/bits.pdf +.. [#b] https://www.youtube.com/watch?v=36QIepyUuhg + diff --git a/docs/devel/build-system.rst b/docs/devel/build-system.rst index 3baec158f2..1894721743 100644 --- a/docs/devel/build-system.rst +++ b/docs/devel/build-system.rst @@ -42,73 +42,21 @@ perform a build: ../configure make -For now, checks on the compilation environment are found in configure -rather than meson.build, though this is expected to change. The command -line is parsed in the configure script and, whenever needed, converted -into the appropriate options to Meson. - -New checks should be added to Meson, which usually comprises the -following tasks: - - - Add a Meson build option to meson_options.txt. - - - Add support to the command line arg parser to handle any new - ``--enable-XXX``/``--disable-XXX`` flags required by the feature. - - - Add information to the help output message to report on the new - feature flag. - - - Add code to perform the actual feature check. - - - Add code to include the feature status in ``config-host.h`` - - - Add code to print out the feature status in the configure summary - upon completion. - - -Taking the probe for SDL2_Image as an example, we have the following pieces -in configure:: - - # Initial variable state - sdl_image=auto - - ..snip.. - - # Configure flag processing - --disable-sdl-image) sdl_image=disabled - ;; - --enable-sdl-image) sdl_image=enabled - ;; - - ..snip.. - - # Help output feature message - sdl-image SDL Image support for icons - - ..snip..
- - # Meson invocation - -Dsdl_image=$sdl_image - -In meson_options.txt:: - - option('sdl', type : 'feature', value : 'auto', - description: 'SDL Image support for icons') - -In meson.build:: - - # Detect dependency - sdl_image = dependency('SDL2_image', required: get_option('sdl_image'), - method: 'pkg-config', - kwargs: static_kwargs) - - # Create config-host.h (if applicable) - config_host_data.set('CONFIG_SDL_IMAGE', sdl_image.found()) - - # Summary - summary_info += {'SDL image support': sdl_image.found()} +The configure script automatically recognizes +command line options for which a same-named Meson option exists; +dashes in the command line are replaced with underscores. +Many checks on the compilation environment are still found in configure +rather than ``meson.build``, but new checks should be added directly to +``meson.build``. +Patches are also welcome to move existing checks from the configure +phase to ``meson.build``. When doing so, ensure that ``meson.build`` does +not use anymore the keys that you have removed from ``config-host.mak``. +Typically these will be replaced in ``meson.build`` by boolean variables, +``get_option('optname')`` invocations, or ``dep.found()`` expressions. +In general, the remaining checks have little or no interdependencies, +so they can be moved one by one. Helper functions ---------------- @@ -151,11 +99,6 @@ developers in checking for system features: Write a minimal C program main() function to the temporary file indicated by $TMPC -``feature_not_found $NAME $REMEDY`` - Print a message to stderr that the feature $NAME was not available - on the system, suggesting the user try $REMEDY to address the - problem. - ``error_exit $MESSAGE $MORE...`` Print $MESSAGE to stderr, followed by $MORE... and then exit from the configure script with non-zero status @@ -173,11 +116,11 @@ process for: 1) executables, which include: - - Tools - qemu-img, qemu-nbd, qga (guest agent), etc + - Tools - ``qemu-img``, ``qemu-nbd``, ``qga`` (guest agent), etc - - System emulators - qemu-system-$ARCH + - System emulators - ``qemu-system-$ARCH`` - - Userspace emulators - qemu-$ARCH + - Userspace emulators - ``qemu-$ARCH`` - Unit tests @@ -335,6 +278,60 @@ new target, or enabling new devices or hardware for a particular system/userspace emulation target +Adding checks +------------- + +New checks should be added to Meson. Compiler checks can be as simple as +the following:: + + config_host_data.set('HAVE_BTRFS_H', cc.has_header('linux/btrfs.h')) + +A more complex task such as adding a new dependency usually +comprises the following tasks: + + - Add a Meson build option to meson_options.txt. + + - Add code to perform the actual feature check. + + - Add code to include the feature status in ``config-host.h`` + + - Add code to print out the feature status in the configure summary + upon completion. + +Taking the probe for SDL2_Image as an example, we have the following +in ``meson_options.txt``:: + + option('sdl_image', type : 'feature', value : 'auto', + description: 'SDL Image support for icons') + +Unless the option was given a non-``auto`` value (on the configure +command line), the detection code must be performed only if the +dependency will be used:: + + sdl_image = not_found + if not get_option('sdl_image').auto() or have_system + sdl_image = dependency('SDL2_image', required: get_option('sdl_image'), + method: 'pkg-config', + static: enable_static) + endif + +This avoids warnings on static builds of user-mode emulators, for example. 
+Most of the libraries used by system-mode emulators are not available for +static linking. + +The other supporting code is generally simple:: + + # Create config-host.h (if applicable) + config_host_data.set('CONFIG_SDL_IMAGE', sdl_image.found()) + + # Summary + summary_info += {'SDL image support': sdl_image.found()} + +For the configure script to parse the new option, the +``scripts/meson-buildoptions.sh`` file must be up-to-date; ``make +update-buildoptions`` (or just ``make``) will take care of updating it. + + Support scripts --------------- @@ -380,6 +377,16 @@ phony target, while benchmarks are run with ``make bench``. Meson test suites such as ``unit`` can be ran with ``make check-unit`` too. It is also possible to run tests defined in meson.build with ``meson test``. +Useful make targets +------------------- + +``help`` + Print a help message for the most common build targets. + +``print-VAR`` + Print the value of the variable VAR. Useful for debugging the build + system. + Important files for the build system ==================================== @@ -404,7 +411,7 @@ number of dynamically created files listed later. ``tests/Makefile.include`` Rules for external test harnesses. These include the TCG tests, - ``qemu-iotests`` and the Avocado-based acceptance tests. + ``qemu-iotests`` and the Avocado-based integration tests. ``tests/docker/Makefile.include`` Rules for Docker tests. Like tests/Makefile, this file is included @@ -452,11 +459,10 @@ Built by Meson: scripts/make_device_config.sh program, feeding it the default-configs/$TARGET-NAME file as input. -``config-host.h``, ``$TARGET-NAME/config-target.h``, ``$TARGET-NAME/config-devices.h`` - These files are used by source code to determine what features - are enabled. They are generated from the contents of the corresponding - ``*.h`` files using the scripts/create_config program. This extracts - relevant variables and formats them as C preprocessor macros. +``config-host.h``, ``$TARGET_NAME-config-target.h``, ``$TARGET_NAME-config-devices.h`` + These files are used by source code to determine what features are + enabled. They are generated from the contents of the corresponding + ``*.mak`` files using Meson's ``configure_file()`` function. ``build.ninja`` The build rules. @@ -473,14 +479,3 @@ Built by Makefile: meson.build. The rules are produced from Meson's JSON description of tests (obtained with "meson introspect --tests") through the script scripts/mtest2make.py. - - -Useful make targets -------------------- - -``help`` - Print a help message for the most common build targets. - -``print-VAR`` - Print the value of the variable VAR. Useful for debugging the build - system. diff --git a/docs/devel/ci-definitions.rst.inc b/docs/devel/ci-definitions.rst.inc new file mode 100644 index 0000000000..6d5c6fd9f2 --- /dev/null +++ b/docs/devel/ci-definitions.rst.inc @@ -0,0 +1,121 @@ +Definition of terms +=================== + +This section defines the terms used in this document and correlates them with +what is currently used on QEMU. + +Automated tests +--------------- + +An automated test is written on a test framework using its generic test +functions/classes. The test framework can run the tests and report their +success or failure [1]_. + +An automated test has essentially three parts: + +1. The test initialization of the parameters, where the expected parameters, + like inputs and expected results, are set up; +2. The call to the code that should be tested; +3. 
An assertion, comparing the result from the previous call with the expected + result set during the initialization of the parameters. If the result + matches the expected result, the test has been successful; otherwise, it has + failed. + +Unit testing +------------ + +A unit test is responsible for exercising individual software components as a +unit, like interfaces, data structures, and functionality, uncovering errors +within the boundaries of a component. The verification effort is in the +smallest software unit and focuses on the internal processing logic and data +structures. A test case of unit tests should be designed to uncover errors due +to erroneous computations, incorrect comparisons, or improper control flow [2]_. + +On QEMU, unit testing is represented by the 'check-unit' target from 'make'. + +Functional testing +------------------ + +A functional test focuses on the functional requirement of the software. +Deriving sets of input conditions, the functional tests should fully exercise +all the functional requirements for a program. Functional testing is +complementary to other testing techniques, attempting to find errors like +incorrect or missing functions, interface errors, behavior errors, and +initialization and termination errors [3]_. + +On QEMU, functional testing is represented by the 'check-qtest' target from +'make'. + +System testing +-------------- + +System tests ensure all application elements mesh properly while the overall +functionality and performance are achieved [4]_. Some or all system components +are integrated to create a complete system to be tested as a whole. System +testing ensures that components are compatible, interact correctly, and +transfer the right data at the right time across their interfaces. As system +testing focuses on interactions, use case-based testing is a practical approach +to system testing [5]_. Note that, in some cases, system testing may require +interaction with third-party software, like operating system images, databases, +networks, and so on. + +On QEMU, system testing is represented by the 'check-avocado' target from +'make'. + +Flaky tests +----------- + +A flaky test is defined as a test that exhibits both a passing and a failing +result with the same code on different runs. Some usual reasons for an +intermittent/flaky test are async wait, concurrency, and test order dependency +[6]_. + +Gating +------ + +A gate restricts the move of code from one stage to another on a +test/deployment pipeline. The step move is granted with approval. The approval +can be a manual intervention or a set of tests succeeding [7]_. + +On QEMU, the gating process happens during the pull request. The approval is +done by the project leader running its own set of tests. The pull request gets +merged when the tests succeed. + +Continuous Integration (CI) +--------------------------- + +Continuous integration (CI) requires the builds of the entire application and +the execution of a comprehensive set of automated tests every time there is a +need to commit any set of changes [8]_. The automated tests can be composed of +the unit, functional, system, and other tests. + +Keynotes about continuous integration (CI) [9]_: + +1. System tests may depend on external software (operating system images, + firmware, database, network). +2. It may take a long time to build and test. It may be impractical to build + the system being developed several times per day. +3. 
If the development platform is different from the target platform, it may + not be possible to run system tests in the developer’s private workspace. + There may be differences in hardware, operating system, or installed + software. Therefore, more time is required for testing the system. + +References +---------- + +.. [1] Sommerville, Ian (2016). Software Engineering. p. 233. +.. [2] Pressman, Roger S. & Maxim, Bruce R. (2020). Software Engineering, + A Practitioner’s Approach. p. 48, 376, 378, 381. +.. [3] Pressman, Roger S. & Maxim, Bruce R. (2020). Software Engineering, + A Practitioner’s Approach. p. 388. +.. [4] Pressman, Roger S. & Maxim, Bruce R. (2020). Software Engineering, + A Practitioner’s Approach. Software Engineering, p. 377. +.. [5] Sommerville, Ian (2016). Software Engineering. p. 59, 232, 240. +.. [6] Luo, Qingzhou, et al. An empirical analysis of flaky tests. + Proceedings of the 22nd ACM SIGSOFT International Symposium on + Foundations of Software Engineering. 2014. +.. [7] Humble, Jez & Farley, David (2010). Continuous Delivery: + Reliable Software Releases Through Build, Test, and Deployment, p. 122. +.. [8] Humble, Jez & Farley, David (2010). Continuous Delivery: + Reliable Software Releases Through Build, Test, and Deployment, p. 55. +.. [9] Sommerville, Ian (2016). Software Engineering. p. 743. diff --git a/docs/devel/ci-jobs.rst.inc b/docs/devel/ci-jobs.rst.inc new file mode 100644 index 0000000000..1f28fec0d0 --- /dev/null +++ b/docs/devel/ci-jobs.rst.inc @@ -0,0 +1,179 @@ +.. _ci_var: + +Custom CI/CD variables +====================== + +QEMU CI pipelines can be tuned by setting some CI environment variables. + +Set variable globally in the user's CI namespace +------------------------------------------------ + +Variables can be set globally in the user's CI namespace setting. + +For further information about how to set these variables, please refer to:: + + https://docs.gitlab.com/ee/ci/variables/#add-a-cicd-variable-to-a-project + +Set variable manually when pushing a branch or tag to the user's repository +--------------------------------------------------------------------------- + +Variables can be set manually when pushing a branch or tag, using +git-push command line arguments. + +Example setting the QEMU_CI_EXAMPLE_VAR variable: + +.. code:: + + git push -o ci.variable="QEMU_CI_EXAMPLE_VAR=value" myrepo mybranch + +For further information about how to set these variables, please refer to:: + + https://docs.gitlab.com/ee/user/project/push_options.html#push-options-for-gitlab-cicd + +Setting aliases in your git config +---------------------------------- + +You can use aliases to make it easier to push branches with different +CI configurations. For example define an alias for triggering CI: + +.. code:: + + git config --local alias.push-ci "push -o ci.variable=QEMU_CI=1" + git config --local alias.push-ci-now "push -o ci.variable=QEMU_CI=2" + +Which lets you run: + +.. code:: + + git push-ci + +to create the pipeline, or: + +.. code:: + + git push-ci-now + +to create and run the pipeline + + +Variable naming and grouping +---------------------------- + +The variables used by QEMU's CI configuration are grouped together +in a handful of namespaces + + * QEMU_JOB_nnnn - variables to be defined in individual jobs + or templates, to influence the shared rules defined in the + .base_job_template. 
+ + * QEMU_CI_nnn - variables to be set by contributors in their + repository CI settings, or as git push variables, to influence + which jobs get run in a pipeline. + + * nnn - other misc variables not falling into the above + categories, or using different names for historical reasons + and not yet converted. + +Maintainer controlled job variables +----------------------------------- + +The following variables may be set when defining a job in the +CI configuration file. + +QEMU_JOB_CIRRUS +~~~~~~~~~~~~~~~ + +The job makes use of Cirrus CI infrastructure, requiring the +configuration setup for cirrus-run to be present in the repository. + +QEMU_JOB_OPTIONAL +~~~~~~~~~~~~~~~~~ + +The job is expected to be successful in general, but is not run +by default due to the need to conserve limited CI resources. It is +available to be started manually by the contributor in the CI +pipelines UI. + +QEMU_JOB_ONLY_FORKS +~~~~~~~~~~~~~~~~~~~ + +The job results are only of interest to contributors prior to +submitting code. They are not required as part of the gating +CI pipeline. + +QEMU_JOB_SKIPPED +~~~~~~~~~~~~~~~~ + +The job is not reliably successful in general, so is not +currently suitable to be run by default. Ideally this should +be a temporary marker until the problems can be addressed, or +the job permanently removed. + +QEMU_JOB_PUBLISH +~~~~~~~~~~~~~~~~ + +The job is for publishing content after a branch has been +merged into the upstream default branch. + +QEMU_JOB_AVOCADO +~~~~~~~~~~~~~~~~ + +The job runs the Avocado integration test suite. + +Contributor controlled runtime variables +---------------------------------------- + +The following variables may be set by contributors to control +job execution. + +QEMU_CI +~~~~~~~ + +By default, no pipelines will be created on contributor forks +in order to preserve CI credits. + +Set this variable to 1 to create the pipelines, but leave all +the jobs to be manually started from the UI. + +Set this variable to 2 to create the pipelines and run all +the jobs immediately, as was historically the behaviour. + +QEMU_CI_AVOCADO_TESTING +~~~~~~~~~~~~~~~~~~~~~~~ +By default, tests using the Avocado framework are not run automatically in +the pipelines (because multiple artifacts have to be downloaded, and if +these artifacts are not already cached, downloading them makes the jobs +reach the timeout limit). Set this variable to have the tests using the +Avocado framework run automatically. + +Other misc variables +-------------------- + +These variables are primarily to control execution of jobs on +private runners. + +AARCH64_RUNNER_AVAILABLE +~~~~~~~~~~~~~~~~~~~~~~~~ +If you've got access to an aarch64 host that can be used as a gitlab-CI +runner, you can set this variable to enable the tests that require this +kind of host. The runner should be tagged with "aarch64". + +AARCH32_RUNNER_AVAILABLE +~~~~~~~~~~~~~~~~~~~~~~~~ +If you've got access to an armhf host or an aarch64 host that can run +aarch32 EL0 code to be used as a gitlab-CI runner, you can set this +variable to enable the tests that require this kind of host. The +runner should be tagged with "aarch32". + +S390X_RUNNER_AVAILABLE +~~~~~~~~~~~~~~~~~~~~~~ +If you've got access to an IBM Z host that can be used as a gitlab-CI +runner, you can set this variable to enable the tests that require this +kind of host. The runner should be tagged with "s390x".
+ +CENTOS_STREAM_8_x86_64_RUNNER_AVAILABLE +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +If you've got access to a CentOS Stream 8 x86_64 host that can be +used as a gitlab-CI runner, you can set this variable to enable the +tests that require this kind of host. The runner should be tagged with +both "centos_stream_8" and "x86_64". diff --git a/docs/devel/ci-runners.rst.inc b/docs/devel/ci-runners.rst.inc new file mode 100644 index 0000000000..7817001fb2 --- /dev/null +++ b/docs/devel/ci-runners.rst.inc @@ -0,0 +1,117 @@ +Jobs on Custom Runners +====================== + +Besides the jobs run under the various CI systems listed before, there +are a number additional jobs that will run before an actual merge. +These use the same GitLab CI's service/framework already used for all +other GitLab based CI jobs, but rely on additional systems, not the +ones provided by GitLab as "shared runners". + +The architecture of GitLab's CI service allows different machines to +be set up with GitLab's "agent", called gitlab-runner, which will take +care of running jobs created by events such as a push to a branch. +Here, the combination of a machine, properly configured with GitLab's +gitlab-runner, is called a "custom runner". + +The GitLab CI jobs definition for the custom runners are located under:: + + .gitlab-ci.d/custom-runners.yml + +Custom runners entail custom machines. To see a list of the machines +currently deployed in the QEMU GitLab CI and their maintainers, please +refer to the QEMU `wiki `__. + +Machine Setup Howto +------------------- + +For all Linux based systems, the setup can be mostly automated by the +execution of two Ansible playbooks. Create an ``inventory`` file +under ``scripts/ci/setup``, such as this:: + + fully.qualified.domain + other.machine.hostname + +You may need to set some variables in the inventory file itself. One +very common need is to tell Ansible to use a Python 3 interpreter on +those hosts. This would look like:: + + fully.qualified.domain ansible_python_interpreter=/usr/bin/python3 + other.machine.hostname ansible_python_interpreter=/usr/bin/python3 + +Build environment +~~~~~~~~~~~~~~~~~ + +The ``scripts/ci/setup/build-environment.yml`` Ansible playbook will +set up machines with the environment needed to perform builds and run +QEMU tests. This playbook consists on the installation of various +required packages (and a general package update while at it). It +currently covers a number of different Linux distributions, but it can +be expanded to cover other systems. + +The minimum required version of Ansible successfully tested in this +playbook is 2.8.0 (a version check is embedded within the playbook +itself). To run the playbook, execute:: + + cd scripts/ci/setup + ansible-playbook -i inventory build-environment.yml + +Please note that most of the tasks in the playbook require superuser +privileges, such as those from the ``root`` account or those obtained +by ``sudo``. If necessary, please refer to ``ansible-playbook`` +options such as ``--become``, ``--become-method``, ``--become-user`` +and ``--ask-become-pass``. + +gitlab-runner setup and registration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The gitlab-runner agent needs to be installed on each machine that +will run jobs. The association between a machine and a GitLab project +happens with a registration token. 
To find the registration token for +your repository/project, navigate on GitLab's web UI to: + + * Settings (the gears-like icon at the bottom of the left hand side + vertical toolbar), then + * CI/CD, then + * Runners, and click on the "Expand" button, then + * Under "Set up a specific Runner manually", look for the value under + "And this registration token:" + +Copy the ``scripts/ci/setup/vars.yml.template`` file to +``scripts/ci/setup/vars.yml``. Then, set the +``gitlab_runner_registration_token`` variable to the value obtained +earlier. + +To run the playbook, execute:: + + cd scripts/ci/setup + ansible-playbook -i inventory gitlab-runner.yml + +Following the registration, it's necessary to configure the runner tags, +and optionally other configurations on the GitLab UI. Navigate to: + + * Settings (the gears like icon), then + * CI/CD, then + * Runners, and click on the "Expand" button, then + * "Runners activated for this project", then + * Click on the "Edit" icon (next to the "Lock" Icon) + +Tags are very important as they are used to route specific jobs to +specific types of runners, so it's a good idea to double check that +the automatically created tags are consistent with the OS and +architecture. For instance, an Ubuntu 20.04 aarch64 system should +have tags set as:: + + ubuntu_20.04,aarch64 + +Because the job definition at ``.gitlab-ci.d/custom-runners.yml`` +would contain:: + + ubuntu-20.04-aarch64-all: + tags: + - ubuntu_20.04 + - aarch64 + +It's also recommended to: + + * increase the "Maximum job timeout" to something like ``2h`` + * give it a better Description diff --git a/docs/devel/ci.rst b/docs/devel/ci.rst index 205572510c..ed88a2010b 100644 --- a/docs/devel/ci.rst +++ b/docs/devel/ci.rst @@ -1,167 +1,14 @@ +.. _ci: + == CI == -QEMU has configurations enabled for a number of different CI services. -The most up to date information about them and their status can be -found at:: +Most of QEMU's CI is run on GitLab's infrastructure although a number +of other CI services are used for specialised purposes. The most up to +date information about them and their status can be found on the +`project wiki testing page `_. - https://wiki.qemu.org/Testing/CI - -Custom CI/CD variables -====================== - -QEMU CI pipelines can be tuned by setting some CI environment variables. - -Set variable globally in the user's CI namespace ------------------------------------------------- - -Variables can be set globally in the user's CI namespace setting. - -For further information about how to set these variables, please refer to:: - - https://docs.gitlab.com/ee/ci/variables/#add-a-cicd-variable-to-a-project - -Set variable manually when pushing a branch or tag to the user's repository ---------------------------------------------------------------------------- - -Variables can be set manually when pushing a branch or tag, using -git-push command line arguments. - -Example setting the QEMU_CI_EXAMPLE_VAR variable: - -.. 
code:: - - git push -o ci.variable="QEMU_CI_EXAMPLE_VAR=value" myrepo mybranch - -For further information about how to set these variables, please refer to:: - - https://docs.gitlab.com/ee/user/project/push_options.html#push-options-for-gitlab-cicd - -Here is a list of the most used variables: - -QEMU_CI_AVOCADO_TESTING -~~~~~~~~~~~~~~~~~~~~~~~ -By default, tests using the Avocado framework are not run automatically in -the pipelines (because multiple artifacts have to be downloaded, and if -these artifacts are not already cached, downloading them make the jobs -reach the timeout limit). Set this variable to have the tests using the -Avocado framework run automatically. - -Jobs on Custom Runners -====================== - -Besides the jobs run under the various CI systems listed before, there -are a number additional jobs that will run before an actual merge. -These use the same GitLab CI's service/framework already used for all -other GitLab based CI jobs, but rely on additional systems, not the -ones provided by GitLab as "shared runners". - -The architecture of GitLab's CI service allows different machines to -be set up with GitLab's "agent", called gitlab-runner, which will take -care of running jobs created by events such as a push to a branch. -Here, the combination of a machine, properly configured with GitLab's -gitlab-runner, is called a "custom runner". - -The GitLab CI jobs definition for the custom runners are located under:: - - .gitlab-ci.d/custom-runners.yml - -Custom runners entail custom machines. To see a list of the machines -currently deployed in the QEMU GitLab CI and their maintainers, please -refer to the QEMU `wiki `__. - -Machine Setup Howto -------------------- - -For all Linux based systems, the setup can be mostly automated by the -execution of two Ansible playbooks. Create an ``inventory`` file -under ``scripts/ci/setup``, such as this:: - - fully.qualified.domain - other.machine.hostname - -You may need to set some variables in the inventory file itself. One -very common need is to tell Ansible to use a Python 3 interpreter on -those hosts. This would look like:: - - fully.qualified.domain ansible_python_interpreter=/usr/bin/python3 - other.machine.hostname ansible_python_interpreter=/usr/bin/python3 - -Build environment -~~~~~~~~~~~~~~~~~ - -The ``scripts/ci/setup/build-environment.yml`` Ansible playbook will -set up machines with the environment needed to perform builds and run -QEMU tests. This playbook consists on the installation of various -required packages (and a general package update while at it). It -currently covers a number of different Linux distributions, but it can -be expanded to cover other systems. - -The minimum required version of Ansible successfully tested in this -playbook is 2.8.0 (a version check is embedded within the playbook -itself). To run the playbook, execute:: - - cd scripts/ci/setup - ansible-playbook -i inventory build-environment.yml - -Please note that most of the tasks in the playbook require superuser -privileges, such as those from the ``root`` account or those obtained -by ``sudo``. If necessary, please refer to ``ansible-playbook`` -options such as ``--become``, ``--become-method``, ``--become-user`` -and ``--ask-become-pass``. - -gitlab-runner setup and registration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The gitlab-runner agent needs to be installed on each machine that -will run jobs. The association between a machine and a GitLab project -happens with a registration token. 
To find the registration token for -your repository/project, navigate on GitLab's web UI to: - - * Settings (the gears-like icon at the bottom of the left hand side - vertical toolbar), then - * CI/CD, then - * Runners, and click on the "Expand" button, then - * Under "Set up a specific Runner manually", look for the value under - "And this registration token:" - -Copy the ``scripts/ci/setup/vars.yml.template`` file to -``scripts/ci/setup/vars.yml``. Then, set the -``gitlab_runner_registration_token`` variable to the value obtained -earlier. - -To run the playbook, execute:: - - cd scripts/ci/setup - ansible-playbook -i inventory gitlab-runner.yml - -Following the registration, it's necessary to configure the runner tags, -and optionally other configurations on the GitLab UI. Navigate to: - - * Settings (the gears like icon), then - * CI/CD, then - * Runners, and click on the "Expand" button, then - * "Runners activated for this project", then - * Click on the "Edit" icon (next to the "Lock" Icon) - -Tags are very important as they are used to route specific jobs to -specific types of runners, so it's a good idea to double check that -the automatically created tags are consistent with the OS and -architecture. For instance, an Ubuntu 20.04 aarch64 system should -have tags set as:: - - ubuntu_20.04,aarch64 - -Because the job definition at ``.gitlab-ci.d/custom-runners.yml`` -would contain:: - - ubuntu-20.04-aarch64-all: - tags: - - ubuntu_20.04 - - aarch64 - -It's also recommended to: - - * increase the "Maximum job timeout" to something like ``2h`` - * give it a better Description +.. include:: ci-definitions.rst.inc +.. include:: ci-jobs.rst.inc +.. include:: ci-runners.rst.inc diff --git a/docs/devel/clocks.rst b/docs/devel/clocks.rst index 956bd147ea..675fbeb6ab 100644 --- a/docs/devel/clocks.rst +++ b/docs/devel/clocks.rst @@ -260,6 +260,29 @@ clocks get the new clock period value: *Clock 2*, *Clock 3* and *Clock 4*. It is not possible to disconnect a clock or to change the clock connection after it is connected. +Clock multiplier and divider settings +------------------------------------- + +By default, when clocks are connected together, the child +clocks run with the same period as their source (parent) clock. +The Clock API supports a built-in period multiplier/divider +mechanism so you can configure a clock to make its children +run at a different period from its own. If you call the +``clock_set_mul_div()`` function you can specify the clock's +multiplier and divider values. The children of that clock +will all run with a period of ``parent_period * multiplier / divider``. +For instance, if the clock has a frequency of 8MHz and you set its +multiplier to 2 and its divider to 3, the child clocks will run +at 12MHz. + +You can change the multiplier and divider of a clock at runtime, +so you can use this to model clock controller devices which +have guest-programmable frequency multipliers or dividers. + +Note that ``clock_set_mul_div()`` does not automatically call +``clock_propagate()``. If you make a runtime change to the +multiplier or divider you must call clock_propagate() yourself. + Unconnected input clocks ------------------------ diff --git a/docs/devel/code-of-conduct.rst b/docs/devel/code-of-conduct.rst index 277b5250d1..f734ed0317 100644 --- a/docs/devel/code-of-conduct.rst +++ b/docs/devel/code-of-conduct.rst @@ -1,3 +1,5 @@ +.. 
_code_of_conduct: + Code of Conduct =============== @@ -55,6 +57,6 @@ Sources ------- This document is based on the `Fedora Code of Conduct -`__ and the -`Contributor Covenant version 1.3.0 +`__ +(as of April 2021) and the `Contributor Covenant version 1.3.0 `__. diff --git a/docs/devel/fuzzing.rst b/docs/devel/fuzzing.rst index 2749bb9bed..715330c856 100644 --- a/docs/devel/fuzzing.rst +++ b/docs/devel/fuzzing.rst @@ -182,10 +182,11 @@ The output should contain a complete list of matched MemoryRegions. OSS-Fuzz -------- -QEMU is continuously fuzzed on `OSS-Fuzz` __(https://github.com/google/oss-fuzz). -By default, the OSS-Fuzz build will try to fuzz every fuzz-target. Since the -generic-fuzz target requires additional information provided in environment -variables, we pre-define some generic-fuzz configs in +QEMU is continuously fuzzed on `OSS-Fuzz +`_. By default, the OSS-Fuzz build +will try to fuzz every fuzz-target. Since the generic-fuzz target +requires additional information provided in environment variables, we +pre-define some generic-fuzz configs in ``tests/qtest/fuzz/generic_fuzz_configs.h``. Each config must specify: - ``.name``: To identify the fuzzer config @@ -286,8 +287,8 @@ select the fuzz target. Then, the qtest client is initialized. If the target requires qos, qgraph is set up and the QOM/LIBQOS modules are initialized. Then the QGraph is walked and the QEMU cmd_line is determined and saved. -After this, the ``vl.c:qemu_main`` is called to set up the guest. There are -target-specific hooks that can be called before and after qemu_main, for +After this, the ``vl.c:main`` is called to set up the guest. There are +target-specific hooks that can be called before and after main, for additional setup(e.g. PCI setup, or VM snapshotting). ``LLVMFuzzerTestOneInput``: Uses qtest/qos functions to act based on the fuzz diff --git a/docs/devel/index-api.rst b/docs/devel/index-api.rst new file mode 100644 index 0000000000..60c0d7459d --- /dev/null +++ b/docs/devel/index-api.rst @@ -0,0 +1,14 @@ +Internal QEMU APIs +------------------ + +Details about how QEMU's various internal APIs. Most of these are +generated from in-code annotations to function prototypes. + +.. toctree:: + :maxdepth: 2 + + bitops + loads-stores + memory + modules + ui diff --git a/docs/devel/index-build.rst b/docs/devel/index-build.rst new file mode 100644 index 0000000000..57e8d39d98 --- /dev/null +++ b/docs/devel/index-build.rst @@ -0,0 +1,19 @@ +QEMU Build and Test System +-------------------------- + +Details about how QEMU's build system works and how it is integrated +into our testing infrastructure. You will need to understand some of +the basics if you are adding new files and targets to the build. + +.. toctree:: + :maxdepth: 3 + + build-system + kconfig + testing + acpi-bits + qtest + ci + qapi-code-gen + fuzzing + control-flow-integrity diff --git a/docs/devel/index-internals.rst b/docs/devel/index-internals.rst new file mode 100644 index 0000000000..e1a93df263 --- /dev/null +++ b/docs/devel/index-internals.rst @@ -0,0 +1,21 @@ +Internal Subsystem Information +------------------------------ + +Details about QEMU's various subsystems including how to add features to them. + +.. 
toctree:: + :maxdepth: 2 + + qom + atomics + block-coroutine-wrapper + clocks + ebpf_rss + migration + multi-process + reset + s390-dasd-ipl + tracing + vfio-migration + writing-monitor-commands + virtio-backends diff --git a/docs/devel/index-process.rst b/docs/devel/index-process.rst new file mode 100644 index 0000000000..d50dd74c3e --- /dev/null +++ b/docs/devel/index-process.rst @@ -0,0 +1,17 @@ +QEMU Community Processes +------------------------ + +Notes about how to interact with the community and how and where to submit patches. + +.. toctree:: + :maxdepth: 2 + + code-of-conduct + conflict-resolution + maintainers + style + submitting-a-patch + trivial-patches + stable-process + submitting-a-pull-request + secure-coding-practices diff --git a/docs/devel/index-tcg.rst b/docs/devel/index-tcg.rst new file mode 100644 index 0000000000..7b9760b26f --- /dev/null +++ b/docs/devel/index-tcg.rst @@ -0,0 +1,16 @@ +TCG Emulation +------------- + +Details about QEMU's Tiny Code Generator and the infrastructure +associated with emulation. You do not need to worry about this if you +are only implementing things for HW accelerated hypervisors. + +.. toctree:: + :maxdepth: 2 + + tcg + decodetree + multi-thread-tcg + tcg-icount + tcg-plugins + replay diff --git a/docs/devel/index.rst b/docs/devel/index.rst index 5522db7241..09cfb322be 100644 --- a/docs/devel/index.rst +++ b/docs/devel/index.rst @@ -1,46 +1,16 @@ +--------------------- Developer Information -===================== +--------------------- This section of the manual documents various parts of the internals of QEMU. You only need to read it if you are interested in reading or modifying QEMU's source code. .. toctree:: - :maxdepth: 2 - :includehidden: + :maxdepth: 1 - code-of-conduct - conflict-resolution - build-system - style - kconfig - testing - fuzzing - control-flow-integrity - loads-stores - memory - migration - atomics - stable-process - ci - qtest - decodetree - secure-coding-practices - tcg - tcg-icount - tracing - multi-thread-tcg - tcg-plugins - bitops - ui - reset - s390-dasd-ipl - clocks - qom - modules - block-coroutine-wrapper - multi-process - ebpf_rss - vfio-migration - qapi-code-gen - writing-qmp-commands + index-process + index-build + index-api + index-internals + index-tcg diff --git a/docs/devel/kconfig.rst b/docs/devel/kconfig.rst index a1cdbec751..69674d008a 100644 --- a/docs/devel/kconfig.rst +++ b/docs/devel/kconfig.rst @@ -192,11 +192,15 @@ declares its dependencies in different ways: no directive and are not used in the Makefile either; they only appear as conditions for ``default y`` directives. - QEMU currently has two device groups, ``PCI_DEVICES`` and - ``TEST_DEVICES``. PCI devices usually have a ``default y if + QEMU currently has three device groups, ``PCI_DEVICES``, ``I2C_DEVICES``, + and ``TEST_DEVICES``. PCI devices usually have a ``default y if PCI_DEVICES`` directive rather than just ``default y``. This lets some boards (notably s390) easily support a subset of PCI devices, for example only VFIO (passthrough) and virtio-pci devices. + ``I2C_DEVICES`` is similar to ``PCI_DEVICES``. It contains i2c devices + that users might reasonably want to plug in to an i2c bus on any + board (and not ones which are very board-specific or that need + to be wired up in a way that can't be done on the command line). ``TEST_DEVICES`` instead is used for devices that are rarely used on production virtual machines, but provide useful hooks to test QEMU or KVM. 
@@ -301,7 +305,7 @@ and also listed as follows in the top-level meson.build's host_kconfig variable:: host_kconfig = \ - ('CONFIG_TPM' in config_host ? ['CONFIG_TPM=y'] : []) + \ + (have_tpm ? ['CONFIG_TPM=y'] : []) + \ ('CONFIG_SPICE' in config_host ? ['CONFIG_SPICE=y'] : []) + \ (have_ivshmem ? ['CONFIG_IVSHMEM=y'] : []) + \ ... diff --git a/docs/devel/loads-stores.rst b/docs/devel/loads-stores.rst index 568274baec..ad5dfe133e 100644 --- a/docs/devel/loads-stores.rst +++ b/docs/devel/loads-stores.rst @@ -68,15 +68,19 @@ Regexes for git grep - ``\`` - ``\`` -``cpu_{ld,st}*_mmuidx_ra`` -~~~~~~~~~~~~~~~~~~~~~~~~~~ +``cpu_{ld,st}*_mmu`` +~~~~~~~~~~~~~~~~~~~~ -These functions operate on a guest virtual address plus a context, -known as a "mmu index" or ``mmuidx``, which controls how that virtual -address is translated. The meaning of the indexes are target specific, -but specifying a particular index might be necessary if, for instance, -the helper requires an "always as non-privileged" access rather that -the default access for the current state of the guest CPU. +These functions operate on a guest virtual address, plus a context +known as a "mmu index" which controls how that virtual address is +translated, plus a ``MemOp`` which contains alignment requirements +among other things. The ``MemOp`` and mmu index are combined into +a single argument of type ``MemOpIdx``. + +The meaning of the indexes are target specific, but specifying a +particular index might be necessary if, for instance, the helper +requires a "always as non-privileged" access rather than the +default access for the current state of the guest CPU. These functions may cause a guest CPU exception to be taken (e.g. for an alignment fault or MMU fault) which will result in @@ -99,6 +103,35 @@ function, which is a return address into the generated code [#gpc]_. Function names follow the pattern: +load: ``cpu_ld{size}{end}_mmu(env, ptr, oi, retaddr)`` + +store: ``cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)`` + +``size`` + - ``b`` : 8 bits + - ``w`` : 16 bits + - ``l`` : 32 bits + - ``q`` : 64 bits + +``end`` + - (empty) : for target endian, or 8 bit sizes + - ``_be`` : big endian + - ``_le`` : little endian + +Regexes for git grep: + - ``\`` + - ``\`` + + +``cpu_{ld,st}*_mmuidx_ra`` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +These functions work like the ``cpu_{ld,st}_mmu`` functions except +that the ``mmuidx`` parameter is not combined with a ``MemOp``, +and therefore there is no required alignment supplied or enforced. + +Function names follow the pattern: + load: ``cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmuidx, retaddr)`` store: ``cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmuidx, retaddr)`` @@ -132,7 +165,8 @@ of the guest CPU, as determined by ``cpu_mmu_index(env, false)``. These are generally the preferred way to do accesses by guest virtual address from helper functions, unless the access should -be performed with a context other than the default. +be performed with a context other than the default, or alignment +should be enforced for the access. Function names follow the pattern: @@ -241,7 +275,7 @@ called during the translator callback ``translate_insn``. There is a set of functions ending in ``_swap`` which, if the parameter is true, returns the value in the endianness that is the reverse of -the guest native endianness, as determined by ``TARGET_WORDS_BIGENDIAN``. +the guest native endianness, as determined by ``TARGET_BIG_ENDIAN``. 
Function names follow the pattern: diff --git a/docs/devel/maintainers.rst b/docs/devel/maintainers.rst new file mode 100644 index 0000000000..5c907d901c --- /dev/null +++ b/docs/devel/maintainers.rst @@ -0,0 +1,107 @@ +.. _maintainers: + +The Role of Maintainers +======================= + +Maintainers are a critical part of the project's contributor ecosystem. +They come from a wide range of backgrounds from unpaid hobbyists +working in their spare time to employees who work on the project as +part of their job. Maintainer activities include: + + - reviewing patches and suggesting changes + - collecting patches and preparing pull requests + - tending to the long term health of their area + - participating in other project activities + +They are also human and subject to the same pressures as everyone else +including overload and burnout. Like everyone else they are subject +to project's :ref:`code_of_conduct` and should also be exemplars of +excellent community collaborators. + +The MAINTAINERS file +-------------------- + +The `MAINTAINERS +`__ +file contains the canonical list of who is a maintainer. The file +is machine readable so an appropriately configured git (see +:ref:`cc_the_relevant_maintainer`) can automatically Cc them on +patches that touch their area of code. + +The file also describes the status of the area of code to give an idea +of how actively that section is maintained. + +.. list-table:: Meaning of support status in MAINTAINERS + :widths: 25 75 + :header-rows: 1 + + * - Status + - Meaning + * - Supported + - Someone is actually paid to look after this. + * - Maintained + - Someone actually looks after it. + * - Odd Fixes + - It has a maintainer but they don't have time to do + much other than throw the odd patch in. + * - Orphan + - No current maintainer. + * - Obsolete + - Old obsolete code, should use something else. + +Please bear in mind that even if someone is paid to support something +it does not mean they are paid to support you. This is open source and +the code comes with no warranty and the project makes no guarantees +about dealing with bugs or features requests. + + + +Becoming a reviewer +------------------- + +Most maintainers start by becoming subsystem reviewers. While anyone +is welcome to review code on the mailing list getting added to the +MAINTAINERS file with a line like:: + + R: Random Hacker + +marks you as a 'designated reviewer' - expected to provide regular +spontaneous feedback. This will ensure that patches touching a given +subsystem will automatically be CC'd to you. + +Becoming a maintainer +--------------------- + +Maintainers are volunteers who put themselves forward or have been +asked by others to keep an eye on an area of code. They have generally +demonstrated to the community, usually via contributions and code +reviews, that they have a good understanding of the subsystem. They +are also trusted to make a positive contribution to the project and +work well with the other contributors. + +The process is simple - simply send a patch to the list that updates +the ``MAINTAINERS`` file. Sometimes this is done as part of a larger +series when a new sub-system is being added to the code base. This can +also be done by a retiring maintainer who nominates their replacement +after discussion with other contributors. + +Once the patch is reviewed and merged the only other step is to make +sure your GPG key is signed. + +.. 
_maintainer_keys: + +Maintainer GPG Keys +~~~~~~~~~~~~~~~~~~~ + +GPG is used to sign pull requests so they can be identified as really +coming from the maintainer. If your key is not already signed by +members of the QEMU community, you should make arrangements to attend +a `KeySigningParty `__ (for +example at KVM Forum) or make alternative arrangements to have your +key signed by an attendee. Key signing requires meeting another +community member **in person** [#]_ so please make appropriate +arrangements. + +.. [#] In recent pandemic times we have had to exercise some + flexibility here. Maintainers still need to sign their pull + requests though. diff --git a/docs/devel/memory.rst b/docs/devel/memory.rst index 5dc8a12682..69c5e3f914 100644 --- a/docs/devel/memory.rst +++ b/docs/devel/memory.rst @@ -67,11 +67,15 @@ MemoryRegion): You initialize a pure container with memory_region_init(). -- alias: a subsection of another region. Aliases allow a region to be - split apart into discontiguous regions. Examples of uses are memory banks - used when the guest address space is smaller than the amount of RAM - addressed, or a memory controller that splits main memory to expose a "PCI - hole". Aliases may point to any type of region, including other aliases, +- alias: a subsection of another region. Aliases allow a region to be + split apart into discontiguous regions. Examples of uses are memory + banks used when the guest address space is smaller than the amount + of RAM addressed, or a memory controller that splits main memory to + expose a "PCI hole". You can also create aliases to avoid trying to + add the original region to multiple parents via + `memory_region_add_subregion`. + + Aliases may point to any type of region, including other aliases, but an alias may not point back to itself, directly or indirectly. You initialize these with memory_region_init_alias(). diff --git a/docs/devel/migration.rst b/docs/devel/migration.rst index 2401253482..3e9656d8e0 100644 --- a/docs/devel/migration.rst +++ b/docs/devel/migration.rst @@ -389,19 +389,13 @@ Each version is associated with a series of fields saved. The ``save_state`` al the state as the newer version. But ``load_state`` sometimes is able to load state from an older version. -You can see that there are several version fields: +You can see that there are two version fields: - ``version_id``: the maximum version_id supported by VMState for that device. - ``minimum_version_id``: the minimum version_id that VMState is able to understand for that device. -- ``minimum_version_id_old``: For devices that were not able to port to vmstate, we can - assign a function that knows how to read this old state. This field is - ignored if there is no ``load_state_old`` handler. -VMState is able to read versions from minimum_version_id to -version_id. And the function ``load_state_old()`` (if present) is able to -load state from minimum_version_id_old to minimum_version_id. This -function is deprecated and will be removed when no more users are left. +VMState is able to read versions from minimum_version_id to version_id. There are *_V* forms of many ``VMSTATE_`` macros to load fields for version dependent fields, e.g. diff --git a/docs/devel/modules.rst b/docs/devel/modules.rst index 066f347b89..8e999c4fa4 100644 --- a/docs/devel/modules.rst +++ b/docs/devel/modules.rst @@ -1,5 +1,5 @@ ============ -Qemu modules +QEMU modules ============ .. 
kernel-doc:: include/qemu/module.h diff --git a/docs/devel/multi-process.rst b/docs/devel/multi-process.rst index 69699329d6..e4801751f2 100644 --- a/docs/devel/multi-process.rst +++ b/docs/devel/multi-process.rst @@ -1,15 +1,17 @@ -This is the design document for multi-process QEMU. It does not -necessarily reflect the status of the current implementation, which -may lack features or be considerably different from what is described -in this document. This document is still useful as a description of -the goals and general direction of this feature. - -Please refer to the following wiki for latest details: -https://wiki.qemu.org/Features/MultiProcessQEMU - Multi-process QEMU =================== +.. note:: + + This is the design document for multi-process QEMU. It does not + necessarily reflect the status of the current implementation, which + may lack features or be considerably different from what is described + in this document. This document is still useful as a description of + the goals and general direction of this feature. + + Please refer to the following wiki for latest details: + https://wiki.qemu.org/Features/MultiProcessQEMU + QEMU is often used as the hypervisor for virtual machines running in the Oracle cloud. Since one of the advantages of cloud computing is the ability to run many VMs from different tenants in the same cloud @@ -185,9 +187,9 @@ desired, in which the emulation application should only be allowed to access the files or devices the VM it's running on behalf of can access. #### qemu-io model -Qemu-io is a test harness used to test changes to the QEMU block backend -object code. (e.g., the code that implements disk images for disk driver -emulation) Qemu-io is not a device emulation application per se, but it +``qemu-io`` is a test harness used to test changes to the QEMU block backend +object code (e.g., the code that implements disk images for disk driver +emulation). ``qemu-io`` is not a device emulation application per se, but it does compile the QEMU block objects into a separate binary from the main QEMU one. This could be useful for disk device emulation, since its emulation applications will need to include the QEMU block objects. @@ -639,7 +641,7 @@ the CPU that issued the MMIO. +==========+========================+ | rid | range MMIO is within | +----------+------------------------+ -| offset | offset withing *rid* | +| offset | offset within *rid* | +----------+------------------------+ | type | e.g., load or store | +----------+------------------------+ diff --git a/docs/devel/multi-thread-tcg.rst b/docs/devel/multi-thread-tcg.rst index 5b446ee08b..c9541a7b20 100644 --- a/docs/devel/multi-thread-tcg.rst +++ b/docs/devel/multi-thread-tcg.rst @@ -228,7 +228,7 @@ Emulated hardware state Currently thanks to KVM work any access to IO memory is automatically protected by the global iothread mutex, also known as the BQL (Big -Qemu Lock). Any IO region that doesn't use global mutex is expected to +QEMU Lock). Any IO region that doesn't use global mutex is expected to do its own locking. However IO memory isn't the only way emulated hardware state can be diff --git a/docs/devel/multiple-iothreads.txt b/docs/devel/multiple-iothreads.txt index aeb997bed5..343120f2ef 100644 --- a/docs/devel/multiple-iothreads.txt +++ b/docs/devel/multiple-iothreads.txt @@ -109,7 +109,7 @@ The AioContext originates from the QEMU block layer, even though nowadays AioContext is a generic event loop that can be used by any QEMU subsystem. 
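As an illustrative sketch (the callback and helper names are invented, not existing QEMU code), a subsystem can hand work to a specific AioContext with a one-shot bottom half:

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "sysemu/iothread.h"

    static void my_work_cb(void *opaque)   /* hypothetical callback */
    {
        /* Runs inside the event loop of the AioContext it was queued to. */
    }

    static void schedule_in_iothread(IOThread *iothread, void *opaque)
    {
        AioContext *ctx = iothread_get_aio_context(iothread);

        /* Queue a one-shot bottom half; it executes in ctx's event loop. */
        aio_bh_schedule_oneshot(ctx, my_work_cb, opaque);
    }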
The block layer has support for AioContext integrated. Each BlockDriverState -is associated with an AioContext using bdrv_try_set_aio_context() and +is associated with an AioContext using bdrv_try_change_aio_context() and bdrv_get_aio_context(). This allows block layer code to process I/O inside the right AioContext. Other subsystems may wish to follow a similar approach. @@ -134,5 +134,5 @@ Long-running jobs (usually in the form of coroutines) are best scheduled in the BlockDriverState's AioContext to avoid the need to acquire/release around each bdrv_*() call. The functions bdrv_add/remove_aio_context_notifier, or alternatively blk_add/remove_aio_context_notifier if you use BlockBackends, -can be used to get a notification whenever bdrv_try_set_aio_context() moves a +can be used to get a notification whenever bdrv_try_change_aio_context() moves a BlockDriverState to a different AioContext. diff --git a/docs/devel/qapi-code-gen.rst b/docs/devel/qapi-code-gen.rst index 26c62b0e7b..cd9b544376 100644 --- a/docs/devel/qapi-code-gen.rst +++ b/docs/devel/qapi-code-gen.rst @@ -41,8 +41,8 @@ used internally. There are several kinds of types: simple types (a number of built-in types, such as ``int`` and ``str``; as well as enumerations), arrays, -complex types (structs and two flavors of unions), and alternate types -(a choice between other types). +complex types (structs and unions), and alternate types (a choice +between other types). Schema syntax @@ -200,7 +200,9 @@ Syntax:: '*if': COND, '*features': FEATURES } ENUM-VALUE = STRING - | { 'name': STRING, '*if': COND } + | { 'name': STRING, + '*if': COND, + '*features': FEATURES } Member 'enum' names the enum type. @@ -319,13 +321,9 @@ Union types Syntax:: UNION = { 'union': STRING, - 'data': BRANCHES, - '*if': COND, - '*features': FEATURES } - | { 'union': STRING, - 'data': BRANCHES, 'base': ( MEMBERS | STRING ), 'discriminator': STRING, + 'data': BRANCHES, '*if': COND, '*features': FEATURES } BRANCHES = { BRANCH, ... } @@ -334,63 +332,30 @@ Syntax:: Member 'union' names the union type. -There are two flavors of union types: simple (no discriminator or -base), and flat (both discriminator and base). - -Each BRANCH of the 'data' object defines a branch of the union. A -union must have at least one branch. - -The BRANCH's STRING name is the branch name. - -The BRANCH's value defines the branch's properties, in particular its -type. The form TYPE-REF_ is shorthand for :code:`{ 'type': TYPE-REF }`. - -A simple union type defines a mapping from automatic discriminator -values to data types like in this example:: - - { 'struct': 'BlockdevOptionsFile', 'data': { 'filename': 'str' } } - { 'struct': 'BlockdevOptionsQcow2', - 'data': { 'backing': 'str', '*lazy-refcounts': 'bool' } } - - { 'union': 'BlockdevOptionsSimple', - 'data': { 'file': 'BlockdevOptionsFile', - 'qcow2': 'BlockdevOptionsQcow2' } } - -In the Client JSON Protocol, a simple union is represented by an -object that contains the 'type' member as a discriminator, and a -'data' member that is of the specified data type corresponding to the -discriminator value, as in these examples:: - - { "type": "file", "data": { "filename": "/some/place/my-image" } } - { "type": "qcow2", "data": { "backing": "/some/place/my-image", - "lazy-refcounts": true } } - -The generated C code uses a struct containing a union. Additionally, -an implicit C enum 'NameKind' is created, corresponding to the union -'Name', for accessing the various branches of the union. The value -for each branch can be of any type. 
- -Flat unions permit arbitrary common members that occur in all variants -of the union, not just a discriminator. Their discriminators need not -be named 'type'. They also avoid nesting on the wire. - The 'base' member defines the common members. If it is a MEMBERS_ object, it defines common members just like a struct type's 'data' member defines struct type members. If it is a STRING, it names a struct type whose members are the common members. -All flat union branches must be `Struct types`_. +Member 'discriminator' must name a non-optional enum-typed member of +the base struct. That member's value selects a branch by its name. +If no such branch exists, an empty branch is assumed. -In the Client JSON Protocol, a flat union is represented by an object -with the common members (from the base type) and the selected branch's -members. The two sets of member names must be disjoint. Member -'discriminator' must name a non-optional enum-typed member of the base -struct. +Each BRANCH of the 'data' object defines a branch of the union. A +union must have at least one branch. -The following example enhances the above simple union example by -adding an optional common member 'read-only', renaming the -discriminator to something more applicable than the simple union's -default of 'type', and reducing the number of ``{}`` required on the wire:: +The BRANCH's STRING name is the branch name. It must be a value of +the discriminator enum type. + +The BRANCH's value defines the branch's properties, in particular its +type. The type must a struct type. The form TYPE-REF_ is shorthand +for :code:`{ 'type': TYPE-REF }`. + +In the Client JSON Protocol, a union is represented by an object with +the common members (from the base type) and the selected branch's +members. The two sets of member names must be disjoint. + +Example:: { 'enum': 'BlockdevDriver', 'data': [ 'file', 'qcow2' ] } { 'union': 'BlockdevOptions', @@ -406,30 +371,11 @@ Resulting in these JSON objects:: { "driver": "qcow2", "read-only": false, "backing": "/some/place/my-image", "lazy-refcounts": true } -Notice that in a flat union, the discriminator name is controlled by -the user, but because it must map to a base member with enum type, the -code generator ensures that branches match the existing values of the -enum. The order of branches need not match the order of the enum -values. The branches need not cover all possible enum values. -Omitted enum values are still valid branches that add no additional -members to the data type. In the resulting generated C data types, a -flat union is represented as a struct with the base members in QAPI -schema order, and then a union of structures for each branch of the -struct. - -A simple union can always be re-written as a flat union where the base -class has a single member named 'type', and where each branch of the -union has a struct with a single member named 'data'. That is, :: - - { 'union': 'Simple', 'data': { 'one': 'str', 'two': 'int' } } - -is identical on the wire to:: - - { 'enum': 'Enum', 'data': ['one', 'two'] } - { 'struct': 'Branch1', 'data': { 'data': 'str' } } - { 'struct': 'Branch2', 'data': { 'data': 'int' } } - { 'union': 'Flat', 'base': { 'type': 'Enum' }, 'discriminator': 'type', - 'data': { 'one': 'Branch1', 'two': 'Branch2' } } +The order of branches need not match the order of the enum values. +The branches need not cover all possible enum values. 
In the +resulting generated C data types, a union is represented as a struct +with the base members in QAPI schema order, and then a union of +structures for each branch of the struct. The optional 'if' member specifies a conditional. See `Configuring the schema`_ below for more on this. @@ -762,8 +708,14 @@ QEMU shows a certain behaviour. Special features ~~~~~~~~~~~~~~~~ -Feature "deprecated" marks a command, event, or struct member as -deprecated. It is not supported elsewhere so far. +Feature "deprecated" marks a command, event, enum value, or struct +member as deprecated. It is not supported elsewhere so far. +Interfaces so marked may be withdrawn in future releases in accordance +with QEMU's deprecation policy. + +Feature "unstable" marks a command, event, enum value, or struct +member as unstable. It is not supported elsewhere so far. Interfaces +so marked may be withdrawn or changed incompatibly in future releases. Naming rules and reserved names @@ -787,10 +739,11 @@ Type names ending with ``Kind`` or ``List`` are reserved for the generator, which uses them for implicit union enums and array types, respectively. -Command names, and member names within a type, should be all lower -case with words separated by a hyphen. However, some existing older -commands and complex types use underscore; when extending them, -consistency is preferred over blindly avoiding underscore. +Command names, member names within a type, and feature names should be +all lower case with words separated by a hyphen. However, some +existing older commands and complex types use underscore; when +extending them, consistency is preferred over blindly avoiding +underscore. Event names should be ALL_CAPS with words separated by underscore. @@ -798,9 +751,8 @@ Member name ``u`` and names starting with ``has-`` or ``has_`` are reserved for the generator, which uses them for unions and for tracking optional members. -Any name (command, event, type, member, or enum value) beginning with -``x-`` is marked experimental, and may be withdrawn or changed -incompatibly in a future release. +Names beginning with ``x-`` used to signify "experimental". This +convention has been replaced by special feature "unstable". Pragmas ``command-name-exceptions`` and ``member-name-exceptions`` let you violate naming rules. Use for new code is strongly discouraged. See @@ -826,25 +778,31 @@ Configuring the schema Syntax:: COND = STRING - | [ STRING, ... ] + | { 'all: [ COND, ... ] } + | { 'any: [ COND, ... ] } + | { 'not': COND } All definitions take an optional 'if' member. Its value must be a -string or a list of strings. A string is shorthand for a list -containing just that string. The code generated for the definition -will then be guarded by #if STRING for each STRING in the COND list. +string, or an object with a single member 'all', 'any' or 'not'. + +The C code generated for the definition will then be guarded by an #if +preprocessing directive with an operand generated from that condition: + + * STRING will generate defined(STRING) + * { 'all': [COND, ...] } will generate (COND && ...) + * { 'any': [COND, ...] } will generate (COND || ...) + * { 'not': COND } will generate !COND Example: a conditional struct :: { 'struct': 'IfStruct', 'data': { 'foo': 'int' }, - 'if': ['defined(CONFIG_FOO)', 'defined(HAVE_BAR)'] } + 'if': { 'all': [ 'CONFIG_FOO', 'HAVE_BAR' ] } } gets its generated code guarded like this:: - #if defined(CONFIG_FOO) - #if defined(HAVE_BAR) + #if defined(CONFIG_FOO) && defined(HAVE_BAR) ... generated code ... 
- #endif /* defined(HAVE_BAR) */ - #endif /* defined(CONFIG_FOO) */ + #endif /* defined(HAVE_BAR) && defined(CONFIG_FOO) */ Individual members of complex types, commands arguments, and event-specific data can also be made conditional. This requires the @@ -853,9 +811,9 @@ longhand form of MEMBER. Example: a struct type with unconditional member 'foo' and conditional member 'bar' :: - { 'struct': 'IfStruct', 'data': - { 'foo': 'int', - 'bar': { 'type': 'int', 'if': 'defined(IFCOND)'} } } + { 'struct': 'IfStruct', + 'data': { 'foo': 'int', + 'bar': { 'type': 'int', 'if': 'IFCOND'} } } A union's discriminator may not be conditional. @@ -865,9 +823,9 @@ the longhand form of ENUM-VALUE_. Example: an enum type with unconditional value 'foo' and conditional value 'bar' :: - { 'enum': 'IfEnum', 'data': - [ 'foo', - { 'name' : 'bar', 'if': 'defined(IFCOND)' } ] } + { 'enum': 'IfEnum', + 'data': [ 'foo', + { 'name' : 'bar', 'if': 'IFCOND' } ] } Likewise, features can be conditional. This requires the longhand form of FEATURE_. @@ -877,7 +835,7 @@ Example: a struct with conditional feature 'allow-negative-numbers' :: { 'struct': 'TestType', 'data': { 'number': 'int' }, 'features': [ { 'name': 'allow-negative-numbers', - 'if': 'defined(IFCOND)' } ] } + 'if': 'IFCOND' } ] } Please note that you are responsible to ensure that the C code will compile with an arbitrary combination of conditions, since the @@ -999,15 +957,16 @@ definition must have documentation. Definition documentation starts with a line naming the definition, followed by an optional overview, a description of each argument (for commands and events), member (for structs and unions), branch (for -alternates), or value (for enums), and finally optional tagged -sections. +alternates), or value (for enums), a description of each feature (if +any), and finally optional tagged sections. -Descriptions of arguments can span multiple lines. The description -text can start on the line following the '\@argname:', in which case it -must not be indented at all. It can also start on the same line as -the '\@argname:'. In this case if it spans multiple lines then second -and subsequent lines must be indented to line up with the first -character of the first line of the description:: +The description of an argument or feature 'name' starts with +'\@name:'. The description text can start on the line following the +'\@name:', in which case it must not be indented at all. It can also +start on the same line as the '\@name:'. In this case if it spans +multiple lines then second and subsequent lines must be indented to +line up with the first character of the first line of the +description:: # @argone: # This is a two line description @@ -1029,6 +988,12 @@ The number of spaces between the ':' and the text is not significant. Extensions added after the definition was first released carry a '(since x.y.z)' comment. +The feature descriptions must be preceded by a line "Features:", like +this:: + + # Features: + # @feature: Description text + A tagged section starts with one of the following words: "Note:"/"Notes:", "Since:", "Example"/"Examples", "Returns:", "TODO:". The section ends with the start of a new section. @@ -1043,12 +1008,6 @@ multiline argument descriptions. A 'Since: x.y.z' tagged section lists the release that introduced the definition. -The text of a section can start on a new line, in -which case it must not be indented at all. It can also start -on the same line as the 'Note:', 'Returns:', etc tag. 
In this -case if it spans multiple lines then second and subsequent -lines must be indented to match the first. - An 'Example' or 'Examples' section is automatically rendered entirely as literal fixed-width text. In other sections, the text is formatted, and rST markup can be used. @@ -1207,7 +1166,8 @@ and "variants". "members" is a JSON array describing the object's common members, if any. Each element is a JSON object with members "name" (the member's -name), "type" (the name of its type), and optionally "default". The +name), "type" (the name of its type), "features" (a JSON array of +feature strings), and "default". The latter two are optional. The member is optional if "default" is present. Currently, "default" can only have value null. Other values are reserved for future extensions. The "members" array is in no particular order; clients @@ -1240,7 +1200,7 @@ that provides the variant members for this type tag value). The "variants" array is in no particular order, and is not guaranteed to list cases in the same order as the corresponding "tag" enum type. -Example: the SchemaInfo for flat union BlockdevOptions from section +Example: the SchemaInfo for union BlockdevOptions from section `Union types`_ :: { "name": "BlockdevOptions", "meta-type": "object", @@ -1255,27 +1215,6 @@ Example: the SchemaInfo for flat union BlockdevOptions from section Note that base types are "flattened": its members are included in the "members" array. -A simple union implicitly defines an enumeration type for its implicit -discriminator (called "type" on the wire, see section `Union types`_). - -A simple union implicitly defines an object type for each of its -variants. - -Example: the SchemaInfo for simple union BlockdevOptionsSimple from section -`Union types`_ :: - - { "name": "BlockdevOptionsSimple", "meta-type": "object", - "members": [ - { "name": "type", "type": "BlockdevOptionsSimpleKind" } ], - "tag": "type", - "variants": [ - { "case": "file", "type": "q_obj-BlockdevOptionsFile-wrapper" }, - { "case": "qcow2", "type": "q_obj-BlockdevOptionsQcow2-wrapper" } ] } - - Enumeration type "BlockdevOptionsSimpleKind" and the object types - "q_obj-BlockdevOptionsFile-wrapper", "q_obj-BlockdevOptionsQcow2-wrapper" - are implicitly defined. - The SchemaInfo for an alternate type has meta-type "alternate", and variant member "members". "members" is a JSON array. Each element is a JSON object with member "type", which names a type. Values of the @@ -1302,14 +1241,22 @@ Example: the SchemaInfo for ['str'] :: "element-type": "str" } The SchemaInfo for an enumeration type has meta-type "enum" and -variant member "values". The values are listed in no particular -order; clients must search the entire enum when learning whether a -particular value is supported. +variant member "members". + +"members" is a JSON array describing the enumeration values. Each +element is a JSON object with member "name" (the member's name), and +optionally "features" (a JSON array of feature strings). The +"members" array is in no particular order; clients must search the +entire array when learning whether a particular value is supported. 
Example: the SchemaInfo for MyEnum from section `Enumeration types`_ :: { "name": "MyEnum", "meta-type": "enum", - "values": [ "value1", "value2", "value3" ] } + "members": [ + { "name": "value1" }, + { "name": "value2" }, + { "name": "value3" } + ] } The SchemaInfo for a built-in type has the same name as the type in the QAPI schema (see section `Built-in Types`_), with one exception @@ -1684,6 +1631,9 @@ The following files are generated: ``$(prefix)qapi-commands.h`` Function prototypes for the QMP commands specified in the schema + ``$(prefix)qapi-commands.trace-events`` + Trace event declarations, see :ref:`tracing`. + ``$(prefix)qapi-init-commands.h`` Command initialization prototype @@ -1704,6 +1654,13 @@ Example:: void qmp_marshal_my_command(QDict *args, QObject **ret, Error **errp); #endif /* EXAMPLE_QAPI_COMMANDS_H */ + + $ cat qapi-generated/example-qapi-commands.trace-events + # AUTOMATICALLY GENERATED, DO NOT MODIFY + + qmp_enter_my_command(const char *json) "%s" + qmp_exit_my_command(const char *result, bool succeeded) "%s %d" + $ cat qapi-generated/example-qapi-commands.c [Uninteresting stuff omitted...] @@ -1743,14 +1700,27 @@ Example:: goto out; } + if (trace_event_get_state_backends(TRACE_QMP_ENTER_MY_COMMAND)) { + g_autoptr(GString) req_json = qobject_to_json(QOBJECT(args)); + + trace_qmp_enter_my_command(req_json->str); + } + retval = qmp_my_command(arg.arg1, &err); - error_propagate(errp, err); if (err) { + trace_qmp_exit_my_command(error_get_pretty(err), false); + error_propagate(errp, err); goto out; } qmp_marshal_output_UserDefOne(retval, ret, errp); + if (trace_event_get_state_backends(TRACE_QMP_EXIT_MY_COMMAND)) { + g_autoptr(GString) ret_json = qobject_to_json(*ret); + + trace_qmp_exit_my_command(ret_json->str, true); + } + out: visit_free(v); v = qapi_dealloc_visitor_new(); diff --git a/docs/devel/qgraph.rst b/docs/devel/qgraph.rst index 39e293687e..43342d9d65 100644 --- a/docs/devel/qgraph.rst +++ b/docs/devel/qgraph.rst @@ -1,8 +1,7 @@ .. _qgraph: -======================================== Qtest Driver Framework -======================================== +====================== In order to test a specific driver, plain libqos tests need to take care of booting QEMU with the right machine and devices. @@ -15,7 +14,7 @@ support that device. Using only libqos APIs, the test has to manually take care of covering all the setups, and build the correct command line. -This also introduces backward compability issues: if a device/driver command +This also introduces backward compatibility issues: if a device/driver command line name is changed, all tests that use that will not work properly anymore and need to be adjusted. @@ -31,17 +30,19 @@ so the sdhci-test should only care of linking its qgraph node with that interface. In this way, if the command line of a sdhci driver is changed, only the respective qgraph driver node has to be adjusted. +QGraph concepts +--------------- + The graph is composed by nodes that represent machines, drivers, tests and edges that define the relationships between them (``CONSUMES``, ``PRODUCES``, and ``CONTAINS``). - Nodes -^^^^^^ +~~~~~ A node can be of four types: -- **QNODE_MACHINE**: for example ``arm/raspi2`` +- **QNODE_MACHINE**: for example ``arm/raspi2b`` - **QNODE_DRIVER**: for example ``generic-sdhci`` - **QNODE_INTERFACE**: for example ``sdhci`` (interface for all ``-sdhci`` drivers). 
@@ -64,7 +65,7 @@ Notes for the nodes: drivers name, otherwise they won't be discovered Edges -^^^^^^ +~~~~~ An edge relation between two nodes (drivers or machines) ``X`` and ``Y`` can be: @@ -73,7 +74,7 @@ An edge relation between two nodes (drivers or machines) ``X`` and ``Y`` can be: - ``X CONTAINS Y``: ``Y`` is part of ``X`` component Execution steps -^^^^^^^^^^^^^^^ +~~~~~~~~~~~~~~~ The basic framework steps are the following: @@ -92,8 +93,64 @@ The basic framework steps are the following: Depending on the QEMU binary used, only some drivers/machines will be available and only test that are reached by them will be executed. +Command line +~~~~~~~~~~~~ + +Command line is built by using node names and optional arguments +passed by the user when building the edges. + +There are three types of command line arguments: + +- ``in node`` : created from the node name. For example, machines will + have ``-M `` to its command line, while devices + ``-device ``. It is automatically done by the framework. +- ``after node`` : added as additional argument to the node name. + This argument is added optionally when creating edges, + by setting the parameter ``after_cmd_line`` and + ``extra_edge_opts`` in ``QOSGraphEdgeOptions``. + The framework automatically adds + a comma before ``extra_edge_opts``, + because it is going to add attributes + after the destination node pointed by + the edge containing these options, and automatically + adds a space before ``after_cmd_line``, because it + adds an additional device, not an attribute. +- ``before node`` : added as additional argument to the node name. + This argument is added optionally when creating edges, + by setting the parameter ``before_cmd_line`` in + ``QOSGraphEdgeOptions``. This attribute + is going to add attributes before the destination node + pointed by the edge containing these options. It is + helpful to commands that are not node-representable, + such as ``-fdsev`` or ``-netdev``. + +While adding command line in edges is always used, not all nodes names are +used in every path walk: this is because the contained or produced ones +are already added by QEMU, so only nodes that "consumes" will be used to +build the command line. Also, nodes that will have ``{ "abstract" : true }`` +as QMP attribute will loose their command line, since they are not proper +devices to be added in QEMU. + +Example:: + + QOSGraphEdgeOptions opts = { + .before_cmd_line = "-drive id=drv0,if=none,file=null-co://," + "file.read-zeroes=on,format=raw", + .after_cmd_line = "-device scsi-hd,bus=vs0.0,drive=drv0", + + opts.extra_device_opts = "id=vs0"; + }; + + qos_node_create_driver("virtio-scsi-device", + virtio_scsi_device_create); + qos_node_consumes("virtio-scsi-device", "virtio-bus", &opts); + +Will produce the following command line: +``-drive id=drv0,if=none,file=null-co://, -device virtio-scsi-device,id=vs0 -device scsi-hd,bus=vs0.0,drive=drv0`` + Troubleshooting unavailable tests -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + If there is no path from an available machine to a test then that test will be unavailable and won't execute. This can happen if a test or driver did not set up its qgraph node correctly. 
It can also happen if the necessary machine type @@ -119,12 +176,12 @@ It is possible to troubleshoot unavailable tests by running:: # |-> dest='i440FX-pcihost' type=0 (node=0x5591421117f0) # src='' # |-> dest='x86_64/pc' type=0 (node=0x559142111600) - # |-> dest='arm/raspi2' type=0 (node=0x559142110740) + # |-> dest='arm/raspi2b' type=0 (node=0x559142110740) ... # } # ALL QGRAPH NODES: { # name='virtio-net-tests/announce-self' type=3 cmd_line='(null)' [available] - # name='arm/raspi2' type=0 cmd_line='-M raspi2 ' [UNAVAILABLE] + # name='arm/raspi2b' type=0 cmd_line='-M raspi2b ' [UNAVAILABLE] ... # } @@ -135,8 +192,8 @@ qgraph path in the "ALL QGRAPH EDGES" output as follows: '' -> 'x86_64/pc' -> 'virtio-net'. The root of the qgraph is '' and the depth first search begins there. -The ``arm/raspi`` machine node is listed as "UNAVAILABLE". Although it is -reachable from the root via '' -> 'arm/raspi2' the node is unavailable because +The ``arm/raspi2b`` machine node is listed as "UNAVAILABLE". Although it is +reachable from the root via '' -> 'arm/raspi2b' the node is unavailable because the QEMU binary did not list it when queried by the framework. This is expected because we used the ``qemu-system-x86_64`` binary which does not support ARM machine types. @@ -151,14 +208,14 @@ Typically this is because the QEMU binary lacks support for the necessary machine type or device. Creating a new driver and its interface -""""""""""""""""""""""""""""""""""""""""" +--------------------------------------- Here we continue the ``sdhci`` use case, with the following scenario: - ``sdhci-test`` aims to test the ``read[q,w], writeq`` functions offered by the ``sdhci`` drivers. - The current ``sdhci`` device is supported by both ``x86_64/pc`` and ``ARM`` - (in this example we focus on the ``arm-raspi2``) machines. + (in this example we focus on the ``arm-raspi2b``) machines. - QEMU offers 2 types of drivers: ``QSDHCI_MemoryMapped`` for ``ARM`` and ``QSDHCI_PCI`` for ``x86_64/pc``. Both implement the ``read[q,w], writeq`` functions. @@ -180,11 +237,11 @@ In order to implement such scenario in qgraph, the test developer needs to: all the pci drivers available) ``sdhci-pci --consumes--> pci-bus`` -- Create an ``arm/raspi2`` machine node. This machine ``contains`` +- Create an ``arm/raspi2b`` machine node. This machine ``contains`` a ``generic-sdhci`` memory mapped ``sdhci`` driver node, representing ``QSDHCI_MemoryMapped``. - ``arm/raspi2 --contains--> generic-sdhci`` + ``arm/raspi2b --contains--> generic-sdhci`` - Create the ``sdhci`` interface node. This interface offers the functions that are shared by all ``sdhci`` devices. 
The interface is produced by ``sdhci-pci`` and ``generic-sdhci``, @@ -199,7 +256,7 @@ In order to implement such scenario in qgraph, the test developer needs to: ``sdhci-test --consumes--> sdhci`` -``arm-raspi2`` machine, simplified from +``arm-raspi2b`` machine, simplified from ``tests/qtest/libqos/arm-raspi2-machine.c``:: #include "qgraph.h" @@ -217,7 +274,7 @@ In order to implement such scenario in qgraph, the test developer needs to: return &machine->alloc; } - fprintf(stderr, "%s not present in arm/raspi2\n", interface); + fprintf(stderr, "%s not present in arm/raspi2b\n", interface); g_assert_not_reached(); } @@ -229,7 +286,7 @@ In order to implement such scenario in qgraph, the test developer needs to: return &machine->sdhci.obj; } - fprintf(stderr, "%s not present in arm/raspi2\n", device); + fprintf(stderr, "%s not present in arm/raspi2b\n", device); g_assert_not_reached(); } @@ -253,10 +310,10 @@ In order to implement such scenario in qgraph, the test developer needs to: static void raspi2_register_nodes(void) { - /* arm/raspi2 --contains--> generic-sdhci */ - qos_node_create_machine("arm/raspi2", + /* arm/raspi2b --contains--> generic-sdhci */ + qos_node_create_machine("arm/raspi2b", qos_create_machine_arm_raspi2); - qos_node_contains("arm/raspi2", "generic-sdhci", NULL); + qos_node_contains("arm/raspi2b", "generic-sdhci", NULL); } libqos_init(raspi2_register_nodes); @@ -470,7 +527,7 @@ In the above example, all possible types of relations are created:: | +--produces-- + | - arm/raspi2 --contains--> generic-sdhci + arm/raspi2b --contains--> generic-sdhci or inverting the consumes edge in consumed_by:: @@ -486,10 +543,10 @@ or inverting the consumes edge in consumed_by:: | +--produces-- + | - arm/raspi2 --contains--> generic-sdhci + arm/raspi2b --contains--> generic-sdhci Adding a new test -""""""""""""""""" +----------------- Given the above setup, adding a new test is very simple. ``sdhci-test``, taken from ``tests/qtest/sdhci-test.c``:: @@ -536,7 +593,7 @@ Final graph will be like this:: | +--produces-- + | - arm/raspi2 --contains--> generic-sdhci + arm/raspi2b --contains--> generic-sdhci or inverting the consumes edge in consumed_by:: @@ -552,7 +609,7 @@ or inverting the consumes edge in consumed_by:: | +--produces-- + | - arm/raspi2 --contains--> generic-sdhci + arm/raspi2b --contains--> generic-sdhci Assuming there the binary is ``QTEST_QEMU_BINARY=./qemu-system-x86_64`` @@ -561,66 +618,11 @@ a valid test path will be: and for the binary ``QTEST_QEMU_BINARY=./qemu-system-arm``: -``/arm/raspi2/generic-sdhci/sdhci/sdhci-test`` +``/arm/raspi2b/generic-sdhci/sdhci/sdhci-test`` Additional examples are also in ``test-qgraph.c`` -Command line: -"""""""""""""" - -Command line is built by using node names and optional arguments -passed by the user when building the edges. - -There are three types of command line arguments: - -- ``in node`` : created from the node name. For example, machines will - have ``-M `` to its command line, while devices - ``-device ``. It is automatically done by the framework. -- ``after node`` : added as additional argument to the node name. - This argument is added optionally when creating edges, - by setting the parameter ``after_cmd_line`` and - ``extra_edge_opts`` in ``QOSGraphEdgeOptions``. 
- The framework automatically adds - a comma before ``extra_edge_opts``, - because it is going to add attributes - after the destination node pointed by - the edge containing these options, and automatically - adds a space before ``after_cmd_line``, because it - adds an additional device, not an attribute. -- ``before node`` : added as additional argument to the node name. - This argument is added optionally when creating edges, - by setting the parameter ``before_cmd_line`` in - ``QOSGraphEdgeOptions``. This attribute - is going to add attributes before the destination node - pointed by the edge containing these options. It is - helpful to commands that are not node-representable, - such as ``-fdsev`` or ``-netdev``. - -While adding command line in edges is always used, not all nodes names are -used in every path walk: this is because the contained or produced ones -are already added by QEMU, so only nodes that "consumes" will be used to -build the command line. Also, nodes that will have ``{ "abstract" : true }`` -as QMP attribute will loose their command line, since they are not proper -devices to be added in QEMU. - -Example:: - - QOSGraphEdgeOptions opts = { - .before_cmd_line = "-drive id=drv0,if=none,file=null-co://," - "file.read-zeroes=on,format=raw", - .after_cmd_line = "-device scsi-hd,bus=vs0.0,drive=drv0", - - opts.extra_device_opts = "id=vs0"; - }; - - qos_node_create_driver("virtio-scsi-device", - virtio_scsi_device_create); - qos_node_consumes("virtio-scsi-device", "virtio-bus", &opts); - -Will produce the following command line: -``-drive id=drv0,if=none,file=null-co://, -device virtio-scsi-device,id=vs0 -device scsi-hd,bus=vs0.0,drive=drv0`` - Qgraph API reference -^^^^^^^^^^^^^^^^^^^^ +-------------------- .. kernel-doc:: tests/qtest/libqos/qgraph.h diff --git a/docs/devel/qom.rst b/docs/devel/qom.rst index e5fe3597cd..3e34b07c98 100644 --- a/docs/devel/qom.rst +++ b/docs/devel/qom.rst @@ -292,8 +292,7 @@ in the header file: .. code-block:: c :caption: Declaring a simple type - OBJECT_DECLARE_SIMPLE_TYPE(MyDevice, my_device, - MY_DEVICE, DEVICE) + OBJECT_DECLARE_SIMPLE_TYPE(MyDevice, MY_DEVICE) This is equivalent to the following: @@ -372,8 +371,8 @@ This accepts an array of interface type names. { TYPE_USER_CREATABLE }, { NULL }) -If the type is not intended to be instantiated, then then -the OBJECT_DEFINE_ABSTRACT_TYPE() macro can be used instead: +If the type is not intended to be instantiated, then the +OBJECT_DEFINE_ABSTRACT_TYPE() macro can be used instead: .. code-block:: c :caption: Defining a simple abstract type diff --git a/docs/devel/qtest.rst b/docs/devel/qtest.rst index c3dceb6c8a..0455aa06ab 100644 --- a/docs/devel/qtest.rst +++ b/docs/devel/qtest.rst @@ -3,7 +3,6 @@ QTest Device Emulation Testing Framework ======================================== .. toctree:: - :hidden: qgraph @@ -89,4 +88,4 @@ QTest Protocol libqtest API reference ---------------------- -.. kernel-doc:: tests/qtest/libqos/libqtest.h +.. kernel-doc:: tests/qtest/libqtest.h diff --git a/docs/devel/replay.rst b/docs/devel/replay.rst new file mode 100644 index 0000000000..0244be8b9c --- /dev/null +++ b/docs/devel/replay.rst @@ -0,0 +1,306 @@ +.. + Copyright (c) 2022, ISP RAS + Written by Pavel Dovgalyuk and Alex Bennée + +======================= +Execution Record/Replay +======================= + +Core concepts +============= + +Record/replay functions are used for the deterministic replay of qemu +execution. 
Execution recording writes a non-deterministic events log, which +can be later used for replaying the execution anywhere and for unlimited +number of times. Execution replaying reads the log and replays all +non-deterministic events including external input, hardware clocks, +and interrupts. + +Several parts of QEMU include function calls to make event log recording +and replaying. +Devices' models that have non-deterministic input from external devices were +changed to write every external event into the execution log immediately. +E.g. network packets are written into the log when they arrive into the virtual +network adapter. + +All non-deterministic events are coming from these devices. But to +replay them we need to know at which moments they occur. We specify +these moments by counting the number of instructions executed between +every pair of consecutive events. + +Academic papers with description of deterministic replay implementation: + +* `Deterministic Replay of System's Execution with Multi-target QEMU Simulator for Dynamic Analysis and Reverse Debugging `_ +* `Don't panic: reverse debugging of kernel drivers `_ + +Modifications of qemu include: + + * wrappers for clock and time functions to save their return values in the log + * saving different asynchronous events (e.g. system shutdown) into the log + * synchronization of the bottom halves execution + * synchronization of the threads from thread pool + * recording/replaying user input (mouse, keyboard, and microphone) + * adding internal checkpoints for cpu and io synchronization + * network filter for recording and replaying the packets + * block driver for making block layer deterministic + * serial port input record and replay + * recording of random numbers obtained from the external sources + +Instruction counting +-------------------- + +QEMU should work in icount mode to use record/replay feature. icount was +designed to allow deterministic execution in absence of external inputs +of the virtual machine. We also use icount to control the occurrence of the +non-deterministic events. The number of instructions elapsed from the last event +is written to the log while recording the execution. In replay mode we +can predict when to inject that event using the instruction counter. + +Locking and thread synchronisation +---------------------------------- + +Previously the synchronisation of the main thread and the vCPU thread +was ensured by the holding of the BQL. However the trend has been to +reduce the time the BQL was held across the system including under TCG +system emulation. As it is important that batches of events are kept +in sequence (e.g. expiring timers and checkpoints in the main thread +while instruction checkpoints are written by the vCPU thread) we need +another lock to keep things in lock-step. This role is now handled by +the replay_mutex_lock. It used to be held only for each event being +written but now it is held for a whole execution period. This results +in a deterministic ping-pong between the two main threads. + +As the BQL is now a finer grained lock than the replay_lock it is almost +certainly a bug, and a source of deadlocks, to take the +replay_mutex_lock while the BQL is held. This is enforced by an assert. +While the unlocks are usually in the reverse order, this is not +necessary; you can drop the replay_lock while holding the BQL, without +doing a more complicated unlock_iothread/replay_unlock/lock_iothread +sequence. 
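The locking discipline above can be sketched as follows (an illustrative fragment, not a real call site; the batch-processing body is elided and the function name is invented)::

    #include "qemu/osdep.h"
    #include "qemu/main-loop.h"
    #include "replay/replay-internal.h"  /* the replay module's internal header,
                                            shown here only for illustration */

    static void run_replayed_batch(void)
    {
        /* Correct order: take the replay lock while the BQL is NOT held... */
        replay_mutex_lock();
        qemu_mutex_lock_iothread();

        /* ... process a batch of events under both locks ... */

        /* Unlocking in either order is fine: the replay lock may be dropped
         * while the BQL is still held, without re-taking anything. */
        replay_mutex_unlock();
        qemu_mutex_unlock_iothread();
    }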
+ +Checkpoints +----------- + +Replaying the execution of virtual machine is bound by sources of +non-determinism. These are inputs from clock and peripheral devices, +and QEMU thread scheduling. Thread scheduling affect on processing events +from timers, asynchronous input-output, and bottom halves. + +Invocations of timers are coupled with clock reads and changing the state +of the virtual machine. Reads produce non-deterministic data taken from +host clock. And VM state changes should preserve their order. Their relative +order in replay mode must replicate the order of callbacks in record mode. +To preserve this order we use checkpoints. When a specific clock is processed +in record mode we save to the log special "checkpoint" event. +Checkpoints here do not refer to virtual machine snapshots. They are just +record/replay events used for synchronization. + +QEMU in replay mode will try to invoke timers processing in random moment +of time. That's why we do not process a group of timers until the checkpoint +event will be read from the log. Such an event allows synchronizing CPU +execution and timer events. + +Two other checkpoints govern the "warping" of the virtual clock. +While the virtual machine is idle, the virtual clock increments at +1 ns per *real time* nanosecond. This is done by setting up a timer +(called the warp timer) on the virtual real time clock, so that the +timer fires at the next deadline of the virtual clock; the virtual clock +is then incremented (which is called "warping" the virtual clock) as +soon as the timer fires or the CPUs need to go out of the idle state. +Two functions are used for this purpose; because these actions change +virtual machine state and must be deterministic, each of them creates a +checkpoint. ``icount_start_warp_timer`` checks if the CPUs are idle and if so +starts accounting real time to virtual clock. ``icount_account_warp_timer`` +is called when the CPUs get an interrupt or when the warp timer fires, +and it warps the virtual clock by the amount of real time that has passed +since ``icount_start_warp_timer``. + +Virtual devices +=============== + +Record/replay mechanism, that could be enabled through icount mode, expects +the virtual devices to satisfy the following requirement: +everything that affects +the guest state during execution in icount mode should be deterministic. + +Timers +------ + +Timers are used to execute callbacks from different subsystems of QEMU +at the specified moments of time. There are several kinds of timers: + + * Real time clock. Based on host time and used only for callbacks that + do not change the virtual machine state. For this reason real time + clock and timers does not affect deterministic replay at all. + * Virtual clock. These timers run only during the emulation. In icount + mode virtual clock value is calculated using executed instructions counter. + That is why it is completely deterministic and does not have to be recorded. + * Host clock. This clock is used by device models that simulate real time + sources (e.g. real time clock chip). Host clock is the one of the sources + of non-determinism. Host clock read operations should be logged to + make the execution deterministic. + * Virtual real time clock. This clock is similar to real time clock but + it is used only for increasing virtual clock while virtual machine is + sleeping. Due to its nature it is also non-deterministic as the host clock + and has to be logged too. 
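To make this concrete, here is a minimal sketch (device and callback names are invented, not taken from the tree) of arming a state-changing timer on the virtual clock, as the guidance below recommends::

    #include "qemu/osdep.h"
    #include "qemu/timer.h"

    static void demo_tick(void *opaque)    /* hypothetical callback */
    {
        /* ... changes guest-visible state, so it must be deterministic ... */
    }

    static void demo_arm_timer(void *opaque)
    {
        /* The virtual clock advances with executed instructions under icount,
         * so this callback fires at a deterministic point during replay. */
        QEMUTimer *t = timer_new_ns(QEMU_CLOCK_VIRTUAL, demo_tick, opaque);

        timer_mod(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1000000);
    }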
+ +All virtual devices should use virtual clock for timers that change the guest +state. Virtual clock is deterministic, therefore such timers are deterministic +too. + +Virtual devices can also use realtime clock for the events that do not change +the guest state directly. When the clock ticking should depend on VM execution +speed, use virtual clock with EXTERNAL attribute. It is not deterministic, +but its speed depends on the guest execution. This clock is used by +the virtual devices (e.g., slirp routing device) that lie outside the +replayed guest. + +Block devices +------------- + +Block devices record/replay module (``blkreplay``) intercepts calls of +bdrv coroutine functions at the top of block drivers stack. + +All block completion operations are added to the queue in the coroutines. +When the queue is flushed the information about processed requests +is recorded to the log. In replay phase the queue is matched with +events read from the log. Therefore block devices requests are processed +deterministically. + +Bottom halves +------------- + +Bottom half callbacks, that affect the guest state, should be invoked through +``replay_bh_schedule_event`` or ``replay_bh_schedule_oneshot_event`` functions. +Their invocations are saved in record mode and synchronized with the existing +log in replay mode. + +Disk I/O events are completely deterministic in our model, because +in both record and replay modes we start virtual machine from the same +disk state. But callbacks that virtual disk controller uses for reading and +writing the disk may occur at different moments of time in record and replay +modes. + +Reading and writing requests are created by CPU thread of QEMU. Later these +requests proceed to block layer which creates "bottom halves". Bottom +halves consist of callback and its parameters. They are processed when +main loop locks the global mutex. These locks are not synchronized with +replaying process because main loop also processes the events that do not +affect the virtual machine state (like user interaction with monitor). + +That is why we had to implement saving and replaying bottom halves callbacks +synchronously to the CPU execution. When the callback is about to execute +it is added to the queue in the replay module. This queue is written to the +log when its callbacks are executed. In replay mode callbacks are not processed +until the corresponding event is read from the events log file. + +Sometimes the block layer uses asynchronous callbacks for its internal purposes +(like reading or writing VM snapshots or disk image cluster tables). In this +case bottom halves are not marked as "replayable" and do not saved +into the log. + +Saving/restoring the VM state +----------------------------- + +All fields in the device state structure (including virtual timers) +should be restored by loadvm to the same values they had before savevm. + +Avoid accessing other devices' state, because the order of saving/restoring +is not defined. It means that you should not call functions like +``update_irq`` in ``post_load`` callback. Save everything explicitly to avoid +the dependencies that may make restoring the VM state non-deterministic. + +Stopping the VM +--------------- + +Stopping the guest should not interfere with its state (with the exception +of the network connections, that could be broken by the remote timeouts). +VM can be stopped at any moment of replay by the user. Restarting the VM +after that stop should not break the replay by the unneeded guest state change. 
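Tying the bottom-half rule above to code, a minimal sketch (the callback, context and opaque pointer are placeholders rather than code from the tree)::

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "sysemu/replay.h"

    static void demo_complete(void *opaque)   /* hypothetical callback */
    {
        /* ... updates guest-visible device state ... */
    }

    static void demo_schedule(AioContext *ctx, void *opaque)
    {
        /* Recorded in record mode and replayed in the same order, unlike a
         * plain aio_bh_schedule_oneshot(), which would not be logged. */
        replay_bh_schedule_oneshot_event(ctx, demo_complete, opaque);
    }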
+ +Replay log format +================= + +Record/replay log consists of the header and the sequence of execution +events. The header includes 4-byte replay version id and 8-byte reserved +field. Version is updated every time replay log format changes to prevent +using replay log created by another build of qemu. + +The sequence of the events describes virtual machine state changes. +It includes all non-deterministic inputs of VM, synchronization marks and +instruction counts used to correctly inject inputs at replay. + +Synchronization marks (checkpoints) are used for synchronizing qemu threads +that perform operations with virtual hardware. These operations may change +system's state (e.g., change some register or generate interrupt) and +therefore should execute synchronously with CPU thread. + +Every event in the log includes 1-byte event id and optional arguments. +When argument is an array, it is stored as 4-byte array length +and corresponding number of bytes with data. +Here is the list of events that are written into the log: + + - EVENT_INSTRUCTION. Instructions executed since last event. Followed by: + + - 4-byte number of executed instructions. + + - EVENT_INTERRUPT. Used to synchronize interrupt processing. + - EVENT_EXCEPTION. Used to synchronize exception handling. + - EVENT_ASYNC. This is a group of events. When such an event is generated, + it is stored in the queue and processed in icount_account_warp_timer(). + Every such event has it's own id from the following list: + + - REPLAY_ASYNC_EVENT_BH. Bottom-half callback. This event synchronizes + callbacks that affect virtual machine state, but normally called + asynchronously. Followed by: + + - 8-byte operation id. + + - REPLAY_ASYNC_EVENT_INPUT. Input device event. Contains + parameters of keyboard and mouse input operations + (key press/release, mouse pointer movement). Followed by: + + - 9-16 bytes depending of input event. + + - REPLAY_ASYNC_EVENT_INPUT_SYNC. Internal input synchronization event. + - REPLAY_ASYNC_EVENT_CHAR_READ. Character (e.g., serial port) device input + initiated by the sender. Followed by: + + - 1-byte character device id. + - Array with bytes were read. + + - REPLAY_ASYNC_EVENT_BLOCK. Block device operation. Used to synchronize + operations with disk and flash drives with CPU. Followed by: + + - 8-byte operation id. + + - REPLAY_ASYNC_EVENT_NET. Incoming network packet. Followed by: + + - 1-byte network adapter id. + - 4-byte packet flags. + - Array with packet bytes. + + - EVENT_SHUTDOWN. Occurs when user sends shutdown event to qemu, + e.g., by closing the window. + - EVENT_CHAR_WRITE. Used to synchronize character output operations. Followed by: + + - 4-byte output function return value. + - 4-byte offset in the output array. + + - EVENT_CHAR_READ_ALL. Used to synchronize character input operations, + initiated by qemu. Followed by: + + - Array with bytes that were read. + + - EVENT_CHAR_READ_ALL_ERROR. Unsuccessful character input operation, + initiated by qemu. Followed by: + + - 4-byte error code. + + - EVENT_CLOCK + clock_id. Group of events for host clock read operations. Followed by: + + - 8-byte clock value. + + - EVENT_CHECKPOINT + checkpoint_id. Checkpoint for synchronization of + CPU, internal threads, and asynchronous input events. + - EVENT_END. Last event in the log. 
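For orientation, a hypothetical log walker for the layout described above might look like this (an illustrative sketch, not QEMU's actual reader; names and error handling are invented)::

    #include <stdint.h>
    #include <stdio.h>

    /* Read the fixed header: 4-byte version id, 8-byte reserved field. */
    static int read_log_header(FILE *f)
    {
        uint32_t version;
        uint64_t reserved;

        if (fread(&version, sizeof(version), 1, f) != 1 ||
            fread(&reserved, sizeof(reserved), 1, f) != 1) {
            return -1;
        }
        return 0;
    }

    /* Each event starts with a 1-byte id; array arguments that follow are
     * stored as a 4-byte length plus that many data bytes. */
    static int read_next_event_id(FILE *f)
    {
        uint8_t event_id;

        if (fread(&event_id, sizeof(event_id), 1, f) != 1) {
            return -1;   /* end of log or read error */
        }
        return event_id; /* dispatch on the event ids listed above */
    }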
diff --git a/docs/devel/replay.txt b/docs/devel/replay.txt deleted file mode 100644 index e641c35add..0000000000 --- a/docs/devel/replay.txt +++ /dev/null @@ -1,46 +0,0 @@ -Record/replay mechanism, that could be enabled through icount mode, expects -the virtual devices to satisfy the following requirements. - -The main idea behind this document is that everything that affects -the guest state during execution in icount mode should be deterministic. - -Timers -====== - -All virtual devices should use virtual clock for timers that change the guest -state. Virtual clock is deterministic, therefore such timers are deterministic -too. - -Virtual devices can also use realtime clock for the events that do not change -the guest state directly. When the clock ticking should depend on VM execution -speed, use virtual clock with EXTERNAL attribute. It is not deterministic, -but its speed depends on the guest execution. This clock is used by -the virtual devices (e.g., slirp routing device) that lie outside the -replayed guest. - -Bottom halves -============= - -Bottom half callbacks, that affect the guest state, should be invoked through -replay_bh_schedule_event or replay_bh_schedule_oneshot_event functions. -Their invocations are saved in record mode and synchronized with the existing -log in replay mode. - -Saving/restoring the VM state -============================= - -All fields in the device state structure (including virtual timers) -should be restored by loadvm to the same values they had before savevm. - -Avoid accessing other devices' state, because the order of saving/restoring -is not defined. It means that you should not call functions like -'update_irq' in post_load callback. Save everything explicitly to avoid -the dependencies that may make restoring the VM state non-deterministic. - -Stopping the VM -=============== - -Stopping the guest should not interfere with its state (with the exception -of the network connections, that could be broken by the remote timeouts). -VM can be stopped at any moment of replay by the user. Restarting the VM -after that stop should not break the replay by the unneeded guest state change. diff --git a/docs/devel/reset.rst b/docs/devel/reset.rst index abea1102dc..7cc6a6b314 100644 --- a/docs/devel/reset.rst +++ b/docs/devel/reset.rst @@ -210,9 +210,11 @@ Polling the reset state Resettable interface provides the ``resettable_is_in_reset()`` function. This function returns true if the object parameter is currently under reset. -An object is under reset from the beginning of the *init* phase to the end of -the *exit* phase. During all three phases, the function will return that the -object is in reset. +An object is under reset from the beginning of the *enter* phase (before +either its children or its own enter method is called) to the *exit* +phase. During *enter* and *hold* phase only, the function will return that the +object is in reset. The state is changed after the *exit* is propagated to +its children and just before calling the object's own *exit* method. This function may be used if the object behavior has to be adapted while in reset state. For example if a device has an irq input, diff --git a/docs/devel/stable-process.rst b/docs/devel/stable-process.rst index e541b983fa..c21fb86645 100644 --- a/docs/devel/stable-process.rst +++ b/docs/devel/stable-process.rst @@ -1,3 +1,5 @@ +.. 
_stable-process: + QEMU and the stable process =========================== diff --git a/docs/devel/style.rst b/docs/devel/style.rst index 260e3263fa..7ddd42b6c2 100644 --- a/docs/devel/style.rst +++ b/docs/devel/style.rst @@ -1,3 +1,5 @@ +.. _coding-style: + ================= QEMU Coding Style ================= @@ -10,6 +12,10 @@ patches before submitting. Formatting and style ******************** +The repository includes a ``.editorconfig`` file which can help with +getting the right settings for your preferred $EDITOR. See +``_ for details. + Whitespace ========== @@ -149,6 +155,12 @@ If there are two versions of a function to be called with or without a lock held, the function that expects the lock to be already held usually uses the suffix ``_locked``. +If a function is a shim designed to deal with compatibility +workarounds we use the suffix ``_compat``. These are generally not +called directly and aliased to the plain function name via the +pre-processor. Another common suffix is ``_impl``; it is used for the +concrete implementation of a function that will not be called +directly, but rather through a macro or an inline function. Block structure =============== @@ -481,11 +493,11 @@ of arguments. C standard, implementation defined and undefined behaviors ========================================================== -C code in QEMU should be written to the C99 language specification. A copy -of the final version of the C99 standard with corrigenda TC1, TC2, and TC3 -included, formatted as a draft, can be downloaded from: +C code in QEMU should be written to the C11 language specification. A +copy of the final version of the C11 standard formatted as a draft, +can be downloaded from: - ``_ + ``_ The C language specification defines regions of undefined behavior and implementation defined behavior (to give compiler authors enough leeway to @@ -510,7 +522,7 @@ documented in the GNU Compiler Collection manual starting at version 4.0. Automatic memory deallocation ============================= -QEMU has a mandatory dependency either the GCC or CLang compiler. As +QEMU has a mandatory dependency on either the GCC or the Clang compiler. As such it has the freedom to make use of a C language extension for automatically running a cleanup function when a stack variable goes out of scope. This can be used to simplify function cleanup paths, @@ -686,7 +698,7 @@ Rationale: hex numbers are hard to read in logs when there is no 0x prefix, especially when (occasionally) the representation doesn't contain any letters and especially in one line with other decimal numbers. Number groups are allowed to not use '0x' because for some things notations like %x.%x.%x are used not -only in Qemu. Also dumping raw data bytes with '0x' is less readable. +only in QEMU. Also dumping raw data bytes with '0x' is less readable. '#' printf flag --------------- diff --git a/docs/devel/submitting-a-patch.rst b/docs/devel/submitting-a-patch.rst new file mode 100644 index 0000000000..c641d948f1 --- /dev/null +++ b/docs/devel/submitting-a-patch.rst @@ -0,0 +1,593 @@ +.. _submitting-a-patch: + +Submitting a Patch +================== + +QEMU welcomes contributions to fix bugs, add functionality or improve +the documentation. However, we get a lot of patches, and so we have +some guidelines about submitting them. If you follow these, you'll +help make our task of contribution review easier and your change is +likely to be accepted and committed faster. 
+ +This page seems very long, so if you are only trying to post a quick +one-shot fix, the bare minimum we ask is that: + +.. list-table:: Minimal Checklist for Patches + :widths: 35 65 + :header-rows: 1 + + * - Check + - Reason + * - Patches contain Signed-off-by: Real Name + - States you are legally able to contribute the code. See :ref:`patch_emails_must_include_a_signed_off_by_line` + * - Sent as patch emails to ``qemu-devel@nongnu.org`` + - The project uses an email list based workflow. See :ref:`submitting_your_patches` + * - Be prepared to respond to review comments + - Code that doesn't pass review will not get merged. See :ref:`participating_in_code_review` + +You do not have to subscribe to post (list policy is to reply-to-all to +preserve CCs and keep non-subscribers in the loop on the threads they +start), although you may find it easier as a subscriber to pick up good +ideas from other posts. If you do subscribe, be prepared for a high +volume of email, often over one thousand messages in a week. The list is +moderated; first-time posts from an email address (whether or not you +subscribed) may be subject to some delay while waiting for a moderator +to allow your address. + +The larger your contribution is, or if you plan on becoming a long-term +contributor, then the more important the rest of this page becomes. +Reading the table of contents below should already give you an idea of +the basic requirements. Use the table of contents as a reference, and +read the parts that you have doubts about. + +.. contents:: Table of Contents + +.. _writing_your_patches: + +Writing your Patches +-------------------- + +.. _use_the_qemu_coding_style: + +Use the QEMU coding style +~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can run run *scripts/checkpatch.pl * before submitting to +check that you are in compliance with our coding standards. Be aware +that ``checkpatch.pl`` is not infallible, though, especially where C +preprocessor macros are involved; use some common sense too. See also: + +- :ref:`coding-style` +- `Automate a checkpatch run on + commit `__ + +.. _base_patches_against_current_git_master: + +Base patches against current git master +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +There's no point submitting a patch which is based on a released version +of QEMU because development will have moved on from then and it probably +won't even apply to master. We only apply selected bugfixes to release +branches and then only as backports once the code has gone into master. + +It is also okay to base patches on top of other on-going work that is +not yet part of the git master branch. To aid continuous integration +tools, such as `patchew `__, you should `add a +tag `__ +line ``Based-on: $MESSAGE_ID`` to your cover letter to make the series +dependency obvious. + +.. _split_up_long_patches: + +Split up long patches +~~~~~~~~~~~~~~~~~~~~~ + +Split up longer patches into a patch series of logical code changes. +Each change should compile and execute successfully. For instance, don't +add a file to the makefile in patch one and then add the file itself in +patch two. (This rule is here so that people can later use tools like +`git bisect `__ without hitting +points in the commit history where QEMU doesn't work for reasons +unrelated to the bug they're chasing.) Put documentation first, not +last, so that someone reading the series can do a clean-room evaluation +of the documentation, then validate that the code matched the +documentation. A commit message that mentions "Also, ..." 
is often a +good candidate for splitting into multiple patches. For more thoughts on +properly splitting patches and writing good commit messages, see `this +advice from +OpenStack `__. + +.. _make_code_motion_patches_easy_to_review: + +Make code motion patches easy to review +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If a series requires large blocks of code motion, there are tricks for +making the refactoring easier to review. Split up the series so that +semantic changes (or even function renames) are done in a separate patch +from the raw code motion. Use a one-time setup of ``git config +diff.renames true;`` ``git config diff.algorithm patience`` (refer to +`git-config `__). The 'diff.renames' +property ensures file rename patches will be given in a more compact +representation that focuses only on the differences across the file +rename, instead of showing the entire old file as a deletion and the new +file as an insertion. Meanwhile, the 'diff.algorithm' property ensures +that extracting a non-contiguous subset of one file into a new file, but +where all extracted parts occur in the same order both before and after +the patch, will reduce churn in trying to treat unrelated ``}`` lines in +the original file as separating hunks of changes. + +Ideally, a code motion patch can be reviewed by doing:: + + git format-patch --stdout -1 > patch; + diff -u <(sed -n 's/^-//p' patch) <(sed -n 's/^\+//p' patch) + +to focus on the few changes that weren't wholesale code motion. + +.. _dont_include_irrelevant_changes: + +Don't include irrelevant changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In particular, don't include formatting, coding style or whitespace +changes to bits of code that would otherwise not be touched by the +patch. (It's OK to fix coding style issues in the immediate area (few +lines) of the lines you're changing.) If you think a section of code +really does need a reindent or other large-scale style fix, submit this +as a separate patch which makes no semantic changes; don't put it in the +same patch as your bug fix. + +For smaller patches in less frequently changed areas of QEMU, consider +using the :ref:`trivial-patches` process. + +.. _write_a_meaningful_commit_message: + +Write a meaningful commit message +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Commit messages should be meaningful and should stand on their own as a +historical record of why the changes you applied were necessary or +useful. + +QEMU follows the usual standard for git commit messages: the first line +(which becomes the email subject line) is "subsystem: single line +summary of change". Whether the "single line summary of change" starts +with a capital is a matter of taste, but we prefer that the summary does +not end in a dot. Look at ``git shortlog -30`` for an idea of sample +subject lines. Then there is a blank line and a more detailed +description of the patch, another blank and your Signed-off-by: line. +Please do not use lines that are longer than 76 characters in your +commit message (so that the text still shows up nicely with "git show" +in a 80-columns terminal window). + +The body of the commit message is a good place to document why your +change is important. Don't include comments like "This is a suggestion +for fixing this bug" (they can go below the ``---`` line in the email so +they don't go into the final commit message). 
Make sure the body of the +commit message can be read in isolation even if the reader's mailer +displays the subject line some distance apart (that is, a body that +starts with "... so that" as a continuation of the subject line is +harder to follow). + +If your patch fixes a commit that is already in the repository, please +add an additional line with "Fixes: +("Fixed commit subject")" below the patch description / before your +"Signed-off-by:" line in the commit message. + +If your patch fixes a bug in the gitlab bug tracker, please add a line +with "Resolves: " to the commit message, too. Gitlab can +close bugs automatically once commits with the "Resolved:" keyword get +merged into the master branch of the project. And if your patch addresses +a bug in another public bug tracker, you can also use a line with +"Buglink: " for reference here, too. + +Example:: + + Fixes: 14055ce53c2d ("s390x/tcg: avoid overflows in time2tod/tod2time") + Resolves: https://gitlab.com/qemu-project/qemu/-/issues/42 + Buglink: https://bugs.launchpad.net/qemu/+bug/1804323`` + +Some other tags that are used in commit messages include "Message-Id:" +"Tested-by:", "Acked-by:", "Reported-by:", "Suggested-by:". See ``git +log`` for these keywords for example usage. + +.. _test_your_patches: + +Test your patches +~~~~~~~~~~~~~~~~~ + +Although QEMU uses various :ref:`ci` services that attempt to test +patches submitted to the list, it still saves everyone time if you +have already tested that your patch compiles and works. Because QEMU +is such a large project the default configuration won't create a +testing pipeline on GitLab when a branch is pushed. See the :ref:`CI +variable documentation` for details on how to control the +running of tests; but it is still wise to also check that your patches +work with a full build before submitting a series, especially if your +changes might have an unintended effect on other areas of the code you +don't normally experiment with. See :ref:`testing` for more details on +what tests are available. + +Also, it is a wise idea to include a testsuite addition as part of +your patches - either to ensure that future changes won't regress your +new feature, or to add a test which exposes the bug that the rest of +your series fixes. Keeping separate commits for the test and the fix +allows reviewers to rebase the test to occur first to prove it catches +the problem, then again to place it last in the series so that +bisection doesn't land on a known-broken state. + +.. _submitting_your_patches: + +Submitting your Patches +----------------------- + +The QEMU project uses a public email based workflow for reviewing and +merging patches. As a result all contributions to QEMU must be **sent +as patches** to the qemu-devel `mailing list +`__. Patch +contributions should not be posted on the bug tracker, posted on +forums, or externally hosted and linked to. (We have other mailing +lists too, but all patches must go to qemu-devel, possibly with a Cc: +to another list.) ``git send-email`` (`step-by-step setup guide +`__ and `hints and tips +`__) +works best for delivering the patch without mangling it, but +attachments can be used as a last resort on a first-time submission. + +.. _if_you_cannot_send_patch_emails: + +If you cannot send patch emails +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In rare cases it may not be possible to send properly formatted patch +emails. You can use `sourcehut `__ to send your +patches to the QEMU mailing list by following these steps: + +#. 
Register or sign in to your account +#. Add your SSH public key in `meta \| + keys `__. +#. Publish your git branch using **git push git@git.sr.ht:~USERNAME/qemu + HEAD** +#. Send your patches to the QEMU mailing list using the web-based + ``git-send-email`` UI at https://git.sr.ht/~USERNAME/qemu/send-email + +`This video +`__ +shows the web-based ``git-send-email`` workflow. Documentation is +available `here +`__. + +.. _cc_the_relevant_maintainer: + +CC the relevant maintainer +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Send patches both to the mailing list and CC the maintainer(s) of the +files you are modifying. look in the MAINTAINERS file to find out who +that is. Also try using scripts/get_maintainer.pl from the repository +for learning the most common committers for the files you touched. + +Example:: + + ~/src/qemu/scripts/get_maintainer.pl -f hw/ide/core.c + +In fact, you can automate this, via a one-time setup of ``git config +sendemail.cccmd 'scripts/get_maintainer.pl --nogit-fallback'`` (Refer to +`git-config `__.) + +.. _do_not_send_as_an_attachment: + +Do not send as an attachment +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Send patches inline so they are easy to reply to with review comments. +Do not put patches in attachments. + +.. _use_git_format_patch: + +Use ``git format-patch`` +~~~~~~~~~~~~~~~~~~~~~~~~ + +Use the right diff format. +`git format-patch `__ will +produce patch emails in the right format (check the documentation to +find out how to drive it). You can then edit the cover letter before +using ``git send-email`` to mail the files to the mailing list. (We +recommend `git send-email `__ +because mail clients often mangle patches by wrapping long lines or +messing up whitespace. Some distributions do not include send-email in a +default install of git; you may need to download additional packages, +such as 'git-email' on Fedora-based systems.) Patch series need a cover +letter, with shallow threading (all patches in the series are +in-reply-to the cover letter, but not to each other); single unrelated +patches do not need a cover letter (but if you do send a cover letter, +use ``--numbered`` so the cover and the patch have distinct subject lines). +Patches are easier to find if they start a new top-level thread, rather +than being buried in-reply-to another existing thread. + +.. _avoid_posting_large_binary_blob: + +Avoid posting large binary blob +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you added binaries to the repository, consider producing the patch +emails using ``git format-patch --no-binary`` and include a link to a +git repository to fetch the original commit. + +.. _patch_emails_must_include_a_signed_off_by_line: + +Patch emails must include a ``Signed-off-by:`` line +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Your patches **must** include a Signed-off-by: line. This is a hard +requirement because it's how you say "I'm legally okay to contribute +this and happy for it to go into QEMU". The process is modelled after +the `Linux kernel +`__ +policy. + +If you wrote the patch, make sure your "From:" and "Signed-off-by:" +lines use the same spelling. It's okay if you subscribe or contribute to +the list via more than one address, but using multiple addresses in one +commit just confuses things. If someone else wrote the patch, git will +include a "From:" line in the body of the email (different from your +envelope From:) that will give credit to the correct author; but again, +that author's Signed-off-by: line is mandatory, with the same spelling. 
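
As a purely illustrative example (the name, address and subsystem are
made up), a patch you wrote yourself would therefore carry matching
identities::

    From: Jane Example <jane@example.org>
    Subject: [PATCH] frobnicator: do not overflow the widget count

    Explain here why the change is needed and how it was tested.

    Signed-off-by: Jane Example <jane@example.org>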
+ +There are various tooling options for automatically adding these tags +include using ``git commit -s`` or ``git format-patch -s``. For more +information see `SubmittingPatches 1.12 +`__. + +.. _include_a_meaningful_cover_letter: + +Include a meaningful cover letter +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This is a requirement for any series with multiple patches (as it aids +continuous integration), but optional for an isolated patch. The cover +letter explains the overall goal of such a series, and also provides a +convenient 0/N email for others to reply to the series as a whole. A +one-time setup of ``git config format.coverletter auto`` (refer to +`git-config `__) will generate the +cover letter as needed. + +When reviewers don't know your goal at the start of their review, they +may object to early changes that don't make sense until the end of the +series, because they do not have enough context yet at that point of +their review. A series where the goal is unclear also risks a higher +number of review-fix cycles because the reviewers haven't bought into +the idea yet. If the cover letter can explain these points to the +reviewer, the process will be smoother patches will get merged faster. +Make sure your cover letter includes a diffstat of changes made over the +entire series; potential reviewers know what files they are interested +in, and they need an easy way determine if your series touches them. + +.. _use_the_rfc_tag_if_needed: + +Use the RFC tag if needed +~~~~~~~~~~~~~~~~~~~~~~~~~ + +For example, "[PATCH RFC v2]". ``git format-patch --subject-prefix=RFC`` +can help. + +"RFC" means "Request For Comments" and is a statement that you don't +intend for your patchset to be applied to master, but would like some +review on it anyway. Reasons for doing this include: + +- the patch depends on some pending kernel changes which haven't yet + been accepted, so the QEMU patch series is blocked until that + dependency has been dealt with, but is worth reviewing anyway +- the patch set is not finished yet (perhaps it doesn't cover all use + cases or work with all targets) but you want early review of a major + API change or design structure before continuing + +In general, since it's asking other people to do review work on a +patchset that the submitter themselves is saying shouldn't be applied, +it's best to: + +- use it sparingly +- in the cover letter, be clear about why a patch is an RFC, what areas + of the patchset you're looking for review on, and why reviewers + should care + +.. _consider_whether_your_patch_is_applicable_for_stable: + +Consider whether your patch is applicable for stable +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If your patch fixes a severe issue or a regression, it may be applicable +for stable. In that case, consider adding ``Cc: qemu-stable@nongnu.org`` +to your patch to notify the stable maintainers. + +For more details on how QEMU's stable process works, refer to the +:ref:`stable-process` page. + +.. _participating_in_code_review: + +Participating in Code Review +---------------------------- + +All patches submitted to the QEMU project go through a code review +process before they are accepted. This will often mean a series will +go through a number of iterations before being picked up by +:ref:`maintainers`. You therefore should be prepared to +read replies to your messages and be willing to act on them. 
+ +Maintainers are often willing to manually fix up first-time +contributions, since there is a learning curve involved in making an +ideal patch submission. However for the best results you should +proactively respond to suggestions with changes or justifications for +your current approach. + +Some areas of code that are well maintained may review patches +quickly, lesser-loved areas of code may have a longer delay. + +.. _stay_around_to_fix_problems_raised_in_code_review: + +Stay around to fix problems raised in code review +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Not many patches get into QEMU straight away -- it is quite common that +developers will identify bugs, or suggest a cleaner approach, or even +just point out code style issues or commit message typos. You'll need to +respond to these, and then send a second version of your patches with +the issues fixed. This takes a little time and effort on your part, but +if you don't do it then your changes will never get into QEMU. + +Remember that a maintainer is under no obligation to take your +patches. If someone has spent the time reviewing your code and +suggesting improvements and you simply re-post without either +addressing the comment directly or providing additional justification +for the change then it becomes wasted effort. You cannot demand others +merge and then fix up your code after the fact. + +When replying to comments on your patches **reply to all and not just +the sender** -- keeping discussion on the mailing list means everybody +can follow it. Remember the spirit of the :ref:`code_of_conduct` and +keep discussions respectful and collaborative and avoid making +personal comments. + +.. _pay_attention_to_review_comments: + +Pay attention to review comments +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Someone took their time to review your work, and it pays to respect that +effort; repeatedly submitting a series without addressing all comments +from the previous round tends to alienate reviewers and stall your +patch. Reviewers aren't always perfect, so it is okay if you want to +argue that your code was correct in the first place instead of blindly +doing everything the reviewer asked. On the other hand, if someone +pointed out a potential issue during review, then even if your code +turns out to be correct, it's probably a sign that you should improve +your commit message and/or comments in the code explaining why the code +is correct. + +If you fix issues that are raised during review **resend the entire +patch series** not just the one patch that was changed. This allows +maintainers to easily apply the fixed series without having to manually +identify which patches are relevant. Send the new version as a complete +fresh email or series of emails -- don't try to make it a followup to +version 1. (This helps automatic patch email handling tools distinguish +between v1 and v2 emails.) + +.. _when_resending_patches_add_a_version_tag: + +When resending patches add a version tag +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +All patches beyond the first version should include a version tag -- for +example, "[PATCH v2]". This means people can easily identify whether +they're looking at the most recent version. (The first version of a +patch need not say "v1", just [PATCH] is sufficient.) For patch series, +the version applies to the whole series -- even if you only change one +patch, you resend the entire series and mark it as "v2". Don't try to +track versions of different patches in the series separately. 
`git +format-patch `__ and `git +send-email `__ both understand +the ``-v2`` option to make this easier. Send each new revision as a new +top-level thread, rather than burying it in-reply-to an earlier +revision, as many reviewers are not looking inside deep threads for new +patches. + +.. _include_version_history_in_patchset_revisions: + +Include version history in patchset revisions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For later versions of patches, include a summary of changes from +previous versions, but not in the commit message itself. In an email +formatted as a git patch, the commit message is the part above the ``---`` +line, and this will go into the git changelog when the patch is +committed. This part should be a self-contained description of what this +version of the patch does, written to make sense to anybody who comes +back to look at this commit in git in six months' time. The part below +the ``---`` line and above the patch proper (git format-patch puts the +diffstat here) is a good place to put remarks for people reading the +patch email, and this is where the "changes since previous version" +summary belongs. The `git-publish +`__ script can help with +tracking a good summary across versions. Also, the `git-backport-diff +`__ script can help focus +reviewers on what changed between revisions. + +.. _tips_and_tricks: + +Tips and Tricks +--------------- + +.. _proper_use_of_reviewed_by_tags_can_aid_review: + +Proper use of Reviewed-by: tags can aid review +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When reviewing a large series, a reviewer can reply to some of the +patches with a Reviewed-by tag, stating that they are happy with that +patch in isolation (sometimes conditional on minor cleanup, like fixing +whitespace, that doesn't affect code content). You should then update +those commit messages by hand to include the Reviewed-by tag, so that in +the next revision, reviewers can spot which patches were already clean +from the previous round. Conversely, if you significantly modify a patch +that was previously reviewed, remove the reviewed-by tag out of the +commit message, as well as listing the changes from the previous +version, to make it easier to focus a reviewer's attention to your +changes. + +.. _if_your_patch_seems_to_have_been_ignored: + +If your patch seems to have been ignored +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If your patchset has received no replies you should "ping" it after a +week or two, by sending an email as a reply-to-all to the patch mail, +including the word "ping" and ideally also a link to the page for the +patch on `patchew `__ or +`lore.kernel.org `__. It's worth +double-checking for reasons why your patch might have been ignored +(forgot to CC the maintainer? annoyed people by failing to respond to +review comments on an earlier version?), but often for less-maintained +areas of QEMU patches do just slip through the cracks. If your ping is +also ignored, ping again after another week or so. As the submitter, you +are the person with the most motivation to get your patch applied, so +you have to be persistent. + +.. _is_my_patch_in: + +Is my patch in? +~~~~~~~~~~~~~~~ + +QEMU has some Continuous Integration machines that try to catch patch +submission problems as soon as possible. `patchew +`__ includes a web interface for tracking the +status of various threads that have been posted to the list, and may +send you an automated mail if it detected a problem with your patch. 
+ +Once your patch has had enough review on list, the maintainer for that +area of code will send notification to the list that they are including +your patch in a particular staging branch. Periodically, the maintainer +then takes care of :ref:`submitting-a-pull-request` +for aggregating topic branches into mainline QEMU. Generally, you do not +need to send a pull request unless you have contributed enough patches +to become a maintainer over a particular section of code. Maintainers +may further modify your commit, by resolving simple merge conflicts or +fixing minor typos pointed out during review, but will always add a +Signed-off-by line in addition to yours, indicating that it went through +their tree. Occasionally, the maintainer's pull request may hit more +difficult merge conflicts, where you may be requested to help rebase and +resolve the problems. It may take a couple of weeks between when your +patch first had a positive review to when it finally lands in qemu.git; +release cycle freezes may extend that time even longer. + +.. _return_the_favor: + +Return the favor +~~~~~~~~~~~~~~~~ + +Peer review only works if everyone chips in a bit of review time. If +everyone submitted more patches than they reviewed, we would have a +patch backlog. A good goal is to try to review at least as many patches +from others as what you submit. Don't worry if you don't know the code +base as well as a maintainer; it's perfectly fine to admit when your +review is weak because you are unfamiliar with the code. diff --git a/docs/devel/submitting-a-pull-request.rst b/docs/devel/submitting-a-pull-request.rst new file mode 100644 index 0000000000..a4cd7ebbb6 --- /dev/null +++ b/docs/devel/submitting-a-pull-request.rst @@ -0,0 +1,73 @@ +.. _submitting-a-pull-request: + +Submitting a Pull Request +========================= + +QEMU welcomes contributions of code, but we generally expect these to be +sent as simple patch emails to the mailing list (see our page on +:ref:`submitting-a-patch` +for more details). Generally only existing submaintainers of a tree +will need to submit pull requests, although occasionally for a large +patch series we might ask a submitter to send a pull request. This page +documents our recommendations on pull requests for those people. + +A good rule of thumb is not to send a pull request unless somebody asks +you to. + +**Resend the patches with the pull request** as emails which are +threaded as follow-ups to the pull request itself. The simplest way to +do this is to use ``git format-patch --cover-letter`` to create the +emails, and then edit the cover letter to include the pull request +details that ``git request-pull`` outputs. + +**Use PULL as the subject line tag** in both the cover letter and the +retransmitted patch mails (for example, by using +``--subject-prefix=PULL`` in your ``git format-patch`` command). This +helps people to filter in or out the resulting emails (especially useful +if they are only CC'd on one email out of the set). + +**Each patch must have your own Signed-off-by: line** as well as that of +the original author if the patch was not written by you. This is because +with a pull request you're now indicating that the patch has passed via +you rather than directly from the original author. + +**Don't forget to add Reviewed-by: and Acked-by: lines**. When other +people have reviewed the patches you're putting in the pull request, +make sure you've copied their signoffs across. 
(If you use the `patches +tool `__ to add patches from email +directly to your git repo it will include the tags automatically; if +you're updating patches manually or in some other way you'll need to +edit the commit messages by hand.) + +**Don't send pull requests for code that hasn't passed review**. A pull +request says these patches are ready to go into QEMU now, so they must +have passed the standard code review processes. In particular if you've +corrected issues in one round of code review, you need to send your +fixed patch series as normal to the list; you can't put it in a pull +request until it's gone through. (Extremely trivial fixes may be OK to +just fix in passing, but if in doubt err on the side of not.) + +**Test before sending**. This is an obvious thing to say, but make sure +everything builds (including that it compiles at each step of the patch +series) and that "make check" passes before sending out the pull +request. As a submaintainer you're one of QEMU's lines of defense +against bad code, so double check the details. + +**All pull requests must be signed**. By "signed" here we mean that +the pullreq email should quote a tag which is a GPG-signed tag (as +created with 'gpg tag -s ...'). See :ref:`maintainer_keys` for +details. + +**Pull requests not for master should say "not for master" and have +"PULL SUBSYSTEM whatever" in the subject tag**. If your pull request is +targeting a stable branch or some submaintainer tree, please include the +string "not for master" in the cover letter email, and make sure the +subject tag is "PULL SUBSYSTEM s390/block/whatever" rather than just +"PULL". This allows it to be automatically filtered out of the set of +pull requests that should be applied to master. + +You might be interested in the `make-pullreq +`__ +script which automates some of this process for you and includes a few +sanity checks. Note that you must edit it to configure it suitably for +your local situation! diff --git a/docs/devel/tcg-icount.rst b/docs/devel/tcg-icount.rst index 8d67b6c076..50c8e8dabc 100644 --- a/docs/devel/tcg-icount.rst +++ b/docs/devel/tcg-icount.rst @@ -92,6 +92,3 @@ When the translator is handling an instruction of this kind: } * it must end the TB immediately after this instruction - -Note that some older front-ends call a "gen_io_end()" function: -this is obsolete and should not be used. diff --git a/docs/devel/tcg-plugins.rst b/docs/devel/tcg-plugins.rst index 047bf4ada7..9740a70406 100644 --- a/docs/devel/tcg-plugins.rst +++ b/docs/devel/tcg-plugins.rst @@ -3,7 +3,6 @@ Copyright (c) 2019, Linaro Limited Written by Emilio Cota and Alex Bennée -================ QEMU TCG Plugins ================ @@ -16,8 +15,35 @@ only monitor it passively. However they can do this down to an individual instruction granularity including potentially subscribing to all load and store operations. -API Stability -============= +Usage +----- + +Any QEMU binary with TCG support has plugins enabled by default. +Earlier releases needed to be explicitly enabled with:: + + configure --enable-plugins + +Once built a program can be run with multiple plugins loaded each with +their own arguments:: + + $QEMU $OTHER_QEMU_ARGS \ + -plugin contrib/plugin/libhowvec.so,inline=on,count=hint \ + -plugin contrib/plugin/libhotblocks.so + +Arguments are plugin specific and can be used to modify their +behaviour. In this case the howvec plugin is being asked to use inline +ops to count and break down the hint instructions by type. 
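
The plugins loaded this way are ordinary shared objects built against
the plugin API (the details are covered in the Writing plugins section
below). As a rough sketch of what such an object contains (illustrative
only; all interesting instrumentation is omitted), a do-nothing plugin
looks like::

    #include <qemu-plugin.h>

    QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

    /* called every time a new translation block is translated */
    static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
    {
        /* register per-block or per-instruction callbacks here */
    }

    /* called once when QEMU exits */
    static void plugin_exit(qemu_plugin_id_t id, void *userdata)
    {
        qemu_plugin_outs("plugin finished\n");
    }

    QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                               const qemu_info_t *info,
                                               int argc, char **argv)
    {
        qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
        qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
        return 0;
    }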
+ +Linux user-mode emulation also evaluates the environment variable +``QEMU_PLUGIN``:: + + QEMU_PLUGIN="file=contrib/plugins/libhowvec.so,inline=on,count=hint" $QEMU + +Writing plugins +--------------- + +API versioning +~~~~~~~~~~~~~~ This is a new feature for QEMU and it does allow people to develop out-of-tree plugins that can be dynamically linked into a running QEMU @@ -25,9 +51,6 @@ process. However the project reserves the right to change or break the API should it need to do so. The best way to avoid this is to submit your plugin upstream so they can be updated if/when the API changes. -API versioning --------------- - All plugins need to declare a symbol which exports the plugin API version they were built against. This can be done simply by:: @@ -43,18 +66,8 @@ current API versions supported by QEMU. The API version will be incremented if new APIs are added. The minimum API version will be incremented if existing APIs are changed or removed. -Exposure of QEMU internals --------------------------- - -The plugin architecture actively avoids leaking implementation details -about how QEMU's translation works to the plugins. While there are -conceptions such as translation time and translation blocks the -details are opaque to plugins. The plugin is able to query select -details of instructions and system configuration only through the -exported *qemu_plugin* functions. - -Query Handle Lifetime ---------------------- +Lifetime of the query handle +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Each callback provides an opaque anonymous information handle which can usually be further queried to find out information about a @@ -63,32 +76,8 @@ valid during the lifetime of the callback so it is important that any information that is needed is extracted during the callback and saved by the plugin. -API -=== - -.. kernel-doc:: include/qemu/qemu-plugin.h - -Usage -===== - -Any QEMU binary with TCG support has plugins enabled by default. -Earlier releases needed to be explicitly enabled with:: - - configure --enable-plugins - -Once built a program can be run with multiple plugins loaded each with -their own arguments:: - - $QEMU $OTHER_QEMU_ARGS \ - -plugin tests/plugin/libhowvec.so,arg=inline,arg=hint \ - -plugin tests/plugin/libhotblocks.so - -Arguments are plugin specific and can be used to modify their -behaviour. In this case the howvec plugin is being asked to use inline -ops to count and break down the hint instructions by type. - -Plugin Life cycle -================= +Plugin life cycle +~~~~~~~~~~~~~~~~~ First the plugin is loaded and the public qemu_plugin_install function is called. The plugin will then register callbacks for various plugin @@ -111,11 +100,21 @@ callback which can then ensure atomicity itself. Finally when QEMU exits all the registered *atexit* callbacks are invoked. +Exposure of QEMU internals +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The plugin architecture actively avoids leaking implementation details +about how QEMU's translation works to the plugins. While there are +conceptions such as translation time and translation blocks the +details are opaque to plugins. The plugin is able to query select +details of instructions and system configuration only through the +exported *qemu_plugin* functions. + Internals -========= +--------- Locking -------- +~~~~~~~ We have to ensure we cannot deadlock, particularly under MTTCG. For this we acquire a lock when called from plugin code. We also keep the @@ -142,16 +141,145 @@ requested. 
The plugin isn't completely uninstalled until the safe work has executed while all vCPUs are quiescent. Example Plugins -=============== +--------------- There are a number of plugins included with QEMU and you are encouraged to contribute your own plugins plugins upstream. There is a -``contrib/plugins`` directory where they can go. +``contrib/plugins`` directory where they can go. There are also some +basic plugins that are used to test and exercise the API during the +``make check-tcg`` target in ``tests\plugins``. -- tests/plugins +- tests/plugins/empty.c -These are some basic plugins that are used to test and exercise the -API during the ``make check-tcg`` target. +Purely a test plugin for measuring the overhead of the plugins system +itself. Does no instrumentation. + +- tests/plugins/bb.c + +A very basic plugin which will measure execution in course terms as +each basic block is executed. By default the results are shown once +execution finishes:: + + $ qemu-aarch64 -plugin tests/plugin/libbb.so \ + -d plugin ./tests/tcg/aarch64-linux-user/sha1 + SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6 + bb's: 2277338, insns: 158483046 + +Behaviour can be tweaked with the following arguments: + + * inline=true|false + + Use faster inline addition of a single counter. Not per-cpu and not + thread safe. + + * idle=true|false + + Dump the current execution stats whenever the guest vCPU idles + +- tests/plugins/insn.c + +This is a basic instruction level instrumentation which can count the +number of instructions executed on each core/thread:: + + $ qemu-aarch64 -plugin tests/plugin/libinsn.so \ + -d plugin ./tests/tcg/aarch64-linux-user/threadcount + Created 10 threads + Done + cpu 0 insns: 46765 + cpu 1 insns: 3694 + cpu 2 insns: 3694 + cpu 3 insns: 2994 + cpu 4 insns: 1497 + cpu 5 insns: 1497 + cpu 6 insns: 1497 + cpu 7 insns: 1497 + total insns: 63135 + +Behaviour can be tweaked with the following arguments: + + * inline=true|false + + Use faster inline addition of a single counter. Not per-cpu and not + thread safe. + + * sizes=true|false + + Give a summary of the instruction sizes for the execution + + * match= + + Only instrument instructions matching the string prefix. Will show + some basic stats including how many instructions have executed since + the last execution. For example:: + + $ qemu-aarch64 -plugin tests/plugin/libinsn.so,match=bl \ + -d plugin ./tests/tcg/aarch64-linux-user/sha512-vector + ... + 0x40069c, 'bl #0x4002b0', 10 hits, 1093 match hits, Δ+1257 since last match, 98 avg insns/match + 0x4006ac, 'bl #0x403690', 10 hits, 1094 match hits, Δ+47 since last match, 98 avg insns/match + 0x4037fc, 'bl #0x4002b0', 18 hits, 1095 match hits, Δ+22 since last match, 98 avg insns/match + 0x400720, 'bl #0x403690', 10 hits, 1096 match hits, Δ+58 since last match, 98 avg insns/match + 0x4037fc, 'bl #0x4002b0', 19 hits, 1097 match hits, Δ+22 since last match, 98 avg insns/match + 0x400730, 'bl #0x403690', 10 hits, 1098 match hits, Δ+33 since last match, 98 avg insns/match + 0x4037ac, 'bl #0x4002b0', 12 hits, 1099 match hits, Δ+20 since last match, 98 avg insns/match + ... + +For more detailed execution tracing see the ``execlog`` plugin for +other options. 
+ +- tests/plugins/mem.c + +Basic instruction level memory instrumentation:: + + $ qemu-aarch64 -plugin tests/plugin/libmem.so,inline=true \ + -d plugin ./tests/tcg/aarch64-linux-user/sha1 + SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6 + inline mem accesses: 79525013 + +Behaviour can be tweaked with the following arguments: + + * inline=true|false + + Use faster inline addition of a single counter. Not per-cpu and not + thread safe. + + * callback=true|false + + Use callbacks on each memory instrumentation. + + * hwaddr=true|false + + Count IO accesses (only for system emulation) + +- tests/plugins/syscall.c + +A basic syscall tracing plugin. This only works for user-mode. By +default it will give a summary of syscall stats at the end of the +run:: + + $ qemu-aarch64 -plugin tests/plugin/libsyscall \ + -d plugin ./tests/tcg/aarch64-linux-user/threadcount + Created 10 threads + Done + syscall no. calls errors + 226 12 0 + 99 11 11 + 115 11 0 + 222 11 0 + 93 10 0 + 220 10 0 + 233 10 0 + 215 8 0 + 214 4 0 + 134 2 0 + 64 2 0 + 96 1 0 + 94 1 0 + 80 1 0 + 261 1 0 + 78 1 0 + 160 1 0 + 135 1 0 - contrib/plugins/hotblocks.c @@ -168,7 +296,7 @@ slightly faster (but not thread safe) counters. Example:: - ./aarch64-linux-user/qemu-aarch64 \ + $ qemu-aarch64 \ -plugin contrib/plugins/libhotblocks.so -d plugin \ ./tests/tcg/aarch64-linux-user/sha1 SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6 @@ -182,7 +310,7 @@ Example:: Similar to hotblocks but this time tracks memory accesses:: - ./aarch64-linux-user/qemu-aarch64 \ + $ qemu-aarch64 \ -plugin contrib/plugins/libhotpages.so -d plugin \ ./tests/tcg/aarch64-linux-user/sha1 SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6 @@ -193,17 +321,32 @@ Similar to hotblocks but this time tracks memory accesses:: 0x0000000048b000, 0x0001, 130594, 0x0001, 355 0x0000000048a000, 0x0001, 1826, 0x0001, 11 +The hotpages plugin can be configured using the following arguments: + + * sortby=reads|writes|address + + Log the data sorted by either the number of reads, the number of writes, or + memory address. (Default: entries are sorted by the sum of reads and writes) + + * io=on + + Track IO addresses. Only relevant to full system emulation. (Default: off) + + * pagesize=N + + The page size used. (Default: N = 4096) + - contrib/plugins/howvec.c This is an instruction classifier so can be used to count different types of instructions. It has a number of options to refine which get -counted. You can give an argument for a class of instructions to break -it down fully, so for example to see all the system registers -accesses:: +counted. 
You can give a value to the ``count`` argument for a class of +instructions to break it down fully, so for example to see all the system +registers accesses:: - ./aarch64-softmmu/qemu-system-aarch64 $(QEMU_ARGS) \ + $ qemu-system-aarch64 $(QEMU_ARGS) \ -append "root=/dev/sda2 systemd.unit=benchmark.service" \ - -smp 4 -plugin ./contrib/plugins/libhowvec.so,arg=sreg -d plugin + -smp 4 -plugin ./contrib/plugins/libhowvec.so,count=sreg -d plugin which will lead to a sorted list after the class breakdown:: @@ -269,10 +412,10 @@ for the plugin is a path for the socket the two instances will communicate over:: - ./sparc-softmmu/qemu-system-sparc -monitor none -parallel none \ + $ qemu-system-sparc -monitor none -parallel none \ -net none -M SS-20 -m 256 -kernel day11/zImage.elf \ - -plugin ./contrib/plugins/liblockstep.so,arg=lockstep-sparc.sock \ - -d plugin,nochain + -plugin ./contrib/plugins/liblockstep.so,sockpath=lockstep-sparc.sock \ + -d plugin,nochain which will eventually report:: @@ -286,27 +429,27 @@ which will eventually report:: previously @ 0x000000ffd08098/5 (809900593 insns) previously @ 0x000000ffd080c0/1 (809900588 insns) -- contrib/plugins/hwprofile +- contrib/plugins/hwprofile.c The hwprofile tool can only be used with system emulation and allows the user to see what hardware is accessed how often. It has a number of options: - * arg=read or arg=write + * track=read or track=write By default the plugin tracks both reads and writes. You can use one of these options to limit the tracking to just one class of accesses. - * arg=source + * source Will include a detailed break down of what the guest PC that made the - access was. Not compatible with arg=pattern. Example output:: + access was. Not compatible with the pattern option. Example output:: cirrus-low-memory @ 0xfffffd00000a0000 pc:fffffc0000005cdc, 1, 256 pc:fffffc0000005ce8, 1, 256 pc:fffffc0000005cec, 1, 256 - * arg=pattern + * pattern Instead break down the accesses based on the offset into the HW region. This can be useful for seeing the most used registers of a @@ -327,9 +470,9 @@ The execlog tool traces executed instructions with memory access. It can be used for debugging and security analysis purposes. Please be aware that this will generate a lot of output. -The plugin takes no argument:: +The plugin needs default argument:: - qemu-system-arm $(QEMU_ARGS) \ + $ qemu-system-arm $(QEMU_ARGS) \ -plugin ./contrib/plugins/libexeclog.so -d plugin which will output an execution trace following this structure:: @@ -345,21 +488,26 @@ which will output an execution trace following this structure:: 0, 0xd34, 0xf9c8f000, "bl #0x10c8" 0, 0x10c8, 0xfff96c43, "ldr r3, [r0, #0x44]", load, 0x200000e4, RAM -- contrib/plugins/cache +the output can be filtered to only track certain instructions or +addresses using the ``ifilter`` or ``afilter`` options. 
You can stack the +arguments if required:: -Cache modelling plugin that measures the performance of a given cache -configuration when a given working set is run:: + $ qemu-system-arm $(QEMU_ARGS) \ + -plugin ./contrib/plugins/libexeclog.so,ifilter=st1w,afilter=0x40001808 -d plugin - qemu-x86_64 -plugin ./contrib/plugins/libcache.so \ +- contrib/plugins/cache.c + +Cache modelling plugin that measures the performance of a given L1 cache +configuration, and optionally a unified L2 per-core cache when a given working +set is run:: + + $ qemu-x86_64 -plugin ./contrib/plugins/libcache.so \ -d plugin -D cache.log ./tests/tcg/x86_64-linux-user/float_convs will report the following:: - Data accesses: 996479, Misses: 507 - Miss rate: 0.050879% - - Instruction accesses: 2641737, Misses: 18617 - Miss rate: 0.704726% + core #, data accesses, data misses, dmiss rate, insn accesses, insn misses, imiss rate + 0 996695 508 0.0510% 2642799 18617 0.7044% address, data misses, instruction 0x424f1e (_int_malloc), 109, movq %rax, 8(%rcx) @@ -377,29 +525,60 @@ will report the following:: The plugin has a number of arguments, all of them are optional: - * arg="limit=N" + * limit=N Print top N icache and dcache thrashing instructions along with their address, number of misses, and its disassembly. (default: 32) - * arg="icachesize=N" - * arg="iblksize=B" - * arg="iassoc=A" + * icachesize=N + * iblksize=B + * iassoc=A Instruction cache configuration arguments. They specify the cache size, block size, and associativity of the instruction cache, respectively. (default: N = 16384, B = 64, A = 8) - * arg="dcachesize=N" - * arg="dblksize=B" - * arg="dassoc=A" + * dcachesize=N + * dblksize=B + * dassoc=A Data cache configuration arguments. They specify the cache size, block size, and associativity of the data cache, respectively. (default: N = 16384, B = 64, A = 8) - * arg="evict=POLICY" + * evict=POLICY Sets the eviction policy to POLICY. Available policies are: :code:`lru`, :code:`fifo`, and :code:`rand`. The plugin will use the specified policy for both instruction and data caches. (default: POLICY = :code:`lru`) + + * cores=N + + Sets the number of cores for which we maintain separate icache and dcache. + (default: for linux-user, N = 1, for full system emulation: N = cores + available to guest) + + * l2=on + + Simulates a unified L2 cache (stores blocks for both instructions and data) + using the default L2 configuration (cache size = 2MB, associativity = 16-way, + block size = 64B). + + * l2cachesize=N + * l2blksize=B + * l2assoc=A + + L2 cache configuration arguments. They specify the cache size, block size, and + associativity of the L2 cache, respectively. Setting any of the L2 + configuration arguments implies ``l2=on``. + (default: N = 2097152 (2MB), B = 64, A = 16) + +API +--- + +The following API is generated from the inline documentation in +``include/qemu/qemu-plugin.h``. Please ensure any updates to the API +include the full kernel-doc annotations. + +.. kernel-doc:: include/qemu/qemu-plugin.h + diff --git a/docs/devel/testing.rst b/docs/devel/testing.rst index 8a9cda33a5..e10c47b5a7 100644 --- a/docs/devel/testing.rst +++ b/docs/devel/testing.rst @@ -1,11 +1,12 @@ -=============== +.. _testing: + Testing in QEMU =============== This document describes the testing infrastructure in QEMU. Testing with "make check" -========================= +------------------------- The "make check" testing family includes most of the C based tests in QEMU. For a quick help, run ``make check-help`` from the source tree. 
@@ -24,7 +25,7 @@ expect the executables to exist and will fail with obscure messages if they cannot find them. Unit tests ----------- +~~~~~~~~~~ Unit tests, which can be invoked with ``make check-unit``, are simple C tests that typically link to individual QEMU object files and exercise them by @@ -67,7 +68,7 @@ and copy the actual command line which executes the unit test, then run it from the command line. QTest ------ +~~~~~ QTest is a device emulation testing framework. It can be very useful to test device models; it could also control certain aspects of QEMU (such as virtual @@ -80,8 +81,38 @@ QTest cases can be executed with make check-qtest +Writing portable test cases +~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Both unit tests and qtests can run on POSIX hosts as well as Windows hosts. +Care must be taken when writing portable test cases that can be built and run +successfully on various hosts. The following list shows some best practices: + +* Use portable APIs from glib whenever necessary, e.g.: g_setenv(), + g_mkdtemp(), g_mkdir(). +* Avoid using hardcoded /tmp for temporary file directory. + Use g_get_tmp_dir() instead. +* Bear in mind that Windows has different special string representation for + stdin/stdout/stderr and null devices. For example if your test case uses + "/dev/fd/2" and "/dev/null" on Linux, remember to use "2" and "nul" on + Windows instead. Also IO redirection does not work on Windows, so avoid + using "2>nul" whenever necessary. +* If your test cases uses the blkdebug feature, use relative path to pass + the config and image file paths in the command line as Windows absolute + path contains the delimiter ":" which will confuse the blkdebug parser. +* Use double quotes in your extra QEMU command line in your test cases + instead of single quotes, as Windows does not drop single quotes when + passing the command line to QEMU. +* Windows opens a file in text mode by default, while a POSIX compliant + implementation treats text files and binary files the same. So if your + test cases opens a file to write some data and later wants to compare the + written data with the original one, be sure to pass the letter 'b' as + part of the mode string to fopen(), or O_BINARY flag for the open() call. +* If a certain test case can only run on POSIX or Linux hosts, use a proper + #ifdef in the codes. If the whole test suite cannot run on Windows, disable + the build in the meson.build file. + QAPI schema tests ------------------ +~~~~~~~~~~~~~~~~~ The QAPI schema tests validate the QAPI parser used by QMP, by feeding predefined input to the parser and comparing the result with the reference @@ -108,33 +139,14 @@ parser (either fixing a bug or extending/modifying the syntax). To do this: ``qapi-schema += foo.json`` check-block ------------ +~~~~~~~~~~~ ``make check-block`` runs a subset of the block layer iotests (the tests that are in the "auto" group). See the "QEMU iotests" section below for more information. -GCC gcov support ----------------- - -``gcov`` is a GCC tool to analyze the testing coverage by -instrumenting the tested code. To use it, configure QEMU with -``--enable-gcov`` option and build. Then run ``make check`` as usual. - -If you want to gather coverage information on a single test the ``make -clean-gcda`` target can be used to delete any existing coverage -information before running a single test. - -You can generate a HTML coverage report by executing ``make -coverage-html`` which will create -``meson-logs/coveragereport/index.html``. 
- -Further analysis can be conducted by running the ``gcov`` command -directly on the various .gcda output files. Please read the ``gcov`` -documentation for more information. - QEMU iotests -============ +------------ QEMU iotests, under the directory ``tests/qemu-iotests``, is the testing framework widely used to test block layer related features. It is higher level @@ -171,7 +183,7 @@ More options are supported by the ``./check`` script, run ``./check -h`` for help. Writing a new test case ------------------------ +~~~~~~~~~~~~~~~~~~~~~~~ Consider writing a tests case when you are making any changes to the block layer. An iotest case is usually the choice for that. There are already many @@ -224,8 +236,38 @@ another application on the host may have locked the file, possibly leading to a test failure. If using such devices are explicitly desired, consider adding ``locking=off`` option to disable image locking. +Debugging a test case +~~~~~~~~~~~~~~~~~~~~~ + +The following options to the ``check`` script can be useful when debugging +a failing test: + +* ``-gdb`` wraps every QEMU invocation in a ``gdbserver``, which waits for a + connection from a gdb client. The options given to ``gdbserver`` (e.g. the + address on which to listen for connections) are taken from the ``$GDB_OPTIONS`` + environment variable. By default (if ``$GDB_OPTIONS`` is empty), it listens on + ``localhost:12345``. + It is possible to connect to it for example with + ``gdb -iex "target remote $addr"``, where ``$addr`` is the address + ``gdbserver`` listens on. + If the ``-gdb`` option is not used, ``$GDB_OPTIONS`` is ignored, + regardless of whether it is set or not. + +* ``-valgrind`` attaches a valgrind instance to QEMU. If it detects + warnings, it will print and save the log in + ``$TEST_DIR/.valgrind``. + The final command line will be ``valgrind --log-file=$TEST_DIR/ + .valgrind --error-exitcode=99 $QEMU ...`` + +* ``-d`` (debug) just increases the logging verbosity, showing + for example the QMP commands and answers. + +* ``-p`` (print) redirects QEMU’s stdout and stderr to the test output, + instead of saving it into a log file in + ``$TEST_DIR/qemu-machine-``. + Test case groups ----------------- +~~~~~~~~~~~~~~~~ "Tests may belong to one or more test groups, which are defined in the form of a comment in the test source file. By convention, test groups are listed @@ -275,17 +317,17 @@ Note that the following group names have a special meaning: .. _container-ref: Container based tests -===================== +--------------------- Introduction ------------- +~~~~~~~~~~~~ The container testing framework in QEMU utilizes public images to build and test QEMU in predefined and widely accessible Linux environments. This makes it possible to expand the test coverage across distros, toolchain flavors and library versions. The support was originally written for Docker although we also support Podman as -an alternative container runtime. Although the many of the target +an alternative container runtime. Although many of the target names and scripts are prefixed with "docker" the system will automatically run on whichever is configured. @@ -293,7 +335,7 @@ The container images are also used to augment the generation of tests for testing TCG. See :ref:`checktcg-ref` for more details. 
Docker Prerequisites --------------------- +~~~~~~~~~~~~~~~~~~~~ Install "docker" with the system package manager and start the Docker service on your development machine, then make sure you have the privilege to run @@ -324,7 +366,7 @@ exploit the whole host with Docker bind mounting or other privileged operations. So only do it on development machines. Podman Prerequisites --------------------- +~~~~~~~~~~~~~~~~~~~~ Install "podman" with the system package manager. @@ -336,7 +378,7 @@ Install "podman" with the system package manager. The last command should print an empty table, to verify the system is ready. Quickstart ----------- +~~~~~~~~~~ From source tree, type ``make docker-help`` to see the help. Testing can be started without configuring or building QEMU (``configure`` and @@ -352,7 +394,7 @@ is downloaded and initialized automatically), in which the ``test-build`` job is executed. Registry --------- +~~~~~~~~ The QEMU project has a container registry hosted by GitLab at ``registry.gitlab.com/qemu-project/qemu`` which will automatically be @@ -363,25 +405,123 @@ locally by using the ``NOCACHE`` build option: .. code:: - make docker-image-debian10 NOCACHE=1 + make docker-image-debian-arm64-cross NOCACHE=1 Images ------- +~~~~~~ Along with many other images, the ``centos8`` image is defined in a Dockerfile in ``tests/docker/dockerfiles/``, called ``centos8.docker``. ``make docker-help`` command will list all the available images. -To add a new image, simply create a new ``.docker`` file under the -``tests/docker/dockerfiles/`` directory. - A ``.pre`` script can be added beside the ``.docker`` file, which will be executed before building the image under the build context directory. This is mainly used to do necessary host side setup. One such setup is ``binfmt_misc``, for example, to make qemu-user powered cross build containers work. +Most of the existing Dockerfiles were written by hand, simply by creating a +a new ``.docker`` file under the ``tests/docker/dockerfiles/`` directory. +This has led to an inconsistent set of packages being present across the +different containers. + +Thus going forward, QEMU is aiming to automatically generate the Dockerfiles +using the ``lcitool`` program provided by the ``libvirt-ci`` project: + + https://gitlab.com/libvirt/libvirt-ci + +In that project, there is a ``mappings.yml`` file defining the distro native +package names for a wide variety of third party projects. This is processed +in combination with a project defined list of build pre-requisites to determine +the list of native packages to install on each distribution. This can be used +to generate dockerfiles, VM package lists and Cirrus CI variables needed to +setup build environments across OS distributions with a consistent set of +packages present. + +When preparing a patch series that adds a new build pre-requisite to QEMU, +updates to various lcitool data files may be required. 
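+As a rough sketch of that workflow, assuming the new dependency is already
+known to ``libvirt-ci``, the update usually amounts to something like::
+
+  $EDITOR tests/lcitool/projects/qemu.yml   # add the new pre-requisite
+  make lcitool-refresh                      # regenerate the build manifests
+  git diff tests/docker/dockerfiles/        # review the regenerated files
+
+The individual steps, including the case where ``libvirt-ci`` does not yet
+know about the dependency, are described in the following sections.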
+ + +Adding new build pre-requisites +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In the simple case where the pre-requisite is already known to ``libvirt-ci`` +the following steps are needed + + * Edit ``tests/lcitool/projects/qemu.yml`` and add the pre-requisite + + * Run ``make lcitool-refresh`` to re-generate all relevant build environment + manifests + +In some cases ``libvirt-ci`` will not know about the build pre-requisite and +thus some extra preparation steps will be required first + + * Fork the ``libvirt-ci`` project on gitlab + + * Edit the ``mappings.yml`` change to add an entry for the new build + prerequisite, listing its native package name on as many OS distros + as practical. + + * Commit the ``mappings.yml`` change and submit a merge request to + the ``libvirt-ci`` project, noting in the description that this + is a new build pre-requisite desired for use with QEMU + + * CI pipeline will run to validate that the changes to ``mappings.yml`` + are correct, by attempting to install the newly listed package on + all OS distributions supported by ``libvirt-ci``. + + * Once the merge request is accepted, go back to QEMU and update + the ``libvirt-ci`` submodule to point to a commit that contains + the ``mappings.yml`` update. + + +Adding new OS distros +^^^^^^^^^^^^^^^^^^^^^ + +In some cases ``libvirt-ci`` will not know about the OS distro that is +desired to be tested. Before adding a new OS distro, discuss the proposed +addition: + + * Send a mail to qemu-devel, copying people listed in the + MAINTAINERS file for ``Build and test automation``. + + There are limited CI compute resources available to QEMU, so the + cost/benefit tradeoff of adding new OS distros needs to be considered. + + * File an issue at https://gitlab.com/libvirt/libvirt-ci/-/issues + pointing to the qemu-devel mail thread in the archives. + + This alerts other people who might be interested in the work + to avoid duplication, as well as to get feedback from libvirt-ci + maintainers on any tips to ease the addition + +Assuming there is agreement to add a new OS distro then + + * Fork the ``libvirt-ci`` project on gitlab + + * Add metadata under ``guests/lcitool/lcitool/ansible/group_vars/`` + for the new OS distro. There might be code changes required if + the OS distro uses a package format not currently known. The + ``libvirt-ci`` maintainers can advise on this when the issue + is file. + + * Edit the ``mappings.yml`` change to update all the existing package + entries, providing details of the new OS distro + + * Commit the ``mappings.yml`` change and submit a merge request to + the ``libvirt-ci`` project, noting in the description that this + is a new build pre-requisite desired for use with QEMU + + * CI pipeline will run to validate that the changes to ``mappings.yml`` + are correct, by attempting to install the newly listed package on + all OS distributions supported by ``libvirt-ci``. + + * Once the merge request is accepted, go back to QEMU and update + the ``libvirt-ci`` submodule to point to a commit that contains + the ``mappings.yml`` update. + + Tests ------ +~~~~~ Different tests are added to cover various configurations to build and test QEMU. Docker tests are the executables under ``tests/docker`` named @@ -392,7 +532,7 @@ source and build it. The full list of tests is printed in the ``make docker-help`` help. 
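+For example, a complete configure/build/test cycle inside a container can be
+started with a single make target; the image and test names below are only
+illustrative, ``make docker-help`` lists the real combinations::
+
+  make docker-test-build@centos8 J=8 V=1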
Debugging a Docker test failure -------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When CI tasks, maintainers or yourself report a Docker test failure, follow the below steps to debug it: @@ -409,7 +549,7 @@ below steps to debug it: the prompt for debug. Options -------- +~~~~~~~ Various options can be used to affect how Docker tests are done. The full list is in the ``make docker`` help text. The frequently used ones are: @@ -423,7 +563,7 @@ list is in the ``make docker`` help text. The frequently used ones are: failure" section. Thread Sanitizer -================ +---------------- Thread Sanitizer (TSan) is a tool which can detect data races. QEMU supports building and testing with this tool. @@ -433,7 +573,7 @@ For more information on TSan: https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual Thread Sanitizer in Docker ---------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~ TSan is currently supported in the ubuntu2004 docker. The test-tsan test will build using TSan and then run make check. @@ -448,7 +588,7 @@ We recommend using DEBUG=1 to allow launching the test from inside the docker, and to allow review of the warnings generated by TSan. Building and Testing with TSan ------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It is possible to build and test with TSan, with a few additional steps. These steps are normally done automatically in the docker. @@ -487,7 +627,7 @@ This allows for running the test and then checking the warnings afterwards. If you want TSan to stop and exit with error on warnings, use exitcode=66. TSan Suppressions ------------------ +~~~~~~~~~~~~~~~~~ Keep in mind that for any data race warning, although there might be a data race detected by TSan, there might be no actual bug here. TSan provides several different mechanisms for suppressing warnings. In general it is recommended @@ -513,7 +653,7 @@ More information on the file format can be found here under "Blacklist Format": https://github.com/google/sanitizers/wiki/ThreadSanitizerFlags TSan Annotations ----------------- +~~~~~~~~~~~~~~~~ include/qemu/tsan.h defines annotations. See this file for more descriptions of the annotations themselves. Annotations can be used to suppress TSan warnings or give TSan more information so that it can detect proper @@ -529,15 +669,53 @@ The full set of annotations can be found here: https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp +docker-binfmt-image-debian-% targets +------------------------------------ + +It is possible to combine Debian's bootstrap scripts with a configured +``binfmt_misc`` to bootstrap a number of Debian's distros including +experimental ports not yet supported by a released OS. This can +simplify setting up a rootfs by using docker to contain the foreign +rootfs rather than manually invoking chroot. + +Setting up ``binfmt_misc`` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can use the script ``qemu-binfmt-conf.sh`` to configure a QEMU +user binary to automatically run binaries for the foreign +architecture. While the scripts will try their best to work with +dynamically linked QEMU's a statically linked one will present less +potential complications when copying into the docker image. Modern +kernels support the ``F`` (fix binary) flag which will open the QEMU +executable on setup and avoids the need to find and re-open in the +chroot environment. This is triggered with the ``--persistent`` flag. 
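+As a hedged illustration of the registration step itself (separate from the
+make target shown in the next section), and assuming the QEMU user binaries
+are installed in ``/usr/local/bin``, a persistent setup could look like::
+
+  sudo ./scripts/qemu-binfmt-conf.sh --qemu-path /usr/local/bin --persistent yes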
+ +Example invocation +~~~~~~~~~~~~~~~~~~ + +For example, to set up the HPPA ports build of Debian:: + + make docker-binfmt-image-debian-sid-hppa \ + DEB_TYPE=sid DEB_ARCH=hppa \ + DEB_URL=http://ftp.ports.debian.org/debian-ports/ \ + DEB_KEYRING=/usr/share/keyrings/debian-ports-archive-keyring.gpg \ + EXECUTABLE=$(pwd)/qemu-hppa V=1 + +The ``DEB_`` variables are substitutions used by +``debian-bootstrap.pre`` which is called to do the initial debootstrap +of the rootfs before it is copied into the container. The second stage +is run as part of the build. The final image will be tagged as +``qemu/debian-sid-hppa``. + VM testing -========== +---------- This test suite contains scripts that bootstrap various guest images that have necessary packages to build QEMU. The basic usage is documented in ``Makefile`` help which is displayed with ``make vm-help``. Quickstart ----------- +~~~~~~~~~~ Run ``make vm-help`` to list available make targets. Invoke a specific make command to run build test in an image. For example, ``make vm-build-freebsd`` @@ -552,29 +730,29 @@ concerned about attackers taking control of the guest and potentially exploiting a QEMU security bug to compromise the host. QEMU binaries -------------- +~~~~~~~~~~~~~ -By default, qemu-system-x86_64 is searched in $PATH to run the guest. If there -isn't one, or if it is older than 2.10, the test won't work. In this case, +By default, ``qemu-system-x86_64`` is searched in $PATH to run the guest. If +there isn't one, or if it is older than 2.10, the test won't work. In this case, provide the QEMU binary in env var: ``QEMU=/path/to/qemu-2.10+``. -Likewise the path to qemu-img can be set in QEMU_IMG environment variable. +Likewise the path to ``qemu-img`` can be set in QEMU_IMG environment variable. Make jobs ---------- +~~~~~~~~~ The ``-j$X`` option in the make command line is not propagated into the VM, specify ``J=$X`` to control the make jobs in the guest. Debugging ---------- +~~~~~~~~~ Add ``DEBUG=1`` and/or ``V=1`` to the make command to allow interactive debugging and verbose output. If this is not enough, see the next section. ``V=1`` will be propagated down into the make jobs in the guest. Manual invocation ------------------ +~~~~~~~~~~~~~~~~~ Each guest script is an executable script with the same command line options. For example to work with the netbsd guest, use ``$QEMU_SRC/tests/vm/netbsd``: @@ -598,7 +776,7 @@ For example to work with the netbsd guest, use ``$QEMU_SRC/tests/vm/netbsd``: $ ./netbsd --interactive --image /var/tmp/netbsd.img sh Adding new guests ------------------ +~~~~~~~~~~~~~~~~~ Please look at existing guest scripts for how to add new guests. @@ -631,7 +809,7 @@ the script's ``main()``. recommended. Image fuzzer testing -==================== +-------------------- An image fuzzer was added to exercise format drivers. Currently only qcow2 is supported. To start the fuzzer, run @@ -640,20 +818,19 @@ supported. To start the fuzzer, run tests/image-fuzzer/runner.py -c '[["qemu-img", "info", "$test_img"]]' /tmp/test qcow2 -Alternatively, some command different from "qemu-img info" can be tested, by +Alternatively, some command different from ``qemu-img info`` can be tested, by changing the ``-c`` option. -Acceptance tests using the Avocado Framework -============================================ +Integration tests using the Avocado Framework +--------------------------------------------- -The ``tests/acceptance`` directory hosts functional tests, also known -as acceptance level tests.
They're usually higher level tests, and -may interact with external resources and with various guest operating -systems. +The ``tests/avocado`` directory hosts integration tests. They're usually +higher level tests, and may interact with external resources and with +various guest operating systems. These tests are written using the Avocado Testing Framework (which must be installed separately) in conjunction with a the ``avocado_qemu.Test`` -class, implemented at ``tests/acceptance/avocado_qemu``. +class, implemented at ``tests/avocado/avocado_qemu``. Tests based on ``avocado_qemu.Test`` can easily: @@ -683,13 +860,13 @@ Tests based on ``avocado_qemu.Test`` can easily: - http://avocado-framework.readthedocs.io/en/latest/api/utils/avocado.utils.html Running tests -------------- +~~~~~~~~~~~~~ -You can run the acceptance tests simply by executing: +You can run the avocado tests simply by executing: .. code:: - make check-acceptance + make check-avocado This involves the automatic creation of Python virtual environment within the build tree (at ``tests/venv``) which will have all the @@ -703,16 +880,85 @@ available. On Debian and Ubuntu based systems, depending on the specific version, they may be on packages named ``python3-venv`` and ``python3-pip``. +It is also possible to run tests based on tags using the +``make check-avocado`` command and the ``AVOCADO_TAGS`` environment +variable: + +.. code:: + + make check-avocado AVOCADO_TAGS=quick + +Note that tags separated with commas have an AND behavior, while tags +separated by spaces have an OR behavior. For more information on Avocado +tags, see: + + https://avocado-framework.readthedocs.io/en/latest/guides/user/chapters/tags.html + +To run a single test file, a couple of them, or a test within a file +using the ``make check-avocado`` command, set the ``AVOCADO_TESTS`` +environment variable with the test files or test names. To run all +tests from a single file, use: + + .. code:: + + make check-avocado AVOCADO_TESTS=$FILEPATH + +The same is valid to run tests from multiple test files: + + .. code:: + + make check-avocado AVOCADO_TESTS='$FILEPATH1 $FILEPATH2' + +To run a single test within a file, use: + + .. code:: + + make check-avocado AVOCADO_TESTS=$FILEPATH:$TESTCLASS.$TESTNAME + +The same is valid to run single tests from multiple test files: + + .. code:: + + make check-avocado AVOCADO_TESTS='$FILEPATH1:$TESTCLASS1.$TESTNAME1 $FILEPATH2:$TESTCLASS2.$TESTNAME2' + The scripts installed inside the virtual environment may be used without an "activation". For instance, the Avocado test runner may be invoked by running: .. code:: - tests/venv/bin/avocado run $OPTION1 $OPTION2 tests/acceptance/ + tests/venv/bin/avocado run $OPTION1 $OPTION2 tests/avocado/ + +Note that if ``make check-avocado`` was not executed before, it is +possible to create the Python virtual environment with the dependencies +needed running: + + .. code:: + + make check-venv + +It is also possible to run tests from a single file or a single test within +a test file. To run tests from a single file within the build tree, use: + + .. code:: + + tests/venv/bin/avocado run tests/avocado/$TESTFILE + +To run a single test within a test file, use: + + .. code:: + + tests/venv/bin/avocado run tests/avocado/$TESTFILE:$TESTCLASS.$TESTNAME + +Valid test names are visible in the output from any previous execution +of Avocado or ``make check-avocado``, and can also be queried using: + + .. 
code:: + + tests/venv/bin/avocado list tests/avocado Manual Installation -------------------- +~~~~~~~~~~~~~~~~~~~ To manually install Avocado and its dependencies, run: @@ -725,18 +971,18 @@ Alternatively, follow the instructions on this link: https://avocado-framework.readthedocs.io/en/latest/guides/user/chapters/installing.html Overview --------- +~~~~~~~~ -The ``tests/acceptance/avocado_qemu`` directory provides the +The ``tests/avocado/avocado_qemu`` directory provides the ``avocado_qemu`` Python module, containing the ``avocado_qemu.Test`` class. Here's a simple usage example: .. code:: - from avocado_qemu import Test + from avocado_qemu import QemuSystemTest - class Version(Test): + class Version(QemuSystemTest): """ :avocado: tags=quick """ @@ -761,7 +1007,7 @@ in the current directory, tagged as "quick", run: avocado run -t quick . The ``avocado_qemu.Test`` base test class ------------------------------------------ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``avocado_qemu.Test`` class has a number of characteristics that are worth being mentioned right away. @@ -781,10 +1027,10 @@ and hypothetical example follows: .. code:: - from avocado_qemu import Test + from avocado_qemu import QemuSystemTest - class MultipleMachines(Test): + class MultipleMachines(QemuSystemTest): def test_multiple_machines(self): first_machine = self.get_vm() second_machine = self.get_vm() @@ -811,7 +1057,7 @@ At test "tear down", ``avocado_qemu.Test`` handles all the QEMUMachines shutdown. The ``avocado_qemu.LinuxTest`` base test class -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``avocado_qemu.LinuxTest`` is further specialization of the ``avocado_qemu.Test`` class, so it contains all the characteristics of @@ -834,7 +1080,7 @@ like this: self.ssh_command('some_command_to_be_run_in_the_guest') Please refer to tests that use ``avocado_qemu.LinuxTest`` under -``tests/acceptance`` for more examples. +``tests/avocado`` for more examples. QEMUMachine ~~~~~~~~~~~ @@ -854,7 +1100,7 @@ execution of a QEMU binary, giving its users: a more succinct and intuitive way QEMU binary selection -~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^ The QEMU binary used for the ``self.vm`` QEMUMachine instance will primarily depend on the value of the ``qemu_bin`` parameter. If it's @@ -875,20 +1121,23 @@ The resulting ``qemu_bin`` value will be preserved in the ``avocado_qemu.Test`` as an attribute with the same name. Attribute reference -------------------- +~~~~~~~~~~~~~~~~~~~ + +Test +^^^^ Besides the attributes and methods that are part of the base ``avocado.Test`` class, the following attributes are available on any ``avocado_qemu.Test`` instance. vm -~~ +'' A QEMUMachine instance, initially configured according to the given ``qemu_bin`` parameter. arch -~~~~ +'''' The architecture can be used on different levels of the stack, e.g. by the framework or by the test itself. At the framework level, it will @@ -905,7 +1154,7 @@ name. If one is not given explicitly, it will either be set to ``:avocado: tags=arch:VALUE`` tag, it will be set to ``VALUE``. cpu -~~~ +''' The cpu model that will be set to all QEMUMachine instances created by the test. @@ -916,7 +1165,7 @@ name. If one is not given explicitly, it will either be set to ``:avocado: tags=cpu:VALUE`` tag, it will be set to ``VALUE``. machine -~~~~~~~ +''''''' The machine type that will be set to all QEMUMachine instances created by the test. @@ -927,20 +1176,20 @@ name. 
If one is not given explicitly, it will either be set to ``:avocado: tags=machine:VALUE`` tag, it will be set to ``VALUE``. qemu_bin -~~~~~~~~ +'''''''' The preserved value of the ``qemu_bin`` parameter or the result of the dynamic probe for a QEMU binary in the current working directory or source tree. LinuxTest -~~~~~~~~~ +^^^^^^^^^ Besides the attributes present on the ``avocado_qemu.Test`` base class, the ``avocado_qemu.LinuxTest`` adds the following attributes: distro -...... +'''''' The name of the Linux distribution used as the guest image for the test. The name should match the **Provider** column on the list @@ -949,7 +1198,7 @@ of images supported by the avocado.utils.vmimage library: https://avocado-framework.readthedocs.io/en/latest/guides/writer/libs/vmimage.html#supported-images distro_version -.............. +'''''''''''''' The version of the Linux distribution as the guest image for the test. The name should match the **Version** column on the list @@ -958,7 +1207,7 @@ of images supported by the avocado.utils.vmimage library: https://avocado-framework.readthedocs.io/en/latest/guides/writer/libs/vmimage.html#supported-images distro_checksum -............... +''''''''''''''' The sha256 hash of the guest image file used for the test. @@ -967,7 +1216,7 @@ same name), no validation on the integrity of the image will be performed. Parameter reference -------------------- +~~~~~~~~~~~~~~~~~~~ To understand how Avocado parameters are accessed by tests, and how they can be passed to tests, please refer to:: @@ -981,8 +1230,11 @@ like the following: PARAMS (key=qemu_bin, path=*, default=./qemu-system-x86_64) => './qemu-system-x86_64 +Test +^^^^ + arch -~~~~ +'''' The architecture that will influence the selection of a QEMU binary (when one is not explicitly given). @@ -995,31 +1247,30 @@ This parameter has a direct relation with the ``arch`` attribute. If not given, it will default to None. cpu -~~~ +''' The cpu model that will be set to all QEMUMachine instances created by the test. machine -~~~~~~~ +''''''' The machine type that will be set to all QEMUMachine instances created by the test. - qemu_bin -~~~~~~~~ +'''''''' The exact QEMU binary to be used on QEMUMachine. LinuxTest -~~~~~~~~~ +^^^^^^^^^ Besides the parameters present on the ``avocado_qemu.Test`` base class, the ``avocado_qemu.LinuxTest`` adds the following parameters: distro -...... +'''''' The name of the Linux distribution used as the guest image for the test. The name should match the **Provider** column on the list @@ -1028,7 +1279,7 @@ of images supported by the avocado.utils.vmimage library: https://avocado-framework.readthedocs.io/en/latest/guides/writer/libs/vmimage.html#supported-images distro_version -.............. +'''''''''''''' The version of the Linux distribution as the guest image for the test. The name should match the **Version** column on the list @@ -1037,7 +1288,7 @@ of images supported by the avocado.utils.vmimage library: https://avocado-framework.readthedocs.io/en/latest/guides/writer/libs/vmimage.html#supported-images distro_checksum -............... +''''''''''''''' The sha256 hash of the guest image file used for the test. @@ -1045,7 +1296,8 @@ If this value is not set in the code or by this parameter no validation on the integrity of the image will be performed. Skipping tests --------------- +~~~~~~~~~~~~~~ + The Avocado framework provides Python decorators which allow for easily skip tests running under certain conditions. 
For example, on the lack of a binary on the test system or when the running environment is a CI system. For further @@ -1060,7 +1312,7 @@ environment variables became a kind of standard way to enable/disable tests. Here is a list of the most used variables: AVOCADO_ALLOW_LARGE_STORAGE -~~~~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^^^^ Tests which are going to fetch or produce assets considered *large* are not going to run unless that ``AVOCADO_ALLOW_LARGE_STORAGE=1`` is exported on the environment. @@ -1069,7 +1321,7 @@ The definition of *large* is a bit arbitrary here, but it usually means an asset which occupies at least 1GB of size on disk when uncompressed. AVOCADO_ALLOW_UNTRUSTED_CODE -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ There are tests which will boot a kernel image or firmware that can be considered not safe to run on the developer's workstation, thus they are skipped by default. The definition of *not safe* is also arbitrary but @@ -1080,7 +1332,7 @@ You should export ``AVOCADO_ALLOW_UNTRUSTED_CODE=1`` on the environment in order to allow tests which make use of those kind of assets. AVOCADO_TIMEOUT_EXPECTED -~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^ The Avocado framework has a timeout mechanism which interrupts tests to avoid the test suite of getting stuck. The timeout value can be set via test parameter or property defined in the test class, for further details:: @@ -1094,7 +1346,7 @@ compiled with debug flags. Therefore, the ``AVOCADO_TIMEOUT_EXPECTED`` variable has been used to determine whether those tests should run or not. GITLAB_CI -~~~~~~~~~ +^^^^^^^^^ A number of tests are flagged to not run on the GitLab CI. Usually because they proved to the flaky or there are constraints on the CI environment which would make them fail. If you encounter a similar situation then use that @@ -1107,7 +1359,7 @@ variable as shown on the code snippet below to skip the test: do_something() Uninstalling Avocado --------------------- +~~~~~~~~~~~~~~~~~~~~ If you've followed the manual installation instructions above, you can easily uninstall Avocado. Start by listing the packages you have @@ -1119,13 +1371,13 @@ And remove any package you want with:: pip uninstall -If you've used ``make check-acceptance``, the Python virtual environment where +If you've used ``make check-avocado``, the Python virtual environment where Avocado is installed will be cleaned up as part of ``make check-clean``. .. _checktcg-ref: Testing with "make check-tcg" -============================= +----------------------------- The check-tcg tests are intended for simple smoke tests of both linux-user and softmmu TCG functionality. However to build test @@ -1142,7 +1394,7 @@ for the architecture in question, for example:: $(configure) --cross-cc-aarch64=aarch64-cc -There is also a ``--cross-cc-flags-ARCH`` flag in case additional +There is also a ``--cross-cc-cflags-ARCH`` flag in case additional compiler flags are needed to build for a given target. If you have the ability to run containers as the user the build system @@ -1158,7 +1410,7 @@ itself. See :ref:`container-ref` for more details. Running subset of tests ------------------------ +~~~~~~~~~~~~~~~~~~~~~~~ You can build the tests for one architecture:: @@ -1172,7 +1424,7 @@ Adding ``V=1`` to the invocation will show the details of how to invoke QEMU for the test which is useful for debugging tests. 
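+Putting the above together, a sketch for a single target (the target name is
+only an illustration) might be::
+
+  make build-tcg-tests-aarch64-linux-user
+  make run-tcg-tests-aarch64-linux-user V=1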
TCG test dependencies ---------------------- +~~~~~~~~~~~~~~~~~~~~~ The TCG tests are deliberately very light on dependencies and are either totally bare with minimal gcc lib support (for softmmu tests) @@ -1204,3 +1456,22 @@ exercise as many corner cases as possible. It is a useful test suite to run to exercise QEMU's linux-user code:: https://linux-test-project.github.io/ + +GCC gcov support +---------------- + +``gcov`` is a GCC tool to analyze the testing coverage by +instrumenting the tested code. To use it, configure QEMU with +``--enable-gcov`` option and build. Then run the tests as usual. + +If you want to gather coverage information on a single test the ``make +clean-gcda`` target can be used to delete any existing coverage +information before running a single test. + +You can generate a HTML coverage report by executing ``make +coverage-html`` which will create +``meson-logs/coveragereport/index.html``. + +Further analysis can be conducted by running the ``gcov`` command +directly on the various .gcda output files. Please read the ``gcov`` +documentation for more information. diff --git a/docs/devel/tracing.rst b/docs/devel/tracing.rst index ba83954899..d288480db1 100644 --- a/docs/devel/tracing.rst +++ b/docs/devel/tracing.rst @@ -1,3 +1,5 @@ +.. _tracing: + ======= Tracing ======= @@ -46,7 +48,7 @@ file. During build, the "trace-events" file in each listed subdirectory will be processed by the "tracetool" script to generate code for the trace events. The individual "trace-events" files are merged into a "trace-events-all" file, -which is also installed into "/usr/share/qemu" with the name "trace-events". +which is also installed into "/usr/share/qemu". This merged file is to be used by the "simpletrace.py" script to later analyse traces in the simpletrace data format. @@ -411,88 +413,3 @@ disabled, this check will have no performance impact. return ptr; } -"tcg" ------ - -Guest code generated by TCG can be traced by defining an event with the "tcg" -event property. Internally, this property generates two events: -"_trans" to trace the event at translation time, and -"_exec" to trace the event at execution time. - -Instead of using these two events, you should instead use the function -"trace__tcg" during translation (TCG code generation). This function -will automatically call "trace__trans", and will generate the -necessary TCG code to call "trace__exec" during guest code execution. - -Events with the "tcg" property can be declared in the "trace-events" file with a -mix of native and TCG types, and "trace__tcg" will gracefully forward -them to the "_trans" and "_exec" events. Since TCG values -are not known at translation time, these are ignored by the "_trans" -event. Because of this, the entry in the "trace-events" file needs two printing -formats (separated by a comma):: - - tcg foo(uint8_t a1, TCGv_i32 a2) "a1=%d", "a1=%d a2=%d" - -For example:: - - #include "trace-tcg.h" - - void some_disassembly_func (...) - { - uint8_t a1 = ...; - TCGv_i32 a2 = ...; - trace_foo_tcg(a1, a2); - } - -This will immediately call:: - - void trace_foo_trans(uint8_t a1); - -and will generate the TCG code to call:: - - void trace_foo(uint8_t a1, uint32_t a2); - -"vcpu" ------- - -Identifies events that trace vCPU-specific information. It implicitly adds a -"CPUState*" argument, and extends the tracing print format to show the vCPU -information. 
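+For instance, assuming a trace file produced with the "simple" backend and
+named ``trace-12345`` (the file name here is illustrative), it could later be
+decoded with::
+
+  ./scripts/simpletrace.py trace-events-all trace-12345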
If used together with the "tcg" property, it adds a second -"TCGv_env" argument that must point to the per-target global TCG register that -points to the vCPU when guest code is executed (usually the "cpu_env" variable). - -The "tcg" and "vcpu" properties are currently only honored in the root -./trace-events file. - -The following example events:: - - foo(uint32_t a) "a=%x" - vcpu bar(uint32_t a) "a=%x" - tcg vcpu baz(uint32_t a) "a=%x", "a=%x" - -Can be used as:: - - #include "trace-tcg.h" - - CPUArchState *env; - TCGv_ptr cpu_env; - - void some_disassembly_func(...) - { - /* trace emitted at this point */ - trace_foo(0xd1); - /* trace emitted at this point */ - trace_bar(env_cpu(env), 0xd2); - /* trace emitted at this point (env) and when guest code is executed (cpu_env) */ - trace_baz_tcg(env_cpu(env), cpu_env, 0xd3); - } - -If the translating vCPU has address 0xc1 and code is later executed by vCPU -0xc2, this would be an example output:: - - // at guest code translation - foo a=0xd1 - bar cpu=0xc1 a=0xd2 - baz_trans cpu=0xc1 a=0xd3 - // at guest code execution - baz_exec cpu=0xc2 a=0xd3 diff --git a/docs/devel/trivial-patches.rst b/docs/devel/trivial-patches.rst new file mode 100644 index 0000000000..9380c730f7 --- /dev/null +++ b/docs/devel/trivial-patches.rst @@ -0,0 +1,52 @@ +.. _trivial-patches: + +Trivial Patches +=============== + +Overview +-------- + +Trivial patches that change just a few lines of code sometimes languish +on the mailing list even though they require only a small amount of +review. This is often the case for patches that do not fall under an +actively maintained subsystem and therefore fall through the cracks. + +The trivial patches team take on the task of reviewing and building pull +requests for patches that: + +- Do not fall under an actively maintained subsystem. +- Are single patches or short series (max 2-4 patches). +- Only touch a few lines of code. + +**You should hint that your patch is a candidate by CCing +qemu-trivial@nongnu.org.** + +Repositories +------------ + +Since the trivial patch team rotates maintainership there is only one +active repository at a time: + +- git://github.com/vivier/qemu.git trivial-patches - `browse `__ + +Workflow +-------- + +The trivial patches team rotates the duty of collecting trivial patches +amongst its members. A team member's job is to: + +1. Identify trivial patches on the development mailing list. +2. Review trivial patches, merge them into a git tree, and reply to state + that the patch is queued. +3. Send pull requests to the development mailing list once a week. + +A single team member can be on duty as long as they like. The suggested +time is 1 week before handing off to the next member. + +Team +---- + +If you would like to join the trivial patches team, contact Laurent +Vivier. The current team includes: + +- `Laurent Vivier `__ diff --git a/docs/devel/ui.rst b/docs/devel/ui.rst index 06c7d622ce..17fb667dec 100644 --- a/docs/devel/ui.rst +++ b/docs/devel/ui.rst @@ -1,8 +1,8 @@ ================= -Qemu UI subsystem +QEMU UI subsystem ================= -Qemu Clipboard +QEMU Clipboard -------------- .. kernel-doc:: include/ui/clipboard.h diff --git a/docs/devel/virtio-backends.rst b/docs/devel/virtio-backends.rst new file mode 100644 index 0000000000..9ff092e7a0 --- /dev/null +++ b/docs/devel/virtio-backends.rst @@ -0,0 +1,214 @@ +.. 
+ Copyright (c) 2022, Linaro Limited + Written by Alex Bennée + +Writing VirtIO backends for QEMU +================================ + +This document attempts to outline the information a developer needs to +know to write device emulations in QEMU. It is specifically focused on +implementing VirtIO devices. For VirtIO the frontend is the driver +running on the guest. The backend is the everything that QEMU needs to +do to handle the emulation of the VirtIO device. This can be done +entirely in QEMU, divided between QEMU and the kernel (vhost) or +handled by a separate process which is configured by QEMU +(vhost-user). + +VirtIO Transports +----------------- + +VirtIO supports a number of different transports. While the details of +the configuration and operation of the device will generally be the +same QEMU represents them as different devices depending on the +transport they use. For example -device virtio-foo represents the foo +device using mmio and -device virtio-foo-pci is the same class of +device using the PCI transport. + +Using the QEMU Object Model (QOM) +--------------------------------- + +Generally all devices in QEMU are super classes of ``TYPE_DEVICE`` +however VirtIO devices should be based on ``TYPE_VIRTIO_DEVICE`` which +itself is derived from the base class. For example: + +.. code:: c + + static const TypeInfo virtio_blk_info = { + .name = TYPE_VIRTIO_BLK, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VirtIOBlock), + .instance_init = virtio_blk_instance_init, + .class_init = virtio_blk_class_init, + }; + +The author may decide to have a more expansive class hierarchy to +support multiple device types. For example the Virtio GPU device: + +.. code:: c + + static const TypeInfo virtio_gpu_base_info = { + .name = TYPE_VIRTIO_GPU_BASE, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VirtIOGPUBase), + .class_size = sizeof(VirtIOGPUBaseClass), + .class_init = virtio_gpu_base_class_init, + .abstract = true + }; + + static const TypeInfo vhost_user_gpu_info = { + .name = TYPE_VHOST_USER_GPU, + .parent = TYPE_VIRTIO_GPU_BASE, + .instance_size = sizeof(VhostUserGPU), + .instance_init = vhost_user_gpu_instance_init, + .instance_finalize = vhost_user_gpu_instance_finalize, + .class_init = vhost_user_gpu_class_init, + }; + + static const TypeInfo virtio_gpu_info = { + .name = TYPE_VIRTIO_GPU, + .parent = TYPE_VIRTIO_GPU_BASE, + .instance_size = sizeof(VirtIOGPU), + .class_size = sizeof(VirtIOGPUClass), + .class_init = virtio_gpu_class_init, + }; + +defines a base class for the VirtIO GPU and then specialises two +versions, one for the internal implementation and the other for the +vhost-user version. + +VirtIOPCIProxy +^^^^^^^^^^^^^^ + +[AJB: the following is supposition and welcomes more informed +opinions] + +Probably due to legacy from the pre-QOM days PCI VirtIO devices don't +follow the normal hierarchy. Instead the a standalone object is based +on the VirtIOPCIProxy class and the specific VirtIO instance is +manually instantiated: + +.. code:: c + + /* + * virtio-blk-pci: This extends VirtioPCIProxy. 
+ */ + #define TYPE_VIRTIO_BLK_PCI "virtio-blk-pci-base" + DECLARE_INSTANCE_CHECKER(VirtIOBlkPCI, VIRTIO_BLK_PCI, + TYPE_VIRTIO_BLK_PCI) + + struct VirtIOBlkPCI { + VirtIOPCIProxy parent_obj; + VirtIOBlock vdev; + }; + + static Property virtio_blk_pci_properties[] = { + DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0), + DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, + DEV_NVECTORS_UNSPECIFIED), + DEFINE_PROP_END_OF_LIST(), + }; + + static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) + { + VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + ... + + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); + } + + static void virtio_blk_pci_class_init(ObjectClass *klass, void *data) + { + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + device_class_set_props(dc, virtio_blk_pci_properties); + k->realize = virtio_blk_pci_realize; + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK; + pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; + pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI; + } + + static void virtio_blk_pci_instance_init(Object *obj) + { + VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_BLK); + object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev), + "bootindex"); + } + + static const VirtioPCIDeviceTypeInfo virtio_blk_pci_info = { + .base_name = TYPE_VIRTIO_BLK_PCI, + .generic_name = "virtio-blk-pci", + .transitional_name = "virtio-blk-pci-transitional", + .non_transitional_name = "virtio-blk-pci-non-transitional", + .instance_size = sizeof(VirtIOBlkPCI), + .instance_init = virtio_blk_pci_instance_init, + .class_init = virtio_blk_pci_class_init, + }; + +Here you can see the instance_init has to manually instantiate the +underlying ``TYPE_VIRTIO_BLOCK`` object and link an alias for one of +its properties to the PCI device. + + +Back End Implementations +------------------------ + +There are a number of places where the implementation of the backend +can be done: + +* in QEMU itself +* in the host kernel (a.k.a. vhost) +* in a separate process (a.k.a. vhost-user) + +vhost_ops vs TYPE_VHOST_USER_BACKEND +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +There are two choices of how to implement vhost code. Most of the code +which has to work with either vhost or vhost-user uses +``vhost_dev_init()`` to instantiate the appropriate backend. This +means including a ``struct vhost_dev`` in the main object structure. + +For vhost-user devices you also need to add code to track the +initialisation of the ``chardev`` device used for the control socket +between QEMU and the external vhost-user process. + +If you only need to implement a vhost-user backend, the other option is +to use a QOM-ified version of vhost-user. + ..
code:: c + + static void + vhost_user_gpu_instance_init(Object *obj) + { + VhostUserGPU *g = VHOST_USER_GPU(obj); + + g->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND)); + object_property_add_alias(obj, "chardev", + OBJECT(g->vhost), "chardev"); + } + + static const TypeInfo vhost_user_gpu_info = { + .name = TYPE_VHOST_USER_GPU, + .parent = TYPE_VIRTIO_GPU_BASE, + .instance_size = sizeof(VhostUserGPU), + .instance_init = vhost_user_gpu_instance_init, + .instance_finalize = vhost_user_gpu_instance_finalize, + .class_init = vhost_user_gpu_class_init, + }; + +Using it this way entails adding a ``struct VhostUserBackend`` to your +core object structure and manually instantiating the backend. This +sub-structure tracks both the ``vhost_dev`` and ``CharDev`` types +needed for the connection. Instead of calling ``vhost_dev_init`` you +would call ``vhost_user_backend_dev_init`` which does what is needed +on your behalf. diff --git a/docs/devel/writing-qmp-commands.rst b/docs/devel/writing-monitor-commands.rst similarity index 75% rename from docs/devel/writing-qmp-commands.rst rename to docs/devel/writing-monitor-commands.rst index 6a10a06c48..2fefedcd98 100644 --- a/docs/devel/writing-qmp-commands.rst +++ b/docs/devel/writing-monitor-commands.rst @@ -1,8 +1,8 @@ -How to write QMP commands using the QAPI framework -================================================== +How to write monitor commands +============================= This document is a step-by-step guide on how to write new QMP commands using -the QAPI framework. It also shows how to implement new style HMP commands. +the QAPI framework and HMP commands. This document doesn't discuss QMP protocol level details, nor does it dive into the QAPI framework implementation. @@ -11,6 +11,14 @@ For an in-depth introduction to the QAPI framework, please refer to docs/devel/qapi-code-gen.txt. For documentation about the QMP protocol, start with docs/interop/qmp-intro.txt. +New commands may be implemented in QMP only. New HMP commands should be +implemented on top of QMP. The typical HMP command wraps around an +equivalent QMP command, but HMP convenience commands built from QMP +building blocks are also fine. The long term goal is to make all +existing HMP commands conform to this, to fully isolate HMP from the +internals of QEMU. Refer to the `Writing a debugging aid returning +unstructured text`_ section for further guidance on commands that +would have traditionally been HMP only. Overview -------- @@ -85,8 +93,8 @@ any data". Now you're ready to enter the QMP example commands as explained in the following sections. -Writing a command that doesn't return data ------------------------------------------- +Writing a simple command: hello-world +------------------------------------- That's the most simple QMP command that can be written. Usually, this kind of command carries some meaningful action in QEMU but here it will just print @@ -293,9 +301,7 @@ Here's the implementation of the "hello-world" HMP command:: Error *err = NULL; qmp_hello_world(!!message, message, &err); - if (err) { - monitor_printf(mon, "%s\n", error_get_pretty(err)); - error_free(err); + if (hmp_handle_error(mon, err)) { return; } } @@ -307,9 +313,10 @@ There are three important points to be noticed: 1. The "mon" and "qdict" arguments are mandatory for all HMP functions. The former is the monitor object. The latter is how the monitor passes arguments entered by the user to the command implementation -2. hmp_hello_world() performs error checking. 
In this example we just print - the error description to the user, but we could do more, like taking - different actions depending on the error qmp_hello_world() returns +2. hmp_hello_world() performs error checking. In this example we just call + hmp_handle_error() which prints a message to the user, but we could do + more, like taking different actions depending on the error + qmp_hello_world() returns 3. The "err" variable must be initialized to NULL before performing the QMP call @@ -324,13 +331,10 @@ we should add it to the hmp-commands.hx file:: .cmd = hmp_hello_world, }, -:: - - STEXI - @item hello_world @var{message} - @findex hello_world - Print message to the standard output - ETEXI + SRST + ``hello_world`` *message* + Print message to the standard output + ERST To test this you have to open a user monitor and issue the "hello-world" command. It might be instructive to check the command's documentation with @@ -340,8 +344,8 @@ Please, check the "-monitor" command-line option to know how to open a user monitor. -Writing a command that returns data ------------------------------------ +Writing more complex commands +----------------------------- A QMP command is capable of returning any data the QAPI supports like integers, strings, booleans, enumerations and user defined types. @@ -350,6 +354,35 @@ In this section we will focus on user defined types. Please, check the QAPI documentation for information about the other types. +Modelling data in QAPI +~~~~~~~~~~~~~~~~~~~~~~ + +For a QMP command that to be considered stable and supported long term, +there is a requirement returned data should be explicitly modelled +using fine-grained QAPI types. As a general guide, a caller of the QMP +command should never need to parse individual returned data fields. If +a field appears to need parsing, then it should be split into separate +fields corresponding to each distinct data item. This should be the +common case for any new QMP command that is intended to be used by +machines, as opposed to exclusively human operators. + +Some QMP commands, however, are only intended as ad hoc debugging aids +for human operators. While they may return large amounts of formatted +data, it is not expected that machines will need to parse the result. +The overhead of defining a fine grained QAPI type for the data may not +be justified by the potential benefit. In such cases, it is permitted +to have a command return a simple string that contains formatted data, +however, it is mandatory for the command to use the 'x-' name prefix. +This indicates that the command is not guaranteed to be long term +stable / liable to change in future and is not following QAPI design +best practices. An example where this approach is taken is the QMP +command "x-query-registers". This returns a formatted dump of the +architecture specific CPU state. The way the data is formatted varies +across QEMU targets, is liable to change over time, and is only +intended to be consumed as an opaque string by machines. Refer to the +`Writing a debugging aid returning unstructured text`_ section for +an illustration. 
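+As a purely illustrative aside (the QMP socket path below is an assumption
+and the output is elided), such an ``x-`` command can be exercised by hand
+with the ``qmp-shell`` helper::
+
+  qemu-system-x86_64 -qmp unix:/tmp/qmp-sock,server=on,wait=off ...
+  scripts/qmp/qmp-shell /tmp/qmp-sock
+  (QEMU) x-query-registers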
+ User Defined Types ~~~~~~~~~~~~~~~~~~ @@ -466,9 +499,7 @@ Here's the HMP counterpart of the query-alarm-clock command:: Error *err = NULL; clock = qmp_query_alarm_clock(&err); - if (err) { - monitor_printf(mon, "Could not query alarm clock information\n"); - error_free(err); + if (hmp_handle_error(mon, err)) { return; } @@ -607,9 +638,7 @@ has to traverse the list, it's shown below for reference:: Error *err = NULL; method_list = qmp_query_alarm_methods(&err); - if (err) { - monitor_printf(mon, "Could not query alarm methods\n"); - error_free(err); + if (hmp_handle_error(mon, err)) { return; } @@ -620,3 +649,100 @@ has to traverse the list, it's shown below for reference:: qapi_free_TimerAlarmMethodList(method_list); } + +Writing a debugging aid returning unstructured text --------------------------------------------------- + +As discussed in section `Modelling data in QAPI`_, it is required that +commands expecting machine usage be using fine-grained QAPI data types. +The exception to this rule applies when the command is solely intended +as a debugging aid and allows for returning unstructured text. This is +commonly needed for query commands that report aspects of QEMU's +internal state that are useful to human operators. + +In this example we will consider a simplified variant of the HMP +command ``info roms``. Following the earlier rules, this command will +need to live under the ``x-`` name prefix, so its QMP implementation +will be called ``x-query-roms``. It will have no parameters and will +return a single text string:: + + { 'struct': 'HumanReadableText', + 'data': { 'human-readable-text': 'str' } } + + { 'command': 'x-query-roms', + 'returns': 'HumanReadableText' } + +The ``HumanReadableText`` struct is intended to be used for all +commands under the ``x-`` name prefix that are returning unstructured +text targeted at humans. It should never be used for commands outside +the ``x-`` name prefix, as those should be using structured QAPI types. + +Implementing the QMP command +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The QMP implementation will typically involve creating a ``GString`` +object and printing formatted data into it:: + + HumanReadableText *qmp_x_query_roms(Error **errp) + { + g_autoptr(GString) buf = g_string_new(""); + Rom *rom; + + QTAILQ_FOREACH(rom, &roms, next) { + g_string_append_printf(buf, "%s size=0x%06zx name=\"%s\"\n", + memory_region_name(rom->mr), + rom->romsize, + rom->name); + } + + return human_readable_text_from_str(buf); + } + + +Implementing the HMP command +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Now that the QMP command is in place, we can also make it available in +the human monitor (HMP) as shown in previous examples. The HMP +implementations will all look fairly similar, as all they need do is +invoke the QMP command and then print the resulting text or error +message. Here's the implementation of the "info roms" HMP command:: + + void hmp_info_roms(Monitor *mon, const QDict *qdict) + { + Error *err = NULL; + g_autoptr(HumanReadableText) info = qmp_x_query_roms(&err); + + if (hmp_handle_error(mon, err)) { + return; + } + monitor_puts(mon, info->human_readable_text); + } + +Also, you have to add the function's prototype to the hmp.h file.
+ +There's one last step to actually make the command available to +monitor users, we should add it to the hmp-commands-info.hx file:: + + { + .name = "roms", + .args_type = "", + .params = "", + .help = "show roms", + .cmd = hmp_info_roms, + }, + +The case of writing a HMP info handler that calls a no-parameter QMP query +command is quite common. To simplify the implementation there is a general +purpose HMP info handler for this scenario. All that is required to expose +a no-parameter QMP query command via HMP is to declare it using the +'.cmd_info_hrt' field to point to the QMP handler, and leave the '.cmd' +field NULL:: + + { + .name = "roms", + .args_type = "", + .params = "", + .help = "show roms", + .cmd_info_hrt = qmp_x_query_roms, + }, diff --git a/docs/hyperv.txt b/docs/hyperv.txt deleted file mode 100644 index 000638a2fd..0000000000 --- a/docs/hyperv.txt +++ /dev/null @@ -1,222 +0,0 @@ -Hyper-V Enlightenments -====================== - - -1. Description -=============== -In some cases when implementing a hardware interface in software is slow, KVM -implements its own paravirtualized interfaces. This works well for Linux as -guest support for such features is added simultaneously with the feature itself. -It may, however, be hard-to-impossible to add support for these interfaces to -proprietary OSes, namely, Microsoft Windows. - -KVM on x86 implements Hyper-V Enlightenments for Windows guests. These features -make Windows and Hyper-V guests think they're running on top of a Hyper-V -compatible hypervisor and use Hyper-V specific features. - - -2. Setup -========= -No Hyper-V enlightenments are enabled by default by either KVM or QEMU. In -QEMU, individual enlightenments can be enabled through CPU flags, e.g: - - qemu-system-x86_64 --enable-kvm --cpu host,hv_relaxed,hv_vpindex,hv_time, ... - -Sometimes there are dependencies between enlightenments, QEMU is supposed to -check that the supplied configuration is sane. - -When any set of the Hyper-V enlightenments is enabled, QEMU changes hypervisor -identification (CPUID 0x40000000..0x4000000A) to Hyper-V. KVM identification -and features are kept in leaves 0x40000100..0x40000101. - - -3. Existing enlightenments -=========================== - -3.1. hv-relaxed -================ -This feature tells guest OS to disable watchdog timeouts as it is running on a -hypervisor. It is known that some Windows versions will do this even when they -see 'hypervisor' CPU flag. - -3.2. hv-vapic -============== -Provides so-called VP Assist page MSR to guest allowing it to work with APIC -more efficiently. In particular, this enlightenment allows paravirtualized -(exit-less) EOI processing. - -3.3. hv-spinlocks=xxx -====================== -Enables paravirtualized spinlocks. The parameter indicates how many times -spinlock acquisition should be attempted before indicating the situation to the -hypervisor. A special value 0xffffffff indicates "never notify". - -3.4. hv-vpindex -================ -Provides HV_X64_MSR_VP_INDEX (0x40000002) MSR to the guest which has Virtual -processor index information. This enlightenment makes sense in conjunction with -hv-synic, hv-stimer and other enlightenments which require the guest to know its -Virtual Processor indices (e.g. when VP index needs to be passed in a -hypercall). - -3.5. hv-runtime -================ -Provides HV_X64_MSR_VP_RUNTIME (0x40000010) MSR to the guest. The MSR keeps the -virtual processor run time in 100ns units. 
This gives guest operating system an -idea of how much time was 'stolen' from it (when the virtual CPU was preempted -to perform some other work). - -3.6. hv-crash -============== -Provides HV_X64_MSR_CRASH_P0..HV_X64_MSR_CRASH_P5 (0x40000100..0x40000105) and -HV_X64_MSR_CRASH_CTL (0x40000105) MSRs to the guest. These MSRs are written to -by the guest when it crashes, HV_X64_MSR_CRASH_P0..HV_X64_MSR_CRASH_P5 MSRs -contain additional crash information. This information is outputted in QEMU log -and through QAPI. -Note: unlike under genuine Hyper-V, write to HV_X64_MSR_CRASH_CTL causes guest -to shutdown. This effectively blocks crash dump generation by Windows. - -3.7. hv-time -============= -Enables two Hyper-V-specific clocksources available to the guest: MSR-based -Hyper-V clocksource (HV_X64_MSR_TIME_REF_COUNT, 0x40000020) and Reference TSC -page (enabled via MSR HV_X64_MSR_REFERENCE_TSC, 0x40000021). Both clocksources -are per-guest, Reference TSC page clocksource allows for exit-less time stamp -readings. Using this enlightenment leads to significant speedup of all timestamp -related operations. - -3.8. hv-synic -============== -Enables Hyper-V Synthetic interrupt controller - an extension of a local APIC. -When enabled, this enlightenment provides additional communication facilities -to the guest: SynIC messages and Events. This is a pre-requisite for -implementing VMBus devices (not yet in QEMU). Additionally, this enlightenment -is needed to enable Hyper-V synthetic timers. SynIC is controlled through MSRs -HV_X64_MSR_SCONTROL..HV_X64_MSR_EOM (0x40000080..0x40000084) and -HV_X64_MSR_SINT0..HV_X64_MSR_SINT15 (0x40000090..0x4000009F) - -Requires: hv-vpindex - -3.9. hv-stimer -=============== -Enables Hyper-V synthetic timers. There are four synthetic timers per virtual -CPU controlled through HV_X64_MSR_STIMER0_CONFIG..HV_X64_MSR_STIMER3_COUNT -(0x400000B0..0x400000B7) MSRs. These timers can work either in single-shot or -periodic mode. It is known that certain Windows versions revert to using HPET -(or even RTC when HPET is unavailable) extensively when this enlightenment is -not provided; this can lead to significant CPU consumption, even when virtual -CPU is idle. - -Requires: hv-vpindex, hv-synic, hv-time - -3.10. hv-tlbflush -================== -Enables paravirtualized TLB shoot-down mechanism. On x86 architecture, remote -TLB flush procedure requires sending IPIs and waiting for other CPUs to perform -local TLB flush. In virtualized environment some virtual CPUs may not even be -scheduled at the time of the call and may not require flushing (or, flushing -may be postponed until the virtual CPU is scheduled). hv-tlbflush enlightenment -implements TLB shoot-down through hypervisor enabling the optimization. - -Requires: hv-vpindex - -3.11. hv-ipi -============= -Enables paravirtualized IPI send mechanism. HvCallSendSyntheticClusterIpi -hypercall may target more than 64 virtual CPUs simultaneously, doing the same -through APIC requires more than one access (and thus exit to the hypervisor). - -Requires: hv-vpindex - -3.12. hv-vendor-id=xxx -======================= -This changes Hyper-V identification in CPUID 0x40000000.EBX-EDX from the default -"Microsoft Hv". The parameter should be no longer than 12 characters. According -to the specification, guests shouldn't use this information and it is unknown -if there is a Windows version which acts differently. 
-Note: hv-vendor-id is not an enlightenment and thus doesn't enable Hyper-V -identification when specified without some other enlightenment. - -3.13. hv-reset -=============== -Provides HV_X64_MSR_RESET (0x40000003) MSR to the guest allowing it to reset -itself by writing to it. Even when this MSR is enabled, it is not a recommended -way for Windows to perform system reboot and thus it may not be used. - -3.14. hv-frequencies -============================================ -Provides HV_X64_MSR_TSC_FREQUENCY (0x40000022) and HV_X64_MSR_APIC_FREQUENCY -(0x40000023) allowing the guest to get its TSC/APIC frequencies without doing -measurements. - -3.15 hv-reenlightenment -======================== -The enlightenment is nested specific, it targets Hyper-V on KVM guests. When -enabled, it provides HV_X64_MSR_REENLIGHTENMENT_CONTROL (0x40000106), -HV_X64_MSR_TSC_EMULATION_CONTROL (0x40000107)and HV_X64_MSR_TSC_EMULATION_STATUS -(0x40000108) MSRs allowing the guest to get notified when TSC frequency changes -(only happens on migration) and keep using old frequency (through emulation in -the hypervisor) until it is ready to switch to the new one. This, in conjunction -with hv-frequencies, allows Hyper-V on KVM to pass stable clocksource (Reference -TSC page) to its own guests. - -Note, KVM doesn't fully support re-enlightenment notifications and doesn't -emulate TSC accesses after migration so 'tsc-frequency=' CPU option also has to -be specified to make migration succeed. The destination host has to either have -the same TSC frequency or support TSC scaling CPU feature. - -Recommended: hv-frequencies - -3.16. hv-evmcs -=============== -The enlightenment is nested specific, it targets Hyper-V on KVM guests. When -enabled, it provides Enlightened VMCS version 1 feature to the guest. The feature -implements paravirtualized protocol between L0 (KVM) and L1 (Hyper-V) -hypervisors making L2 exits to the hypervisor faster. The feature is Intel-only. -Note: some virtualization features (e.g. Posted Interrupts) are disabled when -hv-evmcs is enabled. It may make sense to measure your nested workload with and -without the feature to find out if enabling it is beneficial. - -Requires: hv-vapic - -3.17. hv-stimer-direct -======================= -Hyper-V specification allows synthetic timer operation in two modes: "classic", -when expiration event is delivered as SynIC message and "direct", when the event -is delivered via normal interrupt. It is known that nested Hyper-V can only -use synthetic timers in direct mode and thus 'hv-stimer-direct' needs to be -enabled. - -Requires: hv-vpindex, hv-synic, hv-time, hv-stimer - -3.17. hv-no-nonarch-coresharing=on/off/auto -=========================================== -This enlightenment tells guest OS that virtual processors will never share a -physical core unless they are reported as sibling SMT threads. This information -is required by Windows and Hyper-V guests to properly mitigate SMT related CPU -vulnerabilities. -When the option is set to 'auto' QEMU will enable the feature only when KVM -reports that non-architectural coresharing is impossible, this means that -hyper-threading is not supported or completely disabled on the host. This -setting also prevents migration as SMT settings on the destination may differ. -When the option is set to 'on' QEMU will always enable the feature, regardless -of host setup. To keep guests secure, this can only be used in conjunction with -exposing correct vCPU topology and vCPU pinning. - -4. 
Development features -======================== -In some cases (e.g. during development) it may make sense to use QEMU in -'pass-through' mode and give Windows guests all enlightenments currently -supported by KVM. This pass-through mode is enabled by "hv-passthrough" CPU -flag. -Note: "hv-passthrough" flag only enables enlightenments which are known to QEMU -(have corresponding "hv-*" flag) and copies "hv-spinlocks="/"hv-vendor-id=" -values from KVM to QEMU. "hv-passthrough" overrides all other "hv-*" settings on -the command line. Also, enabling this flag effectively prevents migration as the -list of enabled enlightenments may differ between target and destination hosts. - - -4. Useful links -================ -Hyper-V Top Level Functional specification and other information: -https://github.com/MicrosoftDocs/Virtualization-Documentation diff --git a/docs/image-fuzzer.txt b/docs/image-fuzzer.txt index 3e23ebec33..279cc8c807 100644 --- a/docs/image-fuzzer.txt +++ b/docs/image-fuzzer.txt @@ -51,10 +51,10 @@ assumes that core dumps will be generated in the current working directory. For comprehensive test results, please, set up your test environment properly. -Paths to binaries under test (SUTs) qemu-img and qemu-io are retrieved from -environment variables. If the environment check fails the runner will +Paths to binaries under test (SUTs) ``qemu-img`` and ``qemu-io`` are retrieved +from environment variables. If the environment check fails the runner will use SUTs installed in system paths. -qemu-img is required for creation of backing files, so it's mandatory to set +``qemu-img`` is required for creation of backing files, so it's mandatory to set the related environment variable if it's not installed in the system path. For details about environment variables see qemu-iotests/check. diff --git a/docs/index.rst b/docs/index.rst index 5f7eaaa632..0b9ee9901d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -3,6 +3,7 @@ You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. +================================ Welcome to QEMU's documentation! ================================ diff --git a/docs/interop/bitmaps.rst b/docs/interop/bitmaps.rst index 059ad67929..1de46febdc 100644 --- a/docs/interop/bitmaps.rst +++ b/docs/interop/bitmaps.rst @@ -539,12 +539,11 @@ other partial disk images on top of a base image to reconstruct a full backup from the point in time at which the incremental backup was issued. The "Push Model" here references the fact that QEMU is "pushing" the modified -blocks out to a destination. We will be using the `drive-backup -`_ and `blockdev-backup -`_ QMP commands to create both +blocks out to a destination. We will be using the `blockdev-backup +`_ QMP command to create both full and incremental backups. -Both of these commands are jobs, which have their own QMP API for querying and +The command is a background job, which has its own QMP API for querying and management documented in `Background jobs `_. @@ -557,6 +556,10 @@ create a new incremental backup chain attached to a drive. This example creates a new, full backup of "drive0" and accompanies it with a new, empty bitmap that records writes from this point in time forward. +The target can be created with the help of `blockdev-add +`_ or `blockdev-create +`_ command. + .. note:: Any new writes that happen after this command is issued, even while the backup job runs, will be written locally and not to the backup destination. 
These writes will be recorded in the bitmap @@ -576,12 +579,11 @@ new, empty bitmap that records writes from this point in time forward. } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive0", - "target": "/path/to/drive0.full.qcow2", - "sync": "full", - "format": "qcow2" + "target": "target0", + "sync": "full" } } ] @@ -664,12 +666,11 @@ use a transaction to reset the bitmap while making a new full backup: } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive0", - "target": "/path/to/drive0.new_full.qcow2", - "sync": "full", - "format": "qcow2" + "target": "target0", + "sync": "full" } } ] @@ -728,19 +729,35 @@ Example: First Incremental Backup $ qemu-img create -f qcow2 drive0.inc0.qcow2 \ -b drive0.full.qcow2 -F qcow2 +#. Add target block node: + + .. code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.inc0.qcow2" + } + } + } + + <- { "return": {} } + #. Issue an incremental backup command: .. code-block:: QMP -> { - "execute": "drive-backup", + "execute": "blockdev-backup", "arguments": { "device": "drive0", "bitmap": "bitmap0", - "target": "drive0.inc0.qcow2", - "format": "qcow2", - "sync": "incremental", - "mode": "existing" + "target": "target0", + "sync": "incremental" } } @@ -785,20 +802,36 @@ Example: Second Incremental Backup $ qemu-img create -f qcow2 drive0.inc1.qcow2 \ -b drive0.inc0.qcow2 -F qcow2 +#. Add target block node: + + .. code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.inc1.qcow2" + } + } + } + + <- { "return": {} } + #. Issue a new incremental backup command. The only difference here is that we have changed the target image below. .. code-block:: QMP -> { - "execute": "drive-backup", + "execute": "blockdev-backup", "arguments": { "device": "drive0", "bitmap": "bitmap0", - "target": "drive0.inc1.qcow2", - "format": "qcow2", - "sync": "incremental", - "mode": "existing" + "target": "target0", + "sync": "incremental" } } @@ -866,20 +899,36 @@ image: file for you, but you lose control over format options like compatibility and preallocation presets. +#. Add target block node: + + .. code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.inc2.qcow2" + } + } + } + + <- { "return": {} } + #. Issue a new incremental backup command. Apart from the new destination image, there is no difference from the last two examples. .. code-block:: QMP -> { - "execute": "drive-backup", + "execute": "blockdev-backup", "arguments": { "device": "drive0", "bitmap": "bitmap0", - "target": "drive0.inc2.qcow2", - "format": "qcow2", - "sync": "incremental", - "mode": "existing" + "target": "target0", + "sync": "incremental" } } @@ -930,6 +979,38 @@ point in time. $ qemu-img create -f qcow2 drive0.full.qcow2 64G $ qemu-img create -f qcow2 drive1.full.qcow2 64G +#. Add target block nodes: + + .. 
code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.full.qcow2" + } + } + } + + <- { "return": {} } + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target1", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive1.full.qcow2" + } + } + } + + <- { "return": {} } + #. Create a full (anchor) backup for each drive, with accompanying bitmaps: .. code-block:: QMP @@ -953,21 +1034,19 @@ point in time. } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive0", - "target": "/path/to/drive0.full.qcow2", - "sync": "full", - "format": "qcow2" + "target": "target0", + "sync": "full" } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive1", - "target": "/path/to/drive1.full.qcow2", - "sync": "full", - "format": "qcow2" + "target": "target1", + "sync": "full" } } ] @@ -1016,6 +1095,38 @@ point in time. $ qemu-img create -f qcow2 drive1.inc0.qcow2 \ -b drive1.full.qcow2 -F qcow2 +#. Add target block nodes: + + .. code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.inc0.qcow2" + } + } + } + + <- { "return": {} } + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target1", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive1.inc0.qcow2" + } + } + } + + <- { "return": {} } + #. Issue a multi-drive incremental push backup transaction: .. code-block:: QMP @@ -1025,25 +1136,21 @@ point in time. "arguments": { "actions": [ { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive0", "bitmap": "bitmap0", - "format": "qcow2", - "mode": "existing", "sync": "incremental", - "target": "drive0.inc0.qcow2" + "target": "target0" } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive1", "bitmap": "bitmap0", - "format": "qcow2", - "mode": "existing", "sync": "incremental", - "target": "drive1.inc0.qcow2" + "target": "target1" } }, ] @@ -1119,19 +1226,35 @@ described above. This example demonstrates the single-job failure case: $ qemu-img create -f qcow2 drive0.inc0.qcow2 \ -b drive0.full.qcow2 -F qcow2 +#. Add target block node: + + .. code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.inc0.qcow2" + } + } + } + + <- { "return": {} } + #. Attempt to create an incremental backup via QMP: .. code-block:: QMP -> { - "execute": "drive-backup", + "execute": "blockdev-backup", "arguments": { "device": "drive0", "bitmap": "bitmap0", - "target": "drive0.inc0.qcow2", - "format": "qcow2", - "sync": "incremental", - "mode": "existing" + "target": "target0", + "sync": "incremental" } } @@ -1164,6 +1287,19 @@ described above. This example demonstrates the single-job failure case: "event": "BLOCK_JOB_COMPLETED" } +#. Remove target node: + + .. code-block:: QMP + + -> { + "execute": "blockdev-del", + "arguments": { + "node-name": "target0" + } + } + + <- { "return": {} } + #. Delete the failed image, and re-create it. .. code:: bash @@ -1172,20 +1308,36 @@ described above. This example demonstrates the single-job failure case: $ qemu-img create -f qcow2 drive0.inc0.qcow2 \ -b drive0.full.qcow2 -F qcow2 +#. Add target block node: + + ..
code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.inc0.qcow2" + } + } + } + + <- { "return": {} } + #. Retry the command after fixing the underlying problem, such as freeing up space on the backup volume: .. code-block:: QMP -> { - "execute": "drive-backup", + "execute": "blockdev-backup", "arguments": { "device": "drive0", "bitmap": "bitmap0", - "target": "drive0.inc0.qcow2", - "format": "qcow2", - "sync": "incremental", - "mode": "existing" + "target": "target0", + "sync": "incremental" } } @@ -1210,7 +1362,8 @@ described above. This example demonstrates the single-job failure case: Example: Partial Transactional Failures ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -QMP commands like `drive-backup `_ +QMP commands like `blockdev-backup +`_ conceptually only start a job, and so transactions containing these commands may succeed even if the job it created later fails. This might have surprising interactions with notions of how a "transaction" ought to behave. @@ -1240,25 +1393,21 @@ and one succeeds: "arguments": { "actions": [ { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive0", "bitmap": "bitmap0", - "format": "qcow2", - "mode": "existing", "sync": "incremental", - "target": "drive0.inc0.qcow2" + "target": "target0" } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive1", "bitmap": "bitmap0", - "format": "qcow2", - "mode": "existing", "sync": "incremental", - "target": "drive1.inc0.qcow2" + "target": "target1" } }] } @@ -1375,25 +1524,21 @@ applied: }, "actions": [ { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive0", "bitmap": "bitmap0", - "format": "qcow2", - "mode": "existing", "sync": "incremental", - "target": "drive0.inc0.qcow2" + "target": "target0" } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive1", "bitmap": "bitmap0", - "format": "qcow2", - "mode": "existing", "sync": "incremental", - "target": "drive1.inc0.qcow2" + "target": "target1" } }] } diff --git a/docs/interop/dbus-display.rst b/docs/interop/dbus-display.rst new file mode 100644 index 0000000000..8c6e8e0f5a --- /dev/null +++ b/docs/interop/dbus-display.rst @@ -0,0 +1,31 @@ +D-Bus display +============= + +QEMU can export the VM display through D-Bus (when started with ``-display +dbus``), to allow out-of-process UIs, remote protocol servers or other +interactive display usages. + +Various specialized D-Bus interfaces are available on different object paths +under ``/org/qemu/Display1/``, depending on the VM configuration. + +QEMU also implements the standard interfaces, such as +`org.freedesktop.DBus.Introspectable +`_. + +.. contents:: + :local: + :depth: 1 + +.. only:: sphinx4 + + .. dbus-doc:: ui/dbus-display1.xml + +.. only:: not sphinx4 + + .. warning:: + Sphinx 4 is required to build D-Bus documentation. + + This is the content of ``ui/dbus-display1.xml``: + + .. literalinclude:: ../../ui/dbus-display1.xml + :language: xml diff --git a/docs/interop/dbus-vmstate.rst b/docs/interop/dbus-vmstate.rst index 1d719c1c60..5fb3f279e2 100644 --- a/docs/interop/dbus-vmstate.rst +++ b/docs/interop/dbus-vmstate.rst @@ -2,9 +2,6 @@ D-Bus VMState ============= -Introduction -============ - The QEMU dbus-vmstate object's aim is to migrate helpers' data running on a QEMU D-Bus bus. 
(refer to the :doc:`dbus` document for some recommendations on D-Bus usage) @@ -26,49 +23,16 @@ dbus-vmstate object can be configured with the expected list of helpers by setting its ``id-list`` property, with a comma-separated ``Id`` list. -Interface -========= +.. only:: sphinx4 -On object path ``/org/qemu/VMState1``, the following -``org.qemu.VMState1`` interface should be implemented: + .. dbus-doc:: backends/dbus-vmstate1.xml -.. code:: xml +.. only:: not sphinx4 - - - - - - - - - + .. warning:: + Sphinx 4 is required to build D-Bus documentation. -"Id" property -------------- + This is the content of ``backends/dbus-vmstate1.xml``: -A string that identifies the helper uniquely. (maximum 256 bytes -including terminating NUL byte) - -.. note:: - - The helper ID namespace is a separate namespace. In particular, it is not - related to QEMU "id" used in -object/-device objects. - -Load(in u8[] bytes) method --------------------------- - -The method called on destination with the state to restore. - -The helper may be initially started in a waiting state (with -an --incoming argument for example), and it may resume on success. - -An error may be returned to the caller. - -Save(out u8[] bytes) method ---------------------------- - -The method called on the source to get the current state to be -migrated. The helper should continue to run normally. - -An error may be returned to the caller. + .. literalinclude:: ../../backends/dbus-vmstate1.xml + :language: xml diff --git a/docs/interop/dbus.rst b/docs/interop/dbus.rst index be596d3f41..427debc9c5 100644 --- a/docs/interop/dbus.rst +++ b/docs/interop/dbus.rst @@ -108,3 +108,5 @@ QEMU Interfaces =============== :doc:`dbus-vmstate` + +:doc:`dbus-display` diff --git a/docs/interop/firmware.json b/docs/interop/firmware.json index 8d8b0be030..56814f02b3 100644 --- a/docs/interop/firmware.json +++ b/docs/interop/firmware.json @@ -113,13 +113,22 @@ # Virtualization, as specified in the AMD64 Architecture # Programmer's Manual. QEMU command line options related to # this feature are documented in -# "docs/amd-memory-encryption.txt". +# "docs/system/i386/amd-memory-encryption.rst". # # @amd-sev-es: The firmware supports running under AMD Secure Encrypted # Virtualization - Encrypted State, as specified in the AMD64 # Architecture Programmer's Manual. QEMU command line options # related to this feature are documented in -# "docs/amd-memory-encryption.txt". +# "docs/system/i386/amd-memory-encryption.rst". +# +# @amd-sev-snp: The firmware supports running under AMD Secure Encrypted +# Virtualization - Secure Nested Paging, as specified in the +# AMD64 Architecture Programmer's Manual. QEMU command line +# options related to this feature are documented in +# "docs/system/i386/amd-memory-encryption.rst". +# +# @intel-tdx: The firmware supports running under Intel Trust Domain +# Extensions (TDX). 
# # @enrolled-keys: The variable store (NVRAM) template associated with # the firmware binary has the UEFI Secure Boot @@ -185,9 +194,11 @@ # Since: 3.0 ## { 'enum' : 'FirmwareFeature', - 'data' : [ 'acpi-s3', 'acpi-s4', 'amd-sev', 'amd-sev-es', 'enrolled-keys', - 'requires-smm', 'secure-boot', 'verbose-dynamic', - 'verbose-static' ] } + 'data' : [ 'acpi-s3', 'acpi-s4', + 'amd-sev', 'amd-sev-es', 'amd-sev-snp', + 'intel-tdx', + 'enrolled-keys', 'requires-smm', 'secure-boot', + 'verbose-dynamic', 'verbose-static' ] } ## # @FirmwareFlashFile: @@ -210,24 +221,61 @@ 'data' : { 'filename' : 'str', 'format' : 'BlockdevDriver' } } + +## +# @FirmwareFlashType: +# +# Describes how the firmware build handles code versus variable +# persistence. +# +# @split: the executable file contains code while the NVRAM +# template provides variable storage. The executable +# must be configured read-only and can be shared between +# multiple guests. The NVRAM template must be cloned +# for each new guest and configured read-write. +# +# @combined: the executable file contains both code and +# variable storage. The executable must be cloned +# for each new guest and configured read-write. +# No NVRAM template will be specified. +# +# @stateless: the executable file contains code and variable +# storage is not persisted. The executable must +# be configured read-only and can be shared +# between multiple guests. No NVRAM template +# will be specified. +# +# Since: 7.0.0 +## +{ 'enum': 'FirmwareFlashMode', + 'data': [ 'split', 'combined', 'stateless' ] } + ## # @FirmwareMappingFlash: # # Describes loading and mapping properties for the firmware executable # and its accompanying NVRAM file, when @FirmwareDevice is @flash. # -# @executable: Identifies the firmware executable. The firmware -# executable may be shared by multiple virtual machine -# definitions. The preferred corresponding QEMU command -# line options are +# @mode: Describes how the firmware build handles code versus variable +# storage. If not present, it must be treated as if it was +# configured with value ``split``. Since: 7.0.0 +# +# @executable: Identifies the firmware executable. The @mode +# indicates whether there will be an associated +# NVRAM template present. The preferred +# corresponding QEMU command line options are # -drive if=none,id=pflash0,readonly=on,file=@executable.@filename,format=@executable.@format # -machine pflash0=pflash0 -# or equivalent -blockdev instead of -drive. +# or equivalent -blockdev instead of -drive. When +# @mode is ``combined`` the executable must be +# cloned before use and configured with readonly=off. # With QEMU versions older than 4.0, you have to use # -drive if=pflash,unit=0,readonly=on,file=@executable.@filename,format=@executable.@format # # @nvram-template: Identifies the NVRAM template compatible with -# @executable. Management software instantiates an +# @executable, when @mode is set to ``split``, +# otherwise it should not be present. +# Management software instantiates an # individual copy -- a specific NVRAM file -- from # @nvram-template.@filename for each new virtual # machine definition created. 
@nvram-template.@filename @@ -246,8 +294,9 @@ # Since: 3.0 ## { 'struct' : 'FirmwareMappingFlash', - 'data' : { 'executable' : 'FirmwareFlashFile', - 'nvram-template' : 'FirmwareFlashFile' } } + 'data' : { '*mode': 'FirmwareFlashMode', + 'executable' : 'FirmwareFlashFile', + '*nvram-template' : 'FirmwareFlashFile' } } ## # @FirmwareMappingKernel: diff --git a/docs/interop/index.rst b/docs/interop/index.rst index f9801a9c20..b7632acb7b 100644 --- a/docs/interop/index.rst +++ b/docs/interop/index.rst @@ -1,5 +1,6 @@ +------------------------------------------------ System Emulation Management and Interoperability -================================================ +------------------------------------------------ This section of the manual contains documents and specifications that are useful for making QEMU interoperate with other software. @@ -11,6 +12,7 @@ are useful for making QEMU interoperate with other software. bitmaps dbus dbus-vmstate + dbus-display live-block-operations pr-helper qemu-ga @@ -20,3 +22,4 @@ are useful for making QEMU interoperate with other software. vhost-user vhost-user-gpu vhost-vdpa + virtio-balloon-stats diff --git a/docs/interop/live-block-operations.rst b/docs/interop/live-block-operations.rst index 9e3635b233..135784ab33 100644 --- a/docs/interop/live-block-operations.rst +++ b/docs/interop/live-block-operations.rst @@ -53,7 +53,7 @@ files in a disk image backing chain: (1) Directional: 'base' and 'top'. Given the simple disk image chain above, image [A] can be referred to as 'base', and image [B] as - 'top'. (This terminology can be seen in in QAPI schema file, + 'top'. (This terminology can be seen in the QAPI schema file, block-core.json.) (2) Relational: 'backing file' and 'overlay'. Again, taking the same @@ -116,8 +116,8 @@ QEMU block layer supports. (3) ``drive-mirror`` (and ``blockdev-mirror``): Synchronize a running disk to another image. -(4) ``drive-backup`` (and ``blockdev-backup``): Point-in-time (live) copy - of a block device to a destination. +(4) ``blockdev-backup`` (and the deprecated ``drive-backup``): + Point-in-time (live) copy of a block device to a destination. .. _`Interacting with a QEMU instance`: @@ -555,13 +555,14 @@ Currently, there are four different kinds: (3) ``none`` -- Synchronize only the new writes from this point on. - .. note:: In the case of ``drive-backup`` (or ``blockdev-backup``), - the behavior of ``none`` synchronization mode is different. - Normally, a ``backup`` job consists of two parts: Anything - that is overwritten by the guest is first copied out to - the backup, and in the background the whole image is - copied from start to end. With ``sync=none``, it's only - the first part. + .. note:: In the case of ``blockdev-backup`` (or deprecated + ``drive-backup``), the behavior of ``none`` + synchronization mode is different. Normally, a + ``backup`` job consists of two parts: Anything that is + overwritten by the guest is first copied out to the + backup, and in the background the whole image is copied + from start to end. With ``sync=none``, it's only the + first part. (4) ``incremental`` -- Synchronize content that is described by the dirty bitmap @@ -640,7 +641,7 @@ at this point: (QEMU) block-job-complete device=job0 In either of the above cases, if you once again run the -`query-block-jobs` command, there should not be any active block +``query-block-jobs`` command, there should not be any active block operation. 
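For illustration, the same check can also be done with a raw QMP exchange instead of the ``(QEMU)`` qmp-shell prompt used elsewhere in this document; this is a minimal sketch, and an empty ``return`` list is what indicates that no block jobs remain::

    -> { "execute": "query-block-jobs" }

    <- { "return": [] }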
Comparing 'commit' and 'mirror': In both these cases, the overlay images @@ -824,7 +825,7 @@ entire disk image chain, to a target, using ``blockdev-mirror`` would be: job ready to be completed (5) Gracefully complete the 'mirror' block device job, and notice the - the event ``BLOCK_JOB_COMPLETED`` + event ``BLOCK_JOB_COMPLETED`` (6) Shutdown the guest by issuing the QMP ``quit`` command so that caches are flushed @@ -928,19 +929,22 @@ Shutdown the guest, by issuing the ``quit`` QMP command:: } -Live disk backup --- ``drive-backup`` and ``blockdev-backup`` --------------------------------------------------------------- +Live disk backup --- ``blockdev-backup`` and the deprecated ``drive-backup`` +----------------------------------------------------------------------------- -The ``drive-backup`` (and its newer equivalent ``blockdev-backup``) allows +The ``blockdev-backup`` (and the deprecated ``drive-backup``) allows you to create a point-in-time snapshot. -In this case, the point-in-time is when you *start* the ``drive-backup`` -(or its newer equivalent ``blockdev-backup``) command. +In this case, the point-in-time is when you *start* the +``blockdev-backup`` (or deprecated ``drive-backup``) command. QMP invocation for ``drive-backup`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Note that the ``drive-backup`` command is deprecated since QEMU 6.2 and +will be removed in the future. + Yet again, starting afresh with our example disk image chain:: [A] <-- [B] <-- [C] <-- [D] @@ -965,11 +969,22 @@ will be issued, indicating the live block device job operation has completed, and no further action is required. +Moving from the deprecated ``drive-backup`` to newer ``blockdev-backup`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``blockdev-backup`` differs from ``drive-backup`` in how you specify +the backup target. With ``blockdev-backup`` you can't specify a filename +as the target. Instead you use the ``node-name`` of an existing block node, +which you may add with the ``blockdev-add`` or ``blockdev-create`` commands. +Correspondingly, ``blockdev-backup`` doesn't have the ``mode`` and +``format`` arguments, which don't apply to an existing block node. See the +following sections for details and examples. + + Notes on ``blockdev-backup`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The ``blockdev-backup`` command is equivalent in functionality to -``drive-backup``, except that it operates at node-level in a Block Driver +The ``blockdev-backup`` command operates at node-level in a Block Driver State (BDS) graph. E.g. the sequence of actions to create a point-in-time backup diff --git a/docs/interop/nbd.txt b/docs/interop/nbd.txt index 10ce098a29..f5ca25174a 100644 --- a/docs/interop/nbd.txt +++ b/docs/interop/nbd.txt @@ -1,4 +1,4 @@ -Qemu supports the NBD protocol, and has an internal NBD client (see +QEMU supports the NBD protocol, and has an internal NBD client (see block/nbd.c), an internal NBD server (see blockdev-nbd.c), and an external NBD server tool (see qemu-nbd.c). The common code is placed in nbd/*. @@ -7,11 +7,11 @@ The NBD protocol is specified here: https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md The following paragraphs describe some specific properties of NBD -protocol realization in Qemu. +protocol realization in QEMU. = Metadata namespaces = -Qemu supports the "base:allocation" metadata context as defined in the +QEMU supports the "base:allocation" metadata context as defined in the NBD protocol specification, and also defines an additional metadata namespace "qemu".
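As a rough illustration of how the "qemu" namespace is typically consumed, the QMP commands below export a node together with one of its dirty bitmaps over NBD; this is a minimal sketch, and the node name "drive0", bitmap name "bitmap0", export id "export0" and socket path are assumptions rather than fixed names:

    -> { "execute": "nbd-server-start",
         "arguments": { "addr": { "type": "unix",
                                  "data": { "path": "/tmp/nbd.sock" } } } }
    <- { "return": {} }

    -> { "execute": "block-export-add",
         "arguments": { "type": "nbd", "id": "export0",
                        "node-name": "drive0", "name": "drive0",
                        "bitmaps": [ "bitmap0" ] } }
    <- { "return": {} }

A client can then negotiate the "qemu:dirty-bitmap:bitmap0" metadata context and read the dirty status with NBD_CMD_BLOCK_STATUS.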
@@ -68,3 +68,4 @@ NBD_CMD_BLOCK_STATUS for "qemu:dirty-bitmap:", NBD_CMD_CACHE * 4.2: NBD_FLAG_CAN_MULTI_CONN for shareable read-only exports, NBD_CMD_FLAG_FAST_ZERO * 5.2: NBD_CMD_BLOCK_STATUS for "qemu:allocation-depth" +* 7.1: NBD_FLAG_CAN_MULTI_CONN for shareable writable exports diff --git a/docs/interop/qcow2.txt b/docs/interop/qcow2.txt index 0463f761ef..f7dc304ff6 100644 --- a/docs/interop/qcow2.txt +++ b/docs/interop/qcow2.txt @@ -313,7 +313,7 @@ The fields of the bitmaps extension are: The number of bitmaps contained in the image. Must be greater than or equal to 1. - Note: Qemu currently only supports up to 65535 bitmaps per + Note: QEMU currently only supports up to 65535 bitmaps per image. 4 - 7: Reserved, must be zero. @@ -775,7 +775,7 @@ Structure of a bitmap directory entry: 2: extra_data_compatible This flags is meaningful when the extra data is unknown to the software (currently any extra data is - unknown to Qemu). + unknown to QEMU). If it is set, the bitmap may be used as expected, extra data must be left as is. If it is not set, the bitmap must not be used, but @@ -793,7 +793,7 @@ Structure of a bitmap directory entry: 17: granularity_bits Granularity bits. Valid values: 0 - 63. - Note: Qemu currently supports only values 9 - 31. + Note: QEMU currently supports only values 9 - 31. Granularity is calculated as granularity = 1 << granularity_bits @@ -804,7 +804,7 @@ Structure of a bitmap directory entry: 18 - 19: name_size Size of the bitmap name. Must be non-zero. - Note: Qemu currently doesn't support values greater than + Note: QEMU currently doesn't support values greater than 1023. 20 - 23: extra_data_size diff --git a/docs/interop/qemu-ga.rst b/docs/interop/qemu-ga.rst index 3063357bb5..a9183802d1 100644 --- a/docs/interop/qemu-ga.rst +++ b/docs/interop/qemu-ga.rst @@ -79,10 +79,10 @@ Options Daemonize after startup (detach from terminal). -.. option:: -b, --blacklist=LIST +.. option:: -b, --block-rpcs=LIST - Comma-separated list of RPCs to disable (no spaces, ``?`` to list - available RPCs). + Comma-separated list of RPCs to disable (no spaces, use ``help`` to + list available RPCs). .. option:: -D, --dump-conf @@ -125,7 +125,7 @@ pidfile string fsfreeze-hook string statedir string verbose boolean -blacklist string list +block-rpcs string list ============= =========== See also diff --git a/docs/interop/vhost-user-gpu.rst b/docs/interop/vhost-user-gpu.rst index 71a2c52b31..1640553729 100644 --- a/docs/interop/vhost-user-gpu.rst +++ b/docs/interop/vhost-user-gpu.rst @@ -13,10 +13,10 @@ Introduction ============ The vhost-user-gpu protocol is aiming at sharing the rendering result -of a virtio-gpu, done from a vhost-user slave process to a vhost-user -master process (such as QEMU). It bears a resemblance to a display +of a virtio-gpu, done from a vhost-user back-end process to a vhost-user +front-end process (such as QEMU). It bears a resemblance to a display server protocol, if you consider QEMU as the display server and the -slave as the client, but in a very limited way. Typically, it will +back-end as the client, but in a very limited way. Typically, it will work by setting a scanout/display configuration, before sending flush events for the display updates. It will also update the cursor shape and position. @@ -26,8 +26,8 @@ socket ancillary data to share opened file descriptors (DMABUF fds or shared memory). The socket is usually obtained via ``VHOST_USER_GPU_SET_SOCKET``. -Requests are sent by the *slave*, and the optional replies by the -*master*. 
+Requests are sent by the *back-end*, and the optional replies by the +*front-end*. Wire format =========== diff --git a/docs/interop/vhost-user.rst b/docs/interop/vhost-user.rst index edc3ad84a3..3f18ab424e 100644 --- a/docs/interop/vhost-user.rst +++ b/docs/interop/vhost-user.rst @@ -23,21 +23,41 @@ space process on the same host. It uses communication over a Unix domain socket to share file descriptors in the ancillary data of the message. -The protocol defines 2 sides of the communication, *master* and -*slave*. *Master* is the application that shares its virtqueues, in -our case QEMU. *Slave* is the consumer of the virtqueues. +The protocol defines 2 sides of the communication, *front-end* and +*back-end*. The *front-end* is the application that shares its virtqueues, in +our case QEMU. The *back-end* is the consumer of the virtqueues. -In the current implementation QEMU is the *master*, and the *slave* is -the external process consuming the virtio queues, for example a +In the current implementation QEMU is the *front-end*, and the *back-end* +is the external process consuming the virtio queues, for example a software Ethernet switch running in user space, such as Snabbswitch, -or a block device backend processing read & write to a virtual -disk. In order to facilitate interoperability between various back-end +or a block device back-end processing read & write to a virtual +disk. In order to facilitate interoperability between various back-end implementations, it is recommended to follow the :ref:`Backend program conventions `. -*Master* and *slave* can be either a client (i.e. connecting) or +The *front-end* and *back-end* can be either a client (i.e. connecting) or server (listening) in the socket communication. +Support for platforms other than Linux +-------------------------------------- + +While vhost-user was initially developed targeting Linux, nowadays it +is supported on any platform that provides the following features: + +- A way for requesting shared memory represented by a file descriptor + so it can be passed over a UNIX domain socket and then mapped by the + other process. + +- AF_UNIX sockets with SCM_RIGHTS, so QEMU and the other process can + exchange messages through it, including ancillary data when needed. + +- Either eventfd or pipe/pipe2. On platforms where eventfd is not + available, QEMU will automatically fall back to pipe2 or, as a last + resort, pipe. Each file descriptor will be used for receiving or + sending events by reading or writing (respectively) an 8-byte value + to it. The 8-byte value itself has no meaning and + should not be interpreted. + Message Specification ===================== @@ -57,7 +77,7 @@ Header :flags: 32-bit bit field - Lower 2 bits are the version (currently 0x01) -- Bit 2 is the reply flag - needs to be sent on each reply from the slave +- Bit 2 is the reply flag - needs to be sent on each reply from the back-end - Bit 3 is the need_reply flag - see :ref:`REPLY_ACK ` for details. @@ -202,8 +222,8 @@ Virtio device config space :size: a 32-bit configuration space access size in bytes :flags: a 32-bit value: - - 0: Vhost master messages used for writeable fields - - 1: Vhost master messages used for live migration + - 0: Vhost front-end messages used for writable fields + - 1: Vhost front-end messages used for live migration :payload: Size bytes array holding the contents of the virtio device's configuration space @@ -270,8 +290,8 @@ vhost for the Linux Kernel.
Most messages that can be sent via the Unix domain socket implementing vhost-user have an equivalent ioctl to the kernel implementation. -The communication consists of *master* sending message requests and -*slave* sending message replies. Most of the requests don't require +The communication consists of the *front-end* sending message requests and +the *back-end* sending message replies. Most of the requests don't require replies. Here is a list of the ones that do: * ``VHOST_USER_GET_FEATURES`` @@ -285,9 +305,10 @@ replies. Here is a list of the ones that do: :ref:`REPLY_ACK ` The section on ``REPLY_ACK`` protocol extension. -There are several messages that the master sends with file descriptors passed +There are several messages that the front-end sends with file descriptors passed in the ancillary data: +* ``VHOST_USER_ADD_MEM_REG`` * ``VHOST_USER_SET_MEM_TABLE`` * ``VHOST_USER_SET_LOG_BASE`` (if ``VHOST_USER_PROTOCOL_F_LOG_SHMFD``) * ``VHOST_USER_SET_LOG_FD`` @@ -297,100 +318,108 @@ in the ancillary data: * ``VHOST_USER_SET_SLAVE_REQ_FD`` * ``VHOST_USER_SET_INFLIGHT_FD`` (if ``VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD``) -If *master* is unable to send the full message or receives a wrong +If *front-end* is unable to send the full message or receives a wrong reply it will close the connection. An optional reconnection mechanism can be implemented. -If *slave* detects some error such as incompatible features, it may also +If *back-end* detects some error such as incompatible features, it may also close the connection. This should only happen in exceptional circumstances. Any protocol extensions are gated by protocol feature bits, which -allows full backwards compatibility on both master and slave. As -older slaves don't support negotiating protocol features, a feature +allows full backwards compatibility on both front-end and back-end. As +older back-ends don't support negotiating protocol features, a feature bit was dedicated for this purpose:: #define VHOST_USER_F_PROTOCOL_FEATURES 30 -Starting and stopping rings ---------------------------- +Note that VHOST_USER_F_PROTOCOL_FEATURES is the UNUSED (30) feature +bit defined in `VIRTIO 1.1 6.3 Legacy Interface: Reserved Feature Bits +`_. +VIRTIO devices do not advertise this feature bit and therefore VIRTIO +drivers cannot negotiate it. -Client must only process each ring when it is started. +This reserved feature bit was reused by the vhost-user protocol to add +vhost-user protocol feature negotiation in a backwards compatible +fashion. Old vhost-user front-end and back-end implementations continue to +work even though they are not aware of vhost-user protocol feature +negotiation. -Client must only pass data between the ring and the backend, when the -ring is enabled. +Ring states +----------- -If ring is started but disabled, client must process the ring without -talking to the backend. +Rings can be in one of three states: -For example, for a networking device, in the disabled state client -must not supply any new RX packets, but must process and discard any -TX packets. +* stopped: the back-end must not process the ring at all. + +* started but disabled: the back-end must process the ring without + causing any side effects. For example, for a networking device, + in the disabled state the back-end must not supply any new RX packets, + but must process and discard any TX packets. + +* started and enabled. + +Each ring is initialized in a stopped state. 
The back-end must start +ring upon receiving a kick (that is, detecting that file descriptor is +readable) on the descriptor specified by ``VHOST_USER_SET_VRING_KICK`` +or receiving the in-band message ``VHOST_USER_VRING_KICK`` if negotiated, +and stop ring upon receiving ``VHOST_USER_GET_VRING_BASE``. + +Rings can be enabled or disabled by ``VHOST_USER_SET_VRING_ENABLE``. If ``VHOST_USER_F_PROTOCOL_FEATURES`` has not been negotiated, the -ring is initialized in an enabled state. +ring starts directly in the enabled state. If ``VHOST_USER_F_PROTOCOL_FEATURES`` has been negotiated, the ring is -initialized in a disabled state. Client must not pass data to/from the -backend until ring is enabled by ``VHOST_USER_SET_VRING_ENABLE`` with -parameter 1, or after it has been disabled by -``VHOST_USER_SET_VRING_ENABLE`` with parameter 0. +initialized in a disabled state and is enabled by +``VHOST_USER_SET_VRING_ENABLE`` with parameter 1. -Each ring is initialized in a stopped state, client must not process -it until ring is started, or after it has been stopped. - -Client must start ring upon receiving a kick (that is, detecting that -file descriptor is readable) on the descriptor specified by -``VHOST_USER_SET_VRING_KICK`` or receiving the in-band message -``VHOST_USER_VRING_KICK`` if negotiated, and stop ring upon receiving -``VHOST_USER_GET_VRING_BASE``. - -While processing the rings (whether they are enabled or not), client +While processing the rings (whether they are enabled or not), the back-end must support changing some configuration aspects on the fly. Multiple queue support ---------------------- -Many devices have a fixed number of virtqueues. In this case the master +Many devices have a fixed number of virtqueues. In this case the front-end already knows the number of available virtqueues without communicating with the -slave. +back-end. Some devices do not have a fixed number of virtqueues. Instead the maximum -number of virtqueues is chosen by the slave. The number can depend on host -resource availability or slave implementation details. Such devices are called +number of virtqueues is chosen by the back-end. The number can depend on host +resource availability or back-end implementation details. Such devices are called multiple queue devices. -Multiple queue support allows the slave to advertise the maximum number of -queues. This is treated as a protocol extension, hence the slave has to +Multiple queue support allows the back-end to advertise the maximum number of +queues. This is treated as a protocol extension, hence the back-end has to implement protocol features first. The multiple queues feature is supported only when the protocol feature ``VHOST_USER_PROTOCOL_F_MQ`` (bit 0) is set. -The max number of queues the slave supports can be queried with message -``VHOST_USER_GET_QUEUE_NUM``. Master should stop when the number of requested +The max number of queues the back-end supports can be queried with message +``VHOST_USER_GET_QUEUE_NUM``. Front-end should stop when the number of requested queues is bigger than that. -As all queues share one connection, the master uses a unique index for each +As all queues share one connection, the front-end uses a unique index for each queue in the sent message to identify a specified queue. -The master enables queues by sending message ``VHOST_USER_SET_VRING_ENABLE``. +The front-end enables queues by sending message ``VHOST_USER_SET_VRING_ENABLE``. vhost-user-net has historically automatically enabled the first queue pair. 
-Slaves should always implement the ``VHOST_USER_PROTOCOL_F_MQ`` protocol +Back-ends should always implement the ``VHOST_USER_PROTOCOL_F_MQ`` protocol feature, even for devices with a fixed number of virtqueues, since it is simple to implement and offers a degree of introspection. -Masters must not rely on the ``VHOST_USER_PROTOCOL_F_MQ`` protocol feature for +Front-ends must not rely on the ``VHOST_USER_PROTOCOL_F_MQ`` protocol feature for devices with a fixed number of virtqueues. Only true multiqueue devices require this protocol feature. Migration --------- -During live migration, the master may need to track the modifications -the slave makes to the memory mapped regions. The client should mark +During live migration, the front-end may need to track the modifications +the back-end makes to the memory mapped regions. The front-end should mark the dirty pages in a log. Once it complies to this logging, it may declare the ``VHOST_F_LOG_ALL`` vhost feature. -To start/stop logging of data/used ring writes, server may send +To start/stop logging of data/used ring writes, the front-end may send messages ``VHOST_USER_SET_FEATURES`` with ``VHOST_F_LOG_ALL`` and ``VHOST_USER_SET_VRING_ADDR`` with ``VHOST_VRING_F_LOG`` in ring's flags set to 1/0, respectively. @@ -404,7 +433,7 @@ Dirty pages are of size:: #define VHOST_LOG_PAGE 0x1000 The log memory fd is provided in the ancillary data of -``VHOST_USER_SET_LOG_BASE`` message when the slave has +``VHOST_USER_SET_LOG_BASE`` message when the back-end has ``VHOST_USER_PROTOCOL_F_LOG_SHMFD`` protocol feature. The size of the log is supplied as part of ``VhostUserMsg`` which @@ -430,26 +459,26 @@ the bit offset of the last byte of the ring must fall within the size supplied by ``VhostUserLog``. ``VHOST_USER_SET_LOG_FD`` is an optional message with an eventfd in -ancillary data, it may be used to inform the master that the log has +ancillary data, it may be used to inform the front-end that the log has been modified. Once the source has finished migration, rings will be stopped by the source. No further update must be done before rings are restarted. -In postcopy migration the slave is started before all the memory has +In postcopy migration the back-end is started before all the memory has been received from the source host, and care must be taken to avoid -accessing pages that have yet to be received. The slave opens a +accessing pages that have yet to be received. The back-end opens a 'userfault'-fd and registers the memory with it; this fd is then -passed back over to the master. The master services requests on the +passed back over to the front-end. The front-end services requests on the userfaultfd for pages that are accessed and when the page is available it performs WAKE ioctl's on the userfaultfd to wake the stalled -slave. The client indicates support for this via the +back-end. The front-end indicates support for this via the ``VHOST_USER_PROTOCOL_F_PAGEFAULT`` feature. Memory access ------------- -The master sends a list of vhost memory regions to the slave using the +The front-end sends a list of vhost memory regions to the back-end using the ``VHOST_USER_SET_MEM_TABLE`` message. Each region has two base addresses: a guest address and a user address. 
@@ -474,60 +503,60 @@ IOMMU support ------------- When the ``VIRTIO_F_IOMMU_PLATFORM`` feature has been negotiated, the -master sends IOTLB entries update & invalidation by sending -``VHOST_USER_IOTLB_MSG`` requests to the slave with a ``struct +front-end sends IOTLB entries update & invalidation by sending +``VHOST_USER_IOTLB_MSG`` requests to the back-end with a ``struct vhost_iotlb_msg`` as payload. For update events, the ``iotlb`` payload has to be filled with the update message type (2), the I/O virtual address, the size, the user virtual address, and the permissions flags. Addresses and size must be within vhost memory regions set via the ``VHOST_USER_SET_MEM_TABLE`` request. For invalidation events, the ``iotlb`` payload has to be filled with the invalidation message type -(3), the I/O virtual address and the size. On success, the slave is +(3), the I/O virtual address and the size. On success, the back-end is expected to reply with a zero payload, non-zero otherwise. -The slave relies on the slave communication channel (see :ref:`Slave -communication ` section below) to send IOTLB miss +The back-end relies on the back-end communication channel (see :ref:`Back-end +communication ` section below) to send IOTLB miss and access failure events, by sending ``VHOST_USER_SLAVE_IOTLB_MSG`` -requests to the master with a ``struct vhost_iotlb_msg`` as +requests to the front-end with a ``struct vhost_iotlb_msg`` as payload. For miss events, the iotlb payload has to be filled with the miss message type (1), the I/O virtual address and the permissions flags. For access failure event, the iotlb payload has to be filled with the access failure message type (4), the I/O virtual address and -the permissions flags. For synchronization purpose, the slave may -rely on the reply-ack feature, so the master may send a reply when +the permissions flags. For synchronization purpose, the back-end may +rely on the reply-ack feature, so the front-end may send a reply when operation is completed if the reply-ack feature is negotiated and -slaves requests a reply. For miss events, completed operation means -either master sent an update message containing the IOTLB entry -containing requested address and permission, or master sent nothing if +back-ends requests a reply. For miss events, completed operation means +either front-end sent an update message containing the IOTLB entry +containing requested address and permission, or front-end sent nothing if the IOTLB miss message is invalid (invalid IOVA or permission). -The master isn't expected to take the initiative to send IOTLB update -messages, as the slave sends IOTLB miss messages for the guest virtual +The front-end isn't expected to take the initiative to send IOTLB update +messages, as the back-end sends IOTLB miss messages for the guest virtual memory areas it needs to access. -.. _slave_communication: +.. _backend_communication: -Slave communication -------------------- +Back-end communication +---------------------- -An optional communication channel is provided if the slave declares +An optional communication channel is provided if the back-end declares ``VHOST_USER_PROTOCOL_F_SLAVE_REQ`` protocol feature, to allow the -slave to make requests to the master. +back-end to make requests to the front-end. The fd is provided via ``VHOST_USER_SET_SLAVE_REQ_FD`` ancillary data. -A slave may then send ``VHOST_USER_SLAVE_*`` messages to the master +A back-end may then send ``VHOST_USER_SLAVE_*`` messages to the front-end using this fd communication channel. 
If ``VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD`` protocol feature is -negotiated, slave can send file descriptors (at most 8 descriptors in -each message) to master via ancillary data using this fd communication +negotiated, back-end can send file descriptors (at most 8 descriptors in +each message) to front-end via ancillary data using this fd communication channel. Inflight I/O tracking --------------------- -To support reconnecting after restart or crash, slave may need to +To support reconnecting after restart or crash, back-end may need to resubmit inflight I/Os. If virtqueue is processed in order, we can easily achieve that by getting the inflight descriptors from descriptor table (split virtqueue) or descriptor ring (packed @@ -535,18 +564,18 @@ virtqueue). However, it can't work when we process descriptors out-of-order because some entries which store the information of inflight descriptors in available ring (split virtqueue) or descriptor ring (packed virtqueue) might be overridden by new entries. To solve -this problem, slave need to allocate an extra buffer to store this -information of inflight descriptors and share it with master for +this problem, the back-end need to allocate an extra buffer to store this +information of inflight descriptors and share it with front-end for persistent. ``VHOST_USER_GET_INFLIGHT_FD`` and ``VHOST_USER_SET_INFLIGHT_FD`` are used to transfer this buffer -between master and slave. And the format of this buffer is described +between front-end and back-end. And the format of this buffer is described below: +---------------+---------------+-----+---------------+ | queue0 region | queue1 region | ... | queueN region | +---------------+---------------+-----+---------------+ -N is the number of available virtqueues. Slave could get it from num +N is the number of available virtqueues. The back-end could get it from num queues field of ``VhostUserInflight``. For split virtqueue, queue region can be implemented as: @@ -578,8 +607,8 @@ For split virtqueue, queue region can be implemented as: * Zero value indicates an uninitialized buffer */ uint16_t version; - /* The size of DescStateSplit array. It's equal to the virtqueue - * size. Slave could get it from queue size field of VhostUserInflight. */ + /* The size of DescStateSplit array. It's equal to the virtqueue size. + * The back-end could get it from queue size field of VhostUserInflight. */ uint16_t desc_num; /* The head of list that track the last batch of used descriptors. */ @@ -685,8 +714,8 @@ For packed virtqueue, queue region can be implemented as: * Zero value indicates an uninitialized buffer */ uint16_t version; - /* The size of DescStatePacked array. It's equal to the virtqueue - * size. Slave could get it from queue size field of VhostUserInflight. */ + /* The size of DescStatePacked array. It's equal to the virtqueue size. + * The back-end could get it from queue size field of VhostUserInflight. */ uint16_t desc_num; /* The head of free DescStatePacked entry list */ @@ -778,7 +807,7 @@ When reconnecting: #. Use ``old_used_wrap_counter`` to calculate the available flags #. 
If ``d.flags`` is not equal to the calculated flags value (means - slave has submitted the buffer to guest driver before crash, so + back-end has submitted the buffer to guest driver before crash, so it has to commit the in-progres update), set ``old_free_head``, ``old_used_idx``, ``old_used_wrap_counter`` to ``free_head``, ``used_idx``, ``used_wrap_counter`` @@ -807,11 +836,11 @@ cause the sending application(s) to block, it is not advised to use this feature unless absolutely necessary. It is also considered an error to negotiate this feature without also negotiating ``VHOST_USER_PROTOCOL_F_SLAVE_REQ`` and ``VHOST_USER_PROTOCOL_F_REPLY_ACK``, -the former is necessary for getting a message channel from the slave -to the master, while the latter needs to be used with the in-band +the former is necessary for getting a message channel from the back-end +to the front-end, while the latter needs to be used with the in-band notification messages to block until they are processed, both to avoid blocking later and for proper processing (at least in the simulation -use case.) As it has no other way of signalling this error, the slave +use case.) As it has no other way of signalling this error, the back-end should close the connection as a response to a ``VHOST_USER_SET_PROTOCOL_FEATURES`` message that sets the in-band notifications feature flag without the other two. @@ -839,95 +868,101 @@ Protocol features #define VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS 15 #define VHOST_USER_PROTOCOL_F_STATUS 16 -Master message types --------------------- +Front-end message types +----------------------- ``VHOST_USER_GET_FEATURES`` :id: 1 :equivalent ioctl: ``VHOST_GET_FEATURES`` - :master payload: N/A - :slave payload: ``u64`` + :request payload: N/A + :reply payload: ``u64`` Get from the underlying vhost implementation the features bitmask. - Feature bit ``VHOST_USER_F_PROTOCOL_FEATURES`` signals slave support + Feature bit ``VHOST_USER_F_PROTOCOL_FEATURES`` signals back-end support for ``VHOST_USER_GET_PROTOCOL_FEATURES`` and ``VHOST_USER_SET_PROTOCOL_FEATURES``. ``VHOST_USER_SET_FEATURES`` :id: 2 :equivalent ioctl: ``VHOST_SET_FEATURES`` - :master payload: ``u64`` + :request payload: ``u64`` + :reply payload: N/A Enable features in the underlying vhost implementation using a bitmask. Feature bit ``VHOST_USER_F_PROTOCOL_FEATURES`` signals - slave support for ``VHOST_USER_GET_PROTOCOL_FEATURES`` and + back-end support for ``VHOST_USER_GET_PROTOCOL_FEATURES`` and ``VHOST_USER_SET_PROTOCOL_FEATURES``. ``VHOST_USER_GET_PROTOCOL_FEATURES`` :id: 15 :equivalent ioctl: ``VHOST_GET_FEATURES`` - :master payload: N/A - :slave payload: ``u64`` + :request payload: N/A + :reply payload: ``u64`` Get the protocol feature bitmask from the underlying vhost implementation. Only legal if feature bit ``VHOST_USER_F_PROTOCOL_FEATURES`` is present in - ``VHOST_USER_GET_FEATURES``. + ``VHOST_USER_GET_FEATURES``. It does not need to be acknowledged by + ``VHOST_USER_SET_FEATURES``. .. Note:: - Slave that reported ``VHOST_USER_F_PROTOCOL_FEATURES`` must + Back-ends that report ``VHOST_USER_F_PROTOCOL_FEATURES`` must support this message even before ``VHOST_USER_SET_FEATURES`` was called. ``VHOST_USER_SET_PROTOCOL_FEATURES`` :id: 16 :equivalent ioctl: ``VHOST_SET_FEATURES`` - :master payload: ``u64`` + :request payload: ``u64`` + :reply payload: N/A Enable protocol features in the underlying vhost implementation. Only legal if feature bit ``VHOST_USER_F_PROTOCOL_FEATURES`` is present in - ``VHOST_USER_GET_FEATURES``. 
+ ``VHOST_USER_GET_FEATURES``. It does not need to be acknowledged by + ``VHOST_USER_SET_FEATURES``. .. Note:: - Slave that reported ``VHOST_USER_F_PROTOCOL_FEATURES`` must support + Back-ends that report ``VHOST_USER_F_PROTOCOL_FEATURES`` must support this message even before ``VHOST_USER_SET_FEATURES`` was called. ``VHOST_USER_SET_OWNER`` :id: 3 :equivalent ioctl: ``VHOST_SET_OWNER`` - :master payload: N/A + :request payload: N/A + :reply payload: N/A - Issued when a new connection is established. It sets the current - *master* as an owner of the session. This can be used on the *slave* + Issued when a new connection is established. It marks the sender + as the front-end that owns of the session. This can be used on the *back-end* as a "session start" flag. ``VHOST_USER_RESET_OWNER`` :id: 4 - :master payload: N/A + :request payload: N/A + :reply payload: N/A .. admonition:: Deprecated This is no longer used. Used to be sent to request disabling all - rings, but some clients interpreted it to also discard connection + rings, but some back-ends interpreted it to also discard connection state (this interpretation would lead to bugs). It is recommended - that clients either ignore this message, or use it to disable all + that back-ends either ignore this message, or use it to disable all rings. ``VHOST_USER_SET_MEM_TABLE`` :id: 5 :equivalent ioctl: ``VHOST_SET_MEM_TABLE`` - :master payload: memory regions description - :slave payload: (postcopy only) memory regions description + :request payload: memory regions description + :reply payload: (postcopy only) memory regions description - Sets the memory map regions on the slave so it can translate the + Sets the memory map regions on the back-end so it can translate the vring addresses. In the ancillary data there is an array of file descriptors for each memory mapped region. The size and ordering of the fds matches the number and ordering of memory regions. When ``VHOST_USER_POSTCOPY_LISTEN`` has been received, ``SET_MEM_TABLE`` replies with the bases of the memory mapped - regions to the master. The slave must have mmap'd the regions but + regions to the front-end. The back-end must have mmap'd the regions but not yet accessed them and should not yet generate a userfault event. @@ -941,12 +976,12 @@ Master message types ``VHOST_USER_SET_LOG_BASE`` :id: 6 :equivalent ioctl: ``VHOST_SET_LOG_BASE`` - :master payload: u64 - :slave payload: N/A + :request payload: u64 + :reply payload: N/A Sets logging shared memory space. - When slave has ``VHOST_USER_PROTOCOL_F_LOG_SHMFD`` protocol feature, + When the back-end has ``VHOST_USER_PROTOCOL_F_LOG_SHMFD`` protocol feature, the log memory fd is provided in the ancillary data of ``VHOST_USER_SET_LOG_BASE`` message, the size and offset of shared memory area provided in the message. @@ -954,44 +989,48 @@ Master message types ``VHOST_USER_SET_LOG_FD`` :id: 7 :equivalent ioctl: ``VHOST_SET_LOG_FD`` - :master payload: N/A + :request payload: N/A + :reply payload: N/A Sets the logging file descriptor, which is passed as ancillary data. ``VHOST_USER_SET_VRING_NUM`` :id: 8 :equivalent ioctl: ``VHOST_SET_VRING_NUM`` - :master payload: vring state description + :request payload: vring state description + :reply payload: N/A Set the size of the queue. 
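All of these front-end messages share the same framing: a small header carrying the request type, flags and payload size, optionally followed by a payload and ancillary data. As a rough, non-normative sketch only (the helper name, the version value and the error handling are assumptions made for this example, and short reads/writes are not handled), a front-end could fetch the feature bitmask with ``VHOST_USER_GET_FEATURES`` roughly like this::

    /* Illustrative front-end side sketch, not QEMU code: send
     * VHOST_USER_GET_FEATURES on an already connected vhost-user
     * socket and read the u64 reply. */
    #include <stdint.h>
    #include <unistd.h>

    #define VHOST_USER_GET_FEATURES  1      /* :id: 1 above */
    #define VHOST_USER_VERSION       0x1    /* assumed version bits */

    struct vhost_user_hdr {
        uint32_t request;
        uint32_t flags;
        uint32_t size;                      /* payload size in bytes */
    };

    static int get_features(int sock, uint64_t *features)
    {
        struct vhost_user_hdr hdr = {
            .request = VHOST_USER_GET_FEATURES,
            .flags   = VHOST_USER_VERSION,
            .size    = 0,                   /* no request payload */
        };

        if (write(sock, &hdr, sizeof(hdr)) != sizeof(hdr)) {
            return -1;
        }
        /* The reply reuses the same header, followed by a u64 payload. */
        if (read(sock, &hdr, sizeof(hdr)) != sizeof(hdr) ||
            hdr.size != sizeof(*features)) {
            return -1;
        }
        if (read(sock, features, sizeof(*features)) != sizeof(*features)) {
            return -1;
        }
        return 0;
    }

Replies that carry data, such as the ``u64`` above, reuse the same header with ``size`` describing the payload that follows.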
``VHOST_USER_SET_VRING_ADDR`` :id: 9 :equivalent ioctl: ``VHOST_SET_VRING_ADDR`` - :master payload: vring address description - :slave payload: N/A + :request payload: vring address description + :reply payload: N/A Sets the addresses of the different aspects of the vring. ``VHOST_USER_SET_VRING_BASE`` :id: 10 :equivalent ioctl: ``VHOST_SET_VRING_BASE`` - :master payload: vring state description + :request payload: vring state description + :reply payload: N/A Sets the base offset in the available vring. ``VHOST_USER_GET_VRING_BASE`` :id: 11 :equivalent ioctl: ``VHOST_USER_GET_VRING_BASE`` - :master payload: vring state description - :slave payload: vring state description + :request payload: vring state description + :reply payload: vring state description Get the available vring base offset. ``VHOST_USER_SET_VRING_KICK`` :id: 12 :equivalent ioctl: ``VHOST_SET_VRING_KICK`` - :master payload: ``u64`` + :request payload: ``u64`` + :reply payload: N/A Set the event file descriptor for adding buffers to the vring. It is passed in the ancillary data. @@ -1009,7 +1048,8 @@ Master message types ``VHOST_USER_SET_VRING_CALL`` :id: 13 :equivalent ioctl: ``VHOST_SET_VRING_CALL`` - :master payload: ``u64`` + :request payload: ``u64`` + :reply payload: N/A Set the event file descriptor to signal when buffers are used. It is passed in the ancillary data. @@ -1027,7 +1067,8 @@ Master message types ``VHOST_USER_SET_VRING_ERR`` :id: 14 :equivalent ioctl: ``VHOST_SET_VRING_ERR`` - :master payload: ``u64`` + :request payload: ``u64`` + :reply payload: N/A Set the event file descriptor to signal when error occurs. It is passed in the ancillary data. @@ -1044,10 +1085,10 @@ Master message types ``VHOST_USER_GET_QUEUE_NUM`` :id: 17 :equivalent ioctl: N/A - :master payload: N/A - :slave payload: u64 + :request payload: N/A + :reply payload: u64 - Query how many queues the backend supports. + Query how many queues the back-end supports. This request should be sent only when ``VHOST_USER_PROTOCOL_F_MQ`` is set in queried protocol features by @@ -1056,9 +1097,10 @@ Master message types ``VHOST_USER_SET_VRING_ENABLE`` :id: 18 :equivalent ioctl: N/A - :master payload: vring state description + :request payload: vring state description + :reply payload: N/A - Signal slave to enable or disable corresponding vring. + Signal the back-end to enable or disable corresponding vring. This request should be sent only when ``VHOST_USER_F_PROTOCOL_FEATURES`` has been negotiated. @@ -1066,9 +1108,10 @@ Master message types ``VHOST_USER_SEND_RARP`` :id: 19 :equivalent ioctl: N/A - :master payload: ``u64`` + :request payload: ``u64`` + :reply payload: N/A - Ask vhost user backend to broadcast a fake RARP to notify the migration + Ask vhost user back-end to broadcast a fake RARP to notify the migration is terminated for guest that does not support GUEST_ANNOUNCE. Only legal if feature bit ``VHOST_USER_F_PROTOCOL_FEATURES`` is @@ -1076,12 +1119,13 @@ Master message types ``VHOST_USER_PROTOCOL_F_RARP`` is present in ``VHOST_USER_GET_PROTOCOL_FEATURES``. The first 6 bytes of the payload contain the mac address of the guest to allow the vhost user - backend to construct and broadcast the fake RARP. + back-end to construct and broadcast the fake RARP. ``VHOST_USER_NET_SET_MTU`` :id: 20 :equivalent ioctl: N/A - :master payload: ``u64`` + :request payload: ``u64`` + :reply payload: N/A Set host MTU value exposed to the guest. 
@@ -1091,35 +1135,36 @@ Master message types ``VHOST_USER_PROTOCOL_F_NET_MTU`` is present in ``VHOST_USER_GET_PROTOCOL_FEATURES``. - If ``VHOST_USER_PROTOCOL_F_REPLY_ACK`` is negotiated, slave must + If ``VHOST_USER_PROTOCOL_F_REPLY_ACK`` is negotiated, the back-end must respond with zero in case the specified MTU is valid, or non-zero otherwise. ``VHOST_USER_SET_SLAVE_REQ_FD`` :id: 21 :equivalent ioctl: N/A - :master payload: N/A + :request payload: N/A + :reply payload: N/A - Set the socket file descriptor for slave initiated requests. It is passed + Set the socket file descriptor for back-end initiated requests. It is passed in the ancillary data. This request should be sent only when ``VHOST_USER_F_PROTOCOL_FEATURES`` has been negotiated, and protocol feature bit ``VHOST_USER_PROTOCOL_F_SLAVE_REQ`` bit is present in ``VHOST_USER_GET_PROTOCOL_FEATURES``. If - ``VHOST_USER_PROTOCOL_F_REPLY_ACK`` is negotiated, slave must + ``VHOST_USER_PROTOCOL_F_REPLY_ACK`` is negotiated, the back-end must respond with zero for success, non-zero otherwise. ``VHOST_USER_IOTLB_MSG`` :id: 22 :equivalent ioctl: N/A (equivalent to ``VHOST_IOTLB_MSG`` message type) - :master payload: ``struct vhost_iotlb_msg`` - :slave payload: ``u64`` + :request payload: ``struct vhost_iotlb_msg`` + :reply payload: ``u64`` Send IOTLB messages with ``struct vhost_iotlb_msg`` as payload. - Master sends such requests to update and invalidate entries in the - device IOTLB. The slave has to acknowledge the request with sending + The front-end sends such requests to update and invalidate entries in the + device IOTLB. The back-end has to acknowledge the request with sending zero as ``u64`` payload for success, non-zero otherwise. This request should be send only when ``VIRTIO_F_IOMMU_PLATFORM`` @@ -1128,7 +1173,8 @@ Master message types ``VHOST_USER_SET_VRING_ENDIAN`` :id: 23 :equivalent ioctl: ``VHOST_SET_VRING_ENDIAN`` - :master payload: vring state description + :request payload: vring state description + :reply payload: N/A Set the endianness of a VQ for legacy devices. Little-endian is indicated with state.num set to 0 and big-endian is indicated with @@ -1138,42 +1184,42 @@ Master message types ``VHOST_USER_PROTOCOL_F_CROSS_ENDIAN`` has been negotiated. Backends that negotiated this feature should handle both endiannesses and expect this message once (per VQ) during device - configuration (ie. before the master starts the VQ). + configuration (ie. before the front-end starts the VQ). ``VHOST_USER_GET_CONFIG`` :id: 24 :equivalent ioctl: N/A - :master payload: virtio device config space - :slave payload: virtio device config space + :request payload: virtio device config space + :reply payload: virtio device config space When ``VHOST_USER_PROTOCOL_F_CONFIG`` is negotiated, this message is - submitted by the vhost-user master to fetch the contents of the - virtio device configuration space, vhost-user slave's payload size - MUST match master's request, vhost-user slave uses zero length of - payload to indicate an error to vhost-user master. The vhost-user - master may cache the contents to avoid repeated + submitted by the vhost-user front-end to fetch the contents of the + virtio device configuration space, vhost-user back-end's payload size + MUST match the front-end's request, vhost-user back-end uses zero length of + payload to indicate an error to the vhost-user front-end. The vhost-user + front-end may cache the contents to avoid repeated ``VHOST_USER_GET_CONFIG`` calls. 
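Several of the messages above and below (``VHOST_USER_SET_MEM_TABLE``, ``VHOST_USER_SET_LOG_BASE``, ``VHOST_USER_SET_VRING_KICK``/``CALL``/``ERR``, ``VHOST_USER_SET_SLAVE_REQ_FD``) carry file descriptors as ancillary data on the Unix socket. A minimal, illustrative receive-side sketch follows; the helper name and the region-count constant are assumptions made for the example, not part of the protocol::

    /* Illustrative back-end side sketch, not QEMU code: receive a
     * vhost-user message together with any file descriptors passed
     * as SCM_RIGHTS ancillary data. */
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    #define MAX_FDS_PER_MSG 8   /* at most 8 descriptors per message */

    static ssize_t recv_msg_with_fds(int sock, void *buf, size_t len,
                                     int *fds, size_t *nfds)
    {
        char control[CMSG_SPACE(MAX_FDS_PER_MSG * sizeof(int))];
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct msghdr msg = {
            .msg_iov = &iov, .msg_iovlen = 1,
            .msg_control = control, .msg_controllen = sizeof(control),
        };
        ssize_t n = recvmsg(sock, &msg, 0);
        struct cmsghdr *cmsg;

        *nfds = 0;
        for (cmsg = CMSG_FIRSTHDR(&msg); n > 0 && cmsg;
             cmsg = CMSG_NXTHDR(&msg, cmsg)) {
            if (cmsg->cmsg_level == SOL_SOCKET &&
                cmsg->cmsg_type == SCM_RIGHTS) {
                /* Copy out however many descriptors were attached. */
                *nfds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
                memcpy(fds, CMSG_DATA(cmsg), *nfds * sizeof(int));
            }
        }
        return n;
    }

A real back-end would additionally validate the number of received descriptors against the message type it just parsed.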
``VHOST_USER_SET_CONFIG`` :id: 25 :equivalent ioctl: N/A - :master payload: virtio device config space - :slave payload: N/A + :request payload: virtio device config space + :reply payload: N/A When ``VHOST_USER_PROTOCOL_F_CONFIG`` is negotiated, this message is - submitted by the vhost-user master when the Guest changes the virtio + submitted by the vhost-user front-end when the Guest changes the virtio device configuration space and also can be used for live migration - on the destination host. The vhost-user slave must check the flags - field, and slaves MUST NOT accept SET_CONFIG for read-only + on the destination host. The vhost-user back-end must check the flags + field, and back-ends MUST NOT accept SET_CONFIG for read-only configuration space fields unless the live migration bit is set. ``VHOST_USER_CREATE_CRYPTO_SESSION`` :id: 26 :equivalent ioctl: N/A - :master payload: crypto session description - :slave payload: crypto session description + :request payload: crypto session description + :reply payload: crypto session description - Create a session for crypto operation. The server side must return + Create a session for crypto operation. The back-end must return the session id, 0 or positive for success, negative for failure. This request should be sent only when ``VHOST_USER_PROTOCOL_F_CRYPTO_SESSION`` feature has been @@ -1183,7 +1229,8 @@ Master message types ``VHOST_USER_CLOSE_CRYPTO_SESSION`` :id: 27 :equivalent ioctl: N/A - :master payload: ``u64`` + :request payload: ``u64`` + :reply payload: N/A Close a session for crypto operation which was previously created by ``VHOST_USER_CREATE_CRYPTO_SESSION``. @@ -1195,20 +1242,21 @@ Master message types ``VHOST_USER_POSTCOPY_ADVISE`` :id: 28 - :master payload: N/A - :slave payload: userfault fd + :request payload: N/A + :reply payload: userfault fd - When ``VHOST_USER_PROTOCOL_F_PAGEFAULT`` is supported, the master - advises slave that a migration with postcopy enabled is underway, - the slave must open a userfaultfd for later use. Note that at this + When ``VHOST_USER_PROTOCOL_F_PAGEFAULT`` is supported, the front-end + advises back-end that a migration with postcopy enabled is underway, + the back-end must open a userfaultfd for later use. Note that at this stage the migration is still in precopy mode. ``VHOST_USER_POSTCOPY_LISTEN`` :id: 29 - :master payload: N/A + :request payload: N/A + :reply payload: N/A - Master advises slave that a transition to postcopy mode has - happened. The slave must ensure that shared memory is registered + The front-end advises back-end that a transition to postcopy mode has + happened. The back-end must ensure that shared memory is registered with userfaultfd to cause faulting of non-present pages. This is always sent sometime after a ``VHOST_USER_POSTCOPY_ADVISE``, @@ -1216,10 +1264,11 @@ Master message types ``VHOST_USER_POSTCOPY_END`` :id: 30 - :slave payload: ``u64`` + :request payload: N/A + :reply payload: ``u64`` - Master advises that postcopy migration has now completed. The slave - must disable the userfaultfd. The response is an acknowledgement + The front-end advises that postcopy migration has now completed. The back-end + must disable the userfaultfd. The reply is an acknowledgement only. 
When ``VHOST_USER_PROTOCOL_F_PAGEFAULT`` is supported, this message @@ -1231,140 +1280,165 @@ Master message types ``VHOST_USER_GET_INFLIGHT_FD`` :id: 31 :equivalent ioctl: N/A - :master payload: inflight description + :request payload: inflight description + :reply payload: N/A When ``VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD`` protocol feature has - been successfully negotiated, this message is submitted by master to - get a shared buffer from slave. The shared buffer will be used to - track inflight I/O by slave. QEMU should retrieve a new one when vm + been successfully negotiated, this message is submitted by the front-end to + get a shared buffer from back-end. The shared buffer will be used to + track inflight I/O by back-end. QEMU should retrieve a new one when vm reset. ``VHOST_USER_SET_INFLIGHT_FD`` :id: 32 :equivalent ioctl: N/A - :master payload: inflight description + :request payload: inflight description + :reply payload: N/A When ``VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD`` protocol feature has - been successfully negotiated, this message is submitted by master to - send the shared inflight buffer back to slave so that slave could - get inflight I/O after a crash or restart. + been successfully negotiated, this message is submitted by the front-end to + send the shared inflight buffer back to the back-end so that the back-end + could get inflight I/O after a crash or restart. ``VHOST_USER_GPU_SET_SOCKET`` :id: 33 :equivalent ioctl: N/A - :master payload: N/A + :request payload: N/A + :reply payload: N/A Sets the GPU protocol socket file descriptor, which is passed as - ancillary data. The GPU protocol is used to inform the master of + ancillary data. The GPU protocol is used to inform the front-end of rendering state and updates. See vhost-user-gpu.rst for details. ``VHOST_USER_RESET_DEVICE`` :id: 34 :equivalent ioctl: N/A - :master payload: N/A - :slave payload: N/A + :request payload: N/A + :reply payload: N/A - Ask the vhost user backend to disable all rings and reset all + Ask the vhost user back-end to disable all rings and reset all internal device state to the initial state, ready to be - reinitialized. The backend retains ownership of the device + reinitialized. The back-end retains ownership of the device throughout the reset operation. Only valid if the ``VHOST_USER_PROTOCOL_F_RESET_DEVICE`` protocol - feature is set by the backend. + feature is set by the back-end. ``VHOST_USER_VRING_KICK`` :id: 35 :equivalent ioctl: N/A - :slave payload: vring state description - :master payload: N/A + :request payload: vring state description + :reply payload: N/A When the ``VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS`` protocol feature has been successfully negotiated, this message may be - submitted by the master to indicate that a buffer was added to + submitted by the front-end to indicate that a buffer was added to the vring instead of signalling it using the vring's kick file - descriptor or having the slave rely on polling. + descriptor or having the back-end rely on polling. The state.num field is currently reserved and must be set to 0. ``VHOST_USER_GET_MAX_MEM_SLOTS`` :id: 36 :equivalent ioctl: N/A - :slave payload: u64 + :request payload: N/A + :reply payload: u64 When the ``VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS`` protocol feature has been successfully negotiated, this message is submitted - by master to the slave. The slave should return the message with a + by the front-end to the back-end. 
The back-end should return the message with a u64 payload containing the maximum number of memory slots for - QEMU to expose to the guest. The value returned by the backend + QEMU to expose to the guest. The value returned by the back-end will be capped at the maximum number of ram slots which can be supported by the target platform. ``VHOST_USER_ADD_MEM_REG`` :id: 37 :equivalent ioctl: N/A - :slave payload: single memory region description + :request payload: N/A + :reply payload: single memory region description When the ``VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS`` protocol feature has been successfully negotiated, this message is submitted - by the master to the slave. The message payload contains a memory + by the front-end to the back-end. The message payload contains a memory region descriptor struct, describing a region of guest memory which - the slave device must map in. When the + the back-end device must map in. When the ``VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS`` protocol feature has been successfully negotiated, along with the ``VHOST_USER_REM_MEM_REG`` message, this message is used to set and - update the memory tables of the slave device. + update the memory tables of the back-end device. + + Exactly one file descriptor from which the memory is mapped is + passed in the ancillary data. + + In postcopy mode (see ``VHOST_USER_POSTCOPY_LISTEN``), the back-end + replies with the bases of the memory mapped region to the front-end. + For further details on postcopy, see ``VHOST_USER_SET_MEM_TABLE``. + They apply to ``VHOST_USER_ADD_MEM_REG`` accordingly. ``VHOST_USER_REM_MEM_REG`` :id: 38 :equivalent ioctl: N/A - :slave payload: single memory region description + :request payload: N/A + :reply payload: single memory region description When the ``VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS`` protocol feature has been successfully negotiated, this message is submitted - by the master to the slave. The message payload contains a memory + by the front-end to the back-end. The message payload contains a memory region descriptor struct, describing a region of guest memory which - the slave device must unmap. When the + the back-end device must unmap. When the ``VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS`` protocol feature has been successfully negotiated, along with the ``VHOST_USER_ADD_MEM_REG`` message, this message is used to set and - update the memory tables of the slave device. + update the memory tables of the back-end device. + + The memory region to be removed is identified by its guest address, + user address and size. The mmap offset is ignored. + + No file descriptors SHOULD be passed in the ancillary data. For + compatibility with existing incorrect implementations, the back-end MAY + accept messages with one file descriptor. If a file descriptor is + passed, the back-end MUST close it without using it otherwise. ``VHOST_USER_SET_STATUS`` :id: 39 :equivalent ioctl: VHOST_VDPA_SET_STATUS - :slave payload: N/A - :master payload: ``u64`` + :request payload: ``u64`` + :reply payload: N/A When the ``VHOST_USER_PROTOCOL_F_STATUS`` protocol feature has been - successfully negotiated, this message is submitted by the master to - notify the backend with updated device status as defined in the Virtio + successfully negotiated, this message is submitted by the front-end to + notify the back-end with updated device status as defined in the Virtio specification. 
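A back-end that supports ``VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS`` ends up maintaining one mapping per ``VHOST_USER_ADD_MEM_REG`` message. The following is only a minimal sketch of that handling, assuming a region descriptor with guest address, size, user address and mmap offset fields as described earlier in this document; the struct and helper names are invented for the example::

    /* Illustrative back-end handling of VHOST_USER_ADD_MEM_REG,
     * not QEMU code. */
    #include <stdint.h>
    #include <sys/mman.h>

    struct mem_region_desc {
        uint64_t guest_addr;     /* guest physical address */
        uint64_t size;           /* region size */
        uint64_t user_addr;      /* front-end (QEMU) virtual address */
        uint64_t mmap_offset;    /* offset into the passed fd */
    };

    struct mapped_region {
        struct mem_region_desc d;
        void *raw;               /* raw mapping, for later munmap() */
        uint8_t *base;           /* start of the region's contents */
    };

    static int add_mem_region(const struct mem_region_desc *d, int fd,
                              struct mapped_region *out)
    {
        /* Exactly one fd arrives in the ancillary data of this message.
         * Map from offset 0 so the mapping stays page aligned, then add
         * mmap_offset when locating the region's contents. */
        void *raw = mmap(NULL, d->mmap_offset + d->size,
                         PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (raw == MAP_FAILED) {
            return -1;
        }
        out->d = *d;
        out->raw = raw;
        out->base = (uint8_t *)raw + d->mmap_offset;
        /* Front-end virtual addresses in later messages translate as:
         * local = base + (addr - user_addr). */
        return 0;
    }

``VHOST_USER_REM_MEM_REG`` then undoes exactly one such mapping, matched by guest address, user address and size as described above.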
``VHOST_USER_GET_STATUS`` :id: 40 :equivalent ioctl: VHOST_VDPA_GET_STATUS - :slave payload: ``u64`` - :master payload: N/A + :request payload: N/A + :reply payload: ``u64`` When the ``VHOST_USER_PROTOCOL_F_STATUS`` protocol feature has been - successfully negotiated, this message is submitted by the master to - query the backend for its device status as defined in the Virtio + successfully negotiated, this message is submitted by the front-end to + query the back-end for its device status as defined in the Virtio specification. -Slave message types -------------------- +Back-end message types +---------------------- + +For this type of message, the request is sent by the back-end and the reply +is sent by the front-end. ``VHOST_USER_SLAVE_IOTLB_MSG`` :id: 1 :equivalent ioctl: N/A (equivalent to ``VHOST_IOTLB_MSG`` message type) - :slave payload: ``struct vhost_iotlb_msg`` - :master payload: N/A + :request payload: ``struct vhost_iotlb_msg`` + :reply payload: N/A Send IOTLB messages with ``struct vhost_iotlb_msg`` as payload. - Slave sends such requests to notify of an IOTLB miss, or an IOTLB + The back-end sends such requests to notify of an IOTLB miss, or an IOTLB access failure. If ``VHOST_USER_PROTOCOL_F_REPLY_ACK`` is - negotiated, and slave set the ``VHOST_USER_NEED_REPLY`` flag, master + negotiated, and back-end set the ``VHOST_USER_NEED_REPLY`` flag, the front-end must respond with zero when operation is successfully completed, or non-zero otherwise. This request should be send only when ``VIRTIO_F_IOMMU_PLATFORM`` feature has been successfully @@ -1373,23 +1447,23 @@ Slave message types ``VHOST_USER_SLAVE_CONFIG_CHANGE_MSG`` :id: 2 :equivalent ioctl: N/A - :slave payload: N/A - :master payload: N/A + :request payload: N/A + :reply payload: N/A When ``VHOST_USER_PROTOCOL_F_CONFIG`` is negotiated, vhost-user - slave sends such messages to notify that the virtio device's + back-end sends such messages to notify that the virtio device's configuration space has changed, for those host devices which can support such feature, host driver can send ``VHOST_USER_GET_CONFIG`` - message to slave to get the latest content. If - ``VHOST_USER_PROTOCOL_F_REPLY_ACK`` is negotiated, and slave set the - ``VHOST_USER_NEED_REPLY`` flag, master must respond with zero when + message to the back-end to get the latest content. If + ``VHOST_USER_PROTOCOL_F_REPLY_ACK`` is negotiated, and the back-end sets the + ``VHOST_USER_NEED_REPLY`` flag, the front-end must respond with zero when operation is successfully completed, or non-zero otherwise. ``VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG`` :id: 3 :equivalent ioctl: N/A - :slave payload: vring area description - :master payload: N/A + :request payload: vring area description + :reply payload: N/A Sets host notifier for a specified queue. The queue index is contained in the ``u64`` field of the vring area description. The @@ -1400,7 +1474,7 @@ Slave message types description. QEMU can mmap the file descriptor based on the size and offset to get a memory range. Registering a host notifier means mapping this memory range to the VM as the specified queue's notify - MMIO region. Slave sends this request to tell QEMU to de-register + MMIO region. The back-end sends this request to tell QEMU to de-register the existing notifier if any and register the new notifier if the request is sent with a file descriptor. 
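Back-end messages such as ``VHOST_USER_SLAVE_CONFIG_CHANGE_MSG`` above travel over the channel established with ``VHOST_USER_SET_SLAVE_REQ_FD`` and use the same header framing as front-end messages. As a hedged illustration only (the message ID and the ``need_reply`` [Bit 3] flag follow this document, while the version value, names and error handling are assumptions for the example), a back-end might notify a configuration space change like this::

    /* Illustrative back-end sketch, not QEMU code: raise
     * VHOST_USER_SLAVE_CONFIG_CHANGE_MSG and optionally wait for the
     * REPLY_ACK-style acknowledgement from the front-end. */
    #include <stdint.h>
    #include <unistd.h>

    #define VHOST_USER_SLAVE_CONFIG_CHANGE_MSG 2        /* :id: 2 above */
    #define VHOST_USER_VERSION                 0x1      /* assumed */
    #define VHOST_USER_NEED_REPLY              (1u << 3)

    struct vhost_user_hdr {
        uint32_t request;
        uint32_t flags;
        uint32_t size;
    };

    static int notify_config_change(int backend_sock, int want_reply)
    {
        struct vhost_user_hdr hdr = {
            .request = VHOST_USER_SLAVE_CONFIG_CHANGE_MSG,
            .flags   = VHOST_USER_VERSION |
                       (want_reply ? VHOST_USER_NEED_REPLY : 0),
            .size    = 0,
        };
        uint64_t ack;

        if (write(backend_sock, &hdr, sizeof(hdr)) != sizeof(hdr)) {
            return -1;
        }
        if (!want_reply) {
            return 0;
        }
        /* With VHOST_USER_PROTOCOL_F_REPLY_ACK negotiated, the front-end
         * answers with a u64 payload: zero on success, non-zero on error. */
        if (read(backend_sock, &hdr, sizeof(hdr)) != sizeof(hdr) ||
            hdr.size != sizeof(ack) ||
            read(backend_sock, &ack, sizeof(ack)) != sizeof(ack)) {
            return -1;
        }
        return ack == 0 ? 0 : -1;
    }

Without ``VHOST_USER_PROTOCOL_F_REPLY_ACK`` negotiated, or without the ``need_reply`` flag set, the back-end must not expect a reply to this message.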
@@ -1411,28 +1485,28 @@ Slave message types ``VHOST_USER_SLAVE_VRING_CALL`` :id: 4 :equivalent ioctl: N/A - :slave payload: vring state description - :master payload: N/A + :request payload: vring state description + :reply payload: N/A When the ``VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS`` protocol feature has been successfully negotiated, this message may be - submitted by the slave to indicate that a buffer was used from + submitted by the back-end to indicate that a buffer was used from the vring instead of signalling this using the vring's call file - descriptor or having the master relying on polling. + descriptor or having the front-end relying on polling. The state.num field is currently reserved and must be set to 0. ``VHOST_USER_SLAVE_VRING_ERR`` :id: 5 :equivalent ioctl: N/A - :slave payload: vring state description - :master payload: N/A + :request payload: vring state description + :reply payload: N/A When the ``VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS`` protocol feature has been successfully negotiated, this message may be - submitted by the slave to indicate that an error occurred on the + submitted by the back-end to indicate that an error occurred on the specific vring, instead of signalling the error file descriptor - set by the master via ``VHOST_USER_SET_VRING_ERR``. + set by the front-end via ``VHOST_USER_SET_VRING_ERR``. The state.num field is currently reserved and must be set to 0. @@ -1443,21 +1517,21 @@ VHOST_USER_PROTOCOL_F_REPLY_ACK The original vhost-user specification only demands replies for certain commands. This differs from the vhost protocol implementation where -commands are sent over an ``ioctl()`` call and block until the client +commands are sent over an ``ioctl()`` call and block until the back-end has completed. With this protocol extension negotiated, the sender (QEMU) can set the ``need_reply`` [Bit 3] flag to any command. This indicates that the -client MUST respond with a Payload ``VhostUserMsg`` indicating success +back-end MUST respond with a Payload ``VhostUserMsg`` indicating success or failure. The payload should be set to zero on success or non-zero on failure, unless the message already has an explicit reply body. -The response payload gives QEMU a deterministic indication of the result +The reply payload gives QEMU a deterministic indication of the result of the command. Today, QEMU is expected to terminate the main vhost-user loop upon receiving such errors. In future, qemu could be taught to be more resilient for selective requests. -For the message types that already solicit a reply from the client, +For the message types that already solicit a reply from the back-end, the presence of ``VHOST_USER_PROTOCOL_F_REPLY_ACK`` or need_reply bit being set brings no behavioural change. (See the Communication_ section for details.) @@ -1467,26 +1541,26 @@ section for details.) Backend program conventions =========================== -vhost-user backends can provide various devices & services and may +vhost-user back-ends can provide various devices & services and may need to be configured manually depending on the use case. However, it is a good idea to follow the conventions listed here when possible. Users, QEMU or libvirt, can then rely on some common behaviour to avoid heterogeneous configuration and management of the -backend programs and facilitate interoperability. +back-end programs and facilitate interoperability. 
-Each backend installed on a host system should come with at least one +Each back-end installed on a host system should come with at least one JSON file that conforms to the vhost-user.json schema. Each file -informs the management applications about the backend type, and binary +informs the management applications about the back-end type, and binary location. In addition, it defines rules for management apps for -picking the highest priority backend when multiple match the search +picking the highest priority back-end when multiple match the search criteria (see ``@VhostUserBackend`` documentation in the schema file). -If the backend is not capable of enabling a requested feature on the +If the back-end is not capable of enabling a requested feature on the host (such as 3D acceleration with virgl), or the initialization -failed, the backend should fail to start early and exit with a status +failed, the back-end should fail to start early and exit with a status != 0. It may also print a message to stderr for further details. -The backend program must not daemonize itself, but it may be +The back-end program must not daemonize itself, but it may be daemonized by the management layer. It may also have a restricted access to the system. @@ -1494,7 +1568,7 @@ File descriptors 0, 1 and 2 will exist, and have regular stdin/stdout/stderr usage (they may have been redirected to /dev/null by the management layer, or to a log handler). -The backend program must end (as quickly and cleanly as possible) when +The back-end program must end (as quickly and cleanly as possible) when the SIGTERM signal is received. Eventually, it may receive SIGKILL by the management layer after a few seconds. @@ -1508,15 +1582,15 @@ are mandatory, unless explicitly said differently: --fd=FDNUM - When this argument is given, the backend program is started with the + When this argument is given, the back-end program is started with the vhost-user socket as file descriptor FDNUM. It is incompatible with --socket-path. --print-capabilities - Output to stdout the backend capabilities in JSON format, and then + Output to stdout the back-end capabilities in JSON format, and then exit successfully. Other options and arguments should be ignored, and - the backend program should not perform its normal function. The + the back-end program should not perform its normal function. The capabilities can be reported dynamically depending on the host capabilities. diff --git a/docs/virtio-balloon-stats.txt b/docs/interop/virtio-balloon-stats.rst similarity index 66% rename from docs/virtio-balloon-stats.txt rename to docs/interop/virtio-balloon-stats.rst index 1732cc8c8a..b9a6a6edb2 100644 --- a/docs/virtio-balloon-stats.txt +++ b/docs/interop/virtio-balloon-stats.rst @@ -1,4 +1,4 @@ -virtio balloon memory statistics +Virtio balloon memory statistics ================================ The virtio balloon driver supports guest memory statistics reporting. These @@ -9,10 +9,12 @@ Before querying the available stats, clients first have to enable polling. This is done by writing a time interval value (in seconds) to the guest-stats-polling-interval property. This value can be: - > 0 enables polling in the specified interval. If polling is already + > 0 + enables polling in the specified interval. If polling is already enabled, the polling time interval is changed to the new value - 0 disables polling. Previous polled statistics are still valid and + 0 + disables polling. Previous polled statistics are still valid and can be queried. 
Once polling is enabled, the virtio-balloon device in QEMU will start @@ -22,7 +24,7 @@ interval. To retrieve those stats, clients have to query the guest-stats property, which will return a dictionary containing: - o A key named 'stats', containing all available stats. If the guest + * A key named 'stats', containing all available stats. If the guest doesn't support a particular stat, or if it couldn't be retrieved, its value will be -1. Currently, the following stats are supported: @@ -37,7 +39,7 @@ which will return a dictionary containing: - stat-htlb-pgalloc - stat-htlb-pgfail - o A key named last-update, which contains the last stats update + * A key named last-update, which contains the last stats update timestamp in seconds. Since this timestamp is generated by the host, a buggy guest can't influence its value. The value is 0 if the guest has not updated the stats (yet). @@ -61,32 +63,32 @@ It's also important to note the following: respond to the request the timer will never be re-armed, which has the same effect as disabling polling -Here are a few examples. QEMU is started with '-device virtio-balloon', -which generates '/machine/peripheral-anon/device[1]' as the QOM path for +Here are a few examples. QEMU is started with ``-device virtio-balloon``, +which generates ``/machine/peripheral-anon/device[1]`` as the QOM path for the balloon device. -Enable polling with 2 seconds interval: +Enable polling with 2 seconds interval:: -{ "execute": "qom-set", - "arguments": { "path": "/machine/peripheral-anon/device[1]", - "property": "guest-stats-polling-interval", "value": 2 } } + { "execute": "qom-set", + "arguments": { "path": "/machine/peripheral-anon/device[1]", + "property": "guest-stats-polling-interval", "value": 2 } } -{ "return": {} } + { "return": {} } -Change polling to 10 seconds: +Change polling to 10 seconds:: -{ "execute": "qom-set", - "arguments": { "path": "/machine/peripheral-anon/device[1]", - "property": "guest-stats-polling-interval", "value": 10 } } + { "execute": "qom-set", + "arguments": { "path": "/machine/peripheral-anon/device[1]", + "property": "guest-stats-polling-interval", "value": 10 } } -{ "return": {} } + { "return": {} } -Get stats: +Get stats:: -{ "execute": "qom-get", - "arguments": { "path": "/machine/peripheral-anon/device[1]", - "property": "guest-stats" } } -{ + { "execute": "qom-get", + "arguments": { "path": "/machine/peripheral-anon/device[1]", + "property": "guest-stats" } } + { "return": { "stats": { "stat-swap-out": 0, @@ -98,12 +100,12 @@ Get stats: }, "last-update": 1358529861 } -} + } -Disable polling: +Disable polling:: -{ "execute": "qom-set", - "arguments": { "path": "/machine/peripheral-anon/device[1]", - "property": "stats-polling-interval", "value": 0 } } + { "execute": "qom-set", + "arguments": { "path": "/machine/peripheral-anon/device[1]", + "property": "stats-polling-interval", "value": 0 } } -{ "return": {} } + { "return": {} } diff --git a/docs/meson.build b/docs/meson.build index 300b134329..9136fed3b7 100644 --- a/docs/meson.build +++ b/docs/meson.build @@ -9,7 +9,7 @@ endif # Check if tools are available to build documentation. 
build_docs = false if sphinx_build.found() - SPHINX_ARGS = ['env', 'CONFDIR=' + qemu_confdir, sphinx_build] + SPHINX_ARGS = ['env', 'CONFDIR=' + qemu_confdir, sphinx_build, '-q'] # If we're making warnings fatal, apply this to Sphinx runs as well if get_option('werror') SPHINX_ARGS += [ '-W' ] @@ -18,12 +18,12 @@ if sphinx_build.found() # This is a bit awkward but works: create a trivial document and # try to run it with our configuration file (which enforces a # version requirement). This will fail if sphinx-build is too old. - run_command('mkdir', ['-p', tmpdir / 'sphinx']) - run_command('touch', [tmpdir / 'sphinx/index.rst']) + run_command('mkdir', ['-p', tmpdir / 'sphinx'], check: true) + run_command('touch', [tmpdir / 'sphinx/index.rst'], check: true) sphinx_build_test_out = run_command(SPHINX_ARGS + [ '-c', meson.current_source_dir(), '-b', 'html', tmpdir / 'sphinx', - tmpdir / 'sphinx/out']) + tmpdir / 'sphinx/out'], check: false) build_docs = (sphinx_build_test_out.returncode() == 0) if not build_docs @@ -35,18 +35,7 @@ if sphinx_build.found() endif if build_docs - SPHINX_ARGS += ['-Dversion=' + meson.project_version(), '-Drelease=' + config_host['PKGVERSION']] - - sphinx_extn_depends = [ meson.source_root() / 'docs/sphinx/depfile.py', - meson.source_root() / 'docs/sphinx/hxtool.py', - meson.source_root() / 'docs/sphinx/kerneldoc.py', - meson.source_root() / 'docs/sphinx/kernellog.py', - meson.source_root() / 'docs/sphinx/qapidoc.py', - meson.source_root() / 'docs/sphinx/qmp_lexer.py', - qapi_gen_depends ] - sphinx_template_files = [ meson.source_root() / 'docs/_templates/footer.html' ] - - have_ga = have_tools and config_host.has_key('CONFIG_GUEST_AGENT') + SPHINX_ARGS += ['-Dversion=' + meson.project_version(), '-Drelease=' + get_option('pkgversion')] man_pages = { 'qemu-ga.8': (have_ga ? 'man8' : ''), @@ -57,7 +46,7 @@ if build_docs 'qemu-nbd.8': (have_tools ? 'man8' : ''), 'qemu-pr-helper.8': (have_tools ? 'man8' : ''), 'qemu-storage-daemon.1': (have_tools ? 'man1' : ''), - 'qemu-trace-stap.1': (config_host.has_key('CONFIG_TRACE_SYSTEMTAP') ? 'man1' : ''), + 'qemu-trace-stap.1': (stap.found() ? 'man1' : ''), 'virtfs-proxy-helper.1': (have_virtfs_proxy_helper ? 'man1' : ''), 'virtiofsd.1': (have_virtiofsd ? 'man1' : ''), 'qemu.1': 'man1', @@ -77,7 +66,6 @@ if build_docs output: 'docs.stamp', input: files('conf.py'), depfile: 'docs.d', - depend_files: [ sphinx_extn_depends, sphinx_template_files ], command: [SPHINX_ARGS, '-Ddepfile=@DEPFILE@', '-Ddepfile_stamp=@OUTPUT0@', '-b', 'html', '-d', private_dir, diff --git a/docs/multiseat.txt b/docs/multiseat.txt index 11850c96ff..2b297e979d 100644 --- a/docs/multiseat.txt +++ b/docs/multiseat.txt @@ -123,7 +123,7 @@ Background info is here: guest side with pci-bridge-seat ------------------------------- -Qemu version 2.4 and newer has a new pci-bridge-seat device which +QEMU version 2.4 and newer has a new pci-bridge-seat device which can be used instead of pci-bridge. Just swap the device name in the qemu command line above. The only difference between the two devices is the pci id. We can match the pci id instead of the device path diff --git a/docs/nvdimm.txt b/docs/nvdimm.txt index 0aae682be3..fd7773dc5a 100644 --- a/docs/nvdimm.txt +++ b/docs/nvdimm.txt @@ -15,7 +15,7 @@ backend (i.e. memory-backend-file and memory-backend-ram). 
A simple way to create a vNVDIMM device at startup time is done via the following command line options: - -machine pc,nvdimm + -machine pc,nvdimm=on -m $RAM_SIZE,slots=$N,maxmem=$MAX_SIZE -object memory-backend-file,id=mem1,share=on,mem-path=$PATH,size=$NVDIMM_SIZE,readonly=off -device nvdimm,id=nvdimm1,memdev=mem1,unarmed=off diff --git a/docs/papr-pef.txt b/docs/papr-pef.txt deleted file mode 100644 index 72550e9bf8..0000000000 --- a/docs/papr-pef.txt +++ /dev/null @@ -1,30 +0,0 @@ -POWER (PAPR) Protected Execution Facility (PEF) -=============================================== - -Protected Execution Facility (PEF), also known as Secure Guest support -is a feature found on IBM POWER9 and POWER10 processors. - -If a suitable firmware including an Ultravisor is installed, it adds -an extra memory protection mode to the CPU. The ultravisor manages a -pool of secure memory which cannot be accessed by the hypervisor. - -When this feature is enabled in QEMU, a guest can use ultracalls to -enter "secure mode". This transfers most of its memory to secure -memory, where it cannot be eavesdropped by a compromised hypervisor. - -Launching --------- - -To launch a guest which will be permitted to enter PEF secure mode: - -# ${QEMU} \ - -object pef-guest,id=pef0 \ - -machine confidential-guest-support=pef0 \ - ... - -Live Migration ---------------- - -Live migration is not yet implemented for PEF guests. For -consistency, we currently prevent migration if the PEF feature is -enabled, whether or not the guest has actually entered secure mode. diff --git a/docs/pcie_sriov.txt b/docs/pcie_sriov.txt new file mode 100644 index 0000000000..11158dbf88 --- /dev/null +++ b/docs/pcie_sriov.txt @@ -0,0 +1,115 @@ +PCI SR/IOV EMULATION SUPPORT +============================ + +Description +=========== +SR/IOV (Single Root I/O Virtualization) is an optional extended capability +of a PCI Express device. It allows a single physical function (PF) to appear as multiple +virtual functions (VFs) for the main purpose of eliminating software +overhead in I/O from virtual machines. + +QEMU now implements the basic common functionality to enable an emulated device +to support SR/IOV. Yet no fully implemented devices exist in QEMU, but a +proof-of-concept hack of the Intel igb can be found here: + +git://github.com/knuto/qemu.git sriov_patches_v5 + +Implementation +============== +Implementing emulation of an SR/IOV capable device typically consists of +implementing support for two types of device classes; the "normal" physical device +(PF) and the virtual device (VF). From QEMU's perspective, the VFs are just +like other devices, except that some of their properties are derived from +the PF. + +A virtual function is different from a physical function in that the BAR +space for all VFs is defined by the BAR registers in the PF's SR/IOV +capability. All VFs have the same BARs and BAR sizes. + +Accesses to these virtual BARs are then computed as + + <VF BAR start> + <VF number> * <BAR size> + <offset> + +From our emulation perspective this means that there is a separate call for +setting up a BAR for a VF. + +1) To enable SR/IOV support in the PF, it must be a PCI Express device so + you would need to add a PCI Express capability in the normal PCI + capability list.
You might also want to add an ARI (Alternative + Routing-ID Interpretation) capability to indicate that your device + supports functions beyond it's "own" function space (0-7), + which is necessary to support more than 7 functions, or + if functions extends beyond offset 7 because they are placed at an + offset > 1 or have stride > 1. + + ... + #include "hw/pci/pcie.h" + #include "hw/pci/pcie_sriov.h" + + pci_your_pf_dev_realize( ... ) + { + ... + int ret = pcie_endpoint_cap_init(d, 0x70); + ... + pcie_ari_init(d, 0x100, 1); + ... + + /* Add and initialize the SR/IOV capability */ + pcie_sriov_pf_init(d, 0x200, "your_virtual_dev", + vf_devid, initial_vfs, total_vfs, + fun_offset, stride); + + /* Set up individual VF BARs (parameters as for normal BARs) */ + pcie_sriov_pf_init_vf_bar( ... ) + ... + } + + For cleanup, you simply call: + + pcie_sriov_pf_exit(device); + + which will delete all the virtual functions and associated resources. + +2) Similarly in the implementation of the virtual function, you need to + make it a PCI Express device and add a similar set of capabilities + except for the SR/IOV capability. Then you need to set up the VF BARs as + subregions of the PFs SR/IOV VF BARs by calling + pcie_sriov_vf_register_bar() instead of the normal pci_register_bar() call: + + pci_your_vf_dev_realize( ... ) + { + ... + int ret = pcie_endpoint_cap_init(d, 0x60); + ... + pcie_ari_init(d, 0x100, 1); + ... + memory_region_init(mr, ... ) + pcie_sriov_vf_register_bar(d, bar_nr, mr); + ... + } + +Testing on Linux guest +====================== +The easiest is if your device driver supports sysfs based SR/IOV +enabling. Support for this was added in kernel v.3.8, so not all drivers +support it yet. + +To enable 4 VFs for a device at 01:00.0: + + modprobe yourdriver + echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs + +You should now see 4 VFs with lspci. +To turn SR/IOV off again - the standard requires you to turn it off before you can enable +another VF count, and the emulation enforces this: + + echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs + +Older drivers typically provide a max_vfs module parameter +to enable it at load time: + + modprobe yourdriver max_vfs=4 + +To disable the VFs again then, you simply have to unload the driver: + + rmmod yourdriver diff --git a/docs/qemu_logo.pdf b/docs/qemu_logo.pdf deleted file mode 100644 index 294cb7dec5..0000000000 Binary files a/docs/qemu_logo.pdf and /dev/null differ diff --git a/docs/replay.txt b/docs/replay.txt deleted file mode 100644 index 5b008ca491..0000000000 --- a/docs/replay.txt +++ /dev/null @@ -1,410 +0,0 @@ -Copyright (c) 2010-2015 Institute for System Programming - of the Russian Academy of Sciences. - -This work is licensed under the terms of the GNU GPL, version 2 or later. -See the COPYING file in the top-level directory. - -Record/replay -------------- - -Record/replay functions are used for the deterministic replay of qemu execution. -Execution recording writes a non-deterministic events log, which can be later -used for replaying the execution anywhere and for unlimited number of times. -It also supports checkpointing for faster rewind to the specific replay moment. -Execution replaying reads the log and replays all non-deterministic events -including external input, hardware clocks, and interrupts. - -Deterministic replay has the following features: - * Deterministically replays whole system execution and all contents of - the memory, state of the hardware devices, clocks, and screen of the VM. 
- * Writes execution log into the file for later replaying for multiple times - on different machines. - * Supports i386, x86_64, and Arm hardware platforms. - * Performs deterministic replay of all operations with keyboard and mouse - input devices. - -Usage of the record/replay: - * First, record the execution with the following command line: - qemu-system-i386 \ - -icount shift=7,rr=record,rrfile=replay.bin \ - -drive file=disk.qcow2,if=none,snapshot,id=img-direct \ - -drive driver=blkreplay,if=none,image=img-direct,id=img-blkreplay \ - -device ide-hd,drive=img-blkreplay \ - -netdev user,id=net1 -device rtl8139,netdev=net1 \ - -object filter-replay,id=replay,netdev=net1 - * After recording, you can replay it by using another command line: - qemu-system-i386 \ - -icount shift=7,rr=replay,rrfile=replay.bin \ - -drive file=disk.qcow2,if=none,snapshot,id=img-direct \ - -drive driver=blkreplay,if=none,image=img-direct,id=img-blkreplay \ - -device ide-hd,drive=img-blkreplay \ - -netdev user,id=net1 -device rtl8139,netdev=net1 \ - -object filter-replay,id=replay,netdev=net1 - The only difference with recording is changing the rr option - from record to replay. - * Block device images are not actually changed in the recording mode, - because all of the changes are written to the temporary overlay file. - This behavior is enabled by using blkreplay driver. It should be used - for every enabled block device, as described in 'Block devices' section. - * '-net none' option should be specified when network is not used, - because QEMU adds network card by default. When network is needed, - it should be configured explicitly with replay filter, as described - in 'Network devices' section. - * Interaction with audio devices and serial ports are recorded and replayed - automatically when such devices are enabled. - -Academic papers with description of deterministic replay implementation: -http://www.computer.org/csdl/proceedings/csmr/2012/4666/00/4666a553-abs.html -http://dl.acm.org/citation.cfm?id=2786805.2803179 - -Modifications of qemu include: - * wrappers for clock and time functions to save their return values in the log - * saving different asynchronous events (e.g. system shutdown) into the log - * synchronization of the bottom halves execution - * synchronization of the threads from thread pool - * recording/replaying user input (mouse, keyboard, and microphone) - * adding internal checkpoints for cpu and io synchronization - * network filter for recording and replaying the packets - * block driver for making block layer deterministic - * serial port input record and replay - * recording of random numbers obtained from the external sources - -Locking and thread synchronisation ----------------------------------- - -Previously the synchronisation of the main thread and the vCPU thread -was ensured by the holding of the BQL. However the trend has been to -reduce the time the BQL was held across the system including under TCG -system emulation. As it is important that batches of events are kept -in sequence (e.g. expiring timers and checkpoints in the main thread -while instruction checkpoints are written by the vCPU thread) we need -another lock to keep things in lock-step. This role is now handled by -the replay_mutex_lock. It used to be held only for each event being -written but now it is held for a whole execution period. This results -in a deterministic ping-pong between the two main threads. 
- -As the BQL is now a finer grained lock than the replay_lock it is almost -certainly a bug, and a source of deadlocks, to take the -replay_mutex_lock while the BQL is held. This is enforced by an assert. -While the unlocks are usually in the reverse order, this is not -necessary; you can drop the replay_lock while holding the BQL, without -doing a more complicated unlock_iothread/replay_unlock/lock_iothread -sequence. - -Non-deterministic events ------------------------- - -Our record/replay system is based on saving and replaying non-deterministic -events (e.g. keyboard input) and simulating deterministic ones (e.g. reading -from HDD or memory of the VM). Saving only non-deterministic events makes -log file smaller and simulation faster. - -The following non-deterministic data from peripheral devices is saved into -the log: mouse and keyboard input, network packets, audio controller input, -serial port input, and hardware clocks (they are non-deterministic -too, because their values are taken from the host machine). Inputs from -simulated hardware, memory of VM, software interrupts, and execution of -instructions are not saved into the log, because they are deterministic and -can be replayed by simulating the behavior of virtual machine starting from -initial state. - -We had to solve three tasks to implement deterministic replay: recording -non-deterministic events, replaying non-deterministic events, and checking -that there is no divergence between record and replay modes. - -We changed several parts of QEMU to make event log recording and replaying. -Devices' models that have non-deterministic input from external devices were -changed to write every external event into the execution log immediately. -E.g. network packets are written into the log when they arrive into the virtual -network adapter. - -All non-deterministic events are coming from these devices. But to -replay them we need to know at which moments they occur. We specify -these moments by counting the number of instructions executed between -every pair of consecutive events. - -Instruction counting --------------------- - -QEMU should work in icount mode to use record/replay feature. icount was -designed to allow deterministic execution in absence of external inputs -of the virtual machine. We also use icount to control the occurrence of the -non-deterministic events. The number of instructions elapsed from the last event -is written to the log while recording the execution. In replay mode we -can predict when to inject that event using the instruction counter. - -Timers ------- - -Timers are used to execute callbacks from different subsystems of QEMU -at the specified moments of time. There are several kinds of timers: - * Real time clock. Based on host time and used only for callbacks that - do not change the virtual machine state. For this reason real time - clock and timers does not affect deterministic replay at all. - * Virtual clock. These timers run only during the emulation. In icount - mode virtual clock value is calculated using executed instructions counter. - That is why it is completely deterministic and does not have to be recorded. - * Host clock. This clock is used by device models that simulate real time - sources (e.g. real time clock chip). Host clock is the one of the sources - of non-determinism. Host clock read operations should be logged to - make the execution deterministic. - * Virtual real time clock. 
This clock is similar to real time clock but - it is used only for increasing virtual clock while virtual machine is - sleeping. Due to its nature it is also non-deterministic as the host clock - and has to be logged too. - -Checkpoints ------------ - -Replaying of the execution of virtual machine is bound by sources of -non-determinism. These are inputs from clock and peripheral devices, -and QEMU thread scheduling. Thread scheduling affect on processing events -from timers, asynchronous input-output, and bottom halves. - -Invocations of timers are coupled with clock reads and changing the state -of the virtual machine. Reads produce non-deterministic data taken from -host clock. And VM state changes should preserve their order. Their relative -order in replay mode must replicate the order of callbacks in record mode. -To preserve this order we use checkpoints. When a specific clock is processed -in record mode we save to the log special "checkpoint" event. -Checkpoints here do not refer to virtual machine snapshots. They are just -record/replay events used for synchronization. - -QEMU in replay mode will try to invoke timers processing in random moment -of time. That's why we do not process a group of timers until the checkpoint -event will be read from the log. Such an event allows synchronizing CPU -execution and timer events. - -Two other checkpoints govern the "warping" of the virtual clock. -While the virtual machine is idle, the virtual clock increments at -1 ns per *real time* nanosecond. This is done by setting up a timer -(called the warp timer) on the virtual real time clock, so that the -timer fires at the next deadline of the virtual clock; the virtual clock -is then incremented (which is called "warping" the virtual clock) as -soon as the timer fires or the CPUs need to go out of the idle state. -Two functions are used for this purpose; because these actions change -virtual machine state and must be deterministic, each of them creates a -checkpoint. icount_start_warp_timer checks if the CPUs are idle and if so -starts accounting real time to virtual clock. icount_account_warp_timer -is called when the CPUs get an interrupt or when the warp timer fires, -and it warps the virtual clock by the amount of real time that has passed -since icount_start_warp_timer. - -Bottom halves -------------- - -Disk I/O events are completely deterministic in our model, because -in both record and replay modes we start virtual machine from the same -disk state. But callbacks that virtual disk controller uses for reading and -writing the disk may occur at different moments of time in record and replay -modes. - -Reading and writing requests are created by CPU thread of QEMU. Later these -requests proceed to block layer which creates "bottom halves". Bottom -halves consist of callback and its parameters. They are processed when -main loop locks the global mutex. These locks are not synchronized with -replaying process because main loop also processes the events that do not -affect the virtual machine state (like user interaction with monitor). - -That is why we had to implement saving and replaying bottom halves callbacks -synchronously to the CPU execution. When the callback is about to execute -it is added to the queue in the replay module. This queue is written to the -log when its callbacks are executed. In replay mode callbacks are not processed -until the corresponding event is read from the events log file. 
- -Sometimes the block layer uses asynchronous callbacks for its internal purposes -(like reading or writing VM snapshots or disk image cluster tables). In this -case bottom halves are not marked as "replayable" and do not saved -into the log. - -Block devices -------------- - -Block devices record/replay module intercepts calls of -bdrv coroutine functions at the top of block drivers stack. -To record and replay block operations the drive must be configured -as following: - -drive file=disk.qcow2,if=none,snapshot,id=img-direct - -drive driver=blkreplay,if=none,image=img-direct,id=img-blkreplay - -device ide-hd,drive=img-blkreplay - -blkreplay driver should be inserted between disk image and virtual driver -controller. Therefore all disk requests may be recorded and replayed. - -All block completion operations are added to the queue in the coroutines. -Queue is flushed at checkpoints and information about processed requests -is recorded to the log. In replay phase the queue is matched with -events read from the log. Therefore block devices requests are processed -deterministically. - -Snapshotting ------------- - -New VM snapshots may be created in replay mode. They can be used later -to recover the desired VM state. All VM states created in replay mode -are associated with the moment of time in the replay scenario. -After recovering the VM state replay will start from that position. - -Default starting snapshot name may be specified with icount field -rrsnapshot as follows: - -icount shift=7,rr=record,rrfile=replay.bin,rrsnapshot=snapshot_name - -This snapshot is created at start of recording and restored at start -of replaying. It also can be loaded while replaying to roll back -the execution. - -'snapshot' flag of the disk image must be removed to save the snapshots -in the overlay (or original image) instead of using the temporary overlay. - -drive file=disk.ovl,if=none,id=img-direct - -drive driver=blkreplay,if=none,image=img-direct,id=img-blkreplay - -device ide-hd,drive=img-blkreplay - -Use QEMU monitor to create additional snapshots. 'savevm ' command -created the snapshot and 'loadvm ' restores it. To prevent corruption -of the original disk image, use overlay files linked to the original images. -Therefore all new snapshots (including the starting one) will be saved in -overlays and the original image remains unchanged. - -When you need to use snapshots with diskless virtual machine, -it must be started with 'orphan' qcow2 image. This image will be used -for storing VM snapshots. Here is the example of the command line for this: - - qemu-system-i386 -icount shift=3,rr=replay,rrfile=record.bin,rrsnapshot=init \ - -net none -drive file=empty.qcow2,if=none,id=rr - -empty.qcow2 drive does not connected to any virtual block device and used -for VM snapshots only. - -Network devices ---------------- - -Record and replay for network interactions is performed with the network filter. -Each backend must have its own instance of the replay filter as follows: - -netdev user,id=net1 -device rtl8139,netdev=net1 - -object filter-replay,id=replay,netdev=net1 - -Replay network filter is used to record and replay network packets. While -recording the virtual machine this filter puts all packets coming from -the outer world into the log. In replay mode packets from the log are -injected into the network device. All interactions with network backend -in replay mode are disabled. - -Audio devices -------------- - -Audio data is recorded and replay automatically. 
The command line for recording -and replaying must contain identical specifications of audio hardware, e.g.: - -soundhw ac97 - -Serial ports ------------- - -Serial ports input is recorded and replay automatically. The command lines -for recording and replaying must contain identical number of ports in record -and replay modes, but their backends may differ. -E.g., '-serial stdio' in record mode, and '-serial null' in replay mode. - -Reverse debugging ------------------ - -Reverse debugging allows "executing" the program in reverse direction. -GDB remote protocol supports "reverse step" and "reverse continue" -commands. The first one steps single instruction backwards in time, -and the second one finds the last breakpoint in the past. - -Recorded executions may be used to enable reverse debugging. QEMU can't -execute the code in backwards direction, but can load a snapshot and -replay forward to find the desired position or breakpoint. - -The following GDB commands are supported: - - reverse-stepi (or rsi) - step one instruction backwards - - reverse-continue (or rc) - find last breakpoint in the past - -Reverse step loads the nearest snapshot and replays the execution until -the required instruction is met. - -Reverse continue may include several passes of examining the execution -between the snapshots. Each of the passes include the following steps: - 1. loading the snapshot - 2. replaying to examine the breakpoints - 3. if breakpoint or watchpoint was met - - loading the snapshot again - - replaying to the required breakpoint - 4. else - - proceeding to the p.1 with the earlier snapshot - -Therefore usage of the reverse debugging requires at least one snapshot -created in advance. This can be done by omitting 'snapshot' option -for the block drives and adding 'rrsnapshot' for both record and replay -command lines. -See the "Snapshotting" section to learn more about running record/replay -and creating the snapshot in these modes. - -Replay log format ------------------ - -Record/replay log consists of the header and the sequence of execution -events. The header includes 4-byte replay version id and 8-byte reserved -field. Version is updated every time replay log format changes to prevent -using replay log created by another build of qemu. - -The sequence of the events describes virtual machine state changes. -It includes all non-deterministic inputs of VM, synchronization marks and -instruction counts used to correctly inject inputs at replay. - -Synchronization marks (checkpoints) are used for synchronizing qemu threads -that perform operations with virtual hardware. These operations may change -system's state (e.g., change some register or generate interrupt) and -therefore should execute synchronously with CPU thread. - -Every event in the log includes 1-byte event id and optional arguments. -When argument is an array, it is stored as 4-byte array length -and corresponding number of bytes with data. -Here is the list of events that are written into the log: - - - EVENT_INSTRUCTION. Instructions executed since last event. - Argument: 4-byte number of executed instructions. - - EVENT_INTERRUPT. Used to synchronize interrupt processing. - - EVENT_EXCEPTION. Used to synchronize exception handling. - - EVENT_ASYNC. This is a group of events. They are always processed - together with checkpoints. When such an event is generated, it is - stored in the queue and processed only when checkpoint occurs. 
- Every such event is followed by 1-byte checkpoint id and 1-byte - async event id from the following list: - - REPLAY_ASYNC_EVENT_BH. Bottom-half callback. This event synchronizes - callbacks that affect virtual machine state, but normally called - asynchronously. - Argument: 8-byte operation id. - - REPLAY_ASYNC_EVENT_INPUT. Input device event. Contains - parameters of keyboard and mouse input operations - (key press/release, mouse pointer movement). - Arguments: 9-16 bytes depending of input event. - - REPLAY_ASYNC_EVENT_INPUT_SYNC. Internal input synchronization event. - - REPLAY_ASYNC_EVENT_CHAR_READ. Character (e.g., serial port) device input - initiated by the sender. - Arguments: 1-byte character device id. - Array with bytes were read. - - REPLAY_ASYNC_EVENT_BLOCK. Block device operation. Used to synchronize - operations with disk and flash drives with CPU. - Argument: 8-byte operation id. - - REPLAY_ASYNC_EVENT_NET. Incoming network packet. - Arguments: 1-byte network adapter id. - 4-byte packet flags. - Array with packet bytes. - - EVENT_SHUTDOWN. Occurs when user sends shutdown event to qemu, - e.g., by closing the window. - - EVENT_CHAR_WRITE. Used to synchronize character output operations. - Arguments: 4-byte output function return value. - 4-byte offset in the output array. - - EVENT_CHAR_READ_ALL. Used to synchronize character input operations, - initiated by qemu. - Argument: Array with bytes that were read. - - EVENT_CHAR_READ_ALL_ERROR. Unsuccessful character input operation, - initiated by qemu. - Argument: 4-byte error code. - - EVENT_CLOCK + clock_id. Group of events for host clock read operations. - Argument: 8-byte clock value. - - EVENT_CHECKPOINT + checkpoint_id. Checkpoint for synchronization of - CPU, internal threads, and asynchronous input events. May be followed - by one or more EVENT_ASYNC events. - - EVENT_END. Last event in the log. diff --git a/docs/specs/acpi_cpu_hotplug.rst b/docs/specs/acpi_cpu_hotplug.rst new file mode 100644 index 0000000000..351057c967 --- /dev/null +++ b/docs/specs/acpi_cpu_hotplug.rst @@ -0,0 +1,235 @@ +QEMU<->ACPI BIOS CPU hotplug interface +====================================== + +QEMU supports CPU hotplug via ACPI. This document +describes the interface between QEMU and the ACPI BIOS. + +ACPI BIOS GPE.2 handler is dedicated for notifying OS about CPU hot-add +and hot-remove events. + + +Legacy ACPI CPU hotplug interface registers +------------------------------------------- + +CPU present bitmap for: + +- ICH9-LPC (IO port 0x0cd8-0xcf7, 1-byte access) +- PIIX-PM (IO port 0xaf00-0xaf1f, 1-byte access) +- One bit per CPU. Bit position reflects corresponding CPU APIC ID. Read-only. +- The first DWORD in bitmap is used in write mode to switch from legacy + to modern CPU hotplug interface, write 0 into it to do switch. + +QEMU sets corresponding CPU bit on hot-add event and issues SCI +with GPE.2 event set. CPU present map is read by ACPI BIOS GPE.2 handler +to notify OS about CPU hot-add events. CPU hot-remove isn't supported. + + +Modern ACPI CPU hotplug interface registers +------------------------------------------- + +Register block base address: + +- ICH9-LPC IO port 0x0cd8 +- PIIX-PM IO port 0xaf00 + +Register block size: + +- ACPI_CPU_HOTPLUG_REG_LEN = 12 + +All accesses to registers described below, imply little-endian byte order. + +Reserved registers behavior: + +- write accesses are ignored +- read accesses return all bits set to 0. 
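Before the per-offset description, a guest-side sketch may help to show how little is involved in talking to this register block. It implements the mode-detection handshake that is spelled out step by step under "Typical usecases" below, using the register offsets defined in the following subsections; the choice of the ICH9-LPC base, <sys/io.h> and ioperm() are assumptions of the sketch, and it would have to run as root inside the guest::

    /* Sketch of the "detect modern interface" handshake (see "Typical
     * usecases" below).  Offsets follow the read/write subsections that
     * come next; the base address and port helpers are assumptions. */
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/io.h>                    /* outb()/outl()/inl(), x86 glibc */

    #define CPUHP_BASE       0x0cd8        /* ICH9-LPC; PIIX-PM uses 0xaf00 */
    #define CPUHP_REG_LEN    12            /* ACPI_CPU_HOTPLUG_REG_LEN */
    #define CPUHP_SELECTOR   0x0           /* write: CPU selector (DWORD) */
    #define CPUHP_CMD_FIELD  0x5           /* write: command field (byte) */
    #define CPUHP_CMD_DATA2  0x0           /* read: command data 2 (DWORD) */

    static int cpuhp_modern_present(void)
    {
        outl(0, CPUHP_BASE + CPUHP_SELECTOR);   /* try to leave legacy mode */
        outl(0, CPUHP_BASE + CPUHP_SELECTOR);   /* ensure a valid selector */
        outb(0, CPUHP_BASE + CPUHP_CMD_FIELD);  /* command 0 */
        return inl(CPUHP_BASE + CPUHP_CMD_DATA2) == 0;
    }

    int main(void)
    {
        if (ioperm(CPUHP_BASE, CPUHP_REG_LEN, 1) < 0) {
            perror("ioperm");              /* needs root inside the guest */
            return 1;
        }
        printf("modern CPU hotplug interface: %s\n",
               cpuhp_modern_present() ? "present" : "legacy or absent");
        return 0;
    }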
+ +The last stored value in 'CPU selector' must refer to a possible CPU, otherwise + +- reads from any register return 0 +- writes to any other register are ignored until valid value is stored into it + +On QEMU start, 'CPU selector' is initialized to a valid value, on reset it +keeps the current value. + +Read access behavior +^^^^^^^^^^^^^^^^^^^^ + +offset [0x0-0x3] + Command data 2: (DWORD access) + + If value last stored in 'Command field' is: + + 0: + reads as 0x0 + 3: + upper 32 bits of architecture specific CPU ID value + other values: + reserved + +offset [0x4] + CPU device status fields: (1 byte access) + + bits: + + 0: + Device is enabled and may be used by guest + 1: + Device insert event, used to distinguish device for which + no device check event to OSPM was issued. + It's valid only when bit 0 is set. + 2: + Device remove event, used to distinguish device for which + no device eject request to OSPM was issued. Firmware must + ignore this bit. + 3: + reserved and should be ignored by OSPM + 4: + if set to 1, OSPM requests firmware to perform device eject. + 5-7: + reserved and should be ignored by OSPM + +offset [0x5-0x7] + reserved + +offset [0x8] + Command data: (DWORD access) + + If value last stored in 'Command field' is one of: + + 0: + contains 'CPU selector' value of a CPU with pending event[s] + 3: + lower 32 bits of architecture specific CPU ID value + (in x86 case: APIC ID) + otherwise: + contains 0 + +Write access behavior +^^^^^^^^^^^^^^^^^^^^^ + +offset [0x0-0x3] + CPU selector: (DWORD access) + + Selects active CPU device. All following accesses to other + registers will read/store data from/to selected CPU. + Valid values: [0 .. max_cpus) + +offset [0x4] + CPU device control fields: (1 byte access) + + bits: + + 0: + reserved, OSPM must clear it before writing to register. + 1: + if set to 1 clears device insert event, set by OSPM + after it has emitted device check event for the + selected CPU device + 2: + if set to 1 clears device remove event, set by OSPM + after it has emitted device eject request for the + selected CPU device. + 3: + if set to 1 initiates device eject, set by OSPM when it + triggers CPU device removal and calls _EJ0 method or by firmware + when bit #4 is set. In case bit #4 were set, it's cleared as + part of device eject. + 4: + if set to 1, OSPM hands over device eject to firmware. + Firmware shall issue device eject request as described above + (bit #3) and OSPM should not touch device eject bit (#3) in case + it's asked firmware to perform CPU device eject. + 5-7: + reserved, OSPM must clear them before writing to register + +offset[0x5] + Command field: (1 byte access) + + value: + + 0: + selects a CPU device with inserting/removing events and + following reads from 'Command data' register return + selected CPU ('CPU selector' value). + If no CPU with events found, the current 'CPU selector' doesn't + change and corresponding insert/remove event flags are not modified. + + 1: + following writes to 'Command data' register set OST event + register in QEMU + 2: + following writes to 'Command data' register set OST status + register in QEMU + 3: + following reads from 'Command data' and 'Command data 2' return + architecture specific CPU ID value for currently selected CPU. 
+ other values: + reserved + +offset [0x6-0x7] + reserved + +offset [0x8] + Command data: (DWORD access) + + If last stored 'Command field' value is: + + 1: + stores value into OST event register + 2: + stores value into OST status register, triggers + ACPI_DEVICE_OST QMP event from QEMU to external applications + with current values of OST event and status registers. + other values: + reserved + +Typical usecases +---------------- + +(x86) Detecting and enabling modern CPU hotplug interface +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +QEMU starts with legacy CPU hotplug interface enabled. Detecting and +switching to modern interface is based on the 2 legacy CPU hotplug features: + +#. Writes into CPU bitmap are ignored. +#. CPU bitmap always has bit #0 set, corresponding to boot CPU. + +Use following steps to detect and enable modern CPU hotplug interface: + +#. Store 0x0 to the 'CPU selector' register, attempting to switch to modern mode +#. Store 0x0 to the 'CPU selector' register, to ensure valid selector value +#. Store 0x0 to the 'Command field' register +#. Read the 'Command data 2' register. + If read value is 0x0, the modern interface is enabled. + Otherwise legacy or no CPU hotplug interface available + +Get a cpu with pending event +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +#. Store 0x0 to the 'CPU selector' register. +#. Store 0x0 to the 'Command field' register. +#. Read the 'CPU device status fields' register. +#. If both bit #1 and bit #2 are clear in the value read, there is no CPU + with a pending event and selected CPU remains unchanged. +#. Otherwise, read the 'Command data' register. The value read is the + selector of the CPU with the pending event (which is already selected). + +Enumerate CPUs present/non present CPUs +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +#. Set the present CPU count to 0. +#. Set the iterator to 0. +#. Store 0x0 to the 'CPU selector' register, to ensure that it's in + a valid state and that access to other registers won't be ignored. +#. Store 0x0 to the 'Command field' register to make 'Command data' + register return 'CPU selector' value of selected CPU +#. Read the 'CPU device status fields' register. +#. If bit #0 is set, increment the present CPU count. +#. Increment the iterator. +#. Store the iterator to the 'CPU selector' register. +#. Read the 'Command data' register. +#. If the value read is not zero, goto 05. +#. Otherwise store 0x0 to the 'CPU selector' register, to put it + into a valid state and exit. + The iterator at this point equals "max_cpus". diff --git a/docs/specs/acpi_cpu_hotplug.txt b/docs/specs/acpi_cpu_hotplug.txt deleted file mode 100644 index 9bd59ae0da..0000000000 --- a/docs/specs/acpi_cpu_hotplug.txt +++ /dev/null @@ -1,160 +0,0 @@ -QEMU<->ACPI BIOS CPU hotplug interface --------------------------------------- - -QEMU supports CPU hotplug via ACPI. This document -describes the interface between QEMU and the ACPI BIOS. - -ACPI BIOS GPE.2 handler is dedicated for notifying OS about CPU hot-add -and hot-remove events. - -============================================ -Legacy ACPI CPU hotplug interface registers: --------------------------------------------- -CPU present bitmap for: - ICH9-LPC (IO port 0x0cd8-0xcf7, 1-byte access) - PIIX-PM (IO port 0xaf00-0xaf1f, 1-byte access) - One bit per CPU. Bit position reflects corresponding CPU APIC ID. Read-only. - The first DWORD in bitmap is used in write mode to switch from legacy - to modern CPU hotplug interface, write 0 into it to do switch. 
---------------------------------------------------------------- -QEMU sets corresponding CPU bit on hot-add event and issues SCI -with GPE.2 event set. CPU present map is read by ACPI BIOS GPE.2 handler -to notify OS about CPU hot-add events. CPU hot-remove isn't supported. - -===================================== -Modern ACPI CPU hotplug interface registers: -------------------------------------- -Register block base address: - ICH9-LPC IO port 0x0cd8 - PIIX-PM IO port 0xaf00 -Register block size: - ACPI_CPU_HOTPLUG_REG_LEN = 12 - -All accesses to registers described below, imply little-endian byte order. - -Reserved resisters behavior: - - write accesses are ignored - - read accesses return all bits set to 0. - -The last stored value in 'CPU selector' must refer to a possible CPU, otherwise - - reads from any register return 0 - - writes to any other register are ignored until valid value is stored into it -On QEMU start, 'CPU selector' is initialized to a valid value, on reset it -keeps the current value. - -read access: - offset: - [0x0-0x3] Command data 2: (DWORD access) - if value last stored in 'Command field': - 0: reads as 0x0 - 3: upper 32 bits of architecture specific CPU ID value - other values: reserved - [0x4] CPU device status fields: (1 byte access) - bits: - 0: Device is enabled and may be used by guest - 1: Device insert event, used to distinguish device for which - no device check event to OSPM was issued. - It's valid only when bit 0 is set. - 2: Device remove event, used to distinguish device for which - no device eject request to OSPM was issued. Firmware must - ignore this bit. - 3: reserved and should be ignored by OSPM - 4: if set to 1, OSPM requests firmware to perform device eject. - 5-7: reserved and should be ignored by OSPM - [0x5-0x7] reserved - [0x8] Command data: (DWORD access) - contains 0 unless value last stored in 'Command field' is one of: - 0: contains 'CPU selector' value of a CPU with pending event[s] - 3: lower 32 bits of architecture specific CPU ID value - (in x86 case: APIC ID) - -write access: - offset: - [0x0-0x3] CPU selector: (DWORD access) - selects active CPU device. All following accesses to other - registers will read/store data from/to selected CPU. - Valid values: [0 .. max_cpus) - [0x4] CPU device control fields: (1 byte access) - bits: - 0: reserved, OSPM must clear it before writing to register. - 1: if set to 1 clears device insert event, set by OSPM - after it has emitted device check event for the - selected CPU device - 2: if set to 1 clears device remove event, set by OSPM - after it has emitted device eject request for the - selected CPU device. - 3: if set to 1 initiates device eject, set by OSPM when it - triggers CPU device removal and calls _EJ0 method or by firmware - when bit #4 is set. In case bit #4 were set, it's cleared as - part of device eject. - 4: if set to 1, OSPM hands over device eject to firmware. - Firmware shall issue device eject request as described above - (bit #3) and OSPM should not touch device eject bit (#3) in case - it's asked firmware to perform CPU device eject. - 5-7: reserved, OSPM must clear them before writing to register - [0x5] Command field: (1 byte access) - value: - 0: selects a CPU device with inserting/removing events and - following reads from 'Command data' register return - selected CPU ('CPU selector' value). - If no CPU with events found, the current 'CPU selector' doesn't - change and corresponding insert/remove event flags are not modified. 
- 1: following writes to 'Command data' register set OST event - register in QEMU - 2: following writes to 'Command data' register set OST status - register in QEMU - 3: following reads from 'Command data' and 'Command data 2' return - architecture specific CPU ID value for currently selected CPU. - other values: reserved - [0x6-0x7] reserved - [0x8] Command data: (DWORD access) - if last stored 'Command field' value: - 1: stores value into OST event register - 2: stores value into OST status register, triggers - ACPI_DEVICE_OST QMP event from QEMU to external applications - with current values of OST event and status registers. - other values: reserved - -Typical usecases: - - (x86) Detecting and enabling modern CPU hotplug interface. - QEMU starts with legacy CPU hotplug interface enabled. Detecting and - switching to modern interface is based on the 2 legacy CPU hotplug features: - 1. Writes into CPU bitmap are ignored. - 2. CPU bitmap always has bit#0 set, corresponding to boot CPU. - - Use following steps to detect and enable modern CPU hotplug interface: - 1. Store 0x0 to the 'CPU selector' register, - attempting to switch to modern mode - 2. Store 0x0 to the 'CPU selector' register, - to ensure valid selector value - 3. Store 0x0 to the 'Command field' register, - 4. Read the 'Command data 2' register. - If read value is 0x0, the modern interface is enabled. - Otherwise legacy or no CPU hotplug interface available - - - Get a cpu with pending event - 1. Store 0x0 to the 'CPU selector' register. - 2. Store 0x0 to the 'Command field' register. - 3. Read the 'CPU device status fields' register. - 4. If both bit#1 and bit#2 are clear in the value read, there is no CPU - with a pending event and selected CPU remains unchanged. - 5. Otherwise, read the 'Command data' register. The value read is the - selector of the CPU with the pending event (which is already - selected). - - - Enumerate CPUs present/non present CPUs - 01. Set the present CPU count to 0. - 02. Set the iterator to 0. - 03. Store 0x0 to the 'CPU selector' register, to ensure that it's in - a valid state and that access to other registers won't be ignored. - 04. Store 0x0 to the 'Command field' register to make 'Command data' - register return 'CPU selector' value of selected CPU - 05. Read the 'CPU device status fields' register. - 06. If bit#0 is set, increment the present CPU count. - 07. Increment the iterator. - 08. Store the iterator to the 'CPU selector' register. - 09. Read the 'Command data' register. - 10. If the value read is not zero, goto 05. - 11. Otherwise store 0x0 to the 'CPU selector' register, to put it - into a valid state and exit. - The iterator at this point equals "max_cpus". diff --git a/docs/specs/acpi_erst.rst b/docs/specs/acpi_erst.rst new file mode 100644 index 0000000000..2339b60ad7 --- /dev/null +++ b/docs/specs/acpi_erst.rst @@ -0,0 +1,200 @@ +ACPI ERST DEVICE +================ + +The ACPI ERST device is utilized to support the ACPI Error Record +Serialization Table, ERST, functionality. This feature is designed for +storing error records in persistent storage for future reference +and/or debugging. + +The ACPI specification[1], in Chapter "ACPI Platform Error Interfaces +(APEI)", and specifically subsection "Error Serialization", outlines a +method for storing error records into persistent storage. + +The format of error records is described in the UEFI specification[2], +in Appendix N "Common Platform Error Record". 
+ +While the ACPI specification allows for an NVRAM "mode" (see +GET_ERROR_LOG_ADDRESS_RANGE_ATTRIBUTES) where non-volatile RAM is +directly exposed for direct access by the OS/guest, this device +implements the non-NVRAM "mode". This non-NVRAM "mode" is what is +implemented by most BIOS (since flash memory requires programming +operations in order to update its contents). Furthermore, as of the +time of this writing, Linux only supports the non-NVRAM "mode". + + +Background/Motivation +--------------------- + +Linux uses the persistent storage filesystem, pstore, to record +information (eg. dmesg tail) upon panics and shutdowns. Pstore is +independent of, and runs before, kdump. In certain scenarios (ie. +hosts/guests with root filesystems on NFS/iSCSI where networking +software and/or hardware fails, and thus kdump fails), pstore may +contain information available for post-mortem debugging. + +Two common storage backends for the pstore filesystem are ACPI ERST +and UEFI. Most BIOS implement ACPI ERST. UEFI is not utilized in all +guests. With QEMU supporting ACPI ERST, it becomes a viable pstore +storage backend for virtual machines (as it is now for bare metal +machines). + +Enabling support for ACPI ERST facilitates a consistent method to +capture kernel panic information in a wide range of guests: from +resource-constrained microvms to very large guests, and in particular, +in direct-boot environments (which would lack UEFI run-time services). + +Note that Microsoft Windows also utilizes the ACPI ERST for certain +crash information, if available[3]. + + +Configuration|Usage +------------------- + +To use ACPI ERST, a memory-backend-file object and acpi-erst device +can be created, for example: + + qemu ... + -object memory-backend-file,id=erstnvram,mem-path=acpi-erst.backing,size=0x10000,share=on \ + -device acpi-erst,memdev=erstnvram + +For proper operation, the ACPI ERST device needs a memory-backend-file +object with the following parameters: + + - id: The id of the memory-backend-file object is used to associate + this memory with the acpi-erst device. + - size: The size of the ACPI ERST backing storage. This parameter is + required. + - mem-path: The location of the ACPI ERST backing storage file. This + parameter is also required. + - share: The share=on parameter is required so that updates to the + ERST backing store are written to the file. + +and ERST device: + + - memdev: Is the object id of the memory-backend-file. + - record_size: Specifies the size of the records (or slots) in the + backend storage. Must be a power of two value greater than or + equal to 4096 (PAGE_SIZE). + + +PCI Interface +------------- + +The ERST device is a PCI device with two BARs, one for accessing the +programming registers, and the other for accessing the record exchange +buffer. + +BAR0 contains the programming interface consisting of ACTION and VALUE +64-bit registers. All ERST actions/operations/side effects happen on +the write to the ACTION, by design. Any data needed by the action must +be placed into VALUE prior to writing ACTION. Reading the VALUE +simply returns the register contents, which can be updated by a +previous ACTION. + +BAR1 contains the 8KiB record exchange buffer, which is the +implemented maximum record size. + + +Backend Storage Format +---------------------- + +The backend storage is divided into fixed size "slots", 8KiB in +length, with each slot storing a single record. Not all slots need to +be occupied, and they need not be occupied in a contiguous fashion. 
+The ability to clear/erase specific records allows for the formation +of unoccupied slots. + +Slot 0 contains a backend storage header that identifies the contents +as ERST and also facilitates efficient access to the records. +Depending upon the size of the backend storage, additional slots will +be designated to be a part of the slot 0 header. For example, at 8KiB, +the slot 0 header can accommodate 1021 records. Thus a storage size +of 8MiB (8KiB * 1024) requires an additional slot for use by the +header. In this scenario, slot 0 and slot 1 form the backend storage +header, and records can be stored starting at slot 2. + +Below is an example layout of the backend storage format (for storage +size less than 8MiB). The size of the storage is a multiple of 8KiB, +and contains N number of slots to store records. The example below +shows two records (in CPER format) in the backend storage, while the +remaining slots are empty/available. + +:: + + Slot Record + <------------------ 8KiB --------------------> + +--------------------------------------------+ + 0 | storage header | + +--------------------------------------------+ + 1 | empty/available | + +--------------------------------------------+ + 2 | CPER | + +--------------------------------------------+ + 3 | CPER | + +--------------------------------------------+ + ... | | + +--------------------------------------------+ + N | empty/available | + +--------------------------------------------+ + +The storage header consists of some basic information and an array +of CPER record_id's to efficiently access records in the backend +storage. + +All fields in the header are stored in little endian format. + +:: + + +--------------------------------------------+ + | magic | 0x0000 + +--------------------------------------------+ + | record_offset | record_size | 0x0008 + +--------------------------------------------+ + | record_count | reserved | version | 0x0010 + +--------------------------------------------+ + | record_id[0] | 0x0018 + +--------------------------------------------+ + | record_id[1] | 0x0020 + +--------------------------------------------+ + | record_id[...] | + +--------------------------------------------+ + | record_id[N] | 0x1FF8 + +--------------------------------------------+ + +The 'magic' field contains the value 0x524F545354535245. + +The 'record_size' field contains the value 0x2000, 8KiB. + +The 'record_offset' field points to the first record_id in the array, +0x0018. + +The 'version' field contains 0x0100, the first version. + +The 'record_count' field contains the number of valid records in the +backend storage. + +The 'record_id' array fields are the 64-bit record identifiers of the +CPER record in the corresponding slot. Stated differently, the +location of a CPER record_id in the record_id[] array provides the +slot index for the corresponding record in the backend storage. + +Note that, for example, with a backend storage less than 8MiB, slot 0 +contains the header, so the record_id[0] will never contain a valid +CPER record_id. Instead slot 1 is the first available slot and thus +record_id_[1] may contain a CPER. + +A 'record_id' of all 0s or all 1s indicates an invalid record (ie. the +slot is available). + + +References +---------- + +[1] "Advanced Configuration and Power Interface Specification", + version 4.0, June 2009. + +[2] "Unified Extensible Firmware Interface Specification", + version 2.1, October 2008. 
+ +[3] "Windows Hardware Error Architecture", specifically + "Error Record Persistence Mechanism". diff --git a/docs/specs/acpi_mem_hotplug.rst b/docs/specs/acpi_mem_hotplug.rst new file mode 100644 index 0000000000..069819bc3e --- /dev/null +++ b/docs/specs/acpi_mem_hotplug.rst @@ -0,0 +1,128 @@ +QEMU<->ACPI BIOS memory hotplug interface +========================================= + +ACPI BIOS GPE.3 handler is dedicated for notifying OS about memory hot-add +and hot-remove events. + +Memory hot-plug interface (IO port 0xa00-0xa17, 1-4 byte access) +---------------------------------------------------------------- + +Read access behavior +^^^^^^^^^^^^^^^^^^^^ + +[0x0-0x3] + Lo part of memory device phys address +[0x4-0x7] + Hi part of memory device phys address +[0x8-0xb] + Lo part of memory device size in bytes +[0xc-0xf] + Hi part of memory device size in bytes +[0x10-0x13] + Memory device proximity domain +[0x14] + Memory device status fields + + bits: + + 0: + Device is enabled and may be used by guest + 1: + Device insert event, used to distinguish device for which + no device check event to OSPM was issued. + It's valid only when bit 1 is set. + 2: + Device remove event, used to distinguish device for which + no device eject request to OSPM was issued. + 3-7: + reserved and should be ignored by OSPM + +[0x15-0x17] + reserved + +Write access behavior +^^^^^^^^^^^^^^^^^^^^^ + + +[0x0-0x3] + Memory device slot selector, selects active memory device. + All following accesses to other registers in 0xa00-0xa17 + region will read/store data from/to selected memory device. +[0x4-0x7] + OST event code reported by OSPM +[0x8-0xb] + OST status code reported by OSPM +[0xc-0x13] + reserved, writes into it are ignored +[0x14] + Memory device control fields + + bits: + + 0: + reserved, OSPM must clear it before writing to register. + Due to BUG in versions prior 2.4 that field isn't cleared + when other fields are written. Keep it reserved and don't + try to reuse it. + 1: + if set to 1 clears device insert event, set by OSPM + after it has emitted device check event for the + selected memory device + 2: + if set to 1 clears device remove event, set by OSPM + after it has emitted device eject request for the + selected memory device + 3: + if set to 1 initiates device eject, set by OSPM when it + triggers memory device removal and calls _EJ0 method + 4-7: + reserved, OSPM must clear them before writing to register + +Selecting memory device slot beyond present range has no effect on platform: + +- write accesses to memory hot-plug registers not documented above are ignored +- read accesses to memory hot-plug registers not documented above return + all bits set to 1. + +Memory hot remove process diagram +--------------------------------- + +:: + + +-------------+ +-----------------------+ +------------------+ + | 1. QEMU | | 2. QEMU | |3. 
QEMU | + | device_del +---->+ device unplug request +----->+Send SCI to guest,| + | | | cb | |return control to | + | | | | |management | + +-------------+ +-----------------------+ +------------------+ + + +---------------------------------------------------------------------+ + + +---------------------+ +-------------------------+ + | OSPM: | remove event | OSPM: | + | send Eject Request, | | Scan memory devices | + | clear remove event +<-------------+ for event flags | + | | | | + +---------------------+ +-------------------------+ + | + | + +---------v--------+ +-----------------------+ + | Guest OS: | success | OSPM: | + | process Ejection +----------->+ Execute _EJ0 method, | + | request | | set eject bit in flags| + +------------------+ +-----------------------+ + |failure | + v v + +------------------------+ +-----------------------+ + | OSPM: | | QEMU: | + | set OST event & status | | call device unplug cb | + | fields | | | + +------------------------+ +-----------------------+ + | | + v v + +------------------+ +-------------------+ + |QEMU: | |QEMU: | + |Send OST QMP event| |Send device deleted| + | | |QMP event | + +------------------+ | | + +-------------------+ diff --git a/docs/specs/acpi_mem_hotplug.txt b/docs/specs/acpi_mem_hotplug.txt deleted file mode 100644 index 3df3620ce4..0000000000 --- a/docs/specs/acpi_mem_hotplug.txt +++ /dev/null @@ -1,94 +0,0 @@ -QEMU<->ACPI BIOS memory hotplug interface --------------------------------------- - -ACPI BIOS GPE.3 handler is dedicated for notifying OS about memory hot-add -and hot-remove events. - -Memory hot-plug interface (IO port 0xa00-0xa17, 1-4 byte access): ---------------------------------------------------------------- -0xa00: - read access: - [0x0-0x3] Lo part of memory device phys address - [0x4-0x7] Hi part of memory device phys address - [0x8-0xb] Lo part of memory device size in bytes - [0xc-0xf] Hi part of memory device size in bytes - [0x10-0x13] Memory device proximity domain - [0x14] Memory device status fields - bits: - 0: Device is enabled and may be used by guest - 1: Device insert event, used to distinguish device for which - no device check event to OSPM was issued. - It's valid only when bit 1 is set. - 2: Device remove event, used to distinguish device for which - no device eject request to OSPM was issued. - 3-7: reserved and should be ignored by OSPM - [0x15-0x17] reserved - - write access: - [0x0-0x3] Memory device slot selector, selects active memory device. - All following accesses to other registers in 0xa00-0xa17 - region will read/store data from/to selected memory device. - [0x4-0x7] OST event code reported by OSPM - [0x8-0xb] OST status code reported by OSPM - [0xc-0x13] reserved, writes into it are ignored - [0x14] Memory device control fields - bits: - 0: reserved, OSPM must clear it before writing to register. - Due to BUG in versions prior 2.4 that field isn't cleared - when other fields are written. Keep it reserved and don't - try to reuse it. 
- 1: if set to 1 clears device insert event, set by OSPM - after it has emitted device check event for the - selected memory device - 2: if set to 1 clears device remove event, set by OSPM - after it has emitted device eject request for the - selected memory device - 3: if set to 1 initiates device eject, set by OSPM when it - triggers memory device removal and calls _EJ0 method - 4-7: reserved, OSPM must clear them before writing to register - -Selecting memory device slot beyond present range has no effect on platform: - - write accesses to memory hot-plug registers not documented above are - ignored - - read accesses to memory hot-plug registers not documented above return - all bits set to 1. - -Memory hot remove process diagram: ----------------------------------- - +-------------+     +-----------------------+      +------------------+      - |  1. QEMU    |     | 2. QEMU               |      |3. QEMU           |      - |  device_del +---->+ device unplug request +----->+Send SCI to guest,|      - |             |     |         cb            |      |return control to |      - +-------------+     +-----------------------+      |management        |      -                                                    +------------------+      -                                                                              - +---------------------------------------------------------------------+      -                                                                              - +---------------------+              +-------------------------+             - | OSPM:               | remove event | OSPM:                   |             - | send Eject Request, |              | Scan memory devices     |             - | clear remove event  +<-------------+ for event flags         |             - |                     |              |                         |             - +---------------------+              +-------------------------+             -           |                                                                  -           |                                                                  - +---------v--------+            +-----------------------+                    - | Guest OS:        |  success   | OSPM:                 |                    - | process Ejection +----------->+ Execute _EJ0 method,  |                    - | request          |            | set eject bit in flags|                    - +------------------+            +-----------------------+                    -           |failure                         |                                 -           v                                v                                 - +------------------------+      +-----------------------+                    - | OSPM:                  |      | QEMU:                 |                    - | set OST event & status |      | call device unplug cb |                    - | fields                 |      |                       |                    - +------------------------+      +-----------------------+                    -          |                                  |                                -          v                                  v                                - +------------------+              +-------------------+                      - |QEMU:             |              |QEMU:              |                      - |Send OST QMP event|              |Send device deleted|                      - |                  |              |QMP event          |                      - 
+------------------+              |                   |                      -                                   +-------------------+ diff --git a/docs/specs/acpi_nvdimm.rst b/docs/specs/acpi_nvdimm.rst new file mode 100644 index 0000000000..ab0335253d --- /dev/null +++ b/docs/specs/acpi_nvdimm.rst @@ -0,0 +1,228 @@ +QEMU<->ACPI BIOS NVDIMM interface +================================= + +QEMU supports NVDIMM via ACPI. This document describes the basic concepts of +NVDIMM ACPI and the interface between QEMU and the ACPI BIOS. + +NVDIMM ACPI Background +---------------------- + +NVDIMM is introduced in ACPI 6.0 which defines an NVDIMM root device under +_SB scope with a _HID of "ACPI0012". For each NVDIMM present or intended +to be supported by platform, platform firmware also exposes an ACPI +Namespace Device under the root device. + +The NVDIMM child devices under the NVDIMM root device are defined with _ADR +corresponding to the NFIT device handle. The NVDIMM root device and the +NVDIMM devices can have device specific methods (_DSM) to provide additional +functions specific to a particular NVDIMM implementation. + +This is an example from ACPI 6.0, a platform contains one NVDIMM:: + + Scope (\_SB){ + Device (NVDR) // Root device + { + Name (_HID, "ACPI0012") + Method (_STA) {...} + Method (_FIT) {...} + Method (_DSM, ...) {...} + Device (NVD) + { + Name(_ADR, h) //where h is NFIT Device Handle for this NVDIMM + Method (_DSM, ...) {...} + } + } + } + +Methods supported on both NVDIMM root device and NVDIMM device +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +_DSM (Device Specific Method) + It is a control method that enables devices to provide device specific + control functions that are consumed by the device driver. + The NVDIMM DSM specification can be found at + http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf + + Arguments: + + Arg0 + A Buffer containing a UUID (16 Bytes) + Arg1 + An Integer containing the Revision ID (4 Bytes) + Arg2 + An Integer containing the Function Index (4 Bytes) + Arg3 + A package containing parameters for the function specified by the + UUID, Revision ID, and Function Index + + Return Value: + + If Function Index = 0, a Buffer containing a function index bitfield. + Otherwise, the return value and type depends on the UUID, revision ID + and function index which are described in the DSM specification. + +Methods on NVDIMM ROOT Device +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +_FIT(Firmware Interface Table) + It evaluates to a buffer returning data in the format of a series of NFIT + Type Structure. + + Arguments: None + + Return Value: + A Buffer containing a list of NFIT Type structure entries. + + The detailed definition of the structure can be found at ACPI 6.0: 5.2.25 + NVDIMM Firmware Interface Table (NFIT). + +QEMU NVDIMM Implementation +-------------------------- + +QEMU uses 4 bytes IO Port starting from 0x0a18 and a RAM-based memory page +for NVDIMM ACPI. + +Memory: + QEMU uses BIOS Linker/loader feature to ask BIOS to allocate a memory + page and dynamically patch its address into an int32 object named "MEMA" + in ACPI. + + This page is RAM-based and it is used to transfer data between _DSM + method and QEMU. If ACPI has control, this pages is owned by ACPI which + writes _DSM input data to it, otherwise, it is owned by QEMU which + emulates _DSM access and writes the output data to it. + + ACPI writes _DSM Input Data (based on the offset in the page): + + [0x0 - 0x3] + 4 bytes, NVDIMM Device Handle. 
+ + The handle is completely QEMU internal thing, the values in + range [1, 0xFFFF] indicate nvdimm device. Other values are + reserved for other purposes. + + Reserved handles: + + - 0 is reserved for nvdimm root device named NVDR. + - 0x10000 is reserved for QEMU internal DSM function called on + the root device. + + [0x4 - 0x7] + 4 bytes, Revision ID, that is the Arg1 of _DSM method. + + [0x8 - 0xB] + 4 bytes. Function Index, that is the Arg2 of _DSM method. + + [0xC - 0xFFF] + 4084 bytes, the Arg3 of _DSM method. + + QEMU writes Output Data (based on the offset in the page): + + [0x0 - 0x3] + 4 bytes, the length of result + + [0x4 - 0xFFF] + 4092 bytes, the DSM result filled by QEMU + +IO Port 0x0a18 - 0xa1b: + ACPI writes the address of the memory page allocated by BIOS to this + port then QEMU gets the control and fills the result in the memory page. + + Write Access: + + [0x0a18 - 0xa1b] + 4 bytes, the address of the memory page allocated by BIOS. + +_DSM process diagram +-------------------- + +"MEMA" indicates the address of memory page allocated by BIOS. + +:: + + +----------------------+ +-----------------------+ + | 1. OSPM | | 2. OSPM | + | save _DSM input data | | write "MEMA" to | Exit to QEMU + | to the page +----->| IO port 0x0a18 +------------+ + | indicated by "MEMA" | | | | + +----------------------+ +-----------------------+ | + | + v + +--------------------+ +-----------+ +------------------+--------+ + | 5 QEMU | | 4 QEMU | | 3. QEMU | + | write _DSM result | | emulate | | get _DSM input data from | + | to the page +<------+ _DSM +<-----+ the page indicated by the | + | | | | | value from the IO port | + +--------+-----------+ +-----------+ +---------------------------+ + | + | Enter Guest + | + v + +--------------------------+ +--------------+ + | 6 OSPM | | 7 OSPM | + | result size is returned | | _DSM return | + | by reading DSM +----->+ | + | result from the page | | | + +--------------------------+ +--------------+ + +NVDIMM hotplug +-------------- + +ACPI BIOS GPE.4 handler is dedicated for notifying OS about nvdimm device +hot-add event. + +QEMU internal use only _DSM functions +------------------------------------- + +Read FIT +^^^^^^^^ + +_FIT method uses _DSM method to fetch NFIT structures blob from QEMU +in 1 page sized increments which are then concatenated and returned +as _FIT method result. 
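The parameter and result layout for this function is given just below; as a conceptual illustration of the loop itself, here is a C sketch (the real consumer is the _FIT AML method, so this is only a model). read_fit_chunk() is an invented stand-in for issuing the DSM call, wired to a dummy blob so the sketch compiles and runs; the zero-length terminator and the restart on status 0x100 follow the description that comes next::

    /* Conceptual model of the _FIT paging loop, not QEMU or firmware code. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define FIT_STATUS_OK       0x0
    #define FIT_STATUS_CHANGED  0x100      /* NFIT changed while reading */

    /* Invented stand-in for the DSM "Read FIT" call: copies FIT bytes
     * starting at 'offset' into buf and reports the count in *len.  Here it
     * serves a 10-byte dummy blob in 4-byte "pages". */
    static uint32_t read_fit_chunk(uint32_t offset, uint8_t *buf, uint32_t *len)
    {
        static const uint8_t dummy_fit[10];
        uint32_t left = offset < sizeof(dummy_fit) ? sizeof(dummy_fit) - offset : 0;

        *len = left > 4 ? 4 : left;
        memcpy(buf, dummy_fit + offset, *len);
        return FIT_STATUS_OK;
    }

    int main(void)
    {
        uint8_t page[4096], *fit = NULL;
        uint32_t offset = 0, len, status;
        size_t total = 0;

        for (;;) {
            status = read_fit_chunk(offset, page, &len);
            if (status == FIT_STATUS_CHANGED) {
                free(fit);                 /* start over from offset 0 */
                fit = NULL;
                total = 0;
                offset = 0;
                continue;
            }
            if (status != FIT_STATUS_OK) {
                free(fit);
                return 1;
            }
            if (len == 0) {
                break;                     /* zero-size chunk ends the FIT */
            }
            fit = realloc(fit, total + len);   /* error handling elided */
            memcpy(fit + total, page, len);
            total += len;
            offset += len;                 /* next offset = current + size read */
        }
        printf("concatenated FIT: %zu bytes\n", total);
        free(fit);
        return 0;
    }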
+ +Input parameters: + +Arg0 + UUID {set to 648B9CF2-CDA1-4312-8AD9-49C4AF32BD62} +Arg1 + Revision ID (set to 1) +Arg2 + Function Index, 0x1 +Arg3 + A package containing a buffer whose layout is as follows: + + +----------+--------+--------+-------------------------------------------+ + | Field | Length | Offset | Description | + +----------+--------+--------+-------------------------------------------+ + | offset | 4 | 0 | offset in QEMU's NFIT structures blob to | + | | | | read from | + +----------+--------+--------+-------------------------------------------+ + +Output layout in the dsm memory page: + + +----------+--------+--------+-------------------------------------------+ + | Field | Length | Offset | Description | + +----------+--------+--------+-------------------------------------------+ + | length | 4 | 0 | length of entire returned data | + | | | | (including this header) | + +----------+--------+--------+-------------------------------------------+ + | | | | return status codes | + | | | | | + | | | | - 0x0 - success | + | | | | - 0x100 - error caused by NFIT update | + | status | 4 | 4 | while read by _FIT wasn't completed | + | | | | - other codes follow Chapter 3 in | + | | | | DSM Spec Rev1 | + +----------+--------+--------+-------------------------------------------+ + | fit data | Varies | 8 | contains FIT data. This field is present | + | | | | if status field is 0. | + +----------+--------+--------+-------------------------------------------+ + +The FIT offset is maintained by the OSPM itself, current offset plus +the size of the fit data returned by the function is the next offset +OSPM should read. When all FIT data has been read out, zero fit data +size is returned. + +If it returns status code 0x100, OSPM should restart to read FIT (read +from offset 0 again). diff --git a/docs/specs/acpi_nvdimm.txt b/docs/specs/acpi_nvdimm.txt deleted file mode 100644 index 3ec42ecbce..0000000000 --- a/docs/specs/acpi_nvdimm.txt +++ /dev/null @@ -1,188 +0,0 @@ -QEMU<->ACPI BIOS NVDIMM interface ---------------------------------- - -QEMU supports NVDIMM via ACPI. This document describes the basic concepts of -NVDIMM ACPI and the interface between QEMU and the ACPI BIOS. - -NVDIMM ACPI Background ----------------------- -NVDIMM is introduced in ACPI 6.0 which defines an NVDIMM root device under -_SB scope with a _HID of “ACPI0012”. For each NVDIMM present or intended -to be supported by platform, platform firmware also exposes an ACPI -Namespace Device under the root device. - -The NVDIMM child devices under the NVDIMM root device are defined with _ADR -corresponding to the NFIT device handle. The NVDIMM root device and the -NVDIMM devices can have device specific methods (_DSM) to provide additional -functions specific to a particular NVDIMM implementation. - -This is an example from ACPI 6.0, a platform contains one NVDIMM: - -Scope (\_SB){ - Device (NVDR) // Root device - { - Name (_HID, “ACPI0012”) - Method (_STA) {...} - Method (_FIT) {...} - Method (_DSM, ...) {...} - Device (NVD) - { - Name(_ADR, h) //where h is NFIT Device Handle for this NVDIMM - Method (_DSM, ...) {...} - } - } -} - -Method supported on both NVDIMM root device and NVDIMM device -_DSM (Device Specific Method) - It is a control method that enables devices to provide device specific - control functions that are consumed by the device driver. 
- The NVDIMM DSM specification can be found at: - http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf - - Arguments: - Arg0 – A Buffer containing a UUID (16 Bytes) - Arg1 – An Integer containing the Revision ID (4 Bytes) - Arg2 – An Integer containing the Function Index (4 Bytes) - Arg3 – A package containing parameters for the function specified by the - UUID, Revision ID, and Function Index - - Return Value: - If Function Index = 0, a Buffer containing a function index bitfield. - Otherwise, the return value and type depends on the UUID, revision ID - and function index which are described in the DSM specification. - -Methods on NVDIMM ROOT Device -_FIT(Firmware Interface Table) - It evaluates to a buffer returning data in the format of a series of NFIT - Type Structure. - - Arguments: None - - Return Value: - A Buffer containing a list of NFIT Type structure entries. - - The detailed definition of the structure can be found at ACPI 6.0: 5.2.25 - NVDIMM Firmware Interface Table (NFIT). - -QEMU NVDIMM Implementation -========================== -QEMU uses 4 bytes IO Port starting from 0x0a18 and a RAM-based memory page -for NVDIMM ACPI. - -Memory: - QEMU uses BIOS Linker/loader feature to ask BIOS to allocate a memory - page and dynamically patch its address into an int32 object named "MEMA" - in ACPI. - - This page is RAM-based and it is used to transfer data between _DSM - method and QEMU. If ACPI has control, this pages is owned by ACPI which - writes _DSM input data to it, otherwise, it is owned by QEMU which - emulates _DSM access and writes the output data to it. - - ACPI writes _DSM Input Data (based on the offset in the page): - [0x0 - 0x3]: 4 bytes, NVDIMM Device Handle. - - The handle is completely QEMU internal thing, the values in - range [1, 0xFFFF] indicate nvdimm device. Other values are - reserved for other purposes. - - Reserved handles: - 0 is reserved for nvdimm root device named NVDR. - 0x10000 is reserved for QEMU internal DSM function called on - the root device. - - [0x4 - 0x7]: 4 bytes, Revision ID, that is the Arg1 of _DSM method. - [0x8 - 0xB]: 4 bytes. Function Index, that is the Arg2 of _DSM method. - [0xC - 0xFFF]: 4084 bytes, the Arg3 of _DSM method. - - QEMU Writes Output Data (based on the offset in the page): - [0x0 - 0x3]: 4 bytes, the length of result - [0x4 - 0xFFF]: 4092 bytes, the DSM result filled by QEMU - -IO Port 0x0a18 - 0xa1b: - ACPI writes the address of the memory page allocated by BIOS to this - port then QEMU gets the control and fills the result in the memory page. - - write Access: - [0x0a18 - 0xa1b]: 4 bytes, the address of the memory page allocated - by BIOS. - -_DSM process diagram: ---------------------- -"MEMA" indicates the address of memory page allocated by BIOS. - - +----------------------+   +-----------------------+ - |   1. OSPM   |      | 2. OSPM | - | save _DSM input data | | write "MEMA" to | Exit to QEMU - | to the page +----->| IO port 0x0a18 +------------+ - | indicated by "MEMA" | | | | - +----------------------+ +-----------------------+ | -  | -  v - +------------- ----+ +-----------+ +------------------+--------+ - | 5 QEMU | | 4 QEMU | | 3. 
QEMU | - | write _DSM result | | emulate | | get _DSM input data from | - | to the page +<------+ _DSM +<-----+ the page indicated by the | - | | | | | value from the IO port | - +--------+-----------+ +-----------+ +---------------------------+ - | - | Enter Guest - | - v - +--------------------------+ +--------------+ - | 6 OSPM | | 7 OSPM | - | result size is returned | | _DSM return | - | by reading DSM +----->+ | - | result from the page | | | - +--------------------------+ +--------------+ - -NVDIMM hotplug --------------- -ACPI BIOS GPE.4 handler is dedicated for notifying OS about nvdimm device -hot-add event. - -QEMU internal use only _DSM function ------------------------------------- -1) Read FIT - _FIT method uses _DSM method to fetch NFIT structures blob from QEMU - in 1 page sized increments which are then concatenated and returned - as _FIT method result. - - Input parameters: - Arg0 – UUID {set to 648B9CF2-CDA1-4312-8AD9-49C4AF32BD62} - Arg1 – Revision ID (set to 1) - Arg2 - Function Index, 0x1 - Arg3 - A package containing a buffer whose layout is as follows: - - +----------+--------+--------+-------------------------------------------+ - | Field | Length | Offset | Description | - +----------+--------+--------+-------------------------------------------+ - | offset | 4 | 0 | offset in QEMU's NFIT structures blob to | - | | | | read from | - +----------+--------+--------+-------------------------------------------+ - - Output layout in the dsm memory page: - +----------+--------+--------+-------------------------------------------+ - | Field | Length | Offset | Description | - +----------+--------+--------+-------------------------------------------+ - | length | 4 | 0 | length of entire returned data | - | | | | (including this header) | - +----------+-----------------+-------------------------------------------+ - | | | | return status codes | - | | | | 0x0 - success | - | | | | 0x100 - error caused by NFIT update while | - | status | 4 | 4 | read by _FIT wasn't completed, other | - | | | | codes follow Chapter 3 in DSM Spec Rev1 | - +----------+-----------------+-------------------------------------------+ - | fit data | Varies | 8 | contains FIT data, this field is present | - | | | | if status field is 0; | - +----------+--------+--------+-------------------------------------------+ - - The FIT offset is maintained by the OSPM itself, current offset plus - the size of the fit data returned by the function is the next offset - OSPM should read. When all FIT data has been read out, zero fit data - size is returned. - - If it returns status code 0x100, OSPM should restart to read FIT (read - from offset 0 again). diff --git a/docs/specs/acpi_pci_hotplug.txt b/docs/specs/acpi_pci_hotplug.rst similarity index 51% rename from docs/specs/acpi_pci_hotplug.txt rename to docs/specs/acpi_pci_hotplug.rst index a839434f31..685bc5c322 100644 --- a/docs/specs/acpi_pci_hotplug.txt +++ b/docs/specs/acpi_pci_hotplug.rst @@ -1,45 +1,48 @@ QEMU<->ACPI BIOS PCI hotplug interface --------------------------------------- +====================================== QEMU supports PCI hotplug via ACPI, for PCI bus 0. This document describes the interface between QEMU and the ACPI BIOS. -ACPI GPE block (IO ports 0xafe0-0xafe3, byte access): ------------------------------------------ +ACPI GPE block (IO ports 0xafe0-0xafe3, byte access) +---------------------------------------------------- Generic ACPI GPE block. Bit 1 (GPE.1) used to notify PCI hotplug/eject event to ACPI BIOS, via SCI interrupt. 
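In QEMU the GPE.1 handler itself is AML generated by the hypervisor, but its logic is small enough to restate as a hedged C sketch using the notification and eject registers described in the sections that follow. The printf() calls stand in for the ACPI Notify() operations, and <sys/io.h> with ioperm() are assumptions of the sketch::

    /* Conceptual rendering of the GPE.1 hotplug handler, not real AML. */
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/io.h>

    #define PCI_UP    0xae00   /* RO: slots with a pending hot-add, bit per slot */
    #define PCI_DOWN  0xae04   /* RO: slots with a pending removal, bit per slot */
    #define PCI_EJECT 0xae08   /* write: bit per slot to complete the eject */

    static void handle_gpe1(void)
    {
        uint32_t up = inl(PCI_UP);
        uint32_t down = inl(PCI_DOWN);

        for (int slot = 0; slot < 32; slot++) {
            if (up & (1u << slot)) {
                printf("slot %d: device check (hot-add)\n", slot);
            }
            if (down & (1u << slot)) {
                printf("slot %d: eject request\n", slot);
                /* once the OS has released the device, _EJ0 writes the
                 * slot bit back to let QEMU finish the removal */
                outl(1u << slot, PCI_EJECT);
            }
        }
    }

    int main(void)
    {
        if (ioperm(PCI_UP, 16, 1) < 0) {   /* 0xae00-0xae0f, root, guest only */
            perror("ioperm");
            return 1;
        }
        handle_gpe1();
        return 0;
    }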
-PCI slot injection notification pending (IO port 0xae00-0xae03, 4-byte access): ---------------------------------------------------------------- +PCI slot injection notification pending (IO port 0xae00-0xae03, 4-byte access) +------------------------------------------------------------------------------ + Slot injection notification pending. One bit per slot. Read by ACPI BIOS GPE.1 handler to notify OS of injection events. Read-only. -PCI slot removal notification (IO port 0xae04-0xae07, 4-byte access): ------------------------------------------------------ +PCI slot removal notification (IO port 0xae04-0xae07, 4-byte access) +-------------------------------------------------------------------- + Slot removal notification pending. One bit per slot. Read by ACPI BIOS GPE.1 handler to notify OS of removal events. Read-only. -PCI device eject (IO port 0xae08-0xae0b, 4-byte access): ----------------------------------------- +PCI device eject (IO port 0xae08-0xae0b, 4-byte access) +------------------------------------------------------- Write: Used by ACPI BIOS _EJ0 method to request device removal. One bit per slot. Read: Hotplug features register. Used by platform to identify features available. Current base feature set (no bits set): - - Read-only "up" register @0xae00, 4-byte access, bit per slot - - Read-only "down" register @0xae04, 4-byte access, bit per slot - - Read/write "eject" register @0xae08, 4-byte access, - write: bit per slot eject, read: hotplug feature set - - Read-only hotplug capable register @0xae0c, 4-byte access, bit per slot -PCI removability status (IO port 0xae0c-0xae0f, 4-byte access): ------------------------------------------------ +- Read-only "up" register @0xae00, 4-byte access, bit per slot +- Read-only "down" register @0xae04, 4-byte access, bit per slot +- Read/write "eject" register @0xae08, 4-byte access, + write: bit per slot eject, read: hotplug feature set +- Read-only hotplug capable register @0xae0c, 4-byte access, bit per slot + +PCI removability status (IO port 0xae0c-0xae0f, 4-byte access) +-------------------------------------------------------------- Used by ACPI BIOS _RMV method to indicate removability status to OS. One -bit per slot. Read-only +bit per slot. Read-only. diff --git a/docs/specs/fw_cfg.txt b/docs/specs/fw_cfg.rst similarity index 58% rename from docs/specs/fw_cfg.txt rename to docs/specs/fw_cfg.rst index 3e6d586f66..5ad47a901c 100644 --- a/docs/specs/fw_cfg.txt +++ b/docs/specs/fw_cfg.rst @@ -1,7 +1,9 @@ +=========================================== QEMU Firmware Configuration (fw_cfg) Device =========================================== -= Guest-side Hardware Interface = +Guest-side Hardware Interface +============================= This hardware interface allows the guest to retrieve various data items (blobs) that can influence how the firmware configures itself, or may @@ -9,7 +11,8 @@ contain tables to be installed for the guest OS. Examples include device boot order, ACPI and SMBIOS tables, virtual machine UUID, SMP and NUMA information, kernel/initrd images for direct (Linux) kernel booting, etc. -== Selector (Control) Register == +Selector (Control) Register +--------------------------- * Write only * Location: platform dependent (IOport or MMIO) @@ -30,10 +33,12 @@ of 1 means the item's data can be overwritten by writes to the data register. In other words, configuration write mode is enabled when the selector value is between 0x4000-0x7fff or 0xc000-0xffff. 
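As a concrete guest-side illustration of the selector, the short program below picks the "signature" item and reads it back one byte at a time through the data register. It relies on the x86 port numbers listed under "Register Locations" and the signature item described under "Firmware Configuration Items" further down; <sys/io.h> and ioperm() are assumptions of the sketch, and it must run as root inside a QEMU guest::

    /* Select FW_CFG_SIGNATURE and read the four signature bytes. */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/io.h>

    #define FW_CFG_PORT_SEL   0x510   /* selector (control) register, 16-bit */
    #define FW_CFG_PORT_DATA  0x511   /* data register, 8-bit */
    #define FW_CFG_SIGNATURE  0x0000

    int main(void)
    {
        char sig[5] = { 0 };

        if (ioperm(FW_CFG_PORT_SEL, 2, 1) < 0) {
            perror("ioperm");                      /* needs root in the guest */
            return 1;
        }

        outw(FW_CFG_SIGNATURE, FW_CFG_PORT_SEL);   /* selecting resets the offset */
        for (int i = 0; i < 4; i++) {
            sig[i] = inb(FW_CFG_PORT_DATA);        /* sequential byte reads */
        }

        printf("fw_cfg signature: %s (%s)\n", sig,
               memcmp(sig, "QEMU", 4) == 0 ? "present" : "not found");
        return 0;
    }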
-NOTE: As of QEMU v2.4, writes to the fw_cfg data register are no +.. NOTE:: + As of QEMU v2.4, writes to the fw_cfg data register are no longer supported, and will be ignored (treated as no-ops)! -NOTE: As of QEMU v2.9, writes are reinstated, but only through the DMA +.. NOTE:: + As of QEMU v2.9, writes are reinstated, but only through the DMA interface (see below). Furthermore, writeability of any specific item is governed independently of Bit14 in the selector key value. @@ -45,17 +50,19 @@ items are accessed with a selector value between 0x0000-0x7fff, and architecture specific configuration items are accessed with a selector value between 0x8000-0xffff. -== Data Register == +Data Register +------------- * Read/Write (writes ignored as of QEMU v2.4, but see the DMA interface) -* Location: platform dependent (IOport [*] or MMIO) +* Location: platform dependent (IOport [#]_ or MMIO) * Width: 8-bit (if IOport), 8/16/32/64-bit (if MMIO) * Endianness: string-preserving -[*] On platforms where the data register is exposed as an IOport, its -port number will always be one greater than the port number of the -selector register. In other words, the two ports overlap, and can not -be mapped separately. +.. [#] + On platforms where the data register is exposed as an IOport, its + port number will always be one greater than the port number of the + selector register. In other words, the two ports overlap, and can not + be mapped separately. The data register allows access to an array of bytes for each firmware configuration data item. The specific item is selected by writing to @@ -74,91 +81,103 @@ An N-byte wide read of the data register will return the next available N bytes of the selected firmware configuration item, as a substring, in increasing address order, similar to memcpy(). -== Register Locations == +Register Locations +------------------ -=== x86, x86_64 Register Locations === +x86, x86_64 + * Selector Register IOport: 0x510 + * Data Register IOport: 0x511 + * DMA Address IOport: 0x514 -Selector Register IOport: 0x510 -Data Register IOport: 0x511 -DMA Address IOport: 0x514 +Arm + * Selector Register address: Base + 8 (2 bytes) + * Data Register address: Base + 0 (8 bytes) + * DMA Address address: Base + 16 (8 bytes) -=== Arm Register Locations === +ACPI Interface +-------------- -Selector Register address: Base + 8 (2 bytes) -Data Register address: Base + 0 (8 bytes) -DMA Address address: Base + 16 (8 bytes) - -== ACPI Interface == - -The fw_cfg device is defined with ACPI ID "QEMU0002". Since we expect +The fw_cfg device is defined with ACPI ID ``QEMU0002``. Since we expect ACPI tables to be passed into the guest through the fw_cfg device itself, the guest-side firmware can not use ACPI to find fw_cfg. However, once the firmware is finished setting up ACPI tables and hands control over to the guest kernel, the latter can use the fw_cfg ACPI node for a more accurate inventory of in-use IOport or MMIO regions. -== Firmware Configuration Items == +Firmware Configuration Items +---------------------------- -=== Signature (Key 0x0000, FW_CFG_SIGNATURE) === +Signature (Key 0x0000, ``FW_CFG_SIGNATURE``) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The presence of the fw_cfg selector and data registers can be verified -by selecting the "signature" item using key 0x0000 (FW_CFG_SIGNATURE), +by selecting the "signature" item using key 0x0000 (``FW_CFG_SIGNATURE``), and reading four bytes from the data register. 
If the fw_cfg device is -present, the four bytes read will contain the characters "QEMU". +present, the four bytes read will contain the characters ``QEMU``. If the DMA interface is available, then reading the DMA Address -Register returns 0x51454d5520434647 ("QEMU CFG" in big-endian format). +Register returns 0x51454d5520434647 (``QEMU CFG`` in big-endian format). -=== Revision / feature bitmap (Key 0x0001, FW_CFG_ID) === +Revision / feature bitmap (Key 0x0001, ``FW_CFG_ID``) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A 32-bit little-endian unsigned int, this item is used to check for enabled features. - - Bit 0: traditional interface. Always set. - - Bit 1: DMA interface. -=== File Directory (Key 0x0019, FW_CFG_FILE_DIR) === +- Bit 0: traditional interface. Always set. +- Bit 1: DMA interface. + +File Directory (Key 0x0019, ``FW_CFG_FILE_DIR``) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. highlight:: c Firmware configuration items stored at selector keys 0x0020 or higher -(FW_CFG_FILE_FIRST or higher) have an associated entry in a directory +(``FW_CFG_FILE_FIRST`` or higher) have an associated entry in a directory structure, which makes it easier for guest-side firmware to identify -and retrieve them. The format of this file directory (from fw_cfg.h in -the QEMU source tree) is shown here, slightly annotated for clarity: +and retrieve them. The format of this file directory (from ``fw_cfg.h`` in +the QEMU source tree) is shown here, slightly annotated for clarity:: -struct FWCfgFiles { /* the entire file directory fw_cfg item */ - uint32_t count; /* number of entries, in big-endian format */ - struct FWCfgFile f[]; /* array of file entries, see below */ -}; + struct FWCfgFiles { /* the entire file directory fw_cfg item */ + uint32_t count; /* number of entries, in big-endian format */ + struct FWCfgFile f[]; /* array of file entries, see below */ + }; -struct FWCfgFile { /* an individual file entry, 64 bytes total */ - uint32_t size; /* size of referenced fw_cfg item, big-endian */ - uint16_t select; /* selector key of fw_cfg item, big-endian */ - uint16_t reserved; - char name[56]; /* fw_cfg item name, NUL-terminated ascii */ -}; + struct FWCfgFile { /* an individual file entry, 64 bytes total */ + uint32_t size; /* size of referenced fw_cfg item, big-endian */ + uint16_t select; /* selector key of fw_cfg item, big-endian */ + uint16_t reserved; + char name[56]; /* fw_cfg item name, NUL-terminated ascii */ + }; -=== All Other Data Items === +All Other Data Items +~~~~~~~~~~~~~~~~~~~~ Please consult the QEMU source for the most up-to-date and authoritative list of selector keys and their respective items' purpose, format and writeability. -=== Ranges === +Ranges +~~~~~~ Theoretically, there may be up to 0x4000 generic firmware configuration items, and up to 0x4000 architecturally specific ones. +=============== =========== Selector Reg. Range Usage ---------------- ----------- +=============== =========== 0x0000 - 0x3fff Generic (0x0000 - 0x3fff, generally RO, possibly RW through - the DMA interface in QEMU v2.9+) + the DMA interface in QEMU v2.9+) 0x4000 - 0x7fff Generic (0x0000 - 0x3fff, RW, ignored in QEMU v2.4+) 0x8000 - 0xbfff Arch. Specific (0x0000 - 0x3fff, generally RO, possibly RW - through the DMA interface in QEMU v2.9+) + through the DMA interface in QEMU v2.9+) 0xc000 - 0xffff Arch. 
Specific (0x0000 - 0x3fff, RW, ignored in v2.4+) +=============== =========== In practice, the number of allowed firmware configuration items depends on the machine type/version. -= Guest-side DMA Interface = +Guest-side DMA Interface +======================== If bit 1 of the feature bitmap is set, the DMA interface is present. This does not replace the existing fw_cfg interface, it is an add-on. This interface @@ -171,68 +190,74 @@ addresses can be triggered with just one write, whereas operations with 64-bit addresses can be triggered with one 64-bit write or two 32-bit writes, starting with the most significant half (at offset 0). -In this register, the physical address of a FWCfgDmaAccess structure in RAM -should be written. This is the format of the FWCfgDmaAccess structure: +In this register, the physical address of a ``FWCfgDmaAccess`` structure in RAM +should be written. This is the format of the ``FWCfgDmaAccess`` structure:: -typedef struct FWCfgDmaAccess { - uint32_t control; - uint32_t length; - uint64_t address; -} FWCfgDmaAccess; + typedef struct FWCfgDmaAccess { + uint32_t control; + uint32_t length; + uint64_t address; + } FWCfgDmaAccess; The fields of the structure are in big endian mode, and the field at the lowest -address is the "control" field. +address is the ``control`` field. -The "control" field has the following bits: - - Bit 0: Error - - Bit 1: Read - - Bit 2: Skip - - Bit 3: Select. The upper 16 bits are the selected index. - - Bit 4: Write +The ``control`` field has the following bits: -When an operation is triggered, if the "control" field has bit 3 set, the +- Bit 0: Error +- Bit 1: Read +- Bit 2: Skip +- Bit 3: Select. The upper 16 bits are the selected index. +- Bit 4: Write + +When an operation is triggered, if the ``control`` field has bit 3 set, the upper 16 bits are interpreted as an index of a firmware configuration item. This has the same effect as writing the selector register. -If the "control" field has bit 1 set, a read operation will be performed. -"length" bytes for the current selector and offset will be copied into the -physical RAM address specified by the "address" field. +If the ``control`` field has bit 1 set, a read operation will be performed. +``length`` bytes for the current selector and offset will be copied into the +physical RAM address specified by the ``address`` field. -If the "control" field has bit 4 set (and not bit 1), a write operation will be -performed. "length" bytes will be copied from the physical RAM address -specified by the "address" field to the current selector and offset. QEMU +If the ``control`` field has bit 4 set (and not bit 1), a write operation will be +performed. ``length`` bytes will be copied from the physical RAM address +specified by the ``address`` field to the current selector and offset. QEMU prevents starting or finishing the write beyond the end of the item associated with the current selector (i.e., the item cannot be resized). Truncated writes are dropped entirely. Writes to read-only items are also rejected. All of these -write errors set bit 0 (the error bit) in the "control" field. +write errors set bit 0 (the error bit) in the ``control`` field. -If the "control" field has bit 2 set (and neither bit 1 nor bit 4), a skip +If the ``control`` field has bit 2 set (and neither bit 1 nor bit 4), a skip operation will be performed. The offset for the current selector will be -advanced "length" bytes. +advanced ``length`` bytes. 
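+
+Putting the pieces above together, a guest-side sketch of a DMA read could
+look as follows. The ``cpu_to_be32()``/``cpu_to_be64()``/``be32_to_cpu()``
+byte-swap helpers and the ``fw_cfg_dma_trigger()`` routine (which writes a
+physical address to the DMA Address Register as described above) are
+assumptions of this sketch rather than part of the interface, and a flat,
+identity-mapped address space is assumed::
+
+   int fw_cfg_dma_read(uint16_t key, void *buf, uint32_t len)
+   {
+       volatile FWCfgDmaAccess access = {
+           /* bit 3: select (key in the upper 16 bits), bit 1: read */
+           .control = cpu_to_be32(((uint32_t)key << 16) | 0x08 | 0x02),
+           .length  = cpu_to_be32(len),
+           .address = cpu_to_be64((uintptr_t)buf),
+       };
+
+       /* Trigger the operation by writing the structure's address. */
+       fw_cfg_dma_trigger((uintptr_t)&access);
+
+       /* Still in progress while any bit other than the error bit is set. */
+       while (be32_to_cpu(access.control) & ~0x01u) {
+           /* busy wait */
+       }
+
+       /* Bit 0 (error) reports whether the transfer failed. */
+       return (be32_to_cpu(access.control) & 0x01) ? -1 : 0;
+   }
+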
-To check the result, read the "control" field: - error bit set -> something went wrong. - all bits cleared -> transfer finished successfully. - otherwise -> transfer still in progress (doesn't happen - today due to implementation not being async, - but may in the future). +To check the result, read the ``control`` field: -= Externally Provided Items = +Error bit set + Something went wrong. +All bits cleared + Transfer finished successfully. +Otherwise + Transfer still in progress + (doesn't happen today due to implementation not being async, + but may in the future). + +Externally Provided Items +========================= Since v2.4, "file" fw_cfg items (i.e., items with selector keys above -FW_CFG_FILE_FIRST, and with a corresponding entry in the fw_cfg file +``FW_CFG_FILE_FIRST``, and with a corresponding entry in the fw_cfg file directory structure) may be inserted via the QEMU command line, using -the following syntax: +the following syntax:: -fw_cfg [name=],file= -Or +Or:: -fw_cfg [name=],string= Since v5.1, QEMU allows some objects to generate fw_cfg-specific content, the content is then associated with a "file" item using the 'gen_id' option -in the command line, using the following syntax: +in the command line, using the following syntax:: -object ,id=,[generator-specific-options] \ -fw_cfg [name=],gen_id= @@ -241,24 +266,24 @@ See QEMU man page for more documentation. Using item_name with plain ASCII characters only is recommended. -Item names beginning with "opt/" are reserved for users. QEMU will +Item names beginning with ``opt/`` are reserved for users. QEMU will never create entries with such names unless explicitly ordered by the user. To avoid clashes among different users, it is strongly recommended -that you use names beginning with opt/RFQDN/, where RFQDN is a reverse +that you use names beginning with ``opt/RFQDN/``, where RFQDN is a reverse fully qualified domain name you control. For instance, if SeaBIOS -wanted to define additional names, the prefix "opt/org.seabios/" would +wanted to define additional names, the prefix ``opt/org.seabios/`` would be appropriate. -For historical reasons, "opt/ovmf/" is reserved for OVMF firmware. +For historical reasons, ``opt/ovmf/`` is reserved for OVMF firmware. -Prefix "opt/org.qemu/" is reserved for QEMU itself. +Prefix ``opt/org.qemu/`` is reserved for QEMU itself. -Use of names not beginning with "opt/" is potentially dangerous and +Use of names not beginning with ``opt/`` is potentially dangerous and entirely unsupported. QEMU will warn if you try. -Use of names not beginning with "opt/" is tolerated with 'gen_id' (that +Use of names not beginning with ``opt/`` is tolerated with 'gen_id' (that is, the warning is suppressed), but you must know exactly what you're doing. diff --git a/docs/specs/index.rst b/docs/specs/index.rst index b7b08ea30d..a58d9311cb 100644 --- a/docs/specs/index.rst +++ b/docs/specs/index.rst @@ -1,5 +1,6 @@ +---------------------------------------------- System Emulation Guest Hardware Specifications -============================================== +---------------------------------------------- This section of the manual contains specifications of guest hardware that is specific to QEMU. @@ -13,3 +14,10 @@ guest hardware that is specific to QEMU. 
acpi_hw_reduced_hotplug tpm acpi_hest_ghes + acpi_cpu_hotplug + acpi_mem_hotplug + acpi_pci_hotplug + acpi_nvdimm + acpi_erst + sev-guest-firmware + fw_cfg diff --git a/docs/specs/pci-ids.txt b/docs/specs/pci-ids.txt index 5e407a6f32..e463c4cb3a 100644 --- a/docs/specs/pci-ids.txt +++ b/docs/specs/pci-ids.txt @@ -22,16 +22,14 @@ maintained as part of the virtio specification. 1af4:1004 SCSI host bus adapter device (legacy) 1af4:1005 entropy generator device (legacy) 1af4:1009 9p filesystem device (legacy) +1af4:1012 vsock device (bug compatibility) -1af4:1041 network device (modern) -1af4:1042 block device (modern) -1af4:1043 console device (modern) -1af4:1044 entropy generator device (modern) -1af4:1045 balloon device (modern) -1af4:1048 SCSI host bus adapter device (modern) -1af4:1049 9p filesystem device (modern) -1af4:1050 virtio gpu device (modern) -1af4:1052 virtio input device (modern) +1af4:1040 Start of ID range for modern virtio devices. The PCI device + to ID is calculated from the virtio device ID by adding the +1af4:10ef 0x1040 offset. The virtio IDs are defined in the virtio + specification. The Linux kernel has a header file with + defines for all virtio IDs (linux/virtio_ids.h), qemu has a + copy in include/standard-headers/. 1af4:10f0 Available for experimental usage without registration. Must get to official ID when the code leaves the test lab (i.e. when seeking @@ -65,6 +63,7 @@ PCI devices (other than virtio): 1b36:000f mdpy (mdev sample device), linux/samples/vfio-mdev/mdpy.c 1b36:0010 PCIe NVMe device (-device nvme) 1b36:0011 PCI PVPanic device (-device pvpanic-pci) +1b36:0012 PCI ACPI ERST device (-device acpi-erst) All these devices are documented in docs/specs. diff --git a/docs/specs/ppc-spapr-hcalls.rst b/docs/specs/ppc-spapr-hcalls.rst new file mode 100644 index 0000000000..6cdcef2026 --- /dev/null +++ b/docs/specs/ppc-spapr-hcalls.rst @@ -0,0 +1,99 @@ +====================== +sPAPR hypervisor calls +====================== + +When used with the ``pseries`` machine type, ``qemu-system-ppc64`` implements +a set of hypervisor calls (a.k.a. hcalls) defined in the Linux on Power +Architecture Reference ([LoPAR]_) document. This document is a subset of the +Power Architecture Platform Reference (PAPR+) specification (IBM internal only), +which is what PowerVM, the IBM proprietary hypervisor, adheres to. + +The subset in LoPAR is selected based on the requirements of Linux as a guest. + +In addition to those calls, we have added our own private hypervisor +calls which are mostly used as a private interface between the firmware +running in the guest and QEMU. + +All those hypercalls start at hcall number 0xf000 which corresponds +to an implementation specific range in PAPR. + +``H_RTAS (0xf000)`` +=================== + +RTAS stands for Run-Time Abstraction Services and is a set of runtime services +generally provided by the firmware inside the guest to the operating system. It +predates the existence of hypervisors (it was originally an extension to Open +Firmware) and is still used by PAPR and LoPAR to provide various services that +are not performance sensitive. + +We currently implement the RTAS services in QEMU itself. The actual RTAS +"firmware" blob in the guest is a small stub of a few instructions which +calls our private H_RTAS hypervisor call to pass the RTAS calls to QEMU. + +Arguments: + + ``r3``: ``H_RTAS (0xf000)`` + + ``r4``: Guest physical address of RTAS parameter block.
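+
+The parameter block referenced by ``r4`` follows the standard RTAS calling
+convention described in [LoPAR]_. As an illustration only (this layout is
+summarised here for convenience and is not defined by this document), it can
+be viewed as::
+
+   struct rtas_parameter_block {
+       uint32_t token;   /* token of the RTAS function being called  */
+       uint32_t nargs;   /* number of input arguments                */
+       uint32_t nret;    /* number of return values                  */
+       uint32_t data[];  /* nargs inputs followed by nret outputs;
+                          * all fields are 32-bit big-endian         */
+   };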
+ +Returns: + + ``H_SUCCESS``: Successfully called the RTAS function (RTAS result will have + been stored in the parameter block). + + ``H_PARAMETER``: Unknown token. + +``H_LOGICAL_MEMOP (0xf001)`` +============================ + +When the guest runs in "real mode" (in powerpc terminology this means with MMU +disabled, i.e. guest effective address equals to guest physical address), it +only has access to a subset of memory and no I/Os. + +PAPR and LoPAR provides a set of hypervisor calls to perform cacheable or +non-cacheable accesses to any guest physical addresses that the +guest can use in order to access IO devices while in real mode. + +This is typically used by the firmware running in the guest. + +However, doing a hypercall for each access is extremely inefficient +(even more so when running KVM) when accessing the frame buffer. In +that case, things like scrolling become unusably slow. + +This hypercall allows the guest to request a "memory op" to be applied +to memory. The supported memory ops at this point are to copy a range +of memory (supports overlap of source and destination) and XOR which +is used by our SLOF firmware to invert the screen. + +Arguments: + + ``r3 ``: ``H_LOGICAL_MEMOP (0xf001)`` + + ``r4``: Guest physical address of destination. + + ``r5``: Guest physical address of source. + + ``r6``: Individual element size, defined by the binary logarithm of the + desired size. Supported values are: + + ``0`` = 1 byte + + ``1`` = 2 bytes + + ``2`` = 4 bytes + + ``3`` = 8 bytes + + ``r7``: Number of elements. + + ``r8``: Operation. Supported values are: + + ``0``: copy + + ``1``: xor + +Returns: + + ``H_SUCCESS``: Success. + + ``H_PARAMETER``: Invalid argument. diff --git a/docs/specs/ppc-spapr-hcalls.txt b/docs/specs/ppc-spapr-hcalls.txt deleted file mode 100644 index 93fe3da91b..0000000000 --- a/docs/specs/ppc-spapr-hcalls.txt +++ /dev/null @@ -1,78 +0,0 @@ -When used with the "pseries" machine type, QEMU-system-ppc64 implements -a set of hypervisor calls using a subset of the server "PAPR" specification -(IBM internal at this point), which is also what IBM's proprietary hypervisor -adheres too. - -The subset is selected based on the requirements of Linux as a guest. - -In addition to those calls, we have added our own private hypervisor -calls which are mostly used as a private interface between the firmware -running in the guest and QEMU. - -All those hypercalls start at hcall number 0xf000 which correspond -to an implementation specific range in PAPR. - -- H_RTAS (0xf000) - -RTAS is a set of runtime services generally provided by the firmware -inside the guest to the operating system. It predates the existence -of hypervisors (it was originally an extension to Open Firmware) and -is still used by PAPR to provide various services that aren't performance -sensitive. - -We currently implement the RTAS services in QEMU itself. The actual RTAS -"firmware" blob in the guest is a small stub of a few instructions which -calls our private H_RTAS hypervisor call to pass the RTAS calls to QEMU. - -Arguments: - - r3 : H_RTAS (0xf000) - r4 : Guest physical address of RTAS parameter block - -Returns: - - H_SUCCESS : Successfully called the RTAS function (RTAS result - will have been stored in the parameter block) - H_PARAMETER : Unknown token - -- H_LOGICAL_MEMOP (0xf001) - -When the guest runs in "real mode" (in powerpc lingua this means -with MMU disabled, ie guest effective == guest physical), it only -has access to a subset of memory and no IOs. 
- -PAPR provides a set of hypervisor calls to perform cacheable or -non-cacheable accesses to any guest physical addresses that the -guest can use in order to access IO devices while in real mode. - -This is typically used by the firmware running in the guest. - -However, doing a hypercall for each access is extremely inefficient -(even more so when running KVM) when accessing the frame buffer. In -that case, things like scrolling become unusably slow. - -This hypercall allows the guest to request a "memory op" to be applied -to memory. The supported memory ops at this point are to copy a range -of memory (supports overlap of source and destination) and XOR which -is used by our SLOF firmware to invert the screen. - -Arguments: - - r3: H_LOGICAL_MEMOP (0xf001) - r4: Guest physical address of destination - r5: Guest physical address of source - r6: Individual element size - 0 = 1 byte - 1 = 2 bytes - 2 = 4 bytes - 3 = 8 bytes - r7: Number of elements - r8: Operation - 0 = copy - 1 = xor - -Returns: - - H_SUCCESS : Success - H_PARAMETER : Invalid argument - diff --git a/docs/specs/ppc-spapr-hotplug.rst b/docs/specs/ppc-spapr-hotplug.rst new file mode 100644 index 0000000000..f84dc55ad9 --- /dev/null +++ b/docs/specs/ppc-spapr-hotplug.rst @@ -0,0 +1,510 @@ +============================= +sPAPR Dynamic Reconfiguration +============================= + +sPAPR or pSeries guests make use of a facility called dynamic reconfiguration +to handle hot plugging of dynamic "physical" resources like PCI cards, or +"logical"/para-virtual resources like memory, CPUs, and "physical" +host-bridges, which are generally managed by the host/hypervisor and provided +to guests as virtualized resources. The specifics of dynamic reconfiguration +are documented extensively in section 13 of the Linux on Power Architecture +Reference document ([LoPAR]_). This document provides a summary of that +information as it applies to the implementation within QEMU. + +Dynamic-reconfiguration Connectors +================================== + +To manage hot plug/unplug of these resources, a firmware abstraction known as +a Dynamic Resource Connector (DRC) is used to assign a particular dynamic +resource to the guest, and provide an interface for the guest to manage +configuration/removal of the resource associated with it. + +Device tree description of DRCs +=============================== + +A set of four Open Firmware device tree array properties are used to describe +the name/index/power-domain/type of each DRC allocated to a guest at +boot time. There may be multiple sets of these arrays, rooted at different +paths in the device tree depending on the type of resource the DRCs manage. + +In some cases, the DRCs themselves may be provided by a dynamic resource, +such as the DRCs managing PCI slots on a hot plugged PHB. In this case the +arrays would be fetched as part of the device tree retrieval interfaces +for hot plugged resources described under :ref:`guest-host-interface`. + +The array properties are described below. Each entry/element in an array +describes the DRC identified by the element in the corresponding position +of ``ibm,drc-indexes``: + +``ibm,drc-names`` +----------------- + + First 4-bytes: big-endian (BE) encoded integer denoting the number of entries. + + Each entry: a NULL-terminated ```` string encoded as a byte array. 
+ + ```` values for logical/virtual resources are defined in the Linux on + Power Architecture Reference ([LoPAR]_) section 13.5.2.4, and basically + consist of the type of the resource followed by a space and a numerical + value that's unique across resources of that type. + + ```` values for "physical" resources such as PCI or VIO devices are + defined as being "location codes", which are the "location labels" of each + encapsulating device, starting from the chassis down to the individual slot + for the device, concatenated by a hyphen. This provides a mapping of + resources to a physical location in a chassis for debugging purposes. For + QEMU, this mapping is less important, so we assign a location code that + conforms to naming specifications, but is simply a location label for the + slot by itself to simplify the implementation. The naming convention for + location labels is documented in detail in the [LoPAR]_ section 12.3.1.5, + and in our case amounts to using ``C`` for PCI/VIO device slots, where + ```` is unique across all PCI/VIO device slots. + +``ibm,drc-indexes`` +------------------- + + First 4-bytes: BE-encoded integer denoting the number of entries. + + Each 4-byte entry: BE-encoded ```` integer that is unique across all + DRCs in the machine. + + ```` is arbitrary, but in the case of QEMU we try to maintain the + convention used to assign them to pSeries guests on pHyp (the hypervisor + portion of PowerVM): + + ``bit[31:28]``: integer encoding of ````, where ```` is: + + ``1`` for CPU resource. + + ``2`` for PHB resource. + + ``3`` for VIO resource. + + ``4`` for PCI resource. + + ``8`` for memory resource. + + ``bit[27:0]``: integer encoding of ````, where ```` is unique + across all resources of specified type. + +``ibm,drc-power-domains`` +------------------------- + + First 4-bytes: BE-encoded integer denoting the number of entries. + + Each 4-byte entry: 32-bit, BE-encoded ```` integer that specifies the + power domain the resource will be assigned to. In the case of QEMU we + associated all resources with a "live insertion" domain, where the power is + assumed to be managed automatically. The integer value for this domain is a + special value of ``-1``. + + +``ibm,drc-types`` +----------------- + + First 4-bytes: BE-encoded integer denoting the number of entries. + + Each entry: a NULL-terminated ```` string encoded as a byte array. + ```` is assigned as follows: + + "CPU" for a CPU. + + "PHB" for a physical host-bridge. + + "SLOT" for a VIO slot. + + "28" for a PCI slot. + + "MEM" for memory resource. + +.. _guest-host-interface: + +Guest->Host interface to manage dynamic resources +================================================= + +Each DRC is given a globally unique DRC index, and resources associated with a +particular DRC are configured/managed by the guest via a number of RTAS calls +which reference individual DRCs based on the DRC index. This can be considered +the guest->host interface. + +``rtas-set-power-level`` +------------------------ + +Set the power level for a specified power domain. + + ``arg[0]``: integer identifying power domain. + + ``arg[1]``: new power level for the domain, ``0-100``. + + ``output[0]``: status, ``0`` on success. + + ``output[1]``: power level after command. + +``rtas-get-power-level`` +------------------------ + +Get the power level for a specified power domain. + + ``arg[0]``: integer identifying power domain. + + ``output[0]``: status, ``0`` on success. + + ``output[1]``: current power level. 
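+
+The DR-related calls below identify a DRC by the index encoding described
+under ``ibm,drc-indexes`` above. As an illustration only (this helper is an
+example and not code taken from QEMU), such an index can be composed as:
+
+.. code-block:: c
+
+   /* Type values as listed for bit[31:28] of ibm,drc-indexes entries. */
+   enum drc_type {
+       DRC_TYPE_CPU = 1,
+       DRC_TYPE_PHB = 2,
+       DRC_TYPE_VIO = 3,
+       DRC_TYPE_PCI = 4,
+       DRC_TYPE_MEM = 8,
+   };
+
+   static uint32_t drc_index(enum drc_type type, uint32_t id)
+   {
+       /* bit[31:28]: resource type, bit[27:0]: id unique within that type */
+       return ((uint32_t)type << 28) | (id & 0x0fffffff);
+   }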
+ +``rtas-set-indicator`` +---------------------- + +Set the state of an indicator or sensor. + + ``arg[0]``: integer identifying sensor/indicator type. + + ``arg[1]``: index of sensor, for DR-related sensors this is generally the DRC + index. + + ``arg[2]``: desired sensor value. + + ``output[0]``: status, ``0`` on success. + +For the purpose of this document we focus on the indicator/sensor types +associated with a DRC. The types are: + +* ``9001``: ``isolation-state``, controls/indicates whether a device has been + made accessible to a guest. Supported sensor values: + + ``0``: ``isolate``, device is made inaccessible by guest OS. + + ``1``: ``unisolate``, device is made available to guest OS. + +* ``9002``: ``dr-indicator``, controls "visual" indicator associated with + device. Supported sensor values: + + ``0``: ``inactive``, resource may be safely removed. + + ``1``: ``active``, resource is in use and cannot be safely removed. + + ``2``: ``identify``, used to visually identify slot for interactive hot plug. + + ``3``: ``action``, in most cases, used in the same manner as identify. + +* ``9003``: ``allocation-state``, generally only used for "logical" DR resources + to request the allocation/deallocation of a resource prior to acquiring it via + ``isolation-state->unisolate``, or after releasing it via + ``isolation-state->isolate``, respectively. For "physical" DR (like PCI + hot plug/unplug) the pre-allocation of the resource is implied and this sensor + is unused. Supported sensor values: + + ``0``: ``unusable``, tell firmware/system the resource can be + unallocated/reclaimed and added back to the system resource pool. + + ``1``: ``usable``, request the resource be allocated/reserved for use by + guest OS. + + ``2``: ``exchange``, used to allocate a spare resource to use for fail-over + in certain situations. Unused in QEMU. + + ``3``: ``recover``, used to reclaim a previously allocated resource that's + not currently allocated to the guest OS. Unused in QEMU. + +``rtas-get-sensor-state:`` +-------------------------- + +Used to read an indicator or sensor value. + + ``arg[0]``: integer identifying sensor/indicator type. + + ``arg[1]``: index of sensor, for DR-related sensors this is generally the DRC + index + + ``output[0]``: status, 0 on success + +For DR-related operations, the only noteworthy sensor is ``dr-entity-sense``, +which has a type value of ``9003``, as ``allocation-state`` does in the case of +``rtas-set-indicator``. The semantics/encodings of the sensor values are +distinct however. + +Supported sensor values for ``dr-entity-sense`` (``9003``) sensor: + + ``0``: empty. + + For physical resources: DRC/slot is empty. + + For logical resources: unused. + + ``1``: present. + + For physical resources: DRC/slot is populated with a device/resource. + + For logical resources: resource has been allocated to the DRC. + + ``2``: unusable. + + For physical resources: unused. + + For logical resources: DRC has no resource allocated to it. + + ``3``: exchange. + + For physical resources: unused. + + For logical resources: resource available for exchange (see + ``allocation-state`` sensor semantics above). + + ``4``: recovery. + + For physical resources: unused. + + For logical resources: resource available for recovery (see + ``allocation-state`` sensor semantics above). + +``rtas-ibm-configure-connector`` +-------------------------------- + +Used to fetch an OpenFirmware device tree description of the resource associated +with a particular DRC. 
+ + ``arg[0]``: guest physical address of 4096-byte work area buffer. + + ``arg[1]``: 0, or address of additional 4096-byte work area buffer; only + non-zero if a prior RTAS response indicated a need for additional memory. + + ``output[0]``: status: + + ``0``: completed transmittal of device tree node. + + ``1``: instruct guest to prepare for next device tree sibling node. + + ``2``: instruct guest to prepare for next device tree child node. + + ``3``: instruct guest to prepare for next device tree property. + + ``4``: instruct guest to ascend to parent device tree node. + + ``5``: instruct guest to provide additional work-area buffer via ``arg[1]``. + + ``990x``: instruct guest that operation took too long and to try again + later. + +The DRC index is encoded in the first 4-bytes of the first work area buffer. +Work area (``wa``) layout, using 4-byte offsets: + + ``wa[0]``: DRC index of the DRC to fetch device tree nodes from. + + ``wa[1]``: ``0`` (hard-coded). + + ``wa[2]``: + + For next-sibling/next-child response: + + ``wa`` offset of null-terminated string denoting the new node's name. + + For next-property response: + + ``wa`` offset of null-terminated string denoting new property's name. + + ``wa[3]``: for next-property response (unused otherwise): + + Byte-length of new property's value. + + ``wa[4]``: for next-property response (unused otherwise): + + New property's value, encoded as an OFDT-compatible byte array. + +Hot plug/unplug events +====================== + +For most DR operations, the hypervisor will issue host->guest add/remove events +using the EPOW/check-exception notification framework, where the host issues a +check-exception interrupt, then provides an RTAS event log via an +rtas-check-exception call issued by the guest in response. This framework is +documented by PAPR+ v2.7, and already in use by QEMU for generating powerdown +requests via EPOW events. + +For DR, this framework has been extended to include hotplug events, which were +previously unneeded due to direct manipulation of DR-related guest userspace +tools by host-level management such as an HMC. This level of management is not +applicable to KVM on Power, hence the reason for extending the notification +framework to support hotplug events. + +The format for these EPOW-signalled events is described below under +:ref:`hot-plug-unplug-event-structure`. Note that these events are not formally +part of the PAPR+ specification, and have been superseded by a newer format, +also described below under :ref:`hot-plug-unplug-event-structure`, and so are +now deemed a "legacy" format. The formats are similar, but the "modern" format +contains additional fields/flags, which are denoted for the purposes of this +documentation with ``#ifdef GUEST_SUPPORTS_MODERN`` guards. + +QEMU should assume support only for "legacy" fields/flags unless the guest +advertises support for the "modern" format via +``ibm,client-architecture-support`` hcall by setting byte 5, bit 6 of its +``ibm,architecture-vec-5`` option vector structure (as described by [LoPAR]_, +section B.5.2.3). As with "legacy" format events, "modern" format events are +surfaced to the guest via check-exception RTAS calls, but use a dedicated event +source to signal the guest. This event source is advertised to the guest by the +addition of a ``hot-plug-events`` node under ``/event-sources`` node of the +guest's device tree using the standard format described in [LoPAR]_, +section B.5.12.2. + +..
_hot-plug-unplug-event-structure: + +Hot plug/unplug event structure +=============================== + +The hot plug specific payload in QEMU is implemented as follows (with all values +encoded in big-endian format): + +.. code-block:: c + + struct rtas_event_log_v6_hp { + #define SECTION_ID_HOTPLUG 0x4850 /* HP */ + struct section_header { + uint16_t section_id; /* set to SECTION_ID_HOTPLUG */ + uint16_t section_length; /* sizeof(rtas_event_log_v6_hp), + * plus the length of the DRC name + * if a DRC name identifier is + * specified for hotplug_identifier + */ + uint8_t section_version; /* version 1 */ + uint8_t section_subtype; /* unused */ + uint16_t creator_component_id; /* unused */ + } hdr; + #define RTAS_LOG_V6_HP_TYPE_CPU 1 + #define RTAS_LOG_V6_HP_TYPE_MEMORY 2 + #define RTAS_LOG_V6_HP_TYPE_SLOT 3 + #define RTAS_LOG_V6_HP_TYPE_PHB 4 + #define RTAS_LOG_V6_HP_TYPE_PCI 5 + uint8_t hotplug_type; /* type of resource/device */ + #define RTAS_LOG_V6_HP_ACTION_ADD 1 + #define RTAS_LOG_V6_HP_ACTION_REMOVE 2 + uint8_t hotplug_action; /* action (add/remove) */ + #define RTAS_LOG_V6_HP_ID_DRC_NAME 1 + #define RTAS_LOG_V6_HP_ID_DRC_INDEX 2 + #define RTAS_LOG_V6_HP_ID_DRC_COUNT 3 + #ifdef GUEST_SUPPORTS_MODERN + #define RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED 4 + #endif + uint8_t hotplug_identifier; /* type of the resource identifier, + * which serves as the discriminator + * for the 'drc' union field below + */ + #ifdef GUEST_SUPPORTS_MODERN + uint8_t capabilities; /* capability flags, currently unused + * by QEMU + */ + #else + uint8_t reserved; + #endif + union { + uint32_t index; /* DRC index of resource to take action + * on + */ + uint32_t count; /* number of DR resources to take + * action on (guest chooses which) + */ + #ifdef GUEST_SUPPORTS_MODERN + struct { + uint32_t count; /* number of DR resources to take + * action on + */ + uint32_t index; /* DRC index of first resource to take + * action on. guest will take action + * on DRC index through + * DRC index in + * sequential order + */ + } count_indexed; + #endif + char name[1]; /* string representing the name of the + * DRC to take action on + */ + } drc; + } QEMU_PACKED; + +``ibm,lrdr-capacity`` +===================== + +``ibm,lrdr-capacity`` is a property in the /rtas device tree node that +identifies the dynamic reconfiguration capabilities of the guest. It consists +of a triple consisting of ````, ```` and ````. + + ````, encoded in BE format represents the maximum address in bytes and + hence the maximum memory that can be allocated to the guest. + + ````, encoded in BE format represents the size increments in which + memory can be hot-plugged to the guest. + + ````, a BE-encoded integer, represents the maximum number of + processors that the guest can have. + +``pseries`` guests use this property to note the maximum allowed CPUs for the +guest. + +``ibm,dynamic-reconfiguration-memory`` +====================================== + +``ibm,dynamic-reconfiguration-memory`` is a device tree node that represents +dynamically reconfigurable logical memory blocks (LMB). This node is generated +only when the guest advertises the support for it via +``ibm,client-architecture-support`` call. Memory that is not dynamically +reconfigurable is represented by ``/memory`` nodes. The properties of this node +that are of interest to the sPAPR memory hotplug implementation in QEMU are +described here. + +``ibm,lmb-size`` +---------------- + +This 64-bit integer defines the size of each dynamically reconfigurable LMB. 
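+
+For example, assuming an LMB size of 256 MiB (a typical value for ``pseries``
+machine types, not something mandated by this document), hot plugging a 4 GiB
+DIMM is surfaced to the guest as ``4 GiB / 256 MiB = 16`` LMBs, i.e. a count
+of 16 in the hot plug event format described earlier.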
+ +``ibm,associativity-lookup-arrays`` +----------------------------------- + +This property defines a lookup array in which the NUMA associativity +information for each LMB can be found. It is a property encoded array +that begins with an integer M, the number of associativity lists followed +by an integer N, the number of entries per associativity list and terminated +by M associativity lists each of length N integers. + +This property provides the same information as given by ``ibm,associativity`` +property in a ``/memory`` node. Each assigned LMB has an index value between +0 and M-1 which is used as an index into this table to select which +associativity list to use for the LMB. This index value for each LMB is defined +in ``ibm,dynamic-memory`` property. + +``ibm,dynamic-memory`` +---------------------- + +This property describes the dynamically reconfigurable memory. It is a +property encoded array that has an integer N, the number of LMBs followed +by N LMB list entries. + +Each LMB list entry consists of the following elements: + +- Logical address of the start of the LMB encoded as a 64-bit integer. This + corresponds to ``reg`` property in ``/memory`` node. +- DRC index of the LMB that corresponds to ``ibm,my-drc-index`` property + in a ``/memory`` node. +- Four bytes reserved for expansion. +- Associativity list index for the LMB that is used as an index into + ``ibm,associativity-lookup-arrays`` property described earlier. This is used + to retrieve the right associativity list to be used for this LMB. +- A 32-bit flags word. The bit at bit position ``0x00000008`` defines whether + the LMB is assigned to the partition as of boot time. + +``ibm,dynamic-memory-v2`` +------------------------- + +This property describes the dynamically reconfigurable memory. This is +an alternate and newer way to describe dynamically reconfigurable memory. +It is a property encoded array that has an integer N (the number of +LMB set entries) followed by N LMB set entries. There is an LMB set entry +for each sequential group of LMBs that share common attributes. + +Each LMB set entry consists of the following elements: + +- Number of sequential LMBs in the entry represented by a 32-bit integer. +- Logical address of the first LMB in the set encoded as a 64-bit integer. +- DRC index of the first LMB in the set. +- Associativity list index that is used as an index into + ``ibm,associativity-lookup-arrays`` property described earlier. This + is used to retrieve the right associativity list to be used for all + the LMBs in this set. +- A 32-bit flags word that applies to all the LMBs in the set. diff --git a/docs/specs/ppc-spapr-hotplug.txt b/docs/specs/ppc-spapr-hotplug.txt deleted file mode 100644 index d4fb2d46d9..0000000000 --- a/docs/specs/ppc-spapr-hotplug.txt +++ /dev/null @@ -1,409 +0,0 @@ -= sPAPR Dynamic Reconfiguration = - -sPAPR/"pseries" guests make use of a facility called dynamic-reconfiguration -to handle hotplugging of dynamic "physical" resources like PCI cards, or -"logical"/paravirtual resources like memory, CPUs, and "physical" -host-bridges, which are generally managed by the host/hypervisor and provided -to guests as virtualized resources. The specifics of dynamic-reconfiguration -are documented extensively in PAPR+ v2.7, Section 13.1. This document -provides a summary of that information as it applies to the implementation -within QEMU. 
- -== Dynamic-reconfiguration Connectors == - -To manage hotplug/unplug of these resources, a firmware abstraction known as -a Dynamic Resource Connector (DRC) is used to assign a particular dynamic -resource to the guest, and provide an interface for the guest to manage -configuration/removal of the resource associated with it. - -== Device-tree description of DRCs == - -A set of 4 Open Firmware device tree array properties are used to describe -the name/index/power-domain/type of each DRC allocated to a guest at -boot-time. There may be multiple sets of these arrays, rooted at different -paths in the device tree depending on the type of resource the DRCs manage. - -In some cases, the DRCs themselves may be provided by a dynamic resource, -such as the DRCs managing PCI slots on a hotplugged PHB. In this case the -arrays would be fetched as part of the device tree retrieval interfaces -for hotplugged resources described under "Guest->Host interface". - -The array properties are described below. Each entry/element in an array -describes the DRC identified by the element in the corresponding position -of ibm,drc-indexes: - -ibm,drc-names: - first 4-bytes: BE-encoded integer denoting the number of entries - each entry: a NULL-terminated string encoded as a byte array - - values for logical/virtual resources are defined in PAPR+ v2.7, - Section 13.5.2.4, and basically consist of the type of the resource - followed by a space and a numerical value that's unique across resources - of that type. - - values for "physical" resources such as PCI or VIO devices are - defined as being "location codes", which are the "location labels" of - each encapsulating device, starting from the chassis down to the - individual slot for the device, concatenated by a hyphen. This provides - a mapping of resources to a physical location in a chassis for debugging - purposes. For QEMU, this mapping is less important, so we assign a - location code that conforms to naming specifications, but is simply a - location label for the slot by itself to simplify the implementation. - The naming convention for location labels is documented in detail in - PAPR+ v2.7, Section 12.3.1.5, and in our case amounts to using "C" - for PCI/VIO device slots, where is unique across all PCI/VIO - device slots. - -ibm,drc-indexes: - first 4-bytes: BE-encoded integer denoting the number of entries - each 4-byte entry: BE-encoded integer that is unique across all DRCs - in the machine - - is arbitrary, but in the case of QEMU we try to maintain the - convention used to assign them to pSeries guests on pHyp: - - bit[31:28]: integer encoding of , where is: - 1 for CPU resource - 2 for PHB resource - 3 for VIO resource - 4 for PCI resource - 8 for Memory resource - bit[27:0]: integer encoding of , where is unique across - all resources of specified type - -ibm,drc-power-domains: - first 4-bytes: BE-encoded integer denoting the number of entries - each 4-byte entry: 32-bit, BE-encoded integer that specifies the - power domain the resource will be assigned to. In the case of QEMU - we associated all resources with a "live insertion" domain, where the - power is assumed to be managed automatically. The integer value for - this domain is a special value of -1. 
- - -ibm,drc-types: - first 4-bytes: BE-encoded integer denoting the number of entries - each entry: a NULL-terminated string encoded as a byte array - - is assigned as follows: - "CPU" for a CPU - "PHB" for a physical host-bridge - "SLOT" for a VIO slot - "28" for a PCI slot - "MEM" for memory resource - -== Guest->Host interface to manage dynamic resources == - -Each DRC is given a globally unique DRC Index, and resources associated with -a particular DRC are configured/managed by the guest via a number of RTAS -calls which reference individual DRCs based on the DRC index. This can be -considered the guest->host interface. - -rtas-set-power-level: - arg[0]: integer identifying power domain - arg[1]: new power level for the domain, 0-100 - output[0]: status, 0 on success - output[1]: power level after command - - Set the power level for a specified power domain - -rtas-get-power-level: - arg[0]: integer identifying power domain - output[0]: status, 0 on success - output[1]: current power level - - Get the power level for a specified power domain - -rtas-set-indicator: - arg[0]: integer identifying sensor/indicator type - arg[1]: index of sensor, for DR-related sensors this is generally the - DRC index - arg[2]: desired sensor value - output[0]: status, 0 on success - - Set the state of an indicator or sensor. For the purpose of this document we - focus on the indicator/sensor types associated with a DRC. The types are: - - 9001: isolation-state, controls/indicates whether a device has been made - accessible to a guest - - supported sensor values: - 0: isolate, device is made unaccessible by guest OS - 1: unisolate, device is made available to guest OS - - 9002: dr-indicator, controls "visual" indicator associated with device - - supported sensor values: - 0: inactive, resource may be safely removed - 1: active, resource is in use and cannot be safely removed - 2: identify, used to visually identify slot for interactive hotplug - 3: action, in most cases, used in the same manner as identify - - 9003: allocation-state, generally only used for "logical" DR resources to - request the allocation/deallocation of a resource prior to acquiring - it via isolation-state->unisolate, or after releasing it via - isolation-state->isolate, respectively. for "physical" DR (like PCI - hotplug/unplug) the pre-allocation of the resource is implied and - this sensor is unused. - - supported sensor values: - 0: unusable, tell firmware/system the resource can be - unallocated/reclaimed and added back to the system resource pool - 1: usable, request the resource be allocated/reserved for use by - guest OS - 2: exchange, used to allocate a spare resource to use for fail-over - in certain situations. unused in QEMU - 3: recover, used to reclaim a previously allocated resource that's - not currently allocated to the guest OS. unused in QEMU - -rtas-get-sensor-state: - arg[0]: integer identifying sensor/indicator type - arg[1]: index of sensor, for DR-related sensors this is generally the - DRC index - output[0]: status, 0 on success - - Used to read an indicator or sensor value. - - For DR-related operations, the only noteworthy sensor is dr-entity-sense, - which has a type value of 9003, as allocation-state does in the case of - rtas-set-indicator. 
The semantics/encodings of the sensor values are distinct - however: - - supported sensor values for dr-entity-sense (9003) sensor: - 0: empty, - for physical resources: DRC/slot is empty - for logical resources: unused - 1: present, - for physical resources: DRC/slot is populated with a device/resource - for logical resources: resource has been allocated to the DRC - 2: unusable, - for physical resources: unused - for logical resources: DRC has no resource allocated to it - 3: exchange, - for physical resources: unused - for logical resources: resource available for exchange (see - allocation-state sensor semantics above) - 4: recovery, - for physical resources: unused - for logical resources: resource available for recovery (see - allocation-state sensor semantics above) - -rtas-ibm-configure-connector: - arg[0]: guest physical address of 4096-byte work area buffer - arg[1]: 0, or address of additional 4096-byte work area buffer. only non-zero - if a prior RTAS response indicated a need for additional memory - output[0]: status: - 0: completed transmittal of device-tree node - 1: instruct guest to prepare for next DT sibling node - 2: instruct guest to prepare for next DT child node - 3: instruct guest to prepare for next DT property - 4: instruct guest to ascend to parent DT node - 5: instruct guest to provide additional work-area buffer - via arg[1] - 990x: instruct guest that operation took too long and to try - again later - - Used to fetch an OF device-tree description of the resource associated with - a particular DRC. The DRC index is encoded in the first 4-bytes of the first - work area buffer. - - Work area layout, using 4-byte offsets: - wa[0]: DRC index of the DRC to fetch device-tree nodes from - wa[1]: 0 (hard-coded) - wa[2]: for next-sibling/next-child response: - wa offset of null-terminated string denoting the new node's name - for next-property response: - wa offset of null-terminated string denoting new property's name - wa[3]: for next-property response (unused otherwise): - byte-length of new property's value - wa[4]: for next-property response (unused otherwise): - new property's value, encoded as an OFDT-compatible byte array - -== hotplug/unplug events == - -For most DR operations, the hypervisor will issue host->guest add/remove events -using the EPOW/check-exception notification framework, where the host issues a -check-exception interrupt, then provides an RTAS event log via an -rtas-check-exception call issued by the guest in response. This framework is -documented by PAPR+ v2.7, and already use in by QEMU for generating powerdown -requests via EPOW events. - -For DR, this framework has been extended to include hotplug events, which were -previously unneeded due to direct manipulation of DR-related guest userspace -tools by host-level management such as an HMC. This level of management is not -applicable to PowerKVM, hence the reason for extending the notification -framework to support hotplug events. - -The format for these EPOW-signalled events is described below under -"hotplug/unplug event structure". Note that these events are not -formally part of the PAPR+ specification, and have been superseded by a -newer format, also described below under "hotplug/unplug event structure", -and so are now deemed a "legacy" format. The formats are similar, but the -"modern" format contains additional fields/flags, which are denoted for the -purposes of this documentation with "#ifdef GUEST_SUPPORTS_MODERN" guards. 
- -QEMU should assume support only for "legacy" fields/flags unless the guest -advertises support for the "modern" format via ibm,client-architecture-support -hcall by setting byte 5, bit 6 of it's ibm,architecture-vec-5 option vector -structure (as described by LoPAPR v11, B.6.2.3). As with "legacy" format events, -"modern" format events are surfaced to the guest via check-exception RTAS calls, -but use a dedicated event source to signal the guest. This event source is -advertised to the guest by the addition of a "hot-plug-events" node under -"/event-sources" node of the guest's device tree using the standard format -described in LoPAPR v11, B.6.12.1. - -== hotplug/unplug event structure == - -The hotplug-specific payload in QEMU is implemented as follows (with all values -encoded in big-endian format): - -struct rtas_event_log_v6_hp { -#define SECTION_ID_HOTPLUG 0x4850 /* HP */ - struct section_header { - uint16_t section_id; /* set to SECTION_ID_HOTPLUG */ - uint16_t section_length; /* sizeof(rtas_event_log_v6_hp), - * plus the length of the DRC name - * if a DRC name identifier is - * specified for hotplug_identifier - */ - uint8_t section_version; /* version 1 */ - uint8_t section_subtype; /* unused */ - uint16_t creator_component_id; /* unused */ - } hdr; -#define RTAS_LOG_V6_HP_TYPE_CPU 1 -#define RTAS_LOG_V6_HP_TYPE_MEMORY 2 -#define RTAS_LOG_V6_HP_TYPE_SLOT 3 -#define RTAS_LOG_V6_HP_TYPE_PHB 4 -#define RTAS_LOG_V6_HP_TYPE_PCI 5 - uint8_t hotplug_type; /* type of resource/device */ -#define RTAS_LOG_V6_HP_ACTION_ADD 1 -#define RTAS_LOG_V6_HP_ACTION_REMOVE 2 - uint8_t hotplug_action; /* action (add/remove) */ -#define RTAS_LOG_V6_HP_ID_DRC_NAME 1 -#define RTAS_LOG_V6_HP_ID_DRC_INDEX 2 -#define RTAS_LOG_V6_HP_ID_DRC_COUNT 3 -#ifdef GUEST_SUPPORTS_MODERN -#define RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED 4 -#endif - uint8_t hotplug_identifier; /* type of the resource identifier, - * which serves as the discriminator - * for the 'drc' union field below - */ -#ifdef GUEST_SUPPORTS_MODERN - uint8_t capabilities; /* capability flags, currently unused - * by QEMU - */ -#else - uint8_t reserved; -#endif - union { - uint32_t index; /* DRC index of resource to take action - * on - */ - uint32_t count; /* number of DR resources to take - * action on (guest chooses which) - */ -#ifdef GUEST_SUPPORTS_MODERN - struct { - uint32_t count; /* number of DR resources to take - * action on - */ - uint32_t index; /* DRC index of first resource to take - * action on. guest will take action - * on DRC index through - * DRC index in - * sequential order - */ - } count_indexed; -#endif - char name[1]; /* string representing the name of the - * DRC to take action on - */ - } drc; -} QEMU_PACKED; - -== ibm,lrdr-capacity == - -ibm,lrdr-capacity is a property in the /rtas device tree node that identifies -the dynamic reconfiguration capabilities of the guest. It consists of a triple -consisting of , and . - - , encoded in BE format represents the maximum address in bytes and - hence the maximum memory that can be allocated to the guest. - - , encoded in BE format represents the size increments in which - memory can be hot-plugged to the guest. - - , a BE-encoded integer, represents the maximum number of - processors that the guest can have. - -pseries guests use this property to note the maximum allowed CPUs for the -guest. - -== ibm,dynamic-reconfiguration-memory == - -ibm,dynamic-reconfiguration-memory is a device tree node that represents -dynamically reconfigurable logical memory blocks (LMB). 
This node -is generated only when the guest advertises the support for it via -ibm,client-architecture-support call. Memory that is not dynamically -reconfigurable is represented by /memory nodes. The properties of this -node that are of interest to the sPAPR memory hotplug implementation -in QEMU are described here. - -ibm,lmb-size - -This 64bit integer defines the size of each dynamically reconfigurable LMB. - -ibm,associativity-lookup-arrays - -This property defines a lookup array in which the NUMA associativity -information for each LMB can be found. It is a property encoded array -that begins with an integer M, the number of associativity lists followed -by an integer N, the number of entries per associativity list and terminated -by M associativity lists each of length N integers. - -This property provides the same information as given by ibm,associativity -property in a /memory node. Each assigned LMB has an index value between -0 and M-1 which is used as an index into this table to select which -associativity list to use for the LMB. This index value for each LMB -is defined in ibm,dynamic-memory property. - -ibm,dynamic-memory - -This property describes the dynamically reconfigurable memory. It is a -property encoded array that has an integer N, the number of LMBs followed -by N LMB list entries. - -Each LMB list entry consists of the following elements: - -- Logical address of the start of the LMB encoded as a 64bit integer. This - corresponds to reg property in /memory node. -- DRC index of the LMB that corresponds to ibm,my-drc-index property - in a /memory node. -- Four bytes reserved for expansion. -- Associativity list index for the LMB that is used as an index into - ibm,associativity-lookup-arrays property described earlier. This - is used to retrieve the right associativity list to be used for this - LMB. -- A 32bit flags word. The bit at bit position 0x00000008 defines whether - the LMB is assigned to the partition as of boot time. - -ibm,dynamic-memory-v2 - -This property describes the dynamically reconfigurable memory. This is -an alternate and newer way to describe dynamically reconfigurable memory. -It is a property encoded array that has an integer N (the number of -LMB set entries) followed by N LMB set entries. There is an LMB set entry -for each sequential group of LMBs that share common attributes. - -Each LMB set entry consists of the following elements: - -- Number of sequential LMBs in the entry represented by a 32bit integer. -- Logical address of the first LMB in the set encoded as a 64bit integer. -- DRC index of the first LMB in the set. -- Associativity list index that is used as an index into - ibm,associativity-lookup-arrays property described earlier. This - is used to retrieve the right associativity list to be used for all - the LMBs in this set. -- A 32bit flags word that applies to all the LMBs in the set. - -[1] http://thread.gmane.org/gmane.linux.ports.ppc.embedded/75350/focus=106867 diff --git a/docs/specs/ppc-spapr-uv-hcalls.rst b/docs/specs/ppc-spapr-uv-hcalls.rst new file mode 100644 index 0000000000..a00288deb3 --- /dev/null +++ b/docs/specs/ppc-spapr-uv-hcalls.rst @@ -0,0 +1,89 @@ +=================================== +Hypervisor calls and the Ultravisor +=================================== + +On PPC64 systems supporting Protected Execution Facility (PEF), system memory +can be placed in a secured region where only an ultravisor running in firmware +can provide access to. 
pSeries guests on such systems can communicate with +the ultravisor (via ultracalls) to switch to a secure virtual machine (SVM) mode +where the guest's memory is relocated to this secured region, making its memory +inaccessible to normal processes/guests running on the host. + +The various ultracalls/hypercalls relating to SVM mode are currently only +documented internally, but are planned for direct inclusion into the Linux on +Power Architecture Reference document ([LoPAR]_). An internal ACR has been filed +to reserve a hypercall number range specific to this use case to avoid any +future conflicts with the IBM internally maintained Power Architecture Platform +Reference (PAPR+) documentation specification. This document summarizes some of +these details as they relate to QEMU. + +Hypercalls needed by the ultravisor +=================================== + +Switching to SVM mode involves a number of hcalls issued by the ultravisor to +the hypervisor to orchestrate the movement of guest memory to secure memory and +various other aspects of the SVM mode. Numbers are assigned for these hcalls +within the reserved range ``0xEF00-0xEF80``. The below documents the hcalls +relevant to QEMU. + +``H_TPM_COMM`` (``0xef10``) +--------------------------- + +SVM file systems are encrypted using a symmetric key. This key is then +wrapped/encrypted using the public key of a trusted system which has the private +key stored in the system's TPM. An Ultravisor will use this hcall to +unwrap/unseal the symmetric key using the system's TPM device or a TPM Resource +Manager associated with the device. + +The Ultravisor sets up a separate session key with the TPM in advance during +host system boot. All sensitive in and out values will be encrypted using the +session key. Though the hypervisor will see the in and out buffers in raw form, +any sensitive contents will generally be encrypted using this session key. + +Arguments: + + ``r3``: ``H_TPM_COMM`` (``0xef10``) + + ``r4``: ``TPM`` operation, one of: + + ``TPM_COMM_OP_EXECUTE`` (``0x1``): send a request to a TPM and receive a + response, opening a new TPM session if one has not already been opened. + + ``TPM_COMM_OP_CLOSE_SESSION`` (``0x2``): close the existing TPM session, if + any. + + ``r5``: ``in_buffer``, guest physical address of buffer containing the + request. Caller may use the same address for both request and response. + + ``r6``: ``in_size``, size of the in buffer. Must be less than or equal to + 4 KB. + + ``r7``: ``out_buffer``, guest physical address of buffer to store the + response. Caller may use the same address for both request and response. + + ``r8``: ``out_size``, size of the out buffer. Must be at least 4 KB, as this + is the maximum request/response size supported by most TPM implementations, + including the TPM Resource Manager in the linux kernel. + +Return values: + + ``r3``: one of the following values: + + ``H_Success``: request processed successfully. + + ``H_PARAMETER``: invalid TPM operation. + + ``H_P2``: ``in_buffer`` is invalid. + + ``H_P3``: ``in_size`` is invalid. + + ``H_P4``: ``out_buffer`` is invalid. + + ``H_P5``: ``out_size`` is invalid. + + ``H_RESOURCE``: problem communicating with TPM. + + ``H_FUNCTION``: TPM access is not currently allowed/configured. + + ``r4``: For ``TPM_COMM_OP_EXECUTE``, the size of the response will be stored + here upon success. 
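+
+As an illustration, an ultravisor-side caller could wrap this interface as
+follows. The ``hv_call()`` primitive, which is assumed to load ``r3``-``r8``,
+trap to the hypervisor and return ``r3`` (with ``r4`` passed back through the
+pointer argument), is purely hypothetical and not part of this specification::
+
+   #define H_TPM_COMM                 0xef10
+   #define TPM_COMM_OP_EXECUTE        0x1
+   #define TPM_COMM_OP_CLOSE_SESSION  0x2
+
+   /* Send one TPM request and wait for the response; returns an H_* code. */
+   long h_tpm_execute(uint64_t req_gpa, uint64_t req_size,
+                      uint64_t resp_gpa, uint64_t resp_size,
+                      uint64_t *resp_len)
+   {
+       /* req_size must be at most 4 KB, resp_size at least 4 KB (see above). */
+       return hv_call(H_TPM_COMM, TPM_COMM_OP_EXECUTE,
+                      req_gpa, req_size, resp_gpa, resp_size, resp_len);
+   }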
diff --git a/docs/specs/ppc-spapr-uv-hcalls.txt b/docs/specs/ppc-spapr-uv-hcalls.txt deleted file mode 100644 index 389c2740d7..0000000000 --- a/docs/specs/ppc-spapr-uv-hcalls.txt +++ /dev/null @@ -1,76 +0,0 @@ -On PPC64 systems supporting Protected Execution Facility (PEF), system -memory can be placed in a secured region where only an "ultravisor" -running in firmware can provide to access it. pseries guests on such -systems can communicate with the ultravisor (via ultracalls) to switch to a -secure VM mode (SVM) where the guest's memory is relocated to this secured -region, making its memory inaccessible to normal processes/guests running on -the host. - -The various ultracalls/hypercalls relating to SVM mode are currently -only documented internally, but are planned for direct inclusion into the -public OpenPOWER version of the PAPR specification (LoPAPR/LoPAR). An internal -ACR has been filed to reserve a hypercall number range specific to this -use-case to avoid any future conflicts with the internally-maintained PAPR -specification. This document summarizes some of these details as they relate -to QEMU. - -== hypercalls needed by the ultravisor == - -Switching to SVM mode involves a number of hcalls issued by the ultravisor -to the hypervisor to orchestrate the movement of guest memory to secure -memory and various other aspects SVM mode. Numbers are assigned for these -hcalls within the reserved range 0xEF00-0xEF80. The below documents the -hcalls relevant to QEMU. - -- H_TPM_COMM (0xef10) - - For TPM_COMM_OP_EXECUTE operation: - Send a request to a TPM and receive a response, opening a new TPM session - if one has not already been opened. - - For TPM_COMM_OP_CLOSE_SESSION operation: - Close the existing TPM session, if any. - - Arguments: - - r3 : H_TPM_COMM (0xef10) - r4 : TPM operation, one of: - TPM_COMM_OP_EXECUTE (0x1) - TPM_COMM_OP_CLOSE_SESSION (0x2) - r5 : in_buffer, guest physical address of buffer containing the request - - Caller may use the same address for both request and response - r6 : in_size, size of the in buffer - - Must be less than or equal to 4KB - r7 : out_buffer, guest physical address of buffer to store the response - - Caller may use the same address for both request and response - r8 : out_size, size of the out buffer - - Must be at least 4KB, as this is the maximum request/response size - supported by most TPM implementations, including the TPM Resource - Manager in the linux kernel. - - Return values: - - r3 : H_Success request processed successfully - H_PARAMETER invalid TPM operation - H_P2 in_buffer is invalid - H_P3 in_size is invalid - H_P4 out_buffer is invalid - H_P5 out_size is invalid - H_RESOURCE problem communicating with TPM - H_FUNCTION TPM access is not currently allowed/configured - r4 : For TPM_COMM_OP_EXECUTE, the size of the response will be stored here - upon success. - - Use-case/notes: - - SVM filesystems are encrypted using a symmetric key. This key is then - wrapped/encrypted using the public key of a trusted system which has the - private key stored in the system's TPM. An Ultravisor will use this - hcall to unwrap/unseal the symmetric key using the system's TPM device - or a TPM Resource Manager associated with the device. - - The Ultravisor sets up a separate session key with the TPM in advance - during host system boot. All sensitive in and out values will be - encrypted using the session key. 
Though the hypervisor will see the 'in' - and 'out' buffers in raw form, any sensitive contents will generally be - encrypted using this session key. diff --git a/docs/specs/sev-guest-firmware.rst b/docs/specs/sev-guest-firmware.rst new file mode 100644 index 0000000000..3f7f082df5 --- /dev/null +++ b/docs/specs/sev-guest-firmware.rst @@ -0,0 +1,125 @@ +==================================================== +QEMU/Guest Firmware Interface for AMD SEV and SEV-ES +==================================================== + +Overview +======== + +The guest firmware image (OVMF) may contain some configuration entries +which are used by QEMU before the guest launches. These are listed in a +GUIDed table at a known location in the firmware image. QEMU parses +this table when it loads the firmware image into memory, and then QEMU +reads individual entries when their values are needed. + +Though nothing in the table structure is SEV-specific, currently all the +entries in the table are related to SEV and SEV-ES features. + + +Table parsing in QEMU +--------------------- + +The table is parsed from the footer: first the presence of the table +footer GUID (96b582de-1fb2-45f7-baea-a366c55a082d) at 0xffffffd0 is +verified. If that is found, two bytes at 0xffffffce are the entire +table length. + +Then the table is scanned backwards looking for the specific entry GUID. + +QEMU files related to parsing and scanning the OVMF table: + - ``hw/i386/pc_sysfw_ovmf.c`` + +The edk2 firmware code that constructs this structure is in the +`OVMF Reset Vector file`_. + + +Table memory layout +------------------- + ++------------+--------+-----------------------------------------+ +| GPA | Length | Description | ++============+========+=========================================+ +| 0xffffff80 | 4 | Zero padding | ++------------+--------+-----------------------------------------+ +| 0xffffff84 | 4 | SEV hashes table base address | ++------------+--------+-----------------------------------------+ +| 0xffffff88 | 4 | SEV hashes table size (=0x400) | ++------------+--------+-----------------------------------------+ +| 0xffffff8c | 2 | SEV hashes table entry length (=0x1a) | ++------------+--------+-----------------------------------------+ +| 0xffffff8e | 16 | SEV hashes table GUID: | +| | | 7255371f-3a3b-4b04-927b-1da6efa8d454 | ++------------+--------+-----------------------------------------+ +| 0xffffff9e | 4 | SEV secret block base address | ++------------+--------+-----------------------------------------+ +| 0xffffffa2 | 4 | SEV secret block size (=0xc00) | ++------------+--------+-----------------------------------------+ +| 0xffffffa6 | 2 | SEV secret block entry length (=0x1a) | ++------------+--------+-----------------------------------------+ +| 0xffffffa8 | 16 | SEV secret block GUID: | +| | | 4c2eb361-7d9b-4cc3-8081-127c90d3d294 | ++------------+--------+-----------------------------------------+ +| 0xffffffb8 | 4 | SEV-ES AP reset RIP | ++------------+--------+-----------------------------------------+ +| 0xffffffbc | 2 | SEV-ES reset block entry length (=0x16) | ++------------+--------+-----------------------------------------+ +| 0xffffffbe | 16 | SEV-ES reset block entry GUID: | +| | | 00f771de-1a7e-4fcb-890e-68c77e2fb44e | ++------------+--------+-----------------------------------------+ +| 0xffffffce | 2 | Length of entire table including table | +| | | footer GUID and length (=0x72) | ++------------+--------+-----------------------------------------+ +| 0xffffffd0 | 16 | OVMF GUIDed table footer GUID: | 
+| | | 96b582de-1fb2-45f7-baea-a366c55a082d | ++------------+--------+-----------------------------------------+ +| 0xffffffe0 | 8 | Application processor entry point code | ++------------+--------+-----------------------------------------+ +| 0xffffffe8 | 8 | "\0\0\0\0VTF\0" | ++------------+--------+-----------------------------------------+ +| 0xfffffff0 | 16 | Reset vector code | ++------------+--------+-----------------------------------------+ + + +Table entries description +========================= + +SEV-ES reset block +------------------ + +Entry GUID: 00f771de-1a7e-4fcb-890e-68c77e2fb44e + +For the initial boot of an AP under SEV-ES, the "reset" RIP must be +programmed to the RAM area defined by this entry. The entry's format +is: + +* IP value [0:15] +* CS segment base [31:16] + +A hypervisor reads the CS segment base and IP value. The CS segment +base value represents the high order 16-bits of the CS segment base, so +the hypervisor must left shift the value of the CS segment base by 16 +bits to form the full CS segment base for the CS segment register. It +would then program the EIP register with the IP value as read. + + +SEV secret block +---------------- + +Entry GUID: 4c2eb361-7d9b-4cc3-8081-127c90d3d294 + +This describes the guest RAM area where the hypervisor should inject the +Guest Owner secret (using SEV_LAUNCH_SECRET). + + +SEV hashes table +---------------- + +Entry GUID: 7255371f-3a3b-4b04-927b-1da6efa8d454 + +This describes the guest RAM area where the hypervisor should install a +table describing the hashes of certain firmware configuration device +files that would otherwise be passed in unchecked. The current use is +for the kernel, initrd and command line values, but others may be added. + + +.. _OVMF Reset Vector file: + https://github.com/tianocore/edk2/blob/master/OvmfPkg/ResetVector/Ia16/ResetVectorVtf0.asm diff --git a/docs/specs/tpm.rst b/docs/specs/tpm.rst index 3be190343a..535912a92b 100644 --- a/docs/specs/tpm.rst +++ b/docs/specs/tpm.rst @@ -250,24 +250,25 @@ hardware TPM ``/dev/tpm0``: The following commands should result in similar output inside the VM with a Linux kernel that either has the TPM TIS driver built-in or -available as a module: +available as a module (assuming a TPM 2 is passed through): .. code-block:: console # dmesg | grep -i tpm - [ 0.711310] tpm_tis 00:06: 1.2 TPM (device=id 0x1, rev-id 1) - - # dmesg | grep TCPA - [ 0.000000] ACPI: TCPA 0x0000000003FFD191C 000032 (v02 BOCHS \ - BXPCTCPA 0000001 BXPC 00000001) + [ 0.012560] ACPI: TPM2 0x000000000BFFD1900 00004C (v04 BOCHS \ + BXPC 0000001 BXPC 00000001) # ls -l /dev/tpm* - crw-------. 1 root root 10, 224 Jul 11 10:11 /dev/tpm0 + crw-rw----. 1 tss root 10, 224 Sep 6 12:36 /dev/tpm0 + crw-rw----. 1 tss rss 253, 65536 Sep 6 12:36 /dev/tpmrm0 - # find /sys/devices/ | grep pcrs$ | xargs cat - PCR-00: 35 4E 3B CE 23 9F 38 59 ... + Starting with Linux 5.12 there are PCR entries for TPM 2 in sysfs: + # find /sys/devices/ -type f | grep pcr-sha + ... + /sys/devices/LNXSYSTEM:00/LNXSYBUS:00/MSFT0101:00/tpm/tpm0/pcr-sha256/1 + ... + /sys/devices/LNXSYSTEM:00/LNXSYBUS:00/MSFT0101:00/tpm/tpm0/pcr-sha256/9 ... - PCR-23: 00 00 00 00 00 00 00 00 ... The QEMU TPM emulator device ---------------------------- @@ -304,6 +305,7 @@ a socket interface. They do not need to be run as root. 
mkdir /tmp/mytpm1 swtpm socket --tpmstate dir=/tmp/mytpm1 \ --ctrl type=unixio,path=/tmp/mytpm1/swtpm-sock \ + --tpm2 \ --log level=20 Command line to start QEMU with the TPM emulator device communicating @@ -365,19 +367,20 @@ available as a module: .. code-block:: console # dmesg | grep -i tpm - [ 0.711310] tpm_tis 00:06: 1.2 TPM (device=id 0x1, rev-id 1) - - # dmesg | grep TCPA - [ 0.000000] ACPI: TCPA 0x0000000003FFD191C 000032 (v02 BOCHS \ - BXPCTCPA 0000001 BXPC 00000001) + [ 0.012560] ACPI: TPM2 0x000000000BFFD1900 00004C (v04 BOCHS \ + BXPC 0000001 BXPC 00000001) # ls -l /dev/tpm* - crw-------. 1 root root 10, 224 Jul 11 10:11 /dev/tpm0 + crw-rw----. 1 tss root 10, 224 Sep 6 12:36 /dev/tpm0 + crw-rw----. 1 tss rss 253, 65536 Sep 6 12:36 /dev/tpmrm0 - # find /sys/devices/ | grep pcrs$ | xargs cat - PCR-00: 35 4E 3B CE 23 9F 38 59 ... + Starting with Linux 5.12 there are PCR entries for TPM 2 in sysfs: + # find /sys/devices/ -type f | grep pcr-sha + ... + /sys/devices/LNXSYSTEM:00/LNXSYBUS:00/MSFT0101:00/tpm/tpm0/pcr-sha256/1 + ... + /sys/devices/LNXSYSTEM:00/LNXSYBUS:00/MSFT0101:00/tpm/tpm0/pcr-sha256/9 ... - PCR-23: 00 00 00 00 00 00 00 00 ... Migration with the TPM emulator =============================== @@ -398,7 +401,8 @@ In a 1st terminal start an instance of a swtpm using the following command: mkdir /tmp/mytpm1 swtpm socket --tpmstate dir=/tmp/mytpm1 \ --ctrl type=unixio,path=/tmp/mytpm1/swtpm-sock \ - --log level=20 --tpm2 + --tpm2 \ + --log level=20 In a 2nd terminal start the VM: diff --git a/docs/specs/vmgenid.txt b/docs/specs/vmgenid.txt index aa9f518676..80ff69f31c 100644 --- a/docs/specs/vmgenid.txt +++ b/docs/specs/vmgenid.txt @@ -153,7 +153,7 @@ change the contents of the memory at runtime, specifically when starting a backed-up or snapshotted image. In order to do this, QEMU must know the address that has been allocated. -The mechanism chosen for this memory sharing is writeable fw_cfg blobs. +The mechanism chosen for this memory sharing is writable fw_cfg blobs. These are data object that are visible to both QEMU and guests, and are addressable as sequential files. @@ -164,7 +164,7 @@ Two fw_cfg blobs are used in this case: /etc/vmgenid_guid - contains the actual VM Generation ID GUID - read-only to the guest /etc/vmgenid_addr - contains the address of the downloaded vmgenid blob - - writeable by the guest + - writable by the guest QEMU sends the following commands to the guest at startup: diff --git a/docs/sphinx-static/custom.js b/docs/sphinx-static/custom.js new file mode 100644 index 0000000000..71a8605305 --- /dev/null +++ b/docs/sphinx-static/custom.js @@ -0,0 +1,9 @@ +document.addEventListener('keydown', (event) => { + // find a better way to look it up? + let search_input = document.getElementsByName('q')[0]; + + if (event.code === 'KeyS' && document.activeElement !== search_input) { + event.preventDefault(); + search_input.focus(); + } +}); diff --git a/docs/sphinx/dbusdoc.py b/docs/sphinx/dbusdoc.py new file mode 100644 index 0000000000..be284ed08f --- /dev/null +++ b/docs/sphinx/dbusdoc.py @@ -0,0 +1,166 @@ +# D-Bus XML documentation extension +# +# Copyright (C) 2021, Red Hat Inc. 
+# +# SPDX-License-Identifier: LGPL-2.1-or-later +# +# Author: Marc-André Lureau +"""dbus-doc is a Sphinx extension that provides documentation from D-Bus XML.""" + +import os +import re +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterator, + List, + Optional, + Sequence, + Set, + Tuple, + Type, + TypeVar, + Union, +) + +import sphinx +from docutils import nodes +from docutils.nodes import Element, Node +from docutils.parsers.rst import Directive, directives +from docutils.parsers.rst.states import RSTState +from docutils.statemachine import StringList, ViewList +from sphinx.application import Sphinx +from sphinx.errors import ExtensionError +from sphinx.util import logging +from sphinx.util.docstrings import prepare_docstring +from sphinx.util.docutils import SphinxDirective, switch_source_input +from sphinx.util.nodes import nested_parse_with_titles + +import dbusdomain +from dbusparser import parse_dbus_xml + +logger = logging.getLogger(__name__) + +__version__ = "1.0" + + +class DBusDoc: + def __init__(self, sphinx_directive, dbusfile): + self._cur_doc = None + self._sphinx_directive = sphinx_directive + self._dbusfile = dbusfile + self._top_node = nodes.section() + self.result = StringList() + self.indent = "" + + def add_line(self, line: str, *lineno: int) -> None: + """Append one line of generated reST to the output.""" + if line.strip(): # not a blank line + self.result.append(self.indent + line, self._dbusfile, *lineno) + else: + self.result.append("", self._dbusfile, *lineno) + + def add_method(self, method): + self.add_line(f".. dbus:method:: {method.name}") + self.add_line("") + self.indent += " " + for arg in method.in_args: + self.add_line(f":arg {arg.signature} {arg.name}: {arg.doc_string}") + for arg in method.out_args: + self.add_line(f":ret {arg.signature} {arg.name}: {arg.doc_string}") + self.add_line("") + for line in prepare_docstring("\n" + method.doc_string): + self.add_line(line) + self.indent = self.indent[:-3] + + def add_signal(self, signal): + self.add_line(f".. dbus:signal:: {signal.name}") + self.add_line("") + self.indent += " " + for arg in signal.args: + self.add_line(f":arg {arg.signature} {arg.name}: {arg.doc_string}") + self.add_line("") + for line in prepare_docstring("\n" + signal.doc_string): + self.add_line(line) + self.indent = self.indent[:-3] + + def add_property(self, prop): + self.add_line(f".. dbus:property:: {prop.name}") + self.indent += " " + self.add_line(f":type: {prop.signature}") + access = {"read": "readonly", "write": "writeonly", "readwrite": "readwrite"}[ + prop.access + ] + self.add_line(f":{access}:") + if prop.emits_changed_signal: + self.add_line(f":emits-changed: yes") + self.add_line("") + for line in prepare_docstring("\n" + prop.doc_string): + self.add_line(line) + self.indent = self.indent[:-3] + + def add_interface(self, iface): + self.add_line(f".. 
dbus:interface:: {iface.name}") + self.add_line("") + self.indent += " " + for line in prepare_docstring("\n" + iface.doc_string): + self.add_line(line) + for method in iface.methods: + self.add_method(method) + for sig in iface.signals: + self.add_signal(sig) + for prop in iface.properties: + self.add_property(prop) + self.indent = self.indent[:-3] + + +def parse_generated_content(state: RSTState, content: StringList) -> List[Node]: + """Parse a generated content by Documenter.""" + with switch_source_input(state, content): + node = nodes.paragraph() + node.document = state.document + state.nested_parse(content, 0, node) + + return node.children + + +class DBusDocDirective(SphinxDirective): + """Extract documentation from the specified D-Bus XML file""" + + has_content = True + required_arguments = 1 + optional_arguments = 0 + final_argument_whitespace = True + + def run(self): + reporter = self.state.document.reporter + + try: + source, lineno = reporter.get_source_and_line(self.lineno) # type: ignore + except AttributeError: + source, lineno = (None, None) + + logger.debug("[dbusdoc] %s:%s: input:\n%s", source, lineno, self.block_text) + + env = self.state.document.settings.env + dbusfile = env.config.qapidoc_srctree + "/" + self.arguments[0] + with open(dbusfile, "rb") as f: + xml_data = f.read() + xml = parse_dbus_xml(xml_data) + doc = DBusDoc(self, dbusfile) + for iface in xml: + doc.add_interface(iface) + + result = parse_generated_content(self.state, doc.result) + return result + + +def setup(app: Sphinx) -> Dict[str, Any]: + """Register dbus-doc directive with Sphinx""" + app.add_config_value("dbusdoc_srctree", None, "env") + app.add_directive("dbus-doc", DBusDocDirective) + dbusdomain.setup(app) + + return dict(version=__version__, parallel_read_safe=True, parallel_write_safe=True) diff --git a/docs/sphinx/dbusdomain.py b/docs/sphinx/dbusdomain.py new file mode 100644 index 0000000000..2ea95af623 --- /dev/null +++ b/docs/sphinx/dbusdomain.py @@ -0,0 +1,406 @@ +# D-Bus sphinx domain extension +# +# Copyright (C) 2021, Red Hat Inc. 
+# +# SPDX-License-Identifier: LGPL-2.1-or-later +# +# Author: Marc-André Lureau + +from typing import ( + Any, + Dict, + Iterable, + Iterator, + List, + NamedTuple, + Optional, + Tuple, + cast, +) + +from docutils import nodes +from docutils.nodes import Element, Node +from docutils.parsers.rst import directives +from sphinx import addnodes +from sphinx.addnodes import desc_signature, pending_xref +from sphinx.directives import ObjectDescription +from sphinx.domains import Domain, Index, IndexEntry, ObjType +from sphinx.locale import _ +from sphinx.roles import XRefRole +from sphinx.util import nodes as node_utils +from sphinx.util.docfields import Field, TypedField +from sphinx.util.typing import OptionSpec + + +class DBusDescription(ObjectDescription[str]): + """Base class for DBus objects""" + + option_spec: OptionSpec = ObjectDescription.option_spec.copy() + option_spec.update( + { + "deprecated": directives.flag, + } + ) + + def get_index_text(self, modname: str, name: str) -> str: + """Return the text for the index entry of the object.""" + raise NotImplementedError("must be implemented in subclasses") + + def add_target_and_index( + self, name: str, sig: str, signode: desc_signature + ) -> None: + ifacename = self.env.ref_context.get("dbus:interface") + node_id = name + if ifacename: + node_id = f"{ifacename}.{node_id}" + + signode["names"].append(name) + signode["ids"].append(node_id) + + if "noindexentry" not in self.options: + indextext = self.get_index_text(ifacename, name) + if indextext: + self.indexnode["entries"].append( + ("single", indextext, node_id, "", None) + ) + + domain = cast(DBusDomain, self.env.get_domain("dbus")) + domain.note_object(name, self.objtype, node_id, location=signode) + + +class DBusInterface(DBusDescription): + """ + Implementation of ``dbus:interface``. + """ + + def get_index_text(self, ifacename: str, name: str) -> str: + return ifacename + + def before_content(self) -> None: + self.env.ref_context["dbus:interface"] = self.arguments[0] + + def after_content(self) -> None: + self.env.ref_context.pop("dbus:interface") + + def handle_signature(self, sig: str, signode: desc_signature) -> str: + signode += addnodes.desc_annotation("interface ", "interface ") + signode += addnodes.desc_name(sig, sig) + return sig + + def run(self) -> List[Node]: + _, node = super().run() + name = self.arguments[0] + section = nodes.section(ids=[name + "-section"]) + section += nodes.title(name, "%s interface" % name) + section += node + return [self.indexnode, section] + + +class DBusMember(DBusDescription): + + signal = False + + +class DBusMethod(DBusMember): + """ + Implementation of ``dbus:method``. 
+ """ + + option_spec: OptionSpec = DBusMember.option_spec.copy() + option_spec.update( + { + "noreply": directives.flag, + } + ) + + doc_field_types: List[Field] = [ + TypedField( + "arg", + label=_("Arguments"), + names=("arg",), + rolename="arg", + typerolename=None, + typenames=("argtype", "type"), + ), + TypedField( + "ret", + label=_("Returns"), + names=("ret",), + rolename="ret", + typerolename=None, + typenames=("rettype", "type"), + ), + ] + + def get_index_text(self, ifacename: str, name: str) -> str: + return _("%s() (%s method)") % (name, ifacename) + + def handle_signature(self, sig: str, signode: desc_signature) -> str: + params = addnodes.desc_parameterlist() + returns = addnodes.desc_parameterlist() + + contentnode = addnodes.desc_content() + self.state.nested_parse(self.content, self.content_offset, contentnode) + for child in contentnode: + if isinstance(child, nodes.field_list): + for field in child: + ty, sg, name = field[0].astext().split(None, 2) + param = addnodes.desc_parameter() + param += addnodes.desc_sig_keyword_type(sg, sg) + param += addnodes.desc_sig_space() + param += addnodes.desc_sig_name(name, name) + if ty == "arg": + params += param + elif ty == "ret": + returns += param + + anno = "signal " if self.signal else "method " + signode += addnodes.desc_annotation(anno, anno) + signode += addnodes.desc_name(sig, sig) + signode += params + if not self.signal and "noreply" not in self.options: + ret = addnodes.desc_returns() + ret += returns + signode += ret + + return sig + + +class DBusSignal(DBusMethod): + """ + Implementation of ``dbus:signal``. + """ + + doc_field_types: List[Field] = [ + TypedField( + "arg", + label=_("Arguments"), + names=("arg",), + rolename="arg", + typerolename=None, + typenames=("argtype", "type"), + ), + ] + signal = True + + def get_index_text(self, ifacename: str, name: str) -> str: + return _("%s() (%s signal)") % (name, ifacename) + + +class DBusProperty(DBusMember): + """ + Implementation of ``dbus:property``. 
+ """ + + option_spec: OptionSpec = DBusMember.option_spec.copy() + option_spec.update( + { + "type": directives.unchanged, + "readonly": directives.flag, + "writeonly": directives.flag, + "readwrite": directives.flag, + "emits-changed": directives.unchanged, + } + ) + + doc_field_types: List[Field] = [] + + def get_index_text(self, ifacename: str, name: str) -> str: + return _("%s (%s property)") % (name, ifacename) + + def transform_content(self, contentnode: addnodes.desc_content) -> None: + fieldlist = nodes.field_list() + access = None + if "readonly" in self.options: + access = _("read-only") + if "writeonly" in self.options: + access = _("write-only") + if "readwrite" in self.options: + access = _("read & write") + if access: + content = nodes.Text(access) + fieldname = nodes.field_name("", _("Access")) + fieldbody = nodes.field_body("", nodes.paragraph("", "", content)) + field = nodes.field("", fieldname, fieldbody) + fieldlist += field + emits = self.options.get("emits-changed", None) + if emits: + content = nodes.Text(emits) + fieldname = nodes.field_name("", _("Emits Changed")) + fieldbody = nodes.field_body("", nodes.paragraph("", "", content)) + field = nodes.field("", fieldname, fieldbody) + fieldlist += field + if len(fieldlist) > 0: + contentnode.insert(0, fieldlist) + + def handle_signature(self, sig: str, signode: desc_signature) -> str: + contentnode = addnodes.desc_content() + self.state.nested_parse(self.content, self.content_offset, contentnode) + ty = self.options.get("type") + + signode += addnodes.desc_annotation("property ", "property ") + signode += addnodes.desc_name(sig, sig) + signode += addnodes.desc_sig_punctuation("", ":") + signode += addnodes.desc_sig_keyword_type(ty, ty) + return sig + + def run(self) -> List[Node]: + self.name = "dbus:member" + return super().run() + + +class DBusXRef(XRefRole): + def process_link(self, env, refnode, has_explicit_title, title, target): + refnode["dbus:interface"] = env.ref_context.get("dbus:interface") + if not has_explicit_title: + title = title.lstrip(".") # only has a meaning for the target + target = target.lstrip("~") # only has a meaning for the title + # if the first character is a tilde, don't display the module/class + # parts of the contents + if title[0:1] == "~": + title = title[1:] + dot = title.rfind(".") + if dot != -1: + title = title[dot + 1 :] + # if the first character is a dot, search more specific namespaces first + # else search builtins first + if target[0:1] == ".": + target = target[1:] + refnode["refspecific"] = True + return title, target + + +class DBusIndex(Index): + """ + Index subclass to provide a D-Bus interfaces index. 
+ """ + + name = "dbusindex" + localname = _("D-Bus Interfaces Index") + shortname = _("dbus") + + def generate( + self, docnames: Iterable[str] = None + ) -> Tuple[List[Tuple[str, List[IndexEntry]]], bool]: + content: Dict[str, List[IndexEntry]] = {} + # list of prefixes to ignore + ignores: List[str] = self.domain.env.config["dbus_index_common_prefix"] + ignores = sorted(ignores, key=len, reverse=True) + + ifaces = sorted( + [ + x + for x in self.domain.data["objects"].items() + if x[1].objtype == "interface" + ], + key=lambda x: x[0].lower(), + ) + for name, (docname, node_id, _) in ifaces: + if docnames and docname not in docnames: + continue + + for ignore in ignores: + if name.startswith(ignore): + name = name[len(ignore) :] + stripped = ignore + break + else: + stripped = "" + + entries = content.setdefault(name[0].lower(), []) + entries.append(IndexEntry(stripped + name, 0, docname, node_id, "", "", "")) + + # sort by first letter + sorted_content = sorted(content.items()) + + return sorted_content, False + + +class ObjectEntry(NamedTuple): + docname: str + node_id: str + objtype: str + + +class DBusDomain(Domain): + """ + Implementation of the D-Bus domain. + """ + + name = "dbus" + label = "D-Bus" + object_types: Dict[str, ObjType] = { + "interface": ObjType(_("interface"), "iface", "obj"), + "method": ObjType(_("method"), "meth", "obj"), + "signal": ObjType(_("signal"), "sig", "obj"), + "property": ObjType(_("property"), "attr", "_prop", "obj"), + } + directives = { + "interface": DBusInterface, + "method": DBusMethod, + "signal": DBusSignal, + "property": DBusProperty, + } + roles = { + "iface": DBusXRef(), + "meth": DBusXRef(), + "sig": DBusXRef(), + "prop": DBusXRef(), + } + initial_data: Dict[str, Dict[str, Tuple[Any]]] = { + "objects": {}, # fullname -> ObjectEntry + } + indices = [ + DBusIndex, + ] + + @property + def objects(self) -> Dict[str, ObjectEntry]: + return self.data.setdefault("objects", {}) # fullname -> ObjectEntry + + def note_object( + self, name: str, objtype: str, node_id: str, location: Any = None + ) -> None: + self.objects[name] = ObjectEntry(self.env.docname, node_id, objtype) + + def clear_doc(self, docname: str) -> None: + for fullname, obj in list(self.objects.items()): + if obj.docname == docname: + del self.objects[fullname] + + def find_obj(self, typ: str, name: str) -> Optional[Tuple[str, ObjectEntry]]: + # skip parens + if name[-2:] == "()": + name = name[:-2] + if typ in ("meth", "sig", "prop"): + try: + ifacename, name = name.rsplit(".", 1) + except ValueError: + pass + return self.objects.get(name) + + def resolve_xref( + self, + env: "BuildEnvironment", + fromdocname: str, + builder: "Builder", + typ: str, + target: str, + node: pending_xref, + contnode: Element, + ) -> Optional[Element]: + """Resolve the pending_xref *node* with the given *typ* and *target*.""" + objdef = self.find_obj(typ, target) + if objdef: + return node_utils.make_refnode( + builder, fromdocname, objdef.docname, objdef.node_id, contnode + ) + + def get_objects(self) -> Iterator[Tuple[str, str, str, str, str, int]]: + for refname, obj in self.objects.items(): + yield (refname, refname, obj.objtype, obj.docname, obj.node_id, 1) + + +def setup(app): + app.add_domain(DBusDomain) + app.add_config_value("dbus_index_common_prefix", [], "env") diff --git a/docs/sphinx/dbusparser.py b/docs/sphinx/dbusparser.py new file mode 100644 index 0000000000..024553eae7 --- /dev/null +++ b/docs/sphinx/dbusparser.py @@ -0,0 +1,373 @@ +# Based from "GDBus - GLib D-Bus Library": +# +# 
Copyright (C) 2008-2011 Red Hat, Inc. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General +# Public License along with this library; if not, see . +# +# Author: David Zeuthen + +import xml.parsers.expat + + +class Annotation: + def __init__(self, key, value): + self.key = key + self.value = value + self.annotations = [] + self.since = "" + + +class Arg: + def __init__(self, name, signature): + self.name = name + self.signature = signature + self.annotations = [] + self.doc_string = "" + self.since = "" + + +class Method: + def __init__(self, name, h_type_implies_unix_fd=True): + self.name = name + self.h_type_implies_unix_fd = h_type_implies_unix_fd + self.in_args = [] + self.out_args = [] + self.annotations = [] + self.doc_string = "" + self.since = "" + self.deprecated = False + self.unix_fd = False + + +class Signal: + def __init__(self, name): + self.name = name + self.args = [] + self.annotations = [] + self.doc_string = "" + self.since = "" + self.deprecated = False + + +class Property: + def __init__(self, name, signature, access): + self.name = name + self.signature = signature + self.access = access + self.annotations = [] + self.arg = Arg("value", self.signature) + self.arg.annotations = self.annotations + self.readable = False + self.writable = False + if self.access == "readwrite": + self.readable = True + self.writable = True + elif self.access == "read": + self.readable = True + elif self.access == "write": + self.writable = True + else: + raise ValueError('Invalid access type "{}"'.format(self.access)) + self.doc_string = "" + self.since = "" + self.deprecated = False + self.emits_changed_signal = True + + +class Interface: + def __init__(self, name): + self.name = name + self.methods = [] + self.signals = [] + self.properties = [] + self.annotations = [] + self.doc_string = "" + self.doc_string_brief = "" + self.since = "" + self.deprecated = False + + +class DBusXMLParser: + STATE_TOP = "top" + STATE_NODE = "node" + STATE_INTERFACE = "interface" + STATE_METHOD = "method" + STATE_SIGNAL = "signal" + STATE_PROPERTY = "property" + STATE_ARG = "arg" + STATE_ANNOTATION = "annotation" + STATE_IGNORED = "ignored" + + def __init__(self, xml_data, h_type_implies_unix_fd=True): + self._parser = xml.parsers.expat.ParserCreate() + self._parser.CommentHandler = self.handle_comment + self._parser.CharacterDataHandler = self.handle_char_data + self._parser.StartElementHandler = self.handle_start_element + self._parser.EndElementHandler = self.handle_end_element + + self.parsed_interfaces = [] + self._cur_object = None + + self.state = DBusXMLParser.STATE_TOP + self.state_stack = [] + self._cur_object = None + self._cur_object_stack = [] + + self.doc_comment_last_symbol = "" + + self._h_type_implies_unix_fd = h_type_implies_unix_fd + + self._parser.Parse(xml_data) + + COMMENT_STATE_BEGIN = "begin" + COMMENT_STATE_PARAMS = "params" + COMMENT_STATE_BODY = "body" + COMMENT_STATE_SKIP = "skip" + + def handle_comment(self, data): + comment_state = 
DBusXMLParser.COMMENT_STATE_BEGIN + lines = data.split("\n") + symbol = "" + body = "" + in_para = False + params = {} + for line in lines: + orig_line = line + line = line.lstrip() + if comment_state == DBusXMLParser.COMMENT_STATE_BEGIN: + if len(line) > 0: + colon_index = line.find(": ") + if colon_index == -1: + if line.endswith(":"): + symbol = line[0 : len(line) - 1] + comment_state = DBusXMLParser.COMMENT_STATE_PARAMS + else: + comment_state = DBusXMLParser.COMMENT_STATE_SKIP + else: + symbol = line[0:colon_index] + rest_of_line = line[colon_index + 2 :].strip() + if len(rest_of_line) > 0: + body += rest_of_line + "\n" + comment_state = DBusXMLParser.COMMENT_STATE_PARAMS + elif comment_state == DBusXMLParser.COMMENT_STATE_PARAMS: + if line.startswith("@"): + colon_index = line.find(": ") + if colon_index == -1: + comment_state = DBusXMLParser.COMMENT_STATE_BODY + if not in_para: + in_para = True + body += orig_line + "\n" + else: + param = line[1:colon_index] + docs = line[colon_index + 2 :] + params[param] = docs + else: + comment_state = DBusXMLParser.COMMENT_STATE_BODY + if len(line) > 0: + if not in_para: + in_para = True + body += orig_line + "\n" + elif comment_state == DBusXMLParser.COMMENT_STATE_BODY: + if len(line) > 0: + if not in_para: + in_para = True + body += orig_line + "\n" + else: + if in_para: + body += "\n" + in_para = False + if in_para: + body += "\n" + + if symbol != "": + self.doc_comment_last_symbol = symbol + self.doc_comment_params = params + self.doc_comment_body = body + + def handle_char_data(self, data): + # print 'char_data=%s'%data + pass + + def handle_start_element(self, name, attrs): + old_state = self.state + old_cur_object = self._cur_object + if self.state == DBusXMLParser.STATE_IGNORED: + self.state = DBusXMLParser.STATE_IGNORED + elif self.state == DBusXMLParser.STATE_TOP: + if name == DBusXMLParser.STATE_NODE: + self.state = DBusXMLParser.STATE_NODE + else: + self.state = DBusXMLParser.STATE_IGNORED + elif self.state == DBusXMLParser.STATE_NODE: + if name == DBusXMLParser.STATE_INTERFACE: + self.state = DBusXMLParser.STATE_INTERFACE + iface = Interface(attrs["name"]) + self._cur_object = iface + self.parsed_interfaces.append(iface) + elif name == DBusXMLParser.STATE_ANNOTATION: + self.state = DBusXMLParser.STATE_ANNOTATION + anno = Annotation(attrs["name"], attrs["value"]) + self._cur_object.annotations.append(anno) + self._cur_object = anno + else: + self.state = DBusXMLParser.STATE_IGNORED + + # assign docs, if any + if "name" in attrs and self.doc_comment_last_symbol == attrs["name"]: + self._cur_object.doc_string = self.doc_comment_body + if "short_description" in self.doc_comment_params: + short_description = self.doc_comment_params["short_description"] + self._cur_object.doc_string_brief = short_description + if "since" in self.doc_comment_params: + self._cur_object.since = self.doc_comment_params["since"].strip() + + elif self.state == DBusXMLParser.STATE_INTERFACE: + if name == DBusXMLParser.STATE_METHOD: + self.state = DBusXMLParser.STATE_METHOD + method = Method( + attrs["name"], h_type_implies_unix_fd=self._h_type_implies_unix_fd + ) + self._cur_object.methods.append(method) + self._cur_object = method + elif name == DBusXMLParser.STATE_SIGNAL: + self.state = DBusXMLParser.STATE_SIGNAL + signal = Signal(attrs["name"]) + self._cur_object.signals.append(signal) + self._cur_object = signal + elif name == DBusXMLParser.STATE_PROPERTY: + self.state = DBusXMLParser.STATE_PROPERTY + prop = Property(attrs["name"], attrs["type"], 
attrs["access"]) + self._cur_object.properties.append(prop) + self._cur_object = prop + elif name == DBusXMLParser.STATE_ANNOTATION: + self.state = DBusXMLParser.STATE_ANNOTATION + anno = Annotation(attrs["name"], attrs["value"]) + self._cur_object.annotations.append(anno) + self._cur_object = anno + else: + self.state = DBusXMLParser.STATE_IGNORED + + # assign docs, if any + if "name" in attrs and self.doc_comment_last_symbol == attrs["name"]: + self._cur_object.doc_string = self.doc_comment_body + if "since" in self.doc_comment_params: + self._cur_object.since = self.doc_comment_params["since"].strip() + + elif self.state == DBusXMLParser.STATE_METHOD: + if name == DBusXMLParser.STATE_ARG: + self.state = DBusXMLParser.STATE_ARG + arg_name = None + if "name" in attrs: + arg_name = attrs["name"] + arg = Arg(arg_name, attrs["type"]) + direction = attrs.get("direction", "in") + if direction == "in": + self._cur_object.in_args.append(arg) + elif direction == "out": + self._cur_object.out_args.append(arg) + else: + raise ValueError('Invalid direction "{}"'.format(direction)) + self._cur_object = arg + elif name == DBusXMLParser.STATE_ANNOTATION: + self.state = DBusXMLParser.STATE_ANNOTATION + anno = Annotation(attrs["name"], attrs["value"]) + self._cur_object.annotations.append(anno) + self._cur_object = anno + else: + self.state = DBusXMLParser.STATE_IGNORED + + # assign docs, if any + if self.doc_comment_last_symbol == old_cur_object.name: + if "name" in attrs and attrs["name"] in self.doc_comment_params: + doc_string = self.doc_comment_params[attrs["name"]] + if doc_string is not None: + self._cur_object.doc_string = doc_string + if "since" in self.doc_comment_params: + self._cur_object.since = self.doc_comment_params[ + "since" + ].strip() + + elif self.state == DBusXMLParser.STATE_SIGNAL: + if name == DBusXMLParser.STATE_ARG: + self.state = DBusXMLParser.STATE_ARG + arg_name = None + if "name" in attrs: + arg_name = attrs["name"] + arg = Arg(arg_name, attrs["type"]) + self._cur_object.args.append(arg) + self._cur_object = arg + elif name == DBusXMLParser.STATE_ANNOTATION: + self.state = DBusXMLParser.STATE_ANNOTATION + anno = Annotation(attrs["name"], attrs["value"]) + self._cur_object.annotations.append(anno) + self._cur_object = anno + else: + self.state = DBusXMLParser.STATE_IGNORED + + # assign docs, if any + if self.doc_comment_last_symbol == old_cur_object.name: + if "name" in attrs and attrs["name"] in self.doc_comment_params: + doc_string = self.doc_comment_params[attrs["name"]] + if doc_string is not None: + self._cur_object.doc_string = doc_string + if "since" in self.doc_comment_params: + self._cur_object.since = self.doc_comment_params[ + "since" + ].strip() + + elif self.state == DBusXMLParser.STATE_PROPERTY: + if name == DBusXMLParser.STATE_ANNOTATION: + self.state = DBusXMLParser.STATE_ANNOTATION + anno = Annotation(attrs["name"], attrs["value"]) + self._cur_object.annotations.append(anno) + self._cur_object = anno + else: + self.state = DBusXMLParser.STATE_IGNORED + + elif self.state == DBusXMLParser.STATE_ARG: + if name == DBusXMLParser.STATE_ANNOTATION: + self.state = DBusXMLParser.STATE_ANNOTATION + anno = Annotation(attrs["name"], attrs["value"]) + self._cur_object.annotations.append(anno) + self._cur_object = anno + else: + self.state = DBusXMLParser.STATE_IGNORED + + elif self.state == DBusXMLParser.STATE_ANNOTATION: + if name == DBusXMLParser.STATE_ANNOTATION: + self.state = DBusXMLParser.STATE_ANNOTATION + anno = Annotation(attrs["name"], attrs["value"]) + 
self._cur_object.annotations.append(anno) + self._cur_object = anno + else: + self.state = DBusXMLParser.STATE_IGNORED + + else: + raise ValueError( + 'Unhandled state "{}" while entering element with name "{}"'.format( + self.state, name + ) + ) + + self.state_stack.append(old_state) + self._cur_object_stack.append(old_cur_object) + + def handle_end_element(self, name): + self.state = self.state_stack.pop() + self._cur_object = self._cur_object_stack.pop() + + +def parse_dbus_xml(xml_data): + parser = DBusXMLParser(xml_data, True) + return parser.parsed_interfaces diff --git a/docs/sphinx/depfile.py b/docs/sphinx/depfile.py index 277fdf0f56..afdcbcec6e 100644 --- a/docs/sphinx/depfile.py +++ b/docs/sphinx/depfile.py @@ -12,6 +12,8 @@ import os import sphinx +import sys +from pathlib import Path __version__ = '1.0' @@ -20,8 +22,21 @@ def get_infiles(env): yield env.doc2path(x) yield from ((os.path.join(env.srcdir, dep) for dep in env.dependencies[x])) + for mod in sys.modules.values(): + if hasattr(mod, '__file__'): + if mod.__file__: + yield mod.__file__ + # this is perhaps going to include unused files: + for static_path in env.config.html_static_path + env.config.templates_path: + for path in Path(static_path).rglob('*'): + yield str(path) -def write_depfile(app, env): + +def write_depfile(app, exception): + if exception: + return + + env = app.env if not env.config.depfile: return @@ -42,7 +57,7 @@ def write_depfile(app, env): def setup(app): app.add_config_value('depfile', None, 'env') app.add_config_value('depfile_stamp', None, 'env') - app.connect('env-updated', write_depfile) + app.connect('build-finished', write_depfile) return dict( version = __version__, diff --git a/docs/sphinx/fakedbusdoc.py b/docs/sphinx/fakedbusdoc.py new file mode 100644 index 0000000000..d2c5079046 --- /dev/null +++ b/docs/sphinx/fakedbusdoc.py @@ -0,0 +1,25 @@ +# D-Bus XML documentation extension, compatibility gunk for +"""dbus-doc is a Sphinx extension that provides documentation from D-Bus XML.""" + +from docutils.parsers.rst import Directive +from sphinx.application import Sphinx +from typing import Any, Dict + + +class FakeDBusDocDirective(Directive): + has_content = True + required_arguments = 1 + + def run(self): + return [] + + +def setup(app: Sphinx) -> Dict[str, Any]: + """Register a fake dbus-doc directive with Sphinx""" + app.add_directive("dbus-doc", FakeDBusDocDirective) diff --git a/docs/sphinx/qapidoc.py b/docs/sphinx/qapidoc.py index 87c67ab23f..d791b59492 100644 --- a/docs/sphinx/qapidoc.py +++ b/docs/sphinx/qapidoc.py @@ -112,17 +112,19 @@ class QAPISchemaGenRSTVisitor(QAPISchemaVisitor): def _nodes_for_ifcond(self, ifcond, with_if=True): """Return list of Text, literal nodes for the ifcond - Return a list which gives text like ' (If: cond1, cond2, cond3)', where - the conditions are in literal-text and the commas are not. + Return a list which gives text like ' (If: condition)'. If with_if is False, we don't return the "(If: " and ")". 
""" - condlist = intersperse([nodes.literal('', c) for c in ifcond], - nodes.Text(', ')) + + doc = ifcond.docgen() + if not doc: + return [] + doc = nodes.literal('', doc) if not with_if: - return condlist + return [doc] nodelist = [nodes.Text(' ('), nodes.strong('', 'If: ')] - nodelist.extend(condlist) + nodelist.append(doc) nodelist.append(nodes.Text(')')) return nodelist @@ -139,7 +141,7 @@ class QAPISchemaGenRSTVisitor(QAPISchemaVisitor): term.append(nodes.literal('', member.type.doc_type())) if member.optional: term.append(nodes.Text(' (optional)')) - if member.ifcond: + if member.ifcond.is_present(): term.extend(self._nodes_for_ifcond(member.ifcond)) return term @@ -154,7 +156,7 @@ class QAPISchemaGenRSTVisitor(QAPISchemaVisitor): nodes.literal('', variants.tag_member.name), nodes.Text(' is '), nodes.literal('', '"%s"' % variant.name)] - if variant.ifcond: + if variant.ifcond.is_present(): term.extend(self._nodes_for_ifcond(variant.ifcond)) return term @@ -209,7 +211,7 @@ class QAPISchemaGenRSTVisitor(QAPISchemaVisitor): dlnode = nodes.definition_list() for section in doc.args.values(): termtext = [nodes.literal('', section.member.name)] - if section.member.ifcond: + if section.member.ifcond.is_present(): termtext.extend(self._nodes_for_ifcond(section.member.ifcond)) # TODO drop fallbacks when undocumented members are outlawed if section.text: @@ -277,7 +279,7 @@ class QAPISchemaGenRSTVisitor(QAPISchemaVisitor): def _nodes_for_if_section(self, ifcond): """Return list of doctree nodes for the "If" section""" nodelist = [] - if ifcond: + if ifcond.is_present(): snode = self._make_section('If') snode += nodes.paragraph( '', '', *self._nodes_for_ifcond(ifcond, with_if=False) diff --git a/docs/system/arm/aspeed.rst b/docs/system/arm/aspeed.rst index cec87e3743..6c5b05128e 100644 --- a/docs/system/arm/aspeed.rst +++ b/docs/system/arm/aspeed.rst @@ -14,6 +14,7 @@ AST2400 SoC based machines : - ``palmetto-bmc`` OpenPOWER Palmetto POWER8 BMC - ``quanta-q71l-bmc`` OpenBMC Quanta BMC +- ``supermicrox11-bmc`` Supermicro X11 BMC AST2500 SoC based machines : @@ -21,12 +22,19 @@ AST2500 SoC based machines : - ``romulus-bmc`` OpenPOWER Romulus POWER9 BMC - ``witherspoon-bmc`` OpenPOWER Witherspoon POWER9 BMC - ``sonorapass-bmc`` OCP SonoraPass BMC -- ``swift-bmc`` OpenPOWER Swift BMC POWER9 +- ``fp5280g2-bmc`` Inspur FP5280G2 BMC +- ``g220a-bmc`` Bytedance G220A BMC AST2600 SoC based machines : - ``ast2600-evb`` Aspeed AST2600 Evaluation board (Cortex-A7) - ``tacoma-bmc`` OpenPOWER Witherspoon POWER9 AST2600 BMC +- ``rainier-bmc`` IBM Rainier POWER10 BMC +- ``fuji-bmc`` Facebook Fuji BMC +- ``bletchley-bmc`` Facebook Bletchley BMC +- ``fby35-bmc`` Facebook fby35 BMC +- ``qcom-dc-scm-v1-bmc`` Qualcomm DC-SCM V1 BMC +- ``qcom-firework-bmc`` Qualcomm Firework BMC Supported devices ----------------- @@ -35,7 +43,7 @@ Supported devices * Interrupt Controller (VIC) * Timer Controller * RTC Controller - * I2C Controller + * I2C Controller, including the new register interface of the AST2600 * System Control Unit (SCU) * SRAM mapping * X-DMA Controller (basic interface) @@ -51,38 +59,49 @@ Supported devices * Front LEDs (PCA9552 on I2C bus) * LPC Peripheral Controller (a subset of subdevices are supported) * Hash/Crypto Engine (HACE) - Hash support only. 
TODO: HMAC and RSA + * ADC + * Secure Boot Controller (AST2600) + * eMMC Boot Controller (dummy) + * PECI Controller (minimal) + * I3C Controller Missing devices --------------- * Coprocessor support - * ADC (out of tree implementation) * PWM and Fan Controller * Slave GPIO Controller * Super I/O Controller * PCI-Express 1 Controller * Graphic Display Controller - * PECI Controller * MCTP Controller * Mailbox Controller * Virtual UART * eSPI Controller - * I3C Controller Boot options ------------ -The Aspeed machines can be started using the ``-kernel`` option to -load a Linux kernel or from a firmware. Images can be downloaded from -the OpenBMC jenkins : +The Aspeed machines can be started using the ``-kernel`` and ``-dtb`` options +to load a Linux kernel or from a firmware. Images can be downloaded from the +OpenBMC jenkins : - https://jenkins.openbmc.org/job/ci-openbmc/lastSuccessfulBuild/distro=ubuntu,label=docker-builder + https://jenkins.openbmc.org/job/ci-openbmc/lastSuccessfulBuild/ or directly from the OpenBMC GitHub release repository : https://github.com/openbmc/openbmc/releases +To boot a kernel directly from a Linux build tree: + +.. code-block:: bash + + $ qemu-system-arm -M ast2600-evb -nographic \ + -kernel arch/arm/boot/zImage \ + -dtb arch/arm/boot/dts/aspeed-ast2600-evb.dtb \ + -initrd rootfs.cpio + The image should be attached as an MTD drive. Run : .. code-block:: bash @@ -107,3 +126,113 @@ FMC chip and a bigger (64M) SPI chip, use : .. code-block:: bash -M ast2500-evb,fmc-model=mx25l25635e,spi-model=mx66u51235f + + +Aspeed minibmc family boards (``ast1030-evb``) +================================================================== + +The QEMU Aspeed machines model mini BMCs of various Aspeed evaluation +boards. They are based on different releases of the +Aspeed SoC : the AST1030 integrating an ARM Cortex M4F CPU (200MHz). + +The SoC comes with SRAM, SPI, I2C, etc. + +AST1030 SoC based machines : + +- ``ast1030-evb`` Aspeed AST1030 Evaluation board (Cortex-M4F) + +Supported devices +----------------- + + * SMP (for the AST1030 Cortex-M4F) + * Interrupt Controller (VIC) + * Timer Controller + * I2C Controller + * System Control Unit (SCU) + * SRAM mapping + * Static Memory Controller (SMC or FMC) - Only SPI Flash support + * SPI Memory Controller + * USB 2.0 Controller + * Watchdog Controller + * GPIO Controller (Master only) + * UART + * LPC Peripheral Controller (a subset of subdevices are supported) + * Hash/Crypto Engine (HACE) - Hash support only. TODO: HMAC and RSA + * ADC + * Secure Boot Controller + * PECI Controller (minimal) + + +Missing devices +--------------- + + * PWM and Fan Controller + * Slave GPIO Controller + * Mailbox Controller + * Virtual UART + * eSPI Controller + * I3C Controller + +Boot options +------------ + +The Aspeed machines can be started using the ``-kernel`` to load a +Zephyr OS or from a firmware. Images can be downloaded from the +ASPEED GitHub release repository : + + https://github.com/AspeedTech-BMC/zephyr/releases + +To boot a kernel directly from a Zephyr build tree: + +.. code-block:: bash + + $ qemu-system-arm -M ast1030-evb -nographic \ + -kernel zephyr.elf + +Facebook Yosemite v3.5 Platform and CraterLake Server (``fby35``) +================================================================== + +Facebook has a series of multi-node compute server designs named +Yosemite. The most recent version released was +`Yosemite v3 `__. 
+ +Yosemite v3.5 is an iteration on this design, and is very similar: there's a +baseboard with a BMC, and 4 server slots. The new server board design termed +"CraterLake" includes a Bridge IC (BIC), with room for expansion boards to +include various compute accelerators (video, inferencing, etc). At the moment, +only the first server slot's BIC is included. + +Yosemite v3.5 is itself a sled which fits into a 40U chassis, and 3 sleds +can be fit into a chassis. See `here `__ +for an example. + +In this generation, the BMC is an AST2600 and each BIC is an AST1030. The BMC +runs `OpenBMC `__, and the BIC runs +`OpenBIC `__. + +Firmware images can be retrieved from the Github releases or built from the +source code, see the README's for instructions on that. This image uses the +"fby35" machine recipe from OpenBMC, and the "yv35-cl" target from OpenBIC. +Some reference images can also be found here: + +.. code-block:: bash + + $ wget https://github.com/facebook/openbmc/releases/download/openbmc-e2294ff5d31d/fby35.mtd + $ wget https://github.com/peterdelevoryas/OpenBIC/releases/download/oby35-cl-2022.13.01/Y35BCL.elf + +Since this machine has multiple SoC's, each with their own serial console, the +recommended way to run it is to allocate a pseudoterminal for each serial +console and let the monitor use stdio. Also, starting in a paused state is +useful because it allows you to attach to the pseudoterminals before the boot +process starts. + +.. code-block:: bash + + $ qemu-system-arm -machine fby35 \ + -drive file=fby35.mtd,format=raw,if=mtd \ + -device loader,file=Y35BCL.elf,addr=0,cpu-num=2 \ + -serial pty -serial pty -serial mon:stdio \ + -display none -S + $ screen /dev/tty0 # In a separate TMUX pane, terminal window, etc. + $ screen /dev/tty1 + $ (qemu) c # Start the boot process once screen is setup. diff --git a/docs/system/arm/cpu-features.rst b/docs/system/arm/cpu-features.rst index 584eb17097..00c444042f 100644 --- a/docs/system/arm/cpu-features.rst +++ b/docs/system/arm/cpu-features.rst @@ -217,10 +217,6 @@ TCG VCPU Features TCG VCPU features are CPU features that are specific to TCG. Below is the list of TCG VCPU features and their descriptions. - pauth Enable or disable ``FEAT_Pauth``, pointer - authentication. By default, the feature is - enabled with ``-cpu max``. - pauth-impdef When ``FEAT_Pauth`` is enabled, either the *impdef* (Implementation Defined) algorithm is enabled or the *architected* QARMA algorithm @@ -288,7 +284,7 @@ SVE CPU Property Parsing Semantics CPU Property Dependencies and Constraints"). 4) If one or more vector lengths have been explicitly enabled and at - at least one of the dependency lengths of the maximum enabled length + least one of the dependency lengths of the maximum enabled length has been explicitly disabled, then an error is generated (see constraint (2) of "SVE CPU Property Dependencies and Constraints"). @@ -376,6 +372,31 @@ verbose command lines. However, the recommended way to select vector lengths is to explicitly enable each desired length. Therefore only example's (1), (4), and (6) exhibit recommended uses of the properties. 
+SME CPU Property Examples
+-------------------------
+
+  1) Disable SME::
+
+     $ qemu-system-aarch64 -M virt -cpu max,sme=off
+
+  2) Implicitly enable all vector lengths for the ``max`` CPU type::
+
+     $ qemu-system-aarch64 -M virt -cpu max
+
+  3) Only enable the 256-bit vector length::
+
+     $ qemu-system-aarch64 -M virt -cpu max,sme256=on
+
+  4) Enable the 256-bit and 1024-bit vector lengths::
+
+     $ qemu-system-aarch64 -M virt -cpu max,sme256=on,sme1024=on
+
+  5) Disable the 512-bit vector length. This results in all the other
+     lengths supported by ``max`` defaulting to enabled
+     (128, 256, 1024 and 2048)::
+
+     $ qemu-system-aarch64 -M virt -cpu max,sme512=off
+
 SVE User-mode Default Vector Length Property
 --------------------------------------------
@@ -391,3 +412,34 @@ length supported by QEMU is 256.
 If this property is set to ``-1`` then the default vector length is
 set to the maximum possible length.
+
+SME CPU Properties
+==================
+
+The SME CPU properties are much like the SVE properties: ``sme`` is
+used to enable or disable the entire SME feature, and ``sme<N>`` is
+used to enable or disable specific vector lengths. Finally,
+``sme_fa64`` is used to enable or disable ``FEAT_SME_FA64``, which
+allows execution of the "full a64" instruction set while Streaming
+SVE mode is enabled.
+
+SME is not supported by KVM at this time.
+
+At least one vector length must be enabled when ``sme`` is enabled,
+and all vector lengths must be powers of 2. The maximum vector
+length supported by QEMU is 2048 bits. Otherwise, there are no
+additional constraints on the set of vector lengths supported by SME.
+
+SME User-mode Default Vector Length Property
+--------------------------------------------
+
+For qemu-aarch64, the cpu property ``sme-default-vector-length=N`` is
+defined to mirror the Linux kernel parameter file
+``/proc/sys/abi/sme_default_vector_length``. The default length, ``N``,
+is in units of bytes and must be between 16 and 8192.
+If not specified, the default vector length is 32.
+
+As with ``sve-default-vector-length``, if the default length is larger
+than the maximum vector length enabled, the actual vector length will
+be reduced. If this property is set to ``-1`` then the default vector
+length is set to the maximum possible length.
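A small, illustrative Python sketch of the clamping rule described above (not QEMU code; a simplified model in which vector lengths are given in bytes and ``-1`` selects the maximum enabled length):

.. code-block:: python

   # Simplified model of how sme-default-vector-length interacts with
   # the set of enabled vector lengths, per the description above.
   def effective_sme_vl(default_len_bytes, enabled_lengths_bytes):
       """Return the default streaming vector length in bytes."""
       max_enabled = max(enabled_lengths_bytes)
       if default_len_bytes == -1:
           return max_enabled
       # A default larger than the largest enabled length is reduced.
       return min(default_len_bytes, max_enabled)

   # 64 bytes (512 bits) requested, but only 128- and 256-bit lengths
   # enabled: the effective default drops to 32 bytes (256 bits).
   assert effective_sme_vl(64, [16, 32]) == 32
   assert effective_sme_vl(-1, [16, 32, 64]) == 64
   assert effective_sme_vl(32, [16, 32, 64]) == 32   # default of 32 bytes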
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst index 144dc491d9..e3af79bb8c 100644 --- a/docs/system/arm/emulation.rst +++ b/docs/system/arm/emulation.rst @@ -9,24 +9,44 @@ the following architecture extensions: - FEAT_AA32HPD (AArch32 hierarchical permission disables) - FEAT_AA32I8MM (AArch32 Int8 matrix multiplication instructions) - FEAT_AES (AESD and AESE instructions) +- FEAT_BBM at level 2 (Translation table break-before-make levels) - FEAT_BF16 (AArch64 BFloat16 instructions) - FEAT_BTI (Branch Target Identification) +- FEAT_CSV2 (Cache speculation variant 2) +- FEAT_CSV2_1p1 (Cache speculation variant 2, version 1.1) +- FEAT_CSV2_1p2 (Cache speculation variant 2, version 1.2) +- FEAT_CSV2_2 (Cache speculation variant 2, version 2) +- FEAT_CSV3 (Cache speculation variant 3) +- FEAT_DGH (Data gathering hint) - FEAT_DIT (Data Independent Timing instructions) - FEAT_DPB (DC CVAP instruction) +- FEAT_Debugv8p2 (Debug changes for v8.2) +- FEAT_Debugv8p4 (Debug changes for v8.4) - FEAT_DotProd (Advanced SIMD dot product instructions) +- FEAT_DoubleFault (Double Fault Extension) +- FEAT_E0PD (Preventing EL0 access to halves of address maps) +- FEAT_ETS (Enhanced Translation Synchronization) - FEAT_FCMA (Floating-point complex number instructions) - FEAT_FHM (Floating-point half-precision multiplication instructions) - FEAT_FP16 (Half-precision floating-point data processing) - FEAT_FRINTTS (Floating-point to integer instructions) - FEAT_FlagM (Flag manipulation instructions v2) - FEAT_FlagM2 (Enhancements to flag manipulation instructions) +- FEAT_GTG (Guest translation granule size) +- FEAT_HAFDBS (Hardware management of the access flag and dirty bit state) +- FEAT_HCX (Support for the HCRX_EL2 register) - FEAT_HPDS (Hierarchical permission disables) - FEAT_I8MM (AArch64 Int8 matrix multiplication instructions) +- FEAT_IDST (ID space trap handling) +- FEAT_IESB (Implicit error synchronization event) - FEAT_JSCVT (JavaScript conversion instructions) - FEAT_LOR (Limited ordering regions) +- FEAT_LPA (Large Physical Address space) +- FEAT_LPA2 (Large Physical and virtual Address space v2) - FEAT_LRCPC (Load-acquire RCpc instructions) - FEAT_LRCPC2 (Load-acquire RCpc instructions v2) - FEAT_LSE (Large System Extensions) +- FEAT_LVA (Large Virtual Address space) - FEAT_MTE (Memory Tagging Extension) - FEAT_MTE2 (Memory Tagging Extension) - FEAT_MTE3 (MTE Asymmetric Fault Handling) @@ -36,8 +56,12 @@ the following architecture extensions: - FEAT_PMULL (PMULL, PMULL2 instructions) - FEAT_PMUv3p1 (PMU Extensions v3.1) - FEAT_PMUv3p4 (PMU Extensions v3.4) +- FEAT_PMUv3p5 (PMU Extensions v3.5) +- FEAT_RAS (Reliability, availability, and serviceability) +- FEAT_RASv1p1 (RAS Extension v1.1) - FEAT_RDM (Advanced SIMD rounding double multiply accumulate instructions) - FEAT_RNG (Random number generator) +- FEAT_S2FWB (Stage 2 forced Write-Back) - FEAT_SB (Speculation Barrier) - FEAT_SEL2 (Secure EL2) - FEAT_SHA1 (SHA1 instructions) @@ -46,11 +70,16 @@ the following architecture extensions: - FEAT_SHA512 (Advanced SIMD SHA512 instructions) - FEAT_SM3 (Advanced SIMD SM3 instructions) - FEAT_SM4 (Advanced SIMD SM4 instructions) +- FEAT_SME (Scalable Matrix Extension) +- FEAT_SME_FA64 (Full A64 instruction set in Streaming SVE mode) +- FEAT_SME_F64F64 (Double-precision floating-point outer product instructions) +- FEAT_SME_I16I64 (16-bit to 64-bit integer widening outer product instructions) - FEAT_SPECRES (Speculation restriction instructions) - FEAT_SSBS (Speculative Store 
Bypass Safe) - FEAT_TLBIOS (TLB invalidate instructions in Outer Shareable domain) - FEAT_TLBIRANGE (TLB invalidate range instructions) - FEAT_TTCNP (Translation table Common not private translations) +- FEAT_TTL (Translation Table Level) - FEAT_TTST (Small translation tables) - FEAT_UAO (Unprivileged Access Override control) - FEAT_VHE (Virtualization Host Extensions) diff --git a/docs/system/arm/nuvoton.rst b/docs/system/arm/nuvoton.rst index 69f57c2886..c38df32bde 100644 --- a/docs/system/arm/nuvoton.rst +++ b/docs/system/arm/nuvoton.rst @@ -20,6 +20,8 @@ Hyperscale applications. The following machines are based on this chip : - ``quanta-gbs-bmc`` Quanta GBS server BMC - ``quanta-gsj`` Quanta GSJ server BMC +- ``kudo-bmc`` Fii USA Kudo server BMC +- ``mori-bmc`` Fii USA Mori server BMC There are also two more SoCs, NPCM710 and NPCM705, which are single-core variants of NPCM750 and NPCM730, respectively. These are currently not @@ -80,9 +82,9 @@ Boot options The Nuvoton machines can boot from an OpenBMC firmware image, or directly into a kernel using the ``-kernel`` option. OpenBMC images for ``quanta-gsj`` and -possibly others can be downloaded from the OpenPOWER jenkins : +possibly others can be downloaded from the OpenBMC jenkins : - https://openpower.xyz/ + https://jenkins.openbmc.org/ The firmware image should be attached as an MTD drive. Example : diff --git a/docs/system/arm/orangepi.rst b/docs/system/arm/orangepi.rst index 6f23907fb6..83c7445197 100644 --- a/docs/system/arm/orangepi.rst +++ b/docs/system/arm/orangepi.rst @@ -128,7 +128,7 @@ Alternatively, you can also choose to build you own image with buildroot using the orangepi_pc_defconfig. Also see https://buildroot.org for more information. When using an image as an SD card, it must be resized to a power of two. This can be -done with the qemu-img command. It is recommended to only increase the image size +done with the ``qemu-img`` command. It is recommended to only increase the image size instead of shrinking it to a power of two, to avoid loss of data. For example, to prepare a downloaded Armbian image, first extract it and then increase its size to one gigabyte as follows: @@ -250,14 +250,14 @@ and set the following environment variables before booting: Optionally you may save the environment variables to SD card with 'saveenv'. To continue booting simply give the 'boot' command and NetBSD boots. -Orange Pi PC acceptance tests -""""""""""""""""""""""""""""" +Orange Pi PC integration tests +"""""""""""""""""""""""""""""" -The Orange Pi PC machine has several acceptance tests included. +The Orange Pi PC machine has several integration tests included. To run the whole set of tests, build QEMU from source and simply provide the following command: .. 
code-block:: bash $ AVOCADO_ALLOW_LARGE_STORAGE=yes avocado --show=app,console run \ - -t machine:orangepi-pc tests/acceptance/boot_linux_console.py + -t machine:orangepi-pc tests/avocado/boot_linux_console.py diff --git a/docs/system/arm/virt.rst b/docs/system/arm/virt.rst index 59acf0eeaf..20442ea2c1 100644 --- a/docs/system/arm/virt.rst +++ b/docs/system/arm/virt.rst @@ -52,10 +52,14 @@ Supported guest CPU types: - ``cortex-a7`` (32-bit) - ``cortex-a15`` (32-bit; the default) +- ``cortex-a35`` (64-bit) - ``cortex-a53`` (64-bit) - ``cortex-a57`` (64-bit) - ``cortex-a72`` (64-bit) +- ``cortex-a76`` (64-bit) +- ``a64fx`` (64-bit) - ``host`` (with KVM only) +- ``neoverse-n1`` (64-bit) - ``max`` (same as ``host`` for KVM; best possible emulation with TCG) Note that the default is ``cortex-a15``, so for an AArch64 guest you must @@ -95,14 +99,17 @@ gic-version Valid values are: ``2`` - GICv2 + GICv2. Note that this limits the number of CPUs to 8. ``3`` - GICv3 + GICv3. This allows up to 512 CPUs. + ``4`` + GICv4. Requires ``virtualization`` to be ``on``; allows up to 317 CPUs. ``host`` Use the same GIC version the host provides, when using KVM ``max`` Use the best GIC version possible (same as host when using KVM; - currently same as ``3``` for TCG, but this may change in future) + with TCG this is currently ``3`` if ``virtualization`` is ``off`` and + ``4`` if ``virtualization`` is ``on``, but this may change in future) its Set ``on``/``off`` to enable/disable ITS instantiation. The default is ``on`` @@ -120,6 +127,19 @@ ras Set ``on``/``off`` to enable/disable reporting host memory errors to a guest using ACPI and guest external abort exceptions. The default is off. +dtb-randomness + Set ``on``/``off`` to pass random seeds via the guest DTB + rng-seed and kaslr-seed nodes (in both "/chosen" and + "/secure-chosen") to use for features like the random number + generator and address space randomisation. The default is + ``on``. You will want to disable it if your trusted boot chain + will verify the DTB it is passed, since this option causes the + DTB to be non-deterministic. It would be the responsibility of + the firmware to come up with a seed and pass it on if it wants to. + +dtb-kaslr-seed + A deprecated synonym for dtb-randomness. + Linux guest kernel configuration """""""""""""""""""""""""""""""" diff --git a/docs/system/arm/xlnx-versal-virt.rst b/docs/system/arm/xlnx-versal-virt.rst index 27f73500d9..92ad10d2da 100644 --- a/docs/system/arm/xlnx-versal-virt.rst +++ b/docs/system/arm/xlnx-versal-virt.rst @@ -32,6 +32,8 @@ Implemented devices: - OCM (256KB of On Chip Memory) - XRAM (4MB of on chip Accelerator RAM) - DDR memory +- BBRAM (36 bytes of Battery-backed RAM) +- eFUSE (3072 bytes of one-time field-programmable bit array) QEMU does not yet model any other devices, including the PL and the AI Engine. @@ -175,3 +177,50 @@ Run the following at the U-Boot prompt: fdt set /chosen/dom0 reg <0x00000000 0x40000000 0x0 0x03100000> booti 30000000 - 20000000 +BBRAM File Backend +"""""""""""""""""" +BBRAM can have an optional file backend, which must be a seekable +binary file with a size of 36 bytes or larger. A file with all +binary 0s is a 'blank'. + +To add a file-backend for the BBRAM: + +.. code-block:: bash + + -drive if=pflash,index=0,file=versal-bbram.bin,format=raw + +To use a different index value, N, from default of 0, add: + +.. 
code-block:: bash + + -global xlnx,bbram-ctrl.drive-index=N + +eFUSE File Backend +"""""""""""""""""" +eFUSE can have an optional file backend, which must be a seekable +binary file with a size of 3072 bytes or larger. A file with all +binary 0s is a 'blank'. + +To add a file-backend for the eFUSE: + +.. code-block:: bash + + -drive if=pflash,index=1,file=versal-efuse.bin,format=raw + +To use a different index value, N, from default of 1, add: + +.. code-block:: bash + + -global xlnx,efuse.drive-index=N + +.. warning:: + In actual physical Versal, BBRAM and eFUSE contain sensitive data. + The QEMU device models do **not** encrypt nor obfuscate any data + when holding them in models' memory or when writing them to their + file backends. + + Thus, a file backend should be used with caution, and 'format=luks' + is highly recommended (albeit with usage complexity). + + Better yet, do not use actual product data when running guest image + on this Xilinx Versal Virt board. diff --git a/docs/system/authz.rst b/docs/system/authz.rst index 942af39602..55b7315e49 100644 --- a/docs/system/authz.rst +++ b/docs/system/authz.rst @@ -77,9 +77,7 @@ To create an instance of this driver via QMP: "arguments": { "qom-type": "authz-simple", "id": "authz0", - "props": { - "identity": "fred" - } + "identity": "fred" } } @@ -110,15 +108,13 @@ To create an instance of this class via QMP: "arguments": { "qom-type": "authz-list", "id": "authz0", - "props": { - "rules": [ - { "match": "fred", "policy": "allow", "format": "exact" }, - { "match": "bob", "policy": "allow", "format": "exact" }, - { "match": "danb", "policy": "deny", "format": "exact" }, - { "match": "dan*", "policy": "allow", "format": "glob" } - ], - "policy": "deny" - } + "rules": [ + { "match": "fred", "policy": "allow", "format": "exact" }, + { "match": "bob", "policy": "allow", "format": "exact" }, + { "match": "danb", "policy": "deny", "format": "exact" }, + { "match": "dan*", "policy": "allow", "format": "glob" } + ], + "policy": "deny" } } @@ -143,10 +139,8 @@ To create an instance of this class via QMP: "arguments": { "qom-type": "authz-list-file", "id": "authz0", - "props": { - "filename": "/etc/qemu/myvm-vnc.acl", - "refresh": true - } + "filename": "/etc/qemu/myvm-vnc.acl", + "refresh": true } } diff --git a/docs/confidential-guest-support.txt b/docs/system/confidential-guest-support.rst similarity index 77% rename from docs/confidential-guest-support.txt rename to docs/system/confidential-guest-support.rst index 71d07ba57a..0c490dbda2 100644 --- a/docs/confidential-guest-support.txt +++ b/docs/system/confidential-guest-support.rst @@ -19,10 +19,10 @@ Running a Confidential Guest To run a confidential guest you need to add two command line parameters: -1. Use "-object" to create a "confidential guest support" object. The +1. Use ``-object`` to create a "confidential guest support" object. The type and parameters will vary with the specific mechanism to be used -2. Set the "confidential-guest-support" machine parameter to the ID of +2. Set the ``confidential-guest-support`` machine parameter to the ID of the object from (1). 
Example (for AMD SEV):: @@ -37,13 +37,8 @@ Supported mechanisms Currently supported confidential guest mechanisms are: -AMD Secure Encrypted Virtualization (SEV) - docs/amd-memory-encryption.txt - -POWER Protected Execution Facility (PEF) - docs/papr-pef.txt - -s390x Protected Virtualization (PV) - docs/system/s390x/protvirt.rst +* AMD Secure Encrypted Virtualization (SEV) (see :doc:`i386/amd-memory-encryption`) +* POWER Protected Execution Facility (PEF) (see :ref:`power-papr-protected-execution-facility-pef`) +* s390x Protected Virtualization (PV) (see :doc:`s390x/protvirt`) Other mechanisms may be supported in future. diff --git a/docs/system/cpu-models-x86.rst.inc b/docs/system/cpu-models-x86.rst.inc index 9119f5dff5..7f6368f999 100644 --- a/docs/system/cpu-models-x86.rst.inc +++ b/docs/system/cpu-models-x86.rst.inc @@ -1,5 +1,5 @@ Recommendations for KVM CPU model configuration on x86 hosts -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +============================================================ The information that follows provides recommendations for configuring CPU models on x86 hosts. The goals are to maximise performance, while @@ -49,7 +49,7 @@ future OS and toolchains are likely to target newer ABIs. The table that follows illustrates which ABI compatibility levels can be satisfied by the QEMU CPU models. Note that the table only lists the long term stable CPU model versions (eg Haswell-v4). -In addition to whats listed, there are also many CPU model +In addition to what is listed, there are also many CPU model aliases which resolve to a different CPU model version, depending on the machine type is in use. @@ -368,7 +368,7 @@ featureset, which prevents guests having optimal performance. Syntax for configuring CPU models -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +================================= The examples below illustrate the approach to configuring the various CPU models / features in QEMU and libvirt. diff --git a/docs/system/device-emulation.rst b/docs/system/device-emulation.rst index 7afcfd8064..0506006056 100644 --- a/docs/system/device-emulation.rst +++ b/docs/system/device-emulation.rst @@ -82,9 +82,14 @@ Emulated Devices .. toctree:: :maxdepth: 1 + devices/can.rst + devices/ccid.rst + devices/cxl.rst devices/ivshmem.rst devices/net.rst devices/nvme.rst devices/usb.rst devices/vhost-user.rst devices/virtio-pmem.rst + devices/vhost-user-rng.rst + devices/canokey.rst diff --git a/docs/system/device-url-syntax.rst.inc b/docs/system/device-url-syntax.rst.inc index d15a021508..7dbc525fa8 100644 --- a/docs/system/device-url-syntax.rst.inc +++ b/docs/system/device-url-syntax.rst.inc @@ -15,7 +15,7 @@ These are specified using a special URL syntax. 'iqn.2008-11.org.linux-kvm[:]' but this can also be set from the command line or a configuration file. - Since version Qemu 2.4 it is possible to specify a iSCSI request + Since version QEMU 2.4 it is possible to specify a iSCSI request timeout to detect stalled requests and force a reestablishment of the session. The timeout is specified in seconds. The default is 0 which means no timeout. 
Libiscsi 1.15.0 or greater is required for this diff --git a/docs/can.txt b/docs/system/devices/can.rst similarity index 66% rename from docs/can.txt rename to docs/system/devices/can.rst index 0d310237df..0af3d9912a 100644 --- a/docs/can.txt +++ b/docs/system/devices/can.rst @@ -1,6 +1,5 @@ -QEMU CAN bus emulation support -============================== - +CAN Bus Emulation Support +========================= The CAN bus emulation provides mechanism to connect multiple emulated CAN controller chips together by one or multiple CAN busses (the controller device "canbus" parameter). The individual busses @@ -32,34 +31,39 @@ emulated environment for testing and RTEMS GSoC slot has been donated to work on CAN hardware emulation on QEMU. Examples how to use CAN emulation for SJA1000 based boards -========================================================== - +---------------------------------------------------------- When QEMU with CAN PCI support is compiled then one of the next CAN boards can be selected - (1) CAN bus Kvaser PCI CAN-S (single SJA1000 channel) boad. QEMU startup options +(1) CAN bus Kvaser PCI CAN-S (single SJA1000 channel) board. QEMU startup options:: + -object can-bus,id=canbus0 -device kvaser_pci,canbus=canbus0 - Add "can-host-socketcan" object to connect device to host system CAN bus + +Add "can-host-socketcan" object to connect device to host system CAN bus:: + -object can-host-socketcan,id=canhost0,if=can0,canbus=canbus0 - (2) CAN bus PCM-3680I PCI (dual SJA1000 channel) emulation +(2) CAN bus PCM-3680I PCI (dual SJA1000 channel) emulation:: + -object can-bus,id=canbus0 -device pcm3680_pci,canbus0=canbus0,canbus1=canbus0 - another example: +Another example:: + -object can-bus,id=canbus0 -object can-bus,id=canbus1 -device pcm3680_pci,canbus0=canbus0,canbus1=canbus1 - (3) CAN bus MIOe-3680 PCI (dual SJA1000 channel) emulation +(3) CAN bus MIOe-3680 PCI (dual SJA1000 channel) emulation:: + -device mioe3680_pci,canbus0=canbus0 - The ''kvaser_pci'' board/device model is compatible with and has been tested with -''kvaser_pci'' driver included in mainline Linux kernel. +the ''kvaser_pci'' driver included in mainline Linux kernel. The tested setup was Linux 4.9 kernel on the host and guest side. -Example for qemu-system-x86_64: + +Example for qemu-system-x86_64:: qemu-system-x86_64 -accel kvm -kernel /boot/vmlinuz-4.9.0-4-amd64 \ -initrd ramdisk.cpio \ @@ -69,7 +73,7 @@ Example for qemu-system-x86_64: -device kvaser_pci,canbus=canbus0 \ -nographic -append "console=ttyS0" -Example for qemu-system-arm: +Example for qemu-system-arm:: qemu-system-arm -cpu arm1176 -m 256 -M versatilepb \ -kernel kernel-qemu-arm1176-versatilepb \ @@ -84,24 +88,23 @@ Example for qemu-system-arm: The CAN interface of the host system has to be configured for proper bitrate and set up. Configuration is not propagated from emulated devices through bus to the physical host device. Example configuration -for 1 Mbit/s +for 1 Mbit/s:: ip link set can0 type can bitrate 1000000 ip link set can0 up Virtual (host local only) can interface can be used on the host -side instead of physical interface +side instead of physical interface:: ip link add dev can0 type vcan The CAN interface on the host side can be used to analyze CAN -traffic with "candump" command which is included in "can-utils". +traffic with "candump" command which is included in "can-utils":: candump can0 CTU CAN FD support examples -=========================== - +--------------------------- This open-source core provides CAN FD support. 
CAN FD drames are delivered even to the host systems when SocketCAN interface is found CAN FD capable. @@ -113,7 +116,7 @@ on the board. Example how to connect the canbus0-bus (virtual wire) to the host Linux system (SocketCAN used) and to both CTU CAN FD cores emulated on the corresponding PCI card expects that host system CAN bus -is setup according to the previous SJA1000 section. +is setup according to the previous SJA1000 section:: qemu-system-x86_64 -enable-kvm -kernel /boot/vmlinuz-4.19.52+ \ -initrd ramdisk.cpio \ @@ -125,7 +128,7 @@ is setup according to the previous SJA1000 section. -device ctucan_pci,canbus0=canbus0-bus,canbus1=canbus0-bus \ -nographic -Setup of CTU CAN FD controller in a guest Linux system +Setup of CTU CAN FD controller in a guest Linux system:: insmod ctucanfd.ko || modprobe ctucanfd insmod ctucanfd_pci.ko || modprobe ctucanfd_pci @@ -150,49 +153,37 @@ Setup of CTU CAN FD controller in a guest Linux system /bin/ip link set $ifc up done -The test can run for example +The test can run for example:: candump can1 -in the guest system and next commands in the host system for basic CAN +in the guest system and next commands in the host system for basic CAN:: cangen can0 -for CAN FD without bitrate switch +for CAN FD without bitrate switch:: cangen can0 -f -and with bitrate switch +and with bitrate switch:: cangen can0 -b -The test can be run viceversa, generate messages in the guest system and capture them -in the host one and much more combinations. +The test can also be run the other way around, generating messages in the +guest system and capturing them in the host system. Other combinations are +also possible. Links to other resources -======================== +------------------------ - (1) CAN related projects at Czech Technical University, Faculty of Electrical Engineering - http://canbus.pages.fel.cvut.cz/ - (2) Repository with development can-pci branch at Czech Technical University - https://gitlab.fel.cvut.cz/canbus/qemu-canbus - (3) RTEMS page describing project - https://devel.rtems.org/wiki/Developer/Simulators/QEMU/CANEmulation - (4) RTLWS 2015 article about the project and its use with CANopen emulation - http://cmp.felk.cvut.cz/~pisa/can/doc/rtlws-17-pisa-qemu-can.pdf - (5) GNU/Linux, CAN and CANopen in Real-time Control Applications - Slides from LinuxDays 2017 (include updated RTLWS 2015 content) - https://www.linuxdays.cz/2017/video/Pavel_Pisa-CAN_canopen.pdf - (6) Linux SocketCAN utilities - https://github.com/linux-can/can-utils/ - (7) CTU CAN FD project including core VHDL design, Linux driver, - test utilities etc. 
- https://gitlab.fel.cvut.cz/canbus/ctucanfd_ip_core
- (8) CTU CAN FD Core Datasheet Documentation
- http://canbus.pages.fel.cvut.cz/ctucanfd_ip_core/Progdokum.pdf
- (9) CTU CAN FD Core System Architecture Documentation
- http://canbus.pages.fel.cvut.cz/ctucanfd_ip_core/ctu_can_fd_architecture.pdf
- (10) CTU CAN FD Driver Documentation
- http://canbus.pages.fel.cvut.cz/ctucanfd_ip_core/driver_doc/ctucanfd-driver.html
- (11) Integration with PCIe interfacing for Intel/Altera Cyclone IV based board
- https://gitlab.fel.cvut.cz/canbus/pcie-ctu_can_fd
+ (1) `CAN related projects at Czech Technical University, Faculty of Electrical Engineering <http://canbus.pages.fel.cvut.cz/>`_
+ (2) `Repository with development can-pci branch at Czech Technical University <https://gitlab.fel.cvut.cz/canbus/qemu-canbus>`_
+ (3) `RTEMS page describing project <https://devel.rtems.org/wiki/Developer/Simulators/QEMU/CANEmulation>`_
+ (4) `RTLWS 2015 article about the project and its use with CANopen emulation <http://cmp.felk.cvut.cz/~pisa/can/doc/rtlws-17-pisa-qemu-can.pdf>`_
+ (5) `GNU/Linux, CAN and CANopen in Real-time Control Applications Slides from LinuxDays 2017 (include updated RTLWS 2015 content) <https://www.linuxdays.cz/2017/video/Pavel_Pisa-CAN_canopen.pdf>`_
+ (6) `Linux SocketCAN utilities <https://github.com/linux-can/can-utils/>`_
+ (7) `CTU CAN FD project including core VHDL design, Linux driver, test utilities etc. <https://gitlab.fel.cvut.cz/canbus/ctucanfd_ip_core>`_
+ (8) `CTU CAN FD Core Datasheet Documentation <http://canbus.pages.fel.cvut.cz/ctucanfd_ip_core/Progdokum.pdf>`_
+ (9) `CTU CAN FD Core System Architecture Documentation <http://canbus.pages.fel.cvut.cz/ctucanfd_ip_core/ctu_can_fd_architecture.pdf>`_
+ (10) `CTU CAN FD Driver Documentation <http://canbus.pages.fel.cvut.cz/ctucanfd_ip_core/driver_doc/ctucanfd-driver.html>`_
+ (11) `Integration with PCIe interfacing for Intel/Altera Cyclone IV based board <https://gitlab.fel.cvut.cz/canbus/pcie-ctu_can_fd>`_
diff --git a/docs/system/devices/canokey.rst b/docs/system/devices/canokey.rst
new file mode 100644
index 0000000000..cfa6186e48
--- /dev/null
+++ b/docs/system/devices/canokey.rst
@@ -0,0 +1,158 @@
+.. _canokey:
+
+CanoKey QEMU
+------------
+
+CanoKey [1]_ is an open-source secure key with support for
+
+* U2F / FIDO2 with Ed25519 and HMAC-secret
+* OpenPGP Card V3.4 with RSA4096, Ed25519 and more [2]_
+* PIV (NIST SP 800-73-4)
+* HOTP / TOTP
+* NDEF
+
+All these platform-independent features are in canokey-core [3]_.
+
+For different platforms, CanoKey has different implementations,
+including both hardware implementations and virtual cards:
+
+* CanoKey STM32 [4]_
+* CanoKey Pigeon [5]_
+* (virt-card) CanoKey USB/IP
+* (virt-card) CanoKey FunctionFS
+
+In QEMU, yet another CanoKey virt-card is implemented.
+CanoKey QEMU exposes itself as a USB device to the guest OS.
+
+With the same software configuration as a hardware key,
+the guest OS can use all the functionalities of a secure key as if
+there was actually a hardware key plugged in.
+
+CanoKey QEMU provides much convenience for debugging:
+
+* libcanokey-qemu supports debugging output thus developers can
+  inspect what happens inside a secure key
+* CanoKey QEMU supports trace events thus key activity can be traced
+* QEMU USB stack supports pcap thus USB packets between the guest
+  and key can be captured and analysed
+
+Then for developers:
+
+* For developers on software with secure key support (e.g. FIDO2, OpenPGP),
+  they can see what happens inside the secure key
+* For secure key developers, USB packets between guest OS and CanoKey
+  can be easily captured and analysed
+
+Also since this is a virtual card, it can be easily used in CI for testing
+code that works with secure keys.
+
+Building
+========
+
+libcanokey-qemu is required to use CanoKey QEMU.
+
+.. code-block:: shell
+
+    git clone https://github.com/canokeys/canokey-qemu
+    mkdir canokey-qemu/build
+    pushd canokey-qemu/build
+
+If you want to install libcanokey-qemu in a different place,
+add ``-DCMAKE_INSTALL_PREFIX=/path/to/your/place`` to cmake below.
+
+.. code-block:: shell
+
+    cmake ..
+ make + make install # may need sudo + popd + +Then configuring and building: + +.. code-block:: shell + + # depending on your env, lib/pkgconfig can be lib64/pkgconfig + export PKG_CONFIG_PATH=/path/to/your/place/lib/pkgconfig:$PKG_CONFIG_PATH + ./configure --enable-canokey && make + +Using CanoKey QEMU +================== + +CanoKey QEMU stores all its data on a file of the host specified by the argument +when invoking qemu. + +.. parsed-literal:: + + |qemu_system| -usb -device canokey,file=$HOME/.canokey-file + +Note: you should keep this file carefully as it may contain your private key! + +The first time when the file is used, it is created and initialized by CanoKey, +afterwards CanoKey QEMU would just read this file. + +After the guest OS boots, you can check that there is a USB device. + +For example, If the guest OS is an Linux machine. You may invoke lsusb +and find CanoKey QEMU there: + +.. code-block:: shell + + $ lsusb + Bus 001 Device 002: ID 20a0:42d4 Clay Logic CanoKey QEMU + +You may setup the key as guided in [6]_. The console for the key is at [7]_. + +Debugging +========= + +CanoKey QEMU consists of two parts, ``libcanokey-qemu.so`` and ``canokey.c``, +the latter of which resides in QEMU. The former provides core functionality +of a secure key while the latter provides platform-dependent functions: +USB packet handling. + +If you want to trace what happens inside the secure key, when compiling +libcanokey-qemu, you should add ``-DQEMU_DEBUG_OUTPUT=ON`` in cmake command +line: + +.. code-block:: shell + + cmake .. -DQEMU_DEBUG_OUTPUT=ON + +If you want to trace events happened in canokey.c, use + +.. parsed-literal:: + + |qemu_system| --trace "canokey_*" \\ + -usb -device canokey,file=$HOME/.canokey-file + +If you want to capture USB packets between the guest and the host, you can: + +.. parsed-literal:: + + |qemu_system| -usb -device canokey,file=$HOME/.canokey-file,pcap=key.pcap + +Limitations +=========== + +Currently libcanokey-qemu.so has dozens of global variables as it was originally +designed for embedded systems. Thus one qemu instance can not have +multiple CanoKey QEMU running, namely you can not + +.. parsed-literal:: + + |qemu_system| -usb -device canokey,file=$HOME/.canokey-file \\ + -device canokey,file=$HOME/.canokey-file2 + +Also, there is no lock on canokey-file, thus two CanoKey QEMU instance +can not read one canokey-file at the same time. + +References +========== + +.. [1] ``_ +.. [2] ``_ +.. [3] ``_ +.. [4] ``_ +.. [5] ``_ +.. [6] ``_ +.. [7] ``_ diff --git a/docs/system/devices/ccid.rst b/docs/system/devices/ccid.rst new file mode 100644 index 0000000000..3b8c2ab46a --- /dev/null +++ b/docs/system/devices/ccid.rst @@ -0,0 +1,171 @@ +Chip Card Interface Device (CCID) +================================= + +USB CCID device +--------------- +The USB CCID device is a USB device implementing the CCID specification, which +lets one connect smart card readers that implement the same spec. For more +information see the specification:: + + Universal Serial Bus + Device Class: Smart Card + CCID + Specification for + Integrated Circuit(s) Cards Interface Devices + Revision 1.1 + April 22rd, 2005 + +Smartcards are used for authentication, single sign on, decryption in +public/private schemes and digital signatures. A smartcard reader on the client +cannot be used on a guest with simple usb passthrough since it will then not be +available on the client, possibly locking the computer when it is "removed". 
On +the other hand this device can let you use the smartcard on both the client and +the guest machine. It is also possible to have a completely virtual smart card +reader and smart card (i.e. not backed by a physical device) using this device. + +Building +-------- +The cryptographic functions and access to the physical card is done via the +libcacard library, whose development package must be installed prior to +building QEMU: + +In redhat/fedora:: + + yum install libcacard-devel + +In ubuntu:: + + apt-get install libcacard-dev + +Configuring and building:: + + ./configure --enable-smartcard && make + +Using ccid-card-emulated with hardware +-------------------------------------- +Assuming you have a working smartcard on the host with the current +user, using libcacard, QEMU acts as another client using ccid-card-emulated:: + + qemu -usb -device usb-ccid -device ccid-card-emulated + +Using ccid-card-emulated with certificates stored in files +---------------------------------------------------------- +You must create the CA and card certificates. This is a one time process. +We use NSS certificates:: + + mkdir fake-smartcard + cd fake-smartcard + certutil -N -d sql:$PWD + certutil -S -d sql:$PWD -s "CN=Fake Smart Card CA" -x -t TC,TC,TC -n fake-smartcard-ca + certutil -S -d sql:$PWD -t ,, -s "CN=John Doe" -n id-cert -c fake-smartcard-ca + certutil -S -d sql:$PWD -t ,, -s "CN=John Doe (signing)" --nsCertType smime -n signing-cert -c fake-smartcard-ca + certutil -S -d sql:$PWD -t ,, -s "CN=John Doe (encryption)" --nsCertType sslClient -n encryption-cert -c fake-smartcard-ca + +Note: you must have exactly three certificates. + +You can use the emulated card type with the certificates backend:: + + qemu -usb -device usb-ccid -device ccid-card-emulated,backend=certificates,db=sql:$PWD,cert1=id-cert,cert2=signing-cert,cert3=encryption-cert + +To use the certificates in the guest, export the CA certificate:: + + certutil -L -r -d sql:$PWD -o fake-smartcard-ca.cer -n fake-smartcard-ca + +and import it in the guest:: + + certutil -A -d /etc/pki/nssdb -i fake-smartcard-ca.cer -t TC,TC,TC -n fake-smartcard-ca + +In a Linux guest you can then use the CoolKey PKCS #11 module to access +the card:: + + certutil -d /etc/pki/nssdb -L -h all + +It will prompt you for the PIN (which is the password you assigned to the +certificate database early on), and then show you all three certificates +together with the manually imported CA cert:: + + Certificate Nickname Trust Attributes + fake-smartcard-ca CT,C,C + John Doe:CAC ID Certificate u,u,u + John Doe:CAC Email Signature Certificate u,u,u + John Doe:CAC Email Encryption Certificate u,u,u + +If this does not happen, CoolKey is not installed or not registered with +NSS. Registration can be done from Firefox or the command line:: + + modutil -dbdir /etc/pki/nssdb -add "CAC Module" -libfile /usr/lib64/pkcs11/libcoolkeypk11.so + modutil -dbdir /etc/pki/nssdb -list + +Using ccid-card-passthru with client side hardware +-------------------------------------------------- +On the host specify the ccid-card-passthru device with a suitable chardev:: + + qemu -chardev socket,server=on,host=0.0.0.0,port=2001,id=ccid,wait=off \ + -usb -device usb-ccid -device ccid-card-passthru,chardev=ccid + +On the client run vscclient, built when you built QEMU:: + + vscclient 2001 + +Using ccid-card-passthru with client side certificates +------------------------------------------------------ +This case is not particularly useful, but you can use it to debug +your setup. 
+ +Follow instructions above, except run QEMU and vscclient as follows. + +Run qemu as per above, and run vscclient from the "fake-smartcard" +directory as follows:: + + qemu -chardev socket,server=on,host=0.0.0.0,port=2001,id=ccid,wait=off \ + -usb -device usb-ccid -device ccid-card-passthru,chardev=ccid + vscclient -e "db=\"sql:$PWD\" use_hw=no soft=(,Test,CAC,,id-cert,signing-cert,encryption-cert)" 2001 + + +Passthrough protocol scenario +----------------------------- +This is a typical interchange of messages when using the passthru card device. +usb-ccid is a usb device. It defaults to an unattached usb device on startup. +usb-ccid expects a chardev and expects the protocol defined in +cac_card/vscard_common.h to be passed over that. +The usb-ccid device can be in one of three modes: + +* detached +* attached with no card +* attached with card + +A typical interchange is (the arrow shows who started each exchange, it can be client +originated or guest originated):: + + client event | vscclient | passthru | usb-ccid | guest event + ------------------------------------------------------------------------------------------------ + | VSC_Init | | | + | VSC_ReaderAdd | | attach | + | | | | sees new usb device. + card inserted -> | | | | + | VSC_ATR | insert | insert | see new card + | | | | + | VSC_APDU | VSC_APDU | | <- guest sends APDU + client <-> physical | | | | + card APDU exchange | | | | + client response -> | VSC_APDU | VSC_APDU | | receive APDU response + ... + [APDU<->APDU repeats several times] + ... + card removed -> | | | | + | VSC_CardRemove | remove | remove | card removed + ... + [(card insert, apdu's, card remove) repeat] + ... + kill/quit | | | | + vscclient | | | | + | VSC_ReaderRemove | | detach | + | | | | usb device removed. + +libcacard +--------- +Both ccid-card-emulated and vscclient use libcacard as the card emulator. +libcacard implements a completely virtual CAC (DoD standard for smart +cards) compliant card and uses NSS to retrieve certificates and do +any encryption. The backend can then be a real reader and card, or +certificates stored in files. diff --git a/docs/system/devices/cxl.rst b/docs/system/devices/cxl.rst new file mode 100644 index 0000000000..f25783a4ec --- /dev/null +++ b/docs/system/devices/cxl.rst @@ -0,0 +1,386 @@ +Compute Express Link (CXL) +========================== +From the view of a single host, CXL is an interconnect standard that +targets accelerators and memory devices attached to a CXL host. +This description will focus on those aspects visible either to +software running on a QEMU emulated host or to the internals of +functional emulation. As such, it will skip over many of the +electrical and protocol elements that would be more of interest +for real hardware and will dominate more general introductions to CXL. +It will also completely ignore the fabric management aspects of CXL +by considering only a single host and a static configuration. + +CXL shares many concepts and much of the infrastructure of PCI Express, +with CXL Host Bridges, which have CXL Root Ports which may be directly +attached to CXL or PCI End Points. Alternatively there may be CXL Switches +with CXL and PCI Endpoints attached below them. In many cases additional +control and capabilities are exposed via PCI Express interfaces. +This sharing of interfaces and hence emulation code is reflected +in how the devices are emulated in QEMU. In most cases the various +CXL elements are built upon an equivalent PCIe devices. 
+
+CXL devices support the following interfaces:
+
+* Most conventional PCIe interfaces
+
+  - Configuration space access
+  - BAR mapped memory accesses used for registers and mailboxes.
+  - MSI/MSI-X
+  - AER
+  - DOE mailboxes
+  - IDE
+  - Many other PCI express defined interfaces..
+
+* Memory operations
+
+  - Equivalent of accessing DRAM / NVDIMMs. Any access / feature
+    supported by the host for normal memory should also work for
+    CXL attached memory devices.
+
+* Cache operations. These are mostly irrelevant to QEMU emulation as
+  QEMU is not emulating a coherency protocol. Any emulation related
+  to these will be device specific and is out of the scope of this
+  document.
+
+CXL 2.0 Device Types
+--------------------
+CXL 2.0 End Points are often categorized into three types.
+
+**Type 1:** These support coherent caching of host memory. An example
+might be a crypto accelerator. May also have device private memory
+accessible via means such as PCI memory reads and writes to BARs.
+
+**Type 2:** These support coherent caching of host memory and host
+managed device memory (HDM) for which the coherency protocol is managed
+by the host. This is a complex topic, so for more information on CXL
+coherency see the CXL 2.0 specification.
+
+**Type 3 Memory devices:** These devices act as a means of attaching
+additional memory (HDM) to a CXL host including both volatile and
+persistent memory. The CXL topology may support interleaving across a
+number of Type 3 memory devices using HDM Decoders in the host, host
+bridge, switch upstream port and endpoints.
+
+Scope of CXL emulation in QEMU
+------------------------------
+The focus of CXL emulation is CXL revision 2.0 and later. Earlier CXL
+revisions defined a smaller set of features, leaving much of the control
+interface as implementation defined or device specific, making generic
+emulation challenging with host specific firmware being responsible
+for setup and the Endpoints being presented to operating systems
+as Root Complex Integrated End Points. CXL rev 2.0 looks a lot
+more like PCI Express, with fully specified discoverability
+of the CXL topology.
+
+CXL System components
+---------------------
+A CXL system is made up of a Host with a number of 'standard components',
+the control and capabilities of which are discoverable by system software
+using means described in the CXL 2.0 specification.
+
+CXL Fixed Memory Windows (CFMW)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+A CFMW consists of a particular range of Host Physical Address space
+which is routed to particular CXL Host Bridges. At time of generic
+software initialization it will have a particular interleaving
+configuration and associated Quality of Service Throttling Group (QTG).
+This information is available to system software when making
+decisions about how to configure interleave across available CXL
+memory devices. It is provided as CFMW Structures (CFMWS) in
+the CXL Early Discovery Table, an ACPI table.
+
+Note: QTG 0 is the only one currently supported in QEMU.
+
+CXL Host Bridge (CXL HB)
+~~~~~~~~~~~~~~~~~~~~~~~~
+A CXL host bridge is similar to the PCIe equivalent, but with a
+specification defined register interface called CXL Host Bridge
+Component Registers (CHBCR). The location of this CHBCR MMIO
+space is described to system software via a CXL Host Bridge
+Structure (CHBS) in the CEDT ACPI table. The actual interfaces
+are identical to those used for other parts of the CXL hierarchy
+as CXL Component Registers in PCI BARs.
+
+Interfaces provided include:
+
+* Configuration of HDM Decoders to route CXL Memory accesses with
+  a particular Host Physical Address range to the target port
+  below which the CXL device servicing that address lies. This
+  may be a mapping to a single Root Port (RP) or across a set of
+  target RPs.
+
+CXL Root Ports (CXL RP)
+~~~~~~~~~~~~~~~~~~~~~~~
+A CXL Root Port serves the same purpose as a PCIe Root Port.
+There are a number of CXL specific Designated Vendor Specific
+Extended Capabilities (DVSEC) in PCIe Configuration Space
+and associated component register access via PCI bars.
+
+CXL Switch
+~~~~~~~~~~
+Here we consider a simple CXL switch with only a single
+virtual hierarchy. Whilst more complex devices exist, their
+visibility to a particular host is generally the same as for
+a simple switch design. Hosts often have no awareness
+of complex rerouting and device pooling; they simply see
+devices being hot added or hot removed.
+
+A CXL switch has a similar architecture to those in PCIe,
+with a single upstream port, internal PCI bus and multiple
+downstream ports.
+
+Both the CXL upstream and downstream ports have CXL specific
+DVSECs in configuration space, and component registers in PCI
+BARs. The Upstream Port has the configuration interfaces for
+the HDM decoders which route incoming memory accesses to the
+appropriate downstream port.
+
+A CXL switch is created in a similar fashion to PCI switches
+by creating an upstream port (cxl-upstream) and a number of
+downstream ports on the internal switch bus (cxl-downstream).
+
+CXL Memory Devices - Type 3
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+CXL type 3 devices use a PCI class code and are intended to be supported
+by a generic operating system driver. They have HDM decoders, though in
+these EP devices the decoder is responsible not for routing but for
+translation of the incoming host physical address (HPA) into a Device
+Physical Address (DPA).
+
+CXL Memory Interleave
+---------------------
+To understand the interaction of different CXL hardware components which
+are emulated in QEMU, let us consider a memory read in a fully configured
+CXL topology. Note that system software is responsible for configuration
+of all components with the exception of the CFMWs. System software is
+responsible for allocating appropriate ranges from within the CFMWs
+and exposing those via normal memory configurations as would be done
+for system RAM.
+
+Example system Topology.
x marks the match in each decoder level:: + + |<------------------SYSTEM PHYSICAL ADDRESS MAP (1)----------------->| + | __________ __________________________________ __________ | + | | | | | | | | + | | CFMW 0 | | CXL Fixed Memory Window 1 | | CFMW 1 | | + | | HB0 only | | Configured to interleave memory | | HB1 only | | + | | | | memory accesses across HB0/HB1 | | | | + | |__________| |_____x____________________________| |__________| | + | | | | + | | | | + | | | | + | Interleave Decoder | | + | Matches this HB | | + \_____________| |_____________/ + __________|__________ _____|_______________ + | | | | + (2) | CXL HB 0 | | CXL HB 1 | + | HB IntLv Decoders | | HB IntLv Decoders | + | PCI/CXL Root Bus 0c | | PCI/CXL Root Bus 0d | + | | | | + |___x_________________| |_____________________| + | | | | + | | | | + A HB 0 HDM Decoder | | | + matches this Port | | | + | | | | + ___________|___ __________|__ __|_________ ___|_________ + (3)| Root Port 0 | | Root Port 1 | | Root Port 2| | Root Port 3 | + | Appears in | | Appears in | | Appears in | | Appear in | + | PCI topology | | PCI Topology| | PCI Topo | | PCI Topo | + | As 0c:00.0 | | as 0c:01.0 | | as de:00.0 | | as de:01.0 | + |_______________| |_____________| |____________| |_____________| + | | | | + | | | | + _____|_________ ______|______ ______|_____ ______|_______ + (4)| x | | | | | | | + | CXL Type3 0 | | CXL Type3 1 | | CXL type3 2| | CLX Type 3 3 | + | | | | | | | | + | PMEM0(Vol LSA)| | PMEM1 (...) | | PMEM2 (...)| | PMEM3 (...) | + | Decoder to go | | | | | | | + | from host PA | | PCI 0e:00.0 | | PCI df:00.0| | PCI e0:00.0 | + | to device PA | | | | | | | + | PCI as 0d:00.0| | | | | | | + |_______________| |_____________| |____________| |______________| + +Notes: + +(1) **3 CXL Fixed Memory Windows (CFMW)** corresponding to different + ranges of the system physical address map. Each CFMW has + particular interleave setup across the CXL Host Bridges (HB) + CFMW0 provides uninterleaved access to HB0, CFW2 provides + uninterleaved access to HB1. CFW1 provides interleaved memory access + across HB0 and HB1. + +(2) **Two CXL Host Bridges**. Each of these has 2 CXL Root Ports and + programmable HDM decoders to route memory accesses either to + a single port or interleave them across multiple ports. + A complex configuration here, might be to use the following HDM + decoders in HB0. HDM0 routes CFMW0 requests to RP0 and hence + part of CXL Type3 0. HDM1 routes CFMW0 requests from a + different region of the CFMW0 PA range to RP2 and hence part + of CXL Type 3 1. HDM2 routes yet another PA range from within + CFMW0 to be interleaved across RP0 and RP1, providing 2 way + interleave of part of the memory provided by CXL Type3 0 and + CXL Type 3 1. HDM3 routes those interleaved accesses from + CFMW1 that target HB0 to RP 0 and another part of the memory of + CXL Type 3 0 (as part of a 2 way interleave at the system level + across for example CXL Type3 0 and CXL Type3 2. + HDM4 is used to enable system wide 4 way interleave across all + the present CXL type3 devices, by interleaving those (interleaved) + requests that HB0 receives from from CFMW1 across RP 0 and + RP 1 and hence to yet more regions of the memory of the + attached Type3 devices. Note this is a representative subset + of the full range of possible HDM decoder configurations in this + topology. + +(3) **Four CXL Root Ports.** In this case the CXL Type 3 devices are + directly attached to these ports. 
+ +(4) **Four CXL Type3 memory expansion devices.** These will each have + HDM decoders, but in this case rather than performing interleave + they will take the Host Physical Addresses of accesses and map + them to their own local Device Physical Address Space (DPA). + +Example topology involving a switch:: + + |<------------------SYSTEM PHYSICAL ADDRESS MAP (1)----------------->| + | __________ __________________________________ __________ | + | | | | | | | | + | | CFMW 0 | | CXL Fixed Memory Window 1 | | CFMW 1 | | + | | HB0 only | | Configured to interleave memory | | HB1 only | | + | | | | memory accesses across HB0/HB1 | | | | + | |____x_____| |__________________________________| |__________| | + | | | | + | | | | + | | | + Interleave Decoder | | | + Matches this HB | | | + \_____________| |_____________/ + __________|__________ _____|_______________ + | | | | + | CXL HB 0 | | CXL HB 1 | + | HB IntLv Decoders | | HB IntLv Decoders | + | PCI/CXL Root Bus 0c | | PCI/CXL Root Bus 0d | + | | | | + |___x_________________| |_____________________| + | | | | + | + A HB 0 HDM Decoder + matches this Port + ___________|___ + | Root Port 0 | + | Appears in | + | PCI topology | + | As 0c:00.0 | + |___________x___| + | + | + \_____________________ + | + | + --------------------------------------------------- + | Switch 0 USP as PCI 0d:00.0 | + | USP has HDM decoder which direct traffic to | + | appropriate downstream port | + | Switch BUS appears as 0e | + |x__________________________________________________| + | | | | + | | | | + _____|_________ ______|______ ______|_____ ______|_______ + (4)| x | | | | | | | + | CXL Type3 0 | | CXL Type3 1 | | CXL type3 2| | CLX Type 3 3 | + | | | | | | | | + | PMEM0(Vol LSA)| | PMEM1 (...) | | PMEM2 (...)| | PMEM3 (...) | + | Decoder to go | | | | | | | + | from host PA | | PCI 10:00.0 | | PCI 11:00.0| | PCI 12:00.0 | + | to device PA | | | | | | | + | PCI as 0f:00.0| | | | | | | + |_______________| |_____________| |____________| |______________| + +Example command lines +--------------------- +A very simple setup with just one directly attached CXL Type 3 device:: + + qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 -cpu max \ + ... + -object memory-backend-file,id=cxl-mem1,share=on,mem-path=/tmp/cxltest.raw,size=256M \ + -object memory-backend-file,id=cxl-lsa1,share=on,mem-path=/tmp/lsa.raw,size=256M \ + -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \ + -device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2 \ + -device cxl-type3,bus=root_port13,memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0 \ + -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G + +A setup suitable for 4 way interleave. Only one fixed window provided, to enable 2 way +interleave across 2 CXL host bridges. Each host bridge has 2 CXL Root Ports, with +the CXL Type3 device directly attached (no switches).:: + + qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 -cpu max \ + ... 
+ -object memory-backend-file,id=cxl-mem1,share=on,mem-path=/tmp/cxltest.raw,size=256M \ + -object memory-backend-file,id=cxl-mem2,share=on,mem-path=/tmp/cxltest2.raw,size=256M \ + -object memory-backend-file,id=cxl-mem3,share=on,mem-path=/tmp/cxltest3.raw,size=256M \ + -object memory-backend-file,id=cxl-mem4,share=on,mem-path=/tmp/cxltest4.raw,size=256M \ + -object memory-backend-file,id=cxl-lsa1,share=on,mem-path=/tmp/lsa.raw,size=256M \ + -object memory-backend-file,id=cxl-lsa2,share=on,mem-path=/tmp/lsa2.raw,size=256M \ + -object memory-backend-file,id=cxl-lsa3,share=on,mem-path=/tmp/lsa3.raw,size=256M \ + -object memory-backend-file,id=cxl-lsa4,share=on,mem-path=/tmp/lsa4.raw,size=256M \ + -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \ + -device pxb-cxl,bus_nr=222,bus=pcie.0,id=cxl.2 \ + -device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2 \ + -device cxl-type3,bus=root_port13,memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0 \ + -device cxl-rp,port=1,bus=cxl.1,id=root_port14,chassis=0,slot=3 \ + -device cxl-type3,bus=root_port14,memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem1 \ + -device cxl-rp,port=0,bus=cxl.2,id=root_port15,chassis=0,slot=5 \ + -device cxl-type3,bus=root_port15,memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem2 \ + -device cxl-rp,port=1,bus=cxl.2,id=root_port16,chassis=0,slot=6 \ + -device cxl-type3,bus=root_port16,memdev=cxl-mem4,lsa=cxl-lsa4,id=cxl-pmem3 \ + -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.targets.1=cxl.2,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=8k + +An example of 4 devices below a switch suitable for 1, 2 or 4 way interleave:: + + qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 -cpu max \ + ... + -object memory-backend-file,id=cxl-mem0,share=on,mem-path=/tmp/cxltest.raw,size=256M \ + -object memory-backend-file,id=cxl-mem1,share=on,mem-path=/tmp/cxltest1.raw,size=256M \ + -object memory-backend-file,id=cxl-mem2,share=on,mem-path=/tmp/cxltest2.raw,size=256M \ + -object memory-backend-file,id=cxl-mem3,share=on,mem-path=/tmp/cxltest3.raw,size=256M \ + -object memory-backend-file,id=cxl-lsa0,share=on,mem-path=/tmp/lsa0.raw,size=256M \ + -object memory-backend-file,id=cxl-lsa1,share=on,mem-path=/tmp/lsa1.raw,size=256M \ + -object memory-backend-file,id=cxl-lsa2,share=on,mem-path=/tmp/lsa2.raw,size=256M \ + -object memory-backend-file,id=cxl-lsa3,share=on,mem-path=/tmp/lsa3.raw,size=256M \ + -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \ + -device cxl-rp,port=0,bus=cxl.1,id=root_port0,chassis=0,slot=0 \ + -device cxl-rp,port=1,bus=cxl.1,id=root_port1,chassis=0,slot=1 \ + -device cxl-upstream,bus=root_port0,id=us0 \ + -device cxl-downstream,port=0,bus=us0,id=swport0,chassis=0,slot=4 \ + -device cxl-type3,bus=swport0,memdev=cxl-mem0,lsa=cxl-lsa0,id=cxl-pmem0,size=256M \ + -device cxl-downstream,port=1,bus=us0,id=swport1,chassis=0,slot=5 \ + -device cxl-type3,bus=swport1,memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem1,size=256M \ + -device cxl-downstream,port=2,bus=us0,id=swport2,chassis=0,slot=6 \ + -device cxl-type3,bus=swport2,memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem2,size=256M \ + -device cxl-downstream,port=3,bus=us0,id=swport3,chassis=0,slot=7 \ + -device cxl-type3,bus=swport3,memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem3,size=256M \ + -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=4k + +Kernel Configuration Options +---------------------------- + +In Linux 5.18 the following options are necessary to make use of +OS management of CXL memory devices as described here. 
+ +* CONFIG_CXL_BUS +* CONFIG_CXL_PCI +* CONFIG_CXL_ACPI +* CONFIG_CXL_PMEM +* CONFIG_CXL_MEM +* CONFIG_CXL_PORT +* CONFIG_CXL_REGION + +References +---------- + + - Consortium website for specifications etc: + http://www.computeexpresslink.org + - Compute Express link Revision 2 specification, October 2020 + - CEDT CFMWS & QTG _DSM ECN May 2021 diff --git a/docs/system/devices/nvme.rst b/docs/system/devices/nvme.rst index bff72d1c24..30f841ef62 100644 --- a/docs/system/devices/nvme.rst +++ b/docs/system/devices/nvme.rst @@ -70,7 +70,7 @@ namespaces and additional features, the ``nvme-ns`` device must be used. The namespaces defined by the ``nvme-ns`` device will attach to the most recently defined ``nvme-bus`` that is created by the ``nvme`` device. Namespace -identifers are allocated automatically, starting from ``1``. +identifiers are allocated automatically, starting from ``1``. There are a number of parameters available: @@ -104,34 +104,38 @@ multipath I/O. .. code-block:: console -device nvme-subsys,id=nvme-subsys-0,nqn=subsys0 - -device nvme,serial=a,subsys=nvme-subsys-0 - -device nvme,serial=b,subsys=nvme-subsys-0 + -device nvme,serial=deadbeef,subsys=nvme-subsys-0 + -device nvme,serial=deadbeef,subsys=nvme-subsys-0 This will create an NVM subsystem with two controllers. Having controllers linked to an ``nvme-subsys`` device allows additional ``nvme-ns`` parameters: -``shared`` (default: ``off``) +``shared`` (default: ``on`` since 6.2) Specifies that the namespace will be attached to all controllers in the - subsystem. If set to ``off`` (the default), the namespace will remain a - private namespace and may only be attached to a single controller at a time. + subsystem. If set to ``off``, the namespace will remain a private namespace + and may only be attached to a single controller at a time. Shared namespaces + are always automatically attached to all controllers (also when controllers + are hotplugged). ``detached`` (default: ``off``) If set to ``on``, the namespace will be be available in the subsystem, but - not attached to any controllers initially. + not attached to any controllers initially. A shared namespace with this set + to ``on`` will never be automatically attached to controllers. Thus, adding .. code-block:: console -drive file=nvm-1.img,if=none,id=nvm-1 - -device nvme-ns,drive=nvm-1,nsid=1,shared=on + -device nvme-ns,drive=nvm-1,nsid=1 -drive file=nvm-2.img,if=none,id=nvm-2 - -device nvme-ns,drive=nvm-2,nsid=3,detached=on + -device nvme-ns,drive=nvm-2,nsid=3,shared=off,detached=on -will cause NSID 1 will be a shared namespace (due to ``shared=on``) that is -initially attached to both controllers. NSID 3 will be a private namespace -(i.e. only attachable to a single controller at a time) and will not be -attached to any controller initially (due to ``detached=on``). +will cause NSID 1 will be a shared namespace that is initially attached to both +controllers. NSID 3 will be a private namespace due to ``shared=off`` and only +attachable to a single controller at a time. Additionally it will not be +attached to any controller initially (due to ``detached=on``) or to hotplugged +controllers. Optional Features ================= @@ -235,3 +239,85 @@ The virtual namespace device supports DIF- and DIX-based protection information to ``1`` to transfer protection information as the first eight bytes of metadata. Otherwise, the protection information is transferred as the last eight bytes. 
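+
+As a hedged sketch only (it assumes the ``ms``, ``pi`` and ``pil``
+namespace parameters documented in the parameter list above; adjust the
+names and values to your build if they differ), a namespace with
+end-to-end protection information enabled might be defined as:
+
+.. code-block:: console
+
+   -drive file=nvm-pi.img,if=none,id=nvm-pi
+   -device nvme-ns,drive=nvm-pi,nsid=2,ms=8,pi=1,pil=1
+
+Here ``ms=8`` reserves eight bytes of metadata per logical block,
+``pi=1`` selects Type 1 protection information and ``pil=1`` transfers
+it as the first eight bytes of that metadata, as described above.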
+ +Virtualization Enhancements and SR-IOV (Experimental Support) +------------------------------------------------------------- + +The ``nvme`` device supports Single Root I/O Virtualization and Sharing +along with Virtualization Enhancements. The controller has to be linked to +an NVM Subsystem device (``nvme-subsys``) for use with SR-IOV. + +A number of parameters are present (**please note, that they may be +subject to change**): + +``sriov_max_vfs`` (default: ``0``) + Indicates the maximum number of PCIe virtual functions supported + by the controller. Specifying a non-zero value enables reporting of both + SR-IOV and ARI (Alternative Routing-ID Interpretation) capabilities + by the NVMe device. Virtual function controllers will not report SR-IOV. + +``sriov_vq_flexible`` + Indicates the total number of flexible queue resources assignable to all + the secondary controllers. Implicitly sets the number of primary + controller's private resources to ``(max_ioqpairs - sriov_vq_flexible)``. + +``sriov_vi_flexible`` + Indicates the total number of flexible interrupt resources assignable to + all the secondary controllers. Implicitly sets the number of primary + controller's private resources to ``(msix_qsize - sriov_vi_flexible)``. + +``sriov_max_vi_per_vf`` (default: ``0``) + Indicates the maximum number of virtual interrupt resources assignable + to a secondary controller. The default ``0`` resolves to + ``(sriov_vi_flexible / sriov_max_vfs)`` + +``sriov_max_vq_per_vf`` (default: ``0``) + Indicates the maximum number of virtual queue resources assignable to + a secondary controller. The default ``0`` resolves to + ``(sriov_vq_flexible / sriov_max_vfs)`` + +The simplest possible invocation enables the capability to set up one VF +controller and assign an admin queue, an IO queue, and a MSI-X interrupt. + +.. code-block:: console + + -device nvme-subsys,id=subsys0 + -device nvme,serial=deadbeef,subsys=subsys0,sriov_max_vfs=1, + sriov_vq_flexible=2,sriov_vi_flexible=1 + +The minimum steps required to configure a functional NVMe secondary +controller are: + + * unbind flexible resources from the primary controller + +.. code-block:: console + + nvme virt-mgmt /dev/nvme0 -c 0 -r 1 -a 1 -n 0 + nvme virt-mgmt /dev/nvme0 -c 0 -r 0 -a 1 -n 0 + + * perform a Function Level Reset on the primary controller to actually + release the resources + +.. code-block:: console + + echo 1 > /sys/bus/pci/devices/0000:01:00.0/reset + + * enable VF + +.. code-block:: console + + echo 1 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs + + * assign the flexible resources to the VF and set it ONLINE + +.. code-block:: console + + nvme virt-mgmt /dev/nvme0 -c 1 -r 1 -a 8 -n 1 + nvme virt-mgmt /dev/nvme0 -c 1 -r 0 -a 8 -n 2 + nvme virt-mgmt /dev/nvme0 -c 1 -r 0 -a 9 -n 0 + + * bind the NVMe driver to the VF + +.. code-block:: console + + echo 0000:01:00.1 > /sys/bus/pci/drivers/nvme/bind \ No newline at end of file diff --git a/docs/system/devices/usb.rst b/docs/system/devices/usb.rst index afb7d6c226..37cb9b33ae 100644 --- a/docs/system/devices/usb.rst +++ b/docs/system/devices/usb.rst @@ -178,8 +178,20 @@ option or the ``device_add`` monitor command. Available devices are: host character device id. ``usb-braille,chardev=id`` - Braille device. This will use BrlAPI to display the braille output on - a real or fake device referenced by id. + Braille device. This emulates a Baum Braille device USB port. id has to + specify a character device defined with ``-chardev …,id=id``. 
One will + normally use BrlAPI to display the braille output on a BRLTTY-supported + device with + + .. parsed-literal:: + + |qemu_system| [...] -chardev braille,id=brl -device usb-braille,chardev=brl + + or alternatively, use the following equivalent shortcut: + + .. parsed-literal:: + + |qemu_system| [...] -usbdevice braille ``usb-net[,netdev=id]`` Network adapter that supports CDC ethernet and RNDIS protocols. id @@ -199,6 +211,10 @@ option or the ``device_add`` monitor command. Available devices are: ``u2f-{emulated,passthru}`` Universal Second Factor device +``canokey`` + An Open-source Secure Key implementing FIDO2, OpenPGP, PIV and more. + For more information, see :ref:`canokey`. + Physical port addressing ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -349,3 +365,44 @@ and also assign it to the correct USB bus in QEMU like this: -device usb-ehci,id=ehci \\ -device usb-host,bus=usb-bus.0,hostbus=3,hostport=1 \\ -device usb-host,bus=ehci.0,hostbus=1,hostport=1 + +``usb-host`` properties for reset behavior +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``guest-reset`` and ``guest-reset-all`` properties control +whenever the guest is allowed to reset the physical usb device on the +host. There are three cases: + +``guest-reset=false`` + The guest is not allowed to reset the (physical) usb device. + +``guest-reset=true,guest-resets-all=false`` + The guest is allowed to reset the device when it is not yet + initialized (aka no usb bus address assigned). Usually this results + in one guest reset being allowed. This is the default behavior. + +``guest-reset=true,guest-resets-all=true`` + The guest is allowed to reset the device as it pleases. + +The reason for this existing are broken usb devices. In theory one +should be able to reset (and re-initialize) usb devices at any time. +In practice that may result in shitty usb device firmware crashing and +the device not responding any more until you power-cycle (aka un-plug +and re-plug) it. + +What works best pretty much depends on the behavior of the specific +usb device at hand, so it's a trial-and-error game. If the default +doesn't work, try another option and see whenever the situation +improves. + +record usb transfers +^^^^^^^^^^^^^^^^^^^^ + +All usb devices have support for recording the usb traffic. This can +be enabled using the ``pcap=`` property, for example: + +``-device usb-mouse,pcap=mouse.pcap`` + +The pcap files are compatible with the linux kernels usbmon. Many +tools, including ``wireshark``, can decode and inspect these trace +files. diff --git a/docs/system/devices/vhost-user-rng.rst b/docs/system/devices/vhost-user-rng.rst new file mode 100644 index 0000000000..a145d4105c --- /dev/null +++ b/docs/system/devices/vhost-user-rng.rst @@ -0,0 +1,39 @@ +QEMU vhost-user-rng - RNG emulation +=================================== + +Background +---------- + +What follows builds on the material presented in vhost-user.rst - it should +be reviewed before moving forward with the content in this file. + +Description +----------- + +The vhost-user-rng device implementation was designed to work with a random +number generator daemon such as the one found in the vhost-device crate of +the rust-vmm project available on github [1]. + +[1]. https://github.com/rust-vmm/vhost-device + +Examples +-------- + +The daemon should be started first: + +:: + + host# vhost-device-rng --socket-path=rng.sock -c 1 -m 512 -p 1000 + +The QEMU invocation needs to create a chardev socket the device can +use to communicate as well as share the guests memory over a memfd. 
+ +:: + + host# qemu-system \ + -chardev socket,path=$(PATH)/rng.sock,id=rng0 \ + -device vhost-user-rng-pci,chardev=rng0 \ + -m 4096 \ + -object memory-backend-file,id=mem,size=4G,mem-path=/dev/shm,share=on \ + -numa node,memdev=mem \ + ... diff --git a/docs/system/gdb.rst b/docs/system/gdb.rst index 144d083df3..453eb73f6c 100644 --- a/docs/system/gdb.rst +++ b/docs/system/gdb.rst @@ -15,7 +15,8 @@ The ``-s`` option will make QEMU listen for an incoming connection from gdb on TCP port 1234, and ``-S`` will make QEMU not start the guest until you tell it to from gdb. (If you want to specify which TCP port to use or to use something other than TCP for the gdbstub -connection, use the ``-gdb dev`` option instead of ``-s``.) +connection, use the ``-gdb dev`` option instead of ``-s``. See +`Using unix sockets`_ for an example.) .. parsed-literal:: @@ -55,7 +56,7 @@ machine has more than one CPU, QEMU exposes each CPU cluster as a separate "inferior", where each CPU within the cluster is a separate "thread". Most QEMU machine types have identical CPUs, so there is a single cluster which has all the CPUs in it. A few machine types are -heterogenous and have multiple clusters: for example the ``sifive_u`` +heterogeneous and have multiple clusters: for example the ``sifive_u`` machine has a cluster with one E51 core and a second cluster with four U54 cores. Here the E51 is the only thread in the first inferior, and the U54 cores are all threads in the second inferior. @@ -100,6 +101,29 @@ not just those in the cluster you are currently working on:: (gdb) set schedule-multiple on +Using unix sockets +================== + +An alternate method for connecting gdb to the QEMU gdbstub is to use +a unix socket (if supported by your operating system). This is useful when +running several tests in parallel, or if you do not have a known free TCP +port (e.g. when running automated tests). + +First create a chardev with the appropriate options, then +instruct the gdbserver to use that device: + +.. parsed-literal:: + + |qemu_system| -chardev socket,path=/tmp/gdb-socket,server=on,wait=off,id=gdb0 -gdb chardev:gdb0 -S ... + +Start gdb as before, but this time connect using the path to +the socket:: + + (gdb) target remote /tmp/gdb-socket + +Note that to use a unix socket for the connection you will need +gdb version 9.0 or newer. + Advanced debugging options ========================== diff --git a/docs/system/guest-loader.rst b/docs/system/guest-loader.rst index 4320d1183f..9ef9776bf0 100644 --- a/docs/system/guest-loader.rst +++ b/docs/system/guest-loader.rst @@ -51,4 +51,4 @@ The full syntax of the guest-loader is:: ``bootargs=`` This is an optional field for kernel blobs which will pass command - like via the `/chosen/module@/bootargs` node. + like via the ``/chosen/module@/bootargs`` node. diff --git a/docs/system/i386/amd-memory-encryption.rst b/docs/system/i386/amd-memory-encryption.rst new file mode 100644 index 0000000000..dcf4add0e7 --- /dev/null +++ b/docs/system/i386/amd-memory-encryption.rst @@ -0,0 +1,206 @@ +AMD Secure Encrypted Virtualization (SEV) +========================================= + +Secure Encrypted Virtualization (SEV) is a feature found on AMD processors. + +SEV is an extension to the AMD-V architecture which supports running encrypted +virtual machines (VMs) under the control of KVM. Encrypted VMs have their pages +(code and data) secured such that only the guest itself has access to the +unencrypted version. 
Each encrypted VM is associated with a unique encryption +key; if its data is accessed by a different entity using a different key the +encrypted guests data will be incorrectly decrypted, leading to unintelligible +data. + +Key management for this feature is handled by a separate processor known as the +AMD secure processor (AMD-SP), which is present in AMD SOCs. Firmware running +inside the AMD-SP provides commands to support a common VM lifecycle. This +includes commands for launching, snapshotting, migrating and debugging the +encrypted guest. These SEV commands can be issued via KVM_MEMORY_ENCRYPT_OP +ioctls. + +Secure Encrypted Virtualization - Encrypted State (SEV-ES) builds on the SEV +support to additionally protect the guest register state. In order to allow a +hypervisor to perform functions on behalf of a guest, there is architectural +support for notifying a guest's operating system when certain types of VMEXITs +are about to occur. This allows the guest to selectively share information with +the hypervisor to satisfy the requested function. + +Launching +--------- + +Boot images (such as bios) must be encrypted before a guest can be booted. The +``MEMORY_ENCRYPT_OP`` ioctl provides commands to encrypt the images: ``LAUNCH_START``, +``LAUNCH_UPDATE_DATA``, ``LAUNCH_MEASURE`` and ``LAUNCH_FINISH``. These four commands +together generate a fresh memory encryption key for the VM, encrypt the boot +images and provide a measurement than can be used as an attestation of a +successful launch. + +For a SEV-ES guest, the ``LAUNCH_UPDATE_VMSA`` command is also used to encrypt the +guest register state, or VM save area (VMSA), for all of the guest vCPUs. + +``LAUNCH_START`` is called first to create a cryptographic launch context within +the firmware. To create this context, guest owner must provide a guest policy, +its public Diffie-Hellman key (PDH) and session parameters. These inputs +should be treated as a binary blob and must be passed as-is to the SEV firmware. + +The guest policy is passed as plaintext. A hypervisor may choose to read it, +but should not modify it (any modification of the policy bits will result +in bad measurement). The guest policy is a 4-byte data structure containing +several flags that restricts what can be done on a running SEV guest. +See SEV API Spec ([SEVAPI]_) section 3 and 6.2 for more details. + +The guest policy can be provided via the ``policy`` property:: + + # ${QEMU} \ + sev-guest,id=sev0,policy=0x1...\ + +Setting the "SEV-ES required" policy bit (bit 2) will launch the guest as a +SEV-ES guest:: + + # ${QEMU} \ + sev-guest,id=sev0,policy=0x5...\ + +The guest owner provided DH certificate and session parameters will be used to +establish a cryptographic session with the guest owner to negotiate keys used +for the attestation. + +The DH certificate and session blob can be provided via the ``dh-cert-file`` and +``session-file`` properties:: + + # ${QEMU} \ + sev-guest,id=sev0,dh-cert-file=,session-file= + +``LAUNCH_UPDATE_DATA`` encrypts the memory region using the cryptographic context +created via the ``LAUNCH_START`` command. If required, this command can be called +multiple times to encrypt different memory regions. The command also calculates +the measurement of the memory contents as it encrypts. + +``LAUNCH_UPDATE_VMSA`` encrypts all the vCPU VMSAs for a SEV-ES guest using the +cryptographic context created via the ``LAUNCH_START`` command. The command also +calculates the measurement of the VMSAs as it encrypts them. 
+ +``LAUNCH_MEASURE`` can be used to retrieve the measurement of encrypted memory and, +for a SEV-ES guest, encrypted VMSAs. This measurement is a signature of the +memory contents and, for a SEV-ES guest, the VMSA contents, that can be sent +to the guest owner as an attestation that the memory and VMSAs were encrypted +correctly by the firmware. The guest owner may wait to provide the guest +confidential information until it can verify the attestation measurement. +Since the guest owner knows the initial contents of the guest at boot, the +attestation measurement can be verified by comparing it to what the guest owner +expects. + +``LAUNCH_FINISH`` finalizes the guest launch and destroys the cryptographic +context. + +See SEV API Spec ([SEVAPI]_) 'Launching a guest' usage flow (Appendix A) for the +complete flow chart. + +To launch a SEV guest:: + + # ${QEMU} \ + -machine ...,confidential-guest-support=sev0 \ + -object sev-guest,id=sev0,cbitpos=47,reduced-phys-bits=1 + +To launch a SEV-ES guest:: + + # ${QEMU} \ + -machine ...,confidential-guest-support=sev0 \ + -object sev-guest,id=sev0,cbitpos=47,reduced-phys-bits=1,policy=0x5 + +An SEV-ES guest has some restrictions as compared to a SEV guest. Because the +guest register state is encrypted and cannot be updated by the VMM/hypervisor, +a SEV-ES guest: + + - Does not support SMM - SMM support requires updating the guest register + state. + - Does not support reboot - a system reset requires updating the guest register + state. + - Requires in-kernel irqchip - the burden is placed on the hypervisor to + manage booting APs. + +Calculating expected guest launch measurement +--------------------------------------------- + +In order to verify the guest launch measurement, The Guest Owner must compute +it in the exact same way as it is calculated by the AMD-SP. SEV API Spec +([SEVAPI]_) section 6.5.1 describes the AMD-SP operations: + + GCTX.LD is finalized, producing the hash digest of all plaintext data + imported into the guest. + + The launch measurement is calculated as: + + HMAC(0x04 || API_MAJOR || API_MINOR || BUILD || GCTX.POLICY || GCTX.LD || MNONCE; GCTX.TIK) + + where "||" represents concatenation. + +The values of API_MAJOR, API_MINOR, BUILD, and GCTX.POLICY can be obtained +from the ``query-sev`` qmp command. + +The value of MNONCE is part of the response of ``query-sev-launch-measure``: it +is the last 16 bytes of the base64-decoded data field (see SEV API Spec +([SEVAPI]_) section 6.5.2 Table 52: LAUNCH_MEASURE Measurement Buffer). + +The value of GCTX.LD is +``SHA256(firmware_blob || kernel_hashes_blob || vmsas_blob)``, where: + +* ``firmware_blob`` is the content of the entire firmware flash file (for + example, ``OVMF.fd``). Note that you must build a stateless firmware file + which doesn't use an NVRAM store, because the NVRAM area is not measured, and + therefore it is not secure to use a firmware which uses state from an NVRAM + store. +* if kernel is used, and ``kernel-hashes=on``, then ``kernel_hashes_blob`` is + the content of PaddedSevHashTable (including the zero padding), which itself + includes the hashes of kernel, initrd, and cmdline that are passed to the + guest. The PaddedSevHashTable struct is defined in ``target/i386/sev.c``. +* if SEV-ES is enabled (``policy & 0x4 != 0``), ``vmsas_blob`` is the + concatenation of all VMSAs of the guest vcpus. 
Each VMSA is 4096 bytes long; + its content is defined inside Linux kernel code as ``struct vmcb_save_area``, + or in AMD APM Volume 2 ([APMVOL2]_) Table B-2: VMCB Layout, State Save Area. + +If kernel hashes are not used, or SEV-ES is disabled, use empty blobs for +``kernel_hashes_blob`` and ``vmsas_blob`` as needed. + +Debugging +--------- + +Since the memory contents of a SEV guest are encrypted, hypervisor access to +the guest memory will return cipher text. If the guest policy allows debugging, +then a hypervisor can use the DEBUG_DECRYPT and DEBUG_ENCRYPT commands to access +the guest memory region for debug purposes. This is not supported in QEMU yet. + +Snapshot/Restore +---------------- + +TODO + +Live Migration +--------------- + +TODO + +References +---------- + +`AMD Memory Encryption whitepaper +`_ + +.. [SEVAPI] `Secure Encrypted Virtualization API + `_ + +.. [APMVOL2] `AMD64 Architecture Programmer's Manual Volume 2: System Programming + `_ + +KVM Forum slides: + +* `AMD’s Virtualization Memory Encryption (2016) + `_ +* `Extending Secure Encrypted Virtualization With SEV-ES (2018) + `_ + +`AMD64 Architecture Programmer's Manual: +`_ + +* SME is section 7.10 +* SEV is section 15.34 +* SEV-ES is section 15.35 diff --git a/docs/system/i386/cpu.rst b/docs/system/i386/cpu.rst new file mode 100644 index 0000000000..738719da9a --- /dev/null +++ b/docs/system/i386/cpu.rst @@ -0,0 +1 @@ +.. include:: ../cpu-models-x86.rst.inc diff --git a/docs/system/i386/hyperv.rst b/docs/system/i386/hyperv.rst new file mode 100644 index 0000000000..2505dc4c86 --- /dev/null +++ b/docs/system/i386/hyperv.rst @@ -0,0 +1,288 @@ +Hyper-V Enlightenments +====================== + + +Description +----------- + +In some cases when implementing a hardware interface in software is slow, KVM +implements its own paravirtualized interfaces. This works well for Linux as +guest support for such features is added simultaneously with the feature itself. +It may, however, be hard-to-impossible to add support for these interfaces to +proprietary OSes, namely, Microsoft Windows. + +KVM on x86 implements Hyper-V Enlightenments for Windows guests. These features +make Windows and Hyper-V guests think they're running on top of a Hyper-V +compatible hypervisor and use Hyper-V specific features. + + +Setup +----- + +No Hyper-V enlightenments are enabled by default by either KVM or QEMU. In +QEMU, individual enlightenments can be enabled through CPU flags, e.g: + +.. parsed-literal:: + + |qemu_system| --enable-kvm --cpu host,hv_relaxed,hv_vpindex,hv_time, ... + +Sometimes there are dependencies between enlightenments, QEMU is supposed to +check that the supplied configuration is sane. + +When any set of the Hyper-V enlightenments is enabled, QEMU changes hypervisor +identification (CPUID 0x40000000..0x4000000A) to Hyper-V. KVM identification +and features are kept in leaves 0x40000100..0x40000101. + + +Existing enlightenments +----------------------- + +``hv-relaxed`` + This feature tells guest OS to disable watchdog timeouts as it is running on a + hypervisor. It is known that some Windows versions will do this even when they + see 'hypervisor' CPU flag. + +``hv-vapic`` + Provides so-called VP Assist page MSR to guest allowing it to work with APIC + more efficiently. In particular, this enlightenment allows paravirtualized + (exit-less) EOI processing. + +``hv-spinlocks`` = xxx + Enables paravirtualized spinlocks. 
The parameter indicates how many times + spinlock acquisition should be attempted before indicating the situation to the + hypervisor. A special value 0xffffffff indicates "never notify". + +``hv-vpindex`` + Provides HV_X64_MSR_VP_INDEX (0x40000002) MSR to the guest which has Virtual + processor index information. This enlightenment makes sense in conjunction with + hv-synic, hv-stimer and other enlightenments which require the guest to know its + Virtual Processor indices (e.g. when VP index needs to be passed in a + hypercall). + +``hv-runtime`` + Provides HV_X64_MSR_VP_RUNTIME (0x40000010) MSR to the guest. The MSR keeps the + virtual processor run time in 100ns units. This gives guest operating system an + idea of how much time was 'stolen' from it (when the virtual CPU was preempted + to perform some other work). + +``hv-crash`` + Provides HV_X64_MSR_CRASH_P0..HV_X64_MSR_CRASH_P5 (0x40000100..0x40000105) and + HV_X64_MSR_CRASH_CTL (0x40000105) MSRs to the guest. These MSRs are written to + by the guest when it crashes, HV_X64_MSR_CRASH_P0..HV_X64_MSR_CRASH_P5 MSRs + contain additional crash information. This information is outputted in QEMU log + and through QAPI. + Note: unlike under genuine Hyper-V, write to HV_X64_MSR_CRASH_CTL causes guest + to shutdown. This effectively blocks crash dump generation by Windows. + +``hv-time`` + Enables two Hyper-V-specific clocksources available to the guest: MSR-based + Hyper-V clocksource (HV_X64_MSR_TIME_REF_COUNT, 0x40000020) and Reference TSC + page (enabled via MSR HV_X64_MSR_REFERENCE_TSC, 0x40000021). Both clocksources + are per-guest, Reference TSC page clocksource allows for exit-less time stamp + readings. Using this enlightenment leads to significant speedup of all timestamp + related operations. + +``hv-synic`` + Enables Hyper-V Synthetic interrupt controller - an extension of a local APIC. + When enabled, this enlightenment provides additional communication facilities + to the guest: SynIC messages and Events. This is a pre-requisite for + implementing VMBus devices (not yet in QEMU). Additionally, this enlightenment + is needed to enable Hyper-V synthetic timers. SynIC is controlled through MSRs + HV_X64_MSR_SCONTROL..HV_X64_MSR_EOM (0x40000080..0x40000084) and + HV_X64_MSR_SINT0..HV_X64_MSR_SINT15 (0x40000090..0x4000009F) + + Requires: ``hv-vpindex`` + +``hv-stimer`` + Enables Hyper-V synthetic timers. There are four synthetic timers per virtual + CPU controlled through HV_X64_MSR_STIMER0_CONFIG..HV_X64_MSR_STIMER3_COUNT + (0x400000B0..0x400000B7) MSRs. These timers can work either in single-shot or + periodic mode. It is known that certain Windows versions revert to using HPET + (or even RTC when HPET is unavailable) extensively when this enlightenment is + not provided; this can lead to significant CPU consumption, even when virtual + CPU is idle. + + Requires: ``hv-vpindex``, ``hv-synic``, ``hv-time`` + +``hv-tlbflush`` + Enables paravirtualized TLB shoot-down mechanism. On x86 architecture, remote + TLB flush procedure requires sending IPIs and waiting for other CPUs to perform + local TLB flush. In virtualized environment some virtual CPUs may not even be + scheduled at the time of the call and may not require flushing (or, flushing + may be postponed until the virtual CPU is scheduled). hv-tlbflush enlightenment + implements TLB shoot-down through hypervisor enabling the optimization. + + Requires: ``hv-vpindex`` + +``hv-ipi`` + Enables paravirtualized IPI send mechanism. 
HvCallSendSyntheticClusterIpi + hypercall may target more than 64 virtual CPUs simultaneously, doing the same + through APIC requires more than one access (and thus exit to the hypervisor). + + Requires: ``hv-vpindex`` + +``hv-vendor-id`` = xxx + This changes Hyper-V identification in CPUID 0x40000000.EBX-EDX from the default + "Microsoft Hv". The parameter should be no longer than 12 characters. According + to the specification, guests shouldn't use this information and it is unknown + if there is a Windows version which acts differently. + Note: hv-vendor-id is not an enlightenment and thus doesn't enable Hyper-V + identification when specified without some other enlightenment. + +``hv-reset`` + Provides HV_X64_MSR_RESET (0x40000003) MSR to the guest allowing it to reset + itself by writing to it. Even when this MSR is enabled, it is not a recommended + way for Windows to perform system reboot and thus it may not be used. + +``hv-frequencies`` + Provides HV_X64_MSR_TSC_FREQUENCY (0x40000022) and HV_X64_MSR_APIC_FREQUENCY + (0x40000023) allowing the guest to get its TSC/APIC frequencies without doing + measurements. + +``hv-reenlightenment`` + The enlightenment is nested specific, it targets Hyper-V on KVM guests. When + enabled, it provides HV_X64_MSR_REENLIGHTENMENT_CONTROL (0x40000106), + HV_X64_MSR_TSC_EMULATION_CONTROL (0x40000107)and HV_X64_MSR_TSC_EMULATION_STATUS + (0x40000108) MSRs allowing the guest to get notified when TSC frequency changes + (only happens on migration) and keep using old frequency (through emulation in + the hypervisor) until it is ready to switch to the new one. This, in conjunction + with ``hv-frequencies``, allows Hyper-V on KVM to pass stable clocksource + (Reference TSC page) to its own guests. + + Note, KVM doesn't fully support re-enlightenment notifications and doesn't + emulate TSC accesses after migration so 'tsc-frequency=' CPU option also has to + be specified to make migration succeed. The destination host has to either have + the same TSC frequency or support TSC scaling CPU feature. + + Recommended: ``hv-frequencies`` + +``hv-evmcs`` + The enlightenment is nested specific, it targets Hyper-V on KVM guests. When + enabled, it provides Enlightened VMCS version 1 feature to the guest. The feature + implements paravirtualized protocol between L0 (KVM) and L1 (Hyper-V) + hypervisors making L2 exits to the hypervisor faster. The feature is Intel-only. + + Note: some virtualization features (e.g. Posted Interrupts) are disabled when + hv-evmcs is enabled. It may make sense to measure your nested workload with and + without the feature to find out if enabling it is beneficial. + + Requires: ``hv-vapic`` + +``hv-stimer-direct`` + Hyper-V specification allows synthetic timer operation in two modes: "classic", + when expiration event is delivered as SynIC message and "direct", when the event + is delivered via normal interrupt. It is known that nested Hyper-V can only + use synthetic timers in direct mode and thus ``hv-stimer-direct`` needs to be + enabled. + + Requires: ``hv-vpindex``, ``hv-synic``, ``hv-time``, ``hv-stimer`` + +``hv-avic`` (``hv-apicv``) + The enlightenment allows to use Hyper-V SynIC with hardware APICv/AVIC enabled. + Normally, Hyper-V SynIC disables these hardware feature and suggests the guest + to use paravirtualized AutoEOI feature. + Note: enabling this feature on old hardware (without APICv/AVIC support) may + have negative effect on guest's performance. 
+ +``hv-no-nonarch-coresharing`` = on/off/auto + This enlightenment tells guest OS that virtual processors will never share a + physical core unless they are reported as sibling SMT threads. This information + is required by Windows and Hyper-V guests to properly mitigate SMT related CPU + vulnerabilities. + + When the option is set to 'auto' QEMU will enable the feature only when KVM + reports that non-architectural coresharing is impossible, this means that + hyper-threading is not supported or completely disabled on the host. This + setting also prevents migration as SMT settings on the destination may differ. + When the option is set to 'on' QEMU will always enable the feature, regardless + of host setup. To keep guests secure, this can only be used in conjunction with + exposing correct vCPU topology and vCPU pinning. + +``hv-version-id-build``, ``hv-version-id-major``, ``hv-version-id-minor``, ``hv-version-id-spack``, ``hv-version-id-sbranch``, ``hv-version-id-snumber`` + This changes Hyper-V version identification in CPUID 0x40000002.EAX-EDX from the + default (WS2016). + + - ``hv-version-id-build`` sets 'Build Number' (32 bits) + - ``hv-version-id-major`` sets 'Major Version' (16 bits) + - ``hv-version-id-minor`` sets 'Minor Version' (16 bits) + - ``hv-version-id-spack`` sets 'Service Pack' (32 bits) + - ``hv-version-id-sbranch`` sets 'Service Branch' (8 bits) + - ``hv-version-id-snumber`` sets 'Service Number' (24 bits) + + Note: hv-version-id-* are not enlightenments and thus don't enable Hyper-V + identification when specified without any other enlightenments. + +``hv-syndbg`` + Enables Hyper-V synthetic debugger interface, this is a special interface used + by Windows Kernel debugger to send the packets through, rather than sending + them via serial/network . + When enabled, this enlightenment provides additional communication facilities + to the guest: SynDbg messages. + This new communication is used by Windows Kernel debugger rather than sending + packets via serial/network, adding significant performance boost over the other + comm channels. + This enlightenment requires a VMBus device (-device vmbus-bridge,irq=15). + + Requires: ``hv-relaxed``, ``hv_time``, ``hv-vapic``, ``hv-vpindex``, ``hv-synic``, ``hv-runtime``, ``hv-stimer`` + +``hv-emsr-bitmap`` + The enlightenment is nested specific, it targets Hyper-V on KVM guests. When + enabled, it allows L0 (KVM) and L1 (Hyper-V) hypervisors to collaborate to + avoid unnecessary updates to L2 MSR-Bitmap upon vmexits. While the protocol is + supported for both VMX (Intel) and SVM (AMD), the VMX implementation requires + Enlightened VMCS (``hv-evmcs``) feature to also be enabled. + + Recommended: ``hv-evmcs`` (Intel) + +``hv-xmm-input`` + Hyper-V specification allows to pass parameters for certain hypercalls using XMM + registers ("XMM Fast Hypercall Input"). When the feature is in use, it allows + for faster hypercalls processing as KVM can avoid reading guest's memory. + +``hv-tlbflush-ext`` + Allow for extended GVA ranges to be passed to Hyper-V TLB flush hypercalls + (HvFlushVirtualAddressList/HvFlushVirtualAddressListEx). + + Requires: ``hv-tlbflush`` + +``hv-tlbflush-direct`` + The enlightenment is nested specific, it targets Hyper-V on KVM guests. When + enabled, it allows L0 (KVM) to directly handle TLB flush hypercalls from L2 + guest without the need to exit to L1 (Hyper-V) hypervisor. 
While the feature is + supported for both VMX (Intel) and SVM (AMD), the VMX implementation requires + Enlightened VMCS (``hv-evmcs``) feature to also be enabled. + + Requires: ``hv-vapic`` + + Recommended: ``hv-evmcs`` (Intel) + +Supplementary features +---------------------- + +``hv-passthrough`` + In some cases (e.g. during development) it may make sense to use QEMU in + 'pass-through' mode and give Windows guests all enlightenments currently + supported by KVM. This pass-through mode is enabled by "hv-passthrough" CPU + flag. + + Note: ``hv-passthrough`` flag only enables enlightenments which are known to QEMU + (have corresponding 'hv-' flag) and copies ``hv-spinlocks`` and ``hv-vendor-id`` + values from KVM to QEMU. ``hv-passthrough`` overrides all other 'hv-' settings on + the command line. Also, enabling this flag effectively prevents migration as the + list of enabled enlightenments may differ between target and destination hosts. + +``hv-enforce-cpuid`` + By default, KVM allows the guest to use all currently supported Hyper-V + enlightenments when Hyper-V CPUID interface was exposed, regardless of if + some features were not announced in guest visible CPUIDs. ``hv-enforce-cpuid`` + feature alters this behavior and only allows the guest to use exposed Hyper-V + enlightenments. + + +Useful links +------------ +Hyper-V Top Level Functional specification and other information: + +- https://github.com/MicrosoftDocs/Virtualization-Documentation +- https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/tlfs/tlfs + diff --git a/docs/system/i386/kvm-pv.rst b/docs/system/i386/kvm-pv.rst new file mode 100644 index 0000000000..1e5a9923ef --- /dev/null +++ b/docs/system/i386/kvm-pv.rst @@ -0,0 +1,100 @@ +Paravirtualized KVM features +============================ + +Description +----------- + +In some cases when implementing hardware interfaces in software is slow, ``KVM`` +implements its own paravirtualized interfaces. + +Setup +----- + +Paravirtualized ``KVM`` features are represented as CPU flags. The following +features are enabled by default for any CPU model when ``KVM`` acceleration is +enabled: + +- ``kvmclock`` +- ``kvm-nopiodelay`` +- ``kvm-asyncpf`` +- ``kvm-steal-time`` +- ``kvm-pv-eoi`` +- ``kvmclock-stable-bit`` + +``kvm-msi-ext-dest-id`` feature is enabled by default in x2apic mode with split +irqchip (e.g. "-machine ...,kernel-irqchip=split -cpu ...,x2apic"). + +Note: when CPU model ``host`` is used, QEMU passes through all supported +paravirtualized ``KVM`` features to the guest. + +Existing features +----------------- + +``kvmclock`` + Expose a ``KVM`` specific paravirtualized clocksource to the guest. Supported + since Linux v2.6.26. + +``kvm-nopiodelay`` + The guest doesn't need to perform delays on PIO operations. Supported since + Linux v2.6.26. + +``kvm-mmu`` + This feature is deprecated. + +``kvm-asyncpf`` + Enable asynchronous page fault mechanism. Supported since Linux v2.6.38. + Note: since Linux v5.10 the feature is deprecated and not enabled by ``KVM``. + Use ``kvm-asyncpf-int`` instead. + +``kvm-steal-time`` + Enable stolen (when guest vCPU is not running) time accounting. Supported + since Linux v3.1. + +``kvm-pv-eoi`` + Enable paravirtualized end-of-interrupt signaling. Supported since Linux + v3.10. + +``kvm-pv-unhalt`` + Enable paravirtualized spinlocks support. Supported since Linux v3.12. + +``kvm-pv-tlb-flush`` + Enable paravirtualized TLB flush mechanism. Supported since Linux v4.16. + +``kvm-pv-ipi`` + Enable paravirtualized IPI mechanism. 
Supported since Linux v4.19.
+
+``kvm-poll-control``
+  Enable host-side polling on HLT control from the guest. Supported since Linux
+  v5.10.
+
+``kvm-pv-sched-yield``
+  Enable paravirtualized sched yield feature. Supported since Linux v5.10.
+
+``kvm-asyncpf-int``
+  Enable interrupt based asynchronous page fault mechanism. Supported since Linux
+  v5.10.
+
+``kvm-msi-ext-dest-id``
+  Support 'Extended Destination ID' for external interrupts. The feature allows
+  using up to 32768 CPUs without IRQ remapping (but other limits may apply making
+  the number of supported vCPUs for a given configuration lower). Supported since
+  Linux v5.10.
+
+``kvmclock-stable-bit``
+  Tell the guest that the guest-visible TSC value can be fully trusted for kvmclock
+  computations and no warps are expected. Supported since Linux v2.6.35.
+
+Supplementary features
+----------------------
+
+``kvm-pv-enforce-cpuid``
+  Limit the supported paravirtualized feature set to the exposed features only.
+  Note, by default, ``KVM`` allows the guest to use all currently supported
+  paravirtualized features even when they were not announced in guest visible
+  CPUIDs. Supported since Linux v5.10.
+
+
+Useful links
+------------
+
+Please refer to Documentation/virt/kvm in Linux for additional details.
diff --git a/docs/system/i386/sgx.rst b/docs/system/i386/sgx.rst
new file mode 100644
index 0000000000..0f0a73f758
--- /dev/null
+++ b/docs/system/i386/sgx.rst
@@ -0,0 +1,188 @@
+Software Guard eXtensions (SGX)
+===============================
+
+Overview
+--------
+
+Intel Software Guard eXtensions (SGX) is a set of instructions and mechanisms
+for memory accesses that provide security protections for sensitive
+applications and data. SGX allows an application to use part of its
+address space as an *enclave*, which is a protected area that provides
+confidentiality and integrity even in the presence of privileged malware.
+Accesses to the enclave memory area from any software not resident in the
+enclave are prevented, including those from privileged software.
+
+Virtual SGX
+-----------
+
+The SGX feature is exposed to the guest via the SGX CPUID leaves. For most of
+the SGX CPUID information, QEMU can report the same values to the guest as on
+the host. By reporting the same CPUID, the guest is able to use the full
+capabilities of SGX, and KVM does not need to emulate that information.
+
+The guest's EPC base and size are determined by QEMU, and KVM needs QEMU to
+pass this information to it before it can initialize SGX for the guest.
+
+Virtual EPC
+~~~~~~~~~~~
+
+By default, QEMU does not assign EPC to a VM, i.e. fully enabling SGX in a VM
+requires explicit allocation of EPC to the VM. Similar to other specialized
+memory types, e.g. hugetlbfs, EPC is exposed as a memory backend.
+
+SGX EPC is enumerated through CPUID, i.e. EPC "devices" need to be realized
+prior to realizing the vCPUs themselves, which occurs long before generic
+devices are parsed and realized. This limitation means that EPC does not
+require -maxmem as EPC is not treated as {cold,hot}plugged memory.
+
+QEMU does not artificially restrict the number of EPC sections exposed to a
+guest, e.g. QEMU will happily allow you to create 64 1M EPC sections. Be aware
+that some kernels may not recognize all EPC sections, e.g. the Linux SGX driver
+is hardwired to support only 8 EPC sections.
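+
+For instance, to see how many EPC sections the guest kernel actually
+enumerated, one rough check (the exact message format may vary between
+kernel versions) is to count the ``EPC section`` lines in the guest's
+boot log::
+
+  $ dmesg | grep -c "EPC section"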
+
+The following QEMU snippet creates two EPC sections, with 64M pre-allocated
+to the VM and an additional 28M mapped but not allocated::
+
+ -object memory-backend-epc,id=mem1,size=64M,prealloc=on \
+ -object memory-backend-epc,id=mem2,size=28M \
+ -M sgx-epc.0.memdev=mem1,sgx-epc.1.memdev=mem2
+
+Note:
+
+The size and location of the virtual EPC are far less restricted compared
+to physical EPC. Because physical EPC is protected via range registers,
+the size of the physical EPC must be a power of two (though software sees
+a subset of the full EPC, e.g. 92M or 128M) and the EPC must be naturally
+aligned. KVM SGX's virtual EPC is purely a software construct and only
+requires the size and location to be page aligned. QEMU enforces the EPC
+size is a multiple of 4k and will ensure the base of the EPC is 4k aligned.
+To simplify the implementation, EPC is always located above 4g in the guest
+physical address space.
+
+Migration
+~~~~~~~~~
+
+QEMU/KVM doesn't prevent live migrating SGX VMs, although from the hardware's
+perspective, SGX doesn't support live migration, since both EPC and the SGX
+key hierarchy are bound to the physical platform. However, live migration
+can be supported if the guest software stack is able to recreate enclaves
+when it suffers a sudden loss of EPC, and if guest enclaves can detect that
+the SGX keys have changed and handle this gracefully. For instance, when
+ERESUME fails with #PF.SGX, guest software can detect it and recreate
+enclaves; and when an enclave fails to unseal sensitive information from
+outside, it can detect the error and have that information provisioned again.
+
+CPUID
+~~~~~
+
+Due to its myriad dependencies, SGX is currently not listed as supported
+in any of QEMU's built-in CPU configurations. To expose SGX (and SGX Launch
+Control) to a guest, you must either use ``-cpu host`` to pass through the
+host CPU model, or explicitly enable SGX when using a built-in CPU model,
+e.g. via ``-cpu ,+sgx`` or ``-cpu ,+sgx,+sgxlc``.
+
+All SGX sub-features enumerated through CPUID, e.g. SGX2, MISCSELECT,
+ATTRIBUTES, etc... can be restricted via CPUID flags. Be aware that enforcing
+restriction of MISCSELECT, ATTRIBUTES and XFRM requires intercepting ECREATE,
+i.e. may marginally reduce SGX performance in the guest. All SGX sub-features
+controlled via -cpu are prefixed with "sgx", e.g.::
+
+  $ qemu-system-x86_64 -cpu help | xargs printf "%s\n" | grep sgx
+  sgx
+  sgx-debug
+  sgx-encls-c
+  sgx-enclv
+  sgx-exinfo
+  sgx-kss
+  sgx-mode64
+  sgx-provisionkey
+  sgx-tokenkey
+  sgx1
+  sgx2
+  sgxlc
+
+The following QEMU snippet passes through the host CPU but restricts access to
+the provision and EINIT token keys::
+
+ -cpu host,-sgx-provisionkey,-sgx-tokenkey
+
+SGX sub-features cannot be emulated, i.e. sub-features that are not present
+in hardware cannot be forced on via '-cpu'.
+
+Virtualize SGX Launch Control
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+QEMU SGX support for Launch Control (LC) is passive, in the sense that it
+does not actively change the LC configuration.  QEMU SGX provides the user
+the ability to set/clear the CPUID flag (and by extension the associated
+IA32_FEATURE_CONTROL MSR bit in fw_cfg) and saves/restores the LE Hash MSRs
+when getting/putting guest state, but QEMU does not add new controls to
+directly modify the LC configuration.  Similar to hardware behavior, locking
+the LC configuration to a non-Intel value is left to guest firmware. Unlike
+the host BIOS setting for SGX Launch Control (LC), there is by design no
+special BIOS setting for an SGX guest. If the host is in locked mode, QEMU
+can still allow creating a VM with SGX.
+
+Feature Control
+~~~~~~~~~~~~~~~
+
+QEMU SGX updates the ``etc/msr_feature_control`` fw_cfg entry to set the SGX
+(bit 18) and SGX LC (bit 17) flags based on their respective CPUID support,
+i.e. existing guest firmware will automatically set SGX and SGX LC accordingly,
+assuming said firmware supports fw_cfg.msr_feature_control.
+
+Launching a guest
+-----------------
+
+To launch an SGX guest:
+
+.. parsed-literal::
+
+  |qemu_system_x86| \\
+   -cpu host,+sgx-provisionkey \\
+   -object memory-backend-epc,id=mem1,size=64M,prealloc=on \\
+   -M sgx-epc.0.memdev=mem1,sgx-epc.0.node=0
+
+Utilizing SGX in the guest requires a kernel/OS with SGX support.
+The support can be determined in guest by::
+
+  $ grep sgx /proc/cpuinfo
+
+and SGX epc info by::
+
+  $ dmesg | grep sgx
+  [ 0.182807] sgx: EPC section 0x140000000-0x143ffffff
+  [ 0.183695] sgx: [Firmware Bug]: Unable to map EPC section to online node. Fallback to the NUMA node 0.
+
+To launch an SGX numa guest:
+
+.. parsed-literal::
+
+  |qemu_system_x86| \\
+   -cpu host,+sgx-provisionkey \\
+   -object memory-backend-ram,size=2G,host-nodes=0,policy=bind,id=node0 \\
+   -object memory-backend-epc,id=mem0,size=64M,prealloc=on,host-nodes=0,policy=bind \\
+   -numa node,nodeid=0,cpus=0-1,memdev=node0 \\
+   -object memory-backend-ram,size=2G,host-nodes=1,policy=bind,id=node1 \\
+   -object memory-backend-epc,id=mem1,size=28M,prealloc=on,host-nodes=1,policy=bind \\
+   -numa node,nodeid=1,cpus=2-3,memdev=node1 \\
+   -M sgx-epc.0.memdev=mem0,sgx-epc.0.node=0,sgx-epc.1.memdev=mem1,sgx-epc.1.node=1
+
+and SGX epc numa info by::
+
+  $ dmesg | grep sgx
+  [ 0.369937] sgx: EPC section 0x180000000-0x183ffffff
+  [ 0.370259] sgx: EPC section 0x184000000-0x185bfffff
+
+  $ dmesg | grep SRAT
+  [ 0.009981] ACPI: SRAT: Node 0 PXM 0 [mem 0x180000000-0x183ffffff]
+  [ 0.009982] ACPI: SRAT: Node 1 PXM 1 [mem 0x184000000-0x185bfffff]
+
+References
+----------
+
+- `SGX Homepage `__
+
+- `SGX SDK `__
+
+- SGX specification: Intel SDM Volume 3
diff --git a/docs/system/images.rst b/docs/system/images.rst
index 3d9144e625..d000bd6b6f 100644
--- a/docs/system/images.rst
+++ b/docs/system/images.rst
@@ -20,7 +20,7 @@ where myimage.img is the disk image filename and mysize is its size in
 kilobytes. You can add an ``M`` suffix to give the size in megabytes and
 a ``G`` suffix for gigabytes.
 
-See the qemu-img invocation documentation for more information.
+See the ``qemu-img`` invocation documentation for more information.
 
 .. _disk_005fimages_005fsnapshot_005fmode:
 
diff --git a/docs/system/index.rst b/docs/system/index.rst
index 7b9276c05f..e3695649c5 100644
--- a/docs/system/index.rst
+++ b/docs/system/index.rst
@@ -1,5 +1,6 @@
+----------------
 System Emulation
-================
+----------------
 
 This section of the manual is the overall guide for users using QEMU for
 full system emulation (as opposed to user-mode emulation).
@@ -26,6 +27,7 @@ or Hypervisor.Framework.
    secrets
    authz
    gdb
+   replay
    managed-startup
    bootindex
    cpu-hotplug
@@ -33,3 +35,4 @@ or Hypervisor.Framework.
targets security multi-process + confidential-guest-support diff --git a/docs/system/loongarch/loongson3.rst b/docs/system/loongarch/loongson3.rst new file mode 100644 index 0000000000..489ea20f8f --- /dev/null +++ b/docs/system/loongarch/loongson3.rst @@ -0,0 +1,129 @@ +:orphan: + +========================================== +loongson3 virt generic platform (``virt``) +========================================== + +The ``virt`` machine use gpex host bridge, and there are some +emulated devices on virt board, such as loongson7a RTC device, +IOAPIC device, ACPI device and so on. + +Supported devices +----------------- + +The ``virt`` machine supports: +- Gpex host bridge +- Ls7a RTC device +- Ls7a IOAPIC device +- ACPI GED device +- Fw_cfg device +- PCI/PCIe devices +- Memory device +- CPU device. Type: la464-loongarch-cpu. + +CPU and machine Type +-------------------- + +The ``qemu-system-loongarch64`` provides emulation for virt +machine. You can specify the machine type ``virt`` and +cpu type ``la464-loongarch-cpu``. + +Boot options +------------ + +We can boot the LoongArch virt machine by specifying the uefi bios, +initrd, and linux kernel. And those source codes and binary files +can be accessed by following steps. + +(1) booting command: + +.. code-block:: bash + + $ qemu-system-loongarch64 -machine virt -m 4G -cpu la464-loongarch-cpu \ + -smp 1 -bios QEMU_EFI.fd -kernel vmlinuz.efi -initrd initrd.img \ + -append "root=/dev/ram rdinit=/sbin/init console=ttyS0,115200" \ + --nographic + +Note: The running speed may be a little slow, as the performance of our +qemu and uefi bios is not perfect, and it is being fixed. + +(2) cross compiler tools: + +.. code-block:: bash + + wget https://github.com/loongson/build-tools/releases/download/ \ + 2022.05.29/loongarch64-clfs-5.0-cross-tools-gcc-full.tar.xz + + tar -vxf loongarch64-clfs-5.0-cross-tools-gcc-full.tar.xz + +(3) qemu compile configure option: + +.. code-block:: bash + + ./configure --disable-rdma --disable-pvrdma --prefix=usr \ + --target-list="loongarch64-softmmu" \ + --disable-libiscsi --disable-libnfs --disable-libpmem \ + --disable-glusterfs --enable-libusb --enable-usb-redir \ + --disable-opengl --disable-xen --enable-spice \ + --enable-debug --disable-capstone --disable-kvm \ + --enable-profiler + make + +(4) uefi bios source code and compile method: + +.. code-block:: bash + + git clone https://github.com/loongson/edk2-LoongarchVirt.git + + cd edk2-LoongarchVirt + + git submodule update --init + + export PATH=$YOUR_COMPILER_PATH/bin:$PATH + + export WORKSPACE=`pwd` + + export PACKAGES_PATH=$WORKSPACE/edk2-LoongarchVirt + + export GCC5_LOONGARCH64_PREFIX=loongarch64-unknown-linux-gnu- + + edk2-LoongarchVirt/edksetup.sh + + make -C edk2-LoongarchVirt/BaseTools + + build --buildtarget=DEBUG --tagname=GCC5 --arch=LOONGARCH64 --platform=OvmfPkg/LoongArchQemu/Loongson.dsc + + build --buildtarget=RELEASE --tagname=GCC5 --arch=LOONGARCH64 --platform=OvmfPkg/LoongArchQemu/Loongson.dsc + +The efi binary file path: + + Build/LoongArchQemu/DEBUG_GCC5/FV/QEMU_EFI.fd + + Build/LoongArchQemu/RELEASE_GCC5/FV/QEMU_EFI.fd + +(5) linux kernel source code and compile method: + +.. 
code-block:: bash + + git clone https://github.com/loongson/linux.git + + export PATH=$YOUR_COMPILER_PATH/bin:$PATH + + export LD_LIBRARY_PATH=$YOUR_COMPILER_PATH/lib:$LD_LIBRARY_PATH + + export LD_LIBRARY_PATH=$YOUR_COMPILER_PATH/loongarch64-unknown-linux-gnu/lib/:$LD_LIBRARY_PATH + + make ARCH=loongarch CROSS_COMPILE=loongarch64-unknown-linux-gnu- loongson3_defconfig + + make ARCH=loongarch CROSS_COMPILE=loongarch64-unknown-linux-gnu- + + make ARCH=loongarch CROSS_COMPILE=loongarch64-unknown-linux-gnu- install + + make ARCH=loongarch CROSS_COMPILE=loongarch64-unknown-linux-gnu- modules_install + +Note: The branch of linux source code is loongarch-next. + +(6) initrd file: + + You can use busybox tool and the linux modules to make a initrd file. Or you can access the + binary files: https://github.com/yangxiaojuan-loongson/qemu-binary diff --git a/docs/system/multi-process.rst b/docs/system/multi-process.rst index 46bb0cafc2..1b8852c27c 100644 --- a/docs/system/multi-process.rst +++ b/docs/system/multi-process.rst @@ -2,7 +2,7 @@ Multi-process QEMU ================== This document describes how to configure and use multi-process qemu. -For the design document refer to docs/devel/qemu-multiprocess. +For the design document refer to docs/devel/multi-process.rst. 1) Configuration ---------------- @@ -45,7 +45,7 @@ Following is a description of command-line used to launch mpqemu. -device lsi53c895a,id=lsi0 \ -drive id=drive_image2,file=/build/ol7-nvme-test-1.qcow2 \ -device scsi-hd,id=drive2,drive=drive_image2,bus=lsi0.0,scsi-id=0 \ - -object x-remote-object,id=robj1,devid=lsi1,fd=4, + -object x-remote-object,id=robj1,devid=lsi0,fd=4, * QEMU: diff --git a/docs/system/openrisc/cpu-features.rst b/docs/system/openrisc/cpu-features.rst new file mode 100644 index 0000000000..aeb65e22ff --- /dev/null +++ b/docs/system/openrisc/cpu-features.rst @@ -0,0 +1,15 @@ +CPU Features +============ + +The QEMU emulation of the OpenRISC architecture provides following built in +features. + +- Shadow GPRs +- MMU TLB with 128 entries, 1 way +- Power Management (PM) +- Programmable Interrupt Controller (PIC) +- Tick Timer + +These features are on by default and the presence can be confirmed by checking +the contents of the Unit Presence Register (``UPR``) and CPU Configuration +Register (``CPUCFGR``). diff --git a/docs/system/openrisc/emulation.rst b/docs/system/openrisc/emulation.rst new file mode 100644 index 0000000000..0af898ab20 --- /dev/null +++ b/docs/system/openrisc/emulation.rst @@ -0,0 +1,17 @@ +OpenRISC 1000 CPU architecture support +====================================== + +QEMU's TCG emulation includes support for the OpenRISC or1200 implementation of +the OpenRISC 1000 cpu architecture. + +The or1200 cpu also has support for the following instruction subsets: + +- ORBIS32 (OpenRISC Basic Instruction Set) +- ORFPX32 (OpenRISC Floating-Point eXtension) + +In addition to the instruction subsets the QEMU TCG emulation also has support +for most Class II (optional) instructions. + +For information on all OpenRISC instructions please refer to the latest +architecture manual available on the OpenRISC website in the +`OpenRISC Architecture `_ section. diff --git a/docs/system/openrisc/or1k-sim.rst b/docs/system/openrisc/or1k-sim.rst new file mode 100644 index 0000000000..ef10439737 --- /dev/null +++ b/docs/system/openrisc/or1k-sim.rst @@ -0,0 +1,43 @@ +Or1ksim board +============= + +The QEMU Or1ksim machine emulates the standard OpenRISC board simulator which is +also the standard SoC configuration. 
+
+Supported devices
+-----------------
+
+ * 16550A UART
+ * ETHOC Ethernet controller
+ * SMP (OpenRISC multicore using ompic)
+
+Boot options
+------------
+
+The Or1ksim machine can be started using the ``-kernel`` and ``-initrd`` options
+to load a Linux kernel and optional disk image.
+
+.. code-block:: bash
+
+  $ qemu-system-or1k -cpu or1220 -M or1k-sim -nographic \
+        -kernel vmlinux \
+        -initrd initramfs.cpio.gz \
+        -m 128
+
+Linux guest kernel configuration
+""""""""""""""""""""""""""""""""
+
+The 'or1ksim_defconfig' for Linux openrisc kernels includes the right
+drivers for the or1ksim machine.  If you would like to run an SMP system
+choose the 'simple_smp_defconfig' config.
+
+Hardware configuration information
+""""""""""""""""""""""""""""""""""
+
+The ``or1k-sim`` board automatically generates a device tree blob ("dtb")
+which it passes to the guest. This provides information about the
+addresses, interrupt lines and other configuration of the various devices
+in the system.
+
+The location of the DTB will be passed in register ``r3`` to the guest operating
+system.
diff --git a/docs/system/openrisc/virt.rst b/docs/system/openrisc/virt.rst
new file mode 100644
index 0000000000..2fe61ac942
--- /dev/null
+++ b/docs/system/openrisc/virt.rst
@@ -0,0 +1,50 @@
+'virt' generic virtual platform
+===============================
+
+The ``virt`` board is a platform which does not correspond to any
+real hardware; it is designed for use in virtual machines.
+It is the recommended board type if you simply want to run
+a guest such as Linux and do not care about reproducing the
+idiosyncrasies and limitations of a particular bit of real-world
+hardware.
+
+Supported devices
+-----------------
+
+ * PCI/PCIe devices
+ * 8 virtio-mmio transport devices
+ * 16550A UART
+ * Goldfish RTC
+ * SiFive Test device for poweroff and reboot
+ * SMP (OpenRISC multicore using ompic)
+
+Boot options
+------------
+
+The virt machine can be started using the ``-kernel`` and ``-initrd`` options
+to load a Linux kernel and optional disk image. For example:
+
+.. code-block:: bash
+
+  $ qemu-system-or1k -cpu or1220 -M virt -nographic \
+      -device virtio-net-device,netdev=user -netdev user,id=user,net=10.9.0.1/24,host=10.9.0.100 \
+      -device virtio-blk-device,drive=d0 -drive file=virt.qcow2,id=d0,if=none,format=qcow2 \
+      -kernel vmlinux \
+      -initrd initramfs.cpio.gz \
+      -m 128
+
+Linux guest kernel configuration
+""""""""""""""""""""""""""""""""
+
+The 'virt_defconfig' for Linux openrisc kernels includes the right drivers for
+the ``virt`` machine.
+
+Hardware configuration information
+""""""""""""""""""""""""""""""""""
+
+The ``virt`` board automatically generates a device tree blob ("dtb") which it
+passes to the guest. This provides information about the addresses, interrupt
+lines and other configuration of the various devices in the system.
+
+The location of the DTB will be passed in register ``r3`` to the guest operating
+system.
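+
+If you want to inspect the device tree that QEMU generated, one possible
+approach (assuming the board supports QEMU's generic ``dumpdtb`` machine
+option; treat this as something to verify for your QEMU version) is to dump
+it and decompile it with ``dtc``:
+
+.. code-block:: bash
+
+  $ qemu-system-or1k -cpu or1220 -M virt,dumpdtb=virt.dtb
+  $ dtc -I dtb -O dts -o virt.dts virt.dtb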
diff --git a/docs/system/ppc/embedded.rst b/docs/system/ppc/embedded.rst index cfffbda24d..af3b3d9fa4 100644 --- a/docs/system/ppc/embedded.rst +++ b/docs/system/ppc/embedded.rst @@ -6,5 +6,4 @@ Embedded family boards - ``ppce500`` generic paravirt e500 platform - ``ref405ep`` ref405ep - ``sam460ex`` aCube Sam460ex -- ``taihu`` taihu - ``virtex-ml507`` Xilinx Virtex ML507 reference design diff --git a/docs/system/ppc/powernv.rst b/docs/system/ppc/powernv.rst index 4c4cdea527..c8f9762342 100644 --- a/docs/system/ppc/powernv.rst +++ b/docs/system/ppc/powernv.rst @@ -1,7 +1,7 @@ -PowerNV family boards (``powernv8``, ``powernv9``) +PowerNV family boards (``powernv8``, ``powernv9``, ``powernv10``) ================================================================== -PowerNV (as Non-Virtualized) is the "baremetal" platform using the +PowerNV (as Non-Virtualized) is the "bare metal" platform using the OPAL firmware. It runs Linux on IBM and OpenPOWER systems and it can be used as an hypervisor OS, running KVM guests, or simply as a host OS. @@ -16,16 +16,14 @@ Supported devices ----------------- * Multi processor support for POWER8, POWER8NVL and POWER9. - * XSCOM, serial communication sideband bus to configure chiplets - * Simple LPC Controller - * Processor Service Interface (PSI) Controller - * Interrupt Controller, XICS (POWER8) and XIVE (POWER9) - * POWER8 PHB3 PCIe Host bridge and POWER9 PHB4 PCIe Host bridge - * Simple OCC is an on-chip microcontroller used for power management - tasks - * iBT device to handle BMC communication, with the internal BMC - simulator provided by QEMU or an external BMC such as an Aspeed - QEMU machine. + * XSCOM, serial communication sideband bus to configure chiplets. + * Simple LPC Controller. + * Processor Service Interface (PSI) Controller. + * Interrupt Controller, XICS (POWER8) and XIVE (POWER9) and XIVE2 (Power10). + * POWER8 PHB3 PCIe Host bridge and POWER9 PHB4 PCIe Host bridge. + * Simple OCC is an on-chip micro-controller used for power management tasks. + * iBT device to handle BMC communication, with the internal BMC simulator + provided by QEMU or an external BMC such as an Aspeed QEMU machine. * PNOR containing the different firmware partitions. Missing devices @@ -33,32 +31,42 @@ Missing devices A lot is missing, among which : - * POWER10 processor - * XIVE2 (POWER10) interrupt controller - * I2C controllers (yet to be merged) - * NPU/NPU2/NPU3 controllers - * EEH support for PCIe Host bridge controllers - * NX controller - * VAS controller - * chipTOD (Time Of Day) + * I2C controllers (yet to be merged). + * NPU/NPU2/NPU3 controllers. + * EEH support for PCIe Host bridge controllers. + * NX controller. + * VAS controller. + * chipTOD (Time Of Day). * Self Boot Engine (SBE). - * FSI bus + * FSI bus. Firmware -------- The OPAL firmware (OpenPower Abstraction Layer) for OpenPower systems includes the runtime services ``skiboot`` and the bootloader kernel and -initramfs ``skiroot``. Source code can be found on GitHub: +initramfs ``skiroot``. Source code can be found on the `OpenPOWER account at +GitHub `_. - https://github.com/open-power. - -Prebuilt images of ``skiboot`` and ``skiboot`` are made available on the `OpenPOWER `__ site. To boot a POWER9 machine, use the `witherspoon `__ images. For POWER8, use -the `palmetto `__ images. +Prebuilt images of ``skiboot`` and ``skiroot`` are made available on the +`OpenPOWER `__ site. QEMU includes a prebuilt image of ``skiboot`` which is updated when a more recent version is required by the models. 
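+
+If you have built ``skiboot`` yourself, one way to use it (a sketch only;
+adjust the machine type and file name to your setup) is to point QEMU at
+your image with ``-bios`` instead of relying on the bundled one:
+
+.. code-block:: bash
+
+  $ qemu-system-ppc64 -M powernv9 -bios skiboot.lid -nographic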
+Current acceleration status +--------------------------- + +KVM acceleration in Linux Power hosts is provided by the kvm-hv and +kvm-pr modules. kvm-hv is adherent to PAPR and it's not compliant with +powernv. kvm-pr in theory could be used as a valid accel option but +this isn't supported by kvm-pr at this moment. + +To spare users from dealing with not so informative errors when attempting +to use accel=kvm, the powernv machine will throw an error informing that +KVM is not supported. This can be revisited in the future if kvm-pr (or +any other KVM alternative) is usable as KVM accel for this machine. + Boot options ------------ @@ -84,6 +92,7 @@ and a SATA disk : Complex PCIe configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~ + Six PHBs are defined per chip (POWER9) but no default PCI layout is provided (to be compatible with libvirt). One PCI device can be added on any of the available PCIe slots using command line options such as: @@ -158,7 +167,7 @@ one on the command line : The files `palmetto-SDR.bin `__ and `palmetto-FRU.bin `__ define a Sensor Data Record repository and a Field Replaceable Unit -inventory for a palmetto BMC. They can be used to extend the QEMU BMC +inventory for a Palmetto BMC. They can be used to extend the QEMU BMC simulator. .. code-block:: bash @@ -190,4 +199,8 @@ CAVEATS ------- * No support for multiple HW threads (SMT=1). Same as pseries. - * CPU can hang when doing intensive I/Os. Use ``-append powersave=off`` in that case. + +Maintainer contact information +------------------------------ + +Cédric Le Goater diff --git a/docs/system/ppc/ppce500.rst b/docs/system/ppc/ppce500.rst index afc58f60f5..fa40e57d18 100644 --- a/docs/system/ppc/ppce500.rst +++ b/docs/system/ppc/ppce500.rst @@ -75,7 +75,7 @@ as the BIOS. QEMU follows below truth table to select which payload to execute: When both -bios and -kernel are present, QEMU loads U-Boot and U-Boot in turns automatically loads the kernel image specified by the -kernel parameter via U-Boot's built-in "bootm" command, hence a legacy uImage format is required in -such senario. +such scenario. Running Linux kernel -------------------- @@ -113,7 +113,7 @@ To boot the 32-bit Linux kernel: .. code-block:: bash - $ qemu-system-ppc{64|32} -M ppce500 -cpu e500mc -smp 4 -m 2G \ + $ qemu-system-ppc64 -M ppce500 -cpu e500mc -smp 4 -m 2G \ -display none -serial stdio \ -kernel vmlinux \ -initrd /path/to/rootfs.cpio \ @@ -146,15 +146,18 @@ You can specify a real world SoC device that QEMU has built-in support but all these SoCs are e500v2 based MPC85xx series, hence you cannot test anything built for P4080 (e500mc), P5020 (e5500) and T2080 (e6500). +Networking +---------- + By default a VirtIO standard PCI networking device is connected as an ethernet interface at PCI address 0.1.0, but we can switch that to an e1000 NIC by: .. code-block:: bash - $ qemu-system-ppc -M ppce500 -smp 4 -m 2G \ - -display none -serial stdio \ - -bios u-boot \ - -nic tap,ifname=tap0,script=no,downscript=no,model=e1000 + $ qemu-system-ppc64 -M ppce500 -smp 4 -m 2G \ + -display none -serial stdio \ + -bios u-boot \ + -nic tap,ifname=tap0,script=no,downscript=no,model=e1000 The QEMU ``ppce500`` machine can also dynamically instantiate an eTSEC device if “-device eTSEC” is given to QEMU: @@ -162,3 +165,18 @@ if “-device eTSEC” is given to QEMU: .. 
code-block:: bash -netdev tap,ifname=tap0,script=no,downscript=no,id=net0 -device eTSEC,netdev=net0 + +Root file system on flash drive +------------------------------- + +Rather than using a root file system on ram disk, it is possible to have it on +CFI flash. Given an ext2 image whose size must be a power of two, it can be used +as follows: + +.. code-block:: bash + + $ qemu-system-ppc64 -M ppce500 -cpu e500mc -smp 4 -m 2G \ + -display none -serial stdio \ + -kernel vmlinux \ + -drive if=pflash,file=/path/to/rootfs.ext2,format=raw \ + -append "rootwait root=/dev/mtdblock0" diff --git a/docs/system/ppc/pseries.rst b/docs/system/ppc/pseries.rst index 932d4dd17d..a876d897b6 100644 --- a/docs/system/ppc/pseries.rst +++ b/docs/system/ppc/pseries.rst @@ -1,12 +1,298 @@ +=================================== pSeries family boards (``pseries``) =================================== +The Power machine para-virtualized environment described by the Linux on Power +Architecture Reference ([LoPAR]_) document is called pSeries. This environment +is also known as sPAPR, System p guests, or simply Power Linux guests (although +it is capable of running other operating systems, such as AIX). + +Even though pSeries is designed to behave as a guest environment, it is also +capable of acting as a hypervisor OS, providing, on that role, nested +virtualization capabilities. + Supported devices ------------------ +================= + + * Multi processor support for many Power processors generations: POWER7, + POWER7+, POWER8, POWER8NVL, POWER9, and Power10. Support for POWER5+ exists, + but its state is unknown. + * Interrupt Controller, XICS (POWER8) and XIVE (POWER9 and Power10) + * vPHB PCIe Host bridge. + * vscsi and vnet devices, compatible with the same devices available on a + PowerVM hypervisor with VIOS managing LPARs. + * Virtio based devices. + * PCIe device pass through. Missing devices ---------------- +=============== + * SPICE support. Firmware --------- +======== + +The pSeries platform in QEMU comes with 2 firmwares: + +`SLOF `_ (Slimline Open Firmware) is an +implementation of the `IEEE 1275-1994, Standard for Boot (Initialization +Configuration) Firmware: Core Requirements and Practices +`_. + +SLOF performs bus scanning, PCI resource allocation, provides the client +interface to boot from block devices and network. + +QEMU includes a prebuilt image of SLOF which is updated when a more recent +version is required. + +VOF (Virtual Open Firmware) is a minimalistic firmware to work with +``-machine pseries,x-vof=on``. When enabled, the firmware acts as a slim +shim and QEMU implements parts of the IEEE 1275 Open Firmware interface. + +VOF does not have device drivers, does not do PCI resource allocation and +relies on ``-kernel`` used with Linux kernels recent enough (v5.4+) +to PCI resource assignment. It is ideal to use with petitboot. + +Booting via ``-kernel`` supports the following: + ++-------------------+-------------------+------------------+ +| kernel | pseries,x-vof=off | pseries,x-vof=on | ++===================+===================+==================+ +| vmlinux BE | ✓ | ✓ | ++-------------------+-------------------+------------------+ +| vmlinux LE | ✓ | ✓ | ++-------------------+-------------------+------------------+ +| zImage.pseries BE | ✓¹ | ✓¹ | ++-------------------+-------------------+------------------+ +| zImage.pseries LE | ✓ | ✓ | ++-------------------+-------------------+------------------+ + +¹ must set kernel-addr=0 + +Build directions +================ + +.. 
code-block:: bash + + ./configure --target-list=ppc64-softmmu && make + +Running instructions +==================== + +Someone can select the pSeries machine type by running QEMU with the following +options: + +.. code-block:: bash + + qemu-system-ppc64 -M pseries + +sPAPR devices +============= + +The sPAPR specification defines a set of para-virtualized devices, which are +also supported by the pSeries machine in QEMU and can be instantiated with the +``-device`` option: + +* ``spapr-vlan`` : a virtual network interface. +* ``spapr-vscsi`` : a virtual SCSI disk interface. +* ``spapr-rng`` : a pseudo-device for passing random number generator data to the + guest (see the `H_RANDOM hypercall feature + `_ for details). +* ``spapr-vty``: a virtual teletype. +* ``spapr-pci-host-bridge``: a PCI host bridge. +* ``tpm-spapr``: a Trusted Platform Module (TPM). +* ``spapr-tpm-proxy``: a TPM proxy. + +These are compatible with the devices historically available for use when +running the IBM PowerVM hypervisor with LPARs. + +However, since these devices have originally been specified with another +hypervisor and non-Linux guests in mind, you should use the virtio counterparts +(virtio-net, virtio-blk/scsi and virtio-rng for instance) if possible instead, +since they will most probably give you better performance with Linux guests in a +QEMU environment. + +The pSeries machine in QEMU is always instantiated with the following devices: + +* A NVRAM device (``spapr-nvram``). +* A virtual teletype (``spapr-vty``). +* A PCI host bridge (``spapr-pci-host-bridge``). + +Hence, it is not needed to add them manually, unless you use the ``-nodefaults`` +command line option in QEMU. + +In the case of the default ``spapr-nvram`` device, if someone wants to make the +contents of the NVRAM device persistent, they will need to specify a PFLASH +device when starting QEMU, i.e. either use +``-drive if=pflash,file=,format=raw`` to set the default PFLASH +device, or specify one with an ID +(``-drive if=none,file=,format=raw,id=pfid``) and pass that ID to the +NVRAM device with ``-global spapr-nvram.drive=pfid``. + +sPAPR specification +------------------- + +The main source of documentation on the sPAPR standard is the [LoPAR]_ document. +However, documentation specific to QEMU's implementation of the specification +can also be found in QEMU documentation: + +.. toctree:: + :maxdepth: 1 + + ../../specs/ppc-spapr-hotplug.rst + ../../specs/ppc-spapr-hcalls.rst + ../../specs/ppc-spapr-numa.rst + ../../specs/ppc-spapr-uv-hcalls.rst + ../../specs/ppc-spapr-xive.rst + +Switching between the KVM-PR and KVM-HV kernel module +===================================================== + +Currently, there are two implementations of KVM on Power, ``kvm_hv.ko`` and +``kvm_pr.ko``. + + +If a host supports both KVM modes, and both KVM kernel modules are loaded, it is +possible to switch between the two modes with the ``kvm-type`` parameter: + +* Use ``qemu-system-ppc64 -M pseries,accel=kvm,kvm-type=PR`` to use the + ``kvm_pr.ko`` kernel module. +* Use ``qemu-system-ppc64 -M pseries,accel=kvm,kvm-type=HV`` to use ``kvm_hv.ko`` + instead. + +KVM-PR +------ + +KVM-PR uses the so-called **PR**\ oblem state of the PPC CPUs to run the guests, +i.e. the virtual machine is run in user mode and all privileged instructions +trap and have to be emulated by the host. 
That means you can run KVM-PR inside
+a pSeries guest (or a PowerVM LPAR for that matter), and that is where it
+originated, as historically (prior to POWER7) it was not possible to run Linux
+in hypervisor mode on a Power processor (this function was restricted to
+PowerVM, the IBM proprietary hypervisor).
+
+Because all privileged instructions are trapped, guests that use a lot of
+privileged instructions run quite slowly with KVM-PR. On the other hand, because
+of that, this kernel module can run on pretty much any PPC hardware, and is
+able to emulate a lot of guest CPUs. This module can even be used to run other
+PowerPC guests, like an emulated PowerMac.
+
+As KVM-PR can be run inside a pSeries guest, it can also provide nested
+virtualization capabilities (i.e. running a guest from within a guest).
+
+Note that, as KVM-HV provides much better execution performance, maintenance
+work has been much more focused on it in the past years. Maintenance for
+KVM-PR has been minimal.
+
+In order to run KVM-PR guests with POWER9 processors, you will need to start
+QEMU with the ``kernel_irqchip=off`` command line option.
+
+KVM-HV
+------
+
+KVM-HV uses the hypervisor mode of more recent Power processors, which allows
+direct access to the bare metal hardware. Although POWER7 had this capability,
+it was only starting with POWER8 that this was officially supported by IBM.
+
+Originally, KVM-HV was only available when running on a PowerNV platform (a.k.a.
+Power bare metal). Although it runs on a PowerNV platform, it can only be used
+to start pSeries guests. As the pSeries guest doesn't have access to the
+hypervisor mode of the Power CPU, it wasn't possible to run KVM-HV on a guest.
+This limitation has been lifted, and now it is possible to run KVM-HV inside
+pSeries guests as well, making nested virtualization possible with KVM-HV.
+
+As KVM-HV has access to privileged instructions, guests that use a lot of these
+can run much faster than with KVM-PR. On the other hand, the guest CPU has to be
+of the same type as the host CPU this way, e.g. it is not possible to specify an
+embedded PPC CPU for the guest with KVM-HV. However, there is at least the
+possibility to run the guest in a backward-compatibility mode of previous
+CPU generations, e.g. you can run a POWER7 guest on a POWER8 host by using
+``-cpu POWER8,compat=power7`` as a parameter to QEMU.
+
+Modules support
+===============
+
+As noted in the sections above, each module can run in a different
+environment. The following table shows in which environments each module can
+run. As long as you are in a supported environment, you can run KVM-PR or KVM-HV
+nested. Combinations not shown in the table are not available.
+ ++--------------+------------+------+-------------------+----------+--------+ +| Platform | Host type | Bits | Page table format | KVM-HV | KVM-PR | ++==============+============+======+===================+==========+========+ +| PowerNV | bare metal | 32 | hash | no | yes | +| | | +-------------------+----------+--------+ +| | | | radix | N/A | N/A | +| | +------+-------------------+----------+--------+ +| | | 64 | hash | yes | yes | +| | | +-------------------+----------+--------+ +| | | | radix | yes | no | ++--------------+------------+------+-------------------+----------+--------+ +| pSeries [1]_ | PowerNV | 32 | hash | no | yes | +| | | +-------------------+----------+--------+ +| | | | radix | N/A | N/A | +| | +------+-------------------+----------+--------+ +| | | 64 | hash | no | yes | +| | | +-------------------+----------+--------+ +| | | | radix | yes [2]_ | no | +| +------------+------+-------------------+----------+--------+ +| | PowerVM | 32 | hash | no | yes | +| | | +-------------------+----------+--------+ +| | | | radix | N/A | N/A | +| | +------+-------------------+----------+--------+ +| | | 64 | hash | no | yes | +| | | +-------------------+----------+--------+ +| | | | radix [3]_ | no | yes | ++--------------+------------+------+-------------------+----------+--------+ + +.. [1] On POWER9 DD2.1 processors, the page table format on the host and guest + must be the same. + +.. [2] KVM-HV cannot run nested on POWER8 machines. + +.. [3] Introduced on Power10 machines. + + +.. _power-papr-protected-execution-facility-pef: + +POWER (PAPR) Protected Execution Facility (PEF) +----------------------------------------------- + +Protected Execution Facility (PEF), also known as Secure Guest support +is a feature found on IBM POWER9 and POWER10 processors. + +If a suitable firmware including an Ultravisor is installed, it adds +an extra memory protection mode to the CPU. The ultravisor manages a +pool of secure memory which cannot be accessed by the hypervisor. + +When this feature is enabled in QEMU, a guest can use ultracalls to +enter "secure mode". This transfers most of its memory to secure +memory, where it cannot be eavesdropped by a compromised hypervisor. + +Launching +^^^^^^^^^ + +To launch a guest which will be permitted to enter PEF secure mode:: + + $ qemu-system-ppc64 \ + -object pef-guest,id=pef0 \ + -machine confidential-guest-support=pef0 \ + ... + +Live Migration +^^^^^^^^^^^^^^ + +Live migration is not yet implemented for PEF guests. For +consistency, QEMU currently prevents migration if the PEF feature is +enabled, whether or not the guest has actually entered secure mode. + + +Maintainer contact information +============================== + +Cédric Le Goater + +Daniel Henrique Barboza + +.. [LoPAR] `Linux on Power Architecture Reference document (LoPAR) revision + 2.9 `_. diff --git a/docs/system/qemu-block-drivers.rst b/docs/system/qemu-block-drivers.rst index bd99d4fa8e..c2c0114cec 100644 --- a/docs/system/qemu-block-drivers.rst +++ b/docs/system/qemu-block-drivers.rst @@ -1,18 +1,22 @@ :orphan: +============================ QEMU block drivers reference ============================ +-------- Synopsis -------- QEMU block driver reference manual +----------- Description ----------- .. 
include:: qemu-block-drivers.rst.inc +-------- See also -------- diff --git a/docs/system/qemu-block-drivers.rst.inc b/docs/system/qemu-block-drivers.rst.inc index 16225710eb..dfe5d2293d 100644 --- a/docs/system/qemu-block-drivers.rst.inc +++ b/docs/system/qemu-block-drivers.rst.inc @@ -511,13 +511,13 @@ of an inet socket: |qemu_system| linux.img -hdb nbd+unix://?socket=/tmp/my_socket -In this case, the block device must be exported using qemu-nbd: +In this case, the block device must be exported using ``qemu-nbd``: .. parsed-literal:: qemu-nbd --socket=/tmp/my_socket my_disk.qcow2 -The use of qemu-nbd allows sharing of a disk between several guests: +The use of ``qemu-nbd`` allows sharing of a disk between several guests: .. parsed-literal:: @@ -530,7 +530,7 @@ and then you can use it with two guests: |qemu_system| linux1.img -hdb nbd+unix://?socket=/tmp/my_socket |qemu_system| linux2.img -hdb nbd+unix://?socket=/tmp/my_socket -If the nbd-server uses named exports (supported since NBD 2.9.18, or with QEMU's +If the ``nbd-server`` uses named exports (supported since NBD 2.9.18, or with QEMU's own embedded NBD server), you must specify an export name in the URI: .. parsed-literal:: @@ -778,10 +778,32 @@ The optional *HOST_KEY_CHECK* parameter controls how the remote host's key is checked. The default is ``yes`` which means to use the local ``.ssh/known_hosts`` file. Setting this to ``no`` turns off known-hosts checking. Or you can check that the host key -matches a specific fingerprint: -``host_key_check=md5:78:45:8e:14:57:4f:d5:45:83:0a:0e:f3:49:82:c9:c8`` -(``sha1:`` can also be used as a prefix, but note that OpenSSH -tools only use MD5 to print fingerprints). +matches a specific fingerprint. The fingerprint can be provided in +``md5``, ``sha1``, or ``sha256`` format, however, it is strongly +recommended to only use ``sha256``, since the other options are +considered insecure by modern standards. The fingerprint value +must be given as a hex encoded string:: + + host_key_check=sha256:04ce2ae89ff4295a6b9c4111640bdcb3297858ee55cb434d9dd88796e93aa795 + +The key string may optionally contain ":" separators between +each pair of hex digits. + +The ``$HOME/.ssh/known_hosts`` file contains the base64 encoded +host keys. These can be converted into the format needed for +QEMU using a command such as:: + + $ for key in `grep 10.33.8.112 known_hosts | awk '{print $3}'` + do + echo $key | base64 -d | sha256sum + done + 6c3aa525beda9dc83eadfbd7e5ba7d976ecb59575d1633c87cd06ed2ed6e366f - + 12214fd9ea5b408086f98ecccd9958609bd9ac7c0ea316734006bc7818b45dc8 - + d36420137bcbd101209ef70c3b15dc07362fbe0fa53c5b135eba6e6afa82f0ce - + +Note that there can be multiple keys present per host, each with +different key ciphers. Care is needed to pick the key fingerprint +that matches the cipher QEMU will negotiate with the remote server. Currently authentication must be done using ssh-agent. Other authentication methods may be supported in future. diff --git a/docs/system/qemu-cpu-models.rst b/docs/system/qemu-cpu-models.rst index 53d7538c47..5cf6e46f8a 100644 --- a/docs/system/qemu-cpu-models.rst +++ b/docs/system/qemu-cpu-models.rst @@ -1,20 +1,24 @@ :orphan: +================================== QEMU / KVM CPU model configuration ================================== +-------- Synopsis -'''''''' +-------- QEMU CPU Modelling Infrastructure manual +----------- Description -''''''''''' +----------- .. include:: cpu-models-x86.rst.inc .. 
include:: cpu-models-mips.rst.inc +-------- See also -'''''''' +-------- The HTML documentation of QEMU for more precise information and Linux user mode emulator invocation. diff --git a/docs/system/qemu-manpage.rst b/docs/system/qemu-manpage.rst index e9a25d0680..c47a412758 100644 --- a/docs/system/qemu-manpage.rst +++ b/docs/system/qemu-manpage.rst @@ -6,9 +6,11 @@ parts of the documentation that go in the manpage as well as the HTML manual. -Title -===== +======================= +QEMU User Documentation +======================= +-------- Synopsis -------- @@ -16,11 +18,13 @@ Synopsis |qemu_system| [options] [disk_image] +----------- Description ----------- .. include:: target-i386-desc.rst.inc +------- Options ------- @@ -33,11 +37,13 @@ not need a disk image. .. include:: mux-chardev.rst.inc +----- Notes ----- .. include:: device-url-syntax.rst.inc +-------- See also -------- diff --git a/docs/system/replay.rst b/docs/system/replay.rst new file mode 100644 index 0000000000..3105327423 --- /dev/null +++ b/docs/system/replay.rst @@ -0,0 +1,237 @@ +.. _replay: + +.. + Copyright (c) 2010-2022 Institute for System Programming + of the Russian Academy of Sciences. + + This work is licensed under the terms of the GNU GPL, version 2 or later. + See the COPYING file in the top-level directory. + +Record/replay +============= + +Record/replay functions are used for the deterministic replay of qemu execution. +Execution recording writes a non-deterministic events log, which can be later +used for replaying the execution anywhere and for unlimited number of times. +It also supports checkpointing for faster rewind to the specific replay moment. +Execution replaying reads the log and replays all non-deterministic events +including external input, hardware clocks, and interrupts. + +Deterministic replay has the following features: + + * Deterministically replays whole system execution and all contents of + the memory, state of the hardware devices, clocks, and screen of the VM. + * Writes execution log into the file for later replaying for multiple times + on different machines. + * Supports i386, x86_64, ARM, AArch64, Risc-V, MIPS, MIPS64, S390X, Alpha, + PowerPC, PowerPC64, M68000, Microblaze, OpenRISC, Nios II, SPARC, + and Xtensa hardware platforms. + * Performs deterministic replay of all operations with keyboard and mouse + input devices, serial ports, and network. + +Usage of the record/replay: + + * First, record the execution with the following command line: + + .. parsed-literal:: + |qemu_system| \\ + -icount shift=auto,rr=record,rrfile=replay.bin \\ + -drive file=disk.qcow2,if=none,snapshot,id=img-direct \\ + -drive driver=blkreplay,if=none,image=img-direct,id=img-blkreplay \\ + -device ide-hd,drive=img-blkreplay \\ + -netdev user,id=net1 -device rtl8139,netdev=net1 \\ + -object filter-replay,id=replay,netdev=net1 + + * After recording, you can replay it by using another command line: + + .. parsed-literal:: + |qemu_system| \\ + -icount shift=auto,rr=replay,rrfile=replay.bin \\ + -drive file=disk.qcow2,if=none,snapshot,id=img-direct \\ + -drive driver=blkreplay,if=none,image=img-direct,id=img-blkreplay \\ + -device ide-hd,drive=img-blkreplay \\ + -netdev user,id=net1 -device rtl8139,netdev=net1 \\ + -object filter-replay,id=replay,netdev=net1 + + The only difference with recording is changing the rr option + from record to replay. + * Block device images are not actually changed in the recording mode, + because all of the changes are written to the temporary overlay file. 
+   This behavior is enabled by using the blkreplay driver. It should be used
+   for every enabled block device, as described in the :ref:`block-label` section.
+ * The ``-net none`` option should be specified when the network is not used,
+   because QEMU adds a network card by default. When the network is needed,
+   it should be configured explicitly with the replay filter, as described
+   in the :ref:`network-label` section.
+ * Interaction with audio devices and serial ports is recorded and replayed
+   automatically when such devices are enabled.
+
+Core idea
+---------
+
+The record/replay system is based on saving and replaying non-deterministic
+events (e.g. keyboard input) and simulating deterministic ones (e.g. reading
+from the HDD or the memory of the VM). Saving only non-deterministic events makes
+the log file smaller and the simulation faster.
+
+The following non-deterministic data from peripheral devices is saved into
+the log: mouse and keyboard input, network packets, audio controller input,
+serial port input, and hardware clocks (they are non-deterministic
+too, because their values are taken from the host machine). Inputs from
+simulated hardware, memory of the VM, software interrupts, and execution of
+instructions are not saved into the log, because they are deterministic and
+can be replayed by simulating the behavior of the virtual machine starting from
+its initial state.
+
+Instruction counting
+--------------------
+
+QEMU must work in icount mode to use the record/replay feature. icount was
+designed to allow deterministic execution in the absence of external inputs
+to the virtual machine. The record/replay feature is enabled through the
+``-icount`` command-line option, which makes deterministic execution of the
+machine possible even while it interacts with the user or the network.
+
+.. _block-label:
+
+Block devices
+-------------
+
+The block devices record/replay module intercepts calls to the
+bdrv coroutine functions at the top of the block driver stack.
+To record and replay block operations, the drive must be configured
+as follows:
+
+.. parsed-literal::
+    -drive file=disk.qcow2,if=none,snapshot,id=img-direct
+    -drive driver=blkreplay,if=none,image=img-direct,id=img-blkreplay
+    -device ide-hd,drive=img-blkreplay
+
+The blkreplay driver should be inserted between the disk image and the virtual
+driver controller, so that all disk requests can be recorded and replayed.
+
+.. _snapshotting-label:
+
+Snapshotting
+------------
+
+New VM snapshots may be created in replay mode. They can be used later
+to recover the desired VM state. All VM states created in replay mode
+are associated with a moment of time in the replay scenario.
+After recovering the VM state, replay will start from that position.
+
+The default starting snapshot name may be specified with the icount field
+rrsnapshot as follows:
+
+.. parsed-literal::
+    -icount shift=auto,rr=record,rrfile=replay.bin,rrsnapshot=snapshot_name
+
+This snapshot is created at the start of recording and restored at the start
+of replaying. It can also be loaded while replaying to roll back
+the execution.
+
+The ``snapshot`` flag of the disk image must be removed to save the snapshots
+in the overlay (or the original image) instead of the temporary overlay.
+
+.. parsed-literal::
+    -drive file=disk.ovl,if=none,id=img-direct
+    -drive driver=blkreplay,if=none,image=img-direct,id=img-blkreplay
+    -device ide-hd,drive=img-blkreplay
+
+Use the QEMU monitor to create additional snapshots. The ``savevm `` command
+creates a snapshot and ``loadvm `` restores it. To prevent corruption
+of the original disk image, use overlay files linked to the original images.
+Therefore all new snapshots (including the starting one) will be saved in
+overlays and the original image remains unchanged.
+
+When you need to use snapshots with a diskless virtual machine,
+it must be started with an "orphan" qcow2 image. This image will be used
+for storing VM snapshots. Here is an example of the command line for this:
+
+.. parsed-literal::
+    |qemu_system| \\
+    -icount shift=auto,rr=replay,rrfile=record.bin,rrsnapshot=init \\
+    -net none -drive file=empty.qcow2,if=none,id=rr
+
+The ``empty.qcow2`` drive is not connected to any virtual block device and is
+used for VM snapshots only.
+
+.. _network-label:
+
+Network devices
+---------------
+
+Record and replay for network interactions is performed with the network filter.
+Each backend must have its own instance of the replay filter as follows:
+
+.. parsed-literal::
+    -netdev user,id=net1 -device rtl8139,netdev=net1
+    -object filter-replay,id=replay,netdev=net1
+
+The replay network filter is used to record and replay network packets. While
+recording the virtual machine, this filter puts all packets coming from
+the outer world into the log. In replay mode, packets from the log are
+injected into the network device. All interactions with the network backend
+in replay mode are disabled.
+
+Audio devices
+-------------
+
+Audio data is recorded and replayed automatically. The command line for recording
+and replaying must contain identical specifications of audio hardware, e.g.:
+
+.. parsed-literal::
+    -soundhw ac97
+
+Serial ports
+------------
+
+Serial port input is recorded and replayed automatically. The command lines
+for recording and replaying must contain an identical number of ports in record
+and replay modes, but their backends may differ.
+E.g., ``-serial stdio`` in record mode, and ``-serial null`` in replay mode.
+
+Reverse debugging
+-----------------
+
+Reverse debugging allows "executing" the program in the reverse direction.
+The GDB remote protocol supports "reverse step" and "reverse continue"
+commands. The first one steps a single instruction backwards in time,
+and the second one finds the last breakpoint in the past.
+
+Recorded executions may be used to enable reverse debugging. QEMU can't
+execute the code in the backward direction, but it can load a snapshot and
+replay forward to find the desired position or breakpoint.
+
+The following GDB commands are supported:
+
+ - ``reverse-stepi`` (or ``rsi``) - step one instruction backwards
+ - ``reverse-continue`` (or ``rc``) - find the last breakpoint in the past
+
+Reverse step loads the nearest snapshot and replays the execution until
+the required instruction is met.
+
+Reverse continue may include several passes of examining the execution
+between the snapshots. Each of the passes includes the following steps:
+
+ #. loading the snapshot
+ #. replaying to examine the breakpoints
+ #. if a breakpoint or watchpoint was met
+
+    * loading the snapshot again
+    * replaying to the required breakpoint
+
+ #. else
+
+    * proceeding to step 1 with the earlier snapshot
+
+Therefore usage of reverse debugging requires at least one snapshot to be
+created. This can be done by omitting the ``snapshot`` option
+for the block drives and adding ``rrsnapshot`` to both the record and replay
+command lines.
+See the :ref:`snapshotting-label` section to learn more about running record/replay
+and creating the snapshot in these modes.
+
+When ``rrsnapshot`` is not used, a snapshot named ``start_debugging`` is
+created in the temporary overlay. 
This allows using reverse debugging, but with +temporary snapshots (existing within the session). diff --git a/docs/system/riscv/shakti-c.rst b/docs/system/riscv/shakti-c.rst index a6035d42b0..fea57f7b6b 100644 --- a/docs/system/riscv/shakti-c.rst +++ b/docs/system/riscv/shakti-c.rst @@ -45,7 +45,7 @@ Shakti SDK can be used to generate the baremetal example UART applications. Binary would be generated in: software/examples/uart_applns/loopback/output/loopback.shakti -You could also download the precompiled example applicatons using below +You could also download the precompiled example applications using below commands. .. code-block:: bash diff --git a/docs/system/riscv/sifive_u.rst b/docs/system/riscv/sifive_u.rst index 01108b5ecc..7b166567f9 100644 --- a/docs/system/riscv/sifive_u.rst +++ b/docs/system/riscv/sifive_u.rst @@ -24,6 +24,7 @@ The ``sifive_u`` machine supports the following devices: * 2 QSPI controllers * 1 ISSI 25WP256 flash * 1 SD card in SPI mode +* PWM0 and PWM1 Please note the real world HiFive Unleashed board has a fixed configuration of 1 E51 core and 4 U54 core combination and the RISC-V core boots in 64-bit mode. @@ -209,15 +210,16 @@ command line options with ``qemu-system-riscv32``. Running U-Boot -------------- -U-Boot mainline v2021.01 release is tested at the time of writing. To build a +U-Boot mainline v2021.07 release is tested at the time of writing. To build a U-Boot mainline bootloader that can be booted by the ``sifive_u`` machine, use -the sifive_fu540_defconfig with similar commands as described above for Linux: +the sifive_unleashed_defconfig with similar commands as described above for +Linux: .. code-block:: bash $ export CROSS_COMPILE=riscv64-linux- $ export OPENSBI=/path/to/opensbi-riscv64-generic-fw_dynamic.bin - $ make sifive_fu540_defconfig + $ make sifive_unleashed_defconfig You will get spl/u-boot-spl.bin and u-boot.itb file in the build tree. @@ -312,31 +314,29 @@ board on QEMU ``sifive_u`` machine out of the box. This allows users to develop and test the recommended RISC-V boot flow with a real world use case: ZSBL (in QEMU) loads U-Boot SPL from SD card or SPI flash to L2LIM, then U-Boot SPL loads the combined payload image of OpenSBI fw_dynamic -firmware and U-Boot proper. However sometimes we want to have a quick test -of booting U-Boot on QEMU without the needs of preparing the SPI flash or -SD card images, an alternate way can be used, which is to create a U-Boot -S-mode image by modifying the configuration of U-Boot: +firmware and U-Boot proper. + +However sometimes we want to have a quick test of booting U-Boot on QEMU +without the needs of preparing the SPI flash or SD card images, an alternate +way can be used, which is to create a U-Boot S-mode image by modifying the +configuration of U-Boot: .. code-block:: bash + $ export CROSS_COMPILE=riscv64-linux- + $ make sifive_unleashed_defconfig $ make menuconfig -then manually select the following configuration in U-Boot: +then manually select the following configuration: - Device Tree Control > Provider of DTB for DT Control > Prior Stage bootloader DTB + * Device Tree Control ---> Provider of DTB for DT Control ---> Prior Stage bootloader DTB -This lets U-Boot to use the QEMU generated device tree blob. During the build, -a build error will be seen below: +and unselect the following configuration: -.. 
code-block:: none + * Library routines ---> Allow access to binman information in the device tree - MKIMAGE u-boot.img - ./tools/mkimage: Can't open arch/riscv/dts/hifive-unleashed-a00.dtb: No such file or directory - ./tools/mkimage: failed to build FIT - make: *** [Makefile:1440: u-boot.img] Error 1 - -The above errors can be safely ignored as we don't run U-Boot SPL under QEMU -in this alternate configuration. +This changes U-Boot to use the QEMU generated device tree blob, and bypass +running the U-Boot SPL stage. Boot the 64-bit U-Boot S-mode image directly: @@ -351,14 +351,18 @@ It's possible to create a 32-bit U-Boot S-mode image as well. .. code-block:: bash $ export CROSS_COMPILE=riscv64-linux- - $ make sifive_fu540_defconfig + $ make sifive_unleashed_defconfig $ make menuconfig then manually update the following configuration in U-Boot: - Device Tree Control > Provider of DTB for DT Control > Prior Stage bootloader DTB - RISC-V architecture > Base ISA > RV32I - Boot images > Text Base > 0x80400000 + * Device Tree Control ---> Provider of DTB for DT Control ---> Prior Stage bootloader DTB + * RISC-V architecture ---> Base ISA ---> RV32I + * Boot options ---> Boot images ---> Text Base ---> 0x80400000 + +and unselect the following configuration: + + * Library routines ---> Allow access to binman information in the device tree Use the same command line options to boot the 32-bit U-Boot S-mode image: diff --git a/docs/system/riscv/virt.rst b/docs/system/riscv/virt.rst index 321d77e07d..4b16e41d7f 100644 --- a/docs/system/riscv/virt.rst +++ b/docs/system/riscv/virt.rst @@ -23,9 +23,9 @@ The ``virt`` machine supports the following devices: * 1 generic PCIe host bridge * The fw_cfg device that allows a guest to obtain data from QEMU -Note that the default CPU is a generic RV32GC/RV64GC. Optional extensions -can be enabled via command line parameters, e.g.: ``-cpu rv64,x-h=true`` -enables the hypervisor extension for RV64. +The hypervisor extension has been enabled for the default CPU, so virtual +machines with hypervisor extension can simply be used without explicitly +declaring. Hardware configuration information ---------------------------------- @@ -53,6 +53,32 @@ with the default OpenSBI firmware image as the -bios. It also supports the recommended RISC-V bootflow: U-Boot SPL (M-mode) loads OpenSBI fw_dynamic firmware and U-Boot proper (S-mode), using the standard -bios functionality. +Machine-specific options +------------------------ + +The following machine-specific options are supported: + +- aclint=[on|off] + + When this option is "on", ACLINT devices will be emulated instead of + SiFive CLINT. When not specified, this option is assumed to be "off". + +- aia=[none|aplic|aplic-imsic] + + This option allows selecting interrupt controller defined by the AIA + (advanced interrupt architecture) specification. The "aia=aplic" selects + APLIC (advanced platform level interrupt controller) to handle wired + interrupts whereas the "aia=aplic-imsic" selects APLIC and IMSIC (incoming + message signaled interrupt controller) to handle both wired interrupts and + MSIs. When not specified, this option is assumed to be "none" which selects + SiFive PLIC to handle wired interrupts. + +- aia-guests=nnn + + The number of per-HART VS-level AIA IMSIC pages to be emulated for a guest + having AIA IMSIC (i.e. "aia=aplic-imsic" selected). When not specified, + the default number of per-HART VS-level AIA IMSIC pages is 0. 
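+
+As an illustration, a minimal sketch of a command line that selects the AIA
+APLIC/IMSIC interrupt controller with one VS-level IMSIC page per hart might
+look like the following (the kernel image path is only a placeholder):
+
+.. code-block:: bash
+
+   $ qemu-system-riscv64 -M virt,aia=aplic-imsic,aia-guests=1 \
+        -smp 4 -m 2G -nographic \
+        -kernel /path/to/Image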
+ Running Linux kernel -------------------- @@ -136,3 +162,28 @@ The minimal QEMU commands to run U-Boot SPL are: To test 32-bit U-Boot images, switch to use qemu-riscv32_smode_defconfig and riscv32_spl_defconfig builds, and replace ``qemu-system-riscv64`` with ``qemu-system-riscv32`` in the command lines above to boot the 32-bit U-Boot. + +Enabling TPM +------------ + +A TPM device can be connected to the virt board by following the steps below. + +First launch the TPM emulator: + +.. code-block:: bash + + $ swtpm socket --tpm2 -t -d --tpmstate dir=/tmp/tpm \ + --ctrl type=unixio,path=swtpm-sock + +Then launch QEMU with some additional arguments to link a TPM device to the backend: + +.. code-block:: bash + + $ qemu-system-riscv64 \ + ... other args .... \ + -chardev socket,id=chrtpm,path=swtpm-sock \ + -tpmdev emulator,id=tpm0,chardev=chrtpm \ + -device tpm-tis-device,tpmdev=tpm0 + +The TPM device can be seen in the memory tree and the generated device +tree and should be accessible from the guest software. diff --git a/docs/system/s390x/bootdevices.rst b/docs/system/s390x/bootdevices.rst index 9e591cb9dc..1a7a18b43b 100644 --- a/docs/system/s390x/bootdevices.rst +++ b/docs/system/s390x/bootdevices.rst @@ -53,6 +53,32 @@ recommended to specify a CD-ROM device via ``-device scsi-cd`` (as mentioned above) instead. +Selecting kernels with the ``loadparm`` property +------------------------------------------------ + +The ``s390-ccw-virtio`` machine supports the so-called ``loadparm`` parameter +which can be used to select the kernel on the disk of the guest that the +s390-ccw bios should boot. When starting QEMU, it can be specified like this:: + + qemu-system-s390x -machine s390-ccw-virtio,loadparm= + +The first way to use this parameter is to use the word ``PROMPT`` as the +```` here. In that case the s390-ccw bios will show a list of +installed kernels on the disk of the guest and ask the user to enter a number +to chose which kernel should be booted -- similar to what can be achieved by +specifying the ``-boot menu=on`` option when starting QEMU. Note that the menu +list will only show the names of the installed kernels when using a DASD-like +disk image with 4k byte sectors. On normal SCSI-style disks with 512-byte +sectors, there is not enough space for the zipl loader on the disk to store +the kernel names, so you only get a list without names here. + +The second way to use this parameter is to use a number in the range from 0 +to 31. The numbers that can be used here correspond to the numbers that are +shown when using the ``PROMPT`` option, and the s390-ccw bios will then try +to automatically boot the kernel that is associated with the given number. +Note that ``0`` can be used to boot the default entry. + + Booting from a network device ----------------------------- @@ -65,7 +91,7 @@ you can specify it via the ``-global s390-ipl.netboot_fw=filename`` command line option. The ``bootindex`` property is especially important for booting via the network. -If you don't specify the the ``bootindex`` property here, the network bootloader +If you don't specify the ``bootindex`` property here, the network bootloader firmware code won't get loaded into the guest memory so that the network boot will fail. 
For a successful network boot, try something like this:: diff --git a/docs/system/target-i386.rst b/docs/system/target-i386.rst index 22ba5ce2c0..e64c013077 100644 --- a/docs/system/target-i386.rst +++ b/docs/system/target-i386.rst @@ -19,7 +19,17 @@ Board-specific documentation i386/microvm i386/pc -.. include:: cpu-models-x86.rst.inc +Architectural features +~~~~~~~~~~~~~~~~~~~~~~ + +.. toctree:: + :maxdepth: 1 + + i386/cpu + i386/hyperv + i386/kvm-pv + i386/sgx + i386/amd-memory-encryption .. _pcsys_005freq: diff --git a/docs/system/target-openrisc.rst b/docs/system/target-openrisc.rst new file mode 100644 index 0000000000..22cb2217a6 --- /dev/null +++ b/docs/system/target-openrisc.rst @@ -0,0 +1,71 @@ +.. _OpenRISC-System-emulator: + +OpenRISC System emulator +~~~~~~~~~~~~~~~~~~~~~~~~ + +QEMU can emulate 32-bit OpenRISC CPUs using the ``qemu-system-or1k`` executable. + +OpenRISC CPUs are generally built into "system-on-chip" (SoC) designs that run +on FPGAs. These SoCs are based on the same core architecture as the or1ksim +(the original OpenRISC instruction level simulator) which QEMU supports. For +this reason QEMU does not need to support many different boards to support the +OpenRISC hardware ecosystem. + +The OpenRISC CPU supported by QEMU is the ``or1200``, it supports an MMU and can +run linux. + +Choosing a board model +====================== + +For QEMU's OpenRISC system emulation, you must specify which board model you +want to use with the ``-M`` or ``--machine`` option; the default machine is +``or1k-sim``. + +If you intend to boot Linux, it is possible to have a single kernel image that +will boot on any of the QEMU machines. To do this one would compile all required +drivers into the kernel. This is possible because QEMU will create a device tree +structure that describes the QEMU machine and pass a pointer to the structure to +the kernel. The kernel can then use this to configure itself for the machine. + +However, typically users will have specific firmware images for a specific machine. + +If you already have a system image or a kernel that works on hardware and you +want to boot with QEMU, check whether QEMU lists that machine in its ``-machine +help`` output. If it is listed, then you can probably use that board model. If +it is not listed, then unfortunately your image will almost certainly not boot +on QEMU. (You might be able to extract the filesystem and use that with a +different kernel which boots on a system that QEMU does emulate.) + +If you don't care about reproducing the idiosyncrasies of a particular +bit of hardware, such as small amount of RAM, no PCI or other hard disk, etc., +and just want to run Linux, the best option is to use the ``virt`` board. This +is a platform which doesn't correspond to any real hardware and is designed for +use in virtual machines. You'll need to compile Linux with a suitable +configuration for running on the ``virt`` board. ``virt`` supports PCI, virtio +and large amounts of RAM. + +Board-specific documentation +============================ + +.. + This table of contents should be kept sorted alphabetically + by the title text of each file, which isn't the same ordering + as an alphabetical sort by filename. + +.. toctree:: + :maxdepth: 1 + + openrisc/or1k-sim + openrisc/virt + +Emulated CPU architecture support +================================= + +.. toctree:: + openrisc/emulation + +OpenRISC CPU features +===================== + +.. 
toctree:: + openrisc/cpu-features diff --git a/docs/system/targets.rst b/docs/system/targets.rst index 9dcd95dd84..224fadae71 100644 --- a/docs/system/targets.rst +++ b/docs/system/targets.rst @@ -21,6 +21,7 @@ Contents: target-m68k target-mips target-ppc + target-openrisc target-riscv target-rx target-s390x diff --git a/docs/system/tls.rst b/docs/system/tls.rst index b0973afe1b..e284c82801 100644 --- a/docs/system/tls.rst +++ b/docs/system/tls.rst @@ -182,7 +182,7 @@ certificates. --template client-hostNNN.info \ --outfile client-hostNNN-cert.pem -The subject alt name extension data is not required for clients, so the +The subject alt name extension data is not required for clients, so the ``dns_name`` and ``ip_address`` fields are not included. The ``tls_www_client`` keyword is the key purpose extension to indicate this certificate is intended for usage in a web client. Although QEMU network @@ -311,7 +311,7 @@ containing one or more usernames and random keys:: mkdir -m 0700 /tmp/keys psktool -u rich -p /tmp/keys/keys.psk -TLS-enabled servers such as qemu-nbd can use this directory like so:: +TLS-enabled servers such as ``qemu-nbd`` can use this directory like so:: qemu-nbd \ -t -x / \ diff --git a/docs/throttle.txt b/docs/throttle.txt index b5b78b7326..0a0453a5ee 100644 --- a/docs/throttle.txt +++ b/docs/throttle.txt @@ -273,11 +273,9 @@ A group can be created using the object-add QMP function: "arguments": { "qom-type": "throttle-group", "id": "group0", - "props": { - "limits" : { - "iops-total": 1000 - "bps-write": 2097152 - } + "limits" : { + "iops-total": 1000, + "bps-write": 2097152 } } } diff --git a/docs/tools/index.rst b/docs/tools/index.rst index ef6041a490..1edd5a8054 100644 --- a/docs/tools/index.rst +++ b/docs/tools/index.rst @@ -1,5 +1,6 @@ +----- Tools -===== +----- This section of the manual documents QEMU's "tools": its command line utilities and other standalone programs. diff --git a/docs/tools/qemu-img.rst b/docs/tools/qemu-img.rst index b7d602a288..15aeddc6d8 100644 --- a/docs/tools/qemu-img.rst +++ b/docs/tools/qemu-img.rst @@ -1,3 +1,4 @@ +======================= QEMU disk image utility ======================= @@ -56,7 +57,7 @@ cases. See below for a description of the supported disk formats. *OUTPUT_FMT* is the destination format. *OPTIONS* is a comma separated list of format specific options in a -name=value format. Use ``-o ?`` for an overview of the options supported +name=value format. Use ``-o help`` for an overview of the options supported by the used format or see the format descriptions below for details. *SNAPSHOT_PARAM* is param used for internal snapshot, format is @@ -126,9 +127,9 @@ by the used format or see the format descriptions below for details. .. option:: -S SIZE Indicates the consecutive number of bytes that must contain only zeros - for qemu-img to create a sparse image during conversion. This value is rounded - down to the nearest 512 bytes. You may use the common size suffixes like - ``k`` for kilobytes. + for ``qemu-img`` to create a sparse image during conversion. This value is + rounded down to the nearest 512 bytes. You may use the common size suffixes + like ``k`` for kilobytes. .. option:: -t CACHE @@ -331,8 +332,8 @@ Command description: ``-r all`` fixes all kinds of errors, with a higher risk of choosing the wrong fix or hiding corruption that has already occurred. - Only the formats ``qcow2``, ``qed`` and ``vdi`` support - consistency checks. 
+ Only the formats ``qcow2``, ``qed``, ``parallels``, ``vhdx``, ``vmdk`` and + ``vdi`` support consistency checks. In case the image does not have any inconsistencies, check exits with ``0``. Other exit codes indicate the kind of inconsistency found or if another error @@ -414,7 +415,7 @@ Command description: 4 Error on reading data -.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps [--skip-broken-bitmaps]] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME +.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps [--skip-broken-bitmaps]] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE [-F BACKING_FMT]] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME Convert the disk image *FILENAME* or a snapshot *SNAPSHOT_PARAM* to disk image *OUTPUT_FILENAME* using format *OUTPUT_FMT*. It can @@ -430,7 +431,7 @@ Command description: suppressed from the destination image. *SPARSE_SIZE* indicates the consecutive number of bytes (defaults to 4k) - that must contain only zeros for qemu-img to create a sparse image during + that must contain only zeros for ``qemu-img`` to create a sparse image during conversion. If *SPARSE_SIZE* is 0, the source will not be scanned for unallocated or zero sectors, and the destination image will always be fully allocated. @@ -438,7 +439,7 @@ Command description: You can use the *BACKING_FILE* option to force the output image to be created as a copy on write image of the specified base image; the *BACKING_FILE* should have the same content as the input's base image, - however the path, image format, etc may differ. + however the path, image format (as given by *BACKING_FMT*), etc may differ. If a relative path name is given, the backing file is looked up relative to the directory containing *OUTPUT_FILENAME*. @@ -446,7 +447,7 @@ Command description: If the ``-n`` option is specified, the target volume creation will be skipped. This is useful for formats such as ``rbd`` if the target volume has already been created with site specific options that cannot - be supplied through qemu-img. + be supplied through ``qemu-img``. Out of order writes can be enabled with ``-W`` to improve performance. This is only recommended for preallocated devices like host devices or other @@ -462,7 +463,7 @@ Command description: ``--skip-broken-bitmaps`` is also specified to copy only the consistent bitmaps. -.. option:: create [--object OBJECTDEF] [-q] [-f FMT] [-b BACKING_FILE] [-F BACKING_FMT] [-u] [-o OPTIONS] FILENAME [SIZE] +.. option:: create [--object OBJECTDEF] [-q] [-f FMT] [-b BACKING_FILE [-F BACKING_FMT]] [-u] [-o OPTIONS] FILENAME [SIZE] Create the new disk image *FILENAME* of size *SIZE* and format *FMT*. Depending on the file format, you can add one or more *OPTIONS* @@ -471,7 +472,7 @@ Command description: If the option *BACKING_FILE* is specified, then the image will record only the differences from *BACKING_FILE*. No size needs to be specified in this case. *BACKING_FILE* will never be modified unless you use the - ``commit`` monitor command (or qemu-img commit). + ``commit`` monitor command (or ``qemu-img commit``). 
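+
+  For instance, a minimal sketch of creating an overlay on top of a base image
+  (the file names here are only illustrative)::
+
+    qemu-img create -f qcow2 -b base.qcow2 -F qcow2 overlay.qcow2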
If a relative path name is given, the backing file is looked up relative to the directory containing *FILENAME*. @@ -683,7 +684,7 @@ Command description: Safe mode This is the default mode and performs a real rebase operation. The - new backing file may differ from the old one and qemu-img rebase + new backing file may differ from the old one and ``qemu-img rebase`` will take care of keeping the guest-visible content of *FILENAME* unchanged. @@ -696,7 +697,7 @@ Command description: exists. Unsafe mode - qemu-img uses the unsafe mode if ``-u`` is specified. In this + ``qemu-img`` uses the unsafe mode if ``-u`` is specified. In this mode, only the backing file name and format of *FILENAME* is changed without any checks on the file contents. The user must take care of specifying the correct new backing file, or the guest-visible @@ -734,7 +735,7 @@ Command description: sizes accordingly. Failure to do so will result in data loss! When shrinking images, the ``--shrink`` option must be given. This informs - qemu-img that the user acknowledges all loss of data beyond the truncated + ``qemu-img`` that the user acknowledges all loss of data beyond the truncated image's end. After using this command to grow a disk image, you must use file system and diff --git a/docs/tools/qemu-nbd.rst b/docs/tools/qemu-nbd.rst index ee862fa0bc..faf6349ea5 100644 --- a/docs/tools/qemu-nbd.rst +++ b/docs/tools/qemu-nbd.rst @@ -1,3 +1,4 @@ +===================================== QEMU Disk Network Block Device Server ===================================== @@ -26,18 +27,18 @@ Options .. program:: qemu-nbd *filename* is a disk image filename, or a set of block -driver options if ``--image-opts`` is specified. +driver options if :option:`--image-opts` is specified. *dev* is an NBD device. -.. option:: --object type,id=ID,...props... +.. option:: --object type,id=ID,... Define a new instance of the *type* object class identified by *ID*. See the :manpage:`qemu(1)` manual page for full details of the properties supported. The common object types that it makes sense to define are the ``secret`` object, which is used to supply passwords and/or encryption keys, and the ``tls-creds`` object, which is used to supply TLS - credentials for the qemu-nbd server or client. + credentials for the ``qemu-nbd`` server or client. .. option:: -p, --port=PORT @@ -98,8 +99,10 @@ driver options if ``--image-opts`` is specified. .. option:: --cache=CACHE - The cache mode to be used with the file. See the documentation of - the emulator's ``-drive cache=...`` option for allowed values. + The cache mode to be used with the file. Valid values are: + ``none``, ``writeback`` (the default), ``writethrough``, + ``directsync`` and ``unsafe``. See the documentation of + the emulator's ``-drive cache=...`` option for more info. .. option:: -n, --nocache @@ -136,8 +139,7 @@ driver options if ``--image-opts`` is specified. .. option:: -e, --shared=NUM Allow up to *NUM* clients to share the device (default - ``1``), 0 for unlimited. Safe for readers, but for now, - consistency is not guaranteed between multiple writers. + ``1``), 0 for unlimited. .. option:: -t, --persistent @@ -162,9 +164,22 @@ driver options if ``--image-opts`` is specified. .. option:: --tls-creds=ID Enable mandatory TLS encryption for the server by setting the ID - of the TLS credentials object previously created with the --object - option; or provide the credentials needed for connecting as a client - in list mode. 
+ of the TLS credentials object previously created with the + :option:`--object` option; or provide the credentials needed for + connecting as a client in list mode. + +.. option:: --tls-hostname=hostname + + When validating an x509 certificate received over a TLS connection, + the hostname that the NBD client used to connect will be checked + against information in the server provided certificate. Sometimes + it might be required to override the hostname used to perform this + check. For example, if the NBD client is using a tunnel from localhost + to connect to the remote server, the :option:`--tls-hostname` option should + be used to set the officially expected hostname of the remote NBD + server. This can also be used if accessing NBD over a UNIX socket + where there is no inherent hostname available. This is only permitted + when acting as a NBD client with the :option:`--list` option. .. option:: --fork @@ -210,7 +225,7 @@ disconnects: qemu-nbd -f qcow2 file.qcow2 Start a long-running server listening with encryption on port 10810, -and whitelist clients with a specific X.509 certificate to connect to +and allow clients with a specific X.509 certificate to connect to a 1 megabyte subset of a raw file, using the export name 'subset': :: @@ -235,7 +250,7 @@ daemon: Expose the guest-visible contents of a qcow2 file via a block device /dev/nbd0 (and possibly creating /dev/nbd0p1 and friends for partitions found within), then disconnect the device when done. -Access to bind qemu-nbd to an /dev/nbd device generally requires root +Access to bind ``qemu-nbd`` to a /dev/nbd device generally requires root privileges, and may also require the execution of ``modprobe nbd`` to enable the kernel NBD client module. *CAUTION*: Do not use this method to mount filesystems from an untrusted guest image - a diff --git a/docs/tools/qemu-pr-helper.rst b/docs/tools/qemu-pr-helper.rst index ac036180ac..c32867cfc6 100644 --- a/docs/tools/qemu-pr-helper.rst +++ b/docs/tools/qemu-pr-helper.rst @@ -1,3 +1,4 @@ +================================== QEMU persistent reservation helper ================================== @@ -20,8 +21,8 @@ programs because incorrect usage can disrupt regular operation of the storage fabric. QEMU's SCSI passthrough devices ``scsi-block`` and ``scsi-generic`` support passing guest persistent reservation requests to a privileged external helper program. :program:`qemu-pr-helper` -is that external helper; it creates a socket which QEMU can -connect to to communicate with it. +is that external helper; it creates a listener socket which will +accept incoming connections for communication with QEMU. If you want to run VMs in a setup like this, this helper should be started as a system service, and you should read the QEMU manual diff --git a/docs/tools/qemu-storage-daemon.rst b/docs/tools/qemu-storage-daemon.rst index 3ec4bdd914..ea00149a63 100644 --- a/docs/tools/qemu-storage-daemon.rst +++ b/docs/tools/qemu-storage-daemon.rst @@ -1,3 +1,4 @@ +=================== QEMU Storage Daemon =================== @@ -9,9 +10,10 @@ Synopsis Description ----------- -qemu-storage-daemon provides disk image functionality from QEMU, qemu-img, and -qemu-nbd in a long-running process controlled via QMP commands without running -a virtual machine. It can export disk images, run block job operations, and +``qemu-storage-daemon`` provides disk image functionality from QEMU, +``qemu-img``, and ``qemu-nbd`` in a long-running process controlled via QMP +commands without running a virtual machine. 
+It can export disk images, run block job operations, and perform other disk-related operations. The daemon is controlled via a QMP monitor and initial configuration from the command-line. @@ -74,7 +76,8 @@ Standard options: .. option:: --export [type=]nbd,id=,node-name=[,name=][,writable=on|off][,bitmap=] --export [type=]vhost-user-blk,id=,node-name=,addr.type=unix,addr.path=[,writable=on|off][,logical-block-size=][,num-queues=] --export [type=]vhost-user-blk,id=,node-name=,addr.type=fd,addr.str=[,writable=on|off][,logical-block-size=][,num-queues=] - --export [type=]fuse,id=,node-name=,mountpoint=[,growable=on|off][,writable=on|off] + --export [type=]fuse,id=,node-name=,mountpoint=[,growable=on|off][,writable=on|off][,allow-other=on|off|auto] + --export [type=]vduse-blk,id=,node-name=,name=[,writable=on|off][,num-queues=][,queue-size=][,logical-block-size=][,serial=] is a block export definition. ``node-name`` is the block node that should be exported. ``writable`` determines whether or not the export allows write @@ -101,7 +104,33 @@ Standard options: mounted). Consequently, applications that have opened the given file before the export became active will continue to see its original content. If ``growable`` is set, writes after the end of the exported file will grow the - block node to fit. + block node to fit. The ``allow-other`` option controls whether users other + than the user running the process will be allowed to access the export. Note + that enabling this option as a non-root user requires enabling the + user_allow_other option in the global fuse.conf configuration file. Setting + ``allow-other`` to auto (the default) will try enabling this option, and on + error fall back to disabling it. + + The ``vduse-blk`` export type takes a ``name`` (must be unique across the host) + to create the VDUSE device. + ``num-queues`` sets the number of virtqueues (the default is 1). + ``queue-size`` sets the virtqueue descriptor table size (the default is 256). + + The instantiated VDUSE device must then be added to the vDPA bus using the + vdpa(8) command from the iproute2 project:: + + # vdpa dev add name mgmtdev vduse + + The device can be removed from the vDPA bus later as follows:: + + # vdpa dev del + + For more information about attaching vDPA devices to the host with + virtio_vdpa.ko or attaching them to guests with vhost_vdpa.ko, see + https://vdpa-dev.gitlab.io/. + + For more information about VDUSE, see + https://docs.kernel.org/userspace-api/vduse.html. .. option:: --monitor MONITORDEF @@ -147,6 +176,13 @@ Standard options: created but before accepting connections. The daemon has started successfully when the pid file is written and clients may begin connecting. +.. option:: --daemonize + + Daemonize the process. The parent process will exit once startup is complete + (i.e., after the pid file has been or would have been written) or failure + occurs. Its exit code reflects whether the child has started up successfully + or failed to do so. 
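+
+  A minimal sketch combining this with ``--pidfile`` (the socket and pid file
+  paths below are only illustrative)::
+
+    $ qemu-storage-daemon \
+         --daemonize \
+         --pidfile /run/qsd.pid \
+         --chardev socket,id=qmp0,path=/run/qsd-qmp.sock,server=on,wait=off \
+         --monitor chardev=qmp0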
+ Examples -------- Launch the daemon with QMP monitor socket ``qmp.sock`` so clients can execute @@ -199,7 +235,7 @@ Export raw image file ``disk.img`` over NBD UNIX domain socket ``nbd.sock``:: --nbd-server addr.type=unix,addr.path=nbd.sock \ --export type=nbd,id=export,node-name=disk,writable=on -Export a qcow2 image file ``disk.qcow2`` as a vhosts-user-blk device over UNIX +Export a qcow2 image file ``disk.qcow2`` as a vhost-user-blk device over UNIX domain socket ``vhost-user-blk.sock``:: $ qemu-storage-daemon \ diff --git a/docs/tools/qemu-trace-stap.rst b/docs/tools/qemu-trace-stap.rst index fb70445c75..2169ce5d17 100644 --- a/docs/tools/qemu-trace-stap.rst +++ b/docs/tools/qemu-trace-stap.rst @@ -1,3 +1,4 @@ +========================= QEMU SystemTap trace tool ========================= @@ -45,19 +46,19 @@ The following commands are valid: any of the listed names. If no *PATTERN* is given, the all possible probes will be listed. - For example, to list all probes available in the ``qemu-system-x86_64`` + For example, to list all probes available in the |qemu_system| binary: - :: + .. parsed-literal:: - $ qemu-trace-stap list qemu-system-x86_64 + $ qemu-trace-stap list |qemu_system| To filter the list to only cover probes related to QEMU's cryptographic subsystem, in a binary outside ``$PATH`` - :: + .. parsed-literal:: - $ qemu-trace-stap list /opt/qemu/4.0.0/bin/qemu-system-x86_64 'qcrypto*' + $ qemu-trace-stap list /opt/qemu/|version|/bin/|qemu_system| 'qcrypto*' .. option:: run OPTIONS BINARY PATTERN... @@ -89,26 +90,26 @@ The following commands are valid: Restrict the tracing session so that it only triggers for the process identified by *PID*. - For example, to monitor all processes executing ``qemu-system-x86_64`` + For example, to monitor all processes executing |qemu_system| as found on ``$PATH``, displaying all I/O related probes: - :: + .. parsed-literal:: - $ qemu-trace-stap run qemu-system-x86_64 'qio*' + $ qemu-trace-stap run |qemu_system| 'qio*' To monitor only the QEMU process with PID 1732 - :: + .. parsed-literal:: - $ qemu-trace-stap run --pid=1732 qemu-system-x86_64 'qio*' + $ qemu-trace-stap run --pid=1732 |qemu_system| 'qio*' To monitor QEMU processes running an alternative binary outside of ``$PATH``, displaying verbose information about setup of the tracing environment: - :: + .. parsed-literal:: - $ qemu-trace-stap -v run /opt/qemu/4.0.0/qemu-system-x86_64 'qio*' + $ qemu-trace-stap -v run /opt/qemu/|version|/bin/|qemu_system| 'qio*' See also -------- diff --git a/docs/tools/virtiofsd.rst b/docs/tools/virtiofsd.rst index b208f2a6f0..995a754a7b 100644 --- a/docs/tools/virtiofsd.rst +++ b/docs/tools/virtiofsd.rst @@ -104,6 +104,18 @@ Options * posix_acl|no_posix_acl - Enable/disable posix acl support. Posix ACLs are disabled by default. + * security_label|no_security_label - + Enable/disable security label support. Security labels are disabled by + default. This will allow client to send a MAC label of file during + file creation. Typically this is expected to be SELinux security + label. Server will try to set that label on newly created file + atomically wherever possible. + + * killpriv_v2|no_killpriv_v2 - + Enable/disable ``FUSE_HANDLE_KILLPRIV_V2`` support. KILLPRIV_V2 is enabled + by default as long as the client supports it. Enabling this option helps + with performance in write path. + .. option:: --socket-path=PATH Listen on vhost-user UNIX domain socket at PATH. @@ -120,7 +132,7 @@ Options .. 
option:: --thread-pool-size=NUM Restrict the number of worker threads per request queue to NUM. The default - is 64. + is 0. .. option:: --cache=none|auto|always @@ -136,8 +148,8 @@ Extended attribute (xattr) mapping By default the name of xattr's used by the client are passed through to the server file system. This can be a problem where either those xattr names are used by something on the server (e.g. selinux client/server confusion) or if the -virtiofsd is running in a container with restricted privileges where it cannot -access some attributes. +``virtiofsd`` is running in a container with restricted privileges where it +cannot access some attributes. Mapping syntax ~~~~~~~~~~~~~~ @@ -183,6 +195,12 @@ Using ':' as the separator a rule is of the form: 'ok' as either an explicit terminator or for special handling of certain patterns. +- 'unsupported' - If a client tries to use a name matching 'key' it's + denied using ENOTSUP; when the server passes an attribute + name matching 'prepend' it's hidden. In many ways its use is very like + 'ok' as either an explicit terminator or for special handling of certain + patterns. + **key** is a string tested as a prefix on an attribute name originating on the client. It maybe empty in which case a 'client' rule will always match on client names. @@ -214,7 +232,7 @@ e.g.: ``:ok:server::security.:`` - will pass 'securty.' xattr's in listxattr from the server + will pass 'security.' xattr's in listxattr from the server and ignore following rules. ``:ok:all:::`` @@ -342,6 +360,31 @@ client arguments or lists returned from the host. This stops the client seeing any 'security.' attributes on the server and stops it setting any. +SELinux support +--------------- +One can enable support for SELinux by running virtiofsd with the option +"-o security_label". However, this will try to save the guest's security context +in the xattr security.selinux on the host, and it might fail if the host's SELinux +policy does not permit virtiofsd to do this operation. + +Hence, it is preferred to remap the guest's "security.selinux" xattr to, say, +"trusted.virtiofs.security.selinux" on the host. + +"-o xattrmap=:map:security.selinux:trusted.virtiofs.:" + +This makes sure that the guest's and the host's SELinux xattrs on the same file +remain separate and do not interfere with each other, and allows both +the host and the guest to implement their own separate SELinux policies. + +Setting a trusted xattr on the host requires CAP_SYS_ADMIN, so this +capability needs to be added to the daemon. + +"-o modcaps=+sys_admin" + +Giving CAP_SYS_ADMIN increases the risk to the system: virtiofsd becomes more +powerful and, if it is compromised, it can do a lot of damage to the host system. +So keep this trade-off in mind while making a decision. + Examples -------- diff --git a/docs/u2f.txt b/docs/u2f.txt index 8f44994818..7f5813a0b7 100644 --- a/docs/u2f.txt +++ b/docs/u2f.txt @@ -21,7 +21,7 @@ The second factor is materialized by a device implementing the U2F protocol. In case of a USB U2F security key, it is a USB HID device that implements the U2F protocol. -In Qemu, the USB U2F key device offers a dedicated support of U2F, allowing +In QEMU, the USB U2F key device offers a dedicated support of U2F, allowing guest USB FIDO/U2F security keys operating in two possible modes: pass-through and emulated.
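To illustrate how the virtiofsd SELinux options described above fit together (the socket path and shared directory below are examples, not taken from this patch), a combined invocation might look like::

    # virtiofsd --socket-path=/run/virtiofsd.sock -o source=/srv/share \
        -o security_label \
        -o xattrmap=":map:security.selinux:trusted.virtiofs.:" \
        -o modcaps=+sys_admin

This stores the guest's SELinux labels as ``trusted.virtiofs.security.selinux`` on the host and grants the daemon the capability it needs to write trusted xattrs.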
diff --git a/docs/user/index.rst b/docs/user/index.rst index 9faa4badd7..2c4e29f3db 100644 --- a/docs/user/index.rst +++ b/docs/user/index.rst @@ -1,5 +1,6 @@ +------------------- User Mode Emulation -=================== +------------------- This section of the manual is the overall guide for users using QEMU for user-mode emulation. In this mode, QEMU can launch diff --git a/docs/user/main.rst b/docs/user/main.rst index e08d4be63b..6f2ffa080f 100644 --- a/docs/user/main.rst +++ b/docs/user/main.rst @@ -166,7 +166,6 @@ Other binaries - user mode (PowerPC) - * ``qemu-ppc64abi32`` TODO. * ``qemu-ppc64`` TODO. * ``qemu-ppc`` TODO. diff --git a/dump/dump.c b/dump/dump.c index ab625909f3..df117c847f 100644 --- a/dump/dump.c +++ b/dump/dump.c @@ -12,7 +12,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/cutils.h" #include "elf.h" #include "exec/hwaddr.h" @@ -29,6 +28,7 @@ #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "hw/misc/vmcoreinfo.h" +#include "migration/blocker.h" #ifdef TARGET_X86_64 #include "win_dump.h" @@ -47,11 +47,23 @@ #define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */ +static Error *dump_migration_blocker; + #define ELF_NOTE_SIZE(hdr_size, name_size, desc_size) \ ((DIV_ROUND_UP((hdr_size), 4) + \ DIV_ROUND_UP((name_size), 4) + \ DIV_ROUND_UP((desc_size), 4)) * 4) +static inline bool dump_is_64bit(DumpState *s) +{ + return s->dump_info.d_class == ELFCLASS64; +} + +static inline bool dump_has_filter(DumpState *s) +{ + return s->filter_area_length > 0; +} + uint16_t cpu_to_dump16(DumpState *s, uint16_t val) { if (s->dump_info.d_endian == ELFDATA2LSB) { @@ -91,6 +103,7 @@ static int dump_cleanup(DumpState *s) memory_mapping_list_free(&s->list); close(s->fd); g_free(s->guest_note); + g_array_unref(s->string_table_buf); s->guest_note = NULL; if (s->resume) { if (s->detached) { @@ -101,6 +114,7 @@ static int dump_cleanup(DumpState *s) qemu_mutex_unlock_iothread(); } } + migrate_del_blocker(dump_migration_blocker); return 0; } @@ -118,63 +132,81 @@ static int fd_write_vmcore(const void *buf, size_t size, void *opaque) return 0; } -static void write_elf64_header(DumpState *s, Error **errp) +static void prepare_elf64_header(DumpState *s, Elf64_Ehdr *elf_header) { - Elf64_Ehdr elf_header; - int ret; + /* + * phnum in the elf header is 16 bit, if we have more segments we + * set phnum to PN_XNUM and write the real number of segments to a + * special section. 
+ */ + uint16_t phnum = MIN(s->phdr_num, PN_XNUM); - memset(&elf_header, 0, sizeof(Elf64_Ehdr)); - memcpy(&elf_header, ELFMAG, SELFMAG); - elf_header.e_ident[EI_CLASS] = ELFCLASS64; - elf_header.e_ident[EI_DATA] = s->dump_info.d_endian; - elf_header.e_ident[EI_VERSION] = EV_CURRENT; - elf_header.e_type = cpu_to_dump16(s, ET_CORE); - elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine); - elf_header.e_version = cpu_to_dump32(s, EV_CURRENT); - elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header)); - elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr)); - elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr)); - elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num); - if (s->have_section) { - uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info; - - elf_header.e_shoff = cpu_to_dump64(s, shoff); - elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr)); - elf_header.e_shnum = cpu_to_dump16(s, 1); - } - - ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s); - if (ret < 0) { - error_setg_errno(errp, -ret, "dump: failed to write elf header"); - } + memset(elf_header, 0, sizeof(Elf64_Ehdr)); + memcpy(elf_header, ELFMAG, SELFMAG); + elf_header->e_ident[EI_CLASS] = ELFCLASS64; + elf_header->e_ident[EI_DATA] = s->dump_info.d_endian; + elf_header->e_ident[EI_VERSION] = EV_CURRENT; + elf_header->e_type = cpu_to_dump16(s, ET_CORE); + elf_header->e_machine = cpu_to_dump16(s, s->dump_info.d_machine); + elf_header->e_version = cpu_to_dump32(s, EV_CURRENT); + elf_header->e_ehsize = cpu_to_dump16(s, sizeof(elf_header)); + elf_header->e_phoff = cpu_to_dump64(s, s->phdr_offset); + elf_header->e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr)); + elf_header->e_phnum = cpu_to_dump16(s, phnum); + elf_header->e_shoff = cpu_to_dump64(s, s->shdr_offset); + elf_header->e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr)); + elf_header->e_shnum = cpu_to_dump16(s, s->shdr_num); + elf_header->e_shstrndx = cpu_to_dump16(s, s->shdr_num - 1); } -static void write_elf32_header(DumpState *s, Error **errp) +static void prepare_elf32_header(DumpState *s, Elf32_Ehdr *elf_header) { - Elf32_Ehdr elf_header; + /* + * phnum in the elf header is 16 bit, if we have more segments we + * set phnum to PN_XNUM and write the real number of segments to a + * special section. 
+ */ + uint16_t phnum = MIN(s->phdr_num, PN_XNUM); + + memset(elf_header, 0, sizeof(Elf32_Ehdr)); + memcpy(elf_header, ELFMAG, SELFMAG); + elf_header->e_ident[EI_CLASS] = ELFCLASS32; + elf_header->e_ident[EI_DATA] = s->dump_info.d_endian; + elf_header->e_ident[EI_VERSION] = EV_CURRENT; + elf_header->e_type = cpu_to_dump16(s, ET_CORE); + elf_header->e_machine = cpu_to_dump16(s, s->dump_info.d_machine); + elf_header->e_version = cpu_to_dump32(s, EV_CURRENT); + elf_header->e_ehsize = cpu_to_dump16(s, sizeof(elf_header)); + elf_header->e_phoff = cpu_to_dump32(s, s->phdr_offset); + elf_header->e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr)); + elf_header->e_phnum = cpu_to_dump16(s, phnum); + elf_header->e_shoff = cpu_to_dump32(s, s->shdr_offset); + elf_header->e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr)); + elf_header->e_shnum = cpu_to_dump16(s, s->shdr_num); + elf_header->e_shstrndx = cpu_to_dump16(s, s->shdr_num - 1); +} + +static void write_elf_header(DumpState *s, Error **errp) +{ + Elf32_Ehdr elf32_header; + Elf64_Ehdr elf64_header; + size_t header_size; + void *header_ptr; int ret; - memset(&elf_header, 0, sizeof(Elf32_Ehdr)); - memcpy(&elf_header, ELFMAG, SELFMAG); - elf_header.e_ident[EI_CLASS] = ELFCLASS32; - elf_header.e_ident[EI_DATA] = s->dump_info.d_endian; - elf_header.e_ident[EI_VERSION] = EV_CURRENT; - elf_header.e_type = cpu_to_dump16(s, ET_CORE); - elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine); - elf_header.e_version = cpu_to_dump32(s, EV_CURRENT); - elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header)); - elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr)); - elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr)); - elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num); - if (s->have_section) { - uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info; - - elf_header.e_shoff = cpu_to_dump32(s, shoff); - elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr)); - elf_header.e_shnum = cpu_to_dump16(s, 1); + /* The NULL header and the shstrtab are always defined */ + assert(s->shdr_num >= 2); + if (dump_is_64bit(s)) { + prepare_elf64_header(s, &elf64_header); + header_size = sizeof(elf64_header); + header_ptr = &elf64_header; + } else { + prepare_elf32_header(s, &elf32_header); + header_size = sizeof(elf32_header); + header_ptr = &elf32_header; } - ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s); + ret = fd_write_vmcore(header_ptr, header_size, s); if (ret < 0) { error_setg_errno(errp, -ret, "dump: failed to write elf header"); } @@ -229,25 +261,15 @@ static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping, } } -static void write_elf64_note(DumpState *s, Error **errp) +static void prepare_elf64_phdr_note(DumpState *s, Elf64_Phdr *phdr) { - Elf64_Phdr phdr; - hwaddr begin = s->memory_offset - s->note_size; - int ret; - - memset(&phdr, 0, sizeof(Elf64_Phdr)); - phdr.p_type = cpu_to_dump32(s, PT_NOTE); - phdr.p_offset = cpu_to_dump64(s, begin); - phdr.p_paddr = 0; - phdr.p_filesz = cpu_to_dump64(s, s->note_size); - phdr.p_memsz = cpu_to_dump64(s, s->note_size); - phdr.p_vaddr = 0; - - ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s); - if (ret < 0) { - error_setg_errno(errp, -ret, - "dump: failed to write program header table"); - } + memset(phdr, 0, sizeof(*phdr)); + phdr->p_type = cpu_to_dump32(s, PT_NOTE); + phdr->p_offset = cpu_to_dump64(s, s->note_offset); + phdr->p_paddr = 0; + phdr->p_filesz = cpu_to_dump64(s, s->note_size); + phdr->p_memsz = cpu_to_dump64(s, s->note_size); + 
phdr->p_vaddr = 0; } static inline int cpu_index(CPUState *cpu) @@ -295,25 +317,15 @@ static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s, write_guest_note(f, s, errp); } -static void write_elf32_note(DumpState *s, Error **errp) +static void prepare_elf32_phdr_note(DumpState *s, Elf32_Phdr *phdr) { - hwaddr begin = s->memory_offset - s->note_size; - Elf32_Phdr phdr; - int ret; - - memset(&phdr, 0, sizeof(Elf32_Phdr)); - phdr.p_type = cpu_to_dump32(s, PT_NOTE); - phdr.p_offset = cpu_to_dump32(s, begin); - phdr.p_paddr = 0; - phdr.p_filesz = cpu_to_dump32(s, s->note_size); - phdr.p_memsz = cpu_to_dump32(s, s->note_size); - phdr.p_vaddr = 0; - - ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s); - if (ret < 0) { - error_setg_errno(errp, -ret, - "dump: failed to write program header table"); - } + memset(phdr, 0, sizeof(*phdr)); + phdr->p_type = cpu_to_dump32(s, PT_NOTE); + phdr->p_offset = cpu_to_dump32(s, s->note_offset); + phdr->p_paddr = 0; + phdr->p_filesz = cpu_to_dump32(s, s->note_size); + phdr->p_memsz = cpu_to_dump32(s, s->note_size); + phdr->p_vaddr = 0; } static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s, @@ -343,30 +355,162 @@ static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s, write_guest_note(f, s, errp); } -static void write_elf_section(DumpState *s, int type, Error **errp) +static void write_elf_phdr_note(DumpState *s, Error **errp) { - Elf32_Shdr shdr32; - Elf64_Shdr shdr64; - int shdr_size; - void *shdr; + ERRP_GUARD(); + Elf32_Phdr phdr32; + Elf64_Phdr phdr64; + void *phdr; + size_t size; int ret; - if (type == 0) { - shdr_size = sizeof(Elf32_Shdr); - memset(&shdr32, 0, shdr_size); - shdr32.sh_info = cpu_to_dump32(s, s->sh_info); - shdr = &shdr32; + if (dump_is_64bit(s)) { + prepare_elf64_phdr_note(s, &phdr64); + size = sizeof(phdr64); + phdr = &phdr64; } else { - shdr_size = sizeof(Elf64_Shdr); - memset(&shdr64, 0, shdr_size); - shdr64.sh_info = cpu_to_dump32(s, s->sh_info); - shdr = &shdr64; + prepare_elf32_phdr_note(s, &phdr32); + size = sizeof(phdr32); + phdr = &phdr32; } - ret = fd_write_vmcore(shdr, shdr_size, s); + ret = fd_write_vmcore(phdr, size, s); if (ret < 0) { error_setg_errno(errp, -ret, - "dump: failed to write section header table"); + "dump: failed to write program header table"); + } +} + +static void prepare_elf_section_hdr_zero(DumpState *s) +{ + if (dump_is_64bit(s)) { + Elf64_Shdr *shdr64 = s->elf_section_hdrs; + + shdr64->sh_info = cpu_to_dump32(s, s->phdr_num); + } else { + Elf32_Shdr *shdr32 = s->elf_section_hdrs; + + shdr32->sh_info = cpu_to_dump32(s, s->phdr_num); + } +} + +static void prepare_elf_section_hdr_string(DumpState *s, void *buff) +{ + uint64_t index = s->string_table_buf->len; + const char strtab[] = ".shstrtab"; + Elf32_Shdr shdr32 = {}; + Elf64_Shdr shdr64 = {}; + int shdr_size; + void *shdr; + + g_array_append_vals(s->string_table_buf, strtab, sizeof(strtab)); + if (dump_is_64bit(s)) { + shdr_size = sizeof(Elf64_Shdr); + shdr64.sh_type = SHT_STRTAB; + shdr64.sh_offset = s->section_offset + s->elf_section_data_size; + shdr64.sh_name = index; + shdr64.sh_size = s->string_table_buf->len; + shdr = &shdr64; + } else { + shdr_size = sizeof(Elf32_Shdr); + shdr32.sh_type = SHT_STRTAB; + shdr32.sh_offset = s->section_offset + s->elf_section_data_size; + shdr32.sh_name = index; + shdr32.sh_size = s->string_table_buf->len; + shdr = &shdr32; + } + memcpy(buff, shdr, shdr_size); +} + +static bool prepare_elf_section_hdrs(DumpState *s, Error **errp) +{ + size_t len, sizeof_shdr; + void 
*buff_hdr; + + /* + * Section ordering: + * - HDR zero + * - Arch section hdrs + * - String table hdr + */ + sizeof_shdr = dump_is_64bit(s) ? sizeof(Elf64_Shdr) : sizeof(Elf32_Shdr); + len = sizeof_shdr * s->shdr_num; + s->elf_section_hdrs = g_malloc0(len); + buff_hdr = s->elf_section_hdrs; + + /* + * The first section header is ALWAYS a special initial section + * header. + * + * The header should be 0 with one exception being that if + * phdr_num is PN_XNUM then the sh_info field contains the real + * number of segment entries. + * + * As we zero allocate the buffer we will only need to modify + * sh_info for the PN_XNUM case. + */ + if (s->phdr_num >= PN_XNUM) { + prepare_elf_section_hdr_zero(s); + } + buff_hdr += sizeof_shdr; + + /* Add architecture defined section headers */ + if (s->dump_info.arch_sections_write_hdr_fn + && s->shdr_num > 2) { + buff_hdr += s->dump_info.arch_sections_write_hdr_fn(s, buff_hdr); + + if (s->shdr_num >= SHN_LORESERVE) { + error_setg_errno(errp, EINVAL, + "dump: too many architecture defined sections"); + return false; + } + } + + /* + * String table is the last section since strings are added via + * arch_sections_write_hdr(). + */ + prepare_elf_section_hdr_string(s, buff_hdr); + return true; +} + +static void write_elf_section_headers(DumpState *s, Error **errp) +{ + size_t sizeof_shdr = dump_is_64bit(s) ? sizeof(Elf64_Shdr) : sizeof(Elf32_Shdr); + int ret; + + if (!prepare_elf_section_hdrs(s, errp)) { + return; + } + + ret = fd_write_vmcore(s->elf_section_hdrs, s->shdr_num * sizeof_shdr, s); + if (ret < 0) { + error_setg_errno(errp, -ret, "dump: failed to write section headers"); + } + + g_free(s->elf_section_hdrs); +} + +static void write_elf_sections(DumpState *s, Error **errp) +{ + int ret; + + if (s->elf_section_data_size) { + /* Write architecture section data */ + ret = fd_write_vmcore(s->elf_section_data, + s->elf_section_data_size, s); + if (ret < 0) { + error_setg_errno(errp, -ret, + "dump: failed to write architecture section data"); + return; + } + } + + /* Write string table */ + ret = fd_write_vmcore(s->string_table_buf->data, + s->string_table_buf->len, s); + if (ret < 0) { + error_setg_errno(errp, -ret, "dump: failed to write string table data"); } } @@ -386,23 +530,21 @@ static void write_data(DumpState *s, void *buf, int length, Error **errp) static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start, int64_t size, Error **errp) { + ERRP_GUARD(); int64_t i; - Error *local_err = NULL; for (i = 0; i < size / s->dump_info.page_size; i++) { write_data(s, block->host_addr + start + i * s->dump_info.page_size, - s->dump_info.page_size, &local_err); - if (local_err) { - error_propagate(errp, local_err); + s->dump_info.page_size, errp); + if (*errp) { return; } } if ((size % s->dump_info.page_size) != 0) { write_data(s, block->host_addr + start + i * s->dump_info.page_size, - size % s->dump_info.page_size, &local_err); - if (local_err) { - error_propagate(errp, local_err); + size % s->dump_info.page_size, errp); + if (*errp) { return; } } @@ -423,29 +565,30 @@ static void get_offset_range(hwaddr phys_addr, *p_offset = -1; *p_filesz = 0; - if (s->has_filter) { - if (phys_addr < s->begin || phys_addr >= s->begin + s->length) { + if (dump_has_filter(s)) { + if (phys_addr < s->filter_area_begin || + phys_addr >= s->filter_area_begin + s->filter_area_length) { return; } } QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { - if (s->has_filter) { - if (block->target_start >= s->begin + s->length || - block->target_end <= 
s->begin) { + if (dump_has_filter(s)) { + if (block->target_start >= s->filter_area_begin + s->filter_area_length || + block->target_end <= s->filter_area_begin) { /* This block is out of the range */ continue; } - if (s->begin <= block->target_start) { + if (s->filter_area_begin <= block->target_start) { start = block->target_start; } else { - start = s->begin; + start = s->filter_area_begin; } size_in_block = block->target_end - start; - if (s->begin + s->length < block->target_end) { - size_in_block -= block->target_end - (s->begin + s->length); + if (s->filter_area_begin + s->filter_area_length < block->target_end) { + size_in_block -= block->target_end - (s->filter_area_begin + s->filter_area_length); } } else { start = block->target_start; @@ -470,53 +613,56 @@ static void get_offset_range(hwaddr phys_addr, } } -static void write_elf_loads(DumpState *s, Error **errp) +static void write_elf_phdr_loads(DumpState *s, Error **errp) { + ERRP_GUARD(); hwaddr offset, filesz; MemoryMapping *memory_mapping; uint32_t phdr_index = 1; - uint32_t max_index; - Error *local_err = NULL; - - if (s->have_section) { - max_index = s->sh_info; - } else { - max_index = s->phdr_num; - } QTAILQ_FOREACH(memory_mapping, &s->list.head, next) { get_offset_range(memory_mapping->phys_addr, memory_mapping->length, s, &offset, &filesz); - if (s->dump_info.d_class == ELFCLASS64) { + if (dump_is_64bit(s)) { write_elf64_load(s, memory_mapping, phdr_index++, offset, - filesz, &local_err); + filesz, errp); } else { write_elf32_load(s, memory_mapping, phdr_index++, offset, - filesz, &local_err); + filesz, errp); } - if (local_err) { - error_propagate(errp, local_err); + if (*errp) { return; } - if (phdr_index >= max_index) { + if (phdr_index >= s->phdr_num) { break; } } } +static void write_elf_notes(DumpState *s, Error **errp) +{ + if (dump_is_64bit(s)) { + write_elf64_notes(fd_write_vmcore, s, errp); + } else { + write_elf32_notes(fd_write_vmcore, s, errp); + } +} + /* write elf header, PT_NOTE and elf note to vmcore. 
*/ static void dump_begin(DumpState *s, Error **errp) { - Error *local_err = NULL; + ERRP_GUARD(); /* * the vmcore's format is: * -------------- * | elf header | * -------------- + * | sctn_hdr | + * -------------- * | PT_NOTE | * -------------- * | PT_LOAD | @@ -525,8 +671,6 @@ static void dump_begin(DumpState *s, Error **errp) * -------------- * | PT_LOAD | * -------------- - * | sec_hdr | - * -------------- * | elf note | * -------------- * | memory | @@ -537,143 +681,137 @@ static void dump_begin(DumpState *s, Error **errp) */ /* write elf header to vmcore */ - if (s->dump_info.d_class == ELFCLASS64) { - write_elf64_header(s, &local_err); - } else { - write_elf32_header(s, &local_err); - } - if (local_err) { - error_propagate(errp, local_err); + write_elf_header(s, errp); + if (*errp) { return; } - if (s->dump_info.d_class == ELFCLASS64) { - /* write PT_NOTE to vmcore */ - write_elf64_note(s, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - - /* write all PT_LOAD to vmcore */ - write_elf_loads(s, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - - /* write section to vmcore */ - if (s->have_section) { - write_elf_section(s, 1, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - } - - /* write notes to vmcore */ - write_elf64_notes(fd_write_vmcore, s, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - } else { - /* write PT_NOTE to vmcore */ - write_elf32_note(s, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - - /* write all PT_LOAD to vmcore */ - write_elf_loads(s, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - - /* write section to vmcore */ - if (s->have_section) { - write_elf_section(s, 0, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - } - - /* write notes to vmcore */ - write_elf32_notes(fd_write_vmcore, s, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } + /* write section headers to vmcore */ + write_elf_section_headers(s, errp); + if (*errp) { + return; } + + /* write PT_NOTE to vmcore */ + write_elf_phdr_note(s, errp); + if (*errp) { + return; + } + + /* write all PT_LOADs to vmcore */ + write_elf_phdr_loads(s, errp); + if (*errp) { + return; + } + + /* write notes to vmcore */ + write_elf_notes(s, errp); } -static int get_next_block(DumpState *s, GuestPhysBlock *block) +int64_t dump_filtered_memblock_size(GuestPhysBlock *block, + int64_t filter_area_start, + int64_t filter_area_length) { - while (1) { - block = QTAILQ_NEXT(block, next); - if (!block) { - /* no more block */ - return 1; - } + int64_t size, left, right; - s->start = 0; - s->next_block = block; - if (s->has_filter) { - if (block->target_start >= s->begin + s->length || - block->target_end <= s->begin) { - /* This block is out of the range */ - continue; - } - - if (s->begin > block->target_start) { - s->start = s->begin - block->target_start; - } - } - - return 0; + /* No filter, return full size */ + if (!filter_area_length) { + return block->target_end - block->target_start; } + + /* calculate the overlapped region. */ + left = MAX(filter_area_start, block->target_start); + right = MIN(filter_area_start + filter_area_length, block->target_end); + size = right - left; + size = size > 0 ? 
size : 0; + + return size; +} + +int64_t dump_filtered_memblock_start(GuestPhysBlock *block, + int64_t filter_area_start, + int64_t filter_area_length) +{ + if (filter_area_length) { + /* return -1 if the block is not within filter area */ + if (block->target_start >= filter_area_start + filter_area_length || + block->target_end <= filter_area_start) { + return -1; + } + + if (filter_area_start > block->target_start) { + return filter_area_start - block->target_start; + } + } + + return 0; } /* write all memory to vmcore */ static void dump_iterate(DumpState *s, Error **errp) { + ERRP_GUARD(); GuestPhysBlock *block; - int64_t size; - Error *local_err = NULL; + int64_t memblock_size, memblock_start; - do { - block = s->next_block; - - size = block->target_end - block->target_start; - if (s->has_filter) { - size -= s->start; - if (s->begin + s->length < block->target_end) { - size -= block->target_end - (s->begin + s->length); - } + QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { + memblock_start = dump_filtered_memblock_start(block, s->filter_area_begin, s->filter_area_length); + if (memblock_start == -1) { + continue; } - write_memory(s, block, s->start, size, &local_err); - if (local_err) { - error_propagate(errp, local_err); + + memblock_size = dump_filtered_memblock_size(block, s->filter_area_begin, s->filter_area_length); + + /* Write the memory to file */ + write_memory(s, block, memblock_start, memblock_size, errp); + if (*errp) { return; } + } +} - } while (!get_next_block(s, block)); +static void dump_end(DumpState *s, Error **errp) +{ + int rc; + ERRP_GUARD(); + + if (s->elf_section_data_size) { + s->elf_section_data = g_malloc0(s->elf_section_data_size); + } + + /* Adds the architecture defined section data to s->elf_section_data */ + if (s->dump_info.arch_sections_write_fn && + s->elf_section_data_size) { + rc = s->dump_info.arch_sections_write_fn(s, s->elf_section_data); + if (rc) { + error_setg_errno(errp, rc, + "dump: failed to get arch section data"); + g_free(s->elf_section_data); + return; + } + } + + /* write sections to vmcore */ + write_elf_sections(s, errp); } static void create_vmcore(DumpState *s, Error **errp) { - Error *local_err = NULL; + ERRP_GUARD(); - dump_begin(s, &local_err); - if (local_err) { - error_propagate(errp, local_err); + dump_begin(s, errp); + if (*errp) { return; } + /* Iterate over memory and dump it to file */ dump_iterate(s, errp); + if (*errp) { + return; + } + + /* Write the section data */ + dump_end(s, errp); } static int write_start_flat_header(int fd) @@ -768,7 +906,7 @@ static void get_note_sizes(DumpState *s, const void *note, uint64_t name_sz; uint64_t desc_sz; - if (s->dump_info.d_class == ELFCLASS64) { + if (dump_is_64bit(s)) { const Elf64_Nhdr *hdr = note; note_head_sz = sizeof(Elf64_Nhdr); name_sz = tswap64(hdr->n_namesz); @@ -806,6 +944,7 @@ static bool note_name_equal(DumpState *s, /* write common header, sub header and elf note to vmcore */ static void create_header32(DumpState *s, Error **errp) { + ERRP_GUARD(); DiskDumpHeader32 *dh = NULL; KdumpSubHeader32 *kh = NULL; size_t size; @@ -814,7 +953,6 @@ static void create_header32(DumpState *s, Error **errp) uint32_t bitmap_blocks; uint32_t status = 0; uint64_t offset_note; - Error *local_err = NULL; /* write common header, the version of kdump-compressed format is 6th */ size = sizeof(DiskDumpHeader32); @@ -890,9 +1028,8 @@ static void create_header32(DumpState *s, Error **errp) s->note_buf_offset = 0; /* use s->note_buf to store notes temporarily */ - 
write_elf32_notes(buf_write_note, s, &local_err); - if (local_err) { - error_propagate(errp, local_err); + write_elf32_notes(buf_write_note, s, errp); + if (*errp) { goto out; } if (write_buffer(s->fd, offset_note, s->note_buf, @@ -918,6 +1055,7 @@ out: /* write common header, sub header and elf note to vmcore */ static void create_header64(DumpState *s, Error **errp) { + ERRP_GUARD(); DiskDumpHeader64 *dh = NULL; KdumpSubHeader64 *kh = NULL; size_t size; @@ -926,7 +1064,6 @@ static void create_header64(DumpState *s, Error **errp) uint32_t bitmap_blocks; uint32_t status = 0; uint64_t offset_note; - Error *local_err = NULL; /* write common header, the version of kdump-compressed format is 6th */ size = sizeof(DiskDumpHeader64); @@ -1002,9 +1139,8 @@ static void create_header64(DumpState *s, Error **errp) s->note_buf_offset = 0; /* use s->note_buf to store notes temporarily */ - write_elf64_notes(buf_write_note, s, &local_err); - if (local_err) { - error_propagate(errp, local_err); + write_elf64_notes(buf_write_note, s, errp); + if (*errp) { goto out; } @@ -1030,10 +1166,10 @@ out: static void write_dump_header(DumpState *s, Error **errp) { - if (s->dump_info.d_class == ELFCLASS32) { - create_header32(s, errp); - } else { + if (dump_is_64bit(s)) { create_header64(s, errp); + } else { + create_header32(s, errp); } } @@ -1117,55 +1253,81 @@ static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn) } /* - * exam every page and return the page frame number and the address of the page. - * bufptr can be NULL. note: the blocks here is supposed to reflect guest-phys - * blocks, so block->target_start and block->target_end should be interal - * multiples of the target page size. + * Return the page frame number and the page content in *bufptr. bufptr can be + * NULL. If not NULL, *bufptr must contains a target page size of pre-allocated + * memory. This is not necessarily the memory returned. 
*/ static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr, uint8_t **bufptr, DumpState *s) { GuestPhysBlock *block = *blockptr; - hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1); - uint8_t *buf; + uint32_t page_size = s->dump_info.page_size; + uint8_t *buf = NULL, *hbuf; + hwaddr addr; /* block == NULL means the start of the iteration */ if (!block) { block = QTAILQ_FIRST(&s->guest_phys_blocks.head); *blockptr = block; - assert((block->target_start & ~target_page_mask) == 0); - assert((block->target_end & ~target_page_mask) == 0); - *pfnptr = dump_paddr_to_pfn(s, block->target_start); - if (bufptr) { - *bufptr = block->host_addr; - } - return true; - } - - *pfnptr = *pfnptr + 1; - addr = dump_pfn_to_paddr(s, *pfnptr); - - if ((addr >= block->target_start) && - (addr + s->dump_info.page_size <= block->target_end)) { - buf = block->host_addr + (addr - block->target_start); + addr = block->target_start; + *pfnptr = dump_paddr_to_pfn(s, addr); } else { - /* the next page is in the next block */ - block = QTAILQ_NEXT(block, next); - *blockptr = block; - if (!block) { - return false; + *pfnptr += 1; + addr = dump_pfn_to_paddr(s, *pfnptr); + } + assert(block != NULL); + + while (1) { + if (addr >= block->target_start && addr < block->target_end) { + size_t n = MIN(block->target_end - addr, page_size - addr % page_size); + hbuf = block->host_addr + (addr - block->target_start); + if (!buf) { + if (n == page_size) { + /* this is a whole target page, go for it */ + assert(addr % page_size == 0); + buf = hbuf; + break; + } else if (bufptr) { + assert(*bufptr); + buf = *bufptr; + memset(buf, 0, page_size); + } else { + return true; + } + } + + memcpy(buf + addr % page_size, hbuf, n); + addr += n; + if (addr % page_size == 0) { + /* we filled up the page */ + break; + } + } else { + /* the next page is in the next block */ + *blockptr = block = QTAILQ_NEXT(block, next); + if (!block) { + break; + } + + addr = block->target_start; + /* are we still in the same page? */ + if (dump_paddr_to_pfn(s, addr) != *pfnptr) { + if (buf) { + /* no, but we already filled something earlier, return it */ + break; + } else { + /* else continue from there */ + *pfnptr = dump_paddr_to_pfn(s, addr); + } + } } - assert((block->target_start & ~target_page_mask) == 0); - assert((block->target_end & ~target_page_mask) == 0); - *pfnptr = dump_paddr_to_pfn(s, block->target_start); - buf = block->host_addr; } if (bufptr) { *bufptr = buf; } - return true; + return buf != NULL; } static void write_dump_bitmap(DumpState *s, Error **errp) @@ -1289,14 +1451,6 @@ static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress) return 0; } -/* - * check if the page is all 0 - */ -static inline bool is_zero_page(const uint8_t *buf, size_t page_size) -{ - return buffer_is_zero(buf, page_size); -} - static void write_dump_pages(DumpState *s, Error **errp) { int ret = 0; @@ -1311,6 +1465,7 @@ static void write_dump_pages(DumpState *s, Error **errp) uint8_t *buf; GuestPhysBlock *block_iter = NULL; uint64_t pfn_iter; + g_autofree uint8_t *page = NULL; /* get offset of page_desc and page_data in dump file */ offset_desc = s->offset_page; @@ -1346,14 +1501,15 @@ static void write_dump_pages(DumpState *s, Error **errp) } offset_data += s->dump_info.page_size; + page = g_malloc(s->dump_info.page_size); /* * dump memory to vmcore page by page. 
zero page will all be resided in the * first page of page section */ - while (get_next_page(&block_iter, &pfn_iter, &buf, s)) { + for (buf = page; get_next_page(&block_iter, &pfn_iter, &buf, s); buf = page) { /* check zero page */ - if (is_zero_page(buf, s->dump_info.page_size)) { + if (buffer_is_zero(buf, s->dump_info.page_size)) { ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor), false); if (ret < 0) { @@ -1468,8 +1624,8 @@ out: static void create_kdump_vmcore(DumpState *s, Error **errp) { + ERRP_GUARD(); int ret; - Error *local_err = NULL; /* * the kdump-compressed format is: @@ -1499,21 +1655,18 @@ static void create_kdump_vmcore(DumpState *s, Error **errp) return; } - write_dump_header(s, &local_err); - if (local_err) { - error_propagate(errp, local_err); + write_dump_header(s, errp); + if (*errp) { return; } - write_dump_bitmap(s, &local_err); - if (local_err) { - error_propagate(errp, local_err); + write_dump_bitmap(s, errp); + if (*errp) { return; } - write_dump_pages(s, &local_err); - if (local_err) { - error_propagate(errp, local_err); + write_dump_pages(s, errp); + if (*errp) { return; } @@ -1524,30 +1677,22 @@ static void create_kdump_vmcore(DumpState *s, Error **errp) } } -static ram_addr_t get_start_block(DumpState *s) +static int validate_start_block(DumpState *s) { GuestPhysBlock *block; - if (!s->has_filter) { - s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head); + if (!dump_has_filter(s)) { return 0; } QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { - if (block->target_start >= s->begin + s->length || - block->target_end <= s->begin) { - /* This block is out of the range */ + /* This block is out of the range */ + if (block->target_start >= s->filter_area_begin + s->filter_area_length || + block->target_end <= s->filter_area_begin) { continue; } - - s->next_block = block; - if (s->begin > block->target_start) { - s->start = s->begin - block->target_start; - } else { - s->start = 0; - } - return s->start; - } + return 0; + } return -1; } @@ -1568,31 +1713,25 @@ static void dump_state_prepare(DumpState *s) *s = (DumpState) { .status = DUMP_STATUS_ACTIVE }; } -bool dump_in_progress(void) +bool qemu_system_dump_in_progress(void) { DumpState *state = &dump_state_global; return (qatomic_read(&state->status) == DUMP_STATUS_ACTIVE); } -/* calculate total size of memory to be dumped (taking filter into - * acoount.) */ +/* + * calculate total size of memory to be dumped (taking filter into + * account.) + */ static int64_t dump_calculate_size(DumpState *s) { GuestPhysBlock *block; - int64_t size = 0, total = 0, left = 0, right = 0; + int64_t total = 0; QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { - if (s->has_filter) { - /* calculate the overlapped region. */ - left = MAX(s->begin, block->target_start); - right = MIN(s->begin + s->length, block->target_end); - size = right - left; - size = size > 0 ? 
size : 0; - } else { - /* count the whole region in */ - size = (block->target_end - block->target_start); - } - total += size; + total += dump_filtered_memblock_size(block, + s->filter_area_begin, + s->filter_area_length); } return total; @@ -1643,10 +1782,10 @@ static void dump_init(DumpState *s, int fd, bool has_format, DumpGuestMemoryFormat format, bool paging, bool has_filter, int64_t begin, int64_t length, Error **errp) { + ERRP_GUARD(); VMCoreInfoState *vmci = vmcoreinfo_find(); CPUState *cpu; int nr_cpus; - Error *err = NULL; int ret; s->has_format = has_format; @@ -1675,9 +1814,20 @@ static void dump_init(DumpState *s, int fd, bool has_format, } s->fd = fd; - s->has_filter = has_filter; - s->begin = begin; - s->length = length; + if (has_filter && !length) { + error_setg(errp, QERR_INVALID_PARAMETER, "length"); + goto cleanup; + } + s->filter_area_begin = begin; + s->filter_area_length = length; + + /* First index is 0, it's the special null name */ + s->string_table_buf = g_array_new(FALSE, TRUE, 1); + /* + * Allocate the null name, due to the clearing option set to true + * it will be 0. + */ + g_array_set_size(s->string_table_buf, 1); memory_mapping_list_init(&s->list); @@ -1694,8 +1844,8 @@ static void dump_init(DumpState *s, int fd, bool has_format, goto cleanup; } - s->start = get_start_block(s); - if (s->start == -1) { + /* Is the filter filtering everything? */ + if (validate_start_block(s) == -1) { error_setg(errp, QERR_INVALID_PARAMETER, "begin"); goto cleanup; } @@ -1731,8 +1881,8 @@ static void dump_init(DumpState *s, int fd, bool has_format, uint32_t size; uint16_t format; - note_head_size = s->dump_info.d_class == ELFCLASS32 ? - sizeof(Elf32_Nhdr) : sizeof(Elf64_Nhdr); + note_head_size = dump_is_64bit(s) ? + sizeof(Elf64_Nhdr) : sizeof(Elf32_Nhdr); format = le16_to_cpu(vmci->vmcoreinfo.guest_format); size = le32_to_cpu(vmci->vmcoreinfo.size); @@ -1765,9 +1915,8 @@ static void dump_init(DumpState *s, int fd, bool has_format, /* get memory mapping */ if (paging) { - qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err); - if (err != NULL) { - error_propagate(errp, err); + qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, errp); + if (*errp) { goto cleanup; } } else { @@ -1811,51 +1960,58 @@ static void dump_init(DumpState *s, int fd, bool has_format, return; } - if (s->has_filter) { - memory_mapping_filter(&s->list, s->begin, s->length); + if (dump_has_filter(s)) { + memory_mapping_filter(&s->list, s->filter_area_begin, s->filter_area_length); } /* - * calculate phdr_num - * - * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow + * The first section header is always a special one in which most + * fields are 0. The section header string table is also always + * set. */ - s->phdr_num = 1; /* PT_NOTE */ - if (s->list.num < UINT16_MAX - 2) { + s->shdr_num = 2; + + /* + * Adds the number of architecture sections to shdr_num and sets + * elf_section_data_size so we know the offsets and sizes of all + * parts. + */ + if (s->dump_info.arch_sections_add_fn) { + s->dump_info.arch_sections_add_fn(s); + } + + /* + * calculate shdr_num so we know the offsets and sizes of all + * parts. + * Calculate phdr_num + * + * The absolute maximum amount of phdrs is UINT32_MAX - 1 as + * sh_info is 32 bit. There's special handling once we go over + * UINT16_MAX - 1 but that is handled in the ehdr and section + * code. 
+ */ + s->phdr_num = 1; /* Reserve PT_NOTE */ + if (s->list.num <= UINT32_MAX - 1) { s->phdr_num += s->list.num; - s->have_section = false; } else { - s->have_section = true; - s->phdr_num = PN_XNUM; - s->sh_info = 1; /* PT_NOTE */ - - /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */ - if (s->list.num <= UINT32_MAX - 1) { - s->sh_info += s->list.num; - } else { - s->sh_info = UINT32_MAX; - } + s->phdr_num = UINT32_MAX; } - if (s->dump_info.d_class == ELFCLASS64) { - if (s->have_section) { - s->memory_offset = sizeof(Elf64_Ehdr) + - sizeof(Elf64_Phdr) * s->sh_info + - sizeof(Elf64_Shdr) + s->note_size; - } else { - s->memory_offset = sizeof(Elf64_Ehdr) + - sizeof(Elf64_Phdr) * s->phdr_num + s->note_size; - } + /* + * Now that the number of section and program headers is known we + * can calculate the offsets of the headers and data. + */ + if (dump_is_64bit(s)) { + s->shdr_offset = sizeof(Elf64_Ehdr); + s->phdr_offset = s->shdr_offset + sizeof(Elf64_Shdr) * s->shdr_num; + s->note_offset = s->phdr_offset + sizeof(Elf64_Phdr) * s->phdr_num; } else { - if (s->have_section) { - s->memory_offset = sizeof(Elf32_Ehdr) + - sizeof(Elf32_Phdr) * s->sh_info + - sizeof(Elf32_Shdr) + s->note_size; - } else { - s->memory_offset = sizeof(Elf32_Ehdr) + - sizeof(Elf32_Phdr) * s->phdr_num + s->note_size; - } + s->shdr_offset = sizeof(Elf32_Ehdr); + s->phdr_offset = s->shdr_offset + sizeof(Elf32_Shdr) * s->shdr_num; + s->note_offset = s->phdr_offset + sizeof(Elf32_Phdr) * s->phdr_num; } + s->memory_offset = s->note_offset + s->note_size; + s->section_offset = s->memory_offset + s->total_size; return; @@ -1866,33 +2022,32 @@ cleanup: /* this operation might be time consuming. */ static void dump_process(DumpState *s, Error **errp) { - Error *local_err = NULL; + ERRP_GUARD(); DumpQueryResult *result = NULL; if (s->has_format && s->format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) { #ifdef TARGET_X86_64 - create_win_dump(s, &local_err); + create_win_dump(s, errp); #endif } else if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) { - create_kdump_vmcore(s, &local_err); + create_kdump_vmcore(s, errp); } else { - create_vmcore(s, &local_err); + create_vmcore(s, errp); } /* make sure status is written after written_size updates */ smp_wmb(); qatomic_set(&s->status, - (local_err ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED)); + (*errp ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED)); /* send DUMP_COMPLETED message (unconditionally) */ result = qmp_query_dump(NULL); /* should never fail */ assert(result); - qapi_event_send_dump_completed(result, !!local_err, (local_err ? - error_get_pretty(local_err) : NULL)); + qapi_event_send_dump_completed(result, !!*errp, (*errp ? 
+ error_get_pretty(*errp) : NULL)); qapi_free_DumpQueryResult(result); - error_propagate(errp, local_err); dump_cleanup(s); } @@ -1921,10 +2076,10 @@ void qmp_dump_guest_memory(bool paging, const char *file, int64_t length, bool has_format, DumpGuestMemoryFormat format, Error **errp) { + ERRP_GUARD(); const char *p; int fd = -1; DumpState *s; - Error *local_err = NULL; bool detach_p = false; if (runstate_check(RUN_STATE_INMIGRATE)) { @@ -1934,7 +2089,7 @@ void qmp_dump_guest_memory(bool paging, const char *file, /* if there is a dump in background, we should wait until the dump * finished */ - if (dump_in_progress()) { + if (qemu_system_dump_in_progress()) { error_setg(errp, "There is a dump in process, please wait."); return; } @@ -2005,13 +2160,27 @@ void qmp_dump_guest_memory(bool paging, const char *file, return; } + if (!dump_migration_blocker) { + error_setg(&dump_migration_blocker, + "Live migration disabled: dump-guest-memory in progress"); + } + + /* + * Allows even for -only-migratable, but forbid migration during the + * process of dump guest memory. + */ + if (migrate_add_blocker_internal(dump_migration_blocker, errp)) { + /* Remember to release the fd before passing it over to dump state */ + close(fd); + return; + } + s = &dump_state_global; dump_state_prepare(s); dump_init(s, fd, has_format, format, paging, has_begin, - begin, length, &local_err); - if (local_err) { - error_propagate(errp, local_err); + begin, length, errp); + if (*errp) { qatomic_set(&s->status, DUMP_STATUS_FAILED); return; } @@ -2030,7 +2199,7 @@ void qmp_dump_guest_memory(bool paging, const char *file, DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp) { DumpGuestMemoryCapability *cap = - g_malloc0(sizeof(DumpGuestMemoryCapability)); + g_new0(DumpGuestMemoryCapability, 1); DumpGuestMemoryFormatList **tail = &cap->formats; /* elf is always available */ diff --git a/dump/win_dump.c b/dump/win_dump.c index c5eb5a9aac..f20b6051b6 100644 --- a/dump/win_dump.c +++ b/dump/win_dump.c @@ -9,7 +9,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/cutils.h" #include "elf.h" #include "exec/hwaddr.h" @@ -24,11 +23,31 @@ #include "hw/misc/vmcoreinfo.h" #include "win_dump.h" -static size_t write_run(WinDumpPhyMemRun64 *run, int fd, Error **errp) +static size_t win_dump_ptr_size(bool x64) +{ + return x64 ? sizeof(uint64_t) : sizeof(uint32_t); +} + +#define _WIN_DUMP_FIELD(f) (x64 ? h->x64.f : h->x32.f) +#define WIN_DUMP_FIELD(field) _WIN_DUMP_FIELD(field) + +#define _WIN_DUMP_FIELD_PTR(f) (x64 ? (void *)&h->x64.f : (void *)&h->x32.f) +#define WIN_DUMP_FIELD_PTR(field) _WIN_DUMP_FIELD_PTR(field) + +#define _WIN_DUMP_FIELD_SIZE(f) (x64 ? sizeof(h->x64.f) : sizeof(h->x32.f)) +#define WIN_DUMP_FIELD_SIZE(field) _WIN_DUMP_FIELD_SIZE(field) + +static size_t win_dump_ctx_size(bool x64) +{ + return x64 ? 
sizeof(WinContext64) : sizeof(WinContext32); +} + +static size_t write_run(uint64_t base_page, uint64_t page_count, + int fd, Error **errp) { void *buf; - uint64_t addr = run->BasePage << TARGET_PAGE_BITS; - uint64_t size = run->PageCount << TARGET_PAGE_BITS; + uint64_t addr = base_page << TARGET_PAGE_BITS; + uint64_t size = page_count << TARGET_PAGE_BITS; uint64_t len, l; size_t total = 0; @@ -57,15 +76,16 @@ static size_t write_run(WinDumpPhyMemRun64 *run, int fd, Error **errp) return total; } -static void write_runs(DumpState *s, WinDumpHeader64 *h, Error **errp) +static void write_runs(DumpState *s, WinDumpHeader *h, bool x64, Error **errp) { - WinDumpPhyMemDesc64 *desc = &h->PhysicalMemoryBlock; - WinDumpPhyMemRun64 *run = desc->Run; + uint64_t BasePage, PageCount; Error *local_err = NULL; int i; - for (i = 0; i < desc->NumberOfRuns; i++) { - s->written_size += write_run(run + i, s->fd, &local_err); + for (i = 0; i < WIN_DUMP_FIELD(PhysicalMemoryBlock.NumberOfRuns); i++) { + BasePage = WIN_DUMP_FIELD(PhysicalMemoryBlock.Run[i].BasePage); + PageCount = WIN_DUMP_FIELD(PhysicalMemoryBlock.Run[i].PageCount); + s->written_size += write_run(BasePage, PageCount, s->fd, &local_err); if (local_err) { error_propagate(errp, local_err); return; @@ -73,30 +93,45 @@ static void write_runs(DumpState *s, WinDumpHeader64 *h, Error **errp) } } -static void patch_mm_pfn_database(WinDumpHeader64 *h, Error **errp) +static int cpu_read_ptr(bool x64, CPUState *cpu, uint64_t addr, uint64_t *ptr) +{ + int ret; + uint32_t ptr32; + uint64_t ptr64; + + ret = cpu_memory_rw_debug(cpu, addr, x64 ? (void *)&ptr64 : (void *)&ptr32, + win_dump_ptr_size(x64), 0); + + *ptr = x64 ? ptr64 : ptr32; + + return ret; +} + +static void patch_mm_pfn_database(WinDumpHeader *h, bool x64, Error **errp) { if (cpu_memory_rw_debug(first_cpu, - h->KdDebuggerDataBlock + KDBG_MM_PFN_DATABASE_OFFSET64, - (uint8_t *)&h->PfnDatabase, sizeof(h->PfnDatabase), 0)) { + WIN_DUMP_FIELD(KdDebuggerDataBlock) + KDBG_MM_PFN_DATABASE_OFFSET, + WIN_DUMP_FIELD_PTR(PfnDatabase), + WIN_DUMP_FIELD_SIZE(PfnDatabase), 0)) { error_setg(errp, "win-dump: failed to read MmPfnDatabase"); return; } } -static void patch_bugcheck_data(WinDumpHeader64 *h, Error **errp) +static void patch_bugcheck_data(WinDumpHeader *h, bool x64, Error **errp) { uint64_t KiBugcheckData; - if (cpu_memory_rw_debug(first_cpu, - h->KdDebuggerDataBlock + KDBG_KI_BUGCHECK_DATA_OFFSET64, - (uint8_t *)&KiBugcheckData, sizeof(KiBugcheckData), 0)) { + if (cpu_read_ptr(x64, first_cpu, + WIN_DUMP_FIELD(KdDebuggerDataBlock) + KDBG_KI_BUGCHECK_DATA_OFFSET, + &KiBugcheckData)) { error_setg(errp, "win-dump: failed to read KiBugcheckData"); return; } - if (cpu_memory_rw_debug(first_cpu, - KiBugcheckData, - h->BugcheckData, sizeof(h->BugcheckData), 0)) { + if (cpu_memory_rw_debug(first_cpu, KiBugcheckData, + WIN_DUMP_FIELD(BugcheckData), + WIN_DUMP_FIELD_SIZE(BugcheckData), 0)) { error_setg(errp, "win-dump: failed to read bugcheck data"); return; } @@ -105,62 +140,72 @@ static void patch_bugcheck_data(WinDumpHeader64 *h, Error **errp) * If BugcheckCode wasn't saved, we consider guest OS as alive. */ - if (!h->BugcheckCode) { - h->BugcheckCode = LIVE_SYSTEM_DUMP; + if (!WIN_DUMP_FIELD(BugcheckCode)) { + *(uint32_t *)WIN_DUMP_FIELD_PTR(BugcheckCode) = LIVE_SYSTEM_DUMP; } } /* * This routine tries to correct mistakes in crashdump header. 
*/ -static void patch_header(WinDumpHeader64 *h) +static void patch_header(WinDumpHeader *h, bool x64) { Error *local_err = NULL; - h->RequiredDumpSpace = sizeof(WinDumpHeader64) + - (h->PhysicalMemoryBlock.NumberOfPages << TARGET_PAGE_BITS); - h->PhysicalMemoryBlock.unused = 0; - h->unused1 = 0; + if (x64) { + h->x64.RequiredDumpSpace = sizeof(WinDumpHeader64) + + (h->x64.PhysicalMemoryBlock.NumberOfPages << TARGET_PAGE_BITS); + h->x64.PhysicalMemoryBlock.unused = 0; + h->x64.unused1 = 0; + } else { + h->x32.RequiredDumpSpace = sizeof(WinDumpHeader32) + + (h->x32.PhysicalMemoryBlock.NumberOfPages << TARGET_PAGE_BITS); + } - patch_mm_pfn_database(h, &local_err); + patch_mm_pfn_database(h, x64, &local_err); if (local_err) { warn_report_err(local_err); local_err = NULL; } - patch_bugcheck_data(h, &local_err); + patch_bugcheck_data(h, x64, &local_err); if (local_err) { warn_report_err(local_err); } } -static void check_header(WinDumpHeader64 *h, Error **errp) +static bool check_header(WinDumpHeader *h, bool *x64, Error **errp) { const char Signature[] = "PAGE"; - const char ValidDump[] = "DU64"; if (memcmp(h->Signature, Signature, sizeof(h->Signature))) { error_setg(errp, "win-dump: invalid header, expected '%.4s'," " got '%.4s'", Signature, h->Signature); - return; + return false; } - if (memcmp(h->ValidDump, ValidDump, sizeof(h->ValidDump))) { - error_setg(errp, "win-dump: invalid header, expected '%.4s'," - " got '%.4s'", ValidDump, h->ValidDump); - return; + if (!memcmp(h->ValidDump, "DUMP", sizeof(h->ValidDump))) { + *x64 = false; + } else if (!memcmp(h->ValidDump, "DU64", sizeof(h->ValidDump))) { + *x64 = true; + } else { + error_setg(errp, "win-dump: invalid header, expected 'DUMP' or 'DU64'," + " got '%.4s'", h->ValidDump); + return false; } + + return true; } -static void check_kdbg(WinDumpHeader64 *h, Error **errp) +static void check_kdbg(WinDumpHeader *h, bool x64, Error **errp) { const char OwnerTag[] = "KDBG"; char read_OwnerTag[4]; - uint64_t KdDebuggerDataBlock = h->KdDebuggerDataBlock; + uint64_t KdDebuggerDataBlock = WIN_DUMP_FIELD(KdDebuggerDataBlock); bool try_fallback = true; try_again: if (cpu_memory_rw_debug(first_cpu, - KdDebuggerDataBlock + KDBG_OWNER_TAG_OFFSET64, + KdDebuggerDataBlock + KDBG_OWNER_TAG_OFFSET, (uint8_t *)&read_OwnerTag, sizeof(read_OwnerTag), 0)) { error_setg(errp, "win-dump: failed to read OwnerTag"); return; @@ -174,7 +219,7 @@ try_again: * we try to use KDBG obtained by guest driver. 
*/ - KdDebuggerDataBlock = h->BugcheckParameter1; + KdDebuggerDataBlock = WIN_DUMP_FIELD(BugcheckParameter1); try_fallback = false; goto try_again; } else { @@ -185,7 +230,11 @@ try_again: } } - h->KdDebuggerDataBlock = KdDebuggerDataBlock; + if (x64) { + h->x64.KdDebuggerDataBlock = KdDebuggerDataBlock; + } else { + h->x32.KdDebuggerDataBlock = KdDebuggerDataBlock; + } } struct saved_context { @@ -193,24 +242,25 @@ struct saved_context { uint64_t addr; }; -static void patch_and_save_context(WinDumpHeader64 *h, +static void patch_and_save_context(WinDumpHeader *h, bool x64, struct saved_context *saved_ctx, Error **errp) { + uint64_t KdDebuggerDataBlock = WIN_DUMP_FIELD(KdDebuggerDataBlock); uint64_t KiProcessorBlock; uint16_t OffsetPrcbContext; CPUState *cpu; int i = 0; - if (cpu_memory_rw_debug(first_cpu, - h->KdDebuggerDataBlock + KDBG_KI_PROCESSOR_BLOCK_OFFSET64, - (uint8_t *)&KiProcessorBlock, sizeof(KiProcessorBlock), 0)) { + if (cpu_read_ptr(x64, first_cpu, + KdDebuggerDataBlock + KDBG_KI_PROCESSOR_BLOCK_OFFSET, + &KiProcessorBlock)) { error_setg(errp, "win-dump: failed to read KiProcessorBlock"); return; } if (cpu_memory_rw_debug(first_cpu, - h->KdDebuggerDataBlock + KDBG_OFFSET_PRCB_CONTEXT_OFFSET64, + KdDebuggerDataBlock + KDBG_OFFSET_PRCB_CONTEXT_OFFSET, (uint8_t *)&OffsetPrcbContext, sizeof(OffsetPrcbContext), 0)) { error_setg(errp, "win-dump: failed to read OffsetPrcbContext"); return; @@ -223,17 +273,24 @@ static void patch_and_save_context(WinDumpHeader64 *h, uint64_t Context; WinContext ctx; - if (cpu_memory_rw_debug(first_cpu, - KiProcessorBlock + i * sizeof(uint64_t), - (uint8_t *)&Prcb, sizeof(Prcb), 0)) { + if (i >= WIN_DUMP_FIELD(NumberProcessors)) { + warn_report("win-dump: number of QEMU CPUs is bigger than" + " NumberProcessors (%u) in guest Windows", + WIN_DUMP_FIELD(NumberProcessors)); + return; + } + + if (cpu_read_ptr(x64, first_cpu, + KiProcessorBlock + i * win_dump_ptr_size(x64), + &Prcb)) { error_setg(errp, "win-dump: failed to read" " CPU #%d PRCB location", i); return; } - if (cpu_memory_rw_debug(first_cpu, + if (cpu_read_ptr(x64, first_cpu, Prcb + OffsetPrcbContext, - (uint8_t *)&Context, sizeof(Context), 0)) { + &Context)) { error_setg(errp, "win-dump: failed to read" " CPU #%d ContextFrame location", i); return; @@ -241,56 +298,88 @@ static void patch_and_save_context(WinDumpHeader64 *h, saved_ctx[i].addr = Context; - ctx = (WinContext){ - .ContextFlags = WIN_CTX_ALL, - .MxCsr = env->mxcsr, - - .SegEs = env->segs[0].selector, - .SegCs = env->segs[1].selector, - .SegSs = env->segs[2].selector, - .SegDs = env->segs[3].selector, - .SegFs = env->segs[4].selector, - .SegGs = env->segs[5].selector, - .EFlags = cpu_compute_eflags(env), - - .Dr0 = env->dr[0], - .Dr1 = env->dr[1], - .Dr2 = env->dr[2], - .Dr3 = env->dr[3], - .Dr6 = env->dr[6], - .Dr7 = env->dr[7], - - .Rax = env->regs[R_EAX], - .Rbx = env->regs[R_EBX], - .Rcx = env->regs[R_ECX], - .Rdx = env->regs[R_EDX], - .Rsp = env->regs[R_ESP], - .Rbp = env->regs[R_EBP], - .Rsi = env->regs[R_ESI], - .Rdi = env->regs[R_EDI], - .R8 = env->regs[8], - .R9 = env->regs[9], - .R10 = env->regs[10], - .R11 = env->regs[11], - .R12 = env->regs[12], - .R13 = env->regs[13], - .R14 = env->regs[14], - .R15 = env->regs[15], - - .Rip = env->eip, - .FltSave = { + if (x64) { + ctx.x64 = (WinContext64){ + .ContextFlags = WIN_CTX64_ALL, .MxCsr = env->mxcsr, - }, - }; + + .SegEs = env->segs[0].selector, + .SegCs = env->segs[1].selector, + .SegSs = env->segs[2].selector, + .SegDs = env->segs[3].selector, + .SegFs = 
env->segs[4].selector, + .SegGs = env->segs[5].selector, + .EFlags = cpu_compute_eflags(env), + + .Dr0 = env->dr[0], + .Dr1 = env->dr[1], + .Dr2 = env->dr[2], + .Dr3 = env->dr[3], + .Dr6 = env->dr[6], + .Dr7 = env->dr[7], + + .Rax = env->regs[R_EAX], + .Rbx = env->regs[R_EBX], + .Rcx = env->regs[R_ECX], + .Rdx = env->regs[R_EDX], + .Rsp = env->regs[R_ESP], + .Rbp = env->regs[R_EBP], + .Rsi = env->regs[R_ESI], + .Rdi = env->regs[R_EDI], + .R8 = env->regs[8], + .R9 = env->regs[9], + .R10 = env->regs[10], + .R11 = env->regs[11], + .R12 = env->regs[12], + .R13 = env->regs[13], + .R14 = env->regs[14], + .R15 = env->regs[15], + + .Rip = env->eip, + .FltSave = { + .MxCsr = env->mxcsr, + }, + }; + } else { + ctx.x32 = (WinContext32){ + .ContextFlags = WIN_CTX32_FULL | WIN_CTX_DBG, + + .SegEs = env->segs[0].selector, + .SegCs = env->segs[1].selector, + .SegSs = env->segs[2].selector, + .SegDs = env->segs[3].selector, + .SegFs = env->segs[4].selector, + .SegGs = env->segs[5].selector, + .EFlags = cpu_compute_eflags(env), + + .Dr0 = env->dr[0], + .Dr1 = env->dr[1], + .Dr2 = env->dr[2], + .Dr3 = env->dr[3], + .Dr6 = env->dr[6], + .Dr7 = env->dr[7], + + .Eax = env->regs[R_EAX], + .Ebx = env->regs[R_EBX], + .Ecx = env->regs[R_ECX], + .Edx = env->regs[R_EDX], + .Esp = env->regs[R_ESP], + .Ebp = env->regs[R_EBP], + .Esi = env->regs[R_ESI], + .Edi = env->regs[R_EDI], + + .Eip = env->eip, + }; + } if (cpu_memory_rw_debug(first_cpu, Context, - (uint8_t *)&saved_ctx[i].ctx, sizeof(WinContext), 0)) { + &saved_ctx[i].ctx, win_dump_ctx_size(x64), 0)) { error_setg(errp, "win-dump: failed to save CPU #%d context", i); return; } if (cpu_memory_rw_debug(first_cpu, Context, - (uint8_t *)&ctx, sizeof(WinContext), 1)) { + &ctx, win_dump_ctx_size(x64), 1)) { error_setg(errp, "win-dump: failed to write CPU #%d context", i); return; } @@ -299,14 +388,14 @@ static void patch_and_save_context(WinDumpHeader64 *h, } } -static void restore_context(WinDumpHeader64 *h, +static void restore_context(WinDumpHeader *h, bool x64, struct saved_context *saved_ctx) { int i; - for (i = 0; i < h->NumberProcessors; i++) { + for (i = 0; i < WIN_DUMP_FIELD(NumberProcessors); i++) { if (cpu_memory_rw_debug(first_cpu, saved_ctx[i].addr, - (uint8_t *)&saved_ctx[i].ctx, sizeof(WinContext), 1)) { + &saved_ctx[i].ctx, win_dump_ctx_size(x64), 1)) { warn_report("win-dump: failed to restore CPU #%d context", i); } } @@ -314,69 +403,71 @@ static void restore_context(WinDumpHeader64 *h, void create_win_dump(DumpState *s, Error **errp) { - WinDumpHeader64 *h = (WinDumpHeader64 *)(s->guest_note + - VMCOREINFO_ELF_NOTE_HDR_SIZE); + WinDumpHeader *h = (void *)(s->guest_note + VMCOREINFO_ELF_NOTE_HDR_SIZE); X86CPU *first_x86_cpu = X86_CPU(first_cpu); uint64_t saved_cr3 = first_x86_cpu->env.cr[3]; struct saved_context *saved_ctx = NULL; Error *local_err = NULL; + bool x64 = true; + size_t hdr_size; - if (s->guest_note_size != sizeof(WinDumpHeader64) + - VMCOREINFO_ELF_NOTE_HDR_SIZE) { + if (s->guest_note_size != VMCOREINFO_WIN_DUMP_NOTE_SIZE32 && + s->guest_note_size != VMCOREINFO_WIN_DUMP_NOTE_SIZE64) { error_setg(errp, "win-dump: invalid vmcoreinfo note size"); return; } - check_header(h, &local_err); - if (local_err) { + if (!check_header(h, &x64, &local_err)) { error_propagate(errp, local_err); return; } + hdr_size = x64 ? sizeof(WinDumpHeader64) : sizeof(WinDumpHeader32); + /* * Further access to kernel structures by virtual addresses * should be made from system context. 
*/ - first_x86_cpu->env.cr[3] = h->DirectoryTableBase; + first_x86_cpu->env.cr[3] = WIN_DUMP_FIELD(DirectoryTableBase); - check_kdbg(h, &local_err); + check_kdbg(h, x64, &local_err); if (local_err) { error_propagate(errp, local_err); goto out_cr3; } - patch_header(h); + patch_header(h, x64); - saved_ctx = g_new(struct saved_context, h->NumberProcessors); + saved_ctx = g_new(struct saved_context, WIN_DUMP_FIELD(NumberProcessors)); /* * Always patch context because there is no way * to determine if the system-saved context is valid */ - patch_and_save_context(h, saved_ctx, &local_err); + patch_and_save_context(h, x64, saved_ctx, &local_err); if (local_err) { error_propagate(errp, local_err); goto out_free; } - s->total_size = h->RequiredDumpSpace; + s->total_size = WIN_DUMP_FIELD(RequiredDumpSpace); - s->written_size = qemu_write_full(s->fd, h, sizeof(*h)); - if (s->written_size != sizeof(*h)) { + s->written_size = qemu_write_full(s->fd, h, hdr_size); + if (s->written_size != hdr_size) { error_setg(errp, QERR_IO_ERROR); goto out_restore; } - write_runs(s, h, &local_err); + write_runs(s, h, x64, &local_err); if (local_err) { error_propagate(errp, local_err); goto out_restore; } out_restore: - restore_context(h, saved_ctx); + restore_context(h, x64, saved_ctx); out_free: g_free(saved_ctx); out_cr3: diff --git a/ebpf/ebpf_rss.c b/ebpf/ebpf_rss.c index 118c68da83..cee658c158 100644 --- a/ebpf/ebpf_rss.c +++ b/ebpf/ebpf_rss.c @@ -49,7 +49,7 @@ bool ebpf_rss_load(struct EBPFRSSContext *ctx) goto error; } - bpf_program__set_socket_filter(rss_bpf_ctx->progs.tun_rss_steering_prog); + bpf_program__set_type(rss_bpf_ctx->progs.tun_rss_steering_prog, BPF_PROG_TYPE_SOCKET_FILTER); if (rss_bpf__load(rss_bpf_ctx)) { trace_ebpf_error("eBPF RSS", "can not load RSS program"); diff --git a/ebpf/meson.build b/ebpf/meson.build index 9cd0635370..2dd0fd8948 100644 --- a/ebpf/meson.build +++ b/ebpf/meson.build @@ -1 +1 @@ -common_ss.add(when: libbpf, if_true: files('ebpf_rss.c'), if_false: files('ebpf_rss-stub.c')) +softmmu_ss.add(when: libbpf, if_true: files('ebpf_rss.c'), if_false: files('ebpf_rss-stub.c')) diff --git a/event-loop-base.c b/event-loop-base.c new file mode 100644 index 0000000000..d5be4dc6fc --- /dev/null +++ b/event-loop-base.c @@ -0,0 +1,140 @@ +/* + * QEMU event-loop base + * + * Copyright (C) 2022 Red Hat Inc + * + * Authors: + * Stefan Hajnoczi + * Nicolas Saenz Julienne + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qom/object_interfaces.h" +#include "qapi/error.h" +#include "block/thread-pool.h" +#include "sysemu/event-loop-base.h" + +typedef struct { + const char *name; + ptrdiff_t offset; /* field's byte offset in EventLoopBase struct */ +} EventLoopBaseParamInfo; + +static void event_loop_base_instance_init(Object *obj) +{ + EventLoopBase *base = EVENT_LOOP_BASE(obj); + + base->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT; +} + +static EventLoopBaseParamInfo aio_max_batch_info = { + "aio-max-batch", offsetof(EventLoopBase, aio_max_batch), +}; +static EventLoopBaseParamInfo thread_pool_min_info = { + "thread-pool-min", offsetof(EventLoopBase, thread_pool_min), +}; +static EventLoopBaseParamInfo thread_pool_max_info = { + "thread-pool-max", offsetof(EventLoopBase, thread_pool_max), +}; + +static void event_loop_base_get_param(Object *obj, Visitor *v, + const char *name, void *opaque, Error **errp) +{ + EventLoopBase *event_loop_base = EVENT_LOOP_BASE(obj); + EventLoopBaseParamInfo *info = opaque; + int64_t *field = (void *)event_loop_base + info->offset; + + visit_type_int64(v, name, field, errp); +} + +static void event_loop_base_set_param(Object *obj, Visitor *v, + const char *name, void *opaque, Error **errp) +{ + EventLoopBaseClass *bc = EVENT_LOOP_BASE_GET_CLASS(obj); + EventLoopBase *base = EVENT_LOOP_BASE(obj); + EventLoopBaseParamInfo *info = opaque; + int64_t *field = (void *)base + info->offset; + int64_t value; + + if (!visit_type_int64(v, name, &value, errp)) { + return; + } + + if (value < 0) { + error_setg(errp, "%s value must be in range [0, %" PRId64 "]", + info->name, INT64_MAX); + return; + } + + *field = value; + + if (bc->update_params) { + bc->update_params(base, errp); + } + + return; +} + +static void event_loop_base_complete(UserCreatable *uc, Error **errp) +{ + EventLoopBaseClass *bc = EVENT_LOOP_BASE_GET_CLASS(uc); + EventLoopBase *base = EVENT_LOOP_BASE(uc); + + if (bc->init) { + bc->init(base, errp); + } +} + +static bool event_loop_base_can_be_deleted(UserCreatable *uc) +{ + EventLoopBaseClass *bc = EVENT_LOOP_BASE_GET_CLASS(uc); + EventLoopBase *backend = EVENT_LOOP_BASE(uc); + + if (bc->can_be_deleted) { + return bc->can_be_deleted(backend); + } + + return true; +} + +static void event_loop_base_class_init(ObjectClass *klass, void *class_data) +{ + UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass); + ucc->complete = event_loop_base_complete; + ucc->can_be_deleted = event_loop_base_can_be_deleted; + + object_class_property_add(klass, "aio-max-batch", "int", + event_loop_base_get_param, + event_loop_base_set_param, + NULL, &aio_max_batch_info); + object_class_property_add(klass, "thread-pool-min", "int", + event_loop_base_get_param, + event_loop_base_set_param, + NULL, &thread_pool_min_info); + object_class_property_add(klass, "thread-pool-max", "int", + event_loop_base_get_param, + event_loop_base_set_param, + NULL, &thread_pool_max_info); +} + +static const TypeInfo event_loop_base_info = { + .name = TYPE_EVENT_LOOP_BASE, + .parent = TYPE_OBJECT, + .instance_size = sizeof(EventLoopBase), + .instance_init = event_loop_base_instance_init, + .class_size = sizeof(EventLoopBaseClass), + .class_init = event_loop_base_class_init, + .abstract = true, + .interfaces = (InterfaceInfo[]) { + { TYPE_USER_CREATABLE }, + { } + } +}; + +static void register_types(void) +{ + type_register_static(&event_loop_base_info); +} +type_init(register_types); diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc index 
dddee92d6e..247400031c 100644 --- a/fpu/softfloat-parts.c.inc +++ b/fpu/softfloat-parts.c.inc @@ -19,7 +19,7 @@ static void partsN(return_nan)(FloatPartsN *a, float_status *s) { switch (a->cls) { case float_class_snan: - float_raise(float_flag_invalid, s); + float_raise(float_flag_invalid | float_flag_invalid_snan, s); if (s->default_nan_mode) { parts_default_nan(a, s); } else { @@ -40,7 +40,7 @@ static FloatPartsN *partsN(pick_nan)(FloatPartsN *a, FloatPartsN *b, float_status *s) { if (is_snan(a->cls) || is_snan(b->cls)) { - float_raise(float_flag_invalid, s); + float_raise(float_flag_invalid | float_flag_invalid_snan, s); } if (s->default_nan_mode) { @@ -68,7 +68,7 @@ static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b, int which; if (unlikely(abc_mask & float_cmask_snan)) { - float_raise(float_flag_invalid, s); + float_raise(float_flag_invalid | float_flag_invalid_snan, s); } which = pickNaNMulAdd(a->cls, b->cls, c->cls, @@ -214,18 +214,35 @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s, p->frac_lo &= ~round_mask; } } else if (unlikely(exp >= exp_max)) { - flags |= float_flag_overflow | float_flag_inexact; - if (overflow_norm) { + flags |= float_flag_overflow; + if (s->rebias_overflow) { + exp -= fmt->exp_re_bias; + } else if (overflow_norm) { + flags |= float_flag_inexact; exp = exp_max - 1; frac_allones(p); p->frac_lo &= ~round_mask; } else { + flags |= float_flag_inexact; p->cls = float_class_inf; exp = exp_max; frac_clear(p); } } frac_shr(p, frac_shift); + } else if (unlikely(s->rebias_underflow)) { + flags |= float_flag_underflow; + exp += fmt->exp_re_bias; + if (p->frac_lo & round_mask) { + flags |= float_flag_inexact; + if (frac_addi(p, p, inc)) { + frac_shr(p, 1); + p->frac_hi |= DECOMPOSED_IMPLICIT_BIT; + exp++; + } + p->frac_lo &= ~round_mask; + } + frac_shr(p, frac_shift); } else if (s->flush_to_zero) { flags |= float_flag_output_denormal; p->cls = float_class_zero; @@ -354,7 +371,7 @@ static FloatPartsN *partsN(addsub)(FloatPartsN *a, FloatPartsN *b, return a; } /* Inf - Inf */ - float_raise(float_flag_invalid, s); + float_raise(float_flag_invalid | float_flag_invalid_isi, s); parts_default_nan(a, s); return a; } @@ -423,7 +440,7 @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b, /* Inf * Zero == NaN */ if (unlikely(ab_mask == float_cmask_infzero)) { - float_raise(float_flag_invalid, s); + float_raise(float_flag_invalid | float_flag_invalid_imz, s); parts_default_nan(a, s); return a; } @@ -489,11 +506,13 @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b, if (unlikely(ab_mask != float_cmask_normal)) { if (unlikely(ab_mask == float_cmask_infzero)) { + float_raise(float_flag_invalid | float_flag_invalid_imz, s); goto d_nan; } if (ab_mask & float_cmask_inf) { if (c->cls == float_class_inf && a->sign != c->sign) { + float_raise(float_flag_invalid | float_flag_invalid_isi, s); goto d_nan; } goto return_inf; @@ -566,7 +585,6 @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b, goto finish_sign; d_nan: - float_raise(float_flag_invalid, s); parts_default_nan(a, s); return a; } @@ -589,11 +607,13 @@ static FloatPartsN *partsN(div)(FloatPartsN *a, FloatPartsN *b, } /* 0/0 or Inf/Inf => NaN */ - if (unlikely(ab_mask == float_cmask_zero) || - unlikely(ab_mask == float_cmask_inf)) { - float_raise(float_flag_invalid, s); - parts_default_nan(a, s); - return a; + if (unlikely(ab_mask == float_cmask_zero)) { + float_raise(float_flag_invalid | float_flag_invalid_zdz, s); + goto d_nan; + } + if 
(unlikely(ab_mask == float_cmask_inf)) { + float_raise(float_flag_invalid | float_flag_invalid_idi, s); + goto d_nan; } /* All the NaN cases */ @@ -624,6 +644,10 @@ static FloatPartsN *partsN(div)(FloatPartsN *a, FloatPartsN *b, float_raise(float_flag_divbyzero, s); a->cls = float_class_inf; return a; + + d_nan: + parts_default_nan(a, s); + return a; } /* @@ -862,7 +886,7 @@ static void partsN(sqrt)(FloatPartsN *a, float_status *status, return; d_nan: - float_raise(float_flag_invalid, status); + float_raise(float_flag_invalid | float_flag_invalid_sqrt, status); parts_default_nan(a, status); } @@ -1042,13 +1066,15 @@ static int64_t partsN(float_to_sint)(FloatPartsN *p, FloatRoundMode rmode, switch (p->cls) { case float_class_snan: + flags |= float_flag_invalid_snan; + /* fall through */ case float_class_qnan: - flags = float_flag_invalid; + flags |= float_flag_invalid; r = max; break; case float_class_inf: - flags = float_flag_invalid; + flags = float_flag_invalid | float_flag_invalid_cvti; r = p->sign ? min : max; break; @@ -1070,11 +1096,11 @@ static int64_t partsN(float_to_sint)(FloatPartsN *p, FloatRoundMode rmode, if (r <= -(uint64_t)min) { r = -r; } else { - flags = float_flag_invalid; + flags = float_flag_invalid | float_flag_invalid_cvti; r = min; } } else if (r > max) { - flags = float_flag_invalid; + flags = float_flag_invalid | float_flag_invalid_cvti; r = max; } break; @@ -1107,13 +1133,15 @@ static uint64_t partsN(float_to_uint)(FloatPartsN *p, FloatRoundMode rmode, switch (p->cls) { case float_class_snan: + flags |= float_flag_invalid_snan; + /* fall through */ case float_class_qnan: - flags = float_flag_invalid; + flags |= float_flag_invalid; r = max; break; case float_class_inf: - flags = float_flag_invalid; + flags = float_flag_invalid | float_flag_invalid_cvti; r = p->sign ? 0 : max; break; @@ -1131,15 +1159,15 @@ static uint64_t partsN(float_to_uint)(FloatPartsN *p, FloatRoundMode rmode, } if (p->sign) { - flags = float_flag_invalid; + flags = float_flag_invalid | float_flag_invalid_cvti; r = 0; } else if (p->exp > DECOMPOSED_BINARY_POINT) { - flags = float_flag_invalid; + flags = float_flag_invalid | float_flag_invalid_cvti; r = max; } else { r = p->frac_hi >> (DECOMPOSED_BINARY_POINT - p->exp); if (r > max) { - flags = float_flag_invalid; + flags = float_flag_invalid | float_flag_invalid_cvti; r = max; } } @@ -1219,14 +1247,35 @@ static FloatPartsN *partsN(minmax)(FloatPartsN *a, FloatPartsN *b, if (unlikely(ab_mask & float_cmask_anynan)) { /* - * For minnum/maxnum, if one operand is a QNaN, and the other + * For minNum/maxNum (IEEE 754-2008) + * or minimumNumber/maximumNumber (IEEE 754-2019), + * if one operand is a QNaN, and the other * operand is numerical, then return numerical argument. */ - if ((flags & minmax_isnum) + if ((flags & (minmax_isnum | minmax_isnumber)) && !(ab_mask & float_cmask_snan) && (ab_mask & ~float_cmask_qnan)) { return is_nan(a->cls) ? b : a; } + + /* + * In IEEE 754-2019, minNum, maxNum, minNumMag and maxNumMag + * are removed and replaced with minimum, minimumNumber, maximum + * and maximumNumber. + * minimumNumber/maximumNumber behavior for SNaN is changed to: + * If both operands are NaNs, a QNaN is returned. + * If either operand is a SNaN, + * an invalid operation exception is signaled, + * but unless both operands are NaNs, + * the SNaN is otherwise ignored and not converted to a QNaN. 
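To make the minimumNumber()/maximumNumber() rule spelled out in the comment above concrete, here is a minimal standalone sketch (an editorial illustration, not part of the patch): it restates the decision logic for ordinary host doubles, with explicit a_is_snan/b_is_snan parameters standing in for softfloat's is_snan() classification, which portable C cannot recover from a plain double.

    #include <fenv.h>
    #include <math.h>
    #include <stdbool.h>

    /*
     * IEEE 754-2019 minimumNumber(), spelled out: a signaling NaN operand
     * raises invalid, but unless both operands are NaNs the numerical
     * operand is returned rather than a quietened NaN.
     */
    static double minimum_number_sketch(double a, double b,
                                        bool a_is_snan, bool b_is_snan)
    {
        bool a_is_nan = isnan(a), b_is_nan = isnan(b);

        if (a_is_snan || b_is_snan) {
            feraiseexcept(FE_INVALID);   /* invalid is signaled either way */
        }
        if (a_is_nan && b_is_nan) {
            return NAN;                  /* both NaN: a QNaN is returned */
        }
        if (a_is_nan) {
            return b;                    /* exactly one NaN: the number wins */
        }
        if (b_is_nan) {
            return a;
        }
        if (a < b) {
            return a;
        }
        if (b < a) {
            return b;
        }
        return signbit(a) ? a : b;       /* a == b: prefer -0.0 over +0.0 */
    }

maximumNumber() is the mirror image; the softfloat change here folds both behaviours into the existing minmax helper through the new minmax_isnumber flag rather than duplicating logic like the above.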
+ */ + if ((flags & minmax_isnumber) + && (ab_mask & float_cmask_snan) + && (ab_mask & ~float_cmask_anynan)) { + float_raise(float_flag_invalid, s); + return is_nan(a->cls) ? b : a; + } + return parts_pick_nan(a, b, s); } @@ -1295,16 +1344,19 @@ static FloatRelation partsN(compare)(FloatPartsN *a, FloatPartsN *b, float_status *s, bool is_quiet) { int ab_mask = float_cmask(a->cls) | float_cmask(b->cls); - int cmp; if (likely(ab_mask == float_cmask_normal)) { + FloatRelation cmp; + if (a->sign != b->sign) { goto a_sign; } - if (a->exp != b->exp) { - cmp = a->exp < b->exp ? -1 : 1; - } else { + if (a->exp == b->exp) { cmp = frac_cmp(a, b); + } else if (a->exp < b->exp) { + cmp = float_relation_less; + } else { + cmp = float_relation_greater; } if (a->sign) { cmp = -cmp; @@ -1313,7 +1365,9 @@ static FloatRelation partsN(compare)(FloatPartsN *a, FloatPartsN *b, } if (unlikely(ab_mask & float_cmask_anynan)) { - if (!is_quiet || (ab_mask & float_cmask_snan)) { + if (ab_mask & float_cmask_snan) { + float_raise(float_flag_invalid | float_flag_invalid_snan, s); + } else if (!is_quiet) { float_raise(float_flag_invalid, s); } return float_relation_unordered; @@ -1382,6 +1436,7 @@ static void partsN(log2)(FloatPartsN *a, float_status *s, const FloatFmt *fmt) parts_return_nan(a, s); return; case float_class_zero: + float_raise(float_flag_divbyzero, s); /* log2(0) = -inf */ a->cls = float_class_inf; a->sign = 1; diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc index 12467bb9bb..1610472cfc 100644 --- a/fpu/softfloat-specialize.c.inc +++ b/fpu/softfloat-specialize.c.inc @@ -198,7 +198,6 @@ static void parts128_default_nan(FloatParts128 *p, float_status *status) static uint64_t parts_silence_nan_frac(uint64_t frac, float_status *status) { g_assert(!no_signaling_nans(status)); - g_assert(!status->default_nan_mode); /* The only snan_bit_is_one target without default_nan_mode is HPPA. */ if (snan_bit_is_one(status)) { @@ -391,7 +390,8 @@ bool float32_is_signaling_nan(float32 a_, float_status *status) static int pickNaN(FloatClass a_cls, FloatClass b_cls, bool aIsLargerSignificand, float_status *status) { -#if defined(TARGET_ARM) || defined(TARGET_MIPS) || defined(TARGET_HPPA) +#if defined(TARGET_ARM) || defined(TARGET_MIPS) || defined(TARGET_HPPA) || \ + defined(TARGET_LOONGARCH64) || defined(TARGET_S390X) /* ARM mandated NaN propagation rules (see FPProcessNaNs()), take * the first of: * 1. A if it is signaling @@ -507,7 +507,7 @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls, * the default NaN */ if (infzero && is_qnan(c_cls)) { - float_raise(float_flag_invalid, status); + float_raise(float_flag_invalid | float_flag_invalid_imz, status); return 3; } @@ -534,7 +534,7 @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls, * case sets InvalidOp and returns the default NaN */ if (infzero) { - float_raise(float_flag_invalid, status); + float_raise(float_flag_invalid | float_flag_invalid_imz, status); return 3; } /* Prefer sNaN over qNaN, in the a, b, c order. */ @@ -557,7 +557,7 @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls, * case sets InvalidOp and returns the input value 'c' */ if (infzero) { - float_raise(float_flag_invalid, status); + float_raise(float_flag_invalid | float_flag_invalid_imz, status); return 2; } /* Prefer sNaN over qNaN, in the c, a, b order. 
*/ @@ -575,13 +575,36 @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls, return 1; } } +#elif defined(TARGET_LOONGARCH64) + /* + * For LoongArch systems that conform to IEEE754-2008, the (inf,zero,nan) + * case sets InvalidOp and returns the input value 'c' + */ + if (infzero) { + float_raise(float_flag_invalid | float_flag_invalid_imz, status); + return 2; + } + /* Prefer sNaN over qNaN, in the c, a, b order. */ + if (is_snan(c_cls)) { + return 2; + } else if (is_snan(a_cls)) { + return 0; + } else if (is_snan(b_cls)) { + return 1; + } else if (is_qnan(c_cls)) { + return 2; + } else if (is_qnan(a_cls)) { + return 0; + } else { + return 1; + } #elif defined(TARGET_PPC) /* For PPC, the (inf,zero,qnan) case sets InvalidOp, but we prefer * to return an input NaN if we have one (ie c) rather than generating * a default NaN */ if (infzero) { - float_raise(float_flag_invalid, status); + float_raise(float_flag_invalid | float_flag_invalid_imz, status); return 2; } @@ -598,7 +621,7 @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls, #elif defined(TARGET_RISCV) /* For RISC-V, InvalidOp is set when multiplicands are Inf and zero */ if (infzero) { - float_raise(float_flag_invalid, status); + float_raise(float_flag_invalid | float_flag_invalid_imz, status); } return 3; /* default NaN */ #elif defined(TARGET_XTENSA) @@ -607,7 +630,7 @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls, * an input NaN if we have one (ie c). */ if (infzero) { - float_raise(float_flag_invalid, status); + float_raise(float_flag_invalid | float_flag_invalid_imz, status); return 2; } if (status->use_first_nan) { diff --git a/fpu/softfloat.c b/fpu/softfloat.c index 6e769f990c..108f9cb224 100644 --- a/fpu/softfloat.c +++ b/fpu/softfloat.c @@ -436,6 +436,11 @@ enum { minmax_isnum = 2, /* Set for the IEEE 754-2008 minNumMag() and minNumMag() operations. */ minmax_ismag = 4, + /* + * Set for the IEEE 754-2019 minimumNumber() and maximumNumber() + * operations. + */ + minmax_isnumber = 8, }; /* Simple helpers for checking if, or what kind of, NaN we have */ @@ -516,6 +521,7 @@ typedef struct { typedef struct { int exp_size; int exp_bias; + int exp_re_bias; int exp_max; int frac_size; int frac_shift; @@ -527,6 +533,7 @@ typedef struct { #define FLOAT_PARAMS_(E) \ .exp_size = E, \ .exp_bias = ((1 << E) - 1) >> 1, \ + .exp_re_bias = (1 << (E - 1)) + (1 << (E - 2)), \ .exp_max = (1 << E) - 1 #define FLOAT_PARAMS(E, F) \ @@ -869,10 +876,10 @@ static FloatParts128 *parts128_minmax(FloatParts128 *a, FloatParts128 *b, #define parts_minmax(A, B, S, F) \ PARTS_GENERIC_64_128(minmax, A)(A, B, S, F) -static int parts64_compare(FloatParts64 *a, FloatParts64 *b, - float_status *s, bool q); -static int parts128_compare(FloatParts128 *a, FloatParts128 *b, - float_status *s, bool q); +static FloatRelation parts64_compare(FloatParts64 *a, FloatParts64 *b, + float_status *s, bool q); +static FloatRelation parts128_compare(FloatParts128 *a, FloatParts128 *b, + float_status *s, bool q); #define parts_compare(A, B, S, Q) \ PARTS_GENERIC_64_128(compare, A)(A, B, S, Q) @@ -952,21 +959,23 @@ static void frac128_allones(FloatParts128 *a) #define frac_allones(A) FRAC_GENERIC_64_128(allones, A)(A) -static int frac64_cmp(FloatParts64 *a, FloatParts64 *b) +static FloatRelation frac64_cmp(FloatParts64 *a, FloatParts64 *b) { - return a->frac == b->frac ? 0 : a->frac < b->frac ? -1 : 1; + return (a->frac == b->frac ? float_relation_equal + : a->frac < b->frac ? 
float_relation_less + : float_relation_greater); } -static int frac128_cmp(FloatParts128 *a, FloatParts128 *b) +static FloatRelation frac128_cmp(FloatParts128 *a, FloatParts128 *b) { uint64_t ta = a->frac_hi, tb = b->frac_hi; if (ta == tb) { ta = a->frac_lo, tb = b->frac_lo; if (ta == tb) { - return 0; + return float_relation_equal; } } - return ta < tb ? -1 : 1; + return ta < tb ? float_relation_less : float_relation_greater; } #define frac_cmp(A, B) FRAC_GENERIC_64_128(cmp, A)(A, B) @@ -1688,6 +1697,50 @@ static float64 float64_round_pack_canonical(FloatParts64 *p, return float64_pack_raw(p); } +static float64 float64r32_round_pack_canonical(FloatParts64 *p, + float_status *s) +{ + parts_uncanon(p, s, &float32_params); + + /* + * In parts_uncanon, we placed the fraction for float32 at the lsb. + * We need to adjust the fraction higher so that the least N bits are + * zero, and the fraction is adjacent to the float64 implicit bit. + */ + switch (p->cls) { + case float_class_normal: + if (unlikely(p->exp == 0)) { + /* + * The result is denormal for float32, but can be represented + * in normalized form for float64. Adjust, per canonicalize. + */ + int shift = frac_normalize(p); + p->exp = (float32_params.frac_shift - + float32_params.exp_bias - shift + 1 + + float64_params.exp_bias); + frac_shr(p, float64_params.frac_shift); + } else { + frac_shl(p, float32_params.frac_shift - float64_params.frac_shift); + p->exp += float64_params.exp_bias - float32_params.exp_bias; + } + break; + case float_class_snan: + case float_class_qnan: + frac_shl(p, float32_params.frac_shift - float64_params.frac_shift); + p->exp = float64_params.exp_max; + break; + case float_class_inf: + p->exp = float64_params.exp_max; + break; + case float_class_zero: + break; + default: + g_assert_not_reached(); + } + + return float64_pack_raw(p); +} + static void float128_unpack_canonical(FloatParts128 *p, float128 f, float_status *s) { @@ -1933,6 +1986,28 @@ float64_sub(float64 a, float64 b, float_status *s) return float64_addsub(a, b, s, hard_f64_sub, soft_f64_sub); } +static float64 float64r32_addsub(float64 a, float64 b, float_status *status, + bool subtract) +{ + FloatParts64 pa, pb, *pr; + + float64_unpack_canonical(&pa, a, status); + float64_unpack_canonical(&pb, b, status); + pr = parts_addsub(&pa, &pb, status, subtract); + + return float64r32_round_pack_canonical(pr, status); +} + +float64 float64r32_add(float64 a, float64 b, float_status *status) +{ + return float64r32_addsub(a, b, status, false); +} + +float64 float64r32_sub(float64 a, float64 b, float_status *status) +{ + return float64r32_addsub(a, b, status, true); +} + static bfloat16 QEMU_FLATTEN bfloat16_addsub(bfloat16 a, bfloat16 b, float_status *status, bool subtract) { @@ -2064,6 +2139,17 @@ float64_mul(float64 a, float64 b, float_status *s) f64_is_zon2, f64_addsubmul_post); } +float64 float64r32_mul(float64 a, float64 b, float_status *status) +{ + FloatParts64 pa, pb, *pr; + + float64_unpack_canonical(&pa, a, status); + float64_unpack_canonical(&pb, b, status); + pr = parts_mul(&pa, &pb, status); + + return float64r32_round_pack_canonical(pr, status); +} + bfloat16 QEMU_FLATTEN bfloat16_mul(bfloat16 a, bfloat16 b, float_status *status) { @@ -2291,6 +2377,19 @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s) return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s); } +float64 float64r32_muladd(float64 a, float64 b, float64 c, + int flags, float_status *status) +{ + FloatParts64 pa, pb, pc, *pr; + + float64_unpack_canonical(&pa, 
a, status); + float64_unpack_canonical(&pb, b, status); + float64_unpack_canonical(&pc, c, status); + pr = parts_muladd(&pa, &pb, &pc, flags, status); + + return float64r32_round_pack_canonical(pr, status); +} + bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c, int flags, float_status *status) { @@ -2414,6 +2513,17 @@ float64_div(float64 a, float64 b, float_status *s) f64_div_pre, f64_div_post); } +float64 float64r32_div(float64 a, float64 b, float_status *status) +{ + FloatParts64 pa, pb, *pr; + + float64_unpack_canonical(&pa, a, status); + float64_unpack_canonical(&pb, b, status); + pr = parts_div(&pa, &pb, status); + + return float64r32_round_pack_canonical(pr, status); +} + bfloat16 QEMU_FLATTEN bfloat16_div(bfloat16 a, bfloat16 b, float_status *status) { @@ -2538,8 +2648,10 @@ floatx80 floatx80_mod(floatx80 a, floatx80 b, float_status *status) static void parts_float_to_ahp(FloatParts64 *a, float_status *s) { switch (a->cls) { - case float_class_qnan: case float_class_snan: + float_raise(float_flag_invalid_snan, s); + /* fall through */ + case float_class_qnan: /* * There is no NaN in the destination format. Raise Invalid * and return a zero with the sign of the input NaN. @@ -3046,6 +3158,60 @@ static int64_t float128_to_int64_scalbn(float128 a, FloatRoundMode rmode, return parts_float_to_sint(&p, rmode, scale, INT64_MIN, INT64_MAX, s); } +static Int128 float128_to_int128_scalbn(float128 a, FloatRoundMode rmode, + int scale, float_status *s) +{ + int flags = 0; + Int128 r; + FloatParts128 p; + + float128_unpack_canonical(&p, a, s); + + switch (p.cls) { + case float_class_snan: + flags |= float_flag_invalid_snan; + /* fall through */ + case float_class_qnan: + flags |= float_flag_invalid; + r = UINT128_MAX; + break; + + case float_class_inf: + flags = float_flag_invalid | float_flag_invalid_cvti; + r = p.sign ? INT128_MIN : INT128_MAX; + break; + + case float_class_zero: + return int128_zero(); + + case float_class_normal: + if (parts_round_to_int_normal(&p, rmode, scale, 128 - 2)) { + flags = float_flag_inexact; + } + + if (p.exp < 127) { + int shift = 127 - p.exp; + r = int128_urshift(int128_make128(p.frac_lo, p.frac_hi), shift); + if (p.sign) { + r = int128_neg(r); + } + } else if (p.exp == 127 && p.sign && p.frac_lo == 0 && + p.frac_hi == DECOMPOSED_IMPLICIT_BIT) { + r = INT128_MIN; + } else { + flags = float_flag_invalid | float_flag_invalid_cvti; + r = p.sign ? 
INT128_MIN : INT128_MAX; + } + break; + + default: + g_assert_not_reached(); + } + + float_raise(flags, s); + return r; +} + static int32_t floatx80_to_int32_scalbn(floatx80 a, FloatRoundMode rmode, int scale, float_status *s) { @@ -3128,6 +3294,11 @@ int64_t float128_to_int64(float128 a, float_status *s) return float128_to_int64_scalbn(a, s->float_rounding_mode, 0, s); } +Int128 float128_to_int128(float128 a, float_status *s) +{ + return float128_to_int128_scalbn(a, s->float_rounding_mode, 0, s); +} + int32_t floatx80_to_int32(floatx80 a, float_status *s) { return floatx80_to_int32_scalbn(a, s->float_rounding_mode, 0, s); @@ -3193,6 +3364,11 @@ int64_t float128_to_int64_round_to_zero(float128 a, float_status *s) return float128_to_int64_scalbn(a, float_round_to_zero, 0, s); } +Int128 float128_to_int128_round_to_zero(float128 a, float_status *s) +{ + return float128_to_int128_scalbn(a, float_round_to_zero, 0, s); +} + int32_t floatx80_to_int32_round_to_zero(floatx80 a, float_status *s) { return floatx80_to_int32_scalbn(a, float_round_to_zero, 0, s); @@ -3372,6 +3548,61 @@ static uint64_t float128_to_uint64_scalbn(float128 a, FloatRoundMode rmode, return parts_float_to_uint(&p, rmode, scale, UINT64_MAX, s); } +static Int128 float128_to_uint128_scalbn(float128 a, FloatRoundMode rmode, + int scale, float_status *s) +{ + int flags = 0; + Int128 r; + FloatParts128 p; + + float128_unpack_canonical(&p, a, s); + + switch (p.cls) { + case float_class_snan: + flags |= float_flag_invalid_snan; + /* fall through */ + case float_class_qnan: + flags |= float_flag_invalid; + r = UINT128_MAX; + break; + + case float_class_inf: + flags = float_flag_invalid | float_flag_invalid_cvti; + r = p.sign ? int128_zero() : UINT128_MAX; + break; + + case float_class_zero: + return int128_zero(); + + case float_class_normal: + if (parts_round_to_int_normal(&p, rmode, scale, 128 - 2)) { + flags = float_flag_inexact; + if (p.cls == float_class_zero) { + r = int128_zero(); + break; + } + } + + if (p.sign) { + flags = float_flag_invalid | float_flag_invalid_cvti; + r = int128_zero(); + } else if (p.exp <= 127) { + int shift = 127 - p.exp; + r = int128_urshift(int128_make128(p.frac_lo, p.frac_hi), shift); + } else { + flags = float_flag_invalid | float_flag_invalid_cvti; + r = UINT128_MAX; + } + break; + + default: + g_assert_not_reached(); + } + + float_raise(flags, s); + return r; +} + uint8_t float16_to_uint8(float16 a, float_status *s) { return float16_to_uint8_scalbn(a, s->float_rounding_mode, 0, s); @@ -3432,6 +3663,11 @@ uint64_t float128_to_uint64(float128 a, float_status *s) return float128_to_uint64_scalbn(a, s->float_rounding_mode, 0, s); } +Int128 float128_to_uint128(float128 a, float_status *s) +{ + return float128_to_uint128_scalbn(a, s->float_rounding_mode, 0, s); +} + uint16_t float16_to_uint16_round_to_zero(float16 a, float_status *s) { return float16_to_uint16_scalbn(a, float_round_to_zero, 0, s); @@ -3487,6 +3723,11 @@ uint64_t float128_to_uint64_round_to_zero(float128 a, float_status *s) return float128_to_uint64_scalbn(a, float_round_to_zero, 0, s); } +Int128 float128_to_uint128_round_to_zero(float128 a, float_status *s) +{ + return float128_to_uint128_scalbn(a, float_round_to_zero, 0, s); +} + uint16_t bfloat16_to_uint16(bfloat16 a, float_status *s) { return bfloat16_to_uint16_scalbn(a, s->float_rounding_mode, 0, s); @@ -3672,6 +3913,35 @@ bfloat16 int16_to_bfloat16(int16_t a, float_status *status) return int64_to_bfloat16_scalbn(a, 0, status); } +float128 int128_to_float128(Int128 a, float_status 
*status) +{ + FloatParts128 p = { }; + int shift; + + if (int128_nz(a)) { + p.cls = float_class_normal; + if (!int128_nonneg(a)) { + p.sign = true; + a = int128_neg(a); + } + + shift = clz64(int128_gethi(a)); + if (shift == 64) { + shift += clz64(int128_getlo(a)); + } + + p.exp = 127 - shift; + a = int128_lshift(a, shift); + + p.frac_hi = int128_gethi(a); + p.frac_lo = int128_getlo(a); + } else { + p.cls = float_class_zero; + } + + return float128_round_pack_canonical(&p, status); +} + float128 int64_to_float128(int64_t a, float_status *status) { FloatParts128 p; @@ -3861,6 +4131,31 @@ float128 uint64_to_float128(uint64_t a, float_status *status) return float128_round_pack_canonical(&p, status); } +float128 uint128_to_float128(Int128 a, float_status *status) +{ + FloatParts128 p = { }; + int shift; + + if (int128_nz(a)) { + p.cls = float_class_normal; + + shift = clz64(int128_gethi(a)); + if (shift == 64) { + shift += clz64(int128_getlo(a)); + } + + p.exp = 127 - shift; + a = int128_lshift(a, shift); + + p.frac_hi = int128_gethi(a); + p.frac_lo = int128_getlo(a); + } else { + p.cls = float_class_zero; + } + + return float128_round_pack_canonical(&p, status); +} + /* * Minimum and maximum */ @@ -3927,12 +4222,14 @@ static float128 float128_minmax(float128 a, float128 b, { return type##_minmax(a, b, s, flags); } #define MINMAX_2(type) \ - MINMAX_1(type, max, 0) \ - MINMAX_1(type, maxnum, minmax_isnum) \ - MINMAX_1(type, maxnummag, minmax_isnum | minmax_ismag) \ - MINMAX_1(type, min, minmax_ismin) \ - MINMAX_1(type, minnum, minmax_ismin | minmax_isnum) \ - MINMAX_1(type, minnummag, minmax_ismin | minmax_isnum | minmax_ismag) + MINMAX_1(type, max, 0) \ + MINMAX_1(type, maxnum, minmax_isnum) \ + MINMAX_1(type, maxnummag, minmax_isnum | minmax_ismag) \ + MINMAX_1(type, maximum_number, minmax_isnumber) \ + MINMAX_1(type, min, minmax_ismin) \ + MINMAX_1(type, minnum, minmax_ismin | minmax_isnum) \ + MINMAX_1(type, minnummag, minmax_ismin | minmax_isnum | minmax_ismag) \ + MINMAX_1(type, minimum_number, minmax_ismin | minmax_isnumber) \ MINMAX_2(float16) MINMAX_2(bfloat16) @@ -4276,6 +4573,15 @@ float64 QEMU_FLATTEN float64_sqrt(float64 xa, float_status *s) return soft_f64_sqrt(ua.s, s); } +float64 float64r32_sqrt(float64 a, float_status *status) +{ + FloatParts64 p; + + float64_unpack_canonical(&p, a, status); + parts_sqrt(&p, status, &float64_params); + return float64r32_round_pack_canonical(&p, status); +} + bfloat16 QEMU_FLATTEN bfloat16_sqrt(bfloat16 a, float_status *status) { FloatParts64 p; @@ -4829,7 +5135,7 @@ float32 float32_exp2(float32 a, float_status *status) float64_unpack_canonical(&rp, float64_one, status); for (i = 0 ; i < 15 ; i++) { float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status); - rp = *parts_muladd(&tp, &xp, &rp, 0, status); + rp = *parts_muladd(&tp, &xnp, &rp, 0, status); xnp = *parts_mul(&xnp, &xp, status); } diff --git a/fsdev/9p-marshal.c b/fsdev/9p-marshal.c index a01bba6908..f9b0336cd5 100644 --- a/fsdev/9p-marshal.c +++ b/fsdev/9p-marshal.c @@ -18,6 +18,8 @@ #include "9p-marshal.h" +P9ARRAY_DEFINE_TYPE(V9fsString, v9fs_string_free); + void v9fs_string_free(V9fsString *str) { g_free(str->data); @@ -25,7 +27,7 @@ void v9fs_string_free(V9fsString *str) str->size = 0; } -void GCC_FMT_ATTR(2, 3) +void G_GNUC_PRINTF(2, 3) v9fs_string_sprintf(V9fsString *str, const char *fmt, ...) 
{ va_list ap; diff --git a/fsdev/9p-marshal.h b/fsdev/9p-marshal.h index ceaf2f521e..f1abbe151c 100644 --- a/fsdev/9p-marshal.h +++ b/fsdev/9p-marshal.h @@ -1,10 +1,13 @@ #ifndef QEMU_9P_MARSHAL_H #define QEMU_9P_MARSHAL_H +#include "p9array.h" + typedef struct V9fsString { uint16_t size; char *data; } V9fsString; +P9ARRAY_DECLARE_TYPE(V9fsString); typedef struct V9fsQID { uint8_t type; diff --git a/fsdev/file-op-9p.h b/fsdev/file-op-9p.h index 42f677cf38..4997677460 100644 --- a/fsdev/file-op-9p.h +++ b/fsdev/file-op-9p.h @@ -16,8 +16,16 @@ #include #include -#include #include "qemu-fsdev-throttle.h" +#include "p9array.h" + +#ifdef CONFIG_LINUX +# include +#endif +#ifdef CONFIG_DARWIN +# include +# include +#endif #define SM_LOCAL_MODE_BITS 0600 #define SM_LOCAL_DIR_MODE_BITS 0700 @@ -105,6 +113,7 @@ struct V9fsPath { uint16_t size; char *data; }; +P9ARRAY_DECLARE_TYPE(V9fsPath); typedef union V9fsFidOpenState V9fsFidOpenState; diff --git a/fsdev/meson.build b/fsdev/meson.build index adf57cc43e..b632b66348 100644 --- a/fsdev/meson.build +++ b/fsdev/meson.build @@ -7,6 +7,7 @@ fsdev_ss.add(when: ['CONFIG_FSDEV_9P'], if_true: files( 'qemu-fsdev.c', ), if_false: files('qemu-fsdev-dummy.c')) softmmu_ss.add_all(when: 'CONFIG_LINUX', if_true: fsdev_ss) +softmmu_ss.add_all(when: 'CONFIG_DARWIN', if_true: fsdev_ss) if have_virtfs_proxy_helper executable('virtfs-proxy-helper', diff --git a/fsdev/p9array.h b/fsdev/p9array.h new file mode 100644 index 0000000000..90e83a7c7b --- /dev/null +++ b/fsdev/p9array.h @@ -0,0 +1,164 @@ +/* + * P9Array - deep auto free C-array + * + * Copyright (c) 2021 Crudebyte + * + * Authors: + * Christian Schoenebeck + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef QEMU_P9ARRAY_H +#define QEMU_P9ARRAY_H + +#include "qemu/compiler.h" + +/** + * P9Array provides a mechanism to access arrays in common C-style (e.g. by + * square bracket [] operator) in conjunction with reference variables that + * perform deep auto free of the array when leaving the scope of the auto + * reference variable. That means not only is the array itself automatically + * freed, but also memory dynamically allocated by the individual array + * elements. 
+ * + * Example: + * + * Consider the following user struct @c Foo which shall be used as scalar + * (element) type of an array: + * @code + * typedef struct Foo { + * int i; + * char *s; + * } Foo; + * @endcode + * and assume it has the following function to free memory allocated by @c Foo + * instances: + * @code + * void free_foo(Foo *foo) { + * free(foo->s); + * } + * @endcode + * Add the following to a shared header file: + * @code + * P9ARRAY_DECLARE_TYPE(Foo); + * @endcode + * and the following to a C unit file: + * @code + * P9ARRAY_DEFINE_TYPE(Foo, free_foo); + * @endcode + * Finally the array may then be used like this: + * @code + * void doSomething(size_t n) { + * P9ARRAY_REF(Foo) foos = NULL; + * P9ARRAY_NEW(Foo, foos, n); + * for (size_t i = 0; i < n; ++i) { + * foos[i].i = i; + * foos[i].s = calloc(4096, 1); + * snprintf(foos[i].s, 4096, "foo %d", i); + * if (...) { + * return; // array auto freed here + * } + * } + * // array auto freed here + * } + * @endcode + */ + +/** + * P9ARRAY_DECLARE_TYPE() - Declares an array type for the passed @scalar_type. + * + * @scalar_type: type of the individual array elements + * + * This is typically used from a shared header file. + */ +#define P9ARRAY_DECLARE_TYPE(scalar_type) \ + typedef struct P9Array##scalar_type { \ + size_t len; \ + scalar_type first[]; \ + } P9Array##scalar_type; \ + \ + void p9array_new_##scalar_type(scalar_type **auto_var, size_t len); \ + void p9array_auto_free_##scalar_type(scalar_type **auto_var); \ + +/** + * P9ARRAY_DEFINE_TYPE() - Defines an array type for the passed @scalar_type + * and appropriate @scalar_cleanup_func. + * + * @scalar_type: type of the individual array elements + * @scalar_cleanup_func: appropriate function to free memory dynamically + * allocated by individual array elements before + * + * This is typically used from a C unit file. + */ +#define P9ARRAY_DEFINE_TYPE(scalar_type, scalar_cleanup_func) \ + void p9array_new_##scalar_type(scalar_type **auto_var, size_t len) \ + { \ + p9array_auto_free_##scalar_type(auto_var); \ + P9Array##scalar_type *arr = g_malloc0(sizeof(P9Array##scalar_type) + \ + len * sizeof(scalar_type)); \ + arr->len = len; \ + *auto_var = &arr->first[0]; \ + } \ + \ + void p9array_auto_free_##scalar_type(scalar_type **auto_var) \ + { \ + scalar_type *first = (*auto_var); \ + if (!first) { \ + return; \ + } \ + P9Array##scalar_type *arr = (P9Array##scalar_type *) ( \ + ((char *)first) - offsetof(P9Array##scalar_type, first) \ + ); \ + for (size_t i = 0; i < arr->len; ++i) { \ + scalar_cleanup_func(&arr->first[i]); \ + } \ + g_free(arr); \ + } \ + +/** + * P9ARRAY_REF() - Declare a reference variable for an array. + * + * @scalar_type: type of the individual array elements + * + * Used to declare a reference variable (unique pointer) for an array. After + * leaving the scope of the reference variable, the associated array is + * automatically freed. + */ +#define P9ARRAY_REF(scalar_type) \ + __attribute((__cleanup__(p9array_auto_free_##scalar_type))) scalar_type* + +/** + * P9ARRAY_NEW() - Allocate a new array. + * + * @scalar_type: type of the individual array elements + * @auto_var: destination reference variable + * @len: amount of array elements to be allocated immediately + * + * Allocates a new array of passed @scalar_type with @len number of array + * elements and assigns the created array to the reference variable + * @auto_var. 
+ */ +#define P9ARRAY_NEW(scalar_type, auto_var, len) \ + QEMU_BUILD_BUG_MSG( \ + !__builtin_types_compatible_p(scalar_type, typeof(*auto_var)), \ + "P9Array scalar type mismatch" \ + ); \ + p9array_new_##scalar_type((&auto_var), len) + +#endif /* QEMU_P9ARRAY_H */ diff --git a/fsdev/virtfs-proxy-helper.c b/fsdev/virtfs-proxy-helper.c index 15c0e79b06..d9511f429c 100644 --- a/fsdev/virtfs-proxy-helper.c +++ b/fsdev/virtfs-proxy-helper.c @@ -10,6 +10,7 @@ */ #include "qemu/osdep.h" +#include #include #include #include @@ -21,11 +22,11 @@ #include #endif #include -#include "qemu-common.h" #include "qemu/sockets.h" #include "qemu/xattr.h" #include "9p-iov-marshal.h" #include "hw/9pfs/9p-proxy.h" +#include "hw/9pfs/9p-util.h" #include "fsdev/9p-iov-marshal.h" #define PROGNAME "virtfs-proxy-helper" @@ -57,7 +58,7 @@ static bool is_daemon; static bool get_version; /* IOC getversion IOCTL supported */ static char *prog_name; -static void GCC_FMT_ATTR(2, 3) do_log(int loglevel, const char *format, ...) +static void G_GNUC_PRINTF(2, 3) do_log(int loglevel, const char *format, ...) { va_list ap; @@ -338,6 +339,28 @@ static void resetugid(int suid, int sgid) } } +/* + * Open regular file or directory. Attempts to open any special file are + * rejected. + * + * returns file descriptor or -1 on error + */ +static int open_regular(const char *pathname, int flags, mode_t mode) +{ + int fd; + + fd = open(pathname, flags, mode); + if (fd < 0) { + return fd; + } + + if (close_if_special_file(fd) < 0) { + return -1; + } + + return fd; +} + /* * send response in two parts * 1) ProxyHeader @@ -640,7 +663,7 @@ static int do_create_others(int type, struct iovec *iovec) if (retval < 0) { goto err_out; } - retval = mkdir(path.data, mode); + retval = g_mkdir(path.data, mode); break; case T_SYMLINK: retval = proxy_unmarshal(iovec, offset, "ss", &oldpath, &path); @@ -682,7 +705,7 @@ static int do_create(struct iovec *iovec) if (ret < 0) { goto unmarshal_err_out; } - ret = open(path.data, flags, mode); + ret = open_regular(path.data, flags, mode); if (ret < 0) { ret = -errno; } @@ -707,7 +730,7 @@ static int do_open(struct iovec *iovec) if (ret < 0) { goto err_out; } - ret = open(path.data, flags); + ret = open_regular(path.data, flags, 0); if (ret < 0) { ret = -errno; } diff --git a/gdb-xml/arm-m-profile-mve.xml b/gdb-xml/arm-m-profile-mve.xml new file mode 100644 index 0000000000..cba664c4c5 --- /dev/null +++ b/gdb-xml/arm-m-profile-mve.xml @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + diff --git a/gdb-xml/arm-neon.xml b/gdb-xml/arm-neon.xml index ce3ee03ec4..9dce0a996f 100644 --- a/gdb-xml/arm-neon.xml +++ b/gdb-xml/arm-neon.xml @@ -82,7 +82,5 @@ - - diff --git a/gdb-xml/arm-vfp-sysregs.xml b/gdb-xml/arm-vfp-sysregs.xml new file mode 100644 index 0000000000..c4aa2721c8 --- /dev/null +++ b/gdb-xml/arm-vfp-sysregs.xml @@ -0,0 +1,17 @@ + + + + + + + diff --git a/gdb-xml/arm-vfp.xml b/gdb-xml/arm-vfp.xml index b20881e9a9..ebed5b3d57 100644 --- a/gdb-xml/arm-vfp.xml +++ b/gdb-xml/arm-vfp.xml @@ -23,7 +23,5 @@ - - diff --git a/gdb-xml/arm-vfp3.xml b/gdb-xml/arm-vfp3.xml index 227afd8017..ef391c7144 100644 --- a/gdb-xml/arm-vfp3.xml +++ b/gdb-xml/arm-vfp3.xml @@ -39,7 +39,5 @@ - - diff --git a/gdb-xml/i386-32bit.xml b/gdb-xml/i386-32bit.xml index 872fcea9c2..7a66a02b67 100644 --- a/gdb-xml/i386-32bit.xml +++ b/gdb-xml/i386-32bit.xml @@ -110,7 +110,7 @@ - + diff --git a/gdb-xml/loongarch-base64.xml b/gdb-xml/loongarch-base64.xml new file mode 100644 index 0000000000..2d8a1f6b73 --- /dev/null +++ 
b/gdb-xml/loongarch-base64.xml @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/gdb-xml/loongarch-fpu.xml b/gdb-xml/loongarch-fpu.xml new file mode 100644 index 0000000000..78e42cf5dd --- /dev/null +++ b/gdb-xml/loongarch-fpu.xml @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/gdb-xml/riscv-32bit-cpu.xml b/gdb-xml/riscv-32bit-cpu.xml index 0d07aaec85..466f2c0648 100644 --- a/gdb-xml/riscv-32bit-cpu.xml +++ b/gdb-xml/riscv-32bit-cpu.xml @@ -5,13 +5,9 @@ are permitted in any medium without royalty provided the copyright notice and this notice are preserved. --> - - - + diff --git a/gdb-xml/riscv-32bit-fpu.xml b/gdb-xml/riscv-32bit-fpu.xml index 1eaae9119e..24aa087031 100644 --- a/gdb-xml/riscv-32bit-fpu.xml +++ b/gdb-xml/riscv-32bit-fpu.xml @@ -5,13 +5,9 @@ are permitted in any medium without royalty provided the copyright notice and this notice are preserved. --> - - - + @@ -43,8 +39,4 @@ - - - - diff --git a/gdb-xml/riscv-64bit-cpu.xml b/gdb-xml/riscv-64bit-cpu.xml index b8aa424ae4..c4d83de09b 100644 --- a/gdb-xml/riscv-64bit-cpu.xml +++ b/gdb-xml/riscv-64bit-cpu.xml @@ -5,13 +5,9 @@ are permitted in any medium without royalty provided the copyright notice and this notice are preserved. --> - - - + diff --git a/gdb-xml/riscv-64bit-fpu.xml b/gdb-xml/riscv-64bit-fpu.xml index 794854cc01..d0f17f9984 100644 --- a/gdb-xml/riscv-64bit-fpu.xml +++ b/gdb-xml/riscv-64bit-fpu.xml @@ -5,10 +5,6 @@ are permitted in any medium without royalty provided the copyright notice and this notice are preserved. --> - - @@ -17,7 +13,7 @@ - + @@ -49,8 +45,4 @@ - - - - diff --git a/gdbstub.c b/gdbstub/gdbstub.c similarity index 95% rename from gdbstub.c rename to gdbstub/gdbstub.c index 52bde5bdc9..be88ca0d71 100644 --- a/gdbstub.c +++ b/gdbstub/gdbstub.c @@ -24,20 +24,19 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/ctype.h" #include "qemu/cutils.h" #include "qemu/module.h" -#include "trace/trace-root.h" +#include "trace.h" +#include "exec/gdbstub.h" #ifdef CONFIG_USER_ONLY #include "qemu.h" #else #include "monitor/monitor.h" #include "chardev/char.h" #include "chardev/char-fe.h" -#include "exec/gdbstub.h" #include "hw/cpu/cluster.h" #include "hw/boards.h" #endif @@ -46,12 +45,14 @@ #include "qemu/sockets.h" #include "sysemu/hw_accel.h" -#include "sysemu/kvm.h" #include "sysemu/runstate.h" #include "semihosting/semihost.h" #include "exec/exec-all.h" +#include "exec/hwaddr.h" #include "sysemu/replay.h" +#include "internals.h" + #ifdef CONFIG_USER_ONLY #define GDB_ATTACHED "0" #else @@ -94,7 +95,7 @@ static inline int cpu_gdb_index(CPUState *cpu) { #if defined(CONFIG_USER_ONLY) TaskState *ts = (TaskState *) cpu->opaque; - return ts->ts_tid; + return ts ? ts->ts_tid : -1; #else return cpu->cpu_index + 1; #endif @@ -368,27 +369,10 @@ typedef struct GDBState { gdb_syscall_complete_cb current_syscall_cb; GString *str_buf; GByteArray *mem_buf; + int sstep_flags; + int supported_sstep_flags; } GDBState; -/* By default use no IRQs and no timers while single stepping so as to - * make single stepping like an ICE HW step. - */ -static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER; - -/* Retrieves flags for single step mode. */ -static int get_sstep_flags(void) -{ - /* - * In replay mode all events written into the log should be replayed. - * That is why NOIRQ flag is removed in this mode. 
- */ - if (replay_mode != REPLAY_MODE_NONE) { - return SSTEP_ENABLE; - } else { - return sstep_flags; - } -} - static GDBState gdbserver_state; static void init_gdbserver_state(void) @@ -399,6 +383,15 @@ static void init_gdbserver_state(void) gdbserver_state.str_buf = g_string_new(NULL); gdbserver_state.mem_buf = g_byte_array_sized_new(MAX_PACKET_LENGTH); gdbserver_state.last_packet = g_byte_array_sized_new(MAX_PACKET_LENGTH + 4); + + /* + * What single-step modes are supported is accelerator dependent. + * By default try to use no IRQs and no timers while single + * stepping so as to make single stepping like a typical ICE HW step. + */ + gdbserver_state.supported_sstep_flags = accel_supported_gdbstub_sstep_flags(); + gdbserver_state.sstep_flags = SSTEP_ENABLE | SSTEP_NOIRQ | SSTEP_NOTIMER; + gdbserver_state.sstep_flags &= gdbserver_state.supported_sstep_flags; } #ifndef CONFIG_USER_ONLY @@ -420,7 +413,7 @@ static int get_char(void) int ret; for(;;) { - ret = qemu_recv(gdbserver_state.fd, &ch, 1, 0); + ret = recv(gdbserver_state.fd, &ch, 1, 0); if (ret < 0) { if (errno == ECONNRESET) gdbserver_state.fd = -1; @@ -438,6 +431,15 @@ static int get_char(void) } #endif +/* + * Return true if there is a GDB currently connected to the stub + * and attached to a CPU + */ +static bool gdb_attached(void) +{ + return gdbserver_state.init && gdbserver_state.c_cpu; +} + static enum { GDB_SYS_UNKNOWN, GDB_SYS_ENABLED, @@ -459,8 +461,7 @@ int use_gdb_syscalls(void) /* -semihosting-config target=auto */ /* On the first call check if gdb is connected and remember. */ if (gdb_syscall_mode == GDB_SYS_UNKNOWN) { - gdb_syscall_mode = gdbserver_state.init ? - GDB_SYS_ENABLED : GDB_SYS_DISABLED; + gdb_syscall_mode = gdb_attached() ? GDB_SYS_ENABLED : GDB_SYS_DISABLED; } return gdb_syscall_mode == GDB_SYS_ENABLED; } @@ -505,7 +506,7 @@ static int gdb_continue_partial(char *newstates) CPU_FOREACH(cpu) { if (newstates[cpu->cpu_index] == 's') { trace_gdbstub_op_stepping(cpu->cpu_index); - cpu_single_step(cpu, sstep_flags); + cpu_single_step(cpu, gdbserver_state.sstep_flags); } } gdbserver_state.running_state = 1; @@ -513,7 +514,15 @@ static int gdb_continue_partial(char *newstates) int flag = 0; if (!runstate_needs_reset()) { - if (vm_prepare_start()) { + bool step_requested = false; + CPU_FOREACH(cpu) { + if (newstates[cpu->cpu_index] == 's') { + step_requested = true; + break; + } + } + + if (vm_prepare_start(step_requested)) { return 0; } @@ -524,7 +533,7 @@ static int gdb_continue_partial(char *newstates) break; /* nothing to do here */ case 's': trace_gdbstub_op_stepping(cpu->cpu_index); - cpu_single_step(cpu, get_sstep_flags()); + cpu_single_step(cpu, gdbserver_state.sstep_flags); cpu_resume(cpu); flag = 1; break; @@ -1005,130 +1014,16 @@ void gdb_register_coprocessor(CPUState *cpu, } } -#ifndef CONFIG_USER_ONLY -/* Translate GDB watchpoint type to a flags value for cpu_watchpoint_* */ -static inline int xlat_gdb_type(CPUState *cpu, int gdbtype) -{ - static const int xlat[] = { - [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE, - [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ, - [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS, - }; - - CPUClass *cc = CPU_GET_CLASS(cpu); - int cputype = xlat[gdbtype]; - - if (cc->gdb_stop_before_watchpoint) { - cputype |= BP_STOP_BEFORE_ACCESS; - } - return cputype; -} -#endif - -static int gdb_breakpoint_insert(int type, target_ulong addr, target_ulong len) -{ - CPUState *cpu; - int err = 0; - - if (kvm_enabled()) { - return kvm_insert_breakpoint(gdbserver_state.c_cpu, addr, 
len, type); - } - - switch (type) { - case GDB_BREAKPOINT_SW: - case GDB_BREAKPOINT_HW: - CPU_FOREACH(cpu) { - err = cpu_breakpoint_insert(cpu, addr, BP_GDB, NULL); - if (err) { - break; - } - } - return err; -#ifndef CONFIG_USER_ONLY - case GDB_WATCHPOINT_WRITE: - case GDB_WATCHPOINT_READ: - case GDB_WATCHPOINT_ACCESS: - CPU_FOREACH(cpu) { - err = cpu_watchpoint_insert(cpu, addr, len, - xlat_gdb_type(cpu, type), NULL); - if (err) { - break; - } - } - return err; -#endif - default: - return -ENOSYS; - } -} - -static int gdb_breakpoint_remove(int type, target_ulong addr, target_ulong len) -{ - CPUState *cpu; - int err = 0; - - if (kvm_enabled()) { - return kvm_remove_breakpoint(gdbserver_state.c_cpu, addr, len, type); - } - - switch (type) { - case GDB_BREAKPOINT_SW: - case GDB_BREAKPOINT_HW: - CPU_FOREACH(cpu) { - err = cpu_breakpoint_remove(cpu, addr, BP_GDB); - if (err) { - break; - } - } - return err; -#ifndef CONFIG_USER_ONLY - case GDB_WATCHPOINT_WRITE: - case GDB_WATCHPOINT_READ: - case GDB_WATCHPOINT_ACCESS: - CPU_FOREACH(cpu) { - err = cpu_watchpoint_remove(cpu, addr, len, - xlat_gdb_type(cpu, type)); - if (err) - break; - } - return err; -#endif - default: - return -ENOSYS; - } -} - -static inline void gdb_cpu_breakpoint_remove_all(CPUState *cpu) -{ - cpu_breakpoint_remove_all(cpu, BP_GDB); -#ifndef CONFIG_USER_ONLY - cpu_watchpoint_remove_all(cpu, BP_GDB); -#endif -} - static void gdb_process_breakpoint_remove_all(GDBProcess *p) { CPUState *cpu = get_first_cpu_in_process(p); while (cpu) { - gdb_cpu_breakpoint_remove_all(cpu); + gdb_breakpoint_remove_all(cpu); cpu = gdb_next_cpu_in_process(cpu); } } -static void gdb_breakpoint_remove_all(void) -{ - CPUState *cpu; - - if (kvm_enabled()) { - kvm_remove_all_breakpoints(gdbserver_state.c_cpu); - return; - } - - CPU_FOREACH(cpu) { - gdb_cpu_breakpoint_remove_all(cpu); - } -} static void gdb_set_cpu_pc(target_ulong pc) { @@ -1660,7 +1555,8 @@ static void handle_insert_bp(GArray *params, void *user_ctx) return; } - res = gdb_breakpoint_insert(get_param(params, 0)->val_ul, + res = gdb_breakpoint_insert(gdbserver_state.c_cpu, + get_param(params, 0)->val_ul, get_param(params, 1)->val_ull, get_param(params, 2)->val_ull); if (res >= 0) { @@ -1683,7 +1579,8 @@ static void handle_remove_bp(GArray *params, void *user_ctx) return; } - res = gdb_breakpoint_remove(get_param(params, 0)->val_ul, + res = gdb_breakpoint_remove(gdbserver_state.c_cpu, + get_param(params, 0)->val_ul, get_param(params, 1)->val_ull, get_param(params, 2)->val_ull); if (res >= 0) { @@ -1857,14 +1754,46 @@ static void handle_read_all_regs(GArray *params, void *user_ctx) static void handle_file_io(GArray *params, void *user_ctx) { if (params->len >= 1 && gdbserver_state.current_syscall_cb) { - target_ulong ret, err; + uint64_t ret; + int err; - ret = (target_ulong)get_param(params, 0)->val_ull; + ret = get_param(params, 0)->val_ull; if (params->len >= 2) { - err = (target_ulong)get_param(params, 1)->val_ull; + err = get_param(params, 1)->val_ull; } else { err = 0; } + + /* Convert GDB error numbers back to host error numbers. 
*/ +#define E(X) case GDB_E##X: err = E##X; break + switch (err) { + case 0: + break; + E(PERM); + E(NOENT); + E(INTR); + E(BADF); + E(ACCES); + E(FAULT); + E(BUSY); + E(EXIST); + E(NODEV); + E(NOTDIR); + E(ISDIR); + E(INVAL); + E(NFILE); + E(MFILE); + E(FBIG); + E(NOSPC); + E(SPIPE); + E(ROFS); + E(NAMETOOLONG); + default: + err = EINVAL; + break; + } +#undef E + gdbserver_state.current_syscall_cb(gdbserver_state.c_cpu, ret, err); gdbserver_state.current_syscall_cb = NULL; } @@ -1883,7 +1812,7 @@ static void handle_step(GArray *params, void *user_ctx) gdb_set_cpu_pc((target_ulong)get_param(params, 0)->val_ull); } - cpu_single_step(gdbserver_state.c_cpu, get_sstep_flags()); + cpu_single_step(gdbserver_state.c_cpu, gdbserver_state.sstep_flags); gdb_continue(); } @@ -2017,24 +1946,44 @@ static void handle_v_commands(GArray *params, void *user_ctx) static void handle_query_qemu_sstepbits(GArray *params, void *user_ctx) { - g_string_printf(gdbserver_state.str_buf, "ENABLE=%x,NOIRQ=%x,NOTIMER=%x", - SSTEP_ENABLE, SSTEP_NOIRQ, SSTEP_NOTIMER); + g_string_printf(gdbserver_state.str_buf, "ENABLE=%x", SSTEP_ENABLE); + + if (gdbserver_state.supported_sstep_flags & SSTEP_NOIRQ) { + g_string_append_printf(gdbserver_state.str_buf, ",NOIRQ=%x", + SSTEP_NOIRQ); + } + + if (gdbserver_state.supported_sstep_flags & SSTEP_NOTIMER) { + g_string_append_printf(gdbserver_state.str_buf, ",NOTIMER=%x", + SSTEP_NOTIMER); + } + put_strbuf(); } static void handle_set_qemu_sstep(GArray *params, void *user_ctx) { + int new_sstep_flags; + if (!params->len) { return; } - sstep_flags = get_param(params, 0)->val_ul; + new_sstep_flags = get_param(params, 0)->val_ul; + + if (new_sstep_flags & ~gdbserver_state.supported_sstep_flags) { + put_packet("E22"); + return; + } + + gdbserver_state.sstep_flags = new_sstep_flags; put_packet("OK"); } static void handle_query_qemu_sstep(GArray *params, void *user_ctx) { - g_string_printf(gdbserver_state.str_buf, "0x%x", sstep_flags); + g_string_printf(gdbserver_state.str_buf, "0x%x", + gdbserver_state.sstep_flags); put_strbuf(); } @@ -2482,7 +2431,7 @@ static void handle_target_halt(GArray *params, void *user_ctx) * because gdb is doing an initial connect and the state * should be cleaned up. */ - gdb_breakpoint_remove_all(); + gdb_breakpoint_remove_all(gdbserver_state.c_cpu); } static int gdb_handle_packet(const char *line_buf) @@ -2853,7 +2802,7 @@ void gdb_do_syscallv(gdb_syscall_complete_cb cb, const char *fmt, va_list va) target_ulong addr; uint64_t i64; - if (!gdbserver_state.init) { + if (!gdb_attached()) { return; } @@ -3138,8 +3087,12 @@ gdb_handlesig(CPUState *cpu, int sig) tb_flush(cpu); if (sig != 0) { - snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb(sig)); - put_packet(buf); + gdb_set_stop_cpu(cpu); + g_string_printf(gdbserver_state.str_buf, + "T%02xthread:", target_signal_to_gdb(sig)); + gdb_append_thread_id(cpu, gdbserver_state.str_buf); + g_string_append_c(gdbserver_state.str_buf, ';'); + put_strbuf(); } /* put_packet() might have detected that the peer terminated the connection. 
*/ @@ -3218,7 +3171,7 @@ static bool gdb_accept_socket(int gdb_fd) static int gdbserver_open_socket(const char *path) { - struct sockaddr_un sockaddr; + struct sockaddr_un sockaddr = {}; int fd, ret; fd = socket(AF_UNIX, SOCK_STREAM, 0); @@ -3247,7 +3200,7 @@ static int gdbserver_open_socket(const char *path) static bool gdb_accept_tcp(int gdb_fd) { - struct sockaddr_in sockaddr; + struct sockaddr_in sockaddr = {}; socklen_t len; int fd; @@ -3493,6 +3446,11 @@ int gdbserver_start(const char *device) return -1; } + if (!gdb_supports_guest_debug()) { + error_report("gdbstub: current accelerator doesn't support guest debugging"); + return -1; + } + if (!device) return -1; if (strcmp(device, "none") != 0) { diff --git a/gdbstub/internals.h b/gdbstub/internals.h new file mode 100644 index 0000000000..eabb0341d1 --- /dev/null +++ b/gdbstub/internals.h @@ -0,0 +1,17 @@ +/* + * gdbstub internals + * + * Copyright (c) 2022 Linaro Ltd + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef _INTERNALS_H_ +#define _INTERNALS_H_ + +bool gdb_supports_guest_debug(void); +int gdb_breakpoint_insert(CPUState *cs, int type, hwaddr addr, hwaddr len); +int gdb_breakpoint_remove(CPUState *cs, int type, hwaddr addr, hwaddr len); +void gdb_breakpoint_remove_all(CPUState *cs); + +#endif /* _INTERNALS_H_ */ diff --git a/gdbstub/meson.build b/gdbstub/meson.build new file mode 100644 index 0000000000..fc895a2c39 --- /dev/null +++ b/gdbstub/meson.build @@ -0,0 +1,9 @@ +# +# The main gdbstub still relies on per-build definitions of various +# types. The bits pushed to softmmu/user.c try to use guest agnostic +# types such as hwaddr. +# + +specific_ss.add(files('gdbstub.c')) +softmmu_ss.add(files('softmmu.c')) +user_ss.add(files('user.c')) diff --git a/gdbstub/softmmu.c b/gdbstub/softmmu.c new file mode 100644 index 0000000000..f208c6cf15 --- /dev/null +++ b/gdbstub/softmmu.c @@ -0,0 +1,51 @@ +/* + * gdb server stub - softmmu specific bits + * + * Debug integration depends on support from the individual + * accelerators so most of this involves calling the ops helpers. + * + * Copyright (c) 2022 Linaro Ltd + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "exec/gdbstub.h" +#include "exec/hwaddr.h" +#include "sysemu/cpus.h" +#include "internals.h" + +bool gdb_supports_guest_debug(void) +{ + const AccelOpsClass *ops = cpus_get_accel(); + if (ops->supports_guest_debug) { + return ops->supports_guest_debug(); + } + return false; +} + +int gdb_breakpoint_insert(CPUState *cs, int type, hwaddr addr, hwaddr len) +{ + const AccelOpsClass *ops = cpus_get_accel(); + if (ops->insert_breakpoint) { + return ops->insert_breakpoint(cs, type, addr, len); + } + return -ENOSYS; +} + +int gdb_breakpoint_remove(CPUState *cs, int type, hwaddr addr, hwaddr len) +{ + const AccelOpsClass *ops = cpus_get_accel(); + if (ops->remove_breakpoint) { + return ops->remove_breakpoint(cs, type, addr, len); + } + return -ENOSYS; +} + +void gdb_breakpoint_remove_all(CPUState *cs) +{ + const AccelOpsClass *ops = cpus_get_accel(); + if (ops->remove_all_breakpoints) { + ops->remove_all_breakpoints(cs); + } +} diff --git a/gdbstub/trace-events b/gdbstub/trace-events new file mode 100644 index 0000000000..03f0c303bf --- /dev/null +++ b/gdbstub/trace-events @@ -0,0 +1,29 @@ +# See docs/devel/tracing.rst for syntax documentation. 
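Each declaration in this new trace-events file is turned by the tracing generator into a trace_<name>() helper; the gdbstub code in this same patch already calls them through the local trace.h wrapper added just after this file. An existing call is reproduced here only for orientation:

    #include "trace.h"   /* resolves to trace/trace-gdbstub.h, as added below */

    /* e.g. in gdbstub.c when single-stepping a vCPU */
    trace_gdbstub_op_stepping(cpu->cpu_index);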
+ +# gdbstub.c +gdbstub_op_start(const char *device) "Starting gdbstub using device %s" +gdbstub_op_exiting(uint8_t code) "notifying exit with code=0x%02x" +gdbstub_op_continue(void) "Continuing all CPUs" +gdbstub_op_continue_cpu(int cpu_index) "Continuing CPU %d" +gdbstub_op_stepping(int cpu_index) "Stepping CPU %d" +gdbstub_op_extra_info(const char *info) "Thread extra info: %s" +gdbstub_hit_watchpoint(const char *type, int cpu_gdb_index, uint64_t vaddr) "Watchpoint hit, type=\"%s\" cpu=%d, vaddr=0x%" PRIx64 "" +gdbstub_hit_internal_error(void) "RUN_STATE_INTERNAL_ERROR" +gdbstub_hit_break(void) "RUN_STATE_DEBUG" +gdbstub_hit_paused(void) "RUN_STATE_PAUSED" +gdbstub_hit_shutdown(void) "RUN_STATE_SHUTDOWN" +gdbstub_hit_io_error(void) "RUN_STATE_IO_ERROR" +gdbstub_hit_watchdog(void) "RUN_STATE_WATCHDOG" +gdbstub_hit_unknown(int state) "Unknown run state=0x%x" +gdbstub_io_reply(const char *message) "Sent: %s" +gdbstub_io_binaryreply(size_t ofs, const char *line) "0x%04zx: %s" +gdbstub_io_command(const char *command) "Received: %s" +gdbstub_io_got_ack(void) "Got ACK" +gdbstub_io_got_unexpected(uint8_t ch) "Got 0x%02x when expecting ACK/NACK" +gdbstub_err_got_nack(void) "Got NACK, retransmitting" +gdbstub_err_garbage(uint8_t ch) "received garbage between packets: 0x%02x" +gdbstub_err_overrun(void) "command buffer overrun, dropping command" +gdbstub_err_invalid_repeat(uint8_t ch) "got invalid RLE count: 0x%02x" +gdbstub_err_invalid_rle(void) "got invalid RLE sequence" +gdbstub_err_checksum_invalid(uint8_t ch) "got invalid command checksum digit: 0x%02x" +gdbstub_err_checksum_incorrect(uint8_t expected, uint8_t got) "got command packet with incorrect checksum, expected=0x%02x, received=0x%02x" diff --git a/gdbstub/trace.h b/gdbstub/trace.h new file mode 100644 index 0000000000..dee87b1238 --- /dev/null +++ b/gdbstub/trace.h @@ -0,0 +1 @@ +#include "trace/trace-gdbstub.h" diff --git a/gdbstub/user.c b/gdbstub/user.c new file mode 100644 index 0000000000..033e5fdd71 --- /dev/null +++ b/gdbstub/user.c @@ -0,0 +1,68 @@ +/* + * gdbstub user-mode helper routines. + * + * We know for user-mode we are using TCG so we can call stuff directly. 
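Each event listed in gdbstub/trace-events becomes a generated trace_<name>() helper that call sites reach through the local trace.h shim. The fragment below only mocks that generated helper with an fprintf so it stays self-contained; the real helper is produced by tracetool and honours the configured trace backends.

    #include <stdio.h>

    /*
     * Mock of the helper that tracetool would generate from the
     * "gdbstub_op_start(const char *device)" line above; the real one lives
     * in the generated trace/trace-gdbstub.h header.
     */
    #define trace_gdbstub_op_start(device) \
        fprintf(stderr, "gdbstub_op_start Starting gdbstub using device %s\n", device)

    int main(void)
    {
        /* Call sites look the same regardless of the active trace backend. */
        trace_gdbstub_op_start("tcp::1234");
        return 0;
    }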
+ * + * Copyright (c) 2022 Linaro Ltd + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "exec/hwaddr.h" +#include "exec/gdbstub.h" +#include "hw/core/cpu.h" +#include "internals.h" + +bool gdb_supports_guest_debug(void) +{ + /* user-mode == TCG == supported */ + return true; +} + +int gdb_breakpoint_insert(CPUState *cs, int type, hwaddr addr, hwaddr len) +{ + CPUState *cpu; + int err = 0; + + switch (type) { + case GDB_BREAKPOINT_SW: + case GDB_BREAKPOINT_HW: + CPU_FOREACH(cpu) { + err = cpu_breakpoint_insert(cpu, addr, BP_GDB, NULL); + if (err) { + break; + } + } + return err; + default: + /* user-mode doesn't support watchpoints */ + return -ENOSYS; + } +} + +int gdb_breakpoint_remove(CPUState *cs, int type, hwaddr addr, hwaddr len) +{ + CPUState *cpu; + int err = 0; + + switch (type) { + case GDB_BREAKPOINT_SW: + case GDB_BREAKPOINT_HW: + CPU_FOREACH(cpu) { + err = cpu_breakpoint_remove(cpu, addr, BP_GDB); + if (err) { + break; + } + } + return err; + default: + /* user-mode doesn't support watchpoints */ + return -ENOSYS; + } +} + +void gdb_breakpoint_remove_all(CPUState *cs) +{ + cpu_breakpoint_remove_all(cs, BP_GDB); +} diff --git a/hmp-commands-info.hx b/hmp-commands-info.hx index 27206ac049..754b1e8408 100644 --- a/hmp-commands-info.hx +++ b/hmp-commands-info.hx @@ -100,9 +100,11 @@ ERST { .name = "registers", - .args_type = "cpustate_all:-a", - .params = "[-a]", - .help = "show the cpu registers (-a: all - show register info for all cpus)", + .args_type = "cpustate_all:-a,vcpu:i?", + .params = "[-a|vcpu]", + .help = "show the cpu registers (-a: show register info for all cpus;" + " vcpu: specific vCPU to query; show the current CPU's registers if" + " no argument is specified)", .cmd = hmp_info_registers, }, @@ -127,21 +129,6 @@ SRST Show local APIC state ERST -#if defined(TARGET_I386) - { - .name = "ioapic", - .args_type = "", - .params = "", - .help = "show io apic state", - .cmd = hmp_info_io_apic, - }, -#endif - -SRST - ``info ioapic`` - Show io APIC state -ERST - { .name = "cpus", .args_type = "", @@ -174,7 +161,7 @@ ERST .args_type = "", .params = "", .help = "show the interrupts statistics (if available)", - .cmd = hmp_info_irq, + .cmd_info_hrt = qmp_x_query_irq, }, SRST @@ -200,7 +187,7 @@ ERST .args_type = "", .params = "", .help = "show RDMA state", - .cmd = hmp_info_rdma, + .cmd_info_hrt = qmp_x_query_rdma, }, SRST @@ -340,7 +327,7 @@ ERST .args_type = "", .params = "", .help = "show NUMA information", - .cmd = hmp_info_numa, + .cmd_info_hrt = qmp_x_query_numa, }, SRST @@ -353,7 +340,7 @@ ERST .args_type = "", .params = "", .help = "show guest USB devices", - .cmd = hmp_info_usb, + .cmd_info_hrt = qmp_x_query_usb, }, SRST @@ -373,13 +360,15 @@ SRST Show host USB devices. ERST +#if defined(CONFIG_TCG) { .name = "profile", .args_type = "", .params = "", .help = "show profiling information", - .cmd = hmp_info_profile, + .cmd_info_hrt = qmp_x_query_profile, }, +#endif SRST ``info profile`` @@ -609,7 +598,7 @@ ERST .args_type = "", .params = "", .help = "show roms", - .cmd = hmp_info_roms, + .cmd_info_hrt = qmp_x_query_roms, }, SRST @@ -787,7 +776,7 @@ ERST .args_type = "", .params = "", .help = "Display system ramblock information", - .cmd = hmp_info_ramblock, + .cmd_info_hrt = qmp_x_query_ramblock, }, SRST @@ -877,3 +866,130 @@ SRST ``info dirty_rate`` Display the vcpu dirty rate information. 
ERST + + { + .name = "vcpu_dirty_limit", + .args_type = "", + .params = "", + .help = "show dirty page limit information of all vCPU", + .cmd = hmp_info_vcpu_dirty_limit, + }, + +SRST + ``info vcpu_dirty_limit`` + Display the vcpu dirty page limit information. +ERST + +#if defined(TARGET_I386) + { + .name = "sgx", + .args_type = "", + .params = "", + .help = "show intel SGX information", + .cmd = hmp_info_sgx, + }, +#endif + +SRST + ``info sgx`` + Show intel SGX information. +ERST + +#if defined(CONFIG_MOS6522) + { + .name = "via", + .args_type = "", + .params = "", + .help = "show guest mos6522 VIA devices", + .cmd = hmp_info_via, + }, +#endif + +SRST + ``info via`` + Show guest mos6522 VIA devices. +ERST + + { + .name = "stats", + .args_type = "target:s,names:s?,provider:s?", + .params = "target [names] [provider]", + .help = "show statistics for the given target (vm or vcpu); optionally filter by" + "name (comma-separated list, or * for all) and provider", + .cmd = hmp_info_stats, + }, + +SRST + ``stats`` + Show runtime-collected statistics +ERST + + { + .name = "virtio", + .args_type = "", + .params = "", + .help = "List all available virtio devices", + .cmd = hmp_virtio_query, + .flags = "p", + }, + +SRST + ``info virtio`` + List all available virtio devices +ERST + + { + .name = "virtio-status", + .args_type = "path:s", + .params = "path", + .help = "Display status of a given virtio device", + .cmd = hmp_virtio_status, + .flags = "p", + }, + +SRST + ``info virtio-status`` *path* + Display status of a given virtio device +ERST + + { + .name = "virtio-queue-status", + .args_type = "path:s,queue:i", + .params = "path queue", + .help = "Display status of a given virtio queue", + .cmd = hmp_virtio_queue_status, + .flags = "p", + }, + +SRST + ``info virtio-queue-status`` *path* *queue* + Display status of a given virtio queue +ERST + + { + .name = "virtio-vhost-queue-status", + .args_type = "path:s,queue:i", + .params = "path queue", + .help = "Display status of a given vhost queue", + .cmd = hmp_vhost_queue_status, + .flags = "p", + }, + +SRST + ``info virtio-vhost-queue-status`` *path* *queue* + Display status of a given vhost queue +ERST + + { + .name = "virtio-queue-element", + .args_type = "path:s,queue:i,index:i?", + .params = "path queue [index]", + .help = "Display element of a given virtio queue", + .cmd = hmp_virtio_queue_element, + .flags = "p", + }, + +SRST + ``info virtio-queue-element`` *path* *queue* [*index*] + Display element of a given virtio queue +ERST diff --git a/hmp-commands.hx b/hmp-commands.hx index 8e45bce2cd..673e39a697 100644 --- a/hmp-commands.hx +++ b/hmp-commands.hx @@ -78,6 +78,7 @@ ERST .help = "resize a block image", .cmd = hmp_block_resize, .coroutine = true, + .flags = "p", }, SRST @@ -94,6 +95,7 @@ ERST .params = "device [speed [base]]", .help = "copy data from a backing file into a block device", .cmd = hmp_block_stream, + .flags = "p", }, SRST @@ -107,6 +109,7 @@ ERST .params = "device speed", .help = "set maximum speed for a background block operation", .cmd = hmp_block_job_set_speed, + .flags = "p", }, SRST @@ -122,6 +125,7 @@ ERST "\n\t\t\t if you want to abort the operation immediately" "\n\t\t\t instead of keep running until data is in sync)", .cmd = hmp_block_job_cancel, + .flags = "p", }, SRST @@ -135,6 +139,7 @@ ERST .params = "device", .help = "stop an active background block operation", .cmd = hmp_block_job_complete, + .flags = "p", }, SRST @@ -149,6 +154,7 @@ ERST .params = "device", .help = "pause an active background block operation", 
.cmd = hmp_block_job_pause, + .flags = "p", }, SRST @@ -162,6 +168,7 @@ ERST .params = "device", .help = "resume a paused background block operation", .cmd = hmp_block_job_resume, + .flags = "p", }, SRST @@ -202,9 +209,9 @@ ERST { .name = "change", - .args_type = "device:B,target:F,arg:s?,read-only-mode:s?", - .params = "device filename [format [read-only-mode]]", - .help = "change a removable medium, optional format", + .args_type = "device:B,force:-f,target:F,arg:s?,read-only-mode:s?", + .params = "device [-f] filename [format [read-only-mode]]", + .help = "change a removable medium, optional format, use -f to force the operation", .cmd = hmp_change, }, @@ -212,11 +219,14 @@ SRST ``change`` *device* *setting* Change the configuration of a device. - ``change`` *diskdevice* *filename* [*format* [*read-only-mode*]] + ``change`` *diskdevice* [-f] *filename* [*format* [*read-only-mode*]] Change the medium for a removable disk device to point to *filename*. eg:: (qemu) change ide1-cd0 /path/to/some.iso + ``-f`` + forces the operation even if the guest has locked the tray. + *format* is optional. *read-only-mode* may be used to change the read-only status of the device. @@ -244,11 +254,12 @@ ERST { .name = "screendump", - .args_type = "filename:F,device:s?,head:i?", - .params = "filename [device [head]]", - .help = "save screen from head 'head' of display device 'device' " - "into PPM image 'filename'", - .cmd = hmp_screendump, + .args_type = "filename:F,format:-fs,device:s?,head:i?", + .params = "filename [-f format] [device [head]]", + .help = "save screen from head 'head' of display device 'device'" + "in specified format 'format' as image 'filename'." + "Currently only 'png' and 'ppm' formats are supported.", + .cmd = hmp_screendump, .coroutine = true, }, @@ -382,7 +393,7 @@ SRST ERST { - .name = "stop", + .name = "stop|s", .args_type = "", .params = "", .help = "stop emulation", @@ -390,7 +401,7 @@ ERST }, SRST -``stop`` +``stop`` or ``s`` Stop emulation. 
ERST @@ -1064,7 +1075,7 @@ ERST "-l: dump in kdump-compressed format, with lzo compression.\n\t\t\t" "-s: dump in kdump-compressed format, with snappy compression.\n\t\t\t" "-w: dump in Windows crashdump format (can be used instead of ELF-dump converting),\n\t\t\t" - " for Windows x64 guests with vmcoreinfo driver only.\n\t\t\t" + " for Windows x86 and x64 guests with vmcoreinfo driver only.\n\t\t\t" "begin: the starting physical address.\n\t\t\t" "length: the memory size, in bytes.", .cmd = hmp_dump_guest_memory, @@ -1265,7 +1276,11 @@ ERST { .name = "netdev_add", .args_type = "netdev:O", - .params = "[user|tap|socket|vde|bridge|hubport|netmap|vhost-user],id=str[,prop=value][,...]", + .params = "[user|tap|socket|stream|dgram|vde|bridge|hubport|netmap|vhost-user" +#ifdef CONFIG_VMNET + "|vmnet-host|vmnet-shared|vmnet-bridged" +#endif + "],id=str[,prop=value][,...]", .help = "add host network device", .cmd = hmp_netdev_add, .command_completion = netdev_add_completion, @@ -1398,6 +1413,7 @@ ERST .params = "nbd_server_start [-a] [-w] host:port", .help = "serve block devices on the given host and port", .cmd = hmp_nbd_server_start, + .flags = "p", }, SRST ``nbd_server_start`` *host*:*port* @@ -1413,6 +1429,7 @@ ERST .params = "nbd_server_add [-w] device [name]", .help = "export a block device via NBD", .cmd = hmp_nbd_server_add, + .flags = "p", }, SRST ``nbd_server_add`` *device* [ *name* ] @@ -1428,6 +1445,7 @@ ERST .params = "nbd_server_remove [-f] name", .help = "remove an export previously exposed via NBD", .cmd = hmp_nbd_server_remove, + .flags = "p", }, SRST ``nbd_server_remove [-f]`` *name* @@ -1444,6 +1462,7 @@ ERST .params = "nbd_server_stop", .help = "stop serving block devices using the NBD protocol", .cmd = hmp_nbd_server_stop, + .flags = "p", }, SRST ``nbd_server_stop`` @@ -1473,6 +1492,7 @@ ERST .params = "getfd name", .help = "receive a file descriptor via SCM rights and assign it a name", .cmd = hmp_getfd, + .flags = "p", }, SRST @@ -1488,6 +1508,7 @@ ERST .params = "closefd name", .help = "close a file descriptor previously passed via SCM rights", .cmd = hmp_closefd, + .flags = "p", }, SRST @@ -1503,6 +1524,7 @@ ERST .params = "device bps bps_rd bps_wr iops iops_rd iops_wr", .help = "change I/O throttle limits for a block drive", .cmd = hmp_block_set_io_throttle, + .flags = "p", }, SRST @@ -1514,34 +1536,35 @@ ERST { .name = "set_password", - .args_type = "protocol:s,password:s,connected:s?", - .params = "protocol password action-if-connected", + .args_type = "protocol:s,password:s,display:-ds,connected:s?", + .params = "protocol password [-d display] [action-if-connected]", .help = "set spice/vnc password", .cmd = hmp_set_password, }, SRST -``set_password [ vnc | spice ] password [ action-if-connected ]`` - Change spice/vnc password. Use zero to make the password stay valid - forever. *action-if-connected* specifies what should happen in - case a connection is established: *fail* makes the password change - fail. *disconnect* changes the password and disconnects the - client. *keep* changes the password and keeps the connection up. - *keep* is the default. +``set_password [ vnc | spice ] password [ -d display ] [ action-if-connected ]`` + Change spice/vnc password. *display* can be used with 'vnc' to specify + which display to set the password on. *action-if-connected* specifies + what should happen in case a connection is established: *fail* makes + the password change fail. *disconnect* changes the password and + disconnects the client. 
*keep* changes the password and keeps the + connection up. *keep* is the default. ERST { .name = "expire_password", - .args_type = "protocol:s,time:s", - .params = "protocol time", + .args_type = "protocol:s,time:s,display:-ds", + .params = "protocol time [-d display]", .help = "set spice/vnc password expire-time", .cmd = hmp_expire_password, }, SRST -``expire_password [ vnc | spice ]`` *expire-time* - Specify when a password for spice/vnc becomes - invalid. *expire-time* accepts: +``expire_password [ vnc | spice ] expire-time [ -d display ]`` + Specify when a password for spice/vnc becomes invalid. + *display* behaves the same as in ``set_password``. + *expire-time* accepts: ``now`` Invalidate password instantly. @@ -1720,13 +1743,13 @@ SRST ERST { - .name = "info", - .args_type = "item:s?", - .params = "[subcommand]", - .help = "show various information about the system state", - .cmd = hmp_info_help, - .sub_table = hmp_info_cmds, - .flags = "p", + .name = "calc_dirty_rate", + .args_type = "dirty_ring:-r,dirty_bitmap:-b,second:l,sample_pages_per_GB:l?", + .params = "[-r] [-b] second [sample_pages_per_GB]", + .help = "start a round of guest dirty rate measurement (using -r to" + "\n\t\t\t specify dirty ring as the method of calculation and" + "\n\t\t\t -b to specify dirty bitmap as method of calculation)", + .cmd = hmp_calc_dirty_rate, }, SRST @@ -1737,9 +1760,58 @@ SRST ERST { - .name = "calc_dirty_rate", - .args_type = "second:l,sample_pages_per_GB:l?", - .params = "second [sample_pages_per_GB]", - .help = "start a round of guest dirty rate measurement", - .cmd = hmp_calc_dirty_rate, + .name = "set_vcpu_dirty_limit", + .args_type = "dirty_rate:l,cpu_index:l?", + .params = "dirty_rate [cpu_index]", + .help = "set dirty page rate limit, use cpu_index to set limit" + "\n\t\t\t\t\t on a specified virtual cpu", + .cmd = hmp_set_vcpu_dirty_limit, }, + +SRST +``set_vcpu_dirty_limit`` + Set dirty page rate limit on virtual CPU, the information about all the + virtual CPU dirty limit status can be observed with ``info vcpu_dirty_limit`` + command. +ERST + + { + .name = "cancel_vcpu_dirty_limit", + .args_type = "cpu_index:l?", + .params = "[cpu_index]", + .help = "cancel dirty page rate limit, use cpu_index to cancel" + "\n\t\t\t\t\t limit on a specified virtual cpu", + .cmd = hmp_cancel_vcpu_dirty_limit, + }, + +SRST +``cancel_vcpu_dirty_limit`` + Cancel dirty page rate limit on virtual CPU, the information about all the + virtual CPU dirty limit status can be observed with ``info vcpu_dirty_limit`` + command. +ERST + + { + .name = "info", + .args_type = "item:s?", + .params = "[subcommand]", + .help = "show various information about the system state", + .cmd = hmp_info_help, + .sub_table = hmp_info_cmds, + .flags = "p", + }, + +#if defined(CONFIG_FDT) + { + .name = "dumpdtb", + .args_type = "filename:F", + .params = "filename", + .help = "dump the FDT in dtb format to 'filename'", + .cmd = hmp_dumpdtb, + }, + +SRST +``dumpdtb`` *filename* + Dump the FDT in dtb format to *filename*. 
+ERST +#endif diff --git a/hw/9pfs/9p-local.c b/hw/9pfs/9p-local.c index 210d9e7705..d42ce6d8b8 100644 --- a/hw/9pfs/9p-local.c +++ b/hw/9pfs/9p-local.c @@ -32,10 +32,12 @@ #include "qemu/error-report.h" #include "qemu/option.h" #include +#ifdef CONFIG_LINUX #include #ifdef CONFIG_LINUX_MAGIC_H #include #endif +#endif #include #ifndef XFS_SUPER_MAGIC @@ -560,6 +562,15 @@ again: if (!entry) { return NULL; } +#ifdef CONFIG_DARWIN + int off; + off = telldir(fs->dir.stream); + /* If telldir fails, fail the entire readdir call */ + if (off < 0) { + return NULL; + } + entry->d_seekoff = off; +#endif if (ctx->export_flags & V9FS_SM_MAPPED) { entry->d_type = DT_UNKNOWN; @@ -671,7 +682,7 @@ static int local_mknod(FsContext *fs_ctx, V9fsPath *dir_path, if (fs_ctx->export_flags & V9FS_SM_MAPPED || fs_ctx->export_flags & V9FS_SM_MAPPED_FILE) { - err = mknodat(dirfd, name, fs_ctx->fmode | S_IFREG, 0); + err = qemu_mknodat(dirfd, name, fs_ctx->fmode | S_IFREG, 0); if (err == -1) { goto out; } @@ -686,7 +697,7 @@ static int local_mknod(FsContext *fs_ctx, V9fsPath *dir_path, } } else if (fs_ctx->export_flags & V9FS_SM_PASSTHROUGH || fs_ctx->export_flags & V9FS_SM_NONE) { - err = mknodat(dirfd, name, credp->fc_mode, credp->fc_rdev); + err = qemu_mknodat(dirfd, name, credp->fc_mode, credp->fc_rdev); if (err == -1) { goto out; } @@ -779,16 +790,20 @@ static int local_fstat(FsContext *fs_ctx, int fid_type, mode_t tmp_mode; dev_t tmp_dev; - if (fgetxattr(fd, "user.virtfs.uid", &tmp_uid, sizeof(uid_t)) > 0) { + if (qemu_fgetxattr(fd, "user.virtfs.uid", + &tmp_uid, sizeof(uid_t)) > 0) { stbuf->st_uid = le32_to_cpu(tmp_uid); } - if (fgetxattr(fd, "user.virtfs.gid", &tmp_gid, sizeof(gid_t)) > 0) { + if (qemu_fgetxattr(fd, "user.virtfs.gid", + &tmp_gid, sizeof(gid_t)) > 0) { stbuf->st_gid = le32_to_cpu(tmp_gid); } - if (fgetxattr(fd, "user.virtfs.mode", &tmp_mode, sizeof(mode_t)) > 0) { + if (qemu_fgetxattr(fd, "user.virtfs.mode", + &tmp_mode, sizeof(mode_t)) > 0) { stbuf->st_mode = le32_to_cpu(tmp_mode); } - if (fgetxattr(fd, "user.virtfs.rdev", &tmp_dev, sizeof(dev_t)) > 0) { + if (qemu_fgetxattr(fd, "user.virtfs.rdev", + &tmp_dev, sizeof(dev_t)) > 0) { stbuf->st_rdev = le64_to_cpu(tmp_dev); } } else if (fs_ctx->export_flags & V9FS_SM_MAPPED_FILE) { diff --git a/hw/9pfs/9p-posix-acl.c b/hw/9pfs/9p-posix-acl.c index eadae270dd..4b2cb3c66c 100644 --- a/hw/9pfs/9p-posix-acl.c +++ b/hw/9pfs/9p-posix-acl.c @@ -65,7 +65,11 @@ static int mp_pacl_removexattr(FsContext *ctx, int ret; ret = local_removexattr_nofollow(ctx, path, MAP_ACL_ACCESS); - if (ret == -1 && errno == ENODATA) { + /* + * macOS returns ENOATTR (!=ENODATA on macOS), whereas Linux returns + * ENODATA (==ENOATTR on Linux), so checking for ENOATTR is fine + */ + if (ret == -1 && errno == ENOATTR) { /* * We don't get ENODATA error when trying to remove a * posix acl that is not present. So don't throw the error @@ -115,7 +119,11 @@ static int mp_dacl_removexattr(FsContext *ctx, int ret; ret = local_removexattr_nofollow(ctx, path, MAP_ACL_DEFAULT); - if (ret == -1 && errno == ENODATA) { + /* + * macOS returns ENOATTR (!=ENODATA on macOS), whereas Linux returns + * ENODATA (==ENOATTR on Linux), so checking for ENOATTR is fine + */ + if (ret == -1 && errno == ENOATTR) { /* * We don't get ENODATA error when trying to remove a * posix acl that is not present. 
So don't throw the error diff --git a/hw/9pfs/9p-proxy.c b/hw/9pfs/9p-proxy.c index 09bd9f1464..99d115ff0d 100644 --- a/hw/9pfs/9p-proxy.c +++ b/hw/9pfs/9p-proxy.c @@ -18,7 +18,6 @@ #include "qemu/osdep.h" #include #include -#include "qemu-common.h" #include "9p.h" #include "qapi/error.h" #include "qemu/cutils.h" @@ -123,10 +122,16 @@ static void prstatfs_to_statfs(struct statfs *stfs, ProxyStatFS *prstfs) stfs->f_bavail = prstfs->f_bavail; stfs->f_files = prstfs->f_files; stfs->f_ffree = prstfs->f_ffree; +#ifdef CONFIG_DARWIN + /* f_namelen and f_frsize do not exist on Darwin */ + stfs->f_fsid.val[0] = prstfs->f_fsid[0] & 0xFFFFFFFFU; + stfs->f_fsid.val[1] = prstfs->f_fsid[1] >> 32 & 0xFFFFFFFFU; +#else stfs->f_fsid.__val[0] = prstfs->f_fsid[0] & 0xFFFFFFFFU; stfs->f_fsid.__val[1] = prstfs->f_fsid[1] >> 32 & 0xFFFFFFFFU; stfs->f_namelen = prstfs->f_namelen; stfs->f_frsize = prstfs->f_frsize; +#endif } /* Converts proxy_stat structure to VFS stat structure */ @@ -143,12 +148,24 @@ static void prstat_to_stat(struct stat *stbuf, ProxyStat *prstat) stbuf->st_size = prstat->st_size; stbuf->st_blksize = prstat->st_blksize; stbuf->st_blocks = prstat->st_blocks; - stbuf->st_atim.tv_sec = prstat->st_atim_sec; - stbuf->st_atim.tv_nsec = prstat->st_atim_nsec; + stbuf->st_atime = prstat->st_atim_sec; stbuf->st_mtime = prstat->st_mtim_sec; - stbuf->st_mtim.tv_nsec = prstat->st_mtim_nsec; stbuf->st_ctime = prstat->st_ctim_sec; +#ifdef CONFIG_DARWIN + stbuf->st_atimespec.tv_sec = prstat->st_atim_sec; + stbuf->st_mtimespec.tv_sec = prstat->st_mtim_sec; + stbuf->st_ctimespec.tv_sec = prstat->st_ctim_sec; + stbuf->st_atimespec.tv_nsec = prstat->st_atim_nsec; + stbuf->st_mtimespec.tv_nsec = prstat->st_mtim_nsec; + stbuf->st_ctimespec.tv_nsec = prstat->st_ctim_nsec; +#else + stbuf->st_atim.tv_sec = prstat->st_atim_sec; + stbuf->st_mtim.tv_sec = prstat->st_mtim_sec; + stbuf->st_ctim.tv_sec = prstat->st_ctim_sec; + stbuf->st_atim.tv_nsec = prstat->st_atim_nsec; + stbuf->st_mtim.tv_nsec = prstat->st_mtim_nsec; stbuf->st_ctim.tv_nsec = prstat->st_ctim_nsec; +#endif } /* @@ -688,7 +705,21 @@ static off_t proxy_telldir(FsContext *ctx, V9fsFidOpenState *fs) static struct dirent *proxy_readdir(FsContext *ctx, V9fsFidOpenState *fs) { - return readdir(fs->dir.stream); + struct dirent *entry; + entry = readdir(fs->dir.stream); +#ifdef CONFIG_DARWIN + if (!entry) { + return NULL; + } + int td; + td = telldir(fs->dir.stream); + /* If telldir fails, fail the entire readdir call */ + if (td < 0) { + return NULL; + } + entry->d_seekoff = td; +#endif + return entry; } static void proxy_seekdir(FsContext *ctx, V9fsFidOpenState *fs, off_t off) @@ -1155,7 +1186,7 @@ static int proxy_parse_opts(QemuOpts *opts, FsDriverEntry *fs, Error **errp) static int proxy_init(FsContext *ctx, Error **errp) { - V9fsProxy *proxy = g_malloc(sizeof(V9fsProxy)); + V9fsProxy *proxy = g_new(V9fsProxy, 1); int sock_id; if (ctx->export_flags & V9FS_PROXY_SOCK_NAME) { diff --git a/hw/9pfs/9p-synth.c b/hw/9pfs/9p-synth.c index b38088e066..1c5813e4dd 100644 --- a/hw/9pfs/9p-synth.c +++ b/hw/9pfs/9p-synth.c @@ -49,7 +49,7 @@ static V9fsSynthNode *v9fs_add_dir_node(V9fsSynthNode *parent, int mode, /* Add directory type and remove write bits */ mode = ((mode & 0777) | S_IFDIR) & ~(S_IWUSR | S_IWGRP | S_IWOTH); - node = g_malloc0(sizeof(V9fsSynthNode)); + node = g_new0(V9fsSynthNode, 1); if (attr) { /* We are adding .. or . 
entries */ node->attr = attr; @@ -92,7 +92,7 @@ int qemu_v9fs_synth_mkdir(V9fsSynthNode *parent, int mode, } } /* Add the name */ - node = v9fs_add_dir_node(parent, mode, name, NULL, synth_node_count++); + node = v9fs_add_dir_node(parent, mode, name, NULL, ++synth_node_count); v9fs_add_dir_node(node, parent->attr->mode, "..", parent->attr, parent->attr->inode); v9fs_add_dir_node(node, node->attr->mode, ".", @@ -128,9 +128,9 @@ int qemu_v9fs_synth_add_file(V9fsSynthNode *parent, int mode, } /* Add file type and remove write bits */ mode = ((mode & 0777) | S_IFREG); - node = g_malloc0(sizeof(V9fsSynthNode)); + node = g_new0(V9fsSynthNode, 1); node->attr = &node->actual_attr; - node->attr->inode = synth_node_count++; + node->attr->inode = ++synth_node_count; node->attr->nlink = 1; node->attr->read = read; node->attr->write = write; @@ -182,7 +182,12 @@ static int synth_opendir(FsContext *ctx, V9fsSynthOpenState *synth_open; V9fsSynthNode *node = *(V9fsSynthNode **)fs_path->data; - synth_open = g_malloc(sizeof(*synth_open)); + /* + * V9fsSynthOpenState contains 'struct dirent' which have OS-specific + * properties, thus it's zero cleared on allocation here and below + * in synth_open. + */ + synth_open = g_new0(V9fsSynthOpenState, 1); synth_open->node = node; node->open_count++; fs->private = synth_open; @@ -220,9 +225,20 @@ static void synth_rewinddir(FsContext *ctx, V9fsFidOpenState *fs) static void synth_direntry(V9fsSynthNode *node, struct dirent *entry, off_t off) { - strcpy(entry->d_name, node->name); + size_t sz = strlen(node->name) + 1; + /* + * 'entry' is always inside of V9fsSynthOpenState which have NAME_MAX + * back padding. Ensure we do not overflow it. + */ + g_assert(sizeof(struct dirent) + NAME_MAX >= + offsetof(struct dirent, d_name) + sz); + memcpy(entry->d_name, node->name, sz); entry->d_ino = node->attr->inode; +#ifdef CONFIG_DARWIN + entry->d_seekoff = off + 1; +#else entry->d_off = off + 1; +#endif } static struct dirent *synth_get_dentry(V9fsSynthNode *dir, @@ -266,7 +282,7 @@ static int synth_open(FsContext *ctx, V9fsPath *fs_path, V9fsSynthOpenState *synth_open; V9fsSynthNode *node = *(V9fsSynthNode **)fs_path->data; - synth_open = g_malloc(sizeof(*synth_open)); + synth_open = g_new0(V9fsSynthOpenState, 1); synth_open->node = node; node->open_count++; fs->private = synth_open; @@ -427,7 +443,9 @@ static int synth_statfs(FsContext *s, V9fsPath *fs_path, stbuf->f_bsize = 512; stbuf->f_blocks = 0; stbuf->f_files = synth_node_count; +#ifndef CONFIG_DARWIN stbuf->f_namelen = NAME_MAX; +#endif return 0; } diff --git a/hw/9pfs/9p-synth.h b/hw/9pfs/9p-synth.h index 036d7e4a5b..eeb246f377 100644 --- a/hw/9pfs/9p-synth.h +++ b/hw/9pfs/9p-synth.h @@ -41,6 +41,11 @@ typedef struct V9fsSynthOpenState { off_t offset; V9fsSynthNode *node; struct dirent dent; + /* + * Ensure there is enough space for 'dent' above, some systems have a + * d_name size of just 1, which would cause a buffer overrun. + */ + char dent_trailing_space[NAME_MAX]; } V9fsSynthOpenState; int qemu_v9fs_synth_mkdir(V9fsSynthNode *parent, int mode, diff --git a/hw/9pfs/9p-util-darwin.c b/hw/9pfs/9p-util-darwin.c new file mode 100644 index 0000000000..95146e7354 --- /dev/null +++ b/hw/9pfs/9p-util-darwin.c @@ -0,0 +1,147 @@ +/* + * 9p utilities (Darwin Implementation) + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
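The dent_trailing_space padding added to V9fsSynthOpenState guards against systems that declare struct dirent::d_name with a nominal size of 1. A stand-alone sketch of the same buffer layout and the bound check used in synth_direntry() follows; the PaddedDirent wrapper name is illustrative.

    #include <assert.h>
    #include <dirent.h>
    #include <limits.h>
    #include <stddef.h>
    #include <string.h>

    /* dirent plus explicit room for a full-length name, as in V9fsSynthOpenState. */
    typedef struct PaddedDirent {
        struct dirent dent;
        char dent_trailing_space[NAME_MAX];
    } PaddedDirent;

    static void fill_name(PaddedDirent *pd, const char *name)
    {
        size_t sz = strlen(name) + 1;

        /* Same overflow guard as synth_direntry(): the name must fit in the padding. */
        assert(sizeof(struct dirent) + NAME_MAX >=
               offsetof(struct dirent, d_name) + sz);
        memcpy(pd->dent.d_name, name, sz);
    }

    int main(void)
    {
        PaddedDirent pd = { 0 };

        fill_name(&pd, "some-reasonably-long-synthetic-entry-name");
        return 0;
    }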
+ */ + +#include "qemu/osdep.h" +#include "qemu/xattr.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "9p-util.h" + +ssize_t fgetxattrat_nofollow(int dirfd, const char *filename, const char *name, + void *value, size_t size) +{ + int ret; + int fd = openat_file(dirfd, filename, + O_RDONLY | O_PATH_9P_UTIL | O_NOFOLLOW, 0); + if (fd == -1) { + return -1; + } + ret = fgetxattr(fd, name, value, size, 0, 0); + close_preserve_errno(fd); + return ret; +} + +ssize_t flistxattrat_nofollow(int dirfd, const char *filename, + char *list, size_t size) +{ + int ret; + int fd = openat_file(dirfd, filename, + O_RDONLY | O_PATH_9P_UTIL | O_NOFOLLOW, 0); + if (fd == -1) { + return -1; + } + ret = flistxattr(fd, list, size, 0); + close_preserve_errno(fd); + return ret; +} + +ssize_t fremovexattrat_nofollow(int dirfd, const char *filename, + const char *name) +{ + int ret; + int fd = openat_file(dirfd, filename, O_PATH_9P_UTIL | O_NOFOLLOW, 0); + if (fd == -1) { + return -1; + } + ret = fremovexattr(fd, name, 0); + close_preserve_errno(fd); + return ret; +} + +int fsetxattrat_nofollow(int dirfd, const char *filename, const char *name, + void *value, size_t size, int flags) +{ + int ret; + int fd = openat_file(dirfd, filename, O_PATH_9P_UTIL | O_NOFOLLOW, 0); + if (fd == -1) { + return -1; + } + ret = fsetxattr(fd, name, value, size, 0, flags); + close_preserve_errno(fd); + return ret; +} + +/* + * As long as mknodat is not available on macOS, this workaround + * using pthread_fchdir_np is needed. + * + * Radar filed with Apple for implementing mknodat: + * rdar://FB9862426 (https://openradar.appspot.com/FB9862426) + */ +#if defined CONFIG_PTHREAD_FCHDIR_NP + +static int create_socket_file_at_cwd(const char *filename, mode_t mode) { + int fd, err; + struct sockaddr_un addr = { + .sun_family = AF_UNIX + }; + + err = snprintf(addr.sun_path, sizeof(addr.sun_path), "./%s", filename); + if (err < 0 || err >= sizeof(addr.sun_path)) { + errno = ENAMETOOLONG; + return -1; + } + fd = socket(PF_UNIX, SOCK_DGRAM, 0); + if (fd == -1) { + return fd; + } + err = bind(fd, (struct sockaddr *) &addr, sizeof(addr)); + if (err == -1) { + goto out; + } + /* + * FIXME: Should rather be using descriptor-based fchmod() on the + * socket file descriptor above (preferably before bind() call), + * instead of path-based fchmodat(), to prevent concurrent transient + * state issues between creating the named FIFO file at bind() and + * delayed adjustment of permissions at fchmodat(). However currently + * macOS (12.x) does not support such operations on socket file + * descriptors yet. 
+ * + * Filed report with Apple: FB9997731 + */ + err = fchmodat(AT_FDCWD, filename, mode, AT_SYMLINK_NOFOLLOW); +out: + close_preserve_errno(fd); + return err; +} + +int qemu_mknodat(int dirfd, const char *filename, mode_t mode, dev_t dev) +{ + int preserved_errno, err; + + if (S_ISREG(mode) || !(mode & S_IFMT)) { + int fd = openat_file(dirfd, filename, O_CREAT, mode); + if (fd == -1) { + return fd; + } + close(fd); + return 0; + } + if (!pthread_fchdir_np) { + error_report_once("pthread_fchdir_np() not available on this version of macOS"); + errno = ENOTSUP; + return -1; + } + if (pthread_fchdir_np(dirfd) < 0) { + return -1; + } + if (S_ISSOCK(mode)) { + err = create_socket_file_at_cwd(filename, mode); + } else { + err = mknod(filename, mode, dev); + } + preserved_errno = errno; + /* Stop using the thread-local cwd */ + pthread_fchdir_np(-1); + if (err < 0) { + errno = preserved_errno; + } + return err; +} + +#endif diff --git a/hw/9pfs/9p-util.c b/hw/9pfs/9p-util-linux.c similarity index 90% rename from hw/9pfs/9p-util.c rename to hw/9pfs/9p-util-linux.c index 3221d9b498..db451b0784 100644 --- a/hw/9pfs/9p-util.c +++ b/hw/9pfs/9p-util-linux.c @@ -1,5 +1,5 @@ /* - * 9p utilities + * 9p utilities (Linux Implementation) * * Copyright IBM, Corp. 2017 * @@ -61,4 +61,10 @@ int fsetxattrat_nofollow(int dirfd, const char *filename, const char *name, ret = lsetxattr(proc_path, name, value, size, flags); g_free(proc_path); return ret; + +} + +int qemu_mknodat(int dirfd, const char *filename, mode_t mode, dev_t dev) +{ + return mknodat(dirfd, filename, mode, dev); } diff --git a/hw/9pfs/9p-util.h b/hw/9pfs/9p-util.h index 546f46dc7d..6b44e5f7a4 100644 --- a/hw/9pfs/9p-util.h +++ b/hw/9pfs/9p-util.h @@ -13,12 +13,100 @@ #ifndef QEMU_9P_UTIL_H #define QEMU_9P_UTIL_H +#include "qemu/error-report.h" + #ifdef O_PATH #define O_PATH_9P_UTIL O_PATH #else #define O_PATH_9P_UTIL 0 #endif +#if !defined(CONFIG_LINUX) + +/* + * Generates a Linux device number (a.k.a. dev_t) for given device major + * and minor numbers. + * + * To be more precise: it generates a device number in glibc's format + * (MMMM_Mmmm_mmmM_MMmm, 64 bits) actually, which is compatible with + * Linux's format (mmmM_MMmm, 32 bits), as described in . + */ +static inline uint64_t makedev_dotl(uint32_t dev_major, uint32_t dev_minor) +{ + uint64_t dev; + + // from glibc sysmacros.h: + dev = (((uint64_t) (dev_major & 0x00000fffu)) << 8); + dev |= (((uint64_t) (dev_major & 0xfffff000u)) << 32); + dev |= (((uint64_t) (dev_minor & 0x000000ffu)) << 0); + dev |= (((uint64_t) (dev_minor & 0xffffff00u)) << 12); + return dev; +} + +#endif + +/* + * Converts given device number from host's device number format to Linux + * device number format. As both the size of type dev_t and encoding of + * dev_t is system dependant, we have to convert them for Linux guests if + * host is not running Linux. + */ +static inline uint64_t host_dev_to_dotl_dev(dev_t dev) +{ +#ifdef CONFIG_LINUX + return dev; +#else + return makedev_dotl(major(dev), minor(dev)); +#endif +} + +/* Translates errno from host -> Linux if needed */ +static inline int errno_to_dotl(int err) { +#if defined(CONFIG_LINUX) + /* nothing to translate (Linux -> Linux) */ +#elif defined(CONFIG_DARWIN) + /* + * translation mandatory for macOS hosts + * + * FIXME: Only most important errnos translated here yet, this should be + * extended to as many errnos being translated as possible in future. 
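makedev_dotl() below packs major/minor numbers into glibc's 64-bit MMMM_Mmmm_mmmM_MMmm layout. The small self-check here reverses that packing (mirroring how glibc's major()/minor() unpack a dev_t) to make the bit layout explicit; it is only an illustration, not code from the patch.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same packing as makedev_dotl() in 9p-util.h (glibc dev_t layout). */
    static uint64_t makedev_dotl(uint32_t dev_major, uint32_t dev_minor)
    {
        uint64_t dev;

        dev  = (((uint64_t)(dev_major & 0x00000fffu)) << 8);
        dev |= (((uint64_t)(dev_major & 0xfffff000u)) << 32);
        dev |= (((uint64_t)(dev_minor & 0x000000ffu)) << 0);
        dev |= (((uint64_t)(dev_minor & 0xffffff00u)) << 12);
        return dev;
    }

    /* Inverse, matching how glibc's major()/minor() extract the fields. */
    static uint32_t dotl_major(uint64_t dev)
    {
        return ((dev >> 8) & 0xfffu) | ((dev >> 32) & 0xfffff000u);
    }

    static uint32_t dotl_minor(uint64_t dev)
    {
        return (dev & 0xffu) | ((dev >> 12) & 0xffffff00u);
    }

    int main(void)
    {
        uint64_t dev = makedev_dotl(259, 7);

        assert(dotl_major(dev) == 259 && dotl_minor(dev) == 7);
        printf("0x%016llx\n", (unsigned long long)dev);
        return 0;
    }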
+ */ + if (err == ENAMETOOLONG) { + err = 36; /* ==ENAMETOOLONG on Linux */ + } else if (err == ENOTEMPTY) { + err = 39; /* ==ENOTEMPTY on Linux */ + } else if (err == ELOOP) { + err = 40; /* ==ELOOP on Linux */ + } else if (err == ENOATTR) { + err = 61; /* ==ENODATA on Linux */ + } else if (err == ENOTSUP) { + err = 95; /* ==EOPNOTSUPP on Linux */ + } else if (err == EOPNOTSUPP) { + err = 95; /* ==EOPNOTSUPP on Linux */ + } +#else +#error Missing errno translation to Linux for this host system +#endif + return err; +} + +#ifdef CONFIG_DARWIN +#define qemu_fgetxattr(...) fgetxattr(__VA_ARGS__, 0, 0) +#define qemu_lgetxattr(...) getxattr(__VA_ARGS__, 0, XATTR_NOFOLLOW) +#define qemu_llistxattr(...) listxattr(__VA_ARGS__, XATTR_NOFOLLOW) +#define qemu_lremovexattr(...) removexattr(__VA_ARGS__, XATTR_NOFOLLOW) +static inline int qemu_lsetxattr(const char *path, const char *name, + const void *value, size_t size, int flags) { + return setxattr(path, name, value, size, 0, flags | XATTR_NOFOLLOW); +} +#else +#define qemu_fgetxattr fgetxattr +#define qemu_lgetxattr lgetxattr +#define qemu_llistxattr llistxattr +#define qemu_lremovexattr lremovexattr +#define qemu_lsetxattr lsetxattr +#endif + static inline void close_preserve_errno(int fd) { int serrno = errno; @@ -26,6 +114,38 @@ static inline void close_preserve_errno(int fd) errno = serrno; } +/** + * close_if_special_file() - Close @fd if neither regular file nor directory. + * + * @fd: file descriptor of open file + * Return: 0 on regular file or directory, -1 otherwise + * + * CVE-2023-2861: Prohibit opening any special file directly on host + * (especially device files), as a compromised client could potentially gain + * access outside exported tree under certain, unsafe setups. We expect + * client to handle I/O on special files exclusively on guest side. + */ +static inline int close_if_special_file(int fd) +{ + struct stat stbuf; + + if (fstat(fd, &stbuf) < 0) { + close_preserve_errno(fd); + return -1; + } + if (!S_ISREG(stbuf.st_mode) && !S_ISDIR(stbuf.st_mode)) { + error_report_once( + "9p: broken or compromised client detected; attempt to open " + "special file (i.e. neither regular file, nor directory)" + ); + close(fd); + errno = ENXIO; + return -1; + } + + return 0; +} + static inline int openat_dir(int dirfd, const char *name) { return openat(dirfd, name, @@ -37,10 +157,13 @@ static inline int openat_file(int dirfd, const char *name, int flags, { int fd, serrno, ret; +#ifndef CONFIG_DARWIN again: +#endif fd = openat(dirfd, name, flags | O_NOFOLLOW | O_NOCTTY | O_NONBLOCK, mode); if (fd == -1) { +#ifndef CONFIG_DARWIN if (errno == EPERM && (flags & O_NOATIME)) { /* * The client passed O_NOATIME but we lack permissions to honor it. @@ -53,6 +176,11 @@ again: flags &= ~O_NOATIME; goto again; } +#endif + return -1; + } + + if (close_if_special_file(fd) < 0) { return -1; } @@ -78,4 +206,61 @@ ssize_t flistxattrat_nofollow(int dirfd, const char *filename, ssize_t fremovexattrat_nofollow(int dirfd, const char *filename, const char *name); +/* + * Darwin has d_seekoff, which appears to function similarly to d_off. + * However, it does not appear to be supported on all file systems, + * so ensure it is manually injected earlier and call here when + * needed. + */ +static inline off_t qemu_dirent_off(struct dirent *dent) +{ +#ifdef CONFIG_DARWIN + return dent->d_seekoff; +#else + return dent->d_off; +#endif +} + +/** + * qemu_dirent_dup() - Duplicate directory entry @dent. 
+ * + * @dent: original directory entry to be duplicated + * Return: duplicated directory entry which should be freed with g_free() + * + * It is highly recommended to use this function instead of open coding + * duplication of dirent objects, because the actual struct dirent + * size may be bigger or shorter than sizeof(struct dirent) and correct + * handling is platform specific (see gitlab issue #841). + */ +static inline struct dirent *qemu_dirent_dup(struct dirent *dent) +{ + size_t sz = 0; +#if defined _DIRENT_HAVE_D_RECLEN + /* Avoid use of strlen() if platform supports d_reclen. */ + sz = dent->d_reclen; +#endif + /* + * Test sz for zero even if d_reclen is available + * because some drivers may set d_reclen to zero. + */ + if (sz == 0) { + /* Fallback to the most portable way. */ + sz = offsetof(struct dirent, d_name) + + strlen(dent->d_name) + 1; + } + return g_memdup(dent, sz); +} + +/* + * As long as mknodat is not available on macOS, this workaround + * using pthread_fchdir_np is needed. qemu_mknodat is defined in + * os-posix.c. pthread_fchdir_np is weakly linked here as a guard + * in case it disappears in future macOS versions, because it is + * is a private API. + */ +#if defined CONFIG_DARWIN && defined CONFIG_PTHREAD_FCHDIR_NP +int pthread_fchdir_np(int fd) __attribute__((weak_import)); +#endif +int qemu_mknodat(int dirfd, const char *filename, mode_t mode, dev_t dev); + #endif diff --git a/hw/9pfs/9p-xattr-user.c b/hw/9pfs/9p-xattr-user.c index f2ae9582e6..535677ed60 100644 --- a/hw/9pfs/9p-xattr-user.c +++ b/hw/9pfs/9p-xattr-user.c @@ -27,7 +27,7 @@ static ssize_t mp_user_getxattr(FsContext *ctx, const char *path, { if (strncmp(name, "user.virtfs.", 12) == 0) { /* - * Don't allow fetch of user.virtfs namesapce + * Don't allow fetch of user.virtfs namespace * in case of mapped security */ errno = ENOATTR; @@ -49,7 +49,7 @@ static ssize_t mp_user_listxattr(FsContext *ctx, const char *path, name_size -= 12; } else { /* - * Don't allow fetch of user.virtfs namesapce + * Don't allow fetch of user.virtfs namespace * in case of mapped security */ return 0; @@ -74,7 +74,7 @@ static int mp_user_setxattr(FsContext *ctx, const char *path, const char *name, { if (strncmp(name, "user.virtfs.", 12) == 0) { /* - * Don't allow fetch of user.virtfs namesapce + * Don't allow fetch of user.virtfs namespace * in case of mapped security */ errno = EACCES; @@ -88,7 +88,7 @@ static int mp_user_removexattr(FsContext *ctx, { if (strncmp(name, "user.virtfs.", 12) == 0) { /* - * Don't allow fetch of user.virtfs namesapce + * Don't allow fetch of user.virtfs namespace * in case of mapped security */ errno = EACCES; diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c index 2815257f42..072cf67956 100644 --- a/hw/9pfs/9p.c +++ b/hw/9pfs/9p.c @@ -17,6 +17,11 @@ */ #include "qemu/osdep.h" +#ifdef CONFIG_LINUX +#include +#else +#include +#endif #include #include "hw/virtio/virtio.h" #include "qapi/error.h" @@ -27,12 +32,12 @@ #include "virtio-9p.h" #include "fsdev/qemu-fsdev.h" #include "9p-xattr.h" +#include "9p-util.h" #include "coth.h" #include "trace.h" #include "migration/blocker.h" #include "qemu/xxhash.h" #include -#include int open_fd_hw; int total_open_fd; @@ -50,6 +55,8 @@ enum { Oappend = 0x80, }; +P9ARRAY_DEFINE_TYPE(V9fsPath, v9fs_path_free); + static ssize_t pdu_marshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...) 
{ ssize_t ret; @@ -131,11 +138,20 @@ static int dotl_to_open_flags(int flags) { P9_DOTL_NONBLOCK, O_NONBLOCK } , { P9_DOTL_DSYNC, O_DSYNC }, { P9_DOTL_FASYNC, FASYNC }, +#ifndef CONFIG_DARWIN + { P9_DOTL_NOATIME, O_NOATIME }, + /* + * On Darwin, we could map to F_NOCACHE, which is + * similar, but doesn't quite have the same + * semantics. However, we don't support O_DIRECT + * even on linux at the moment, so we just ignore + * it here. + */ { P9_DOTL_DIRECT, O_DIRECT }, +#endif { P9_DOTL_LARGEFILE, O_LARGEFILE }, { P9_DOTL_DIRECTORY, O_DIRECTORY }, { P9_DOTL_NOFOLLOW, O_NOFOLLOW }, - { P9_DOTL_NOATIME, O_NOATIME }, { P9_DOTL_SYNC, O_SYNC }, }; @@ -164,10 +180,12 @@ static int get_dotl_openflags(V9fsState *s, int oflags) */ flags = dotl_to_open_flags(oflags); flags &= ~(O_NOCTTY | O_ASYNC | O_CREAT); +#ifndef CONFIG_DARWIN /* * Ignore direct disk access hint until the server supports it. */ flags &= ~O_DIRECT; +#endif return flags; } @@ -185,7 +203,7 @@ void v9fs_path_free(V9fsPath *path) } -void GCC_FMT_ATTR(2, 3) +void G_GNUC_PRINTF(2, 3) v9fs_path_sprintf(V9fsPath *path, const char *fmt, ...) { va_list ap; @@ -238,7 +256,8 @@ static size_t v9fs_string_size(V9fsString *str) } /* - * returns 0 if fid got re-opened, 1 if not, < 0 on error */ + * returns 0 if fid got re-opened, 1 if not, < 0 on error + */ static int coroutine_fn v9fs_reopen_fid(V9fsPDU *pdu, V9fsFidState *f) { int err = 1; @@ -264,33 +283,32 @@ static V9fsFidState *coroutine_fn get_fid(V9fsPDU *pdu, int32_t fid) V9fsFidState *f; V9fsState *s = pdu->s; - QSIMPLEQ_FOREACH(f, &s->fid_list, next) { + f = g_hash_table_lookup(s->fids, GINT_TO_POINTER(fid)); + if (f) { BUG_ON(f->clunked); - if (f->fid == fid) { - /* - * Update the fid ref upfront so that - * we don't get reclaimed when we yield - * in open later. - */ - f->ref++; - /* - * check whether we need to reopen the - * file. We might have closed the fd - * while trying to free up some file - * descriptors. - */ - err = v9fs_reopen_fid(pdu, f); - if (err < 0) { - f->ref--; - return NULL; - } - /* - * Mark the fid as referenced so that the LRU - * reclaim won't close the file descriptor - */ - f->flags |= FID_REFERENCED; - return f; + /* + * Update the fid ref upfront so that + * we don't get reclaimed when we yield + * in open later. + */ + f->ref++; + /* + * check whether we need to reopen the + * file. We might have closed the fd + * while trying to free up some file + * descriptors. 
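The fid list becomes a GHashTable keyed directly on the 32-bit fid via GINT_TO_POINTER(), turning get_fid()/clunk_fid() into O(1) lookups. A minimal, self-contained example of that GLib idiom (with a plain string payload standing in for V9fsFidState) is sketched below.

    /* Build with: gcc demo.c $(pkg-config --cflags --libs glib-2.0) */
    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        /* Direct hashing of the pointer-encoded integer key. */
        GHashTable *fids = g_hash_table_new(g_direct_hash, g_direct_equal);
        gint32 fid = 42;

        g_hash_table_insert(fids, GINT_TO_POINTER(fid), "payload for fid 42");

        const char *val = g_hash_table_lookup(fids, GINT_TO_POINTER(fid));
        printf("fid %d -> %s\n", fid, val ? val : "(absent)");

        g_hash_table_remove(fids, GINT_TO_POINTER(fid));   /* clunk */
        g_hash_table_destroy(fids);
        return 0;
    }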
+ */ + err = v9fs_reopen_fid(pdu, f); + if (err < 0) { + f->ref--; + return NULL; } + /* + * Mark the fid as referenced so that the LRU + * reclaim won't close the file descriptor + */ + f->flags |= FID_REFERENCED; + return f; } return NULL; } @@ -299,14 +317,13 @@ static V9fsFidState *alloc_fid(V9fsState *s, int32_t fid) { V9fsFidState *f; - QSIMPLEQ_FOREACH(f, &s->fid_list, next) { + f = g_hash_table_lookup(s->fids, GINT_TO_POINTER(fid)); + if (f) { /* If fid is already there return NULL */ BUG_ON(f->clunked); - if (f->fid == fid) { - return NULL; - } + return NULL; } - f = g_malloc0(sizeof(V9fsFidState)); + f = g_new0(V9fsFidState, 1); f->fid = fid; f->fid_type = P9_FID_NONE; f->ref = 1; @@ -315,7 +332,7 @@ static V9fsFidState *alloc_fid(V9fsState *s, int32_t fid) * reclaim won't close the file descriptor */ f->flags |= FID_REFERENCED; - QSIMPLEQ_INSERT_TAIL(&s->fid_list, f, next); + g_hash_table_insert(s->fids, GINT_TO_POINTER(fid), f); v9fs_readdir_init(s->proto_version, &f->fs.dir); v9fs_readdir_init(s->proto_version, &f->fs_reclaim.dir); @@ -406,12 +423,12 @@ static V9fsFidState *clunk_fid(V9fsState *s, int32_t fid) { V9fsFidState *fidp; - QSIMPLEQ_FOREACH(fidp, &s->fid_list, next) { - if (fidp->fid == fid) { - QSIMPLEQ_REMOVE(&s->fid_list, fidp, V9fsFidState, next); - fidp->clunked = true; - return fidp; - } + /* TODO: Use g_hash_table_steal_extended() instead? */ + fidp = g_hash_table_lookup(s->fids, GINT_TO_POINTER(fid)); + if (fidp) { + g_hash_table_remove(s->fids, GINT_TO_POINTER(fid)); + fidp->clunked = true; + return fidp; } return NULL; } @@ -421,10 +438,15 @@ void coroutine_fn v9fs_reclaim_fd(V9fsPDU *pdu) int reclaim_count = 0; V9fsState *s = pdu->s; V9fsFidState *f; + GHashTableIter iter; + gpointer fid; + + g_hash_table_iter_init(&iter, s->fids); + QSLIST_HEAD(, V9fsFidState) reclaim_list = QSLIST_HEAD_INITIALIZER(reclaim_list); - QSIMPLEQ_FOREACH(f, &s->fid_list, next) { + while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &f)) { /* * Unlink fids cannot be reclaimed. Check * for them and skip them. Also skip fids @@ -496,72 +518,85 @@ void coroutine_fn v9fs_reclaim_fd(V9fsPDU *pdu) } } +/* + * This is used when a path is removed from the directory tree. Any + * fids that still reference it must not be closed from then on, since + * they cannot be reopened. + */ static int coroutine_fn v9fs_mark_fids_unreclaim(V9fsPDU *pdu, V9fsPath *path) { - int err; + int err = 0; V9fsState *s = pdu->s; - V9fsFidState *fidp, *fidp_next; + V9fsFidState *fidp; + gpointer fid; + GHashTableIter iter; + /* + * The most common case is probably that we have exactly one + * fid for the given path, so preallocate exactly one. + */ + g_autoptr(GArray) to_reopen = g_array_sized_new(FALSE, FALSE, + sizeof(V9fsFidState *), 1); + gint i; - fidp = QSIMPLEQ_FIRST(&s->fid_list); - if (!fidp) { - return 0; - } + g_hash_table_iter_init(&iter, s->fids); /* - * v9fs_reopen_fid() can yield : a reference on the fid must be held - * to ensure its pointer remains valid and we can safely pass it to - * QSIMPLEQ_NEXT(). The corresponding put_fid() can also yield so - * we must keep a reference on the next fid as well. So the logic here - * is to get a reference on a fid and only put it back during the next - * iteration after we could get a reference on the next fid. Start with - * the first one. + * We iterate over the fid table looking for the entries we need + * to reopen, and store them in to_reopen. This is because + * v9fs_reopen_fid() and put_fid() yield. 
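Both v9fs_mark_fids_unreclaim() and virtfs_reset() now copy the relevant fid pointers out of the hash table before calling anything that can yield, because a coroutine yield may add or remove table entries and would invalidate a live GHashTableIter. The sketch below shows the shape of that snapshot-then-process idiom; the visit() callback is a hypothetical stand-in for work that could mutate the table.

    /* Build with: gcc demo.c $(pkg-config --cflags --libs glib-2.0) */
    #include <glib.h>
    #include <stdio.h>

    /* Hypothetical worker that, in the real code, could yield and mutate the table. */
    static void visit(gpointer value)
    {
        printf("visiting %s\n", (const char *)value);
    }

    static void process_all(GHashTable *table)
    {
        GHashTableIter iter;
        gpointer key, value;
        /* 1. Snapshot the values while nothing else can touch the table. */
        g_autoptr(GArray) snapshot = g_array_new(FALSE, FALSE, sizeof(gpointer));

        g_hash_table_iter_init(&iter, table);
        while (g_hash_table_iter_next(&iter, &key, &value)) {
            g_array_append_val(snapshot, value);
        }

        /* 2. Only now run the potentially table-mutating work. */
        for (guint i = 0; i < snapshot->len; i++) {
            visit(g_array_index(snapshot, gpointer, i));
        }
    }

    int main(void)
    {
        GHashTable *table = g_hash_table_new(g_direct_hash, g_direct_equal);

        g_hash_table_insert(table, GINT_TO_POINTER(1), "fid one");
        g_hash_table_insert(table, GINT_TO_POINTER(2), "fid two");
        process_all(table);
        g_hash_table_destroy(table);
        return 0;
    }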
This allows the fid table + * to be modified in the meantime, invalidating our iterator. */ - for (fidp->ref++; fidp; fidp = fidp_next) { + while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &fidp)) { if (fidp->path.size == path->size && !memcmp(fidp->path.data, path->data, path->size)) { - /* Mark the fid non reclaimable. */ - fidp->flags |= FID_NON_RECLAIMABLE; - - /* reopen the file/dir if already closed */ - err = v9fs_reopen_fid(pdu, fidp); - if (err < 0) { - put_fid(pdu, fidp); - return err; - } - } - - fidp_next = QSIMPLEQ_NEXT(fidp, next); - - if (fidp_next) { /* - * Ensure the next fid survives a potential clunk request during - * put_fid() below and v9fs_reopen_fid() in the next iteration. + * Ensure the fid survives a potential clunk request during + * v9fs_reopen_fid or put_fid. */ - fidp_next->ref++; + fidp->ref++; + fidp->flags |= FID_NON_RECLAIMABLE; + g_array_append_val(to_reopen, fidp); } - - /* We're done with this fid */ - put_fid(pdu, fidp); } - return 0; + for (i = 0; i < to_reopen->len; i++) { + fidp = g_array_index(to_reopen, V9fsFidState*, i); + /* reopen the file/dir if already closed */ + err = v9fs_reopen_fid(pdu, fidp); + if (err < 0) { + break; + } + } + + for (i = 0; i < to_reopen->len; i++) { + put_fid(pdu, g_array_index(to_reopen, V9fsFidState*, i)); + } + return err; } static void coroutine_fn virtfs_reset(V9fsPDU *pdu) { V9fsState *s = pdu->s; V9fsFidState *fidp; + GList *freeing; + /* + * Get a list of all the values (fid states) in the table, which + * we then... + */ + g_autoptr(GList) fids = g_hash_table_get_values(s->fids); - /* Free all fids */ - while (!QSIMPLEQ_EMPTY(&s->fid_list)) { - /* Get fid */ - fidp = QSIMPLEQ_FIRST(&s->fid_list); + /* ... remove from the table, taking over ownership. */ + g_hash_table_steal_all(s->fids); + + /* + * This allows us to release our references to them asynchronously without + * iterating over the hash table and risking iterator invalidation + * through concurrent modifications. + */ + for (freeing = fids; freeing; freeing = freeing->next) { + fidp = freeing->data; fidp->ref++; - - /* Clunk fid */ - QSIMPLEQ_REMOVE(&s->fid_list, fidp, V9fsFidState, next); fidp->clunked = true; - put_fid(pdu, fidp); } } @@ -610,8 +645,8 @@ static inline uint64_t mirror64bit(uint64_t value) ((uint64_t)mirror8bit((value >> 56) & 0xff)); } -/** - * @brief Parameter k for the Exponential Golomb algorihm to be used. +/* + * Parameter k for the Exponential Golomb algorihm to be used. * * The smaller this value, the smaller the minimum bit count for the Exp. * Golomb generated affixes will be (at lowest index) however for the @@ -624,28 +659,30 @@ static inline uint64_t mirror64bit(uint64_t value) * should be small, for a large amount of devices k might be increased * instead. The default of k=0 should be fine for most users though. * - * @b IMPORTANT: In case this ever becomes a runtime parameter; the value of + * IMPORTANT: In case this ever becomes a runtime parameter; the value of * k should not change as long as guest is still running! Because that would * cause completely different inode numbers to be generated on guest. */ #define EXP_GOLOMB_K 0 /** - * @brief Exponential Golomb algorithm for arbitrary k (including k=0). + * expGolombEncode() - Exponential Golomb algorithm for arbitrary k + * (including k=0). * - * The Exponential Golomb algorithm generates @b prefixes (@b not suffixes!) + * @n: natural number (or index) of the prefix to be generated + * (1, 2, 3, ...) + * @k: parameter k of Exp. 
Golomb algorithm to be used + * (see comment on EXP_GOLOMB_K macro for details about k) + * Return: prefix for given @n and @k + * + * The Exponential Golomb algorithm generates prefixes (NOT suffixes!) * with growing length and with the mathematical property of being * "prefix-free". The latter means the generated prefixes can be prepended * in front of arbitrary numbers and the resulting concatenated numbers are * guaranteed to be always unique. * * This is a minor adjustment to the original Exp. Golomb algorithm in the - * sense that lowest allowed index (@param n) starts with 1, not with zero. - * - * @param n - natural number (or index) of the prefix to be generated - * (1, 2, 3, ...) - * @param k - parameter k of Exp. Golomb algorithm to be used - * (see comment on EXP_GOLOMB_K macro for details about k) + * sense that lowest allowed index (@n) starts with 1, not with zero. */ static VariLenAffix expGolombEncode(uint64_t n, int k) { @@ -659,7 +696,9 @@ static VariLenAffix expGolombEncode(uint64_t n, int k) } /** - * @brief Converts a suffix into a prefix, or a prefix into a suffix. + * invertAffix() - Converts a suffix into a prefix, or a prefix into a suffix. + * @affix: either suffix or prefix to be inverted + * Return: inversion of passed @affix * * Simply mirror all bits of the affix value, for the purpose to preserve * respectively the mathematical "prefix-free" or "suffix-free" property @@ -683,16 +722,16 @@ static VariLenAffix invertAffix(const VariLenAffix *affix) } /** - * @brief Generates suffix numbers with "suffix-free" property. + * affixForIndex() - Generates suffix numbers with "suffix-free" property. + * @index: natural number (or index) of the suffix to be generated + * (1, 2, 3, ...) + * Return: Suffix suitable to assemble unique number. * * This is just a wrapper function on top of the Exp. Golomb algorithm. * * Since the Exp. Golomb algorithm generates prefixes, but we need suffixes, * this function converts the Exp. Golomb prefixes into appropriate suffixes * which are still suitable for generating unique numbers. - * - * @param n - natural number (or index) of the suffix to be generated - * (1, 2, 3, ...) */ static VariLenAffix affixForIndex(uint64_t index) { @@ -782,7 +821,7 @@ static int qid_inode_prefix_hash_bits(V9fsPDU *pdu, dev_t dev) val = qht_lookup(&pdu->s->qpd_table, &lookup, hash); if (!val) { - val = g_malloc0(sizeof(QpdEntry)); + val = g_new0(QpdEntry, 1); *val = lookup; affix = affixForIndex(pdu->s->qp_affix_next); val->prefix_bits = affix.bits; @@ -792,8 +831,8 @@ static int qid_inode_prefix_hash_bits(V9fsPDU *pdu, dev_t dev) return val->prefix_bits; } -/** - * @brief Slow / full mapping host inode nr -> guest inode nr. +/* + * Slow / full mapping host inode nr -> guest inode nr. * * This function performs a slower and much more costly remapping of an * original file inode number on host to an appropriate different inode @@ -805,7 +844,7 @@ static int qid_inode_prefix_hash_bits(V9fsPDU *pdu, dev_t dev) * qid_path_suffixmap() failed. In practice this slow / full mapping is not * expected ever to be used at all though. 
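For reference, an order-k Exponential Golomb codeword can be characterised purely by its value and bit length, which is all the affix machinery here needs. The sketch below computes those two quantities for the textbook formulation over n >= 0; it is illustrative only and not the exact expGolombEncode() implementation, which additionally shifts indices so the lowest allowed index is 1.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Number of significant bits in v (v > 0). */
    static unsigned bit_length(uint64_t v)
    {
        unsigned bits = 0;

        while (v) {
            v >>= 1;
            bits++;
        }
        return bits;
    }

    /*
     * Textbook order-k Exponential Golomb code for n >= 0:
     * value = n + 2^k, emitted as (len - k - 1) leading zero bits followed by
     * the binary representation of value, i.e. 2*len - k - 1 bits in total.
     */
    static void exp_golomb(uint64_t n, unsigned k, uint64_t *value, unsigned *bits)
    {
        *value = n + (UINT64_C(1) << k);
        *bits = 2 * bit_length(*value) - k - 1;
    }

    int main(void)
    {
        uint64_t value;
        unsigned bits;

        /* k = 0: n = 0 -> "1" (1 bit), n = 2 -> "011" (3 bits). */
        exp_golomb(0, 0, &value, &bits);
        assert(value == 1 && bits == 1);
        exp_golomb(2, 0, &value, &bits);
        assert(value == 3 && bits == 3);
        printf("ok\n");
        return 0;
    }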
* - * @see qid_path_suffixmap() for details + * See qid_path_suffixmap() for details * */ static int qid_path_fullmap(V9fsPDU *pdu, const struct stat *stbuf, @@ -830,7 +869,7 @@ static int qid_path_fullmap(V9fsPDU *pdu, const struct stat *stbuf, return -ENFILE; } - val = g_malloc0(sizeof(QppEntry)); + val = g_new0(QpfEntry, 1); *val = lookup; /* new unique inode and device combo */ @@ -846,8 +885,8 @@ static int qid_path_fullmap(V9fsPDU *pdu, const struct stat *stbuf, return 0; } -/** - * @brief Quick mapping host inode nr -> guest inode nr. +/* + * Quick mapping host inode nr -> guest inode nr. * * This function performs quick remapping of an original file inode number * on host to an appropriate different inode number on guest. This remapping @@ -906,7 +945,7 @@ static int qid_path_suffixmap(V9fsPDU *pdu, const struct stat *stbuf, return -ENFILE; } - val = g_malloc0(sizeof(QppEntry)); + val = g_new0(QppEntry, 1); *val = lookup; /* new unique inode affix and device combo */ @@ -1032,6 +1071,8 @@ static void coroutine_fn pdu_complete(V9fsPDU *pdu, ssize_t len) } len += ret; id = P9_RERROR; + } else { + err = errno_to_dotl(err); } ret = pdu_marshal(pdu, len, "d", err); @@ -1262,6 +1303,40 @@ static int coroutine_fn stat_to_v9stat(V9fsPDU *pdu, V9fsPath *path, #define P9_STATS_ALL 0x00003fffULL /* Mask for All fields above */ +/** + * blksize_to_iounit() - Block size exposed to 9p client. + * Return: block size + * + * @pdu: 9p client request + * @blksize: host filesystem's block size + * + * Convert host filesystem's block size into an appropriate block size for + * 9p client (guest OS side). The value returned suggests an "optimum" block + * size for 9p I/O, i.e. to maximize performance. + */ +static int32_t blksize_to_iounit(const V9fsPDU *pdu, int32_t blksize) +{ + int32_t iounit = 0; + V9fsState *s = pdu->s; + + /* + * iounit should be multiples of blksize (host filesystem block size) + * as well as less than (client msize - P9_IOHDRSZ) + */ + if (blksize) { + iounit = QEMU_ALIGN_DOWN(s->msize - P9_IOHDRSZ, blksize); + } + if (!iounit) { + iounit = s->msize - P9_IOHDRSZ; + } + return iounit; +} + +static int32_t stat_to_iounit(const V9fsPDU *pdu, const struct stat *stbuf) +{ + return blksize_to_iounit(pdu, stbuf->st_blksize); +} + static int stat_to_v9stat_dotl(V9fsPDU *pdu, const struct stat *stbuf, V9fsStatDotl *v9lstat) { @@ -1271,16 +1346,22 @@ static int stat_to_v9stat_dotl(V9fsPDU *pdu, const struct stat *stbuf, v9lstat->st_nlink = stbuf->st_nlink; v9lstat->st_uid = stbuf->st_uid; v9lstat->st_gid = stbuf->st_gid; - v9lstat->st_rdev = stbuf->st_rdev; + v9lstat->st_rdev = host_dev_to_dotl_dev(stbuf->st_rdev); v9lstat->st_size = stbuf->st_size; - v9lstat->st_blksize = stbuf->st_blksize; + v9lstat->st_blksize = stat_to_iounit(pdu, stbuf); v9lstat->st_blocks = stbuf->st_blocks; v9lstat->st_atime_sec = stbuf->st_atime; - v9lstat->st_atime_nsec = stbuf->st_atim.tv_nsec; v9lstat->st_mtime_sec = stbuf->st_mtime; - v9lstat->st_mtime_nsec = stbuf->st_mtim.tv_nsec; v9lstat->st_ctime_sec = stbuf->st_ctime; +#ifdef CONFIG_DARWIN + v9lstat->st_atime_nsec = stbuf->st_atimespec.tv_nsec; + v9lstat->st_mtime_nsec = stbuf->st_mtimespec.tv_nsec; + v9lstat->st_ctime_nsec = stbuf->st_ctimespec.tv_nsec; +#else + v9lstat->st_atime_nsec = stbuf->st_atim.tv_nsec; + v9lstat->st_mtime_nsec = stbuf->st_mtim.tv_nsec; v9lstat->st_ctime_nsec = stbuf->st_ctim.tv_nsec; +#endif /* Currently we only support BASIC fields in stat */ v9lstat->st_result_mask = P9_STATS_BASIC; @@ -1702,15 +1783,17 @@ static bool 
same_stat_id(const struct stat *a, const struct stat *b) static void coroutine_fn v9fs_walk(void *opaque) { - int name_idx; - V9fsQID *qids = NULL; - int i, err = 0; - V9fsPath dpath, path, *pathes = NULL; + int name_idx, nwalked; + g_autofree V9fsQID *qids = NULL; + int i, err = 0, any_err = 0; + V9fsPath dpath, path; + P9ARRAY_REF(V9fsPath) pathes = NULL; uint16_t nwnames; - struct stat stbuf, fidst, *stbufs = NULL; + struct stat stbuf, fidst; + g_autofree struct stat *stbufs = NULL; size_t offset = 7; int32_t fid, newfid; - V9fsString *wnames = NULL; + P9ARRAY_REF(V9fsString) wnames = NULL; V9fsFidState *fidp; V9fsFidState *newfidp = NULL; V9fsPDU *pdu = opaque; @@ -1720,7 +1803,7 @@ static void coroutine_fn v9fs_walk(void *opaque) err = pdu_unmarshal(pdu, offset, "ddw", &fid, &newfid, &nwnames); if (err < 0) { pdu_complete(pdu, err); - return ; + return; } offset += err; @@ -1731,10 +1814,10 @@ static void coroutine_fn v9fs_walk(void *opaque) goto out_nofid; } if (nwnames) { - wnames = g_new0(V9fsString, nwnames); + P9ARRAY_NEW(V9fsString, wnames, nwnames); qids = g_new0(V9fsQID, nwnames); stbufs = g_new0(struct stat, nwnames); - pathes = g_new0(V9fsPath, nwnames); + P9ARRAY_NEW(V9fsPath, pathes, nwnames); for (i = 0; i < nwnames; i++) { err = pdu_unmarshal(pdu, offset, "s", &wnames[i]); if (err < 0) { @@ -1768,54 +1851,61 @@ static void coroutine_fn v9fs_walk(void *opaque) * driver code altogether inside the following block. */ v9fs_co_run_in_worker({ + nwalked = 0; if (v9fs_request_cancelled(pdu)) { - err = -EINTR; + any_err |= err = -EINTR; break; } err = s->ops->lstat(&s->ctx, &dpath, &fidst); if (err < 0) { - err = -errno; + any_err |= err = -errno; break; } stbuf = fidst; - for (name_idx = 0; name_idx < nwnames; name_idx++) { + for (; nwalked < nwnames; nwalked++) { if (v9fs_request_cancelled(pdu)) { - err = -EINTR; + any_err |= err = -EINTR; break; } if (!same_stat_id(&pdu->s->root_st, &stbuf) || - strcmp("..", wnames[name_idx].data)) + strcmp("..", wnames[nwalked].data)) { err = s->ops->name_to_path(&s->ctx, &dpath, - wnames[name_idx].data, &path); + wnames[nwalked].data, + &pathes[nwalked]); if (err < 0) { - err = -errno; + any_err |= err = -errno; break; } if (v9fs_request_cancelled(pdu)) { - err = -EINTR; + any_err |= err = -EINTR; break; } - err = s->ops->lstat(&s->ctx, &path, &stbuf); + err = s->ops->lstat(&s->ctx, &pathes[nwalked], &stbuf); if (err < 0) { - err = -errno; + any_err |= err = -errno; break; } - stbufs[name_idx] = stbuf; - v9fs_path_copy(&dpath, &path); - v9fs_path_copy(&pathes[name_idx], &path); + stbufs[nwalked] = stbuf; + v9fs_path_copy(&dpath, &pathes[nwalked]); } } }); /* * Handle all the rest of this Twalk request on main thread ... + * + * NOTE: -EINTR is an exception where we deviate from the protocol spec + * and simply send a (R)Lerror response instead of bothering to assemble + * a (deducted) Rwalk response; because -EINTR is always the result of a + * Tflush request, so client would no longer wait for a response in this + * case anyway. 
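
The reworked Twalk handler above leans on one small idiom: each step's error is folded into an accumulator (any_err |= err = ...) while nwalked keeps counting the components that did succeed, so the response can later be either a partial Rwalk or an Rlerror. A reduced, stand-alone sketch of that control flow (hypothetical walk_one() helper, no 9p types):

    #include <errno.h>
    #include <stdio.h>

    /* Illustrative sketch, not the 9p.c implementation. */

    /* pretend that walking path component 2 fails */
    static int walk_one(int i)
    {
        return (i == 2) ? -ENOENT : 0;
    }

    int main(void)
    {
        int nwnames = 4, nwalked = 0;
        int err = 0, any_err = 0;

        for (; nwalked < nwnames; nwalked++) {
            any_err |= err = walk_one(nwalked);
            if (err < 0) {
                break;                 /* stop at the first failing component */
            }
        }

        if ((err < 0 && !nwalked) || err == -EINTR) {
            /* nothing was walked, or the request was flushed */
            printf("send Rlerror (%d)\n", err);
        } else if (any_err < 0) {
            /* the protocol allows reporting only the components that succeeded */
            printf("send Rwalk with %d of %d qids\n", nwalked, nwnames);
        } else {
            printf("send Rwalk with all %d qids\n", nwnames);
        }
        return 0;
    }
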
*/ - if (err < 0) { + if ((err < 0 && !nwalked) || err == -EINTR) { goto out; } - err = stat_to_qid(pdu, &fidst, &qid); - if (err < 0) { + any_err |= err = stat_to_qid(pdu, &fidst, &qid); + if (err < 0 && !nwalked) { goto out; } stbuf = fidst; @@ -1824,20 +1914,29 @@ static void coroutine_fn v9fs_walk(void *opaque) v9fs_path_copy(&dpath, &fidp->path); v9fs_path_copy(&path, &fidp->path); - for (name_idx = 0; name_idx < nwnames; name_idx++) { + for (name_idx = 0; name_idx < nwalked; name_idx++) { if (!same_stat_id(&pdu->s->root_st, &stbuf) || strcmp("..", wnames[name_idx].data)) { stbuf = stbufs[name_idx]; - err = stat_to_qid(pdu, &stbuf, &qid); + any_err |= err = stat_to_qid(pdu, &stbuf, &qid); if (err < 0) { - goto out; + break; } v9fs_path_copy(&path, &pathes[name_idx]); v9fs_path_copy(&dpath, &path); } memcpy(&qids[name_idx], &qid, sizeof(qid)); } + if (any_err < 0) { + if (!name_idx) { + /* don't send any QIDs, send Rlerror instead */ + goto out; + } else { + /* send QIDs (not Rlerror), but fid MUST remain unaffected */ + goto send_qids; + } + } if (fid == newfid) { if (fidp->fid_type != P9_FID_NONE) { err = -EINVAL; @@ -1855,8 +1954,9 @@ static void coroutine_fn v9fs_walk(void *opaque) newfidp->uid = fidp->uid; v9fs_path_copy(&newfidp->path, &path); } - err = v9fs_walk_marshal(pdu, nwnames, qids); - trace_v9fs_walk_return(pdu->tag, pdu->id, nwnames, qids); +send_qids: + err = v9fs_walk_marshal(pdu, name_idx, qids); + trace_v9fs_walk_return(pdu->tag, pdu->id, name_idx, qids); out: put_fid(pdu, fidp); if (newfidp) { @@ -1866,38 +1966,14 @@ out: v9fs_path_free(&path); out_nofid: pdu_complete(pdu, err); - if (nwnames && nwnames <= P9_MAXWELEM) { - for (name_idx = 0; name_idx < nwnames; name_idx++) { - v9fs_string_free(&wnames[name_idx]); - v9fs_path_free(&pathes[name_idx]); - } - g_free(wnames); - g_free(qids); - g_free(stbufs); - g_free(pathes); - } } static int32_t coroutine_fn get_iounit(V9fsPDU *pdu, V9fsPath *path) { struct statfs stbuf; - int32_t iounit = 0; - V9fsState *s = pdu->s; + int err = v9fs_co_statfs(pdu, path, &stbuf); - /* - * iounit should be multiples of f_bsize (host filesystem block size - * and as well as less than (client msize - P9_IOHDRSZ)) - */ - if (!v9fs_co_statfs(pdu, path, &stbuf)) { - if (stbuf.f_bsize) { - iounit = stbuf.f_bsize; - iounit *= (s->msize - P9_IOHDRSZ) / stbuf.f_bsize; - } - } - if (!iounit) { - iounit = s->msize - P9_IOHDRSZ; - } - return iounit; + return blksize_to_iounit(pdu, (err >= 0) ? stbuf.f_bsize : 0); } static void coroutine_fn v9fs_open(void *opaque) @@ -2260,7 +2336,7 @@ static int coroutine_fn v9fs_do_readdir_with_stat(V9fsPDU *pdu, count += len; v9fs_stat_free(&v9stat); v9fs_path_free(&path); - saved_dir_pos = dent->d_off; + saved_dir_pos = qemu_dirent_off(dent); } v9fs_readdir_unlock(&fidp->fs.dir); @@ -2365,10 +2441,11 @@ out_nofid: } /** - * Returns size required in Rreaddir response for the passed dirent @p name. + * v9fs_readdir_response_size() - Returns size required in Rreaddir response + * for the passed dirent @name. * - * @param name - directory entry's name (i.e. file name, directory name) - * @returns required size in bytes + * @name: directory entry's name (i.e. 
file name, directory name) + * Return: required size in bytes */ size_t v9fs_readdir_response_size(V9fsString *name) { @@ -2399,6 +2476,7 @@ static int coroutine_fn v9fs_do_readdir(V9fsPDU *pdu, V9fsFidState *fidp, V9fsString name; int len, err = 0; int32_t count = 0; + off_t off; struct dirent *dent; struct stat *st; struct V9fsDirEnt *entries = NULL; @@ -2459,12 +2537,13 @@ static int coroutine_fn v9fs_do_readdir(V9fsPDU *pdu, V9fsFidState *fidp, qid.version = 0; } + off = qemu_dirent_off(dent); v9fs_string_init(&name); v9fs_string_sprintf(&name, "%s", dent->d_name); /* 11 = 7 + 4 (7 = start offset, 4 = space for storing count) */ len = pdu_marshal(pdu, 11 + count, "Qqbs", - &qid, dent->d_off, + &qid, off, dent->d_type, &name); v9fs_string_free(&name); @@ -3143,6 +3222,8 @@ static int coroutine_fn v9fs_complete_rename(V9fsPDU *pdu, V9fsFidState *fidp, V9fsFidState *tfidp; V9fsState *s = pdu->s; V9fsFidState *dirfidp = NULL; + GHashTableIter iter; + gpointer fid; v9fs_path_init(&new_path); if (newdirfid != -1) { @@ -3176,11 +3257,13 @@ static int coroutine_fn v9fs_complete_rename(V9fsPDU *pdu, V9fsFidState *fidp, if (err < 0) { goto out; } + /* * Fixup fid's pointing to the old name to * start pointing to the new name */ - QSIMPLEQ_FOREACH(tfidp, &s->fid_list, next) { + g_hash_table_iter_init(&iter, s->fids); + while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &tfidp)) { if (v9fs_path_is_ancestor(&fidp->path, &tfidp->path)) { /* replace the name */ v9fs_fix_path(&tfidp->path, &new_path, strlen(fidp->path.data)); @@ -3258,6 +3341,8 @@ static int coroutine_fn v9fs_fix_fid_paths(V9fsPDU *pdu, V9fsPath *olddir, V9fsPath oldpath, newpath; V9fsState *s = pdu->s; int err; + GHashTableIter iter; + gpointer fid; v9fs_path_init(&oldpath); v9fs_path_init(&newpath); @@ -3274,7 +3359,8 @@ static int coroutine_fn v9fs_fix_fid_paths(V9fsPDU *pdu, V9fsPath *olddir, * Fixup fid's pointing to the old name to * start pointing to the new name */ - QSIMPLEQ_FOREACH(tfidp, &s->fid_list, next) { + g_hash_table_iter_init(&iter, s->fids); + while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &tfidp)) { if (v9fs_path_is_ancestor(&oldpath, &tfidp->path)) { /* replace the name */ v9fs_fix_path(&tfidp->path, &newpath, strlen(oldpath.data)); @@ -3504,9 +3590,15 @@ static int v9fs_fill_statfs(V9fsState *s, V9fsPDU *pdu, struct statfs *stbuf) f_bavail = stbuf->f_bavail / bsize_factor; f_files = stbuf->f_files; f_ffree = stbuf->f_ffree; +#ifdef CONFIG_DARWIN + fsid_val = (unsigned int)stbuf->f_fsid.val[0] | + (unsigned long long)stbuf->f_fsid.val[1] << 32; + f_namelen = NAME_MAX; +#else fsid_val = (unsigned int) stbuf->f_fsid.__val[0] | (unsigned long long)stbuf->f_fsid.__val[1] << 32; f_namelen = stbuf->f_namelen; +#endif return pdu_marshal(pdu, offset, "ddqqqqqqd", f_type, f_bsize, f_blocks, f_bfree, @@ -3876,6 +3968,24 @@ out_nofid: v9fs_string_free(&name); } +#if defined(CONFIG_LINUX) +/* Currently, only Linux has XATTR_SIZE_MAX */ +#define P9_XATTR_SIZE_MAX XATTR_SIZE_MAX +#elif defined(CONFIG_DARWIN) +/* + * Darwin doesn't seem to define a maximum xattr size in its user + * space header, so manually configure it across platforms as 64k. + * + * Having no limit at all can lead to QEMU crashing during large g_malloc() + * calls. Because QEMU does not currently support macOS guests, the below + * preliminary solution only works due to its being a reflection of the limit of + * Linux guests. 
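
The cap introduced here simply bounds the allocation that backs an incoming xattr value before it is ever attempted. A minimal sketch of such a guard, assuming a 64 KiB limit as above (the function name and layout are invented for illustration):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative sketch, not the 9p.c implementation. */
    #define XATTR_VALUE_MAX 65536   /* stand-in for P9_XATTR_SIZE_MAX */

    /* returns 0 on success, negative errno otherwise */
    static int xattr_buffer_alloc(uint64_t requested, void **out)
    {
        if (requested > XATTR_VALUE_MAX) {
            return -E2BIG;          /* refuse before attempting a huge allocation */
        }
        *out = calloc(1, requested ? requested : 1);
        return *out ? 0 : -ENOMEM;
    }

    int main(void)
    {
        void *buf = NULL;

        printf("64 KiB request: %d\n", xattr_buffer_alloc(64 * 1024, &buf));
        free(buf);
        buf = NULL;
        printf("1 GiB request : %d\n", xattr_buffer_alloc(1ULL << 30, &buf));
        free(buf);
        return 0;
    }
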
+ */ +#define P9_XATTR_SIZE_MAX 65536 +#else +#error Missing definition for P9_XATTR_SIZE_MAX for this host system +#endif + static void coroutine_fn v9fs_xattrcreate(void *opaque) { int flags, rflags = 0; @@ -3908,7 +4018,7 @@ static void coroutine_fn v9fs_xattrcreate(void *opaque) rflags |= XATTR_REPLACE; } - if (size > XATTR_SIZE_MAX) { + if (size > P9_XATTR_SIZE_MAX) { err = -E2BIG; goto out_nofid; } @@ -4140,7 +4250,7 @@ int v9fs_device_realize_common(V9fsState *s, const V9fsTransport *t, s->ctx.fmode = fse->fmode; s->ctx.dmode = fse->dmode; - QSIMPLEQ_INIT(&s->fid_list); + s->fids = g_hash_table_new(NULL, NULL); qemu_co_rwlock_init(&s->rename_lock); if (s->ops->init(&s->ctx, errp) < 0) { @@ -4200,6 +4310,10 @@ void v9fs_device_unrealize_common(V9fsState *s) if (s->ctx.fst) { fsdev_throttle_cleanup(s->ctx.fst); } + if (s->fids) { + g_hash_table_destroy(s->fids); + s->fids = NULL; + } g_free(s->tag); qp_table_destroy(&s->qpd_table); qp_table_destroy(&s->qpp_table); diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h index 1567b67841..2fce4140d1 100644 --- a/hw/9pfs/9p.h +++ b/hw/9pfs/9p.h @@ -100,8 +100,8 @@ typedef enum P9ProtoVersion { V9FS_PROTO_2000L = 0x02, } P9ProtoVersion; -/** - * @brief Minimum message size supported by this 9pfs server. +/* + * Minimum message size supported by this 9pfs server. * * A client establishes a session by sending a Tversion request along with a * 'msize' parameter which suggests the server a maximum message size ever to be @@ -231,7 +231,7 @@ static inline void v9fs_readdir_init(P9ProtoVersion proto_version, V9fsDir *dir) } } -/** +/* * Type for 9p fs drivers' (a.k.a. 9p backends) result of readdir requests, * which is a chained list of directory entries. */ @@ -289,8 +289,8 @@ typedef enum AffixType_t { AffixType_Suffix, /* A.k.a. postfix. */ } AffixType_t; -/** - * @brief Unique affix of variable length. +/* + * Unique affix of variable length. * * An affix is (currently) either a suffix or a prefix, which is either * going to be prepended (prefix) or appended (suffix) with some other @@ -304,7 +304,7 @@ typedef struct VariLenAffix { AffixType_t type; /* Whether this affix is a suffix or a prefix. */ uint64_t value; /* Actual numerical value of this affix. */ /* - * Lenght of the affix, that is how many (of the lowest) bits of @c value + * Lenght of the affix, that is how many (of the lowest) bits of ``value`` * must be used for appending/prepending this affix to its final resulting, * unique number. 
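
How such an affix is actually glued onto a locally growing counter is an implementation detail of the mapping functions; purely as an illustration (the helper below is invented, and which end of the number the affix occupies is an arbitrary choice here), the suffix-free property is what keeps the combined values unique:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative sketch, not the 9p.c implementation. */
    typedef struct {
        uint64_t value;  /* significant bits of the affix                   */
        int      bits;   /* how many of the lowest bits of 'value' are used */
    } Affix;

    /* place the affix in the low bits, the per-affix counter above it */
    static uint64_t compose(uint64_t counter, Affix suffix)
    {
        return (counter << suffix.bits) | suffix.value;
    }

    int main(void)
    {
        Affix a = { .value = 0x2, .bits = 3 };   /* suffix 010 */
        Affix b = { .value = 0x6, .bits = 3 };   /* suffix 110 */

        /* same counter, different suffixes -> different results, and vice versa */
        printf("0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
               compose(1, a), compose(1, b), compose(2, a));
        return 0;
    }
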
*/ @@ -339,7 +339,7 @@ typedef struct { struct V9fsState { QLIST_HEAD(, V9fsPDU) free_list; QLIST_HEAD(, V9fsPDU) active_list; - QSIMPLEQ_HEAD(, V9fsFidState) fid_list; + GHashTable *fids; FileOperations *ops; FsContext ctx; char *tag; @@ -424,21 +424,24 @@ typedef struct V9fsGetlock extern int open_fd_hw; extern int total_open_fd; -static inline void v9fs_path_write_lock(V9fsState *s) +static inline void coroutine_fn +v9fs_path_write_lock(V9fsState *s) { if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) { qemu_co_rwlock_wrlock(&s->rename_lock); } } -static inline void v9fs_path_read_lock(V9fsState *s) +static inline void coroutine_fn +v9fs_path_read_lock(V9fsState *s) { if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) { qemu_co_rwlock_rdlock(&s->rename_lock); } } -static inline void v9fs_path_unlock(V9fsState *s) +static inline void coroutine_fn +v9fs_path_unlock(V9fsState *s) { if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) { qemu_co_rwlock_unlock(&s->rename_lock); diff --git a/hw/9pfs/codir.c b/hw/9pfs/codir.c index 032cce04c4..93ba44fb75 100644 --- a/hw/9pfs/codir.c +++ b/hw/9pfs/codir.c @@ -22,6 +22,8 @@ #include "qemu/coroutine.h" #include "qemu/main-loop.h" #include "coth.h" +#include "9p-xattr.h" +#include "9p-util.h" /* * Intended to be called from bottom-half (e.g. background I/O thread) @@ -139,12 +141,11 @@ static int do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp, /* append next node to result chain */ if (!e) { - *entries = e = g_malloc0(sizeof(V9fsDirEnt)); + *entries = e = g_new0(V9fsDirEnt, 1); } else { - e = e->next = g_malloc0(sizeof(V9fsDirEnt)); + e = e->next = g_new0(V9fsDirEnt, 1); } - e->dent = g_malloc0(sizeof(struct dirent)); - memcpy(e->dent, dent, sizeof(struct dirent)); + e->dent = qemu_dirent_dup(dent); /* perform a full stat() for directory entry if requested by caller */ if (dostat) { @@ -162,12 +163,12 @@ static int do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp, break; } - e->st = g_malloc0(sizeof(struct stat)); + e->st = g_new0(struct stat, 1); memcpy(e->st, &stbuf, sizeof(struct stat)); } size += len; - saved_dir_pos = dent->d_off; + saved_dir_pos = qemu_dirent_off(dent); } /* restore (last) saved position */ @@ -183,14 +184,25 @@ out: } /** - * @brief Reads multiple directory entries in one rush. + * v9fs_co_readdir_many() - Reads multiple directory entries in one rush. + * + * @pdu: the causing 9p (T_readdir) client request + * @fidp: already opened directory where readdir shall be performed on + * @entries: output for directory entries (must not be NULL) + * @offset: initial position inside the directory the function shall + * seek to before retrieving the directory entries + * @maxsize: maximum result message body size (in bytes) + * @dostat: whether a stat() should be performed and returned for + * each directory entry + * Return: resulting response message body size (in bytes) on success, + * negative error code otherwise * * Retrieves the requested (max. amount of) directory entries from the fs * driver. This function must only be called by the main IO thread (top half). * Internally this function call will be dispatched to a background IO thread * (bottom half) where it is eventually executed by the fs driver. * - * @discussion Acquiring multiple directory entries in one rush from the fs + * Acquiring multiple directory entries in one rush from the fs * driver, instead of retrieving each directory entry individually, is very * beneficial from performance point of view. 
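
The batching that v9fs_co_readdir_many() talks about here can be demonstrated outside of QEMU with plain opendir()/readdir(): collect entries into a chained list in a single pass, bounded by a byte budget, rather than issuing one round trip per entry. The sketch below is illustrative (its struct and budget handling are simplified); note that it copies struct dirent by value, which is the kind of portability detail qemu_dirent_dup() exists to handle in the hunk above:

    #include <dirent.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative sketch, not the codir.c implementation. */
    struct Ent {
        struct dirent dent;     /* private copy; real code must mind dirent sizing */
        struct Ent *next;
    };

    /* read up to roughly 'maxsize' bytes worth of entries in one pass */
    static struct Ent *readdir_many(const char *path, size_t maxsize)
    {
        DIR *dir = opendir(path);
        struct Ent *head = NULL, **tail = &head;
        size_t size = 0;
        struct dirent *d;

        if (!dir) {
            return NULL;
        }
        while ((d = readdir(dir)) != NULL) {
            if (size + sizeof(*d) > maxsize) {
                break;                       /* budget for one response exhausted */
            }
            struct Ent *e = calloc(1, sizeof(*e));
            e->dent = *d;
            *tail = e;
            tail = &e->next;
            size += sizeof(*d);
        }
        closedir(dir);
        return head;
    }

    int main(void)
    {
        for (struct Ent *e = readdir_many(".", 16 * 1024); e; ) {
            struct Ent *next = e->next;
            printf("%s\n", e->dent.d_name);
            free(e);
            e = next;
        }
        return 0;
    }
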
Because for every fs driver * request latency is added, which in practice could lead to overall @@ -198,20 +210,9 @@ out: * directory) if every directory entry was individually requested from fs * driver. * - * @note You must @b ALWAYS call @c v9fs_free_dirents(entries) after calling + * NOTE: You must ALWAYS call v9fs_free_dirents(entries) after calling * v9fs_co_readdir_many(), both on success and on error cases of this - * function, to avoid memory leaks once @p entries are no longer needed. - * - * @param pdu - the causing 9p (T_readdir) client request - * @param fidp - already opened directory where readdir shall be performed on - * @param entries - output for directory entries (must not be NULL) - * @param offset - initial position inside the directory the function shall - * seek to before retrieving the directory entries - * @param maxsize - maximum result message body size (in bytes) - * @param dostat - whether a stat() should be performed and returned for - * each directory entry - * @returns resulting response message body size (in bytes) on success, - * negative error code otherwise + * function, to avoid memory leaks once @entries are no longer needed. */ int coroutine_fn v9fs_co_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp, struct V9fsDirEnt **entries, diff --git a/hw/9pfs/coth.h b/hw/9pfs/coth.h index c51289903d..1a1edbdc2a 100644 --- a/hw/9pfs/coth.h +++ b/hw/9pfs/coth.h @@ -19,7 +19,7 @@ #include "qemu/coroutine.h" #include "9p.h" -/** +/* * we want to use bottom half because we want to make sure the below * sequence of events. * @@ -29,7 +29,7 @@ * we cannot swap step 1 and 2, because that would imply worker thread * can enter coroutine while step1 is still running * - * @b PERFORMANCE @b CONSIDERATIONS: As a rule of thumb, keep in mind + * PERFORMANCE CONSIDERATIONS: As a rule of thumb, keep in mind * that hopping between threads adds @b latency! 
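
One detail worth spelling out: in the hunk just below, v9fs_co_run_in_worker() starts wrapping its code_block argument in an inner do { } while (0). The apparent motivation (an assumption on my part, consistent with the break statements used by the Twalk handler earlier) is break scoping: without the inner loop, a break inside the block would leave the macro's own outer do/while and skip the trailing qemu_coroutine_yield(). A tiny stand-alone illustration with printf in place of the coroutine plumbing:

    #include <stdio.h>

    /* Illustrative sketch, not the coth.h macro. */

    /* a break in 'block' escapes the macro's own loop: "after" never prints */
    #define RUN_UNWRAPPED(block) \
        do {                     \
            printf("before\n");  \
            block;               \
            printf("after\n");   \
        } while (0)

    /* a break in 'block' only leaves the inner loop: "after" always prints */
    #define RUN_WRAPPED(block)        \
        do {                          \
            printf("before\n");       \
            do { block; } while (0);  \
            printf("after\n");        \
        } while (0)

    int main(void)
    {
        RUN_UNWRAPPED({ printf("work\n"); if (1) { break; } });
        RUN_WRAPPED({ printf("work\n"); if (1) { break; } });
        return 0;
    }
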
So when handling a * 9pfs request, avoid calling v9fs_co_run_in_worker() too often, because * this might otherwise sum up to a significant, huge overall latency for @@ -51,7 +51,9 @@ */ \ qemu_coroutine_yield(); \ qemu_bh_delete(co_bh); \ - code_block; \ + do { \ + code_block; \ + } while (0); \ /* re-enter back to qemu thread */ \ qemu_coroutine_yield(); \ } while (0) diff --git a/hw/9pfs/meson.build b/hw/9pfs/meson.build index 99be5d9119..12443b6ad5 100644 --- a/hw/9pfs/meson.build +++ b/hw/9pfs/meson.build @@ -4,7 +4,6 @@ fs_ss.add(files( '9p-posix-acl.c', '9p-proxy.c', '9p-synth.c', - '9p-util.c', '9p-xattr-user.c', '9p-xattr.c', '9p.c', @@ -14,6 +13,8 @@ fs_ss.add(files( 'coth.c', 'coxattr.c', )) +fs_ss.add(when: 'CONFIG_LINUX', if_true: files('9p-util-linux.c')) +fs_ss.add(when: 'CONFIG_DARWIN', if_true: files('9p-util-darwin.c')) fs_ss.add(when: 'CONFIG_XEN', if_true: files('xen-9p-backend.c')) softmmu_ss.add_all(when: 'CONFIG_FSDEV_9P', if_true: fs_ss) diff --git a/hw/9pfs/trace-events b/hw/9pfs/trace-events index 6c77966c0b..a12e55c165 100644 --- a/hw/9pfs/trace-events +++ b/hw/9pfs/trace-events @@ -48,3 +48,9 @@ v9fs_readlink(uint16_t tag, uint8_t id, int32_t fid) "tag %d id %d fid %d" v9fs_readlink_return(uint16_t tag, uint8_t id, char* target) "tag %d id %d name %s" v9fs_setattr(uint16_t tag, uint8_t id, int32_t fid, int32_t valid, int32_t mode, int32_t uid, int32_t gid, int64_t size, int64_t atime_sec, int64_t mtime_sec) "tag %u id %u fid %d iattr={valid %d mode %d uid %d gid %d size %"PRId64" atime=%"PRId64" mtime=%"PRId64" }" v9fs_setattr_return(uint16_t tag, uint8_t id) "tag %u id %u" + +# xen-9p-backend.c +xen_9pfs_alloc(char *name) "name %s" +xen_9pfs_connect(char *name) "name %s" +xen_9pfs_disconnect(char *name) "name %s" +xen_9pfs_free(char *name) "name %s" diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c index 54ee93b71f..5f522e68e9 100644 --- a/hw/9pfs/virtio-9p-device.c +++ b/hw/9pfs/virtio-9p-device.c @@ -216,7 +216,7 @@ static void virtio_9p_device_realize(DeviceState *dev, Error **errp) } v->config_size = sizeof(struct virtio_9p_config) + strlen(s->fsconf.tag); - virtio_init(vdev, "virtio-9p", VIRTIO_ID_9P, v->config_size); + virtio_init(vdev, VIRTIO_ID_9P, v->config_size); v->vq = virtio_add_queue(vdev, MAX_REQ, handle_9p_output); } diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c index 65c4979c3c..ab1df8dd2f 100644 --- a/hw/9pfs/xen-9p-backend.c +++ b/hw/9pfs/xen-9p-backend.c @@ -24,6 +24,8 @@ #include "qemu/option.h" #include "fsdev/qemu-fsdev.h" +#include "trace.h" + #define VERSIONS "1" #define MAX_RINGS 8 #define MAX_RING_ORDER 9 @@ -335,6 +337,8 @@ static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev) Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); int i; + trace_xen_9pfs_disconnect(xendev->name); + for (i = 0; i < xen_9pdev->num_rings; i++) { if (xen_9pdev->rings[i].evtchndev != NULL) { qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), @@ -343,39 +347,40 @@ static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev) xen_9pdev->rings[i].local_port); xen_9pdev->rings[i].evtchndev = NULL; } - } -} - -static int xen_9pfs_free(struct XenLegacyDevice *xendev) -{ - Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); - int i; - - if (xen_9pdev->rings[0].evtchndev != NULL) { - xen_9pfs_disconnect(xendev); - } - - for (i = 0; i < xen_9pdev->num_rings; i++) { if (xen_9pdev->rings[i].data != NULL) { xen_be_unmap_grant_refs(&xen_9pdev->xendev, xen_9pdev->rings[i].data, (1 
<< xen_9pdev->rings[i].ring_order)); + xen_9pdev->rings[i].data = NULL; } if (xen_9pdev->rings[i].intf != NULL) { xen_be_unmap_grant_refs(&xen_9pdev->xendev, xen_9pdev->rings[i].intf, 1); + xen_9pdev->rings[i].intf = NULL; } if (xen_9pdev->rings[i].bh != NULL) { qemu_bh_delete(xen_9pdev->rings[i].bh); + xen_9pdev->rings[i].bh = NULL; } } g_free(xen_9pdev->id); + xen_9pdev->id = NULL; g_free(xen_9pdev->tag); + xen_9pdev->tag = NULL; g_free(xen_9pdev->path); + xen_9pdev->path = NULL; g_free(xen_9pdev->security_model); + xen_9pdev->security_model = NULL; g_free(xen_9pdev->rings); + xen_9pdev->rings = NULL; +} + +static int xen_9pfs_free(struct XenLegacyDevice *xendev) +{ + trace_xen_9pfs_free(xendev->name); + return 0; } @@ -387,6 +392,8 @@ static int xen_9pfs_connect(struct XenLegacyDevice *xendev) V9fsState *s = &xen_9pdev->state; QemuOpts *fsdev; + trace_xen_9pfs_connect(xendev->name); + if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings", &xen_9pdev->num_rings) == -1 || xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) { @@ -494,6 +501,8 @@ out: static void xen_9pfs_alloc(struct XenLegacyDevice *xendev) { + trace_xen_9pfs_alloc(xendev->name); + xenstore_write_be_str(xendev, "versions", VERSIONS); xenstore_write_be_int(xendev, "max-rings", MAX_RINGS); xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER); diff --git a/hw/Kconfig b/hw/Kconfig index 8cb7664d70..38233bbb0f 100644 --- a/hw/Kconfig +++ b/hw/Kconfig @@ -6,6 +6,7 @@ source audio/Kconfig source block/Kconfig source char/Kconfig source core/Kconfig +source cxl/Kconfig source display/Kconfig source dma/Kconfig source gpio/Kconfig @@ -49,6 +50,7 @@ source avr/Kconfig source cris/Kconfig source hppa/Kconfig source i386/Kconfig +source loongarch/Kconfig source m68k/Kconfig source microblaze/Kconfig source mips/Kconfig @@ -81,3 +83,5 @@ config XLNX_ZYNQMP select REGISTER select CAN_BUS select PTIMER + select XLNX_BBRAM + select XLNX_EFUSE_ZYNQMP diff --git a/hw/acpi/Kconfig b/hw/acpi/Kconfig index cfc4ede8d9..3703aca212 100644 --- a/hw/acpi/Kconfig +++ b/hw/acpi/Kconfig @@ -5,9 +5,13 @@ config ACPI_X86 bool select ACPI select ACPI_NVDIMM + select ACPI_CXL select ACPI_CPU_HOTPLUG select ACPI_MEMORY_HOTPLUG select ACPI_HMAT + select ACPI_PIIX4 + select ACPI_PCIHP + select ACPI_ERST config ACPI_X86_ICH bool @@ -24,6 +28,14 @@ config ACPI_NVDIMM bool depends on ACPI +config ACPI_PIIX4 + bool + depends on ACPI + +config ACPI_PCIHP + bool + depends on ACPI + config ACPI_HMAT bool depends on ACPI @@ -41,8 +53,21 @@ config ACPI_VMGENID default y depends on PC +config ACPI_VIOT + bool + depends on ACPI + config ACPI_HW_REDUCED bool select ACPI select ACPI_MEMORY_HOTPLUG select ACPI_NVDIMM + +config ACPI_ERST + bool + default y + depends on ACPI && PCI + +config ACPI_CXL + bool + depends on ACPI diff --git a/hw/acpi/acpi-cpu-hotplug-stub.c b/hw/acpi/acpi-cpu-hotplug-stub.c new file mode 100644 index 0000000000..3fc4b14c26 --- /dev/null +++ b/hw/acpi/acpi-cpu-hotplug-stub.c @@ -0,0 +1,50 @@ +#include "qemu/osdep.h" +#include "hw/acpi/cpu_hotplug.h" +#include "migration/vmstate.h" + + +/* Following stubs are all related to ACPI cpu hotplug */ +const VMStateDescription vmstate_cpu_hotplug; + +void acpi_switch_to_modern_cphp(AcpiCpuHotplug *gpe_cpu, + CPUHotplugState *cpuhp_state, + uint16_t io_port) +{ + return; +} + +void legacy_acpi_cpu_hotplug_init(MemoryRegion *parent, Object *owner, + AcpiCpuHotplug *gpe_cpu, uint16_t base) +{ + return; +} + +void acpi_cpu_ospm_status(CPUHotplugState *cpu_st, ACPIOSTInfoList 
***list) +{ + return; +} + +void acpi_cpu_plug_cb(HotplugHandler *hotplug_dev, + CPUHotplugState *cpu_st, DeviceState *dev, Error **errp) +{ + return; +} + +void legacy_acpi_cpu_plug_cb(HotplugHandler *hotplug_dev, + AcpiCpuHotplug *g, DeviceState *dev, Error **errp) +{ + return; +} + +void acpi_cpu_unplug_cb(CPUHotplugState *cpu_st, + DeviceState *dev, Error **errp) +{ + return; +} + +void acpi_cpu_unplug_request_cb(HotplugHandler *hotplug_dev, + CPUHotplugState *cpu_st, + DeviceState *dev, Error **errp) +{ + return; +} diff --git a/hw/acpi/acpi-mem-hotplug-stub.c b/hw/acpi/acpi-mem-hotplug-stub.c new file mode 100644 index 0000000000..73a076a265 --- /dev/null +++ b/hw/acpi/acpi-mem-hotplug-stub.c @@ -0,0 +1,35 @@ +#include "qemu/osdep.h" +#include "hw/acpi/memory_hotplug.h" +#include "migration/vmstate.h" + +const VMStateDescription vmstate_memory_hotplug; + +void acpi_memory_hotplug_init(MemoryRegion *as, Object *owner, + MemHotplugState *state, hwaddr io_base) +{ + return; +} + +void acpi_memory_ospm_status(MemHotplugState *mem_st, ACPIOSTInfoList ***list) +{ + return; +} + +void acpi_memory_plug_cb(HotplugHandler *hotplug_dev, MemHotplugState *mem_st, + DeviceState *dev, Error **errp) +{ + return; +} + +void acpi_memory_unplug_cb(MemHotplugState *mem_st, + DeviceState *dev, Error **errp) +{ + return; +} + +void acpi_memory_unplug_request_cb(HotplugHandler *hotplug_dev, + MemHotplugState *mem_st, + DeviceState *dev, Error **errp) +{ + return; +} diff --git a/hw/acpi/acpi-nvdimm-stub.c b/hw/acpi/acpi-nvdimm-stub.c new file mode 100644 index 0000000000..8baff9be6f --- /dev/null +++ b/hw/acpi/acpi-nvdimm-stub.c @@ -0,0 +1,8 @@ +#include "qemu/osdep.h" +#include "hw/mem/nvdimm.h" +#include "hw/hotplug.h" + +void nvdimm_acpi_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev) +{ + return; +} diff --git a/hw/acpi/acpi-pci-hotplug-stub.c b/hw/acpi/acpi-pci-hotplug-stub.c new file mode 100644 index 0000000000..a43f6dafc9 --- /dev/null +++ b/hw/acpi/acpi-pci-hotplug-stub.c @@ -0,0 +1,43 @@ +#include "qemu/osdep.h" +#include "hw/acpi/pcihp.h" +#include "migration/vmstate.h" + +const VMStateDescription vmstate_acpi_pcihp_pci_status; + +void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus, + MemoryRegion *address_space_io, bool bridges_enabled, + uint16_t io_base) +{ + return; +} + +void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s, + DeviceState *dev, Error **errp) +{ + return; +} + +void acpi_pcihp_device_pre_plug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + return; +} + +void acpi_pcihp_device_unplug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s, + DeviceState *dev, Error **errp) +{ + return; +} + +void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev, + AcpiPciHpState *s, DeviceState *dev, + Error **errp) +{ + return; +} + +void acpi_pcihp_reset(AcpiPciHpState *s, bool acpihp_root_off) +{ + return; +} + diff --git a/hw/acpi/acpi-x86-stub.c b/hw/acpi/acpi-x86-stub.c index e9e46c5c5f..3df1e090f4 100644 --- a/hw/acpi/acpi-x86-stub.c +++ b/hw/acpi/acpi-x86-stub.c @@ -3,7 +3,8 @@ #include "hw/i386/acpi-build.h" void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid, - const CPUArchIdList *apic_ids, GArray *entry) + const CPUArchIdList *apic_ids, GArray *entry, + bool force_enabled) { } diff --git a/hw/acpi/acpi_interface.c b/hw/acpi/acpi_interface.c index 6583917b8e..c668d361f6 100644 --- a/hw/acpi/acpi_interface.c +++ b/hw/acpi/acpi_interface.c @@ -1,5 +1,6 @@ #include "qemu/osdep.h" #include 
"hw/acpi/acpi_dev_interface.h" +#include "hw/acpi/acpi_aml_interface.h" #include "qemu/module.h" void acpi_send_event(DeviceState *dev, AcpiEventStatusBits event) @@ -18,8 +19,15 @@ static void register_types(void) .parent = TYPE_INTERFACE, .class_size = sizeof(AcpiDeviceIfClass), }; + static const TypeInfo acpi_dev_aml_if_info = { + .name = TYPE_ACPI_DEV_AML_IF, + .parent = TYPE_INTERFACE, + .class_size = sizeof(AcpiDevAmlIfClass), + }; + type_register_static(&acpi_dev_if_info); + type_register_static(&acpi_dev_aml_if_info); } type_init(register_types) diff --git a/hw/acpi/aml-build-stub.c b/hw/acpi/aml-build-stub.c index 8d8ad1a314..89a8fec4af 100644 --- a/hw/acpi/aml-build-stub.c +++ b/hw/acpi/aml-build-stub.c @@ -26,6 +26,16 @@ void aml_append(Aml *parent_ctx, Aml *child) { } +Aml *aml_return(Aml *val) +{ + return NULL; +} + +Aml *aml_method(const char *name, int arg_count, AmlSerializeFlag sflag) +{ + return NULL; +} + Aml *aml_resource_template(void) { return NULL; diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c index d5103e6d7b..42feb4d4d7 100644 --- a/hw/acpi/aml-build.c +++ b/hw/acpi/aml-build.c @@ -52,6 +52,19 @@ static void build_append_byte(GArray *array, uint8_t val) g_array_append_val(array, val); } +static void build_append_padded_str(GArray *array, const char *str, + size_t maxlen, char pad) +{ + size_t i; + size_t len = strlen(str); + + g_assert(len <= maxlen); + g_array_append_vals(array, str, len); + for (i = maxlen - len; i > 0; i--) { + g_array_append_val(array, pad); + } +} + static void build_append_array(GArray *array, GArray *val) { g_array_append_vals(array, val->data, val->len); @@ -194,7 +207,7 @@ build_append_nameseg(GArray *array, const char *seg) g_array_append_vals(array, "____", ACPI_NAMESEG_LEN - len); } -static void GCC_FMT_ATTR(2, 0) +static void G_GNUC_PRINTF(2, 0) build_append_namestringv(GArray *array, const char *format, va_list ap) { char *s; @@ -257,7 +270,7 @@ build_append_namestringv(GArray *array, const char *format, va_list ap) g_strfreev(segs); } -GCC_FMT_ATTR(2, 3) +G_GNUC_PRINTF(2, 3) static void build_append_namestring(GArray *array, const char *format, ...) { va_list ap; @@ -1692,27 +1705,53 @@ Aml *aml_object_type(Aml *object) return var; } -void -build_header(BIOSLinker *linker, GArray *table_data, - AcpiTableHeader *h, const char *sig, int len, uint8_t rev, - const char *oem_id, const char *oem_table_id) +void acpi_table_begin(AcpiTable *desc, GArray *array) { - unsigned tbl_offset = (char *)h - table_data->data; - unsigned checksum_offset = (char *)&h->checksum - table_data->data; - memcpy(&h->signature, sig, 4); - h->length = cpu_to_le32(len); - h->revision = rev; - strpadcpy((char *)h->oem_id, sizeof h->oem_id, oem_id, ' '); - strpadcpy((char *)h->oem_table_id, sizeof h->oem_table_id, - oem_table_id, ' '); + desc->array = array; + desc->table_offset = array->len; + + /* + * ACPI spec 1.0b + * 5.2.3 System Description Table Header + */ + g_assert(strlen(desc->sig) == 4); + g_array_append_vals(array, desc->sig, 4); /* Signature */ + /* + * reserve space for Length field, which will be patched by + * acpi_table_end() when the table creation is finished. 
+ */ + build_append_int_noprefix(array, 0, 4); /* Length */ + build_append_int_noprefix(array, desc->rev, 1); /* Revision */ + build_append_int_noprefix(array, 0, 1); /* Checksum */ + build_append_padded_str(array, desc->oem_id, 6, '\0'); /* OEMID */ + /* OEM Table ID */ + build_append_padded_str(array, desc->oem_table_id, 8, '\0'); + build_append_int_noprefix(array, 1, 4); /* OEM Revision */ + g_array_append_vals(array, ACPI_BUILD_APPNAME8, 4); /* Creator ID */ + build_append_int_noprefix(array, 1, 4); /* Creator Revision */ +} + +void acpi_table_end(BIOSLinker *linker, AcpiTable *desc) +{ + /* + * ACPI spec 1.0b + * 5.2.3 System Description Table Header + * Table 5-2 DESCRIPTION_HEADER Fields + */ + const unsigned checksum_offset = 9; + uint32_t table_len = desc->array->len - desc->table_offset; + uint32_t table_len_le = cpu_to_le32(table_len); + gchar *len_ptr = &desc->array->data[desc->table_offset + 4]; + + /* patch "Length" field that has been reserved by acpi_table_begin() + * to the actual length, i.e. accumulated table length from + * acpi_table_begin() till acpi_table_end() + */ + memcpy(len_ptr, &table_len_le, sizeof table_len_le); - h->oem_revision = cpu_to_le32(1); - memcpy(h->asl_compiler_id, ACPI_BUILD_APPNAME8, 4); - h->asl_compiler_revision = cpu_to_le32(1); - /* Checksum to be filled in by Guest linker */ bios_linker_loader_add_checksum(linker, ACPI_BUILD_TABLE_FILE, - tbl_offset, len, checksum_offset); + desc->table_offset, table_len, desc->table_offset + checksum_offset); } void *acpi_data_push(GArray *table_data, unsigned size) @@ -1822,73 +1861,81 @@ build_rsdp(GArray *tbl, BIOSLinker *linker, AcpiRsdpData *rsdp_data) 32); } -/* Build rsdt table */ +/* + * ACPI 1.0 Root System Description Table (RSDT) + */ void build_rsdt(GArray *table_data, BIOSLinker *linker, GArray *table_offsets, const char *oem_id, const char *oem_table_id) { int i; - unsigned rsdt_entries_offset; - AcpiRsdtDescriptorRev1 *rsdt; - int rsdt_start = table_data->len; - const unsigned table_data_len = (sizeof(uint32_t) * table_offsets->len); - const unsigned rsdt_entry_size = sizeof(rsdt->table_offset_entry[0]); - const size_t rsdt_len = sizeof(*rsdt) + table_data_len; + AcpiTable table = { .sig = "RSDT", .rev = 1, + .oem_id = oem_id, .oem_table_id = oem_table_id }; - rsdt = acpi_data_push(table_data, rsdt_len); - rsdt_entries_offset = (char *)rsdt->table_offset_entry - table_data->data; + acpi_table_begin(&table, table_data); for (i = 0; i < table_offsets->len; ++i) { uint32_t ref_tbl_offset = g_array_index(table_offsets, uint32_t, i); - uint32_t rsdt_entry_offset = rsdt_entries_offset + rsdt_entry_size * i; + uint32_t rsdt_entry_offset = table.array->len; - /* rsdt->table_offset_entry to be filled by Guest linker */ + /* reserve space for entry */ + build_append_int_noprefix(table.array, 0, 4); + + /* mark position of RSDT entry to be filled by Guest linker */ bios_linker_loader_add_pointer(linker, - ACPI_BUILD_TABLE_FILE, rsdt_entry_offset, rsdt_entry_size, + ACPI_BUILD_TABLE_FILE, rsdt_entry_offset, 4, ACPI_BUILD_TABLE_FILE, ref_tbl_offset); + } - build_header(linker, table_data, - (void *)(table_data->data + rsdt_start), - "RSDT", rsdt_len, 1, oem_id, oem_table_id); + acpi_table_end(linker, &table); } -/* Build xsdt table */ +/* + * ACPI 2.0 eXtended System Description Table (XSDT) + */ void build_xsdt(GArray *table_data, BIOSLinker *linker, GArray *table_offsets, const char *oem_id, const char *oem_table_id) { int i; - unsigned xsdt_entries_offset; - AcpiXsdtDescriptorRev2 *xsdt; - int 
xsdt_start = table_data->len; - const unsigned table_data_len = (sizeof(uint64_t) * table_offsets->len); - const unsigned xsdt_entry_size = sizeof(xsdt->table_offset_entry[0]); - const size_t xsdt_len = sizeof(*xsdt) + table_data_len; + AcpiTable table = { .sig = "XSDT", .rev = 1, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + + acpi_table_begin(&table, table_data); - xsdt = acpi_data_push(table_data, xsdt_len); - xsdt_entries_offset = (char *)xsdt->table_offset_entry - table_data->data; for (i = 0; i < table_offsets->len; ++i) { uint64_t ref_tbl_offset = g_array_index(table_offsets, uint32_t, i); - uint64_t xsdt_entry_offset = xsdt_entries_offset + xsdt_entry_size * i; + uint64_t xsdt_entry_offset = table.array->len; - /* xsdt->table_offset_entry to be filled by Guest linker */ + /* reserve space for entry */ + build_append_int_noprefix(table.array, 0, 8); + + /* mark position of RSDT entry to be filled by Guest linker */ bios_linker_loader_add_pointer(linker, - ACPI_BUILD_TABLE_FILE, xsdt_entry_offset, xsdt_entry_size, + ACPI_BUILD_TABLE_FILE, xsdt_entry_offset, 8, ACPI_BUILD_TABLE_FILE, ref_tbl_offset); } - build_header(linker, table_data, - (void *)(table_data->data + xsdt_start), - "XSDT", xsdt_len, 1, oem_id, oem_table_id); + acpi_table_end(linker, &table); } -void build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base, +/* + * ACPI spec, Revision 4.0 + * 5.2.16.2 Memory Affinity Structure + */ +void build_srat_memory(GArray *table_data, uint64_t base, uint64_t len, int node, MemoryAffinityFlags flags) { - numamem->type = ACPI_SRAT_MEMORY; - numamem->length = sizeof(*numamem); - numamem->proximity = cpu_to_le32(node); - numamem->flags = cpu_to_le32(flags); - numamem->base_addr = cpu_to_le64(base); - numamem->range_length = cpu_to_le64(len); + build_append_int_noprefix(table_data, 1, 1); /* Type */ + build_append_int_noprefix(table_data, 40, 1); /* Length */ + build_append_int_noprefix(table_data, node, 4); /* Proximity Domain */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + build_append_int_noprefix(table_data, base, 4); /* Base Address Low */ + /* Base Address High */ + build_append_int_noprefix(table_data, base >> 32, 4); + build_append_int_noprefix(table_data, len, 4); /* Length Low */ + build_append_int_noprefix(table_data, len >> 32, 4); /* Length High */ + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + build_append_int_noprefix(table_data, flags, 4); /* Flags */ + build_append_int_noprefix(table_data, 0, 8); /* Reserved */ } /* @@ -1898,11 +1945,12 @@ void build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base, void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms, const char *oem_id, const char *oem_table_id) { - int slit_start, i, j; - slit_start = table_data->len; + int i, j; int nb_numa_nodes = ms->numa_state->num_nodes; + AcpiTable table = { .sig = "SLIT", .rev = 1, + .oem_id = oem_id, .oem_table_id = oem_table_id }; - acpi_data_push(table_data, sizeof(AcpiTableHeader)); + acpi_table_begin(&table, table_data); build_append_int_noprefix(table_data, nb_numa_nodes, 8); for (i = 0; i < nb_numa_nodes; i++) { @@ -1913,21 +1961,124 @@ void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms, 1); } } - - build_header(linker, table_data, - (void *)(table_data->data + slit_start), - "SLIT", - table_data->len - slit_start, 1, oem_id, oem_table_id); + acpi_table_end(linker, &table); } -/* build rev1/rev3/rev5.1 FADT */ +/* + * ACPI spec, Revision 6.3 + * 5.2.29.1 Processor hierarchy node 
structure (Type 0) + */ +static void build_processor_hierarchy_node(GArray *tbl, uint32_t flags, + uint32_t parent, uint32_t id, + uint32_t *priv_rsrc, + uint32_t priv_num) +{ + int i; + + build_append_byte(tbl, 0); /* Type 0 - processor */ + build_append_byte(tbl, 20 + priv_num * 4); /* Length */ + build_append_int_noprefix(tbl, 0, 2); /* Reserved */ + build_append_int_noprefix(tbl, flags, 4); /* Flags */ + build_append_int_noprefix(tbl, parent, 4); /* Parent */ + build_append_int_noprefix(tbl, id, 4); /* ACPI Processor ID */ + + /* Number of private resources */ + build_append_int_noprefix(tbl, priv_num, 4); + + /* Private resources[N] */ + if (priv_num > 0) { + assert(priv_rsrc); + for (i = 0; i < priv_num; i++) { + build_append_int_noprefix(tbl, priv_rsrc[i], 4); + } + } +} + +/* + * ACPI spec, Revision 6.3 + * 5.2.29 Processor Properties Topology Table (PPTT) + */ +void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms, + const char *oem_id, const char *oem_table_id) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + CPUArchIdList *cpus = ms->possible_cpus; + int64_t socket_id = -1, cluster_id = -1, core_id = -1; + uint32_t socket_offset = 0, cluster_offset = 0, core_offset = 0; + uint32_t pptt_start = table_data->len; + int n; + AcpiTable table = { .sig = "PPTT", .rev = 2, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + + acpi_table_begin(&table, table_data); + + /* + * This works with the assumption that cpus[n].props.*_id has been + * sorted from top to down levels in mc->possible_cpu_arch_ids(). + * Otherwise, the unexpected and duplicated containers will be + * created. + */ + for (n = 0; n < cpus->len; n++) { + if (cpus->cpus[n].props.socket_id != socket_id) { + assert(cpus->cpus[n].props.socket_id > socket_id); + socket_id = cpus->cpus[n].props.socket_id; + cluster_id = -1; + core_id = -1; + socket_offset = table_data->len - pptt_start; + build_processor_hierarchy_node(table_data, + (1 << 0), /* Physical package */ + 0, socket_id, NULL, 0); + } + + if (mc->smp_props.clusters_supported) { + if (cpus->cpus[n].props.cluster_id != cluster_id) { + assert(cpus->cpus[n].props.cluster_id > cluster_id); + cluster_id = cpus->cpus[n].props.cluster_id; + core_id = -1; + cluster_offset = table_data->len - pptt_start; + build_processor_hierarchy_node(table_data, + (0 << 0), /* Not a physical package */ + socket_offset, cluster_id, NULL, 0); + } + } else { + cluster_offset = socket_offset; + } + + if (ms->smp.threads == 1) { + build_processor_hierarchy_node(table_data, + (1 << 1) | /* ACPI Processor ID valid */ + (1 << 3), /* Node is a Leaf */ + cluster_offset, n, NULL, 0); + } else { + if (cpus->cpus[n].props.core_id != core_id) { + assert(cpus->cpus[n].props.core_id > core_id); + core_id = cpus->cpus[n].props.core_id; + core_offset = table_data->len - pptt_start; + build_processor_hierarchy_node(table_data, + (0 << 0), /* Not a physical package */ + cluster_offset, core_id, NULL, 0); + } + + build_processor_hierarchy_node(table_data, + (1 << 1) | /* ACPI Processor ID valid */ + (1 << 2) | /* Processor is a Thread */ + (1 << 3), /* Node is a Leaf */ + core_offset, n, NULL, 0); + } + } + + acpi_table_end(linker, &table); +} + +/* build rev1/rev3/rev5.1/rev6.0 FADT */ void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f, const char *oem_id, const char *oem_table_id) { int off; - int fadt_start = tbl->len; + AcpiTable table = { .sig = "FACP", .rev = f->rev, + .oem_id = oem_id, .oem_table_id = oem_table_id }; - acpi_data_push(tbl, sizeof(AcpiTableHeader)); 
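
The flag values hard-coded by build_pptt() above are plain bit positions from the processor hierarchy node structure (section 5.2.29.1 cited in the comment). Spelled out as named constants, with the two combinations the builder actually emits (the constant names are mine, not QEMU's):

    #include <stdio.h>

    /* Illustrative sketch; bit positions as used by build_pptt() above. */
    #define PPTT_PHYSICAL_PACKAGE     (1u << 0)
    #define PPTT_ACPI_PROC_ID_VALID   (1u << 1)
    #define PPTT_PROCESSOR_IS_THREAD  (1u << 2)
    #define PPTT_NODE_IS_LEAF         (1u << 3)

    int main(void)
    {
        /* a socket container node ... */
        unsigned socket = PPTT_PHYSICAL_PACKAGE;
        /* ... and an SMT leaf node, as emitted when ms->smp.threads > 1 */
        unsigned thread = PPTT_ACPI_PROC_ID_VALID |
                          PPTT_PROCESSOR_IS_THREAD |
                          PPTT_NODE_IS_LEAF;

        printf("socket flags = 0x%x, thread flags = 0x%x\n", socket, thread);
        return 0;
    }
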
+ acpi_table_begin(&table, tbl); /* FACS address to be filled by Guest linker at runtime */ off = tbl->len; @@ -1986,12 +2137,18 @@ void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f, build_append_int_noprefix(tbl, 0, 1); /* DAY_ALRM */ build_append_int_noprefix(tbl, 0, 1); /* MON_ALRM */ build_append_int_noprefix(tbl, f->rtc_century, 1); /* CENTURY */ - build_append_int_noprefix(tbl, 0, 2); /* IAPC_BOOT_ARCH */ + /* IAPC_BOOT_ARCH */ + if (f->rev == 1) { + build_append_int_noprefix(tbl, 0, 2); + } else { + /* since ACPI v2.0 */ + build_append_int_noprefix(tbl, f->iapc_boot_arch, 2); + } build_append_int_noprefix(tbl, 0, 1); /* Reserved */ build_append_int_noprefix(tbl, f->flags, 4); /* Flags */ if (f->rev == 1) { - goto build_hdr; + goto done; } build_append_gas_from_struct(tbl, &f->reset_reg); /* RESET_REG */ @@ -2028,7 +2185,7 @@ void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f, build_append_gas(tbl, AML_AS_SYSTEM_MEMORY, 0 , 0, 0, 0); /* X_GPE1_BLK */ if (f->rev <= 4) { - goto build_hdr; + goto done; } /* SLEEP_CONTROL_REG */ @@ -2036,12 +2193,18 @@ void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f, /* SLEEP_STATUS_REG */ build_append_gas_from_struct(tbl, &f->sleep_sts); - /* TODO: extra fields need to be added to support revisions above rev5 */ - assert(f->rev == 5); + if (f->rev == 5) { + goto done; + } -build_hdr: - build_header(linker, tbl, (void *)(tbl->data + fadt_start), - "FACP", tbl->len - fadt_start, f->rev, oem_id, oem_table_id); + /* Hypervisor Vendor Identity */ + build_append_padded_str(tbl, "QEMU", 8, '\0'); + + /* TODO: extra fields need to be added to support revisions above rev6 */ + assert(f->rev == 6); + +done: + acpi_table_end(linker, &table); } #ifdef CONFIG_TPM @@ -2054,13 +2217,14 @@ void build_tpm2(GArray *table_data, BIOSLinker *linker, GArray *tcpalog, const char *oem_id, const char *oem_table_id) { uint8_t start_method_params[12] = {}; - unsigned log_addr_offset, tpm2_start; + unsigned log_addr_offset; uint64_t control_area_start_address; TPMIf *tpmif = tpm_find(); uint32_t start_method; + AcpiTable table = { .sig = "TPM2", .rev = 4, + .oem_id = oem_id, .oem_table_id = oem_table_id }; - tpm2_start = table_data->len; - acpi_data_push(table_data, sizeof(AcpiTableHeader)); + acpi_table_begin(&table, table_data); /* Platform Class */ build_append_int_noprefix(table_data, TPM2_ACPI_CLASS_CLIENT, 2); @@ -2098,9 +2262,7 @@ void build_tpm2(GArray *table_data, BIOSLinker *linker, GArray *tcpalog, bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE, log_addr_offset, 8, ACPI_BUILD_TPMLOG_FILE, 0); - build_header(linker, table_data, - (void *)(table_data->data + tpm2_start), - "TPM2", table_data->len - tpm2_start, 4, oem_id, oem_table_id); + acpi_table_end(linker, &table); } #endif diff --git a/hw/acpi/core.c b/hw/acpi/core.c index 435e190b00..ac485f6f16 100644 --- a/hw/acpi/core.c +++ b/hw/acpi/core.c @@ -345,8 +345,8 @@ int acpi_get_slic_oem(AcpiSlicOem *oem) struct acpi_table_header *hdr = (void *)(u - sizeof(hdr->_length)); if (memcmp(hdr->sig, "SLIC", 4) == 0) { - oem->id = hdr->oem_id; - oem->table_id = hdr->oem_table_id; + oem->id = g_strndup(hdr->oem_id, 6); + oem->table_id = g_strndup(hdr->oem_table_id, 8); return 0; } } diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c index f82e9512fd..3646dbfe68 100644 --- a/hw/acpi/cpu.c +++ b/hw/acpi/cpu.c @@ -297,7 +297,6 @@ static const VMStateDescription vmstate_cpuhp_sts = { .name = "CPU hotplug device state", .version_id = 1, .minimum_version_id = 1, - 
.minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_BOOL(is_inserting, AcpiCpuStatus), VMSTATE_BOOL(is_removing, AcpiCpuStatus), @@ -311,7 +310,6 @@ const VMStateDescription vmstate_cpu_hotplug = { .name = "CPU hotplug state", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_UINT32(selector, CPUHotplugState), VMSTATE_UINT8(command, CPUHotplugState), @@ -669,21 +667,8 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, /* build _MAT object */ assert(adevc && adevc->madt_cpu); - adevc->madt_cpu(adev, i, arch_ids, madt_buf); - switch (madt_buf->data[0]) { - case ACPI_APIC_PROCESSOR: { - AcpiMadtProcessorApic *apic = (void *)madt_buf->data; - apic->flags = cpu_to_le32(1); - break; - } - case ACPI_APIC_LOCAL_X2APIC: { - AcpiMadtProcessorX2Apic *apic = (void *)madt_buf->data; - apic->flags = cpu_to_le32(1); - break; - } - default: - assert(0); - } + adevc->madt_cpu(adev, i, arch_ids, madt_buf, + true); /* set enabled flag */ aml_append(dev, aml_name_decl("_MAT", aml_buffer(madt_buf->len, (uint8_t *)madt_buf->data))); g_array_free(madt_buf, true); diff --git a/hw/acpi/cpu_hotplug.c b/hw/acpi/cpu_hotplug.c index 53654f8638..ff14c3f410 100644 --- a/hw/acpi/cpu_hotplug.c +++ b/hw/acpi/cpu_hotplug.c @@ -52,6 +52,9 @@ static const MemoryRegionOps AcpiCpuHotplug_ops = { .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 1, + .max_access_size = 4, + }, + .impl = { .max_access_size = 1, }, }; diff --git a/hw/acpi/cxl-stub.c b/hw/acpi/cxl-stub.c new file mode 100644 index 0000000000..15bc21076b --- /dev/null +++ b/hw/acpi/cxl-stub.c @@ -0,0 +1,12 @@ + +/* + * Stubs for ACPI platforms that don't support CXl + */ +#include "qemu/osdep.h" +#include "hw/acpi/aml-build.h" +#include "hw/acpi/cxl.h" + +void build_cxl_osc_method(Aml *dev) +{ + g_assert_not_reached(); +} diff --git a/hw/acpi/cxl.c b/hw/acpi/cxl.c new file mode 100644 index 0000000000..2bf8c07993 --- /dev/null +++ b/hw/acpi/cxl.c @@ -0,0 +1,256 @@ +/* + * CXL ACPI Implementation + * + * Copyright(C) 2020 Intel Corporation. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
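
The cpu_hotplug.c register tweak a little further up separates what a guest is allowed to issue (.valid, now 1 to 4 bytes) from what the device callbacks implement (.impl, single bytes): the memory core splits the wider access and recombines the result. A rough stand-alone model of that splitting for reads (not the QEMU memory core; endianness handling simplified to little-endian):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative sketch, not QEMU's internal access splitting. */

    /* pretend device register file with a byte-wide backend */
    static uint8_t regs[4] = { 0x11, 0x22, 0x33, 0x44 };

    static uint8_t read_impl(unsigned addr)            /* impl.max_access_size = 1 */
    {
        return regs[addr & 3];
    }

    /* what the core does for a wider guest access (little-endian combine) */
    static uint32_t read_valid(unsigned addr, unsigned size)   /* size in 1..4 */
    {
        uint32_t val = 0;

        for (unsigned i = 0; i < size; i++) {
            val |= (uint32_t)read_impl(addr + i) << (8 * i);
        }
        return val;
    }

    int main(void)
    {
        printf("1-byte read: 0x%x\n", read_valid(0, 1));   /* 0x11 */
        printf("4-byte read: 0x%x\n", read_valid(0, 4));   /* 0x44332211 */
        return 0;
    }
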
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_host.h" +#include "hw/cxl/cxl.h" +#include "hw/mem/memory-device.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/aml-build.h" +#include "hw/acpi/bios-linker-loader.h" +#include "hw/acpi/cxl.h" +#include "qapi/error.h" +#include "qemu/uuid.h" + +static void cedt_build_chbs(GArray *table_data, PXBDev *cxl) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(cxl->cxl.cxl_host_bridge); + struct MemoryRegion *mr = sbd->mmio[0].memory; + + /* Type */ + build_append_int_noprefix(table_data, 0, 1); + + /* Reserved */ + build_append_int_noprefix(table_data, 0, 1); + + /* Record Length */ + build_append_int_noprefix(table_data, 32, 2); + + /* UID - currently equal to bus number */ + build_append_int_noprefix(table_data, cxl->bus_nr, 4); + + /* Version */ + build_append_int_noprefix(table_data, 1, 4); + + /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); + + /* Base - subregion within a container that is in PA space */ + build_append_int_noprefix(table_data, mr->container->addr + mr->addr, 8); + + /* Length */ + build_append_int_noprefix(table_data, memory_region_size(mr), 8); +} + +/* + * CFMWS entries in CXL 2.0 ECN: CEDT CFMWS & QTG _DSM. + * Interleave ways encoding in CXL 2.0 ECN: 3, 6, 12 and 16-way memory + * interleaving. + */ +static void cedt_build_cfmws(GArray *table_data, CXLState *cxls) +{ + GList *it; + + for (it = cxls->fixed_windows; it; it = it->next) { + CXLFixedWindow *fw = it->data; + int i; + + /* Type */ + build_append_int_noprefix(table_data, 1, 1); + + /* Reserved */ + build_append_int_noprefix(table_data, 0, 1); + + /* Record Length */ + build_append_int_noprefix(table_data, 36 + 4 * fw->num_targets, 2); + + /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); + + /* Base HPA */ + build_append_int_noprefix(table_data, fw->mr.addr, 8); + + /* Window Size */ + build_append_int_noprefix(table_data, fw->size, 8); + + /* Host Bridge Interleave Ways */ + build_append_int_noprefix(table_data, fw->enc_int_ways, 1); + + /* Host Bridge Interleave Arithmetic */ + build_append_int_noprefix(table_data, 0, 1); + + /* Reserved */ + build_append_int_noprefix(table_data, 0, 2); + + /* Host Bridge Interleave Granularity */ + build_append_int_noprefix(table_data, fw->enc_int_gran, 4); + + /* Window Restrictions */ + build_append_int_noprefix(table_data, 0x0f, 2); /* No restrictions */ + + /* QTG ID */ + build_append_int_noprefix(table_data, 0, 2); + + /* Host Bridge List (list of UIDs - currently bus_nr) */ + for (i = 0; i < fw->num_targets; i++) { + g_assert(fw->target_hbs[i]); + build_append_int_noprefix(table_data, fw->target_hbs[i]->bus_nr, 4); + } + } +} + +static int cxl_foreach_pxb_hb(Object *obj, void *opaque) +{ + Aml *cedt = opaque; + + if (object_dynamic_cast(obj, TYPE_PXB_CXL_DEVICE)) { + cedt_build_chbs(cedt->buf, PXB_CXL_DEV(obj)); + } + + return 0; +} + +void cxl_build_cedt(GArray *table_offsets, GArray *table_data, + BIOSLinker *linker, const char *oem_id, + const char *oem_table_id, CXLState *cxl_state) +{ + Aml *cedt; + AcpiTable table = { .sig = "CEDT", .rev = 1, .oem_id = oem_id, + .oem_table_id = oem_table_id }; + + acpi_add_table(table_offsets, table_data); + acpi_table_begin(&table, table_data); + cedt = init_aml_allocator(); + + /* reserve space for CEDT header */ + + object_child_foreach_recursive(object_get_root(), 
cxl_foreach_pxb_hb, cedt); + cedt_build_cfmws(cedt->buf, cxl_state); + + /* copy AML table into ACPI tables blob and patch header there */ + g_array_append_vals(table_data, cedt->buf->data, cedt->buf->len); + free_aml_allocator(); + + acpi_table_end(linker, &table); +} + +static Aml *__build_cxl_osc_method(void) +{ + Aml *method, *if_uuid, *else_uuid, *if_arg1_not_1, *if_cxl, *if_caps_masked; + Aml *a_ctrl = aml_local(0); + Aml *a_cdw1 = aml_name("CDW1"); + + method = aml_method("_OSC", 4, AML_NOTSERIALIZED); + /* CDW1 is used for the return value so is present whether or not a match occurs */ + aml_append(method, aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1")); + + /* + * Generate shared section between: + * CXL 2.0 - 9.14.2.1.4 and + * PCI Firmware Specification 3.0 + * 4.5.1. _OSC Interface for PCI Host Bridge Devices + * The _OSC interface for a PCI/PCI-X/PCI Express hierarchy is + * identified by the Universal Unique IDentifier (UUID) + * 33DB4D5B-1FF7-401C-9657-7441C03DD766 + * The _OSC interface for a CXL Host bridge is + * identified by the UUID 68F2D50B-C469-4D8A-BD3D-941A103FD3FC + * A CXL Host bridge is compatible with a PCI host bridge so + * for the shared section match both. + */ + if_uuid = aml_if( + aml_lor(aml_equal(aml_arg(0), + aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766")), + aml_equal(aml_arg(0), + aml_touuid("68F2D50B-C469-4D8A-BD3D-941A103FD3FC")))); + aml_append(if_uuid, aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2")); + aml_append(if_uuid, aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3")); + + aml_append(if_uuid, aml_store(aml_name("CDW3"), a_ctrl)); + + /* + * + * Allows OS control for all 5 features: + * PCIeHotplug SHPCHotplug PME AER PCIeCapability + */ + aml_append(if_uuid, aml_and(a_ctrl, aml_int(0x1F), a_ctrl)); + + /* + * Check _OSC revision. 
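
For reference while reading the AML above and below: _OSC communicates through Arg3, a buffer of DWORDs. CDW1 (offset 0) carries the returned status/error bits, CDW2 (offset 4) the support field, CDW3 (offset 8) the control field, and for the CXL UUID the method additionally creates CDW4/CDW5 at offsets 12 and 16. Seen from C, the buffer and the error bits the method may set look like this (the struct and macro names are illustrative; the bit values are the ones used in the AML):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative sketch of the _OSC Arg3 layout used above. */
    struct OscBuffer {
        uint32_t cdw1;   /* offset 0:  status / error bits (returned)    */
        uint32_t cdw2;   /* offset 4:  support field                     */
        uint32_t cdw3;   /* offset 8:  control field (request and grant) */
        uint32_t cdw4;   /* offset 12: CXL support field (CXL UUID only) */
        uint32_t cdw5;   /* offset 16: CXL control field (CXL UUID only) */
    };

    /* error bits the method may OR into CDW1 */
    #define OSC_UNRECOGNIZED_UUID    (1u << 2)   /* 0x04 */
    #define OSC_UNRECOGNIZED_REV     (1u << 3)   /* 0x08 */
    #define OSC_CAPABILITIES_MASKED  (1u << 4)   /* 0x10 */

    int main(void)
    {
        uint8_t raw[20] = { 0 };
        struct OscBuffer osc;

        raw[8] = 0x1f;                  /* guest requests all five PCIe features */
        memcpy(&osc, raw, sizeof(osc)); /* little-endian host assumed */

        printf("requested control (CDW3) = 0x%x\n", (unsigned)osc.cdw3);
        printf("'capabilities masked'    = 0x%x\n", OSC_CAPABILITIES_MASKED);
        return 0;
    }
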
+ * PCI Firmware specification 3.3 and CXL 2.0 both use revision 1 + * Unknown Revision is CDW1 - BIT (3) + */ + if_arg1_not_1 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(0x1)))); + aml_append(if_arg1_not_1, aml_or(a_cdw1, aml_int(0x08), a_cdw1)); + aml_append(if_uuid, if_arg1_not_1); + + if_caps_masked = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), a_ctrl))); + + /* Capability bits were masked */ + aml_append(if_caps_masked, aml_or(a_cdw1, aml_int(0x10), a_cdw1)); + aml_append(if_uuid, if_caps_masked); + + aml_append(if_uuid, aml_store(aml_name("CDW2"), aml_name("SUPP"))); + aml_append(if_uuid, aml_store(aml_name("CDW3"), aml_name("CTRL"))); + + /* Update DWORD3 (the return value) */ + aml_append(if_uuid, aml_store(a_ctrl, aml_name("CDW3"))); + + /* CXL only section as per CXL 2.0 - 9.14.2.1.4 */ + if_cxl = aml_if(aml_equal( + aml_arg(0), aml_touuid("68F2D50B-C469-4D8A-BD3D-941A103FD3FC"))); + /* CXL support field */ + aml_append(if_cxl, aml_create_dword_field(aml_arg(3), aml_int(12), "CDW4")); + /* CXL capabilities */ + aml_append(if_cxl, aml_create_dword_field(aml_arg(3), aml_int(16), "CDW5")); + aml_append(if_cxl, aml_store(aml_name("CDW4"), aml_name("SUPC"))); + aml_append(if_cxl, aml_store(aml_name("CDW5"), aml_name("CTRC"))); + + /* CXL 2.0 Port/Device Register access */ + aml_append(if_cxl, + aml_or(aml_name("CDW5"), aml_int(0x1), aml_name("CDW5"))); + aml_append(if_uuid, if_cxl); + + aml_append(if_uuid, aml_return(aml_arg(3))); + aml_append(method, if_uuid); + + /* + * If no UUID matched, return Unrecognized UUID via Arg3 DWord 1 + * ACPI 6.4 - 6.2.11 + * Unrecognised UUID - BIT(2) + */ + else_uuid = aml_else(); + + aml_append(else_uuid, + aml_or(aml_name("CDW1"), aml_int(0x4), aml_name("CDW1"))); + aml_append(else_uuid, aml_return(aml_arg(3))); + aml_append(method, else_uuid); + + return method; +} + +void build_cxl_osc_method(Aml *dev) +{ + aml_append(dev, aml_name_decl("SUPP", aml_int(0))); + aml_append(dev, aml_name_decl("CTRL", aml_int(0))); + aml_append(dev, aml_name_decl("SUPC", aml_int(0))); + aml_append(dev, aml_name_decl("CTRC", aml_int(0))); + aml_append(dev, __build_cxl_osc_method()); +} diff --git a/hw/acpi/erst.c b/hw/acpi/erst.c new file mode 100644 index 0000000000..aefcc03ad6 --- /dev/null +++ b/hw/acpi/erst.c @@ -0,0 +1,1049 @@ +/* + * ACPI Error Record Serialization Table, ERST, Implementation + * + * ACPI ERST introduced in ACPI 4.0, June 16, 2009. + * ACPI Platform Error Interfaces : Error Serialization + * + * Copyright (c) 2021 Oracle and/or its affiliates. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/qdev-core.h" +#include "exec/memory.h" +#include "qom/object.h" +#include "hw/pci/pci.h" +#include "qom/object_interfaces.h" +#include "qemu/error-report.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/acpi-defs.h" +#include "hw/acpi/aml-build.h" +#include "hw/acpi/bios-linker-loader.h" +#include "exec/address-spaces.h" +#include "sysemu/hostmem.h" +#include "hw/acpi/erst.h" +#include "trace.h" + +/* ACPI 4.0: Table 17-16 Serialization Actions */ +#define ACTION_BEGIN_WRITE_OPERATION 0x0 +#define ACTION_BEGIN_READ_OPERATION 0x1 +#define ACTION_BEGIN_CLEAR_OPERATION 0x2 +#define ACTION_END_OPERATION 0x3 +#define ACTION_SET_RECORD_OFFSET 0x4 +#define ACTION_EXECUTE_OPERATION 0x5 +#define ACTION_CHECK_BUSY_STATUS 0x6 +#define ACTION_GET_COMMAND_STATUS 0x7 +#define ACTION_GET_RECORD_IDENTIFIER 0x8 +#define ACTION_SET_RECORD_IDENTIFIER 0x9 +#define ACTION_GET_RECORD_COUNT 0xA +#define ACTION_BEGIN_DUMMY_WRITE_OPERATION 0xB +#define ACTION_RESERVED 0xC +#define ACTION_GET_ERROR_LOG_ADDRESS_RANGE 0xD +#define ACTION_GET_ERROR_LOG_ADDRESS_LENGTH 0xE +#define ACTION_GET_ERROR_LOG_ADDRESS_RANGE_ATTRIBUTES 0xF +#define ACTION_GET_EXECUTE_OPERATION_TIMINGS 0x10 /* ACPI 6.3 */ + +/* ACPI 4.0: Table 17-17 Command Status Definitions */ +#define STATUS_SUCCESS 0x00 +#define STATUS_NOT_ENOUGH_SPACE 0x01 +#define STATUS_HARDWARE_NOT_AVAILABLE 0x02 +#define STATUS_FAILED 0x03 +#define STATUS_RECORD_STORE_EMPTY 0x04 +#define STATUS_RECORD_NOT_FOUND 0x05 + +/* ACPI 4.0: Table 17-19 Serialization Instructions */ +#define INST_READ_REGISTER 0x00 +#define INST_READ_REGISTER_VALUE 0x01 +#define INST_WRITE_REGISTER 0x02 +#define INST_WRITE_REGISTER_VALUE 0x03 +#define INST_NOOP 0x04 +#define INST_LOAD_VAR1 0x05 +#define INST_LOAD_VAR2 0x06 +#define INST_STORE_VAR1 0x07 +#define INST_ADD 0x08 +#define INST_SUBTRACT 0x09 +#define INST_ADD_VALUE 0x0A +#define INST_SUBTRACT_VALUE 0x0B +#define INST_STALL 0x0C +#define INST_STALL_WHILE_TRUE 0x0D +#define INST_SKIP_NEXT_INSTRUCTION_IF_TRUE 0x0E +#define INST_GOTO 0x0F +#define INST_SET_SRC_ADDRESS_BASE 0x10 +#define INST_SET_DST_ADDRESS_BASE 0x11 +#define INST_MOVE_DATA 0x12 + +/* UEFI 2.1: Appendix N Common Platform Error Record */ +#define UEFI_CPER_RECORD_MIN_SIZE 128U +#define UEFI_CPER_RECORD_LENGTH_OFFSET 20U +#define UEFI_CPER_RECORD_ID_OFFSET 96U + +/* + * NOTE that when accessing CPER fields within a record, memcpy() + * is utilized to avoid a possible misaligned access on the host. + */ + +/* + * This implementation is an ACTION (cmd) and VALUE (data) + * interface consisting of just two 64-bit registers. + */ +#define ERST_REG_SIZE (16UL) +#define ERST_ACTION_OFFSET (0UL) /* action (cmd) */ +#define ERST_VALUE_OFFSET (8UL) /* argument/value (data) */ + +/* + * ERST_RECORD_SIZE is the buffer size for exchanging ERST + * record contents. Thus, it defines the maximum record size. + * As this is mapped through a PCI BAR, it must be a power of + * two and larger than UEFI_CPER_RECORD_MIN_SIZE. + * The backing storage is divided into fixed size "slots", + * each ERST_RECORD_SIZE in length, and each "slot" + * storing a single record. No attempt at optimizing storage + * through compression, compaction, etc is attempted. + * NOTE that slot 0 is reserved for the backing storage header. 
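
The comment continues below with how the header may spill into additional slots; the bookkeeping it describes comes down to simple arithmetic. The sketch that follows only illustrates that sizing trade-off (the real header accounting in erst.c may differ in detail, e.g. in how many map entries it reserves):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative sketch mirroring the header fields listed above. */
    typedef struct {
        uint64_t magic;
        uint32_t record_size;
        uint32_t storage_offset;
        uint16_t version;
        uint16_t reserved;
        uint32_t record_count;
        uint64_t map[];                  /* one record_id per slot */
    } __attribute__((packed)) StorageHeader;

    int main(void)
    {
        const uint64_t storage_size = 1 << 20;   /* e.g. a 1 MiB memdev backend */
        const uint64_t record_size  = 8192;      /* ERST_RECORD_SIZE */
        uint64_t total_slots  = storage_size / record_size;
        uint64_t header_bytes = sizeof(StorageHeader)
                              + total_slots * sizeof(uint64_t);
        uint64_t header_slots = (header_bytes + record_size - 1) / record_size;

        printf("%llu slots total, %llu taken by the header, %llu left for records\n",
               (unsigned long long)total_slots,
               (unsigned long long)header_slots,
               (unsigned long long)(total_slots - header_slots));
        return 0;
    }
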
+ * Depending upon the size of the backing storage, additional + * slots will be part of the slot 0 header in order to account + * for a record_id for each available remaining slot. + */ +/* 8KiB records, not too small, not too big */ +#define ERST_RECORD_SIZE (8192UL) + +#define ACPI_ERST_MEMDEV_PROP "memdev" +#define ACPI_ERST_RECORD_SIZE_PROP "record_size" + +/* + * From the ACPI ERST spec sections: + * A record id of all 0s is used to indicate 'unspecified' record id. + * A record id of all 1s is used to indicate empty or end. + */ +#define ERST_UNSPECIFIED_RECORD_ID (0UL) +#define ERST_EMPTY_END_RECORD_ID (~0UL) + +#define ERST_IS_VALID_RECORD_ID(rid) \ + ((rid != ERST_UNSPECIFIED_RECORD_ID) && \ + (rid != ERST_EMPTY_END_RECORD_ID)) + +/* + * Implementation-specific definitions and types. + * Values are arbitrary and chosen for this implementation. + * See erst.rst documentation for details. + */ +#define ERST_EXECUTE_OPERATION_MAGIC 0x9CUL +#define ERST_STORE_MAGIC 0x524F545354535245UL /* ERSTSTOR */ +typedef struct { + uint64_t magic; + uint32_t record_size; + uint32_t storage_offset; /* offset to record storage beyond header */ + uint16_t version; + uint16_t reserved; + uint32_t record_count; + uint64_t map[]; /* contains record_ids, and position indicates index */ +} __attribute__((packed)) ERSTStorageHeader; + +/* + * Object cast macro + */ +#define ACPIERST(obj) \ + OBJECT_CHECK(ERSTDeviceState, (obj), TYPE_ACPI_ERST) + +/* + * Main ERST device state structure + */ +typedef struct { + PCIDevice parent_obj; + + /* Backend storage */ + HostMemoryBackend *hostmem; + MemoryRegion *hostmem_mr; + uint32_t storage_size; + uint32_t default_record_size; + + /* Programming registers */ + MemoryRegion iomem_mr; + + /* Exchange buffer */ + MemoryRegion exchange_mr; + + /* Interface state */ + uint8_t operation; + uint8_t busy_status; + uint8_t command_status; + uint32_t record_offset; + uint64_t reg_action; + uint64_t reg_value; + uint64_t record_identifier; + ERSTStorageHeader *header; + unsigned first_record_index; + unsigned last_record_index; + unsigned next_record_index; + +} ERSTDeviceState; + +/*******************************************************************/ +/*******************************************************************/ +typedef struct { + GArray *table_data; + pcibus_t bar; + uint8_t instruction; + uint8_t flags; + uint8_t register_bit_width; + pcibus_t register_offset; +} BuildSerializationInstructionEntry; + +/* ACPI 4.0: 17.4.1.2 Serialization Instruction Entries */ +static void build_serialization_instruction( + BuildSerializationInstructionEntry *e, + uint8_t serialization_action, + uint64_t value) +{ + /* ACPI 4.0: Table 17-18 Serialization Instruction Entry */ + struct AcpiGenericAddress gas; + uint64_t mask; + + /* Serialization Action */ + build_append_int_noprefix(e->table_data, serialization_action, 1); + /* Instruction */ + build_append_int_noprefix(e->table_data, e->instruction, 1); + /* Flags */ + build_append_int_noprefix(e->table_data, e->flags, 1); + /* Reserved */ + build_append_int_noprefix(e->table_data, 0, 1); + /* Register Region */ + gas.space_id = AML_SYSTEM_MEMORY; + gas.bit_width = e->register_bit_width; + gas.bit_offset = 0; + gas.access_width = (uint8_t)ctz32(e->register_bit_width) - 2; + gas.address = (uint64_t)(e->bar + e->register_offset); + build_append_gas_from_struct(e->table_data, &gas); + /* Value */ + build_append_int_noprefix(e->table_data, value, 8); + /* Mask */ + mask = (1ULL << (e->register_bit_width - 1) << 1) - 1; + 
build_append_int_noprefix(e->table_data, mask, 8); +} + +/* ACPI 4.0: 17.4.1 Serialization Action Table */ +void build_erst(GArray *table_data, BIOSLinker *linker, Object *erst_dev, + const char *oem_id, const char *oem_table_id) +{ + /* + * Serialization Action Table + * The serialization action table must be generated first + * so that its size can be known in order to populate the + * Instruction Entry Count field. + */ + unsigned action; + GArray *table_instruction_data = g_array_new(FALSE, FALSE, sizeof(char)); + pcibus_t bar0 = pci_get_bar_addr(PCI_DEVICE(erst_dev), 0); + AcpiTable table = { .sig = "ERST", .rev = 1, .oem_id = oem_id, + .oem_table_id = oem_table_id }; + /* Contexts for the different ways ACTION and VALUE are accessed */ + BuildSerializationInstructionEntry rd_value_32_val = { + .table_data = table_instruction_data, .bar = bar0, .flags = 0, + .instruction = INST_READ_REGISTER_VALUE, + .register_bit_width = 32, + .register_offset = ERST_VALUE_OFFSET, + }; + BuildSerializationInstructionEntry rd_value_32 = { + .table_data = table_instruction_data, .bar = bar0, .flags = 0, + .instruction = INST_READ_REGISTER, + .register_bit_width = 32, + .register_offset = ERST_VALUE_OFFSET, + }; + BuildSerializationInstructionEntry rd_value_64 = { + .table_data = table_instruction_data, .bar = bar0, .flags = 0, + .instruction = INST_READ_REGISTER, + .register_bit_width = 64, + .register_offset = ERST_VALUE_OFFSET, + }; + BuildSerializationInstructionEntry wr_value_32_val = { + .table_data = table_instruction_data, .bar = bar0, .flags = 0, + .instruction = INST_WRITE_REGISTER_VALUE, + .register_bit_width = 32, + .register_offset = ERST_VALUE_OFFSET, + }; + BuildSerializationInstructionEntry wr_value_32 = { + .table_data = table_instruction_data, .bar = bar0, .flags = 0, + .instruction = INST_WRITE_REGISTER, + .register_bit_width = 32, + .register_offset = ERST_VALUE_OFFSET, + }; + BuildSerializationInstructionEntry wr_value_64 = { + .table_data = table_instruction_data, .bar = bar0, .flags = 0, + .instruction = INST_WRITE_REGISTER, + .register_bit_width = 64, + .register_offset = ERST_VALUE_OFFSET, + }; + BuildSerializationInstructionEntry wr_action = { + .table_data = table_instruction_data, .bar = bar0, .flags = 0, + .instruction = INST_WRITE_REGISTER_VALUE, + .register_bit_width = 32, + .register_offset = ERST_ACTION_OFFSET, + }; + + trace_acpi_erst_pci_bar_0(bar0); + + /* Serialization Instruction Entries */ + action = ACTION_BEGIN_WRITE_OPERATION; + build_serialization_instruction(&wr_action, action, action); + + action = ACTION_BEGIN_READ_OPERATION; + build_serialization_instruction(&wr_action, action, action); + + action = ACTION_BEGIN_CLEAR_OPERATION; + build_serialization_instruction(&wr_action, action, action); + + action = ACTION_END_OPERATION; + build_serialization_instruction(&wr_action, action, action); + + action = ACTION_SET_RECORD_OFFSET; + build_serialization_instruction(&wr_value_32, action, 0); + build_serialization_instruction(&wr_action, action, action); + + action = ACTION_EXECUTE_OPERATION; + build_serialization_instruction(&wr_value_32_val, action, + ERST_EXECUTE_OPERATION_MAGIC); + build_serialization_instruction(&wr_action, action, action); + + action = ACTION_CHECK_BUSY_STATUS; + build_serialization_instruction(&wr_action, action, action); + build_serialization_instruction(&rd_value_32_val, action, 0x01); + + action = ACTION_GET_COMMAND_STATUS; + build_serialization_instruction(&wr_action, action, action); + build_serialization_instruction(&rd_value_32, 
action, 0); + + action = ACTION_GET_RECORD_IDENTIFIER; + build_serialization_instruction(&wr_action, action, action); + build_serialization_instruction(&rd_value_64, action, 0); + + action = ACTION_SET_RECORD_IDENTIFIER; + build_serialization_instruction(&wr_value_64, action, 0); + build_serialization_instruction(&wr_action, action, action); + + action = ACTION_GET_RECORD_COUNT; + build_serialization_instruction(&wr_action, action, action); + build_serialization_instruction(&rd_value_32, action, 0); + + action = ACTION_BEGIN_DUMMY_WRITE_OPERATION; + build_serialization_instruction(&wr_action, action, action); + + action = ACTION_GET_ERROR_LOG_ADDRESS_RANGE; + build_serialization_instruction(&wr_action, action, action); + build_serialization_instruction(&rd_value_64, action, 0); + + action = ACTION_GET_ERROR_LOG_ADDRESS_LENGTH; + build_serialization_instruction(&wr_action, action, action); + build_serialization_instruction(&rd_value_64, action, 0); + + action = ACTION_GET_ERROR_LOG_ADDRESS_RANGE_ATTRIBUTES; + build_serialization_instruction(&wr_action, action, action); + build_serialization_instruction(&rd_value_32, action, 0); + + action = ACTION_GET_EXECUTE_OPERATION_TIMINGS; + build_serialization_instruction(&wr_action, action, action); + build_serialization_instruction(&rd_value_64, action, 0); + + /* Serialization Header */ + acpi_table_begin(&table, table_data); + + /* Serialization Header Size */ + build_append_int_noprefix(table_data, 48, 4); + + /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); + + /* + * Instruction Entry Count + * Each instruction entry is 32 bytes + */ + g_assert((table_instruction_data->len) % 32 == 0); + build_append_int_noprefix(table_data, + (table_instruction_data->len / 32), 4); + + /* Serialization Instruction Entries */ + g_array_append_vals(table_data, table_instruction_data->data, + table_instruction_data->len); + g_array_free(table_instruction_data, TRUE); + + acpi_table_end(linker, &table); +} + +/*******************************************************************/ +/*******************************************************************/ +static uint8_t *get_nvram_ptr_by_index(ERSTDeviceState *s, unsigned index) +{ + uint8_t *rc = NULL; + off_t offset = (index * le32_to_cpu(s->header->record_size)); + + g_assert(offset < s->storage_size); + + rc = memory_region_get_ram_ptr(s->hostmem_mr); + rc += offset; + + return rc; +} + +static void make_erst_storage_header(ERSTDeviceState *s) +{ + ERSTStorageHeader *header = s->header; + unsigned mapsz, headersz; + + header->magic = cpu_to_le64(ERST_STORE_MAGIC); + header->record_size = cpu_to_le32(s->default_record_size); + header->version = cpu_to_le16(0x0100); + header->reserved = cpu_to_le16(0x0000); + + /* Compute mapsize */ + mapsz = s->storage_size / s->default_record_size; + mapsz *= sizeof(uint64_t); + /* Compute header+map size */ + headersz = sizeof(ERSTStorageHeader) + mapsz; + /* Round up to nearest integer multiple of ERST_RECORD_SIZE */ + headersz = QEMU_ALIGN_UP(headersz, s->default_record_size); + header->storage_offset = cpu_to_le32(headersz); + + /* + * The HostMemoryBackend initializes contents to zero, + * so all record_ids stashed in the map are zero'd. + * As well the record_count is zero. Properly initialized. 
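+ * (ERST_UNSPECIFIED_RECORD_ID is 0, so a zeroed map already marks every
+ * slot as free; no explicit wipe of the storage is required.)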
+ */ +} + +static void check_erst_backend_storage(ERSTDeviceState *s, Error **errp) +{ + ERSTStorageHeader *header; + uint32_t record_size; + + header = memory_region_get_ram_ptr(s->hostmem_mr); + s->header = header; + + /* Ensure pointer to header is 64-bit aligned */ + g_assert(QEMU_PTR_IS_ALIGNED(header, sizeof(uint64_t))); + + /* + * Check if header is uninitialized; HostMemoryBackend inits to 0 + */ + if (le64_to_cpu(header->magic) == 0UL) { + make_erst_storage_header(s); + } + + /* Validity check record_size */ + record_size = le32_to_cpu(header->record_size); + if (!( + (record_size) && /* non zero */ + (record_size >= UEFI_CPER_RECORD_MIN_SIZE) && + (((record_size - 1) & record_size) == 0) && /* is power of 2 */ + (record_size >= 4096) /* PAGE_SIZE */ + )) { + error_setg(errp, "ERST record_size %u is invalid", record_size); + return; + } + + /* Validity check header */ + if (!( + (le64_to_cpu(header->magic) == ERST_STORE_MAGIC) && + ((le32_to_cpu(header->storage_offset) % record_size) == 0) && + (le16_to_cpu(header->version) == 0x0100) && + (le16_to_cpu(header->reserved) == 0) + )) { + error_setg(errp, "ERST backend storage header is invalid"); + return; + } + + /* Check storage_size against record_size */ + if (((s->storage_size % record_size) != 0) || + (record_size > s->storage_size)) { + error_setg(errp, "ACPI ERST requires storage size be multiple of " + "record size (%uKiB)", record_size); + return; + } + + /* Compute offset of first and last record storage slot */ + s->first_record_index = le32_to_cpu(header->storage_offset) + / record_size; + s->last_record_index = (s->storage_size / record_size); +} + +static void update_map_entry(ERSTDeviceState *s, unsigned index, + uint64_t record_id) +{ + if (index < s->last_record_index) { + s->header->map[index] = cpu_to_le64(record_id); + } +} + +static unsigned find_next_empty_record_index(ERSTDeviceState *s) +{ + unsigned rc = 0; /* 0 not a valid index */ + unsigned index = s->first_record_index; + + for (; index < s->last_record_index; ++index) { + if (le64_to_cpu(s->header->map[index]) == ERST_UNSPECIFIED_RECORD_ID) { + rc = index; + break; + } + } + + return rc; +} + +static unsigned lookup_erst_record(ERSTDeviceState *s, + uint64_t record_identifier) +{ + unsigned rc = 0; /* 0 not a valid index */ + + /* Find the record_identifier in the map */ + if (record_identifier != ERST_UNSPECIFIED_RECORD_ID) { + /* + * Count number of valid records encountered, and + * short-circuit the loop if identifier not found + */ + uint32_t record_count = le32_to_cpu(s->header->record_count); + unsigned count = 0; + unsigned index; + for (index = s->first_record_index; index < s->last_record_index && + count < record_count; ++index) { + if (le64_to_cpu(s->header->map[index]) == record_identifier) { + rc = index; + break; + } + if (le64_to_cpu(s->header->map[index]) != + ERST_UNSPECIFIED_RECORD_ID) { + ++count; + } + } + } + + return rc; +} + +/* + * ACPI 4.0: 17.4.1.1 Serialization Actions, also see + * ACPI 4.0: 17.4.2.2 Operations - Reading 6.c and 2.c + */ +static unsigned get_next_record_identifier(ERSTDeviceState *s, + uint64_t *record_identifier, bool first) +{ + unsigned found = 0; + unsigned index; + + /* For operations needing to return 'first' record identifier */ + if (first) { + /* Reset initial index to beginning */ + s->next_record_index = s->first_record_index; + } + index = s->next_record_index; + + *record_identifier = ERST_EMPTY_END_RECORD_ID; + + if (le32_to_cpu(s->header->record_count)) { + for (; index < s->last_record_index; 
++index) { + if (le64_to_cpu(s->header->map[index]) != + ERST_UNSPECIFIED_RECORD_ID) { + /* where to start next time */ + s->next_record_index = index + 1; + *record_identifier = le64_to_cpu(s->header->map[index]); + found = 1; + break; + } + } + } + if (!found) { + /* at end (ie scan complete), reset */ + s->next_record_index = s->first_record_index; + } + + return STATUS_SUCCESS; +} + +/* ACPI 4.0: 17.4.2.3 Operations - Clearing */ +static unsigned clear_erst_record(ERSTDeviceState *s) +{ + unsigned rc = STATUS_RECORD_NOT_FOUND; + unsigned index; + + /* Check for valid record identifier */ + if (!ERST_IS_VALID_RECORD_ID(s->record_identifier)) { + return STATUS_FAILED; + } + + index = lookup_erst_record(s, s->record_identifier); + if (index) { + /* No need to wipe record, just invalidate its map entry */ + uint32_t record_count; + update_map_entry(s, index, ERST_UNSPECIFIED_RECORD_ID); + record_count = le32_to_cpu(s->header->record_count); + record_count -= 1; + s->header->record_count = cpu_to_le32(record_count); + rc = STATUS_SUCCESS; + } + + return rc; +} + +/* ACPI 4.0: 17.4.2.2 Operations - Reading */ +static unsigned read_erst_record(ERSTDeviceState *s) +{ + unsigned rc = STATUS_RECORD_NOT_FOUND; + unsigned exchange_length; + unsigned index; + + /* Check if backend storage is empty */ + if (le32_to_cpu(s->header->record_count) == 0) { + return STATUS_RECORD_STORE_EMPTY; + } + + exchange_length = memory_region_size(&s->exchange_mr); + + /* Check for record identifier of all 0s */ + if (s->record_identifier == ERST_UNSPECIFIED_RECORD_ID) { + /* Set to 'first' record in storage */ + get_next_record_identifier(s, &s->record_identifier, true); + /* record_identifier is now a valid id, or all 1s */ + } + + /* Check for record identifier of all 1s */ + if (s->record_identifier == ERST_EMPTY_END_RECORD_ID) { + return STATUS_FAILED; + } + + /* Validate record_offset */ + if (s->record_offset > (exchange_length - UEFI_CPER_RECORD_MIN_SIZE)) { + return STATUS_FAILED; + } + + index = lookup_erst_record(s, s->record_identifier); + if (index) { + uint8_t *nvram; + uint8_t *exchange; + uint32_t record_length; + + /* Obtain pointer to the exchange buffer */ + exchange = memory_region_get_ram_ptr(&s->exchange_mr); + exchange += s->record_offset; + /* Obtain pointer to slot in storage */ + nvram = get_nvram_ptr_by_index(s, index); + /* Validate CPER record_length */ + memcpy((uint8_t *)&record_length, + &nvram[UEFI_CPER_RECORD_LENGTH_OFFSET], + sizeof(uint32_t)); + record_length = le32_to_cpu(record_length); + if (record_length < UEFI_CPER_RECORD_MIN_SIZE) { + rc = STATUS_FAILED; + } + if (record_length > exchange_length - s->record_offset) { + rc = STATUS_FAILED; + } + /* If all is ok, copy the record to the exchange buffer */ + if (rc != STATUS_FAILED) { + memcpy(exchange, nvram, record_length); + rc = STATUS_SUCCESS; + } + } else { + /* + * See "Reading : 'The steps performed by the platform ...' 
2.c" + * Set to 'first' record in storage + */ + get_next_record_identifier(s, &s->record_identifier, true); + } + + return rc; +} + +/* ACPI 4.0: 17.4.2.1 Operations - Writing */ +static unsigned write_erst_record(ERSTDeviceState *s) +{ + unsigned rc = STATUS_FAILED; + unsigned exchange_length; + unsigned index; + uint64_t record_identifier; + uint32_t record_length; + uint8_t *exchange; + uint8_t *nvram = NULL; + bool record_found = false; + + exchange_length = memory_region_size(&s->exchange_mr); + + /* Validate record_offset */ + if (s->record_offset > (exchange_length - UEFI_CPER_RECORD_MIN_SIZE)) { + return STATUS_FAILED; + } + + /* Obtain pointer to record in the exchange buffer */ + exchange = memory_region_get_ram_ptr(&s->exchange_mr); + exchange += s->record_offset; + + /* Validate CPER record_length */ + memcpy((uint8_t *)&record_length, &exchange[UEFI_CPER_RECORD_LENGTH_OFFSET], + sizeof(uint32_t)); + record_length = le32_to_cpu(record_length); + if (record_length < UEFI_CPER_RECORD_MIN_SIZE) { + return STATUS_FAILED; + } + if (record_length > exchange_length - s->record_offset) { + return STATUS_FAILED; + } + + /* Extract record identifier */ + memcpy((uint8_t *)&record_identifier, &exchange[UEFI_CPER_RECORD_ID_OFFSET], + sizeof(uint64_t)); + record_identifier = le64_to_cpu(record_identifier); + + /* Check for valid record identifier */ + if (!ERST_IS_VALID_RECORD_ID(record_identifier)) { + return STATUS_FAILED; + } + + index = lookup_erst_record(s, record_identifier); + if (index) { + /* Record found, overwrite existing record */ + nvram = get_nvram_ptr_by_index(s, index); + record_found = true; + } else { + /* Record not found, not an overwrite, allocate for write */ + index = find_next_empty_record_index(s); + if (index) { + nvram = get_nvram_ptr_by_index(s, index); + } else { + /* All slots are occupied */ + rc = STATUS_NOT_ENOUGH_SPACE; + } + } + if (nvram) { + /* Write the record into the slot */ + memcpy(nvram, exchange, record_length); + memset(nvram + record_length, 0xFF, exchange_length - record_length); + /* If a new record, increment the record_count */ + if (!record_found) { + uint32_t record_count; + record_count = le32_to_cpu(s->header->record_count); + record_count += 1; /* writing new record */ + s->header->record_count = cpu_to_le32(record_count); + } + update_map_entry(s, index, record_identifier); + rc = STATUS_SUCCESS; + } + + return rc; +} + +/*******************************************************************/ + +static uint64_t erst_rd_reg64(hwaddr addr, + uint64_t reg, unsigned size) +{ + uint64_t rdval; + uint64_t mask; + unsigned shift; + + if (size == sizeof(uint64_t)) { + /* 64b access */ + mask = 0xFFFFFFFFFFFFFFFFUL; + shift = 0; + } else { + /* 32b access */ + mask = 0x00000000FFFFFFFFUL; + shift = ((addr & 0x4) == 0x4) ? 32 : 0; + } + + rdval = reg; + rdval >>= shift; + rdval &= mask; + + return rdval; +} + +static uint64_t erst_wr_reg64(hwaddr addr, + uint64_t reg, uint64_t val, unsigned size) +{ + uint64_t wrval; + uint64_t mask; + unsigned shift; + + if (size == sizeof(uint64_t)) { + /* 64b access */ + mask = 0xFFFFFFFFFFFFFFFFUL; + shift = 0; + } else { + /* 32b access */ + mask = 0x00000000FFFFFFFFUL; + shift = ((addr & 0x4) == 0x4) ? 
32 : 0; + } + + val &= mask; + val <<= shift; + mask <<= shift; + wrval = reg; + wrval &= ~mask; + wrval |= val; + + return wrval; +} + +static void erst_reg_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + ERSTDeviceState *s = (ERSTDeviceState *)opaque; + + /* + * NOTE: All actions/operations/side effects happen on the WRITE, + * by this implementation's design. The READs simply return the + * reg_value contents. + */ + trace_acpi_erst_reg_write(addr, val, size); + + switch (addr) { + case ERST_VALUE_OFFSET + 0: + case ERST_VALUE_OFFSET + 4: + s->reg_value = erst_wr_reg64(addr, s->reg_value, val, size); + break; + case ERST_ACTION_OFFSET + 0: + /* + * NOTE: all valid values written to this register are of the + * ACTION_* variety. Thus there is no need to make this a 64-bit + * register, 32-bits is appropriate. As such ERST_ACTION_OFFSET+4 + * is not needed. + */ + switch (val) { + case ACTION_BEGIN_WRITE_OPERATION: + case ACTION_BEGIN_READ_OPERATION: + case ACTION_BEGIN_CLEAR_OPERATION: + case ACTION_BEGIN_DUMMY_WRITE_OPERATION: + case ACTION_END_OPERATION: + s->operation = val; + break; + case ACTION_SET_RECORD_OFFSET: + s->record_offset = s->reg_value; + break; + case ACTION_EXECUTE_OPERATION: + if ((uint8_t)s->reg_value == ERST_EXECUTE_OPERATION_MAGIC) { + s->busy_status = 1; + switch (s->operation) { + case ACTION_BEGIN_WRITE_OPERATION: + s->command_status = write_erst_record(s); + break; + case ACTION_BEGIN_READ_OPERATION: + s->command_status = read_erst_record(s); + break; + case ACTION_BEGIN_CLEAR_OPERATION: + s->command_status = clear_erst_record(s); + break; + case ACTION_BEGIN_DUMMY_WRITE_OPERATION: + s->command_status = STATUS_SUCCESS; + break; + case ACTION_END_OPERATION: + s->command_status = STATUS_SUCCESS; + break; + default: + s->command_status = STATUS_FAILED; + break; + } + s->busy_status = 0; + } + break; + case ACTION_CHECK_BUSY_STATUS: + s->reg_value = s->busy_status; + break; + case ACTION_GET_COMMAND_STATUS: + s->reg_value = s->command_status; + break; + case ACTION_GET_RECORD_IDENTIFIER: + s->command_status = get_next_record_identifier(s, + &s->reg_value, false); + break; + case ACTION_SET_RECORD_IDENTIFIER: + s->record_identifier = s->reg_value; + break; + case ACTION_GET_RECORD_COUNT: + s->reg_value = le32_to_cpu(s->header->record_count); + break; + case ACTION_GET_ERROR_LOG_ADDRESS_RANGE: + s->reg_value = (hwaddr)pci_get_bar_addr(PCI_DEVICE(s), 1); + break; + case ACTION_GET_ERROR_LOG_ADDRESS_LENGTH: + s->reg_value = le32_to_cpu(s->header->record_size); + break; + case ACTION_GET_ERROR_LOG_ADDRESS_RANGE_ATTRIBUTES: + s->reg_value = 0x0; /* intentional, not NVRAM mode */ + break; + case ACTION_GET_EXECUTE_OPERATION_TIMINGS: + s->reg_value = + (100ULL << 32) | /* 100us max time */ + (10ULL << 0) ; /* 10us min time */ + break; + default: + /* Unknown action/command, NOP */ + break; + } + break; + default: + /* This should not happen, but if it does, NOP */ + break; + } +} + +static uint64_t erst_reg_read(void *opaque, hwaddr addr, + unsigned size) +{ + ERSTDeviceState *s = (ERSTDeviceState *)opaque; + uint64_t val = 0; + + switch (addr) { + case ERST_ACTION_OFFSET + 0: + case ERST_ACTION_OFFSET + 4: + val = erst_rd_reg64(addr, s->reg_action, size); + break; + case ERST_VALUE_OFFSET + 0: + case ERST_VALUE_OFFSET + 4: + val = erst_rd_reg64(addr, s->reg_value, size); + break; + default: + break; + } + trace_acpi_erst_reg_read(addr, val, size); + return val; +} + +static const MemoryRegionOps erst_reg_ops = { + .read = erst_reg_read, + .write = 
erst_reg_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +/*******************************************************************/ +/*******************************************************************/ +static int erst_post_load(void *opaque, int version_id) +{ + ERSTDeviceState *s = opaque; + + /* Recompute pointer to header */ + s->header = (ERSTStorageHeader *)get_nvram_ptr_by_index(s, 0); + trace_acpi_erst_post_load(s->header, le32_to_cpu(s->header->record_size)); + + return 0; +} + +static const VMStateDescription erst_vmstate = { + .name = "acpi-erst", + .version_id = 1, + .minimum_version_id = 1, + .post_load = erst_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(operation, ERSTDeviceState), + VMSTATE_UINT8(busy_status, ERSTDeviceState), + VMSTATE_UINT8(command_status, ERSTDeviceState), + VMSTATE_UINT32(record_offset, ERSTDeviceState), + VMSTATE_UINT64(reg_action, ERSTDeviceState), + VMSTATE_UINT64(reg_value, ERSTDeviceState), + VMSTATE_UINT64(record_identifier, ERSTDeviceState), + VMSTATE_UINT32(next_record_index, ERSTDeviceState), + VMSTATE_END_OF_LIST() + } +}; + +static void erst_realizefn(PCIDevice *pci_dev, Error **errp) +{ + ERSTDeviceState *s = ACPIERST(pci_dev); + + trace_acpi_erst_realizefn_in(); + + if (!s->hostmem) { + error_setg(errp, "'" ACPI_ERST_MEMDEV_PROP "' property is not set"); + return; + } else if (host_memory_backend_is_mapped(s->hostmem)) { + error_setg(errp, "can't use already busy memdev: %s", + object_get_canonical_path_component(OBJECT(s->hostmem))); + return; + } + + s->hostmem_mr = host_memory_backend_get_memory(s->hostmem); + + /* HostMemoryBackend size will be multiple of PAGE_SIZE */ + s->storage_size = object_property_get_int(OBJECT(s->hostmem), "size", errp); + + /* Initialize backend storage and record_count */ + check_erst_backend_storage(s, errp); + + /* BAR 0: Programming registers */ + memory_region_init_io(&s->iomem_mr, OBJECT(pci_dev), &erst_reg_ops, s, + TYPE_ACPI_ERST, ERST_REG_SIZE); + pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->iomem_mr); + + /* BAR 1: Exchange buffer memory */ + memory_region_init_ram(&s->exchange_mr, OBJECT(pci_dev), + "erst.exchange", + le32_to_cpu(s->header->record_size), errp); + pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY, + &s->exchange_mr); + + /* Include the backend storage in the migration stream */ + vmstate_register_ram_global(s->hostmem_mr); + + trace_acpi_erst_realizefn_out(s->storage_size); +} + +static void erst_reset(DeviceState *dev) +{ + ERSTDeviceState *s = ACPIERST(dev); + + trace_acpi_erst_reset_in(le32_to_cpu(s->header->record_count)); + s->operation = 0; + s->busy_status = 0; + s->command_status = STATUS_SUCCESS; + s->record_identifier = ERST_UNSPECIFIED_RECORD_ID; + s->record_offset = 0; + s->next_record_index = s->first_record_index; + /* NOTE: first/last_record_index are computed only once */ + trace_acpi_erst_reset_out(le32_to_cpu(s->header->record_count)); +} + +static Property erst_properties[] = { + DEFINE_PROP_LINK(ACPI_ERST_MEMDEV_PROP, ERSTDeviceState, hostmem, + TYPE_MEMORY_BACKEND, HostMemoryBackend *), + DEFINE_PROP_UINT32(ACPI_ERST_RECORD_SIZE_PROP, ERSTDeviceState, + default_record_size, ERST_RECORD_SIZE), + DEFINE_PROP_END_OF_LIST(), +}; + +static void erst_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + trace_acpi_erst_class_init_in(); + k->realize = erst_realizefn; + k->vendor_id = PCI_VENDOR_ID_REDHAT; + k->device_id = 
PCI_DEVICE_ID_REDHAT_ACPI_ERST; + k->revision = 0x00; + k->class_id = PCI_CLASS_OTHERS; + dc->reset = erst_reset; + dc->vmsd = &erst_vmstate; + dc->user_creatable = true; + dc->hotpluggable = false; + device_class_set_props(dc, erst_properties); + dc->desc = "ACPI Error Record Serialization Table (ERST) device"; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + trace_acpi_erst_class_init_out(); +} + +static const TypeInfo erst_type_info = { + .name = TYPE_ACPI_ERST, + .parent = TYPE_PCI_DEVICE, + .class_init = erst_class_init, + .instance_size = sizeof(ERSTDeviceState), + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { } + } +}; + +static void erst_register_types(void) +{ + type_register_static(&erst_type_info); +} + +type_init(erst_register_types) diff --git a/hw/acpi/generic_event_device.c b/hw/acpi/generic_event_device.c index e28457a7d1..a3d31631fe 100644 --- a/hw/acpi/generic_event_device.c +++ b/hw/acpi/generic_event_device.c @@ -267,6 +267,13 @@ static void acpi_ged_unplug_cb(HotplugHandler *hotplug_dev, } } +static void acpi_ged_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list) +{ + AcpiGedState *s = ACPI_GED(adev); + + acpi_memory_ospm_status(&s->memhp_state, list); +} + static void acpi_ged_send_event(AcpiDeviceIf *adev, AcpiEventStatusBits ev) { AcpiGedState *s = ACPI_GED(adev); @@ -409,6 +416,7 @@ static void acpi_ged_class_init(ObjectClass *class, void *data) hc->unplug_request = acpi_ged_unplug_request_cb; hc->unplug = acpi_ged_unplug_cb; + adevc->ospm_status = acpi_ged_ospm_status; adevc->send_event = acpi_ged_send_event; } diff --git a/hw/acpi/ghes.c b/hw/acpi/ghes.c index a749b84d62..e9511d9b8f 100644 --- a/hw/acpi/ghes.c +++ b/hw/acpi/ghes.c @@ -249,7 +249,7 @@ void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker) for (i = 0; i < ACPI_GHES_ERROR_SOURCE_COUNT; i++) { /* * Initialize the value of read_ack_register to 1, so GHES can be - * writeable after (re)boot. + * writable after (re)boot. 
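+ * A non-zero read_ack value indicates the previous error block has
+ * been acknowledged, so a fresh error record may be written there.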
* ACPI 6.2: 18.3.2.8 Generic Hardware Error Source version 2 * (GHESv2 - Type 10) */ @@ -362,18 +362,16 @@ static void build_ghes_v2(GArray *table_data, int source_id, BIOSLinker *linker) void acpi_build_hest(GArray *table_data, BIOSLinker *linker, const char *oem_id, const char *oem_table_id) { - uint64_t hest_start = table_data->len; + AcpiTable table = { .sig = "HEST", .rev = 1, + .oem_id = oem_id, .oem_table_id = oem_table_id }; - /* Hardware Error Source Table header*/ - acpi_data_push(table_data, sizeof(AcpiTableHeader)); + acpi_table_begin(&table, table_data); /* Error Source Count */ build_append_int_noprefix(table_data, ACPI_GHES_ERROR_SOURCE_COUNT, 4); - build_ghes_v2(table_data, ACPI_HEST_SRC_ID_SEA, linker); - build_header(linker, table_data, (void *)(table_data->data + hest_start), - "HEST", table_data->len - hest_start, 1, oem_id, oem_table_id); + acpi_table_end(linker, &table); } void acpi_ghes_add_fw_cfg(AcpiGhesState *ags, FWCfgState *s, diff --git a/hw/acpi/hmat.c b/hw/acpi/hmat.c index edb3fd91b2..3a6d51282a 100644 --- a/hw/acpi/hmat.c +++ b/hw/acpi/hmat.c @@ -128,7 +128,7 @@ static void build_hmat_lb(GArray *table_data, HMAT_LB_Info *hmat_lb, } /* Latency or Bandwidth Entries */ - entry_list = g_malloc0(num_initiator * num_target * sizeof(uint16_t)); + entry_list = g_new0(uint16_t, num_initiator * num_target); for (i = 0; i < hmat_lb->list->len; i++) { lb_data = &g_array_index(hmat_lb->list, HMAT_LB_Data, i); index = lb_data->initiator * num_target + lb_data->target; @@ -200,6 +200,8 @@ static void hmat_build_table_structs(GArray *table_data, NumaState *numa_state) HMAT_LB_Info *hmat_lb; NumaHmatCacheOptions *hmat_cache; + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + for (i = 0; i < numa_state->num_nodes; i++) { flags = 0; @@ -256,14 +258,10 @@ static void hmat_build_table_structs(GArray *table_data, NumaState *numa_state) void build_hmat(GArray *table_data, BIOSLinker *linker, NumaState *numa_state, const char *oem_id, const char *oem_table_id) { - int hmat_start = table_data->len; - - /* reserve space for HMAT header */ - acpi_data_push(table_data, 40); + AcpiTable table = { .sig = "HMAT", .rev = 2, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + acpi_table_begin(&table, table_data); hmat_build_table_structs(table_data, numa_state); - - build_header(linker, table_data, - (void *)(table_data->data + hmat_start), - "HMAT", table_data->len - hmat_start, 2, oem_id, oem_table_id); + acpi_table_end(linker, &table); } diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c index 778e27b659..bd9bbade70 100644 --- a/hw/acpi/ich9.c +++ b/hw/acpi/ich9.c @@ -163,7 +163,6 @@ static const VMStateDescription vmstate_memhp_state = { .name = "ich9_pm/memhp", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .needed = vmstate_test_use_memhp, .fields = (VMStateField[]) { VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, ICH9LPCPMRegs), @@ -181,7 +180,6 @@ static const VMStateDescription vmstate_tco_io_state = { .name = "ich9_pm/tco", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .needed = vmstate_test_use_tco, .fields = (VMStateField[]) { VMSTATE_STRUCT(tco_regs, ICH9LPCPMRegs, 1, vmstate_tco_io_sts, @@ -208,7 +206,6 @@ static const VMStateDescription vmstate_cpuhp_state = { .name = "ich9_pm/cpuhp", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .needed = vmstate_test_use_cpuhp, .pre_load = vmstate_cpuhp_pre_load, .fields = (VMStateField[]) { @@ -419,6 +416,20 @@ static void 
ich9_pm_set_acpi_pci_hotplug(Object *obj, bool value, Error **errp) s->pm.use_acpi_hotplug_bridge = value; } +static bool ich9_pm_get_keep_pci_slot_hpc(Object *obj, Error **errp) +{ + ICH9LPCState *s = ICH9_LPC_DEVICE(obj); + + return s->pm.keep_pci_slot_hpc; +} + +static void ich9_pm_set_keep_pci_slot_hpc(Object *obj, bool value, Error **errp) +{ + ICH9LPCState *s = ICH9_LPC_DEVICE(obj); + + s->pm.keep_pci_slot_hpc = value; +} + void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm) { static const uint32_t gpe0_len = ICH9_PMIO_GPE0_LEN; @@ -428,6 +439,7 @@ void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm) pm->disable_s4 = 0; pm->s4_val = 2; pm->use_acpi_hotplug_bridge = true; + pm->keep_pci_slot_hpc = true; object_property_add_uint32_ptr(obj, ACPI_PM_PROP_PM_IO_BASE, &pm->pm_io_base, OBJ_PROP_FLAG_READ); @@ -451,9 +463,12 @@ void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm) object_property_add_bool(obj, ACPI_PM_PROP_TCO_ENABLED, ich9_pm_get_enable_tco, ich9_pm_set_enable_tco); - object_property_add_bool(obj, "acpi-pci-hotplug-with-bridge-support", + object_property_add_bool(obj, ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, ich9_pm_get_acpi_pci_hotplug, ich9_pm_set_acpi_pci_hotplug); + object_property_add_bool(obj, "x-keep-pci-slot-hpc", + ich9_pm_get_keep_pci_slot_hpc, + ich9_pm_set_keep_pci_slot_hpc); } void ich9_pm_device_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, diff --git a/hw/acpi/ipmi-stub.c b/hw/acpi/ipmi-stub.c index 8634fb325c..befaf0a882 100644 --- a/hw/acpi/ipmi-stub.c +++ b/hw/acpi/ipmi-stub.c @@ -10,6 +10,6 @@ #include "qemu/osdep.h" #include "hw/acpi/ipmi.h" -void build_acpi_ipmi_devices(Aml *table, BusState *bus, const char *resource) +void build_ipmi_dev_aml(AcpiDevAmlIf *adev, Aml *scope) { } diff --git a/hw/acpi/ipmi.c b/hw/acpi/ipmi.c index 96e48eba15..a20e57d465 100644 --- a/hw/acpi/ipmi.c +++ b/hw/acpi/ipmi.c @@ -13,7 +13,7 @@ #include "hw/acpi/acpi.h" #include "hw/acpi/ipmi.h" -static Aml *aml_ipmi_crs(IPMIFwInfo *info, const char *resource) +static Aml *aml_ipmi_crs(IPMIFwInfo *info) { Aml *crs = aml_resource_template(); @@ -49,7 +49,7 @@ static Aml *aml_ipmi_crs(IPMIFwInfo *info, const char *resource) break; case IPMI_MEMSPACE_SMBUS: aml_append(crs, aml_i2c_serial_bus_device(info->base_address, - resource)); + "^")); break; default: abort(); @@ -62,46 +62,27 @@ static Aml *aml_ipmi_crs(IPMIFwInfo *info, const char *resource) return crs; } -static Aml *aml_ipmi_device(IPMIFwInfo *info, const char *resource) +void build_ipmi_dev_aml(AcpiDevAmlIf *adev, Aml *scope) { Aml *dev; - uint16_t version = ((info->ipmi_spec_major_revision << 8) - | (info->ipmi_spec_minor_revision << 4)); + IPMIFwInfo info = {}; + IPMIInterface *ii = IPMI_INTERFACE(adev); + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + uint16_t version; - assert(info->ipmi_spec_minor_revision <= 15); + iic->get_fwinfo(ii, &info); + assert(info.ipmi_spec_minor_revision <= 15); + version = ((info.ipmi_spec_major_revision << 8) + | (info.ipmi_spec_minor_revision << 4)); - dev = aml_device("MI%d", info->uuid); + dev = aml_device("MI%d", info.uuid); aml_append(dev, aml_name_decl("_HID", aml_eisaid("IPI0001"))); aml_append(dev, aml_name_decl("_STR", aml_string("ipmi_%s", - info->interface_name))); - aml_append(dev, aml_name_decl("_UID", aml_int(info->uuid))); - aml_append(dev, aml_name_decl("_CRS", aml_ipmi_crs(info, resource))); - aml_append(dev, aml_name_decl("_IFT", aml_int(info->interface_type))); + info.interface_name))); + aml_append(dev, aml_name_decl("_UID", 
aml_int(info.uuid))); + aml_append(dev, aml_name_decl("_CRS", aml_ipmi_crs(&info))); + aml_append(dev, aml_name_decl("_IFT", aml_int(info.interface_type))); aml_append(dev, aml_name_decl("_SRV", aml_int(version))); - return dev; -} - -void build_acpi_ipmi_devices(Aml *scope, BusState *bus, const char *resource) -{ - - BusChild *kid; - - QTAILQ_FOREACH(kid, &bus->children, sibling) { - IPMIInterface *ii; - IPMIInterfaceClass *iic; - IPMIFwInfo info; - Object *obj = object_dynamic_cast(OBJECT(kid->child), - TYPE_IPMI_INTERFACE); - - if (!obj) { - continue; - } - - ii = IPMI_INTERFACE(obj); - iic = IPMI_INTERFACE_GET_CLASS(obj); - memset(&info, 0, sizeof(info)); - iic->get_fwinfo(ii, &info); - aml_append(scope, aml_ipmi_device(&info, resource)); - } + aml_append(scope, dev); } diff --git a/hw/acpi/memory_hotplug.c b/hw/acpi/memory_hotplug.c index af37889423..0a7e89a13e 100644 --- a/hw/acpi/memory_hotplug.c +++ b/hw/acpi/memory_hotplug.c @@ -1,6 +1,5 @@ #include "qemu/osdep.h" #include "hw/acpi/memory_hotplug.h" -#include "hw/acpi/pc-hotplug.h" #include "hw/mem/pc-dimm.h" #include "hw/qdev-core.h" #include "migration/vmstate.h" @@ -8,6 +7,7 @@ #include "qapi/error.h" #include "qapi/qapi-events-acpi.h" #include "qapi/qapi-events-machine.h" +#include "qapi/qapi-events-qdev.h" #define MEMORY_SLOTS_NUMBER "MDNR" #define MEMORY_HOTPLUG_IO_REGION "HPMR" @@ -178,8 +178,16 @@ static void acpi_memory_hotplug_write(void *opaque, hwaddr addr, uint64_t data, hotplug_handler_unplug(hotplug_ctrl, dev, &local_err); if (local_err) { trace_mhp_acpi_pc_dimm_delete_failed(mem_st->selector); - qapi_event_send_mem_unplug_error(dev->id, + + /* + * Send both MEM_UNPLUG_ERROR and DEVICE_UNPLUG_GUEST_ERROR + * while the deprecation of MEM_UNPLUG_ERROR is + * pending. + */ + qapi_event_send_mem_unplug_error(dev->id ? 
: "", error_get_pretty(local_err)); + qapi_event_send_device_unplug_guest_error(!!dev->id, dev->id, + dev->canonical_path); error_free(local_err); break; } @@ -309,7 +317,6 @@ static const VMStateDescription vmstate_memhp_sts = { .name = "memory hotplug device state", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_BOOL(is_enabled, MemStatus), VMSTATE_BOOL(is_inserting, MemStatus), @@ -323,7 +330,6 @@ const VMStateDescription vmstate_memory_hotplug = { .name = "memory hotplug state", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_UINT32(selector, MemHotplugState), VMSTATE_STRUCT_VARRAY_POINTER_UINT32(devs, MemHotplugState, dev_count, diff --git a/hw/acpi/meson.build b/hw/acpi/meson.build index 29f804d13e..f8c820ca94 100644 --- a/hw/acpi/meson.build +++ b/hw/acpi/meson.build @@ -6,21 +6,33 @@ acpi_ss.add(files( 'core.c', 'utils.c', )) -acpi_ss.add(when: 'CONFIG_ACPI_CPU_HOTPLUG', if_true: files('cpu.c')) -acpi_ss.add(when: 'CONFIG_ACPI_CPU_HOTPLUG', if_true: files('cpu_hotplug.c')) +acpi_ss.add(when: 'CONFIG_ACPI_CPU_HOTPLUG', if_true: files('cpu.c', 'cpu_hotplug.c')) +acpi_ss.add(when: 'CONFIG_ACPI_CPU_HOTPLUG', if_false: files('acpi-cpu-hotplug-stub.c')) acpi_ss.add(when: 'CONFIG_ACPI_MEMORY_HOTPLUG', if_true: files('memory_hotplug.c')) +acpi_ss.add(when: 'CONFIG_ACPI_MEMORY_HOTPLUG', if_false: files('acpi-mem-hotplug-stub.c')) acpi_ss.add(when: 'CONFIG_ACPI_NVDIMM', if_true: files('nvdimm.c')) +acpi_ss.add(when: 'CONFIG_ACPI_NVDIMM', if_false: files('acpi-nvdimm-stub.c')) acpi_ss.add(when: 'CONFIG_ACPI_PCI', if_true: files('pci.c')) +acpi_ss.add(when: 'CONFIG_ACPI_CXL', if_true: files('cxl.c'), if_false: files('cxl-stub.c')) acpi_ss.add(when: 'CONFIG_ACPI_VMGENID', if_true: files('vmgenid.c')) acpi_ss.add(when: 'CONFIG_ACPI_HW_REDUCED', if_true: files('generic_event_device.c')) acpi_ss.add(when: 'CONFIG_ACPI_HMAT', if_true: files('hmat.c')) acpi_ss.add(when: 'CONFIG_ACPI_APEI', if_true: files('ghes.c'), if_false: files('ghes-stub.c')) -acpi_ss.add(when: 'CONFIG_ACPI_X86', if_true: files('piix4.c', 'pcihp.c')) +acpi_ss.add(when: 'CONFIG_ACPI_PIIX4', if_true: files('piix4.c')) +acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_true: files('pcihp.c')) +acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_false: files('acpi-pci-hotplug-stub.c')) +acpi_ss.add(when: 'CONFIG_ACPI_VIOT', if_true: files('viot.c')) acpi_ss.add(when: 'CONFIG_ACPI_X86_ICH', if_true: files('ich9.c', 'tco.c')) +acpi_ss.add(when: 'CONFIG_ACPI_ERST', if_true: files('erst.c')) acpi_ss.add(when: 'CONFIG_IPMI', if_true: files('ipmi.c'), if_false: files('ipmi-stub.c')) acpi_ss.add(when: 'CONFIG_PC', if_false: files('acpi-x86-stub.c')) -acpi_ss.add(when: 'CONFIG_TPM', if_true: files('tpm.c')) -softmmu_ss.add(when: 'CONFIG_ACPI', if_false: files('acpi-stub.c', 'aml-build-stub.c', 'ghes-stub.c')) +if have_tpm + acpi_ss.add(files('tpm.c')) +endif +softmmu_ss.add(when: 'CONFIG_ACPI', if_false: files('acpi-stub.c', 'aml-build-stub.c', 'ghes-stub.c', 'acpi_interface.c')) softmmu_ss.add_all(when: 'CONFIG_ACPI', if_true: acpi_ss) softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('acpi-stub.c', 'aml-build-stub.c', - 'acpi-x86-stub.c', 'ipmi-stub.c', 'ghes-stub.c')) + 'acpi-x86-stub.c', 'ipmi-stub.c', 'ghes-stub.c', + 'acpi-mem-hotplug-stub.c', 'acpi-cpu-hotplug-stub.c', + 'acpi-pci-hotplug-stub.c', 'acpi-nvdimm-stub.c', + 'cxl-stub.c')) diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c index e3d5fe1939..a3b25a92f3 100644 --- 
a/hw/acpi/nvdimm.c +++ b/hw/acpi/nvdimm.c @@ -35,6 +35,7 @@ #include "hw/nvram/fw_cfg.h" #include "hw/mem/nvdimm.h" #include "qemu/nvdimm-utils.h" +#include "trace.h" /* * define Byte Addressable Persistent Memory (PM) Region according to @@ -44,22 +45,6 @@ static const uint8_t nvdimm_nfit_spa_uuid[] = UUID_LE(0x66f0d379, 0xb4f3, 0x4074, 0xac, 0x43, 0x0d, 0x33, 0x18, 0xb7, 0x8c, 0xdb); -/* - * NVDIMM Firmware Interface Table - * @signature: "NFIT" - * - * It provides information that allows OSPM to enumerate NVDIMM present in - * the platform and associate system physical address ranges created by the - * NVDIMMs. - * - * It is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT) - */ -struct NvdimmNfitHeader { - ACPI_TABLE_HEADER_DEF - uint32_t reserved; -} QEMU_PACKED; -typedef struct NvdimmNfitHeader NvdimmNfitHeader; - /* * define NFIT structures according to ACPI 6.0: 5.2.25 NVDIMM Firmware * Interface Table (NFIT). @@ -355,10 +340,10 @@ nvdimm_build_structure_caps(GArray *structures, uint32_t capabilities) static GArray *nvdimm_build_device_structure(NVDIMMState *state) { - GSList *device_list = nvdimm_get_device_list(); + GSList *device_list, *list = nvdimm_get_device_list(); GArray *structures = g_array_new(false, true /* clear */, 1); - for (; device_list; device_list = device_list->next) { + for (device_list = list; device_list; device_list = device_list->next) { DeviceState *dev = device_list->data; /* build System Physical Address Range Structure. */ @@ -373,7 +358,7 @@ static GArray *nvdimm_build_device_structure(NVDIMMState *state) /* build NVDIMM Control Region Structure. */ nvdimm_build_structure_dcr(structures, dev); } - g_slist_free(device_list); + g_slist_free(list); if (state->persistence) { nvdimm_build_structure_caps(structures, state->persistence); @@ -401,25 +386,33 @@ void nvdimm_plug(NVDIMMState *state) nvdimm_build_fit_buffer(state); } +/* + * NVDIMM Firmware Interface Table + * @signature: "NFIT" + * + * It provides information that allows OSPM to enumerate NVDIMM present in + * the platform and associate system physical address ranges created by the + * NVDIMMs. + * + * It is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT) + */ + static void nvdimm_build_nfit(NVDIMMState *state, GArray *table_offsets, GArray *table_data, BIOSLinker *linker, const char *oem_id, const char *oem_table_id) { NvdimmFitBuffer *fit_buf = &state->fit_buf; - unsigned int header; + AcpiTable table = { .sig = "NFIT", .rev = 1, + .oem_id = oem_id, .oem_table_id = oem_table_id }; acpi_add_table(table_offsets, table_data); - /* NFIT header. */ - header = table_data->len; - acpi_data_push(table_data, sizeof(NvdimmNfitHeader)); + acpi_table_begin(&table, table_data); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); /* NVDIMM device structures. */ g_array_append_vals(table_data, fit_buf->fit->data, fit_buf->fit->len); - - build_header(linker, table_data, - (void *)(table_data->data + header), "NFIT", - sizeof(NvdimmNfitHeader) + fit_buf->fit->len, 1, oem_id, - oem_table_id); + acpi_table_end(linker, &table); } #define NVDIMM_DSM_MEMORY_SIZE 4096 @@ -484,7 +477,7 @@ struct NvdimmFuncGetLabelDataOut { /* the size of buffer filled by QEMU. */ uint32_t len; uint32_t func_ret_status; /* return status code. */ - uint8_t out_buf[]; /* the data got via Get Namesapce Label function. */ + uint8_t out_buf[]; /* the data got via Get Namespace Label function. 
*/ } QEMU_PACKED; typedef struct NvdimmFuncGetLabelDataOut NvdimmFuncGetLabelDataOut; QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataOut) > NVDIMM_DSM_MEMORY_SIZE); @@ -558,8 +551,8 @@ static void nvdimm_dsm_func_read_fit(NVDIMMState *state, NvdimmDsmIn *in, fit = fit_buf->fit; - nvdimm_debug("Read FIT: offset 0x%x FIT size 0x%x Dirty %s.\n", - read_fit->offset, fit->len, fit_buf->dirty ? "Yes" : "No"); + trace_acpi_nvdimm_read_fit(read_fit->offset, fit->len, + fit_buf->dirty ? "Yes" : "No"); if (read_fit->offset > fit->len) { func_ret_status = NVDIMM_DSM_RET_STATUS_INVALID; @@ -666,7 +659,7 @@ static void nvdimm_dsm_label_size(NVDIMMDevice *nvdimm, hwaddr dsm_mem_addr) label_size = nvdimm->label_size; mxfer = nvdimm_get_max_xfer_label_size(); - nvdimm_debug("label_size 0x%x, max_xfer 0x%x.\n", label_size, mxfer); + trace_acpi_nvdimm_label_info(label_size, mxfer); label_size_out.func_ret_status = cpu_to_le32(NVDIMM_DSM_RET_STATUS_SUCCESS); label_size_out.label_size = cpu_to_le32(label_size); @@ -682,20 +675,18 @@ static uint32_t nvdimm_rw_label_data_check(NVDIMMDevice *nvdimm, uint32_t ret = NVDIMM_DSM_RET_STATUS_INVALID; if (offset + length < offset) { - nvdimm_debug("offset 0x%x + length 0x%x is overflow.\n", offset, - length); + trace_acpi_nvdimm_label_overflow(offset, length); return ret; } if (nvdimm->label_size < offset + length) { - nvdimm_debug("position 0x%x is beyond label data (len = %" PRIx64 ").\n", - offset + length, nvdimm->label_size); + trace_acpi_nvdimm_label_oversize(offset + length, nvdimm->label_size); return ret; } if (length > nvdimm_get_max_xfer_label_size()) { - nvdimm_debug("length (0x%x) is larger than max_xfer (0x%x).\n", - length, nvdimm_get_max_xfer_label_size()); + trace_acpi_nvdimm_label_xfer_exceed(length, + nvdimm_get_max_xfer_label_size()); return ret; } @@ -718,8 +709,8 @@ static void nvdimm_dsm_get_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in, get_label_data->offset = le32_to_cpu(get_label_data->offset); get_label_data->length = le32_to_cpu(get_label_data->length); - nvdimm_debug("Read Label Data: offset 0x%x length 0x%x.\n", - get_label_data->offset, get_label_data->length); + trace_acpi_nvdimm_read_label(get_label_data->offset, + get_label_data->length); status = nvdimm_rw_label_data_check(nvdimm, get_label_data->offset, get_label_data->length); @@ -757,8 +748,8 @@ static void nvdimm_dsm_set_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in, set_label_data->offset = le32_to_cpu(set_label_data->offset); set_label_data->length = le32_to_cpu(set_label_data->length); - nvdimm_debug("Write Label Data: offset 0x%x length 0x%x.\n", - set_label_data->offset, set_label_data->length); + trace_acpi_nvdimm_write_label(set_label_data->offset, + set_label_data->length); status = nvdimm_rw_label_data_check(nvdimm, set_label_data->offset, set_label_data->length); @@ -829,7 +820,7 @@ static void nvdimm_dsm_device(NvdimmDsmIn *in, hwaddr dsm_mem_addr) static uint64_t nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size) { - nvdimm_debug("BUG: we never read _DSM IO Port.\n"); + trace_acpi_nvdimm_read_io_port(); return 0; } @@ -840,7 +831,7 @@ nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) NvdimmDsmIn *in; hwaddr dsm_mem_addr = val; - nvdimm_debug("dsm memory address 0x%" HWADDR_PRIx ".\n", dsm_mem_addr); + trace_acpi_nvdimm_dsm_mem_addr(dsm_mem_addr); /* * The DSM memory is mapped to guest address space so an evil guest @@ -854,12 +845,10 @@ nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) in->function = 
le32_to_cpu(in->function); in->handle = le32_to_cpu(in->handle); - nvdimm_debug("Revision 0x%x Handler 0x%x Function 0x%x.\n", in->revision, - in->handle, in->function); + trace_acpi_nvdimm_dsm_info(in->revision, in->handle, in->function); if (in->revision != 0x1 /* Currently we only support DSM Spec Rev1. */) { - nvdimm_debug("Revision 0x%x is not supported, expect 0x%x.\n", - in->revision, 0x1); + trace_acpi_nvdimm_invalid_revision(in->revision); nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr); goto exit; } @@ -933,6 +922,7 @@ void nvdimm_init_acpi_state(NVDIMMState *state, MemoryRegion *io, #define NVDIMM_DSM_RFIT_STATUS "RSTA" #define NVDIMM_QEMU_RSVD_UUID "648B9CF2-CDA1-4312-8AD9-49C4AF32BD62" +#define NVDIMM_DEVICE_DSM_UUID "4309AC30-0D11-11E4-9191-0800200C9A66" static void nvdimm_build_common_dsm(Aml *dev, NVDIMMState *nvdimm_state) @@ -1040,15 +1030,14 @@ static void nvdimm_build_common_dsm(Aml *dev, /* UUID for QEMU internal use */), expected_uuid)); aml_append(elsectx, ifctx); elsectx2 = aml_else(); - aml_append(elsectx2, aml_store( - aml_touuid("4309AC30-0D11-11E4-9191-0800200C9A66") + aml_append(elsectx2, aml_store(aml_touuid(NVDIMM_DEVICE_DSM_UUID) /* UUID for NVDIMM Devices */, expected_uuid)); aml_append(elsectx, elsectx2); aml_append(method, elsectx); uuid_invalid = aml_lnot(aml_equal(uuid, expected_uuid)); - unsupport = aml_if(aml_or(unpatched, uuid_invalid, NULL)); + unsupport = aml_if(aml_lor(unpatched, uuid_invalid)); /* * function 0 is called to inquire what functions are supported by @@ -1080,10 +1069,9 @@ static void nvdimm_build_common_dsm(Aml *dev, * in the DSM Spec. */ pckg = aml_arg(3); - ifctx = aml_if(aml_and(aml_equal(aml_object_type(pckg), + ifctx = aml_if(aml_land(aml_equal(aml_object_type(pckg), aml_int(4 /* Package */)) /* It is a Package? */, - aml_equal(aml_sizeof(pckg), aml_int(1)) /* 1 element? */, - NULL)); + aml_equal(aml_sizeof(pckg), aml_int(1)) /* 1 element? 
*/)); pckg_index = aml_local(2); pckg_buf = aml_local(3); @@ -1255,6 +1243,7 @@ static void nvdimm_build_fit(Aml *dev) static void nvdimm_build_nvdimm_devices(Aml *root_dev, uint32_t ram_slots) { uint32_t slot; + Aml *method, *pkg, *field, *com_call; for (slot = 0; slot < ram_slots; slot++) { uint32_t handle = nvdimm_slot_to_handle(slot); @@ -1272,6 +1261,100 @@ static void nvdimm_build_nvdimm_devices(Aml *root_dev, uint32_t ram_slots) */ aml_append(nvdimm_dev, aml_name_decl("_ADR", aml_int(handle))); + /* + * ACPI v6.4: Section 6.5.10 NVDIMM Label Methods + */ + /* _LSI */ + method = aml_method("_LSI", 0, AML_SERIALIZED); + com_call = aml_call5(NVDIMM_COMMON_DSM, + aml_touuid(NVDIMM_DEVICE_DSM_UUID), + aml_int(1), aml_int(4), aml_int(0), + aml_int(handle)); + aml_append(method, aml_store(com_call, aml_local(0))); + + aml_append(method, aml_create_dword_field(aml_local(0), + aml_int(0), "STTS")); + aml_append(method, aml_create_dword_field(aml_local(0), aml_int(4), + "SLSA")); + aml_append(method, aml_create_dword_field(aml_local(0), aml_int(8), + "MAXT")); + + pkg = aml_package(3); + aml_append(pkg, aml_name("STTS")); + aml_append(pkg, aml_name("SLSA")); + aml_append(pkg, aml_name("MAXT")); + aml_append(method, aml_store(pkg, aml_local(1))); + aml_append(method, aml_return(aml_local(1))); + + aml_append(nvdimm_dev, method); + + /* _LSR */ + method = aml_method("_LSR", 2, AML_SERIALIZED); + aml_append(method, aml_name_decl("INPT", aml_buffer(8, NULL))); + + aml_append(method, aml_create_dword_field(aml_name("INPT"), + aml_int(0), "OFST")); + aml_append(method, aml_create_dword_field(aml_name("INPT"), + aml_int(4), "LEN")); + aml_append(method, aml_store(aml_arg(0), aml_name("OFST"))); + aml_append(method, aml_store(aml_arg(1), aml_name("LEN"))); + + pkg = aml_package(1); + aml_append(pkg, aml_name("INPT")); + aml_append(method, aml_store(pkg, aml_local(0))); + + com_call = aml_call5(NVDIMM_COMMON_DSM, + aml_touuid(NVDIMM_DEVICE_DSM_UUID), + aml_int(1), aml_int(5), aml_local(0), + aml_int(handle)); + aml_append(method, aml_store(com_call, aml_local(3))); + field = aml_create_dword_field(aml_local(3), aml_int(0), "STTS"); + aml_append(method, field); + field = aml_create_field(aml_local(3), aml_int(32), + aml_shiftleft(aml_name("LEN"), aml_int(3)), + "LDAT"); + aml_append(method, field); + aml_append(method, aml_name_decl("LSA", aml_buffer(0, NULL))); + aml_append(method, aml_to_buffer(aml_name("LDAT"), aml_name("LSA"))); + + pkg = aml_package(2); + aml_append(pkg, aml_name("STTS")); + aml_append(pkg, aml_name("LSA")); + + aml_append(method, aml_store(pkg, aml_local(1))); + aml_append(method, aml_return(aml_local(1))); + + aml_append(nvdimm_dev, method); + + /* _LSW */ + method = aml_method("_LSW", 3, AML_SERIALIZED); + aml_append(method, aml_store(aml_arg(2), aml_local(2))); + aml_append(method, aml_name_decl("INPT", aml_buffer(8, NULL))); + field = aml_create_dword_field(aml_name("INPT"), + aml_int(0), "OFST"); + aml_append(method, field); + field = aml_create_dword_field(aml_name("INPT"), + aml_int(4), "TLEN"); + aml_append(method, field); + aml_append(method, aml_store(aml_arg(0), aml_name("OFST"))); + aml_append(method, aml_store(aml_arg(1), aml_name("TLEN"))); + + aml_append(method, aml_concatenate(aml_name("INPT"), aml_local(2), + aml_name("INPT"))); + pkg = aml_package(1); + aml_append(pkg, aml_name("INPT")); + aml_append(method, aml_store(pkg, aml_local(0))); + com_call = aml_call5(NVDIMM_COMMON_DSM, + aml_touuid(NVDIMM_DEVICE_DSM_UUID), + aml_int(1), aml_int(6), aml_local(0), + 
aml_int(handle)); + aml_append(method, aml_store(com_call, aml_local(3))); + field = aml_create_dword_field(aml_local(3), aml_int(0), "STTS"); + aml_append(method, field); + aml_append(method, aml_return(aml_name("STTS"))); + + aml_append(nvdimm_dev, method); + nvdimm_build_device_dsm(nvdimm_dev, handle); aml_append(root_dev, nvdimm_dev); } @@ -1282,14 +1365,15 @@ static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data, NVDIMMState *nvdimm_state, uint32_t ram_slots, const char *oem_id) { + int mem_addr_offset; Aml *ssdt, *sb_scope, *dev; - int mem_addr_offset, nvdimm_ssdt; + AcpiTable table = { .sig = "SSDT", .rev = 1, + .oem_id = oem_id, .oem_table_id = "NVDIMM" }; acpi_add_table(table_offsets, table_data); + acpi_table_begin(&table, table_data); ssdt = init_aml_allocator(); - acpi_data_push(ssdt->buf, sizeof(AcpiTableHeader)); - sb_scope = aml_scope("\\_SB"); dev = aml_device("NVDR"); @@ -1318,8 +1402,6 @@ static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data, aml_append(sb_scope, dev); aml_append(ssdt, sb_scope); - nvdimm_ssdt = table_data->len; - /* copy AML table into ACPI tables blob and patch header there */ g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len); mem_addr_offset = build_append_named_dword(table_data, @@ -1331,18 +1413,20 @@ static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data, bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE, mem_addr_offset, sizeof(uint32_t), NVDIMM_DSM_MEM_FILE, 0); - build_header(linker, table_data, - (void *)(table_data->data + nvdimm_ssdt), - "SSDT", table_data->len - nvdimm_ssdt, 1, oem_id, "NVDIMM"); free_aml_allocator(); + /* + * must be executed as the last so that pointer patching command above + * would be executed by guest before it recalculated checksum which were + * scheduled by acpi_table_end() + */ + acpi_table_end(linker, &table); } void nvdimm_build_srat(GArray *table_data) { - GSList *device_list = nvdimm_get_device_list(); + GSList *device_list, *list = nvdimm_get_device_list(); - for (; device_list; device_list = device_list->next) { - AcpiSratMemoryAffinity *numamem = NULL; + for (device_list = list; device_list; device_list = device_list->next) { DeviceState *dev = device_list->data; Object *obj = OBJECT(dev); uint64_t addr, size; @@ -1352,11 +1436,10 @@ void nvdimm_build_srat(GArray *table_data) addr = object_property_get_uint(obj, PC_DIMM_ADDR_PROP, &error_abort); size = object_property_get_uint(obj, PC_DIMM_SIZE_PROP, &error_abort); - numamem = acpi_data_push(table_data, sizeof *numamem); - build_srat_memory(numamem, addr, size, node, + build_srat_memory(table_data, addr, size, node, MEM_AFFINITY_ENABLED | MEM_AFFINITY_NON_VOLATILE); } - g_slist_free(device_list); + g_slist_free(list); } void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data, diff --git a/hw/acpi/pci.c b/hw/acpi/pci.c index 75b1103ec4..20b70dcd81 100644 --- a/hw/acpi/pci.c +++ b/hw/acpi/pci.c @@ -28,19 +28,20 @@ #include "hw/acpi/pci.h" #include "hw/pci/pcie_host.h" +/* + * PCI Firmware Specification, Revision 3.0 + * 4.1.2 MCFG Table Description. + */ void build_mcfg(GArray *table_data, BIOSLinker *linker, AcpiMcfgInfo *info, const char *oem_id, const char *oem_table_id) { - int mcfg_start = table_data->len; + AcpiTable table = { .sig = "MCFG", .rev = 1, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + + acpi_table_begin(&table, table_data); - /* - * PCI Firmware Specification, Revision 3.0 - * 4.1.2 MCFG Table Description. 
- */ - acpi_data_push(table_data, sizeof(AcpiTableHeader)); /* Reserved */ build_append_int_noprefix(table_data, 0, 8); - /* * Memory Mapped Enhanced Configuration Space Base Address Allocation * Structure @@ -56,6 +57,5 @@ void build_mcfg(GArray *table_data, BIOSLinker *linker, AcpiMcfgInfo *info, /* Reserved */ build_append_int_noprefix(table_data, 0, 4); - build_header(linker, table_data, (void *)(table_data->data + mcfg_start), - "MCFG", table_data->len - mcfg_start, 1, oem_id, oem_table_id); + acpi_table_end(linker, &table); } diff --git a/hw/acpi/pcihp.c b/hw/acpi/pcihp.c index f4d706e47d..a2a3738b46 100644 --- a/hw/acpi/pcihp.c +++ b/hw/acpi/pcihp.c @@ -32,6 +32,7 @@ #include "hw/pci/pci_bridge.h" #include "hw/pci/pci_host.h" #include "hw/pci/pcie_port.h" +#include "hw/pci-bridge/xio3130_downstream.h" #include "hw/i386/acpi-build.h" #include "hw/acpi/acpi.h" #include "hw/pci/pci_bus.h" @@ -128,20 +129,15 @@ static void acpi_set_pci_info(void) static void acpi_pcihp_disable_root_bus(void) { - static bool root_hp_disabled; Object *host = acpi_get_i386_pci_host(); PCIBus *bus; - if (root_hp_disabled) { - return; - } - bus = PCI_HOST_BRIDGE(host)->bus; - if (bus) { + if (bus && qbus_is_hotpluggable(BUS(bus))) { /* setting the hotplug handler to NULL makes the bus non-hotpluggable */ qbus_set_hotplug_handler(BUS(bus), NULL); } - root_hp_disabled = true; + return; } @@ -196,8 +192,12 @@ static bool acpi_pcihp_pc_no_hotplug(AcpiPciHpState *s, PCIDevice *dev) * ACPI doesn't allow hotplug of bridge devices. Don't allow * hot-unplug of bridge devices unless they were added by hotplug * (and so, not described by acpi). + * + * Don't allow hot-unplug of SR-IOV Virtual Functions, as they + * will be removed implicitly, when Physical Function is unplugged. */ - return (pc->is_bridge && !dev->qdev.hotplugged) || !dc->hotpluggable; + return (pc->is_bridge && !dev->qdev.hotplugged) || !dc->hotpluggable || + pci_is_vf(dev); } static void acpi_pcihp_eject_slot(AcpiPciHpState *s, unsigned bsel, unsigned slots) @@ -222,9 +222,27 @@ static void acpi_pcihp_eject_slot(AcpiPciHpState *s, unsigned bsel, unsigned slo PCIDevice *dev = PCI_DEVICE(qdev); if (PCI_SLOT(dev->devfn) == slot) { if (!acpi_pcihp_pc_no_hotplug(s, dev)) { - hotplug_ctrl = qdev_get_hotplug_handler(qdev); - hotplug_handler_unplug(hotplug_ctrl, qdev, &error_abort); - object_unparent(OBJECT(qdev)); + /* + * partially_hotplugged is used by virtio-net failover: + * failover has asked the guest OS to unplug the device + * but we need to keep some references to the device + * to be able to plug it back in case of failure so + * we don't execute hotplug_handler_unplug(). + */ + if (dev->partially_hotplugged) { + /* + * pending_deleted_event is set to true when + * virtio-net failover asks to unplug the device, + * and set to false here when the operation is done + * This is used by the migration loop to detect the + * end of the operation and really start the migration. + */ + qdev->pending_deleted_event = false; + } else { + hotplug_ctrl = qdev_get_hotplug_handler(qdev); + hotplug_handler_unplug(hotplug_ctrl, qdev, &error_abort); + object_unparent(OBJECT(qdev)); + } } } } @@ -283,7 +301,7 @@ void acpi_pcihp_device_pre_plug_cb(HotplugHandler *hotplug_dev, /* Only hotplugged devices need the hotplug capability. */ if (dev->hotplugged && - acpi_pcihp_get_bsel(pci_get_bus(PCI_DEVICE(dev))) < 0) { + acpi_pcihp_get_bsel(pci_get_bus(pdev)) < 0) { error_setg(errp, "Unsupported bus. 
Bus doesn't have property '" ACPI_PCIHP_PROP_BSEL "' set"); return; @@ -323,6 +341,8 @@ void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s, { PCIDevice *pdev = PCI_DEVICE(dev); int slot = PCI_SLOT(pdev->devfn); + PCIDevice *bridge; + PCIBus *bus; int bsel; /* Don't send event when device is enabled during qemu machine creation: @@ -352,7 +372,14 @@ void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s, return; } - bsel = acpi_pcihp_get_bsel(pci_get_bus(pdev)); + bus = pci_get_bus(pdev); + bridge = pci_bridge_get_device(bus); + if (object_dynamic_cast(OBJECT(bridge), TYPE_PCIE_ROOT_PORT) || + object_dynamic_cast(OBJECT(bridge), TYPE_XIO3130_DOWNSTREAM)) { + pcie_cap_slot_enable_power(bridge); + } + + bsel = acpi_pcihp_get_bsel(bus); g_assert(bsel >= 0); s->acpi_pcihp_pci_status[bsel].up |= (1U << slot); acpi_send_event(DEVICE(hotplug_dev), ACPI_PCI_HOTPLUG_STATUS); @@ -363,8 +390,8 @@ void acpi_pcihp_device_unplug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s, { PCIDevice *pdev = PCI_DEVICE(dev); - trace_acpi_pci_unplug(PCI_SLOT(PCI_DEVICE(dev)->devfn), - acpi_pcihp_get_bsel(pci_get_bus(PCI_DEVICE(dev)))); + trace_acpi_pci_unplug(PCI_SLOT(pdev->devfn), + acpi_pcihp_get_bsel(pci_get_bus(pdev))); /* * clean up acpi-index so it could reused by another device @@ -396,6 +423,22 @@ void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev, return; } + /* + * pending_deleted_event is used by virtio-net failover to detect the + * end of the unplug operation, the flag is set to false in + * acpi_pcihp_eject_slot() when the operation is completed. + */ + pdev->qdev.pending_deleted_event = true; + /* if unplug was requested before OSPM is initialized, + * linux kernel will clear GPE0.sts[] bits during boot, which effectively + * hides unplug event. And than followup qmp_device_del() calls remain + * blocked by above flag permanently. + * Unblock qmp_device_del() by setting expire limit, so user can + * repeat unplug request later when OSPM has been booted. 
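/*
 * Illustrative sketch, not part of the patch: the two halves of the
 * virtio-net failover handshake added in this file, side by side. The
 * unplug-request path (just above) marks the device and records a
 * virtual-clock timestamp so a repeated device_del is not blocked forever;
 * the eject path (earlier hunk, acpi_pcihp_eject_slot()) clears the flag
 * instead of destroying a partially hotplugged device. The fields and
 * helpers below are the ones used in this file.
 */
static void example_mark_unplug_requested(PCIDevice *pdev)
{
    pdev->qdev.pending_deleted_event = true;
    pdev->qdev.pending_deleted_expires_ms =
        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
}

static void example_complete_unplug(DeviceState *qdev)
{
    PCIDevice *dev = PCI_DEVICE(qdev);

    if (dev->partially_hotplugged) {
        qdev->pending_deleted_event = false;   /* failover: keep the device */
    } else {
        HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(qdev);
        hotplug_handler_unplug(hotplug_ctrl, qdev, &error_abort);
        object_unparent(OBJECT(qdev));
    }
}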
+ */ + pdev->qdev.pending_deleted_expires_ms = + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); /* 1 msec */ + s->acpi_pcihp_pci_status[bsel].down |= (1U << slot); acpi_send_event(DEVICE(hotplug_dev), ACPI_PCI_HOTPLUG_STATUS); } @@ -467,6 +510,9 @@ static void pci_write(void *opaque, hwaddr addr, uint64_t data, } bus = acpi_pcihp_find_hotplug_bus(s, s->hotplug_select); + if (!bus) { + break; + } QTAILQ_FOREACH_SAFE(kid, &bus->qbus.children, sibling, next) { Object *o = OBJECT(kid->child); PCIDevice *dev = PCI_DEVICE(o); @@ -522,12 +568,6 @@ void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus, OBJ_PROP_FLAG_READ); } -bool vmstate_acpi_pcihp_use_acpi_index(void *opaque, int version_id) -{ - AcpiPciHpState *s = opaque; - return s->acpi_index; -} - const VMStateDescription vmstate_acpi_pcihp_pci_status = { .name = "acpi_pcihp_pci_status", .version_id = 1, diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c index 48f7a1edbc..0a81f1ad93 100644 --- a/hw/acpi/piix4.c +++ b/hw/acpi/piix4.c @@ -28,6 +28,8 @@ #include "hw/pci/pci.h" #include "hw/qdev-properties.h" #include "hw/acpi/acpi.h" +#include "hw/acpi/pcihp.h" +#include "hw/acpi/piix4.h" #include "sysemu/runstate.h" #include "sysemu/sysemu.h" #include "sysemu/xen.h" @@ -56,46 +58,6 @@ struct pci_status { uint32_t down; }; -struct PIIX4PMState { - /*< private >*/ - PCIDevice parent_obj; - /*< public >*/ - - MemoryRegion io; - uint32_t io_base; - - MemoryRegion io_gpe; - ACPIREGS ar; - - APMState apm; - - PMSMBus smb; - uint32_t smb_io_base; - - qemu_irq irq; - qemu_irq smi_irq; - int smm_enabled; - bool smm_compat; - Notifier machine_ready; - Notifier powerdown_notifier; - - AcpiPciHpState acpi_pci_hotplug; - bool use_acpi_hotplug_bridge; - bool use_acpi_root_pci_hotplug; - - uint8_t disable_s3; - uint8_t disable_s4; - uint8_t s4_val; - - bool cpu_hotplug_legacy; - AcpiCpuHotplug gpe_cpu; - CPUHotplugState cpuhp_state; - - MemHotplugState acpi_memory_hotplug; -}; - -OBJECT_DECLARE_SIMPLE_TYPE(PIIX4PMState, PIIX4_PM) - static void piix4_acpi_system_hot_add_init(MemoryRegion *parent, PCIBus *bus, PIIX4PMState *s); @@ -230,7 +192,6 @@ static const VMStateDescription vmstate_memhp_state = { .name = "piix4_pm/memhp", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .needed = vmstate_test_use_memhp, .fields = (VMStateField[]) { VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, PIIX4PMState), @@ -255,7 +216,6 @@ static const VMStateDescription vmstate_cpuhp_state = { .name = "piix4_pm/cpuhp", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .needed = vmstate_test_use_cpuhp, .pre_load = vmstate_cpuhp_pre_load, .fields = (VMStateField[]) { @@ -269,6 +229,16 @@ static bool piix4_vmstate_need_smbus(void *opaque, int version_id) return pm_smbus_vmstate_needed(); } +/* + * This is a fudge to turn off the acpi_index field, + * whose test was always broken on piix4 with 6.2 and older machine types. 
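/*
 * Illustrative sketch, not part of the patch: vmstate_test_migrate_acpi_index()
 * above follows the usual QEMU pattern of gating optional migration state on
 * a runtime predicate (the same series also drops the obsolete
 * .minimum_version_id_old fields). A hypothetical subsection shaped the same
 * way; the name "piix4_pm/example" and the choice of field are made up:
 */
static bool example_state_needed(void *opaque, int version_id)
{
    PIIX4PMState *s = PIIX4_PM(opaque);
    return s->use_acpi_hotplug_bridge;          /* only migrate when in use */
}

static const VMStateDescription vmstate_example_state = {
    .name = "piix4_pm/example",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = example_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(s4_val, PIIX4PMState),
        VMSTATE_END_OF_LIST()
    }
};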
+ */ +static bool vmstate_test_migrate_acpi_index(void *opaque, int version_id) +{ + PIIX4PMState *s = PIIX4_PM(opaque); + return s->use_acpi_hotplug_bridge && !s->not_migrate_acpi_index; +} + /* qemu-kvm 1.2 uses version 3 but advertised as 2 * To support incoming qemu-kvm 1.2 migration, change version_id * and minimum_version_id to 2 below (which breaks migration from @@ -299,7 +269,7 @@ static const VMStateDescription vmstate_acpi = { struct AcpiPciHpPciStatus), VMSTATE_PCI_HOTPLUG(acpi_pci_hotplug, PIIX4PMState, vmstate_test_use_acpi_hotplug_bridge, - vmstate_acpi_pcihp_use_acpi_index), + vmstate_test_migrate_acpi_index), VMSTATE_END_OF_LIST() }, .subsections = (const VMStateDescription*[]) { @@ -516,6 +486,10 @@ static void piix4_pm_realize(PCIDevice *dev, Error **errp) s->machine_ready.notify = piix4_pm_machine_ready; qemu_add_machine_init_done_notifier(&s->machine_ready); + if (xen_enabled()) { + s->use_acpi_hotplug_bridge = false; + } + piix4_acpi_system_hot_add_init(pci_address_space_io(dev), pci_get_bus(dev), s); qbus_set_hotplug_handler(BUS(pci_get_bus(dev)), OBJECT(s)); @@ -523,32 +497,12 @@ static void piix4_pm_realize(PCIDevice *dev, Error **errp) piix4_pm_add_properties(s); } -I2CBus *piix4_pm_init(PCIBus *bus, int devfn, uint32_t smb_io_base, - qemu_irq sci_irq, qemu_irq smi_irq, - int smm_enabled, DeviceState **piix4_pm) +static void piix4_pm_init(Object *obj) { - PCIDevice *pci_dev; - DeviceState *dev; - PIIX4PMState *s; + PIIX4PMState *s = PIIX4_PM(obj); - pci_dev = pci_new(devfn, TYPE_PIIX4_PM); - dev = DEVICE(pci_dev); - qdev_prop_set_uint32(dev, "smb_io_base", smb_io_base); - if (piix4_pm) { - *piix4_pm = dev; - } - - s = PIIX4_PM(dev); - s->irq = sci_irq; - s->smi_irq = smi_irq; - s->smm_enabled = smm_enabled; - if (xen_enabled()) { - s->use_acpi_hotplug_bridge = false; - } - - pci_realize_and_unref(pci_dev, bus, &error_fatal); - - return s->smb.smbus; + qdev_init_gpio_out(DEVICE(obj), &s->irq, 1); + qdev_init_gpio_out_named(DEVICE(obj), &s->smi_irq, "smi-irq", 1); } static uint64_t gpe_readb(void *opaque, hwaddr addr, unsigned width) @@ -647,13 +601,16 @@ static Property piix4_pm_properties[] = { DEFINE_PROP_UINT8(ACPI_PM_PROP_S3_DISABLED, PIIX4PMState, disable_s3, 0), DEFINE_PROP_UINT8(ACPI_PM_PROP_S4_DISABLED, PIIX4PMState, disable_s4, 0), DEFINE_PROP_UINT8(ACPI_PM_PROP_S4_VAL, PIIX4PMState, s4_val, 2), - DEFINE_PROP_BOOL("acpi-pci-hotplug-with-bridge-support", PIIX4PMState, + DEFINE_PROP_BOOL(ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, PIIX4PMState, use_acpi_hotplug_bridge, true), - DEFINE_PROP_BOOL("acpi-root-pci-hotplug", PIIX4PMState, + DEFINE_PROP_BOOL(ACPI_PM_PROP_ACPI_PCI_ROOTHP, PIIX4PMState, use_acpi_root_pci_hotplug, true), DEFINE_PROP_BOOL("memory-hotplug-support", PIIX4PMState, acpi_memory_hotplug.is_enabled, true), DEFINE_PROP_BOOL("smm-compat", PIIX4PMState, smm_compat, false), + DEFINE_PROP_BOOL("smm-enabled", PIIX4PMState, smm_enabled, false), + DEFINE_PROP_BOOL("x-not-migrate-acpi-index", PIIX4PMState, + not_migrate_acpi_index, false), DEFINE_PROP_END_OF_LIST(), }; @@ -692,6 +649,7 @@ static void piix4_pm_class_init(ObjectClass *klass, void *data) static const TypeInfo piix4_pm_info = { .name = TYPE_PIIX4_PM, .parent = TYPE_PCI_DEVICE, + .instance_init = piix4_pm_init, .instance_size = sizeof(PIIX4PMState), .class_init = piix4_pm_class_init, .interfaces = (InterfaceInfo[]) { diff --git a/hw/acpi/tco.c b/hw/acpi/tco.c index cf1e68a539..4783721e4e 100644 --- a/hw/acpi/tco.c +++ b/hw/acpi/tco.c @@ -239,7 +239,6 @@ const VMStateDescription vmstate_tco_io_sts = { 
.name = "tco io device status", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_UINT16(tco.rld, TCOIORegs), VMSTATE_UINT8(tco.din, TCOIORegs), diff --git a/hw/acpi/trace-events b/hw/acpi/trace-events index 974d770e8b..eb60b04f9b 100644 --- a/hw/acpi/trace-events +++ b/hw/acpi/trace-events @@ -55,3 +55,31 @@ piix4_gpe_writeb(uint64_t addr, unsigned width, uint64_t val) "addr: 0x%" PRIx64 # tco.c tco_timer_reload(int ticks, int msec) "ticks=%d (%d ms)" tco_timer_expired(int timeouts_no, bool strap, bool no_reboot) "timeouts_no=%d no_reboot=%d/%d" + +# erst.c +acpi_erst_reg_write(uint64_t addr, uint64_t val, unsigned size) "addr: 0x%04" PRIx64 " <== 0x%016" PRIx64 " (size: %u)" +acpi_erst_reg_read(uint64_t addr, uint64_t val, unsigned size) " addr: 0x%04" PRIx64 " ==> 0x%016" PRIx64 " (size: %u)" +acpi_erst_mem_write(uint64_t addr, uint64_t val, unsigned size) "addr: 0x%06" PRIx64 " <== 0x%016" PRIx64 " (size: %u)" +acpi_erst_mem_read(uint64_t addr, uint64_t val, unsigned size) " addr: 0x%06" PRIx64 " ==> 0x%016" PRIx64 " (size: %u)" +acpi_erst_pci_bar_0(uint64_t addr) "BAR0: 0x%016" PRIx64 +acpi_erst_pci_bar_1(uint64_t addr) "BAR1: 0x%016" PRIx64 +acpi_erst_realizefn_in(void) +acpi_erst_realizefn_out(unsigned size) "total nvram size %u bytes" +acpi_erst_reset_in(unsigned record_count) "record_count %u" +acpi_erst_reset_out(unsigned record_count) "record_count %u" +acpi_erst_post_load(void *header, unsigned slot_size) "header: 0x%p slot_size %u" +acpi_erst_class_init_in(void) +acpi_erst_class_init_out(void) + +# nvdimm.c +acpi_nvdimm_read_fit(uint32_t offset, uint32_t len, const char *dirty) "Read FIT: offset 0x%" PRIx32 " FIT size 0x%" PRIx32 " Dirty %s" +acpi_nvdimm_label_info(uint32_t label_size, uint32_t mxfer) "label_size 0x%" PRIx32 ", max_xfer 0x%" PRIx32 +acpi_nvdimm_label_overflow(uint32_t offset, uint32_t length) "offset 0x%" PRIx32 " + length 0x%" PRIx32 " is overflow" +acpi_nvdimm_label_oversize(uint32_t pos, uint64_t size) "position 0x%" PRIx32 " is beyond label data (len = %" PRIu64 ")" +acpi_nvdimm_label_xfer_exceed(uint32_t length, uint32_t max_xfer) "length (0x%" PRIx32 ") is larger than max_xfer (0x%" PRIx32 ")" +acpi_nvdimm_read_label(uint32_t offset, uint32_t length) "Read Label Data: offset 0x%" PRIx32 " length 0x%" PRIx32 +acpi_nvdimm_write_label(uint32_t offset, uint32_t length) "Write Label Data: offset 0x%" PRIx32 " length 0x%" PRIx32 +acpi_nvdimm_read_io_port(void) "Alert: we never read _DSM IO Port" +acpi_nvdimm_dsm_mem_addr(uint64_t dsm_mem_addr) "dsm memory address 0x%" PRIx64 +acpi_nvdimm_dsm_info(uint32_t revision, uint32_t handle, uint32_t function) "Revision 0x%" PRIx32 " Handle 0x%" PRIx32 " Function 0x%" PRIx32 +acpi_nvdimm_invalid_revision(uint32_t revision) "Revision 0x%" PRIx32 " is not supported, expect 0x1" diff --git a/hw/acpi/viot.c b/hw/acpi/viot.c new file mode 100644 index 0000000000..4e0bf69067 --- /dev/null +++ b/hw/acpi/viot.c @@ -0,0 +1,143 @@ +/* + * ACPI Virtual I/O Translation table implementation + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/aml-build.h" +#include "hw/acpi/viot.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" + +struct viot_pci_host_range { + int min_bus; + int max_bus; +}; + +static void build_pci_host_range(GArray *table_data, int min_bus, int max_bus, + uint16_t output_node) +{ + /* Type */ + build_append_int_noprefix(table_data, 1 /* PCI range */, 1); + /* Reserved */ + 
build_append_int_noprefix(table_data, 0, 1); + /* Length */ + build_append_int_noprefix(table_data, 24, 2); + /* Endpoint start */ + build_append_int_noprefix(table_data, PCI_BUILD_BDF(min_bus, 0), 4); + /* PCI Segment start */ + build_append_int_noprefix(table_data, 0, 2); + /* PCI Segment end */ + build_append_int_noprefix(table_data, 0, 2); + /* PCI BDF start */ + build_append_int_noprefix(table_data, PCI_BUILD_BDF(min_bus, 0), 2); + /* PCI BDF end */ + build_append_int_noprefix(table_data, PCI_BUILD_BDF(max_bus, 0xff), 2); + /* Output node */ + build_append_int_noprefix(table_data, output_node, 2); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 6); +} + +/* Build PCI range for a given PCI host bridge */ +static int enumerate_pci_host_bridges(Object *obj, void *opaque) +{ + GArray *pci_host_ranges = opaque; + + if (object_dynamic_cast(obj, TYPE_PCI_HOST_BRIDGE)) { + PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus; + + if (bus && !pci_bus_bypass_iommu(bus)) { + int min_bus, max_bus; + + pci_bus_range(bus, &min_bus, &max_bus); + + const struct viot_pci_host_range pci_host_range = { + .min_bus = min_bus, + .max_bus = max_bus, + }; + g_array_append_val(pci_host_ranges, pci_host_range); + } + } + + return 0; +} + +static gint pci_host_range_compare(gconstpointer a, gconstpointer b) +{ + struct viot_pci_host_range *range_a = (struct viot_pci_host_range *)a; + struct viot_pci_host_range *range_b = (struct viot_pci_host_range *)b; + + if (range_a->min_bus < range_b->min_bus) { + return -1; + } else if (range_a->min_bus > range_b->min_bus) { + return 1; + } else { + return 0; + } +} + +/* + * Generate a VIOT table with one PCI-based virtio-iommu that manages PCI + * endpoints. + * + * Defined in the ACPI Specification (Version TBD) + */ +void build_viot(MachineState *ms, GArray *table_data, BIOSLinker *linker, + uint16_t virtio_iommu_bdf, const char *oem_id, + const char *oem_table_id) +{ + /* The virtio-iommu node follows the 48-bytes header */ + int viommu_off = 48; + AcpiTable table = { .sig = "VIOT", .rev = 0, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + GArray *pci_host_ranges = g_array_new(false, true, + sizeof(struct viot_pci_host_range)); + struct viot_pci_host_range *pci_host_range; + int i; + + /* Build the list of PCI ranges that this viommu manages */ + object_child_foreach_recursive(OBJECT(ms), enumerate_pci_host_bridges, + pci_host_ranges); + + /* Sort the pci host ranges by min_bus */ + g_array_sort(pci_host_ranges, pci_host_range_compare); + + /* ACPI table header */ + acpi_table_begin(&table, table_data); + /* Node count */ + build_append_int_noprefix(table_data, pci_host_ranges->len + 1, 2); + /* Node offset */ + build_append_int_noprefix(table_data, viommu_off, 2); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); + + /* Virtio-iommu node */ + /* Type */ + build_append_int_noprefix(table_data, 3 /* virtio-pci IOMMU */, 1); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 1); + /* Length */ + build_append_int_noprefix(table_data, 16, 2); + /* PCI Segment */ + build_append_int_noprefix(table_data, 0, 2); + /* PCI BDF number */ + build_append_int_noprefix(table_data, virtio_iommu_bdf, 2); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); + + /* PCI ranges found above */ + for (i = 0; i < pci_host_ranges->len; i++) { + pci_host_range = &g_array_index(pci_host_ranges, + struct viot_pci_host_range, i); + + build_pci_host_range(table_data, pci_host_range->min_bus, + pci_host_range->max_bus, viommu_off); + } + + 
g_array_free(pci_host_ranges, true); + + acpi_table_end(linker, &table); +} + diff --git a/hw/acpi/viot.h b/hw/acpi/viot.h new file mode 100644 index 0000000000..9fe565bb87 --- /dev/null +++ b/hw/acpi/viot.h @@ -0,0 +1,13 @@ +/* + * ACPI Virtual I/O Translation Table implementation + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef VIOT_H +#define VIOT_H + +void build_viot(MachineState *ms, GArray *table_data, BIOSLinker *linker, + uint16_t virtio_iommu_bdf, const char *oem_id, + const char *oem_table_id); + +#endif /* VIOT_H */ diff --git a/hw/acpi/vmgenid.c b/hw/acpi/vmgenid.c index 4f41a13ea0..0c9f158ac9 100644 --- a/hw/acpi/vmgenid.c +++ b/hw/acpi/vmgenid.c @@ -29,6 +29,8 @@ void vmgenid_build_acpi(VmGenIdState *vms, GArray *table_data, GArray *guid, Aml *ssdt, *dev, *scope, *method, *addr, *if_ctx; uint32_t vgia_offset; QemuUUID guid_le; + AcpiTable table = { .sig = "SSDT", .rev = 1, + .oem_id = oem_id, .oem_table_id = "VMGENID" }; /* Fill in the GUID values. These need to be converted to little-endian * first, since that's what the guest expects @@ -42,12 +44,10 @@ void vmgenid_build_acpi(VmGenIdState *vms, GArray *table_data, GArray *guid, g_array_insert_vals(guid, VMGENID_GUID_OFFSET, guid_le.data, ARRAY_SIZE(guid_le.data)); - /* Put this in a separate SSDT table */ + /* Put VMGNEID into a separate SSDT table */ + acpi_table_begin(&table, table_data); ssdt = init_aml_allocator(); - /* Reserve space for header */ - acpi_data_push(ssdt->buf, sizeof(AcpiTableHeader)); - /* Storage for the GUID address */ vgia_offset = table_data->len + build_append_named_dword(ssdt->buf, "VGIA"); @@ -116,9 +116,8 @@ void vmgenid_build_acpi(VmGenIdState *vms, GArray *table_data, GArray *guid, ACPI_BUILD_TABLE_FILE, vgia_offset, sizeof(uint32_t), VMGENID_GUID_FW_CFG_FILE, 0); - build_header(linker, table_data, - (void *)(table_data->data + table_data->len - ssdt->buf->len), - "SSDT", ssdt->buf->len, 1, oem_id, "VMGENID"); + /* must be called after above command to ensure correct table checksum */ + acpi_table_end(linker, &table); free_aml_allocator(); } diff --git a/hw/adc/aspeed_adc.c b/hw/adc/aspeed_adc.c new file mode 100644 index 0000000000..0d29663129 --- /dev/null +++ b/hw/adc/aspeed_adc.c @@ -0,0 +1,443 @@ +/* + * Aspeed ADC + * + * Copyright 2017-2021 IBM Corp. 
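/*
 * Illustrative sketch (code fragment), not part of the patch: build_viot(),
 * declared in the new hw/acpi/viot.h above, is meant to be called from a
 * machine's ACPI table build path with the BDF of its virtio-iommu-pci
 * device. The variable names below (tables_blob, table_offsets,
 * viommu_devfn, oem_id, oem_table_id) are placeholders, not identifiers
 * taken from this patch; PCI_BUILD_BDF() is used as in viot.c.
 */
    acpi_add_table(table_offsets, tables_blob);
    build_viot(ms, tables_blob, linker, PCI_BUILD_BDF(0, viommu_devfn),
               oem_id, oem_table_id);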
+ * + * Andrew Jeffery + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/adc/aspeed_adc.h" +#include "trace.h" + +#define ASPEED_ADC_MEMORY_REGION_SIZE 0x1000 +#define ASPEED_ADC_ENGINE_MEMORY_REGION_SIZE 0x100 +#define ASPEED_ADC_ENGINE_CH_EN_MASK 0xffff0000 +#define ASPEED_ADC_ENGINE_CH_EN(x) ((BIT(x)) << 16) +#define ASPEED_ADC_ENGINE_INIT BIT(8) +#define ASPEED_ADC_ENGINE_AUTO_COMP BIT(5) +#define ASPEED_ADC_ENGINE_COMP BIT(4) +#define ASPEED_ADC_ENGINE_MODE_MASK 0x0000000e +#define ASPEED_ADC_ENGINE_MODE_OFF (0b000 << 1) +#define ASPEED_ADC_ENGINE_MODE_STANDBY (0b001 << 1) +#define ASPEED_ADC_ENGINE_MODE_NORMAL (0b111 << 1) +#define ASPEED_ADC_ENGINE_EN BIT(0) +#define ASPEED_ADC_HYST_EN BIT(31) + +#define ASPEED_ADC_L_MASK ((1 << 10) - 1) +#define ASPEED_ADC_L(x) ((x) & ASPEED_ADC_L_MASK) +#define ASPEED_ADC_H(x) (((x) >> 16) & ASPEED_ADC_L_MASK) +#define ASPEED_ADC_LH_MASK (ASPEED_ADC_L_MASK << 16 | ASPEED_ADC_L_MASK) +#define LOWER_CHANNEL_MASK ((1 << 10) - 1) +#define LOWER_CHANNEL_DATA(x) ((x) & LOWER_CHANNEL_MASK) +#define UPPER_CHANNEL_DATA(x) (((x) >> 16) & LOWER_CHANNEL_MASK) + +#define TO_REG(addr) (addr >> 2) + +#define ENGINE_CONTROL TO_REG(0x00) +#define INTERRUPT_CONTROL TO_REG(0x04) +#define VGA_DETECT_CONTROL TO_REG(0x08) +#define CLOCK_CONTROL TO_REG(0x0C) +#define DATA_CHANNEL_1_AND_0 TO_REG(0x10) +#define DATA_CHANNEL_7_AND_6 TO_REG(0x1C) +#define DATA_CHANNEL_9_AND_8 TO_REG(0x20) +#define DATA_CHANNEL_15_AND_14 TO_REG(0x2C) +#define BOUNDS_CHANNEL_0 TO_REG(0x30) +#define BOUNDS_CHANNEL_7 TO_REG(0x4C) +#define BOUNDS_CHANNEL_8 TO_REG(0x50) +#define BOUNDS_CHANNEL_15 TO_REG(0x6C) +#define HYSTERESIS_CHANNEL_0 TO_REG(0x70) +#define HYSTERESIS_CHANNEL_7 TO_REG(0x8C) +#define HYSTERESIS_CHANNEL_8 TO_REG(0x90) +#define HYSTERESIS_CHANNEL_15 TO_REG(0xAC) +#define INTERRUPT_SOURCE TO_REG(0xC0) +#define COMPENSATING_AND_TRIMMING TO_REG(0xC4) + +static inline uint32_t update_channels(uint32_t current) +{ + return ((((current >> 16) & ASPEED_ADC_L_MASK) + 7) << 16) | + ((current + 5) & ASPEED_ADC_L_MASK); +} + +static bool breaks_threshold(AspeedADCEngineState *s, int reg) +{ + assert(reg >= DATA_CHANNEL_1_AND_0 && + reg < DATA_CHANNEL_1_AND_0 + s->nr_channels / 2); + + int a_bounds_reg = BOUNDS_CHANNEL_0 + (reg - DATA_CHANNEL_1_AND_0) * 2; + int b_bounds_reg = a_bounds_reg + 1; + uint32_t a_and_b = s->regs[reg]; + uint32_t a_bounds = s->regs[a_bounds_reg]; + uint32_t b_bounds = s->regs[b_bounds_reg]; + uint32_t a = ASPEED_ADC_L(a_and_b); + uint32_t b = ASPEED_ADC_H(a_and_b); + uint32_t a_lower = ASPEED_ADC_L(a_bounds); + uint32_t a_upper = ASPEED_ADC_H(a_bounds); + uint32_t b_lower = ASPEED_ADC_L(b_bounds); + uint32_t b_upper = ASPEED_ADC_H(b_bounds); + + return (a < a_lower || a > a_upper) || + (b < b_lower || b > b_upper); +} + +static uint32_t read_channel_sample(AspeedADCEngineState *s, int reg) +{ + assert(reg >= DATA_CHANNEL_1_AND_0 && + reg < DATA_CHANNEL_1_AND_0 + s->nr_channels / 2); + + /* Poor man's sampling */ + uint32_t value = s->regs[reg]; + s->regs[reg] = update_channels(s->regs[reg]); + + if (breaks_threshold(s, reg)) { + s->regs[INTERRUPT_CONTROL] |= BIT(reg - DATA_CHANNEL_1_AND_0); + qemu_irq_raise(s->irq); + } + + return value; +} + +static uint64_t aspeed_adc_engine_read(void *opaque, hwaddr addr, + unsigned int size) +{ + AspeedADCEngineState *s = ASPEED_ADC_ENGINE(opaque); + 
int reg = TO_REG(addr); + uint32_t value = 0; + + switch (reg) { + case BOUNDS_CHANNEL_8 ... BOUNDS_CHANNEL_15: + if (s->nr_channels <= 8) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: engine[%u]: " + "bounds register %u invalid, only 0...7 valid\n", + __func__, s->engine_id, reg - BOUNDS_CHANNEL_0); + break; + } + /* fallthrough */ + case HYSTERESIS_CHANNEL_8 ... HYSTERESIS_CHANNEL_15: + if (s->nr_channels <= 8) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: engine[%u]: " + "hysteresis register %u invalid, only 0...7 valid\n", + __func__, s->engine_id, reg - HYSTERESIS_CHANNEL_0); + break; + } + /* fallthrough */ + case BOUNDS_CHANNEL_0 ... BOUNDS_CHANNEL_7: + case HYSTERESIS_CHANNEL_0 ... HYSTERESIS_CHANNEL_7: + case ENGINE_CONTROL: + case INTERRUPT_CONTROL: + case VGA_DETECT_CONTROL: + case CLOCK_CONTROL: + case INTERRUPT_SOURCE: + case COMPENSATING_AND_TRIMMING: + value = s->regs[reg]; + break; + case DATA_CHANNEL_9_AND_8 ... DATA_CHANNEL_15_AND_14: + if (s->nr_channels <= 8) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: engine[%u]: " + "data register %u invalid, only 0...3 valid\n", + __func__, s->engine_id, reg - DATA_CHANNEL_1_AND_0); + break; + } + /* fallthrough */ + case DATA_CHANNEL_1_AND_0 ... DATA_CHANNEL_7_AND_6: + value = read_channel_sample(s, reg); + /* Allow 16-bit reads of the data registers */ + if (addr & 0x2) { + assert(size == 2); + value >>= 16; + } + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: engine[%u]: 0x%" HWADDR_PRIx "\n", + __func__, s->engine_id, addr); + break; + } + + trace_aspeed_adc_engine_read(s->engine_id, addr, value); + return value; +} + +static void aspeed_adc_engine_write(void *opaque, hwaddr addr, uint64_t value, + unsigned int size) +{ + AspeedADCEngineState *s = ASPEED_ADC_ENGINE(opaque); + int reg = TO_REG(addr); + uint32_t init = 0; + + trace_aspeed_adc_engine_write(s->engine_id, addr, value); + + switch (reg) { + case ENGINE_CONTROL: + init = !!(value & ASPEED_ADC_ENGINE_EN); + init *= ASPEED_ADC_ENGINE_INIT; + + value &= ~ASPEED_ADC_ENGINE_INIT; + value |= init; + + value &= ~ASPEED_ADC_ENGINE_AUTO_COMP; + break; + case INTERRUPT_CONTROL: + case VGA_DETECT_CONTROL: + case CLOCK_CONTROL: + break; + case DATA_CHANNEL_9_AND_8 ... DATA_CHANNEL_15_AND_14: + if (s->nr_channels <= 8) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: engine[%u]: " + "data register %u invalid, only 0...3 valid\n", + __func__, s->engine_id, reg - DATA_CHANNEL_1_AND_0); + return; + } + /* fallthrough */ + case BOUNDS_CHANNEL_8 ... BOUNDS_CHANNEL_15: + if (s->nr_channels <= 8) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: engine[%u]: " + "bounds register %u invalid, only 0...7 valid\n", + __func__, s->engine_id, reg - BOUNDS_CHANNEL_0); + return; + } + /* fallthrough */ + case DATA_CHANNEL_1_AND_0 ... DATA_CHANNEL_7_AND_6: + case BOUNDS_CHANNEL_0 ... BOUNDS_CHANNEL_7: + value &= ASPEED_ADC_LH_MASK; + break; + case HYSTERESIS_CHANNEL_8 ... HYSTERESIS_CHANNEL_15: + if (s->nr_channels <= 8) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: engine[%u]: " + "hysteresis register %u invalid, only 0...7 valid\n", + __func__, s->engine_id, reg - HYSTERESIS_CHANNEL_0); + return; + } + /* fallthrough */ + case HYSTERESIS_CHANNEL_0 ... 
HYSTERESIS_CHANNEL_7: + value &= (ASPEED_ADC_HYST_EN | ASPEED_ADC_LH_MASK); + break; + case INTERRUPT_SOURCE: + value &= 0xffff; + break; + case COMPENSATING_AND_TRIMMING: + value &= 0xf; + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: engine[%u]: " + "0x%" HWADDR_PRIx " 0x%" PRIx64 "\n", + __func__, s->engine_id, addr, value); + break; + } + + s->regs[reg] = value; +} + +static const MemoryRegionOps aspeed_adc_engine_ops = { + .read = aspeed_adc_engine_read, + .write = aspeed_adc_engine_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 2, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static const uint32_t aspeed_adc_resets[ASPEED_ADC_NR_REGS] = { + [ENGINE_CONTROL] = 0x00000000, + [INTERRUPT_CONTROL] = 0x00000000, + [VGA_DETECT_CONTROL] = 0x0000000f, + [CLOCK_CONTROL] = 0x0000000f, +}; + +static void aspeed_adc_engine_reset(DeviceState *dev) +{ + AspeedADCEngineState *s = ASPEED_ADC_ENGINE(dev); + + memcpy(s->regs, aspeed_adc_resets, sizeof(aspeed_adc_resets)); +} + +static void aspeed_adc_engine_realize(DeviceState *dev, Error **errp) +{ + AspeedADCEngineState *s = ASPEED_ADC_ENGINE(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + g_autofree char *name = g_strdup_printf(TYPE_ASPEED_ADC_ENGINE ".%d", + s->engine_id); + + assert(s->engine_id < 2); + + sysbus_init_irq(sbd, &s->irq); + + memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_adc_engine_ops, s, name, + ASPEED_ADC_ENGINE_MEMORY_REGION_SIZE); + + sysbus_init_mmio(sbd, &s->mmio); +} + +static const VMStateDescription vmstate_aspeed_adc_engine = { + .name = TYPE_ASPEED_ADC, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, AspeedADCEngineState, ASPEED_ADC_NR_REGS), + VMSTATE_END_OF_LIST(), + } +}; + +static Property aspeed_adc_engine_properties[] = { + DEFINE_PROP_UINT32("engine-id", AspeedADCEngineState, engine_id, 0), + DEFINE_PROP_UINT32("nr-channels", AspeedADCEngineState, nr_channels, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_adc_engine_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = aspeed_adc_engine_realize; + dc->reset = aspeed_adc_engine_reset; + device_class_set_props(dc, aspeed_adc_engine_properties); + dc->desc = "Aspeed Analog-to-Digital Engine"; + dc->vmsd = &vmstate_aspeed_adc_engine; +} + +static const TypeInfo aspeed_adc_engine_info = { + .name = TYPE_ASPEED_ADC_ENGINE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedADCEngineState), + .class_init = aspeed_adc_engine_class_init, +}; + +static void aspeed_adc_instance_init(Object *obj) +{ + AspeedADCState *s = ASPEED_ADC(obj); + AspeedADCClass *aac = ASPEED_ADC_GET_CLASS(obj); + uint32_t nr_channels = ASPEED_ADC_NR_CHANNELS / aac->nr_engines; + + for (int i = 0; i < aac->nr_engines; i++) { + AspeedADCEngineState *engine = &s->engines[i]; + object_initialize_child(obj, "engine[*]", engine, + TYPE_ASPEED_ADC_ENGINE); + qdev_prop_set_uint32(DEVICE(engine), "engine-id", i); + qdev_prop_set_uint32(DEVICE(engine), "nr-channels", nr_channels); + } +} + +static void aspeed_adc_set_irq(void *opaque, int n, int level) +{ + AspeedADCState *s = opaque; + AspeedADCClass *aac = ASPEED_ADC_GET_CLASS(s); + uint32_t pending = 0; + + /* TODO: update Global IRQ status register on AST2600 (Need specs) */ + for (int i = 0; i < aac->nr_engines; i++) { + uint32_t irq_status = s->engines[i].regs[INTERRUPT_CONTROL] & 0xFF; + pending |= irq_status << (i * 8); + } + + qemu_set_irq(s->irq, !!pending); +} 
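/*
 * Illustrative sketch, not part of the patch: each 32-bit data register
 * packs two 10-bit channel samples, which is why aspeed_adc_engine_read()
 * allows 16-bit accesses at (addr & 0x2) to return the upper channel. Using
 * only the macros defined above, a helper that pulls one channel out of the
 * register file could look like this (the helper itself is hypothetical):
 */
static uint32_t example_channel_value(AspeedADCEngineState *s, int channel)
{
    int reg = DATA_CHANNEL_1_AND_0 + channel / 2;   /* two channels per reg */
    uint32_t pair = s->regs[reg];

    return (channel & 1) ? UPPER_CHANNEL_DATA(pair)
                         : LOWER_CHANNEL_DATA(pair);
}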
+ +static void aspeed_adc_realize(DeviceState *dev, Error **errp) +{ + AspeedADCState *s = ASPEED_ADC(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedADCClass *aac = ASPEED_ADC_GET_CLASS(dev); + + qdev_init_gpio_in_named_with_opaque(DEVICE(sbd), aspeed_adc_set_irq, + s, NULL, aac->nr_engines); + + sysbus_init_irq(sbd, &s->irq); + + memory_region_init(&s->mmio, OBJECT(s), TYPE_ASPEED_ADC, + ASPEED_ADC_MEMORY_REGION_SIZE); + + sysbus_init_mmio(sbd, &s->mmio); + + for (int i = 0; i < aac->nr_engines; i++) { + Object *eng = OBJECT(&s->engines[i]); + + if (!sysbus_realize(SYS_BUS_DEVICE(eng), errp)) { + return; + } + sysbus_connect_irq(SYS_BUS_DEVICE(eng), 0, + qdev_get_gpio_in(DEVICE(sbd), i)); + memory_region_add_subregion(&s->mmio, + i * ASPEED_ADC_ENGINE_MEMORY_REGION_SIZE, + &s->engines[i].mmio); + } +} + +static void aspeed_adc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedADCClass *aac = ASPEED_ADC_CLASS(klass); + + dc->realize = aspeed_adc_realize; + dc->desc = "Aspeed Analog-to-Digital Converter"; + aac->nr_engines = 1; +} + +static void aspeed_2600_adc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedADCClass *aac = ASPEED_ADC_CLASS(klass); + + dc->desc = "ASPEED 2600 ADC Controller"; + aac->nr_engines = 2; +} + +static void aspeed_1030_adc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedADCClass *aac = ASPEED_ADC_CLASS(klass); + + dc->desc = "ASPEED 1030 ADC Controller"; + aac->nr_engines = 2; +} + +static const TypeInfo aspeed_adc_info = { + .name = TYPE_ASPEED_ADC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = aspeed_adc_instance_init, + .instance_size = sizeof(AspeedADCState), + .class_init = aspeed_adc_class_init, + .class_size = sizeof(AspeedADCClass), + .abstract = true, +}; + +static const TypeInfo aspeed_2400_adc_info = { + .name = TYPE_ASPEED_2400_ADC, + .parent = TYPE_ASPEED_ADC, +}; + +static const TypeInfo aspeed_2500_adc_info = { + .name = TYPE_ASPEED_2500_ADC, + .parent = TYPE_ASPEED_ADC, +}; + +static const TypeInfo aspeed_2600_adc_info = { + .name = TYPE_ASPEED_2600_ADC, + .parent = TYPE_ASPEED_ADC, + .class_init = aspeed_2600_adc_class_init, +}; + +static const TypeInfo aspeed_1030_adc_info = { + .name = TYPE_ASPEED_1030_ADC, + .parent = TYPE_ASPEED_ADC, + .class_init = aspeed_1030_adc_class_init, /* No change since AST2600 */ +}; + +static void aspeed_adc_register_types(void) +{ + type_register_static(&aspeed_adc_engine_info); + type_register_static(&aspeed_adc_info); + type_register_static(&aspeed_2400_adc_info); + type_register_static(&aspeed_2500_adc_info); + type_register_static(&aspeed_2600_adc_info); + type_register_static(&aspeed_1030_adc_info); +} + +type_init(aspeed_adc_register_types); diff --git a/hw/adc/meson.build b/hw/adc/meson.build index ac4f093fea..b29ac7ccdf 100644 --- a/hw/adc/meson.build +++ b/hw/adc/meson.build @@ -1,4 +1,5 @@ softmmu_ss.add(when: 'CONFIG_STM32F2XX_ADC', if_true: files('stm32f2xx_adc.c')) +softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_adc.c')) softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_adc.c')) softmmu_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq-xadc.c')) softmmu_ss.add(when: 'CONFIG_MAX111X', if_true: files('max111x.c')) diff --git a/hw/adc/npcm7xx_adc.c b/hw/adc/npcm7xx_adc.c index 0f0a9f63e2..bc6f3f55e6 100644 --- a/hw/adc/npcm7xx_adc.c +++ b/hw/adc/npcm7xx_adc.c @@ -36,7 +36,7 @@ REG32(NPCM7XX_ADC_DATA, 0x4) #define 
NPCM7XX_ADC_CON_INT BIT(18) #define NPCM7XX_ADC_CON_EN BIT(17) #define NPCM7XX_ADC_CON_RST BIT(16) -#define NPCM7XX_ADC_CON_CONV BIT(14) +#define NPCM7XX_ADC_CON_CONV BIT(13) #define NPCM7XX_ADC_CON_DIV(rv) extract32(rv, 1, 8) #define NPCM7XX_ADC_MAX_RESULT 1023 @@ -242,7 +242,7 @@ static void npcm7xx_adc_init(Object *obj) for (i = 0; i < NPCM7XX_ADC_NUM_INPUTS; ++i) { object_property_add_uint32_ptr(obj, "adci[*]", - &s->adci[i], OBJ_PROP_FLAG_WRITE); + &s->adci[i], OBJ_PROP_FLAG_READWRITE); } object_property_add_uint32_ptr(obj, "vref", &s->vref, OBJ_PROP_FLAG_WRITE); diff --git a/hw/adc/trace-events b/hw/adc/trace-events index 456f21c8f4..5a4c444d77 100644 --- a/hw/adc/trace-events +++ b/hw/adc/trace-events @@ -3,3 +3,6 @@ # npcm7xx_adc.c npcm7xx_adc_read(const char *id, uint64_t offset, uint32_t value) " %s offset: 0x%04" PRIx64 " value 0x%04" PRIx32 npcm7xx_adc_write(const char *id, uint64_t offset, uint32_t value) "%s offset: 0x%04" PRIx64 " value 0x%04" PRIx32 + +aspeed_adc_engine_read(uint32_t engine_id, uint64_t addr, uint64_t value) "engine[%u] 0x%" PRIx64 " 0x%" PRIx64 +aspeed_adc_engine_write(uint32_t engine_id, uint64_t addr, uint64_t value) "engine[%u] 0x%" PRIx64 " 0x%" PRIx64 diff --git a/hw/adc/zynq-xadc.c b/hw/adc/zynq-xadc.c index cfc7bab065..032e19cbd0 100644 --- a/hw/adc/zynq-xadc.c +++ b/hw/adc/zynq-xadc.c @@ -86,7 +86,7 @@ static void zynq_xadc_update_ints(ZynqXADCState *s) s->regs[INT_STS] |= INT_DFIFO_GTH; } - qemu_set_irq(s->qemu_irq, !!(s->regs[INT_STS] & ~s->regs[INT_MASK])); + qemu_set_irq(s->irq, !!(s->regs[INT_STS] & ~s->regs[INT_MASK])); } static void zynq_xadc_reset(DeviceState *d) @@ -262,7 +262,7 @@ static void zynq_xadc_init(Object *obj) memory_region_init_io(&s->iomem, obj, &xadc_ops, s, "zynq-xadc", ZYNQ_XADC_MMIO_SIZE); sysbus_init_mmio(sbd, &s->iomem); - sysbus_init_irq(sbd, &s->qemu_irq); + sysbus_init_irq(sbd, &s->irq); } static const VMStateDescription vmstate_zynq_xadc = { diff --git a/hw/alpha/dp264.c b/hw/alpha/dp264.c index c78ed96d0e..c502c8c62a 100644 --- a/hw/alpha/dp264.c +++ b/hw/alpha/dp264.c @@ -7,7 +7,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "cpu.h" #include "elf.h" #include "hw/loader.h" @@ -21,8 +20,6 @@ #include "qemu/datadir.h" #include "net/net.h" -#define MAX_IDE_BUS 2 - static uint64_t cpu_alpha_superpage_to_phys(void *opaque, uint64_t addr) { if (((addr >> 41) & 3) == 2) { diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig index 4ba0aca067..17fcde8e1c 100644 --- a/hw/arm/Kconfig +++ b/hw/arm/Kconfig @@ -25,10 +25,12 @@ config ARM_VIRT select ACPI_PCI select MEM_DEVICE select DIMM - select ACPI_MEMORY_HOTPLUG select ACPI_HW_REDUCED - select ACPI_NVDIMM select ACPI_APEI + select ACPI_VIOT + select VIRTIO_MEM_SUPPORTED + select ACPI_CXL + select ACPI_HMAT config CHEETAH bool @@ -46,6 +48,7 @@ config DIGIC config EXYNOS4 bool + imply I2C_DEVICES select A9MPCORE select I2C select LAN9118 @@ -95,6 +98,9 @@ config MUSCA select SPLIT_IRQ select UNIMP +config MARVELL_88W8618 + bool + config MUSICPAL bool select OR_IRQ @@ -181,6 +187,7 @@ config REALVIEW bool imply PCI_DEVICES imply PCI_TESTDEV + imply I2C_DEVICES select SMC91C111 select LAN9118 select A9MPCORE @@ -226,6 +233,7 @@ config SABRELITE config STELLARIS bool + imply I2C_DEVICES select ARM_V7M select CMSDK_APB_WATCHDOG select I2C @@ -237,6 +245,7 @@ config STELLARIS select SSI_SD select STELLARIS_INPUT select STELLARIS_ENET # ethernet + select STELLARIS_GPTM # general purpose timer module select UNIMP config STM32VLDISCOVERY @@ -382,6 +391,8 @@ config 
XLNX_VERSAL select XLNX_ZDMA select XLNX_ZYNQMP select OR_IRQ + select XLNX_BBRAM + select XLNX_EFUSE_VERSAL config NPCM7XX bool @@ -391,6 +402,7 @@ config NPCM7XX select SMBUS select AT24C # EEPROM select MAX34451 + select ISL_PMBUS_VR select PL310 # cache controller select PMBUS select SERIAL @@ -400,6 +412,7 @@ config NPCM7XX config FSL_IMX25 bool + imply I2C_DEVICES select IMX select IMX_FEC select IMX_I2C @@ -408,6 +421,7 @@ config FSL_IMX25 config FSL_IMX31 bool + imply I2C_DEVICES select SERIAL select IMX select IMX_I2C @@ -416,6 +430,7 @@ config FSL_IMX31 config FSL_IMX6 bool + imply I2C_DEVICES select A9MPCORE select IMX select IMX_FEC @@ -429,6 +444,7 @@ config ASPEED_SOC select DS1338 select FTGMAC100 select I2C + select DPS310 select PCA9552 select SERIAL select SMBUS_EEPROM @@ -440,9 +456,12 @@ config ASPEED_SOC select EMC141X select UNIMP select LED + select PMBUS + select MAX31785 config MPS2 bool + imply I2C_DEVICES select ARMSSE select LAN9118 select MPS2_FPGAIO @@ -459,6 +478,7 @@ config FSL_IMX7 bool imply PCI_DEVICES imply TEST_DEVICES + imply I2C_DEVICES select A15MPCORE select PCI select IMX @@ -474,6 +494,7 @@ config ARM_SMMUV3 config FSL_IMX6UL bool + imply I2C_DEVICES select A15MPCORE select IMX select IMX_FEC @@ -488,6 +509,7 @@ config MICROBIT config NRF51_SOC bool + imply I2C_DEVICES select I2C select ARM_V7M select UNIMP diff --git a/hw/arm/allwinner-a10.c b/hw/arm/allwinner-a10.c index 05e84728cb..79082289ea 100644 --- a/hw/arm/allwinner-a10.c +++ b/hw/arm/allwinner-a10.c @@ -130,9 +130,7 @@ static void aw_a10_realize(DeviceState *dev, Error **errp) int i; for (i = 0; i < AW_A10_NUM_USB; i++) { - char bus[16]; - - sprintf(bus, "usb-bus.%d", i); + g_autofree char *bus = g_strdup_printf("usb-bus.%d", i); object_property_set_bool(OBJECT(&s->ehci[i]), "companion-enable", true, &error_fatal); diff --git a/hw/arm/allwinner-h3.c b/hw/arm/allwinner-h3.c index 27f1070145..308ed15552 100644 --- a/hw/arm/allwinner-h3.c +++ b/hw/arm/allwinner-h3.c @@ -174,7 +174,7 @@ void allwinner_h3_bootrom_setup(AwH3State *s, BlockBackend *blk) const int64_t rom_size = 32 * KiB; g_autofree uint8_t *buffer = g_new0(uint8_t, rom_size); - if (blk_pread(blk, 8 * KiB, buffer, rom_size) < 0) { + if (blk_pread(blk, 8 * KiB, rom_size, buffer, 0) < 0) { error_setg(&error_fatal, "%s: failed to read BlockBackend data", __func__); return; @@ -235,11 +235,10 @@ static void allwinner_h3_realize(DeviceState *dev, Error **errp) /* CPUs */ for (i = 0; i < AW_H3_NUM_CPUS; i++) { - /* Provide Power State Coordination Interface */ - qdev_prop_set_int32(DEVICE(&s->cpus[i]), "psci-conduit", - QEMU_PSCI_CONDUIT_HVC); - - /* Disable secondary CPUs */ + /* + * Disable secondary CPUs. Guest EL3 firmware will start + * them via CPU reset control registers. + */ qdev_prop_set_bit(DEVICE(&s->cpus[i]), "start-powered-off", i > 0); diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c index a1456cb0f4..aecdeb9815 100644 --- a/hw/arm/armsse.c +++ b/hw/arm/armsse.c @@ -689,17 +689,6 @@ static void armsse_forward_sec_resp_cfg(ARMSSE *s) qdev_connect_gpio_out(dev_splitter, 2, s->sec_resp_cfg_in); } -static void armsse_mainclk_update(void *opaque, ClockEvent event) -{ - ARMSSE *s = ARM_SSE(opaque); - - /* - * Set system_clock_scale from our Clock input; this is what - * controls the tick rate of the CPU SysTick timer. 
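/*
 * Illustrative sketch (code fragment), not part of the patch: with
 * system_clock_scale gone, a container SoC forwards real clock inputs to
 * the ARMv7M object instead, as the ARMSSE code below now does for
 * "cpuclk". The SoC fields used here (s->armv7m, s->sysclk, s->refclk) are
 * hypothetical; "cpuclk" is mandatory per the new check in
 * armv7m_realize(), while "refclk" stays optional.
 */
    qdev_connect_clock_in(DEVICE(&s->armv7m), "cpuclk", s->sysclk);
    if (clock_has_source(s->refclk)) {
        qdev_connect_clock_in(DEVICE(&s->armv7m), "refclk", s->refclk);
    }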
- */ - system_clock_scale = clock_ticks_to_ns(s->mainclk, 1); -} - static void armsse_init(Object *obj) { ARMSSE *s = ARM_SSE(obj); @@ -711,8 +700,7 @@ static void armsse_init(Object *obj) assert(info->sram_banks <= MAX_SRAM_BANKS); assert(info->num_cpus <= SSE_MAX_CPUS); - s->mainclk = qdev_init_clock_in(DEVICE(s), "MAINCLK", - armsse_mainclk_update, s, ClockUpdate); + s->mainclk = qdev_init_clock_in(DEVICE(s), "MAINCLK", NULL, NULL, 0); s->s32kclk = qdev_init_clock_in(DEVICE(s), "S32KCLK", NULL, NULL, 0); memory_region_init(&s->container, obj, "armsse-container", UINT64_MAX); @@ -995,6 +983,9 @@ static void armsse_realize(DeviceState *dev, Error **errp) int j; char *gpioname; + qdev_connect_clock_in(cpudev, "cpuclk", s->mainclk); + /* The SSE subsystems do not wire up a systick refclk */ + qdev_prop_set_uint32(cpudev, "num-irq", s->exp_numirq + NUM_SSE_IRQS); /* * In real hardware the initial Secure VTOR is set from the INITSVTOR* @@ -1651,9 +1642,6 @@ static void armsse_realize(DeviceState *dev, Error **errp) * devices in the ARMSSE. */ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->container); - - /* Set initial system_clock_scale from MAINCLK */ - armsse_mainclk_update(s, ClockUpdate); } static void armsse_idau_check(IDAUInterface *ii, uint32_t address, diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c index 9ce5c30cd5..50a9507c0b 100644 --- a/hw/arm/armv7m.c +++ b/hw/arm/armv7m.c @@ -14,11 +14,14 @@ #include "hw/arm/boot.h" #include "hw/loader.h" #include "hw/qdev-properties.h" +#include "hw/qdev-clock.h" #include "elf.h" #include "sysemu/reset.h" #include "qemu/error-report.h" #include "qemu/module.h" +#include "qemu/log.h" #include "target/arm/idau.h" +#include "migration/vmstate.h" /* Bitbanded IO. Each word corresponds to a single bit. */ @@ -124,6 +127,122 @@ static const hwaddr bitband_output_addr[ARMV7M_NUM_BITBANDS] = { 0x22000000, 0x42000000 }; +static MemTxResult v7m_sysreg_ns_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + MemoryRegion *mr = opaque; + + if (attrs.secure) { + /* S accesses to the alias act like NS accesses to the real region */ + attrs.secure = 0; + return memory_region_dispatch_write(mr, addr, value, + size_memop(size) | MO_TE, attrs); + } else { + /* NS attrs are RAZ/WI for privileged, and BusFault for user */ + if (attrs.user) { + return MEMTX_ERROR; + } + return MEMTX_OK; + } +} + +static MemTxResult v7m_sysreg_ns_read(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + MemoryRegion *mr = opaque; + + if (attrs.secure) { + /* S accesses to the alias act like NS accesses to the real region */ + attrs.secure = 0; + return memory_region_dispatch_read(mr, addr, data, + size_memop(size) | MO_TE, attrs); + } else { + /* NS attrs are RAZ/WI for privileged, and BusFault for user */ + if (attrs.user) { + return MEMTX_ERROR; + } + *data = 0; + return MEMTX_OK; + } +} + +static const MemoryRegionOps v7m_sysreg_ns_ops = { + .read_with_attrs = v7m_sysreg_ns_read, + .write_with_attrs = v7m_sysreg_ns_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static MemTxResult v7m_systick_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + ARMv7MState *s = opaque; + MemoryRegion *mr; + + /* Direct the access to the correct systick */ + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); + return memory_region_dispatch_write(mr, addr, value, + size_memop(size) | MO_TE, attrs); +} + +static MemTxResult v7m_systick_read(void *opaque, hwaddr addr, + 
uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + ARMv7MState *s = opaque; + MemoryRegion *mr; + + /* Direct the access to the correct systick */ + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); + return memory_region_dispatch_read(mr, addr, data, size_memop(size) | MO_TE, + attrs); +} + +static const MemoryRegionOps v7m_systick_ops = { + .read_with_attrs = v7m_systick_read, + .write_with_attrs = v7m_systick_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +/* + * Unassigned portions of the PPB space are RAZ/WI for privileged + * accesses, and fault for non-privileged accesses. + */ +static MemTxResult ppb_default_read(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + qemu_log_mask(LOG_UNIMP, "Read of unassigned area of PPB: offset 0x%x\n", + (uint32_t)addr); + if (attrs.user) { + return MEMTX_ERROR; + } + *data = 0; + return MEMTX_OK; +} + +static MemTxResult ppb_default_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + qemu_log_mask(LOG_UNIMP, "Write of unassigned area of PPB: offset 0x%x\n", + (uint32_t)addr); + if (attrs.user) { + return MEMTX_ERROR; + } + return MEMTX_OK; +} + +static const MemoryRegionOps ppb_default_ops = { + .read_with_attrs = ppb_default_read, + .write_with_attrs = ppb_default_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.min_access_size = 1, + .valid.max_access_size = 8, +}; + static void armv7m_instance_init(Object *obj) { ARMv7MState *s = ARMV7M(obj); @@ -137,10 +256,20 @@ static void armv7m_instance_init(Object *obj) object_property_add_alias(obj, "num-irq", OBJECT(&s->nvic), "num-irq"); + object_initialize_child(obj, "systick-reg-ns", &s->systick[M_REG_NS], + TYPE_SYSTICK); + /* + * We can't initialize the secure systick here, as we don't know + * yet if we need it. + */ + for (i = 0; i < ARRAY_SIZE(s->bitband); i++) { object_initialize_child(obj, "bitband[*]", &s->bitband[i], TYPE_BITBAND); } + + s->refclk = qdev_init_clock_in(DEVICE(obj), "refclk", NULL, NULL, 0); + s->cpuclk = qdev_init_clock_in(DEVICE(obj), "cpuclk", NULL, NULL, 0); } static void armv7m_realize(DeviceState *dev, Error **errp) @@ -155,6 +284,12 @@ static void armv7m_realize(DeviceState *dev, Error **errp) return; } + /* cpuclk must be connected; refclk is optional */ + if (!clock_has_source(s->cpuclk)) { + error_setg(errp, "armv7m: cpuclk must be connected"); + return; + } + memory_region_add_subregion_overlap(&s->container, 0, s->board_memory, -1); s->cpu = ARM_CPU(object_new_with_props(s->cpu_type, OBJECT(s), "cpu", @@ -223,13 +358,142 @@ static void armv7m_realize(DeviceState *dev, Error **errp) qdev_pass_gpios(DEVICE(&s->nvic), dev, "SYSRESETREQ"); qdev_pass_gpios(DEVICE(&s->nvic), dev, "NMI"); + /* + * We map various devices into the container MR at their architected + * addresses. In particular, we map everything corresponding to the + * "System PPB" space. This is the range from 0xe0000000 to 0xe00fffff + * and includes the NVIC, the System Control Space (system registers), + * the systick timer, and for CPUs with the Security extension an NS + * banked version of all of these. + * + * The default behaviour for unimplemented registers/ranges + * (for instance the Data Watchpoint and Trace unit at 0xe0001000) + * is to RAZ/WI for privileged access and BusFault for non-privileged + * access. 
+ * + * The NVIC and System Control Space (SCS) starts at 0xe000e000 + * and looks like this: + * 0x004 - ICTR + * 0x010 - 0xff - systick + * 0x100..0x7ec - NVIC + * 0x7f0..0xcff - Reserved + * 0xd00..0xd3c - SCS registers + * 0xd40..0xeff - Reserved or Not implemented + * 0xf00 - STIR + * + * Some registers within this space are banked between security states. + * In v8M there is a second range 0xe002e000..0xe002efff which is the + * NonSecure alias SCS; secure accesses to this behave like NS accesses + * to the main SCS range, and non-secure accesses (including when + * the security extension is not implemented) are RAZ/WI. + * Note that both the main SCS range and the alias range are defined + * to be exempt from memory attribution (R_BLJT) and so the memory + * transaction attribute always matches the current CPU security + * state (attrs.secure == env->v7m.secure). In the v7m_sysreg_ns_ops + * wrappers we change attrs.secure to indicate the NS access; so + * generally code determining which banked register to use should + * use attrs.secure; code determining actual behaviour of the system + * should use env->v7m.secure. + * + * Within the PPB space, some MRs overlap, and the priority + * of overlapping regions is: + * - default region (for RAZ/WI and BusFault) : -1 + * - system register regions (provided by the NVIC) : 0 + * - systick : 1 + * This is because the systick device is a small block of registers + * in the middle of the other system control registers. + */ + + memory_region_init_io(&s->defaultmem, OBJECT(s), &ppb_default_ops, s, + "nvic-default", 0x100000); + memory_region_add_subregion_overlap(&s->container, 0xe0000000, + &s->defaultmem, -1); + /* Wire the NVIC up to the CPU */ sbd = SYS_BUS_DEVICE(&s->nvic); sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_IRQ)); - memory_region_add_subregion(&s->container, 0xe0000000, + memory_region_add_subregion(&s->container, 0xe000e000, sysbus_mmio_get_region(sbd, 0)); + if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) { + /* Create the NS alias region for the NVIC sysregs */ + memory_region_init_io(&s->sysreg_ns_mem, OBJECT(s), + &v7m_sysreg_ns_ops, + sysbus_mmio_get_region(sbd, 0), + "nvic_sysregs_ns", 0x1000); + memory_region_add_subregion(&s->container, 0xe002e000, + &s->sysreg_ns_mem); + } + + /* + * Create and map the systick devices. Note that we only connect + * refclk if it has been connected to us; otherwise the systick + * device gets the wrong answer for clock_has_source(refclk), because + * it has an immediate source (the ARMv7M's clock object) but not + * an ultimate source, and then it won't correctly auto-select the + * CPU clock as its only possible clock source. + */ + if (clock_has_source(s->refclk)) { + qdev_connect_clock_in(DEVICE(&s->systick[M_REG_NS]), "refclk", + s->refclk); + } + qdev_connect_clock_in(DEVICE(&s->systick[M_REG_NS]), "cpuclk", s->cpuclk); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), errp)) { + return; + } + sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), 0, + qdev_get_gpio_in_named(DEVICE(&s->nvic), + "systick-trigger", M_REG_NS)); + + if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) { + /* + * We couldn't init the secure systick device in instance_init + * as we didn't know then if the CPU had the security extensions; + * so we have to do it here. 
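/*
 * Illustrative sketch, not part of the patch: the PPB priority scheme
 * described in the comment above, collected in one place (the individual
 * calls are spread through armv7m_realize()). Where regions overlap the
 * higher priority wins, so the systick window sits on top of the NVIC/SCS
 * registers, which in turn sit on top of the RAZ/WI-or-BusFault default
 * region.
 */
    memory_region_add_subregion_overlap(&s->container, 0xe0000000,
                                        &s->defaultmem, -1);      /* catch-all */
    memory_region_add_subregion(&s->container, 0xe000e000,
                                sysbus_mmio_get_region(sbd, 0));  /* NVIC/SCS  */
    memory_region_add_subregion_overlap(&s->container, 0xe000e010,
                                        &s->systickmem, 1);       /* systick   */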
+ */ + object_initialize_child(OBJECT(dev), "systick-reg-s", + &s->systick[M_REG_S], TYPE_SYSTICK); + if (clock_has_source(s->refclk)) { + qdev_connect_clock_in(DEVICE(&s->systick[M_REG_S]), "refclk", + s->refclk); + } + qdev_connect_clock_in(DEVICE(&s->systick[M_REG_S]), "cpuclk", + s->cpuclk); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->systick[M_REG_S]), errp)) { + return; + } + sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_S]), 0, + qdev_get_gpio_in_named(DEVICE(&s->nvic), + "systick-trigger", M_REG_S)); + } + + memory_region_init_io(&s->systickmem, OBJECT(s), + &v7m_systick_ops, s, + "v7m_systick", 0xe0); + + memory_region_add_subregion_overlap(&s->container, 0xe000e010, + &s->systickmem, 1); + if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) { + memory_region_init_io(&s->systick_ns_mem, OBJECT(s), + &v7m_sysreg_ns_ops, &s->systickmem, + "v7m_systick_ns", 0xe0); + memory_region_add_subregion_overlap(&s->container, 0xe002e010, + &s->systick_ns_mem, 1); + } + + /* If the CPU has RAS support, create the RAS register block */ + if (cpu_isar_feature(aa32_ras, s->cpu)) { + object_initialize_child(OBJECT(dev), "armv7m-ras", + &s->ras, TYPE_ARMV7M_RAS); + sbd = SYS_BUS_DEVICE(&s->ras); + if (!sysbus_realize(sbd, errp)) { + return; + } + memory_region_add_subregion_overlap(&s->container, 0xe0005000, + sysbus_mmio_get_region(sbd, 0), 1); + } for (i = 0; i < ARRAY_SIZE(s->bitband); i++) { if (s->enable_bitband) { @@ -269,11 +533,23 @@ static Property armv7m_properties[] = { DEFINE_PROP_END_OF_LIST(), }; +static const VMStateDescription vmstate_armv7m = { + .name = "armv7m", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_CLOCK(refclk, ARMv7MState), + VMSTATE_CLOCK(cpuclk, ARMv7MState), + VMSTATE_END_OF_LIST() + } +}; + static void armv7m_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = armv7m_realize; + dc->vmsd = &vmstate_armv7m; device_class_set_props(dc, armv7m_properties); } @@ -292,21 +568,15 @@ static void armv7m_reset(void *opaque) cpu_reset(CPU(cpu)); } -void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size) +void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, + hwaddr mem_base, int mem_size) { - int image_size; + ssize_t image_size; uint64_t entry; - int big_endian; AddressSpace *as; int asidx; CPUState *cs = CPU(cpu); -#ifdef TARGET_WORDS_BIGENDIAN - big_endian = 1; -#else - big_endian = 0; -#endif - if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) { asidx = ARMASIdx_S; } else { @@ -317,9 +587,9 @@ void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size) if (kernel_filename) { image_size = load_elf_as(kernel_filename, NULL, NULL, NULL, &entry, NULL, NULL, - NULL, big_endian, EM_ARM, 1, 0, as); + NULL, 0, EM_ARM, 1, 0, as); if (image_size < 0) { - image_size = load_image_targphys_as(kernel_filename, 0, + image_size = load_image_targphys_as(kernel_filename, mem_base, mem_size, as); } if (image_size < 0) { diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c index 9d43e26c51..97fb1916ec 100644 --- a/hw/arm/aspeed.c +++ b/hw/arm/aspeed.c @@ -21,9 +21,12 @@ #include "hw/misc/led.h" #include "hw/qdev-properties.h" #include "sysemu/block-backend.h" +#include "sysemu/reset.h" #include "hw/loader.h" #include "qemu/error-report.h" #include "qemu/units.h" +#include "hw/qdev-clock.h" +#include "sysemu/sysemu.h" static struct arm_boot_info aspeed_board_binfo = { .board_id = -1, /* device-tree-only board */ @@ -35,8 +38,6 @@ struct AspeedMachineState { /* 
Public */ AspeedSoCState soc; - MemoryRegion ram_container; - MemoryRegion max_ram; bool mmio_exec; char *fmc_model; char *spi_model; @@ -106,17 +107,6 @@ struct AspeedMachineState { SCU_HW_STRAP_VGA_SIZE_SET(VGA_16M_DRAM) | \ SCU_AST2500_HW_STRAP_RESERVED1) -/* Swift hardware value: 0xF11AD206 */ -#define SWIFT_BMC_HW_STRAP1 ( \ - AST2500_HW_STRAP1_DEFAULTS | \ - SCU_AST2500_HW_STRAP_SPI_AUTOFETCH_ENABLE | \ - SCU_AST2500_HW_STRAP_GPIO_STRAP_ENABLE | \ - SCU_AST2500_HW_STRAP_UART_DEBUG | \ - SCU_AST2500_HW_STRAP_DDR4_ENABLE | \ - SCU_H_PLL_BYPASS_EN | \ - SCU_AST2500_HW_STRAP_ACPI_ENABLE | \ - SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_MASTER)) - #define G220A_BMC_HW_STRAP1 ( \ SCU_AST2500_HW_STRAP_SPI_AUTOFETCH_ENABLE | \ SCU_AST2500_HW_STRAP_GPIO_STRAP_ENABLE | \ @@ -131,6 +121,21 @@ struct AspeedMachineState { SCU_HW_STRAP_VGA_SIZE_SET(VGA_64M_DRAM) | \ SCU_AST2500_HW_STRAP_RESERVED1) +/* FP5280G2 hardware value: 0XF100D286 */ +#define FP5280G2_BMC_HW_STRAP1 ( \ + SCU_AST2500_HW_STRAP_SPI_AUTOFETCH_ENABLE | \ + SCU_AST2500_HW_STRAP_GPIO_STRAP_ENABLE | \ + SCU_AST2500_HW_STRAP_UART_DEBUG | \ + SCU_AST2500_HW_STRAP_RESERVED28 | \ + SCU_AST2500_HW_STRAP_DDR4_ENABLE | \ + SCU_HW_STRAP_VGA_CLASS_CODE | \ + SCU_HW_STRAP_LPC_RESET_PIN | \ + SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_MASTER) | \ + SCU_AST2500_HW_STRAP_SET_AXI_AHB_RATIO(AXI_AHB_RATIO_2_1) | \ + SCU_HW_STRAP_MAC1_RGMII | \ + SCU_HW_STRAP_VGA_SIZE_SET(VGA_16M_DRAM) | \ + SCU_AST2500_HW_STRAP_RESERVED1) + /* Witherspoon hardware value: 0xF10AD216 (but use romulus definition) */ #define WITHERSPOON_BMC_HW_STRAP1 ROMULUS_BMC_HW_STRAP1 @@ -156,29 +161,21 @@ struct AspeedMachineState { #define TACOMA_BMC_HW_STRAP2 0x00000040 /* Rainier hardware value: (QEMU prototype) */ -#define RAINIER_BMC_HW_STRAP1 0x00000000 -#define RAINIER_BMC_HW_STRAP2 0x00000000 +#define RAINIER_BMC_HW_STRAP1 0x00422016 +#define RAINIER_BMC_HW_STRAP2 0x80000848 -/* - * The max ram region is for firmwares that scan the address space - * with load/store to guess how much RAM the SoC has. - */ -static uint64_t max_ram_read(void *opaque, hwaddr offset, unsigned size) -{ - return 0; -} +/* Fuji hardware value */ +#define FUJI_BMC_HW_STRAP1 0x00000000 +#define FUJI_BMC_HW_STRAP2 0x00000000 -static void max_ram_write(void *opaque, hwaddr offset, uint64_t value, - unsigned size) -{ - /* Discard writes */ -} +/* Bletchley hardware value */ +/* TODO: Leave same as EVB for now. 
*/ +#define BLETCHLEY_BMC_HW_STRAP1 AST2600_EVB_HW_STRAP1 +#define BLETCHLEY_BMC_HW_STRAP2 AST2600_EVB_HW_STRAP2 -static const MemoryRegionOps max_ram_ops = { - .read = max_ram_read, - .write = max_ram_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; +/* Qualcomm DC-SCM hardware value */ +#define QCOM_DC_SCM_V1_BMC_HW_STRAP1 0x00000000 +#define QCOM_DC_SCM_V1_BMC_HW_STRAP2 0x00000041 #define AST_SMP_MAILBOX_BASE 0x1e6e2180 #define AST_SMP_MBOX_FIELD_ENTRY (AST_SMP_MAILBOX_BASE + 0x0) @@ -191,33 +188,35 @@ static const MemoryRegionOps max_ram_ops = { static void aspeed_write_smpboot(ARMCPU *cpu, const struct arm_boot_info *info) { - static const uint32_t poll_mailbox_ready[] = { + AddressSpace *as = arm_boot_address_space(cpu, info); + static const ARMInsnFixup poll_mailbox_ready[] = { /* * r2 = per-cpu go sign value * r1 = AST_SMP_MBOX_FIELD_ENTRY * r0 = AST_SMP_MBOX_FIELD_GOSIGN */ - 0xee100fb0, /* mrc p15, 0, r0, c0, c0, 5 */ - 0xe21000ff, /* ands r0, r0, #255 */ - 0xe59f201c, /* ldr r2, [pc, #28] */ - 0xe1822000, /* orr r2, r2, r0 */ + { 0xee100fb0 }, /* mrc p15, 0, r0, c0, c0, 5 */ + { 0xe21000ff }, /* ands r0, r0, #255 */ + { 0xe59f201c }, /* ldr r2, [pc, #28] */ + { 0xe1822000 }, /* orr r2, r2, r0 */ - 0xe59f1018, /* ldr r1, [pc, #24] */ - 0xe59f0018, /* ldr r0, [pc, #24] */ + { 0xe59f1018 }, /* ldr r1, [pc, #24] */ + { 0xe59f0018 }, /* ldr r0, [pc, #24] */ - 0xe320f002, /* wfe */ - 0xe5904000, /* ldr r4, [r0] */ - 0xe1520004, /* cmp r2, r4 */ - 0x1afffffb, /* bne */ - 0xe591f000, /* ldr pc, [r1] */ - AST_SMP_MBOX_GOSIGN, - AST_SMP_MBOX_FIELD_ENTRY, - AST_SMP_MBOX_FIELD_GOSIGN, + { 0xe320f002 }, /* wfe */ + { 0xe5904000 }, /* ldr r4, [r0] */ + { 0xe1520004 }, /* cmp r2, r4 */ + { 0x1afffffb }, /* bne */ + { 0xe591f000 }, /* ldr pc, [r1] */ + { AST_SMP_MBOX_GOSIGN }, + { AST_SMP_MBOX_FIELD_ENTRY }, + { AST_SMP_MBOX_FIELD_GOSIGN }, + { 0, FIXUP_TERMINATOR } }; + static const uint32_t fixupcontext[FIXUP_MAX] = { 0 }; - rom_add_blob_fixed("aspeed.smpboot", poll_mailbox_ready, - sizeof(poll_mailbox_ready), - info->smp_loader_start); + arm_write_bootloader("aspeed.smpboot", as, info->smp_loader_start, + poll_mailbox_ready, fixupcontext); } static void aspeed_reset_secondary(ARMCPU *cpu, @@ -238,7 +237,7 @@ static void write_boot_rom(DriveInfo *dinfo, hwaddr addr, size_t rom_size, Error **errp) { BlockBackend *blk = blk_by_legacy_dinfo(dinfo); - uint8_t *storage; + g_autofree void *storage = NULL; int64_t size; /* The block backend size should have already been 'validated' by @@ -254,34 +253,36 @@ static void write_boot_rom(DriveInfo *dinfo, hwaddr addr, size_t rom_size, rom_size = size; } - storage = g_new0(uint8_t, rom_size); - if (blk_pread(blk, 0, storage, rom_size) < 0) { + storage = g_malloc0(rom_size); + if (blk_pread(blk, 0, rom_size, storage, 0) < 0) { error_setg(errp, "failed to read the initial flash content"); return; } rom_add_blob_fixed("aspeed.boot_rom", storage, rom_size, addr); - g_free(storage); } -static void aspeed_board_init_flashes(AspeedSMCState *s, - const char *flashtype) +void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype, + unsigned int count, int unit0) { - int i ; + int i; - for (i = 0; i < s->num_cs; ++i) { - AspeedSMCFlash *fl = &s->flashes[i]; - DriveInfo *dinfo = drive_get_next(IF_MTD); + if (!flashtype) { + return; + } + + for (i = 0; i < count; ++i) { + DriveInfo *dinfo = drive_get(IF_MTD, 0, unit0 + i); qemu_irq cs_line; + DeviceState *dev; - fl->flash = qdev_new(flashtype); + dev = qdev_new(flashtype); if (dinfo) { - 
qdev_prop_set_drive(fl->flash, "drive", - blk_by_legacy_dinfo(dinfo)); + qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(dinfo)); } - qdev_realize_and_unref(fl->flash, BUS(s->spi), &error_fatal); + qdev_realize_and_unref(dev, BUS(s->spi), &error_fatal); - cs_line = qdev_get_gpio_in_named(fl->flash, SSI_GPIO_CS, 0); + cs_line = qdev_get_gpio_in_named(dev, SSI_GPIO_CS, 0); sysbus_connect_irq(SYS_BUS_DEVICE(s), i + 1, cs_line); } } @@ -301,26 +302,37 @@ static void sdhci_attach_drive(SDHCIState *sdhci, DriveInfo *dinfo) &error_fatal); } +static void connect_serial_hds_to_uarts(AspeedMachineState *bmc) +{ + AspeedMachineClass *amc = ASPEED_MACHINE_GET_CLASS(bmc); + AspeedSoCState *s = &bmc->soc; + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + + aspeed_soc_uart_set_chr(s, amc->uart_default, serial_hd(0)); + for (int i = 1, uart = ASPEED_DEV_UART1; i < sc->uarts_num; i++, uart++) { + if (uart == amc->uart_default) { + continue; + } + aspeed_soc_uart_set_chr(s, uart, serial_hd(i)); + } +} + static void aspeed_machine_init(MachineState *machine) { AspeedMachineState *bmc = ASPEED_MACHINE(machine); AspeedMachineClass *amc = ASPEED_MACHINE_GET_CLASS(machine); AspeedSoCClass *sc; DriveInfo *drive0 = drive_get(IF_MTD, 0, 0); - ram_addr_t max_ram_size; int i; NICInfo *nd = &nd_table[0]; - memory_region_init(&bmc->ram_container, NULL, "aspeed-ram-container", - 4 * GiB); - memory_region_add_subregion(&bmc->ram_container, 0, machine->ram); - object_initialize_child(OBJECT(machine), "soc", &bmc->soc, amc->soc_name); sc = ASPEED_SOC_GET_CLASS(&bmc->soc); /* - * This will error out if isize is not supported by memory controller. + * This will error out if the RAM size is not supported by the + * memory controller of the SoC. */ object_property_set_uint(OBJECT(&bmc->soc), "ram-size", machine->ram_size, &error_fatal); @@ -337,8 +349,8 @@ static void aspeed_machine_init(MachineState *machine) &error_abort); object_property_set_int(OBJECT(&bmc->soc), "hw-strap2", amc->hw_strap2, &error_abort); - object_property_set_int(OBJECT(&bmc->soc), "num-cs", amc->num_cs, - &error_abort); + object_property_set_link(OBJECT(&bmc->soc), "memory", + OBJECT(get_system_memory()), &error_abort); object_property_set_link(OBJECT(&bmc->soc), "dram", OBJECT(machine->ram), &error_abort); if (machine->kernel_filename) { @@ -350,27 +362,21 @@ static void aspeed_machine_init(MachineState *machine) object_property_set_int(OBJECT(&bmc->soc), "hw-prot-key", ASPEED_SCU_PROT_KEY, &error_abort); } + connect_serial_hds_to_uarts(bmc); qdev_realize(DEVICE(&bmc->soc), NULL, &error_abort); - memory_region_add_subregion(get_system_memory(), - sc->memmap[ASPEED_DEV_SDRAM], - &bmc->ram_container); - - max_ram_size = object_property_get_uint(OBJECT(&bmc->soc), "max-ram-size", - &error_abort); - memory_region_init_io(&bmc->max_ram, NULL, &max_ram_ops, NULL, - "max_ram", max_ram_size - machine->ram_size); - memory_region_add_subregion(&bmc->ram_container, machine->ram_size, &bmc->max_ram); - - aspeed_board_init_flashes(&bmc->soc.fmc, bmc->fmc_model ? - bmc->fmc_model : amc->fmc_model); - aspeed_board_init_flashes(&bmc->soc.spi[0], bmc->spi_model ? - bmc->spi_model : amc->spi_model); + aspeed_board_init_flashes(&bmc->soc.fmc, + bmc->fmc_model ? bmc->fmc_model : amc->fmc_model, + amc->num_cs, 0); + aspeed_board_init_flashes(&bmc->soc.spi[0], + bmc->spi_model ? bmc->spi_model : amc->spi_model, + 1, amc->num_cs); /* Install first FMC flash content as a boot rom. 
*/ if (drive0) { AspeedSMCFlash *fl = &bmc->soc.fmc.flashes[0]; MemoryRegion *boot_rom = g_new(MemoryRegion, 1); + uint64_t size = memory_region_size(&fl->mmio); /* * create a ROM region using the default mapping window size of @@ -380,15 +386,15 @@ static void aspeed_machine_init(MachineState *machine) */ if (ASPEED_MACHINE(machine)->mmio_exec) { memory_region_init_alias(boot_rom, NULL, "aspeed.boot_rom", - &fl->mmio, 0, fl->size); + &fl->mmio, 0, size); memory_region_add_subregion(get_system_memory(), FIRMWARE_ADDR, boot_rom); } else { memory_region_init_rom(boot_rom, NULL, "aspeed.boot_rom", - fl->size, &error_abort); + size, &error_abort); memory_region_add_subregion(get_system_memory(), FIRMWARE_ADDR, boot_rom); - write_boot_rom(drive0, FIRMWARE_ADDR, fl->size, &error_abort); + write_boot_rom(drive0, FIRMWARE_ADDR, size, &error_abort); } } @@ -407,23 +413,33 @@ static void aspeed_machine_init(MachineState *machine) aspeed_board_binfo.ram_size = machine->ram_size; aspeed_board_binfo.loader_start = sc->memmap[ASPEED_DEV_SDRAM]; - aspeed_board_binfo.nb_cpus = sc->num_cpus; if (amc->i2c_init) { amc->i2c_init(bmc); } for (i = 0; i < bmc->soc.sdhci.num_slots; i++) { - sdhci_attach_drive(&bmc->soc.sdhci.slots[i], drive_get_next(IF_SD)); + sdhci_attach_drive(&bmc->soc.sdhci.slots[i], + drive_get(IF_SD, 0, i)); } if (bmc->soc.emmc.num_slots) { - sdhci_attach_drive(&bmc->soc.emmc.slots[0], drive_get_next(IF_SD)); + sdhci_attach_drive(&bmc->soc.emmc.slots[0], + drive_get(IF_SD, 0, bmc->soc.sdhci.num_slots)); } arm_load_kernel(ARM_CPU(first_cpu), machine, &aspeed_board_binfo); } +static void at24c_eeprom_init(I2CBus *bus, uint8_t addr, uint32_t rsize) +{ + I2CSlave *i2c_dev = i2c_slave_new("at24c-eeprom", addr); + DeviceState *dev = DEVICE(i2c_dev); + + qdev_prop_set_uint32(dev, "rom-size", rsize); + i2c_slave_realize_and_unref(i2c_dev, bus, &error_abort); +} + static void palmetto_bmc_i2c_init(AspeedMachineState *bmc) { AspeedSoCState *soc = &bmc->soc; @@ -489,16 +505,19 @@ static void ast2500_evb_i2c_init(AspeedMachineState *bmc) /* The AST2500 EVB expects a LM75 but a TMP105 is compatible */ i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 7), TYPE_TMP105, 0x4d); - - /* The AST2500 EVB does not have an RTC. 
Let's pretend that one is - * plugged on the I2C bus header */ - i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), "ds1338", 0x32); } static void ast2600_evb_i2c_init(AspeedMachineState *bmc) { - /* Start with some devices on our I2C busses */ - ast2500_evb_i2c_init(bmc); + AspeedSoCState *soc = &bmc->soc; + uint8_t *eeprom_buf = g_malloc0(8 * 1024); + + smbus_eeprom_init_one(aspeed_i2c_get_bus(&soc->i2c, 7), 0x50, + eeprom_buf); + + /* LM75 is compatible with TMP105 driver */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), + TYPE_TMP105, 0x4d); } static void romulus_bmc_i2c_init(AspeedMachineState *bmc) @@ -510,33 +529,10 @@ static void romulus_bmc_i2c_init(AspeedMachineState *bmc) i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), "ds1338", 0x32); } -static void swift_bmc_i2c_init(AspeedMachineState *bmc) +static void create_pca9552(AspeedSoCState *soc, int bus_id, int addr) { - AspeedSoCState *soc = &bmc->soc; - - i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 3), "pca9552", 0x60); - - /* The swift board expects a TMP275 but a TMP105 is compatible */ - i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 7), "tmp105", 0x48); - /* The swift board expects a pca9551 but a pca9552 is compatible */ - i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 7), "pca9552", 0x60); - - /* The swift board expects an Epson RX8900 RTC but a ds1338 is compatible */ - i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), "ds1338", 0x32); - i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), "pca9552", 0x60); - - i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 9), "tmp423", 0x4c); - /* The swift board expects a pca9539 but a pca9552 is compatible */ - i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 9), "pca9552", 0x74); - - i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 10), "tmp423", 0x4c); - /* The swift board expects a pca9539 but a pca9552 is compatible */ - i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 10), "pca9552", - 0x74); - - /* The swift board expects a TMP275 but a TMP105 is compatible */ - i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 12), "tmp105", 0x48); - i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 12), "tmp105", 0x4a); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, bus_id), + TYPE_PCA9552, addr); } static void sonorapass_bmc_i2c_init(AspeedMachineState *bmc) @@ -555,9 +551,9 @@ static void sonorapass_bmc_i2c_init(AspeedMachineState *bmc) smbus_eeprom_init_one(aspeed_i2c_get_bus(&soc->i2c, 4), 0x54, eeprom4_54); /* PCA9539 @ 0x76, but PCA9552 is compatible */ - i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), "pca9552", 0x76); + create_pca9552(soc, 4, 0x76); /* PCA9539 @ 0x77, but PCA9552 is compatible */ - i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), "pca9552", 0x77); + create_pca9552(soc, 4, 0x77); /* bus 6 : */ i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 6), "tmp105", 0x48); @@ -568,8 +564,8 @@ static void sonorapass_bmc_i2c_init(AspeedMachineState *bmc) uint8_t *eeprom8_56 = g_malloc0(8 * 1024); smbus_eeprom_init_one(aspeed_i2c_get_bus(&soc->i2c, 8), 0x56, eeprom8_56); - i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), "pca9552", 0x60); - i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), "pca9552", 0x61); + create_pca9552(soc, 8, 0x60); + create_pca9552(soc, 8, 0x61); /* bus 8 : adc128d818 @ 0x1d */ /* bus 8 : adc128d818 @ 0x1f */ @@ -601,8 +597,6 @@ static void witherspoon_bmc_i2c_init(AspeedMachineState *bmc) LEDState *led; /* Bus 3: 
TODO bmp280@77 */ - /* Bus 3: TODO max31785@52 */ - /* Bus 3: TODO dps310@76 */ dev = DEVICE(i2c_slave_new(TYPE_PCA9552, 0x60)); qdev_prop_set_string(dev, "description", "pca1"); i2c_slave_realize_and_unref(I2C_SLAVE(dev), @@ -617,6 +611,8 @@ static void witherspoon_bmc_i2c_init(AspeedMachineState *bmc) qdev_connect_gpio_out(dev, pca1_leds[i].gpio_id, qdev_get_gpio_in(DEVICE(led), 0)); } + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 3), "dps310", 0x76); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 3), "max31785", 0x52); i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), "tmp423", 0x4c); i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 5), "tmp423", 0x4c); @@ -674,9 +670,50 @@ static void g220a_bmc_i2c_init(AspeedMachineState *bmc) eeprom_buf); } +static void aspeed_eeprom_init(I2CBus *bus, uint8_t addr, uint32_t rsize) +{ + I2CSlave *i2c_dev = i2c_slave_new("at24c-eeprom", addr); + DeviceState *dev = DEVICE(i2c_dev); + + qdev_prop_set_uint32(dev, "rom-size", rsize); + i2c_slave_realize_and_unref(i2c_dev, bus, &error_abort); +} + +static void fp5280g2_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + I2CSlave *i2c_mux; + + /* The at24c256 */ + at24c_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 1), 0x50, 32768); + + /* The fp5280g2 expects a TMP112 but a TMP105 is compatible */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 2), TYPE_TMP105, + 0x48); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 2), TYPE_TMP105, + 0x49); + + i2c_mux = i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 2), + "pca9546", 0x70); + /* It expects a TMP112 but a TMP105 is compatible */ + i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 0), TYPE_TMP105, + 0x4a); + + /* It expects a ds3232 but a ds1338 is good enough */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), "ds1338", 0x68); + + /* It expects a pca9555 but a pca9552 is compatible */ + create_pca9552(soc, 8, 0x30); +} + static void rainier_bmc_i2c_init(AspeedMachineState *bmc) { AspeedSoCState *soc = &bmc->soc; + I2CSlave *i2c_mux; + + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 0), 0x51, 32 * KiB); + + create_pca9552(soc, 3, 0x61); /* The rainier expects a TMP275 but a TMP105 is compatible */ i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), TYPE_TMP105, @@ -685,11 +722,23 @@ static void rainier_bmc_i2c_init(AspeedMachineState *bmc) 0x49); i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), TYPE_TMP105, 0x4a); + i2c_mux = i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), + "pca9546", 0x70); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 0), 0x50, 64 * KiB); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 1), 0x51, 64 * KiB); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 2), 0x52, 64 * KiB); + create_pca9552(soc, 4, 0x60); i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 5), TYPE_TMP105, 0x48); i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 5), TYPE_TMP105, 0x49); + create_pca9552(soc, 5, 0x60); + create_pca9552(soc, 5, 0x61); + i2c_mux = i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 5), + "pca9546", 0x70); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 0), 0x50, 64 * KiB); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 1), 0x51, 64 * KiB); i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 6), TYPE_TMP105, 0x48); @@ -697,33 +746,267 @@ static void rainier_bmc_i2c_init(AspeedMachineState *bmc) 0x4a); i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 6), TYPE_TMP105, 0x4b); + i2c_mux = 
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 6), + "pca9546", 0x70); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 0), 0x50, 64 * KiB); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 1), 0x51, 64 * KiB); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 2), 0x50, 64 * KiB); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 3), 0x51, 64 * KiB); - /* Bus 7: TODO dps310@76 */ - /* Bus 7: TODO max31785@52 */ - i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 7), "pca9552", 0x61); + create_pca9552(soc, 7, 0x30); + create_pca9552(soc, 7, 0x31); + create_pca9552(soc, 7, 0x32); + create_pca9552(soc, 7, 0x33); + create_pca9552(soc, 7, 0x60); + create_pca9552(soc, 7, 0x61); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 7), "dps310", 0x76); /* Bus 7: TODO si7021-a20@20 */ i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 7), TYPE_TMP105, 0x48); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 7), "max31785", 0x52); + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 7), 0x50, 64 * KiB); + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 7), 0x51, 64 * KiB); i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), TYPE_TMP105, 0x48); i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), TYPE_TMP105, 0x4a); - i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), "pca9552", 0x61); + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 8), 0x50, 64 * KiB); + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 8), 0x51, 64 * KiB); + create_pca9552(soc, 8, 0x60); + create_pca9552(soc, 8, 0x61); /* Bus 8: ucd90320@11 */ /* Bus 8: ucd90320@b */ /* Bus 8: ucd90320@c */ i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 9), "tmp423", 0x4c); i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 9), "tmp423", 0x4d); + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 9), 0x50, 128 * KiB); i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 10), "tmp423", 0x4c); i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 10), "tmp423", 0x4d); + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 10), 0x50, 128 * KiB); i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), TYPE_TMP105, 0x48); i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), TYPE_TMP105, 0x49); + i2c_mux = i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), + "pca9546", 0x70); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 0), 0x50, 64 * KiB); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 1), 0x51, 64 * KiB); + create_pca9552(soc, 11, 0x60); + + + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 13), 0x50, 64 * KiB); + create_pca9552(soc, 13, 0x60); + + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 14), 0x50, 64 * KiB); + create_pca9552(soc, 14, 0x60); + + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 15), 0x50, 64 * KiB); + create_pca9552(soc, 15, 0x60); +} + +static void get_pca9548_channels(I2CBus *bus, uint8_t mux_addr, + I2CBus **channels) +{ + I2CSlave *mux = i2c_slave_create_simple(bus, "pca9548", mux_addr); + for (int i = 0; i < 8; i++) { + channels[i] = pca954x_i2c_get_bus(mux, i); + } +} + +#define TYPE_LM75 TYPE_TMP105 +#define TYPE_TMP75 TYPE_TMP105 +#define TYPE_TMP422 "tmp422" + +static void fuji_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + I2CBus *i2c[144] = {}; + + for (int i = 0; i < 16; i++) { + i2c[i] = aspeed_i2c_get_bus(&soc->i2c, i); + } + I2CBus *i2c180 = i2c[2]; + I2CBus *i2c480 = i2c[8]; + I2CBus *i2c600 = i2c[11]; + + get_pca9548_channels(i2c180, 0x70, &i2c[16]); + get_pca9548_channels(i2c480, 0x70, &i2c[24]); + /* 
NOTE: The device tree skips [32, 40) in the alias numbering */ + get_pca9548_channels(i2c600, 0x77, &i2c[40]); + get_pca9548_channels(i2c[24], 0x71, &i2c[48]); + get_pca9548_channels(i2c[25], 0x72, &i2c[56]); + get_pca9548_channels(i2c[26], 0x76, &i2c[64]); + get_pca9548_channels(i2c[27], 0x76, &i2c[72]); + for (int i = 0; i < 8; i++) { + get_pca9548_channels(i2c[40 + i], 0x76, &i2c[80 + i * 8]); + } + + i2c_slave_create_simple(i2c[17], TYPE_LM75, 0x4c); + i2c_slave_create_simple(i2c[17], TYPE_LM75, 0x4d); + + aspeed_eeprom_init(i2c[19], 0x52, 64 * KiB); + aspeed_eeprom_init(i2c[20], 0x50, 2 * KiB); + aspeed_eeprom_init(i2c[22], 0x52, 2 * KiB); + + i2c_slave_create_simple(i2c[3], TYPE_LM75, 0x48); + i2c_slave_create_simple(i2c[3], TYPE_LM75, 0x49); + i2c_slave_create_simple(i2c[3], TYPE_LM75, 0x4a); + i2c_slave_create_simple(i2c[3], TYPE_TMP422, 0x4c); + + aspeed_eeprom_init(i2c[8], 0x51, 64 * KiB); + i2c_slave_create_simple(i2c[8], TYPE_LM75, 0x4a); + + i2c_slave_create_simple(i2c[50], TYPE_LM75, 0x4c); + aspeed_eeprom_init(i2c[50], 0x52, 64 * KiB); + i2c_slave_create_simple(i2c[51], TYPE_TMP75, 0x48); + i2c_slave_create_simple(i2c[52], TYPE_TMP75, 0x49); + + i2c_slave_create_simple(i2c[59], TYPE_TMP75, 0x48); + i2c_slave_create_simple(i2c[60], TYPE_TMP75, 0x49); + + aspeed_eeprom_init(i2c[65], 0x53, 64 * KiB); + i2c_slave_create_simple(i2c[66], TYPE_TMP75, 0x49); + i2c_slave_create_simple(i2c[66], TYPE_TMP75, 0x48); + aspeed_eeprom_init(i2c[68], 0x52, 64 * KiB); + aspeed_eeprom_init(i2c[69], 0x52, 64 * KiB); + aspeed_eeprom_init(i2c[70], 0x52, 64 * KiB); + aspeed_eeprom_init(i2c[71], 0x52, 64 * KiB); + + aspeed_eeprom_init(i2c[73], 0x53, 64 * KiB); + i2c_slave_create_simple(i2c[74], TYPE_TMP75, 0x49); + i2c_slave_create_simple(i2c[74], TYPE_TMP75, 0x48); + aspeed_eeprom_init(i2c[76], 0x52, 64 * KiB); + aspeed_eeprom_init(i2c[77], 0x52, 64 * KiB); + aspeed_eeprom_init(i2c[78], 0x52, 64 * KiB); + aspeed_eeprom_init(i2c[79], 0x52, 64 * KiB); + aspeed_eeprom_init(i2c[28], 0x50, 2 * KiB); + + for (int i = 0; i < 8; i++) { + aspeed_eeprom_init(i2c[81 + i * 8], 0x56, 64 * KiB); + i2c_slave_create_simple(i2c[82 + i * 8], TYPE_TMP75, 0x48); + i2c_slave_create_simple(i2c[83 + i * 8], TYPE_TMP75, 0x4b); + i2c_slave_create_simple(i2c[84 + i * 8], TYPE_TMP75, 0x4a); + } +} + +#define TYPE_TMP421 "tmp421" + +static void bletchley_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + I2CBus *i2c[13] = {}; + for (int i = 0; i < 13; i++) { + if ((i == 8) || (i == 11)) { + continue; + } + i2c[i] = aspeed_i2c_get_bus(&soc->i2c, i); + } + + /* Bus 0 - 5 all have the same config. 
*/ + for (int i = 0; i < 6; i++) { + /* Missing model: ti,ina230 @ 0x45 */ + /* Missing model: mps,mp5023 @ 0x40 */ + i2c_slave_create_simple(i2c[i], TYPE_TMP421, 0x4f); + /* Missing model: nxp,pca9539 @ 0x76, but PCA9552 works enough */ + i2c_slave_create_simple(i2c[i], TYPE_PCA9552, 0x76); + i2c_slave_create_simple(i2c[i], TYPE_PCA9552, 0x67); + /* Missing model: fsc,fusb302 @ 0x22 */ + } + + /* Bus 6 */ + at24c_eeprom_init(i2c[6], 0x56, 65536); + /* Missing model: nxp,pcf85263 @ 0x51 , but ds1338 works enough */ + i2c_slave_create_simple(i2c[6], "ds1338", 0x51); + + + /* Bus 7 */ + at24c_eeprom_init(i2c[7], 0x54, 65536); + + /* Bus 9 */ + i2c_slave_create_simple(i2c[9], TYPE_TMP421, 0x4f); + + /* Bus 10 */ + i2c_slave_create_simple(i2c[10], TYPE_TMP421, 0x4f); + /* Missing model: ti,hdc1080 @ 0x40 */ + i2c_slave_create_simple(i2c[10], TYPE_PCA9552, 0x67); + + /* Bus 12 */ + /* Missing model: adi,adm1278 @ 0x11 */ + i2c_slave_create_simple(i2c[12], TYPE_TMP421, 0x4c); + i2c_slave_create_simple(i2c[12], TYPE_TMP421, 0x4d); + i2c_slave_create_simple(i2c[12], TYPE_PCA9552, 0x67); +} + +static void fby35_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + I2CBus *i2c[16]; + + for (int i = 0; i < 16; i++) { + i2c[i] = aspeed_i2c_get_bus(&soc->i2c, i); + } + + i2c_slave_create_simple(i2c[2], TYPE_LM75, 0x4f); + i2c_slave_create_simple(i2c[8], TYPE_TMP421, 0x1f); + /* Hotswap controller is actually supposed to be mp5920 or ltc4282. */ + i2c_slave_create_simple(i2c[11], "adm1272", 0x44); + i2c_slave_create_simple(i2c[12], TYPE_LM75, 0x4e); + i2c_slave_create_simple(i2c[12], TYPE_LM75, 0x4f); + + aspeed_eeprom_init(i2c[4], 0x51, 128 * KiB); + aspeed_eeprom_init(i2c[6], 0x51, 128 * KiB); + aspeed_eeprom_init(i2c[8], 0x50, 32 * KiB); + aspeed_eeprom_init(i2c[11], 0x51, 128 * KiB); + aspeed_eeprom_init(i2c[11], 0x54, 128 * KiB); + + /* + * TODO: There is a multi-master i2c connection to an AST1030 MiniBMC on + * buses 0, 1, 2, 3, and 9. Source address 0x10, target address 0x20 on + * each. 
+ */ +} + +static void qcom_dc_scm_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 15), "tmp105", 0x4d); +} + +static void qcom_dc_scm_firework_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + I2CSlave *therm_mux, *cpuvr_mux; + + /* Create the generic DC-SCM hardware */ + qcom_dc_scm_bmc_i2c_init(bmc); + + /* Now create the Firework specific hardware */ + + /* I2C7 CPUVR MUX */ + cpuvr_mux = i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 7), + "pca9546", 0x70); + i2c_slave_create_simple(pca954x_i2c_get_bus(cpuvr_mux, 0), "pca9548", 0x72); + i2c_slave_create_simple(pca954x_i2c_get_bus(cpuvr_mux, 1), "pca9548", 0x72); + i2c_slave_create_simple(pca954x_i2c_get_bus(cpuvr_mux, 2), "pca9548", 0x72); + i2c_slave_create_simple(pca954x_i2c_get_bus(cpuvr_mux, 3), "pca9548", 0x72); + + /* I2C8 Thermal Diodes*/ + therm_mux = i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), + "pca9548", 0x70); + i2c_slave_create_simple(pca954x_i2c_get_bus(therm_mux, 0), TYPE_LM75, 0x4C); + i2c_slave_create_simple(pca954x_i2c_get_bus(therm_mux, 1), TYPE_LM75, 0x4C); + i2c_slave_create_simple(pca954x_i2c_get_bus(therm_mux, 2), TYPE_LM75, 0x48); + i2c_slave_create_simple(pca954x_i2c_get_bus(therm_mux, 3), TYPE_LM75, 0x48); + i2c_slave_create_simple(pca954x_i2c_get_bus(therm_mux, 4), TYPE_LM75, 0x48); + + /* I2C9 Fan Controller (MAX31785) */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 9), "max31785", 0x52); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 9), "max31785", 0x54); } static bool aspeed_get_mmio_exec(Object *obj, Error **errp) @@ -804,6 +1087,7 @@ static void aspeed_machine_class_init(ObjectClass *oc, void *data) mc->no_parallel = 1; mc->default_ram_id = "ram"; amc->macs_mask = ASPEED_MAC0_ON; + amc->uart_default = ASPEED_DEV_UART5; aspeed_machine_class_props_init(oc); } @@ -817,7 +1101,7 @@ static void aspeed_machine_palmetto_class_init(ObjectClass *oc, void *data) amc->soc_name = "ast2400-a1"; amc->hw_strap1 = PALMETTO_BMC_HW_STRAP1; amc->fmc_model = "n25q256a"; - amc->spi_model = "mx25l25635e"; + amc->spi_model = "mx25l25635f"; amc->num_cs = 1; amc->i2c_init = palmetto_bmc_i2c_init; mc->default_ram_size = 256 * MiB; @@ -867,8 +1151,8 @@ static void aspeed_machine_ast2500_evb_class_init(ObjectClass *oc, void *data) mc->desc = "Aspeed AST2500 EVB (ARM1176)"; amc->soc_name = "ast2500-a1"; amc->hw_strap1 = AST2500_EVB_HW_STRAP1; - amc->fmc_model = "w25q256"; - amc->spi_model = "mx25l25635e"; + amc->fmc_model = "mx25l25635e"; + amc->spi_model = "mx25l25635f"; amc->num_cs = 1; amc->i2c_init = ast2500_evb_i2c_init; mc->default_ram_size = 512 * MiB; @@ -910,26 +1194,6 @@ static void aspeed_machine_sonorapass_class_init(ObjectClass *oc, void *data) aspeed_soc_num_cpus(amc->soc_name); }; -static void aspeed_machine_swift_class_init(ObjectClass *oc, void *data) -{ - MachineClass *mc = MACHINE_CLASS(oc); - AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); - - mc->desc = "OpenPOWER Swift BMC (ARM1176)"; - amc->soc_name = "ast2500-a1"; - amc->hw_strap1 = SWIFT_BMC_HW_STRAP1; - amc->fmc_model = "mx66l1g45g"; - amc->spi_model = "mx66l1g45g"; - amc->num_cs = 2; - amc->i2c_init = swift_bmc_i2c_init; - mc->default_ram_size = 512 * MiB; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - aspeed_soc_num_cpus(amc->soc_name); - - mc->deprecation_reason = "redundant system. 
Please use a similar " - "OpenPOWER BMC, Witherspoon or Romulus."; -}; - static void aspeed_machine_witherspoon_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -938,7 +1202,7 @@ static void aspeed_machine_witherspoon_class_init(ObjectClass *oc, void *data) mc->desc = "OpenPOWER Witherspoon BMC (ARM1176)"; amc->soc_name = "ast2500-a1"; amc->hw_strap1 = WITHERSPOON_BMC_HW_STRAP1; - amc->fmc_model = "mx25l25635e"; + amc->fmc_model = "mx25l25635f"; amc->spi_model = "mx66l1g45g"; amc->num_cs = 2; amc->i2c_init = witherspoon_bmc_i2c_init; @@ -953,13 +1217,14 @@ static void aspeed_machine_ast2600_evb_class_init(ObjectClass *oc, void *data) AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); mc->desc = "Aspeed AST2600 EVB (Cortex-A7)"; - amc->soc_name = "ast2600-a1"; + amc->soc_name = "ast2600-a3"; amc->hw_strap1 = AST2600_EVB_HW_STRAP1; amc->hw_strap2 = AST2600_EVB_HW_STRAP2; - amc->fmc_model = "w25q512jv"; + amc->fmc_model = "mx66u51235f"; amc->spi_model = "mx66u51235f"; amc->num_cs = 1; - amc->macs_mask = ASPEED_MAC1_ON | ASPEED_MAC2_ON | ASPEED_MAC3_ON; + amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON | ASPEED_MAC2_ON | + ASPEED_MAC3_ON; amc->i2c_init = ast2600_evb_i2c_init; mc->default_ram_size = 1 * GiB; mc->default_cpus = mc->min_cpus = mc->max_cpus = @@ -972,7 +1237,7 @@ static void aspeed_machine_tacoma_class_init(ObjectClass *oc, void *data) AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); mc->desc = "OpenPOWER Tacoma BMC (Cortex-A7)"; - amc->soc_name = "ast2600-a1"; + amc->soc_name = "ast2600-a3"; amc->hw_strap1 = TACOMA_BMC_HW_STRAP1; amc->hw_strap2 = TACOMA_BMC_HW_STRAP2; amc->fmc_model = "mx66l1g45g"; @@ -996,20 +1261,38 @@ static void aspeed_machine_g220a_class_init(ObjectClass *oc, void *data) amc->fmc_model = "n25q512a"; amc->spi_model = "mx25l25635e"; amc->num_cs = 2; - amc->macs_mask = ASPEED_MAC1_ON | ASPEED_MAC2_ON; + amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; amc->i2c_init = g220a_bmc_i2c_init; mc->default_ram_size = 1024 * MiB; mc->default_cpus = mc->min_cpus = mc->max_cpus = aspeed_soc_num_cpus(amc->soc_name); }; +static void aspeed_machine_fp5280g2_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Inspur FP5280G2 BMC (ARM1176)"; + amc->soc_name = "ast2500-a1"; + amc->hw_strap1 = FP5280G2_BMC_HW_STRAP1; + amc->fmc_model = "n25q512a"; + amc->spi_model = "mx25l25635e"; + amc->num_cs = 2; + amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; + amc->i2c_init = fp5280g2_bmc_i2c_init; + mc->default_ram_size = 512 * MiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + static void aspeed_machine_rainier_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); mc->desc = "IBM Rainier BMC (Cortex-A7)"; - amc->soc_name = "ast2600-a1"; + amc->soc_name = "ast2600-a3"; amc->hw_strap1 = RAINIER_BMC_HW_STRAP1; amc->hw_strap2 = RAINIER_BMC_HW_STRAP2; amc->fmc_model = "mx66l1g45g"; @@ -1022,6 +1305,216 @@ static void aspeed_machine_rainier_class_init(ObjectClass *oc, void *data) aspeed_soc_num_cpus(amc->soc_name); }; +/* On 32-bit hosts, lower RAM to 1G because of the 2047 MB limit */ +#if HOST_LONG_BITS == 32 +#define FUJI_BMC_RAM_SIZE (1 * GiB) +#else +#define FUJI_BMC_RAM_SIZE (2 * GiB) +#endif + +static void aspeed_machine_fuji_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + 
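/*
 * Editor's note, not part of the patch: a minimal standalone model of the
 * serial_hd() <-> UART wiring done by the connect_serial_hds_to_uarts()
 * helper added earlier in this file. It assumes 13 UARTs (as on the
 * AST2600/AST1030); NUM_UARTS and print_mapping() are illustrative names,
 * not QEMU API. serial_hd(0) always goes to the machine's uart_default,
 * every other UARTn keeps its positional index n, and the index that
 * coincides with the default UART is simply left unused. The Fuji class
 * below overrides uart_default to ASPEED_DEV_UART1, so serial_hd(0) lands
 * on UART1 and serial_hd(1) goes unconnected.
 */
#include <stdio.h>

#define NUM_UARTS 13

static void print_mapping(int uart_default)
{
    printf("uart_default = UART%d\n", uart_default);
    for (int uart = 1; uart <= NUM_UARTS; uart++) {
        /* default UART takes serial_hd(0); the rest keep their own index */
        int idx = (uart == uart_default) ? 0 : uart;
        printf("  UART%-2d <- serial_hd(%d)\n", uart, idx);
    }
}

int main(void)
{
    print_mapping(5);   /* most boards keep the default console on UART5 */
    print_mapping(1);   /* Fuji: amc->uart_default = ASPEED_DEV_UART1 */
    return 0;
}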
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Facebook Fuji BMC (Cortex-A7)"; + amc->soc_name = "ast2600-a3"; + amc->hw_strap1 = FUJI_BMC_HW_STRAP1; + amc->hw_strap2 = FUJI_BMC_HW_STRAP2; + amc->fmc_model = "mx66l1g45g"; + amc->spi_model = "mx66l1g45g"; + amc->num_cs = 2; + amc->macs_mask = ASPEED_MAC3_ON; + amc->i2c_init = fuji_bmc_i2c_init; + amc->uart_default = ASPEED_DEV_UART1; + mc->default_ram_size = FUJI_BMC_RAM_SIZE; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + +/* On 32-bit hosts, lower RAM to 1G because of the 2047 MB limit */ +#if HOST_LONG_BITS == 32 +#define BLETCHLEY_BMC_RAM_SIZE (1 * GiB) +#else +#define BLETCHLEY_BMC_RAM_SIZE (2 * GiB) +#endif + +static void aspeed_machine_bletchley_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Facebook Bletchley BMC (Cortex-A7)"; + amc->soc_name = "ast2600-a3"; + amc->hw_strap1 = BLETCHLEY_BMC_HW_STRAP1; + amc->hw_strap2 = BLETCHLEY_BMC_HW_STRAP2; + amc->fmc_model = "w25q01jvq"; + amc->spi_model = NULL; + amc->num_cs = 2; + amc->macs_mask = ASPEED_MAC2_ON; + amc->i2c_init = bletchley_bmc_i2c_init; + mc->default_ram_size = BLETCHLEY_BMC_RAM_SIZE; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +} + +static void fby35_reset(MachineState *state, ShutdownCause reason) +{ + AspeedMachineState *bmc = ASPEED_MACHINE(state); + AspeedGPIOState *gpio = &bmc->soc.gpio; + + qemu_devices_reset(reason); + + /* Board ID: 7 (Class-1, 4 slots) */ + object_property_set_bool(OBJECT(gpio), "gpioV4", true, &error_fatal); + object_property_set_bool(OBJECT(gpio), "gpioV5", true, &error_fatal); + object_property_set_bool(OBJECT(gpio), "gpioV6", true, &error_fatal); + object_property_set_bool(OBJECT(gpio), "gpioV7", false, &error_fatal); + + /* Slot presence pins, inverse polarity. (False means present) */ + object_property_set_bool(OBJECT(gpio), "gpioH4", false, &error_fatal); + object_property_set_bool(OBJECT(gpio), "gpioH5", true, &error_fatal); + object_property_set_bool(OBJECT(gpio), "gpioH6", true, &error_fatal); + object_property_set_bool(OBJECT(gpio), "gpioH7", true, &error_fatal); + + /* Slot 12v power pins, normal polarity. 
(True means powered-on) */ + object_property_set_bool(OBJECT(gpio), "gpioB2", true, &error_fatal); + object_property_set_bool(OBJECT(gpio), "gpioB3", false, &error_fatal); + object_property_set_bool(OBJECT(gpio), "gpioB4", false, &error_fatal); + object_property_set_bool(OBJECT(gpio), "gpioB5", false, &error_fatal); +} + +static void aspeed_machine_fby35_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Facebook fby35 BMC (Cortex-A7)"; + mc->reset = fby35_reset; + amc->fmc_model = "mx66l1g45g"; + amc->num_cs = 2; + amc->macs_mask = ASPEED_MAC3_ON; + amc->i2c_init = fby35_i2c_init; + /* FIXME: Replace this macro with something more general */ + mc->default_ram_size = FUJI_BMC_RAM_SIZE; +} + +#define AST1030_INTERNAL_FLASH_SIZE (1024 * 1024) +/* Main SYSCLK frequency in Hz (200MHz) */ +#define SYSCLK_FRQ 200000000ULL + +static void aspeed_minibmc_machine_init(MachineState *machine) +{ + AspeedMachineState *bmc = ASPEED_MACHINE(machine); + AspeedMachineClass *amc = ASPEED_MACHINE_GET_CLASS(machine); + Clock *sysclk; + + sysclk = clock_new(OBJECT(machine), "SYSCLK"); + clock_set_hz(sysclk, SYSCLK_FRQ); + + object_initialize_child(OBJECT(machine), "soc", &bmc->soc, amc->soc_name); + qdev_connect_clock_in(DEVICE(&bmc->soc), "sysclk", sysclk); + + object_property_set_link(OBJECT(&bmc->soc), "memory", + OBJECT(get_system_memory()), &error_abort); + connect_serial_hds_to_uarts(bmc); + qdev_realize(DEVICE(&bmc->soc), NULL, &error_abort); + + aspeed_board_init_flashes(&bmc->soc.fmc, + bmc->fmc_model ? bmc->fmc_model : amc->fmc_model, + amc->num_cs, + 0); + + aspeed_board_init_flashes(&bmc->soc.spi[0], + bmc->spi_model ? bmc->spi_model : amc->spi_model, + amc->num_cs, amc->num_cs); + + aspeed_board_init_flashes(&bmc->soc.spi[1], + bmc->spi_model ? 
bmc->spi_model : amc->spi_model, + amc->num_cs, (amc->num_cs * 2)); + + if (amc->i2c_init) { + amc->i2c_init(bmc); + } + + armv7m_load_kernel(ARM_CPU(first_cpu), + machine->kernel_filename, + 0, + AST1030_INTERNAL_FLASH_SIZE); +} + +static void ast1030_evb_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + + /* U10 24C08 connects to SDA/SCL Groupt 1 by default */ + uint8_t *eeprom_buf = g_malloc0(32 * 1024); + smbus_eeprom_init_one(aspeed_i2c_get_bus(&soc->i2c, 0), 0x50, eeprom_buf); + + /* U11 LM75 connects to SDA/SCL Group 2 by default */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 1), "tmp105", 0x4d); +} + +static void aspeed_minibmc_machine_ast1030_evb_class_init(ObjectClass *oc, + void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Aspeed AST1030 MiniBMC (Cortex-M4)"; + amc->soc_name = "ast1030-a1"; + amc->hw_strap1 = 0; + amc->hw_strap2 = 0; + mc->init = aspeed_minibmc_machine_init; + amc->i2c_init = ast1030_evb_i2c_init; + mc->default_ram_size = 0; + mc->default_cpus = mc->min_cpus = mc->max_cpus = 1; + amc->fmc_model = "sst25vf032b"; + amc->spi_model = "sst25vf032b"; + amc->num_cs = 2; + amc->macs_mask = 0; +} + +static void aspeed_machine_qcom_dc_scm_v1_class_init(ObjectClass *oc, + void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Qualcomm DC-SCM V1 BMC (Cortex A7)"; + amc->soc_name = "ast2600-a3"; + amc->hw_strap1 = QCOM_DC_SCM_V1_BMC_HW_STRAP1; + amc->hw_strap2 = QCOM_DC_SCM_V1_BMC_HW_STRAP2; + amc->fmc_model = "n25q512a"; + amc->spi_model = "n25q512a"; + amc->num_cs = 2; + amc->macs_mask = ASPEED_MAC2_ON | ASPEED_MAC3_ON; + amc->i2c_init = qcom_dc_scm_bmc_i2c_init; + mc->default_ram_size = 1 * GiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + +static void aspeed_machine_qcom_firework_class_init(ObjectClass *oc, + void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Qualcomm DC-SCM V1/Firework BMC (Cortex A7)"; + amc->soc_name = "ast2600-a3"; + amc->hw_strap1 = QCOM_DC_SCM_V1_BMC_HW_STRAP1; + amc->hw_strap2 = QCOM_DC_SCM_V1_BMC_HW_STRAP2; + amc->fmc_model = "n25q512a"; + amc->spi_model = "n25q512a"; + amc->num_cs = 2; + amc->macs_mask = ASPEED_MAC2_ON | ASPEED_MAC3_ON; + amc->i2c_init = qcom_dc_scm_firework_i2c_init; + mc->default_ram_size = 1 * GiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + static const TypeInfo aspeed_machine_types[] = { { .name = MACHINE_TYPE_NAME("palmetto-bmc"), @@ -1039,10 +1532,6 @@ static const TypeInfo aspeed_machine_types[] = { .name = MACHINE_TYPE_NAME("romulus-bmc"), .parent = TYPE_ASPEED_MACHINE, .class_init = aspeed_machine_romulus_class_init, - }, { - .name = MACHINE_TYPE_NAME("swift-bmc"), - .parent = TYPE_ASPEED_MACHINE, - .class_init = aspeed_machine_swift_class_init, }, { .name = MACHINE_TYPE_NAME("sonorapass-bmc"), .parent = TYPE_ASPEED_MACHINE, @@ -1063,6 +1552,18 @@ static const TypeInfo aspeed_machine_types[] = { .name = MACHINE_TYPE_NAME("g220a-bmc"), .parent = TYPE_ASPEED_MACHINE, .class_init = aspeed_machine_g220a_class_init, + }, { + .name = MACHINE_TYPE_NAME("qcom-dc-scm-v1-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_qcom_dc_scm_v1_class_init, + }, { + .name = MACHINE_TYPE_NAME("qcom-firework-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = 
aspeed_machine_qcom_firework_class_init, + }, { + .name = MACHINE_TYPE_NAME("fp5280g2-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_fp5280g2_class_init, }, { .name = MACHINE_TYPE_NAME("quanta-q71l-bmc"), .parent = TYPE_ASPEED_MACHINE, @@ -1071,6 +1572,22 @@ static const TypeInfo aspeed_machine_types[] = { .name = MACHINE_TYPE_NAME("rainier-bmc"), .parent = TYPE_ASPEED_MACHINE, .class_init = aspeed_machine_rainier_class_init, + }, { + .name = MACHINE_TYPE_NAME("fuji-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_fuji_class_init, + }, { + .name = MACHINE_TYPE_NAME("bletchley-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_bletchley_class_init, + }, { + .name = MACHINE_TYPE_NAME("fby35-bmc"), + .parent = MACHINE_TYPE_NAME("ast2600-evb"), + .class_init = aspeed_machine_fby35_class_init, + }, { + .name = MACHINE_TYPE_NAME("ast1030-evb"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_minibmc_machine_ast1030_evb_class_init, }, { .name = TYPE_ASPEED_MACHINE, .parent = TYPE_MACHINE, diff --git a/hw/arm/aspeed_ast10x0.c b/hw/arm/aspeed_ast10x0.c new file mode 100644 index 0000000000..4d0b9b115f --- /dev/null +++ b/hw/arm/aspeed_ast10x0.c @@ -0,0 +1,377 @@ +/* + * ASPEED Ast10x0 SoC + * + * Copyright (C) 2022 ASPEED Technology Inc. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. + * + * Implementation extracted from the AST2600 and adapted for Ast10x0. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "exec/address-spaces.h" +#include "sysemu/sysemu.h" +#include "hw/qdev-clock.h" +#include "hw/misc/unimp.h" +#include "hw/arm/aspeed_soc.h" + +#define ASPEED_SOC_IOMEM_SIZE 0x00200000 + +static const hwaddr aspeed_soc_ast1030_memmap[] = { + [ASPEED_DEV_SRAM] = 0x00000000, + [ASPEED_DEV_SBC] = 0x79000000, + [ASPEED_DEV_IOMEM] = 0x7E600000, + [ASPEED_DEV_PWM] = 0x7E610000, + [ASPEED_DEV_FMC] = 0x7E620000, + [ASPEED_DEV_SPI1] = 0x7E630000, + [ASPEED_DEV_SPI2] = 0x7E640000, + [ASPEED_DEV_SCU] = 0x7E6E2000, + [ASPEED_DEV_ADC] = 0x7E6E9000, + [ASPEED_DEV_SBC] = 0x7E6F2000, + [ASPEED_DEV_GPIO] = 0x7E780000, + [ASPEED_DEV_TIMER1] = 0x7E782000, + [ASPEED_DEV_UART1] = 0x7E783000, + [ASPEED_DEV_UART2] = 0x7E78D000, + [ASPEED_DEV_UART3] = 0x7E78E000, + [ASPEED_DEV_UART4] = 0x7E78F000, + [ASPEED_DEV_UART5] = 0x7E784000, + [ASPEED_DEV_UART6] = 0x7E790000, + [ASPEED_DEV_UART7] = 0x7E790100, + [ASPEED_DEV_UART8] = 0x7E790200, + [ASPEED_DEV_UART9] = 0x7E790300, + [ASPEED_DEV_UART10] = 0x7E790400, + [ASPEED_DEV_UART11] = 0x7E790500, + [ASPEED_DEV_UART12] = 0x7E790600, + [ASPEED_DEV_UART13] = 0x7E790700, + [ASPEED_DEV_WDT] = 0x7E785000, + [ASPEED_DEV_LPC] = 0x7E789000, + [ASPEED_DEV_PECI] = 0x7E78B000, + [ASPEED_DEV_I2C] = 0x7E7B0000, +}; + +static const int aspeed_soc_ast1030_irqmap[] = { + [ASPEED_DEV_UART1] = 47, + [ASPEED_DEV_UART2] = 48, + [ASPEED_DEV_UART3] = 49, + [ASPEED_DEV_UART4] = 50, + [ASPEED_DEV_UART5] = 8, + [ASPEED_DEV_UART6] = 57, + [ASPEED_DEV_UART7] = 58, + [ASPEED_DEV_UART8] = 59, + [ASPEED_DEV_UART9] = 60, + [ASPEED_DEV_UART10] = 61, + [ASPEED_DEV_UART11] = 62, + [ASPEED_DEV_UART12] = 63, + [ASPEED_DEV_UART13] = 64, + [ASPEED_DEV_GPIO] = 11, + [ASPEED_DEV_TIMER1] = 16, + [ASPEED_DEV_TIMER2] = 17, + [ASPEED_DEV_TIMER3] = 18, + [ASPEED_DEV_TIMER4] = 19, + [ASPEED_DEV_TIMER5] = 20, + [ASPEED_DEV_TIMER6] = 21, + [ASPEED_DEV_TIMER7] = 22, + [ASPEED_DEV_TIMER8] = 23, + [ASPEED_DEV_WDT] = 24, + [ASPEED_DEV_LPC] = 35, + [ASPEED_DEV_PECI] = 38, 
+ [ASPEED_DEV_FMC] = 39, + [ASPEED_DEV_PWM] = 44, + [ASPEED_DEV_ADC] = 46, + [ASPEED_DEV_SPI1] = 65, + [ASPEED_DEV_SPI2] = 66, + [ASPEED_DEV_I2C] = 110, /* 110 ~ 123 */ + [ASPEED_DEV_KCS] = 138, /* 138 -> 142 */ +}; + +static qemu_irq aspeed_soc_ast1030_get_irq(AspeedSoCState *s, int dev) +{ + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + + return qdev_get_gpio_in(DEVICE(&s->armv7m), sc->irqmap[dev]); +} + +static void aspeed_soc_ast1030_init(Object *obj) +{ + AspeedSoCState *s = ASPEED_SOC(obj); + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + char socname[8]; + char typename[64]; + int i; + + if (sscanf(sc->name, "%7s", socname) != 1) { + g_assert_not_reached(); + } + + object_initialize_child(obj, "armv7m", &s->armv7m, TYPE_ARMV7M); + + s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0); + + snprintf(typename, sizeof(typename), "aspeed.scu-%s", socname); + object_initialize_child(obj, "scu", &s->scu, typename); + qdev_prop_set_uint32(DEVICE(&s->scu), "silicon-rev", sc->silicon_rev); + + object_property_add_alias(obj, "hw-strap1", OBJECT(&s->scu), "hw-strap1"); + object_property_add_alias(obj, "hw-strap2", OBJECT(&s->scu), "hw-strap2"); + + snprintf(typename, sizeof(typename), "aspeed.i2c-%s", socname); + object_initialize_child(obj, "i2c", &s->i2c, typename); + + snprintf(typename, sizeof(typename), "aspeed.timer-%s", socname); + object_initialize_child(obj, "timerctrl", &s->timerctrl, typename); + + snprintf(typename, sizeof(typename), "aspeed.adc-%s", socname); + object_initialize_child(obj, "adc", &s->adc, typename); + + snprintf(typename, sizeof(typename), "aspeed.fmc-%s", socname); + object_initialize_child(obj, "fmc", &s->fmc, typename); + + for (i = 0; i < sc->spis_num; i++) { + snprintf(typename, sizeof(typename), "aspeed.spi%d-%s", i + 1, socname); + object_initialize_child(obj, "spi[*]", &s->spi[i], typename); + } + + object_initialize_child(obj, "lpc", &s->lpc, TYPE_ASPEED_LPC); + + object_initialize_child(obj, "peci", &s->peci, TYPE_ASPEED_PECI); + + object_initialize_child(obj, "sbc", &s->sbc, TYPE_ASPEED_SBC); + + for (i = 0; i < sc->wdts_num; i++) { + snprintf(typename, sizeof(typename), "aspeed.wdt-%s", socname); + object_initialize_child(obj, "wdt[*]", &s->wdt[i], typename); + } + + for (i = 0; i < sc->uarts_num; i++) { + object_initialize_child(obj, "uart[*]", &s->uart[i], TYPE_SERIAL_MM); + } + + snprintf(typename, sizeof(typename), "aspeed.gpio-%s", socname); + object_initialize_child(obj, "gpio", &s->gpio, typename); + + object_initialize_child(obj, "iomem", &s->iomem, TYPE_UNIMPLEMENTED_DEVICE); + object_initialize_child(obj, "sbc-unimplemented", &s->sbc_unimplemented, + TYPE_UNIMPLEMENTED_DEVICE); +} + +static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) +{ + AspeedSoCState *s = ASPEED_SOC(dev_soc); + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + DeviceState *armv7m; + Error *err = NULL; + int i; + g_autofree char *sram_name = NULL; + + if (!clock_has_source(s->sysclk)) { + error_setg(errp, "sysclk clock must be wired up by the board code"); + return; + } + + /* General I/O memory space to catch all unimplemented device */ + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem), "aspeed.io", + sc->memmap[ASPEED_DEV_IOMEM], + ASPEED_SOC_IOMEM_SIZE); + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->sbc_unimplemented), + "aspeed.sbc", sc->memmap[ASPEED_DEV_SBC], + 0x40000); + + /* AST1030 CPU Core */ + armv7m = DEVICE(&s->armv7m); + qdev_prop_set_uint32(armv7m, "num-irq", 256); + qdev_prop_set_string(armv7m, 
"cpu-type", sc->cpu_type); + qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); + object_property_set_link(OBJECT(&s->armv7m), "memory", + OBJECT(s->memory), &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->armv7m), &error_abort); + + /* Internal SRAM */ + sram_name = g_strdup_printf("aspeed.sram.%d", + CPU(s->armv7m.cpu)->cpu_index); + memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, &err); + if (err != NULL) { + error_propagate(errp, err); + return; + } + memory_region_add_subregion(s->memory, + sc->memmap[ASPEED_DEV_SRAM], + &s->sram); + + /* SCU */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]); + + /* I2C */ + + object_property_set_link(OBJECT(&s->i2c), "dram", OBJECT(&s->sram), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->i2c), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]); + for (i = 0; i < ASPEED_I2C_GET_CLASS(&s->i2c)->num_busses; i++) { + qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->armv7m), + sc->irqmap[ASPEED_DEV_I2C] + i); + /* The AST1030 I2C controller has one IRQ per bus. */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c.busses[i]), 0, irq); + } + + /* PECI */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->peci), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->peci), 0, + sc->memmap[ASPEED_DEV_PECI]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->peci), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_PECI)); + + /* LPC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->lpc), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->lpc), 0, sc->memmap[ASPEED_DEV_LPC]); + + /* Connect the LPC IRQ to the GIC. It is otherwise unused. */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_LPC)); + + /* + * On the AST1030 LPC subdevice IRQs are connected straight to the GIC. 
+ */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_1, + qdev_get_gpio_in(DEVICE(&s->armv7m), + sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_1)); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_2, + qdev_get_gpio_in(DEVICE(&s->armv7m), + sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_2)); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_3, + qdev_get_gpio_in(DEVICE(&s->armv7m), + sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_3)); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_4, + qdev_get_gpio_in(DEVICE(&s->armv7m), + sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_4)); + + /* UART */ + if (!aspeed_soc_uart_realize(s, errp)) { + return; + } + + /* Timer */ + object_property_set_link(OBJECT(&s->timerctrl), "scu", OBJECT(&s->scu), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->timerctrl), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->timerctrl), 0, + sc->memmap[ASPEED_DEV_TIMER1]); + for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) { + qemu_irq irq = aspeed_soc_get_irq(s, ASPEED_DEV_TIMER1 + i); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq); + } + + /* ADC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->adc), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->adc), 0, sc->memmap[ASPEED_DEV_ADC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->adc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_ADC)); + + /* FMC, The number of CS is set at the board level */ + object_property_set_link(OBJECT(&s->fmc), "dram", OBJECT(&s->sram), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->fmc), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->fmc), 0, sc->memmap[ASPEED_DEV_FMC]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->fmc), 1, + ASPEED_SMC_GET_CLASS(&s->fmc)->flash_window_base); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_FMC)); + + /* SPI */ + for (i = 0; i < sc->spis_num; i++) { + object_property_set_link(OBJECT(&s->spi[i]), "dram", + OBJECT(&s->sram), &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->spi[i]), 0, + sc->memmap[ASPEED_DEV_SPI1 + i]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->spi[i]), 1, + ASPEED_SMC_GET_CLASS(&s->spi[i])->flash_window_base); + } + + /* Secure Boot Controller */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->sbc), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sbc), 0, sc->memmap[ASPEED_DEV_SBC]); + + /* Watch dog */ + for (i = 0; i < sc->wdts_num; i++) { + AspeedWDTClass *awc = ASPEED_WDT_GET_CLASS(&s->wdt[i]); + + object_property_set_link(OBJECT(&s->wdt[i]), "scu", OBJECT(&s->scu), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->wdt[i]), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->wdt[i]), 0, + sc->memmap[ASPEED_DEV_WDT] + i * awc->offset); + } + + /* GPIO */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->gpio), 0, + sc->memmap[ASPEED_DEV_GPIO]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_GPIO)); +} + +static void aspeed_soc_ast1030_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSoCClass *sc = ASPEED_SOC_CLASS(dc); + + dc->realize = aspeed_soc_ast1030_realize; + + sc->name = "ast1030-a1"; + sc->cpu_type = ARM_CPU_TYPE_NAME("cortex-m4"); + sc->silicon_rev = AST1030_A1_SILICON_REV; + sc->sram_size = 0xc0000; + sc->spis_num = 2; + sc->ehcis_num = 0; + 
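/*
 * Editor's note, not part of the patch: the "110 ~ 123" and "138 -> 142"
 * comments in the AST1030 irqmap above describe a per-instance fan-out.
 * Controllers with several sub-devices (the I2C busses and the LPC KCS
 * channels) occupy a contiguous block of NVIC lines starting at their
 * irqmap entry, and the realize code above wires instance i to base + i.
 * A small standalone model of that arithmetic; the helper names are
 * illustrative and the base values are taken from the table above.
 */
#include <stdio.h>

enum { AST1030_IRQ_I2C = 110, AST1030_IRQ_KCS = 138 };

static int ast1030_i2c_bus_irq(int bus)   { return AST1030_IRQ_I2C + bus; }
static int ast1030_kcs_chan_irq(int chan) { return AST1030_IRQ_KCS + chan; }

int main(void)
{
    printf("i2c bus 0   -> NVIC line %d\n", ast1030_i2c_bus_irq(0));   /* 110 */
    printf("i2c bus 13  -> NVIC line %d\n", ast1030_i2c_bus_irq(13));  /* 123 */
    printf("kcs chan 0  -> NVIC line %d\n", ast1030_kcs_chan_irq(0));  /* 138 */
    printf("kcs chan 3  -> NVIC line %d\n", ast1030_kcs_chan_irq(3));  /* 141 */
    return 0;
}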
sc->wdts_num = 4; + sc->macs_num = 1; + sc->uarts_num = 13; + sc->irqmap = aspeed_soc_ast1030_irqmap; + sc->memmap = aspeed_soc_ast1030_memmap; + sc->num_cpus = 1; + sc->get_irq = aspeed_soc_ast1030_get_irq; +} + +static const TypeInfo aspeed_soc_ast1030_type_info = { + .name = "ast1030-a1", + .parent = TYPE_ASPEED_SOC, + .instance_size = sizeof(AspeedSoCState), + .instance_init = aspeed_soc_ast1030_init, + .class_init = aspeed_soc_ast1030_class_init, + .class_size = sizeof(AspeedSoCClass), +}; + +static void aspeed_soc_register_types(void) +{ + type_register_static(&aspeed_soc_ast1030_type_info); +} + +type_init(aspeed_soc_register_types) diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c index e3013128c6..cd75465c2b 100644 --- a/hw/arm/aspeed_ast2600.c +++ b/hw/arm/aspeed_ast2600.c @@ -11,7 +11,6 @@ #include "qapi/error.h" #include "hw/misc/unimp.h" #include "hw/arm/aspeed_soc.h" -#include "hw/char/serial.h" #include "qemu/module.h" #include "qemu/error-report.h" #include "hw/i2c/aspeed_i2c.h" @@ -19,15 +18,17 @@ #include "sysemu/sysemu.h" #define ASPEED_SOC_IOMEM_SIZE 0x00200000 +#define ASPEED_SOC_DPMCU_SIZE 0x00040000 static const hwaddr aspeed_soc_ast2600_memmap[] = { [ASPEED_DEV_SRAM] = 0x10000000, + [ASPEED_DEV_DPMCU] = 0x18000000, /* 0x16000000 0x17FFFFFF : AHB BUS do LPC Bus bridge */ [ASPEED_DEV_IOMEM] = 0x1E600000, [ASPEED_DEV_PWM] = 0x1E610000, [ASPEED_DEV_FMC] = 0x1E620000, [ASPEED_DEV_SPI1] = 0x1E630000, - [ASPEED_DEV_SPI2] = 0x1E641000, + [ASPEED_DEV_SPI2] = 0x1E631000, [ASPEED_DEV_EHCI1] = 0x1E6A1000, [ASPEED_DEV_EHCI2] = 0x1E6A3000, [ASPEED_DEV_MII1] = 0x1E650000, @@ -44,6 +45,9 @@ static const hwaddr aspeed_soc_ast2600_memmap[] = { [ASPEED_DEV_SCU] = 0x1E6E2000, [ASPEED_DEV_XDMA] = 0x1E6E7000, [ASPEED_DEV_ADC] = 0x1E6E9000, + [ASPEED_DEV_DP] = 0x1E6EB000, + [ASPEED_DEV_SBC] = 0x1E6F2000, + [ASPEED_DEV_EMMC_BC] = 0x1E6f5000, [ASPEED_DEV_VIDEO] = 0x1E700000, [ASPEED_DEV_SDHCI] = 0x1E740000, [ASPEED_DEV_EMMC] = 0x1E750000, @@ -55,9 +59,22 @@ static const hwaddr aspeed_soc_ast2600_memmap[] = { [ASPEED_DEV_LPC] = 0x1E789000, [ASPEED_DEV_IBT] = 0x1E789140, [ASPEED_DEV_I2C] = 0x1E78A000, + [ASPEED_DEV_PECI] = 0x1E78B000, [ASPEED_DEV_UART1] = 0x1E783000, + [ASPEED_DEV_UART2] = 0x1E78D000, + [ASPEED_DEV_UART3] = 0x1E78E000, + [ASPEED_DEV_UART4] = 0x1E78F000, [ASPEED_DEV_UART5] = 0x1E784000, + [ASPEED_DEV_UART6] = 0x1E790000, + [ASPEED_DEV_UART7] = 0x1E790100, + [ASPEED_DEV_UART8] = 0x1E790200, + [ASPEED_DEV_UART9] = 0x1E790300, + [ASPEED_DEV_UART10] = 0x1E790400, + [ASPEED_DEV_UART11] = 0x1E790500, + [ASPEED_DEV_UART12] = 0x1E790600, + [ASPEED_DEV_UART13] = 0x1E790700, [ASPEED_DEV_VUART] = 0x1E787000, + [ASPEED_DEV_I3C] = 0x1E7A0000, [ASPEED_DEV_SDRAM] = 0x80000000, }; @@ -72,6 +89,14 @@ static const int aspeed_soc_ast2600_irqmap[] = { [ASPEED_DEV_UART3] = 49, [ASPEED_DEV_UART4] = 50, [ASPEED_DEV_UART5] = 8, + [ASPEED_DEV_UART6] = 57, + [ASPEED_DEV_UART7] = 58, + [ASPEED_DEV_UART8] = 59, + [ASPEED_DEV_UART9] = 60, + [ASPEED_DEV_UART10] = 61, + [ASPEED_DEV_UART11] = 62, + [ASPEED_DEV_UART12] = 63, + [ASPEED_DEV_UART13] = 64, [ASPEED_DEV_VUART] = 8, [ASPEED_DEV_FMC] = 39, [ASPEED_DEV_SDMC] = 0, @@ -98,19 +123,22 @@ static const int aspeed_soc_ast2600_irqmap[] = { [ASPEED_DEV_LPC] = 35, [ASPEED_DEV_IBT] = 143, [ASPEED_DEV_I2C] = 110, /* 110 -> 125 */ + [ASPEED_DEV_PECI] = 38, [ASPEED_DEV_ETH1] = 2, [ASPEED_DEV_ETH2] = 3, [ASPEED_DEV_HACE] = 4, [ASPEED_DEV_ETH3] = 32, [ASPEED_DEV_ETH4] = 33, [ASPEED_DEV_KCS] = 138, /* 138 -> 142 */ + [ASPEED_DEV_DP] = 62, + 
[ASPEED_DEV_I3C] = 102, /* 102 -> 107 */ }; -static qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int ctrl) +static qemu_irq aspeed_soc_ast2600_get_irq(AspeedSoCState *s, int dev) { AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); - return qdev_get_gpio_in(DEVICE(&s->a7mpcore), sc->irqmap[ctrl]); + return qdev_get_gpio_in(DEVICE(&s->a7mpcore), sc->irqmap[dev]); } static void aspeed_soc_ast2600_init(Object *obj) @@ -148,12 +176,16 @@ static void aspeed_soc_ast2600_init(Object *obj) snprintf(typename, sizeof(typename), "aspeed.timer-%s", socname); object_initialize_child(obj, "timerctrl", &s->timerctrl, typename); + snprintf(typename, sizeof(typename), "aspeed.adc-%s", socname); + object_initialize_child(obj, "adc", &s->adc, typename); + snprintf(typename, sizeof(typename), "aspeed.i2c-%s", socname); object_initialize_child(obj, "i2c", &s->i2c, typename); + object_initialize_child(obj, "peci", &s->peci, TYPE_ASPEED_PECI); + snprintf(typename, sizeof(typename), "aspeed.fmc-%s", socname); object_initialize_child(obj, "fmc", &s->fmc, typename); - object_property_add_alias(obj, "num-cs", OBJECT(&s->fmc), "num-cs"); for (i = 0; i < sc->spis_num; i++) { snprintf(typename, sizeof(typename), "aspeed.spi%d-%s", i + 1, socname); @@ -169,8 +201,6 @@ static void aspeed_soc_ast2600_init(Object *obj) object_initialize_child(obj, "sdmc", &s->sdmc, typename); object_property_add_alias(obj, "ram-size", OBJECT(&s->sdmc), "ram-size"); - object_property_add_alias(obj, "max-ram-size", OBJECT(&s->sdmc), - "max-ram-size"); for (i = 0; i < sc->wdts_num; i++) { snprintf(typename, sizeof(typename), "aspeed.wdt-%s", socname); @@ -184,6 +214,10 @@ static void aspeed_soc_ast2600_init(Object *obj) object_initialize_child(obj, "mii[*]", &s->mii[i], TYPE_ASPEED_MII); } + for (i = 0; i < sc->uarts_num; i++) { + object_initialize_child(obj, "uart[*]", &s->uart[i], TYPE_SERIAL_MM); + } + snprintf(typename, sizeof(typename), TYPE_ASPEED_XDMA "-%s", socname); object_initialize_child(obj, "xdma", &s->xdma, typename); @@ -216,6 +250,17 @@ static void aspeed_soc_ast2600_init(Object *obj) snprintf(typename, sizeof(typename), "aspeed.hace-%s", socname); object_initialize_child(obj, "hace", &s->hace, typename); + + object_initialize_child(obj, "i3c", &s->i3c, TYPE_ASPEED_I3C); + + object_initialize_child(obj, "sbc", &s->sbc, TYPE_ASPEED_SBC); + + object_initialize_child(obj, "iomem", &s->iomem, TYPE_UNIMPLEMENTED_DEVICE); + object_initialize_child(obj, "video", &s->video, TYPE_UNIMPLEMENTED_DEVICE); + object_initialize_child(obj, "dpmcu", &s->dpmcu, TYPE_UNIMPLEMENTED_DEVICE); + object_initialize_child(obj, "emmc-boot-controller", + &s->emmc_boot_controller, + TYPE_UNIMPLEMENTED_DEVICE); } /* @@ -235,14 +280,21 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); Error *err = NULL; qemu_irq irq; + g_autofree char *sram_name = NULL; /* IO space */ - create_unimplemented_device("aspeed_soc.io", sc->memmap[ASPEED_DEV_IOMEM], - ASPEED_SOC_IOMEM_SIZE); + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem), "aspeed.io", + sc->memmap[ASPEED_DEV_IOMEM], + ASPEED_SOC_IOMEM_SIZE); /* Video engine stub */ - create_unimplemented_device("aspeed.video", sc->memmap[ASPEED_DEV_VIDEO], - 0x1000); + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->video), "aspeed.video", + sc->memmap[ASPEED_DEV_VIDEO], 0x1000); + + /* eMMC Boot Controller stub */ + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->emmc_boot_controller), + "aspeed.emmc-boot-controller", + 
sc->memmap[ASPEED_DEV_EMMC_BC], 0x1000); /* CPU */ for (i = 0; i < sc->num_cpus; i++) { @@ -255,6 +307,10 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) object_property_set_int(OBJECT(&s->cpu[i]), "cntfrq", 1125000000, &error_abort); + object_property_set_bool(OBJECT(&s->cpu[i]), "neon", false, + &error_abort); + object_property_set_link(OBJECT(&s->cpu[i]), "memory", + OBJECT(s->memory), &error_abort); if (!qdev_realize(DEVICE(&s->cpu[i]), NULL, errp)) { return; @@ -269,11 +325,11 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) &error_abort); sysbus_realize(SYS_BUS_DEVICE(&s->a7mpcore), &error_abort); - sysbus_mmio_map(SYS_BUS_DEVICE(&s->a7mpcore), 0, ASPEED_A7MPCORE_ADDR); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->a7mpcore), 0, ASPEED_A7MPCORE_ADDR); for (i = 0; i < sc->num_cpus; i++) { SysBusDevice *sbd = SYS_BUS_DEVICE(&s->a7mpcore); - DeviceState *d = DEVICE(qemu_get_cpu(i)); + DeviceState *d = DEVICE(&s->cpu[i]); irq = qdev_get_gpio_in(d, ARM_CPU_IRQ); sysbus_connect_irq(sbd, i, irq); @@ -286,26 +342,31 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) } /* SRAM */ - memory_region_init_ram(&s->sram, OBJECT(dev), "aspeed.sram", - sc->sram_size, &err); + sram_name = g_strdup_printf("aspeed.sram.%d", CPU(&s->cpu[0])->cpu_index); + memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, &err); if (err) { error_propagate(errp, err); return; } - memory_region_add_subregion(get_system_memory(), + memory_region_add_subregion(s->memory, sc->memmap[ASPEED_DEV_SRAM], &s->sram); + /* DPMCU */ + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->dpmcu), "aspeed.dpmcu", + sc->memmap[ASPEED_DEV_DPMCU], + ASPEED_SOC_DPMCU_SIZE); + /* SCU */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]); /* RTC */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->rtc), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->rtc), 0, sc->memmap[ASPEED_DEV_RTC]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->rtc), 0, sc->memmap[ASPEED_DEV_RTC]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0, aspeed_soc_get_irq(s, ASPEED_DEV_RTC)); @@ -315,17 +376,25 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->timerctrl), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->timerctrl), 0, + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->timerctrl), 0, sc->memmap[ASPEED_DEV_TIMER1]); for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) { qemu_irq irq = aspeed_soc_get_irq(s, ASPEED_DEV_TIMER1 + i); sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq); } - /* UART - attach an 8250 to the IO space as our UART5 */ - serial_mm_init(get_system_memory(), sc->memmap[ASPEED_DEV_UART5], 2, - aspeed_soc_get_irq(s, ASPEED_DEV_UART5), - 38400, serial_hd(0), DEVICE_LITTLE_ENDIAN); + /* ADC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->adc), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->adc), 0, sc->memmap[ASPEED_DEV_ADC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->adc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_ADC)); + + /* UART */ + if (!aspeed_soc_uart_realize(s, errp)) { + return; + } /* I2C */ object_property_set_link(OBJECT(&s->i2c), "dram", OBJECT(s->dram_mr), @@ -333,26 +402,32 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->i2c), errp)) { return; } - 
sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]); for (i = 0; i < ASPEED_I2C_GET_CLASS(&s->i2c)->num_busses; i++) { qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), sc->irqmap[ASPEED_DEV_I2C] + i); - /* - * The AST2600 SoC has one IRQ per I2C bus. Skip the common - * IRQ (AST2400 and AST2500) and connect all bussses. - */ - sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c), i + 1, irq); + /* The AST2600 I2C controller has one IRQ per bus. */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c.busses[i]), 0, irq); } + /* PECI */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->peci), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->peci), 0, + sc->memmap[ASPEED_DEV_PECI]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->peci), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_PECI)); + /* FMC, The number of CS is set at the board level */ object_property_set_link(OBJECT(&s->fmc), "dram", OBJECT(s->dram_mr), &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->fmc), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 0, sc->memmap[ASPEED_DEV_FMC]); - sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 1, - s->fmc.ctrl->flash_window_base); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->fmc), 0, sc->memmap[ASPEED_DEV_FMC]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->fmc), 1, + ASPEED_SMC_GET_CLASS(&s->fmc)->flash_window_base); sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0, aspeed_soc_get_irq(s, ASPEED_DEV_FMC)); @@ -360,14 +435,13 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) for (i = 0; i < sc->spis_num; i++) { object_property_set_link(OBJECT(&s->spi[i]), "dram", OBJECT(s->dram_mr), &error_abort); - object_property_set_int(OBJECT(&s->spi[i]), "num-cs", 1, &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->spi[i]), 0, sc->memmap[ASPEED_DEV_SPI1 + i]); - sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 1, - s->spi[i].ctrl->flash_window_base); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->spi[i]), 1, + ASPEED_SMC_GET_CLASS(&s->spi[i])->flash_window_base); } /* EHCI */ @@ -375,7 +449,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->ehci[i]), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->ehci[i]), 0, + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->ehci[i]), 0, sc->memmap[ASPEED_DEV_EHCI1 + i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci[i]), 0, aspeed_soc_get_irq(s, ASPEED_DEV_EHCI1 + i)); @@ -385,7 +459,8 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdmc), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->sdmc), 0, sc->memmap[ASPEED_DEV_SDMC]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sdmc), 0, + sc->memmap[ASPEED_DEV_SDMC]); /* Watch dog */ for (i = 0; i < sc->wdts_num; i++) { @@ -396,10 +471,15 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->wdt[i]), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0, + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->wdt[i]), 0, sc->memmap[ASPEED_DEV_WDT] + i * awc->offset); } + /* RAM */ + if (!aspeed_soc_dram_init(s, errp)) { + return; + } + /* Net */ for (i = 0; i < sc->macs_num; i++) { object_property_set_bool(OBJECT(&s->ftgmac100[i]), "aspeed", true, @@ -407,7 +487,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if 
(!sysbus_realize(SYS_BUS_DEVICE(&s->ftgmac100[i]), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, sc->memmap[ASPEED_DEV_ETH1 + i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, aspeed_soc_get_irq(s, ASPEED_DEV_ETH1 + i)); @@ -418,7 +498,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->mii[i]), 0, + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->mii[i]), 0, sc->memmap[ASPEED_DEV_MII1 + i]); } @@ -426,7 +506,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->xdma), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->xdma), 0, + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->xdma), 0, sc->memmap[ASPEED_DEV_XDMA]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->xdma), 0, aspeed_soc_get_irq(s, ASPEED_DEV_XDMA)); @@ -435,14 +515,14 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio), 0, sc->memmap[ASPEED_DEV_GPIO]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->gpio), 0, sc->memmap[ASPEED_DEV_GPIO]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio), 0, aspeed_soc_get_irq(s, ASPEED_DEV_GPIO)); if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio_1_8v), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio_1_8v), 0, + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->gpio_1_8v), 0, sc->memmap[ASPEED_DEV_GPIO_1_8V]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio_1_8v), 0, aspeed_soc_get_irq(s, ASPEED_DEV_GPIO_1_8V)); @@ -451,7 +531,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdhci), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->sdhci), 0, + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sdhci), 0, sc->memmap[ASPEED_DEV_SDHCI]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdhci), 0, aspeed_soc_get_irq(s, ASPEED_DEV_SDHCI)); @@ -460,7 +540,8 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->emmc), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->emmc), 0, sc->memmap[ASPEED_DEV_EMMC]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->emmc), 0, + sc->memmap[ASPEED_DEV_EMMC]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->emmc), 0, aspeed_soc_get_irq(s, ASPEED_DEV_EMMC)); @@ -468,7 +549,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->lpc), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->lpc), 0, sc->memmap[ASPEED_DEV_LPC]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->lpc), 0, sc->memmap[ASPEED_DEV_LPC]); /* Connect the LPC IRQ to the GIC. It is otherwise unused. 
*/ sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 0, @@ -504,9 +585,28 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->hace), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->hace), 0, sc->memmap[ASPEED_DEV_HACE]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->hace), 0, + sc->memmap[ASPEED_DEV_HACE]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->hace), 0, aspeed_soc_get_irq(s, ASPEED_DEV_HACE)); + + /* I3C */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->i3c), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i3c), 0, sc->memmap[ASPEED_DEV_I3C]); + for (i = 0; i < ASPEED_I3C_NR_DEVICES; i++) { + qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), + sc->irqmap[ASPEED_DEV_I3C] + i); + /* The AST2600 I3C controller has one IRQ per bus. */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i3c.devices[i]), 0, irq); + } + + /* Secure Boot Controller */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->sbc), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sbc), 0, sc->memmap[ASPEED_DEV_SBC]); } static void aspeed_soc_ast2600_class_init(ObjectClass *oc, void *data) @@ -516,21 +616,23 @@ static void aspeed_soc_ast2600_class_init(ObjectClass *oc, void *data) dc->realize = aspeed_soc_ast2600_realize; - sc->name = "ast2600-a1"; + sc->name = "ast2600-a3"; sc->cpu_type = ARM_CPU_TYPE_NAME("cortex-a7"); - sc->silicon_rev = AST2600_A1_SILICON_REV; + sc->silicon_rev = AST2600_A3_SILICON_REV; sc->sram_size = 0x16400; sc->spis_num = 2; sc->ehcis_num = 2; sc->wdts_num = 4; sc->macs_num = 4; + sc->uarts_num = 13; sc->irqmap = aspeed_soc_ast2600_irqmap; sc->memmap = aspeed_soc_ast2600_memmap; sc->num_cpus = 2; + sc->get_irq = aspeed_soc_ast2600_get_irq; } static const TypeInfo aspeed_soc_ast2600_type_info = { - .name = "ast2600-a1", + .name = "ast2600-a3", .parent = TYPE_ASPEED_SOC, .instance_size = sizeof(AspeedSoCState), .instance_init = aspeed_soc_ast2600_init, diff --git a/hw/arm/aspeed_soc.c b/hw/arm/aspeed_soc.c index 3ad6c56fa9..b05b9dd416 100644 --- a/hw/arm/aspeed_soc.c +++ b/hw/arm/aspeed_soc.c @@ -11,6 +11,7 @@ */ #include "qemu/osdep.h" +#include "qemu/units.h" #include "qapi/error.h" #include "hw/misc/unimp.h" #include "hw/arm/aspeed_soc.h" @@ -45,9 +46,13 @@ static const hwaddr aspeed_soc_ast2400_memmap[] = { [ASPEED_DEV_LPC] = 0x1E789000, [ASPEED_DEV_IBT] = 0x1E789140, [ASPEED_DEV_I2C] = 0x1E78A000, + [ASPEED_DEV_PECI] = 0x1E78B000, [ASPEED_DEV_ETH1] = 0x1E660000, [ASPEED_DEV_ETH2] = 0x1E680000, [ASPEED_DEV_UART1] = 0x1E783000, + [ASPEED_DEV_UART2] = 0x1E78D000, + [ASPEED_DEV_UART3] = 0x1E78E000, + [ASPEED_DEV_UART4] = 0x1E78F000, [ASPEED_DEV_UART5] = 0x1E784000, [ASPEED_DEV_VUART] = 0x1E787000, [ASPEED_DEV_SDRAM] = 0x40000000, @@ -77,9 +82,13 @@ static const hwaddr aspeed_soc_ast2500_memmap[] = { [ASPEED_DEV_LPC] = 0x1E789000, [ASPEED_DEV_IBT] = 0x1E789140, [ASPEED_DEV_I2C] = 0x1E78A000, + [ASPEED_DEV_PECI] = 0x1E78B000, [ASPEED_DEV_ETH1] = 0x1E660000, [ASPEED_DEV_ETH2] = 0x1E680000, [ASPEED_DEV_UART1] = 0x1E783000, + [ASPEED_DEV_UART2] = 0x1E78D000, + [ASPEED_DEV_UART3] = 0x1E78E000, + [ASPEED_DEV_UART4] = 0x1E78F000, [ASPEED_DEV_UART5] = 0x1E784000, [ASPEED_DEV_VUART] = 0x1E787000, [ASPEED_DEV_SDRAM] = 0x80000000, @@ -112,6 +121,7 @@ static const int aspeed_soc_ast2400_irqmap[] = { [ASPEED_DEV_PWM] = 28, [ASPEED_DEV_LPC] = 8, [ASPEED_DEV_I2C] = 12, + [ASPEED_DEV_PECI] = 15, [ASPEED_DEV_ETH1] = 2, [ASPEED_DEV_ETH2] = 3, [ASPEED_DEV_XDMA] = 6, @@ -121,11 +131,11 @@ static const int aspeed_soc_ast2400_irqmap[] = { #define 
aspeed_soc_ast2500_irqmap aspeed_soc_ast2400_irqmap -static qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int ctrl) +static qemu_irq aspeed_soc_ast2400_get_irq(AspeedSoCState *s, int dev) { AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); - return qdev_get_gpio_in(DEVICE(&s->vic), sc->irqmap[ctrl]); + return qdev_get_gpio_in(DEVICE(&s->vic), sc->irqmap[dev]); } static void aspeed_soc_init(Object *obj) @@ -162,12 +172,16 @@ static void aspeed_soc_init(Object *obj) snprintf(typename, sizeof(typename), "aspeed.timer-%s", socname); object_initialize_child(obj, "timerctrl", &s->timerctrl, typename); + snprintf(typename, sizeof(typename), "aspeed.adc-%s", socname); + object_initialize_child(obj, "adc", &s->adc, typename); + snprintf(typename, sizeof(typename), "aspeed.i2c-%s", socname); object_initialize_child(obj, "i2c", &s->i2c, typename); + object_initialize_child(obj, "peci", &s->peci, TYPE_ASPEED_PECI); + snprintf(typename, sizeof(typename), "aspeed.fmc-%s", socname); object_initialize_child(obj, "fmc", &s->fmc, typename); - object_property_add_alias(obj, "num-cs", OBJECT(&s->fmc), "num-cs"); for (i = 0; i < sc->spis_num; i++) { snprintf(typename, sizeof(typename), "aspeed.spi%d-%s", i + 1, socname); @@ -183,8 +197,6 @@ static void aspeed_soc_init(Object *obj) object_initialize_child(obj, "sdmc", &s->sdmc, typename); object_property_add_alias(obj, "ram-size", OBJECT(&s->sdmc), "ram-size"); - object_property_add_alias(obj, "max-ram-size", OBJECT(&s->sdmc), - "max-ram-size"); for (i = 0; i < sc->wdts_num; i++) { snprintf(typename, sizeof(typename), "aspeed.wdt-%s", socname); @@ -196,6 +208,10 @@ static void aspeed_soc_init(Object *obj) TYPE_FTGMAC100); } + for (i = 0; i < sc->uarts_num; i++) { + object_initialize_child(obj, "uart[*]", &s->uart[i], TYPE_SERIAL_MM); + } + snprintf(typename, sizeof(typename), TYPE_ASPEED_XDMA "-%s", socname); object_initialize_child(obj, "xdma", &s->xdma, typename); @@ -216,6 +232,9 @@ static void aspeed_soc_init(Object *obj) snprintf(typename, sizeof(typename), "aspeed.hace-%s", socname); object_initialize_child(obj, "hace", &s->hace, typename); + + object_initialize_child(obj, "iomem", &s->iomem, TYPE_UNIMPLEMENTED_DEVICE); + object_initialize_child(obj, "video", &s->video, TYPE_UNIMPLEMENTED_DEVICE); } static void aspeed_soc_realize(DeviceState *dev, Error **errp) @@ -224,43 +243,47 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) AspeedSoCState *s = ASPEED_SOC(dev); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); Error *err = NULL; + g_autofree char *sram_name = NULL; /* IO space */ - create_unimplemented_device("aspeed_soc.io", sc->memmap[ASPEED_DEV_IOMEM], - ASPEED_SOC_IOMEM_SIZE); + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem), "aspeed.io", + sc->memmap[ASPEED_DEV_IOMEM], + ASPEED_SOC_IOMEM_SIZE); /* Video engine stub */ - create_unimplemented_device("aspeed.video", sc->memmap[ASPEED_DEV_VIDEO], - 0x1000); + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->video), "aspeed.video", + sc->memmap[ASPEED_DEV_VIDEO], 0x1000); /* CPU */ for (i = 0; i < sc->num_cpus; i++) { + object_property_set_link(OBJECT(&s->cpu[i]), "memory", + OBJECT(s->memory), &error_abort); if (!qdev_realize(DEVICE(&s->cpu[i]), NULL, errp)) { return; } } /* SRAM */ - memory_region_init_ram(&s->sram, OBJECT(dev), "aspeed.sram", - sc->sram_size, &err); + sram_name = g_strdup_printf("aspeed.sram.%d", CPU(&s->cpu[0])->cpu_index); + memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, &err); if (err) { error_propagate(errp, err); return; } - 
memory_region_add_subregion(get_system_memory(), + memory_region_add_subregion(s->memory, sc->memmap[ASPEED_DEV_SRAM], &s->sram); /* SCU */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]); /* VIC */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->vic), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->vic), 0, sc->memmap[ASPEED_DEV_VIC]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->vic), 0, sc->memmap[ASPEED_DEV_VIC]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 0, qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_IRQ)); sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 1, @@ -270,7 +293,7 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->rtc), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->rtc), 0, sc->memmap[ASPEED_DEV_RTC]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->rtc), 0, sc->memmap[ASPEED_DEV_RTC]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0, aspeed_soc_get_irq(s, ASPEED_DEV_RTC)); @@ -280,17 +303,25 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->timerctrl), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->timerctrl), 0, + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->timerctrl), 0, sc->memmap[ASPEED_DEV_TIMER1]); for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) { qemu_irq irq = aspeed_soc_get_irq(s, ASPEED_DEV_TIMER1 + i); sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq); } - /* UART - attach an 8250 to the IO space as our UART5 */ - serial_mm_init(get_system_memory(), sc->memmap[ASPEED_DEV_UART5], 2, - aspeed_soc_get_irq(s, ASPEED_DEV_UART5), 38400, - serial_hd(0), DEVICE_LITTLE_ENDIAN); + /* ADC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->adc), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->adc), 0, sc->memmap[ASPEED_DEV_ADC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->adc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_ADC)); + + /* UART */ + if (!aspeed_soc_uart_realize(s, errp)) { + return; + } /* I2C */ object_property_set_link(OBJECT(&s->i2c), "dram", OBJECT(s->dram_mr), @@ -298,32 +329,40 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->i2c), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c), 0, aspeed_soc_get_irq(s, ASPEED_DEV_I2C)); + /* PECI */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->peci), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->peci), 0, + sc->memmap[ASPEED_DEV_PECI]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->peci), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_PECI)); + /* FMC, The number of CS is set at the board level */ object_property_set_link(OBJECT(&s->fmc), "dram", OBJECT(s->dram_mr), &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->fmc), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 0, sc->memmap[ASPEED_DEV_FMC]); - sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 1, - s->fmc.ctrl->flash_window_base); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->fmc), 0, sc->memmap[ASPEED_DEV_FMC]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->fmc), 1, + ASPEED_SMC_GET_CLASS(&s->fmc)->flash_window_base); sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0, aspeed_soc_get_irq(s, ASPEED_DEV_FMC)); /* SPI */ for (i = 0; i < sc->spis_num; i++) { - 
object_property_set_int(OBJECT(&s->spi[i]), "num-cs", 1, &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->spi[i]), 0, sc->memmap[ASPEED_DEV_SPI1 + i]); - sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 1, - s->spi[i].ctrl->flash_window_base); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->spi[i]), 1, + ASPEED_SMC_GET_CLASS(&s->spi[i])->flash_window_base); } /* EHCI */ @@ -331,7 +370,7 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->ehci[i]), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->ehci[i]), 0, + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->ehci[i]), 0, sc->memmap[ASPEED_DEV_EHCI1 + i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci[i]), 0, aspeed_soc_get_irq(s, ASPEED_DEV_EHCI1 + i)); @@ -341,7 +380,8 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdmc), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->sdmc), 0, sc->memmap[ASPEED_DEV_SDMC]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sdmc), 0, + sc->memmap[ASPEED_DEV_SDMC]); /* Watch dog */ for (i = 0; i < sc->wdts_num; i++) { @@ -352,10 +392,15 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->wdt[i]), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0, + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->wdt[i]), 0, sc->memmap[ASPEED_DEV_WDT] + i * awc->offset); } + /* RAM */ + if (!aspeed_soc_dram_init(s, errp)) { + return; + } + /* Net */ for (i = 0; i < sc->macs_num; i++) { object_property_set_bool(OBJECT(&s->ftgmac100[i]), "aspeed", true, @@ -363,7 +408,7 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->ftgmac100[i]), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, sc->memmap[ASPEED_DEV_ETH1 + i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, aspeed_soc_get_irq(s, ASPEED_DEV_ETH1 + i)); @@ -373,7 +418,7 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->xdma), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->xdma), 0, + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->xdma), 0, sc->memmap[ASPEED_DEV_XDMA]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->xdma), 0, aspeed_soc_get_irq(s, ASPEED_DEV_XDMA)); @@ -382,7 +427,8 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio), 0, sc->memmap[ASPEED_DEV_GPIO]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->gpio), 0, + sc->memmap[ASPEED_DEV_GPIO]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio), 0, aspeed_soc_get_irq(s, ASPEED_DEV_GPIO)); @@ -390,7 +436,7 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdhci), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->sdhci), 0, + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sdhci), 0, sc->memmap[ASPEED_DEV_SDHCI]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdhci), 0, aspeed_soc_get_irq(s, ASPEED_DEV_SDHCI)); @@ -399,7 +445,7 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->lpc), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->lpc), 0, sc->memmap[ASPEED_DEV_LPC]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->lpc), 0, sc->memmap[ASPEED_DEV_LPC]); /* 
Connect the LPC IRQ to the VIC */ sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 0, @@ -432,11 +478,14 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->hace), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->hace), 0, sc->memmap[ASPEED_DEV_HACE]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->hace), 0, + sc->memmap[ASPEED_DEV_HACE]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->hace), 0, aspeed_soc_get_irq(s, ASPEED_DEV_HACE)); } static Property aspeed_soc_properties[] = { + DEFINE_PROP_LINK("memory", AspeedSoCState, memory, TYPE_MEMORY_REGION, + MemoryRegion *), DEFINE_PROP_LINK("dram", AspeedSoCState, dram_mr, TYPE_MEMORY_REGION, MemoryRegion *), DEFINE_PROP_END_OF_LIST(), @@ -473,9 +522,11 @@ static void aspeed_soc_ast2400_class_init(ObjectClass *oc, void *data) sc->ehcis_num = 1; sc->wdts_num = 2; sc->macs_num = 2; + sc->uarts_num = 5; sc->irqmap = aspeed_soc_ast2400_irqmap; sc->memmap = aspeed_soc_ast2400_memmap; sc->num_cpus = 1; + sc->get_irq = aspeed_soc_ast2400_get_irq; } static const TypeInfo aspeed_soc_ast2400_type_info = { @@ -498,9 +549,11 @@ static void aspeed_soc_ast2500_class_init(ObjectClass *oc, void *data) sc->ehcis_num = 2; sc->wdts_num = 3; sc->macs_num = 2; + sc->uarts_num = 5; sc->irqmap = aspeed_soc_ast2500_irqmap; sc->memmap = aspeed_soc_ast2500_memmap; sc->num_cpus = 1; + sc->get_irq = aspeed_soc_ast2400_get_irq; } static const TypeInfo aspeed_soc_ast2500_type_info = { @@ -517,4 +570,100 @@ static void aspeed_soc_register_types(void) type_register_static(&aspeed_soc_ast2500_type_info); }; -type_init(aspeed_soc_register_types) +type_init(aspeed_soc_register_types); + +qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int dev) +{ + return ASPEED_SOC_GET_CLASS(s)->get_irq(s, dev); +} + +bool aspeed_soc_uart_realize(AspeedSoCState *s, Error **errp) +{ + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + SerialMM *smm; + + for (int i = 0, uart = ASPEED_DEV_UART1; i < sc->uarts_num; i++, uart++) { + smm = &s->uart[i]; + + /* Chardev property is set by the machine. 
*/ + qdev_prop_set_uint8(DEVICE(smm), "regshift", 2); + qdev_prop_set_uint32(DEVICE(smm), "baudbase", 38400); + qdev_set_legacy_instance_id(DEVICE(smm), sc->memmap[uart], 2); + qdev_prop_set_uint8(DEVICE(smm), "endianness", DEVICE_LITTLE_ENDIAN); + if (!sysbus_realize(SYS_BUS_DEVICE(smm), errp)) { + return false; + } + + sysbus_connect_irq(SYS_BUS_DEVICE(smm), 0, aspeed_soc_get_irq(s, uart)); + aspeed_mmio_map(s, SYS_BUS_DEVICE(smm), 0, sc->memmap[uart]); + } + + return true; +} + +void aspeed_soc_uart_set_chr(AspeedSoCState *s, int dev, Chardev *chr) +{ + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + int i = dev - ASPEED_DEV_UART1; + + g_assert(0 <= i && i < ARRAY_SIZE(s->uart) && i < sc->uarts_num); + qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", chr); +} + +/* + * SDMC should be realized first to get correct RAM size and max size + * values + */ +bool aspeed_soc_dram_init(AspeedSoCState *s, Error **errp) +{ + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + ram_addr_t ram_size, max_ram_size; + + ram_size = object_property_get_uint(OBJECT(&s->sdmc), "ram-size", + &error_abort); + max_ram_size = object_property_get_uint(OBJECT(&s->sdmc), "max-ram-size", + &error_abort); + + memory_region_init(&s->dram_container, OBJECT(s), "ram-container", + max_ram_size); + memory_region_add_subregion(&s->dram_container, 0, s->dram_mr); + + /* + * Add a memory region beyond the RAM region to let firmwares scan + * the address space with load/store and guess how much RAM the + * SoC has. + */ + if (ram_size < max_ram_size) { + DeviceState *dev = qdev_new(TYPE_UNIMPLEMENTED_DEVICE); + + qdev_prop_set_string(dev, "name", "ram-empty"); + qdev_prop_set_uint64(dev, "size", max_ram_size - ram_size); + if (!sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), errp)) { + return false; + } + + memory_region_add_subregion_overlap(&s->dram_container, ram_size, + sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0), -1000); + } + + memory_region_add_subregion(s->memory, + sc->memmap[ASPEED_DEV_SDRAM], &s->dram_container); + return true; +} + +void aspeed_mmio_map(AspeedSoCState *s, SysBusDevice *dev, int n, hwaddr addr) +{ + memory_region_add_subregion(s->memory, addr, + sysbus_mmio_get_region(dev, n)); +} + +void aspeed_mmio_map_unimplemented(AspeedSoCState *s, SysBusDevice *dev, + const char *name, hwaddr addr, uint64_t size) +{ + qdev_prop_set_string(DEVICE(dev), "name", name); + qdev_prop_set_uint64(DEVICE(dev), "size", size); + sysbus_realize(dev, &error_abort); + + memory_region_add_subregion_overlap(s->memory, addr, + sysbus_mmio_get_region(dev, 0), -1000); +} diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c index 48538c9360..3c2a4160cd 100644 --- a/hw/arm/bcm2835_peripherals.c +++ b/hw/arm/bcm2835_peripherals.c @@ -23,6 +23,13 @@ /* Capabilities for SD controller: no DMA, high-speed, default clocks etc. 
*/ #define BCM2835_SDHC_CAPAREG 0x52134b4 +/* + * According to Linux driver & DTS, dma channels 0--10 have separate IRQ, + * while channels 11--14 share one IRQ: + */ +#define SEPARATE_DMA_IRQ_MAX 10 +#define ORGATED_DMA_IRQ_COUNT 4 + static void create_unimp(BCM2835PeripheralState *ps, UnimplementedDeviceState *uds, const char *name, hwaddr ofs, hwaddr size) @@ -101,6 +108,11 @@ static void bcm2835_peripherals_init(Object *obj) /* DMA Channels */ object_initialize_child(obj, "dma", &s->dma, TYPE_BCM2835_DMA); + object_initialize_child(obj, "orgated-dma-irq", + &s->orgated_dma_irq, TYPE_OR_IRQ); + object_property_set_int(OBJECT(&s->orgated_dma_irq), "num-lines", + ORGATED_DMA_IRQ_COUNT, &error_abort); + object_property_add_const_link(OBJECT(&s->dma), "dma-mr", OBJECT(&s->gpu_bus_mr)); @@ -322,12 +334,24 @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp) memory_region_add_subregion(&s->peri_mr, DMA15_OFFSET, sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->dma), 1)); - for (n = 0; n <= 12; n++) { + for (n = 0; n <= SEPARATE_DMA_IRQ_MAX; n++) { sysbus_connect_irq(SYS_BUS_DEVICE(&s->dma), n, qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ, INTERRUPT_DMA0 + n)); } + if (!qdev_realize(DEVICE(&s->orgated_dma_irq), NULL, errp)) { + return; + } + for (n = 0; n < ORGATED_DMA_IRQ_COUNT; n++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s->dma), + SEPARATE_DMA_IRQ_MAX + 1 + n, + qdev_get_gpio_in(DEVICE(&s->orgated_dma_irq), n)); + } + qdev_connect_gpio_out(DEVICE(&s->orgated_dma_irq), 0, + qdev_get_gpio_in_named(DEVICE(&s->ic), + BCM2835_IC_GPU_IRQ, + INTERRUPT_DMA0 + SEPARATE_DMA_IRQ_MAX + 1)); /* THERMAL */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->thermal), errp)) { diff --git a/hw/arm/boot.c b/hw/arm/boot.c index 57efb61ee4..8ff315f431 100644 --- a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -8,7 +8,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qemu/error-report.h" #include "qapi/error.h" @@ -60,26 +59,6 @@ AddressSpace *arm_boot_address_space(ARMCPU *cpu, return cpu_get_address_space(cs, asidx); } -typedef enum { - FIXUP_NONE = 0, /* do nothing */ - FIXUP_TERMINATOR, /* end of insns */ - FIXUP_BOARDID, /* overwrite with board ID number */ - FIXUP_BOARD_SETUP, /* overwrite with board specific setup code address */ - FIXUP_ARGPTR_LO, /* overwrite with pointer to kernel args */ - FIXUP_ARGPTR_HI, /* overwrite with pointer to kernel args (high half) */ - FIXUP_ENTRYPOINT_LO, /* overwrite with kernel entry point */ - FIXUP_ENTRYPOINT_HI, /* overwrite with kernel entry point (high half) */ - FIXUP_GIC_CPU_IF, /* overwrite with GIC CPU interface address */ - FIXUP_BOOTREG, /* overwrite with boot register address */ - FIXUP_DSB, /* overwrite with correct DSB insn for cpu */ - FIXUP_MAX, -} FixupType; - -typedef struct ARMInsnFixup { - uint32_t insn; - FixupType fixup; -} ARMInsnFixup; - static const ARMInsnFixup bootloader_aarch64[] = { { 0x580000c0 }, /* ldr x0, arg ; Load the lower 32-bits of DTB */ { 0xaa1f03e1 }, /* mov x1, xzr */ @@ -150,9 +129,10 @@ static const ARMInsnFixup smpboot[] = { { 0, FIXUP_TERMINATOR } }; -static void write_bootloader(const char *name, hwaddr addr, - const ARMInsnFixup *insns, uint32_t *fixupcontext, - AddressSpace *as) +void arm_write_bootloader(const char *name, + AddressSpace *as, hwaddr addr, + const ARMInsnFixup *insns, + const uint32_t *fixupcontext) { /* Fix up the specified bootloader fragment and write it into * guest memory using rom_add_blob_fixed(). 
fixupcontext is @@ -214,8 +194,8 @@ static void default_write_secondary(ARMCPU *cpu, fixupcontext[FIXUP_DSB] = CP15_DSB_INSN; } - write_bootloader("smpboot", info->smp_loader_start, - smpboot, fixupcontext, as); + arm_write_bootloader("smpboot", as, info->smp_loader_start, + smpboot, fixupcontext); } void arm_write_secure_board_setup_dummy_smc(ARMCPU *cpu, @@ -479,18 +459,24 @@ static void fdt_add_psci_node(void *fdt) } /* - * If /psci node is present in provided DTB, assume that no fixup - * is necessary and all PSCI configuration should be taken as-is + * A pre-existing /psci node might specify function ID values + * that don't match QEMU's PSCI implementation. Delete the whole + * node and put our own in instead. */ rc = fdt_path_offset(fdt, "/psci"); if (rc >= 0) { - return; + qemu_fdt_nop_node(fdt, "/psci"); } qemu_fdt_add_subnode(fdt, "/psci"); - if (armcpu->psci_version == 2) { - const char comp[] = "arm,psci-0.2\0arm,psci"; - qemu_fdt_setprop(fdt, "/psci", "compatible", comp, sizeof(comp)); + if (armcpu->psci_version >= QEMU_PSCI_VERSION_0_2) { + if (armcpu->psci_version < QEMU_PSCI_VERSION_1_0) { + const char comp[] = "arm,psci-0.2\0arm,psci"; + qemu_fdt_setprop(fdt, "/psci", "compatible", comp, sizeof(comp)); + } else { + const char comp[] = "arm,psci-1.0\0arm,psci-0.2\0arm,psci"; + qemu_fdt_setprop(fdt, "/psci", "compatible", comp, sizeof(comp)); + } cpu_off_fn = QEMU_PSCI_0_2_FN_CPU_OFF; if (arm_feature(&armcpu->env, ARM_FEATURE_AARCH64)) { @@ -599,10 +585,23 @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo, } g_strfreev(node_path); + /* + * We drop all the memory nodes which correspond to empty NUMA nodes + * from the device tree, because the Linux NUMA binding document + * states they should not be generated. Linux will get the NUMA node + * IDs of the empty NUMA nodes from the distance map if they are needed. + * This means QEMU users may be obliged to provide command lines which + * configure distance maps when the empty NUMA node IDs are needed and + * Linux's default distance map isn't sufficient. + */ if (ms->numa_state != NULL && ms->numa_state->num_nodes > 0) { mem_base = binfo->loader_start; for (i = 0; i < ms->numa_state->num_nodes; i++) { mem_len = ms->numa_state->nodes[i].node_mem; + if (!mem_len) { + continue; + } + rc = fdt_add_memory_node(fdt, acells, mem_base, scells, mem_len, i); if (rc < 0) { @@ -665,8 +664,13 @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo, * the DTB is copied again upon reset, even if addr points into RAM. 
*/ rom_add_blob_fixed_as("dtb", fdt, size, addr, as); + qemu_register_reset_nosnapshotload(qemu_fdt_randomize_seeds, + rom_ptr_for_as(as, addr, size)); - g_free(fdt); + if (fdt != ms->fdt) { + g_free(ms->fdt); + ms->fdt = fdt; + } return size; @@ -743,7 +747,16 @@ static void do_cpu_reset(void *opaque) env->cp15.scr_el3 |= SCR_ATA; } if (cpu_isar_feature(aa64_sve, cpu)) { - env->cp15.cptr_el[3] |= CPTR_EZ; + env->cp15.cptr_el[3] |= R_CPTR_EL3_EZ_MASK; + env->vfp.zcr_el[3] = 0xf; + } + if (cpu_isar_feature(aa64_sme, cpu)) { + env->cp15.cptr_el[3] |= R_CPTR_EL3_ESM_MASK; + env->cp15.scr_el3 |= SCR_ENTP2; + env->vfp.smcr_el[3] = 0xf; + } + if (cpu_isar_feature(aa64_hcx, cpu)) { + env->cp15.scr_el3 |= SCR_HXEN; } /* AArch64 kernels never boot in secure mode */ assert(!info->secure_boot); @@ -792,7 +805,7 @@ static void do_cpu_reset(void *opaque) set_kernel_args(info, as); } } - } else { + } else if (info->secondary_cpu_reset_hook) { info->secondary_cpu_reset_hook(cpu, info); } } @@ -800,55 +813,6 @@ static void do_cpu_reset(void *opaque) } } -/** - * load_image_to_fw_cfg() - Load an image file into an fw_cfg entry identified - * by key. - * @fw_cfg: The firmware config instance to store the data in. - * @size_key: The firmware config key to store the size of the loaded - * data under, with fw_cfg_add_i32(). - * @data_key: The firmware config key to store the loaded data under, - * with fw_cfg_add_bytes(). - * @image_name: The name of the image file to load. If it is NULL, the - * function returns without doing anything. - * @try_decompress: Whether the image should be decompressed (gunzipped) before - * adding it to fw_cfg. If decompression fails, the image is - * loaded as-is. - * - * In case of failure, the function prints an error message to stderr and the - * process exits with status 1. - */ -static void load_image_to_fw_cfg(FWCfgState *fw_cfg, uint16_t size_key, - uint16_t data_key, const char *image_name, - bool try_decompress) -{ - size_t size = -1; - uint8_t *data; - - if (image_name == NULL) { - return; - } - - if (try_decompress) { - size = load_image_gzipped_buffer(image_name, - LOAD_IMAGE_MAX_GUNZIP_BYTES, &data); - } - - if (size == (size_t)-1) { - gchar *contents; - gsize length; - - if (!g_file_get_contents(image_name, &contents, &length, NULL)) { - error_report("failed to load \"%s\"", image_name); - exit(1); - } - size = length; - data = (uint8_t *)contents; - } - - fw_cfg_add_i32(fw_cfg, size_key, size); - fw_cfg_add_bytes(fw_cfg, data_key, data, size); -} - static int do_arm_linux_init(Object *obj, void *opaque) { if (object_dynamic_cast(obj, TYPE_ARM_LINUX_BOOT_IF)) { @@ -863,7 +827,7 @@ static int do_arm_linux_init(Object *obj, void *opaque) return 0; } -static int64_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry, +static ssize_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry, uint64_t *lowaddr, uint64_t *highaddr, int elf_machine, AddressSpace *as) { @@ -874,7 +838,7 @@ static int64_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry, } elf_header; int data_swab = 0; bool big_endian; - int64_t ret = -1; + ssize_t ret = -1; Error *err = NULL; @@ -996,7 +960,7 @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu, /* Set up for a direct boot of a kernel image file. 
*/ CPUState *cs; AddressSpace *as = arm_boot_address_space(cpu, info); - int kernel_size; + ssize_t kernel_size; int initrd_size; int is_linux = 0; uint64_t elf_entry; @@ -1018,16 +982,6 @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu, elf_machine = EM_ARM; } - if (!info->secondary_cpu_reset_hook) { - info->secondary_cpu_reset_hook = default_reset_secondary; - } - if (!info->write_secondary_boot) { - info->write_secondary_boot = default_write_secondary; - } - - if (info->nb_cpus == 0) - info->nb_cpus = 1; - /* Assume that raw images are linux kernels, and ELF images are not. */ kernel_size = arm_load_elf(info, &elf_entry, &image_low_addr, &image_high_addr, elf_machine, as); @@ -1085,7 +1039,7 @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu, if (kernel_size > info->ram_size) { error_report("kernel '%s' is too large to fit in RAM " - "(kernel size %d, RAM size %" PRId64 ")", + "(kernel size %zd, RAM size %" PRId64 ")", info->kernel_filename, kernel_size, info->ram_size); exit(1); } @@ -1201,12 +1155,9 @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu, fixupcontext[FIXUP_ENTRYPOINT_LO] = entry; fixupcontext[FIXUP_ENTRYPOINT_HI] = entry >> 32; - write_bootloader("bootloader", info->loader_start, - primary_loader, fixupcontext, as); + arm_write_bootloader("bootloader", as, info->loader_start, + primary_loader, fixupcontext); - if (info->nb_cpus > 1) { - info->write_secondary_boot(cpu, info); - } if (info->write_board_setup) { info->write_board_setup(cpu, info); } @@ -1287,6 +1238,9 @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info) { CPUState *cs; AddressSpace *as = arm_boot_address_space(cpu, info); + int boot_el; + CPUARMState *env = &cpu->env; + int nb_cpus = 0; /* * CPU objects (unlike devices) are not automatically reset on system @@ -1296,6 +1250,7 @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info) */ for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) { qemu_register_reset(do_cpu_reset, ARM_CPU(cs)); + nb_cpus++; } /* @@ -1317,6 +1272,87 @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info) arm_setup_direct_kernel_boot(cpu, info); } + /* + * Disable the PSCI conduit if it is set up to target the same + * or a lower EL than the one we're going to start the guest code in. + * This logic needs to agree with the code in do_cpu_reset() which + * decides whether we're going to boot the guest in the highest + * supported exception level or in a lower one. + */ + + /* + * If PSCI is enabled, then SMC calls all go to the PSCI handler and + * are never emulated to trap into guest code. It therefore does not + * make sense for the board to have a setup code fragment that runs + * in Secure, because this will probably need to itself issue an SMC of some + * kind as part of its operation. + */ + assert(info->psci_conduit == QEMU_PSCI_CONDUIT_DISABLED || + !info->secure_board_setup); + + /* Boot into highest supported EL ... */ + if (arm_feature(env, ARM_FEATURE_EL3)) { + boot_el = 3; + } else if (arm_feature(env, ARM_FEATURE_EL2)) { + boot_el = 2; + } else { + boot_el = 1; + } + /* ...except that if we're booting Linux we adjust the EL we boot into */ + if (info->is_linux && !info->secure_boot) { + boot_el = arm_feature(env, ARM_FEATURE_EL2) ? 
2 : 1; + } + + if ((info->psci_conduit == QEMU_PSCI_CONDUIT_HVC && boot_el >= 2) || + (info->psci_conduit == QEMU_PSCI_CONDUIT_SMC && boot_el == 3)) { + info->psci_conduit = QEMU_PSCI_CONDUIT_DISABLED; + } + + if (info->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED) { + for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) { + Object *cpuobj = OBJECT(cs); + + object_property_set_int(cpuobj, "psci-conduit", info->psci_conduit, + &error_abort); + /* + * Secondary CPUs start in PSCI powered-down state. Like the + * code in do_cpu_reset(), we assume first_cpu is the primary + * CPU. + */ + if (cs != first_cpu) { + object_property_set_bool(cpuobj, "start-powered-off", true, + &error_abort); + } + } + } + + if (info->psci_conduit == QEMU_PSCI_CONDUIT_DISABLED && + info->is_linux && nb_cpus > 1) { + /* + * We're booting Linux but not using PSCI, so for SMP we need + * to write a custom secondary CPU boot loader stub, and arrange + * for the secondary CPU reset to make the accompanying initialization. + */ + if (!info->secondary_cpu_reset_hook) { + info->secondary_cpu_reset_hook = default_reset_secondary; + } + if (!info->write_secondary_boot) { + info->write_secondary_boot = default_write_secondary; + } + info->write_secondary_boot(cpu, info); + } else { + /* + * No secondary boot stub; don't use the reset hook that would + * have set the CPU up to call it + */ + info->write_secondary_boot = NULL; + info->secondary_cpu_reset_hook = NULL; + } + + /* + * arm_load_dtb() may add a PSCI node so it must be called after we have + * decided whether to enable PSCI and set the psci-conduit CPU properties. + */ if (!info->skip_dtb_autoload && have_dtb(info)) { if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as, ms) < 0) { exit(1); diff --git a/hw/arm/cubieboard.c b/hw/arm/cubieboard.c index 294ba5de6e..5e3372a3c7 100644 --- a/hw/arm/cubieboard.c +++ b/hw/arm/cubieboard.c @@ -81,7 +81,7 @@ static void cubieboard_init(MachineState *machine) } /* Retrieve SD bus */ - di = drive_get_next(IF_SD); + di = drive_get(IF_SD, 0, 0); blk = di ? 
blk_by_legacy_dinfo(di) : NULL; bus = qdev_get_child_bus(DEVICE(a10), "sd-bus"); diff --git a/hw/arm/digic.c b/hw/arm/digic.c index 614232165c..6df5547977 100644 --- a/hw/arm/digic.c +++ b/hw/arm/digic.c @@ -39,10 +39,7 @@ static void digic_init(Object *obj) object_initialize_child(obj, "cpu", &s->cpu, ARM_CPU_TYPE_NAME("arm946")); for (i = 0; i < DIGIC4_NB_TIMERS; i++) { -#define DIGIC_TIMER_NAME_MLEN 11 - char name[DIGIC_TIMER_NAME_MLEN]; - - snprintf(name, DIGIC_TIMER_NAME_MLEN, "timer[%d]", i); + g_autofree char *name = g_strdup_printf("timer[%d]", i); object_initialize_child(obj, name, &s->timer[i], TYPE_DIGIC_TIMER); } diff --git a/hw/arm/digic_boards.c b/hw/arm/digic_boards.c index b771a3d8b7..4093af09cb 100644 --- a/hw/arm/digic_boards.c +++ b/hw/arm/digic_boards.c @@ -25,7 +25,6 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "hw/boards.h" #include "qemu/error-report.h" diff --git a/hw/arm/exynos4210.c b/hw/arm/exynos4210.c index 5c7a51bbad..8dafa2215b 100644 --- a/hw/arm/exynos4210.c +++ b/hw/arm/exynos4210.c @@ -101,6 +101,348 @@ #define EXYNOS4210_PL330_BASE1_ADDR 0x12690000 #define EXYNOS4210_PL330_BASE2_ADDR 0x12850000 +enum ExtGicId { + EXT_GIC_ID_MDMA_LCD0 = 66, + EXT_GIC_ID_PDMA0, + EXT_GIC_ID_PDMA1, + EXT_GIC_ID_TIMER0, + EXT_GIC_ID_TIMER1, + EXT_GIC_ID_TIMER2, + EXT_GIC_ID_TIMER3, + EXT_GIC_ID_TIMER4, + EXT_GIC_ID_MCT_L0, + EXT_GIC_ID_WDT, + EXT_GIC_ID_RTC_ALARM, + EXT_GIC_ID_RTC_TIC, + EXT_GIC_ID_GPIO_XB, + EXT_GIC_ID_GPIO_XA, + EXT_GIC_ID_MCT_L1, + EXT_GIC_ID_IEM_APC, + EXT_GIC_ID_IEM_IEC, + EXT_GIC_ID_NFC, + EXT_GIC_ID_UART0, + EXT_GIC_ID_UART1, + EXT_GIC_ID_UART2, + EXT_GIC_ID_UART3, + EXT_GIC_ID_UART4, + EXT_GIC_ID_MCT_G0, + EXT_GIC_ID_I2C0, + EXT_GIC_ID_I2C1, + EXT_GIC_ID_I2C2, + EXT_GIC_ID_I2C3, + EXT_GIC_ID_I2C4, + EXT_GIC_ID_I2C5, + EXT_GIC_ID_I2C6, + EXT_GIC_ID_I2C7, + EXT_GIC_ID_SPI0, + EXT_GIC_ID_SPI1, + EXT_GIC_ID_SPI2, + EXT_GIC_ID_MCT_G1, + EXT_GIC_ID_USB_HOST, + EXT_GIC_ID_USB_DEVICE, + EXT_GIC_ID_MODEMIF, + EXT_GIC_ID_HSMMC0, + EXT_GIC_ID_HSMMC1, + EXT_GIC_ID_HSMMC2, + EXT_GIC_ID_HSMMC3, + EXT_GIC_ID_SDMMC, + EXT_GIC_ID_MIPI_CSI_4LANE, + EXT_GIC_ID_MIPI_DSI_4LANE, + EXT_GIC_ID_MIPI_CSI_2LANE, + EXT_GIC_ID_MIPI_DSI_2LANE, + EXT_GIC_ID_ONENAND_AUDI, + EXT_GIC_ID_ROTATOR, + EXT_GIC_ID_FIMC0, + EXT_GIC_ID_FIMC1, + EXT_GIC_ID_FIMC2, + EXT_GIC_ID_FIMC3, + EXT_GIC_ID_JPEG, + EXT_GIC_ID_2D, + EXT_GIC_ID_PCIe, + EXT_GIC_ID_MIXER, + EXT_GIC_ID_HDMI, + EXT_GIC_ID_HDMI_I2C, + EXT_GIC_ID_MFC, + EXT_GIC_ID_TVENC, +}; + +enum ExtInt { + EXT_GIC_ID_EXTINT0 = 48, + EXT_GIC_ID_EXTINT1, + EXT_GIC_ID_EXTINT2, + EXT_GIC_ID_EXTINT3, + EXT_GIC_ID_EXTINT4, + EXT_GIC_ID_EXTINT5, + EXT_GIC_ID_EXTINT6, + EXT_GIC_ID_EXTINT7, + EXT_GIC_ID_EXTINT8, + EXT_GIC_ID_EXTINT9, + EXT_GIC_ID_EXTINT10, + EXT_GIC_ID_EXTINT11, + EXT_GIC_ID_EXTINT12, + EXT_GIC_ID_EXTINT13, + EXT_GIC_ID_EXTINT14, + EXT_GIC_ID_EXTINT15 +}; + +/* + * External GIC sources which are not from External Interrupt Combiner or + * External Interrupts are starting from EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ, + * which is INTG16 in Internal Interrupt Combiner. 
+ */ + +static const uint32_t +combiner_grp_to_gic_id[64 - EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ][8] = { + /* int combiner groups 16-19 */ + { }, { }, { }, { }, + /* int combiner group 20 */ + { 0, EXT_GIC_ID_MDMA_LCD0 }, + /* int combiner group 21 */ + { EXT_GIC_ID_PDMA0, EXT_GIC_ID_PDMA1 }, + /* int combiner group 22 */ + { EXT_GIC_ID_TIMER0, EXT_GIC_ID_TIMER1, EXT_GIC_ID_TIMER2, + EXT_GIC_ID_TIMER3, EXT_GIC_ID_TIMER4 }, + /* int combiner group 23 */ + { EXT_GIC_ID_RTC_ALARM, EXT_GIC_ID_RTC_TIC }, + /* int combiner group 24 */ + { EXT_GIC_ID_GPIO_XB, EXT_GIC_ID_GPIO_XA }, + /* int combiner group 25 */ + { EXT_GIC_ID_IEM_APC, EXT_GIC_ID_IEM_IEC }, + /* int combiner group 26 */ + { EXT_GIC_ID_UART0, EXT_GIC_ID_UART1, EXT_GIC_ID_UART2, EXT_GIC_ID_UART3, + EXT_GIC_ID_UART4 }, + /* int combiner group 27 */ + { EXT_GIC_ID_I2C0, EXT_GIC_ID_I2C1, EXT_GIC_ID_I2C2, EXT_GIC_ID_I2C3, + EXT_GIC_ID_I2C4, EXT_GIC_ID_I2C5, EXT_GIC_ID_I2C6, + EXT_GIC_ID_I2C7 }, + /* int combiner group 28 */ + { EXT_GIC_ID_SPI0, EXT_GIC_ID_SPI1, EXT_GIC_ID_SPI2 , EXT_GIC_ID_USB_HOST}, + /* int combiner group 29 */ + { EXT_GIC_ID_HSMMC0, EXT_GIC_ID_HSMMC1, EXT_GIC_ID_HSMMC2, + EXT_GIC_ID_HSMMC3, EXT_GIC_ID_SDMMC }, + /* int combiner group 30 */ + { EXT_GIC_ID_MIPI_CSI_4LANE, EXT_GIC_ID_MIPI_CSI_2LANE }, + /* int combiner group 31 */ + { EXT_GIC_ID_MIPI_DSI_4LANE, EXT_GIC_ID_MIPI_DSI_2LANE }, + /* int combiner group 32 */ + { EXT_GIC_ID_FIMC0, EXT_GIC_ID_FIMC1 }, + /* int combiner group 33 */ + { EXT_GIC_ID_FIMC2, EXT_GIC_ID_FIMC3 }, + /* int combiner group 34 */ + { EXT_GIC_ID_ONENAND_AUDI, EXT_GIC_ID_NFC }, + /* int combiner group 35 */ + { 0, 0, 0, EXT_GIC_ID_MCT_L1 }, + /* int combiner group 36 */ + { EXT_GIC_ID_MIXER }, + /* int combiner group 37 */ + { EXT_GIC_ID_EXTINT4, EXT_GIC_ID_EXTINT5, EXT_GIC_ID_EXTINT6, + EXT_GIC_ID_EXTINT7 }, + /* groups 38-50 */ + { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, + /* int combiner group 51 */ + { EXT_GIC_ID_MCT_L0 }, + /* group 52 */ + { }, + /* int combiner group 53 */ + { EXT_GIC_ID_WDT }, + /* groups 54-63 */ + { }, { }, { }, { }, { }, { }, { }, { }, { }, { } +}; + +#define EXYNOS4210_COMBINER_GET_IRQ_NUM(grp, bit) ((grp) * 8 + (bit)) +#define EXYNOS4210_COMBINER_GET_GRP_NUM(irq) ((irq) / 8) +#define EXYNOS4210_COMBINER_GET_BIT_NUM(irq) \ + ((irq) - 8 * EXYNOS4210_COMBINER_GET_GRP_NUM(irq)) + +/* + * Some interrupt lines go to multiple combiner inputs. + * This data structure defines those: each array element is + * a list of combiner inputs which are connected together; + * the one with the smallest interrupt ID value must be first. + * As with combiner_grp_to_gic_id[], we rely on (0, 0) not being + * wired to anything so we can use 0 as a terminator. 
+ */ +#define IRQNO(G, B) EXYNOS4210_COMBINER_GET_IRQ_NUM(G, B) +#define IRQNONE 0 + +#define COMBINERMAP_SIZE 16 + +static const int combinermap[COMBINERMAP_SIZE][6] = { + /* MDNIE_LCD1 */ + { IRQNO(0, 4), IRQNO(1, 0), IRQNONE }, + { IRQNO(0, 5), IRQNO(1, 1), IRQNONE }, + { IRQNO(0, 6), IRQNO(1, 2), IRQNONE }, + { IRQNO(0, 7), IRQNO(1, 3), IRQNONE }, + /* TMU */ + { IRQNO(2, 4), IRQNO(3, 4), IRQNONE }, + { IRQNO(2, 5), IRQNO(3, 5), IRQNONE }, + { IRQNO(2, 6), IRQNO(3, 6), IRQNONE }, + { IRQNO(2, 7), IRQNO(3, 7), IRQNONE }, + /* LCD1 */ + { IRQNO(11, 4), IRQNO(12, 0), IRQNONE }, + { IRQNO(11, 5), IRQNO(12, 1), IRQNONE }, + { IRQNO(11, 6), IRQNO(12, 2), IRQNONE }, + { IRQNO(11, 7), IRQNO(12, 3), IRQNONE }, + /* Multi-core timer */ + { IRQNO(1, 4), IRQNO(12, 4), IRQNO(35, 4), IRQNO(51, 4), IRQNO(53, 4), IRQNONE }, + { IRQNO(1, 5), IRQNO(12, 5), IRQNO(35, 5), IRQNO(51, 5), IRQNO(53, 5), IRQNONE }, + { IRQNO(1, 6), IRQNO(12, 6), IRQNO(35, 6), IRQNO(51, 6), IRQNO(53, 6), IRQNONE }, + { IRQNO(1, 7), IRQNO(12, 7), IRQNO(35, 7), IRQNO(51, 7), IRQNO(53, 7), IRQNONE }, +}; + +#undef IRQNO + +static const int *combinermap_entry(int irq) +{ + /* + * If the interrupt number passed in is the first entry in some + * line of the combinermap, return a pointer to that line; + * otherwise return NULL. + */ + int i; + for (i = 0; i < COMBINERMAP_SIZE; i++) { + if (combinermap[i][0] == irq) { + return combinermap[i]; + } + } + return NULL; +} + +static int mapline_size(const int *mapline) +{ + /* Return number of entries in this mapline in total */ + int i = 0; + + if (!mapline) { + /* Not in the map? IRQ goes to exactly one combiner input */ + return 1; + } + while (*mapline != IRQNONE) { + mapline++; + i++; + } + return i; +} + +/* + * Initialize board IRQs. + * These IRQs contain split Int/External Combiner and External GIC IRQs. + */ +static void exynos4210_init_board_irqs(Exynos4210State *s) +{ + uint32_t grp, bit, irq_id, n; + DeviceState *extgicdev = DEVICE(&s->ext_gic); + DeviceState *intcdev = DEVICE(&s->int_combiner); + DeviceState *extcdev = DEVICE(&s->ext_combiner); + int splitcount = 0; + DeviceState *splitter; + const int *mapline; + int numlines, splitin, in; + + for (n = 0; n < EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ; n++) { + irq_id = 0; + if (n == EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 4)) { + /* MCT_G0 is passed to External GIC */ + irq_id = EXT_GIC_ID_MCT_G0; + } + if (n == EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 5)) { + /* MCT_G1 is passed to External and GIC */ + irq_id = EXT_GIC_ID_MCT_G1; + } + + if (s->irq_table[n]) { + /* + * This must be some non-first entry in a combinermap line, + * and we've already filled it in. + */ + continue; + } + mapline = combinermap_entry(n); + /* + * We need to connect the IRQ to multiple inputs on both combiners + * and possibly also to the external GIC.
+ */ + numlines = 2 * mapline_size(mapline); + if (irq_id) { + numlines++; + } + assert(splitcount < EXYNOS4210_NUM_SPLITTERS); + splitter = DEVICE(&s->splitter[splitcount]); + qdev_prop_set_uint16(splitter, "num-lines", numlines); + qdev_realize(splitter, NULL, &error_abort); + splitcount++; + + in = n; + splitin = 0; + for (;;) { + s->irq_table[in] = qdev_get_gpio_in(splitter, 0); + qdev_connect_gpio_out(splitter, splitin, + qdev_get_gpio_in(intcdev, in)); + qdev_connect_gpio_out(splitter, splitin + 1, + qdev_get_gpio_in(extcdev, in)); + splitin += 2; + if (!mapline) { + break; + } + mapline++; + in = *mapline; + if (in == IRQNONE) { + break; + } + } + if (irq_id) { + qdev_connect_gpio_out(splitter, splitin, + qdev_get_gpio_in(extgicdev, irq_id - 32)); + } + } + for (; n < EXYNOS4210_MAX_INT_COMBINER_IN_IRQ; n++) { + /* these IDs are passed to Internal Combiner and External GIC */ + grp = EXYNOS4210_COMBINER_GET_GRP_NUM(n); + bit = EXYNOS4210_COMBINER_GET_BIT_NUM(n); + irq_id = combiner_grp_to_gic_id[grp - + EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ][bit]; + + if (s->irq_table[n]) { + /* + * This must be some non-first entry in a combinermap line, + * and we've already filled it in. + */ + continue; + } + + if (irq_id) { + assert(splitcount < EXYNOS4210_NUM_SPLITTERS); + splitter = DEVICE(&s->splitter[splitcount]); + qdev_prop_set_uint16(splitter, "num-lines", 2); + qdev_realize(splitter, NULL, &error_abort); + splitcount++; + s->irq_table[n] = qdev_get_gpio_in(splitter, 0); + qdev_connect_gpio_out(splitter, 0, qdev_get_gpio_in(intcdev, n)); + qdev_connect_gpio_out(splitter, 1, + qdev_get_gpio_in(extgicdev, irq_id - 32)); + } else { + s->irq_table[n] = qdev_get_gpio_in(intcdev, n); + } + } + /* + * We check this here to avoid a more obscure assert later when + * qdev_assert_realized_properly() checks that we realized every + * child object we initialized. + */ + assert(splitcount == EXYNOS4210_NUM_SPLITTERS); +} + +/* + * Get IRQ number from exynos4210 IRQ subsystem stub. 
+ * To identify IRQ source use internal combiner group and bit number + * grp - group number + * bit - bit number inside group + */ +uint32_t exynos4210_get_irq(uint32_t grp, uint32_t bit) +{ + return EXYNOS4210_COMBINER_GET_IRQ_NUM(grp, bit); +} + static uint8_t chipid_and_omr[] = { 0x11, 0x02, 0x21, 0x43, 0x09, 0x00, 0x00, 0x00 }; @@ -173,6 +515,9 @@ static DeviceState *pl330_create(uint32_t base, qemu_or_irq *orgate, int i; dev = qdev_new("pl330"); + object_property_set_link(OBJECT(dev), "memory", + OBJECT(get_system_memory()), + &error_fatal); qdev_prop_set_uint8(dev, "num_events", nevents); qdev_prop_set_uint8(dev, "num_chnls", 8); qdev_prop_set_uint8(dev, "num_periph_req", nreq); @@ -202,7 +547,6 @@ static void exynos4210_realize(DeviceState *socdev, Error **errp) { Exynos4210State *s = EXYNOS4210_SOC(socdev); MemoryRegion *system_mem = get_system_memory(); - qemu_irq gate_irq[EXYNOS4210_NCPUS][EXYNOS4210_IRQ_GATE_NINPUTS]; SysBusDevice *busdev; DeviceState *dev, *uart[4], *pl330[3]; int i, n; @@ -226,81 +570,63 @@ static void exynos4210_realize(DeviceState *socdev, Error **errp) qdev_realize(DEVICE(cpuobj), NULL, &error_fatal); } - /*** IRQs ***/ - - s->irq_table = exynos4210_init_irq(&s->irqs); - /* IRQ Gate */ for (i = 0; i < EXYNOS4210_NCPUS; i++) { - dev = qdev_new("exynos4210.irq_gate"); - qdev_prop_set_uint32(dev, "n_in", EXYNOS4210_IRQ_GATE_NINPUTS); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - /* Get IRQ Gate input in gate_irq */ - for (n = 0; n < EXYNOS4210_IRQ_GATE_NINPUTS; n++) { - gate_irq[i][n] = qdev_get_gpio_in(dev, n); - } - busdev = SYS_BUS_DEVICE(dev); - - /* Connect IRQ Gate output to CPU's IRQ line */ - sysbus_connect_irq(busdev, 0, - qdev_get_gpio_in(DEVICE(s->cpu[i]), ARM_CPU_IRQ)); + DeviceState *orgate = DEVICE(&s->cpu_irq_orgate[i]); + object_property_set_int(OBJECT(orgate), "num-lines", + EXYNOS4210_IRQ_GATE_NINPUTS, + &error_abort); + qdev_realize(orgate, NULL, &error_abort); + qdev_connect_gpio_out(orgate, 0, + qdev_get_gpio_in(DEVICE(s->cpu[i]), ARM_CPU_IRQ)); } /* Private memory region and Internal GIC */ - dev = qdev_new(TYPE_A9MPCORE_PRIV); - qdev_prop_set_uint32(dev, "num-cpu", EXYNOS4210_NCPUS); - busdev = SYS_BUS_DEVICE(dev); - sysbus_realize_and_unref(busdev, &error_fatal); + qdev_prop_set_uint32(DEVICE(&s->a9mpcore), "num-cpu", EXYNOS4210_NCPUS); + busdev = SYS_BUS_DEVICE(&s->a9mpcore); + sysbus_realize(busdev, &error_fatal); sysbus_mmio_map(busdev, 0, EXYNOS4210_SMP_PRIVATE_BASE_ADDR); for (n = 0; n < EXYNOS4210_NCPUS; n++) { - sysbus_connect_irq(busdev, n, gate_irq[n][0]); - } - for (n = 0; n < EXYNOS4210_INT_GIC_NIRQ; n++) { - s->irqs.int_gic_irq[n] = qdev_get_gpio_in(dev, n); + sysbus_connect_irq(busdev, n, + qdev_get_gpio_in(DEVICE(&s->cpu_irq_orgate[n]), 0)); } /* Cache controller */ sysbus_create_simple("l2x0", EXYNOS4210_L2X0_BASE_ADDR, NULL); /* External GIC */ - dev = qdev_new("exynos4210.gic"); - qdev_prop_set_uint32(dev, "num-cpu", EXYNOS4210_NCPUS); - busdev = SYS_BUS_DEVICE(dev); - sysbus_realize_and_unref(busdev, &error_fatal); + qdev_prop_set_uint32(DEVICE(&s->ext_gic), "num-cpu", EXYNOS4210_NCPUS); + busdev = SYS_BUS_DEVICE(&s->ext_gic); + sysbus_realize(busdev, &error_fatal); /* Map CPU interface */ sysbus_mmio_map(busdev, 0, EXYNOS4210_EXT_GIC_CPU_BASE_ADDR); /* Map Distributer interface */ sysbus_mmio_map(busdev, 1, EXYNOS4210_EXT_GIC_DIST_BASE_ADDR); for (n = 0; n < EXYNOS4210_NCPUS; n++) { - sysbus_connect_irq(busdev, n, gate_irq[n][1]); - } - for (n = 0; n < EXYNOS4210_EXT_GIC_NIRQ; n++) { - 
s->irqs.ext_gic_irq[n] = qdev_get_gpio_in(dev, n); + sysbus_connect_irq(busdev, n, + qdev_get_gpio_in(DEVICE(&s->cpu_irq_orgate[n]), 1)); } /* Internal Interrupt Combiner */ - dev = qdev_new("exynos4210.combiner"); - busdev = SYS_BUS_DEVICE(dev); - sysbus_realize_and_unref(busdev, &error_fatal); + busdev = SYS_BUS_DEVICE(&s->int_combiner); + sysbus_realize(busdev, &error_fatal); for (n = 0; n < EXYNOS4210_MAX_INT_COMBINER_OUT_IRQ; n++) { - sysbus_connect_irq(busdev, n, s->irqs.int_gic_irq[n]); + sysbus_connect_irq(busdev, n, + qdev_get_gpio_in(DEVICE(&s->a9mpcore), n)); } - exynos4210_combiner_get_gpioin(&s->irqs, dev, 0); sysbus_mmio_map(busdev, 0, EXYNOS4210_INT_COMBINER_BASE_ADDR); /* External Interrupt Combiner */ - dev = qdev_new("exynos4210.combiner"); - qdev_prop_set_uint32(dev, "external", 1); - busdev = SYS_BUS_DEVICE(dev); - sysbus_realize_and_unref(busdev, &error_fatal); + qdev_prop_set_uint32(DEVICE(&s->ext_combiner), "external", 1); + busdev = SYS_BUS_DEVICE(&s->ext_combiner); + sysbus_realize(busdev, &error_fatal); for (n = 0; n < EXYNOS4210_MAX_INT_COMBINER_OUT_IRQ; n++) { - sysbus_connect_irq(busdev, n, s->irqs.ext_gic_irq[n]); + sysbus_connect_irq(busdev, n, qdev_get_gpio_in(DEVICE(&s->ext_gic), n)); } - exynos4210_combiner_get_gpioin(&s->irqs, dev, 1); sysbus_mmio_map(busdev, 0, EXYNOS4210_EXT_COMBINER_BASE_ADDR); /* Initialize board IRQs. */ - exynos4210_init_board_irqs(&s->irqs); + exynos4210_init_board_irqs(s); /*** Memory ***/ @@ -485,6 +811,23 @@ static void exynos4210_init(Object *obj) object_initialize_child(obj, name, orgate, TYPE_OR_IRQ); g_free(name); } + + for (i = 0; i < ARRAY_SIZE(s->cpu_irq_orgate); i++) { + g_autofree char *name = g_strdup_printf("cpu-irq-orgate%d", i); + object_initialize_child(obj, name, &s->cpu_irq_orgate[i], TYPE_OR_IRQ); + } + + for (i = 0; i < ARRAY_SIZE(s->splitter); i++) { + g_autofree char *name = g_strdup_printf("irq-splitter%d", i); + object_initialize_child(obj, name, &s->splitter[i], TYPE_SPLIT_IRQ); + } + + object_initialize_child(obj, "a9mpcore", &s->a9mpcore, TYPE_A9MPCORE_PRIV); + object_initialize_child(obj, "ext-gic", &s->ext_gic, TYPE_EXYNOS4210_GIC); + object_initialize_child(obj, "int-combiner", &s->int_combiner, + TYPE_EXYNOS4210_COMBINER); + object_initialize_child(obj, "ext-combiner", &s->ext_combiner, + TYPE_EXYNOS4210_COMBINER); } static void exynos4210_class_init(ObjectClass *klass, void *data) diff --git a/hw/arm/exynos4_boards.c b/hw/arm/exynos4_boards.c index 35dd9875da..ef5bcbc212 100644 --- a/hw/arm/exynos4_boards.c +++ b/hw/arm/exynos4_boards.c @@ -67,7 +67,6 @@ static unsigned long exynos4_board_ram_size[EXYNOS4_NUM_OF_BOARDS] = { static struct arm_boot_info exynos4_board_binfo = { .loader_start = EXYNOS4210_BASE_BOOT_ADDR, .smp_loader_start = EXYNOS4210_SMP_BOOT_ADDR, - .nb_cpus = EXYNOS4210_NCPUS, .write_secondary_boot = exynos4210_write_secondary, }; diff --git a/hw/arm/fby35.c b/hw/arm/fby35.c new file mode 100644 index 0000000000..90c04bbc33 --- /dev/null +++ b/hw/arm/fby35.c @@ -0,0 +1,192 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. (http://www.meta.com) + * + * This code is licensed under the GPL version 2 or later. See the COPYING + * file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "sysemu/sysemu.h" +#include "sysemu/block-backend.h" +#include "hw/boards.h" +#include "hw/qdev-clock.h" +#include "hw/arm/aspeed_soc.h" +#include "hw/arm/boot.h" + +#define TYPE_FBY35 MACHINE_TYPE_NAME("fby35") +OBJECT_DECLARE_SIMPLE_TYPE(Fby35State, FBY35); + +struct Fby35State { + MachineState parent_obj; + + MemoryRegion bmc_memory; + MemoryRegion bmc_dram; + MemoryRegion bmc_boot_rom; + MemoryRegion bic_memory; + Clock *bic_sysclk; + + AspeedSoCState bmc; + AspeedSoCState bic; + + bool mmio_exec; +}; + +#define FBY35_BMC_RAM_SIZE (2 * GiB) +#define FBY35_BMC_FIRMWARE_ADDR 0x0 + +static void fby35_bmc_write_boot_rom(DriveInfo *dinfo, MemoryRegion *mr, + hwaddr offset, size_t rom_size, + Error **errp) +{ + BlockBackend *blk = blk_by_legacy_dinfo(dinfo); + g_autofree void *storage = NULL; + int64_t size; + + /* + * The block backend size should have already been 'validated' by + * the creation of the m25p80 object. + */ + size = blk_getlength(blk); + if (size <= 0) { + error_setg(errp, "failed to get flash size"); + return; + } + + if (rom_size > size) { + rom_size = size; + } + + storage = g_malloc0(rom_size); + if (blk_pread(blk, 0, rom_size, storage, 0) < 0) { + error_setg(errp, "failed to read the initial flash content"); + return; + } + + /* TODO: find a better way to install the ROM */ + memcpy(memory_region_get_ram_ptr(mr) + offset, storage, rom_size); +} + +static void fby35_bmc_init(Fby35State *s) +{ + DriveInfo *drive0 = drive_get(IF_MTD, 0, 0); + + object_initialize_child(OBJECT(s), "bmc", &s->bmc, "ast2600-a3"); + + memory_region_init(&s->bmc_memory, OBJECT(&s->bmc), "bmc-memory", + UINT64_MAX); + memory_region_init_ram(&s->bmc_dram, OBJECT(&s->bmc), "bmc-dram", + FBY35_BMC_RAM_SIZE, &error_abort); + + object_property_set_int(OBJECT(&s->bmc), "ram-size", FBY35_BMC_RAM_SIZE, + &error_abort); + object_property_set_link(OBJECT(&s->bmc), "memory", OBJECT(&s->bmc_memory), + &error_abort); + object_property_set_link(OBJECT(&s->bmc), "dram", OBJECT(&s->bmc_dram), + &error_abort); + object_property_set_int(OBJECT(&s->bmc), "hw-strap1", 0x000000C0, + &error_abort); + object_property_set_int(OBJECT(&s->bmc), "hw-strap2", 0x00000003, + &error_abort); + aspeed_soc_uart_set_chr(&s->bmc, ASPEED_DEV_UART5, serial_hd(0)); + qdev_realize(DEVICE(&s->bmc), NULL, &error_abort); + + aspeed_board_init_flashes(&s->bmc.fmc, "n25q00", 2, 0); + + /* Install first FMC flash content as a boot rom. 
*/ + if (drive0) { + AspeedSMCFlash *fl = &s->bmc.fmc.flashes[0]; + MemoryRegion *boot_rom = g_new(MemoryRegion, 1); + uint64_t size = memory_region_size(&fl->mmio); + + if (s->mmio_exec) { + memory_region_init_alias(boot_rom, NULL, "aspeed.boot_rom", + &fl->mmio, 0, size); + memory_region_add_subregion(&s->bmc_memory, FBY35_BMC_FIRMWARE_ADDR, + boot_rom); + } else { + + memory_region_init_rom(boot_rom, NULL, "aspeed.boot_rom", + size, &error_abort); + memory_region_add_subregion(&s->bmc_memory, FBY35_BMC_FIRMWARE_ADDR, + boot_rom); + fby35_bmc_write_boot_rom(drive0, boot_rom, FBY35_BMC_FIRMWARE_ADDR, + size, &error_abort); + } + } +} + +static void fby35_bic_init(Fby35State *s) +{ + s->bic_sysclk = clock_new(OBJECT(s), "SYSCLK"); + clock_set_hz(s->bic_sysclk, 200000000ULL); + + object_initialize_child(OBJECT(s), "bic", &s->bic, "ast1030-a1"); + + memory_region_init(&s->bic_memory, OBJECT(&s->bic), "bic-memory", + UINT64_MAX); + + qdev_connect_clock_in(DEVICE(&s->bic), "sysclk", s->bic_sysclk); + object_property_set_link(OBJECT(&s->bic), "memory", OBJECT(&s->bic_memory), + &error_abort); + aspeed_soc_uart_set_chr(&s->bic, ASPEED_DEV_UART5, serial_hd(1)); + qdev_realize(DEVICE(&s->bic), NULL, &error_abort); + + aspeed_board_init_flashes(&s->bic.fmc, "sst25vf032b", 2, 2); + aspeed_board_init_flashes(&s->bic.spi[0], "sst25vf032b", 2, 4); + aspeed_board_init_flashes(&s->bic.spi[1], "sst25vf032b", 2, 6); +} + +static void fby35_init(MachineState *machine) +{ + Fby35State *s = FBY35(machine); + + fby35_bmc_init(s); + fby35_bic_init(s); +} + + +static bool fby35_get_mmio_exec(Object *obj, Error **errp) +{ + return FBY35(obj)->mmio_exec; +} + +static void fby35_set_mmio_exec(Object *obj, bool value, Error **errp) +{ + FBY35(obj)->mmio_exec = value; +} + +static void fby35_instance_init(Object *obj) +{ + FBY35(obj)->mmio_exec = false; +} + +static void fby35_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Meta Platforms fby35"; + mc->init = fby35_init; + mc->no_floppy = 1; + mc->no_cdrom = 1; + mc->min_cpus = mc->max_cpus = mc->default_cpus = 3; + + object_class_property_add_bool(oc, "execute-in-place", + fby35_get_mmio_exec, + fby35_set_mmio_exec); + object_class_property_set_description(oc, "execute-in-place", + "boot directly from CE0 flash device"); +} + +static const TypeInfo fby35_types[] = { + { + .name = MACHINE_TYPE_NAME("fby35"), + .parent = TYPE_MACHINE, + .class_init = fby35_class_init, + .instance_size = sizeof(Fby35State), + .instance_init = fby35_instance_init, + }, +}; + +DEFINE_TYPES(fby35_types); diff --git a/hw/arm/fsl-imx6ul.c b/hw/arm/fsl-imx6ul.c index e0128d7316..f189712329 100644 --- a/hw/arm/fsl-imx6ul.c +++ b/hw/arm/fsl-imx6ul.c @@ -166,8 +166,6 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) return; } - object_property_set_int(OBJECT(&s->cpu), "psci-conduit", - QEMU_PSCI_CONDUIT_SMC, &error_abort); qdev_realize(DEVICE(&s->cpu), NULL, &error_abort); /* @@ -534,6 +532,13 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) */ create_unimplemented_device("sdma", FSL_IMX6UL_SDMA_ADDR, 0x4000); + /* + * SAI (Audio SSI (Synchronous Serial Interface)) + */ + create_unimplemented_device("sai1", FSL_IMX6UL_SAI1_ADDR, 0x4000); + create_unimplemented_device("sai2", FSL_IMX6UL_SAI2_ADDR, 0x4000); + create_unimplemented_device("sai3", FSL_IMX6UL_SAI3_ADDR, 0x4000); + /* * PWM */ @@ -542,6 +547,11 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) create_unimplemented_device("pwm3", 
FSL_IMX6UL_PWM3_ADDR, 0x4000); create_unimplemented_device("pwm4", FSL_IMX6UL_PWM4_ADDR, 0x4000); + /* + * Audio ASRC (asynchronous sample rate converter) + */ + create_unimplemented_device("asrc", FSL_IMX6UL_ASRC_ADDR, 0x4000); + /* * CAN */ diff --git a/hw/arm/fsl-imx7.c b/hw/arm/fsl-imx7.c index 2ff2cab924..cc6fdb9373 100644 --- a/hw/arm/fsl-imx7.c +++ b/hw/arm/fsl-imx7.c @@ -159,9 +159,6 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) for (i = 0; i < smp_cpus; i++) { o = OBJECT(&s->cpu[i]); - object_property_set_int(o, "psci-conduit", QEMU_PSCI_CONDUIT_SMC, - &error_abort); - /* On uniprocessor, the CBAR is set to 0 */ if (smp_cpus > 1) { object_property_set_int(o, "reset-cbar", FSL_IMX7_A7MPCORE_ADDR, @@ -169,7 +166,10 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) } if (i) { - /* Secondary CPUs start in PSCI powered-down state */ + /* + * Secondary CPUs start in powered-down state (and can be + * powered up via the SRC system reset controller) + */ object_property_set_bool(o, "start-powered-off", true, &error_abort); } @@ -467,6 +467,13 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) create_unimplemented_device("can1", FSL_IMX7_CAN1_ADDR, FSL_IMX7_CANn_SIZE); create_unimplemented_device("can2", FSL_IMX7_CAN2_ADDR, FSL_IMX7_CANn_SIZE); + /* + * SAI (Audio SSI (Synchronous Serial Interface)) + */ + create_unimplemented_device("sai1", FSL_IMX7_SAI1_ADDR, FSL_IMX7_SAIn_SIZE); + create_unimplemented_device("sai2", FSL_IMX7_SAI2_ADDR, FSL_IMX7_SAIn_SIZE); + create_unimplemented_device("sai2", FSL_IMX7_SAI3_ADDR, FSL_IMX7_SAIn_SIZE); + /* * OCOTP */ diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c index c3cb315dbc..f12aacea6b 100644 --- a/hw/arm/highbank.c +++ b/hw/arm/highbank.c @@ -18,7 +18,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qapi/error.h" #include "hw/sysbus.h" @@ -49,66 +48,6 @@ /* Board init. */ -static void hb_write_board_setup(ARMCPU *cpu, - const struct arm_boot_info *info) -{ - arm_write_secure_board_setup_dummy_smc(cpu, info, MVBAR_ADDR); -} - -static void hb_write_secondary(ARMCPU *cpu, const struct arm_boot_info *info) -{ - int n; - uint32_t smpboot[] = { - 0xee100fb0, /* mrc p15, 0, r0, c0, c0, 5 - read current core id */ - 0xe210000f, /* ands r0, r0, #0x0f */ - 0xe3a03040, /* mov r3, #0x40 - jump address is 0x40 + 0x10 * core id */ - 0xe0830200, /* add r0, r3, r0, lsl #4 */ - 0xe59f2024, /* ldr r2, privbase */ - 0xe3a01001, /* mov r1, #1 */ - 0xe5821100, /* str r1, [r2, #256] - set GICC_CTLR.Enable */ - 0xe3a010ff, /* mov r1, #0xff */ - 0xe5821104, /* str r1, [r2, #260] - set GICC_PMR.Priority to 0xff */ - 0xf57ff04f, /* dsb */ - 0xe320f003, /* wfi */ - 0xe5901000, /* ldr r1, [r0] */ - 0xe1110001, /* tst r1, r1 */ - 0x0afffffb, /* beq */ - 0xe12fff11, /* bx r1 */ - MPCORE_PERIPHBASE /* privbase: MPCore peripheral base address. 
*/ - }; - for (n = 0; n < ARRAY_SIZE(smpboot); n++) { - smpboot[n] = tswap32(smpboot[n]); - } - rom_add_blob_fixed_as("smpboot", smpboot, sizeof(smpboot), SMP_BOOT_ADDR, - arm_boot_address_space(cpu, info)); -} - -static void hb_reset_secondary(ARMCPU *cpu, const struct arm_boot_info *info) -{ - CPUARMState *env = &cpu->env; - - switch (info->nb_cpus) { - case 4: - address_space_stl_notdirty(&address_space_memory, - SMP_BOOT_REG + 0x30, 0, - MEMTXATTRS_UNSPECIFIED, NULL); - /* fallthrough */ - case 3: - address_space_stl_notdirty(&address_space_memory, - SMP_BOOT_REG + 0x20, 0, - MEMTXATTRS_UNSPECIFIED, NULL); - /* fallthrough */ - case 2: - address_space_stl_notdirty(&address_space_memory, - SMP_BOOT_REG + 0x10, 0, - MEMTXATTRS_UNSPECIFIED, NULL); - env->regs[15] = SMP_BOOT_ADDR; - break; - default: - break; - } -} - #define NUM_REGS 0x200 static void hb_regs_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) @@ -272,12 +211,6 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id) object_property_set_int(cpuobj, "psci-conduit", QEMU_PSCI_CONDUIT_SMC, &error_abort); - if (n) { - /* Secondary CPUs start in PSCI powered-down state */ - object_property_set_bool(cpuobj, "start-powered-off", true, - &error_abort); - } - if (object_property_find(cpuobj, "reset-cbar")) { object_property_set_int(cpuobj, "reset-cbar", MPCORE_PERIPHBASE, &error_abort); @@ -391,13 +324,9 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id) * clear that the value is meaningless. */ highbank_binfo.board_id = -1; - highbank_binfo.nb_cpus = smp_cpus; highbank_binfo.loader_start = 0; - highbank_binfo.write_secondary_boot = hb_write_secondary; - highbank_binfo.secondary_cpu_reset_hook = hb_reset_secondary; highbank_binfo.board_setup_addr = BOARD_SETUP_ADDR; - highbank_binfo.write_board_setup = hb_write_board_setup; - highbank_binfo.secure_board_setup = true; + highbank_binfo.psci_conduit = QEMU_PSCI_CONDUIT_SMC; arm_load_kernel(ARM_CPU(first_cpu), machine, &highbank_binfo); } diff --git a/hw/arm/imx25_pdk.c b/hw/arm/imx25_pdk.c index bd16acd4d9..b4f7f4e8a7 100644 --- a/hw/arm/imx25_pdk.c +++ b/hw/arm/imx25_pdk.c @@ -114,8 +114,7 @@ static void imx25_pdk_init(MachineState *machine) imx25_pdk_binfo.ram_size = machine->ram_size; imx25_pdk_binfo.loader_start = FSL_IMX25_SDRAM0_ADDR; - imx25_pdk_binfo.board_id = 1771, - imx25_pdk_binfo.nb_cpus = 1; + imx25_pdk_binfo.board_id = 1771; for (i = 0; i < FSL_IMX25_NUM_ESDHCS; i++) { BusState *bus; @@ -123,7 +122,7 @@ static void imx25_pdk_init(MachineState *machine) DriveInfo *di; BlockBackend *blk; - di = drive_get_next(IF_SD); + di = drive_get(IF_SD, 0, i); blk = di ? 
blk_by_legacy_dinfo(di) : NULL; bus = qdev_get_child_bus(DEVICE(&s->soc.esdhc[i]), "sd-bus"); carddev = qdev_new(TYPE_SD_CARD); diff --git a/hw/arm/integratorcp.c b/hw/arm/integratorcp.c index 16e8985953..b109ece3ae 100644 --- a/hw/arm/integratorcp.c +++ b/hw/arm/integratorcp.c @@ -649,7 +649,7 @@ static void integratorcp_init(MachineState *machine) qdev_get_gpio_in_named(icp, ICP_GPIO_MMC_WPROT, 0)); qdev_connect_gpio_out_named(dev, "card-inserted", 0, qdev_get_gpio_in_named(icp, ICP_GPIO_MMC_CARDIN, 0)); - dinfo = drive_get_next(IF_SD); + dinfo = drive_get(IF_SD, 0, 0); if (dinfo) { DeviceState *card; diff --git a/hw/arm/kzm.c b/hw/arm/kzm.c index 39559c44c2..b1b281c9ac 100644 --- a/hw/arm/kzm.c +++ b/hw/arm/kzm.c @@ -124,7 +124,6 @@ static void kzm_init(MachineState *machine) } kzm_binfo.ram_size = machine->ram_size; - kzm_binfo.nb_cpus = 1; if (!qtest_enabled()) { arm_load_kernel(&s->soc.cpu, machine, &kzm_binfo); diff --git a/hw/arm/mcimx6ul-evk.c b/hw/arm/mcimx6ul-evk.c index 77fae874b1..d83c3c380e 100644 --- a/hw/arm/mcimx6ul-evk.c +++ b/hw/arm/mcimx6ul-evk.c @@ -34,7 +34,7 @@ static void mcimx6ul_evk_init(MachineState *machine) .loader_start = FSL_IMX6UL_MMDC_ADDR, .board_id = -1, .ram_size = machine->ram_size, - .nb_cpus = machine->smp.cpus, + .psci_conduit = QEMU_PSCI_CONDUIT_SMC, }; s = FSL_IMX6UL(object_new(TYPE_FSL_IMX6UL)); @@ -52,7 +52,7 @@ static void mcimx6ul_evk_init(MachineState *machine) DriveInfo *di; BlockBackend *blk; - di = drive_get_next(IF_SD); + di = drive_get(IF_SD, 0, i); blk = di ? blk_by_legacy_dinfo(di) : NULL; bus = qdev_get_child_bus(DEVICE(&s->usdhc[i]), "sd-bus"); carddev = qdev_new(TYPE_SD_CARD); diff --git a/hw/arm/mcimx7d-sabre.c b/hw/arm/mcimx7d-sabre.c index 935d4b0f1c..6182b15f19 100644 --- a/hw/arm/mcimx7d-sabre.c +++ b/hw/arm/mcimx7d-sabre.c @@ -36,7 +36,7 @@ static void mcimx7d_sabre_init(MachineState *machine) .loader_start = FSL_IMX7_MMDC_ADDR, .board_id = -1, .ram_size = machine->ram_size, - .nb_cpus = machine->smp.cpus, + .psci_conduit = QEMU_PSCI_CONDUIT_SMC, }; s = FSL_IMX7(object_new(TYPE_FSL_IMX7)); @@ -52,7 +52,7 @@ static void mcimx7d_sabre_init(MachineState *machine) DriveInfo *di; BlockBackend *blk; - di = drive_get_next(IF_SD); + di = drive_get(IF_SD, 0, i); blk = di ? 
blk_by_legacy_dinfo(di) : NULL; bus = qdev_get_child_bus(DEVICE(&s->usdhc[i]), "sd-bus"); carddev = qdev_new(TYPE_SD_CARD); diff --git a/hw/arm/meson.build b/hw/arm/meson.build index 721a8eb8be..92f9f6e000 100644 --- a/hw/arm/meson.build +++ b/hw/arm/meson.build @@ -1,6 +1,5 @@ arm_ss = ss.source_set() arm_ss.add(files('boot.c'), fdt) -arm_ss.add(when: 'CONFIG_PLATFORM_BUS', if_true: files('sysbus-fdt.c')) arm_ss.add(when: 'CONFIG_ARM_VIRT', if_true: files('virt.c')) arm_ss.add(when: 'CONFIG_ACPI', if_true: files('virt-acpi-build.c')) arm_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic_boards.c')) @@ -48,7 +47,12 @@ arm_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal.c', 'xlnx-ver arm_ss.add(when: 'CONFIG_FSL_IMX25', if_true: files('fsl-imx25.c', 'imx25_pdk.c')) arm_ss.add(when: 'CONFIG_FSL_IMX31', if_true: files('fsl-imx31.c', 'kzm.c')) arm_ss.add(when: 'CONFIG_FSL_IMX6', if_true: files('fsl-imx6.c')) -arm_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_soc.c', 'aspeed.c', 'aspeed_ast2600.c')) +arm_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files( + 'aspeed_soc.c', + 'aspeed.c', + 'aspeed_ast2600.c', + 'aspeed_ast10x0.c', + 'fby35.c')) arm_ss.add(when: 'CONFIG_MPS2', if_true: files('mps2.c')) arm_ss.add(when: 'CONFIG_MPS2', if_true: files('mps2-tz.c')) arm_ss.add(when: 'CONFIG_MSF2', if_true: files('msf2-soc.c')) diff --git a/hw/arm/microbit.c b/hw/arm/microbit.c index e9494334ce..50df362088 100644 --- a/hw/arm/microbit.c +++ b/hw/arm/microbit.c @@ -57,7 +57,7 @@ static void microbit_init(MachineState *machine) mr, -1); armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, - s->nrf51.flash_size); + 0, s->nrf51.flash_size); } static void microbit_machine_class_init(ObjectClass *oc, void *data) diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c index e23830f4b7..284c09c91d 100644 --- a/hw/arm/mps2-tz.c +++ b/hw/arm/mps2-tz.c @@ -32,7 +32,7 @@ * Application Note AN524: * https://developer.arm.com/documentation/dai0524/latest/ * Application Note AN547: - * https://developer.arm.com/-/media/Arm%20Developer%20Community/PDF/DAI0547B_SSE300_PLUS_U55_FPGA_for_mps3.pdf + * https://developer.arm.com/documentation/dai0547/latest/ * * The AN505 defers to the Cortex-M33 processor ARMv8M IoT Kit FVP User Guide * (ARM ECM0601256) for the details of some of the device layout: @@ -373,6 +373,11 @@ static qemu_irq get_sse_irq_in(MPS2TZMachineState *mms, int irqno) } } +/* Union describing the device-specific extra data we pass to the devfn. */ +typedef union PPCExtraData { + bool i2c_internal; +} PPCExtraData; + /* Most of the devices in the AN505 FPGA image sit behind * Peripheral Protection Controllers. These data structures * define the layout of which devices sit behind which PPCs. 
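/*
 * A condensed, hypothetical sketch of the PPCExtraData flow introduced in
 * the hunks below: each PPCPortInfo entry can carry optional per-device
 * data in a new extradata field, and the generic PPC wiring loop hands the
 * devfn a pointer to that field. The port name and address in the entry
 * below are invented for illustration only; the real table and devfn
 * updates follow in this patch.
 *
 *   { "i2c9", make_i2c, &mms->i2c[0], 0x40209000, 0x1000, {},
 *     { .i2c_internal = true } },
 *
 * and, in mps2tz_common_init():
 *
 *   mr = pinfo->devfn(mms, pinfo->opaque, pinfo->name, pinfo->size,
 *                     pinfo->irqs, &pinfo->extradata);
 */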
@@ -382,7 +387,8 @@ static qemu_irq get_sse_irq_in(MPS2TZMachineState *mms, int irqno) */ typedef MemoryRegion *MakeDevFn(MPS2TZMachineState *mms, void *opaque, const char *name, hwaddr size, - const int *irqs); + const int *irqs, + const PPCExtraData *extradata); typedef struct PPCPortInfo { const char *name; @@ -391,6 +397,7 @@ typedef struct PPCPortInfo { hwaddr addr; hwaddr size; int irqs[3]; /* currently no device needs more IRQ lines than this */ + PPCExtraData extradata; /* to pass device-specific info to the devfn */ } PPCPortInfo; typedef struct PPCInfo { @@ -401,7 +408,8 @@ typedef struct PPCInfo { static MemoryRegion *make_unimp_dev(MPS2TZMachineState *mms, void *opaque, const char *name, hwaddr size, - const int *irqs) + const int *irqs, + const PPCExtraData *extradata) { /* Initialize, configure and realize a TYPE_UNIMPLEMENTED_DEVICE, * and return a pointer to its MemoryRegion. @@ -417,7 +425,7 @@ static MemoryRegion *make_unimp_dev(MPS2TZMachineState *mms, static MemoryRegion *make_uart(MPS2TZMachineState *mms, void *opaque, const char *name, hwaddr size, - const int *irqs) + const int *irqs, const PPCExtraData *extradata) { /* The irq[] array is tx, rx, combined, in that order */ MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms); @@ -441,7 +449,7 @@ static MemoryRegion *make_uart(MPS2TZMachineState *mms, void *opaque, static MemoryRegion *make_scc(MPS2TZMachineState *mms, void *opaque, const char *name, hwaddr size, - const int *irqs) + const int *irqs, const PPCExtraData *extradata) { MPS2SCC *scc = opaque; DeviceState *sccdev; @@ -465,7 +473,7 @@ static MemoryRegion *make_scc(MPS2TZMachineState *mms, void *opaque, static MemoryRegion *make_fpgaio(MPS2TZMachineState *mms, void *opaque, const char *name, hwaddr size, - const int *irqs) + const int *irqs, const PPCExtraData *extradata) { MPS2FPGAIO *fpgaio = opaque; MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms); @@ -480,7 +488,8 @@ static MemoryRegion *make_fpgaio(MPS2TZMachineState *mms, void *opaque, static MemoryRegion *make_eth_dev(MPS2TZMachineState *mms, void *opaque, const char *name, hwaddr size, - const int *irqs) + const int *irqs, + const PPCExtraData *extradata) { SysBusDevice *s; NICInfo *nd = &nd_table[0]; @@ -500,7 +509,8 @@ static MemoryRegion *make_eth_dev(MPS2TZMachineState *mms, void *opaque, static MemoryRegion *make_eth_usb(MPS2TZMachineState *mms, void *opaque, const char *name, hwaddr size, - const int *irqs) + const int *irqs, + const PPCExtraData *extradata) { /* * The AN524 makes the ethernet and USB share a PPC port. @@ -543,7 +553,7 @@ static MemoryRegion *make_eth_usb(MPS2TZMachineState *mms, void *opaque, static MemoryRegion *make_mpc(MPS2TZMachineState *mms, void *opaque, const char *name, hwaddr size, - const int *irqs) + const int *irqs, const PPCExtraData *extradata) { TZMPC *mpc = opaque; int i = mpc - &mms->mpc[0]; @@ -615,7 +625,7 @@ static void remap_irq_fn(void *opaque, int n, int level) static MemoryRegion *make_dma(MPS2TZMachineState *mms, void *opaque, const char *name, hwaddr size, - const int *irqs) + const int *irqs, const PPCExtraData *extradata) { /* The irq[] array is DMACINTR, DMACINTERR, DMACINTTC, in that order */ PL080State *dma = opaque; @@ -672,7 +682,7 @@ static MemoryRegion *make_dma(MPS2TZMachineState *mms, void *opaque, static MemoryRegion *make_spi(MPS2TZMachineState *mms, void *opaque, const char *name, hwaddr size, - const int *irqs) + const int *irqs, const PPCExtraData *extradata) { /* * The AN505 has five PL022 SPI controllers. 
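/*
 * Note on the i2c_internal flag consumed by make_i2c in the next hunk: a
 * bus marked with qbus_mark_full() advertises no free slot, so a
 * user-created device (e.g. a hypothetical "-device tmp105,bus=..."; the
 * exact bus name depends on the board) should be rejected on the
 * board-internal buses (touchscreen, audio config, DDR4 EEPROM), while the
 * shield-header buses remain available for user devices.
 */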
@@ -694,7 +704,7 @@ static MemoryRegion *make_spi(MPS2TZMachineState *mms, void *opaque, static MemoryRegion *make_i2c(MPS2TZMachineState *mms, void *opaque, const char *name, hwaddr size, - const int *irqs) + const int *irqs, const PPCExtraData *extradata) { ArmSbconI2CState *i2c = opaque; SysBusDevice *s; @@ -702,12 +712,26 @@ static MemoryRegion *make_i2c(MPS2TZMachineState *mms, void *opaque, object_initialize_child(OBJECT(mms), name, i2c, TYPE_ARM_SBCON_I2C); s = SYS_BUS_DEVICE(i2c); sysbus_realize(s, &error_fatal); + + /* + * If this is an internal-use-only i2c bus, mark it full + * so that user-created i2c devices are not plugged into it. + * If we implement models of any on-board i2c devices that + * plug in to one of the internal-use-only buses, then we will + * need to create and plugging those in here before we mark the + * bus as full. + */ + if (extradata->i2c_internal) { + BusState *qbus = qdev_get_child_bus(DEVICE(i2c), "i2c"); + qbus_mark_full(qbus); + } + return sysbus_mmio_get_region(s, 0); } static MemoryRegion *make_rtc(MPS2TZMachineState *mms, void *opaque, const char *name, hwaddr size, - const int *irqs) + const int *irqs, const PPCExtraData *extradata) { PL031State *pl031 = opaque; SysBusDevice *s; @@ -912,10 +936,14 @@ static void mps2tz_common_init(MachineState *machine) { "uart2", make_uart, &mms->uart[2], 0x40202000, 0x1000, { 36, 37, 44 } }, { "uart3", make_uart, &mms->uart[3], 0x40203000, 0x1000, { 38, 39, 45 } }, { "uart4", make_uart, &mms->uart[4], 0x40204000, 0x1000, { 40, 41, 46 } }, - { "i2c0", make_i2c, &mms->i2c[0], 0x40207000, 0x1000 }, - { "i2c1", make_i2c, &mms->i2c[1], 0x40208000, 0x1000 }, - { "i2c2", make_i2c, &mms->i2c[2], 0x4020c000, 0x1000 }, - { "i2c3", make_i2c, &mms->i2c[3], 0x4020d000, 0x1000 }, + { "i2c0", make_i2c, &mms->i2c[0], 0x40207000, 0x1000, {}, + { .i2c_internal = true /* touchscreen */ } }, + { "i2c1", make_i2c, &mms->i2c[1], 0x40208000, 0x1000, {}, + { .i2c_internal = true /* audio conf */ } }, + { "i2c2", make_i2c, &mms->i2c[2], 0x4020c000, 0x1000, {}, + { .i2c_internal = false /* shield 0 */ } }, + { "i2c3", make_i2c, &mms->i2c[3], 0x4020d000, 0x1000, {}, + { .i2c_internal = false /* shield 1 */ } }, }, }, { .name = "apb_ppcexp2", @@ -956,15 +984,20 @@ static void mps2tz_common_init(MachineState *machine) }, { .name = "apb_ppcexp1", .ports = { - { "i2c0", make_i2c, &mms->i2c[0], 0x41200000, 0x1000 }, - { "i2c1", make_i2c, &mms->i2c[1], 0x41201000, 0x1000 }, + { "i2c0", make_i2c, &mms->i2c[0], 0x41200000, 0x1000, {}, + { .i2c_internal = true /* touchscreen */ } }, + { "i2c1", make_i2c, &mms->i2c[1], 0x41201000, 0x1000, {}, + { .i2c_internal = true /* audio conf */ } }, { "spi0", make_spi, &mms->spi[0], 0x41202000, 0x1000, { 52 } }, { "spi1", make_spi, &mms->spi[1], 0x41203000, 0x1000, { 53 } }, { "spi2", make_spi, &mms->spi[2], 0x41204000, 0x1000, { 54 } }, - { "i2c2", make_i2c, &mms->i2c[2], 0x41205000, 0x1000 }, - { "i2c3", make_i2c, &mms->i2c[3], 0x41206000, 0x1000 }, + { "i2c2", make_i2c, &mms->i2c[2], 0x41205000, 0x1000, {}, + { .i2c_internal = false /* shield 0 */ } }, + { "i2c3", make_i2c, &mms->i2c[3], 0x41206000, 0x1000, {}, + { .i2c_internal = false /* shield 1 */ } }, { /* port 7 reserved */ }, - { "i2c4", make_i2c, &mms->i2c[4], 0x41208000, 0x1000 }, + { "i2c4", make_i2c, &mms->i2c[4], 0x41208000, 0x1000, {}, + { .i2c_internal = true /* DDR4 EEPROM */ } }, }, }, { .name = "apb_ppcexp2", @@ -1006,15 +1039,20 @@ static void mps2tz_common_init(MachineState *machine) }, { .name = "apb_ppcexp1", .ports = { - { "i2c0", 
make_i2c, &mms->i2c[0], 0x49200000, 0x1000 }, - { "i2c1", make_i2c, &mms->i2c[1], 0x49201000, 0x1000 }, + { "i2c0", make_i2c, &mms->i2c[0], 0x49200000, 0x1000, {}, + { .i2c_internal = true /* touchscreen */ } }, + { "i2c1", make_i2c, &mms->i2c[1], 0x49201000, 0x1000, {}, + { .i2c_internal = true /* audio conf */ } }, { "spi0", make_spi, &mms->spi[0], 0x49202000, 0x1000, { 53 } }, { "spi1", make_spi, &mms->spi[1], 0x49203000, 0x1000, { 54 } }, { "spi2", make_spi, &mms->spi[2], 0x49204000, 0x1000, { 55 } }, - { "i2c2", make_i2c, &mms->i2c[2], 0x49205000, 0x1000 }, - { "i2c3", make_i2c, &mms->i2c[3], 0x49206000, 0x1000 }, + { "i2c2", make_i2c, &mms->i2c[2], 0x49205000, 0x1000, {}, + { .i2c_internal = false /* shield 0 */ } }, + { "i2c3", make_i2c, &mms->i2c[3], 0x49206000, 0x1000, {}, + { .i2c_internal = false /* shield 1 */ } }, { /* port 7 reserved */ }, - { "i2c4", make_i2c, &mms->i2c[4], 0x49208000, 0x1000 }, + { "i2c4", make_i2c, &mms->i2c[4], 0x49208000, 0x1000, {}, + { .i2c_internal = true /* DDR4 EEPROM */ } }, }, }, { .name = "apb_ppcexp2", @@ -1040,6 +1078,10 @@ static void mps2tz_common_init(MachineState *machine) { "gpio1", make_unimp_dev, &mms->gpio[1], 0x41101000, 0x1000 }, { "gpio2", make_unimp_dev, &mms->gpio[2], 0x41102000, 0x1000 }, { "gpio3", make_unimp_dev, &mms->gpio[3], 0x41103000, 0x1000 }, + { /* port 4 USER AHB interface 0 */ }, + { /* port 5 USER AHB interface 1 */ }, + { /* port 6 USER AHB interface 2 */ }, + { /* port 7 USER AHB interface 3 */ }, { "eth-usb", make_eth_usb, NULL, 0x41400000, 0x200000, { 49 } }, }, }, @@ -1084,7 +1126,7 @@ static void mps2tz_common_init(MachineState *machine) } mr = pinfo->devfn(mms, pinfo->opaque, pinfo->name, pinfo->size, - pinfo->irqs); + pinfo->irqs, &pinfo->extradata); portname = g_strdup_printf("port[%d]", port); object_property_set_link(OBJECT(ppc), portname, OBJECT(mr), &error_fatal); @@ -1155,7 +1197,7 @@ static void mps2tz_common_init(MachineState *machine) } armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, - boot_ram_size(mms)); + 0, boot_ram_size(mms)); } static void mps2_tz_idau_check(IDAUInterface *ii, uint32_t address, @@ -1197,7 +1239,7 @@ static void mps2_set_remap(Object *obj, const char *value, Error **errp) } } -static void mps2_machine_reset(MachineState *machine) +static void mps2_machine_reset(MachineState *machine, ShutdownCause reason) { MPS2TZMachineState *mms = MPS2TZ_MACHINE(machine); @@ -1207,7 +1249,7 @@ static void mps2_machine_reset(MachineState *machine) * reset see the correct mapping. */ remap_memory(mms, mms->remap); - qemu_devices_reset(); + qemu_devices_reset(reason); } static void mps2tz_class_init(ObjectClass *oc, void *data) diff --git a/hw/arm/mps2.c b/hw/arm/mps2.c index 81413b7133..a86a994dba 100644 --- a/hw/arm/mps2.c +++ b/hw/arm/mps2.c @@ -86,6 +86,7 @@ struct MPS2MachineState { CMSDKAPBWatchdog watchdog; CMSDKAPBTimer timer[2]; Clock *sysclk; + Clock *refclk; }; #define TYPE_MPS2_MACHINE "mps2" @@ -99,6 +100,15 @@ OBJECT_DECLARE_TYPE(MPS2MachineState, MPS2MachineClass, MPS2_MACHINE) /* Main SYSCLK frequency in Hz */ #define SYSCLK_FRQ 25000000 +/* + * The Application Notes don't say anything about how the + * systick reference clock is configured. (Quite possibly + * they don't have one at all.) This 1MHz clock matches the + * pre-existing behaviour that used to be hardcoded in the + * armv7m_systick implementation. + */ +#define REFCLK_FRQ (1 * 1000 * 1000) + /* Initialize the auxiliary RAM region @mr and map it into * the memory map at @base. 
*/ @@ -146,6 +156,9 @@ static void mps2_common_init(MachineState *machine) mms->sysclk = clock_new(OBJECT(machine), "SYSCLK"); clock_set_hz(mms->sysclk, SYSCLK_FRQ); + mms->refclk = clock_new(OBJECT(machine), "REFCLK"); + clock_set_hz(mms->refclk, REFCLK_FRQ); + /* The FPGA images have an odd combination of different RAMs, * because in hardware they are different implementations and * connected to different buses, giving varying performance/size @@ -223,6 +236,8 @@ static void mps2_common_init(MachineState *machine) default: g_assert_not_reached(); } + qdev_connect_clock_in(armv7m, "cpuclk", mms->sysclk); + qdev_connect_clock_in(armv7m, "refclk", mms->refclk); qdev_prop_set_string(armv7m, "cpu-type", machine->cpu_type); qdev_prop_set_bit(armv7m, "enable-bitband", true); object_property_set_link(OBJECT(&mms->armv7m), "memory", @@ -413,7 +428,17 @@ static void mps2_common_init(MachineState *machine) 0x40023000, /* Audio */ 0x40029000, /* Shield0 */ 0x4002a000}; /* Shield1 */ - sysbus_create_simple(TYPE_ARM_SBCON_I2C, i2cbase[i], NULL); + DeviceState *dev; + + dev = sysbus_create_simple(TYPE_ARM_SBCON_I2C, i2cbase[i], NULL); + if (i < 2) { + /* + * internal-only bus: mark it full to avoid user-created + * i2c devices being plugged into it. + */ + BusState *qbus = qdev_get_child_bus(dev, "i2c"); + qbus_mark_full(qbus); + } } create_unimplemented_device("i2s", 0x40024000, 0x400); @@ -424,10 +449,8 @@ static void mps2_common_init(MachineState *machine) qdev_get_gpio_in(armv7m, mmc->fpga_type == FPGA_AN511 ? 47 : 13)); - system_clock_scale = NANOSECONDS_PER_SECOND / SYSCLK_FRQ; - armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, - 0x400000); + 0, 0x400000); } static void mps2_class_init(ObjectClass *oc, void *data) diff --git a/hw/arm/msf2-soc.c b/hw/arm/msf2-soc.c index 5cfe7caf83..b5fe9f364d 100644 --- a/hw/arm/msf2-soc.c +++ b/hw/arm/msf2-soc.c @@ -29,6 +29,7 @@ #include "hw/char/serial.h" #include "hw/arm/msf2-soc.h" #include "hw/misc/unimp.h" +#include "hw/qdev-clock.h" #include "sysemu/sysemu.h" #define MSF2_TIMER_BASE 0x40004000 @@ -73,6 +74,9 @@ static void m2sxxx_soc_initfn(Object *obj) } object_initialize_child(obj, "emac", &s->emac, TYPE_MSS_EMAC); + + s->m3clk = qdev_init_clock_in(DEVICE(obj), "m3clk", NULL, NULL, 0); + s->refclk = qdev_init_clock_in(DEVICE(obj), "refclk", NULL, NULL, 0); } static void m2sxxx_soc_realize(DeviceState *dev_soc, Error **errp) @@ -83,11 +87,34 @@ static void m2sxxx_soc_realize(DeviceState *dev_soc, Error **errp) int i; MemoryRegion *system_memory = get_system_memory(); - MemoryRegion *nvm = g_new(MemoryRegion, 1); - MemoryRegion *nvm_alias = g_new(MemoryRegion, 1); - MemoryRegion *sram = g_new(MemoryRegion, 1); - memory_region_init_rom(nvm, OBJECT(dev_soc), "MSF2.eNVM", s->envm_size, + if (!clock_has_source(s->m3clk)) { + error_setg(errp, "m3clk must be wired up by the board code"); + return; + } + + /* + * We use s->refclk internally and only define it with qdev_init_clock_in() + * so it is correctly parented and not leaked on an init/deinit; it is not + * intended as an externally exposed clock. + */ + if (clock_has_source(s->refclk)) { + error_setg(errp, "refclk must not be wired up by the board code"); + return; + } + + /* + * TODO: ideally we should model the SoC SYSTICK_CR register at 0xe0042038, + * which allows the guest to program the divisor between the m3clk and + * the systick refclk to either /4, /8, /16 or /32, as well as setting + * the value the guest can read in the STCALIB register. 
Currently we + * implement the divisor as a fixed /32, which matches the reset value + * of SYSTICK_CR. + */ + clock_set_mul_div(s->refclk, 32, 1); + clock_set_source(s->refclk, s->m3clk); + + memory_region_init_rom(&s->nvm, OBJECT(dev_soc), "MSF2.eNVM", s->envm_size, &error_fatal); /* * On power-on, the eNVM region 0x60000000 is automatically @@ -95,34 +122,28 @@ static void m2sxxx_soc_realize(DeviceState *dev_soc, Error **errp) * start address (0x0). We do not support remapping other eNVM, * eSRAM and DDR regions by guest(via Sysreg) currently. */ - memory_region_init_alias(nvm_alias, OBJECT(dev_soc), "MSF2.eNVM", nvm, 0, - s->envm_size); + memory_region_init_alias(&s->nvm_alias, OBJECT(dev_soc), "MSF2.eNVM", + &s->nvm, 0, s->envm_size); - memory_region_add_subregion(system_memory, ENVM_BASE_ADDRESS, nvm); - memory_region_add_subregion(system_memory, 0, nvm_alias); + memory_region_add_subregion(system_memory, ENVM_BASE_ADDRESS, &s->nvm); + memory_region_add_subregion(system_memory, 0, &s->nvm_alias); - memory_region_init_ram(sram, NULL, "MSF2.eSRAM", s->esram_size, + memory_region_init_ram(&s->sram, NULL, "MSF2.eSRAM", s->esram_size, &error_fatal); - memory_region_add_subregion(system_memory, SRAM_BASE_ADDRESS, sram); + memory_region_add_subregion(system_memory, SRAM_BASE_ADDRESS, &s->sram); armv7m = DEVICE(&s->armv7m); qdev_prop_set_uint32(armv7m, "num-irq", 81); qdev_prop_set_string(armv7m, "cpu-type", s->cpu_type); qdev_prop_set_bit(armv7m, "enable-bitband", true); + qdev_connect_clock_in(armv7m, "cpuclk", s->m3clk); + qdev_connect_clock_in(armv7m, "refclk", s->refclk); object_property_set_link(OBJECT(&s->armv7m), "memory", OBJECT(get_system_memory()), &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->armv7m), errp)) { return; } - if (!s->m3clk) { - error_setg(errp, "Invalid m3clk value"); - error_append_hint(errp, "m3clk can not be zero\n"); - return; - } - - system_clock_scale = NANOSECONDS_PER_SECOND / s->m3clk; - for (i = 0; i < MSF2_NUM_UARTS; i++) { if (serial_hd(i)) { serial_mm_init(get_system_memory(), uart_addr[i], 2, @@ -132,8 +153,13 @@ static void m2sxxx_soc_realize(DeviceState *dev_soc, Error **errp) } dev = DEVICE(&s->timer); - /* APB0 clock is the timer input clock */ - qdev_prop_set_uint32(dev, "clock-frequency", s->m3clk / s->apb0div); + /* + * APB0 clock is the timer input clock. + * TODO: ideally the MSF2 timer device should use a Clock rather than a + * clock-frequency integer property. 
+ */ + qdev_prop_set_uint32(dev, "clock-frequency", + clock_get_hz(s->m3clk) / s->apb0div); if (!sysbus_realize(SYS_BUS_DEVICE(&s->timer), errp)) { return; } @@ -210,8 +236,6 @@ static Property m2sxxx_soc_properties[] = { DEFINE_PROP_UINT64("eNVM-size", MSF2State, envm_size, MSF2_ENVM_MAX_SIZE), DEFINE_PROP_UINT64("eSRAM-size", MSF2State, esram_size, MSF2_ESRAM_MAX_SIZE), - /* Libero GUI shows 100Mhz as default for clocks */ - DEFINE_PROP_UINT32("m3clk", MSF2State, m3clk, 100 * 1000000), /* default divisors in Libero GUI */ DEFINE_PROP_UINT8("apb0div", MSF2State, apb0div, 2), DEFINE_PROP_UINT8("apb1div", MSF2State, apb1div, 2), diff --git a/hw/arm/msf2-som.c b/hw/arm/msf2-som.c index 343ec977c0..a6df473ec9 100644 --- a/hw/arm/msf2-som.c +++ b/hw/arm/msf2-som.c @@ -29,6 +29,7 @@ #include "hw/boards.h" #include "hw/qdev-properties.h" #include "hw/arm/boot.h" +#include "hw/qdev-clock.h" #include "exec/address-spaces.h" #include "hw/arm/msf2-soc.h" @@ -44,11 +45,12 @@ static void emcraft_sf2_s2s010_init(MachineState *machine) DeviceState *spi_flash; MSF2State *soc; MachineClass *mc = MACHINE_GET_CLASS(machine); - DriveInfo *dinfo = drive_get_next(IF_MTD); + DriveInfo *dinfo = drive_get(IF_MTD, 0, 0); qemu_irq cs_line; BusState *spi_bus; MemoryRegion *sysmem = get_system_memory(); MemoryRegion *ddr = g_new(MemoryRegion, 1); + Clock *m3clk; if (strcmp(machine->cpu_type, mc->default_cpu_type) != 0) { error_report("This board can only be used with CPU %s", @@ -72,7 +74,10 @@ static void emcraft_sf2_s2s010_init(MachineState *machine) * in Libero. CPU clock is divided by APB0 and APB1 divisors for * peripherals. Emcraft's SoM kit comes with these settings by default. */ - qdev_prop_set_uint32(dev, "m3clk", 142 * 1000000); + /* This clock doesn't need migration because it is fixed-frequency */ + m3clk = clock_new(OBJECT(machine), "m3clk"); + clock_set_hz(m3clk, 142 * 1000000); + qdev_connect_clock_in(dev, "m3clk", m3clk); qdev_prop_set_uint32(dev, "apb0div", 2); qdev_prop_set_uint32(dev, "apb1div", 2); @@ -93,7 +98,7 @@ static void emcraft_sf2_s2s010_init(MachineState *machine) sysbus_connect_irq(SYS_BUS_DEVICE(&soc->spi[0]), 1, cs_line); armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, - soc->envm_size); + 0, soc->envm_size); } static void emcraft_sf2_machine_init(MachineClass *mc) diff --git a/hw/arm/musca.c b/hw/arm/musca.c index 7a83f7dda7..6eeee57c9d 100644 --- a/hw/arm/musca.c +++ b/hw/arm/musca.c @@ -597,7 +597,8 @@ static void musca_init(MachineState *machine) "cfg_sec_resp", 0)); } - armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, 0x2000000); + armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, + 0, 0x2000000); } static void musca_class_init(ObjectClass *oc, void *data) diff --git a/hw/arm/musicpal.c b/hw/arm/musicpal.c index 2d612cc0c9..b65c020115 100644 --- a/hw/arm/musicpal.c +++ b/hw/arm/musicpal.c @@ -34,12 +34,12 @@ #include "ui/pixel_ops.h" #include "qemu/cutils.h" #include "qom/object.h" +#include "hw/net/mv88w8618_eth.h" #define MP_MISC_BASE 0x80002000 #define MP_MISC_SIZE 0x00001000 #define MP_ETH_BASE 0x80008000 -#define MP_ETH_SIZE 0x00001000 #define MP_WLAN_BASE 0x8000C000 #define MP_WLAN_SIZE 0x00000800 @@ -84,383 +84,6 @@ /* Wolfson 8750 I2C address */ #define MP_WM_ADDR 0x1A -/* Ethernet register offsets */ -#define MP_ETH_SMIR 0x010 -#define MP_ETH_PCXR 0x408 -#define MP_ETH_SDCMR 0x448 -#define MP_ETH_ICR 0x450 -#define MP_ETH_IMR 0x458 -#define MP_ETH_FRDP0 0x480 -#define MP_ETH_FRDP1 0x484 -#define MP_ETH_FRDP2 0x488 -#define 
MP_ETH_FRDP3 0x48C -#define MP_ETH_CRDP0 0x4A0 -#define MP_ETH_CRDP1 0x4A4 -#define MP_ETH_CRDP2 0x4A8 -#define MP_ETH_CRDP3 0x4AC -#define MP_ETH_CTDP0 0x4E0 -#define MP_ETH_CTDP1 0x4E4 - -/* MII PHY access */ -#define MP_ETH_SMIR_DATA 0x0000FFFF -#define MP_ETH_SMIR_ADDR 0x03FF0000 -#define MP_ETH_SMIR_OPCODE (1 << 26) /* Read value */ -#define MP_ETH_SMIR_RDVALID (1 << 27) - -/* PHY registers */ -#define MP_ETH_PHY1_BMSR 0x00210000 -#define MP_ETH_PHY1_PHYSID1 0x00410000 -#define MP_ETH_PHY1_PHYSID2 0x00610000 - -#define MP_PHY_BMSR_LINK 0x0004 -#define MP_PHY_BMSR_AUTONEG 0x0008 - -#define MP_PHY_88E3015 0x01410E20 - -/* TX descriptor status */ -#define MP_ETH_TX_OWN (1U << 31) - -/* RX descriptor status */ -#define MP_ETH_RX_OWN (1U << 31) - -/* Interrupt cause/mask bits */ -#define MP_ETH_IRQ_RX_BIT 0 -#define MP_ETH_IRQ_RX (1 << MP_ETH_IRQ_RX_BIT) -#define MP_ETH_IRQ_TXHI_BIT 2 -#define MP_ETH_IRQ_TXLO_BIT 3 - -/* Port config bits */ -#define MP_ETH_PCXR_2BSM_BIT 28 /* 2-byte incoming suffix */ - -/* SDMA command bits */ -#define MP_ETH_CMD_TXHI (1 << 23) -#define MP_ETH_CMD_TXLO (1 << 22) - -typedef struct mv88w8618_tx_desc { - uint32_t cmdstat; - uint16_t res; - uint16_t bytes; - uint32_t buffer; - uint32_t next; -} mv88w8618_tx_desc; - -typedef struct mv88w8618_rx_desc { - uint32_t cmdstat; - uint16_t bytes; - uint16_t buffer_size; - uint32_t buffer; - uint32_t next; -} mv88w8618_rx_desc; - -#define TYPE_MV88W8618_ETH "mv88w8618_eth" -OBJECT_DECLARE_SIMPLE_TYPE(mv88w8618_eth_state, MV88W8618_ETH) - -struct mv88w8618_eth_state { - /*< private >*/ - SysBusDevice parent_obj; - /*< public >*/ - - MemoryRegion iomem; - qemu_irq irq; - MemoryRegion *dma_mr; - AddressSpace dma_as; - uint32_t smir; - uint32_t icr; - uint32_t imr; - int mmio_index; - uint32_t vlan_header; - uint32_t tx_queue[2]; - uint32_t rx_queue[4]; - uint32_t frx_queue[4]; - uint32_t cur_rx[4]; - NICState *nic; - NICConf conf; -}; - -static void eth_rx_desc_put(AddressSpace *dma_as, uint32_t addr, - mv88w8618_rx_desc *desc) -{ - cpu_to_le32s(&desc->cmdstat); - cpu_to_le16s(&desc->bytes); - cpu_to_le16s(&desc->buffer_size); - cpu_to_le32s(&desc->buffer); - cpu_to_le32s(&desc->next); - dma_memory_write(dma_as, addr, desc, sizeof(*desc)); -} - -static void eth_rx_desc_get(AddressSpace *dma_as, uint32_t addr, - mv88w8618_rx_desc *desc) -{ - dma_memory_read(dma_as, addr, desc, sizeof(*desc)); - le32_to_cpus(&desc->cmdstat); - le16_to_cpus(&desc->bytes); - le16_to_cpus(&desc->buffer_size); - le32_to_cpus(&desc->buffer); - le32_to_cpus(&desc->next); -} - -static ssize_t eth_receive(NetClientState *nc, const uint8_t *buf, size_t size) -{ - mv88w8618_eth_state *s = qemu_get_nic_opaque(nc); - uint32_t desc_addr; - mv88w8618_rx_desc desc; - int i; - - for (i = 0; i < 4; i++) { - desc_addr = s->cur_rx[i]; - if (!desc_addr) { - continue; - } - do { - eth_rx_desc_get(&s->dma_as, desc_addr, &desc); - if ((desc.cmdstat & MP_ETH_RX_OWN) && desc.buffer_size >= size) { - dma_memory_write(&s->dma_as, desc.buffer + s->vlan_header, - buf, size); - desc.bytes = size + s->vlan_header; - desc.cmdstat &= ~MP_ETH_RX_OWN; - s->cur_rx[i] = desc.next; - - s->icr |= MP_ETH_IRQ_RX; - if (s->icr & s->imr) { - qemu_irq_raise(s->irq); - } - eth_rx_desc_put(&s->dma_as, desc_addr, &desc); - return size; - } - desc_addr = desc.next; - } while (desc_addr != s->rx_queue[i]); - } - return size; -} - -static void eth_tx_desc_put(AddressSpace *dma_as, uint32_t addr, - mv88w8618_tx_desc *desc) -{ - cpu_to_le32s(&desc->cmdstat); - cpu_to_le16s(&desc->res); - 
cpu_to_le16s(&desc->bytes); - cpu_to_le32s(&desc->buffer); - cpu_to_le32s(&desc->next); - dma_memory_write(dma_as, addr, desc, sizeof(*desc)); -} - -static void eth_tx_desc_get(AddressSpace *dma_as, uint32_t addr, - mv88w8618_tx_desc *desc) -{ - dma_memory_read(dma_as, addr, desc, sizeof(*desc)); - le32_to_cpus(&desc->cmdstat); - le16_to_cpus(&desc->res); - le16_to_cpus(&desc->bytes); - le32_to_cpus(&desc->buffer); - le32_to_cpus(&desc->next); -} - -static void eth_send(mv88w8618_eth_state *s, int queue_index) -{ - uint32_t desc_addr = s->tx_queue[queue_index]; - mv88w8618_tx_desc desc; - uint32_t next_desc; - uint8_t buf[2048]; - int len; - - do { - eth_tx_desc_get(&s->dma_as, desc_addr, &desc); - next_desc = desc.next; - if (desc.cmdstat & MP_ETH_TX_OWN) { - len = desc.bytes; - if (len < 2048) { - dma_memory_read(&s->dma_as, desc.buffer, buf, len); - qemu_send_packet(qemu_get_queue(s->nic), buf, len); - } - desc.cmdstat &= ~MP_ETH_TX_OWN; - s->icr |= 1 << (MP_ETH_IRQ_TXLO_BIT - queue_index); - eth_tx_desc_put(&s->dma_as, desc_addr, &desc); - } - desc_addr = next_desc; - } while (desc_addr != s->tx_queue[queue_index]); -} - -static uint64_t mv88w8618_eth_read(void *opaque, hwaddr offset, - unsigned size) -{ - mv88w8618_eth_state *s = opaque; - - switch (offset) { - case MP_ETH_SMIR: - if (s->smir & MP_ETH_SMIR_OPCODE) { - switch (s->smir & MP_ETH_SMIR_ADDR) { - case MP_ETH_PHY1_BMSR: - return MP_PHY_BMSR_LINK | MP_PHY_BMSR_AUTONEG | - MP_ETH_SMIR_RDVALID; - case MP_ETH_PHY1_PHYSID1: - return (MP_PHY_88E3015 >> 16) | MP_ETH_SMIR_RDVALID; - case MP_ETH_PHY1_PHYSID2: - return (MP_PHY_88E3015 & 0xFFFF) | MP_ETH_SMIR_RDVALID; - default: - return MP_ETH_SMIR_RDVALID; - } - } - return 0; - - case MP_ETH_ICR: - return s->icr; - - case MP_ETH_IMR: - return s->imr; - - case MP_ETH_FRDP0 ... MP_ETH_FRDP3: - return s->frx_queue[(offset - MP_ETH_FRDP0)/4]; - - case MP_ETH_CRDP0 ... MP_ETH_CRDP3: - return s->rx_queue[(offset - MP_ETH_CRDP0)/4]; - - case MP_ETH_CTDP0 ... MP_ETH_CTDP1: - return s->tx_queue[(offset - MP_ETH_CTDP0)/4]; - - default: - return 0; - } -} - -static void mv88w8618_eth_write(void *opaque, hwaddr offset, - uint64_t value, unsigned size) -{ - mv88w8618_eth_state *s = opaque; - - switch (offset) { - case MP_ETH_SMIR: - s->smir = value; - break; - - case MP_ETH_PCXR: - s->vlan_header = ((value >> MP_ETH_PCXR_2BSM_BIT) & 1) * 2; - break; - - case MP_ETH_SDCMR: - if (value & MP_ETH_CMD_TXHI) { - eth_send(s, 1); - } - if (value & MP_ETH_CMD_TXLO) { - eth_send(s, 0); - } - if (value & (MP_ETH_CMD_TXHI | MP_ETH_CMD_TXLO) && s->icr & s->imr) { - qemu_irq_raise(s->irq); - } - break; - - case MP_ETH_ICR: - s->icr &= value; - break; - - case MP_ETH_IMR: - s->imr = value; - if (s->icr & s->imr) { - qemu_irq_raise(s->irq); - } - break; - - case MP_ETH_FRDP0 ... MP_ETH_FRDP3: - s->frx_queue[(offset - MP_ETH_FRDP0)/4] = value; - break; - - case MP_ETH_CRDP0 ... MP_ETH_CRDP3: - s->rx_queue[(offset - MP_ETH_CRDP0)/4] = - s->cur_rx[(offset - MP_ETH_CRDP0)/4] = value; - break; - - case MP_ETH_CTDP0 ... 
MP_ETH_CTDP1: - s->tx_queue[(offset - MP_ETH_CTDP0)/4] = value; - break; - } -} - -static const MemoryRegionOps mv88w8618_eth_ops = { - .read = mv88w8618_eth_read, - .write = mv88w8618_eth_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static void eth_cleanup(NetClientState *nc) -{ - mv88w8618_eth_state *s = qemu_get_nic_opaque(nc); - - s->nic = NULL; -} - -static NetClientInfo net_mv88w8618_info = { - .type = NET_CLIENT_DRIVER_NIC, - .size = sizeof(NICState), - .receive = eth_receive, - .cleanup = eth_cleanup, -}; - -static void mv88w8618_eth_init(Object *obj) -{ - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - DeviceState *dev = DEVICE(sbd); - mv88w8618_eth_state *s = MV88W8618_ETH(dev); - - sysbus_init_irq(sbd, &s->irq); - memory_region_init_io(&s->iomem, obj, &mv88w8618_eth_ops, s, - "mv88w8618-eth", MP_ETH_SIZE); - sysbus_init_mmio(sbd, &s->iomem); -} - -static void mv88w8618_eth_realize(DeviceState *dev, Error **errp) -{ - mv88w8618_eth_state *s = MV88W8618_ETH(dev); - - if (!s->dma_mr) { - error_setg(errp, TYPE_MV88W8618_ETH " 'dma-memory' link not set"); - return; - } - - address_space_init(&s->dma_as, s->dma_mr, "emac-dma"); - s->nic = qemu_new_nic(&net_mv88w8618_info, &s->conf, - object_get_typename(OBJECT(dev)), dev->id, s); -} - -static const VMStateDescription mv88w8618_eth_vmsd = { - .name = "mv88w8618_eth", - .version_id = 1, - .minimum_version_id = 1, - .fields = (VMStateField[]) { - VMSTATE_UINT32(smir, mv88w8618_eth_state), - VMSTATE_UINT32(icr, mv88w8618_eth_state), - VMSTATE_UINT32(imr, mv88w8618_eth_state), - VMSTATE_UINT32(vlan_header, mv88w8618_eth_state), - VMSTATE_UINT32_ARRAY(tx_queue, mv88w8618_eth_state, 2), - VMSTATE_UINT32_ARRAY(rx_queue, mv88w8618_eth_state, 4), - VMSTATE_UINT32_ARRAY(frx_queue, mv88w8618_eth_state, 4), - VMSTATE_UINT32_ARRAY(cur_rx, mv88w8618_eth_state, 4), - VMSTATE_END_OF_LIST() - } -}; - -static Property mv88w8618_eth_properties[] = { - DEFINE_NIC_PROPERTIES(mv88w8618_eth_state, conf), - DEFINE_PROP_LINK("dma-memory", mv88w8618_eth_state, dma_mr, - TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), -}; - -static void mv88w8618_eth_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->vmsd = &mv88w8618_eth_vmsd; - device_class_set_props(dc, mv88w8618_eth_properties); - dc->realize = mv88w8618_eth_realize; -} - -static const TypeInfo mv88w8618_eth_info = { - .name = TYPE_MV88W8618_ETH, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(mv88w8618_eth_state), - .instance_init = mv88w8618_eth_init, - .class_init = mv88w8618_eth_class_init, -}; - /* LCD register offsets */ #define MP_LCD_IRQCTRL 0x180 #define MP_LCD_IRQSTAT 0x184 @@ -841,7 +464,7 @@ static void mv88w8618_timer_init(SysBusDevice *dev, mv88w8618_timer_state *s, sysbus_init_irq(dev, &s->irq); s->freq = freq; - s->ptimer = ptimer_init(mv88w8618_timer_tick, s, PTIMER_POLICY_DEFAULT); + s->ptimer = ptimer_init(mv88w8618_timer_tick, s, PTIMER_POLICY_LEGACY); } static uint64_t mv88w8618_pit_read(void *opaque, hwaddr offset, @@ -1745,7 +1368,6 @@ static void musicpal_register_types(void) type_register_static(&mv88w8618_pic_info); type_register_static(&mv88w8618_pit_info); type_register_static(&mv88w8618_flashcfg_info); - type_register_static(&mv88w8618_eth_info); type_register_static(&mv88w8618_wlan_info); type_register_static(&musicpal_lcd_info); type_register_static(&musicpal_gpio_info); diff --git a/hw/arm/netduino2.c b/hw/arm/netduino2.c index 1733b71507..83753d53a3 100644 --- a/hw/arm/netduino2.c +++ 
b/hw/arm/netduino2.c @@ -26,6 +26,7 @@ #include "qapi/error.h" #include "hw/boards.h" #include "hw/qdev-properties.h" +#include "hw/qdev-clock.h" #include "qemu/error-report.h" #include "hw/arm/stm32f205_soc.h" #include "hw/arm/boot.h" @@ -36,20 +37,19 @@ static void netduino2_init(MachineState *machine) { DeviceState *dev; + Clock *sysclk; - /* - * TODO: ideally we would model the SoC RCC and let it handle - * system_clock_scale, including its ability to define different - * possible SYSCLK sources. - */ - system_clock_scale = NANOSECONDS_PER_SECOND / SYSCLK_FRQ; + /* This clock doesn't need migration because it is fixed-frequency */ + sysclk = clock_new(OBJECT(machine), "SYSCLK"); + clock_set_hz(sysclk, SYSCLK_FRQ); dev = qdev_new(TYPE_STM32F205_SOC); qdev_prop_set_string(dev, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m3")); + qdev_connect_clock_in(dev, "sysclk", sysclk); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, - FLASH_SIZE); + 0, FLASH_SIZE); } static void netduino2_machine_init(MachineClass *mc) diff --git a/hw/arm/netduinoplus2.c b/hw/arm/netduinoplus2.c index d3ad7a2b67..515c081605 100644 --- a/hw/arm/netduinoplus2.c +++ b/hw/arm/netduinoplus2.c @@ -26,6 +26,7 @@ #include "qapi/error.h" #include "hw/boards.h" #include "hw/qdev-properties.h" +#include "hw/qdev-clock.h" #include "qemu/error-report.h" #include "hw/arm/stm32f405_soc.h" #include "hw/arm/boot.h" @@ -36,21 +37,20 @@ static void netduinoplus2_init(MachineState *machine) { DeviceState *dev; + Clock *sysclk; - /* - * TODO: ideally we would model the SoC RCC and let it handle - * system_clock_scale, including its ability to define different - * possible SYSCLK sources. - */ - system_clock_scale = NANOSECONDS_PER_SECOND / SYSCLK_FRQ; + /* This clock doesn't need migration because it is fixed-frequency */ + sysclk = clock_new(OBJECT(machine), "SYSCLK"); + clock_set_hz(sysclk, SYSCLK_FRQ); dev = qdev_new(TYPE_STM32F405_SOC); qdev_prop_set_string(dev, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m4")); + qdev_connect_clock_in(dev, "sysclk", sysclk); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, - FLASH_SIZE); + 0, FLASH_SIZE); } static void netduinoplus2_machine_init(MachineClass *mc) diff --git a/hw/arm/npcm7xx.c b/hw/arm/npcm7xx.c index 2ab0080e0b..d85cc02765 100644 --- a/hw/arm/npcm7xx.c +++ b/hw/arm/npcm7xx.c @@ -63,6 +63,8 @@ #define NPCM7XX_ROM_BA (0xffff0000) #define NPCM7XX_ROM_SZ (64 * KiB) +/* SDHCI Modules */ +#define NPCM7XX_MMC_BA (0xf0842000) /* Clock configuration values to be fixed up when bypassing bootloader */ @@ -83,6 +85,7 @@ enum NPCM7xxInterrupt { NPCM7XX_UART3_IRQ, NPCM7XX_EMC1RX_IRQ = 15, NPCM7XX_EMC1TX_IRQ, + NPCM7XX_MMC_IRQ = 26, NPCM7XX_TIMER0_IRQ = 32, /* Timer Module 0 */ NPCM7XX_TIMER1_IRQ, NPCM7XX_TIMER2_IRQ, @@ -352,10 +355,7 @@ static struct arm_boot_info npcm7xx_binfo = { void npcm7xx_load_kernel(MachineState *machine, NPCM7xxState *soc) { - NPCM7xxClass *sc = NPCM7XX_GET_CLASS(soc); - npcm7xx_binfo.ram_size = machine->ram_size; - npcm7xx_binfo.nb_cpus = sc->num_cpus; arm_load_kernel(&soc->cpu[0], machine, &npcm7xx_binfo); } @@ -443,6 +443,8 @@ static void npcm7xx_init(Object *obj) for (i = 0; i < ARRAY_SIZE(s->emc); i++) { object_initialize_child(obj, "emc[*]", &s->emc[i], TYPE_NPCM7XX_EMC); } + + object_initialize_child(obj, "mmc", &s->mmc, TYPE_NPCM7XX_SDHCI); } static void npcm7xx_realize(DeviceState *dev, Error **errp) @@ -707,6 +709,12 @@ 
static void npcm7xx_realize(DeviceState *dev, Error **errp) &error_abort); memory_region_add_subregion(get_system_memory(), NPCM7XX_ROM_BA, &s->irom); + /* SDHCI */ + sysbus_realize(SYS_BUS_DEVICE(&s->mmc), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->mmc), 0, NPCM7XX_MMC_BA); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->mmc), 0, + npcm7xx_irq(s, NPCM7XX_MMC_IRQ)); + create_unimplemented_device("npcm7xx.shm", 0xc0001000, 4 * KiB); create_unimplemented_device("npcm7xx.vdmx", 0xe0800000, 4 * KiB); create_unimplemented_device("npcm7xx.pcierc", 0xe1000000, 64 * KiB); @@ -736,7 +744,6 @@ static void npcm7xx_realize(DeviceState *dev, Error **errp) create_unimplemented_device("npcm7xx.usbd[8]", 0xf0838000, 4 * KiB); create_unimplemented_device("npcm7xx.usbd[9]", 0xf0839000, 4 * KiB); create_unimplemented_device("npcm7xx.sd", 0xf0840000, 8 * KiB); - create_unimplemented_device("npcm7xx.mmc", 0xf0842000, 8 * KiB); create_unimplemented_device("npcm7xx.pcimbx", 0xf0848000, 512 * KiB); create_unimplemented_device("npcm7xx.aes", 0xf0858000, 4 * KiB); create_unimplemented_device("npcm7xx.des", 0xf0859000, 4 * KiB); diff --git a/hw/arm/npcm7xx_boards.c b/hw/arm/npcm7xx_boards.c index e5a3243995..6bc6f5d2fe 100644 --- a/hw/arm/npcm7xx_boards.c +++ b/hw/arm/npcm7xx_boards.c @@ -24,13 +24,31 @@ #include "hw/qdev-core.h" #include "hw/qdev-properties.h" #include "qapi/error.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qemu/units.h" +#include "sysemu/blockdev.h" +#include "sysemu/sysemu.h" +#include "sysemu/block-backend.h" -#define NPCM750_EVB_POWER_ON_STRAPS 0x00001ff7 -#define QUANTA_GSJ_POWER_ON_STRAPS 0x00001fff -#define QUANTA_GBS_POWER_ON_STRAPS 0x000017ff +#define NPCM7XX_POWER_ON_STRAPS_DEFAULT ( \ + NPCM7XX_PWRON_STRAP_SPI0F18 | \ + NPCM7XX_PWRON_STRAP_SFAB | \ + NPCM7XX_PWRON_STRAP_BSPA | \ + NPCM7XX_PWRON_STRAP_FUP(FUP_NORM_UART2) | \ + NPCM7XX_PWRON_STRAP_SECEN | \ + NPCM7XX_PWRON_STRAP_HIZ | \ + NPCM7XX_PWRON_STRAP_ECC | \ + NPCM7XX_PWRON_STRAP_RESERVE1 | \ + NPCM7XX_PWRON_STRAP_J2EN | \ + NPCM7XX_PWRON_STRAP_CKFRQ(CKFRQ_DEFAULT)) + +#define NPCM750_EVB_POWER_ON_STRAPS ( \ + NPCM7XX_POWER_ON_STRAPS_DEFAULT & ~NPCM7XX_PWRON_STRAP_J2EN) +#define QUANTA_GSJ_POWER_ON_STRAPS NPCM7XX_POWER_ON_STRAPS_DEFAULT +#define QUANTA_GBS_POWER_ON_STRAPS ( \ + NPCM7XX_POWER_ON_STRAPS_DEFAULT & ~NPCM7XX_PWRON_STRAP_SFAB) +#define KUDO_BMC_POWER_ON_STRAPS NPCM7XX_POWER_ON_STRAPS_DEFAULT +#define MORI_BMC_POWER_ON_STRAPS NPCM7XX_POWER_ON_STRAPS_DEFAULT static const char npcm7xx_default_bootrom[] = "npcm7xx_bootrom.bin"; @@ -80,6 +98,22 @@ static void npcm7xx_connect_dram(NPCM7xxState *soc, MemoryRegion *dram) &error_abort); } +static void sdhci_attach_drive(SDHCIState *sdhci, int unit) +{ + DriveInfo *di = drive_get(IF_SD, 0, unit); + BlockBackend *blk = di ? 
blk_by_legacy_dinfo(di) : NULL; + + BusState *bus = qdev_get_child_bus(DEVICE(sdhci), "sd-bus"); + if (bus == NULL) { + error_report("No SD bus found in SOC object"); + exit(1); + } + + DeviceState *carddev = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(carddev, "drive", blk, &error_fatal); + qdev_realize_and_unref(carddev, bus, &error_fatal); +} + static NPCM7xxState *npcm7xx_create_soc(MachineState *machine, uint32_t hw_straps) { @@ -309,6 +343,39 @@ static void quanta_gbs_i2c_init(NPCM7xxState *soc) */ } +static void kudo_bmc_i2c_init(NPCM7xxState *soc) +{ + I2CSlave *i2c_mux; + + i2c_mux = i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 1), + TYPE_PCA9548, 0x75); + + /* tmp105 is compatible with the lm75 */ + i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 4), "tmp105", 0x5c); + i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 5), "tmp105", 0x5c); + i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 6), "tmp105", 0x5c); + i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 7), "tmp105", 0x5c); + + i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 1), TYPE_PCA9548, 0x77); + + i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 4), TYPE_PCA9548, 0x77); + + at24c_eeprom_init(soc, 4, 0x50, 8192); /* mbfru */ + + i2c_mux = i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 13), + TYPE_PCA9548, 0x77); + + /* tmp105 is compatible with the lm75 */ + i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 2), "tmp105", 0x48); + i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 3), "tmp105", 0x49); + i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 4), "tmp105", 0x48); + i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 5), "tmp105", 0x49); + + at24c_eeprom_init(soc, 14, 0x55, 8192); /* bmcfru */ + + /* TODO: Add remaining i2c devices. 
*/ +} + static void npcm750_evb_init(MachineState *machine) { NPCM7xxState *soc; @@ -354,6 +421,41 @@ static void quanta_gbs_init(MachineState *machine) drive_get(IF_MTD, 0, 0)); quanta_gbs_i2c_init(soc); + sdhci_attach_drive(&soc->mmc.sdhci, 0); + npcm7xx_load_kernel(machine, soc); +} + +static void kudo_bmc_init(MachineState *machine) +{ + NPCM7xxState *soc; + + soc = npcm7xx_create_soc(machine, KUDO_BMC_POWER_ON_STRAPS); + npcm7xx_connect_dram(soc, machine->ram); + qdev_realize(DEVICE(soc), NULL, &error_fatal); + + npcm7xx_load_bootrom(machine, soc); + npcm7xx_connect_flash(&soc->fiu[0], 0, "mx66u51235f", + drive_get(IF_MTD, 0, 0)); + npcm7xx_connect_flash(&soc->fiu[1], 0, "mx66u51235f", + drive_get(IF_MTD, 3, 0)); + + kudo_bmc_i2c_init(soc); + sdhci_attach_drive(&soc->mmc.sdhci, 0); + npcm7xx_load_kernel(machine, soc); +} + +static void mori_bmc_init(MachineState *machine) +{ + NPCM7xxState *soc; + + soc = npcm7xx_create_soc(machine, MORI_BMC_POWER_ON_STRAPS); + npcm7xx_connect_dram(soc, machine->ram); + qdev_realize(DEVICE(soc), NULL, &error_fatal); + + npcm7xx_load_bootrom(machine, soc); + npcm7xx_connect_flash(&soc->fiu[1], 0, "mx66u51235f", + drive_get(IF_MTD, 3, 0)); + npcm7xx_load_kernel(machine, soc); } @@ -417,6 +519,30 @@ static void gbs_bmc_machine_class_init(ObjectClass *oc, void *data) mc->default_ram_size = 1 * GiB; } +static void kudo_bmc_machine_class_init(ObjectClass *oc, void *data) +{ + NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_CLASS(oc); + MachineClass *mc = MACHINE_CLASS(oc); + + npcm7xx_set_soc_type(nmc, TYPE_NPCM730); + + mc->desc = "Kudo BMC (Cortex-A9)"; + mc->init = kudo_bmc_init; + mc->default_ram_size = 1 * GiB; +}; + +static void mori_bmc_machine_class_init(ObjectClass *oc, void *data) +{ + NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_CLASS(oc); + MachineClass *mc = MACHINE_CLASS(oc); + + npcm7xx_set_soc_type(nmc, TYPE_NPCM730); + + mc->desc = "Mori BMC (Cortex-A9)"; + mc->init = mori_bmc_init; + mc->default_ram_size = 1 * GiB; +} + static const TypeInfo npcm7xx_machine_types[] = { { .name = TYPE_NPCM7XX_MACHINE, @@ -437,6 +563,14 @@ static const TypeInfo npcm7xx_machine_types[] = { .name = MACHINE_TYPE_NAME("quanta-gbs-bmc"), .parent = TYPE_NPCM7XX_MACHINE, .class_init = gbs_bmc_machine_class_init, + }, { + .name = MACHINE_TYPE_NAME("kudo-bmc"), + .parent = TYPE_NPCM7XX_MACHINE, + .class_init = kudo_bmc_machine_class_init, + }, { + .name = MACHINE_TYPE_NAME("mori-bmc"), + .parent = TYPE_NPCM7XX_MACHINE, + .class_init = mori_bmc_machine_class_init, }, }; diff --git a/hw/arm/nrf51_soc.c b/hw/arm/nrf51_soc.c index 9407c2f268..34da0d62f0 100644 --- a/hw/arm/nrf51_soc.c +++ b/hw/arm/nrf51_soc.c @@ -12,6 +12,7 @@ #include "qapi/error.h" #include "hw/arm/boot.h" #include "hw/sysbus.h" +#include "hw/qdev-clock.h" #include "hw/misc/unimp.h" #include "qemu/log.h" @@ -66,7 +67,22 @@ static void nrf51_soc_realize(DeviceState *dev_soc, Error **errp) return; } - system_clock_scale = NANOSECONDS_PER_SECOND / HCLK_FRQ; + /* + * HCLK on this SoC is fixed, so we set up sysclk ourselves and + * the board shouldn't connect it. + */ + if (clock_has_source(s->sysclk)) { + error_setg(errp, "sysclk clock must not be wired up by the board code"); + return; + } + /* This clock doesn't need migration because it is fixed-frequency */ + clock_set_hz(s->sysclk, HCLK_FRQ); + qdev_connect_clock_in(DEVICE(&s->cpu), "cpuclk", s->sysclk); + /* + * This SoC has no systick device, so don't connect refclk. 
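+ * (The armv7m "refclk" input only feeds the SysTick external reference,
+ * so with no SysTick there is nothing for it to drive.)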
+ * TODO: model the lack of systick (currently the armv7m object + * will always provide one). + */ object_property_set_link(OBJECT(&s->cpu), "memory", OBJECT(&s->container), &error_abort); @@ -191,6 +207,8 @@ static void nrf51_soc_init(Object *obj) TYPE_NRF51_TIMER); } + + s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0); } static Property nrf51_soc_properties[] = { diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c index af3164c551..b151113c27 100644 --- a/hw/arm/nseries.c +++ b/hw/arm/nseries.c @@ -24,6 +24,7 @@ #include "chardev/char.h" #include "qemu/cutils.h" #include "qemu/bswap.h" +#include "qemu/hw-version.h" #include "sysemu/reset.h" #include "sysemu/runstate.h" #include "sysemu/sysemu.h" @@ -701,7 +702,7 @@ static uint32_t mipid_txrx(void *opaque, uint32_t cmd, int len) static void *mipid_init(void) { - struct mipid_s *s = (struct mipid_s *) g_malloc0(sizeof(*s)); + struct mipid_s *s = g_malloc0(sizeof(*s)); s->id = 0x838f03; mipid_reset(s); @@ -1299,7 +1300,7 @@ static int n810_atag_setup(const struct arm_boot_info *info, void *p) static void n8x0_init(MachineState *machine, struct arm_boot_info *binfo, int model) { - struct n800_s *s = (struct n800_s *) g_malloc0(sizeof(*s)); + struct n800_s *s = g_malloc0(sizeof(*s)); MachineClass *mc = MACHINE_GET_CLASS(machine); if (machine->ram_size != mc->default_ram_size) { @@ -1364,7 +1365,7 @@ static void n8x0_init(MachineState *machine, } if (option_rom[0].name && - (machine->boot_order[0] == 'n' || !machine->kernel_filename)) { + (machine->boot_config.order[0] == 'n' || !machine->kernel_filename)) { uint8_t *nolo_tags = g_new(uint8_t, 0x10000); /* No, wait, better start at the ROM. */ s->mpu->cpu->env.regs[15] = OMAP2_Q2_BASE + 0x400000; diff --git a/hw/arm/omap1.c b/hw/arm/omap1.c index 180d3788f8..f693faa43e 100644 --- a/hw/arm/omap1.c +++ b/hw/arm/omap1.c @@ -18,10 +18,10 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "qapi/error.h" -#include "qemu-common.h" #include "cpu.h" #include "exec/address-spaces.h" #include "hw/hw.h" @@ -35,6 +35,7 @@ #include "sysemu/qtest.h" #include "sysemu/reset.h" #include "sysemu/runstate.h" +#include "sysemu/rtc.h" #include "qemu/range.h" #include "hw/sysbus.h" #include "qemu/cutils.h" diff --git a/hw/arm/omap2.c b/hw/arm/omap2.c index 02b1aa8c97..8571eedd73 100644 --- a/hw/arm/omap2.c +++ b/hw/arm/omap2.c @@ -274,7 +274,7 @@ static void omap_eac_format_update(struct omap_eac_s *s) fmt.freq = s->codec.rate; /* TODO: signedness possibly depends on the CODEC hardware - or * does I2S specify it? */ - /* All register writes are 16 bits so we we store 16-bit samples + /* All register writes are 16 bits so we store 16-bit samples * in the buffers regardless of AGCFR[B8_16] value. */ fmt.fmt = AUDIO_FORMAT_U16; diff --git a/hw/arm/orangepi.c b/hw/arm/orangepi.c index 0cf9895ce7..3ace474870 100644 --- a/hw/arm/orangepi.c +++ b/hw/arm/orangepi.c @@ -25,9 +25,7 @@ #include "hw/qdev-properties.h" #include "hw/arm/allwinner-h3.h" -static struct arm_boot_info orangepi_binfo = { - .nb_cpus = AW_H3_NUM_CPUS, -}; +static struct arm_boot_info orangepi_binfo; static void orangepi_init(MachineState *machine) { @@ -85,7 +83,7 @@ static void orangepi_init(MachineState *machine) qdev_realize(DEVICE(h3), NULL, &error_abort); /* Retrieve SD bus */ - di = drive_get_next(IF_SD); + di = drive_get(IF_SD, 0, 0); blk = di ? 
blk_by_legacy_dinfo(di) : NULL; bus = qdev_get_child_bus(DEVICE(h3), "sd-bus"); @@ -105,6 +103,7 @@ static void orangepi_init(MachineState *machine) } orangepi_binfo.loader_start = h3->memmap[AW_H3_DEV_SDRAM]; orangepi_binfo.ram_size = machine->ram_size; + orangepi_binfo.psci_conduit = QEMU_PSCI_CONDUIT_SMC; arm_load_kernel(ARM_CPU(first_cpu), machine, &orangepi_binfo); } diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c index 15a247efae..93dda83d7a 100644 --- a/hw/arm/pxa2xx.c +++ b/hw/arm/pxa2xx.c @@ -8,7 +8,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/error-report.h" #include "qemu/module.h" #include "qapi/error.h" @@ -27,9 +26,11 @@ #include "chardev/char-fe.h" #include "sysemu/blockdev.h" #include "sysemu/qtest.h" +#include "sysemu/rtc.h" #include "qemu/cutils.h" #include "qemu/log.h" #include "qom/object.h" +#include "target/arm/cpregs.h" static struct { hwaddr io_base; @@ -383,7 +384,6 @@ static const ARMCPRegInfo pxa_cp_reginfo[] = { { .name = "PWRMODE", .cp = 14, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_IO, .readfn = arm_cp_read_zero, .writefn = pxa2xx_pwrmode_write }, - REGINFO_SENTINEL }; static void pxa2xx_setup_cp14(PXA2xxState *s) @@ -1305,6 +1305,8 @@ static int pxa2xx_i2c_event(I2CSlave *i2c, enum i2c_event event) case I2C_NACK: s->status |= 1 << 1; /* set ACKNAK */ break; + default: + return -1; } pxa2xx_i2c_update(s); diff --git a/hw/arm/pxa2xx_pic.c b/hw/arm/pxa2xx_pic.c index ed032fed54..47132ab982 100644 --- a/hw/arm/pxa2xx_pic.c +++ b/hw/arm/pxa2xx_pic.c @@ -17,6 +17,7 @@ #include "hw/sysbus.h" #include "migration/vmstate.h" #include "qom/object.h" +#include "target/arm/cpregs.h" #define ICIP 0x00 /* Interrupt Controller IRQ Pending register */ #define ICMR 0x04 /* Interrupt Controller Mask register */ @@ -256,7 +257,6 @@ static const ARMCPRegInfo pxa_pic_cp_reginfo[] = { REGINFO_FOR_PIC_CP("ICLR2", 8), REGINFO_FOR_PIC_CP("ICFP2", 9), REGINFO_FOR_PIC_CP("ICPR2", 0xa), - REGINFO_SENTINEL }; static const MemoryRegionOps pxa2xx_pic_ops = { diff --git a/hw/arm/raspi.c b/hw/arm/raspi.c index b30a17871f..a7d287b1a8 100644 --- a/hw/arm/raspi.c +++ b/hw/arm/raspi.c @@ -16,6 +16,7 @@ #include "qemu/units.h" #include "qemu/cutils.h" #include "qapi/error.h" +#include "hw/arm/boot.h" #include "hw/arm/bcm2836.h" #include "hw/registerfields.h" #include "qemu/error-report.h" @@ -124,20 +125,22 @@ static const char *board_type(uint32_t board_rev) static void write_smpboot(ARMCPU *cpu, const struct arm_boot_info *info) { - static const uint32_t smpboot[] = { - 0xe1a0e00f, /* mov lr, pc */ - 0xe3a0fe00 + (BOARDSETUP_ADDR >> 4), /* mov pc, BOARDSETUP_ADDR */ - 0xee100fb0, /* mrc p15, 0, r0, c0, c0, 5;get core ID */ - 0xe7e10050, /* ubfx r0, r0, #0, #2 ;extract LSB */ - 0xe59f5014, /* ldr r5, =0x400000CC ;load mbox base */ - 0xe320f001, /* 1: yield */ - 0xe7953200, /* ldr r3, [r5, r0, lsl #4] ;read mbox for our core*/ - 0xe3530000, /* cmp r3, #0 ;spin while zero */ - 0x0afffffb, /* beq 1b */ - 0xe7853200, /* str r3, [r5, r0, lsl #4] ;clear mbox */ - 0xe12fff13, /* bx r3 ;jump to target */ - 0x400000cc, /* (constant: mailbox 3 read/clear base) */ + static const ARMInsnFixup smpboot[] = { + { 0xe1a0e00f }, /* mov lr, pc */ + { 0xe3a0fe00 + (BOARDSETUP_ADDR >> 4) }, /* mov pc, BOARDSETUP_ADDR */ + { 0xee100fb0 }, /* mrc p15, 0, r0, c0, c0, 5;get core ID */ + { 0xe7e10050 }, /* ubfx r0, r0, #0, #2 ;extract LSB */ + { 0xe59f5014 }, /* ldr r5, =0x400000CC ;load mbox base */ + { 0xe320f001 }, /* 1: yield */ + { 0xe7953200 }, /* ldr r3, 
[r5, r0, lsl #4] ;read mbox for our core */ + { 0xe3530000 }, /* cmp r3, #0 ;spin while zero */ + { 0x0afffffb }, /* beq 1b */ + { 0xe7853200 }, /* str r3, [r5, r0, lsl #4] ;clear mbox */ + { 0xe12fff13 }, /* bx r3 ;jump to target */ + { 0x400000cc }, /* (constant: mailbox 3 read/clear base) */ + { 0, FIXUP_TERMINATOR } }; + static const uint32_t fixupcontext[FIXUP_MAX] = { 0 }; /* check that we don't overrun board setup vectors */ QEMU_BUILD_BUG_ON(SMPBOOT_ADDR + sizeof(smpboot) > MVBAR_ADDR); @@ -145,9 +148,8 @@ static void write_smpboot(ARMCPU *cpu, const struct arm_boot_info *info) QEMU_BUILD_BUG_ON((BOARDSETUP_ADDR & 0xf) != 0 || (BOARDSETUP_ADDR >> 4) >= 0x100); - rom_add_blob_fixed_as("raspi_smpboot", smpboot, sizeof(smpboot), - info->smp_loader_start, - arm_boot_address_space(cpu, info)); + arm_write_bootloader("raspi_smpboot", arm_boot_address_space(cpu, info), + info->smp_loader_start, smpboot, fixupcontext); } static void write_smpboot64(ARMCPU *cpu, const struct arm_boot_info *info) @@ -161,26 +163,28 @@ static void write_smpboot64(ARMCPU *cpu, const struct arm_boot_info *info) * the primary CPU goes into the kernel. We put these variables inside * a rom blob, so that the reset for ROM contents zeroes them for us. */ - static const uint32_t smpboot[] = { - 0xd2801b05, /* mov x5, 0xd8 */ - 0xd53800a6, /* mrs x6, mpidr_el1 */ - 0x924004c6, /* and x6, x6, #0x3 */ - 0xd503205f, /* spin: wfe */ - 0xf86678a4, /* ldr x4, [x5,x6,lsl #3] */ - 0xb4ffffc4, /* cbz x4, spin */ - 0xd2800000, /* mov x0, #0x0 */ - 0xd2800001, /* mov x1, #0x0 */ - 0xd2800002, /* mov x2, #0x0 */ - 0xd2800003, /* mov x3, #0x0 */ - 0xd61f0080, /* br x4 */ + static const ARMInsnFixup smpboot[] = { + { 0xd2801b05 }, /* mov x5, 0xd8 */ + { 0xd53800a6 }, /* mrs x6, mpidr_el1 */ + { 0x924004c6 }, /* and x6, x6, #0x3 */ + { 0xd503205f }, /* spin: wfe */ + { 0xf86678a4 }, /* ldr x4, [x5,x6,lsl #3] */ + { 0xb4ffffc4 }, /* cbz x4, spin */ + { 0xd2800000 }, /* mov x0, #0x0 */ + { 0xd2800001 }, /* mov x1, #0x0 */ + { 0xd2800002 }, /* mov x2, #0x0 */ + { 0xd2800003 }, /* mov x3, #0x0 */ + { 0xd61f0080 }, /* br x4 */ + { 0, FIXUP_TERMINATOR } }; + static const uint32_t fixupcontext[FIXUP_MAX] = { 0 }; static const uint64_t spintables[] = { 0, 0, 0, 0 }; - rom_add_blob_fixed_as("raspi_smpboot", smpboot, sizeof(smpboot), - info->smp_loader_start, as); + arm_write_bootloader("raspi_smpboot", as, info->smp_loader_start, + smpboot, fixupcontext); rom_add_blob_fixed_as("raspi_spintables", spintables, sizeof(spintables), SPINTABLE_ADDR, as); } @@ -204,7 +208,6 @@ static void setup_boot(MachineState *machine, RaspiProcessorId processor_id, s->binfo.board_id = MACH_TYPE_BCM2708; s->binfo.ram_size = ram_size; - s->binfo.nb_cpus = machine->smp.cpus; if (processor_id <= PROCESSOR_ID_BCM2836) { /* @@ -281,10 +284,10 @@ static void raspi_machine_init(MachineState *machine) object_property_add_const_link(OBJECT(&s->soc), "ram", OBJECT(machine->ram)); object_property_set_int(OBJECT(&s->soc), "board-rev", board_rev, &error_abort); - qdev_realize(DEVICE(&s->soc), NULL, &error_abort); + qdev_realize(DEVICE(&s->soc), NULL, &error_fatal); /* Create and plug in the SD cards */ - di = drive_get_next(IF_SD); + di = drive_get(IF_SD, 0, 0); blk = di ? 
blk_by_legacy_dinfo(di) : NULL; bus = qdev_get_child_bus(DEVICE(&s->soc), "sd-bus"); if (bus == NULL) { @@ -340,7 +343,6 @@ static void raspi2b_machine_class_init(ObjectClass *oc, void *data) MachineClass *mc = MACHINE_CLASS(oc); RaspiMachineClass *rmc = RASPI_MACHINE_CLASS(oc); - mc->alias = "raspi2"; rmc->board_rev = 0xa21041; raspi_machine_class_common_init(mc, rmc->board_rev); }; @@ -360,7 +362,6 @@ static void raspi3b_machine_class_init(ObjectClass *oc, void *data) MachineClass *mc = MACHINE_CLASS(oc); RaspiMachineClass *rmc = RASPI_MACHINE_CLASS(oc); - mc->alias = "raspi3"; rmc->board_rev = 0xa02082; raspi_machine_class_common_init(mc, rmc->board_rev); }; diff --git a/hw/arm/realview.c b/hw/arm/realview.c index 1c54316ba3..d2dc8a8952 100644 --- a/hw/arm/realview.c +++ b/hw/arm/realview.c @@ -13,9 +13,11 @@ #include "hw/sysbus.h" #include "hw/arm/boot.h" #include "hw/arm/primecell.h" +#include "hw/core/split-irq.h" #include "hw/net/lan9118.h" #include "hw/net/smc91c111.h" #include "hw/pci/pci.h" +#include "hw/qdev-core.h" #include "net/net.h" #include "sysemu/sysemu.h" #include "hw/boards.h" @@ -53,6 +55,20 @@ static const int realview_board_id[] = { 0x76d }; +static void split_irq_from_named(DeviceState *src, const char* outname, + qemu_irq out1, qemu_irq out2) { + DeviceState *splitter = qdev_new(TYPE_SPLIT_IRQ); + + qdev_prop_set_uint32(splitter, "num-lines", 2); + + qdev_realize_and_unref(splitter, NULL, &error_fatal); + + qdev_connect_gpio_out(splitter, 0, out1); + qdev_connect_gpio_out(splitter, 1, out2); + qdev_connect_gpio_out_named(src, outname, 0, + qdev_get_gpio_in(splitter, 0)); +} + static void realview_init(MachineState *machine, enum realview_board_type board_type) { @@ -66,7 +82,6 @@ static void realview_init(MachineState *machine, DeviceState *dev, *sysctl, *gpio2, *pl041; SysBusDevice *busdev; qemu_irq pic[64]; - qemu_irq mmc_irq[2]; PCIBus *pci_bus = NULL; NICInfo *nd; DriveInfo *dinfo; @@ -229,15 +244,15 @@ static void realview_init(MachineState *machine, * and the PL061 has them the other way about. Also the card * detect line is inverted. */ - mmc_irq[0] = qemu_irq_split( - qdev_get_gpio_in(sysctl, ARM_SYSCTL_GPIO_MMC_WPROT), - qdev_get_gpio_in(gpio2, 1)); - mmc_irq[1] = qemu_irq_split( - qdev_get_gpio_in(sysctl, ARM_SYSCTL_GPIO_MMC_CARDIN), - qemu_irq_invert(qdev_get_gpio_in(gpio2, 0))); - qdev_connect_gpio_out_named(dev, "card-read-only", 0, mmc_irq[0]); - qdev_connect_gpio_out_named(dev, "card-inserted", 0, mmc_irq[1]); - dinfo = drive_get_next(IF_SD); + split_irq_from_named(dev, "card-read-only", + qdev_get_gpio_in(sysctl, ARM_SYSCTL_GPIO_MMC_WPROT), + qdev_get_gpio_in(gpio2, 1)); + + split_irq_from_named(dev, "card-inserted", + qdev_get_gpio_in(sysctl, ARM_SYSCTL_GPIO_MMC_CARDIN), + qemu_irq_invert(qdev_get_gpio_in(gpio2, 0))); + + dinfo = drive_get(IF_SD, 0, 0); if (dinfo) { DeviceState *card; @@ -363,7 +378,6 @@ static void realview_init(MachineState *machine, memory_region_add_subregion(sysmem, SMP_BOOT_ADDR, ram_hack); realview_binfo.ram_size = ram_size; - realview_binfo.nb_cpus = smp_cpus; realview_binfo.board_id = realview_board_id[board_type]; realview_binfo.loader_start = (board_type == BOARD_PB_A8 ? 
0x70000000 : 0); arm_load_kernel(ARM_CPU(first_cpu), machine, &realview_binfo); diff --git a/hw/arm/sabrelite.c b/hw/arm/sabrelite.c index 29fc777b61..41191245b8 100644 --- a/hw/arm/sabrelite.c +++ b/hw/arm/sabrelite.c @@ -76,7 +76,7 @@ static void sabrelite_init(MachineState *machine) if (spi_bus) { DeviceState *flash_dev; qemu_irq cs_line; - DriveInfo *dinfo = drive_get_next(IF_MTD); + DriveInfo *dinfo = drive_get(IF_MTD, 0, 0); flash_dev = qdev_new("sst25vf016b"); if (dinfo) { @@ -87,13 +87,12 @@ static void sabrelite_init(MachineState *machine) qdev_realize_and_unref(flash_dev, BUS(spi_bus), &error_fatal); cs_line = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0); - sysbus_connect_irq(SYS_BUS_DEVICE(spi_dev), 1, cs_line); + qdev_connect_gpio_out(DEVICE(&s->gpio[2]), 19, cs_line); } } } sabrelite_binfo.ram_size = machine->ram_size; - sabrelite_binfo.nb_cpus = machine->smp.cpus; sabrelite_binfo.secure_boot = true; sabrelite_binfo.write_secondary_boot = sabrelite_write_secondary; sabrelite_binfo.secondary_cpu_reset_hook = sabrelite_reset_secondary; diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c index c1629df603..4bb444684f 100644 --- a/hw/arm/sbsa-ref.c +++ b/hw/arm/sbsa-ref.c @@ -18,7 +18,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qapi/error.h" #include "qemu/error-report.h" @@ -65,7 +64,7 @@ enum { SBSA_GIC_DIST, SBSA_GIC_REDIST, SBSA_SECURE_EC, - SBSA_GWDT, + SBSA_GWDT_WS0, SBSA_GWDT_REFRESH, SBSA_GWDT_CONTROL, SBSA_SMMU, @@ -140,12 +139,14 @@ static const int sbsa_ref_irqmap[] = { [SBSA_AHCI] = 10, [SBSA_EHCI] = 11, [SBSA_SMMU] = 12, /* ... to 15 */ - [SBSA_GWDT] = 16, + [SBSA_GWDT_WS0] = 16, }; static const char * const valid_cpus[] = { ARM_CPU_TYPE_NAME("cortex-a57"), ARM_CPU_TYPE_NAME("cortex-a72"), + ARM_CPU_TYPE_NAME("cortex-a76"), + ARM_CPU_TYPE_NAME("neoverse-n1"), ARM_CPU_TYPE_NAME("max"), }; @@ -191,6 +192,20 @@ static void create_fdt(SBSAMachineState *sms) qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x2); qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x2); + /* + * This versioning scheme is for informing platform fw only. It is neither: + * - A QEMU versioned machine type; a given version of QEMU will emulate + * a given version of the platform. + * - A reflection of level of SBSA (now SystemReady SR) support provided. + * + * machine-version-major: updated when changes breaking fw compatibility + * are introduced. + * machine-version-minor: updated when features are added that don't break + * fw compatibility. 
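+ * Firmware can read both cells from the root node with the usual libfdt
+ * accessors, e.g. fdt_getprop(fdt, 0, "machine-version-major", NULL).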
+ */ + qemu_fdt_setprop_cell(fdt, "/", "machine-version-major", 0); + qemu_fdt_setprop_cell(fdt, "/", "machine-version-minor", 0); + if (ms->numa_state->have_numa_distance) { int size = nb_numa_nodes * nb_numa_nodes * 3 * sizeof(uint32_t); uint32_t *matrix = g_malloc0(size); @@ -481,7 +496,7 @@ static void create_wdt(const SBSAMachineState *sms) hwaddr cbase = sbsa_ref_memmap[SBSA_GWDT_CONTROL].base; DeviceState *dev = qdev_new(TYPE_WDT_SBSA); SysBusDevice *s = SYS_BUS_DEVICE(dev); - int irq = sbsa_ref_irqmap[SBSA_GWDT]; + int irq = sbsa_ref_irqmap[SBSA_GWDT_WS0]; sysbus_realize_and_unref(s, &error_fatal); sysbus_mmio_map(s, 0, rbase); @@ -670,7 +685,7 @@ static void sbsa_ref_init(MachineState *machine) int n, sbsa_max_cpus; if (!cpu_type_valid(machine->cpu_type)) { - error_report("mach-virt: CPU type %s not supported", machine->cpu_type); + error_report("sbsa-ref: CPU type %s not supported", machine->cpu_type); exit(1); } @@ -777,7 +792,6 @@ static void sbsa_ref_init(MachineState *machine) create_secure_ec(secure_sysmem); sms->bootinfo.ram_size = machine->ram_size; - sms->bootinfo.nb_cpus = smp_cpus; sms->bootinfo.board_id = -1; sms->bootinfo.loader_start = sbsa_ref_memmap[SBSA_MEM].base; sms->bootinfo.get_dtb = sbsa_ref_dtb; diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c index 0459850a93..e09b9c13b7 100644 --- a/hw/arm/smmu-common.c +++ b/hw/arm/smmu-common.c @@ -193,7 +193,8 @@ static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte, dma_addr_t addr = baseaddr + index * sizeof(*pte); /* TODO: guarantee 64-bit single-copy atomicity */ - ret = dma_memory_read(&address_space_memory, addr, pte, sizeof(*pte)); + ret = dma_memory_read(&address_space_memory, addr, pte, sizeof(*pte), + MEMTXATTRS_UNSPECIFIED); if (ret != MEMTX_OK) { info->type = SMMU_PTW_ERR_WALK_EABT; diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h index d1885ae3f2..bce161870f 100644 --- a/hw/arm/smmuv3-internal.h +++ b/hw/arm/smmuv3-internal.h @@ -56,6 +56,7 @@ REG32(IDR2, 0x8) REG32(IDR3, 0xc) FIELD(IDR3, HAD, 2, 1); FIELD(IDR3, RIL, 10, 1); + FIELD(IDR3, BBML, 11, 2); REG32(IDR4, 0x10) REG32(IDR5, 0x14) FIELD(IDR5, OAS, 0, 3); @@ -387,7 +388,6 @@ typedef struct SMMUEventInfo { SMMUEventType type; uint32_t sid; bool recorded; - bool record_trans_faults; bool inval_ste_allowed; union { struct { diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index 01b60bee49..daa80e9c7b 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -102,7 +102,8 @@ static inline MemTxResult queue_read(SMMUQueue *q, void *data) { dma_addr_t addr = Q_CONS_ENTRY(q); - return dma_memory_read(&address_space_memory, addr, data, q->entry_size); + return dma_memory_read(&address_space_memory, addr, data, q->entry_size, + MEMTXATTRS_UNSPECIFIED); } static MemTxResult queue_write(SMMUQueue *q, void *data) @@ -110,7 +111,8 @@ static MemTxResult queue_write(SMMUQueue *q, void *data) dma_addr_t addr = Q_PROD_ENTRY(q); MemTxResult ret; - ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size); + ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size, + MEMTXATTRS_UNSPECIFIED); if (ret != MEMTX_OK) { return ret; } @@ -257,6 +259,7 @@ static void smmuv3_init_regs(SMMUv3State *s) s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1); s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1); + s->idr[3] = FIELD_DP32(s->idr[3], IDR3, BBML, 2); /* 4K, 16K and 64K granule support */ s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1); @@ -276,6 +279,12 @@ static void smmuv3_init_regs(SMMUv3State *s) s->features = 
0; s->sid_split = 0; s->aidr = 0x1; + s->cr[0] = 0; + s->cr0ack = 0; + s->irq_ctrl = 0; + s->gerror = 0; + s->gerrorn = 0; + s->statusr = 0; } static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf, @@ -285,7 +294,8 @@ static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf, trace_smmuv3_get_ste(addr); /* TODO: guarantee 64-bit single-copy atomicity */ - ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf)); + ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf), + MEMTXATTRS_UNSPECIFIED); if (ret != MEMTX_OK) { qemu_log_mask(LOG_GUEST_ERROR, "Cannot fetch pte at address=0x%"PRIx64"\n", addr); @@ -306,7 +316,8 @@ static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid, trace_smmuv3_get_cd(addr); /* TODO: guarantee 64-bit single-copy atomicity */ - ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf)); + ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf), + MEMTXATTRS_UNSPECIFIED); if (ret != MEMTX_OK) { qemu_log_mask(LOG_GUEST_ERROR, "Cannot fetch pte at address=0x%"PRIx64"\n", addr); @@ -411,7 +422,7 @@ static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste, l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std)); /* TODO: guarantee 64-bit single-copy atomicity */ ret = dma_memory_read(&address_space_memory, l1ptr, &l1std, - sizeof(l1std)); + sizeof(l1std), MEMTXATTRS_UNSPECIFIED); if (ret != MEMTX_OK) { qemu_log_mask(LOG_GUEST_ERROR, "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr); @@ -517,7 +528,7 @@ static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event) trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz, tt->had); } - event->record_trans_faults = CD_R(cd); + cfg->record_faults = CD_R(cd); return 0; @@ -670,7 +681,7 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, tt = select_tt(cfg, addr); if (!tt) { - if (event.record_trans_faults) { + if (cfg->record_faults) { event.type = SMMU_EVT_F_TRANSLATION; event.u.f_translation.addr = addr; event.u.f_translation.rnw = flag & 0x1; @@ -686,7 +697,7 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, if (cached_entry) { if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) { status = SMMU_TRANS_ERROR; - if (event.record_trans_faults) { + if (cfg->record_faults) { event.type = SMMU_EVT_F_PERMISSION; event.u.f_permission.addr = addr; event.u.f_permission.rnw = flag & 0x1; @@ -710,28 +721,28 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, event.u.f_walk_eabt.addr2 = ptw_info.addr; break; case SMMU_PTW_ERR_TRANSLATION: - if (event.record_trans_faults) { + if (cfg->record_faults) { event.type = SMMU_EVT_F_TRANSLATION; event.u.f_translation.addr = addr; event.u.f_translation.rnw = flag & 0x1; } break; case SMMU_PTW_ERR_ADDR_SIZE: - if (event.record_trans_faults) { + if (cfg->record_faults) { event.type = SMMU_EVT_F_ADDR_SIZE; event.u.f_addr_size.addr = addr; event.u.f_addr_size.rnw = flag & 0x1; } break; case SMMU_PTW_ERR_ACCESS: - if (event.record_trans_faults) { + if (cfg->record_faults) { event.type = SMMU_EVT_F_ACCESS; event.u.f_access.addr = addr; event.u.f_access.rnw = flag & 0x1; } break; case SMMU_PTW_ERR_PERMISSION: - if (event.record_trans_faults) { + if (cfg->record_faults) { event.type = SMMU_EVT_F_PERMISSION; event.u.f_permission.addr = addr; event.u.f_permission.rnw = flag & 0x1; @@ -750,7 +761,7 @@ epilogue: qemu_mutex_unlock(&s->mutex); switch (status) { case SMMU_TRANS_SUCCESS: - entry.perm = flag; + 
entry.perm = cached_entry->entry.perm; entry.translated_addr = cached_entry->entry.translated_addr + (addr & cached_entry->entry.addr_mask); entry.addr_mask = cached_entry->entry.addr_mask; @@ -776,7 +787,7 @@ epilogue: break; case SMMU_TRANS_ERROR: qemu_log_mask(LOG_GUEST_ERROR, - "%s translation failed for iova=0x%"PRIx64"(%s)\n", + "%s translation failed for iova=0x%"PRIx64" (%s)\n", mr->parent_obj.name, addr, smmu_event_string(event.type)); smmuv3_record_event(s, &event); break; diff --git a/hw/arm/stellaris.c b/hw/arm/stellaris.c index ad48cf2605..a9e96c37f8 100644 --- a/hw/arm/stellaris.c +++ b/hw/arm/stellaris.c @@ -9,7 +9,9 @@ #include "qemu/osdep.h" #include "qapi/error.h" +#include "hw/core/split-irq.h" #include "hw/sysbus.h" +#include "hw/sd/sd.h" #include "hw/ssi/ssi.h" #include "hw/arm/boot.h" #include "qemu/timer.h" @@ -26,6 +28,7 @@ #include "hw/watchdog/cmsdk-apb-watchdog.h" #include "migration/vmstate.h" #include "hw/misc/unimp.h" +#include "hw/timer/stellaris-gptm.h" #include "hw/qdev-clock.h" #include "qom/object.h" @@ -55,306 +58,6 @@ typedef const struct { uint32_t peripherals; } stellaris_board_info; -/* General purpose timer module. */ - -#define TYPE_STELLARIS_GPTM "stellaris-gptm" -OBJECT_DECLARE_SIMPLE_TYPE(gptm_state, STELLARIS_GPTM) - -struct gptm_state { - SysBusDevice parent_obj; - - MemoryRegion iomem; - uint32_t config; - uint32_t mode[2]; - uint32_t control; - uint32_t state; - uint32_t mask; - uint32_t load[2]; - uint32_t match[2]; - uint32_t prescale[2]; - uint32_t match_prescale[2]; - uint32_t rtc; - int64_t tick[2]; - struct gptm_state *opaque[2]; - QEMUTimer *timer[2]; - /* The timers have an alternate output used to trigger the ADC. */ - qemu_irq trigger; - qemu_irq irq; -}; - -static void gptm_update_irq(gptm_state *s) -{ - int level; - level = (s->state & s->mask) != 0; - qemu_set_irq(s->irq, level); -} - -static void gptm_stop(gptm_state *s, int n) -{ - timer_del(s->timer[n]); -} - -static void gptm_reload(gptm_state *s, int n, int reset) -{ - int64_t tick; - if (reset) - tick = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - else - tick = s->tick[n]; - - if (s->config == 0) { - /* 32-bit CountDown. */ - uint32_t count; - count = s->load[0] | (s->load[1] << 16); - tick += (int64_t)count * system_clock_scale; - } else if (s->config == 1) { - /* 32-bit RTC. 1Hz tick. */ - tick += NANOSECONDS_PER_SECOND; - } else if (s->mode[n] == 0xa) { - /* PWM mode. Not implemented. */ - } else { - qemu_log_mask(LOG_UNIMP, - "GPTM: 16-bit timer mode unimplemented: 0x%x\n", - s->mode[n]); - return; - } - s->tick[n] = tick; - timer_mod(s->timer[n], tick); -} - -static void gptm_tick(void *opaque) -{ - gptm_state **p = (gptm_state **)opaque; - gptm_state *s; - int n; - - s = *p; - n = p - s->opaque; - if (s->config == 0) { - s->state |= 1; - if ((s->control & 0x20)) { - /* Output trigger. */ - qemu_irq_pulse(s->trigger); - } - if (s->mode[0] & 1) { - /* One-shot. */ - s->control &= ~1; - } else { - /* Periodic. */ - gptm_reload(s, 0, 0); - } - } else if (s->config == 1) { - /* RTC. */ - uint32_t match; - s->rtc++; - match = s->match[0] | (s->match[1] << 16); - if (s->rtc > match) - s->rtc = 0; - if (s->rtc == 0) { - s->state |= 8; - } - gptm_reload(s, 0, 0); - } else if (s->mode[n] == 0xa) { - /* PWM mode. Not implemented. 
*/ - } else { - qemu_log_mask(LOG_UNIMP, - "GPTM: 16-bit timer mode unimplemented: 0x%x\n", - s->mode[n]); - } - gptm_update_irq(s); -} - -static uint64_t gptm_read(void *opaque, hwaddr offset, - unsigned size) -{ - gptm_state *s = (gptm_state *)opaque; - - switch (offset) { - case 0x00: /* CFG */ - return s->config; - case 0x04: /* TAMR */ - return s->mode[0]; - case 0x08: /* TBMR */ - return s->mode[1]; - case 0x0c: /* CTL */ - return s->control; - case 0x18: /* IMR */ - return s->mask; - case 0x1c: /* RIS */ - return s->state; - case 0x20: /* MIS */ - return s->state & s->mask; - case 0x24: /* CR */ - return 0; - case 0x28: /* TAILR */ - return s->load[0] | ((s->config < 4) ? (s->load[1] << 16) : 0); - case 0x2c: /* TBILR */ - return s->load[1]; - case 0x30: /* TAMARCHR */ - return s->match[0] | ((s->config < 4) ? (s->match[1] << 16) : 0); - case 0x34: /* TBMATCHR */ - return s->match[1]; - case 0x38: /* TAPR */ - return s->prescale[0]; - case 0x3c: /* TBPR */ - return s->prescale[1]; - case 0x40: /* TAPMR */ - return s->match_prescale[0]; - case 0x44: /* TBPMR */ - return s->match_prescale[1]; - case 0x48: /* TAR */ - if (s->config == 1) { - return s->rtc; - } - qemu_log_mask(LOG_UNIMP, - "GPTM: read of TAR but timer read not supported\n"); - return 0; - case 0x4c: /* TBR */ - qemu_log_mask(LOG_UNIMP, - "GPTM: read of TBR but timer read not supported\n"); - return 0; - default: - qemu_log_mask(LOG_GUEST_ERROR, - "GPTM: read at bad offset 0x02%" HWADDR_PRIx "\n", - offset); - return 0; - } -} - -static void gptm_write(void *opaque, hwaddr offset, - uint64_t value, unsigned size) -{ - gptm_state *s = (gptm_state *)opaque; - uint32_t oldval; - - /* The timers should be disabled before changing the configuration. - We take advantage of this and defer everything until the timer - is enabled. */ - switch (offset) { - case 0x00: /* CFG */ - s->config = value; - break; - case 0x04: /* TAMR */ - s->mode[0] = value; - break; - case 0x08: /* TBMR */ - s->mode[1] = value; - break; - case 0x0c: /* CTL */ - oldval = s->control; - s->control = value; - /* TODO: Implement pause. 
*/ - if ((oldval ^ value) & 1) { - if (value & 1) { - gptm_reload(s, 0, 1); - } else { - gptm_stop(s, 0); - } - } - if (((oldval ^ value) & 0x100) && s->config >= 4) { - if (value & 0x100) { - gptm_reload(s, 1, 1); - } else { - gptm_stop(s, 1); - } - } - break; - case 0x18: /* IMR */ - s->mask = value & 0x77; - gptm_update_irq(s); - break; - case 0x24: /* CR */ - s->state &= ~value; - break; - case 0x28: /* TAILR */ - s->load[0] = value & 0xffff; - if (s->config < 4) { - s->load[1] = value >> 16; - } - break; - case 0x2c: /* TBILR */ - s->load[1] = value & 0xffff; - break; - case 0x30: /* TAMARCHR */ - s->match[0] = value & 0xffff; - if (s->config < 4) { - s->match[1] = value >> 16; - } - break; - case 0x34: /* TBMATCHR */ - s->match[1] = value >> 16; - break; - case 0x38: /* TAPR */ - s->prescale[0] = value; - break; - case 0x3c: /* TBPR */ - s->prescale[1] = value; - break; - case 0x40: /* TAPMR */ - s->match_prescale[0] = value; - break; - case 0x44: /* TBPMR */ - s->match_prescale[0] = value; - break; - default: - qemu_log_mask(LOG_GUEST_ERROR, - "GPTM: write at bad offset 0x02%" HWADDR_PRIx "\n", - offset); - } - gptm_update_irq(s); -} - -static const MemoryRegionOps gptm_ops = { - .read = gptm_read, - .write = gptm_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static const VMStateDescription vmstate_stellaris_gptm = { - .name = "stellaris_gptm", - .version_id = 1, - .minimum_version_id = 1, - .fields = (VMStateField[]) { - VMSTATE_UINT32(config, gptm_state), - VMSTATE_UINT32_ARRAY(mode, gptm_state, 2), - VMSTATE_UINT32(control, gptm_state), - VMSTATE_UINT32(state, gptm_state), - VMSTATE_UINT32(mask, gptm_state), - VMSTATE_UNUSED(8), - VMSTATE_UINT32_ARRAY(load, gptm_state, 2), - VMSTATE_UINT32_ARRAY(match, gptm_state, 2), - VMSTATE_UINT32_ARRAY(prescale, gptm_state, 2), - VMSTATE_UINT32_ARRAY(match_prescale, gptm_state, 2), - VMSTATE_UINT32(rtc, gptm_state), - VMSTATE_INT64_ARRAY(tick, gptm_state, 2), - VMSTATE_TIMER_PTR_ARRAY(timer, gptm_state, 2), - VMSTATE_END_OF_LIST() - } -}; - -static void stellaris_gptm_init(Object *obj) -{ - DeviceState *dev = DEVICE(obj); - gptm_state *s = STELLARIS_GPTM(obj); - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - - sysbus_init_irq(sbd, &s->irq); - qdev_init_gpio_out(dev, &s->trigger, 1); - - memory_region_init_io(&s->iomem, obj, &gptm_ops, s, - "gptm", 0x1000); - sysbus_init_mmio(sbd, &s->iomem); - - s->opaque[0] = s->opaque[1] = s; -} - -static void stellaris_gptm_realize(DeviceState *dev, Error **errp) -{ - gptm_state *s = STELLARIS_GPTM(dev); - s->timer[0] = timer_new_ns(QEMU_CLOCK_VIRTUAL, gptm_tick, &s->opaque[0]); - s->timer[1] = timer_new_ns(QEMU_CLOCK_VIRTUAL, gptm_tick, &s->opaque[1]); -} - /* System controller. */ #define TYPE_STELLARIS_SYS "stellaris-sys" @@ -562,17 +265,18 @@ static bool ssys_use_rcc2(ssys_state *s) */ static void ssys_calculate_system_clock(ssys_state *s, bool propagate_clock) { + int period_ns; /* * SYSDIV field specifies divisor: 0 == /1, 1 == /2, etc. Input * clock is 200MHz, which is a period of 5 ns. Dividing the clock * frequency by X is the same as multiplying the period by X. 
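 * For example, a SYSDIV field value of 3 selects /4, giving a period of
 * 5 ns * 4 = 20 ns, i.e. a 50 MHz system clock.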
*/ if (ssys_use_rcc2(s)) { - system_clock_scale = 5 * (((s->rcc2 >> 23) & 0x3f) + 1); + period_ns = 5 * (((s->rcc2 >> 23) & 0x3f) + 1); } else { - system_clock_scale = 5 * (((s->rcc >> 23) & 0xf) + 1); + period_ns = 5 * (((s->rcc >> 23) & 0xf) + 1); } - clock_set_ns(s->sysclk, system_clock_scale); + clock_set_ns(s->sysclk, period_ns); if (propagate_clock) { clock_propagate(s->sysclk); } @@ -755,33 +459,6 @@ static void stellaris_sys_instance_init(Object *obj) s->sysclk = qdev_init_clock_out(DEVICE(s), "SYSCLK"); } -static DeviceState *stellaris_sys_init(uint32_t base, qemu_irq irq, - stellaris_board_info *board, - uint8_t *macaddr) -{ - DeviceState *dev = qdev_new(TYPE_STELLARIS_SYS); - SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - - /* Most devices come preprogrammed with a MAC address in the user data. */ - qdev_prop_set_uint32(dev, "user0", - macaddr[0] | (macaddr[1] << 8) | (macaddr[2] << 16)); - qdev_prop_set_uint32(dev, "user1", - macaddr[3] | (macaddr[4] << 8) | (macaddr[5] << 16)); - qdev_prop_set_uint32(dev, "did0", board->did0); - qdev_prop_set_uint32(dev, "did1", board->did1); - qdev_prop_set_uint32(dev, "dc0", board->dc0); - qdev_prop_set_uint32(dev, "dc1", board->dc1); - qdev_prop_set_uint32(dev, "dc2", board->dc2); - qdev_prop_set_uint32(dev, "dc3", board->dc3); - qdev_prop_set_uint32(dev, "dc4", board->dc4); - - sysbus_realize_and_unref(sbd, &error_fatal); - sysbus_mmio_map(sbd, 0, base); - sysbus_connect_irq(sbd, 0, irq); - - return dev; -} - /* I2C controller. */ #define TYPE_STELLARIS_I2C "stellaris-i2c" @@ -1349,6 +1026,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) DeviceState *ssys_dev; int i; int j; + const uint8_t *macaddr; MemoryRegion *sram = g_new(MemoryRegion, 1); MemoryRegion *flash = g_new(MemoryRegion, 1); @@ -1366,15 +1044,42 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) &error_fatal); memory_region_add_subregion(system_memory, 0x20000000, sram); + /* + * Create the system-registers object early, because we will + * need its sysclk output. + */ + ssys_dev = qdev_new(TYPE_STELLARIS_SYS); + /* Most devices come preprogrammed with a MAC address in the user data. 
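+ * For illustration, a MAC of 00:11:22:33:44:55 is packed little-endian by
+ * the code below as user0 = 0x00221100 and user1 = 0x00554433.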
*/ + macaddr = nd_table[0].macaddr.a; + qdev_prop_set_uint32(ssys_dev, "user0", + macaddr[0] | (macaddr[1] << 8) | (macaddr[2] << 16)); + qdev_prop_set_uint32(ssys_dev, "user1", + macaddr[3] | (macaddr[4] << 8) | (macaddr[5] << 16)); + qdev_prop_set_uint32(ssys_dev, "did0", board->did0); + qdev_prop_set_uint32(ssys_dev, "did1", board->did1); + qdev_prop_set_uint32(ssys_dev, "dc0", board->dc0); + qdev_prop_set_uint32(ssys_dev, "dc1", board->dc1); + qdev_prop_set_uint32(ssys_dev, "dc2", board->dc2); + qdev_prop_set_uint32(ssys_dev, "dc3", board->dc3); + qdev_prop_set_uint32(ssys_dev, "dc4", board->dc4); + sysbus_realize_and_unref(SYS_BUS_DEVICE(ssys_dev), &error_fatal); + nvic = qdev_new(TYPE_ARMV7M); qdev_prop_set_uint32(nvic, "num-irq", NUM_IRQ_LINES); qdev_prop_set_string(nvic, "cpu-type", ms->cpu_type); qdev_prop_set_bit(nvic, "enable-bitband", true); + qdev_connect_clock_in(nvic, "cpuclk", + qdev_get_clock_out(ssys_dev, "SYSCLK")); + /* This SoC does not connect the systick reference clock */ object_property_set_link(OBJECT(nvic), "memory", OBJECT(get_system_memory()), &error_abort); /* This will exit with an error if the user passed us a bad cpu_type */ sysbus_realize_and_unref(SYS_BUS_DEVICE(nvic), &error_fatal); + /* Now we can wire up the IRQ and MMIO of the system registers */ + sysbus_mmio_map(SYS_BUS_DEVICE(ssys_dev), 0, 0x400fe000); + sysbus_connect_irq(SYS_BUS_DEVICE(ssys_dev), 0, qdev_get_gpio_in(nvic, 28)); + if (board->dc1 & (1 << 16)) { dev = sysbus_create_varargs(TYPE_STELLARIS_ADC, 0x40038000, qdev_get_gpio_in(nvic, 14), @@ -1388,19 +1093,21 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) } for (i = 0; i < 4; i++) { if (board->dc2 & (0x10000 << i)) { - dev = sysbus_create_simple(TYPE_STELLARIS_GPTM, - 0x40030000 + i * 0x1000, - qdev_get_gpio_in(nvic, timer_irq[i])); + SysBusDevice *sbd; + + dev = qdev_new(TYPE_STELLARIS_GPTM); + sbd = SYS_BUS_DEVICE(dev); + qdev_connect_clock_in(dev, "clk", + qdev_get_clock_out(ssys_dev, "SYSCLK")); + sysbus_realize_and_unref(sbd, &error_fatal); + sysbus_mmio_map(sbd, 0, 0x40030000 + i * 0x1000); + sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(nvic, timer_irq[i])); /* TODO: This is incorrect, but we get away with it because the ADC output is only ever pulsed. */ qdev_connect_gpio_out(dev, 0, adc); } } - ssys_dev = stellaris_sys_init(0x400fe000, qdev_get_gpio_in(nvic, 28), - board, nd_table[0].macaddr.a); - - if (board->dc1 & (1 << 3)) { /* watchdog present */ dev = qdev_new(TYPE_LUMINARY_WATCHDOG); @@ -1452,6 +1159,10 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) void *bus; DeviceState *sddev; DeviceState *ssddev; + DriveInfo *dinfo; + DeviceState *carddev; + DeviceState *gpio_d_splitter; + BlockBackend *blk; /* * Some boards have both an OLED controller and SD card connected to @@ -1516,12 +1227,30 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) * - Make the ssd0323 OLED controller chipselect active-low */ bus = qdev_get_child_bus(dev, "ssi"); - sddev = ssi_create_peripheral(bus, "ssi-sd"); + + dinfo = drive_get(IF_SD, 0, 0); + blk = dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL; + carddev = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(carddev, "drive", blk, &error_fatal); + qdev_prop_set_bit(carddev, "spi", true); + qdev_realize_and_unref(carddev, + qdev_get_child_bus(sddev, "sd-bus"), + &error_fatal); + ssddev = ssi_create_peripheral(bus, "ssd0323"); - gpio_out[GPIO_D][0] = qemu_irq_split( - qdev_get_gpio_in_named(sddev, SSI_GPIO_CS, 0), + + gpio_d_splitter = qdev_new(TYPE_SPLIT_IRQ); + qdev_prop_set_uint32(gpio_d_splitter, "num-lines", 2); + qdev_realize_and_unref(gpio_d_splitter, NULL, &error_fatal); + qdev_connect_gpio_out( + gpio_d_splitter, 0, + qdev_get_gpio_in_named(sddev, SSI_GPIO_CS, 0)); + qdev_connect_gpio_out( + gpio_d_splitter, 1, qdev_get_gpio_in_named(ssddev, SSI_GPIO_CS, 0)); + gpio_out[GPIO_D][0] = qdev_get_gpio_in(gpio_d_splitter, 0); + gpio_out[GPIO_C][7] = qdev_get_gpio_in(ssddev, 0); /* Make sure the select pin is high. */ @@ -1573,7 +1302,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) create_unimplemented_device("hibernation", 0x400fc000, 0x1000); create_unimplemented_device("flash-control", 0x400fd000, 0x1000); - armv7m_load_kernel(ARM_CPU(first_cpu), ms->kernel_filename, flash_size); + armv7m_load_kernel(ARM_CPU(first_cpu), ms->kernel_filename, 0, flash_size); } /* FIXME: Figure out how to generate these from stellaris_boards. */ @@ -1642,22 +1371,6 @@ static const TypeInfo stellaris_i2c_info = { .class_init = stellaris_i2c_class_init, }; -static void stellaris_gptm_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->vmsd = &vmstate_stellaris_gptm; - dc->realize = stellaris_gptm_realize; -} - -static const TypeInfo stellaris_gptm_info = { - .name = TYPE_STELLARIS_GPTM, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(gptm_state), - .instance_init = stellaris_gptm_init, - .class_init = stellaris_gptm_class_init, -}; - static void stellaris_adc_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -1696,7 +1409,6 @@ static const TypeInfo stellaris_sys_info = { static void stellaris_register_types(void) { type_register_static(&stellaris_i2c_info); - type_register_static(&stellaris_gptm_info); type_register_static(&stellaris_adc_info); type_register_static(&stellaris_sys_info); } diff --git a/hw/arm/stm32f100_soc.c b/hw/arm/stm32f100_soc.c index 0c4a5c6645..f7b344ba9f 100644 --- a/hw/arm/stm32f100_soc.c +++ b/hw/arm/stm32f100_soc.c @@ -30,6 +30,7 @@ #include "exec/address-spaces.h" #include "hw/arm/stm32f100_soc.h" #include "hw/qdev-properties.h" +#include "hw/qdev-clock.h" #include "hw/misc/unimp.h" #include "sysemu/sysemu.h" @@ -57,6 +58,9 @@ static void stm32f100_soc_initfn(Object *obj) for (i = 0; i < STM_NUM_SPIS; i++) { object_initialize_child(obj, "spi[*]", &s->spi[i], TYPE_STM32F2XX_SPI); } + + s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0); + s->refclk = qdev_init_clock_in(DEVICE(s), "refclk", NULL, NULL, 0); } static void stm32f100_soc_realize(DeviceState *dev_soc, Error **errp) @@ -67,31 +71,54 @@ static void stm32f100_soc_realize(DeviceState *dev_soc, Error **errp) int i; MemoryRegion *system_memory = get_system_memory(); - MemoryRegion *sram = g_new(MemoryRegion, 1); - MemoryRegion *flash = g_new(MemoryRegion, 1); - MemoryRegion *flash_alias = g_new(MemoryRegion, 1); + + /* + * We use s->refclk internally and only define it with qdev_init_clock_in() + * so it is correctly parented and not leaked on an init/deinit; it is not + * intended as an externally 
exposed clock. + */ + if (clock_has_source(s->refclk)) { + error_setg(errp, "refclk clock must not be wired up by the board code"); + return; + } + + if (!clock_has_source(s->sysclk)) { + error_setg(errp, "sysclk clock must be wired up by the board code"); + return; + } + + /* + * TODO: ideally we should model the SoC RCC and its ability to + * change the sysclk frequency and define different sysclk sources. + */ + + /* The refclk always runs at frequency HCLK / 8 */ + clock_set_mul_div(s->refclk, 8, 1); + clock_set_source(s->refclk, s->sysclk); /* * Init flash region * Flash starts at 0x08000000 and then is aliased to boot memory at 0x0 */ - memory_region_init_rom(flash, OBJECT(dev_soc), "STM32F100.flash", + memory_region_init_rom(&s->flash, OBJECT(dev_soc), "STM32F100.flash", FLASH_SIZE, &error_fatal); - memory_region_init_alias(flash_alias, OBJECT(dev_soc), - "STM32F100.flash.alias", flash, 0, FLASH_SIZE); - memory_region_add_subregion(system_memory, FLASH_BASE_ADDRESS, flash); - memory_region_add_subregion(system_memory, 0, flash_alias); + memory_region_init_alias(&s->flash_alias, OBJECT(dev_soc), + "STM32F100.flash.alias", &s->flash, 0, FLASH_SIZE); + memory_region_add_subregion(system_memory, FLASH_BASE_ADDRESS, &s->flash); + memory_region_add_subregion(system_memory, 0, &s->flash_alias); /* Init SRAM region */ - memory_region_init_ram(sram, NULL, "STM32F100.sram", SRAM_SIZE, + memory_region_init_ram(&s->sram, NULL, "STM32F100.sram", SRAM_SIZE, &error_fatal); - memory_region_add_subregion(system_memory, SRAM_BASE_ADDRESS, sram); + memory_region_add_subregion(system_memory, SRAM_BASE_ADDRESS, &s->sram); /* Init ARMv7m */ armv7m = DEVICE(&s->armv7m); qdev_prop_set_uint32(armv7m, "num-irq", 61); qdev_prop_set_string(armv7m, "cpu-type", s->cpu_type); qdev_prop_set_bit(armv7m, "enable-bitband", true); + qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); + qdev_connect_clock_in(armv7m, "refclk", s->refclk); object_property_set_link(OBJECT(&s->armv7m), "memory", OBJECT(get_system_memory()), &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->armv7m), errp)) { diff --git a/hw/arm/stm32f205_soc.c b/hw/arm/stm32f205_soc.c index 9cd41bf56d..c6b75a381d 100644 --- a/hw/arm/stm32f205_soc.c +++ b/hw/arm/stm32f205_soc.c @@ -29,6 +29,7 @@ #include "exec/address-spaces.h" #include "hw/arm/stm32f205_soc.h" #include "hw/qdev-properties.h" +#include "hw/qdev-clock.h" #include "sysemu/sysemu.h" /* At the moment only Timer 2 to 5 are modelled */ @@ -74,6 +75,9 @@ static void stm32f205_soc_initfn(Object *obj) for (i = 0; i < STM_NUM_SPIS; i++) { object_initialize_child(obj, "spi[*]", &s->spi[i], TYPE_STM32F2XX_SPI); } + + s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0); + s->refclk = qdev_init_clock_in(DEVICE(s), "refclk", NULL, NULL, 0); } static void stm32f205_soc_realize(DeviceState *dev_soc, Error **errp) @@ -84,26 +88,49 @@ static void stm32f205_soc_realize(DeviceState *dev_soc, Error **errp) int i; MemoryRegion *system_memory = get_system_memory(); - MemoryRegion *sram = g_new(MemoryRegion, 1); - MemoryRegion *flash = g_new(MemoryRegion, 1); - MemoryRegion *flash_alias = g_new(MemoryRegion, 1); - memory_region_init_rom(flash, OBJECT(dev_soc), "STM32F205.flash", + /* + * We use s->refclk internally and only define it with qdev_init_clock_in() + * so it is correctly parented and not leaked on an init/deinit; it is not + * intended as an externally exposed clock. 
+ */ + if (clock_has_source(s->refclk)) { + error_setg(errp, "refclk clock must not be wired up by the board code"); + return; + } + + if (!clock_has_source(s->sysclk)) { + error_setg(errp, "sysclk clock must be wired up by the board code"); + return; + } + + /* + * TODO: ideally we should model the SoC RCC and its ability to + * change the sysclk frequency and define different sysclk sources. + */ + + /* The refclk always runs at frequency HCLK / 8 */ + clock_set_mul_div(s->refclk, 8, 1); + clock_set_source(s->refclk, s->sysclk); + + memory_region_init_rom(&s->flash, OBJECT(dev_soc), "STM32F205.flash", FLASH_SIZE, &error_fatal); - memory_region_init_alias(flash_alias, OBJECT(dev_soc), - "STM32F205.flash.alias", flash, 0, FLASH_SIZE); + memory_region_init_alias(&s->flash_alias, OBJECT(dev_soc), + "STM32F205.flash.alias", &s->flash, 0, FLASH_SIZE); - memory_region_add_subregion(system_memory, FLASH_BASE_ADDRESS, flash); - memory_region_add_subregion(system_memory, 0, flash_alias); + memory_region_add_subregion(system_memory, FLASH_BASE_ADDRESS, &s->flash); + memory_region_add_subregion(system_memory, 0, &s->flash_alias); - memory_region_init_ram(sram, NULL, "STM32F205.sram", SRAM_SIZE, + memory_region_init_ram(&s->sram, NULL, "STM32F205.sram", SRAM_SIZE, &error_fatal); - memory_region_add_subregion(system_memory, SRAM_BASE_ADDRESS, sram); + memory_region_add_subregion(system_memory, SRAM_BASE_ADDRESS, &s->sram); armv7m = DEVICE(&s->armv7m); qdev_prop_set_uint32(armv7m, "num-irq", 96); qdev_prop_set_string(armv7m, "cpu-type", s->cpu_type); qdev_prop_set_bit(armv7m, "enable-bitband", true); + qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); + qdev_connect_clock_in(armv7m, "refclk", s->refclk); object_property_set_link(OBJECT(&s->armv7m), "memory", OBJECT(get_system_memory()), &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->armv7m), errp)) { diff --git a/hw/arm/stm32f405_soc.c b/hw/arm/stm32f405_soc.c index cb04c11198..c07947d9f8 100644 --- a/hw/arm/stm32f405_soc.c +++ b/hw/arm/stm32f405_soc.c @@ -24,10 +24,10 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qemu-common.h" #include "exec/address-spaces.h" #include "sysemu/sysemu.h" #include "hw/arm/stm32f405_soc.h" +#include "hw/qdev-clock.h" #include "hw/misc/unimp.h" #define SYSCFG_ADD 0x40013800 @@ -80,6 +80,9 @@ static void stm32f405_soc_initfn(Object *obj) } object_initialize_child(obj, "exti", &s->exti, TYPE_STM32F4XX_EXTI); + + s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0); + s->refclk = qdev_init_clock_in(DEVICE(s), "refclk", NULL, NULL, 0); } static void stm32f405_soc_realize(DeviceState *dev_soc, Error **errp) @@ -91,6 +94,30 @@ static void stm32f405_soc_realize(DeviceState *dev_soc, Error **errp) Error *err = NULL; int i; + /* + * We use s->refclk internally and only define it with qdev_init_clock_in() + * so it is correctly parented and not leaked on an init/deinit; it is not + * intended as an externally exposed clock. + */ + if (clock_has_source(s->refclk)) { + error_setg(errp, "refclk clock must not be wired up by the board code"); + return; + } + + if (!clock_has_source(s->sysclk)) { + error_setg(errp, "sysclk clock must be wired up by the board code"); + return; + } + + /* + * TODO: ideally we should model the SoC RCC and its ability to + * change the sysclk frequency and define different sysclk sources. 
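+ * Until then the board code must wire a fixed-frequency Clock into
+ * "sysclk", as the stm32vldiscovery hunk further below does with
+ * clock_new() and clock_set_hz().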
+ */ + + /* The refclk always runs at frequency HCLK / 8 */ + clock_set_mul_div(s->refclk, 8, 1); + clock_set_source(s->refclk, s->sysclk); + memory_region_init_rom(&s->flash, OBJECT(dev_soc), "STM32F405.flash", FLASH_SIZE, &err); if (err != NULL) { @@ -116,6 +143,8 @@ static void stm32f405_soc_realize(DeviceState *dev_soc, Error **errp) qdev_prop_set_uint32(armv7m, "num-irq", 96); qdev_prop_set_string(armv7m, "cpu-type", s->cpu_type); qdev_prop_set_bit(armv7m, "enable-bitband", true); + qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); + qdev_connect_clock_in(armv7m, "refclk", s->refclk); object_property_set_link(OBJECT(&s->armv7m), "memory", OBJECT(system_memory), &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->armv7m), errp)) { diff --git a/hw/arm/stm32vldiscovery.c b/hw/arm/stm32vldiscovery.c index 7e8191ebf5..67675e952f 100644 --- a/hw/arm/stm32vldiscovery.c +++ b/hw/arm/stm32vldiscovery.c @@ -27,6 +27,7 @@ #include "qapi/error.h" #include "hw/boards.h" #include "hw/qdev-properties.h" +#include "hw/qdev-clock.h" #include "qemu/error-report.h" #include "hw/arm/stm32f100_soc.h" #include "hw/arm/boot.h" @@ -39,21 +40,20 @@ static void stm32vldiscovery_init(MachineState *machine) { DeviceState *dev; + Clock *sysclk; - /* - * TODO: ideally we would model the SoC RCC and let it handle - * system_clock_scale, including its ability to define different - * possible SYSCLK sources. - */ - system_clock_scale = NANOSECONDS_PER_SECOND / SYSCLK_FRQ; + /* This clock doesn't need migration because it is fixed-frequency */ + sysclk = clock_new(OBJECT(machine), "SYSCLK"); + clock_set_hz(sysclk, SYSCLK_FRQ); dev = qdev_new(TYPE_STM32F100_SOC); qdev_prop_set_string(dev, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m3")); + qdev_connect_clock_in(dev, "sysclk", sysclk); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, - FLASH_SIZE); + 0, FLASH_SIZE); } static void stm32vldiscovery_machine_init(MachineClass *mc) @@ -63,4 +63,3 @@ static void stm32vldiscovery_machine_init(MachineClass *mc) } DEFINE_MACHINE("stm32vldiscovery", stm32vldiscovery_machine_init) - diff --git a/hw/arm/strongarm.c b/hw/arm/strongarm.c index 939a57dda5..39b8f01ac4 100644 --- a/hw/arm/strongarm.c +++ b/hw/arm/strongarm.c @@ -28,7 +28,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "cpu.h" #include "hw/irq.h" #include "hw/qdev-properties.h" @@ -41,6 +40,7 @@ #include "chardev/char-fe.h" #include "chardev/char-serial.h" #include "sysemu/sysemu.h" +#include "sysemu/rtc.h" #include "hw/ssi/ssi.h" #include "qapi/error.h" #include "qemu/cutils.h" diff --git a/hw/arm/versatilepb.c b/hw/arm/versatilepb.c index 575399c4fc..ecc1f6cf74 100644 --- a/hw/arm/versatilepb.c +++ b/hw/arm/versatilepb.c @@ -310,7 +310,7 @@ static void versatile_init(MachineState *machine, int board_id) qdev_connect_gpio_out(sysctl, 0, qdev_get_gpio_in(dev, 0)); dev = sysbus_create_varargs("pl181", 0x10005000, sic[22], sic[1], NULL); - dinfo = drive_get_next(IF_SD); + dinfo = drive_get(IF_SD, 0, 0); if (dinfo) { DeviceState *card; @@ -322,7 +322,7 @@ static void versatile_init(MachineState *machine, int board_id) } dev = sysbus_create_varargs("pl181", 0x1000b000, sic[23], sic[2], NULL); - dinfo = drive_get_next(IF_SD); + dinfo = drive_get(IF_SD, 0, 1); if (dinfo) { DeviceState *card; diff --git a/hw/arm/vexpress.c b/hw/arm/vexpress.c index 58481c0762..e1d1983ae6 100644 --- a/hw/arm/vexpress.c +++ b/hw/arm/vexpress.c @@ -23,7 +23,6 @@ #include "qemu/osdep.h" #include 
"qapi/error.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "cpu.h" #include "hw/sysbus.h" @@ -625,7 +624,7 @@ static void vexpress_common_init(MachineState *machine) qdev_get_gpio_in(sysctl, ARM_SYSCTL_GPIO_MMC_WPROT)); qdev_connect_gpio_out_named(dev, "card-inserted", 0, qdev_get_gpio_in(sysctl, ARM_SYSCTL_GPIO_MMC_CARDIN)); - dinfo = drive_get_next(IF_SD); + dinfo = drive_get(IF_SD, 0, 0); if (dinfo) { DeviceState *card; @@ -657,7 +656,7 @@ static void vexpress_common_init(MachineState *machine) sysbus_create_simple("pl111", map[VE_CLCD], pic[14]); - dinfo = drive_get_next(IF_PFLASH); + dinfo = drive_get(IF_PFLASH, 0, 0); pflash0 = ve_pflash_cfi01_register(map[VE_NORFLASH0], "vexpress.flash0", dinfo); if (!pflash0) { @@ -673,7 +672,7 @@ static void vexpress_common_init(MachineState *machine) memory_region_add_subregion(sysmem, map[VE_NORFLASHALIAS], flashalias); } - dinfo = drive_get_next(IF_PFLASH); + dinfo = drive_get(IF_PFLASH, 0, 1); if (!ve_pflash_cfi01_register(map[VE_NORFLASH1], "vexpress.flash1", dinfo)) { error_report("vexpress: error registering flash 1"); @@ -709,7 +708,6 @@ static void vexpress_common_init(MachineState *machine) } daughterboard->bootinfo.ram_size = machine->ram_size; - daughterboard->bootinfo.nb_cpus = machine->smp.cpus; daughterboard->bootinfo.board_id = VEXPRESS_BOARD_ID; daughterboard->bootinfo.loader_start = daughterboard->loader_start; daughterboard->bootinfo.smp_loader_start = map[VE_SRAM]; diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c index 037cc1fd82..4156111d49 100644 --- a/hw/arm/virt-acpi-build.c +++ b/hw/arm/virt-acpi-build.c @@ -42,6 +42,7 @@ #include "hw/acpi/memory_hotplug.h" #include "hw/acpi/generic_event_device.h" #include "hw/acpi/tpm.h" +#include "hw/acpi/hmat.h" #include "hw/pci/pcie_host.h" #include "hw/pci/pci.h" #include "hw/pci/pci_bus.h" @@ -55,6 +56,7 @@ #include "kvm_arm.h" #include "migration/vmstate.h" #include "hw/acpi/ghes.h" +#include "hw/acpi/viot.h" #define ARM_SPI_BASE 32 @@ -157,10 +159,9 @@ static void acpi_dsdt_add_virtio(Aml *scope, } static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap, - uint32_t irq, bool use_highmem, bool highmem_ecam, - VirtMachineState *vms) + uint32_t irq, VirtMachineState *vms) { - int ecam_id = VIRT_ECAM_ID(highmem_ecam); + int ecam_id = VIRT_ECAM_ID(vms->highmem_ecam); struct GPEXConfig cfg = { .mmio32 = memmap[VIRT_PCIE_MMIO], .pio = memmap[VIRT_PCIE_PIO], @@ -169,7 +170,7 @@ static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap, .bus = vms->bus, }; - if (use_highmem) { + if (vms->highmem_mmio) { cfg.mmio64 = memmap[VIRT_HIGH_PCIE_MMIO]; } @@ -228,6 +229,7 @@ static void acpi_dsdt_add_tpm(Aml *scope, VirtMachineState *vms) Aml *dev = aml_device("TPM0"); aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101"))); + aml_append(dev, aml_name_decl("_STR", aml_string("TPM 2.0 Device"))); aml_append(dev, aml_name_decl("_UID", aml_int(0))); Aml *crs = aml_resource_template(); @@ -240,6 +242,29 @@ static void acpi_dsdt_add_tpm(Aml *scope, VirtMachineState *vms) } #endif +#define ID_MAPPING_ENTRY_SIZE 20 +#define SMMU_V3_ENTRY_SIZE 68 +#define ROOT_COMPLEX_ENTRY_SIZE 36 +#define IORT_NODE_OFFSET 48 + +static void build_iort_id_mapping(GArray *table_data, uint32_t input_base, + uint32_t id_count, uint32_t out_ref) +{ + /* Table 4 ID mapping format */ + build_append_int_noprefix(table_data, input_base, 4); /* Input base */ + build_append_int_noprefix(table_data, id_count, 4); /* Number of IDs */ + build_append_int_noprefix(table_data, 
input_base, 4); /* Output base */ + build_append_int_noprefix(table_data, out_ref, 4); /* Output Reference */ + /* Flags */ + build_append_int_noprefix(table_data, 0 /* Single mapping (disabled) */, 4); +} + +struct AcpiIortIdMapping { + uint32_t input_base; + uint32_t id_count; +}; +typedef struct AcpiIortIdMapping AcpiIortIdMapping; + /* Build the iort ID mapping to SMMUv3 for a given PCI host bridge */ static int iort_host_bridges(Object *obj, void *opaque) @@ -273,20 +298,26 @@ static int iort_idmap_compare(gconstpointer a, gconstpointer b) return idmap_a->input_base - idmap_b->input_base; } +/* + * Input Output Remapping Table (IORT) + * Conforms to "IO Remapping Table System Software on ARM Platforms", + * Document number: ARM DEN 0049E.b, Feb 2021 + */ static void build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) { - int i, nb_nodes, rc_mapping_count, iort_start = table_data->len; + int i, nb_nodes, rc_mapping_count; + const uint32_t iort_node_offset = IORT_NODE_OFFSET; + size_t node_size, smmu_offset = 0; AcpiIortIdMapping *idmap; - AcpiIortItsGroup *its; - AcpiIortTable *iort; - AcpiIortSmmu3 *smmu; - size_t node_size, iort_node_offset, iort_length, smmu_offset = 0; - AcpiIortRC *rc; + uint32_t id = 0; GArray *smmu_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping)); GArray *its_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping)); - iort = acpi_data_push(table_data, sizeof(*iort)); + AcpiTable table = { .sig = "IORT", .rev = 3, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; + /* Table 2 The IORT */ + acpi_table_begin(&table, table_data); if (vms->iommu == VIRT_IOMMU_SMMUV3) { AcpiIortIdMapping next_range = {0}; @@ -324,186 +355,202 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) nb_nodes = 2; /* RC, ITS */ rc_mapping_count = 1; } + /* Number of IORT Nodes */ + build_append_int_noprefix(table_data, nb_nodes, 4); - iort_length = sizeof(*iort); - iort->node_count = cpu_to_le32(nb_nodes); - /* - * Use a copy in case table_data->data moves during acpi_data_push - * operations. 
- */ - iort_node_offset = sizeof(*iort); - iort->node_offset = cpu_to_le32(iort_node_offset); + /* Offset to Array of IORT Nodes */ + build_append_int_noprefix(table_data, IORT_NODE_OFFSET, 4); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ - /* ITS group node */ - node_size = sizeof(*its) + sizeof(uint32_t); - iort_length += node_size; - its = acpi_data_push(table_data, node_size); - - its->type = ACPI_IORT_NODE_ITS_GROUP; - its->length = cpu_to_le16(node_size); - its->its_count = cpu_to_le32(1); - its->identifiers[0] = 0; /* MADT translation_id */ + /* Table 12 ITS Group Format */ + build_append_int_noprefix(table_data, 0 /* ITS Group */, 1); /* Type */ + node_size = 20 /* fixed header size */ + 4 /* 1 GIC ITS Identifier */; + build_append_int_noprefix(table_data, node_size, 2); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Revision */ + build_append_int_noprefix(table_data, id++, 4); /* Identifier */ + build_append_int_noprefix(table_data, 0, 4); /* Number of ID mappings */ + build_append_int_noprefix(table_data, 0, 4); /* Reference to ID Array */ + build_append_int_noprefix(table_data, 1, 4); /* Number of ITSs */ + /* GIC ITS Identifier Array */ + build_append_int_noprefix(table_data, 0 /* MADT translation_id */, 4); if (vms->iommu == VIRT_IOMMU_SMMUV3) { int irq = vms->irqmap[VIRT_SMMU] + ARM_SPI_BASE; - /* SMMUv3 node */ - smmu_offset = iort_node_offset + node_size; - node_size = sizeof(*smmu) + sizeof(*idmap); - iort_length += node_size; - smmu = acpi_data_push(table_data, node_size); + smmu_offset = table_data->len - table.table_offset; + /* Table 9 SMMUv3 Format */ + build_append_int_noprefix(table_data, 4 /* SMMUv3 */, 1); /* Type */ + node_size = SMMU_V3_ENTRY_SIZE + ID_MAPPING_ENTRY_SIZE; + build_append_int_noprefix(table_data, node_size, 2); /* Length */ + build_append_int_noprefix(table_data, 4, 1); /* Revision */ + build_append_int_noprefix(table_data, id++, 4); /* Identifier */ + build_append_int_noprefix(table_data, 1, 4); /* Number of ID mappings */ + /* Reference to ID Array */ + build_append_int_noprefix(table_data, SMMU_V3_ENTRY_SIZE, 4); + /* Base address */ + build_append_int_noprefix(table_data, vms->memmap[VIRT_SMMU].base, 8); + /* Flags */ + build_append_int_noprefix(table_data, 1 /* COHACC Override */, 4); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); /* VATOS address */ + /* Model */ + build_append_int_noprefix(table_data, 0 /* Generic SMMU-v3 */, 4); + build_append_int_noprefix(table_data, irq, 4); /* Event */ + build_append_int_noprefix(table_data, irq + 1, 4); /* PRI */ + build_append_int_noprefix(table_data, irq + 3, 4); /* GERR */ + build_append_int_noprefix(table_data, irq + 2, 4); /* Sync */ + build_append_int_noprefix(table_data, 0, 4); /* Proximity domain */ + /* DeviceID mapping index (ignored since interrupts are GSIV based) */ + build_append_int_noprefix(table_data, 0, 4); - smmu->type = ACPI_IORT_NODE_SMMU_V3; - smmu->length = cpu_to_le16(node_size); - smmu->mapping_count = cpu_to_le32(1); - smmu->mapping_offset = cpu_to_le32(sizeof(*smmu)); - smmu->base_address = cpu_to_le64(vms->memmap[VIRT_SMMU].base); - smmu->flags = cpu_to_le32(ACPI_IORT_SMMU_V3_COHACC_OVERRIDE); - smmu->event_gsiv = cpu_to_le32(irq); - smmu->pri_gsiv = cpu_to_le32(irq + 1); - smmu->sync_gsiv = cpu_to_le32(irq + 2); - smmu->gerr_gsiv = cpu_to_le32(irq + 3); - - /* Identity RID mapping covering the whole input RID range */ - idmap = &smmu->id_mapping_array[0]; - idmap->input_base = 0; 
- idmap->id_count = cpu_to_le32(0xFFFF); - idmap->output_base = 0; /* output IORT node is the ITS group node (the first node) */ - idmap->output_reference = cpu_to_le32(iort_node_offset); + build_iort_id_mapping(table_data, 0, 0xFFFF, IORT_NODE_OFFSET); } - /* Root Complex Node */ - node_size = sizeof(*rc) + sizeof(*idmap) * rc_mapping_count; - iort_length += node_size; - rc = acpi_data_push(table_data, node_size); + /* Table 17 Root Complex Node */ + build_append_int_noprefix(table_data, 2 /* Root complex */, 1); /* Type */ + node_size = ROOT_COMPLEX_ENTRY_SIZE + + ID_MAPPING_ENTRY_SIZE * rc_mapping_count; + build_append_int_noprefix(table_data, node_size, 2); /* Length */ + build_append_int_noprefix(table_data, 3, 1); /* Revision */ + build_append_int_noprefix(table_data, id++, 4); /* Identifier */ + /* Number of ID mappings */ + build_append_int_noprefix(table_data, rc_mapping_count, 4); + /* Reference to ID Array */ + build_append_int_noprefix(table_data, ROOT_COMPLEX_ENTRY_SIZE, 4); - rc->type = ACPI_IORT_NODE_PCI_ROOT_COMPLEX; - rc->length = cpu_to_le16(node_size); - rc->mapping_count = cpu_to_le32(rc_mapping_count); - rc->mapping_offset = cpu_to_le32(sizeof(*rc)); + /* Table 14 Memory access properties */ + /* CCA: Cache Coherent Attribute */ + build_append_int_noprefix(table_data, 1 /* fully coherent */, 4); + build_append_int_noprefix(table_data, 0, 1); /* AH: Note Allocation Hints */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + /* Table 15 Memory Access Flags */ + build_append_int_noprefix(table_data, 0x3 /* CCA = CPM = DACS = 1 */, 1); - /* fully coherent device */ - rc->memory_properties.cache_coherency = cpu_to_le32(1); - rc->memory_properties.memory_flags = 0x3; /* CCA = CPM = DCAS = 1 */ - rc->pci_segment_number = 0; /* MCFG pci_segment */ + build_append_int_noprefix(table_data, 0, 4); /* ATS Attribute */ + /* MCFG pci_segment */ + build_append_int_noprefix(table_data, 0, 4); /* PCI Segment number */ + /* Memory address size limit */ + build_append_int_noprefix(table_data, 64, 1); + + build_append_int_noprefix(table_data, 0, 3); /* Reserved */ + + /* Output Reference */ if (vms->iommu == VIRT_IOMMU_SMMUV3) { AcpiIortIdMapping *range; /* translated RIDs connect to SMMUv3 node: RC -> SMMUv3 -> ITS */ for (i = 0; i < smmu_idmaps->len; i++) { - idmap = &rc->id_mapping_array[i]; range = &g_array_index(smmu_idmaps, AcpiIortIdMapping, i); - - idmap->input_base = cpu_to_le32(range->input_base); - idmap->id_count = cpu_to_le32(range->id_count); - idmap->output_base = cpu_to_le32(range->input_base); /* output IORT node is the smmuv3 node */ - idmap->output_reference = cpu_to_le32(smmu_offset); + build_iort_id_mapping(table_data, range->input_base, + range->id_count, smmu_offset); } /* bypassed RIDs connect to ITS group node directly: RC -> ITS */ for (i = 0; i < its_idmaps->len; i++) { - idmap = &rc->id_mapping_array[smmu_idmaps->len + i]; range = &g_array_index(its_idmaps, AcpiIortIdMapping, i); - - idmap->input_base = cpu_to_le32(range->input_base); - idmap->id_count = cpu_to_le32(range->id_count); - idmap->output_base = cpu_to_le32(range->input_base); /* output IORT node is the ITS group node (the first node) */ - idmap->output_reference = cpu_to_le32(iort_node_offset); + build_iort_id_mapping(table_data, range->input_base, + range->id_count, iort_node_offset); } } else { - /* Identity RID mapping covering the whole input RID range */ - idmap = &rc->id_mapping_array[0]; - idmap->input_base = cpu_to_le32(0); - idmap->id_count = cpu_to_le32(0xFFFF); - 
idmap->output_base = cpu_to_le32(0); /* output IORT node is the ITS group node (the first node) */ - idmap->output_reference = cpu_to_le32(iort_node_offset); + build_iort_id_mapping(table_data, 0, 0xFFFF, IORT_NODE_OFFSET); } + acpi_table_end(linker, &table); g_array_free(smmu_idmaps, true); g_array_free(its_idmaps, true); - - /* - * Update the pointer address in case table_data->data moves during above - * acpi_data_push operations. - */ - iort = (AcpiIortTable *)(table_data->data + iort_start); - iort->length = cpu_to_le32(iort_length); - - build_header(linker, table_data, (void *)(table_data->data + iort_start), - "IORT", table_data->len - iort_start, 0, vms->oem_id, - vms->oem_table_id); } +/* + * Serial Port Console Redirection Table (SPCR) + * Rev: 1.07 + */ static void build_spcr(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) { - AcpiSerialPortConsoleRedirection *spcr; - const MemMapEntry *uart_memmap = &vms->memmap[VIRT_UART]; - int irq = vms->irqmap[VIRT_UART] + ARM_SPI_BASE; - int spcr_start = table_data->len; + AcpiTable table = { .sig = "SPCR", .rev = 2, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; - spcr = acpi_data_push(table_data, sizeof(*spcr)); + acpi_table_begin(&table, table_data); - spcr->interface_type = 0x3; /* ARM PL011 UART */ + /* Interface Type */ + build_append_int_noprefix(table_data, 3, 1); /* ARM PL011 UART */ + build_append_int_noprefix(table_data, 0, 3); /* Reserved */ + /* Base Address */ + build_append_gas(table_data, AML_AS_SYSTEM_MEMORY, 8, 0, 1, + vms->memmap[VIRT_UART].base); + /* Interrupt Type */ + build_append_int_noprefix(table_data, + (1 << 3) /* Bit[3] ARMH GIC interrupt */, 1); + build_append_int_noprefix(table_data, 0, 1); /* IRQ */ + /* Global System Interrupt */ + build_append_int_noprefix(table_data, + vms->irqmap[VIRT_UART] + ARM_SPI_BASE, 4); + build_append_int_noprefix(table_data, 3 /* 9600 */, 1); /* Baud Rate */ + build_append_int_noprefix(table_data, 0 /* No Parity */, 1); /* Parity */ + /* Stop Bits */ + build_append_int_noprefix(table_data, 1 /* 1 Stop bit */, 1); + /* Flow Control */ + build_append_int_noprefix(table_data, + (1 << 1) /* RTS/CTS hardware flow control */, 1); + /* Terminal Type */ + build_append_int_noprefix(table_data, 0 /* VT100 */, 1); + build_append_int_noprefix(table_data, 0, 1); /* Language */ + /* PCI Device ID */ + build_append_int_noprefix(table_data, 0xffff /* not a PCI device*/, 2); + /* PCI Vendor ID */ + build_append_int_noprefix(table_data, 0xffff /* not a PCI device*/, 2); + build_append_int_noprefix(table_data, 0, 1); /* PCI Bus Number */ + build_append_int_noprefix(table_data, 0, 1); /* PCI Device Number */ + build_append_int_noprefix(table_data, 0, 1); /* PCI Function Number */ + build_append_int_noprefix(table_data, 0, 4); /* PCI Flags */ + build_append_int_noprefix(table_data, 0, 1); /* PCI Segment */ + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ - spcr->base_address.space_id = AML_SYSTEM_MEMORY; - spcr->base_address.bit_width = 8; - spcr->base_address.bit_offset = 0; - spcr->base_address.access_width = 1; - spcr->base_address.address = cpu_to_le64(uart_memmap->base); - - spcr->interrupt_types = (1 << 3); /* Bit[3] ARMH GIC interrupt */ - spcr->gsi = cpu_to_le32(irq); /* Global System Interrupt */ - - spcr->baud = 3; /* Baud Rate: 3 = 9600 */ - spcr->parity = 0; /* No Parity */ - spcr->stopbits = 1; /* 1 Stop bit */ - spcr->flowctrl = (1 << 1); /* Bit[1] = RTS/CTS hardware flow control */ - spcr->term_type = 0; /* Terminal Type: 0 = VT100 */ - - 
spcr->pci_device_id = 0xffff; /* PCI Device ID: not a PCI device */ - spcr->pci_vendor_id = 0xffff; /* PCI Vendor ID: not a PCI device */ - - build_header(linker, table_data, (void *)(table_data->data + spcr_start), - "SPCR", table_data->len - spcr_start, 2, vms->oem_id, - vms->oem_table_id); + acpi_table_end(linker, &table); } +/* + * ACPI spec, Revision 5.1 + * 5.2.16 System Resource Affinity Table (SRAT) + */ static void build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) { - AcpiSystemResourceAffinityTable *srat; - AcpiSratProcessorGiccAffinity *core; - AcpiSratMemoryAffinity *numamem; - int i, srat_start; + int i; uint64_t mem_base; MachineClass *mc = MACHINE_GET_CLASS(vms); MachineState *ms = MACHINE(vms); const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(ms); + AcpiTable table = { .sig = "SRAT", .rev = 3, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; - srat_start = table_data->len; - srat = acpi_data_push(table_data, sizeof(*srat)); - srat->reserved1 = cpu_to_le32(1); + acpi_table_begin(&table, table_data); + build_append_int_noprefix(table_data, 1, 4); /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); /* Reserved */ for (i = 0; i < cpu_list->len; ++i) { - core = acpi_data_push(table_data, sizeof(*core)); - core->type = ACPI_SRAT_PROCESSOR_GICC; - core->length = sizeof(*core); - core->proximity = cpu_to_le32(cpu_list->cpus[i].props.node_id); - core->acpi_processor_uid = cpu_to_le32(i); - core->flags = cpu_to_le32(1); + uint32_t nodeid = cpu_list->cpus[i].props.node_id; + /* + * 5.2.16.4 GICC Affinity Structure + */ + build_append_int_noprefix(table_data, 3, 1); /* Type */ + build_append_int_noprefix(table_data, 18, 1); /* Length */ + build_append_int_noprefix(table_data, nodeid, 4); /* Proximity Domain */ + build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */ + /* Flags, Table 5-76 */ + build_append_int_noprefix(table_data, 1 /* Enabled */, 4); + build_append_int_noprefix(table_data, 0, 4); /* Clock Domain */ } mem_base = vms->memmap[VIRT_MEM].base; for (i = 0; i < ms->numa_state->num_nodes; ++i) { if (ms->numa_state->nodes[i].node_mem > 0) { - numamem = acpi_data_push(table_data, sizeof(*numamem)); - build_srat_memory(numamem, mem_base, + build_srat_memory(table_data, mem_base, ms->numa_state->nodes[i].node_mem, i, MEM_AFFINITY_ENABLED); mem_base += ms->numa_state->nodes[i].node_mem; @@ -515,152 +562,258 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) } if (ms->device_memory) { - numamem = acpi_data_push(table_data, sizeof *numamem); - build_srat_memory(numamem, ms->device_memory->base, + build_srat_memory(table_data, ms->device_memory->base, memory_region_size(&ms->device_memory->mr), ms->numa_state->num_nodes - 1, MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED); } - build_header(linker, table_data, (void *)(table_data->data + srat_start), - "SRAT", table_data->len - srat_start, 3, vms->oem_id, - vms->oem_table_id); + acpi_table_end(linker, &table); } -/* GTDT */ +/* + * ACPI spec, Revision 5.1 + * 5.2.24 Generic Timer Description Table (GTDT) + */ static void build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) { VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); - int gtdt_start = table_data->len; - AcpiGenericTimerTable *gtdt; - uint32_t irqflags; + /* + * Table 5-117 Flag Definitions + * set only "Timer interrupt Mode" and assume "Timer Interrupt + * polarity" bit as '0: Interrupt is Active high' + */ + uint32_t irqflags = 
vmc->claim_edge_triggered_timers ? + 1 : /* Interrupt is Edge triggered */ + 0; /* Interrupt is Level triggered */ + AcpiTable table = { .sig = "GTDT", .rev = 2, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; - if (vmc->claim_edge_triggered_timers) { - irqflags = ACPI_GTDT_INTERRUPT_MODE_EDGE; - } else { - irqflags = ACPI_GTDT_INTERRUPT_MODE_LEVEL; - } + acpi_table_begin(&table, table_data); - gtdt = acpi_data_push(table_data, sizeof *gtdt); - /* The interrupt values are the same with the device tree when adding 16 */ - gtdt->secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_S_EL1_IRQ + 16); - gtdt->secure_el1_flags = cpu_to_le32(irqflags); + /* CntControlBase Physical Address */ + build_append_int_noprefix(table_data, 0xFFFFFFFFFFFFFFFF, 8); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + /* + * FIXME: clarify comment: + * The interrupt values are the same with the device tree when adding 16 + */ + /* Secure EL1 timer GSIV */ + build_append_int_noprefix(table_data, ARCH_TIMER_S_EL1_IRQ + 16, 4); + /* Secure EL1 timer Flags */ + build_append_int_noprefix(table_data, irqflags, 4); + /* Non-Secure EL1 timer GSIV */ + build_append_int_noprefix(table_data, ARCH_TIMER_NS_EL1_IRQ + 16, 4); + /* Non-Secure EL1 timer Flags */ + build_append_int_noprefix(table_data, irqflags | + 1UL << 2, /* Always-on Capability */ + 4); + /* Virtual timer GSIV */ + build_append_int_noprefix(table_data, ARCH_TIMER_VIRT_IRQ + 16, 4); + /* Virtual Timer Flags */ + build_append_int_noprefix(table_data, irqflags, 4); + /* Non-Secure EL2 timer GSIV */ + build_append_int_noprefix(table_data, ARCH_TIMER_NS_EL2_IRQ + 16, 4); + /* Non-Secure EL2 timer Flags */ + build_append_int_noprefix(table_data, irqflags, 4); + /* CntReadBase Physical address */ + build_append_int_noprefix(table_data, 0xFFFFFFFFFFFFFFFF, 8); + /* Platform Timer Count */ + build_append_int_noprefix(table_data, 0, 4); + /* Platform Timer Offset */ + build_append_int_noprefix(table_data, 0, 4); - gtdt->non_secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL1_IRQ + 16); - gtdt->non_secure_el1_flags = cpu_to_le32(irqflags | - ACPI_GTDT_CAP_ALWAYS_ON); - - gtdt->virtual_timer_interrupt = cpu_to_le32(ARCH_TIMER_VIRT_IRQ + 16); - gtdt->virtual_timer_flags = cpu_to_le32(irqflags); - - gtdt->non_secure_el2_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL2_IRQ + 16); - gtdt->non_secure_el2_flags = cpu_to_le32(irqflags); - - build_header(linker, table_data, - (void *)(table_data->data + gtdt_start), "GTDT", - table_data->len - gtdt_start, 2, vms->oem_id, - vms->oem_table_id); + acpi_table_end(linker, &table); +} + +/* Debug Port Table 2 (DBG2) */ +static void +build_dbg2(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) +{ + AcpiTable table = { .sig = "DBG2", .rev = 0, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; + int dbg2devicelength; + const char name[] = "COM0"; + const int namespace_length = sizeof(name); + + acpi_table_begin(&table, table_data); + + dbg2devicelength = 22 + /* BaseAddressRegister[] offset */ + 12 + /* BaseAddressRegister[] */ + 4 + /* AddressSize[] */ + namespace_length /* NamespaceString[] */; + + /* OffsetDbgDeviceInfo */ + build_append_int_noprefix(table_data, 44, 4); + /* NumberDbgDeviceInfo */ + build_append_int_noprefix(table_data, 1, 4); + + /* Table 2. 
Debug Device Information structure format */ + build_append_int_noprefix(table_data, 0, 1); /* Revision */ + build_append_int_noprefix(table_data, dbg2devicelength, 2); /* Length */ + /* NumberofGenericAddressRegisters */ + build_append_int_noprefix(table_data, 1, 1); + /* NameSpaceStringLength */ + build_append_int_noprefix(table_data, namespace_length, 2); + build_append_int_noprefix(table_data, 38, 2); /* NameSpaceStringOffset */ + build_append_int_noprefix(table_data, 0, 2); /* OemDataLength */ + /* OemDataOffset (0 means no OEM data) */ + build_append_int_noprefix(table_data, 0, 2); + + /* Port Type */ + build_append_int_noprefix(table_data, 0x8000 /* Serial */, 2); + /* Port Subtype */ + build_append_int_noprefix(table_data, 0x3 /* ARM PL011 UART */, 2); + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + /* BaseAddressRegisterOffset */ + build_append_int_noprefix(table_data, 22, 2); + /* AddressSizeOffset */ + build_append_int_noprefix(table_data, 34, 2); + + /* BaseAddressRegister[] */ + build_append_gas(table_data, AML_AS_SYSTEM_MEMORY, 8, 0, 1, + vms->memmap[VIRT_UART].base); + + /* AddressSize[] */ + build_append_int_noprefix(table_data, + vms->memmap[VIRT_UART].size, 4); + + /* NamespaceString[] */ + g_array_append_vals(table_data, name, namespace_length); + + acpi_table_end(linker, &table); +}; + +/* + * ACPI spec, Revision 6.0 Errata A + * 5.2.12 Multiple APIC Description Table (MADT) + */ +static void build_append_gicr(GArray *table_data, uint64_t base, uint32_t size) +{ + build_append_int_noprefix(table_data, 0xE, 1); /* Type */ + build_append_int_noprefix(table_data, 16, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + /* Discovery Range Base Addres */ + build_append_int_noprefix(table_data, base, 8); + build_append_int_noprefix(table_data, size, 4); /* Discovery Range Length */ } -/* MADT */ static void build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) { - VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); - int madt_start = table_data->len; - const MemMapEntry *memmap = vms->memmap; - const int *irqmap = vms->irqmap; - AcpiMadtGenericDistributor *gicd; - AcpiMadtGenericMsiFrame *gic_msi; int i; + VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); + const MemMapEntry *memmap = vms->memmap; + AcpiTable table = { .sig = "APIC", .rev = 4, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; - acpi_data_push(table_data, sizeof(AcpiMultipleApicTable)); + acpi_table_begin(&table, table_data); + /* Local Interrupt Controller Address */ + build_append_int_noprefix(table_data, 0, 4); + build_append_int_noprefix(table_data, 0, 4); /* Flags */ - gicd = acpi_data_push(table_data, sizeof *gicd); - gicd->type = ACPI_APIC_GENERIC_DISTRIBUTOR; - gicd->length = sizeof(*gicd); - gicd->base_address = cpu_to_le64(memmap[VIRT_GIC_DIST].base); - gicd->version = vms->gic_version; + /* 5.2.12.15 GIC Distributor Structure */ + build_append_int_noprefix(table_data, 0xC, 1); /* Type */ + build_append_int_noprefix(table_data, 24, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); /* GIC ID */ + /* Physical Base Address */ + build_append_int_noprefix(table_data, memmap[VIRT_GIC_DIST].base, 8); + build_append_int_noprefix(table_data, 0, 4); /* System Vector Base */ + /* GIC version */ + build_append_int_noprefix(table_data, vms->gic_version, 1); + build_append_int_noprefix(table_data, 0, 3); /* Reserved */ for (i = 0; i < MACHINE(vms)->smp.cpus; 
i++) { - AcpiMadtGenericCpuInterface *gicc = acpi_data_push(table_data, - sizeof(*gicc)); ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i)); + uint64_t physical_base_address = 0, gich = 0, gicv = 0; + uint32_t vgic_interrupt = vms->virt ? PPI(ARCH_GIC_MAINT_IRQ) : 0; + uint32_t pmu_interrupt = arm_feature(&armcpu->env, ARM_FEATURE_PMU) ? + PPI(VIRTUAL_PMU_IRQ) : 0; - gicc->type = ACPI_APIC_GENERIC_CPU_INTERFACE; - gicc->length = sizeof(*gicc); - if (vms->gic_version == 2) { - gicc->base_address = cpu_to_le64(memmap[VIRT_GIC_CPU].base); - gicc->gich_base_address = cpu_to_le64(memmap[VIRT_GIC_HYP].base); - gicc->gicv_base_address = cpu_to_le64(memmap[VIRT_GIC_VCPU].base); + if (vms->gic_version == VIRT_GIC_VERSION_2) { + physical_base_address = memmap[VIRT_GIC_CPU].base; + gicv = memmap[VIRT_GIC_VCPU].base; + gich = memmap[VIRT_GIC_HYP].base; } - gicc->cpu_interface_number = cpu_to_le32(i); - gicc->arm_mpidr = cpu_to_le64(armcpu->mp_affinity); - gicc->uid = cpu_to_le32(i); - gicc->flags = cpu_to_le32(ACPI_MADT_GICC_ENABLED); - if (arm_feature(&armcpu->env, ARM_FEATURE_PMU)) { - gicc->performance_interrupt = cpu_to_le32(PPI(VIRTUAL_PMU_IRQ)); - } - if (vms->virt) { - gicc->vgic_interrupt = cpu_to_le32(PPI(ARCH_GIC_MAINT_IRQ)); - } + /* 5.2.12.14 GIC Structure */ + build_append_int_noprefix(table_data, 0xB, 1); /* Type */ + build_append_int_noprefix(table_data, 80, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + build_append_int_noprefix(table_data, i, 4); /* GIC ID */ + build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */ + /* Flags */ + build_append_int_noprefix(table_data, 1, 4); /* Enabled */ + /* Parking Protocol Version */ + build_append_int_noprefix(table_data, 0, 4); + /* Performance Interrupt GSIV */ + build_append_int_noprefix(table_data, pmu_interrupt, 4); + build_append_int_noprefix(table_data, 0, 8); /* Parked Address */ + /* Physical Base Address */ + build_append_int_noprefix(table_data, physical_base_address, 8); + build_append_int_noprefix(table_data, gicv, 8); /* GICV */ + build_append_int_noprefix(table_data, gich, 8); /* GICH */ + /* VGIC Maintenance interrupt */ + build_append_int_noprefix(table_data, vgic_interrupt, 4); + build_append_int_noprefix(table_data, 0, 8); /* GICR Base Address*/ + /* MPIDR */ + build_append_int_noprefix(table_data, armcpu->mp_affinity, 8); + /* Processor Power Efficiency Class */ + build_append_int_noprefix(table_data, 0, 1); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 3); } - if (vms->gic_version == 3) { - AcpiMadtGenericTranslator *gic_its; - int nb_redist_regions = virt_gicv3_redist_region_count(vms); - AcpiMadtGenericRedistributor *gicr = acpi_data_push(table_data, - sizeof *gicr); - - gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR; - gicr->length = sizeof(*gicr); - gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST].base); - gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST].size); - - if (nb_redist_regions == 2) { - gicr = acpi_data_push(table_data, sizeof(*gicr)); - gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR; - gicr->length = sizeof(*gicr); - gicr->base_address = - cpu_to_le64(memmap[VIRT_HIGH_GIC_REDIST2].base); - gicr->range_length = - cpu_to_le32(memmap[VIRT_HIGH_GIC_REDIST2].size); + if (vms->gic_version != VIRT_GIC_VERSION_2) { + build_append_gicr(table_data, memmap[VIRT_GIC_REDIST].base, + memmap[VIRT_GIC_REDIST].size); + if (virt_gicv3_redist_region_count(vms) == 2) { + build_append_gicr(table_data, memmap[VIRT_HIGH_GIC_REDIST2].base, + 
memmap[VIRT_HIGH_GIC_REDIST2].size); } if (its_class_name() && !vmc->no_its) { - gic_its = acpi_data_push(table_data, sizeof *gic_its); - gic_its->type = ACPI_APIC_GENERIC_TRANSLATOR; - gic_its->length = sizeof(*gic_its); - gic_its->translation_id = 0; - gic_its->base_address = cpu_to_le64(memmap[VIRT_GIC_ITS].base); + /* + * ACPI spec, Revision 6.0 Errata A + * (original 6.0 definition has invalid Length) + * 5.2.12.18 GIC ITS Structure + */ + build_append_int_noprefix(table_data, 0xF, 1); /* Type */ + build_append_int_noprefix(table_data, 20, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); /* GIC ITS ID */ + /* Physical Base Address */ + build_append_int_noprefix(table_data, memmap[VIRT_GIC_ITS].base, 8); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ } } else { - gic_msi = acpi_data_push(table_data, sizeof *gic_msi); - gic_msi->type = ACPI_APIC_GENERIC_MSI_FRAME; - gic_msi->length = sizeof(*gic_msi); - gic_msi->gic_msi_frame_id = 0; - gic_msi->base_address = cpu_to_le64(memmap[VIRT_GIC_V2M].base); - gic_msi->flags = cpu_to_le32(1); - gic_msi->spi_count = cpu_to_le16(NUM_GICV2M_SPIS); - gic_msi->spi_base = cpu_to_le16(irqmap[VIRT_GIC_V2M] + ARM_SPI_BASE); - } + const uint16_t spi_base = vms->irqmap[VIRT_GIC_V2M] + ARM_SPI_BASE; - build_header(linker, table_data, - (void *)(table_data->data + madt_start), "APIC", - table_data->len - madt_start, 3, vms->oem_id, - vms->oem_table_id); + /* 5.2.12.16 GIC MSI Frame Structure */ + build_append_int_noprefix(table_data, 0xD, 1); /* Type */ + build_append_int_noprefix(table_data, 24, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); /* GIC MSI Frame ID */ + /* Physical Base Address */ + build_append_int_noprefix(table_data, memmap[VIRT_GIC_V2M].base, 8); + build_append_int_noprefix(table_data, 1, 4); /* Flags */ + /* SPI Count */ + build_append_int_noprefix(table_data, NUM_GICV2M_SPIS, 2); + build_append_int_noprefix(table_data, spi_base, 2); /* SPI Base */ + } + acpi_table_end(linker, &table); } /* FADT */ -static void build_fadt_rev5(GArray *table_data, BIOSLinker *linker, +static void build_fadt_rev6(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms, unsigned dsdt_tbl_offset) { - /* ACPI v5.1 */ + /* ACPI v6.0 */ AcpiFadtData fadt = { - .rev = 5, - .minor_ver = 1, + .rev = 6, + .minor_ver = 0, .flags = 1 << ACPI_FADT_F_HW_REDUCED_ACPI, .xdsdt_tbl_offset = &dsdt_tbl_offset, }; @@ -692,10 +845,11 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) MachineState *ms = MACHINE(vms); const MemMapEntry *memmap = vms->memmap; const int *irqmap = vms->irqmap; + AcpiTable table = { .sig = "DSDT", .rev = 2, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; + acpi_table_begin(&table, table_data); dsdt = init_aml_allocator(); - /* Reserve space for header */ - acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader)); /* When booting the VM with UEFI, UEFI takes ownership of the RTC hardware. 
* While UEFI can use libfdt to disable the RTC device node in the DTB that @@ -712,8 +866,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]); acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO], (irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS); - acpi_dsdt_add_pci(scope, memmap, (irqmap[VIRT_PCIE] + ARM_SPI_BASE), - vms->highmem, vms->highmem_ecam, vms); + acpi_dsdt_add_pci(scope, memmap, irqmap[VIRT_PCIE] + ARM_SPI_BASE, vms); if (vms->acpi_dev) { build_ged_aml(scope, "\\_SB."GED_DEVICE, HOTPLUG_HANDLER(vms->acpi_dev), @@ -742,12 +895,10 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) aml_append(dsdt, scope); - /* copy AML table into ACPI tables blob and patch header there */ + /* copy AML table into ACPI tables blob */ g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); - build_header(linker, table_data, - (void *)(table_data->data + table_data->len - dsdt->buf->len), - "DSDT", dsdt->buf->len, 2, vms->oem_id, - vms->oem_table_id); + + acpi_table_end(linker, &table); free_aml_allocator(); } @@ -790,13 +941,19 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) dsdt = tables_blob->len; build_dsdt(tables_blob, tables->linker, vms); - /* FADT MADT GTDT MCFG SPCR pointed to by RSDT */ + /* FADT MADT PPTT GTDT MCFG SPCR DBG2 pointed to by RSDT */ acpi_add_table(table_offsets, tables_blob); - build_fadt_rev5(tables_blob, tables->linker, vms, dsdt); + build_fadt_rev6(tables_blob, tables->linker, vms, dsdt); acpi_add_table(table_offsets, tables_blob); build_madt(tables_blob, tables->linker, vms); + if (!vmc->no_cpu_topology) { + acpi_add_table(table_offsets, tables_blob); + build_pptt(tables_blob, tables->linker, ms, + vms->oem_id, vms->oem_table_id); + } + acpi_add_table(table_offsets, tables_blob); build_gtdt(tables_blob, tables->linker, vms); @@ -813,6 +970,9 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) acpi_add_table(table_offsets, tables_blob); build_spcr(tables_blob, tables->linker, vms); + acpi_add_table(table_offsets, tables_blob); + build_dbg2(tables_blob, tables->linker, vms); + if (vms->ras) { build_ghes_error_table(tables->hardware_errors, tables->linker); acpi_add_table(table_offsets, tables_blob); @@ -828,6 +988,12 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) build_slit(tables_blob, tables->linker, ms, vms->oem_id, vms->oem_table_id); } + + if (ms->numa_state->hmat_enabled) { + acpi_add_table(table_offsets, tables_blob); + build_hmat(tables_blob, tables->linker, ms->numa_state, + vms->oem_id, vms->oem_table_id); + } } if (ms->nvdimms_state->is_enabled) { @@ -849,6 +1015,12 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) } #endif + if (vms->iommu == VIRT_IOMMU_VIRTIO) { + acpi_add_table(table_offsets, tables_blob); + build_viot(ms, tables_blob, tables->linker, vms->virtio_iommu_bdf, + vms->oem_id, vms->oem_table_id); + } + /* XSDT is pointed to by RSDP */ xsdt = tables_blob->len; build_xsdt(tables_blob, tables->linker, table_offsets, vms->oem_id, diff --git a/hw/arm/virt.c b/hw/arm/virt.c index 81eda46b0b..b871350856 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -29,7 +29,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qemu/units.h" #include "qemu/option.h" @@ -49,6 +48,7 @@ #include "sysemu/runstate.h" #include "sysemu/tpm.h" #include "sysemu/kvm.h" +#include "sysemu/hvf.h" #include "hw/loader.h" #include 
"qapi/error.h" #include "qemu/bitops.h" @@ -56,7 +56,7 @@ #include "qemu/module.h" #include "hw/pci-host/gpex.h" #include "hw/virtio/virtio-pci.h" -#include "hw/arm/sysbus-fdt.h" +#include "hw/core/sysbus-fdt.h" #include "hw/platform-bus.h" #include "hw/qdev-properties.h" #include "hw/arm/fdt.h" @@ -71,9 +71,11 @@ #include "hw/arm/smmuv3.h" #include "hw/acpi/acpi.h" #include "target/arm/internals.h" +#include "hw/mem/memory-device.h" #include "hw/mem/pc-dimm.h" #include "hw/mem/nvdimm.h" #include "hw/acpi/generic_event_device.h" +#include "hw/virtio/virtio-mem-pci.h" #include "hw/virtio/virtio-iommu.h" #include "hw/char/pl011.h" #include "qemu/guest-random.h" @@ -197,9 +199,13 @@ static const int a15irqmap[] = { static const char *valid_cpus[] = { ARM_CPU_TYPE_NAME("cortex-a7"), ARM_CPU_TYPE_NAME("cortex-a15"), + ARM_CPU_TYPE_NAME("cortex-a35"), ARM_CPU_TYPE_NAME("cortex-a53"), ARM_CPU_TYPE_NAME("cortex-a57"), ARM_CPU_TYPE_NAME("cortex-a72"), + ARM_CPU_TYPE_NAME("cortex-a76"), + ARM_CPU_TYPE_NAME("a64fx"), + ARM_CPU_TYPE_NAME("neoverse-n1"), ARM_CPU_TYPE_NAME("host"), ARM_CPU_TYPE_NAME("max"), }; @@ -216,14 +222,18 @@ static bool cpu_type_valid(const char *cpu) return false; } -static void create_kaslr_seed(MachineState *ms, const char *node) +static void create_randomness(MachineState *ms, const char *node) { - uint64_t seed; + struct { + uint64_t kaslr; + uint8_t rng[32]; + } seed; if (qemu_guest_getrandom(&seed, sizeof(seed), NULL)) { return; } - qemu_fdt_setprop_u64(ms->fdt, node, "kaslr-seed", seed); + qemu_fdt_setprop_u64(ms->fdt, node, "kaslr-seed", seed.kaslr); + qemu_fdt_setprop(ms->fdt, node, "rng-seed", seed.rng, sizeof(seed.rng)); } static void create_fdt(VirtMachineState *vms) @@ -243,14 +253,19 @@ static void create_fdt(VirtMachineState *vms) qemu_fdt_setprop_string(fdt, "/", "compatible", "linux,dummy-virt"); qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x2); qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x2); + qemu_fdt_setprop_string(fdt, "/", "model", "linux,dummy-virt"); /* /chosen must exist for load_dtb to fill in necessary properties later */ qemu_fdt_add_subnode(fdt, "/chosen"); - create_kaslr_seed(ms, "/chosen"); + if (vms->dtb_randomness) { + create_randomness(ms, "/chosen"); + } if (vms->secure) { qemu_fdt_add_subnode(fdt, "/secure-chosen"); - create_kaslr_seed(ms, "/secure-chosen"); + if (vms->dtb_randomness) { + create_randomness(ms, "/secure-chosen"); + } } /* Clock node, for the benefit of the UART. The kernel device tree @@ -350,20 +365,21 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms) int cpu; int addr_cells = 1; const MachineState *ms = MACHINE(vms); + const VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); int smp_cpus = ms->smp.cpus; /* - * From Documentation/devicetree/bindings/arm/cpus.txt - * On ARM v8 64-bit systems value should be set to 2, - * that corresponds to the MPIDR_EL1 register size. - * If MPIDR_EL1[63:32] value is equal to 0 on all CPUs - * in the system, #address-cells can be set to 1, since - * MPIDR_EL1[63:32] bits are not used for CPUs - * identification. + * See Linux Documentation/devicetree/bindings/arm/cpus.yaml + * On ARM v8 64-bit systems value should be set to 2, + * that corresponds to the MPIDR_EL1 register size. + * If MPIDR_EL1[63:32] value is equal to 0 on all CPUs + * in the system, #address-cells can be set to 1, since + * MPIDR_EL1[63:32] bits are not used for CPUs + * identification. * - * Here we actually don't know whether our system is 32- or 64-bit one. 
- * The simplest way to go is to examine affinity IDs of all our CPUs. If - * at least one of them has Aff3 populated, we set #address-cells to 2. + * Here we actually don't know whether our system is 32- or 64-bit one. + * The simplest way to go is to examine affinity IDs of all our CPUs. If + * at least one of them has Aff3 populated, we set #address-cells to 2. */ for (cpu = 0; cpu < smp_cpus; cpu++) { ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu)); @@ -406,8 +422,58 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms) ms->possible_cpus->cpus[cs->cpu_index].props.node_id); } + if (!vmc->no_cpu_topology) { + qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", + qemu_fdt_alloc_phandle(ms->fdt)); + } + g_free(nodename); } + + if (!vmc->no_cpu_topology) { + /* + * Add vCPU topology description through fdt node cpu-map. + * + * See Linux Documentation/devicetree/bindings/cpu/cpu-topology.txt + * In a SMP system, the hierarchy of CPUs can be defined through + * four entities that are used to describe the layout of CPUs in + * the system: socket/cluster/core/thread. + * + * A socket node represents the boundary of system physical package + * and its child nodes must be one or more cluster nodes. A system + * can contain several layers of clustering within a single physical + * package and cluster nodes can be contained in parent cluster nodes. + * + * Note: currently we only support one layer of clustering within + * each physical package. + */ + qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map"); + + for (cpu = smp_cpus - 1; cpu >= 0; cpu--) { + char *cpu_path = g_strdup_printf("/cpus/cpu@%d", cpu); + char *map_path; + + if (ms->smp.threads > 1) { + map_path = g_strdup_printf( + "/cpus/cpu-map/socket%d/cluster%d/core%d/thread%d", + cpu / (ms->smp.clusters * ms->smp.cores * ms->smp.threads), + (cpu / (ms->smp.cores * ms->smp.threads)) % ms->smp.clusters, + (cpu / ms->smp.threads) % ms->smp.cores, + cpu % ms->smp.threads); + } else { + map_path = g_strdup_printf( + "/cpus/cpu-map/socket%d/cluster%d/core%d", + cpu / (ms->smp.clusters * ms->smp.cores), + (cpu / ms->smp.cores) % ms->smp.clusters, + cpu % ms->smp.cores); + } + qemu_fdt_add_path(ms->fdt, map_path); + qemu_fdt_setprop_phandle(ms->fdt, map_path, "cpu", cpu_path); + + g_free(map_path); + g_free(cpu_path); + } + } } static void fdt_add_its_gic_node(VirtMachineState *vms) @@ -422,6 +488,7 @@ static void fdt_add_its_gic_node(VirtMachineState *vms) qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "arm,gic-v3-its"); qemu_fdt_setprop(ms->fdt, nodename, "msi-controller", NULL, 0); + qemu_fdt_setprop_cell(ms->fdt, nodename, "#msi-cells", 1); qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", 2, vms->memmap[VIRT_GIC_ITS].base, 2, vms->memmap[VIRT_GIC_ITS].size); @@ -464,7 +531,7 @@ static void fdt_add_gic_node(VirtMachineState *vms) qemu_fdt_setprop_cell(ms->fdt, nodename, "#address-cells", 0x2); qemu_fdt_setprop_cell(ms->fdt, nodename, "#size-cells", 0x2); qemu_fdt_setprop(ms->fdt, nodename, "ranges", NULL, 0); - if (vms->gic_version == VIRT_GIC_VERSION_3) { + if (vms->gic_version != VIRT_GIC_VERSION_2) { int nb_redist_regions = virt_gicv3_redist_region_count(vms); qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", @@ -583,6 +650,12 @@ static void create_its(VirtMachineState *vms) const char *itsclass = its_class_name(); DeviceState *dev; + if (!strcmp(itsclass, "arm-gicv3-its")) { + if (!vms->tcg_its) { + itsclass = NULL; + } + } + if (!itsclass) { /* Do nothing if not supported */ return; @@ -620,20 +693,38 @@ static void 
create_v2m(VirtMachineState *vms) vms->msi_controller = VIRT_MSI_CTRL_GICV2M; } -static void create_gic(VirtMachineState *vms) +static void create_gic(VirtMachineState *vms, MemoryRegion *mem) { MachineState *ms = MACHINE(vms); /* We create a standalone GIC */ SysBusDevice *gicbusdev; const char *gictype; - int type = vms->gic_version, i; + int i; unsigned int smp_cpus = ms->smp.cpus; uint32_t nb_redist_regions = 0; + int revision; - gictype = (type == 3) ? gicv3_class_name() : gic_class_name(); + if (vms->gic_version == VIRT_GIC_VERSION_2) { + gictype = gic_class_name(); + } else { + gictype = gicv3_class_name(); + } + switch (vms->gic_version) { + case VIRT_GIC_VERSION_2: + revision = 2; + break; + case VIRT_GIC_VERSION_3: + revision = 3; + break; + case VIRT_GIC_VERSION_4: + revision = 4; + break; + default: + g_assert_not_reached(); + } vms->gic = qdev_new(gictype); - qdev_prop_set_uint32(vms->gic, "revision", type); + qdev_prop_set_uint32(vms->gic, "revision", revision); qdev_prop_set_uint32(vms->gic, "num-cpu", smp_cpus); /* Note that the num-irq property counts both internal and external * interrupts; there are always 32 of the former (mandated by GIC spec). @@ -643,9 +734,8 @@ static void create_gic(VirtMachineState *vms) qdev_prop_set_bit(vms->gic, "has-security-extensions", vms->secure); } - if (type == 3) { - uint32_t redist0_capacity = - vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE; + if (vms->gic_version != VIRT_GIC_VERSION_2) { + uint32_t redist0_capacity = virt_redist_capacity(vms, VIRT_GIC_REDIST); uint32_t redist0_count = MIN(smp_cpus, redist0_capacity); nb_redist_regions = virt_gicv3_redist_region_count(vms); @@ -654,9 +744,17 @@ static void create_gic(VirtMachineState *vms) nb_redist_regions); qdev_prop_set_uint32(vms->gic, "redist-region-count[0]", redist0_count); + if (!kvm_irqchip_in_kernel()) { + if (vms->tcg_its) { + object_property_set_link(OBJECT(vms->gic), "sysmem", + OBJECT(mem), &error_fatal); + qdev_prop_set_bit(vms->gic, "has-lpi", true); + } + } + if (nb_redist_regions == 2) { uint32_t redist1_capacity = - vms->memmap[VIRT_HIGH_GIC_REDIST2].size / GICV3_REDIST_SIZE; + virt_redist_capacity(vms, VIRT_HIGH_GIC_REDIST2); qdev_prop_set_uint32(vms->gic, "redist-region-count[1]", MIN(smp_cpus - redist0_count, redist1_capacity)); @@ -670,7 +768,7 @@ static void create_gic(VirtMachineState *vms) gicbusdev = SYS_BUS_DEVICE(vms->gic); sysbus_realize_and_unref(gicbusdev, &error_fatal); sysbus_mmio_map(gicbusdev, 0, vms->memmap[VIRT_GIC_DIST].base); - if (type == 3) { + if (vms->gic_version != VIRT_GIC_VERSION_2) { sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_REDIST].base); if (nb_redist_regions == 2) { sysbus_mmio_map(gicbusdev, 2, @@ -708,7 +806,7 @@ static void create_gic(VirtMachineState *vms) ppibase + timer_irq[irq])); } - if (type == 3) { + if (vms->gic_version != VIRT_GIC_VERSION_2) { qemu_irq irq = qdev_get_gpio_in(vms->gic, ppibase + ARCH_GIC_MAINT_IRQ); qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", @@ -734,9 +832,9 @@ static void create_gic(VirtMachineState *vms) fdt_add_gic_node(vms); - if (type == 3 && vms->its) { + if (vms->gic_version != VIRT_GIC_VERSION_2 && vms->its) { create_its(vms); - } else if (type == 2) { + } else if (vms->gic_version == VIRT_GIC_VERSION_2) { create_v2m(vms); } } @@ -834,8 +932,6 @@ static void create_gpio_keys(char *fdt, DeviceState *pl061_dev, qemu_fdt_add_subnode(fdt, "/gpio-keys"); qemu_fdt_setprop_string(fdt, "/gpio-keys", "compatible", "gpio-keys"); - qemu_fdt_setprop_cell(fdt, 
"/gpio-keys", "#size-cells", 0); - qemu_fdt_setprop_cell(fdt, "/gpio-keys", "#address-cells", 1); qemu_fdt_add_subnode(fdt, "/gpio-keys/poweroff"); qemu_fdt_setprop_string(fdt, "/gpio-keys/poweroff", @@ -1104,7 +1200,7 @@ static void virt_flash_fdt(VirtMachineState *vms, qemu_fdt_setprop_string(ms->fdt, nodename, "secure-status", "okay"); g_free(nodename); - nodename = g_strdup_printf("/flash@%" PRIx64, flashbase); + nodename = g_strdup_printf("/flash@%" PRIx64, flashbase + flashsize); qemu_fdt_add_subnode(ms->fdt, nodename); qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cfi-flash"); qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", @@ -1265,8 +1361,6 @@ static void create_smmu(const VirtMachineState *vms, qemu_fdt_setprop(ms->fdt, node, "interrupt-names", irq_names, sizeof(irq_names)); - qemu_fdt_setprop_cell(ms->fdt, node, "clocks", vms->clock_phandle); - qemu_fdt_setprop_string(ms->fdt, node, "clock-names", "apb_pclk"); qemu_fdt_setprop(ms->fdt, node, "dma-coherent", NULL, 0); qemu_fdt_setprop_cell(ms->fdt, node, "#iommu-cells", 1); @@ -1277,14 +1371,15 @@ static void create_smmu(const VirtMachineState *vms, static void create_virtio_iommu_dt_bindings(VirtMachineState *vms) { - const char compat[] = "virtio,pci-iommu"; + const char compat[] = "virtio,pci-iommu\0pci1af4,1057"; uint16_t bdf = vms->virtio_iommu_bdf; MachineState *ms = MACHINE(vms); char *node; vms->iommu_phandle = qemu_fdt_alloc_phandle(ms->fdt); - node = g_strdup_printf("%s/virtio_iommu@%d", vms->pciehb_nodename, bdf); + node = g_strdup_printf("%s/virtio_iommu@%x,%x", vms->pciehb_nodename, + PCI_SLOT(bdf), PCI_FUNC(bdf)); qemu_fdt_add_subnode(ms->fdt, node); qemu_fdt_setprop(ms->fdt, node, "compatible", compat, sizeof(compat)); qemu_fdt_setprop_sized_cells(ms->fdt, node, "reg", @@ -1347,7 +1442,7 @@ static void create_pcie(VirtMachineState *vms) mmio_reg, base_mmio, size_mmio); memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias); - if (vms->highmem) { + if (vms->highmem_mmio) { /* Map high MMIO space */ MemoryRegion *high_mmio_alias = g_new0(MemoryRegion, 1); @@ -1394,14 +1489,14 @@ static void create_pcie(VirtMachineState *vms) qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0); if (vms->msi_phandle) { - qemu_fdt_setprop_cells(ms->fdt, nodename, "msi-parent", - vms->msi_phandle); + qemu_fdt_setprop_cells(ms->fdt, nodename, "msi-map", + 0, vms->msi_phandle, 0, 0x10000); } qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", 2, base_ecam, 2, size_ecam); - if (vms->highmem) { + if (vms->highmem_mmio) { qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "ranges", 1, FDT_PCI_RANGE_IOPORT, 2, 0, 2, base_pio, 2, size_pio, @@ -1444,7 +1539,7 @@ static void create_platform_bus(VirtMachineState *vms) MemoryRegion *sysmem = get_system_memory(); dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE); - dev->id = TYPE_PLATFORM_BUS_DEVICE; + dev->id = g_strdup(TYPE_PLATFORM_BUS_DEVICE); qdev_prop_set_uint32(dev, "num_irqs", PLATFORM_BUS_NUM_IRQS); qdev_prop_set_uint32(dev, "mmio_size", vms->memmap[VIRT_PLATFORM_BUS].size); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); @@ -1524,7 +1619,7 @@ static void virt_build_smbios(VirtMachineState *vms) smbios_set_defaults("QEMU", product, vmc->smbios_old_sys_ver ? 
"1.0" : mc->name, false, - true, SMBIOS_ENTRY_POINT_30); + true, SMBIOS_ENTRY_POINT_TYPE_64); smbios_get_tables(MACHINE(vms), NULL, 0, &smbios_tables, &smbios_tables_len, @@ -1586,19 +1681,19 @@ static uint64_t virt_cpu_mp_affinity(VirtMachineState *vms, int idx) * purposes are to make TCG consistent (with 64-bit KVM hosts) * and to improve SGI efficiency. */ - if (vms->gic_version == VIRT_GIC_VERSION_3) { - clustersz = GICV3_TARGETLIST_BITS; - } else { + if (vms->gic_version == VIRT_GIC_VERSION_2) { clustersz = GIC_TARGETLIST_BITS; + } else { + clustersz = GICV3_TARGETLIST_BITS; } } return arm_cpu_mp_affinity(idx, clustersz); } -static void virt_set_memmap(VirtMachineState *vms) +static void virt_set_memmap(VirtMachineState *vms, int pa_bits) { MachineState *ms = MACHINE(vms); - hwaddr base, device_memory_base, device_memory_size; + hwaddr base, device_memory_base, device_memory_size, memtop; int i; vms->memmap = extended_memmap; @@ -1613,6 +1708,14 @@ static void virt_set_memmap(VirtMachineState *vms) exit(EXIT_FAILURE); } + /* + * !highmem is exactly the same as limiting the PA space to 32bit, + * irrespective of the underlying capabilities of the HW. + */ + if (!vms->highmem) { + pa_bits = 32; + } + /* * We compute the base of the high IO region depending on the * amount of initial and device memory. The device memory start/size @@ -1625,7 +1728,12 @@ static void virt_set_memmap(VirtMachineState *vms) device_memory_size = ms->maxram_size - ms->ram_size + ms->ram_slots * GiB; /* Base address of the high IO region */ - base = device_memory_base + ROUND_UP(device_memory_size, GiB); + memtop = base = device_memory_base + ROUND_UP(device_memory_size, GiB); + if (memtop > BIT_ULL(pa_bits)) { + error_report("Addressing limited to %d bits, but memory exceeds it by %llu bytes\n", + pa_bits, memtop - BIT_ULL(pa_bits)); + exit(EXIT_FAILURE); + } if (base < device_memory_base) { error_report("maxmem/slots too huge"); exit(EXIT_FAILURE); @@ -1634,15 +1742,43 @@ static void virt_set_memmap(VirtMachineState *vms) base = vms->memmap[VIRT_MEM].base + LEGACY_RAMLIMIT_BYTES; } + /* We know for sure that at least the memory fits in the PA space */ + vms->highest_gpa = memtop - 1; + for (i = VIRT_LOWMEMMAP_LAST; i < ARRAY_SIZE(extended_memmap); i++) { hwaddr size = extended_memmap[i].size; + bool fits; base = ROUND_UP(base, size); vms->memmap[i].base = base; vms->memmap[i].size = size; + + /* + * Check each device to see if they fit in the PA space, + * moving highest_gpa as we go. + * + * For each device that doesn't fit, disable it. 
+ */ + fits = (base + size) <= BIT_ULL(pa_bits); + if (fits) { + vms->highest_gpa = base + size - 1; + } + + switch (i) { + case VIRT_HIGH_GIC_REDIST2: + vms->highmem_redists &= fits; + break; + case VIRT_HIGH_PCIE_ECAM: + vms->highmem_ecam &= fits; + break; + case VIRT_HIGH_PCIE_MMIO: + vms->highmem_mmio &= fits; + break; + } + base += size; } - vms->highest_gpa = base - 1; + if (device_memory_size > 0) { ms->device_memory = g_malloc0(sizeof(*ms->device_memory)); ms->device_memory->base = device_memory_base; @@ -1681,6 +1817,10 @@ static void finalize_gic_version(VirtMachineState *vms) error_report( "gic-version=3 is not supported with kernel-irqchip=off"); exit(1); + case VIRT_GIC_VERSION_4: + error_report( + "gic-version=4 is not supported with kernel-irqchip=off"); + exit(1); } } @@ -1718,6 +1858,9 @@ static void finalize_gic_version(VirtMachineState *vms) case VIRT_GIC_VERSION_2: case VIRT_GIC_VERSION_3: break; + case VIRT_GIC_VERSION_4: + error_report("gic-version=4 is not supported with KVM"); + exit(1); } /* Check chosen version is effectively supported by the host */ @@ -1739,11 +1882,27 @@ static void finalize_gic_version(VirtMachineState *vms) vms->gic_version = VIRT_GIC_VERSION_2; break; case VIRT_GIC_VERSION_MAX: - vms->gic_version = VIRT_GIC_VERSION_3; + if (module_object_class_by_name("arm-gicv3")) { + /* CONFIG_ARM_GICV3_TCG was set */ + if (vms->virt) { + /* GICv4 only makes sense if CPU has EL2 */ + vms->gic_version = VIRT_GIC_VERSION_4; + } else { + vms->gic_version = VIRT_GIC_VERSION_3; + } + } else { + vms->gic_version = VIRT_GIC_VERSION_2; + } break; case VIRT_GIC_VERSION_HOST: error_report("gic-version=host requires KVM"); exit(1); + case VIRT_GIC_VERSION_4: + if (!vms->virt) { + error_report("gic-version=4 requires virtualization enabled"); + exit(1); + } + break; case VIRT_GIC_VERSION_2: case VIRT_GIC_VERSION_3: break; @@ -1833,12 +1992,35 @@ static void machvirt_init(MachineState *machine) unsigned int smp_cpus = machine->smp.cpus; unsigned int max_cpus = machine->smp.max_cpus; + if (!cpu_type_valid(machine->cpu_type)) { + error_report("mach-virt: CPU type %s not supported", machine->cpu_type); + exit(1); + } + + possible_cpus = mc->possible_cpu_arch_ids(machine); + /* * In accelerated mode, the memory map is computed earlier in kvm_type() * to create a VM with the right number of IPA bits. */ if (!vms->memmap) { - virt_set_memmap(vms); + Object *cpuobj; + ARMCPU *armcpu; + int pa_bits; + + /* + * Instanciate a temporary CPU object to find out about what + * we are about to deal with. Once this is done, get rid of + * the object. + */ + cpuobj = object_new(possible_cpus->cpus[0].type); + armcpu = ARM_CPU(cpuobj); + + pa_bits = arm_pamax(armcpu); + + object_unref(cpuobj); + + virt_set_memmap(vms, pa_bits); } /* We can probe only here because during property set @@ -1846,17 +2028,7 @@ static void machvirt_init(MachineState *machine) */ finalize_gic_version(vms); - if (!cpu_type_valid(machine->cpu_type)) { - error_report("mach-virt: CPU type %s not supported", machine->cpu_type); - exit(1); - } - if (vms->secure) { - if (kvm_enabled()) { - error_report("mach-virt: KVM does not support Security extensions"); - exit(1); - } - /* * The Secure view of the world is the same as the NonSecure, * but with a few extra devices. 
Create it as a container region @@ -1890,16 +2062,16 @@ static void machvirt_init(MachineState *machine) vms->psci_conduit = QEMU_PSCI_CONDUIT_HVC; } - /* The maximum number of CPUs depends on the GIC version, or on how - * many redistributors we can fit into the memory map. + /* + * The maximum number of CPUs depends on the GIC version, or on how + * many redistributors we can fit into the memory map (which in turn + * depends on whether this is a GICv3 or v4). */ - if (vms->gic_version == VIRT_GIC_VERSION_3) { - virt_max_cpus = - vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE; - virt_max_cpus += - vms->memmap[VIRT_HIGH_GIC_REDIST2].size / GICV3_REDIST_SIZE; - } else { + if (vms->gic_version == VIRT_GIC_VERSION_2) { virt_max_cpus = GIC_NCPU; + } else { + virt_max_cpus = virt_redist_capacity(vms, VIRT_GIC_REDIST) + + virt_redist_capacity(vms, VIRT_HIGH_GIC_REDIST2); } if (max_cpus > virt_max_cpus) { @@ -1909,21 +2081,29 @@ static void machvirt_init(MachineState *machine) exit(1); } - if (vms->virt && kvm_enabled()) { - error_report("mach-virt: KVM does not support providing " - "Virtualization extensions to the guest CPU"); + if (vms->secure && (kvm_enabled() || hvf_enabled())) { + error_report("mach-virt: %s does not support providing " + "Security extensions (TrustZone) to the guest CPU", + kvm_enabled() ? "KVM" : "HVF"); exit(1); } - if (vms->mte && kvm_enabled()) { - error_report("mach-virt: KVM does not support providing " - "MTE to the guest CPU"); + if (vms->virt && (kvm_enabled() || hvf_enabled())) { + error_report("mach-virt: %s does not support providing " + "Virtualization extensions to the guest CPU", + kvm_enabled() ? "KVM" : "HVF"); + exit(1); + } + + if (vms->mte && (kvm_enabled() || hvf_enabled())) { + error_report("mach-virt: %s does not support providing " + "MTE to the guest CPU", + kvm_enabled() ? 
"KVM" : "HVF"); exit(1); } create_fdt(vms); - possible_cpus = mc->possible_cpu_arch_ids(machine); assert(possible_cpus->len == max_cpus); for (n = 0; n < possible_cpus->len; n++) { Object *cpuobj; @@ -1953,17 +2133,6 @@ static void machvirt_init(MachineState *machine) object_property_set_bool(cpuobj, "has_el2", false, NULL); } - if (vms->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED) { - object_property_set_int(cpuobj, "psci-conduit", vms->psci_conduit, - NULL); - - /* Secondary CPUs start in PSCI powered-down state */ - if (n > 0) { - object_property_set_bool(cpuobj, "start-powered-off", true, - NULL); - } - } - if (vmc->kvm_no_adjvtime && object_property_find(cpuobj, "kvm-no-adjvtime")) { object_property_set_bool(cpuobj, "kvm-no-adjvtime", true, NULL); @@ -1978,6 +2147,10 @@ static void machvirt_init(MachineState *machine) object_property_set_bool(cpuobj, "pmu", false, NULL); } + if (vmc->no_tcg_lpa2 && object_property_find(cpuobj, "lpa2")) { + object_property_set_bool(cpuobj, "lpa2", false, NULL); + } + if (object_property_find(cpuobj, "reset-cbar")) { object_property_set_int(cpuobj, "reset-cbar", vms->memmap[VIRT_CPUPERIPHS].base, @@ -2043,7 +2216,7 @@ static void machvirt_init(MachineState *machine) virt_flash_fdt(vms, sysmem, secure_sysmem ?: sysmem); - create_gic(vms); + create_gic(vms, sysmem); virt_cpu_post_init(vms, sysmem); @@ -2061,7 +2234,7 @@ static void machvirt_init(MachineState *machine) machine->ram_size, "mach-virt.tag"); } - vms->highmem_ecam &= vms->highmem && (!firmware_loaded || aarch64); + vms->highmem_ecam &= (!firmware_loaded || aarch64); create_rtc(vms); @@ -2105,12 +2278,12 @@ static void machvirt_init(MachineState *machine) } vms->bootinfo.ram_size = machine->ram_size; - vms->bootinfo.nb_cpus = smp_cpus; vms->bootinfo.board_id = -1; vms->bootinfo.loader_start = vms->memmap[VIRT_MEM].base; vms->bootinfo.get_dtb = machvirt_dtb; vms->bootinfo.skip_dtb_autoload = true; vms->bootinfo.firmware_loaded = firmware_loaded; + vms->bootinfo.psci_conduit = vms->psci_conduit; arm_load_kernel(ARM_CPU(first_cpu), machine, &vms->bootinfo); vms->machine_done.notify = virt_machine_done; @@ -2173,6 +2346,20 @@ static void virt_set_its(Object *obj, bool value, Error **errp) vms->its = value; } +static bool virt_get_dtb_randomness(Object *obj, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + return vms->dtb_randomness; +} + +static void virt_set_dtb_randomness(Object *obj, bool value, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + vms->dtb_randomness = value; +} + static char *virt_get_oem_id(Object *obj, Error **errp) { VirtMachineState *vms = VIRT_MACHINE(obj); @@ -2272,8 +2459,19 @@ static void virt_set_mte(Object *obj, bool value, Error **errp) static char *virt_get_gic_version(Object *obj, Error **errp) { VirtMachineState *vms = VIRT_MACHINE(obj); - const char *val = vms->gic_version == VIRT_GIC_VERSION_3 ? 
"3" : "2"; + const char *val; + switch (vms->gic_version) { + case VIRT_GIC_VERSION_4: + val = "4"; + break; + case VIRT_GIC_VERSION_3: + val = "3"; + break; + default: + val = "2"; + break; + } return g_strdup(val); } @@ -2281,7 +2479,9 @@ static void virt_set_gic_version(Object *obj, const char *value, Error **errp) { VirtMachineState *vms = VIRT_MACHINE(obj); - if (!strcmp(value, "3")) { + if (!strcmp(value, "4")) { + vms->gic_version = VIRT_GIC_VERSION_4; + } else if (!strcmp(value, "3")) { vms->gic_version = VIRT_GIC_VERSION_3; } else if (!strcmp(value, "2")) { vms->gic_version = VIRT_GIC_VERSION_2; @@ -2350,7 +2550,9 @@ virt_cpu_index_to_props(MachineState *ms, unsigned cpu_index) static int64_t virt_get_default_cpu_node_id(const MachineState *ms, int idx) { - return idx % ms->numa_state->num_nodes; + int64_t socket_id = ms->possible_cpus->cpus[idx].props.socket_id; + + return socket_id % ms->numa_state->num_nodes; } static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms) @@ -2358,6 +2560,7 @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms) int n; unsigned int max_cpus = ms->smp.max_cpus; VirtMachineState *vms = VIRT_MACHINE(ms); + MachineClass *mc = MACHINE_GET_CLASS(vms); if (ms->possible_cpus) { assert(ms->possible_cpus->len == max_cpus); @@ -2371,8 +2574,20 @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms) ms->possible_cpus->cpus[n].type = ms->cpu_type; ms->possible_cpus->cpus[n].arch_id = virt_cpu_mp_affinity(vms, n); + + assert(!mc->smp_props.dies_supported); + ms->possible_cpus->cpus[n].props.has_socket_id = true; + ms->possible_cpus->cpus[n].props.socket_id = + n / (ms->smp.clusters * ms->smp.cores * ms->smp.threads); + ms->possible_cpus->cpus[n].props.has_cluster_id = true; + ms->possible_cpus->cpus[n].props.cluster_id = + (n / (ms->smp.cores * ms->smp.threads)) % ms->smp.clusters; + ms->possible_cpus->cpus[n].props.has_core_id = true; + ms->possible_cpus->cpus[n].props.core_id = + (n / ms->smp.threads) % ms->smp.cores; ms->possible_cpus->cpus[n].props.has_thread_id = true; - ms->possible_cpus->cpus[n].props.thread_id = n; + ms->possible_cpus->cpus[n].props.thread_id = + n % ms->smp.threads; } return ms->possible_cpus; } @@ -2420,6 +2635,64 @@ static void virt_memory_plug(HotplugHandler *hotplug_dev, dev, &error_abort); } +static void virt_virtio_md_pci_pre_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + HotplugHandler *hotplug_dev2 = qdev_get_bus_hotplug_handler(dev); + Error *local_err = NULL; + + if (!hotplug_dev2 && dev->hotplugged) { + /* + * Without a bus hotplug handler, we cannot control the plug/unplug + * order. We should never reach this point when hotplugging on ARM. + * However, it's nice to add a safety net, similar to what we have + * on x86. + */ + error_setg(errp, "hotplug of virtio based memory devices not supported" + " on this bus."); + return; + } + /* + * First, see if we can plug this memory device at all. If that + * succeeds, branch of to the actual hotplug handler. 
+ */ + memory_device_pre_plug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev), NULL, + &local_err); + if (!local_err && hotplug_dev2) { + hotplug_handler_pre_plug(hotplug_dev2, dev, &local_err); + } + error_propagate(errp, local_err); +} + +static void virt_virtio_md_pci_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + HotplugHandler *hotplug_dev2 = qdev_get_bus_hotplug_handler(dev); + Error *local_err = NULL; + + /* + * Plug the memory device first and then branch off to the actual + * hotplug handler. If that one fails, we can easily undo the memory + * device bits. + */ + memory_device_plug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev)); + if (hotplug_dev2) { + hotplug_handler_plug(hotplug_dev2, dev, &local_err); + if (local_err) { + memory_device_unplug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev)); + } + } + error_propagate(errp, local_err); +} + +static void virt_virtio_md_pci_unplug_request(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + /* We don't support hot unplug of virtio based memory devices */ + error_setg(errp, "virtio based memory devices cannot be unplugged."); +} + + static void virt_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { @@ -2427,10 +2700,17 @@ static void virt_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev, if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { virt_memory_pre_plug(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) { + virt_virtio_md_pci_pre_plug(hotplug_dev, dev, errp); } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { hwaddr db_start = 0, db_end = 0; char *resv_prop_str; + if (vms->iommu != VIRT_IOMMU_NONE) { + error_setg(errp, "virt machine does not support multiple IOMMUs"); + return; + } + switch (vms->msi_controller) { case VIRT_MSI_CTRL_NONE: return; @@ -2450,8 +2730,9 @@ static void virt_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev, db_start, db_end, VIRTIO_IOMMU_RESV_MEM_T_MSI); - qdev_prop_set_uint32(dev, "len-reserved-regions", 1); - qdev_prop_set_string(dev, "reserved-regions[0]", resv_prop_str); + object_property_set_uint(OBJECT(dev), "len-reserved-regions", 1, errp); + object_property_set_str(OBJECT(dev), "reserved-regions[0]", + resv_prop_str, errp); g_free(resv_prop_str); } } @@ -2472,6 +2753,11 @@ static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev, if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { virt_memory_plug(hotplug_dev, dev, errp); } + + if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) { + virt_virtio_md_pci_plug(hotplug_dev, dev, errp); + } + if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { PCIDevice *pdev = PCI_DEVICE(dev); @@ -2528,6 +2814,8 @@ static void virt_machine_device_unplug_request_cb(HotplugHandler *hotplug_dev, { if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { virt_dimm_unplug_request(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) { + virt_virtio_md_pci_unplug_request(hotplug_dev, dev, errp); } else { error_setg(errp, "device unplug request for unsupported device" " type: %s", object_get_typename(OBJECT(dev))); @@ -2551,16 +2839,11 @@ static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine, MachineClass *mc = MACHINE_GET_CLASS(machine); if (device_is_dynamic_sysbus(mc, dev) || - (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM))) { + object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) || + object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI) || + 
object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { return HOTPLUG_HANDLER(machine); } - if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { - VirtMachineState *vms = VIRT_MACHINE(machine); - - if (!vms->bootinfo.firmware_loaded || !virt_is_acpi_enabled(vms)) { - return HOTPLUG_HANDLER(machine); - } - } return NULL; } @@ -2577,7 +2860,7 @@ static int virt_kvm_type(MachineState *ms, const char *type_str) max_vm_pa_size = kvm_arm_get_max_vm_ipa_size(ms, &fixed_ipa); /* we freeze the memory map to compute the highest gpa */ - virt_set_memmap(vms); + virt_set_memmap(vms, max_vm_pa_size); requested_pa_size = 64 - clz64(vms->highest_gpa); @@ -2638,6 +2921,7 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) hc->unplug_request = virt_machine_device_unplug_request_cb; hc->unplug = virt_machine_device_unplug_cb; mc->nvdimm_supported = true; + mc->smp_props.clusters_supported = true; mc->auto_enable_numa_with_memhp = true; mc->auto_enable_numa_with_memdev = true; mc->default_ram_id = "mach-virt.ram"; @@ -2670,17 +2954,17 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) virt_set_gic_version); object_class_property_set_description(oc, "gic-version", "Set GIC version. " - "Valid values are 2, 3, host and max"); + "Valid values are 2, 3, 4, host and max"); object_class_property_add_str(oc, "iommu", virt_get_iommu, virt_set_iommu); object_class_property_set_description(oc, "iommu", "Set the IOMMU type. " "Valid values are none and smmuv3"); - object_class_property_add_bool(oc, "default_bus_bypass_iommu", + object_class_property_add_bool(oc, "default-bus-bypass-iommu", virt_get_default_bus_bypass_iommu, virt_set_default_bus_bypass_iommu); - object_class_property_set_description(oc, "default_bus_bypass_iommu", + object_class_property_set_description(oc, "default-bus-bypass-iommu", "Set on/off to enable/disable " "bypass_iommu for default root bus"); @@ -2702,6 +2986,19 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) "Set on/off to enable/disable " "ITS instantiation"); + object_class_property_add_bool(oc, "dtb-randomness", + virt_get_dtb_randomness, + virt_set_dtb_randomness); + object_class_property_set_description(oc, "dtb-randomness", + "Set off to disable passing random or " + "non-deterministic dtb nodes to guest"); + + object_class_property_add_bool(oc, "dtb-kaslr-seed", + virt_get_dtb_randomness, + virt_set_dtb_randomness); + object_class_property_set_description(oc, "dtb-kaslr-seed", + "Deprecated synonym of dtb-randomness"); + object_class_property_add_str(oc, "x-oem-id", virt_get_oem_id, virt_set_oem_id); @@ -2740,12 +3037,20 @@ static void virt_instance_init(Object *obj) vms->gic_version = VIRT_GIC_VERSION_NOSEL; vms->highmem_ecam = !vmc->no_highmem_ecam; + vms->highmem_mmio = true; + vms->highmem_redists = true; if (vmc->no_its) { vms->its = false; } else { /* Default allows ITS instantiation */ vms->its = true; + + if (vmc->no_tcg_its) { + vms->tcg_its = false; + } else { + vms->tcg_its = true; + } } /* Default disallows iommu instantiation */ @@ -2760,6 +3065,9 @@ static void virt_instance_init(Object *obj) /* MTE is disabled by default. 
*/ vms->mte = false; + /* Supply kaslr-seed and rng-seed by default */ + vms->dtb_randomness = true; + vms->irqmap = a15irqmap; virt_flash_create(vms); @@ -2788,10 +3096,48 @@ static void machvirt_machine_init(void) } type_init(machvirt_machine_init); -static void virt_machine_6_1_options(MachineClass *mc) +static void virt_machine_7_2_options(MachineClass *mc) { } -DEFINE_VIRT_MACHINE_AS_LATEST(6, 1) +DEFINE_VIRT_MACHINE_AS_LATEST(7, 2) + +static void virt_machine_7_1_options(MachineClass *mc) +{ + virt_machine_7_2_options(mc); + compat_props_add(mc->compat_props, hw_compat_7_1, hw_compat_7_1_len); +} +DEFINE_VIRT_MACHINE(7, 1) + +static void virt_machine_7_0_options(MachineClass *mc) +{ + virt_machine_7_1_options(mc); + compat_props_add(mc->compat_props, hw_compat_7_0, hw_compat_7_0_len); +} +DEFINE_VIRT_MACHINE(7, 0) + +static void virt_machine_6_2_options(MachineClass *mc) +{ + VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc)); + + virt_machine_7_0_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len); + vmc->no_tcg_lpa2 = true; +} +DEFINE_VIRT_MACHINE(6, 2) + +static void virt_machine_6_1_options(MachineClass *mc) +{ + VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc)); + + virt_machine_6_2_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len); + mc->smp_props.prefer_sockets = true; + vmc->no_cpu_topology = true; + + /* qemu ITS was introduced with 6.2 */ + vmc->no_tcg_its = true; +} +DEFINE_VIRT_MACHINE(6, 1) static void virt_machine_6_0_options(MachineClass *mc) { diff --git a/hw/arm/xilinx_zynq.c b/hw/arm/xilinx_zynq.c index 245af81bbb..3190cc0b8d 100644 --- a/hw/arm/xilinx_zynq.c +++ b/hw/arm/xilinx_zynq.c @@ -125,9 +125,10 @@ static void gem_init(NICInfo *nd, uint32_t base, qemu_irq irq) sysbus_connect_irq(s, 0, irq); } -static inline void zynq_init_spi_flashes(uint32_t base_addr, qemu_irq irq, - bool is_qspi) +static inline int zynq_init_spi_flashes(uint32_t base_addr, qemu_irq irq, + bool is_qspi, int unit0) { + int unit = unit0; DeviceState *dev; SysBusDevice *busdev; SSIBus *spi; @@ -156,7 +157,7 @@ static inline void zynq_init_spi_flashes(uint32_t base_addr, qemu_irq irq, spi = (SSIBus *)qdev_get_child_bus(dev, bus_name); for (j = 0; j < num_ss; ++j) { - DriveInfo *dinfo = drive_get_next(IF_MTD); + DriveInfo *dinfo = drive_get(IF_MTD, 0, unit++); flash_dev = qdev_new("n25q128"); if (dinfo) { qdev_prop_set_drive_err(flash_dev, "drive", @@ -170,6 +171,7 @@ static inline void zynq_init_spi_flashes(uint32_t base_addr, qemu_irq irq, } } + return unit; } static void zynq_init(MachineState *machine) @@ -247,9 +249,9 @@ static void zynq_init(MachineState *machine) pic[n] = qdev_get_gpio_in(dev, n); } - zynq_init_spi_flashes(0xE0006000, pic[58-IRQ_OFFSET], false); - zynq_init_spi_flashes(0xE0007000, pic[81-IRQ_OFFSET], false); - zynq_init_spi_flashes(0xE000D000, pic[51-IRQ_OFFSET], true); + n = zynq_init_spi_flashes(0xE0006000, pic[58 - IRQ_OFFSET], false, 0); + n = zynq_init_spi_flashes(0xE0007000, pic[81 - IRQ_OFFSET], false, n); + n = zynq_init_spi_flashes(0xE000D000, pic[51 - IRQ_OFFSET], true, n); sysbus_create_simple(TYPE_CHIPIDEA, 0xE0002000, pic[53 - IRQ_OFFSET]); sysbus_create_simple(TYPE_CHIPIDEA, 0xE0003000, pic[76 - IRQ_OFFSET]); @@ -298,7 +300,7 @@ static void zynq_init(MachineState *machine) sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, hci_addr); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[hci_irq - IRQ_OFFSET]); - di = drive_get_next(IF_SD); + di = drive_get(IF_SD, 0, n); blk = di ? 
blk_by_legacy_dinfo(di) : NULL; carddev = qdev_new(TYPE_SD_CARD); qdev_prop_set_drive_err(carddev, "drive", blk, &error_fatal); @@ -312,6 +314,9 @@ static void zynq_init(MachineState *machine) sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[39-IRQ_OFFSET]); dev = qdev_new("pl330"); + object_property_set_link(OBJECT(dev), "memory", + OBJECT(address_space_mem), + &error_fatal); qdev_prop_set_uint8(dev, "num_chnls", 8); qdev_prop_set_uint8(dev, "num_periph_req", 4); qdev_prop_set_uint8(dev, "num_events", 16); @@ -338,7 +343,6 @@ static void zynq_init(MachineState *machine) sysbus_mmio_map(busdev, 0, 0xF8007000); zynq_binfo.ram_size = machine->ram_size; - zynq_binfo.nb_cpus = 1; zynq_binfo.board_id = 0xd32; zynq_binfo.loader_start = 0; zynq_binfo.board_setup_addr = BOARD_SETUP_ADDR; diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c index 5bca360dce..37fc9b919c 100644 --- a/hw/arm/xlnx-versal-virt.c +++ b/hw/arm/xlnx-versal-virt.c @@ -15,7 +15,6 @@ #include "sysemu/device_tree.h" #include "hw/boards.h" #include "hw/sysbus.h" -#include "hw/arm/sysbus-fdt.h" #include "hw/arm/fdt.h" #include "cpu.h" #include "hw/qdev-properties.h" @@ -25,6 +24,8 @@ #define TYPE_XLNX_VERSAL_VIRT_MACHINE MACHINE_TYPE_NAME("xlnx-versal-virt") OBJECT_DECLARE_SIMPLE_TYPE(VersalVirt, XLNX_VERSAL_VIRT_MACHINE) +#define XLNX_VERSAL_NUM_OSPI_FLASH 4 + struct VersalVirt { MachineState parent_obj; @@ -356,6 +357,61 @@ static void fdt_add_rtc_node(VersalVirt *s) g_free(name); } +static void fdt_add_bbram_node(VersalVirt *s) +{ + const char compat[] = TYPE_XLNX_BBRAM; + const char interrupt_names[] = "bbram-error"; + char *name = g_strdup_printf("/bbram@%x", MM_PMC_BBRAM_CTRL); + + qemu_fdt_add_subnode(s->fdt, name); + + qemu_fdt_setprop_cells(s->fdt, name, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, VERSAL_PMC_APB_IRQ, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop(s->fdt, name, "interrupt-names", + interrupt_names, sizeof(interrupt_names)); + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + 2, MM_PMC_BBRAM_CTRL, + 2, MM_PMC_BBRAM_CTRL_SIZE); + qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat)); + g_free(name); +} + +static void fdt_add_efuse_ctrl_node(VersalVirt *s) +{ + const char compat[] = TYPE_XLNX_VERSAL_EFUSE_CTRL; + const char interrupt_names[] = "pmc_efuse"; + char *name = g_strdup_printf("/pmc_efuse@%x", MM_PMC_EFUSE_CTRL); + + qemu_fdt_add_subnode(s->fdt, name); + + qemu_fdt_setprop_cells(s->fdt, name, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, VERSAL_EFUSE_IRQ, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop(s->fdt, name, "interrupt-names", + interrupt_names, sizeof(interrupt_names)); + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + 2, MM_PMC_EFUSE_CTRL, + 2, MM_PMC_EFUSE_CTRL_SIZE); + qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat)); + g_free(name); +} + +static void fdt_add_efuse_cache_node(VersalVirt *s) +{ + const char compat[] = TYPE_XLNX_VERSAL_EFUSE_CACHE; + char *name = g_strdup_printf("/xlnx_pmc_efuse_cache@%x", + MM_PMC_EFUSE_CACHE); + + qemu_fdt_add_subnode(s->fdt, name); + + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + 2, MM_PMC_EFUSE_CACHE, + 2, MM_PMC_EFUSE_CACHE_SIZE); + qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat)); + g_free(name); +} + static void fdt_nop_memory_nodes(void *fdt, Error **errp) { Error *err = NULL; @@ -510,6 +566,30 @@ static void create_virtio_regions(VersalVirt *s) } } +static void bbram_attach_drive(XlnxBBRam *dev) +{ + DriveInfo *dinfo; + BlockBackend *blk; + + dinfo = 
drive_get_by_index(IF_PFLASH, 0); + blk = dinfo ? blk_by_legacy_dinfo(dinfo) : NULL; + if (blk) { + qdev_prop_set_drive(DEVICE(dev), "drive", blk); + } +} + +static void efuse_attach_drive(XlnxEFuse *dev) +{ + DriveInfo *dinfo; + BlockBackend *blk; + + dinfo = drive_get_by_index(IF_PFLASH, 1); + blk = dinfo ? blk_by_legacy_dinfo(dinfo) : NULL; + if (blk) { + qdev_prop_set_drive(DEVICE(dev), "drive", blk); + } +} + static void sd_plugin_card(SDHCIState *sd, DriveInfo *di) { BlockBackend *blk = di ? blk_by_legacy_dinfo(di) : NULL; @@ -547,6 +627,9 @@ static void versal_virt_init(MachineState *machine) * When loading an OS, we turn on QEMU's PSCI implementation with SMC * as the PSCI conduit. When there's no -kernel, we assume the user * provides EL3 firmware to handle PSCI. + * + * Even if the user provides a kernel filename, arm_load_kernel() + * may suppress PSCI if it's going to boot that guest code at EL3. */ if (machine->kernel_filename) { psci_conduit = QEMU_PSCI_CONDUIT_SMC; @@ -556,8 +639,6 @@ static void versal_virt_init(MachineState *machine) TYPE_XLNX_VERSAL); object_property_set_link(OBJECT(&s->soc), "ddr", OBJECT(machine->ram), &error_abort); - object_property_set_int(OBJECT(&s->soc), "psci-conduit", psci_conduit, - &error_abort); sysbus_realize(SYS_BUS_DEVICE(&s->soc), &error_fatal); fdt_create(s); @@ -570,6 +651,9 @@ static void versal_virt_init(MachineState *machine) fdt_add_usb_xhci_nodes(s); fdt_add_sd_nodes(s); fdt_add_rtc_node(s); + fdt_add_bbram_node(s); + fdt_add_efuse_ctrl_node(s); + fdt_add_efuse_cache_node(s); fdt_add_cpu_nodes(s, psci_conduit); fdt_add_clk_node(s, "/clk125", 125000000, s->phandle.clk_125Mhz); fdt_add_clk_node(s, "/clk25", 25000000, s->phandle.clk_25Mhz); @@ -579,28 +663,50 @@ static void versal_virt_init(MachineState *machine) memory_region_add_subregion_overlap(get_system_memory(), 0, &s->soc.fpd.apu.mr, 0); + /* Attach bbram backend, if given */ + bbram_attach_drive(&s->soc.pmc.bbram); + + /* Attach efuse backend, if given */ + efuse_attach_drive(&s->soc.pmc.efuse); + /* Plugin SD cards. */ for (i = 0; i < ARRAY_SIZE(s->soc.pmc.iou.sd); i++) { - sd_plugin_card(&s->soc.pmc.iou.sd[i], drive_get_next(IF_SD)); + sd_plugin_card(&s->soc.pmc.iou.sd[i], + drive_get(IF_SD, 0, i)); } s->binfo.ram_size = machine->ram_size; s->binfo.loader_start = 0x0; s->binfo.get_dtb = versal_virt_get_dtb; s->binfo.modify_dtb = versal_virt_modify_dtb; - if (machine->kernel_filename) { - arm_load_kernel(&s->soc.fpd.apu.cpu[0], machine, &s->binfo); - } else { - AddressSpace *as = arm_boot_address_space(&s->soc.fpd.apu.cpu[0], - &s->binfo); + s->binfo.psci_conduit = psci_conduit; + if (!machine->kernel_filename) { /* Some boot-loaders (e.g u-boot) don't like blobs at address 0 (NULL). * Offset things by 4K. 
*/ s->binfo.loader_start = 0x1000; s->binfo.dtb_limit = 0x1000000; - if (arm_load_dtb(s->binfo.loader_start, - &s->binfo, s->binfo.dtb_limit, as, machine) < 0) { - exit(EXIT_FAILURE); + } + arm_load_kernel(&s->soc.fpd.apu.cpu[0], machine, &s->binfo); + + for (i = 0; i < XLNX_VERSAL_NUM_OSPI_FLASH; i++) { + BusState *spi_bus; + DeviceState *flash_dev; + qemu_irq cs_line; + DriveInfo *dinfo = drive_get(IF_MTD, 0, i); + + spi_bus = qdev_get_child_bus(DEVICE(&s->soc.pmc.iou.ospi), "spi0"); + + flash_dev = qdev_new("mt35xu01g"); + if (dinfo) { + qdev_prop_set_drive_err(flash_dev, "drive", + blk_by_legacy_dinfo(dinfo), &error_fatal); } + qdev_realize_and_unref(flash_dev, spi_bus, &error_fatal); + + cs_line = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->soc.pmc.iou.ospi), + i + 1, cs_line); } } @@ -614,9 +720,9 @@ static void versal_virt_machine_class_init(ObjectClass *oc, void *data) mc->desc = "Xilinx Versal Virtual development board"; mc->init = versal_virt_init; - mc->min_cpus = XLNX_VERSAL_NR_ACPUS; - mc->max_cpus = XLNX_VERSAL_NR_ACPUS; - mc->default_cpus = XLNX_VERSAL_NR_ACPUS; + mc->min_cpus = XLNX_VERSAL_NR_ACPUS + XLNX_VERSAL_NR_RCPUS; + mc->max_cpus = XLNX_VERSAL_NR_ACPUS + XLNX_VERSAL_NR_RCPUS; + mc->default_cpus = XLNX_VERSAL_NR_ACPUS + XLNX_VERSAL_NR_RCPUS; mc->no_cdrom = true; mc->default_ram_id = "ddr"; } diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c index fb776834f7..57276e1506 100644 --- a/hw/arm/xlnx-versal.c +++ b/hw/arm/xlnx-versal.c @@ -21,24 +21,33 @@ #include "kvm_arm.h" #include "hw/misc/unimp.h" #include "hw/arm/xlnx-versal.h" +#include "qemu/log.h" +#include "hw/sysbus.h" #define XLNX_VERSAL_ACPU_TYPE ARM_CPU_TYPE_NAME("cortex-a72") +#define XLNX_VERSAL_RCPU_TYPE ARM_CPU_TYPE_NAME("cortex-r5f") #define GEM_REVISION 0x40070106 +#define VERSAL_NUM_PMC_APB_IRQS 3 +#define NUM_OSPI_IRQ_LINES 3 + static void versal_create_apu_cpus(Versal *s) { int i; + object_initialize_child(OBJECT(s), "apu-cluster", &s->fpd.apu.cluster, + TYPE_CPU_CLUSTER); + qdev_prop_set_uint32(DEVICE(&s->fpd.apu.cluster), "cluster-id", 0); + for (i = 0; i < ARRAY_SIZE(s->fpd.apu.cpu); i++) { Object *obj; - object_initialize_child(OBJECT(s), "apu-cpu[*]", &s->fpd.apu.cpu[i], + object_initialize_child(OBJECT(&s->fpd.apu.cluster), + "apu-cpu[*]", &s->fpd.apu.cpu[i], XLNX_VERSAL_ACPU_TYPE); obj = OBJECT(&s->fpd.apu.cpu[i]); - object_property_set_int(obj, "psci-conduit", s->cfg.psci_conduit, - &error_abort); if (i) { - /* Secondary CPUs start in PSCI powered-down state */ + /* Secondary CPUs start in powered-down state */ object_property_set_bool(obj, "start-powered-off", true, &error_abort); } @@ -49,6 +58,8 @@ static void versal_create_apu_cpus(Versal *s) &error_abort); qdev_realize(DEVICE(obj), NULL, &error_fatal); } + + qdev_realize(DEVICE(&s->fpd.apu.cluster), NULL, &error_fatal); } static void versal_create_apu_gic(Versal *s, qemu_irq *pic) @@ -120,6 +131,35 @@ static void versal_create_apu_gic(Versal *s, qemu_irq *pic) } } +static void versal_create_rpu_cpus(Versal *s) +{ + int i; + + object_initialize_child(OBJECT(s), "rpu-cluster", &s->lpd.rpu.cluster, + TYPE_CPU_CLUSTER); + qdev_prop_set_uint32(DEVICE(&s->lpd.rpu.cluster), "cluster-id", 1); + + for (i = 0; i < ARRAY_SIZE(s->lpd.rpu.cpu); i++) { + Object *obj; + + object_initialize_child(OBJECT(&s->lpd.rpu.cluster), + "rpu-cpu[*]", &s->lpd.rpu.cpu[i], + XLNX_VERSAL_RCPU_TYPE); + obj = OBJECT(&s->lpd.rpu.cpu[i]); + object_property_set_bool(obj, "start-powered-off", true, + &error_abort); + + 
object_property_set_int(obj, "mp-affinity", 0x100 | i, &error_abort); + object_property_set_int(obj, "core-count", ARRAY_SIZE(s->lpd.rpu.cpu), + &error_abort); + object_property_set_link(obj, "memory", OBJECT(&s->lpd.rpu.mr), + &error_abort); + qdev_realize(DEVICE(obj), NULL, &error_fatal); + } + + qdev_realize(DEVICE(&s->lpd.rpu.cluster), NULL, &error_fatal); +} + static void versal_create_uarts(Versal *s, qemu_irq *pic) { int i; @@ -218,6 +258,8 @@ static void versal_create_admas(Versal *s, qemu_irq *pic) TYPE_XLNX_ZDMA); dev = DEVICE(&s->lpd.iou.adma[i]); object_property_set_int(OBJECT(dev), "bus-width", 128, &error_abort); + object_property_set_link(OBJECT(dev), "dma", + OBJECT(get_system_memory()), &error_fatal); sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal); mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); @@ -258,6 +300,26 @@ static void versal_create_sds(Versal *s, qemu_irq *pic) } } +static void versal_create_pmc_apb_irq_orgate(Versal *s, qemu_irq *pic) +{ + DeviceState *orgate; + + /* + * The VERSAL_PMC_APB_IRQ is an 'or' of the interrupts from the following + * models: + * - RTC + * - BBRAM + * - PMC SLCR + */ + object_initialize_child(OBJECT(s), "pmc-apb-irq-orgate", + &s->pmc.apb_irq_orgate, TYPE_OR_IRQ); + orgate = DEVICE(&s->pmc.apb_irq_orgate); + object_property_set_int(OBJECT(orgate), + "num-lines", VERSAL_NUM_PMC_APB_IRQS, &error_fatal); + qdev_realize(orgate, NULL, &error_fatal); + qdev_connect_gpio_out(orgate, 0, pic[VERSAL_PMC_APB_IRQ]); +} + static void versal_create_rtc(Versal *s, qemu_irq *pic) { SysBusDevice *sbd; @@ -275,7 +337,8 @@ static void versal_create_rtc(Versal *s, qemu_irq *pic) * TODO: Connect the ALARM and SECONDS interrupts once our RTC model * supports them. */ - sysbus_connect_irq(sbd, 1, pic[VERSAL_RTC_APB_ERR_IRQ]); + sysbus_connect_irq(sbd, 1, + qdev_get_gpio_in(DEVICE(&s->pmc.apb_irq_orgate), 0)); } static void versal_create_xrams(Versal *s, qemu_irq *pic) @@ -312,6 +375,221 @@ static void versal_create_xrams(Versal *s, qemu_irq *pic) } } +static void versal_create_bbram(Versal *s, qemu_irq *pic) +{ + SysBusDevice *sbd; + + object_initialize_child_with_props(OBJECT(s), "bbram", &s->pmc.bbram, + sizeof(s->pmc.bbram), TYPE_XLNX_BBRAM, + &error_fatal, + "crc-zpads", "0", + NULL); + sbd = SYS_BUS_DEVICE(&s->pmc.bbram); + + sysbus_realize(sbd, &error_fatal); + memory_region_add_subregion(&s->mr_ps, MM_PMC_BBRAM_CTRL, + sysbus_mmio_get_region(sbd, 0)); + sysbus_connect_irq(sbd, 0, + qdev_get_gpio_in(DEVICE(&s->pmc.apb_irq_orgate), 1)); +} + +static void versal_realize_efuse_part(Versal *s, Object *dev, hwaddr base) +{ + SysBusDevice *part = SYS_BUS_DEVICE(dev); + + object_property_set_link(OBJECT(part), "efuse", + OBJECT(&s->pmc.efuse), &error_abort); + + sysbus_realize(part, &error_abort); + memory_region_add_subregion(&s->mr_ps, base, + sysbus_mmio_get_region(part, 0)); +} + +static void versal_create_efuse(Versal *s, qemu_irq *pic) +{ + Object *bits = OBJECT(&s->pmc.efuse); + Object *ctrl = OBJECT(&s->pmc.efuse_ctrl); + Object *cache = OBJECT(&s->pmc.efuse_cache); + + object_initialize_child(OBJECT(s), "efuse-ctrl", &s->pmc.efuse_ctrl, + TYPE_XLNX_VERSAL_EFUSE_CTRL); + + object_initialize_child(OBJECT(s), "efuse-cache", &s->pmc.efuse_cache, + TYPE_XLNX_VERSAL_EFUSE_CACHE); + + object_initialize_child_with_props(ctrl, "xlnx-efuse@0", bits, + sizeof(s->pmc.efuse), + TYPE_XLNX_EFUSE, &error_abort, + "efuse-nr", "3", + "efuse-size", "8192", + NULL); + + qdev_realize(DEVICE(bits), NULL, &error_abort); + versal_realize_efuse_part(s, ctrl, 
MM_PMC_EFUSE_CTRL); + versal_realize_efuse_part(s, cache, MM_PMC_EFUSE_CACHE); + + sysbus_connect_irq(SYS_BUS_DEVICE(ctrl), 0, pic[VERSAL_EFUSE_IRQ]); +} + +static void versal_create_pmc_iou_slcr(Versal *s, qemu_irq *pic) +{ + SysBusDevice *sbd; + + object_initialize_child(OBJECT(s), "versal-pmc-iou-slcr", &s->pmc.iou.slcr, + TYPE_XILINX_VERSAL_PMC_IOU_SLCR); + + sbd = SYS_BUS_DEVICE(&s->pmc.iou.slcr); + sysbus_realize(sbd, &error_fatal); + + memory_region_add_subregion(&s->mr_ps, MM_PMC_PMC_IOU_SLCR, + sysbus_mmio_get_region(sbd, 0)); + + sysbus_connect_irq(sbd, 0, + qdev_get_gpio_in(DEVICE(&s->pmc.apb_irq_orgate), 2)); +} + +static void versal_create_ospi(Versal *s, qemu_irq *pic) +{ + SysBusDevice *sbd; + MemoryRegion *mr_dac; + qemu_irq ospi_mux_sel; + DeviceState *orgate; + + memory_region_init(&s->pmc.iou.ospi.linear_mr, OBJECT(s), + "versal-ospi-linear-mr" , MM_PMC_OSPI_DAC_SIZE); + + object_initialize_child(OBJECT(s), "versal-ospi", &s->pmc.iou.ospi.ospi, + TYPE_XILINX_VERSAL_OSPI); + + mr_dac = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->pmc.iou.ospi.ospi), 1); + memory_region_add_subregion(&s->pmc.iou.ospi.linear_mr, 0x0, mr_dac); + + /* Create the OSPI destination DMA */ + object_initialize_child(OBJECT(s), "versal-ospi-dma-dst", + &s->pmc.iou.ospi.dma_dst, + TYPE_XLNX_CSU_DMA); + + object_property_set_link(OBJECT(&s->pmc.iou.ospi.dma_dst), + "dma", OBJECT(get_system_memory()), + &error_abort); + + sbd = SYS_BUS_DEVICE(&s->pmc.iou.ospi.dma_dst); + sysbus_realize(sbd, &error_fatal); + + memory_region_add_subregion(&s->mr_ps, MM_PMC_OSPI_DMA_DST, + sysbus_mmio_get_region(sbd, 0)); + + /* Create the OSPI source DMA */ + object_initialize_child(OBJECT(s), "versal-ospi-dma-src", + &s->pmc.iou.ospi.dma_src, + TYPE_XLNX_CSU_DMA); + + object_property_set_bool(OBJECT(&s->pmc.iou.ospi.dma_src), "is-dst", + false, &error_abort); + + object_property_set_link(OBJECT(&s->pmc.iou.ospi.dma_src), + "dma", OBJECT(mr_dac), &error_abort); + + object_property_set_link(OBJECT(&s->pmc.iou.ospi.dma_src), + "stream-connected-dma", + OBJECT(&s->pmc.iou.ospi.dma_dst), + &error_abort); + + sbd = SYS_BUS_DEVICE(&s->pmc.iou.ospi.dma_src); + sysbus_realize(sbd, &error_fatal); + + memory_region_add_subregion(&s->mr_ps, MM_PMC_OSPI_DMA_SRC, + sysbus_mmio_get_region(sbd, 0)); + + /* Realize the OSPI */ + object_property_set_link(OBJECT(&s->pmc.iou.ospi.ospi), "dma-src", + OBJECT(&s->pmc.iou.ospi.dma_src), &error_abort); + + sbd = SYS_BUS_DEVICE(&s->pmc.iou.ospi.ospi); + sysbus_realize(sbd, &error_fatal); + + memory_region_add_subregion(&s->mr_ps, MM_PMC_OSPI, + sysbus_mmio_get_region(sbd, 0)); + + memory_region_add_subregion(&s->mr_ps, MM_PMC_OSPI_DAC, + &s->pmc.iou.ospi.linear_mr); + + /* ospi_mux_sel */ + ospi_mux_sel = qdev_get_gpio_in_named(DEVICE(&s->pmc.iou.ospi.ospi), + "ospi-mux-sel", 0); + qdev_connect_gpio_out_named(DEVICE(&s->pmc.iou.slcr), "ospi-mux-sel", 0, + ospi_mux_sel); + + /* OSPI irq */ + object_initialize_child(OBJECT(s), "ospi-irq-orgate", + &s->pmc.iou.ospi.irq_orgate, TYPE_OR_IRQ); + object_property_set_int(OBJECT(&s->pmc.iou.ospi.irq_orgate), + "num-lines", NUM_OSPI_IRQ_LINES, &error_fatal); + + orgate = DEVICE(&s->pmc.iou.ospi.irq_orgate); + qdev_realize(orgate, NULL, &error_fatal); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pmc.iou.ospi.ospi), 0, + qdev_get_gpio_in(orgate, 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pmc.iou.ospi.dma_src), 0, + qdev_get_gpio_in(orgate, 1)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pmc.iou.ospi.dma_dst), 0, + qdev_get_gpio_in(orgate, 2)); + + 
qdev_connect_gpio_out(orgate, 0, pic[VERSAL_OSPI_IRQ]); +} + +static void versal_create_crl(Versal *s, qemu_irq *pic) +{ + SysBusDevice *sbd; + int i; + + object_initialize_child(OBJECT(s), "crl", &s->lpd.crl, + TYPE_XLNX_VERSAL_CRL); + sbd = SYS_BUS_DEVICE(&s->lpd.crl); + + for (i = 0; i < ARRAY_SIZE(s->lpd.rpu.cpu); i++) { + g_autofree gchar *name = g_strdup_printf("cpu_r5[%d]", i); + + object_property_set_link(OBJECT(&s->lpd.crl), + name, OBJECT(&s->lpd.rpu.cpu[i]), + &error_abort); + } + + for (i = 0; i < ARRAY_SIZE(s->lpd.iou.gem); i++) { + g_autofree gchar *name = g_strdup_printf("gem[%d]", i); + + object_property_set_link(OBJECT(&s->lpd.crl), + name, OBJECT(&s->lpd.iou.gem[i]), + &error_abort); + } + + for (i = 0; i < ARRAY_SIZE(s->lpd.iou.adma); i++) { + g_autofree gchar *name = g_strdup_printf("adma[%d]", i); + + object_property_set_link(OBJECT(&s->lpd.crl), + name, OBJECT(&s->lpd.iou.adma[i]), + &error_abort); + } + + for (i = 0; i < ARRAY_SIZE(s->lpd.iou.uart); i++) { + g_autofree gchar *name = g_strdup_printf("uart[%d]", i); + + object_property_set_link(OBJECT(&s->lpd.crl), + name, OBJECT(&s->lpd.iou.uart[i]), + &error_abort); + } + + object_property_set_link(OBJECT(&s->lpd.crl), + "usb", OBJECT(&s->lpd.iou.usb), + &error_abort); + + sysbus_realize(sbd, &error_fatal); + memory_region_add_subregion(&s->mr_ps, MM_CRL, + sysbus_mmio_get_region(sbd, 0)); + sysbus_connect_irq(sbd, 0, pic[VERSAL_CRL_IRQ]); +} + /* This takes the board allocated linear DDR memory and creates aliases * for each split DDR range/aperture on the Versal address map. */ @@ -368,20 +646,68 @@ static void versal_unimp_area(Versal *s, const char *name, memory_region_add_subregion(mr, base, mr_dev); } +static void versal_unimp_sd_emmc_sel(void *opaque, int n, int level) +{ + qemu_log_mask(LOG_UNIMP, + "Selecting between enabling SD mode or eMMC mode on " + "controller %d is not yet implemented\n", n); +} + +static void versal_unimp_qspi_ospi_mux_sel(void *opaque, int n, int level) +{ + qemu_log_mask(LOG_UNIMP, + "Selecting between enabling the QSPI or OSPI linear address " + "region is not yet implemented\n"); +} + +static void versal_unimp_irq_parity_imr(void *opaque, int n, int level) +{ + qemu_log_mask(LOG_UNIMP, + "PMC SLCR parity interrupt behaviour " + "is not yet implemented\n"); +} + static void versal_unimp(Versal *s) { + qemu_irq gpio_in; + versal_unimp_area(s, "psm", &s->mr_ps, MM_PSM_START, MM_PSM_END - MM_PSM_START); - versal_unimp_area(s, "crl", &s->mr_ps, - MM_CRL, MM_CRL_SIZE); versal_unimp_area(s, "crf", &s->mr_ps, MM_FPD_CRF, MM_FPD_CRF_SIZE); + versal_unimp_area(s, "apu", &s->mr_ps, + MM_FPD_FPD_APU, MM_FPD_FPD_APU_SIZE); versal_unimp_area(s, "crp", &s->mr_ps, MM_PMC_CRP, MM_PMC_CRP_SIZE); versal_unimp_area(s, "iou-scntr", &s->mr_ps, MM_IOU_SCNTR, MM_IOU_SCNTR_SIZE); versal_unimp_area(s, "iou-scntr-seucre", &s->mr_ps, MM_IOU_SCNTRS, MM_IOU_SCNTRS_SIZE); + + qdev_init_gpio_in_named(DEVICE(s), versal_unimp_sd_emmc_sel, + "sd-emmc-sel-dummy", 2); + qdev_init_gpio_in_named(DEVICE(s), versal_unimp_qspi_ospi_mux_sel, + "qspi-ospi-mux-sel-dummy", 1); + qdev_init_gpio_in_named(DEVICE(s), versal_unimp_irq_parity_imr, + "irq-parity-imr-dummy", 1); + + gpio_in = qdev_get_gpio_in_named(DEVICE(s), "sd-emmc-sel-dummy", 0); + qdev_connect_gpio_out_named(DEVICE(&s->pmc.iou.slcr), "sd-emmc-sel", 0, + gpio_in); + + gpio_in = qdev_get_gpio_in_named(DEVICE(s), "sd-emmc-sel-dummy", 1); + qdev_connect_gpio_out_named(DEVICE(&s->pmc.iou.slcr), "sd-emmc-sel", 1, + gpio_in); + + gpio_in = 
qdev_get_gpio_in_named(DEVICE(s), "qspi-ospi-mux-sel-dummy", 0); + qdev_connect_gpio_out_named(DEVICE(&s->pmc.iou.slcr), + "qspi-ospi-mux-sel", 0, + gpio_in); + + gpio_in = qdev_get_gpio_in_named(DEVICE(s), "irq-parity-imr-dummy", 0); + qdev_connect_gpio_out_named(DEVICE(&s->pmc.iou.slcr), + SYSBUS_DEVICE_GPIO_IRQ, 0, + gpio_in); } static void versal_realize(DeviceState *dev, Error **errp) @@ -391,13 +717,20 @@ static void versal_realize(DeviceState *dev, Error **errp) versal_create_apu_cpus(s); versal_create_apu_gic(s, pic); + versal_create_rpu_cpus(s); versal_create_uarts(s, pic); versal_create_usbs(s, pic); versal_create_gems(s, pic); versal_create_admas(s, pic); versal_create_sds(s, pic); + versal_create_pmc_apb_irq_orgate(s, pic); versal_create_rtc(s, pic); versal_create_xrams(s, pic); + versal_create_bbram(s, pic); + versal_create_efuse(s, pic); + versal_create_pmc_iou_slcr(s, pic); + versal_create_ospi(s, pic); + versal_create_crl(s, pic); versal_map_ddr(s); versal_unimp(s); @@ -407,6 +740,8 @@ static void versal_realize(DeviceState *dev, Error **errp) memory_region_add_subregion_overlap(&s->mr_ps, MM_OCM, &s->lpd.mr_ocm, 0); memory_region_add_subregion_overlap(&s->fpd.apu.mr, 0, &s->mr_ps, 0); + memory_region_add_subregion_overlap(&s->lpd.rpu.mr, 0, + &s->lpd.rpu.mr_ps_alias, 0); } static void versal_init(Object *obj) @@ -414,13 +749,15 @@ static void versal_init(Object *obj) Versal *s = XLNX_VERSAL(obj); memory_region_init(&s->fpd.apu.mr, obj, "mr-apu", UINT64_MAX); + memory_region_init(&s->lpd.rpu.mr, obj, "mr-rpu", UINT64_MAX); memory_region_init(&s->mr_ps, obj, "mr-ps-switch", UINT64_MAX); + memory_region_init_alias(&s->lpd.rpu.mr_ps_alias, OBJECT(s), + "mr-rpu-ps-alias", &s->mr_ps, 0, UINT64_MAX); } static Property versal_properties[] = { DEFINE_PROP_LINK("ddr", Versal, cfg.mr_ddr, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_UINT32("psci-conduit", Versal, cfg.psci_conduit, 0), DEFINE_PROP_END_OF_LIST() }; diff --git a/hw/arm/xlnx-zcu102.c b/hw/arm/xlnx-zcu102.c index 6c6cb02e86..4c84bb932a 100644 --- a/hw/arm/xlnx-zcu102.c +++ b/hw/arm/xlnx-zcu102.c @@ -98,6 +98,30 @@ static void zcu102_modify_dtb(const struct arm_boot_info *binfo, void *fdt) } } +static void bbram_attach_drive(XlnxBBRam *dev) +{ + DriveInfo *dinfo; + BlockBackend *blk; + + dinfo = drive_get_by_index(IF_PFLASH, 2); + blk = dinfo ? blk_by_legacy_dinfo(dinfo) : NULL; + if (blk) { + qdev_prop_set_drive(DEVICE(dev), "drive", blk); + } +} + +static void efuse_attach_drive(XlnxEFuse *dev) +{ + DriveInfo *dinfo; + BlockBackend *blk; + + dinfo = drive_get_by_index(IF_PFLASH, 3); + blk = dinfo ? blk_by_legacy_dinfo(dinfo) : NULL; + if (blk) { + qdev_prop_set_drive(DEVICE(dev), "drive", blk); + } +} + static void xlnx_zcu102_init(MachineState *machine) { XlnxZCU102 *s = ZCU102_MACHINE(machine); @@ -136,10 +160,16 @@ static void xlnx_zcu102_init(MachineState *machine) qdev_realize(DEVICE(&s->soc), NULL, &error_fatal); + /* Attach bbram backend, if given */ + bbram_attach_drive(&s->soc.bbram); + + /* Attach efuse backend, if given */ + efuse_attach_drive(&s->soc.efuse); + /* Create and plug in the SD cards */ for (i = 0; i < XLNX_ZYNQMP_NUM_SDHCI; i++) { BusState *bus; - DriveInfo *di = drive_get_next(IF_SD); + DriveInfo *di = drive_get(IF_SD, 0, i); BlockBackend *blk = di ? 
blk_by_legacy_dinfo(di) : NULL; DeviceState *carddev; char *bus_name; @@ -160,7 +190,7 @@ static void xlnx_zcu102_init(MachineState *machine) BusState *spi_bus; DeviceState *flash_dev; qemu_irq cs_line; - DriveInfo *dinfo = drive_get_next(IF_MTD); + DriveInfo *dinfo = drive_get(IF_MTD, 0, i); gchar *bus_name = g_strdup_printf("spi%d", i); spi_bus = qdev_get_child_bus(DEVICE(&s->soc), bus_name); @@ -182,7 +212,7 @@ static void xlnx_zcu102_init(MachineState *machine) BusState *spi_bus; DeviceState *flash_dev; qemu_irq cs_line; - DriveInfo *dinfo = drive_get_next(IF_MTD); + DriveInfo *dinfo = drive_get(IF_MTD, 0, XLNX_ZYNQMP_NUM_SPIS + i); int bus = i / XLNX_ZYNQMP_NUM_QSPI_BUS_CS; gchar *bus_name = g_strdup_printf("qspi%d", bus); @@ -206,6 +236,7 @@ static void xlnx_zcu102_init(MachineState *machine) s->binfo.ram_size = ram_size; s->binfo.loader_start = 0; s->binfo.modify_dtb = zcu102_modify_dtb; + s->binfo.psci_conduit = QEMU_PSCI_CONDUIT_SMC; arm_load_kernel(s->soc.boot_cpu_ptr, machine, &s->binfo); } diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c index 3597e8db4d..5905a33015 100644 --- a/hw/arm/xlnx-zynqmp.c +++ b/hw/arm/xlnx-zynqmp.c @@ -20,6 +20,7 @@ #include "qemu/module.h" #include "hw/arm/xlnx-zynqmp.h" #include "hw/intc/arm_gic_common.h" +#include "hw/misc/unimp.h" #include "hw/boards.h" #include "sysemu/kvm.h" #include "sysemu/sysemu.h" @@ -49,12 +50,26 @@ #define LQSPI_ADDR 0xc0000000 #define QSPI_IRQ 15 #define QSPI_DMA_ADDR 0xff0f0800 +#define NUM_QSPI_IRQ_LINES 2 + +#define CRF_ADDR 0xfd1a0000 +#define CRF_IRQ 120 + +/* Serializer/Deserializer. */ +#define SERDES_ADDR 0xfd400000 +#define SERDES_SIZE 0x20000 #define DP_ADDR 0xfd4a0000 -#define DP_IRQ 113 +#define DP_IRQ 0x77 #define DPDMA_ADDR 0xfd4c0000 -#define DPDMA_IRQ 116 +#define DPDMA_IRQ 0x7a + +#define APU_ADDR 0xfd5c0000 +#define APU_IRQ 153 + +#define TTC0_ADDR 0xFF110000 +#define TTC0_IRQ 36 #define IPI_ADDR 0xFF300000 #define IPI_IRQ 64 @@ -62,6 +77,12 @@ #define RTC_ADDR 0xffa60000 #define RTC_IRQ 26 +#define BBRAM_ADDR 0xffcd0000 +#define BBRAM_IRQ 11 + +#define EFUSE_ADDR 0xffcc0000 +#define EFUSE_IRQ 87 + #define SDHCI_CAPABILITIES 0x280737ec6481 /* Datasheet: UG1085 (v1.7) */ static const uint64_t gem_addr[XLNX_ZYNQMP_NUM_GEMS] = { @@ -122,6 +143,14 @@ static const int adma_ch_intr[XLNX_ZYNQMP_NUM_ADMA_CH] = { 77, 78, 79, 80, 81, 82, 83, 84 }; +static const uint64_t usb_addr[XLNX_ZYNQMP_NUM_USB] = { + 0xFE200000, 0xFE300000 +}; + +static const int usb_intr[XLNX_ZYNQMP_NUM_USB] = { + 65, 70 +}; + typedef struct XlnxZynqMPGICRegion { int region_index; uint32_t address; @@ -184,7 +213,7 @@ static void xlnx_zynqmp_create_rpu(MachineState *ms, XlnxZynqMPState *s, const char *boot_cpu, Error **errp) { int i; - int num_rpus = MIN(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS, + int num_rpus = MIN((int)(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS), XLNX_ZYNQMP_NUM_RPU_CPUS); if (num_rpus <= 0) { @@ -205,7 +234,9 @@ static void xlnx_zynqmp_create_rpu(MachineState *ms, XlnxZynqMPState *s, name = object_get_canonical_path_component(OBJECT(&s->rpu_cpu[i])); if (strcmp(name, boot_cpu)) { - /* Secondary CPUs start in PSCI powered-down state */ + /* + * Secondary CPUs start in powered-down state. 
+ */ object_property_set_bool(OBJECT(&s->rpu_cpu[i]), "start-powered-off", true, &error_abort); } else { @@ -222,6 +253,124 @@ static void xlnx_zynqmp_create_rpu(MachineState *ms, XlnxZynqMPState *s, qdev_realize(DEVICE(&s->rpu_cluster), NULL, &error_fatal); } +static void xlnx_zynqmp_create_bbram(XlnxZynqMPState *s, qemu_irq *gic) +{ + SysBusDevice *sbd; + + object_initialize_child_with_props(OBJECT(s), "bbram", &s->bbram, + sizeof(s->bbram), TYPE_XLNX_BBRAM, + &error_fatal, + "crc-zpads", "1", + NULL); + sbd = SYS_BUS_DEVICE(&s->bbram); + + sysbus_realize(sbd, &error_fatal); + sysbus_mmio_map(sbd, 0, BBRAM_ADDR); + sysbus_connect_irq(sbd, 0, gic[BBRAM_IRQ]); +} + +static void xlnx_zynqmp_create_efuse(XlnxZynqMPState *s, qemu_irq *gic) +{ + Object *bits = OBJECT(&s->efuse); + Object *ctrl = OBJECT(&s->efuse_ctrl); + SysBusDevice *sbd; + + object_initialize_child(OBJECT(s), "efuse-ctrl", &s->efuse_ctrl, + TYPE_XLNX_ZYNQMP_EFUSE); + + object_initialize_child_with_props(ctrl, "xlnx-efuse@0", bits, + sizeof(s->efuse), + TYPE_XLNX_EFUSE, &error_abort, + "efuse-nr", "3", + "efuse-size", "2048", + NULL); + + qdev_realize(DEVICE(bits), NULL, &error_abort); + object_property_set_link(ctrl, "efuse", bits, &error_abort); + + sbd = SYS_BUS_DEVICE(ctrl); + sysbus_realize(sbd, &error_abort); + sysbus_mmio_map(sbd, 0, EFUSE_ADDR); + sysbus_connect_irq(sbd, 0, gic[EFUSE_IRQ]); +} + +static void xlnx_zynqmp_create_apu_ctrl(XlnxZynqMPState *s, qemu_irq *gic) +{ + SysBusDevice *sbd; + int i; + + object_initialize_child(OBJECT(s), "apu-ctrl", &s->apu_ctrl, + TYPE_XLNX_ZYNQMP_APU_CTRL); + sbd = SYS_BUS_DEVICE(&s->apu_ctrl); + + for (i = 0; i < XLNX_ZYNQMP_NUM_APU_CPUS; i++) { + g_autofree gchar *name = g_strdup_printf("cpu%d", i); + + object_property_set_link(OBJECT(&s->apu_ctrl), name, + OBJECT(&s->apu_cpu[i]), &error_abort); + } + + sysbus_realize(sbd, &error_fatal); + sysbus_mmio_map(sbd, 0, APU_ADDR); + sysbus_connect_irq(sbd, 0, gic[APU_IRQ]); +} + +static void xlnx_zynqmp_create_crf(XlnxZynqMPState *s, qemu_irq *gic) +{ + SysBusDevice *sbd; + + object_initialize_child(OBJECT(s), "crf", &s->crf, TYPE_XLNX_ZYNQMP_CRF); + sbd = SYS_BUS_DEVICE(&s->crf); + + sysbus_realize(sbd, &error_fatal); + sysbus_mmio_map(sbd, 0, CRF_ADDR); + sysbus_connect_irq(sbd, 0, gic[CRF_IRQ]); +} + +static void xlnx_zynqmp_create_ttc(XlnxZynqMPState *s, qemu_irq *gic) +{ + SysBusDevice *sbd; + int i, irq; + + for (i = 0; i < XLNX_ZYNQMP_NUM_TTC; i++) { + object_initialize_child(OBJECT(s), "ttc[*]", &s->ttc[i], + TYPE_CADENCE_TTC); + sbd = SYS_BUS_DEVICE(&s->ttc[i]); + + sysbus_realize(sbd, &error_fatal); + sysbus_mmio_map(sbd, 0, TTC0_ADDR + i * 0x10000); + for (irq = 0; irq < 3; irq++) { + sysbus_connect_irq(sbd, irq, gic[TTC0_IRQ + i * 3 + irq]); + } + } +} + +static void xlnx_zynqmp_create_unimp_mmio(XlnxZynqMPState *s) +{ + static const struct UnimpInfo { + const char *name; + hwaddr base; + hwaddr size; + } unimp_areas[ARRAY_SIZE(s->mr_unimp)] = { + { .name = "serdes", SERDES_ADDR, SERDES_SIZE }, + }; + unsigned int nr; + + for (nr = 0; nr < ARRAY_SIZE(unimp_areas); nr++) { + const struct UnimpInfo *info = &unimp_areas[nr]; + DeviceState *dev = qdev_new(TYPE_UNIMPLEMENTED_DEVICE); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + assert(info->name && info->base && info->size > 0); + qdev_prop_set_string(dev, "name", info->name); + qdev_prop_set_uint64(dev, "size", info->size); + object_property_add_child(OBJECT(s), info->name, OBJECT(dev)); + + sysbus_realize_and_unref(sbd, &error_fatal); + sysbus_mmio_map(sbd, 0, info->base); 
+ } +} + static void xlnx_zynqmp_init(Object *obj) { MachineState *ms = MACHINE(qdev_get_machine()); @@ -285,6 +434,12 @@ static void xlnx_zynqmp_init(Object *obj) } object_initialize_child(obj, "qspi-dma", &s->qspi_dma, TYPE_XLNX_CSU_DMA); + object_initialize_child(obj, "qspi-irq-orgate", + &s->qspi_irq_orgate, TYPE_OR_IRQ); + + for (i = 0; i < XLNX_ZYNQMP_NUM_USB; i++) { + object_initialize_child(obj, "usb[*]", &s->usb[i], TYPE_USB_DWC3); + } } static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) @@ -358,12 +513,11 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) for (i = 0; i < num_apus; i++) { const char *name; - object_property_set_int(OBJECT(&s->apu_cpu[i]), "psci-conduit", - QEMU_PSCI_CONDUIT_SMC, &error_abort); - name = object_get_canonical_path_component(OBJECT(&s->apu_cpu[i])); if (strcmp(name, boot_cpu)) { - /* Secondary CPUs start in PSCI powered-down state */ + /* + * Secondary CPUs start in powered-down state. + */ object_property_set_bool(OBJECT(&s->apu_cpu[i]), "start-powered-off", true, &error_abort); } else { @@ -570,26 +724,6 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) g_free(bus_name); } - if (!sysbus_realize(SYS_BUS_DEVICE(&s->qspi), errp)) { - return; - } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->qspi), 0, QSPI_ADDR); - sysbus_mmio_map(SYS_BUS_DEVICE(&s->qspi), 1, LQSPI_ADDR); - sysbus_connect_irq(SYS_BUS_DEVICE(&s->qspi), 0, gic_spi[QSPI_IRQ]); - - for (i = 0; i < XLNX_ZYNQMP_NUM_QSPI_BUS; i++) { - gchar *bus_name; - gchar *target_bus; - - /* Alias controller SPI bus to the SoC itself */ - bus_name = g_strdup_printf("qspi%d", i); - target_bus = g_strdup_printf("spi%d", i); - object_property_add_alias(OBJECT(s), bus_name, - OBJECT(&s->qspi), target_bus); - g_free(bus_name); - g_free(target_bus); - } - if (!sysbus_realize(SYS_BUS_DEVICE(&s->dp), errp)) { return; } @@ -616,11 +750,22 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->rtc), 0, RTC_ADDR); sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0, gic_spi[RTC_IRQ]); + xlnx_zynqmp_create_bbram(s, gic_spi); + xlnx_zynqmp_create_efuse(s, gic_spi); + xlnx_zynqmp_create_apu_ctrl(s, gic_spi); + xlnx_zynqmp_create_crf(s, gic_spi); + xlnx_zynqmp_create_ttc(s, gic_spi); + xlnx_zynqmp_create_unimp_mmio(s); + for (i = 0; i < XLNX_ZYNQMP_NUM_GDMA_CH; i++) { if (!object_property_set_uint(OBJECT(&s->gdma[i]), "bus-width", 128, errp)) { return; } + if (!object_property_set_link(OBJECT(&s->gdma[i]), "dma", + OBJECT(system_memory), errp)) { + return; + } if (!sysbus_realize(SYS_BUS_DEVICE(&s->gdma[i]), errp)) { return; } @@ -631,6 +776,10 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) } for (i = 0; i < XLNX_ZYNQMP_NUM_ADMA_CH; i++) { + if (!object_property_set_link(OBJECT(&s->adma[i]), "dma", + OBJECT(system_memory), errp)) { + return; + } if (!sysbus_realize(SYS_BUS_DEVICE(&s->adma[i]), errp)) { return; } @@ -640,14 +789,67 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) gic_spi[adma_ch_intr[i]]); } + object_property_set_int(OBJECT(&s->qspi_irq_orgate), + "num-lines", NUM_QSPI_IRQ_LINES, &error_fatal); + qdev_realize(DEVICE(&s->qspi_irq_orgate), NULL, &error_fatal); + qdev_connect_gpio_out(DEVICE(&s->qspi_irq_orgate), 0, gic_spi[QSPI_IRQ]); + + if (!object_property_set_link(OBJECT(&s->qspi_dma), "dma", + OBJECT(system_memory), errp)) { + return; + } if (!sysbus_realize(SYS_BUS_DEVICE(&s->qspi_dma), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->qspi_dma), 0, QSPI_DMA_ADDR); - 
sysbus_connect_irq(SYS_BUS_DEVICE(&s->qspi_dma), 0, gic_spi[QSPI_IRQ]); - object_property_set_link(OBJECT(&s->qspi), "stream-connected-dma", - OBJECT(&s->qspi_dma), errp); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->qspi_dma), 0, + qdev_get_gpio_in(DEVICE(&s->qspi_irq_orgate), 0)); + + if (!object_property_set_link(OBJECT(&s->qspi), "stream-connected-dma", + OBJECT(&s->qspi_dma), errp)) { + return; + } + if (!sysbus_realize(SYS_BUS_DEVICE(&s->qspi), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->qspi), 0, QSPI_ADDR); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->qspi), 1, LQSPI_ADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->qspi), 0, + qdev_get_gpio_in(DEVICE(&s->qspi_irq_orgate), 1)); + + for (i = 0; i < XLNX_ZYNQMP_NUM_QSPI_BUS; i++) { + g_autofree gchar *bus_name = g_strdup_printf("qspi%d", i); + g_autofree gchar *target_bus = g_strdup_printf("spi%d", i); + + /* Alias controller SPI bus to the SoC itself */ + object_property_add_alias(OBJECT(s), bus_name, + OBJECT(&s->qspi), target_bus); + } + + for (i = 0; i < XLNX_ZYNQMP_NUM_USB; i++) { + if (!object_property_set_link(OBJECT(&s->usb[i].sysbus_xhci), "dma", + OBJECT(system_memory), errp)) { + return; + } + + qdev_prop_set_uint32(DEVICE(&s->usb[i].sysbus_xhci), "intrs", 4); + qdev_prop_set_uint32(DEVICE(&s->usb[i].sysbus_xhci), "slots", 2); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->usb[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->usb[i]), 0, usb_addr[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i].sysbus_xhci), 0, + gic_spi[usb_intr[i]]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i].sysbus_xhci), 1, + gic_spi[usb_intr[i] + 1]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i].sysbus_xhci), 2, + gic_spi[usb_intr[i] + 2]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i].sysbus_xhci), 3, + gic_spi[usb_intr[i] + 3]); + } } static Property xlnx_zynqmp_props[] = { diff --git a/hw/audio/Kconfig b/hw/audio/Kconfig index e9c6fed826..e76c69ca7e 100644 --- a/hw/audio/Kconfig +++ b/hw/audio/Kconfig @@ -47,6 +47,3 @@ config PL041 config CS4231 bool - -config MARVELL_88W8618 - bool diff --git a/hw/audio/ac97.c b/hw/audio/ac97.c index 3644bd9a17..358cf70379 100644 --- a/hw/audio/ac97.c +++ b/hw/audio/ac97.c @@ -89,42 +89,42 @@ enum { #define GC_CR 2 /* rw */ #define GC_VALID_MASK ((1 << 6) - 1) -#define GS_MD3 (1<<17) /* rw */ -#define GS_AD3 (1<<16) /* rw */ -#define GS_RCS (1<<15) /* rwc */ -#define GS_B3S12 (1<<14) /* ro */ -#define GS_B2S12 (1<<13) /* ro */ -#define GS_B1S12 (1<<12) /* ro */ -#define GS_S1R1 (1<<11) /* rwc */ -#define GS_S0R1 (1<<10) /* rwc */ -#define GS_S1CR (1<<9) /* ro */ -#define GS_S0CR (1<<8) /* ro */ -#define GS_MINT (1<<7) /* ro */ -#define GS_POINT (1<<6) /* ro */ -#define GS_PIINT (1<<5) /* ro */ -#define GS_SOINT (1<<4) /* ro */ -#define GS_RSRVD (1<<3) -#define GS_MOINT (1<<2) /* ro */ -#define GS_MIINT (1<<1) /* ro */ +#define GS_MD3 (1 << 17) /* rw */ +#define GS_AD3 (1 << 16) /* rw */ +#define GS_RCS (1 << 15) /* rwc */ +#define GS_B3S12 (1 << 14) /* ro */ +#define GS_B2S12 (1 << 13) /* ro */ +#define GS_B1S12 (1 << 12) /* ro */ +#define GS_S1R1 (1 << 11) /* rwc */ +#define GS_S0R1 (1 << 10) /* rwc */ +#define GS_S1CR (1 << 9) /* ro */ +#define GS_S0CR (1 << 8) /* ro */ +#define GS_MINT (1 << 7) /* ro */ +#define GS_POINT (1 << 6) /* ro */ +#define GS_PIINT (1 << 5) /* ro */ +#define GS_SOINT (1 << 4) /* ro */ +#define GS_RSRVD (1 << 3) +#define GS_MOINT (1 << 2) /* ro */ +#define GS_MIINT (1 << 1) /* ro */ #define GS_GSCI 1 /* rwc */ -#define GS_RO_MASK (GS_B3S12| \ - GS_B2S12| \ - 
GS_B1S12| \ - GS_S1CR| \ - GS_S0CR| \ - GS_MINT| \ - GS_POINT| \ - GS_PIINT| \ - GS_SOINT| \ - GS_RSRVD| \ - GS_MOINT| \ +#define GS_RO_MASK (GS_B3S12 | \ + GS_B2S12 | \ + GS_B1S12 | \ + GS_S1CR | \ + GS_S0CR | \ + GS_MINT | \ + GS_POINT | \ + GS_PIINT | \ + GS_SOINT | \ + GS_RSRVD | \ + GS_MOINT | \ GS_MIINT) #define GS_VALID_MASK ((1 << 18) - 1) -#define GS_WCLEAR_MASK (GS_RCS|GS_S1R1|GS_S0R1|GS_GSCI) +#define GS_WCLEAR_MASK (GS_RCS | GS_S1R1 | GS_S0R1 | GS_GSCI) /* Buffer Descriptor */ -#define BD_IOC (1<<31) /* Interrupt on Completion */ -#define BD_BUP (1<<30) /* Buffer Underrun Policy */ +#define BD_IOC (1 << 31) /* Interrupt on Completion */ +#define BD_BUP (1 << 30) /* Buffer Underrun Policy */ #define EACS_VRA 1 #define EACS_VRM 8 @@ -149,7 +149,7 @@ enum { }; #ifdef DEBUG_AC97 -#define dolog(...) AUD_log ("ac97", __VA_ARGS__) +#define dolog(...) AUD_log("ac97", __VA_ARGS__) #else #define dolog(...) #endif @@ -165,10 +165,10 @@ enum { \ prefix ## _CR = start + 11 \ } -MKREGS (PI, PI_INDEX * 16); -MKREGS (PO, PO_INDEX * 16); -MKREGS (MC, MC_INDEX * 16); -MKREGS (SO, SO_INDEX * 16); +MKREGS(PI, PI_INDEX * 16); +MKREGS(PO, PO_INDEX * 16); +MKREGS(MC, MC_INDEX * 16); +MKREGS(SO, SO_INDEX * 16); enum { GLOB_CNT = 0x2c, @@ -178,40 +178,39 @@ enum { #define GET_BM(index) (((index) >> 4) & 7) -static void po_callback (void *opaque, int free); -static void pi_callback (void *opaque, int avail); -static void mc_callback (void *opaque, int avail); +typedef struct AC97DeviceState { + PCIDevice dev; + AC97LinkState state; -static void warm_reset (AC97LinkState *s) -{ - (void) s; -} + MemoryRegion io_nam; + MemoryRegion io_nabm; +} AC97DeviceState; -static void cold_reset (AC97LinkState * s) -{ - (void) s; -} +#define AC97_DEVICE(obj) \ + OBJECT_CHECK(AC97DeviceState, (obj), "AC97") -static void fetch_bd (AC97LinkState *s, AC97BusMasterRegs *r) +static void po_callback(void *opaque, int free); +static void pi_callback(void *opaque, int avail); +static void mc_callback(void *opaque, int avail); + +static void fetch_bd(AC97LinkState *s, AC97BusMasterRegs *r) { uint8_t b[8]; - dma_memory_read(s->as, r->bdbar + r->civ * 8, b, 8); + dma_memory_read(s->as, r->bdbar + r->civ * 8, b, 8, MEMTXATTRS_UNSPECIFIED); r->bd_valid = 1; - r->bd.addr = le32_to_cpu (*(uint32_t *) &b[0]) & ~3; - r->bd.ctl_len = le32_to_cpu (*(uint32_t *) &b[4]); + r->bd.addr = le32_to_cpu(*(uint32_t *) &b[0]) & ~3; + r->bd.ctl_len = le32_to_cpu(*(uint32_t *) &b[4]); r->picb = r->bd.ctl_len & 0xffff; - dolog ("bd %2d addr=%#x ctl=%#06x len=%#x(%d bytes)\n", - r->civ, r->bd.addr, r->bd.ctl_len >> 16, - r->bd.ctl_len & 0xffff, - (r->bd.ctl_len & 0xffff) << 1); + dolog("bd %2d addr=0x%x ctl=0x%06x len=0x%x(%d bytes)\n", + r->civ, r->bd.addr, r->bd.ctl_len >> 16, + r->bd.ctl_len & 0xffff, (r->bd.ctl_len & 0xffff) << 1); } - /** * Update the BM status register */ -static void update_sr (AC97LinkState *s, AC97BusMasterRegs *r, uint32_t new_sr) +static void update_sr(AC97LinkState *s, AC97BusMasterRegs *r, uint32_t new_sr) { int event = 0; int level = 0; @@ -224,8 +223,7 @@ static void update_sr (AC97LinkState *s, AC97BusMasterRegs *r, uint32_t new_sr) if (!new_mask) { event = 1; level = 0; - } - else { + } else { if ((new_mask & SR_LVBCI) && (r->cr & CR_LVBIE)) { event = 1; level = 1; @@ -239,72 +237,70 @@ static void update_sr (AC97LinkState *s, AC97BusMasterRegs *r, uint32_t new_sr) r->sr = new_sr; - dolog ("IOC%d LVB%d sr=%#x event=%d level=%d\n", - r->sr & SR_BCIS, r->sr & SR_LVBCI, - r->sr, - event, level); + dolog("IOC%d LVB%d 
sr=0x%x event=%d level=%d\n", + r->sr & SR_BCIS, r->sr & SR_LVBCI, r->sr, event, level); - if (!event) + if (!event) { return; + } if (level) { - s->glob_sta |= masks[r - s->bm_regs]; - dolog ("set irq level=1\n"); - pci_irq_assert(s->pci_dev); - } - else { - s->glob_sta &= ~masks[r - s->bm_regs]; - dolog ("set irq level=0\n"); - pci_irq_deassert(s->pci_dev); + s->glob_sta |= masks[r - s->bm_regs]; + dolog("set irq level=1\n"); + pci_irq_assert(s->pci_dev); + } else { + s->glob_sta &= ~masks[r - s->bm_regs]; + dolog("set irq level=0\n"); + pci_irq_deassert(s->pci_dev); } } -static void voice_set_active (AC97LinkState *s, int bm_index, int on) +static void voice_set_active(AC97LinkState *s, int bm_index, int on) { switch (bm_index) { case PI_INDEX: - AUD_set_active_in (s->voice_pi, on); + AUD_set_active_in(s->voice_pi, on); break; case PO_INDEX: - AUD_set_active_out (s->voice_po, on); + AUD_set_active_out(s->voice_po, on); break; case MC_INDEX: - AUD_set_active_in (s->voice_mc, on); + AUD_set_active_in(s->voice_mc, on); break; case SO_INDEX: break; default: - AUD_log ("ac97", "invalid bm_index(%d) in voice_set_active\n", bm_index); + AUD_log("ac97", "invalid bm_index(%d) in voice_set_active\n", bm_index); break; } } -static void reset_bm_regs (AC97LinkState *s, AC97BusMasterRegs *r) +static void reset_bm_regs(AC97LinkState *s, AC97BusMasterRegs *r) { - dolog ("reset_bm_regs\n"); + dolog("reset_bm_regs\n"); r->bdbar = 0; r->civ = 0; r->lvi = 0; /** todo do we need to do that? */ - update_sr (s, r, SR_DCH); + update_sr(s, r, SR_DCH); r->picb = 0; r->piv = 0; r->cr = r->cr & CR_DONT_CLEAR_MASK; r->bd_valid = 0; - voice_set_active (s, r - s->bm_regs, 0); - memset (s->silence, 0, sizeof (s->silence)); + voice_set_active(s, r - s->bm_regs, 0); + memset(s->silence, 0, sizeof(s->silence)); } -static void mixer_store (AC97LinkState *s, uint32_t i, uint16_t v) +static void mixer_store(AC97LinkState *s, uint32_t i, uint16_t v) { - if (i + 2 > sizeof (s->mixer_data)) { - dolog ("mixer_store: index %d out of bounds %zd\n", - i, sizeof (s->mixer_data)); + if (i + 2 > sizeof(s->mixer_data)) { + dolog("mixer_store: index %d out of bounds %zd\n", + i, sizeof(s->mixer_data)); return; } @@ -312,22 +308,21 @@ static void mixer_store (AC97LinkState *s, uint32_t i, uint16_t v) s->mixer_data[i + 1] = v >> 8; } -static uint16_t mixer_load (AC97LinkState *s, uint32_t i) +static uint16_t mixer_load(AC97LinkState *s, uint32_t i) { uint16_t val = 0xffff; - if (i + 2 > sizeof (s->mixer_data)) { - dolog ("mixer_load: index %d out of bounds %zd\n", - i, sizeof (s->mixer_data)); - } - else { + if (i + 2 > sizeof(s->mixer_data)) { + dolog("mixer_load: index %d out of bounds %zd\n", + i, sizeof(s->mixer_data)); + } else { val = s->mixer_data[i + 0] | (s->mixer_data[i + 1] << 8); } return val; } -static void open_voice (AC97LinkState *s, int index, int freq) +static void open_voice(AC97LinkState *s, int index, int freq) { struct audsettings as; @@ -340,7 +335,7 @@ static void open_voice (AC97LinkState *s, int index, int freq) s->invalid_freq[index] = 0; switch (index) { case PI_INDEX: - s->voice_pi = AUD_open_in ( + s->voice_pi = AUD_open_in( &s->card, s->voice_pi, "ac97.pi", @@ -351,7 +346,7 @@ static void open_voice (AC97LinkState *s, int index, int freq) break; case PO_INDEX: - s->voice_po = AUD_open_out ( + s->voice_po = AUD_open_out( &s->card, s->voice_po, "ac97.po", @@ -362,7 +357,7 @@ static void open_voice (AC97LinkState *s, int index, int freq) break; case MC_INDEX: - s->voice_mc = AUD_open_in ( + s->voice_mc = 
AUD_open_in( &s->card, s->voice_mc, "ac97.mc", @@ -375,22 +370,21 @@ static void open_voice (AC97LinkState *s, int index, int freq) case SO_INDEX: break; } - } - else { + } else { s->invalid_freq[index] = freq; switch (index) { case PI_INDEX: - AUD_close_in (&s->card, s->voice_pi); + AUD_close_in(&s->card, s->voice_pi); s->voice_pi = NULL; break; case PO_INDEX: - AUD_close_out (&s->card, s->voice_po); + AUD_close_out(&s->card, s->voice_po); s->voice_po = NULL; break; case MC_INDEX: - AUD_close_in (&s->card, s->voice_mc); + AUD_close_in(&s->card, s->voice_mc); s->voice_mc = NULL; break; @@ -400,25 +394,25 @@ static void open_voice (AC97LinkState *s, int index, int freq) } } -static void reset_voices (AC97LinkState *s, uint8_t active[LAST_INDEX]) +static void reset_voices(AC97LinkState *s, uint8_t active[LAST_INDEX]) { uint16_t freq; - freq = mixer_load (s, AC97_PCM_LR_ADC_Rate); - open_voice (s, PI_INDEX, freq); - AUD_set_active_in (s->voice_pi, active[PI_INDEX]); + freq = mixer_load(s, AC97_PCM_LR_ADC_Rate); + open_voice(s, PI_INDEX, freq); + AUD_set_active_in(s->voice_pi, active[PI_INDEX]); - freq = mixer_load (s, AC97_PCM_Front_DAC_Rate); - open_voice (s, PO_INDEX, freq); - AUD_set_active_out (s->voice_po, active[PO_INDEX]); + freq = mixer_load(s, AC97_PCM_Front_DAC_Rate); + open_voice(s, PO_INDEX, freq); + AUD_set_active_out(s->voice_po, active[PO_INDEX]); - freq = mixer_load (s, AC97_MIC_ADC_Rate); - open_voice (s, MC_INDEX, freq); - AUD_set_active_in (s->voice_mc, active[MC_INDEX]); + freq = mixer_load(s, AC97_MIC_ADC_Rate); + open_voice(s, MC_INDEX, freq); + AUD_set_active_in(s->voice_mc, active[MC_INDEX]); } -static void get_volume (uint16_t vol, uint16_t mask, int inverse, - int *mute, uint8_t *lvol, uint8_t *rvol) +static void get_volume(uint16_t vol, uint16_t mask, int inverse, + int *mute, uint8_t *lvol, uint8_t *rvol) { *mute = (vol >> MUTE_SHIFT) & 1; *rvol = (255 * (vol & mask)) / mask; @@ -430,131 +424,130 @@ static void get_volume (uint16_t vol, uint16_t mask, int inverse, } } -static void update_combined_volume_out (AC97LinkState *s) +static void update_combined_volume_out(AC97LinkState *s) { uint8_t lvol, rvol, plvol, prvol; int mute, pmute; - get_volume (mixer_load (s, AC97_Master_Volume_Mute), 0x3f, 1, - &mute, &lvol, &rvol); - get_volume (mixer_load (s, AC97_PCM_Out_Volume_Mute), 0x1f, 1, - &pmute, &plvol, &prvol); + get_volume(mixer_load(s, AC97_Master_Volume_Mute), 0x3f, 1, + &mute, &lvol, &rvol); + get_volume(mixer_load(s, AC97_PCM_Out_Volume_Mute), 0x1f, 1, + &pmute, &plvol, &prvol); mute = mute | pmute; lvol = (lvol * plvol) / 255; rvol = (rvol * prvol) / 255; - AUD_set_volume_out (s->voice_po, mute, lvol, rvol); + AUD_set_volume_out(s->voice_po, mute, lvol, rvol); } -static void update_volume_in (AC97LinkState *s) +static void update_volume_in(AC97LinkState *s) { uint8_t lvol, rvol; int mute; - get_volume (mixer_load (s, AC97_Record_Gain_Mute), 0x0f, 0, - &mute, &lvol, &rvol); + get_volume(mixer_load(s, AC97_Record_Gain_Mute), 0x0f, 0, + &mute, &lvol, &rvol); - AUD_set_volume_in (s->voice_pi, mute, lvol, rvol); + AUD_set_volume_in(s->voice_pi, mute, lvol, rvol); } -static void set_volume (AC97LinkState *s, int index, uint32_t val) +static void set_volume(AC97LinkState *s, int index, uint32_t val) { switch (index) { case AC97_Master_Volume_Mute: val &= 0xbf3f; - mixer_store (s, index, val); - update_combined_volume_out (s); + mixer_store(s, index, val); + update_combined_volume_out(s); break; case AC97_PCM_Out_Volume_Mute: val &= 0x9f1f; - mixer_store (s, index, 
val); - update_combined_volume_out (s); + mixer_store(s, index, val); + update_combined_volume_out(s); break; case AC97_Record_Gain_Mute: val &= 0x8f0f; - mixer_store (s, index, val); - update_volume_in (s); + mixer_store(s, index, val); + update_volume_in(s); break; } } -static void record_select (AC97LinkState *s, uint32_t val) +static void record_select(AC97LinkState *s, uint32_t val) { uint8_t rs = val & REC_MASK; uint8_t ls = (val >> 8) & REC_MASK; - mixer_store (s, AC97_Record_Select, rs | (ls << 8)); + mixer_store(s, AC97_Record_Select, rs | (ls << 8)); } -static void mixer_reset (AC97LinkState *s) +static void mixer_reset(AC97LinkState *s) { uint8_t active[LAST_INDEX]; - dolog ("mixer_reset\n"); - memset (s->mixer_data, 0, sizeof (s->mixer_data)); - memset (active, 0, sizeof (active)); - mixer_store (s, AC97_Reset , 0x0000); /* 6940 */ - mixer_store (s, AC97_Headphone_Volume_Mute , 0x0000); - mixer_store (s, AC97_Master_Volume_Mono_Mute , 0x0000); - mixer_store (s, AC97_Master_Tone_RL, 0x0000); - mixer_store (s, AC97_PC_BEEP_Volume_Mute , 0x0000); - mixer_store (s, AC97_Phone_Volume_Mute , 0x0000); - mixer_store (s, AC97_Mic_Volume_Mute , 0x0000); - mixer_store (s, AC97_Line_In_Volume_Mute , 0x0000); - mixer_store (s, AC97_CD_Volume_Mute , 0x0000); - mixer_store (s, AC97_Video_Volume_Mute , 0x0000); - mixer_store (s, AC97_Aux_Volume_Mute , 0x0000); - mixer_store (s, AC97_Record_Gain_Mic_Mute , 0x0000); - mixer_store (s, AC97_General_Purpose , 0x0000); - mixer_store (s, AC97_3D_Control , 0x0000); - mixer_store (s, AC97_Powerdown_Ctrl_Stat , 0x000f); + dolog("mixer_reset\n"); + memset(s->mixer_data, 0, sizeof(s->mixer_data)); + memset(active, 0, sizeof(active)); + mixer_store(s, AC97_Reset, 0x0000); /* 6940 */ + mixer_store(s, AC97_Headphone_Volume_Mute, 0x0000); + mixer_store(s, AC97_Master_Volume_Mono_Mute, 0x0000); + mixer_store(s, AC97_Master_Tone_RL, 0x0000); + mixer_store(s, AC97_PC_BEEP_Volume_Mute, 0x0000); + mixer_store(s, AC97_Phone_Volume_Mute, 0x0000); + mixer_store(s, AC97_Mic_Volume_Mute, 0x0000); + mixer_store(s, AC97_Line_In_Volume_Mute, 0x0000); + mixer_store(s, AC97_CD_Volume_Mute, 0x0000); + mixer_store(s, AC97_Video_Volume_Mute, 0x0000); + mixer_store(s, AC97_Aux_Volume_Mute, 0x0000); + mixer_store(s, AC97_Record_Gain_Mic_Mute, 0x0000); + mixer_store(s, AC97_General_Purpose, 0x0000); + mixer_store(s, AC97_3D_Control, 0x0000); + mixer_store(s, AC97_Powerdown_Ctrl_Stat, 0x000f); /* * Sigmatel 9700 (STAC9700) */ - mixer_store (s, AC97_Vendor_ID1 , 0x8384); - mixer_store (s, AC97_Vendor_ID2 , 0x7600); /* 7608 */ + mixer_store(s, AC97_Vendor_ID1, 0x8384); + mixer_store(s, AC97_Vendor_ID2, 0x7600); /* 7608 */ - mixer_store (s, AC97_Extended_Audio_ID , 0x0809); - mixer_store (s, AC97_Extended_Audio_Ctrl_Stat, 0x0009); - mixer_store (s, AC97_PCM_Front_DAC_Rate , 0xbb80); - mixer_store (s, AC97_PCM_Surround_DAC_Rate , 0xbb80); - mixer_store (s, AC97_PCM_LFE_DAC_Rate , 0xbb80); - mixer_store (s, AC97_PCM_LR_ADC_Rate , 0xbb80); - mixer_store (s, AC97_MIC_ADC_Rate , 0xbb80); + mixer_store(s, AC97_Extended_Audio_ID, 0x0809); + mixer_store(s, AC97_Extended_Audio_Ctrl_Stat, 0x0009); + mixer_store(s, AC97_PCM_Front_DAC_Rate, 0xbb80); + mixer_store(s, AC97_PCM_Surround_DAC_Rate, 0xbb80); + mixer_store(s, AC97_PCM_LFE_DAC_Rate, 0xbb80); + mixer_store(s, AC97_PCM_LR_ADC_Rate, 0xbb80); + mixer_store(s, AC97_MIC_ADC_Rate, 0xbb80); - record_select (s, 0); - set_volume (s, AC97_Master_Volume_Mute, 0x8000); - set_volume (s, AC97_PCM_Out_Volume_Mute, 0x8808); - set_volume (s, 
AC97_Record_Gain_Mute, 0x8808); + record_select(s, 0); + set_volume(s, AC97_Master_Volume_Mute, 0x8000); + set_volume(s, AC97_PCM_Out_Volume_Mute, 0x8808); + set_volume(s, AC97_Record_Gain_Mute, 0x8808); - reset_voices (s, active); + reset_voices(s, active); } /** * Native audio mixer * I/O Reads */ -static uint32_t nam_readb (void *opaque, uint32_t addr) +static uint32_t nam_readb(void *opaque, uint32_t addr) { AC97LinkState *s = opaque; - dolog ("U nam readb %#x\n", addr); + dolog("U nam readb 0x%x\n", addr); s->cas = 0; return ~0U; } -static uint32_t nam_readw (void *opaque, uint32_t addr) +static uint32_t nam_readw(void *opaque, uint32_t addr) { AC97LinkState *s = opaque; - uint32_t index = addr; s->cas = 0; - return mixer_load(s, index); + return mixer_load(s, addr); } -static uint32_t nam_readl (void *opaque, uint32_t addr) +static uint32_t nam_readl(void *opaque, uint32_t addr) { AC97LinkState *s = opaque; - dolog ("U nam readl %#x\n", addr); + dolog("U nam readl 0x%x\n", addr); s->cas = 0; return ~0U; } @@ -563,89 +556,84 @@ static uint32_t nam_readl (void *opaque, uint32_t addr) * Native audio mixer * I/O Writes */ -static void nam_writeb (void *opaque, uint32_t addr, uint32_t val) +static void nam_writeb(void *opaque, uint32_t addr, uint32_t val) { AC97LinkState *s = opaque; - dolog ("U nam writeb %#x <- %#x\n", addr, val); + dolog("U nam writeb 0x%x <- 0x%x\n", addr, val); s->cas = 0; } -static void nam_writew (void *opaque, uint32_t addr, uint32_t val) +static void nam_writew(void *opaque, uint32_t addr, uint32_t val) { AC97LinkState *s = opaque; - uint32_t index = addr; + s->cas = 0; - switch (index) { + switch (addr) { case AC97_Reset: - mixer_reset (s); + mixer_reset(s); break; case AC97_Powerdown_Ctrl_Stat: val &= ~0x800f; - val |= mixer_load (s, index) & 0xf; - mixer_store (s, index, val); + val |= mixer_load(s, addr) & 0xf; + mixer_store(s, addr, val); break; case AC97_PCM_Out_Volume_Mute: case AC97_Master_Volume_Mute: case AC97_Record_Gain_Mute: - set_volume (s, index, val); + set_volume(s, addr, val); break; case AC97_Record_Select: - record_select (s, val); + record_select(s, val); break; case AC97_Vendor_ID1: case AC97_Vendor_ID2: - dolog ("Attempt to write vendor ID to %#x\n", val); + dolog("Attempt to write vendor ID to 0x%x\n", val); break; case AC97_Extended_Audio_ID: - dolog ("Attempt to write extended audio ID to %#x\n", val); + dolog("Attempt to write extended audio ID to 0x%x\n", val); break; case AC97_Extended_Audio_Ctrl_Stat: if (!(val & EACS_VRA)) { - mixer_store (s, AC97_PCM_Front_DAC_Rate, 0xbb80); - mixer_store (s, AC97_PCM_LR_ADC_Rate, 0xbb80); - open_voice (s, PI_INDEX, 48000); - open_voice (s, PO_INDEX, 48000); + mixer_store(s, AC97_PCM_Front_DAC_Rate, 0xbb80); + mixer_store(s, AC97_PCM_LR_ADC_Rate, 0xbb80); + open_voice(s, PI_INDEX, 48000); + open_voice(s, PO_INDEX, 48000); } if (!(val & EACS_VRM)) { - mixer_store (s, AC97_MIC_ADC_Rate, 0xbb80); - open_voice (s, MC_INDEX, 48000); + mixer_store(s, AC97_MIC_ADC_Rate, 0xbb80); + open_voice(s, MC_INDEX, 48000); } - dolog ("Setting extended audio control to %#x\n", val); - mixer_store (s, AC97_Extended_Audio_Ctrl_Stat, val); + dolog("Setting extended audio control to 0x%x\n", val); + mixer_store(s, AC97_Extended_Audio_Ctrl_Stat, val); break; case AC97_PCM_Front_DAC_Rate: - if (mixer_load (s, AC97_Extended_Audio_Ctrl_Stat) & EACS_VRA) { - mixer_store (s, index, val); - dolog ("Set front DAC rate to %d\n", val); - open_voice (s, PO_INDEX, val); - } - else { - dolog ("Attempt to set front DAC rate to %d, " - 
"but VRA is not set\n", - val); + if (mixer_load(s, AC97_Extended_Audio_Ctrl_Stat) & EACS_VRA) { + mixer_store(s, addr, val); + dolog("Set front DAC rate to %d\n", val); + open_voice(s, PO_INDEX, val); + } else { + dolog("Attempt to set front DAC rate to %d, but VRA is not set\n", + val); } break; case AC97_MIC_ADC_Rate: - if (mixer_load (s, AC97_Extended_Audio_Ctrl_Stat) & EACS_VRM) { - mixer_store (s, index, val); - dolog ("Set MIC ADC rate to %d\n", val); - open_voice (s, MC_INDEX, val); - } - else { - dolog ("Attempt to set MIC ADC rate to %d, " - "but VRM is not set\n", - val); + if (mixer_load(s, AC97_Extended_Audio_Ctrl_Stat) & EACS_VRM) { + mixer_store(s, addr, val); + dolog("Set MIC ADC rate to %d\n", val); + open_voice(s, MC_INDEX, val); + } else { + dolog("Attempt to set MIC ADC rate to %d, but VRM is not set\n", + val); } break; case AC97_PCM_LR_ADC_Rate: - if (mixer_load (s, AC97_Extended_Audio_Ctrl_Stat) & EACS_VRA) { - mixer_store (s, index, val); - dolog ("Set front LR ADC rate to %d\n", val); - open_voice (s, PI_INDEX, val); - } - else { - dolog ("Attempt to set LR ADC rate to %d, but VRA is not set\n", - val); + if (mixer_load(s, AC97_Extended_Audio_Ctrl_Stat) & EACS_VRA) { + mixer_store(s, addr, val); + dolog("Set front LR ADC rate to %d\n", val); + open_voice(s, PI_INDEX, val); + } else { + dolog("Attempt to set LR ADC rate to %d, but VRA is not set\n", + val); } break; case AC97_Headphone_Volume_Mute: @@ -666,17 +654,17 @@ static void nam_writew (void *opaque, uint32_t addr, uint32_t val) /* None of the features in these regs are emulated, so they are RO */ break; default: - dolog ("U nam writew %#x <- %#x\n", addr, val); - mixer_store (s, index, val); + dolog("U nam writew 0x%x <- 0x%x\n", addr, val); + mixer_store(s, addr, val); assert(0); break; } } -static void nam_writel (void *opaque, uint32_t addr, uint32_t val) +static void nam_writel(void *opaque, uint32_t addr, uint32_t val) { AC97LinkState *s = opaque; - dolog ("U nam writel %#x <- %#x\n", addr, val); + dolog("U nam writel 0x%x <- 0x%x\n", addr, val); s->cas = 0; } @@ -684,16 +672,15 @@ static void nam_writel (void *opaque, uint32_t addr, uint32_t val) * Native audio bus master * I/O Reads */ -static uint32_t nabm_readb (void *opaque, uint32_t addr) +static uint32_t nabm_readb(void *opaque, uint32_t addr) { AC97LinkState *s = opaque; AC97BusMasterRegs *r = NULL; - uint32_t index = addr; uint32_t val = ~0U; - switch (index) { + switch (addr) { case CAS: - dolog ("CAS %d\n", s->cas); + dolog("CAS %d\n", s->cas); val = s->cas; s->cas = 1; break; @@ -701,126 +688,124 @@ static uint32_t nabm_readb (void *opaque, uint32_t addr) case PO_CIV: case MC_CIV: case SO_CIV: - r = &s->bm_regs[GET_BM (index)]; + r = &s->bm_regs[GET_BM(addr)]; val = r->civ; - dolog ("CIV[%d] -> %#x\n", GET_BM (index), val); + dolog("CIV[%d] -> 0x%x\n", GET_BM(addr), val); break; case PI_LVI: case PO_LVI: case MC_LVI: case SO_LVI: - r = &s->bm_regs[GET_BM (index)]; + r = &s->bm_regs[GET_BM(addr)]; val = r->lvi; - dolog ("LVI[%d] -> %#x\n", GET_BM (index), val); + dolog("LVI[%d] -> 0x%x\n", GET_BM(addr), val); break; case PI_PIV: case PO_PIV: case MC_PIV: case SO_PIV: - r = &s->bm_regs[GET_BM (index)]; + r = &s->bm_regs[GET_BM(addr)]; val = r->piv; - dolog ("PIV[%d] -> %#x\n", GET_BM (index), val); + dolog("PIV[%d] -> 0x%x\n", GET_BM(addr), val); break; case PI_CR: case PO_CR: case MC_CR: case SO_CR: - r = &s->bm_regs[GET_BM (index)]; + r = &s->bm_regs[GET_BM(addr)]; val = r->cr; - dolog ("CR[%d] -> %#x\n", GET_BM (index), val); + 
dolog("CR[%d] -> 0x%x\n", GET_BM(addr), val); break; case PI_SR: case PO_SR: case MC_SR: case SO_SR: - r = &s->bm_regs[GET_BM (index)]; + r = &s->bm_regs[GET_BM(addr)]; val = r->sr & 0xff; - dolog ("SRb[%d] -> %#x\n", GET_BM (index), val); + dolog("SRb[%d] -> 0x%x\n", GET_BM(addr), val); break; default: - dolog ("U nabm readb %#x -> %#x\n", addr, val); + dolog("U nabm readb 0x%x -> 0x%x\n", addr, val); assert(0); break; } return val; } -static uint32_t nabm_readw (void *opaque, uint32_t addr) +static uint32_t nabm_readw(void *opaque, uint32_t addr) { AC97LinkState *s = opaque; AC97BusMasterRegs *r = NULL; - uint32_t index = addr; uint32_t val = ~0U; - switch (index) { + switch (addr) { case PI_SR: case PO_SR: case MC_SR: case SO_SR: - r = &s->bm_regs[GET_BM (index)]; + r = &s->bm_regs[GET_BM(addr)]; val = r->sr; - dolog ("SR[%d] -> %#x\n", GET_BM (index), val); + dolog("SR[%d] -> 0x%x\n", GET_BM(addr), val); break; case PI_PICB: case PO_PICB: case MC_PICB: case SO_PICB: - r = &s->bm_regs[GET_BM (index)]; + r = &s->bm_regs[GET_BM(addr)]; val = r->picb; - dolog ("PICB[%d] -> %#x\n", GET_BM (index), val); + dolog("PICB[%d] -> 0x%x\n", GET_BM(addr), val); break; default: - dolog ("U nabm readw %#x -> %#x\n", addr, val); + dolog("U nabm readw 0x%x -> 0x%x\n", addr, val); val = nabm_readb(opaque, addr) | (nabm_readb(opaque, addr + 1) << 8); break; } return val; } -static uint32_t nabm_readl (void *opaque, uint32_t addr) +static uint32_t nabm_readl(void *opaque, uint32_t addr) { AC97LinkState *s = opaque; AC97BusMasterRegs *r = NULL; - uint32_t index = addr; uint32_t val = ~0U; - switch (index) { + switch (addr) { case PI_BDBAR: case PO_BDBAR: case MC_BDBAR: case SO_BDBAR: - r = &s->bm_regs[GET_BM (index)]; + r = &s->bm_regs[GET_BM(addr)]; val = r->bdbar; - dolog ("BMADDR[%d] -> %#x\n", GET_BM (index), val); + dolog("BMADDR[%d] -> 0x%x\n", GET_BM(addr), val); break; case PI_CIV: case PO_CIV: case MC_CIV: case SO_CIV: - r = &s->bm_regs[GET_BM (index)]; + r = &s->bm_regs[GET_BM(addr)]; val = r->civ | (r->lvi << 8) | (r->sr << 16); - dolog ("CIV LVI SR[%d] -> %#x, %#x, %#x\n", GET_BM (index), + dolog("CIV LVI SR[%d] -> 0x%x, 0x%x, 0x%x\n", GET_BM(addr), r->civ, r->lvi, r->sr); break; case PI_PICB: case PO_PICB: case MC_PICB: case SO_PICB: - r = &s->bm_regs[GET_BM (index)]; + r = &s->bm_regs[GET_BM(addr)]; val = r->picb | (r->piv << 16) | (r->cr << 24); - dolog ("PICB PIV CR[%d] -> %#x %#x %#x %#x\n", GET_BM (index), + dolog("PICB PIV CR[%d] -> 0x%x 0x%x 0x%x 0x%x\n", GET_BM(addr), val, r->picb, r->piv, r->cr); break; case GLOB_CNT: val = s->glob_cnt; - dolog ("glob_cnt -> %#x\n", val); + dolog("glob_cnt -> 0x%x\n", val); break; case GLOB_STA: val = s->glob_sta | GS_S0CR; - dolog ("glob_sta -> %#x\n", val); + dolog("glob_sta -> 0x%x\n", val); break; default: - dolog ("U nabm readl %#x -> %#x\n", addr, val); + dolog("U nabm readl 0x%x -> 0x%x\n", addr, val); val = nabm_readw(opaque, addr) | (nabm_readw(opaque, addr + 2) << 16); break; } @@ -831,134 +816,129 @@ static uint32_t nabm_readl (void *opaque, uint32_t addr) * Native audio bus master * I/O Writes */ -static void nabm_writeb (void *opaque, uint32_t addr, uint32_t val) +static void nabm_writeb(void *opaque, uint32_t addr, uint32_t val) { AC97LinkState *s = opaque; AC97BusMasterRegs *r = NULL; - uint32_t index = addr; - switch (index) { + + switch (addr) { case PI_LVI: case PO_LVI: case MC_LVI: case SO_LVI: - r = &s->bm_regs[GET_BM (index)]; + r = &s->bm_regs[GET_BM(addr)]; if ((r->cr & CR_RPBM) && (r->sr & SR_DCH)) { r->sr &= ~(SR_DCH | 
SR_CELV); r->civ = r->piv; r->piv = (r->piv + 1) % 32; - fetch_bd (s, r); + fetch_bd(s, r); } r->lvi = val % 32; - dolog ("LVI[%d] <- %#x\n", GET_BM (index), val); + dolog("LVI[%d] <- 0x%x\n", GET_BM(addr), val); break; case PI_CR: case PO_CR: case MC_CR: case SO_CR: - r = &s->bm_regs[GET_BM (index)]; + r = &s->bm_regs[GET_BM(addr)]; if (val & CR_RR) { - reset_bm_regs (s, r); - } - else { + reset_bm_regs(s, r); + } else { r->cr = val & CR_VALID_MASK; if (!(r->cr & CR_RPBM)) { - voice_set_active (s, r - s->bm_regs, 0); + voice_set_active(s, r - s->bm_regs, 0); r->sr |= SR_DCH; - } - else { + } else { r->civ = r->piv; r->piv = (r->piv + 1) % 32; - fetch_bd (s, r); + fetch_bd(s, r); r->sr &= ~SR_DCH; - voice_set_active (s, r - s->bm_regs, 1); + voice_set_active(s, r - s->bm_regs, 1); } } - dolog ("CR[%d] <- %#x (cr %#x)\n", GET_BM (index), val, r->cr); + dolog("CR[%d] <- 0x%x (cr 0x%x)\n", GET_BM(addr), val, r->cr); break; case PI_SR: case PO_SR: case MC_SR: case SO_SR: - r = &s->bm_regs[GET_BM (index)]; + r = &s->bm_regs[GET_BM(addr)]; r->sr |= val & ~(SR_RO_MASK | SR_WCLEAR_MASK); - update_sr (s, r, r->sr & ~(val & SR_WCLEAR_MASK)); - dolog ("SR[%d] <- %#x (sr %#x)\n", GET_BM (index), val, r->sr); + update_sr(s, r, r->sr & ~(val & SR_WCLEAR_MASK)); + dolog("SR[%d] <- 0x%x (sr 0x%x)\n", GET_BM(addr), val, r->sr); break; default: - dolog ("U nabm writeb %#x <- %#x\n", addr, val); + dolog("U nabm writeb 0x%x <- 0x%x\n", addr, val); break; } } -static void nabm_writew (void *opaque, uint32_t addr, uint32_t val) +static void nabm_writew(void *opaque, uint32_t addr, uint32_t val) { AC97LinkState *s = opaque; AC97BusMasterRegs *r = NULL; - uint32_t index = addr; - switch (index) { + + switch (addr) { case PI_SR: case PO_SR: case MC_SR: case SO_SR: - r = &s->bm_regs[GET_BM (index)]; + r = &s->bm_regs[GET_BM(addr)]; r->sr |= val & ~(SR_RO_MASK | SR_WCLEAR_MASK); - update_sr (s, r, r->sr & ~(val & SR_WCLEAR_MASK)); - dolog ("SR[%d] <- %#x (sr %#x)\n", GET_BM (index), val, r->sr); + update_sr(s, r, r->sr & ~(val & SR_WCLEAR_MASK)); + dolog("SR[%d] <- 0x%x (sr 0x%x)\n", GET_BM(addr), val, r->sr); break; default: - dolog ("U nabm writew %#x <- %#x\n", addr, val); + dolog("U nabm writew 0x%x <- 0x%x\n", addr, val); nabm_writeb(opaque, addr, val & 0xff); nabm_writeb(opaque, addr + 1, (val >> 8) & 0xff); break; } } -static void nabm_writel (void *opaque, uint32_t addr, uint32_t val) +static void nabm_writel(void *opaque, uint32_t addr, uint32_t val) { AC97LinkState *s = opaque; AC97BusMasterRegs *r = NULL; - uint32_t index = addr; - switch (index) { + + switch (addr) { case PI_BDBAR: case PO_BDBAR: case MC_BDBAR: case SO_BDBAR: - r = &s->bm_regs[GET_BM (index)]; + r = &s->bm_regs[GET_BM(addr)]; r->bdbar = val & ~3; - dolog ("BDBAR[%d] <- %#x (bdbar %#x)\n", - GET_BM (index), val, r->bdbar); + dolog("BDBAR[%d] <- 0x%x (bdbar 0x%x)\n", GET_BM(addr), val, r->bdbar); break; case GLOB_CNT: - if (val & GC_WR) - warm_reset (s); - if (val & GC_CR) - cold_reset (s); - if (!(val & (GC_WR | GC_CR))) + /* TODO: Handle WR or CR being set (warm/cold reset requests) */ + if (!(val & (GC_WR | GC_CR))) { s->glob_cnt = val & GC_VALID_MASK; - dolog ("glob_cnt <- %#x (glob_cnt %#x)\n", val, s->glob_cnt); + } + dolog("glob_cnt <- 0x%x (glob_cnt 0x%x)\n", val, s->glob_cnt); break; case GLOB_STA: s->glob_sta &= ~(val & GS_WCLEAR_MASK); s->glob_sta |= (val & ~(GS_WCLEAR_MASK | GS_RO_MASK)) & GS_VALID_MASK; - dolog ("glob_sta <- %#x (glob_sta %#x)\n", val, s->glob_sta); + dolog("glob_sta <- 0x%x (glob_sta 0x%x)\n", val, 
s->glob_sta); break; default: - dolog ("U nabm writel %#x <- %#x\n", addr, val); + dolog("U nabm writel 0x%x <- 0x%x\n", addr, val); nabm_writew(opaque, addr, val & 0xffff); nabm_writew(opaque, addr + 2, (val >> 16) & 0xffff); break; } } -static int write_audio (AC97LinkState *s, AC97BusMasterRegs *r, - int max, int *stop) +static int write_audio(AC97LinkState *s, AC97BusMasterRegs *r, + int max, int *stop) { uint8_t tmpbuf[4096]; uint32_t addr = r->bd.addr; uint32_t temp = r->picb << 1; uint32_t written = 0; int to_copy = 0; - temp = MIN (temp, max); + temp = MIN(temp, max); if (!temp) { *stop = 1; @@ -967,11 +947,11 @@ static int write_audio (AC97LinkState *s, AC97BusMasterRegs *r, while (temp) { int copied; - to_copy = MIN (temp, sizeof (tmpbuf)); - dma_memory_read (s->as, addr, tmpbuf, to_copy); - copied = AUD_write (s->voice_po, tmpbuf, to_copy); - dolog ("write_audio max=%x to_copy=%x copied=%x\n", - max, to_copy, copied); + to_copy = MIN(temp, sizeof(tmpbuf)); + dma_memory_read(s->as, addr, tmpbuf, to_copy, MEMTXATTRS_UNSPECIFIED); + copied = AUD_write(s->voice_po, tmpbuf, to_copy); + dolog("write_audio max=%x to_copy=%x copied=%x\n", + max, to_copy, copied); if (!copied) { *stop = 1; break; @@ -983,11 +963,10 @@ static int write_audio (AC97LinkState *s, AC97BusMasterRegs *r, if (!temp) { if (to_copy < 4) { - dolog ("whoops\n"); + dolog("whoops\n"); s->last_samp = 0; - } - else { - s->last_samp = *(uint32_t *) &tmpbuf[to_copy - 4]; + } else { + s->last_samp = *(uint32_t *)&tmpbuf[to_copy - 4]; } } @@ -995,37 +974,37 @@ static int write_audio (AC97LinkState *s, AC97BusMasterRegs *r, return written; } -static void write_bup (AC97LinkState *s, int elapsed) +static void write_bup(AC97LinkState *s, int elapsed) { - dolog ("write_bup\n"); + dolog("write_bup\n"); if (!(s->bup_flag & BUP_SET)) { if (s->bup_flag & BUP_LAST) { int i; uint8_t *p = s->silence; - for (i = 0; i < sizeof (s->silence) / 4; i++, p += 4) { + for (i = 0; i < sizeof(s->silence) / 4; i++, p += 4) { *(uint32_t *) p = s->last_samp; } - } - else { - memset (s->silence, 0, sizeof (s->silence)); + } else { + memset(s->silence, 0, sizeof(s->silence)); } s->bup_flag |= BUP_SET; } while (elapsed) { - int temp = MIN (elapsed, sizeof (s->silence)); + int temp = MIN(elapsed, sizeof(s->silence)); while (temp) { - int copied = AUD_write (s->voice_po, s->silence, temp); - if (!copied) + int copied = AUD_write(s->voice_po, s->silence, temp); + if (!copied) { return; + } temp -= copied; elapsed -= copied; } } } -static int read_audio (AC97LinkState *s, AC97BusMasterRegs *r, - int max, int *stop) +static int read_audio(AC97LinkState *s, AC97BusMasterRegs *r, + int max, int *stop) { uint8_t tmpbuf[4096]; uint32_t addr = r->bd.addr; @@ -1034,7 +1013,7 @@ static int read_audio (AC97LinkState *s, AC97BusMasterRegs *r, int to_copy = 0; SWVoiceIn *voice = (r - s->bm_regs) == MC_INDEX ? 
s->voice_mc : s->voice_pi; - temp = MIN (temp, max); + temp = MIN(temp, max); if (!temp) { *stop = 1; @@ -1043,13 +1022,13 @@ static int read_audio (AC97LinkState *s, AC97BusMasterRegs *r, while (temp) { int acquired; - to_copy = MIN (temp, sizeof (tmpbuf)); - acquired = AUD_read (voice, tmpbuf, to_copy); + to_copy = MIN(temp, sizeof(tmpbuf)); + acquired = AUD_read(voice, tmpbuf, to_copy); if (!acquired) { *stop = 1; break; } - dma_memory_write (s->as, addr, tmpbuf, acquired); + dma_memory_write(s->as, addr, tmpbuf, acquired, MEMTXATTRS_UNSPECIFIED); temp -= acquired; addr += acquired; nread += acquired; @@ -1059,14 +1038,14 @@ static int read_audio (AC97LinkState *s, AC97BusMasterRegs *r, return nread; } -static void transfer_audio (AC97LinkState *s, int index, int elapsed) +static void transfer_audio(AC97LinkState *s, int index, int elapsed) { AC97BusMasterRegs *r = &s->bm_regs[index]; int stop = 0; if (s->invalid_freq[index]) { - AUD_log ("ac97", "attempt to use voice %d with invalid frequency %d\n", - index, s->invalid_freq[index]); + AUD_log("ac97", "attempt to use voice %d with invalid frequency %d\n", + index, s->invalid_freq[index]); return; } @@ -1074,7 +1053,7 @@ static void transfer_audio (AC97LinkState *s, int index, int elapsed) if (r->cr & CR_RPBM) { switch (index) { case PO_INDEX: - write_bup (s, elapsed); + write_bup(s, elapsed); break; } } @@ -1085,13 +1064,13 @@ static void transfer_audio (AC97LinkState *s, int index, int elapsed) int temp; if (!r->bd_valid) { - dolog ("invalid bd\n"); - fetch_bd (s, r); + dolog("invalid bd\n"); + fetch_bd(s, r); } if (!r->picb) { - dolog ("fresh bd %d is empty %#x %#x\n", - r->civ, r->bd.addr, r->bd.ctl_len); + dolog("fresh bd %d is empty 0x%x 0x%x\n", + r->civ, r->bd.addr, r->bd.ctl_len); if (r->civ == r->lvi) { r->sr |= SR_DCH; /* CELV? */ s->bup_flag = 0; @@ -1100,20 +1079,20 @@ static void transfer_audio (AC97LinkState *s, int index, int elapsed) r->sr &= ~SR_CELV; r->civ = r->piv; r->piv = (r->piv + 1) % 32; - fetch_bd (s, r); + fetch_bd(s, r); return; } switch (index) { case PO_INDEX: - temp = write_audio (s, r, elapsed, &stop); + temp = write_audio(s, r, elapsed, &stop); elapsed -= temp; r->picb -= (temp >> 1); break; case PI_INDEX: case MC_INDEX: - temp = read_audio (s, r, elapsed, &stop); + temp = read_audio(s, r, elapsed, &stop); elapsed -= temp; r->picb -= (temp >> 1); break; @@ -1127,61 +1106,102 @@ static void transfer_audio (AC97LinkState *s, int index, int elapsed) } if (r->civ == r->lvi) { - dolog ("Underrun civ (%d) == lvi (%d)\n", r->civ, r->lvi); + dolog("Underrun civ (%d) == lvi (%d)\n", r->civ, r->lvi); new_sr |= SR_LVBCI | SR_DCH | SR_CELV; stop = 1; s->bup_flag = (r->bd.ctl_len & BD_BUP) ? 
BUP_LAST : 0; - } - else { + } else { r->civ = r->piv; r->piv = (r->piv + 1) % 32; - fetch_bd (s, r); + fetch_bd(s, r); } - update_sr (s, r, new_sr); + update_sr(s, r, new_sr); } } } -static void pi_callback (void *opaque, int avail) +static void pi_callback(void *opaque, int avail) { - transfer_audio (opaque, PI_INDEX, avail); + transfer_audio(opaque, PI_INDEX, avail); } -static void mc_callback (void *opaque, int avail) +static void mc_callback(void *opaque, int avail) { - transfer_audio (opaque, MC_INDEX, avail); + transfer_audio(opaque, MC_INDEX, avail); } -static void po_callback (void *opaque, int free) +static void po_callback(void *opaque, int free) { - transfer_audio (opaque, PO_INDEX, free); + transfer_audio(opaque, PO_INDEX, free); } -static int ac97_post_load (void *opaque, int version_id) +static const VMStateDescription vmstate_ac97_bm_regs = { + .name = "ac97_bm_regs", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(bdbar, AC97BusMasterRegs), + VMSTATE_UINT8(civ, AC97BusMasterRegs), + VMSTATE_UINT8(lvi, AC97BusMasterRegs), + VMSTATE_UINT16(sr, AC97BusMasterRegs), + VMSTATE_UINT16(picb, AC97BusMasterRegs), + VMSTATE_UINT8(piv, AC97BusMasterRegs), + VMSTATE_UINT8(cr, AC97BusMasterRegs), + VMSTATE_UINT32(bd_valid, AC97BusMasterRegs), + VMSTATE_UINT32(bd.addr, AC97BusMasterRegs), + VMSTATE_UINT32(bd.ctl_len, AC97BusMasterRegs), + VMSTATE_END_OF_LIST() + } +}; + +static int ac97_post_load(void *opaque, int version_id) { uint8_t active[LAST_INDEX]; AC97LinkState *s = opaque; - record_select (s, mixer_load (s, AC97_Record_Select)); - set_volume (s, AC97_Master_Volume_Mute, - mixer_load (s, AC97_Master_Volume_Mute)); - set_volume (s, AC97_PCM_Out_Volume_Mute, - mixer_load (s, AC97_PCM_Out_Volume_Mute)); - set_volume (s, AC97_Record_Gain_Mute, - mixer_load (s, AC97_Record_Gain_Mute)); + record_select(s, mixer_load(s, AC97_Record_Select)); + set_volume(s, AC97_Master_Volume_Mute, + mixer_load(s, AC97_Master_Volume_Mute)); + set_volume(s, AC97_PCM_Out_Volume_Mute, + mixer_load(s, AC97_PCM_Out_Volume_Mute)); + set_volume(s, AC97_Record_Gain_Mute, + mixer_load(s, AC97_Record_Gain_Mute)); active[PI_INDEX] = !!(s->bm_regs[PI_INDEX].cr & CR_RPBM); active[PO_INDEX] = !!(s->bm_regs[PO_INDEX].cr & CR_RPBM); active[MC_INDEX] = !!(s->bm_regs[MC_INDEX].cr & CR_RPBM); - reset_voices (s, active); + reset_voices(s, active); s->bup_flag = 0; s->last_samp = 0; return 0; } +static bool is_version_2(void *opaque, int version_id) +{ + return version_id == 2; +} + +static const VMStateDescription vmstate_ac97 = { + .name = "ac97", + .version_id = 3, + .minimum_version_id = 2, + .post_load = ac97_post_load, + .fields = (VMStateField []) { + VMSTATE_PCI_DEVICE(dev, AC97DeviceState), + VMSTATE_UINT32(state.glob_cnt, AC97DeviceState), + VMSTATE_UINT32(state.glob_sta, AC97DeviceState), + VMSTATE_UINT32(state.cas, AC97DeviceState), + VMSTATE_STRUCT_ARRAY(state.bm_regs, AC97DeviceState, LAST_INDEX, 1, + vmstate_ac97_bm_regs, AC97BusMasterRegs), + VMSTATE_BUFFER(state.mixer_data, AC97DeviceState), + VMSTATE_UNUSED_TEST(is_version_2, 3), + VMSTATE_END_OF_LIST() + } +}; + static uint64_t nam_read(void *opaque, hwaddr addr, unsigned size) { dolog("nam_read [0x%llx] (%d)\n", addr, size); @@ -1255,7 +1275,7 @@ static uint64_t nabm_read(void *opaque, hwaddr addr, unsigned size) } static void nabm_write(void *opaque, hwaddr addr, uint64_t val, - unsigned size) + unsigned size) { dolog("nabm_write [0x%llx] = 0x%llx (%d)\n", addr, val, size); // if ((addr / size) > 64) { @@ 
-1287,94 +1307,35 @@ const MemoryRegionOps ac97_io_nabm_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; -static void ac97_on_reset (AC97LinkState *s) +static void ac97_on_reset(AC97LinkState *s) { - reset_bm_regs (s, &s->bm_regs[0]); - reset_bm_regs (s, &s->bm_regs[1]); - reset_bm_regs (s, &s->bm_regs[2]); + reset_bm_regs(s, &s->bm_regs[0]); + reset_bm_regs(s, &s->bm_regs[1]); + reset_bm_regs(s, &s->bm_regs[2]); /* * Reset the mixer too. The Windows XP driver seems to rely on * this. At least it wants to read the vendor id before it resets * the codec manually. */ - mixer_reset (s); + mixer_reset(s); } -void ac97_common_init (AC97LinkState *s, - PCIDevice* pci_dev, - AddressSpace *as) +void ac97_common_init(AC97LinkState *s, PCIDevice *pci_dev, AddressSpace *as) { - s->pci_dev = pci_dev; - s->as = as; + s->pci_dev = pci_dev; + s->as = as; - AUD_register_card ("ac97", &s->card); - ac97_on_reset (s); + AUD_register_card("ac97", &s->card); + ac97_on_reset(s); } -typedef struct AC97DeviceState { - PCIDevice dev; - AC97LinkState state; - - MemoryRegion io_nam; - MemoryRegion io_nabm; -} AC97DeviceState; - -static void ac97_on_device_reset (DeviceState *dev) +static void ac97_on_device_reset(DeviceState *dev) { AC97DeviceState *s = container_of(dev, AC97DeviceState, dev.qdev); - ac97_on_reset (&s->state); + ac97_on_reset(&s->state); } -#define AC97_DEVICE(obj) \ - OBJECT_CHECK(AC97DeviceState, (obj), "AC97") - - -static const VMStateDescription vmstate_ac97_bm_regs = { - .name = "ac97_bm_regs", - .version_id = 1, - .minimum_version_id = 1, - .minimum_version_id_old = 1, - .fields = (VMStateField []) { - VMSTATE_UINT32 (bdbar, AC97BusMasterRegs), - VMSTATE_UINT8 (civ, AC97BusMasterRegs), - VMSTATE_UINT8 (lvi, AC97BusMasterRegs), - VMSTATE_UINT16 (sr, AC97BusMasterRegs), - VMSTATE_UINT16 (picb, AC97BusMasterRegs), - VMSTATE_UINT8 (piv, AC97BusMasterRegs), - VMSTATE_UINT8 (cr, AC97BusMasterRegs), - VMSTATE_UINT32 (bd_valid, AC97BusMasterRegs), - VMSTATE_UINT32 (bd.addr, AC97BusMasterRegs), - VMSTATE_UINT32 (bd.ctl_len, AC97BusMasterRegs), - VMSTATE_END_OF_LIST () - } -}; - -static bool is_version_2 (void *opaque, int version_id) -{ - return version_id == 2; -} - -static const VMStateDescription vmstate_ac97 = { - .name = "ac97", - .version_id = 3, - .minimum_version_id = 2, - .minimum_version_id_old = 2, - .post_load = ac97_post_load, - .fields = (VMStateField []) { - VMSTATE_PCI_DEVICE (dev, AC97DeviceState), - VMSTATE_UINT32 (state.glob_cnt, AC97DeviceState), - VMSTATE_UINT32 (state.glob_sta, AC97DeviceState), - VMSTATE_UINT32 (state.cas, AC97DeviceState), - VMSTATE_STRUCT_ARRAY (state.bm_regs, AC97DeviceState, LAST_INDEX, 1, - vmstate_ac97_bm_regs, AC97BusMasterRegs), - VMSTATE_BUFFER (state.mixer_data, AC97DeviceState), - VMSTATE_UNUSED_TEST (is_version_2, 3), - VMSTATE_END_OF_LIST () - } -}; - - static void ac97_realize (PCIDevice *dev, Error **errp) { AC97DeviceState *s = AC97_DEVICE(dev); @@ -1407,12 +1368,12 @@ static void ac97_realize (PCIDevice *dev, Error **errp) c[PCI_INTERRUPT_LINE] = 0x00; /* intr_ln interrupt line rw */ c[PCI_INTERRUPT_PIN] = 0x01; /* intr_pn interrupt pin ro */ - memory_region_init_io (&s->io_nam, OBJECT(s), &ac97_io_nam_ops, &s->state, - "ac97-nam", 1024); - memory_region_init_io (&s->io_nabm, OBJECT(s), &ac97_io_nabm_ops, &s->state, - "ac97-nabm", 256); - pci_register_bar (&s->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io_nam); - pci_register_bar (&s->dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &s->io_nabm); + memory_region_init_io(&s->io_nam, OBJECT(s), &ac97_io_nam_ops, 
&s->state, + "ac97-nam", 1024); + memory_region_init_io(&s->io_nabm, OBJECT(s), &ac97_io_nabm_ops, &s->state, + "ac97-nabm", 256); + pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io_nam); + pci_register_bar(&s->dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &s->io_nabm); ac97_common_init(&s->state, &s->dev, pci_get_address_space(&s->dev)); } @@ -1426,13 +1387,13 @@ static void ac97_exit (PCIDevice *dev) static Property ac97_properties[] = { DEFINE_AUDIO_PROPERTIES(AC97DeviceState, state.card), - DEFINE_PROP_END_OF_LIST (), + DEFINE_PROP_END_OF_LIST(), }; -static void ac97_class_init (ObjectClass *klass, void *data) +static void ac97_class_init(ObjectClass *klass, void *data) { - DeviceClass *dc = DEVICE_CLASS (klass); - PCIDeviceClass *k = PCI_DEVICE_CLASS (klass); + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); k->realize = ac97_realize; k->exit = ac97_exit; @@ -1450,7 +1411,7 @@ static void ac97_class_init (ObjectClass *klass, void *data) static const TypeInfo ac97_info = { .name = TYPE_AC97, .parent = TYPE_PCI_DEVICE, - .instance_size = sizeof (AC97DeviceState), + .instance_size = sizeof(AC97DeviceState), .class_init = ac97_class_init, .interfaces = (InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, @@ -1458,12 +1419,11 @@ static const TypeInfo ac97_info = { }, }; -static void ac97_register_types (void) +static void ac97_register_types(void) { - type_register_static (&ac97_info); + type_register_static(&ac97_info); deprecated_register_soundhw("ac97", "Intel 82801AA AC97 Audio", 0, TYPE_AC97); } -type_init (ac97_register_types) - +type_init(ac97_register_types) diff --git a/hw/audio/cs4231a.c b/hw/audio/cs4231a.c index 7d60ce6f0e..7f17a72a9c 100644 --- a/hw/audio/cs4231a.c +++ b/hw/audio/cs4231a.c @@ -84,7 +84,7 @@ struct CSState { int transferred; int aci_counter; SWVoiceOut *voice; - int16_t *tab; + const int16_t *tab; }; #define MODE2 (1 << 6) @@ -142,13 +142,13 @@ enum { Capture_Lower_Base_Count }; -static int freqs[2][8] = { +static const int freqs[2][8] = { { 8000, 16000, 27420, 32000, -1, -1, 48000, 9000 }, { 5510, 11025, 18900, 22050, 37800, 44100, 33075, 6620 } }; /* Tables courtesy http://hazelware.luggle.com/tutorials/mulawcompression.html */ -static int16_t MuLawDecompressTable[256] = +static const int16_t MuLawDecompressTable[256] = { -32124,-31100,-30076,-29052,-28028,-27004,-25980,-24956, -23932,-22908,-21884,-20860,-19836,-18812,-17788,-16764, @@ -184,7 +184,7 @@ static int16_t MuLawDecompressTable[256] = 56, 48, 40, 32, 24, 16, 8, 0 }; -static int16_t ALawDecompressTable[256] = +static const int16_t ALawDecompressTable[256] = { -5504, -5248, -6016, -5760, -4480, -4224, -4992, -4736, -7552, -7296, -8064, -7808, -6528, -6272, -7040, -6784, @@ -677,7 +677,7 @@ static void cs4231a_realizefn (DeviceState *dev, Error **errp) return; } - isa_init_irq(d, &s->pic, s->irq); + s->pic = isa_get_irq(d, s->irq); k = ISADMA_GET_CLASS(s->isa_dma); k->register_channel(s->isa_dma, s->dma, cs_dma_read, s); diff --git a/hw/audio/gus.c b/hw/audio/gus.c index e8719ee117..42f010b671 100644 --- a/hw/audio/gus.c +++ b/hw/audio/gus.c @@ -282,7 +282,7 @@ static void gus_realizefn (DeviceState *dev, Error **errp) s->emu.himemaddr = s->himem; s->emu.gusdatapos = s->emu.himemaddr + 1024 * 1024 + 32; s->emu.opaque = s; - isa_init_irq (d, &s->pic, s->emu.gusirq); + s->pic = isa_get_irq(d, s->emu.gusirq); AUD_set_active_out (s->voice, 1); } diff --git a/hw/audio/intel-hda.c b/hw/audio/intel-hda.c index 4330213fff..b9ed231fe8 100644 --- 
a/hw/audio/intel-hda.c +++ b/hw/audio/intel-hda.c @@ -52,7 +52,7 @@ void hda_codec_bus_init(DeviceState *dev, HDACodecBus *bus, size_t bus_size, hda_codec_response_func response, hda_codec_xfer_func xfer) { - qbus_create_inplace(bus, bus_size, TYPE_HDA_BUS, dev, NULL); + qbus_init(bus, bus_size, TYPE_HDA_BUS, dev, NULL); bus->response = response; bus->xfer = xfer; } @@ -220,8 +220,6 @@ struct IntelHDAReg { void (*rhandler)(IntelHDAState *d, const IntelHDAReg *reg); }; -static void intel_hda_reset(DeviceState *dev); - /* --------------------------------------------------------------------- */ static hwaddr intel_hda_addr(uint32_t lbase, uint32_t ubase) @@ -335,7 +333,7 @@ static void intel_hda_corb_run(IntelHDAState *d) rp = (d->corb_rp + 1) & 0xff; addr = intel_hda_addr(d->corb_lbase, d->corb_ubase); - verb = ldl_le_pci_dma(&d->pci, addr + 4*rp); + ldl_le_pci_dma(&d->pci, addr + 4 * rp, &verb, MEMTXATTRS_UNSPECIFIED); d->corb_rp = rp; dprint(d, 2, "%s: [rp 0x%x] verb 0x%08x\n", __func__, rp, verb); @@ -345,10 +343,12 @@ static void intel_hda_corb_run(IntelHDAState *d) static void intel_hda_response(HDACodecDevice *dev, bool solicited, uint32_t response) { + const MemTxAttrs attrs = { .memory = true }; HDACodecBus *bus = HDA_BUS(dev->qdev.parent_bus); IntelHDAState *d = container_of(bus, IntelHDAState, codecs); hwaddr addr; uint32_t wp, ex; + MemTxResult res = MEMTX_OK; if (d->ics & ICH6_IRS_BUSY) { dprint(d, 2, "%s: [irr] response 0x%x, cad 0x%x\n", @@ -367,8 +367,12 @@ static void intel_hda_response(HDACodecDevice *dev, bool solicited, uint32_t res ex = (solicited ? 0 : (1 << 4)) | dev->cad; wp = (d->rirb_wp + 1) & 0xff; addr = intel_hda_addr(d->rirb_lbase, d->rirb_ubase); - stl_le_pci_dma(&d->pci, addr + 8*wp, response); - stl_le_pci_dma(&d->pci, addr + 8*wp + 4, ex); + res |= stl_le_pci_dma(&d->pci, addr + 8 * wp, response, attrs); + res |= stl_le_pci_dma(&d->pci, addr + 8 * wp + 4, ex, attrs); + if (res != MEMTX_OK && (d->rirb_ctl & ICH6_RBCTL_OVERRUN_EN)) { + d->rirb_sts |= ICH6_RBSTS_OVERRUN; + intel_hda_update_irq(d); + } d->rirb_wp = wp; dprint(d, 2, "%s: [wp 0x%x] response 0x%x, extra 0x%x\n", @@ -394,6 +398,7 @@ static void intel_hda_response(HDACodecDevice *dev, bool solicited, uint32_t res static bool intel_hda_xfer(HDACodecDevice *dev, uint32_t stnr, bool output, uint8_t *buf, uint32_t len) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; HDACodecBus *bus = HDA_BUS(dev->qdev.parent_bus); IntelHDAState *d = container_of(bus, IntelHDAState, codecs); hwaddr addr; @@ -427,7 +432,8 @@ static bool intel_hda_xfer(HDACodecDevice *dev, uint32_t stnr, bool output, dprint(d, 3, "dma: entry %d, pos %d/%d, copy %d\n", st->be, st->bp, st->bpl[st->be].len, copy); - pci_dma_rw(&d->pci, st->bpl[st->be].addr + st->bp, buf, copy, !output); + pci_dma_rw(&d->pci, st->bpl[st->be].addr + st->bp, buf, copy, !output, + attrs); st->lpib += copy; st->bp += copy; buf += copy; @@ -450,7 +456,7 @@ static bool intel_hda_xfer(HDACodecDevice *dev, uint32_t stnr, bool output, if (d->dp_lbase & 0x01) { s = st - d->st; addr = intel_hda_addr(d->dp_lbase & ~0x01, d->dp_ubase); - stl_le_pci_dma(&d->pci, addr + 8*s, st->lpib); + stl_le_pci_dma(&d->pci, addr + 8 * s, st->lpib, attrs); } dprint(d, 3, "dma: --\n"); @@ -470,7 +476,7 @@ static void intel_hda_parse_bdl(IntelHDAState *d, IntelHDAStream *st) addr = intel_hda_addr(st->bdlp_lbase, st->bdlp_ubase); st->bentries = st->lvi +1; g_free(st->bpl); - st->bpl = g_malloc(sizeof(bpl) * st->bentries); + st->bpl = g_new(bpl, st->bentries); for (i = 0; i < 
st->bentries; i++, addr += 16) { pci_dma_read(&d->pci, addr, buf, 16); st->bpl[i].addr = le64_to_cpu(*(uint64_t *)buf); @@ -508,7 +514,7 @@ static void intel_hda_notify_codecs(IntelHDAState *d, uint32_t stream, bool runn static void intel_hda_set_g_ctl(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old) { if ((d->g_ctl & ICH6_GCTL_RESET) == 0) { - intel_hda_reset(DEVICE(d)); + device_cold_reset(DEVICE(d)); } } @@ -578,7 +584,7 @@ static void intel_hda_set_st_ctl(IntelHDAState *d, const IntelHDAReg *reg, uint3 if (st->ctl & 0x01) { /* reset */ dprint(d, 1, "st #%d: reset\n", reg->stream); - st->ctl = SD_STS_FIFO_READY << 24; + st->ctl = SD_STS_FIFO_READY << 24 | SD_CTL_STREAM_RESET; } if ((st->ctl & 0x02) != (old & 0x02)) { uint32_t stnr = (st->ctl >> 20) & 0x0f; @@ -1075,11 +1081,9 @@ static void intel_hda_reset(DeviceState *dev) intel_hda_regs_reset(d); d->wall_base_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - /* reset codecs */ QTAILQ_FOREACH(kid, &d->codecs.qbus.children, sibling) { DeviceState *qdev = kid->child; cdev = HDA_CODEC_DEVICE(qdev); - device_legacy_reset(DEVICE(cdev)); d->state_sts |= (1 << cdev->cad); } intel_hda_update_irq(d); @@ -1303,17 +1307,16 @@ static const TypeInfo hda_codec_device_type_info = { * create intel hda controller with codec attached to it, * so '-soundhw hda' works. */ -static int intel_hda_and_codec_init(PCIBus *bus) +static int intel_hda_and_codec_init(PCIBus *bus, const char *audiodev) { DeviceState *controller; BusState *hdabus; DeviceState *codec; - warn_report("'-soundhw hda' is deprecated, " - "please use '-device intel-hda -device hda-duplex' instead"); controller = DEVICE(pci_create_simple(bus, -1, "intel-hda")); hdabus = QLIST_FIRST(&controller->child_bus); codec = qdev_new("hda-duplex"); + qdev_prop_set_string(codec, "audiodev", audiodev); qdev_realize_and_unref(codec, hdabus, &error_fatal); return 0; } diff --git a/hw/audio/pcspk.c b/hw/audio/pcspk.c index b056c05387..daf92a4ce1 100644 --- a/hw/audio/pcspk.c +++ b/hw/audio/pcspk.c @@ -209,7 +209,6 @@ static const VMStateDescription vmstate_spk = { .name = "pcspk", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .needed = migrate_needed, .fields = (VMStateField[]) { VMSTATE_UINT8(data_on, PCSpkState), @@ -246,18 +245,8 @@ static const TypeInfo pcspk_info = { .class_init = pcspk_class_initfn, }; -static int pcspk_audio_init_soundhw(ISABus *bus) -{ - PCSpkState *s = pcspk_state; - - warn_report("'-soundhw pcspk' is deprecated, " - "please set a backend using '-machine pcspk-audiodev=' instead"); - return pcspk_audio_init(s); -} - static void pcspk_register(void) { type_register_static(&pcspk_info); - isa_register_soundhw("pcspk", "PC speaker", pcspk_audio_init_soundhw); } type_init(pcspk_register) diff --git a/hw/audio/sb16.c b/hw/audio/sb16.c index 60f1f75e3a..2215386ddb 100644 --- a/hw/audio/sb16.c +++ b/hw/audio/sb16.c @@ -1408,7 +1408,7 @@ static void sb16_realizefn (DeviceState *dev, Error **errp) return; } - isa_init_irq (isadev, &s->pic, s->irq); + s->pic = isa_get_irq(isadev, s->irq); s->mixer_regs[0x80] = magic_of_irq (s->irq); s->mixer_regs[0x81] = (1 << s->dma) | (1 << s->hdma); diff --git a/hw/audio/soundhw.c b/hw/audio/soundhw.c index 173b674ff5..94d9463e42 100644 --- a/hw/audio/soundhw.c +++ b/hw/audio/soundhw.c @@ -25,7 +25,9 @@ #include "qemu/option.h" #include "qemu/help_option.h" #include "qemu/error-report.h" +#include "qapi/error.h" #include "qom/object.h" +#include "hw/qdev-properties.h" #include "hw/isa/isa.h" #include "hw/pci/pci.h" #include 
"hw/audio/soundhw.h" @@ -34,36 +36,21 @@ struct soundhw { const char *name; const char *descr; const char *typename; - int enabled; int isa; - union { - int (*init_isa) (ISABus *bus); - int (*init_pci) (PCIBus *bus); - } init; + int (*init_pci) (PCIBus *bus, const char *audiodev); }; static struct soundhw soundhw[9]; static int soundhw_count; -void isa_register_soundhw(const char *name, const char *descr, - int (*init_isa)(ISABus *bus)) -{ - assert(soundhw_count < ARRAY_SIZE(soundhw) - 1); - soundhw[soundhw_count].name = name; - soundhw[soundhw_count].descr = descr; - soundhw[soundhw_count].isa = 1; - soundhw[soundhw_count].init.init_isa = init_isa; - soundhw_count++; -} - void pci_register_soundhw(const char *name, const char *descr, - int (*init_pci)(PCIBus *bus)) + int (*init_pci)(PCIBus *bus, const char *audiodev)) { assert(soundhw_count < ARRAY_SIZE(soundhw) - 1); soundhw[soundhw_count].name = name; soundhw[soundhw_count].descr = descr; soundhw[soundhw_count].isa = 0; - soundhw[soundhw_count].init.init_pci = init_pci; + soundhw[soundhw_count].init_pci = init_pci; soundhw_count++; } @@ -78,100 +65,78 @@ void deprecated_register_soundhw(const char *name, const char *descr, soundhw_count++; } -void select_soundhw(const char *optarg) +void show_valid_soundhw(void) { struct soundhw *c; - if (is_help_option(optarg)) { - show_valid_cards: - - if (soundhw_count) { - printf("Valid sound card names (comma separated):\n"); - for (c = soundhw; c->name; ++c) { - printf ("%-11s %s\n", c->name, c->descr); - } - printf("\n-soundhw all will enable all of the above\n"); - } else { - printf("Machine has no user-selectable audio hardware " - "(it may or may not have always-present audio hardware).\n"); - } - exit(!is_help_option(optarg)); + if (soundhw_count) { + printf("Valid sound card names (comma separated):\n"); + for (c = soundhw; c->name; ++c) { + printf ("%-11s %s\n", c->name, c->descr); + } + } else { + printf("Machine has no user-selectable audio hardware " + "(it may or may not have always-present audio hardware).\n"); } - else { - size_t l; - const char *p; - char *e; - int bad_card = 0; +} - if (!strcmp(optarg, "all")) { - for (c = soundhw; c->name; ++c) { - c->enabled = 1; - } - return; +static struct soundhw *selected = NULL; +static const char *audiodev_id; + +void select_soundhw(const char *optarg, const char *audiodev) +{ + struct soundhw *c; + + if (selected) { + error_setg(&error_fatal, "only one -soundhw option is allowed"); + } + + for (c = soundhw; c->name; ++c) { + if (g_str_equal(c->name, optarg)) { + selected = c; + audiodev_id = audiodev; + break; } + } - p = optarg; - while (*p) { - e = strchr(p, ','); - l = !e ? 
strlen(p) : (size_t) (e - p); - - for (c = soundhw; c->name; ++c) { - if (!strncmp(c->name, p, l) && !c->name[l]) { - c->enabled = 1; - break; - } - } - - if (!c->name) { - if (l > 80) { - error_report("Unknown sound card name (too big to show)"); - } - else { - error_report("Unknown sound card name `%.*s'", - (int) l, p); - } - bad_card = 1; - } - p += l + (e != NULL); - } - - if (bad_card) { - goto show_valid_cards; - } + if (!c->name) { + error_report("Unknown sound card name `%s'", optarg); + show_valid_soundhw(); + exit(1); } } void soundhw_init(void) { - struct soundhw *c; + struct soundhw *c = selected; ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS, NULL); PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL); + BusState *bus; - for (c = soundhw; c->name; ++c) { - if (c->enabled) { - if (c->typename) { - warn_report("'-soundhw %s' is deprecated, " - "please use '-device %s' instead", - c->name, c->typename); - if (c->isa) { - isa_create_simple(isa_bus, c->typename); - } else { - pci_create_simple(pci_bus, -1, c->typename); - } - } else if (c->isa) { - if (!isa_bus) { - error_report("ISA bus not available for %s", c->name); - exit(1); - } - c->init.init_isa(isa_bus); - } else { - if (!pci_bus) { - error_report("PCI bus not available for %s", c->name); - exit(1); - } - c->init.init_pci(pci_bus); - } + if (!c) { + return; + } + if (c->isa) { + if (!isa_bus) { + error_report("ISA bus not available for %s", c->name); + exit(1); } + bus = BUS(isa_bus); + } else { + if (!pci_bus) { + error_report("PCI bus not available for %s", c->name); + exit(1); + } + bus = BUS(pci_bus); + } + + if (c->typename) { + DeviceState *dev = qdev_new(c->typename); + qdev_prop_set_string(dev, "audiodev", audiodev_id); + qdev_realize_and_unref(dev, bus, &error_fatal); + } else { + assert(!c->isa); + c->init_pci(pci_bus, audiodev_id); } } diff --git a/hw/avr/atmega.c b/hw/avr/atmega.c index 0608e2d475..a34803e642 100644 --- a/hw/avr/atmega.c +++ b/hw/avr/atmega.c @@ -233,7 +233,7 @@ static void atmega_realize(DeviceState *dev, Error **errp) /* CPU */ object_initialize_child(OBJECT(dev), "cpu", &s->cpu, mc->cpu_type); - object_property_set_bool(OBJECT(&s->cpu), "realized", true, &error_abort); + qdev_realize(DEVICE(&s->cpu), NULL, &error_abort); cpudev = DEVICE(&s->cpu); /* SRAM */ diff --git a/hw/avr/boot.c b/hw/avr/boot.c index cbede775ce..617f3a144c 100644 --- a/hw/avr/boot.c +++ b/hw/avr/boot.c @@ -9,7 +9,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "hw/loader.h" #include "elf.h" diff --git a/hw/block/block.c b/hw/block/block.c index d6f0afb812..ff85ec0757 100644 --- a/hw/block/block.c +++ b/hw/block/block.c @@ -61,7 +61,7 @@ bool blk_check_size_and_read_all(BlockBackend *blk, void *buf, hwaddr size, * block device and read only on demand. 
*/ assert(size <= BDRV_REQUEST_MAX_BYTES); - ret = blk_pread(blk, 0, buf, size); + ret = blk_pread(blk, 0, size, buf, 0); if (ret < 0) { error_setg_errno(errp, -ret, "can't read block backend"); return false; @@ -179,8 +179,7 @@ bool blkconf_apply_backend_options(BlockConf *conf, bool readonly, perm |= BLK_PERM_WRITE; } - shared_perm = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED | - BLK_PERM_GRAPH_MOD; + shared_perm = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED; if (resizable) { shared_perm |= BLK_PERM_RESIZE; } @@ -214,6 +213,8 @@ bool blkconf_apply_backend_options(BlockConf *conf, bool readonly, blk_set_enable_write_cache(blk, wce); blk_set_on_error(blk, rerror, werror); + block_acct_setup(blk_get_stats(blk), conf->account_invalid, + conf->account_failed); return true; } diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c index 252c3a7a23..26f965cabc 100644 --- a/hw/block/dataplane/virtio-blk.c +++ b/hw/block/dataplane/virtio-blk.c @@ -154,17 +154,6 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s) g_free(s); } -static bool virtio_blk_data_plane_handle_output(VirtIODevice *vdev, - VirtQueue *vq) -{ - VirtIOBlock *s = (VirtIOBlock *)vdev; - - assert(s->dataplane); - assert(s->dataplane_started); - - return virtio_blk_handle_vq(s, vq); -} - /* Context: QEMU global mutex held */ int virtio_blk_data_plane_start(VirtIODevice *vdev) { @@ -222,7 +211,7 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev) memory_region_transaction_commit(); while (j--) { - virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i); + virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), j); } goto fail_host_notifiers; } @@ -230,6 +219,11 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev) memory_region_transaction_commit(); + /* + * These fields are visible to the IOThread so we rely on implicit barriers + * in aio_context_acquire() on the write side and aio_notify_accept() on + * the read side. 
+ */ s->starting = false; vblk->dataplane_started = true; trace_virtio_blk_data_plane_start(s); @@ -258,8 +252,7 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev) for (i = 0; i < nvqs; i++) { VirtQueue *vq = virtio_get_queue(s->vdev, i); - virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, - virtio_blk_data_plane_handle_output); + virtio_queue_aio_attach_host_notifier(vq, s->ctx); } aio_context_release(s->ctx); return 0; @@ -302,7 +295,7 @@ static void virtio_blk_data_plane_stop_bh(void *opaque) for (i = 0; i < s->conf->num_queues; i++) { VirtQueue *vq = virtio_get_queue(s->vdev, i); - virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, NULL); + virtio_queue_aio_detach_host_notifier(vq, s->ctx); } } diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c index 860787580a..2785b9e849 100644 --- a/hw/block/dataplane/xen-block.c +++ b/hw/block/dataplane/xen-block.c @@ -21,6 +21,7 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" +#include "qemu/memalign.h" #include "qapi/error.h" #include "hw/xen/xen_common.h" #include "hw/block/xen_blkif.h" diff --git a/hw/block/fdc-isa.c b/hw/block/fdc-isa.c index 3bf64e0665..fee1ca68a8 100644 --- a/hw/block/fdc-isa.c +++ b/hw/block/fdc-isa.c @@ -32,7 +32,7 @@ #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/timer.h" -#include "hw/acpi/aml-build.h" +#include "hw/acpi/acpi_aml_interface.h" #include "hw/irq.h" #include "hw/isa/isa.h" #include "hw/qdev-properties.h" @@ -94,7 +94,7 @@ static void isabus_fdc_realize(DeviceState *dev, Error **errp) isa->iobase, fdc_portio_list, fdctrl, "fdc"); - isa_init_irq(isadev, &fdctrl->irq, isa->irq); + fdctrl->irq = isa_get_irq(isadev, isa->irq); fdctrl->dma_chann = isa->dma; if (fdctrl->dma_chann != -1) { IsaDmaClass *k; @@ -214,8 +214,9 @@ int cmos_get_fd_drive_type(FloppyDriveType fd0) return val; } -static void fdc_isa_build_aml(ISADevice *isadev, Aml *scope) +static void build_fdc_aml(AcpiDevAmlIf *adev, Aml *scope) { + FDCtrlISABus *isa = ISA_FDC(adev); Aml *dev; Aml *crs; int i; @@ -227,18 +228,20 @@ static void fdc_isa_build_aml(ISADevice *isadev, Aml *scope) }; crs = aml_resource_template(); - aml_append(crs, aml_io(AML_DECODE16, 0x03F2, 0x03F2, 0x00, 0x04)); - aml_append(crs, aml_io(AML_DECODE16, 0x03F7, 0x03F7, 0x00, 0x01)); - aml_append(crs, aml_irq_no_flags(6)); aml_append(crs, - aml_dma(AML_COMPATIBILITY, AML_NOTBUSMASTER, AML_TRANSFER8, 2)); + aml_io(AML_DECODE16, isa->iobase + 2, isa->iobase + 2, 0x00, 0x04)); + aml_append(crs, + aml_io(AML_DECODE16, isa->iobase + 7, isa->iobase + 7, 0x00, 0x01)); + aml_append(crs, aml_irq_no_flags(isa->irq)); + aml_append(crs, + aml_dma(AML_COMPATIBILITY, AML_NOTBUSMASTER, AML_TRANSFER8, isa->dma)); dev = aml_device("FDC0"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0700"))); aml_append(dev, aml_name_decl("_CRS", crs)); for (i = 0; i < MIN(MAX_FD, ACPI_FDE_MAX_FD); i++) { - FloppyDriveType type = isa_fdc_get_drive_type(isadev, i); + FloppyDriveType type = isa_fdc_get_drive_type(ISA_DEVICE(adev), i); if (type < FLOPPY_DRIVE_TYPE_NONE) { fde_buf[i] = cpu_to_le32(1); /* drive present */ @@ -280,14 +283,14 @@ static Property isa_fdc_properties[] = { static void isabus_fdc_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - ISADeviceClass *isa = ISA_DEVICE_CLASS(klass); + AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); dc->desc = "virtual floppy controller"; dc->realize = isabus_fdc_realize; dc->fw_name = "fdc"; dc->reset = 
fdctrl_external_reset_isa; dc->vmsd = &vmstate_isa_fdc; - isa->build_aml = fdc_isa_build_aml; + adevc->build_dev_aml = build_fdc_aml; device_class_set_props(dc, isa_fdc_properties); set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } @@ -310,6 +313,10 @@ static const TypeInfo isa_fdc_info = { .instance_size = sizeof(FDCtrlISABus), .class_init = isabus_fdc_class_init, .instance_init = isabus_fdc_instance_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_ACPI_DEV_AML_IF }, + { }, + }, }; static void isa_fdc_register_types(void) diff --git a/hw/block/fdc-sysbus.c b/hw/block/fdc-sysbus.c index 57fc8773f1..86ea51d003 100644 --- a/hw/block/fdc-sysbus.c +++ b/hw/block/fdc-sysbus.c @@ -94,18 +94,14 @@ static void fdctrl_handle_tc(void *opaque, int irq, int level) trace_fdctrl_tc_pulse(level); } -void fdctrl_init_sysbus(qemu_irq irq, int dma_chann, - hwaddr mmio_base, DriveInfo **fds) +void fdctrl_init_sysbus(qemu_irq irq, hwaddr mmio_base, DriveInfo **fds) { - FDCtrl *fdctrl; DeviceState *dev; SysBusDevice *sbd; FDCtrlSysBus *sys; dev = qdev_new("sysbus-fdc"); sys = SYSBUS_FDC(dev); - fdctrl = &sys->state; - fdctrl->dma_chann = dma_chann; /* FIXME */ sbd = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(sbd, &error_fatal); sysbus_connect_irq(sbd, 0, irq); @@ -138,6 +134,16 @@ static void sysbus_fdc_common_instance_init(Object *obj) FDCtrlSysBus *sys = SYSBUS_FDC(obj); FDCtrl *fdctrl = &sys->state; + /* + * DMA is not currently supported for sysbus floppy controllers. + * If we wanted to add support then probably the best approach is + * to have a QOM link property 'dma-controller' which the board + * code can set to an instance of IsaDmaClass, and an integer + * property 'dma-channel', so that we can set fdctrl->dma and + * fdctrl->dma_chann accordingly. + */ + fdctrl->dma_chann = -1; + qdev_set_legacy_instance_id(dev, 0 /* io */, 2); /* FIXME */ memory_region_init_io(&fdctrl->iomem, obj, diff --git a/hw/block/fdc.c b/hw/block/fdc.c index 9014cd30b3..64ae4a6899 100644 --- a/hw/block/fdc.c +++ b/hw/block/fdc.c @@ -32,6 +32,7 @@ #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/timer.h" +#include "qemu/memalign.h" #include "hw/irq.h" #include "hw/isa/isa.h" #include "hw/qdev-properties.h" @@ -61,6 +62,12 @@ } while (0) +/* Anonymous BlockBackend for empty drive */ +static BlockBackend *blk_create_empty_drive(void) +{ + return blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); +} + /********************************************************/ /* qdev floppy bus */ @@ -77,7 +84,7 @@ static const TypeInfo floppy_bus_info = { static void floppy_bus_create(FDCtrl *fdc, FloppyBus *bus, DeviceState *dev) { - qbus_create_inplace(bus, sizeof(FloppyBus), TYPE_FLOPPY_BUS, dev, NULL); + qbus_init(bus, sizeof(FloppyBus), TYPE_FLOPPY_BUS, dev, NULL); bus->fdc = fdc; } @@ -486,8 +493,7 @@ static void floppy_drive_realize(DeviceState *qdev, Error **errp) } if (!dev->conf.blk) { - /* Anonymous BlockBackend for an empty drive */ - dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); + dev->conf.blk = blk_create_empty_drive(); ret = blk_attach_dev(dev->conf.blk, qdev); assert(ret == 0); @@ -1161,7 +1167,19 @@ static FDrive *get_drv(FDCtrl *fdctrl, int unit) static FDrive *get_cur_drv(FDCtrl *fdctrl) { - return get_drv(fdctrl, fdctrl->cur_drv); + FDrive *cur_drv = get_drv(fdctrl, fdctrl->cur_drv); + + if (!cur_drv->blk) { + /* + * Kludge: empty drive line selected. 
Create an anonymous + * BlockBackend to avoid NULL deref with various BlockBackend + * API calls within this model (CVE-2021-20196). + * Due to the controller QOM model limitations, we don't + * attach the created to the controller device. + */ + cur_drv->blk = blk_create_empty_drive(); + } + return cur_drv; } /* Status A register : 0x00 (read-only) */ @@ -1512,6 +1530,14 @@ static void fdctrl_start_transfer(FDCtrl *fdctrl, int direction) int tmp; fdctrl->data_len = 128 << (fdctrl->fifo[5] > 7 ? 7 : fdctrl->fifo[5]); tmp = (fdctrl->fifo[6] - ks + 1); + if (tmp < 0) { + FLOPPY_DPRINTF("invalid EOT: %d\n", tmp); + fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, FD_SR1_MA, 0x00); + fdctrl->fifo[3] = kt; + fdctrl->fifo[4] = kh; + fdctrl->fifo[5] = ks; + return; + } if (fdctrl->fifo[0] & 0x80) tmp += fdctrl->fifo[6]; fdctrl->data_len *= tmp; @@ -1602,8 +1628,8 @@ int fdctrl_transfer_handler(void *opaque, int nchan, int dma_pos, int dma_len) if (fdctrl->data_dir != FD_DIR_WRITE || len < FD_SECTOR_LEN || rel_pos != 0) { /* READ & SCAN commands and realign to a sector for WRITE */ - if (blk_pread(cur_drv->blk, fd_offset(cur_drv), - fdctrl->fifo, BDRV_SECTOR_SIZE) < 0) { + if (blk_pread(cur_drv->blk, fd_offset(cur_drv), BDRV_SECTOR_SIZE, + fdctrl->fifo, 0) < 0) { FLOPPY_DPRINTF("Floppy: error getting sector %d\n", fd_sector(cur_drv)); /* Sure, image size is too small... */ @@ -1630,8 +1656,8 @@ int fdctrl_transfer_handler(void *opaque, int nchan, int dma_pos, int dma_len) k->read_memory(fdctrl->dma, nchan, fdctrl->fifo + rel_pos, fdctrl->data_pos, len); - if (blk_pwrite(cur_drv->blk, fd_offset(cur_drv), - fdctrl->fifo, BDRV_SECTOR_SIZE, 0) < 0) { + if (blk_pwrite(cur_drv->blk, fd_offset(cur_drv), BDRV_SECTOR_SIZE, + fdctrl->fifo, 0) < 0) { FLOPPY_DPRINTF("error writing sector %d\n", fd_sector(cur_drv)); fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM | FD_SR0_SEEK, 0x00, 0x00); @@ -1714,8 +1740,8 @@ static uint32_t fdctrl_read_data(FDCtrl *fdctrl) fd_sector(cur_drv)); return 0; } - if (blk_pread(cur_drv->blk, fd_offset(cur_drv), fdctrl->fifo, - BDRV_SECTOR_SIZE) + if (blk_pread(cur_drv->blk, fd_offset(cur_drv), BDRV_SECTOR_SIZE, + fdctrl->fifo, 0) < 0) { FLOPPY_DPRINTF("error getting sector %d\n", fd_sector(cur_drv)); @@ -1794,8 +1820,8 @@ static void fdctrl_format_sector(FDCtrl *fdctrl) } memset(fdctrl->fifo, 0, FD_SECTOR_LEN); if (cur_drv->blk == NULL || - blk_pwrite(cur_drv->blk, fd_offset(cur_drv), fdctrl->fifo, - BDRV_SECTOR_SIZE, 0) < 0) { + blk_pwrite(cur_drv->blk, fd_offset(cur_drv), BDRV_SECTOR_SIZE, + fdctrl->fifo, 0) < 0) { FLOPPY_DPRINTF("error formatting sector %d\n", fd_sector(cur_drv)); fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM | FD_SR0_SEEK, 0x00, 0x00); } else { @@ -2218,8 +2244,8 @@ static void fdctrl_write_data(FDCtrl *fdctrl, uint32_t value) if (pos == FD_SECTOR_LEN - 1 || fdctrl->data_pos == fdctrl->data_len) { cur_drv = get_cur_drv(fdctrl); - if (blk_pwrite(cur_drv->blk, fd_offset(cur_drv), fdctrl->fifo, - BDRV_SECTOR_SIZE, 0) < 0) { + if (blk_pwrite(cur_drv->blk, fd_offset(cur_drv), BDRV_SECTOR_SIZE, + fdctrl->fifo, 0) < 0) { FLOPPY_DPRINTF("error writing sector %d\n", fd_sector(cur_drv)); break; diff --git a/hw/block/hd-geometry.c b/hw/block/hd-geometry.c index dcbccee294..dae13ab14d 100644 --- a/hw/block/hd-geometry.c +++ b/hw/block/hd-geometry.c @@ -63,7 +63,7 @@ static int guess_disk_lchs(BlockBackend *blk, blk_get_geometry(blk, &nb_sectors); - if (blk_pread(blk, 0, buf, BDRV_SECTOR_SIZE) < 0) { + if (blk_pread(blk, 0, BDRV_SECTOR_SIZE, buf, 0) < 0) { return -1; } /* test 
msdos magic */ @@ -150,7 +150,12 @@ void hd_geometry_guess(BlockBackend *blk, translation = BIOS_ATA_TRANSLATION_NONE; } if (ptrans) { - *ptrans = translation; + if (*ptrans == BIOS_ATA_TRANSLATION_AUTO) { + *ptrans = translation; + } else { + /* Defer to the translation specified by the user. */ + translation = *ptrans; + } } trace_hd_geometry_guess(blk, *pcyls, *pheads, *psecs, translation); } diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c index b77503dc84..02adc87527 100644 --- a/hw/block/m25p80.c +++ b/hw/block/m25p80.c @@ -35,22 +35,21 @@ #include "qapi/error.h" #include "trace.h" #include "qom/object.h" - -/* Fields for FlashPartInfo->flags */ - -/* erase capabilities */ -#define ER_4K 1 -#define ER_32K 2 -/* set to allow the page program command to write 0s back to 1. Useful for - * modelling EEPROM with SPI flash command set - */ -#define EEPROM 0x100 +#include "m25p80_sfdp.h" /* 16 MiB max in 3 byte address mode */ #define MAX_3BYTES_SIZE 0x1000000 - #define SPI_NOR_MAX_ID_LEN 6 +/* Fields for FlashPartInfo->flags */ +enum spi_flash_option_flags { + ER_4K = BIT(0), + ER_32K = BIT(1), + EEPROM = BIT(2), + HAS_SR_TB = BIT(3), + HAS_SR_BP3_BIT6 = BIT(4), +}; + typedef struct FlashPartInfo { const char *part_name; /* @@ -74,6 +73,7 @@ typedef struct FlashPartInfo { * This field inform how many die is in the chip. */ uint8_t die_cnt; + uint8_t (*sfdp_read)(uint32_t sfdp_addr); } FlashPartInfo; /* adapted from linux */ @@ -232,12 +232,16 @@ static const FlashPartInfo known_devices[] = { { INFO("mx25l6405d", 0xc22017, 0, 64 << 10, 128, 0) }, { INFO("mx25l12805d", 0xc22018, 0, 64 << 10, 256, 0) }, { INFO("mx25l12855e", 0xc22618, 0, 64 << 10, 256, 0) }, - { INFO6("mx25l25635e", 0xc22019, 0xc22019, 64 << 10, 512, 0) }, + { INFO6("mx25l25635e", 0xc22019, 0xc22019, 64 << 10, 512, + ER_4K | ER_32K), .sfdp_read = m25p80_sfdp_mx25l25635e }, + { INFO6("mx25l25635f", 0xc22019, 0xc22019, 64 << 10, 512, + ER_4K | ER_32K), .sfdp_read = m25p80_sfdp_mx25l25635f }, { INFO("mx25l25655e", 0xc22619, 0, 64 << 10, 512, 0) }, { INFO("mx66l51235f", 0xc2201a, 0, 64 << 10, 1024, ER_4K | ER_32K) }, { INFO("mx66u51235f", 0xc2253a, 0, 64 << 10, 1024, ER_4K | ER_32K) }, { INFO("mx66u1g45g", 0xc2253b, 0, 64 << 10, 2048, ER_4K | ER_32K) }, - { INFO("mx66l1g45g", 0xc2201b, 0, 64 << 10, 2048, ER_4K | ER_32K) }, + { INFO("mx66l1g45g", 0xc2201b, 0, 64 << 10, 2048, ER_4K | ER_32K), + .sfdp_read = m25p80_sfdp_mx66l1g45g }, /* Micron */ { INFO("n25q032a11", 0x20bb16, 0, 64 << 10, 64, ER_4K) }, @@ -247,14 +251,19 @@ static const FlashPartInfo known_devices[] = { { INFO("n25q128a11", 0x20bb18, 0, 64 << 10, 256, ER_4K) }, { INFO("n25q128a13", 0x20ba18, 0, 64 << 10, 256, ER_4K) }, { INFO("n25q256a11", 0x20bb19, 0, 64 << 10, 512, ER_4K) }, - { INFO("n25q256a13", 0x20ba19, 0, 64 << 10, 512, ER_4K) }, + { INFO("n25q256a13", 0x20ba19, 0, 64 << 10, 512, ER_4K), + .sfdp_read = m25p80_sfdp_n25q256a }, { INFO("n25q512a11", 0x20bb20, 0, 64 << 10, 1024, ER_4K) }, { INFO("n25q512a13", 0x20ba20, 0, 64 << 10, 1024, ER_4K) }, { INFO("n25q128", 0x20ba18, 0, 64 << 10, 256, 0) }, - { INFO("n25q256a", 0x20ba19, 0, 64 << 10, 512, ER_4K) }, - { INFO("n25q512a", 0x20ba20, 0, 64 << 10, 1024, ER_4K) }, + { INFO("n25q256a", 0x20ba19, 0, 64 << 10, 512, + ER_4K | HAS_SR_BP3_BIT6 | HAS_SR_TB), + .sfdp_read = m25p80_sfdp_n25q256a }, + { INFO("n25q512a", 0x20ba20, 0, 64 << 10, 1024, ER_4K) }, { INFO("n25q512ax3", 0x20ba20, 0x1000, 64 << 10, 1024, ER_4K) }, { INFO("mt25ql512ab", 0x20ba20, 0x1044, 64 << 10, 1024, ER_4K | ER_32K) }, + { 
INFO_STACKED("mt35xu01g", 0x2c5b1b, 0x104100, 128 << 10, 1024, + ER_4K | ER_32K, 2) }, { INFO_STACKED("n25q00", 0x20ba21, 0x1000, 64 << 10, 2048, ER_4K, 4) }, { INFO_STACKED("n25q00a", 0x20bb21, 0x1000, 64 << 10, 2048, ER_4K, 4) }, { INFO_STACKED("mt25ql01g", 0x20ba21, 0x1040, 64 << 10, 2048, ER_4K, 2) }, @@ -336,8 +345,12 @@ static const FlashPartInfo known_devices[] = { { INFO("w25q64", 0xef4017, 0, 64 << 10, 128, ER_4K) }, { INFO("w25q80", 0xef5014, 0, 64 << 10, 16, ER_4K) }, { INFO("w25q80bl", 0xef4014, 0, 64 << 10, 16, ER_4K) }, - { INFO("w25q256", 0xef4019, 0, 64 << 10, 512, ER_4K) }, - { INFO("w25q512jv", 0xef4020, 0, 64 << 10, 1024, ER_4K) }, + { INFO("w25q256", 0xef4019, 0, 64 << 10, 512, ER_4K), + .sfdp_read = m25p80_sfdp_w25q256 }, + { INFO("w25q512jv", 0xef4020, 0, 64 << 10, 1024, ER_4K), + .sfdp_read = m25p80_sfdp_w25q512jv }, + { INFO("w25q01jvq", 0xef4021, 0, 64 << 10, 2048, ER_4K), + .sfdp_read = m25p80_sfdp_w25q01jvq }, }; typedef enum { @@ -353,6 +366,7 @@ typedef enum { BULK_ERASE = 0xc7, READ_FSR = 0x70, RDCR = 0x15, + RDSFDP = 0x5a, READ = 0x03, READ4 = 0x13, @@ -419,6 +433,7 @@ typedef enum { STATE_COLLECTING_DATA, STATE_COLLECTING_VAR_LEN_DATA, STATE_READING_DATA, + STATE_READING_SFDP, } CMDState; typedef enum { @@ -469,11 +484,18 @@ struct Flash { uint8_t spansion_cr2v; uint8_t spansion_cr3v; uint8_t spansion_cr4v; + bool wp_level; bool write_enable; bool four_bytes_address_mode; bool reset_enable; bool quad_enable; bool aai_enable; + bool block_protect0; + bool block_protect1; + bool block_protect2; + bool block_protect3; + bool top_bottom_bit; + bool status_register_write_disabled; uint8_t ear; int64_t dirty_page; @@ -618,12 +640,36 @@ void flash_write8(Flash *s, uint32_t addr, uint8_t data) { uint32_t page = addr / s->pi->page_size; uint8_t prev = s->storage[s->cur_addr]; + uint32_t block_protect_value = (s->block_protect3 << 3) | + (s->block_protect2 << 2) | + (s->block_protect1 << 1) | + (s->block_protect0 << 0); if (!s->write_enable) { qemu_log_mask(LOG_GUEST_ERROR, "M25P80: write with write protect!\n"); return; } + if (block_protect_value > 0) { + uint32_t num_protected_sectors = 1 << (block_protect_value - 1); + uint32_t sector = addr / s->pi->sector_size; + + /* top_bottom_bit == 0 means TOP */ + if (!s->top_bottom_bit) { + if (s->pi->n_sectors <= sector + num_protected_sectors) { + qemu_log_mask(LOG_GUEST_ERROR, + "M25P80: write with write protect!\n"); + return; + } + } else { + if (sector < num_protected_sectors) { + qemu_log_mask(LOG_GUEST_ERROR, + "M25P80: write with write protect!\n"); + return; + } + } + } + if ((prev ^ data) & data) { trace_m25p80_programming_zero_to_one(s, addr, prev, data); } @@ -646,6 +692,8 @@ static inline int get_addr_length(Flash *s) } switch (s->cmd_in_progress) { + case RDSFDP: + return 3; case PP4: case PP4_4: case QPP_4: @@ -720,6 +768,17 @@ static void complete_collecting_data(Flash *s) flash_erase(s, s->cur_addr, s->cmd_in_progress); break; case WRSR: + s->status_register_write_disabled = extract32(s->data[0], 7, 1); + s->block_protect0 = extract32(s->data[0], 2, 1); + s->block_protect1 = extract32(s->data[0], 3, 1); + s->block_protect2 = extract32(s->data[0], 4, 1); + if (s->pi->flags & HAS_SR_TB) { + s->top_bottom_bit = extract32(s->data[0], 5, 1); + } + if (s->pi->flags & HAS_SR_BP3_BIT6) { + s->block_protect3 = extract32(s->data[0], 6, 1); + } + switch (get_man(s)) { case MAN_SPANSION: s->quad_enable = !!(s->data[1] & 0x02); @@ -779,6 +838,11 @@ static void complete_collecting_data(Flash *s) " by device\n"); } 
break; + + case RDSFDP: + s->state = STATE_READING_SFDP; + break; + default: break; } @@ -1162,22 +1226,34 @@ static void decode_new_cmd(Flash *s, uint32_t value) break; case WRSR: - if (s->write_enable) { - switch (get_man(s)) { - case MAN_SPANSION: - s->needed_bytes = 2; - s->state = STATE_COLLECTING_DATA; - break; - case MAN_MACRONIX: - s->needed_bytes = 2; - s->state = STATE_COLLECTING_VAR_LEN_DATA; - break; - default: - s->needed_bytes = 1; - s->state = STATE_COLLECTING_DATA; - } - s->pos = 0; + /* + * If WP# is low and status_register_write_disabled is high, + * status register writes are disabled. + * This is also called "hardware protected mode" (HPM). All other + * combinations of the two states are called "software protected mode" + * (SPM), and status register writes are permitted. + */ + if ((s->wp_level == 0 && s->status_register_write_disabled) + || !s->write_enable) { + qemu_log_mask(LOG_GUEST_ERROR, + "M25P80: Status register write is disabled!\n"); + break; } + + switch (get_man(s)) { + case MAN_SPANSION: + s->needed_bytes = 2; + s->state = STATE_COLLECTING_DATA; + break; + case MAN_MACRONIX: + s->needed_bytes = 2; + s->state = STATE_COLLECTING_VAR_LEN_DATA; + break; + default: + s->needed_bytes = 1; + s->state = STATE_COLLECTING_DATA; + } + s->pos = 0; break; case WRDI: @@ -1192,6 +1268,17 @@ static void decode_new_cmd(Flash *s, uint32_t value) case RDSR: s->data[0] = (!!s->write_enable) << 1; + s->data[0] |= (!!s->status_register_write_disabled) << 7; + s->data[0] |= (!!s->block_protect0) << 2; + s->data[0] |= (!!s->block_protect1) << 3; + s->data[0] |= (!!s->block_protect2) << 4; + if (s->pi->flags & HAS_SR_TB) { + s->data[0] |= (!!s->top_bottom_bit) << 5; + } + if (s->pi->flags & HAS_SR_BP3_BIT6) { + s->data[0] |= (!!s->block_protect3) << 6; + } + if (get_man(s) == MAN_MACRONIX || get_man(s) == MAN_ISSI) { s->data[0] |= (!!s->quad_enable) << 6; } @@ -1364,6 +1451,16 @@ static void decode_new_cmd(Flash *s, uint32_t value) qemu_log_mask(LOG_GUEST_ERROR, "M25P80: Unknown cmd %x\n", value); } break; + case RDSFDP: + if (s->pi->sfdp_read) { + s->needed_bytes = get_addr_length(s) + 1; /* SFDP addr + dummy */ + s->pos = 0; + s->len = 0; + s->state = STATE_COLLECTING_DATA; + break; + } + /* Fallthrough */ + default: s->pos = 0; s->len = 1; @@ -1471,6 +1568,12 @@ static uint32_t m25p80_transfer8(SSIPeripheral *ss, uint32_t tx) } } break; + case STATE_READING_SFDP: + assert(s->pi->sfdp_read); + r = s->pi->sfdp_read(s->cur_addr); + trace_m25p80_read_sfdp(s, s->cur_addr, (uint8_t)r); + s->cur_addr = (s->cur_addr + 1) & (M25P80_SFDP_MAX_SIZE - 1); + break; default: case STATE_IDLE: @@ -1481,6 +1584,14 @@ static uint32_t m25p80_transfer8(SSIPeripheral *ss, uint32_t tx) return r; } +static void m25p80_write_protect_pin_irq_handler(void *opaque, int n, int level) +{ + Flash *s = M25P80(opaque); + /* WP# is just a single pin. 
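The WRSR/RDSR changes above are mostly bit plumbing. Here is a compilable restatement of how the status byte is assembled and when a status-register write is refused ("hardware protected mode"); the names and flag macros are illustrative stand-ins, not QEMU definitions.

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for HAS_SR_TB / HAS_SR_BP3_BIT6; not QEMU definitions. */
#define FLAG_SR_TB        (1u << 0)
#define FLAG_SR_BP3_BIT6  (1u << 1)

/* Assemble the status byte the way the new RDSR code does. */
uint8_t build_status_register(unsigned flags, bool wel, bool srwd,
                              bool bp0, bool bp1, bool bp2, bool bp3, bool tb)
{
    uint8_t sr = 0;

    sr |= (uint8_t)wel << 1;        /* WEL: write enable latch */
    sr |= (uint8_t)bp0 << 2;        /* BP0..BP2: block protect bits */
    sr |= (uint8_t)bp1 << 3;
    sr |= (uint8_t)bp2 << 4;
    if (flags & FLAG_SR_TB) {
        sr |= (uint8_t)tb << 5;     /* TB: top/bottom select */
    }
    if (flags & FLAG_SR_BP3_BIT6) {
        sr |= (uint8_t)bp3 << 6;    /* BP3 on parts that define it */
    }
    sr |= (uint8_t)srwd << 7;       /* SRWD: status register write disable */
    return sr;
}

/*
 * WRSR is refused in hardware protected mode (WP# low while SRWD is set)
 * or whenever the write enable latch is clear; every other combination is
 * software protected mode and the write goes ahead.
 */
bool wrsr_allowed(bool wp_level, bool srwd, bool wel)
{
    return !((!wp_level && srwd) || !wel);
}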
*/ + assert(n == 0); + s->wp_level = !!level; +} + static void m25p80_realize(SSIPeripheral *ss, Error **errp) { Flash *s = M25P80(ss); @@ -1503,7 +1614,7 @@ static void m25p80_realize(SSIPeripheral *ss, Error **errp) trace_m25p80_binding(s); s->storage = blk_blockalign(s->blk, s->size); - if (blk_pread(s->blk, 0, s->storage, s->size) != s->size) { + if (blk_pread(s->blk, 0, s->size, s->storage, 0) < 0) { error_setg(errp, "failed to read the initial flash content"); return; } @@ -1512,12 +1623,23 @@ static void m25p80_realize(SSIPeripheral *ss, Error **errp) s->storage = blk_blockalign(NULL, s->size); memset(s->storage, 0xFF, s->size); } + + qdev_init_gpio_in_named(DEVICE(s), + m25p80_write_protect_pin_irq_handler, "WP#", 1); } static void m25p80_reset(DeviceState *d) { Flash *s = M25P80(d); + s->wp_level = true; + s->status_register_write_disabled = false; + s->block_protect0 = false; + s->block_protect1 = false; + s->block_protect2 = false; + s->block_protect3 = false; + s->top_bottom_bit = false; + reset_memory(s); } @@ -1530,6 +1652,7 @@ static int m25p80_pre_save(void *opaque) static Property m25p80_properties[] = { /* This is default value for Micron flash */ + DEFINE_PROP_BOOL("write-enable", Flash, write_enable, false), DEFINE_PROP_UINT32("nonvolatile-cfg", Flash, nonvolatile_cfg, 0x8FFF), DEFINE_PROP_UINT8("spansion-cr1nv", Flash, spansion_cr1nv, 0x0), DEFINE_PROP_UINT8("spansion-cr2nv", Flash, spansion_cr2nv, 0x8), @@ -1583,6 +1706,51 @@ static const VMStateDescription vmstate_m25p80_aai_enable = { } }; +static bool m25p80_wp_level_srwd_needed(void *opaque) +{ + Flash *s = (Flash *)opaque; + + return !s->wp_level || s->status_register_write_disabled; +} + +static const VMStateDescription vmstate_m25p80_write_protect = { + .name = "m25p80/write_protect", + .version_id = 1, + .minimum_version_id = 1, + .needed = m25p80_wp_level_srwd_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(wp_level, Flash), + VMSTATE_BOOL(status_register_write_disabled, Flash), + VMSTATE_END_OF_LIST() + } +}; + +static bool m25p80_block_protect_needed(void *opaque) +{ + Flash *s = (Flash *)opaque; + + return s->block_protect0 || + s->block_protect1 || + s->block_protect2 || + s->block_protect3 || + s->top_bottom_bit; +} + +static const VMStateDescription vmstate_m25p80_block_protect = { + .name = "m25p80/block_protect", + .version_id = 1, + .minimum_version_id = 1, + .needed = m25p80_block_protect_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(block_protect0, Flash), + VMSTATE_BOOL(block_protect1, Flash), + VMSTATE_BOOL(block_protect2, Flash), + VMSTATE_BOOL(block_protect3, Flash), + VMSTATE_BOOL(top_bottom_bit, Flash), + VMSTATE_END_OF_LIST() + } +}; + static const VMStateDescription vmstate_m25p80 = { .name = "m25p80", .version_id = 0, @@ -1614,6 +1782,8 @@ static const VMStateDescription vmstate_m25p80 = { .subsections = (const VMStateDescription * []) { &vmstate_m25p80_data_read_loop, &vmstate_m25p80_aai_enable, + &vmstate_m25p80_write_protect, + &vmstate_m25p80_block_protect, NULL } }; diff --git a/hw/block/m25p80_sfdp.c b/hw/block/m25p80_sfdp.c new file mode 100644 index 0000000000..77615fa29e --- /dev/null +++ b/hw/block/m25p80_sfdp.c @@ -0,0 +1,332 @@ +/* + * M25P80 Serial Flash Discoverable Parameter (SFDP) + * + * Copyright (c) 2020, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. 
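The new "WP#" GPIO input is only half of the picture; a board has to drive it. This is a sketch of how board code might wire it up using the standard qdev GPIO helpers — an assumption for illustration, not code from this patch, and no board is converted here.

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/qdev-core.h"

/* Hypothetical board-level hook-up for the flash write-protect pin. */
static void board_wire_flash_wp(DeviceState *flash, DeviceState *gpio_ctrl,
                                int gpio_line)
{
    /* Route a GPIO controller output line to the named "WP#" input */
    qdev_connect_gpio_out(gpio_ctrl, gpio_line,
                          qdev_get_gpio_in_named(flash, "WP#", 0));
}

/* ...or drive the pin directly, e.g. to model a WP# line strapped low. */
static void board_strap_wp_low(DeviceState *flash)
{
    qemu_set_irq(qdev_get_gpio_in_named(flash, "WP#", 0), 0);
}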
+ */ + +#include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "m25p80_sfdp.h" + +#define define_sfdp_read(model) \ + uint8_t m25p80_sfdp_##model(uint32_t addr) \ + { \ + assert(is_power_of_2(sizeof(sfdp_##model))); \ + return sfdp_##model[addr & (sizeof(sfdp_##model) - 1)]; \ + } + +/* + * Micron + */ +static const uint8_t sfdp_n25q256a[] = { + 0x53, 0x46, 0x44, 0x50, 0x00, 0x01, 0x00, 0xff, + 0x00, 0x00, 0x01, 0x09, 0x30, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x20, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x0f, + 0x29, 0xeb, 0x27, 0x6b, 0x08, 0x3b, 0x27, 0xbb, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x27, 0xbb, + 0xff, 0xff, 0x29, 0xeb, 0x0c, 0x20, 0x10, 0xd8, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; +define_sfdp_read(n25q256a); + + +/* + * Matronix + */ + +/* mx25l25635e. 
No 4B opcodes */ +static const uint8_t sfdp_mx25l25635e[] = { + 0x53, 0x46, 0x44, 0x50, 0x00, 0x01, 0x01, 0xff, + 0x00, 0x00, 0x01, 0x09, 0x30, 0x00, 0x00, 0xff, + 0xc2, 0x00, 0x01, 0x04, 0x60, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x20, 0xf3, 0xff, 0xff, 0xff, 0xff, 0x0f, + 0x44, 0xeb, 0x08, 0x6b, 0x08, 0x3b, 0x04, 0xbb, + 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0xff, + 0xff, 0xff, 0x00, 0xff, 0x0c, 0x20, 0x0f, 0x52, + 0x10, 0xd8, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x36, 0x00, 0x27, 0xf7, 0x4f, 0xff, 0xff, + 0xd9, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; +define_sfdp_read(mx25l25635e) + +static const uint8_t sfdp_mx25l25635f[] = { + 0x53, 0x46, 0x44, 0x50, 0x00, 0x01, 0x01, 0xff, + 0x00, 0x00, 0x01, 0x09, 0x30, 0x00, 0x00, 0xff, + 0xc2, 0x00, 0x01, 0x04, 0x60, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x20, 0xf3, 0xff, 0xff, 0xff, 0xff, 0x0f, + 0x44, 0xeb, 0x08, 0x6b, 0x08, 0x3b, 0x04, 0xbb, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0xff, + 0xff, 0xff, 0x44, 0xeb, 0x0c, 0x20, 0x0f, 0x52, + 0x10, 0xd8, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x36, 0x00, 0x27, 0x9d, 0xf9, 0xc0, 0x64, + 0x85, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xc2, 0xf5, 0x08, 0x0a, + 0x08, 0x04, 0x03, 0x06, 0x00, 0x00, 0x07, 0x29, + 0x17, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; +define_sfdp_read(mx25l25635f); + +static const uint8_t sfdp_mx66l1g45g[] = { + 0x53, 0x46, 0x44, 0x50, 0x06, 0x01, 0x02, 0xff, + 0x00, 0x06, 0x01, 0x10, 0x30, 0x00, 0x00, 0xff, + 0xc2, 0x00, 0x01, 0x04, 0x10, 0x01, 0x00, 0xff, + 0x84, 0x00, 0x01, 0x02, 0xc0, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x20, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, + 0x44, 0xeb, 0x08, 0x6b, 0x08, 0x3b, 0x04, 0xbb, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0xff, + 0xff, 0xff, 0x44, 0xeb, 0x0c, 0x20, 0x0f, 0x52, + 0x10, 0xd8, 0x00, 0xff, 0xd6, 0x49, 0xc5, 0x00, + 0x85, 0xdf, 0x04, 0xe3, 0x44, 0x03, 0x67, 0x38, + 0x30, 0xb0, 0x30, 0xb0, 0xf7, 0xbd, 0xd5, 0x5c, + 0x4a, 0x9e, 0x29, 0xff, 0xf0, 0x50, 0xf9, 0x85, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x7f, 0xef, 0xff, 0xff, 0x21, 0x5c, 0xdc, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x36, 0x00, 0x27, 0x9d, 0xf9, 0xc0, 0x64, + 0x85, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xc2, 0xf5, 0x08, 0x00, 0x0c, 0x04, 0x08, 0x08, + 0x01, 0x00, 0x19, 0x0f, 0x01, 0x01, 0x06, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; +define_sfdp_read(mx66l1g45g); + +/* + * Windbond + */ + +static const uint8_t sfdp_w25q256[] = { + 0x53, 0x46, 0x44, 0x50, 0x00, 0x01, 0x00, 0xff, + 0x00, 0x00, 0x01, 0x09, 0x80, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x20, 0xf3, 0xff, 0xff, 0xff, 0xff, 0x0f, + 0x44, 0xeb, 0x08, 0x6b, 0x08, 0x3b, 0x42, 0xbb, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0xff, 0xff, 0x21, 0xeb, 0x0c, 0x20, 0x0f, 0x52, + 0x10, 0xd8, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; +define_sfdp_read(w25q256); + +static const uint8_t sfdp_w25q512jv[] = { + 0x53, 0x46, 0x44, 0x50, 0x06, 0x01, 0x01, 0xff, + 0x00, 0x06, 0x01, 0x10, 0x80, 0x00, 0x00, 0xff, + 0x84, 0x00, 0x01, 0x02, 0xd0, 0x00, 0x00, 0xff, + 0x03, 0x00, 0x01, 0x02, 0xf0, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x20, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, + 0x44, 0xeb, 0x08, 0x6b, 0x08, 0x3b, 0x42, 0xbb, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0xff, 0xff, 0x40, 0xeb, 0x0c, 0x20, 0x0f, 0x52, + 0x10, 0xd8, 0x00, 0x00, 0x36, 0x02, 0xa6, 0x00, + 0x82, 0xea, 0x14, 0xe2, 0xe9, 0x63, 0x76, 0x33, + 0x7a, 0x75, 0x7a, 0x75, 0xf7, 0xa2, 0xd5, 0x5c, + 0x19, 0xf7, 0x4d, 0xff, 0xe9, 0x70, 0xf9, 0xa5, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x0a, 0xf0, 0xff, 0x21, 0xff, 0xdc, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; +define_sfdp_read(w25q512jv); + +static const uint8_t sfdp_w25q01jvq[] = { + 0x53, 0x46, 0x44, 0x50, 0x06, 0x01, 0x01, 0xff, + 0x00, 0x06, 0x01, 0x10, 0x80, 0x00, 0x00, 0xff, + 0x84, 0x00, 0x01, 0x02, 0xd0, 0x00, 0x00, 0xff, + 0x03, 0x00, 0x01, 0x02, 0xf0, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x20, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, + 0x44, 0xeb, 0x08, 0x6b, 0x08, 0x3b, 0x42, 0xbb, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0xff, 0xff, 0x40, 0xeb, 0x0c, 0x20, 0x0f, 0x52, + 0x10, 0xd8, 0x00, 0x00, 0x36, 0x02, 0xa6, 0x00, + 0x82, 0xea, 0x14, 0xe2, 0xe9, 0x63, 0x76, 0x33, + 0x7a, 0x75, 0x7a, 0x75, 0xf7, 0xa2, 0xd5, 0x5c, + 0x19, 0xf7, 0x4d, 0xff, 0xe9, 0x70, 0xf9, 0xa5, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x0a, 0xf0, 0xff, 0x21, 0xff, 0xdc, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; +define_sfdp_read(w25q01jvq); diff --git a/hw/block/m25p80_sfdp.h b/hw/block/m25p80_sfdp.h new file mode 100644 index 0000000000..df7adfb5ce --- /dev/null +++ b/hw/block/m25p80_sfdp.h @@ -0,0 +1,29 @@ +/* + * M25P80 SFDP + * + * Copyright (c) 2020, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ + +#ifndef HW_M25P80_SFDP_H +#define HW_M25P80_SFDP_H + +/* + * SFDP area has a 3 bytes address space. 
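Because every per-part table has a power-of-two size and only the low address bits are used, each array is simply mirrored across the 24-bit SFDP window. A minimal standalone sketch of that addressing behaviour (toy table, illustrative names):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SFDP_MAX_SIZE (1u << 24)    /* 3-byte address space, as above */

/* Toy 16-byte table standing in for one of the per-part SFDP arrays. */
static const uint8_t toy_sfdp[16] = { 0x53, 0x46, 0x44, 0x50 };   /* "SFDP" */

/*
 * Same shape as a define_sfdp_read() expansion: only the low bits of the
 * address are used, so the table repeats across the 16 MiB window.
 */
static uint8_t toy_sfdp_read(uint32_t addr)
{
    assert((sizeof(toy_sfdp) & (sizeof(toy_sfdp) - 1)) == 0);  /* power of 2 */
    return toy_sfdp[addr & (sizeof(toy_sfdp) - 1)];
}

int main(void)
{
    uint32_t addr = SFDP_MAX_SIZE - 2;

    /* The controller-side address wraps inside the 24-bit window too. */
    for (int i = 0; i < 4; i++) {
        printf("0x%06x -> 0x%02x\n", (unsigned)addr, toy_sfdp_read(addr));
        addr = (addr + 1) & (SFDP_MAX_SIZE - 1);
    }
    return 0;
}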
+ */ +#define M25P80_SFDP_MAX_SIZE (1 << 24) + +uint8_t m25p80_sfdp_n25q256a(uint32_t addr); + +uint8_t m25p80_sfdp_mx25l25635e(uint32_t addr); +uint8_t m25p80_sfdp_mx25l25635f(uint32_t addr); +uint8_t m25p80_sfdp_mx66l1g45g(uint32_t addr); + +uint8_t m25p80_sfdp_w25q256(uint32_t addr); +uint8_t m25p80_sfdp_w25q512jv(uint32_t addr); + +uint8_t m25p80_sfdp_w25q01jvq(uint32_t addr); + +#endif diff --git a/hw/block/meson.build b/hw/block/meson.build index 2389326112..b434d5654c 100644 --- a/hw/block/meson.build +++ b/hw/block/meson.build @@ -12,11 +12,12 @@ softmmu_ss.add(when: 'CONFIG_ONENAND', if_true: files('onenand.c')) softmmu_ss.add(when: 'CONFIG_PFLASH_CFI01', if_true: files('pflash_cfi01.c')) softmmu_ss.add(when: 'CONFIG_PFLASH_CFI02', if_true: files('pflash_cfi02.c')) softmmu_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80.c')) +softmmu_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80_sfdp.c')) softmmu_ss.add(when: 'CONFIG_SWIM', if_true: files('swim.c')) softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xen-block.c')) softmmu_ss.add(when: 'CONFIG_TC58128', if_true: files('tc58128.c')) -specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c')) -specific_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk.c')) +specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c', 'virtio-blk-common.c')) +specific_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk.c', 'virtio-blk-common.c')) subdir('dataplane') diff --git a/hw/block/nand.c b/hw/block/nand.c index 8bc80e3514..1aee1cb2b1 100644 --- a/hw/block/nand.c +++ b/hw/block/nand.c @@ -666,8 +666,8 @@ static void glue(nand_blk_write_, NAND_PAGE_SIZE)(NANDFlashState *s) sector = SECTOR(s->addr); off = (s->addr & PAGE_MASK) + s->offset; soff = SECTOR_OFFSET(s->addr); - if (blk_pread(s->blk, sector << BDRV_SECTOR_BITS, iobuf, - PAGE_SECTORS << BDRV_SECTOR_BITS) < 0) { + if (blk_pread(s->blk, sector << BDRV_SECTOR_BITS, + PAGE_SECTORS << BDRV_SECTOR_BITS, iobuf, 0) < 0) { printf("%s: read error in sector %" PRIu64 "\n", __func__, sector); return; } @@ -679,24 +679,24 @@ static void glue(nand_blk_write_, NAND_PAGE_SIZE)(NANDFlashState *s) MIN(OOB_SIZE, off + s->iolen - NAND_PAGE_SIZE)); } - if (blk_pwrite(s->blk, sector << BDRV_SECTOR_BITS, iobuf, - PAGE_SECTORS << BDRV_SECTOR_BITS, 0) < 0) { + if (blk_pwrite(s->blk, sector << BDRV_SECTOR_BITS, + PAGE_SECTORS << BDRV_SECTOR_BITS, iobuf, 0) < 0) { printf("%s: write error in sector %" PRIu64 "\n", __func__, sector); } } else { off = PAGE_START(s->addr) + (s->addr & PAGE_MASK) + s->offset; sector = off >> 9; soff = off & 0x1ff; - if (blk_pread(s->blk, sector << BDRV_SECTOR_BITS, iobuf, - (PAGE_SECTORS + 2) << BDRV_SECTOR_BITS) < 0) { + if (blk_pread(s->blk, sector << BDRV_SECTOR_BITS, + (PAGE_SECTORS + 2) << BDRV_SECTOR_BITS, iobuf, 0) < 0) { printf("%s: read error in sector %" PRIu64 "\n", __func__, sector); return; } mem_and(iobuf + soff, s->io, s->iolen); - if (blk_pwrite(s->blk, sector << BDRV_SECTOR_BITS, iobuf, - (PAGE_SECTORS + 2) << BDRV_SECTOR_BITS, 0) < 0) { + if (blk_pwrite(s->blk, sector << BDRV_SECTOR_BITS, + (PAGE_SECTORS + 2) << BDRV_SECTOR_BITS, iobuf, 0) < 0) { printf("%s: write error in sector %" PRIu64 "\n", __func__, sector); } } @@ -723,20 +723,20 @@ static void glue(nand_blk_erase_, NAND_PAGE_SIZE)(NANDFlashState *s) i = SECTOR(addr); page = SECTOR(addr + (1 << (ADDR_SHIFT + s->erase_shift))); for (; i < page; i ++) - if (blk_pwrite(s->blk, i << BDRV_SECTOR_BITS, iobuf, - BDRV_SECTOR_SIZE, 0) < 
0) { + if (blk_pwrite(s->blk, i << BDRV_SECTOR_BITS, + BDRV_SECTOR_SIZE, iobuf, 0) < 0) { printf("%s: write error in sector %" PRIu64 "\n", __func__, i); } } else { addr = PAGE_START(addr); page = addr >> 9; - if (blk_pread(s->blk, page << BDRV_SECTOR_BITS, iobuf, - BDRV_SECTOR_SIZE) < 0) { + if (blk_pread(s->blk, page << BDRV_SECTOR_BITS, + BDRV_SECTOR_SIZE, iobuf, 0) < 0) { printf("%s: read error in sector %" PRIu64 "\n", __func__, page); } memset(iobuf + (addr & 0x1ff), 0xff, (~addr & 0x1ff) + 1); - if (blk_pwrite(s->blk, page << BDRV_SECTOR_BITS, iobuf, - BDRV_SECTOR_SIZE, 0) < 0) { + if (blk_pwrite(s->blk, page << BDRV_SECTOR_BITS, + BDRV_SECTOR_SIZE, iobuf, 0) < 0) { printf("%s: write error in sector %" PRIu64 "\n", __func__, page); } @@ -744,20 +744,20 @@ static void glue(nand_blk_erase_, NAND_PAGE_SIZE)(NANDFlashState *s) i = (addr & ~0x1ff) + 0x200; for (addr += ((NAND_PAGE_SIZE + OOB_SIZE) << s->erase_shift) - 0x200; i < addr; i += 0x200) { - if (blk_pwrite(s->blk, i, iobuf, BDRV_SECTOR_SIZE, 0) < 0) { + if (blk_pwrite(s->blk, i, BDRV_SECTOR_SIZE, iobuf, 0) < 0) { printf("%s: write error in sector %" PRIu64 "\n", __func__, i >> 9); } } page = i >> 9; - if (blk_pread(s->blk, page << BDRV_SECTOR_BITS, iobuf, - BDRV_SECTOR_SIZE) < 0) { + if (blk_pread(s->blk, page << BDRV_SECTOR_BITS, + BDRV_SECTOR_SIZE, iobuf, 0) < 0) { printf("%s: read error in sector %" PRIu64 "\n", __func__, page); } memset(iobuf, 0xff, ((addr - 1) & 0x1ff) + 1); - if (blk_pwrite(s->blk, page << BDRV_SECTOR_BITS, iobuf, - BDRV_SECTOR_SIZE, 0) < 0) { + if (blk_pwrite(s->blk, page << BDRV_SECTOR_BITS, + BDRV_SECTOR_SIZE, iobuf, 0) < 0) { printf("%s: write error in sector %" PRIu64 "\n", __func__, page); } } @@ -772,8 +772,8 @@ static void glue(nand_blk_load_, NAND_PAGE_SIZE)(NANDFlashState *s, if (s->blk) { if (s->mem_oob) { - if (blk_pread(s->blk, SECTOR(addr) << BDRV_SECTOR_BITS, s->io, - PAGE_SECTORS << BDRV_SECTOR_BITS) < 0) { + if (blk_pread(s->blk, SECTOR(addr) << BDRV_SECTOR_BITS, + PAGE_SECTORS << BDRV_SECTOR_BITS, s->io, 0) < 0) { printf("%s: read error in sector %" PRIu64 "\n", __func__, SECTOR(addr)); } @@ -782,8 +782,9 @@ static void glue(nand_blk_load_, NAND_PAGE_SIZE)(NANDFlashState *s, OOB_SIZE); s->ioaddr = s->io + SECTOR_OFFSET(s->addr) + offset; } else { - if (blk_pread(s->blk, PAGE_START(addr), s->io, - (PAGE_SECTORS + 2) << BDRV_SECTOR_BITS) < 0) { + if (blk_pread(s->blk, PAGE_START(addr), + (PAGE_SECTORS + 2) << BDRV_SECTOR_BITS, s->io, 0) + < 0) { printf("%s: read error in sector %" PRIu64 "\n", __func__, PAGE_START(addr) >> 9); } diff --git a/hw/block/onenand.c b/hw/block/onenand.c index afc0cd3a0f..1fde975024 100644 --- a/hw/block/onenand.c +++ b/hw/block/onenand.c @@ -229,8 +229,8 @@ static void onenand_reset(OneNANDState *s, int cold) /* Lock the whole flash */ memset(s->blockwp, ONEN_LOCK_LOCKED, s->blocks); - if (s->blk_cur && blk_pread(s->blk_cur, 0, s->boot[0], - 8 << BDRV_SECTOR_BITS) < 0) { + if (s->blk_cur && blk_pread(s->blk_cur, 0, 8 << BDRV_SECTOR_BITS, + s->boot[0], 0) < 0) { hw_error("%s: Loading the BootRAM failed.\n", __func__); } } @@ -249,8 +249,8 @@ static inline int onenand_load_main(OneNANDState *s, int sec, int secn, assert(UINT32_MAX >> BDRV_SECTOR_BITS > sec); assert(UINT32_MAX >> BDRV_SECTOR_BITS > secn); if (s->blk_cur) { - return blk_pread(s->blk_cur, sec << BDRV_SECTOR_BITS, dest, - secn << BDRV_SECTOR_BITS) < 0; + return blk_pread(s->blk_cur, sec << BDRV_SECTOR_BITS, + secn << BDRV_SECTOR_BITS, dest, 0) < 0; } else if (sec + secn > s->secs_cur) { return 1; } @@ 
-274,7 +274,7 @@ static inline int onenand_prog_main(OneNANDState *s, int sec, int secn, uint8_t *dp = 0; if (s->blk_cur) { dp = g_malloc(size); - if (!dp || blk_pread(s->blk_cur, offset, dp, size) < 0) { + if (!dp || blk_pread(s->blk_cur, offset, size, dp, 0) < 0) { result = 1; } } else { @@ -290,7 +290,7 @@ static inline int onenand_prog_main(OneNANDState *s, int sec, int secn, dp[i] &= sp[i]; } if (s->blk_cur) { - result = blk_pwrite(s->blk_cur, offset, dp, size, 0) < 0; + result = blk_pwrite(s->blk_cur, offset, size, dp, 0) < 0; } } if (dp && s->blk_cur) { @@ -308,7 +308,7 @@ static inline int onenand_load_spare(OneNANDState *s, int sec, int secn, if (s->blk_cur) { uint32_t offset = (s->secs_cur + (sec >> 5)) << BDRV_SECTOR_BITS; - if (blk_pread(s->blk_cur, offset, buf, BDRV_SECTOR_SIZE) < 0) { + if (blk_pread(s->blk_cur, offset, BDRV_SECTOR_SIZE, buf, 0) < 0) { return 1; } memcpy(dest, buf + ((sec & 31) << 4), secn << 4); @@ -333,7 +333,7 @@ static inline int onenand_prog_spare(OneNANDState *s, int sec, int secn, if (s->blk_cur) { dp = g_malloc(512); if (!dp - || blk_pread(s->blk_cur, offset, dp, BDRV_SECTOR_SIZE) < 0) { + || blk_pread(s->blk_cur, offset, BDRV_SECTOR_SIZE, dp, 0) < 0) { result = 1; } else { dpp = dp + ((sec & 31) << 4); @@ -351,8 +351,8 @@ static inline int onenand_prog_spare(OneNANDState *s, int sec, int secn, dpp[i] &= sp[i]; } if (s->blk_cur) { - result = blk_pwrite(s->blk_cur, offset, dp, - BDRV_SECTOR_SIZE, 0) < 0; + result = blk_pwrite(s->blk_cur, offset, BDRV_SECTOR_SIZE, dp, + 0) < 0; } } g_free(dp); @@ -370,17 +370,17 @@ static inline int onenand_erase(OneNANDState *s, int sec, int num) for (; num > 0; num--, sec++) { if (s->blk_cur) { int erasesec = s->secs_cur + (sec >> 5); - if (blk_pwrite(s->blk_cur, sec << BDRV_SECTOR_BITS, blankbuf, - BDRV_SECTOR_SIZE, 0) < 0) { + if (blk_pwrite(s->blk_cur, sec << BDRV_SECTOR_BITS, + BDRV_SECTOR_SIZE, blankbuf, 0) < 0) { goto fail; } - if (blk_pread(s->blk_cur, erasesec << BDRV_SECTOR_BITS, tmpbuf, - BDRV_SECTOR_SIZE) < 0) { + if (blk_pread(s->blk_cur, erasesec << BDRV_SECTOR_BITS, + BDRV_SECTOR_SIZE, tmpbuf, 0) < 0) { goto fail; } memcpy(tmpbuf + ((sec & 31) << 4), blankbuf, 1 << 4); - if (blk_pwrite(s->blk_cur, erasesec << BDRV_SECTOR_BITS, tmpbuf, - BDRV_SECTOR_SIZE, 0) < 0) { + if (blk_pwrite(s->blk_cur, erasesec << BDRV_SECTOR_BITS, + BDRV_SECTOR_SIZE, tmpbuf, 0) < 0) { goto fail; } } else { diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c index 81f9f971d8..0cbc2fb4cb 100644 --- a/hw/block/pflash_cfi01.c +++ b/hw/block/pflash_cfi01.c @@ -392,8 +392,8 @@ static void pflash_update(PFlashCFI01 *pfl, int offset, /* widen to sector boundaries */ offset = QEMU_ALIGN_DOWN(offset, BDRV_SECTOR_SIZE); offset_end = QEMU_ALIGN_UP(offset_end, BDRV_SECTOR_SIZE); - ret = blk_pwrite(pfl->blk, offset, pfl->storage + offset, - offset_end - offset, 0); + ret = blk_pwrite(pfl->blk, offset, offset_end - offset, + pfl->storage + offset, 0); if (ret < 0) { /* TODO set error bit in status */ error_report("Could not update PFLASH: %s", strerror(-ret)); @@ -1023,7 +1023,7 @@ static void postload_update_cb(void *opaque, bool running, RunState state) { PFlashCFI01 *pfl = opaque; - /* This is called after bdrv_invalidate_cache_all. */ + /* This is called after bdrv_activate_all. 
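All of the blk_pread()/blk_pwrite() churn in these hunks follows one pattern: the byte count moves ahead of the buffer and a BdrvRequestFlags argument is appended, and success is now reported as 0 rather than a byte count (the m25p80_realize() hunk above switches its check from "!= s->size" to "< 0" accordingly). A sketch of the conversion shape; the helper below is illustrative, not added by the patch.

/*
 * Illustrative helper showing the new call shape:
 *
 *   old: blk_pread(blk, offset, buf, bytes)
 *        blk_pwrite(blk, offset, buf, bytes, flags)
 *   new: blk_pread(blk, offset, bytes, buf, flags)
 *        blk_pwrite(blk, offset, bytes, buf, flags)
 */
static int read_one_sector(BlockBackend *blk, int64_t sector, uint8_t *buf)
{
    return blk_pread(blk, sector << BDRV_SECTOR_BITS, BDRV_SECTOR_SIZE,
                     buf, 0);   /* 0 on success, negative errno on failure */
}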
*/ qemu_del_vm_change_state_handler(pfl->vmstate); pfl->vmstate = NULL; diff --git a/hw/block/pflash_cfi02.c b/hw/block/pflash_cfi02.c index 02c514fb6e..2a99b286b0 100644 --- a/hw/block/pflash_cfi02.c +++ b/hw/block/pflash_cfi02.c @@ -400,8 +400,8 @@ static void pflash_update(PFlashCFI02 *pfl, int offset, int size) /* widen to sector boundaries */ offset = QEMU_ALIGN_DOWN(offset, BDRV_SECTOR_SIZE); offset_end = QEMU_ALIGN_UP(offset_end, BDRV_SECTOR_SIZE); - ret = blk_pwrite(pfl->blk, offset, pfl->storage + offset, - offset_end - offset, 0); + ret = blk_pwrite(pfl->blk, offset, offset_end - offset, + pfl->storage + offset, 0); if (ret < 0) { /* TODO set error bit in status */ error_report("Could not update PFLASH: %s", strerror(-ret)); diff --git a/hw/block/swim.c b/hw/block/swim.c index 509c2f4900..333da08ce0 100644 --- a/hw/block/swim.c +++ b/hw/block/swim.c @@ -421,8 +421,7 @@ static void sysbus_swim_realize(DeviceState *dev, Error **errp) Swim *sys = SWIM(dev); SWIMCtrl *swimctrl = &sys->ctrl; - qbus_create_inplace(&swimctrl->bus, sizeof(SWIMBus), TYPE_SWIM_BUS, dev, - NULL); + qbus_init(&swimctrl->bus, sizeof(SWIMBus), TYPE_SWIM_BUS, dev, NULL); swimctrl->bus.ctrl = swimctrl; } diff --git a/hw/block/trace-events b/hw/block/trace-events index d86b53520c..2c45a62bd5 100644 --- a/hw/block/trace-events +++ b/hw/block/trace-events @@ -80,5 +80,6 @@ m25p80_page_program(void *s, uint32_t addr, uint8_t tx) "[%p] page program cur_a m25p80_transfer(void *s, uint8_t state, uint32_t len, uint8_t needed, uint32_t pos, uint32_t cur_addr, uint8_t t) "[%p] Transfer state 0x%"PRIx8" len 0x%"PRIx32" needed 0x%"PRIx8" pos 0x%"PRIx32" addr 0x%"PRIx32" tx 0x%"PRIx8 m25p80_read_byte(void *s, uint32_t addr, uint8_t v) "[%p] Read byte 0x%"PRIx32"=0x%"PRIx8 m25p80_read_data(void *s, uint32_t pos, uint8_t v) "[%p] Read data 0x%"PRIx32"=0x%"PRIx8 +m25p80_read_sfdp(void *s, uint32_t addr, uint8_t v) "[%p] Read SFDP 0x%"PRIx32"=0x%"PRIx8 m25p80_binding(void *s) "[%p] Binding to IF_MTD drive" m25p80_binding_no_bdrv(void *s) "[%p] No BDRV - binding to RAM" diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c index ba13cb87e5..aff4d2b8cb 100644 --- a/hw/block/vhost-user-blk.c +++ b/hw/block/vhost-user-blk.c @@ -23,6 +23,7 @@ #include "hw/qdev-core.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" +#include "hw/virtio/virtio-blk-common.h" #include "hw/virtio/vhost.h" #include "hw/virtio/vhost-user-blk.h" #include "hw/virtio/virtio.h" @@ -51,6 +52,7 @@ static const int user_feature_bits[] = { VIRTIO_F_NOTIFY_ON_EMPTY, VIRTIO_F_RING_PACKED, VIRTIO_F_IOMMU_PLATFORM, + VIRTIO_F_RING_RESET, VHOST_INVALID_FEATURE_BIT }; @@ -63,7 +65,7 @@ static void vhost_user_blk_update_config(VirtIODevice *vdev, uint8_t *config) /* Our num_queues overrides the device backend */ virtio_stw_p(vdev, &s->blkcfg.num_queues, s->num_queues); - memcpy(config, &s->blkcfg, sizeof(struct virtio_blk_config)); + memcpy(config, &s->blkcfg, vdev->config_len); } static void vhost_user_blk_set_config(VirtIODevice *vdev, const uint8_t *config) @@ -92,21 +94,25 @@ static int vhost_user_blk_handle_config_change(struct vhost_dev *dev) { int ret; struct virtio_blk_config blkcfg; + VirtIODevice *vdev = dev->vdev; VHostUserBlk *s = VHOST_USER_BLK(dev->vdev); Error *local_err = NULL; + if (!dev->started) { + return 0; + } + ret = vhost_dev_get_config(dev, (uint8_t *)&blkcfg, - sizeof(struct virtio_blk_config), - &local_err); + vdev->config_len, &local_err); if (ret < 0) { error_report_err(local_err); - return -1; + return 
ret; } /* valid for resize only */ if (blkcfg.capacity != s->blkcfg.capacity) { s->blkcfg.capacity = blkcfg.capacity; - memcpy(dev->vdev->config, &s->blkcfg, sizeof(struct virtio_blk_config)); + memcpy(dev->vdev->config, &s->blkcfg, vdev->config_len); virtio_notify_config(dev->vdev); } @@ -163,13 +169,6 @@ static int vhost_user_blk_start(VirtIODevice *vdev, Error **errp) goto err_guest_notifiers; } - ret = vhost_dev_start(&s->dev, vdev); - if (ret < 0) { - error_setg_errno(errp, -ret, "Error starting vhost"); - goto err_guest_notifiers; - } - s->started_vu = true; - /* guest_notifier_mask/pending not used yet, so just unmask * everything here. virtio-pci will do the right thing by * enabling/disabling irqfd. @@ -178,9 +177,20 @@ static int vhost_user_blk_start(VirtIODevice *vdev, Error **errp) vhost_virtqueue_mask(&s->dev, vdev, i, false); } + s->dev.vq_index_end = s->dev.nvqs; + ret = vhost_dev_start(&s->dev, vdev, true); + if (ret < 0) { + error_setg_errno(errp, -ret, "Error starting vhost"); + goto err_guest_notifiers; + } + s->started_vu = true; + return ret; err_guest_notifiers: + for (i = 0; i < s->dev.nvqs; i++) { + vhost_virtqueue_mask(&s->dev, vdev, i, true); + } k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false); err_host_notifiers: vhost_dev_disable_notifiers(&s->dev, vdev); @@ -203,7 +213,7 @@ static void vhost_user_blk_stop(VirtIODevice *vdev) return; } - vhost_dev_stop(&s->dev, vdev); + vhost_dev_stop(&s->dev, vdev, true); ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false); if (ret < 0) { @@ -217,19 +227,15 @@ static void vhost_user_blk_stop(VirtIODevice *vdev) static void vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status) { VHostUserBlk *s = VHOST_USER_BLK(vdev); - bool should_start = virtio_device_started(vdev, status); + bool should_start = virtio_device_should_start(vdev, status); Error *local_err = NULL; int ret; - if (!vdev->vm_running) { - should_start = false; - } - if (!s->connected) { return; } - if (s->dev.started == should_start) { + if (vhost_dev_is_started(&s->dev) == should_start) { return; } @@ -252,18 +258,14 @@ static uint64_t vhost_user_blk_get_features(VirtIODevice *vdev, VHostUserBlk *s = VHOST_USER_BLK(vdev); /* Turn on pre-defined features */ + virtio_add_feature(&features, VIRTIO_BLK_F_SIZE_MAX); virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX); virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY); virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY); virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE); virtio_add_feature(&features, VIRTIO_BLK_F_FLUSH); virtio_add_feature(&features, VIRTIO_BLK_F_RO); - virtio_add_feature(&features, VIRTIO_BLK_F_DISCARD); - virtio_add_feature(&features, VIRTIO_BLK_F_WRITE_ZEROES); - if (s->config_wce) { - virtio_add_feature(&features, VIRTIO_BLK_F_CONFIG_WCE); - } if (s->num_queues > 1) { virtio_add_feature(&features, VIRTIO_BLK_F_MQ); } @@ -285,7 +287,7 @@ static void vhost_user_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq) return; } - if (s->dev.started) { + if (vhost_dev_is_started(&s->dev)) { return; } @@ -336,6 +338,7 @@ static int vhost_user_blk_connect(DeviceState *dev, Error **errp) vhost_dev_set_config_notifier(&s->dev, &blk_ops); + s->vhost_user.supports_config = true; ret = vhost_dev_init(&s->dev, &s->vhost_user, VHOST_BACKEND_TYPE_USER, 0, errp); if (ret < 0) { @@ -366,17 +369,10 @@ static void vhost_user_blk_disconnect(DeviceState *dev) vhost_user_blk_stop(vdev); vhost_dev_cleanup(&s->dev); -} -static void vhost_user_blk_chr_closed_bh(void *opaque) -{ - DeviceState 
*dev = opaque; - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserBlk *s = VHOST_USER_BLK(vdev); - - vhost_user_blk_disconnect(dev); + /* Re-instate the event handler for new connections */ qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event, - NULL, opaque, NULL, true); + NULL, dev, NULL, true); } static void vhost_user_blk_event(void *opaque, QEMUChrEvent event) @@ -395,27 +391,9 @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event) } break; case CHR_EVENT_CLOSED: - if (!runstate_check(RUN_STATE_SHUTDOWN)) { - /* - * A close event may happen during a read/write, but vhost - * code assumes the vhost_dev remains setup, so delay the - * stop & clear. - */ - AioContext *ctx = qemu_get_current_aio_context(); - - qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, NULL, NULL, - NULL, NULL, false); - aio_bh_schedule_oneshot(ctx, vhost_user_blk_chr_closed_bh, opaque); - - /* - * Move vhost device to the stopped state. The vhost-user device - * will be clean up and disconnected in BH. This can be useful in - * the vhost migration code. If disconnect was caught there is an - * option for the general vhost code to get the dev state without - * knowing its type (in this case vhost-user). - */ - s->dev.started = false; - } + /* defer close until later to avoid circular close */ + vhost_user_async_close(dev, &s->chardev, &s->dev, + vhost_user_blk_disconnect); break; case CHR_EVENT_BREAK: case CHR_EVENT_MUX_IN: @@ -445,7 +423,7 @@ static int vhost_user_blk_realize_connect(VHostUserBlk *s, Error **errp) assert(s->connected); ret = vhost_dev_get_config(&s->dev, (uint8_t *)&s->blkcfg, - sizeof(struct virtio_blk_config), errp); + s->parent_obj.config_len, errp); if (ret < 0) { qemu_chr_fe_disconnect(&s->chardev); vhost_dev_cleanup(&s->dev); @@ -460,6 +438,7 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) ERRP_GUARD(); VirtIODevice *vdev = VIRTIO_DEVICE(dev); VHostUserBlk *s = VHOST_USER_BLK(vdev); + size_t config_size; int retries; int i, ret; @@ -490,8 +469,9 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) return; } - virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK, - sizeof(struct virtio_blk_config)); + config_size = virtio_get_config_size(&virtio_blk_cfg_size_params, + vdev->host_features); + virtio_init(vdev, VIRTIO_ID_BLOCK, config_size); s->virtqs = g_new(VirtQueue *, s->num_queues); for (i = 0; i < s->num_queues; i++) { @@ -511,7 +491,7 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) *errp = NULL; } ret = vhost_user_blk_realize_connect(s, errp); - } while (ret == -EPROTO && retries--); + } while (ret < 0 && retries--); if (ret < 0) { goto virtio_err; @@ -568,6 +548,12 @@ static void vhost_user_blk_instance_init(Object *obj) "/disk@0,0", DEVICE(obj)); } +static struct vhost_dev *vhost_user_blk_get_vhost(VirtIODevice *vdev) +{ + VHostUserBlk *s = VHOST_USER_BLK(vdev); + return &s->dev; +} + static const VMStateDescription vmstate_vhost_user_blk = { .name = "vhost-user-blk", .minimum_version_id = 1, @@ -583,7 +569,12 @@ static Property vhost_user_blk_properties[] = { DEFINE_PROP_UINT16("num-queues", VHostUserBlk, num_queues, VHOST_USER_BLK_AUTO_NUM_QUEUES), DEFINE_PROP_UINT32("queue-size", VHostUserBlk, queue_size, 128), - DEFINE_PROP_BIT("config-wce", VHostUserBlk, config_wce, 0, true), + DEFINE_PROP_BIT64("config-wce", VHostUserBlk, parent_obj.host_features, + VIRTIO_BLK_F_CONFIG_WCE, true), + DEFINE_PROP_BIT64("discard", VHostUserBlk, parent_obj.host_features, + 
VIRTIO_BLK_F_DISCARD, true), + DEFINE_PROP_BIT64("write-zeroes", VHostUserBlk, parent_obj.host_features, + VIRTIO_BLK_F_WRITE_ZEROES, true), DEFINE_PROP_END_OF_LIST(), }; @@ -602,6 +593,7 @@ static void vhost_user_blk_class_init(ObjectClass *klass, void *data) vdc->get_features = vhost_user_blk_get_features; vdc->set_status = vhost_user_blk_set_status; vdc->reset = vhost_user_blk_reset; + vdc->get_vhost = vhost_user_blk_get_vhost; } static const TypeInfo vhost_user_blk_info = { diff --git a/hw/block/virtio-blk-common.c b/hw/block/virtio-blk-common.c new file mode 100644 index 0000000000..ac52d7c176 --- /dev/null +++ b/hw/block/virtio-blk-common.c @@ -0,0 +1,39 @@ +/* + * Virtio Block Device common helpers + * + * Copyright IBM, Corp. 2007 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" + +#include "standard-headers/linux/virtio_blk.h" +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-blk-common.h" + +/* Config size before the discard support (hide associated config fields) */ +#define VIRTIO_BLK_CFG_SIZE offsetof(struct virtio_blk_config, \ + max_discard_sectors) + +/* + * Starting from the discard feature, we can use this array to properly + * set the config size depending on the features enabled. + */ +static const VirtIOFeature feature_sizes[] = { + {.flags = 1ULL << VIRTIO_BLK_F_DISCARD, + .end = endof(struct virtio_blk_config, discard_sector_alignment)}, + {.flags = 1ULL << VIRTIO_BLK_F_WRITE_ZEROES, + .end = endof(struct virtio_blk_config, write_zeroes_may_unmap)}, + {} +}; + +const VirtIOConfigSizeParams virtio_blk_cfg_size_params = { + .min_size = VIRTIO_BLK_CFG_SIZE, + .max_size = sizeof(struct virtio_blk_config), + .feature_sizes = feature_sizes +}; diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c index f139cd7cc9..f717550fdc 100644 --- a/hw/block/virtio-blk.c +++ b/hw/block/virtio-blk.c @@ -21,6 +21,7 @@ #include "hw/block/block.h" #include "hw/qdev-properties.h" #include "sysemu/blockdev.h" +#include "sysemu/block-ram-registrar.h" #include "sysemu/sysemu.h" #include "sysemu/runstate.h" #include "hw/virtio/virtio-blk.h" @@ -32,29 +33,8 @@ #include "hw/virtio/virtio-bus.h" #include "migration/qemu-file-types.h" #include "hw/virtio/virtio-access.h" - -/* Config size before the discard support (hide associated config fields) */ -#define VIRTIO_BLK_CFG_SIZE offsetof(struct virtio_blk_config, \ - max_discard_sectors) -/* - * Starting from the discard feature, we can use this array to properly - * set the config size depending on the features enabled. 
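Both vhost-user-blk and virtio-blk now derive their config space size from virtio_blk_cfg_size_params via virtio_get_config_size(), which the patch uses as provided by the virtio core. The standalone sketch below restates the computation that sizing rule implies, with stand-in types (an assumption for illustration, not the actual implementation): start from the pre-discard minimum, grow to the end offset of each enabled feature, and never exceed sizeof(struct virtio_blk_config).

#include <stddef.h>
#include <stdint.h>

/* Stand-in types; the real helper is virtio_get_config_size(). */
struct feature_size {
    uint64_t flags;    /* feature bit(s) that extend the config space */
    size_t end;        /* config size required when the feature is set */
};

size_t config_size_for_features(size_t min_size, size_t max_size,
                                const struct feature_size *sizes, size_t n,
                                uint64_t host_features)
{
    size_t config_size = min_size;

    for (size_t i = 0; i < n; i++) {
        if ((host_features & sizes[i].flags) && sizes[i].end > config_size) {
            config_size = sizes[i].end;
        }
    }
    return config_size <= max_size ? config_size : max_size;
}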
- */ -static const VirtIOFeature feature_sizes[] = { - {.flags = 1ULL << VIRTIO_BLK_F_DISCARD, - .end = endof(struct virtio_blk_config, discard_sector_alignment)}, - {.flags = 1ULL << VIRTIO_BLK_F_WRITE_ZEROES, - .end = endof(struct virtio_blk_config, write_zeroes_may_unmap)}, - {} -}; - -static void virtio_blk_set_config_size(VirtIOBlock *s, uint64_t host_features) -{ - s->config_size = MAX(VIRTIO_BLK_CFG_SIZE, - virtio_feature_get_config_size(feature_sizes, host_features)); - - assert(s->config_size <= sizeof(struct virtio_blk_config)); -} +#include "hw/virtio/virtio-blk-common.h" +#include "qemu/coroutine.h" static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq, VirtIOBlockReq *req) @@ -383,12 +363,14 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req) } } -static inline void submit_requests(BlockBackend *blk, MultiReqBuffer *mrb, +static inline void submit_requests(VirtIOBlock *s, MultiReqBuffer *mrb, int start, int num_reqs, int niov) { + BlockBackend *blk = s->blk; QEMUIOVector *qiov = &mrb->reqs[start]->qiov; int64_t sector_num = mrb->reqs[start]->sector_num; bool is_write = mrb->is_write; + BdrvRequestFlags flags = 0; if (num_reqs > 1) { int i; @@ -419,12 +401,18 @@ static inline void submit_requests(BlockBackend *blk, MultiReqBuffer *mrb, num_reqs - 1); } + if (blk_ram_registrar_ok(&s->blk_ram_registrar)) { + flags |= BDRV_REQ_REGISTERED_BUF; + } + if (is_write) { - blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov, 0, - virtio_blk_rw_complete, mrb->reqs[start]); + blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov, + flags, virtio_blk_rw_complete, + mrb->reqs[start]); } else { - blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov, 0, - virtio_blk_rw_complete, mrb->reqs[start]); + blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov, + flags, virtio_blk_rw_complete, + mrb->reqs[start]); } } @@ -446,14 +434,14 @@ static int multireq_compare(const void *a, const void *b) } } -static void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb) +static void virtio_blk_submit_multireq(VirtIOBlock *s, MultiReqBuffer *mrb) { int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0; uint32_t max_transfer; int64_t sector_num = 0; if (mrb->num_reqs == 1) { - submit_requests(blk, mrb, 0, 1, -1); + submit_requests(s, mrb, 0, 1, -1); mrb->num_reqs = 0; return; } @@ -473,11 +461,11 @@ static void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb) * 3. merge would exceed maximum transfer length of backend device */ if (sector_num + nb_sectors != req->sector_num || - niov > blk_get_max_iov(blk) - req->qiov.niov || + niov > blk_get_max_iov(s->blk) - req->qiov.niov || req->qiov.size > max_transfer || nb_sectors > (max_transfer - req->qiov.size) / BDRV_SECTOR_SIZE) { - submit_requests(blk, mrb, start, num_reqs, niov); + submit_requests(s, mrb, start, num_reqs, niov); num_reqs = 0; } } @@ -493,7 +481,7 @@ static void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb) num_reqs++; } - submit_requests(blk, mrb, start, num_reqs, niov); + submit_requests(s, mrb, start, num_reqs, niov); mrb->num_reqs = 0; } @@ -508,7 +496,7 @@ static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb) * Make sure all outstanding writes are posted to the backing device. 
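The new flags plumbing in submit_requests() only ever sets one bit. A minimal restatement of that decision (the helper name below is mine; blk_ram_registrar_ok() and BDRV_REQ_REGISTERED_BUF come from the patch), with the presumed intent that backends supporting pre-registered buffers can skip per-request buffer registration once every guest RAM block is registered with the block layer:

/* Sketch only: equivalent to the inline check added to submit_requests(). */
static BdrvRequestFlags virtio_blk_request_flags(VirtIOBlock *s)
{
    return blk_ram_registrar_ok(&s->blk_ram_registrar) ?
           BDRV_REQ_REGISTERED_BUF : 0;
}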
*/ if (mrb->is_write && mrb->num_reqs > 0) { - virtio_blk_submit_multireq(s->blk, mrb); + virtio_blk_submit_multireq(s, mrb); } blk_aio_flush(s->blk, virtio_blk_flush_complete, req); } @@ -688,7 +676,7 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS || is_write != mrb->is_write || !s->conf.request_merging)) { - virtio_blk_submit_multireq(s->blk, mrb); + virtio_blk_submit_multireq(s, mrb); } assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS); @@ -767,12 +755,11 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) return 0; } -bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) +void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) { VirtIOBlockReq *req; MultiReqBuffer mrb = {}; bool suppress_notifications = virtio_queue_get_notification(vq); - bool progress = false; aio_context_acquire(blk_get_aio_context(s->blk)); blk_io_plug(s->blk); @@ -783,7 +770,6 @@ bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) } while ((req = virtio_blk_get_request(s, vq))) { - progress = true; if (virtio_blk_handle_request(req, &mrb)) { virtqueue_detach_element(req->vq, &req->elem, 0); virtio_blk_free_request(req); @@ -797,24 +783,18 @@ bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) } while (!virtio_queue_empty(vq)); if (mrb.num_reqs) { - virtio_blk_submit_multireq(s->blk, &mrb); + virtio_blk_submit_multireq(s, &mrb); } blk_io_unplug(s->blk); aio_context_release(blk_get_aio_context(s->blk)); - return progress; -} - -static void virtio_blk_handle_output_do(VirtIOBlock *s, VirtQueue *vq) -{ - virtio_blk_handle_vq(s, vq); } static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq) { VirtIOBlock *s = (VirtIOBlock *)vdev; - if (s->dataplane) { + if (s->dataplane && !s->dataplane_started) { /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start * dataplane here instead of waiting for .set_status(). 
*/ @@ -823,7 +803,7 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq) return; } } - virtio_blk_handle_output_do(s, vq); + virtio_blk_handle_vq(s, vq); } void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh) @@ -852,7 +832,7 @@ void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh) } if (mrb.num_reqs) { - virtio_blk_submit_multireq(s->blk, &mrb); + virtio_blk_submit_multireq(s, &mrb); } if (is_bh) { blk_dec_in_flight(s->conf.conf.blk); @@ -1211,9 +1191,9 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) return; } - virtio_blk_set_config_size(s, s->host_features); - - virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK, s->config_size); + s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params, + s->host_features); + virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size); s->blk = conf->conf.blk; s->rq = NULL; @@ -1222,6 +1202,7 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) for (i = 0; i < conf->num_queues; i++) { virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output); } + qemu_coroutine_inc_pool_size(conf->num_queues * conf->queue_size / 2); virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err); if (err != NULL) { error_propagate(errp, err); @@ -1233,8 +1214,8 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) } s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s); + blk_ram_registrar_init(&s->blk_ram_registrar, s->blk); blk_set_dev_ops(s->blk, &virtio_block_ops, s); - blk_set_guest_block_size(s->blk, s->conf.conf.logical_block_size); blk_iostatus_enable(s->blk); @@ -1258,6 +1239,8 @@ static void virtio_blk_device_unrealize(DeviceState *dev) for (i = 0; i < conf->num_queues; i++) { virtio_del_queue(vdev, i); } + qemu_coroutine_dec_pool_size(conf->num_queues * conf->queue_size / 2); + blk_ram_registrar_destroy(&s->blk_ram_registrar); qemu_del_vm_change_state_handler(s->change); blockdev_mark_auto_del(s->blk); virtio_cleanup(vdev); diff --git a/hw/block/xen-block.c b/hw/block/xen-block.c index 674953f1ad..345b284d70 100644 --- a/hw/block/xen-block.c +++ b/hw/block/xen-block.c @@ -243,7 +243,6 @@ static void xen_block_realize(XenDevice *xendev, Error **errp) } blk_set_dev_ops(blk, &xen_block_dev_ops, blockdev); - blk_set_guest_block_size(blk, conf->logical_block_size); if (conf->discard_granularity == -1) { conf->discard_granularity = conf->physical_block_size; diff --git a/hw/char/Kconfig b/hw/char/Kconfig index 2e4f620b13..6b6cf2fc1d 100644 --- a/hw/char/Kconfig +++ b/hw/char/Kconfig @@ -68,3 +68,6 @@ config SIFIVE_UART config GOLDFISH_TTY bool + +config SHAKTI_UART + bool diff --git a/hw/char/cadence_uart.c b/hw/char/cadence_uart.c index b4b5e8a3ee..c069a30842 100644 --- a/hw/char/cadence_uart.c +++ b/hw/char/cadence_uart.c @@ -235,8 +235,18 @@ static void uart_parameters_setup(CadenceUARTState *s) static int uart_can_receive(void *opaque) { CadenceUARTState *s = opaque; - int ret = MAX(CADENCE_UART_RX_FIFO_SIZE, CADENCE_UART_TX_FIFO_SIZE); - uint32_t ch_mode = s->r[R_MR] & UART_MR_CHMODE; + int ret; + uint32_t ch_mode; + + /* ignore characters when unclocked or in reset */ + if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: uart is unclocked or in reset\n", + __func__); + return 0; + } + + ret = MAX(CADENCE_UART_RX_FIFO_SIZE, CADENCE_UART_TX_FIFO_SIZE); + ch_mode = s->r[R_MR] & UART_MR_CHMODE; if (ch_mode == NORMAL_MODE || ch_mode == ECHO_MODE) { ret = MIN(ret, 
CADENCE_UART_RX_FIFO_SIZE - s->rx_count); @@ -353,11 +363,6 @@ static void uart_receive(void *opaque, const uint8_t *buf, int size) CadenceUARTState *s = opaque; uint32_t ch_mode = s->r[R_MR] & UART_MR_CHMODE; - /* ignore characters when unclocked or in reset */ - if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) { - return; - } - if (ch_mode == NORMAL_MODE || ch_mode == ECHO_MODE) { uart_write_rx_fifo(opaque, buf, size); } @@ -373,6 +378,8 @@ static void uart_event(void *opaque, QEMUChrEvent event) /* ignore characters when unclocked or in reset */ if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: uart is unclocked or in reset\n", + __func__); return; } @@ -403,15 +410,22 @@ static void uart_read_rx_fifo(CadenceUARTState *s, uint32_t *c) uart_update_status(s); } -static void uart_write(void *opaque, hwaddr offset, - uint64_t value, unsigned size) +static MemTxResult uart_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size, MemTxAttrs attrs) { CadenceUARTState *s = opaque; + /* ignore access when unclocked or in reset */ + if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: uart is unclocked or in reset\n", + __func__); + return MEMTX_ERROR; + } + DB_PRINT(" offset:%x data:%08x\n", (unsigned)offset, (unsigned)value); offset >>= 2; if (offset >= CADENCE_UART_R_MAX) { - return; + return MEMTX_DECODE_ERROR; } switch (offset) { case R_IER: /* ier (wts imr) */ @@ -458,30 +472,41 @@ static void uart_write(void *opaque, hwaddr offset, break; } uart_update_status(s); + + return MEMTX_OK; } -static uint64_t uart_read(void *opaque, hwaddr offset, - unsigned size) +static MemTxResult uart_read(void *opaque, hwaddr offset, + uint64_t *value, unsigned size, MemTxAttrs attrs) { CadenceUARTState *s = opaque; uint32_t c = 0; + /* ignore access when unclocked or in reset */ + if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: uart is unclocked or in reset\n", + __func__); + return MEMTX_ERROR; + } + offset >>= 2; if (offset >= CADENCE_UART_R_MAX) { - c = 0; - } else if (offset == R_TX_RX) { + return MEMTX_DECODE_ERROR; + } + if (offset == R_TX_RX) { uart_read_rx_fifo(s, &c); } else { - c = s->r[offset]; + c = s->r[offset]; } DB_PRINT(" offset:%x data:%08x\n", (unsigned)(offset << 2), (unsigned)c); - return c; + *value = c; + return MEMTX_OK; } static const MemoryRegionOps uart_ops = { - .read = uart_read, - .write = uart_write, + .read_with_attrs = uart_read, + .write_with_attrs = uart_write, .endianness = DEVICE_NATIVE_ENDIAN, }; diff --git a/hw/char/escc.c b/hw/char/escc.c index 52e7978287..17a908c59b 100644 --- a/hw/char/escc.c +++ b/hw/char/escc.c @@ -86,12 +86,15 @@ #define W_INTR 1 #define INTR_INTALL 0x01 #define INTR_TXINT 0x02 +#define INTR_PAR_SPEC 0x04 #define INTR_RXMODEMSK 0x18 #define INTR_RXINT1ST 0x08 #define INTR_RXINTALL 0x10 +#define INTR_WTRQ_TXRX 0x20 #define W_IVEC 2 #define W_RXCTRL 3 #define RXCTRL_RXEN 0x01 +#define RXCTRL_HUNT 0x10 #define W_TXCTRL1 4 #define TXCTRL1_PAREN 0x01 #define TXCTRL1_PAREV 0x02 @@ -105,6 +108,7 @@ #define TXCTRL1_CLK64X 0xc0 #define TXCTRL1_CLKMSK 0xc0 #define W_TXCTRL2 5 +#define TXCTRL2_TXCRC 0x01 #define TXCTRL2_TXEN 0x08 #define TXCTRL2_BITMSK 0x60 #define TXCTRL2_5BITS 0x00 @@ -115,18 +119,27 @@ #define W_SYNC2 7 #define W_TXBUF 8 #define W_MINTR 9 +#define MINTR_VIS 0x01 +#define MINTR_NV 0x02 #define MINTR_STATUSHI 0x10 +#define MINTR_SOFTIACK 0x20 
#define MINTR_RST_MASK 0xc0 #define MINTR_RST_B 0x40 #define MINTR_RST_A 0x80 #define MINTR_RST_ALL 0xc0 #define W_MISC1 10 +#define MISC1_ENC_MASK 0x60 #define W_CLOCK 11 #define CLOCK_TRXC 0x08 #define W_BRGLO 12 #define W_BRGHI 13 #define W_MISC2 14 -#define MISC2_PLLDIS 0x30 +#define MISC2_BRG_EN 0x01 +#define MISC2_BRG_SRC 0x02 +#define MISC2_LCL_LOOP 0x10 +#define MISC2_PLLCMD0 0x20 +#define MISC2_PLLCMD1 0x40 +#define MISC2_PLLCMD2 0x80 #define W_EXTINT 15 #define EXTINT_DCD 0x08 #define EXTINT_SYNCINT 0x10 @@ -170,6 +183,7 @@ #define R_RXBUF 8 #define R_RXCTRL 9 #define R_MISC 10 +#define MISC_2CLKMISS 0x40 #define R_MISC1 11 #define R_BRGLO 12 #define R_BRGHI 13 @@ -230,20 +244,23 @@ static uint32_t get_queue(void *opaque) q->count--; } trace_escc_get_queue(CHN_C(s), val); - if (q->count > 0) + if (q->count > 0) { serial_receive_byte(s, 0); + } return val; } static int escc_update_irq_chn(ESCCChannelState *s) { if ((((s->wregs[W_INTR] & INTR_TXINT) && (s->txint == 1)) || - // tx ints enabled, pending - ((((s->wregs[W_INTR] & INTR_RXMODEMSK) == INTR_RXINT1ST) || - ((s->wregs[W_INTR] & INTR_RXMODEMSK) == INTR_RXINTALL)) && - s->rxint == 1) || // rx ints enabled, pending - ((s->wregs[W_EXTINT] & EXTINT_BRKINT) && - (s->rregs[R_STATUS] & STATUS_BRK)))) { // break int e&p + /* tx ints enabled, pending */ + ((((s->wregs[W_INTR] & INTR_RXMODEMSK) == INTR_RXINT1ST) || + ((s->wregs[W_INTR] & INTR_RXMODEMSK) == INTR_RXINTALL)) && + s->rxint == 1) || + /* rx ints enabled, pending */ + ((s->wregs[W_EXTINT] & EXTINT_BRKINT) && + (s->rregs[R_STATUS] & STATUS_BRK)))) { + /* break int e&p */ return 1; } return 0; @@ -262,26 +279,7 @@ static void escc_update_irq(ESCCChannelState *s) static void escc_reset_chn(ESCCChannelState *s) { - int i; - s->reg = 0; - for (i = 0; i < ESCC_SERIAL_REGS; i++) { - s->rregs[i] = 0; - s->wregs[i] = 0; - } - s->wregs[W_TXCTRL1] = TXCTRL1_1STOP; // 1X divisor, 1 stop bit, no parity - s->wregs[W_MINTR] = MINTR_RST_ALL; - s->wregs[W_CLOCK] = CLOCK_TRXC; // Synch mode tx clock = TRxC - s->wregs[W_MISC2] = MISC2_PLLDIS; // PLL disabled - s->wregs[W_EXTINT] = EXTINT_DCD | EXTINT_SYNCINT | EXTINT_CTSINT | - EXTINT_TXUNDRN | EXTINT_BRKINT; // Enable most interrupts - if (s->disabled) - s->rregs[R_STATUS] = STATUS_TXEMPTY | STATUS_DCD | STATUS_SYNC | - STATUS_CTS | STATUS_TXUNDRN; - else - s->rregs[R_STATUS] = STATUS_TXEMPTY | STATUS_TXUNDRN; - s->rregs[R_SPEC] = SPEC_BITS8 | SPEC_ALLSENT; - s->rx = s->tx = 0; s->rxint = s->txint = 0; s->rxint_under_svc = s->txint_under_svc = 0; @@ -289,32 +287,110 @@ static void escc_reset_chn(ESCCChannelState *s) clear_queue(s); } +static void escc_soft_reset_chn(ESCCChannelState *s) +{ + escc_reset_chn(s); + + s->wregs[W_CMD] = 0; + s->wregs[W_INTR] &= INTR_PAR_SPEC | INTR_WTRQ_TXRX; + s->wregs[W_RXCTRL] &= ~RXCTRL_RXEN; + /* 1 stop bit */ + s->wregs[W_TXCTRL1] |= TXCTRL1_1STOP; + s->wregs[W_TXCTRL2] &= TXCTRL2_TXCRC | TXCTRL2_8BITS; + s->wregs[W_MINTR] &= ~MINTR_SOFTIACK; + s->wregs[W_MISC1] &= MISC1_ENC_MASK; + /* PLL disabled */ + s->wregs[W_MISC2] &= MISC2_BRG_EN | MISC2_BRG_SRC | + MISC2_PLLCMD1 | MISC2_PLLCMD2; + s->wregs[W_MISC2] |= MISC2_PLLCMD0; + /* Enable most interrupts */ + s->wregs[W_EXTINT] = EXTINT_DCD | EXTINT_SYNCINT | EXTINT_CTSINT | + EXTINT_TXUNDRN | EXTINT_BRKINT; + + s->rregs[R_STATUS] &= STATUS_DCD | STATUS_SYNC | STATUS_CTS | STATUS_BRK; + s->rregs[R_STATUS] |= STATUS_TXEMPTY | STATUS_TXUNDRN; + if (s->disabled) { + s->rregs[R_STATUS] |= STATUS_DCD | STATUS_SYNC | STATUS_CTS; + } + s->rregs[R_SPEC] &= 
SPEC_ALLSENT; + s->rregs[R_SPEC] |= SPEC_BITS8; + s->rregs[R_INTR] = 0; + s->rregs[R_MISC] &= MISC_2CLKMISS; +} + +static void escc_hard_reset_chn(ESCCChannelState *s) +{ + escc_soft_reset_chn(s); + + /* + * Hard reset is almost identical to soft reset above, except that the + * values of WR9 (W_MINTR), WR10 (W_MISC1), WR11 (W_CLOCK) and WR14 + * (W_MISC2) have extra bits forced to 0/1 + */ + s->wregs[W_MINTR] &= MINTR_VIS | MINTR_NV; + s->wregs[W_MINTR] |= MINTR_RST_B | MINTR_RST_A; + s->wregs[W_MISC1] = 0; + s->wregs[W_CLOCK] = CLOCK_TRXC; + s->wregs[W_MISC2] &= MISC2_PLLCMD1 | MISC2_PLLCMD2; + s->wregs[W_MISC2] |= MISC2_LCL_LOOP | MISC2_PLLCMD0; +} + static void escc_reset(DeviceState *d) { ESCCState *s = ESCC(d); + int i, j; - escc_reset_chn(&s->chn[0]); - escc_reset_chn(&s->chn[1]); + for (i = 0; i < 2; i++) { + ESCCChannelState *cs = &s->chn[i]; + + /* + * According to the ESCC datasheet "Miscellaneous Questions" section + * on page 384, the values of the ESCC registers are not guaranteed on + * power-on until an explicit hardware or software reset has been + * issued. For now we zero the registers so that a device reset always + * returns the emulated device to a fixed state. + */ + for (j = 0; j < ESCC_SERIAL_REGS; j++) { + cs->rregs[j] = 0; + cs->wregs[j] = 0; + } + + /* + * ...but there is an exception. The "Transmit Interrupts and Transmit + * Buffer Empty Bit" section on page 50 of the ESCC datasheet says of + * the STATUS_TXEMPTY bit in R_STATUS: "After a hardware reset + * (including a hardware reset by software), or a channel reset, this + * bit is set to 1". The Sun PROM checks this bit early on startup and + * gets stuck in an infinite loop if it is not set. + */ + cs->rregs[R_STATUS] |= STATUS_TXEMPTY; + + escc_reset_chn(cs); + } } static inline void set_rxint(ESCCChannelState *s) { s->rxint = 1; - /* XXX: missing daisy chainnig: escc_chn_b rx should have a lower priority - than chn_a rx/tx/special_condition service*/ + /* + * XXX: missing daisy chaining: escc_chn_b rx should have a lower priority + * than chn_a rx/tx/special_condition service + */ s->rxint_under_svc = 1; if (s->chn == escc_chn_a) { s->rregs[R_INTR] |= INTR_RXINTA; - if (s->wregs[W_MINTR] & MINTR_STATUSHI) + if (s->wregs[W_MINTR] & MINTR_STATUSHI) { s->otherchn->rregs[R_IVEC] = IVEC_HIRXINTA; - else + } else { s->otherchn->rregs[R_IVEC] = IVEC_LORXINTA; + } } else { s->otherchn->rregs[R_INTR] |= INTR_RXINTB; - if (s->wregs[W_MINTR] & MINTR_STATUSHI) + if (s->wregs[W_MINTR] & MINTR_STATUSHI) { s->rregs[R_IVEC] = IVEC_HIRXINTB; - else + } else { s->rregs[R_IVEC] = IVEC_LORXINTB; + } } escc_update_irq(s); } @@ -328,17 +404,18 @@ static inline void set_txint(ESCCChannelState *s) if (s->wregs[W_INTR] & INTR_TXINT) { s->rregs[R_INTR] |= INTR_TXINTA; } - if (s->wregs[W_MINTR] & MINTR_STATUSHI) + if (s->wregs[W_MINTR] & MINTR_STATUSHI) { s->otherchn->rregs[R_IVEC] = IVEC_HITXINTA; - else + } else { s->otherchn->rregs[R_IVEC] = IVEC_LOTXINTA; + } } else { s->rregs[R_IVEC] = IVEC_TXINTB; if (s->wregs[W_INTR] & INTR_TXINT) { s->otherchn->rregs[R_INTR] |= INTR_TXINTB; } } - escc_update_irq(s); + escc_update_irq(s); } } @@ -347,20 +424,23 @@ static inline void clr_rxint(ESCCChannelState *s) s->rxint = 0; s->rxint_under_svc = 0; if (s->chn == escc_chn_a) { - if (s->wregs[W_MINTR] & MINTR_STATUSHI) + if (s->wregs[W_MINTR] & MINTR_STATUSHI) { s->otherchn->rregs[R_IVEC] = IVEC_HINOINT; - else + } else { s->otherchn->rregs[R_IVEC] = IVEC_LONOINT; + } s->rregs[R_INTR] &= ~INTR_RXINTA; } else { - if (s->wregs[W_MINTR] & 
MINTR_STATUSHI) + if (s->wregs[W_MINTR] & MINTR_STATUSHI) { s->rregs[R_IVEC] = IVEC_HINOINT; - else + } else { s->rregs[R_IVEC] = IVEC_LONOINT; + } s->otherchn->rregs[R_INTR] &= ~INTR_RXINTB; } - if (s->txint) + if (s->txint) { set_txint(s); + } escc_update_irq(s); } @@ -369,21 +449,24 @@ static inline void clr_txint(ESCCChannelState *s) s->txint = 0; s->txint_under_svc = 0; if (s->chn == escc_chn_a) { - if (s->wregs[W_MINTR] & MINTR_STATUSHI) + if (s->wregs[W_MINTR] & MINTR_STATUSHI) { s->otherchn->rregs[R_IVEC] = IVEC_HINOINT; - else + } else { s->otherchn->rregs[R_IVEC] = IVEC_LONOINT; + } s->rregs[R_INTR] &= ~INTR_TXINTA; } else { s->otherchn->rregs[R_INTR] &= ~INTR_TXINTB; - if (s->wregs[W_MINTR] & MINTR_STATUSHI) + if (s->wregs[W_MINTR] & MINTR_STATUSHI) { s->rregs[R_IVEC] = IVEC_HINOINT; - else + } else { s->rregs[R_IVEC] = IVEC_LONOINT; + } s->otherchn->rregs[R_INTR] &= ~INTR_TXINTB; } - if (s->rxint) + if (s->rxint) { set_rxint(s); + } escc_update_irq(s); } @@ -392,21 +475,24 @@ static void escc_update_parameters(ESCCChannelState *s) int speed, parity, data_bits, stop_bits; QEMUSerialSetParams ssp; - if (!qemu_chr_fe_backend_connected(&s->chr) || s->type != escc_serial) + if (!qemu_chr_fe_backend_connected(&s->chr) || s->type != escc_serial) { return; + } if (s->wregs[W_TXCTRL1] & TXCTRL1_PAREN) { - if (s->wregs[W_TXCTRL1] & TXCTRL1_PAREV) + if (s->wregs[W_TXCTRL1] & TXCTRL1_PAREV) { parity = 'E'; - else + } else { parity = 'O'; + } } else { parity = 'N'; } - if ((s->wregs[W_TXCTRL1] & TXCTRL1_STPMSK) == TXCTRL1_2STOP) + if ((s->wregs[W_TXCTRL1] & TXCTRL1_STPMSK) == TXCTRL1_2STOP) { stop_bits = 2; - else + } else { stop_bits = 1; + } switch (s->wregs[W_TXCTRL2] & TXCTRL2_BITMSK) { case TXCTRL2_5BITS: data_bits = 5; @@ -487,13 +573,33 @@ static void escc_mem_write(void *opaque, hwaddr addr, break; } break; - case W_INTR ... W_RXCTRL: + case W_RXCTRL: + s->wregs[s->reg] = val; + if (val & RXCTRL_HUNT) { + s->rregs[R_STATUS] |= STATUS_SYNC; + } + break; + case W_INTR ... W_IVEC: case W_SYNC1 ... W_TXBUF: case W_MISC1 ... W_CLOCK: case W_MISC2 ... W_EXTINT: s->wregs[s->reg] = val; break; case W_TXCTRL1: + s->wregs[s->reg] = val; + /* + * The ESCC datasheet states that SPEC_ALLSENT is always set in + * sync mode, and set in async mode when all characters have + * cleared the transmitter. Since writes to SERIAL_DATA use the + * blocking qemu_chr_fe_write_all() function to write each + * character, the guest can never see the state when async data + * is in the process of being transmitted so we can set this bit + * unconditionally regardless of the state of the W_TXCTRL1 mode + * bits. 
+ */ + s->rregs[R_SPEC] |= SPEC_ALLSENT; + escc_update_parameters(s); + break; case W_TXCTRL2: s->wregs[s->reg] = val; escc_update_parameters(s); @@ -510,23 +616,28 @@ static void escc_mem_write(void *opaque, hwaddr addr, default: break; case MINTR_RST_B: - escc_reset_chn(&serial->chn[0]); + trace_escc_soft_reset_chn(CHN_C(&serial->chn[0])); + escc_soft_reset_chn(&serial->chn[0]); return; case MINTR_RST_A: - escc_reset_chn(&serial->chn[1]); + trace_escc_soft_reset_chn(CHN_C(&serial->chn[1])); + escc_soft_reset_chn(&serial->chn[1]); return; case MINTR_RST_ALL: - escc_reset(DEVICE(serial)); + trace_escc_hard_reset(); + escc_hard_reset_chn(&serial->chn[0]); + escc_hard_reset_chn(&serial->chn[1]); return; } break; default: break; } - if (s->reg == 0) + if (s->reg == 0) { s->reg = newreg; - else + } else { s->reg = 0; + } break; case SERIAL_DATA: trace_escc_mem_writeb_data(CHN_C(s), val); @@ -538,17 +649,19 @@ static void escc_mem_write(void *opaque, hwaddr addr, s->txint = 0; escc_update_irq(s); s->tx = val; - if (s->wregs[W_TXCTRL2] & TXCTRL2_TXEN) { // tx enabled + if (s->wregs[W_TXCTRL2] & TXCTRL2_TXEN) { /* tx enabled */ if (qemu_chr_fe_backend_connected(&s->chr)) { - /* XXX this blocks entire thread. Rewrite to use - * qemu_chr_fe_write and background I/O callbacks */ + /* + * XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks + */ qemu_chr_fe_write_all(&s->chr, &s->tx, 1); } else if (s->type == escc_kbd && !s->disabled) { handle_kbd_command(s, val); } } - s->rregs[R_STATUS] |= STATUS_TXEMPTY; // Tx buffer empty - s->rregs[R_SPEC] |= SPEC_ALLSENT; // All sent + s->rregs[R_STATUS] |= STATUS_TXEMPTY; /* Tx buffer empty */ + s->rregs[R_SPEC] |= SPEC_ALLSENT; /* All sent */ set_txint(s); break; default: @@ -606,12 +719,13 @@ static int serial_can_receive(void *opaque) ESCCChannelState *s = opaque; int ret; - if (((s->wregs[W_RXCTRL] & RXCTRL_RXEN) == 0) // Rx not enabled - || ((s->rregs[R_STATUS] & STATUS_RXAV) == STATUS_RXAV)) - // char already available + if (((s->wregs[W_RXCTRL] & RXCTRL_RXEN) == 0) /* Rx not enabled */ + || ((s->rregs[R_STATUS] & STATUS_RXAV) == STATUS_RXAV)) { + /* char already available */ ret = 0; - else + } else { ret = 1; + } return ret; } @@ -638,12 +752,13 @@ static void serial_receive1(void *opaque, const uint8_t *buf, int size) static void serial_event(void *opaque, QEMUChrEvent event) { ESCCChannelState *s = opaque; - if (event == CHR_EVENT_BREAK) + if (event == CHR_EVENT_BREAK) { serial_receive_break(s); + } } static const VMStateDescription vmstate_escc_chn = { - .name ="escc_chn", + .name = "escc_chn", .version_id = 2, .minimum_version_id = 1, .fields = (VMStateField[]) { @@ -662,7 +777,7 @@ static const VMStateDescription vmstate_escc_chn = { }; static const VMStateDescription vmstate_escc = { - .name ="escc", + .name = "escc", .version_id = 2, .minimum_version_id = 1, .fields = (VMStateField[]) { @@ -713,7 +828,7 @@ static void sunkbd_handle_event(DeviceState *dev, QemuConsole *src, } } - if (qcode > qemu_input_map_qcode_to_sun_len) { + if (qcode >= qemu_input_map_qcode_to_sun_len) { return; } @@ -734,21 +849,21 @@ static QemuInputHandler sunkbd_handler = { static void handle_kbd_command(ESCCChannelState *s, int val) { trace_escc_kbd_command(val); - if (s->led_mode) { // Ignore led byte + if (s->led_mode) { /* Ignore led byte */ s->led_mode = 0; return; } switch (val) { - case 1: // Reset, return type code + case 1: /* Reset, return type code */ clear_queue(s); put_queue(s, 0xff); - put_queue(s, 4); // Type 4 + 
put_queue(s, 4); /* Type 4 */ put_queue(s, 0x7f); break; - case 0xe: // Set leds + case 0xe: /* Set leds */ s->led_mode = 1; break; - case 7: // Query layout + case 7: /* Query layout */ case 0xf: clear_queue(s); put_queue(s, 0xfe); @@ -768,34 +883,39 @@ static void sunmouse_event(void *opaque, trace_escc_sunmouse_event(dx, dy, buttons_state); ch = 0x80 | 0x7; /* protocol start byte, no buttons pressed */ - if (buttons_state & MOUSE_EVENT_LBUTTON) + if (buttons_state & MOUSE_EVENT_LBUTTON) { ch ^= 0x4; - if (buttons_state & MOUSE_EVENT_MBUTTON) + } + if (buttons_state & MOUSE_EVENT_MBUTTON) { ch ^= 0x2; - if (buttons_state & MOUSE_EVENT_RBUTTON) + } + if (buttons_state & MOUSE_EVENT_RBUTTON) { ch ^= 0x1; + } put_queue(s, ch); ch = dx; - if (ch > 127) + if (ch > 127) { ch = 127; - else if (ch < -127) + } else if (ch < -127) { ch = -127; + } put_queue(s, ch & 0xff); ch = -dy; - if (ch > 127) + if (ch > 127) { ch = 127; - else if (ch < -127) + } else if (ch < -127) { ch = -127; + } put_queue(s, ch & 0xff); - // MSC protocol specify two extra motion bytes + /* MSC protocol specifies two extra motion bytes */ put_queue(s, 0); put_queue(s, 0); diff --git a/hw/char/exynos4210_uart.c b/hw/char/exynos4210_uart.c index 80d401a379..7b7c56b6ef 100644 --- a/hw/char/exynos4210_uart.c +++ b/hw/char/exynos4210_uart.c @@ -211,7 +211,7 @@ static void fifo_reset(Exynos4210UartFIFO *q) g_free(q->data); q->data = NULL; - q->data = (uint8_t *)g_malloc0(q->size); + q->data = g_malloc0(q->size); q->sp = 0; q->rp = 0; @@ -628,7 +628,6 @@ static const VMStateDescription vmstate_exynos4210_uart_fifo = { .name = "exynos4210.uart.fifo", .version_id = 1, .minimum_version_id = 1, - .post_load = exynos4210_uart_post_load, .fields = (VMStateField[]) { VMSTATE_UINT32(sp, Exynos4210UartFIFO), VMSTATE_UINT32(rp, Exynos4210UartFIFO), @@ -641,6 +640,7 @@ static const VMStateDescription vmstate_exynos4210_uart = { .name = "exynos4210.uart", .version_id = 1, .minimum_version_id = 1, + .post_load = exynos4210_uart_post_load, .fields = (VMStateField[]) { VMSTATE_STRUCT(rx, Exynos4210UartState, 1, vmstate_exynos4210_uart_fifo, Exynos4210UartFIFO), diff --git a/hw/char/goldfish_tty.c b/hw/char/goldfish_tty.c index 8365a18761..20b77885c1 100644 --- a/hw/char/goldfish_tty.c +++ b/hw/char/goldfish_tty.c @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * Goldfish TTY * diff --git a/hw/char/ibex_uart.c b/hw/char/ibex_uart.c index 9b0a817713..e58181fcf4 100644 --- a/hw/char/ibex_uart.c +++ b/hw/char/ibex_uart.c @@ -550,6 +550,7 @@ static void ibex_uart_class_init(ObjectClass *klass, void *data) dc->realize = ibex_uart_realize; dc->vmsd = &vmstate_ibex_uart; device_class_set_props(dc, ibex_uart_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } static const TypeInfo ibex_uart_info = { diff --git a/hw/char/mchp_pfsoc_mmuart.c b/hw/char/mchp_pfsoc_mmuart.c index 2facf85c2d..22f3e78eb9 100644 --- a/hw/char/mchp_pfsoc_mmuart.c +++ b/hw/char/mchp_pfsoc_mmuart.c @@ -22,20 +22,25 @@ #include "qemu/osdep.h" #include "qemu/log.h" -#include "chardev/char.h" +#include "qapi/error.h" +#include "migration/vmstate.h" #include "hw/char/mchp_pfsoc_mmuart.h" +#include "hw/qdev-properties.h" + +#define REGS_OFFSET 0x20 static uint64_t mchp_pfsoc_mmuart_read(void *opaque, hwaddr addr, unsigned size) { MchpPfSoCMMUartState *s = opaque; - if (addr >= MCHP_PFSOC_MMUART_REG_SIZE) { + addr >>= 2; + if (addr >= MCHP_PFSOC_MMUART_REG_COUNT) { qemu_log_mask(LOG_GUEST_ERROR, "%s: read: 
addr=0x%" HWADDR_PRIx "\n", - __func__, addr); + __func__, addr << 2); return 0; } - return s->reg[addr / sizeof(uint32_t)]; + return s->reg[addr]; } static void mchp_pfsoc_mmuart_write(void *opaque, hwaddr addr, @@ -44,13 +49,14 @@ static void mchp_pfsoc_mmuart_write(void *opaque, hwaddr addr, MchpPfSoCMMUartState *s = opaque; uint32_t val32 = (uint32_t)value; - if (addr >= MCHP_PFSOC_MMUART_REG_SIZE) { + addr >>= 2; + if (addr >= MCHP_PFSOC_MMUART_REG_COUNT) { qemu_log_mask(LOG_GUEST_ERROR, "%s: bad write: addr=0x%" HWADDR_PRIx - " v=0x%x\n", __func__, addr, val32); + " v=0x%x\n", __func__, addr << 2, val32); return; } - s->reg[addr / sizeof(uint32_t)] = val32; + s->reg[addr] = val32; } static const MemoryRegionOps mchp_pfsoc_mmuart_ops = { @@ -63,23 +69,95 @@ static const MemoryRegionOps mchp_pfsoc_mmuart_ops = { }, }; -MchpPfSoCMMUartState *mchp_pfsoc_mmuart_create(MemoryRegion *sysmem, - hwaddr base, qemu_irq irq, Chardev *chr) +static void mchp_pfsoc_mmuart_reset(DeviceState *dev) { - MchpPfSoCMMUartState *s; + MchpPfSoCMMUartState *s = MCHP_PFSOC_UART(dev); - s = g_new0(MchpPfSoCMMUartState, 1); - - memory_region_init_io(&s->iomem, NULL, &mchp_pfsoc_mmuart_ops, s, - "mchp.pfsoc.mmuart", 0x1000); - - s->base = base; - s->irq = irq; - - s->serial = serial_mm_init(sysmem, base, 2, irq, 399193, chr, - DEVICE_LITTLE_ENDIAN); - - memory_region_add_subregion(sysmem, base + 0x20, &s->iomem); - - return s; + memset(s->reg, 0, sizeof(s->reg)); + device_cold_reset(DEVICE(&s->serial_mm)); +} + +static void mchp_pfsoc_mmuart_init(Object *obj) +{ + MchpPfSoCMMUartState *s = MCHP_PFSOC_UART(obj); + + object_initialize_child(obj, "serial-mm", &s->serial_mm, TYPE_SERIAL_MM); + object_property_add_alias(obj, "chardev", OBJECT(&s->serial_mm), "chardev"); +} + +static void mchp_pfsoc_mmuart_realize(DeviceState *dev, Error **errp) +{ + MchpPfSoCMMUartState *s = MCHP_PFSOC_UART(dev); + + qdev_prop_set_uint8(DEVICE(&s->serial_mm), "regshift", 2); + qdev_prop_set_uint32(DEVICE(&s->serial_mm), "baudbase", 399193); + qdev_prop_set_uint8(DEVICE(&s->serial_mm), "endianness", + DEVICE_LITTLE_ENDIAN); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->serial_mm), errp)) { + return; + } + + sysbus_pass_irq(SYS_BUS_DEVICE(dev), SYS_BUS_DEVICE(&s->serial_mm)); + + memory_region_init(&s->container, OBJECT(s), "mchp.pfsoc.mmuart", 0x1000); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container); + + memory_region_add_subregion(&s->container, 0, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->serial_mm), 0)); + + memory_region_init_io(&s->iomem, OBJECT(s), &mchp_pfsoc_mmuart_ops, s, + "mchp.pfsoc.mmuart.regs", 0x1000 - REGS_OFFSET); + memory_region_add_subregion(&s->container, REGS_OFFSET, &s->iomem); +} + +static const VMStateDescription mchp_pfsoc_mmuart_vmstate = { + .name = "mchp.pfsoc.uart", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(reg, MchpPfSoCMMUartState, + MCHP_PFSOC_MMUART_REG_COUNT), + VMSTATE_END_OF_LIST() + } +}; + +static void mchp_pfsoc_mmuart_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = mchp_pfsoc_mmuart_realize; + dc->reset = mchp_pfsoc_mmuart_reset; + dc->vmsd = &mchp_pfsoc_mmuart_vmstate; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo mchp_pfsoc_mmuart_info = { + .name = TYPE_MCHP_PFSOC_UART, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MchpPfSoCMMUartState), + .instance_init = mchp_pfsoc_mmuart_init, + .class_init = mchp_pfsoc_mmuart_class_init, +}; + 
+static void mchp_pfsoc_mmuart_register_types(void) +{ + type_register_static(&mchp_pfsoc_mmuart_info); +} + +type_init(mchp_pfsoc_mmuart_register_types) + +MchpPfSoCMMUartState *mchp_pfsoc_mmuart_create(MemoryRegion *sysmem, + hwaddr base, + qemu_irq irq, Chardev *chr) +{ + DeviceState *dev = qdev_new(TYPE_MCHP_PFSOC_UART); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + qdev_prop_set_chr(dev, "chardev", chr); + sysbus_realize(sbd, &error_fatal); + + memory_region_add_subregion(sysmem, base, sysbus_mmio_get_region(sbd, 0)); + sysbus_connect_irq(sbd, 0, irq); + + return MCHP_PFSOC_UART(dev); } diff --git a/hw/char/meson.build b/hw/char/meson.build index 8361d0ab28..7b594f51b8 100644 --- a/hw/char/meson.build +++ b/hw/char/meson.build @@ -16,7 +16,7 @@ softmmu_ss.add(when: 'CONFIG_SERIAL', if_true: files('serial.c')) softmmu_ss.add(when: 'CONFIG_SERIAL_ISA', if_true: files('serial-isa.c')) softmmu_ss.add(when: 'CONFIG_SERIAL_PCI', if_true: files('serial-pci.c')) softmmu_ss.add(when: 'CONFIG_SERIAL_PCI_MULTI', if_true: files('serial-pci-multi.c')) -softmmu_ss.add(when: 'CONFIG_SHAKTI', if_true: files('shakti_uart.c')) +softmmu_ss.add(when: 'CONFIG_SHAKTI_UART', if_true: files('shakti_uart.c')) softmmu_ss.add(when: 'CONFIG_VIRTIO_SERIAL', if_true: files('virtio-console.c')) softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xen_console.c')) softmmu_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_uartlite.c')) diff --git a/hw/char/parallel.c b/hw/char/parallel.c index b45e67bfbb..1c9ca47820 100644 --- a/hw/char/parallel.c +++ b/hw/char/parallel.c @@ -28,7 +28,7 @@ #include "qemu/module.h" #include "chardev/char-parallel.h" #include "chardev/char-fe.h" -#include "hw/acpi/aml-build.h" +#include "hw/acpi/acpi_aml_interface.h" #include "hw/irq.h" #include "hw/isa/isa.h" #include "hw/qdev-properties.h" @@ -553,7 +553,7 @@ static void parallel_isa_realizefn(DeviceState *dev, Error **errp) index++; base = isa->iobase; - isa_init_irq(isadev, &s->irq, isa->isairq); + s->irq = isa_get_irq(isadev, isa->isairq); qemu_register_reset(parallel_reset, s); qemu_chr_fe_set_handlers(&s->chr, parallel_can_receive, NULL, @@ -570,9 +570,9 @@ static void parallel_isa_realizefn(DeviceState *dev, Error **errp) s, "parallel"); } -static void parallel_isa_build_aml(ISADevice *isadev, Aml *scope) +static void parallel_isa_build_aml(AcpiDevAmlIf *adev, Aml *scope) { - ISAParallelState *isa = ISA_PARALLEL(isadev); + ISAParallelState *isa = ISA_PARALLEL(adev); Aml *dev; Aml *crs; @@ -622,7 +622,7 @@ bool parallel_mm_init(MemoryRegion *address_space, { ParallelState *s; - s = g_malloc0(sizeof(ParallelState)); + s = g_new0(ParallelState, 1); s->irq = irq; qemu_chr_fe_init(&s->chr, chr, &error_abort); s->it_shift = it_shift; @@ -645,11 +645,11 @@ static Property parallel_isa_properties[] = { static void parallel_isa_class_initfn(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - ISADeviceClass *isa = ISA_DEVICE_CLASS(klass); + AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); dc->realize = parallel_isa_realizefn; dc->vmsd = &vmstate_parallel_isa; - isa->build_aml = parallel_isa_build_aml; + adevc->build_dev_aml = parallel_isa_build_aml; device_class_set_props(dc, parallel_isa_properties); set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } @@ -659,6 +659,10 @@ static const TypeInfo parallel_isa_info = { .parent = TYPE_ISA_DEVICE, .instance_size = sizeof(ISAParallelState), .class_init = parallel_isa_class_initfn, + .interfaces = (InterfaceInfo[]) { + { TYPE_ACPI_DEV_AML_IF }, + { }, + }, }; 
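The parallel_isa hunks just above (and the serial-isa hunks that follow) replace ISADeviceClass::build_aml with the AcpiDevAmlIf interface: the callback now takes an AcpiDevAmlIf *, class init fills in adevc->build_dev_aml, and the TypeInfo gains TYPE_ACPI_DEV_AML_IF in its interfaces list. A hedged sketch of that adoption pattern for a hypothetical "foo-isa" device (FooISAState, FOO_ISA and foo_isa_realize are placeholders, not part of this patch; the QEMU headers included by the surrounding files are assumed):

    static void foo_isa_build_aml(AcpiDevAmlIf *adev, Aml *scope)
    {
        FooISAState *s = FOO_ISA(adev);          /* cast via the interface, not via ISADevice */
        Aml *dev = aml_device("FOO0");

        /* a real device would append _HID/_CRS resources describing 's' here */
        aml_append(scope, dev);
        (void)s;
    }

    static void foo_isa_class_init(ObjectClass *klass, void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);
        AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass);

        dc->realize = foo_isa_realize;
        adevc->build_dev_aml = foo_isa_build_aml;   /* was ISADeviceClass::build_aml */
    }

    static const TypeInfo foo_isa_info = {
        .name          = "foo-isa",
        .parent        = TYPE_ISA_DEVICE,
        .instance_size = sizeof(FooISAState),
        .class_init    = foo_isa_class_init,
        .interfaces    = (InterfaceInfo[]) {
            { TYPE_ACPI_DEV_AML_IF },
            { },
        },
    };

Declaring the interface on the TypeInfo is what lets the generic ACPI table builder discover such devices and invoke build_dev_aml without any ISA-specific class hook.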
static void parallel_register_types(void) diff --git a/hw/char/pl011.c b/hw/char/pl011.c index dc85527a5f..c076813423 100644 --- a/hw/char/pl011.c +++ b/hw/char/pl011.c @@ -26,6 +26,7 @@ #include "hw/qdev-properties-system.h" #include "migration/vmstate.h" #include "chardev/char-fe.h" +#include "chardev/char-serial.h" #include "qemu/log.h" #include "qemu/module.h" #include "trace.h" @@ -175,7 +176,7 @@ static unsigned int pl011_get_baudrate(const PL011State *s) { uint64_t clk; - if (s->fbrd == 0) { + if (s->ibrd == 0) { return 0; } @@ -231,6 +232,11 @@ static void pl011_write(void *opaque, hwaddr offset, s->read_count = 0; s->read_pos = 0; } + if ((s->lcr ^ value) & 0x1) { + int break_enable = value & 0x1; + qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_BREAK, + &break_enable); + } s->lcr = value; pl011_set_read_trigger(s); break; diff --git a/hw/char/riscv_htif.c b/hw/char/riscv_htif.c index ddae738d56..6577f0e640 100644 --- a/hw/char/riscv_htif.c +++ b/hw/char/riscv_htif.c @@ -228,15 +228,27 @@ static const MemoryRegionOps htif_mm_ops = { .write = htif_mm_write, }; -HTIFState *htif_mm_init(MemoryRegion *address_space, MemoryRegion *main_mem, - CPURISCVState *env, Chardev *chr) +bool htif_uses_elf_symbols(void) { - uint64_t base = MIN(tohost_addr, fromhost_addr); - uint64_t size = MAX(tohost_addr + 8, fromhost_addr + 8) - base; - uint64_t tohost_offset = tohost_addr - base; - uint64_t fromhost_offset = fromhost_addr - base; + return (address_symbol_set == 3) ? true : false; +} - HTIFState *s = g_malloc0(sizeof(HTIFState)); +HTIFState *htif_mm_init(MemoryRegion *address_space, MemoryRegion *main_mem, + CPURISCVState *env, Chardev *chr, uint64_t nonelf_base) +{ + uint64_t base, size, tohost_offset, fromhost_offset; + + if (!htif_uses_elf_symbols()) { + fromhost_addr = nonelf_base; + tohost_addr = nonelf_base + 8; + } + + base = MIN(tohost_addr, fromhost_addr); + size = MAX(tohost_addr + 8, fromhost_addr + 8) - base; + tohost_offset = tohost_addr - base; + fromhost_offset = fromhost_addr - base; + + HTIFState *s = g_new0(HTIFState, 1); s->address_space = address_space; s->main_mem = main_mem; s->main_mem_ram_ptr = memory_region_get_ram_ptr(main_mem); @@ -249,12 +261,11 @@ HTIFState *htif_mm_init(MemoryRegion *address_space, MemoryRegion *main_mem, qemu_chr_fe_init(&s->chr, chr, &error_abort); qemu_chr_fe_set_handlers(&s->chr, htif_can_recv, htif_recv, htif_event, htif_be_change, s, NULL, true); - if (address_symbol_set == 3) { - memory_region_init_io(&s->mmio, NULL, &htif_mm_ops, s, - TYPE_HTIF_UART, size); - memory_region_add_subregion_overlap(address_space, base, - &s->mmio, 1); - } + + memory_region_init_io(&s->mmio, NULL, &htif_mm_ops, s, + TYPE_HTIF_UART, size); + memory_region_add_subregion_overlap(address_space, base, + &s->mmio, 1); return s; } diff --git a/hw/char/serial-isa.c b/hw/char/serial-isa.c index 1b8b303079..141a6cb168 100644 --- a/hw/char/serial-isa.c +++ b/hw/char/serial-isa.c @@ -27,7 +27,7 @@ #include "qapi/error.h" #include "qemu/module.h" #include "sysemu/sysemu.h" -#include "hw/acpi/aml-build.h" +#include "hw/acpi/acpi_aml_interface.h" #include "hw/char/serial.h" #include "hw/isa/isa.h" #include "hw/qdev-properties.h" @@ -75,7 +75,7 @@ static void serial_isa_realizefn(DeviceState *dev, Error **errp) } index++; - isa_init_irq(isadev, &s->irq, isa->isairq); + s->irq = isa_get_irq(isadev, isa->isairq); qdev_realize(DEVICE(s), NULL, errp); qdev_set_legacy_instance_id(dev, isa->iobase, 3); @@ -83,9 +83,9 @@ static void serial_isa_realizefn(DeviceState *dev, Error 
**errp) isa_register_ioport(isadev, &s->io, isa->iobase); } -static void serial_isa_build_aml(ISADevice *isadev, Aml *scope) +static void serial_isa_build_aml(AcpiDevAmlIf *adev, Aml *scope) { - ISASerialState *isa = ISA_SERIAL(isadev); + ISASerialState *isa = ISA_SERIAL(adev); Aml *dev; Aml *crs; @@ -122,11 +122,11 @@ static Property serial_isa_properties[] = { static void serial_isa_class_initfn(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - ISADeviceClass *isa = ISA_DEVICE_CLASS(klass); + AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); dc->realize = serial_isa_realizefn; dc->vmsd = &vmstate_isa_serial; - isa->build_aml = serial_isa_build_aml; + adevc->build_dev_aml = serial_isa_build_aml; device_class_set_props(dc, serial_isa_properties); set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } @@ -146,6 +146,10 @@ static const TypeInfo serial_isa_info = { .instance_size = sizeof(ISASerialState), .instance_init = serial_isa_initfn, .class_init = serial_isa_class_initfn, + .interfaces = (InterfaceInfo[]) { + { TYPE_ACPI_DEV_AML_IF }, + { }, + }, }; static void serial_register_types(void) diff --git a/hw/char/serial.c b/hw/char/serial.c index 7061aacbce..41b5e61977 100644 --- a/hw/char/serial.c +++ b/hw/char/serial.c @@ -961,6 +961,9 @@ void serial_set_frequency(SerialState *s, uint32_t frequency) const MemoryRegionOps serial_io_ops = { .read = serial_ioport_read, .write = serial_ioport_write, + .valid = { + .unaligned = 1, + }, .impl = { .min_access_size = 1, .max_access_size = 1, diff --git a/hw/char/sh_serial.c b/hw/char/sh_serial.c index 167f4d8cb9..355886ee3a 100644 --- a/hw/char/sh_serial.c +++ b/hw/char/sh_serial.c @@ -26,13 +26,17 @@ */ #include "qemu/osdep.h" +#include "hw/sysbus.h" #include "hw/irq.h" +#include "hw/qdev-core.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" #include "hw/sh4/sh.h" #include "chardev/char-fe.h" #include "qapi/error.h" #include "qemu/timer.h" - -//#define DEBUG_SERIAL +#include "qemu/log.h" +#include "trace.h" #define SH_SERIAL_FLAG_TEND (1 << 0) #define SH_SERIAL_FLAG_TDE (1 << 1) @@ -42,10 +46,10 @@ #define SH_RX_FIFO_LENGTH (16) -typedef struct { - MemoryRegion iomem; - MemoryRegion iomem_p4; - MemoryRegion iomem_a7; +OBJECT_DECLARE_SIMPLE_TYPE(SHSerialState, SH_SERIAL) + +struct SHSerialState { + SysBusDevice parent; uint8_t smr; uint8_t brr; uint8_t scr; @@ -59,13 +63,12 @@ typedef struct { uint8_t rx_tail; uint8_t rx_head; - int freq; - int feat; + uint8_t feat; int flags; int rtrg; CharBackend chr; - QEMUTimer *fifo_timeout_timer; + QEMUTimer fifo_timeout_timer; uint64_t etu; /* Elementary Time Unit (ns) */ qemu_irq eri; @@ -73,9 +76,13 @@ typedef struct { qemu_irq txi; qemu_irq tei; qemu_irq bri; -} sh_serial_state; +}; -static void sh_serial_clear_fifo(sh_serial_state * s) +typedef struct {} SHSerialStateClass; + +OBJECT_DEFINE_TYPE(SHSerialState, sh_serial, SH_SERIAL, SYS_BUS_DEVICE) + +static void sh_serial_clear_fifo(SHSerialState *s) { memset(s->rx_fifo, 0, SH_RX_FIFO_LENGTH); s->rx_cnt = 0; @@ -86,14 +93,12 @@ static void sh_serial_clear_fifo(sh_serial_state * s) static void sh_serial_write(void *opaque, hwaddr offs, uint64_t val, unsigned size) { - sh_serial_state *s = opaque; + SHSerialState *s = opaque; + DeviceState *d = DEVICE(s); unsigned char ch; -#ifdef DEBUG_SERIAL - printf("sh_serial: write offs=0x%02x val=0x%02x\n", - offs, val); -#endif - switch(offs) { + trace_sh_serial_write(d->id, size, offs, val); + switch (offs) { case 0x00: /* SMR */ s->smr = val & ((s->feat & 
SH_SERIAL_FEAT_SCIF) ? 0x7b : 0xff); return; @@ -103,8 +108,9 @@ static void sh_serial_write(void *opaque, hwaddr offs, case 0x08: /* SCR */ /* TODO : For SH7751, SCIF mask should be 0xfb. */ s->scr = val & ((s->feat & SH_SERIAL_FEAT_SCIF) ? 0xfa : 0xff); - if (!(val & (1 << 5))) + if (!(val & (1 << 5))) { s->flags |= SH_SERIAL_FLAG_TEND; + } if ((s->feat & SH_SERIAL_FEAT_SCIF) && s->txi) { qemu_set_irq(s->txi, val & (1 << 7)); } @@ -115,8 +121,10 @@ static void sh_serial_write(void *opaque, hwaddr offs, case 0x0c: /* FTDR / TDR */ if (qemu_chr_fe_backend_connected(&s->chr)) { ch = val; - /* XXX this blocks entire thread. Rewrite to use - * qemu_chr_fe_write and background I/O callbacks */ + /* + * XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks + */ qemu_chr_fe_write_all(&s->chr, &ch, 1); } s->dr = val; @@ -129,18 +137,23 @@ static void sh_serial_write(void *opaque, hwaddr offs, #endif } if (s->feat & SH_SERIAL_FEAT_SCIF) { - switch(offs) { + switch (offs) { case 0x10: /* FSR */ - if (!(val & (1 << 6))) + if (!(val & (1 << 6))) { s->flags &= ~SH_SERIAL_FLAG_TEND; - if (!(val & (1 << 5))) + } + if (!(val & (1 << 5))) { s->flags &= ~SH_SERIAL_FLAG_TDE; - if (!(val & (1 << 4))) + } + if (!(val & (1 << 4))) { s->flags &= ~SH_SERIAL_FLAG_BRK; - if (!(val & (1 << 1))) + } + if (!(val & (1 << 1))) { s->flags &= ~SH_SERIAL_FLAG_RDF; - if (!(val & (1 << 0))) + } + if (!(val & (1 << 0))) { s->flags &= ~SH_SERIAL_FLAG_DR; + } if (!(val & (1 << 1)) || !(val & (1 << 0))) { if (s->rxi) { @@ -176,9 +189,8 @@ static void sh_serial_write(void *opaque, hwaddr offs, case 0x24: /* LSR */ return; } - } - else { - switch(offs) { + } else { + switch (offs) { #if 0 case 0x0c: ret = s->dr; @@ -192,20 +204,20 @@ static void sh_serial_write(void *opaque, hwaddr offs, return; } } - - fprintf(stderr, "sh_serial: unsupported write to 0x%02" - HWADDR_PRIx "\n", offs); - abort(); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: unsupported write to 0x%02" HWADDR_PRIx "\n", + __func__, offs); } static uint64_t sh_serial_read(void *opaque, hwaddr offs, unsigned size) { - sh_serial_state *s = opaque; - uint32_t ret = ~0; + SHSerialState *s = opaque; + DeviceState *d = DEVICE(s); + uint32_t ret = UINT32_MAX; #if 0 - switch(offs) { + switch (offs) { case 0x00: ret = s->smr; break; @@ -221,7 +233,7 @@ static uint64_t sh_serial_read(void *opaque, hwaddr offs, } #endif if (s->feat & SH_SERIAL_FEAT_SCIF) { - switch(offs) { + switch (offs) { case 0x00: /* SMR */ ret = s->smr; break; @@ -230,29 +242,37 @@ static uint64_t sh_serial_read(void *opaque, hwaddr offs, break; case 0x10: /* FSR */ ret = 0; - if (s->flags & SH_SERIAL_FLAG_TEND) + if (s->flags & SH_SERIAL_FLAG_TEND) { ret |= (1 << 6); - if (s->flags & SH_SERIAL_FLAG_TDE) + } + if (s->flags & SH_SERIAL_FLAG_TDE) { ret |= (1 << 5); - if (s->flags & SH_SERIAL_FLAG_BRK) + } + if (s->flags & SH_SERIAL_FLAG_BRK) { ret |= (1 << 4); - if (s->flags & SH_SERIAL_FLAG_RDF) + } + if (s->flags & SH_SERIAL_FLAG_RDF) { ret |= (1 << 1); - if (s->flags & SH_SERIAL_FLAG_DR) + } + if (s->flags & SH_SERIAL_FLAG_DR) { ret |= (1 << 0); + } - if (s->scr & (1 << 5)) + if (s->scr & (1 << 5)) { s->flags |= SH_SERIAL_FLAG_TDE | SH_SERIAL_FLAG_TEND; + } break; case 0x14: if (s->rx_cnt > 0) { ret = s->rx_fifo[s->rx_tail++]; s->rx_cnt--; - if (s->rx_tail == SH_RX_FIFO_LENGTH) + if (s->rx_tail == SH_RX_FIFO_LENGTH) { s->rx_tail = 0; - if (s->rx_cnt < s->rtrg) + } + if (s->rx_cnt < s->rtrg) { s->flags &= ~SH_SERIAL_FLAG_RDF; + } } break; case 0x18: @@ -268,9 +288,8 
@@ static uint64_t sh_serial_read(void *opaque, hwaddr offs, ret = 0; break; } - } - else { - switch(offs) { + } else { + switch (offs) { #if 0 case 0x0c: ret = s->dr; @@ -287,40 +306,39 @@ static uint64_t sh_serial_read(void *opaque, hwaddr offs, break; } } -#ifdef DEBUG_SERIAL - printf("sh_serial: read offs=0x%02x val=0x%x\n", - offs, ret); -#endif + trace_sh_serial_read(d->id, size, offs, ret); - if (ret & ~((1 << 16) - 1)) { - fprintf(stderr, "sh_serial: unsupported read from 0x%02" - HWADDR_PRIx "\n", offs); - abort(); + if (ret > UINT16_MAX) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: unsupported read from 0x%02" HWADDR_PRIx "\n", + __func__, offs); + ret = 0; } return ret; } -static int sh_serial_can_receive(sh_serial_state *s) +static int sh_serial_can_receive(SHSerialState *s) { return s->scr & (1 << 4); } -static void sh_serial_receive_break(sh_serial_state *s) +static void sh_serial_receive_break(SHSerialState *s) { - if (s->feat & SH_SERIAL_FEAT_SCIF) + if (s->feat & SH_SERIAL_FEAT_SCIF) { s->sr |= (1 << 4); + } } static int sh_serial_can_receive1(void *opaque) { - sh_serial_state *s = opaque; + SHSerialState *s = opaque; return sh_serial_can_receive(s); } static void sh_serial_timeout_int(void *opaque) { - sh_serial_state *s = opaque; + SHSerialState *s = opaque; s->flags |= SH_SERIAL_FLAG_RDF; if (s->scr & (1 << 6) && s->rxi) { @@ -330,7 +348,7 @@ static void sh_serial_timeout_int(void *opaque) static void sh_serial_receive1(void *opaque, const uint8_t *buf, int size) { - sh_serial_state *s = opaque; + SHSerialState *s = opaque; if (s->feat & SH_SERIAL_FEAT_SCIF) { int i; @@ -344,11 +362,11 @@ static void sh_serial_receive1(void *opaque, const uint8_t *buf, int size) if (s->rx_cnt >= s->rtrg) { s->flags |= SH_SERIAL_FLAG_RDF; if (s->scr & (1 << 6) && s->rxi) { - timer_del(s->fifo_timeout_timer); + timer_del(&s->fifo_timeout_timer); qemu_set_irq(s->rxi, 1); } } else { - timer_mod(s->fifo_timeout_timer, + timer_mod(&s->fifo_timeout_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 15 * s->etu); } } @@ -360,9 +378,10 @@ static void sh_serial_receive1(void *opaque, const uint8_t *buf, int size) static void sh_serial_event(void *opaque, QEMUChrEvent event) { - sh_serial_state *s = opaque; - if (event == CHR_EVENT_BREAK) + SHSerialState *s = opaque; + if (event == CHR_EVENT_BREAK) { sh_serial_receive_break(s); + } } static const MemoryRegionOps sh_serial_ops = { @@ -371,20 +390,10 @@ static const MemoryRegionOps sh_serial_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -void sh_serial_init(MemoryRegion *sysmem, - hwaddr base, int feat, - uint32_t freq, Chardev *chr, - qemu_irq eri_source, - qemu_irq rxi_source, - qemu_irq txi_source, - qemu_irq tei_source, - qemu_irq bri_source) +static void sh_serial_reset(DeviceState *dev) { - sh_serial_state *s; + SHSerialState *s = SH_SERIAL(dev); - s = g_malloc0(sizeof(sh_serial_state)); - - s->feat = feat; s->flags = SH_SERIAL_FLAG_TEND | SH_SERIAL_FLAG_TDE; s->rtrg = 1; @@ -393,39 +402,64 @@ void sh_serial_init(MemoryRegion *sysmem, s->scr = 1 << 5; /* pretend that TX is enabled so early printk works */ s->sptr = 0; - if (feat & SH_SERIAL_FEAT_SCIF) { + if (s->feat & SH_SERIAL_FEAT_SCIF) { s->fcr = 0; - } - else { + } else { s->dr = 0xff; } sh_serial_clear_fifo(s); +} - memory_region_init_io(&s->iomem, NULL, &sh_serial_ops, s, - "serial", 0x100000000ULL); +static void sh_serial_realize(DeviceState *d, Error **errp) +{ + SHSerialState *s = SH_SERIAL(d); + MemoryRegion *iomem = g_malloc(sizeof(*iomem)); - memory_region_init_alias(&s->iomem_p4, NULL, 
"serial-p4", &s->iomem, - 0, 0x28); - memory_region_add_subregion(sysmem, P4ADDR(base), &s->iomem_p4); + assert(d->id); + memory_region_init_io(iomem, OBJECT(d), &sh_serial_ops, s, d->id, 0x28); + sysbus_init_mmio(SYS_BUS_DEVICE(d), iomem); + qdev_init_gpio_out_named(d, &s->eri, "eri", 1); + qdev_init_gpio_out_named(d, &s->rxi, "rxi", 1); + qdev_init_gpio_out_named(d, &s->txi, "txi", 1); + qdev_init_gpio_out_named(d, &s->tei, "tei", 1); + qdev_init_gpio_out_named(d, &s->bri, "bri", 1); - memory_region_init_alias(&s->iomem_a7, NULL, "serial-a7", &s->iomem, - 0, 0x28); - memory_region_add_subregion(sysmem, A7ADDR(base), &s->iomem_a7); - - if (chr) { - qemu_chr_fe_init(&s->chr, chr, &error_abort); + if (qemu_chr_fe_backend_connected(&s->chr)) { qemu_chr_fe_set_handlers(&s->chr, sh_serial_can_receive1, sh_serial_receive1, sh_serial_event, NULL, s, NULL, true); } - s->fifo_timeout_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, - sh_serial_timeout_int, s); + timer_init_ns(&s->fifo_timeout_timer, QEMU_CLOCK_VIRTUAL, + sh_serial_timeout_int, s); s->etu = NANOSECONDS_PER_SECOND / 9600; - s->eri = eri_source; - s->rxi = rxi_source; - s->txi = txi_source; - s->tei = tei_source; - s->bri = bri_source; +} + +static void sh_serial_finalize(Object *obj) +{ + SHSerialState *s = SH_SERIAL(obj); + + timer_del(&s->fifo_timeout_timer); +} + +static void sh_serial_init(Object *obj) +{ +} + +static Property sh_serial_properties[] = { + DEFINE_PROP_CHR("chardev", SHSerialState, chr), + DEFINE_PROP_UINT8("features", SHSerialState, feat, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void sh_serial_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + device_class_set_props(dc, sh_serial_properties); + dc->realize = sh_serial_realize; + dc->reset = sh_serial_reset; + /* Reason: part of SuperH CPU/SoC, needs to be wired up */ + dc->user_creatable = false; } diff --git a/hw/char/shakti_uart.c b/hw/char/shakti_uart.c index 6870821325..98b142c7df 100644 --- a/hw/char/shakti_uart.c +++ b/hw/char/shakti_uart.c @@ -168,6 +168,7 @@ static void shakti_uart_class_init(ObjectClass *klass, void *data) dc->reset = shakti_uart_reset; dc->realize = shakti_uart_realize; device_class_set_props(dc, shakti_uart_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } static const TypeInfo shakti_uart_info = { diff --git a/hw/char/sifive_uart.c b/hw/char/sifive_uart.c index 278e21c434..1c75f792b3 100644 --- a/hw/char/sifive_uart.c +++ b/hw/char/sifive_uart.c @@ -248,6 +248,7 @@ static void sifive_uart_class_init(ObjectClass *oc, void *data) rc->phases.enter = sifive_uart_reset_enter; rc->phases.hold = sifive_uart_reset_hold; device_class_set_props(dc, sifive_uart_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } static const TypeInfo sifive_uart_info = { diff --git a/hw/char/stm32f2xx_usart.c b/hw/char/stm32f2xx_usart.c index 8df0832424..fde67f4f03 100644 --- a/hw/char/stm32f2xx_usart.c +++ b/hw/char/stm32f2xx_usart.c @@ -103,10 +103,11 @@ static uint64_t stm32f2xx_usart_read(void *opaque, hwaddr addr, return retvalue; case USART_DR: DB_PRINT("Value: 0x%" PRIx32 ", %c\n", s->usart_dr, (char) s->usart_dr); + retvalue = s->usart_dr & 0x3FF; s->usart_sr &= ~USART_SR_RXNE; qemu_chr_fe_accept_input(&s->chr); qemu_set_irq(s->irq, 0); - return s->usart_dr & 0x3FF; + return retvalue; case USART_BRR: return s->usart_brr; case USART_CR1: diff --git a/hw/char/trace-events b/hw/char/trace-events index 1436fb462d..2ecb36232e 100644 --- a/hw/char/trace-events +++ b/hw/char/trace-events @@ -36,6 +36,8 
@@ grlib_apbuart_writel_unknown(uint64_t addr, uint32_t value) "addr 0x%"PRIx64" va grlib_apbuart_readl_unknown(uint64_t addr) "addr 0x%"PRIx64 # escc.c +escc_hard_reset(void) "hard reset" +escc_soft_reset_chn(char channel) "soft reset channel %c" escc_put_queue(char channel, int b) "channel %c put: 0x%02x" escc_get_queue(char channel, int val) "channel %c get 0x%02x" escc_update_irq(int irq) "IRQ = %d" @@ -99,3 +101,7 @@ exynos_uart_rx_timeout(uint32_t channel, uint32_t stat, uint32_t intsp) "UART%d: # cadence_uart.c cadence_uart_baudrate(unsigned baudrate) "baudrate %u" + +# sh_serial.c +sh_serial_read(char *id, unsigned size, uint64_t offs, uint64_t val) " %s size %d offs 0x%02" PRIx64 " -> 0x%02" PRIx64 +sh_serial_write(char *id, unsigned size, uint64_t offs, uint64_t val) "%s size %d offs 0x%02" PRIx64 " <- 0x%02" PRIx64 diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c index dd6bc27b3b..7d4601cb5d 100644 --- a/hw/char/virtio-serial-bus.c +++ b/hw/char/virtio-serial-bus.c @@ -1044,21 +1044,18 @@ static void virtio_serial_device_realize(DeviceState *dev, Error **errp) VIRTIO_CONSOLE_F_EMERG_WRITE)) { config_size = offsetof(struct virtio_console_config, emerg_wr); } - virtio_init(vdev, "virtio-serial", VIRTIO_ID_CONSOLE, - config_size); + virtio_init(vdev, VIRTIO_ID_CONSOLE, config_size); /* Spawn a new virtio-serial bus on which the ports will ride as devices */ - qbus_create_inplace(&vser->bus, sizeof(vser->bus), TYPE_VIRTIO_SERIAL_BUS, - dev, vdev->bus_name); + qbus_init(&vser->bus, sizeof(vser->bus), TYPE_VIRTIO_SERIAL_BUS, + dev, vdev->bus_name); qbus_set_hotplug_handler(BUS(&vser->bus), OBJECT(vser)); vser->bus.vser = vser; QTAILQ_INIT(&vser->ports); vser->bus.max_nr_ports = vser->serial.max_virtserial_ports; - vser->ivqs = g_malloc(vser->serial.max_virtserial_ports - * sizeof(VirtQueue *)); - vser->ovqs = g_malloc(vser->serial.max_virtserial_ports - * sizeof(VirtQueue *)); + vser->ivqs = g_new(VirtQueue *, vser->serial.max_virtserial_ports); + vser->ovqs = g_new(VirtQueue *, vser->serial.max_virtserial_ports); /* Add a queue for host to guest transfers for port 0 (backward compat) */ vser->ivqs[0] = virtio_add_queue(vdev, 128, handle_input); diff --git a/hw/core/bus.c b/hw/core/bus.c index 9cfbc3a687..c7831b5293 100644 --- a/hw/core/bus.c +++ b/hw/core/bus.c @@ -99,7 +99,8 @@ static void bus_reset_child_foreach(Object *obj, ResettableChildCallback cb, } } -static void qbus_init(BusState *bus, DeviceState *parent, const char *name) +static void qbus_init_internal(BusState *bus, DeviceState *parent, + const char *name) { const char *typename = object_get_typename(OBJECT(bus)); BusClass *bc; @@ -151,19 +152,19 @@ static void bus_unparent(Object *obj) bus->parent = NULL; } -void qbus_create_inplace(void *bus, size_t size, const char *typename, - DeviceState *parent, const char *name) +void qbus_init(void *bus, size_t size, const char *typename, + DeviceState *parent, const char *name) { object_initialize(bus, size, typename); - qbus_init(bus, parent, name); + qbus_init_internal(bus, parent, name); } -BusState *qbus_create(const char *typename, DeviceState *parent, const char *name) +BusState *qbus_new(const char *typename, DeviceState *parent, const char *name) { BusState *bus; bus = BUS(object_new(typename)); - qbus_init(bus, parent, name); + qbus_init_internal(bus, parent, name); return bus; } diff --git a/hw/core/clock-vmstate.c b/hw/core/clock-vmstate.c index 260b13fc2c..7eccb6d4ea 100644 --- a/hw/core/clock-vmstate.c +++ b/hw/core/clock-vmstate.c @@ -14,12 
+14,51 @@ #include "migration/vmstate.h" #include "hw/clock.h" +static bool muldiv_needed(void *opaque) +{ + Clock *clk = opaque; + + return clk->multiplier != 1 || clk->divider != 1; +} + +static int clock_pre_load(void *opaque) +{ + Clock *clk = opaque; + /* + * The initial out-of-reset settings of the Clock might have been + * configured by the device to be different from what we set + * in clock_initfn(), so we must here set the default values to + * be used if they are not in the inbound migration state. + */ + clk->multiplier = 1; + clk->divider = 1; + + return 0; +} + +const VMStateDescription vmstate_muldiv = { + .name = "clock/muldiv", + .version_id = 1, + .minimum_version_id = 1, + .needed = muldiv_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT32(multiplier, Clock), + VMSTATE_UINT32(divider, Clock), + VMSTATE_END_OF_LIST() + }, +}; + const VMStateDescription vmstate_clock = { .name = "clock", .version_id = 0, .minimum_version_id = 0, + .pre_load = clock_pre_load, .fields = (VMStateField[]) { VMSTATE_UINT64(period, Clock), VMSTATE_END_OF_LIST() - } + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_muldiv, + NULL + }, }; diff --git a/hw/core/clock.c b/hw/core/clock.c index fc5a99683f..d82e44cd1a 100644 --- a/hw/core/clock.c +++ b/hw/core/clock.c @@ -64,6 +64,15 @@ bool clock_set(Clock *clk, uint64_t period) return true; } +static uint64_t clock_get_child_period(Clock *clk) +{ + /* + * Return the period to be used for child clocks, which is the parent + * clock period adjusted for multiplier and divider effects. + */ + return muldiv64(clk->period, clk->multiplier, clk->divider); +} + static void clock_call_callback(Clock *clk, ClockEvent event) { /* @@ -78,15 +87,16 @@ static void clock_call_callback(Clock *clk, ClockEvent event) static void clock_propagate_period(Clock *clk, bool call_callbacks) { Clock *child; + uint64_t child_period = clock_get_child_period(clk); QLIST_FOREACH(child, &clk->children, sibling) { - if (child->period != clk->period) { + if (child->period != child_period) { if (call_callbacks) { clock_call_callback(child, ClockPreUpdate); } - child->period = clk->period; + child->period = child_period; trace_clock_update(CLOCK_PATH(child), CLOCK_PATH(clk), - CLOCK_PERIOD_TO_HZ(clk->period), + CLOCK_PERIOD_TO_HZ(child->period), call_callbacks); if (call_callbacks) { clock_call_callback(child, ClockUpdate); @@ -110,7 +120,7 @@ void clock_set_source(Clock *clk, Clock *src) trace_clock_set_source(CLOCK_PATH(clk), CLOCK_PATH(src)); - clk->period = src->period; + clk->period = clock_get_child_period(src); QLIST_INSERT_HEAD(&src->children, clk, sibling); clk->source = src; clock_propagate_period(clk, false); @@ -133,10 +143,23 @@ char *clock_display_freq(Clock *clk) return freq_to_str(clock_get_hz(clk)); } +void clock_set_mul_div(Clock *clk, uint32_t multiplier, uint32_t divider) +{ + assert(divider != 0); + + trace_clock_set_mul_div(CLOCK_PATH(clk), clk->multiplier, multiplier, + clk->divider, divider); + clk->multiplier = multiplier; + clk->divider = divider; +} + static void clock_initfn(Object *obj) { Clock *clk = CLOCK(obj); + clk->multiplier = 1; + clk->divider = 1; + QLIST_INIT(&clk->children); } diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c index b05d1db7df..b559a6606a 100644 --- a/hw/core/cpu-common.c +++ b/hw/core/cpu-common.c @@ -137,8 +137,7 @@ static void cpu_common_reset(DeviceState *dev) cpu->cflags_next_tb = -1; if (tcg_enabled()) { - cpu_tb_jmp_cache_clear(cpu); - + tcg_flush_jmp_cache(cpu); tcg_flush_softmmu_tlb(cpu); } } @@ 
-258,21 +257,6 @@ static int64_t cpu_common_get_arch_id(CPUState *cpu) return cpu->cpu_index; } -static Property cpu_common_props[] = { -#ifndef CONFIG_USER_ONLY - /* Create a memory property for softmmu CPU object, - * so users can wire up its memory. (This can't go in hw/core/cpu.c - * because that file is compiled only once for both user-mode - * and system builds.) The default if no link is set up is to use - * the system address space. - */ - DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION, - MemoryRegion *), -#endif - DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false), - DEFINE_PROP_END_OF_LIST(), -}; - static void cpu_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -287,7 +271,7 @@ static void cpu_class_init(ObjectClass *klass, void *data) dc->realize = cpu_common_realizefn; dc->unrealize = cpu_common_unrealizefn; dc->reset = cpu_common_reset; - device_class_set_props(dc, cpu_common_props); + cpu_class_init_props(dc); /* * Reason: CPUs still need special care by board code: wiring up * IRQs, adding reset handlers, halting non-first CPUs, ... diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c index 00253f8929..5eaf2e79e6 100644 --- a/hw/core/cpu-sysemu.c +++ b/hw/core/cpu-sysemu.c @@ -69,11 +69,10 @@ hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr) int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs) { - CPUClass *cc = CPU_GET_CLASS(cpu); int ret = 0; - if (cc->sysemu_ops->asidx_from_attrs) { - ret = cc->sysemu_ops->asidx_from_attrs(cpu, attrs); + if (cpu->cc->sysemu_ops->asidx_from_attrs) { + ret = cpu->cc->sysemu_ops->asidx_from_attrs(cpu, attrs); assert(ret < cpu->num_ases && ret >= 0); } return ret; diff --git a/hw/core/generic-loader.c b/hw/core/generic-loader.c index d14f932eea..4f4d77908d 100644 --- a/hw/core/generic-loader.c +++ b/hw/core/generic-loader.c @@ -56,8 +56,9 @@ static void generic_loader_reset(void *opaque) } if (s->data_len) { - assert(s->data_len < sizeof(s->data)); - dma_memory_write(s->cpu->as, s->addr, &s->data, s->data_len); + assert(s->data_len <= sizeof(s->data)); + dma_memory_write(s->cpu->as, s->addr, &s->data, s->data_len, + MEMTXATTRS_UNSPECIFIED); } } @@ -66,7 +67,7 @@ static void generic_loader_realize(DeviceState *dev, Error **errp) GenericLoaderState *s = GENERIC_LOADER(dev); hwaddr entry; int big_endian; - int size = 0; + ssize_t size = 0; s->set_pc = false; @@ -206,7 +207,7 @@ static void generic_loader_class_init(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_MISC, dc->categories); } -static TypeInfo generic_loader_info = { +static const TypeInfo generic_loader_info = { .name = TYPE_GENERIC_LOADER, .parent = TYPE_DEVICE, .instance_size = sizeof(GenericLoaderState), diff --git a/hw/core/gpio.c b/hw/core/gpio.c new file mode 100644 index 0000000000..80d07a6ec9 --- /dev/null +++ b/hw/core/gpio.c @@ -0,0 +1,198 @@ +/* + * qdev GPIO helpers + * + * Copyright (c) 2009 CodeSourcery + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/qdev-core.h" +#include "hw/irq.h" +#include "qapi/error.h" + +static NamedGPIOList *qdev_get_named_gpio_list(DeviceState *dev, + const char *name) +{ + NamedGPIOList *ngl; + + QLIST_FOREACH(ngl, &dev->gpios, node) { + /* NULL is a valid and matchable name. */ + if (g_strcmp0(name, ngl->name) == 0) { + return ngl; + } + } + + ngl = g_malloc0(sizeof(*ngl)); + ngl->name = g_strdup(name); + QLIST_INSERT_HEAD(&dev->gpios, ngl, node); + return ngl; +} + +void qdev_init_gpio_in_named_with_opaque(DeviceState *dev, + qemu_irq_handler handler, + void *opaque, + const char *name, int n) +{ + int i; + NamedGPIOList *gpio_list = qdev_get_named_gpio_list(dev, name); + + assert(gpio_list->num_out == 0 || !name); + gpio_list->in = qemu_extend_irqs(gpio_list->in, gpio_list->num_in, handler, + opaque, n); + + if (!name) { + name = "unnamed-gpio-in"; + } + for (i = gpio_list->num_in; i < gpio_list->num_in + n; i++) { + gchar *propname = g_strdup_printf("%s[%u]", name, i); + + object_property_add_child(OBJECT(dev), propname, + OBJECT(gpio_list->in[i])); + g_free(propname); + } + + gpio_list->num_in += n; +} + +void qdev_init_gpio_in(DeviceState *dev, qemu_irq_handler handler, int n) +{ + qdev_init_gpio_in_named(dev, handler, NULL, n); +} + +void qdev_init_gpio_out_named(DeviceState *dev, qemu_irq *pins, + const char *name, int n) +{ + int i; + NamedGPIOList *gpio_list = qdev_get_named_gpio_list(dev, name); + + assert(gpio_list->num_in == 0 || !name); + + if (!name) { + name = "unnamed-gpio-out"; + } + memset(pins, 0, sizeof(*pins) * n); + for (i = 0; i < n; ++i) { + gchar *propname = g_strdup_printf("%s[%u]", name, + gpio_list->num_out + i); + + object_property_add_link(OBJECT(dev), propname, TYPE_IRQ, + (Object **)&pins[i], + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG); + g_free(propname); + } + gpio_list->num_out += n; +} + +void qdev_init_gpio_out(DeviceState *dev, qemu_irq *pins, int n) +{ + qdev_init_gpio_out_named(dev, pins, NULL, n); +} + +qemu_irq qdev_get_gpio_in_named(DeviceState *dev, const char *name, int n) +{ + NamedGPIOList *gpio_list = qdev_get_named_gpio_list(dev, name); + + assert(n >= 0 && n < gpio_list->num_in); + return gpio_list->in[n]; +} + +qemu_irq qdev_get_gpio_in(DeviceState *dev, int n) +{ + return qdev_get_gpio_in_named(dev, NULL, n); +} + +void qdev_connect_gpio_out_named(DeviceState *dev, const char *name, int n, + qemu_irq input_pin) +{ + char *propname = g_strdup_printf("%s[%d]", + name ? name : "unnamed-gpio-out", n); + if (input_pin && !OBJECT(input_pin)->parent) { + /* We need a name for object_property_set_link to work */ + object_property_add_child(container_get(qdev_get_machine(), + "/unattached"), + "non-qdev-gpio[*]", OBJECT(input_pin)); + } + object_property_set_link(OBJECT(dev), propname, + OBJECT(input_pin), &error_abort); + g_free(propname); +} + +qemu_irq qdev_get_gpio_out_connector(DeviceState *dev, const char *name, int n) +{ + g_autofree char *propname = g_strdup_printf("%s[%d]", + name ? name : "unnamed-gpio-out", n); + + qemu_irq ret = (qemu_irq)object_property_get_link(OBJECT(dev), propname, + NULL); + + return ret; +} + +/* disconnect a GPIO output, returning the disconnected input (if any) */ + +static qemu_irq qdev_disconnect_gpio_out_named(DeviceState *dev, + const char *name, int n) +{ + char *propname = g_strdup_printf("%s[%d]", + name ? 
name : "unnamed-gpio-out", n); + + qemu_irq ret = (qemu_irq)object_property_get_link(OBJECT(dev), propname, + NULL); + if (ret) { + object_property_set_link(OBJECT(dev), propname, NULL, NULL); + } + g_free(propname); + return ret; +} + +qemu_irq qdev_intercept_gpio_out(DeviceState *dev, qemu_irq icpt, + const char *name, int n) +{ + qemu_irq disconnected = qdev_disconnect_gpio_out_named(dev, name, n); + qdev_connect_gpio_out_named(dev, name, n, icpt); + return disconnected; +} + +void qdev_connect_gpio_out(DeviceState *dev, int n, qemu_irq input_pin) +{ + qdev_connect_gpio_out_named(dev, NULL, n, input_pin); +} + +void qdev_pass_gpios(DeviceState *dev, DeviceState *container, + const char *name) +{ + int i; + NamedGPIOList *ngl = qdev_get_named_gpio_list(dev, name); + + for (i = 0; i < ngl->num_in; i++) { + const char *nm = ngl->name ? ngl->name : "unnamed-gpio-in"; + char *propname = g_strdup_printf("%s[%d]", nm, i); + + object_property_add_alias(OBJECT(container), propname, + OBJECT(dev), propname); + g_free(propname); + } + for (i = 0; i < ngl->num_out; i++) { + const char *nm = ngl->name ? ngl->name : "unnamed-gpio-out"; + char *propname = g_strdup_printf("%s[%d]", nm, i); + + object_property_add_alias(OBJECT(container), propname, + OBJECT(dev), propname); + g_free(propname); + } + QLIST_REMOVE(ngl, node); + QLIST_INSERT_HEAD(&container->gpios, ngl, node); +} diff --git a/hw/core/guest-loader.c b/hw/core/guest-loader.c index d3f9d1a06e..391c875a29 100644 --- a/hw/core/guest-loader.c +++ b/hw/core/guest-loader.c @@ -129,7 +129,7 @@ static void guest_loader_class_init(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_MISC, dc->categories); } -static TypeInfo guest_loader_info = { +static const TypeInfo guest_loader_info = { .name = TYPE_GUEST_LOADER, .parent = TYPE_DEVICE, .instance_size = sizeof(GuestLoaderState), diff --git a/hw/core/hotplug-stubs.c b/hw/core/hotplug-stubs.c new file mode 100644 index 0000000000..7aadaa29bd --- /dev/null +++ b/hw/core/hotplug-stubs.c @@ -0,0 +1,34 @@ +/* + * Hotplug handler stubs + * + * Copyright (c) Red Hat + * + * Authors: + * Philippe Mathieu-Daudé , + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ +#include "qemu/osdep.h" +#include "hw/qdev-core.h" + +HotplugHandler *qdev_get_hotplug_handler(DeviceState *dev) +{ + return NULL; +} + +void hotplug_handler_pre_plug(HotplugHandler *plug_handler, + DeviceState *plugged_dev, + Error **errp) +{ + g_assert_not_reached(); +} + +void hotplug_handler_plug(HotplugHandler *plug_handler, + DeviceState *plugged_dev, + Error **errp) +{ + g_assert_not_reached(); +} diff --git a/hw/core/irq.c b/hw/core/irq.c index 8a9cbdd556..3623f711fe 100644 --- a/hw/core/irq.c +++ b/hw/core/irq.c @@ -106,21 +106,6 @@ qemu_irq qemu_irq_invert(qemu_irq irq) return qemu_allocate_irq(qemu_notirq, irq, 0); } -static void qemu_splitirq(void *opaque, int line, int level) -{ - struct IRQState **irq = opaque; - irq[0]->handler(irq[0]->opaque, irq[0]->n, level); - irq[1]->handler(irq[1]->opaque, irq[1]->n, level); -} - -qemu_irq qemu_irq_split(qemu_irq irq1, qemu_irq irq2) -{ - qemu_irq *s = g_malloc0(2 * sizeof(qemu_irq)); - s[0] = irq1; - s[1] = irq2; - return qemu_allocate_irq(qemu_splitirq, s, 0); -} - void qemu_irq_intercept_in(qemu_irq *gpio_in, qemu_irq_handler handler, int n) { int i; diff --git a/hw/core/loader.c b/hw/core/loader.c index 12a316ecae..c142af70ad 100644 --- a/hw/core/loader.c +++ b/hw/core/loader.c @@ -43,9 +43,10 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" +#include "qapi/type-helpers.h" #include "trace.h" #include "hw/hw.h" #include "disas/disas.h" @@ -113,17 +114,17 @@ ssize_t read_targphys(const char *name, return did; } -int load_image_targphys(const char *filename, - hwaddr addr, uint64_t max_sz) +ssize_t load_image_targphys(const char *filename, + hwaddr addr, uint64_t max_sz) { return load_image_targphys_as(filename, addr, max_sz, NULL); } /* return the size or -1 if error */ -int load_image_targphys_as(const char *filename, - hwaddr addr, uint64_t max_sz, AddressSpace *as) +ssize_t load_image_targphys_as(const char *filename, + hwaddr addr, uint64_t max_sz, AddressSpace *as) { - int size; + ssize_t size; size = get_image_size(filename); if (size < 0 || size > max_sz) { @@ -137,9 +138,9 @@ int load_image_targphys_as(const char *filename, return size; } -int load_image_mr(const char *filename, MemoryRegion *mr) +ssize_t load_image_mr(const char *filename, MemoryRegion *mr) { - int size; + ssize_t size; if (!memory_access_is_direct(mr, false)) { /* Can only load an image into RAM or ROM */ @@ -221,8 +222,8 @@ static void bswap_ahdr(struct exec *e) : (_N_SEGMENT_ROUND (_N_TXTENDADDR(x, target_page_size), target_page_size))) -int load_aout(const char *filename, hwaddr addr, int max_sz, - int bswap_needed, hwaddr target_page_size) +ssize_t load_aout(const char *filename, hwaddr addr, int max_sz, + int bswap_needed, hwaddr target_page_size) { int fd; ssize_t size, ret; @@ -326,7 +327,7 @@ static void *load_at(int fd, off_t offset, size_t size) #define SZ 64 #include "hw/elf_ops.h" -const char *load_elf_strerror(int error) +const char *load_elf_strerror(ssize_t error) { switch (error) { case 0: @@ -402,12 +403,12 @@ fail: } /* return < 0 if error, otherwise the number of bytes loaded in memory */ -int load_elf(const char *filename, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, - uint64_t *highaddr, uint32_t *pflags, int big_endian, - int elf_machine, int clear_lsb, int data_swab) +ssize_t load_elf(const char *filename, + uint64_t 
(*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, + uint64_t *highaddr, uint32_t *pflags, int big_endian, + int elf_machine, int clear_lsb, int data_swab) { return load_elf_as(filename, elf_note_fn, translate_fn, translate_opaque, pentry, lowaddr, highaddr, pflags, big_endian, @@ -415,12 +416,13 @@ int load_elf(const char *filename, } /* return < 0 if error, otherwise the number of bytes loaded in memory */ -int load_elf_as(const char *filename, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, - uint64_t *highaddr, uint32_t *pflags, int big_endian, - int elf_machine, int clear_lsb, int data_swab, AddressSpace *as) +ssize_t load_elf_as(const char *filename, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, + uint64_t *highaddr, uint32_t *pflags, int big_endian, + int elf_machine, int clear_lsb, int data_swab, + AddressSpace *as) { return load_elf_ram(filename, elf_note_fn, translate_fn, translate_opaque, pentry, lowaddr, highaddr, pflags, big_endian, @@ -428,13 +430,13 @@ int load_elf_as(const char *filename, } /* return < 0 if error, otherwise the number of bytes loaded in memory */ -int load_elf_ram(const char *filename, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, - uint64_t *highaddr, uint32_t *pflags, int big_endian, - int elf_machine, int clear_lsb, int data_swab, - AddressSpace *as, bool load_rom) +ssize_t load_elf_ram(const char *filename, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, + uint64_t *lowaddr, uint64_t *highaddr, uint32_t *pflags, + int big_endian, int elf_machine, int clear_lsb, + int data_swab, AddressSpace *as, bool load_rom) { return load_elf_ram_sym(filename, elf_note_fn, translate_fn, translate_opaque, @@ -444,16 +446,17 @@ int load_elf_ram(const char *filename, } /* return < 0 if error, otherwise the number of bytes loaded in memory */ -int load_elf_ram_sym(const char *filename, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, uint64_t *pentry, - uint64_t *lowaddr, uint64_t *highaddr, uint32_t *pflags, - int big_endian, int elf_machine, - int clear_lsb, int data_swab, - AddressSpace *as, bool load_rom, symbol_fn_t sym_cb) +ssize_t load_elf_ram_sym(const char *filename, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, + uint64_t *lowaddr, uint64_t *highaddr, + uint32_t *pflags, int big_endian, int elf_machine, + int clear_lsb, int data_swab, + AddressSpace *as, bool load_rom, symbol_fn_t sym_cb) { - int fd, data_order, target_data_order, must_swab, ret = ELF_LOAD_FAILED; + int fd, data_order, target_data_order, must_swab; + ssize_t ret = ELF_LOAD_FAILED; uint8_t e_ident[EI_NIDENT]; fd = qemu_open(filename, O_RDONLY | O_BINARY, NULL); @@ -470,7 +473,7 @@ int load_elf_ram_sym(const char *filename, ret = ELF_LOAD_NOT_ELF; goto fail; } -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN data_order = ELFDATA2MSB; #else data_order = ELFDATA2LSB; @@ -507,7 +510,7 @@ int load_elf_ram_sym(const char 
*filename, static void bswap_uboot_header(uboot_image_header_t *hdr) { -#ifndef HOST_WORDS_BIGENDIAN +#if !HOST_BIG_ENDIAN bswap32s(&hdr->ih_magic); bswap32s(&hdr->ih_hcrc); bswap32s(&hdr->ih_time); @@ -555,24 +558,35 @@ ssize_t gunzip(void *dst, size_t dstlen, uint8_t *src, size_t srclen) /* skip header */ i = 10; + if (srclen < 4) { + goto toosmall; + } flags = src[3]; if (src[2] != DEFLATED || (flags & RESERVED) != 0) { puts ("Error: Bad gzipped data\n"); return -1; } - if ((flags & EXTRA_FIELD) != 0) + if ((flags & EXTRA_FIELD) != 0) { + if (srclen < 12) { + goto toosmall; + } i = 12 + src[10] + (src[11] << 8); - if ((flags & ORIG_NAME) != 0) - while (src[i++] != 0) - ; - if ((flags & COMMENT) != 0) - while (src[i++] != 0) - ; - if ((flags & HEAD_CRC) != 0) + } + if ((flags & ORIG_NAME) != 0) { + while (i < srclen && src[i++] != 0) { + /* do nothing */ + } + } + if ((flags & COMMENT) != 0) { + while (i < srclen && src[i++] != 0) { + /* do nothing */ + } + } + if ((flags & HEAD_CRC) != 0) { i += 2; + } if (i >= srclen) { - puts ("Error: gunzip out of data in header\n"); - return -1; + goto toosmall; } s.zalloc = zalloc; @@ -596,16 +610,21 @@ ssize_t gunzip(void *dst, size_t dstlen, uint8_t *src, size_t srclen) inflateEnd(&s); return dstbytes; + +toosmall: + puts("Error: gunzip out of data in header\n"); + return -1; } /* Load a U-Boot image. */ -static int load_uboot_image(const char *filename, hwaddr *ep, hwaddr *loadaddr, - int *is_linux, uint8_t image_type, - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, AddressSpace *as) +static ssize_t load_uboot_image(const char *filename, hwaddr *ep, + hwaddr *loadaddr, int *is_linux, + uint8_t image_type, + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, AddressSpace *as) { int fd; - int size; + ssize_t size; hwaddr address; uboot_image_header_t h; uboot_image_header_t *hdr = &h; @@ -678,6 +697,21 @@ static int load_uboot_image(const char *filename, hwaddr *ep, hwaddr *loadaddr, if (is_linux) { if (hdr->ih_os == IH_OS_LINUX) { *is_linux = 1; + } else if (hdr->ih_os == IH_OS_VXWORKS) { + /* + * VxWorks 7 uses the same boot interface as the Linux kernel + * on Arm (64-bit only), PowerPC and RISC-V architectures. + */ + switch (hdr->ih_arch) { + case IH_ARCH_ARM64: + case IH_ARCH_PPC: + case IH_ARCH_RISCV: + *is_linux = 1; + break; + default: + *is_linux = 0; + break; + } } else { *is_linux = 0; } @@ -727,40 +761,40 @@ out: return ret; } -int load_uimage(const char *filename, hwaddr *ep, hwaddr *loadaddr, - int *is_linux, - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque) +ssize_t load_uimage(const char *filename, hwaddr *ep, hwaddr *loadaddr, + int *is_linux, + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque) { return load_uboot_image(filename, ep, loadaddr, is_linux, IH_TYPE_KERNEL, translate_fn, translate_opaque, NULL); } -int load_uimage_as(const char *filename, hwaddr *ep, hwaddr *loadaddr, - int *is_linux, - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, AddressSpace *as) +ssize_t load_uimage_as(const char *filename, hwaddr *ep, hwaddr *loadaddr, + int *is_linux, + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, AddressSpace *as) { return load_uboot_image(filename, ep, loadaddr, is_linux, IH_TYPE_KERNEL, translate_fn, translate_opaque, as); } /* Load a ramdisk. 
*/ -int load_ramdisk(const char *filename, hwaddr addr, uint64_t max_sz) +ssize_t load_ramdisk(const char *filename, hwaddr addr, uint64_t max_sz) { return load_ramdisk_as(filename, addr, max_sz, NULL); } -int load_ramdisk_as(const char *filename, hwaddr addr, uint64_t max_sz, - AddressSpace *as) +ssize_t load_ramdisk_as(const char *filename, hwaddr addr, uint64_t max_sz, + AddressSpace *as) { return load_uboot_image(filename, NULL, &addr, NULL, IH_TYPE_RAMDISK, NULL, NULL, as); } /* Load a gzip-compressed kernel to a dynamically allocated buffer. */ -int load_image_gzipped_buffer(const char *filename, uint64_t max_sz, - uint8_t **buffer) +ssize_t load_image_gzipped_buffer(const char *filename, uint64_t max_sz, + uint8_t **buffer) { uint8_t *compressed_data = NULL; uint8_t *data = NULL; @@ -805,9 +839,9 @@ int load_image_gzipped_buffer(const char *filename, uint64_t max_sz, } /* Load a gzip-compressed kernel. */ -int load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz) +ssize_t load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz) { - int bytes; + ssize_t bytes; uint8_t *data; bytes = load_image_gzipped_buffer(filename, max_sz, &data); @@ -937,14 +971,15 @@ static void *rom_set_mr(Rom *rom, Object *owner, const char *name, bool ro) return data; } -int rom_add_file(const char *file, const char *fw_dir, - hwaddr addr, int32_t bootindex, - bool option_rom, MemoryRegion *mr, - AddressSpace *as) +ssize_t rom_add_file(const char *file, const char *fw_dir, + hwaddr addr, int32_t bootindex, + bool option_rom, MemoryRegion *mr, + AddressSpace *as) { MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); Rom *rom; - int rc, fd = -1; + ssize_t rc; + int fd = -1; char devpath[100]; if (as && mr) { @@ -986,7 +1021,7 @@ int rom_add_file(const char *file, const char *fw_dir, lseek(fd, 0, SEEK_SET); rc = read(fd, rom->data, rom->datasize); if (rc != rom->datasize) { - fprintf(stderr, "rom: file %-20s: read error: rc=%d (expected %zd)\n", + fprintf(stderr, "rom: file %-20s: read error: rc=%zd (expected %zd)\n", rom->name, rc, rom->datasize); goto err; } @@ -1105,12 +1140,12 @@ int rom_add_elf_program(const char *name, GMappedFile *mapped_file, void *data, return 0; } -int rom_add_vga(const char *file) +ssize_t rom_add_vga(const char *file) { return rom_add_file(file, "vgaroms", 0, -1, true, NULL, NULL); } -int rom_add_option(const char *file, int32_t bootindex) +ssize_t rom_add_option(const char *file, int32_t bootindex) { return rom_add_file(file, "genroms", 0, bootindex, true, NULL, NULL); } @@ -1145,9 +1180,13 @@ static void rom_reset(void *unused) if (rom->mr) { void *host = memory_region_get_ram_ptr(rom->mr); memcpy(host, rom->data, rom->datasize); + memset(host + rom->datasize, 0, rom->romsize - rom->datasize); } else { address_space_write_rom(rom->as, rom->addr, MEMTXATTRS_UNSPECIFIED, rom->data, rom->datasize); + address_space_set(rom->as, rom->addr + rom->datasize, 0, + rom->romsize - rom->datasize, + MEMTXATTRS_UNSPECIFIED); } if (rom->isrom) { /* rom needs to be written only once */ @@ -1310,6 +1349,92 @@ static Rom *find_rom(hwaddr addr, size_t size) return NULL; } +typedef struct RomSec { + hwaddr base; + int se; /* start/end flag */ +} RomSec; + + +/* + * Sort into address order. We break ties between rom-startpoints + * and rom-endpoints in favour of the startpoint, by sorting the 0->1 + * transition before the 1->0 transition. Either way round would + * work, but this way saves a little work later by avoiding + * dealing with "gaps" of 0 length. 
+ */
+static gint sort_secs(gconstpointer a, gconstpointer b)
+{
+    RomSec *ra = (RomSec *) a;
+    RomSec *rb = (RomSec *) b;
+
+    if (ra->base == rb->base) {
+        return ra->se - rb->se;
+    }
+    return ra->base > rb->base ? 1 : -1;
+}
+
+static GList *add_romsec_to_list(GList *secs, hwaddr base, int se)
+{
+    RomSec *cand = g_new(RomSec, 1);
+    cand->base = base;
+    cand->se = se;
+    return g_list_prepend(secs, cand);
+}
+
+RomGap rom_find_largest_gap_between(hwaddr base, size_t size)
+{
+    Rom *rom;
+    RomSec *cand;
+    RomGap res = {0, 0};
+    hwaddr gapstart = base;
+    GList *it, *secs = NULL;
+    int count = 0;
+
+    QTAILQ_FOREACH(rom, &roms, next) {
+        /* Ignore blobs being loaded to special places */
+        if (rom->mr || rom->fw_file) {
+            continue;
+        }
+        /* ignore anything finishing below base */
+        if (rom->addr + rom->romsize <= base) {
+            continue;
+        }
+        /* ignore anything starting above the region */
+        if (rom->addr >= base + size) {
+            continue;
+        }
+
+        /* Save the start and end of each relevant ROM */
+        secs = add_romsec_to_list(secs, rom->addr, 1);
+
+        if (rom->addr + rom->romsize < base + size) {
+            secs = add_romsec_to_list(secs, rom->addr + rom->romsize, -1);
+        }
+    }
+
+    /* sentinel */
+    secs = add_romsec_to_list(secs, base + size, 1);
+
+    secs = g_list_sort(secs, sort_secs);
+
+    for (it = g_list_first(secs); it; it = g_list_next(it)) {
+        cand = (RomSec *) it->data;
+        if (count == 0 && count + cand->se == 1) {
+            size_t gap = cand->base - gapstart;
+            if (gap > res.size) {
+                res.base = gapstart;
+                res.size = gap;
+            }
+        } else if (count == 1 && count + cand->se == 0) {
+            gapstart = cand->base;
+        }
+        count += cand->se;
+    }
+
+    g_list_free_full(secs, g_free);
+    return res;
+}
+
 /*
  * Copies memory from registered ROMs to dest. Any memory that is contained in
  * a ROM between addr and addr + size is copied. Note that this can involve
@@ -1457,32 +1582,35 @@ void *rom_ptr_for_as(AddressSpace *as, hwaddr addr, size_t size)
     return cbdata.rom;
 }

-void hmp_info_roms(Monitor *mon, const QDict *qdict)
+HumanReadableText *qmp_x_query_roms(Error **errp)
 {
     Rom *rom;
+    g_autoptr(GString) buf = g_string_new("");

     QTAILQ_FOREACH(rom, &roms, next) {
         if (rom->mr) {
-            monitor_printf(mon, "%s"
-                           " size=0x%06zx name=\"%s\"\n",
-                           memory_region_name(rom->mr),
-                           rom->romsize,
-                           rom->name);
+            g_string_append_printf(buf, "%s"
+                                   " size=0x%06zx name=\"%s\"\n",
+                                   memory_region_name(rom->mr),
+                                   rom->romsize,
+                                   rom->name);
         } else if (!rom->fw_file) {
-            monitor_printf(mon, "addr=" TARGET_FMT_plx
-                           " size=0x%06zx mem=%s name=\"%s\"\n",
-                           rom->addr, rom->romsize,
-                           rom->isrom ?
"rom" : "ram", + rom->name); } else { - monitor_printf(mon, "fw=%s/%s" - " size=0x%06zx name=\"%s\"\n", - rom->fw_dir, - rom->fw_file, - rom->romsize, - rom->name); + g_string_append_printf(buf, "fw=%s/%s" + " size=0x%06zx name=\"%s\"\n", + rom->fw_dir, + rom->fw_file, + rom->romsize, + rom->name); } } + + return human_readable_text_from_str(buf); } typedef enum HexRecord HexRecord; @@ -1720,11 +1848,12 @@ out: } /* return size or -1 if error */ -int load_targphys_hex_as(const char *filename, hwaddr *entry, AddressSpace *as) +ssize_t load_targphys_hex_as(const char *filename, hwaddr *entry, + AddressSpace *as) { gsize hex_blob_size; gchar *hex_blob; - int total_size = 0; + ssize_t total_size = 0; if (!g_file_get_contents(filename, &hex_blob, &hex_blob_size, NULL)) { return -1; diff --git a/hw/core/machine-hmp-cmds.c b/hw/core/machine-hmp-cmds.c index 76b22b00d6..5cb5eecbfc 100644 --- a/hw/core/machine-hmp-cmds.c +++ b/hw/core/machine-hmp-cmds.c @@ -53,8 +53,7 @@ void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict) HotpluggableCPUList *saved = l; CpuInstanceProperties *c; - if (err != NULL) { - hmp_handle_error(mon, err); + if (hmp_handle_error(mon, err)) { return; } @@ -78,6 +77,10 @@ void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict) if (c->has_die_id) { monitor_printf(mon, " die-id: \"%" PRIu64 "\"\n", c->die_id); } + if (c->has_cluster_id) { + monitor_printf(mon, " cluster-id: \"%" PRIu64 "\"\n", + c->cluster_id); + } if (c->has_core_id) { monitor_printf(mon, " core-id: \"%" PRIu64 "\"\n", c->core_id); } @@ -131,38 +134,3 @@ void hmp_info_memdev(Monitor *mon, const QDict *qdict) qapi_free_MemdevList(memdev_list); hmp_handle_error(mon, err); } - -void hmp_info_numa(Monitor *mon, const QDict *qdict) -{ - int i, nb_numa_nodes; - NumaNodeMem *node_mem; - CpuInfoFastList *cpu_list, *cpu; - MachineState *ms = MACHINE(qdev_get_machine()); - - nb_numa_nodes = ms->numa_state ? 
ms->numa_state->num_nodes : 0; - monitor_printf(mon, "%d nodes\n", nb_numa_nodes); - if (!nb_numa_nodes) { - return; - } - - cpu_list = qmp_query_cpus_fast(&error_abort); - node_mem = g_new0(NumaNodeMem, nb_numa_nodes); - - query_numa_node_mem(node_mem, ms); - for (i = 0; i < nb_numa_nodes; i++) { - monitor_printf(mon, "node %d cpus:", i); - for (cpu = cpu_list; cpu; cpu = cpu->next) { - if (cpu->value->has_props && cpu->value->props->has_node_id && - cpu->value->props->node_id == i) { - monitor_printf(mon, " %" PRIi64, cpu->value->cpu_index); - } - } - monitor_printf(mon, "\n"); - monitor_printf(mon, "node %d size: %" PRId64 " MB\n", i, - node_mem[i].node_mem >> 20); - monitor_printf(mon, "node %d plugged: %" PRId64 " MB\n", i, - node_mem[i].node_plugged_mem >> 20); - } - qapi_free_CpuInfoFastList(cpu_list); - g_free(node_mem); -} diff --git a/hw/core/machine-qmp-cmds.c b/hw/core/machine-qmp-cmds.c index 216fdfaf3a..4f4ab30f8c 100644 --- a/hw/core/machine-qmp-cmds.c +++ b/hw/core/machine-qmp-cmds.c @@ -15,6 +15,7 @@ #include "qapi/qmp/qerror.h" #include "qapi/qmp/qobject.h" #include "qapi/qobject-input-visitor.h" +#include "qapi/type-helpers.h" #include "qemu/main-loop.h" #include "qom/qom-qobject.h" #include "sysemu/hostmem.h" @@ -204,3 +205,42 @@ MemdevList *qmp_query_memdev(Error **errp) object_child_foreach(obj, query_memdev, &list); return list; } + +HumanReadableText *qmp_x_query_numa(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + int i, nb_numa_nodes; + NumaNodeMem *node_mem; + CpuInfoFastList *cpu_list, *cpu; + MachineState *ms = MACHINE(qdev_get_machine()); + + nb_numa_nodes = ms->numa_state ? ms->numa_state->num_nodes : 0; + g_string_append_printf(buf, "%d nodes\n", nb_numa_nodes); + if (!nb_numa_nodes) { + goto done; + } + + cpu_list = qmp_query_cpus_fast(&error_abort); + node_mem = g_new0(NumaNodeMem, nb_numa_nodes); + + query_numa_node_mem(node_mem, ms); + for (i = 0; i < nb_numa_nodes; i++) { + g_string_append_printf(buf, "node %d cpus:", i); + for (cpu = cpu_list; cpu; cpu = cpu->next) { + if (cpu->value->has_props && cpu->value->props->has_node_id && + cpu->value->props->node_id == i) { + g_string_append_printf(buf, " %" PRIi64, cpu->value->cpu_index); + } + } + g_string_append_printf(buf, "\n"); + g_string_append_printf(buf, "node %d size: %" PRId64 " MB\n", i, + node_mem[i].node_mem >> 20); + g_string_append_printf(buf, "node %d plugged: %" PRId64 " MB\n", i, + node_mem[i].node_plugged_mem >> 20); + } + qapi_free_CpuInfoFastList(cpu_list); + g_free(node_mem); + + done: + return human_readable_text_from_str(buf); +} diff --git a/hw/core/machine-smp.c b/hw/core/machine-smp.c new file mode 100644 index 0000000000..b39ed21e65 --- /dev/null +++ b/hw/core/machine-smp.c @@ -0,0 +1,195 @@ +/* + * QEMU Machine core (related to -smp parsing) + * + * Copyright (c) 2021 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "hw/boards.h" +#include "qapi/error.h" + +/* + * Report information of a machine's supported CPU topology hierarchy. + * Topology members will be ordered from the largest to the smallest + * in the string. + */ +static char *cpu_hierarchy_to_string(MachineState *ms) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + GString *s = g_string_new(NULL); + + g_string_append_printf(s, "sockets (%u)", ms->smp.sockets); + + if (mc->smp_props.dies_supported) { + g_string_append_printf(s, " * dies (%u)", ms->smp.dies); + } + + if (mc->smp_props.clusters_supported) { + g_string_append_printf(s, " * clusters (%u)", ms->smp.clusters); + } + + g_string_append_printf(s, " * cores (%u)", ms->smp.cores); + g_string_append_printf(s, " * threads (%u)", ms->smp.threads); + + return g_string_free(s, false); +} + +/* + * machine_parse_smp_config: Generic function used to parse the given + * SMP configuration + * + * Any missing parameter in "cpus/maxcpus/sockets/cores/threads" will be + * automatically computed based on the provided ones. + * + * In the calculation of omitted sockets/cores/threads: we prefer sockets + * over cores over threads before 6.2, while preferring cores over sockets + * over threads since 6.2. + * + * In the calculation of cpus/maxcpus: When both maxcpus and cpus are omitted, + * maxcpus will be computed from the given parameters and cpus will be set + * equal to maxcpus. When only one of maxcpus and cpus is given then the + * omitted one will be set to its given counterpart's value. Both maxcpus and + * cpus may be specified, but maxcpus must be equal to or greater than cpus. + * + * For compatibility, apart from the parameters that will be computed, newly + * introduced topology members which are likely to be target specific should + * be directly set as 1 if they are omitted (e.g. dies for PC since 4.1). + */ +void machine_parse_smp_config(MachineState *ms, + const SMPConfiguration *config, Error **errp) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + unsigned cpus = config->has_cpus ? config->cpus : 0; + unsigned sockets = config->has_sockets ? config->sockets : 0; + unsigned dies = config->has_dies ? config->dies : 0; + unsigned clusters = config->has_clusters ? config->clusters : 0; + unsigned cores = config->has_cores ? config->cores : 0; + unsigned threads = config->has_threads ? config->threads : 0; + unsigned maxcpus = config->has_maxcpus ? config->maxcpus : 0; + + /* + * Specified CPU topology parameters must be greater than zero, + * explicit configuration like "cpus=0" is not allowed. + */ + if ((config->has_cpus && config->cpus == 0) || + (config->has_sockets && config->sockets == 0) || + (config->has_dies && config->dies == 0) || + (config->has_clusters && config->clusters == 0) || + (config->has_cores && config->cores == 0) || + (config->has_threads && config->threads == 0) || + (config->has_maxcpus && config->maxcpus == 0)) { + warn_report("Deprecated CPU topology (considered invalid): " + "CPU topology parameters must be greater than zero"); + } + + /* + * If not supported by the machine, a topology parameter must be + * omitted or specified equal to 1. + */ + if (!mc->smp_props.dies_supported && dies > 1) { + error_setg(errp, "dies not supported by this machine's CPU topology"); + return; + } + if (!mc->smp_props.clusters_supported && clusters > 1) { + error_setg(errp, "clusters not supported by this machine's CPU topology"); + return; + } + + dies = dies > 0 ? dies : 1; + clusters = clusters > 0 ? 
clusters : 1; + + /* compute missing values based on the provided ones */ + if (cpus == 0 && maxcpus == 0) { + sockets = sockets > 0 ? sockets : 1; + cores = cores > 0 ? cores : 1; + threads = threads > 0 ? threads : 1; + } else { + maxcpus = maxcpus > 0 ? maxcpus : cpus; + + if (mc->smp_props.prefer_sockets) { + /* prefer sockets over cores before 6.2 */ + if (sockets == 0) { + cores = cores > 0 ? cores : 1; + threads = threads > 0 ? threads : 1; + sockets = maxcpus / (dies * clusters * cores * threads); + } else if (cores == 0) { + threads = threads > 0 ? threads : 1; + cores = maxcpus / (sockets * dies * clusters * threads); + } + } else { + /* prefer cores over sockets since 6.2 */ + if (cores == 0) { + sockets = sockets > 0 ? sockets : 1; + threads = threads > 0 ? threads : 1; + cores = maxcpus / (sockets * dies * clusters * threads); + } else if (sockets == 0) { + threads = threads > 0 ? threads : 1; + sockets = maxcpus / (dies * clusters * cores * threads); + } + } + + /* try to calculate omitted threads at last */ + if (threads == 0) { + threads = maxcpus / (sockets * dies * clusters * cores); + } + } + + maxcpus = maxcpus > 0 ? maxcpus : sockets * dies * clusters * cores * threads; + cpus = cpus > 0 ? cpus : maxcpus; + + ms->smp.cpus = cpus; + ms->smp.sockets = sockets; + ms->smp.dies = dies; + ms->smp.clusters = clusters; + ms->smp.cores = cores; + ms->smp.threads = threads; + ms->smp.max_cpus = maxcpus; + + /* sanity-check of the computed topology */ + if (sockets * dies * clusters * cores * threads != maxcpus) { + g_autofree char *topo_msg = cpu_hierarchy_to_string(ms); + error_setg(errp, "Invalid CPU topology: " + "product of the hierarchy must match maxcpus: " + "%s != maxcpus (%u)", + topo_msg, maxcpus); + return; + } + + if (maxcpus < cpus) { + g_autofree char *topo_msg = cpu_hierarchy_to_string(ms); + error_setg(errp, "Invalid CPU topology: " + "maxcpus must be equal to or greater than smp: " + "%s == maxcpus (%u) < smp_cpus (%u)", + topo_msg, maxcpus, cpus); + return; + } + + if (ms->smp.cpus < mc->min_cpus) { + error_setg(errp, "Invalid SMP CPUs %d. The min CPUs " + "supported by machine '%s' is %d", + ms->smp.cpus, + mc->name, mc->min_cpus); + return; + } + + if (ms->smp.max_cpus > mc->max_cpus) { + error_setg(errp, "Invalid SMP CPUs %d. 
The max CPUs " + "supported by machine '%s' is %d", + ms->smp.max_cpus, + mc->name, mc->max_cpus); + return; + } +} diff --git a/hw/core/machine.c b/hw/core/machine.c index 54e040587d..19f42450f5 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -21,12 +21,14 @@ #include "qapi/qapi-visit-common.h" #include "qapi/qapi-visit-machine.h" #include "qapi/visitor.h" +#include "qom/object_interfaces.h" #include "hw/sysbus.h" #include "sysemu/cpus.h" #include "sysemu/sysemu.h" #include "sysemu/reset.h" #include "sysemu/runstate.h" #include "sysemu/numa.h" +#include "sysemu/xen.h" #include "qemu/error-report.h" #include "sysemu/qtest.h" #include "hw/pci/pci.h" @@ -36,6 +38,32 @@ #include "exec/confidential-guest-support.h" #include "hw/virtio/virtio.h" #include "hw/virtio/virtio-pci.h" +#include "qom/object_interfaces.h" + +GlobalProperty hw_compat_7_1[] = { + { "virtio-device", "queue_reset", "false" }, + { "virtio-rng-pci", "vectors", "0" }, + { "virtio-rng-pci-transitional", "vectors", "0" }, + { "virtio-rng-pci-non-transitional", "vectors", "0" }, +}; +const size_t hw_compat_7_1_len = G_N_ELEMENTS(hw_compat_7_1); + +GlobalProperty hw_compat_7_0[] = { + { "arm-gicv3-common", "force-8-bit-prio", "on" }, + { "nvme-ns", "eui64-default", "on"}, +}; +const size_t hw_compat_7_0_len = G_N_ELEMENTS(hw_compat_7_0); + +GlobalProperty hw_compat_6_2[] = { + { "PIIX4_PM", "x-not-migrate-acpi-index", "on"}, +}; +const size_t hw_compat_6_2_len = G_N_ELEMENTS(hw_compat_6_2); + +GlobalProperty hw_compat_6_1[] = { + { "vhost-user-vsock-device", "seqpacket", "off" }, + { "nvme-ns", "shared", "off" }, +}; +const size_t hw_compat_6_1_len = G_N_ELEMENTS(hw_compat_6_1); GlobalProperty hw_compat_6_0[] = { { "gpex-pcihost", "allow-unmapped-accesses", "false" }, @@ -43,6 +71,7 @@ GlobalProperty hw_compat_6_0[] = { { "nvme-ns", "eui64-default", "off"}, { "e1000", "init-vet", "off" }, { "e1000e", "init-vet", "off" }, + { "vhost-vsock-device", "seqpacket", "off" }, }; const size_t hw_compat_6_0_len = G_N_ELEMENTS(hw_compat_6_0); @@ -50,7 +79,7 @@ GlobalProperty hw_compat_5_2[] = { { "ICH9-LPC", "smm-compat", "on"}, { "PIIX4_PM", "smm-compat", "on"}, { "virtio-blk-device", "report-discard-granularity", "off" }, - { "virtio-net-pci", "vectors", "3"}, + { "virtio-net-pci-base", "vectors", "3"}, }; const size_t hw_compat_5_2_len = G_N_ELEMENTS(hw_compat_5_2); @@ -508,6 +537,78 @@ static void machine_set_hmat(Object *obj, bool value, Error **errp) ms->numa_state->hmat_enabled = value; } +static void machine_get_mem(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MachineState *ms = MACHINE(obj); + MemorySizeConfiguration mem = { + .has_size = true, + .size = ms->ram_size, + .has_max_size = !!ms->ram_slots, + .max_size = ms->maxram_size, + .has_slots = !!ms->ram_slots, + .slots = ms->ram_slots, + }; + MemorySizeConfiguration *p_mem = &mem; + + visit_type_MemorySizeConfiguration(v, name, &p_mem, &error_abort); +} + +static void machine_set_mem(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MachineState *ms = MACHINE(obj); + MachineClass *mc = MACHINE_GET_CLASS(obj); + MemorySizeConfiguration *mem; + + ERRP_GUARD(); + + if (!visit_type_MemorySizeConfiguration(v, name, &mem, errp)) { + return; + } + + if (!mem->has_size) { + mem->has_size = true; + mem->size = mc->default_ram_size; + } + mem->size = QEMU_ALIGN_UP(mem->size, 8192); + if (mc->fixup_ram_size) { + mem->size = mc->fixup_ram_size(mem->size); + } + if ((ram_addr_t)mem->size != mem->size) { + 
error_setg(errp, "ram size too large"); + goto out_free; + } + + if (mem->has_max_size) { + if (mem->max_size < mem->size) { + error_setg(errp, "invalid value of maxmem: " + "maximum memory size (0x%" PRIx64 ") must be at least " + "the initial memory size (0x%" PRIx64 ")", + mem->max_size, mem->size); + goto out_free; + } + if (mem->has_slots && mem->slots && mem->max_size == mem->size) { + error_setg(errp, "invalid value of maxmem: " + "memory slots were specified but maximum memory size " + "(0x%" PRIx64 ") is equal to the initial memory size " + "(0x%" PRIx64 ")", mem->max_size, mem->size); + goto out_free; + } + ms->maxram_size = mem->max_size; + } else { + if (mem->has_slots) { + error_setg(errp, "slots specified but no max-size"); + goto out_free; + } + ms->maxram_size = mem->size; + } + ms->ram_size = mem->size; + ms->ram_slots = mem->has_slots ? mem->slots : 0; +out_free: + qapi_free_MemorySizeConfiguration(mem); +} + static char *machine_get_nvdimm_persistence(Object *obj, Error **errp) { MachineState *ms = MACHINE(obj); @@ -542,61 +643,30 @@ void machine_class_allow_dynamic_sysbus_dev(MachineClass *mc, const char *type) bool device_is_dynamic_sysbus(MachineClass *mc, DeviceState *dev) { - bool allowed = false; - strList *wl; Object *obj = OBJECT(dev); if (!object_dynamic_cast(obj, TYPE_SYS_BUS_DEVICE)) { return false; } + return device_type_is_dynamic_sysbus(mc, object_get_typename(obj)); +} + +bool device_type_is_dynamic_sysbus(MachineClass *mc, const char *type) +{ + bool allowed = false; + strList *wl; + ObjectClass *klass = object_class_by_name(type); + for (wl = mc->allowed_dynamic_sysbus_devices; !allowed && wl; wl = wl->next) { - allowed |= !!object_dynamic_cast(obj, wl->value); + allowed |= !!object_class_dynamic_cast(klass, wl->value); } return allowed; } -static void validate_sysbus_device(SysBusDevice *sbdev, void *opaque) -{ - MachineState *machine = opaque; - MachineClass *mc = MACHINE_GET_CLASS(machine); - - if (!device_is_dynamic_sysbus(mc, DEVICE(sbdev))) { - error_report("Option '-device %s' cannot be handled by this machine", - object_class_get_name(object_get_class(OBJECT(sbdev)))); - exit(1); - } -} - -static char *machine_get_memdev(Object *obj, Error **errp) -{ - MachineState *ms = MACHINE(obj); - - return g_strdup(ms->ram_memdev_id); -} - -static void machine_set_memdev(Object *obj, const char *value, Error **errp) -{ - MachineState *ms = MACHINE(obj); - - g_free(ms->ram_memdev_id); - ms->ram_memdev_id = g_strdup(value); -} - -static void machine_init_notify(Notifier *notifier, void *data) -{ - MachineState *machine = MACHINE(qdev_get_machine()); - - /* - * Loop through all dynamically created sysbus devices and check if they are - * all allowed. If a device is not allowed, error out. 
- */ - foreach_dynamic_sysbus_device(validate_sysbus_device, machine); -} - HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine) { int i; @@ -683,6 +753,11 @@ void machine_set_cpu_numa_node(MachineState *machine, return; } + if (props->has_cluster_id && !slot->props.has_cluster_id) { + error_setg(errp, "cluster-id is not supported"); + return; + } + if (props->has_socket_id && !slot->props.has_socket_id) { error_setg(errp, "socket-id is not supported"); return; @@ -702,6 +777,11 @@ void machine_set_cpu_numa_node(MachineState *machine, continue; } + if (props->has_cluster_id && + props->cluster_id != slot->props.cluster_id) { + continue; + } + if (props->has_die_id && props->die_id != slot->props.die_id) { continue; } @@ -743,78 +823,20 @@ void machine_set_cpu_numa_node(MachineState *machine, } } -static void smp_parse(MachineState *ms, SMPConfiguration *config, Error **errp) -{ - unsigned cpus = config->has_cpus ? config->cpus : 0; - unsigned sockets = config->has_sockets ? config->sockets : 0; - unsigned cores = config->has_cores ? config->cores : 0; - unsigned threads = config->has_threads ? config->threads : 0; - - if (config->has_dies && config->dies != 0 && config->dies != 1) { - error_setg(errp, "dies not supported by this machine's CPU topology"); - return; - } - - /* compute missing values, prefer sockets over cores over threads */ - if (cpus == 0 || sockets == 0) { - cores = cores > 0 ? cores : 1; - threads = threads > 0 ? threads : 1; - if (cpus == 0) { - sockets = sockets > 0 ? sockets : 1; - cpus = cores * threads * sockets; - } else { - ms->smp.max_cpus = config->has_maxcpus ? config->maxcpus : cpus; - sockets = ms->smp.max_cpus / (cores * threads); - } - } else if (cores == 0) { - threads = threads > 0 ? threads : 1; - cores = cpus / (sockets * threads); - cores = cores > 0 ? cores : 1; - } else if (threads == 0) { - threads = cpus / (cores * sockets); - threads = threads > 0 ? threads : 1; - } else if (sockets * cores * threads < cpus) { - error_setg(errp, "cpu topology: " - "sockets (%u) * cores (%u) * threads (%u) < " - "smp_cpus (%u)", - sockets, cores, threads, cpus); - return; - } - - ms->smp.max_cpus = config->has_maxcpus ? 
config->maxcpus : cpus; - - if (ms->smp.max_cpus < cpus) { - error_setg(errp, "maxcpus must be equal to or greater than smp"); - return; - } - - if (sockets * cores * threads != ms->smp.max_cpus) { - error_setg(errp, "Invalid CPU topology: " - "sockets (%u) * cores (%u) * threads (%u) " - "!= maxcpus (%u)", - sockets, cores, threads, - ms->smp.max_cpus); - return; - } - - ms->smp.cpus = cpus; - ms->smp.cores = cores; - ms->smp.threads = threads; - ms->smp.sockets = sockets; -} - static void machine_get_smp(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { MachineState *ms = MACHINE(obj); SMPConfiguration *config = &(SMPConfiguration){ - .has_cores = true, .cores = ms->smp.cores, + .has_cpus = true, .cpus = ms->smp.cpus, .has_sockets = true, .sockets = ms->smp.sockets, .has_dies = true, .dies = ms->smp.dies, + .has_clusters = true, .clusters = ms->smp.clusters, + .has_cores = true, .cores = ms->smp.cores, .has_threads = true, .threads = ms->smp.threads, - .has_cpus = true, .cpus = ms->smp.cpus, .has_maxcpus = true, .maxcpus = ms->smp.max_cpus, }; + if (!visit_type_SMPConfiguration(v, name, &config, &error_abort)) { return; } @@ -823,35 +845,73 @@ static void machine_get_smp(Object *obj, Visitor *v, const char *name, static void machine_set_smp(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - MachineClass *mc = MACHINE_GET_CLASS(obj); MachineState *ms = MACHINE(obj); - SMPConfiguration *config; - ERRP_GUARD(); + g_autoptr(SMPConfiguration) config = NULL; if (!visit_type_SMPConfiguration(v, name, &config, errp)) { return; } - mc->smp_parse(ms, config, errp); - if (*errp) { - goto out_free; + machine_parse_smp_config(ms, config, errp); +} + +static void machine_get_boot(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MachineState *ms = MACHINE(obj); + BootConfiguration *config = &ms->boot_config; + visit_type_BootConfiguration(v, name, &config, &error_abort); +} + +static void machine_free_boot_config(MachineState *ms) +{ + g_free(ms->boot_config.order); + g_free(ms->boot_config.once); + g_free(ms->boot_config.splash); +} + +static void machine_copy_boot_config(MachineState *ms, BootConfiguration *config) +{ + MachineClass *machine_class = MACHINE_GET_CLASS(ms); + + machine_free_boot_config(ms); + ms->boot_config = *config; + if (!config->has_order) { + ms->boot_config.has_order = true; + ms->boot_config.order = g_strdup(machine_class->default_boot_order); + } +} + +static void machine_set_boot(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + ERRP_GUARD(); + MachineState *ms = MACHINE(obj); + BootConfiguration *config = NULL; + + if (!visit_type_BootConfiguration(v, name, &config, errp)) { + return; + } + if (config->has_order) { + validate_bootdevices(config->order, errp); + if (*errp) { + goto out_free; + } + } + if (config->has_once) { + validate_bootdevices(config->once, errp); + if (*errp) { + goto out_free; + } } - /* sanity-check smp_cpus and max_cpus against mc */ - if (ms->smp.cpus < mc->min_cpus) { - error_setg(errp, "Invalid SMP CPUs %d. The min CPUs " - "supported by machine '%s' is %d", - ms->smp.cpus, - mc->name, mc->min_cpus); - } else if (ms->smp.max_cpus > mc->max_cpus) { - error_setg(errp, "Invalid SMP CPUs %d. The max CPUs " - "supported by machine '%s' is %d", - current_machine->smp.max_cpus, - mc->name, mc->max_cpus); - } + machine_copy_boot_config(ms, config); + /* Strings live in ms->boot_config. 
*/ + free(config); + return; out_free: - qapi_free_SMPConfiguration(config); + qapi_free_BootConfiguration(config); } static void machine_class_init(ObjectClass *oc, void *data) @@ -861,7 +921,6 @@ static void machine_class_init(ObjectClass *oc, void *data) /* Default 128 MB as guest ram size */ mc->default_ram_size = 128 * MiB; mc->rom_file_has_mr = true; - mc->smp_parse = smp_parse; /* numa node memory size aligned on 8MB by default. * On Linux, each node's border has to be 8MB aligned @@ -893,6 +952,12 @@ static void machine_class_init(ObjectClass *oc, void *data) object_class_property_set_description(oc, "dumpdtb", "Dump current dtb to a file and quit"); + object_class_property_add(oc, "boot", "BootConfiguration", + machine_get_boot, machine_set_boot, + NULL, NULL); + object_class_property_set_description(oc, "boot", + "Boot configuration"); + object_class_property_add(oc, "smp", "SMPConfiguration", machine_get_smp, machine_set_smp, NULL, NULL); @@ -954,11 +1019,18 @@ static void machine_class_init(ObjectClass *oc, void *data) object_class_property_set_description(oc, "memory-encryption", "Set memory encryption object to use"); - object_class_property_add_str(oc, "memory-backend", - machine_get_memdev, machine_set_memdev); + object_class_property_add_link(oc, "memory-backend", TYPE_MEMORY_BACKEND, + offsetof(MachineState, memdev), object_property_allow_set_link, + OBJ_PROP_LINK_STRONG); object_class_property_set_description(oc, "memory-backend", "Set RAM backend" "Valid value is ID of hostmem based backend"); + + object_class_property_add(oc, "memory", "MemorySizeConfiguration", + machine_get_mem, machine_set_mem, + NULL, NULL); + object_class_property_set_description(oc, "memory", + "Memory size configuration"); } static void machine_class_base_init(ObjectClass *oc, void *data) @@ -989,6 +1061,8 @@ static void machine_initfn(Object *obj) ms->mem_merge = true; ms->enable_graphics = true; ms->kernel_cmdline = g_strdup(""); + ms->ram_size = mc->default_ram_size; + ms->maxram_size = mc->default_ram_size; if (mc->nvdimm_supported) { Object *obj = OBJECT(ms); @@ -1018,23 +1092,23 @@ static void machine_initfn(Object *obj) "Table (HMAT)"); } - /* Register notifier when init is done for sysbus sanity checks */ - ms->sysbus_notifier.notify = machine_init_notify; - qemu_add_machine_init_done_notifier(&ms->sysbus_notifier); - /* default to mc->default_cpus */ ms->smp.cpus = mc->default_cpus; ms->smp.max_cpus = mc->default_cpus; - ms->smp.cores = 1; - ms->smp.dies = 1; - ms->smp.threads = 1; ms->smp.sockets = 1; + ms->smp.dies = 1; + ms->smp.clusters = 1; + ms->smp.cores = 1; + ms->smp.threads = 1; + + machine_copy_boot_config(ms, &(BootConfiguration){ 0 }); } static void machine_finalize(Object *obj) { MachineState *ms = MACHINE(obj); + machine_free_boot_config(ms); g_free(ms->kernel_filename); g_free(ms->initrd_filename); g_free(ms->kernel_cmdline); @@ -1074,8 +1148,17 @@ static char *cpu_slot_to_string(const CPUArchId *cpu) g_string_append_printf(s, "socket-id: %"PRId64, cpu->props.socket_id); } if (cpu->props.has_die_id) { + if (s->len) { + g_string_append_printf(s, ", "); + } g_string_append_printf(s, "die-id: %"PRId64, cpu->props.die_id); } + if (cpu->props.has_cluster_id) { + if (s->len) { + g_string_append_printf(s, ", "); + } + g_string_append_printf(s, "cluster-id: %"PRId64, cpu->props.cluster_id); + } if (cpu->props.has_core_id) { if (s->len) { g_string_append_printf(s, ", "); @@ -1098,9 +1181,7 @@ static void numa_validate_initiator(NumaState *numa_state) for (i = 0; i < 
numa_state->num_nodes; i++) { if (numa_info[i].initiator == MAX_NODES) { - error_report("The initiator of NUMA node %d is missing, use " - "'-numa node,initiator' option to declare it", i); - exit(1); + continue; } if (!numa_info[numa_info[i].initiator].present) { @@ -1177,7 +1258,7 @@ MemoryRegion *machine_consume_memdev(MachineState *machine, { MemoryRegion *ret = host_memory_backend_get_memory(backend); - if (memory_region_is_mapped(ret)) { + if (host_memory_backend_is_mapped(backend)) { error_report("memory backend %s can't be used multiple times.", object_get_canonical_path_component(OBJECT(backend))); exit(EXIT_FAILURE); @@ -1187,7 +1268,40 @@ MemoryRegion *machine_consume_memdev(MachineState *machine, return ret; } -void machine_run_board_init(MachineState *machine) +static bool create_default_memdev(MachineState *ms, const char *path, Error **errp) +{ + Object *obj; + MachineClass *mc = MACHINE_GET_CLASS(ms); + bool r = false; + + obj = object_new(path ? TYPE_MEMORY_BACKEND_FILE : TYPE_MEMORY_BACKEND_RAM); + if (path) { + if (!object_property_set_str(obj, "mem-path", path, errp)) { + goto out; + } + } + if (!object_property_set_int(obj, "size", ms->ram_size, errp)) { + goto out; + } + object_property_add_child(object_get_objects_root(), mc->default_ram_id, + obj); + /* Ensure backend's memory region name is equal to mc->default_ram_id */ + if (!object_property_set_bool(obj, "x-use-canonical-path-for-ramblock-id", + false, errp)) { + goto out; + } + if (!user_creatable_complete(USER_CREATABLE(obj), errp)) { + goto out; + } + r = object_property_set_link(OBJECT(ms), "memory-backend", obj, errp); + +out: + object_unref(obj); + return r; +} + + +void machine_run_board_init(MachineState *machine, const char *mem_path, Error **errp) { MachineClass *machine_class = MACHINE_GET_CLASS(machine); ObjectClass *oc = object_class_by_name(machine->cpu_type); @@ -1198,11 +1312,34 @@ void machine_run_board_init(MachineState *machine) clock values from the log. */ replay_checkpoint(CHECKPOINT_INIT); - if (machine->ram_memdev_id) { - Object *o; - o = object_resolve_path_type(machine->ram_memdev_id, - TYPE_MEMORY_BACKEND, NULL); - machine->ram = machine_consume_memdev(machine, MEMORY_BACKEND(o)); + if (!xen_enabled()) { + /* On 32-bit hosts, QEMU is limited by virtual address space */ + if (machine->ram_size > (2047 << 20) && HOST_LONG_BITS == 32) { + error_setg(errp, "at most 2047 MB RAM can be simulated"); + return; + } + } + + if (machine->memdev) { + ram_addr_t backend_size = object_property_get_uint(OBJECT(machine->memdev), + "size", &error_abort); + if (backend_size != machine->ram_size) { + error_setg(errp, "Machine memory size does not match the size of the memory backend"); + return; + } + } else if (machine_class->default_ram_id && machine->ram_size && + numa_uses_legacy_mem()) { + if (object_property_find(object_get_objects_root(), + machine_class->default_ram_id)) { + error_setg(errp, "object name '%s' is reserved for the default" + " RAM backend, it can't be used for any other purposes." 
+ " Change the object's 'id' to something else", + machine_class->default_ram_id); + return; + } + if (!create_default_memdev(current_machine, mem_path, errp)) { + return; + } } if (machine->numa_state) { @@ -1212,6 +1349,10 @@ void machine_run_board_init(MachineState *machine) } } + if (!machine->ram && machine->memdev) { + machine->ram = machine_consume_memdev(machine, machine->memdev); + } + /* If the machine supports the valid_cpu_types check and the user * specified a CPU with -cpu check here that the user CPU is supported. */ @@ -1294,9 +1435,9 @@ void qdev_machine_creation_done(void) { cpu_synchronize_all_post_init(); - if (current_machine->boot_once) { - qemu_boot_set(current_machine->boot_once, &error_fatal); - qemu_register_reset(restore_boot_order, g_strdup(current_machine->boot_order)); + if (current_machine->boot_config.has_once) { + qemu_boot_set(current_machine->boot_config.once, &error_fatal); + qemu_register_reset(restore_boot_order, g_strdup(current_machine->boot_config.order)); } /* diff --git a/hw/core/meson.build b/hw/core/meson.build index 18f44fb7c2..7a4d02b6c0 100644 --- a/hw/core/meson.build +++ b/hw/core/meson.build @@ -1,7 +1,6 @@ # core qdev-related obj files, also used by *-user and unit tests -hwcore_files = files( +hwcore_ss.add(files( 'bus.c', - 'hotplug.c', 'qdev-properties.c', 'qdev.c', 'reset.c', @@ -11,22 +10,35 @@ hwcore_files = files( 'irq.c', 'clock.c', 'qdev-clock.c', -) +)) +if have_system + hwcore_ss.add(files( + 'hotplug.c', + 'qdev-hotplug.c', + )) +else + hwcore_ss.add(files( + 'hotplug-stubs.c', + )) +endif common_ss.add(files('cpu-common.c')) -common_ss.add(when: 'CONFIG_FITLOADER', if_true: files('loader-fit.c')) -common_ss.add(when: 'CONFIG_GENERIC_LOADER', if_true: files('generic-loader.c')) -common_ss.add(when: ['CONFIG_GUEST_LOADER', fdt], if_true: files('guest-loader.c')) -common_ss.add(when: 'CONFIG_OR_IRQ', if_true: files('or-irq.c')) -common_ss.add(when: 'CONFIG_PLATFORM_BUS', if_true: files('platform-bus.c')) -common_ss.add(when: 'CONFIG_PTIMER', if_true: files('ptimer.c')) -common_ss.add(when: 'CONFIG_REGISTER', if_true: files('register.c')) -common_ss.add(when: 'CONFIG_SPLIT_IRQ', if_true: files('split-irq.c')) -common_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('stream.c')) +common_ss.add(files('machine-smp.c')) +softmmu_ss.add(when: 'CONFIG_FITLOADER', if_true: files('loader-fit.c')) +softmmu_ss.add(when: 'CONFIG_GENERIC_LOADER', if_true: files('generic-loader.c')) +softmmu_ss.add(when: ['CONFIG_GUEST_LOADER', fdt], if_true: files('guest-loader.c')) +softmmu_ss.add(when: 'CONFIG_OR_IRQ', if_true: files('or-irq.c')) +softmmu_ss.add(when: 'CONFIG_PLATFORM_BUS', if_true: files('platform-bus.c')) +softmmu_ss.add(when: 'CONFIG_PTIMER', if_true: files('ptimer.c')) +softmmu_ss.add(when: 'CONFIG_REGISTER', if_true: files('register.c')) +softmmu_ss.add(when: 'CONFIG_SPLIT_IRQ', if_true: files('split-irq.c')) +softmmu_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('stream.c')) +softmmu_ss.add(when: 'CONFIG_PLATFORM_BUS', if_true: files('sysbus-fdt.c')) softmmu_ss.add(files( 'cpu-sysemu.c', 'fw-path-provider.c', + 'gpio.c', 'loader.c', 'machine-hmp-cmds.c', 'machine.c', diff --git a/hw/core/numa.c b/hw/core/numa.c index 510d096a88..ea24a5fa8c 100644 --- a/hw/core/numa.c +++ b/hw/core/numa.c @@ -695,7 +695,7 @@ void numa_complete_configuration(MachineState *ms) } if (!numa_uses_legacy_mem() && mc->default_ram_id) { - if (ms->ram_memdev_id) { + if (ms->memdev) { error_report("'-machine memory-backend' and '-numa memdev'" " properties 
are mutually exclusive"); exit(1); @@ -756,6 +756,7 @@ static void numa_stat_memory_devices(NumaNodeMem node_mem[]) PCDIMMDeviceInfo *pcdimm_info; VirtioPMEMDeviceInfo *vpi; VirtioMEMDeviceInfo *vmi; + SgxEPCDeviceInfo *se; for (info = info_list; info; info = info->next) { MemoryDeviceInfo *value = info->value; @@ -781,6 +782,11 @@ static void numa_stat_memory_devices(NumaNodeMem node_mem[]) node_mem[vmi->node].node_mem += vmi->size; node_mem[vmi->node].node_plugged_mem += vmi->size; break; + case MEMORY_DEVICE_INFO_KIND_SGX_EPC: + se = value->u.sgx_epc.data; + node_mem[se->node].node_mem += se->size; + node_mem[se->node].node_plugged_mem = 0; + break; default: g_assert_not_reached(); } @@ -816,6 +822,19 @@ static int ram_block_notify_add_single(RAMBlock *rb, void *opaque) return 0; } +static int ram_block_notify_remove_single(RAMBlock *rb, void *opaque) +{ + const ram_addr_t max_size = qemu_ram_get_max_length(rb); + const ram_addr_t size = qemu_ram_get_used_length(rb); + void *host = qemu_ram_get_host_addr(rb); + RAMBlockNotifier *notifier = opaque; + + if (host) { + notifier->ram_block_removed(notifier, host, size, max_size); + } + return 0; +} + void ram_block_notifier_add(RAMBlockNotifier *n) { QLIST_INSERT_HEAD(&ram_list.ramblock_notifiers, n, next); @@ -829,13 +848,18 @@ void ram_block_notifier_add(RAMBlockNotifier *n) void ram_block_notifier_remove(RAMBlockNotifier *n) { QLIST_REMOVE(n, next); + + if (n->ram_block_removed) { + qemu_ram_foreach_block(ram_block_notify_remove_single, n); + } } void ram_block_notify_add(void *host, size_t size, size_t max_size) { RAMBlockNotifier *notifier; + RAMBlockNotifier *next; - QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) { + QLIST_FOREACH_SAFE(notifier, &ram_list.ramblock_notifiers, next, next) { if (notifier->ram_block_added) { notifier->ram_block_added(notifier, host, size, max_size); } @@ -845,8 +869,9 @@ void ram_block_notify_add(void *host, size_t size, size_t max_size) void ram_block_notify_remove(void *host, size_t size, size_t max_size) { RAMBlockNotifier *notifier; + RAMBlockNotifier *next; - QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) { + QLIST_FOREACH_SAFE(notifier, &ram_list.ramblock_notifiers, next, next) { if (notifier->ram_block_removed) { notifier->ram_block_removed(notifier, host, size, max_size); } @@ -856,8 +881,9 @@ void ram_block_notify_remove(void *host, size_t size, size_t max_size) void ram_block_notify_resize(void *host, size_t old_size, size_t new_size) { RAMBlockNotifier *notifier; + RAMBlockNotifier *next; - QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) { + QLIST_FOREACH_SAFE(notifier, &ram_list.ramblock_notifiers, next, next) { if (notifier->ram_block_resized) { notifier->ram_block_resized(notifier, host, old_size, new_size); } diff --git a/hw/core/ptimer.c b/hw/core/ptimer.c index 6ba19fd965..eb5ba1aff7 100644 --- a/hw/core/ptimer.c +++ b/hw/core/ptimer.c @@ -14,7 +14,6 @@ #include "sysemu/cpu-timers.h" #include "sysemu/qtest.h" #include "block/aio.h" -#include "sysemu/cpus.h" #include "hw/clock.h" #define DELTA_ADJUST 1 diff --git a/hw/core/qdev-hotplug.c b/hw/core/qdev-hotplug.c new file mode 100644 index 0000000000..d495d0e9c7 --- /dev/null +++ b/hw/core/qdev-hotplug.c @@ -0,0 +1,73 @@ +/* + * QDev Hotplug handlers + * + * Copyright (c) Red Hat + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/qdev-core.h" +#include "hw/boards.h" + +HotplugHandler *qdev_get_machine_hotplug_handler(DeviceState *dev) +{ + MachineState *machine; + MachineClass *mc; + Object *m_obj = qdev_get_machine(); + + if (object_dynamic_cast(m_obj, TYPE_MACHINE)) { + machine = MACHINE(m_obj); + mc = MACHINE_GET_CLASS(machine); + if (mc->get_hotplug_handler) { + return mc->get_hotplug_handler(machine, dev); + } + } + + return NULL; +} + +bool qdev_hotplug_allowed(DeviceState *dev, Error **errp) +{ + MachineState *machine; + MachineClass *mc; + Object *m_obj = qdev_get_machine(); + + if (object_dynamic_cast(m_obj, TYPE_MACHINE)) { + machine = MACHINE(m_obj); + mc = MACHINE_GET_CLASS(machine); + if (mc->hotplug_allowed) { + return mc->hotplug_allowed(machine, dev, errp); + } + } + + return true; +} + +HotplugHandler *qdev_get_bus_hotplug_handler(DeviceState *dev) +{ + if (dev->parent_bus) { + return dev->parent_bus->hotplug_handler; + } + return NULL; +} + +HotplugHandler *qdev_get_hotplug_handler(DeviceState *dev) +{ + HotplugHandler *hotplug_ctrl = qdev_get_machine_hotplug_handler(dev); + + if (hotplug_ctrl == NULL && dev->parent_bus) { + hotplug_ctrl = qdev_get_bus_hotplug_handler(dev); + } + return hotplug_ctrl; +} + +/* can be used as ->unplug() callback for the simple cases */ +void qdev_simple_device_unplug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + qdev_unrealize(dev); +} diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c index 2760c21f11..a91f60567a 100644 --- a/hw/core/qdev-properties-system.c +++ b/hw/core/qdev-properties-system.c @@ -36,11 +36,11 @@ static bool check_prop_still_unset(Object *obj, const char *name, const void *old_val, const char *new_val, - Error **errp) + bool allow_override, Error **errp) { const GlobalProperty *prop = qdev_find_global_prop(obj, name); - if (!old_val) { + if (!old_val || (!prop && allow_override)) { return true; } @@ -93,16 +93,34 @@ static void set_drive_helper(Object *obj, Visitor *v, const char *name, BlockBackend *blk; bool blk_created = false; int ret; + BlockDriverState *bs; + AioContext *ctx; if (!visit_type_str(v, name, &str, errp)) { return; } - /* - * TODO Should this really be an error? If no, the old value - * needs to be released before we store the new one. - */ - if (!check_prop_still_unset(obj, name, *ptr, str, errp)) { + if (!check_prop_still_unset(obj, name, *ptr, str, true, errp)) { + return; + } + + if (*ptr) { + /* BlockBackend alread exists. So, we want to change attached node */ + blk = *ptr; + ctx = blk_get_aio_context(blk); + bs = bdrv_lookup_bs(NULL, str, errp); + if (!bs) { + return; + } + + if (ctx != bdrv_get_aio_context(bs)) { + error_setg(errp, "Different aio context is not supported for new " + "node"); + } + + aio_context_acquire(ctx); + blk_replace_bs(blk, bs, errp); + aio_context_release(ctx); return; } @@ -114,7 +132,7 @@ static void set_drive_helper(Object *obj, Visitor *v, const char *name, blk = blk_by_name(str); if (!blk) { - BlockDriverState *bs = bdrv_lookup_bs(NULL, str, NULL); + bs = bdrv_lookup_bs(NULL, str, NULL); if (bs) { /* * If the device supports iothreads, it will make sure to move the @@ -123,8 +141,7 @@ static void set_drive_helper(Object *obj, Visitor *v, const char *name, * aware of iothreads require their BlockBackends to be in the main * AioContext. */ - AioContext *ctx = iothread ? bdrv_get_aio_context(bs) : - qemu_get_aio_context(); + ctx = iothread ? 
bdrv_get_aio_context(bs) : qemu_get_aio_context(); blk = blk_new(ctx, 0, BLK_PERM_ALL); blk_created = true; @@ -196,6 +213,7 @@ static void release_drive(Object *obj, const char *name, void *opaque) const PropertyInfo qdev_prop_drive = { .name = "str", .description = "Node name or ID of a block device to use as a backend", + .realized_set_allowed = true, .get = get_drive, .set = set_drive, .release = release_drive, @@ -204,6 +222,7 @@ const PropertyInfo qdev_prop_drive = { const PropertyInfo qdev_prop_drive_iothread = { .name = "str", .description = "Node name or ID of a block device to use as a backend", + .realized_set_allowed = true, .get = get_drive, .set = set_drive_iothread, .release = release_drive, @@ -238,7 +257,7 @@ static void set_chr(Object *obj, Visitor *v, const char *name, void *opaque, * TODO Should this really be an error? If no, the old value * needs to be released before we store the new one. */ - if (!check_prop_still_unset(obj, name, be->chr, str, errp)) { + if (!check_prop_still_unset(obj, name, be->chr, str, false, errp)) { return; } @@ -408,10 +427,16 @@ static void set_netdev(Object *obj, Visitor *v, const char *name, * TODO Should this really be an error? If no, the old value * needs to be released before we store the new one. */ - if (!check_prop_still_unset(obj, name, ncs[i], str, errp)) { + if (!check_prop_still_unset(obj, name, ncs[i], str, false, errp)) { goto out; } + if (peers[i]->info->check_peer_type) { + if (!peers[i]->info->check_peer_type(peers[i], obj->class, errp)) { + goto out; + } + } + ncs[i] = peers[i]; ncs[i]->queue_index = i; } diff --git a/hw/core/qdev-properties.c b/hw/core/qdev-properties.c index 50f40949f5..357b8761b5 100644 --- a/hw/core/qdev-properties.c +++ b/hw/core/qdev-properties.c @@ -26,11 +26,11 @@ void qdev_prop_set_after_realize(DeviceState *dev, const char *name, /* returns: true if property is allowed to be set, false otherwise */ static bool qdev_prop_allow_set(Object *obj, const char *name, - Error **errp) + const PropertyInfo *info, Error **errp) { DeviceState *dev = DEVICE(obj); - if (dev->realized) { + if (dev->realized && !info->realized_set_allowed) { qdev_prop_set_after_realize(dev, name, errp); return false; } @@ -79,7 +79,7 @@ static void field_prop_set(Object *obj, Visitor *v, const char *name, { Property *prop = opaque; - if (!qdev_prop_allow_set(obj, name, errp)) { + if (!qdev_prop_allow_set(obj, name, prop->info, errp)) { return; } @@ -428,6 +428,25 @@ const PropertyInfo qdev_prop_int64 = { .set_default_value = qdev_propinfo_set_default_value_int, }; +static void set_uint64_checkmask(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + uint64_t *ptr = object_field_prop_ptr(obj, prop); + + visit_type_uint64(v, name, ptr, errp); + if (*ptr & ~prop->bitmask) { + error_setg(errp, "Property value for '%s' has bits outside mask '0x%" PRIx64 "'", + name, prop->bitmask); + } +} + +const PropertyInfo qdev_prop_uint64_checkmask = { + .name = "uint64", + .get = get_uint64, + .set = set_uint64_checkmask, +}; + /* --- string --- */ static void release_string(Object *obj, const char *name, void *opaque) diff --git a/hw/core/qdev.c b/hw/core/qdev.c index cefc5eaa0a..0145501904 100644 --- a/hw/core/qdev.c +++ b/hw/core/qdev.c @@ -28,11 +28,11 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qapi/qapi-events-qdev.h" +#include "qapi/qmp/qdict.h" #include "qapi/qmp/qerror.h" #include "qapi/visitor.h" #include "qemu/error-report.h" #include "qemu/option.h" -#include 
"hw/hotplug.h" #include "hw/irq.h" #include "hw/qdev-properties.h" #include "hw/boards.h" @@ -147,8 +147,21 @@ bool qdev_set_parent_bus(DeviceState *dev, BusState *bus, Error **errp) DeviceState *qdev_new(const char *name) { - if (!object_class_by_name(name)) { - module_load_qom_one(name); + ObjectClass *oc = object_class_by_name(name); +#ifdef CONFIG_MODULES + if (!oc) { + int rv = module_load_qom(name, &error_fatal); + if (rv > 0) { + oc = object_class_by_name(name); + } else { + error_report("could not find a module for type '%s'", name); + exit(1); + } + } +#endif + if (!oc) { + error_report("unknown type '%s'", name); + abort(); } return DEVICE(object_new(name)); } @@ -211,14 +224,17 @@ void device_listener_unregister(DeviceListener *listener) QTAILQ_REMOVE(&device_listeners, listener, link); } -bool qdev_should_hide_device(QemuOpts *opts) +bool qdev_should_hide_device(const QDict *opts, bool from_json, Error **errp) { + ERRP_GUARD(); DeviceListener *listener; QTAILQ_FOREACH(listener, &device_listeners, link) { if (listener->hide_device) { - if (listener->hide_device(listener, opts)) { + if (listener->hide_device(listener, opts, from_json, errp)) { return true; + } else if (*errp) { + return false; } } } @@ -234,58 +250,6 @@ void qdev_set_legacy_instance_id(DeviceState *dev, int alias_id, dev->alias_required_for_version = required_for_version; } -HotplugHandler *qdev_get_machine_hotplug_handler(DeviceState *dev) -{ - MachineState *machine; - MachineClass *mc; - Object *m_obj = qdev_get_machine(); - - if (object_dynamic_cast(m_obj, TYPE_MACHINE)) { - machine = MACHINE(m_obj); - mc = MACHINE_GET_CLASS(machine); - if (mc->get_hotplug_handler) { - return mc->get_hotplug_handler(machine, dev); - } - } - - return NULL; -} - -bool qdev_hotplug_allowed(DeviceState *dev, Error **errp) -{ - MachineState *machine; - MachineClass *mc; - Object *m_obj = qdev_get_machine(); - - if (object_dynamic_cast(m_obj, TYPE_MACHINE)) { - machine = MACHINE(m_obj); - mc = MACHINE_GET_CLASS(machine); - if (mc->hotplug_allowed) { - return mc->hotplug_allowed(machine, dev, errp); - } - } - - return true; -} - -HotplugHandler *qdev_get_bus_hotplug_handler(DeviceState *dev) -{ - if (dev->parent_bus) { - return dev->parent_bus->hotplug_handler; - } - return NULL; -} - -HotplugHandler *qdev_get_hotplug_handler(DeviceState *dev) -{ - HotplugHandler *hotplug_ctrl = qdev_get_machine_hotplug_handler(dev); - - if (hotplug_ctrl == NULL && dev->parent_bus) { - hotplug_ctrl = qdev_get_bus_hotplug_handler(dev); - } - return hotplug_ctrl; -} - static int qdev_prereset(DeviceState *dev, void *opaque) { trace_qdev_reset_tree(dev, object_get_typename(OBJECT(dev))); @@ -367,13 +331,6 @@ static void device_reset_child_foreach(Object *obj, ResettableChildCallback cb, } } -/* can be used as ->unplug() callback for the simple cases */ -void qdev_simple_device_unplug_cb(HotplugHandler *hotplug_dev, - DeviceState *dev, Error **errp) -{ - qdev_unrealize(dev); -} - bool qdev_realize(DeviceState *dev, BusState *bus, Error **errp) { assert(!dev->realized && !dev->parent_bus); @@ -432,180 +389,6 @@ BusState *qdev_get_parent_bus(DeviceState *dev) return dev->parent_bus; } -static NamedGPIOList *qdev_get_named_gpio_list(DeviceState *dev, - const char *name) -{ - NamedGPIOList *ngl; - - QLIST_FOREACH(ngl, &dev->gpios, node) { - /* NULL is a valid and matchable name. 
*/ - if (g_strcmp0(name, ngl->name) == 0) { - return ngl; - } - } - - ngl = g_malloc0(sizeof(*ngl)); - ngl->name = g_strdup(name); - QLIST_INSERT_HEAD(&dev->gpios, ngl, node); - return ngl; -} - -void qdev_init_gpio_in_named_with_opaque(DeviceState *dev, - qemu_irq_handler handler, - void *opaque, - const char *name, int n) -{ - int i; - NamedGPIOList *gpio_list = qdev_get_named_gpio_list(dev, name); - - assert(gpio_list->num_out == 0 || !name); - gpio_list->in = qemu_extend_irqs(gpio_list->in, gpio_list->num_in, handler, - opaque, n); - - if (!name) { - name = "unnamed-gpio-in"; - } - for (i = gpio_list->num_in; i < gpio_list->num_in + n; i++) { - gchar *propname = g_strdup_printf("%s[%u]", name, i); - - object_property_add_child(OBJECT(dev), propname, - OBJECT(gpio_list->in[i])); - g_free(propname); - } - - gpio_list->num_in += n; -} - -void qdev_init_gpio_in(DeviceState *dev, qemu_irq_handler handler, int n) -{ - qdev_init_gpio_in_named(dev, handler, NULL, n); -} - -void qdev_init_gpio_out_named(DeviceState *dev, qemu_irq *pins, - const char *name, int n) -{ - int i; - NamedGPIOList *gpio_list = qdev_get_named_gpio_list(dev, name); - - assert(gpio_list->num_in == 0 || !name); - - if (!name) { - name = "unnamed-gpio-out"; - } - memset(pins, 0, sizeof(*pins) * n); - for (i = 0; i < n; ++i) { - gchar *propname = g_strdup_printf("%s[%u]", name, - gpio_list->num_out + i); - - object_property_add_link(OBJECT(dev), propname, TYPE_IRQ, - (Object **)&pins[i], - object_property_allow_set_link, - OBJ_PROP_LINK_STRONG); - g_free(propname); - } - gpio_list->num_out += n; -} - -void qdev_init_gpio_out(DeviceState *dev, qemu_irq *pins, int n) -{ - qdev_init_gpio_out_named(dev, pins, NULL, n); -} - -qemu_irq qdev_get_gpio_in_named(DeviceState *dev, const char *name, int n) -{ - NamedGPIOList *gpio_list = qdev_get_named_gpio_list(dev, name); - - assert(n >= 0 && n < gpio_list->num_in); - return gpio_list->in[n]; -} - -qemu_irq qdev_get_gpio_in(DeviceState *dev, int n) -{ - return qdev_get_gpio_in_named(dev, NULL, n); -} - -void qdev_connect_gpio_out_named(DeviceState *dev, const char *name, int n, - qemu_irq pin) -{ - char *propname = g_strdup_printf("%s[%d]", - name ? name : "unnamed-gpio-out", n); - if (pin && !OBJECT(pin)->parent) { - /* We need a name for object_property_set_link to work */ - object_property_add_child(container_get(qdev_get_machine(), - "/unattached"), - "non-qdev-gpio[*]", OBJECT(pin)); - } - object_property_set_link(OBJECT(dev), propname, OBJECT(pin), &error_abort); - g_free(propname); -} - -qemu_irq qdev_get_gpio_out_connector(DeviceState *dev, const char *name, int n) -{ - g_autofree char *propname = g_strdup_printf("%s[%d]", - name ? name : "unnamed-gpio-out", n); - - qemu_irq ret = (qemu_irq)object_property_get_link(OBJECT(dev), propname, - NULL); - - return ret; -} - -/* disconnect a GPIO output, returning the disconnected input (if any) */ - -static qemu_irq qdev_disconnect_gpio_out_named(DeviceState *dev, - const char *name, int n) -{ - char *propname = g_strdup_printf("%s[%d]", - name ? 
name : "unnamed-gpio-out", n); - - qemu_irq ret = (qemu_irq)object_property_get_link(OBJECT(dev), propname, - NULL); - if (ret) { - object_property_set_link(OBJECT(dev), propname, NULL, NULL); - } - g_free(propname); - return ret; -} - -qemu_irq qdev_intercept_gpio_out(DeviceState *dev, qemu_irq icpt, - const char *name, int n) -{ - qemu_irq disconnected = qdev_disconnect_gpio_out_named(dev, name, n); - qdev_connect_gpio_out_named(dev, name, n, icpt); - return disconnected; -} - -void qdev_connect_gpio_out(DeviceState * dev, int n, qemu_irq pin) -{ - qdev_connect_gpio_out_named(dev, NULL, n, pin); -} - -void qdev_pass_gpios(DeviceState *dev, DeviceState *container, - const char *name) -{ - int i; - NamedGPIOList *ngl = qdev_get_named_gpio_list(dev, name); - - for (i = 0; i < ngl->num_in; i++) { - const char *nm = ngl->name ? ngl->name : "unnamed-gpio-in"; - char *propname = g_strdup_printf("%s[%d]", nm, i); - - object_property_add_alias(OBJECT(container), propname, - OBJECT(dev), propname); - g_free(propname); - } - for (i = 0; i < ngl->num_out; i++) { - const char *nm = ngl->name ? ngl->name : "unnamed-gpio-out"; - char *propname = g_strdup_printf("%s[%d]", nm, i); - - object_property_add_alias(OBJECT(container), propname, - OBJECT(dev), propname); - g_free(propname); - } - QLIST_REMOVE(ngl, node); - QLIST_INSERT_HEAD(&container->gpios, ngl, node); -} - BusState *qdev_get_child_bus(DeviceState *dev, const char *name) { BusState *bus; @@ -698,6 +481,28 @@ char *qdev_get_dev_path(DeviceState *dev) return NULL; } +void qdev_add_unplug_blocker(DeviceState *dev, Error *reason) +{ + dev->unplug_blockers = g_slist_prepend(dev->unplug_blockers, reason); +} + +void qdev_del_unplug_blocker(DeviceState *dev, Error *reason) +{ + dev->unplug_blockers = g_slist_remove(dev->unplug_blockers, reason); +} + +bool qdev_unplug_blocked(DeviceState *dev, Error **errp) +{ + ERRP_GUARD(); + + if (dev->unplug_blockers) { + error_propagate(errp, error_copy(dev->unplug_blockers->data)); + return true; + } + + return false; +} + static bool device_get_realized(Object *obj, Error **errp) { DeviceState *dev = DEVICE(obj); @@ -934,6 +739,8 @@ static void device_finalize(Object *obj) DeviceState *dev = DEVICE(obj); + g_assert(!dev->unplug_blockers); + QLIST_FOREACH_SAFE(ngl, &dev->gpios, node, next) { QLIST_REMOVE(ngl, node); qemu_free_irqs(ngl->in, ngl->num_in); @@ -955,7 +762,8 @@ static void device_finalize(Object *obj) dev->canonical_path = NULL; } - qemu_opts_del(dev->opts); + qobject_unref(dev->opts); + g_free(dev->id); } static void device_class_base_init(ObjectClass *class, void *data) diff --git a/hw/core/register.c b/hw/core/register.c index d6f8c20816..95b0150c0a 100644 --- a/hw/core/register.c +++ b/hw/core/register.c @@ -300,6 +300,18 @@ RegisterInfoArray *register_init_block32(DeviceState *owner, data, ops, debug_enabled, memory_size, 32); } +RegisterInfoArray *register_init_block64(DeviceState *owner, + const RegisterAccessInfo *rae, + int num, RegisterInfo *ri, + uint64_t *data, + const MemoryRegionOps *ops, + bool debug_enabled, + uint64_t memory_size) +{ + return register_init_block(owner, rae, num, ri, (void *) + data, ops, debug_enabled, memory_size, 64); +} + void register_finalize_block(RegisterInfoArray *r_array) { object_unparent(OBJECT(&r_array->mem)); diff --git a/hw/core/reset.c b/hw/core/reset.c index 9c477f2bf5..d3263b613e 100644 --- a/hw/core/reset.c +++ b/hw/core/reset.c @@ -33,6 +33,7 @@ typedef struct QEMUResetEntry { QTAILQ_ENTRY(QEMUResetEntry) entry; QEMUResetHandler *func; void 
*opaque; + bool skip_on_snapshot_load; } QEMUResetEntry; static QTAILQ_HEAD(, QEMUResetEntry) reset_handlers = @@ -40,13 +41,23 @@ static QTAILQ_HEAD(, QEMUResetEntry) reset_handlers = void qemu_register_reset(QEMUResetHandler *func, void *opaque) { - QEMUResetEntry *re = g_malloc0(sizeof(QEMUResetEntry)); + QEMUResetEntry *re = g_new0(QEMUResetEntry, 1); re->func = func; re->opaque = opaque; QTAILQ_INSERT_TAIL(&reset_handlers, re, entry); } +void qemu_register_reset_nosnapshotload(QEMUResetHandler *func, void *opaque) +{ + QEMUResetEntry *re = g_new0(QEMUResetEntry, 1); + + re->func = func; + re->opaque = opaque; + re->skip_on_snapshot_load = true; + QTAILQ_INSERT_TAIL(&reset_handlers, re, entry); +} + void qemu_unregister_reset(QEMUResetHandler *func, void *opaque) { QEMUResetEntry *re; @@ -60,12 +71,16 @@ void qemu_unregister_reset(QEMUResetHandler *func, void *opaque) } } -void qemu_devices_reset(void) +void qemu_devices_reset(ShutdownCause reason) { QEMUResetEntry *re, *nre; /* reset all devices */ QTAILQ_FOREACH_SAFE(re, &reset_handlers, entry, nre) { + if (reason == SHUTDOWN_CAUSE_SNAPSHOT_LOAD && + re->skip_on_snapshot_load) { + continue; + } re->func(re->opaque); } } diff --git a/hw/core/resettable.c b/hw/core/resettable.c index 96a99ce39e..c3df75c6ba 100644 --- a/hw/core/resettable.c +++ b/hw/core/resettable.c @@ -201,12 +201,11 @@ static void resettable_phase_exit(Object *obj, void *opaque, ResetType type) resettable_child_foreach(rc, obj, resettable_phase_exit, NULL, type); assert(s->count > 0); - if (s->count == 1) { + if (--s->count == 0) { trace_resettable_phase_exit_exec(obj, obj_typename, !!rc->phases.exit); if (rc->phases.exit && !resettable_get_tr_func(rc, obj)) { rc->phases.exit(obj); } - s->count = 0; } s->exit_phase_in_progress = false; trace_resettable_phase_exit_end(obj, obj_typename, s->count); diff --git a/hw/arm/sysbus-fdt.c b/hw/core/sysbus-fdt.c similarity index 98% rename from hw/arm/sysbus-fdt.c rename to hw/core/sysbus-fdt.c index 48c5fe9bf1..eebcd28f9a 100644 --- a/hw/arm/sysbus-fdt.c +++ b/hw/core/sysbus-fdt.c @@ -27,7 +27,7 @@ #ifdef CONFIG_LINUX #include #endif -#include "hw/arm/sysbus-fdt.h" +#include "hw/core/sysbus-fdt.h" #include "qemu/error-report.h" #include "sysemu/device_tree.h" #include "sysemu/tpm.h" @@ -299,7 +299,8 @@ static int add_amd_xgbe_fdt_node(SysBusDevice *sbdev, void *opaque) void *guest_fdt = data->fdt, *host_fdt; const void *r; int i, prop_len; - uint32_t *irq_attr, *reg_attr, *host_clock_phandles; + uint32_t *irq_attr, *reg_attr; + const uint32_t *host_clock_phandles; uint64_t mmio_base, irq_number; uint32_t guest_clock_phandles[2]; @@ -339,7 +340,7 @@ static int add_amd_xgbe_fdt_node(SysBusDevice *sbdev, void *opaque) error_report("%s clocks property should contain 2 handles", __func__); exit(1); } - host_clock_phandles = (uint32_t *)r; + host_clock_phandles = r; guest_clock_phandles[0] = qemu_fdt_alloc_phandle(guest_fdt); guest_clock_phandles[1] = qemu_fdt_alloc_phandle(guest_fdt); @@ -539,7 +540,7 @@ void platform_bus_add_all_fdt_nodes(void *fdt, const char *intc, hwaddr addr, assert(fdt); - node = g_strdup_printf("/platform@%"PRIx64, addr); + node = g_strdup_printf("/platform-bus@%"PRIx64, addr); /* Create a /platform node that we can put all devices into */ qemu_fdt_add_subnode(fdt, node); diff --git a/hw/core/sysbus.c b/hw/core/sysbus.c index aaae8e23cc..05c1da3d31 100644 --- a/hw/core/sysbus.c +++ b/hw/core/sysbus.c @@ -340,11 +340,13 @@ static BusState *main_system_bus; static void main_system_bus_create(void) { - /* 
assign main_system_bus before qbus_create_inplace() - * in order to make "if (bus != sysbus_get_default())" work */ + /* + * assign main_system_bus before qbus_init() + * in order to make "if (bus != sysbus_get_default())" work + */ main_system_bus = g_malloc0(system_bus_info.instance_size); - qbus_create_inplace(main_system_bus, system_bus_info.instance_size, - TYPE_SYSTEM_BUS, NULL, "main-system-bus"); + qbus_init(main_system_bus, system_bus_info.instance_size, + TYPE_SYSTEM_BUS, NULL, "main-system-bus"); OBJECT(main_system_bus)->free = g_free; } diff --git a/hw/core/trace-events b/hw/core/trace-events index 360ddeb2c8..9b3ecce3b2 100644 --- a/hw/core/trace-events +++ b/hw/core/trace-events @@ -34,3 +34,4 @@ clock_disconnect(const char *clk) "'%s'" clock_set(const char *clk, uint64_t old, uint64_t new) "'%s', %"PRIu64"Hz->%"PRIu64"Hz" clock_propagate(const char *clk) "'%s'" clock_update(const char *clk, const char *src, uint64_t hz, int cb) "'%s', src='%s', val=%"PRIu64"Hz cb=%d" +clock_set_mul_div(const char *clk, uint32_t oldmul, uint32_t mul, uint32_t olddiv, uint32_t div) "'%s', mul: %u -> %u, div: %u -> %u" diff --git a/hw/core/uboot_image.h b/hw/core/uboot_image.h index 608022de6e..18ac293359 100644 --- a/hw/core/uboot_image.h +++ b/hw/core/uboot_image.h @@ -1,23 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* + * (C) Copyright 2008 Semihalf + * * (C) Copyright 2000-2005 * Wolfgang Denk, DENX Software Engineering, wd@denx.de. - * - * See file CREDITS for list of people who contributed to this - * project. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of - * the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see . - * ******************************************************************** * NOTE: This header file defines an interface to U-Boot. Including * this (unmodified) header file in another file is considered normal @@ -31,50 +17,83 @@ /* * Operating System Codes + * + * The following are exposed to uImage header. + * New IDs *MUST* be appended at the end of the list and *NEVER* + * inserted for backward compatibility. 
*/ -#define IH_OS_INVALID 0 /* Invalid OS */ -#define IH_OS_OPENBSD 1 /* OpenBSD */ -#define IH_OS_NETBSD 2 /* NetBSD */ -#define IH_OS_FREEBSD 3 /* FreeBSD */ -#define IH_OS_4_4BSD 4 /* 4.4BSD */ -#define IH_OS_LINUX 5 /* Linux */ -#define IH_OS_SVR4 6 /* SVR4 */ -#define IH_OS_ESIX 7 /* Esix */ -#define IH_OS_SOLARIS 8 /* Solaris */ -#define IH_OS_IRIX 9 /* Irix */ -#define IH_OS_SCO 10 /* SCO */ -#define IH_OS_DELL 11 /* Dell */ -#define IH_OS_NCR 12 /* NCR */ -#define IH_OS_LYNXOS 13 /* LynxOS */ -#define IH_OS_VXWORKS 14 /* VxWorks */ -#define IH_OS_PSOS 15 /* pSOS */ -#define IH_OS_QNX 16 /* QNX */ -#define IH_OS_U_BOOT 17 /* Firmware */ -#define IH_OS_RTEMS 18 /* RTEMS */ -#define IH_OS_ARTOS 19 /* ARTOS */ -#define IH_OS_UNITY 20 /* Unity OS */ +enum { + IH_OS_INVALID = 0, /* Invalid OS */ + IH_OS_OPENBSD, /* OpenBSD */ + IH_OS_NETBSD, /* NetBSD */ + IH_OS_FREEBSD, /* FreeBSD */ + IH_OS_4_4BSD, /* 4.4BSD */ + IH_OS_LINUX, /* Linux */ + IH_OS_SVR4, /* SVR4 */ + IH_OS_ESIX, /* Esix */ + IH_OS_SOLARIS, /* Solaris */ + IH_OS_IRIX, /* Irix */ + IH_OS_SCO, /* SCO */ + IH_OS_DELL, /* Dell */ + IH_OS_NCR, /* NCR */ + IH_OS_LYNXOS, /* LynxOS */ + IH_OS_VXWORKS, /* VxWorks */ + IH_OS_PSOS, /* pSOS */ + IH_OS_QNX, /* QNX */ + IH_OS_U_BOOT, /* Firmware */ + IH_OS_RTEMS, /* RTEMS */ + IH_OS_ARTOS, /* ARTOS */ + IH_OS_UNITY, /* Unity OS */ + IH_OS_INTEGRITY, /* INTEGRITY */ + IH_OS_OSE, /* OSE */ + IH_OS_PLAN9, /* Plan 9 */ + IH_OS_OPENRTOS, /* OpenRTOS */ + IH_OS_ARM_TRUSTED_FIRMWARE, /* ARM Trusted Firmware */ + IH_OS_TEE, /* Trusted Execution Environment */ + IH_OS_OPENSBI, /* RISC-V OpenSBI */ + IH_OS_EFI, /* EFI Firmware (e.g. GRUB2) */ + + IH_OS_COUNT, +}; /* * CPU Architecture Codes (supported by Linux) + * + * The following are exposed to uImage header. + * New IDs *MUST* be appended at the end of the list and *NEVER* + * inserted for backward compatibility. 
*/ -#define IH_CPU_INVALID 0 /* Invalid CPU */ -#define IH_CPU_ALPHA 1 /* Alpha */ -#define IH_CPU_ARM 2 /* ARM */ -#define IH_CPU_I386 3 /* Intel x86 */ -#define IH_CPU_IA64 4 /* IA64 */ -#define IH_CPU_MIPS 5 /* MIPS */ -#define IH_CPU_MIPS64 6 /* MIPS 64 Bit */ -#define IH_CPU_PPC 7 /* PowerPC */ -#define IH_CPU_S390 8 /* IBM S390 */ -#define IH_CPU_SH 9 /* SuperH */ -#define IH_CPU_SPARC 10 /* Sparc */ -#define IH_CPU_SPARC64 11 /* Sparc 64 Bit */ -#define IH_CPU_M68K 12 /* M68K */ -#define IH_CPU_NIOS 13 /* Nios-32 */ -#define IH_CPU_MICROBLAZE 14 /* MicroBlaze */ -#define IH_CPU_NIOS2 15 /* Nios-II */ -#define IH_CPU_BLACKFIN 16 /* Blackfin */ -#define IH_CPU_AVR32 17 /* AVR32 */ +enum { + IH_ARCH_INVALID = 0, /* Invalid CPU */ + IH_ARCH_ALPHA, /* Alpha */ + IH_ARCH_ARM, /* ARM */ + IH_ARCH_I386, /* Intel x86 */ + IH_ARCH_IA64, /* IA64 */ + IH_ARCH_MIPS, /* MIPS */ + IH_ARCH_MIPS64, /* MIPS 64 Bit */ + IH_ARCH_PPC, /* PowerPC */ + IH_ARCH_S390, /* IBM S390 */ + IH_ARCH_SH, /* SuperH */ + IH_ARCH_SPARC, /* Sparc */ + IH_ARCH_SPARC64, /* Sparc 64 Bit */ + IH_ARCH_M68K, /* M68K */ + IH_ARCH_NIOS, /* Nios-32 */ + IH_ARCH_MICROBLAZE, /* MicroBlaze */ + IH_ARCH_NIOS2, /* Nios-II */ + IH_ARCH_BLACKFIN, /* Blackfin */ + IH_ARCH_AVR32, /* AVR32 */ + IH_ARCH_ST200, /* STMicroelectronics ST200 */ + IH_ARCH_SANDBOX, /* Sandbox architecture (test only) */ + IH_ARCH_NDS32, /* ANDES Technology - NDS32 */ + IH_ARCH_OPENRISC, /* OpenRISC 1000 */ + IH_ARCH_ARM64, /* ARM64 */ + IH_ARCH_ARC, /* Synopsys DesignWare ARC */ + IH_ARCH_X86_64, /* AMD x86_64, Intel and Via */ + IH_ARCH_XTENSA, /* Xtensa */ + IH_ARCH_RISCV, /* RISC-V */ + + IH_ARCH_COUNT, +}; /* * Image Types @@ -113,33 +132,85 @@ * U-Boot's command interpreter; this feature is especially * useful when you configure U-Boot to use a real shell (hush) * as command interpreter (=> Shell Scripts). + * + * The following are exposed to uImage header. + * New IDs *MUST* be appended at the end of the list and *NEVER* + * inserted for backward compatibility. 
*/ -#define IH_TYPE_INVALID 0 /* Invalid Image */ -#define IH_TYPE_STANDALONE 1 /* Standalone Program */ -#define IH_TYPE_KERNEL 2 /* OS Kernel Image */ -#define IH_TYPE_RAMDISK 3 /* RAMDisk Image */ -#define IH_TYPE_MULTI 4 /* Multi-File Image */ -#define IH_TYPE_FIRMWARE 5 /* Firmware Image */ -#define IH_TYPE_SCRIPT 6 /* Script file */ -#define IH_TYPE_FILESYSTEM 7 /* Filesystem Image (any type) */ -#define IH_TYPE_FLATDT 8 /* Binary Flat Device Tree Blob */ -#define IH_TYPE_KERNEL_NOLOAD 14 /* OS Kernel Image (noload) */ +enum { + IH_TYPE_INVALID = 0, /* Invalid Image */ + IH_TYPE_STANDALONE, /* Standalone Program */ + IH_TYPE_KERNEL, /* OS Kernel Image */ + IH_TYPE_RAMDISK, /* RAMDisk Image */ + IH_TYPE_MULTI, /* Multi-File Image */ + IH_TYPE_FIRMWARE, /* Firmware Image */ + IH_TYPE_SCRIPT, /* Script file */ + IH_TYPE_FILESYSTEM, /* Filesystem Image (any type) */ + IH_TYPE_FLATDT, /* Binary Flat Device Tree Blob */ + IH_TYPE_KWBIMAGE, /* Kirkwood Boot Image */ + IH_TYPE_IMXIMAGE, /* Freescale IMXBoot Image */ + IH_TYPE_UBLIMAGE, /* Davinci UBL Image */ + IH_TYPE_OMAPIMAGE, /* TI OMAP Config Header Image */ + IH_TYPE_AISIMAGE, /* TI Davinci AIS Image */ + /* OS Kernel Image, can run from any load address */ + IH_TYPE_KERNEL_NOLOAD, + IH_TYPE_PBLIMAGE, /* Freescale PBL Boot Image */ + IH_TYPE_MXSIMAGE, /* Freescale MXSBoot Image */ + IH_TYPE_GPIMAGE, /* TI Keystone GPHeader Image */ + IH_TYPE_ATMELIMAGE, /* ATMEL ROM bootable Image */ + IH_TYPE_SOCFPGAIMAGE, /* Altera SOCFPGA CV/AV Preloader */ + IH_TYPE_X86_SETUP, /* x86 setup.bin Image */ + IH_TYPE_LPC32XXIMAGE, /* x86 setup.bin Image */ + IH_TYPE_LOADABLE, /* A list of typeless images */ + IH_TYPE_RKIMAGE, /* Rockchip Boot Image */ + IH_TYPE_RKSD, /* Rockchip SD card */ + IH_TYPE_RKSPI, /* Rockchip SPI image */ + IH_TYPE_ZYNQIMAGE, /* Xilinx Zynq Boot Image */ + IH_TYPE_ZYNQMPIMAGE, /* Xilinx ZynqMP Boot Image */ + IH_TYPE_ZYNQMPBIF, /* Xilinx ZynqMP Boot Image (bif) */ + IH_TYPE_FPGA, /* FPGA Image */ + IH_TYPE_VYBRIDIMAGE, /* VYBRID .vyb Image */ + IH_TYPE_TEE, /* Trusted Execution Environment OS Image */ + IH_TYPE_FIRMWARE_IVT, /* Firmware Image with HABv4 IVT */ + IH_TYPE_PMMC, /* TI Power Management Micro-Controller Firmware */ + IH_TYPE_STM32IMAGE, /* STMicroelectronics STM32 Image */ + IH_TYPE_SOCFPGAIMAGE_V1, /* Altera SOCFPGA A10 Preloader */ + IH_TYPE_MTKIMAGE, /* MediaTek BootROM loadable Image */ + IH_TYPE_IMX8MIMAGE, /* Freescale IMX8MBoot Image */ + IH_TYPE_IMX8IMAGE, /* Freescale IMX8Boot Image */ + IH_TYPE_COPRO, /* Coprocessor Image for remoteproc*/ + IH_TYPE_SUNXI_EGON, /* Allwinner eGON Boot Image */ + + IH_TYPE_COUNT, /* Number of image types */ +}; /* * Compression Types + * + * The following are exposed to uImage header. + * New IDs *MUST* be appended at the end of the list and *NEVER* + * inserted for backward compatibility. 
*/ -#define IH_COMP_NONE 0 /* No Compression Used */ -#define IH_COMP_GZIP 1 /* gzip Compression Used */ -#define IH_COMP_BZIP2 2 /* bzip2 Compression Used */ +enum { + IH_COMP_NONE = 0, /* No Compression Used */ + IH_COMP_GZIP, /* gzip Compression Used */ + IH_COMP_BZIP2, /* bzip2 Compression Used */ + IH_COMP_LZMA, /* lzma Compression Used */ + IH_COMP_LZO, /* lzo Compression Used */ + IH_COMP_LZ4, /* lz4 Compression Used */ + IH_COMP_ZSTD, /* zstd Compression Used */ + + IH_COMP_COUNT, +}; #define IH_MAGIC 0x27051956 /* Image Magic Number */ #define IH_NMLEN 32 /* Image Name Length */ /* - * all data in network byte order (aka natural aka bigendian) + * Legacy format image header, + * all data in network byte order (aka natural aka bigendian). */ - typedef struct uboot_image_header { uint32_t ih_magic; /* Image Header Magic Number */ uint32_t ih_hcrc; /* Image Header CRC Checksum */ diff --git a/hw/cxl/Kconfig b/hw/cxl/Kconfig new file mode 100644 index 0000000000..8e67519b16 --- /dev/null +++ b/hw/cxl/Kconfig @@ -0,0 +1,3 @@ +config CXL + bool + default y if PCI_EXPRESS diff --git a/hw/cxl/cxl-cdat.c b/hw/cxl/cxl-cdat.c new file mode 100644 index 0000000000..3653aa56f0 --- /dev/null +++ b/hw/cxl/cxl-cdat.c @@ -0,0 +1,224 @@ +/* + * CXL CDAT Structure + * + * Copyright (C) 2021 Avery Design Systems, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/cxl/cxl.h" +#include "qapi/error.h" +#include "qemu/error-report.h" + +static void cdat_len_check(CDATSubHeader *hdr, Error **errp) +{ + assert(hdr->length); + assert(hdr->reserved == 0); + + switch (hdr->type) { + case CDAT_TYPE_DSMAS: + assert(hdr->length == sizeof(CDATDsmas)); + break; + case CDAT_TYPE_DSLBIS: + assert(hdr->length == sizeof(CDATDslbis)); + break; + case CDAT_TYPE_DSMSCIS: + assert(hdr->length == sizeof(CDATDsmscis)); + break; + case CDAT_TYPE_DSIS: + assert(hdr->length == sizeof(CDATDsis)); + break; + case CDAT_TYPE_DSEMTS: + assert(hdr->length == sizeof(CDATDsemts)); + break; + case CDAT_TYPE_SSLBIS: + assert(hdr->length >= sizeof(CDATSslbisHeader)); + assert((hdr->length - sizeof(CDATSslbisHeader)) % + sizeof(CDATSslbe) == 0); + break; + default: + error_setg(errp, "Type %d is reserved", hdr->type); + } +} + +static void ct3_build_cdat(CDATObject *cdat, Error **errp) +{ + g_autofree CDATTableHeader *cdat_header = NULL; + g_autofree CDATEntry *cdat_st = NULL; + uint8_t sum = 0; + int ent, i; + + /* Use default table if fopen == NULL */ + assert(cdat->build_cdat_table); + + cdat_header = g_malloc0(sizeof(*cdat_header)); + if (!cdat_header) { + error_setg(errp, "Failed to allocate CDAT header"); + return; + } + + cdat->built_buf_len = cdat->build_cdat_table(&cdat->built_buf, cdat->private); + + if (!cdat->built_buf_len) { + /* Build later as not all data available yet */ + cdat->to_update = true; + return; + } + cdat->to_update = false; + + cdat_st = g_malloc0(sizeof(*cdat_st) * (cdat->built_buf_len + 1)); + if (!cdat_st) { + error_setg(errp, "Failed to allocate CDAT entry array"); + return; + } + + /* Entry 0 for CDAT header, starts with Entry 1 */ + for (ent = 1; ent < cdat->built_buf_len + 1; ent++) { + CDATSubHeader *hdr = cdat->built_buf[ent - 1]; + uint8_t *buf = (uint8_t *)cdat->built_buf[ent - 1]; + + cdat_st[ent].base = hdr; + cdat_st[ent].length = hdr->length; + + cdat_header->length += hdr->length; + for (i = 0; i < hdr->length; i++) { + sum += buf[i]; + 
} + } + + /* CDAT header */ + cdat_header->revision = CXL_CDAT_REV; + /* For now, no runtime updates */ + cdat_header->sequence = 0; + cdat_header->length += sizeof(CDATTableHeader); + sum += cdat_header->revision + cdat_header->sequence + + cdat_header->length; + /* Sum of all bytes including checksum must be 0 */ + cdat_header->checksum = ~sum + 1; + + cdat_st[0].base = g_steal_pointer(&cdat_header); + cdat_st[0].length = sizeof(*cdat_header); + cdat->entry_len = 1 + cdat->built_buf_len; + cdat->entry = g_steal_pointer(&cdat_st); +} + +static void ct3_load_cdat(CDATObject *cdat, Error **errp) +{ + g_autofree CDATEntry *cdat_st = NULL; + uint8_t sum = 0; + int num_ent; + int i = 0, ent = 1, file_size = 0; + CDATSubHeader *hdr; + FILE *fp = NULL; + + /* Read CDAT file and create its cache */ + fp = fopen(cdat->filename, "r"); + if (!fp) { + error_setg(errp, "CDAT: Unable to open file"); + return; + } + + fseek(fp, 0, SEEK_END); + file_size = ftell(fp); + fseek(fp, 0, SEEK_SET); + cdat->buf = g_malloc0(file_size); + + if (fread(cdat->buf, file_size, 1, fp) == 0) { + error_setg(errp, "CDAT: File read failed"); + return; + } + + fclose(fp); + + if (file_size < sizeof(CDATTableHeader)) { + error_setg(errp, "CDAT: File too short"); + return; + } + i = sizeof(CDATTableHeader); + num_ent = 1; + while (i < file_size) { + hdr = (CDATSubHeader *)(cdat->buf + i); + cdat_len_check(hdr, errp); + i += hdr->length; + num_ent++; + } + if (i != file_size) { + error_setg(errp, "CDAT: File length missmatch"); + return; + } + + cdat_st = g_malloc0(sizeof(*cdat_st) * num_ent); + if (!cdat_st) { + error_setg(errp, "CDAT: Failed to allocate entry array"); + return; + } + + /* Set CDAT header, Entry = 0 */ + cdat_st[0].base = cdat->buf; + cdat_st[0].length = sizeof(CDATTableHeader); + i = 0; + + while (i < cdat_st[0].length) { + sum += cdat->buf[i++]; + } + + /* Read CDAT structures */ + while (i < file_size) { + hdr = (CDATSubHeader *)(cdat->buf + i); + cdat_len_check(hdr, errp); + + cdat_st[ent].base = hdr; + cdat_st[ent].length = hdr->length; + + while (cdat->buf + i < + (uint8_t *)cdat_st[ent].base + cdat_st[ent].length) { + assert(i < file_size); + sum += cdat->buf[i++]; + } + + ent++; + } + + if (sum != 0) { + warn_report("CDAT: Found checksum mismatch in %s", cdat->filename); + } + cdat->entry_len = num_ent; + cdat->entry = g_steal_pointer(&cdat_st); +} + +void cxl_doe_cdat_init(CXLComponentState *cxl_cstate, Error **errp) +{ + CDATObject *cdat = &cxl_cstate->cdat; + + if (cdat->filename) { + ct3_load_cdat(cdat, errp); + } else { + ct3_build_cdat(cdat, errp); + } +} + +void cxl_doe_cdat_update(CXLComponentState *cxl_cstate, Error **errp) +{ + CDATObject *cdat = &cxl_cstate->cdat; + + if (cdat->to_update) { + ct3_build_cdat(cdat, errp); + } +} + +void cxl_doe_cdat_release(CXLComponentState *cxl_cstate) +{ + CDATObject *cdat = &cxl_cstate->cdat; + + free(cdat->entry); + if (cdat->built_buf) { + cdat->free_cdat_table(cdat->built_buf, cdat->built_buf_len, + cdat->private); + } + if (cdat->buf) { + free(cdat->buf); + } +} diff --git a/hw/cxl/cxl-component-utils.c b/hw/cxl/cxl-component-utils.c new file mode 100644 index 0000000000..3edd303a33 --- /dev/null +++ b/hw/cxl/cxl-component-utils.c @@ -0,0 +1,405 @@ +/* + * CXL Utility library for components + * + * Copyright(C) 2020 Intel Corporation. + * + * This work is licensed under the terms of the GNU GPL, version 2. See the + * COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "hw/pci/pci.h" +#include "hw/cxl/cxl.h" + +static uint64_t cxl_cache_mem_read_reg(void *opaque, hwaddr offset, + unsigned size) +{ + CXLComponentState *cxl_cstate = opaque; + ComponentRegisters *cregs = &cxl_cstate->crb; + + if (size == 8) { + qemu_log_mask(LOG_UNIMP, + "CXL 8 byte cache mem registers not implemented\n"); + return 0; + } + + if (cregs->special_ops && cregs->special_ops->read) { + return cregs->special_ops->read(cxl_cstate, offset, size); + } else { + return cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)]; + } +} + +static void dumb_hdm_handler(CXLComponentState *cxl_cstate, hwaddr offset, + uint32_t value) +{ + ComponentRegisters *cregs = &cxl_cstate->crb; + uint32_t *cache_mem = cregs->cache_mem_registers; + bool should_commit = false; + + switch (offset) { + case A_CXL_HDM_DECODER0_CTRL: + should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT); + break; + default: + break; + } + + memory_region_transaction_begin(); + stl_le_p((uint8_t *)cache_mem + offset, value); + if (should_commit) { + ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0); + ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0); + ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1); + } + memory_region_transaction_commit(); +} + +static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + CXLComponentState *cxl_cstate = opaque; + ComponentRegisters *cregs = &cxl_cstate->crb; + uint32_t mask; + + if (size == 8) { + qemu_log_mask(LOG_UNIMP, + "CXL 8 byte cache mem registers not implemented\n"); + return; + } + mask = cregs->cache_mem_regs_write_mask[offset / sizeof(*cregs->cache_mem_regs_write_mask)]; + value &= mask; + /* RO bits should remain constant. Done by reading existing value */ + value |= ~mask & cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)]; + if (cregs->special_ops && cregs->special_ops->write) { + cregs->special_ops->write(cxl_cstate, offset, value, size); + return; + } + + if (offset >= A_CXL_HDM_DECODER_CAPABILITY && + offset <= A_CXL_HDM_DECODER0_TARGET_LIST_HI) { + dumb_hdm_handler(cxl_cstate, offset, value); + } else { + cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)] = value; + } +} + +/* + * 8.2.3 + * The access restrictions specified in Section 8.2.2 also apply to CXL 2.0 + * Component Registers. + * + * 8.2.2 + * • A 32 bit register shall be accessed as a 4 Bytes quantity. Partial + * reads are not permitted. + * • A 64 bit register shall be accessed as a 8 Bytes quantity. Partial + * reads are not permitted. + * + * As of the spec defined today, only 4 byte registers exist. 
+ */ +static const MemoryRegionOps cache_mem_ops = { + .read = cxl_cache_mem_read_reg, + .write = cxl_cache_mem_write_reg, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + .unaligned = false, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 8, + }, +}; + +void cxl_component_register_block_init(Object *obj, + CXLComponentState *cxl_cstate, + const char *type) +{ + ComponentRegisters *cregs = &cxl_cstate->crb; + + memory_region_init(&cregs->component_registers, obj, type, + CXL2_COMPONENT_BLOCK_SIZE); + + /* io registers controls link which we don't care about in QEMU */ + memory_region_init_io(&cregs->io, obj, NULL, cregs, ".io", + CXL2_COMPONENT_IO_REGION_SIZE); + memory_region_init_io(&cregs->cache_mem, obj, &cache_mem_ops, cregs, + ".cache_mem", CXL2_COMPONENT_CM_REGION_SIZE); + + memory_region_add_subregion(&cregs->component_registers, 0, &cregs->io); + memory_region_add_subregion(&cregs->component_registers, + CXL2_COMPONENT_IO_REGION_SIZE, + &cregs->cache_mem); +} + +static void ras_init_common(uint32_t *reg_state, uint32_t *write_msk) +{ + /* + * Error status is RW1C but given bits are not yet set, it can + * be handled as RO. + */ + reg_state[R_CXL_RAS_UNC_ERR_STATUS] = 0; + /* Bits 12-13 and 17-31 reserved in CXL 2.0 */ + reg_state[R_CXL_RAS_UNC_ERR_MASK] = 0x1cfff; + write_msk[R_CXL_RAS_UNC_ERR_MASK] = 0x1cfff; + reg_state[R_CXL_RAS_UNC_ERR_SEVERITY] = 0x1cfff; + write_msk[R_CXL_RAS_UNC_ERR_SEVERITY] = 0x1cfff; + reg_state[R_CXL_RAS_COR_ERR_STATUS] = 0; + reg_state[R_CXL_RAS_COR_ERR_MASK] = 0x7f; + write_msk[R_CXL_RAS_COR_ERR_MASK] = 0x7f; + /* CXL switches and devices must set */ + reg_state[R_CXL_RAS_ERR_CAP_CTRL] = 0x00; +} + +static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk, + enum reg_type type) +{ + int decoder_count = 1; + int i; + + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, DECODER_COUNT, + cxl_decoder_count_enc(decoder_count)); + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 1); + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_256B, 1); + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 1); + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, POISON_ON_ERR_CAP, 0); + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_GLOBAL_CONTROL, + HDM_DECODER_ENABLE, 0); + write_msk[R_CXL_HDM_DECODER_GLOBAL_CONTROL] = 0x3; + for (i = 0; i < decoder_count; i++) { + write_msk[R_CXL_HDM_DECODER0_BASE_LO + i * 0x20] = 0xf0000000; + write_msk[R_CXL_HDM_DECODER0_BASE_HI + i * 0x20] = 0xffffffff; + write_msk[R_CXL_HDM_DECODER0_SIZE_LO + i * 0x20] = 0xf0000000; + write_msk[R_CXL_HDM_DECODER0_SIZE_HI + i * 0x20] = 0xffffffff; + write_msk[R_CXL_HDM_DECODER0_CTRL + i * 0x20] = 0x13ff; + if (type == CXL2_DEVICE || + type == CXL2_TYPE3_DEVICE || + type == CXL2_LOGICAL_DEVICE) { + write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * 0x20] = 0xf0000000; + } else { + write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * 0x20] = 0xffffffff; + } + write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_HI + i * 0x20] = 0xffffffff; + } +} + +void cxl_component_register_init_common(uint32_t *reg_state, uint32_t *write_msk, + enum reg_type type) +{ + int caps = 0; + + /* + * In CXL 2.0 the capabilities required for each CXL component are such that, + * with the ordering chosen here, a single number can be used to define + * which capabilities should be provided. 
+ */ + switch (type) { + case CXL2_DOWNSTREAM_PORT: + case CXL2_DEVICE: + /* RAS, Link */ + caps = 2; + break; + case CXL2_UPSTREAM_PORT: + case CXL2_TYPE3_DEVICE: + case CXL2_LOGICAL_DEVICE: + /* + HDM */ + caps = 3; + break; + case CXL2_ROOT_PORT: + /* + Extended Security, + Snoop */ + caps = 5; + break; + default: + abort(); + } + + memset(reg_state, 0, CXL2_COMPONENT_CM_REGION_SIZE); + + /* CXL Capability Header Register */ + ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ID, 1); + ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, VERSION, 1); + ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, CACHE_MEM_VERSION, 1); + ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ARRAY_SIZE, caps); + +#define init_cap_reg(reg, id, version) \ + QEMU_BUILD_BUG_ON(CXL_##reg##_REGISTERS_OFFSET == 0); \ + do { \ + int which = R_CXL_##reg##_CAPABILITY_HEADER; \ + reg_state[which] = FIELD_DP32(reg_state[which], \ + CXL_##reg##_CAPABILITY_HEADER, ID, id); \ + reg_state[which] = \ + FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER, \ + VERSION, version); \ + reg_state[which] = \ + FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER, PTR, \ + CXL_##reg##_REGISTERS_OFFSET); \ + } while (0) + + init_cap_reg(RAS, 2, 2); + ras_init_common(reg_state, write_msk); + + init_cap_reg(LINK, 4, 2); + + if (caps < 3) { + return; + } + + init_cap_reg(HDM, 5, 1); + hdm_init_common(reg_state, write_msk, type); + + if (caps < 5) { + return; + } + + init_cap_reg(EXTSEC, 6, 1); + init_cap_reg(SNOOP, 8, 1); + +#undef init_cap_reg +} + +/* + * Helper to creates a DVSEC header for a CXL entity. The caller is responsible + * for tracking the valid offset. + * + * This function will build the DVSEC header on behalf of the caller and then + * copy in the remaining data for the vendor specific bits. + * It will also set up appropriate write masks. 
+ */ +void cxl_component_create_dvsec(CXLComponentState *cxl, + enum reg_type cxl_dev_type, uint16_t length, + uint16_t type, uint8_t rev, uint8_t *body) +{ + PCIDevice *pdev = cxl->pdev; + uint16_t offset = cxl->dvsec_offset; + uint8_t *wmask = pdev->wmask; + + assert(offset >= PCI_CFG_SPACE_SIZE && + ((offset + length) < PCI_CFG_SPACE_EXP_SIZE)); + assert((length & 0xf000) == 0); + assert((rev & ~0xf) == 0); + + /* Create the DVSEC in the MCFG space */ + pcie_add_capability(pdev, PCI_EXT_CAP_ID_DVSEC, 1, offset, length); + pci_set_long(pdev->config + offset + PCIE_DVSEC_HEADER1_OFFSET, + (length << 20) | (rev << 16) | CXL_VENDOR_ID); + pci_set_word(pdev->config + offset + PCIE_DVSEC_ID_OFFSET, type); + memcpy(pdev->config + offset + sizeof(DVSECHeader), + body + sizeof(DVSECHeader), + length - sizeof(DVSECHeader)); + + /* Configure write masks */ + switch (type) { + case PCIE_CXL_DEVICE_DVSEC: + /* Cntrl RW Lock - so needs explicit blocking when lock is set */ + wmask[offset + offsetof(CXLDVSECDevice, ctrl)] = 0xFD; + wmask[offset + offsetof(CXLDVSECDevice, ctrl) + 1] = 0x4F; + /* Status is RW1CS */ + wmask[offset + offsetof(CXLDVSECDevice, ctrl2)] = 0x0F; + /* Lock is RW Once */ + wmask[offset + offsetof(CXLDVSECDevice, lock)] = 0x01; + /* range1/2_base_high/low is RW Lock */ + wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi)] = 0xFF; + wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 1] = 0xFF; + wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 2] = 0xFF; + wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 3] = 0xFF; + wmask[offset + offsetof(CXLDVSECDevice, range1_base_lo) + 3] = 0xF0; + wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi)] = 0xFF; + wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 1] = 0xFF; + wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 2] = 0xFF; + wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 3] = 0xFF; + wmask[offset + offsetof(CXLDVSECDevice, range2_base_lo) + 3] = 0xF0; + break; + case NON_CXL_FUNCTION_MAP_DVSEC: + break; /* Not yet implemented */ + case EXTENSIONS_PORT_DVSEC: + wmask[offset + offsetof(CXLDVSECPortExtensions, control)] = 0x0F; + wmask[offset + offsetof(CXLDVSECPortExtensions, control) + 1] = 0x40; + wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_base)] = 0xFF; + wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_limit)] = 0xFF; + wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base)] = 0xF0; + wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base) + 1] = 0xFF; + wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit)] = 0xF0; + wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit) + 1] = 0xFF; + wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base)] = 0xF0; + wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base) + 1] = 0xFF; + wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit)] = 0xF0; + wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit) + 1] = 0xFF; + wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high)] = 0xFF; + wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 1] = 0xFF; + wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 2] = 0xFF; + wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 3] = 0xFF; + wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high)] = 0xFF; + wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 1] = 0xFF; + 
wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 2] = 0xFF; + wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 3] = 0xFF; + break; + case GPF_PORT_DVSEC: + wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl)] = 0x0F; + wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl) + 1] = 0x0F; + wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl)] = 0x0F; + wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl) + 1] = 0x0F; + break; + case GPF_DEVICE_DVSEC: + wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration)] = 0x0F; + wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration) + 1] = 0x0F; + wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power)] = 0xFF; + wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 1] = 0xFF; + wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 2] = 0xFF; + wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 3] = 0xFF; + break; + case PCIE_FLEXBUS_PORT_DVSEC: + switch (cxl_dev_type) { + case CXL2_ROOT_PORT: + /* No MLD */ + wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xbd; + break; + case CXL2_DOWNSTREAM_PORT: + wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xfd; + break; + default: /* Registers are RO for other component types */ + break; + } + /* There are rw1cs bits in the status register but never set currently */ + break; + } + + /* Update state for future DVSEC additions */ + range_init_nofail(&cxl->dvsecs[type], cxl->dvsec_offset, length); + cxl->dvsec_offset += length; +} + +uint8_t cxl_interleave_ways_enc(int iw, Error **errp) +{ + switch (iw) { + case 1: return 0x0; + case 2: return 0x1; + case 4: return 0x2; + case 8: return 0x3; + case 16: return 0x4; + case 3: return 0x8; + case 6: return 0x9; + case 12: return 0xa; + default: + error_setg(errp, "Interleave ways: %d not supported", iw); + return 0; + } +} + +uint8_t cxl_interleave_granularity_enc(uint64_t gran, Error **errp) +{ + switch (gran) { + case 256: return 0; + case 512: return 1; + case 1024: return 2; + case 2048: return 3; + case 4096: return 4; + case 8192: return 5; + case 16384: return 6; + default: + error_setg(errp, "Interleave granularity: %" PRIu64 " invalid", gran); + return 0; + } +} diff --git a/hw/cxl/cxl-device-utils.c b/hw/cxl/cxl-device-utils.c new file mode 100644 index 0000000000..83ce7a8270 --- /dev/null +++ b/hw/cxl/cxl-device-utils.c @@ -0,0 +1,271 @@ +/* + * CXL Utility library for devices + * + * Copyright(C) 2020 Intel Corporation. + * + * This work is licensed under the terms of the GNU GPL, version 2. See the + * COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/cxl/cxl.h" + +/* + * Device registers have no restrictions per the spec, and so fall back to the + * default memory mapped register rules in 8.2: + * Software shall use CXL.io Memory Read and Write to access memory mapped + * register defined in this section. Unless otherwise specified, software + * shall restrict the accesses width based on the following: + * • A 32 bit register shall be accessed as a 1 Byte, 2 Bytes or 4 Bytes + * quantity. + * • A 64 bit register shall be accessed as a 1 Byte, 2 Bytes, 4 Bytes or 8 + * Bytes + * • The address shall be a multiple of the access width, e.g. when + * accessing a register as a 4 Byte quantity, the address shall be + * multiple of 4. 
+ * • The accesses shall map to contiguous bytes.If these rules are not + * followed, the behavior is undefined + */ + +static uint64_t caps_reg_read(void *opaque, hwaddr offset, unsigned size) +{ + CXLDeviceState *cxl_dstate = opaque; + + if (size == 4) { + return cxl_dstate->caps_reg_state32[offset / sizeof(*cxl_dstate->caps_reg_state32)]; + } else { + return cxl_dstate->caps_reg_state64[offset / sizeof(*cxl_dstate->caps_reg_state64)]; + } +} + +static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size) +{ + return 0; +} + +static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size) +{ + CXLDeviceState *cxl_dstate = opaque; + + switch (size) { + case 1: + return cxl_dstate->mbox_reg_state[offset]; + case 2: + return cxl_dstate->mbox_reg_state16[offset / size]; + case 4: + return cxl_dstate->mbox_reg_state32[offset / size]; + case 8: + return cxl_dstate->mbox_reg_state64[offset / size]; + default: + g_assert_not_reached(); + } +} + +static void mailbox_mem_writel(uint32_t *reg_state, hwaddr offset, + uint64_t value) +{ + switch (offset) { + case A_CXL_DEV_MAILBOX_CTRL: + /* fallthrough */ + case A_CXL_DEV_MAILBOX_CAP: + /* RO register */ + break; + default: + qemu_log_mask(LOG_UNIMP, + "%s Unexpected 32-bit access to 0x%" PRIx64 " (WI)\n", + __func__, offset); + return; + } + + reg_state[offset / sizeof(*reg_state)] = value; +} + +static void mailbox_mem_writeq(uint64_t *reg_state, hwaddr offset, + uint64_t value) +{ + switch (offset) { + case A_CXL_DEV_MAILBOX_CMD: + break; + case A_CXL_DEV_BG_CMD_STS: + /* BG not supported */ + /* fallthrough */ + case A_CXL_DEV_MAILBOX_STS: + /* Read only register, will get updated by the state machine */ + return; + default: + qemu_log_mask(LOG_UNIMP, + "%s Unexpected 64-bit access to 0x%" PRIx64 " (WI)\n", + __func__, offset); + return; + } + + + reg_state[offset / sizeof(*reg_state)] = value; +} + +static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + CXLDeviceState *cxl_dstate = opaque; + + if (offset >= A_CXL_DEV_CMD_PAYLOAD) { + memcpy(cxl_dstate->mbox_reg_state + offset, &value, size); + return; + } + + switch (size) { + case 4: + mailbox_mem_writel(cxl_dstate->mbox_reg_state32, offset, value); + break; + case 8: + mailbox_mem_writeq(cxl_dstate->mbox_reg_state64, offset, value); + break; + default: + g_assert_not_reached(); + } + + if (ARRAY_FIELD_EX32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL, + DOORBELL)) { + cxl_process_mailbox(cxl_dstate); + } +} + +static uint64_t mdev_reg_read(void *opaque, hwaddr offset, unsigned size) +{ + uint64_t retval = 0; + + retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MEDIA_STATUS, 1); + retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MBOX_READY, 1); + + return retval; +} + +static void ro_reg_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + /* Many register sets are read only */ +} + +static const MemoryRegionOps mdev_ops = { + .read = mdev_reg_read, + .write = ro_reg_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + .unaligned = false, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +static const MemoryRegionOps mailbox_ops = { + .read = mailbox_reg_read, + .write = mailbox_reg_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + .unaligned = false, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +static const MemoryRegionOps dev_ops = { + 
.read = dev_reg_read, + .write = ro_reg_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + .unaligned = false, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +static const MemoryRegionOps caps_ops = { + .read = caps_reg_read, + .write = ro_reg_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + .unaligned = false, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 8, + }, +}; + +void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate) +{ + /* This will be a BAR, so needs to be rounded up to pow2 for PCI spec */ + memory_region_init(&cxl_dstate->device_registers, obj, "device-registers", + pow2ceil(CXL_MMIO_SIZE)); + + memory_region_init_io(&cxl_dstate->caps, obj, &caps_ops, cxl_dstate, + "cap-array", CXL_CAPS_SIZE); + memory_region_init_io(&cxl_dstate->device, obj, &dev_ops, cxl_dstate, + "device-status", CXL_DEVICE_STATUS_REGISTERS_LENGTH); + memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cxl_dstate, + "mailbox", CXL_MAILBOX_REGISTERS_LENGTH); + memory_region_init_io(&cxl_dstate->memory_device, obj, &mdev_ops, + cxl_dstate, "memory device caps", + CXL_MEMORY_DEVICE_REGISTERS_LENGTH); + + memory_region_add_subregion(&cxl_dstate->device_registers, 0, + &cxl_dstate->caps); + memory_region_add_subregion(&cxl_dstate->device_registers, + CXL_DEVICE_STATUS_REGISTERS_OFFSET, + &cxl_dstate->device); + memory_region_add_subregion(&cxl_dstate->device_registers, + CXL_MAILBOX_REGISTERS_OFFSET, + &cxl_dstate->mailbox); + memory_region_add_subregion(&cxl_dstate->device_registers, + CXL_MEMORY_DEVICE_REGISTERS_OFFSET, + &cxl_dstate->memory_device); +} + +static void device_reg_init_common(CXLDeviceState *cxl_dstate) { } + +static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate) +{ + /* 2048 payload size, with no interrupt or background support */ + ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP, + PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT); + cxl_dstate->payload_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE; +} + +static void memdev_reg_init_common(CXLDeviceState *cxl_dstate) { } + +void cxl_device_register_init_common(CXLDeviceState *cxl_dstate) +{ + uint64_t *cap_hdrs = cxl_dstate->caps_reg_state64; + const int cap_count = 3; + + /* CXL Device Capabilities Array Register */ + ARRAY_FIELD_DP64(cap_hdrs, CXL_DEV_CAP_ARRAY, CAP_ID, 0); + ARRAY_FIELD_DP64(cap_hdrs, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1); + ARRAY_FIELD_DP64(cap_hdrs, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count); + + cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1); + device_reg_init_common(cxl_dstate); + + cxl_device_cap_init(cxl_dstate, MAILBOX, 2); + mailbox_reg_init_common(cxl_dstate); + + cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000); + memdev_reg_init_common(cxl_dstate); + + assert(cxl_initialize_mailbox(cxl_dstate) == 0); +} diff --git a/hw/cxl/cxl-host-stubs.c b/hw/cxl/cxl-host-stubs.c new file mode 100644 index 0000000000..cae4afcdde --- /dev/null +++ b/hw/cxl/cxl-host-stubs.c @@ -0,0 +1,15 @@ +/* + * CXL host parameter parsing routine stubs + * + * Copyright (c) 2022 Huawei + */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/cxl/cxl.h" +#include "hw/cxl/cxl_host.h" + +void cxl_fmws_link_targets(CXLState *stat, Error **errp) {}; +void cxl_machine_init(Object *obj, CXLState *state) {}; +void cxl_hook_up_pxb_registers(PCIBus *bus, CXLState *state, Error **errp) {}; + +const MemoryRegionOps cfmws_ops; diff --git 
a/hw/cxl/cxl-host.c b/hw/cxl/cxl-host.c
new file mode 100644
index 0000000000..1adf61231a
--- /dev/null
+++ b/hw/cxl/cxl-host.c
@@ -0,0 +1,340 @@
+/*
+ * CXL host parameter parsing routines
+ *
+ * Copyright (c) 2022 Huawei
+ * Modeled loosely on the NUMA options handling in hw/core/numa.c
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qemu/bitmap.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "sysemu/qtest.h"
+#include "hw/boards.h"
+
+#include "qapi/qapi-visit-machine.h"
+#include "hw/cxl/cxl.h"
+#include "hw/cxl/cxl_host.h"
+#include "hw/pci/pci_bus.h"
+#include "hw/pci/pci_bridge.h"
+#include "hw/pci/pci_host.h"
+#include "hw/pci/pcie_port.h"
+#include "hw/pci-bridge/pci_expander_bridge.h"
+
+static void cxl_fixed_memory_window_config(CXLState *cxl_state,
+                                           CXLFixedMemoryWindowOptions *object,
+                                           Error **errp)
+{
+    g_autofree CXLFixedWindow *fw = g_malloc0(sizeof(*fw));
+    strList *target;
+    int i;
+
+    for (target = object->targets; target; target = target->next) {
+        fw->num_targets++;
+    }
+
+    fw->enc_int_ways = cxl_interleave_ways_enc(fw->num_targets, errp);
+    if (*errp) {
+        return;
+    }
+
+    fw->targets = g_malloc0_n(fw->num_targets, sizeof(*fw->targets));
+    for (i = 0, target = object->targets; target; i++, target = target->next) {
+        /* This link cannot be resolved yet, so stash the name for now */
+        fw->targets[i] = g_strdup(target->value);
+    }
+
+    if (object->size % (256 * MiB)) {
+        error_setg(errp,
+                   "Size of a CXL fixed memory window must be a multiple of 256MiB");
+        return;
+    }
+    fw->size = object->size;
+
+    if (object->has_interleave_granularity) {
+        fw->enc_int_gran =
+            cxl_interleave_granularity_enc(object->interleave_granularity,
+                                           errp);
+        if (*errp) {
+            return;
+        }
+    } else {
+        /* Default to 256 byte interleave */
+        fw->enc_int_gran = 0;
+    }
+
+    cxl_state->fixed_windows = g_list_append(cxl_state->fixed_windows,
+                                             g_steal_pointer(&fw));
+
+    return;
+}
+
+void cxl_fmws_link_targets(CXLState *cxl_state, Error **errp)
+{
+    if (cxl_state && cxl_state->fixed_windows) {
+        GList *it;
+
+        for (it = cxl_state->fixed_windows; it; it = it->next) {
+            CXLFixedWindow *fw = it->data;
+            int i;
+
+            for (i = 0; i < fw->num_targets; i++) {
+                Object *o;
+                bool ambig;
+
+                o = object_resolve_path_type(fw->targets[i],
+                                             TYPE_PXB_CXL_DEVICE,
+                                             &ambig);
+                if (!o) {
+                    error_setg(errp, "Could not resolve CXLFM target %s",
+                               fw->targets[i]);
+                    return;
+                }
+                fw->target_hbs[i] = PXB_CXL_DEV(o);
+            }
+        }
+    }
+}
+
+/* TODO: support multiple HDM decoders */
+static bool cxl_hdm_find_target(uint32_t *cache_mem, hwaddr addr,
+                                uint8_t *target)
+{
+    uint32_t ctrl;
+    uint32_t ig_enc;
+    uint32_t iw_enc;
+    uint32_t target_idx;
+
+    ctrl = cache_mem[R_CXL_HDM_DECODER0_CTRL];
+    if (!FIELD_EX32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED)) {
+        return false;
+    }
+
+    ig_enc = FIELD_EX32(ctrl, CXL_HDM_DECODER0_CTRL, IG);
+    iw_enc = FIELD_EX32(ctrl, CXL_HDM_DECODER0_CTRL, IW);
+    target_idx = (addr / cxl_decode_ig(ig_enc)) % (1 << iw_enc);
+
+    if (target_idx < 4) {
+        *target = extract32(cache_mem[R_CXL_HDM_DECODER0_TARGET_LIST_LO],
+                            target_idx * 8, 8);
+    } else {
+        *target = extract32(cache_mem[R_CXL_HDM_DECODER0_TARGET_LIST_HI],
+                            (target_idx - 4) * 8, 8);
+    }
+
+    return true;
+}
+
+static PCIDevice *cxl_cfmws_find_device(CXLFixedWindow *fw, hwaddr addr)
+{
+    CXLComponentState *hb_cstate, *usp_cstate;
+    PCIHostState *hb;
+    CXLUpstreamPort *usp;
+    int rb_index;
+    uint32_t *cache_mem;
+    uint8_t target;
+    bool target_found;
+    PCIDevice *rp, *d;
+
+    /* Address is relative to memory
region. Convert to HPA */ + addr += fw->base; + + rb_index = (addr / cxl_decode_ig(fw->enc_int_gran)) % fw->num_targets; + hb = PCI_HOST_BRIDGE(fw->target_hbs[rb_index]->cxl.cxl_host_bridge); + if (!hb || !hb->bus || !pci_bus_is_cxl(hb->bus)) { + return NULL; + } + + hb_cstate = cxl_get_hb_cstate(hb); + if (!hb_cstate) { + return NULL; + } + + cache_mem = hb_cstate->crb.cache_mem_registers; + + target_found = cxl_hdm_find_target(cache_mem, addr, &target); + if (!target_found) { + return NULL; + } + + rp = pcie_find_port_by_pn(hb->bus, target); + if (!rp) { + return NULL; + } + + d = pci_bridge_get_sec_bus(PCI_BRIDGE(rp))->devices[0]; + if (!d) { + return NULL; + } + + if (object_dynamic_cast(OBJECT(d), TYPE_CXL_TYPE3)) { + return d; + } + + /* + * Could also be a switch. Note only one level of switching currently + * supported. + */ + if (!object_dynamic_cast(OBJECT(d), TYPE_CXL_USP)) { + return NULL; + } + usp = CXL_USP(d); + + usp_cstate = cxl_usp_to_cstate(usp); + if (!usp_cstate) { + return NULL; + } + + cache_mem = usp_cstate->crb.cache_mem_registers; + + target_found = cxl_hdm_find_target(cache_mem, addr, &target); + if (!target_found) { + return NULL; + } + + d = pcie_find_port_by_pn(&PCI_BRIDGE(d)->sec_bus, target); + if (!d) { + return NULL; + } + + d = pci_bridge_get_sec_bus(PCI_BRIDGE(d))->devices[0]; + if (!d) { + return NULL; + } + + if (!object_dynamic_cast(OBJECT(d), TYPE_CXL_TYPE3)) { + return NULL; + } + + return d; +} + +static MemTxResult cxl_read_cfmws(void *opaque, hwaddr addr, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + CXLFixedWindow *fw = opaque; + PCIDevice *d; + + d = cxl_cfmws_find_device(fw, addr); + if (d == NULL) { + *data = 0; + /* Reads to invalid address return poison */ + return MEMTX_ERROR; + } + + return cxl_type3_read(d, addr + fw->base, data, size, attrs); +} + +static MemTxResult cxl_write_cfmws(void *opaque, hwaddr addr, + uint64_t data, unsigned size, + MemTxAttrs attrs) +{ + CXLFixedWindow *fw = opaque; + PCIDevice *d; + + d = cxl_cfmws_find_device(fw, addr); + if (d == NULL) { + /* Writes to invalid address are silent */ + return MEMTX_OK; + } + + return cxl_type3_write(d, addr + fw->base, data, size, attrs); +} + +const MemoryRegionOps cfmws_ops = { + .read_with_attrs = cxl_read_cfmws, + .write_with_attrs = cxl_write_cfmws, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + .unaligned = true, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + .unaligned = true, + }, +}; + +static void machine_get_cxl(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + CXLState *cxl_state = opaque; + bool value = cxl_state->is_enabled; + + visit_type_bool(v, name, &value, errp); +} + +static void machine_set_cxl(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + CXLState *cxl_state = opaque; + bool value; + + if (!visit_type_bool(v, name, &value, errp)) { + return; + } + cxl_state->is_enabled = value; +} + +static void machine_get_cfmw(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + CXLFixedMemoryWindowOptionsList **list = opaque; + + visit_type_CXLFixedMemoryWindowOptionsList(v, name, list, errp); +} + +static void machine_set_cfmw(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + CXLState *state = opaque; + CXLFixedMemoryWindowOptionsList *cfmw_list = NULL; + CXLFixedMemoryWindowOptionsList *it; + + visit_type_CXLFixedMemoryWindowOptionsList(v, name, &cfmw_list, errp); + if 
(!cfmw_list) {
+        return;
+    }
+
+    for (it = cfmw_list; it; it = it->next) {
+        cxl_fixed_memory_window_config(state, it->value, errp);
+    }
+    state->cfmw_list = cfmw_list;
+}
+
+void cxl_machine_init(Object *obj, CXLState *state)
+{
+    object_property_add(obj, "cxl", "bool", machine_get_cxl,
+                        machine_set_cxl, NULL, state);
+    object_property_set_description(obj, "cxl",
+                                    "Set on/off to enable/disable "
+                                    "CXL instantiation");
+
+    object_property_add(obj, "cxl-fmw", "CXLFixedMemoryWindow",
+                        machine_get_cfmw, machine_set_cfmw,
+                        NULL, state);
+    object_property_set_description(obj, "cxl-fmw",
+                                    "CXL Fixed Memory Windows (array)");
+}
+
+void cxl_hook_up_pxb_registers(PCIBus *bus, CXLState *state, Error **errp)
+{
+    /* Walk the pci busses looking for pxb busses to hook up */
+    if (bus) {
+        QLIST_FOREACH(bus, &bus->child, sibling) {
+            if (!pci_bus_is_root(bus)) {
+                continue;
+            }
+            if (pci_bus_is_cxl(bus)) {
+                if (!state->is_enabled) {
+                    error_setg(errp, "CXL host bridges present, but cxl=off");
+                    return;
+                }
+                pxb_cxl_hook_up_registers(state, bus, errp);
+            }
+        }
+    }
+}
diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c
new file mode 100644
index 0000000000..bc1bb18844
--- /dev/null
+++ b/hw/cxl/cxl-mailbox-utils.c
@@ -0,0 +1,478 @@
+/*
+ * CXL Utility library for mailbox interface
+ *
+ * Copyright(C) 2020 Intel Corporation.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/cxl/cxl.h"
+#include "hw/pci/pci.h"
+#include "qemu/cutils.h"
+#include "qemu/log.h"
+#include "qemu/uuid.h"
+
+/*
+ * How to add a new command, by example: the command set FOO, with cmd BAR.
+ * 1. Add the command set and cmd to the enum.
+ *    FOO = 0x7f,
+ *        #define BAR 0
+ * 2. Implement the handler
+ *    static ret_code cmd_foo_bar(struct cxl_cmd *cmd,
+ *                                CXLDeviceState *cxl_dstate, uint16_t *len)
+ * 3. Add the command to the cxl_cmd_set[][]
+ *    [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
+ * 4. Implement your handler
+ *    define_mailbox_handler(FOO_BAR) { ... return CXL_MBOX_SUCCESS; }
+ *
+ *
+ * Writing the handler:
+ * The handler will provide the &struct cxl_cmd, the &CXLDeviceState, and the
+ * in/out length of the payload. The handler is responsible for consuming the
+ * payload from cmd->payload and operating upon it as necessary. It must then
+ * fill the output data into cmd->payload (overwriting what was there),
+ * setting the length, and returning a valid return code.
+ *
+ * XXX: The handler need not worry about endianness. The payload is read out of
+ * a register interface that already deals with it.
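+ *
+ * As a purely illustrative sketch (not part of this patch), a hypothetical
+ * no-op command set FOO (0x7f) with command BAR (0x0), taking no input and
+ * producing no output payload, would follow the steps above like this:
+ *
+ *     FOO = 0x7f,
+ *         #define BAR 0x0
+ *
+ *     static ret_code cmd_foo_bar(struct cxl_cmd *cmd,
+ *                                 CXLDeviceState *cxl_dstate, uint16_t *len)
+ *     {
+ *         *len = 0;
+ *         return CXL_MBOX_SUCCESS;
+ *     }
+ *
+ *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, 0, 0 },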
+ */ + +enum { + EVENTS = 0x01, + #define GET_RECORDS 0x0 + #define CLEAR_RECORDS 0x1 + #define GET_INTERRUPT_POLICY 0x2 + #define SET_INTERRUPT_POLICY 0x3 + FIRMWARE_UPDATE = 0x02, + #define GET_INFO 0x0 + TIMESTAMP = 0x03, + #define GET 0x0 + #define SET 0x1 + LOGS = 0x04, + #define GET_SUPPORTED 0x0 + #define GET_LOG 0x1 + IDENTIFY = 0x40, + #define MEMORY_DEVICE 0x0 + CCLS = 0x41, + #define GET_PARTITION_INFO 0x0 + #define GET_LSA 0x2 + #define SET_LSA 0x3 +}; + +/* 8.2.8.4.5.1 Command Return Codes */ +typedef enum { + CXL_MBOX_SUCCESS = 0x0, + CXL_MBOX_BG_STARTED = 0x1, + CXL_MBOX_INVALID_INPUT = 0x2, + CXL_MBOX_UNSUPPORTED = 0x3, + CXL_MBOX_INTERNAL_ERROR = 0x4, + CXL_MBOX_RETRY_REQUIRED = 0x5, + CXL_MBOX_BUSY = 0x6, + CXL_MBOX_MEDIA_DISABLED = 0x7, + CXL_MBOX_FW_XFER_IN_PROGRESS = 0x8, + CXL_MBOX_FW_XFER_OUT_OF_ORDER = 0x9, + CXL_MBOX_FW_AUTH_FAILED = 0xa, + CXL_MBOX_FW_INVALID_SLOT = 0xb, + CXL_MBOX_FW_ROLLEDBACK = 0xc, + CXL_MBOX_FW_REST_REQD = 0xd, + CXL_MBOX_INVALID_HANDLE = 0xe, + CXL_MBOX_INVALID_PA = 0xf, + CXL_MBOX_INJECT_POISON_LIMIT = 0x10, + CXL_MBOX_PERMANENT_MEDIA_FAILURE = 0x11, + CXL_MBOX_ABORTED = 0x12, + CXL_MBOX_INVALID_SECURITY_STATE = 0x13, + CXL_MBOX_INCORRECT_PASSPHRASE = 0x14, + CXL_MBOX_UNSUPPORTED_MAILBOX = 0x15, + CXL_MBOX_INVALID_PAYLOAD_LENGTH = 0x16, + CXL_MBOX_MAX = 0x17 +} ret_code; + +struct cxl_cmd; +typedef ret_code (*opcode_handler)(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, uint16_t *len); +struct cxl_cmd { + const char *name; + opcode_handler handler; + ssize_t in; + uint16_t effect; /* Reported in CEL */ + uint8_t *payload; +}; + +#define DEFINE_MAILBOX_HANDLER_ZEROED(name, size) \ + uint16_t __zero##name = size; \ + static ret_code cmd_##name(struct cxl_cmd *cmd, \ + CXLDeviceState *cxl_dstate, uint16_t *len) \ + { \ + *len = __zero##name; \ + memset(cmd->payload, 0, *len); \ + return CXL_MBOX_SUCCESS; \ + } +#define DEFINE_MAILBOX_HANDLER_NOP(name) \ + static ret_code cmd_##name(struct cxl_cmd *cmd, \ + CXLDeviceState *cxl_dstate, uint16_t *len) \ + { \ + return CXL_MBOX_SUCCESS; \ + } + +DEFINE_MAILBOX_HANDLER_ZEROED(events_get_records, 0x20); +DEFINE_MAILBOX_HANDLER_NOP(events_clear_records); +DEFINE_MAILBOX_HANDLER_ZEROED(events_get_interrupt_policy, 4); +DEFINE_MAILBOX_HANDLER_NOP(events_set_interrupt_policy); + +/* 8.2.9.2.1 */ +static ret_code cmd_firmware_update_get_info(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) +{ + struct { + uint8_t slots_supported; + uint8_t slot_info; + uint8_t caps; + uint8_t rsvd[0xd]; + char fw_rev1[0x10]; + char fw_rev2[0x10]; + char fw_rev3[0x10]; + char fw_rev4[0x10]; + } QEMU_PACKED *fw_info; + QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50); + + if (cxl_dstate->pmem_size < (256 << 20)) { + return CXL_MBOX_INTERNAL_ERROR; + } + + fw_info = (void *)cmd->payload; + memset(fw_info, 0, sizeof(*fw_info)); + + fw_info->slots_supported = 2; + fw_info->slot_info = BIT(0) | BIT(3); + fw_info->caps = 0; + pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0"); + + *len = sizeof(*fw_info); + return CXL_MBOX_SUCCESS; +} + +/* 8.2.9.3.1 */ +static ret_code cmd_timestamp_get(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) +{ + uint64_t time, delta; + uint64_t final_time = 0; + + if (cxl_dstate->timestamp.set) { + /* First find the delta from the last time the host set the time. 
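+         * (Illustrative numbers only: if the host last wrote host_set = 1000
+         * and 500 ns of QEMU_CLOCK_VIRTUAL time have elapsed since last_set,
+         * the final_time reported below is 1000 + 500 = 1500 ns.)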
         */
+        time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+        delta = time - cxl_dstate->timestamp.last_set;
+        final_time = cxl_dstate->timestamp.host_set + delta;
+    }
+
+    /* Then adjust the actual time */
+    stq_le_p(cmd->payload, final_time);
+    *len = 8;
+
+    return CXL_MBOX_SUCCESS;
+}
+
+/* 8.2.9.3.2 */
+static ret_code cmd_timestamp_set(struct cxl_cmd *cmd,
+                                  CXLDeviceState *cxl_dstate,
+                                  uint16_t *len)
+{
+    cxl_dstate->timestamp.set = true;
+    cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+
+    cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)cmd->payload);
+
+    *len = 0;
+    return CXL_MBOX_SUCCESS;
+}
+
+static QemuUUID cel_uuid;
+
+/* 8.2.9.4.1 */
+static ret_code cmd_logs_get_supported(struct cxl_cmd *cmd,
+                                       CXLDeviceState *cxl_dstate,
+                                       uint16_t *len)
+{
+    struct {
+        uint16_t entries;
+        uint8_t rsvd[6];
+        struct {
+            QemuUUID uuid;
+            uint32_t size;
+        } log_entries[1];
+    } QEMU_PACKED *supported_logs = (void *)cmd->payload;
+    QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);
+
+    supported_logs->entries = 1;
+    supported_logs->log_entries[0].uuid = cel_uuid;
+    supported_logs->log_entries[0].size = 4 * cxl_dstate->cel_size;
+
+    *len = sizeof(*supported_logs);
+    return CXL_MBOX_SUCCESS;
+}
+
+/* 8.2.9.4.2 */
+static ret_code cmd_logs_get_log(struct cxl_cmd *cmd,
+                                 CXLDeviceState *cxl_dstate,
+                                 uint16_t *len)
+{
+    struct {
+        QemuUUID uuid;
+        uint32_t offset;
+        uint32_t length;
+    } QEMU_PACKED QEMU_ALIGNED(16) *get_log = (void *)cmd->payload;
+
+    /*
+     * 8.2.9.4.2
+     * The device shall return Invalid Parameter if the Offset or Length
+     * fields attempt to access beyond the size of the log as reported by Get
+     * Supported Logs.
+     *
+     * XXX: Spec is wrong, "Invalid Parameter" isn't a thing.
+     * XXX: Spec doesn't address what to do about an incorrect UUID.
+     *
+     * The CEL buffer is large enough to fit all commands in the emulation, so
+     * the only possible failure would be if the mailbox itself isn't big
+     * enough.
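+     * For example (illustrative values only): with the 2048 byte mailbox
+     * payload set up in mailbox_reg_init_common(), a request with
+     * offset = 2000 and length = 100 exceeds payload_size and is rejected
+     * below with CXL_MBOX_INVALID_INPUT.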
+ */ + if (get_log->offset + get_log->length > cxl_dstate->payload_size) { + return CXL_MBOX_INVALID_INPUT; + } + + if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) { + return CXL_MBOX_UNSUPPORTED; + } + + /* Store off everything to local variables so we can wipe out the payload */ + *len = get_log->length; + + memmove(cmd->payload, cxl_dstate->cel_log + get_log->offset, + get_log->length); + + return CXL_MBOX_SUCCESS; +} + +/* 8.2.9.5.1.1 */ +static ret_code cmd_identify_memory_device(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) +{ + struct { + char fw_revision[0x10]; + uint64_t total_capacity; + uint64_t volatile_capacity; + uint64_t persistent_capacity; + uint64_t partition_align; + uint16_t info_event_log_size; + uint16_t warning_event_log_size; + uint16_t failure_event_log_size; + uint16_t fatal_event_log_size; + uint32_t lsa_size; + uint8_t poison_list_max_mer[3]; + uint16_t inject_poison_limit; + uint8_t poison_caps; + uint8_t qos_telemetry_caps; + } QEMU_PACKED *id; + QEMU_BUILD_BUG_ON(sizeof(*id) != 0x43); + + CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate); + CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); + uint64_t size = cxl_dstate->pmem_size; + + if (!QEMU_IS_ALIGNED(size, 256 << 20)) { + return CXL_MBOX_INTERNAL_ERROR; + } + + id = (void *)cmd->payload; + memset(id, 0, sizeof(*id)); + + /* PMEM only */ + snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0); + + id->total_capacity = size / (256 << 20); + id->persistent_capacity = size / (256 << 20); + id->lsa_size = cvc->get_lsa_size(ct3d); + + *len = sizeof(*id); + return CXL_MBOX_SUCCESS; +} + +static ret_code cmd_ccls_get_partition_info(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) +{ + struct { + uint64_t active_vmem; + uint64_t active_pmem; + uint64_t next_vmem; + uint64_t next_pmem; + } QEMU_PACKED *part_info = (void *)cmd->payload; + QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20); + uint64_t size = cxl_dstate->pmem_size; + + if (!QEMU_IS_ALIGNED(size, 256 << 20)) { + return CXL_MBOX_INTERNAL_ERROR; + } + + /* PMEM only */ + part_info->active_vmem = 0; + part_info->next_vmem = 0; + part_info->active_pmem = size / (256 << 20); + part_info->next_pmem = 0; + + *len = sizeof(*part_info); + return CXL_MBOX_SUCCESS; +} + +static ret_code cmd_ccls_get_lsa(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) +{ + struct { + uint32_t offset; + uint32_t length; + } QEMU_PACKED *get_lsa; + CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate); + CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); + uint32_t offset, length; + + get_lsa = (void *)cmd->payload; + offset = get_lsa->offset; + length = get_lsa->length; + + if (offset + length > cvc->get_lsa_size(ct3d)) { + *len = 0; + return CXL_MBOX_INVALID_INPUT; + } + + *len = cvc->get_lsa(ct3d, get_lsa, length, offset); + return CXL_MBOX_SUCCESS; +} + +static ret_code cmd_ccls_set_lsa(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) +{ + struct set_lsa_pl { + uint32_t offset; + uint32_t rsvd; + uint8_t data[]; + } QEMU_PACKED; + struct set_lsa_pl *set_lsa_payload = (void *)cmd->payload; + CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate); + CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); + const size_t hdr_len = offsetof(struct set_lsa_pl, data); + uint16_t plen = *len; + + *len = 0; + if (!plen) { + return CXL_MBOX_SUCCESS; + } + + if (set_lsa_payload->offset + plen > cvc->get_lsa_size(ct3d) + hdr_len) { + return CXL_MBOX_INVALID_INPUT; + } + 
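+    /*
+     * The Set LSA input payload is an 8 byte header (offset + reserved)
+     * followed by the data to store; plen counts the whole payload, which
+     * is why the bounds check above allows for hdr_len and why the header
+     * length is stripped off before handing the data to set_lsa().
+     */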
plen -= hdr_len; + + cvc->set_lsa(ct3d, set_lsa_payload->data, plen, set_lsa_payload->offset); + return CXL_MBOX_SUCCESS; +} + +#define IMMEDIATE_CONFIG_CHANGE (1 << 1) +#define IMMEDIATE_DATA_CHANGE (1 << 2) +#define IMMEDIATE_POLICY_CHANGE (1 << 3) +#define IMMEDIATE_LOG_CHANGE (1 << 4) + +static struct cxl_cmd cxl_cmd_set[256][256] = { + [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS", + cmd_events_get_records, 1, 0 }, + [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS", + cmd_events_clear_records, ~0, IMMEDIATE_LOG_CHANGE }, + [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY", + cmd_events_get_interrupt_policy, 0, 0 }, + [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY", + cmd_events_set_interrupt_policy, 4, IMMEDIATE_CONFIG_CHANGE }, + [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO", + cmd_firmware_update_get_info, 0, 0 }, + [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 }, + [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8, IMMEDIATE_POLICY_CHANGE }, + [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, 0 }, + [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, + [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE", + cmd_identify_memory_device, 0, 0 }, + [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO", + cmd_ccls_get_partition_info, 0, 0 }, + [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 }, + [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa, + ~0, IMMEDIATE_CONFIG_CHANGE | IMMEDIATE_DATA_CHANGE }, +}; + +void cxl_process_mailbox(CXLDeviceState *cxl_dstate) +{ + uint16_t ret = CXL_MBOX_SUCCESS; + struct cxl_cmd *cxl_cmd; + uint64_t status_reg; + opcode_handler h; + uint64_t command_reg = cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD]; + + uint8_t set = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND_SET); + uint8_t cmd = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND); + uint16_t len = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH); + cxl_cmd = &cxl_cmd_set[set][cmd]; + h = cxl_cmd->handler; + if (h) { + if (len == cxl_cmd->in || cxl_cmd->in == ~0) { + cxl_cmd->payload = cxl_dstate->mbox_reg_state + + A_CXL_DEV_CMD_PAYLOAD; + ret = (*h)(cxl_cmd, cxl_dstate, &len); + assert(len <= cxl_dstate->payload_size); + } else { + ret = CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + } else { + qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n", + set << 8 | cmd); + ret = CXL_MBOX_UNSUPPORTED; + } + + /* Set the return code */ + status_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_STS, ERRNO, ret); + + /* Set the return length */ + command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND_SET, 0); + command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND, 0); + command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH, len); + + cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD] = command_reg; + cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_STS] = status_reg; + + /* Tell the host we're done */ + ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL, + DOORBELL, 0); +} + +int cxl_initialize_mailbox(CXLDeviceState *cxl_dstate) +{ + /* CXL 2.0: Table 169 Get Supported Logs Log Entry */ + const char *cel_uuidstr = "0da9c0b5-bf41-4b78-8f79-96b1623b3f17"; + + for (int set = 0; set < 256; set++) { + for (int cmd = 0; cmd < 256; cmd++) { + if (cxl_cmd_set[set][cmd].handler) { + struct cxl_cmd *c = &cxl_cmd_set[set][cmd]; + struct cel_log *log = + &cxl_dstate->cel_log[cxl_dstate->cel_size]; + + log->opcode = (set << 8) | 
cmd; + log->effect = c->effect; + cxl_dstate->cel_size++; + } + } + } + + return qemu_uuid_parse(cel_uuidstr, &cel_uuid); +} diff --git a/hw/cxl/meson.build b/hw/cxl/meson.build new file mode 100644 index 0000000000..cfa95ffd40 --- /dev/null +++ b/hw/cxl/meson.build @@ -0,0 +1,13 @@ +softmmu_ss.add(when: 'CONFIG_CXL', + if_true: files( + 'cxl-component-utils.c', + 'cxl-device-utils.c', + 'cxl-mailbox-utils.c', + 'cxl-host.c', + 'cxl-cdat.c', + ), + if_false: files( + 'cxl-host-stubs.c', + )) + +softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('cxl-host-stubs.c')) diff --git a/hw/display/Kconfig b/hw/display/Kconfig index a2306b67d8..a1b159becd 100644 --- a/hw/display/Kconfig +++ b/hw/display/Kconfig @@ -49,7 +49,7 @@ config VGA_ISA depends on ISA_BUS select VGA -config VGA_ISA_MM +config VGA_MMIO bool select VGA diff --git a/hw/display/acpi-vga-stub.c b/hw/display/acpi-vga-stub.c new file mode 100644 index 0000000000..a9b0ecf76d --- /dev/null +++ b/hw/display/acpi-vga-stub.c @@ -0,0 +1,7 @@ +#include "qemu/osdep.h" +#include "hw/acpi/acpi_aml_interface.h" +#include "vga_int.h" + +void build_vga_aml(AcpiDevAmlIf *adev, Aml *scope) +{ +} diff --git a/hw/display/acpi-vga.c b/hw/display/acpi-vga.c new file mode 100644 index 0000000000..f0e9ef1fcf --- /dev/null +++ b/hw/display/acpi-vga.c @@ -0,0 +1,26 @@ +#include "qemu/osdep.h" +#include "hw/acpi/acpi_aml_interface.h" +#include "hw/pci/pci.h" +#include "vga_int.h" + +void build_vga_aml(AcpiDevAmlIf *adev, Aml *scope) +{ + int s3d = 0; + Aml *method; + + if (object_dynamic_cast(OBJECT(adev), "qxl-vga")) { + s3d = 3; + } + + method = aml_method("_S1D", 0, AML_NOTSERIALIZED); + aml_append(method, aml_return(aml_int(0))); + aml_append(scope, method); + + method = aml_method("_S2D", 0, AML_NOTSERIALIZED); + aml_append(method, aml_return(aml_int(0))); + aml_append(scope, method); + + method = aml_method("_S3D", 0, AML_NOTSERIALIZED); + aml_append(method, aml_return(aml_int(s3d))); + aml_append(scope, method); +} diff --git a/hw/display/artist.c b/hw/display/artist.c index aa7bd594aa..fde050c882 100644 --- a/hw/display/artist.c +++ b/hw/display/artist.c @@ -1,13 +1,13 @@ /* * QEMU HP Artist Emulation * - * Copyright (c) 2019 Sven Schnelle + * Copyright (c) 2019-2022 Sven Schnelle + * Copyright (c) 2022 Helge Deller * * This work is licensed under the terms of the GNU GPL, version 2 or later. */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/error-report.h" #include "qemu/log.h" #include "qemu/module.h" @@ -26,12 +26,6 @@ #define TYPE_ARTIST "artist" OBJECT_DECLARE_SIMPLE_TYPE(ARTISTState, ARTIST) -#ifdef HOST_WORDS_BIGENDIAN -#define ROP8OFF(_i) (3 - (_i)) -#else -#define ROP8OFF -#endif - struct vram_buffer { MemoryRegion mr; uint8_t *data; @@ -80,6 +74,7 @@ struct ARTISTState { uint32_t line_pattern_skip; uint32_t cursor_pos; + uint32_t cursor_cntrl; uint32_t cursor_height; uint32_t cursor_width; @@ -87,11 +82,11 @@ struct ARTISTState { uint32_t plane_mask; uint32_t reg_100080; - uint32_t reg_300200; - uint32_t reg_300208; - uint32_t reg_300218; + uint32_t horiz_backporch; + uint32_t active_lines_low; + uint32_t misc_video; + uint32_t misc_ctrl; - uint32_t cmap_bm_access; uint32_t dst_bm_access; uint32_t src_bm_access; uint32_t control_plane; @@ -105,6 +100,9 @@ struct ARTISTState { int draw_line_pattern; }; +/* hardware allows up to 64x64, but we emulate 32x32 only. 
*/ +#define NGLE_MAX_SPRITE_SIZE 32 + typedef enum { ARTIST_BUFFER_AP = 1, ARTIST_BUFFER_OVERLAY = 2, @@ -134,7 +132,7 @@ typedef enum { PATTERN_LINE_START = 0x100ecc, LINE_SIZE = 0x100e04, LINE_END = 0x100e44, - CMAP_BM_ACCESS = 0x118000, + DST_SRC_BM_ACCESS = 0x118000, DST_BM_ACCESS = 0x118004, SRC_BM_ACCESS = 0x118008, CONTROL_PLANE = 0x11800c, @@ -142,8 +140,14 @@ typedef enum { BG_COLOR = 0x118014, PLANE_MASK = 0x118018, IMAGE_BITMAP_OP = 0x11801c, - CURSOR_POS = 0x300100, - CURSOR_CTRL = 0x300104, + CURSOR_POS = 0x300100, /* reg17 */ + CURSOR_CTRL = 0x300104, /* reg18 */ + MISC_VIDEO = 0x300218, /* reg21 */ + MISC_CTRL = 0x300308, /* reg27 */ + HORIZ_BACKPORCH = 0x300200, /* reg19 */ + ACTIVE_LINES_LOW = 0x300208,/* reg20 */ + FIFO1 = 0x300008, /* reg34 */ + FIFO2 = 0x380008, } artist_reg_t; typedef enum { @@ -176,22 +180,30 @@ static const char *artist_reg_name(uint64_t addr) REG_NAME(TRANSFER_DATA); REG_NAME(CONTROL_PLANE); REG_NAME(IMAGE_BITMAP_OP); - REG_NAME(CMAP_BM_ACCESS); + REG_NAME(DST_SRC_BM_ACCESS); REG_NAME(DST_BM_ACCESS); REG_NAME(SRC_BM_ACCESS); REG_NAME(CURSOR_POS); REG_NAME(CURSOR_CTRL); + REG_NAME(HORIZ_BACKPORCH); + REG_NAME(ACTIVE_LINES_LOW); + REG_NAME(MISC_VIDEO); + REG_NAME(MISC_CTRL); REG_NAME(LINE_XY); REG_NAME(PATTERN_LINE_START); REG_NAME(LINE_SIZE); REG_NAME(LINE_END); REG_NAME(FONT_WRITE_INCR_Y); REG_NAME(FONT_WRITE_START); + REG_NAME(FIFO1); + REG_NAME(FIFO2); } return ""; } #undef REG_NAME +static void artist_invalidate(void *opaque); + /* artist has a fixed line length of 2048 bytes. */ #define ADDR_TO_Y(addr) extract32(addr, 11, 11) #define ADDR_TO_X(addr) extract32(addr, 0, 11) @@ -212,8 +224,9 @@ static void artist_invalidate_lines(struct vram_buffer *buf, int start = starty * buf->width; int size; - if (starty + height > buf->height) + if (starty + height > buf->height) { height = buf->height - starty; + } size = height * buf->width; @@ -222,40 +235,14 @@ static void artist_invalidate_lines(struct vram_buffer *buf, } } -static int vram_write_pix_per_transfer(ARTISTState *s) -{ - if (s->cmap_bm_access) { - return 1 << ((s->cmap_bm_access >> 27) & 0x0f); - } else { - return 1 << ((s->dst_bm_access >> 27) & 0x0f); - } -} - -static int vram_pixel_length(ARTISTState *s) -{ - if (s->cmap_bm_access) { - return (s->cmap_bm_access >> 24) & 0x07; - } else { - return (s->dst_bm_access >> 24) & 0x07; - } -} - static int vram_write_bufidx(ARTISTState *s) { - if (s->cmap_bm_access) { - return (s->cmap_bm_access >> 12) & 0x0f; - } else { - return (s->dst_bm_access >> 12) & 0x0f; - } + return (s->dst_bm_access >> 12) & 0x0f; } static int vram_read_bufidx(ARTISTState *s) { - if (s->cmap_bm_access) { - return (s->cmap_bm_access >> 12) & 0x0f; - } else { - return (s->src_bm_access >> 12) & 0x0f; - } + return (s->src_bm_access >> 12) & 0x0f; } static struct vram_buffer *vram_read_buffer(ARTISTState *s) @@ -327,153 +314,68 @@ static void artist_rop8(ARTISTState *s, struct vram_buffer *buf, static void artist_get_cursor_pos(ARTISTState *s, int *x, int *y) { /* - * Don't know whether these magic offset values are configurable via - * some register. They are the same for all resolutions, so don't - * bother about it. + * The emulated Artist graphic is like a CRX graphic, and as such + * it's usually fixed at 1280x1024 pixels. + * Other resolutions may work, but no guarantee. 
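+ * As a rough worked example (values illustrative): this model forces
+ * horiz_backporch to 0x00202000 on register writes (see HORIZ_BACKPORCH
+ * handling below), so horizBackPorch evaluates to 32 + 32 + 2 = 66 and
+ * hbp_times_vi to 66 * 4 = 264; the cursor X position is then derived
+ * from the cursor_pos bit fields relative to that offset.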
*/ - *y = 0x47a - artist_get_y(s->cursor_pos); - *x = ((artist_get_x(s->cursor_pos) - 338) / 2); + unsigned int hbp_times_vi, horizBackPorch; + int16_t xHi, xLo; + const int videoInterleave = 4; + const int pipelineDelay = 4; + + /* ignore if uninitialized */ + if (s->cursor_pos == 0) { + *x = *y = 0; + return; + } + + /* + * Calculate X position based on backporch and interleave values. + * Based on code from Xorg X11R6.6 + */ + horizBackPorch = ((s->horiz_backporch & 0xff0000) >> 16) + + ((s->horiz_backporch & 0xff00) >> 8) + 2; + hbp_times_vi = horizBackPorch * videoInterleave; + xHi = s->cursor_pos >> 19; + *x = ((xHi + pipelineDelay) * videoInterleave) - hbp_times_vi; + + xLo = (s->cursor_pos >> 16) & 0x07; + *x += ((xLo - hbp_times_vi) & (videoInterleave - 1)) + 8 - 1; + + /* subtract cursor offset from cursor control register */ + *x -= (s->cursor_cntrl & 0xf0) >> 4; + + /* Calculate Y position */ + *y = s->height - artist_get_y(s->cursor_pos); + *y -= (s->cursor_cntrl & 0x0f); if (*x > s->width) { - *x = 0; + *x = s->width; } if (*y > s->height) { - *y = 0; + *y = s->height; } } +static inline bool cursor_visible(ARTISTState *s) +{ + /* cursor is visible if bit 0x80 is set in cursor_cntrl */ + return s->cursor_cntrl & 0x80; +} + static void artist_invalidate_cursor(ARTISTState *s) { int x, y; - artist_get_cursor_pos(s, &x, &y); - artist_invalidate_lines(&s->vram_buffer[ARTIST_BUFFER_AP], - y, s->cursor_height); -} -static void vram_bit_write(ARTISTState *s, int posy, bool incr_x, - int size, uint32_t data) -{ - struct vram_buffer *buf; - uint32_t vram_bitmask = s->vram_bitmask; - int mask, i, pix_count, pix_length; - unsigned int posx, offset, width; - uint8_t *data8, *p; - - pix_count = vram_write_pix_per_transfer(s); - pix_length = vram_pixel_length(s); - - buf = vram_write_buffer(s); - width = buf->width; - - if (s->cmap_bm_access) { - offset = s->vram_pos; - } else { - posx = ADDR_TO_X(s->vram_pos >> 2); - posy += ADDR_TO_Y(s->vram_pos >> 2); - offset = posy * width + posx; - } - - if (!buf->size || offset >= buf->size) { + if (!cursor_visible(s)) { return; } - p = buf->data; - - if (pix_count > size * 8) { - pix_count = size * 8; - } - - switch (pix_length) { - case 0: - if (s->image_bitmap_op & 0x20000000) { - data &= vram_bitmask; - } - - for (i = 0; i < pix_count; i++) { - uint32_t off = offset + pix_count - 1 - i; - if (off < buf->size) { - artist_rop8(s, buf, off, - (data & 1) ? 
(s->plane_mask >> 24) : 0); - } - data >>= 1; - } - memory_region_set_dirty(&buf->mr, offset, pix_count); - break; - - case 3: - if (s->cmap_bm_access) { - if (offset + 3 < buf->size) { - *(uint32_t *)(p + offset) = data; - } - break; - } - data8 = (uint8_t *)&data; - - for (i = 3; i >= 0; i--) { - if (!(s->image_bitmap_op & 0x20000000) || - s->vram_bitmask & (1 << (28 + i))) { - uint32_t off = offset + 3 - i; - if (off < buf->size) { - artist_rop8(s, buf, off, data8[ROP8OFF(i)]); - } - } - } - memory_region_set_dirty(&buf->mr, offset, 3); - break; - - case 6: - switch (size) { - default: - case 4: - vram_bitmask = s->vram_bitmask; - break; - - case 2: - vram_bitmask = s->vram_bitmask >> 16; - break; - - case 1: - vram_bitmask = s->vram_bitmask >> 24; - break; - } - - for (i = 0; i < pix_count && offset + i < buf->size; i++) { - mask = 1 << (pix_count - 1 - i); - - if (!(s->image_bitmap_op & 0x20000000) || - (vram_bitmask & mask)) { - if (data & mask) { - artist_rop8(s, buf, offset + i, s->fg_color); - } else { - if (!(s->image_bitmap_op & 0x10000002)) { - artist_rop8(s, buf, offset + i, s->bg_color); - } - } - } - } - memory_region_set_dirty(&buf->mr, offset, pix_count); - break; - - default: - qemu_log_mask(LOG_UNIMP, "%s: unknown pixel length %d\n", - __func__, pix_length); - break; - } - - if (incr_x) { - if (s->cmap_bm_access) { - s->vram_pos += 4; - } else { - s->vram_pos += pix_count << 2; - } - } - - if (vram_write_bufidx(s) == ARTIST_BUFFER_CURSOR1 || - vram_write_bufidx(s) == ARTIST_BUFFER_CURSOR2) { - artist_invalidate_cursor(s); - } + artist_get_cursor_pos(s, &x, &y); + artist_invalidate_lines(&s->vram_buffer[ARTIST_BUFFER_AP], + y, s->cursor_height); } static void block_move(ARTISTState *s, @@ -598,10 +500,9 @@ static void draw_line(ARTISTState *s, if ((x1 >= buf->width && x2 >= buf->width) || (y1 >= buf->height && y2 >= buf->height)) { - return; + return; } - if (update_start) { s->vram_start = (x2 << 16) | y2; } @@ -680,15 +581,16 @@ static void draw_line(ARTISTState *s, } x++; } while (x <= x2 && (max_pix == -1 || --max_pix > 0)); - if (c1) - artist_invalidate_lines(buf, x, dy+1); - else - artist_invalidate_lines(buf, y, dx+1); + + if (c1) { + artist_invalidate_lines(buf, x1, x2 - x1); + } else { + artist_invalidate_lines(buf, y1 > y2 ? 
y2 : y1, x2 - x1); + } } static void draw_line_pattern_start(ARTISTState *s) { - int startx = artist_get_x(s->vram_start); int starty = artist_get_y(s->vram_start); int endx = artist_get_x(s->blockmove_size); @@ -701,7 +603,6 @@ static void draw_line_pattern_start(ARTISTState *s) static void draw_line_pattern_next(ARTISTState *s) { - int startx = artist_get_x(s->vram_start); int starty = artist_get_y(s->vram_start); int endx = artist_get_x(s->blockmove_size); @@ -716,7 +617,6 @@ static void draw_line_pattern_next(ARTISTState *s) static void draw_line_size(ARTISTState *s, bool update_start) { - int startx = artist_get_x(s->vram_start); int starty = artist_get_y(s->vram_start); int endx = artist_get_x(s->line_size); @@ -727,7 +627,6 @@ static void draw_line_size(ARTISTState *s, bool update_start) static void draw_line_xy(ARTISTState *s, bool update_start) { - int startx = artist_get_x(s->vram_start); int starty = artist_get_y(s->vram_start); int sizex = artist_get_x(s->blockmove_size); @@ -777,7 +676,6 @@ static void draw_line_xy(ARTISTState *s, bool update_start) static void draw_line_end(ARTISTState *s, bool update_start) { - int startx = artist_get_x(s->vram_start); int starty = artist_get_y(s->vram_start); int endx = artist_get_x(s->line_end); @@ -838,7 +736,7 @@ static void combine_write_reg(hwaddr addr, uint64_t val, int size, void *out) * FIXME: is there a qemu helper for this? */ -#ifndef HOST_WORDS_BIGENDIAN +#if !HOST_BIG_ENDIAN addr ^= 3; #endif @@ -860,11 +758,158 @@ static void combine_write_reg(hwaddr addr, uint64_t val, int size, void *out) } } +static void artist_vram_write4(ARTISTState *s, struct vram_buffer *buf, + uint32_t offset, uint32_t data) +{ + int i; + int mask = s->vram_bitmask >> 28; + + for (i = 0; i < 4; i++) { + if (!(s->image_bitmap_op & 0x20000000) || (mask & 8)) { + artist_rop8(s, buf, offset + i, data >> 24); + data <<= 8; + mask <<= 1; + } + } + memory_region_set_dirty(&buf->mr, offset, 3); +} + +static void artist_vram_write32(ARTISTState *s, struct vram_buffer *buf, + uint32_t offset, int size, uint32_t data, + int fg, int bg) +{ + uint32_t mask, vram_bitmask = s->vram_bitmask >> ((4 - size) * 8); + int i, pix_count = size * 8; + + for (i = 0; i < pix_count && offset + i < buf->size; i++) { + mask = 1 << (pix_count - 1 - i); + + if (!(s->image_bitmap_op & 0x20000000) || (vram_bitmask & mask)) { + if (data & mask) { + artist_rop8(s, buf, offset + i, fg); + } else { + if (!(s->image_bitmap_op & 0x10000002)) { + artist_rop8(s, buf, offset + i, bg); + } + } + } + } + memory_region_set_dirty(&buf->mr, offset, pix_count); +} + +static int get_vram_offset(ARTISTState *s, struct vram_buffer *buf, + int pos, int posy) +{ + unsigned int posx, width; + + width = buf->width; + posx = ADDR_TO_X(pos); + posy += ADDR_TO_Y(pos); + return posy * width + posx; +} + +static int vram_bit_write(ARTISTState *s, uint32_t pos, int posy, + uint32_t data, int size) +{ + struct vram_buffer *buf = vram_write_buffer(s); + + switch (s->dst_bm_access >> 16) { + case 0x3ba0: + case 0xbbe0: + artist_vram_write4(s, buf, pos, bswap32(data)); + pos += 4; + break; + + case 0x1360: /* linux */ + artist_vram_write4(s, buf, get_vram_offset(s, buf, pos, posy), data); + pos += 4; + break; + + case 0x13a0: + artist_vram_write4(s, buf, get_vram_offset(s, buf, pos >> 2, posy), + data); + pos += 16; + break; + + case 0x2ea0: + artist_vram_write32(s, buf, get_vram_offset(s, buf, pos >> 2, posy), + size, data, s->fg_color, s->bg_color); + pos += 4; + break; + + case 0x28a0: + artist_vram_write32(s, 
buf, get_vram_offset(s, buf, pos >> 2, posy), + size, data, 1, 0); + pos += 4; + break; + + default: + qemu_log_mask(LOG_UNIMP, "%s: unknown dst bm access %08x\n", + __func__, s->dst_bm_access); + break; + } + + if (vram_write_bufidx(s) == ARTIST_BUFFER_CURSOR1 || + vram_write_bufidx(s) == ARTIST_BUFFER_CURSOR2) { + artist_invalidate_cursor(s); + } + return pos; +} + +static void artist_vram_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + ARTISTState *s = opaque; + + s->vram_char_y = 0; + trace_artist_vram_write(size, addr, val); + vram_bit_write(opaque, addr, 0, val, size); +} + +static uint64_t artist_vram_read(void *opaque, hwaddr addr, unsigned size) +{ + ARTISTState *s = opaque; + struct vram_buffer *buf; + unsigned int offset; + uint64_t val; + + buf = vram_read_buffer(s); + if (!buf->size) { + return 0; + } + + offset = get_vram_offset(s, buf, addr >> 2, 0); + + if (offset > buf->size) { + return 0; + } + + switch (s->src_bm_access >> 16) { + case 0x3ba0: + val = *(uint32_t *)(buf->data + offset); + break; + + case 0x13a0: + case 0x2ea0: + val = bswap32(*(uint32_t *)(buf->data + offset)); + break; + + default: + qemu_log_mask(LOG_UNIMP, "%s: unknown src bm access %08x\n", + __func__, s->dst_bm_access); + val = -1ULL; + break; + } + trace_artist_vram_read(size, addr, val); + return val; +} + static void artist_reg_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { ARTISTState *s = opaque; int width, height; + uint64_t oldval; trace_artist_reg_write(size, addr, artist_reg_name(addr & ~3ULL), val); @@ -886,12 +931,12 @@ static void artist_reg_write(void *opaque, hwaddr addr, uint64_t val, break; case VRAM_WRITE_INCR_Y: - vram_bit_write(s, s->vram_char_y++, false, size, val); + vram_bit_write(s, s->vram_pos, s->vram_char_y++, val, size); break; case VRAM_WRITE_INCR_X: case VRAM_WRITE_INCR_X2: - vram_bit_write(s, s->vram_char_y, true, size, val); + s->vram_pos = vram_bit_write(s, s->vram_pos, s->vram_char_y, val, size); break; case VRAM_IDX: @@ -993,18 +1038,17 @@ static void artist_reg_write(void *opaque, hwaddr addr, uint64_t val, combine_write_reg(addr, val, size, &s->plane_mask); break; - case CMAP_BM_ACCESS: - combine_write_reg(addr, val, size, &s->cmap_bm_access); + case DST_SRC_BM_ACCESS: + combine_write_reg(addr, val, size, &s->dst_bm_access); + combine_write_reg(addr, val, size, &s->src_bm_access); break; case DST_BM_ACCESS: combine_write_reg(addr, val, size, &s->dst_bm_access); - s->cmap_bm_access = 0; break; case SRC_BM_ACCESS: combine_write_reg(addr, val, size, &s->src_bm_access); - s->cmap_bm_access = 0; break; case CONTROL_PLANE: @@ -1015,16 +1059,33 @@ static void artist_reg_write(void *opaque, hwaddr addr, uint64_t val, combine_write_reg(addr, val, size, &s->transfer_data); break; - case 0x300200: - combine_write_reg(addr, val, size, &s->reg_300200); + case HORIZ_BACKPORCH: + /* overwrite HP-UX settings to fix X cursor position. */ + val = (NGLE_MAX_SPRITE_SIZE << 16) + (NGLE_MAX_SPRITE_SIZE << 8); + combine_write_reg(addr, val, size, &s->horiz_backporch); break; - case 0x300208: - combine_write_reg(addr, val, size, &s->reg_300208); + case ACTIVE_LINES_LOW: + combine_write_reg(addr, val, size, &s->active_lines_low); break; - case 0x300218: - combine_write_reg(addr, val, size, &s->reg_300218); + case MISC_VIDEO: + oldval = s->misc_video; + combine_write_reg(addr, val, size, &s->misc_video); + /* Invalidate and hide screen if graphics signal is turned off. 
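+ * Both bits in the 0x0A000000 mask must be set for the display to be
+ * treated as enabled; see artist_screen_enabled() below.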
*/ + if (((oldval & 0x0A000000) == 0x0A000000) && + ((val & 0x0A000000) != 0x0A000000)) { + artist_invalidate(s); + } + /* Invalidate and redraw screen if graphics signal is turned back on. */ + if (((oldval & 0x0A000000) != 0x0A000000) && + ((val & 0x0A000000) == 0x0A000000)) { + artist_invalidate(s); + } + break; + + case MISC_CTRL: + combine_write_reg(addr, val, size, &s->misc_ctrl); break; case CURSOR_POS: @@ -1034,6 +1095,7 @@ static void artist_reg_write(void *opaque, hwaddr addr, uint64_t val, break; case CURSOR_CTRL: + combine_write_reg(addr, val, size, &s->cursor_cntrl); break; case IMAGE_BITMAP_OP: @@ -1068,7 +1130,7 @@ static uint64_t combine_read_reg(hwaddr addr, int size, void *in) * FIXME: is there a qemu helper for this? */ -#ifndef HOST_WORDS_BIGENDIAN +#if !HOST_BIG_ENDIAN addr ^= 3; #endif @@ -1108,12 +1170,11 @@ static uint64_t artist_reg_read(void *opaque, hwaddr addr, unsigned size) case 0x100000: case 0x300000: case 0x300004: - case 0x300308: case 0x380000: break; - case 0x300008: - case 0x380008: + case FIFO1: + case FIFO2: /* * FIFO ready flag. we're not emulating the FIFOs * so we're always ready @@ -1121,16 +1182,28 @@ static uint64_t artist_reg_read(void *opaque, hwaddr addr, unsigned size) val = 0x10; break; - case 0x300200: - val = s->reg_300200; + case HORIZ_BACKPORCH: + val = s->horiz_backporch; break; - case 0x300208: - val = s->reg_300208; + case ACTIVE_LINES_LOW: + val = s->active_lines_low; + /* activeLinesLo for cursor is in reg20.b.b0 */ + val &= ~(0xff << 24); + val |= (s->height & 0xff) << 24; break; - case 0x300218: - val = s->reg_300218; + case MISC_VIDEO: + /* emulate V-blank */ + s->misc_video ^= 0x00040000; + /* activeLinesHi for cursor is in reg21.b.b2 */ + val = s->misc_video; + val &= ~0xff00UL; + val |= (s->height & 0xff00); + break; + + case MISC_CTRL: + val = s->misc_ctrl; break; case 0x30023c: @@ -1152,98 +1225,6 @@ static uint64_t artist_reg_read(void *opaque, hwaddr addr, unsigned size) return val; } -static void artist_vram_write(void *opaque, hwaddr addr, uint64_t val, - unsigned size) -{ - ARTISTState *s = opaque; - struct vram_buffer *buf; - unsigned int posy, posx; - unsigned int offset; - trace_artist_vram_write(size, addr, val); - - if (s->cmap_bm_access) { - buf = &s->vram_buffer[ARTIST_BUFFER_CMAP]; - if (addr + 3 < buf->size) { - *(uint32_t *)(buf->data + addr) = val; - } - return; - } - - buf = vram_write_buffer(s); - posy = ADDR_TO_Y(addr); - posx = ADDR_TO_X(addr); - - if (!buf->size) { - return; - } - - if (posy > buf->height || posx > buf->width) { - return; - } - - offset = posy * buf->width + posx; - if (offset >= buf->size) { - return; - } - - switch (size) { - case 4: - if (offset + 3 < buf->size) { - *(uint32_t *)(buf->data + offset) = be32_to_cpu(val); - memory_region_set_dirty(&buf->mr, offset, 4); - } - break; - case 2: - if (offset + 1 < buf->size) { - *(uint16_t *)(buf->data + offset) = be16_to_cpu(val); - memory_region_set_dirty(&buf->mr, offset, 2); - } - break; - case 1: - if (offset < buf->size) { - *(uint8_t *)(buf->data + offset) = val; - memory_region_set_dirty(&buf->mr, offset, 1); - } - break; - default: - break; - } -} - -static uint64_t artist_vram_read(void *opaque, hwaddr addr, unsigned size) -{ - ARTISTState *s = opaque; - struct vram_buffer *buf; - uint64_t val; - unsigned int posy, posx; - - if (s->cmap_bm_access) { - buf = &s->vram_buffer[ARTIST_BUFFER_CMAP]; - val = 0; - if (addr < buf->size && addr + 3 < buf->size) { - val = *(uint32_t *)(buf->data + addr); - } - trace_artist_vram_read(size, 
addr, 0, 0, val); - return val; - } - - buf = vram_read_buffer(s); - if (!buf->size) { - return 0; - } - - posy = ADDR_TO_Y(addr); - posx = ADDR_TO_X(addr); - - if (posy > buf->height || posx > buf->width) { - return 0; - } - - val = cpu_to_be32(*(uint32_t *)(buf->data + posy * buf->width + posx)); - trace_artist_vram_read(size, addr, posx, posy, val); - return val; -} - static const MemoryRegionOps artist_reg_ops = { .read = artist_reg_read, .write = artist_reg_write, @@ -1267,6 +1248,10 @@ static void artist_draw_cursor(ARTISTState *s) struct vram_buffer *cursor0, *cursor1 , *buf; int cx, cy, cursor_pos_x, cursor_pos_y; + if (!cursor_visible(s)) { + return; + } + cursor0 = &s->vram_buffer[ARTIST_BUFFER_CURSOR1]; cursor1 = &s->vram_buffer[ARTIST_BUFFER_CURSOR2]; buf = &s->vram_buffer[ARTIST_BUFFER_AP]; @@ -1298,6 +1283,12 @@ static void artist_draw_cursor(ARTISTState *s) } } +static bool artist_screen_enabled(ARTISTState *s) +{ + /* We could check for (s->misc_ctrl & 0x00800000) too... */ + return ((s->misc_video & 0x0A000000) == 0x0A000000); +} + static void artist_draw_line(void *opaque, uint8_t *d, const uint8_t *src, int width, int pitch) { @@ -1305,6 +1296,12 @@ static void artist_draw_line(void *opaque, uint8_t *d, const uint8_t *src, uint32_t *cmap, *data = (uint32_t *)d; int x; + if (!artist_screen_enabled(s)) { + /* clear screen */ + memset(data, 0, s->width * sizeof(uint32_t)); + return; + } + cmap = (uint32_t *)(s->vram_buffer[ARTIST_BUFFER_CMAP].data + 0x400); for (x = 0; x < s->width; x++) { @@ -1318,20 +1315,22 @@ static void artist_update_display(void *opaque) DisplaySurface *surface = qemu_console_surface(s->con); int first = 0, last; - framebuffer_update_display(surface, &s->fbsection, s->width, s->height, s->width, s->width * 4, 0, 0, artist_draw_line, s, &first, &last); artist_draw_cursor(s); - dpy_gfx_update(s->con, 0, 0, s->width, s->height); + if (first >= 0) { + dpy_gfx_update(s->con, 0, first, s->width, last - first + 1); + } } static void artist_invalidate(void *opaque) { ARTISTState *s = ARTIST(opaque); struct vram_buffer *buf = &s->vram_buffer[ARTIST_BUFFER_AP]; + memory_region_set_dirty(&buf->mr, 0, buf->size); } @@ -1359,7 +1358,7 @@ static void artist_create_buffer(ARTISTState *s, const char *name, { struct vram_buffer *buf = s->vram_buffer + idx; - memory_region_init_ram(&buf->mr, NULL, name, width * height, + memory_region_init_ram(&buf->mr, OBJECT(s), name, width * height, &error_fatal); memory_region_add_subregion_overlap(&s->mem_as_root, *offset, &buf->mr, 0); @@ -1404,11 +1403,22 @@ static void artist_realizefn(DeviceState *dev, Error **errp) framebuffer_update_memory_section(&s->fbsection, &buf->mr, 0, buf->width, buf->height); /* - * no idea whether the cursor is fixed size or not, so assume 32x32 which - * seems sufficient for HP-UX X11. + * Artist cursor max size */ - s->cursor_height = 32; - s->cursor_width = 32; + s->cursor_height = NGLE_MAX_SPRITE_SIZE; + s->cursor_width = NGLE_MAX_SPRITE_SIZE; + + /* + * These two registers are not initialized by seabios's STI implementation. + * Initialize them here to sane values so artist also works with older + * (not-fixed) seabios versions. 
+ */ + s->image_bitmap_op = 0x23000300; + s->plane_mask = 0xff; + + /* enable screen */ + s->misc_video |= 0x0A000000; + s->misc_ctrl |= 0x00800000; s->con = graphic_console_init(dev, 0, &artist_ops, s); qemu_console_resize(s->con, s->width, s->height); @@ -1422,8 +1432,8 @@ static int vmstate_artist_post_load(void *opaque, int version_id) static const VMStateDescription vmstate_artist = { .name = "artist", - .version_id = 1, - .minimum_version_id = 1, + .version_id = 2, + .minimum_version_id = 2, .post_load = vmstate_artist_post_load, .fields = (VMStateField[]) { VMSTATE_UINT16(height, ARTISTState), @@ -1443,14 +1453,15 @@ static const VMStateDescription vmstate_artist = { VMSTATE_UINT32(line_end, ARTISTState), VMSTATE_UINT32(line_xy, ARTISTState), VMSTATE_UINT32(cursor_pos, ARTISTState), + VMSTATE_UINT32(cursor_cntrl, ARTISTState), VMSTATE_UINT32(cursor_height, ARTISTState), VMSTATE_UINT32(cursor_width, ARTISTState), VMSTATE_UINT32(plane_mask, ARTISTState), VMSTATE_UINT32(reg_100080, ARTISTState), - VMSTATE_UINT32(reg_300200, ARTISTState), - VMSTATE_UINT32(reg_300208, ARTISTState), - VMSTATE_UINT32(reg_300218, ARTISTState), - VMSTATE_UINT32(cmap_bm_access, ARTISTState), + VMSTATE_UINT32(horiz_backporch, ARTISTState), + VMSTATE_UINT32(active_lines_low, ARTISTState), + VMSTATE_UINT32(misc_video, ARTISTState), + VMSTATE_UINT32(misc_ctrl, ARTISTState), VMSTATE_UINT32(dst_bm_access, ARTISTState), VMSTATE_UINT32(src_bm_access, ARTISTState), VMSTATE_UINT32(control_plane, ARTISTState), diff --git a/hw/display/ati.c b/hw/display/ati.c index 31f22754dc..6e38e00502 100644 --- a/hw/display/ati.c +++ b/hw/display/ati.c @@ -955,7 +955,9 @@ static void ati_vga_realize(PCIDevice *dev, Error **errp) } /* init vga bits */ - vga_common_init(vga, OBJECT(s)); + if (!vga_common_init(vga, OBJECT(s), errp)) { + return; + } vga_init(vga, OBJECT(s), pci_address_space(dev), pci_address_space_io(dev), true); vga->con = graphic_console_init(DEVICE(s), 0, s->vga.hw_ops, &s->vga); diff --git a/hw/display/ati_2d.c b/hw/display/ati_2d.c index 4dc10ea795..7d786653e8 100644 --- a/hw/display/ati_2d.c +++ b/hw/display/ati_2d.c @@ -12,6 +12,7 @@ #include "ati_regs.h" #include "qemu/log.h" #include "ui/pixel_ops.h" +#include "ui/console.h" /* * NOTE: @@ -84,7 +85,7 @@ void ati_2d_blt(ATIVGAState *s) DPRINTF("%d %d %d, %d %d %d, (%d,%d) -> (%d,%d) %dx%d %c %c\n", s->regs.src_offset, s->regs.dst_offset, s->regs.default_offset, s->regs.src_pitch, s->regs.dst_pitch, s->regs.default_pitch, - s->regs.src_x, s->regs.src_y, s->regs.dst_x, s->regs.dst_y, + s->regs.src_x, s->regs.src_y, dst_x, dst_y, s->regs.dst_width, s->regs.dst_height, (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT ? '>' : '<'), (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ? 
'v' : '^')); @@ -180,11 +181,11 @@ void ati_2d_blt(ATIVGAState *s) dst_stride /= sizeof(uint32_t); DPRINTF("pixman_fill(%p, %d, %d, %d, %d, %d, %d, %x)\n", dst_bits, dst_stride, bpp, - s->regs.dst_x, s->regs.dst_y, + dst_x, dst_y, s->regs.dst_width, s->regs.dst_height, filler); pixman_fill((uint32_t *)dst_bits, dst_stride, bpp, - s->regs.dst_x, s->regs.dst_y, + dst_x, dst_y, s->regs.dst_width, s->regs.dst_height, filler); if (dst_bits >= s->vga.vram_ptr + s->vga.vbe_start_addr && diff --git a/hw/display/bcm2835_fb.c b/hw/display/bcm2835_fb.c index 2be77bdd3a..a05277674f 100644 --- a/hw/display/bcm2835_fb.c +++ b/hw/display/bcm2835_fb.c @@ -279,8 +279,7 @@ static void bcm2835_fb_mbox_push(BCM2835FBState *s, uint32_t value) newconf.xoffset = ldl_le_phys(&s->dma_as, value + 24); newconf.yoffset = ldl_le_phys(&s->dma_as, value + 28); - newconf.base = s->vcram_base | (value & 0xc0000000); - newconf.base += BCM2835_FB_OFFSET; + newconf.base = s->vcram_base + BCM2835_FB_OFFSET; /* Copy fields which we don't want to change from the existing config */ newconf.pixo = s->config.pixo; @@ -454,7 +453,7 @@ static void bcm2835_fb_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_bcm2835_fb; } -static TypeInfo bcm2835_fb_info = { +static const TypeInfo bcm2835_fb_info = { .name = TYPE_BCM2835_FB, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(BCM2835FBState), diff --git a/hw/display/blizzard.c b/hw/display/blizzard.c index 105241577d..030abbe7d4 100644 --- a/hw/display/blizzard.c +++ b/hw/display/blizzard.c @@ -123,14 +123,14 @@ typedef struct { /* Bytes(!) per pixel */ static const int blizzard_iformat_bpp[0x10] = { 0, - 2, /* RGB 5:6:5*/ - 3, /* RGB 6:6:6 mode 1 */ - 3, /* RGB 8:8:8 mode 1 */ + 2, /* RGB 5:6:5*/ + 3, /* RGB 6:6:6 mode 1 */ + 3, /* RGB 8:8:8 mode 1 */ 0, 0, - 4, /* RGB 6:6:6 mode 2 */ - 4, /* RGB 8:8:8 mode 2 */ - 0, /* YUV 4:2:2 */ - 0, /* YUV 4:2:0 */ + 4, /* RGB 6:6:6 mode 2 */ + 4, /* RGB 8:8:8 mode 2 */ + 0, /* YUV 4:2:2 */ + 0, /* YUV 4:2:0 */ 0, 0, 0, 0, 0, 0, }; @@ -281,196 +281,196 @@ static uint16_t blizzard_reg_read(void *opaque, uint8_t reg) BlizzardState *s = (BlizzardState *) opaque; switch (reg) { - case 0x00: /* Revision Code */ + case 0x00: /* Revision Code */ return 0xa5; - case 0x02: /* Configuration Readback */ - return 0x83; /* Macrovision OK, CNF[2:0] = 3 */ + case 0x02: /* Configuration Readback */ + return 0x83; /* Macrovision OK, CNF[2:0] = 3 */ - case 0x04: /* PLL M-Divider */ + case 0x04: /* PLL M-Divider */ return (s->pll - 1) | (1 << 7); - case 0x06: /* PLL Lock Range Control */ + case 0x06: /* PLL Lock Range Control */ return s->pll_range; - case 0x08: /* PLL Lock Synthesis Control 0 */ + case 0x08: /* PLL Lock Synthesis Control 0 */ return s->pll_ctrl & 0xff; - case 0x0a: /* PLL Lock Synthesis Control 1 */ + case 0x0a: /* PLL Lock Synthesis Control 1 */ return s->pll_ctrl >> 8; - case 0x0c: /* PLL Mode Control 0 */ + case 0x0c: /* PLL Mode Control 0 */ return s->pll_mode; - case 0x0e: /* Clock-Source Select */ + case 0x0e: /* Clock-Source Select */ return s->clksel; - case 0x10: /* Memory Controller Activate */ - case 0x14: /* Memory Controller Bank 0 Status Flag */ + case 0x10: /* Memory Controller Activate */ + case 0x14: /* Memory Controller Bank 0 Status Flag */ return s->memenable; - case 0x18: /* Auto-Refresh Interval Setting 0 */ + case 0x18: /* Auto-Refresh Interval Setting 0 */ return s->memrefresh & 0xff; - case 0x1a: /* Auto-Refresh Interval Setting 1 */ + case 0x1a: /* Auto-Refresh Interval Setting 1 */ return s->memrefresh 
>> 8; - case 0x1c: /* Power-On Sequence Timing Control */ + case 0x1c: /* Power-On Sequence Timing Control */ return s->timing[0]; - case 0x1e: /* Timing Control 0 */ + case 0x1e: /* Timing Control 0 */ return s->timing[1]; - case 0x20: /* Timing Control 1 */ + case 0x20: /* Timing Control 1 */ return s->timing[2]; - case 0x24: /* Arbitration Priority Control */ + case 0x24: /* Arbitration Priority Control */ return s->priority; - case 0x28: /* LCD Panel Configuration */ + case 0x28: /* LCD Panel Configuration */ return s->lcd_config; - case 0x2a: /* LCD Horizontal Display Width */ + case 0x2a: /* LCD Horizontal Display Width */ return s->x >> 3; - case 0x2c: /* LCD Horizontal Non-display Period */ + case 0x2c: /* LCD Horizontal Non-display Period */ return s->hndp; - case 0x2e: /* LCD Vertical Display Height 0 */ + case 0x2e: /* LCD Vertical Display Height 0 */ return s->y & 0xff; - case 0x30: /* LCD Vertical Display Height 1 */ + case 0x30: /* LCD Vertical Display Height 1 */ return s->y >> 8; - case 0x32: /* LCD Vertical Non-display Period */ + case 0x32: /* LCD Vertical Non-display Period */ return s->vndp; - case 0x34: /* LCD HS Pulse-width */ + case 0x34: /* LCD HS Pulse-width */ return s->hsync; - case 0x36: /* LCd HS Pulse Start Position */ + case 0x36: /* LCd HS Pulse Start Position */ return s->skipx >> 3; - case 0x38: /* LCD VS Pulse-width */ + case 0x38: /* LCD VS Pulse-width */ return s->vsync; - case 0x3a: /* LCD VS Pulse Start Position */ + case 0x3a: /* LCD VS Pulse Start Position */ return s->skipy; - case 0x3c: /* PCLK Polarity */ + case 0x3c: /* PCLK Polarity */ return s->pclk; - case 0x3e: /* High-speed Serial Interface Tx Configuration Port 0 */ + case 0x3e: /* High-speed Serial Interface Tx Configuration Port 0 */ return s->hssi_config[0]; - case 0x40: /* High-speed Serial Interface Tx Configuration Port 1 */ + case 0x40: /* High-speed Serial Interface Tx Configuration Port 1 */ return s->hssi_config[1]; - case 0x42: /* High-speed Serial Interface Tx Mode */ + case 0x42: /* High-speed Serial Interface Tx Mode */ return s->hssi_config[2]; - case 0x44: /* TV Display Configuration */ + case 0x44: /* TV Display Configuration */ return s->tv_config; - case 0x46 ... 0x4c: /* TV Vertical Blanking Interval Data bits */ + case 0x46 ... 
0x4c: /* TV Vertical Blanking Interval Data bits */ return s->tv_timing[(reg - 0x46) >> 1]; - case 0x4e: /* VBI: Closed Caption / XDS Control / Status */ + case 0x4e: /* VBI: Closed Caption / XDS Control / Status */ return s->vbi; - case 0x50: /* TV Horizontal Start Position */ + case 0x50: /* TV Horizontal Start Position */ return s->tv_x; - case 0x52: /* TV Vertical Start Position */ + case 0x52: /* TV Vertical Start Position */ return s->tv_y; - case 0x54: /* TV Test Pattern Setting */ + case 0x54: /* TV Test Pattern Setting */ return s->tv_test; - case 0x56: /* TV Filter Setting */ + case 0x56: /* TV Filter Setting */ return s->tv_filter_config; - case 0x58: /* TV Filter Coefficient Index */ + case 0x58: /* TV Filter Coefficient Index */ return s->tv_filter_idx; - case 0x5a: /* TV Filter Coefficient Data */ + case 0x5a: /* TV Filter Coefficient Data */ if (s->tv_filter_idx < 0x20) return s->tv_filter_coeff[s->tv_filter_idx ++]; return 0; - case 0x60: /* Input YUV/RGB Translate Mode 0 */ + case 0x60: /* Input YUV/RGB Translate Mode 0 */ return s->yrc[0]; - case 0x62: /* Input YUV/RGB Translate Mode 1 */ + case 0x62: /* Input YUV/RGB Translate Mode 1 */ return s->yrc[1]; - case 0x64: /* U Data Fix */ + case 0x64: /* U Data Fix */ return s->u; - case 0x66: /* V Data Fix */ + case 0x66: /* V Data Fix */ return s->v; - case 0x68: /* Display Mode */ + case 0x68: /* Display Mode */ return s->mode; - case 0x6a: /* Special Effects */ + case 0x6a: /* Special Effects */ return s->effect; - case 0x6c: /* Input Window X Start Position 0 */ + case 0x6c: /* Input Window X Start Position 0 */ return s->ix[0] & 0xff; - case 0x6e: /* Input Window X Start Position 1 */ + case 0x6e: /* Input Window X Start Position 1 */ return s->ix[0] >> 3; - case 0x70: /* Input Window Y Start Position 0 */ + case 0x70: /* Input Window Y Start Position 0 */ return s->ix[0] & 0xff; - case 0x72: /* Input Window Y Start Position 1 */ + case 0x72: /* Input Window Y Start Position 1 */ return s->ix[0] >> 3; - case 0x74: /* Input Window X End Position 0 */ + case 0x74: /* Input Window X End Position 0 */ return s->ix[1] & 0xff; - case 0x76: /* Input Window X End Position 1 */ + case 0x76: /* Input Window X End Position 1 */ return s->ix[1] >> 3; - case 0x78: /* Input Window Y End Position 0 */ + case 0x78: /* Input Window Y End Position 0 */ return s->ix[1] & 0xff; - case 0x7a: /* Input Window Y End Position 1 */ + case 0x7a: /* Input Window Y End Position 1 */ return s->ix[1] >> 3; - case 0x7c: /* Output Window X Start Position 0 */ + case 0x7c: /* Output Window X Start Position 0 */ return s->ox[0] & 0xff; - case 0x7e: /* Output Window X Start Position 1 */ + case 0x7e: /* Output Window X Start Position 1 */ return s->ox[0] >> 3; - case 0x80: /* Output Window Y Start Position 0 */ + case 0x80: /* Output Window Y Start Position 0 */ return s->oy[0] & 0xff; - case 0x82: /* Output Window Y Start Position 1 */ + case 0x82: /* Output Window Y Start Position 1 */ return s->oy[0] >> 3; - case 0x84: /* Output Window X End Position 0 */ + case 0x84: /* Output Window X End Position 0 */ return s->ox[1] & 0xff; - case 0x86: /* Output Window X End Position 1 */ + case 0x86: /* Output Window X End Position 1 */ return s->ox[1] >> 3; - case 0x88: /* Output Window Y End Position 0 */ + case 0x88: /* Output Window Y End Position 0 */ return s->oy[1] & 0xff; - case 0x8a: /* Output Window Y End Position 1 */ + case 0x8a: /* Output Window Y End Position 1 */ return s->oy[1] >> 3; - case 0x8c: /* Input Data Format */ + case 0x8c: /* Input Data 
Format */ return s->iformat; - case 0x8e: /* Data Source Select */ + case 0x8e: /* Data Source Select */ return s->source; - case 0x90: /* Display Memory Data Port */ + case 0x90: /* Display Memory Data Port */ return 0; - case 0xa8: /* Border Color 0 */ + case 0xa8: /* Border Color 0 */ return s->border_r; - case 0xaa: /* Border Color 1 */ + case 0xaa: /* Border Color 1 */ return s->border_g; - case 0xac: /* Border Color 2 */ + case 0xac: /* Border Color 2 */ return s->border_b; - case 0xb4: /* Gamma Correction Enable */ + case 0xb4: /* Gamma Correction Enable */ return s->gamma_config; - case 0xb6: /* Gamma Correction Table Index */ + case 0xb6: /* Gamma Correction Table Index */ return s->gamma_idx; - case 0xb8: /* Gamma Correction Table Data */ + case 0xb8: /* Gamma Correction Table Data */ return s->gamma_lut[s->gamma_idx ++]; - case 0xba: /* 3x3 Matrix Enable */ + case 0xba: /* 3x3 Matrix Enable */ return s->matrix_ena; - case 0xbc ... 0xde: /* Coefficient Registers */ + case 0xbc ... 0xde: /* Coefficient Registers */ return s->matrix_coeff[(reg - 0xbc) >> 1]; - case 0xe0: /* 3x3 Matrix Red Offset */ + case 0xe0: /* 3x3 Matrix Red Offset */ return s->matrix_r; - case 0xe2: /* 3x3 Matrix Green Offset */ + case 0xe2: /* 3x3 Matrix Green Offset */ return s->matrix_g; - case 0xe4: /* 3x3 Matrix Blue Offset */ + case 0xe4: /* 3x3 Matrix Blue Offset */ return s->matrix_b; - case 0xe6: /* Power-save */ + case 0xe6: /* Power-save */ return s->pm; - case 0xe8: /* Non-display Period Control / Status */ + case 0xe8: /* Non-display Period Control / Status */ return s->status | (1 << 5); - case 0xea: /* RGB Interface Control */ + case 0xea: /* RGB Interface Control */ return s->rgbgpio_dir; - case 0xec: /* RGB Interface Status */ + case 0xec: /* RGB Interface Status */ return s->rgbgpio; - case 0xee: /* General-purpose IO Pins Configuration */ + case 0xee: /* General-purpose IO Pins Configuration */ return s->gpio_dir; - case 0xf0: /* General-purpose IO Pins Status / Control */ + case 0xf0: /* General-purpose IO Pins Status / Control */ return s->gpio; - case 0xf2: /* GPIO Positive Edge Interrupt Trigger */ + case 0xf2: /* GPIO Positive Edge Interrupt Trigger */ return s->gpio_edge[0]; - case 0xf4: /* GPIO Negative Edge Interrupt Trigger */ + case 0xf4: /* GPIO Negative Edge Interrupt Trigger */ return s->gpio_edge[1]; - case 0xf6: /* GPIO Interrupt Status */ + case 0xf6: /* GPIO Interrupt Status */ return s->gpio_irq; - case 0xf8: /* GPIO Pull-down Control */ + case 0xf8: /* GPIO Pull-down Control */ return s->gpio_pdown; default: @@ -484,157 +484,157 @@ static void blizzard_reg_write(void *opaque, uint8_t reg, uint16_t value) BlizzardState *s = (BlizzardState *) opaque; switch (reg) { - case 0x04: /* PLL M-Divider */ + case 0x04: /* PLL M-Divider */ s->pll = (value & 0x3f) + 1; break; - case 0x06: /* PLL Lock Range Control */ + case 0x06: /* PLL Lock Range Control */ s->pll_range = value & 3; break; - case 0x08: /* PLL Lock Synthesis Control 0 */ + case 0x08: /* PLL Lock Synthesis Control 0 */ s->pll_ctrl &= 0xf00; s->pll_ctrl |= (value << 0) & 0x0ff; break; - case 0x0a: /* PLL Lock Synthesis Control 1 */ + case 0x0a: /* PLL Lock Synthesis Control 1 */ s->pll_ctrl &= 0x0ff; s->pll_ctrl |= (value << 8) & 0xf00; break; - case 0x0c: /* PLL Mode Control 0 */ + case 0x0c: /* PLL Mode Control 0 */ s->pll_mode = value & 0x77; if ((value & 3) == 0 || (value & 3) == 3) fprintf(stderr, "%s: wrong PLL Control bits (%i)\n", __func__, value & 3); break; - case 0x0e: /* Clock-Source Select */ + case 0x0e: /* 
Clock-Source Select */ s->clksel = value & 0xff; break; - case 0x10: /* Memory Controller Activate */ + case 0x10: /* Memory Controller Activate */ s->memenable = value & 1; break; - case 0x14: /* Memory Controller Bank 0 Status Flag */ + case 0x14: /* Memory Controller Bank 0 Status Flag */ break; - case 0x18: /* Auto-Refresh Interval Setting 0 */ + case 0x18: /* Auto-Refresh Interval Setting 0 */ s->memrefresh &= 0xf00; s->memrefresh |= (value << 0) & 0x0ff; break; - case 0x1a: /* Auto-Refresh Interval Setting 1 */ + case 0x1a: /* Auto-Refresh Interval Setting 1 */ s->memrefresh &= 0x0ff; s->memrefresh |= (value << 8) & 0xf00; break; - case 0x1c: /* Power-On Sequence Timing Control */ + case 0x1c: /* Power-On Sequence Timing Control */ s->timing[0] = value & 0x7f; break; - case 0x1e: /* Timing Control 0 */ + case 0x1e: /* Timing Control 0 */ s->timing[1] = value & 0x17; break; - case 0x20: /* Timing Control 1 */ + case 0x20: /* Timing Control 1 */ s->timing[2] = value & 0x35; break; - case 0x24: /* Arbitration Priority Control */ + case 0x24: /* Arbitration Priority Control */ s->priority = value & 1; break; - case 0x28: /* LCD Panel Configuration */ + case 0x28: /* LCD Panel Configuration */ s->lcd_config = value & 0xff; if (value & (1 << 7)) fprintf(stderr, "%s: data swap not supported!\n", __func__); break; - case 0x2a: /* LCD Horizontal Display Width */ + case 0x2a: /* LCD Horizontal Display Width */ s->x = value << 3; break; - case 0x2c: /* LCD Horizontal Non-display Period */ + case 0x2c: /* LCD Horizontal Non-display Period */ s->hndp = value & 0xff; break; - case 0x2e: /* LCD Vertical Display Height 0 */ + case 0x2e: /* LCD Vertical Display Height 0 */ s->y &= 0x300; s->y |= (value << 0) & 0x0ff; break; - case 0x30: /* LCD Vertical Display Height 1 */ + case 0x30: /* LCD Vertical Display Height 1 */ s->y &= 0x0ff; s->y |= (value << 8) & 0x300; break; - case 0x32: /* LCD Vertical Non-display Period */ + case 0x32: /* LCD Vertical Non-display Period */ s->vndp = value & 0xff; break; - case 0x34: /* LCD HS Pulse-width */ + case 0x34: /* LCD HS Pulse-width */ s->hsync = value & 0xff; break; - case 0x36: /* LCD HS Pulse Start Position */ + case 0x36: /* LCD HS Pulse Start Position */ s->skipx = value & 0xff; break; - case 0x38: /* LCD VS Pulse-width */ + case 0x38: /* LCD VS Pulse-width */ s->vsync = value & 0xbf; break; - case 0x3a: /* LCD VS Pulse Start Position */ + case 0x3a: /* LCD VS Pulse Start Position */ s->skipy = value & 0xff; break; - case 0x3c: /* PCLK Polarity */ + case 0x3c: /* PCLK Polarity */ s->pclk = value & 0x82; /* Affects calculation of s->hndp, s->hsync and s->skipx. */ break; - case 0x3e: /* High-speed Serial Interface Tx Configuration Port 0 */ + case 0x3e: /* High-speed Serial Interface Tx Configuration Port 0 */ s->hssi_config[0] = value; break; - case 0x40: /* High-speed Serial Interface Tx Configuration Port 1 */ + case 0x40: /* High-speed Serial Interface Tx Configuration Port 1 */ s->hssi_config[1] = value; if (((value >> 4) & 3) == 3) fprintf(stderr, "%s: Illegal active-data-links value\n", __func__); break; - case 0x42: /* High-speed Serial Interface Tx Mode */ + case 0x42: /* High-speed Serial Interface Tx Mode */ s->hssi_config[2] = value & 0xbd; break; - case 0x44: /* TV Display Configuration */ + case 0x44: /* TV Display Configuration */ s->tv_config = value & 0xfe; break; - case 0x46 ... 0x4c: /* TV Vertical Blanking Interval Data bits 0 */ + case 0x46 ... 
0x4c: /* TV Vertical Blanking Interval Data bits 0 */ s->tv_timing[(reg - 0x46) >> 1] = value; break; - case 0x4e: /* VBI: Closed Caption / XDS Control / Status */ + case 0x4e: /* VBI: Closed Caption / XDS Control / Status */ s->vbi = value; break; - case 0x50: /* TV Horizontal Start Position */ + case 0x50: /* TV Horizontal Start Position */ s->tv_x = value; break; - case 0x52: /* TV Vertical Start Position */ + case 0x52: /* TV Vertical Start Position */ s->tv_y = value & 0x7f; break; - case 0x54: /* TV Test Pattern Setting */ + case 0x54: /* TV Test Pattern Setting */ s->tv_test = value; break; - case 0x56: /* TV Filter Setting */ + case 0x56: /* TV Filter Setting */ s->tv_filter_config = value & 0xbf; break; - case 0x58: /* TV Filter Coefficient Index */ + case 0x58: /* TV Filter Coefficient Index */ s->tv_filter_idx = value & 0x1f; break; - case 0x5a: /* TV Filter Coefficient Data */ + case 0x5a: /* TV Filter Coefficient Data */ if (s->tv_filter_idx < 0x20) s->tv_filter_coeff[s->tv_filter_idx ++] = value; break; - case 0x60: /* Input YUV/RGB Translate Mode 0 */ + case 0x60: /* Input YUV/RGB Translate Mode 0 */ s->yrc[0] = value & 0xb0; break; - case 0x62: /* Input YUV/RGB Translate Mode 1 */ + case 0x62: /* Input YUV/RGB Translate Mode 1 */ s->yrc[1] = value & 0x30; break; - case 0x64: /* U Data Fix */ + case 0x64: /* U Data Fix */ s->u = value & 0xff; break; - case 0x66: /* V Data Fix */ + case 0x66: /* V Data Fix */ s->v = value & 0xff; break; - case 0x68: /* Display Mode */ + case 0x68: /* Display Mode */ if ((s->mode ^ value) & 3) s->invalidate = 1; s->mode = value & 0xb7; @@ -644,83 +644,83 @@ static void blizzard_reg_write(void *opaque, uint8_t reg, uint16_t value) fprintf(stderr, "%s: Macrovision enable attempt!\n", __func__); break; - case 0x6a: /* Special Effects */ + case 0x6a: /* Special Effects */ s->effect = value & 0xfb; break; - case 0x6c: /* Input Window X Start Position 0 */ + case 0x6c: /* Input Window X Start Position 0 */ s->ix[0] &= 0x300; s->ix[0] |= (value << 0) & 0x0ff; break; - case 0x6e: /* Input Window X Start Position 1 */ + case 0x6e: /* Input Window X Start Position 1 */ s->ix[0] &= 0x0ff; s->ix[0] |= (value << 8) & 0x300; break; - case 0x70: /* Input Window Y Start Position 0 */ + case 0x70: /* Input Window Y Start Position 0 */ s->iy[0] &= 0x300; s->iy[0] |= (value << 0) & 0x0ff; break; - case 0x72: /* Input Window Y Start Position 1 */ + case 0x72: /* Input Window Y Start Position 1 */ s->iy[0] &= 0x0ff; s->iy[0] |= (value << 8) & 0x300; break; - case 0x74: /* Input Window X End Position 0 */ + case 0x74: /* Input Window X End Position 0 */ s->ix[1] &= 0x300; s->ix[1] |= (value << 0) & 0x0ff; break; - case 0x76: /* Input Window X End Position 1 */ + case 0x76: /* Input Window X End Position 1 */ s->ix[1] &= 0x0ff; s->ix[1] |= (value << 8) & 0x300; break; - case 0x78: /* Input Window Y End Position 0 */ + case 0x78: /* Input Window Y End Position 0 */ s->iy[1] &= 0x300; s->iy[1] |= (value << 0) & 0x0ff; break; - case 0x7a: /* Input Window Y End Position 1 */ + case 0x7a: /* Input Window Y End Position 1 */ s->iy[1] &= 0x0ff; s->iy[1] |= (value << 8) & 0x300; break; - case 0x7c: /* Output Window X Start Position 0 */ + case 0x7c: /* Output Window X Start Position 0 */ s->ox[0] &= 0x300; s->ox[0] |= (value << 0) & 0x0ff; break; - case 0x7e: /* Output Window X Start Position 1 */ + case 0x7e: /* Output Window X Start Position 1 */ s->ox[0] &= 0x0ff; s->ox[0] |= (value << 8) & 0x300; break; - case 0x80: /* Output Window Y Start Position 0 */ + case 0x80: /* 
Output Window Y Start Position 0 */ s->oy[0] &= 0x300; s->oy[0] |= (value << 0) & 0x0ff; break; - case 0x82: /* Output Window Y Start Position 1 */ + case 0x82: /* Output Window Y Start Position 1 */ s->oy[0] &= 0x0ff; s->oy[0] |= (value << 8) & 0x300; break; - case 0x84: /* Output Window X End Position 0 */ + case 0x84: /* Output Window X End Position 0 */ s->ox[1] &= 0x300; s->ox[1] |= (value << 0) & 0x0ff; break; - case 0x86: /* Output Window X End Position 1 */ + case 0x86: /* Output Window X End Position 1 */ s->ox[1] &= 0x0ff; s->ox[1] |= (value << 8) & 0x300; break; - case 0x88: /* Output Window Y End Position 0 */ + case 0x88: /* Output Window Y End Position 0 */ s->oy[1] &= 0x300; s->oy[1] |= (value << 0) & 0x0ff; break; - case 0x8a: /* Output Window Y End Position 1 */ + case 0x8a: /* Output Window Y End Position 1 */ s->oy[1] &= 0x0ff; s->oy[1] |= (value << 8) & 0x300; break; - case 0x8c: /* Input Data Format */ + case 0x8c: /* Input Data Format */ s->iformat = value & 0xf; s->bpp = blizzard_iformat_bpp[s->iformat]; if (!s->bpp) fprintf(stderr, "%s: Illegal or unsupported input format %x\n", __func__, s->iformat); break; - case 0x8e: /* Data Source Select */ + case 0x8e: /* Data Source Select */ s->source = value & 7; /* Currently all windows will be "destructive overlays". */ if ((!(s->effect & (1 << 3)) && (s->ix[0] != s->ox[0] || @@ -735,7 +735,7 @@ static void blizzard_reg_write(void *opaque, uint8_t reg, uint16_t value) blizzard_transfer_setup(s); break; - case 0x90: /* Display Memory Data Port */ + case 0x90: /* Display Memory Data Port */ if (!s->data.len && !blizzard_transfer_setup(s)) break; @@ -744,73 +744,73 @@ static void blizzard_reg_write(void *opaque, uint8_t reg, uint16_t value) blizzard_window(s); break; - case 0xa8: /* Border Color 0 */ + case 0xa8: /* Border Color 0 */ s->border_r = value; break; - case 0xaa: /* Border Color 1 */ + case 0xaa: /* Border Color 1 */ s->border_g = value; break; - case 0xac: /* Border Color 2 */ + case 0xac: /* Border Color 2 */ s->border_b = value; break; - case 0xb4: /* Gamma Correction Enable */ + case 0xb4: /* Gamma Correction Enable */ s->gamma_config = value & 0x87; break; - case 0xb6: /* Gamma Correction Table Index */ + case 0xb6: /* Gamma Correction Table Index */ s->gamma_idx = value; break; - case 0xb8: /* Gamma Correction Table Data */ + case 0xb8: /* Gamma Correction Table Data */ s->gamma_lut[s->gamma_idx ++] = value; break; - case 0xba: /* 3x3 Matrix Enable */ + case 0xba: /* 3x3 Matrix Enable */ s->matrix_ena = value & 1; break; - case 0xbc ... 0xde: /* Coefficient Registers */ + case 0xbc ... 0xde: /* Coefficient Registers */ s->matrix_coeff[(reg - 0xbc) >> 1] = value & ((reg & 2) ? 
0x80 : 0xff); break; - case 0xe0: /* 3x3 Matrix Red Offset */ + case 0xe0: /* 3x3 Matrix Red Offset */ s->matrix_r = value; break; - case 0xe2: /* 3x3 Matrix Green Offset */ + case 0xe2: /* 3x3 Matrix Green Offset */ s->matrix_g = value; break; - case 0xe4: /* 3x3 Matrix Blue Offset */ + case 0xe4: /* 3x3 Matrix Blue Offset */ s->matrix_b = value; break; - case 0xe6: /* Power-save */ + case 0xe6: /* Power-save */ s->pm = value & 0x83; if (value & s->mode & 1) fprintf(stderr, "%s: The display must be disabled before entering " "Standby Mode\n", __func__); break; - case 0xe8: /* Non-display Period Control / Status */ + case 0xe8: /* Non-display Period Control / Status */ s->status = value & 0x1b; break; - case 0xea: /* RGB Interface Control */ + case 0xea: /* RGB Interface Control */ s->rgbgpio_dir = value & 0x8f; break; - case 0xec: /* RGB Interface Status */ + case 0xec: /* RGB Interface Status */ s->rgbgpio = value & 0xcf; break; - case 0xee: /* General-purpose IO Pins Configuration */ + case 0xee: /* General-purpose IO Pins Configuration */ s->gpio_dir = value; break; - case 0xf0: /* General-purpose IO Pins Status / Control */ + case 0xf0: /* General-purpose IO Pins Status / Control */ s->gpio = value; break; - case 0xf2: /* GPIO Positive Edge Interrupt Trigger */ + case 0xf2: /* GPIO Positive Edge Interrupt Trigger */ s->gpio_edge[0] = value; break; - case 0xf4: /* GPIO Negative Edge Interrupt Trigger */ + case 0xf4: /* GPIO Negative Edge Interrupt Trigger */ s->gpio_edge[1] = value; break; - case 0xf6: /* GPIO Interrupt Status */ + case 0xf6: /* GPIO Interrupt Status */ s->gpio_irq &= value; break; - case 0xf8: /* GPIO Pull-down Control */ + case 0xf8: /* GPIO Pull-down Control */ s->gpio_pdown = value; break; @@ -1007,7 +1007,7 @@ static const GraphicHwOps blizzard_ops = { void *s1d13745_init(qemu_irq gpio_int) { - BlizzardState *s = (BlizzardState *) g_malloc0(sizeof(*s)); + BlizzardState *s = g_malloc0(sizeof(*s)); DisplaySurface *surface; s->fb = g_malloc(0x180000); diff --git a/hw/display/cg3.c b/hw/display/cg3.c index 4b7e78d919..2e9656ae1c 100644 --- a/hw/display/cg3.c +++ b/hw/display/cg3.c @@ -24,7 +24,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qapi/error.h" #include "qemu/error-report.h" diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c index fdca6ca659..6e8c747c46 100644 --- a/hw/display/cirrus_vga.c +++ b/hw/display/cirrus_vga.c @@ -45,6 +45,7 @@ #include "ui/pixel_ops.h" #include "cirrus_vga_internal.h" #include "qom/object.h" +#include "ui/console.h" /* * TODO: @@ -76,12 +77,12 @@ #define CIRRUS_MEMSIZE_512k 0x08 #define CIRRUS_MEMSIZE_1M 0x10 #define CIRRUS_MEMSIZE_2M 0x18 -#define CIRRUS_MEMFLAGS_BANKSWITCH 0x80 // bank switching is enabled. +#define CIRRUS_MEMFLAGS_BANKSWITCH 0x80 // bank switching is enabled. // sequencer 0x12 #define CIRRUS_CURSOR_SHOW 0x01 #define CIRRUS_CURSOR_HIDDENPEL 0x02 -#define CIRRUS_CURSOR_LARGE 0x04 // 64x64 if set, 32x32 if clear +#define CIRRUS_CURSOR_LARGE 0x04 // 64x64 if set, 32x32 if clear // sequencer 0x17 #define CIRRUS_BUSTYPE_VLBFAST 0x10 @@ -89,12 +90,12 @@ #define CIRRUS_BUSTYPE_VLBSLOW 0x30 #define CIRRUS_BUSTYPE_ISA 0x38 #define CIRRUS_MMIO_ENABLE 0x04 -#define CIRRUS_MMIO_USE_PCIADDR 0x40 // 0xb8000 if cleared. +#define CIRRUS_MMIO_USE_PCIADDR 0x40 // 0xb8000 if cleared. 
#define CIRRUS_MEMSIZEEXT_DOUBLE 0x80 // control 0x0b #define CIRRUS_BANKING_DUAL 0x01 -#define CIRRUS_BANKING_GRANULARITY_16K 0x20 // set:16k, clear:4k +#define CIRRUS_BANKING_GRANULARITY_16K 0x20 // set:16k, clear:4k // control 0x30 #define CIRRUS_BLTMODE_BACKWARDS 0x01 @@ -143,35 +144,35 @@ #define CIRRUS_BLTMODEEXT_DWORDGRANULARITY 0x01 // memory-mapped IO -#define CIRRUS_MMIO_BLTBGCOLOR 0x00 // dword -#define CIRRUS_MMIO_BLTFGCOLOR 0x04 // dword -#define CIRRUS_MMIO_BLTWIDTH 0x08 // word -#define CIRRUS_MMIO_BLTHEIGHT 0x0a // word -#define CIRRUS_MMIO_BLTDESTPITCH 0x0c // word -#define CIRRUS_MMIO_BLTSRCPITCH 0x0e // word -#define CIRRUS_MMIO_BLTDESTADDR 0x10 // dword -#define CIRRUS_MMIO_BLTSRCADDR 0x14 // dword -#define CIRRUS_MMIO_BLTWRITEMASK 0x17 // byte -#define CIRRUS_MMIO_BLTMODE 0x18 // byte -#define CIRRUS_MMIO_BLTROP 0x1a // byte -#define CIRRUS_MMIO_BLTMODEEXT 0x1b // byte -#define CIRRUS_MMIO_BLTTRANSPARENTCOLOR 0x1c // word? -#define CIRRUS_MMIO_BLTTRANSPARENTCOLORMASK 0x20 // word? -#define CIRRUS_MMIO_LINEARDRAW_START_X 0x24 // word -#define CIRRUS_MMIO_LINEARDRAW_START_Y 0x26 // word -#define CIRRUS_MMIO_LINEARDRAW_END_X 0x28 // word -#define CIRRUS_MMIO_LINEARDRAW_END_Y 0x2a // word -#define CIRRUS_MMIO_LINEARDRAW_LINESTYLE_INC 0x2c // byte -#define CIRRUS_MMIO_LINEARDRAW_LINESTYLE_ROLLOVER 0x2d // byte -#define CIRRUS_MMIO_LINEARDRAW_LINESTYLE_MASK 0x2e // byte -#define CIRRUS_MMIO_LINEARDRAW_LINESTYLE_ACCUM 0x2f // byte -#define CIRRUS_MMIO_BRESENHAM_K1 0x30 // word -#define CIRRUS_MMIO_BRESENHAM_K3 0x32 // word -#define CIRRUS_MMIO_BRESENHAM_ERROR 0x34 // word -#define CIRRUS_MMIO_BRESENHAM_DELTA_MAJOR 0x36 // word -#define CIRRUS_MMIO_BRESENHAM_DIRECTION 0x38 // byte -#define CIRRUS_MMIO_LINEDRAW_MODE 0x39 // byte -#define CIRRUS_MMIO_BLTSTATUS 0x40 // byte +#define CIRRUS_MMIO_BLTBGCOLOR 0x00 // dword +#define CIRRUS_MMIO_BLTFGCOLOR 0x04 // dword +#define CIRRUS_MMIO_BLTWIDTH 0x08 // word +#define CIRRUS_MMIO_BLTHEIGHT 0x0a // word +#define CIRRUS_MMIO_BLTDESTPITCH 0x0c // word +#define CIRRUS_MMIO_BLTSRCPITCH 0x0e // word +#define CIRRUS_MMIO_BLTDESTADDR 0x10 // dword +#define CIRRUS_MMIO_BLTSRCADDR 0x14 // dword +#define CIRRUS_MMIO_BLTWRITEMASK 0x17 // byte +#define CIRRUS_MMIO_BLTMODE 0x18 // byte +#define CIRRUS_MMIO_BLTROP 0x1a // byte +#define CIRRUS_MMIO_BLTMODEEXT 0x1b // byte +#define CIRRUS_MMIO_BLTTRANSPARENTCOLOR 0x1c // word? +#define CIRRUS_MMIO_BLTTRANSPARENTCOLORMASK 0x20 // word? 
+#define CIRRUS_MMIO_LINEARDRAW_START_X 0x24 // word +#define CIRRUS_MMIO_LINEARDRAW_START_Y 0x26 // word +#define CIRRUS_MMIO_LINEARDRAW_END_X 0x28 // word +#define CIRRUS_MMIO_LINEARDRAW_END_Y 0x2a // word +#define CIRRUS_MMIO_LINEARDRAW_LINESTYLE_INC 0x2c // byte +#define CIRRUS_MMIO_LINEARDRAW_LINESTYLE_ROLLOVER 0x2d // byte +#define CIRRUS_MMIO_LINEARDRAW_LINESTYLE_MASK 0x2e // byte +#define CIRRUS_MMIO_LINEARDRAW_LINESTYLE_ACCUM 0x2f // byte +#define CIRRUS_MMIO_BRESENHAM_K1 0x30 // word +#define CIRRUS_MMIO_BRESENHAM_K3 0x32 // word +#define CIRRUS_MMIO_BRESENHAM_ERROR 0x34 // word +#define CIRRUS_MMIO_BRESENHAM_DELTA_MAJOR 0x36 // word +#define CIRRUS_MMIO_BRESENHAM_DIRECTION 0x38 // byte +#define CIRRUS_MMIO_LINEDRAW_MODE 0x39 // byte +#define CIRRUS_MMIO_BLTSTATUS 0x40 // byte #define CIRRUS_PNPMMIO_SIZE 0x1000 @@ -628,8 +629,8 @@ static inline void cirrus_bitblt_bgcol(CirrusVGAState *s) } static void cirrus_invalidate_region(CirrusVGAState * s, int off_begin, - int off_pitch, int bytesperline, - int lines) + int off_pitch, int bytesperline, + int lines) { int y; int off_cur; @@ -708,8 +709,8 @@ static int cirrus_bitblt_solidfill(CirrusVGAState *s, int blt_rop) s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, - s->cirrus_blt_dstpitch, s->cirrus_blt_width, - s->cirrus_blt_height); + s->cirrus_blt_dstpitch, s->cirrus_blt_width, + s->cirrus_blt_height); cirrus_bitblt_reset(s); return 1; } @@ -773,8 +774,8 @@ static int cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h) (*s->cirrus_rop) (s, s->cirrus_blt_dstaddr, s->cirrus_blt_srcaddr, - s->cirrus_blt_dstpitch, s->cirrus_blt_srcpitch, - s->cirrus_blt_width, s->cirrus_blt_height); + s->cirrus_blt_dstpitch, s->cirrus_blt_srcpitch, + s->cirrus_blt_width, s->cirrus_blt_height); if (notify) { dpy_gfx_update(s->vga.con, dx, dy, @@ -786,8 +787,8 @@ static int cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h) changed since qemu_console_copy implies this */ cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, - s->cirrus_blt_dstpitch, s->cirrus_blt_width, - s->cirrus_blt_height); + s->cirrus_blt_dstpitch, s->cirrus_blt_width, + s->cirrus_blt_height); return 1; } @@ -834,7 +835,7 @@ static void cirrus_bitblt_cputovideo_next(CirrusVGAState * s) word alignment, so we keep them for the next line */ /* XXX: keep alignment to speed up transfer */ end_ptr = s->cirrus_bltbuf + s->cirrus_blt_srcpitch; - copy_count = s->cirrus_srcptr_end - end_ptr; + copy_count = MIN(s->cirrus_srcptr_end - end_ptr, CIRRUS_BLTBUFSIZE); memmove(s->cirrus_bltbuf, end_ptr, copy_count); s->cirrus_srcptr = s->cirrus_bltbuf + copy_count; s->cirrus_srcptr_end = s->cirrus_bltbuf + s->cirrus_blt_srcpitch; @@ -854,7 +855,7 @@ static void cirrus_bitblt_reset(CirrusVGAState * s) int need_update; s->vga.gr[0x31] &= - ~(CIRRUS_BLT_START | CIRRUS_BLT_BUSY | CIRRUS_BLT_FIFOUSED); + ~(CIRRUS_BLT_START | CIRRUS_BLT_BUSY | CIRRUS_BLT_FIFOUSED); need_update = s->cirrus_srcptr != &s->cirrus_bltbuf[0] || s->cirrus_srcptr_end != &s->cirrus_bltbuf[0]; s->cirrus_srcptr = &s->cirrus_bltbuf[0]; @@ -878,24 +879,24 @@ static int cirrus_bitblt_cputovideo(CirrusVGAState * s) s->cirrus_srcptr_end = &s->cirrus_bltbuf[0]; if (s->cirrus_blt_mode & CIRRUS_BLTMODE_PATTERNCOPY) { - if (s->cirrus_blt_mode & CIRRUS_BLTMODE_COLOREXPAND) { - s->cirrus_blt_srcpitch = 8; - } else { + if (s->cirrus_blt_mode & CIRRUS_BLTMODE_COLOREXPAND) { + s->cirrus_blt_srcpitch = 8; + } else { /* XXX: check for 24 bpp */ - 
s->cirrus_blt_srcpitch = 8 * 8 * s->cirrus_blt_pixelwidth; - } - s->cirrus_srccounter = s->cirrus_blt_srcpitch; + s->cirrus_blt_srcpitch = 8 * 8 * s->cirrus_blt_pixelwidth; + } + s->cirrus_srccounter = s->cirrus_blt_srcpitch; } else { - if (s->cirrus_blt_mode & CIRRUS_BLTMODE_COLOREXPAND) { + if (s->cirrus_blt_mode & CIRRUS_BLTMODE_COLOREXPAND) { w = s->cirrus_blt_width / s->cirrus_blt_pixelwidth; if (s->cirrus_blt_modeext & CIRRUS_BLTMODEEXT_DWORDGRANULARITY) s->cirrus_blt_srcpitch = ((w + 31) >> 5); else s->cirrus_blt_srcpitch = ((w + 7) >> 3); - } else { + } else { /* always align input size to 32 bits */ - s->cirrus_blt_srcpitch = (s->cirrus_blt_width + 3) & ~3; - } + s->cirrus_blt_srcpitch = (s->cirrus_blt_width + 3) & ~3; + } s->cirrus_srccounter = s->cirrus_blt_srcpitch * s->cirrus_blt_height; } @@ -921,12 +922,12 @@ static int cirrus_bitblt_videotovideo(CirrusVGAState * s) int ret; if (s->cirrus_blt_mode & CIRRUS_BLTMODE_PATTERNCOPY) { - ret = cirrus_bitblt_videotovideo_patterncopy(s); + ret = cirrus_bitblt_videotovideo_patterncopy(s); } else { - ret = cirrus_bitblt_videotovideo_copy(s); + ret = cirrus_bitblt_videotovideo_copy(s); } if (ret) - cirrus_bitblt_reset(s); + cirrus_bitblt_reset(s); return ret; } @@ -945,9 +946,9 @@ static void cirrus_bitblt_start(CirrusVGAState * s) s->cirrus_blt_dstpitch = (s->vga.gr[0x24] | (s->vga.gr[0x25] << 8)); s->cirrus_blt_srcpitch = (s->vga.gr[0x26] | (s->vga.gr[0x27] << 8)); s->cirrus_blt_dstaddr = - (s->vga.gr[0x28] | (s->vga.gr[0x29] << 8) | (s->vga.gr[0x2a] << 16)); + (s->vga.gr[0x28] | (s->vga.gr[0x29] << 8) | (s->vga.gr[0x2a] << 16)); s->cirrus_blt_srcaddr = - (s->vga.gr[0x2c] | (s->vga.gr[0x2d] << 8) | (s->vga.gr[0x2e] << 16)); + (s->vga.gr[0x2c] | (s->vga.gr[0x2d] << 8) | (s->vga.gr[0x2e] << 16)); s->cirrus_blt_mode = s->vga.gr[0x30]; s->cirrus_blt_modeext = s->vga.gr[0x33]; blt_rop = s->vga.gr[0x32]; @@ -968,31 +969,31 @@ static void cirrus_bitblt_start(CirrusVGAState * s) switch (s->cirrus_blt_mode & CIRRUS_BLTMODE_PIXELWIDTHMASK) { case CIRRUS_BLTMODE_PIXELWIDTH8: - s->cirrus_blt_pixelwidth = 1; - break; + s->cirrus_blt_pixelwidth = 1; + break; case CIRRUS_BLTMODE_PIXELWIDTH16: - s->cirrus_blt_pixelwidth = 2; - break; + s->cirrus_blt_pixelwidth = 2; + break; case CIRRUS_BLTMODE_PIXELWIDTH24: - s->cirrus_blt_pixelwidth = 3; - break; + s->cirrus_blt_pixelwidth = 3; + break; case CIRRUS_BLTMODE_PIXELWIDTH32: - s->cirrus_blt_pixelwidth = 4; - break; + s->cirrus_blt_pixelwidth = 4; + break; default: qemu_log_mask(LOG_GUEST_ERROR, "cirrus: bitblt - pixel width is unknown\n"); - goto bitblt_ignore; + goto bitblt_ignore; } s->cirrus_blt_mode &= ~CIRRUS_BLTMODE_PIXELWIDTHMASK; if ((s-> - cirrus_blt_mode & (CIRRUS_BLTMODE_MEMSYSSRC | - CIRRUS_BLTMODE_MEMSYSDEST)) - == (CIRRUS_BLTMODE_MEMSYSSRC | CIRRUS_BLTMODE_MEMSYSDEST)) { + cirrus_blt_mode & (CIRRUS_BLTMODE_MEMSYSSRC | + CIRRUS_BLTMODE_MEMSYSDEST)) + == (CIRRUS_BLTMODE_MEMSYSSRC | CIRRUS_BLTMODE_MEMSYSDEST)) { qemu_log_mask(LOG_UNIMP, "cirrus: bitblt - memory-to-memory copy requested\n"); - goto bitblt_ignore; + goto bitblt_ignore; } if ((s->cirrus_blt_modeext & CIRRUS_BLTMODEEXT_SOLIDFILL) && @@ -1036,30 +1037,30 @@ static void cirrus_bitblt_start(CirrusVGAState * s) s->cirrus_rop = cirrus_patternfill[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1]; } } else { - if (s->cirrus_blt_mode & CIRRUS_BLTMODE_TRANSPARENTCOMP) { - if (s->cirrus_blt_pixelwidth > 2) { + if (s->cirrus_blt_mode & CIRRUS_BLTMODE_TRANSPARENTCOMP) { + if (s->cirrus_blt_pixelwidth > 2) { qemu_log_mask(LOG_GUEST_ERROR, 
"cirrus: src transparent without colorexpand " "must be 8bpp or 16bpp\n"); - goto bitblt_ignore; - } - if (s->cirrus_blt_mode & CIRRUS_BLTMODE_BACKWARDS) { - s->cirrus_blt_dstpitch = -s->cirrus_blt_dstpitch; - s->cirrus_blt_srcpitch = -s->cirrus_blt_srcpitch; - s->cirrus_rop = cirrus_bkwd_transp_rop[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1]; - } else { - s->cirrus_rop = cirrus_fwd_transp_rop[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1]; - } - } else { - if (s->cirrus_blt_mode & CIRRUS_BLTMODE_BACKWARDS) { - s->cirrus_blt_dstpitch = -s->cirrus_blt_dstpitch; - s->cirrus_blt_srcpitch = -s->cirrus_blt_srcpitch; - s->cirrus_rop = cirrus_bkwd_rop[rop_to_index[blt_rop]]; - } else { - s->cirrus_rop = cirrus_fwd_rop[rop_to_index[blt_rop]]; - } - } - } + goto bitblt_ignore; + } + if (s->cirrus_blt_mode & CIRRUS_BLTMODE_BACKWARDS) { + s->cirrus_blt_dstpitch = -s->cirrus_blt_dstpitch; + s->cirrus_blt_srcpitch = -s->cirrus_blt_srcpitch; + s->cirrus_rop = cirrus_bkwd_transp_rop[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1]; + } else { + s->cirrus_rop = cirrus_fwd_transp_rop[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1]; + } + } else { + if (s->cirrus_blt_mode & CIRRUS_BLTMODE_BACKWARDS) { + s->cirrus_blt_dstpitch = -s->cirrus_blt_dstpitch; + s->cirrus_blt_srcpitch = -s->cirrus_blt_srcpitch; + s->cirrus_rop = cirrus_bkwd_rop[rop_to_index[blt_rop]]; + } else { + s->cirrus_rop = cirrus_fwd_rop[rop_to_index[blt_rop]]; + } + } + } // setup bitblt engine. if (s->cirrus_blt_mode & CIRRUS_BLTMODE_MEMSYSSRC) { if (!cirrus_bitblt_cputovideo(s)) @@ -1085,11 +1086,11 @@ static void cirrus_write_bitblt(CirrusVGAState * s, unsigned reg_value) s->vga.gr[0x31] = reg_value; if (((old_value & CIRRUS_BLT_RESET) != 0) && - ((reg_value & CIRRUS_BLT_RESET) == 0)) { - cirrus_bitblt_reset(s); + ((reg_value & CIRRUS_BLT_RESET) == 0)) { + cirrus_bitblt_reset(s); } else if (((old_value & CIRRUS_BLT_START) == 0) && - ((reg_value & CIRRUS_BLT_START) != 0)) { - cirrus_bitblt_start(s); + ((reg_value & CIRRUS_BLT_START) != 0)) { + cirrus_bitblt_start(s); } } @@ -1109,15 +1110,15 @@ static void cirrus_get_offsets(VGACommonState *s1, uint32_t start_addr, line_offset, line_compare; line_offset = s->vga.cr[0x13] - | ((s->vga.cr[0x1b] & 0x10) << 4); + | ((s->vga.cr[0x1b] & 0x10) << 4); line_offset <<= 3; *pline_offset = line_offset; start_addr = (s->vga.cr[0x0c] << 8) - | s->vga.cr[0x0d] - | ((s->vga.cr[0x1b] & 0x01) << 16) - | ((s->vga.cr[0x1b] & 0x0c) << 15) - | ((s->vga.cr[0x1d] & 0x80) << 12); + | s->vga.cr[0x0d] + | ((s->vga.cr[0x1b] & 0x01) << 16) + | ((s->vga.cr[0x1b] & 0x0c) << 15) + | ((s->vga.cr[0x1d] & 0x80) << 12); *pstart_addr = start_addr; line_compare = s->vga.cr[0x18] | @@ -1132,17 +1133,17 @@ static uint32_t cirrus_get_bpp16_depth(CirrusVGAState * s) switch (s->cirrus_hidden_dac_data & 0xf) { case 0: - ret = 15; - break; /* Sierra HiColor */ + ret = 15; + break; /* Sierra HiColor */ case 1: - ret = 16; - break; /* XGA HiColor */ + ret = 16; + break; /* XGA HiColor */ default: qemu_log_mask(LOG_GUEST_ERROR, "cirrus: invalid DAC value 0x%x in 16bpp\n", (s->cirrus_hidden_dac_data & 0xf)); - ret = 15; /* XXX */ - break; + ret = 15; /* XXX */ + break; } return ret; } @@ -1153,33 +1154,33 @@ static int cirrus_get_bpp(VGACommonState *s1) uint32_t ret = 8; if ((s->vga.sr[0x07] & 0x01) != 0) { - /* Cirrus SVGA */ - switch (s->vga.sr[0x07] & CIRRUS_SR7_BPP_MASK) { - case CIRRUS_SR7_BPP_8: - ret = 8; - break; - case CIRRUS_SR7_BPP_16_DOUBLEVCLK: - ret = cirrus_get_bpp16_depth(s); - break; - case 
CIRRUS_SR7_BPP_24: - ret = 24; - break; - case CIRRUS_SR7_BPP_16: - ret = cirrus_get_bpp16_depth(s); - break; - case CIRRUS_SR7_BPP_32: - ret = 32; - break; - default: + /* Cirrus SVGA */ + switch (s->vga.sr[0x07] & CIRRUS_SR7_BPP_MASK) { + case CIRRUS_SR7_BPP_8: + ret = 8; + break; + case CIRRUS_SR7_BPP_16_DOUBLEVCLK: + ret = cirrus_get_bpp16_depth(s); + break; + case CIRRUS_SR7_BPP_24: + ret = 24; + break; + case CIRRUS_SR7_BPP_16: + ret = cirrus_get_bpp16_depth(s); + break; + case CIRRUS_SR7_BPP_32: + ret = 32; + break; + default: #ifdef DEBUG_CIRRUS - printf("cirrus: unknown bpp - sr7=%x\n", s->vga.sr[0x7]); + printf("cirrus: unknown bpp - sr7=%x\n", s->vga.sr[0x7]); #endif - ret = 8; - break; - } + ret = 8; + break; + } } else { - /* VGA */ - ret = 0; + /* VGA */ + ret = 0; } return ret; @@ -1212,36 +1213,36 @@ static void cirrus_update_bank_ptr(CirrusVGAState * s, unsigned bank_index) unsigned offset; unsigned limit; - if ((s->vga.gr[0x0b] & 0x01) != 0) /* dual bank */ - offset = s->vga.gr[0x09 + bank_index]; - else /* single bank */ - offset = s->vga.gr[0x09]; + if ((s->vga.gr[0x0b] & 0x01) != 0) /* dual bank */ + offset = s->vga.gr[0x09 + bank_index]; + else /* single bank */ + offset = s->vga.gr[0x09]; if ((s->vga.gr[0x0b] & 0x20) != 0) - offset <<= 14; + offset <<= 14; else - offset <<= 12; + offset <<= 12; if (s->real_vram_size <= offset) - limit = 0; + limit = 0; else - limit = s->real_vram_size - offset; + limit = s->real_vram_size - offset; if (((s->vga.gr[0x0b] & 0x01) == 0) && (bank_index != 0)) { - if (limit > 0x8000) { - offset += 0x8000; - limit -= 0x8000; - } else { - limit = 0; - } + if (limit > 0x8000) { + offset += 0x8000; + limit -= 0x8000; + } else { + limit = 0; + } } if (limit > 0) { - s->cirrus_bank_base[bank_index] = offset; - s->cirrus_bank_limit[bank_index] = limit; + s->cirrus_bank_base[bank_index] = offset; + s->cirrus_bank_limit[bank_index] = limit; } else { - s->cirrus_bank_base[bank_index] = 0; - s->cirrus_bank_limit[bank_index] = 0; + s->cirrus_bank_base[bank_index] = 0; + s->cirrus_bank_limit[bank_index] = 0; } } @@ -1254,148 +1255,148 @@ static void cirrus_update_bank_ptr(CirrusVGAState * s, unsigned bank_index) static int cirrus_vga_read_sr(CirrusVGAState * s) { switch (s->vga.sr_index) { - case 0x00: // Standard VGA - case 0x01: // Standard VGA - case 0x02: // Standard VGA - case 0x03: // Standard VGA - case 0x04: // Standard VGA - return s->vga.sr[s->vga.sr_index]; - case 0x06: // Unlock Cirrus extensions - return s->vga.sr[s->vga.sr_index]; + case 0x00: // Standard VGA + case 0x01: // Standard VGA + case 0x02: // Standard VGA + case 0x03: // Standard VGA + case 0x04: // Standard VGA + return s->vga.sr[s->vga.sr_index]; + case 0x06: // Unlock Cirrus extensions + return s->vga.sr[s->vga.sr_index]; case 0x10: case 0x30: case 0x50: - case 0x70: // Graphics Cursor X + case 0x70: // Graphics Cursor X case 0x90: case 0xb0: case 0xd0: - case 0xf0: // Graphics Cursor X - return s->vga.sr[0x10]; + case 0xf0: // Graphics Cursor X + return s->vga.sr[0x10]; case 0x11: case 0x31: case 0x51: - case 0x71: // Graphics Cursor Y + case 0x71: // Graphics Cursor Y case 0x91: case 0xb1: case 0xd1: - case 0xf1: // Graphics Cursor Y - return s->vga.sr[0x11]; - case 0x05: // ??? 
- case 0x07: // Extended Sequencer Mode - case 0x08: // EEPROM Control - case 0x09: // Scratch Register 0 - case 0x0a: // Scratch Register 1 - case 0x0b: // VCLK 0 - case 0x0c: // VCLK 1 - case 0x0d: // VCLK 2 - case 0x0e: // VCLK 3 - case 0x0f: // DRAM Control - case 0x12: // Graphics Cursor Attribute - case 0x13: // Graphics Cursor Pattern Address - case 0x14: // Scratch Register 2 - case 0x15: // Scratch Register 3 - case 0x16: // Performance Tuning Register - case 0x17: // Configuration Readback and Extended Control - case 0x18: // Signature Generator Control - case 0x19: // Signal Generator Result - case 0x1a: // Signal Generator Result - case 0x1b: // VCLK 0 Denominator & Post - case 0x1c: // VCLK 1 Denominator & Post - case 0x1d: // VCLK 2 Denominator & Post - case 0x1e: // VCLK 3 Denominator & Post - case 0x1f: // BIOS Write Enable and MCLK select + case 0xf1: // Graphics Cursor Y + return s->vga.sr[0x11]; + case 0x05: // ??? + case 0x07: // Extended Sequencer Mode + case 0x08: // EEPROM Control + case 0x09: // Scratch Register 0 + case 0x0a: // Scratch Register 1 + case 0x0b: // VCLK 0 + case 0x0c: // VCLK 1 + case 0x0d: // VCLK 2 + case 0x0e: // VCLK 3 + case 0x0f: // DRAM Control + case 0x12: // Graphics Cursor Attribute + case 0x13: // Graphics Cursor Pattern Address + case 0x14: // Scratch Register 2 + case 0x15: // Scratch Register 3 + case 0x16: // Performance Tuning Register + case 0x17: // Configuration Readback and Extended Control + case 0x18: // Signature Generator Control + case 0x19: // Signal Generator Result + case 0x1a: // Signal Generator Result + case 0x1b: // VCLK 0 Denominator & Post + case 0x1c: // VCLK 1 Denominator & Post + case 0x1d: // VCLK 2 Denominator & Post + case 0x1e: // VCLK 3 Denominator & Post + case 0x1f: // BIOS Write Enable and MCLK select #ifdef DEBUG_CIRRUS - printf("cirrus: handled inport sr_index %02x\n", s->vga.sr_index); + printf("cirrus: handled inport sr_index %02x\n", s->vga.sr_index); #endif - return s->vga.sr[s->vga.sr_index]; + return s->vga.sr[s->vga.sr_index]; default: qemu_log_mask(LOG_GUEST_ERROR, "cirrus: inport sr_index 0x%02x\n", s->vga.sr_index); - return 0xff; + return 0xff; } } static void cirrus_vga_write_sr(CirrusVGAState * s, uint32_t val) { switch (s->vga.sr_index) { - case 0x00: // Standard VGA - case 0x01: // Standard VGA - case 0x02: // Standard VGA - case 0x03: // Standard VGA - case 0x04: // Standard VGA - s->vga.sr[s->vga.sr_index] = val & sr_mask[s->vga.sr_index]; - if (s->vga.sr_index == 1) + case 0x00: // Standard VGA + case 0x01: // Standard VGA + case 0x02: // Standard VGA + case 0x03: // Standard VGA + case 0x04: // Standard VGA + s->vga.sr[s->vga.sr_index] = val & sr_mask[s->vga.sr_index]; + if (s->vga.sr_index == 1) s->vga.update_retrace_info(&s->vga); break; - case 0x06: // Unlock Cirrus extensions - val &= 0x17; - if (val == 0x12) { - s->vga.sr[s->vga.sr_index] = 0x12; - } else { - s->vga.sr[s->vga.sr_index] = 0x0f; - } - break; + case 0x06: // Unlock Cirrus extensions + val &= 0x17; + if (val == 0x12) { + s->vga.sr[s->vga.sr_index] = 0x12; + } else { + s->vga.sr[s->vga.sr_index] = 0x0f; + } + break; case 0x10: case 0x30: case 0x50: - case 0x70: // Graphics Cursor X + case 0x70: // Graphics Cursor X case 0x90: case 0xb0: case 0xd0: - case 0xf0: // Graphics Cursor X - s->vga.sr[0x10] = val; + case 0xf0: // Graphics Cursor X + s->vga.sr[0x10] = val; s->vga.hw_cursor_x = (val << 3) | (s->vga.sr_index >> 5); - break; + break; case 0x11: case 0x31: case 0x51: - case 0x71: // Graphics Cursor Y + case 0x71: // 
Graphics Cursor Y case 0x91: case 0xb1: case 0xd1: - case 0xf1: // Graphics Cursor Y - s->vga.sr[0x11] = val; + case 0xf1: // Graphics Cursor Y + s->vga.sr[0x11] = val; s->vga.hw_cursor_y = (val << 3) | (s->vga.sr_index >> 5); - break; - case 0x07: // Extended Sequencer Mode + break; + case 0x07: // Extended Sequencer Mode cirrus_update_memory_access(s); /* fall through */ - case 0x08: // EEPROM Control - case 0x09: // Scratch Register 0 - case 0x0a: // Scratch Register 1 - case 0x0b: // VCLK 0 - case 0x0c: // VCLK 1 - case 0x0d: // VCLK 2 - case 0x0e: // VCLK 3 - case 0x0f: // DRAM Control - case 0x13: // Graphics Cursor Pattern Address - case 0x14: // Scratch Register 2 - case 0x15: // Scratch Register 3 - case 0x16: // Performance Tuning Register - case 0x18: // Signature Generator Control - case 0x19: // Signature Generator Result - case 0x1a: // Signature Generator Result - case 0x1b: // VCLK 0 Denominator & Post - case 0x1c: // VCLK 1 Denominator & Post - case 0x1d: // VCLK 2 Denominator & Post - case 0x1e: // VCLK 3 Denominator & Post - case 0x1f: // BIOS Write Enable and MCLK select - s->vga.sr[s->vga.sr_index] = val; + case 0x08: // EEPROM Control + case 0x09: // Scratch Register 0 + case 0x0a: // Scratch Register 1 + case 0x0b: // VCLK 0 + case 0x0c: // VCLK 1 + case 0x0d: // VCLK 2 + case 0x0e: // VCLK 3 + case 0x0f: // DRAM Control + case 0x13: // Graphics Cursor Pattern Address + case 0x14: // Scratch Register 2 + case 0x15: // Scratch Register 3 + case 0x16: // Performance Tuning Register + case 0x18: // Signature Generator Control + case 0x19: // Signature Generator Result + case 0x1a: // Signature Generator Result + case 0x1b: // VCLK 0 Denominator & Post + case 0x1c: // VCLK 1 Denominator & Post + case 0x1d: // VCLK 2 Denominator & Post + case 0x1e: // VCLK 3 Denominator & Post + case 0x1f: // BIOS Write Enable and MCLK select + s->vga.sr[s->vga.sr_index] = val; #ifdef DEBUG_CIRRUS - printf("cirrus: handled outport sr_index %02x, sr_value %02x\n", - s->vga.sr_index, val); + printf("cirrus: handled outport sr_index %02x, sr_value %02x\n", + s->vga.sr_index, val); #endif - break; - case 0x12: // Graphics Cursor Attribute - s->vga.sr[0x12] = val; + break; + case 0x12: // Graphics Cursor Attribute + s->vga.sr[0x12] = val; s->vga.force_shadow = !!(val & CIRRUS_CURSOR_SHOW); #ifdef DEBUG_CIRRUS printf("cirrus: cursor ctl SR12=%02x (force shadow: %d)\n", val, s->vga.force_shadow); #endif break; - case 0x17: // Configuration Readback and Extended Control - s->vga.sr[s->vga.sr_index] = (s->vga.sr[s->vga.sr_index] & 0x38) + case 0x17: // Configuration Readback and Extended Control + s->vga.sr[s->vga.sr_index] = (s->vga.sr[s->vga.sr_index] & 0x38) | (val & 0xc7); cirrus_update_memory_access(s); break; @@ -1403,7 +1404,7 @@ static void cirrus_vga_write_sr(CirrusVGAState * s, uint32_t val) qemu_log_mask(LOG_GUEST_ERROR, "cirrus: outport sr_index 0x%02x, sr_value 0x%02x\n", s->vga.sr_index, val); - break; + break; } } @@ -1425,9 +1426,9 @@ static int cirrus_read_hidden_dac(CirrusVGAState * s) static void cirrus_write_hidden_dac(CirrusVGAState * s, int reg_value) { if (s->cirrus_hidden_dac_lockindex == 4) { - s->cirrus_hidden_dac_data = reg_value; + s->cirrus_hidden_dac_data = reg_value; #if defined(DEBUG_CIRRUS) - printf("cirrus: outport hidden DAC, value %02x\n", reg_value); + printf("cirrus: outport hidden DAC, value %02x\n", reg_value); #endif } s->cirrus_hidden_dac_lockindex = 0; @@ -1450,8 +1451,8 @@ static int cirrus_vga_read_palette(CirrusVGAState * s) val = 
s->vga.palette[s->vga.dac_read_index * 3 + s->vga.dac_sub_index]; } if (++s->vga.dac_sub_index == 3) { - s->vga.dac_sub_index = 0; - s->vga.dac_read_index++; + s->vga.dac_sub_index = 0; + s->vga.dac_read_index++; } return val; } @@ -1467,8 +1468,8 @@ static void cirrus_vga_write_palette(CirrusVGAState * s, int reg_value) memcpy(&s->vga.palette[s->vga.dac_write_index * 3], s->vga.dac_cache, 3); } /* XXX update cursor */ - s->vga.dac_sub_index = 0; - s->vga.dac_write_index++; + s->vga.dac_sub_index = 0; + s->vga.dac_write_index++; } } @@ -1485,24 +1486,24 @@ static int cirrus_vga_read_gr(CirrusVGAState * s, unsigned reg_index) return s->cirrus_shadow_gr0; case 0x01: // Standard VGA, FGCOLOR 0x000000ff return s->cirrus_shadow_gr1; - case 0x02: // Standard VGA - case 0x03: // Standard VGA - case 0x04: // Standard VGA - case 0x06: // Standard VGA - case 0x07: // Standard VGA - case 0x08: // Standard VGA + case 0x02: // Standard VGA + case 0x03: // Standard VGA + case 0x04: // Standard VGA + case 0x06: // Standard VGA + case 0x07: // Standard VGA + case 0x08: // Standard VGA return s->vga.gr[s->vga.gr_index]; - case 0x05: // Standard VGA, Cirrus extended mode + case 0x05: // Standard VGA, Cirrus extended mode default: - break; + break; } if (reg_index < 0x3a) { - return s->vga.gr[reg_index]; + return s->vga.gr[reg_index]; } else { qemu_log_mask(LOG_GUEST_ERROR, "cirrus: inport gr_index 0x%02x\n", reg_index); - return 0xff; + return 0xff; } } @@ -1511,87 +1512,87 @@ cirrus_vga_write_gr(CirrusVGAState * s, unsigned reg_index, int reg_value) { trace_vga_cirrus_write_gr(reg_index, reg_value); switch (reg_index) { - case 0x00: // Standard VGA, BGCOLOR 0x000000ff - s->vga.gr[reg_index] = reg_value & gr_mask[reg_index]; - s->cirrus_shadow_gr0 = reg_value; - break; - case 0x01: // Standard VGA, FGCOLOR 0x000000ff - s->vga.gr[reg_index] = reg_value & gr_mask[reg_index]; - s->cirrus_shadow_gr1 = reg_value; - break; - case 0x02: // Standard VGA - case 0x03: // Standard VGA - case 0x04: // Standard VGA - case 0x06: // Standard VGA - case 0x07: // Standard VGA - case 0x08: // Standard VGA - s->vga.gr[reg_index] = reg_value & gr_mask[reg_index]; + case 0x00: // Standard VGA, BGCOLOR 0x000000ff + s->vga.gr[reg_index] = reg_value & gr_mask[reg_index]; + s->cirrus_shadow_gr0 = reg_value; break; - case 0x05: // Standard VGA, Cirrus extended mode - s->vga.gr[reg_index] = reg_value & 0x7f; + case 0x01: // Standard VGA, FGCOLOR 0x000000ff + s->vga.gr[reg_index] = reg_value & gr_mask[reg_index]; + s->cirrus_shadow_gr1 = reg_value; + break; + case 0x02: // Standard VGA + case 0x03: // Standard VGA + case 0x04: // Standard VGA + case 0x06: // Standard VGA + case 0x07: // Standard VGA + case 0x08: // Standard VGA + s->vga.gr[reg_index] = reg_value & gr_mask[reg_index]; + break; + case 0x05: // Standard VGA, Cirrus extended mode + s->vga.gr[reg_index] = reg_value & 0x7f; cirrus_update_memory_access(s); - break; - case 0x09: // bank offset #0 - case 0x0A: // bank offset #1 - s->vga.gr[reg_index] = reg_value; - cirrus_update_bank_ptr(s, 0); - cirrus_update_bank_ptr(s, 1); + break; + case 0x09: // bank offset #0 + case 0x0A: // bank offset #1 + s->vga.gr[reg_index] = reg_value; + cirrus_update_bank_ptr(s, 0); + cirrus_update_bank_ptr(s, 1); cirrus_update_memory_access(s); break; case 0x0B: - s->vga.gr[reg_index] = reg_value; - cirrus_update_bank_ptr(s, 0); - cirrus_update_bank_ptr(s, 1); + s->vga.gr[reg_index] = reg_value; + cirrus_update_bank_ptr(s, 0); + cirrus_update_bank_ptr(s, 1); cirrus_update_memory_access(s); - 
break; - case 0x10: // BGCOLOR 0x0000ff00 - case 0x11: // FGCOLOR 0x0000ff00 - case 0x12: // BGCOLOR 0x00ff0000 - case 0x13: // FGCOLOR 0x00ff0000 - case 0x14: // BGCOLOR 0xff000000 - case 0x15: // FGCOLOR 0xff000000 - case 0x20: // BLT WIDTH 0x0000ff - case 0x22: // BLT HEIGHT 0x0000ff - case 0x24: // BLT DEST PITCH 0x0000ff - case 0x26: // BLT SRC PITCH 0x0000ff - case 0x28: // BLT DEST ADDR 0x0000ff - case 0x29: // BLT DEST ADDR 0x00ff00 - case 0x2c: // BLT SRC ADDR 0x0000ff - case 0x2d: // BLT SRC ADDR 0x00ff00 + break; + case 0x10: // BGCOLOR 0x0000ff00 + case 0x11: // FGCOLOR 0x0000ff00 + case 0x12: // BGCOLOR 0x00ff0000 + case 0x13: // FGCOLOR 0x00ff0000 + case 0x14: // BGCOLOR 0xff000000 + case 0x15: // FGCOLOR 0xff000000 + case 0x20: // BLT WIDTH 0x0000ff + case 0x22: // BLT HEIGHT 0x0000ff + case 0x24: // BLT DEST PITCH 0x0000ff + case 0x26: // BLT SRC PITCH 0x0000ff + case 0x28: // BLT DEST ADDR 0x0000ff + case 0x29: // BLT DEST ADDR 0x00ff00 + case 0x2c: // BLT SRC ADDR 0x0000ff + case 0x2d: // BLT SRC ADDR 0x00ff00 case 0x2f: // BLT WRITEMASK - case 0x30: // BLT MODE - case 0x32: // RASTER OP - case 0x33: // BLT MODEEXT - case 0x34: // BLT TRANSPARENT COLOR 0x00ff - case 0x35: // BLT TRANSPARENT COLOR 0xff00 - case 0x38: // BLT TRANSPARENT COLOR MASK 0x00ff - case 0x39: // BLT TRANSPARENT COLOR MASK 0xff00 - s->vga.gr[reg_index] = reg_value; - break; - case 0x21: // BLT WIDTH 0x001f00 - case 0x23: // BLT HEIGHT 0x001f00 - case 0x25: // BLT DEST PITCH 0x001f00 - case 0x27: // BLT SRC PITCH 0x001f00 - s->vga.gr[reg_index] = reg_value & 0x1f; - break; - case 0x2a: // BLT DEST ADDR 0x3f0000 - s->vga.gr[reg_index] = reg_value & 0x3f; + case 0x30: // BLT MODE + case 0x32: // RASTER OP + case 0x33: // BLT MODEEXT + case 0x34: // BLT TRANSPARENT COLOR 0x00ff + case 0x35: // BLT TRANSPARENT COLOR 0xff00 + case 0x38: // BLT TRANSPARENT COLOR MASK 0x00ff + case 0x39: // BLT TRANSPARENT COLOR MASK 0xff00 + s->vga.gr[reg_index] = reg_value; + break; + case 0x21: // BLT WIDTH 0x001f00 + case 0x23: // BLT HEIGHT 0x001f00 + case 0x25: // BLT DEST PITCH 0x001f00 + case 0x27: // BLT SRC PITCH 0x001f00 + s->vga.gr[reg_index] = reg_value & 0x1f; + break; + case 0x2a: // BLT DEST ADDR 0x3f0000 + s->vga.gr[reg_index] = reg_value & 0x3f; /* if auto start mode, starts bit blt now */ if (s->vga.gr[0x31] & CIRRUS_BLT_AUTOSTART) { cirrus_bitblt_start(s); } - break; - case 0x2e: // BLT SRC ADDR 0x3f0000 - s->vga.gr[reg_index] = reg_value & 0x3f; - break; - case 0x31: // BLT STATUS/START - cirrus_write_bitblt(s, reg_value); - break; + break; + case 0x2e: // BLT SRC ADDR 0x3f0000 + s->vga.gr[reg_index] = reg_value & 0x3f; + break; + case 0x31: // BLT STATUS/START + cirrus_write_bitblt(s, reg_value); + break; default: qemu_log_mask(LOG_GUEST_ERROR, "cirrus: outport gr_index 0x%02x, gr_value 0x%02x\n", reg_index, reg_value); - break; + break; } } @@ -1604,122 +1605,122 @@ cirrus_vga_write_gr(CirrusVGAState * s, unsigned reg_index, int reg_value) static int cirrus_vga_read_cr(CirrusVGAState * s, unsigned reg_index) { switch (reg_index) { - case 0x00: // Standard VGA - case 0x01: // Standard VGA - case 0x02: // Standard VGA - case 0x03: // Standard VGA - case 0x04: // Standard VGA - case 0x05: // Standard VGA - case 0x06: // Standard VGA - case 0x07: // Standard VGA - case 0x08: // Standard VGA - case 0x09: // Standard VGA - case 0x0a: // Standard VGA - case 0x0b: // Standard VGA - case 0x0c: // Standard VGA - case 0x0d: // Standard VGA - case 0x0e: // Standard VGA - case 0x0f: // Standard VGA - case 0x10: // 
Standard VGA - case 0x11: // Standard VGA - case 0x12: // Standard VGA - case 0x13: // Standard VGA - case 0x14: // Standard VGA - case 0x15: // Standard VGA - case 0x16: // Standard VGA - case 0x17: // Standard VGA - case 0x18: // Standard VGA - return s->vga.cr[s->vga.cr_index]; - case 0x24: // Attribute Controller Toggle Readback (R) + case 0x00: // Standard VGA + case 0x01: // Standard VGA + case 0x02: // Standard VGA + case 0x03: // Standard VGA + case 0x04: // Standard VGA + case 0x05: // Standard VGA + case 0x06: // Standard VGA + case 0x07: // Standard VGA + case 0x08: // Standard VGA + case 0x09: // Standard VGA + case 0x0a: // Standard VGA + case 0x0b: // Standard VGA + case 0x0c: // Standard VGA + case 0x0d: // Standard VGA + case 0x0e: // Standard VGA + case 0x0f: // Standard VGA + case 0x10: // Standard VGA + case 0x11: // Standard VGA + case 0x12: // Standard VGA + case 0x13: // Standard VGA + case 0x14: // Standard VGA + case 0x15: // Standard VGA + case 0x16: // Standard VGA + case 0x17: // Standard VGA + case 0x18: // Standard VGA + return s->vga.cr[s->vga.cr_index]; + case 0x24: // Attribute Controller Toggle Readback (R) return (s->vga.ar_flip_flop << 7); - case 0x19: // Interlace End - case 0x1a: // Miscellaneous Control - case 0x1b: // Extended Display Control - case 0x1c: // Sync Adjust and Genlock - case 0x1d: // Overlay Extended Control - case 0x22: // Graphics Data Latches Readback (R) - case 0x25: // Part Status - case 0x27: // Part ID (R) - return s->vga.cr[s->vga.cr_index]; - case 0x26: // Attribute Controller Index Readback (R) - return s->vga.ar_index & 0x3f; + case 0x19: // Interlace End + case 0x1a: // Miscellaneous Control + case 0x1b: // Extended Display Control + case 0x1c: // Sync Adjust and Genlock + case 0x1d: // Overlay Extended Control + case 0x22: // Graphics Data Latches Readback (R) + case 0x25: // Part Status + case 0x27: // Part ID (R) + return s->vga.cr[s->vga.cr_index]; + case 0x26: // Attribute Controller Index Readback (R) + return s->vga.ar_index & 0x3f; default: qemu_log_mask(LOG_GUEST_ERROR, "cirrus: inport cr_index 0x%02x\n", reg_index); - return 0xff; + return 0xff; } } static void cirrus_vga_write_cr(CirrusVGAState * s, int reg_value) { switch (s->vga.cr_index) { - case 0x00: // Standard VGA - case 0x01: // Standard VGA - case 0x02: // Standard VGA - case 0x03: // Standard VGA - case 0x04: // Standard VGA - case 0x05: // Standard VGA - case 0x06: // Standard VGA - case 0x07: // Standard VGA - case 0x08: // Standard VGA - case 0x09: // Standard VGA - case 0x0a: // Standard VGA - case 0x0b: // Standard VGA - case 0x0c: // Standard VGA - case 0x0d: // Standard VGA - case 0x0e: // Standard VGA - case 0x0f: // Standard VGA - case 0x10: // Standard VGA - case 0x11: // Standard VGA - case 0x12: // Standard VGA - case 0x13: // Standard VGA - case 0x14: // Standard VGA - case 0x15: // Standard VGA - case 0x16: // Standard VGA - case 0x17: // Standard VGA - case 0x18: // Standard VGA - /* handle CR0-7 protection */ - if ((s->vga.cr[0x11] & 0x80) && s->vga.cr_index <= 7) { - /* can always write bit 4 of CR7 */ - if (s->vga.cr_index == 7) - s->vga.cr[7] = (s->vga.cr[7] & ~0x10) | (reg_value & 0x10); - return; - } - s->vga.cr[s->vga.cr_index] = reg_value; - switch(s->vga.cr_index) { - case 0x00: - case 0x04: - case 0x05: - case 0x06: - case 0x07: - case 0x11: - case 0x17: - s->vga.update_retrace_info(&s->vga); - break; - } + case 0x00: // Standard VGA + case 0x01: // Standard VGA + case 0x02: // Standard VGA + case 0x03: // Standard VGA + case 
0x04: // Standard VGA + case 0x05: // Standard VGA + case 0x06: // Standard VGA + case 0x07: // Standard VGA + case 0x08: // Standard VGA + case 0x09: // Standard VGA + case 0x0a: // Standard VGA + case 0x0b: // Standard VGA + case 0x0c: // Standard VGA + case 0x0d: // Standard VGA + case 0x0e: // Standard VGA + case 0x0f: // Standard VGA + case 0x10: // Standard VGA + case 0x11: // Standard VGA + case 0x12: // Standard VGA + case 0x13: // Standard VGA + case 0x14: // Standard VGA + case 0x15: // Standard VGA + case 0x16: // Standard VGA + case 0x17: // Standard VGA + case 0x18: // Standard VGA + /* handle CR0-7 protection */ + if ((s->vga.cr[0x11] & 0x80) && s->vga.cr_index <= 7) { + /* can always write bit 4 of CR7 */ + if (s->vga.cr_index == 7) + s->vga.cr[7] = (s->vga.cr[7] & ~0x10) | (reg_value & 0x10); + return; + } + s->vga.cr[s->vga.cr_index] = reg_value; + switch(s->vga.cr_index) { + case 0x00: + case 0x04: + case 0x05: + case 0x06: + case 0x07: + case 0x11: + case 0x17: + s->vga.update_retrace_info(&s->vga); + break; + } break; - case 0x19: // Interlace End - case 0x1a: // Miscellaneous Control - case 0x1b: // Extended Display Control - case 0x1c: // Sync Adjust and Genlock - case 0x1d: // Overlay Extended Control - s->vga.cr[s->vga.cr_index] = reg_value; + case 0x19: // Interlace End + case 0x1a: // Miscellaneous Control + case 0x1b: // Extended Display Control + case 0x1c: // Sync Adjust and Genlock + case 0x1d: // Overlay Extended Control + s->vga.cr[s->vga.cr_index] = reg_value; #ifdef DEBUG_CIRRUS - printf("cirrus: handled outport cr_index %02x, cr_value %02x\n", - s->vga.cr_index, reg_value); + printf("cirrus: handled outport cr_index %02x, cr_value %02x\n", + s->vga.cr_index, reg_value); #endif - break; - case 0x22: // Graphics Data Latches Readback (R) - case 0x24: // Attribute Controller Toggle Readback (R) - case 0x26: // Attribute Controller Index Readback (R) - case 0x27: // Part ID (R) - break; - case 0x25: // Part Status + break; + case 0x22: // Graphics Data Latches Readback (R) + case 0x24: // Attribute Controller Toggle Readback (R) + case 0x26: // Attribute Controller Index Readback (R) + case 0x27: // Part ID (R) + break; + case 0x25: // Part Status default: qemu_log_mask(LOG_GUEST_ERROR, "cirrus: outport cr_index 0x%02x, cr_value 0x%02x\n", s->vga.cr_index, reg_value); - break; + break; } } @@ -1735,102 +1736,102 @@ static uint8_t cirrus_mmio_blt_read(CirrusVGAState * s, unsigned address) switch (address) { case (CIRRUS_MMIO_BLTBGCOLOR + 0): - value = cirrus_vga_read_gr(s, 0x00); - break; + value = cirrus_vga_read_gr(s, 0x00); + break; case (CIRRUS_MMIO_BLTBGCOLOR + 1): - value = cirrus_vga_read_gr(s, 0x10); - break; + value = cirrus_vga_read_gr(s, 0x10); + break; case (CIRRUS_MMIO_BLTBGCOLOR + 2): - value = cirrus_vga_read_gr(s, 0x12); - break; + value = cirrus_vga_read_gr(s, 0x12); + break; case (CIRRUS_MMIO_BLTBGCOLOR + 3): - value = cirrus_vga_read_gr(s, 0x14); - break; + value = cirrus_vga_read_gr(s, 0x14); + break; case (CIRRUS_MMIO_BLTFGCOLOR + 0): - value = cirrus_vga_read_gr(s, 0x01); - break; + value = cirrus_vga_read_gr(s, 0x01); + break; case (CIRRUS_MMIO_BLTFGCOLOR + 1): - value = cirrus_vga_read_gr(s, 0x11); - break; + value = cirrus_vga_read_gr(s, 0x11); + break; case (CIRRUS_MMIO_BLTFGCOLOR + 2): - value = cirrus_vga_read_gr(s, 0x13); - break; + value = cirrus_vga_read_gr(s, 0x13); + break; case (CIRRUS_MMIO_BLTFGCOLOR + 3): - value = cirrus_vga_read_gr(s, 0x15); - break; + value = cirrus_vga_read_gr(s, 0x15); + break; case 
(CIRRUS_MMIO_BLTWIDTH + 0): - value = cirrus_vga_read_gr(s, 0x20); - break; + value = cirrus_vga_read_gr(s, 0x20); + break; case (CIRRUS_MMIO_BLTWIDTH + 1): - value = cirrus_vga_read_gr(s, 0x21); - break; + value = cirrus_vga_read_gr(s, 0x21); + break; case (CIRRUS_MMIO_BLTHEIGHT + 0): - value = cirrus_vga_read_gr(s, 0x22); - break; + value = cirrus_vga_read_gr(s, 0x22); + break; case (CIRRUS_MMIO_BLTHEIGHT + 1): - value = cirrus_vga_read_gr(s, 0x23); - break; + value = cirrus_vga_read_gr(s, 0x23); + break; case (CIRRUS_MMIO_BLTDESTPITCH + 0): - value = cirrus_vga_read_gr(s, 0x24); - break; + value = cirrus_vga_read_gr(s, 0x24); + break; case (CIRRUS_MMIO_BLTDESTPITCH + 1): - value = cirrus_vga_read_gr(s, 0x25); - break; + value = cirrus_vga_read_gr(s, 0x25); + break; case (CIRRUS_MMIO_BLTSRCPITCH + 0): - value = cirrus_vga_read_gr(s, 0x26); - break; + value = cirrus_vga_read_gr(s, 0x26); + break; case (CIRRUS_MMIO_BLTSRCPITCH + 1): - value = cirrus_vga_read_gr(s, 0x27); - break; + value = cirrus_vga_read_gr(s, 0x27); + break; case (CIRRUS_MMIO_BLTDESTADDR + 0): - value = cirrus_vga_read_gr(s, 0x28); - break; + value = cirrus_vga_read_gr(s, 0x28); + break; case (CIRRUS_MMIO_BLTDESTADDR + 1): - value = cirrus_vga_read_gr(s, 0x29); - break; + value = cirrus_vga_read_gr(s, 0x29); + break; case (CIRRUS_MMIO_BLTDESTADDR + 2): - value = cirrus_vga_read_gr(s, 0x2a); - break; + value = cirrus_vga_read_gr(s, 0x2a); + break; case (CIRRUS_MMIO_BLTSRCADDR + 0): - value = cirrus_vga_read_gr(s, 0x2c); - break; + value = cirrus_vga_read_gr(s, 0x2c); + break; case (CIRRUS_MMIO_BLTSRCADDR + 1): - value = cirrus_vga_read_gr(s, 0x2d); - break; + value = cirrus_vga_read_gr(s, 0x2d); + break; case (CIRRUS_MMIO_BLTSRCADDR + 2): - value = cirrus_vga_read_gr(s, 0x2e); - break; + value = cirrus_vga_read_gr(s, 0x2e); + break; case CIRRUS_MMIO_BLTWRITEMASK: - value = cirrus_vga_read_gr(s, 0x2f); - break; + value = cirrus_vga_read_gr(s, 0x2f); + break; case CIRRUS_MMIO_BLTMODE: - value = cirrus_vga_read_gr(s, 0x30); - break; + value = cirrus_vga_read_gr(s, 0x30); + break; case CIRRUS_MMIO_BLTROP: - value = cirrus_vga_read_gr(s, 0x32); - break; + value = cirrus_vga_read_gr(s, 0x32); + break; case CIRRUS_MMIO_BLTMODEEXT: - value = cirrus_vga_read_gr(s, 0x33); - break; + value = cirrus_vga_read_gr(s, 0x33); + break; case (CIRRUS_MMIO_BLTTRANSPARENTCOLOR + 0): - value = cirrus_vga_read_gr(s, 0x34); - break; + value = cirrus_vga_read_gr(s, 0x34); + break; case (CIRRUS_MMIO_BLTTRANSPARENTCOLOR + 1): - value = cirrus_vga_read_gr(s, 0x35); - break; + value = cirrus_vga_read_gr(s, 0x35); + break; case (CIRRUS_MMIO_BLTTRANSPARENTCOLORMASK + 0): - value = cirrus_vga_read_gr(s, 0x38); - break; + value = cirrus_vga_read_gr(s, 0x38); + break; case (CIRRUS_MMIO_BLTTRANSPARENTCOLORMASK + 1): - value = cirrus_vga_read_gr(s, 0x39); - break; + value = cirrus_vga_read_gr(s, 0x39); + break; case CIRRUS_MMIO_BLTSTATUS: - value = cirrus_vga_read_gr(s, 0x31); - break; + value = cirrus_vga_read_gr(s, 0x31); + break; default: qemu_log_mask(LOG_GUEST_ERROR, "cirrus: mmio read - address 0x%04x\n", address); - break; + break; } trace_vga_cirrus_write_blt(address, value); @@ -1838,111 +1839,111 @@ static uint8_t cirrus_mmio_blt_read(CirrusVGAState * s, unsigned address) } static void cirrus_mmio_blt_write(CirrusVGAState * s, unsigned address, - uint8_t value) + uint8_t value) { trace_vga_cirrus_write_blt(address, value); switch (address) { case (CIRRUS_MMIO_BLTBGCOLOR + 0): - cirrus_vga_write_gr(s, 0x00, value); - break; + cirrus_vga_write_gr(s, 
0x00, value); + break; case (CIRRUS_MMIO_BLTBGCOLOR + 1): - cirrus_vga_write_gr(s, 0x10, value); - break; + cirrus_vga_write_gr(s, 0x10, value); + break; case (CIRRUS_MMIO_BLTBGCOLOR + 2): - cirrus_vga_write_gr(s, 0x12, value); - break; + cirrus_vga_write_gr(s, 0x12, value); + break; case (CIRRUS_MMIO_BLTBGCOLOR + 3): - cirrus_vga_write_gr(s, 0x14, value); - break; + cirrus_vga_write_gr(s, 0x14, value); + break; case (CIRRUS_MMIO_BLTFGCOLOR + 0): - cirrus_vga_write_gr(s, 0x01, value); - break; + cirrus_vga_write_gr(s, 0x01, value); + break; case (CIRRUS_MMIO_BLTFGCOLOR + 1): - cirrus_vga_write_gr(s, 0x11, value); - break; + cirrus_vga_write_gr(s, 0x11, value); + break; case (CIRRUS_MMIO_BLTFGCOLOR + 2): - cirrus_vga_write_gr(s, 0x13, value); - break; + cirrus_vga_write_gr(s, 0x13, value); + break; case (CIRRUS_MMIO_BLTFGCOLOR + 3): - cirrus_vga_write_gr(s, 0x15, value); - break; + cirrus_vga_write_gr(s, 0x15, value); + break; case (CIRRUS_MMIO_BLTWIDTH + 0): - cirrus_vga_write_gr(s, 0x20, value); - break; + cirrus_vga_write_gr(s, 0x20, value); + break; case (CIRRUS_MMIO_BLTWIDTH + 1): - cirrus_vga_write_gr(s, 0x21, value); - break; + cirrus_vga_write_gr(s, 0x21, value); + break; case (CIRRUS_MMIO_BLTHEIGHT + 0): - cirrus_vga_write_gr(s, 0x22, value); - break; + cirrus_vga_write_gr(s, 0x22, value); + break; case (CIRRUS_MMIO_BLTHEIGHT + 1): - cirrus_vga_write_gr(s, 0x23, value); - break; + cirrus_vga_write_gr(s, 0x23, value); + break; case (CIRRUS_MMIO_BLTDESTPITCH + 0): - cirrus_vga_write_gr(s, 0x24, value); - break; + cirrus_vga_write_gr(s, 0x24, value); + break; case (CIRRUS_MMIO_BLTDESTPITCH + 1): - cirrus_vga_write_gr(s, 0x25, value); - break; + cirrus_vga_write_gr(s, 0x25, value); + break; case (CIRRUS_MMIO_BLTSRCPITCH + 0): - cirrus_vga_write_gr(s, 0x26, value); - break; + cirrus_vga_write_gr(s, 0x26, value); + break; case (CIRRUS_MMIO_BLTSRCPITCH + 1): - cirrus_vga_write_gr(s, 0x27, value); - break; + cirrus_vga_write_gr(s, 0x27, value); + break; case (CIRRUS_MMIO_BLTDESTADDR + 0): - cirrus_vga_write_gr(s, 0x28, value); - break; + cirrus_vga_write_gr(s, 0x28, value); + break; case (CIRRUS_MMIO_BLTDESTADDR + 1): - cirrus_vga_write_gr(s, 0x29, value); - break; + cirrus_vga_write_gr(s, 0x29, value); + break; case (CIRRUS_MMIO_BLTDESTADDR + 2): - cirrus_vga_write_gr(s, 0x2a, value); - break; + cirrus_vga_write_gr(s, 0x2a, value); + break; case (CIRRUS_MMIO_BLTDESTADDR + 3): - /* ignored */ - break; + /* ignored */ + break; case (CIRRUS_MMIO_BLTSRCADDR + 0): - cirrus_vga_write_gr(s, 0x2c, value); - break; + cirrus_vga_write_gr(s, 0x2c, value); + break; case (CIRRUS_MMIO_BLTSRCADDR + 1): - cirrus_vga_write_gr(s, 0x2d, value); - break; + cirrus_vga_write_gr(s, 0x2d, value); + break; case (CIRRUS_MMIO_BLTSRCADDR + 2): - cirrus_vga_write_gr(s, 0x2e, value); - break; + cirrus_vga_write_gr(s, 0x2e, value); + break; case CIRRUS_MMIO_BLTWRITEMASK: - cirrus_vga_write_gr(s, 0x2f, value); - break; + cirrus_vga_write_gr(s, 0x2f, value); + break; case CIRRUS_MMIO_BLTMODE: - cirrus_vga_write_gr(s, 0x30, value); - break; + cirrus_vga_write_gr(s, 0x30, value); + break; case CIRRUS_MMIO_BLTROP: - cirrus_vga_write_gr(s, 0x32, value); - break; + cirrus_vga_write_gr(s, 0x32, value); + break; case CIRRUS_MMIO_BLTMODEEXT: - cirrus_vga_write_gr(s, 0x33, value); - break; + cirrus_vga_write_gr(s, 0x33, value); + break; case (CIRRUS_MMIO_BLTTRANSPARENTCOLOR + 0): - cirrus_vga_write_gr(s, 0x34, value); - break; + cirrus_vga_write_gr(s, 0x34, value); + break; case (CIRRUS_MMIO_BLTTRANSPARENTCOLOR + 1): - 
cirrus_vga_write_gr(s, 0x35, value); - break; + cirrus_vga_write_gr(s, 0x35, value); + break; case (CIRRUS_MMIO_BLTTRANSPARENTCOLORMASK + 0): - cirrus_vga_write_gr(s, 0x38, value); - break; + cirrus_vga_write_gr(s, 0x38, value); + break; case (CIRRUS_MMIO_BLTTRANSPARENTCOLORMASK + 1): - cirrus_vga_write_gr(s, 0x39, value); - break; + cirrus_vga_write_gr(s, 0x39, value); + break; case CIRRUS_MMIO_BLTSTATUS: - cirrus_vga_write_gr(s, 0x31, value); - break; + cirrus_vga_write_gr(s, 0x31, value); + break; default: qemu_log_mask(LOG_GUEST_ERROR, "cirrus: mmio write - addr 0x%04x val 0x%02x (ignored)\n", address, value); - break; + break; } } @@ -1953,9 +1954,9 @@ static void cirrus_mmio_blt_write(CirrusVGAState * s, unsigned address, ***************************************/ static void cirrus_mem_writeb_mode4and5_8bpp(CirrusVGAState * s, - unsigned mode, - unsigned offset, - uint32_t mem_value) + unsigned mode, + unsigned offset, + uint32_t mem_value) { int x; unsigned val = mem_value; @@ -1963,20 +1964,20 @@ static void cirrus_mem_writeb_mode4and5_8bpp(CirrusVGAState * s, for (x = 0; x < 8; x++) { dst = s->vga.vram_ptr + ((offset + x) & s->cirrus_addr_mask); - if (val & 0x80) { - *dst = s->cirrus_shadow_gr1; - } else if (mode == 5) { - *dst = s->cirrus_shadow_gr0; - } - val <<= 1; + if (val & 0x80) { + *dst = s->cirrus_shadow_gr1; + } else if (mode == 5) { + *dst = s->cirrus_shadow_gr0; + } + val <<= 1; } memory_region_set_dirty(&s->vga.vram, offset, 8); } static void cirrus_mem_writeb_mode4and5_16bpp(CirrusVGAState * s, - unsigned mode, - unsigned offset, - uint32_t mem_value) + unsigned mode, + unsigned offset, + uint32_t mem_value) { int x; unsigned val = mem_value; @@ -1984,14 +1985,14 @@ static void cirrus_mem_writeb_mode4and5_16bpp(CirrusVGAState * s, for (x = 0; x < 8; x++) { dst = s->vga.vram_ptr + ((offset + 2 * x) & s->cirrus_addr_mask & ~1); - if (val & 0x80) { - *dst = s->cirrus_shadow_gr1; - *(dst + 1) = s->vga.gr[0x11]; - } else if (mode == 5) { - *dst = s->cirrus_shadow_gr0; - *(dst + 1) = s->vga.gr[0x10]; - } - val <<= 1; + if (val & 0x80) { + *dst = s->cirrus_shadow_gr1; + *(dst + 1) = s->vga.gr[0x11]; + } else if (mode == 5) { + *dst = s->cirrus_shadow_gr0; + *(dst + 1) = s->vga.gr[0x10]; + } + val <<= 1; } memory_region_set_dirty(&s->vga.vram, offset, 16); } @@ -2016,29 +2017,29 @@ static uint64_t cirrus_vga_mem_read(void *opaque, } if (addr < 0x10000) { - /* XXX handle bitblt */ - /* video memory */ - bank_index = addr >> 15; - bank_offset = addr & 0x7fff; - if (bank_offset < s->cirrus_bank_limit[bank_index]) { - bank_offset += s->cirrus_bank_base[bank_index]; - if ((s->vga.gr[0x0B] & 0x14) == 0x14) { - bank_offset <<= 4; - } else if (s->vga.gr[0x0B] & 0x02) { - bank_offset <<= 3; - } - bank_offset &= s->cirrus_addr_mask; - val = *(s->vga.vram_ptr + bank_offset); - } else - val = 0xff; + /* XXX handle bitblt */ + /* video memory */ + bank_index = addr >> 15; + bank_offset = addr & 0x7fff; + if (bank_offset < s->cirrus_bank_limit[bank_index]) { + bank_offset += s->cirrus_bank_base[bank_index]; + if ((s->vga.gr[0x0B] & 0x14) == 0x14) { + bank_offset <<= 4; + } else if (s->vga.gr[0x0B] & 0x02) { + bank_offset <<= 3; + } + bank_offset &= s->cirrus_addr_mask; + val = *(s->vga.vram_ptr + bank_offset); + } else + val = 0xff; } else if (addr >= 0x18000 && addr < 0x18100) { - /* memory-mapped I/O */ - val = 0xff; - if ((s->vga.sr[0x17] & 0x44) == 0x04) { - val = cirrus_mmio_blt_read(s, addr & 0xff); - } + /* memory-mapped I/O */ + val = 0xff; + if ((s->vga.sr[0x17] & 0x44) == 0x04) { 
+ val = cirrus_mmio_blt_read(s, addr & 0xff); + } } else { - val = 0xff; + val = 0xff; qemu_log_mask(LOG_GUEST_ERROR, "cirrus: mem_readb 0x" TARGET_FMT_plx "\n", addr); } @@ -2061,47 +2062,47 @@ static void cirrus_vga_mem_write(void *opaque, } if (addr < 0x10000) { - if (s->cirrus_srcptr != s->cirrus_srcptr_end) { - /* bitblt */ - *s->cirrus_srcptr++ = (uint8_t) mem_value; - if (s->cirrus_srcptr >= s->cirrus_srcptr_end) { - cirrus_bitblt_cputovideo_next(s); - } - } else { - /* video memory */ - bank_index = addr >> 15; - bank_offset = addr & 0x7fff; - if (bank_offset < s->cirrus_bank_limit[bank_index]) { - bank_offset += s->cirrus_bank_base[bank_index]; - if ((s->vga.gr[0x0B] & 0x14) == 0x14) { - bank_offset <<= 4; - } else if (s->vga.gr[0x0B] & 0x02) { - bank_offset <<= 3; - } - bank_offset &= s->cirrus_addr_mask; - mode = s->vga.gr[0x05] & 0x7; - if (mode < 4 || mode > 5 || ((s->vga.gr[0x0B] & 0x4) == 0)) { - *(s->vga.vram_ptr + bank_offset) = mem_value; + if (s->cirrus_srcptr != s->cirrus_srcptr_end) { + /* bitblt */ + *s->cirrus_srcptr++ = (uint8_t) mem_value; + if (s->cirrus_srcptr >= s->cirrus_srcptr_end) { + cirrus_bitblt_cputovideo_next(s); + } + } else { + /* video memory */ + bank_index = addr >> 15; + bank_offset = addr & 0x7fff; + if (bank_offset < s->cirrus_bank_limit[bank_index]) { + bank_offset += s->cirrus_bank_base[bank_index]; + if ((s->vga.gr[0x0B] & 0x14) == 0x14) { + bank_offset <<= 4; + } else if (s->vga.gr[0x0B] & 0x02) { + bank_offset <<= 3; + } + bank_offset &= s->cirrus_addr_mask; + mode = s->vga.gr[0x05] & 0x7; + if (mode < 4 || mode > 5 || ((s->vga.gr[0x0B] & 0x4) == 0)) { + *(s->vga.vram_ptr + bank_offset) = mem_value; memory_region_set_dirty(&s->vga.vram, bank_offset, sizeof(mem_value)); - } else { - if ((s->vga.gr[0x0B] & 0x14) != 0x14) { - cirrus_mem_writeb_mode4and5_8bpp(s, mode, - bank_offset, - mem_value); - } else { - cirrus_mem_writeb_mode4and5_16bpp(s, mode, - bank_offset, - mem_value); - } - } - } - } + } else { + if ((s->vga.gr[0x0B] & 0x14) != 0x14) { + cirrus_mem_writeb_mode4and5_8bpp(s, mode, + bank_offset, + mem_value); + } else { + cirrus_mem_writeb_mode4and5_16bpp(s, mode, + bank_offset, + mem_value); + } + } + } + } } else if (addr >= 0x18000 && addr < 0x18100) { - /* memory-mapped I/O */ - if ((s->vga.sr[0x17] & 0x44) == 0x04) { - cirrus_mmio_blt_write(s, addr & 0xff, mem_value); - } + /* memory-mapped I/O */ + if ((s->vga.sr[0x17] & 0x44) == 0x04) { + cirrus_mmio_blt_write(s, addr & 0xff, mem_value); + } } else { qemu_log_mask(LOG_GUEST_ERROR, "cirrus: mem_writeb 0x" TARGET_FMT_plx " " @@ -2326,20 +2327,20 @@ static uint64_t cirrus_linear_read(void *opaque, hwaddr addr, if (((s->vga.sr[0x17] & 0x44) == 0x44) && ((addr & s->linear_mmio_mask) == s->linear_mmio_mask)) { - /* memory-mapped I/O */ - ret = cirrus_mmio_blt_read(s, addr & 0xff); + /* memory-mapped I/O */ + ret = cirrus_mmio_blt_read(s, addr & 0xff); } else if (0) { - /* XXX handle bitblt */ - ret = 0xff; + /* XXX handle bitblt */ + ret = 0xff; } else { - /* video memory */ - if ((s->vga.gr[0x0B] & 0x14) == 0x14) { - addr <<= 4; - } else if (s->vga.gr[0x0B] & 0x02) { - addr <<= 3; - } - addr &= s->cirrus_addr_mask; - ret = *(s->vga.vram_ptr + addr); + /* video memory */ + if ((s->vga.gr[0x0B] & 0x14) == 0x14) { + addr <<= 4; + } else if (s->vga.gr[0x0B] & 0x02) { + addr <<= 3; + } + addr &= s->cirrus_addr_mask; + ret = *(s->vga.vram_ptr + addr); } return ret; @@ -2355,34 +2356,34 @@ static void cirrus_linear_write(void *opaque, hwaddr addr, if (((s->vga.sr[0x17] & 0x44) == 0x44) && 
((addr & s->linear_mmio_mask) == s->linear_mmio_mask)) { - /* memory-mapped I/O */ - cirrus_mmio_blt_write(s, addr & 0xff, val); + /* memory-mapped I/O */ + cirrus_mmio_blt_write(s, addr & 0xff, val); } else if (s->cirrus_srcptr != s->cirrus_srcptr_end) { - /* bitblt */ - *s->cirrus_srcptr++ = (uint8_t) val; - if (s->cirrus_srcptr >= s->cirrus_srcptr_end) { - cirrus_bitblt_cputovideo_next(s); - } + /* bitblt */ + *s->cirrus_srcptr++ = (uint8_t) val; + if (s->cirrus_srcptr >= s->cirrus_srcptr_end) { + cirrus_bitblt_cputovideo_next(s); + } } else { - /* video memory */ - if ((s->vga.gr[0x0B] & 0x14) == 0x14) { - addr <<= 4; - } else if (s->vga.gr[0x0B] & 0x02) { - addr <<= 3; - } - addr &= s->cirrus_addr_mask; + /* video memory */ + if ((s->vga.gr[0x0B] & 0x14) == 0x14) { + addr <<= 4; + } else if (s->vga.gr[0x0B] & 0x02) { + addr <<= 3; + } + addr &= s->cirrus_addr_mask; - mode = s->vga.gr[0x05] & 0x7; - if (mode < 4 || mode > 5 || ((s->vga.gr[0x0B] & 0x4) == 0)) { - *(s->vga.vram_ptr + addr) = (uint8_t) val; + mode = s->vga.gr[0x05] & 0x7; + if (mode < 4 || mode > 5 || ((s->vga.gr[0x0B] & 0x4) == 0)) { + *(s->vga.vram_ptr + addr) = (uint8_t) val; memory_region_set_dirty(&s->vga.vram, addr, 1); - } else { - if ((s->vga.gr[0x0B] & 0x14) != 0x14) { - cirrus_mem_writeb_mode4and5_8bpp(s, mode, addr, val); - } else { - cirrus_mem_writeb_mode4and5_16bpp(s, mode, addr, val); - } - } + } else { + if ((s->vga.gr[0x0B] & 0x14) != 0x14) { + cirrus_mem_writeb_mode4and5_8bpp(s, mode, addr, val); + } else { + cirrus_mem_writeb_mode4and5_16bpp(s, mode, addr, val); + } + } } } @@ -2415,11 +2416,11 @@ static void cirrus_linear_bitblt_write(void *opaque, CirrusVGAState *s = opaque; if (s->cirrus_srcptr != s->cirrus_srcptr_end) { - /* bitblt */ - *s->cirrus_srcptr++ = (uint8_t) val; - if (s->cirrus_srcptr >= s->cirrus_srcptr_end) { - cirrus_bitblt_cputovideo_next(s); - } + /* bitblt */ + *s->cirrus_srcptr++ = (uint8_t) val; + if (s->cirrus_srcptr >= s->cirrus_srcptr_end) { + cirrus_bitblt_cputovideo_next(s); + } } } @@ -2476,14 +2477,14 @@ static void cirrus_update_memory_access(CirrusVGAState *s) } else if (s->cirrus_srcptr != s->cirrus_srcptr_end) { goto generic_io; } else { - if ((s->vga.gr[0x0B] & 0x14) == 0x14) { + if ((s->vga.gr[0x0B] & 0x14) == 0x14) { goto generic_io; - } else if (s->vga.gr[0x0B] & 0x02) { + } else if (s->vga.gr[0x0B] & 0x02) { goto generic_io; } - mode = s->vga.gr[0x05] & 0x7; - if (mode < 4 || mode > 5 || ((s->vga.gr[0x0B] & 0x4) == 0)) { + mode = s->vga.gr[0x05] & 0x7; + if (mode < 4 || mode > 5 || ((s->vga.gr[0x0B] & 0x4) == 0)) { map_linear_vram(s); } else { generic_io: @@ -2506,76 +2507,76 @@ static uint64_t cirrus_vga_ioport_read(void *opaque, hwaddr addr, addr += 0x3b0; if (vga_ioport_invalid(s, addr)) { - val = 0xff; + val = 0xff; } else { - switch (addr) { - case 0x3c0: - if (s->ar_flip_flop == 0) { - val = s->ar_index; - } else { - val = 0; - } - break; - case 0x3c1: - index = s->ar_index & 0x1f; - if (index < 21) - val = s->ar[index]; - else - val = 0; - break; - case 0x3c2: - val = s->st00; - break; - case 0x3c4: - val = s->sr_index; - break; - case 0x3c5: - val = cirrus_vga_read_sr(c); + switch (addr) { + case 0x3c0: + if (s->ar_flip_flop == 0) { + val = s->ar_index; + } else { + val = 0; + } + break; + case 0x3c1: + index = s->ar_index & 0x1f; + if (index < 21) + val = s->ar[index]; + else + val = 0; + break; + case 0x3c2: + val = s->st00; + break; + case 0x3c4: + val = s->sr_index; + break; + case 0x3c5: + val = cirrus_vga_read_sr(c); + break; + break; + case 0x3c6: + 
val = cirrus_read_hidden_dac(c); + break; + case 0x3c7: + val = s->dac_state; + break; + case 0x3c8: + val = s->dac_write_index; + c->cirrus_hidden_dac_lockindex = 0; break; - break; - case 0x3c6: - val = cirrus_read_hidden_dac(c); - break; - case 0x3c7: - val = s->dac_state; - break; - case 0x3c8: - val = s->dac_write_index; - c->cirrus_hidden_dac_lockindex = 0; - break; case 0x3c9: val = cirrus_vga_read_palette(c); break; - case 0x3ca: - val = s->fcr; - break; - case 0x3cc: - val = s->msr; - break; - case 0x3ce: - val = s->gr_index; - break; - case 0x3cf: - val = cirrus_vga_read_gr(c, s->gr_index); - break; - case 0x3b4: - case 0x3d4: - val = s->cr_index; - break; - case 0x3b5: - case 0x3d5: + case 0x3ca: + val = s->fcr; + break; + case 0x3cc: + val = s->msr; + break; + case 0x3ce: + val = s->gr_index; + break; + case 0x3cf: + val = cirrus_vga_read_gr(c, s->gr_index); + break; + case 0x3b4: + case 0x3d4: + val = s->cr_index; + break; + case 0x3b5: + case 0x3d5: val = cirrus_vga_read_cr(c, s->cr_index); - break; - case 0x3ba: - case 0x3da: - /* just toggle to fool polling */ - val = s->st01 = s->retrace(s); - s->ar_flip_flop = 0; - break; - default: - val = 0x00; - break; - } + break; + case 0x3ba: + case 0x3da: + /* just toggle to fool polling */ + val = s->st01 = s->retrace(s); + s->ar_flip_flop = 0; + break; + default: + val = 0x00; + break; + } } trace_vga_cirrus_read_io(addr, val); return val; @@ -2592,86 +2593,86 @@ static void cirrus_vga_ioport_write(void *opaque, hwaddr addr, uint64_t val, /* check port range access depending on color/monochrome mode */ if (vga_ioport_invalid(s, addr)) { - return; + return; } trace_vga_cirrus_write_io(addr, val); switch (addr) { case 0x3c0: - if (s->ar_flip_flop == 0) { - val &= 0x3f; - s->ar_index = val; - } else { - index = s->ar_index & 0x1f; - switch (index) { - case 0x00 ... 0x0f: - s->ar[index] = val & 0x3f; - break; - case 0x10: - s->ar[index] = val & ~0x10; - break; - case 0x11: - s->ar[index] = val; - break; - case 0x12: - s->ar[index] = val & ~0xc0; - break; - case 0x13: - s->ar[index] = val & ~0xf0; - break; - case 0x14: - s->ar[index] = val & ~0xf0; - break; - default: - break; - } - } - s->ar_flip_flop ^= 1; - break; + if (s->ar_flip_flop == 0) { + val &= 0x3f; + s->ar_index = val; + } else { + index = s->ar_index & 0x1f; + switch (index) { + case 0x00 ... 
0x0f: + s->ar[index] = val & 0x3f; + break; + case 0x10: + s->ar[index] = val & ~0x10; + break; + case 0x11: + s->ar[index] = val; + break; + case 0x12: + s->ar[index] = val & ~0xc0; + break; + case 0x13: + s->ar[index] = val & ~0xf0; + break; + case 0x14: + s->ar[index] = val & ~0xf0; + break; + default: + break; + } + } + s->ar_flip_flop ^= 1; + break; case 0x3c2: - s->msr = val & ~0x10; - s->update_retrace_info(s); - break; + s->msr = val & ~0x10; + s->update_retrace_info(s); + break; case 0x3c4: - s->sr_index = val; - break; + s->sr_index = val; + break; case 0x3c5: - cirrus_vga_write_sr(c, val); + cirrus_vga_write_sr(c, val); break; case 0x3c6: - cirrus_write_hidden_dac(c, val); - break; + cirrus_write_hidden_dac(c, val); + break; case 0x3c7: - s->dac_read_index = val; - s->dac_sub_index = 0; - s->dac_state = 3; - break; + s->dac_read_index = val; + s->dac_sub_index = 0; + s->dac_state = 3; + break; case 0x3c8: - s->dac_write_index = val; - s->dac_sub_index = 0; - s->dac_state = 0; - break; + s->dac_write_index = val; + s->dac_sub_index = 0; + s->dac_state = 0; + break; case 0x3c9: cirrus_vga_write_palette(c, val); break; case 0x3ce: - s->gr_index = val; - break; + s->gr_index = val; + break; case 0x3cf: - cirrus_vga_write_gr(c, s->gr_index, val); - break; + cirrus_vga_write_gr(c, s->gr_index, val); + break; case 0x3b4: case 0x3d4: - s->cr_index = val; - break; + s->cr_index = val; + break; case 0x3b5: case 0x3d5: - cirrus_vga_write_cr(c, val); - break; + cirrus_vga_write_cr(c, val); + break; case 0x3ba: case 0x3da: - s->fcr = val & 0x10; - break; + s->fcr = val & 0x10; + break; } } @@ -2699,7 +2700,7 @@ static void cirrus_mmio_write(void *opaque, hwaddr addr, CirrusVGAState *s = opaque; if (addr >= 0x100) { - cirrus_mmio_blt_write(s, addr - 0x100, val); + cirrus_mmio_blt_write(s, addr - 0x100, val); } else { cirrus_vga_ioport_write(s, addr + 0x10, val, size); } @@ -2799,13 +2800,13 @@ static void cirrus_reset(void *opaque) s->vga.sr[0x06] = 0x0f; if (s->device_id == CIRRUS_ID_CLGD5446) { /* 4MB 64 bit memory config, always PCI */ - s->vga.sr[0x1F] = 0x2d; // MemClock + s->vga.sr[0x1F] = 0x2d; // MemClock s->vga.gr[0x18] = 0x0f; // fastest memory configuration s->vga.sr[0x0f] = 0x98; s->vga.sr[0x17] = 0x20; s->vga.sr[0x15] = 0x04; /* memory size, 3=2MB, 4=4MB */ } else { - s->vga.sr[0x1F] = 0x22; // MemClock + s->vga.sr[0x1F] = 0x22; // MemClock s->vga.sr[0x0F] = CIRRUS_MEMSIZE_2M; s->vga.sr[0x17] = s->bustype; s->vga.sr[0x15] = 0x03; /* memory size, 3=2MB, 4=4MB */ @@ -2940,27 +2941,30 @@ void cirrus_init_common(CirrusVGAState *s, Object *owner, static void pci_cirrus_vga_realize(PCIDevice *dev, Error **errp) { - PCICirrusVGAState *d = PCI_CIRRUS_VGA(dev); - CirrusVGAState *s = &d->cirrus_vga; - PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev); - int16_t device_id = pc->device_id; + PCICirrusVGAState *d = PCI_CIRRUS_VGA(dev); + CirrusVGAState *s = &d->cirrus_vga; + PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev); + int16_t device_id = pc->device_id; - /* follow real hardware, cirrus card emulated has 4 MB video memory. - Also accept 8 MB/16 MB for backward compatibility. 
*/ - if (s->vga.vram_size_mb != 4 && s->vga.vram_size_mb != 8 && - s->vga.vram_size_mb != 16) { - error_setg(errp, "Invalid cirrus_vga ram size '%u'", - s->vga.vram_size_mb); - return; - } - /* setup VGA */ - vga_common_init(&s->vga, OBJECT(dev)); - cirrus_init_common(s, OBJECT(dev), device_id, 1, pci_address_space(dev), - pci_address_space_io(dev)); - s->vga.con = graphic_console_init(DEVICE(dev), 0, s->vga.hw_ops, &s->vga); - - /* setup PCI */ + /* + * Follow real hardware, cirrus card emulated has 4 MB video memory. + * Also accept 8 MB/16 MB for backward compatibility. + */ + if (s->vga.vram_size_mb != 4 && s->vga.vram_size_mb != 8 && + s->vga.vram_size_mb != 16) { + error_setg(errp, "Invalid cirrus_vga ram size '%u'", + s->vga.vram_size_mb); + return; + } + /* setup VGA */ + if (!vga_common_init(&s->vga, OBJECT(dev), errp)) { + return; + } + cirrus_init_common(s, OBJECT(dev), device_id, 1, pci_address_space(dev), + pci_address_space_io(dev)); + s->vga.con = graphic_console_init(DEVICE(dev), 0, s->vga.hw_ops, &s->vga); + /* setup PCI */ memory_region_init(&s->pci_bar, OBJECT(dev), "cirrus-pci-bar0", 0x2000000); /* XXX: add byte swapping apertures */ @@ -2968,14 +2972,14 @@ static void pci_cirrus_vga_realize(PCIDevice *dev, Error **errp) memory_region_add_subregion(&s->pci_bar, 0x1000000, &s->cirrus_linear_bitblt_io); - /* setup memory space */ - /* memory #0 LFB */ - /* memory #1 memory-mapped I/O */ - /* XXX: s->vga.vram_size must be a power of two */ - pci_register_bar(&d->dev, 0, PCI_BASE_ADDRESS_MEM_PREFETCH, &s->pci_bar); - if (device_id == CIRRUS_ID_CLGD5446) { - pci_register_bar(&d->dev, 1, 0, &s->cirrus_mmio_io); - } + /* setup memory space */ + /* memory #0 LFB */ + /* memory #1 memory-mapped I/O */ + /* XXX: s->vga.vram_size must be a power of two */ + pci_register_bar(&d->dev, 0, PCI_BASE_ADDRESS_MEM_PREFETCH, &s->pci_bar); + if (device_id == CIRRUS_ID_CLGD5446) { + pci_register_bar(&d->dev, 1, 0, &s->cirrus_mmio_io); + } } static Property pci_vga_cirrus_properties[] = { diff --git a/hw/display/cirrus_vga_isa.c b/hw/display/cirrus_vga_isa.c index 4f6fb1af3b..84be51670e 100644 --- a/hw/display/cirrus_vga_isa.c +++ b/hw/display/cirrus_vga_isa.c @@ -31,6 +31,7 @@ #include "hw/isa/isa.h" #include "cirrus_vga_internal.h" #include "qom/object.h" +#include "ui/console.h" #define TYPE_ISA_CIRRUS_VGA "isa-cirrus-vga" OBJECT_DECLARE_SIMPLE_TYPE(ISACirrusVGAState, ISA_CIRRUS_VGA) @@ -56,7 +57,9 @@ static void isa_cirrus_vga_realizefn(DeviceState *dev, Error **errp) return; } s->global_vmstate = true; - vga_common_init(s, OBJECT(dev)); + if (!vga_common_init(s, OBJECT(dev), errp)) { + return; + } cirrus_init_common(&d->cirrus_vga, OBJECT(dev), CIRRUS_ID_CLGD5430, 0, isa_address_space(isadev), isa_address_space_io(isadev)); diff --git a/hw/display/edid-generate.c b/hw/display/edid-generate.c index f2b874d5e3..2cb819675e 100644 --- a/hw/display/edid-generate.c +++ b/hw/display/edid-generate.c @@ -24,6 +24,9 @@ static const struct edid_mode { { .xres = 2048, .yres = 1152 }, { .xres = 1920, .yres = 1080, .dta = 31 }, + /* dea/dta extension timings (all @ 60 Hz) */ + { .xres = 3840, .yres = 2160, .dta = 97 }, + /* additional standard timings 3 (all @ 60Hz) */ { .xres = 1920, .yres = 1200, .xtra3 = 10, .bit = 0 }, { .xres = 1600, .yres = 1200, .xtra3 = 9, .bit = 2 }, @@ -252,33 +255,31 @@ static void edid_desc_dummy(uint8_t *desc) edid_desc_type(desc, 0x10); } -static void edid_desc_timing(uint8_t *desc, uint32_t refresh_rate, +static void edid_desc_timing(uint8_t *desc, const Timings 
*timings, uint32_t xres, uint32_t yres, uint32_t xmm, uint32_t ymm) { - Timings timings; - generate_timings(&timings, refresh_rate, xres, yres); - stl_le_p(desc, timings.clock); + stw_le_p(desc, timings->clock); desc[2] = xres & 0xff; - desc[3] = timings.xblank & 0xff; + desc[3] = timings->xblank & 0xff; desc[4] = (((xres & 0xf00) >> 4) | - ((timings.xblank & 0xf00) >> 8)); + ((timings->xblank & 0xf00) >> 8)); desc[5] = yres & 0xff; - desc[6] = timings.yblank & 0xff; + desc[6] = timings->yblank & 0xff; desc[7] = (((yres & 0xf00) >> 4) | - ((timings.yblank & 0xf00) >> 8)); + ((timings->yblank & 0xf00) >> 8)); - desc[8] = timings.xfront & 0xff; - desc[9] = timings.xsync & 0xff; + desc[8] = timings->xfront & 0xff; + desc[9] = timings->xsync & 0xff; - desc[10] = (((timings.yfront & 0x00f) << 4) | - ((timings.ysync & 0x00f) << 0)); - desc[11] = (((timings.xfront & 0x300) >> 2) | - ((timings.xsync & 0x300) >> 4) | - ((timings.yfront & 0x030) >> 2) | - ((timings.ysync & 0x030) >> 4)); + desc[10] = (((timings->yfront & 0x00f) << 4) | + ((timings->ysync & 0x00f) << 0)); + desc[11] = (((timings->xfront & 0x300) >> 2) | + ((timings->xsync & 0x300) >> 4) | + ((timings->yfront & 0x030) >> 2) | + ((timings->ysync & 0x030) >> 4)); desc[12] = xmm & 0xff; desc[13] = ymm & 0xff; @@ -345,13 +346,10 @@ static void init_displayid(uint8_t *did) edid_checksum(did + 1, did[2] + 4); } -static void qemu_displayid_generate(uint8_t *did, uint32_t refresh_rate, +static void qemu_displayid_generate(uint8_t *did, const Timings *timings, uint32_t xres, uint32_t yres, uint32_t xmm, uint32_t ymm) { - Timings timings; - generate_timings(&timings, refresh_rate, xres, yres); - did[0] = 0x70; /* display id extension */ did[1] = 0x13; /* version 1.3 */ did[2] = 23; /* length */ @@ -361,21 +359,21 @@ static void qemu_displayid_generate(uint8_t *did, uint32_t refresh_rate, did[6] = 0x00; /* revision */ did[7] = 0x14; /* block length */ - did[8] = timings.clock & 0xff; - did[9] = (timings.clock & 0xff00) >> 8; - did[10] = (timings.clock & 0xff0000) >> 16; + did[8] = timings->clock & 0xff; + did[9] = (timings->clock & 0xff00) >> 8; + did[10] = (timings->clock & 0xff0000) >> 16; did[11] = 0x88; /* leave aspect ratio undefined */ stw_le_p(did + 12, 0xffff & (xres - 1)); - stw_le_p(did + 14, 0xffff & (timings.xblank - 1)); - stw_le_p(did + 16, 0xffff & (timings.xfront - 1)); - stw_le_p(did + 18, 0xffff & (timings.xsync - 1)); + stw_le_p(did + 14, 0xffff & (timings->xblank - 1)); + stw_le_p(did + 16, 0xffff & (timings->xfront - 1)); + stw_le_p(did + 18, 0xffff & (timings->xsync - 1)); stw_le_p(did + 20, 0xffff & (yres - 1)); - stw_le_p(did + 22, 0xffff & (timings.yblank - 1)); - stw_le_p(did + 24, 0xffff & (timings.yfront - 1)); - stw_le_p(did + 26, 0xffff & (timings.ysync - 1)); + stw_le_p(did + 22, 0xffff & (timings->yblank - 1)); + stw_le_p(did + 24, 0xffff & (timings->yfront - 1)); + stw_le_p(did + 26, 0xffff & (timings->ysync - 1)); edid_checksum(did + 1, did[2] + 4); } @@ -383,6 +381,7 @@ static void qemu_displayid_generate(uint8_t *did, uint32_t refresh_rate, void qemu_edid_generate(uint8_t *edid, size_t size, qemu_edid_info *info) { + Timings timings; uint8_t *desc = edid + 54; uint8_t *xtra3 = NULL; uint8_t *dta = NULL; @@ -401,13 +400,10 @@ void qemu_edid_generate(uint8_t *edid, size_t size, info->name = "QEMU Monitor"; } if (!info->prefx) { - info->prefx = 1024; + info->prefx = 1280; } if (!info->prefy) { - info->prefy = 768; - } - if (info->prefx >= 4096 || info->prefy >= 4096) { - large_screen = 1; + info->prefy = 800; } 
if (info->width_mm && info->height_mm) { width_mm = info->width_mm; @@ -418,6 +414,11 @@ void qemu_edid_generate(uint8_t *edid, size_t size, height_mm = qemu_edid_dpi_to_mm(dpi, info->prefy); } + generate_timings(&timings, refresh_rate, info->prefx, info->prefy); + if (info->prefx >= 4096 || info->prefy >= 4096 || timings.clock >= 65536) { + large_screen = 1; + } + /* =============== extensions =============== */ if (size >= 256) { @@ -498,7 +499,7 @@ void qemu_edid_generate(uint8_t *edid, size_t size, if (!large_screen) { /* The DTD section has only 12 bits to store the resolution */ - edid_desc_timing(desc, refresh_rate, info->prefx, info->prefy, + edid_desc_timing(desc, &timings, info->prefx, info->prefy, width_mm, height_mm); desc = edid_desc_next(edid, dta, desc); } @@ -533,7 +534,7 @@ void qemu_edid_generate(uint8_t *edid, size_t size, /* =============== display id extensions =============== */ if (did && large_screen) { - qemu_displayid_generate(did, refresh_rate, info->prefx, info->prefy, + qemu_displayid_generate(did, &timings, info->prefx, info->prefy, width_mm, height_mm); } diff --git a/hw/display/i2c-ddc.c b/hw/display/i2c-ddc.c index 13eb529fc1..146489518c 100644 --- a/hw/display/i2c-ddc.c +++ b/hw/display/i2c-ddc.c @@ -113,7 +113,7 @@ static void i2c_ddc_class_init(ObjectClass *oc, void *data) isc->send = i2c_ddc_tx; } -static TypeInfo i2c_ddc_info = { +static const TypeInfo i2c_ddc_info = { .name = TYPE_I2CDDC, .parent = TYPE_I2C_SLAVE, .instance_size = sizeof(I2CDDCState), diff --git a/hw/display/macfb.c b/hw/display/macfb.c index d8183b9bbd..2f8e016566 100644 --- a/hw/display/macfb.c +++ b/hw/display/macfb.c @@ -20,16 +20,102 @@ #include "qapi/error.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" +#include "trace.h" -#define VIDEO_BASE 0x00001000 +#define VIDEO_BASE 0x0 #define DAFB_BASE 0x00800000 #define MACFB_PAGE_SIZE 4096 #define MACFB_VRAM_SIZE (4 * MiB) -#define DAFB_RESET 0x200 -#define DAFB_LUT 0x213 +#define DAFB_MODE_VADDR1 0x0 +#define DAFB_MODE_VADDR2 0x4 +#define DAFB_MODE_CTRL1 0x8 +#define DAFB_MODE_CTRL2 0xc +#define DAFB_MODE_SENSE 0x1c +#define DAFB_INTR_MASK 0x104 +#define DAFB_INTR_STAT 0x108 +#define DAFB_INTR_CLEAR 0x10c +#define DAFB_RESET 0x200 +#define DAFB_LUT 0x213 +#define DAFB_INTR_VBL 0x4 + +/* Vertical Blank period (60.15Hz) */ +#define DAFB_INTR_VBL_PERIOD_NS 16625800 + +/* + * Quadra sense codes taken from Apple Technical Note HW26: + * "Macintosh Quadra Built-In Video". The sense codes and + * extended sense codes have different meanings: + * + * Sense: + * bit 2: SENSE2 (pin 10) + * bit 1: SENSE1 (pin 7) + * bit 0: SENSE0 (pin 4) + * + * 0 = pin tied to ground + * 1 = pin unconnected + * + * Extended Sense: + * bit 2: pins 4-10 + * bit 1: pins 10-7 + * bit 0: pins 7-4 + * + * 0 = pins tied together + * 1 = pins unconnected + * + * Reads from the sense register appear to be active low, i.e. a 1 indicates + * that the pin is tied to ground, a 0 indicates the pin is disconnected. + * + * Writes to the sense register appear to activate pulldowns i.e. a 1 enables + * a pulldown on a particular pin. + * + * The MacOS toolbox appears to use a series of reads and writes to first + * determine if extended sense is to be used, and then check which pins are + * tied together in order to determine the display type. 
+ */ + +typedef struct MacFbSense { + uint8_t type; + uint8_t sense; + uint8_t ext_sense; +} MacFbSense; + +static MacFbSense macfb_sense_table[] = { + { MACFB_DISPLAY_APPLE_21_COLOR, 0x0, 0 }, + { MACFB_DISPLAY_APPLE_PORTRAIT, 0x1, 0 }, + { MACFB_DISPLAY_APPLE_12_RGB, 0x2, 0 }, + { MACFB_DISPLAY_APPLE_2PAGE_MONO, 0x3, 0 }, + { MACFB_DISPLAY_NTSC_UNDERSCAN, 0x4, 0 }, + { MACFB_DISPLAY_NTSC_OVERSCAN, 0x4, 0 }, + { MACFB_DISPLAY_APPLE_12_MONO, 0x6, 0 }, + { MACFB_DISPLAY_APPLE_13_RGB, 0x6, 0 }, + { MACFB_DISPLAY_16_COLOR, 0x7, 0x3 }, + { MACFB_DISPLAY_PAL1_UNDERSCAN, 0x7, 0x0 }, + { MACFB_DISPLAY_PAL1_OVERSCAN, 0x7, 0x0 }, + { MACFB_DISPLAY_PAL2_UNDERSCAN, 0x7, 0x6 }, + { MACFB_DISPLAY_PAL2_OVERSCAN, 0x7, 0x6 }, + { MACFB_DISPLAY_VGA, 0x7, 0x5 }, + { MACFB_DISPLAY_SVGA, 0x7, 0x5 }, +}; + +static MacFbMode macfb_mode_table[] = { + { MACFB_DISPLAY_VGA, 1, 0x100, 0x71e, 640, 480, 0x400, 0x1000 }, + { MACFB_DISPLAY_VGA, 2, 0x100, 0x70e, 640, 480, 0x400, 0x1000 }, + { MACFB_DISPLAY_VGA, 4, 0x100, 0x706, 640, 480, 0x400, 0x1000 }, + { MACFB_DISPLAY_VGA, 8, 0x100, 0x702, 640, 480, 0x400, 0x1000 }, + { MACFB_DISPLAY_VGA, 24, 0x100, 0x7ff, 640, 480, 0x1000, 0x1000 }, + { MACFB_DISPLAY_VGA, 1, 0xd0 , 0x70e, 800, 600, 0x340, 0xe00 }, + { MACFB_DISPLAY_VGA, 2, 0xd0 , 0x706, 800, 600, 0x340, 0xe00 }, + { MACFB_DISPLAY_VGA, 4, 0xd0 , 0x702, 800, 600, 0x340, 0xe00 }, + { MACFB_DISPLAY_VGA, 8, 0xd0, 0x700, 800, 600, 0x340, 0xe00 }, + { MACFB_DISPLAY_VGA, 24, 0x340, 0x100, 800, 600, 0xd00, 0xe00 }, + { MACFB_DISPLAY_APPLE_21_COLOR, 1, 0x90, 0x506, 1152, 870, 0x240, 0x80 }, + { MACFB_DISPLAY_APPLE_21_COLOR, 2, 0x90, 0x502, 1152, 870, 0x240, 0x80 }, + { MACFB_DISPLAY_APPLE_21_COLOR, 4, 0x90, 0x500, 1152, 870, 0x240, 0x80 }, + { MACFB_DISPLAY_APPLE_21_COLOR, 8, 0x120, 0x5ff, 1152, 870, 0x480, 0x80 }, +}; typedef void macfb_draw_line_func(MacfbState *s, uint8_t *d, uint32_t addr, int width); @@ -49,7 +135,9 @@ static void macfb_draw_line1(MacfbState *s, uint8_t *d, uint32_t addr, for (x = 0; x < width; x++) { int bit = x & 7; int idx = (macfb_read_byte(s, addr) >> (7 - bit)) & 1; - r = g = b = ((1 - idx) << 7); + r = s->color_palette[idx * 3]; + g = s->color_palette[idx * 3 + 1]; + b = s->color_palette[idx * 3 + 2]; addr += (bit == 7); *(uint32_t *)d = rgb_to_pixel32(r, g, b); @@ -143,10 +231,10 @@ static void macfb_draw_line24(MacfbState *s, uint8_t *d, uint32_t addr, int x; for (x = 0; x < width; x++) { - r = macfb_read_byte(s, addr); - g = macfb_read_byte(s, addr + 1); - b = macfb_read_byte(s, addr + 2); - addr += 3; + r = macfb_read_byte(s, addr + 1); + g = macfb_read_byte(s, addr + 2); + b = macfb_read_byte(s, addr + 3); + addr += 4; *(uint32_t *)d = rgb_to_pixel32(r, g, b); d += 4; @@ -187,7 +275,7 @@ static void macfb_draw_graphic(MacfbState *s) ram_addr_t page; uint32_t v = 0; int y, ymin; - int macfb_stride = (s->depth * s->width + 7) / 8; + int macfb_stride = s->mode->stride; macfb_draw_line_func *macfb_draw_line; switch (s->depth) { @@ -219,7 +307,7 @@ static void macfb_draw_graphic(MacfbState *s) DIRTY_MEMORY_VGA); ymin = -1; - page = 0; + page = s->mode->offset; for (y = 0; y < s->height; y++, page += macfb_stride) { if (macfb_check_dirty(s, snap, page, macfb_stride)) { uint8_t *data_display; @@ -252,6 +340,121 @@ static void macfb_invalidate_display(void *opaque) memory_region_set_dirty(&s->mem_vram, 0, MACFB_VRAM_SIZE); } +static uint32_t macfb_sense_read(MacfbState *s) +{ + MacFbSense *macfb_sense; + uint8_t sense; + + assert(s->type < ARRAY_SIZE(macfb_sense_table)); + macfb_sense = 
&macfb_sense_table[s->type]; + if (macfb_sense->sense == 0x7) { + /* Extended sense */ + sense = 0; + if (!(macfb_sense->ext_sense & 1)) { + /* Pins 7-4 together */ + if (~s->regs[DAFB_MODE_SENSE >> 2] & 3) { + sense = (~s->regs[DAFB_MODE_SENSE >> 2] & 7) | 3; + } + } + if (!(macfb_sense->ext_sense & 2)) { + /* Pins 10-7 together */ + if (~s->regs[DAFB_MODE_SENSE >> 2] & 6) { + sense = (~s->regs[DAFB_MODE_SENSE >> 2] & 7) | 6; + } + } + if (!(macfb_sense->ext_sense & 4)) { + /* Pins 4-10 together */ + if (~s->regs[DAFB_MODE_SENSE >> 2] & 5) { + sense = (~s->regs[DAFB_MODE_SENSE >> 2] & 7) | 5; + } + } + } else { + /* Normal sense */ + sense = (~macfb_sense->sense & 7) | + (~s->regs[DAFB_MODE_SENSE >> 2] & 7); + } + + trace_macfb_sense_read(sense); + return sense; +} + +static void macfb_sense_write(MacfbState *s, uint32_t val) +{ + s->regs[DAFB_MODE_SENSE >> 2] = val; + + trace_macfb_sense_write(val); + return; +} + +static void macfb_update_mode(MacfbState *s) +{ + s->width = s->mode->width; + s->height = s->mode->height; + s->depth = s->mode->depth; + + trace_macfb_update_mode(s->width, s->height, s->depth); + macfb_invalidate_display(s); +} + +static void macfb_mode_write(MacfbState *s) +{ + MacFbMode *macfb_mode; + int i; + + for (i = 0; i < ARRAY_SIZE(macfb_mode_table); i++) { + macfb_mode = &macfb_mode_table[i]; + + if (s->type != macfb_mode->type) { + continue; + } + + if ((s->regs[DAFB_MODE_CTRL1 >> 2] & 0xff) == + (macfb_mode->mode_ctrl1 & 0xff) && + (s->regs[DAFB_MODE_CTRL2 >> 2] & 0xff) == + (macfb_mode->mode_ctrl2 & 0xff)) { + s->mode = macfb_mode; + macfb_update_mode(s); + break; + } + } +} + +static MacFbMode *macfb_find_mode(MacfbDisplayType display_type, + uint16_t width, uint16_t height, + uint8_t depth) +{ + MacFbMode *macfb_mode; + int i; + + for (i = 0; i < ARRAY_SIZE(macfb_mode_table); i++) { + macfb_mode = &macfb_mode_table[i]; + + if (display_type == macfb_mode->type && width == macfb_mode->width && + height == macfb_mode->height && depth == macfb_mode->depth) { + return macfb_mode; + } + } + + return NULL; +} + +static gchar *macfb_mode_list(void) +{ + GString *list = g_string_new(""); + MacFbMode *macfb_mode; + int i; + + for (i = 0; i < ARRAY_SIZE(macfb_mode_table); i++) { + macfb_mode = &macfb_mode_table[i]; + + g_string_append_printf(list, " %dx%dx%d\n", macfb_mode->width, + macfb_mode->height, macfb_mode->depth); + } + + return g_string_free(list, FALSE); +} + + static void macfb_update_display(void *opaque) { MacfbState *s = opaque; @@ -271,6 +474,37 @@ static void macfb_update_display(void *opaque) macfb_draw_graphic(s); } +static void macfb_update_irq(MacfbState *s) +{ + uint32_t irq_state = s->regs[DAFB_INTR_STAT >> 2] & + s->regs[DAFB_INTR_MASK >> 2]; + + if (irq_state) { + qemu_irq_raise(s->irq); + } else { + qemu_irq_lower(s->irq); + } +} + +static int64_t macfb_next_vbl(void) +{ + return (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + DAFB_INTR_VBL_PERIOD_NS) / + DAFB_INTR_VBL_PERIOD_NS * DAFB_INTR_VBL_PERIOD_NS; +} + +static void macfb_vbl_timer(void *opaque) +{ + MacfbState *s = opaque; + int64_t next_vbl; + + s->regs[DAFB_INTR_STAT >> 2] |= DAFB_INTR_VBL; + macfb_update_irq(s); + + /* 60 Hz irq */ + next_vbl = macfb_next_vbl(); + timer_mod(s->vbl_timer, next_vbl); +} + static void macfb_reset(MacfbState *s) { int i; @@ -289,7 +523,28 @@ static uint64_t macfb_ctrl_read(void *opaque, hwaddr addr, unsigned int size) { - return 0; + MacfbState *s = opaque; + uint64_t val = 0; + + switch (addr) { + case DAFB_MODE_VADDR1: + case DAFB_MODE_VADDR2: + case 
DAFB_MODE_CTRL1: + case DAFB_MODE_CTRL2: + case DAFB_INTR_STAT: + val = s->regs[addr >> 2]; + break; + case DAFB_MODE_SENSE: + val = macfb_sense_read(s); + break; + default: + if (addr < MACFB_CTRL_TOPADDR) { + val = s->regs[addr >> 2]; + } + } + + trace_macfb_ctrl_read(addr, val, size); + return val; } static void macfb_ctrl_write(void *opaque, @@ -298,17 +553,56 @@ static void macfb_ctrl_write(void *opaque, unsigned int size) { MacfbState *s = opaque; + int64_t next_vbl; + switch (addr) { + case DAFB_MODE_VADDR1: + case DAFB_MODE_VADDR2: + s->regs[addr >> 2] = val; + break; + case DAFB_MODE_CTRL1 ... DAFB_MODE_CTRL1 + 3: + case DAFB_MODE_CTRL2 ... DAFB_MODE_CTRL2 + 3: + s->regs[addr >> 2] = val; + if (val) { + macfb_mode_write(s); + } + break; + case DAFB_MODE_SENSE: + macfb_sense_write(s, val); + break; + case DAFB_INTR_MASK: + s->regs[addr >> 2] = val; + if (val & DAFB_INTR_VBL) { + next_vbl = macfb_next_vbl(); + timer_mod(s->vbl_timer, next_vbl); + } else { + timer_del(s->vbl_timer); + } + break; + case DAFB_INTR_CLEAR: + s->regs[DAFB_INTR_STAT >> 2] &= ~DAFB_INTR_VBL; + macfb_update_irq(s); + break; case DAFB_RESET: s->palette_current = 0; + s->regs[DAFB_INTR_STAT >> 2] &= ~DAFB_INTR_VBL; + macfb_update_irq(s); break; case DAFB_LUT: - s->color_palette[s->palette_current++] = val; + s->color_palette[s->palette_current] = val; + s->palette_current = (s->palette_current + 1) % + ARRAY_SIZE(s->color_palette); if (s->palette_current % 3) { macfb_invalidate_display(s); } break; + default: + if (addr < MACFB_CTRL_TOPADDR) { + s->regs[addr >> 2] = val; + } } + + trace_macfb_ctrl_write(addr, val, size); } static const MemoryRegionOps macfb_ctrl_ops = { @@ -321,7 +615,7 @@ static const MemoryRegionOps macfb_ctrl_ops = { static int macfb_post_load(void *opaque, int version_id) { - macfb_invalidate_display(opaque); + macfb_mode_write(opaque); return 0; } @@ -329,11 +623,13 @@ static const VMStateDescription vmstate_macfb = { .name = "macfb", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .post_load = macfb_post_load, .fields = (VMStateField[]) { + VMSTATE_UINT8(type, MacfbState), VMSTATE_UINT8_ARRAY(color_palette, MacfbState, 256 * 3), VMSTATE_UINT32(palette_current, MacfbState), + VMSTATE_UINT32_ARRAY(regs, MacfbState, MACFB_NUM_REGS), + VMSTATE_TIMER_PTR(vbl_timer, MacfbState), VMSTATE_END_OF_LIST() } }; @@ -343,34 +639,51 @@ static const GraphicHwOps macfb_ops = { .gfx_update = macfb_update_display, }; -static void macfb_common_realize(DeviceState *dev, MacfbState *s, Error **errp) +static bool macfb_common_realize(DeviceState *dev, MacfbState *s, Error **errp) { DisplaySurface *surface; - if (s->depth != 1 && s->depth != 2 && s->depth != 4 && s->depth != 8 && - s->depth != 16 && s->depth != 24) { - error_setg(errp, "unknown guest depth %d", s->depth); - return; + s->mode = macfb_find_mode(s->type, s->width, s->height, s->depth); + if (!s->mode) { + gchar *list; + error_setg(errp, "unknown display mode: width %d, height %d, depth %d", + s->width, s->height, s->depth); + list = macfb_mode_list(); + error_append_hint(errp, "Available modes:\n%s", list); + g_free(list); + + return false; } + /* + * Set mode control registers to match the mode found above so that + * macfb_mode_write() does the right thing if no MacOS toolbox ROM + * is present to initialise them + */ + s->regs[DAFB_MODE_CTRL1 >> 2] = s->mode->mode_ctrl1; + s->regs[DAFB_MODE_CTRL2 >> 2] = s->mode->mode_ctrl2; + s->con = graphic_console_init(dev, 0, &macfb_ops, s); surface = qemu_console_surface(s->con); 
if (surface_bits_per_pixel(surface) != 32) { error_setg(errp, "unknown host depth %d", surface_bits_per_pixel(surface)); - return; + return false; } memory_region_init_io(&s->mem_ctrl, OBJECT(dev), &macfb_ctrl_ops, s, "macfb-ctrl", 0x1000); - memory_region_init_ram_nomigrate(&s->mem_vram, OBJECT(s), "macfb-vram", - MACFB_VRAM_SIZE, errp); + memory_region_init_ram(&s->mem_vram, OBJECT(dev), "macfb-vram", + MACFB_VRAM_SIZE, &error_abort); + memory_region_set_log(&s->mem_vram, true, DIRTY_MEMORY_VGA); s->vram = memory_region_get_ram_ptr(&s->mem_vram); s->vram_bit_mask = MACFB_VRAM_SIZE - 1; - vmstate_register_ram(&s->mem_vram, dev); - memory_region_set_coalescing(&s->mem_vram); + + s->vbl_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, macfb_vbl_timer, s); + macfb_update_mode(s); + return true; } static void macfb_sysbus_realize(DeviceState *dev, Error **errp) @@ -378,14 +691,23 @@ static void macfb_sysbus_realize(DeviceState *dev, Error **errp) MacfbSysBusState *s = MACFB(dev); MacfbState *ms = &s->macfb; - macfb_common_realize(dev, ms, errp); + if (!macfb_common_realize(dev, ms, errp)) { + return; + } + sysbus_init_mmio(SYS_BUS_DEVICE(s), &ms->mem_ctrl); sysbus_init_mmio(SYS_BUS_DEVICE(s), &ms->mem_vram); + + qdev_init_gpio_out(dev, &ms->irq, 1); } -const uint8_t macfb_rom[] = { - 255, 0, 0, 0, -}; +static void macfb_nubus_set_irq(void *opaque, int n, int level) +{ + MacfbNubusState *s = NUBUS_MACFB(opaque); + NubusDevice *nd = NUBUS_DEVICE(s); + + nubus_set_irq(nd, level); +} static void macfb_nubus_realize(DeviceState *dev, Error **errp) { @@ -395,12 +717,29 @@ static void macfb_nubus_realize(DeviceState *dev, Error **errp) MacfbState *ms = &s->macfb; ndc->parent_realize(dev, errp); + if (*errp) { + return; + } + + if (!macfb_common_realize(dev, ms, errp)) { + return; + } - macfb_common_realize(dev, ms, errp); memory_region_add_subregion(&nd->slot_mem, DAFB_BASE, &ms->mem_ctrl); memory_region_add_subregion(&nd->slot_mem, VIDEO_BASE, &ms->mem_vram); - nubus_register_rom(nd, macfb_rom, sizeof(macfb_rom), 1, 9, 0xf); + ms->irq = qemu_allocate_irq(macfb_nubus_set_irq, s, 0); +} + +static void macfb_nubus_unrealize(DeviceState *dev) +{ + MacfbNubusState *s = NUBUS_MACFB(dev); + MacfbNubusDeviceClass *ndc = NUBUS_MACFB_GET_CLASS(dev); + MacfbState *ms = &s->macfb; + + ndc->parent_unrealize(dev); + + qemu_free_irq(ms->irq); } static void macfb_sysbus_reset(DeviceState *d) @@ -419,16 +758,40 @@ static Property macfb_sysbus_properties[] = { DEFINE_PROP_UINT32("width", MacfbSysBusState, macfb.width, 640), DEFINE_PROP_UINT32("height", MacfbSysBusState, macfb.height, 480), DEFINE_PROP_UINT8("depth", MacfbSysBusState, macfb.depth, 8), + DEFINE_PROP_UINT8("display", MacfbSysBusState, macfb.type, + MACFB_DISPLAY_VGA), DEFINE_PROP_END_OF_LIST(), }; +static const VMStateDescription vmstate_macfb_sysbus = { + .name = "macfb-sysbus", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(macfb, MacfbSysBusState, 1, vmstate_macfb, MacfbState), + VMSTATE_END_OF_LIST() + } +}; + static Property macfb_nubus_properties[] = { DEFINE_PROP_UINT32("width", MacfbNubusState, macfb.width, 640), DEFINE_PROP_UINT32("height", MacfbNubusState, macfb.height, 480), DEFINE_PROP_UINT8("depth", MacfbNubusState, macfb.depth, 8), + DEFINE_PROP_UINT8("display", MacfbNubusState, macfb.type, + MACFB_DISPLAY_VGA), DEFINE_PROP_END_OF_LIST(), }; +static const VMStateDescription vmstate_macfb_nubus = { + .name = "macfb-nubus", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + 
VMSTATE_STRUCT(macfb, MacfbNubusState, 1, vmstate_macfb, MacfbState), + VMSTATE_END_OF_LIST() + } +}; + static void macfb_sysbus_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -436,7 +799,7 @@ static void macfb_sysbus_class_init(ObjectClass *klass, void *data) dc->realize = macfb_sysbus_realize; dc->desc = "SysBus Macintosh framebuffer"; dc->reset = macfb_sysbus_reset; - dc->vmsd = &vmstate_macfb; + dc->vmsd = &vmstate_macfb_sysbus; device_class_set_props(dc, macfb_sysbus_properties); } @@ -447,21 +810,23 @@ static void macfb_nubus_class_init(ObjectClass *klass, void *data) device_class_set_parent_realize(dc, macfb_nubus_realize, &ndc->parent_realize); + device_class_set_parent_unrealize(dc, macfb_nubus_unrealize, + &ndc->parent_unrealize); dc->desc = "Nubus Macintosh framebuffer"; dc->reset = macfb_nubus_reset; - dc->vmsd = &vmstate_macfb; + dc->vmsd = &vmstate_macfb_nubus; set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); device_class_set_props(dc, macfb_nubus_properties); } -static TypeInfo macfb_sysbus_info = { +static const TypeInfo macfb_sysbus_info = { .name = TYPE_MACFB, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(MacfbSysBusState), .class_init = macfb_sysbus_class_init, }; -static TypeInfo macfb_nubus_info = { +static const TypeInfo macfb_nubus_info = { .name = TYPE_NUBUS_MACFB, .parent = TYPE_NUBUS_DEVICE, .instance_size = sizeof(MacfbNubusState), diff --git a/hw/display/meson.build b/hw/display/meson.build index 1e6b707d3c..7a725ed80e 100644 --- a/hw/display/meson.build +++ b/hw/display/meson.build @@ -18,7 +18,7 @@ softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xenfb.c')) softmmu_ss.add(when: 'CONFIG_VGA_PCI', if_true: files('vga-pci.c')) softmmu_ss.add(when: 'CONFIG_VGA_ISA', if_true: files('vga-isa.c')) -softmmu_ss.add(when: 'CONFIG_VGA_ISA_MM', if_true: files('vga-isa-mm.c')) +softmmu_ss.add(when: 'CONFIG_VGA_MMIO', if_true: files('vga-mmio.c')) softmmu_ss.add(when: 'CONFIG_VMWARE_VGA', if_true: files('vmware_vga.c')) softmmu_ss.add(when: 'CONFIG_BOCHS_DISPLAY', if_true: files('bochs-display.c')) @@ -38,10 +38,21 @@ softmmu_ss.add(when: 'CONFIG_NEXTCUBE', if_true: files('next-fb.c')) specific_ss.add(when: 'CONFIG_VGA', if_true: files('vga.c')) +if (config_all_devices.has_key('CONFIG_VGA_CIRRUS') or + config_all_devices.has_key('CONFIG_VGA_PCI') or + config_all_devices.has_key('CONFIG_VMWARE_VGA') or + config_all_devices.has_key('CONFIG_ATI_VGA') + ) + softmmu_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi-vga.c'), + if_false: files('acpi-vga-stub.c')) +endif + if config_all_devices.has_key('CONFIG_QXL') qxl_ss = ss.source_set() qxl_ss.add(when: 'CONFIG_QXL', if_true: [files('qxl.c', 'qxl-logger.c', 'qxl-render.c'), pixman, spice]) + qxl_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi-vga.c'), + if_false: files('acpi-vga-stub.c')) hw_display_modules += {'qxl': qxl_ss} endif @@ -52,11 +63,13 @@ softmmu_ss.add(when: 'CONFIG_ARTIST', if_true: files('artist.c')) softmmu_ss.add(when: [pixman, 'CONFIG_ATI_VGA'], if_true: files('ati.c', 'ati_2d.c', 'ati_dbg.c')) + if config_all_devices.has_key('CONFIG_VIRTIO_GPU') virtio_gpu_ss = ss.source_set() virtio_gpu_ss.add(when: 'CONFIG_VIRTIO_GPU', if_true: [files('virtio-gpu-base.c', 'virtio-gpu.c'), pixman]) - virtio_gpu_ss.add(when: 'CONFIG_LINUX', if_true: files('virtio-gpu-udmabuf.c')) + virtio_gpu_ss.add(when: 'CONFIG_LINUX', if_true: files('virtio-gpu-udmabuf.c'), + if_false: files('virtio-gpu-udmabuf-stubs.c')) virtio_gpu_ss.add(when: 'CONFIG_VHOST_USER_GPU', if_true: 
files('vhost-user-gpu.c')) hw_display_modules += {'virtio-gpu': virtio_gpu_ss} @@ -86,14 +99,19 @@ if config_all_devices.has_key('CONFIG_VIRTIO_VGA') if_true: [files('virtio-vga.c'), pixman]) virtio_vga_ss.add(when: 'CONFIG_VHOST_USER_VGA', if_true: files('vhost-user-vga.c')) + virtio_vga_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi-vga.c'), + if_false: files('acpi-vga-stub.c')) hw_display_modules += {'virtio-vga': virtio_vga_ss} virtio_vga_gl_ss = ss.source_set() virtio_vga_gl_ss.add(when: ['CONFIG_VIRTIO_VGA', virgl, opengl], if_true: [files('virtio-vga-gl.c'), pixman]) + virtio_vga_gl_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi-vga.c'), + if_false: files('acpi-vga-stub.c')) hw_display_modules += {'virtio-vga-gl': virtio_vga_gl_ss} endif specific_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_lcdc.c')) +softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('acpi-vga-stub.c')) modules += { 'hw-display': hw_display_modules } diff --git a/hw/display/next-fb.c b/hw/display/next-fb.c index dd6a1aa8ae..8446ff3c00 100644 --- a/hw/display/next-fb.c +++ b/hw/display/next-fb.c @@ -126,7 +126,7 @@ static void nextfb_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); dc->realize = nextfb_realize; - /* Note: This device does not any state that we have to reset or migrate */ + /* Note: This device does not have any state that we have to reset or migrate */ } static const TypeInfo nextfb_info = { diff --git a/hw/display/omap_dss.c b/hw/display/omap_dss.c index 21fde58a26..09e18407b4 100644 --- a/hw/display/omap_dss.c +++ b/hw/display/omap_dss.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "hw/hw.h" #include "hw/irq.h" #include "ui/console.h" @@ -181,25 +182,25 @@ static uint64_t omap_diss_read(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* DSS_REVISIONNUMBER */ + case 0x00: /* DSS_REVISIONNUMBER */ return 0x20; - case 0x10: /* DSS_SYSCONFIG */ + case 0x10: /* DSS_SYSCONFIG */ return s->autoidle; - case 0x14: /* DSS_SYSSTATUS */ - return 1; /* RESETDONE */ + case 0x14: /* DSS_SYSSTATUS */ + return 1; /* RESETDONE */ - case 0x40: /* DSS_CONTROL */ + case 0x40: /* DSS_CONTROL */ return s->control; - case 0x50: /* DSS_PSA_LCD_REG_1 */ - case 0x54: /* DSS_PSA_LCD_REG_2 */ - case 0x58: /* DSS_PSA_VIDEO_REG */ + case 0x50: /* DSS_PSA_LCD_REG_1 */ + case 0x54: /* DSS_PSA_LCD_REG_2 */ + case 0x58: /* DSS_PSA_VIDEO_REG */ /* TODO: fake some values when appropriate s->control bits are set */ return 0; - case 0x5c: /* DSS_STATUS */ + case 0x5c: /* DSS_STATUS */ return 1 + (s->control & 1); default: @@ -220,22 +221,22 @@ static void omap_diss_write(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* DSS_REVISIONNUMBER */ - case 0x14: /* DSS_SYSSTATUS */ - case 0x50: /* DSS_PSA_LCD_REG_1 */ - case 0x54: /* DSS_PSA_LCD_REG_2 */ - case 0x58: /* DSS_PSA_VIDEO_REG */ - case 0x5c: /* DSS_STATUS */ + case 0x00: /* DSS_REVISIONNUMBER */ + case 0x14: /* DSS_SYSSTATUS */ + case 0x50: /* DSS_PSA_LCD_REG_1 */ + case 0x54: /* DSS_PSA_LCD_REG_2 */ + case 0x58: /* DSS_PSA_VIDEO_REG */ + case 0x5c: /* DSS_STATUS */ OMAP_RO_REG(addr); break; - case 0x10: /* DSS_SYSCONFIG */ - if (value & 2) /* SOFTRESET */ + case 0x10: /* DSS_SYSCONFIG */ + if (value & 2) /* SOFTRESET */ omap_dss_reset(s); s->autoidle = value & 1; break; - case 0x40: /* DSS_CONTROL */ + case 0x40: /* DSS_CONTROL */ s->control = value & 0x3dd; break; @@ -260,112 +261,112 @@ static uint64_t omap_disc_read(void *opaque, hwaddr addr, } switch (addr) { - case 0x000: /* 
DISPC_REVISION */ + case 0x000: /* DISPC_REVISION */ return 0x20; - case 0x010: /* DISPC_SYSCONFIG */ + case 0x010: /* DISPC_SYSCONFIG */ return s->dispc.idlemode; - case 0x014: /* DISPC_SYSSTATUS */ - return 1; /* RESETDONE */ + case 0x014: /* DISPC_SYSSTATUS */ + return 1; /* RESETDONE */ - case 0x018: /* DISPC_IRQSTATUS */ + case 0x018: /* DISPC_IRQSTATUS */ return s->dispc.irqst; - case 0x01c: /* DISPC_IRQENABLE */ + case 0x01c: /* DISPC_IRQENABLE */ return s->dispc.irqen; - case 0x040: /* DISPC_CONTROL */ + case 0x040: /* DISPC_CONTROL */ return s->dispc.control; - case 0x044: /* DISPC_CONFIG */ + case 0x044: /* DISPC_CONFIG */ return s->dispc.config; - case 0x048: /* DISPC_CAPABLE */ + case 0x048: /* DISPC_CAPABLE */ return s->dispc.capable; - case 0x04c: /* DISPC_DEFAULT_COLOR0 */ + case 0x04c: /* DISPC_DEFAULT_COLOR0 */ return s->dispc.bg[0]; - case 0x050: /* DISPC_DEFAULT_COLOR1 */ + case 0x050: /* DISPC_DEFAULT_COLOR1 */ return s->dispc.bg[1]; - case 0x054: /* DISPC_TRANS_COLOR0 */ + case 0x054: /* DISPC_TRANS_COLOR0 */ return s->dispc.trans[0]; - case 0x058: /* DISPC_TRANS_COLOR1 */ + case 0x058: /* DISPC_TRANS_COLOR1 */ return s->dispc.trans[1]; - case 0x05c: /* DISPC_LINE_STATUS */ + case 0x05c: /* DISPC_LINE_STATUS */ return 0x7ff; - case 0x060: /* DISPC_LINE_NUMBER */ + case 0x060: /* DISPC_LINE_NUMBER */ return s->dispc.line; - case 0x064: /* DISPC_TIMING_H */ + case 0x064: /* DISPC_TIMING_H */ return s->dispc.timing[0]; - case 0x068: /* DISPC_TIMING_V */ + case 0x068: /* DISPC_TIMING_V */ return s->dispc.timing[1]; - case 0x06c: /* DISPC_POL_FREQ */ + case 0x06c: /* DISPC_POL_FREQ */ return s->dispc.timing[2]; - case 0x070: /* DISPC_DIVISOR */ + case 0x070: /* DISPC_DIVISOR */ return s->dispc.timing[3]; - case 0x078: /* DISPC_SIZE_DIG */ + case 0x078: /* DISPC_SIZE_DIG */ return ((s->dig.ny - 1) << 16) | (s->dig.nx - 1); - case 0x07c: /* DISPC_SIZE_LCD */ + case 0x07c: /* DISPC_SIZE_LCD */ return ((s->lcd.ny - 1) << 16) | (s->lcd.nx - 1); - case 0x080: /* DISPC_GFX_BA0 */ + case 0x080: /* DISPC_GFX_BA0 */ return s->dispc.l[0].addr[0]; - case 0x084: /* DISPC_GFX_BA1 */ + case 0x084: /* DISPC_GFX_BA1 */ return s->dispc.l[0].addr[1]; - case 0x088: /* DISPC_GFX_POSITION */ + case 0x088: /* DISPC_GFX_POSITION */ return (s->dispc.l[0].posy << 16) | s->dispc.l[0].posx; - case 0x08c: /* DISPC_GFX_SIZE */ + case 0x08c: /* DISPC_GFX_SIZE */ return ((s->dispc.l[0].ny - 1) << 16) | (s->dispc.l[0].nx - 1); - case 0x0a0: /* DISPC_GFX_ATTRIBUTES */ + case 0x0a0: /* DISPC_GFX_ATTRIBUTES */ return s->dispc.l[0].attr; - case 0x0a4: /* DISPC_GFX_FIFO_TRESHOLD */ + case 0x0a4: /* DISPC_GFX_FIFO_TRESHOLD */ return s->dispc.l[0].tresh; - case 0x0a8: /* DISPC_GFX_FIFO_SIZE_STATUS */ + case 0x0a8: /* DISPC_GFX_FIFO_SIZE_STATUS */ return 256; - case 0x0ac: /* DISPC_GFX_ROW_INC */ + case 0x0ac: /* DISPC_GFX_ROW_INC */ return s->dispc.l[0].rowinc; - case 0x0b0: /* DISPC_GFX_PIXEL_INC */ + case 0x0b0: /* DISPC_GFX_PIXEL_INC */ return s->dispc.l[0].colinc; - case 0x0b4: /* DISPC_GFX_WINDOW_SKIP */ + case 0x0b4: /* DISPC_GFX_WINDOW_SKIP */ return s->dispc.l[0].wininc; - case 0x0b8: /* DISPC_GFX_TABLE_BA */ + case 0x0b8: /* DISPC_GFX_TABLE_BA */ return s->dispc.l[0].addr[2]; - case 0x0bc: /* DISPC_VID1_BA0 */ - case 0x0c0: /* DISPC_VID1_BA1 */ - case 0x0c4: /* DISPC_VID1_POSITION */ - case 0x0c8: /* DISPC_VID1_SIZE */ - case 0x0cc: /* DISPC_VID1_ATTRIBUTES */ - case 0x0d0: /* DISPC_VID1_FIFO_TRESHOLD */ - case 0x0d4: /* DISPC_VID1_FIFO_SIZE_STATUS */ - case 0x0d8: /* DISPC_VID1_ROW_INC */ - case 0x0dc: 
/* DISPC_VID1_PIXEL_INC */ - case 0x0e0: /* DISPC_VID1_FIR */ - case 0x0e4: /* DISPC_VID1_PICTURE_SIZE */ - case 0x0e8: /* DISPC_VID1_ACCU0 */ - case 0x0ec: /* DISPC_VID1_ACCU1 */ - case 0x0f0 ... 0x140: /* DISPC_VID1_FIR_COEF, DISPC_VID1_CONV_COEF */ - case 0x14c: /* DISPC_VID2_BA0 */ - case 0x150: /* DISPC_VID2_BA1 */ - case 0x154: /* DISPC_VID2_POSITION */ - case 0x158: /* DISPC_VID2_SIZE */ - case 0x15c: /* DISPC_VID2_ATTRIBUTES */ - case 0x160: /* DISPC_VID2_FIFO_TRESHOLD */ - case 0x164: /* DISPC_VID2_FIFO_SIZE_STATUS */ - case 0x168: /* DISPC_VID2_ROW_INC */ - case 0x16c: /* DISPC_VID2_PIXEL_INC */ - case 0x170: /* DISPC_VID2_FIR */ - case 0x174: /* DISPC_VID2_PICTURE_SIZE */ - case 0x178: /* DISPC_VID2_ACCU0 */ - case 0x17c: /* DISPC_VID2_ACCU1 */ - case 0x180 ... 0x1d0: /* DISPC_VID2_FIR_COEF, DISPC_VID2_CONV_COEF */ - case 0x1d4: /* DISPC_DATA_CYCLE1 */ - case 0x1d8: /* DISPC_DATA_CYCLE2 */ - case 0x1dc: /* DISPC_DATA_CYCLE3 */ + case 0x0bc: /* DISPC_VID1_BA0 */ + case 0x0c0: /* DISPC_VID1_BA1 */ + case 0x0c4: /* DISPC_VID1_POSITION */ + case 0x0c8: /* DISPC_VID1_SIZE */ + case 0x0cc: /* DISPC_VID1_ATTRIBUTES */ + case 0x0d0: /* DISPC_VID1_FIFO_TRESHOLD */ + case 0x0d4: /* DISPC_VID1_FIFO_SIZE_STATUS */ + case 0x0d8: /* DISPC_VID1_ROW_INC */ + case 0x0dc: /* DISPC_VID1_PIXEL_INC */ + case 0x0e0: /* DISPC_VID1_FIR */ + case 0x0e4: /* DISPC_VID1_PICTURE_SIZE */ + case 0x0e8: /* DISPC_VID1_ACCU0 */ + case 0x0ec: /* DISPC_VID1_ACCU1 */ + case 0x0f0 ... 0x140: /* DISPC_VID1_FIR_COEF, DISPC_VID1_CONV_COEF */ + case 0x14c: /* DISPC_VID2_BA0 */ + case 0x150: /* DISPC_VID2_BA1 */ + case 0x154: /* DISPC_VID2_POSITION */ + case 0x158: /* DISPC_VID2_SIZE */ + case 0x15c: /* DISPC_VID2_ATTRIBUTES */ + case 0x160: /* DISPC_VID2_FIFO_TRESHOLD */ + case 0x164: /* DISPC_VID2_FIFO_SIZE_STATUS */ + case 0x168: /* DISPC_VID2_ROW_INC */ + case 0x16c: /* DISPC_VID2_PIXEL_INC */ + case 0x170: /* DISPC_VID2_FIR */ + case 0x174: /* DISPC_VID2_PICTURE_SIZE */ + case 0x178: /* DISPC_VID2_ACCU0 */ + case 0x17c: /* DISPC_VID2_ACCU1 */ + case 0x180 ... 
0x1d0: /* DISPC_VID2_FIR_COEF, DISPC_VID2_CONV_COEF */ + case 0x1d4: /* DISPC_DATA_CYCLE1 */ + case 0x1d8: /* DISPC_DATA_CYCLE2 */ + case 0x1dc: /* DISPC_DATA_CYCLE3 */ return 0; default: @@ -386,33 +387,33 @@ static void omap_disc_write(void *opaque, hwaddr addr, } switch (addr) { - case 0x010: /* DISPC_SYSCONFIG */ - if (value & 2) /* SOFTRESET */ + case 0x010: /* DISPC_SYSCONFIG */ + if (value & 2) /* SOFTRESET */ omap_dss_reset(s); s->dispc.idlemode = value & 0x301b; break; - case 0x018: /* DISPC_IRQSTATUS */ + case 0x018: /* DISPC_IRQSTATUS */ s->dispc.irqst &= ~value; omap_dispc_interrupt_update(s); break; - case 0x01c: /* DISPC_IRQENABLE */ + case 0x01c: /* DISPC_IRQENABLE */ s->dispc.irqen = value & 0xffff; omap_dispc_interrupt_update(s); break; - case 0x040: /* DISPC_CONTROL */ + case 0x040: /* DISPC_CONTROL */ s->dispc.control = value & 0x07ff9fff; s->dig.enable = (value >> 1) & 1; s->lcd.enable = (value >> 0) & 1; - if (value & (1 << 12)) /* OVERLAY_OPTIMIZATION */ + if (value & (1 << 12)) /* OVERLAY_OPTIMIZATION */ if (!((s->dispc.l[1].attr | s->dispc.l[2].attr) & 1)) { fprintf(stderr, "%s: Overlay Optimization when no overlay " "region effectively exists leads to " "unpredictable behaviour!\n", __func__); } - if (value & (1 << 6)) { /* GODIGITAL */ + if (value & (1 << 6)) { /* GODIGITAL */ /* XXX: Shadowed fields are: * s->dispc.config * s->dispc.capable @@ -443,13 +444,13 @@ static void omap_disc_write(void *opaque, hwaddr addr, * All they need to be loaded here from their shadow registers. */ } - if (value & (1 << 5)) { /* GOLCD */ + if (value & (1 << 5)) { /* GOLCD */ /* XXX: Likewise for LCD here. */ } s->dispc.invalidate = 1; break; - case 0x044: /* DISPC_CONFIG */ + case 0x044: /* DISPC_CONFIG */ s->dispc.config = value & 0x3fff; /* XXX: * bits 2:1 (LOADMODE) reset to 0 after set to 1 and palette loaded @@ -458,73 +459,73 @@ static void omap_disc_write(void *opaque, hwaddr addr, s->dispc.invalidate = 1; break; - case 0x048: /* DISPC_CAPABLE */ + case 0x048: /* DISPC_CAPABLE */ s->dispc.capable = value & 0x3ff; break; - case 0x04c: /* DISPC_DEFAULT_COLOR0 */ + case 0x04c: /* DISPC_DEFAULT_COLOR0 */ s->dispc.bg[0] = value & 0xffffff; s->dispc.invalidate = 1; break; - case 0x050: /* DISPC_DEFAULT_COLOR1 */ + case 0x050: /* DISPC_DEFAULT_COLOR1 */ s->dispc.bg[1] = value & 0xffffff; s->dispc.invalidate = 1; break; - case 0x054: /* DISPC_TRANS_COLOR0 */ + case 0x054: /* DISPC_TRANS_COLOR0 */ s->dispc.trans[0] = value & 0xffffff; s->dispc.invalidate = 1; break; - case 0x058: /* DISPC_TRANS_COLOR1 */ + case 0x058: /* DISPC_TRANS_COLOR1 */ s->dispc.trans[1] = value & 0xffffff; s->dispc.invalidate = 1; break; - case 0x060: /* DISPC_LINE_NUMBER */ + case 0x060: /* DISPC_LINE_NUMBER */ s->dispc.line = value & 0x7ff; break; - case 0x064: /* DISPC_TIMING_H */ + case 0x064: /* DISPC_TIMING_H */ s->dispc.timing[0] = value & 0x0ff0ff3f; break; - case 0x068: /* DISPC_TIMING_V */ + case 0x068: /* DISPC_TIMING_V */ s->dispc.timing[1] = value & 0x0ff0ff3f; break; - case 0x06c: /* DISPC_POL_FREQ */ + case 0x06c: /* DISPC_POL_FREQ */ s->dispc.timing[2] = value & 0x0003ffff; break; - case 0x070: /* DISPC_DIVISOR */ + case 0x070: /* DISPC_DIVISOR */ s->dispc.timing[3] = value & 0x00ff00ff; break; - case 0x078: /* DISPC_SIZE_DIG */ - s->dig.nx = ((value >> 0) & 0x7ff) + 1; /* PPL */ - s->dig.ny = ((value >> 16) & 0x7ff) + 1; /* LPP */ + case 0x078: /* DISPC_SIZE_DIG */ + s->dig.nx = ((value >> 0) & 0x7ff) + 1; /* PPL */ + s->dig.ny = ((value >> 16) & 0x7ff) + 1; /* LPP */ s->dispc.invalidate = 1; 
break; - case 0x07c: /* DISPC_SIZE_LCD */ - s->lcd.nx = ((value >> 0) & 0x7ff) + 1; /* PPL */ - s->lcd.ny = ((value >> 16) & 0x7ff) + 1; /* LPP */ + case 0x07c: /* DISPC_SIZE_LCD */ + s->lcd.nx = ((value >> 0) & 0x7ff) + 1; /* PPL */ + s->lcd.ny = ((value >> 16) & 0x7ff) + 1; /* LPP */ s->dispc.invalidate = 1; break; - case 0x080: /* DISPC_GFX_BA0 */ + case 0x080: /* DISPC_GFX_BA0 */ s->dispc.l[0].addr[0] = (hwaddr) value; s->dispc.invalidate = 1; break; - case 0x084: /* DISPC_GFX_BA1 */ + case 0x084: /* DISPC_GFX_BA1 */ s->dispc.l[0].addr[1] = (hwaddr) value; s->dispc.invalidate = 1; break; - case 0x088: /* DISPC_GFX_POSITION */ - s->dispc.l[0].posx = ((value >> 0) & 0x7ff); /* GFXPOSX */ - s->dispc.l[0].posy = ((value >> 16) & 0x7ff); /* GFXPOSY */ + case 0x088: /* DISPC_GFX_POSITION */ + s->dispc.l[0].posx = ((value >> 0) & 0x7ff); /* GFXPOSX */ + s->dispc.l[0].posy = ((value >> 16) & 0x7ff); /* GFXPOSY */ s->dispc.invalidate = 1; break; - case 0x08c: /* DISPC_GFX_SIZE */ - s->dispc.l[0].nx = ((value >> 0) & 0x7ff) + 1; /* GFXSIZEX */ - s->dispc.l[0].ny = ((value >> 16) & 0x7ff) + 1; /* GFXSIZEY */ + case 0x08c: /* DISPC_GFX_SIZE */ + s->dispc.l[0].nx = ((value >> 0) & 0x7ff) + 1; /* GFXSIZEX */ + s->dispc.l[0].ny = ((value >> 16) & 0x7ff) + 1; /* GFXSIZEY */ s->dispc.invalidate = 1; break; - case 0x0a0: /* DISPC_GFX_ATTRIBUTES */ + case 0x0a0: /* DISPC_GFX_ATTRIBUTES */ s->dispc.l[0].attr = value & 0x7ff; if (value & (3 << 9)) fprintf(stderr, "%s: Big-endian pixel format not supported\n", @@ -533,54 +534,54 @@ static void omap_disc_write(void *opaque, hwaddr addr, s->dispc.l[0].bpp = (value >> 1) & 0xf; s->dispc.invalidate = 1; break; - case 0x0a4: /* DISPC_GFX_FIFO_TRESHOLD */ + case 0x0a4: /* DISPC_GFX_FIFO_TRESHOLD */ s->dispc.l[0].tresh = value & 0x01ff01ff; break; - case 0x0ac: /* DISPC_GFX_ROW_INC */ + case 0x0ac: /* DISPC_GFX_ROW_INC */ s->dispc.l[0].rowinc = value; s->dispc.invalidate = 1; break; - case 0x0b0: /* DISPC_GFX_PIXEL_INC */ + case 0x0b0: /* DISPC_GFX_PIXEL_INC */ s->dispc.l[0].colinc = value; s->dispc.invalidate = 1; break; - case 0x0b4: /* DISPC_GFX_WINDOW_SKIP */ + case 0x0b4: /* DISPC_GFX_WINDOW_SKIP */ s->dispc.l[0].wininc = value; break; - case 0x0b8: /* DISPC_GFX_TABLE_BA */ + case 0x0b8: /* DISPC_GFX_TABLE_BA */ s->dispc.l[0].addr[2] = (hwaddr) value; s->dispc.invalidate = 1; break; - case 0x0bc: /* DISPC_VID1_BA0 */ - case 0x0c0: /* DISPC_VID1_BA1 */ - case 0x0c4: /* DISPC_VID1_POSITION */ - case 0x0c8: /* DISPC_VID1_SIZE */ - case 0x0cc: /* DISPC_VID1_ATTRIBUTES */ - case 0x0d0: /* DISPC_VID1_FIFO_TRESHOLD */ - case 0x0d8: /* DISPC_VID1_ROW_INC */ - case 0x0dc: /* DISPC_VID1_PIXEL_INC */ - case 0x0e0: /* DISPC_VID1_FIR */ - case 0x0e4: /* DISPC_VID1_PICTURE_SIZE */ - case 0x0e8: /* DISPC_VID1_ACCU0 */ - case 0x0ec: /* DISPC_VID1_ACCU1 */ - case 0x0f0 ... 0x140: /* DISPC_VID1_FIR_COEF, DISPC_VID1_CONV_COEF */ - case 0x14c: /* DISPC_VID2_BA0 */ - case 0x150: /* DISPC_VID2_BA1 */ - case 0x154: /* DISPC_VID2_POSITION */ - case 0x158: /* DISPC_VID2_SIZE */ - case 0x15c: /* DISPC_VID2_ATTRIBUTES */ - case 0x160: /* DISPC_VID2_FIFO_TRESHOLD */ - case 0x168: /* DISPC_VID2_ROW_INC */ - case 0x16c: /* DISPC_VID2_PIXEL_INC */ - case 0x170: /* DISPC_VID2_FIR */ - case 0x174: /* DISPC_VID2_PICTURE_SIZE */ - case 0x178: /* DISPC_VID2_ACCU0 */ - case 0x17c: /* DISPC_VID2_ACCU1 */ - case 0x180 ... 
0x1d0: /* DISPC_VID2_FIR_COEF, DISPC_VID2_CONV_COEF */ - case 0x1d4: /* DISPC_DATA_CYCLE1 */ - case 0x1d8: /* DISPC_DATA_CYCLE2 */ - case 0x1dc: /* DISPC_DATA_CYCLE3 */ + case 0x0bc: /* DISPC_VID1_BA0 */ + case 0x0c0: /* DISPC_VID1_BA1 */ + case 0x0c4: /* DISPC_VID1_POSITION */ + case 0x0c8: /* DISPC_VID1_SIZE */ + case 0x0cc: /* DISPC_VID1_ATTRIBUTES */ + case 0x0d0: /* DISPC_VID1_FIFO_TRESHOLD */ + case 0x0d8: /* DISPC_VID1_ROW_INC */ + case 0x0dc: /* DISPC_VID1_PIXEL_INC */ + case 0x0e0: /* DISPC_VID1_FIR */ + case 0x0e4: /* DISPC_VID1_PICTURE_SIZE */ + case 0x0e8: /* DISPC_VID1_ACCU0 */ + case 0x0ec: /* DISPC_VID1_ACCU1 */ + case 0x0f0 ... 0x140: /* DISPC_VID1_FIR_COEF, DISPC_VID1_CONV_COEF */ + case 0x14c: /* DISPC_VID2_BA0 */ + case 0x150: /* DISPC_VID2_BA1 */ + case 0x154: /* DISPC_VID2_POSITION */ + case 0x158: /* DISPC_VID2_SIZE */ + case 0x15c: /* DISPC_VID2_ATTRIBUTES */ + case 0x160: /* DISPC_VID2_FIFO_TRESHOLD */ + case 0x168: /* DISPC_VID2_ROW_INC */ + case 0x16c: /* DISPC_VID2_PIXEL_INC */ + case 0x170: /* DISPC_VID2_FIR */ + case 0x174: /* DISPC_VID2_PICTURE_SIZE */ + case 0x178: /* DISPC_VID2_ACCU0 */ + case 0x17c: /* DISPC_VID2_ACCU1 */ + case 0x180 ... 0x1d0: /* DISPC_VID2_FIR_COEF, DISPC_VID2_CONV_COEF */ + case 0x1d4: /* DISPC_DATA_CYCLE1 */ + case 0x1d8: /* DISPC_DATA_CYCLE2 */ + case 0x1dc: /* DISPC_DATA_CYCLE3 */ break; default: @@ -616,14 +617,14 @@ static void omap_rfbi_transfer_start(struct omap_dss_s *s) if (!s->rfbi.enable || s->rfbi.busy) return; - if (s->rfbi.control & (1 << 1)) { /* BYPASS */ + if (s->rfbi.control & (1 << 1)) { /* BYPASS */ /* TODO: in non-Bypass mode we probably need to just assert the * DRQ and wait for DMA to write the pixels. */ qemu_log_mask(LOG_UNIMP, "%s: Bypass mode unimplemented\n", __func__); return; } - if (!(s->dispc.control & (1 << 11))) /* RFBIMODE */ + if (!(s->dispc.control & (1 << 11))) /* RFBIMODE */ return; /* TODO: check that LCD output is enabled in DISPC. 
*/ @@ -664,7 +665,7 @@ static void omap_rfbi_transfer_start(struct omap_dss_s *s) omap_rfbi_transfer_stop(s); /* TODO */ - s->dispc.irqst |= 1; /* FRAMEDONE */ + s->dispc.irqst |= 1; /* FRAMEDONE */ omap_dispc_interrupt_update(s); } @@ -678,57 +679,57 @@ static uint64_t omap_rfbi_read(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* RFBI_REVISION */ + case 0x00: /* RFBI_REVISION */ return 0x10; - case 0x10: /* RFBI_SYSCONFIG */ + case 0x10: /* RFBI_SYSCONFIG */ return s->rfbi.idlemode; - case 0x14: /* RFBI_SYSSTATUS */ - return 1 | (s->rfbi.busy << 8); /* RESETDONE */ + case 0x14: /* RFBI_SYSSTATUS */ + return 1 | (s->rfbi.busy << 8); /* RESETDONE */ - case 0x40: /* RFBI_CONTROL */ + case 0x40: /* RFBI_CONTROL */ return s->rfbi.control; - case 0x44: /* RFBI_PIXELCNT */ + case 0x44: /* RFBI_PIXELCNT */ return s->rfbi.pixels; - case 0x48: /* RFBI_LINE_NUMBER */ + case 0x48: /* RFBI_LINE_NUMBER */ return s->rfbi.skiplines; - case 0x58: /* RFBI_READ */ - case 0x5c: /* RFBI_STATUS */ + case 0x58: /* RFBI_READ */ + case 0x5c: /* RFBI_STATUS */ return s->rfbi.rxbuf; - case 0x60: /* RFBI_CONFIG0 */ + case 0x60: /* RFBI_CONFIG0 */ return s->rfbi.config[0]; - case 0x64: /* RFBI_ONOFF_TIME0 */ + case 0x64: /* RFBI_ONOFF_TIME0 */ return s->rfbi.time[0]; - case 0x68: /* RFBI_CYCLE_TIME0 */ + case 0x68: /* RFBI_CYCLE_TIME0 */ return s->rfbi.time[1]; - case 0x6c: /* RFBI_DATA_CYCLE1_0 */ + case 0x6c: /* RFBI_DATA_CYCLE1_0 */ return s->rfbi.data[0]; - case 0x70: /* RFBI_DATA_CYCLE2_0 */ + case 0x70: /* RFBI_DATA_CYCLE2_0 */ return s->rfbi.data[1]; - case 0x74: /* RFBI_DATA_CYCLE3_0 */ + case 0x74: /* RFBI_DATA_CYCLE3_0 */ return s->rfbi.data[2]; - case 0x78: /* RFBI_CONFIG1 */ + case 0x78: /* RFBI_CONFIG1 */ return s->rfbi.config[1]; - case 0x7c: /* RFBI_ONOFF_TIME1 */ + case 0x7c: /* RFBI_ONOFF_TIME1 */ return s->rfbi.time[2]; - case 0x80: /* RFBI_CYCLE_TIME1 */ + case 0x80: /* RFBI_CYCLE_TIME1 */ return s->rfbi.time[3]; - case 0x84: /* RFBI_DATA_CYCLE1_1 */ + case 0x84: /* RFBI_DATA_CYCLE1_1 */ return s->rfbi.data[3]; - case 0x88: /* RFBI_DATA_CYCLE2_1 */ + case 0x88: /* RFBI_DATA_CYCLE2_1 */ return s->rfbi.data[4]; - case 0x8c: /* RFBI_DATA_CYCLE3_1 */ + case 0x8c: /* RFBI_DATA_CYCLE3_1 */ return s->rfbi.data[5]; - case 0x90: /* RFBI_VSYNC_WIDTH */ + case 0x90: /* RFBI_VSYNC_WIDTH */ return s->rfbi.vsync; - case 0x94: /* RFBI_HSYNC_WIDTH */ + case 0x94: /* RFBI_HSYNC_WIDTH */ return s->rfbi.hsync; } OMAP_BAD_REG(addr); @@ -746,41 +747,41 @@ static void omap_rfbi_write(void *opaque, hwaddr addr, } switch (addr) { - case 0x10: /* RFBI_SYSCONFIG */ - if (value & 2) /* SOFTRESET */ + case 0x10: /* RFBI_SYSCONFIG */ + if (value & 2) /* SOFTRESET */ omap_rfbi_reset(s); s->rfbi.idlemode = value & 0x19; break; - case 0x40: /* RFBI_CONTROL */ + case 0x40: /* RFBI_CONTROL */ s->rfbi.control = value & 0xf; s->rfbi.enable = value & 1; - if (value & (1 << 4) && /* ITE */ + if (value & (1 << 4) && /* ITE */ !(s->rfbi.config[0] & s->rfbi.config[1] & 0xc)) omap_rfbi_transfer_start(s); break; - case 0x44: /* RFBI_PIXELCNT */ + case 0x44: /* RFBI_PIXELCNT */ s->rfbi.pixels = value; break; - case 0x48: /* RFBI_LINE_NUMBER */ + case 0x48: /* RFBI_LINE_NUMBER */ s->rfbi.skiplines = value & 0x7ff; break; - case 0x4c: /* RFBI_CMD */ + case 0x4c: /* RFBI_CMD */ if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0]) s->rfbi.chip[0]->write(s->rfbi.chip[0]->opaque, 0, value & 0xffff); if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1]) s->rfbi.chip[1]->write(s->rfbi.chip[1]->opaque, 0, value & 0xffff); break; - case 
0x50: /* RFBI_PARAM */ + case 0x50: /* RFBI_PARAM */ if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0]) s->rfbi.chip[0]->write(s->rfbi.chip[0]->opaque, 1, value & 0xffff); if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1]) s->rfbi.chip[1]->write(s->rfbi.chip[1]->opaque, 1, value & 0xffff); break; - case 0x54: /* RFBI_DATA */ + case 0x54: /* RFBI_DATA */ /* TODO: take into account the format set up in s->rfbi.config[?] and * s->rfbi.data[?], but special-case the most usual scenario so that * speed doesn't suffer. */ @@ -795,7 +796,7 @@ static void omap_rfbi_write(void *opaque, hwaddr addr, if (!-- s->rfbi.pixels) omap_rfbi_transfer_stop(s); break; - case 0x58: /* RFBI_READ */ + case 0x58: /* RFBI_READ */ if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0]) s->rfbi.rxbuf = s->rfbi.chip[0]->read(s->rfbi.chip[0]->opaque, 1); else if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1]) @@ -804,7 +805,7 @@ static void omap_rfbi_write(void *opaque, hwaddr addr, omap_rfbi_transfer_stop(s); break; - case 0x5c: /* RFBI_STATUS */ + case 0x5c: /* RFBI_STATUS */ if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0]) s->rfbi.rxbuf = s->rfbi.chip[0]->read(s->rfbi.chip[0]->opaque, 0); else if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1]) @@ -813,49 +814,49 @@ static void omap_rfbi_write(void *opaque, hwaddr addr, omap_rfbi_transfer_stop(s); break; - case 0x60: /* RFBI_CONFIG0 */ + case 0x60: /* RFBI_CONFIG0 */ s->rfbi.config[0] = value & 0x003f1fff; break; - case 0x64: /* RFBI_ONOFF_TIME0 */ + case 0x64: /* RFBI_ONOFF_TIME0 */ s->rfbi.time[0] = value & 0x3fffffff; break; - case 0x68: /* RFBI_CYCLE_TIME0 */ + case 0x68: /* RFBI_CYCLE_TIME0 */ s->rfbi.time[1] = value & 0x0fffffff; break; - case 0x6c: /* RFBI_DATA_CYCLE1_0 */ + case 0x6c: /* RFBI_DATA_CYCLE1_0 */ s->rfbi.data[0] = value & 0x0f1f0f1f; break; - case 0x70: /* RFBI_DATA_CYCLE2_0 */ + case 0x70: /* RFBI_DATA_CYCLE2_0 */ s->rfbi.data[1] = value & 0x0f1f0f1f; break; - case 0x74: /* RFBI_DATA_CYCLE3_0 */ + case 0x74: /* RFBI_DATA_CYCLE3_0 */ s->rfbi.data[2] = value & 0x0f1f0f1f; break; - case 0x78: /* RFBI_CONFIG1 */ + case 0x78: /* RFBI_CONFIG1 */ s->rfbi.config[1] = value & 0x003f1fff; break; - case 0x7c: /* RFBI_ONOFF_TIME1 */ + case 0x7c: /* RFBI_ONOFF_TIME1 */ s->rfbi.time[2] = value & 0x3fffffff; break; - case 0x80: /* RFBI_CYCLE_TIME1 */ + case 0x80: /* RFBI_CYCLE_TIME1 */ s->rfbi.time[3] = value & 0x0fffffff; break; - case 0x84: /* RFBI_DATA_CYCLE1_1 */ + case 0x84: /* RFBI_DATA_CYCLE1_1 */ s->rfbi.data[3] = value & 0x0f1f0f1f; break; - case 0x88: /* RFBI_DATA_CYCLE2_1 */ + case 0x88: /* RFBI_DATA_CYCLE2_1 */ s->rfbi.data[4] = value & 0x0f1f0f1f; break; - case 0x8c: /* RFBI_DATA_CYCLE3_1 */ + case 0x8c: /* RFBI_DATA_CYCLE3_1 */ s->rfbi.data[5] = value & 0x0f1f0f1f; break; - case 0x90: /* RFBI_VSYNC_WIDTH */ + case 0x90: /* RFBI_VSYNC_WIDTH */ s->rfbi.vsync = value & 0xffff; break; - case 0x94: /* RFBI_HSYNC_WIDTH */ + case 0x94: /* RFBI_HSYNC_WIDTH */ s->rfbi.hsync = value & 0xffff; break; @@ -878,49 +879,49 @@ static uint64_t omap_venc_read(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* REV_ID */ - case 0x04: /* STATUS */ - case 0x08: /* F_CONTROL */ - case 0x10: /* VIDOUT_CTRL */ - case 0x14: /* SYNC_CTRL */ - case 0x1c: /* LLEN */ - case 0x20: /* FLENS */ - case 0x24: /* HFLTR_CTRL */ - case 0x28: /* CC_CARR_WSS_CARR */ - case 0x2c: /* C_PHASE */ - case 0x30: /* GAIN_U */ - case 0x34: /* GAIN_V */ - case 0x38: /* GAIN_Y */ - case 0x3c: /* BLACK_LEVEL */ - case 0x40: /* BLANK_LEVEL */ - case 0x44: /* X_COLOR */ - case 
0x48: /* M_CONTROL */ - case 0x4c: /* BSTAMP_WSS_DATA */ - case 0x50: /* S_CARR */ - case 0x54: /* LINE21 */ - case 0x58: /* LN_SEL */ - case 0x5c: /* L21__WC_CTL */ - case 0x60: /* HTRIGGER_VTRIGGER */ - case 0x64: /* SAVID__EAVID */ - case 0x68: /* FLEN__FAL */ - case 0x6c: /* LAL__PHASE_RESET */ - case 0x70: /* HS_INT_START_STOP_X */ - case 0x74: /* HS_EXT_START_STOP_X */ - case 0x78: /* VS_INT_START_X */ - case 0x7c: /* VS_INT_STOP_X__VS_INT_START_Y */ - case 0x80: /* VS_INT_STOP_Y__VS_INT_START_X */ - case 0x84: /* VS_EXT_STOP_X__VS_EXT_START_Y */ - case 0x88: /* VS_EXT_STOP_Y */ - case 0x90: /* AVID_START_STOP_X */ - case 0x94: /* AVID_START_STOP_Y */ - case 0xa0: /* FID_INT_START_X__FID_INT_START_Y */ - case 0xa4: /* FID_INT_OFFSET_Y__FID_EXT_START_X */ - case 0xa8: /* FID_EXT_START_Y__FID_EXT_OFFSET_Y */ - case 0xb0: /* TVDETGP_INT_START_STOP_X */ - case 0xb4: /* TVDETGP_INT_START_STOP_Y */ - case 0xb8: /* GEN_CTRL */ - case 0xc4: /* DAC_TST__DAC_A */ - case 0xc8: /* DAC_B__DAC_C */ + case 0x00: /* REV_ID */ + case 0x04: /* STATUS */ + case 0x08: /* F_CONTROL */ + case 0x10: /* VIDOUT_CTRL */ + case 0x14: /* SYNC_CTRL */ + case 0x1c: /* LLEN */ + case 0x20: /* FLENS */ + case 0x24: /* HFLTR_CTRL */ + case 0x28: /* CC_CARR_WSS_CARR */ + case 0x2c: /* C_PHASE */ + case 0x30: /* GAIN_U */ + case 0x34: /* GAIN_V */ + case 0x38: /* GAIN_Y */ + case 0x3c: /* BLACK_LEVEL */ + case 0x40: /* BLANK_LEVEL */ + case 0x44: /* X_COLOR */ + case 0x48: /* M_CONTROL */ + case 0x4c: /* BSTAMP_WSS_DATA */ + case 0x50: /* S_CARR */ + case 0x54: /* LINE21 */ + case 0x58: /* LN_SEL */ + case 0x5c: /* L21__WC_CTL */ + case 0x60: /* HTRIGGER_VTRIGGER */ + case 0x64: /* SAVID__EAVID */ + case 0x68: /* FLEN__FAL */ + case 0x6c: /* LAL__PHASE_RESET */ + case 0x70: /* HS_INT_START_STOP_X */ + case 0x74: /* HS_EXT_START_STOP_X */ + case 0x78: /* VS_INT_START_X */ + case 0x7c: /* VS_INT_STOP_X__VS_INT_START_Y */ + case 0x80: /* VS_INT_STOP_Y__VS_INT_START_X */ + case 0x84: /* VS_EXT_STOP_X__VS_EXT_START_Y */ + case 0x88: /* VS_EXT_STOP_Y */ + case 0x90: /* AVID_START_STOP_X */ + case 0x94: /* AVID_START_STOP_Y */ + case 0xa0: /* FID_INT_START_X__FID_INT_START_Y */ + case 0xa4: /* FID_INT_OFFSET_Y__FID_EXT_START_X */ + case 0xa8: /* FID_EXT_START_Y__FID_EXT_OFFSET_Y */ + case 0xb0: /* TVDETGP_INT_START_STOP_X */ + case 0xb4: /* TVDETGP_INT_START_STOP_Y */ + case 0xb8: /* GEN_CTRL */ + case 0xc4: /* DAC_TST__DAC_A */ + case 0xc8: /* DAC_B__DAC_C */ return 0; default: @@ -939,47 +940,47 @@ static void omap_venc_write(void *opaque, hwaddr addr, } switch (addr) { - case 0x08: /* F_CONTROL */ - case 0x10: /* VIDOUT_CTRL */ - case 0x14: /* SYNC_CTRL */ - case 0x1c: /* LLEN */ - case 0x20: /* FLENS */ - case 0x24: /* HFLTR_CTRL */ - case 0x28: /* CC_CARR_WSS_CARR */ - case 0x2c: /* C_PHASE */ - case 0x30: /* GAIN_U */ - case 0x34: /* GAIN_V */ - case 0x38: /* GAIN_Y */ - case 0x3c: /* BLACK_LEVEL */ - case 0x40: /* BLANK_LEVEL */ - case 0x44: /* X_COLOR */ - case 0x48: /* M_CONTROL */ - case 0x4c: /* BSTAMP_WSS_DATA */ - case 0x50: /* S_CARR */ - case 0x54: /* LINE21 */ - case 0x58: /* LN_SEL */ - case 0x5c: /* L21__WC_CTL */ - case 0x60: /* HTRIGGER_VTRIGGER */ - case 0x64: /* SAVID__EAVID */ - case 0x68: /* FLEN__FAL */ - case 0x6c: /* LAL__PHASE_RESET */ - case 0x70: /* HS_INT_START_STOP_X */ - case 0x74: /* HS_EXT_START_STOP_X */ - case 0x78: /* VS_INT_START_X */ - case 0x7c: /* VS_INT_STOP_X__VS_INT_START_Y */ - case 0x80: /* VS_INT_STOP_Y__VS_INT_START_X */ - case 0x84: /* VS_EXT_STOP_X__VS_EXT_START_Y */ - case 
0x88: /* VS_EXT_STOP_Y */ - case 0x90: /* AVID_START_STOP_X */ - case 0x94: /* AVID_START_STOP_Y */ - case 0xa0: /* FID_INT_START_X__FID_INT_START_Y */ - case 0xa4: /* FID_INT_OFFSET_Y__FID_EXT_START_X */ - case 0xa8: /* FID_EXT_START_Y__FID_EXT_OFFSET_Y */ - case 0xb0: /* TVDETGP_INT_START_STOP_X */ - case 0xb4: /* TVDETGP_INT_START_STOP_Y */ - case 0xb8: /* GEN_CTRL */ - case 0xc4: /* DAC_TST__DAC_A */ - case 0xc8: /* DAC_B__DAC_C */ + case 0x08: /* F_CONTROL */ + case 0x10: /* VIDOUT_CTRL */ + case 0x14: /* SYNC_CTRL */ + case 0x1c: /* LLEN */ + case 0x20: /* FLENS */ + case 0x24: /* HFLTR_CTRL */ + case 0x28: /* CC_CARR_WSS_CARR */ + case 0x2c: /* C_PHASE */ + case 0x30: /* GAIN_U */ + case 0x34: /* GAIN_V */ + case 0x38: /* GAIN_Y */ + case 0x3c: /* BLACK_LEVEL */ + case 0x40: /* BLANK_LEVEL */ + case 0x44: /* X_COLOR */ + case 0x48: /* M_CONTROL */ + case 0x4c: /* BSTAMP_WSS_DATA */ + case 0x50: /* S_CARR */ + case 0x54: /* LINE21 */ + case 0x58: /* LN_SEL */ + case 0x5c: /* L21__WC_CTL */ + case 0x60: /* HTRIGGER_VTRIGGER */ + case 0x64: /* SAVID__EAVID */ + case 0x68: /* FLEN__FAL */ + case 0x6c: /* LAL__PHASE_RESET */ + case 0x70: /* HS_INT_START_STOP_X */ + case 0x74: /* HS_EXT_START_STOP_X */ + case 0x78: /* VS_INT_START_X */ + case 0x7c: /* VS_INT_STOP_X__VS_INT_START_Y */ + case 0x80: /* VS_INT_STOP_Y__VS_INT_START_X */ + case 0x84: /* VS_EXT_STOP_X__VS_EXT_START_Y */ + case 0x88: /* VS_EXT_STOP_Y */ + case 0x90: /* AVID_START_STOP_X */ + case 0x94: /* AVID_START_STOP_Y */ + case 0xa0: /* FID_INT_START_X__FID_INT_START_Y */ + case 0xa4: /* FID_INT_OFFSET_Y__FID_EXT_START_X */ + case 0xa8: /* FID_EXT_START_Y__FID_EXT_OFFSET_Y */ + case 0xb0: /* TVDETGP_INT_START_STOP_X */ + case 0xb4: /* TVDETGP_INT_START_STOP_Y */ + case 0xb8: /* GEN_CTRL */ + case 0xc4: /* DAC_TST__DAC_A */ + case 0xc8: /* DAC_B__DAC_C */ break; default: @@ -1001,15 +1002,15 @@ static uint64_t omap_im3_read(void *opaque, hwaddr addr, } switch (addr) { - case 0x0a8: /* SBIMERRLOGA */ - case 0x0b0: /* SBIMERRLOG */ - case 0x190: /* SBIMSTATE */ - case 0x198: /* SBTMSTATE_L */ - case 0x19c: /* SBTMSTATE_H */ - case 0x1a8: /* SBIMCONFIG_L */ - case 0x1ac: /* SBIMCONFIG_H */ - case 0x1f8: /* SBID_L */ - case 0x1fc: /* SBID_H */ + case 0x0a8: /* SBIMERRLOGA */ + case 0x0b0: /* SBIMERRLOG */ + case 0x190: /* SBIMSTATE */ + case 0x198: /* SBTMSTATE_L */ + case 0x19c: /* SBTMSTATE_H */ + case 0x1a8: /* SBIMCONFIG_L */ + case 0x1ac: /* SBIMCONFIG_H */ + case 0x1f8: /* SBID_L */ + case 0x1fc: /* SBID_H */ return 0; default: @@ -1028,12 +1029,12 @@ static void omap_im3_write(void *opaque, hwaddr addr, } switch (addr) { - case 0x0b0: /* SBIMERRLOG */ - case 0x190: /* SBIMSTATE */ - case 0x198: /* SBTMSTATE_L */ - case 0x19c: /* SBTMSTATE_H */ - case 0x1a8: /* SBIMCONFIG_L */ - case 0x1ac: /* SBIMCONFIG_H */ + case 0x0b0: /* SBIMERRLOG */ + case 0x190: /* SBIMSTATE */ + case 0x198: /* SBTMSTATE_L */ + case 0x19c: /* SBTMSTATE_H */ + case 0x1a8: /* SBIMCONFIG_L */ + case 0x1ac: /* SBIMCONFIG_H */ break; default: diff --git a/hw/display/pl110_template.h b/hw/display/pl110_template.h index 877419aa81..0087785322 100644 --- a/hw/display/pl110_template.h +++ b/hw/display/pl110_template.h @@ -15,18 +15,18 @@ #if ORDER == 0 #define NAME glue(lblp_, BORDER) -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN #define SWAP_WORDS 1 #endif #elif ORDER == 1 #define NAME glue(bbbp_, BORDER) -#ifndef HOST_WORDS_BIGENDIAN +#if !HOST_BIG_ENDIAN #define SWAP_WORDS 1 #endif #else #define SWAP_PIXELS 1 #define NAME glue(lbbp_, BORDER) -#ifdef 
HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN #define SWAP_WORDS 1 #endif #endif diff --git a/hw/display/pxa2xx_lcd.c b/hw/display/pxa2xx_lcd.c index 2887ce496b..eb83d88222 100644 --- a/hw/display/pxa2xx_lcd.c +++ b/hw/display/pxa2xx_lcd.c @@ -86,106 +86,106 @@ typedef struct QEMU_PACKED { uint32_t ldcmd; } PXAFrameDescriptor; -#define LCCR0 0x000 /* LCD Controller Control register 0 */ -#define LCCR1 0x004 /* LCD Controller Control register 1 */ -#define LCCR2 0x008 /* LCD Controller Control register 2 */ -#define LCCR3 0x00c /* LCD Controller Control register 3 */ -#define LCCR4 0x010 /* LCD Controller Control register 4 */ -#define LCCR5 0x014 /* LCD Controller Control register 5 */ +#define LCCR0 0x000 /* LCD Controller Control register 0 */ +#define LCCR1 0x004 /* LCD Controller Control register 1 */ +#define LCCR2 0x008 /* LCD Controller Control register 2 */ +#define LCCR3 0x00c /* LCD Controller Control register 3 */ +#define LCCR4 0x010 /* LCD Controller Control register 4 */ +#define LCCR5 0x014 /* LCD Controller Control register 5 */ -#define FBR0 0x020 /* DMA Channel 0 Frame Branch register */ -#define FBR1 0x024 /* DMA Channel 1 Frame Branch register */ -#define FBR2 0x028 /* DMA Channel 2 Frame Branch register */ -#define FBR3 0x02c /* DMA Channel 3 Frame Branch register */ -#define FBR4 0x030 /* DMA Channel 4 Frame Branch register */ -#define FBR5 0x110 /* DMA Channel 5 Frame Branch register */ -#define FBR6 0x114 /* DMA Channel 6 Frame Branch register */ +#define FBR0 0x020 /* DMA Channel 0 Frame Branch register */ +#define FBR1 0x024 /* DMA Channel 1 Frame Branch register */ +#define FBR2 0x028 /* DMA Channel 2 Frame Branch register */ +#define FBR3 0x02c /* DMA Channel 3 Frame Branch register */ +#define FBR4 0x030 /* DMA Channel 4 Frame Branch register */ +#define FBR5 0x110 /* DMA Channel 5 Frame Branch register */ +#define FBR6 0x114 /* DMA Channel 6 Frame Branch register */ -#define LCSR1 0x034 /* LCD Controller Status register 1 */ -#define LCSR0 0x038 /* LCD Controller Status register 0 */ -#define LIIDR 0x03c /* LCD Controller Interrupt ID register */ +#define LCSR1 0x034 /* LCD Controller Status register 1 */ +#define LCSR0 0x038 /* LCD Controller Status register 0 */ +#define LIIDR 0x03c /* LCD Controller Interrupt ID register */ -#define TRGBR 0x040 /* TMED RGB Seed register */ -#define TCR 0x044 /* TMED Control register */ +#define TRGBR 0x040 /* TMED RGB Seed register */ +#define TCR 0x044 /* TMED Control register */ -#define OVL1C1 0x050 /* Overlay 1 Control register 1 */ -#define OVL1C2 0x060 /* Overlay 1 Control register 2 */ -#define OVL2C1 0x070 /* Overlay 2 Control register 1 */ -#define OVL2C2 0x080 /* Overlay 2 Control register 2 */ -#define CCR 0x090 /* Cursor Control register */ +#define OVL1C1 0x050 /* Overlay 1 Control register 1 */ +#define OVL1C2 0x060 /* Overlay 1 Control register 2 */ +#define OVL2C1 0x070 /* Overlay 2 Control register 1 */ +#define OVL2C2 0x080 /* Overlay 2 Control register 2 */ +#define CCR 0x090 /* Cursor Control register */ -#define CMDCR 0x100 /* Command Control register */ -#define PRSR 0x104 /* Panel Read Status register */ +#define CMDCR 0x100 /* Command Control register */ +#define PRSR 0x104 /* Panel Read Status register */ -#define PXA_LCDDMA_CHANS 7 -#define DMA_FDADR 0x00 /* Frame Descriptor Address register */ -#define DMA_FSADR 0x04 /* Frame Source Address register */ -#define DMA_FIDR 0x08 /* Frame ID register */ -#define DMA_LDCMD 0x0c /* Command register */ +#define PXA_LCDDMA_CHANS 7 +#define DMA_FDADR 0x00 /* 
Frame Descriptor Address register */ +#define DMA_FSADR 0x04 /* Frame Source Address register */ +#define DMA_FIDR 0x08 /* Frame ID register */ +#define DMA_LDCMD 0x0c /* Command register */ /* LCD Buffer Strength Control register */ -#define BSCNTR 0x04000054 +#define BSCNTR 0x04000054 /* Bitfield masks */ -#define LCCR0_ENB (1 << 0) -#define LCCR0_CMS (1 << 1) -#define LCCR0_SDS (1 << 2) -#define LCCR0_LDM (1 << 3) -#define LCCR0_SOFM0 (1 << 4) -#define LCCR0_IUM (1 << 5) -#define LCCR0_EOFM0 (1 << 6) -#define LCCR0_PAS (1 << 7) -#define LCCR0_DPD (1 << 9) -#define LCCR0_DIS (1 << 10) -#define LCCR0_QDM (1 << 11) -#define LCCR0_PDD (0xff << 12) -#define LCCR0_BSM0 (1 << 20) -#define LCCR0_OUM (1 << 21) -#define LCCR0_LCDT (1 << 22) -#define LCCR0_RDSTM (1 << 23) -#define LCCR0_CMDIM (1 << 24) -#define LCCR0_OUC (1 << 25) -#define LCCR0_LDDALT (1 << 26) -#define LCCR1_PPL(x) ((x) & 0x3ff) -#define LCCR2_LPP(x) ((x) & 0x3ff) -#define LCCR3_API (15 << 16) -#define LCCR3_BPP(x) ((((x) >> 24) & 7) | (((x) >> 26) & 8)) -#define LCCR3_PDFOR(x) (((x) >> 30) & 3) -#define LCCR4_K1(x) (((x) >> 0) & 7) -#define LCCR4_K2(x) (((x) >> 3) & 7) -#define LCCR4_K3(x) (((x) >> 6) & 7) -#define LCCR4_PALFOR(x) (((x) >> 15) & 3) -#define LCCR5_SOFM(ch) (1 << (ch - 1)) -#define LCCR5_EOFM(ch) (1 << (ch + 7)) -#define LCCR5_BSM(ch) (1 << (ch + 15)) -#define LCCR5_IUM(ch) (1 << (ch + 23)) -#define OVLC1_EN (1 << 31) -#define CCR_CEN (1 << 31) -#define FBR_BRA (1 << 0) -#define FBR_BINT (1 << 1) -#define FBR_SRCADDR (0xfffffff << 4) -#define LCSR0_LDD (1 << 0) -#define LCSR0_SOF0 (1 << 1) -#define LCSR0_BER (1 << 2) -#define LCSR0_ABC (1 << 3) -#define LCSR0_IU0 (1 << 4) -#define LCSR0_IU1 (1 << 5) -#define LCSR0_OU (1 << 6) -#define LCSR0_QD (1 << 7) -#define LCSR0_EOF0 (1 << 8) -#define LCSR0_BS0 (1 << 9) -#define LCSR0_SINT (1 << 10) -#define LCSR0_RDST (1 << 11) -#define LCSR0_CMDINT (1 << 12) -#define LCSR0_BERCH(x) (((x) & 7) << 28) -#define LCSR1_SOF(ch) (1 << (ch - 1)) -#define LCSR1_EOF(ch) (1 << (ch + 7)) -#define LCSR1_BS(ch) (1 << (ch + 15)) -#define LCSR1_IU(ch) (1 << (ch + 23)) -#define LDCMD_LENGTH(x) ((x) & 0x001ffffc) -#define LDCMD_EOFINT (1 << 21) -#define LDCMD_SOFINT (1 << 22) -#define LDCMD_PAL (1 << 26) +#define LCCR0_ENB (1 << 0) +#define LCCR0_CMS (1 << 1) +#define LCCR0_SDS (1 << 2) +#define LCCR0_LDM (1 << 3) +#define LCCR0_SOFM0 (1 << 4) +#define LCCR0_IUM (1 << 5) +#define LCCR0_EOFM0 (1 << 6) +#define LCCR0_PAS (1 << 7) +#define LCCR0_DPD (1 << 9) +#define LCCR0_DIS (1 << 10) +#define LCCR0_QDM (1 << 11) +#define LCCR0_PDD (0xff << 12) +#define LCCR0_BSM0 (1 << 20) +#define LCCR0_OUM (1 << 21) +#define LCCR0_LCDT (1 << 22) +#define LCCR0_RDSTM (1 << 23) +#define LCCR0_CMDIM (1 << 24) +#define LCCR0_OUC (1 << 25) +#define LCCR0_LDDALT (1 << 26) +#define LCCR1_PPL(x) ((x) & 0x3ff) +#define LCCR2_LPP(x) ((x) & 0x3ff) +#define LCCR3_API (15 << 16) +#define LCCR3_BPP(x) ((((x) >> 24) & 7) | (((x) >> 26) & 8)) +#define LCCR3_PDFOR(x) (((x) >> 30) & 3) +#define LCCR4_K1(x) (((x) >> 0) & 7) +#define LCCR4_K2(x) (((x) >> 3) & 7) +#define LCCR4_K3(x) (((x) >> 6) & 7) +#define LCCR4_PALFOR(x) (((x) >> 15) & 3) +#define LCCR5_SOFM(ch) (1 << (ch - 1)) +#define LCCR5_EOFM(ch) (1 << (ch + 7)) +#define LCCR5_BSM(ch) (1 << (ch + 15)) +#define LCCR5_IUM(ch) (1 << (ch + 23)) +#define OVLC1_EN (1 << 31) +#define CCR_CEN (1 << 31) +#define FBR_BRA (1 << 0) +#define FBR_BINT (1 << 1) +#define FBR_SRCADDR (0xfffffff << 4) +#define LCSR0_LDD (1 << 0) +#define LCSR0_SOF0 (1 << 1) +#define LCSR0_BER (1 << 2) 
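/*
 * Editor's note: an illustrative, self-contained sketch, not part of the patch.
 * It mirrors the LCCR3_BPP() macro re-aligned above to show how the 4-bit
 * pixel-depth code is assembled from LCCR3 bits [26:24] plus bit 29; the
 * register value and the main() harness below are invented purely for
 * demonstration.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_LCCR3_BPP(x) ((((x) >> 24) & 7) | (((x) >> 26) & 8))

int main(void)
{
    /* hypothetical LCCR3 value: extension bit (29) set, base code 2 in [26:24] */
    uint32_t lccr3 = (1u << 29) | (2u << 24);

    printf("BPP code = %u\n", DEMO_LCCR3_BPP(lccr3)); /* prints 10 */
    return 0;
}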
+#define LCSR0_ABC (1 << 3) +#define LCSR0_IU0 (1 << 4) +#define LCSR0_IU1 (1 << 5) +#define LCSR0_OU (1 << 6) +#define LCSR0_QD (1 << 7) +#define LCSR0_EOF0 (1 << 8) +#define LCSR0_BS0 (1 << 9) +#define LCSR0_SINT (1 << 10) +#define LCSR0_RDST (1 << 11) +#define LCSR0_CMDINT (1 << 12) +#define LCSR0_BERCH(x) (((x) & 7) << 28) +#define LCSR1_SOF(ch) (1 << (ch - 1)) +#define LCSR1_EOF(ch) (1 << (ch + 7)) +#define LCSR1_BS(ch) (1 << (ch + 15)) +#define LCSR1_IU(ch) (1 << (ch + 23)) +#define LDCMD_LENGTH(x) ((x) & 0x001ffffc) +#define LDCMD_EOFINT (1 << 21) +#define LDCMD_SOFINT (1 << 22) +#define LDCMD_PAL (1 << 26) /* Size of a pixel in the QEMU UI output surface, in bytes */ #define DEST_PIXEL_WIDTH 4 @@ -199,7 +199,7 @@ typedef struct QEMU_PACKED { SKIP_PIXEL(to); \ } while (0) -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN # define SWAP_WORDS 1 #endif @@ -788,7 +788,7 @@ static uint64_t pxa2xx_lcdc_read(void *opaque, hwaddr offset, case TCR: return s->tcr; - case 0x200 ... 0x1000: /* DMA per-channel registers */ + case 0x200 ... 0x1000: /* DMA per-channel registers */ ch = (offset - 0x200) >> 4; if (!(ch >= 0 && ch < PXA_LCDDMA_CHANS)) goto fail; @@ -938,7 +938,7 @@ static void pxa2xx_lcdc_write(void *opaque, hwaddr offset, s->tcr = value & 0x7fff; break; - case 0x200 ... 0x1000: /* DMA per-channel registers */ + case 0x200 ... 0x1000: /* DMA per-channel registers */ ch = (offset - 0x200) >> 4; if (!(ch >= 0 && ch < PXA_LCDDMA_CHANS)) goto fail; @@ -1427,7 +1427,7 @@ PXA2xxLCDState *pxa2xx_lcdc_init(MemoryRegion *sysmem, { PXA2xxLCDState *s; - s = (PXA2xxLCDState *) g_malloc0(sizeof(PXA2xxLCDState)); + s = g_new0(PXA2xxLCDState, 1); s->invalidated = 1; s->irq = irq; s->sysmem = sysmem; diff --git a/hw/display/qxl-logger.c b/hw/display/qxl-logger.c index 68bfa47568..35c38f6252 100644 --- a/hw/display/qxl-logger.c +++ b/hw/display/qxl-logger.c @@ -106,7 +106,7 @@ static int qxl_log_image(PCIQXLDevice *qxl, QXLPHYSICAL addr, int group_id) QXLImage *image; QXLImageDescriptor *desc; - image = qxl_phys2virt(qxl, addr, group_id); + image = qxl_phys2virt(qxl, addr, group_id, sizeof(QXLImage)); if (!image) { return 1; } @@ -214,7 +214,8 @@ int qxl_log_cmd_cursor(PCIQXLDevice *qxl, QXLCursorCmd *cmd, int group_id) cmd->u.set.position.y, cmd->u.set.visible ? "yes" : "no", cmd->u.set.shape); - cursor = qxl_phys2virt(qxl, cmd->u.set.shape, group_id); + cursor = qxl_phys2virt(qxl, cmd->u.set.shape, group_id, + sizeof(QXLCursor)); if (!cursor) { return 1; } @@ -236,6 +237,7 @@ int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext) { bool compat = ext->flags & QXL_COMMAND_FLAG_COMPAT; void *data; + size_t datasz; int ret; if (!qxl->cmdlog) { @@ -247,7 +249,20 @@ int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext) qxl_name(qxl_type, ext->cmd.type), compat ? "(compat)" : ""); - data = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id); + switch (ext->cmd.type) { + case QXL_CMD_DRAW: + datasz = compat ? 
sizeof(QXLCompatDrawable) : sizeof(QXLDrawable); + break; + case QXL_CMD_SURFACE: + datasz = sizeof(QXLSurfaceCmd); + break; + case QXL_CMD_CURSOR: + datasz = sizeof(QXLCursorCmd); + break; + default: + goto out; + } + data = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id, datasz); if (!data) { return 1; } @@ -269,6 +284,7 @@ int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext) qxl_log_cmd_cursor(qxl, data, ext->group_id); break; } +out: fprintf(stderr, "\n"); return 0; } diff --git a/hw/display/qxl-render.c b/hw/display/qxl-render.c index d28849b121..fcfd40c3ac 100644 --- a/hw/display/qxl-render.c +++ b/hw/display/qxl-render.c @@ -107,7 +107,9 @@ static void qxl_render_update_area_unlocked(PCIQXLDevice *qxl) qxl->guest_primary.resized = 0; qxl->guest_primary.data = qxl_phys2virt(qxl, qxl->guest_primary.surface.mem, - MEMSLOT_GROUP_GUEST); + MEMSLOT_GROUP_GUEST, + qxl->guest_primary.abs_stride + * height); if (!qxl->guest_primary.data) { goto end; } @@ -228,7 +230,8 @@ static void qxl_unpack_chunks(void *dest, size_t size, PCIQXLDevice *qxl, if (offset == size) { return; } - chunk = qxl_phys2virt(qxl, chunk->next_chunk, group_id); + chunk = qxl_phys2virt(qxl, chunk->next_chunk, group_id, + sizeof(QXLDataChunk) + chunk->data_size); if (!chunk) { return; } @@ -247,6 +250,13 @@ static QEMUCursor *qxl_cursor(PCIQXLDevice *qxl, QXLCursor *cursor, size_t size; c = cursor_alloc(cursor->header.width, cursor->header.height); + + if (!c) { + qxl_set_guest_bug(qxl, "%s: cursor %ux%u alloc error", __func__, + cursor->header.width, cursor->header.height); + goto fail; + } + c->hot_x = cursor->header.hot_spot_x; c->hot_y = cursor->header.hot_spot_y; switch (cursor->header.type) { @@ -266,7 +276,7 @@ static QEMUCursor *qxl_cursor(PCIQXLDevice *qxl, QXLCursor *cursor, } break; case SPICE_CURSOR_TYPE_ALPHA: - size = sizeof(uint32_t) * cursor->header.width * cursor->header.height; + size = sizeof(uint32_t) * c->width * c->height; qxl_unpack_chunks(c->data, size, qxl, &cursor->chunk, group_id); if (qxl->debug > 2) { cursor_print_ascii_art(c, "qxl/alpha"); @@ -288,7 +298,8 @@ fail: /* called from spice server thread context only */ int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext) { - QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id); + QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id, + sizeof(QXLCursorCmd)); QXLCursor *cursor; QEMUCursor *c; @@ -307,7 +318,15 @@ int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext) } switch (cmd->type) { case QXL_CURSOR_SET: - cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id); + /* First read the QXLCursor to get QXLDataChunk::data_size ... */ + cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id, + sizeof(QXLCursor)); + if (!cursor) { + return 1; + } + /* Then read including the chunked data following QXLCursor. 
*/ + cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id, + sizeof(QXLCursor) + cursor->chunk.data_size); if (!cursor) { return 1; } diff --git a/hw/display/qxl.c b/hw/display/qxl.c index 43482d4364..6772849dec 100644 --- a/hw/display/qxl.c +++ b/hw/display/qxl.c @@ -274,7 +274,8 @@ static void qxl_spice_monitors_config_async(PCIQXLDevice *qxl, int replay) QXL_IO_MONITORS_CONFIG_ASYNC)); } - cfg = qxl_phys2virt(qxl, qxl->guest_monitors_config, MEMSLOT_GROUP_GUEST); + cfg = qxl_phys2virt(qxl, qxl->guest_monitors_config, MEMSLOT_GROUP_GUEST, + sizeof(QXLMonitorsConfig)); if (cfg != NULL && cfg->count == 1) { qxl->guest_primary.resized = 1; qxl->guest_head0_width = cfg->heads[0].width; @@ -320,7 +321,7 @@ static ram_addr_t qxl_rom_size(void) #define QXL_ROM_SZ 8192 QEMU_BUILD_BUG_ON(QXL_REQUIRED_SZ > QXL_ROM_SZ); - return QEMU_ALIGN_UP(QXL_REQUIRED_SZ, qemu_real_host_page_size); + return QEMU_ALIGN_UP(QXL_REQUIRED_SZ, qemu_real_host_page_size()); } static void init_qxl_rom(PCIQXLDevice *d) @@ -459,7 +460,8 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext) switch (le32_to_cpu(ext->cmd.type)) { case QXL_CMD_SURFACE: { - QXLSurfaceCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id); + QXLSurfaceCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id, + sizeof(QXLSurfaceCmd)); if (!cmd) { return 1; @@ -494,7 +496,8 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext) } case QXL_CMD_CURSOR: { - QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id); + QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id, + sizeof(QXLCursorCmd)); if (!cmd) { return 1; @@ -517,13 +520,20 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext) /* spice display interface callbacks */ -static void interface_attach_worker(QXLInstance *sin, QXLWorker *qxl_worker) +static void interface_attached_worker(QXLInstance *sin) { PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl); trace_qxl_interface_attach_worker(qxl->id); } +#if !(SPICE_HAS_ATTACHED_WORKER) +static void interface_attach_worker(QXLInstance *sin, QXLWorker *qxl_worker) +{ + interface_attached_worker(sin); +} +#endif + static void interface_set_compression_level(QXLInstance *sin, int level) { PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl); @@ -1131,7 +1141,12 @@ static const QXLInterface qxl_interface = { .base.major_version = SPICE_INTERFACE_QXL_MAJOR, .base.minor_version = SPICE_INTERFACE_QXL_MINOR, +#if SPICE_HAS_ATTACHED_WORKER + .attached_worker = interface_attached_worker, +#else .attache_worker = interface_attach_worker, +#endif + .set_compression_level = interface_set_compression_level, #if SPICE_NEEDS_SET_MM_TIME .set_mm_time = interface_set_mm_time, @@ -1369,6 +1384,7 @@ static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta, qxl_set_guest_bug(d, "%s: pci_region = %d", __func__, pci_region); return 1; } + assert(guest_end - pci_start <= memory_region_size(mr)); virt_start = (intptr_t)memory_region_get_ram_ptr(mr); memslot.slot_id = slot_id; @@ -1409,11 +1425,13 @@ static void qxl_reset_surfaces(PCIQXLDevice *d) /* can be also called from spice server thread context */ static bool qxl_get_check_slot_offset(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, - uint32_t *s, uint64_t *o) + uint32_t *s, uint64_t *o, + size_t size_requested) { uint64_t phys = le64_to_cpu(pqxl); uint32_t slot = (phys >> (64 - 8)) & 0xff; uint64_t offset = phys & 0xffffffffffff; + uint64_t size_available; if (slot >= 
NUM_MEMSLOTS) { qxl_set_guest_bug(qxl, "slot too large %d >= %d", slot, @@ -1437,6 +1455,23 @@ static bool qxl_get_check_slot_offset(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, slot, offset, qxl->guest_slots[slot].size); return false; } + size_available = memory_region_size(qxl->guest_slots[slot].mr); + if (qxl->guest_slots[slot].offset + offset >= size_available) { + qxl_set_guest_bug(qxl, + "slot %d offset %"PRIu64" > region size %"PRIu64"\n", + slot, qxl->guest_slots[slot].offset + offset, + size_available); + return false; + } + size_available -= qxl->guest_slots[slot].offset + offset; + if (size_requested > size_available) { + qxl_set_guest_bug(qxl, + "slot %d offset %"PRIu64" size %zu: " + "overrun by %"PRIu64" bytes\n", + slot, offset, size_requested, + size_requested - size_available); + return false; + } *s = slot; *o = offset; @@ -1444,7 +1479,8 @@ static bool qxl_get_check_slot_offset(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, } /* can be also called from spice server thread context */ -void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id) +void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id, + size_t size) { uint64_t offset; uint32_t slot; @@ -1455,7 +1491,7 @@ void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id) offset = le64_to_cpu(pqxl) & 0xffffffffffff; return (void *)(intptr_t)offset; case MEMSLOT_GROUP_GUEST: - if (!qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset)) { + if (!qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset, size)) { return NULL; } ptr = memory_region_get_ram_ptr(qxl->guest_slots[slot].mr); @@ -1921,9 +1957,9 @@ static void qxl_dirty_one_surface(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, uint32_t slot; bool rc; - rc = qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset); - assert(rc == true); size = (uint64_t)height * abs(stride); + rc = qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset, size); + assert(rc == true); trace_qxl_surfaces_dirty(qxl->id, offset, size); qxl_set_dirty(qxl->guest_slots[slot].mr, qxl->guest_slots[slot].offset + offset, @@ -1952,7 +1988,7 @@ static void qxl_dirty_surfaces(PCIQXLDevice *qxl) } cmd = qxl_phys2virt(qxl, qxl->guest_surfaces.cmds[i], - MEMSLOT_GROUP_GUEST); + MEMSLOT_GROUP_GUEST, sizeof(QXLSurfaceCmd)); assert(cmd); assert(cmd->type == QXL_SURFACE_CMD_CREATE); qxl_dirty_one_surface(qxl, cmd->u.surface_create.data, @@ -2171,12 +2207,17 @@ static void qxl_realize_common(PCIQXLDevice *qxl, Error **errp) } #if SPICE_SERVER_VERSION >= 0x000e02 /* release 0.14.2 */ + Error *err = NULL; char device_address[256] = ""; - if (qemu_spice_fill_device_address(qxl->vga.con, device_address, 256)) { + if (qemu_console_fill_device_address(qxl->vga.con, + device_address, sizeof(device_address), + &err)) { spice_qxl_set_device_info(&qxl->ssd.qxl, device_address, 0, qxl->max_outputs); + } else { + error_report_err(err); } #endif @@ -2198,7 +2239,11 @@ static void qxl_realize_primary(PCIDevice *dev, Error **errp) qxl_init_ramsize(qxl); vga->vbe_size = qxl->vgamem_size; vga->vram_size_mb = qxl->vga.vram_size / MiB; - vga_common_init(vga, OBJECT(dev)); + vga_common_init(vga, OBJECT(dev), &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } vga_init(vga, OBJECT(dev), pci_address_space(dev), pci_address_space_io(dev), false); portio_list_init(&qxl->vga_port_list, OBJECT(dev), qxl_vga_portio_list, @@ -2252,7 +2297,7 @@ static int qxl_pre_save(void *opaque) } else { d->last_release_offset = (uint8_t *)d->last_release - ram_start; } - if (d->last_release_offset < 
d->vga.vram_size) { + if (d->last_release_offset >= d->vga.vram_size) { return 1; } @@ -2494,6 +2539,7 @@ static const TypeInfo qxl_primary_info = { .class_init = qxl_primary_class_init, }; module_obj("qxl-vga"); +module_kconfig(QXL); static void qxl_secondary_class_init(ObjectClass *klass, void *data) { diff --git a/hw/display/qxl.h b/hw/display/qxl.h index 30d21f4d0b..7894bd5134 100644 --- a/hw/display/qxl.h +++ b/hw/display/qxl.h @@ -147,9 +147,30 @@ OBJECT_DECLARE_SIMPLE_TYPE(PCIQXLDevice, PCI_QXL) #define QXL_DEFAULT_REVISION (QXL_REVISION_STABLE_V12 + 1) /* qxl.c */ -void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL phys, int group_id); +/** + * qxl_phys2virt: Get a pointer within a PCI VRAM memory region. + * + * @qxl: QXL device + * @phys: physical offset of buffer within the VRAM + * @group_id: memory slot group + * @size: size of the buffer + * + * Returns a host pointer to a buffer placed at offset @phys within the + * active slot @group_id of the PCI VGA RAM memory region associated with + * the @qxl device. If the slot is inactive, or the offset + size are out + * of the memory region, returns NULL. + * + * Use with care; by the time this function returns, the returned pointer is + * not protected by RCU anymore. If the caller is not within an RCU critical + * section and does not hold the iothread lock, it must have other means of + * protecting the pointer, such as a reference to the region that includes + * the incoming ram_addr_t. + * + */ +void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL phys, int group_id, + size_t size); void qxl_set_guest_bug(PCIQXLDevice *qxl, const char *msg, ...) - GCC_FMT_ATTR(2, 3); + G_GNUC_PRINTF(2, 3); void qxl_spice_update_area(PCIQXLDevice *qxl, uint32_t surface_id, struct QXLRect *area, struct QXLRect *dirty_rects, diff --git a/hw/display/sii9022.c b/hw/display/sii9022.c index b591a58789..664fd4046d 100644 --- a/hw/display/sii9022.c +++ b/hw/display/sii9022.c @@ -76,6 +76,8 @@ static int sii9022_event(I2CSlave *i2c, enum i2c_event event) break; case I2C_NACK: break; + default: + return -1; } return 0; diff --git a/hw/display/ssd0303.c b/hw/display/ssd0303.c index aeae22da9c..d67b0ad7b5 100644 --- a/hw/display/ssd0303.c +++ b/hw/display/ssd0303.c @@ -196,6 +196,8 @@ static int ssd0303_event(I2CSlave *i2c, enum i2c_event event) case I2C_NACK: /* Nothing to do. 
*/ break; + default: + return -1; } return 0; diff --git a/hw/display/tc6393xb.c b/hw/display/tc6393xb.c index 1f28223c7b..c7beba453b 100644 --- a/hw/display/tc6393xb.c +++ b/hw/display/tc6393xb.c @@ -540,7 +540,7 @@ TC6393xbState *tc6393xb_init(MemoryRegion *sysmem, uint32_t base, qemu_irq irq) }, }; - s = (TC6393xbState *) g_malloc0(sizeof(TC6393xbState)); + s = g_new0(TC6393xbState, 1); s->irq = irq; s->gpio_in = qemu_allocate_irqs(tc6393xb_gpio_set, s, TC6393XB_GPIOS); diff --git a/hw/display/tcx.c b/hw/display/tcx.c index d4d09d0df8..1b27b64f6d 100644 --- a/hw/display/tcx.c +++ b/hw/display/tcx.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qapi/error.h" #include "ui/console.h" diff --git a/hw/display/trace-events b/hw/display/trace-events index f03f6655bc..0c0ffcbe42 100644 --- a/hw/display/trace-events +++ b/hw/display/trace-events @@ -21,6 +21,10 @@ vmware_palette_write(uint32_t index, uint32_t value) "index %d, value 0x%x" vmware_scratch_read(uint32_t index, uint32_t value) "index %d, value 0x%x" vmware_scratch_write(uint32_t index, uint32_t value) "index %d, value 0x%x" vmware_setmode(uint32_t w, uint32_t h, uint32_t bpp) "%dx%d @ %d bpp" +vmware_verify_rect_less_than_zero(const char *name, const char *param, int x) "%s: %s was < 0 (%d)" +vmware_verify_rect_greater_than_bound(const char *name, const char *param, int bound, int x) "%s: %s was > %d (%d)" +vmware_verify_rect_surface_bound_exceeded(const char *name, const char *component, int bound, const char *param1, int value1, const char *param2, int value2) "%s: %s > %d (%s: %d, %s: %d)" +vmware_update_rect_delayed_flush(void) "display update FIFO full - forcing flush" # virtio-gpu-base.c virtio_gpu_features(bool virgl) "virgl %d" @@ -140,10 +144,10 @@ ati_mm_read(unsigned int size, uint64_t addr, const char *name, uint64_t val) "% ati_mm_write(unsigned int size, uint64_t addr, const char *name, uint64_t val) "%u 0x%"PRIx64 " %s <- 0x%"PRIx64 # artist.c -artist_reg_read(unsigned int size, uint64_t addr, const char *name, uint64_t val) "%u 0x%"PRIx64 "%s -> 0x%"PRIx64 -artist_reg_write(unsigned int size, uint64_t addr, const char *name, uint64_t val) "%u 0x%"PRIx64 "%s <- 0x%"PRIx64 -artist_vram_read(unsigned int size, uint64_t addr, int posx, int posy, uint64_t val) "%u 0x%"PRIx64 " %ux%u-> 0x%"PRIx64 -artist_vram_write(unsigned int size, uint64_t addr, uint64_t val) "%u 0x%"PRIx64 " <- 0x%"PRIx64 +artist_reg_read(unsigned int size, uint64_t addr, const char *name, uint64_t val) "%u 0x%"PRIx64 "%s -> 0x%08"PRIx64 +artist_reg_write(unsigned int size, uint64_t addr, const char *name, uint64_t val) "%u 0x%"PRIx64 "%s <- 0x%08"PRIx64 +artist_vram_read(unsigned int size, uint64_t addr, uint64_t val) "%u 0x%08"PRIx64 " -> 0x%08"PRIx64 +artist_vram_write(unsigned int size, uint64_t addr, uint64_t val) "%u 0x%08"PRIx64 " <- 0x%08"PRIx64 artist_fill_window(unsigned int start_x, unsigned int start_y, unsigned int width, unsigned int height, uint32_t op, uint32_t ctlpln) "start=%ux%u length=%ux%u op=0x%08x ctlpln=0x%08x" artist_block_move(unsigned int start_x, unsigned int start_y, unsigned int dest_x, unsigned int dest_y, unsigned int width, unsigned int height) "source %ux%u -> dest %ux%u size %ux%u" artist_draw_line(unsigned int start_x, unsigned int start_y, unsigned int end_x, unsigned int end_y) "%ux%u %ux%u" @@ -167,3 +171,10 @@ sm501_disp_ctrl_read(uint32_t addr, uint32_t val) "addr=0x%x, val=0x%x" sm501_disp_ctrl_write(uint32_t addr, uint32_t val) "addr=0x%x, val=0x%x" 
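/*
 * Editor's note: illustrative only, not part of the patch. Each entry added to
 * hw/display/trace-events above is turned by QEMU's tracetool into a
 * trace_<name>() helper with the same argument list. A call site for the new
 * macfb_ctrl_read event would look roughly like the sketch below; the
 * surrounding function and the zero value are assumptions for the example.
 */
#include "trace.h"   /* per-directory header generated from trace-events */

static uint64_t demo_macfb_ctrl_read(uint64_t addr, unsigned size)
{
    uint64_t val = 0;   /* real code would decode the addressed register here */

    /* arguments follow macfb_ctrl_read(addr, value, size) declared above */
    trace_macfb_ctrl_read(addr, val, size);
    return val;
}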
sm501_2d_engine_read(uint32_t addr, uint32_t val) "addr=0x%x, val=0x%x" sm501_2d_engine_write(uint32_t addr, uint32_t val) "addr=0x%x, val=0x%x" + +# macfb.c +macfb_ctrl_read(uint64_t addr, uint64_t value, unsigned int size) "addr 0x%"PRIx64 " value 0x%"PRIx64 " size %u" +macfb_ctrl_write(uint64_t addr, uint64_t value, unsigned int size) "addr 0x%"PRIx64 " value 0x%"PRIx64 " size %u" +macfb_sense_read(uint32_t value) "video sense: 0x%"PRIx32 +macfb_sense_write(uint32_t value) "video sense: 0x%"PRIx32 +macfb_update_mode(uint32_t width, uint32_t height, uint8_t depth) "setting mode to width %"PRId32 " height %"PRId32 " size %d" diff --git a/hw/display/vga-isa-mm.c b/hw/display/vga-isa-mm.c deleted file mode 100644 index 7321b7a06d..0000000000 --- a/hw/display/vga-isa-mm.c +++ /dev/null @@ -1,114 +0,0 @@ -/* - * QEMU ISA MM VGA Emulator. - * - * Copyright (c) 2003 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "qemu/osdep.h" -#include "qemu/bitops.h" -#include "qemu/units.h" -#include "migration/vmstate.h" -#include "hw/display/vga.h" -#include "vga_int.h" -#include "ui/pixel_ops.h" - -#define VGA_RAM_SIZE (8 * MiB) - -typedef struct ISAVGAMMState { - VGACommonState vga; - int it_shift; -} ISAVGAMMState; - -/* Memory mapped interface */ -static uint64_t vga_mm_read(void *opaque, hwaddr addr, unsigned size) -{ - ISAVGAMMState *s = opaque; - - return vga_ioport_read(&s->vga, addr >> s->it_shift) & - MAKE_64BIT_MASK(0, size * 8); -} - -static void vga_mm_write(void *opaque, hwaddr addr, uint64_t value, - unsigned size) -{ - ISAVGAMMState *s = opaque; - - vga_ioport_write(&s->vga, addr >> s->it_shift, - value & MAKE_64BIT_MASK(0, size * 8)); -} - -static const MemoryRegionOps vga_mm_ctrl_ops = { - .read = vga_mm_read, - .write = vga_mm_write, - .valid.min_access_size = 1, - .valid.max_access_size = 4, - .impl.min_access_size = 1, - .impl.max_access_size = 4, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static void vga_mm_init(ISAVGAMMState *s, hwaddr vram_base, - hwaddr ctrl_base, int it_shift, - MemoryRegion *address_space) -{ - MemoryRegion *s_ioport_ctrl, *vga_io_memory; - - s->it_shift = it_shift; - s_ioport_ctrl = g_malloc(sizeof(*s_ioport_ctrl)); - memory_region_init_io(s_ioport_ctrl, NULL, &vga_mm_ctrl_ops, s, - "vga-mm-ctrl", 0x100000); - memory_region_set_flush_coalesced(s_ioport_ctrl); - - vga_io_memory = g_malloc(sizeof(*vga_io_memory)); - /* XXX: endianness? 
*/ - memory_region_init_io(vga_io_memory, NULL, &vga_mem_ops, &s->vga, - "vga-mem", 0x20000); - - vmstate_register(NULL, 0, &vmstate_vga_common, s); - - memory_region_add_subregion(address_space, ctrl_base, s_ioport_ctrl); - s->vga.bank_offset = 0; - memory_region_add_subregion(address_space, - vram_base + 0x000a0000, vga_io_memory); - memory_region_set_coalescing(vga_io_memory); -} - -int isa_vga_mm_init(hwaddr vram_base, - hwaddr ctrl_base, int it_shift, - MemoryRegion *address_space) -{ - ISAVGAMMState *s; - - s = g_malloc0(sizeof(*s)); - - s->vga.vram_size_mb = VGA_RAM_SIZE / MiB; - s->vga.global_vmstate = true; - vga_common_init(&s->vga, NULL); - vga_mm_init(s, vram_base, ctrl_base, it_shift, address_space); - - s->vga.con = graphic_console_init(NULL, 0, s->vga.hw_ops, s); - - memory_region_add_subregion(address_space, - VBE_DISPI_LFB_PHYSICAL_ADDRESS, - &s->vga.vram); - - return 0; -} diff --git a/hw/display/vga-isa.c b/hw/display/vga-isa.c index 90851e730b..2a5437d803 100644 --- a/hw/display/vga-isa.c +++ b/hw/display/vga-isa.c @@ -32,6 +32,7 @@ #include "qemu/timer.h" #include "hw/loader.h" #include "hw/qdev-properties.h" +#include "ui/console.h" #include "qom/object.h" #define TYPE_ISA_VGA "isa-vga" @@ -62,7 +63,10 @@ static void vga_isa_realizefn(DeviceState *dev, Error **errp) const MemoryRegionPortio *vga_ports, *vbe_ports; s->global_vmstate = true; - vga_common_init(s, OBJECT(dev)); + if (!vga_common_init(s, OBJECT(dev), errp)) { + return; + } + s->legacy_address_space = isa_address_space(isadev); vga_io_memory = vga_init_io(s, OBJECT(dev), &vga_ports, &vbe_ports); isa_register_portio_list(isadev, &d->portio_vga, diff --git a/hw/display/vga-mmio.c b/hw/display/vga-mmio.c new file mode 100644 index 0000000000..cd2c46776d --- /dev/null +++ b/hw/display/vga-mmio.c @@ -0,0 +1,143 @@ +/* + * QEMU MMIO VGA Emulator. + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "hw/display/vga.h" +#include "hw/qdev-properties.h" +#include "ui/console.h" +#include "vga_int.h" + +/* + * QEMU interface: + * + sysbus MMIO region 0: VGA I/O registers + * + sysbus MMIO region 1: VGA MMIO registers + * + sysbus MMIO region 2: VGA memory + */ + +OBJECT_DECLARE_SIMPLE_TYPE(VGAMmioState, VGA_MMIO) + +struct VGAMmioState { + /*< private >*/ + SysBusDevice parent_obj; + + /*< public >*/ + VGACommonState vga; + MemoryRegion iomem; + MemoryRegion lowmem; + + uint8_t it_shift; +}; + +static uint64_t vga_mm_read(void *opaque, hwaddr addr, unsigned size) +{ + VGAMmioState *s = opaque; + + return vga_ioport_read(&s->vga, addr >> s->it_shift) & + MAKE_64BIT_MASK(0, size * 8); +} + +static void vga_mm_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + VGAMmioState *s = opaque; + + vga_ioport_write(&s->vga, addr >> s->it_shift, + value & MAKE_64BIT_MASK(0, size * 8)); +} + +static const MemoryRegionOps vga_mm_ctrl_ops = { + .read = vga_mm_read, + .write = vga_mm_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 1, + .impl.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void vga_mmio_reset(DeviceState *dev) +{ + VGAMmioState *s = VGA_MMIO(dev); + + vga_common_reset(&s->vga); +} + +static void vga_mmio_realizefn(DeviceState *dev, Error **errp) +{ + VGAMmioState *s = VGA_MMIO(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + memory_region_init_io(&s->iomem, OBJECT(dev), &vga_mm_ctrl_ops, s, + "vga-mmio", 0x100000); + memory_region_set_flush_coalesced(&s->iomem); + sysbus_init_mmio(sbd, &s->iomem); + + /* XXX: endianness? */ + memory_region_init_io(&s->lowmem, OBJECT(dev), &vga_mem_ops, &s->vga, + "vga-lowmem", 0x20000); + memory_region_set_coalescing(&s->lowmem); + sysbus_init_mmio(sbd, &s->lowmem); + + s->vga.bank_offset = 0; + s->vga.global_vmstate = true; + if (!vga_common_init(&s->vga, OBJECT(dev), errp)) { + return; + } + + sysbus_init_mmio(sbd, &s->vga.vram); + s->vga.con = graphic_console_init(dev, 0, s->vga.hw_ops, &s->vga); +} + +static Property vga_mmio_properties[] = { + DEFINE_PROP_UINT8("it_shift", VGAMmioState, it_shift, 0), + DEFINE_PROP_UINT32("vgamem_mb", VGAMmioState, vga.vram_size_mb, 8), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vga_mmio_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = vga_mmio_realizefn; + dc->reset = vga_mmio_reset; + dc->vmsd = &vmstate_vga_common; + device_class_set_props(dc, vga_mmio_properties); + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); +} + +static const TypeInfo vga_mmio_info = { + .name = TYPE_VGA_MMIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(VGAMmioState), + .class_init = vga_mmio_class_initfn, +}; + +static void vga_mmio_register_types(void) +{ + type_register_static(&vga_mmio_info); +} + +type_init(vga_mmio_register_types) diff --git a/hw/display/vga-pci.c b/hw/display/vga-pci.c index 62fb5c38c1..df23dbf3a0 100644 --- a/hw/display/vga-pci.c +++ b/hw/display/vga-pci.c @@ -30,11 +30,13 @@ #include "migration/vmstate.h" #include "vga_int.h" #include "ui/pixel_ops.h" +#include "ui/console.h" #include "qemu/module.h" #include "qemu/timer.h" #include "hw/loader.h" #include "hw/display/edid.h" #include "qom/object.h" +#include "hw/acpi/acpi_aml_interface.h" enum vga_pci_flags { PCI_VGA_FLAG_ENABLE_MMIO = 1, @@ -239,7 +241,9 @@ static void pci_std_vga_realize(PCIDevice *dev, 
Error **errp) bool edid = false; /* vga + console init */ - vga_common_init(s, OBJECT(dev)); + if (!vga_common_init(s, OBJECT(dev), errp)) { + return; + } vga_init(s, OBJECT(dev), pci_address_space(dev), pci_address_space_io(dev), true); @@ -275,7 +279,9 @@ static void pci_secondary_vga_realize(PCIDevice *dev, Error **errp) bool edid = false; /* vga + console init */ - vga_common_init(s, OBJECT(dev)); + if (!vga_common_init(s, OBJECT(dev), errp)) { + return; + } s->con = graphic_console_init(DEVICE(dev), 0, s->hw_ops, s); /* mmio bar */ @@ -350,11 +356,13 @@ static void vga_pci_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); k->vendor_id = PCI_VENDOR_ID_QEMU; k->device_id = PCI_DEVICE_ID_QEMU_VGA; dc->vmsd = &vmstate_vga_pci; set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + adevc->build_dev_aml = build_vga_aml; } static const TypeInfo vga_pci_type_info = { @@ -365,6 +373,7 @@ static const TypeInfo vga_pci_type_info = { .class_init = vga_pci_class_init, .interfaces = (InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { TYPE_ACPI_DEV_AML_IF }, { }, }, }; diff --git a/hw/display/vga.c b/hw/display/vga.c index 9d1f66af40..0cb26a791b 100644 --- a/hw/display/vga.c +++ b/hw/display/vga.c @@ -31,6 +31,7 @@ #include "vga_int.h" #include "vga_regs.h" #include "ui/pixel_ops.h" +#include "ui/console.h" #include "qemu/timer.h" #include "hw/xen/xen.h" #include "migration/vmstate.h" @@ -94,19 +95,19 @@ const uint8_t gr_mask[16] = { (((uint32_t)(__x) & (uint32_t)0x00ff0000UL) >> 8) | \ (((uint32_t)(__x) & (uint32_t)0xff000000UL) >> 24) )) -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN #define PAT(x) cbswap_32(x) #else #define PAT(x) (x) #endif -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN #define BIG 1 #else #define BIG 0 #endif -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN #define GET_PLANE(data, p) (((data) >> (24 - (p) * 8)) & 0xff) #else #define GET_PLANE(data, p) (((data) >> ((p) * 8)) & 0xff) @@ -133,7 +134,7 @@ static const uint32_t mask16[16] = { #undef PAT -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN #define PAT(x) (x) #else #define PAT(x) cbswap_32(x) @@ -1296,7 +1297,7 @@ static void vga_draw_text(VGACommonState *s, int full_update) if (cx > cx_max) cx_max = cx; *ch_attr_ptr = ch_attr; -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN ch = ch_attr >> 8; cattr = ch_attr & 0xff; #else @@ -1477,7 +1478,7 @@ static void vga_draw_graphic(VGACommonState *s, int full_update) vga_draw_line_func *vga_draw_line = NULL; bool share_surface, force_shadow = false; pixman_format_code_t format; -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN bool byteswap = !s->big_endian_fb; #else bool byteswap = s->big_endian_fb; @@ -1514,9 +1515,10 @@ static void vga_draw_graphic(VGACommonState *s, int full_update) force_shadow = true; } + /* bits 5-6: 0 = 16-color mode, 1 = 4-color mode, 2 = 256-color mode. 
*/ shift_control = (s->gr[VGA_GFX_MODE] >> 5) & 3; double_scan = (s->cr[VGA_CRTC_MAX_SCAN] >> 7); - if (shift_control != 1) { + if (s->cr[VGA_CRTC_MODE] & 1) { multi_scan = (((s->cr[VGA_CRTC_MAX_SCAN] & 0x1f) + 1) << double_scan) - 1; } else { @@ -2168,9 +2170,10 @@ static inline uint32_t uint_clamp(uint32_t val, uint32_t vmin, uint32_t vmax) return val; } -void vga_common_init(VGACommonState *s, Object *obj) +bool vga_common_init(VGACommonState *s, Object *obj, Error **errp) { int i, j, v, b; + Error *local_err = NULL; for(i = 0;i < 256; i++) { v = 0; @@ -2205,8 +2208,18 @@ void vga_common_init(VGACommonState *s, Object *obj) s->vbe_size_mask = s->vbe_size - 1; s->is_vbe_vmstate = 1; + + if (s->global_vmstate && qemu_ram_block_by_name("vga.vram")) { + error_setg(errp, "Only one global VGA device can be used at a time"); + return false; + } + memory_region_init_ram_nomigrate(&s->vram, obj, "vga.vram", s->vram_size, - &error_fatal); + &local_err); + if (local_err) { + error_propagate(errp, local_err); + return false; + } vmstate_register_ram(&s->vram, s->global_vmstate ? NULL : DEVICE(obj)); xen_register_framebuffer(&s->vram); s->vram_ptr = memory_region_get_ram_ptr(&s->vram); @@ -2231,12 +2244,14 @@ void vga_common_init(VGACommonState *s, Object *obj) * into a device attribute set by the machine/platform to remove * all target endian dependencies from this file. */ -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN s->default_endian_fb = true; #else s->default_endian_fb = false; #endif vga_dirty_log_start(s); + + return true; } static const MemoryRegionPortio vga_portio_list[] = { diff --git a/hw/display/vga_int.h b/hw/display/vga_int.h index 847e784ca6..7cf0d11201 100644 --- a/hw/display/vga_int.h +++ b/hw/display/vga_int.h @@ -27,9 +27,9 @@ #include "exec/ioport.h" #include "exec/memory.h" -#include "ui/console.h" #include "hw/display/bochs-vbe.h" +#include "hw/acpi/acpi_aml_interface.h" #define ST01_V_RETRACE 0x08 #define ST01_DISP_ENABLE 0x01 @@ -156,7 +156,7 @@ static inline int c6_to_8(int v) return (v << 2) | (b << 1) | b; } -void vga_common_init(VGACommonState *s, Object *obj); +bool vga_common_init(VGACommonState *s, Object *obj, Error **errp); void vga_init(VGACommonState *s, Object *obj, MemoryRegion *address_space, MemoryRegion *address_space_io, bool init_vga_ports); MemoryRegion *vga_init_io(VGACommonState *s, Object *obj, @@ -195,4 +195,5 @@ void pci_std_vga_mmio_region_init(VGACommonState *s, MemoryRegion *subs, bool qext, bool edid); +void build_vga_aml(AcpiDevAmlIf *adev, Aml *scope); #endif diff --git a/hw/display/vga_regs.h b/hw/display/vga_regs.h index 30a98b8736..7fdba34b9b 100644 --- a/hw/display/vga_regs.h +++ b/hw/display/vga_regs.h @@ -4,9 +4,9 @@ * Copyright 1999 Jeff Garzik * * Copyright history from vga16fb.c: - * Copyright 1999 Ben Pfaff and Petr Vandrovec - * Based on VGA info at http://www.osdever.net/FreeVGA/home.htm - * Based on VESA framebuffer (c) 1998 Gerd Knorr + * Copyright 1999 Ben Pfaff and Petr Vandrovec + * Based on VGA info at http://www.osdever.net/FreeVGA/home.htm + * Based on VESA framebuffer (c) 1998 Gerd Knorr * * This file is subject to the terms and conditions of the GNU General * Public License. 
See the file COPYING in the main directory of this diff --git a/hw/display/vhost-user-gpu-pci.c b/hw/display/vhost-user-gpu-pci.c index daefcf7101..d119bcae45 100644 --- a/hw/display/vhost-user-gpu-pci.c +++ b/hw/display/vhost-user-gpu-pci.c @@ -44,6 +44,7 @@ static const VirtioPCIDeviceTypeInfo vhost_user_gpu_pci_info = { .instance_init = vhost_user_gpu_pci_initfn, }; module_obj(TYPE_VHOST_USER_GPU_PCI); +module_kconfig(VHOST_USER_GPU); static void vhost_user_gpu_pci_register_types(void) { diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c index 49df56cd14..19c0e20103 100644 --- a/hw/display/vhost-user-gpu.c +++ b/hw/display/vhost-user-gpu.c @@ -11,6 +11,7 @@ */ #include "qemu/osdep.h" +#include "qemu/sockets.h" #include "hw/qdev-properties.h" #include "hw/virtio/virtio-gpu.h" #include "chardev/char-fe.h" @@ -254,8 +255,8 @@ vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg) vhost_user_gpu_unblock(g); break; } - dpy_gl_update(con, m->x, m->y, m->width, m->height); g->backend_blocked = true; + dpy_gl_update(con, m->x, m->y, m->width, m->height); break; } case VHOST_USER_GPU_UPDATE: { @@ -375,7 +376,7 @@ vhost_user_gpu_do_set_socket(VhostUserGPU *g, Error **errp) Chardev *chr; int sv[2]; - if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) { + if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) { error_setg_errno(errp, errno, "socketpair() failed"); return false; } @@ -565,6 +566,12 @@ vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp) g->vhost_gpu_fd = -1; } +static struct vhost_dev *vhost_user_gpu_get_vhost(VirtIODevice *vdev) +{ + VhostUserGPU *g = VHOST_USER_GPU(vdev); + return &g->vhost->dev; +} + static Property vhost_user_gpu_properties[] = { VIRTIO_GPU_BASE_PROPERTIES(VhostUserGPU, parent_obj.conf), DEFINE_PROP_END_OF_LIST(), @@ -586,6 +593,7 @@ vhost_user_gpu_class_init(ObjectClass *klass, void *data) vdc->guest_notifier_pending = vhost_user_gpu_guest_notifier_pending; vdc->get_config = vhost_user_gpu_get_config; vdc->set_config = vhost_user_gpu_set_config; + vdc->get_vhost = vhost_user_gpu_get_vhost; device_class_set_props(dc, vhost_user_gpu_properties); } @@ -599,6 +607,7 @@ static const TypeInfo vhost_user_gpu_info = { .class_init = vhost_user_gpu_class_init, }; module_obj(TYPE_VHOST_USER_GPU); +module_kconfig(VHOST_USER_GPU); static void vhost_user_gpu_register_types(void) { diff --git a/hw/display/vhost-user-vga.c b/hw/display/vhost-user-vga.c index 072c9c65bc..0c146080fd 100644 --- a/hw/display/vhost-user-vga.c +++ b/hw/display/vhost-user-vga.c @@ -45,6 +45,7 @@ static const VirtioPCIDeviceTypeInfo vhost_user_vga_info = { .instance_init = vhost_user_vga_inst_initfn, }; module_obj(TYPE_VHOST_USER_VGA); +module_kconfig(VHOST_USER_VGA); static void vhost_user_vga_register_types(void) { diff --git a/hw/display/virtio-gpu-base.c b/hw/display/virtio-gpu-base.c index c8da4806e0..a29f191aa8 100644 --- a/hw/display/virtio-gpu-base.c +++ b/hw/display/virtio-gpu-base.c @@ -69,16 +69,17 @@ static void virtio_gpu_notify_event(VirtIOGPUBase *g, uint32_t event_type) virtio_notify_config(&g->parent_obj); } -static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info) +static void virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info) { VirtIOGPUBase *g = opaque; if (idx >= g->conf.max_outputs) { - return -1; + return; } g->req_state[idx].x = info->xoff; g->req_state[idx].y = info->yoff; + g->req_state[idx].refresh_rate = info->refresh_rate; g->req_state[idx].width = info->width; g->req_state[idx].height = 
info->height; g->req_state[idx].width_mm = info->width_mm; @@ -92,7 +93,7 @@ static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info) /* send event to guest */ virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY); - return 0; + return; } static void @@ -117,6 +118,10 @@ virtio_gpu_gl_block(void *opaque, bool block) g->renderer_blocked--; } assert(g->renderer_blocked >= 0); + + if (!block && g->renderer_blocked == 0) { + virtio_gpu_gl_flushed(g); + } } static int @@ -143,7 +148,6 @@ static const GraphicHwOps virtio_gpu_ops = { .text_update = virtio_gpu_text_update, .ui_info = virtio_gpu_ui_info, .gl_block = virtio_gpu_gl_block, - .gl_flushed = virtio_gpu_gl_flushed, }; bool @@ -170,7 +174,7 @@ virtio_gpu_base_device_realize(DeviceState *qdev, } g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs); - virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU, + virtio_init(VIRTIO_DEVICE(g), VIRTIO_ID_GPU, sizeof(struct virtio_gpu_config)); if (virtio_gpu_virgl_enabled(g->conf)) { @@ -257,6 +261,7 @@ static const TypeInfo virtio_gpu_base_info = { .abstract = true }; module_obj(TYPE_VIRTIO_GPU_BASE); +module_kconfig(VIRTIO_GPU); static void virtio_register_types(void) diff --git a/hw/display/virtio-gpu-gl.c b/hw/display/virtio-gpu-gl.c index 6cc4313b1a..e06be60dfb 100644 --- a/hw/display/virtio-gpu-gl.c +++ b/hw/display/virtio-gpu-gl.c @@ -108,7 +108,7 @@ static void virtio_gpu_gl_device_realize(DeviceState *qdev, Error **errp) { VirtIOGPU *g = VIRTIO_GPU(qdev); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN error_setg(errp, "virgl is not supported on bigendian platforms"); return; #endif @@ -160,6 +160,7 @@ static const TypeInfo virtio_gpu_gl_info = { .class_init = virtio_gpu_gl_class_init, }; module_obj(TYPE_VIRTIO_GPU_GL); +module_kconfig(VIRTIO_GPU); static void virtio_register_types(void) { diff --git a/hw/display/virtio-gpu-pci-gl.c b/hw/display/virtio-gpu-pci-gl.c index 99b14a0718..a2819e1ca9 100644 --- a/hw/display/virtio-gpu-pci-gl.c +++ b/hw/display/virtio-gpu-pci-gl.c @@ -47,6 +47,7 @@ static const VirtioPCIDeviceTypeInfo virtio_gpu_gl_pci_info = { .instance_init = virtio_gpu_gl_initfn, }; module_obj(TYPE_VIRTIO_GPU_GL_PCI); +module_kconfig(VIRTIO_PCI); static void virtio_gpu_gl_pci_register_types(void) { diff --git a/hw/display/virtio-gpu-pci.c b/hw/display/virtio-gpu-pci.c index e36eee0c40..93f214ff58 100644 --- a/hw/display/virtio-gpu-pci.c +++ b/hw/display/virtio-gpu-pci.c @@ -65,6 +65,7 @@ static const TypeInfo virtio_gpu_pci_base_info = { .abstract = true }; module_obj(TYPE_VIRTIO_GPU_PCI_BASE); +module_kconfig(VIRTIO_PCI); #define TYPE_VIRTIO_GPU_PCI "virtio-gpu-pci" typedef struct VirtIOGPUPCI VirtIOGPUPCI; diff --git a/stubs/virtio-gpu-udmabuf.c b/hw/display/virtio-gpu-udmabuf-stubs.c similarity index 81% rename from stubs/virtio-gpu-udmabuf.c rename to hw/display/virtio-gpu-udmabuf-stubs.c index 81f661441a..f692e13510 100644 --- a/stubs/virtio-gpu-udmabuf.c +++ b/hw/display/virtio-gpu-udmabuf-stubs.c @@ -20,7 +20,8 @@ void virtio_gpu_fini_udmabuf(struct virtio_gpu_simple_resource *res) int virtio_gpu_update_dmabuf(VirtIOGPU *g, uint32_t scanout_id, struct virtio_gpu_simple_resource *res, - struct virtio_gpu_framebuffer *fb) + struct virtio_gpu_framebuffer *fb, + struct virtio_gpu_rect *r) { /* nothing (stub) */ return 0; diff --git a/hw/display/virtio-gpu-udmabuf.c b/hw/display/virtio-gpu-udmabuf.c index 3c01a415e7..8bdf4bac6e 100644 --- a/hw/display/virtio-gpu-udmabuf.c +++ b/hw/display/virtio-gpu-udmabuf.c @@ -13,7 +13,6 @@ #include 
"qemu/osdep.h" #include "qemu/units.h" -#include "qemu-common.h" #include "qemu/iov.h" #include "ui/console.h" #include "hw/virtio/virtio-gpu.h" @@ -171,7 +170,8 @@ static VGPUDMABuf *virtio_gpu_create_dmabuf(VirtIOGPU *g, uint32_t scanout_id, struct virtio_gpu_simple_resource *res, - struct virtio_gpu_framebuffer *fb) + struct virtio_gpu_framebuffer *fb, + struct virtio_gpu_rect *r) { VGPUDMABuf *dmabuf; @@ -183,9 +183,14 @@ static VGPUDMABuf dmabuf->buf.width = fb->width; dmabuf->buf.height = fb->height; dmabuf->buf.stride = fb->stride; + dmabuf->buf.x = r->x; + dmabuf->buf.y = r->y; + dmabuf->buf.scanout_width = r->width; + dmabuf->buf.scanout_height = r->height; dmabuf->buf.fourcc = qemu_pixman_to_drm_format(fb->format); dmabuf->buf.fd = res->dmabuf_fd; - + dmabuf->buf.allow_fences = true; + dmabuf->buf.draw_submitted = false; dmabuf->scanout_id = scanout_id; QTAILQ_INSERT_HEAD(&g->dmabuf.bufs, dmabuf, next); @@ -195,24 +200,25 @@ static VGPUDMABuf int virtio_gpu_update_dmabuf(VirtIOGPU *g, uint32_t scanout_id, struct virtio_gpu_simple_resource *res, - struct virtio_gpu_framebuffer *fb) + struct virtio_gpu_framebuffer *fb, + struct virtio_gpu_rect *r) { struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id]; VGPUDMABuf *new_primary, *old_primary = NULL; - new_primary = virtio_gpu_create_dmabuf(g, scanout_id, res, fb); + new_primary = virtio_gpu_create_dmabuf(g, scanout_id, res, fb, r); if (!new_primary) { return -EINVAL; } - if (g->dmabuf.primary) { - old_primary = g->dmabuf.primary; + if (g->dmabuf.primary[scanout_id]) { + old_primary = g->dmabuf.primary[scanout_id]; } - g->dmabuf.primary = new_primary; + g->dmabuf.primary[scanout_id] = new_primary; qemu_console_resize(scanout->con, - new_primary->buf.width, - new_primary->buf.height); + new_primary->buf.scanout_width, + new_primary->buf.scanout_height); dpy_gl_scanout_dmabuf(scanout->con, &new_primary->buf); if (old_primary) { diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c index 18d054922f..73cb92c8d5 100644 --- a/hw/display/virtio-gpu-virgl.c +++ b/hw/display/virtio-gpu-virgl.c @@ -175,7 +175,7 @@ static void virgl_cmd_set_scanout(VirtIOGPU *g, virgl_renderer_force_ctx_0(); dpy_gl_scanout_texture( g->parent_obj.scanout[ss.scanout_id].con, info.tex_id, - info.flags & 1 /* FIXME: Y_0_TOP */, + info.flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP, info.width, info.height, ss.r.x, ss.r.y, ss.r.width, ss.r.height); } else { @@ -609,6 +609,7 @@ int virtio_gpu_virgl_init(VirtIOGPU *g) ret = virgl_renderer_init(g, 0, &virtio_gpu_3d_cbs); if (ret != 0) { + error_report("virgl could not be initialized: %d", ret); return ret; } diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index 990e71fd40..4e2e0dd53a 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -217,6 +217,7 @@ virtio_gpu_generate_edid(VirtIOGPU *g, int scanout, .height_mm = b->req_state[scanout].height_mm, .prefx = b->req_state[scanout].width, .prefy = b->req_state[scanout].height, + .refresh_rate = b->req_state[scanout].refresh_rate, }; edid->size = cpu_to_le32(sizeof(edid->edid)); @@ -362,7 +363,7 @@ static void virtio_gpu_resource_create_blob(VirtIOGPU *g, ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob), cmd, &res->addrs, &res->iov, &res->iov_cnt); - if (ret != 0 || res->iov) { + if (ret != 0) { cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; g_free(res); return; @@ -497,6 +498,8 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g, struct virtio_gpu_resource_flush rf; struct 
virtio_gpu_scanout *scanout; pixman_region16_t flush_region; + bool within_bounds = false; + bool update_submitted = false; int i; VIRTIO_GPU_FILL_CMD(rf); @@ -514,12 +517,31 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g, for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { scanout = &g->parent_obj.scanout[i]; if (scanout->resource_id == res->resource_id && - console_has_gl(scanout->con)) { - dpy_gl_update(scanout->con, 0, 0, scanout->width, - scanout->height); - return; + rf.r.x < scanout->x + scanout->width && + rf.r.x + rf.r.width >= scanout->x && + rf.r.y < scanout->y + scanout->height && + rf.r.y + rf.r.height >= scanout->y) { + within_bounds = true; + + if (console_has_gl(scanout->con)) { + dpy_gl_update(scanout->con, 0, 0, scanout->width, + scanout->height); + update_submitted = true; + } } } + + if (update_submitted) { + return; + } + if (!within_bounds) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts" + " bounds for flush %d: %d %d %d %d\n", + __func__, rf.resource_id, rf.r.x, rf.r.y, + rf.r.width, rf.r.height); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } } if (!res->blob && @@ -627,7 +649,7 @@ static void virtio_gpu_do_set_scanout(VirtIOGPU *g, if (res->blob) { if (console_has_gl(scanout->con)) { - if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb)) { + if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) { virtio_gpu_update_scanout(g, scanout_id, res, r); return; } @@ -814,8 +836,9 @@ int virtio_gpu_create_mapping_iov(VirtIOGPU *g, do { len = l; - map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, - a, &len, DMA_DIRECTION_TO_DEVICE); + map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len, + DMA_DIRECTION_TO_DEVICE, + MEMTXATTRS_UNSPECIFIED); if (!map) { qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for" " element %d\n", __func__, e); @@ -830,9 +853,9 @@ int virtio_gpu_create_mapping_iov(VirtIOGPU *g, } if (!(v % 16)) { - *iov = g_realloc(*iov, sizeof(struct iovec) * (v + 16)); + *iov = g_renew(struct iovec, *iov, v + 16); if (addr) { - *addr = g_realloc(*addr, sizeof(uint64_t) * (v + 16)); + *addr = g_renew(uint64_t, *addr, v + 16); } } (*iov)[v].iov_base = map; @@ -985,8 +1008,10 @@ void virtio_gpu_simple_process_cmd(VirtIOGPU *g, break; } if (!cmd->finished) { - virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error : - VIRTIO_GPU_RESP_OK_NODATA); + if (!g->parent_obj.renderer_blocked) { + virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? 
cmd->error : + VIRTIO_GPU_RESP_OK_NODATA); + } } } @@ -1042,6 +1067,30 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g) g->processing_cmdq = false; } +static void virtio_gpu_process_fenceq(VirtIOGPU *g) +{ + struct virtio_gpu_ctrl_command *cmd, *tmp; + + QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) { + trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id); + virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA); + QTAILQ_REMOVE(&g->fenceq, cmd, next); + g_free(cmd); + g->inflight--; + if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { + fprintf(stderr, "inflight: %3d (-)\r", g->inflight); + } + } +} + +static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b) +{ + VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj); + + virtio_gpu_process_fenceq(g); + virtio_gpu_process_cmdq(g); +} + static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) { VirtIOGPU *g = VIRTIO_GPU(vdev); @@ -1226,8 +1275,9 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, for (i = 0; i < res->iov_cnt; i++) { hwaddr len = res->iov[i].iov_len; res->iov[i].iov_base = - dma_memory_map(VIRTIO_DEVICE(g)->dma_as, - res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE); + dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len, + DMA_DIRECTION_TO_DEVICE, + MEMTXATTRS_UNSPECIFIED); if (!res->iov[i].iov_base || len != res->iov[i].iov_len) { /* Clean up the half-a-mapping we just created... */ @@ -1400,10 +1450,12 @@ static void virtio_gpu_class_init(ObjectClass *klass, void *data) DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass); + VirtIOGPUBaseClass *vgbc = &vgc->parent; vgc->handle_ctrl = virtio_gpu_handle_ctrl; vgc->process_cmd = virtio_gpu_simple_process_cmd; vgc->update_cursor_data = virtio_gpu_update_cursor_data; + vgbc->gl_flushed = virtio_gpu_handle_gl_flushed; vdc->realize = virtio_gpu_device_realize; vdc->reset = virtio_gpu_reset; @@ -1422,6 +1474,7 @@ static const TypeInfo virtio_gpu_info = { .class_init = virtio_gpu_class_init, }; module_obj(TYPE_VIRTIO_GPU); +module_kconfig(VIRTIO_GPU); static void virtio_register_types(void) { diff --git a/hw/display/virtio-vga-gl.c b/hw/display/virtio-vga-gl.c index f22549097c..984faa6b39 100644 --- a/hw/display/virtio-vga-gl.c +++ b/hw/display/virtio-vga-gl.c @@ -37,6 +37,7 @@ static VirtioPCIDeviceTypeInfo virtio_vga_gl_info = { .instance_init = virtio_vga_gl_inst_initfn, }; module_obj(TYPE_VIRTIO_VGA_GL); +module_kconfig(VIRTIO_VGA); static void virtio_vga_register_types(void) { diff --git a/hw/display/virtio-vga.c b/hw/display/virtio-vga.c index 9e57f61e9e..4dcb34c4a7 100644 --- a/hw/display/virtio-vga.c +++ b/hw/display/virtio-vga.c @@ -47,15 +47,14 @@ static void virtio_vga_base_text_update(void *opaque, console_ch_t *chardata) } } -static int virtio_vga_base_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info) +static void virtio_vga_base_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info) { VirtIOVGABase *vvga = opaque; VirtIOGPUBase *g = vvga->vgpu; if (g->hw_ops->ui_info) { - return g->hw_ops->ui_info(g, idx, info); + g->hw_ops->ui_info(g, idx, info); } - return -1; } static void virtio_vga_base_gl_block(void *opaque, bool block) @@ -68,16 +67,6 @@ static void virtio_vga_base_gl_block(void *opaque, bool block) } } -static void virtio_vga_base_gl_flushed(void *opaque) -{ - VirtIOVGABase *vvga = opaque; - VirtIOGPUBase *g = vvga->vgpu; - - if (g->hw_ops->gl_flushed) { - g->hw_ops->gl_flushed(g); - } -} - static int 
virtio_vga_base_get_flags(void *opaque) { VirtIOVGABase *vvga = opaque; @@ -93,7 +82,6 @@ static const GraphicHwOps virtio_vga_base_ops = { .text_update = virtio_vga_base_text_update, .ui_info = virtio_vga_base_ui_info, .gl_block = virtio_vga_base_gl_block, - .gl_flushed = virtio_vga_base_gl_flushed, }; static const VMStateDescription vmstate_virtio_vga_base = { @@ -119,7 +107,9 @@ static void virtio_vga_base_realize(VirtIOPCIProxy *vpci_dev, Error **errp) /* init vga compat bits */ vga->vram_size_mb = 8; - vga_common_init(vga, OBJECT(vpci_dev)); + if (!vga_common_init(vga, OBJECT(vpci_dev), errp)) { + return; + } vga_init(vga, OBJECT(vpci_dev), pci_address_space(&vpci_dev->pci_dev), pci_address_space_io(&vpci_dev->pci_dev), true); pci_register_bar(&vpci_dev->pci_dev, 0, @@ -231,7 +221,7 @@ static void virtio_vga_base_class_init(ObjectClass *klass, void *data) virtio_vga_set_big_endian_fb); } -static TypeInfo virtio_vga_base_info = { +static const TypeInfo virtio_vga_base_info = { .name = TYPE_VIRTIO_VGA_BASE, .parent = TYPE_VIRTIO_PCI, .instance_size = sizeof(VirtIOVGABase), @@ -240,6 +230,7 @@ static TypeInfo virtio_vga_base_info = { .abstract = true, }; module_obj(TYPE_VIRTIO_VGA_BASE); +module_kconfig(VIRTIO_VGA); #define TYPE_VIRTIO_VGA "virtio-vga" diff --git a/hw/display/vmware_vga.c b/hw/display/vmware_vga.c index e2969a6c81..53949d2539 100644 --- a/hw/display/vmware_vga.c +++ b/hw/display/vmware_vga.c @@ -33,6 +33,7 @@ #include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "qom/object.h" +#include "ui/console.h" #undef VERBOSE #define HW_RECT_ACCEL @@ -80,7 +81,7 @@ struct vmsvga_state_s { struct vmsvga_rect_s { int x, y, w, h; } redraw_fifo[REDRAW_FIFO_LEN]; - int redraw_fifo_first, redraw_fifo_last; + int redraw_fifo_last; }; #define TYPE_VMWARE_SVGA "vmware-svga" @@ -297,46 +298,52 @@ static inline bool vmsvga_verify_rect(DisplaySurface *surface, int x, int y, int w, int h) { if (x < 0) { - fprintf(stderr, "%s: x was < 0 (%d)\n", name, x); + trace_vmware_verify_rect_less_than_zero(name, "x", x); return false; } if (x > SVGA_MAX_WIDTH) { - fprintf(stderr, "%s: x was > %d (%d)\n", name, SVGA_MAX_WIDTH, x); + trace_vmware_verify_rect_greater_than_bound(name, "x", SVGA_MAX_WIDTH, + x); return false; } if (w < 0) { - fprintf(stderr, "%s: w was < 0 (%d)\n", name, w); + trace_vmware_verify_rect_less_than_zero(name, "w", w); return false; } if (w > SVGA_MAX_WIDTH) { - fprintf(stderr, "%s: w was > %d (%d)\n", name, SVGA_MAX_WIDTH, w); + trace_vmware_verify_rect_greater_than_bound(name, "w", SVGA_MAX_WIDTH, + w); return false; } if (x + w > surface_width(surface)) { - fprintf(stderr, "%s: width was > %d (x: %d, w: %d)\n", - name, surface_width(surface), x, w); + trace_vmware_verify_rect_surface_bound_exceeded(name, "width", + surface_width(surface), + "x", x, "w", w); return false; } if (y < 0) { - fprintf(stderr, "%s: y was < 0 (%d)\n", name, y); + trace_vmware_verify_rect_less_than_zero(name, "y", y); return false; } if (y > SVGA_MAX_HEIGHT) { - fprintf(stderr, "%s: y was > %d (%d)\n", name, SVGA_MAX_HEIGHT, y); + trace_vmware_verify_rect_greater_than_bound(name, "y", SVGA_MAX_HEIGHT, + y); return false; } if (h < 0) { - fprintf(stderr, "%s: h was < 0 (%d)\n", name, h); + trace_vmware_verify_rect_less_than_zero(name, "h", h); return false; } if (h > SVGA_MAX_HEIGHT) { - fprintf(stderr, "%s: h was > %d (%d)\n", name, SVGA_MAX_HEIGHT, h); + trace_vmware_verify_rect_greater_than_bound(name, "h", SVGA_MAX_HEIGHT, + h); return false; } if (y + h > surface_height(surface)) { 
- fprintf(stderr, "%s: update height > %d (y: %d, h: %d)\n", - name, surface_height(surface), y, h); + trace_vmware_verify_rect_surface_bound_exceeded(name, "height", + surface_height(surface), + "y", y, "h", h); return false; } @@ -374,33 +381,39 @@ static inline void vmsvga_update_rect(struct vmsvga_state_s *s, dpy_gfx_update(s->vga.con, x, y, w, h); } -static inline void vmsvga_update_rect_delayed(struct vmsvga_state_s *s, - int x, int y, int w, int h) -{ - struct vmsvga_rect_s *rect = &s->redraw_fifo[s->redraw_fifo_last++]; - - s->redraw_fifo_last &= REDRAW_FIFO_LEN - 1; - rect->x = x; - rect->y = y; - rect->w = w; - rect->h = h; -} - static inline void vmsvga_update_rect_flush(struct vmsvga_state_s *s) { struct vmsvga_rect_s *rect; if (s->invalidated) { - s->redraw_fifo_first = s->redraw_fifo_last; + s->redraw_fifo_last = 0; return; } /* Overlapping region updates can be optimised out here - if someone * knows a smart algorithm to do that, please share. */ - while (s->redraw_fifo_first != s->redraw_fifo_last) { - rect = &s->redraw_fifo[s->redraw_fifo_first++]; - s->redraw_fifo_first &= REDRAW_FIFO_LEN - 1; + for (int i = 0; i < s->redraw_fifo_last; i++) { + rect = &s->redraw_fifo[i]; vmsvga_update_rect(s, rect->x, rect->y, rect->w, rect->h); } + + s->redraw_fifo_last = 0; +} + +static inline void vmsvga_update_rect_delayed(struct vmsvga_state_s *s, + int x, int y, int w, int h) +{ + + if (s->redraw_fifo_last >= REDRAW_FIFO_LEN) { + trace_vmware_update_rect_delayed_flush(); + vmsvga_update_rect_flush(s); + } + + struct vmsvga_rect_s *rect = &s->redraw_fifo[s->redraw_fifo_last++]; + + rect->x = x; + rect->y = y; + rect->w = w; + rect->h = h; } #ifdef HW_RECT_ACCEL @@ -509,6 +522,8 @@ static inline void vmsvga_cursor_define(struct vmsvga_state_s *s, int i, pixels; qc = cursor_alloc(c->width, c->height); + assert(qc != NULL); + qc->hot_x = c->hot_x; qc->hot_y = c->hot_y; switch (c->bpp) { @@ -1153,7 +1168,6 @@ static void vmsvga_reset(DeviceState *dev) s->config = 0; s->svgaid = SVGA_ID; s->cursor.on = 0; - s->redraw_fifo_first = 0; s->redraw_fifo_last = 0; s->syncing = 0; @@ -1248,7 +1262,7 @@ static void vmsvga_init(DeviceState *dev, struct vmsvga_state_s *s, &error_fatal); s->fifo_ptr = memory_region_get_ram_ptr(&s->fifo_ram); - vga_common_init(&s->vga, OBJECT(dev)); + vga_common_init(&s->vga, OBJECT(dev), &error_fatal); vga_init(&s->vga, OBJECT(dev), address_space, io, true); vmstate_register(NULL, 0, &vmstate_vga_common, &s->vga); s->new_depth = 32; diff --git a/hw/display/xenfb.c b/hw/display/xenfb.c index 838260b6ad..260eb38a76 100644 --- a/hw/display/xenfb.c +++ b/hw/display/xenfb.c @@ -76,7 +76,7 @@ struct XenFB { int do_resize; struct { - int x,y,w,h; + int x,y,w,h; } up_rects[UP_QUEUE]; int up_count; int up_fullscreen; @@ -116,32 +116,32 @@ static void common_unbind(struct common *c) xen_pv_unbind_evtchn(&c->xendev); if (c->page) { xenforeignmemory_unmap(xen_fmem, c->page, 1); - c->page = NULL; + c->page = NULL; } } /* -------------------------------------------------------------------- */ /* Send an event to the keyboard frontend driver */ static int xenfb_kbd_event(struct XenInput *xenfb, - union xenkbd_in_event *event) + union xenkbd_in_event *event) { struct xenkbd_page *page = xenfb->c.page; uint32_t prod; if (xenfb->c.xendev.be_state != XenbusStateConnected) - return 0; + return 0; if (!page) return 0; prod = page->in_prod; if (prod - page->in_cons == XENKBD_IN_RING_LEN) { - errno = EAGAIN; - return -1; + errno = EAGAIN; + return -1; } - xen_mb(); /* ensure ring space 
available */ + xen_mb(); /* ensure ring space available */ XENKBD_IN_RING_REF(page, prod) = *event; - xen_wmb(); /* ensure ring contents visible */ + xen_wmb(); /* ensure ring contents visible */ page->in_prod = prod + 1; return xen_pv_send_notify(&xenfb->c.xendev); } @@ -161,7 +161,7 @@ static int xenfb_send_key(struct XenInput *xenfb, bool down, int keycode) /* Send a relative mouse movement event */ static int xenfb_send_motion(struct XenInput *xenfb, - int rel_x, int rel_y, int rel_z) + int rel_x, int rel_y, int rel_z) { union xenkbd_in_event event; @@ -176,7 +176,7 @@ static int xenfb_send_motion(struct XenInput *xenfb, /* Send an absolute mouse movement event */ static int xenfb_send_position(struct XenInput *xenfb, - int abs_x, int abs_y, int z) + int abs_x, int abs_y, int z) { union xenkbd_in_event event; @@ -354,7 +354,7 @@ static int input_initialise(struct XenLegacyDevice *xendev) rc = common_bind(&in->c); if (rc != 0) - return rc; + return rc; return 0; } @@ -415,7 +415,7 @@ static void input_event(struct XenLegacyDevice *xendev) /* We don't understand any keyboard events, so just ignore them. */ if (page->out_prod == page->out_cons) - return; + return; page->out_cons = page->out_prod; xen_pv_send_notify(&xenfb->c.xendev); } @@ -429,7 +429,7 @@ static void xenfb_copy_mfns(int mode, int count, xen_pfn_t *dst, void *src) int i; for (i = 0; i < count; i++) - dst[i] = (mode == 32) ? src32[i] : src64[i]; + dst[i] = (mode == 32) ? src32[i] : src64[i]; } static int xenfb_map_fb(struct XenFB *xenfb) @@ -447,43 +447,43 @@ static int xenfb_map_fb(struct XenFB *xenfb) mode = sizeof(unsigned long) * 8; if (!protocol) { - /* - * Undefined protocol, some guesswork needed. - * - * Old frontends which don't set the protocol use - * one page directory only, thus pd[1] must be zero. - * pd[1] of the 32bit struct layout and the lower - * 32 bits of pd[0] of the 64bit struct layout have - * the same location, so we can check that ... - */ - uint32_t *ptr32 = NULL; - uint32_t *ptr64 = NULL; + /* + * Undefined protocol, some guesswork needed. + * + * Old frontends which don't set the protocol use + * one page directory only, thus pd[1] must be zero. + * pd[1] of the 32bit struct layout and the lower + * 32 bits of pd[0] of the 64bit struct layout have + * the same location, so we can check that ... 
+ */ + uint32_t *ptr32 = NULL; + uint32_t *ptr64 = NULL; #if defined(__i386__) - ptr32 = (void*)page->pd; - ptr64 = ((void*)page->pd) + 4; + ptr32 = (void*)page->pd; + ptr64 = ((void*)page->pd) + 4; #elif defined(__x86_64__) - ptr32 = ((void*)page->pd) - 4; - ptr64 = (void*)page->pd; + ptr32 = ((void*)page->pd) - 4; + ptr64 = (void*)page->pd; #endif - if (ptr32) { - if (ptr32[1] == 0) { - mode = 32; - pd = ptr32; - } else { - mode = 64; - pd = ptr64; - } - } + if (ptr32) { + if (ptr32[1] == 0) { + mode = 32; + pd = ptr32; + } else { + mode = 64; + pd = ptr64; + } + } #if defined(__x86_64__) } else if (strcmp(protocol, XEN_IO_PROTO_ABI_X86_32) == 0) { - /* 64bit dom0, 32bit domU */ - mode = 32; - pd = ((void*)page->pd) - 4; + /* 64bit dom0, 32bit domU */ + mode = 32; + pd = ((void*)page->pd) - 4; #elif defined(__i386__) } else if (strcmp(protocol, XEN_IO_PROTO_ABI_X86_64) == 0) { - /* 32bit dom0, 64bit domU */ - mode = 64; - pd = ((void*)page->pd) + 4; + /* 32bit dom0, 64bit domU */ + mode = 64; + pd = ((void*)page->pd) + 4; #endif } @@ -496,21 +496,21 @@ static int xenfb_map_fb(struct XenFB *xenfb) n_fbdirs = xenfb->fbpages * mode / 8; n_fbdirs = DIV_ROUND_UP(n_fbdirs, XC_PAGE_SIZE); - pgmfns = g_malloc0(sizeof(xen_pfn_t) * n_fbdirs); - fbmfns = g_malloc0(sizeof(xen_pfn_t) * xenfb->fbpages); + pgmfns = g_new0(xen_pfn_t, n_fbdirs); + fbmfns = g_new0(xen_pfn_t, xenfb->fbpages); xenfb_copy_mfns(mode, n_fbdirs, pgmfns, pd); map = xenforeignmemory_map(xen_fmem, xenfb->c.xendev.dom, PROT_READ, n_fbdirs, pgmfns, NULL); if (map == NULL) - goto out; + goto out; xenfb_copy_mfns(mode, xenfb->fbpages, fbmfns, map); xenforeignmemory_unmap(xen_fmem, map, n_fbdirs); xenfb->pixels = xenforeignmemory_map(xen_fmem, xenfb->c.xendev.dom, PROT_READ, xenfb->fbpages, fbmfns, NULL); if (xenfb->pixels == NULL) - goto out; + goto out; ret = 0; /* all is fine */ @@ -589,35 +589,35 @@ static int xenfb_configure_fb(struct XenFB *xenfb, size_t fb_len_lim, /* A convenient function for munging pixels between different depths */ #define BLT(SRC_T,DST_T,RSB,GSB,BSB,RDB,GDB,BDB) \ - for (line = y ; line < (y+h) ; line++) { \ - SRC_T *src = (SRC_T *)(xenfb->pixels \ - + xenfb->offset \ - + (line * xenfb->row_stride) \ - + (x * xenfb->depth / 8)); \ - DST_T *dst = (DST_T *)(data \ - + (line * linesize) \ - + (x * bpp / 8)); \ - int col; \ - const int RSS = 32 - (RSB + GSB + BSB); \ - const int GSS = 32 - (GSB + BSB); \ - const int BSS = 32 - (BSB); \ - const uint32_t RSM = (~0U) << (32 - RSB); \ - const uint32_t GSM = (~0U) << (32 - GSB); \ - const uint32_t BSM = (~0U) << (32 - BSB); \ - const int RDS = 32 - (RDB + GDB + BDB); \ - const int GDS = 32 - (GDB + BDB); \ - const int BDS = 32 - (BDB); \ - const uint32_t RDM = (~0U) << (32 - RDB); \ - const uint32_t GDM = (~0U) << (32 - GDB); \ - const uint32_t BDM = (~0U) << (32 - BDB); \ - for (col = x ; col < (x+w) ; col++) { \ - uint32_t spix = *src; \ - *dst = (((spix << RSS) & RSM & RDM) >> RDS) | \ - (((spix << GSS) & GSM & GDM) >> GDS) | \ - (((spix << BSS) & BSM & BDM) >> BDS); \ - src = (SRC_T *) ((unsigned long) src + xenfb->depth / 8); \ - dst = (DST_T *) ((unsigned long) dst + bpp / 8); \ - } \ + for (line = y ; line < (y+h) ; line++) { \ + SRC_T *src = (SRC_T *)(xenfb->pixels \ + + xenfb->offset \ + + (line * xenfb->row_stride) \ + + (x * xenfb->depth / 8)); \ + DST_T *dst = (DST_T *)(data \ + + (line * linesize) \ + + (x * bpp / 8)); \ + int col; \ + const int RSS = 32 - (RSB + GSB + BSB); \ + const int GSS = 32 - (GSB + BSB); \ + const int BSS = 32 - (BSB); \ + const 
uint32_t RSM = (~0U) << (32 - RSB); \ + const uint32_t GSM = (~0U) << (32 - GSB); \ + const uint32_t BSM = (~0U) << (32 - BSB); \ + const int RDS = 32 - (RDB + GDB + BDB); \ + const int GDS = 32 - (GDB + BDB); \ + const int BDS = 32 - (BDB); \ + const uint32_t RDM = (~0U) << (32 - RDB); \ + const uint32_t GDM = (~0U) << (32 - GDB); \ + const uint32_t BDM = (~0U) << (32 - BDB); \ + for (col = x ; col < (x+w) ; col++) { \ + uint32_t spix = *src; \ + *dst = (((spix << RSS) & RSM & RDM) >> RDS) | \ + (((spix << GSS) & GSM & GDM) >> GDS) | \ + (((spix << BSS) & BSM & BDM) >> BDS); \ + src = (SRC_T *) ((unsigned long) src + xenfb->depth / 8); \ + dst = (DST_T *) ((unsigned long) dst + bpp / 8); \ + } \ } @@ -657,7 +657,7 @@ static void xenfb_guest_copy(struct XenFB *xenfb, int x, int y, int w, int h) break; default: oops = 1; - } + } } if (oops) /* should not happen */ xen_pv_printf(&xenfb->c.xendev, 0, "%s: oops: convert %d -> %d bpp?\n", @@ -777,16 +777,24 @@ static void xenfb_update(void *opaque) xenfb->up_fullscreen = 0; } -static void xenfb_update_interval(void *opaque, uint64_t interval) +static void xenfb_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info) { struct XenFB *xenfb = opaque; + uint32_t refresh_rate; if (xenfb->feature_update) { #ifdef XENFB_TYPE_REFRESH_PERIOD if (xenfb_queue_full(xenfb)) { return; } - xenfb_send_refresh_period(xenfb, interval); + + refresh_rate = info->refresh_rate; + if (!refresh_rate) { + refresh_rate = 75; + } + + /* T = 1 / f = 1 [s*Hz] / f = 1000*1000 [ms*mHz] / f */ + xenfb_send_refresh_period(xenfb, 1000 * 1000 / refresh_rate); #endif } } @@ -808,60 +816,60 @@ static void xenfb_handle_events(struct XenFB *xenfb) if (prod - out_cons > XENFB_OUT_RING_LEN) { return; } - xen_rmb(); /* ensure we see ring contents up to prod */ + xen_rmb(); /* ensure we see ring contents up to prod */ for (cons = out_cons; cons != prod; cons++) { - union xenfb_out_event *event = &XENFB_OUT_RING_REF(page, cons); + union xenfb_out_event *event = &XENFB_OUT_RING_REF(page, cons); uint8_t type = event->type; - int x, y, w, h; + int x, y, w, h; - switch (type) { - case XENFB_TYPE_UPDATE: - if (xenfb->up_count == UP_QUEUE) - xenfb->up_fullscreen = 1; - if (xenfb->up_fullscreen) - break; - x = MAX(event->update.x, 0); - y = MAX(event->update.y, 0); - w = MIN(event->update.width, xenfb->width - x); - h = MIN(event->update.height, xenfb->height - y); - if (w < 0 || h < 0) { + switch (type) { + case XENFB_TYPE_UPDATE: + if (xenfb->up_count == UP_QUEUE) + xenfb->up_fullscreen = 1; + if (xenfb->up_fullscreen) + break; + x = MAX(event->update.x, 0); + y = MAX(event->update.y, 0); + w = MIN(event->update.width, xenfb->width - x); + h = MIN(event->update.height, xenfb->height - y); + if (w < 0 || h < 0) { xen_pv_printf(&xenfb->c.xendev, 1, "bogus update ignored\n"); - break; - } - if (x != event->update.x || + break; + } + if (x != event->update.x || y != event->update.y || - w != event->update.width || - h != event->update.height) { + w != event->update.width || + h != event->update.height) { xen_pv_printf(&xenfb->c.xendev, 1, "bogus update clipped\n"); - } - if (w == xenfb->width && h > xenfb->height / 2) { - /* scroll detector: updated more than 50% of the lines, - * don't bother keeping track of the rectangles then */ - xenfb->up_fullscreen = 1; - } else { - xenfb->up_rects[xenfb->up_count].x = x; - xenfb->up_rects[xenfb->up_count].y = y; - xenfb->up_rects[xenfb->up_count].w = w; - xenfb->up_rects[xenfb->up_count].h = h; - xenfb->up_count++; - } - break; + } + if (w == xenfb->width 
&& h > xenfb->height / 2) { + /* scroll detector: updated more than 50% of the lines, + * don't bother keeping track of the rectangles then */ + xenfb->up_fullscreen = 1; + } else { + xenfb->up_rects[xenfb->up_count].x = x; + xenfb->up_rects[xenfb->up_count].y = y; + xenfb->up_rects[xenfb->up_count].w = w; + xenfb->up_rects[xenfb->up_count].h = h; + xenfb->up_count++; + } + break; #ifdef XENFB_TYPE_RESIZE - case XENFB_TYPE_RESIZE: - if (xenfb_configure_fb(xenfb, xenfb->fb_len, - event->resize.width, - event->resize.height, - event->resize.depth, - xenfb->fb_len, - event->resize.offset, - event->resize.stride) < 0) - break; - xenfb_invalidate(xenfb); - break; + case XENFB_TYPE_RESIZE: + if (xenfb_configure_fb(xenfb, xenfb->fb_len, + event->resize.width, + event->resize.height, + event->resize.depth, + xenfb->fb_len, + event->resize.offset, + event->resize.stride) < 0) + break; + xenfb_invalidate(xenfb); + break; #endif - } + } } - xen_mb(); /* ensure we're done with ring contents */ + xen_mb(); /* ensure we're done with ring contents */ page->out_cons = cons; } @@ -881,32 +889,32 @@ static int fb_initialise(struct XenLegacyDevice *xendev) int rc; if (xenstore_read_fe_int(xendev, "videoram", &videoram) == -1) - videoram = 0; + videoram = 0; rc = common_bind(&fb->c); if (rc != 0) - return rc; + return rc; fb_page = fb->c.page; rc = xenfb_configure_fb(fb, videoram * MiB, - fb_page->width, fb_page->height, fb_page->depth, - fb_page->mem_length, 0, fb_page->line_length); + fb_page->width, fb_page->height, fb_page->depth, + fb_page->mem_length, 0, fb_page->line_length); if (rc != 0) - return rc; + return rc; rc = xenfb_map_fb(fb); if (rc != 0) - return rc; + return rc; fb->con = graphic_console_init(NULL, 0, &xenfb_ops, fb); if (xenstore_read_fe_int(xendev, "feature-update", &fb->feature_update) == -1) - fb->feature_update = 0; + fb->feature_update = 0; if (fb->feature_update) - xenstore_write_be_int(xendev, "request-update", 1); + xenstore_write_be_int(xendev, "request-update", 1); xen_pv_printf(xendev, 1, "feature-update=%d, videoram=%d\n", - fb->feature_update, videoram); + fb->feature_update, videoram); return 0; } @@ -983,5 +991,5 @@ struct XenDevOps xen_framebuffer_ops = { static const GraphicHwOps xenfb_ops = { .invalidate = xenfb_invalidate, .gfx_update = xenfb_update, - .update_interval = xenfb_update_interval, + .ui_info = xenfb_ui_info, }; diff --git a/hw/display/xlnx_dp.c b/hw/display/xlnx_dp.c index 2bb7a5441a..b0828d65aa 100644 --- a/hw/display/xlnx_dp.c +++ b/hw/display/xlnx_dp.c @@ -114,6 +114,7 @@ #define DP_TX_N_AUD (0x032C >> 2) #define DP_TX_AUDIO_EXT_DATA(n) ((0x0330 + 4 * n) >> 2) #define DP_INT_STATUS (0x03A0 >> 2) +#define DP_INT_VBLNK_START (1 << 13) #define DP_INT_MASK (0x03A4 >> 2) #define DP_INT_EN (0x03A8 >> 2) #define DP_INT_DS (0x03AC >> 2) @@ -260,7 +261,7 @@ typedef enum DPVideoFmt DPVideoFmt; static const VMStateDescription vmstate_dp = { .name = TYPE_XLNX_DP, - .version_id = 1, + .version_id = 2, .fields = (VMStateField[]){ VMSTATE_UINT32_ARRAY(core_registers, XlnxDPState, DP_CORE_REG_ARRAY_SIZE), @@ -270,10 +271,15 @@ static const VMStateDescription vmstate_dp = { DP_VBLEND_REG_ARRAY_SIZE), VMSTATE_UINT32_ARRAY(audio_registers, XlnxDPState, DP_AUDIO_REG_ARRAY_SIZE), + VMSTATE_PTIMER(vblank, XlnxDPState), VMSTATE_END_OF_LIST() } }; +#define DP_VBLANK_PTIMER_POLICY (PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD | \ + PTIMER_POLICY_CONTINUOUS_TRIGGER | \ + PTIMER_POLICY_NO_IMMEDIATE_TRIGGER) + static void xlnx_dp_update_irq(XlnxDPState *s); static uint64_t 
xlnx_dp_audio_read(void *opaque, hwaddr offset, unsigned size) @@ -526,8 +532,8 @@ static void xlnx_dp_aux_set_command(XlnxDPState *s, uint32_t value) qemu_log_mask(LOG_UNIMP, "xlnx_dp: Write i2c status not implemented\n"); break; default: - error_report("%s: invalid command: %u", __func__, cmd); - abort(); + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid command: %u", __func__, cmd); + return; } s->core_registers[DP_INTERRUPT_SIGNAL_STATE] |= 0x04; @@ -714,7 +720,11 @@ static uint64_t xlnx_dp_read(void *opaque, hwaddr offset, unsigned size) break; default: assert(offset <= (0x3AC >> 2)); - ret = s->core_registers[offset]; + if (offset == (0x3A8 >> 2) || offset == (0x3AC >> 2)) { + ret = s->core_registers[DP_INT_MASK]; + } else { + ret = s->core_registers[offset]; + } break; } @@ -769,6 +779,13 @@ static void xlnx_dp_write(void *opaque, hwaddr offset, uint64_t value, break; case DP_TRANSMITTER_ENABLE: s->core_registers[offset] = value & 0x01; + ptimer_transaction_begin(s->vblank); + if (value & 0x1) { + ptimer_run(s->vblank, 0); + } else { + ptimer_stop(s->vblank); + } + ptimer_transaction_commit(s->vblank); break; case DP_FORCE_SCRAMBLER_RESET: /* @@ -872,7 +889,7 @@ static void xlnx_dp_write(void *opaque, hwaddr offset, uint64_t value, xlnx_dp_update_irq(s); break; case DP_INT_DS: - s->core_registers[DP_INT_MASK] |= ~value; + s->core_registers[DP_INT_MASK] |= value; xlnx_dp_update_irq(s); break; default: @@ -1173,9 +1190,6 @@ static void xlnx_dp_update_display(void *opaque) return; } - s->core_registers[DP_INT_STATUS] |= (1 << 13); - xlnx_dp_update_irq(s); - xlnx_dpdma_trigger_vsync_irq(s->dpdma); /* @@ -1215,19 +1229,22 @@ static void xlnx_dp_init(Object *obj) SysBusDevice *sbd = SYS_BUS_DEVICE(obj); XlnxDPState *s = XLNX_DP(obj); - memory_region_init(&s->container, obj, TYPE_XLNX_DP, 0xC050); + memory_region_init(&s->container, obj, TYPE_XLNX_DP, DP_CONTAINER_SIZE); memory_region_init_io(&s->core_iomem, obj, &dp_ops, s, TYPE_XLNX_DP - ".core", 0x3AF); - memory_region_add_subregion(&s->container, 0x0000, &s->core_iomem); + ".core", sizeof(s->core_registers)); + memory_region_add_subregion(&s->container, DP_CORE_REG_OFFSET, + &s->core_iomem); memory_region_init_io(&s->vblend_iomem, obj, &vblend_ops, s, TYPE_XLNX_DP - ".v_blend", 0x1DF); - memory_region_add_subregion(&s->container, 0xA000, &s->vblend_iomem); + ".v_blend", sizeof(s->vblend_registers)); + memory_region_add_subregion(&s->container, DP_VBLEND_REG_OFFSET, + &s->vblend_iomem); memory_region_init_io(&s->avbufm_iomem, obj, &avbufm_ops, s, TYPE_XLNX_DP - ".av_buffer_manager", 0x238); - memory_region_add_subregion(&s->container, 0xB000, &s->avbufm_iomem); + ".av_buffer_manager", sizeof(s->avbufm_registers)); + memory_region_add_subregion(&s->container, DP_AVBUF_REG_OFFSET, + &s->avbufm_iomem); memory_region_init_io(&s->audio_iomem, obj, &audio_ops, s, TYPE_XLNX_DP ".audio", sizeof(s->audio_registers)); @@ -1268,6 +1285,14 @@ static void xlnx_dp_finalize(Object *obj) fifo8_destroy(&s->rx_fifo); } +static void vblank_hit(void *opaque) +{ + XlnxDPState *s = XLNX_DP(opaque); + + s->core_registers[DP_INT_STATUS] |= DP_INT_VBLNK_START; + xlnx_dp_update_irq(s); +} + static void xlnx_dp_realize(DeviceState *dev, Error **errp) { XlnxDPState *s = XLNX_DP(dev); @@ -1302,6 +1327,10 @@ static void xlnx_dp_realize(DeviceState *dev, Error **errp) &as); AUD_set_volume_out(s->amixer_output_stream, 0, 255, 255); xlnx_dp_audio_activate(s); + s->vblank = ptimer_init(vblank_hit, s, DP_VBLANK_PTIMER_POLICY); + ptimer_transaction_begin(s->vblank); + 
ptimer_set_freq(s->vblank, 30); + ptimer_transaction_commit(s->vblank); } static void xlnx_dp_reset(DeviceState *dev) diff --git a/hw/dma/bcm2835_dma.c b/hw/dma/bcm2835_dma.c index eb0002a2b9..5e9306110d 100644 --- a/hw/dma/bcm2835_dma.c +++ b/hw/dma/bcm2835_dma.c @@ -394,7 +394,7 @@ static void bcm2835_dma_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_bcm2835_dma; } -static TypeInfo bcm2835_dma_info = { +static const TypeInfo bcm2835_dma_info = { .name = TYPE_BCM2835_DMA, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(BCM2835DMAState), diff --git a/hw/dma/pl330.c b/hw/dma/pl330.c index 944ba296b0..e5d521c329 100644 --- a/hw/dma/pl330.c +++ b/hw/dma/pl330.c @@ -15,7 +15,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "hw/irq.h" #include "hw/qdev-properties.h" #include "hw/sysbus.h" @@ -269,6 +268,9 @@ struct PL330State { uint8_t num_faulting; uint8_t periph_busy[PL330_PERIPH_NUM]; + /* Memory region that DMA operation access */ + MemoryRegion *mem_mr; + AddressSpace *mem_as; }; #define TYPE_PL330 "pl330" @@ -1108,7 +1110,8 @@ static inline const PL330InsnDesc *pl330_fetch_insn(PL330Chan *ch) uint8_t opcode; int i; - dma_memory_read(&address_space_memory, ch->pc, &opcode, 1); + dma_memory_read(ch->parent->mem_as, ch->pc, &opcode, 1, + MEMTXATTRS_UNSPECIFIED); for (i = 0; insn_desc[i].size; i++) { if ((opcode & insn_desc[i].opmask) == insn_desc[i].opcode) { return &insn_desc[i]; @@ -1122,7 +1125,8 @@ static inline void pl330_exec_insn(PL330Chan *ch, const PL330InsnDesc *insn) uint8_t buf[PL330_INSN_MAXSIZE]; assert(insn->size <= PL330_INSN_MAXSIZE); - dma_memory_read(&address_space_memory, ch->pc, buf, insn->size); + dma_memory_read(ch->parent->mem_as, ch->pc, buf, insn->size, + MEMTXATTRS_UNSPECIFIED); insn->exec(ch, buf[0], &buf[1], insn->size - 1); } @@ -1186,7 +1190,8 @@ static int pl330_exec_cycle(PL330Chan *channel) if (q != NULL && q->len <= pl330_fifo_num_free(&s->fifo)) { int len = q->len - (q->addr & (q->len - 1)); - dma_memory_read(&address_space_memory, q->addr, buf, len); + dma_memory_read(s->mem_as, q->addr, buf, len, + MEMTXATTRS_UNSPECIFIED); trace_pl330_exec_cycle(q->addr, len); if (trace_event_get_state_backends(TRACE_PL330_HEXDUMP)) { pl330_hexdump(buf, len); @@ -1217,7 +1222,8 @@ static int pl330_exec_cycle(PL330Chan *channel) fifo_res = pl330_fifo_get(&s->fifo, buf, len, q->tag); } if (fifo_res == PL330_FIFO_OK || q->z) { - dma_memory_write(&address_space_memory, q->addr, buf, len); + dma_memory_write(s->mem_as, q->addr, buf, len, + MEMTXATTRS_UNSPECIFIED); trace_pl330_exec_cycle(q->addr, len); if (trace_event_get_state_backends(TRACE_PL330_HEXDUMP)) { pl330_hexdump(buf, len); @@ -1322,7 +1328,7 @@ static void pl330_debug_exec(PL330State *s) } if (!insn) { pl330_fault(ch, PL330_FAULT_UNDEF_INSTR | PL330_FAULT_DBG_INSTR); - return ; + return; } ch->stall = 0; insn->exec(ch, opcode, args, insn->size - 1); @@ -1562,6 +1568,18 @@ static void pl330_realize(DeviceState *dev, Error **errp) "dma", PL330_IOMEM_SIZE); sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); + if (!s->mem_mr) { + error_setg(errp, "'memory' link is not set"); + return; + } else if (s->mem_mr == get_system_memory()) { + /* Avoid creating new AS for system memory. */ + s->mem_as = &address_space_memory; + } else { + s->mem_as = g_new0(AddressSpace, 1); + address_space_init(s->mem_as, s->mem_mr, + memory_region_name(s->mem_mr)); + } + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, pl330_exec_cycle_timer, s); s->cfg[0] = (s->mgr_ns_at_rst ? 
0x4 : 0) | @@ -1656,6 +1674,9 @@ static Property pl330_properties[] = { DEFINE_PROP_UINT8("rd_q_dep", PL330State, rd_q_dep, 16), DEFINE_PROP_UINT16("data_buffer_dep", PL330State, data_buffer_dep, 256), + DEFINE_PROP_LINK("memory", PL330State, mem_mr, + TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/dma/rc4030.c b/hw/dma/rc4030.c index e4d2f1725b..aa1d323a36 100644 --- a/hw/dma/rc4030.c +++ b/hw/dma/rc4030.c @@ -646,8 +646,8 @@ static rc4030_dma *rc4030_allocate_dmas(void *opaque, int n) struct rc4030DMAState *p; int i; - s = (rc4030_dma *)g_new0(rc4030_dma, n); - p = (struct rc4030DMAState *)g_new0(struct rc4030DMAState, n); + s = g_new0(rc4030_dma, n); + p = g_new0(struct rc4030DMAState, n); for (i = 0; i < n; i++) { p->opaque = opaque; p->n = i; diff --git a/hw/dma/sifive_pdma.c b/hw/dma/sifive_pdma.c index 9b2ac2017d..1dd88f3479 100644 --- a/hw/dma/sifive_pdma.c +++ b/hw/dma/sifive_pdma.c @@ -54,6 +54,13 @@ #define DMA_EXEC_DST 0x110 #define DMA_EXEC_SRC 0x118 +/* + * FU540/FU740 docs are incorrect with NextConfig.wsize/rsize reset values. + * The reset values tested on Unleashed/Unmatched boards are 6 instead of 0. + */ +#define CONFIG_WRSZ_DEFAULT 6 +#define CONFIG_RDSZ_DEFAULT 6 + enum dma_chan_state { DMA_CHAN_STATE_IDLE, DMA_CHAN_STATE_STARTED, @@ -67,13 +74,13 @@ static void sifive_pdma_run(SiFivePDMAState *s, int ch) uint64_t dst = s->chan[ch].next_dst; uint64_t src = s->chan[ch].next_src; uint32_t config = s->chan[ch].next_config; - int wsize, rsize, size; + int wsize, rsize, size, remainder; uint8_t buf[64]; int n; /* do nothing if bytes to transfer is zero */ if (!bytes) { - goto error; + goto done; } /* @@ -99,11 +106,7 @@ static void sifive_pdma_run(SiFivePDMAState *s, int ch) size = 6; } size = 1 << size; - - /* the bytes to transfer should be multiple of transaction size */ - if (bytes % size) { - goto error; - } + remainder = bytes % size; /* indicate a DMA transfer is started */ s->chan[ch].state = DMA_CHAN_STATE_STARTED; @@ -124,10 +127,13 @@ static void sifive_pdma_run(SiFivePDMAState *s, int ch) s->chan[ch].exec_bytes -= size; } - /* indicate a DMA transfer is done */ - s->chan[ch].state = DMA_CHAN_STATE_DONE; - s->chan[ch].control &= ~CONTROL_RUN; - s->chan[ch].control |= CONTROL_DONE; + if (remainder) { + cpu_physical_memory_read(s->chan[ch].exec_src, buf, remainder); + cpu_physical_memory_write(s->chan[ch].exec_dst, buf, remainder); + s->chan[ch].exec_src += remainder; + s->chan[ch].exec_dst += remainder; + s->chan[ch].exec_bytes -= remainder; + } /* reload exec_ registers if repeat is required */ if (s->chan[ch].next_config & CONFIG_REPEAT) { @@ -136,6 +142,11 @@ static void sifive_pdma_run(SiFivePDMAState *s, int ch) s->chan[ch].exec_src = src; } +done: + /* indicate a DMA transfer is done */ + s->chan[ch].state = DMA_CHAN_STATE_DONE; + s->chan[ch].control &= ~CONTROL_RUN; + s->chan[ch].control |= CONTROL_DONE; return; error: @@ -166,6 +177,101 @@ static inline void sifive_pdma_update_irq(SiFivePDMAState *s, int ch) s->chan[ch].state = DMA_CHAN_STATE_IDLE; } +static uint64_t sifive_pdma_readq(SiFivePDMAState *s, int ch, hwaddr offset) +{ + uint64_t val = 0; + + offset &= 0xfff; + switch (offset) { + case DMA_NEXT_BYTES: + val = s->chan[ch].next_bytes; + break; + case DMA_NEXT_DST: + val = s->chan[ch].next_dst; + break; + case DMA_NEXT_SRC: + val = s->chan[ch].next_src; + break; + case DMA_EXEC_BYTES: + val = s->chan[ch].exec_bytes; + break; + case DMA_EXEC_DST: + val = s->chan[ch].exec_dst; + break; + case DMA_EXEC_SRC: + val 
= s->chan[ch].exec_src; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Unexpected 64-bit access to 0x%" HWADDR_PRIX "\n", + __func__, offset); + break; + } + + return val; +} + +static uint32_t sifive_pdma_readl(SiFivePDMAState *s, int ch, hwaddr offset) +{ + uint32_t val = 0; + + offset &= 0xfff; + switch (offset) { + case DMA_CONTROL: + val = s->chan[ch].control; + break; + case DMA_NEXT_CONFIG: + val = s->chan[ch].next_config; + break; + case DMA_NEXT_BYTES: + val = extract64(s->chan[ch].next_bytes, 0, 32); + break; + case DMA_NEXT_BYTES + 4: + val = extract64(s->chan[ch].next_bytes, 32, 32); + break; + case DMA_NEXT_DST: + val = extract64(s->chan[ch].next_dst, 0, 32); + break; + case DMA_NEXT_DST + 4: + val = extract64(s->chan[ch].next_dst, 32, 32); + break; + case DMA_NEXT_SRC: + val = extract64(s->chan[ch].next_src, 0, 32); + break; + case DMA_NEXT_SRC + 4: + val = extract64(s->chan[ch].next_src, 32, 32); + break; + case DMA_EXEC_CONFIG: + val = s->chan[ch].exec_config; + break; + case DMA_EXEC_BYTES: + val = extract64(s->chan[ch].exec_bytes, 0, 32); + break; + case DMA_EXEC_BYTES + 4: + val = extract64(s->chan[ch].exec_bytes, 32, 32); + break; + case DMA_EXEC_DST: + val = extract64(s->chan[ch].exec_dst, 0, 32); + break; + case DMA_EXEC_DST + 4: + val = extract64(s->chan[ch].exec_dst, 32, 32); + break; + case DMA_EXEC_SRC: + val = extract64(s->chan[ch].exec_src, 0, 32); + break; + case DMA_EXEC_SRC + 4: + val = extract64(s->chan[ch].exec_src, 32, 32); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Unexpected 32-bit access to 0x%" HWADDR_PRIX "\n", + __func__, offset); + break; + } + + return val; +} + static uint64_t sifive_pdma_read(void *opaque, hwaddr offset, unsigned size) { SiFivePDMAState *s = opaque; @@ -178,44 +284,132 @@ static uint64_t sifive_pdma_read(void *opaque, hwaddr offset, unsigned size) return 0; } - offset &= 0xfff; - switch (offset) { - case DMA_CONTROL: - val = s->chan[ch].control; + switch (size) { + case 8: + val = sifive_pdma_readq(s, ch, offset); break; - case DMA_NEXT_CONFIG: - val = s->chan[ch].next_config; - break; - case DMA_NEXT_BYTES: - val = s->chan[ch].next_bytes; - break; - case DMA_NEXT_DST: - val = s->chan[ch].next_dst; - break; - case DMA_NEXT_SRC: - val = s->chan[ch].next_src; - break; - case DMA_EXEC_CONFIG: - val = s->chan[ch].exec_config; - break; - case DMA_EXEC_BYTES: - val = s->chan[ch].exec_bytes; - break; - case DMA_EXEC_DST: - val = s->chan[ch].exec_dst; - break; - case DMA_EXEC_SRC: - val = s->chan[ch].exec_src; + case 4: + val = sifive_pdma_readl(s, ch, offset); break; default: - qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", - __func__, offset); - break; + qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid read size %u to PDMA\n", + __func__, size); + return 0; } return val; } +static void sifive_pdma_writeq(SiFivePDMAState *s, int ch, + hwaddr offset, uint64_t value) +{ + offset &= 0xfff; + switch (offset) { + case DMA_NEXT_BYTES: + s->chan[ch].next_bytes = value; + break; + case DMA_NEXT_DST: + s->chan[ch].next_dst = value; + break; + case DMA_NEXT_SRC: + s->chan[ch].next_src = value; + break; + case DMA_EXEC_BYTES: + case DMA_EXEC_DST: + case DMA_EXEC_SRC: + /* these are read-only registers */ + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Unexpected 64-bit access to 0x%" HWADDR_PRIX "\n", + __func__, offset); + break; + } +} + +static void sifive_pdma_writel(SiFivePDMAState *s, int ch, + hwaddr offset, uint32_t value) +{ + bool claimed, run; + + offset &= 0xfff; + switch 
(offset) { + case DMA_CONTROL: + claimed = !!(s->chan[ch].control & CONTROL_CLAIM); + run = !!(s->chan[ch].control & CONTROL_RUN); + + if (!claimed && (value & CONTROL_CLAIM)) { + /* reset Next* registers */ + s->chan[ch].next_config = (CONFIG_RDSZ_DEFAULT << CONFIG_RDSZ_SHIFT) | + (CONFIG_WRSZ_DEFAULT << CONFIG_WRSZ_SHIFT); + s->chan[ch].next_bytes = 0; + s->chan[ch].next_dst = 0; + s->chan[ch].next_src = 0; + } + + /* claim bit can only be cleared when run is low */ + if (run && !(value & CONTROL_CLAIM)) { + value |= CONTROL_CLAIM; + } + + s->chan[ch].control = value; + + /* + * If channel was not claimed before run bit is set, + * or if the channel is disclaimed when run was low, + * DMA won't run. + */ + if (!claimed || (!run && !(value & CONTROL_CLAIM))) { + s->chan[ch].control &= ~CONTROL_RUN; + return; + } + + if (value & CONTROL_RUN) { + sifive_pdma_run(s, ch); + } + + sifive_pdma_update_irq(s, ch); + break; + case DMA_NEXT_CONFIG: + s->chan[ch].next_config = value; + break; + case DMA_NEXT_BYTES: + s->chan[ch].next_bytes = + deposit64(s->chan[ch].next_bytes, 0, 32, value); + break; + case DMA_NEXT_BYTES + 4: + s->chan[ch].next_bytes = + deposit64(s->chan[ch].next_bytes, 32, 32, value); + break; + case DMA_NEXT_DST: + s->chan[ch].next_dst = deposit64(s->chan[ch].next_dst, 0, 32, value); + break; + case DMA_NEXT_DST + 4: + s->chan[ch].next_dst = deposit64(s->chan[ch].next_dst, 32, 32, value); + break; + case DMA_NEXT_SRC: + s->chan[ch].next_src = deposit64(s->chan[ch].next_src, 0, 32, value); + break; + case DMA_NEXT_SRC + 4: + s->chan[ch].next_src = deposit64(s->chan[ch].next_src, 32, 32, value); + break; + case DMA_EXEC_CONFIG: + case DMA_EXEC_BYTES: + case DMA_EXEC_BYTES + 4: + case DMA_EXEC_DST: + case DMA_EXEC_DST + 4: + case DMA_EXEC_SRC: + case DMA_EXEC_SRC + 4: + /* these are read-only registers */ + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Unexpected 32-bit access to 0x%" HWADDR_PRIX "\n", + __func__, offset); + break; + } +} + static void sifive_pdma_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { @@ -228,38 +422,16 @@ static void sifive_pdma_write(void *opaque, hwaddr offset, return; } - offset &= 0xfff; - switch (offset) { - case DMA_CONTROL: - s->chan[ch].control = value; - - if (value & CONTROL_RUN) { - sifive_pdma_run(s, ch); - } - - sifive_pdma_update_irq(s, ch); + switch (size) { + case 8: + sifive_pdma_writeq(s, ch, offset, value); break; - case DMA_NEXT_CONFIG: - s->chan[ch].next_config = value; - break; - case DMA_NEXT_BYTES: - s->chan[ch].next_bytes = value; - break; - case DMA_NEXT_DST: - s->chan[ch].next_dst = value; - break; - case DMA_NEXT_SRC: - s->chan[ch].next_src = value; - break; - case DMA_EXEC_CONFIG: - case DMA_EXEC_BYTES: - case DMA_EXEC_DST: - case DMA_EXEC_SRC: - /* these are read-only registers */ + case 4: + sifive_pdma_writel(s, ch, offset, (uint32_t) value); break; default: - qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", - __func__, offset); + qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid write size %u to PDMA\n", + __func__, size); break; } } @@ -272,6 +444,10 @@ static const MemoryRegionOps sifive_pdma_ops = { .impl = { .min_access_size = 4, .max_access_size = 8, + }, + .valid = { + .min_access_size = 4, + .max_access_size = 8, } }; diff --git a/hw/dma/sparc32_dma.c b/hw/dma/sparc32_dma.c index 03bc500878..0ef13c5e9a 100644 --- a/hw/dma/sparc32_dma.c +++ b/hw/dma/sparc32_dma.c @@ -81,11 +81,11 @@ void ledma_memory_read(void *opaque, hwaddr addr, addr |= s->dmaregs[3]; 
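The sifive_pdma_run() change above drops the requirement that the transfer length be a multiple of the transaction size: full-size bursts are copied first and a trailing partial copy handles the remainder. A minimal standalone sketch of that split, assuming plain ISO C, with memcpy() standing in for cpu_physical_memory_read()/cpu_physical_memory_write() and a local buffer in place of guest memory (pdma_copy and the test values are made up for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy 'bytes' from src to dst in bursts of (1 << wsize) bytes,
 * then copy any trailing remainder, mirroring the patched DMA loop. */
static void pdma_copy(uint8_t *dst, const uint8_t *src, uint64_t bytes,
                      unsigned wsize)
{
    uint64_t size = 1ULL << (wsize > 6 ? 6 : wsize); /* clamp like the model */
    uint64_t remainder = bytes % size;
    uint64_t done = 0;

    while (done + size <= bytes) {
        memcpy(dst + done, src + done, size);       /* full-size burst */
        done += size;
    }
    if (remainder) {
        memcpy(dst + done, src + done, remainder);  /* trailing partial burst */
    }
}

int main(void)
{
    uint8_t src[100], dst[100] = { 0 };

    for (int i = 0; i < 100; i++) {
        src[i] = (uint8_t)i;
    }
    /* wsize 6 -> 64-byte bursts: one full burst plus a 36-byte remainder */
    pdma_copy(dst, src, sizeof(src), 6);
    printf("%s\n", memcmp(src, dst, sizeof(src)) ? "mismatch" : "ok");
    return 0;
}

Before the change a non-multiple length was rejected outright (goto error); the tail copy lets such transfers complete and the done: path marks the channel DONE in both cases.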
trace_ledma_memory_read(addr, len); if (do_bswap) { - dma_memory_read(&is->iommu_as, addr, buf, len); + dma_memory_read(&is->iommu_as, addr, buf, len, MEMTXATTRS_UNSPECIFIED); } else { addr &= ~1; len &= ~1; - dma_memory_read(&is->iommu_as, addr, buf, len); + dma_memory_read(&is->iommu_as, addr, buf, len, MEMTXATTRS_UNSPECIFIED); for(i = 0; i < len; i += 2) { bswap16s((uint16_t *)(buf + i)); } @@ -103,7 +103,8 @@ void ledma_memory_write(void *opaque, hwaddr addr, addr |= s->dmaregs[3]; trace_ledma_memory_write(addr, len); if (do_bswap) { - dma_memory_write(&is->iommu_as, addr, buf, len); + dma_memory_write(&is->iommu_as, addr, buf, len, + MEMTXATTRS_UNSPECIFIED); } else { addr &= ~1; len &= ~1; @@ -114,7 +115,8 @@ void ledma_memory_write(void *opaque, hwaddr addr, for(i = 0; i < l; i += 2) { tmp_buf[i >> 1] = bswap16(*(uint16_t *)(buf + i)); } - dma_memory_write(&is->iommu_as, addr, tmp_buf, l); + dma_memory_write(&is->iommu_as, addr, tmp_buf, l, + MEMTXATTRS_UNSPECIFIED); len -= l; buf += l; addr += l; @@ -148,7 +150,8 @@ void espdma_memory_read(void *opaque, uint8_t *buf, int len) IOMMUState *is = (IOMMUState *)s->iommu; trace_espdma_memory_read(s->dmaregs[1], len); - dma_memory_read(&is->iommu_as, s->dmaregs[1], buf, len); + dma_memory_read(&is->iommu_as, s->dmaregs[1], buf, len, + MEMTXATTRS_UNSPECIFIED); s->dmaregs[1] += len; } @@ -158,7 +161,8 @@ void espdma_memory_write(void *opaque, uint8_t *buf, int len) IOMMUState *is = (IOMMUState *)s->iommu; trace_espdma_memory_write(s->dmaregs[1], len); - dma_memory_write(&is->iommu_as, s->dmaregs[1], buf, len); + dma_memory_write(&is->iommu_as, s->dmaregs[1], buf, len, + MEMTXATTRS_UNSPECIFIED); s->dmaregs[1] += len; } diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c index bc383f53cc..99e50f65e2 100644 --- a/hw/dma/xilinx_axidma.c +++ b/hw/dma/xilinx_axidma.c @@ -168,6 +168,11 @@ static inline int stream_idle(struct Stream *s) return !!(s->regs[R_DMASR] & DMASR_IDLE); } +static inline int stream_halted(struct Stream *s) +{ + return !!(s->regs[R_DMASR] & DMASR_HALTED); +} + static void stream_reset(struct Stream *s) { s->regs[R_DMASR] = DMASR_HALTED; /* starts up halted. 
*/ @@ -269,7 +274,7 @@ static void stream_process_mem2s(struct Stream *s, StreamSink *tx_data_dev, uint64_t addr; bool eop; - if (!stream_running(s) || stream_idle(s)) { + if (!stream_running(s) || stream_idle(s) || stream_halted(s)) { return; } @@ -326,7 +331,7 @@ static size_t stream_process_s2mem(struct Stream *s, unsigned char *buf, unsigned int rxlen; size_t pos = 0; - if (!stream_running(s) || stream_idle(s)) { + if (!stream_running(s) || stream_idle(s) || stream_halted(s)) { return 0; } @@ -407,7 +412,7 @@ xilinx_axidma_data_stream_can_push(StreamSink *obj, XilinxAXIDMAStreamSink *ds = XILINX_AXI_DMA_DATA_STREAM(obj); struct Stream *s = &ds->dma->streams[1]; - if (!stream_running(s) || stream_idle(s)) { + if (!stream_running(s) || stream_idle(s) || stream_halted(s)) { ds->dma->notify = notify; ds->dma->notify_opaque = notify_opaque; return false; @@ -552,7 +557,7 @@ static void xilinx_axidma_realize(DeviceState *dev, Error **errp) st->dma = s; st->nr = i; - st->ptimer = ptimer_init(timer_hit, st, PTIMER_POLICY_DEFAULT); + st->ptimer = ptimer_init(timer_hit, st, PTIMER_POLICY_LEGACY); ptimer_transaction_begin(st->ptimer); ptimer_set_freq(st->ptimer, s->freqhz); ptimer_transaction_commit(st->ptimer); diff --git a/hw/dma/xlnx-zdma.c b/hw/dma/xlnx-zdma.c index fa38a55634..4eb7f66e9f 100644 --- a/hw/dma/xlnx-zdma.c +++ b/hw/dma/xlnx-zdma.c @@ -320,9 +320,9 @@ static bool zdma_load_descriptor(XlnxZDMA *s, uint64_t addr, return false; } - descr->addr = address_space_ldq_le(s->dma_as, addr, s->attr, NULL); - descr->size = address_space_ldl_le(s->dma_as, addr + 8, s->attr, NULL); - descr->attr = address_space_ldl_le(s->dma_as, addr + 12, s->attr, NULL); + descr->addr = address_space_ldq_le(&s->dma_as, addr, s->attr, NULL); + descr->size = address_space_ldl_le(&s->dma_as, addr + 8, s->attr, NULL); + descr->attr = address_space_ldl_le(&s->dma_as, addr + 12, s->attr, NULL); return true; } @@ -354,7 +354,7 @@ static void zdma_update_descr_addr(XlnxZDMA *s, bool type, } else { addr = zdma_get_regaddr64(s, basereg); addr += sizeof(s->dsc_dst); - next = address_space_ldq_le(s->dma_as, addr, s->attr, NULL); + next = address_space_ldq_le(&s->dma_as, addr, s->attr, NULL); } zdma_put_regaddr64(s, basereg, next); @@ -421,7 +421,7 @@ static void zdma_write_dst(XlnxZDMA *s, uint8_t *buf, uint32_t len) } } - address_space_write(s->dma_as, s->dsc_dst.addr, s->attr, buf, dlen); + address_space_write(&s->dma_as, s->dsc_dst.addr, s->attr, buf, dlen); if (burst_type == AXI_BURST_INCR) { s->dsc_dst.addr += dlen; } @@ -497,7 +497,7 @@ static void zdma_process_descr(XlnxZDMA *s) len = s->cfg.bus_width / 8; } } else { - address_space_read(s->dma_as, src_addr, s->attr, s->buf, len); + address_space_read(&s->dma_as, src_addr, s->attr, s->buf, len); if (burst_type == AXI_BURST_INCR) { src_addr += len; } @@ -765,6 +765,12 @@ static void zdma_realize(DeviceState *dev, Error **errp) XlnxZDMA *s = XLNX_ZDMA(dev); unsigned int i; + if (!s->dma_mr) { + error_setg(errp, TYPE_XLNX_ZDMA " 'dma' link not set"); + return; + } + address_space_init(&s->dma_as, s->dma_mr, "zdma-dma"); + for (i = 0; i < ARRAY_SIZE(zdma_regs_info); ++i) { RegisterInfo *r = &s->regs_info[zdma_regs_info[i].addr / 4]; @@ -777,12 +783,6 @@ static void zdma_realize(DeviceState *dev, Error **errp) }; } - if (s->dma_mr) { - s->dma_as = g_malloc0(sizeof(AddressSpace)); - address_space_init(s->dma_as, s->dma_mr, NULL); - } else { - s->dma_as = &address_space_memory; - } s->attr = MEMTXATTRS_UNSPECIFIED; } @@ -806,7 +806,6 @@ static const VMStateDescription 
vmstate_zdma = { .name = TYPE_XLNX_ZDMA, .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxZDMA, ZDMA_R_MAX), VMSTATE_UINT32(state, XlnxZDMA), diff --git a/hw/dma/xlnx-zynq-devcfg.c b/hw/dma/xlnx-zynq-devcfg.c index e33112b6f0..f5ad1a0d22 100644 --- a/hw/dma/xlnx-zynq-devcfg.c +++ b/hw/dma/xlnx-zynq-devcfg.c @@ -161,12 +161,14 @@ static void xlnx_zynq_devcfg_dma_go(XlnxZynqDevcfg *s) btt = MIN(btt, dmah->dest_len); } DB_PRINT("reading %x bytes from %x\n", btt, dmah->src_addr); - dma_memory_read(&address_space_memory, dmah->src_addr, buf, btt); + dma_memory_read(&address_space_memory, dmah->src_addr, buf, btt, + MEMTXATTRS_UNSPECIFIED); dmah->src_len -= btt; dmah->src_addr += btt; if (loopback && (dmah->src_len || dmah->dest_len)) { DB_PRINT("writing %x bytes from %x\n", btt, dmah->dest_addr); - dma_memory_write(&address_space_memory, dmah->dest_addr, buf, btt); + dma_memory_write(&address_space_memory, dmah->dest_addr, buf, btt, + MEMTXATTRS_UNSPECIFIED); dmah->dest_len -= btt; dmah->dest_addr += btt; } diff --git a/hw/dma/xlnx_csu_dma.c b/hw/dma/xlnx_csu_dma.c index 797b4fed8f..1ce52ea5a2 100644 --- a/hw/dma/xlnx_csu_dma.c +++ b/hw/dma/xlnx_csu_dma.c @@ -201,11 +201,11 @@ static uint32_t xlnx_csu_dma_read(XlnxCSUDMA *s, uint8_t *buf, uint32_t len) for (i = 0; i < len && (result == MEMTX_OK); i += s->width) { uint32_t mlen = MIN(len - i, s->width); - result = address_space_rw(s->dma_as, addr, s->attr, + result = address_space_rw(&s->dma_as, addr, s->attr, buf + i, mlen, false); } } else { - result = address_space_rw(s->dma_as, addr, s->attr, buf, len, false); + result = address_space_rw(&s->dma_as, addr, s->attr, buf, len, false); } if (result == MEMTX_OK) { @@ -232,12 +232,12 @@ static uint32_t xlnx_csu_dma_write(XlnxCSUDMA *s, uint8_t *buf, uint32_t len) for (i = 0; i < len && (result == MEMTX_OK); i += s->width) { uint32_t mlen = MIN(len - i, s->width); - result = address_space_rw(s->dma_as, addr, s->attr, + result = address_space_rw(&s->dma_as, addr, s->attr, buf, mlen, true); buf += mlen; } } else { - result = address_space_rw(s->dma_as, addr, s->attr, buf, len, true); + result = address_space_rw(&s->dma_as, addr, s->attr, buf, len, true); } if (result != MEMTX_OK) { @@ -472,6 +472,20 @@ static uint64_t addr_msb_pre_write(RegisterInfo *reg, uint64_t val) return val & R_ADDR_MSB_ADDR_MSB_MASK; } +static MemTxResult xlnx_csu_dma_class_read(XlnxCSUDMA *s, hwaddr addr, + uint32_t len) +{ + RegisterInfo *reg = &s->regs_info[R_SIZE]; + uint64_t we = MAKE_64BIT_MASK(0, 4 * 8); + + s->regs[R_ADDR] = addr; + s->regs[R_ADDR_MSB] = (uint64_t)addr >> 32; + + register_write(reg, len, we, object_get_typename(OBJECT(s)), false); + + return (s->regs[R_SIZE] == 0) ? 
MEMTX_OK : MEMTX_ERROR; +} + static const RegisterAccessInfo *xlnx_csu_dma_regs_info[] = { #define DMACH_REGINFO(NAME, snd) \ (const RegisterAccessInfo []) { \ @@ -626,6 +640,17 @@ static void xlnx_csu_dma_realize(DeviceState *dev, Error **errp) XlnxCSUDMA *s = XLNX_CSU_DMA(dev); RegisterInfoArray *reg_array; + if (!s->is_dst && !s->tx_dev) { + error_setg(errp, "zynqmp.csu-dma: Stream not connected"); + return; + } + + if (!s->dma_mr) { + error_setg(errp, TYPE_XLNX_CSU_DMA " 'dma' link not set"); + return; + } + address_space_init(&s->dma_as, s->dma_mr, "csu-dma"); + reg_array = register_init_block32(dev, xlnx_csu_dma_regs_info[!!s->is_dst], XLNX_CSU_DMA_R_MAX, @@ -640,20 +665,8 @@ static void xlnx_csu_dma_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq); - if (!s->is_dst && !s->tx_dev) { - error_setg(errp, "zynqmp.csu-dma: Stream not connected"); - return; - } - s->src_timer = ptimer_init(xlnx_csu_dma_src_timeout_hit, - s, PTIMER_POLICY_DEFAULT); - - if (s->dma_mr) { - s->dma_as = g_malloc0(sizeof(AddressSpace)); - address_space_init(s->dma_as, s->dma_mr, NULL); - } else { - s->dma_as = &address_space_memory; - } + s, PTIMER_POLICY_LEGACY); s->attr = MEMTXATTRS_UNSPECIFIED; @@ -664,7 +677,6 @@ static const VMStateDescription vmstate_xlnx_csu_dma = { .name = TYPE_XLNX_CSU_DMA, .version_id = 0, .minimum_version_id = 0, - .minimum_version_id_old = 0, .fields = (VMStateField[]) { VMSTATE_PTIMER(src_timer, XlnxCSUDMA), VMSTATE_UINT16(width, XlnxCSUDMA), @@ -697,6 +709,7 @@ static void xlnx_csu_dma_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); StreamSinkClass *ssc = STREAM_SINK_CLASS(klass); + XlnxCSUDMAClass *xcdc = XLNX_CSU_DMA_CLASS(klass); dc->reset = xlnx_csu_dma_reset; dc->realize = xlnx_csu_dma_realize; @@ -705,6 +718,8 @@ static void xlnx_csu_dma_class_init(ObjectClass *klass, void *data) ssc->push = xlnx_csu_dma_stream_push; ssc->can_push = xlnx_csu_dma_stream_can_push; + + xcdc->read = xlnx_csu_dma_class_read; } static void xlnx_csu_dma_init(Object *obj) @@ -729,6 +744,7 @@ static const TypeInfo xlnx_csu_dma_info = { .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(XlnxCSUDMA), .class_init = xlnx_csu_dma_class_init, + .class_size = sizeof(XlnxCSUDMAClass), .instance_init = xlnx_csu_dma_init, .interfaces = (InterfaceInfo[]) { { TYPE_STREAM_SINK }, diff --git a/hw/dma/xlnx_dpdma.c b/hw/dma/xlnx_dpdma.c index 967548abd3..dd66be5265 100644 --- a/hw/dma/xlnx_dpdma.c +++ b/hw/dma/xlnx_dpdma.c @@ -23,7 +23,7 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" +#include "qemu/cutils.h" #include "qemu/log.h" #include "qemu/module.h" #include "hw/dma/xlnx_dpdma.h" @@ -652,7 +652,7 @@ size_t xlnx_dpdma_start_operation(XlnxDPDMAState *s, uint8_t channel, } if (dma_memory_read(&address_space_memory, desc_addr, &desc, - sizeof(DPDMADescriptor))) { + sizeof(DPDMADescriptor), MEMTXATTRS_UNSPECIFIED)) { s->registers[DPDMA_EISR] |= ((1 << 1) << channel); xlnx_dpdma_update_irq(s); s->operation_finished[channel] = true; @@ -708,7 +708,8 @@ size_t xlnx_dpdma_start_operation(XlnxDPDMAState *s, uint8_t channel, if (dma_memory_read(&address_space_memory, source_addr[0], &s->data[channel][ptr], - line_size)) { + line_size, + MEMTXATTRS_UNSPECIFIED)) { s->registers[DPDMA_ISR] |= ((1 << 12) << channel); xlnx_dpdma_update_irq(s); DPRINTF("Can't get data.\n"); @@ -736,7 +737,8 @@ size_t xlnx_dpdma_start_operation(XlnxDPDMAState *s, uint8_t channel, if 
(dma_memory_read(&address_space_memory, source_addr[frag], &(s->data[channel][ptr]), - fragment_len)) { + fragment_len, + MEMTXATTRS_UNSPECIFIED)) { s->registers[DPDMA_ISR] |= ((1 << 12) << channel); xlnx_dpdma_update_irq(s); DPRINTF("Can't get data.\n"); @@ -754,7 +756,7 @@ size_t xlnx_dpdma_start_operation(XlnxDPDMAState *s, uint8_t channel, DPRINTF("update the descriptor with the done flag set.\n"); xlnx_dpdma_desc_set_done(&desc); dma_memory_write(&address_space_memory, desc_addr, &desc, - sizeof(DPDMADescriptor)); + sizeof(DPDMADescriptor), MEMTXATTRS_UNSPECIFIED); } if (xlnx_dpdma_desc_completion_interrupt(&desc)) { diff --git a/hw/gpio/Kconfig b/hw/gpio/Kconfig index f0e7405f6e..d2cf3accc8 100644 --- a/hw/gpio/Kconfig +++ b/hw/gpio/Kconfig @@ -8,6 +8,9 @@ config PL061 config GPIO_KEY bool +config GPIO_MPC8XXX + bool + config GPIO_PWR bool diff --git a/hw/gpio/aspeed_gpio.c b/hw/gpio/aspeed_gpio.c index b3dec44480..1e267dd482 100644 --- a/hw/gpio/aspeed_gpio.c +++ b/hw/gpio/aspeed_gpio.c @@ -15,12 +15,10 @@ #include "qapi/visitor.h" #include "hw/irq.h" #include "migration/vmstate.h" +#include "trace.h" +#include "hw/registerfields.h" -#define GPIOS_PER_REG 32 -#define GPIOS_PER_SET GPIOS_PER_REG -#define GPIO_PIN_GAP_SIZE 4 #define GPIOS_PER_GROUP 8 -#define GPIO_GROUP_SHIFT 3 /* GPIO Source Types */ #define ASPEED_CMD_SRC_MASK 0x01010101 @@ -164,49 +162,70 @@ #define GPIO_YZAAAB_DIRECTION (0x1E4 >> 2) #define GPIO_AC_DATA_VALUE (0x1E8 >> 2) #define GPIO_AC_DIRECTION (0x1EC >> 2) -#define GPIO_3_6V_MEM_SIZE 0x1F0 -#define GPIO_3_6V_REG_ARRAY_SIZE (GPIO_3_6V_MEM_SIZE >> 2) +#define GPIO_3_3V_MEM_SIZE 0x1F0 +#define GPIO_3_3V_REG_ARRAY_SIZE (GPIO_3_3V_MEM_SIZE >> 2) /* AST2600 only - 1.8V gpios */ /* - * The AST2600 has same 3.6V gpios as the AST2400 (memory offsets 0x0-0x198) - * and additional 1.8V gpios (memory offsets 0x800-0x9D4). + * The AST2600 two copies of the GPIO controller: the same 3.3V gpios as the + * AST2400 (memory offsets 0x0-0x198) and a second controller with 1.8V gpios + * (memory offsets 0x800-0x9D4). 
*/ -#define GPIO_1_8V_REG_OFFSET 0x800 -#define GPIO_1_8V_ABCD_DATA_VALUE ((0x800 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_ABCD_DIRECTION ((0x804 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_ABCD_INT_ENABLE ((0x808 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_ABCD_INT_SENS_0 ((0x80C - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_ABCD_INT_SENS_1 ((0x810 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_ABCD_INT_SENS_2 ((0x814 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_ABCD_INT_STATUS ((0x818 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_ABCD_RESET_TOLERANT ((0x81C - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_E_DATA_VALUE ((0x820 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_E_DIRECTION ((0x824 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_E_INT_ENABLE ((0x828 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_E_INT_SENS_0 ((0x82C - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_E_INT_SENS_1 ((0x830 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_E_INT_SENS_2 ((0x834 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_E_INT_STATUS ((0x838 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_E_RESET_TOLERANT ((0x83C - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_ABCD_DEBOUNCE_1 ((0x840 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_ABCD_DEBOUNCE_2 ((0x844 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_E_DEBOUNCE_1 ((0x848 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_E_DEBOUNCE_2 ((0x84C - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_DEBOUNCE_TIME_1 ((0x850 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_DEBOUNCE_TIME_2 ((0x854 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_DEBOUNCE_TIME_3 ((0x858 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_ABCD_COMMAND_SRC_0 ((0x860 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_ABCD_COMMAND_SRC_1 ((0x864 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_E_COMMAND_SRC_0 ((0x868 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_E_COMMAND_SRC_1 ((0x86C - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_ABCD_DATA_READ ((0x8C0 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_E_DATA_READ ((0x8C4 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_ABCD_INPUT_MASK ((0x9D0 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_E_INPUT_MASK ((0x9D4 - GPIO_1_8V_REG_OFFSET) >> 2) -#define GPIO_1_8V_MEM_SIZE 0x9D8 -#define GPIO_1_8V_REG_ARRAY_SIZE ((GPIO_1_8V_MEM_SIZE - \ - GPIO_1_8V_REG_OFFSET) >> 2) +#define GPIO_1_8V_ABCD_DATA_VALUE (0x000 >> 2) +#define GPIO_1_8V_ABCD_DIRECTION (0x004 >> 2) +#define GPIO_1_8V_ABCD_INT_ENABLE (0x008 >> 2) +#define GPIO_1_8V_ABCD_INT_SENS_0 (0x00C >> 2) +#define GPIO_1_8V_ABCD_INT_SENS_1 (0x010 >> 2) +#define GPIO_1_8V_ABCD_INT_SENS_2 (0x014 >> 2) +#define GPIO_1_8V_ABCD_INT_STATUS (0x018 >> 2) +#define GPIO_1_8V_ABCD_RESET_TOLERANT (0x01C >> 2) +#define GPIO_1_8V_E_DATA_VALUE (0x020 >> 2) +#define GPIO_1_8V_E_DIRECTION (0x024 >> 2) +#define GPIO_1_8V_E_INT_ENABLE (0x028 >> 2) +#define GPIO_1_8V_E_INT_SENS_0 (0x02C >> 2) +#define GPIO_1_8V_E_INT_SENS_1 (0x030 >> 2) +#define GPIO_1_8V_E_INT_SENS_2 (0x034 >> 2) +#define GPIO_1_8V_E_INT_STATUS (0x038 >> 2) +#define GPIO_1_8V_E_RESET_TOLERANT (0x03C >> 2) +#define GPIO_1_8V_ABCD_DEBOUNCE_1 (0x040 >> 2) +#define GPIO_1_8V_ABCD_DEBOUNCE_2 (0x044 >> 2) +#define GPIO_1_8V_E_DEBOUNCE_1 (0x048 >> 2) +#define GPIO_1_8V_E_DEBOUNCE_2 (0x04C >> 2) +#define GPIO_1_8V_DEBOUNCE_TIME_1 (0x050 >> 2) +#define GPIO_1_8V_DEBOUNCE_TIME_2 (0x054 >> 2) +#define GPIO_1_8V_DEBOUNCE_TIME_3 (0x058 >> 2) +#define GPIO_1_8V_ABCD_COMMAND_SRC_0 (0x060 >> 2) +#define 
GPIO_1_8V_ABCD_COMMAND_SRC_1 (0x064 >> 2) +#define GPIO_1_8V_E_COMMAND_SRC_0 (0x068 >> 2) +#define GPIO_1_8V_E_COMMAND_SRC_1 (0x06C >> 2) +#define GPIO_1_8V_ABCD_DATA_READ (0x0C0 >> 2) +#define GPIO_1_8V_E_DATA_READ (0x0C4 >> 2) +#define GPIO_1_8V_ABCD_INPUT_MASK (0x1D0 >> 2) +#define GPIO_1_8V_E_INPUT_MASK (0x1D4 >> 2) +#define GPIO_1_8V_MEM_SIZE 0x1D8 +#define GPIO_1_8V_REG_ARRAY_SIZE (GPIO_1_8V_MEM_SIZE >> 2) + +/* + * GPIO index mode support + * It only supports write operation + */ +REG32(GPIO_INDEX_REG, 0x2AC) + FIELD(GPIO_INDEX_REG, NUMBER, 0, 8) + FIELD(GPIO_INDEX_REG, COMMAND, 12, 1) + FIELD(GPIO_INDEX_REG, TYPE, 16, 4) + FIELD(GPIO_INDEX_REG, DATA_VALUE, 20, 1) + FIELD(GPIO_INDEX_REG, DIRECTION, 20, 1) + FIELD(GPIO_INDEX_REG, INT_ENABLE, 20, 1) + FIELD(GPIO_INDEX_REG, INT_SENS_0, 21, 1) + FIELD(GPIO_INDEX_REG, INT_SENS_1, 22, 1) + FIELD(GPIO_INDEX_REG, INT_SENS_2, 23, 1) + FIELD(GPIO_INDEX_REG, INT_STATUS, 24, 1) + FIELD(GPIO_INDEX_REG, DEBOUNCE_1, 20, 1) + FIELD(GPIO_INDEX_REG, DEBOUNCE_2, 21, 1) + FIELD(GPIO_INDEX_REG, RESET_TOLERANT, 20, 1) + FIELD(GPIO_INDEX_REG, COMMAND_SRC_0, 20, 1) + FIELD(GPIO_INDEX_REG, COMMAND_SRC_1, 21, 1) + FIELD(GPIO_INDEX_REG, INPUT_MASK, 20, 1) static int aspeed_evaluate_irq(GPIOSets *regs, int gpio_prev_high, int gpio) { @@ -249,7 +268,7 @@ static ptrdiff_t aspeed_gpio_set_idx(AspeedGPIOState *s, GPIOSets *regs) } static void aspeed_gpio_update(AspeedGPIOState *s, GPIOSets *regs, - uint32_t value) + uint32_t value, uint32_t mode_mask) { uint32_t input_mask = regs->input_mask; uint32_t direction = regs->direction; @@ -258,9 +277,10 @@ static void aspeed_gpio_update(AspeedGPIOState *s, GPIOSets *regs, uint32_t diff; int gpio; - diff = old ^ new; + diff = (old ^ new); + diff &= mode_mask; if (diff) { - for (gpio = 0; gpio < GPIOS_PER_REG; gpio++) { + for (gpio = 0; gpio < ASPEED_GPIOS_PER_SET; gpio++) { uint32_t mask = 1 << gpio; /* If the gpio needs to be updated... */ @@ -284,8 +304,7 @@ static void aspeed_gpio_update(AspeedGPIOState *s, GPIOSets *regs, if (direction & mask) { /* ...trigger the line-state IRQ */ ptrdiff_t set = aspeed_gpio_set_idx(s, regs); - size_t offset = set * GPIOS_PER_SET + gpio; - qemu_set_irq(s->gpios[offset], !!(new & mask)); + qemu_set_irq(s->gpios[set][gpio], !!(new & mask)); } else { /* ...otherwise if we meet the line's current IRQ policy... */ if (aspeed_evaluate_irq(regs, old & mask, gpio)) { @@ -298,21 +317,6 @@ static void aspeed_gpio_update(AspeedGPIOState *s, GPIOSets *regs, qemu_set_irq(s->irq, !!(s->pending)); } -static uint32_t aspeed_adjust_pin(AspeedGPIOState *s, uint32_t pin) -{ - AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); - /* - * The 2500 has a 4 pin gap in group AB and the 2400 has a 4 pin - * gap in group Y (and only four pins in AB but this is the last group so - * it doesn't matter). 
- */ - if (agc->gap && pin >= agc->gap) { - pin += GPIO_PIN_GAP_SIZE; - } - - return pin; -} - static bool aspeed_gpio_get_pin_level(AspeedGPIOState *s, uint32_t set_idx, uint32_t pin) { @@ -333,10 +337,10 @@ static void aspeed_gpio_set_pin_level(AspeedGPIOState *s, uint32_t set_idx, if (level) { value |= pin_mask; } else { - value &= !pin_mask; + value &= ~pin_mask; } - aspeed_gpio_update(s, &s->sets[set_idx], value); + aspeed_gpio_update(s, &s->sets[set_idx], value, ~s->sets[set_idx].direction); } /* @@ -368,7 +372,7 @@ static uint32_t update_value_control_source(GPIOSets *regs, uint32_t old_value, uint32_t new_value = 0; /* for each group in set */ - for (i = 0; i < GPIOS_PER_REG; i += GPIOS_PER_GROUP) { + for (i = 0; i < ASPEED_GPIOS_PER_SET; i += GPIOS_PER_GROUP) { cmd_source = extract32(regs->cmd_source_0, i, 1) | (extract32(regs->cmd_source_1, i, 1) << 1); @@ -381,7 +385,7 @@ static uint32_t update_value_control_source(GPIOSets *regs, uint32_t old_value, return new_value; } -static const AspeedGPIOReg aspeed_3_6v_gpios[GPIO_3_6V_REG_ARRAY_SIZE] = { +static const AspeedGPIOReg aspeed_3_3v_gpios[GPIO_3_3V_REG_ARRAY_SIZE] = { /* Set ABCD */ [GPIO_ABCD_DATA_VALUE] = { 0, gpio_reg_data_value }, [GPIO_ABCD_DIRECTION] = { 0, gpio_reg_direction }, @@ -544,55 +548,214 @@ static uint64_t aspeed_gpio_read(void *opaque, hwaddr offset, uint32_t size) uint64_t idx = -1; const AspeedGPIOReg *reg; GPIOSets *set; + uint32_t value = 0; + uint64_t debounce_value; idx = offset >> 2; if (idx >= GPIO_DEBOUNCE_TIME_1 && idx <= GPIO_DEBOUNCE_TIME_3) { idx -= GPIO_DEBOUNCE_TIME_1; - return (uint64_t) s->debounce_regs[idx]; + debounce_value = (uint64_t) s->debounce_regs[idx]; + trace_aspeed_gpio_read(offset, debounce_value); + return debounce_value; } reg = &agc->reg_table[idx]; if (reg->set_idx >= agc->nr_gpio_sets) { qemu_log_mask(LOG_GUEST_ERROR, "%s: no getter for offset 0x%" - HWADDR_PRIx"\n", __func__, offset); + PRIx64"\n", __func__, offset); return 0; } set = &s->sets[reg->set_idx]; switch (reg->type) { case gpio_reg_data_value: - return set->data_value; + value = set->data_value; + break; case gpio_reg_direction: - return set->direction; + value = set->direction; + break; case gpio_reg_int_enable: - return set->int_enable; + value = set->int_enable; + break; case gpio_reg_int_sens_0: - return set->int_sens_0; + value = set->int_sens_0; + break; case gpio_reg_int_sens_1: - return set->int_sens_1; + value = set->int_sens_1; + break; case gpio_reg_int_sens_2: - return set->int_sens_2; + value = set->int_sens_2; + break; case gpio_reg_int_status: - return set->int_status; + value = set->int_status; + break; case gpio_reg_reset_tolerant: - return set->reset_tol; + value = set->reset_tol; + break; case gpio_reg_debounce_1: - return set->debounce_1; + value = set->debounce_1; + break; case gpio_reg_debounce_2: - return set->debounce_2; + value = set->debounce_2; + break; case gpio_reg_cmd_source_0: - return set->cmd_source_0; + value = set->cmd_source_0; + break; case gpio_reg_cmd_source_1: - return set->cmd_source_1; + value = set->cmd_source_1; + break; case gpio_reg_data_read: - return set->data_read; + value = set->data_read; + break; case gpio_reg_input_mask: - return set->input_mask; + value = set->input_mask; + break; default: qemu_log_mask(LOG_GUEST_ERROR, "%s: no getter for offset 0x%" - HWADDR_PRIx"\n", __func__, offset); + PRIx64"\n", __func__, offset); return 0; - }; + } + + trace_aspeed_gpio_read(offset, value); + return value; +} + +static void aspeed_gpio_write_index_mode(void *opaque, 
hwaddr offset, + uint64_t data, uint32_t size) +{ + + AspeedGPIOState *s = ASPEED_GPIO(opaque); + AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); + const GPIOSetProperties *props; + GPIOSets *set; + uint32_t reg_idx_number = FIELD_EX32(data, GPIO_INDEX_REG, NUMBER); + uint32_t reg_idx_type = FIELD_EX32(data, GPIO_INDEX_REG, TYPE); + uint32_t reg_idx_command = FIELD_EX32(data, GPIO_INDEX_REG, COMMAND); + uint32_t set_idx = reg_idx_number / ASPEED_GPIOS_PER_SET; + uint32_t pin_idx = reg_idx_number % ASPEED_GPIOS_PER_SET; + uint32_t group_idx = pin_idx / GPIOS_PER_GROUP; + uint32_t reg_value = 0; + uint32_t cleared; + + set = &s->sets[set_idx]; + props = &agc->props[set_idx]; + + if (reg_idx_command) + qemu_log_mask(LOG_GUEST_ERROR, "%s: offset 0x%" PRIx64 "data 0x%" + PRIx64 "index mode wrong command 0x%x\n", + __func__, offset, data, reg_idx_command); + + switch (reg_idx_type) { + case gpio_reg_idx_data: + reg_value = set->data_read; + reg_value = deposit32(reg_value, pin_idx, 1, + FIELD_EX32(data, GPIO_INDEX_REG, DATA_VALUE)); + reg_value &= props->output; + reg_value = update_value_control_source(set, set->data_value, + reg_value); + set->data_read = reg_value; + aspeed_gpio_update(s, set, reg_value, set->direction); + return; + case gpio_reg_idx_direction: + reg_value = set->direction; + reg_value = deposit32(reg_value, pin_idx, 1, + FIELD_EX32(data, GPIO_INDEX_REG, DIRECTION)); + /* + * where data is the value attempted to be written to the pin: + * pin type | input mask | output mask | expected value + * ------------------------------------------------------------ + * bidirectional | 1 | 1 | data + * input only | 1 | 0 | 0 + * output only | 0 | 1 | 1 + * no pin | 0 | 0 | 0 + * + * which is captured by: + * data = ( data | ~input) & output; + */ + reg_value = (reg_value | ~props->input) & props->output; + set->direction = update_value_control_source(set, set->direction, + reg_value); + break; + case gpio_reg_idx_interrupt: + reg_value = set->int_enable; + reg_value = deposit32(reg_value, pin_idx, 1, + FIELD_EX32(data, GPIO_INDEX_REG, INT_ENABLE)); + set->int_enable = update_value_control_source(set, set->int_enable, + reg_value); + reg_value = set->int_sens_0; + reg_value = deposit32(reg_value, pin_idx, 1, + FIELD_EX32(data, GPIO_INDEX_REG, INT_SENS_0)); + set->int_sens_0 = update_value_control_source(set, set->int_sens_0, + reg_value); + reg_value = set->int_sens_1; + reg_value = deposit32(reg_value, pin_idx, 1, + FIELD_EX32(data, GPIO_INDEX_REG, INT_SENS_1)); + set->int_sens_1 = update_value_control_source(set, set->int_sens_1, + reg_value); + reg_value = set->int_sens_2; + reg_value = deposit32(reg_value, pin_idx, 1, + FIELD_EX32(data, GPIO_INDEX_REG, INT_SENS_2)); + set->int_sens_2 = update_value_control_source(set, set->int_sens_2, + reg_value); + /* set interrupt status */ + reg_value = set->int_status; + reg_value = deposit32(reg_value, pin_idx, 1, + FIELD_EX32(data, GPIO_INDEX_REG, INT_STATUS)); + cleared = ctpop32(reg_value & set->int_status); + if (s->pending && cleared) { + assert(s->pending >= cleared); + s->pending -= cleared; + } + set->int_status &= ~reg_value; + break; + case gpio_reg_idx_debounce: + reg_value = set->debounce_1; + reg_value = deposit32(reg_value, pin_idx, 1, + FIELD_EX32(data, GPIO_INDEX_REG, DEBOUNCE_1)); + set->debounce_1 = update_value_control_source(set, set->debounce_1, + reg_value); + reg_value = set->debounce_2; + reg_value = deposit32(reg_value, pin_idx, 1, + FIELD_EX32(data, GPIO_INDEX_REG, DEBOUNCE_2)); + set->debounce_2 = 
update_value_control_source(set, set->debounce_2, + reg_value); + return; + case gpio_reg_idx_tolerance: + reg_value = set->reset_tol; + reg_value = deposit32(reg_value, pin_idx, 1, + FIELD_EX32(data, GPIO_INDEX_REG, RESET_TOLERANT)); + set->reset_tol = update_value_control_source(set, set->reset_tol, + reg_value); + return; + case gpio_reg_idx_cmd_src: + reg_value = set->cmd_source_0; + reg_value = deposit32(reg_value, GPIOS_PER_GROUP * group_idx, 1, + FIELD_EX32(data, GPIO_INDEX_REG, COMMAND_SRC_0)); + set->cmd_source_0 = reg_value & ASPEED_CMD_SRC_MASK; + reg_value = set->cmd_source_1; + reg_value = deposit32(reg_value, GPIOS_PER_GROUP * group_idx, 1, + FIELD_EX32(data, GPIO_INDEX_REG, COMMAND_SRC_1)); + set->cmd_source_1 = reg_value & ASPEED_CMD_SRC_MASK; + return; + case gpio_reg_idx_input_mask: + reg_value = set->input_mask; + reg_value = deposit32(reg_value, pin_idx, 1, + FIELD_EX32(data, GPIO_INDEX_REG, INPUT_MASK)); + /* + * feeds into interrupt generation + * 0: read from data value reg will be updated + * 1: read from data value reg will not be updated + */ + set->input_mask = reg_value & props->input; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: offset 0x%" PRIx64 "data 0x%" + PRIx64 "index mode wrong type 0x%x\n", + __func__, offset, data, reg_idx_type); + return; + } + aspeed_gpio_update(s, set, set->data_value, UINT32_MAX); + return; } static void aspeed_gpio_write(void *opaque, hwaddr offset, uint64_t data, @@ -606,7 +769,16 @@ static void aspeed_gpio_write(void *opaque, hwaddr offset, uint64_t data, GPIOSets *set; uint32_t cleared; + trace_aspeed_gpio_write(offset, data); + idx = offset >> 2; + + /* check gpio index mode */ + if (idx == R_GPIO_INDEX_REG) { + aspeed_gpio_write_index_mode(opaque, offset, data, size); + return; + } + if (idx >= GPIO_DEBOUNCE_TIME_1 && idx <= GPIO_DEBOUNCE_TIME_3) { idx -= GPIO_DEBOUNCE_TIME_1; s->debounce_regs[idx] = (uint32_t) data; @@ -616,7 +788,7 @@ static void aspeed_gpio_write(void *opaque, hwaddr offset, uint64_t data, reg = &agc->reg_table[idx]; if (reg->set_idx >= agc->nr_gpio_sets) { qemu_log_mask(LOG_GUEST_ERROR, "%s: no setter for offset 0x%" - HWADDR_PRIx"\n", __func__, offset); + PRIx64"\n", __func__, offset); return; } @@ -628,7 +800,7 @@ static void aspeed_gpio_write(void *opaque, hwaddr offset, uint64_t data, data &= props->output; data = update_value_control_source(set, set->data_value, data); set->data_read = data; - aspeed_gpio_update(s, set, data); + aspeed_gpio_update(s, set, data, set->direction); return; case gpio_reg_direction: /* @@ -638,7 +810,7 @@ static void aspeed_gpio_write(void *opaque, hwaddr offset, uint64_t data, * bidirectional | 1 | 1 | data * input only | 1 | 0 | 0 * output only | 0 | 1 | 1 - * no pin / gap | 0 | 0 | 0 + * no pin | 0 | 0 | 0 * * which is captured by: * data = ( data | ~input) & output; @@ -701,10 +873,10 @@ static void aspeed_gpio_write(void *opaque, hwaddr offset, uint64_t data, break; default: qemu_log_mask(LOG_GUEST_ERROR, "%s: no setter for offset 0x%" - HWADDR_PRIx"\n", __func__, offset); + PRIx64"\n", __func__, offset); return; } - aspeed_gpio_update(s, set, set->data_value); + aspeed_gpio_update(s, set, set->data_value, UINT32_MAX); return; } @@ -780,7 +952,7 @@ static void aspeed_gpio_set_pin(Object *obj, Visitor *v, const char *name, } /****************** Setup functions ******************/ -static const GPIOSetProperties ast2400_set_props[] = { +static const GPIOSetProperties ast2400_set_props[ASPEED_GPIO_MAX_NR_SETS] = { [0] = {0xffffffff, 0xffffffff, {"A", "B", 
"C", "D"} }, [1] = {0xffffffff, 0xffffffff, {"E", "F", "G", "H"} }, [2] = {0xffffffff, 0xffffffff, {"I", "J", "K", "L"} }, @@ -790,32 +962,41 @@ static const GPIOSetProperties ast2400_set_props[] = { [6] = {0x0000000f, 0x0fffff0f, {"Y", "Z", "AA", "AB"} }, }; -static const GPIOSetProperties ast2500_set_props[] = { +static const GPIOSetProperties ast2500_set_props[ASPEED_GPIO_MAX_NR_SETS] = { [0] = {0xffffffff, 0xffffffff, {"A", "B", "C", "D"} }, [1] = {0xffffffff, 0xffffffff, {"E", "F", "G", "H"} }, [2] = {0xffffffff, 0xffffffff, {"I", "J", "K", "L"} }, [3] = {0xffffffff, 0xffffffff, {"M", "N", "O", "P"} }, [4] = {0xffffffff, 0xffffffff, {"Q", "R", "S", "T"} }, [5] = {0xffffffff, 0x0000ffff, {"U", "V", "W", "X"} }, - [6] = {0xffffff0f, 0x0fffff0f, {"Y", "Z", "AA", "AB"} }, + [6] = {0x0fffffff, 0x0fffffff, {"Y", "Z", "AA", "AB"} }, [7] = {0x000000ff, 0x000000ff, {"AC"} }, }; -static GPIOSetProperties ast2600_3_6v_set_props[] = { +static GPIOSetProperties ast2600_3_3v_set_props[ASPEED_GPIO_MAX_NR_SETS] = { [0] = {0xffffffff, 0xffffffff, {"A", "B", "C", "D"} }, [1] = {0xffffffff, 0xffffffff, {"E", "F", "G", "H"} }, [2] = {0xffffffff, 0xffffffff, {"I", "J", "K", "L"} }, [3] = {0xffffffff, 0xffffffff, {"M", "N", "O", "P"} }, - [4] = {0xffffffff, 0xffffffff, {"Q", "R", "S", "T"} }, - [5] = {0xffffffff, 0x0000ffff, {"U", "V", "W", "X"} }, - [6] = {0xffff0000, 0x0fff0000, {"Y", "Z", "", ""} }, + [4] = {0xffffffff, 0x00ffffff, {"Q", "R", "S", "T"} }, + [5] = {0xffffffff, 0xffffff00, {"U", "V", "W", "X"} }, + [6] = {0x0000ffff, 0x0000ffff, {"Y", "Z"} }, }; -static GPIOSetProperties ast2600_1_8v_set_props[] = { +static GPIOSetProperties ast2600_1_8v_set_props[ASPEED_GPIO_MAX_NR_SETS] = { [0] = {0xffffffff, 0xffffffff, {"18A", "18B", "18C", "18D"} }, [1] = {0x0000000f, 0x0000000f, {"18E"} }, }; +static GPIOSetProperties ast1030_set_props[ASPEED_GPIO_MAX_NR_SETS] = { + [0] = {0xffffffff, 0xffffffff, {"A", "B", "C", "D"} }, + [1] = {0xffffffff, 0xffffffff, {"E", "F", "G", "H"} }, + [2] = {0xffffffff, 0xffffffff, {"I", "J", "K", "L"} }, + [3] = {0xffffff3f, 0xffffff3f, {"M", "N", "O", "P"} }, + [4] = {0xff060c1f, 0x00060c1f, {"Q", "R", "S", "T"} }, + [5] = {0x000000ff, 0x00000000, {"U"} }, +}; + static const MemoryRegionOps aspeed_gpio_ops = { .read = aspeed_gpio_read, .write = aspeed_gpio_write, @@ -837,14 +1018,20 @@ static void aspeed_gpio_realize(DeviceState *dev, Error **errp) AspeedGPIOState *s = ASPEED_GPIO(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); - int pin; /* Interrupt parent line */ sysbus_init_irq(sbd, &s->irq); /* Individual GPIOs */ - for (pin = 0; pin < agc->nr_gpio_pins; pin++) { - sysbus_init_irq(sbd, &s->gpios[pin]); + for (int i = 0; i < ASPEED_GPIO_MAX_NR_SETS; i++) { + const GPIOSetProperties *props = &agc->props[i]; + uint32_t skip = ~(props->input | props->output); + for (int j = 0; j < ASPEED_GPIOS_PER_SET; j++) { + if (skip >> j & 1) { + continue; + } + sysbus_init_irq(sbd, &s->gpios[i][j]); + } } memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_gpio_ops, s, @@ -857,20 +1044,22 @@ static void aspeed_gpio_init(Object *obj) { AspeedGPIOState *s = ASPEED_GPIO(obj); AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); - int pin; - for (pin = 0; pin < agc->nr_gpio_pins; pin++) { - char *name; - int set_idx = pin / GPIOS_PER_SET; - int pin_idx = aspeed_adjust_pin(s, pin) - (set_idx * GPIOS_PER_SET); - int group_idx = pin_idx >> GPIO_GROUP_SHIFT; - const GPIOSetProperties *props = &agc->props[set_idx]; - - name = 
g_strdup_printf("gpio%s%d", props->group_label[group_idx], - pin_idx % GPIOS_PER_GROUP); - object_property_add(obj, name, "bool", aspeed_gpio_get_pin, - aspeed_gpio_set_pin, NULL, NULL); - g_free(name); + for (int i = 0; i < ASPEED_GPIO_MAX_NR_SETS; i++) { + const GPIOSetProperties *props = &agc->props[i]; + uint32_t skip = ~(props->input | props->output); + for (int j = 0; j < ASPEED_GPIOS_PER_SET; j++) { + if (skip >> j & 1) { + continue; + } + int group_idx = j / GPIOS_PER_GROUP; + int pin_idx = j % GPIOS_PER_GROUP; + const char *group = &props->group_label[group_idx][0]; + char *name = g_strdup_printf("gpio%s%d", group, pin_idx); + object_property_add(obj, name, "bool", aspeed_gpio_get_pin, + aspeed_gpio_set_pin, NULL, NULL); + g_free(name); + } } } @@ -927,8 +1116,7 @@ static void aspeed_gpio_ast2400_class_init(ObjectClass *klass, void *data) agc->props = ast2400_set_props; agc->nr_gpio_pins = 216; agc->nr_gpio_sets = 7; - agc->gap = 196; - agc->reg_table = aspeed_3_6v_gpios; + agc->reg_table = aspeed_3_3v_gpios; } static void aspeed_gpio_2500_class_init(ObjectClass *klass, void *data) @@ -938,18 +1126,17 @@ static void aspeed_gpio_2500_class_init(ObjectClass *klass, void *data) agc->props = ast2500_set_props; agc->nr_gpio_pins = 228; agc->nr_gpio_sets = 8; - agc->gap = 220; - agc->reg_table = aspeed_3_6v_gpios; + agc->reg_table = aspeed_3_3v_gpios; } -static void aspeed_gpio_ast2600_3_6v_class_init(ObjectClass *klass, void *data) +static void aspeed_gpio_ast2600_3_3v_class_init(ObjectClass *klass, void *data) { AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass); - agc->props = ast2600_3_6v_set_props; + agc->props = ast2600_3_3v_set_props; agc->nr_gpio_pins = 208; agc->nr_gpio_sets = 7; - agc->reg_table = aspeed_3_6v_gpios; + agc->reg_table = aspeed_3_3v_gpios; } static void aspeed_gpio_ast2600_1_8v_class_init(ObjectClass *klass, void *data) @@ -962,6 +1149,16 @@ static void aspeed_gpio_ast2600_1_8v_class_init(ObjectClass *klass, void *data) agc->reg_table = aspeed_1_8v_gpios; } +static void aspeed_gpio_1030_class_init(ObjectClass *klass, void *data) +{ + AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass); + + agc->props = ast1030_set_props; + agc->nr_gpio_pins = 151; + agc->nr_gpio_sets = 6; + agc->reg_table = aspeed_3_3v_gpios; +} + static const TypeInfo aspeed_gpio_info = { .name = TYPE_ASPEED_GPIO, .parent = TYPE_SYS_BUS_DEVICE, @@ -985,10 +1182,10 @@ static const TypeInfo aspeed_gpio_ast2500_info = { .instance_init = aspeed_gpio_init, }; -static const TypeInfo aspeed_gpio_ast2600_3_6v_info = { +static const TypeInfo aspeed_gpio_ast2600_3_3v_info = { .name = TYPE_ASPEED_GPIO "-ast2600", .parent = TYPE_ASPEED_GPIO, - .class_init = aspeed_gpio_ast2600_3_6v_class_init, + .class_init = aspeed_gpio_ast2600_3_3v_class_init, .instance_init = aspeed_gpio_init, }; @@ -999,13 +1196,21 @@ static const TypeInfo aspeed_gpio_ast2600_1_8v_info = { .instance_init = aspeed_gpio_init, }; +static const TypeInfo aspeed_gpio_ast1030_info = { + .name = TYPE_ASPEED_GPIO "-ast1030", + .parent = TYPE_ASPEED_GPIO, + .class_init = aspeed_gpio_1030_class_init, + .instance_init = aspeed_gpio_init, +}; + static void aspeed_gpio_register_types(void) { type_register_static(&aspeed_gpio_info); type_register_static(&aspeed_gpio_ast2400_info); type_register_static(&aspeed_gpio_ast2500_info); - type_register_static(&aspeed_gpio_ast2600_3_6v_info); + type_register_static(&aspeed_gpio_ast2600_3_3v_info); type_register_static(&aspeed_gpio_ast2600_1_8v_info); + type_register_static(&aspeed_gpio_ast1030_info); } 
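The truth table in the Aspeed GPIO direction handling above reduces to the single expression data = (data | ~input) & output. A quick standalone check of that identity (plain C, not QEMU code; dir_value and the mask values are made-up examples chosen so each pin type appears once):

#include <stdint.h>
#include <stdio.h>

static uint32_t dir_value(uint32_t data, uint32_t input, uint32_t output)
{
    return (data | ~input) & output;
}

int main(void)
{
    /* bit 0: bidirectional, bit 1: input only, bit 2: output only, bit 3: no pin */
    uint32_t input  = 0x3;  /* pins 0 and 1 are input-capable  */
    uint32_t output = 0x5;  /* pins 0 and 2 are output-capable */

    /* writing all ones keeps the bidirectional bit, forces bit 2 to 1, clears the rest */
    printf("write 0xf -> 0x%x\n", dir_value(0xf, input, output)); /* prints 0x5 */
    /* writing all zeroes still reports the output-only bit 2 as 1 */
    printf("write 0x0 -> 0x%x\n", dir_value(0x0, input, output)); /* prints 0x4 */
    return 0;
}

So input-only and absent pins always end up as 0 in the direction register and output-only pins always end up as 1, exactly as the table states.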
type_init(aspeed_gpio_register_types); diff --git a/hw/gpio/bcm2835_gpio.c b/hw/gpio/bcm2835_gpio.c index abdddbc67c..c995bba1d9 100644 --- a/hw/gpio/bcm2835_gpio.c +++ b/hw/gpio/bcm2835_gpio.c @@ -299,8 +299,7 @@ static void bcm2835_gpio_init(Object *obj) DeviceState *dev = DEVICE(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - qbus_create_inplace(&s->sdbus, sizeof(s->sdbus), - TYPE_SD_BUS, DEVICE(s), "sd-bus"); + qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SD_BUS, DEVICE(s), "sd-bus"); memory_region_init_io(&s->iomem, obj, &bcm2835_gpio_ops, s, "bcm2835_gpio", 0x1000); diff --git a/hw/gpio/imx_gpio.c b/hw/gpio/imx_gpio.c index 7a591804a9..c7f98b7bb1 100644 --- a/hw/gpio/imx_gpio.c +++ b/hw/gpio/imx_gpio.c @@ -277,7 +277,6 @@ static const VMStateDescription vmstate_imx_gpio = { .name = TYPE_IMX_GPIO, .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_UINT32(dr, IMXGPIOState), VMSTATE_UINT32(gdir, IMXGPIOState), diff --git a/hw/gpio/meson.build b/hw/gpio/meson.build index 7bd6a57264..b726e6d27a 100644 --- a/hw/gpio/meson.build +++ b/hw/gpio/meson.build @@ -1,5 +1,5 @@ -softmmu_ss.add(when: 'CONFIG_E500', if_true: files('mpc8xxx.c')) softmmu_ss.add(when: 'CONFIG_GPIO_KEY', if_true: files('gpio_key.c')) +softmmu_ss.add(when: 'CONFIG_GPIO_MPC8XXX', if_true: files('mpc8xxx.c')) softmmu_ss.add(when: 'CONFIG_GPIO_PWR', if_true: files('gpio_pwr.c')) softmmu_ss.add(when: 'CONFIG_MAX7310', if_true: files('max7310.c')) softmmu_ss.add(when: 'CONFIG_PL061', if_true: files('pl061.c')) diff --git a/hw/gpio/omap_gpio.c b/hw/gpio/omap_gpio.c index e25084b40c..bd0841d57f 100644 --- a/hw/gpio/omap_gpio.c +++ b/hw/gpio/omap_gpio.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "hw/irq.h" #include "hw/qdev-properties.h" #include "hw/arm/omap.h" diff --git a/hw/gpio/trace-events b/hw/gpio/trace-events index 1dab99c560..9736b362ac 100644 --- a/hw/gpio/trace-events +++ b/hw/gpio/trace-events @@ -27,3 +27,7 @@ sifive_gpio_read(uint64_t offset, uint64_t r) "offset 0x%" PRIx64 " value 0x%" P sifive_gpio_write(uint64_t offset, uint64_t value) "offset 0x%" PRIx64 " value 0x%" PRIx64 sifive_gpio_set(int64_t line, int64_t value) "line %" PRIi64 " value %" PRIi64 sifive_gpio_update_output_irq(int64_t line, int64_t value) "line %" PRIi64 " value %" PRIi64 + +# aspeed_gpio.c +aspeed_gpio_read(uint64_t offset, uint64_t value) "offset: 0x%" PRIx64 " value 0x%" PRIx64 +aspeed_gpio_write(uint64_t offset, uint64_t value) "offset: 0x%" PRIx64 " value 0x%" PRIx64 diff --git a/hw/hppa/Kconfig b/hw/hppa/Kconfig index 22948db025..5dd8b5b21e 100644 --- a/hw/hppa/Kconfig +++ b/hw/hppa/Kconfig @@ -1,9 +1,10 @@ -config DINO +config HPPA_B160L bool imply PCI_DEVICES imply E1000_PCI imply VIRTIO_VGA - select PCI + select DINO + select LASI select SERIAL select ISA_BUS select I8259 diff --git a/hw/hppa/hppa_hardware.h b/hw/hppa/hppa_hardware.h index bc258895c9..a5ac3dd0fd 100644 --- a/hw/hppa/hppa_hardware.h +++ b/hw/hppa/hppa_hardware.h @@ -1,4 +1,5 @@ /* HPPA cores and system support chips. */ +/* Be aware: QEMU and seabios-hppa repositories share this file as-is. 
*/ #ifndef HW_HPPA_HPPA_HARDWARE_H #define HW_HPPA_HPPA_HARDWARE_H @@ -25,7 +26,7 @@ #define LASI_GFX_HPA 0xf8000000 #define ARTIST_FB_ADDR 0xf9000000 #define CPU_HPA 0xfffb0000 -#define MEMORY_HPA 0xfffbf000 +#define MEMORY_HPA 0xfffff000 #define PCI_HPA DINO_HPA /* PCI bus */ #define IDE_HPA 0xf9000000 /* Boot disc controller */ @@ -40,12 +41,13 @@ #define FW_CFG_IO_BASE 0xfffa0000 -#define PORT_SERIAL1 (DINO_UART_HPA + 0x800) -#define PORT_SERIAL2 (LASI_UART_HPA + 0x800) +#define PORT_SERIAL1 (LASI_UART_HPA + 0x800) +#define PORT_SERIAL2 (DINO_UART_HPA + 0x800) -#define HPPA_MAX_CPUS 8 /* max. number of SMP CPUs */ +#define HPPA_MAX_CPUS 16 /* max. number of SMP CPUs */ #define CPU_CLOCK_MHZ 250 /* emulate a 250 MHz CPU */ #define CPU_HPA_CR_REG 7 /* store CPU HPA in cr7 (SeaBIOS internal) */ +#define PIM_STORAGE_SIZE 600 /* storage size of pdc_pim_toc_struct (64bit) */ #endif diff --git a/hw/hppa/hppa_sys.h b/hw/hppa/hppa_sys.h deleted file mode 100644 index 0b18271cc9..0000000000 --- a/hw/hppa/hppa_sys.h +++ /dev/null @@ -1,24 +0,0 @@ -/* HPPA cores and system support chips. */ - -#ifndef HW_HPPA_SYS_H -#define HW_HPPA_SYS_H - -#include "hw/pci/pci.h" -#include "hw/pci/pci_host.h" -#include "hw/boards.h" -#include "hw/intc/i8259.h" - -#include "hppa_hardware.h" - -PCIBus *dino_init(MemoryRegion *, qemu_irq *, qemu_irq *); -DeviceState *lasi_init(MemoryRegion *); -#define enable_lasi_lan() 0 - -#define TYPE_DINO_PCI_HOST_BRIDGE "dino-pcihost" - -/* hppa_pci.c. */ -extern const MemoryRegionOps hppa_pci_ignore_ops; -extern const MemoryRegionOps hppa_pci_conf1_ops; -extern const MemoryRegionOps hppa_pci_iack_ops; - -#endif diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c index 2a46af5bc9..98a78b84b4 100644 --- a/hw/hppa/machine.c +++ b/hw/hppa/machine.c @@ -4,7 +4,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "cpu.h" #include "elf.h" @@ -16,20 +15,28 @@ #include "hw/rtc/mc146818rtc.h" #include "hw/timer/i8254.h" #include "hw/char/serial.h" +#include "hw/char/parallel.h" +#include "hw/intc/i8259.h" +#include "hw/input/lasips2.h" #include "hw/net/lasi_82596.h" -#include "hppa_sys.h" +#include "hw/nmi.h" +#include "hw/pci/pci.h" +#include "hw/pci-host/dino.h" +#include "hw/misc/lasi.h" +#include "hppa_hardware.h" #include "qemu/units.h" #include "qapi/error.h" #include "net/net.h" #include "qemu/log.h" #include "net/net.h" -#define MAX_IDE_BUS 2 - -#define MIN_SEABIOS_HPPA_VERSION 1 /* require at least this fw version */ +#define MIN_SEABIOS_HPPA_VERSION 6 /* require at least this fw version */ #define HPA_POWER_BUTTON (FIRMWARE_END - 0x10) +#define enable_lasi_lan() 0 + + static void hppa_powerdown_req(Notifier *n, void *opaque) { hwaddr soft_power_reg = HPA_POWER_BUTTON; @@ -51,6 +58,29 @@ static Notifier hppa_system_powerdown_notifier = { .notify = hppa_powerdown_req }; +/* Fallback for unassigned PCI I/O operations. Avoids MCHK. 
*/ +static uint64_t ignore_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + +static void ignore_write(void *opaque, hwaddr addr, uint64_t v, unsigned size) +{ +} + +static const MemoryRegionOps hppa_pci_ignore_ops = { + .read = ignore_read, + .write = ignore_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, +}; static ISABus *hppa_isa_bus(void) { @@ -93,6 +123,7 @@ static FWCfgState *create_fw_cfg(MachineState *ms) { FWCfgState *fw_cfg; uint64_t val; + const char qemu_version[] = QEMU_VERSION; fw_cfg = fw_cfg_init_mem(FW_CFG_IO_BASE, FW_CFG_IO_BASE + 4); fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, ms->smp.cpus); @@ -115,21 +146,46 @@ static FWCfgState *create_fw_cfg(MachineState *ms) fw_cfg_add_file(fw_cfg, "/etc/power-button-addr", g_memdup(&val, sizeof(val)), sizeof(val)); - fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, ms->boot_order[0]); + fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, ms->boot_config.order[0]); qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); + fw_cfg_add_file(fw_cfg, "/etc/qemu-version", + g_memdup(qemu_version, sizeof(qemu_version)), + sizeof(qemu_version)); + return fw_cfg; } +static LasiState *lasi_init(void) +{ + DeviceState *dev; + + dev = qdev_new(TYPE_LASI_CHIP); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + return LASI_CHIP(dev); +} + +static DinoState *dino_init(MemoryRegion *addr_space) +{ + DeviceState *dev; + + dev = qdev_new(TYPE_DINO_PCI_HOST_BRIDGE); + object_property_set_link(OBJECT(dev), "memory-as", OBJECT(addr_space), + &error_fatal); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + return DINO_PCI_HOST_BRIDGE(dev); +} + static void machine_hppa_init(MachineState *machine) { const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; - DeviceState *dev; + DeviceState *dev, *dino_dev, *lasi_dev; PCIBus *pci_bus; ISABus *isa_bus; - qemu_irq rtc_irq, serial_irq; char *firmware_filename; uint64_t firmware_low, firmware_high; long size; @@ -163,10 +219,17 @@ static void machine_hppa_init(MachineState *machine) /* Init Lasi chip */ - lasi_init(addr_space); + lasi_dev = DEVICE(lasi_init()); + memory_region_add_subregion(addr_space, LASI_HPA, + sysbus_mmio_get_region( + SYS_BUS_DEVICE(lasi_dev), 0)); /* Init Dino (PCI host bus chip). */ - pci_bus = dino_init(addr_space, &rtc_irq, &serial_irq); + dino_dev = DEVICE(dino_init(addr_space)); + memory_region_add_subregion(addr_space, DINO_HPA, + sysbus_mmio_get_region( + SYS_BUS_DEVICE(dino_dev), 0)); + pci_bus = PCI_BUS(qdev_get_child_bus(dino_dev, "pci")); assert(pci_bus); /* Create ISA bus. */ @@ -174,14 +237,21 @@ static void machine_hppa_init(MachineState *machine) assert(isa_bus); /* Realtime clock, used by firmware for PDC_TOD call. */ - mc146818_rtc_init(isa_bus, 2000, rtc_irq); + mc146818_rtc_init(isa_bus, 2000, NULL); - /* Serial code setup. */ - if (serial_hd(0)) { - uint32_t addr = DINO_UART_HPA + 0x800; - serial_mm_init(addr_space, addr, 0, serial_irq, - 115200, serial_hd(0), DEVICE_BIG_ENDIAN); - } + /* Serial ports: Lasi and Dino use a 7.272727 MHz clock. 
*/ + serial_mm_init(addr_space, LASI_UART_HPA + 0x800, 0, + qdev_get_gpio_in(lasi_dev, LASI_IRQ_UART_HPA), 7272727 / 16, + serial_hd(0), DEVICE_BIG_ENDIAN); + + serial_mm_init(addr_space, DINO_UART_HPA + 0x800, 0, + qdev_get_gpio_in(dino_dev, DINO_IRQ_RS232INT), 7272727 / 16, + serial_hd(1), DEVICE_BIG_ENDIAN); + + /* Parallel port */ + parallel_mm_init(addr_space, LASI_LPT_HPA + 0x800, 0, + qdev_get_gpio_in(lasi_dev, LASI_IRQ_LAN_HPA), + parallel_hds[0]); /* fw_cfg configuration interface */ create_fw_cfg(machine); @@ -192,6 +262,7 @@ static void machine_hppa_init(MachineState *machine) /* Graphics setup. */ if (machine->enable_graphics && vga_interface_type != VGA_NONE) { + vga_interface_created = true; dev = qdev_new("artist"); s = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(s, &error_fatal); @@ -200,12 +271,29 @@ static void machine_hppa_init(MachineState *machine) } /* Network setup. */ + if (enable_lasi_lan()) { + lasi_82596_init(addr_space, LASI_LAN_HPA, + qdev_get_gpio_in(lasi_dev, LASI_IRQ_LAN_HPA)); + } + for (i = 0; i < nb_nics; i++) { if (!enable_lasi_lan()) { pci_nic_init_nofail(&nd_table[i], pci_bus, "tulip", NULL); } } + /* PS/2 Keyboard/Mouse */ + dev = qdev_new(TYPE_LASIPS2); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, + qdev_get_gpio_in(lasi_dev, LASI_IRQ_PS2KBD_HPA)); + memory_region_add_subregion(addr_space, LASI_PS2KBD_HPA, + sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), + 0)); + memory_region_add_subregion(addr_space, LASI_PS2KBD_HPA + 0x100, + sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), + 1)); + /* register power switch emulation */ qemu_register_powerdown_notifier(&hppa_system_powerdown_notifier); @@ -308,8 +396,8 @@ static void machine_hppa_init(MachineState *machine) * mode (kernel_entry=1), and to boot from CD (gr[24]='d') * or hard disc * (gr[24]='c'). */ - kernel_entry = boot_menu ? 1 : 0; - cpu[0]->env.gr[24] = machine->boot_order[0]; + kernel_entry = machine->boot_config.has_menu ? machine->boot_config.menu : 0; + cpu[0]->env.gr[24] = machine->boot_config.order[0]; } /* We jump to the firmware entry routine and pass the @@ -326,19 +414,25 @@ static void machine_hppa_init(MachineState *machine) cpu[0]->env.gr[19] = FW_CFG_IO_BASE; } -static void hppa_machine_reset(MachineState *ms) +static void hppa_machine_reset(MachineState *ms, ShutdownCause reason) { unsigned int smp_cpus = ms->smp.cpus; int i; - qemu_devices_reset(); + qemu_devices_reset(reason); /* Start all CPUs at the firmware entry point. * Monarch CPU will initialize firmware, secondary CPUs - * will enter a small idle look and wait for rendevouz. */ + * will enter a small idle loop and wait for rendevouz. */ for (i = 0; i < smp_cpus; i++) { - cpu_set_pc(CPU(cpu[i]), firmware_entry); + CPUState *cs = CPU(cpu[i]); + + cpu_set_pc(cs, firmware_entry); + cpu[i]->env.psw = PSW_Q; cpu[i]->env.gr[5] = CPU_HPA + i * 0x1000; + + cs->exception_index = -1; + cs->halted = 0; } /* already initialized by machine_hppa_init()? 
*/ @@ -355,10 +449,21 @@ static void hppa_machine_reset(MachineState *ms) cpu[0]->env.gr[19] = FW_CFG_IO_BASE; } - -static void machine_hppa_machine_init(MachineClass *mc) +static void hppa_nmi(NMIState *n, int cpu_index, Error **errp) { - mc->desc = "HPPA generic machine"; + CPUState *cs; + + CPU_FOREACH(cs) { + cpu_interrupt(cs, CPU_INTERRUPT_NMI); + } +} + +static void hppa_machine_init_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + NMIClass *nc = NMI_CLASS(oc); + + mc->desc = "HPPA B160L machine"; mc->default_cpu_type = TYPE_HPPA_CPU; mc->init = machine_hppa_init; mc->reset = hppa_machine_reset; @@ -369,6 +474,23 @@ static void machine_hppa_machine_init(MachineClass *mc) mc->default_ram_size = 512 * MiB; mc->default_boot_order = "cd"; mc->default_ram_id = "ram"; + + nc->nmi_monitor_handler = hppa_nmi; } -DEFINE_MACHINE("hppa", machine_hppa_machine_init) +static const TypeInfo hppa_machine_init_typeinfo = { + .name = MACHINE_TYPE_NAME("hppa"), + .parent = TYPE_MACHINE, + .class_init = hppa_machine_init_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_NMI }, + { } + }, +}; + +static void hppa_machine_init_register_types(void) +{ + type_register_static(&hppa_machine_init_typeinfo); +} + +type_init(hppa_machine_init_register_types) diff --git a/hw/hppa/meson.build b/hw/hppa/meson.build index 1deae83aee..3d0c586c30 100644 --- a/hw/hppa/meson.build +++ b/hw/hppa/meson.build @@ -1,4 +1,4 @@ hppa_ss = ss.source_set() -hppa_ss.add(when: 'CONFIG_DINO', if_true: files('pci.c', 'machine.c', 'dino.c', 'lasi.c')) +hppa_ss.add(when: 'CONFIG_HPPA_B160L', if_true: files('machine.c')) hw_arch += {'hppa': hppa_ss} diff --git a/hw/hppa/pci.c b/hw/hppa/pci.c deleted file mode 100644 index 32609aba63..0000000000 --- a/hw/hppa/pci.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * QEMU HP-PARISC PCI support functions. - * - */ - -#include "qemu/osdep.h" -#include "hppa_sys.h" -#include "qemu/log.h" -#include "trace.h" - - -/* Fallback for unassigned PCI I/O operations. Avoids MCHK. */ - -static uint64_t ignore_read(void *opaque, hwaddr addr, unsigned size) -{ - return 0; -} - -static void ignore_write(void *opaque, hwaddr addr, uint64_t v, unsigned size) -{ -} - -const MemoryRegionOps hppa_pci_ignore_ops = { - .read = ignore_read, - .write = ignore_write, - .endianness = DEVICE_BIG_ENDIAN, - .valid = { - .min_access_size = 1, - .max_access_size = 8, - }, - .impl = { - .min_access_size = 1, - .max_access_size = 8, - }, -}; - - -/* PCI config space reads/writes, to byte-word addressable memory. */ -static uint64_t bw_conf1_read(void *opaque, hwaddr addr, - unsigned size) -{ - PCIBus *b = opaque; - return pci_data_read(b, addr, size); -} - -static void bw_conf1_write(void *opaque, hwaddr addr, - uint64_t val, unsigned size) -{ - PCIBus *b = opaque; - pci_data_write(b, addr, val, size); -} - -const MemoryRegionOps hppa_pci_conf1_ops = { - .read = bw_conf1_read, - .write = bw_conf1_write, - .endianness = DEVICE_BIG_ENDIAN, - .impl = { - .min_access_size = 1, - .max_access_size = 4, - }, -}; - -/* PCI/EISA Interrupt Acknowledge Cycle. 
*/ - -static uint64_t iack_read(void *opaque, hwaddr addr, unsigned size) -{ - return pic_read_irq(isa_pic); -} - -static void special_write(void *opaque, hwaddr addr, - uint64_t val, unsigned size) -{ - trace_hppa_pci_iack_write(); -} - -const MemoryRegionOps hppa_pci_iack_ops = { - .read = iack_read, - .write = special_write, - .endianness = DEVICE_BIG_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 4, - }, - .impl = { - .min_access_size = 4, - .max_access_size = 4, - }, -}; diff --git a/hw/hppa/trace-events b/hw/hppa/trace-events deleted file mode 100644 index 3f42be9056..0000000000 --- a/hw/hppa/trace-events +++ /dev/null @@ -1,14 +0,0 @@ -# See docs/devel/tracing.rst for syntax documentation. - -# pci.c -hppa_pci_iack_write(void) "" - -# dino.c -dino_chip_mem_valid(uint64_t addr, uint32_t val) "access to addr 0x%"PRIx64" is %d" -dino_chip_read(uint64_t addr, uint32_t val) "addr 0x%"PRIx64" val 0x%08x" -dino_chip_write(uint64_t addr, uint32_t val) "addr 0x%"PRIx64" val 0x%08x" - -# lasi.c -lasi_chip_mem_valid(uint64_t addr, uint32_t val) "access to addr 0x%"PRIx64" is %d" -lasi_chip_read(uint64_t addr, uint32_t val) "addr 0x%"PRIx64" val 0x%08x" -lasi_chip_write(uint64_t addr, uint32_t val) "addr 0x%"PRIx64" val 0x%08x" diff --git a/hw/hyperv/Kconfig b/hw/hyperv/Kconfig index 3fbfe41c9e..fcf65903bd 100644 --- a/hw/hyperv/Kconfig +++ b/hw/hyperv/Kconfig @@ -11,3 +11,8 @@ config VMBUS bool default y depends on HYPERV + +config SYNDBG + bool + default y + depends on VMBUS diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c index cb1074f234..57b402b956 100644 --- a/hw/hyperv/hyperv.c +++ b/hw/hyperv/hyperv.c @@ -27,13 +27,16 @@ struct SynICState { CPUState *cs; - bool enabled; + bool sctl_enabled; hwaddr msg_page_addr; hwaddr event_page_addr; MemoryRegion msg_page_mr; MemoryRegion event_page_mr; struct hyperv_message_page *msg_page; struct hyperv_event_flags_page *event_page; + + QemuMutex sint_routes_mutex; + QLIST_HEAD(, HvSintRoute) sint_routes; }; #define TYPE_SYNIC "hyperv-synic" @@ -51,11 +54,11 @@ static SynICState *get_synic(CPUState *cs) return SYNIC(object_resolve_path_component(OBJECT(cs), "synic")); } -static void synic_update(SynICState *synic, bool enable, +static void synic_update(SynICState *synic, bool sctl_enable, hwaddr msg_page_addr, hwaddr event_page_addr) { - synic->enabled = enable; + synic->sctl_enabled = sctl_enable; if (synic->msg_page_addr != msg_page_addr) { if (synic->msg_page_addr) { memory_region_del_subregion(get_system_memory(), @@ -80,7 +83,7 @@ static void synic_update(SynICState *synic, bool enable, } } -void hyperv_synic_update(CPUState *cs, bool enable, +void hyperv_synic_update(CPUState *cs, bool sctl_enable, hwaddr msg_page_addr, hwaddr event_page_addr) { SynICState *synic = get_synic(cs); @@ -89,7 +92,7 @@ void hyperv_synic_update(CPUState *cs, bool enable, return; } - synic_update(synic, enable, msg_page_addr, event_page_addr); + synic_update(synic, sctl_enable, msg_page_addr, event_page_addr); } static void synic_realize(DeviceState *dev, Error **errp) @@ -110,16 +113,20 @@ static void synic_realize(DeviceState *dev, Error **errp) sizeof(*synic->event_page), &error_abort); synic->msg_page = memory_region_get_ram_ptr(&synic->msg_page_mr); synic->event_page = memory_region_get_ram_ptr(&synic->event_page_mr); + qemu_mutex_init(&synic->sint_routes_mutex); + QLIST_INIT(&synic->sint_routes); g_free(msgp_name); g_free(eventp_name); } + static void synic_reset(DeviceState *dev) { SynICState *synic = SYNIC(dev); memset(synic->msg_page, 
0, sizeof(*synic->msg_page)); memset(synic->event_page, 0, sizeof(*synic->event_page)); synic_update(synic, false, 0, 0); + assert(QLIST_EMPTY(&synic->sint_routes)); } static void synic_class_init(ObjectClass *klass, void *data) @@ -150,7 +157,7 @@ void hyperv_synic_reset(CPUState *cs) SynICState *synic = get_synic(cs); if (synic) { - device_legacy_reset(DEVICE(synic)); + device_cold_reset(DEVICE(synic)); } } @@ -214,6 +221,7 @@ struct HvSintRoute { HvSintStagedMessage *staged_msg; unsigned refcount; + QLIST_ENTRY(HvSintRoute) link; }; static CPUState *hyperv_find_vcpu(uint32_t vp_index) @@ -259,7 +267,7 @@ static void cpu_post_msg(CPUState *cs, run_on_cpu_data data) assert(staged_msg->state == HV_STAGED_MSG_BUSY); - if (!synic->enabled || !synic->msg_page_addr) { + if (!synic->msg_page_addr) { staged_msg->status = -ENXIO; goto posted; } @@ -343,7 +351,7 @@ int hyperv_set_event_flag(HvSintRoute *sint_route, unsigned eventno) if (eventno > HV_EVENT_FLAGS_COUNT) { return -EINVAL; } - if (!synic->enabled || !synic->event_page_addr) { + if (!synic->sctl_enabled || !synic->event_page_addr) { return -ENXIO; } @@ -364,11 +372,12 @@ int hyperv_set_event_flag(HvSintRoute *sint_route, unsigned eventno) HvSintRoute *hyperv_sint_route_new(uint32_t vp_index, uint32_t sint, HvSintMsgCb cb, void *cb_data) { - HvSintRoute *sint_route; - EventNotifier *ack_notifier; + HvSintRoute *sint_route = NULL; + EventNotifier *ack_notifier = NULL; int r, gsi; CPUState *cs; SynICState *synic; + bool ack_event_initialized = false; cs = hyperv_find_vcpu(vp_index); if (!cs) { @@ -381,57 +390,77 @@ HvSintRoute *hyperv_sint_route_new(uint32_t vp_index, uint32_t sint, } sint_route = g_new0(HvSintRoute, 1); - r = event_notifier_init(&sint_route->sint_set_notifier, false); - if (r) { - goto err; + if (!sint_route) { + return NULL; } + sint_route->synic = synic; + sint_route->sint = sint; + sint_route->refcount = 1; ack_notifier = cb ? 
&sint_route->sint_ack_notifier : NULL; if (ack_notifier) { sint_route->staged_msg = g_new0(HvSintStagedMessage, 1); + if (!sint_route->staged_msg) { + goto cleanup_err_sint; + } sint_route->staged_msg->cb = cb; sint_route->staged_msg->cb_data = cb_data; r = event_notifier_init(ack_notifier, false); if (r) { - goto err_sint_set_notifier; + goto cleanup_err_sint; } - event_notifier_set_handler(ack_notifier, sint_ack_handler); + ack_event_initialized = true; + } + + /* See if we are done or we need to setup a GSI for this SintRoute */ + if (!synic->sctl_enabled) { + goto cleanup; + } + + /* We need to setup a GSI for this SintRoute */ + r = event_notifier_init(&sint_route->sint_set_notifier, false); + if (r) { + goto cleanup_err_sint; } gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vp_index, sint); if (gsi < 0) { - goto err_gsi; + goto cleanup_err_sint_notifier; } r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &sint_route->sint_set_notifier, ack_notifier, gsi); if (r) { - goto err_irqfd; + goto cleanup_err_irqfd; } sint_route->gsi = gsi; - sint_route->synic = synic; - sint_route->sint = sint; - sint_route->refcount = 1; - +cleanup: + qemu_mutex_lock(&synic->sint_routes_mutex); + QLIST_INSERT_HEAD(&synic->sint_routes, sint_route, link); + qemu_mutex_unlock(&synic->sint_routes_mutex); return sint_route; -err_irqfd: +cleanup_err_irqfd: kvm_irqchip_release_virq(kvm_state, gsi); -err_gsi: + +cleanup_err_sint_notifier: + event_notifier_cleanup(&sint_route->sint_set_notifier); + +cleanup_err_sint: if (ack_notifier) { - event_notifier_set_handler(ack_notifier, NULL); - event_notifier_cleanup(ack_notifier); + if (ack_event_initialized) { + event_notifier_set_handler(ack_notifier, NULL); + event_notifier_cleanup(ack_notifier); + } + g_free(sint_route->staged_msg); } -err_sint_set_notifier: - event_notifier_cleanup(&sint_route->sint_set_notifier); -err: - g_free(sint_route); + g_free(sint_route); return NULL; } @@ -442,6 +471,8 @@ void hyperv_sint_route_ref(HvSintRoute *sint_route) void hyperv_sint_route_unref(HvSintRoute *sint_route) { + SynICState *synic; + if (!sint_route) { return; } @@ -452,21 +483,33 @@ void hyperv_sint_route_unref(HvSintRoute *sint_route) return; } - kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, - &sint_route->sint_set_notifier, - sint_route->gsi); - kvm_irqchip_release_virq(kvm_state, sint_route->gsi); + synic = sint_route->synic; + qemu_mutex_lock(&synic->sint_routes_mutex); + QLIST_REMOVE(sint_route, link); + qemu_mutex_unlock(&synic->sint_routes_mutex); + + if (sint_route->gsi) { + kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, + &sint_route->sint_set_notifier, + sint_route->gsi); + kvm_irqchip_release_virq(kvm_state, sint_route->gsi); + event_notifier_cleanup(&sint_route->sint_set_notifier); + } + if (sint_route->staged_msg) { event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL); event_notifier_cleanup(&sint_route->sint_ack_notifier); g_free(sint_route->staged_msg); } - event_notifier_cleanup(&sint_route->sint_set_notifier); g_free(sint_route); } int hyperv_sint_route_set_sint(HvSintRoute *sint_route) { + if (!sint_route->gsi) { + return 0; + } + return event_notifier_set(&sint_route->sint_set_notifier); } @@ -661,3 +704,246 @@ uint16_t hyperv_hcall_signal_event(uint64_t param, bool fast) } return HV_STATUS_INVALID_CONNECTION_ID; } + +static HvSynDbgHandler hv_syndbg_handler; +static void *hv_syndbg_context; + +void hyperv_set_syndbg_handler(HvSynDbgHandler handler, void *context) +{ + assert(!hv_syndbg_handler); + hv_syndbg_handler = handler; + 
hv_syndbg_context = context; +} + +uint16_t hyperv_hcall_reset_dbg_session(uint64_t outgpa) +{ + uint16_t ret; + HvSynDbgMsg msg; + struct hyperv_reset_debug_session_output *reset_dbg_session = NULL; + hwaddr len; + + if (!hv_syndbg_handler) { + ret = HV_STATUS_INVALID_HYPERCALL_CODE; + goto cleanup; + } + + len = sizeof(*reset_dbg_session); + reset_dbg_session = cpu_physical_memory_map(outgpa, &len, 1); + if (!reset_dbg_session || len < sizeof(*reset_dbg_session)) { + ret = HV_STATUS_INSUFFICIENT_MEMORY; + goto cleanup; + } + + msg.type = HV_SYNDBG_MSG_CONNECTION_INFO; + ret = hv_syndbg_handler(hv_syndbg_context, &msg); + if (ret) { + goto cleanup; + } + + reset_dbg_session->host_ip = msg.u.connection_info.host_ip; + reset_dbg_session->host_port = msg.u.connection_info.host_port; + /* The following fields are only used as validation for KDVM */ + memset(&reset_dbg_session->host_mac, 0, + sizeof(reset_dbg_session->host_mac)); + reset_dbg_session->target_ip = msg.u.connection_info.host_ip; + reset_dbg_session->target_port = msg.u.connection_info.host_port; + memset(&reset_dbg_session->target_mac, 0, + sizeof(reset_dbg_session->target_mac)); +cleanup: + if (reset_dbg_session) { + cpu_physical_memory_unmap(reset_dbg_session, + sizeof(*reset_dbg_session), 1, len); + } + + return ret; +} + +uint16_t hyperv_hcall_retreive_dbg_data(uint64_t ingpa, uint64_t outgpa, + bool fast) +{ + uint16_t ret; + struct hyperv_retrieve_debug_data_input *debug_data_in = NULL; + struct hyperv_retrieve_debug_data_output *debug_data_out = NULL; + hwaddr in_len, out_len; + HvSynDbgMsg msg; + + if (fast || !hv_syndbg_handler) { + ret = HV_STATUS_INVALID_HYPERCALL_CODE; + goto cleanup; + } + + in_len = sizeof(*debug_data_in); + debug_data_in = cpu_physical_memory_map(ingpa, &in_len, 0); + if (!debug_data_in || in_len < sizeof(*debug_data_in)) { + ret = HV_STATUS_INSUFFICIENT_MEMORY; + goto cleanup; + } + + out_len = sizeof(*debug_data_out); + debug_data_out = cpu_physical_memory_map(outgpa, &out_len, 1); + if (!debug_data_out || out_len < sizeof(*debug_data_out)) { + ret = HV_STATUS_INSUFFICIENT_MEMORY; + goto cleanup; + } + + msg.type = HV_SYNDBG_MSG_RECV; + msg.u.recv.buf_gpa = outgpa + sizeof(*debug_data_out); + msg.u.recv.count = TARGET_PAGE_SIZE - sizeof(*debug_data_out); + msg.u.recv.options = debug_data_in->options; + msg.u.recv.timeout = debug_data_in->timeout; + msg.u.recv.is_raw = true; + ret = hv_syndbg_handler(hv_syndbg_context, &msg); + if (ret == HV_STATUS_NO_DATA) { + debug_data_out->retrieved_count = 0; + debug_data_out->remaining_count = debug_data_in->count; + goto cleanup; + } else if (ret != HV_STATUS_SUCCESS) { + goto cleanup; + } + + debug_data_out->retrieved_count = msg.u.recv.retrieved_count; + debug_data_out->remaining_count = + debug_data_in->count - msg.u.recv.retrieved_count; +cleanup: + if (debug_data_out) { + cpu_physical_memory_unmap(debug_data_out, sizeof(*debug_data_out), 1, + out_len); + } + + if (debug_data_in) { + cpu_physical_memory_unmap(debug_data_in, sizeof(*debug_data_in), 0, + in_len); + } + + return ret; +} + +uint16_t hyperv_hcall_post_dbg_data(uint64_t ingpa, uint64_t outgpa, bool fast) +{ + uint16_t ret; + struct hyperv_post_debug_data_input *post_data_in = NULL; + struct hyperv_post_debug_data_output *post_data_out = NULL; + hwaddr in_len, out_len; + HvSynDbgMsg msg; + + if (fast || !hv_syndbg_handler) { + ret = HV_STATUS_INVALID_HYPERCALL_CODE; + goto cleanup; + } + + in_len = sizeof(*post_data_in); + post_data_in = cpu_physical_memory_map(ingpa, &in_len, 0); + if 
(!post_data_in || in_len < sizeof(*post_data_in)) { + ret = HV_STATUS_INSUFFICIENT_MEMORY; + goto cleanup; + } + + if (post_data_in->count > TARGET_PAGE_SIZE - sizeof(*post_data_in)) { + ret = HV_STATUS_INVALID_PARAMETER; + goto cleanup; + } + + out_len = sizeof(*post_data_out); + post_data_out = cpu_physical_memory_map(outgpa, &out_len, 1); + if (!post_data_out || out_len < sizeof(*post_data_out)) { + ret = HV_STATUS_INSUFFICIENT_MEMORY; + goto cleanup; + } + + msg.type = HV_SYNDBG_MSG_SEND; + msg.u.send.buf_gpa = ingpa + sizeof(*post_data_in); + msg.u.send.count = post_data_in->count; + msg.u.send.is_raw = true; + ret = hv_syndbg_handler(hv_syndbg_context, &msg); + if (ret != HV_STATUS_SUCCESS) { + goto cleanup; + } + + post_data_out->pending_count = msg.u.send.pending_count; + ret = post_data_out->pending_count ? HV_STATUS_INSUFFICIENT_BUFFERS : + HV_STATUS_SUCCESS; +cleanup: + if (post_data_out) { + cpu_physical_memory_unmap(post_data_out, + sizeof(*post_data_out), 1, out_len); + } + + if (post_data_in) { + cpu_physical_memory_unmap(post_data_in, + sizeof(*post_data_in), 0, in_len); + } + + return ret; +} + +uint32_t hyperv_syndbg_send(uint64_t ingpa, uint32_t count) +{ + HvSynDbgMsg msg; + + if (!hv_syndbg_handler) { + return HV_SYNDBG_STATUS_INVALID; + } + + msg.type = HV_SYNDBG_MSG_SEND; + msg.u.send.buf_gpa = ingpa; + msg.u.send.count = count; + msg.u.send.is_raw = false; + if (hv_syndbg_handler(hv_syndbg_context, &msg)) { + return HV_SYNDBG_STATUS_INVALID; + } + + return HV_SYNDBG_STATUS_SEND_SUCCESS; +} + +uint32_t hyperv_syndbg_recv(uint64_t ingpa, uint32_t count) +{ + uint16_t ret; + HvSynDbgMsg msg; + + if (!hv_syndbg_handler) { + return HV_SYNDBG_STATUS_INVALID; + } + + msg.type = HV_SYNDBG_MSG_RECV; + msg.u.recv.buf_gpa = ingpa; + msg.u.recv.count = count; + msg.u.recv.options = 0; + msg.u.recv.timeout = 0; + msg.u.recv.is_raw = false; + ret = hv_syndbg_handler(hv_syndbg_context, &msg); + if (ret != HV_STATUS_SUCCESS) { + return 0; + } + + return HV_SYNDBG_STATUS_SET_SIZE(HV_SYNDBG_STATUS_RECV_SUCCESS, + msg.u.recv.retrieved_count); +} + +void hyperv_syndbg_set_pending_page(uint64_t ingpa) +{ + HvSynDbgMsg msg; + + if (!hv_syndbg_handler) { + return; + } + + msg.type = HV_SYNDBG_MSG_SET_PENDING_PAGE; + msg.u.pending_page.buf_gpa = ingpa; + hv_syndbg_handler(hv_syndbg_context, &msg); +} + +uint64_t hyperv_syndbg_query_options(void) +{ + HvSynDbgMsg msg; + + if (!hv_syndbg_handler) { + return 0; + } + + msg.type = HV_SYNDBG_MSG_QUERY_OPTIONS; + if (hv_syndbg_handler(hv_syndbg_context, &msg) != HV_STATUS_SUCCESS) { + return 0; + } + + return msg.u.query_options.options; +} diff --git a/hw/hyperv/meson.build b/hw/hyperv/meson.build index 1367e2994f..b43f119ea5 100644 --- a/hw/hyperv/meson.build +++ b/hw/hyperv/meson.build @@ -1,3 +1,4 @@ specific_ss.add(when: 'CONFIG_HYPERV', if_true: files('hyperv.c')) specific_ss.add(when: 'CONFIG_HYPERV_TESTDEV', if_true: files('hyperv_testdev.c')) specific_ss.add(when: 'CONFIG_VMBUS', if_true: files('vmbus.c')) +specific_ss.add(when: 'CONFIG_SYNDBG', if_true: files('syndbg.c')) diff --git a/hw/hyperv/syndbg.c b/hw/hyperv/syndbg.c new file mode 100644 index 0000000000..16d04cfdc6 --- /dev/null +++ b/hw/hyperv/syndbg.c @@ -0,0 +1,401 @@ +/* + * QEMU Hyper-V Synthetic Debugging device + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/ctype.h" +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "qemu/sockets.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" +#include "hw/loader.h" +#include "cpu.h" +#include "hw/hyperv/hyperv.h" +#include "hw/hyperv/vmbus-bridge.h" +#include "hw/hyperv/hyperv-proto.h" +#include "net/net.h" +#include "net/eth.h" +#include "net/checksum.h" +#include "trace.h" + +#define TYPE_HV_SYNDBG "hv-syndbg" + +typedef struct HvSynDbg { + DeviceState parent_obj; + + char *host_ip; + uint16_t host_port; + bool use_hcalls; + + uint32_t target_ip; + struct sockaddr_in servaddr; + int socket; + bool has_data_pending; + uint64_t pending_page_gpa; +} HvSynDbg; + +#define HVSYNDBG(obj) OBJECT_CHECK(HvSynDbg, (obj), TYPE_HV_SYNDBG) + +/* returns NULL unless there is exactly one HV Synth debug device */ +static HvSynDbg *hv_syndbg_find(void) +{ + /* Returns NULL unless there is exactly one hvsd device */ + return HVSYNDBG(object_resolve_path_type("", TYPE_HV_SYNDBG, NULL)); +} + +static void set_pending_state(HvSynDbg *syndbg, bool has_pending) +{ + hwaddr out_len; + void *out_data; + + syndbg->has_data_pending = has_pending; + + if (!syndbg->pending_page_gpa) { + return; + } + + out_len = 1; + out_data = cpu_physical_memory_map(syndbg->pending_page_gpa, &out_len, 1); + if (out_data) { + *(uint8_t *)out_data = !!has_pending; + cpu_physical_memory_unmap(out_data, out_len, 1, out_len); + } +} + +static bool get_udb_pkt_data(void *p, uint32_t len, uint32_t *data_ofs, + uint32_t *src_ip) +{ + uint32_t offset, curr_len = len; + + if (curr_len < sizeof(struct eth_header) || + (be16_to_cpu(PKT_GET_ETH_HDR(p)->h_proto) != ETH_P_IP)) { + return false; + } + offset = sizeof(struct eth_header); + curr_len -= sizeof(struct eth_header); + + if (curr_len < sizeof(struct ip_header) || + PKT_GET_IP_HDR(p)->ip_p != IP_PROTO_UDP) { + return false; + } + offset += PKT_GET_IP_HDR_LEN(p); + curr_len -= PKT_GET_IP_HDR_LEN(p); + + if (curr_len < sizeof(struct udp_header)) { + return false; + } + + offset += sizeof(struct udp_header); + *data_ofs = offset; + *src_ip = PKT_GET_IP_HDR(p)->ip_src; + return true; +} + +static uint16_t handle_send_msg(HvSynDbg *syndbg, uint64_t ingpa, + uint32_t count, bool is_raw, + uint32_t *pending_count) +{ + uint16_t ret; + hwaddr data_len; + void *debug_data = NULL; + uint32_t udp_data_ofs = 0; + const void *pkt_data; + int sent_count; + + data_len = count; + debug_data = cpu_physical_memory_map(ingpa, &data_len, 0); + if (!debug_data || data_len < count) { + ret = HV_STATUS_INSUFFICIENT_MEMORY; + goto cleanup; + } + + if (is_raw && + !get_udb_pkt_data(debug_data, count, &udp_data_ofs, + &syndbg->target_ip)) { + ret = HV_STATUS_SUCCESS; + goto cleanup; + } + + pkt_data = (const void *)((uintptr_t)debug_data + udp_data_ofs); + sent_count = sendto(syndbg->socket, pkt_data, count - udp_data_ofs, + MSG_NOSIGNAL, NULL, 0); + if (sent_count == -1) { + ret = HV_STATUS_INSUFFICIENT_MEMORY; + goto cleanup; + } + + *pending_count = count - (sent_count + udp_data_ofs); + ret = HV_STATUS_SUCCESS; +cleanup: + if (debug_data) { + cpu_physical_memory_unmap(debug_data, count, 0, data_len); + } + + return ret; +} + +#define UDP_PKT_HEADER_SIZE \ + (sizeof(struct eth_header) + sizeof(struct ip_header) +\ + sizeof(struct udp_header)) + +static bool create_udp_pkt(HvSynDbg *syndbg, void *pkt, uint32_t pkt_len, + void *udp_data, uint32_t udp_data_len) +{ + struct udp_header *udp_part; + + if (pkt_len < 
(UDP_PKT_HEADER_SIZE + udp_data_len)) { + return false; + } + + /* Setup the eth */ + memset(&PKT_GET_ETH_HDR(pkt)->h_source, 0, ETH_ALEN); + memset(&PKT_GET_ETH_HDR(pkt)->h_dest, 0, ETH_ALEN); + PKT_GET_ETH_HDR(pkt)->h_proto = cpu_to_be16(ETH_P_IP); + + /* Setup the ip */ + PKT_GET_IP_HDR(pkt)->ip_ver_len = + (4 << 4) | (sizeof(struct ip_header) >> 2); + PKT_GET_IP_HDR(pkt)->ip_tos = 0; + PKT_GET_IP_HDR(pkt)->ip_id = 0; + PKT_GET_IP_HDR(pkt)->ip_off = 0; + PKT_GET_IP_HDR(pkt)->ip_ttl = 64; /* IPDEFTTL */ + PKT_GET_IP_HDR(pkt)->ip_p = IP_PROTO_UDP; + PKT_GET_IP_HDR(pkt)->ip_src = syndbg->servaddr.sin_addr.s_addr; + PKT_GET_IP_HDR(pkt)->ip_dst = syndbg->target_ip; + PKT_GET_IP_HDR(pkt)->ip_len = + cpu_to_be16(sizeof(struct ip_header) + sizeof(struct udp_header) + + udp_data_len); + eth_fix_ip4_checksum(PKT_GET_IP_HDR(pkt), PKT_GET_IP_HDR_LEN(pkt)); + + udp_part = (struct udp_header *)((uintptr_t)pkt + + sizeof(struct eth_header) + + PKT_GET_IP_HDR_LEN(pkt)); + udp_part->uh_sport = syndbg->servaddr.sin_port; + udp_part->uh_dport = syndbg->servaddr.sin_port; + udp_part->uh_ulen = cpu_to_be16(sizeof(struct udp_header) + udp_data_len); + memcpy(udp_part + 1, udp_data, udp_data_len); + net_checksum_calculate(pkt, UDP_PKT_HEADER_SIZE + udp_data_len, CSUM_ALL); + return true; +} + +static uint16_t handle_recv_msg(HvSynDbg *syndbg, uint64_t outgpa, + uint32_t count, bool is_raw, uint32_t options, + uint64_t timeout, uint32_t *retrieved_count) +{ + uint16_t ret; + uint8_t data_buf[TARGET_PAGE_SIZE - UDP_PKT_HEADER_SIZE]; + hwaddr out_len; + void *out_data; + ssize_t recv_byte_count; + + /* TODO: Handle options and timeout */ + (void)options; + (void)timeout; + + if (!syndbg->has_data_pending) { + recv_byte_count = 0; + } else { + recv_byte_count = recv(syndbg->socket, data_buf, + MIN(sizeof(data_buf), count), MSG_WAITALL); + if (recv_byte_count == -1) { + return HV_STATUS_INVALID_PARAMETER; + } + } + + if (!recv_byte_count) { + *retrieved_count = 0; + return HV_STATUS_NO_DATA; + } + + set_pending_state(syndbg, false); + + out_len = recv_byte_count; + if (is_raw) { + out_len += UDP_PKT_HEADER_SIZE; + } + out_data = cpu_physical_memory_map(outgpa, &out_len, 1); + if (!out_data) { + return HV_STATUS_INSUFFICIENT_MEMORY; + } + + if (is_raw && + !create_udp_pkt(syndbg, out_data, + recv_byte_count + UDP_PKT_HEADER_SIZE, + data_buf, recv_byte_count)) { + ret = HV_STATUS_INSUFFICIENT_MEMORY; + goto cleanup_out_data; + } else if (!is_raw) { + memcpy(out_data, data_buf, recv_byte_count); + } + + *retrieved_count = recv_byte_count; + if (is_raw) { + *retrieved_count += UDP_PKT_HEADER_SIZE; + } + ret = HV_STATUS_SUCCESS; + +cleanup_out_data: + cpu_physical_memory_unmap(out_data, out_len, 1, out_len); + return ret; +} + +static uint16_t hv_syndbg_handler(void *context, HvSynDbgMsg *msg) +{ + HvSynDbg *syndbg = context; + uint16_t ret = HV_STATUS_INVALID_HYPERCALL_CODE; + + switch (msg->type) { + case HV_SYNDBG_MSG_CONNECTION_INFO: + msg->u.connection_info.host_ip = + ntohl(syndbg->servaddr.sin_addr.s_addr); + msg->u.connection_info.host_port = + ntohs(syndbg->servaddr.sin_port); + ret = HV_STATUS_SUCCESS; + break; + case HV_SYNDBG_MSG_SEND: + ret = handle_send_msg(syndbg, msg->u.send.buf_gpa, msg->u.send.count, + msg->u.send.is_raw, &msg->u.send.pending_count); + break; + case HV_SYNDBG_MSG_RECV: + ret = handle_recv_msg(syndbg, msg->u.recv.buf_gpa, msg->u.recv.count, + msg->u.recv.is_raw, msg->u.recv.options, + msg->u.recv.timeout, + &msg->u.recv.retrieved_count); + break; + case HV_SYNDBG_MSG_SET_PENDING_PAGE: 
+ syndbg->pending_page_gpa = msg->u.pending_page.buf_gpa; + ret = HV_STATUS_SUCCESS; + break; + case HV_SYNDBG_MSG_QUERY_OPTIONS: + msg->u.query_options.options = 0; + if (syndbg->use_hcalls) { + msg->u.query_options.options = HV_X64_SYNDBG_OPTION_USE_HCALLS; + } + ret = HV_STATUS_SUCCESS; + break; + default: + break; + } + + return ret; +} + +static void hv_syndbg_recv_event(void *opaque) +{ + HvSynDbg *syndbg = opaque; + struct timeval tv; + fd_set rfds; + + tv.tv_sec = 0; + tv.tv_usec = 0; + FD_ZERO(&rfds); + FD_SET(syndbg->socket, &rfds); + if (select(syndbg->socket + 1, &rfds, NULL, NULL, &tv) > 0) { + set_pending_state(syndbg, true); + } +} + +static void hv_syndbg_realize(DeviceState *dev, Error **errp) +{ + HvSynDbg *syndbg = HVSYNDBG(dev); + + if (!hv_syndbg_find()) { + error_setg(errp, "at most one %s device is permitted", TYPE_HV_SYNDBG); + return; + } + + if (!vmbus_bridge_find()) { + error_setg(errp, "%s device requires vmbus-bridge device", + TYPE_HV_SYNDBG); + return; + } + + /* Parse and host_ip */ + if (qemu_isdigit(syndbg->host_ip[0])) { + syndbg->servaddr.sin_addr.s_addr = inet_addr(syndbg->host_ip); + } else { + struct hostent *he = gethostbyname(syndbg->host_ip); + if (!he) { + error_setg(errp, "%s failed to resolve host name %s", + TYPE_HV_SYNDBG, syndbg->host_ip); + return; + } + syndbg->servaddr.sin_addr = *(struct in_addr *)he->h_addr; + } + + syndbg->socket = socket(AF_INET, SOCK_DGRAM, 0); + if (syndbg->socket < 0) { + error_setg(errp, "%s failed to create socket", TYPE_HV_SYNDBG); + return; + } + + qemu_socket_set_nonblock(syndbg->socket); + + syndbg->servaddr.sin_port = htons(syndbg->host_port); + syndbg->servaddr.sin_family = AF_INET; + if (connect(syndbg->socket, (struct sockaddr *)&syndbg->servaddr, + sizeof(syndbg->servaddr)) < 0) { + closesocket(syndbg->socket); + error_setg(errp, "%s failed to connect to socket", TYPE_HV_SYNDBG); + return; + } + + syndbg->pending_page_gpa = 0; + syndbg->has_data_pending = false; + hyperv_set_syndbg_handler(hv_syndbg_handler, syndbg); + qemu_set_fd_handler(syndbg->socket, hv_syndbg_recv_event, NULL, syndbg); +} + +static void hv_syndbg_unrealize(DeviceState *dev) +{ + HvSynDbg *syndbg = HVSYNDBG(dev); + + if (syndbg->socket > 0) { + qemu_set_fd_handler(syndbg->socket, NULL, NULL, NULL); + closesocket(syndbg->socket); + } +} + +static const VMStateDescription vmstate_hv_syndbg = { + .name = TYPE_HV_SYNDBG, + .unmigratable = 1, +}; + +static Property hv_syndbg_properties[] = { + DEFINE_PROP_STRING("host_ip", HvSynDbg, host_ip), + DEFINE_PROP_UINT16("host_port", HvSynDbg, host_port, 50000), + DEFINE_PROP_BOOL("use_hcalls", HvSynDbg, use_hcalls, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void hv_syndbg_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, hv_syndbg_properties); + dc->fw_name = TYPE_HV_SYNDBG; + dc->vmsd = &vmstate_hv_syndbg; + dc->realize = hv_syndbg_realize; + dc->unrealize = hv_syndbg_unrealize; + dc->user_creatable = true; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo hv_syndbg_type_info = { + .name = TYPE_HV_SYNDBG, + .parent = TYPE_DEVICE, + .instance_size = sizeof(HvSynDbg), + .class_init = hv_syndbg_class_init, +}; + +static void hv_syndbg_register_types(void) +{ + type_register_static(&hv_syndbg_type_info); +} + +type_init(hv_syndbg_register_types) diff --git a/hw/hyperv/vmbus.c b/hw/hyperv/vmbus.c index c9887d5a7b..30bc04e1c4 100644 --- a/hw/hyperv/vmbus.c +++ b/hw/hyperv/vmbus.c @@ -373,7 +373,8 @@ 
static ssize_t gpadl_iter_io(GpadlIter *iter, void *buf, uint32_t len) maddr = (iter->gpadl->gfns[idx] << TARGET_PAGE_BITS) | off_in_page; - iter->map = dma_memory_map(iter->as, maddr, &mlen, iter->dir); + iter->map = dma_memory_map(iter->as, maddr, &mlen, iter->dir, + MEMTXATTRS_UNSPECIFIED); if (mlen != pgleft) { dma_memory_unmap(iter->as, iter->map, mlen, iter->dir, 0); iter->map = NULL; @@ -490,7 +491,8 @@ int vmbus_map_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov, goto err; } - iov[ret_cnt].iov_base = dma_memory_map(sgl->as, a, &l, dir); + iov[ret_cnt].iov_base = dma_memory_map(sgl->as, a, &l, dir, + MEMTXATTRS_UNSPECIFIED); if (!l) { ret = -EFAULT; goto err; @@ -566,7 +568,7 @@ static vmbus_ring_buffer *ringbuf_map_hdr(VMBusRingBufCommon *ringbuf) dma_addr_t mlen = sizeof(*rb); rb = dma_memory_map(ringbuf->as, ringbuf->rb_addr, &mlen, - DMA_DIRECTION_FROM_DEVICE); + DMA_DIRECTION_FROM_DEVICE, MEMTXATTRS_UNSPECIFIED); if (mlen != sizeof(*rb)) { dma_memory_unmap(ringbuf->as, rb, mlen, DMA_DIRECTION_FROM_DEVICE, 0); @@ -1271,105 +1273,6 @@ void vmbus_free_req(void *req) g_free(req); } -static const VMStateDescription vmstate_sgent = { - .name = "vmbus/sgentry", - .version_id = 0, - .minimum_version_id = 0, - .fields = (VMStateField[]) { - VMSTATE_UINT64(base, ScatterGatherEntry), - VMSTATE_UINT64(len, ScatterGatherEntry), - VMSTATE_END_OF_LIST() - } -}; - -typedef struct VMBusChanReqSave { - uint16_t chan_idx; - uint16_t pkt_type; - uint32_t msglen; - void *msg; - uint64_t transaction_id; - bool need_comp; - uint32_t num; - ScatterGatherEntry *sgl; -} VMBusChanReqSave; - -static const VMStateDescription vmstate_vmbus_chan_req = { - .name = "vmbus/vmbus_chan_req", - .version_id = 0, - .minimum_version_id = 0, - .fields = (VMStateField[]) { - VMSTATE_UINT16(chan_idx, VMBusChanReqSave), - VMSTATE_UINT16(pkt_type, VMBusChanReqSave), - VMSTATE_UINT32(msglen, VMBusChanReqSave), - VMSTATE_VBUFFER_ALLOC_UINT32(msg, VMBusChanReqSave, 0, NULL, msglen), - VMSTATE_UINT64(transaction_id, VMBusChanReqSave), - VMSTATE_BOOL(need_comp, VMBusChanReqSave), - VMSTATE_UINT32(num, VMBusChanReqSave), - VMSTATE_STRUCT_VARRAY_POINTER_UINT32(sgl, VMBusChanReqSave, num, - vmstate_sgent, ScatterGatherEntry), - VMSTATE_END_OF_LIST() - } -}; - -void vmbus_save_req(QEMUFile *f, VMBusChanReq *req) -{ - VMBusChanReqSave req_save; - - req_save.chan_idx = req->chan->subchan_idx; - req_save.pkt_type = req->pkt_type; - req_save.msglen = req->msglen; - req_save.msg = req->msg; - req_save.transaction_id = req->transaction_id; - req_save.need_comp = req->need_comp; - req_save.num = req->sgl.nsg; - req_save.sgl = g_memdup(req->sgl.sg, - req_save.num * sizeof(ScatterGatherEntry)); - - vmstate_save_state(f, &vmstate_vmbus_chan_req, &req_save, NULL); - - g_free(req_save.sgl); -} - -void *vmbus_load_req(QEMUFile *f, VMBusDevice *dev, uint32_t size) -{ - VMBusChanReqSave req_save; - VMBusChanReq *req = NULL; - VMBusChannel *chan = NULL; - uint32_t i; - - vmstate_load_state(f, &vmstate_vmbus_chan_req, &req_save, 0); - - if (req_save.chan_idx >= dev->num_channels) { - error_report("%s: %u(chan_idx) > %u(num_channels)", __func__, - req_save.chan_idx, dev->num_channels); - goto out; - } - chan = &dev->channels[req_save.chan_idx]; - - if (vmbus_channel_reserve(chan, 0, req_save.msglen)) { - goto out; - } - - req = vmbus_alloc_req(chan, size, req_save.pkt_type, req_save.msglen, - req_save.transaction_id, req_save.need_comp); - if (req_save.msglen) { - memcpy(req->msg, req_save.msg, req_save.msglen); - } - - for (i = 0; i < 
req_save.num; i++) { - qemu_sglist_add(&req->sgl, req_save.sgl[i].base, req_save.sgl[i].len); - } - -out: - if (req_save.msglen) { - g_free(req_save.msg); - } - if (req_save.num) { - g_free(req_save.sgl); - } - return req; -} - static void channel_event_cb(EventNotifier *e) { VMBusChannel *chan = container_of(e, VMBusChannel, notifier); @@ -2729,7 +2632,7 @@ static void vmbus_bridge_realize(DeviceState *dev, Error **errp) return; } - bridge->bus = VMBUS(qbus_create(TYPE_VMBUS, dev, "vmbus")); + bridge->bus = VMBUS(qbus_new(TYPE_VMBUS, dev, "vmbus")); } static char *vmbus_bridge_ofw_unit_address(const SysBusDevice *dev) diff --git a/hw/i2c/Kconfig b/hw/i2c/Kconfig index 8217cb5041..9bb8870517 100644 --- a/hw/i2c/Kconfig +++ b/hw/i2c/Kconfig @@ -1,6 +1,11 @@ config I2C bool +config I2C_DEVICES + # Device group for i2c devices which can reasonably be user-plugged + # to any board's i2c bus + bool + config SMBUS bool select I2C diff --git a/hw/i2c/aspeed_i2c.c b/hw/i2c/aspeed_i2c.c index 8d276d9ed3..c166fd20fa 100644 --- a/hw/i2c/aspeed_i2c.c +++ b/hw/i2c/aspeed_i2c.c @@ -21,6 +21,7 @@ #include "qemu/osdep.h" #include "hw/sysbus.h" #include "migration/vmstate.h" +#include "qemu/cutils.h" #include "qemu/log.h" #include "qemu/module.h" #include "qemu/error-report.h" @@ -28,196 +29,98 @@ #include "hw/i2c/aspeed_i2c.h" #include "hw/irq.h" #include "hw/qdev-properties.h" +#include "hw/registerfields.h" #include "trace.h" -/* I2C Global Register */ - -#define I2C_CTRL_STATUS 0x00 /* Device Interrupt Status */ -#define I2C_CTRL_ASSIGN 0x08 /* Device Interrupt Target - Assignment */ -#define I2C_CTRL_GLOBAL 0x0C /* Global Control Register */ -#define I2C_CTRL_SRAM_EN BIT(0) - -/* I2C Device (Bus) Register */ - -#define I2CD_FUN_CTRL_REG 0x00 /* I2CD Function Control */ -#define I2CD_POOL_PAGE_SEL(x) (((x) >> 20) & 0x7) /* AST2400 */ -#define I2CD_M_SDA_LOCK_EN (0x1 << 16) -#define I2CD_MULTI_MASTER_DIS (0x1 << 15) -#define I2CD_M_SCL_DRIVE_EN (0x1 << 14) -#define I2CD_MSB_STS (0x1 << 9) -#define I2CD_SDA_DRIVE_1T_EN (0x1 << 8) -#define I2CD_M_SDA_DRIVE_1T_EN (0x1 << 7) -#define I2CD_M_HIGH_SPEED_EN (0x1 << 6) -#define I2CD_DEF_ADDR_EN (0x1 << 5) -#define I2CD_DEF_ALERT_EN (0x1 << 4) -#define I2CD_DEF_ARP_EN (0x1 << 3) -#define I2CD_DEF_GCALL_EN (0x1 << 2) -#define I2CD_SLAVE_EN (0x1 << 1) -#define I2CD_MASTER_EN (0x1) - -#define I2CD_AC_TIMING_REG1 0x04 /* Clock and AC Timing Control #1 */ -#define I2CD_AC_TIMING_REG2 0x08 /* Clock and AC Timing Control #1 */ -#define I2CD_INTR_CTRL_REG 0x0c /* I2CD Interrupt Control */ -#define I2CD_INTR_STS_REG 0x10 /* I2CD Interrupt Status */ - -#define I2CD_INTR_SLAVE_ADDR_MATCH (0x1 << 31) /* 0: addr1 1: addr2 */ -#define I2CD_INTR_SLAVE_ADDR_RX_PENDING (0x1 << 30) -/* bits[19-16] Reserved */ - -/* All bits below are cleared by writing 1 */ -#define I2CD_INTR_SLAVE_INACTIVE_TIMEOUT (0x1 << 15) -#define I2CD_INTR_SDA_DL_TIMEOUT (0x1 << 14) -#define I2CD_INTR_BUS_RECOVER_DONE (0x1 << 13) -#define I2CD_INTR_SMBUS_ALERT (0x1 << 12) /* Bus [0-3] only */ -#define I2CD_INTR_SMBUS_ARP_ADDR (0x1 << 11) /* Removed */ -#define I2CD_INTR_SMBUS_DEV_ALERT_ADDR (0x1 << 10) /* Removed */ -#define I2CD_INTR_SMBUS_DEF_ADDR (0x1 << 9) /* Removed */ -#define I2CD_INTR_GCALL_ADDR (0x1 << 8) /* Removed */ -#define I2CD_INTR_SLAVE_ADDR_RX_MATCH (0x1 << 7) /* use RX_DONE */ -#define I2CD_INTR_SCL_TIMEOUT (0x1 << 6) -#define I2CD_INTR_ABNORMAL (0x1 << 5) -#define I2CD_INTR_NORMAL_STOP (0x1 << 4) -#define I2CD_INTR_ARBIT_LOSS (0x1 << 3) -#define I2CD_INTR_RX_DONE (0x1 << 2) -#define 
I2CD_INTR_TX_NAK (0x1 << 1) -#define I2CD_INTR_TX_ACK (0x1 << 0) - -#define I2CD_CMD_REG 0x14 /* I2CD Command/Status */ -#define I2CD_SDA_OE (0x1 << 28) -#define I2CD_SDA_O (0x1 << 27) -#define I2CD_SCL_OE (0x1 << 26) -#define I2CD_SCL_O (0x1 << 25) -#define I2CD_TX_TIMING (0x1 << 24) -#define I2CD_TX_STATUS (0x1 << 23) - -#define I2CD_TX_STATE_SHIFT 19 /* Tx State Machine */ -#define I2CD_TX_STATE_MASK 0xf -#define I2CD_IDLE 0x0 -#define I2CD_MACTIVE 0x8 -#define I2CD_MSTART 0x9 -#define I2CD_MSTARTR 0xa -#define I2CD_MSTOP 0xb -#define I2CD_MTXD 0xc -#define I2CD_MRXACK 0xd -#define I2CD_MRXD 0xe -#define I2CD_MTXACK 0xf -#define I2CD_SWAIT 0x1 -#define I2CD_SRXD 0x4 -#define I2CD_STXACK 0x5 -#define I2CD_STXD 0x6 -#define I2CD_SRXACK 0x7 -#define I2CD_RECOVER 0x3 - -#define I2CD_SCL_LINE_STS (0x1 << 18) -#define I2CD_SDA_LINE_STS (0x1 << 17) -#define I2CD_BUS_BUSY_STS (0x1 << 16) -#define I2CD_SDA_OE_OUT_DIR (0x1 << 15) -#define I2CD_SDA_O_OUT_DIR (0x1 << 14) -#define I2CD_SCL_OE_OUT_DIR (0x1 << 13) -#define I2CD_SCL_O_OUT_DIR (0x1 << 12) -#define I2CD_BUS_RECOVER_CMD_EN (0x1 << 11) -#define I2CD_S_ALT_EN (0x1 << 10) - -/* Command Bit */ -#define I2CD_RX_DMA_ENABLE (0x1 << 9) -#define I2CD_TX_DMA_ENABLE (0x1 << 8) -#define I2CD_RX_BUFF_ENABLE (0x1 << 7) -#define I2CD_TX_BUFF_ENABLE (0x1 << 6) -#define I2CD_M_STOP_CMD (0x1 << 5) -#define I2CD_M_S_RX_CMD_LAST (0x1 << 4) -#define I2CD_M_RX_CMD (0x1 << 3) -#define I2CD_S_TX_CMD (0x1 << 2) -#define I2CD_M_TX_CMD (0x1 << 1) -#define I2CD_M_START_CMD (0x1) - -#define I2CD_DEV_ADDR_REG 0x18 /* Slave Device Address */ -#define I2CD_POOL_CTRL_REG 0x1c /* Pool Buffer Control */ -#define I2CD_POOL_RX_COUNT(x) (((x) >> 24) & 0xff) -#define I2CD_POOL_RX_SIZE(x) ((((x) >> 16) & 0xff) + 1) -#define I2CD_POOL_TX_COUNT(x) ((((x) >> 8) & 0xff) + 1) -#define I2CD_POOL_OFFSET(x) (((x) & 0x3f) << 2) /* AST2400 */ -#define I2CD_BYTE_BUF_REG 0x20 /* Transmit/Receive Byte Buffer */ -#define I2CD_BYTE_BUF_TX_SHIFT 0 -#define I2CD_BYTE_BUF_TX_MASK 0xff -#define I2CD_BYTE_BUF_RX_SHIFT 8 -#define I2CD_BYTE_BUF_RX_MASK 0xff -#define I2CD_DMA_ADDR 0x24 /* DMA Buffer Address */ -#define I2CD_DMA_LEN 0x28 /* DMA Transfer Length < 4KB */ - -static inline bool aspeed_i2c_bus_is_master(AspeedI2CBus *bus) -{ - return bus->ctrl & I2CD_MASTER_EN; -} - -static inline bool aspeed_i2c_bus_is_enabled(AspeedI2CBus *bus) -{ - return bus->ctrl & (I2CD_MASTER_EN | I2CD_SLAVE_EN); -} +/* Enable SLAVE_ADDR_RX_MATCH always */ +#define R_I2CD_INTR_STS_ALWAYS_ENABLE R_I2CD_INTR_STS_SLAVE_ADDR_RX_MATCH_MASK static inline void aspeed_i2c_bus_raise_interrupt(AspeedI2CBus *bus) { AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(bus->controller); + uint32_t reg_intr_sts = aspeed_i2c_bus_intr_sts_offset(bus); + uint32_t intr_ctrl_reg = aspeed_i2c_bus_intr_ctrl_offset(bus); + uint32_t intr_ctrl_mask = bus->regs[intr_ctrl_reg] | + R_I2CD_INTR_STS_ALWAYS_ENABLE; + bool raise_irq; - trace_aspeed_i2c_bus_raise_interrupt(bus->intr_status, - bus->intr_status & I2CD_INTR_TX_NAK ? "nak|" : "", - bus->intr_status & I2CD_INTR_TX_ACK ? "ack|" : "", - bus->intr_status & I2CD_INTR_RX_DONE ? "done|" : "", - bus->intr_status & I2CD_INTR_NORMAL_STOP ? "normal|" : "", - bus->intr_status & I2CD_INTR_ABNORMAL ? "abnormal" : ""); + if (trace_event_get_state_backends(TRACE_ASPEED_I2C_BUS_RAISE_INTERRUPT)) { + g_autofree char *buf = g_strdup_printf("%s%s%s%s%s%s%s", + aspeed_i2c_bus_pkt_mode_en(bus) && + ARRAY_FIELD_EX32(bus->regs, I2CM_INTR_STS, PKT_CMD_DONE) ? 
+ "pktdone|" : "", + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_intr_sts, TX_NAK) ? + "nak|" : "", + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_intr_sts, TX_ACK) ? + "ack|" : "", + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_intr_sts, RX_DONE) ? + "done|" : "", + ARRAY_FIELD_EX32(bus->regs, I2CD_INTR_STS, SLAVE_ADDR_RX_MATCH) ? + "slave-match|" : "", + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_intr_sts, NORMAL_STOP) ? + "stop|" : "", + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_intr_sts, ABNORMAL) ? + "abnormal" : ""); - bus->intr_status &= bus->intr_ctrl; - if (bus->intr_status) { + trace_aspeed_i2c_bus_raise_interrupt(bus->regs[reg_intr_sts], buf); + } + + raise_irq = bus->regs[reg_intr_sts] & intr_ctrl_mask ; + + /* In packet mode we don't mask off INTR_STS */ + if (!aspeed_i2c_bus_pkt_mode_en(bus)) { + bus->regs[reg_intr_sts] &= intr_ctrl_mask; + } + + if (raise_irq) { bus->controller->intr_status |= 1 << bus->id; qemu_irq_raise(aic->bus_get_irq(bus)); } } -static uint64_t aspeed_i2c_bus_read(void *opaque, hwaddr offset, - unsigned size) +static inline void aspeed_i2c_bus_raise_slave_interrupt(AspeedI2CBus *bus) { - AspeedI2CBus *bus = opaque; AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(bus->controller); - uint64_t value = -1; + + if (!bus->regs[R_I2CS_INTR_STS]) { + return; + } + + bus->controller->intr_status |= 1 << bus->id; + qemu_irq_raise(aic->bus_get_irq(bus)); +} + +static uint64_t aspeed_i2c_bus_old_read(AspeedI2CBus *bus, hwaddr offset, + unsigned size) +{ + AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(bus->controller); + uint64_t value = bus->regs[offset / sizeof(*bus->regs)]; switch (offset) { - case I2CD_FUN_CTRL_REG: - value = bus->ctrl; + case A_I2CD_FUN_CTRL: + case A_I2CD_AC_TIMING1: + case A_I2CD_AC_TIMING2: + case A_I2CD_INTR_CTRL: + case A_I2CD_INTR_STS: + case A_I2CD_DEV_ADDR: + case A_I2CD_POOL_CTRL: + case A_I2CD_BYTE_BUF: + /* Value is already set, don't do anything. 
*/ break; - case I2CD_AC_TIMING_REG1: - value = bus->timing[0]; + case A_I2CD_CMD: + value = SHARED_FIELD_DP32(value, BUS_BUSY_STS, i2c_bus_busy(bus->bus)); break; - case I2CD_AC_TIMING_REG2: - value = bus->timing[1]; - break; - case I2CD_INTR_CTRL_REG: - value = bus->intr_ctrl; - break; - case I2CD_INTR_STS_REG: - value = bus->intr_status; - break; - case I2CD_POOL_CTRL_REG: - value = bus->pool_ctrl; - break; - case I2CD_BYTE_BUF_REG: - value = bus->buf; - break; - case I2CD_CMD_REG: - value = bus->cmd | (i2c_bus_busy(bus->bus) << 16); - break; - case I2CD_DMA_ADDR: + case A_I2CD_DMA_ADDR: if (!aic->has_dma) { qemu_log_mask(LOG_GUEST_ERROR, "%s: No DMA support\n", __func__); - break; + value = -1; } - value = bus->dma_addr; break; - case I2CD_DMA_LEN: + case A_I2CD_DMA_LEN: if (!aic->has_dma) { qemu_log_mask(LOG_GUEST_ERROR, "%s: No DMA support\n", __func__); - break; + value = -1; } - value = bus->dma_len; break; default: @@ -231,32 +134,95 @@ static uint64_t aspeed_i2c_bus_read(void *opaque, hwaddr offset, return value; } +static uint64_t aspeed_i2c_bus_new_read(AspeedI2CBus *bus, hwaddr offset, + unsigned size) +{ + uint64_t value = bus->regs[offset / sizeof(*bus->regs)]; + + switch (offset) { + case A_I2CC_FUN_CTRL: + case A_I2CC_AC_TIMING: + case A_I2CC_POOL_CTRL: + case A_I2CM_INTR_CTRL: + case A_I2CM_INTR_STS: + case A_I2CC_MS_TXRX_BYTE_BUF: + case A_I2CM_DMA_LEN: + case A_I2CM_DMA_TX_ADDR: + case A_I2CM_DMA_RX_ADDR: + case A_I2CM_DMA_LEN_STS: + case A_I2CC_DMA_ADDR: + case A_I2CC_DMA_LEN: + + case A_I2CS_DEV_ADDR: + case A_I2CS_DMA_RX_ADDR: + case A_I2CS_DMA_LEN: + case A_I2CS_CMD: + case A_I2CS_INTR_CTRL: + case A_I2CS_DMA_LEN_STS: + /* Value is already set, don't do anything. */ + break; + case A_I2CS_INTR_STS: + break; + case A_I2CM_CMD: + value = SHARED_FIELD_DP32(value, BUS_BUSY_STS, i2c_bus_busy(bus->bus)); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, offset); + value = -1; + break; + } + + trace_aspeed_i2c_bus_read(bus->id, offset, size, value); + return value; +} + +static uint64_t aspeed_i2c_bus_read(void *opaque, hwaddr offset, + unsigned size) +{ + AspeedI2CBus *bus = opaque; + if (aspeed_i2c_is_new_mode(bus->controller)) { + return aspeed_i2c_bus_new_read(bus, offset, size); + } + return aspeed_i2c_bus_old_read(bus, offset, size); +} + static void aspeed_i2c_set_state(AspeedI2CBus *bus, uint8_t state) { - bus->cmd &= ~(I2CD_TX_STATE_MASK << I2CD_TX_STATE_SHIFT); - bus->cmd |= (state & I2CD_TX_STATE_MASK) << I2CD_TX_STATE_SHIFT; + if (aspeed_i2c_is_new_mode(bus->controller)) { + SHARED_ARRAY_FIELD_DP32(bus->regs, R_I2CC_MS_TXRX_BYTE_BUF, TX_STATE, + state); + } else { + SHARED_ARRAY_FIELD_DP32(bus->regs, R_I2CD_CMD, TX_STATE, state); + } } static uint8_t aspeed_i2c_get_state(AspeedI2CBus *bus) { - return (bus->cmd >> I2CD_TX_STATE_SHIFT) & I2CD_TX_STATE_MASK; + if (aspeed_i2c_is_new_mode(bus->controller)) { + return SHARED_ARRAY_FIELD_EX32(bus->regs, R_I2CC_MS_TXRX_BYTE_BUF, + TX_STATE); + } + return SHARED_ARRAY_FIELD_EX32(bus->regs, R_I2CD_CMD, TX_STATE); } static int aspeed_i2c_dma_read(AspeedI2CBus *bus, uint8_t *data) { MemTxResult result; AspeedI2CState *s = bus->controller; + uint32_t reg_dma_addr = aspeed_i2c_bus_dma_addr_offset(bus); + uint32_t reg_dma_len = aspeed_i2c_bus_dma_len_offset(bus); - result = address_space_read(&s->dram_as, bus->dma_addr, + result = address_space_read(&s->dram_as, bus->regs[reg_dma_addr], MEMTXATTRS_UNSPECIFIED, data, 1); if (result != MEMTX_OK) { 
qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM read failed @%08x\n", - __func__, bus->dma_addr); + __func__, bus->regs[reg_dma_addr]); return -1; } - bus->dma_addr++; - bus->dma_len--; + bus->regs[reg_dma_addr]++; + bus->regs[reg_dma_len]--; return 0; } @@ -265,34 +231,51 @@ static int aspeed_i2c_bus_send(AspeedI2CBus *bus, uint8_t pool_start) AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(bus->controller); int ret = -1; int i; + uint32_t reg_cmd = aspeed_i2c_bus_cmd_offset(bus); + uint32_t reg_pool_ctrl = aspeed_i2c_bus_pool_ctrl_offset(bus); + uint32_t reg_byte_buf = aspeed_i2c_bus_byte_buf_offset(bus); + uint32_t reg_dma_len = aspeed_i2c_bus_dma_len_offset(bus); + int pool_tx_count = SHARED_ARRAY_FIELD_EX32(bus->regs, reg_pool_ctrl, + TX_COUNT); - if (bus->cmd & I2CD_TX_BUFF_ENABLE) { - for (i = pool_start; i < I2CD_POOL_TX_COUNT(bus->pool_ctrl); i++) { + if (SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, TX_BUFF_EN)) { + for (i = pool_start; i < pool_tx_count; i++) { uint8_t *pool_base = aic->bus_pool_base(bus); - trace_aspeed_i2c_bus_send("BUF", i + 1, - I2CD_POOL_TX_COUNT(bus->pool_ctrl), + trace_aspeed_i2c_bus_send("BUF", i + 1, pool_tx_count, pool_base[i]); ret = i2c_send(bus->bus, pool_base[i]); if (ret) { break; } } - bus->cmd &= ~I2CD_TX_BUFF_ENABLE; - } else if (bus->cmd & I2CD_TX_DMA_ENABLE) { - while (bus->dma_len) { + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_cmd, TX_BUFF_EN, 0); + } else if (SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, TX_DMA_EN)) { + /* In new mode, clear how many bytes we TXed */ + if (aspeed_i2c_is_new_mode(bus->controller)) { + ARRAY_FIELD_DP32(bus->regs, I2CM_DMA_LEN_STS, TX_LEN, 0); + } + while (bus->regs[reg_dma_len]) { uint8_t data; aspeed_i2c_dma_read(bus, &data); - trace_aspeed_i2c_bus_send("DMA", bus->dma_len, bus->dma_len, data); + trace_aspeed_i2c_bus_send("DMA", bus->regs[reg_dma_len], + bus->regs[reg_dma_len], data); ret = i2c_send(bus->bus, data); if (ret) { break; } + /* In new mode, keep track of how many bytes we TXed */ + if (aspeed_i2c_is_new_mode(bus->controller)) { + ARRAY_FIELD_DP32(bus->regs, I2CM_DMA_LEN_STS, TX_LEN, + ARRAY_FIELD_EX32(bus->regs, I2CM_DMA_LEN_STS, + TX_LEN) + 1); + } } - bus->cmd &= ~I2CD_TX_DMA_ENABLE; + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_cmd, TX_DMA_EN, 0); } else { - trace_aspeed_i2c_bus_send("BYTE", pool_start, 1, bus->buf); - ret = i2c_send(bus->bus, bus->buf); + trace_aspeed_i2c_bus_send("BYTE", pool_start, 1, + bus->regs[reg_byte_buf]); + ret = i2c_send(bus->bus, bus->regs[reg_byte_buf]); } return ret; @@ -304,74 +287,100 @@ static void aspeed_i2c_bus_recv(AspeedI2CBus *bus) AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(s); uint8_t data; int i; + uint32_t reg_cmd = aspeed_i2c_bus_cmd_offset(bus); + uint32_t reg_pool_ctrl = aspeed_i2c_bus_pool_ctrl_offset(bus); + uint32_t reg_byte_buf = aspeed_i2c_bus_byte_buf_offset(bus); + uint32_t reg_dma_len = aspeed_i2c_bus_dma_len_offset(bus); + uint32_t reg_dma_addr = aspeed_i2c_bus_dma_addr_offset(bus); + int pool_rx_count = SHARED_ARRAY_FIELD_EX32(bus->regs, reg_pool_ctrl, + RX_COUNT); - if (bus->cmd & I2CD_RX_BUFF_ENABLE) { + if (SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, RX_BUFF_EN)) { uint8_t *pool_base = aic->bus_pool_base(bus); - for (i = 0; i < I2CD_POOL_RX_SIZE(bus->pool_ctrl); i++) { + for (i = 0; i < pool_rx_count; i++) { pool_base[i] = i2c_recv(bus->bus); - trace_aspeed_i2c_bus_recv("BUF", i + 1, - I2CD_POOL_RX_SIZE(bus->pool_ctrl), + trace_aspeed_i2c_bus_recv("BUF", i + 1, pool_rx_count, pool_base[i]); } /* Update RX count */ - bus->pool_ctrl &= ~(0xff << 24); - 
bus->pool_ctrl |= (i & 0xff) << 24; - bus->cmd &= ~I2CD_RX_BUFF_ENABLE; - } else if (bus->cmd & I2CD_RX_DMA_ENABLE) { + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_pool_ctrl, RX_COUNT, i & 0xff); + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_cmd, RX_BUFF_EN, 0); + } else if (SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, RX_DMA_EN)) { uint8_t data; + /* In new mode, clear how many bytes we RXed */ + if (aspeed_i2c_is_new_mode(bus->controller)) { + ARRAY_FIELD_DP32(bus->regs, I2CM_DMA_LEN_STS, RX_LEN, 0); + } - while (bus->dma_len) { + while (bus->regs[reg_dma_len]) { MemTxResult result; data = i2c_recv(bus->bus); - trace_aspeed_i2c_bus_recv("DMA", bus->dma_len, bus->dma_len, data); - result = address_space_write(&s->dram_as, bus->dma_addr, + trace_aspeed_i2c_bus_recv("DMA", bus->regs[reg_dma_len], + bus->regs[reg_dma_len], data); + result = address_space_write(&s->dram_as, bus->regs[reg_dma_addr], MEMTXATTRS_UNSPECIFIED, &data, 1); if (result != MEMTX_OK) { qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM write failed @%08x\n", - __func__, bus->dma_addr); + __func__, bus->regs[reg_dma_addr]); return; } - bus->dma_addr++; - bus->dma_len--; + bus->regs[reg_dma_addr]++; + bus->regs[reg_dma_len]--; + /* In new mode, keep track of how many bytes we RXed */ + if (aspeed_i2c_is_new_mode(bus->controller)) { + ARRAY_FIELD_DP32(bus->regs, I2CM_DMA_LEN_STS, RX_LEN, + ARRAY_FIELD_EX32(bus->regs, I2CM_DMA_LEN_STS, + RX_LEN) + 1); + } } - bus->cmd &= ~I2CD_RX_DMA_ENABLE; + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_cmd, RX_DMA_EN, 0); } else { data = i2c_recv(bus->bus); - trace_aspeed_i2c_bus_recv("BYTE", 1, 1, bus->buf); - bus->buf = (data & I2CD_BYTE_BUF_RX_MASK) << I2CD_BYTE_BUF_RX_SHIFT; + trace_aspeed_i2c_bus_recv("BYTE", 1, 1, bus->regs[reg_byte_buf]); + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_byte_buf, RX_BUF, data); } } static void aspeed_i2c_handle_rx_cmd(AspeedI2CBus *bus) { + uint32_t reg_cmd = aspeed_i2c_bus_cmd_offset(bus); + uint32_t reg_intr_sts = aspeed_i2c_bus_intr_sts_offset(bus); + aspeed_i2c_set_state(bus, I2CD_MRXD); aspeed_i2c_bus_recv(bus); - bus->intr_status |= I2CD_INTR_RX_DONE; - if (bus->cmd & I2CD_M_S_RX_CMD_LAST) { + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_intr_sts, RX_DONE, 1); + if (SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, M_S_RX_CMD_LAST)) { i2c_nack(bus->bus); } - bus->cmd &= ~(I2CD_M_RX_CMD | I2CD_M_S_RX_CMD_LAST); + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_cmd, M_RX_CMD, 0); + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_cmd, M_S_RX_CMD_LAST, 0); aspeed_i2c_set_state(bus, I2CD_MACTIVE); } static uint8_t aspeed_i2c_get_addr(AspeedI2CBus *bus) { AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(bus->controller); + uint32_t reg_byte_buf = aspeed_i2c_bus_byte_buf_offset(bus); + uint32_t reg_cmd = aspeed_i2c_bus_cmd_offset(bus); - if (bus->cmd & I2CD_TX_BUFF_ENABLE) { + if (aspeed_i2c_bus_pkt_mode_en(bus)) { + return (ARRAY_FIELD_EX32(bus->regs, I2CM_CMD, PKT_DEV_ADDR) << 1) | + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, M_RX_CMD); + } + if (SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, TX_BUFF_EN)) { uint8_t *pool_base = aic->bus_pool_base(bus); return pool_base[0]; - } else if (bus->cmd & I2CD_TX_DMA_ENABLE) { + } else if (SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, TX_DMA_EN)) { uint8_t data; aspeed_i2c_dma_read(bus, &data); return data; } else { - return bus->buf; + return bus->regs[reg_byte_buf]; } } @@ -379,7 +388,11 @@ static bool aspeed_i2c_check_sram(AspeedI2CBus *bus) { AspeedI2CState *s = bus->controller; AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(s); - + uint32_t reg_cmd = 
aspeed_i2c_bus_cmd_offset(bus); + bool dma_en = SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, RX_DMA_EN) || + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, TX_DMA_EN) || + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, RX_BUFF_EN) || + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, TX_BUFF_EN); if (!aic->check_sram) { return true; } @@ -388,9 +401,7 @@ static bool aspeed_i2c_check_sram(AspeedI2CBus *bus) * AST2500: SRAM must be enabled before using the Buffer Pool or * DMA mode. */ - if (!(s->ctrl_global & I2C_CTRL_SRAM_EN) && - (bus->cmd & (I2CD_RX_DMA_ENABLE | I2CD_TX_DMA_ENABLE | - I2CD_RX_BUFF_ENABLE | I2CD_TX_BUFF_ENABLE))) { + if (!FIELD_EX32(s->ctrl_global, I2C_CTRL_GLOBAL, SRAM_EN) && dma_en) { qemu_log_mask(LOG_GUEST_ERROR, "%s: SRAM is not enabled\n", __func__); return false; } @@ -402,27 +413,31 @@ static void aspeed_i2c_bus_cmd_dump(AspeedI2CBus *bus) { g_autofree char *cmd_flags = NULL; uint32_t count; - - if (bus->cmd & (I2CD_RX_BUFF_ENABLE | I2CD_RX_BUFF_ENABLE)) { - count = I2CD_POOL_TX_COUNT(bus->pool_ctrl); - } else if (bus->cmd & (I2CD_RX_DMA_ENABLE | I2CD_RX_DMA_ENABLE)) { - count = bus->dma_len; + uint32_t reg_cmd = aspeed_i2c_bus_cmd_offset(bus); + uint32_t reg_pool_ctrl = aspeed_i2c_bus_pool_ctrl_offset(bus); + uint32_t reg_intr_sts = aspeed_i2c_bus_intr_sts_offset(bus); + uint32_t reg_dma_len = aspeed_i2c_bus_dma_len_offset(bus); + if (SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, RX_BUFF_EN)) { + count = SHARED_ARRAY_FIELD_EX32(bus->regs, reg_pool_ctrl, TX_COUNT); + } else if (SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, RX_DMA_EN)) { + count = bus->regs[reg_dma_len]; } else { /* BYTE mode */ count = 1; } cmd_flags = g_strdup_printf("%s%s%s%s%s%s%s%s%s", - bus->cmd & I2CD_M_START_CMD ? "start|" : "", - bus->cmd & I2CD_RX_DMA_ENABLE ? "rxdma|" : "", - bus->cmd & I2CD_TX_DMA_ENABLE ? "txdma|" : "", - bus->cmd & I2CD_RX_BUFF_ENABLE ? "rxbuf|" : "", - bus->cmd & I2CD_TX_BUFF_ENABLE ? "txbuf|" : "", - bus->cmd & I2CD_M_TX_CMD ? "tx|" : "", - bus->cmd & I2CD_M_RX_CMD ? "rx|" : "", - bus->cmd & I2CD_M_S_RX_CMD_LAST ? "last|" : "", - bus->cmd & I2CD_M_STOP_CMD ? "stop" : ""); + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, M_START_CMD) ? "start|" : "", + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, RX_DMA_EN) ? "rxdma|" : "", + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, TX_DMA_EN) ? "txdma|" : "", + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, RX_BUFF_EN) ? "rxbuf|" : "", + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, TX_BUFF_EN) ? "txbuf|" : "", + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, M_TX_CMD) ? "tx|" : "", + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, M_RX_CMD) ? "rx|" : "", + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, M_S_RX_CMD_LAST) ? "last|" : "", + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, M_STOP_CMD) ? 
"stop|" : ""); - trace_aspeed_i2c_bus_cmd(bus->cmd, cmd_flags, count, bus->intr_status); + trace_aspeed_i2c_bus_cmd(bus->regs[reg_cmd], cmd_flags, count, + bus->regs[reg_intr_sts]); } /* @@ -432,9 +447,10 @@ static void aspeed_i2c_bus_cmd_dump(AspeedI2CBus *bus) static void aspeed_i2c_bus_handle_cmd(AspeedI2CBus *bus, uint64_t value) { uint8_t pool_start = 0; - - bus->cmd &= ~0xFFFF; - bus->cmd |= value & 0xFFFF; + uint32_t reg_intr_sts = aspeed_i2c_bus_intr_sts_offset(bus); + uint32_t reg_cmd = aspeed_i2c_bus_cmd_offset(bus); + uint32_t reg_pool_ctrl = aspeed_i2c_bus_pool_ctrl_offset(bus); + uint32_t reg_dma_len = aspeed_i2c_bus_dma_len_offset(bus); if (!aspeed_i2c_check_sram(bus)) { return; @@ -444,7 +460,7 @@ static void aspeed_i2c_bus_handle_cmd(AspeedI2CBus *bus, uint64_t value) aspeed_i2c_bus_cmd_dump(bus); } - if (bus->cmd & I2CD_M_START_CMD) { + if (SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, M_START_CMD)) { uint8_t state = aspeed_i2c_get_state(bus) & I2CD_MACTIVE ? I2CD_MSTARTR : I2CD_MSTART; uint8_t addr; @@ -452,24 +468,30 @@ static void aspeed_i2c_bus_handle_cmd(AspeedI2CBus *bus, uint64_t value) aspeed_i2c_set_state(bus, state); addr = aspeed_i2c_get_addr(bus); - if (i2c_start_transfer(bus->bus, extract32(addr, 1, 7), extract32(addr, 0, 1))) { - bus->intr_status |= I2CD_INTR_TX_NAK; + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_intr_sts, TX_NAK, 1); + if (aspeed_i2c_bus_pkt_mode_en(bus)) { + ARRAY_FIELD_DP32(bus->regs, I2CM_INTR_STS, PKT_CMD_FAIL, 1); + } } else { - bus->intr_status |= I2CD_INTR_TX_ACK; + /* START doesn't set TX_ACK in packet mode */ + if (!aspeed_i2c_bus_pkt_mode_en(bus)) { + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_intr_sts, TX_ACK, 1); + } } - bus->cmd &= ~I2CD_M_START_CMD; + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_cmd, M_START_CMD, 0); /* * The START command is also a TX command, as the slave * address is sent on the bus. Drop the TX flag if nothing * else needs to be sent in this sequence. 
*/ - if (bus->cmd & I2CD_TX_BUFF_ENABLE) { - if (I2CD_POOL_TX_COUNT(bus->pool_ctrl) == 1) { - bus->cmd &= ~I2CD_M_TX_CMD; + if (SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, TX_BUFF_EN)) { + if (SHARED_ARRAY_FIELD_EX32(bus->regs, reg_pool_ctrl, TX_COUNT) + == 1) { + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_cmd, M_TX_CMD, 0); } else { /* * Increase the start index in the TX pool buffer to @@ -477,141 +499,342 @@ static void aspeed_i2c_bus_handle_cmd(AspeedI2CBus *bus, uint64_t value) */ pool_start++; } - } else if (bus->cmd & I2CD_TX_DMA_ENABLE) { - if (bus->dma_len == 0) { - bus->cmd &= ~I2CD_M_TX_CMD; + } else if (SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, TX_DMA_EN)) { + if (bus->regs[reg_dma_len] == 0) { + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_cmd, M_TX_CMD, 0); } } else { - bus->cmd &= ~I2CD_M_TX_CMD; + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_cmd, M_TX_CMD, 0); } /* No slave found */ if (!i2c_bus_busy(bus->bus)) { + if (aspeed_i2c_bus_pkt_mode_en(bus)) { + ARRAY_FIELD_DP32(bus->regs, I2CM_INTR_STS, PKT_CMD_FAIL, 1); + ARRAY_FIELD_DP32(bus->regs, I2CM_INTR_STS, PKT_CMD_DONE, 1); + } return; } aspeed_i2c_set_state(bus, I2CD_MACTIVE); } - if (bus->cmd & I2CD_M_TX_CMD) { + if (SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, M_TX_CMD)) { aspeed_i2c_set_state(bus, I2CD_MTXD); if (aspeed_i2c_bus_send(bus, pool_start)) { - bus->intr_status |= (I2CD_INTR_TX_NAK); + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_intr_sts, TX_NAK, 1); i2c_end_transfer(bus->bus); } else { - bus->intr_status |= I2CD_INTR_TX_ACK; + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_intr_sts, TX_ACK, 1); } - bus->cmd &= ~I2CD_M_TX_CMD; + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_cmd, M_TX_CMD, 0); aspeed_i2c_set_state(bus, I2CD_MACTIVE); } - if ((bus->cmd & (I2CD_M_RX_CMD | I2CD_M_S_RX_CMD_LAST)) && - !(bus->intr_status & I2CD_INTR_RX_DONE)) { + if ((SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, M_RX_CMD) || + SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, M_S_RX_CMD_LAST)) && + !SHARED_ARRAY_FIELD_EX32(bus->regs, reg_intr_sts, RX_DONE)) { aspeed_i2c_handle_rx_cmd(bus); } - if (bus->cmd & I2CD_M_STOP_CMD) { + if (SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, M_STOP_CMD)) { if (!(aspeed_i2c_get_state(bus) & I2CD_MACTIVE)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: abnormal stop\n", __func__); - bus->intr_status |= I2CD_INTR_ABNORMAL; + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_intr_sts, ABNORMAL, 1); + if (aspeed_i2c_bus_pkt_mode_en(bus)) { + ARRAY_FIELD_DP32(bus->regs, I2CM_INTR_STS, PKT_CMD_FAIL, 1); + } } else { aspeed_i2c_set_state(bus, I2CD_MSTOP); i2c_end_transfer(bus->bus); - bus->intr_status |= I2CD_INTR_NORMAL_STOP; + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_intr_sts, NORMAL_STOP, 1); } - bus->cmd &= ~I2CD_M_STOP_CMD; + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_cmd, M_STOP_CMD, 0); aspeed_i2c_set_state(bus, I2CD_IDLE); } + + if (aspeed_i2c_bus_pkt_mode_en(bus)) { + ARRAY_FIELD_DP32(bus->regs, I2CM_INTR_STS, PKT_CMD_DONE, 1); + } } -static void aspeed_i2c_bus_write(void *opaque, hwaddr offset, - uint64_t value, unsigned size) +static void aspeed_i2c_bus_new_write(AspeedI2CBus *bus, hwaddr offset, + uint64_t value, unsigned size) +{ + AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(bus->controller); + bool handle_rx; + bool w1t; + + trace_aspeed_i2c_bus_write(bus->id, offset, size, value); + + switch (offset) { + case A_I2CC_FUN_CTRL: + bus->regs[R_I2CC_FUN_CTRL] = value; + break; + case A_I2CC_AC_TIMING: + bus->regs[R_I2CC_AC_TIMING] = value & 0x1ffff0ff; + break; + case A_I2CC_MS_TXRX_BYTE_BUF: + SHARED_ARRAY_FIELD_DP32(bus->regs, R_I2CC_MS_TXRX_BYTE_BUF, TX_BUF, 
+ value); + break; + case A_I2CC_POOL_CTRL: + bus->regs[R_I2CC_POOL_CTRL] &= ~0xffffff; + bus->regs[R_I2CC_POOL_CTRL] |= (value & 0xffffff); + break; + case A_I2CM_INTR_CTRL: + bus->regs[R_I2CM_INTR_CTRL] = value & 0x0007f07f; + break; + case A_I2CM_INTR_STS: + handle_rx = SHARED_ARRAY_FIELD_EX32(bus->regs, R_I2CM_INTR_STS, RX_DONE) + && SHARED_FIELD_EX32(value, RX_DONE); + + /* In packet mode, clearing PKT_CMD_DONE clears other interrupts. */ + if (aspeed_i2c_bus_pkt_mode_en(bus) && + FIELD_EX32(value, I2CM_INTR_STS, PKT_CMD_DONE)) { + bus->regs[R_I2CM_INTR_STS] &= 0xf0001000; + if (!bus->regs[R_I2CM_INTR_STS]) { + bus->controller->intr_status &= ~(1 << bus->id); + qemu_irq_lower(aic->bus_get_irq(bus)); + } + aspeed_i2c_bus_raise_slave_interrupt(bus); + break; + } + bus->regs[R_I2CM_INTR_STS] &= ~(value & 0xf007f07f); + if (!bus->regs[R_I2CM_INTR_STS]) { + bus->controller->intr_status &= ~(1 << bus->id); + qemu_irq_lower(aic->bus_get_irq(bus)); + } + if (handle_rx && (SHARED_ARRAY_FIELD_EX32(bus->regs, R_I2CM_CMD, + M_RX_CMD) || + SHARED_ARRAY_FIELD_EX32(bus->regs, R_I2CM_CMD, + M_S_RX_CMD_LAST))) { + aspeed_i2c_handle_rx_cmd(bus); + aspeed_i2c_bus_raise_interrupt(bus); + } + break; + case A_I2CM_CMD: + if (!aspeed_i2c_bus_is_enabled(bus)) { + break; + } + + if (!aspeed_i2c_bus_is_master(bus)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Master mode is not enabled\n", + __func__); + break; + } + + if (!aic->has_dma && + (SHARED_FIELD_EX32(value, RX_DMA_EN) || + SHARED_FIELD_EX32(value, TX_DMA_EN))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: No DMA support\n", __func__); + break; + } + + if (bus->regs[R_I2CM_INTR_STS] & 0xffff0000) { + qemu_log_mask(LOG_UNIMP, "%s: Packet mode is not implemented\n", + __func__); + break; + } + + value &= 0xff0ffbfb; + if (ARRAY_FIELD_EX32(bus->regs, I2CM_CMD, W1_CTRL)) { + bus->regs[R_I2CM_CMD] |= value; + } else { + bus->regs[R_I2CM_CMD] = value; + } + + aspeed_i2c_bus_handle_cmd(bus, value); + aspeed_i2c_bus_raise_interrupt(bus); + break; + case A_I2CM_DMA_TX_ADDR: + bus->regs[R_I2CM_DMA_TX_ADDR] = FIELD_EX32(value, I2CM_DMA_TX_ADDR, + ADDR); + bus->regs[R_I2CC_DMA_ADDR] = FIELD_EX32(value, I2CM_DMA_TX_ADDR, ADDR); + bus->regs[R_I2CC_DMA_LEN] = ARRAY_FIELD_EX32(bus->regs, I2CM_DMA_LEN, + TX_BUF_LEN) + 1; + break; + case A_I2CM_DMA_RX_ADDR: + bus->regs[R_I2CM_DMA_RX_ADDR] = FIELD_EX32(value, I2CM_DMA_RX_ADDR, + ADDR); + bus->regs[R_I2CC_DMA_ADDR] = FIELD_EX32(value, I2CM_DMA_RX_ADDR, ADDR); + bus->regs[R_I2CC_DMA_LEN] = ARRAY_FIELD_EX32(bus->regs, I2CM_DMA_LEN, + RX_BUF_LEN) + 1; + break; + case A_I2CM_DMA_LEN: + w1t = FIELD_EX32(value, I2CM_DMA_LEN, RX_BUF_LEN_W1T) || + FIELD_EX32(value, I2CM_DMA_LEN, TX_BUF_LEN_W1T); + /* If none of the w1t bits are set, just write to the reg as normal. 
*/ + if (!w1t) { + bus->regs[R_I2CM_DMA_LEN] = value; + break; + } + if (FIELD_EX32(value, I2CM_DMA_LEN, RX_BUF_LEN_W1T)) { + ARRAY_FIELD_DP32(bus->regs, I2CM_DMA_LEN, RX_BUF_LEN, + FIELD_EX32(value, I2CM_DMA_LEN, RX_BUF_LEN)); + } + if (FIELD_EX32(value, I2CM_DMA_LEN, TX_BUF_LEN_W1T)) { + ARRAY_FIELD_DP32(bus->regs, I2CM_DMA_LEN, TX_BUF_LEN, + FIELD_EX32(value, I2CM_DMA_LEN, TX_BUF_LEN)); + } + break; + case A_I2CM_DMA_LEN_STS: + /* Writes clear to 0 */ + bus->regs[R_I2CM_DMA_LEN_STS] = 0; + break; + case A_I2CC_DMA_ADDR: + case A_I2CC_DMA_LEN: + /* RO */ + break; + case A_I2CS_DEV_ADDR: + bus->regs[R_I2CS_DEV_ADDR] = value; + break; + case A_I2CS_DMA_RX_ADDR: + bus->regs[R_I2CS_DMA_RX_ADDR] = value; + break; + case A_I2CS_DMA_LEN: + assert(FIELD_EX32(value, I2CS_DMA_LEN, TX_BUF_LEN) == 0); + if (FIELD_EX32(value, I2CS_DMA_LEN, RX_BUF_LEN_W1T)) { + ARRAY_FIELD_DP32(bus->regs, I2CS_DMA_LEN, RX_BUF_LEN, + FIELD_EX32(value, I2CS_DMA_LEN, RX_BUF_LEN)); + } else { + bus->regs[R_I2CS_DMA_LEN] = value; + } + break; + case A_I2CS_CMD: + if (FIELD_EX32(value, I2CS_CMD, W1_CTRL)) { + bus->regs[R_I2CS_CMD] |= value; + } else { + bus->regs[R_I2CS_CMD] = value; + } + i2c_slave_set_address(bus->slave, bus->regs[R_I2CS_DEV_ADDR]); + break; + case A_I2CS_INTR_CTRL: + bus->regs[R_I2CS_INTR_CTRL] = value; + break; + + case A_I2CS_INTR_STS: + if (ARRAY_FIELD_EX32(bus->regs, I2CS_INTR_CTRL, PKT_CMD_DONE)) { + if (ARRAY_FIELD_EX32(bus->regs, I2CS_INTR_STS, PKT_CMD_DONE) && + FIELD_EX32(value, I2CS_INTR_STS, PKT_CMD_DONE)) { + bus->regs[R_I2CS_INTR_STS] &= 0xfffc0000; + } + } else { + bus->regs[R_I2CS_INTR_STS] &= ~value; + } + if (!bus->regs[R_I2CS_INTR_STS]) { + bus->controller->intr_status &= ~(1 << bus->id); + qemu_irq_lower(aic->bus_get_irq(bus)); + } + aspeed_i2c_bus_raise_interrupt(bus); + break; + case A_I2CS_DMA_LEN_STS: + bus->regs[R_I2CS_DMA_LEN_STS] = 0; + break; + case A_I2CS_DMA_TX_ADDR: + qemu_log_mask(LOG_UNIMP, "%s: Slave mode DMA TX is not implemented\n", + __func__); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + } +} + +static void aspeed_i2c_bus_old_write(AspeedI2CBus *bus, hwaddr offset, + uint64_t value, unsigned size) { - AspeedI2CBus *bus = opaque; AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(bus->controller); bool handle_rx; trace_aspeed_i2c_bus_write(bus->id, offset, size, value); switch (offset) { - case I2CD_FUN_CTRL_REG: - if (value & I2CD_SLAVE_EN) { - qemu_log_mask(LOG_UNIMP, "%s: slave mode not implemented\n", - __func__); - break; + case A_I2CD_FUN_CTRL: + if (SHARED_FIELD_EX32(value, SLAVE_EN)) { + i2c_slave_set_address(bus->slave, bus->regs[R_I2CD_DEV_ADDR]); } - bus->ctrl = value & 0x0071C3FF; + bus->regs[R_I2CD_FUN_CTRL] = value & 0x0071C3FF; break; - case I2CD_AC_TIMING_REG1: - bus->timing[0] = value & 0xFFFFF0F; + case A_I2CD_AC_TIMING1: + bus->regs[R_I2CD_AC_TIMING1] = value & 0xFFFFF0F; break; - case I2CD_AC_TIMING_REG2: - bus->timing[1] = value & 0x7; + case A_I2CD_AC_TIMING2: + bus->regs[R_I2CD_AC_TIMING2] = value & 0x7; break; - case I2CD_INTR_CTRL_REG: - bus->intr_ctrl = value & 0x7FFF; + case A_I2CD_INTR_CTRL: + bus->regs[R_I2CD_INTR_CTRL] = value & 0x7FFF; break; - case I2CD_INTR_STS_REG: - handle_rx = (bus->intr_status & I2CD_INTR_RX_DONE) && - (value & I2CD_INTR_RX_DONE); - bus->intr_status &= ~(value & 0x7FFF); - if (!bus->intr_status) { + case A_I2CD_INTR_STS: + handle_rx = SHARED_ARRAY_FIELD_EX32(bus->regs, R_I2CD_INTR_STS, RX_DONE) + && SHARED_FIELD_EX32(value, RX_DONE); + 
bus->regs[R_I2CD_INTR_STS] &= ~(value & 0x7FFF); + if (!bus->regs[R_I2CD_INTR_STS]) { bus->controller->intr_status &= ~(1 << bus->id); qemu_irq_lower(aic->bus_get_irq(bus)); } - if (handle_rx && (bus->cmd & (I2CD_M_RX_CMD | I2CD_M_S_RX_CMD_LAST))) { - aspeed_i2c_handle_rx_cmd(bus); - aspeed_i2c_bus_raise_interrupt(bus); + if (handle_rx) { + if (SHARED_ARRAY_FIELD_EX32(bus->regs, R_I2CD_CMD, M_RX_CMD) || + SHARED_ARRAY_FIELD_EX32(bus->regs, R_I2CD_CMD, + M_S_RX_CMD_LAST)) { + aspeed_i2c_handle_rx_cmd(bus); + aspeed_i2c_bus_raise_interrupt(bus); + } else if (aspeed_i2c_get_state(bus) == I2CD_STXD) { + i2c_ack(bus->bus); + } } break; - case I2CD_DEV_ADDR_REG: - qemu_log_mask(LOG_UNIMP, "%s: slave mode not implemented\n", - __func__); + case A_I2CD_DEV_ADDR: + bus->regs[R_I2CD_DEV_ADDR] = value; break; - case I2CD_POOL_CTRL_REG: - bus->pool_ctrl &= ~0xffffff; - bus->pool_ctrl |= (value & 0xffffff); + case A_I2CD_POOL_CTRL: + bus->regs[R_I2CD_POOL_CTRL] &= ~0xffffff; + bus->regs[R_I2CD_POOL_CTRL] |= (value & 0xffffff); break; - case I2CD_BYTE_BUF_REG: - bus->buf = (value & I2CD_BYTE_BUF_TX_MASK) << I2CD_BYTE_BUF_TX_SHIFT; + case A_I2CD_BYTE_BUF: + SHARED_ARRAY_FIELD_DP32(bus->regs, R_I2CD_BYTE_BUF, TX_BUF, value); break; - case I2CD_CMD_REG: + case A_I2CD_CMD: if (!aspeed_i2c_bus_is_enabled(bus)) { break; } if (!aspeed_i2c_bus_is_master(bus)) { - qemu_log_mask(LOG_UNIMP, "%s: slave mode not implemented\n", + qemu_log_mask(LOG_GUEST_ERROR, "%s: Master mode is not enabled\n", __func__); break; } if (!aic->has_dma && - value & (I2CD_RX_DMA_ENABLE | I2CD_TX_DMA_ENABLE)) { + (SHARED_FIELD_EX32(value, RX_DMA_EN) || + SHARED_FIELD_EX32(value, TX_DMA_EN))) { qemu_log_mask(LOG_GUEST_ERROR, "%s: No DMA support\n", __func__); break; } + bus->regs[R_I2CD_CMD] &= ~0xFFFF; + bus->regs[R_I2CD_CMD] |= value & 0xFFFF; + aspeed_i2c_bus_handle_cmd(bus, value); aspeed_i2c_bus_raise_interrupt(bus); break; - case I2CD_DMA_ADDR: + case A_I2CD_DMA_ADDR: if (!aic->has_dma) { qemu_log_mask(LOG_GUEST_ERROR, "%s: No DMA support\n", __func__); break; } - bus->dma_addr = value & 0x3ffffffc; + bus->regs[R_I2CD_DMA_ADDR] = value & 0x3ffffffc; break; - case I2CD_DMA_LEN: + case A_I2CD_DMA_LEN: if (!aic->has_dma) { qemu_log_mask(LOG_GUEST_ERROR, "%s: No DMA support\n", __func__); break; } - bus->dma_len = value & 0xfff; - if (!bus->dma_len) { + bus->regs[R_I2CD_DMA_LEN] = value & 0xfff; + if (!bus->regs[R_I2CD_DMA_LEN]) { qemu_log_mask(LOG_UNIMP, "%s: invalid DMA length\n", __func__); } break; @@ -622,16 +845,34 @@ static void aspeed_i2c_bus_write(void *opaque, hwaddr offset, } } +static void aspeed_i2c_bus_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + AspeedI2CBus *bus = opaque; + if (aspeed_i2c_is_new_mode(bus->controller)) { + aspeed_i2c_bus_new_write(bus, offset, value, size); + } else { + aspeed_i2c_bus_old_write(bus, offset, value, size); + } +} + static uint64_t aspeed_i2c_ctrl_read(void *opaque, hwaddr offset, unsigned size) { AspeedI2CState *s = opaque; switch (offset) { - case I2C_CTRL_STATUS: + case A_I2C_CTRL_STATUS: return s->intr_status; - case I2C_CTRL_GLOBAL: + case A_I2C_CTRL_GLOBAL: return s->ctrl_global; + case A_I2C_CTRL_NEW_CLK_DIVIDER: + if (aspeed_i2c_is_new_mode(s)) { + return s->new_clk_divider; + } + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + break; default: qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, offset); @@ -647,10 +888,18 @@ static void aspeed_i2c_ctrl_write(void *opaque, hwaddr 
offset, AspeedI2CState *s = opaque; switch (offset) { - case I2C_CTRL_GLOBAL: + case A_I2C_CTRL_GLOBAL: s->ctrl_global = value; break; - case I2C_CTRL_STATUS: + case A_I2C_CTRL_NEW_CLK_DIVIDER: + if (aspeed_i2c_is_new_mode(s)) { + s->new_clk_divider = value; + } else { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx + "\n", __func__, offset); + } + break; + case A_I2C_CTRL_STATUS: default: qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, offset); @@ -707,19 +956,10 @@ static const MemoryRegionOps aspeed_i2c_pool_ops = { static const VMStateDescription aspeed_i2c_bus_vmstate = { .name = TYPE_ASPEED_I2C, - .version_id = 3, - .minimum_version_id = 3, + .version_id = 5, + .minimum_version_id = 5, .fields = (VMStateField[]) { - VMSTATE_UINT8(id, AspeedI2CBus), - VMSTATE_UINT32(ctrl, AspeedI2CBus), - VMSTATE_UINT32_ARRAY(timing, AspeedI2CBus, 2), - VMSTATE_UINT32(intr_ctrl, AspeedI2CBus), - VMSTATE_UINT32(intr_status, AspeedI2CBus), - VMSTATE_UINT32(cmd, AspeedI2CBus), - VMSTATE_UINT32(buf, AspeedI2CBus), - VMSTATE_UINT32(pool_ctrl, AspeedI2CBus), - VMSTATE_UINT32(dma_addr, AspeedI2CBus), - VMSTATE_UINT32(dma_len, AspeedI2CBus), + VMSTATE_UINT32_ARRAY(regs, AspeedI2CBus, ASPEED_I2C_NEW_NUM_REG), VMSTATE_END_OF_LIST() } }; @@ -740,20 +980,20 @@ static const VMStateDescription aspeed_i2c_vmstate = { static void aspeed_i2c_reset(DeviceState *dev) { - int i; AspeedI2CState *s = ASPEED_I2C(dev); - AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(s); s->intr_status = 0; +} + +static void aspeed_i2c_instance_init(Object *obj) +{ + AspeedI2CState *s = ASPEED_I2C(obj); + AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(s); + int i; for (i = 0; i < aic->num_busses; i++) { - s->busses[i].intr_ctrl = 0; - s->busses[i].intr_status = 0; - s->busses[i].cmd = 0; - s->busses[i].buf = 0; - s->busses[i].dma_addr = 0; - s->busses[i].dma_len = 0; - i2c_end_transfer(s->busses[i].bus); + object_initialize_child(obj, "bus[*]", &s->busses[i], + TYPE_ASPEED_I2C_BUS); } } @@ -791,17 +1031,21 @@ static void aspeed_i2c_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &s->iomem); for (i = 0; i < aic->num_busses; i++) { - char name[32]; + Object *bus = OBJECT(&s->busses[i]); int offset = i < aic->gap ? 
1 : 5; - sysbus_init_irq(sbd, &s->busses[i].irq); - snprintf(name, sizeof(name), "aspeed.i2c.%d", i); - s->busses[i].controller = s; - s->busses[i].id = i; - s->busses[i].bus = i2c_init_bus(dev, name); - memory_region_init_io(&s->busses[i].mr, OBJECT(dev), - &aspeed_i2c_bus_ops, &s->busses[i], name, - aic->reg_size); + if (!object_property_set_link(bus, "controller", OBJECT(s), errp)) { + return; + } + + if (!object_property_set_uint(bus, "bus-id", i, errp)) { + return; + } + + if (!sysbus_realize(SYS_BUS_DEVICE(bus), errp)) { + return; + } + memory_region_add_subregion(&s->iomem, aic->reg_size * (i + offset), &s->busses[i].mr); } @@ -841,12 +1085,192 @@ static void aspeed_i2c_class_init(ObjectClass *klass, void *data) static const TypeInfo aspeed_i2c_info = { .name = TYPE_ASPEED_I2C, .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = aspeed_i2c_instance_init, .instance_size = sizeof(AspeedI2CState), .class_init = aspeed_i2c_class_init, .class_size = sizeof(AspeedI2CClass), .abstract = true, }; +static int aspeed_i2c_bus_new_slave_event(AspeedI2CBus *bus, + enum i2c_event event) +{ + switch (event) { + case I2C_START_SEND_ASYNC: + if (!SHARED_ARRAY_FIELD_EX32(bus->regs, R_I2CS_CMD, RX_DMA_EN)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Slave mode RX DMA is not enabled\n", __func__); + return -1; + } + ARRAY_FIELD_DP32(bus->regs, I2CS_DMA_LEN_STS, RX_LEN, 0); + bus->regs[R_I2CC_DMA_ADDR] = + ARRAY_FIELD_EX32(bus->regs, I2CS_DMA_RX_ADDR, ADDR); + bus->regs[R_I2CC_DMA_LEN] = + ARRAY_FIELD_EX32(bus->regs, I2CS_DMA_LEN, RX_BUF_LEN) + 1; + i2c_ack(bus->bus); + break; + case I2C_FINISH: + ARRAY_FIELD_DP32(bus->regs, I2CS_INTR_STS, PKT_CMD_DONE, 1); + ARRAY_FIELD_DP32(bus->regs, I2CS_INTR_STS, SLAVE_ADDR_RX_MATCH, 1); + SHARED_ARRAY_FIELD_DP32(bus->regs, R_I2CS_INTR_STS, NORMAL_STOP, 1); + SHARED_ARRAY_FIELD_DP32(bus->regs, R_I2CS_INTR_STS, RX_DONE, 1); + aspeed_i2c_bus_raise_slave_interrupt(bus); + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: i2c event %d unimplemented\n", + __func__, event); + return -1; + } + + return 0; +} + +static int aspeed_i2c_bus_slave_event(I2CSlave *slave, enum i2c_event event) +{ + BusState *qbus = qdev_get_parent_bus(DEVICE(slave)); + AspeedI2CBus *bus = ASPEED_I2C_BUS(qbus->parent); + uint32_t reg_intr_sts = aspeed_i2c_bus_intr_sts_offset(bus); + uint32_t reg_byte_buf = aspeed_i2c_bus_byte_buf_offset(bus); + uint32_t reg_dev_addr = aspeed_i2c_bus_dev_addr_offset(bus); + uint32_t dev_addr = SHARED_ARRAY_FIELD_EX32(bus->regs, reg_dev_addr, + SLAVE_DEV_ADDR1); + + if (aspeed_i2c_is_new_mode(bus->controller)) { + return aspeed_i2c_bus_new_slave_event(bus, event); + } + + switch (event) { + case I2C_START_SEND_ASYNC: + /* Bit[0] == 0 indicates "send". 
*/ + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_byte_buf, RX_BUF, dev_addr << 1); + + ARRAY_FIELD_DP32(bus->regs, I2CD_INTR_STS, SLAVE_ADDR_RX_MATCH, 1); + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_intr_sts, RX_DONE, 1); + + aspeed_i2c_set_state(bus, I2CD_STXD); + + break; + + case I2C_FINISH: + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_intr_sts, NORMAL_STOP, 1); + + aspeed_i2c_set_state(bus, I2CD_IDLE); + + break; + + default: + return -1; + } + + aspeed_i2c_bus_raise_interrupt(bus); + + return 0; +} + +static void aspeed_i2c_bus_new_slave_send_async(AspeedI2CBus *bus, uint8_t data) +{ + assert(address_space_write(&bus->controller->dram_as, + bus->regs[R_I2CC_DMA_ADDR], + MEMTXATTRS_UNSPECIFIED, &data, 1) == MEMTX_OK); + + bus->regs[R_I2CC_DMA_ADDR]++; + bus->regs[R_I2CC_DMA_LEN]--; + ARRAY_FIELD_DP32(bus->regs, I2CS_DMA_LEN_STS, RX_LEN, + ARRAY_FIELD_EX32(bus->regs, I2CS_DMA_LEN_STS, RX_LEN) + 1); + i2c_ack(bus->bus); +} + +static void aspeed_i2c_bus_slave_send_async(I2CSlave *slave, uint8_t data) +{ + BusState *qbus = qdev_get_parent_bus(DEVICE(slave)); + AspeedI2CBus *bus = ASPEED_I2C_BUS(qbus->parent); + uint32_t reg_intr_sts = aspeed_i2c_bus_intr_sts_offset(bus); + uint32_t reg_byte_buf = aspeed_i2c_bus_byte_buf_offset(bus); + + if (aspeed_i2c_is_new_mode(bus->controller)) { + return aspeed_i2c_bus_new_slave_send_async(bus, data); + } + + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_byte_buf, RX_BUF, data); + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_intr_sts, RX_DONE, 1); + + aspeed_i2c_bus_raise_interrupt(bus); +} + +static void aspeed_i2c_bus_slave_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass); + + dc->desc = "Aspeed I2C Bus Slave"; + + sc->event = aspeed_i2c_bus_slave_event; + sc->send_async = aspeed_i2c_bus_slave_send_async; +} + +static const TypeInfo aspeed_i2c_bus_slave_info = { + .name = TYPE_ASPEED_I2C_BUS_SLAVE, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(AspeedI2CBusSlave), + .class_init = aspeed_i2c_bus_slave_class_init, +}; + +static void aspeed_i2c_bus_reset(DeviceState *dev) +{ + AspeedI2CBus *s = ASPEED_I2C_BUS(dev); + + memset(s->regs, 0, sizeof(s->regs)); + i2c_end_transfer(s->bus); +} + +static void aspeed_i2c_bus_realize(DeviceState *dev, Error **errp) +{ + AspeedI2CBus *s = ASPEED_I2C_BUS(dev); + AspeedI2CClass *aic; + g_autofree char *name = g_strdup_printf(TYPE_ASPEED_I2C_BUS ".%d", s->id); + + if (!s->controller) { + error_setg(errp, TYPE_ASPEED_I2C_BUS ": 'controller' link not set"); + return; + } + + aic = ASPEED_I2C_GET_CLASS(s->controller); + + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq); + + s->bus = i2c_init_bus(dev, name); + s->slave = i2c_slave_create_simple(s->bus, TYPE_ASPEED_I2C_BUS_SLAVE, + 0xff); + + memory_region_init_io(&s->mr, OBJECT(s), &aspeed_i2c_bus_ops, + s, name, aic->reg_size); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mr); +} + +static Property aspeed_i2c_bus_properties[] = { + DEFINE_PROP_UINT8("bus-id", AspeedI2CBus, id, 0), + DEFINE_PROP_LINK("controller", AspeedI2CBus, controller, TYPE_ASPEED_I2C, + AspeedI2CState *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_i2c_bus_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "Aspeed I2C Bus"; + dc->realize = aspeed_i2c_bus_realize; + dc->reset = aspeed_i2c_bus_reset; + device_class_set_props(dc, aspeed_i2c_bus_properties); +} + +static const TypeInfo aspeed_i2c_bus_info = { + .name = TYPE_ASPEED_I2C_BUS, + .parent = TYPE_SYS_BUS_DEVICE, + 
.instance_size = sizeof(AspeedI2CBus), + .class_init = aspeed_i2c_bus_class_init, +}; + static qemu_irq aspeed_2400_i2c_bus_get_irq(AspeedI2CBus *bus) { return bus->controller->irq; @@ -855,9 +1279,10 @@ static qemu_irq aspeed_2400_i2c_bus_get_irq(AspeedI2CBus *bus) static uint8_t *aspeed_2400_i2c_bus_pool_base(AspeedI2CBus *bus) { uint8_t *pool_page = - &bus->controller->pool[I2CD_POOL_PAGE_SEL(bus->ctrl) * 0x100]; + &bus->controller->pool[ARRAY_FIELD_EX32(bus->regs, I2CD_FUN_CTRL, + POOL_PAGE_SEL) * 0x100]; - return &pool_page[I2CD_POOL_OFFSET(bus->pool_ctrl)]; + return &pool_page[ARRAY_FIELD_EX32(bus->regs, I2CD_POOL_CTRL, OFFSET)]; } static void aspeed_2400_i2c_class_init(ObjectClass *klass, void *data) @@ -949,12 +1374,38 @@ static const TypeInfo aspeed_2600_i2c_info = { .class_init = aspeed_2600_i2c_class_init, }; +static void aspeed_1030_i2c_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedI2CClass *aic = ASPEED_I2C_CLASS(klass); + + dc->desc = "ASPEED 1030 I2C Controller"; + + aic->num_busses = 14; + aic->reg_size = 0x80; + aic->gap = -1; /* no gap */ + aic->bus_get_irq = aspeed_2600_i2c_bus_get_irq; + aic->pool_size = 0x200; + aic->pool_base = 0xC00; + aic->bus_pool_base = aspeed_2600_i2c_bus_pool_base; + aic->has_dma = true; +} + +static const TypeInfo aspeed_1030_i2c_info = { + .name = TYPE_ASPEED_1030_I2C, + .parent = TYPE_ASPEED_I2C, + .class_init = aspeed_1030_i2c_class_init, +}; + static void aspeed_i2c_register_types(void) { + type_register_static(&aspeed_i2c_bus_info); + type_register_static(&aspeed_i2c_bus_slave_info); type_register_static(&aspeed_i2c_info); type_register_static(&aspeed_2400_i2c_info); type_register_static(&aspeed_2500_i2c_info); type_register_static(&aspeed_2600_i2c_info); + type_register_static(&aspeed_1030_i2c_info); } type_init(aspeed_i2c_register_types) diff --git a/hw/i2c/core.c b/hw/i2c/core.c index 416372ad00..d4ba8146bf 100644 --- a/hw/i2c/core.c +++ b/hw/i2c/core.c @@ -13,6 +13,7 @@ #include "migration/vmstate.h" #include "qapi/error.h" #include "qemu/module.h" +#include "qemu/main-loop.h" #include "trace.h" #define I2C_BROADCAST 0x00 @@ -60,8 +61,9 @@ I2CBus *i2c_init_bus(DeviceState *parent, const char *name) { I2CBus *bus; - bus = I2C_BUS(qbus_create(TYPE_I2C_BUS, parent, name)); + bus = I2C_BUS(qbus_new(TYPE_I2C_BUS, parent, name)); QLIST_INIT(&bus->current_devs); + QSIMPLEQ_INIT(&bus->pending_masters); vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY, &vmstate_i2c_bus, bus); return bus; } @@ -74,7 +76,7 @@ void i2c_slave_set_address(I2CSlave *dev, uint8_t address) /* Return nonzero if bus is busy. */ int i2c_bus_busy(I2CBus *bus) { - return !QLIST_EMPTY(&bus->current_devs); + return !QLIST_EMPTY(&bus->current_devs) || bus->bh; } bool i2c_scan_bus(I2CBus *bus, uint8_t address, bool broadcast, @@ -159,7 +161,8 @@ static int i2c_do_start_transfer(I2CBus *bus, uint8_t address, start condition. */ if (sc->event) { - trace_i2c_event("start", s->address); + trace_i2c_event(event == I2C_START_SEND ? 
"start" : "start_async", + s->address); rv = sc->event(s, event); if (rv && !bus->broadcast) { if (bus_scanned) { @@ -180,6 +183,26 @@ int i2c_start_transfer(I2CBus *bus, uint8_t address, bool is_recv) : I2C_START_SEND); } +void i2c_bus_master(I2CBus *bus, QEMUBH *bh) +{ + if (i2c_bus_busy(bus)) { + I2CPendingMaster *node = g_new(struct I2CPendingMaster, 1); + node->bh = bh; + + QSIMPLEQ_INSERT_TAIL(&bus->pending_masters, node, entry); + + return; + } + + bus->bh = bh; + qemu_bh_schedule(bus->bh); +} + +void i2c_bus_release(I2CBus *bus) +{ + bus->bh = NULL; +} + int i2c_start_recv(I2CBus *bus, uint8_t address) { return i2c_do_start_transfer(bus, address, I2C_START_RECV); @@ -190,6 +213,11 @@ int i2c_start_send(I2CBus *bus, uint8_t address) return i2c_do_start_transfer(bus, address, I2C_START_SEND); } +int i2c_start_send_async(I2CBus *bus, uint8_t address) +{ + return i2c_do_start_transfer(bus, address, I2C_START_SEND_ASYNC); +} + void i2c_end_transfer(I2CBus *bus) { I2CSlaveClass *sc; @@ -206,6 +234,16 @@ void i2c_end_transfer(I2CBus *bus) g_free(node); } bus->broadcast = false; + + if (!QSIMPLEQ_EMPTY(&bus->pending_masters)) { + I2CPendingMaster *node = QSIMPLEQ_FIRST(&bus->pending_masters); + bus->bh = node->bh; + + QSIMPLEQ_REMOVE_HEAD(&bus->pending_masters, entry); + g_free(node); + + qemu_bh_schedule(bus->bh); + } } int i2c_send(I2CBus *bus, uint8_t data) @@ -229,6 +267,23 @@ int i2c_send(I2CBus *bus, uint8_t data) return ret ? -1 : 0; } +int i2c_send_async(I2CBus *bus, uint8_t data) +{ + I2CNode *node = QLIST_FIRST(&bus->current_devs); + I2CSlave *slave = node->elt; + I2CSlaveClass *sc = I2C_SLAVE_GET_CLASS(slave); + + if (!sc->send_async) { + return -1; + } + + trace_i2c_send_async(slave->address, data); + + sc->send_async(slave, data); + + return 0; +} + uint8_t i2c_recv(I2CBus *bus) { uint8_t data = 0xff; @@ -265,6 +320,17 @@ void i2c_nack(I2CBus *bus) } } +void i2c_ack(I2CBus *bus) +{ + if (!bus->bh) { + return; + } + + trace_i2c_ack(); + + qemu_bh_schedule(bus->bh); +} + static int i2c_slave_post_load(void *opaque, int version_id) { I2CSlave *dev = opaque; @@ -274,7 +340,7 @@ static int i2c_slave_post_load(void *opaque, int version_id) bus = I2C_BUS(qdev_get_parent_bus(DEVICE(dev))); if ((bus->saved_address == dev->address) || (bus->saved_address == I2C_BROADCAST)) { - node = g_malloc(sizeof(struct I2CNode)); + node = g_new(struct I2CNode, 1); node->elt = dev; QLIST_INSERT_HEAD(&bus->current_devs, node, next); } @@ -319,7 +385,7 @@ static bool i2c_slave_match(I2CSlave *candidate, uint8_t address, bool broadcast, I2CNodeList *current_devs) { if ((candidate->address == address) || (broadcast)) { - I2CNode *node = g_malloc(sizeof(struct I2CNode)); + I2CNode *node = g_new(struct I2CNode, 1); node->elt = candidate; QLIST_INSERT_HEAD(current_devs, node, next); return true; diff --git a/hw/i2c/i2c_mux_pca954x.c b/hw/i2c/i2c_mux_pca954x.c index 847c59921c..3945de795c 100644 --- a/hw/i2c/i2c_mux_pca954x.c +++ b/hw/i2c/i2c_mux_pca954x.c @@ -30,24 +30,6 @@ #define PCA9548_CHANNEL_COUNT 8 #define PCA9546_CHANNEL_COUNT 4 -/* - * struct Pca954xChannel - The i2c mux device will have N of these states - * that own the i2c channel bus. - * @bus: The owned channel bus. - * @enabled: Is this channel active? 
- */ -typedef struct Pca954xChannel { - SysBusDevice parent; - - I2CBus *bus; - - bool enabled; -} Pca954xChannel; - -#define TYPE_PCA954X_CHANNEL "pca954x-channel" -#define PCA954X_CHANNEL(obj) \ - OBJECT_CHECK(Pca954xChannel, (obj), TYPE_PCA954X_CHANNEL) - /* * struct Pca954xState - The pca954x state object. * @control: The value written to the mux control. @@ -59,8 +41,8 @@ typedef struct Pca954xState { uint8_t control; - /* The channel i2c buses. */ - Pca954xChannel channel[PCA9548_CHANNEL_COUNT]; + bool enabled[PCA9548_CHANNEL_COUNT]; + I2CBus *bus[PCA9548_CHANNEL_COUNT]; } Pca954xState; /* @@ -89,7 +71,7 @@ static bool pca954x_match(I2CSlave *candidate, uint8_t address, /* They are talking to the mux itself (or all devices enabled). */ if ((candidate->address == address) || broadcast) { - I2CNode *node = g_malloc(sizeof(struct I2CNode)); + I2CNode *node = g_new(struct I2CNode, 1); node->elt = candidate; QLIST_INSERT_HEAD(current_devs, node, next); if (!broadcast) { @@ -98,11 +80,11 @@ static bool pca954x_match(I2CSlave *candidate, uint8_t address, } for (i = 0; i < mc->nchans; i++) { - if (!mux->channel[i].enabled) { + if (!mux->enabled[i]) { continue; } - if (i2c_scan_bus(mux->channel[i].bus, address, broadcast, + if (i2c_scan_bus(mux->bus[i], address, broadcast, current_devs)) { if (!broadcast) { return true; @@ -125,9 +107,9 @@ static void pca954x_enable_channel(Pca954xState *s, uint8_t enable_mask) */ for (i = 0; i < mc->nchans; i++) { if (enable_mask & (1 << i)) { - s->channel[i].enabled = true; + s->enabled[i] = true; } else { - s->channel[i].enabled = false; + s->enabled[i] = false; } } } @@ -184,23 +166,7 @@ I2CBus *pca954x_i2c_get_bus(I2CSlave *mux, uint8_t channel) Pca954xState *pca954x = PCA954X(mux); g_assert(channel < pc->nchans); - return I2C_BUS(qdev_get_child_bus(DEVICE(&pca954x->channel[channel]), - "i2c-bus")); -} - -static void pca954x_channel_init(Object *obj) -{ - Pca954xChannel *s = PCA954X_CHANNEL(obj); - s->bus = i2c_init_bus(DEVICE(s), "i2c-bus"); - - /* Start all channels as disabled. */ - s->enabled = false; -} - -static void pca954x_channel_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - dc->desc = "Pca954x Channel"; + return pca954x->bus[channel]; } static void pca9546_class_init(ObjectClass *klass, void *data) @@ -215,28 +181,19 @@ static void pca9548_class_init(ObjectClass *klass, void *data) s->nchans = PCA9548_CHANNEL_COUNT; } -static void pca954x_realize(DeviceState *dev, Error **errp) -{ - Pca954xState *s = PCA954X(dev); - Pca954xClass *c = PCA954X_GET_CLASS(s); - int i; - - /* SMBus modules. Cannot fail. */ - for (i = 0; i < c->nchans; i++) { - sysbus_realize(SYS_BUS_DEVICE(&s->channel[i]), &error_abort); - } -} - static void pca954x_init(Object *obj) { Pca954xState *s = PCA954X(obj); Pca954xClass *c = PCA954X_GET_CLASS(obj); int i; - /* Only initialize the children we expect. */ + /* SMBus modules. Cannot fail. */ for (i = 0; i < c->nchans; i++) { - object_initialize_child(obj, "channel[*]", &s->channel[i], - TYPE_PCA954X_CHANNEL); + g_autofree gchar *bus_name = g_strdup_printf("i2c.%d", i); + + /* start all channels as disabled. 
*/ + s->enabled[i] = false; + s->bus[i] = i2c_init_bus(DEVICE(s), bus_name); } } @@ -252,7 +209,6 @@ static void pca954x_class_init(ObjectClass *klass, void *data) rc->phases.enter = pca954x_enter_reset; dc->desc = "Pca954x i2c-mux"; - dc->realize = pca954x_realize; k->write_data = pca954x_write_data; k->receive_byte = pca954x_read_byte; @@ -278,13 +234,6 @@ static const TypeInfo pca954x_info[] = { .parent = TYPE_PCA954X, .class_init = pca9548_class_init, }, - { - .name = TYPE_PCA954X_CHANNEL, - .parent = TYPE_SYS_BUS_DEVICE, - .class_init = pca954x_channel_class_init, - .instance_size = sizeof(Pca954xChannel), - .instance_init = pca954x_channel_init, - } }; DEFINE_TYPES(pca954x_info) diff --git a/hw/i2c/pmbus_device.c b/hw/i2c/pmbus_device.c index 24f8f522d9..4071a88cfc 100644 --- a/hw/i2c/pmbus_device.c +++ b/hw/i2c/pmbus_device.c @@ -28,6 +28,24 @@ uint32_t pmbus_direct_mode2data(PMBusCoefficients c, uint16_t value) return x; } +uint16_t pmbus_data2linear_mode(uint16_t value, int exp) +{ + /* L = D * 2^(-e) */ + if (exp < 0) { + return value << (-exp); + } + return value >> exp; +} + +uint16_t pmbus_linear_mode2data(uint16_t value, int exp) +{ + /* D = L * 2^e */ + if (exp < 0) { + return value >> (-exp); + } + return value << exp; +} + void pmbus_send(PMBusDevice *pmdev, const uint8_t *data, uint16_t len) { if (pmdev->out_buf_len + len > SMBUS_DATA_MAX_LEN) { @@ -89,16 +107,16 @@ void pmbus_send_string(PMBusDevice *pmdev, const char *data) } -static uint64_t pmbus_receive_uint(const uint8_t *buf, uint8_t len) +static uint64_t pmbus_receive_uint(PMBusDevice *pmdev) { uint64_t ret = 0; /* Exclude command code from return value */ - buf++; - len--; + pmdev->in_buf++; + pmdev->in_buf_len--; - for (int i = len - 1; i >= 0; i--) { - ret = ret << 8 | buf[i]; + for (int i = pmdev->in_buf_len - 1; i >= 0; i--) { + ret = ret << 8 | pmdev->in_buf[i]; } return ret; } @@ -110,7 +128,7 @@ uint8_t pmbus_receive8(PMBusDevice *pmdev) "%s: length mismatch. Expected 1 byte, got %d bytes\n", __func__, pmdev->in_buf_len - 1); } - return pmbus_receive_uint(pmdev->in_buf, pmdev->in_buf_len); + return pmbus_receive_uint(pmdev); } uint16_t pmbus_receive16(PMBusDevice *pmdev) @@ -120,7 +138,7 @@ uint16_t pmbus_receive16(PMBusDevice *pmdev) "%s: length mismatch. Expected 2 bytes, got %d bytes\n", __func__, pmdev->in_buf_len - 1); } - return pmbus_receive_uint(pmdev->in_buf, pmdev->in_buf_len); + return pmbus_receive_uint(pmdev); } uint32_t pmbus_receive32(PMBusDevice *pmdev) @@ -130,7 +148,7 @@ uint32_t pmbus_receive32(PMBusDevice *pmdev) "%s: length mismatch. Expected 4 bytes, got %d bytes\n", __func__, pmdev->in_buf_len - 1); } - return pmbus_receive_uint(pmdev->in_buf, pmdev->in_buf_len); + return pmbus_receive_uint(pmdev); } uint64_t pmbus_receive64(PMBusDevice *pmdev) @@ -140,7 +158,7 @@ uint64_t pmbus_receive64(PMBusDevice *pmdev) "%s: length mismatch. 
Expected 8 bytes, got %d bytes\n", __func__, pmdev->in_buf_len - 1); } - return pmbus_receive_uint(pmdev->in_buf, pmdev->in_buf_len); + return pmbus_receive_uint(pmdev); } static uint8_t pmbus_out_buf_pop(PMBusDevice *pmdev) @@ -149,7 +167,7 @@ static uint8_t pmbus_out_buf_pop(PMBusDevice *pmdev) qemu_log_mask(LOG_GUEST_ERROR, "%s: tried to read from empty buffer", __func__); - return 0xFF; + return PMBUS_ERR_BYTE; } uint8_t data = pmdev->out_buf[pmdev->out_buf_len - 1]; pmdev->out_buf_len--; @@ -243,18 +261,48 @@ void pmbus_check_limits(PMBusDevice *pmdev) } } +void pmbus_idle(PMBusDevice *pmdev) +{ + pmdev->code = PMBUS_IDLE_STATE; +} + +/* assert the status_cml error upon receipt of malformed command */ +static void pmbus_cml_error(PMBusDevice *pmdev) +{ + for (int i = 0; i < pmdev->num_pages; i++) { + pmdev->pages[i].status_word |= PMBUS_STATUS_CML; + pmdev->pages[i].status_cml |= PB_CML_FAULT_INVALID_CMD; + } +} + static uint8_t pmbus_receive_byte(SMBusDevice *smd) { PMBusDevice *pmdev = PMBUS_DEVICE(smd); PMBusDeviceClass *pmdc = PMBUS_DEVICE_GET_CLASS(pmdev); - uint8_t ret = 0xFF; - uint8_t index = pmdev->page; + uint8_t ret = PMBUS_ERR_BYTE; + uint8_t index; if (pmdev->out_buf_len != 0) { ret = pmbus_out_buf_pop(pmdev); return ret; } + /* + * Reading from all pages will return the value from page 0, + * means that all subsequent commands are to be applied to all output. + */ + if (pmdev->page == PB_ALL_PAGES) { + index = 0; + } else if (pmdev->page > pmdev->num_pages - 1) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: page %d is out of range\n", + __func__, pmdev->page); + pmbus_cml_error(pmdev); + return PMBUS_ERR_BYTE; + } else { + index = pmdev->page; + } + switch (pmdev->code) { case PMBUS_PAGE: pmbus_send8(pmdev, pmdev->page); @@ -278,6 +326,11 @@ static uint8_t pmbus_receive_byte(SMBusDevice *smd) case PMBUS_CAPABILITY: pmbus_send8(pmdev, pmdev->capability); + if (pmdev->capability & BIT(7)) { + qemu_log_mask(LOG_UNIMP, + "%s: PEC is enabled but not yet supported.\n", + __func__); + } break; case PMBUS_VOUT_MODE: /* R/W byte */ @@ -368,6 +421,14 @@ static uint8_t pmbus_receive_byte(SMBusDevice *smd) } break; + case PMBUS_VOUT_MIN: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT_RATING) { + pmbus_send16(pmdev, pmdev->pages[index].vout_min); + } else { + goto passthough; + } + break; + /* TODO: implement coefficients support */ case PMBUS_POUT_MAX: /* R/W word */ @@ -708,6 +769,10 @@ static uint8_t pmbus_receive_byte(SMBusDevice *smd) pmbus_send8(pmdev, pmdev->pages[index].status_other); break; + case PMBUS_STATUS_MFR_SPECIFIC: /* R/W byte */ + pmbus_send8(pmdev, pmdev->pages[index].status_mfr_specific); + break; + case PMBUS_READ_EIN: /* Read-Only block 5 bytes */ if (pmdev->pages[index].page_flags & PB_HAS_EIN) { pmbus_send(pmdev, pmdev->pages[index].read_ein, 5); @@ -920,6 +985,10 @@ static uint8_t pmbus_receive_byte(SMBusDevice *smd) } break; + case PMBUS_IDLE_STATE: + pmbus_send8(pmdev, PMBUS_ERR_BYTE); + break; + case PMBUS_CLEAR_FAULTS: /* Send Byte */ case PMBUS_PAGE_PLUS_WRITE: /* Block Write-only */ case PMBUS_STORE_DEFAULT_ALL: /* Send Byte */ @@ -1007,7 +1076,7 @@ static int pmbus_write_data(SMBusDevice *smd, uint8_t *buf, uint8_t len) if (len == 0) { qemu_log_mask(LOG_GUEST_ERROR, "%s: writing empty data\n", __func__); - return -1; + return PMBUS_ERR_BYTE; } if (!pmdev->pages) { /* allocate memory for pages on first use */ @@ -1026,6 +1095,7 @@ static int pmbus_write_data(SMBusDevice *smd, uint8_t *buf, uint8_t len) pmdev->page = pmbus_receive8(pmdev); 
return 0; } + /* loop through all the pages when 0xFF is received */ if (pmdev->page == PB_ALL_PAGES) { for (int i = 0; i < pmdev->num_pages; i++) { @@ -1036,6 +1106,15 @@ static int pmbus_write_data(SMBusDevice *smd, uint8_t *buf, uint8_t len) return 0; } + if (pmdev->page > pmdev->num_pages - 1) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: page %u is out of range\n", + __func__, pmdev->page); + pmdev->page = 0; /* undefined behaviour - reset to page 0 */ + pmbus_cml_error(pmdev); + return PMBUS_ERR_BYTE; + } + index = pmdev->page; switch (pmdev->code) { @@ -1149,6 +1228,14 @@ static int pmbus_write_data(SMBusDevice *smd, uint8_t *buf, uint8_t len) } break; + case PMBUS_VOUT_MIN: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT_RATING) { + pmdev->pages[index].vout_min = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + case PMBUS_POUT_MAX: /* R/W word */ if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { pmdev->pages[index].pout_max = pmbus_receive16(pmdev); @@ -1482,6 +1569,10 @@ static int pmbus_write_data(SMBusDevice *smd, uint8_t *buf, uint8_t len) pmdev->pages[index].status_other = pmbus_receive8(pmdev); break; + case PMBUS_STATUS_MFR_SPECIFIC: /* R/W byte */ + pmdev->pages[index].status_mfr_specific = pmbus_receive8(pmdev); + break; + case PMBUS_PAGE_PLUS_READ: /* Block Read-only */ case PMBUS_CAPABILITY: /* Read-Only byte */ case PMBUS_COEFFICIENTS: /* Read-only block 5 bytes */ diff --git a/hw/i2c/smbus_ich9.c b/hw/i2c/smbus_ich9.c index 44dd5653b7..ee50ba1f2c 100644 --- a/hw/i2c/smbus_ich9.c +++ b/hw/i2c/smbus_ich9.c @@ -29,6 +29,7 @@ #include "hw/i386/ich9.h" #include "qom/object.h" +#include "hw/acpi/acpi_aml_interface.h" OBJECT_DECLARE_SIMPLE_TYPE(ICH9SMBState, ICH9_SMB_DEVICE) @@ -94,10 +95,22 @@ static void ich9_smbus_realize(PCIDevice *d, Error **errp) &s->smb.io); } +static void build_ich9_smb_aml(AcpiDevAmlIf *adev, Aml *scope) +{ + BusChild *kid; + ICH9SMBState *s = ICH9_SMB_DEVICE(adev); + BusState *bus = BUS(s->smb.smbus); + + QTAILQ_FOREACH(kid, &bus->children, sibling) { + call_dev_aml_func(DEVICE(kid->child), scope); + } +} + static void ich9_smb_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); k->vendor_id = PCI_VENDOR_ID_INTEL; k->device_id = PCI_DEVICE_ID_INTEL_ICH9_6; @@ -112,6 +125,7 @@ static void ich9_smb_class_init(ObjectClass *klass, void *data) * pc_q35_init() */ dc->user_creatable = false; + adevc->build_dev_aml = build_ich9_smb_aml; } static void ich9_smb_set_irq(PMSMBus *pmsmb, bool enabled) @@ -143,6 +157,7 @@ static const TypeInfo ich9_smb_info = { .class_init = ich9_smb_class_init, .interfaces = (InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { TYPE_ACPI_DEV_AML_IF }, { }, }, }; diff --git a/hw/i2c/smbus_slave.c b/hw/i2c/smbus_slave.c index 5d10e27664..feb3ec6333 100644 --- a/hw/i2c/smbus_slave.c +++ b/hw/i2c/smbus_slave.c @@ -143,6 +143,10 @@ static int smbus_i2c_event(I2CSlave *s, enum i2c_event event) dev->mode = SMBUS_CONFUSED; break; } + break; + + default: + return -1; } return 0; diff --git a/hw/i2c/trace-events b/hw/i2c/trace-events index 7d8907c1ee..af181d43ee 100644 --- a/hw/i2c/trace-events +++ b/hw/i2c/trace-events @@ -4,12 +4,14 @@ i2c_event(const char *event, uint8_t address) "%s(addr:0x%02x)" i2c_send(uint8_t address, uint8_t data) "send(addr:0x%02x) data:0x%02x" +i2c_send_async(uint8_t address, uint8_t data) "send_async(addr:0x%02x) data:0x%02x" 
i2c_recv(uint8_t address, uint8_t data) "recv(addr:0x%02x) data:0x%02x" +i2c_ack(void) "" # aspeed_i2c.c aspeed_i2c_bus_cmd(uint32_t cmd, const char *cmd_flags, uint32_t count, uint32_t intr_status) "handling cmd=0x%x %s count=%d intr=0x%x" -aspeed_i2c_bus_raise_interrupt(uint32_t intr_status, const char *str1, const char *str2, const char *str3, const char *str4, const char *str5) "handled intr=0x%x %s%s%s%s%s" +aspeed_i2c_bus_raise_interrupt(uint32_t intr_status, const char *s) "handled intr=0x%x %s" aspeed_i2c_bus_read(uint32_t busid, uint64_t offset, unsigned size, uint64_t value) "bus[%d]: To 0x%" PRIx64 " of size %u: 0x%" PRIx64 aspeed_i2c_bus_write(uint32_t busid, uint64_t offset, unsigned size, uint64_t value) "bus[%d]: To 0x%" PRIx64 " of size %u: 0x%" PRIx64 aspeed_i2c_bus_send(const char *mode, int i, int count, uint8_t byte) "%s send %d/%d 0x%02x" diff --git a/hw/i386/Kconfig b/hw/i386/Kconfig index dc9e2d2e54..c25a2a4174 100644 --- a/hw/i386/Kconfig +++ b/hw/i386/Kconfig @@ -6,6 +6,10 @@ config SEV select X86_FW_OVMF depends on KVM +config SGX + bool + depends on KVM + config PC bool imply APPLESMC @@ -21,6 +25,7 @@ config PC imply PVPANIC_ISA imply QXL imply SEV + imply SGX imply SGA imply TEST_DEVICES imply TPM_CRB @@ -54,6 +59,7 @@ config PC_ACPI select ACPI_X86 select ACPI_CPU_HOTPLUG select ACPI_MEMORY_HOTPLUG + select ACPI_VIOT select SMBUS_EEPROM select PFLASH_CFI01 depends on ACPI_SMBUS diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index 689247cd0f..44b20f175f 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -28,21 +28,23 @@ #include "qemu/bitmap.h" #include "qemu/error-report.h" #include "hw/pci/pci.h" +#include "hw/cxl/cxl.h" #include "hw/core/cpu.h" #include "target/i386/cpu.h" -#include "hw/misc/pvpanic.h" #include "hw/timer/hpet.h" #include "hw/acpi/acpi-defs.h" #include "hw/acpi/acpi.h" #include "hw/acpi/cpu.h" #include "hw/nvram/fw_cfg.h" #include "hw/acpi/bios-linker-loader.h" -#include "hw/isa/isa.h" -#include "hw/block/fdc.h" +#include "hw/acpi/acpi_aml_interface.h" +#include "hw/input/i8042.h" #include "hw/acpi/memory_hotplug.h" #include "sysemu/tpm.h" #include "hw/acpi/tpm.h" #include "hw/acpi/vmgenid.h" +#include "hw/acpi/erst.h" +#include "hw/acpi/piix4.h" #include "sysemu/tpm_backend.h" #include "hw/rtc/mc146818rtc_regs.h" #include "migration/vmstate.h" @@ -58,19 +60,25 @@ #include "hw/i386/fw_cfg.h" #include "hw/i386/ich9.h" #include "hw/pci/pci_bus.h" +#include "hw/pci-host/i440fx.h" #include "hw/pci-host/q35.h" #include "hw/i386/x86-iommu.h" #include "hw/acpi/aml-build.h" #include "hw/acpi/utils.h" #include "hw/acpi/pci.h" +#include "hw/acpi/cxl.h" #include "qom/qom-qobject.h" #include "hw/i386/amd_iommu.h" #include "hw/i386/intel_iommu.h" +#include "hw/virtio/virtio-iommu.h" -#include "hw/acpi/ipmi.h" #include "hw/acpi/hmat.h" +#include "hw/acpi/viot.h" +#include "hw/acpi/cxl.h" + +#include CONFIG_DEVICES /* These are used to size the ACPI tables for -M pc-i440fx-1.7 and * -M pc-i440fx-2.0. 
Even if the actual amount of AML generated grows @@ -105,24 +113,14 @@ typedef struct AcpiPmInfo { } AcpiPmInfo; typedef struct AcpiMiscInfo { - bool is_piix4; bool has_hpet; #ifdef CONFIG_TPM TPMVersion tpm_version; #endif const unsigned char *dsdt_code; unsigned dsdt_size; - uint16_t pvpanic_port; - uint16_t applesmc_io_base; } AcpiMiscInfo; -typedef struct AcpiBuildPciBusHotplugState { - GArray *device_table; - GArray *notify_table; - struct AcpiBuildPciBusHotplugState *parent; - bool pcihp_bridge_en; -} AcpiBuildPciBusHotplugState; - typedef struct FwCfgTPMConfig { uint32_t tpmppi_address; uint8_t tpm_version; @@ -190,6 +188,13 @@ static void init_common_fadt_data(MachineState *ms, Object *o, .address = object_property_get_uint(o, ACPI_PM_PROP_GPE0_BLK, NULL) }, }; + + /* + * ACPI v2, Table 5-10 - Fixed ACPI Description Table Boot Architecture + * Flags, bit offset 1 - 8042. + */ + fadt.iapc_boot_arch = iapc_boot_arch_8042(); + *data = fadt; } @@ -270,32 +275,19 @@ static void acpi_get_pm_info(MachineState *machine, AcpiPmInfo *pm) qobject_unref(o); pm->pcihp_bridge_en = - object_property_get_bool(obj, "acpi-pci-hotplug-with-bridge-support", + object_property_get_bool(obj, ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, NULL); pm->pcihp_root_en = - object_property_get_bool(obj, "acpi-root-pci-hotplug", + object_property_get_bool(obj, ACPI_PM_PROP_ACPI_PCI_ROOTHP, NULL); } static void acpi_get_misc_info(AcpiMiscInfo *info) { - Object *piix = object_resolve_type_unambiguous(TYPE_PIIX4_PM); - Object *lpc = object_resolve_type_unambiguous(TYPE_ICH9_LPC_DEVICE); - assert(!!piix != !!lpc); - - if (piix) { - info->is_piix4 = true; - } - if (lpc) { - info->is_piix4 = false; - } - info->has_hpet = hpet_find(); #ifdef CONFIG_TPM info->tpm_version = tpm_get_version(tpm_find()); #endif - info->pvpanic_port = pvpanic_port(); - info->applesmc_io_base = applesmc_port(); } /* @@ -306,13 +298,9 @@ Object *acpi_get_i386_pci_host(void) { PCIHostState *host; - host = OBJECT_CHECK(PCIHostState, - object_resolve_path("/machine/i440fx", NULL), - TYPE_PCI_HOST_BRIDGE); + host = PCI_HOST_BRIDGE(object_resolve_path("/machine/i440fx", NULL)); if (!host) { - host = OBJECT_CHECK(PCIHostState, - object_resolve_path("/machine/q35", NULL), - TYPE_PCI_HOST_BRIDGE); + host = PCI_HOST_BRIDGE(object_resolve_path("/machine/q35", NULL)); } return OBJECT(host); @@ -352,13 +340,42 @@ static void acpi_align_size(GArray *blob, unsigned align) g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align)); } -/* FACS */ +/* + * ACPI spec 1.0b, + * 5.2.6 Firmware ACPI Control Structure + */ static void build_facs(GArray *table_data) { - AcpiFacsDescriptorRev1 *facs = acpi_data_push(table_data, sizeof *facs); - memcpy(&facs->signature, "FACS", 4); - facs->length = cpu_to_le32(sizeof(*facs)); + const char *sig = "FACS"; + const uint8_t reserved[40] = {}; + + g_array_append_vals(table_data, sig, 4); /* Signature */ + build_append_int_noprefix(table_data, 64, 4); /* Length */ + build_append_int_noprefix(table_data, 0, 4); /* Hardware Signature */ + build_append_int_noprefix(table_data, 0, 4); /* Firmware Waking Vector */ + build_append_int_noprefix(table_data, 0, 4); /* Global Lock */ + build_append_int_noprefix(table_data, 0, 4); /* Flags */ + g_array_append_vals(table_data, reserved, 40); /* Reserved */ +} + +Aml *aml_pci_device_dsm(void) +{ + Aml *method; + + method = aml_method("_DSM", 4, AML_SERIALIZED); + { + Aml *params = aml_local(0); + Aml *pkg = aml_package(2); + aml_append(pkg, aml_name("BSEL")); + aml_append(pkg, aml_name("ASUN")); + 
aml_append(method, aml_store(pkg, params)); + aml_append(method, + aml_return(aml_call5("PDSM", aml_arg(0), aml_arg(1), + aml_arg(2), aml_arg(3), params)) + ); + } + return method; } static void build_append_pcihp_notify_entry(Aml *method, int slot) @@ -395,13 +412,35 @@ static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus, int func = PCI_FUNC(devfn); /* ACPI spec: 1.0b: Table 6-2 _ADR Object Bus Types, PCI type */ int adr = slot << 16 | func; - bool hotplug_enabled_dev; - bool bridge_in_acpi; - bool cold_plugged_bridge; + bool hotpluggbale_slot = false; + bool bridge_in_acpi = false; + bool cold_plugged_bridge = false; + + if (pdev) { + pc = PCI_DEVICE_GET_CLASS(pdev); + dc = DEVICE_GET_CLASS(pdev); - if (!pdev) { /* - * add hotplug slots for non present devices. + * Cold plugged bridges aren't themselves hot-pluggable. + * Hotplugged bridges *are* hot-pluggable. + */ + cold_plugged_bridge = pc->is_bridge && !DEVICE(pdev)->hotplugged; + bridge_in_acpi = cold_plugged_bridge && pcihp_bridge_en; + + hotpluggbale_slot = bsel && dc->hotpluggable && + !cold_plugged_bridge; + + /* + * allow describing coldplugged bridges in ACPI even if they are not + * on function 0, as they are not unpluggable, for all other devices + * generate description only for function 0 per slot, and for other + * functions if device on function provides its own AML + */ + if (func && !bridge_in_acpi && !get_dev_aml_func(DEVICE(pdev))) { + continue; + } + } else { + /* * hotplug is supported only for non-multifunction device * so generate device description only for function 0 */ @@ -409,51 +448,11 @@ static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus, if (pci_bus_is_express(bus) && slot > 0) { break; } - dev = aml_device("S%.02X", devfn); - aml_append(dev, aml_name_decl("_SUN", aml_int(slot))); - aml_append(dev, aml_name_decl("_ADR", aml_int(adr))); - method = aml_method("_EJ0", 1, AML_NOTSERIALIZED); - aml_append(method, - aml_call2("PCEJ", aml_name("BSEL"), aml_name("_SUN")) - ); - aml_append(dev, method); - method = aml_method("_DSM", 4, AML_SERIALIZED); - aml_append(method, - aml_return(aml_call6("PDSM", aml_arg(0), aml_arg(1), - aml_arg(2), aml_arg(3), - aml_name("BSEL"), aml_name("_SUN"))) - ); - aml_append(dev, method); - aml_append(parent_scope, dev); - - build_append_pcihp_notify_entry(notify_method, slot); + /* mark it as empty hotpluggable slot */ + hotpluggbale_slot = true; + } else { + continue; } - continue; - } - - pc = PCI_DEVICE_GET_CLASS(pdev); - dc = DEVICE_GET_CLASS(pdev); - - /* - * Cold plugged bridges aren't themselves hot-pluggable. - * Hotplugged bridges *are* hot-pluggable. 
- */ - cold_plugged_bridge = pc->is_bridge && !DEVICE(pdev)->hotplugged; - bridge_in_acpi = cold_plugged_bridge && pcihp_bridge_en; - - hotplug_enabled_dev = bsel && dc->hotpluggable && !cold_plugged_bridge; - - if (pc->class_id == PCI_CLASS_BRIDGE_ISA) { - continue; - } - - /* - * allow describing coldplugged bridges in ACPI even if they are not - * on function 0, as they are not unpluggable, for all other devices - * generate description only for function 0 per slot - */ - if (func && !bridge_in_acpi) { - continue; } /* start to compose PCI device descriptor */ @@ -466,48 +465,13 @@ static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus, * enumeration order in linux kernel, so use another variable for it */ aml_append(dev, aml_name_decl("ASUN", aml_int(slot))); - method = aml_method("_DSM", 4, AML_SERIALIZED); - aml_append(method, aml_return( - aml_call6("PDSM", aml_arg(0), aml_arg(1), aml_arg(2), - aml_arg(3), aml_name("BSEL"), aml_name("ASUN")) - )); - aml_append(dev, method); + aml_append(dev, aml_pci_device_dsm()); } - if (pc->class_id == PCI_CLASS_DISPLAY_VGA) { - /* add VGA specific AML methods */ - int s3d; + call_dev_aml_func(DEVICE(pdev), dev); - if (object_dynamic_cast(OBJECT(pdev), "qxl-vga")) { - s3d = 3; - } else { - s3d = 0; - } - - method = aml_method("_S1D", 0, AML_NOTSERIALIZED); - aml_append(method, aml_return(aml_int(0))); - aml_append(dev, method); - - method = aml_method("_S2D", 0, AML_NOTSERIALIZED); - aml_append(method, aml_return(aml_int(0))); - aml_append(dev, method); - - method = aml_method("_S3D", 0, AML_NOTSERIALIZED); - aml_append(method, aml_return(aml_int(s3d))); - aml_append(dev, method); - } else if (hotplug_enabled_dev) { - aml_append(dev, aml_name_decl("_SUN", aml_int(slot))); - /* add _EJ0 to make slot hotpluggable */ - method = aml_method("_EJ0", 1, AML_NOTSERIALIZED); - aml_append(method, - aml_call2("PCEJ", aml_name("BSEL"), aml_name("_SUN")) - ); - aml_append(dev, method); - - if (bsel) { - build_append_pcihp_notify_entry(notify_method, slot); - } - } else if (bridge_in_acpi) { + bridge_in_acpi = cold_plugged_bridge && pcihp_bridge_en; + if (bridge_in_acpi) { /* * device is coldplugged bridge, * add child device descriptions into its scope @@ -516,6 +480,19 @@ static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus, build_append_pci_bus_devices(dev, sec_bus, pcihp_bridge_en); } + + if (hotpluggbale_slot) { + aml_append(dev, aml_name_decl("_SUN", aml_int(slot))); + /* add _EJ0 to make slot hotpluggable */ + method = aml_method("_EJ0", 1, AML_NOTSERIALIZED); + aml_append(method, + aml_call2("PCEJ", aml_name("BSEL"), aml_name("_SUN")) + ); + aml_append(dev, method); + + build_append_pcihp_notify_entry(notify_method, slot); + } + /* device descriptor has been composed, add it into parent context */ aml_append(parent_scope, dev); } @@ -559,84 +536,100 @@ static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus, qobject_unref(bsel); } -Aml *aml_pci_device_dsm(void) +static Aml *aml_pci_pdsm(void) { - Aml *method, *UUID, *ifctx, *ifctx1, *ifctx2, *ifctx3, *elsectx; - Aml *acpi_index = aml_local(0); + Aml *method, *UUID, *ifctx, *ifctx1; + Aml *ret = aml_local(0); + Aml *caps = aml_local(1); + Aml *acpi_index = aml_local(2); Aml *zero = aml_int(0); - Aml *bnum = aml_arg(4); + Aml *one = aml_int(1); Aml *func = aml_arg(2); Aml *rev = aml_arg(1); - Aml *sun = aml_arg(5); + Aml *params = aml_arg(4); + Aml *bnum = aml_derefof(aml_index(params, aml_int(0))); + Aml *sunum = aml_derefof(aml_index(params, 
aml_int(1))); - method = aml_method("PDSM", 6, AML_SERIALIZED); + method = aml_method("PDSM", 5, AML_SERIALIZED); + /* get supported functions */ + ifctx = aml_if(aml_equal(func, zero)); + { + uint8_t byte_list[1] = { 0 }; /* nothing supported yet */ + aml_append(ifctx, aml_store(aml_buffer(1, byte_list), ret)); + aml_append(ifctx, aml_store(zero, caps)); + + /* + * PCI Firmware Specification 3.1 + * 4.6. _DSM Definitions for PCI + */ + UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D"); + ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(0), UUID))); + { + /* call is for unsupported UUID, bail out */ + aml_append(ifctx1, aml_return(ret)); + } + aml_append(ifctx, ifctx1); + + ifctx1 = aml_if(aml_lless(rev, aml_int(2))); + { + /* call is for unsupported REV, bail out */ + aml_append(ifctx1, aml_return(ret)); + } + aml_append(ifctx, ifctx1); + + aml_append(ifctx, + aml_store(aml_call2("AIDX", bnum, sunum), acpi_index)); + /* + * advertise function 7 if device has acpi-index + * acpi_index values: + * 0: not present (default value) + * FFFFFFFF: not supported (old QEMU without PIDX reg) + * other: device's acpi-index + */ + ifctx1 = aml_if(aml_lnot( + aml_or(aml_equal(acpi_index, zero), + aml_equal(acpi_index, aml_int(0xFFFFFFFF)), NULL) + )); + { + /* have supported functions */ + aml_append(ifctx1, aml_or(caps, one, caps)); + /* support for function 7 */ + aml_append(ifctx1, + aml_or(caps, aml_shiftleft(one, aml_int(7)), caps)); + } + aml_append(ifctx, ifctx1); + + aml_append(ifctx, aml_store(caps, aml_index(ret, zero))); + aml_append(ifctx, aml_return(ret)); + } + aml_append(method, ifctx); + + /* handle specific functions requests */ /* * PCI Firmware Specification 3.1 - * 4.6. _DSM Definitions for PCI + * 4.6.7. _DSM for Naming a PCI or PCI Express Device Under + * Operating Systems */ - UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D"); - ifctx = aml_if(aml_equal(aml_arg(0), UUID)); + ifctx = aml_if(aml_equal(func, aml_int(7))); { - aml_append(ifctx, aml_store(aml_call2("AIDX", bnum, sun), acpi_index)); - ifctx1 = aml_if(aml_equal(func, zero)); - { - uint8_t byte_list[1]; + Aml *pkg = aml_package(2); - ifctx2 = aml_if(aml_equal(rev, aml_int(2))); - { - /* - * advertise function 7 if device has acpi-index - * acpi_index values: - * 0: not present (default value) - * FFFFFFFF: not supported (old QEMU without PIDX reg) - * other: device's acpi-index - */ - ifctx3 = aml_if(aml_lnot( - aml_or(aml_equal(acpi_index, zero), - aml_equal(acpi_index, aml_int(0xFFFFFFFF)), NULL) - )); - { - byte_list[0] = - 1 /* have supported functions */ | - 1 << 7 /* support for function 7 */ - ; - aml_append(ifctx3, aml_return(aml_buffer(1, byte_list))); - } - aml_append(ifctx2, ifctx3); - } - aml_append(ifctx1, ifctx2); + aml_append(pkg, zero); + /* + * optional, if not impl. should return null string + */ + aml_append(pkg, aml_string("%s", "")); + aml_append(ifctx, aml_store(pkg, ret)); - byte_list[0] = 0; /* nothing supported */ - aml_append(ifctx1, aml_return(aml_buffer(1, byte_list))); - } - aml_append(ifctx, ifctx1); - elsectx = aml_else(); - /* - * PCI Firmware Specification 3.1 - * 4.6.7. _DSM for Naming a PCI or PCI Express Device Under - * Operating Systems - */ - ifctx1 = aml_if(aml_equal(func, aml_int(7))); - { - Aml *pkg = aml_package(2); - Aml *ret = aml_local(1); - - aml_append(pkg, zero); - /* - * optional, if not impl. 
should return null string - */ - aml_append(pkg, aml_string("%s", "")); - aml_append(ifctx1, aml_store(pkg, ret)); - /* - * update acpi-index to actual value - */ - aml_append(ifctx1, aml_store(acpi_index, aml_index(ret, zero))); - aml_append(ifctx1, aml_return(ret)); - } - aml_append(elsectx, ifctx1); - aml_append(ifctx, elsectx); + aml_append(ifctx, aml_store(aml_call2("AIDX", bnum, sunum), acpi_index)); + /* + * update acpi-index to actual value + */ + aml_append(ifctx, aml_store(acpi_index, aml_index(ret, zero))); + aml_append(ifctx, aml_return(ret)); } + aml_append(method, ifctx); return method; } @@ -846,21 +839,6 @@ static Aml *build_vmbus_device_aml(VMBusBridge *vmbus_bridge) return dev; } -static void build_isa_devices_aml(Aml *table) -{ - bool ambiguous; - Object *obj = object_resolve_path_type("", TYPE_ISA_BUS, &ambiguous); - Aml *scope; - - assert(obj && !ambiguous); - - scope = aml_scope("_SB.PCI0.ISA"); - build_acpi_ipmi_devices(scope, BUS(obj), "\\_SB.PCI0.ISA"); - isa_build_aml(ISA_BUS(obj), scope); - - aml_append(table, scope); -} - static void build_dbg_aml(Aml *table) { Aml *field; @@ -1010,7 +988,6 @@ static void build_piix4_pci0_int(Aml *table) { Aml *dev; Aml *crs; - Aml *field; Aml *method; uint32_t irqs; Aml *sb_scope = aml_scope("_SB"); @@ -1019,13 +996,6 @@ static void build_piix4_pci0_int(Aml *table) aml_append(pci0_scope, build_prt(true)); aml_append(sb_scope, pci0_scope); - field = aml_field("PCI0.ISA.P40C", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); - aml_append(field, aml_named_field("PRQ0", 8)); - aml_append(field, aml_named_field("PRQ1", 8)); - aml_append(field, aml_named_field("PRQ2", 8)); - aml_append(field, aml_named_field("PRQ3", 8)); - aml_append(sb_scope, field); - aml_append(sb_scope, build_irq_status_method()); aml_append(sb_scope, build_iqcr_method(true)); @@ -1129,7 +1099,6 @@ static Aml *build_q35_routing_table(const char *str) static void build_q35_pci0_int(Aml *table) { - Aml *field; Aml *method; Aml *sb_scope = aml_scope("_SB"); Aml *pci0_scope = aml_scope("PCI0"); @@ -1166,18 +1135,6 @@ static void build_q35_pci0_int(Aml *table) aml_append(pci0_scope, method); aml_append(sb_scope, pci0_scope); - field = aml_field("PCI0.ISA.PIRQ", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); - aml_append(field, aml_named_field("PRQA", 8)); - aml_append(field, aml_named_field("PRQB", 8)); - aml_append(field, aml_named_field("PRQC", 8)); - aml_append(field, aml_named_field("PRQD", 8)); - aml_append(field, aml_reserved_field(0x20)); - aml_append(field, aml_named_field("PRQE", 8)); - aml_append(field, aml_named_field("PRQF", 8)); - aml_append(field, aml_named_field("PRQG", 8)); - aml_append(field, aml_named_field("PRQH", 8)); - aml_append(sb_scope, field); - aml_append(sb_scope, build_irq_status_method()); aml_append(sb_scope, build_iqcr_method(false)); @@ -1242,40 +1199,6 @@ static Aml *build_q35_dram_controller(const AcpiMcfgInfo *mcfg) return dev; } -static void build_q35_isa_bridge(Aml *table) -{ - Aml *dev; - Aml *scope; - - scope = aml_scope("_SB.PCI0"); - dev = aml_device("ISA"); - aml_append(dev, aml_name_decl("_ADR", aml_int(0x001F0000))); - - /* ICH9 PCI to ISA irq remapping */ - aml_append(dev, aml_operation_region("PIRQ", AML_PCI_CONFIG, - aml_int(0x60), 0x0C)); - - aml_append(scope, dev); - aml_append(table, scope); -} - -static void build_piix4_isa_bridge(Aml *table) -{ - Aml *dev; - Aml *scope; - - scope = aml_scope("_SB.PCI0"); - dev = aml_device("ISA"); - aml_append(dev, aml_name_decl("_ADR", aml_int(0x00010000))); - - /* PIIX PCI to ISA irq remapping 
*/ - aml_append(dev, aml_operation_region("P40C", AML_PCI_CONFIG, - aml_int(0x60), 0x04)); - - aml_append(scope, dev); - aml_append(table, scope); -} - static void build_x86_acpi_pci_hotplug(Aml *table, uint64_t pcihp_addr) { Aml *scope; @@ -1327,12 +1250,12 @@ static void build_x86_acpi_pci_hotplug(Aml *table, uint64_t pcihp_addr) aml_append(method, aml_return(aml_local(0))); aml_append(scope, method); - aml_append(scope, aml_pci_device_dsm()); + aml_append(scope, aml_pci_pdsm()); aml_append(table, scope); } -static Aml *build_q35_osc_method(void) +static Aml *build_q35_osc_method(bool enable_native_pcie_hotplug) { Aml *if_ctx; Aml *if_ctx2; @@ -1354,8 +1277,10 @@ static Aml *build_q35_osc_method(void) /* * Always allow native PME, AER (no dependencies) * Allow SHPC (PCI bridges can have SHPC controller) + * Disable PCIe Native Hot-plug if ACPI PCI Hot-plug is enabled. */ - aml_append(if_ctx, aml_and(a_ctrl, aml_int(0x1F), a_ctrl)); + aml_append(if_ctx, aml_and(a_ctrl, + aml_int(0x1E | (enable_native_pcie_hotplug ? 0x1 : 0x0)), a_ctrl)); if_ctx2 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(1)))); /* Unknown revision */ @@ -1380,13 +1305,18 @@ static Aml *build_q35_osc_method(void) return method; } -static void build_smb0(Aml *table, I2CBus *smbus, int devnr, int func) +static void build_acpi0017(Aml *table) { - Aml *scope = aml_scope("_SB.PCI0"); - Aml *dev = aml_device("SMB0"); + Aml *dev, *scope, *method; + + scope = aml_scope("_SB"); + dev = aml_device("CXLM"); + aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0017"))); + + method = aml_method("_STA", 0, AML_NOTSERIALIZED); + aml_append(method, aml_return(aml_int(0x01))); + aml_append(dev, method); - aml_append(dev, aml_name_decl("_ADR", aml_int(devnr << 16 | func))); - build_acpi_ipmi_devices(dev, BUS(smbus), "\\_SB.PCI0.SMB0"); aml_append(scope, dev); aml_append(table, scope); } @@ -1396,6 +1326,8 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, AcpiPmInfo *pm, AcpiMiscInfo *misc, Range *pci_hole, Range *pci_hole64, MachineState *machine) { + Object *i440fx = object_resolve_type_unambiguous(TYPE_I440FX_PCI_HOST_BRIDGE); + Object *q35 = object_resolve_type_unambiguous(TYPE_Q35_HOST_DEVICE); CrsRangeEntry *entry; Aml *dsdt, *sb_scope, *scope, *dev, *method, *field, *pkg, *crs; CrsRangeSet crs_range_set; @@ -1410,16 +1342,19 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, #ifdef CONFIG_TPM TPMIf *tpm = tpm_find(); #endif + bool cxl_present = false; int i; VMBusBridge *vmbus_bridge = vmbus_bridge_find(); + AcpiTable table = { .sig = "DSDT", .rev = 1, .oem_id = x86ms->oem_id, + .oem_table_id = x86ms->oem_table_id }; + assert(!!i440fx != !!q35); + + acpi_table_begin(&table, table_data); dsdt = init_aml_allocator(); - /* Reserve space for header */ - acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader)); - build_dbg_aml(dsdt); - if (misc->is_piix4) { + if (i440fx) { sb_scope = aml_scope("_SB"); dev = aml_device("PCI0"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03"))); @@ -1428,23 +1363,18 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, aml_append(sb_scope, dev); aml_append(dsdt, sb_scope); - if (misc->has_hpet) { - build_hpet_aml(dsdt); - } - build_piix4_isa_bridge(dsdt); - build_isa_devices_aml(dsdt); if (pm->pcihp_bridge_en || pm->pcihp_root_en) { build_x86_acpi_pci_hotplug(dsdt, pm->pcihp_io_base); } build_piix4_pci0_int(dsdt); - } else { + } else if (q35) { sb_scope = aml_scope("_SB"); dev = aml_device("PCI0"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08"))); aml_append(dev, 
aml_name_decl("_CID", aml_eisaid("PNP0A03"))); aml_append(dev, aml_name_decl("_ADR", aml_int(0))); aml_append(dev, aml_name_decl("_UID", aml_int(pcmc->pci_root_uid))); - aml_append(dev, build_q35_osc_method()); + aml_append(dev, build_q35_osc_method(!pm->pcihp_bridge_en)); aml_append(sb_scope, dev); if (mcfg_valid) { aml_append(sb_scope, build_q35_dram_controller(&mcfg)); @@ -1477,18 +1407,14 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, aml_append(dsdt, sb_scope); - if (misc->has_hpet) { - build_hpet_aml(dsdt); - } - build_q35_isa_bridge(dsdt); - build_isa_devices_aml(dsdt); if (pm->pcihp_bridge_en) { build_x86_acpi_pci_hotplug(dsdt, pm->pcihp_io_base); } build_q35_pci0_int(dsdt); - if (pcms->smbus && !pcmc->do_not_add_smb_acpi) { - build_smb0(dsdt, pcms->smbus, ICH9_SMB_DEV, ICH9_SMB_FUNC); - } + } + + if (misc->has_hpet) { + build_hpet_aml(dsdt); } if (vmbus_bridge) { @@ -1497,6 +1423,18 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, aml_append(dsdt, sb_scope); } + scope = aml_scope("_GPE"); + { + aml_append(scope, aml_name_decl("_HID", aml_string("ACPI0006"))); + if (machine->nvdimms_state->is_enabled) { + method = aml_method("_E04", 0, AML_NOTSERIALIZED); + aml_append(method, aml_notify(aml_name("\\_SB.NVDR"), + aml_int(0x80))); + aml_append(scope, method); + } + } + aml_append(dsdt, scope); + if (pcmc->legacy_cpu_hotplug) { build_legacy_cpu_hotplug_aml(dsdt, machine, pm->cpu_hp_io_base); } else { @@ -1515,28 +1453,6 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, pcms->memhp_io_base); } - scope = aml_scope("_GPE"); - { - aml_append(scope, aml_name_decl("_HID", aml_string("ACPI0006"))); - - if (pm->pcihp_bridge_en || pm->pcihp_root_en) { - method = aml_method("_E01", 0, AML_NOTSERIALIZED); - aml_append(method, - aml_acquire(aml_name("\\_SB.PCI0.BLCK"), 0xFFFF)); - aml_append(method, aml_call0("\\_SB.PCI0.PCNT")); - aml_append(method, aml_release(aml_name("\\_SB.PCI0.BLCK"))); - aml_append(scope, method); - } - - if (machine->nvdimms_state->is_enabled) { - method = aml_method("_E04", 0, AML_NOTSERIALIZED); - aml_append(method, aml_notify(aml_name("\\_SB.NVDR"), - aml_int(0x80))); - aml_append(scope, method); - } - } - aml_append(dsdt, scope); - crs_range_set_init(&crs_range_set); bus = PC_MACHINE(machine)->bus; if (bus) { @@ -1554,13 +1470,30 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, } scope = aml_scope("\\_SB"); - dev = aml_device("PC%.02X", bus_num); + + if (pci_bus_is_cxl(bus)) { + dev = aml_device("CL%.02X", bus_num); + } else { + dev = aml_device("PC%.02X", bus_num); + } aml_append(dev, aml_name_decl("_UID", aml_int(bus_num))); aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num))); - if (pci_bus_is_express(bus)) { + if (pci_bus_is_cxl(bus)) { + struct Aml *pkg = aml_package(2); + + aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0016"))); + aml_append(pkg, aml_eisaid("PNP0A08")); + aml_append(pkg, aml_eisaid("PNP0A03")); + aml_append(dev, aml_name_decl("_CID", pkg)); + aml_append(dev, aml_name_decl("_ADR", aml_int(0))); + aml_append(dev, aml_name_decl("_UID", aml_int(bus_num))); + build_cxl_osc_method(dev); + } else if (pci_bus_is_express(bus)) { aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08"))); aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03"))); - aml_append(dev, build_q35_osc_method()); + + /* Expander bridges do not have ACPI PCI Hot-plug enabled */ + aml_append(dev, build_q35_osc_method(true)); } else { aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03"))); } @@ -1575,9 +1508,23 @@ build_dsdt(GArray 
*table_data, BIOSLinker *linker, aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); aml_append(dsdt, scope); + + /* Handle the ranges for the PXB expanders */ + if (pci_bus_is_cxl(bus)) { + MemoryRegion *mr = &pcms->cxl_devices_state.host_mr; + uint64_t base = mr->addr; + + cxl_present = true; + crs_range_insert(crs_range_set.mem_ranges, base, + base + memory_region_size(mr) - 1); + } } } + if (cxl_present) { + build_acpi0017(dsdt); + } + /* * At this point crs_range_set has all the ranges used by pci * busses *other* than PCI0. These ranges will be excluded from @@ -1727,106 +1674,15 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, aml_append(dsdt, scope); } - if (misc->applesmc_io_base) { - scope = aml_scope("\\_SB.PCI0.ISA"); - dev = aml_device("SMC"); - - aml_append(dev, aml_name_decl("_HID", aml_eisaid("APP0001"))); - /* device present, functioning, decoding, not shown in UI */ - aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); - - crs = aml_resource_template(); - aml_append(crs, - aml_io(AML_DECODE16, misc->applesmc_io_base, misc->applesmc_io_base, - 0x01, APPLESMC_MAX_DATA_LENGTH) - ); - aml_append(crs, aml_irq_no_flags(6)); - aml_append(dev, aml_name_decl("_CRS", crs)); - - aml_append(scope, dev); - aml_append(dsdt, scope); - } - - if (misc->pvpanic_port) { - scope = aml_scope("\\_SB.PCI0.ISA"); - - dev = aml_device("PEVT"); - aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0001"))); - - crs = aml_resource_template(); - aml_append(crs, - aml_io(AML_DECODE16, misc->pvpanic_port, misc->pvpanic_port, 1, 1) - ); - aml_append(dev, aml_name_decl("_CRS", crs)); - - aml_append(dev, aml_operation_region("PEOR", AML_SYSTEM_IO, - aml_int(misc->pvpanic_port), 1)); - field = aml_field("PEOR", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); - aml_append(field, aml_named_field("PEPT", 8)); - aml_append(dev, field); - - /* device present, functioning, decoding, shown in UI */ - aml_append(dev, aml_name_decl("_STA", aml_int(0xF))); - - method = aml_method("RDPT", 0, AML_NOTSERIALIZED); - aml_append(method, aml_store(aml_name("PEPT"), aml_local(0))); - aml_append(method, aml_return(aml_local(0))); - aml_append(dev, method); - - method = aml_method("WRPT", 1, AML_NOTSERIALIZED); - aml_append(method, aml_store(aml_arg(0), aml_name("PEPT"))); - aml_append(dev, method); - - aml_append(scope, dev); - aml_append(dsdt, scope); - } - sb_scope = aml_scope("\\_SB"); { - Object *pci_host; - PCIBus *bus = NULL; - - pci_host = acpi_get_i386_pci_host(); + Object *pci_host = acpi_get_i386_pci_host(); if (pci_host) { - bus = PCI_HOST_BRIDGE(pci_host)->bus; - } - - if (bus) { + PCIBus *bus = PCI_HOST_BRIDGE(pci_host)->bus; Aml *scope = aml_scope("PCI0"); /* Scan all PCI buses. Generate tables to support hotplug. 
*/ build_append_pci_bus_devices(scope, bus, pm->pcihp_bridge_en); - -#ifdef CONFIG_TPM - if (TPM_IS_TIS_ISA(tpm)) { - if (misc->tpm_version == TPM_VERSION_2_0) { - dev = aml_device("TPM"); - aml_append(dev, aml_name_decl("_HID", - aml_string("MSFT0101"))); - } else { - dev = aml_device("ISA.TPM"); - aml_append(dev, aml_name_decl("_HID", - aml_eisaid("PNP0C31"))); - } - - aml_append(dev, aml_name_decl("_STA", aml_int(0xF))); - crs = aml_resource_template(); - aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE, - TPM_TIS_ADDR_SIZE, AML_READ_WRITE)); - /* - FIXME: TPM_TIS_IRQ=5 conflicts with PNP0C0F irqs, - Rewrite to take IRQ from TPM device model and - fix default IRQ value there to use some unused IRQ - */ - /* aml_append(crs, aml_irq_no_flags(TPM_TIS_IRQ)); */ - aml_append(dev, aml_name_decl("_CRS", crs)); - - tpm_build_ppi_acpi(tpm, dev); - - aml_append(scope, dev); - } -#endif - aml_append(sb_scope, scope); } } @@ -1835,12 +1691,15 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, if (TPM_IS_CRB(tpm)) { dev = aml_device("TPM"); aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101"))); + aml_append(dev, aml_name_decl("_STR", + aml_string("TPM 2.0 Device"))); crs = aml_resource_template(); aml_append(crs, aml_memory32_fixed(TPM_CRB_ADDR_BASE, TPM_CRB_ADDR_SIZE, AML_READ_WRITE)); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(dev, aml_name_decl("_STA", aml_int(0xf))); + aml_append(dev, aml_name_decl("_UID", aml_int(1))); tpm_build_ppi_acpi(tpm, dev); @@ -1848,125 +1707,187 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, } #endif + if (pcms->sgx_epc.size != 0) { + uint64_t epc_base = pcms->sgx_epc.base; + uint64_t epc_size = pcms->sgx_epc.size; + + dev = aml_device("EPC"); + aml_append(dev, aml_name_decl("_HID", aml_eisaid("INT0E0C"))); + aml_append(dev, aml_name_decl("_STR", + aml_unicode("Enclave Page Cache 1.0"))); + crs = aml_resource_template(); + aml_append(crs, + aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, + AML_MAX_FIXED, AML_NON_CACHEABLE, + AML_READ_WRITE, 0, epc_base, + epc_base + epc_size - 1, 0, epc_size)); + aml_append(dev, aml_name_decl("_CRS", crs)); + + method = aml_method("_STA", 0, AML_NOTSERIALIZED); + aml_append(method, aml_return(aml_int(0x0f))); + aml_append(dev, method); + + aml_append(sb_scope, dev); + } aml_append(dsdt, sb_scope); + if (pm->pcihp_bridge_en || pm->pcihp_root_en) { + scope = aml_scope("_GPE"); + { + method = aml_method("_E01", 0, AML_NOTSERIALIZED); + aml_append(method, + aml_acquire(aml_name("\\_SB.PCI0.BLCK"), 0xFFFF)); + aml_append(method, aml_call0("\\_SB.PCI0.PCNT")); + aml_append(method, aml_release(aml_name("\\_SB.PCI0.BLCK"))); + aml_append(scope, method); + } + aml_append(dsdt, scope); + } + /* copy AML table into ACPI tables blob and patch header there */ g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); - build_header(linker, table_data, - (void *)(table_data->data + table_data->len - dsdt->buf->len), - "DSDT", dsdt->buf->len, 1, x86ms->oem_id, x86ms->oem_table_id); + acpi_table_end(linker, &table); free_aml_allocator(); } +/* + * IA-PC HPET (High Precision Event Timers) Specification (Revision: 1.0a) + * 3.2.4The ACPI 2.0 HPET Description Table (HPET) + */ static void build_hpet(GArray *table_data, BIOSLinker *linker, const char *oem_id, const char *oem_table_id) { - Acpi20Hpet *hpet; - int hpet_start = table_data->len; + AcpiTable table = { .sig = "HPET", .rev = 1, + .oem_id = oem_id, .oem_table_id = oem_table_id }; - hpet = acpi_data_push(table_data, sizeof(*hpet)); + 
acpi_table_begin(&table, table_data); /* Note timer_block_id value must be kept in sync with value advertised by * emulated hpet */ - hpet->timer_block_id = cpu_to_le32(0x8086a201); - hpet->addr.address = cpu_to_le64(HPET_BASE); - build_header(linker, table_data, - (void *)(table_data->data + hpet_start), - "HPET", sizeof(*hpet), 1, oem_id, oem_table_id); + /* Event Timer Block ID */ + build_append_int_noprefix(table_data, 0x8086a201, 4); + /* BASE_ADDRESS */ + build_append_gas(table_data, AML_AS_SYSTEM_MEMORY, 0, 0, 0, HPET_BASE); + /* HPET Number */ + build_append_int_noprefix(table_data, 0, 1); + /* Main Counter Minimum Clock_tick in Periodic Mode */ + build_append_int_noprefix(table_data, 0, 2); + /* Page Protection And OEM Attribute */ + build_append_int_noprefix(table_data, 0, 1); + acpi_table_end(linker, &table); } #ifdef CONFIG_TPM +/* + * TCPA Description Table + * + * Following Level 00, Rev 00.37 of specs: + * http://www.trustedcomputinggroup.org/resources/tcg_acpi_specification + * 7.1.2 ACPI Table Layout + */ static void build_tpm_tcpa(GArray *table_data, BIOSLinker *linker, GArray *tcpalog, const char *oem_id, const char *oem_table_id) { - int tcpa_start = table_data->len; - Acpi20Tcpa *tcpa = acpi_data_push(table_data, sizeof *tcpa); - unsigned log_addr_size = sizeof(tcpa->log_area_start_address); - unsigned log_addr_offset = - (char *)&tcpa->log_area_start_address - table_data->data; + unsigned log_addr_offset; + AcpiTable table = { .sig = "TCPA", .rev = 2, + .oem_id = oem_id, .oem_table_id = oem_table_id }; - tcpa->platform_class = cpu_to_le16(TPM_TCPA_ACPI_CLASS_CLIENT); - tcpa->log_area_minimum_length = cpu_to_le32(TPM_LOG_AREA_MINIMUM_SIZE); - acpi_data_push(tcpalog, le32_to_cpu(tcpa->log_area_minimum_length)); + acpi_table_begin(&table, table_data); + /* Platform Class */ + build_append_int_noprefix(table_data, TPM_TCPA_ACPI_CLASS_CLIENT, 2); + /* Log Area Minimum Length (LAML) */ + build_append_int_noprefix(table_data, TPM_LOG_AREA_MINIMUM_SIZE, 4); + /* Log Area Start Address (LASA) */ + log_addr_offset = table_data->len; + build_append_int_noprefix(table_data, 0, 8); + /* allocate/reserve space for TPM log area */ + acpi_data_push(tcpalog, TPM_LOG_AREA_MINIMUM_SIZE); bios_linker_loader_alloc(linker, ACPI_BUILD_TPMLOG_FILE, tcpalog, 1, false /* high memory */); - /* log area start address to be filled by Guest linker */ - bios_linker_loader_add_pointer(linker, - ACPI_BUILD_TABLE_FILE, log_addr_offset, log_addr_size, - ACPI_BUILD_TPMLOG_FILE, 0); + bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE, + log_addr_offset, 8, ACPI_BUILD_TPMLOG_FILE, 0); - build_header(linker, table_data, - (void *)(table_data->data + tcpa_start), - "TCPA", sizeof(*tcpa), 2, oem_id, oem_table_id); + acpi_table_end(linker, &table); } #endif #define HOLE_640K_START (640 * KiB) #define HOLE_640K_END (1 * MiB) +/* + * ACPI spec, Revision 3.0 + * 5.2.15 System Resource Affinity Table (SRAT) + */ static void build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) { - AcpiSystemResourceAffinityTable *srat; - AcpiSratMemoryAffinity *numamem; - int i; - int srat_start, numa_start, slots; + int numa_mem_start, slots; uint64_t mem_len, mem_base, next_base; MachineClass *mc = MACHINE_GET_CLASS(machine); X86MachineState *x86ms = X86_MACHINE(machine); const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine); PCMachineState *pcms = PC_MACHINE(machine); - ram_addr_t hotplugabble_address_space_size = + int nb_numa_nodes = machine->numa_state->num_nodes; + NodeInfo 
*numa_info = machine->numa_state->nodes; + ram_addr_t hotpluggable_address_space_size = object_property_get_int(OBJECT(pcms), PC_MACHINE_DEVMEM_REGION_SIZE, NULL); + AcpiTable table = { .sig = "SRAT", .rev = 1, .oem_id = x86ms->oem_id, + .oem_table_id = x86ms->oem_table_id }; - srat_start = table_data->len; - - srat = acpi_data_push(table_data, sizeof *srat); - srat->reserved1 = cpu_to_le32(1); + acpi_table_begin(&table, table_data); + build_append_int_noprefix(table_data, 1, 4); /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); /* Reserved */ for (i = 0; i < apic_ids->len; i++) { int node_id = apic_ids->cpus[i].props.node_id; uint32_t apic_id = apic_ids->cpus[i].arch_id; if (apic_id < 255) { - AcpiSratProcessorAffinity *core; - - core = acpi_data_push(table_data, sizeof *core); - core->type = ACPI_SRAT_PROCESSOR_APIC; - core->length = sizeof(*core); - core->local_apic_id = apic_id; - core->proximity_lo = node_id; - memset(core->proximity_hi, 0, 3); - core->local_sapic_eid = 0; - core->flags = cpu_to_le32(1); + /* 5.2.15.1 Processor Local APIC/SAPIC Affinity Structure */ + build_append_int_noprefix(table_data, 0, 1); /* Type */ + build_append_int_noprefix(table_data, 16, 1); /* Length */ + /* Proximity Domain [7:0] */ + build_append_int_noprefix(table_data, node_id, 1); + build_append_int_noprefix(table_data, apic_id, 1); /* APIC ID */ + /* Flags, Table 5-36 */ + build_append_int_noprefix(table_data, 1, 4); + build_append_int_noprefix(table_data, 0, 1); /* Local SAPIC EID */ + /* Proximity Domain [31:8] */ + build_append_int_noprefix(table_data, 0, 3); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ } else { - AcpiSratProcessorX2ApicAffinity *core; - - core = acpi_data_push(table_data, sizeof *core); - core->type = ACPI_SRAT_PROCESSOR_x2APIC; - core->length = sizeof(*core); - core->x2apic_id = cpu_to_le32(apic_id); - core->proximity_domain = cpu_to_le32(node_id); - core->flags = cpu_to_le32(1); + /* + * ACPI spec, Revision 4.0 + * 5.2.16.3 Processor Local x2APIC Affinity Structure + */ + build_append_int_noprefix(table_data, 2, 1); /* Type */ + build_append_int_noprefix(table_data, 24, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + /* Proximity Domain */ + build_append_int_noprefix(table_data, node_id, 4); + build_append_int_noprefix(table_data, apic_id, 4); /* X2APIC ID */ + /* Flags, Table 5-39 */ + build_append_int_noprefix(table_data, 1 /* Enabled */, 4); + build_append_int_noprefix(table_data, 0, 4); /* Clock Domain */ + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ } } - /* the memory map is a bit tricky, it contains at least one hole * from 640k-1M and possibly another one from 3.5G-4G. 
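The loop that follows walks the guest's NUMA nodes and has to carve the 640K-1M legacy hole (and, further down, the below-4G hole) out of each node's RAM range before emitting SRAT Memory Affinity entries with build_srat_memory(). A standalone sketch of just the 640K-1M carve-out, assuming hypothetical emit_range()/emit_node_ram() helpers in place of the QEMU code:

#include <inttypes.h>
#include <stdio.h>

#define HOLE_640K_START (640 * 1024ULL)
#define HOLE_640K_END   (1024 * 1024ULL)

/* Stand-in for build_srat_memory(): print a range instead of packing 40 bytes. */
static void emit_range(uint64_t base, uint64_t len, int node)
{
    if (len > 0) {
        printf("node %d: [0x%" PRIx64 ", 0x%" PRIx64 ")\n", node, base, base + len);
    }
}

/* Carve the 640K-1M hole out of one node's [base, base + len) RAM range. */
static void emit_node_ram(uint64_t base, uint64_t len, int node)
{
    uint64_t end = base + len;

    if (base <= HOLE_640K_START && end > HOLE_640K_START) {
        emit_range(base, HOLE_640K_START - base, node);
        base = HOLE_640K_END;
        len = end > HOLE_640K_END ? end - HOLE_640K_END : 0;
    }
    emit_range(base, len, node);
}

int main(void)
{
    emit_node_ram(0, 3 * 1024 * 1024ULL, 0);    /* spans the hole: two entries */
    return 0;
}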
*/ next_base = 0; - numa_start = table_data->len; + numa_mem_start = table_data->len; - for (i = 1; i < pcms->numa_nodes + 1; ++i) { + for (i = 1; i < nb_numa_nodes + 1; ++i) { mem_base = next_base; - mem_len = pcms->node_mem[i - 1]; + mem_len = numa_info[i - 1].node_mem; next_base = mem_base + mem_len; /* Cut out the 640K hole */ @@ -1974,8 +1895,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) next_base > HOLE_640K_START) { mem_len -= next_base - HOLE_640K_START; if (mem_len > 0) { - numamem = acpi_data_push(table_data, sizeof *numamem); - build_srat_memory(numamem, mem_base, mem_len, i - 1, + build_srat_memory(table_data, mem_base, mem_len, i - 1, MEM_AFFINITY_ENABLED); } @@ -1993,18 +1913,16 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) next_base > x86ms->below_4g_mem_size) { mem_len -= next_base - x86ms->below_4g_mem_size; if (mem_len > 0) { - numamem = acpi_data_push(table_data, sizeof *numamem); - build_srat_memory(numamem, mem_base, mem_len, i - 1, + build_srat_memory(table_data, mem_base, mem_len, i - 1, MEM_AFFINITY_ENABLED); } - mem_base = 1ULL << 32; + mem_base = x86ms->above_4g_mem_start; mem_len = next_base - x86ms->below_4g_mem_size; next_base = mem_base + mem_len; } if (mem_len > 0) { - numamem = acpi_data_push(table_data, sizeof *numamem); - build_srat_memory(numamem, mem_base, mem_len, i - 1, + build_srat_memory(table_data, mem_base, mem_len, i - 1, MEM_AFFINITY_ENABLED); } } @@ -2013,10 +1931,17 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) nvdimm_build_srat(table_data); } - slots = (table_data->len - numa_start) / sizeof *numamem; - for (; slots < pcms->numa_nodes + 2; slots++) { - numamem = acpi_data_push(table_data, sizeof *numamem); - build_srat_memory(numamem, 0, 0, 0, MEM_AFFINITY_NOFLAGS); + sgx_epc_build_srat(table_data); + + /* + * TODO: this part is not in ACPI spec and current linux kernel boots fine + * without these entries. But I recall there were issues the last time I + * tried to remove it with some ancient guest OS, however I can't remember + * what that was so keep this around for now + */ + slots = (table_data->len - numa_mem_start) / 40 /* mem affinity len */; + for (; slots < nb_numa_nodes + 2; slots++) { + build_srat_memory(table_data, 0, 0, 0, MEM_AFFINITY_NOFLAGS); } /* @@ -2027,18 +1952,13 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) * Memory devices may override proximity set by this entry, * providing _PXM method if necessary. 
*/ - if (hotplugabble_address_space_size) { - numamem = acpi_data_push(table_data, sizeof *numamem); - build_srat_memory(numamem, machine->device_memory->base, - hotplugabble_address_space_size, pcms->numa_nodes - 1, + if (hotpluggable_address_space_size) { + build_srat_memory(table_data, machine->device_memory->base, + hotpluggable_address_space_size, nb_numa_nodes - 1, MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED); } - build_header(linker, table_data, - (void *)(table_data->data + srat_start), - "SRAT", - table_data->len - srat_start, 1, x86ms->oem_id, - x86ms->oem_table_id); + acpi_table_end(linker, &table); } /* @@ -2047,8 +1967,9 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) static void insert_scope(PCIBus *bus, PCIDevice *dev, void *opaque) { + const size_t device_scope_size = 6 /* device scope structure */ + + 2 /* 1 path entry */; GArray *scope_blob = opaque; - AcpiDmarDeviceScope *scope = NULL; if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) { /* Dmar Scope Type: 0x02 for PCI Bridge */ @@ -2059,8 +1980,7 @@ insert_scope(PCIBus *bus, PCIDevice *dev, void *opaque) } /* length */ - build_append_int_noprefix(scope_blob, - sizeof(*scope) + sizeof(scope->path[0]), 1); + build_append_int_noprefix(scope_blob, device_scope_size, 1); /* reserved */ build_append_int_noprefix(scope_blob, 0, 2); /* enumeration_id */ @@ -2083,8 +2003,7 @@ dmar_host_bridges(Object *obj, void *opaque) PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus; if (bus && !pci_bus_bypass_iommu(bus)) { - pci_for_each_device(bus, pci_bus_num(bus), insert_scope, - scope_blob); + pci_for_each_device_under_bus(bus, insert_scope, scope_blob); } } @@ -2092,26 +2011,26 @@ dmar_host_bridges(Object *obj, void *opaque) } /* - * VT-d spec 8.1 DMA Remapping Reporting Structure - * (version Oct. 2014 or later) + * Intel ® Virtualization Technology for Directed I/O + * Architecture Specification. Revision 3.3 + * 8.1 DMA Remapping Reporting Structure */ static void build_dmar_q35(GArray *table_data, BIOSLinker *linker, const char *oem_id, const char *oem_table_id) { - int dmar_start = table_data->len; - - AcpiTableDmar *dmar; - AcpiDmarHardwareUnit *drhd; - AcpiDmarRootPortATS *atsr; uint8_t dmar_flags = 0; + uint8_t rsvd10[10] = {}; + /* Root complex IOAPIC uses one path only */ + const size_t ioapic_scope_size = 6 /* device scope structure */ + + 2 /* 1 path entry */; X86IOMMUState *iommu = x86_iommu_get_default(); - AcpiDmarDeviceScope *scope = NULL; - /* Root complex IOAPIC use one path[0] only */ - size_t ioapic_scope_size = sizeof(*scope) + sizeof(scope->path[0]); IntelIOMMUState *intel_iommu = INTEL_IOMMU_DEVICE(iommu); GArray *scope_blob = g_array_new(false, true, 1); + AcpiTable table = { .sig = "DMAR", .rev = 1, .oem_id = oem_id, + .oem_table_id = oem_table_id }; + /* * A PCI bus walk, for each PCI host bridge. 
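Both insert_scope() above and the root-complex IOAPIC scope emitted a little further down open-code the same 8-byte DMAR Device Scope layout: a 6-byte header followed by one {device, function} path entry. A standalone sketch of that layout, using a hypothetical append_u8() helper rather than QEMU's build_append_int_noprefix():

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper, not a QEMU API. */
static void append_u8(uint8_t *buf, size_t *len, uint8_t val)
{
    buf[(*len)++] = val;
}

int main(void)
{
    uint8_t scope[8];
    size_t len = 0;

    append_u8(scope, &len, 0x01);   /* Type: 0x01 endpoint, 0x02 bridge, 0x03 IOAPIC */
    append_u8(scope, &len, 8);      /* Length: 6-byte header + one path entry */
    append_u8(scope, &len, 0);      /* Reserved */
    append_u8(scope, &len, 0);      /* Reserved */
    append_u8(scope, &len, 0);      /* Enumeration ID (0 for PCI devices) */
    append_u8(scope, &len, 0);      /* Start Bus Number */
    append_u8(scope, &len, 3);      /* Path: device */
    append_u8(scope, &len, 0);      /* Path: function */

    for (size_t i = 0; i < len; i++) {
        printf("%02x ", scope[i]);
    }
    printf("\n");
    return 0;
}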
* Insert scope for each PCI bridge and endpoint device which @@ -2125,43 +2044,52 @@ build_dmar_q35(GArray *table_data, BIOSLinker *linker, const char *oem_id, dmar_flags |= 0x1; /* Flags: 0x1: INT_REMAP */ } - dmar = acpi_data_push(table_data, sizeof(*dmar)); - dmar->host_address_width = intel_iommu->aw_bits - 1; - dmar->flags = dmar_flags; + acpi_table_begin(&table, table_data); + /* Host Address Width */ + build_append_int_noprefix(table_data, intel_iommu->aw_bits - 1, 1); + build_append_int_noprefix(table_data, dmar_flags, 1); /* Flags */ + g_array_append_vals(table_data, rsvd10, sizeof(rsvd10)); /* Reserved */ - /* DMAR Remapping Hardware Unit Definition structure */ - drhd = acpi_data_push(table_data, sizeof(*drhd) + ioapic_scope_size); - drhd->type = cpu_to_le16(ACPI_DMAR_TYPE_HARDWARE_UNIT); - drhd->length = - cpu_to_le16(sizeof(*drhd) + ioapic_scope_size + scope_blob->len); - drhd->flags = 0; /* Don't include all pci device */ - drhd->pci_segment = cpu_to_le16(0); - drhd->address = cpu_to_le64(Q35_HOST_BRIDGE_IOMMU_ADDR); + /* 8.3 DMAR Remapping Hardware Unit Definition structure */ + build_append_int_noprefix(table_data, 0, 2); /* Type */ + /* Length */ + build_append_int_noprefix(table_data, + 16 + ioapic_scope_size + scope_blob->len, 2); + /* Flags */ + build_append_int_noprefix(table_data, 0 /* Don't include all pci device */ , + 1); + build_append_int_noprefix(table_data, 0 , 1); /* Reserved */ + build_append_int_noprefix(table_data, 0 , 2); /* Segment Number */ + /* Register Base Address */ + build_append_int_noprefix(table_data, Q35_HOST_BRIDGE_IOMMU_ADDR , 8); /* Scope definition for the root-complex IOAPIC. See VT-d spec * 8.3.1 (version Oct. 2014 or later). */ - scope = &drhd->scope[0]; - scope->entry_type = 0x03; /* Type: 0x03 for IOAPIC */ - scope->length = ioapic_scope_size; - scope->enumeration_id = ACPI_BUILD_IOAPIC_ID; - scope->bus = Q35_PSEUDO_BUS_PLATFORM; - scope->path[0].device = PCI_SLOT(Q35_PSEUDO_DEVFN_IOAPIC); - scope->path[0].function = PCI_FUNC(Q35_PSEUDO_DEVFN_IOAPIC); + build_append_int_noprefix(table_data, 0x03 /* IOAPIC */, 1); /* Type */ + build_append_int_noprefix(table_data, ioapic_scope_size, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + /* Enumeration ID */ + build_append_int_noprefix(table_data, ACPI_BUILD_IOAPIC_ID, 1); + /* Start Bus Number */ + build_append_int_noprefix(table_data, Q35_PSEUDO_BUS_PLATFORM, 1); + /* Path, {Device, Function} pair */ + build_append_int_noprefix(table_data, PCI_SLOT(Q35_PSEUDO_DEVFN_IOAPIC), 1); + build_append_int_noprefix(table_data, PCI_FUNC(Q35_PSEUDO_DEVFN_IOAPIC), 1); /* Add scope found above */ g_array_append_vals(table_data, scope_blob->data, scope_blob->len); g_array_free(scope_blob, true); if (iommu->dt_supported) { - atsr = acpi_data_push(table_data, sizeof(*atsr)); - atsr->type = cpu_to_le16(ACPI_DMAR_TYPE_ATSR); - atsr->length = cpu_to_le16(sizeof(*atsr)); - atsr->flags = ACPI_DMAR_ATSR_ALL_PORTS; - atsr->pci_segment = cpu_to_le16(0); + /* 8.5 Root Port ATS Capability Reporting Structure */ + build_append_int_noprefix(table_data, 2, 2); /* Type */ + build_append_int_noprefix(table_data, 8, 2); /* Length */ + build_append_int_noprefix(table_data, 1 /* ALL_PORTS */, 1); /* Flags */ + build_append_int_noprefix(table_data, 0, 1); /* Reserved */ + build_append_int_noprefix(table_data, 0, 2); /* Segment Number */ } - build_header(linker, table_data, (void *)(table_data->data + dmar_start), - "DMAR", table_data->len - dmar_start, 1, oem_id, oem_table_id); + 
acpi_table_end(linker, &table); } /* @@ -2175,10 +2103,10 @@ static void build_waet(GArray *table_data, BIOSLinker *linker, const char *oem_id, const char *oem_table_id) { - int waet_start = table_data->len; + AcpiTable table = { .sig = "WAET", .rev = 1, .oem_id = oem_id, + .oem_table_id = oem_table_id }; - /* WAET header */ - acpi_data_push(table_data, sizeof(AcpiTableHeader)); + acpi_table_begin(&table, table_data); /* * Set "ACPI PM timer good" flag. * @@ -2187,9 +2115,7 @@ build_waet(GArray *table_data, BIOSLinker *linker, const char *oem_id, * Which avoids costly VMExits caused by guest re-reading it unnecessarily. */ build_append_int_noprefix(table_data, 1 << 1 /* ACPI PM timer good */, 4); - - build_header(linker, table_data, (void *)(table_data->data + waet_start), - "WAET", table_data->len - waet_start, 1, oem_id, oem_table_id); + acpi_table_end(linker, &table); } /* @@ -2283,7 +2209,7 @@ ivrs_host_bridges(Object *obj, void *opaque) PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus; if (bus && !pci_bus_bypass_iommu(bus)) { - pci_for_each_device(bus, pci_bus_num(bus), insert_ivhd, ivhd_blob); + pci_for_each_device_under_bus(bus, insert_ivhd, ivhd_blob); } } @@ -2295,12 +2221,12 @@ build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id, const char *oem_table_id) { int ivhd_table_len = 24; - int iommu_start = table_data->len; AMDVIState *s = AMD_IOMMU_DEVICE(x86_iommu_get_default()); GArray *ivhd_blob = g_array_new(false, true, 1); + AcpiTable table = { .sig = "IVRS", .rev = 1, .oem_id = oem_id, + .oem_table_id = oem_table_id }; - /* IVRS header */ - acpi_data_push(table_data, sizeof(AcpiTableHeader)); + acpi_table_begin(&table, table_data); /* IVinfo - IO virtualization information common to all * IOMMU units in a system */ @@ -2385,10 +2311,7 @@ build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id, 0x48, /* special device */ 8); } - - build_header(linker, table_data, (void *)(table_data->data + iommu_start), - "IVRS", table_data->len - iommu_start, 1, oem_id, - oem_table_id); + acpi_table_end(linker, &table); } typedef @@ -2435,6 +2358,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) PCMachineState *pcms = PC_MACHINE(machine); PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); X86MachineState *x86ms = X86_MACHINE(machine); + DeviceState *iommu = pcms->iommu; GArray *table_offsets; unsigned facs, dsdt, rsdt, fadt; AcpiPmInfo pm; @@ -2507,6 +2431,18 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) ACPI_DEVICE_IF(x86ms->acpi_dev), x86ms->oem_id, x86ms->oem_table_id); +#ifdef CONFIG_ACPI_ERST + { + Object *erst_dev; + erst_dev = find_erst_dev(); + if (erst_dev) { + acpi_add_table(table_offsets, tables_blob); + build_erst(tables_blob, tables->linker, erst_dev, + x86ms->oem_id, x86ms->oem_table_id); + } + } +#endif + vmgenid_dev = find_vmgenid_dev(); if (vmgenid_dev) { acpi_add_table(table_offsets, tables_blob); @@ -2532,7 +2468,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) } } #endif - if (pcms->numa_nodes) { + if (machine->numa_state->num_nodes) { acpi_add_table(table_offsets, tables_blob); build_srat(tables_blob, tables->linker, machine); if (machine->numa_state->have_numa_distance) { @@ -2551,23 +2487,30 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) build_mcfg(tables_blob, tables->linker, &mcfg, x86ms->oem_id, x86ms->oem_table_id); } - if (x86_iommu_get_default()) { - IommuType IOMMUType = x86_iommu_get_type(); - if (IOMMUType == TYPE_AMD) { - 
acpi_add_table(table_offsets, tables_blob); - build_amd_iommu(tables_blob, tables->linker, x86ms->oem_id, - x86ms->oem_table_id); - } else if (IOMMUType == TYPE_INTEL) { - acpi_add_table(table_offsets, tables_blob); - build_dmar_q35(tables_blob, tables->linker, x86ms->oem_id, - x86ms->oem_table_id); - } + if (object_dynamic_cast(OBJECT(iommu), TYPE_AMD_IOMMU_DEVICE)) { + acpi_add_table(table_offsets, tables_blob); + build_amd_iommu(tables_blob, tables->linker, x86ms->oem_id, + x86ms->oem_table_id); + } else if (object_dynamic_cast(OBJECT(iommu), TYPE_INTEL_IOMMU_DEVICE)) { + acpi_add_table(table_offsets, tables_blob); + build_dmar_q35(tables_blob, tables->linker, x86ms->oem_id, + x86ms->oem_table_id); + } else if (object_dynamic_cast(OBJECT(iommu), TYPE_VIRTIO_IOMMU_PCI)) { + PCIDevice *pdev = PCI_DEVICE(iommu); + + acpi_add_table(table_offsets, tables_blob); + build_viot(machine, tables_blob, tables->linker, pci_get_bdf(pdev), + x86ms->oem_id, x86ms->oem_table_id); } if (machine->nvdimms_state->is_enabled) { nvdimm_build_acpi(table_offsets, tables_blob, tables->linker, machine->nvdimms_state, machine->ram_slots, x86ms->oem_id, x86ms->oem_table_id); } + if (pcms->cxl_devices_state.is_enabled) { + cxl_build_cedt(table_offsets, tables_blob, tables->linker, + x86ms->oem_id, x86ms->oem_table_id, &pcms->cxl_devices_state); + } acpi_add_table(table_offsets, tables_blob); build_waet(tables_blob, tables->linker, x86ms->oem_id, x86ms->oem_table_id); @@ -2659,6 +2602,8 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) /* Cleanup memory that's no longer used. */ g_array_free(table_offsets, true); + g_free(slic_oem.id); + g_free(slic_oem.table_id); } static void acpi_ram_update(MemoryRegion *mr, GArray *data) diff --git a/hw/i386/acpi-common.c b/hw/i386/acpi-common.c index 1f5947fcf9..4aaafbdd7b 100644 --- a/hw/i386/acpi-common.c +++ b/hw/i386/acpi-common.c @@ -34,9 +34,13 @@ #include "acpi-common.h" void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid, - const CPUArchIdList *apic_ids, GArray *entry) + const CPUArchIdList *apic_ids, GArray *entry, + bool force_enabled) { uint32_t apic_id = apic_ids->cpus[uid].arch_id; + /* Flags – Local APIC Flags */ + uint32_t flags = apic_ids->cpus[uid].cpu != NULL || force_enabled ? + 1 /* Enabled */ : 0; /* ACPI spec says that LAPIC entry for non present * CPU may be omitted from MADT or it must be marked @@ -45,82 +49,84 @@ void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid, * should be put in MADT but kept disabled. 
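The branches that follow drop the old packed-struct MADT entries in favour of byte-by-byte appends through build_append_int_noprefix(). A standalone sketch of that packing style for the 8-byte Rev 1.0b Processor Local APIC entry, with append_int() as an illustrative little-endian stand-in for the QEMU helper:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for build_append_int_noprefix(): little-endian append. */
static void append_int(uint8_t *buf, size_t *len, uint64_t value, int bytes)
{
    for (int i = 0; i < bytes; i++) {
        buf[(*len)++] = (value >> (8 * i)) & 0xff;
    }
}

int main(void)
{
    uint8_t entry[8];
    size_t len = 0;

    append_int(entry, &len, 0, 1);   /* Type: Processor Local APIC */
    append_int(entry, &len, 8, 1);   /* Length */
    append_int(entry, &len, 2, 1);   /* ACPI Processor ID (uid) */
    append_int(entry, &len, 2, 1);   /* APIC ID */
    append_int(entry, &len, 1, 4);   /* Flags: bit 0 = Enabled */

    for (size_t i = 0; i < len; i++) {
        printf("%02x ", entry[i]);
    }
    printf("\n");
    return 0;
}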
*/ if (apic_id < 255) { - AcpiMadtProcessorApic *apic = acpi_data_push(entry, sizeof *apic); - - apic->type = ACPI_APIC_PROCESSOR; - apic->length = sizeof(*apic); - apic->processor_id = uid; - apic->local_apic_id = apic_id; - if (apic_ids->cpus[uid].cpu != NULL) { - apic->flags = cpu_to_le32(1); - } else { - apic->flags = cpu_to_le32(0); - } + /* Rev 1.0b, Table 5-13 Processor Local APIC Structure */ + build_append_int_noprefix(entry, 0, 1); /* Type */ + build_append_int_noprefix(entry, 8, 1); /* Length */ + build_append_int_noprefix(entry, uid, 1); /* ACPI Processor ID */ + build_append_int_noprefix(entry, apic_id, 1); /* APIC ID */ + build_append_int_noprefix(entry, flags, 4); /* Flags */ } else { - AcpiMadtProcessorX2Apic *apic = acpi_data_push(entry, sizeof *apic); - - apic->type = ACPI_APIC_LOCAL_X2APIC; - apic->length = sizeof(*apic); - apic->uid = cpu_to_le32(uid); - apic->x2apic_id = cpu_to_le32(apic_id); - if (apic_ids->cpus[uid].cpu != NULL) { - apic->flags = cpu_to_le32(1); - } else { - apic->flags = cpu_to_le32(0); - } + /* Rev 4.0, 5.2.12.12 Processor Local x2APIC Structure */ + build_append_int_noprefix(entry, 9, 1); /* Type */ + build_append_int_noprefix(entry, 16, 1); /* Length */ + build_append_int_noprefix(entry, 0, 2); /* Reserved */ + build_append_int_noprefix(entry, apic_id, 4); /* X2APIC ID */ + build_append_int_noprefix(entry, flags, 4); /* Flags */ + build_append_int_noprefix(entry, uid, 4); /* ACPI Processor UID */ } } +static void build_ioapic(GArray *entry, uint8_t id, uint32_t addr, uint32_t irq) +{ + /* Rev 1.0b, 5.2.8.2 IO APIC */ + build_append_int_noprefix(entry, 1, 1); /* Type */ + build_append_int_noprefix(entry, 12, 1); /* Length */ + build_append_int_noprefix(entry, id, 1); /* IO APIC ID */ + build_append_int_noprefix(entry, 0, 1); /* Reserved */ + build_append_int_noprefix(entry, addr, 4); /* IO APIC Address */ + build_append_int_noprefix(entry, irq, 4); /* System Vector Base */ +} + +static void +build_xrupt_override(GArray *entry, uint8_t src, uint32_t gsi, uint16_t flags) +{ + /* Rev 1.0b, 5.2.8.3.1 Interrupt Source Overrides */ + build_append_int_noprefix(entry, 2, 1); /* Type */ + build_append_int_noprefix(entry, 10, 1); /* Length */ + build_append_int_noprefix(entry, 0, 1); /* Bus */ + build_append_int_noprefix(entry, src, 1); /* Source */ + /* Global System Interrupt Vector */ + build_append_int_noprefix(entry, gsi, 4); + build_append_int_noprefix(entry, flags, 2); /* Flags */ +} + +/* + * ACPI spec, Revision 1.0b + * 5.2.8 Multiple APIC Description Table + */ void acpi_build_madt(GArray *table_data, BIOSLinker *linker, X86MachineState *x86ms, AcpiDeviceIf *adev, const char *oem_id, const char *oem_table_id) { + int i; + bool x2apic_mode = false; MachineClass *mc = MACHINE_GET_CLASS(x86ms); const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(MACHINE(x86ms)); - int madt_start = table_data->len; AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(adev); - bool x2apic_mode = false; + AcpiTable table = { .sig = "APIC", .rev = 1, .oem_id = oem_id, + .oem_table_id = oem_table_id }; - AcpiMultipleApicTable *madt; - AcpiMadtIoApic *io_apic; - AcpiMadtIntsrcovr *intsrcovr; - int i; - - madt = acpi_data_push(table_data, sizeof *madt); - madt->local_apic_address = cpu_to_le32(APIC_DEFAULT_ADDRESS); - madt->flags = cpu_to_le32(1); + acpi_table_begin(&table, table_data); + /* Local APIC Address */ + build_append_int_noprefix(table_data, APIC_DEFAULT_ADDRESS, 4); + build_append_int_noprefix(table_data, 1 /* PCAT_COMPAT */, 4); /* Flags */ for (i = 0; i < 
apic_ids->len; i++) { - adevc->madt_cpu(adev, i, apic_ids, table_data); + adevc->madt_cpu(adev, i, apic_ids, table_data, false); if (apic_ids->cpus[i].arch_id > 254) { x2apic_mode = true; } } - io_apic = acpi_data_push(table_data, sizeof *io_apic); - io_apic->type = ACPI_APIC_IO; - io_apic->length = sizeof(*io_apic); - io_apic->io_apic_id = ACPI_BUILD_IOAPIC_ID; - io_apic->address = cpu_to_le32(IO_APIC_DEFAULT_ADDRESS); - io_apic->interrupt = cpu_to_le32(0); - + build_ioapic(table_data, ACPI_BUILD_IOAPIC_ID, IO_APIC_DEFAULT_ADDRESS, 0); if (x86ms->ioapic2) { - AcpiMadtIoApic *io_apic2; - io_apic2 = acpi_data_push(table_data, sizeof *io_apic); - io_apic2->type = ACPI_APIC_IO; - io_apic2->length = sizeof(*io_apic); - io_apic2->io_apic_id = ACPI_BUILD_IOAPIC_ID + 1; - io_apic2->address = cpu_to_le32(IO_APIC_SECONDARY_ADDRESS); - io_apic2->interrupt = cpu_to_le32(IO_APIC_SECONDARY_IRQBASE); + build_ioapic(table_data, ACPI_BUILD_IOAPIC_ID + 1, + IO_APIC_SECONDARY_ADDRESS, IO_APIC_SECONDARY_IRQBASE); } if (x86ms->apic_xrupt_override) { - intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr); - intsrcovr->type = ACPI_APIC_XRUPT_OVERRIDE; - intsrcovr->length = sizeof(*intsrcovr); - intsrcovr->source = 0; - intsrcovr->gsi = cpu_to_le32(2); - intsrcovr->flags = cpu_to_le16(0); /* conforms to bus specifications */ + build_xrupt_override(table_data, 0, 2, + 0 /* Flags: Conforms to the specifications of the bus */); } for (i = 1; i < 16; i++) { @@ -128,36 +134,32 @@ void acpi_build_madt(GArray *table_data, BIOSLinker *linker, /* No need for a INT source override structure. */ continue; } - intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr); - intsrcovr->type = ACPI_APIC_XRUPT_OVERRIDE; - intsrcovr->length = sizeof(*intsrcovr); - intsrcovr->source = i; - intsrcovr->gsi = cpu_to_le32(i); - intsrcovr->flags = cpu_to_le16(0xd); /* active high, level triggered */ + build_xrupt_override(table_data, i, i, + 0xd /* Flags: Active high, Level Triggered */); } if (x2apic_mode) { - AcpiMadtLocalX2ApicNmi *local_nmi; - - local_nmi = acpi_data_push(table_data, sizeof *local_nmi); - local_nmi->type = ACPI_APIC_LOCAL_X2APIC_NMI; - local_nmi->length = sizeof(*local_nmi); - local_nmi->uid = 0xFFFFFFFF; /* all processors */ - local_nmi->flags = cpu_to_le16(0); - local_nmi->lint = 1; /* ACPI_LINT1 */ + /* Rev 4.0, 5.2.12.13 Local x2APIC NMI Structure*/ + build_append_int_noprefix(table_data, 0xA, 1); /* Type */ + build_append_int_noprefix(table_data, 12, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Flags */ + /* ACPI Processor UID */ + build_append_int_noprefix(table_data, 0xFFFFFFFF /* all processors */, + 4); + /* Local x2APIC LINT# */ + build_append_int_noprefix(table_data, 1 /* ACPI_LINT1 */, 1); + build_append_int_noprefix(table_data, 0, 3); /* Reserved */ } else { - AcpiMadtLocalNmi *local_nmi; - - local_nmi = acpi_data_push(table_data, sizeof *local_nmi); - local_nmi->type = ACPI_APIC_LOCAL_NMI; - local_nmi->length = sizeof(*local_nmi); - local_nmi->processor_id = 0xff; /* all processors */ - local_nmi->flags = cpu_to_le16(0); - local_nmi->lint = 1; /* ACPI_LINT1 */ + /* Rev 1.0b, 5.2.8.3.3 Local APIC NMI */ + build_append_int_noprefix(table_data, 4, 1); /* Type */ + build_append_int_noprefix(table_data, 6, 1); /* Length */ + /* ACPI Processor ID */ + build_append_int_noprefix(table_data, 0xFF /* all processors */, 1); + build_append_int_noprefix(table_data, 0, 2); /* Flags */ + /* Local APIC INTI# */ + build_append_int_noprefix(table_data, 1 /* ACPI_LINT1 */, 1); } - 
build_header(linker, table_data, - (void *)(table_data->data + madt_start), "APIC", - table_data->len - madt_start, 1, oem_id, oem_table_id); + acpi_table_end(linker, &table); } diff --git a/hw/i386/acpi-microvm.c b/hw/i386/acpi-microvm.c index 1a0f77b911..fb09185cbd 100644 --- a/hw/i386/acpi-microvm.c +++ b/hw/i386/acpi-microvm.c @@ -30,16 +30,20 @@ #include "hw/acpi/bios-linker-loader.h" #include "hw/acpi/generic_event_device.h" #include "hw/acpi/utils.h" +#include "hw/acpi/erst.h" #include "hw/i386/fw_cfg.h" #include "hw/i386/microvm.h" #include "hw/pci/pci.h" #include "hw/pci/pcie_host.h" #include "hw/usb/xhci.h" #include "hw/virtio/virtio-mmio.h" +#include "hw/input/i8042.h" #include "acpi-common.h" #include "acpi-microvm.h" +#include CONFIG_DEVICES + static void acpi_dsdt_add_virtio(Aml *scope, MicrovmMachineState *mms) { @@ -113,16 +117,16 @@ build_dsdt_microvm(GArray *table_data, BIOSLinker *linker, Aml *dsdt, *sb_scope, *scope, *pkg; bool ambiguous; Object *isabus; + AcpiTable table = { .sig = "DSDT", .rev = 2, .oem_id = x86ms->oem_id, + .oem_table_id = x86ms->oem_table_id }; isabus = object_resolve_path_type("", TYPE_ISA_BUS, &ambiguous); assert(isabus); assert(!ambiguous); + acpi_table_begin(&table, table_data); dsdt = init_aml_allocator(); - /* Reserve space for header */ - acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader)); - sb_scope = aml_scope("_SB"); fw_cfg_add_acpi_dsdt(sb_scope, x86ms->fw_cfg); isa_build_aml(ISA_BUS(isabus), sb_scope); @@ -144,11 +148,10 @@ build_dsdt_microvm(GArray *table_data, BIOSLinker *linker, aml_append(scope, aml_name_decl("_S5", pkg)); aml_append(dsdt, scope); - /* copy AML table into ACPI tables blob and patch header there */ + /* copy AML bytecode into ACPI tables blob */ g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); - build_header(linker, table_data, - (void *)(table_data->data + table_data->len - dsdt->buf->len), - "DSDT", dsdt->buf->len, 2, x86ms->oem_id, x86ms->oem_table_id); + + acpi_table_end(linker, &table); free_aml_allocator(); } @@ -185,6 +188,11 @@ static void acpi_build_microvm(AcpiBuildTables *tables, .address = GED_MMIO_BASE_REGS + ACPI_GED_REG_RESET, }, .reset_val = ACPI_GED_RESET_VALUE, + /* + * ACPI v2, Table 5-10 - Fixed ACPI Description Table Boot Architecture + * Flags, bit offset 1 - 8042. 
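For reference, the low bits of the FADT IA-PC Boot Architecture Flags field this comment refers to look like the sketch below; the macro names are illustrative, and bit 1 is the only one iapc_boot_arch_8042() is concerned with here:

#include <stdint.h>
#include <stdio.h>

/* IA-PC Boot Architecture Flags (FADT, ACPI 2.0 Table 5-10); names illustrative. */
#define IAPC_LEGACY_DEVICES    (1u << 0)   /* legacy ISA devices present */
#define IAPC_8042              (1u << 1)   /* i8042 keyboard controller present */
#define IAPC_VGA_NOT_PRESENT   (1u << 2)
#define IAPC_MSI_NOT_SUPPORTED (1u << 3)

int main(void)
{
    /* What the helper amounts to when an i8042 device exists in the machine. */
    uint16_t iapc_boot_arch = IAPC_8042;

    printf("IAPC_BOOT_ARCH = 0x%04x\n", iapc_boot_arch);
    return 0;
}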
+ */ + .iapc_boot_arch = iapc_boot_arch_8042(), }; table_offsets = g_array_new(false, true /* clear */, @@ -208,6 +216,18 @@ static void acpi_build_microvm(AcpiBuildTables *tables, ACPI_DEVICE_IF(x86ms->acpi_dev), x86ms->oem_id, x86ms->oem_table_id); +#ifdef CONFIG_ACPI_ERST + { + Object *erst_dev; + erst_dev = find_erst_dev(); + if (erst_dev) { + acpi_add_table(table_offsets, tables_blob); + build_erst(tables_blob, tables->linker, erst_dev, + x86ms->oem_id, x86ms->oem_table_id); + } + } +#endif + xsdt = tables_blob->len; build_xsdt(tables_blob, tables->linker, table_offsets, x86ms->oem_id, x86ms->oem_table_id); diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c index 2801dff97c..725f69095b 100644 --- a/hw/i386/amd_iommu.c +++ b/hw/i386/amd_iommu.c @@ -181,7 +181,7 @@ static void amdvi_log_event(AMDVIState *s, uint64_t *evt) } if (dma_memory_write(&address_space_memory, s->evtlog + s->evtlog_tail, - evt, AMDVI_EVENT_LEN)) { + evt, AMDVI_EVENT_LEN, MEMTXATTRS_UNSPECIFIED)) { trace_amdvi_evntlog_fail(s->evtlog, s->evtlog_tail); } @@ -201,15 +201,18 @@ static void amdvi_setevent_bits(uint64_t *buffer, uint64_t value, int start, /* * AMDVi event structure * 0:15 -> DeviceID - * 55:63 -> event type + miscellaneous info - * 63:127 -> related address + * 48:63 -> event type + miscellaneous info + * 64:127 -> related address */ static void amdvi_encode_event(uint64_t *evt, uint16_t devid, uint64_t addr, uint16_t info) { + evt[0] = 0; + evt[1] = 0; + amdvi_setevent_bits(evt, devid, 0, 16); - amdvi_setevent_bits(evt, info, 55, 8); - amdvi_setevent_bits(evt, addr, 63, 64); + amdvi_setevent_bits(evt, info, 48, 16); + amdvi_setevent_bits(evt, addr, 64, 64); } /* log an error encountered during a page walk * @@ -218,7 +221,7 @@ static void amdvi_encode_event(uint64_t *evt, uint16_t devid, uint64_t addr, static void amdvi_page_fault(AMDVIState *s, uint16_t devid, hwaddr addr, uint16_t info) { - uint64_t evt[4]; + uint64_t evt[2]; info |= AMDVI_EVENT_IOPF_I | AMDVI_EVENT_IOPF; amdvi_encode_event(evt, devid, addr, info); @@ -234,7 +237,7 @@ static void amdvi_page_fault(AMDVIState *s, uint16_t devid, static void amdvi_log_devtab_error(AMDVIState *s, uint16_t devid, hwaddr devtab, uint16_t info) { - uint64_t evt[4]; + uint64_t evt[2]; info |= AMDVI_EVENT_DEV_TAB_HW_ERROR; @@ -248,7 +251,8 @@ static void amdvi_log_devtab_error(AMDVIState *s, uint16_t devid, */ static void amdvi_log_command_error(AMDVIState *s, hwaddr addr) { - uint64_t evt[4], info = AMDVI_EVENT_COMMAND_HW_ERROR; + uint64_t evt[2]; + uint16_t info = AMDVI_EVENT_COMMAND_HW_ERROR; amdvi_encode_event(evt, 0, addr, info); amdvi_log_event(s, evt); @@ -261,7 +265,7 @@ static void amdvi_log_command_error(AMDVIState *s, hwaddr addr) static void amdvi_log_illegalcom_error(AMDVIState *s, uint16_t info, hwaddr addr) { - uint64_t evt[4]; + uint64_t evt[2]; info |= AMDVI_EVENT_ILLEGAL_COMMAND_ERROR; amdvi_encode_event(evt, 0, addr, info); @@ -276,7 +280,7 @@ static void amdvi_log_illegalcom_error(AMDVIState *s, uint16_t info, static void amdvi_log_illegaldevtab_error(AMDVIState *s, uint16_t devid, hwaddr addr, uint16_t info) { - uint64_t evt[4]; + uint64_t evt[2]; info |= AMDVI_EVENT_ILLEGAL_DEVTAB_ENTRY; amdvi_encode_event(evt, devid, addr, info); @@ -288,7 +292,7 @@ static void amdvi_log_illegaldevtab_error(AMDVIState *s, uint16_t devid, static void amdvi_log_pagetab_error(AMDVIState *s, uint16_t devid, hwaddr addr, uint16_t info) { - uint64_t evt[4]; + uint64_t evt[2]; info |= AMDVI_EVENT_PAGE_TAB_HW_ERROR; amdvi_encode_event(evt, devid, addr, 
info); @@ -376,7 +380,8 @@ static void amdvi_completion_wait(AMDVIState *s, uint64_t *cmd) } if (extract64(cmd[0], 0, 1)) { if (dma_memory_write(&address_space_memory, addr, &data, - AMDVI_COMPLETION_DATA_SIZE)) { + AMDVI_COMPLETION_DATA_SIZE, + MEMTXATTRS_UNSPECIFIED)) { trace_amdvi_completion_wait_fail(addr); } } @@ -502,7 +507,7 @@ static void amdvi_cmdbuf_exec(AMDVIState *s) uint64_t cmd[2]; if (dma_memory_read(&address_space_memory, s->cmdbuf + s->cmdbuf_head, - cmd, AMDVI_COMMAND_SIZE)) { + cmd, AMDVI_COMMAND_SIZE, MEMTXATTRS_UNSPECIFIED)) { trace_amdvi_command_read_fail(s->cmdbuf, s->cmdbuf_head); amdvi_log_command_error(s, s->cmdbuf + s->cmdbuf_head); return; @@ -836,7 +841,7 @@ static bool amdvi_get_dte(AMDVIState *s, int devid, uint64_t *entry) uint32_t offset = devid * AMDVI_DEVTAB_ENTRY_SIZE; if (dma_memory_read(&address_space_memory, s->devtab + offset, entry, - AMDVI_DEVTAB_ENTRY_SIZE)) { + AMDVI_DEVTAB_ENTRY_SIZE, MEMTXATTRS_UNSPECIFIED)) { trace_amdvi_dte_get_fail(s->devtab, offset); /* log error accessing dte */ amdvi_log_devtab_error(s, devid, s->devtab + offset, 0); @@ -881,7 +886,8 @@ static inline uint64_t amdvi_get_pte_entry(AMDVIState *s, uint64_t pte_addr, { uint64_t pte; - if (dma_memory_read(&address_space_memory, pte_addr, &pte, sizeof(pte))) { + if (dma_memory_read(&address_space_memory, pte_addr, + &pte, sizeof(pte), MEMTXATTRS_UNSPECIFIED)) { trace_amdvi_get_pte_hwerror(pte_addr); amdvi_log_pagetab_error(s, devid, pte_addr, 0); pte = 0; @@ -911,7 +917,7 @@ static void amdvi_page_walk(AMDVIAddressSpace *as, uint64_t *dte, } /* we are at the leaf page table or page table encodes a huge page */ - while (level > 0) { + do { pte_perms = amdvi_get_perms(pte); present = pte & 1; if (!present || perms != (perms & pte_perms)) { @@ -930,10 +936,7 @@ static void amdvi_page_walk(AMDVIAddressSpace *as, uint64_t *dte, } oldlevel = level; level = get_pte_translation_mode(pte); - if (level == 0x7) { - break; - } - } + } while (level > 0 && level < 7); if (level == 0x7) { page_mask = pte_override_page_mask(pte); @@ -1048,7 +1051,7 @@ static int amdvi_get_irte(AMDVIState *s, MSIMessage *origin, uint64_t *dte, trace_amdvi_ir_irte(irte_root, offset); if (dma_memory_read(&address_space_memory, irte_root + offset, - irte, sizeof(*irte))) { + irte, sizeof(*irte), MEMTXATTRS_UNSPECIFIED)) { trace_amdvi_ir_err("failed to get irte"); return -AMDVI_IR_GET_IRTE; } @@ -1108,7 +1111,7 @@ static int amdvi_get_irte_ga(AMDVIState *s, MSIMessage *origin, uint64_t *dte, trace_amdvi_ir_irte(irte_root, offset); if (dma_memory_read(&address_space_memory, irte_root + offset, - irte, sizeof(*irte))) { + irte, sizeof(*irte), MEMTXATTRS_UNSPECIFIED)) { trace_amdvi_ir_err("failed to get irte_ga"); return -AMDVI_IR_GET_IRTE; } @@ -1403,7 +1406,7 @@ static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) /* allocate memory during the first run */ if (!iommu_as) { - iommu_as = g_malloc0(sizeof(AMDVIAddressSpace *) * PCI_DEVFN_MAX); + iommu_as = g_new0(AMDVIAddressSpace *, PCI_DEVFN_MAX); s->address_spaces[bus_num] = iommu_as; } @@ -1411,7 +1414,7 @@ static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) if (!iommu_as[devfn]) { snprintf(name, sizeof(name), "amd_iommu_devfn_%d", devfn); - iommu_as[devfn] = g_malloc0(sizeof(AMDVIAddressSpace)); + iommu_as[devfn] = g_new0(AMDVIAddressSpace, 1); iommu_as[devfn]->bus_num = (uint8_t)bus_num; iommu_as[devfn]->devfn = (uint8_t)devfn; iommu_as[devfn]->iommu_state = s; @@ -1526,7 +1529,7 @@ static void 
amdvi_init(AMDVIState *s) AMDVI_MAX_PH_ADDR | AMDVI_MAX_GVA_ADDR | AMDVI_MAX_VA_ADDR); } -static void amdvi_reset(DeviceState *dev) +static void amdvi_sysbus_reset(DeviceState *dev) { AMDVIState *s = AMD_IOMMU_DEVICE(dev); @@ -1534,11 +1537,10 @@ static void amdvi_reset(DeviceState *dev) amdvi_init(s); } -static void amdvi_realize(DeviceState *dev, Error **errp) +static void amdvi_sysbus_realize(DeviceState *dev, Error **errp) { int ret = 0; AMDVIState *s = AMD_IOMMU_DEVICE(dev); - X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev); MachineState *ms = MACHINE(qdev_get_machine()); PCMachineState *pcms = PC_MACHINE(ms); X86MachineState *x86ms = X86_MACHINE(ms); @@ -1548,7 +1550,6 @@ static void amdvi_realize(DeviceState *dev, Error **errp) amdvi_uint64_equal, g_free, g_free); /* This device should take care of IOMMU PCI properties */ - x86_iommu->type = TYPE_AMD; if (!qdev_realize(DEVICE(&s->pci), &bus->qbus, errp)) { return; } @@ -1585,27 +1586,27 @@ static void amdvi_realize(DeviceState *dev, Error **errp) amdvi_init(s); } -static const VMStateDescription vmstate_amdvi = { +static const VMStateDescription vmstate_amdvi_sysbus = { .name = "amd-iommu", .unmigratable = 1 }; -static void amdvi_instance_init(Object *klass) +static void amdvi_sysbus_instance_init(Object *klass) { AMDVIState *s = AMD_IOMMU_DEVICE(klass); object_initialize(&s->pci, sizeof(s->pci), TYPE_AMD_IOMMU_PCI); } -static void amdvi_class_init(ObjectClass *klass, void* data) +static void amdvi_sysbus_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); X86IOMMUClass *dc_class = X86_IOMMU_DEVICE_CLASS(klass); - dc->reset = amdvi_reset; - dc->vmsd = &vmstate_amdvi; + dc->reset = amdvi_sysbus_reset; + dc->vmsd = &vmstate_amdvi_sysbus; dc->hotpluggable = false; - dc_class->realize = amdvi_realize; + dc_class->realize = amdvi_sysbus_realize; dc_class->int_remap = amdvi_int_remap; /* Supported by the pc-q35-* machine types */ dc->user_creatable = true; @@ -1613,18 +1614,27 @@ static void amdvi_class_init(ObjectClass *klass, void* data) dc->desc = "AMD IOMMU (AMD-Vi) DMA Remapping device"; } -static const TypeInfo amdvi = { +static const TypeInfo amdvi_sysbus = { .name = TYPE_AMD_IOMMU_DEVICE, .parent = TYPE_X86_IOMMU_DEVICE, .instance_size = sizeof(AMDVIState), - .instance_init = amdvi_instance_init, - .class_init = amdvi_class_init + .instance_init = amdvi_sysbus_instance_init, + .class_init = amdvi_sysbus_class_init }; -static const TypeInfo amdviPCI = { +static void amdvi_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->desc = "AMD IOMMU (AMD-Vi) DMA Remapping device"; +} + +static const TypeInfo amdvi_pci = { .name = TYPE_AMD_IOMMU_PCI, .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(AMDVIPCIState), + .class_init = amdvi_pci_class_init, .interfaces = (InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, @@ -1645,11 +1655,11 @@ static const TypeInfo amdvi_iommu_memory_region_info = { .class_init = amdvi_iommu_memory_region_class_init, }; -static void amdviPCI_register_types(void) +static void amdvi_register_types(void) { - type_register_static(&amdviPCI); - type_register_static(&amdvi); + type_register_static(&amdvi_pci); + type_register_static(&amdvi_sysbus); type_register_static(&amdvi_iommu_memory_region_info); } -type_init(amdviPCI_register_types); +type_init(amdvi_register_types); diff --git a/hw/i386/e820_memory_layout.c b/hw/i386/e820_memory_layout.c index bcf9eaf837..06970ac44a 
100644 --- a/hw/i386/e820_memory_layout.c +++ b/hw/i386/e820_memory_layout.c @@ -11,29 +11,11 @@ #include "e820_memory_layout.h" static size_t e820_entries; -struct e820_table e820_reserve; struct e820_entry *e820_table; int e820_add_entry(uint64_t address, uint64_t length, uint32_t type) { - int index = le32_to_cpu(e820_reserve.count); - struct e820_entry *entry; - - if (type != E820_RAM) { - /* old FW_CFG_E820_TABLE entry -- reservations only */ - if (index >= E820_NR_ENTRIES) { - return -EBUSY; - } - entry = &e820_reserve.entry[index++]; - - entry->address = cpu_to_le64(address); - entry->length = cpu_to_le64(length); - entry->type = cpu_to_le32(type); - - e820_reserve.count = cpu_to_le32(index); - } - - /* new "etc/e820" file -- include ram too */ + /* new "etc/e820" file -- include ram and reserved entries */ e820_table = g_renew(struct e820_entry, e820_table, e820_entries + 1); e820_table[e820_entries].address = cpu_to_le64(address); e820_table[e820_entries].length = cpu_to_le64(length); diff --git a/hw/i386/e820_memory_layout.h b/hw/i386/e820_memory_layout.h index 2a0ceb8b9c..7c239aa033 100644 --- a/hw/i386/e820_memory_layout.h +++ b/hw/i386/e820_memory_layout.h @@ -6,8 +6,8 @@ * SPDX-License-Identifier: MIT */ -#ifndef HW_I386_E820_H -#define HW_I386_E820_H +#ifndef HW_I386_E820_MEMORY_LAYOUT_H +#define HW_I386_E820_MEMORY_LAYOUT_H /* e820 types */ #define E820_RAM 1 @@ -16,20 +16,12 @@ #define E820_NVS 4 #define E820_UNUSABLE 5 -#define E820_NR_ENTRIES 16 - struct e820_entry { uint64_t address; uint64_t length; uint32_t type; } QEMU_PACKED __attribute((__aligned__(4))); -struct e820_table { - uint32_t count; - struct e820_entry entry[E820_NR_ENTRIES]; -} QEMU_PACKED __attribute((__aligned__(4))); - -extern struct e820_table e820_reserve; extern struct e820_entry *e820_table; int e820_add_entry(uint64_t address, uint64_t length, uint32_t type); diff --git a/hw/i386/fw_cfg.c b/hw/i386/fw_cfg.c index 4e68d5dea4..72a42f3c66 100644 --- a/hw/i386/fw_cfg.c +++ b/hw/i386/fw_cfg.c @@ -36,7 +36,6 @@ const char *fw_cfg_arch_key_name(uint16_t key) {FW_CFG_ACPI_TABLES, "acpi_tables"}, {FW_CFG_SMBIOS_ENTRIES, "smbios_entries"}, {FW_CFG_IRQ0_OVERRIDE, "irq0_override"}, - {FW_CFG_E820_TABLE, "e820_table"}, {FW_CFG_HPET, "hpet"}, }; @@ -127,8 +126,6 @@ FWCfgState *fw_cfg_arch_create(MachineState *ms, #endif fw_cfg_add_i32(fw_cfg, FW_CFG_IRQ0_OVERRIDE, 1); - fw_cfg_add_bytes(fw_cfg, FW_CFG_E820_TABLE, - &e820_reserve, sizeof(e820_reserve)); fw_cfg_add_file(fw_cfg, "etc/e820", e820_table, sizeof(struct e820_entry) * e820_get_num_entries()); @@ -159,7 +156,7 @@ void fw_cfg_build_feature_control(MachineState *ms, FWCfgState *fw_cfg) { X86CPU *cpu = X86_CPU(ms->possible_cpus->cpus[0].cpu); CPUX86State *env = &cpu->env; - uint32_t unused, ecx, edx; + uint32_t unused, ebx, ecx, edx; uint64_t feature_control_bits = 0; uint64_t *val; @@ -174,6 +171,16 @@ void fw_cfg_build_feature_control(MachineState *ms, FWCfgState *fw_cfg) feature_control_bits |= FEATURE_CONTROL_LMCE; } + if (env->cpuid_level >= 7) { + cpu_x86_cpuid(env, 0x7, 0, &unused, &ebx, &ecx, &unused); + if (ebx & CPUID_7_0_EBX_SGX) { + feature_control_bits |= FEATURE_CONTROL_SGX; + } + if (ecx & CPUID_7_0_ECX_SGX_LC) { + feature_control_bits |= FEATURE_CONTROL_SGX_LC; + } + } + if (!feature_control_bits) { return; } diff --git a/hw/i386/fw_cfg.h b/hw/i386/fw_cfg.h index 275f15c1c5..86ca7c1c0c 100644 --- a/hw/i386/fw_cfg.h +++ b/hw/i386/fw_cfg.h @@ -17,7 +17,6 @@ #define FW_CFG_ACPI_TABLES (FW_CFG_ARCH_LOCAL + 0) #define FW_CFG_SMBIOS_ENTRIES 
(FW_CFG_ARCH_LOCAL + 1) #define FW_CFG_IRQ0_OVERRIDE (FW_CFG_ARCH_LOCAL + 2) -#define FW_CFG_E820_TABLE (FW_CFG_ARCH_LOCAL + 3) #define FW_CFG_HPET (FW_CFG_ARCH_LOCAL + 4) FWCfgState *fw_cfg_arch_create(MachineState *ms, diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index 209b3f5553..d025ef2873 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -49,17 +49,24 @@ /* pe operations */ #define VTD_PE_GET_TYPE(pe) ((pe)->val[0] & VTD_SM_PASID_ENTRY_PGTT) #define VTD_PE_GET_LEVEL(pe) (2 + (((pe)->val[0] >> 2) & VTD_SM_PASID_ENTRY_AW)) -#define VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write) {\ - if (ret_fr) { \ - ret_fr = -ret_fr; \ - if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) { \ - trace_vtd_fault_disabled(); \ - } else { \ - vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write); \ - } \ - goto error; \ - } \ -} + +/* + * PCI bus number (or SID) is not reliable since the device is usaully + * initalized before guest can configure the PCI bridge + * (SECONDARY_BUS_NUMBER). + */ +struct vtd_as_key { + PCIBus *bus; + uint8_t devfn; + uint32_t pasid; +}; + +struct vtd_iotlb_key { + uint64_t gfn; + uint32_t pasid; + uint32_t level; + uint16_t sid; +}; static void vtd_address_space_refresh_all(IntelIOMMUState *s); static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n); @@ -181,6 +188,18 @@ static void vtd_update_scalable_state(IntelIOMMUState *s) } } +static void vtd_update_iq_dw(IntelIOMMUState *s) +{ + uint64_t val = vtd_get_quad_raw(s, DMAR_IQA_REG); + + if (s->ecap & VTD_ECAP_SMTS && + val & VTD_IQA_DW_MASK) { + s->iq_dw = true; + } else { + s->iq_dw = false; + } +} + /* Whether the address space needs to notify new mappings */ static inline gboolean vtd_as_has_map_notifier(VTDAddressSpace *as) { @@ -188,14 +207,46 @@ static inline gboolean vtd_as_has_map_notifier(VTDAddressSpace *as) } /* GHashTable functions */ -static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2) +static gboolean vtd_iotlb_equal(gconstpointer v1, gconstpointer v2) { - return *((const uint64_t *)v1) == *((const uint64_t *)v2); + const struct vtd_iotlb_key *key1 = v1; + const struct vtd_iotlb_key *key2 = v2; + + return key1->sid == key2->sid && + key1->pasid == key2->pasid && + key1->level == key2->level && + key1->gfn == key2->gfn; } -static guint vtd_uint64_hash(gconstpointer v) +static guint vtd_iotlb_hash(gconstpointer v) { - return (guint)*(const uint64_t *)v; + const struct vtd_iotlb_key *key = v; + + return key->gfn | ((key->sid) << VTD_IOTLB_SID_SHIFT) | + (key->level) << VTD_IOTLB_LVL_SHIFT | + (key->pasid) << VTD_IOTLB_PASID_SHIFT; +} + +static gboolean vtd_as_equal(gconstpointer v1, gconstpointer v2) +{ + const struct vtd_as_key *key1 = v1; + const struct vtd_as_key *key2 = v2; + + return (key1->bus == key2->bus) && (key1->devfn == key2->devfn) && + (key1->pasid == key2->pasid); +} + +/* + * Note that we use pointer to PCIBus as the key, so hashing/shifting + * based on the pointer value is intended. Note that we deal with + * collisions through vtd_as_equal(). 
+ */ +static guint vtd_as_hash(gconstpointer v) +{ + const struct vtd_as_key *key = v; + guint value = (guint)(uintptr_t)key->bus; + + return (guint)(value << 8 | key->devfn); } static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value, @@ -236,22 +287,14 @@ static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value, static void vtd_reset_context_cache_locked(IntelIOMMUState *s) { VTDAddressSpace *vtd_as; - VTDBus *vtd_bus; - GHashTableIter bus_it; - uint32_t devfn_it; + GHashTableIter as_it; trace_vtd_context_cache_reset(); - g_hash_table_iter_init(&bus_it, s->vtd_as_by_busptr); + g_hash_table_iter_init(&as_it, s->vtd_address_spaces); - while (g_hash_table_iter_next (&bus_it, NULL, (void**)&vtd_bus)) { - for (devfn_it = 0; devfn_it < PCI_DEVFN_MAX; ++devfn_it) { - vtd_as = vtd_bus->dev_as[devfn_it]; - if (!vtd_as) { - continue; - } - vtd_as->context_cache_entry.context_cache_gen = 0; - } + while (g_hash_table_iter_next(&as_it, NULL, (void **)&vtd_as)) { + vtd_as->context_cache_entry.context_cache_gen = 0; } s->context_cache_gen = 1; } @@ -278,13 +321,6 @@ static void vtd_reset_caches(IntelIOMMUState *s) vtd_iommu_unlock(s); } -static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint16_t source_id, - uint32_t level) -{ - return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) | - ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT); -} - static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level) { return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K; @@ -292,15 +328,17 @@ static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level) /* Must be called with IOMMU lock held */ static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id, - hwaddr addr) + uint32_t pasid, hwaddr addr) { + struct vtd_iotlb_key key; VTDIOTLBEntry *entry; - uint64_t key; int level; for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) { - key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level), - source_id, level); + key.gfn = vtd_get_iotlb_gfn(addr, level); + key.level = level; + key.sid = source_id; + key.pasid = pasid; entry = g_hash_table_lookup(s->iotlb, &key); if (entry) { goto out; @@ -314,10 +352,11 @@ out: /* Must be with IOMMU lock held */ static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id, uint16_t domain_id, hwaddr addr, uint64_t slpte, - uint8_t access_flags, uint32_t level) + uint8_t access_flags, uint32_t level, + uint32_t pasid) { VTDIOTLBEntry *entry = g_malloc(sizeof(*entry)); - uint64_t *key = g_malloc(sizeof(*key)); + struct vtd_iotlb_key *key = g_malloc(sizeof(*key)); uint64_t gfn = vtd_get_iotlb_gfn(addr, level); trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id); @@ -331,7 +370,13 @@ static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id, entry->slpte = slpte; entry->access_flags = access_flags; entry->mask = vtd_slpt_level_page_mask(level); - *key = vtd_get_iotlb_key(gfn, source_id, level); + entry->pasid = pasid; + + key->gfn = gfn; + key->sid = source_id; + key->level = level; + key->pasid = pasid; + g_hash_table_replace(s->iotlb, key, entry); } @@ -424,7 +469,8 @@ static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index) /* Must not update F field now, should be done later */ static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index, uint16_t source_id, hwaddr addr, - VTDFaultReason fault, bool is_write) + VTDFaultReason fault, bool is_write, + bool is_pasid, uint32_t pasid) { uint64_t hi = 0, lo; hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) 
<< 4); @@ -432,7 +478,8 @@ static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index, assert(index < DMAR_FRCD_REG_NR); lo = VTD_FRCD_FI(addr); - hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault); + hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault) | + VTD_FRCD_PV(pasid) | VTD_FRCD_PP(is_pasid); if (!is_write) { hi |= VTD_FRCD_T; } @@ -463,17 +510,13 @@ static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id) /* Log and report an DMAR (address translation) fault to software */ static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id, hwaddr addr, VTDFaultReason fault, - bool is_write) + bool is_write, bool is_pasid, + uint32_t pasid) { uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG); assert(fault < VTD_FR_MAX); - if (fault == VTD_FR_RESERVED_ERR) { - /* This is not a normal fault reason case. Drop it. */ - return; - } - trace_vtd_dmar_fault(source_id, fault, addr, is_write); if (fsts_reg & VTD_FSTS_PFO) { @@ -495,7 +538,8 @@ static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id, return; } - vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write); + vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, + is_write, is_pasid, pasid); if (fsts_reg & VTD_FSTS_PPF) { error_report_once("There are pending faults already, " @@ -569,7 +613,8 @@ static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index, dma_addr_t addr; addr = s->root + index * sizeof(*re); - if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) { + if (dma_memory_read(&address_space_memory, addr, + re, sizeof(*re), MEMTXATTRS_UNSPECIFIED)) { re->lo = 0; return -VTD_FR_ROOT_TABLE_INV; } @@ -602,7 +647,8 @@ static int vtd_get_context_entry_from_root(IntelIOMMUState *s, } addr = addr + index * ce_size; - if (dma_memory_read(&address_space_memory, addr, ce, ce_size)) { + if (dma_memory_read(&address_space_memory, addr, + ce, ce_size, MEMTXATTRS_UNSPECIFIED)) { return -VTD_FR_CONTEXT_TABLE_INV; } @@ -639,8 +685,8 @@ static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index) assert(index < VTD_SL_PT_ENTRY_NR); if (dma_memory_read(&address_space_memory, - base_addr + index * sizeof(slpte), &slpte, - sizeof(slpte))) { + base_addr + index * sizeof(slpte), + &slpte, sizeof(slpte), MEMTXATTRS_UNSPECIFIED)) { slpte = (uint64_t)-1; return slpte; } @@ -679,7 +725,7 @@ static inline bool vtd_pe_type_check(X86IOMMUState *x86_iommu, } break; default: - /* Unknwon type */ + /* Unknown type */ return false; } return true; @@ -692,7 +738,7 @@ static inline bool vtd_pdire_present(VTDPASIDDirEntry *pdire) /** * Caller of this function should check present bit if wants - * to use pdir entry for futher usage except for fpd bit check. + * to use pdir entry for further usage except for fpd bit check. 
*/ static int vtd_get_pdire_from_pdir_table(dma_addr_t pasid_dir_base, uint32_t pasid, @@ -704,7 +750,8 @@ static int vtd_get_pdire_from_pdir_table(dma_addr_t pasid_dir_base, index = VTD_PASID_DIR_INDEX(pasid); entry_size = VTD_PASID_DIR_ENTRY_SIZE; addr = pasid_dir_base + index * entry_size; - if (dma_memory_read(&address_space_memory, addr, pdire, entry_size)) { + if (dma_memory_read(&address_space_memory, addr, + pdire, entry_size, MEMTXATTRS_UNSPECIFIED)) { return -VTD_FR_PASID_TABLE_INV; } @@ -728,7 +775,8 @@ static int vtd_get_pe_in_pasid_leaf_table(IntelIOMMUState *s, index = VTD_PASID_TABLE_INDEX(pasid); entry_size = VTD_PASID_ENTRY_SIZE; addr = addr + index * entry_size; - if (dma_memory_read(&address_space_memory, addr, pe, entry_size)) { + if (dma_memory_read(&address_space_memory, addr, + pe, entry_size, MEMTXATTRS_UNSPECIFIED)) { return -VTD_FR_PASID_TABLE_INV; } @@ -746,7 +794,7 @@ static int vtd_get_pe_in_pasid_leaf_table(IntelIOMMUState *s, /** * Caller of this function should check present bit if wants - * to use pasid entry for futher usage except for fpd bit check. + * to use pasid entry for further usage except for fpd bit check. */ static int vtd_get_pe_from_pdire(IntelIOMMUState *s, uint32_t pasid, @@ -796,13 +844,15 @@ static int vtd_get_pe_from_pasid_table(IntelIOMMUState *s, static int vtd_ce_get_rid2pasid_entry(IntelIOMMUState *s, VTDContextEntry *ce, - VTDPASIDEntry *pe) + VTDPASIDEntry *pe, + uint32_t pasid) { - uint32_t pasid; dma_addr_t pasid_dir_base; int ret = 0; - pasid = VTD_CE_GET_RID2PASID(ce); + if (pasid == PCI_NO_PASID) { + pasid = VTD_CE_GET_RID2PASID(ce); + } pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce); ret = vtd_get_pe_from_pasid_table(s, pasid_dir_base, pasid, pe); @@ -811,15 +861,17 @@ static int vtd_ce_get_rid2pasid_entry(IntelIOMMUState *s, static int vtd_ce_get_pasid_fpd(IntelIOMMUState *s, VTDContextEntry *ce, - bool *pe_fpd_set) + bool *pe_fpd_set, + uint32_t pasid) { int ret; - uint32_t pasid; dma_addr_t pasid_dir_base; VTDPASIDDirEntry pdire; VTDPASIDEntry pe; - pasid = VTD_CE_GET_RID2PASID(ce); + if (pasid == PCI_NO_PASID) { + pasid = VTD_CE_GET_RID2PASID(ce); + } pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce); /* @@ -865,12 +917,13 @@ static inline uint32_t vtd_ce_get_level(VTDContextEntry *ce) } static uint32_t vtd_get_iova_level(IntelIOMMUState *s, - VTDContextEntry *ce) + VTDContextEntry *ce, + uint32_t pasid) { VTDPASIDEntry pe; if (s->root_scalable) { - vtd_ce_get_rid2pasid_entry(s, ce, &pe); + vtd_ce_get_rid2pasid_entry(s, ce, &pe, pasid); return VTD_PE_GET_LEVEL(&pe); } @@ -883,12 +936,13 @@ static inline uint32_t vtd_ce_get_agaw(VTDContextEntry *ce) } static uint32_t vtd_get_iova_agaw(IntelIOMMUState *s, - VTDContextEntry *ce) + VTDContextEntry *ce, + uint32_t pasid) { VTDPASIDEntry pe; if (s->root_scalable) { - vtd_ce_get_rid2pasid_entry(s, ce, &pe); + vtd_ce_get_rid2pasid_entry(s, ce, &pe, pasid); return 30 + ((pe.val[0] >> 2) & VTD_SM_PASID_ENTRY_AW) * 9; } @@ -930,31 +984,33 @@ static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu, } static inline uint64_t vtd_iova_limit(IntelIOMMUState *s, - VTDContextEntry *ce, uint8_t aw) + VTDContextEntry *ce, uint8_t aw, + uint32_t pasid) { - uint32_t ce_agaw = vtd_get_iova_agaw(s, ce); + uint32_t ce_agaw = vtd_get_iova_agaw(s, ce, pasid); return 1ULL << MIN(ce_agaw, aw); } /* Return true if IOVA passes range check, otherwise false. 
*/ static inline bool vtd_iova_range_check(IntelIOMMUState *s, uint64_t iova, VTDContextEntry *ce, - uint8_t aw) + uint8_t aw, uint32_t pasid) { /* * Check if @iova is above 2^X-1, where X is the minimum of MGAW * in CAP_REG and AW in context-entry. */ - return !(iova & ~(vtd_iova_limit(s, ce, aw) - 1)); + return !(iova & ~(vtd_iova_limit(s, ce, aw, pasid) - 1)); } static dma_addr_t vtd_get_iova_pgtbl_base(IntelIOMMUState *s, - VTDContextEntry *ce) + VTDContextEntry *ce, + uint32_t pasid) { VTDPASIDEntry pe; if (s->root_scalable) { - vtd_ce_get_rid2pasid_entry(s, ce, &pe); + vtd_ce_get_rid2pasid_entry(s, ce, &pe, pasid); return pe.val[0] & VTD_SM_PASID_ENTRY_SLPTPTR; } @@ -982,49 +1038,25 @@ static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level) return slpte & rsvd_mask; } -/* Find the VTD address space associated with a given bus number */ -static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num) -{ - VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num]; - GHashTableIter iter; - - if (vtd_bus) { - return vtd_bus; - } - - /* - * Iterate over the registered buses to find the one which - * currently holds this bus number and update the bus_num - * lookup table. - */ - g_hash_table_iter_init(&iter, s->vtd_as_by_busptr); - while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) { - if (pci_bus_num(vtd_bus->bus) == bus_num) { - s->vtd_as_by_bus_num[bus_num] = vtd_bus; - return vtd_bus; - } - } - - return NULL; -} - /* Given the @iova, get relevant @slptep. @slpte_level will be the last level * of the translation, can be used for deciding the size of large page. */ static int vtd_iova_to_slpte(IntelIOMMUState *s, VTDContextEntry *ce, uint64_t iova, bool is_write, uint64_t *slptep, uint32_t *slpte_level, - bool *reads, bool *writes, uint8_t aw_bits) + bool *reads, bool *writes, uint8_t aw_bits, + uint32_t pasid) { - dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce); - uint32_t level = vtd_get_iova_level(s, ce); + dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce, pasid); + uint32_t level = vtd_get_iova_level(s, ce, pasid); uint32_t offset; uint64_t slpte; uint64_t access_right_check; + uint64_t xlat, size; - if (!vtd_iova_range_check(s, iova, ce, aw_bits)) { - error_report_once("%s: detected IOVA overflow (iova=0x%" PRIx64 ")", - __func__, iova); + if (!vtd_iova_range_check(s, iova, ce, aw_bits, pasid)) { + error_report_once("%s: detected IOVA overflow (iova=0x%" PRIx64 "," + "pasid=0x%" PRIx32 ")", __func__, iova, pasid); return -VTD_FR_ADDR_BEYOND_MGAW; } @@ -1037,8 +1069,9 @@ static int vtd_iova_to_slpte(IntelIOMMUState *s, VTDContextEntry *ce, if (slpte == (uint64_t)-1) { error_report_once("%s: detected read error on DMAR slpte " - "(iova=0x%" PRIx64 ")", __func__, iova); - if (level == vtd_get_iova_level(s, ce)) { + "(iova=0x%" PRIx64 ", pasid=0x%" PRIx32 ")", + __func__, iova, pasid); + if (level == vtd_get_iova_level(s, ce, pasid)) { /* Invalid programming of context-entry */ return -VTD_FR_CONTEXT_ENTRY_INV; } else { @@ -1050,26 +1083,50 @@ static int vtd_iova_to_slpte(IntelIOMMUState *s, VTDContextEntry *ce, if (!(slpte & access_right_check)) { error_report_once("%s: detected slpte permission error " "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", " - "slpte=0x%" PRIx64 ", write=%d)", __func__, - iova, level, slpte, is_write); + "slpte=0x%" PRIx64 ", write=%d, pasid=0x%" + PRIx32 ")", __func__, iova, level, + slpte, is_write, pasid); return is_write ? 
-VTD_FR_WRITE : -VTD_FR_READ; } if (vtd_slpte_nonzero_rsvd(slpte, level)) { error_report_once("%s: detected splte reserve non-zero " "iova=0x%" PRIx64 ", level=0x%" PRIx32 - "slpte=0x%" PRIx64 ")", __func__, iova, - level, slpte); + "slpte=0x%" PRIx64 ", pasid=0x%" PRIX32 ")", + __func__, iova, level, slpte, pasid); return -VTD_FR_PAGING_ENTRY_RSVD; } if (vtd_is_last_slpte(slpte, level)) { *slptep = slpte; *slpte_level = level; - return 0; + break; } addr = vtd_get_slpte_addr(slpte, aw_bits); level--; } + + xlat = vtd_get_slpte_addr(*slptep, aw_bits); + size = ~vtd_slpt_level_page_mask(level) + 1; + + /* + * From VT-d spec 3.14: Untranslated requests and translation + * requests that result in an address in the interrupt range will be + * blocked with condition code LGN.4 or SGN.8. + */ + if ((xlat > VTD_INTERRUPT_ADDR_LAST || + xlat + size - 1 < VTD_INTERRUPT_ADDR_FIRST)) { + return 0; + } else { + error_report_once("%s: xlat address is in interrupt range " + "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", " + "slpte=0x%" PRIx64 ", write=%d, " + "xlat=0x%" PRIx64 ", size=0x%" PRIx64 ", " + "pasid=0x%" PRIx32 ")", + __func__, iova, level, slpte, is_write, + xlat, size, pasid); + return s->scalable_mode ? -VTD_FR_SM_INTERRUPT_ADDR : + -VTD_FR_INTERRUPT_ADDR; + } } typedef int (*vtd_page_walk_hook)(IOMMUTLBEvent *event, void *private); @@ -1105,7 +1162,7 @@ static int vtd_page_walk_one(IOMMUTLBEvent *event, vtd_page_walk_info *info) .translated_addr = entry->translated_addr, .perm = entry->perm, }; - DMAMap *mapped = iova_tree_find(as->iova_tree, &target); + const DMAMap *mapped = iova_tree_find(as->iova_tree, &target); if (event->type == IOMMU_NOTIFIER_UNMAP && !info->notify_unmap) { trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask); @@ -1153,7 +1210,7 @@ static int vtd_page_walk_one(IOMMUTLBEvent *event, vtd_page_walk_info *info) return ret; } /* Drop any existing mapping */ - iova_tree_remove(as->iova_tree, &target); + iova_tree_remove(as->iova_tree, target); /* Recover the correct type */ event->type = IOMMU_NOTIFIER_MAP; entry->perm = cache_perm; @@ -1166,7 +1223,7 @@ static int vtd_page_walk_one(IOMMUTLBEvent *event, vtd_page_walk_info *info) trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask); return 0; } - iova_tree_remove(as->iova_tree, &target); + iova_tree_remove(as->iova_tree, target); } trace_vtd_page_walk_one(info->domain_id, entry->iova, @@ -1280,18 +1337,19 @@ next: */ static int vtd_page_walk(IntelIOMMUState *s, VTDContextEntry *ce, uint64_t start, uint64_t end, - vtd_page_walk_info *info) + vtd_page_walk_info *info, + uint32_t pasid) { - dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce); - uint32_t level = vtd_get_iova_level(s, ce); + dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce, pasid); + uint32_t level = vtd_get_iova_level(s, ce, pasid); - if (!vtd_iova_range_check(s, start, ce, info->aw)) { + if (!vtd_iova_range_check(s, start, ce, info->aw, pasid)) { return -VTD_FR_ADDR_BEYOND_MGAW; } - if (!vtd_iova_range_check(s, end, ce, info->aw)) { + if (!vtd_iova_range_check(s, end, ce, info->aw, pasid)) { /* Fix end so that it reaches the maximum */ - end = vtd_iova_limit(s, ce, info->aw); + end = vtd_iova_limit(s, ce, info->aw, pasid); } return vtd_page_walk_level(addr, start, end, level, true, true, info); @@ -1359,7 +1417,7 @@ static int vtd_ce_rid2pasid_check(IntelIOMMUState *s, * has valid rid2pasid setting, which includes valid * rid2pasid field and corresponding pasid entry setting */ - return vtd_ce_get_rid2pasid_entry(s, ce, &pe); + return 
vtd_ce_get_rid2pasid_entry(s, ce, &pe, PCI_NO_PASID); } /* Map a device to its corresponding domain (context-entry) */ @@ -1442,12 +1500,13 @@ static int vtd_sync_shadow_page_hook(IOMMUTLBEvent *event, } static uint16_t vtd_get_domain_id(IntelIOMMUState *s, - VTDContextEntry *ce) + VTDContextEntry *ce, + uint32_t pasid) { VTDPASIDEntry pe; if (s->root_scalable) { - vtd_ce_get_rid2pasid_entry(s, ce, &pe); + vtd_ce_get_rid2pasid_entry(s, ce, &pe, pasid); return VTD_SM_PASID_ENTRY_DID(pe.val[1]); } @@ -1465,10 +1524,10 @@ static int vtd_sync_shadow_page_table_range(VTDAddressSpace *vtd_as, .notify_unmap = true, .aw = s->aw_bits, .as = vtd_as, - .domain_id = vtd_get_domain_id(s, ce), + .domain_id = vtd_get_domain_id(s, ce, vtd_as->pasid), }; - return vtd_page_walk(s, ce, addr, addr + size, &info); + return vtd_page_walk(s, ce, addr, addr + size, &info, vtd_as->pasid); } static int vtd_sync_shadow_page_table(VTDAddressSpace *vtd_as) @@ -1507,24 +1566,43 @@ static int vtd_sync_shadow_page_table(VTDAddressSpace *vtd_as) } /* - * Check if specific device is configed to bypass address + * Check if specific device is configured to bypass address * translation for DMA requests. In Scalable Mode, bypass * 1st-level translation or 2nd-level translation, it depends * on PGTT setting. */ -static bool vtd_dev_pt_enabled(VTDAddressSpace *as) +static bool vtd_dev_pt_enabled(IntelIOMMUState *s, VTDContextEntry *ce, + uint32_t pasid) +{ + VTDPASIDEntry pe; + int ret; + + if (s->root_scalable) { + ret = vtd_ce_get_rid2pasid_entry(s, ce, &pe, pasid); + if (ret) { + /* + * This error is guest triggerable. We should assumt PT + * not enabled for safety. + */ + return false; + } + return (VTD_PE_GET_TYPE(&pe) == VTD_SM_PASID_ENTRY_PT); + } + + return (vtd_ce_get_type(ce) == VTD_CONTEXT_TT_PASS_THROUGH); + +} + +static bool vtd_as_pt_enabled(VTDAddressSpace *as) { IntelIOMMUState *s; VTDContextEntry ce; - VTDPASIDEntry pe; - int ret; assert(as); s = as->iommu_state; - ret = vtd_dev_to_context_entry(s, pci_bus_num(as->bus), - as->devfn, &ce); - if (ret) { + if (vtd_dev_to_context_entry(s, pci_bus_num(as->bus), as->devfn, + &ce)) { /* * Possibly failed to parse the context entry for some reason * (e.g., during init, or any guest configuration errors on @@ -1534,29 +1612,20 @@ static bool vtd_dev_pt_enabled(VTDAddressSpace *as) return false; } - if (s->root_scalable) { - ret = vtd_ce_get_rid2pasid_entry(s, &ce, &pe); - if (ret) { - error_report_once("%s: vtd_ce_get_rid2pasid_entry error: %"PRId32, - __func__, ret); - return false; - } - return (VTD_PE_GET_TYPE(&pe) == VTD_SM_PASID_ENTRY_PT); - } - - return (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH); + return vtd_dev_pt_enabled(s, &ce, as->pasid); } /* Return whether the device is using IOMMU translation. 
*/ static bool vtd_switch_address_space(VTDAddressSpace *as) { - bool use_iommu; + bool use_iommu, pt; /* Whether we need to take the BQL on our own */ bool take_bql = !qemu_mutex_iothread_locked(); assert(as); - use_iommu = as->iommu_state->dmar_enabled && !vtd_dev_pt_enabled(as); + use_iommu = as->iommu_state->dmar_enabled && !vtd_as_pt_enabled(as); + pt = as->iommu_state->dmar_enabled && vtd_as_pt_enabled(as); trace_vtd_switch_address_space(pci_bus_num(as->bus), VTD_PCI_SLOT(as->devfn), @@ -1576,11 +1645,53 @@ static bool vtd_switch_address_space(VTDAddressSpace *as) if (use_iommu) { memory_region_set_enabled(&as->nodmar, false); memory_region_set_enabled(MEMORY_REGION(&as->iommu), true); + /* + * vt-d spec v3.4 3.14: + * + * """ + * Requests-with-PASID with input address in range 0xFEEx_xxxx + * are translated normally like any other request-with-PASID + * through DMA-remapping hardware. + * """ + * + * Need to disable ir for as with PASID. + */ + if (as->pasid != PCI_NO_PASID) { + memory_region_set_enabled(&as->iommu_ir, false); + } else { + memory_region_set_enabled(&as->iommu_ir, true); + } } else { memory_region_set_enabled(MEMORY_REGION(&as->iommu), false); memory_region_set_enabled(&as->nodmar, true); } + /* + * vtd-spec v3.4 3.14: + * + * """ + * Requests-with-PASID with input address in range 0xFEEx_xxxx are + * translated normally like any other request-with-PASID through + * DMA-remapping hardware. However, if such a request is processed + * using pass-through translation, it will be blocked as described + * in the paragraph below. + * + * Software must not program paging-structure entries to remap any + * address to the interrupt address range. Untranslated requests + * and translation requests that result in an address in the + * interrupt range will be blocked with condition code LGN.4 or + * SGN.8. + * """ + * + * We enable per as memory region (iommu_ir_fault) for catching + * the tranlsation for interrupt range through PASID + PT. 
+ */ + if (pt && as->pasid != PCI_NO_PASID) { + memory_region_set_enabled(&as->iommu_ir_fault, true); + } else { + memory_region_set_enabled(&as->iommu_ir_fault, false); + } + if (take_bql) { qemu_mutex_unlock_iothread(); } @@ -1590,26 +1701,15 @@ static bool vtd_switch_address_space(VTDAddressSpace *as) static void vtd_switch_address_space_all(IntelIOMMUState *s) { + VTDAddressSpace *vtd_as; GHashTableIter iter; - VTDBus *vtd_bus; - int i; - g_hash_table_iter_init(&iter, s->vtd_as_by_busptr); - while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) { - for (i = 0; i < PCI_DEVFN_MAX; i++) { - if (!vtd_bus->dev_as[i]) { - continue; - } - vtd_switch_address_space(vtd_bus->dev_as[i]); - } + g_hash_table_iter_init(&iter, s->vtd_address_spaces); + while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_as)) { + vtd_switch_address_space(vtd_as); } } -static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn) -{ - return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL); -} - static const bool vtd_qualified_faults[] = { [VTD_FR_RESERVED] = false, [VTD_FR_ROOT_ENTRY_P] = false, @@ -1621,11 +1721,12 @@ static const bool vtd_qualified_faults[] = { [VTD_FR_PAGING_ENTRY_INV] = true, [VTD_FR_ROOT_TABLE_INV] = false, [VTD_FR_CONTEXT_TABLE_INV] = false, + [VTD_FR_INTERRUPT_ADDR] = true, [VTD_FR_ROOT_ENTRY_RSVD] = false, [VTD_FR_PAGING_ENTRY_RSVD] = true, [VTD_FR_CONTEXT_ENTRY_TT] = true, [VTD_FR_PASID_TABLE_INV] = false, - [VTD_FR_RESERVED_ERR] = false, + [VTD_FR_SM_INTERRUPT_ADDR] = true, [VTD_FR_MAX] = false, }; @@ -1643,18 +1744,37 @@ static inline bool vtd_is_interrupt_addr(hwaddr addr) return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST; } +static gboolean vtd_find_as_by_sid(gpointer key, gpointer value, + gpointer user_data) +{ + struct vtd_as_key *as_key = (struct vtd_as_key *)key; + uint16_t target_sid = *(uint16_t *)user_data; + uint16_t sid = PCI_BUILD_BDF(pci_bus_num(as_key->bus), as_key->devfn); + return sid == target_sid; +} + +static VTDAddressSpace *vtd_get_as_by_sid(IntelIOMMUState *s, uint16_t sid) +{ + uint8_t bus_num = PCI_BUS_NUM(sid); + VTDAddressSpace *vtd_as = s->vtd_as_cache[bus_num]; + + if (vtd_as && + (sid == PCI_BUILD_BDF(pci_bus_num(vtd_as->bus), vtd_as->devfn))) { + return vtd_as; + } + + vtd_as = g_hash_table_find(s->vtd_address_spaces, vtd_find_as_by_sid, &sid); + s->vtd_as_cache[bus_num] = vtd_as; + + return vtd_as; +} + static void vtd_pt_enable_fast_path(IntelIOMMUState *s, uint16_t source_id) { - VTDBus *vtd_bus; VTDAddressSpace *vtd_as; bool success = false; - vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id)); - if (!vtd_bus) { - goto out; - } - - vtd_as = vtd_bus->dev_as[VTD_SID_TO_DEVFN(source_id)]; + vtd_as = vtd_get_as_by_sid(s, source_id); if (!vtd_as) { goto out; } @@ -1668,6 +1788,22 @@ out: trace_vtd_pt_enable_fast_path(source_id, success); } +static void vtd_report_fault(IntelIOMMUState *s, + int err, bool is_fpd_set, + uint16_t source_id, + hwaddr addr, + bool is_write, + bool is_pasid, + uint32_t pasid) +{ + if (is_fpd_set && vtd_is_qualified_fault(err)) { + trace_vtd_fault_disabled(); + } else { + vtd_report_dmar_fault(s, source_id, addr, err, is_write, + is_pasid, pasid); + } +} + /* Map dev to context-entry then do a paging-structures walk to do a iommu * translation. 
* @@ -1689,13 +1825,14 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus, uint8_t bus_num = pci_bus_num(bus); VTDContextCacheEntry *cc_entry; uint64_t slpte, page_mask; - uint32_t level; - uint16_t source_id = vtd_make_source_id(bus_num, devfn); + uint32_t level, pasid = vtd_as->pasid; + uint16_t source_id = PCI_BUILD_BDF(bus_num, devfn); int ret_fr; bool is_fpd_set = false; bool reads = true; bool writes = true; uint8_t access_flags; + bool rid2pasid = (pasid == PCI_NO_PASID) && s->root_scalable; VTDIOTLBEntry *iotlb_entry; /* @@ -1708,15 +1845,17 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus, cc_entry = &vtd_as->context_cache_entry; - /* Try to fetch slpte form IOTLB */ - iotlb_entry = vtd_lookup_iotlb(s, source_id, addr); - if (iotlb_entry) { - trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte, - iotlb_entry->domain_id); - slpte = iotlb_entry->slpte; - access_flags = iotlb_entry->access_flags; - page_mask = iotlb_entry->mask; - goto out; + /* Try to fetch slpte form IOTLB, we don't need RID2PASID logic */ + if (!rid2pasid) { + iotlb_entry = vtd_lookup_iotlb(s, source_id, pasid, addr); + if (iotlb_entry) { + trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte, + iotlb_entry->domain_id); + slpte = iotlb_entry->slpte; + access_flags = iotlb_entry->access_flags; + page_mask = iotlb_entry->mask; + goto out; + } } /* Try to fetch context-entry from cache first */ @@ -1727,16 +1866,26 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus, ce = cc_entry->context_entry; is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD; if (!is_fpd_set && s->root_scalable) { - ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set); - VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write); + ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set, pasid); + if (ret_fr) { + vtd_report_fault(s, -ret_fr, is_fpd_set, + source_id, addr, is_write, + false, 0); + goto error; + } } } else { ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce); is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD; if (!ret_fr && !is_fpd_set && s->root_scalable) { - ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set); + ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set, pasid); + } + if (ret_fr) { + vtd_report_fault(s, -ret_fr, is_fpd_set, + source_id, addr, is_write, + false, 0); + goto error; } - VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write); /* Update context-cache */ trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo, cc_entry->context_cache_gen, @@ -1745,11 +1894,15 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus, cc_entry->context_cache_gen = s->context_cache_gen; } + if (rid2pasid) { + pasid = VTD_CE_GET_RID2PASID(&ce); + } + /* * We don't need to translate for pass-through context entries. * Also, let's ignore IOTLB caching as well for PT devices. 
*/ - if (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH) { + if (vtd_dev_pt_enabled(s, &ce, pasid)) { entry->iova = addr & VTD_PAGE_MASK_4K; entry->translated_addr = entry->iova; entry->addr_mask = ~VTD_PAGE_MASK_4K; @@ -1770,14 +1923,31 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus, return true; } + /* Try to fetch slpte form IOTLB for RID2PASID slow path */ + if (rid2pasid) { + iotlb_entry = vtd_lookup_iotlb(s, source_id, pasid, addr); + if (iotlb_entry) { + trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte, + iotlb_entry->domain_id); + slpte = iotlb_entry->slpte; + access_flags = iotlb_entry->access_flags; + page_mask = iotlb_entry->mask; + goto out; + } + } + ret_fr = vtd_iova_to_slpte(s, &ce, addr, is_write, &slpte, &level, - &reads, &writes, s->aw_bits); - VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write); + &reads, &writes, s->aw_bits, pasid); + if (ret_fr) { + vtd_report_fault(s, -ret_fr, is_fpd_set, source_id, + addr, is_write, pasid != PCI_NO_PASID, pasid); + goto error; + } page_mask = vtd_slpt_level_page_mask(level); access_flags = IOMMU_ACCESS_FLAG(reads, writes); - vtd_update_iotlb(s, source_id, vtd_get_domain_id(s, &ce), addr, slpte, - access_flags, level); + vtd_update_iotlb(s, source_id, vtd_get_domain_id(s, &ce, pasid), + addr, slpte, access_flags, level, pasid); out: vtd_iommu_unlock(s); entry->iova = addr & page_mask; @@ -1862,11 +2032,10 @@ static void vtd_context_device_invalidate(IntelIOMMUState *s, uint16_t source_id, uint16_t func_mask) { + GHashTableIter as_it; uint16_t mask; - VTDBus *vtd_bus; VTDAddressSpace *vtd_as; uint8_t bus_n, devfn; - uint16_t devfn_it; trace_vtd_inv_desc_cc_devices(source_id, func_mask); @@ -1889,32 +2058,31 @@ static void vtd_context_device_invalidate(IntelIOMMUState *s, mask = ~mask; bus_n = VTD_SID_TO_BUS(source_id); - vtd_bus = vtd_find_as_from_bus_num(s, bus_n); - if (vtd_bus) { - devfn = VTD_SID_TO_DEVFN(source_id); - for (devfn_it = 0; devfn_it < PCI_DEVFN_MAX; ++devfn_it) { - vtd_as = vtd_bus->dev_as[devfn_it]; - if (vtd_as && ((devfn_it & mask) == (devfn & mask))) { - trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it), - VTD_PCI_FUNC(devfn_it)); - vtd_iommu_lock(s); - vtd_as->context_cache_entry.context_cache_gen = 0; - vtd_iommu_unlock(s); - /* - * Do switch address space when needed, in case if the - * device passthrough bit is switched. - */ - vtd_switch_address_space(vtd_as); - /* - * So a device is moving out of (or moving into) a - * domain, resync the shadow page table. - * This won't bring bad even if we have no such - * notifier registered - the IOMMU notification - * framework will skip MAP notifications if that - * happened. - */ - vtd_sync_shadow_page_table(vtd_as); - } + devfn = VTD_SID_TO_DEVFN(source_id); + + g_hash_table_iter_init(&as_it, s->vtd_address_spaces); + while (g_hash_table_iter_next(&as_it, NULL, (void **)&vtd_as)) { + if ((pci_bus_num(vtd_as->bus) == bus_n) && + (vtd_as->devfn & mask) == (devfn & mask)) { + trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(vtd_as->devfn), + VTD_PCI_FUNC(vtd_as->devfn)); + vtd_iommu_lock(s); + vtd_as->context_cache_entry.context_cache_gen = 0; + vtd_iommu_unlock(s); + /* + * Do switch address space when needed, in case if the + * device passthrough bit is switched. + */ + vtd_switch_address_space(vtd_as); + /* + * So a device is moving out of (or moving into) a + * domain, resync the shadow page table. 
+ * This won't bring bad even if we have no such + * notifier registered - the IOMMU notification + * framework will skip MAP notifications if that + * happened. + */ + vtd_sync_shadow_page_table(vtd_as); } } } @@ -1971,7 +2139,7 @@ static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id) QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), vtd_as->devfn, &ce) && - domain_id == vtd_get_domain_id(s, &ce)) { + domain_id == vtd_get_domain_id(s, &ce, vtd_as->pasid)) { vtd_sync_shadow_page_table(vtd_as); } } @@ -1979,7 +2147,7 @@ static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id) static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s, uint16_t domain_id, hwaddr addr, - uint8_t am) + uint8_t am, uint32_t pasid) { VTDAddressSpace *vtd_as; VTDContextEntry ce; @@ -1987,9 +2155,12 @@ static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s, hwaddr size = (1 << am) * VTD_PAGE_SIZE; QLIST_FOREACH(vtd_as, &(s->vtd_as_with_notifiers), next) { + if (pasid != PCI_NO_PASID && pasid != vtd_as->pasid) { + continue; + } ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), vtd_as->devfn, &ce); - if (!ret && domain_id == vtd_get_domain_id(s, &ce)) { + if (!ret && domain_id == vtd_get_domain_id(s, &ce, vtd_as->pasid)) { if (vtd_as_has_map_notifier(vtd_as)) { /* * As long as we have MAP notifications registered in @@ -2033,7 +2204,7 @@ static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id, vtd_iommu_lock(s); g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info); vtd_iommu_unlock(s); - vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am); + vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am, PCI_NO_PASID); } /* Flush IOTLB @@ -2197,12 +2368,13 @@ static void vtd_handle_gcmd_ire(IntelIOMMUState *s, bool en) /* Handle write to Global Command Register */ static void vtd_handle_gcmd_write(IntelIOMMUState *s) { + X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG); uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG); uint32_t changed = status ^ val; trace_vtd_reg_write_gcmd(status, val); - if (changed & VTD_GCMD_TE) { + if ((changed & VTD_GCMD_TE) && s->dma_translation) { /* Translation enable/disable */ vtd_handle_gcmd_te(s, val & VTD_GCMD_TE); } @@ -2218,7 +2390,8 @@ static void vtd_handle_gcmd_write(IntelIOMMUState *s) /* Set/update the interrupt remapping root-table pointer */ vtd_handle_gcmd_sirtp(s); } - if (changed & VTD_GCMD_IRE) { + if ((changed & VTD_GCMD_IRE) && + x86_iommu_ir_supported(x86_iommu)) { /* Interrupt remap enable/disable */ vtd_handle_gcmd_ire(s, val & VTD_GCMD_IRE); } @@ -2275,7 +2448,8 @@ static bool vtd_get_inv_desc(IntelIOMMUState *s, uint32_t dw = s->iq_dw ? 
32 : 16; dma_addr_t addr = base_addr + offset * dw; - if (dma_memory_read(&address_space_memory, addr, inv_desc, dw)) { + if (dma_memory_read(&address_space_memory, addr, + inv_desc, dw, MEMTXATTRS_UNSPECIFIED)) { error_report_once("Read INV DESC failed."); return false; } @@ -2308,8 +2482,9 @@ static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) dma_addr_t status_addr = inv_desc->hi; trace_vtd_inv_desc_wait_sw(status_addr, status_data); status_data = cpu_to_le32(status_data); - if (dma_memory_write(&address_space_memory, status_addr, &status_data, - sizeof(status_data))) { + if (dma_memory_write(&address_space_memory, status_addr, + &status_data, sizeof(status_data), + MEMTXATTRS_UNSPECIFIED)) { trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo); return false; } @@ -2426,18 +2601,13 @@ static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s, { VTDAddressSpace *vtd_dev_as; IOMMUTLBEvent event; - struct VTDBus *vtd_bus; hwaddr addr; uint64_t sz; uint16_t sid; - uint8_t devfn; bool size; - uint8_t bus_num; addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi); sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo); - devfn = sid & 0xff; - bus_num = sid >> 8; size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi); if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) || @@ -2448,12 +2618,11 @@ static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s, return false; } - vtd_bus = vtd_find_as_from_bus_num(s, bus_num); - if (!vtd_bus) { - goto done; - } - - vtd_dev_as = vtd_bus->dev_as[devfn]; + /* + * Using sid is OK since the guest should have finished the + * initialization of both the bus and device. + */ + vtd_dev_as = vtd_get_as_by_sid(s, sid); if (!vtd_dev_as) { goto done; } @@ -2869,12 +3038,7 @@ static void vtd_mem_write(void *opaque, hwaddr addr, } else { vtd_set_quad(s, addr, val); } - if (s->ecap & VTD_ECAP_SMTS && - val & VTD_IQA_DW_MASK) { - s->iq_dw = true; - } else { - s->iq_dw = false; - } + vtd_update_iq_dw(s); break; case DMAR_IQA_REG_HI: @@ -3015,6 +3179,28 @@ static int vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu, { VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu); IntelIOMMUState *s = vtd_as->iommu_state; + X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); + + /* TODO: add support for VFIO and vhost users */ + if (s->snoop_control) { + error_setg_errno(errp, ENOTSUP, + "Snoop Control with vhost or VFIO is not supported"); + return -ENOTSUP; + } + if (!s->caching_mode && (new & IOMMU_NOTIFIER_MAP)) { + error_setg_errno(errp, ENOTSUP, + "device %02x.%02x.%x requires caching mode", + pci_bus_num(vtd_as->bus), PCI_SLOT(vtd_as->devfn), + PCI_FUNC(vtd_as->devfn)); + return -ENOTSUP; + } + if (!x86_iommu->dt_supported && (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP)) { + error_setg_errno(errp, ENOTSUP, + "device %02x.%02x.%x requires device IOTLB mode", + pci_bus_num(vtd_as->bus), PCI_SLOT(vtd_as->devfn), + PCI_FUNC(vtd_as->devfn)); + return -ENOTSUP; + } /* Update per-address-space notifier flags */ vtd_as->notifier_flags = new; @@ -3031,13 +3217,6 @@ static int vtd_post_load(void *opaque, int version_id) { IntelIOMMUState *iommu = opaque; - /* - * Memory regions are dynamically turned on/off depending on - * context entry configurations from the guest. After migration, - * we need to make sure the memory regions are still correct. - */ - vtd_switch_address_space_all(iommu); - /* * We don't need to migrate the root_scalable because we can * simply do the calculation after the loading is complete. 
We @@ -3047,6 +3226,15 @@ static int vtd_post_load(void *opaque, int version_id) */ vtd_update_scalable_state(iommu); + vtd_update_iq_dw(iommu); + + /* + * Memory regions are dynamically turned on/off depending on + * context entry configurations from the guest. After migration, + * we need to make sure the memory regions are still correct. + */ + vtd_switch_address_space_all(iommu); + return 0; } @@ -3099,7 +3287,10 @@ static Property vtd_properties[] = { VTD_HOST_ADDRESS_WIDTH), DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE), DEFINE_PROP_BOOL("x-scalable-mode", IntelIOMMUState, scalable_mode, FALSE), + DEFINE_PROP_BOOL("snoop-control", IntelIOMMUState, snoop_control, false), + DEFINE_PROP_BOOL("x-pasid-mode", IntelIOMMUState, pasid, false), DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true), + DEFINE_PROP_BOOL("dma-translation", IntelIOMMUState, dma_translation, true), DEFINE_PROP_END_OF_LIST(), }; @@ -3120,8 +3311,8 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index, } addr = iommu->intr_root + index * sizeof(*entry); - if (dma_memory_read(&address_space_memory, addr, entry, - sizeof(*entry))) { + if (dma_memory_read(&address_space_memory, addr, + entry, sizeof(*entry), MEMTXATTRS_UNSPECIFIED)) { error_report_once("%s: read failed: ind=0x%x addr=0x%" PRIx64, __func__, index, addr); return -VTD_FR_IR_ROOT_INVAL; @@ -3372,32 +3563,98 @@ static const MemoryRegionOps vtd_mem_ir_ops = { }, }; -VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn) +static void vtd_report_ir_illegal_access(VTDAddressSpace *vtd_as, + hwaddr addr, bool is_write) { - uintptr_t key = (uintptr_t)bus; - VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key); + IntelIOMMUState *s = vtd_as->iommu_state; + uint8_t bus_n = pci_bus_num(vtd_as->bus); + uint16_t sid = PCI_BUILD_BDF(bus_n, vtd_as->devfn); + bool is_fpd_set = false; + VTDContextEntry ce; + + assert(vtd_as->pasid != PCI_NO_PASID); + + /* Try out best to fetch FPD, we can't do anything more */ + if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) { + is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD; + if (!is_fpd_set && s->root_scalable) { + vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set, vtd_as->pasid); + } + } + + vtd_report_fault(s, VTD_FR_SM_INTERRUPT_ADDR, + is_fpd_set, sid, addr, is_write, + true, vtd_as->pasid); +} + +static MemTxResult vtd_mem_ir_fault_read(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + vtd_report_ir_illegal_access(opaque, addr, false); + + return MEMTX_ERROR; +} + +static MemTxResult vtd_mem_ir_fault_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + vtd_report_ir_illegal_access(opaque, addr, true); + + return MEMTX_ERROR; +} + +static const MemoryRegionOps vtd_mem_ir_fault_ops = { + .read_with_attrs = vtd_mem_ir_fault_read, + .write_with_attrs = vtd_mem_ir_fault_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, + int devfn, unsigned int pasid) +{ + /* + * We can't simply use sid here since the bus number might not be + * initialized by the guest. 
+ */ + struct vtd_as_key key = { + .bus = bus, + .devfn = devfn, + .pasid = pasid, + }; VTDAddressSpace *vtd_dev_as; char name[128]; - if (!vtd_bus) { - uintptr_t *new_key = g_malloc(sizeof(*new_key)); - *new_key = (uintptr_t)bus; - /* No corresponding free() */ - vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) * \ - PCI_DEVFN_MAX); - vtd_bus->bus = bus; - g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus); - } - - vtd_dev_as = vtd_bus->dev_as[devfn]; - + vtd_dev_as = g_hash_table_lookup(s->vtd_address_spaces, &key); if (!vtd_dev_as) { - snprintf(name, sizeof(name), "vtd-%02x.%x", PCI_SLOT(devfn), - PCI_FUNC(devfn)); - vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace)); + struct vtd_as_key *new_key = g_malloc(sizeof(*new_key)); + + new_key->bus = bus; + new_key->devfn = devfn; + new_key->pasid = pasid; + + if (pasid == PCI_NO_PASID) { + snprintf(name, sizeof(name), "vtd-%02x.%x", PCI_SLOT(devfn), + PCI_FUNC(devfn)); + } else { + snprintf(name, sizeof(name), "vtd-%02x.%x-pasid-%x", PCI_SLOT(devfn), + PCI_FUNC(devfn), pasid); + } + + vtd_dev_as = g_new0(VTDAddressSpace, 1); vtd_dev_as->bus = bus; vtd_dev_as->devfn = (uint8_t)devfn; + vtd_dev_as->pasid = pasid; vtd_dev_as->iommu_state = s; vtd_dev_as->context_cache_entry.context_cache_gen = 0; vtd_dev_as->iova_tree = iova_tree_new(); @@ -3438,6 +3695,24 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn) VTD_INTERRUPT_ADDR_FIRST, &vtd_dev_as->iommu_ir, 1); + /* + * This region is used for catching fault to access interrupt + * range via passthrough + PASID. See also + * vtd_switch_address_space(). We can't use alias since we + * need to know the sid which is valid for MSI who uses + * bus_master_as (see msi_send_message()). + */ + memory_region_init_io(&vtd_dev_as->iommu_ir_fault, OBJECT(s), + &vtd_mem_ir_fault_ops, vtd_dev_as, "vtd-no-ir", + VTD_INTERRUPT_ADDR_SIZE); + /* + * Hook to root since when PT is enabled vtd_dev_as->iommu + * will be disabled. 
+ */ + memory_region_add_subregion_overlap(MEMORY_REGION(&vtd_dev_as->root), + VTD_INTERRUPT_ADDR_FIRST, + &vtd_dev_as->iommu_ir_fault, 2); + /* * Hook both the containers under the root container, we * switch between DMAR & noDMAR by enable/disable @@ -3450,6 +3725,8 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn) &vtd_dev_as->nodmar, 0); vtd_switch_address_space(vtd_dev_as); + + g_hash_table_insert(s->vtd_address_spaces, new_key, vtd_dev_as); } return vtd_dev_as; } @@ -3510,7 +3787,7 @@ static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n) map.iova = n->start; map.size = size; - iova_tree_remove(as->iova_tree, &map); + iova_tree_remove(as->iova_tree, map); } static void vtd_address_space_unmap_all(IntelIOMMUState *s) @@ -3556,7 +3833,7 @@ static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) "legacy mode", bus_n, PCI_SLOT(vtd_as->devfn), PCI_FUNC(vtd_as->devfn), - vtd_get_domain_id(s, &ce), + vtd_get_domain_id(s, &ce, vtd_as->pasid), ce.hi, ce.lo); if (vtd_as_has_map_notifier(vtd_as)) { /* This is required only for MAP typed notifiers */ @@ -3566,10 +3843,10 @@ static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) .notify_unmap = false, .aw = s->aw_bits, .as = vtd_as, - .domain_id = vtd_get_domain_id(s, &ce), + .domain_id = vtd_get_domain_id(s, &ce, vtd_as->pasid), }; - vtd_page_walk(s, &ce, 0, ~0ULL, &info); + vtd_page_walk(s, &ce, 0, ~0ULL, &info, vtd_as->pasid); } } else { trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn), @@ -3605,12 +3882,17 @@ static void vtd_init(IntelIOMMUState *s) s->next_frcd_reg = 0; s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS | - VTD_CAP_SAGAW_39bit | VTD_CAP_MGAW(s->aw_bits); + VTD_CAP_MGAW(s->aw_bits); if (s->dma_drain) { s->cap |= VTD_CAP_DRAIN; } - if (s->aw_bits == VTD_HOST_AW_48BIT) { - s->cap |= VTD_CAP_SAGAW_48bit; + if (s->dma_translation) { + if (s->aw_bits >= VTD_HOST_AW_39BIT) { + s->cap |= VTD_CAP_SAGAW_39bit; + } + if (s->aw_bits >= VTD_HOST_AW_48BIT) { + s->cap |= VTD_CAP_SAGAW_48bit; + } } s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO; @@ -3629,6 +3911,12 @@ static void vtd_init(IntelIOMMUState *s) vtd_spte_rsvd_large[3] = VTD_SPTE_LPAGE_L3_RSVD_MASK(s->aw_bits, x86_iommu->dt_supported); + if (s->scalable_mode || s->snoop_control) { + vtd_spte_rsvd[1] &= ~VTD_SPTE_SNP; + vtd_spte_rsvd_large[2] &= ~VTD_SPTE_SNP; + vtd_spte_rsvd_large[3] &= ~VTD_SPTE_SNP; + } + if (x86_iommu_ir_supported(x86_iommu)) { s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV; if (s->intr_eim == ON_OFF_AUTO_ON) { @@ -3654,6 +3942,14 @@ static void vtd_init(IntelIOMMUState *s) s->ecap |= VTD_ECAP_SMTS | VTD_ECAP_SRS | VTD_ECAP_SLTS; } + if (s->snoop_control) { + s->ecap |= VTD_ECAP_SC; + } + + if (s->pasid) { + s->ecap |= VTD_ECAP_PASID; + } + vtd_reset_caches(s); /* Define registers with default values and bit semantics */ @@ -3727,7 +4023,7 @@ static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) assert(0 <= devfn && devfn < PCI_DEVFN_MAX); - vtd_as = vtd_find_add_as(s, bus, devfn); + vtd_as = vtd_find_add_as(s, bus, devfn, PCI_NO_PASID); return &vtd_as->as; } @@ -3746,7 +4042,7 @@ static bool vtd_decide_config(IntelIOMMUState *s, Error **errp) ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; } if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) { - if (!kvm_irqchip_in_kernel()) { + if (!kvm_irqchip_is_split()) { error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split"); return false; } @@ -3770,6 +4066,11 @@ static 
bool vtd_decide_config(IntelIOMMUState *s, Error **errp) return false; } + if (s->pasid && !s->scalable_mode) { + error_setg(errp, "Need to set scalable mode for PASID"); + return false; + } + return true; } @@ -3806,9 +4107,17 @@ static void vtd_realize(DeviceState *dev, Error **errp) X86MachineState *x86ms = X86_MACHINE(ms); PCIBus *bus = pcms->bus; IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev); - X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev); + X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); - x86_iommu->type = TYPE_INTEL; + if (s->pasid && x86_iommu->dt_supported) { + /* + * PASID-based-Device-TLB Invalidate Descriptor is not + * implemented and it requires support from vhost layer which + * needs to be implemented in the future. + */ + error_setg(errp, "PASID based device IOTLB is not supported"); + return; + } if (!vtd_decide_config(s, errp)) { return; @@ -3816,7 +4125,6 @@ static void vtd_realize(DeviceState *dev, Error **errp) QLIST_INIT(&s->vtd_as_with_notifiers); qemu_mutex_init(&s->iommu_lock); - memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num)); memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s, "intel_iommu", DMAR_REG_SIZE); @@ -3836,10 +4144,10 @@ static void vtd_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem); /* No corresponding destroy */ - s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal, + s->iotlb = g_hash_table_new_full(vtd_iotlb_hash, vtd_iotlb_equal, g_free, g_free); - s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal, - g_free, g_free); + s->vtd_address_spaces = g_hash_table_new_full(vtd_as_hash, vtd_as_equal, + g_free, g_free); vtd_init(s); sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR); pci_setup_iommu(bus, vtd_host_dma_iommu, dev); diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h index 3d5487fe2c..f090e61e11 100644 --- a/hw/i386/intel_iommu_internal.h +++ b/hw/i386/intel_iommu_internal.h @@ -114,8 +114,9 @@ VTD_INTERRUPT_ADDR_FIRST + 1) /* The shift of source_id in the key of IOTLB hash table */ -#define VTD_IOTLB_SID_SHIFT 36 -#define VTD_IOTLB_LVL_SHIFT 52 +#define VTD_IOTLB_SID_SHIFT 20 +#define VTD_IOTLB_LVL_SHIFT 28 +#define VTD_IOTLB_PASID_SHIFT 30 #define VTD_IOTLB_MAX_SIZE 1024 /* Max size of the hash table */ /* IOTLB_REG */ @@ -188,8 +189,10 @@ #define VTD_ECAP_IR (1ULL << 3) #define VTD_ECAP_EIM (1ULL << 4) #define VTD_ECAP_PT (1ULL << 6) +#define VTD_ECAP_SC (1ULL << 7) #define VTD_ECAP_MHMV (15ULL << 20) #define VTD_ECAP_SRS (1ULL << 31) +#define VTD_ECAP_PASID (1ULL << 40) #define VTD_ECAP_SMTS (1ULL << 43) #define VTD_ECAP_SLTS (1ULL << 46) @@ -210,6 +213,8 @@ #define VTD_CAP_DRAIN_READ (1ULL << 55) #define VTD_CAP_DRAIN (VTD_CAP_DRAIN_READ | VTD_CAP_DRAIN_WRITE) #define VTD_CAP_CM (1ULL << 7) +#define VTD_PASID_ID_SHIFT 20 +#define VTD_PASID_ID_MASK ((1ULL << VTD_PASID_ID_SHIFT) - 1) /* Supported Adjusted Guest Address Widths */ #define VTD_CAP_SAGAW_SHIFT 8 @@ -261,6 +266,8 @@ #define VTD_FRCD_SID(val) ((val) & VTD_FRCD_SID_MASK) /* For the low 64-bit of 128-bit */ #define VTD_FRCD_FI(val) ((val) & ~0xfffULL) +#define VTD_FRCD_PV(val) (((val) & 0xffffULL) << 40) +#define VTD_FRCD_PP(val) (((val) & 0x1) << 31) /* DMA Remapping Fault Conditions */ typedef enum VTDFaultReason { @@ -288,6 +295,8 @@ typedef enum VTDFaultReason { * context-entry. 
*/ VTD_FR_CONTEXT_ENTRY_TT, + /* Output address in the interrupt address range */ + VTD_FR_INTERRUPT_ADDR = 0xE, /* Interrupt remapping transition faults */ VTD_FR_IR_REQ_RSVD = 0x20, /* One or more IR request reserved @@ -303,11 +312,8 @@ typedef enum VTDFaultReason { VTD_FR_PASID_TABLE_INV = 0x58, /*Invalid PASID table entry */ - /* This is not a normal fault reason. We use this to indicate some faults - * that are not referenced by the VT-d specification. - * Fault event with such reason should not be recorded. - */ - VTD_FR_RESERVED_ERR, + /* Output address in the interrupt address range for scalable mode */ + VTD_FR_SM_INTERRUPT_ADDR = 0x87, VTD_FR_MAX, /* Guard */ } VTDFaultReason; @@ -379,6 +385,11 @@ typedef union VTDInvDesc VTDInvDesc; #define VTD_INV_DESC_IOTLB_AM(val) ((val) & 0x3fULL) #define VTD_INV_DESC_IOTLB_RSVD_LO 0xffffffff0000ff00ULL #define VTD_INV_DESC_IOTLB_RSVD_HI 0xf80ULL +#define VTD_INV_DESC_IOTLB_PASID_PASID (2ULL << 4) +#define VTD_INV_DESC_IOTLB_PASID_PAGE (3ULL << 4) +#define VTD_INV_DESC_IOTLB_PASID(val) (((val) >> 32) & VTD_PASID_ID_MASK) +#define VTD_INV_DESC_IOTLB_PASID_RSVD_LO 0xfff00000000001c0ULL +#define VTD_INV_DESC_IOTLB_PASID_RSVD_HI 0xf80ULL /* Mask for Device IOTLB Invalidate Descriptor */ #define VTD_INV_DESC_DEVICE_IOTLB_ADDR(val) ((val) & 0xfffffffffffff000ULL) @@ -388,6 +399,8 @@ typedef union VTDInvDesc VTDInvDesc; #define VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO 0xffff0000ffe0fff8 /* Rsvd field masks for spte */ +#define VTD_SPTE_SNP 0x800ULL + #define VTD_SPTE_PAGE_L1_RSVD_MASK(aw, dt_supported) \ dt_supported ? \ (0x800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM | VTD_SL_TM)) : \ @@ -411,6 +424,7 @@ typedef union VTDInvDesc VTDInvDesc; /* Information about page-selective IOTLB invalidate */ struct VTDIOTLBPageInvInfo { uint16_t domain_id; + uint32_t pasid; uint64_t addr; uint8_t mask; }; diff --git a/hw/i386/kvm/i8254.c b/hw/i386/kvm/i8254.c index fa68669e8a..191a26fa57 100644 --- a/hw/i386/kvm/i8254.c +++ b/hw/i386/kvm/i8254.c @@ -59,11 +59,6 @@ struct KVMPITClass { DeviceRealize parent_realize; }; -static int64_t abs64(int64_t v) -{ - return v < 0 ? 
-v : v; -} - static void kvm_pit_update_clock_offset(KVMPITState *s) { int64_t offset, clock_offset; @@ -81,7 +76,7 @@ static void kvm_pit_update_clock_offset(KVMPITState *s) clock_gettime(CLOCK_MONOTONIC, &ts); offset -= ts.tv_nsec; offset -= (int64_t)ts.tv_sec * 1000000000; - if (abs64(offset) < abs64(clock_offset)) { + if (uabs64(offset) < uabs64(clock_offset)) { clock_offset = offset; } } diff --git a/hw/i386/meson.build b/hw/i386/meson.build index 80dad29f2b..213e2e82b3 100644 --- a/hw/i386/meson.build +++ b/hw/i386/meson.build @@ -11,11 +11,13 @@ i386_ss.add(when: 'CONFIG_X86_IOMMU', if_true: files('x86-iommu.c'), if_false: files('x86-iommu-stub.c')) i386_ss.add(when: 'CONFIG_AMD_IOMMU', if_true: files('amd_iommu.c')) i386_ss.add(when: 'CONFIG_I440FX', if_true: files('pc_piix.c')) -i386_ss.add(when: 'CONFIG_MICROVM', if_true: files('microvm.c', 'acpi-microvm.c')) +i386_ss.add(when: 'CONFIG_MICROVM', if_true: files('microvm.c', 'acpi-microvm.c', 'microvm-dt.c')) i386_ss.add(when: 'CONFIG_Q35', if_true: files('pc_q35.c')) i386_ss.add(when: 'CONFIG_VMMOUSE', if_true: files('vmmouse.c')) i386_ss.add(when: 'CONFIG_VMPORT', if_true: files('vmport.c')) i386_ss.add(when: 'CONFIG_VTD', if_true: files('intel_iommu.c')) +i386_ss.add(when: 'CONFIG_SGX', if_true: files('sgx-epc.c','sgx.c'), + if_false: files('sgx-stub.c')) i386_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi-common.c')) i386_ss.add(when: 'CONFIG_ACPI_HW_REDUCED', if_true: files('generic_event_device_x86.c')) diff --git a/hw/i386/microvm-dt.c b/hw/i386/microvm-dt.c new file mode 100644 index 0000000000..b3049e4f9f --- /dev/null +++ b/hw/i386/microvm-dt.c @@ -0,0 +1,349 @@ +/* + * microvm device tree support + * + * This generates a device tree for microvm and exports it via fw_cfg + * as "etc/fdt" to the firmware (edk2 specifically). + * + * The use case is to allow edk2 to find the pcie ecam and the virtio + * devices, without adding an ACPI parser, reusing the fdt parser + * which is needed anyway for the arm platform. + * + * Note 1: The device tree is incomplete. CPUs and memory are missing, + * for example; those can be detected using other fw_cfg files. + * Also pci ecam irq routing is not there; edk2 doesn't use + * interrupts. + * + * Note 2: This is for firmware only. OSes should use the more + * complete ACPI tables for hardware discovery. + * + * ---------------------------------------------------------------------- + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "qapi/error.h" +#include "sysemu/device_tree.h" +#include "hw/char/serial.h" +#include "hw/i386/fw_cfg.h" +#include "hw/rtc/mc146818rtc.h" +#include "hw/sysbus.h" +#include "hw/virtio/virtio-mmio.h" +#include "hw/usb/xhci.h" + +#include "microvm-dt.h" + +static bool debug; + +static void dt_add_microvm_irq(MicrovmMachineState *mms, + const char *nodename, uint32_t irq) +{ + int index = 0; + + if (irq >= IO_APIC_SECONDARY_IRQBASE) { + irq -= IO_APIC_SECONDARY_IRQBASE; + index++; + } + + qemu_fdt_setprop_cell(mms->fdt, nodename, "interrupt-parent", + mms->ioapic_phandle[index]); + qemu_fdt_setprop_cells(mms->fdt, nodename, "interrupts", irq, 0); +} + +static void dt_add_virtio(MicrovmMachineState *mms, VirtIOMMIOProxy *mmio) +{ + SysBusDevice *dev = SYS_BUS_DEVICE(mmio); + VirtioBusState *mmio_virtio_bus = &mmio->bus; + BusState *mmio_bus = &mmio_virtio_bus->parent_obj; + char *nodename; + + if (QTAILQ_EMPTY(&mmio_bus->children)) { + return; + } + + hwaddr base = dev->mmio[0].addr; + hwaddr size = 512; + unsigned index = (base - VIRTIO_MMIO_BASE) / size; + uint32_t irq = mms->virtio_irq_base + index; + + nodename = g_strdup_printf("/virtio_mmio@%" PRIx64, base); + qemu_fdt_add_subnode(mms->fdt, nodename); + qemu_fdt_setprop_string(mms->fdt, nodename, "compatible", "virtio,mmio"); + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "reg", 2, base, 2, size); + qemu_fdt_setprop(mms->fdt, nodename, "dma-coherent", NULL, 0); + dt_add_microvm_irq(mms, nodename, irq); + g_free(nodename); +} + +static void dt_add_xhci(MicrovmMachineState *mms) +{ + const char compat[] = "generic-xhci"; + uint32_t irq = MICROVM_XHCI_IRQ; + hwaddr base = MICROVM_XHCI_BASE; + hwaddr size = XHCI_LEN_REGS; + char *nodename; + + nodename = g_strdup_printf("/usb@%" PRIx64, base); + qemu_fdt_add_subnode(mms->fdt, nodename); + qemu_fdt_setprop(mms->fdt, nodename, "compatible", compat, sizeof(compat)); + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "reg", 2, base, 2, size); + qemu_fdt_setprop(mms->fdt, nodename, "dma-coherent", NULL, 0); + dt_add_microvm_irq(mms, nodename, irq); + g_free(nodename); +} + +static void dt_add_pcie(MicrovmMachineState *mms) +{ + hwaddr base = PCIE_MMIO_BASE; + int nr_pcie_buses; + char *nodename; + + nodename = g_strdup_printf("/pcie@%" PRIx64, base); + qemu_fdt_add_subnode(mms->fdt, nodename); + qemu_fdt_setprop_string(mms->fdt, nodename, + "compatible", "pci-host-ecam-generic"); + qemu_fdt_setprop_string(mms->fdt, nodename, "device_type", "pci"); + qemu_fdt_setprop_cell(mms->fdt, nodename, "#address-cells", 3); + qemu_fdt_setprop_cell(mms->fdt, nodename, "#size-cells", 2); + qemu_fdt_setprop_cell(mms->fdt, nodename, "linux,pci-domain", 0); + qemu_fdt_setprop(mms->fdt, nodename, "dma-coherent", NULL, 0); + + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "reg", + 2, PCIE_ECAM_BASE, 2, PCIE_ECAM_SIZE); + if (mms->gpex.mmio64.size) { + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "ranges", + + 1, FDT_PCI_RANGE_MMIO, + 2, mms->gpex.mmio32.base, + 2, mms->gpex.mmio32.base, + 2, mms->gpex.mmio32.size, + + 1, FDT_PCI_RANGE_MMIO_64BIT, + 2, mms->gpex.mmio64.base, + 2, mms->gpex.mmio64.base, + 2, mms->gpex.mmio64.size); + } else { + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "ranges", + + 1, FDT_PCI_RANGE_MMIO, + 2, mms->gpex.mmio32.base, + 2, mms->gpex.mmio32.base, + 2, mms->gpex.mmio32.size); + } + + nr_pcie_buses = PCIE_ECAM_SIZE / PCIE_MMCFG_SIZE_MIN; + qemu_fdt_setprop_cells(mms->fdt, nodename, "bus-range", 0, + 
nr_pcie_buses - 1); + + g_free(nodename); +} + +static void dt_add_ioapic(MicrovmMachineState *mms, SysBusDevice *dev) +{ + hwaddr base = dev->mmio[0].addr; + char *nodename; + uint32_t ph; + int index; + + switch (base) { + case IO_APIC_DEFAULT_ADDRESS: + index = 0; + break; + case IO_APIC_SECONDARY_ADDRESS: + index = 1; + break; + default: + fprintf(stderr, "unknown ioapic @ %" PRIx64 "\n", base); + return; + } + + nodename = g_strdup_printf("/ioapic%d@%" PRIx64, index + 1, base); + qemu_fdt_add_subnode(mms->fdt, nodename); + qemu_fdt_setprop_string(mms->fdt, nodename, + "compatible", "intel,ce4100-ioapic"); + qemu_fdt_setprop(mms->fdt, nodename, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(mms->fdt, nodename, "#interrupt-cells", 0x2); + qemu_fdt_setprop_cell(mms->fdt, nodename, "#address-cells", 0x2); + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "reg", + 2, base, 2, 0x1000); + + ph = qemu_fdt_alloc_phandle(mms->fdt); + qemu_fdt_setprop_cell(mms->fdt, nodename, "phandle", ph); + qemu_fdt_setprop_cell(mms->fdt, nodename, "linux,phandle", ph); + mms->ioapic_phandle[index] = ph; + + g_free(nodename); +} + +static void dt_add_isa_serial(MicrovmMachineState *mms, ISADevice *dev) +{ + const char compat[] = "ns16550"; + uint32_t irq = object_property_get_int(OBJECT(dev), "irq", &error_fatal); + hwaddr base = object_property_get_int(OBJECT(dev), "iobase", &error_fatal); + hwaddr size = 8; + char *nodename; + + nodename = g_strdup_printf("/serial@%" PRIx64, base); + qemu_fdt_add_subnode(mms->fdt, nodename); + qemu_fdt_setprop(mms->fdt, nodename, "compatible", compat, sizeof(compat)); + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "reg", 2, base, 2, size); + dt_add_microvm_irq(mms, nodename, irq); + + if (base == 0x3f8 /* com1 */) { + qemu_fdt_setprop_string(mms->fdt, "/chosen", "stdout-path", nodename); + } + + g_free(nodename); +} + +static void dt_add_isa_rtc(MicrovmMachineState *mms, ISADevice *dev) +{ + const char compat[] = "motorola,mc146818"; + uint32_t irq = object_property_get_uint(OBJECT(dev), "irq", &error_fatal); + hwaddr base = object_property_get_uint(OBJECT(dev), "iobase", &error_fatal); + hwaddr size = 8; + char *nodename; + + nodename = g_strdup_printf("/rtc@%" PRIx64, base); + qemu_fdt_add_subnode(mms->fdt, nodename); + qemu_fdt_setprop(mms->fdt, nodename, "compatible", compat, sizeof(compat)); + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "reg", 2, base, 2, size); + dt_add_microvm_irq(mms, nodename, irq); + g_free(nodename); +} + +static void dt_setup_isa_bus(MicrovmMachineState *mms, DeviceState *bridge) +{ + BusState *bus = qdev_get_child_bus(bridge, "isa.0"); + BusChild *kid; + Object *obj; + + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; + + /* serial */ + obj = object_dynamic_cast(OBJECT(dev), TYPE_ISA_SERIAL); + if (obj) { + dt_add_isa_serial(mms, ISA_DEVICE(obj)); + continue; + } + + /* rtc */ + obj = object_dynamic_cast(OBJECT(dev), TYPE_MC146818_RTC); + if (obj) { + dt_add_isa_rtc(mms, ISA_DEVICE(obj)); + continue; + } + + if (debug) { + fprintf(stderr, "%s: unhandled: %s\n", __func__, + object_get_typename(OBJECT(dev))); + } + } +} + +static void dt_setup_sys_bus(MicrovmMachineState *mms) +{ + BusState *bus; + BusChild *kid; + Object *obj; + + /* sysbus devices */ + bus = sysbus_get_default(); + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; + + /* ioapic */ + obj = object_dynamic_cast(OBJECT(dev), TYPE_IOAPIC); + if (obj) { + dt_add_ioapic(mms, SYS_BUS_DEVICE(obj)); + 
continue; + } + } + + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; + + /* virtio */ + obj = object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MMIO); + if (obj) { + dt_add_virtio(mms, VIRTIO_MMIO(obj)); + continue; + } + + /* xhci */ + obj = object_dynamic_cast(OBJECT(dev), TYPE_XHCI_SYSBUS); + if (obj) { + dt_add_xhci(mms); + continue; + } + + /* pcie */ + obj = object_dynamic_cast(OBJECT(dev), TYPE_GPEX_HOST); + if (obj) { + dt_add_pcie(mms); + continue; + } + + /* isa */ + obj = object_dynamic_cast(OBJECT(dev), "isabus-bridge"); + if (obj) { + dt_setup_isa_bus(mms, DEVICE(obj)); + continue; + } + + if (debug) { + obj = object_dynamic_cast(OBJECT(dev), TYPE_IOAPIC); + if (obj) { + /* ioapic already added in first pass */ + continue; + } + fprintf(stderr, "%s: unhandled: %s\n", __func__, + object_get_typename(OBJECT(dev))); + } + } +} + +void dt_setup_microvm(MicrovmMachineState *mms) +{ + X86MachineState *x86ms = X86_MACHINE(mms); + int size = 0; + + mms->fdt = create_device_tree(&size); + + /* root node */ + qemu_fdt_setprop_string(mms->fdt, "/", "compatible", "linux,microvm"); + qemu_fdt_setprop_cell(mms->fdt, "/", "#address-cells", 0x2); + qemu_fdt_setprop_cell(mms->fdt, "/", "#size-cells", 0x2); + + qemu_fdt_add_subnode(mms->fdt, "/chosen"); + dt_setup_sys_bus(mms); + + /* add to fw_cfg */ + if (debug) { + fprintf(stderr, "%s: add etc/fdt to fw_cfg\n", __func__); + } + fw_cfg_add_file(x86ms->fw_cfg, "etc/fdt", mms->fdt, size); + + if (debug) { + fprintf(stderr, "%s: writing microvm.fdt\n", __func__); + if (!g_file_set_contents("microvm.fdt", mms->fdt, size, NULL)) { + fprintf(stderr, "%s: writing microvm.fdt failed\n", __func__); + return; + } + int ret = system("dtc -I dtb -O dts microvm.fdt"); + if (ret != 0) { + fprintf(stderr, "%s: oops, dtc not installed?\n", __func__); + } + } +} diff --git a/hw/i386/microvm-dt.h b/hw/i386/microvm-dt.h new file mode 100644 index 0000000000..77c79cbdd9 --- /dev/null +++ b/hw/i386/microvm-dt.h @@ -0,0 +1,8 @@ +#ifndef HW_I386_MICROVM_DT_H +#define HW_I386_MICROVM_DT_H + +#include "hw/i386/microvm.h" + +void dt_setup_microvm(MicrovmMachineState *mms); + +#endif diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c index aba0c83219..b231ceda9a 100644 --- a/hw/i386/microvm.c +++ b/hw/i386/microvm.c @@ -28,6 +28,7 @@ #include "sysemu/reset.h" #include "sysemu/runstate.h" #include "acpi-microvm.h" +#include "microvm-dt.h" #include "hw/loader.h" #include "hw/irq.h" @@ -246,7 +247,7 @@ static void microvm_devices_init(MicrovmMachineState *mms) x86ms->pci_irq_mask = 0; } - if (mms->pic == ON_OFF_AUTO_ON || mms->pic == ON_OFF_AUTO_AUTO) { + if (x86ms->pic == ON_OFF_AUTO_ON || x86ms->pic == ON_OFF_AUTO_AUTO) { qemu_irq *i8259; i8259 = i8259_init(isa_bus, x86_allocate_cpu_irq()); @@ -256,7 +257,7 @@ static void microvm_devices_init(MicrovmMachineState *mms) g_free(i8259); } - if (mms->pit == ON_OFF_AUTO_ON || mms->pit == ON_OFF_AUTO_AUTO) { + if (x86ms->pit == ON_OFF_AUTO_ON || x86ms->pit == ON_OFF_AUTO_AUTO) { if (kvm_pit_in_kernel()) { kvm_pit_init(isa_bus, 0x40); } else { @@ -323,15 +324,13 @@ static void microvm_memory_init(MicrovmMachineState *mms) fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, machine->smp.max_cpus); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)machine->ram_size); fw_cfg_add_i32(fw_cfg, FW_CFG_IRQ0_OVERRIDE, 1); - fw_cfg_add_bytes(fw_cfg, FW_CFG_E820_TABLE, - &e820_reserve, sizeof(e820_reserve)); fw_cfg_add_file(fw_cfg, "etc/e820", e820_table, sizeof(struct e820_entry) * e820_get_num_entries()); 
rom_set_fw(fw_cfg); if (machine->kernel_filename != NULL) { - x86_load_linux(x86ms, fw_cfg, 0, true, true); + x86_load_linux(x86ms, fw_cfg, 0, true); } if (mms->option_roms) { @@ -458,20 +457,15 @@ static void microvm_machine_state_init(MachineState *machine) { MicrovmMachineState *mms = MICROVM_MACHINE(machine); X86MachineState *x86ms = X86_MACHINE(machine); - Error *local_err = NULL; microvm_memory_init(mms); x86_cpus_init(x86ms, CPU_VERSION_LATEST); - if (local_err) { - error_report_err(local_err); - exit(1); - } microvm_devices_init(mms); } -static void microvm_machine_reset(MachineState *machine) +static void microvm_machine_reset(MachineState *machine, ShutdownCause reason) { MicrovmMachineState *mms = MICROVM_MACHINE(machine); CPUState *cs; @@ -484,51 +478,15 @@ static void microvm_machine_reset(MachineState *machine) mms->kernel_cmdline_fixed = true; } - qemu_devices_reset(); + qemu_devices_reset(reason); CPU_FOREACH(cs) { cpu = X86_CPU(cs); - if (cpu->apic_state) { - device_legacy_reset(cpu->apic_state); - } + x86_cpu_after_reset(cpu); } } -static void microvm_machine_get_pic(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) -{ - MicrovmMachineState *mms = MICROVM_MACHINE(obj); - OnOffAuto pic = mms->pic; - - visit_type_OnOffAuto(v, name, &pic, errp); -} - -static void microvm_machine_set_pic(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) -{ - MicrovmMachineState *mms = MICROVM_MACHINE(obj); - - visit_type_OnOffAuto(v, name, &mms->pic, errp); -} - -static void microvm_machine_get_pit(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) -{ - MicrovmMachineState *mms = MICROVM_MACHINE(obj); - OnOffAuto pit = mms->pit; - - visit_type_OnOffAuto(v, name, &pit, errp); -} - -static void microvm_machine_set_pit(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) -{ - MicrovmMachineState *mms = MICROVM_MACHINE(obj); - - visit_type_OnOffAuto(v, name, &mms->pit, errp); -} - static void microvm_machine_get_rtc(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { @@ -631,6 +589,7 @@ static void microvm_machine_done(Notifier *notifier, void *data) machine_done); acpi_setup_microvm(mms); + dt_setup_microvm(mms); } static void microvm_powerdown_req(Notifier *notifier, void *data) @@ -652,8 +611,6 @@ static void microvm_machine_initfn(Object *obj) MicrovmMachineState *mms = MICROVM_MACHINE(obj); /* Configuration */ - mms->pic = ON_OFF_AUTO_AUTO; - mms->pit = ON_OFF_AUTO_AUTO; mms->rtc = ON_OFF_AUTO_AUTO; mms->pcie = ON_OFF_AUTO_AUTO; mms->ioapic2 = ON_OFF_AUTO_AUTO; @@ -670,8 +627,17 @@ static void microvm_machine_initfn(Object *obj) qemu_register_powerdown_notifier(&mms->powerdown_req); } +GlobalProperty microvm_properties[] = { + /* + * pcie host bridge (gpex) on microvm has no io address window, + * so reserving io space is not going to work. Turn it off. 
+ */ + { "pcie-root-port", "io-reserve", "0" }, +}; + static void microvm_class_init(ObjectClass *oc, void *data) { + X86MachineClass *x86mc = X86_MACHINE_CLASS(oc); MachineClass *mc = MACHINE_CLASS(oc); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); @@ -702,19 +668,7 @@ static void microvm_class_init(ObjectClass *oc, void *data) hc->unplug_request = microvm_device_unplug_request_cb; hc->unplug = microvm_device_unplug_cb; - object_class_property_add(oc, MICROVM_MACHINE_PIC, "OnOffAuto", - microvm_machine_get_pic, - microvm_machine_set_pic, - NULL, NULL); - object_class_property_set_description(oc, MICROVM_MACHINE_PIC, - "Enable i8259 PIC"); - - object_class_property_add(oc, MICROVM_MACHINE_PIT, "OnOffAuto", - microvm_machine_get_pit, - microvm_machine_set_pit, - NULL, NULL); - object_class_property_set_description(oc, MICROVM_MACHINE_PIT, - "Enable i8254 PIT"); + x86mc->fwcfg_dma_enabled = true; object_class_property_add(oc, MICROVM_MACHINE_RTC, "OnOffAuto", microvm_machine_get_rtc, @@ -757,6 +711,9 @@ static void microvm_class_init(ObjectClass *oc, void *data) "Set off to disable adding virtio-mmio devices to the kernel cmdline"); machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE); + + compat_props_add(mc->compat_props, microvm_properties, + G_N_ELEMENTS(microvm_properties)); } static const TypeInfo microvm_machine_info = { diff --git a/hw/i386/multiboot.c b/hw/i386/multiboot.c index 9e7d69d470..963e29362e 100644 --- a/hw/i386/multiboot.c +++ b/hw/i386/multiboot.c @@ -143,7 +143,8 @@ static void mb_add_mod(MultibootState *s, s->mb_mods_count++; } -int load_multiboot(FWCfgState *fw_cfg, +int load_multiboot(X86MachineState *x86ms, + FWCfgState *fw_cfg, FILE *f, const char *kernel_filename, const char *initrd_filename, @@ -151,6 +152,7 @@ int load_multiboot(FWCfgState *fw_cfg, int kernel_file_size, uint8_t *header) { + bool multiboot_dma_enabled = X86_MACHINE_GET_CLASS(x86ms)->fwcfg_dma_enabled; int i, is_multiboot = 0; uint32_t flags = 0; uint32_t mh_entry_addr; @@ -161,6 +163,7 @@ int load_multiboot(FWCfgState *fw_cfg, uint8_t *mb_bootinfo_data; uint32_t cmdline_len; GList *mods = NULL; + g_autofree char *kcmdline = NULL; /* Ok, let's see if it is a multiboot image. The header is 12x32bit long, so the latest entry may be 8192 - 48. 
*/ @@ -360,9 +363,7 @@ int load_multiboot(FWCfgState *fw_cfg, } /* Commandline support */ - char kcmdline[strlen(kernel_filename) + strlen(kernel_cmdline) + 2]; - snprintf(kcmdline, sizeof(kcmdline), "%s %s", - kernel_filename, kernel_cmdline); + kcmdline = g_strdup_printf("%s %s", kernel_filename, kernel_cmdline); stl_p(bootinfo + MBI_CMDLINE, mb_add_cmdline(&mbs, kcmdline)); stl_p(bootinfo + MBI_BOOTLOADER, mb_add_bootloader(&mbs, bootloader_name)); @@ -401,7 +402,11 @@ int load_multiboot(FWCfgState *fw_cfg, fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, mb_bootinfo_data, sizeof(bootinfo)); - option_rom[nb_option_roms].name = "multiboot.bin"; + if (multiboot_dma_enabled) { + option_rom[nb_option_roms].name = "multiboot_dma.bin"; + } else { + option_rom[nb_option_roms].name = "multiboot.bin"; + } option_rom[nb_option_roms].bootindex = 0; nb_option_roms++; diff --git a/hw/i386/multiboot.h b/hw/i386/multiboot.h index 60de309cd1..2b9182a8ea 100644 --- a/hw/i386/multiboot.h +++ b/hw/i386/multiboot.h @@ -2,8 +2,10 @@ #define QEMU_MULTIBOOT_H #include "hw/nvram/fw_cfg.h" +#include "hw/i386/x86.h" -int load_multiboot(FWCfgState *fw_cfg, +int load_multiboot(X86MachineState *x86ms, + FWCfgState *fw_cfg, FILE *f, const char *kernel_filename, const char *initrd_filename, diff --git a/hw/i386/pc.c b/hw/i386/pc.c index c2b9d62a35..ec5a10534b 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -37,6 +37,7 @@ #include "hw/ide.h" #include "hw/pci/pci.h" #include "hw/pci/pci_bus.h" +#include "hw/pci-bridge/pci_expander_bridge.h" #include "hw/nvram/fw_cfg.h" #include "hw/timer/hpet.h" #include "hw/firmware/smbios.h" @@ -46,7 +47,6 @@ #include "multiboot.h" #include "hw/rtc/mc146818rtc.h" #include "hw/intc/i8259.h" -#include "hw/dma/i8257.h" #include "hw/timer/i8254.h" #include "hw/input/i8042.h" #include "hw/irq.h" @@ -65,7 +65,6 @@ #include "hw/xen/start_info.h" #include "ui/qemu-spice.h" #include "exec/memory.h" -#include "sysemu/arch_init.h" #include "qemu/bitmap.h" #include "qemu/config-file.h" #include "qemu/error-report.h" @@ -76,30 +75,64 @@ #include "acpi-build.h" #include "hw/mem/pc-dimm.h" #include "hw/mem/nvdimm.h" +#include "hw/cxl/cxl.h" +#include "hw/cxl/cxl_host.h" #include "qapi/error.h" #include "qapi/qapi-visit-common.h" +#include "qapi/qapi-visit-machine.h" #include "qapi/visitor.h" #include "hw/core/cpu.h" #include "hw/usb.h" #include "hw/i386/intel_iommu.h" #include "hw/net/ne2000-isa.h" #include "standard-headers/asm-x86/bootparam.h" +#include "hw/virtio/virtio-iommu.h" #include "hw/virtio/virtio-pmem-pci.h" #include "hw/virtio/virtio-mem-pci.h" #include "hw/mem/memory-device.h" #include "sysemu/replay.h" +#include "target/i386/cpu.h" #include "qapi/qmp/qerror.h" #include "e820_memory_layout.h" #include "fw_cfg.h" #include "trace.h" #include CONFIG_DEVICES +/* + * Helper for setting model-id for CPU models that changed model-id + * depending on QEMU versions up to QEMU 2.4. 
+ */ +#define PC_CPU_MODEL_IDS(v) \ + { "qemu32-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },\ + { "qemu64-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },\ + { "athlon-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, }, + +GlobalProperty pc_compat_7_1[] = {}; +const size_t pc_compat_7_1_len = G_N_ELEMENTS(pc_compat_7_1); + +GlobalProperty pc_compat_7_0[] = {}; +const size_t pc_compat_7_0_len = G_N_ELEMENTS(pc_compat_7_0); + +GlobalProperty pc_compat_6_2[] = { + { "virtio-mem", "unplugged-inaccessible", "off" }, +}; +const size_t pc_compat_6_2_len = G_N_ELEMENTS(pc_compat_6_2); + +GlobalProperty pc_compat_6_1[] = { + { TYPE_X86_CPU, "hv-version-id-build", "0x1bbc" }, + { TYPE_X86_CPU, "hv-version-id-major", "0x0006" }, + { TYPE_X86_CPU, "hv-version-id-minor", "0x0001" }, + { "ICH9-LPC", "x-keep-pci-slot-hpc", "false" }, +}; +const size_t pc_compat_6_1_len = G_N_ELEMENTS(pc_compat_6_1); + GlobalProperty pc_compat_6_0[] = { { "qemu64" "-" TYPE_X86_CPU, "family", "6" }, { "qemu64" "-" TYPE_X86_CPU, "model", "6" }, { "qemu64" "-" TYPE_X86_CPU, "stepping", "3" }, { TYPE_X86_CPU, "x-vendor-cpuid-only", "off" }, - { "ICH9-LPC", "acpi-pci-hotplug-with-bridge-support", "off" }, + { "ICH9-LPC", ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, "off" }, + { "ICH9-LPC", "x-keep-pci-slot-hpc", "true" }, }; const size_t pc_compat_6_0_len = G_N_ELEMENTS(pc_compat_6_0); @@ -303,15 +336,13 @@ GlobalProperty pc_compat_2_0[] = { { "pci-serial-4x", "prog_if", "0" }, { "virtio-net-pci", "guest_announce", "off" }, { "ICH9-LPC", "memory-hotplug-support", "off" }, - { "xio3130-downstream", COMPAT_PROP_PCP, "off" }, - { "ioh3420", COMPAT_PROP_PCP, "off" }, }; const size_t pc_compat_2_0_len = G_N_ELEMENTS(pc_compat_2_0); GlobalProperty pc_compat_1_7[] = { PC_CPU_MODEL_IDS("1.7.0") { TYPE_USB_DEVICE, "msos-desc", "no" }, - { "PIIX4_PM", "acpi-pci-hotplug-with-bridge-support", "off" }, + { "PIIX4_PM", ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, "off" }, { "hpet", HPET_INTCAP, "4" }, }; const size_t pc_compat_1_7_len = G_N_ELEMENTS(pc_compat_1_7); @@ -544,7 +575,7 @@ static const char * const fdc_container_path[] = { * Locate the FDC at IO address 0x3f0, in order to configure the CMOS registers * and ACPI objects. */ -ISADevice *pc_find_fdc0(void) +static ISADevice *pc_find_fdc0(void) { int i; Object *container; @@ -659,7 +690,7 @@ void pc_cmos_init(PCMachineState *pcms, object_property_set_link(OBJECT(pcms), "rtc_state", OBJECT(s), &error_abort); - set_boot_dev(s, MACHINE(pcms)->boot_order, &error_fatal); + set_boot_dev(s, MACHINE(pcms)->boot_config.order, &error_fatal); val = 0; val |= 0x02; /* FPU is there */ @@ -688,7 +719,7 @@ static const int ne2000_io[NE2000_NB_MAX] = { 0x300, 0x320, 0x340, 0x360, 0x280, 0x380 }; static const int ne2000_irq[NE2000_NB_MAX] = { 9, 10, 11, 3, 4, 5 }; -void pc_init_ne2k_isa(ISABus *bus, NICInfo *nd) +static void pc_init_ne2k_isa(ISABus *bus, NICInfo *nd) { static int nb_ne2k = 0; @@ -708,67 +739,6 @@ void pc_acpi_smi_interrupt(void *opaque, int irq, int level) } } -/* - * This function is very similar to smp_parse() - * in hw/core/machine.c but includes CPU die support. - */ -static void pc_smp_parse(MachineState *ms, SMPConfiguration *config, Error **errp) -{ - unsigned cpus = config->has_cpus ? config->cpus : 0; - unsigned sockets = config->has_sockets ? config->sockets : 0; - unsigned dies = config->has_dies ? config->dies : 1; - unsigned cores = config->has_cores ? config->cores : 0; - unsigned threads = config->has_threads ? 
config->threads : 0; - - /* compute missing values, prefer sockets over cores over threads */ - if (cpus == 0 || sockets == 0) { - cores = cores > 0 ? cores : 1; - threads = threads > 0 ? threads : 1; - if (cpus == 0) { - sockets = sockets > 0 ? sockets : 1; - cpus = cores * threads * dies * sockets; - } else { - ms->smp.max_cpus = config->has_maxcpus ? config->maxcpus : cpus; - sockets = ms->smp.max_cpus / (cores * threads * dies); - } - } else if (cores == 0) { - threads = threads > 0 ? threads : 1; - cores = cpus / (sockets * dies * threads); - cores = cores > 0 ? cores : 1; - } else if (threads == 0) { - threads = cpus / (cores * dies * sockets); - threads = threads > 0 ? threads : 1; - } else if (sockets * dies * cores * threads < cpus) { - error_setg(errp, "cpu topology: " - "sockets (%u) * dies (%u) * cores (%u) * threads (%u) < " - "smp_cpus (%u)", - sockets, dies, cores, threads, cpus); - return; - } - - ms->smp.max_cpus = config->has_maxcpus ? config->maxcpus : cpus; - - if (ms->smp.max_cpus < cpus) { - error_setg(errp, "maxcpus must be equal to or greater than smp"); - return; - } - - if (sockets * dies * cores * threads != ms->smp.max_cpus) { - error_setg(errp, "Invalid CPU topology deprecated: " - "sockets (%u) * dies (%u) * cores (%u) * threads (%u) " - "!= maxcpus (%u)", - sockets, dies, cores, threads, - ms->smp.max_cpus); - return; - } - - ms->smp.cpus = cpus; - ms->smp.cores = cores; - ms->smp.threads = threads; - ms->smp.sockets = sockets; - ms->smp.dies = dies; -} - static void pc_machine_done(Notifier *notifier, void *data) { @@ -776,6 +746,13 @@ void pc_machine_done(Notifier *notifier, void *data) PCMachineState, machine_done); X86MachineState *x86ms = X86_MACHINE(pcms); + cxl_hook_up_pxb_registers(pcms->bus, &pcms->cxl_devices_state, + &error_fatal); + + if (pcms->cxl_devices_state.is_enabled) { + cxl_fmws_link_targets(&pcms->cxl_devices_state, &error_fatal); + } + /* set the number of CPUs */ x86_rtc_set_cpus_count(x86ms->rtc, x86ms->boot_cpus); @@ -788,30 +765,13 @@ void pc_machine_done(Notifier *notifier, void *data) /* update FW_CFG_NB_CPUS to account for -device added CPUs */ fw_cfg_modify_i16(x86ms->fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus); } - - - if (x86ms->apic_id_limit > 255 && !xen_enabled() && - !kvm_irqchip_in_kernel()) { - error_report("current -smp configuration requires kernel " - "irqchip support."); - exit(EXIT_FAILURE); - } } void pc_guest_info_init(PCMachineState *pcms) { - int i; - MachineState *ms = MACHINE(pcms); X86MachineState *x86ms = X86_MACHINE(pcms); x86ms->apic_xrupt_override = true; - pcms->numa_nodes = ms->numa_state->num_nodes; - pcms->node_mem = g_malloc0(pcms->numa_nodes * - sizeof *pcms->node_mem); - for (i = 0; i < ms->numa_state->num_nodes; i++) { - pcms->node_mem[i] = ms->numa_state->nodes[i].node_mem; - } - pcms->machine_done.notify = pc_machine_done; qemu_add_machine_init_done_notifier(&pcms->machine_done); } @@ -839,21 +799,140 @@ void xen_load_linux(PCMachineState *pcms) rom_set_fw(fw_cfg); x86_load_linux(x86ms, fw_cfg, pcmc->acpi_data_size, - pcmc->pvh_enabled, pcmc->linuxboot_dma_enabled); + pcmc->pvh_enabled); for (i = 0; i < nb_option_roms; i++) { assert(!strcmp(option_rom[i].name, "linuxboot.bin") || !strcmp(option_rom[i].name, "linuxboot_dma.bin") || !strcmp(option_rom[i].name, "pvh.bin") || - !strcmp(option_rom[i].name, "multiboot.bin")); + !strcmp(option_rom[i].name, "multiboot.bin") || + !strcmp(option_rom[i].name, "multiboot_dma.bin")); rom_add_option(option_rom[i].name, option_rom[i].bootindex); } x86ms->fw_cfg = 
fw_cfg; } +#define PC_ROM_MIN_VGA 0xc0000 +#define PC_ROM_MIN_OPTION 0xc8000 +#define PC_ROM_MAX 0xe0000 +#define PC_ROM_ALIGN 0x800 +#define PC_ROM_SIZE (PC_ROM_MAX - PC_ROM_MIN_VGA) + +static hwaddr pc_above_4g_end(PCMachineState *pcms) +{ + X86MachineState *x86ms = X86_MACHINE(pcms); + + if (pcms->sgx_epc.size != 0) { + return sgx_epc_above_4g_end(&pcms->sgx_epc); + } + + return x86ms->above_4g_mem_start + x86ms->above_4g_mem_size; +} + +static void pc_get_device_memory_range(PCMachineState *pcms, + hwaddr *base, + ram_addr_t *device_mem_size) +{ + PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + MachineState *machine = MACHINE(pcms); + ram_addr_t size; + hwaddr addr; + + size = machine->maxram_size - machine->ram_size; + addr = ROUND_UP(pc_above_4g_end(pcms), 1 * GiB); + + if (pcmc->enforce_aligned_dimm) { + /* size device region assuming 1G page max alignment per slot */ + size += (1 * GiB) * machine->ram_slots; + } + + *base = addr; + *device_mem_size = size; +} + +static uint64_t pc_get_cxl_range_start(PCMachineState *pcms) +{ + PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + hwaddr cxl_base; + ram_addr_t size; + + if (pcmc->has_reserved_memory) { + pc_get_device_memory_range(pcms, &cxl_base, &size); + cxl_base += size; + } else { + cxl_base = pc_above_4g_end(pcms); + } + + return cxl_base; +} + +static uint64_t pc_get_cxl_range_end(PCMachineState *pcms) +{ + uint64_t start = pc_get_cxl_range_start(pcms) + MiB; + + if (pcms->cxl_devices_state.fixed_windows) { + GList *it; + + start = ROUND_UP(start, 256 * MiB); + for (it = pcms->cxl_devices_state.fixed_windows; it; it = it->next) { + CXLFixedWindow *fw = it->data; + start += fw->size; + } + } + + return start; +} + +static hwaddr pc_max_used_gpa(PCMachineState *pcms, uint64_t pci_hole64_size) +{ + X86CPU *cpu = X86_CPU(first_cpu); + + /* 32-bit systems don't have hole64 thus return max CPU address */ + if (cpu->phys_bits <= 32) { + return ((hwaddr)1 << cpu->phys_bits) - 1; + } + + return pc_pci_hole64_start() + pci_hole64_size - 1; +} + +/* + * AMD systems with an IOMMU have an additional hole close to the + * 1Tb, which are special GPAs that cannot be DMA mapped. Depending + * on kernel version, VFIO may or may not let you DMA map those ranges. + * Starting Linux v5.4 we validate it, and can't create guests on AMD machines + * with certain memory sizes. It's also wrong to use those IOVA ranges + * in detriment of leading to IOMMU INVALID_DEVICE_REQUEST or worse. + * The ranges reserved for Hyper-Transport are: + * + * FD_0000_0000h - FF_FFFF_FFFFh + * + * The ranges represent the following: + * + * Base Address Top Address Use + * + * FD_0000_0000h FD_F7FF_FFFFh Reserved interrupt address space + * FD_F800_0000h FD_F8FF_FFFFh Interrupt/EOI IntCtl + * FD_F900_0000h FD_F90F_FFFFh Legacy PIC IACK + * FD_F910_0000h FD_F91F_FFFFh System Management + * FD_F920_0000h FD_FAFF_FFFFh Reserved Page Tables + * FD_FB00_0000h FD_FBFF_FFFFh Address Translation + * FD_FC00_0000h FD_FDFF_FFFFh I/O Space + * FD_FE00_0000h FD_FFFF_FFFFh Configuration + * FE_0000_0000h FE_1FFF_FFFFh Extended Configuration/Device Messages + * FE_2000_0000h FF_FFFF_FFFFh Reserved + * + * See AMD IOMMU spec, section 2.1.2 "IOMMU Logical Topology", + * Table 3: Special Address Controls (GPA) for more information. 
+ */ +#define AMD_HT_START 0xfd00000000UL +#define AMD_HT_END 0xffffffffffUL +#define AMD_ABOVE_1TB_START (AMD_HT_END + 1) +#define AMD_HT_SIZE (AMD_ABOVE_1TB_START - AMD_HT_START) + void pc_memory_init(PCMachineState *pcms, MemoryRegion *system_memory, MemoryRegion *rom_memory, - MemoryRegion **ram_memory) + MemoryRegion **ram_memory, + uint64_t pci_hole64_size) { int linux_boot, i; MemoryRegion *option_rom_mr; @@ -863,12 +942,49 @@ void pc_memory_init(PCMachineState *pcms, MachineClass *mc = MACHINE_GET_CLASS(machine); PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); X86MachineState *x86ms = X86_MACHINE(pcms); + hwaddr maxphysaddr, maxusedaddr; + hwaddr cxl_base, cxl_resv_end = 0; + X86CPU *cpu = X86_CPU(first_cpu); assert(machine->ram_size == x86ms->below_4g_mem_size + x86ms->above_4g_mem_size); linux_boot = (machine->kernel_filename != NULL); + /* + * The HyperTransport range close to the 1T boundary is unique to AMD + * hosts with IOMMUs enabled. Restrict the ram-above-4g relocation + * to above 1T to AMD vCPUs only. @enforce_amd_1tb_hole is only false in + * older machine types (<= 7.0) for compatibility purposes. + */ + if (IS_AMD_CPU(&cpu->env) && pcmc->enforce_amd_1tb_hole) { + /* Bail out if max possible address does not cross HT range */ + if (pc_max_used_gpa(pcms, pci_hole64_size) >= AMD_HT_START) { + x86ms->above_4g_mem_start = AMD_ABOVE_1TB_START; + } + + /* + * Advertise the HT region if address space covers the reserved + * region or if we relocate. + */ + if (cpu->phys_bits >= 40) { + e820_add_entry(AMD_HT_START, AMD_HT_SIZE, E820_RESERVED); + } + } + + /* + * phys-bits is required to be appropriately configured + * to make sure max used GPA is reachable. + */ + maxusedaddr = pc_max_used_gpa(pcms, pci_hole64_size); + maxphysaddr = ((hwaddr)1 << cpu->phys_bits) - 1; + if (maxphysaddr < maxusedaddr) { + error_report("Address space limit 0x%"PRIx64" < 0x%"PRIx64 + " phys-bits too low (%u)", + maxphysaddr, maxusedaddr, cpu->phys_bits); + exit(EXIT_FAILURE); + } + /* * Split single memory region and use aliases to address portions of it, * done for backwards compatibility with older qemus. 
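The hunk above first computes the highest guest physical address the configuration will use, relocates above-4G RAM past the AMD HyperTransport hole when an AMD vCPU would otherwise overlap it, and then refuses to start if phys-bits cannot reach that address. The standalone sketch below only models that arithmetic with made-up RAM and PCI-hole sizes; it is not the QEMU code path and ignores device memory, SGX EPC and CXL windows when placing the 64-bit hole.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Constants as added to hw/i386/pc.c above. */
#define AMD_HT_START        0xfd00000000ULL
#define AMD_HT_END          0xffffffffffULL
#define AMD_ABOVE_1TB_START (AMD_HT_END + 1)

#define GIB (1ULL << 30)

static uint64_t round_up(uint64_t v, uint64_t align)
{
    return (v + align - 1) & ~(align - 1);
}

/* Simplified stand-in for pc_max_used_gpa(): end of above-4G RAM, 1 GiB
 * aligned, plus the 64-bit PCI hole.  The real helper also accounts for
 * device memory, SGX EPC and CXL fixed windows. */
static uint64_t max_used_gpa(uint64_t above_4g_start, uint64_t above_4g_size,
                             uint64_t pci_hole64_size, unsigned phys_bits)
{
    if (phys_bits <= 32) {
        /* 32-bit CPUs have no 64-bit hole; the CPU limit is the ceiling. */
        return (1ULL << phys_bits) - 1;
    }
    uint64_t hole64_start = round_up(above_4g_start + above_4g_size, GIB);
    return hole64_start + pci_hole64_size - 1;
}

int main(void)
{
    /* Hypothetical guest: 2 TiB of RAM above 4G, 1 TiB PCI hole, AMD vCPU
     * with 43 physical address bits.  All values are illustrative only. */
    unsigned phys_bits      = 43;
    bool     amd_vcpu       = true;
    uint64_t above_4g_start = 4 * GIB;
    uint64_t above_4g_size  = 2048 * GIB;
    uint64_t pci_hole64     = 1024 * GIB;

    if (amd_vcpu && max_used_gpa(above_4g_start, above_4g_size,
                                 pci_hole64, phys_bits) >= AMD_HT_START) {
        /* Same relocation as the patch: start above-4G RAM past the hole. */
        above_4g_start = AMD_ABOVE_1TB_START;
    }

    uint64_t maxused = max_used_gpa(above_4g_start, above_4g_size,
                                    pci_hole64, phys_bits);
    uint64_t maxphys = (1ULL << phys_bits) - 1;
    if (maxphys < maxused) {
        printf("phys-bits %u too low: 0x%" PRIx64 " < 0x%" PRIx64 "\n",
               phys_bits, maxphys, maxused);
        return 1;
    }
    printf("above-4G RAM at 0x%" PRIx64 ", max used GPA 0x%" PRIx64 "\n",
           above_4g_start, maxused);
    return 0;
}

With the illustrative values the relocation triggers (the unrelocated layout crosses 0xfd00000000) and the final check passes, since 43 phys-bits cover the roughly 4 TiB of used address space.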
@@ -885,9 +1001,14 @@ void pc_memory_init(PCMachineState *pcms, machine->ram, x86ms->below_4g_mem_size, x86ms->above_4g_mem_size); - memory_region_add_subregion(system_memory, 0x100000000ULL, + memory_region_add_subregion(system_memory, x86ms->above_4g_mem_start, ram_above_4g); - e820_add_entry(0x100000000ULL, x86ms->above_4g_mem_size, E820_RAM); + e820_add_entry(x86ms->above_4g_mem_start, x86ms->above_4g_mem_size, + E820_RAM); + } + + if (pcms->sgx_epc.size != 0) { + e820_add_entry(pcms->sgx_epc.base, pcms->sgx_epc.size, E820_RESERVED); } if (!pcmc->has_reserved_memory && @@ -905,7 +1026,7 @@ void pc_memory_init(PCMachineState *pcms, /* initialize device memory address space */ if (pcmc->has_reserved_memory && (machine->ram_size < machine->maxram_size)) { - ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size; + ram_addr_t device_mem_size; if (machine->ram_slots > ACPI_MAX_RAM_SLOTS) { error_report("unsupported amount of memory slots: %"PRIu64, @@ -920,13 +1041,7 @@ void pc_memory_init(PCMachineState *pcms, exit(EXIT_FAILURE); } - machine->device_memory->base = - ROUND_UP(0x100000000ULL + x86ms->above_4g_mem_size, 1 * GiB); - - if (pcmc->enforce_aligned_dimm) { - /* size device region assuming 1G page max alignment per slot */ - device_mem_size += (1 * GiB) * machine->ram_slots; - } + pc_get_device_memory_range(pcms, &machine->device_memory->base, &device_mem_size); if ((machine->device_memory->base + device_mem_size) < device_mem_size) { @@ -941,6 +1056,32 @@ void pc_memory_init(PCMachineState *pcms, &machine->device_memory->mr); } + if (pcms->cxl_devices_state.is_enabled) { + MemoryRegion *mr = &pcms->cxl_devices_state.host_mr; + hwaddr cxl_size = MiB; + + cxl_base = pc_get_cxl_range_start(pcms); + memory_region_init(mr, OBJECT(machine), "cxl_host_reg", cxl_size); + memory_region_add_subregion(system_memory, cxl_base, mr); + cxl_resv_end = cxl_base + cxl_size; + if (pcms->cxl_devices_state.fixed_windows) { + hwaddr cxl_fmw_base; + GList *it; + + cxl_fmw_base = ROUND_UP(cxl_base + cxl_size, 256 * MiB); + for (it = pcms->cxl_devices_state.fixed_windows; it; it = it->next) { + CXLFixedWindow *fw = it->data; + + fw->base = cxl_fmw_base; + memory_region_init_io(&fw->mr, OBJECT(machine), &cfmws_ops, fw, + "cxl-fixed-memory-region", fw->size); + memory_region_add_subregion(system_memory, fw->base, &fw->mr); + cxl_fmw_base += fw->size; + cxl_resv_end = cxl_fmw_base; + } + } + } + /* Initialize PC system firmware */ pc_system_firmware_init(pcms, rom_memory); @@ -968,13 +1109,17 @@ void pc_memory_init(PCMachineState *pcms, if (!pcmc->broken_reserved_end) { res_mem_end += memory_region_size(&machine->device_memory->mr); } + + if (pcms->cxl_devices_state.is_enabled) { + res_mem_end = cxl_resv_end; + } *val = cpu_to_le64(ROUND_UP(res_mem_end, 1 * GiB)); fw_cfg_add_file(fw_cfg, "etc/reserved-memory-end", val, sizeof(*val)); } if (linux_boot) { x86_load_linux(x86ms, fw_cfg, pcmc->acpi_data_size, - pcmc->pvh_enabled, pcmc->linuxboot_dma_enabled); + pcmc->pvh_enabled); } for (i = 0; i < nb_option_roms; i++) { @@ -998,16 +1143,18 @@ uint64_t pc_pci_hole64_start(void) PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); MachineState *ms = MACHINE(pcms); - X86MachineState *x86ms = X86_MACHINE(pcms); uint64_t hole64_start = 0; + ram_addr_t size = 0; - if (pcmc->has_reserved_memory && ms->device_memory->base) { - hole64_start = ms->device_memory->base; + if (pcms->cxl_devices_state.is_enabled) { + hole64_start = pc_get_cxl_range_end(pcms); 
+ } else if (pcmc->has_reserved_memory && (ms->ram_size < ms->maxram_size)) { + pc_get_device_memory_range(pcms, &hole64_start, &size); if (!pcmc->broken_reserved_end) { - hole64_start += memory_region_size(&ms->device_memory->mr); + hole64_start += size; } } else { - hole64_start = 0x100000000ULL + x86ms->above_4g_mem_size; + hole64_start = pc_above_4g_end(pcms); } return ROUND_UP(hole64_start, 1 * GiB); @@ -1049,7 +1196,8 @@ static const MemoryRegionOps ioportF0_io_ops = { }, }; -static void pc_superio_init(ISABus *isa_bus, bool create_fdctrl, bool no_vmport) +static void pc_superio_init(ISABus *isa_bus, bool create_fdctrl, + bool create_i8042, bool no_vmport) { int i; DriveInfo *fd[MAX_FD]; @@ -1071,7 +1219,11 @@ static void pc_superio_init(ISABus *isa_bus, bool create_fdctrl, bool no_vmport) } } - i8042 = isa_create_simple(isa_bus, "i8042"); + if (!create_i8042) { + return; + } + + i8042 = isa_create_simple(isa_bus, TYPE_I8042); if (!no_vmport) { isa_create_simple(isa_bus, TYPE_VMPORT); vmmouse = isa_try_new("vmmouse"); @@ -1079,7 +1231,7 @@ static void pc_superio_init(ISABus *isa_bus, bool create_fdctrl, bool no_vmport) vmmouse = NULL; } if (vmmouse) { - object_property_set_link(OBJECT(vmmouse), "i8042", OBJECT(i8042), + object_property_set_link(OBJECT(vmmouse), TYPE_I8042, OBJECT(i8042), &error_abort); isa_realize_and_unref(vmmouse, isa_bus, &error_fatal); } @@ -1106,6 +1258,7 @@ void pc_basic_device_init(struct PCMachineState *pcms, ISADevice *pit = NULL; MemoryRegion *ioport80_io = g_new(MemoryRegion, 1); MemoryRegion *ioportF0_io = g_new(MemoryRegion, 1); + X86MachineState *x86ms = X86_MACHINE(pcms); memory_region_init_io(ioport80_io, NULL, &ioport80_io_ops, NULL, "ioport80", 1); memory_region_add_subregion(isa_bus->address_space_io, 0x80, ioport80_io); @@ -1150,7 +1303,8 @@ void pc_basic_device_init(struct PCMachineState *pcms, qemu_register_boot_set(pc_boot_set, *rtc_state); - if (!xen_enabled() && pcms->pit_enabled) { + if (!xen_enabled() && + (x86ms->pit == ON_OFF_AUTO_AUTO || x86ms->pit == ON_OFF_AUTO_ON)) { if (kvm_pit_in_kernel()) { pit = kvm_pit_init(isa_bus, 0x40); } else { @@ -1163,10 +1317,9 @@ void pc_basic_device_init(struct PCMachineState *pcms, pcspk_init(pcms->pcspk, isa_bus, pit); } - i8257_dma_init(isa_bus, 0); - /* Super I/O */ - pc_superio_init(isa_bus, create_fdctrl, pcms->vmport != ON_OFF_AUTO_ON); + pc_superio_init(isa_bus, create_fdctrl, pcms->i8042_enabled, + pcms->vmport != ON_OFF_AUTO_ON); } void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus) @@ -1375,6 +1528,27 @@ static void pc_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev, } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI) || object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) { pc_virtio_md_pci_pre_plug(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { + /* Declare the APIC range as the reserved MSI region */ + char *resv_prop_str = g_strdup_printf("0xfee00000:0xfeefffff:%d", + VIRTIO_IOMMU_RESV_MEM_T_MSI); + + object_property_set_uint(OBJECT(dev), "len-reserved-regions", 1, errp); + object_property_set_str(OBJECT(dev), "reserved-regions[0]", + resv_prop_str, errp); + g_free(resv_prop_str); + } + + if (object_dynamic_cast(OBJECT(dev), TYPE_X86_IOMMU_DEVICE) || + object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { + PCMachineState *pcms = PC_MACHINE(hotplug_dev); + + if (pcms->iommu) { + error_setg(errp, "QEMU does not support multiple vIOMMUs " + "for x86 yet."); + return; + } + pcms->iommu = dev; } 
} @@ -1429,7 +1603,9 @@ static HotplugHandler *pc_get_hotplug_handler(MachineState *machine, if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) || object_dynamic_cast(OBJECT(dev), TYPE_CPU) || object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI) || - object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) { + object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI) || + object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI) || + object_dynamic_cast(OBJECT(dev), TYPE_X86_IOMMU_DEVICE)) { return HOTPLUG_HANDLER(machine); } @@ -1496,20 +1672,6 @@ static void pc_machine_set_sata(Object *obj, bool value, Error **errp) pcms->sata_enabled = value; } -static bool pc_machine_get_pit(Object *obj, Error **errp) -{ - PCMachineState *pcms = PC_MACHINE(obj); - - return pcms->pit_enabled; -} - -static void pc_machine_set_pit(Object *obj, bool value, Error **errp) -{ - PCMachineState *pcms = PC_MACHINE(obj); - - pcms->pit_enabled = value; -} - static bool pc_machine_get_hpet(Object *obj, Error **errp) { PCMachineState *pcms = PC_MACHINE(obj); @@ -1524,6 +1686,20 @@ static void pc_machine_set_hpet(Object *obj, bool value, Error **errp) pcms->hpet_enabled = value; } +static bool pc_machine_get_i8042(Object *obj, Error **errp) +{ + PCMachineState *pcms = PC_MACHINE(obj); + + return pcms->i8042_enabled; +} + +static void pc_machine_set_i8042(Object *obj, bool value, Error **errp) +{ + PCMachineState *pcms = PC_MACHINE(obj); + + pcms->i8042_enabled = value; +} + static bool pc_machine_get_default_bus_bypass_iommu(Object *obj, Error **errp) { PCMachineState *pcms = PC_MACHINE(obj); @@ -1539,6 +1715,23 @@ static void pc_machine_set_default_bus_bypass_iommu(Object *obj, bool value, pcms->default_bus_bypass_iommu = value; } +static void pc_machine_get_smbios_ep(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + PCMachineState *pcms = PC_MACHINE(obj); + SmbiosEntryPointType smbios_entry_point_type = pcms->smbios_entry_point_type; + + visit_type_SmbiosEntryPointType(v, name, &smbios_entry_point_type, errp); +} + +static void pc_machine_set_smbios_ep(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + PCMachineState *pcms = PC_MACHINE(obj); + + visit_type_SmbiosEntryPointType(v, name, &pcms->smbios_entry_point_type, errp); +} + static void pc_machine_get_max_ram_below_4g(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) @@ -1629,11 +1822,13 @@ static void pc_machine_initfn(Object *obj) pcms->vmport = ON_OFF_AUTO_OFF; #endif /* CONFIG_VMPORT */ pcms->max_ram_below_4g = 0; /* use default */ + pcms->smbios_entry_point_type = SMBIOS_ENTRY_POINT_TYPE_32; + /* acpi build is enabled by default if machine supports it */ pcms->acpi_build_enabled = PC_MACHINE_GET_CLASS(pcms)->has_acpi_build; pcms->smbus_enabled = true; pcms->sata_enabled = true; - pcms->pit_enabled = true; + pcms->i8042_enabled = true; pcms->max_fw_size = 8 * MiB; #ifdef CONFIG_HPET pcms->hpet_enabled = true; @@ -1644,14 +1839,15 @@ static void pc_machine_initfn(Object *obj) pcms->pcspk = isa_new(TYPE_PC_SPEAKER); object_property_add_alias(OBJECT(pcms), "pcspk-audiodev", OBJECT(pcms->pcspk), "audiodev"); + cxl_machine_init(obj, &pcms->cxl_devices_state); } -static void pc_machine_reset(MachineState *machine) +static void pc_machine_reset(MachineState *machine, ShutdownCause reason) { CPUState *cs; X86CPU *cpu; - qemu_devices_reset(); + qemu_devices_reset(reason); /* Reset APIC after devices have been reset to cancel * any changes that qemu_devices_reset() might have done. 
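The pc.c hunks above chain several helpers to lay out the upper guest address space: hotplug (device) memory starts at the next 1 GiB boundary after above-4G RAM and is padded by 1 GiB per memory slot, the CXL host-register block and any fixed memory windows follow (256 MiB aligned after a 1 MiB register block), and the 64-bit PCI hole begins at the next 1 GiB boundary after that. A small standalone model of that arithmetic with invented sizes, ignoring SGX and the no-CXL fallback paths, looks like this:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MIB (1ULL << 20)
#define GIB (1ULL << 30)

static uint64_t round_up(uint64_t v, uint64_t align)
{
    return (v + align - 1) & ~(align - 1);
}

int main(void)
{
    /* Hypothetical guest; every size below is made up for illustration. */
    uint64_t above_4g_start   = 4 * GIB;
    uint64_t above_4g_size    = 8 * GIB;
    uint64_t ram_size         = 12 * GIB;  /* 4 GiB below + 8 GiB above 4G */
    uint64_t maxram_size      = 32 * GIB;  /* leaves 20 GiB hotpluggable */
    uint64_t ram_slots        = 4;
    uint64_t cxl_fixed_window = 16 * GIB;  /* one CXL fixed memory window */

    /* Device (hotplug) memory, following pc_get_device_memory_range():
     * 1 GiB aligned above the 4G RAM, sized for the hotpluggable range
     * plus 1 GiB of alignment slack per slot. */
    uint64_t device_mem_base = round_up(above_4g_start + above_4g_size, GIB);
    uint64_t device_mem_size = maxram_size - ram_size + ram_slots * GIB;

    /* CXL range, following pc_get_cxl_range_start()/_end(): host registers
     * right after device memory, fixed windows 256 MiB aligned after the
     * 1 MiB register block. */
    uint64_t cxl_base = device_mem_base + device_mem_size;
    uint64_t cxl_end  = round_up(cxl_base + MIB, 256 * MIB) + cxl_fixed_window;

    /* 64-bit PCI hole, following pc_pci_hole64_start(): next 1 GiB
     * boundary after the CXL range. */
    uint64_t hole64_start = round_up(cxl_end, GIB);

    printf("device memory: 0x%" PRIx64 " size 0x%" PRIx64 "\n",
           device_mem_base, device_mem_size);
    printf("cxl range:     0x%" PRIx64 " .. 0x%" PRIx64 "\n",
           cxl_base, cxl_end);
    printf("pci hole64:    0x%" PRIx64 "\n", hole64_start);
    return 0;
}

This is only a sketch of the ordering the patch establishes; the actual helpers also handle the cases without reserved memory or without CXL, which collapse the chain back to the end of above-4G RAM.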
@@ -1659,16 +1855,14 @@ static void pc_machine_reset(MachineState *machine) CPU_FOREACH(cs) { cpu = X86_CPU(cs); - if (cpu->apic_state) { - device_legacy_reset(cpu->apic_state); - } + x86_cpu_after_reset(cpu); } } static void pc_machine_wakeup(MachineState *machine) { cpu_synchronize_all_states(); - pc_machine_reset(machine); + pc_machine_reset(machine, SHUTDOWN_CAUSE_NONE); cpu_synchronize_all_post_reset(); } @@ -1706,10 +1900,10 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) pcmc->has_reserved_memory = true; pcmc->kvmclock_enabled = true; pcmc->enforce_aligned_dimm = true; + pcmc->enforce_amd_1tb_hole = true; /* BIOS ACPI tables: 128K. Other BIOS datastructures: less than 4K reported * to be used at the moment, 32K should be enough for a while. */ pcmc->acpi_data_size = 0x20000 + 0x8000; - pcmc->linuxboot_dma_enabled = true; pcmc->pvh_enabled = true; pcmc->kvmclock_create_always = true; assert(!mc->get_hotplug_handler); @@ -1722,7 +1916,6 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) mc->auto_enable_numa_with_memdev = true; mc->has_hotpluggable_cpus = true; mc->default_boot_order = "cad"; - mc->smp_parse = pc_smp_parse; mc->block_default_type = IF_IDE; mc->max_cpus = 255; mc->reset = pc_machine_reset; @@ -1733,6 +1926,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) hc->unplug = pc_machine_device_unplug_cb; mc->default_cpu_type = TARGET_DEFAULT_CPU_TYPE; mc->nvdimm_supported = true; + mc->smp_props.dies_supported = true; mc->default_ram_id = "pc.ram"; object_class_property_add(oc, PC_MACHINE_MAX_RAM_BELOW_4G, "size", @@ -1753,17 +1947,23 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) object_class_property_add_bool(oc, PC_MACHINE_SMBUS, pc_machine_get_smbus, pc_machine_set_smbus); + object_class_property_set_description(oc, PC_MACHINE_SMBUS, + "Enable/disable system management bus"); object_class_property_add_bool(oc, PC_MACHINE_SATA, pc_machine_get_sata, pc_machine_set_sata); - - object_class_property_add_bool(oc, PC_MACHINE_PIT, - pc_machine_get_pit, pc_machine_set_pit); + object_class_property_set_description(oc, PC_MACHINE_SATA, + "Enable/disable Serial ATA bus"); object_class_property_add_bool(oc, "hpet", pc_machine_get_hpet, pc_machine_set_hpet); + object_class_property_set_description(oc, "hpet", + "Enable/disable high precision event timer emulation"); - object_class_property_add_bool(oc, "default_bus_bypass_iommu", + object_class_property_add_bool(oc, PC_MACHINE_I8042, + pc_machine_get_i8042, pc_machine_set_i8042); + + object_class_property_add_bool(oc, "default-bus-bypass-iommu", pc_machine_get_default_bus_bypass_iommu, pc_machine_set_default_bus_bypass_iommu); @@ -1772,6 +1972,12 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) NULL, NULL); object_class_property_set_description(oc, PC_MACHINE_MAX_FW_SIZE, "Maximum combined firmware size"); + + object_class_property_add(oc, PC_MACHINE_SMBIOS_EP, "str", + pc_machine_get_smbios_ep, pc_machine_set_smbios_ep, + NULL, NULL); + object_class_property_set_description(oc, PC_MACHINE_SMBIOS_EP, + "SMBIOS Entry Point type [32, 64]"); } static const TypeInfo pc_machine_info = { diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index 30b8bd6ea9..04f793cca1 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -26,6 +26,7 @@ #include CONFIG_DEVICES #include "qemu/units.h" +#include "hw/dma/i8257.h" #include "hw/loader.h" #include "hw/i386/x86.h" #include "hw/i386/pc.h" @@ -39,15 +40,16 @@ #include "hw/usb.h" #include "net/net.h" #include 
"hw/ide/pci.h" +#include "hw/ide/piix.h" #include "hw/irq.h" #include "sysemu/kvm.h" #include "hw/kvm/clock.h" #include "hw/sysbus.h" -#include "sysemu/arch_init.h" #include "hw/i2c/smbus_eeprom.h" #include "hw/xen/xen-x86.h" #include "exec/memory.h" #include "hw/acpi/acpi.h" +#include "hw/acpi/piix4.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "sysemu/xen.h" @@ -82,7 +84,6 @@ static void pc_init1(MachineState *machine, MemoryRegion *system_io = get_system_io(); PCIBus *pci_bus; ISABus *isa_bus; - PCII440FXState *i440fx_state; int piix3_devfn = -1; qemu_irq smi_irq; GSIState *gsi_state; @@ -92,6 +93,8 @@ static void pc_init1(MachineState *machine, MemoryRegion *pci_memory; MemoryRegion *rom_memory; ram_addr_t lowmem; + uint64_t hole64_size; + DeviceState *i440fx_host; /* * Calculate ram split, for memory below and above 4G. It's a bit @@ -154,6 +157,7 @@ static void pc_init1(MachineState *machine, } } + pc_machine_init_sgx_epc(pcms); x86_cpus_init(x86ms, pcmc->default_cpu_version); if (pcmc->kvmclock_enabled) { @@ -164,9 +168,15 @@ static void pc_init1(MachineState *machine, pci_memory = g_new(MemoryRegion, 1); memory_region_init(pci_memory, NULL, "pci", UINT64_MAX); rom_memory = pci_memory; + i440fx_host = qdev_new(host_type); + hole64_size = object_property_get_uint(OBJECT(i440fx_host), + PCI_HOST_PROP_PCI_HOLE64_SIZE, + &error_abort); } else { pci_memory = NULL; rom_memory = system_memory; + i440fx_host = NULL; + hole64_size = 0; } pc_guest_info_init(pcms); @@ -177,13 +187,13 @@ static void pc_init1(MachineState *machine, smbios_set_defaults("QEMU", "Standard PC (i440FX + PIIX, 1996)", mc->name, pcmc->smbios_legacy_mode, pcmc->smbios_uuid_encoded, - SMBIOS_ENTRY_POINT_21); + pcms->smbios_entry_point_type); } /* allocate ram and load rom/bios */ if (!xen_enabled()) { pc_memory_init(pcms, system_memory, - rom_memory, &ram_memory); + rom_memory, &ram_memory, hole64_size); } else { pc_system_flash_cleanup_unused(pcms); if (machine->kernel_filename != NULL) { @@ -196,29 +206,35 @@ static void pc_init1(MachineState *machine, if (pcmc->pci_enabled) { PIIX3State *piix3; + PCIDevice *pci_dev; + const char *type = xen_enabled() ? TYPE_PIIX3_XEN_DEVICE + : TYPE_PIIX3_DEVICE; - pci_bus = i440fx_init(host_type, - pci_type, - &i440fx_state, + pci_bus = i440fx_init(pci_type, + i440fx_host, system_memory, system_io, machine->ram_size, x86ms->below_4g_mem_size, x86ms->above_4g_mem_size, pci_memory, ram_memory); pcms->bus = pci_bus; - piix3 = piix3_create(pci_bus, &isa_bus); + pci_dev = pci_create_simple_multifunction(pci_bus, -1, true, type); + piix3 = PIIX3_PCI_DEVICE(pci_dev); piix3->pic = x86ms->gsi; piix3_devfn = piix3->dev.devfn; + isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(piix3), "isa.0")); } else { pci_bus = NULL; - i440fx_state = NULL; isa_bus = isa_bus_new(NULL, get_system_memory(), system_io, &error_abort); + i8257_dma_init(isa_bus, 0); pcms->hpet_enabled = false; } isa_bus_irqs(isa_bus, x86ms->gsi); - pc_i8259_create(isa_bus, gsi_state->i8259_irq); + if (x86ms->pic == ON_OFF_AUTO_ON || x86ms->pic == ON_OFF_AUTO_AUTO) { + pc_i8259_create(isa_bus, gsi_state->i8259_irq); + } if (pcmc->pci_enabled) { ioapic_init_gsi(gsi_state, "i440fx"); @@ -244,8 +260,7 @@ static void pc_init1(MachineState *machine, if (pcmc->pci_enabled) { PCIDevice *dev; - dev = pci_create_simple(pci_bus, piix3_devfn + 1, - xen_enabled() ? 
"piix3-ide-xen" : "piix3-ide"); + dev = pci_create_simple(pci_bus, piix3_devfn + 1, TYPE_PIIX3_IDE); pci_ide_create_devs(dev); idebus[0] = qdev_get_child_bus(&dev->qdev, "ide.0"); idebus[1] = qdev_get_child_bus(&dev->qdev, "ide.1"); @@ -279,14 +294,19 @@ static void pc_init1(MachineState *machine, } if (pcmc->pci_enabled && x86_machine_is_acpi_enabled(X86_MACHINE(pcms))) { - DeviceState *piix4_pm; + PCIDevice *piix4_pm; smi_irq = qemu_allocate_irq(pc_acpi_smi_interrupt, first_cpu, 0); + piix4_pm = pci_new(piix3_devfn + 3, TYPE_PIIX4_PM); + qdev_prop_set_uint32(DEVICE(piix4_pm), "smb_io_base", 0xb100); + qdev_prop_set_bit(DEVICE(piix4_pm), "smm-enabled", + x86_machine_is_smm_enabled(x86ms)); + pci_realize_and_unref(piix4_pm, pci_bus, &error_fatal); + + qdev_connect_gpio_out(DEVICE(piix4_pm), 0, x86ms->gsi[9]); + qdev_connect_gpio_out_named(DEVICE(piix4_pm), "smi-irq", 0, smi_irq); + pcms->smbus = I2C_BUS(qdev_get_child_bus(DEVICE(piix4_pm), "i2c")); /* TODO: Populate SPD eeprom data. */ - pcms->smbus = piix4_pm_init(pci_bus, piix3_devfn + 3, 0xb100, - x86ms->gsi[9], smi_irq, - x86_machine_is_smm_enabled(x86ms), - &piix4_pm); smbus_eeprom_init(pcms->smbus, 8, NULL, 0); object_property_add_link(OBJECT(machine), PC_MACHINE_ACPI_DEVICE_PROP, @@ -357,10 +377,12 @@ static void pc_compat_1_4_fn(MachineState *machine) pc_compat_1_5_fn(machine); } +#ifdef CONFIG_ISAPC static void pc_init_isa(MachineState *machine) { pc_init1(machine, TYPE_I440FX_PCI_HOST_BRIDGE, TYPE_I440FX_PCI_DEVICE); } +#endif #ifdef CONFIG_XEN static void pc_xen_hvm_init_pci(MachineState *machine) @@ -383,6 +405,7 @@ static void pc_xen_hvm_init(MachineState *machine) } pc_xen_hvm_init_pci(machine); + xen_igd_reserve_slot(pcms->bus); pci_create_simple(pcms->bus, -1, "xen-platform"); } #endif @@ -413,7 +436,7 @@ static void pc_i440fx_machine_options(MachineClass *m) machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE); } -static void pc_i440fx_6_1_machine_options(MachineClass *m) +static void pc_i440fx_7_2_machine_options(MachineClass *m) { PCMachineClass *pcmc = PC_MACHINE_CLASS(m); pc_i440fx_machine_options(m); @@ -422,6 +445,57 @@ static void pc_i440fx_6_1_machine_options(MachineClass *m) pcmc->default_cpu_version = 1; } +DEFINE_I440FX_MACHINE(v7_2, "pc-i440fx-7.2", NULL, + pc_i440fx_7_2_machine_options); + +static void pc_i440fx_7_1_machine_options(MachineClass *m) +{ + pc_i440fx_7_2_machine_options(m); + m->alias = NULL; + m->is_default = false; + compat_props_add(m->compat_props, hw_compat_7_1, hw_compat_7_1_len); + compat_props_add(m->compat_props, pc_compat_7_1, pc_compat_7_1_len); +} + +DEFINE_I440FX_MACHINE(v7_1, "pc-i440fx-7.1", NULL, + pc_i440fx_7_1_machine_options); + +static void pc_i440fx_7_0_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + pc_i440fx_7_1_machine_options(m); + m->alias = NULL; + m->is_default = false; + pcmc->enforce_amd_1tb_hole = false; + compat_props_add(m->compat_props, hw_compat_7_0, hw_compat_7_0_len); + compat_props_add(m->compat_props, pc_compat_7_0, pc_compat_7_0_len); +} + +DEFINE_I440FX_MACHINE(v7_0, "pc-i440fx-7.0", NULL, + pc_i440fx_7_0_machine_options); + +static void pc_i440fx_6_2_machine_options(MachineClass *m) +{ + pc_i440fx_7_0_machine_options(m); + m->alias = NULL; + m->is_default = false; + compat_props_add(m->compat_props, hw_compat_6_2, hw_compat_6_2_len); + compat_props_add(m->compat_props, pc_compat_6_2, pc_compat_6_2_len); +} + +DEFINE_I440FX_MACHINE(v6_2, "pc-i440fx-6.2", NULL, + pc_i440fx_6_2_machine_options); + +static void 
pc_i440fx_6_1_machine_options(MachineClass *m) +{ + pc_i440fx_6_2_machine_options(m); + m->alias = NULL; + m->is_default = false; + compat_props_add(m->compat_props, hw_compat_6_1, hw_compat_6_1_len); + compat_props_add(m->compat_props, pc_compat_6_1, pc_compat_6_1_len); + m->smp_props.prefer_sockets = true; +} + DEFINE_I440FX_MACHINE(v6_1, "pc-i440fx-6.1", NULL, pc_i440fx_6_1_machine_options); @@ -523,7 +597,6 @@ static void pc_i440fx_3_1_machine_options(MachineClass *m) pc_i440fx_4_0_machine_options(m); m->is_default = false; - pcmc->do_not_add_smb_acpi = true; m->smbus_no_migration_support = true; m->alias = NULL; pcmc->pvh_enabled = false; @@ -607,11 +680,12 @@ DEFINE_I440FX_MACHINE(v2_7, "pc-i440fx-2.7", NULL, static void pc_i440fx_2_6_machine_options(MachineClass *m) { + X86MachineClass *x86mc = X86_MACHINE_CLASS(m); PCMachineClass *pcmc = PC_MACHINE_CLASS(m); pc_i440fx_2_7_machine_options(m); pcmc->legacy_cpu_hotplug = true; - pcmc->linuxboot_dma_enabled = false; + x86mc->fwcfg_dma_enabled = false; compat_props_add(m->compat_props, hw_compat_2_6, hw_compat_2_6_len); compat_props_add(m->compat_props, pc_compat_2_6, pc_compat_2_6_len); } @@ -729,6 +803,7 @@ static void pc_i440fx_1_7_machine_options(MachineClass *m) m->hw_version = "1.7.0"; m->default_machine_opts = NULL; m->option_rom_has_mr = true; + m->deprecation_reason = "old and unattended - use a newer version instead"; compat_props_add(m->compat_props, pc_compat_1_7, pc_compat_1_7_len); pcmc->smbios_defaults = false; pcmc->gigabyte_align = false; @@ -772,124 +847,7 @@ static void pc_i440fx_1_4_machine_options(MachineClass *m) DEFINE_I440FX_MACHINE(v1_4, "pc-i440fx-1.4", pc_compat_1_4_fn, pc_i440fx_1_4_machine_options); -typedef struct { - uint16_t gpu_device_id; - uint16_t pch_device_id; - uint8_t pch_revision_id; -} IGDDeviceIDInfo; - -/* In real world different GPU should have different PCH. But actually - * the different PCH DIDs likely map to different PCH SKUs. We do the - * same thing for the GPU. For PCH, the different SKUs are going to be - * all the same silicon design and implementation, just different - * features turn on and off with fuses. The SW interfaces should be - * consistent across all SKUs in a given family (eg LPT). But just same - * features may not be supported. - * - * Most of these different PCH features probably don't matter to the - * Gfx driver, but obviously any difference in display port connections - * will so it should be fine with any PCH in case of passthrough. - * - * So currently use one PCH version, 0x8c4e, to cover all HSW(Haswell) - * scenarios, 0x9cc3 for BDW(Broadwell). 
- */ -static const IGDDeviceIDInfo igd_combo_id_infos[] = { - /* HSW Classic */ - {0x0402, 0x8c4e, 0x04}, /* HSWGT1D, HSWD_w7 */ - {0x0406, 0x8c4e, 0x04}, /* HSWGT1M, HSWM_w7 */ - {0x0412, 0x8c4e, 0x04}, /* HSWGT2D, HSWD_w7 */ - {0x0416, 0x8c4e, 0x04}, /* HSWGT2M, HSWM_w7 */ - {0x041E, 0x8c4e, 0x04}, /* HSWGT15D, HSWD_w7 */ - /* HSW ULT */ - {0x0A06, 0x8c4e, 0x04}, /* HSWGT1UT, HSWM_w7 */ - {0x0A16, 0x8c4e, 0x04}, /* HSWGT2UT, HSWM_w7 */ - {0x0A26, 0x8c4e, 0x06}, /* HSWGT3UT, HSWM_w7 */ - {0x0A2E, 0x8c4e, 0x04}, /* HSWGT3UT28W, HSWM_w7 */ - {0x0A1E, 0x8c4e, 0x04}, /* HSWGT2UX, HSWM_w7 */ - {0x0A0E, 0x8c4e, 0x04}, /* HSWGT1ULX, HSWM_w7 */ - /* HSW CRW */ - {0x0D26, 0x8c4e, 0x04}, /* HSWGT3CW, HSWM_w7 */ - {0x0D22, 0x8c4e, 0x04}, /* HSWGT3CWDT, HSWD_w7 */ - /* HSW Server */ - {0x041A, 0x8c4e, 0x04}, /* HSWSVGT2, HSWD_w7 */ - /* HSW SRVR */ - {0x040A, 0x8c4e, 0x04}, /* HSWSVGT1, HSWD_w7 */ - /* BSW */ - {0x1606, 0x9cc3, 0x03}, /* BDWULTGT1, BDWM_w7 */ - {0x1616, 0x9cc3, 0x03}, /* BDWULTGT2, BDWM_w7 */ - {0x1626, 0x9cc3, 0x03}, /* BDWULTGT3, BDWM_w7 */ - {0x160E, 0x9cc3, 0x03}, /* BDWULXGT1, BDWM_w7 */ - {0x161E, 0x9cc3, 0x03}, /* BDWULXGT2, BDWM_w7 */ - {0x1602, 0x9cc3, 0x03}, /* BDWHALOGT1, BDWM_w7 */ - {0x1612, 0x9cc3, 0x03}, /* BDWHALOGT2, BDWM_w7 */ - {0x1622, 0x9cc3, 0x03}, /* BDWHALOGT3, BDWM_w7 */ - {0x162B, 0x9cc3, 0x03}, /* BDWHALO28W, BDWM_w7 */ - {0x162A, 0x9cc3, 0x03}, /* BDWGT3WRKS, BDWM_w7 */ - {0x162D, 0x9cc3, 0x03}, /* BDWGT3SRVR, BDWM_w7 */ -}; - -static void isa_bridge_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); - - dc->desc = "ISA bridge faked to support IGD PT"; - set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); - k->vendor_id = PCI_VENDOR_ID_INTEL; - k->class_id = PCI_CLASS_BRIDGE_ISA; -}; - -static TypeInfo isa_bridge_info = { - .name = "igd-passthrough-isa-bridge", - .parent = TYPE_PCI_DEVICE, - .instance_size = sizeof(PCIDevice), - .class_init = isa_bridge_class_init, - .interfaces = (InterfaceInfo[]) { - { INTERFACE_CONVENTIONAL_PCI_DEVICE }, - { }, - }, -}; - -static void pt_graphics_register_types(void) -{ - type_register_static(&isa_bridge_info); -} -type_init(pt_graphics_register_types) - -void igd_passthrough_isa_bridge_create(PCIBus *bus, uint16_t gpu_dev_id) -{ - struct PCIDevice *bridge_dev; - int i, num; - uint16_t pch_dev_id = 0xffff; - uint8_t pch_rev_id = 0; - - num = ARRAY_SIZE(igd_combo_id_infos); - for (i = 0; i < num; i++) { - if (gpu_dev_id == igd_combo_id_infos[i].gpu_device_id) { - pch_dev_id = igd_combo_id_infos[i].pch_device_id; - pch_rev_id = igd_combo_id_infos[i].pch_revision_id; - } - } - - if (pch_dev_id == 0xffff) { - return; - } - - /* Currently IGD drivers always need to access PCH by 1f.0. */ - bridge_dev = pci_create_simple(bus, PCI_DEVFN(0x1f, 0), - "igd-passthrough-isa-bridge"); - - /* - * Note that vendor id is always PCI_VENDOR_ID_INTEL. 
- */ - if (!bridge_dev) { - fprintf(stderr, "set igd-passthrough-isa-bridge failed!\n"); - return; - } - pci_config_set_device_id(bridge_dev->config, pch_dev_id); - pci_config_set_revision(bridge_dev->config, pch_rev_id); -} - +#ifdef CONFIG_ISAPC static void isapc_machine_options(MachineClass *m) { PCMachineClass *pcmc = PC_MACHINE_CLASS(m); @@ -909,7 +867,7 @@ static void isapc_machine_options(MachineClass *m) DEFINE_PC_MACHINE(isapc, "isapc", pc_init_isa, isapc_machine_options); - +#endif #ifdef CONFIG_XEN static void xenfv_4_2_machine_options(MachineClass *m) diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index 04b4a4788d..f522874add 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -31,7 +31,6 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "hw/loader.h" -#include "sysemu/arch_init.h" #include "hw/i2c/smbus_eeprom.h" #include "hw/rtc/mc146818rtc.h" #include "sysemu/kvm.h" @@ -138,6 +137,8 @@ static void pc_q35_init(MachineState *machine) DriveInfo *hd[MAX_SATA_PORTS]; MachineClass *mc = MACHINE_GET_CLASS(machine); bool acpi_pcihp; + bool keep_pci_slot_hpc; + uint64_t pci_hole64_size = 0; /* Check whether RAM fits below 4G (leaving 1/2 GByte for IO memory * and 256 Mbytes for PCI Express Enhanced Configuration Access Mapping @@ -178,6 +179,7 @@ static void pc_q35_init(MachineState *machine) x86ms->below_4g_mem_size = machine->ram_size; } + pc_machine_init_sgx_epc(pcms); x86_cpus_init(x86ms, pcmc->default_cpu_version); kvmclock_create(pcmc->kvmclock_create_always); @@ -199,15 +201,22 @@ static void pc_q35_init(MachineState *machine) smbios_set_defaults("QEMU", "Standard PC (Q35 + ICH9, 2009)", mc->name, pcmc->smbios_legacy_mode, pcmc->smbios_uuid_encoded, - SMBIOS_ENTRY_POINT_21); + pcms->smbios_entry_point_type); } - /* allocate ram and load rom/bios */ - pc_memory_init(pcms, get_system_memory(), rom_memory, &ram_memory); - /* create pci host bus */ q35_host = Q35_HOST_DEVICE(qdev_new(TYPE_Q35_HOST_DEVICE)); + if (pcmc->pci_enabled) { + pci_hole64_size = object_property_get_uint(OBJECT(q35_host), + PCI_HOST_PROP_PCI_HOLE64_SIZE, + &error_abort); + } + + /* allocate ram and load rom/bios */ + pc_memory_init(pcms, get_system_memory(), rom_memory, &ram_memory, + pci_hole64_size); + object_property_add_child(qdev_get_machine(), "q35", OBJECT(q35_host)); object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_RAM_MEM, OBJECT(ram_memory), NULL); @@ -239,11 +248,15 @@ static void pc_q35_init(MachineState *machine) OBJECT(lpc), &error_abort); acpi_pcihp = object_property_get_bool(OBJECT(lpc), - "acpi-pci-hotplug-with-bridge-support", + ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, NULL); - if (acpi_pcihp) { - object_register_sugar_prop(TYPE_PCIE_SLOT, "native-hotplug", + keep_pci_slot_hpc = object_property_get_bool(OBJECT(lpc), + "x-keep-pci-slot-hpc", + NULL); + + if (!keep_pci_slot_hpc && acpi_pcihp) { + object_register_sugar_prop(TYPE_PCIE_SLOT, "x-native-hotplug", "false", true); } @@ -260,7 +273,9 @@ static void pc_q35_init(MachineState *machine) pci_bus_set_route_irq_fn(host_bus, ich9_route_intx_pin_to_irq); isa_bus = ich9_lpc->isa_bus; - pc_i8259_create(isa_bus, gsi_state->i8259_irq); + if (x86ms->pic == ON_OFF_AUTO_ON || x86ms->pic == ON_OFF_AUTO_AUTO) { + pc_i8259_create(isa_bus, gsi_state->i8259_irq); + } if (pcmc->pci_enabled) { ioapic_init_gsi(gsi_state, "q35"); @@ -355,7 +370,7 @@ static void pc_q35_machine_options(MachineClass *m) m->max_cpus = 288; } -static void pc_q35_6_1_machine_options(MachineClass *m) +static void pc_q35_7_2_machine_options(MachineClass *m) { 
PCMachineClass *pcmc = PC_MACHINE_CLASS(m); pc_q35_machine_options(m); @@ -363,6 +378,53 @@ static void pc_q35_6_1_machine_options(MachineClass *m) pcmc->default_cpu_version = 1; } +DEFINE_Q35_MACHINE(v7_2, "pc-q35-7.2", NULL, + pc_q35_7_2_machine_options); + +static void pc_q35_7_1_machine_options(MachineClass *m) +{ + pc_q35_7_2_machine_options(m); + m->alias = NULL; + compat_props_add(m->compat_props, hw_compat_7_1, hw_compat_7_1_len); + compat_props_add(m->compat_props, pc_compat_7_1, pc_compat_7_1_len); +} + +DEFINE_Q35_MACHINE(v7_1, "pc-q35-7.1", NULL, + pc_q35_7_1_machine_options); + +static void pc_q35_7_0_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + pc_q35_7_1_machine_options(m); + m->alias = NULL; + pcmc->enforce_amd_1tb_hole = false; + compat_props_add(m->compat_props, hw_compat_7_0, hw_compat_7_0_len); + compat_props_add(m->compat_props, pc_compat_7_0, pc_compat_7_0_len); +} + +DEFINE_Q35_MACHINE(v7_0, "pc-q35-7.0", NULL, + pc_q35_7_0_machine_options); + +static void pc_q35_6_2_machine_options(MachineClass *m) +{ + pc_q35_7_0_machine_options(m); + m->alias = NULL; + compat_props_add(m->compat_props, hw_compat_6_2, hw_compat_6_2_len); + compat_props_add(m->compat_props, pc_compat_6_2, pc_compat_6_2_len); +} + +DEFINE_Q35_MACHINE(v6_2, "pc-q35-6.2", NULL, + pc_q35_6_2_machine_options); + +static void pc_q35_6_1_machine_options(MachineClass *m) +{ + pc_q35_6_2_machine_options(m); + m->alias = NULL; + compat_props_add(m->compat_props, hw_compat_6_1, hw_compat_6_1_len); + compat_props_add(m->compat_props, pc_compat_6_1, pc_compat_6_1_len); + m->smp_props.prefer_sockets = true; +} + DEFINE_Q35_MACHINE(v6_1, "pc-q35-6.1", NULL, pc_q35_6_1_machine_options); @@ -473,7 +535,6 @@ static void pc_q35_3_1_machine_options(MachineClass *m) pc_q35_4_0_machine_options(m); m->default_kernel_irqchip_split = false; - pcmc->do_not_add_smb_acpi = true; m->smbus_no_migration_support = true; m->alias = NULL; pcmc->pvh_enabled = false; @@ -561,11 +622,12 @@ DEFINE_Q35_MACHINE(v2_7, "pc-q35-2.7", NULL, static void pc_q35_2_6_machine_options(MachineClass *m) { + X86MachineClass *x86mc = X86_MACHINE_CLASS(m); PCMachineClass *pcmc = PC_MACHINE_CLASS(m); pc_q35_2_7_machine_options(m); pcmc->legacy_cpu_hotplug = true; - pcmc->linuxboot_dma_enabled = false; + x86mc->fwcfg_dma_enabled = false; compat_props_add(m->compat_props, hw_compat_2_6, hw_compat_2_6_len); compat_props_add(m->compat_props, pc_compat_2_6, pc_compat_2_6_len); } diff --git a/hw/i386/pc_sysfw.c b/hw/i386/pc_sysfw.c index 68d6b1f783..c8d9e71b88 100644 --- a/hw/i386/pc_sysfw.c +++ b/hw/i386/pc_sysfw.c @@ -24,7 +24,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/error.h" #include "sysemu/block-backend.h" #include "qemu/error-report.h" @@ -37,7 +36,7 @@ #include "hw/qdev-properties.h" #include "hw/block/flash.h" #include "sysemu/kvm.h" -#include "sysemu/sev.h" +#include "sev.h" #define FLASH_SECTOR_SIZE 4096 @@ -148,7 +147,6 @@ static void pc_system_flash_map(PCMachineState *pcms, MemoryRegion *flash_mem; void *flash_ptr; int flash_size; - int ret; assert(PC_MACHINE_GET_CLASS(pcms)->pci_enabled); @@ -196,19 +194,7 @@ static void pc_system_flash_map(PCMachineState *pcms, if (sev_enabled()) { flash_ptr = memory_region_get_ram_ptr(flash_mem); flash_size = memory_region_size(flash_mem); - /* - * OVMF places a GUIDed structures in the flash, so - * search for them - */ - pc_system_parse_ovmf_flash(flash_ptr, flash_size); - - ret = sev_es_save_reset_vector(flash_ptr, flash_size); - if (ret) 
{ - error_report("failed to locate and/or save reset vector"); - exit(1); - } - - sev_encrypt_flash(flash_ptr, flash_size, &error_fatal); + x86_firmware_configure(flash_ptr, flash_size); } } } @@ -260,3 +246,24 @@ void pc_system_firmware_init(PCMachineState *pcms, pc_system_flash_cleanup_unused(pcms); } + +void x86_firmware_configure(void *ptr, int size) +{ + int ret; + + /* + * OVMF places a GUIDed structures in the flash, so + * search for them + */ + pc_system_parse_ovmf_flash(ptr, size); + + if (sev_enabled()) { + ret = sev_es_save_reset_vector(ptr, size); + if (ret) { + error_report("failed to locate and/or save reset vector"); + exit(1); + } + + sev_encrypt_flash(ptr, size, &error_fatal); + } +} diff --git a/hw/i386/pc_sysfw_ovmf.c b/hw/i386/pc_sysfw_ovmf.c index f4dd92c588..07a4c267fa 100644 --- a/hw/i386/pc_sysfw_ovmf.c +++ b/hw/i386/pc_sysfw_ovmf.c @@ -24,11 +24,14 @@ */ #include "qemu/osdep.h" +#include "qemu/error-report.h" #include "hw/i386/pc.h" #include "cpu.h" #define OVMF_TABLE_FOOTER_GUID "96b582de-1fb2-45f7-baea-a366c55a082d" +static const int bytes_after_table_footer = 32; + static bool ovmf_flash_parsed; static uint8_t *ovmf_table; static int ovmf_table_len; @@ -52,12 +55,13 @@ void pc_system_parse_ovmf_flash(uint8_t *flash_ptr, size_t flash_size) /* * if this is OVMF there will be a table footer - * guid 48 bytes before the end of the flash file. If it's - * not found, silently abort the flash parsing. + * guid 48 bytes before the end of the flash file + * (= 32 bytes after the table + 16 bytes the GUID itself). + * If it's not found, silently abort the flash parsing. */ qemu_uuid_parse(OVMF_TABLE_FOOTER_GUID, &guid); guid = qemu_uuid_bswap(guid); /* guids are LE */ - ptr = flash_ptr + flash_size - 48; + ptr = flash_ptr + flash_size - (bytes_after_table_footer + sizeof(guid)); if (!qemu_uuid_is_equal((QemuUUID *)ptr, &guid)) { return; } @@ -66,7 +70,13 @@ void pc_system_parse_ovmf_flash(uint8_t *flash_ptr, size_t flash_size) ptr -= sizeof(uint16_t); tot_len = le16_to_cpu(*(uint16_t *)ptr) - sizeof(guid) - sizeof(uint16_t); - if (tot_len <= 0) { + if (tot_len < 0 || tot_len > (ptr - flash_ptr)) { + error_report("OVMF table has invalid size %d", tot_len); + return; + } + + if (tot_len == 0) { + /* no entries in the OVMF table */ return; } diff --git a/hw/i386/sgx-epc.c b/hw/i386/sgx-epc.c new file mode 100644 index 0000000000..d664829d35 --- /dev/null +++ b/hw/i386/sgx-epc.c @@ -0,0 +1,188 @@ +/* + * SGX EPC device + * + * Copyright (C) 2019 Intel Corporation + * + * Authors: + * Sean Christopherson + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ +#include "qemu/osdep.h" +#include "hw/i386/pc.h" +#include "hw/i386/sgx-epc.h" +#include "hw/mem/memory-device.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "target/i386/cpu.h" +#include "exec/address-spaces.h" + +static Property sgx_epc_properties[] = { + DEFINE_PROP_UINT64(SGX_EPC_ADDR_PROP, SGXEPCDevice, addr, 0), + DEFINE_PROP_UINT32(SGX_EPC_NUMA_NODE_PROP, SGXEPCDevice, node, 0), + DEFINE_PROP_LINK(SGX_EPC_MEMDEV_PROP, SGXEPCDevice, hostmem, + TYPE_MEMORY_BACKEND_EPC, HostMemoryBackendEpc *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sgx_epc_get_size(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Error *local_err = NULL; + uint64_t value; + + value = memory_device_get_region_size(MEMORY_DEVICE(obj), &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + visit_type_uint64(v, name, &value, errp); +} + +static void sgx_epc_init(Object *obj) +{ + object_property_add(obj, SGX_EPC_SIZE_PROP, "uint64", sgx_epc_get_size, + NULL, NULL, NULL); +} + +static void sgx_epc_realize(DeviceState *dev, Error **errp) +{ + PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); + X86MachineState *x86ms = X86_MACHINE(pcms); + MemoryDeviceState *md = MEMORY_DEVICE(dev); + SGXEPCState *sgx_epc = &pcms->sgx_epc; + SGXEPCDevice *epc = SGX_EPC(dev); + HostMemoryBackend *hostmem; + const char *path; + + if (x86ms->boot_cpus != 0) { + error_setg(errp, "'" TYPE_SGX_EPC "' can't be created after vCPUs," + "e.g. via -device"); + return; + } + + if (!epc->hostmem) { + error_setg(errp, "'" SGX_EPC_MEMDEV_PROP "' property is not set"); + return; + } + hostmem = MEMORY_BACKEND(epc->hostmem); + if (host_memory_backend_is_mapped(hostmem)) { + path = object_get_canonical_path_component(OBJECT(hostmem)); + error_setg(errp, "can't use already busy memdev: %s", path); + return; + } + + epc->addr = sgx_epc->base + sgx_epc->size; + + memory_region_add_subregion(&sgx_epc->mr, epc->addr - sgx_epc->base, + host_memory_backend_get_memory(hostmem)); + + host_memory_backend_set_mapped(hostmem, true); + + sgx_epc->sections = g_renew(SGXEPCDevice *, sgx_epc->sections, + sgx_epc->nr_sections + 1); + sgx_epc->sections[sgx_epc->nr_sections++] = epc; + + sgx_epc->size += memory_device_get_region_size(md, errp); +} + +static void sgx_epc_unrealize(DeviceState *dev) +{ + SGXEPCDevice *epc = SGX_EPC(dev); + HostMemoryBackend *hostmem = MEMORY_BACKEND(epc->hostmem); + + host_memory_backend_set_mapped(hostmem, false); +} + +static uint64_t sgx_epc_md_get_addr(const MemoryDeviceState *md) +{ + const SGXEPCDevice *epc = SGX_EPC(md); + + return epc->addr; +} + +static void sgx_epc_md_set_addr(MemoryDeviceState *md, uint64_t addr, + Error **errp) +{ + object_property_set_uint(OBJECT(md), SGX_EPC_ADDR_PROP, addr, errp); +} + +static uint64_t sgx_epc_md_get_plugged_size(const MemoryDeviceState *md, + Error **errp) +{ + return 0; +} + +static MemoryRegion *sgx_epc_md_get_memory_region(MemoryDeviceState *md, + Error **errp) +{ + SGXEPCDevice *epc = SGX_EPC(md); + HostMemoryBackend *hostmem; + + if (!epc->hostmem) { + error_setg(errp, "'" SGX_EPC_MEMDEV_PROP "' property must be set"); + return NULL; + } + + hostmem = MEMORY_BACKEND(epc->hostmem); + return host_memory_backend_get_memory(hostmem); +} + +static void sgx_epc_md_fill_device_info(const MemoryDeviceState *md, + MemoryDeviceInfo *info) +{ + SgxEPCDeviceInfo *se = g_new0(SgxEPCDeviceInfo, 1); + SGXEPCDevice *epc = SGX_EPC(md); + + se->memaddr = epc->addr; + se->size = 
object_property_get_uint(OBJECT(epc), SGX_EPC_SIZE_PROP, + NULL); + se->node = object_property_get_uint(OBJECT(epc), SGX_EPC_NUMA_NODE_PROP, + NULL); + se->memdev = object_get_canonical_path(OBJECT(epc->hostmem)); + + info->u.sgx_epc.data = se; + info->type = MEMORY_DEVICE_INFO_KIND_SGX_EPC; +} + +static void sgx_epc_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc); + + dc->hotpluggable = false; + dc->realize = sgx_epc_realize; + dc->unrealize = sgx_epc_unrealize; + dc->desc = "SGX EPC section"; + dc->user_creatable = false; + device_class_set_props(dc, sgx_epc_properties); + + mdc->get_addr = sgx_epc_md_get_addr; + mdc->set_addr = sgx_epc_md_set_addr; + mdc->get_plugged_size = sgx_epc_md_get_plugged_size; + mdc->get_memory_region = sgx_epc_md_get_memory_region; + mdc->fill_device_info = sgx_epc_md_fill_device_info; +} + +static const TypeInfo sgx_epc_info = { + .name = TYPE_SGX_EPC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SGXEPCDevice), + .instance_init = sgx_epc_init, + .class_init = sgx_epc_class_init, + .class_size = sizeof(DeviceClass), + .interfaces = (InterfaceInfo[]) { + { TYPE_MEMORY_DEVICE }, + { } + }, +}; + +static void sgx_epc_register_types(void) +{ + type_register_static(&sgx_epc_info); +} + +type_init(sgx_epc_register_types) diff --git a/hw/i386/sgx-stub.c b/hw/i386/sgx-stub.c new file mode 100644 index 0000000000..26833eb233 --- /dev/null +++ b/hw/i386/sgx-stub.c @@ -0,0 +1,38 @@ +#include "qemu/osdep.h" +#include "monitor/monitor.h" +#include "monitor/hmp-target.h" +#include "hw/i386/pc.h" +#include "hw/i386/sgx-epc.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-misc-target.h" + +void sgx_epc_build_srat(GArray *table_data) +{ +} + +SGXInfo *qmp_query_sgx(Error **errp) +{ + error_setg(errp, "SGX support is not compiled in"); + return NULL; +} + +SGXInfo *qmp_query_sgx_capabilities(Error **errp) +{ + error_setg(errp, "SGX support is not compiled in"); + return NULL; +} + +void hmp_info_sgx(Monitor *mon, const QDict *qdict) +{ + monitor_printf(mon, "SGX is not available in this QEMU\n"); +} + +void pc_machine_init_sgx_epc(PCMachineState *pcms) +{ + memset(&pcms->sgx_epc, 0, sizeof(SGXEPCState)); +} + +bool sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size) +{ + g_assert_not_reached(); +} diff --git a/hw/i386/sgx.c b/hw/i386/sgx.c new file mode 100644 index 0000000000..09d9c7c73d --- /dev/null +++ b/hw/i386/sgx.c @@ -0,0 +1,327 @@ +/* + * SGX common code + * + * Copyright (C) 2021 Intel Corporation + * + * Authors: + * Yang Zhong + * Sean Christopherson + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "hw/i386/pc.h" +#include "hw/i386/sgx-epc.h" +#include "hw/mem/memory-device.h" +#include "monitor/qdev.h" +#include "monitor/monitor.h" +#include "monitor/hmp-target.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-misc-target.h" +#include "exec/address-spaces.h" +#include "sysemu/hw_accel.h" +#include "sysemu/reset.h" +#include +#include "hw/acpi/aml-build.h" + +#define SGX_MAX_EPC_SECTIONS 8 +#define SGX_CPUID_EPC_INVALID 0x0 + +/* A valid EPC section. 
*/ +#define SGX_CPUID_EPC_SECTION 0x1 +#define SGX_CPUID_EPC_MASK 0xF + +#define SGX_MAGIC 0xA4 +#define SGX_IOC_VEPC_REMOVE_ALL _IO(SGX_MAGIC, 0x04) + +#define RETRY_NUM 2 + +static int sgx_epc_device_list(Object *obj, void *opaque) +{ + GSList **list = opaque; + + if (object_dynamic_cast(obj, TYPE_SGX_EPC)) { + *list = g_slist_append(*list, DEVICE(obj)); + } + + object_child_foreach(obj, sgx_epc_device_list, opaque); + return 0; +} + +static GSList *sgx_epc_get_device_list(void) +{ + GSList *list = NULL; + + object_child_foreach(qdev_get_machine(), sgx_epc_device_list, &list); + return list; +} + +void sgx_epc_build_srat(GArray *table_data) +{ + GSList *device_list = sgx_epc_get_device_list(); + + for (; device_list; device_list = device_list->next) { + DeviceState *dev = device_list->data; + Object *obj = OBJECT(dev); + uint64_t addr, size; + int node; + + node = object_property_get_uint(obj, SGX_EPC_NUMA_NODE_PROP, + &error_abort); + addr = object_property_get_uint(obj, SGX_EPC_ADDR_PROP, &error_abort); + size = object_property_get_uint(obj, SGX_EPC_SIZE_PROP, &error_abort); + + build_srat_memory(table_data, addr, size, node, MEM_AFFINITY_ENABLED); + } + g_slist_free(device_list); +} + +static uint64_t sgx_calc_section_metric(uint64_t low, uint64_t high) +{ + return (low & MAKE_64BIT_MASK(12, 20)) + + ((high & MAKE_64BIT_MASK(0, 20)) << 32); +} + +static SGXEPCSectionList *sgx_calc_host_epc_sections(uint64_t *size) +{ + SGXEPCSectionList *head = NULL, **tail = &head; + SGXEPCSection *section; + uint32_t i, type; + uint32_t eax, ebx, ecx, edx; + uint32_t j = 0; + + for (i = 0; i < SGX_MAX_EPC_SECTIONS; i++) { + host_cpuid(0x12, i + 2, &eax, &ebx, &ecx, &edx); + + type = eax & SGX_CPUID_EPC_MASK; + if (type == SGX_CPUID_EPC_INVALID) { + break; + } + + if (type != SGX_CPUID_EPC_SECTION) { + break; + } + + section = g_new0(SGXEPCSection, 1); + section->node = j++; + section->size = sgx_calc_section_metric(ecx, edx); + *size += section->size; + QAPI_LIST_APPEND(tail, section); + } + + return head; +} + +static void sgx_epc_reset(void *opaque) +{ + PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); + HostMemoryBackend *hostmem; + SGXEPCDevice *epc; + int failures; + int fd, i, j, r; + static bool warned = false; + + /* + * The second pass is needed to remove SECS pages that could not + * be removed during the first. + */ + for (i = 0; i < RETRY_NUM; i++) { + failures = 0; + for (j = 0; j < pcms->sgx_epc.nr_sections; j++) { + epc = pcms->sgx_epc.sections[j]; + hostmem = MEMORY_BACKEND(epc->hostmem); + fd = memory_region_get_fd(host_memory_backend_get_memory(hostmem)); + + r = ioctl(fd, SGX_IOC_VEPC_REMOVE_ALL); + if (r == -ENOTTY && !warned) { + warned = true; + warn_report("kernel does not support SGX_IOC_VEPC_REMOVE_ALL"); + warn_report("SGX might operate incorrectly in the guest after reset"); + break; + } else if (r > 0) { + /* SECS pages remain */ + failures++; + if (i == 1) { + error_report("cannot reset vEPC section %d", j); + } + } + } + if (!failures) { + break; + } + } +} + +SGXInfo *qmp_query_sgx_capabilities(Error **errp) +{ + SGXInfo *info = NULL; + uint32_t eax, ebx, ecx, edx; + uint64_t size = 0; + + int fd = qemu_open_old("/dev/sgx_vepc", O_RDWR); + if (fd < 0) { + error_setg(errp, "SGX is not enabled in KVM"); + return NULL; + } + + info = g_new0(SGXInfo, 1); + host_cpuid(0x7, 0, &eax, &ebx, &ecx, &edx); + + info->sgx = ebx & (1U << 2) ? true : false; + info->flc = ecx & (1U << 30) ? 
true : false; + + host_cpuid(0x12, 0, &eax, &ebx, &ecx, &edx); + info->sgx1 = eax & (1U << 0) ? true : false; + info->sgx2 = eax & (1U << 1) ? true : false; + + info->sections = sgx_calc_host_epc_sections(&size); + info->section_size = size; + + close(fd); + + return info; +} + +static SGXEPCSectionList *sgx_get_epc_sections_list(void) +{ + GSList *device_list = sgx_epc_get_device_list(); + SGXEPCSectionList *head = NULL, **tail = &head; + SGXEPCSection *section; + + for (; device_list; device_list = device_list->next) { + DeviceState *dev = device_list->data; + Object *obj = OBJECT(dev); + + section = g_new0(SGXEPCSection, 1); + section->node = object_property_get_uint(obj, SGX_EPC_NUMA_NODE_PROP, + &error_abort); + section->size = object_property_get_uint(obj, SGX_EPC_SIZE_PROP, + &error_abort); + QAPI_LIST_APPEND(tail, section); + } + g_slist_free(device_list); + + return head; +} + +SGXInfo *qmp_query_sgx(Error **errp) +{ + SGXInfo *info = NULL; + X86MachineState *x86ms; + PCMachineState *pcms = + (PCMachineState *)object_dynamic_cast(qdev_get_machine(), + TYPE_PC_MACHINE); + if (!pcms) { + error_setg(errp, "SGX is only supported on PC machines"); + return NULL; + } + + x86ms = X86_MACHINE(pcms); + if (!x86ms->sgx_epc_list) { + error_setg(errp, "No EPC regions defined, SGX not available"); + return NULL; + } + + SGXEPCState *sgx_epc = &pcms->sgx_epc; + info = g_new0(SGXInfo, 1); + + info->sgx = true; + info->sgx1 = true; + info->sgx2 = true; + info->flc = true; + info->section_size = sgx_epc->size; + info->sections = sgx_get_epc_sections_list(); + + return info; +} + +void hmp_info_sgx(Monitor *mon, const QDict *qdict) +{ + Error *err = NULL; + SGXEPCSectionList *section_list, *section; + g_autoptr(SGXInfo) info = qmp_query_sgx(&err); + + if (err) { + error_report_err(err); + return; + } + monitor_printf(mon, "SGX support: %s\n", + info->sgx ? "enabled" : "disabled"); + monitor_printf(mon, "SGX1 support: %s\n", + info->sgx1 ? "enabled" : "disabled"); + monitor_printf(mon, "SGX2 support: %s\n", + info->sgx2 ? "enabled" : "disabled"); + monitor_printf(mon, "FLC support: %s\n", + info->flc ? 
"enabled" : "disabled"); + monitor_printf(mon, "size: %" PRIu64 "\n", + info->section_size); + + section_list = info->sections; + for (section = section_list; section; section = section->next) { + monitor_printf(mon, "NUMA node #%" PRId64 ": ", + section->value->node); + monitor_printf(mon, "size=%" PRIu64 "\n", + section->value->size); + } +} + +bool sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size) +{ + PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); + SGXEPCDevice *epc; + + if (pcms->sgx_epc.size == 0 || pcms->sgx_epc.nr_sections <= section_nr) { + return true; + } + + epc = pcms->sgx_epc.sections[section_nr]; + + *addr = epc->addr; + *size = memory_device_get_region_size(MEMORY_DEVICE(epc), &error_fatal); + + return false; +} + +void pc_machine_init_sgx_epc(PCMachineState *pcms) +{ + SGXEPCState *sgx_epc = &pcms->sgx_epc; + X86MachineState *x86ms = X86_MACHINE(pcms); + SgxEPCList *list = NULL; + Object *obj; + + memset(sgx_epc, 0, sizeof(SGXEPCState)); + if (!x86ms->sgx_epc_list) { + return; + } + + sgx_epc->base = x86ms->above_4g_mem_start + x86ms->above_4g_mem_size; + + memory_region_init(&sgx_epc->mr, OBJECT(pcms), "sgx-epc", UINT64_MAX); + memory_region_add_subregion(get_system_memory(), sgx_epc->base, + &sgx_epc->mr); + + for (list = x86ms->sgx_epc_list; list; list = list->next) { + obj = object_new("sgx-epc"); + + /* set the memdev link with memory backend */ + object_property_parse(obj, SGX_EPC_MEMDEV_PROP, list->value->memdev, + &error_fatal); + /* set the numa node property for sgx epc object */ + object_property_set_uint(obj, SGX_EPC_NUMA_NODE_PROP, list->value->node, + &error_fatal); + object_property_set_bool(obj, "realized", true, &error_fatal); + object_unref(obj); + } + + if ((sgx_epc->base + sgx_epc->size) < sgx_epc->base) { + error_report("Size of all 'sgx-epc' =0x%"PRIx64" causes EPC to wrap", + sgx_epc->size); + exit(EXIT_FAILURE); + } + + memory_region_set_size(&sgx_epc->mr, sgx_epc->size); + + /* register the reset callback for sgx epc */ + qemu_register_reset(sgx_epc_reset, NULL); +} diff --git a/hw/i386/trace-events b/hw/i386/trace-events index 5bf7e52bf5..04fd71bfc4 100644 --- a/hw/i386/trace-events +++ b/hw/i386/trace-events @@ -12,6 +12,8 @@ vtd_inv_desc_cc_devices(uint16_t sid, uint16_t fmask) "context invalidate device vtd_inv_desc_iotlb_global(void) "iotlb invalidate global" vtd_inv_desc_iotlb_domain(uint16_t domain) "iotlb invalidate whole domain 0x%"PRIx16 vtd_inv_desc_iotlb_pages(uint16_t domain, uint64_t addr, uint8_t mask) "iotlb invalidate domain 0x%"PRIx16" addr 0x%"PRIx64" mask 0x%"PRIx8 +vtd_inv_desc_iotlb_pasid_pages(uint16_t domain, uint64_t addr, uint8_t mask, uint32_t pasid) "iotlb invalidate domain 0x%"PRIx16" addr 0x%"PRIx64" mask 0x%"PRIx8" pasid 0x%"PRIx32 +vtd_inv_desc_iotlb_pasid(uint16_t domain, uint32_t pasid) "iotlb invalidate domain 0x%"PRIx16" pasid 0x%"PRIx32 vtd_inv_desc_wait_sw(uint64_t addr, uint32_t data) "wait invalidate status write addr 0x%"PRIx64" data 0x%"PRIx32 vtd_inv_desc_wait_irq(const char *msg) "%s" vtd_inv_desc_wait_write_fail(uint64_t hi, uint64_t lo) "write fail for wait desc hi 0x%"PRIx64" lo 0x%"PRIx64 @@ -33,15 +35,15 @@ vtd_fault_disabled(void) "Fault processing disabled for context entry" vtd_replay_ce_valid(const char *mode, uint8_t bus, uint8_t dev, uint8_t fn, uint16_t domain, uint64_t hi, uint64_t lo) "%s: replay valid context device %02"PRIx8":%02"PRIx8".%02"PRIx8" domain 0x%"PRIx16" hi 0x%"PRIx64" lo 0x%"PRIx64 vtd_replay_ce_invalid(uint8_t bus, uint8_t dev, uint8_t fn) "replay 
invalid context device %02"PRIx8":%02"PRIx8".%02"PRIx8 vtd_page_walk_level(uint64_t addr, uint32_t level, uint64_t start, uint64_t end) "walk (base=0x%"PRIx64", level=%"PRIu32") iova range 0x%"PRIx64" - 0x%"PRIx64 -vtd_page_walk_one(uint16_t domain, uint64_t iova, uint64_t gpa, uint64_t mask, int perm) "domain 0x%"PRIu16" iova 0x%"PRIx64" -> gpa 0x%"PRIx64" mask 0x%"PRIx64" perm %d" +vtd_page_walk_one(uint16_t domain, uint64_t iova, uint64_t gpa, uint64_t mask, int perm) "domain 0x%"PRIx16" iova 0x%"PRIx64" -> gpa 0x%"PRIx64" mask 0x%"PRIx64" perm %d" vtd_page_walk_one_skip_map(uint64_t iova, uint64_t mask, uint64_t translated) "iova 0x%"PRIx64" mask 0x%"PRIx64" translated 0x%"PRIx64 vtd_page_walk_one_skip_unmap(uint64_t iova, uint64_t mask) "iova 0x%"PRIx64" mask 0x%"PRIx64 vtd_page_walk_skip_read(uint64_t iova, uint64_t next) "Page walk skip iova 0x%"PRIx64" - 0x%"PRIx64" due to unable to read" vtd_page_walk_skip_reserve(uint64_t iova, uint64_t next) "Page walk skip iova 0x%"PRIx64" - 0x%"PRIx64" due to rsrv set" vtd_switch_address_space(uint8_t bus, uint8_t slot, uint8_t fn, bool on) "Device %02x:%02x.%x switching address space (iommu enabled=%d)" vtd_as_unmap_whole(uint8_t bus, uint8_t slot, uint8_t fn, uint64_t iova, uint64_t size) "Device %02x:%02x.%x start 0x%"PRIx64" size 0x%"PRIx64 -vtd_translate_pt(uint16_t sid, uint64_t addr) "source id 0x%"PRIu16", iova 0x%"PRIx64 -vtd_pt_enable_fast_path(uint16_t sid, bool success) "sid 0x%"PRIu16" %d" +vtd_translate_pt(uint16_t sid, uint64_t addr) "source id 0x%"PRIx16", iova 0x%"PRIx64 +vtd_pt_enable_fast_path(uint16_t sid, bool success) "sid 0x%"PRIx16" %d" vtd_irq_generate(uint64_t addr, uint64_t data) "addr 0x%"PRIx64" data 0x%"PRIx64 vtd_reg_read(uint64_t addr, uint64_t size) "addr 0x%"PRIx64" size 0x%"PRIx64 vtd_reg_write(uint64_t addr, uint64_t size, uint64_t val) "addr 0x%"PRIx64" size 0x%"PRIx64" value 0x%"PRIx64 diff --git a/hw/i386/vmmouse.c b/hw/i386/vmmouse.c index df4798f502..a56c185f15 100644 --- a/hw/i386/vmmouse.c +++ b/hw/i386/vmmouse.c @@ -158,6 +158,7 @@ static void vmmouse_read_id(VMMouseState *s) s->queue[s->nb_queue++] = VMMOUSE_VERSION; s->status = 0; + vmmouse_update_handler(s, s->absolute); } static void vmmouse_request_relative(VMMouseState *s) @@ -285,6 +286,10 @@ static void vmmouse_realizefn(DeviceState *dev, Error **errp) DPRINTF("vmmouse_init\n"); + if (!s->i8042) { + error_setg(errp, "'i8042' link is not set"); + return; + } if (!object_resolve_path_type("", TYPE_VMPORT, NULL)) { error_setg(errp, "vmmouse needs a machine with vmport"); return; diff --git a/hw/i386/x86-iommu-stub.c b/hw/i386/x86-iommu-stub.c index c5ba077f9d..781b5ff922 100644 --- a/hw/i386/x86-iommu-stub.c +++ b/hw/i386/x86-iommu-stub.c @@ -36,8 +36,3 @@ bool x86_iommu_ir_supported(X86IOMMUState *s) { return false; } - -IommuType x86_iommu_get_type(void) -{ - abort(); -} diff --git a/hw/i386/x86-iommu.c b/hw/i386/x86-iommu.c index 86ad03972e..01d11325a6 100644 --- a/hw/i386/x86-iommu.c +++ b/hw/i386/x86-iommu.c @@ -77,30 +77,17 @@ void x86_iommu_irq_to_msi_message(X86IOMMUIrq *irq, MSIMessage *msg_out) msg_out->data = msg.msi_data; } -/* Default X86 IOMMU device */ -static X86IOMMUState *x86_iommu_default = NULL; - -static void x86_iommu_set_default(X86IOMMUState *x86_iommu) -{ - assert(x86_iommu); - - if (x86_iommu_default) { - error_report("QEMU does not support multiple vIOMMUs " - "for x86 yet."); - exit(1); - } - - x86_iommu_default = x86_iommu; -} - X86IOMMUState *x86_iommu_get_default(void) { - return x86_iommu_default; -} + 
MachineState *ms = MACHINE(qdev_get_machine()); + PCMachineState *pcms = + PC_MACHINE(object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE)); -IommuType x86_iommu_get_type(void) -{ - return x86_iommu_default->type; + if (pcms && + object_dynamic_cast(OBJECT(pcms->iommu), TYPE_X86_IOMMU_DEVICE)) { + return X86_IOMMU_DEVICE(pcms->iommu); + } + return NULL; } static void x86_iommu_realize(DeviceState *dev, Error **errp) @@ -136,8 +123,6 @@ static void x86_iommu_realize(DeviceState *dev, Error **errp) if (x86_class->realize) { x86_class->realize(dev, errp); } - - x86_iommu_set_default(X86_IOMMU_DEVICE(dev)); } static Property x86_iommu_properties[] = { diff --git a/hw/i386/x86.c b/hw/i386/x86.c index 02dd69b9c1..018fbb8b0a 100644 --- a/hw/i386/x86.c +++ b/hw/i386/x86.c @@ -25,11 +25,12 @@ #include "qemu/option.h" #include "qemu/cutils.h" #include "qemu/units.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qapi/error.h" #include "qapi/qmp/qerror.h" #include "qapi/qapi-visit-common.h" +#include "qapi/clone-visitor.h" +#include "qapi/qapi-visit-machine.h" #include "qapi/visitor.h" #include "sysemu/qtest.h" #include "sysemu/whpx.h" @@ -37,6 +38,7 @@ #include "sysemu/replay.h" #include "sysemu/sysemu.h" #include "sysemu/cpu-timers.h" +#include "sysemu/xen.h" #include "trace.h" #include "hw/i386/x86.h" @@ -45,6 +47,7 @@ #include "hw/i386/fw_cfg.h" #include "hw/intc/i8259.h" #include "hw/rtc/mc146818rtc.h" +#include "target/i386/sev.h" #include "hw/acpi/cpu_hotplug.h" #include "hw/irq.h" @@ -80,24 +83,11 @@ inline void init_topo_info(X86CPUTopoInfo *topo_info, uint32_t x86_cpu_apic_id_from_index(X86MachineState *x86ms, unsigned int cpu_index) { - X86MachineClass *x86mc = X86_MACHINE_GET_CLASS(x86ms); X86CPUTopoInfo topo_info; - uint32_t correct_id; - static bool warned; init_topo_info(&topo_info, x86ms); - correct_id = x86_apicid_from_cpu_idx(&topo_info, cpu_index); - if (x86mc->compat_apic_id_mode) { - if (cpu_index != correct_id && !warned && !qtest_enabled()) { - error_report("APIC IDs set in compatibility mode, " - "CPU topology won't match the configuration"); - warned = true; - } - return cpu_index; - } else { - return correct_id; - } + return x86_apicid_from_cpu_idx(&topo_info, cpu_index); } @@ -133,6 +123,25 @@ void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version) */ x86ms->apic_id_limit = x86_cpu_apic_id_from_index(x86ms, ms->smp.max_cpus - 1) + 1; + + /* + * Can we support APIC ID 255 or higher? + * + * Under Xen: yes. + * With userspace emulated lapic: no + * With KVM's in-kernel lapic: only if X2APIC API is enabled. 
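+     *
+     * The legacy xAPIC interface only carries 8-bit APIC IDs, which is why
+     * IDs of 255 and above additionally require the in-kernel irqchip with
+     * the X2APIC API enabled when running under KVM.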
+ */ + if (x86ms->apic_id_limit > 255 && !xen_enabled() && + (!kvm_irqchip_in_kernel() || !kvm_enable_x2apic())) { + error_report("current -smp configuration requires kernel " + "irqchip and X2APIC API support."); + exit(EXIT_FAILURE); + } + + if (kvm_enabled()) { + kvm_set_max_apic_id(x86ms->apic_id_limit); + } + possible_cpus = mc->possible_cpu_arch_ids(ms); for (i = 0; i < ms->smp.cpus; i++) { x86_cpu_new(x86ms, possible_cpus->cpus[i].arch_id, &error_fatal); @@ -766,9 +775,9 @@ static bool load_elfboot(const char *kernel_filename, void x86_load_linux(X86MachineState *x86ms, FWCfgState *fw_cfg, int acpi_data_size, - bool pvh_enabled, - bool linuxboot_dma_enabled) + bool pvh_enabled) { + bool linuxboot_dma_enabled = X86_MACHINE_GET_CLASS(x86ms)->fwcfg_dma_enabled; uint16_t protocol; int setup_size, kernel_size, cmdline_size; int dtb_size, setup_data_offset; @@ -783,6 +792,7 @@ void x86_load_linux(X86MachineState *x86ms, const char *initrd_filename = machine->initrd_filename; const char *dtb_filename = machine->dtb; const char *kernel_cmdline = machine->kernel_cmdline; + SevKernelLoaderContext sev_load_ctx = {}; /* Align to 16 bytes as a paranoia measure */ cmdline_size = (strlen(kernel_cmdline) + 16) & ~15; @@ -815,7 +825,7 @@ void x86_load_linux(X86MachineState *x86ms, * PVH), so we try multiboot first since we check the multiboot magic * header before to load it. */ - if (load_multiboot(fw_cfg, f, kernel_filename, initrd_filename, + if (load_multiboot(x86ms, fw_cfg, f, kernel_filename, initrd_filename, kernel_cmdline, kernel_size, header)) { return; } @@ -929,6 +939,8 @@ void x86_load_linux(X86MachineState *x86ms, fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_ADDR, cmdline_addr); fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(kernel_cmdline) + 1); fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline); + sev_load_ctx.cmdline_data = (char *)kernel_cmdline; + sev_load_ctx.cmdline_size = strlen(kernel_cmdline) + 1; if (protocol >= 0x202) { stl_p(header + 0x228, cmdline_addr); @@ -1010,6 +1022,8 @@ void x86_load_linux(X86MachineState *x86ms, fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr); fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, initrd_data, initrd_size); + sev_load_ctx.initrd_data = initrd_data; + sev_load_ctx.initrd_size = initrd_size; stl_p(header + 0x218, initrd_addr); stl_p(header + 0x21c, initrd_size); @@ -1068,15 +1082,32 @@ void x86_load_linux(X86MachineState *x86ms, load_image_size(dtb_filename, setup_data->data, dtb_size); } - memcpy(setup, header, MIN(sizeof(header), setup_size)); + /* + * If we're starting an encrypted VM, it will be OVMF based, which uses the + * efi stub for booting and doesn't require any values to be placed in the + * kernel header. We therefore don't update the header so the hash of the + * kernel on the other side of the fw_cfg interface matches the hash of the + * file the user passed in. 
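+     *
+     * The hashes of the kernel, initrd and cmdline passed over fw_cfg are
+     * instead published via sev_add_kernel_loader_hashes() below, so a
+     * suitably built firmware can verify the blobs it fetches.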
+ */ + if (!sev_enabled()) { + memcpy(setup, header, MIN(sizeof(header), setup_size)); + } fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size); + sev_load_ctx.kernel_data = (char *)kernel; + sev_load_ctx.kernel_size = kernel_size; fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr); fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, setup_size); fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA, setup, setup_size); + sev_load_ctx.setup_data = (char *)setup; + sev_load_ctx.setup_size = setup_size; + + if (sev_enabled()) { + sev_add_kernel_loader_hashes(&sev_load_ctx, &error_fatal); + } option_rom[nb_option_roms].bootindex = 0; option_rom[nb_option_roms].name = "linuxboot.bin"; @@ -1093,7 +1124,7 @@ void x86_bios_rom_init(MachineState *ms, const char *default_firmware, char *filename; MemoryRegion *bios, *isa_bios; int bios_size, isa_bios_size; - int ret; + ssize_t ret; /* BIOS load */ bios_name = ms->firmware ?: default_firmware; @@ -1109,14 +1140,25 @@ void x86_bios_rom_init(MachineState *ms, const char *default_firmware, } bios = g_malloc(sizeof(*bios)); memory_region_init_ram(bios, NULL, "pc.bios", bios_size, &error_fatal); - if (!isapc_ram_fw) { - memory_region_set_readonly(bios, true); - } - ret = rom_add_file_fixed(bios_name, (uint32_t)(-bios_size), -1); - if (ret != 0) { - bios_error: - fprintf(stderr, "qemu: could not load PC BIOS '%s'\n", bios_name); - exit(1); + if (sev_enabled()) { + /* + * The concept of a "reset" simply doesn't exist for + * confidential computing guests, we have to destroy and + * re-launch them instead. So there is no need to register + * the firmware as rom to properly re-initialize on reset. + * Just go for a straight file load instead. 
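+         *
+         * x86_firmware_configure() below then parses the loaded image for
+         * the OVMF metadata and encrypts it in place for the SEV guest.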
+ */ + void *ptr = memory_region_get_ram_ptr(bios); + load_image_size(filename, ptr, bios_size); + x86_firmware_configure(ptr, bios_size); + } else { + if (!isapc_ram_fw) { + memory_region_set_readonly(bios, true); + } + ret = rom_add_file_fixed(bios_name, (uint32_t)(-bios_size), -1); + if (ret != 0) { + goto bios_error; + } } g_free(filename); @@ -1137,6 +1179,11 @@ void x86_bios_rom_init(MachineState *ms, const char *default_firmware, memory_region_add_subregion(rom_memory, (uint32_t)(-bios_size), bios); + return; + +bios_error: + fprintf(stderr, "qemu: could not load PC BIOS '%s'\n", bios_name); + exit(1); } bool x86_machine_is_smm_enabled(const X86MachineState *x86ms) @@ -1206,6 +1253,40 @@ static void x86_machine_set_acpi(Object *obj, Visitor *v, const char *name, visit_type_OnOffAuto(v, name, &x86ms->acpi, errp); } +static void x86_machine_get_pit(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + OnOffAuto pit = x86ms->pit; + + visit_type_OnOffAuto(v, name, &pit, errp); +} + +static void x86_machine_set_pit(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj);; + + visit_type_OnOffAuto(v, name, &x86ms->pit, errp); +} + +static void x86_machine_get_pic(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + OnOffAuto pic = x86ms->pic; + + visit_type_OnOffAuto(v, name, &pic, errp); +} + +static void x86_machine_set_pic(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + + visit_type_OnOffAuto(v, name, &x86ms->pic, errp); +} + static char *x86_machine_get_oem_id(Object *obj, Error **errp) { X86MachineState *x86ms = X86_MACHINE(obj); @@ -1268,16 +1349,40 @@ static void x86_machine_set_bus_lock_ratelimit(Object *obj, Visitor *v, visit_type_uint64(v, name, &x86ms->bus_lock_ratelimit, errp); } +static void machine_get_sgx_epc(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + SgxEPCList *list = x86ms->sgx_epc_list; + + visit_type_SgxEPCList(v, name, &list, errp); +} + +static void machine_set_sgx_epc(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + SgxEPCList *list; + + list = x86ms->sgx_epc_list; + visit_type_SgxEPCList(v, name, &x86ms->sgx_epc_list, errp); + + qapi_free_SgxEPCList(list); +} + static void x86_machine_initfn(Object *obj) { X86MachineState *x86ms = X86_MACHINE(obj); x86ms->smm = ON_OFF_AUTO_AUTO; x86ms->acpi = ON_OFF_AUTO_AUTO; + x86ms->pit = ON_OFF_AUTO_AUTO; + x86ms->pic = ON_OFF_AUTO_AUTO; x86ms->pci_irq_mask = ACPI_BUILD_PCI_IRQS; x86ms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6); x86ms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8); x86ms->bus_lock_ratelimit = 0; + x86ms->above_4g_mem_start = 4 * GiB; } static void x86_machine_class_init(ObjectClass *oc, void *data) @@ -1289,8 +1394,8 @@ static void x86_machine_class_init(ObjectClass *oc, void *data) mc->cpu_index_to_instance_props = x86_cpu_index_to_props; mc->get_default_cpu_node_id = x86_get_default_cpu_node_id; mc->possible_cpu_arch_ids = x86_possible_cpu_arch_ids; - x86mc->compat_apic_id_mode = false; x86mc->save_tsc_khz = true; + x86mc->fwcfg_dma_enabled = true; nc->nmi_monitor_handler = x86_nmi; object_class_property_add(oc, X86_MACHINE_SMM, "OnOffAuto", @@ -1305,6 +1410,20 @@ static void 
x86_machine_class_init(ObjectClass *oc, void *data) object_class_property_set_description(oc, X86_MACHINE_ACPI, "Enable ACPI"); + object_class_property_add(oc, X86_MACHINE_PIT, "OnOffAuto", + x86_machine_get_pit, + x86_machine_set_pit, + NULL, NULL); + object_class_property_set_description(oc, X86_MACHINE_PIT, + "Enable i8254 PIT"); + + object_class_property_add(oc, X86_MACHINE_PIC, "OnOffAuto", + x86_machine_get_pic, + x86_machine_set_pic, + NULL, NULL); + object_class_property_set_description(oc, X86_MACHINE_PIC, + "Enable i8259 PIC"); + object_class_property_add_str(oc, X86_MACHINE_OEM_ID, x86_machine_get_oem_id, x86_machine_set_oem_id); @@ -1327,6 +1446,12 @@ static void x86_machine_class_init(ObjectClass *oc, void *data) x86_machine_set_bus_lock_ratelimit, NULL, NULL); object_class_property_set_description(oc, X86_MACHINE_BUS_LOCK_RATELIMIT, "Set the ratelimit for the bus locks acquired in VMs"); + + object_class_property_add(oc, "sgx-epc", "SgxEPC", + machine_get_sgx_epc, machine_set_sgx_epc, + NULL, NULL); + object_class_property_set_description(oc, "sgx-epc", + "SGX EPC device"); } static const TypeInfo x86_machine_info = { diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c index 9b432773f0..e4293d6d66 100644 --- a/hw/i386/xen/xen-hvm.c +++ b/hw/i386/xen/xen-hvm.c @@ -15,7 +15,6 @@ #include "hw/pci/pci.h" #include "hw/pci/pci_host.h" #include "hw/i386/pc.h" -#include "hw/southbridge/piix.h" #include "hw/irq.h" #include "hw/hw.h" #include "hw/i386/apic-msidef.h" @@ -149,21 +148,9 @@ void xen_piix3_set_irq(void *opaque, int irq_num, int level) irq_num & 3, level); } -void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len) +int xen_set_pci_link_route(uint8_t link, uint8_t irq) { - int i; - - /* Scan for updates to PCI link routes (0x60-0x63). 
*/ - for (i = 0; i < len; i++) { - uint8_t v = (val >> (8 * i)) & 0xff; - if (v & 0x80) { - v = 0; - } - v &= 0xf; - if (((address + i) >= PIIX_PIRQCA) && ((address + i) <= PIIX_PIRQCD)) { - xen_set_pci_link_route(xen_domid, address + i - PIIX_PIRQCA, v); - } - } + return xendevicemodel_set_pci_link_route(xen_dmod, xen_domid, link, irq); } int xen_is_pirq_msi(uint32_t msi_data) @@ -396,7 +383,7 @@ go_physmap: mr_name = memory_region_name(mr); - physmap = g_malloc(sizeof(XenPhysmap)); + physmap = g_new(XenPhysmap, 1); physmap->start_addr = start_addr; physmap->size = size; @@ -721,6 +708,7 @@ static void xen_log_global_stop(MemoryListener *listener) } static MemoryListener xen_memory_listener = { + .name = "xen-memory", .region_add = xen_region_add, .region_del = xen_region_del, .log_start = xen_log_start, @@ -732,6 +720,7 @@ static MemoryListener xen_memory_listener = { }; static MemoryListener xen_io_listener = { + .name = "xen-io", .region_add = xen_io_add, .region_del = xen_io_del, .priority = 10, @@ -1085,10 +1074,11 @@ static void handle_ioreq(XenIOState *state, ioreq_t *req) } } -static int handle_buffered_iopage(XenIOState *state) +static bool handle_buffered_iopage(XenIOState *state) { buffered_iopage_t *buf_page = state->buffered_io_page; buf_ioreq_t *buf_req = NULL; + bool handled_ioreq = false; ioreq_t req; int qw; @@ -1142,9 +1132,10 @@ static int handle_buffered_iopage(XenIOState *state) assert(!req.data_is_ptr); qatomic_add(&buf_page->read_pointer, qw + 1); + handled_ioreq = true; } - return req.count; + return handled_ioreq; } static void handle_buffered_io(void *opaque) @@ -1277,7 +1268,7 @@ static void xen_read_physmap(XenIOState *state) return; for (i = 0; i < num; i++) { - physmap = g_malloc(sizeof (XenPhysmap)); + physmap = g_new(XenPhysmap, 1); physmap->phys_offset = strtoull(entries[i], NULL, 16); snprintf(path, sizeof(path), "/local/domain/0/device-model/%d/physmap/%s/start_addr", @@ -1406,7 +1397,7 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory) xen_pfn_t ioreq_pfn; XenIOState *state; - state = g_malloc0(sizeof (XenIOState)); + state = g_new0(XenIOState, 1); state->xce_handle = xenevtchn_open(NULL, 0); if (state->xce_handle == NULL) { @@ -1459,7 +1450,7 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory) } /* Note: cpus is empty at this point in init */ - state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *)); + state->cpu_by_vcpu_id = g_new0(CPUState *, max_cpus); rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true); if (rc < 0) { @@ -1468,7 +1459,7 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory) goto err; } - state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t)); + state->ioreq_local_port = g_new0(evtchn_port_t, max_cpus); /* FIXME: how about if we overflow the page here? 
*/ for (i = 0; i < max_cpus; i++) { @@ -1611,8 +1602,8 @@ void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length) void qmp_xen_set_global_dirty_log(bool enable, Error **errp) { if (enable) { - memory_global_dirty_log_start(); + memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION); } else { - memory_global_dirty_log_stop(); + memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION); } } diff --git a/hw/i386/xen/xen-mapcache.c b/hw/i386/xen/xen-mapcache.c index bd47c3d672..a2f93096e7 100644 --- a/hw/i386/xen/xen-mapcache.c +++ b/hw/i386/xen/xen-mapcache.c @@ -52,7 +52,7 @@ typedef struct MapCacheEntry { hwaddr paddr_index; uint8_t *vaddr_base; unsigned long *valid_mapping; - uint8_t lock; + uint32_t lock; #define XEN_MAPCACHE_ENTRY_DUMMY (1 << 0) uint8_t flags; hwaddr size; @@ -108,7 +108,7 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque) unsigned long size; struct rlimit rlimit_as; - mapcache = g_malloc0(sizeof (MapCache)); + mapcache = g_new0(MapCache, 1); mapcache->phys_offset_to_gaddr = f; mapcache->opaque = opaque; @@ -164,8 +164,8 @@ static void xen_remap_bucket(MapCacheEntry *entry, trace_xen_remap_bucket(address_index); - pfns = g_malloc0(nb_pfn * sizeof (xen_pfn_t)); - err = g_malloc0(nb_pfn * sizeof (int)); + pfns = g_new0(xen_pfn_t, nb_pfn); + err = g_new0(int, nb_pfn); if (entry->vaddr_base != NULL) { if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) { @@ -231,8 +231,8 @@ static void xen_remap_bucket(MapCacheEntry *entry, entry->vaddr_base = vaddr_base; entry->paddr_index = address_index; entry->size = size; - entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) * - BITS_TO_LONGS(size >> XC_PAGE_SHIFT)); + entry->valid_mapping = g_new0(unsigned long, + BITS_TO_LONGS(size >> XC_PAGE_SHIFT)); if (dummy) { entry->flags |= XEN_MAPCACHE_ENTRY_DUMMY; @@ -319,7 +319,7 @@ tryagain: pentry = free_pentry; } if (!entry) { - entry = g_malloc0(sizeof (MapCacheEntry)); + entry = g_new0(MapCacheEntry, 1); pentry->next = entry; xen_remap_bucket(entry, NULL, cache_size, address_index, dummy); } else if (!entry->lock) { @@ -353,8 +353,14 @@ tryagain: mapcache->last_entry = entry; if (lock) { - MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev)); + MapCacheRev *reventry = g_new0(MapCacheRev, 1); entry->lock++; + if (entry->lock == 0) { + fprintf(stderr, + "mapcache entry lock overflow: "TARGET_FMT_plx" -> %p\n", + entry->paddr_index, entry->vaddr_base); + abort(); + } reventry->dma = dma; reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset; reventry->paddr_index = mapcache->last_entry->paddr_index; diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c index 72028449ba..a64265cca0 100644 --- a/hw/i386/xen/xen_platform.c +++ b/hw/i386/xen/xen_platform.c @@ -26,6 +26,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "hw/ide.h" +#include "hw/ide/pci.h" #include "hw/pci/pci.h" #include "hw/xen/xen_common.h" #include "migration/vmstate.h" @@ -134,6 +135,51 @@ static void pci_unplug_nics(PCIBus *bus) pci_for_each_device(bus, 0, unplug_nic, NULL); } +/* + * The Xen HVM unplug protocol [1] specifies a mechanism to allow guests to + * request unplug of 'aux' disks (which is stated to mean all IDE disks, + * except the primary master). + * + * NOTE: The semantics of what happens if unplug of all disks and 'aux' disks + * is simultaneously requested is not clear. The implementation assumes + * that an 'all' request overrides an 'aux' request. 
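+ *       In practice the unplug loop below simply starts at unit 1 for an
+ *       'aux' request, so the primary master is left attached.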
+ * + * [1] https://xenbits.xen.org/gitweb/?p=xen.git;a=blob;f=docs/misc/hvm-emulated-unplug.pandoc + */ +static void pci_xen_ide_unplug(DeviceState *dev, bool aux) +{ + PCIIDEState *pci_ide; + int i; + IDEDevice *idedev; + IDEBus *idebus; + BlockBackend *blk; + + pci_ide = PCI_IDE(dev); + + for (i = aux ? 1 : 0; i < 4; i++) { + idebus = &pci_ide->bus[i / 2]; + blk = idebus->ifs[i % 2].blk; + + if (blk && idebus->ifs[i % 2].drive_kind != IDE_CD) { + if (!(i % 2)) { + idedev = idebus->master; + } else { + idedev = idebus->slave; + } + + blk_drain(blk); + blk_flush(blk); + + blk_detach_dev(blk, DEVICE(idedev)); + idebus->ifs[i % 2].blk = NULL; + idedev->conf.blk = NULL; + monitor_remove_blk(blk); + blk_unref(blk); + } + } + qdev_reset_all(dev); +} + static void unplug_disks(PCIBus *b, PCIDevice *d, void *opaque) { uint32_t flags = *(uint32_t *)opaque; @@ -147,7 +193,7 @@ static void unplug_disks(PCIBus *b, PCIDevice *d, void *opaque) switch (pci_get_word(d->config + PCI_CLASS_DEVICE)) { case PCI_CLASS_STORAGE_IDE: - pci_piix3_xen_ide_unplug(DEVICE(d), aux); + pci_xen_ide_unplug(DEVICE(d), aux); break; case PCI_CLASS_STORAGE_SCSI: diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c index f2c5157483..7ce001cacd 100644 --- a/hw/ide/ahci.c +++ b/hw/ide/ahci.c @@ -249,7 +249,8 @@ static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr, dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len); } - *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE); + *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE, + MEMTXATTRS_UNSPECIFIED); if (len < wanted && *ptr) { dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len); *ptr = NULL; @@ -939,7 +940,8 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist, /* map PRDT */ if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len, - DMA_DIRECTION_TO_DEVICE))){ + DMA_DIRECTION_TO_DEVICE, + MEMTXATTRS_UNSPECIFIED))){ trace_ahci_populate_sglist_no_map(ad->hba, ad->port_no); return -1; } @@ -1157,7 +1159,7 @@ static void process_ncq_command(AHCIState *s, int port, const uint8_t *cmd_fis, ahci_populate_sglist(ad, &ncq_tfs->sglist, ncq_tfs->cmdh, size, 0); if (ncq_tfs->sglist.size < size) { - error_report("ahci: PRDT length for NCQ command (0x%zx) " + error_report("ahci: PRDT length for NCQ command (0x" DMA_ADDR_FMT ") " "is smaller than the requested size (0x%zx)", ncq_tfs->sglist.size, size); ncq_err(ncq_tfs); @@ -1301,7 +1303,7 @@ static int handle_cmd(AHCIState *s, int port, uint8_t slot) tbl_addr = le64_to_cpu(cmd->tbl_addr); cmd_len = 0x80; cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len, - DMA_DIRECTION_TO_DEVICE); + DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED); if (!cmd_fis) { trace_handle_cmd_badfis(s, port); return -1; @@ -1379,10 +1381,12 @@ static void ahci_pio_transfer(const IDEDMA *dma) has_sglist ? 
"" : "o"); if (has_sglist && size) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; + if (is_write) { - dma_buf_write(s->data_ptr, size, &s->sg); + dma_buf_write(s->data_ptr, size, NULL, &s->sg, attrs); } else { - dma_buf_read(s->data_ptr, size, &s->sg); + dma_buf_read(s->data_ptr, size, NULL, &s->sg, attrs); } } @@ -1475,9 +1479,9 @@ static int ahci_dma_rw_buf(const IDEDMA *dma, bool is_write) } if (is_write) { - dma_buf_read(p, l, &s->sg); + dma_buf_read(p, l, NULL, &s->sg, MEMTXATTRS_UNSPECIFIED); } else { - dma_buf_write(p, l, &s->sg); + dma_buf_write(p, l, NULL, &s->sg, MEMTXATTRS_UNSPECIFIED); } /* free sglist, update byte count */ @@ -1548,7 +1552,7 @@ void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports) for (i = 0; i < s->ports; i++) { AHCIDevice *ad = &s->dev[i]; - ide_bus_new(&ad->port, sizeof(ad->port), qdev, i, 1); + ide_bus_init(&ad->port, sizeof(ad->port), qdev, i, 1); ide_init2(&ad->port, irqs[i]); ad->hba = s; diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c index b626199e3d..0a9aa6f009 100644 --- a/hw/ide/atapi.c +++ b/hw/ide/atapi.c @@ -98,11 +98,11 @@ cd_read_sector_sync(IDEState *s) switch (s->cd_sector_size) { case 2048: ret = blk_pread(s->blk, (int64_t)s->lba << ATAPI_SECTOR_BITS, - s->io_buffer, ATAPI_SECTOR_SIZE); + ATAPI_SECTOR_SIZE, s->io_buffer, 0); break; case 2352: ret = blk_pread(s->blk, (int64_t)s->lba << ATAPI_SECTOR_BITS, - s->io_buffer + 16, ATAPI_SECTOR_SIZE); + ATAPI_SECTOR_SIZE, s->io_buffer + 16, 0); if (ret >= 0) { cd_data_to_raw(s->io_buffer, s->lba); } @@ -318,7 +318,7 @@ static void ide_atapi_cmd_reply(IDEState *s, int size, int max_size) } } -/* start a CD-CDROM read command */ +/* start a CD-ROM read command */ static void ide_atapi_cmd_read_pio(IDEState *s, int lba, int nb_sectors, int sector_size) { @@ -417,7 +417,7 @@ eot: ide_set_inactive(s, false); } -/* start a CD-CDROM read command with DMA */ +/* start a CD-ROM read command with DMA */ /* XXX: test if DMA is available */ static void ide_atapi_cmd_read_dma(IDEState *s, int lba, int nb_sectors, int sector_size) diff --git a/hw/ide/cmd646.c b/hw/ide/cmd646.c index c254631485..94c576262c 100644 --- a/hw/ide/cmd646.c +++ b/hw/ide/cmd646.c @@ -293,7 +293,7 @@ static void pci_cmd646_ide_realize(PCIDevice *dev, Error **errp) qdev_init_gpio_in(ds, cmd646_set_irq, 2); for (i = 0; i < 2; i++) { - ide_bus_new(&d->bus[i], sizeof(d->bus[i]), ds, i, 2); + ide_bus_init(&d->bus[i], sizeof(d->bus[i]), ds, i, 2); ide_init2(&d->bus[i], qdev_get_gpio_in(ds, i)); bmdma_init(&d->bus[i], &d->bmdma[i], d); diff --git a/hw/ide/core.c b/hw/ide/core.c index eb7cacd164..be6a270a71 100644 --- a/hw/ide/core.c +++ b/hw/ide/core.c @@ -29,6 +29,8 @@ #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "qemu/timer.h" +#include "qemu/hw-version.h" +#include "qemu/memalign.h" #include "sysemu/sysemu.h" #include "sysemu/blockdev.h" #include "sysemu/dma.h" @@ -99,8 +101,12 @@ static void put_le16(uint16_t *p, unsigned int v) static void ide_identify_size(IDEState *s) { uint16_t *p = (uint16_t *)s->identify_data; - put_le16(p + 60, s->nb_sectors); - put_le16(p + 61, s->nb_sectors >> 16); + int64_t nb_sectors_lba28 = s->nb_sectors; + if (nb_sectors_lba28 >= 1 << 28) { + nb_sectors_lba28 = (1 << 28) - 1; + } + put_le16(p + 60, nb_sectors_lba28); + put_le16(p + 61, nb_sectors_lba28 >> 16); put_le16(p + 100, s->nb_sectors); put_le16(p + 101, s->nb_sectors >> 16); put_le16(p + 102, s->nb_sectors >> 32); @@ -437,12 +443,16 @@ static const AIOCBInfo trim_aiocb_info = { static void 
ide_trim_bh_cb(void *opaque) { TrimAIOCB *iocb = opaque; + BlockBackend *blk = iocb->s->blk; iocb->common.cb(iocb->common.opaque, iocb->ret); qemu_bh_delete(iocb->bh); iocb->bh = NULL; qemu_aio_unref(iocb); + + /* Paired with an increment in ide_issue_trim() */ + blk_dec_in_flight(blk); } static void ide_issue_trim_cb(void *opaque, int ret) @@ -512,6 +522,9 @@ BlockAIOCB *ide_issue_trim( IDEState *s = opaque; TrimAIOCB *iocb; + /* Paired with a decrement in ide_trim_bh_cb() */ + blk_inc_in_flight(s->blk); + iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque); iocb->s = s; iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb); @@ -1335,6 +1348,11 @@ static void ide_reset(IDEState *s) s->pio_aiocb = NULL; } + if (s->reset_reverts) { + s->reset_reverts = false; + s->heads = s->drive_heads; + s->sectors = s->drive_sectors; + } if (s->drive_kind == IDE_CFATA) s->mult_sectors = 0; else @@ -1613,6 +1631,20 @@ static bool cmd_check_power_mode(IDEState *s, uint8_t cmd) return true; } +/* INITIALIZE DEVICE PARAMETERS */ +static bool cmd_specify(IDEState *s, uint8_t cmd) +{ + if (s->blk && s->drive_kind != IDE_CD) { + s->heads = (s->select & (ATA_DEV_HS)) + 1; + s->sectors = s->nsector; + ide_set_irq(s->bus); + } else { + ide_abort_command(s); + } + + return true; +} + static bool cmd_set_features(IDEState *s, uint8_t cmd) { uint16_t *identify_data; @@ -1636,7 +1668,11 @@ static bool cmd_set_features(IDEState *s, uint8_t cmd) ide_flush_cache(s); return false; case 0xcc: /* reverting to power-on defaults enable */ + s->reset_reverts = true; + return true; case 0x66: /* reverting to power-on defaults disable */ + s->reset_reverts = false; + return true; case 0xaa: /* read look-ahead enable */ case 0x55: /* read look-ahead disable */ case 0x05: /* set advanced power management mode */ @@ -1699,8 +1735,14 @@ static bool cmd_identify_packet(IDEState *s, uint8_t cmd) return false; } +/* EXECUTE DEVICE DIAGNOSTIC */ static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd) { + /* + * Clear the device register per the ATA (v6) specification, + * because ide_set_signature does not clear LBA or drive bits. + */ + s->select = (ATA_DEV_ALWAYS_ON); ide_set_signature(s); if (s->drive_kind == IDE_CD) { @@ -2049,7 +2091,7 @@ static const struct { [WIN_SEEK] = { cmd_seek, HD_CFA_OK | SET_DSC }, [CFA_TRANSLATE_SECTOR] = { cmd_cfa_translate_sector, CFA_OK }, [WIN_DIAGNOSE] = { cmd_exec_dev_diagnostic, ALL_OK }, - [WIN_SPECIFY] = { cmd_nop, HD_CFA_OK | SET_DSC }, + [WIN_SPECIFY] = { cmd_specify, HD_CFA_OK | SET_DSC }, [WIN_STANDBYNOW2] = { cmd_nop, HD_CFA_OK }, [WIN_IDLEIMMEDIATE2] = { cmd_nop, HD_CFA_OK }, [WIN_STANDBY2] = { cmd_nop, HD_CFA_OK }, @@ -2171,7 +2213,11 @@ uint32_t ide_ioport_read(void *opaque, uint32_t addr) hob = bus->cmd & (IDE_CTRL_HOB); switch (reg_num) { case ATA_IOPORT_RR_DATA: - ret = 0xff; + /* + * The pre-GRUB Solaris x86 bootloader relies upon inb + * consuming a word from the drive's sector buffer. 
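+         *
+         * A rough illustration with hypothetical buffer contents: if the
+         * current PIO word is 0xBEEF, the byte read below now returns
+         * 0xEF (the low byte returned by ide_data_readw()) and consumes
+         * a full word from the sector buffer, instead of returning a
+         * constant 0xff as the old code did.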
+ */ + ret = ide_data_readw(bus, addr) & 0xff; break; case ATA_IOPORT_RR_ERROR: if ((!bus->ifs[0].blk && !bus->ifs[1].blk) || @@ -2548,8 +2594,8 @@ int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind, blk_get_geometry(blk, &nb_sectors); s->cylinders = cylinders; - s->heads = heads; - s->sectors = secs; + s->heads = s->drive_heads = heads; + s->sectors = s->drive_sectors = secs; s->chs_trans = chs_trans; s->nb_sectors = nb_sectors; s->wwn = wwn; @@ -2561,7 +2607,6 @@ int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind, s->smart_selftest_count = 0; if (kind == IDE_CD) { blk_set_dev_ops(blk, &ide_cd_block_ops, s); - blk_set_guest_block_size(blk, 2048); } else { if (!blk_is_inserted(s->blk)) { error_setg(errp, "Device needs media, but drive is empty"); diff --git a/hw/ide/isa.c b/hw/ide/isa.c index 6bc19de226..8bedbd13f1 100644 --- a/hw/ide/isa.c +++ b/hw/ide/isa.c @@ -73,9 +73,9 @@ static void isa_ide_realizefn(DeviceState *dev, Error **errp) ISADevice *isadev = ISA_DEVICE(dev); ISAIDEState *s = ISA_IDE(dev); - ide_bus_new(&s->bus, sizeof(s->bus), dev, 0, 2); + ide_bus_init(&s->bus, sizeof(s->bus), dev, 0, 2); ide_init_ioport(&s->bus, isadev, s->iobase, s->iobase2); - isa_init_irq(isadev, &s->irq, s->isairq); + s->irq = isa_get_irq(isadev, s->isairq); ide_init2(&s->bus, s->irq); vmstate_register(VMSTATE_IF(dev), 0, &vmstate_ide_isa, s); ide_register_restart_cb(&s->bus); diff --git a/hw/ide/macio.c b/hw/ide/macio.c index b270a10163..e604466acb 100644 --- a/hw/ide/macio.c +++ b/hw/ide/macio.c @@ -24,7 +24,6 @@ */ #include "qemu/osdep.h" -#include "hw/ppc/mac.h" #include "hw/ppc/mac_dbdma.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" @@ -97,7 +96,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret) /* Non-block ATAPI transfer - just copy to RAM */ s->io_buffer_size = MIN(s->io_buffer_size, io->len); dma_memory_write(&address_space_memory, io->addr, s->io_buffer, - s->io_buffer_size); + s->io_buffer_size, MEMTXATTRS_UNSPECIFIED); io->len = 0; ide_atapi_cmd_ok(s); m->dma_active = false; @@ -267,7 +266,9 @@ static uint64_t pmac_ide_read(void *opaque, hwaddr addr, unsigned size) switch (reg) { case 0x0: - if (size == 2) { + if (size == 1) { + retval = ide_data_readw(&d->bus, 0) & 0xFF; + } else if (size == 2) { retval = ide_data_readw(&d->bus, 0); } else if (size == 4) { retval = ide_data_readl(&d->bus, 0); @@ -449,7 +450,7 @@ static void macio_ide_initfn(Object *obj) SysBusDevice *d = SYS_BUS_DEVICE(obj); MACIOIDEState *s = MACIO_IDE(obj); - ide_bus_new(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2); + ide_bus_init(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2); memory_region_init_io(&s->mem, obj, &pmac_ide_ops, s, "pmac-ide", 0x1000); sysbus_init_mmio(d, &s->mem); sysbus_init_irq(d, &s->real_ide_irq); diff --git a/hw/ide/microdrive.c b/hw/ide/microdrive.c index 58a14fea36..56c5be3655 100644 --- a/hw/ide/microdrive.c +++ b/hw/ide/microdrive.c @@ -175,7 +175,7 @@ static void md_attr_write(PCMCIACardState *card, uint32_t at, uint8_t value) case 0x00: /* Configuration Option Register */ s->opt = value & 0xcf; if (value & OPT_SRESET) { - device_legacy_reset(DEVICE(s)); + device_cold_reset(DEVICE(s)); } md_interrupt_update(s); break; @@ -318,7 +318,7 @@ static void md_common_write(PCMCIACardState *card, uint32_t at, uint16_t value) case 0xe: /* Device Control */ s->ctrl = value; if (value & CTRL_SRST) { - device_legacy_reset(DEVICE(s)); + device_cold_reset(DEVICE(s)); } md_interrupt_update(s); break; @@ -543,7 +543,7 @@ static int 
dscm1xxxx_attach(PCMCIACardState *card) md->attr_base = pcc->cis[0x74] | (pcc->cis[0x76] << 8); md->io_base = 0x0; - device_legacy_reset(DEVICE(md)); + device_cold_reset(DEVICE(md)); md_interrupt_update(md); return 0; @@ -553,7 +553,7 @@ static int dscm1xxxx_detach(PCMCIACardState *card) { MicroDriveState *md = MICRODRIVE(card); - device_legacy_reset(DEVICE(md)); + device_cold_reset(DEVICE(md)); return 0; } @@ -605,7 +605,7 @@ static void microdrive_init(Object *obj) { MicroDriveState *md = MICRODRIVE(obj); - ide_bus_new(&md->bus, sizeof(md->bus), DEVICE(obj), 0, 1); + ide_bus_init(&md->bus, sizeof(md->bus), DEVICE(obj), 0, 1); } static void microdrive_class_init(ObjectClass *oc, void *data) diff --git a/hw/ide/mmio.c b/hw/ide/mmio.c index 36e2f4790a..fb2ebd4847 100644 --- a/hw/ide/mmio.c +++ b/hw/ide/mmio.c @@ -142,7 +142,7 @@ static void mmio_ide_initfn(Object *obj) SysBusDevice *d = SYS_BUS_DEVICE(obj); MMIOState *s = MMIO_IDE(obj); - ide_bus_new(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2); + ide_bus_init(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2); sysbus_init_irq(d, &s->irq); } diff --git a/hw/ide/piix.c b/hw/ide/piix.c index e372e0fa84..83f5d5b0c7 100644 --- a/hw/ide/piix.c +++ b/hw/ide/piix.c @@ -21,6 +21,10 @@ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. + * + * References: + * [1] 82371FB (PIIX) AND 82371SB (PIIX3) PCI ISA IDE XCELERATOR, + * 290550-002, Intel Corporation, April 1997. */ #include "qemu/osdep.h" @@ -32,6 +36,7 @@ #include "sysemu/blockdev.h" #include "sysemu/dma.h" +#include "hw/ide/piix.h" #include "hw/ide/pci.h" #include "trace.h" @@ -114,14 +119,11 @@ static void piix_ide_reset(DeviceState *dev) ide_bus_reset(&d->bus[i]); } - /* TODO: this is the default. do not override. */ - pci_conf[PCI_COMMAND] = 0x00; - /* TODO: this is the default. do not override. */ - pci_conf[PCI_COMMAND + 1] = 0x00; - /* TODO: use pci_set_word */ - pci_conf[PCI_STATUS] = PCI_STATUS_FAST_BACK; - pci_conf[PCI_STATUS + 1] = PCI_STATUS_DEVSEL_MEDIUM >> 8; - pci_conf[0x20] = 0x01; /* BMIBA: 20-23h */ + /* PCI command register default value (0000h) per [1, p.48]. */ + pci_set_word(pci_conf + PCI_COMMAND, 0x0000); + pci_set_word(pci_conf + PCI_STATUS, + PCI_STATUS_DEVSEL_MEDIUM | PCI_STATUS_FAST_BACK); + pci_set_byte(pci_conf + 0x20, 0x01); /* BMIBA: 20-23h */ } static int pci_piix_init_ports(PCIIDEState *d) @@ -137,7 +139,7 @@ static int pci_piix_init_ports(PCIIDEState *d) int i, ret; for (i = 0; i < 2; i++) { - ide_bus_new(&d->bus[i], sizeof(d->bus[i]), DEVICE(d), i, 2); + ide_bus_init(&d->bus[i], sizeof(d->bus[i]), DEVICE(d), i, 2); ret = ide_init_ioport(&d->bus[i], NULL, port_info[i].iobase, port_info[i].iobase2); if (ret) { @@ -173,41 +175,6 @@ static void pci_piix_ide_realize(PCIDevice *dev, Error **errp) } } -int pci_piix3_xen_ide_unplug(DeviceState *dev, bool aux) -{ - PCIIDEState *pci_ide; - int i; - IDEDevice *idedev; - IDEBus *idebus; - BlockBackend *blk; - - pci_ide = PCI_IDE(dev); - - for (i = aux ? 
1 : 0; i < 4; i++) { - idebus = &pci_ide->bus[i / 2]; - blk = idebus->ifs[i % 2].blk; - - if (blk && idebus->ifs[i % 2].drive_kind != IDE_CD) { - if (!(i % 2)) { - idedev = idebus->master; - } else { - idedev = idebus->slave; - } - - blk_drain(blk); - blk_flush(blk); - - blk_detach_dev(blk, DEVICE(idedev)); - idebus->ifs[i % 2].blk = NULL; - idedev->conf.blk = NULL; - monitor_remove_blk(blk); - blk_unref(blk); - } - } - qdev_reset_all(dev); - return 0; -} - static void pci_piix_ide_exitfn(PCIDevice *dev) { PCIIDEState *d = PCI_IDE(dev); @@ -242,13 +209,7 @@ static void piix3_ide_class_init(ObjectClass *klass, void *data) } static const TypeInfo piix3_ide_info = { - .name = "piix3-ide", - .parent = TYPE_PCI_IDE, - .class_init = piix3_ide_class_init, -}; - -static const TypeInfo piix3_ide_xen_info = { - .name = "piix3-ide-xen", + .name = TYPE_PIIX3_IDE, .parent = TYPE_PCI_IDE, .class_init = piix3_ide_class_init, }; @@ -270,7 +231,7 @@ static void piix4_ide_class_init(ObjectClass *klass, void *data) } static const TypeInfo piix4_ide_info = { - .name = "piix4-ide", + .name = TYPE_PIIX4_IDE, .parent = TYPE_PCI_IDE, .class_init = piix4_ide_class_init, }; @@ -278,7 +239,6 @@ static const TypeInfo piix4_ide_info = { static void piix_ide_register_types(void) { type_register_static(&piix3_ide_info); - type_register_static(&piix3_ide_xen_info); type_register_static(&piix4_ide_info); } diff --git a/hw/ide/qdev.c b/hw/ide/qdev.c index 13e5e0e093..0dc0d83638 100644 --- a/hw/ide/qdev.c +++ b/hw/ide/qdev.c @@ -68,10 +68,10 @@ static const TypeInfo ide_bus_info = { .class_init = ide_bus_class_init, }; -void ide_bus_new(IDEBus *idebus, size_t idebus_size, DeviceState *dev, +void ide_bus_init(IDEBus *idebus, size_t idebus_size, DeviceState *dev, int bus_id, int max_units) { - qbus_create_inplace(idebus, idebus_size, TYPE_IDE_BUS, dev, NULL); + qbus_init(idebus, idebus_size, TYPE_IDE_BUS, dev, NULL); idebus->bus_id = bus_id; idebus->max_units = max_units; } diff --git a/hw/ide/sii3112.c b/hw/ide/sii3112.c index 34c347b9c2..46204f10d7 100644 --- a/hw/ide/sii3112.c +++ b/hw/ide/sii3112.c @@ -283,7 +283,7 @@ static void sii3112_pci_realize(PCIDevice *dev, Error **errp) qdev_init_gpio_in(ds, sii3112_set_irq, 2); for (i = 0; i < 2; i++) { - ide_bus_new(&s->bus[i], sizeof(s->bus[i]), ds, i, 1); + ide_bus_init(&s->bus[i], sizeof(s->bus[i]), ds, i, 1); ide_init2(&s->bus[i], qdev_get_gpio_in(ds, i)); bmdma_init(&s->bus[i], &s->bmdma[i], s); diff --git a/hw/ide/via.c b/hw/ide/via.c index be09912b33..e1a429405d 100644 --- a/hw/ide/via.c +++ b/hw/ide/via.c @@ -29,7 +29,7 @@ #include "migration/vmstate.h" #include "qemu/module.h" #include "sysemu/dma.h" - +#include "hw/isa/vt82c686.h" #include "hw/ide/pci.h" #include "trace.h" @@ -112,7 +112,7 @@ static void via_ide_set_irq(void *opaque, int n, int level) d->config[0x70 + n * 8] &= ~0x80; } - qemu_set_irq(isa_get_irq(NULL, 14 + n), level); + via_isa_set_irq(pci_get_function_0(d), 14 + n, level); } static void via_ide_reset(DeviceState *dev) @@ -190,7 +190,7 @@ static void via_ide_realize(PCIDevice *dev, Error **errp) qdev_init_gpio_in(ds, via_ide_set_irq, 2); for (i = 0; i < 2; i++) { - ide_bus_new(&d->bus[i], sizeof(d->bus[i]), ds, i, 2); + ide_bus_init(&d->bus[i], sizeof(d->bus[i]), ds, i, 2); ide_init2(&d->bus[i], qdev_get_gpio_in(ds, i)); bmdma_init(&d->bus[i], &d->bmdma[i], d); @@ -217,6 +217,9 @@ static void via_ide_class_init(ObjectClass *klass, void *data) dc->reset = via_ide_reset; dc->vmsd = &vmstate_ide_pci; + /* Reason: only works as function of VIA 
southbridge */ + dc->user_creatable = false; + k->realize = via_ide_realize; k->exit = via_ide_exitfn; k->vendor_id = PCI_VENDOR_ID_VIA; @@ -227,7 +230,7 @@ static void via_ide_class_init(ObjectClass *klass, void *data) } static const TypeInfo via_ide_info = { - .name = "via-ide", + .name = TYPE_VIA_IDE, .parent = TYPE_PCI_IDE, .class_init = via_ide_class_init, }; diff --git a/hw/input/hid.c b/hw/input/hid.c index 8aab0521f4..e7ecebdf8f 100644 --- a/hw/input/hid.c +++ b/hw/input/hid.c @@ -114,6 +114,8 @@ static void hid_pointer_event(DeviceState *dev, QemuConsole *src, [INPUT_BUTTON_LEFT] = 0x01, [INPUT_BUTTON_RIGHT] = 0x02, [INPUT_BUTTON_MIDDLE] = 0x04, + [INPUT_BUTTON_SIDE] = 0x08, + [INPUT_BUTTON_EXTRA] = 0x10, }; HIDState *hs = (HIDState *)dev; HIDPointerEvent *e; diff --git a/hw/input/lasips2.c b/hw/input/lasips2.c index e7faf24058..ea7c07a2ba 100644 --- a/hw/input/lasips2.c +++ b/hw/input/lasips2.c @@ -24,6 +24,7 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "hw/qdev-properties.h" +#include "hw/sysbus.h" #include "hw/input/ps2.h" #include "hw/input/lasips2.h" #include "exec/hwaddr.h" @@ -31,37 +32,31 @@ #include "exec/address-spaces.h" #include "migration/vmstate.h" #include "hw/irq.h" +#include "qapi/error.h" -struct LASIPS2State; -typedef struct LASIPS2Port { - struct LASIPS2State *parent; - MemoryRegion reg; - void *dev; - uint8_t id; - uint8_t control; - uint8_t buf; - bool loopback_rbne; - bool irq; -} LASIPS2Port; - -typedef struct LASIPS2State { - LASIPS2Port kbd; - LASIPS2Port mouse; - qemu_irq irq; -} LASIPS2State; +static const VMStateDescription vmstate_lasips2_port = { + .name = "lasips2-port", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(control, LASIPS2Port), + VMSTATE_UINT8(buf, LASIPS2Port), + VMSTATE_BOOL(loopback_rbne, LASIPS2Port), + VMSTATE_END_OF_LIST() + } +}; static const VMStateDescription vmstate_lasips2 = { .name = "lasips2", - .version_id = 0, - .minimum_version_id = 0, + .version_id = 1, + .minimum_version_id = 1, .fields = (VMStateField[]) { - VMSTATE_UINT8(kbd.control, LASIPS2State), - VMSTATE_UINT8(kbd.id, LASIPS2State), - VMSTATE_BOOL(kbd.irq, LASIPS2State), - VMSTATE_UINT8(mouse.control, LASIPS2State), - VMSTATE_UINT8(mouse.id, LASIPS2State), - VMSTATE_BOOL(mouse.irq, LASIPS2State), + VMSTATE_UINT8(int_status, LASIPS2State), + VMSTATE_STRUCT(kbd_port.parent_obj, LASIPS2State, 1, + vmstate_lasips2_port, LASIPS2Port), + VMSTATE_STRUCT(mouse_port.parent_obj, LASIPS2State, 1, + vmstate_lasips2_port, LASIPS2Port), VMSTATE_END_OF_LIST() } }; @@ -96,7 +91,7 @@ typedef enum { LASIPS2_STATUS_CLKSHD = 0x80, } lasips2_status_reg_t; -static const char *artist_read_reg_name(uint64_t addr) +static const char *lasips2_read_reg_name(uint64_t addr) { switch (addr & 0xc) { case REG_PS2_ID: @@ -116,7 +111,7 @@ static const char *artist_read_reg_name(uint64_t addr) } } -static const char *artist_write_reg_name(uint64_t addr) +static const char *lasips2_write_reg_name(uint64_t addr) { switch (addr & 0x0c) { case REG_PS2_RESET: @@ -135,36 +130,50 @@ static const char *artist_write_reg_name(uint64_t addr) static void lasips2_update_irq(LASIPS2State *s) { - trace_lasips2_intr(s->kbd.irq | s->mouse.irq); - qemu_set_irq(s->irq, s->kbd.irq | s->mouse.irq); + int level = s->int_status ? 
1 : 0; + + trace_lasips2_intr(level); + qemu_set_irq(s->irq, level); +} + +static void lasips2_set_irq(void *opaque, int n, int level) +{ + LASIPS2State *s = LASIPS2(opaque); + + if (level) { + s->int_status |= BIT(n); + } else { + s->int_status &= ~BIT(n); + } + + lasips2_update_irq(s); } static void lasips2_reg_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { - LASIPS2Port *port = opaque; + LASIPS2Port *lp = LASIPS2_PORT(opaque); - trace_lasips2_reg_write(size, port->id, addr, - artist_write_reg_name(addr), val); + trace_lasips2_reg_write(size, lp->id, addr, + lasips2_write_reg_name(addr), val); switch (addr & 0xc) { case REG_PS2_CONTROL: - port->control = val; + lp->control = val; break; case REG_PS2_XMTDATA: - if (port->control & LASIPS2_CONTROL_LOOPBACK) { - port->buf = val; - port->irq = true; - port->loopback_rbne = true; - lasips2_update_irq(port->parent); + if (lp->control & LASIPS2_CONTROL_LOOPBACK) { + lp->buf = val; + lp->loopback_rbne = true; + qemu_set_irq(lp->irq, 1); break; } - if (port->id) { - ps2_write_mouse(port->dev, val); + if (lp->id) { + ps2_write_mouse(PS2_MOUSE_DEVICE(lp->ps2dev), val); } else { - ps2_write_keyboard(port->dev, val); + ps2_write_keyboard(PS2_KBD_DEVICE(lp->ps2dev), val); } break; @@ -180,55 +189,53 @@ static void lasips2_reg_write(void *opaque, hwaddr addr, uint64_t val, static uint64_t lasips2_reg_read(void *opaque, hwaddr addr, unsigned size) { - LASIPS2Port *port = opaque; + LASIPS2Port *lp = LASIPS2_PORT(opaque); uint64_t ret = 0; switch (addr & 0xc) { case REG_PS2_ID: - ret = port->id; + ret = lp->id; break; case REG_PS2_RCVDATA: - if (port->control & LASIPS2_CONTROL_LOOPBACK) { - port->irq = false; - port->loopback_rbne = false; - lasips2_update_irq(port->parent); - ret = port->buf; + if (lp->control & LASIPS2_CONTROL_LOOPBACK) { + lp->loopback_rbne = false; + qemu_set_irq(lp->irq, 0); + ret = lp->buf; break; } - ret = ps2_read_data(port->dev); + ret = ps2_read_data(lp->ps2dev); break; case REG_PS2_CONTROL: - ret = port->control; + ret = lp->control; break; case REG_PS2_STATUS: - ret = LASIPS2_STATUS_DATSHD | LASIPS2_STATUS_CLKSHD; - if (port->control & LASIPS2_CONTROL_DIAG) { - if (!(port->control & LASIPS2_CONTROL_DATDIR)) { + if (lp->control & LASIPS2_CONTROL_DIAG) { + if (!(lp->control & LASIPS2_CONTROL_DATDIR)) { ret &= ~LASIPS2_STATUS_DATSHD; } - if (!(port->control & LASIPS2_CONTROL_CLKDIR)) { + if (!(lp->control & LASIPS2_CONTROL_CLKDIR)) { ret &= ~LASIPS2_STATUS_CLKSHD; } } - if (port->control & LASIPS2_CONTROL_LOOPBACK) { - if (port->loopback_rbne) { + if (lp->control & LASIPS2_CONTROL_LOOPBACK) { + if (lp->loopback_rbne) { ret |= LASIPS2_STATUS_RBNE; } } else { - if (!ps2_queue_empty(port->dev)) { + if (!ps2_queue_empty(lp->ps2dev)) { ret |= LASIPS2_STATUS_RBNE; } } - if (port->parent->kbd.irq || port->parent->mouse.irq) { + if (lp->lasips2->int_status) { ret |= LASIPS2_STATUS_CMPINTR; } break; @@ -238,9 +245,9 @@ static uint64_t lasips2_reg_read(void *opaque, hwaddr addr, unsigned size) __func__, addr); break; } - trace_lasips2_reg_read(size, port->id, addr, - artist_read_reg_name(addr), ret); + trace_lasips2_reg_read(size, lp->id, addr, + lasips2_read_reg_name(addr), ret); return ret; } @@ -251,38 +258,208 @@ static const MemoryRegionOps lasips2_reg_ops = { .min_access_size = 1, .max_access_size = 4, }, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_BIG_ENDIAN, }; -static void ps2dev_update_irq(void *opaque, int level) +static void lasips2_realize(DeviceState *dev, Error **errp) { - LASIPS2Port *port 
= opaque; - port->irq = level; - lasips2_update_irq(port->parent); + LASIPS2State *s = LASIPS2(dev); + LASIPS2Port *lp; + + lp = LASIPS2_PORT(&s->kbd_port); + if (!(qdev_realize(DEVICE(lp), NULL, errp))) { + return; + } + + qdev_connect_gpio_out(DEVICE(lp), 0, + qdev_get_gpio_in_named(dev, "lasips2-port-input-irq", + lp->id)); + + lp = LASIPS2_PORT(&s->mouse_port); + if (!(qdev_realize(DEVICE(lp), NULL, errp))) { + return; + } + + qdev_connect_gpio_out(DEVICE(lp), 0, + qdev_get_gpio_in_named(dev, "lasips2-port-input-irq", + lp->id)); } -void lasips2_init(MemoryRegion *address_space, - hwaddr base, qemu_irq irq) +static void lasips2_init(Object *obj) { - LASIPS2State *s; + LASIPS2State *s = LASIPS2(obj); + LASIPS2Port *lp; - s = g_malloc0(sizeof(LASIPS2State)); + object_initialize_child(obj, "lasips2-kbd-port", &s->kbd_port, + TYPE_LASIPS2_KBD_PORT); + object_initialize_child(obj, "lasips2-mouse-port", &s->mouse_port, + TYPE_LASIPS2_MOUSE_PORT); - s->irq = irq; - s->mouse.id = 1; - s->kbd.parent = s; - s->mouse.parent = s; + lp = LASIPS2_PORT(&s->kbd_port); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &lp->reg); + lp = LASIPS2_PORT(&s->mouse_port); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &lp->reg); - vmstate_register(NULL, base, &vmstate_lasips2, s); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); - s->kbd.dev = ps2_kbd_init(ps2dev_update_irq, &s->kbd); - s->mouse.dev = ps2_mouse_init(ps2dev_update_irq, &s->mouse); - - memory_region_init_io(&s->kbd.reg, NULL, &lasips2_reg_ops, &s->kbd, - "lasips2-kbd", 0x100); - memory_region_add_subregion(address_space, base, &s->kbd.reg); - - memory_region_init_io(&s->mouse.reg, NULL, &lasips2_reg_ops, &s->mouse, - "lasips2-mouse", 0x100); - memory_region_add_subregion(address_space, base + 0x100, &s->mouse.reg); + qdev_init_gpio_in_named(DEVICE(obj), lasips2_set_irq, + "lasips2-port-input-irq", 2); } + +static void lasips2_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = lasips2_realize; + dc->vmsd = &vmstate_lasips2; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo lasips2_info = { + .name = TYPE_LASIPS2, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = lasips2_init, + .instance_size = sizeof(LASIPS2State), + .class_init = lasips2_class_init, +}; + +static void lasips2_port_set_irq(void *opaque, int n, int level) +{ + LASIPS2Port *s = LASIPS2_PORT(opaque); + + qemu_set_irq(s->irq, level); +} + +static void lasips2_port_realize(DeviceState *dev, Error **errp) +{ + LASIPS2Port *s = LASIPS2_PORT(dev); + + qdev_connect_gpio_out(DEVICE(s->ps2dev), PS2_DEVICE_IRQ, + qdev_get_gpio_in_named(dev, "ps2-input-irq", 0)); +} + +static void lasips2_port_init(Object *obj) +{ + LASIPS2Port *s = LASIPS2_PORT(obj); + + qdev_init_gpio_out(DEVICE(obj), &s->irq, 1); + qdev_init_gpio_in_named(DEVICE(obj), lasips2_port_set_irq, + "ps2-input-irq", 1); +} + +static void lasips2_port_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = lasips2_port_realize; +} + +static const TypeInfo lasips2_port_info = { + .name = TYPE_LASIPS2_PORT, + .parent = TYPE_DEVICE, + .instance_init = lasips2_port_init, + .instance_size = sizeof(LASIPS2Port), + .class_init = lasips2_port_class_init, + .class_size = sizeof(LASIPS2PortDeviceClass), + .abstract = true, +}; + +static void lasips2_kbd_port_realize(DeviceState *dev, Error **errp) +{ + LASIPS2KbdPort *s = LASIPS2_KBD_PORT(dev); + LASIPS2Port *lp = LASIPS2_PORT(dev); + LASIPS2PortDeviceClass *lpdc = 
LASIPS2_PORT_GET_CLASS(lp); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->kbd), errp)) { + return; + } + + lp->ps2dev = PS2_DEVICE(&s->kbd); + lpdc->parent_realize(dev, errp); +} + +static void lasips2_kbd_port_init(Object *obj) +{ + LASIPS2KbdPort *s = LASIPS2_KBD_PORT(obj); + LASIPS2Port *lp = LASIPS2_PORT(obj); + + memory_region_init_io(&lp->reg, obj, &lasips2_reg_ops, lp, "lasips2-kbd", + 0x100); + + object_initialize_child(obj, "kbd", &s->kbd, TYPE_PS2_KBD_DEVICE); + + lp->id = 0; + lp->lasips2 = container_of(s, LASIPS2State, kbd_port); +} + +static void lasips2_kbd_port_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + LASIPS2PortDeviceClass *lpdc = LASIPS2_PORT_CLASS(klass); + + device_class_set_parent_realize(dc, lasips2_kbd_port_realize, + &lpdc->parent_realize); +} + +static const TypeInfo lasips2_kbd_port_info = { + .name = TYPE_LASIPS2_KBD_PORT, + .parent = TYPE_LASIPS2_PORT, + .instance_size = sizeof(LASIPS2KbdPort), + .instance_init = lasips2_kbd_port_init, + .class_init = lasips2_kbd_port_class_init, +}; + +static void lasips2_mouse_port_realize(DeviceState *dev, Error **errp) +{ + LASIPS2MousePort *s = LASIPS2_MOUSE_PORT(dev); + LASIPS2Port *lp = LASIPS2_PORT(dev); + LASIPS2PortDeviceClass *lpdc = LASIPS2_PORT_GET_CLASS(lp); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->mouse), errp)) { + return; + } + + lp->ps2dev = PS2_DEVICE(&s->mouse); + lpdc->parent_realize(dev, errp); +} + +static void lasips2_mouse_port_init(Object *obj) +{ + LASIPS2MousePort *s = LASIPS2_MOUSE_PORT(obj); + LASIPS2Port *lp = LASIPS2_PORT(obj); + + memory_region_init_io(&lp->reg, obj, &lasips2_reg_ops, lp, "lasips2-mouse", + 0x100); + + object_initialize_child(obj, "mouse", &s->mouse, TYPE_PS2_MOUSE_DEVICE); + + lp->id = 1; + lp->lasips2 = container_of(s, LASIPS2State, mouse_port); +} + +static void lasips2_mouse_port_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + LASIPS2PortDeviceClass *lpdc = LASIPS2_PORT_CLASS(klass); + + device_class_set_parent_realize(dc, lasips2_mouse_port_realize, + &lpdc->parent_realize); +} + +static const TypeInfo lasips2_mouse_port_info = { + .name = TYPE_LASIPS2_MOUSE_PORT, + .parent = TYPE_LASIPS2_PORT, + .instance_size = sizeof(LASIPS2MousePort), + .instance_init = lasips2_mouse_port_init, + .class_init = lasips2_mouse_port_class_init, +}; + +static void lasips2_register_types(void) +{ + type_register_static(&lasips2_info); + type_register_static(&lasips2_port_info); + type_register_static(&lasips2_kbd_port_info); + type_register_static(&lasips2_mouse_port_info); +} + +type_init(lasips2_register_types) diff --git a/hw/input/pckbd.c b/hw/input/pckbd.c index baba62f357..b92b63bedc 100644 --- a/hw/input/pckbd.c +++ b/hw/input/pckbd.c @@ -26,9 +26,10 @@ #include "qemu/error-report.h" #include "qemu/log.h" #include "qemu/timer.h" +#include "qapi/error.h" #include "hw/isa/isa.h" #include "migration/vmstate.h" -#include "hw/acpi/aml-build.h" +#include "hw/acpi/acpi_aml_interface.h" #include "hw/input/ps2.h" #include "hw/irq.h" #include "hw/input/i8042.h" @@ -38,49 +39,86 @@ #include "trace.h" -/* Keyboard Controller Commands */ -#define KBD_CCMD_READ_MODE 0x20 /* Read mode bits */ -#define KBD_CCMD_WRITE_MODE 0x60 /* Write mode bits */ -#define KBD_CCMD_GET_VERSION 0xA1 /* Get controller version */ -#define KBD_CCMD_MOUSE_DISABLE 0xA7 /* Disable mouse interface */ -#define KBD_CCMD_MOUSE_ENABLE 0xA8 /* Enable mouse interface */ -#define KBD_CCMD_TEST_MOUSE 0xA9 /* Mouse interface test */ -#define 
KBD_CCMD_SELF_TEST 0xAA /* Controller self test */ -#define KBD_CCMD_KBD_TEST 0xAB /* Keyboard interface test */ -#define KBD_CCMD_KBD_DISABLE 0xAD /* Keyboard interface disable */ -#define KBD_CCMD_KBD_ENABLE 0xAE /* Keyboard interface enable */ -#define KBD_CCMD_READ_INPORT 0xC0 /* read input port */ -#define KBD_CCMD_READ_OUTPORT 0xD0 /* read output port */ -#define KBD_CCMD_WRITE_OUTPORT 0xD1 /* write output port */ -#define KBD_CCMD_WRITE_OBUF 0xD2 -#define KBD_CCMD_WRITE_AUX_OBUF 0xD3 /* Write to output buffer as if - initiated by the auxiliary device */ -#define KBD_CCMD_WRITE_MOUSE 0xD4 /* Write the following byte to the mouse */ -#define KBD_CCMD_DISABLE_A20 0xDD /* HP vectra only ? */ -#define KBD_CCMD_ENABLE_A20 0xDF /* HP vectra only ? */ -#define KBD_CCMD_PULSE_BITS_3_0 0xF0 /* Pulse bits 3-0 of the output port P2. */ -#define KBD_CCMD_RESET 0xFE /* Pulse bit 0 of the output port P2 = CPU reset. */ -#define KBD_CCMD_NO_OP 0xFF /* Pulse no bits of the output port P2. */ +/* Keyboard Controller Commands */ + +/* Read mode bits */ +#define KBD_CCMD_READ_MODE 0x20 +/* Write mode bits */ +#define KBD_CCMD_WRITE_MODE 0x60 +/* Get controller version */ +#define KBD_CCMD_GET_VERSION 0xA1 +/* Disable mouse interface */ +#define KBD_CCMD_MOUSE_DISABLE 0xA7 +/* Enable mouse interface */ +#define KBD_CCMD_MOUSE_ENABLE 0xA8 +/* Mouse interface test */ +#define KBD_CCMD_TEST_MOUSE 0xA9 +/* Controller self test */ +#define KBD_CCMD_SELF_TEST 0xAA +/* Keyboard interface test */ +#define KBD_CCMD_KBD_TEST 0xAB +/* Keyboard interface disable */ +#define KBD_CCMD_KBD_DISABLE 0xAD +/* Keyboard interface enable */ +#define KBD_CCMD_KBD_ENABLE 0xAE +/* read input port */ +#define KBD_CCMD_READ_INPORT 0xC0 +/* read output port */ +#define KBD_CCMD_READ_OUTPORT 0xD0 +/* write output port */ +#define KBD_CCMD_WRITE_OUTPORT 0xD1 +#define KBD_CCMD_WRITE_OBUF 0xD2 +/* Write to output buffer as if initiated by the auxiliary device */ +#define KBD_CCMD_WRITE_AUX_OBUF 0xD3 +/* Write the following byte to the mouse */ +#define KBD_CCMD_WRITE_MOUSE 0xD4 +/* HP vectra only ? */ +#define KBD_CCMD_DISABLE_A20 0xDD +/* HP vectra only ? */ +#define KBD_CCMD_ENABLE_A20 0xDF +/* Pulse bits 3-0 of the output port P2. */ +#define KBD_CCMD_PULSE_BITS_3_0 0xF0 +/* Pulse bit 0 of the output port P2 = CPU reset. */ +#define KBD_CCMD_RESET 0xFE +/* Pulse no bits of the output port P2. 
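+ * (Commands 0xF0..0xFF pulse the P2 bits selected by their low nibble,
+ * a 0 bit meaning "pulse"; as kbd_write_command() notes, the only
+ * useful variant is pulsing bit 0, which resets the CPU.)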
*/ +#define KBD_CCMD_NO_OP 0xFF /* Status Register Bits */ -#define KBD_STAT_OBF 0x01 /* Keyboard output buffer full */ -#define KBD_STAT_IBF 0x02 /* Keyboard input buffer full */ -#define KBD_STAT_SELFTEST 0x04 /* Self test successful */ -#define KBD_STAT_CMD 0x08 /* Last write was a command write (0=data) */ -#define KBD_STAT_UNLOCKED 0x10 /* Zero if keyboard locked */ -#define KBD_STAT_MOUSE_OBF 0x20 /* Mouse output buffer full */ -#define KBD_STAT_GTO 0x40 /* General receive/xmit timeout */ -#define KBD_STAT_PERR 0x80 /* Parity error */ + +/* Keyboard output buffer full */ +#define KBD_STAT_OBF 0x01 +/* Keyboard input buffer full */ +#define KBD_STAT_IBF 0x02 +/* Self test successful */ +#define KBD_STAT_SELFTEST 0x04 +/* Last write was a command write (0=data) */ +#define KBD_STAT_CMD 0x08 +/* Zero if keyboard locked */ +#define KBD_STAT_UNLOCKED 0x10 +/* Mouse output buffer full */ +#define KBD_STAT_MOUSE_OBF 0x20 +/* General receive/xmit timeout */ +#define KBD_STAT_GTO 0x40 +/* Parity error */ +#define KBD_STAT_PERR 0x80 /* Controller Mode Register Bits */ -#define KBD_MODE_KBD_INT 0x01 /* Keyboard data generate IRQ1 */ -#define KBD_MODE_MOUSE_INT 0x02 /* Mouse data generate IRQ12 */ -#define KBD_MODE_SYS 0x04 /* The system flag (?) */ -#define KBD_MODE_NO_KEYLOCK 0x08 /* The keylock doesn't affect the keyboard if set */ -#define KBD_MODE_DISABLE_KBD 0x10 /* Disable keyboard interface */ -#define KBD_MODE_DISABLE_MOUSE 0x20 /* Disable mouse interface */ -#define KBD_MODE_KCC 0x40 /* Scan code conversion to PC format */ -#define KBD_MODE_RFU 0x80 + +/* Keyboard data generate IRQ1 */ +#define KBD_MODE_KBD_INT 0x01 +/* Mouse data generate IRQ12 */ +#define KBD_MODE_MOUSE_INT 0x02 +/* The system flag (?) */ +#define KBD_MODE_SYS 0x04 +/* The keylock doesn't affect the keyboard if set */ +#define KBD_MODE_NO_KEYLOCK 0x08 +/* Disable keyboard interface */ +#define KBD_MODE_DISABLE_KBD 0x10 +/* Disable mouse interface */ +#define KBD_MODE_DISABLE_MOUSE 0x20 +/* Scan code conversion to PC format */ +#define KBD_MODE_KCC 0x40 +#define KBD_MODE_RFU 0x80 /* Output Port Bits */ #define KBD_OUT_RESET 0x01 /* 1=normal mode, 0=reset */ @@ -88,7 +126,8 @@ #define KBD_OUT_OBF 0x10 /* Keyboard output buffer full */ #define KBD_OUT_MOUSE_OBF 0x20 /* Mouse output buffer full */ -/* OSes typically write 0xdd/0xdf to turn the A20 line off and on. +/* + * OSes typically write 0xdd/0xdf to turn the A20 line off and on. * We make the default value of the outport include these four bits, * so that the subsection is rarely necessary. */ @@ -107,33 +146,11 @@ #define KBD_OBSRC_MOUSE 0x02 #define KBD_OBSRC_CTRL 0x04 -typedef struct KBDState { - uint8_t write_cmd; /* if non zero, write data to port 60 is expected */ - uint8_t status; - uint8_t mode; - uint8_t outport; - uint32_t migration_flags; - uint32_t obsrc; - bool outport_present; - bool extended_state; - bool extended_state_loaded; - /* Bitmask of devices with data available. 
*/ - uint8_t pending; - uint8_t obdata; - uint8_t cbdata; - uint8_t pending_tmp; - void *kbd; - void *mouse; - QEMUTimer *throttle_timer; - qemu_irq irq_kbd; - qemu_irq irq_mouse; - qemu_irq a20_out; - hwaddr mask; -} KBDState; - -/* XXX: not generating the irqs if KBD_MODE_DISABLE_KBD is set may be - incorrect, but it avoids having to simulate exact delays */ +/* + * XXX: not generating the irqs if KBD_MODE_DISABLE_KBD is set may be + * incorrect, but it avoids having to simulate exact delays + */ static void kbd_update_irq_lines(KBDState *s) { int irq_kbd_level, irq_mouse_level; @@ -153,8 +170,8 @@ static void kbd_update_irq_lines(KBDState *s) } } } - qemu_set_irq(s->irq_kbd, irq_kbd_level); - qemu_set_irq(s->irq_mouse, irq_mouse_level); + qemu_set_irq(s->irqs[I8042_KBD_IRQ], irq_kbd_level); + qemu_set_irq(s->irqs[I8042_MOUSE_IRQ], irq_mouse_level); } static void kbd_deassert_irq(KBDState *s) @@ -269,7 +286,7 @@ static void kbd_queue(KBDState *s, int b, int aux) s->pending |= aux ? KBD_PENDING_CTRL_AUX : KBD_PENDING_CTRL_KBD; kbd_safe_update_irq(s); } else { - ps2_queue(aux ? s->mouse : s->kbd, b); + ps2_queue(aux ? PS2_DEVICE(&s->ps2mouse) : PS2_DEVICE(&s->ps2kbd), b); } } @@ -301,21 +318,23 @@ static void kbd_write_command(void *opaque, hwaddr addr, trace_pckbd_kbd_write_command(val); - /* Bits 3-0 of the output port P2 of the keyboard controller may be pulsed + /* + * Bits 3-0 of the output port P2 of the keyboard controller may be pulsed * low for approximately 6 micro seconds. Bits 3-0 of the KBD_CCMD_PULSE * command specify the output port bits to be pulsed. * 0: Bit should be pulsed. 1: Bit should not be modified. * The only useful version of this command is pulsing bit 0, * which does a CPU reset. */ - if((val & KBD_CCMD_PULSE_BITS_3_0) == KBD_CCMD_PULSE_BITS_3_0) { - if(!(val & 1)) + if ((val & KBD_CCMD_PULSE_BITS_3_0) == KBD_CCMD_PULSE_BITS_3_0) { + if (!(val & 1)) { val = KBD_CCMD_RESET; - else + } else { val = KBD_CCMD_NO_OP; + } } - switch(val) { + switch (val) { case KBD_CCMD_READ_MODE: kbd_queue(s, s->mode, 0); break; @@ -389,9 +408,9 @@ static uint64_t kbd_read_data(void *opaque, hwaddr addr, timer_mod(s->throttle_timer, qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) + 1000); } - s->obdata = ps2_read_data(s->kbd); + s->obdata = ps2_read_data(PS2_DEVICE(&s->ps2kbd)); } else if (s->obsrc & KBD_OBSRC_MOUSE) { - s->obdata = ps2_read_data(s->mouse); + s->obdata = ps2_read_data(PS2_DEVICE(&s->ps2mouse)); } else if (s->obsrc & KBD_OBSRC_CTRL) { s->obdata = kbd_dequeue(s); } @@ -408,16 +427,17 @@ static void kbd_write_data(void *opaque, hwaddr addr, trace_pckbd_kbd_write_data(val); - switch(s->write_cmd) { + switch (s->write_cmd) { case 0: - ps2_write_keyboard(s->kbd, val); + ps2_write_keyboard(&s->ps2kbd, val); /* sending data to the keyboard reenables PS/2 communication */ s->mode &= ~KBD_MODE_DISABLE_KBD; kbd_safe_update_irq(s); break; case KBD_CCMD_WRITE_MODE: s->mode = val; - ps2_keyboard_set_translation(s->kbd, (s->mode & KBD_MODE_KCC) != 0); + ps2_keyboard_set_translation(&s->ps2kbd, + (s->mode & KBD_MODE_KCC) != 0); /* * a write to the mode byte interrupt enable flags directly updates * the irq lines @@ -439,7 +459,7 @@ static void kbd_write_data(void *opaque, hwaddr addr, outport_write(s, val); break; case KBD_CCMD_WRITE_MOUSE: - ps2_write_mouse(s->mouse, val); + ps2_write_mouse(&s->ps2mouse, val); /* sending data to the mouse reenables PS/2 communication */ s->mode &= ~KBD_MODE_DISABLE_MOUSE; kbd_safe_update_irq(s); @@ -606,7 +626,7 @@ static const VMStateDescription vmstate_kbd 
= { VMSTATE_UINT8(pending_tmp, KBDState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * []) { &vmstate_kbd_outport, &vmstate_kbd_extended_state, NULL @@ -618,10 +638,11 @@ static uint64_t kbd_mm_readfn(void *opaque, hwaddr addr, unsigned size) { KBDState *s = opaque; - if (addr & s->mask) + if (addr & s->mask) { return kbd_read_status(s, 0, 1) & 0xff; - else + } else { return kbd_read_data(s, 0, 1) & 0xff; + } } static void kbd_mm_writefn(void *opaque, hwaddr addr, @@ -629,10 +650,11 @@ static void kbd_mm_writefn(void *opaque, hwaddr addr, { KBDState *s = opaque; - if (addr & s->mask) + if (addr & s->mask) { kbd_write_command(s, 0, value & 0xff, 1); - else + } else { kbd_write_data(s, 0, value & 0xff, 1); + } } @@ -644,40 +666,115 @@ static const MemoryRegionOps i8042_mmio_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -void i8042_mm_init(qemu_irq kbd_irq, qemu_irq mouse_irq, - MemoryRegion *region, ram_addr_t size, - hwaddr mask) +static void i8042_mmio_set_kbd_irq(void *opaque, int n, int level) { - KBDState *s = g_malloc0(sizeof(KBDState)); + MMIOKBDState *s = I8042_MMIO(opaque); + KBDState *ks = &s->kbd; - s->irq_kbd = kbd_irq; - s->irq_mouse = mouse_irq; - s->mask = mask; - - s->extended_state = true; - - vmstate_register(NULL, 0, &vmstate_kbd, s); - - memory_region_init_io(region, NULL, &i8042_mmio_ops, s, "i8042", size); - - s->kbd = ps2_kbd_init(kbd_update_kbd_irq, s); - s->mouse = ps2_mouse_init(kbd_update_aux_irq, s); - qemu_register_reset(kbd_reset, s); + kbd_update_kbd_irq(ks, level); } -struct ISAKBDState { - ISADevice parent_obj; +static void i8042_mmio_set_mouse_irq(void *opaque, int n, int level) +{ + MMIOKBDState *s = I8042_MMIO(opaque); + KBDState *ks = &s->kbd; - KBDState kbd; - bool kbd_throttle; - MemoryRegion io[2]; + kbd_update_aux_irq(ks, level); +} + +static void i8042_mmio_reset(DeviceState *dev) +{ + MMIOKBDState *s = I8042_MMIO(dev); + KBDState *ks = &s->kbd; + + kbd_reset(ks); +} + +static void i8042_mmio_realize(DeviceState *dev, Error **errp) +{ + MMIOKBDState *s = I8042_MMIO(dev); + KBDState *ks = &s->kbd; + + memory_region_init_io(&s->region, OBJECT(dev), &i8042_mmio_ops, ks, + "i8042", s->size); + + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->region); + + if (!sysbus_realize(SYS_BUS_DEVICE(&ks->ps2kbd), errp)) { + return; + } + + if (!sysbus_realize(SYS_BUS_DEVICE(&ks->ps2mouse), errp)) { + return; + } + + qdev_connect_gpio_out(DEVICE(&ks->ps2kbd), PS2_DEVICE_IRQ, + qdev_get_gpio_in_named(dev, "ps2-kbd-input-irq", + 0)); + + qdev_connect_gpio_out(DEVICE(&ks->ps2mouse), PS2_DEVICE_IRQ, + qdev_get_gpio_in_named(dev, "ps2-mouse-input-irq", + 0)); +} + +static void i8042_mmio_init(Object *obj) +{ + MMIOKBDState *s = I8042_MMIO(obj); + KBDState *ks = &s->kbd; + + ks->extended_state = true; + + object_initialize_child(obj, "ps2kbd", &ks->ps2kbd, TYPE_PS2_KBD_DEVICE); + object_initialize_child(obj, "ps2mouse", &ks->ps2mouse, + TYPE_PS2_MOUSE_DEVICE); + + qdev_init_gpio_out(DEVICE(obj), ks->irqs, 2); + qdev_init_gpio_in_named(DEVICE(obj), i8042_mmio_set_kbd_irq, + "ps2-kbd-input-irq", 1); + qdev_init_gpio_in_named(DEVICE(obj), i8042_mmio_set_mouse_irq, + "ps2-mouse-input-irq", 1); +} + +static Property i8042_mmio_properties[] = { + DEFINE_PROP_UINT64("mask", MMIOKBDState, kbd.mask, UINT64_MAX), + DEFINE_PROP_UINT32("size", MMIOKBDState, size, -1), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_kbd_mmio = { + .name = "pckbd-mmio", + .version_id = 1, + 
.minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(kbd, MMIOKBDState, 0, vmstate_kbd, KBDState), + VMSTATE_END_OF_LIST() + } +}; + +static void i8042_mmio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = i8042_mmio_realize; + dc->reset = i8042_mmio_reset; + dc->vmsd = &vmstate_kbd_mmio; + device_class_set_props(dc, i8042_mmio_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo i8042_mmio_info = { + .name = TYPE_I8042_MMIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = i8042_mmio_init, + .instance_size = sizeof(MMIOKBDState), + .class_init = i8042_mmio_class_init }; void i8042_isa_mouse_fake_event(ISAKBDState *isa) { KBDState *s = &isa->kbd; - ps2_mouse_fake_event(s->mouse); + ps2_mouse_fake_event(&s->ps2mouse); } void i8042_setup_a20_line(ISADevice *dev, qemu_irq a20_out) @@ -715,6 +812,31 @@ static const MemoryRegionOps i8042_cmd_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; +static void i8042_set_kbd_irq(void *opaque, int n, int level) +{ + ISAKBDState *s = I8042(opaque); + KBDState *ks = &s->kbd; + + kbd_update_kbd_irq(ks, level); +} + +static void i8042_set_mouse_irq(void *opaque, int n, int level) +{ + ISAKBDState *s = I8042(opaque); + KBDState *ks = &s->kbd; + + kbd_update_aux_irq(ks, level); +} + + +static void i8042_reset(DeviceState *dev) +{ + ISAKBDState *s = I8042(dev); + KBDState *ks = &s->kbd; + + kbd_reset(ks); +} + static void i8042_initfn(Object *obj) { ISAKBDState *isa_s = I8042(obj); @@ -725,7 +847,17 @@ static void i8042_initfn(Object *obj) memory_region_init_io(isa_s->io + 1, obj, &i8042_cmd_ops, s, "i8042-cmd", 1); + object_initialize_child(obj, "ps2kbd", &s->ps2kbd, TYPE_PS2_KBD_DEVICE); + object_initialize_child(obj, "ps2mouse", &s->ps2mouse, + TYPE_PS2_MOUSE_DEVICE); + qdev_init_gpio_out_named(DEVICE(obj), &s->a20_out, I8042_A20_LINE, 1); + + qdev_init_gpio_out(DEVICE(obj), s->irqs, 2); + qdev_init_gpio_in_named(DEVICE(obj), i8042_set_kbd_irq, + "ps2-kbd-input-irq", 1); + qdev_init_gpio_in_named(DEVICE(obj), i8042_set_mouse_irq, + "ps2-mouse-input-irq", 1); } static void i8042_realizefn(DeviceState *dev, Error **errp) @@ -734,14 +866,40 @@ static void i8042_realizefn(DeviceState *dev, Error **errp) ISAKBDState *isa_s = I8042(dev); KBDState *s = &isa_s->kbd; - isa_init_irq(isadev, &s->irq_kbd, 1); - isa_init_irq(isadev, &s->irq_mouse, 12); + if (isa_s->kbd_irq >= ISA_NUM_IRQS) { + error_setg(errp, "Maximum value for \"kbd-irq\" is: %u", + ISA_NUM_IRQS - 1); + return; + } + + if (isa_s->mouse_irq >= ISA_NUM_IRQS) { + error_setg(errp, "Maximum value for \"mouse-irq\" is: %u", + ISA_NUM_IRQS - 1); + return; + } + + isa_connect_gpio_out(isadev, I8042_KBD_IRQ, isa_s->kbd_irq); + isa_connect_gpio_out(isadev, I8042_MOUSE_IRQ, isa_s->mouse_irq); isa_register_ioport(isadev, isa_s->io + 0, 0x60); isa_register_ioport(isadev, isa_s->io + 1, 0x64); - s->kbd = ps2_kbd_init(kbd_update_kbd_irq, s); - s->mouse = ps2_mouse_init(kbd_update_aux_irq, s); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->ps2kbd), errp)) { + return; + } + + qdev_connect_gpio_out(DEVICE(&s->ps2kbd), PS2_DEVICE_IRQ, + qdev_get_gpio_in_named(dev, "ps2-kbd-input-irq", + 0)); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->ps2mouse), errp)) { + return; + } + + qdev_connect_gpio_out(DEVICE(&s->ps2mouse), PS2_DEVICE_IRQ, + qdev_get_gpio_in_named(dev, "ps2-mouse-input-irq", + 0)); + if (isa_s->kbd_throttle && !isa_s->kbd.extended_state) { warn_report(TYPE_I8042 ": can't enable kbd-throttle without" " 
extended-state, disabling kbd-throttle"); @@ -749,11 +907,11 @@ static void i8042_realizefn(DeviceState *dev, Error **errp) s->throttle_timer = timer_new_us(QEMU_CLOCK_VIRTUAL, kbd_throttle_timeout, s); } - qemu_register_reset(kbd_reset, s); } -static void i8042_build_aml(ISADevice *isadev, Aml *scope) +static void i8042_build_aml(AcpiDevAmlIf *adev, Aml *scope) { + ISAKBDState *isa_s = I8042(adev); Aml *kbd; Aml *mou; Aml *crs; @@ -761,7 +919,7 @@ static void i8042_build_aml(ISADevice *isadev, Aml *scope) crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, 0x0060, 0x0060, 0x01, 0x01)); aml_append(crs, aml_io(AML_DECODE16, 0x0064, 0x0064, 0x01, 0x01)); - aml_append(crs, aml_irq_no_flags(1)); + aml_append(crs, aml_irq_no_flags(isa_s->kbd_irq)); kbd = aml_device("KBD"); aml_append(kbd, aml_name_decl("_HID", aml_eisaid("PNP0303"))); @@ -769,7 +927,7 @@ static void i8042_build_aml(ISADevice *isadev, Aml *scope) aml_append(kbd, aml_name_decl("_CRS", crs)); crs = aml_resource_template(); - aml_append(crs, aml_irq_no_flags(12)); + aml_append(crs, aml_irq_no_flags(isa_s->mouse_irq)); mou = aml_device("MOU"); aml_append(mou, aml_name_decl("_HID", aml_eisaid("PNP0F13"))); @@ -783,18 +941,21 @@ static void i8042_build_aml(ISADevice *isadev, Aml *scope) static Property i8042_properties[] = { DEFINE_PROP_BOOL("extended-state", ISAKBDState, kbd.extended_state, true), DEFINE_PROP_BOOL("kbd-throttle", ISAKBDState, kbd_throttle, false), + DEFINE_PROP_UINT8("kbd-irq", ISAKBDState, kbd_irq, 1), + DEFINE_PROP_UINT8("mouse-irq", ISAKBDState, mouse_irq, 12), DEFINE_PROP_END_OF_LIST(), }; static void i8042_class_initfn(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - ISADeviceClass *isa = ISA_DEVICE_CLASS(klass); + AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); device_class_set_props(dc, i8042_properties); + dc->reset = i8042_reset; dc->realize = i8042_realizefn; dc->vmsd = &vmstate_kbd_isa; - isa->build_aml = i8042_build_aml; + adevc->build_dev_aml = i8042_build_aml; set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } @@ -804,11 +965,16 @@ static const TypeInfo i8042_info = { .instance_size = sizeof(ISAKBDState), .instance_init = i8042_initfn, .class_init = i8042_class_initfn, + .interfaces = (InterfaceInfo[]) { + { TYPE_ACPI_DEV_AML_IF }, + { }, + }, }; static void i8042_register_types(void) { type_register_static(&i8042_info); + type_register_static(&i8042_mmio_info); } type_init(i8042_register_types) diff --git a/hw/input/pl050.c b/hw/input/pl050.c index d279b6c148..ec5e19285e 100644 --- a/hw/input/pl050.c +++ b/hw/input/pl050.c @@ -7,30 +7,24 @@ * This code is licensed under the GPL. 
*/ +/* + * QEMU interface: + * + sysbus MMIO region 0: MemoryRegion defining the PL050 registers + * + Named GPIO input "ps2-input-irq": set to 1 if the downstream PS2 device + * has asserted its irq + * + sysbus IRQ 0: PL050 output irq + */ + #include "qemu/osdep.h" #include "hw/sysbus.h" #include "migration/vmstate.h" #include "hw/input/ps2.h" +#include "hw/input/pl050.h" #include "hw/irq.h" #include "qemu/log.h" #include "qemu/module.h" #include "qom/object.h" -#define TYPE_PL050 "pl050" -OBJECT_DECLARE_SIMPLE_TYPE(PL050State, PL050) - -struct PL050State { - SysBusDevice parent_obj; - - MemoryRegion iomem; - void *dev; - uint32_t cr; - uint32_t clk; - uint32_t last; - int pending; - qemu_irq irq; - bool is_mouse; -}; static const VMStateDescription vmstate_pl050 = { .name = "pl050", @@ -53,26 +47,34 @@ static const VMStateDescription vmstate_pl050 = { #define PL050_KMIC (1 << 1) #define PL050_KMID (1 << 0) -static const unsigned char pl050_id[] = -{ 0x50, 0x10, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 }; +static const unsigned char pl050_id[] = { + 0x50, 0x10, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 +}; -static void pl050_update(void *opaque, int level) +static void pl050_update_irq(PL050State *s) +{ + int level = (s->pending && (s->cr & 0x10) != 0) + || (s->cr & 0x08) != 0; + + qemu_set_irq(s->irq, level); +} + +static void pl050_set_irq(void *opaque, int n, int level) { PL050State *s = (PL050State *)opaque; - int raise; s->pending = level; - raise = (s->pending && (s->cr & 0x10) != 0) - || (s->cr & 0x08) != 0; - qemu_set_irq(s->irq, raise); + pl050_update_irq(s); } static uint64_t pl050_read(void *opaque, hwaddr offset, unsigned size) { PL050State *s = (PL050State *)opaque; - if (offset >= 0xfe0 && offset < 0x1000) + + if (offset >= 0xfe0 && offset < 0x1000) { return pl050_id[(offset - 0xfe0) >> 2]; + } switch (offset >> 2) { case 0: /* KMICR */ @@ -88,16 +90,19 @@ static uint64_t pl050_read(void *opaque, hwaddr offset, val = (val ^ (val >> 1)) & 1; stat = PL050_TXEMPTY; - if (val) + if (val) { stat |= PL050_RXPARITY; - if (s->pending) + } + if (s->pending) { stat |= PL050_RXFULL; + } return stat; } case 2: /* KMIDATA */ - if (s->pending) - s->last = ps2_read_data(s->dev); + if (s->pending) { + s->last = ps2_read_data(s->ps2dev); + } return s->last; case 3: /* KMICLKDIV */ return s->clk; @@ -114,19 +119,20 @@ static void pl050_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { PL050State *s = (PL050State *)opaque; + switch (offset >> 2) { case 0: /* KMICR */ s->cr = value; - pl050_update(s, s->pending); + pl050_update_irq(s); /* ??? Need to implement the enable/disable bit. */ break; case 2: /* KMIDATA */ /* ??? This should toggle the TX interrupt line. */ /* ??? This means kbd/mouse can block each other. 
*/ if (s->is_mouse) { - ps2_write_mouse(s->dev, value); + ps2_write_mouse(PS2_MOUSE_DEVICE(s->ps2dev), value); } else { - ps2_write_keyboard(s->dev, value); + ps2_write_keyboard(PS2_KBD_DEVICE(s->ps2dev), value); } break; case 3: /* KMICLKDIV */ @@ -146,44 +152,103 @@ static const MemoryRegionOps pl050_ops = { static void pl050_realize(DeviceState *dev, Error **errp) { PL050State *s = PL050(dev); - SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - memory_region_init_io(&s->iomem, OBJECT(s), &pl050_ops, s, "pl050", 0x1000); - sysbus_init_mmio(sbd, &s->iomem); - sysbus_init_irq(sbd, &s->irq); - if (s->is_mouse) { - s->dev = ps2_mouse_init(pl050_update, s); - } else { - s->dev = ps2_kbd_init(pl050_update, s); - } + qdev_connect_gpio_out(DEVICE(s->ps2dev), PS2_DEVICE_IRQ, + qdev_get_gpio_in_named(dev, "ps2-input-irq", 0)); } -static void pl050_keyboard_init(Object *obj) +static void pl050_kbd_realize(DeviceState *dev, Error **errp) { - PL050State *s = PL050(obj); + PL050DeviceClass *pdc = PL050_GET_CLASS(dev); + PL050KbdState *s = PL050_KBD_DEVICE(dev); + PL050State *ps = PL050(dev); - s->is_mouse = false; + if (!sysbus_realize(SYS_BUS_DEVICE(&s->kbd), errp)) { + return; + } + + ps->ps2dev = PS2_DEVICE(&s->kbd); + pdc->parent_realize(dev, errp); +} + +static void pl050_kbd_init(Object *obj) +{ + PL050KbdState *s = PL050_KBD_DEVICE(obj); + PL050State *ps = PL050(obj); + + ps->is_mouse = false; + object_initialize_child(obj, "kbd", &s->kbd, TYPE_PS2_KBD_DEVICE); +} + +static void pl050_mouse_realize(DeviceState *dev, Error **errp) +{ + PL050DeviceClass *pdc = PL050_GET_CLASS(dev); + PL050MouseState *s = PL050_MOUSE_DEVICE(dev); + PL050State *ps = PL050(dev); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->mouse), errp)) { + return; + } + + ps->ps2dev = PS2_DEVICE(&s->mouse); + pdc->parent_realize(dev, errp); } static void pl050_mouse_init(Object *obj) { - PL050State *s = PL050(obj); + PL050MouseState *s = PL050_MOUSE_DEVICE(obj); + PL050State *ps = PL050(obj); - s->is_mouse = true; + ps->is_mouse = true; + object_initialize_child(obj, "mouse", &s->mouse, TYPE_PS2_MOUSE_DEVICE); +} + +static void pl050_kbd_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PL050DeviceClass *pdc = PL050_CLASS(oc); + + device_class_set_parent_realize(dc, pl050_kbd_realize, + &pdc->parent_realize); } static const TypeInfo pl050_kbd_info = { - .name = "pl050_keyboard", + .name = TYPE_PL050_KBD_DEVICE, .parent = TYPE_PL050, - .instance_init = pl050_keyboard_init, + .instance_init = pl050_kbd_init, + .instance_size = sizeof(PL050KbdState), + .class_init = pl050_kbd_class_init, }; +static void pl050_mouse_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PL050DeviceClass *pdc = PL050_CLASS(oc); + + device_class_set_parent_realize(dc, pl050_mouse_realize, + &pdc->parent_realize); +} + static const TypeInfo pl050_mouse_info = { - .name = "pl050_mouse", + .name = TYPE_PL050_MOUSE_DEVICE, .parent = TYPE_PL050, .instance_init = pl050_mouse_init, + .instance_size = sizeof(PL050MouseState), + .class_init = pl050_mouse_class_init, }; +static void pl050_init(Object *obj) +{ + PL050State *s = PL050(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &pl050_ops, s, "pl050", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); + + qdev_init_gpio_in_named(DEVICE(obj), pl050_set_irq, "ps2-input-irq", 1); +} + static void pl050_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -195,7 
+260,10 @@ static void pl050_class_init(ObjectClass *oc, void *data) static const TypeInfo pl050_type_info = { .name = TYPE_PL050, .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = pl050_init, .instance_size = sizeof(PL050State), + .class_init = pl050_class_init, + .class_size = sizeof(PL050DeviceClass), .abstract = true, .class_init = pl050_class_init, }; diff --git a/hw/input/ps2.c b/hw/input/ps2.c index 8dd482c1f6..05cf7111e3 100644 --- a/hw/input/ps2.c +++ b/hw/input/ps2.c @@ -24,57 +24,61 @@ #include "qemu/osdep.h" #include "qemu/log.h" +#include "hw/irq.h" +#include "hw/sysbus.h" #include "hw/input/ps2.h" #include "migration/vmstate.h" #include "ui/console.h" #include "ui/input.h" #include "sysemu/reset.h" #include "sysemu/runstate.h" +#include "qapi/error.h" #include "trace.h" /* Keyboard Commands */ -#define KBD_CMD_SET_LEDS 0xED /* Set keyboard leds */ -#define KBD_CMD_ECHO 0xEE -#define KBD_CMD_SCANCODE 0xF0 /* Get/set scancode set */ -#define KBD_CMD_GET_ID 0xF2 /* get keyboard ID */ -#define KBD_CMD_SET_RATE 0xF3 /* Set typematic rate */ -#define KBD_CMD_ENABLE 0xF4 /* Enable scanning */ -#define KBD_CMD_RESET_DISABLE 0xF5 /* reset and disable scanning */ -#define KBD_CMD_RESET_ENABLE 0xF6 /* reset and enable scanning */ -#define KBD_CMD_RESET 0xFF /* Reset */ +#define KBD_CMD_SET_LEDS 0xED /* Set keyboard leds */ +#define KBD_CMD_ECHO 0xEE +#define KBD_CMD_SCANCODE 0xF0 /* Get/set scancode set */ +#define KBD_CMD_GET_ID 0xF2 /* get keyboard ID */ +#define KBD_CMD_SET_RATE 0xF3 /* Set typematic rate */ +#define KBD_CMD_ENABLE 0xF4 /* Enable scanning */ +#define KBD_CMD_RESET_DISABLE 0xF5 /* reset and disable scanning */ +#define KBD_CMD_RESET_ENABLE 0xF6 /* reset and enable scanning */ +#define KBD_CMD_RESET 0xFF /* Reset */ #define KBD_CMD_SET_MAKE_BREAK 0xFC /* Set Make and Break mode */ #define KBD_CMD_SET_TYPEMATIC 0xFA /* Set Typematic Make and Break mode */ /* Keyboard Replies */ -#define KBD_REPLY_POR 0xAA /* Power on reset */ -#define KBD_REPLY_ID 0xAB /* Keyboard ID */ -#define KBD_REPLY_ACK 0xFA /* Command ACK */ -#define KBD_REPLY_RESEND 0xFE /* Command NACK, send the cmd again */ +#define KBD_REPLY_POR 0xAA /* Power on reset */ +#define KBD_REPLY_ID 0xAB /* Keyboard ID */ +#define KBD_REPLY_ACK 0xFA /* Command ACK */ +#define KBD_REPLY_RESEND 0xFE /* Command NACK, send the cmd again */ /* Mouse Commands */ -#define AUX_SET_SCALE11 0xE6 /* Set 1:1 scaling */ -#define AUX_SET_SCALE21 0xE7 /* Set 2:1 scaling */ -#define AUX_SET_RES 0xE8 /* Set resolution */ -#define AUX_GET_SCALE 0xE9 /* Get scaling factor */ -#define AUX_SET_STREAM 0xEA /* Set stream mode */ -#define AUX_POLL 0xEB /* Poll */ -#define AUX_RESET_WRAP 0xEC /* Reset wrap mode */ -#define AUX_SET_WRAP 0xEE /* Set wrap mode */ -#define AUX_SET_REMOTE 0xF0 /* Set remote mode */ -#define AUX_GET_TYPE 0xF2 /* Get type */ -#define AUX_SET_SAMPLE 0xF3 /* Set sample rate */ -#define AUX_ENABLE_DEV 0xF4 /* Enable aux device */ -#define AUX_DISABLE_DEV 0xF5 /* Disable aux device */ -#define AUX_SET_DEFAULT 0xF6 -#define AUX_RESET 0xFF /* Reset aux device */ -#define AUX_ACK 0xFA /* Command byte ACK. 
*/ +#define AUX_SET_SCALE11 0xE6 /* Set 1:1 scaling */ +#define AUX_SET_SCALE21 0xE7 /* Set 2:1 scaling */ +#define AUX_SET_RES 0xE8 /* Set resolution */ +#define AUX_GET_SCALE 0xE9 /* Get scaling factor */ +#define AUX_SET_STREAM 0xEA /* Set stream mode */ +#define AUX_POLL 0xEB /* Poll */ +#define AUX_RESET_WRAP 0xEC /* Reset wrap mode */ +#define AUX_SET_WRAP 0xEE /* Set wrap mode */ +#define AUX_SET_REMOTE 0xF0 /* Set remote mode */ +#define AUX_GET_TYPE 0xF2 /* Get type */ +#define AUX_SET_SAMPLE 0xF3 /* Set sample rate */ +#define AUX_ENABLE_DEV 0xF4 /* Enable aux device */ +#define AUX_DISABLE_DEV 0xF5 /* Disable aux device */ +#define AUX_SET_DEFAULT 0xF6 +#define AUX_RESET 0xFF /* Reset aux device */ +#define AUX_ACK 0xFA /* Command byte ACK. */ #define MOUSE_STATUS_REMOTE 0x40 #define MOUSE_STATUS_ENABLED 0x20 #define MOUSE_STATUS_SCALE21 0x10 -#define PS2_QUEUE_SIZE 16 /* Buffer size required by PS/2 protocol */ +#define PS2_QUEUE_SIZE 16 /* Queue size required by PS/2 protocol */ +#define PS2_QUEUE_HEADROOM 8 /* Queue size for keyboard command replies */ /* Bits for 'modifiers' field in PS2KbdState */ #define MOD_CTRL_L (1 << 0) @@ -84,44 +88,6 @@ #define MOD_SHIFT_R (1 << 4) #define MOD_ALT_R (1 << 5) -typedef struct { - /* Keep the data array 256 bytes long, which compatibility - with older qemu versions. */ - uint8_t data[256]; - int rptr, wptr, count; -} PS2Queue; - -struct PS2State { - PS2Queue queue; - int32_t write_cmd; - void (*update_irq)(void *, int); - void *update_arg; -}; - -typedef struct { - PS2State common; - int scan_enabled; - int translate; - int scancode_set; /* 1=XT, 2=AT, 3=PS/2 */ - int ledstate; - bool need_high_bit; - unsigned int modifiers; /* bitmask of MOD_* constants above */ -} PS2KbdState; - -typedef struct { - PS2State common; - uint8_t mouse_status; - uint8_t mouse_resolution; - uint8_t mouse_sample_rate; - uint8_t mouse_wrap; - uint8_t mouse_type; /* 0 = PS2, 3 = IMPS/2, 4 = IMEX */ - uint8_t mouse_detect_state; - int mouse_dx; /* current values, needed for 'poll' mode */ - int mouse_dy; - int mouse_dz; - uint8_t mouse_buttons; -} PS2MouseState; - static uint8_t translate_table[256] = { 0xff, 0x43, 0x41, 0x3f, 0x3d, 0x3b, 0x3c, 0x58, 0x64, 0x44, 0x42, 0x40, 0x3e, 0x0f, 0x29, 0x59, @@ -183,6 +149,7 @@ static void ps2_reset_queue(PS2State *s) q->rptr = 0; q->wptr = 0; + q->cwptr = -1; q->count = 0; } @@ -195,19 +162,25 @@ void ps2_queue_noirq(PS2State *s, int b) { PS2Queue *q = &s->queue; - if (q->count == PS2_QUEUE_SIZE) { + if (q->count >= PS2_QUEUE_SIZE) { return; } q->data[q->wptr] = b; - if (++q->wptr == PS2_QUEUE_SIZE) + if (++q->wptr == PS2_BUFFER_SIZE) { q->wptr = 0; + } q->count++; } -void ps2_raise_irq(PS2State *s) +static void ps2_raise_irq(PS2State *s) { - s->update_irq(s->update_arg, 1); + qemu_set_irq(s->irq, 1); +} + +static void ps2_lower_irq(PS2State *s) +{ + qemu_set_irq(s->irq, 0); } void ps2_queue(PS2State *s, int b) @@ -256,10 +229,68 @@ void ps2_queue_4(PS2State *s, int b1, int b2, int b3, int b4) ps2_raise_irq(s); } +static void ps2_cqueue_data(PS2Queue *q, int b) +{ + q->data[q->cwptr] = b; + if (++q->cwptr >= PS2_BUFFER_SIZE) { + q->cwptr = 0; + } + q->count++; +} + +static void ps2_cqueue_1(PS2State *s, int b1) +{ + PS2Queue *q = &s->queue; + + q->rptr = (q->rptr - 1) & (PS2_BUFFER_SIZE - 1); + q->cwptr = q->rptr; + ps2_cqueue_data(q, b1); + ps2_raise_irq(s); +} + +static void ps2_cqueue_2(PS2State *s, int b1, int b2) +{ + PS2Queue *q = &s->queue; + + q->rptr = (q->rptr - 2) & (PS2_BUFFER_SIZE - 1); + q->cwptr = q->rptr; 
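+    /*
+     * Worked example of the rewind above, assuming PS2_BUFFER_SIZE is a
+     * power of two (e.g. 256, matching the 256-byte data[] array): with
+     * rptr == 0 the subtraction wraps, (0 - 2) & 255 == 254, so the two
+     * command-reply bytes land just ahead of any queued scancodes and
+     * ps2_read_data() hands them out first.
+     */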
+ ps2_cqueue_data(q, b1); + ps2_cqueue_data(q, b2); + ps2_raise_irq(s); +} + +static void ps2_cqueue_3(PS2State *s, int b1, int b2, int b3) +{ + PS2Queue *q = &s->queue; + + q->rptr = (q->rptr - 3) & (PS2_BUFFER_SIZE - 1); + q->cwptr = q->rptr; + ps2_cqueue_data(q, b1); + ps2_cqueue_data(q, b2); + ps2_cqueue_data(q, b3); + ps2_raise_irq(s); +} + +static void ps2_cqueue_reset(PS2State *s) +{ + PS2Queue *q = &s->queue; + int ccount; + + if (q->cwptr == -1) { + return; + } + + ccount = (q->cwptr - q->rptr) & (PS2_BUFFER_SIZE - 1); + q->count -= ccount; + q->rptr = q->cwptr; + q->cwptr = -1; +} + /* keycode is the untranslated scancode in the current scancode set. */ static void ps2_put_keycode(void *opaque, int keycode) { PS2KbdState *s = opaque; + PS2State *ps = PS2_DEVICE(s); trace_ps2_put_keycode(opaque, keycode); qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); @@ -268,13 +299,13 @@ static void ps2_put_keycode(void *opaque, int keycode) if (keycode == 0xf0) { s->need_high_bit = true; } else if (s->need_high_bit) { - ps2_queue(&s->common, translate_table[keycode] | 0x80); + ps2_queue(ps, translate_table[keycode] | 0x80); s->need_high_bit = false; } else { - ps2_queue(&s->common, translate_table[keycode]); + ps2_queue(ps, translate_table[keycode]); } } else { - ps2_queue(&s->common, keycode); + ps2_queue(ps, keycode); } } @@ -372,8 +403,9 @@ static void ps2_keyboard_event(DeviceState *dev, QemuConsole *src, } } } else { - if (qcode < qemu_input_map_qcode_to_atset1_len) + if (qcode < qemu_input_map_qcode_to_atset1_len) { keycode = qemu_input_map_qcode_to_atset1[qcode]; + } if (keycode) { if (keycode & 0xff00) { ps2_put_keycode(s, keycode >> 8); @@ -466,8 +498,9 @@ static void ps2_keyboard_event(DeviceState *dev, QemuConsole *src, } } } else { - if (qcode < qemu_input_map_qcode_to_atset2_len) + if (qcode < qemu_input_map_qcode_to_atset2_len) { keycode = qemu_input_map_qcode_to_atset2[qcode]; + } if (keycode) { if (keycode & 0xff00) { ps2_put_keycode(s, keycode >> 8); @@ -482,8 +515,9 @@ static void ps2_keyboard_event(DeviceState *dev, QemuConsole *src, } } } else if (s->scancode_set == 3) { - if (qcode < qemu_input_map_qcode_to_atset3_len) + if (qcode < qemu_input_map_qcode_to_atset3_len) { keycode = qemu_input_map_qcode_to_atset3[qcode]; + } if (keycode) { /* FIXME: break code should be configured on a key by key basis */ if (!key->down) { @@ -505,23 +539,31 @@ uint32_t ps2_read_data(PS2State *s) trace_ps2_read_data(s); q = &s->queue; if (q->count == 0) { - /* NOTE: if no data left, we return the last keyboard one - (needed for EMM386) */ + /* + * NOTE: if no data left, we return the last keyboard one + * (needed for EMM386) + */ /* XXX: need a timer to do things correctly */ index = q->rptr - 1; - if (index < 0) - index = PS2_QUEUE_SIZE - 1; + if (index < 0) { + index = PS2_BUFFER_SIZE - 1; + } val = q->data[index]; } else { val = q->data[q->rptr]; - if (++q->rptr == PS2_QUEUE_SIZE) + if (++q->rptr == PS2_BUFFER_SIZE) { q->rptr = 0; + } q->count--; + if (q->rptr == q->cwptr) { + /* command reply queue is empty */ + q->cwptr = -1; + } /* reading deasserts IRQ */ - s->update_irq(s->update_arg, 0); + ps2_lower_irq(s); /* reassert IRQs if data left */ if (q->count) { - s->update_irq(s->update_arg, 1); + ps2_raise_irq(s); } } return val; @@ -536,175 +578,204 @@ static void ps2_set_ledstate(PS2KbdState *s, int ledstate) static void ps2_reset_keyboard(PS2KbdState *s) { + PS2State *ps2 = PS2_DEVICE(s); + trace_ps2_reset_keyboard(s); s->scan_enabled = 1; s->scancode_set = 2; - 
ps2_reset_queue(&s->common); + ps2_reset_queue(ps2); ps2_set_ledstate(s, 0); } -void ps2_write_keyboard(void *opaque, int val) +void ps2_write_keyboard(PS2KbdState *s, int val) { - PS2KbdState *s = (PS2KbdState *)opaque; + PS2State *ps2 = PS2_DEVICE(s); - trace_ps2_write_keyboard(opaque, val); - switch(s->common.write_cmd) { + trace_ps2_write_keyboard(s, val); + ps2_cqueue_reset(ps2); + switch (ps2->write_cmd) { default: case -1: - switch(val) { + switch (val) { case 0x00: - ps2_queue(&s->common, KBD_REPLY_ACK); + ps2_cqueue_1(ps2, KBD_REPLY_ACK); break; case 0x05: - ps2_queue(&s->common, KBD_REPLY_RESEND); + ps2_cqueue_1(ps2, KBD_REPLY_RESEND); break; case KBD_CMD_GET_ID: /* We emulate a MF2 AT keyboard here */ - if (s->translate) - ps2_queue_3(&s->common, - KBD_REPLY_ACK, - KBD_REPLY_ID, - 0x41); - else - ps2_queue_3(&s->common, - KBD_REPLY_ACK, - KBD_REPLY_ID, - 0x83); + ps2_cqueue_3(ps2, KBD_REPLY_ACK, KBD_REPLY_ID, + s->translate ? 0x41 : 0x83); break; case KBD_CMD_ECHO: - ps2_queue(&s->common, KBD_CMD_ECHO); + ps2_cqueue_1(ps2, KBD_CMD_ECHO); break; case KBD_CMD_ENABLE: s->scan_enabled = 1; - ps2_queue(&s->common, KBD_REPLY_ACK); + ps2_cqueue_1(ps2, KBD_REPLY_ACK); break; case KBD_CMD_SCANCODE: case KBD_CMD_SET_LEDS: case KBD_CMD_SET_RATE: case KBD_CMD_SET_MAKE_BREAK: - s->common.write_cmd = val; - ps2_queue(&s->common, KBD_REPLY_ACK); + ps2->write_cmd = val; + ps2_cqueue_1(ps2, KBD_REPLY_ACK); break; case KBD_CMD_RESET_DISABLE: ps2_reset_keyboard(s); s->scan_enabled = 0; - ps2_queue(&s->common, KBD_REPLY_ACK); + ps2_cqueue_1(ps2, KBD_REPLY_ACK); break; case KBD_CMD_RESET_ENABLE: ps2_reset_keyboard(s); s->scan_enabled = 1; - ps2_queue(&s->common, KBD_REPLY_ACK); + ps2_cqueue_1(ps2, KBD_REPLY_ACK); break; case KBD_CMD_RESET: ps2_reset_keyboard(s); - ps2_queue_2(&s->common, - KBD_REPLY_ACK, - KBD_REPLY_POR); + ps2_cqueue_2(ps2, + KBD_REPLY_ACK, + KBD_REPLY_POR); break; case KBD_CMD_SET_TYPEMATIC: - ps2_queue(&s->common, KBD_REPLY_ACK); + ps2_cqueue_1(ps2, KBD_REPLY_ACK); break; default: - ps2_queue(&s->common, KBD_REPLY_RESEND); + ps2_cqueue_1(ps2, KBD_REPLY_RESEND); break; } break; case KBD_CMD_SET_MAKE_BREAK: - ps2_queue(&s->common, KBD_REPLY_ACK); - s->common.write_cmd = -1; + ps2_cqueue_1(ps2, KBD_REPLY_ACK); + ps2->write_cmd = -1; break; case KBD_CMD_SCANCODE: if (val == 0) { - if (s->common.queue.count <= PS2_QUEUE_SIZE - 2) { - ps2_queue(&s->common, KBD_REPLY_ACK); - ps2_put_keycode(s, s->scancode_set); - } + ps2_cqueue_2(ps2, KBD_REPLY_ACK, s->translate ? + translate_table[s->scancode_set] : s->scancode_set); } else if (val >= 1 && val <= 3) { s->scancode_set = val; - ps2_queue(&s->common, KBD_REPLY_ACK); + ps2_cqueue_1(ps2, KBD_REPLY_ACK); } else { - ps2_queue(&s->common, KBD_REPLY_RESEND); + ps2_cqueue_1(ps2, KBD_REPLY_RESEND); } - s->common.write_cmd = -1; + ps2->write_cmd = -1; break; case KBD_CMD_SET_LEDS: ps2_set_ledstate(s, val); - ps2_queue(&s->common, KBD_REPLY_ACK); - s->common.write_cmd = -1; + ps2_cqueue_1(ps2, KBD_REPLY_ACK); + ps2->write_cmd = -1; break; case KBD_CMD_SET_RATE: - ps2_queue(&s->common, KBD_REPLY_ACK); - s->common.write_cmd = -1; + ps2_cqueue_1(ps2, KBD_REPLY_ACK); + ps2->write_cmd = -1; break; } } -/* Set the scancode translation mode. - 0 = raw scancodes. - 1 = translated scancodes (used by qemu internally). */ +/* + * Set the scancode translation mode. + * 0 = raw scancodes. + * 1 = translated scancodes (used by qemu internally). 
+ */ -void ps2_keyboard_set_translation(void *opaque, int mode) +void ps2_keyboard_set_translation(PS2KbdState *s, int mode) { - PS2KbdState *s = (PS2KbdState *)opaque; - trace_ps2_keyboard_set_translation(opaque, mode); + trace_ps2_keyboard_set_translation(s, mode); s->translate = mode; } static int ps2_mouse_send_packet(PS2MouseState *s) { + PS2State *ps2 = PS2_DEVICE(s); /* IMPS/2 and IMEX send 4 bytes, PS2 sends 3 bytes */ const int needed = s->mouse_type ? 4 : 3; unsigned int b; - int dx1, dy1, dz1; + int dx1, dy1, dz1, dw1; - if (PS2_QUEUE_SIZE - s->common.queue.count < needed) { + if (PS2_QUEUE_SIZE - ps2->queue.count < needed) { return 0; } dx1 = s->mouse_dx; dy1 = s->mouse_dy; dz1 = s->mouse_dz; + dw1 = s->mouse_dw; /* XXX: increase range to 8 bits ? */ - if (dx1 > 127) + if (dx1 > 127) { dx1 = 127; - else if (dx1 < -127) + } else if (dx1 < -127) { dx1 = -127; - if (dy1 > 127) + } + if (dy1 > 127) { dy1 = 127; - else if (dy1 < -127) + } else if (dy1 < -127) { dy1 = -127; + } b = 0x08 | ((dx1 < 0) << 4) | ((dy1 < 0) << 5) | (s->mouse_buttons & 0x07); - ps2_queue_noirq(&s->common, b); - ps2_queue_noirq(&s->common, dx1 & 0xff); - ps2_queue_noirq(&s->common, dy1 & 0xff); + ps2_queue_noirq(ps2, b); + ps2_queue_noirq(ps2, dx1 & 0xff); + ps2_queue_noirq(ps2, dy1 & 0xff); /* extra byte for IMPS/2 or IMEX */ - switch(s->mouse_type) { + switch (s->mouse_type) { default: + /* Just ignore the wheels if not supported */ + s->mouse_dz = 0; + s->mouse_dw = 0; break; case 3: - if (dz1 > 127) + if (dz1 > 127) { dz1 = 127; - else if (dz1 < -127) - dz1 = -127; - ps2_queue_noirq(&s->common, dz1 & 0xff); + } else if (dz1 < -127) { + dz1 = -127; + } + ps2_queue_noirq(ps2, dz1 & 0xff); + s->mouse_dz -= dz1; + s->mouse_dw = 0; break; case 4: - if (dz1 > 7) - dz1 = 7; - else if (dz1 < -7) - dz1 = -7; - b = (dz1 & 0x0f) | ((s->mouse_buttons & 0x18) << 1); - ps2_queue_noirq(&s->common, b); + /* + * This matches what the Linux kernel expects for exps/2 in + * drivers/input/mouse/psmouse-base.c. Note, if you happen to + * press/release the 4th or 5th buttons at the same moment as a + * horizontal wheel scroll, those button presses will get lost. I'm not + * sure what to do about that, since by this point we don't know + * whether those buttons actually changed state. 
+ */ + if (dw1 != 0) { + if (dw1 > 31) { + dw1 = 31; + } else if (dw1 < -31) { + dw1 = -31; + } + + /* + * linux kernel expects first 6 bits to represent the value + * for horizontal scroll + */ + b = (dw1 & 0x3f) | 0x40; + s->mouse_dw -= dw1; + } else { + if (dz1 > 7) { + dz1 = 7; + } else if (dz1 < -7) { + dz1 = -7; + } + + b = (dz1 & 0x0f) | ((s->mouse_buttons & 0x18) << 1); + s->mouse_dz -= dz1; + } + ps2_queue_noirq(ps2, b); break; } - ps2_raise_irq(&s->common); + ps2_raise_irq(ps2); trace_ps2_mouse_send_packet(s, dx1, dy1, dz1, b); /* update deltas */ s->mouse_dx -= dx1; s->mouse_dy -= dy1; - s->mouse_dz -= dz1; return 1; } @@ -724,8 +795,9 @@ static void ps2_mouse_event(DeviceState *dev, QemuConsole *src, InputBtnEvent *btn; /* check if deltas are recorded when disabled */ - if (!(s->mouse_status & MOUSE_STATUS_ENABLED)) + if (!(s->mouse_status & MOUSE_STATUS_ENABLED)) { return; + } switch (evt->type) { case INPUT_EVENT_KIND_REL: @@ -746,6 +818,12 @@ static void ps2_mouse_event(DeviceState *dev, QemuConsole *src, } else if (btn->button == INPUT_BUTTON_WHEEL_DOWN) { s->mouse_dz++; } + + if (btn->button == INPUT_BUTTON_WHEEL_RIGHT) { + s->mouse_dw--; + } else if (btn->button == INPUT_BUTTON_WHEEL_LEFT) { + s->mouse_dw++; + } } else { s->mouse_buttons &= ~bmap[btn->button]; } @@ -770,105 +848,108 @@ static void ps2_mouse_sync(DeviceState *dev) qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); } if (!(s->mouse_status & MOUSE_STATUS_REMOTE)) { - /* if not remote, send event. Multiple events are sent if - too big deltas */ + /* + * if not remote, send event. Multiple events are sent if + * too big deltas + */ while (ps2_mouse_send_packet(s)) { - if (s->mouse_dx == 0 && s->mouse_dy == 0 && s->mouse_dz == 0) + if (s->mouse_dx == 0 && s->mouse_dy == 0 + && s->mouse_dz == 0 && s->mouse_dw == 0) { break; + } } } } -void ps2_mouse_fake_event(void *opaque) +void ps2_mouse_fake_event(PS2MouseState *s) { - PS2MouseState *s = opaque; - trace_ps2_mouse_fake_event(opaque); + trace_ps2_mouse_fake_event(s); s->mouse_dx++; - ps2_mouse_sync(opaque); + ps2_mouse_sync(DEVICE(s)); } -void ps2_write_mouse(void *opaque, int val) +void ps2_write_mouse(PS2MouseState *s, int val) { - PS2MouseState *s = (PS2MouseState *)opaque; + PS2State *ps2 = PS2_DEVICE(s); - trace_ps2_write_mouse(opaque, val); - switch(s->common.write_cmd) { + trace_ps2_write_mouse(s, val); + switch (ps2->write_cmd) { default: case -1: /* mouse command */ if (s->mouse_wrap) { if (val == AUX_RESET_WRAP) { s->mouse_wrap = 0; - ps2_queue(&s->common, AUX_ACK); + ps2_queue(ps2, AUX_ACK); return; } else if (val != AUX_RESET) { - ps2_queue(&s->common, val); + ps2_queue(ps2, val); return; } } - switch(val) { + switch (val) { case AUX_SET_SCALE11: s->mouse_status &= ~MOUSE_STATUS_SCALE21; - ps2_queue(&s->common, AUX_ACK); + ps2_queue(ps2, AUX_ACK); break; case AUX_SET_SCALE21: s->mouse_status |= MOUSE_STATUS_SCALE21; - ps2_queue(&s->common, AUX_ACK); + ps2_queue(ps2, AUX_ACK); break; case AUX_SET_STREAM: s->mouse_status &= ~MOUSE_STATUS_REMOTE; - ps2_queue(&s->common, AUX_ACK); + ps2_queue(ps2, AUX_ACK); break; case AUX_SET_WRAP: s->mouse_wrap = 1; - ps2_queue(&s->common, AUX_ACK); + ps2_queue(ps2, AUX_ACK); break; case AUX_SET_REMOTE: s->mouse_status |= MOUSE_STATUS_REMOTE; - ps2_queue(&s->common, AUX_ACK); + ps2_queue(ps2, AUX_ACK); break; case AUX_GET_TYPE: - ps2_queue_2(&s->common, + ps2_queue_2(ps2, AUX_ACK, s->mouse_type); break; case AUX_SET_RES: case AUX_SET_SAMPLE: - s->common.write_cmd = val; - ps2_queue(&s->common, AUX_ACK); + 
ps2->write_cmd = val; + ps2_queue(ps2, AUX_ACK); break; case AUX_GET_SCALE: - ps2_queue_4(&s->common, + ps2_queue_4(ps2, AUX_ACK, s->mouse_status, s->mouse_resolution, s->mouse_sample_rate); break; case AUX_POLL: - ps2_queue(&s->common, AUX_ACK); + ps2_queue(ps2, AUX_ACK); ps2_mouse_send_packet(s); break; case AUX_ENABLE_DEV: s->mouse_status |= MOUSE_STATUS_ENABLED; - ps2_queue(&s->common, AUX_ACK); + ps2_queue(ps2, AUX_ACK); break; case AUX_DISABLE_DEV: s->mouse_status &= ~MOUSE_STATUS_ENABLED; - ps2_queue(&s->common, AUX_ACK); + ps2_queue(ps2, AUX_ACK); break; case AUX_SET_DEFAULT: s->mouse_sample_rate = 100; s->mouse_resolution = 2; s->mouse_status = 0; - ps2_queue(&s->common, AUX_ACK); + ps2_queue(ps2, AUX_ACK); break; case AUX_RESET: s->mouse_sample_rate = 100; s->mouse_resolution = 2; s->mouse_status = 0; s->mouse_type = 0; - ps2_reset_queue(&s->common); - ps2_queue_3(&s->common, + ps2_reset_queue(ps2); + ps2_queue_3(ps2, AUX_ACK, 0xaa, s->mouse_type); @@ -880,96 +961,103 @@ void ps2_write_mouse(void *opaque, int val) case AUX_SET_SAMPLE: s->mouse_sample_rate = val; /* detect IMPS/2 or IMEX */ - switch(s->mouse_detect_state) { + switch (s->mouse_detect_state) { default: case 0: - if (val == 200) + if (val == 200) { s->mouse_detect_state = 1; + } break; case 1: - if (val == 100) + if (val == 100) { s->mouse_detect_state = 2; - else if (val == 200) + } else if (val == 200) { s->mouse_detect_state = 3; - else + } else { s->mouse_detect_state = 0; + } break; case 2: - if (val == 80) + if (val == 80) { s->mouse_type = 3; /* IMPS/2 */ + } s->mouse_detect_state = 0; break; case 3: - if (val == 80) + if (val == 80) { s->mouse_type = 4; /* IMEX */ + } s->mouse_detect_state = 0; break; } - ps2_queue(&s->common, AUX_ACK); - s->common.write_cmd = -1; + ps2_queue(ps2, AUX_ACK); + ps2->write_cmd = -1; break; case AUX_SET_RES: s->mouse_resolution = val; - ps2_queue(&s->common, AUX_ACK); - s->common.write_cmd = -1; + ps2_queue(ps2, AUX_ACK); + ps2->write_cmd = -1; break; } } -static void ps2_common_reset(PS2State *s) +static void ps2_reset(DeviceState *dev) { + PS2State *s = PS2_DEVICE(dev); + s->write_cmd = -1; ps2_reset_queue(s); - s->update_irq(s->update_arg, 0); + ps2_lower_irq(s); } static void ps2_common_post_load(PS2State *s) { PS2Queue *q = &s->queue; - uint8_t i, size; - uint8_t tmp_data[PS2_QUEUE_SIZE]; + int ccount = 0; - /* set the useful data buffer queue size, < PS2_QUEUE_SIZE */ - size = q->count; - if (q->count < 0) { - size = 0; - } else if (q->count > PS2_QUEUE_SIZE) { - size = PS2_QUEUE_SIZE; - } - - /* move the queue elements to the start of data array */ - for (i = 0; i < size; i++) { - if (q->rptr < 0 || q->rptr >= sizeof(q->data)) { - q->rptr = 0; + /* limit the number of queued command replies to PS2_QUEUE_HEADROOM */ + if (q->cwptr != -1) { + ccount = (q->cwptr - q->rptr) & (PS2_BUFFER_SIZE - 1); + if (ccount > PS2_QUEUE_HEADROOM) { + ccount = PS2_QUEUE_HEADROOM; } - tmp_data[i] = q->data[q->rptr++]; } - memcpy(q->data, tmp_data, size); - /* reset rptr/wptr/count */ - q->rptr = 0; - q->wptr = (size == PS2_QUEUE_SIZE) ? 0 : size; - q->count = size; + /* limit the scancode queue size to PS2_QUEUE_SIZE */ + if (q->count < ccount) { + q->count = ccount; + } else if (q->count > ccount + PS2_QUEUE_SIZE) { + q->count = ccount + PS2_QUEUE_SIZE; + } + + /* sanitize rptr and recalculate wptr and cwptr */ + q->rptr = q->rptr & (PS2_BUFFER_SIZE - 1); + q->wptr = (q->rptr + q->count) & (PS2_BUFFER_SIZE - 1); + q->cwptr = ccount ? 
(q->rptr + ccount) & (PS2_BUFFER_SIZE - 1) : -1; } -static void ps2_kbd_reset(void *opaque) +static void ps2_kbd_reset(DeviceState *dev) { - PS2KbdState *s = (PS2KbdState *) opaque; + PS2DeviceClass *ps2dc = PS2_DEVICE_GET_CLASS(dev); + PS2KbdState *s = PS2_KBD_DEVICE(dev); + + trace_ps2_kbd_reset(s); + ps2dc->parent_reset(dev); - trace_ps2_kbd_reset(opaque); - ps2_common_reset(&s->common); s->scan_enabled = 1; s->translate = 0; s->scancode_set = 2; s->modifiers = 0; } -static void ps2_mouse_reset(void *opaque) +static void ps2_mouse_reset(DeviceState *dev) { - PS2MouseState *s = (PS2MouseState *) opaque; + PS2DeviceClass *ps2dc = PS2_DEVICE_GET_CLASS(dev); + PS2MouseState *s = PS2_MOUSE_DEVICE(dev); + + trace_ps2_mouse_reset(s); + ps2dc->parent_reset(dev); - trace_ps2_mouse_reset(opaque); - ps2_common_reset(&s->common); s->mouse_status = 0; s->mouse_resolution = 0; s->mouse_sample_rate = 0; @@ -979,6 +1067,7 @@ static void ps2_mouse_reset(void *opaque) s->mouse_dx = 0; s->mouse_dy = 0; s->mouse_dz = 0; + s->mouse_dw = 0; s->mouse_buttons = 0; } @@ -1040,23 +1129,31 @@ static const VMStateDescription vmstate_ps2_keyboard_need_high_bit = { } }; -static int ps2_kbd_post_load(void* opaque, int version_id) +static bool ps2_keyboard_cqueue_needed(void *opaque) { - PS2KbdState *s = (PS2KbdState*)opaque; - PS2State *ps2 = &s->common; + PS2KbdState *s = opaque; + PS2State *ps2 = PS2_DEVICE(s); - if (version_id == 2) - s->scancode_set=2; - - ps2_common_post_load(ps2); - - return 0; + return ps2->queue.cwptr != -1; /* the queue is mostly empty */ } -static int ps2_kbd_pre_save(void *opaque) +static const VMStateDescription vmstate_ps2_keyboard_cqueue = { + .name = "ps2kbd/command_reply_queue", + .needed = ps2_keyboard_cqueue_needed, + .fields = (VMStateField[]) { + VMSTATE_INT32(parent_obj.queue.cwptr, PS2KbdState), + VMSTATE_END_OF_LIST() + } +}; + +static int ps2_kbd_post_load(void *opaque, int version_id) { PS2KbdState *s = (PS2KbdState *)opaque; - PS2State *ps2 = &s->common; + PS2State *ps2 = PS2_DEVICE(s); + + if (version_id == 2) { + s->scancode_set = 2; + } ps2_common_post_load(ps2); @@ -1068,17 +1165,18 @@ static const VMStateDescription vmstate_ps2_keyboard = { .version_id = 3, .minimum_version_id = 2, .post_load = ps2_kbd_post_load, - .pre_save = ps2_kbd_pre_save, .fields = (VMStateField[]) { - VMSTATE_STRUCT(common, PS2KbdState, 0, vmstate_ps2_common, PS2State), + VMSTATE_STRUCT(parent_obj, PS2KbdState, 0, vmstate_ps2_common, + PS2State), VMSTATE_INT32(scan_enabled, PS2KbdState), VMSTATE_INT32(translate, PS2KbdState), - VMSTATE_INT32_V(scancode_set, PS2KbdState,3), + VMSTATE_INT32_V(scancode_set, PS2KbdState, 3), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * []) { &vmstate_ps2_keyboard_ledstate, &vmstate_ps2_keyboard_need_high_bit, + &vmstate_ps2_keyboard_cqueue, NULL } }; @@ -1086,17 +1184,7 @@ static const VMStateDescription vmstate_ps2_keyboard = { static int ps2_mouse_post_load(void *opaque, int version_id) { PS2MouseState *s = (PS2MouseState *)opaque; - PS2State *ps2 = &s->common; - - ps2_common_post_load(ps2); - - return 0; -} - -static int ps2_mouse_pre_save(void *opaque) -{ - PS2MouseState *s = (PS2MouseState *)opaque; - PS2State *ps2 = &s->common; + PS2State *ps2 = PS2_DEVICE(s); ps2_common_post_load(ps2); @@ -1108,9 +1196,9 @@ static const VMStateDescription vmstate_ps2_mouse = { .version_id = 2, .minimum_version_id = 2, .post_load = ps2_mouse_post_load, - .pre_save = ps2_mouse_pre_save, .fields = 
(VMStateField[]) { - VMSTATE_STRUCT(common, PS2MouseState, 0, vmstate_ps2_common, PS2State), + VMSTATE_STRUCT(parent_obj, PS2MouseState, 0, vmstate_ps2_common, + PS2State), VMSTATE_UINT8(mouse_status, PS2MouseState), VMSTATE_UINT8(mouse_resolution, PS2MouseState), VMSTATE_UINT8(mouse_sample_rate, PS2MouseState), @@ -1131,19 +1219,9 @@ static QemuInputHandler ps2_keyboard_handler = { .event = ps2_keyboard_event, }; -void *ps2_kbd_init(void (*update_irq)(void *, int), void *update_arg) +static void ps2_kbd_realize(DeviceState *dev, Error **errp) { - PS2KbdState *s = (PS2KbdState *)g_malloc0(sizeof(PS2KbdState)); - - trace_ps2_kbd_init(s); - s->common.update_irq = update_irq; - s->common.update_arg = update_arg; - s->scancode_set = 2; - vmstate_register(NULL, 0, &vmstate_ps2_keyboard, s); - qemu_input_handler_register((DeviceState *)s, - &ps2_keyboard_handler); - qemu_register_reset(ps2_kbd_reset, s); - return s; + qemu_input_handler_register(dev, &ps2_keyboard_handler); } static QemuInputHandler ps2_mouse_handler = { @@ -1153,16 +1231,76 @@ static QemuInputHandler ps2_mouse_handler = { .sync = ps2_mouse_sync, }; -void *ps2_mouse_init(void (*update_irq)(void *, int), void *update_arg) +static void ps2_mouse_realize(DeviceState *dev, Error **errp) { - PS2MouseState *s = (PS2MouseState *)g_malloc0(sizeof(PS2MouseState)); - - trace_ps2_mouse_init(s); - s->common.update_irq = update_irq; - s->common.update_arg = update_arg; - vmstate_register(NULL, 0, &vmstate_ps2_mouse, s); - qemu_input_handler_register((DeviceState *)s, - &ps2_mouse_handler); - qemu_register_reset(ps2_mouse_reset, s); - return s; + qemu_input_handler_register(dev, &ps2_mouse_handler); } + +static void ps2_kbd_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PS2DeviceClass *ps2dc = PS2_DEVICE_CLASS(klass); + + dc->realize = ps2_kbd_realize; + device_class_set_parent_reset(dc, ps2_kbd_reset, &ps2dc->parent_reset); + dc->vmsd = &vmstate_ps2_keyboard; +} + +static const TypeInfo ps2_kbd_info = { + .name = TYPE_PS2_KBD_DEVICE, + .parent = TYPE_PS2_DEVICE, + .instance_size = sizeof(PS2KbdState), + .class_init = ps2_kbd_class_init +}; + +static void ps2_mouse_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PS2DeviceClass *ps2dc = PS2_DEVICE_CLASS(klass); + + dc->realize = ps2_mouse_realize; + device_class_set_parent_reset(dc, ps2_mouse_reset, + &ps2dc->parent_reset); + dc->vmsd = &vmstate_ps2_mouse; +} + +static const TypeInfo ps2_mouse_info = { + .name = TYPE_PS2_MOUSE_DEVICE, + .parent = TYPE_PS2_DEVICE, + .instance_size = sizeof(PS2MouseState), + .class_init = ps2_mouse_class_init +}; + +static void ps2_init(Object *obj) +{ + PS2State *s = PS2_DEVICE(obj); + + qdev_init_gpio_out(DEVICE(obj), &s->irq, 1); +} + +static void ps2_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = ps2_reset; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo ps2_info = { + .name = TYPE_PS2_DEVICE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = ps2_init, + .instance_size = sizeof(PS2State), + .class_init = ps2_class_init, + .class_size = sizeof(PS2DeviceClass), + .abstract = true +}; + +static void ps2_register_types(void) +{ + type_register_static(&ps2_info); + type_register_static(&ps2_kbd_info); + type_register_static(&ps2_mouse_info); +} + +type_init(ps2_register_types) diff --git a/hw/input/pxa2xx_keypad.c b/hw/input/pxa2xx_keypad.c index 7f2f739fb3..3dd03e8c9f 100644 --- 
a/hw/input/pxa2xx_keypad.c +++ b/hw/input/pxa2xx_keypad.c @@ -306,7 +306,7 @@ PXA2xxKeyPadState *pxa27x_keypad_init(MemoryRegion *sysmem, { PXA2xxKeyPadState *s; - s = (PXA2xxKeyPadState *) g_malloc0(sizeof(PXA2xxKeyPadState)); + s = g_new0(PXA2xxKeyPadState, 1); s->irq = irq; memory_region_init_io(&s->iomem, NULL, &pxa2xx_keypad_ops, s, diff --git a/hw/input/trace-events b/hw/input/trace-events index e0bfe7f3ee..29001a827d 100644 --- a/hw/input/trace-events +++ b/hw/input/trace-events @@ -41,8 +41,6 @@ ps2_mouse_fake_event(void *opaque) "%p" ps2_write_mouse(void *opaque, int val) "%p val %d" ps2_kbd_reset(void *opaque) "%p" ps2_mouse_reset(void *opaque) "%p" -ps2_kbd_init(void *s) "%p" -ps2_mouse_init(void *s) "%p" # hid.c hid_kbd_queue_full(void) "queue full" diff --git a/hw/input/tsc2005.c b/hw/input/tsc2005.c index 55d61cc843..14698ce109 100644 --- a/hw/input/tsc2005.c +++ b/hw/input/tsc2005.c @@ -489,8 +489,7 @@ void *tsc2005_init(qemu_irq pintdav) { TSC2005State *s; - s = (TSC2005State *) - g_malloc0(sizeof(TSC2005State)); + s = g_new0(TSC2005State, 1); s->x = 400; s->y = 240; s->pressure = false; diff --git a/hw/input/tsc210x.c b/hw/input/tsc210x.c index 182d3725fc..df7313db5d 100644 --- a/hw/input/tsc210x.c +++ b/hw/input/tsc210x.c @@ -20,9 +20,11 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "hw/hw.h" #include "audio/audio.h" #include "qemu/timer.h" +#include "qemu/log.h" #include "sysemu/reset.h" #include "ui/console.h" #include "hw/arm/omap.h" /* For I2SCodec */ @@ -909,8 +911,11 @@ uint32_t tsc210x_txrx(void *opaque, uint32_t value, int len) TSC210xState *s = opaque; uint32_t ret = 0; - if (len != 16) - hw_error("%s: FIXME: bad SPI word width %i\n", __func__, len); + if (len != 16) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad SPI word width %i\n", __func__, len); + return 0; + } /* TODO: sequential reads etc - how do we make sure the host doesn't * unintentionally read out a conversion result from a register while diff --git a/hw/input/vhost-user-input.c b/hw/input/vhost-user-input.c index 273e96a7b1..1352e372ff 100644 --- a/hw/input/vhost-user-input.c +++ b/hw/input/vhost-user-input.c @@ -7,7 +7,6 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" #include "qapi/error.h" -#include "qemu-common.h" #include "hw/virtio/virtio-input.h" @@ -79,6 +78,12 @@ static void vhost_input_set_config(VirtIODevice *vdev, virtio_notify_config(vdev); } +static struct vhost_dev *vhost_input_get_vhost(VirtIODevice *vdev) +{ + VHostUserInput *vhi = VHOST_USER_INPUT(vdev); + return &vhi->vhost->dev; +} + static const VMStateDescription vmstate_vhost_input = { .name = "vhost-user-input", .unmigratable = 1, @@ -93,6 +98,7 @@ static void vhost_input_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_vhost_input; vdc->get_config = vhost_input_get_config; vdc->set_config = vhost_input_set_config; + vdc->get_vhost = vhost_input_get_vhost; vic->realize = vhost_input_realize; vic->change_active = vhost_input_change_active; } diff --git a/hw/input/virtio-input-host.c b/hw/input/virtio-input-host.c index 137efba57b..fea7139382 100644 --- a/hw/input/virtio-input-host.c +++ b/hw/input/virtio-input-host.c @@ -114,7 +114,10 @@ static void virtio_input_host_realize(DeviceState *dev, Error **errp) error_setg_file_open(errp, errno, vih->evdev); return; } - qemu_set_nonblock(vih->fd); + if (!g_unix_set_fd_nonblocking(vih->fd, true, NULL)) { + error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + goto err_close; + } rc = ioctl(vih->fd, EVIOCGVERSION, &ver); if (rc < 0) { 
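The tsc210x.c hunk above replaces hw_error(), which aborts QEMU outright, with qemu_log_mask(LOG_GUEST_ERROR, ...), which records the bad access and lets the device carry on. A minimal sketch of that pattern, with a hypothetical mydev_txrx handler standing in for the real device code:

    #include "qemu/osdep.h"
    #include "qemu/log.h"

    /* Hypothetical SPI transfer handler: reject a bad word width without aborting. */
    static uint32_t mydev_txrx(void *opaque, uint32_t value, int len)
    {
        if (len != 16) {
            /* Guest misbehaviour: log it (visible with -d guest_errors) and move on. */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: bad SPI word width %i\n", __func__, len);
            return 0;
        }
        /* ... normal transfer handling would go here ... */
        return value;
    }

The virtio-input-host.c hunk above follows the same spirit: the result of making the evdev fd non-blocking is now checked, reported through error_setg_errno(), and unwound via the err_close path instead of being assumed to succeed.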
diff --git a/hw/input/virtio-input.c b/hw/input/virtio-input.c index 54bcb46c74..5b5398b3ca 100644 --- a/hw/input/virtio-input.c +++ b/hw/input/virtio-input.c @@ -257,8 +257,7 @@ static void virtio_input_device_realize(DeviceState *dev, Error **errp) vinput->cfg_size += 8; assert(vinput->cfg_size <= sizeof(virtio_input_config)); - virtio_init(vdev, "virtio-input", VIRTIO_ID_INPUT, - vinput->cfg_size); + virtio_init(vdev, VIRTIO_ID_INPUT, vinput->cfg_size); vinput->evt = virtio_add_queue(vdev, 64, virtio_input_handle_evt); vinput->sts = virtio_add_queue(vdev, 64, virtio_input_handle_sts); } diff --git a/hw/intc/Kconfig b/hw/intc/Kconfig index f4694088a4..ecd2883ceb 100644 --- a/hw/intc/Kconfig +++ b/hw/intc/Kconfig @@ -25,6 +25,11 @@ config APIC select MSI_NONBROKEN select I8259 +config ARM_GICV3_TCG + bool + default y + depends on ARM_GIC && TCG + config ARM_GIC_KVM bool default y @@ -62,7 +67,13 @@ config RX_ICU config LOONGSON_LIOINTC bool -config SIFIVE_CLINT +config RISCV_ACLINT + bool + +config RISCV_APLIC + bool + +config RISCV_IMSIC bool config SIFIVE_PLIC @@ -73,3 +84,21 @@ config GOLDFISH_PIC config M68K_IRQC bool + +config NIOS2_VIC + bool + +config LOONGARCH_IPI + bool + +config LOONGARCH_PCH_PIC + bool + select UNIMP + +config LOONGARCH_PCH_MSI + select MSI_NONBROKEN + bool + select UNIMP + +config LOONGARCH_EXTIOI + bool diff --git a/hw/intc/allwinner-a10-pic.c b/hw/intc/allwinner-a10-pic.c index 8cca124807..d0bf8d545b 100644 --- a/hw/intc/allwinner-a10-pic.c +++ b/hw/intc/allwinner-a10-pic.c @@ -49,12 +49,9 @@ static void aw_a10_pic_update(AwA10PICState *s) static void aw_a10_pic_set_irq(void *opaque, int irq, int level) { AwA10PICState *s = opaque; + uint32_t *pending_reg = &s->irq_pending[irq / 32]; - if (level) { - set_bit(irq % 32, (void *)&s->irq_pending[irq / 32]); - } else { - clear_bit(irq % 32, (void *)&s->irq_pending[irq / 32]); - } + *pending_reg = deposit32(*pending_reg, irq % 32, 1, !!level); aw_a10_pic_update(s); } diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c index a994b1f024..7a34bc0998 100644 --- a/hw/intc/arm_gic.c +++ b/hw/intc/arm_gic.c @@ -941,7 +941,7 @@ static void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) gic_update(s); } -static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) +static uint8_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) { GICState *s = (GICState *)opaque; uint32_t res; @@ -955,6 +955,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) cm = 1 << cpu; if (offset < 0x100) { if (offset == 0) { /* GICD_CTLR */ + /* We rely here on the only non-zero bits being in byte 0 */ if (s->security_extn && !attrs.secure) { /* The NS bank of this register is just an alias of the * EnableGrp1 bit in the S bank version. 
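The preceding arm_gic.c hunk narrows gic_dist_readb() to return uint8_t and notes that GICD_CTLR only has non-zero bits in byte 0; the hunk that follows finishes the job by spelling out GICD_TYPER and GICD_IIDR one byte at a time. As a rough sketch of the underlying idea (reg32_readb is an illustrative helper, not something the patch adds), a byte-lane read of a 32-bit register image is just:

    #include "qemu/osdep.h"
    #include "qemu/bitops.h"

    /*
     * Illustrative only: return one byte of a 32-bit register image,
     * selected by the low two bits of the byte offset. The patch instead
     * hard-codes each byte of GICD_TYPER/GICD_IIDR and returns 0 for the
     * reserved bytes in that range.
     */
    static uint8_t reg32_readb(uint32_t reg, unsigned offset)
    {
        return extract32(reg, (offset & 3) * 8, 8);
    }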
@@ -964,13 +965,26 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) return s->ctlr; } } - if (offset == 4) - /* Interrupt Controller Type Register */ - return ((s->num_irq / 32) - 1) - | ((s->num_cpu - 1) << 5) - | (s->security_extn << 10); - if (offset < 0x08) + if (offset == 4) { + /* GICD_TYPER byte 0 */ + return ((s->num_irq / 32) - 1) | ((s->num_cpu - 1) << 5); + } + if (offset == 5) { + /* GICD_TYPER byte 1 */ + return (s->security_extn << 2); + } + if (offset == 8) { + /* GICD_IIDR byte 0 */ + return 0x3b; /* Arm JEP106 identity */ + } + if (offset == 9) { + /* GICD_IIDR byte 1 */ + return 0x04; /* Arm JEP106 identity */ + } + if (offset < 0x0c) { + /* All other bytes in this range are RAZ */ return 0; + } if (offset >= 0x80) { /* Interrupt Group Registers: these RAZ/WI if this is an NS * access to a GIC with the security extensions, or if the GIC @@ -1662,6 +1676,15 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset, } break; } + case 0xfc: + if (s->revision == REV_11MPCORE) { + /* Reserved on 11MPCore */ + *data = 0; + } else { + /* GICv1 or v2; Arm implementation */ + *data = (s->revision << 16) | 0x43b; + } + break; default: qemu_log_mask(LOG_GUEST_ERROR, "gic_cpu_read: Bad offset %x\n", (int)offset); @@ -1727,6 +1750,7 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset, } else { s->apr[regno][cpu] = value; } + s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu); break; } case 0xe0: case 0xe4: case 0xe8: case 0xec: @@ -1743,6 +1767,7 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset, return MEMTX_OK; } s->nsapr[regno][cpu] = value; + s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu); break; } case 0x1000: diff --git a/hw/intc/arm_gicv3.c b/hw/intc/arm_gicv3.c index d63f8af604..0b8f79a122 100644 --- a/hw/intc/arm_gicv3.c +++ b/hw/intc/arm_gicv3.c @@ -1,5 +1,5 @@ /* - * ARM Generic Interrupt Controller v3 + * ARM Generic Interrupt Controller v3 (emulation) * * Copyright (c) 2015 Huawei. * Copyright (c) 2016 Linaro Limited @@ -165,6 +165,17 @@ static void gicv3_redist_update_noirqset(GICv3CPUState *cs) cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq); } + if ((cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) && cs->gic->lpi_enable && + (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) && + (cs->hpplpi.prio != 0xff)) { + if (irqbetter(cs, cs->hpplpi.irq, cs->hpplpi.prio)) { + cs->hppi.irq = cs->hpplpi.irq; + cs->hppi.prio = cs->hpplpi.prio; + cs->hppi.grp = cs->hpplpi.grp; + seenbetter = true; + } + } + /* If the best interrupt we just found would preempt whatever * was the previous best interrupt before this update, then * we know it's definitely the best one now. @@ -176,7 +187,9 @@ static void gicv3_redist_update_noirqset(GICv3CPUState *cs) * interrupt has reduced in priority and any other interrupt could * now be the new best one). */ - if (!seenbetter && cs->hppi.prio != 0xff && cs->hppi.irq < GIC_INTERNAL) { + if (!seenbetter && cs->hppi.prio != 0xff && + (cs->hppi.irq < GIC_INTERNAL || + cs->hppi.irq >= GICV3_LPI_INTID_START)) { gicv3_full_update_noirqset(cs->gic); } } @@ -339,9 +352,13 @@ static void gicv3_set_irq(void *opaque, int irq, int level) static void arm_gicv3_post_load(GICv3State *s) { + int i; /* Recalculate our cached idea of the current highest priority * pending interrupt, but don't set IRQ or FIQ lines. 
*/ + for (i = 0; i < s->num_cpu; i++) { + gicv3_redist_update_lpi_only(&s->cpu[i]); + } gicv3_full_update_noirqset(s); /* Repopulate the cache of GICv3CPUState pointers for target CPUs */ gicv3_cache_all_target_cpustates(s); @@ -352,11 +369,19 @@ static const MemoryRegionOps gic_ops[] = { .read_with_attrs = gicv3_dist_read, .write_with_attrs = gicv3_dist_write, .endianness = DEVICE_NATIVE_ENDIAN, + .valid.min_access_size = 1, + .valid.max_access_size = 8, + .impl.min_access_size = 1, + .impl.max_access_size = 8, }, { .read_with_attrs = gicv3_redist_read, .write_with_attrs = gicv3_redist_write, .endianness = DEVICE_NATIVE_ENDIAN, + .valid.min_access_size = 1, + .valid.max_access_size = 8, + .impl.min_access_size = 1, + .impl.max_access_size = 8, } }; @@ -373,17 +398,7 @@ static void arm_gic_realize(DeviceState *dev, Error **errp) return; } - if (s->nb_redist_regions != 1) { - error_setg(errp, "VGICv3 redist region number(%d) not equal to 1", - s->nb_redist_regions); - return; - } - - gicv3_init_irqs_and_mmio(s, gicv3_set_irq, gic_ops, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } + gicv3_init_irqs_and_mmio(s, gicv3_set_irq, gic_ops); gicv3_init_cpuif(s); } diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c index 58ef65f589..351843db4a 100644 --- a/hw/intc/arm_gicv3_common.c +++ b/hw/intc/arm_gicv3_common.c @@ -144,6 +144,25 @@ const VMStateDescription vmstate_gicv3_cpu_sre_el1 = { } }; +static bool gicv4_needed(void *opaque) +{ + GICv3CPUState *cs = opaque; + + return cs->gic->revision > 3; +} + +const VMStateDescription vmstate_gicv3_gicv4 = { + .name = "arm_gicv3_cpu/gicv4", + .version_id = 1, + .minimum_version_id = 1, + .needed = gicv4_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(gicr_vpropbaser, GICv3CPUState), + VMSTATE_UINT64(gicr_vpendbaser, GICv3CPUState), + VMSTATE_END_OF_LIST() + } +}; + static const VMStateDescription vmstate_gicv3_cpu = { .name = "arm_gicv3_cpu", .version_id = 1, @@ -175,6 +194,7 @@ static const VMStateDescription vmstate_gicv3_cpu = { .subsections = (const VMStateDescription * []) { &vmstate_gicv3_cpu_virt, &vmstate_gicv3_cpu_sre_el1, + &vmstate_gicv3_gicv4, NULL } }; @@ -250,21 +270,11 @@ static const VMStateDescription vmstate_gicv3 = { }; void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler, - const MemoryRegionOps *ops, Error **errp) + const MemoryRegionOps *ops) { SysBusDevice *sbd = SYS_BUS_DEVICE(s); - int rdist_capacity = 0; int i; - - for (i = 0; i < s->nb_redist_regions; i++) { - rdist_capacity += s->redist_region_count[i]; - } - if (rdist_capacity < s->num_cpu) { - error_setg(errp, "Capacity of the redist regions(%d) " - "is less than number of vcpus(%d)", - rdist_capacity, s->num_cpu); - return; - } + int cpuidx; /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU. * GPIO array layout is thus: @@ -293,14 +303,20 @@ void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler, "gicv3_dist", 0x10000); sysbus_init_mmio(sbd, &s->iomem_dist); - s->iomem_redist = g_new0(MemoryRegion, s->nb_redist_regions); + s->redist_regions = g_new0(GICv3RedistRegion, s->nb_redist_regions); + cpuidx = 0; for (i = 0; i < s->nb_redist_regions; i++) { char *name = g_strdup_printf("gicv3_redist_region[%d]", i); + GICv3RedistRegion *region = &s->redist_regions[i]; - memory_region_init_io(&s->iomem_redist[i], OBJECT(s), - ops ? 
&ops[1] : NULL, s, name, - s->redist_region_count[i] * GICV3_REDIST_SIZE); - sysbus_init_mmio(sbd, &s->iomem_redist[i]); + region->gic = s; + region->cpuidx = cpuidx; + cpuidx += s->redist_region_count[i]; + + memory_region_init_io(®ion->iomem, OBJECT(s), + ops ? &ops[1] : NULL, region, name, + s->redist_region_count[i] * gicv3_redist_size(s)); + sysbus_init_mmio(sbd, ®ion->iomem); g_free(name); } } @@ -308,14 +324,16 @@ void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler, static void arm_gicv3_common_realize(DeviceState *dev, Error **errp) { GICv3State *s = ARM_GICV3_COMMON(dev); - int i; + int i, rdist_capacity, cpuidx; - /* revision property is actually reserved and currently used only in order - * to keep the interface compatible with GICv2 code, avoiding extra - * conditions. However, in future it could be used, for example, if we - * implement GICv4. + /* + * This GIC device supports only revisions 3 and 4. The GICv1/v2 + * is a separate device. + * Note that subclasses of this device may impose further restrictions + * on the GIC revision: notably, the in-kernel KVM GIC doesn't + * support GICv4. */ - if (s->revision != 3) { + if (s->revision != 3 && s->revision != 4) { error_setg(errp, "unsupported GIC revision %d", s->revision); return; } @@ -332,6 +350,10 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp) s->num_irq, GIC_INTERNAL); return; } + if (s->num_cpu == 0) { + error_setg(errp, "num-cpu must be at least 1"); + return; + } /* ITLinesNumber is represented as (N / 32) - 1, so this is an * implementation imposed restriction, not an architectural one, @@ -345,12 +367,32 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp) return; } + if (s->lpi_enable && !s->dma) { + error_setg(errp, "Redist-ITS: Guest 'sysmem' reference link not set"); + return; + } + + rdist_capacity = 0; + for (i = 0; i < s->nb_redist_regions; i++) { + rdist_capacity += s->redist_region_count[i]; + } + if (rdist_capacity != s->num_cpu) { + error_setg(errp, "Capacity of the redist regions(%d) " + "does not match the number of vcpus(%d)", + rdist_capacity, s->num_cpu); + return; + } + + if (s->lpi_enable) { + address_space_init(&s->dma_as, s->dma, + "gicv3-its-sysmem"); + } + s->cpu = g_new0(GICv3CPUState, s->num_cpu); for (i = 0; i < s->num_cpu; i++) { CPUState *cpu = qemu_get_cpu(i); uint64_t cpu_affid; - int last; s->cpu[i].cpu = cpu; s->cpu[i].gic = s; @@ -366,11 +408,10 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp) * Last == 1 if this is the last redistributor in a series of * contiguous redistributor pages * DirectLPI == 0 (direct injection of LPIs not supported) - * VLPIS == 0 (virtual LPIs not supported) - * PLPIS == 0 (physical LPIs not supported) + * VLPIS == 1 if vLPIs supported (GICv4 and up) + * PLPIS == 1 if LPIs supported */ cpu_affid = object_property_get_uint(OBJECT(cpu), "mp-affinity", NULL); - last = (i == s->num_cpu - 1); /* The CPU mp-affinity property is in MPIDR register format; squash * the affinity bytes into 32 bits as the GICR_TYPER has them. @@ -379,9 +420,27 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp) (cpu_affid & 0xFFFFFF); s->cpu[i].gicr_typer = (cpu_affid << 32) | (1 << 24) | - (i << 8) | - (last << 4); + (i << 8); + + if (s->lpi_enable) { + s->cpu[i].gicr_typer |= GICR_TYPER_PLPIS; + if (s->revision > 3) { + s->cpu[i].gicr_typer |= GICR_TYPER_VLPIS; + } + } } + + /* + * Now go through and set GICR_TYPER.Last for the final + * redistributor in each region. 
+ */ + cpuidx = 0; + for (i = 0; i < s->nb_redist_regions; i++) { + cpuidx += s->redist_region_count[i]; + s->cpu[cpuidx - 1].gicr_typer |= GICR_TYPER_LAST; + } + + s->itslist = g_ptr_array_new(); } static void arm_gicv3_finalize(Object *obj) @@ -401,11 +460,17 @@ static void arm_gicv3_common_reset(DeviceState *dev) cs->level = 0; cs->gicr_ctlr = 0; + if (s->lpi_enable) { + /* Our implementation supports clearing GICR_CTLR.EnableLPIs */ + cs->gicr_ctlr |= GICR_CTLR_CES; + } cs->gicr_statusr[GICV3_S] = 0; cs->gicr_statusr[GICV3_NS] = 0; cs->gicr_waker = GICR_WAKER_ProcessorSleep | GICR_WAKER_ChildrenAsleep; cs->gicr_propbaser = 0; cs->gicr_pendbaser = 0; + cs->gicr_vpropbaser = 0; + cs->gicr_vpendbaser = 0; /* If we're resetting a TZ-aware GIC as if secure firmware * had set it up ready to start a kernel in non-secure, we * need to set interrupts to group 1 so the kernel can use them. @@ -426,6 +491,8 @@ static void arm_gicv3_common_reset(DeviceState *dev) memset(cs->gicr_ipriorityr, 0, sizeof(cs->gicr_ipriorityr)); cs->hppi.prio = 0xff; + cs->hpplpi.prio = 0xff; + cs->hppvlpi.prio = 0xff; /* State in the CPU interface must *not* be reset here, because it * is part of the CPU's reset domain, not the GIC device's. @@ -494,9 +561,17 @@ static Property arm_gicv3_common_properties[] = { DEFINE_PROP_UINT32("num-cpu", GICv3State, num_cpu, 1), DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32), DEFINE_PROP_UINT32("revision", GICv3State, revision, 3), + DEFINE_PROP_BOOL("has-lpi", GICv3State, lpi_enable, 0), DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0), + /* + * Compatibility property: force 8 bits of physical priority, even + * if the CPU being emulated should have fewer. + */ + DEFINE_PROP_BOOL("force-8-bit-prio", GICv3State, force_8bit_prio, 0), DEFINE_PROP_ARRAY("redist-region-count", GICv3State, nb_redist_regions, redist_region_count, qdev_prop_uint32, uint32_t), + DEFINE_PROP_LINK("sysmem", GICv3State, dma, TYPE_MEMORY_REGION, + MemoryRegion *), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c index a032d505f5..b17b29288c 100644 --- a/hw/intc/arm_gicv3_cpuif.c +++ b/hw/intc/arm_gicv3_cpuif.c @@ -1,5 +1,5 @@ /* - * ARM Generic Interrupt Controller v3 + * ARM Generic Interrupt Controller v3 (emulation) * * Copyright (c) 2016 Linaro Limited * Written by Peter Maydell @@ -20,14 +20,13 @@ #include "gicv3_internal.h" #include "hw/irq.h" #include "cpu.h" +#include "target/arm/cpregs.h" -void gicv3_set_gicv3state(CPUState *cpu, GICv3CPUState *s) -{ - ARMCPU *arm_cpu = ARM_CPU(cpu); - CPUARMState *env = &arm_cpu->env; - - env->gicv3state = (void *)s; -}; +/* + * Special case return value from hppvi_index(); must be larger than + * the architecturally maximum possible list register index (which is 15) + */ +#define HPPVI_INDEX_VLPI 16 static GICv3CPUState *icc_cs_from_env(CPUARMState *env) { @@ -50,6 +49,14 @@ static inline int icv_min_vbpr(GICv3CPUState *cs) return 7 - cs->vprebits; } +static inline int ich_num_aprs(GICv3CPUState *cs) +{ + /* Return the number of virtual APR registers (1, 2, or 4) */ + int aprmax = 1 << (cs->vprebits - 5); + assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0])); + return aprmax; +} + /* Simple accessor functions for LR fields */ static uint32_t ich_lr_vintid(uint64_t lr) { @@ -146,9 +153,7 @@ static int ich_highest_active_virt_prio(GICv3CPUState *cs) * in the ICH Active Priority Registers. 
*/ int i; - int aprmax = 1 << (cs->vprebits - 5); - - assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0])); + int aprmax = ich_num_aprs(cs); for (i = 0; i < aprmax; i++) { uint32_t apr = cs->ich_apr[GICV3_G0][i] | @@ -165,10 +170,18 @@ static int ich_highest_active_virt_prio(GICv3CPUState *cs) static int hppvi_index(GICv3CPUState *cs) { - /* Return the list register index of the highest priority pending + /* + * Return the list register index of the highest priority pending * virtual interrupt, as per the HighestPriorityVirtualInterrupt * pseudocode. If no pending virtual interrupts, return -1. + * If the highest priority pending virtual interrupt is a vLPI, + * return HPPVI_INDEX_VLPI. + * (The pseudocode handles checking whether the vLPI is higher + * priority than the highest priority list register at every + * callsite of HighestPriorityVirtualInterrupt; we check it here.) */ + ARMCPU *cpu = ARM_CPU(cs->cpu); + CPUARMState *env = &cpu->env; int idx = -1; int i; /* Note that a list register entry with a priority of 0xff will @@ -210,6 +223,23 @@ static int hppvi_index(GICv3CPUState *cs) } } + /* + * "no pending vLPI" is indicated with prio = 0xff, which always + * fails the priority check here. vLPIs are only considered + * when we are in Non-Secure state. + */ + if (cs->hppvlpi.prio < prio && !arm_is_secure(env)) { + if (cs->hppvlpi.grp == GICV3_G0) { + if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0) { + return HPPVI_INDEX_VLPI; + } + } else { + if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1) { + return HPPVI_INDEX_VLPI; + } + } + } + return idx; } @@ -297,6 +327,47 @@ static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr) return false; } +static bool icv_hppvlpi_can_preempt(GICv3CPUState *cs) +{ + /* + * Return true if we can signal the highest priority pending vLPI. + * We can assume we're Non-secure because hppvi_index() already + * tested for that. + */ + uint32_t mask, rprio, vpmr; + + if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) { + /* Virtual interface disabled */ + return false; + } + + vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT, + ICH_VMCR_EL2_VPMR_LENGTH); + + if (cs->hppvlpi.prio >= vpmr) { + /* Priority mask masks this interrupt */ + return false; + } + + rprio = ich_highest_active_virt_prio(cs); + if (rprio == 0xff) { + /* No running interrupt so we can preempt */ + return true; + } + + mask = icv_gprio_mask(cs, cs->hppvlpi.grp); + + /* + * We only preempt a running interrupt if the pending interrupt's + * group priority is sufficient (the subpriorities are not considered). + */ + if ((cs->hppvlpi.prio & mask) < (rprio & mask)) { + return true; + } + + return false; +} + static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs, uint32_t *misr) { @@ -351,7 +422,8 @@ static uint32_t maintenance_interrupt_state(GICv3CPUState *cs) /* Scan list registers and fill in the U, NP and EOI bits */ eoi_maintenance_interrupt_state(cs, &value); - if (cs->ich_hcr_el2 & (ICH_HCR_EL2_LRENPIE | ICH_HCR_EL2_EOICOUNT_MASK)) { + if ((cs->ich_hcr_el2 & ICH_HCR_EL2_LRENPIE) && + (cs->ich_hcr_el2 & ICH_HCR_EL2_EOICOUNT_MASK)) { value |= ICH_MISR_EL2_LRENP; } @@ -377,9 +449,55 @@ static uint32_t maintenance_interrupt_state(GICv3CPUState *cs) return value; } +void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs) +{ + /* + * Tell the CPU about any pending virtual interrupts. + * This should only be called for changes that affect the + * vIRQ and vFIQ status and do not change the maintenance + * interrupt status. 
This means that unlike gicv3_cpuif_virt_update() + * this function won't recursively call back into the GIC code. + * The main use of this is when the redistributor has changed the + * highest priority pending virtual LPI. + */ + int idx; + int irqlevel = 0; + int fiqlevel = 0; + + idx = hppvi_index(cs); + trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx, + cs->hppvlpi.irq, cs->hppvlpi.grp, + cs->hppvlpi.prio); + if (idx == HPPVI_INDEX_VLPI) { + if (icv_hppvlpi_can_preempt(cs)) { + if (cs->hppvlpi.grp == GICV3_G0) { + fiqlevel = 1; + } else { + irqlevel = 1; + } + } + } else if (idx >= 0) { + uint64_t lr = cs->ich_lr_el2[idx]; + + if (icv_hppi_can_preempt(cs, lr)) { + /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */ + if (lr & ICH_LR_EL2_GROUP) { + irqlevel = 1; + } else { + fiqlevel = 1; + } + } + } + + trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel); + qemu_set_irq(cs->parent_vfiq, fiqlevel); + qemu_set_irq(cs->parent_virq, irqlevel); +} + static void gicv3_cpuif_virt_update(GICv3CPUState *cs) { - /* Tell the CPU about any pending virtual interrupts or + /* + * Tell the CPU about any pending virtual interrupts or * maintenance interrupts, following a change to the state * of the CPU interface relevant to virtual interrupts. * @@ -396,36 +514,17 @@ static void gicv3_cpuif_virt_update(GICv3CPUState *cs) * naturally as a result of there being no architectural * linkage between the physical and virtual GIC logic. */ - int idx; - int irqlevel = 0; - int fiqlevel = 0; - int maintlevel = 0; ARMCPU *cpu = ARM_CPU(cs->cpu); + int maintlevel = 0; - idx = hppvi_index(cs); - trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx); - if (idx >= 0) { - uint64_t lr = cs->ich_lr_el2[idx]; + gicv3_cpuif_virt_irq_fiq_update(cs); - if (icv_hppi_can_preempt(cs, lr)) { - /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */ - if (lr & ICH_LR_EL2_GROUP) { - irqlevel = 1; - } else { - fiqlevel = 1; - } - } + if ((cs->ich_hcr_el2 & ICH_HCR_EL2_EN) && + maintenance_interrupt_state(cs) != 0) { + maintlevel = 1; } - if (cs->ich_hcr_el2 & ICH_HCR_EL2_EN) { - maintlevel = maintenance_interrupt_state(cs); - } - - trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel, - irqlevel, maintlevel); - - qemu_set_irq(cs->parent_vfiq, fiqlevel); - qemu_set_irq(cs->parent_virq, irqlevel); + trace_gicv3_cpuif_virt_set_maint_irq(gicv3_redist_affid(cs), maintlevel); qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel); } @@ -451,7 +550,7 @@ static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU; - gicv3_cpuif_virt_update(cs); + gicv3_cpuif_virt_irq_fiq_update(cs); return; } @@ -496,7 +595,7 @@ static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri, write_vbpr(cs, grp, value); - gicv3_cpuif_virt_update(cs); + gicv3_cpuif_virt_irq_fiq_update(cs); } static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri) @@ -523,7 +622,7 @@ static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri, cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT, ICH_VMCR_EL2_VPMR_LENGTH, value); - gicv3_cpuif_virt_update(cs); + gicv3_cpuif_virt_irq_fiq_update(cs); } static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri) @@ -564,7 +663,7 @@ static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri) * should match the ones reported in ich_vtr_read(). 
*/ value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) | - (7 << ICC_CTLR_EL1_PRIBITS_SHIFT); + ((cs->vpribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT); if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) { value |= ICC_CTLR_EL1_EOIMODE; @@ -590,7 +689,7 @@ static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT, 1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0); - gicv3_cpuif_virt_update(cs); + gicv3_cpuif_virt_irq_fiq_update(cs); } static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri) @@ -609,7 +708,11 @@ static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri) int idx = hppvi_index(cs); uint64_t value = INTID_SPURIOUS; - if (idx >= 0) { + if (idx == HPPVI_INDEX_VLPI) { + if (cs->hppvlpi.grp == grp) { + value = cs->hppvlpi.irq; + } + } else if (idx >= 0) { uint64_t lr = cs->ich_lr_el2[idx]; int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0; @@ -618,7 +721,8 @@ static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri) } } - trace_gicv3_icv_hppir_read(grp, gicv3_redist_affid(cs), value); + trace_gicv3_icv_hppir_read(ri->crm == 8 ? 0 : 1, + gicv3_redist_affid(cs), value); return value; } @@ -639,6 +743,18 @@ static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp) cs->ich_apr[grp][regno] |= (1 << regbit); } +static void icv_activate_vlpi(GICv3CPUState *cs) +{ + uint32_t mask = icv_gprio_mask(cs, cs->hppvlpi.grp); + int prio = cs->hppvlpi.prio & mask; + int aprbit = prio >> (8 - cs->vprebits); + int regno = aprbit / 32; + int regbit = aprbit % 32; + + cs->ich_apr[cs->hppvlpi.grp][regno] |= (1 << regbit); + gicv3_redist_vlpi_pending(cs, cs->hppvlpi.irq, 0); +} + static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri) { GICv3CPUState *cs = icc_cs_from_env(env); @@ -646,13 +762,18 @@ static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri) int idx = hppvi_index(cs); uint64_t intid = INTID_SPURIOUS; - if (idx >= 0) { + if (idx == HPPVI_INDEX_VLPI) { + if (cs->hppvlpi.grp == grp && icv_hppvlpi_can_preempt(cs)) { + intid = cs->hppvlpi.irq; + icv_activate_vlpi(cs); + } + } else if (idx >= 0) { uint64_t lr = cs->ich_lr_el2[idx]; int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0; if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) { intid = ich_lr_vintid(lr); - if (intid < INTID_SECURE) { + if (!gicv3_intid_is_special(intid)) { icv_activate_irq(cs, idx, grp); } else { /* Interrupt goes from Pending to Invalid */ @@ -672,6 +793,36 @@ static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri) return intid; } +static uint32_t icc_fullprio_mask(GICv3CPUState *cs) +{ + /* + * Return a mask word which clears the unimplemented priority bits + * from a priority value for a physical interrupt. (Not to be confused + * with the group priority, whose mask depends on the value of BPR + * for the interrupt group.) + */ + return ~0U << (8 - cs->pribits); +} + +static inline int icc_min_bpr(GICv3CPUState *cs) +{ + /* The minimum BPR for the physical interface. 
*/ + return 7 - cs->prebits; +} + +static inline int icc_min_bpr_ns(GICv3CPUState *cs) +{ + return icc_min_bpr(cs) + 1; +} + +static inline int icc_num_aprs(GICv3CPUState *cs) +{ + /* Return the number of APR registers (1, 2, or 4) */ + int aprmax = 1 << MAX(cs->prebits - 5, 0); + assert(aprmax <= ARRAY_SIZE(cs->icc_apr[0])); + return aprmax; +} + static int icc_highest_active_prio(GICv3CPUState *cs) { /* Calculate the current running priority based on the set bits @@ -679,14 +830,14 @@ static int icc_highest_active_prio(GICv3CPUState *cs) */ int i; - for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) { + for (i = 0; i < icc_num_aprs(cs); i++) { uint32_t apr = cs->icc_apr[GICV3_G0][i] | cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i]; if (!apr) { continue; } - return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1); + return (i * 32 + ctz32(apr)) << (icc_min_bpr(cs) + 1); } /* No current active interrupts: return idle priority */ return 0xff; @@ -865,8 +1016,6 @@ static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri, trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value); - value &= 0xff; - if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) { /* NS access and Group 0 is inaccessible to NS: return the @@ -878,6 +1027,7 @@ static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri, } value = (value >> 1) | 0x80; } + value &= icc_fullprio_mask(cs); cs->icc_pmr_el1 = value; gicv3_cpuif_update(cs); } @@ -889,7 +1039,7 @@ static void icc_activate_irq(GICv3CPUState *cs, int irq) */ uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp); int prio = cs->hppi.prio & mask; - int aprbit = prio >> 1; + int aprbit = prio >> (8 - cs->prebits); int regno = aprbit / 32; int regbit = aprbit % 32; @@ -899,10 +1049,12 @@ static void icc_activate_irq(GICv3CPUState *cs, int irq) cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1); cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0); gicv3_redist_update(cs); - } else { + } else if (irq < GICV3_LPI_INTID_START) { gicv3_gicd_active_set(cs->gic, irq); gicv3_gicd_pending_clear(cs->gic, irq); gicv3_update(cs->gic, irq, 1); + } else { + gicv3_redist_lpi_pending(cs, irq, 0); } } @@ -994,7 +1146,7 @@ static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri) intid = icc_hppir0_value(cs, env); } - if (!(intid >= INTID_SECURE && intid <= INTID_SPURIOUS)) { + if (!gicv3_intid_is_special(intid)) { icc_activate_irq(cs, intid); } @@ -1017,7 +1169,7 @@ static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri) intid = icc_hppir1_value(cs, env); } - if (!(intid >= INTID_SECURE && intid <= INTID_SPURIOUS)) { + if (!gicv3_intid_is_special(intid)) { icc_activate_irq(cs, intid); } @@ -1045,7 +1197,7 @@ static void icc_drop_prio(GICv3CPUState *cs, int grp) */ int i; - for (i = 0; i < ARRAY_SIZE(cs->icc_apr[grp]); i++) { + for (i = 0; i < icc_num_aprs(cs); i++) { uint64_t *papr = &cs->icc_apr[grp][i]; if (!*papr) { @@ -1186,9 +1338,7 @@ static int icv_drop_prio(GICv3CPUState *cs) * 32 bits are actually relevant. */ int i; - int aprmax = 1 << (cs->vprebits - 5); - - assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0])); + int aprmax = ich_num_aprs(cs); for (i = 0; i < aprmax; i++) { uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i]; @@ -1262,8 +1412,7 @@ static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri, trace_gicv3_icv_eoir_write(ri->crm == 8 ? 
0 : 1, gicv3_redist_affid(cs), value); - if (irq >= GICV3_MAXIRQ) { - /* Also catches special interrupt numbers and LPIs */ + if (gicv3_intid_is_special(irq)) { return; } @@ -1318,7 +1467,8 @@ static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri, trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1, gicv3_redist_affid(cs), value); - if (irq >= cs->gic->num_irq) { + if ((irq >= cs->gic->num_irq) && + !(cs->gic->lpi_enable && (irq >= GICV3_LPI_INTID_START))) { /* This handles two cases: * 1. If software writes the ID of a spurious interrupt [ie 1020-1023] * to the GICC_EOIR, the GIC ignores that write. @@ -1473,7 +1623,7 @@ static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri, return; } - minval = (grp == GICV3_G1NS) ? GIC_MIN_BPR_NS : GIC_MIN_BPR; + minval = (grp == GICV3_G1NS) ? icc_min_bpr_ns(cs) : icc_min_bpr(cs); if (value < minval) { value = minval; } @@ -1896,7 +2046,7 @@ static void icc_ctlr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri, cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_CBPR; } - /* The only bit stored in icc_ctlr_el3 which is writeable is EOIMODE_EL3: */ + /* The only bit stored in icc_ctlr_el3 which is writable is EOIMODE_EL3: */ mask = ICC_CTLR_EL3_EOIMODE_EL3; cs->icc_ctlr_el3 &= ~mask; @@ -2054,19 +2204,19 @@ static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri) cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) | - (7 << ICC_CTLR_EL1_PRIBITS_SHIFT); + ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT); cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) | - (7 << ICC_CTLR_EL1_PRIBITS_SHIFT); + ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT); cs->icc_pmr_el1 = 0; - cs->icc_bpr[GICV3_G0] = GIC_MIN_BPR; - cs->icc_bpr[GICV3_G1] = GIC_MIN_BPR; - cs->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR_NS; + cs->icc_bpr[GICV3_G0] = icc_min_bpr(cs); + cs->icc_bpr[GICV3_G1] = icc_min_bpr(cs); + cs->icc_bpr[GICV3_G1NS] = icc_min_bpr_ns(cs); memset(cs->icc_apr, 0, sizeof(cs->icc_apr)); memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen)); cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V | (1 << ICC_CTLR_EL3_IDBITS_SHIFT) | - (7 << ICC_CTLR_EL3_PRIBITS_SHIFT); + ((cs->pribits - 1) << ICC_CTLR_EL3_PRIBITS_SHIFT); memset(cs->ich_apr, 0, sizeof(cs->ich_apr)); cs->ich_hcr_el2 = 0; @@ -2121,27 +2271,6 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = { .readfn = icc_ap_read, .writefn = icc_ap_write, }, - { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5, - .type = ARM_CP_IO | ARM_CP_NO_RAW, - .access = PL1_RW, .accessfn = gicv3_fiq_access, - .readfn = icc_ap_read, - .writefn = icc_ap_write, - }, - { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6, - .type = ARM_CP_IO | ARM_CP_NO_RAW, - .access = PL1_RW, .accessfn = gicv3_fiq_access, - .readfn = icc_ap_read, - .writefn = icc_ap_write, - }, - { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7, - .type = ARM_CP_IO | ARM_CP_NO_RAW, - .access = PL1_RW, .accessfn = gicv3_fiq_access, - .readfn = icc_ap_read, - .writefn = icc_ap_write, - }, /* All the ICC_AP1R*_EL1 registers are banked */ { .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0, @@ -2150,27 +2279,6 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = { .readfn = icc_ap_read, .writefn = icc_ap_write, }, - { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, 
.opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1, - .type = ARM_CP_IO | ARM_CP_NO_RAW, - .access = PL1_RW, .accessfn = gicv3_irq_access, - .readfn = icc_ap_read, - .writefn = icc_ap_write, - }, - { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2, - .type = ARM_CP_IO | ARM_CP_NO_RAW, - .access = PL1_RW, .accessfn = gicv3_irq_access, - .readfn = icc_ap_read, - .writefn = icc_ap_write, - }, - { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3, - .type = ARM_CP_IO | ARM_CP_NO_RAW, - .access = PL1_RW, .accessfn = gicv3_irq_access, - .readfn = icc_ap_read, - .writefn = icc_ap_write, - }, { .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1, .type = ARM_CP_IO | ARM_CP_NO_RAW, @@ -2311,7 +2419,54 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = { .readfn = icc_igrpen1_el3_read, .writefn = icc_igrpen1_el3_write, }, - REGINFO_SENTINEL +}; + +static const ARMCPRegInfo gicv3_cpuif_icc_apxr1_reginfo[] = { + { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_fiq_access, + .readfn = icc_ap_read, + .writefn = icc_ap_write, + }, + { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_irq_access, + .readfn = icc_ap_read, + .writefn = icc_ap_write, + }, +}; + +static const ARMCPRegInfo gicv3_cpuif_icc_apxr23_reginfo[] = { + { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_fiq_access, + .readfn = icc_ap_read, + .writefn = icc_ap_write, + }, + { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_fiq_access, + .readfn = icc_ap_read, + .writefn = icc_ap_write, + }, + { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_irq_access, + .readfn = icc_ap_read, + .writefn = icc_ap_write, + }, + { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_irq_access, + .readfn = icc_ap_read, + .writefn = icc_ap_write, + }, }; static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) @@ -2336,7 +2491,7 @@ static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value); cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU; - gicv3_cpuif_virt_update(cs); + gicv3_cpuif_virt_irq_fiq_update(cs); } static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri) @@ -2462,11 +2617,15 @@ static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri) uint64_t value; value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT) - | ICH_VTR_EL2_TDS | ICH_VTR_EL2_NV4 | ICH_VTR_EL2_A3V + | ICH_VTR_EL2_TDS | ICH_VTR_EL2_A3V | (1 << ICH_VTR_EL2_IDBITS_SHIFT) | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT) | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT); + if (cs->gic->revision < 4) 
{ + value |= ICH_VTR_EL2_NV4; + } + trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value); return value; } @@ -2561,7 +2720,6 @@ static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = { .readfn = ich_vmcr_read, .writefn = ich_vmcr_write, }, - REGINFO_SENTINEL }; static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = { @@ -2579,7 +2737,6 @@ static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = { .readfn = ich_ap_read, .writefn = ich_ap_write, }, - REGINFO_SENTINEL }; static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = { @@ -2611,7 +2768,6 @@ static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = { .readfn = ich_ap_read, .writefn = ich_ap_write, }, - REGINFO_SENTINEL }; static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque) @@ -2619,6 +2775,12 @@ static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque) GICv3CPUState *cs = opaque; gicv3_cpuif_update(cs); + /* + * Because vLPIs are only pending in NonSecure state, + * an EL change can change the VIRQ/VFIQ status (but + * cannot affect the maintenance interrupt state) + */ + gicv3_cpuif_virt_irq_fiq_update(cs); } void gicv3_init_cpuif(GICv3State *s) @@ -2632,6 +2794,16 @@ void gicv3_init_cpuif(GICv3State *s) ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i)); GICv3CPUState *cs = &s->cpu[i]; + /* + * If the CPU doesn't define a GICv3 configuration, probably because + * in real hardware it doesn't have one, then we use default values + * matching the one used by most Arm CPUs. This applies to: + * cpu->gic_num_lrs + * cpu->gic_vpribits + * cpu->gic_vprebits + * cpu->gic_pribits + */ + /* Note that we can't just use the GICv3CPUState as an opaque pointer * in define_arm_cp_regs_with_opaque(), because when we're called back * it might be with code translated by CPU 0 but run by CPU 1, in @@ -2640,13 +2812,56 @@ void gicv3_init_cpuif(GICv3State *s) * get back to the GICv3CPUState from the CPUARMState. */ define_arm_cp_regs(cpu, gicv3_cpuif_reginfo); - if (arm_feature(&cpu->env, ARM_FEATURE_EL2) - && cpu->gic_num_lrs) { + + /* + * The CPU implementation specifies the number of supported + * bits of physical priority. For backwards compatibility + * of migration, we have a compat property that forces use + * of 8 priority bits regardless of what the CPU really has. + */ + if (s->force_8bit_prio) { + cs->pribits = 8; + } else { + cs->pribits = cpu->gic_pribits ?: 5; + } + + /* + * The GICv3 has separate ID register fields for virtual priority + * and preemption bit values, but only a single ID register field + * for the physical priority bits. The preemption bit count is + * always the same as the priority bit count, except that 8 bits + * of priority means 7 preemption bits. We precalculate the + * preemption bits because it simplifies the code and makes the + * parallels between the virtual and physical bits of the GIC + * a bit clearer. + */ + cs->prebits = cs->pribits; + if (cs->prebits == 8) { + cs->prebits--; + } + /* + * Check that CPU code defining pribits didn't violate + * architectural constraints our implementation relies on. + */ + g_assert(cs->pribits >= 4 && cs->pribits <= 8); + + /* + * gicv3_cpuif_reginfo[] defines ICC_AP*R0_EL1; add definitions + * for ICC_AP*R{1,2,3}_EL1 if the prebits value requires them. 
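As a rough illustration of the pribits/prebits arithmetic introduced above, the sketch below shows how the preemption-bit count determines both the number of active-priority registers and the bit an activated interrupt occupies, mirroring the icc_num_aprs() helper and the aprbit calculation in icc_activate_irq(). It is not code from the patch; the helper name example_num_aprs and the sample prebits/prio values are made up for the example.

#include <stdio.h>

/* Illustrative only: same result as 1 << MAX(prebits - 5, 0) in the patch. */
static int example_num_aprs(int prebits)
{
    int n = prebits - 5;
    return 1 << (n > 0 ? n : 0);   /* 1 APR for <= 5 bits, 2 for 6, 4 for 7 */
}

int main(void)
{
    int prebits = 5;               /* sample value: a CPU with 5 preemption bits */
    int prio = 0x40;               /* sample group priority, already masked */
    int aprbit = prio >> (8 - prebits);

    printf("APR registers: %d, active bit: word %d, bit %d\n",
           example_num_aprs(prebits), aprbit / 32, aprbit % 32);
    return 0;
}

With 5 preemption bits only one APR word is needed, which is why the extra ICC_AP*R{1,2,3}_EL1 registers are only defined when prebits is 6 or 7.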
+ */ + if (cs->prebits >= 6) { + define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr1_reginfo); + } + if (cs->prebits == 7) { + define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr23_reginfo); + } + + if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) { int j; - cs->num_list_regs = cpu->gic_num_lrs; - cs->vpribits = cpu->gic_vpribits; - cs->vprebits = cpu->gic_vprebits; + cs->num_list_regs = cpu->gic_num_lrs ?: 4; + cs->vpribits = cpu->gic_vpribits ?: 5; + cs->vprebits = cpu->gic_vprebits ?: 5; /* Check against architectural constraints: getting these * wrong would be a bug in the CPU code defining these, @@ -2680,7 +2895,6 @@ void gicv3_init_cpuif(GICv3State *s) .readfn = ich_lr_read, .writefn = ich_lr_write, }, - REGINFO_SENTINEL }; define_arm_cp_regs(cpu, lr_regset); } diff --git a/hw/intc/arm_gicv3_cpuif_common.c b/hw/intc/arm_gicv3_cpuif_common.c new file mode 100644 index 0000000000..ff1239f65d --- /dev/null +++ b/hw/intc/arm_gicv3_cpuif_common.c @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * ARM Generic Interrupt Controller v3 + * + * Copyright (c) 2016 Linaro Limited + * Written by Peter Maydell + * + * This code is licensed under the GPL, version 2 or (at your option) + * any later version. + */ + +#include "qemu/osdep.h" +#include "gicv3_internal.h" +#include "cpu.h" + +void gicv3_set_gicv3state(CPUState *cpu, GICv3CPUState *s) +{ + ARMCPU *arm_cpu = ARM_CPU(cpu); + CPUARMState *env = &arm_cpu->env; + + env->gicv3state = (void *)s; +}; diff --git a/hw/intc/arm_gicv3_dist.c b/hw/intc/arm_gicv3_dist.c index b65f56f903..eea0368118 100644 --- a/hw/intc/arm_gicv3_dist.c +++ b/hw/intc/arm_gicv3_dist.c @@ -262,8 +262,21 @@ static void gicd_write_irouter(GICv3State *s, MemTxAttrs attrs, int irq, gicv3_update(s, irq, 1); } -static MemTxResult gicd_readb(GICv3State *s, hwaddr offset, - uint64_t *data, MemTxAttrs attrs) +/** + * gicd_readb + * gicd_readw + * gicd_readl + * gicd_readq + * gicd_writeb + * gicd_writew + * gicd_writel + * gicd_writeq + * + * Return %true if the operation succeeded, %false otherwise. + */ + +static bool gicd_readb(GICv3State *s, hwaddr offset, + uint64_t *data, MemTxAttrs attrs) { /* Most GICv3 distributor registers do not support byte accesses. */ switch (offset) { @@ -273,17 +286,17 @@ static MemTxResult gicd_readb(GICv3State *s, hwaddr offset, /* This GIC implementation always has affinity routing enabled, * so these registers are all RAZ/WI. */ - return MEMTX_OK; + return true; case GICD_IPRIORITYR ... GICD_IPRIORITYR + 0x3ff: *data = gicd_read_ipriorityr(s, attrs, offset - GICD_IPRIORITYR); - return MEMTX_OK; + return true; default: - return MEMTX_ERROR; + return false; } } -static MemTxResult gicd_writeb(GICv3State *s, hwaddr offset, - uint64_t value, MemTxAttrs attrs) +static bool gicd_writeb(GICv3State *s, hwaddr offset, + uint64_t value, MemTxAttrs attrs) { /* Most GICv3 distributor registers do not support byte accesses. */ switch (offset) { @@ -293,25 +306,25 @@ static MemTxResult gicd_writeb(GICv3State *s, hwaddr offset, /* This GIC implementation always has affinity routing enabled, * so these registers are all RAZ/WI. */ - return MEMTX_OK; + return true; case GICD_IPRIORITYR ... 
GICD_IPRIORITYR + 0x3ff: { int irq = offset - GICD_IPRIORITYR; if (irq < GIC_INTERNAL || irq >= s->num_irq) { - return MEMTX_OK; + return true; } gicd_write_ipriorityr(s, attrs, irq, value); gicv3_update(s, irq, 1); - return MEMTX_OK; + return true; } default: - return MEMTX_ERROR; + return false; } } -static MemTxResult gicd_readw(GICv3State *s, hwaddr offset, - uint64_t *data, MemTxAttrs attrs) +static bool gicd_readw(GICv3State *s, hwaddr offset, + uint64_t *data, MemTxAttrs attrs) { /* Only GICD_SETSPI_NSR, GICD_CLRSPI_NSR, GICD_SETSPI_SR and GICD_SETSPI_NSR * support 16 bit accesses, and those registers are all part of the @@ -319,11 +332,11 @@ static MemTxResult gicd_readw(GICv3State *s, hwaddr offset, * implement (ie for us GICD_TYPER.MBIS == 0), so for us they are * reserved. */ - return MEMTX_ERROR; + return false; } -static MemTxResult gicd_writew(GICv3State *s, hwaddr offset, - uint64_t value, MemTxAttrs attrs) +static bool gicd_writew(GICv3State *s, hwaddr offset, + uint64_t value, MemTxAttrs attrs) { /* Only GICD_SETSPI_NSR, GICD_CLRSPI_NSR, GICD_SETSPI_SR and GICD_SETSPI_NSR * support 16 bit accesses, and those registers are all part of the @@ -331,11 +344,11 @@ static MemTxResult gicd_writew(GICv3State *s, hwaddr offset, * implement (ie for us GICD_TYPER.MBIS == 0), so for us they are * reserved. */ - return MEMTX_ERROR; + return false; } -static MemTxResult gicd_readl(GICv3State *s, hwaddr offset, - uint64_t *data, MemTxAttrs attrs) +static bool gicd_readl(GICv3State *s, hwaddr offset, + uint64_t *data, MemTxAttrs attrs) { /* Almost all GICv3 distributor registers are 32-bit. * Note that WO registers must return an UNKNOWN value on reads, @@ -363,15 +376,17 @@ static MemTxResult gicd_readl(GICv3State *s, hwaddr offset, } else { *data = s->gicd_ctlr; } - return MEMTX_OK; + return true; case GICD_TYPER: { /* For this implementation: * No1N == 1 (1-of-N SPI interrupts not supported) * A3V == 1 (non-zero values of Affinity level 3 supported) * IDbits == 0xf (we support 16-bit interrupt identifiers) - * DVIS == 0 (Direct virtual LPI injection not supported) - * LPIS == 0 (LPIs not supported) + * DVIS == 1 (Direct virtual LPI injection supported) if GICv4 + * LPIS == 1 (LPIs are supported if affinity routing is enabled) + * num_LPIs == 0b00000 (bits [15:11],Number of LPIs as indicated + * by GICD_TYPER.IDbits) * MBIS == 0 (message-based SPIs not supported) * SecurityExtn == 1 if security extns supported * CPUNumber == 0 since for us ARE is always 1 @@ -384,64 +399,66 @@ static MemTxResult gicd_readl(GICv3State *s, hwaddr offset, * so we only need to check the DS bit. */ bool sec_extn = !(s->gicd_ctlr & GICD_CTLR_DS); + bool dvis = s->revision >= 4; - *data = (1 << 25) | (1 << 24) | (sec_extn << 10) | + *data = (1 << 25) | (1 << 24) | (dvis << 18) | (sec_extn << 10) | + (s->lpi_enable << GICD_TYPER_LPIS_SHIFT) | (0xf << 19) | itlinesnumber; - return MEMTX_OK; + return true; } case GICD_IIDR: /* We claim to be an ARM r0p0 with a zero ProductID. * This is the same as an r0p0 GIC-500. */ *data = gicv3_iidr(); - return MEMTX_OK; + return true; case GICD_STATUSR: /* RAZ/WI for us (this is an optional register and our implementation * does not track RO/WO/reserved violations to report them to the guest) */ *data = 0; - return MEMTX_OK; + return true; case GICD_IGROUPR ... 
GICD_IGROUPR + 0x7f: { int irq; if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) { *data = 0; - return MEMTX_OK; + return true; } /* RAZ/WI for SGIs, PPIs, unimplemented irqs */ irq = (offset - GICD_IGROUPR) * 8; if (irq < GIC_INTERNAL || irq >= s->num_irq) { *data = 0; - return MEMTX_OK; + return true; } *data = *gic_bmp_ptr32(s->group, irq); - return MEMTX_OK; + return true; } case GICD_ISENABLER ... GICD_ISENABLER + 0x7f: *data = gicd_read_bitmap_reg(s, attrs, s->enabled, NULL, offset - GICD_ISENABLER); - return MEMTX_OK; + return true; case GICD_ICENABLER ... GICD_ICENABLER + 0x7f: *data = gicd_read_bitmap_reg(s, attrs, s->enabled, NULL, offset - GICD_ICENABLER); - return MEMTX_OK; + return true; case GICD_ISPENDR ... GICD_ISPENDR + 0x7f: *data = gicd_read_bitmap_reg(s, attrs, s->pending, mask_nsacr_ge1, offset - GICD_ISPENDR); - return MEMTX_OK; + return true; case GICD_ICPENDR ... GICD_ICPENDR + 0x7f: *data = gicd_read_bitmap_reg(s, attrs, s->pending, mask_nsacr_ge2, offset - GICD_ICPENDR); - return MEMTX_OK; + return true; case GICD_ISACTIVER ... GICD_ISACTIVER + 0x7f: *data = gicd_read_bitmap_reg(s, attrs, s->active, mask_nsacr_ge2, offset - GICD_ISACTIVER); - return MEMTX_OK; + return true; case GICD_ICACTIVER ... GICD_ICACTIVER + 0x7f: *data = gicd_read_bitmap_reg(s, attrs, s->active, mask_nsacr_ge2, offset - GICD_ICACTIVER); - return MEMTX_OK; + return true; case GICD_IPRIORITYR ... GICD_IPRIORITYR + 0x3ff: { int i, irq = offset - GICD_IPRIORITYR; @@ -452,12 +469,12 @@ static MemTxResult gicd_readl(GICv3State *s, hwaddr offset, value |= gicd_read_ipriorityr(s, attrs, i); } *data = value; - return MEMTX_OK; + return true; } case GICD_ITARGETSR ... GICD_ITARGETSR + 0x3ff: /* RAZ/WI since affinity routing is always enabled */ *data = 0; - return MEMTX_OK; + return true; case GICD_ICFGR ... GICD_ICFGR + 0xff: { /* Here only the even bits are used; odd bits are RES0 */ @@ -466,7 +483,7 @@ static MemTxResult gicd_readl(GICv3State *s, hwaddr offset, if (irq < GIC_INTERNAL || irq >= s->num_irq) { *data = 0; - return MEMTX_OK; + return true; } /* Since our edge_trigger bitmap is one bit per irq, we only need @@ -478,7 +495,7 @@ static MemTxResult gicd_readl(GICv3State *s, hwaddr offset, value = extract32(value, (irq & 0x1f) ? 16 : 0, 16); value = half_shuffle32(value) << 1; *data = value; - return MEMTX_OK; + return true; } case GICD_IGRPMODR ... GICD_IGRPMODR + 0xff: { @@ -489,16 +506,16 @@ static MemTxResult gicd_readl(GICv3State *s, hwaddr offset, * security enabled and this is an NS access */ *data = 0; - return MEMTX_OK; + return true; } /* RAZ/WI for SGIs, PPIs, unimplemented irqs */ irq = (offset - GICD_IGRPMODR) * 8; if (irq < GIC_INTERNAL || irq >= s->num_irq) { *data = 0; - return MEMTX_OK; + return true; } *data = *gic_bmp_ptr32(s->grpmod, irq); - return MEMTX_OK; + return true; } case GICD_NSACR ... GICD_NSACR + 0xff: { @@ -507,7 +524,7 @@ static MemTxResult gicd_readl(GICv3State *s, hwaddr offset, if (irq < GIC_INTERNAL || irq >= s->num_irq) { *data = 0; - return MEMTX_OK; + return true; } if ((s->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) { @@ -515,17 +532,17 @@ static MemTxResult gicd_readl(GICv3State *s, hwaddr offset, * security enabled and this is an NS access */ *data = 0; - return MEMTX_OK; + return true; } *data = s->gicd_nsacr[irq / 16]; - return MEMTX_OK; + return true; } case GICD_CPENDSGIR ... GICD_CPENDSGIR + 0xf: case GICD_SPENDSGIR ... 
GICD_SPENDSGIR + 0xf: /* RAZ/WI since affinity routing is always enabled */ *data = 0; - return MEMTX_OK; + return true; case GICD_IROUTER ... GICD_IROUTER + 0x1fdf: { uint64_t r; @@ -537,26 +554,26 @@ static MemTxResult gicd_readl(GICv3State *s, hwaddr offset, } else { *data = (uint32_t)r; } - return MEMTX_OK; + return true; } case GICD_IDREGS ... GICD_IDREGS + 0x2f: /* ID registers */ - *data = gicv3_idreg(offset - GICD_IDREGS); - return MEMTX_OK; + *data = gicv3_idreg(s, offset - GICD_IDREGS, GICV3_PIDR0_DIST); + return true; case GICD_SGIR: /* WO registers, return unknown value */ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid guest read from WO register at offset " TARGET_FMT_plx "\n", __func__, offset); *data = 0; - return MEMTX_OK; + return true; default: - return MEMTX_ERROR; + return false; } } -static MemTxResult gicd_writel(GICv3State *s, hwaddr offset, - uint64_t value, MemTxAttrs attrs) +static bool gicd_writel(GICv3State *s, hwaddr offset, + uint64_t value, MemTxAttrs attrs) { /* Almost all GICv3 distributor registers are 32-bit. Note that * RO registers must ignore writes, not abort. @@ -594,74 +611,74 @@ static MemTxResult gicd_writel(GICv3State *s, hwaddr offset, if (value & mask & GICD_CTLR_DS) { /* We just set DS, so the ARE_NS and EnG1S bits are now RES0. * Note that this is a one-way transition because if DS is set - * then it's not writeable, so it can only go back to 0 with a + * then it's not writable, so it can only go back to 0 with a * hardware reset. */ s->gicd_ctlr &= ~(GICD_CTLR_EN_GRP1S | GICD_CTLR_ARE_NS); } gicv3_full_update(s); - return MEMTX_OK; + return true; } case GICD_STATUSR: /* RAZ/WI for our implementation */ - return MEMTX_OK; + return true; case GICD_IGROUPR ... GICD_IGROUPR + 0x7f: { int irq; if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) { - return MEMTX_OK; + return true; } /* RAZ/WI for SGIs, PPIs, unimplemented irqs */ irq = (offset - GICD_IGROUPR) * 8; if (irq < GIC_INTERNAL || irq >= s->num_irq) { - return MEMTX_OK; + return true; } *gic_bmp_ptr32(s->group, irq) = value; gicv3_update(s, irq, 32); - return MEMTX_OK; + return true; } case GICD_ISENABLER ... GICD_ISENABLER + 0x7f: gicd_write_set_bitmap_reg(s, attrs, s->enabled, NULL, offset - GICD_ISENABLER, value); - return MEMTX_OK; + return true; case GICD_ICENABLER ... GICD_ICENABLER + 0x7f: gicd_write_clear_bitmap_reg(s, attrs, s->enabled, NULL, offset - GICD_ICENABLER, value); - return MEMTX_OK; + return true; case GICD_ISPENDR ... GICD_ISPENDR + 0x7f: gicd_write_set_bitmap_reg(s, attrs, s->pending, mask_nsacr_ge1, offset - GICD_ISPENDR, value); - return MEMTX_OK; + return true; case GICD_ICPENDR ... GICD_ICPENDR + 0x7f: gicd_write_clear_bitmap_reg(s, attrs, s->pending, mask_nsacr_ge2, offset - GICD_ICPENDR, value); - return MEMTX_OK; + return true; case GICD_ISACTIVER ... GICD_ISACTIVER + 0x7f: gicd_write_set_bitmap_reg(s, attrs, s->active, NULL, offset - GICD_ISACTIVER, value); - return MEMTX_OK; + return true; case GICD_ICACTIVER ... GICD_ICACTIVER + 0x7f: gicd_write_clear_bitmap_reg(s, attrs, s->active, NULL, offset - GICD_ICACTIVER, value); - return MEMTX_OK; + return true; case GICD_IPRIORITYR ... GICD_IPRIORITYR + 0x3ff: { int i, irq = offset - GICD_IPRIORITYR; if (irq < GIC_INTERNAL || irq + 3 >= s->num_irq) { - return MEMTX_OK; + return true; } for (i = irq; i < irq + 4; i++, value >>= 8) { gicd_write_ipriorityr(s, attrs, i, value); } gicv3_update(s, irq, 4); - return MEMTX_OK; + return true; } case GICD_ITARGETSR ... 
GICD_ITARGETSR + 0x3ff: /* RAZ/WI since affinity routing is always enabled */ - return MEMTX_OK; + return true; case GICD_ICFGR ... GICD_ICFGR + 0xff: { /* Here only the odd bits are used; even bits are RES0 */ @@ -669,7 +686,7 @@ static MemTxResult gicd_writel(GICv3State *s, hwaddr offset, uint32_t mask, oldval; if (irq < GIC_INTERNAL || irq >= s->num_irq) { - return MEMTX_OK; + return true; } /* Since our edge_trigger bitmap is one bit per irq, our input @@ -687,7 +704,7 @@ static MemTxResult gicd_writel(GICv3State *s, hwaddr offset, oldval = *gic_bmp_ptr32(s->edge_trigger, (irq & ~0x1f)); value = (oldval & ~mask) | (value & mask); *gic_bmp_ptr32(s->edge_trigger, irq & ~0x1f) = value; - return MEMTX_OK; + return true; } case GICD_IGRPMODR ... GICD_IGRPMODR + 0xff: { @@ -697,16 +714,16 @@ static MemTxResult gicd_writel(GICv3State *s, hwaddr offset, /* RAZ/WI if security disabled, or if * security enabled and this is an NS access */ - return MEMTX_OK; + return true; } /* RAZ/WI for SGIs, PPIs, unimplemented irqs */ irq = (offset - GICD_IGRPMODR) * 8; if (irq < GIC_INTERNAL || irq >= s->num_irq) { - return MEMTX_OK; + return true; } *gic_bmp_ptr32(s->grpmod, irq) = value; gicv3_update(s, irq, 32); - return MEMTX_OK; + return true; } case GICD_NSACR ... GICD_NSACR + 0xff: { @@ -714,41 +731,41 @@ static MemTxResult gicd_writel(GICv3State *s, hwaddr offset, int irq = (offset - GICD_NSACR) * 4; if (irq < GIC_INTERNAL || irq >= s->num_irq) { - return MEMTX_OK; + return true; } if ((s->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) { /* RAZ/WI if security disabled, or if * security enabled and this is an NS access */ - return MEMTX_OK; + return true; } s->gicd_nsacr[irq / 16] = value; /* No update required as this only affects access permission checks */ - return MEMTX_OK; + return true; } case GICD_SGIR: /* RES0 if affinity routing is enabled */ - return MEMTX_OK; + return true; case GICD_CPENDSGIR ... GICD_CPENDSGIR + 0xf: case GICD_SPENDSGIR ... GICD_SPENDSGIR + 0xf: /* RAZ/WI since affinity routing is always enabled */ - return MEMTX_OK; + return true; case GICD_IROUTER ... GICD_IROUTER + 0x1fdf: { uint64_t r; int irq = (offset - GICD_IROUTER) / 8; if (irq < GIC_INTERNAL || irq >= s->num_irq) { - return MEMTX_OK; + return true; } /* Write half of the 64-bit register */ r = gicd_read_irouter(s, attrs, irq); r = deposit64(r, (offset & 7) ? 32 : 0, 32, value); gicd_write_irouter(s, attrs, irq, r); - return MEMTX_OK; + return true; } case GICD_IDREGS ... GICD_IDREGS + 0x2f: case GICD_TYPER: @@ -757,14 +774,14 @@ static MemTxResult gicd_writel(GICv3State *s, hwaddr offset, qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid guest write to RO register at offset " TARGET_FMT_plx "\n", __func__, offset); - return MEMTX_OK; + return true; default: - return MEMTX_ERROR; + return false; } } -static MemTxResult gicd_writell(GICv3State *s, hwaddr offset, - uint64_t value, MemTxAttrs attrs) +static bool gicd_writeq(GICv3State *s, hwaddr offset, + uint64_t value, MemTxAttrs attrs) { /* Our only 64-bit registers are GICD_IROUTER */ int irq; @@ -773,14 +790,14 @@ static MemTxResult gicd_writell(GICv3State *s, hwaddr offset, case GICD_IROUTER ... 
GICD_IROUTER + 0x1fdf: irq = (offset - GICD_IROUTER) / 8; gicd_write_irouter(s, attrs, irq, value); - return MEMTX_OK; + return true; default: - return MEMTX_ERROR; + return false; } } -static MemTxResult gicd_readll(GICv3State *s, hwaddr offset, - uint64_t *data, MemTxAttrs attrs) +static bool gicd_readq(GICv3State *s, hwaddr offset, + uint64_t *data, MemTxAttrs attrs) { /* Our only 64-bit registers are GICD_IROUTER */ int irq; @@ -789,9 +806,9 @@ static MemTxResult gicd_readll(GICv3State *s, hwaddr offset, case GICD_IROUTER ... GICD_IROUTER + 0x1fdf: irq = (offset - GICD_IROUTER) / 8; *data = gicd_read_irouter(s, attrs, irq); - return MEMTX_OK; + return true; default: - return MEMTX_ERROR; + return false; } } @@ -799,7 +816,7 @@ MemTxResult gicv3_dist_read(void *opaque, hwaddr offset, uint64_t *data, unsigned size, MemTxAttrs attrs) { GICv3State *s = (GICv3State *)opaque; - MemTxResult r; + bool r; switch (size) { case 1: @@ -812,36 +829,35 @@ MemTxResult gicv3_dist_read(void *opaque, hwaddr offset, uint64_t *data, r = gicd_readl(s, offset, data, attrs); break; case 8: - r = gicd_readll(s, offset, data, attrs); + r = gicd_readq(s, offset, data, attrs); break; default: - r = MEMTX_ERROR; + r = false; break; } - if (r == MEMTX_ERROR) { + if (!r) { qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid guest read at offset " TARGET_FMT_plx - "size %u\n", __func__, offset, size); + " size %u\n", __func__, offset, size); trace_gicv3_dist_badread(offset, size, attrs.secure); /* The spec requires that reserved registers are RAZ/WI; * so use MEMTX_ERROR returns from leaf functions as a way to * trigger the guest-error logging but don't return it to * the caller, or we'll cause a spurious guest data abort. */ - r = MEMTX_OK; *data = 0; } else { trace_gicv3_dist_read(offset, *data, size, attrs.secure); } - return r; + return MEMTX_OK; } MemTxResult gicv3_dist_write(void *opaque, hwaddr offset, uint64_t data, unsigned size, MemTxAttrs attrs) { GICv3State *s = (GICv3State *)opaque; - MemTxResult r; + bool r; switch (size) { case 1: @@ -854,28 +870,27 @@ MemTxResult gicv3_dist_write(void *opaque, hwaddr offset, uint64_t data, r = gicd_writel(s, offset, data, attrs); break; case 8: - r = gicd_writell(s, offset, data, attrs); + r = gicd_writeq(s, offset, data, attrs); break; default: - r = MEMTX_ERROR; + r = false; break; } - if (r == MEMTX_ERROR) { + if (!r) { qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid guest write at offset " TARGET_FMT_plx - "size %u\n", __func__, offset, size); + " size %u\n", __func__, offset, size); trace_gicv3_dist_badwrite(offset, data, size, attrs.secure); /* The spec requires that reserved registers are RAZ/WI; * so use MEMTX_ERROR returns from leaf functions as a way to * trigger the guest-error logging but don't return it to * the caller, or we'll cause a spurious guest data abort. */ - r = MEMTX_OK; } else { trace_gicv3_dist_write(offset, data, size, attrs.secure); } - return r; + return MEMTX_OK; } void gicv3_dist_set_irq(GICv3State *s, int irq, int level) diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c new file mode 100644 index 0000000000..2ff21ed6bb --- /dev/null +++ b/hw/intc/arm_gicv3_its.c @@ -0,0 +1,2037 @@ +/* + * ITS emulation for a GICv3-based system + * + * Copyright Linaro.org 2021 + * + * Authors: + * Shashi Mallela + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "trace.h" +#include "hw/qdev-properties.h" +#include "hw/intc/arm_gicv3_its_common.h" +#include "gicv3_internal.h" +#include "qom/object.h" +#include "qapi/error.h" + +typedef struct GICv3ITSClass GICv3ITSClass; +/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */ +DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass, + ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS) + +struct GICv3ITSClass { + GICv3ITSCommonClass parent_class; + void (*parent_reset)(DeviceState *dev); +}; + +/* + * This is an internal enum used to distinguish between LPI triggered + * via command queue and LPI triggered via gits_translater write. + */ +typedef enum ItsCmdType { + NONE = 0, /* internal indication for GITS_TRANSLATER write */ + CLEAR = 1, + DISCARD = 2, + INTERRUPT = 3, +} ItsCmdType; + +typedef struct DTEntry { + bool valid; + unsigned size; + uint64_t ittaddr; +} DTEntry; + +typedef struct CTEntry { + bool valid; + uint32_t rdbase; +} CTEntry; + +typedef struct ITEntry { + bool valid; + int inttype; + uint32_t intid; + uint32_t doorbell; + uint32_t icid; + uint32_t vpeid; +} ITEntry; + +typedef struct VTEntry { + bool valid; + unsigned vptsize; + uint32_t rdbase; + uint64_t vptaddr; +} VTEntry; + +/* + * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options + * if a command parameter is not correct. These include both "stall + * processing of the command queue" and "ignore this command, and + * keep processing the queue". In our implementation we choose that + * memory transaction errors reading the command packet provoke a + * stall, but errors in parameters cause us to ignore the command + * and continue processing. + * The process_* functions which handle individual ITS commands all + * return an ItsCmdResult which tells process_cmdq() whether it should + * stall, keep going because of an error, or keep going because the + * command was a success. + */ +typedef enum ItsCmdResult { + CMD_STALL = 0, + CMD_CONTINUE = 1, + CMD_CONTINUE_OK = 2, +} ItsCmdResult; + +/* True if the ITS supports the GICv4 virtual LPI feature */ +static bool its_feature_virtual(GICv3ITSState *s) +{ + return s->typer & R_GITS_TYPER_VIRTUAL_MASK; +} + +static inline bool intid_in_lpi_range(uint32_t id) +{ + return id >= GICV3_LPI_INTID_START && + id < (1 << (GICD_TYPER_IDBITS + 1)); +} + +static inline bool valid_doorbell(uint32_t id) +{ + /* Doorbell fields may be an LPI, or 1023 to mean "no doorbell" */ + return id == INTID_SPURIOUS || intid_in_lpi_range(id); +} + +static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz) +{ + uint64_t result = 0; + + switch (page_sz) { + case GITS_PAGE_SIZE_4K: + case GITS_PAGE_SIZE_16K: + result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12; + break; + + case GITS_PAGE_SIZE_64K: + result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16; + result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48; + break; + + default: + break; + } + return result; +} + +static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td, + uint32_t idx, MemTxResult *res) +{ + /* + * Given a TableDesc describing one of the ITS in-guest-memory + * tables and an index into it, return the guest address + * corresponding to that table entry. + * If there was a memory error reading the L1 table of an + * indirect table, *res is set accordingly, and we return -1. + * If the L1 table entry is marked not valid, we return -1 with + * *res set to MEMTX_OK. 
+ * + * The specification defines the format of level 1 entries of a + * 2-level table, but the format of level 2 entries and the format + * of flat-mapped tables is IMPDEF. + */ + AddressSpace *as = &s->gicv3->dma_as; + uint32_t l2idx; + uint64_t l2; + uint32_t num_l2_entries; + + *res = MEMTX_OK; + + if (!td->indirect) { + /* Single level table */ + return td->base_addr + idx * td->entry_sz; + } + + /* Two level table */ + l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE); + + l2 = address_space_ldq_le(as, + td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE), + MEMTXATTRS_UNSPECIFIED, res); + if (*res != MEMTX_OK) { + return -1; + } + if (!(l2 & L2_TABLE_VALID_MASK)) { + return -1; + } + + num_l2_entries = td->page_sz / td->entry_sz; + return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz; +} + +/* + * Read the Collection Table entry at index @icid. On success (including + * successfully determining that there is no valid CTE for this index), + * we return MEMTX_OK and populate the CTEntry struct @cte accordingly. + * If there is an error reading memory then we return the error code. + */ +static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte) +{ + AddressSpace *as = &s->gicv3->dma_as; + MemTxResult res = MEMTX_OK; + uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res); + uint64_t cteval; + + if (entry_addr == -1) { + /* No L2 table entry, i.e. no valid CTE, or a memory error */ + cte->valid = false; + goto out; + } + + cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res); + if (res != MEMTX_OK) { + goto out; + } + cte->valid = FIELD_EX64(cteval, CTE, VALID); + cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE); +out: + if (res != MEMTX_OK) { + trace_gicv3_its_cte_read_fault(icid); + } else { + trace_gicv3_its_cte_read(icid, cte->valid, cte->rdbase); + } + return res; +} + +/* + * Update the Interrupt Table entry at index @evinted in the table specified + * by the dte @dte. Returns true on success, false if there was a memory + * access error. + */ +static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte, + const ITEntry *ite) +{ + AddressSpace *as = &s->gicv3->dma_as; + MemTxResult res = MEMTX_OK; + hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE; + uint64_t itel = 0; + uint32_t iteh = 0; + + trace_gicv3_its_ite_write(dte->ittaddr, eventid, ite->valid, + ite->inttype, ite->intid, ite->icid, + ite->vpeid, ite->doorbell); + + if (ite->valid) { + itel = FIELD_DP64(itel, ITE_L, VALID, 1); + itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype); + itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid); + itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid); + itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid); + iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell); + } + + address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res); + if (res != MEMTX_OK) { + return false; + } + address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res); + return res == MEMTX_OK; +} + +/* + * Read the Interrupt Table entry at index @eventid from the table specified + * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry + * struct @ite accordingly. If there is an error reading memory then we return + * the error code. 
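To make the two-level table walk used by table_entry_addr() above concrete, here is a small sketch of how an index splits into an L1 slot and an offset within the L2 page. It is not code from the patch; the page size, entry size and index are sample values, and L1_ENTRY_SIZE stands in for the patch's L1TABLE_ENTRY_SIZE.

#include <stdint.h>
#include <stdio.h>

#define L1_ENTRY_SIZE 8            /* assumed: 8-byte level-1 entries */

int main(void)
{
    uint32_t page_sz = 4096;       /* sample GITS_BASER page size */
    uint32_t entry_sz = 8;         /* sample table entry size */
    uint32_t idx = 1234;           /* sample device/collection index */

    uint32_t l1_slots = page_sz / L1_ENTRY_SIZE;        /* indexes per L1 entry */
    uint32_t l2_entries = page_sz / entry_sz;           /* entries per L2 page */

    uint32_t l1_index = idx / l1_slots;                 /* which L1 entry to read */
    uint32_t l2_offset = (idx % l2_entries) * entry_sz; /* byte offset in L2 page */

    printf("idx %u -> L1 entry %u, offset 0x%x in the L2 page\n",
           idx, l1_index, l2_offset);
    return 0;
}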
+ */ +static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid, + const DTEntry *dte, ITEntry *ite) +{ + AddressSpace *as = &s->gicv3->dma_as; + MemTxResult res = MEMTX_OK; + uint64_t itel; + uint32_t iteh; + hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE; + + itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res); + if (res != MEMTX_OK) { + trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid); + return res; + } + + iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res); + if (res != MEMTX_OK) { + trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid); + return res; + } + + ite->valid = FIELD_EX64(itel, ITE_L, VALID); + ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE); + ite->intid = FIELD_EX64(itel, ITE_L, INTID); + ite->icid = FIELD_EX64(itel, ITE_L, ICID); + ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID); + ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL); + trace_gicv3_its_ite_read(dte->ittaddr, eventid, ite->valid, + ite->inttype, ite->intid, ite->icid, + ite->vpeid, ite->doorbell); + return MEMTX_OK; +} + +/* + * Read the Device Table entry at index @devid. On success (including + * successfully determining that there is no valid DTE for this index), + * we return MEMTX_OK and populate the DTEntry struct accordingly. + * If there is an error reading memory then we return the error code. + */ +static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte) +{ + MemTxResult res = MEMTX_OK; + AddressSpace *as = &s->gicv3->dma_as; + uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res); + uint64_t dteval; + + if (entry_addr == -1) { + /* No L2 table entry, i.e. no valid DTE, or a memory error */ + dte->valid = false; + goto out; + } + dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res); + if (res != MEMTX_OK) { + goto out; + } + dte->valid = FIELD_EX64(dteval, DTE, VALID); + dte->size = FIELD_EX64(dteval, DTE, SIZE); + /* DTE word field stores bits [51:8] of the ITT address */ + dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT; +out: + if (res != MEMTX_OK) { + trace_gicv3_its_dte_read_fault(devid); + } else { + trace_gicv3_its_dte_read(devid, dte->valid, dte->size, dte->ittaddr); + } + return res; +} + +/* + * Read the vPE Table entry at index @vpeid. On success (including + * successfully determining that there is no valid entry for this index), + * we return MEMTX_OK and populate the VTEntry struct accordingly. + * If there is an error reading memory then we return the error code. + */ +static MemTxResult get_vte(GICv3ITSState *s, uint32_t vpeid, VTEntry *vte) +{ + MemTxResult res = MEMTX_OK; + AddressSpace *as = &s->gicv3->dma_as; + uint64_t entry_addr = table_entry_addr(s, &s->vpet, vpeid, &res); + uint64_t vteval; + + if (entry_addr == -1) { + /* No L2 table entry, i.e. no valid VTE, or a memory error */ + vte->valid = false; + goto out; + } + vteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res); + if (res != MEMTX_OK) { + goto out; + } + vte->valid = FIELD_EX64(vteval, VTE, VALID); + vte->vptsize = FIELD_EX64(vteval, VTE, VPTSIZE); + vte->vptaddr = FIELD_EX64(vteval, VTE, VPTADDR); + vte->rdbase = FIELD_EX64(vteval, VTE, RDBASE); +out: + if (res != MEMTX_OK) { + trace_gicv3_its_vte_read_fault(vpeid); + } else { + trace_gicv3_its_vte_read(vpeid, vte->valid, vte->vptsize, + vte->vptaddr, vte->rdbase); + } + return res; +} + +/* + * Given a (DeviceID, EventID), look up the corresponding ITE, including + * checking for the various invalid-value cases. 
If we find a valid ITE, + * fill in @ite and @dte and return CMD_CONTINUE_OK. Otherwise return + * CMD_STALL or CMD_CONTINUE as appropriate (and the contents of @ite + * should not be relied on). + * + * The string @who is purely for the LOG_GUEST_ERROR messages, + * and should indicate the name of the calling function or similar. + */ +static ItsCmdResult lookup_ite(GICv3ITSState *s, const char *who, + uint32_t devid, uint32_t eventid, ITEntry *ite, + DTEntry *dte) +{ + uint64_t num_eventids; + + if (devid >= s->dt.num_entries) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid command attributes: devid %d>=%d", + who, devid, s->dt.num_entries); + return CMD_CONTINUE; + } + + if (get_dte(s, devid, dte) != MEMTX_OK) { + return CMD_STALL; + } + if (!dte->valid) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid command attributes: " + "invalid dte for %d\n", who, devid); + return CMD_CONTINUE; + } + + num_eventids = 1ULL << (dte->size + 1); + if (eventid >= num_eventids) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid command attributes: eventid %d >= %" + PRId64 "\n", who, eventid, num_eventids); + return CMD_CONTINUE; + } + + if (get_ite(s, eventid, dte, ite) != MEMTX_OK) { + return CMD_STALL; + } + + if (!ite->valid) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid command attributes: invalid ITE\n", who); + return CMD_CONTINUE; + } + + return CMD_CONTINUE_OK; +} + +/* + * Given an ICID, look up the corresponding CTE, including checking for various + * invalid-value cases. If we find a valid CTE, fill in @cte and return + * CMD_CONTINUE_OK; otherwise return CMD_STALL or CMD_CONTINUE (and the + * contents of @cte should not be relied on). + * + * The string @who is purely for the LOG_GUEST_ERROR messages, + * and should indicate the name of the calling function or similar. + */ +static ItsCmdResult lookup_cte(GICv3ITSState *s, const char *who, + uint32_t icid, CTEntry *cte) +{ + if (icid >= s->ct.num_entries) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid ICID 0x%x\n", who, icid); + return CMD_CONTINUE; + } + if (get_cte(s, icid, cte) != MEMTX_OK) { + return CMD_STALL; + } + if (!cte->valid) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid CTE\n", who); + return CMD_CONTINUE; + } + if (cte->rdbase >= s->gicv3->num_cpu) { + return CMD_CONTINUE; + } + return CMD_CONTINUE_OK; +} + +/* + * Given a VPEID, look up the corresponding VTE, including checking + * for various invalid-value cases. if we find a valid VTE, fill in @vte + * and return CMD_CONTINUE_OK; otherwise return CMD_STALL or CMD_CONTINUE + * (and the contents of @vte should not be relied on). + * + * The string @who is purely for the LOG_GUEST_ERROR messages, + * and should indicate the name of the calling function or similar. 
+ */ +static ItsCmdResult lookup_vte(GICv3ITSState *s, const char *who, + uint32_t vpeid, VTEntry *vte) +{ + if (vpeid >= s->vpet.num_entries) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid VPEID 0x%x\n", who, vpeid); + return CMD_CONTINUE; + } + + if (get_vte(s, vpeid, vte) != MEMTX_OK) { + return CMD_STALL; + } + if (!vte->valid) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid VTE for VPEID 0x%x\n", who, vpeid); + return CMD_CONTINUE; + } + + if (vte->rdbase >= s->gicv3->num_cpu) { + return CMD_CONTINUE; + } + return CMD_CONTINUE_OK; +} + +static ItsCmdResult process_its_cmd_phys(GICv3ITSState *s, const ITEntry *ite, + int irqlevel) +{ + CTEntry cte; + ItsCmdResult cmdres; + + cmdres = lookup_cte(s, __func__, ite->icid, &cte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite->intid, irqlevel); + return CMD_CONTINUE_OK; +} + +static ItsCmdResult process_its_cmd_virt(GICv3ITSState *s, const ITEntry *ite, + int irqlevel) +{ + VTEntry vte; + ItsCmdResult cmdres; + + cmdres = lookup_vte(s, __func__, ite->vpeid, &vte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + + if (!intid_in_lpi_range(ite->intid) || + ite->intid >= (1ULL << (vte.vptsize + 1))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: intid 0x%x out of range\n", + __func__, ite->intid); + return CMD_CONTINUE; + } + + /* + * For QEMU the actual pending of the vLPI is handled in the + * redistributor code + */ + gicv3_redist_process_vlpi(&s->gicv3->cpu[vte.rdbase], ite->intid, + vte.vptaddr << 16, ite->doorbell, irqlevel); + return CMD_CONTINUE_OK; +} + +/* + * This function handles the processing of following commands based on + * the ItsCmdType parameter passed:- + * 1. triggering of lpi interrupt translation via ITS INT command + * 2. triggering of lpi interrupt translation via gits_translater register + * 3. handling of ITS CLEAR command + * 4. handling of ITS DISCARD command + */ +static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid, + uint32_t eventid, ItsCmdType cmd) +{ + DTEntry dte; + ITEntry ite; + ItsCmdResult cmdres; + int irqlevel; + + cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + + irqlevel = (cmd == CLEAR || cmd == DISCARD) ? 0 : 1; + + switch (ite.inttype) { + case ITE_INTTYPE_PHYSICAL: + cmdres = process_its_cmd_phys(s, &ite, irqlevel); + break; + case ITE_INTTYPE_VIRTUAL: + if (!its_feature_virtual(s)) { + /* Can't happen unless guest is illegally writing to table memory */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid type %d in ITE (table corrupted?)\n", + __func__, ite.inttype); + return CMD_CONTINUE; + } + cmdres = process_its_cmd_virt(s, &ite, irqlevel); + break; + default: + g_assert_not_reached(); + } + + if (cmdres == CMD_CONTINUE_OK && cmd == DISCARD) { + ITEntry ite = {}; + /* remove mapping from interrupt translation table */ + ite.valid = false; + return update_ite(s, eventid, &dte, &ite) ? 
CMD_CONTINUE_OK : CMD_STALL; + } + return CMD_CONTINUE_OK; +} + +static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt, + ItsCmdType cmd) +{ + uint32_t devid, eventid; + + devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT; + eventid = cmdpkt[1] & EVENTID_MASK; + switch (cmd) { + case INTERRUPT: + trace_gicv3_its_cmd_int(devid, eventid); + break; + case CLEAR: + trace_gicv3_its_cmd_clear(devid, eventid); + break; + case DISCARD: + trace_gicv3_its_cmd_discard(devid, eventid); + break; + default: + g_assert_not_reached(); + } + return do_process_its_cmd(s, devid, eventid, cmd); +} + +static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt, + bool ignore_pInt) +{ + uint32_t devid, eventid; + uint32_t pIntid = 0; + uint64_t num_eventids; + uint16_t icid = 0; + DTEntry dte; + ITEntry ite; + + devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT; + eventid = cmdpkt[1] & EVENTID_MASK; + icid = cmdpkt[2] & ICID_MASK; + + if (ignore_pInt) { + pIntid = eventid; + trace_gicv3_its_cmd_mapi(devid, eventid, icid); + } else { + pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT; + trace_gicv3_its_cmd_mapti(devid, eventid, icid, pIntid); + } + + if (devid >= s->dt.num_entries) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid command attributes: devid %d>=%d", + __func__, devid, s->dt.num_entries); + return CMD_CONTINUE; + } + + if (get_dte(s, devid, &dte) != MEMTX_OK) { + return CMD_STALL; + } + num_eventids = 1ULL << (dte.size + 1); + + if (icid >= s->ct.num_entries) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid ICID 0x%x >= 0x%x\n", + __func__, icid, s->ct.num_entries); + return CMD_CONTINUE; + } + + if (!dte.valid) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: no valid DTE for devid 0x%x\n", __func__, devid); + return CMD_CONTINUE; + } + + if (eventid >= num_eventids) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n", + __func__, eventid, num_eventids); + return CMD_CONTINUE; + } + + if (!intid_in_lpi_range(pIntid)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid interrupt ID 0x%x\n", __func__, pIntid); + return CMD_CONTINUE; + } + + /* add ite entry to interrupt translation table */ + ite.valid = true; + ite.inttype = ITE_INTTYPE_PHYSICAL; + ite.intid = pIntid; + ite.icid = icid; + ite.doorbell = INTID_SPURIOUS; + ite.vpeid = 0; + return update_ite(s, eventid, &dte, &ite) ? 
CMD_CONTINUE_OK : CMD_STALL; +} + +static ItsCmdResult process_vmapti(GICv3ITSState *s, const uint64_t *cmdpkt, + bool ignore_vintid) +{ + uint32_t devid, eventid, vintid, doorbell, vpeid; + uint32_t num_eventids; + DTEntry dte; + ITEntry ite; + + if (!its_feature_virtual(s)) { + return CMD_CONTINUE; + } + + devid = FIELD_EX64(cmdpkt[0], VMAPTI_0, DEVICEID); + eventid = FIELD_EX64(cmdpkt[1], VMAPTI_1, EVENTID); + vpeid = FIELD_EX64(cmdpkt[1], VMAPTI_1, VPEID); + doorbell = FIELD_EX64(cmdpkt[2], VMAPTI_2, DOORBELL); + if (ignore_vintid) { + vintid = eventid; + trace_gicv3_its_cmd_vmapi(devid, eventid, vpeid, doorbell); + } else { + vintid = FIELD_EX64(cmdpkt[2], VMAPTI_2, VINTID); + trace_gicv3_its_cmd_vmapti(devid, eventid, vpeid, vintid, doorbell); + } + + if (devid >= s->dt.num_entries) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid DeviceID 0x%x (must be less than 0x%x)\n", + __func__, devid, s->dt.num_entries); + return CMD_CONTINUE; + } + + if (get_dte(s, devid, &dte) != MEMTX_OK) { + return CMD_STALL; + } + + if (!dte.valid) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: no entry in device table for DeviceID 0x%x\n", + __func__, devid); + return CMD_CONTINUE; + } + + num_eventids = 1ULL << (dte.size + 1); + + if (eventid >= num_eventids) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: EventID 0x%x too large for DeviceID 0x%x " + "(must be less than 0x%x)\n", + __func__, eventid, devid, num_eventids); + return CMD_CONTINUE; + } + if (!intid_in_lpi_range(vintid)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: VIntID 0x%x not a valid LPI\n", + __func__, vintid); + return CMD_CONTINUE; + } + if (!valid_doorbell(doorbell)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Doorbell %d not 1023 and not a valid LPI\n", + __func__, doorbell); + return CMD_CONTINUE; + } + if (vpeid >= s->vpet.num_entries) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: VPEID 0x%x out of range (must be less than 0x%x)\n", + __func__, vpeid, s->vpet.num_entries); + return CMD_CONTINUE; + } + /* add ite entry to interrupt translation table */ + ite.valid = true; + ite.inttype = ITE_INTTYPE_VIRTUAL; + ite.intid = vintid; + ite.icid = 0; + ite.doorbell = doorbell; + ite.vpeid = vpeid; + return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL; +} + +/* + * Update the Collection Table entry for @icid to @cte. Returns true + * on success, false if there was a memory access error. 
+ */ +static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte) +{ + AddressSpace *as = &s->gicv3->dma_as; + uint64_t entry_addr; + uint64_t cteval = 0; + MemTxResult res = MEMTX_OK; + + trace_gicv3_its_cte_write(icid, cte->valid, cte->rdbase); + + if (cte->valid) { + /* add mapping entry to collection table */ + cteval = FIELD_DP64(cteval, CTE, VALID, 1); + cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase); + } + + entry_addr = table_entry_addr(s, &s->ct, icid, &res); + if (res != MEMTX_OK) { + /* memory access error: stall */ + return false; + } + if (entry_addr == -1) { + /* No L2 table for this index: discard write and continue */ + return true; + } + + address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res); + return res == MEMTX_OK; +} + +static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt) +{ + uint16_t icid; + CTEntry cte; + + icid = cmdpkt[2] & ICID_MASK; + cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK; + if (cte.valid) { + cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT; + cte.rdbase &= RDBASE_PROCNUM_MASK; + } else { + cte.rdbase = 0; + } + trace_gicv3_its_cmd_mapc(icid, cte.rdbase, cte.valid); + + if (icid >= s->ct.num_entries) { + qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%x\n", icid); + return CMD_CONTINUE; + } + if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) { + qemu_log_mask(LOG_GUEST_ERROR, + "ITS MAPC: invalid RDBASE %u\n", cte.rdbase); + return CMD_CONTINUE; + } + + return update_cte(s, icid, &cte) ? CMD_CONTINUE_OK : CMD_STALL; +} + +/* + * Update the Device Table entry for @devid to @dte. Returns true + * on success, false if there was a memory access error. + */ +static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte) +{ + AddressSpace *as = &s->gicv3->dma_as; + uint64_t entry_addr; + uint64_t dteval = 0; + MemTxResult res = MEMTX_OK; + + trace_gicv3_its_dte_write(devid, dte->valid, dte->size, dte->ittaddr); + + if (dte->valid) { + /* add mapping entry to device table */ + dteval = FIELD_DP64(dteval, DTE, VALID, 1); + dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size); + dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr); + } + + entry_addr = table_entry_addr(s, &s->dt, devid, &res); + if (res != MEMTX_OK) { + /* memory access error: stall */ + return false; + } + if (entry_addr == -1) { + /* No L2 table for this index: discard write and continue */ + return true; + } + address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res); + return res == MEMTX_OK; +} + +static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt) +{ + uint32_t devid; + DTEntry dte; + + devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT; + dte.size = cmdpkt[1] & SIZE_MASK; + dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT; + dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK; + + trace_gicv3_its_cmd_mapd(devid, dte.size, dte.ittaddr, dte.valid); + + if (devid >= s->dt.num_entries) { + qemu_log_mask(LOG_GUEST_ERROR, + "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n", + devid, s->dt.num_entries); + return CMD_CONTINUE; + } + + if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) { + qemu_log_mask(LOG_GUEST_ERROR, + "ITS MAPD: invalid size %d\n", dte.size); + return CMD_CONTINUE; + } + + return update_dte(s, devid, &dte) ? 
CMD_CONTINUE_OK : CMD_STALL; +} + +static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt) +{ + uint64_t rd1, rd2; + + rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1); + rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2); + + trace_gicv3_its_cmd_movall(rd1, rd2); + + if (rd1 >= s->gicv3->num_cpu) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: RDBASE1 %" PRId64 + " out of range (must be less than %d)\n", + __func__, rd1, s->gicv3->num_cpu); + return CMD_CONTINUE; + } + if (rd2 >= s->gicv3->num_cpu) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: RDBASE2 %" PRId64 + " out of range (must be less than %d)\n", + __func__, rd2, s->gicv3->num_cpu); + return CMD_CONTINUE; + } + + if (rd1 == rd2) { + /* Move to same target must succeed as a no-op */ + return CMD_CONTINUE_OK; + } + + /* Move all pending LPIs from redistributor 1 to redistributor 2 */ + gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]); + + return CMD_CONTINUE_OK; +} + +static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt) +{ + uint32_t devid, eventid; + uint16_t new_icid; + DTEntry dte; + CTEntry old_cte, new_cte; + ITEntry old_ite; + ItsCmdResult cmdres; + + devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID); + eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID); + new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID); + + trace_gicv3_its_cmd_movi(devid, eventid, new_icid); + + cmdres = lookup_ite(s, __func__, devid, eventid, &old_ite, &dte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + + if (old_ite.inttype != ITE_INTTYPE_PHYSICAL) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid command attributes: invalid ITE\n", + __func__); + return CMD_CONTINUE; + } + + cmdres = lookup_cte(s, __func__, old_ite.icid, &old_cte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + cmdres = lookup_cte(s, __func__, new_icid, &new_cte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + + if (old_cte.rdbase != new_cte.rdbase) { + /* Move the LPI from the old redistributor to the new one */ + gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase], + &s->gicv3->cpu[new_cte.rdbase], + old_ite.intid); + } + + /* Update the ICID field in the interrupt translation table entry */ + old_ite.icid = new_icid; + return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE_OK : CMD_STALL; +} + +/* + * Update the vPE Table entry at index @vpeid with the entry @vte. + * Returns true on success, false if there was a memory access error. 
+ */ +static bool update_vte(GICv3ITSState *s, uint32_t vpeid, const VTEntry *vte) +{ + AddressSpace *as = &s->gicv3->dma_as; + uint64_t entry_addr; + uint64_t vteval = 0; + MemTxResult res = MEMTX_OK; + + trace_gicv3_its_vte_write(vpeid, vte->valid, vte->vptsize, vte->vptaddr, + vte->rdbase); + + if (vte->valid) { + vteval = FIELD_DP64(vteval, VTE, VALID, 1); + vteval = FIELD_DP64(vteval, VTE, VPTSIZE, vte->vptsize); + vteval = FIELD_DP64(vteval, VTE, VPTADDR, vte->vptaddr); + vteval = FIELD_DP64(vteval, VTE, RDBASE, vte->rdbase); + } + + entry_addr = table_entry_addr(s, &s->vpet, vpeid, &res); + if (res != MEMTX_OK) { + return false; + } + if (entry_addr == -1) { + /* No L2 table for this index: discard write and continue */ + return true; + } + address_space_stq_le(as, entry_addr, vteval, MEMTXATTRS_UNSPECIFIED, &res); + return res == MEMTX_OK; +} + +static ItsCmdResult process_vmapp(GICv3ITSState *s, const uint64_t *cmdpkt) +{ + VTEntry vte; + uint32_t vpeid; + + if (!its_feature_virtual(s)) { + return CMD_CONTINUE; + } + + vpeid = FIELD_EX64(cmdpkt[1], VMAPP_1, VPEID); + vte.rdbase = FIELD_EX64(cmdpkt[2], VMAPP_2, RDBASE); + vte.valid = FIELD_EX64(cmdpkt[2], VMAPP_2, V); + vte.vptsize = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTSIZE); + vte.vptaddr = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTADDR); + + trace_gicv3_its_cmd_vmapp(vpeid, vte.rdbase, vte.valid, + vte.vptaddr, vte.vptsize); + + /* + * For GICv4.0 the VPT_size field is only 5 bits, whereas we + * define our field macros to include the full GICv4.1 8 bits. + * The range check on VPT_size will catch the cases where + * the guest set the RES0-in-GICv4.0 bits [7:6]. + */ + if (vte.vptsize > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid VPT_size 0x%x\n", __func__, vte.vptsize); + return CMD_CONTINUE; + } + + if (vte.valid && vte.rdbase >= s->gicv3->num_cpu) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid rdbase 0x%x\n", __func__, vte.rdbase); + return CMD_CONTINUE; + } + + if (vpeid >= s->vpet.num_entries) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: VPEID 0x%x out of range (must be less than 0x%x)\n", + __func__, vpeid, s->vpet.num_entries); + return CMD_CONTINUE; + } + + return update_vte(s, vpeid, &vte) ? CMD_CONTINUE_OK : CMD_STALL; +} + +typedef struct VmovpCallbackData { + uint64_t rdbase; + uint32_t vpeid; + /* + * Overall command result. If more than one callback finds an + * error, STALL beats CONTINUE. + */ + ItsCmdResult result; +} VmovpCallbackData; + +static void vmovp_callback(gpointer data, gpointer opaque) +{ + /* + * This function is called to update the VPEID field in a VPE + * table entry for this ITS. This might be because of a VMOVP + * command executed on any ITS that is connected to the same GIC + * as this ITS. We need to read the VPE table entry for the VPEID + * and update its RDBASE field. 
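
The comment on VmovpCallbackData spells out how the per-ITS results are merged: STALL beats CONTINUE, and CONTINUE beats CONTINUE_OK. A standalone sketch of that folding rule; the enum ordering and merge_result() are invented for the example and are not functions from the patch.

    #include <assert.h>

    /* Ordering chosen for the example only */
    typedef enum { CMD_STALL, CMD_CONTINUE, CMD_CONTINUE_OK } CmdResult;

    /*
     * Fold one per-ITS result into the running result: a STALL anywhere
     * wins, a CONTINUE (recoverable error) beats CONTINUE_OK, and
     * CONTINUE_OK survives only if every ITS succeeded.
     */
    static CmdResult merge_result(CmdResult so_far, CmdResult this_its)
    {
        if (so_far == CMD_STALL || this_its == CMD_STALL) {
            return CMD_STALL;
        }
        if (so_far == CMD_CONTINUE || this_its == CMD_CONTINUE) {
            return CMD_CONTINUE;
        }
        return CMD_CONTINUE_OK;
    }

    int main(void)
    {
        CmdResult r = CMD_CONTINUE_OK;

        r = merge_result(r, CMD_CONTINUE_OK);
        r = merge_result(r, CMD_CONTINUE);   /* one ITS hit a lookup error */
        assert(r == CMD_CONTINUE);
        r = merge_result(r, CMD_STALL);      /* another hit a memory error */
        assert(r == CMD_STALL);
        return 0;
    }
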
+ */ + GICv3ITSState *s = data; + VmovpCallbackData *cbdata = opaque; + VTEntry vte; + ItsCmdResult cmdres; + + cmdres = lookup_vte(s, __func__, cbdata->vpeid, &vte); + switch (cmdres) { + case CMD_STALL: + cbdata->result = CMD_STALL; + return; + case CMD_CONTINUE: + if (cbdata->result != CMD_STALL) { + cbdata->result = CMD_CONTINUE; + } + return; + case CMD_CONTINUE_OK: + break; + } + + vte.rdbase = cbdata->rdbase; + if (!update_vte(s, cbdata->vpeid, &vte)) { + cbdata->result = CMD_STALL; + } +} + +static ItsCmdResult process_vmovp(GICv3ITSState *s, const uint64_t *cmdpkt) +{ + VmovpCallbackData cbdata; + + if (!its_feature_virtual(s)) { + return CMD_CONTINUE; + } + + cbdata.vpeid = FIELD_EX64(cmdpkt[1], VMOVP_1, VPEID); + cbdata.rdbase = FIELD_EX64(cmdpkt[2], VMOVP_2, RDBASE); + + trace_gicv3_its_cmd_vmovp(cbdata.vpeid, cbdata.rdbase); + + if (cbdata.rdbase >= s->gicv3->num_cpu) { + return CMD_CONTINUE; + } + + /* + * Our ITS implementation reports GITS_TYPER.VMOVP == 1, which means + * that when the VMOVP command is executed on an ITS to change the + * VPEID field in a VPE table entry the change must be propagated + * to all the ITSes connected to the same GIC. + */ + cbdata.result = CMD_CONTINUE_OK; + gicv3_foreach_its(s->gicv3, vmovp_callback, &cbdata); + return cbdata.result; +} + +static ItsCmdResult process_vmovi(GICv3ITSState *s, const uint64_t *cmdpkt) +{ + uint32_t devid, eventid, vpeid, doorbell; + bool doorbell_valid; + DTEntry dte; + ITEntry ite; + VTEntry old_vte, new_vte; + ItsCmdResult cmdres; + + if (!its_feature_virtual(s)) { + return CMD_CONTINUE; + } + + devid = FIELD_EX64(cmdpkt[0], VMOVI_0, DEVICEID); + eventid = FIELD_EX64(cmdpkt[1], VMOVI_1, EVENTID); + vpeid = FIELD_EX64(cmdpkt[1], VMOVI_1, VPEID); + doorbell_valid = FIELD_EX64(cmdpkt[2], VMOVI_2, D); + doorbell = FIELD_EX64(cmdpkt[2], VMOVI_2, DOORBELL); + + trace_gicv3_its_cmd_vmovi(devid, eventid, vpeid, doorbell_valid, doorbell); + + if (doorbell_valid && !valid_doorbell(doorbell)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid doorbell 0x%x\n", __func__, doorbell); + return CMD_CONTINUE; + } + + cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + + if (ite.inttype != ITE_INTTYPE_VIRTUAL) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: ITE is not for virtual interrupt\n", + __func__); + return CMD_CONTINUE; + } + + cmdres = lookup_vte(s, __func__, ite.vpeid, &old_vte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + cmdres = lookup_vte(s, __func__, vpeid, &new_vte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + + if (!intid_in_lpi_range(ite.intid) || + ite.intid >= (1ULL << (old_vte.vptsize + 1)) || + ite.intid >= (1ULL << (new_vte.vptsize + 1))) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: ITE intid 0x%x out of range\n", + __func__, ite.intid); + return CMD_CONTINUE; + } + + ite.vpeid = vpeid; + if (doorbell_valid) { + ite.doorbell = doorbell; + } + + /* + * Move the LPI from the old redistributor to the new one. We don't + * need to do anything if the guest somehow specified the + * same pending table for source and destination. + */ + if (old_vte.vptaddr != new_vte.vptaddr) { + gicv3_redist_mov_vlpi(&s->gicv3->cpu[old_vte.rdbase], + old_vte.vptaddr << 16, + &s->gicv3->cpu[new_vte.rdbase], + new_vte.vptaddr << 16, + ite.intid, + ite.doorbell); + } + + /* Update the ITE to the new VPEID and possibly doorbell values */ + return update_ite(s, eventid, &dte, &ite) ? 
CMD_CONTINUE_OK : CMD_STALL; +} + +static ItsCmdResult process_vinvall(GICv3ITSState *s, const uint64_t *cmdpkt) +{ + VTEntry vte; + uint32_t vpeid; + ItsCmdResult cmdres; + + if (!its_feature_virtual(s)) { + return CMD_CONTINUE; + } + + vpeid = FIELD_EX64(cmdpkt[1], VINVALL_1, VPEID); + + trace_gicv3_its_cmd_vinvall(vpeid); + + cmdres = lookup_vte(s, __func__, vpeid, &vte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + + gicv3_redist_vinvall(&s->gicv3->cpu[vte.rdbase], vte.vptaddr << 16); + return CMD_CONTINUE_OK; +} + +static ItsCmdResult process_inv(GICv3ITSState *s, const uint64_t *cmdpkt) +{ + uint32_t devid, eventid; + ITEntry ite; + DTEntry dte; + CTEntry cte; + VTEntry vte; + ItsCmdResult cmdres; + + devid = FIELD_EX64(cmdpkt[0], INV_0, DEVICEID); + eventid = FIELD_EX64(cmdpkt[1], INV_1, EVENTID); + + trace_gicv3_its_cmd_inv(devid, eventid); + + cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + + switch (ite.inttype) { + case ITE_INTTYPE_PHYSICAL: + cmdres = lookup_cte(s, __func__, ite.icid, &cte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + gicv3_redist_inv_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid); + break; + case ITE_INTTYPE_VIRTUAL: + if (!its_feature_virtual(s)) { + /* Can't happen unless guest is illegally writing to table memory */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid type %d in ITE (table corrupted?)\n", + __func__, ite.inttype); + return CMD_CONTINUE; + } + + cmdres = lookup_vte(s, __func__, ite.vpeid, &vte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + if (!intid_in_lpi_range(ite.intid) || + ite.intid >= (1ULL << (vte.vptsize + 1))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: intid 0x%x out of range\n", + __func__, ite.intid); + return CMD_CONTINUE; + } + gicv3_redist_inv_vlpi(&s->gicv3->cpu[vte.rdbase], ite.intid, + vte.vptaddr << 16); + break; + default: + g_assert_not_reached(); + } + + return CMD_CONTINUE_OK; +} + +/* + * Current implementation blocks until all + * commands are processed + */ +static void process_cmdq(GICv3ITSState *s) +{ + uint32_t wr_offset = 0; + uint32_t rd_offset = 0; + uint32_t cq_offset = 0; + AddressSpace *as = &s->gicv3->dma_as; + uint8_t cmd; + int i; + + if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) { + return; + } + + wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET); + + if (wr_offset >= s->cq.num_entries) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid write offset " + "%d\n", __func__, wr_offset); + return; + } + + rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET); + + if (rd_offset >= s->cq.num_entries) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid read offset " + "%d\n", __func__, rd_offset); + return; + } + + while (wr_offset != rd_offset) { + ItsCmdResult result = CMD_CONTINUE_OK; + void *hostmem; + hwaddr buflen; + uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS]; + + cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE); + + buflen = GITS_CMDQ_ENTRY_SIZE; + hostmem = address_space_map(as, s->cq.base_addr + cq_offset, + &buflen, false, MEMTXATTRS_UNSPECIFIED); + if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) { + if (hostmem) { + address_space_unmap(as, hostmem, buflen, false, 0); + } + s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: could not read command at 0x%" PRIx64 "\n", + __func__, s->cq.base_addr + cq_offset); + break; + } + for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) { + cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t)); + } + address_space_unmap(as, hostmem, 
buflen, false, 0); + + cmd = cmdpkt[0] & CMD_MASK; + + trace_gicv3_its_process_command(rd_offset, cmd); + + switch (cmd) { + case GITS_CMD_INT: + result = process_its_cmd(s, cmdpkt, INTERRUPT); + break; + case GITS_CMD_CLEAR: + result = process_its_cmd(s, cmdpkt, CLEAR); + break; + case GITS_CMD_SYNC: + /* + * Current implementation makes a blocking synchronous call + * for every command issued earlier, hence the internal state + * is already consistent by the time SYNC command is executed. + * Hence no further processing is required for SYNC command. + */ + trace_gicv3_its_cmd_sync(); + break; + case GITS_CMD_VSYNC: + /* + * VSYNC also is a nop, because our implementation is always + * in sync. + */ + if (!its_feature_virtual(s)) { + result = CMD_CONTINUE; + break; + } + trace_gicv3_its_cmd_vsync(); + break; + case GITS_CMD_MAPD: + result = process_mapd(s, cmdpkt); + break; + case GITS_CMD_MAPC: + result = process_mapc(s, cmdpkt); + break; + case GITS_CMD_MAPTI: + result = process_mapti(s, cmdpkt, false); + break; + case GITS_CMD_MAPI: + result = process_mapti(s, cmdpkt, true); + break; + case GITS_CMD_DISCARD: + result = process_its_cmd(s, cmdpkt, DISCARD); + break; + case GITS_CMD_INV: + result = process_inv(s, cmdpkt); + break; + case GITS_CMD_INVALL: + /* + * Current implementation doesn't cache any ITS tables, + * but the calculated lpi priority information. We only + * need to trigger lpi priority re-calculation to be in + * sync with LPI config table or pending table changes. + * INVALL operates on a collection specified by ICID so + * it only affects physical LPIs. + */ + trace_gicv3_its_cmd_invall(); + for (i = 0; i < s->gicv3->num_cpu; i++) { + gicv3_redist_update_lpi(&s->gicv3->cpu[i]); + } + break; + case GITS_CMD_MOVI: + result = process_movi(s, cmdpkt); + break; + case GITS_CMD_MOVALL: + result = process_movall(s, cmdpkt); + break; + case GITS_CMD_VMAPTI: + result = process_vmapti(s, cmdpkt, false); + break; + case GITS_CMD_VMAPI: + result = process_vmapti(s, cmdpkt, true); + break; + case GITS_CMD_VMAPP: + result = process_vmapp(s, cmdpkt); + break; + case GITS_CMD_VMOVP: + result = process_vmovp(s, cmdpkt); + break; + case GITS_CMD_VMOVI: + result = process_vmovi(s, cmdpkt); + break; + case GITS_CMD_VINVALL: + result = process_vinvall(s, cmdpkt); + break; + default: + trace_gicv3_its_cmd_unknown(cmd); + break; + } + if (result != CMD_STALL) { + /* CMD_CONTINUE or CMD_CONTINUE_OK */ + rd_offset++; + rd_offset %= s->cq.num_entries; + s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset); + } else { + /* CMD_STALL */ + s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: 0x%x cmd processing failed, stalling\n", + __func__, cmd); + break; + } + } +} + +/* + * This function extracts the ITS Device and Collection table specific + * parameters (like base_addr, size etc) from GITS_BASER register. 
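
The consumer loop in process_cmdq() above is a ring-buffer drain: the read offset chases the write offset, wrapping modulo the queue size, and a stalled command freezes the read pointer on the offending slot. A standalone sketch of just that control flow; process_one() and drain_cmdq() are invented stand-ins for the real per-command switch and loop.

    #include <stdint.h>
    #include <stdio.h>

    typedef enum { CMD_STALL, CMD_CONTINUE, CMD_CONTINUE_OK } CmdResult;

    /* Invented stand-in for the per-command switch in process_cmdq() */
    static CmdResult process_one(uint32_t slot)
    {
        return slot == 5 ? CMD_STALL : CMD_CONTINUE_OK;
    }

    /*
     * Returns the final read offset: one past the last consumed slot,
     * or the slot that stalled. Anything other than CMD_STALL advances.
     */
    static uint32_t drain_cmdq(uint32_t rd, uint32_t wr, uint32_t num_entries)
    {
        while (rd != wr) {
            if (process_one(rd) == CMD_STALL) {
                break;                     /* CREADR stays on the bad slot */
            }
            rd = (rd + 1) % num_entries;   /* advance and wrap, like CREADR.OFFSET */
        }
        return rd;
    }

    int main(void)
    {
        printf("stopped at slot %u\n", (unsigned)drain_cmdq(3, 7, 32));  /* prints 5 */
        return 0;
    }
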
+ * It is called during ITS enable and also during post_load migration + */ +static void extract_table_params(GICv3ITSState *s) +{ + uint16_t num_pages = 0; + uint8_t page_sz_type; + uint8_t type; + uint32_t page_sz = 0; + uint64_t value; + + for (int i = 0; i < 8; i++) { + TableDesc *td; + int idbits; + + value = s->baser[i]; + + if (!value) { + continue; + } + + page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE); + + switch (page_sz_type) { + case 0: + page_sz = GITS_PAGE_SIZE_4K; + break; + + case 1: + page_sz = GITS_PAGE_SIZE_16K; + break; + + case 2: + case 3: + page_sz = GITS_PAGE_SIZE_64K; + break; + + default: + g_assert_not_reached(); + } + + num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1; + + type = FIELD_EX64(value, GITS_BASER, TYPE); + + switch (type) { + case GITS_BASER_TYPE_DEVICE: + td = &s->dt; + idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1; + break; + case GITS_BASER_TYPE_COLLECTION: + td = &s->ct; + if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) { + idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1; + } else { + /* 16-bit CollectionId supported when CIL == 0 */ + idbits = 16; + } + break; + case GITS_BASER_TYPE_VPE: + td = &s->vpet; + /* + * For QEMU vPEIDs are always 16 bits. (GICv4.1 allows an + * implementation to implement fewer bits and report this + * via GICD_TYPER2.) + */ + idbits = 16; + break; + default: + /* + * GITS_BASER.TYPE is read-only, so GITS_BASER_RO_MASK + * ensures we will only see type values corresponding to + * the values set up in gicv3_its_reset(). + */ + g_assert_not_reached(); + } + + memset(td, 0, sizeof(*td)); + /* + * If GITS_BASER.Valid is 0 for any then we will not process + * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we + * do not have a special case where the GITS_BASER.Valid bit is 0 + * for the register corresponding to the Collection table but we + * still have to process interrupts using non-memory-backed + * Collection table entries.) + * The specification makes it UNPREDICTABLE to enable the ITS without + * marking each BASER as valid. We choose to handle these as if + * the table was zero-sized, so commands using the table will fail + * and interrupts requested via GITS_TRANSLATER writes will be ignored. + * This happens automatically by leaving the num_entries field at + * zero, which will be caught by the bounds checks we have before + * every table lookup anyway. 
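
The capacity computation this function goes on to do differs for flat and two-level (indirect) tables: a flat table holds one entry per entry_sz bytes, while an indirect table spends the programmed pages on level-1 descriptors, each of which covers a further page of entries; the result is then capped at 1 << idbits. A standalone sketch of the two formulas with worked numbers; the 8-byte descriptor and entry sizes are assumptions for the example, and the idbits cap is omitted.

    #include <stdint.h>
    #include <stdio.h>

    #define EX_L1TABLE_ENTRY_SIZE 8   /* assumed level-1 descriptor size */

    static uint64_t flat_entries(uint64_t num_pages, uint64_t page_sz,
                                 uint64_t entry_sz)
    {
        return (num_pages * page_sz) / entry_sz;
    }

    static uint64_t indirect_entries(uint64_t num_pages, uint64_t page_sz,
                                     uint64_t entry_sz)
    {
        /* each level-1 descriptor covers one further page of real entries */
        return ((num_pages * page_sz) / EX_L1TABLE_ENTRY_SIZE)
               * (page_sz / entry_sz);
    }

    int main(void)
    {
        /* 2 pages of 64K with 8-byte entries */
        printf("flat:     %llu\n", (unsigned long long)flat_entries(2, 65536, 8));
        printf("indirect: %llu\n", (unsigned long long)indirect_entries(2, 65536, 8));
        return 0;
    }
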
+ */ + if (!FIELD_EX64(value, GITS_BASER, VALID)) { + continue; + } + td->page_sz = page_sz; + td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT); + td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1; + td->base_addr = baser_base_addr(value, page_sz); + if (!td->indirect) { + td->num_entries = (num_pages * page_sz) / td->entry_sz; + } else { + td->num_entries = (((num_pages * page_sz) / + L1TABLE_ENTRY_SIZE) * + (page_sz / td->entry_sz)); + } + td->num_entries = MIN(td->num_entries, 1ULL << idbits); + } +} + +static void extract_cmdq_params(GICv3ITSState *s) +{ + uint16_t num_pages = 0; + uint64_t value = s->cbaser; + + num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1; + + memset(&s->cq, 0 , sizeof(s->cq)); + + if (FIELD_EX64(value, GITS_CBASER, VALID)) { + s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) / + GITS_CMDQ_ENTRY_SIZE; + s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR); + s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT; + } +} + +static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset, + uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + /* + * GITS_TRANSLATER is write-only, and all other addresses + * in the interrupt translation space frame are RES0. + */ + *data = 0; + return MEMTX_OK; +} + +static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset, + uint64_t data, unsigned size, + MemTxAttrs attrs) +{ + GICv3ITSState *s = (GICv3ITSState *)opaque; + bool result = true; + + trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id); + + switch (offset) { + case GITS_TRANSLATER: + if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) { + result = do_process_its_cmd(s, attrs.requester_id, data, NONE); + } + break; + default: + break; + } + + if (result) { + return MEMTX_OK; + } else { + return MEMTX_ERROR; + } +} + +static bool its_writel(GICv3ITSState *s, hwaddr offset, + uint64_t value, MemTxAttrs attrs) +{ + bool result = true; + int index; + + switch (offset) { + case GITS_CTLR: + if (value & R_GITS_CTLR_ENABLED_MASK) { + s->ctlr |= R_GITS_CTLR_ENABLED_MASK; + extract_table_params(s); + extract_cmdq_params(s); + process_cmdq(s); + } else { + s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK; + } + break; + case GITS_CBASER: + /* + * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is + * already enabled + */ + if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) { + s->cbaser = deposit64(s->cbaser, 0, 32, value); + s->creadr = 0; + } + break; + case GITS_CBASER + 4: + /* + * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is + * already enabled + */ + if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) { + s->cbaser = deposit64(s->cbaser, 32, 32, value); + s->creadr = 0; + } + break; + case GITS_CWRITER: + s->cwriter = deposit64(s->cwriter, 0, 32, + (value & ~R_GITS_CWRITER_RETRY_MASK)); + if (s->cwriter != s->creadr) { + process_cmdq(s); + } + break; + case GITS_CWRITER + 4: + s->cwriter = deposit64(s->cwriter, 32, 32, value); + break; + case GITS_CREADR: + if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) { + s->creadr = deposit64(s->creadr, 0, 32, + (value & ~R_GITS_CREADR_STALLED_MASK)); + } else { + /* RO register, ignore the write */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest write to RO register at offset " + TARGET_FMT_plx "\n", __func__, offset); + } + break; + case GITS_CREADR + 4: + if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) { + s->creadr = deposit64(s->creadr, 32, 32, value); + } else { + /* RO register, ignore the write */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest write to RO register at offset " + 
TARGET_FMT_plx "\n", __func__, offset); + } + break; + case GITS_BASER ... GITS_BASER + 0x3f: + /* + * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is + * already enabled + */ + if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) { + index = (offset - GITS_BASER) / 8; + + if (s->baser[index] == 0) { + /* Unimplemented GITS_BASERn: RAZ/WI */ + break; + } + if (offset & 7) { + value <<= 32; + value &= ~GITS_BASER_RO_MASK; + s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32); + s->baser[index] |= value; + } else { + value &= ~GITS_BASER_RO_MASK; + s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32); + s->baser[index] |= value; + } + } + break; + case GITS_IIDR: + case GITS_IDREGS ... GITS_IDREGS + 0x2f: + /* RO registers, ignore the write */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest write to RO register at offset " + TARGET_FMT_plx "\n", __func__, offset); + break; + default: + result = false; + break; + } + return result; +} + +static bool its_readl(GICv3ITSState *s, hwaddr offset, + uint64_t *data, MemTxAttrs attrs) +{ + bool result = true; + int index; + + switch (offset) { + case GITS_CTLR: + *data = s->ctlr; + break; + case GITS_IIDR: + *data = gicv3_iidr(); + break; + case GITS_IDREGS ... GITS_IDREGS + 0x2f: + /* ID registers */ + *data = gicv3_idreg(s->gicv3, offset - GITS_IDREGS, GICV3_PIDR0_ITS); + break; + case GITS_TYPER: + *data = extract64(s->typer, 0, 32); + break; + case GITS_TYPER + 4: + *data = extract64(s->typer, 32, 32); + break; + case GITS_CBASER: + *data = extract64(s->cbaser, 0, 32); + break; + case GITS_CBASER + 4: + *data = extract64(s->cbaser, 32, 32); + break; + case GITS_CREADR: + *data = extract64(s->creadr, 0, 32); + break; + case GITS_CREADR + 4: + *data = extract64(s->creadr, 32, 32); + break; + case GITS_CWRITER: + *data = extract64(s->cwriter, 0, 32); + break; + case GITS_CWRITER + 4: + *data = extract64(s->cwriter, 32, 32); + break; + case GITS_BASER ... GITS_BASER + 0x3f: + index = (offset - GITS_BASER) / 8; + if (offset & 7) { + *data = extract64(s->baser[index], 32, 32); + } else { + *data = extract64(s->baser[index], 0, 32); + } + break; + default: + result = false; + break; + } + return result; +} + +static bool its_writell(GICv3ITSState *s, hwaddr offset, + uint64_t value, MemTxAttrs attrs) +{ + bool result = true; + int index; + + switch (offset) { + case GITS_BASER ... 
GITS_BASER + 0x3f: + /* + * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is + * already enabled + */ + if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) { + index = (offset - GITS_BASER) / 8; + if (s->baser[index] == 0) { + /* Unimplemented GITS_BASERn: RAZ/WI */ + break; + } + s->baser[index] &= GITS_BASER_RO_MASK; + s->baser[index] |= (value & ~GITS_BASER_RO_MASK); + } + break; + case GITS_CBASER: + /* + * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is + * already enabled + */ + if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) { + s->cbaser = value; + s->creadr = 0; + } + break; + case GITS_CWRITER: + s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK; + if (s->cwriter != s->creadr) { + process_cmdq(s); + } + break; + case GITS_CREADR: + if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) { + s->creadr = value & ~R_GITS_CREADR_STALLED_MASK; + } else { + /* RO register, ignore the write */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest write to RO register at offset " + TARGET_FMT_plx "\n", __func__, offset); + } + break; + case GITS_TYPER: + /* RO registers, ignore the write */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest write to RO register at offset " + TARGET_FMT_plx "\n", __func__, offset); + break; + default: + result = false; + break; + } + return result; +} + +static bool its_readll(GICv3ITSState *s, hwaddr offset, + uint64_t *data, MemTxAttrs attrs) +{ + bool result = true; + int index; + + switch (offset) { + case GITS_TYPER: + *data = s->typer; + break; + case GITS_BASER ... GITS_BASER + 0x3f: + index = (offset - GITS_BASER) / 8; + *data = s->baser[index]; + break; + case GITS_CBASER: + *data = s->cbaser; + break; + case GITS_CREADR: + *data = s->creadr; + break; + case GITS_CWRITER: + *data = s->cwriter; + break; + default: + result = false; + break; + } + return result; +} + +static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + GICv3ITSState *s = (GICv3ITSState *)opaque; + bool result; + + switch (size) { + case 4: + result = its_readl(s, offset, data, attrs); + break; + case 8: + result = its_readll(s, offset, data, attrs); + break; + default: + result = false; + break; + } + + if (!result) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest read at offset " TARGET_FMT_plx + " size %u\n", __func__, offset, size); + trace_gicv3_its_badread(offset, size); + /* + * The spec requires that reserved registers are RAZ/WI; + * so use false returns from leaf functions as a way to + * trigger the guest-error logging but don't return it to + * the caller, or we'll cause a spurious guest data abort. + */ + *data = 0; + } else { + trace_gicv3_its_read(offset, *data, size); + } + return MEMTX_OK; +} + +static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size, MemTxAttrs attrs) +{ + GICv3ITSState *s = (GICv3ITSState *)opaque; + bool result; + + switch (size) { + case 4: + result = its_writel(s, offset, data, attrs); + break; + case 8: + result = its_writell(s, offset, data, attrs); + break; + default: + result = false; + break; + } + + if (!result) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest write at offset " TARGET_FMT_plx + " size %u\n", __func__, offset, size); + trace_gicv3_its_badwrite(offset, data, size); + /* + * The spec requires that reserved registers are RAZ/WI; + * so use false returns from leaf functions as a way to + * trigger the guest-error logging but don't return it to + * the caller, or we'll cause a spurious guest data abort. 
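
its_readl() and its_writel() above expose each 64-bit register as two 32-bit words, splicing the halves together with extract64()/deposit64(). A standalone sketch of the same pattern; ex_extract64() and ex_deposit64() are minimal stand-ins written for this example (same start/length semantics, none of QEMU's range assertions).

    #include <assert.h>
    #include <stdint.h>

    /* Minimal stand-ins for QEMU's extract64()/deposit64() */
    static uint64_t ex_extract64(uint64_t value, int start, int length)
    {
        return (value >> start) & (~0ULL >> (64 - length));
    }

    static uint64_t ex_deposit64(uint64_t value, int start, int length,
                                 uint64_t field)
    {
        uint64_t mask = (~0ULL >> (64 - length)) << start;

        return (value & ~mask) | ((field << start) & mask);
    }

    int main(void)
    {
        uint64_t cbaser = 0;

        /* Guest writes the 64-bit register as two 32-bit words... */
        cbaser = ex_deposit64(cbaser, 0, 32, 0x8000f000u);
        cbaser = ex_deposit64(cbaser, 32, 32, 0x00000001u);
        assert(cbaser == 0x000000018000f000ULL);

        /* ...and reads it back the same way */
        assert(ex_extract64(cbaser, 0, 32) == 0x8000f000u);
        assert(ex_extract64(cbaser, 32, 32) == 0x1u);
        return 0;
    }
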
+ */ + } else { + trace_gicv3_its_write(offset, data, size); + } + return MEMTX_OK; +} + +static const MemoryRegionOps gicv3_its_control_ops = { + .read_with_attrs = gicv3_its_read, + .write_with_attrs = gicv3_its_write, + .valid.min_access_size = 4, + .valid.max_access_size = 8, + .impl.min_access_size = 4, + .impl.max_access_size = 8, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const MemoryRegionOps gicv3_its_translation_ops = { + .read_with_attrs = gicv3_its_translation_read, + .write_with_attrs = gicv3_its_translation_write, + .valid.min_access_size = 2, + .valid.max_access_size = 4, + .impl.min_access_size = 2, + .impl.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void gicv3_arm_its_realize(DeviceState *dev, Error **errp) +{ + GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev); + int i; + + for (i = 0; i < s->gicv3->num_cpu; i++) { + if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) { + error_setg(errp, "Physical LPI not supported by CPU %d", i); + return; + } + } + + gicv3_add_its(s->gicv3, dev); + + gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops); + + /* set the ITS default features supported */ + s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1); + s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE, + ITS_ITT_ENTRY_SIZE - 1); + s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS); + s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS); + s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1); + s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS); + if (s->gicv3->revision >= 4) { + /* Our VMOVP handles cross-ITS synchronization itself */ + s->typer = FIELD_DP64(s->typer, GITS_TYPER, VMOVP, 1); + s->typer = FIELD_DP64(s->typer, GITS_TYPER, VIRTUAL, 1); + } +} + +static void gicv3_its_reset(DeviceState *dev) +{ + GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev); + GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s); + + c->parent_reset(dev); + + /* Quiescent bit reset to 1 */ + s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1); + + /* + * setting GITS_BASER0.Type = 0b001 (Device) + * GITS_BASER1.Type = 0b100 (Collection Table) + * GITS_BASER2.Type = 0b010 (vPE) for GICv4 and later + * GITS_BASER.Type,where n = 3 to 7 are 0b00 (Unimplemented) + * GITS_BASER<0,1>.Page_Size = 64KB + * and default translation table entry size to 16 bytes + */ + s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE, + GITS_BASER_TYPE_DEVICE); + s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE, + GITS_BASER_PAGESIZE_64K); + s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE, + GITS_DTE_SIZE - 1); + + s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE, + GITS_BASER_TYPE_COLLECTION); + s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE, + GITS_BASER_PAGESIZE_64K); + s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE, + GITS_CTE_SIZE - 1); + + if (its_feature_virtual(s)) { + s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, TYPE, + GITS_BASER_TYPE_VPE); + s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, PAGESIZE, + GITS_BASER_PAGESIZE_64K); + s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, ENTRYSIZE, + GITS_VPE_SIZE - 1); + } +} + +static void gicv3_its_post_load(GICv3ITSState *s) +{ + if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) { + extract_table_params(s); + extract_cmdq_params(s); + } +} + +static Property gicv3_its_props[] = { + DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3", + GICv3State *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void 
gicv3_its_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass); + GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass); + + dc->realize = gicv3_arm_its_realize; + device_class_set_props(dc, gicv3_its_props); + device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset); + icc->post_load = gicv3_its_post_load; +} + +static const TypeInfo gicv3_its_info = { + .name = TYPE_ARM_GICV3_ITS, + .parent = TYPE_ARM_GICV3_ITS_COMMON, + .instance_size = sizeof(GICv3ITSState), + .class_init = gicv3_its_class_init, + .class_size = sizeof(GICv3ITSClass), +}; + +static void gicv3_its_register_types(void) +{ + type_register_static(&gicv3_its_info); +} + +type_init(gicv3_its_register_types) diff --git a/hw/intc/arm_gicv3_its_common.c b/hw/intc/arm_gicv3_its_common.c index 66c4c6a188..90b85f1e25 100644 --- a/hw/intc/arm_gicv3_its_common.c +++ b/hw/intc/arm_gicv3_its_common.c @@ -99,14 +99,15 @@ static const MemoryRegionOps gicv3_its_trans_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops) +void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops, + const MemoryRegionOps *tops) { SysBusDevice *sbd = SYS_BUS_DEVICE(s); memory_region_init_io(&s->iomem_its_cntrl, OBJECT(s), ops, s, "control", ITS_CONTROL_SIZE); memory_region_init_io(&s->iomem_its_translation, OBJECT(s), - &gicv3_its_trans_ops, s, + tops ? tops : &gicv3_its_trans_ops, s, "translation", ITS_TRANS_SIZE); /* Our two regions are always adjacent, therefore we now combine them diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c index b554d2ede0..529c7bd494 100644 --- a/hw/intc/arm_gicv3_its_kvm.c +++ b/hw/intc/arm_gicv3_its_kvm.c @@ -106,7 +106,9 @@ static void kvm_arm_its_realize(DeviceState *dev, Error **errp) kvm_arm_register_device(&s->iomem_its_cntrl, -1, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_ITS_ADDR_TYPE, s->dev_fd, 0); - gicv3_its_init_mmio(s, NULL); + gicv3_add_its(s->gicv3, dev); + + gicv3_its_init_mmio(s, NULL, NULL); if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, GITS_CTLR)) { diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c index 5c09f00dec..3ca643ecba 100644 --- a/hw/intc/arm_gicv3_kvm.c +++ b/hw/intc/arm_gicv3_kvm.c @@ -31,6 +31,8 @@ #include "vgic_common.h" #include "migration/blocker.h" #include "qom/object.h" +#include "target/arm/cpregs.h" + #ifdef DEBUG_GICV3_KVM #define DPRINTF(fmt, ...) \ @@ -671,9 +673,19 @@ static void arm_gicv3_icc_reset(CPUARMState *env, const ARMCPRegInfo *ri) s = c->gic; c->icc_pmr_el1 = 0; - c->icc_bpr[GICV3_G0] = GIC_MIN_BPR; - c->icc_bpr[GICV3_G1] = GIC_MIN_BPR; - c->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR; + /* + * Architecturally the reset value of the ICC_BPR registers + * is UNKNOWN. We set them all to 0 here; when the kernel + * uses these values to program the ICH_VMCR_EL2 fields that + * determine the guest-visible ICC_BPR register values, the + * hardware's "writing a value less than the minimum sets + * the field to the minimum value" behaviour will result in + * them effectively resetting to the correct minimum value + * for the host GIC. 
+ */ + c->icc_bpr[GICV3_G0] = 0; + c->icc_bpr[GICV3_G1] = 0; + c->icc_bpr[GICV3_G1NS] = 0; c->icc_sre_el1 = 0x7; memset(c->icc_apr, 0, sizeof(c->icc_apr)); @@ -733,7 +745,6 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = { */ .resetfn = arm_gicv3_icc_reset, }, - REGINFO_SENTINEL }; /** @@ -781,17 +792,18 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp) return; } + if (s->revision != 3) { + error_setg(errp, "unsupported GIC revision %d for in-kernel GIC", + s->revision); + } + if (s->security_extn) { error_setg(errp, "the in-kernel VGICv3 does not implement the " "security extensions"); return; } - gicv3_init_irqs_and_mmio(s, kvm_arm_gicv3_set_irq, NULL, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } + gicv3_init_irqs_and_mmio(s, kvm_arm_gicv3_set_irq, NULL); for (i = 0; i < s->num_cpu; i++) { ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i)); @@ -829,7 +841,7 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp) KVM_VGIC_V3_ADDR_TYPE_DIST, s->dev_fd, 0); if (!multiple_redist_region_allowed) { - kvm_arm_register_device(&s->iomem_redist[0], -1, + kvm_arm_register_device(&s->redist_regions[0].iomem, -1, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_REDIST, s->dev_fd, 0); } else { @@ -842,7 +854,7 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp) uint64_t addr_ormask = i | ((uint64_t)s->redist_region_count[i] << 52); - kvm_arm_register_device(&s->iomem_redist[i], -1, + kvm_arm_register_device(&s->redist_regions[i].iomem, -1, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, s->dev_fd, addr_ormask); diff --git a/hw/intc/arm_gicv3_redist.c b/hw/intc/arm_gicv3_redist.c index 53da703ed8..c92ceecc16 100644 --- a/hw/intc/arm_gicv3_redist.c +++ b/hw/intc/arm_gicv3_redist.c @@ -60,6 +60,132 @@ static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs, return reg; } +static bool vcpu_resident(GICv3CPUState *cs, uint64_t vptaddr) +{ + /* + * Return true if a vCPU is resident, which is defined by + * whether the GICR_VPENDBASER register is marked VALID and + * has the right virtual pending table address. + */ + if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) { + return false; + } + return vptaddr == (cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK); +} + +/** + * update_for_one_lpi: Update pending information if this LPI is better + * + * @cs: GICv3CPUState + * @irq: interrupt to look up in the LPI Configuration table + * @ctbase: physical address of the LPI Configuration table to use + * @ds: true if priority value should not be shifted + * @hpp: points to pending information to update + * + * Look up @irq in the Configuration table specified by @ctbase + * to see if it is enabled and what its priority is. If it is an + * enabled interrupt with a higher priority than that currently + * recorded in @hpp, update @hpp. 
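
The "higher priority" comparison described here follows the usual GIC convention that a numerically lower value is more urgent, and when the distributor is not in Disable-Security mode the priority read from the configuration table is squeezed into the non-secure half of the range. A standalone sketch of that decode and comparison; the EX_LPI_ENABLED and EX_LPI_PRIORITY_MASK values are assumptions made for the example.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define EX_LPI_ENABLED        0x01   /* assumed: bit 0 of the config byte */
    #define EX_LPI_PRIORITY_MASK  0xfc   /* assumed: bits 7:2 of the config byte */

    /* Effective priority of one config-table byte, or false if disabled */
    static bool lpi_priority(uint8_t cfg, bool ds, uint8_t *prio)
    {
        if (!(cfg & EX_LPI_ENABLED)) {
            return false;                          /* disabled LPI */
        }
        if (ds) {
            *prio = cfg & EX_LPI_PRIORITY_MASK;    /* single security state */
        } else {
            /* NS priorities occupy the less-urgent half of the range */
            *prio = ((cfg & EX_LPI_PRIORITY_MASK) >> 1) | 0x80;
        }
        return true;
    }

    /* Lower value wins; ties go to the lower (or equal) interrupt number */
    static bool better_than(uint8_t prio, int irq, uint8_t best_prio, int best_irq)
    {
        return prio < best_prio || (prio == best_prio && irq <= best_irq);
    }

    int main(void)
    {
        uint8_t p;

        assert(lpi_priority(0x41, false, &p) && p == 0xa0);  /* 0x40 -> 0xa0 */
        assert(!lpi_priority(0x40, false, &p));              /* enable bit clear */
        assert(better_than(0x80, 8200, 0xff, 0));            /* beats "no LPI yet" */
        return 0;
    }
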
+ */ +static void update_for_one_lpi(GICv3CPUState *cs, int irq, + uint64_t ctbase, bool ds, PendingIrq *hpp) +{ + uint8_t lpite; + uint8_t prio; + + address_space_read(&cs->gic->dma_as, + ctbase + ((irq - GICV3_LPI_INTID_START) * sizeof(lpite)), + MEMTXATTRS_UNSPECIFIED, &lpite, sizeof(lpite)); + + if (!(lpite & LPI_CTE_ENABLED)) { + return; + } + + if (ds) { + prio = lpite & LPI_PRIORITY_MASK; + } else { + prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80; + } + + if ((prio < hpp->prio) || + ((prio == hpp->prio) && (irq <= hpp->irq))) { + hpp->irq = irq; + hpp->prio = prio; + /* LPIs and vLPIs are always non-secure Grp1 interrupts */ + hpp->grp = GICV3_G1NS; + } +} + +/** + * update_for_all_lpis: Fully scan LPI tables and find best pending LPI + * + * @cs: GICv3CPUState + * @ptbase: physical address of LPI Pending table + * @ctbase: physical address of LPI Configuration table + * @ptsizebits: size of tables, specified as number of interrupt ID bits minus 1 + * @ds: true if priority value should not be shifted + * @hpp: points to pending information to set + * + * Recalculate the highest priority pending enabled LPI from scratch, + * and set @hpp accordingly. + * + * We scan the LPI pending table @ptbase; for each pending LPI, we read the + * corresponding entry in the LPI configuration table @ctbase to extract + * the priority and enabled information. + * + * We take @ptsizebits in the form idbits-1 because this is the way that + * LPI table sizes are architecturally specified in GICR_PROPBASER.IDBits + * and in the VMAPP command's VPT_size field. + */ +static void update_for_all_lpis(GICv3CPUState *cs, uint64_t ptbase, + uint64_t ctbase, unsigned ptsizebits, + bool ds, PendingIrq *hpp) +{ + AddressSpace *as = &cs->gic->dma_as; + uint8_t pend; + uint32_t pendt_size = (1ULL << (ptsizebits + 1)); + int i, bit; + + hpp->prio = 0xff; + + for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) { + address_space_read(as, ptbase + i, MEMTXATTRS_UNSPECIFIED, &pend, 1); + while (pend) { + bit = ctz32(pend); + update_for_one_lpi(cs, i * 8 + bit, ctbase, ds, hpp); + pend &= ~(1 << bit); + } + } +} + +/** + * set_lpi_pending_bit: Set or clear pending bit for an LPI + * + * @cs: GICv3CPUState + * @ptbase: physical address of LPI Pending table + * @irq: LPI to change pending state for + * @level: false to clear pending state, true to set + * + * Returns true if we needed to do something, false if the pending bit + * was already at @level. + */ +static bool set_pending_table_bit(GICv3CPUState *cs, uint64_t ptbase, + int irq, bool level) +{ + AddressSpace *as = &cs->gic->dma_as; + uint64_t addr = ptbase + irq / 8; + uint8_t pend; + + address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1); + if (extract32(pend, irq % 8, 1) == level) { + /* Bit already at requested state, no action required */ + return false; + } + pend = deposit32(pend, irq % 8, 1, level ? 
1 : 0); + address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1); + return true; +} + static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq) { @@ -100,6 +226,87 @@ static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq, cs->gicr_ipriorityr[irq] = value; } +static void gicv3_redist_update_vlpi_only(GICv3CPUState *cs) +{ + uint64_t ptbase, ctbase, idbits; + + if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) { + cs->hppvlpi.prio = 0xff; + return; + } + + ptbase = cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK; + ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK; + idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER, IDBITS); + + update_for_all_lpis(cs, ptbase, ctbase, idbits, true, &cs->hppvlpi); +} + +static void gicv3_redist_update_vlpi(GICv3CPUState *cs) +{ + gicv3_redist_update_vlpi_only(cs); + gicv3_cpuif_virt_irq_fiq_update(cs); +} + +static void gicr_write_vpendbaser(GICv3CPUState *cs, uint64_t newval) +{ + /* Write @newval to GICR_VPENDBASER, handling its effects */ + bool oldvalid = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID); + bool newvalid = FIELD_EX64(newval, GICR_VPENDBASER, VALID); + bool pendinglast; + + /* + * The DIRTY bit is read-only and for us is always zero; + * other fields are writable. + */ + newval &= R_GICR_VPENDBASER_INNERCACHE_MASK | + R_GICR_VPENDBASER_SHAREABILITY_MASK | + R_GICR_VPENDBASER_PHYADDR_MASK | + R_GICR_VPENDBASER_OUTERCACHE_MASK | + R_GICR_VPENDBASER_PENDINGLAST_MASK | + R_GICR_VPENDBASER_IDAI_MASK | + R_GICR_VPENDBASER_VALID_MASK; + + if (oldvalid && newvalid) { + /* + * Changing other fields while VALID is 1 is UNPREDICTABLE; + * we choose to log and ignore the write. + */ + if (cs->gicr_vpendbaser ^ newval) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Changing GICR_VPENDBASER when VALID=1 " + "is UNPREDICTABLE\n", __func__); + } + return; + } + if (!oldvalid && !newvalid) { + cs->gicr_vpendbaser = newval; + return; + } + + if (newvalid) { + /* + * Valid going from 0 to 1: update hppvlpi from tables. + * If IDAI is 0 we are allowed to use the info we cached in + * the IMPDEF area of the table. + * PendingLast is RES1 when we make this transition. + */ + pendinglast = true; + } else { + /* + * Valid going from 1 to 0: + * Set PendingLast if there was a pending enabled interrupt + * for the vPE that was just descheduled. + * If we cache info in the IMPDEF area, write it out here. + */ + pendinglast = cs->hppvlpi.prio != 0xff; + } + + newval = FIELD_DP64(newval, GICR_VPENDBASER, PENDINGLAST, pendinglast); + cs->gicr_vpendbaser = newval; + gicv3_redist_update_vlpi(cs); +} + static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset, uint64_t *data, MemTxAttrs attrs) { @@ -234,7 +441,24 @@ static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset, *data = cs->gicr_nsacr; return MEMTX_OK; case GICR_IDREGS ... GICR_IDREGS + 0x2f: - *data = gicv3_idreg(offset - GICR_IDREGS); + *data = gicv3_idreg(cs->gic, offset - GICR_IDREGS, GICV3_PIDR0_REDIST); + return MEMTX_OK; + /* + * VLPI frame registers. We don't need a version check for + * VPROPBASER and VPENDBASER because gicv3_redist_size() will + * prevent pre-v4 GIC from passing us offsets this high. 
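
The pending-table walk in update_for_all_lpis() above peels set bits off each byte with ctz32(), so only interrupt numbers that are actually pending get looked up in the configuration table. That inner loop is a generic visit-every-set-bit idiom; here is a standalone sketch of it, substituting the GCC/Clang __builtin_ctz() for QEMU's ctz32().

    #include <stdint.h>
    #include <stdio.h>

    /* Call cb(irq) for every bit set in an in-memory pending bitmap */
    static void for_each_set_bit(const uint8_t *bitmap, unsigned nbytes,
                                 void (*cb)(unsigned irq))
    {
        for (unsigned i = 0; i < nbytes; i++) {
            uint8_t pend = bitmap[i];

            while (pend) {
                int bit = __builtin_ctz(pend);   /* lowest set bit, like ctz32() */

                cb(i * 8 + bit);
                pend &= pend - 1;                /* clear that bit and continue */
            }
        }
    }

    static void print_irq(unsigned irq)
    {
        printf("pending: %u\n", irq);
    }

    int main(void)
    {
        /* bits 1 and 3 of byte 0, bit 7 of byte 2 => IRQs 1, 3 and 23 */
        const uint8_t pendtable[3] = { 0x0a, 0x00, 0x80 };

        for_each_set_bit(pendtable, sizeof(pendtable), print_irq);
        return 0;
    }
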
+ */ + case GICR_VPROPBASER: + *data = extract64(cs->gicr_vpropbaser, 0, 32); + return MEMTX_OK; + case GICR_VPROPBASER + 4: + *data = extract64(cs->gicr_vpropbaser, 32, 32); + return MEMTX_OK; + case GICR_VPENDBASER: + *data = extract64(cs->gicr_vpendbaser, 0, 32); + return MEMTX_OK; + case GICR_VPENDBASER + 4: + *data = extract64(cs->gicr_vpendbaser, 32, 32); return MEMTX_OK; default: return MEMTX_ERROR; @@ -248,17 +472,27 @@ static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset, case GICR_CTLR: /* For our implementation, GICR_TYPER.DPGS is 0 and so all * the DPG bits are RAZ/WI. We don't do anything asynchronously, - * so UWP and RWP are RAZ/WI. And GICR_TYPER.LPIS is 0 (we don't - * implement LPIs) so Enable_LPIs is RES0. So there are no writable - * bits for us. + * so UWP and RWP are RAZ/WI. GICR_TYPER.LPIS is 1 (we + * implement LPIs) so Enable_LPIs is programmable. */ + if (cs->gicr_typer & GICR_TYPER_PLPIS) { + if (value & GICR_CTLR_ENABLE_LPIS) { + cs->gicr_ctlr |= GICR_CTLR_ENABLE_LPIS; + /* Check for any pending interr in pending table */ + gicv3_redist_update_lpi(cs); + } else { + cs->gicr_ctlr &= ~GICR_CTLR_ENABLE_LPIS; + /* cs->hppi might have been an LPI; recalculate */ + gicv3_redist_update(cs); + } + } return MEMTX_OK; case GICR_STATUSR: /* RAZ/WI for our implementation */ return MEMTX_OK; case GICR_WAKER: - /* Only the ProcessorSleep bit is writeable. When the guest sets - * it it requests that we transition the channel between the + /* Only the ProcessorSleep bit is writable. When the guest sets + * it, it requests that we transition the channel between the * redistributor and the cpu interface to quiescent, and that * we set the ChildrenAsleep bit once the inteface has reached the * quiescent state. @@ -369,6 +603,23 @@ static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset, "%s: invalid guest write to RO register at offset " TARGET_FMT_plx "\n", __func__, offset); return MEMTX_OK; + /* + * VLPI frame registers. We don't need a version check for + * VPROPBASER and VPENDBASER because gicv3_redist_size() will + * prevent pre-v4 GIC from passing us offsets this high. + */ + case GICR_VPROPBASER: + cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 0, 32, value); + return MEMTX_OK; + case GICR_VPROPBASER + 4: + cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 32, 32, value); + return MEMTX_OK; + case GICR_VPENDBASER: + gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 0, 32, value)); + return MEMTX_OK; + case GICR_VPENDBASER + 4: + gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 32, 32, value)); + return MEMTX_OK; default: return MEMTX_ERROR; } @@ -387,6 +638,17 @@ static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset, case GICR_PENDBASER: *data = cs->gicr_pendbaser; return MEMTX_OK; + /* + * VLPI frame registers. We don't need a version check for + * VPROPBASER and VPENDBASER because gicv3_redist_size() will + * prevent pre-v4 GIC from passing us offsets this high. + */ + case GICR_VPROPBASER: + *data = cs->gicr_vpropbaser; + return MEMTX_OK; + case GICR_VPENDBASER: + *data = cs->gicr_vpendbaser; + return MEMTX_OK; default: return MEMTX_ERROR; } @@ -408,6 +670,17 @@ static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset, "%s: invalid guest write to RO register at offset " TARGET_FMT_plx "\n", __func__, offset); return MEMTX_OK; + /* + * VLPI frame registers. We don't need a version check for + * VPROPBASER and VPENDBASER because gicv3_redist_size() will + * prevent pre-v4 GIC from passing us offsets this high. 
+ */ + case GICR_VPROPBASER: + cs->gicr_vpropbaser = value; + return MEMTX_OK; + case GICR_VPENDBASER: + gicr_write_vpendbaser(cs, value); + return MEMTX_OK; default: return MEMTX_ERROR; } @@ -416,22 +689,24 @@ static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset, MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data, unsigned size, MemTxAttrs attrs) { - GICv3State *s = opaque; + GICv3RedistRegion *region = opaque; + GICv3State *s = region->gic; GICv3CPUState *cs; MemTxResult r; int cpuidx; assert((offset & (size - 1)) == 0); - /* This region covers all the redistributor pages; there are - * (for GICv3) two 64K pages per CPU. At the moment they are - * all contiguous (ie in this one region), though we might later - * want to allow splitting of redistributor pages into several - * blocks so we can support more CPUs. + /* + * There are (for GICv3) two 64K redistributor pages per CPU. + * In some cases the redistributor pages for all CPUs are not + * contiguous (eg on the virt board they are split into two + * parts if there are too many CPUs to all fit in the same place + * in the memory map); if so then the GIC has multiple MemoryRegions + * for the redistributors. */ - cpuidx = offset / 0x20000; - offset %= 0x20000; - assert(cpuidx < s->num_cpu); + cpuidx = region->cpuidx + offset / gicv3_redist_size(s); + offset %= gicv3_redist_size(s); cs = &s->cpu[cpuidx]; @@ -450,7 +725,7 @@ MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data, break; } - if (r == MEMTX_ERROR) { + if (r != MEMTX_OK) { qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid guest read at offset " TARGET_FMT_plx " size %u\n", __func__, offset, size); @@ -473,22 +748,24 @@ MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data, MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data, unsigned size, MemTxAttrs attrs) { - GICv3State *s = opaque; + GICv3RedistRegion *region = opaque; + GICv3State *s = region->gic; GICv3CPUState *cs; MemTxResult r; int cpuidx; assert((offset & (size - 1)) == 0); - /* This region covers all the redistributor pages; there are - * (for GICv3) two 64K pages per CPU. At the moment they are - * all contiguous (ie in this one region), though we might later - * want to allow splitting of redistributor pages into several - * blocks so we can support more CPUs. + /* + * There are (for GICv3) two 64K redistributor pages per CPU. + * In some cases the redistributor pages for all CPUs are not + * contiguous (eg on the virt board they are split into two + * parts if there are too many CPUs to all fit in the same place + * in the memory map); if so then the GIC has multiple MemoryRegions + * for the redistributors. 
*/ - cpuidx = offset / 0x20000; - offset %= 0x20000; - assert(cpuidx < s->num_cpu); + cpuidx = region->cpuidx + offset / gicv3_redist_size(s); + offset %= gicv3_redist_size(s); cs = &s->cpu[cpuidx]; @@ -507,7 +784,7 @@ MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data, break; } - if (r == MEMTX_ERROR) { + if (r != MEMTX_OK) { qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid guest write at offset " TARGET_FMT_plx " size %u\n", __func__, offset, size); @@ -526,6 +803,313 @@ MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data, return r; } +static void gicv3_redist_check_lpi_priority(GICv3CPUState *cs, int irq) +{ + uint64_t lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK; + + update_for_one_lpi(cs, irq, lpict_baddr, + cs->gic->gicd_ctlr & GICD_CTLR_DS, + &cs->hpplpi); +} + +void gicv3_redist_update_lpi_only(GICv3CPUState *cs) +{ + /* + * This function scans the LPI pending table and for each pending + * LPI, reads the corresponding entry from LPI configuration table + * to extract the priority info and determine if the current LPI + * priority is lower than the last computed high priority lpi interrupt. + * If yes, replace current LPI as the new high priority lpi interrupt. + */ + uint64_t lpipt_baddr, lpict_baddr; + uint64_t idbits; + + idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS), + GICD_TYPER_IDBITS); + + if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) { + return; + } + + lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK; + lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK; + + update_for_all_lpis(cs, lpipt_baddr, lpict_baddr, idbits, + cs->gic->gicd_ctlr & GICD_CTLR_DS, &cs->hpplpi); +} + +void gicv3_redist_update_lpi(GICv3CPUState *cs) +{ + gicv3_redist_update_lpi_only(cs); + gicv3_redist_update(cs); +} + +void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level) +{ + /* + * This function updates the pending bit in lpi pending table for + * the irq being activated or deactivated. + */ + uint64_t lpipt_baddr; + + lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK; + if (!set_pending_table_bit(cs, lpipt_baddr, irq, level)) { + /* no change in the value of pending bit, return */ + return; + } + + /* + * check if this LPI is better than the current hpplpi, if yes + * just set hpplpi.prio and .irq without doing a full rescan + */ + if (level) { + gicv3_redist_check_lpi_priority(cs, irq); + gicv3_redist_update(cs); + } else { + if (irq == cs->hpplpi.irq) { + gicv3_redist_update_lpi(cs); + } + } +} + +void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level) +{ + uint64_t idbits; + + idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS), + GICD_TYPER_IDBITS); + + if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) || + (irq > (1ULL << (idbits + 1)) - 1) || irq < GICV3_LPI_INTID_START) { + return; + } + + /* set/clear the pending bit for this irq */ + gicv3_redist_lpi_pending(cs, irq, level); +} + +void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq) +{ + /* + * The only cached information for LPIs we have is the HPPLPI. + * We could be cleverer about identifying when we don't need + * to do a full rescan of the pending table, but until we find + * this is a performance issue, just always recalculate. + */ + gicv3_redist_update_lpi(cs); +} + +void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq) +{ + /* + * Move the specified LPI's pending state from the source redistributor + * to the destination. 
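
Both redistributor MMIO handlers above convert an offset within one redistributor region into a CPU index plus a per-CPU register offset. A standalone sketch of that split, assuming the GICv3 case of two 64 KiB frames (128 KiB) per CPU; ExRedistRegion and decode_redist_offset() are simplified stand-ins, not the structures used by the patch.

    #include <assert.h>
    #include <stdint.h>

    #define EX_REDIST_SIZE 0x20000u   /* assumed: two 64K frames per CPU (GICv3) */

    typedef struct {
        unsigned cpuidx;   /* CPU index of the first redistributor in the region */
    } ExRedistRegion;

    static void decode_redist_offset(const ExRedistRegion *region, uint64_t offset,
                                     unsigned *cpuidx, uint64_t *regoff)
    {
        *cpuidx = region->cpuidx + offset / EX_REDIST_SIZE;
        *regoff = offset % EX_REDIST_SIZE;
    }

    int main(void)
    {
        ExRedistRegion high_region = { .cpuidx = 123 };  /* e.g. a second, split-off region */
        unsigned cpu;
        uint64_t off;

        /* Register at offset 0x100 of the third CPU served by this region */
        decode_redist_offset(&high_region, 2 * EX_REDIST_SIZE + 0x100, &cpu, &off);
        assert(cpu == 125 && off == 0x100);
        return 0;
    }
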
+ * + * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE: + * we choose to NOP. If LPIs are disabled on source there's nothing + * to be transferred anyway. + */ + uint64_t idbits; + uint32_t pendt_size; + uint64_t src_baddr; + + if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) || + !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) { + return; + } + + idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS), + GICD_TYPER_IDBITS); + idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS), + idbits); + + pendt_size = 1ULL << (idbits + 1); + if ((irq / 8) >= pendt_size) { + return; + } + + src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK; + + if (!set_pending_table_bit(src, src_baddr, irq, 0)) { + /* Not pending on source, nothing to do */ + return; + } + if (irq == src->hpplpi.irq) { + /* + * We just made this LPI not-pending so only need to update + * if it was previously the highest priority pending LPI + */ + gicv3_redist_update_lpi(src); + } + /* Mark it pending on the destination */ + gicv3_redist_lpi_pending(dest, irq, 1); +} + +void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest) +{ + /* + * We must move all pending LPIs from the source redistributor + * to the destination. That is, for every pending LPI X on + * src, we must set it not-pending on src and pending on dest. + * LPIs that are already pending on dest are not cleared. + * + * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE: + * we choose to NOP. If LPIs are disabled on source there's nothing + * to be transferred anyway. + */ + AddressSpace *as = &src->gic->dma_as; + uint64_t idbits; + uint32_t pendt_size; + uint64_t src_baddr, dest_baddr; + int i; + + if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) || + !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) { + return; + } + + idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS), + GICD_TYPER_IDBITS); + idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS), + idbits); + + pendt_size = 1ULL << (idbits + 1); + src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK; + dest_baddr = dest->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK; + + for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) { + uint8_t src_pend, dest_pend; + + address_space_read(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED, + &src_pend, sizeof(src_pend)); + if (!src_pend) { + continue; + } + address_space_read(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED, + &dest_pend, sizeof(dest_pend)); + dest_pend |= src_pend; + src_pend = 0; + address_space_write(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED, + &src_pend, sizeof(src_pend)); + address_space_write(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED, + &dest_pend, sizeof(dest_pend)); + } + + gicv3_redist_update_lpi(src); + gicv3_redist_update_lpi(dest); +} + +void gicv3_redist_vlpi_pending(GICv3CPUState *cs, int irq, int level) +{ + /* + * Change the pending state of the specified vLPI. + * Unlike gicv3_redist_process_vlpi(), we know here that the + * vCPU is definitely resident on this redistributor, and that + * the irq is in range. 
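
At its core the MOVALL transfer above is "move every set bit from one bitmap to another, preserving bits already set in the destination". A standalone sketch of that merge on plain in-memory buffers; the real code performs the same OR-and-clear through DMA reads and writes of the guest's pending tables.

    #include <assert.h>
    #include <stdint.h>

    /* OR src into dest and clear src, one byte at a time */
    static void move_all_pending(uint8_t *src, uint8_t *dest, unsigned nbytes)
    {
        for (unsigned i = 0; i < nbytes; i++) {
            if (!src[i]) {
                continue;           /* nothing pending in this byte */
            }
            dest[i] |= src[i];      /* bits already pending on dest survive */
            src[i] = 0;             /* no longer pending on the source */
        }
    }

    int main(void)
    {
        uint8_t src[2]  = { 0x05, 0x10 };
        uint8_t dest[2] = { 0x02, 0x00 };

        move_all_pending(src, dest, sizeof(src));
        assert(dest[0] == 0x07 && dest[1] == 0x10);
        assert(src[0] == 0x00 && src[1] == 0x00);
        return 0;
    }
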
+ */ + uint64_t vptbase, ctbase; + + vptbase = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, PHYADDR) << 16; + + if (set_pending_table_bit(cs, vptbase, irq, level)) { + if (level) { + /* Check whether this vLPI is now the best */ + ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK; + update_for_one_lpi(cs, irq, ctbase, true, &cs->hppvlpi); + gicv3_cpuif_virt_irq_fiq_update(cs); + } else { + /* Only need to recalculate if this was previously the best vLPI */ + if (irq == cs->hppvlpi.irq) { + gicv3_redist_update_vlpi(cs); + } + } + } +} + +void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr, + int doorbell, int level) +{ + bool bit_changed; + bool resident = vcpu_resident(cs, vptaddr); + uint64_t ctbase; + + if (resident) { + uint32_t idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER, IDBITS); + if (irq >= (1ULL << (idbits + 1))) { + return; + } + } + + bit_changed = set_pending_table_bit(cs, vptaddr, irq, level); + if (resident && bit_changed) { + if (level) { + /* Check whether this vLPI is now the best */ + ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK; + update_for_one_lpi(cs, irq, ctbase, true, &cs->hppvlpi); + gicv3_cpuif_virt_irq_fiq_update(cs); + } else { + /* Only need to recalculate if this was previously the best vLPI */ + if (irq == cs->hppvlpi.irq) { + gicv3_redist_update_vlpi(cs); + } + } + } + + if (!resident && level && doorbell != INTID_SPURIOUS && + (cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) { + /* vCPU is not currently resident: ring the doorbell */ + gicv3_redist_process_lpi(cs, doorbell, 1); + } +} + +void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr, + GICv3CPUState *dest, uint64_t dest_vptaddr, + int irq, int doorbell) +{ + /* + * Move the specified vLPI's pending state from the source redistributor + * to the destination. + */ + if (!set_pending_table_bit(src, src_vptaddr, irq, 0)) { + /* Not pending on source, nothing to do */ + return; + } + if (vcpu_resident(src, src_vptaddr) && irq == src->hppvlpi.irq) { + /* + * Update src's cached highest-priority pending vLPI if we just made + * it not-pending + */ + gicv3_redist_update_vlpi(src); + } + /* + * Mark the vLPI pending on the destination (ringing the doorbell + * if the vCPU isn't resident) + */ + gicv3_redist_process_vlpi(dest, irq, dest_vptaddr, doorbell, irq); +} + +void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr) +{ + if (!vcpu_resident(cs, vptaddr)) { + /* We don't have anything cached if the vCPU isn't resident */ + return; + } + + /* Otherwise, our only cached information is the HPPVLPI info */ + gicv3_redist_update_vlpi(cs); +} + +void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr) +{ + /* + * The only cached information for LPIs we have is the HPPLPI. + * We could be cleverer about identifying when we don't need + * to do a full rescan of the pending table, but until we find + * this is a performance issue, just always recalculate. 
+ */ + gicv3_redist_vinvall(cs, vptaddr); +} + void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level) { /* Update redistributor state for a change in an external PPI input line */ diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c index 1e7ddcb94c..1f7763964c 100644 --- a/hw/intc/armv7m_nvic.c +++ b/hw/intc/armv7m_nvic.c @@ -2389,8 +2389,15 @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr, startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */ for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) { + /* + * Note that if the input line is still held high and the interrupt + * is not active then rule R_CVJS requires that the Pending state + * remains set; in that case we mustn't let it be cleared. + */ if (value & (1 << i) && - (attrs.secure || s->itns[startvec + i])) { + (attrs.secure || s->itns[startvec + i]) && + !(setval == 0 && s->vectors[startvec + i].level && + !s->vectors[startvec + i].active)) { s->vectors[startvec + i].pending = setval; } } @@ -2470,172 +2477,6 @@ static const MemoryRegionOps nvic_sysreg_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size, - MemTxAttrs attrs) -{ - MemoryRegion *mr = opaque; - - if (attrs.secure) { - /* S accesses to the alias act like NS accesses to the real region */ - attrs.secure = 0; - return memory_region_dispatch_write(mr, addr, value, - size_memop(size) | MO_TE, attrs); - } else { - /* NS attrs are RAZ/WI for privileged, and BusFault for user */ - if (attrs.user) { - return MEMTX_ERROR; - } - return MEMTX_OK; - } -} - -static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr, - uint64_t *data, unsigned size, - MemTxAttrs attrs) -{ - MemoryRegion *mr = opaque; - - if (attrs.secure) { - /* S accesses to the alias act like NS accesses to the real region */ - attrs.secure = 0; - return memory_region_dispatch_read(mr, addr, data, - size_memop(size) | MO_TE, attrs); - } else { - /* NS attrs are RAZ/WI for privileged, and BusFault for user */ - if (attrs.user) { - return MEMTX_ERROR; - } - *data = 0; - return MEMTX_OK; - } -} - -static const MemoryRegionOps nvic_sysreg_ns_ops = { - .read_with_attrs = nvic_sysreg_ns_read, - .write_with_attrs = nvic_sysreg_ns_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static MemTxResult nvic_systick_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size, - MemTxAttrs attrs) -{ - NVICState *s = opaque; - MemoryRegion *mr; - - /* Direct the access to the correct systick */ - mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); - return memory_region_dispatch_write(mr, addr, value, - size_memop(size) | MO_TE, attrs); -} - -static MemTxResult nvic_systick_read(void *opaque, hwaddr addr, - uint64_t *data, unsigned size, - MemTxAttrs attrs) -{ - NVICState *s = opaque; - MemoryRegion *mr; - - /* Direct the access to the correct systick */ - mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); - return memory_region_dispatch_read(mr, addr, data, size_memop(size) | MO_TE, - attrs); -} - -static const MemoryRegionOps nvic_systick_ops = { - .read_with_attrs = nvic_systick_read, - .write_with_attrs = nvic_systick_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - - -static MemTxResult ras_read(void *opaque, hwaddr addr, - uint64_t *data, unsigned size, - MemTxAttrs attrs) -{ - if (attrs.user) { - return MEMTX_ERROR; - } - - switch (addr) { - case 0xe10: /* ERRIIDR */ - /* architect field = Arm; 
product/variant/revision 0 */ - *data = 0x43b; - break; - case 0xfc8: /* ERRDEVID */ - /* Minimal RAS: we implement 0 error record indexes */ - *data = 0; - break; - default: - qemu_log_mask(LOG_UNIMP, "Read RAS register offset 0x%x\n", - (uint32_t)addr); - *data = 0; - break; - } - return MEMTX_OK; -} - -static MemTxResult ras_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size, - MemTxAttrs attrs) -{ - if (attrs.user) { - return MEMTX_ERROR; - } - - switch (addr) { - default: - qemu_log_mask(LOG_UNIMP, "Write to RAS register offset 0x%x\n", - (uint32_t)addr); - break; - } - return MEMTX_OK; -} - -static const MemoryRegionOps ras_ops = { - .read_with_attrs = ras_read, - .write_with_attrs = ras_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -/* - * Unassigned portions of the PPB space are RAZ/WI for privileged - * accesses, and fault for non-privileged accesses. - */ -static MemTxResult ppb_default_read(void *opaque, hwaddr addr, - uint64_t *data, unsigned size, - MemTxAttrs attrs) -{ - qemu_log_mask(LOG_UNIMP, "Read of unassigned area of PPB: offset 0x%x\n", - (uint32_t)addr); - if (attrs.user) { - return MEMTX_ERROR; - } - *data = 0; - return MEMTX_OK; -} - -static MemTxResult ppb_default_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size, - MemTxAttrs attrs) -{ - qemu_log_mask(LOG_UNIMP, "Write of unassigned area of PPB: offset 0x%x\n", - (uint32_t)addr); - if (attrs.user) { - return MEMTX_ERROR; - } - return MEMTX_OK; -} - -static const MemoryRegionOps ppb_default_ops = { - .read_with_attrs = ppb_default_read, - .write_with_attrs = ppb_default_write, - .endianness = DEVICE_NATIVE_ENDIAN, - .valid.min_access_size = 1, - .valid.max_access_size = 8, -}; - static int nvic_post_load(void *opaque, int version_id) { NVICState *s = opaque; @@ -2851,108 +2692,14 @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp) s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2; - if (!sysbus_realize(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), errp)) { - return; - } - sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), 0, - qdev_get_gpio_in_named(dev, "systick-trigger", - M_REG_NS)); - - if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) { - /* We couldn't init the secure systick device in instance_init - * as we didn't know then if the CPU had the security extensions; - * so we have to do it here. - */ - object_initialize_child(OBJECT(dev), "systick-reg-s", - &s->systick[M_REG_S], TYPE_SYSTICK); - - if (!sysbus_realize(SYS_BUS_DEVICE(&s->systick[M_REG_S]), errp)) { - return; - } - sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_S]), 0, - qdev_get_gpio_in_named(dev, "systick-trigger", - M_REG_S)); - } - /* - * This device provides a single sysbus memory region which - * represents the whole of the "System PPB" space. This is the - * range from 0xe0000000 to 0xe00fffff and includes the NVIC, - * the System Control Space (system registers), the systick timer, - * and for CPUs with the Security extension an NS banked version - * of all of these. - * - * The default behaviour for unimplemented registers/ranges - * (for instance the Data Watchpoint and Trace unit at 0xe0001000) - * is to RAZ/WI for privileged access and BusFault for non-privileged - * access. 
- * - * The NVIC and System Control Space (SCS) starts at 0xe000e000 - * and looks like this: - * 0x004 - ICTR - * 0x010 - 0xff - systick - * 0x100..0x7ec - NVIC - * 0x7f0..0xcff - Reserved - * 0xd00..0xd3c - SCS registers - * 0xd40..0xeff - Reserved or Not implemented - * 0xf00 - STIR - * - * Some registers within this space are banked between security states. - * In v8M there is a second range 0xe002e000..0xe002efff which is the - * NonSecure alias SCS; secure accesses to this behave like NS accesses - * to the main SCS range, and non-secure accesses (including when - * the security extension is not implemented) are RAZ/WI. - * Note that both the main SCS range and the alias range are defined - * to be exempt from memory attribution (R_BLJT) and so the memory - * transaction attribute always matches the current CPU security - * state (attrs.secure == env->v7m.secure). In the nvic_sysreg_ns_ops - * wrappers we change attrs.secure to indicate the NS access; so - * generally code determining which banked register to use should - * use attrs.secure; code determining actual behaviour of the system - * should use env->v7m.secure. - * - * The container covers the whole PPB space. Within it the priority - * of overlapping regions is: - * - default region (for RAZ/WI and BusFault) : -1 - * - system register regions : 0 - * - systick : 1 - * This is because the systick device is a small block of registers - * in the middle of the other system control registers. + * This device provides a single memory region which covers the + * sysreg/NVIC registers from 0xE000E000 .. 0xE000EFFF, with the + * exception of the systick timer registers 0xE000E010 .. 0xE000E0FF. */ - memory_region_init(&s->container, OBJECT(s), "nvic", 0x100000); - memory_region_init_io(&s->defaultmem, OBJECT(s), &ppb_default_ops, s, - "nvic-default", 0x100000); - memory_region_add_subregion_overlap(&s->container, 0, &s->defaultmem, -1); memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s, "nvic_sysregs", 0x1000); - memory_region_add_subregion(&s->container, 0xe000, &s->sysregmem); - - memory_region_init_io(&s->systickmem, OBJECT(s), - &nvic_systick_ops, s, - "nvic_systick", 0xe0); - - memory_region_add_subregion_overlap(&s->container, 0xe010, - &s->systickmem, 1); - - if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) { - memory_region_init_io(&s->sysreg_ns_mem, OBJECT(s), - &nvic_sysreg_ns_ops, &s->sysregmem, - "nvic_sysregs_ns", 0x1000); - memory_region_add_subregion(&s->container, 0x2e000, &s->sysreg_ns_mem); - memory_region_init_io(&s->systick_ns_mem, OBJECT(s), - &nvic_sysreg_ns_ops, &s->systickmem, - "nvic_systick_ns", 0xe0); - memory_region_add_subregion_overlap(&s->container, 0x2e010, - &s->systick_ns_mem, 1); - } - - if (cpu_isar_feature(aa32_ras, s->cpu)) { - memory_region_init_io(&s->ras_mem, OBJECT(s), - &ras_ops, s, "nvic_ras", 0x1000); - memory_region_add_subregion(&s->container, 0x5000, &s->ras_mem); - } - - sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->sysregmem); } static void armv7m_nvic_instance_init(Object *obj) @@ -2961,12 +2708,6 @@ static void armv7m_nvic_instance_init(Object *obj) NVICState *nvic = NVIC(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - object_initialize_child(obj, "systick-reg-ns", &nvic->systick[M_REG_NS], - TYPE_SYSTICK); - /* We can't initialize the secure systick here, as we don't know - * yet if we need it. 
- */ - sysbus_init_irq(sbd, &nvic->excpout); qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1); qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger", diff --git a/hw/intc/bcm2835_ic.c b/hw/intc/bcm2835_ic.c index 9000d995e8..4513fad16f 100644 --- a/hw/intc/bcm2835_ic.c +++ b/hw/intc/bcm2835_ic.c @@ -227,7 +227,7 @@ static void bcm2835_ic_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_bcm2835_ic; } -static TypeInfo bcm2835_ic_info = { +static const TypeInfo bcm2835_ic_info = { .name = TYPE_BCM2835_IC, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(BCM2835ICState), diff --git a/hw/intc/bcm2836_control.c b/hw/intc/bcm2836_control.c index 2ead76ffdc..b0589df188 100644 --- a/hw/intc/bcm2836_control.c +++ b/hw/intc/bcm2836_control.c @@ -392,7 +392,7 @@ static void bcm2836_control_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_bcm2836_control; } -static TypeInfo bcm2836_control_info = { +static const TypeInfo bcm2836_control_info = { .name = TYPE_BCM2836_CONTROL, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(BCM2836ControlState), diff --git a/hw/intc/exynos4210_combiner.c b/hw/intc/exynos4210_combiner.c index 4534ee248d..a289510bdb 100644 --- a/hw/intc/exynos4210_combiner.c +++ b/hw/intc/exynos4210_combiner.c @@ -31,7 +31,7 @@ #include "hw/sysbus.h" #include "migration/vmstate.h" #include "qemu/module.h" - +#include "hw/intc/exynos4210_combiner.h" #include "hw/arm/exynos4210.h" #include "hw/hw.h" #include "hw/irq.h" @@ -48,36 +48,7 @@ #define DPRINTF(fmt, ...) do {} while (0) #endif -#define IIC_NGRP 64 /* Internal Interrupt Combiner - Groups number */ -#define IIC_NIRQ (IIC_NGRP * 8)/* Internal Interrupt Combiner - Interrupts number */ #define IIC_REGION_SIZE 0x108 /* Size of memory mapped region */ -#define IIC_REGSET_SIZE 0x41 - -/* - * State for each output signal of internal combiner - */ -typedef struct CombinerGroupState { - uint8_t src_mask; /* 1 - source enabled, 0 - disabled */ - uint8_t src_pending; /* Pending source interrupts before masking */ -} CombinerGroupState; - -#define TYPE_EXYNOS4210_COMBINER "exynos4210.combiner" -OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210CombinerState, EXYNOS4210_COMBINER) - -struct Exynos4210CombinerState { - SysBusDevice parent_obj; - - MemoryRegion iomem; - - struct CombinerGroupState group[IIC_NGRP]; - uint32_t reg_set[IIC_REGSET_SIZE]; - uint32_t icipsr[2]; - uint32_t external; /* 1 means that this combiner is external */ - - qemu_irq output_irq[IIC_NGRP]; -}; static const VMStateDescription vmstate_exynos4210_combiner_group_state = { .name = "exynos4210.combiner.groupstate", @@ -105,83 +76,6 @@ static const VMStateDescription vmstate_exynos4210_combiner = { } }; -/* - * Get Combiner input GPIO into irqs structure - */ -void exynos4210_combiner_get_gpioin(Exynos4210Irq *irqs, DeviceState *dev, - int ext) -{ - int n; - int bit; - int max; - qemu_irq *irq; - - max = ext ? EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ : - EXYNOS4210_MAX_INT_COMBINER_IN_IRQ; - irq = ext ? irqs->ext_combiner_irq : irqs->int_combiner_irq; - - /* - * Some IRQs of Int/External Combiner are going to two Combiners groups, - * so let split them. - */ - for (n = 0; n < max; n++) { - - bit = EXYNOS4210_COMBINER_GET_BIT_NUM(n); - - switch (n) { - /* MDNIE_LCD1 INTG1 */ - case EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 0) ... 
- EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 3): - irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), - irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(0, bit + 4)]); - continue; - - /* TMU INTG3 */ - case EXYNOS4210_COMBINER_GET_IRQ_NUM(3, 4): - irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), - irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(2, bit)]); - continue; - - /* LCD1 INTG12 */ - case EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 0) ... - EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 3): - irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), - irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(11, bit + 4)]); - continue; - - /* Multi-Core Timer INTG12 */ - case EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 4) ... - EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 8): - irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), - irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(1, bit + 4)]); - continue; - - /* Multi-Core Timer INTG35 */ - case EXYNOS4210_COMBINER_GET_IRQ_NUM(35, 4) ... - EXYNOS4210_COMBINER_GET_IRQ_NUM(35, 8): - irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), - irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(1, bit + 4)]); - continue; - - /* Multi-Core Timer INTG51 */ - case EXYNOS4210_COMBINER_GET_IRQ_NUM(51, 4) ... - EXYNOS4210_COMBINER_GET_IRQ_NUM(51, 8): - irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), - irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(1, bit + 4)]); - continue; - - /* Multi-Core Timer INTG53 */ - case EXYNOS4210_COMBINER_GET_IRQ_NUM(53, 4) ... - EXYNOS4210_COMBINER_GET_IRQ_NUM(53, 8): - irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), - irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(1, bit + 4)]); - continue; - } - - irq[n] = qdev_get_gpio_in(dev, n); - } -} - static uint64_t exynos4210_combiner_read(void *opaque, hwaddr offset, unsigned size) { diff --git a/hw/intc/exynos4210_gic.c b/hw/intc/exynos4210_gic.c index bc73d1f115..fcca85c6c6 100644 --- a/hw/intc/exynos4210_gic.c +++ b/hw/intc/exynos4210_gic.c @@ -27,157 +27,10 @@ #include "qemu/module.h" #include "hw/irq.h" #include "hw/qdev-properties.h" +#include "hw/intc/exynos4210_gic.h" #include "hw/arm/exynos4210.h" #include "qom/object.h" -enum ExtGicId { - EXT_GIC_ID_MDMA_LCD0 = 66, - EXT_GIC_ID_PDMA0, - EXT_GIC_ID_PDMA1, - EXT_GIC_ID_TIMER0, - EXT_GIC_ID_TIMER1, - EXT_GIC_ID_TIMER2, - EXT_GIC_ID_TIMER3, - EXT_GIC_ID_TIMER4, - EXT_GIC_ID_MCT_L0, - EXT_GIC_ID_WDT, - EXT_GIC_ID_RTC_ALARM, - EXT_GIC_ID_RTC_TIC, - EXT_GIC_ID_GPIO_XB, - EXT_GIC_ID_GPIO_XA, - EXT_GIC_ID_MCT_L1, - EXT_GIC_ID_IEM_APC, - EXT_GIC_ID_IEM_IEC, - EXT_GIC_ID_NFC, - EXT_GIC_ID_UART0, - EXT_GIC_ID_UART1, - EXT_GIC_ID_UART2, - EXT_GIC_ID_UART3, - EXT_GIC_ID_UART4, - EXT_GIC_ID_MCT_G0, - EXT_GIC_ID_I2C0, - EXT_GIC_ID_I2C1, - EXT_GIC_ID_I2C2, - EXT_GIC_ID_I2C3, - EXT_GIC_ID_I2C4, - EXT_GIC_ID_I2C5, - EXT_GIC_ID_I2C6, - EXT_GIC_ID_I2C7, - EXT_GIC_ID_SPI0, - EXT_GIC_ID_SPI1, - EXT_GIC_ID_SPI2, - EXT_GIC_ID_MCT_G1, - EXT_GIC_ID_USB_HOST, - EXT_GIC_ID_USB_DEVICE, - EXT_GIC_ID_MODEMIF, - EXT_GIC_ID_HSMMC0, - EXT_GIC_ID_HSMMC1, - EXT_GIC_ID_HSMMC2, - EXT_GIC_ID_HSMMC3, - EXT_GIC_ID_SDMMC, - EXT_GIC_ID_MIPI_CSI_4LANE, - EXT_GIC_ID_MIPI_DSI_4LANE, - EXT_GIC_ID_MIPI_CSI_2LANE, - EXT_GIC_ID_MIPI_DSI_2LANE, - EXT_GIC_ID_ONENAND_AUDI, - EXT_GIC_ID_ROTATOR, - EXT_GIC_ID_FIMC0, - EXT_GIC_ID_FIMC1, - EXT_GIC_ID_FIMC2, - EXT_GIC_ID_FIMC3, - EXT_GIC_ID_JPEG, - EXT_GIC_ID_2D, - EXT_GIC_ID_PCIe, - EXT_GIC_ID_MIXER, - EXT_GIC_ID_HDMI, - EXT_GIC_ID_HDMI_I2C, - EXT_GIC_ID_MFC, - EXT_GIC_ID_TVENC, -}; - -enum ExtInt { - EXT_GIC_ID_EXTINT0 = 48, - EXT_GIC_ID_EXTINT1, - EXT_GIC_ID_EXTINT2, - EXT_GIC_ID_EXTINT3, - EXT_GIC_ID_EXTINT4, - EXT_GIC_ID_EXTINT5, - EXT_GIC_ID_EXTINT6, - 
EXT_GIC_ID_EXTINT7, - EXT_GIC_ID_EXTINT8, - EXT_GIC_ID_EXTINT9, - EXT_GIC_ID_EXTINT10, - EXT_GIC_ID_EXTINT11, - EXT_GIC_ID_EXTINT12, - EXT_GIC_ID_EXTINT13, - EXT_GIC_ID_EXTINT14, - EXT_GIC_ID_EXTINT15 -}; - -/* - * External GIC sources which are not from External Interrupt Combiner or - * External Interrupts are starting from EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ, - * which is INTG16 in Internal Interrupt Combiner. - */ - -static const uint32_t -combiner_grp_to_gic_id[64-EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ][8] = { - /* int combiner groups 16-19 */ - { }, { }, { }, { }, - /* int combiner group 20 */ - { 0, EXT_GIC_ID_MDMA_LCD0 }, - /* int combiner group 21 */ - { EXT_GIC_ID_PDMA0, EXT_GIC_ID_PDMA1 }, - /* int combiner group 22 */ - { EXT_GIC_ID_TIMER0, EXT_GIC_ID_TIMER1, EXT_GIC_ID_TIMER2, - EXT_GIC_ID_TIMER3, EXT_GIC_ID_TIMER4 }, - /* int combiner group 23 */ - { EXT_GIC_ID_RTC_ALARM, EXT_GIC_ID_RTC_TIC }, - /* int combiner group 24 */ - { EXT_GIC_ID_GPIO_XB, EXT_GIC_ID_GPIO_XA }, - /* int combiner group 25 */ - { EXT_GIC_ID_IEM_APC, EXT_GIC_ID_IEM_IEC }, - /* int combiner group 26 */ - { EXT_GIC_ID_UART0, EXT_GIC_ID_UART1, EXT_GIC_ID_UART2, EXT_GIC_ID_UART3, - EXT_GIC_ID_UART4 }, - /* int combiner group 27 */ - { EXT_GIC_ID_I2C0, EXT_GIC_ID_I2C1, EXT_GIC_ID_I2C2, EXT_GIC_ID_I2C3, - EXT_GIC_ID_I2C4, EXT_GIC_ID_I2C5, EXT_GIC_ID_I2C6, - EXT_GIC_ID_I2C7 }, - /* int combiner group 28 */ - { EXT_GIC_ID_SPI0, EXT_GIC_ID_SPI1, EXT_GIC_ID_SPI2 , EXT_GIC_ID_USB_HOST}, - /* int combiner group 29 */ - { EXT_GIC_ID_HSMMC0, EXT_GIC_ID_HSMMC1, EXT_GIC_ID_HSMMC2, - EXT_GIC_ID_HSMMC3, EXT_GIC_ID_SDMMC }, - /* int combiner group 30 */ - { EXT_GIC_ID_MIPI_CSI_4LANE, EXT_GIC_ID_MIPI_CSI_2LANE }, - /* int combiner group 31 */ - { EXT_GIC_ID_MIPI_DSI_4LANE, EXT_GIC_ID_MIPI_DSI_2LANE }, - /* int combiner group 32 */ - { EXT_GIC_ID_FIMC0, EXT_GIC_ID_FIMC1 }, - /* int combiner group 33 */ - { EXT_GIC_ID_FIMC2, EXT_GIC_ID_FIMC3 }, - /* int combiner group 34 */ - { EXT_GIC_ID_ONENAND_AUDI, EXT_GIC_ID_NFC }, - /* int combiner group 35 */ - { 0, 0, 0, EXT_GIC_ID_MCT_L1, EXT_GIC_ID_MCT_G0, EXT_GIC_ID_MCT_G1 }, - /* int combiner group 36 */ - { EXT_GIC_ID_MIXER }, - /* int combiner group 37 */ - { EXT_GIC_ID_EXTINT4, EXT_GIC_ID_EXTINT5, EXT_GIC_ID_EXTINT6, - EXT_GIC_ID_EXTINT7 }, - /* groups 38-50 */ - { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, - /* int combiner group 51 */ - { EXT_GIC_ID_MCT_L0, 0, 0, 0, EXT_GIC_ID_MCT_G0, EXT_GIC_ID_MCT_G1 }, - /* group 52 */ - { }, - /* int combiner group 53 */ - { EXT_GIC_ID_WDT, 0, 0, 0, EXT_GIC_ID_MCT_G0, EXT_GIC_ID_MCT_G1 }, - /* groups 54-63 */ - { }, { }, { }, { }, { }, { }, { }, { }, { }, { } -}; - #define EXYNOS4210_GIC_NIRQ 160 #define EXYNOS4210_EXT_GIC_CPU_REGION_SIZE 0x10000 @@ -192,92 +45,6 @@ combiner_grp_to_gic_id[64-EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ][8] = { #define EXYNOS4210_GIC_CPU_REGION_SIZE 0x100 #define EXYNOS4210_GIC_DIST_REGION_SIZE 0x1000 -static void exynos4210_irq_handler(void *opaque, int irq, int level) -{ - Exynos4210Irq *s = (Exynos4210Irq *)opaque; - - /* Bypass */ - qemu_set_irq(s->board_irqs[irq], level); -} - -/* - * Initialize exynos4210 IRQ subsystem stub. - */ -qemu_irq *exynos4210_init_irq(Exynos4210Irq *s) -{ - return qemu_allocate_irqs(exynos4210_irq_handler, s, - EXYNOS4210_MAX_INT_COMBINER_IN_IRQ); -} - -/* - * Initialize board IRQs. - * These IRQs contain splitted Int/External Combiner and External Gic IRQs. 
- */ -void exynos4210_init_board_irqs(Exynos4210Irq *s) -{ - uint32_t grp, bit, irq_id, n; - - for (n = 0; n < EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ; n++) { - irq_id = 0; - if (n == EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 4) || - n == EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 4)) { - /* MCT_G0 is passed to External GIC */ - irq_id = EXT_GIC_ID_MCT_G0; - } - if (n == EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 5) || - n == EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 5)) { - /* MCT_G1 is passed to External and GIC */ - irq_id = EXT_GIC_ID_MCT_G1; - } - if (irq_id) { - s->board_irqs[n] = qemu_irq_split(s->int_combiner_irq[n], - s->ext_gic_irq[irq_id-32]); - } else { - s->board_irqs[n] = qemu_irq_split(s->int_combiner_irq[n], - s->ext_combiner_irq[n]); - } - } - for (; n < EXYNOS4210_MAX_INT_COMBINER_IN_IRQ; n++) { - /* these IDs are passed to Internal Combiner and External GIC */ - grp = EXYNOS4210_COMBINER_GET_GRP_NUM(n); - bit = EXYNOS4210_COMBINER_GET_BIT_NUM(n); - irq_id = combiner_grp_to_gic_id[grp - - EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ][bit]; - - if (irq_id) { - s->board_irqs[n] = qemu_irq_split(s->int_combiner_irq[n], - s->ext_gic_irq[irq_id-32]); - } - } -} - -/* - * Get IRQ number from exynos4210 IRQ subsystem stub. - * To identify IRQ source use internal combiner group and bit number - * grp - group number - * bit - bit number inside group - */ -uint32_t exynos4210_get_irq(uint32_t grp, uint32_t bit) -{ - return EXYNOS4210_COMBINER_GET_IRQ_NUM(grp, bit); -} - -/********* GIC part *********/ - -#define TYPE_EXYNOS4210_GIC "exynos4210.gic" -OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210GicState, EXYNOS4210_GIC) - -struct Exynos4210GicState { - SysBusDevice parent_obj; - - MemoryRegion cpu_container; - MemoryRegion dist_container; - MemoryRegion cpu_alias[EXYNOS4210_NCPUS]; - MemoryRegion dist_alias[EXYNOS4210_NCPUS]; - uint32_t num_cpu; - DeviceState *gic; -}; - static void exynos4210_gic_set_irq(void *opaque, int irq, int level) { Exynos4210GicState *s = (Exynos4210GicState *)opaque; @@ -289,10 +56,6 @@ static void exynos4210_gic_realize(DeviceState *dev, Error **errp) Object *obj = OBJECT(dev); Exynos4210GicState *s = EXYNOS4210_GIC(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - const char cpu_prefix[] = "exynos4210-gic-alias_cpu"; - const char dist_prefix[] = "exynos4210-gic-alias_dist"; - char cpu_alias_name[sizeof(cpu_prefix) + 3]; - char dist_alias_name[sizeof(cpu_prefix) + 3]; SysBusDevice *gicbusdev; uint32_t n = s->num_cpu; uint32_t i; @@ -320,10 +83,12 @@ static void exynos4210_gic_realize(DeviceState *dev, Error **errp) * enough room for the cpu numbers. gcc 9.2.1 on 32-bit x86 * doesn't figure this out, otherwise and gives spurious warnings. 
*/ - assert(n <= EXYNOS4210_NCPUS); + assert(n <= EXYNOS4210_GIC_NCPUS); for (i = 0; i < n; i++) { + g_autofree char *cpu_alias_name = g_strdup_printf("exynos4210-gic-alias_cpu%u", i); + g_autofree char *dist_alias_name = g_strdup_printf("exynos4210-gic-alias_dist%u", i); + /* Map CPU interface per SMP Core */ - sprintf(cpu_alias_name, "%s%x", cpu_prefix, i); memory_region_init_alias(&s->cpu_alias[i], obj, cpu_alias_name, sysbus_mmio_get_region(gicbusdev, 1), @@ -333,7 +98,6 @@ static void exynos4210_gic_realize(DeviceState *dev, Error **errp) EXYNOS4210_EXT_GIC_CPU_GET_OFFSET(i), &s->cpu_alias[i]); /* Map Distributor per SMP Core */ - sprintf(dist_alias_name, "%s%x", dist_prefix, i); memory_region_init_alias(&s->dist_alias[i], obj, dist_alias_name, sysbus_mmio_get_region(gicbusdev, 0), @@ -373,110 +137,3 @@ static void exynos4210_gic_register_types(void) } type_init(exynos4210_gic_register_types) - -/* IRQ OR Gate struct. - * - * This device models an OR gate. There are n_in input qdev gpio lines and one - * output sysbus IRQ line. The output IRQ level is formed as OR between all - * gpio inputs. - */ - -#define TYPE_EXYNOS4210_IRQ_GATE "exynos4210.irq_gate" -OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210IRQGateState, EXYNOS4210_IRQ_GATE) - -struct Exynos4210IRQGateState { - SysBusDevice parent_obj; - - uint32_t n_in; /* inputs amount */ - uint32_t *level; /* input levels */ - qemu_irq out; /* output IRQ */ -}; - -static Property exynos4210_irq_gate_properties[] = { - DEFINE_PROP_UINT32("n_in", Exynos4210IRQGateState, n_in, 1), - DEFINE_PROP_END_OF_LIST(), -}; - -static const VMStateDescription vmstate_exynos4210_irq_gate = { - .name = "exynos4210.irq_gate", - .version_id = 2, - .minimum_version_id = 2, - .fields = (VMStateField[]) { - VMSTATE_VBUFFER_UINT32(level, Exynos4210IRQGateState, 1, NULL, n_in), - VMSTATE_END_OF_LIST() - } -}; - -/* Process a change in IRQ input. */ -static void exynos4210_irq_gate_handler(void *opaque, int irq, int level) -{ - Exynos4210IRQGateState *s = (Exynos4210IRQGateState *)opaque; - uint32_t i; - - assert(irq < s->n_in); - - s->level[irq] = level; - - for (i = 0; i < s->n_in; i++) { - if (s->level[i] >= 1) { - qemu_irq_raise(s->out); - return; - } - } - - qemu_irq_lower(s->out); -} - -static void exynos4210_irq_gate_reset(DeviceState *d) -{ - Exynos4210IRQGateState *s = EXYNOS4210_IRQ_GATE(d); - - memset(s->level, 0, s->n_in * sizeof(*s->level)); -} - -/* - * IRQ Gate initialization. 
- */ -static void exynos4210_irq_gate_init(Object *obj) -{ - Exynos4210IRQGateState *s = EXYNOS4210_IRQ_GATE(obj); - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - - sysbus_init_irq(sbd, &s->out); -} - -static void exynos4210_irq_gate_realize(DeviceState *dev, Error **errp) -{ - Exynos4210IRQGateState *s = EXYNOS4210_IRQ_GATE(dev); - - /* Allocate general purpose input signals and connect a handler to each of - * them */ - qdev_init_gpio_in(dev, exynos4210_irq_gate_handler, s->n_in); - - s->level = g_malloc0(s->n_in * sizeof(*s->level)); -} - -static void exynos4210_irq_gate_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->reset = exynos4210_irq_gate_reset; - dc->vmsd = &vmstate_exynos4210_irq_gate; - device_class_set_props(dc, exynos4210_irq_gate_properties); - dc->realize = exynos4210_irq_gate_realize; -} - -static const TypeInfo exynos4210_irq_gate_info = { - .name = TYPE_EXYNOS4210_IRQ_GATE, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(Exynos4210IRQGateState), - .instance_init = exynos4210_irq_gate_init, - .class_init = exynos4210_irq_gate_class_init, -}; - -static void exynos4210_irq_gate_register_types(void) -{ - type_register_static(&exynos4210_irq_gate_info); -} - -type_init(exynos4210_irq_gate_register_types) diff --git a/hw/intc/gicv3_internal.h b/hw/intc/gicv3_internal.h index 05303a55c8..29d5cdc1b6 100644 --- a/hw/intc/gicv3_internal.h +++ b/hw/intc/gicv3_internal.h @@ -24,6 +24,7 @@ #ifndef QEMU_ARM_GICV3_INTERNAL_H #define QEMU_ARM_GICV3_INTERNAL_H +#include "hw/registerfields.h" #include "hw/intc/arm_gicv3_common.h" /* Distributor registers, as offsets from the distributor base address */ @@ -67,10 +68,16 @@ #define GICD_CTLR_E1NWF (1U << 7) #define GICD_CTLR_RWP (1U << 31) +#define GICD_TYPER_LPIS_SHIFT 17 + +/* 16 bits EventId */ +#define GICD_TYPER_IDBITS 0xf + /* * Redistributor frame offsets from RD_base */ #define GICR_SGI_OFFSET 0x10000 +#define GICR_VLPI_OFFSET 0x20000 /* * Redistributor registers, offsets from RD_base @@ -103,7 +110,12 @@ #define GICR_IGRPMODR0 (GICR_SGI_OFFSET + 0x0D00) #define GICR_NSACR (GICR_SGI_OFFSET + 0x0E00) +/* VLPI redistributor registers, offsets from VLPI_base */ +#define GICR_VPROPBASER (GICR_VLPI_OFFSET + 0x70) +#define GICR_VPENDBASER (GICR_VLPI_OFFSET + 0x78) + #define GICR_CTLR_ENABLE_LPIS (1U << 0) +#define GICR_CTLR_CES (1U << 1) #define GICR_CTLR_RWP (1U << 3) #define GICR_CTLR_DPG0 (1U << 24) #define GICR_CTLR_DPG1NS (1U << 25) @@ -122,17 +134,35 @@ #define GICR_WAKER_ProcessorSleep (1U << 1) #define GICR_WAKER_ChildrenAsleep (1U << 2) -#define GICR_PROPBASER_OUTER_CACHEABILITY_MASK (7ULL << 56) -#define GICR_PROPBASER_ADDR_MASK (0xfffffffffULL << 12) -#define GICR_PROPBASER_SHAREABILITY_MASK (3U << 10) -#define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7) -#define GICR_PROPBASER_IDBITS_MASK (0x1f) +FIELD(GICR_PROPBASER, IDBITS, 0, 5) +FIELD(GICR_PROPBASER, INNERCACHE, 7, 3) +FIELD(GICR_PROPBASER, SHAREABILITY, 10, 2) +FIELD(GICR_PROPBASER, PHYADDR, 12, 40) +FIELD(GICR_PROPBASER, OUTERCACHE, 56, 3) -#define GICR_PENDBASER_PTZ (1ULL << 62) -#define GICR_PENDBASER_OUTER_CACHEABILITY_MASK (7ULL << 56) -#define GICR_PENDBASER_ADDR_MASK (0xffffffffULL << 16) -#define GICR_PENDBASER_SHAREABILITY_MASK (3U << 10) -#define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7) +FIELD(GICR_PENDBASER, INNERCACHE, 7, 3) +FIELD(GICR_PENDBASER, SHAREABILITY, 10, 2) +FIELD(GICR_PENDBASER, PHYADDR, 16, 36) +FIELD(GICR_PENDBASER, OUTERCACHE, 56, 3) +FIELD(GICR_PENDBASER, PTZ, 62, 1) + +#define 
GICR_PROPBASER_IDBITS_THRESHOLD 0xd + +/* These are the GICv4 VPROPBASER and VPENDBASER layouts; v4.1 is different */ +FIELD(GICR_VPROPBASER, IDBITS, 0, 5) +FIELD(GICR_VPROPBASER, INNERCACHE, 7, 3) +FIELD(GICR_VPROPBASER, SHAREABILITY, 10, 2) +FIELD(GICR_VPROPBASER, PHYADDR, 12, 40) +FIELD(GICR_VPROPBASER, OUTERCACHE, 56, 3) + +FIELD(GICR_VPENDBASER, INNERCACHE, 7, 3) +FIELD(GICR_VPENDBASER, SHAREABILITY, 10, 2) +FIELD(GICR_VPENDBASER, PHYADDR, 16, 36) +FIELD(GICR_VPENDBASER, OUTERCACHE, 56, 3) +FIELD(GICR_VPENDBASER, DIRTY, 60, 1) +FIELD(GICR_VPENDBASER, PENDINGLAST, 61, 1) +FIELD(GICR_VPENDBASER, IDAI, 62, 1) +FIELD(GICR_VPENDBASER, VALID, 63, 1) #define ICC_CTLR_EL1_CBPR (1U << 0) #define ICC_CTLR_EL1_EOIMODE (1U << 1) @@ -239,6 +269,241 @@ #define ICH_VTR_EL2_PREBITS_SHIFT 26 #define ICH_VTR_EL2_PRIBITS_SHIFT 29 +/* ITS Registers */ + +FIELD(GITS_BASER, SIZE, 0, 8) +FIELD(GITS_BASER, PAGESIZE, 8, 2) +FIELD(GITS_BASER, SHAREABILITY, 10, 2) +FIELD(GITS_BASER, PHYADDR, 12, 36) +FIELD(GITS_BASER, PHYADDRL_64K, 16, 32) +FIELD(GITS_BASER, PHYADDRH_64K, 12, 4) +FIELD(GITS_BASER, ENTRYSIZE, 48, 5) +FIELD(GITS_BASER, OUTERCACHE, 53, 3) +FIELD(GITS_BASER, TYPE, 56, 3) +FIELD(GITS_BASER, INNERCACHE, 59, 3) +FIELD(GITS_BASER, INDIRECT, 62, 1) +FIELD(GITS_BASER, VALID, 63, 1) + +FIELD(GITS_CBASER, SIZE, 0, 8) +FIELD(GITS_CBASER, SHAREABILITY, 10, 2) +FIELD(GITS_CBASER, PHYADDR, 12, 40) +FIELD(GITS_CBASER, OUTERCACHE, 53, 3) +FIELD(GITS_CBASER, INNERCACHE, 59, 3) +FIELD(GITS_CBASER, VALID, 63, 1) + +FIELD(GITS_CREADR, STALLED, 0, 1) +FIELD(GITS_CREADR, OFFSET, 5, 15) + +FIELD(GITS_CWRITER, RETRY, 0, 1) +FIELD(GITS_CWRITER, OFFSET, 5, 15) + +FIELD(GITS_CTLR, ENABLED, 0, 1) +FIELD(GITS_CTLR, QUIESCENT, 31, 1) + +FIELD(GITS_TYPER, PHYSICAL, 0, 1) +FIELD(GITS_TYPER, VIRTUAL, 1, 1) +FIELD(GITS_TYPER, ITT_ENTRY_SIZE, 4, 4) +FIELD(GITS_TYPER, IDBITS, 8, 5) +FIELD(GITS_TYPER, DEVBITS, 13, 5) +FIELD(GITS_TYPER, SEIS, 18, 1) +FIELD(GITS_TYPER, PTA, 19, 1) +FIELD(GITS_TYPER, CIDBITS, 32, 4) +FIELD(GITS_TYPER, CIL, 36, 1) +FIELD(GITS_TYPER, VMOVP, 37, 1) + +#define GITS_IDREGS 0xFFD0 + +#define GITS_BASER_RO_MASK (R_GITS_BASER_ENTRYSIZE_MASK | \ + R_GITS_BASER_TYPE_MASK) + +#define GITS_BASER_PAGESIZE_4K 0 +#define GITS_BASER_PAGESIZE_16K 1 +#define GITS_BASER_PAGESIZE_64K 2 + +#define GITS_BASER_TYPE_DEVICE 1ULL +#define GITS_BASER_TYPE_VPE 2ULL +#define GITS_BASER_TYPE_COLLECTION 4ULL + +#define GITS_PAGE_SIZE_4K 0x1000 +#define GITS_PAGE_SIZE_16K 0x4000 +#define GITS_PAGE_SIZE_64K 0x10000 + +#define L1TABLE_ENTRY_SIZE 8 + +#define LPI_CTE_ENABLED TABLE_ENTRY_VALID_MASK +#define LPI_PRIORITY_MASK 0xfc + +#define GITS_CMDQ_ENTRY_WORDS 4 +#define GITS_CMDQ_ENTRY_SIZE (GITS_CMDQ_ENTRY_WORDS * sizeof(uint64_t)) + +#define CMD_MASK 0xff + +/* ITS Commands */ +#define GITS_CMD_MOVI 0x01 +#define GITS_CMD_INT 0x03 +#define GITS_CMD_CLEAR 0x04 +#define GITS_CMD_SYNC 0x05 +#define GITS_CMD_MAPD 0x08 +#define GITS_CMD_MAPC 0x09 +#define GITS_CMD_MAPTI 0x0A +#define GITS_CMD_MAPI 0x0B +#define GITS_CMD_INV 0x0C +#define GITS_CMD_INVALL 0x0D +#define GITS_CMD_MOVALL 0x0E +#define GITS_CMD_DISCARD 0x0F +#define GITS_CMD_VMOVI 0x21 +#define GITS_CMD_VMOVP 0x22 +#define GITS_CMD_VSYNC 0x25 +#define GITS_CMD_VMAPP 0x29 +#define GITS_CMD_VMAPTI 0x2A +#define GITS_CMD_VMAPI 0x2B +#define GITS_CMD_VINVALL 0x2D + +/* MAPC command fields */ +#define ICID_LENGTH 16 +#define ICID_MASK ((1U << ICID_LENGTH) - 1) +FIELD(MAPC, RDBASE, 16, 32) + +#define RDBASE_PROCNUM_LENGTH 16 +#define RDBASE_PROCNUM_MASK ((1ULL << 
RDBASE_PROCNUM_LENGTH) - 1) + +/* MAPD command fields */ +#define ITTADDR_LENGTH 44 +#define ITTADDR_SHIFT 8 +#define ITTADDR_MASK MAKE_64BIT_MASK(ITTADDR_SHIFT, ITTADDR_LENGTH) +#define SIZE_MASK 0x1f + +/* MAPI command fields */ +#define EVENTID_MASK ((1ULL << 32) - 1) + +/* MAPTI command fields */ +#define pINTID_SHIFT 32 +#define pINTID_MASK MAKE_64BIT_MASK(32, 32) + +#define DEVID_SHIFT 32 +#define DEVID_MASK MAKE_64BIT_MASK(32, 32) + +#define VALID_SHIFT 63 +#define CMD_FIELD_VALID_MASK (1ULL << VALID_SHIFT) +#define L2_TABLE_VALID_MASK CMD_FIELD_VALID_MASK +#define TABLE_ENTRY_VALID_MASK (1ULL << 0) + +/* MOVALL command fields */ +FIELD(MOVALL_2, RDBASE1, 16, 36) +FIELD(MOVALL_3, RDBASE2, 16, 36) + +/* MOVI command fields */ +FIELD(MOVI_0, DEVICEID, 32, 32) +FIELD(MOVI_1, EVENTID, 0, 32) +FIELD(MOVI_2, ICID, 0, 16) + +/* INV command fields */ +FIELD(INV_0, DEVICEID, 32, 32) +FIELD(INV_1, EVENTID, 0, 32) + +/* VMAPI, VMAPTI command fields */ +FIELD(VMAPTI_0, DEVICEID, 32, 32) +FIELD(VMAPTI_1, EVENTID, 0, 32) +FIELD(VMAPTI_1, VPEID, 32, 16) +FIELD(VMAPTI_2, VINTID, 0, 32) /* VMAPTI only */ +FIELD(VMAPTI_2, DOORBELL, 32, 32) + +/* VMAPP command fields */ +FIELD(VMAPP_0, ALLOC, 8, 1) /* GICv4.1 only */ +FIELD(VMAPP_0, PTZ, 9, 1) /* GICv4.1 only */ +FIELD(VMAPP_0, VCONFADDR, 16, 36) /* GICv4.1 only */ +FIELD(VMAPP_1, DEFAULT_DOORBELL, 0, 32) /* GICv4.1 only */ +FIELD(VMAPP_1, VPEID, 32, 16) +FIELD(VMAPP_2, RDBASE, 16, 36) +FIELD(VMAPP_2, V, 63, 1) +FIELD(VMAPP_3, VPTSIZE, 0, 8) /* For GICv4.0, bits [7:6] are RES0 */ +FIELD(VMAPP_3, VPTADDR, 16, 36) + +/* VMOVP command fields */ +FIELD(VMOVP_0, SEQNUM, 32, 16) /* not used for GITS_TYPER.VMOVP == 1 */ +FIELD(VMOVP_1, ITSLIST, 0, 16) /* not used for GITS_TYPER.VMOVP == 1 */ +FIELD(VMOVP_1, VPEID, 32, 16) +FIELD(VMOVP_2, RDBASE, 16, 36) +FIELD(VMOVP_2, DB, 63, 1) /* GICv4.1 only */ +FIELD(VMOVP_3, DEFAULT_DOORBELL, 0, 32) /* GICv4.1 only */ + +/* VMOVI command fields */ +FIELD(VMOVI_0, DEVICEID, 32, 32) +FIELD(VMOVI_1, EVENTID, 0, 32) +FIELD(VMOVI_1, VPEID, 32, 16) +FIELD(VMOVI_2, D, 0, 1) +FIELD(VMOVI_2, DOORBELL, 32, 32) + +/* VINVALL command fields */ +FIELD(VINVALL_1, VPEID, 32, 16) + +/* + * 12 bytes Interrupt translation Table Entry size + * as per Table 5.3 in GICv3 spec + * ITE Lower 8 Bytes + * Bits: | 63 ... 48 | 47 ... 32 | 31 ... 26 | 25 ... 2 | 1 | 0 | + * Values: | vPEID | ICID | unused | IntNum | IntType | Valid | + * ITE Higher 4 Bytes + * Bits: | 31 ... 25 | 24 ... 0 | + * Values: | unused | Doorbell | + * (When Doorbell is unused, as it always is for INTYPE_PHYSICAL, + * the value of that field in memory cannot be relied upon -- older + * versions of QEMU did not correctly write to that memory.) 
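+ * (A purely illustrative encoding example: a valid physical interrupt
+ * with IntNum 0x2003 routed to ICID 2 would have a lower 8 bytes of
+ * 0x2_0000_800F, with the Doorbell field unused as noted above.)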
+ */ +#define ITS_ITT_ENTRY_SIZE 0xC + +FIELD(ITE_L, VALID, 0, 1) +FIELD(ITE_L, INTTYPE, 1, 1) +FIELD(ITE_L, INTID, 2, 24) +FIELD(ITE_L, ICID, 32, 16) +FIELD(ITE_L, VPEID, 48, 16) +FIELD(ITE_H, DOORBELL, 0, 24) + +/* Possible values for ITE_L INTTYPE */ +#define ITE_INTTYPE_VIRTUAL 0 +#define ITE_INTTYPE_PHYSICAL 1 + +/* 16 bits EventId */ +#define ITS_IDBITS GICD_TYPER_IDBITS + +/* 16 bits DeviceId */ +#define ITS_DEVBITS 0xF + +/* 16 bits CollectionId */ +#define ITS_CIDBITS 0xF + +/* + * 8 bytes Device Table Entry size + * Valid = 1 bit,ITTAddr = 44 bits,Size = 5 bits + */ +#define GITS_DTE_SIZE (0x8ULL) + +FIELD(DTE, VALID, 0, 1) +FIELD(DTE, SIZE, 1, 5) +FIELD(DTE, ITTADDR, 6, 44) + +/* + * 8 bytes Collection Table Entry size + * Valid = 1 bit, RDBase = 16 bits + */ +#define GITS_CTE_SIZE (0x8ULL) +FIELD(CTE, VALID, 0, 1) +FIELD(CTE, RDBASE, 1, RDBASE_PROCNUM_LENGTH) + +/* + * 8 bytes VPE table entry size: + * Valid = 1 bit, VPTsize = 5 bits, VPTaddr = 36 bits, RDbase = 16 bits + * + * Field sizes for Valid and size are mandated; field sizes for RDbase + * and VPT_addr are IMPDEF. + */ +#define GITS_VPE_SIZE 0x8ULL + +FIELD(VTE, VALID, 0, 1) +FIELD(VTE, VPTSIZE, 1, 5) +FIELD(VTE, VPTADDR, 6, 36) +FIELD(VTE, RDBASE, 42, RDBASE_PROCNUM_LENGTH) + /* Special interrupt IDs */ #define INTID_SECURE 1020 #define INTID_NONSECURE 1021 @@ -246,6 +511,40 @@ /* Functions internal to the emulated GICv3 */ +/** + * gicv3_redist_size: + * @s: GICv3State + * + * Return the size of the redistributor register frame in bytes + * (which depends on what GIC version this is) + */ +static inline int gicv3_redist_size(GICv3State *s) +{ + /* + * Redistributor size is controlled by the redistributor GICR_TYPER.VLPIS. + * It's the same for every redistributor in the GIC, so arbitrarily + * use the register field in the first one. + */ + if (s->cpu[0].gicr_typer & GICR_TYPER_VLPIS) { + return GICV4_REDIST_SIZE; + } else { + return GICV3_REDIST_SIZE; + } +} + +/** + * gicv3_intid_is_special: + * @intid: interrupt ID + * + * Return true if @intid is a special interrupt ID (1020 to + * 1023 inclusive). This corresponds to the GIC spec pseudocode + * IsSpecial() function. + */ +static inline bool gicv3_intid_is_special(int intid) +{ + return intid >= INTID_SECURE && intid <= INTID_SPURIOUS; +} + /** * gicv3_redist_update: * @cs: GICv3CPUState for this redistributor @@ -296,6 +595,118 @@ MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data, unsigned size, MemTxAttrs attrs); void gicv3_dist_set_irq(GICv3State *s, int irq, int level); void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level); +void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level); +/** + * gicv3_redist_process_vlpi: + * @cs: GICv3CPUState + * @irq: (virtual) interrupt number + * @vptaddr: (guest) address of VLPI table + * @doorbell: doorbell (physical) interrupt number (1023 for "no doorbell") + * @level: level to set @irq to + * + * Process a virtual LPI being directly injected by the ITS. This function + * will update the VLPI table specified by @vptaddr and @vptsize. If the + * vCPU corresponding to that VLPI table is currently running on + * the CPU associated with this redistributor, directly inject the VLPI + * @irq. If the vCPU is not running on this CPU, raise the doorbell + * interrupt instead. 
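+ * (Concretely, in the non-resident case a @level of 1 is delivered as the
+ * physical LPI @doorbell via gicv3_redist_process_lpi(), provided LPIs are
+ * enabled on this redistributor and @doorbell is not INTID_SPURIOUS.)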
+ */ +void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr, + int doorbell, int level); +/** + * gicv3_redist_vlpi_pending: + * @cs: GICv3CPUState + * @irq: (virtual) interrupt number + * @level: level to set @irq to + * + * Set/clear the pending status of a virtual LPI in the vLPI table + * that this redistributor is currently using. (The difference between + * this and gicv3_redist_process_vlpi() is that this is called from + * the cpuif and does not need to do the not-running-on-this-vcpu checks.) + */ +void gicv3_redist_vlpi_pending(GICv3CPUState *cs, int irq, int level); + +void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level); +/** + * gicv3_redist_update_lpi: + * @cs: GICv3CPUState + * + * Scan the LPI pending table and recalculate the highest priority + * pending LPI and also the overall highest priority pending interrupt. + */ +void gicv3_redist_update_lpi(GICv3CPUState *cs); +/** + * gicv3_redist_update_lpi_only: + * @cs: GICv3CPUState + * + * Scan the LPI pending table and recalculate cs->hpplpi only, + * without calling gicv3_redist_update() to recalculate the overall + * highest priority pending interrupt. This should be called after + * an incoming migration has loaded new state. + */ +void gicv3_redist_update_lpi_only(GICv3CPUState *cs); +/** + * gicv3_redist_inv_lpi: + * @cs: GICv3CPUState + * @irq: LPI to invalidate cached information for + * + * Forget or update any cached information associated with this LPI. + */ +void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq); +/** + * gicv3_redist_inv_vlpi: + * @cs: GICv3CPUState + * @irq: vLPI to invalidate cached information for + * @vptaddr: (guest) address of vLPI table + * + * Forget or update any cached information associated with this vLPI. + */ +void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr); +/** + * gicv3_redist_mov_lpi: + * @src: source redistributor + * @dest: destination redistributor + * @irq: LPI to update + * + * Move the pending state of the specified LPI from @src to @dest, + * as required by the ITS MOVI command. + */ +void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq); +/** + * gicv3_redist_movall_lpis: + * @src: source redistributor + * @dest: destination redistributor + * + * Scan the LPI pending table for @src, and for each pending LPI there + * mark it as not-pending for @src and pending for @dest, as required + * by the ITS MOVALL command. + */ +void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest); +/** + * gicv3_redist_mov_vlpi: + * @src: source redistributor + * @src_vptaddr: (guest) address of source VLPI table + * @dest: destination redistributor + * @dest_vptaddr: (guest) address of destination VLPI table + * @irq: VLPI to update + * @doorbell: doorbell for destination (1023 for "no doorbell") + * + * Move the pending state of the specified VLPI from @src to @dest, + * as required by the ITS VMOVI command. + */ +void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr, + GICv3CPUState *dest, uint64_t dest_vptaddr, + int irq, int doorbell); +/** + * gicv3_redist_vinvall: + * @cs: GICv3CPUState + * @vptaddr: address of VLPI pending table + * + * On redistributor @cs, invalidate all cached information associated + * with the vCPU defined by @vptaddr. 
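+ * (If that vCPU is not currently resident on @cs this is a no-op, since
+ * nothing is cached for non-resident vCPUs.)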
+ */ +void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr); + void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns); void gicv3_init_cpuif(GICv3State *s); @@ -310,6 +721,17 @@ void gicv3_init_cpuif(GICv3State *s); */ void gicv3_cpuif_update(GICv3CPUState *cs); +/* + * gicv3_cpuif_virt_irq_fiq_update: + * @cs: GICv3CPUState for the CPU to update + * + * Recalculate whether to assert the virtual IRQ or FIQ lines after + * a change to the current highest priority pending virtual interrupt. + * Note that this does not recalculate and change the maintenance + * interrupt status (for that, see gicv3_cpuif_virt_update()). + */ +void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs); + static inline uint32_t gicv3_iidr(void) { /* Return the Implementer Identification Register value @@ -321,17 +743,34 @@ static inline uint32_t gicv3_iidr(void) return 0x43b; } -static inline uint32_t gicv3_idreg(int regoffset) +/* CoreSight PIDR0 values for ARM GICv3 implementations */ +#define GICV3_PIDR0_DIST 0x92 +#define GICV3_PIDR0_REDIST 0x93 +#define GICV3_PIDR0_ITS 0x94 + +static inline uint32_t gicv3_idreg(GICv3State *s, int regoffset, uint8_t pidr0) { /* Return the value of the CoreSight ID register at the specified * offset from the first ID register (as found in the distributor * and redistributor register banks). - * These values indicate an ARM implementation of a GICv3. + * These values indicate an ARM implementation of a GICv3 or v4. */ static const uint8_t gicd_ids[] = { - 0x44, 0x00, 0x00, 0x00, 0x92, 0xB4, 0x3B, 0x00, 0x0D, 0xF0, 0x05, 0xB1 + 0x44, 0x00, 0x00, 0x00, 0x92, 0xB4, 0x0B, 0x00, 0x0D, 0xF0, 0x05, 0xB1 }; - return gicd_ids[regoffset / 4]; + uint32_t id; + + regoffset /= 4; + + if (regoffset == 4) { + return pidr0; + } + id = gicd_ids[regoffset]; + if (regoffset == 6) { + /* PIDR2 bits [7:4] are the GIC architecture revision */ + id |= s->revision << 4; + } + return id; } /** diff --git a/hw/intc/goldfish_pic.c b/hw/intc/goldfish_pic.c index e3b43a69f1..dfd53275f6 100644 --- a/hw/intc/goldfish_pic.c +++ b/hw/intc/goldfish_pic.c @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * Goldfish PIC * diff --git a/hw/intc/heathrow_pic.c b/hw/intc/heathrow_pic.c index cb97c315da..13048a2735 100644 --- a/hw/intc/heathrow_pic.c +++ b/hw/intc/heathrow_pic.c @@ -24,7 +24,6 @@ */ #include "qemu/osdep.h" -#include "hw/ppc/mac.h" #include "migration/vmstate.h" #include "qemu/module.h" #include "hw/intc/heathrow_pic.h" diff --git a/hw/intc/i8259_common.c b/hw/intc/i8259_common.c index b93bd8de19..f7033778bb 100644 --- a/hw/intc/i8259_common.c +++ b/hw/intc/i8259_common.c @@ -118,8 +118,8 @@ void pic_stat_update_irq(int irq, int level) } } -bool pic_get_statistics(InterruptStatsProvider *obj, - uint64_t **irq_counts, unsigned int *nb_irqs) +static bool pic_get_statistics(InterruptStatsProvider *obj, + uint64_t **irq_counts, unsigned int *nb_irqs) { PICCommonState *s = PIC_COMMON(obj); @@ -134,7 +134,7 @@ bool pic_get_statistics(InterruptStatsProvider *obj, return true; } -void pic_print_info(InterruptStatsProvider *obj, Monitor *mon) +static void pic_print_info(InterruptStatsProvider *obj, Monitor *mon) { PICCommonState *s = PIC_COMMON(obj); diff --git a/hw/intc/ibex_plic.c b/hw/intc/ibex_plic.c deleted file mode 100644 index edf76e4f61..0000000000 --- a/hw/intc/ibex_plic.c +++ /dev/null @@ -1,312 +0,0 @@ -/* - * QEMU RISC-V lowRISC Ibex PLIC - * - * Copyright (c) 2020 Western Digital - * - * Documentation 
avaliable: https://docs.opentitan.org/hw/ip/rv_plic/doc/ - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2 or later, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include "qemu/osdep.h" -#include "qemu/log.h" -#include "hw/qdev-properties.h" -#include "hw/core/cpu.h" -#include "hw/boards.h" -#include "hw/pci/msi.h" -#include "target/riscv/cpu_bits.h" -#include "target/riscv/cpu.h" -#include "hw/intc/ibex_plic.h" - -static bool addr_between(uint32_t addr, uint32_t base, uint32_t num) -{ - uint32_t end = base + (num * 0x04); - - if (addr >= base && addr < end) { - return true; - } - - return false; -} - -static void ibex_plic_irqs_set_pending(IbexPlicState *s, int irq, bool level) -{ - int pending_num = irq / 32; - - if (!level) { - /* - * If the level is low make sure we clear the hidden_pending. - */ - s->hidden_pending[pending_num] &= ~(1 << (irq % 32)); - } - - if (s->claimed[pending_num] & 1 << (irq % 32)) { - /* - * The interrupt has been claimed, but not completed. - * The pending bit can't be set. - * Save the pending level for after the interrupt is completed. - */ - s->hidden_pending[pending_num] |= level << (irq % 32); - } else { - s->pending[pending_num] |= level << (irq % 32); - } -} - -static bool ibex_plic_irqs_pending(IbexPlicState *s, uint32_t context) -{ - int i; - uint32_t max_irq = 0; - uint32_t max_prio = s->threshold; - - for (i = 0; i < s->pending_num; i++) { - uint32_t irq_num = ctz64(s->pending[i]) + (i * 32); - - if (!(s->pending[i] & s->enable[i])) { - /* No pending and enabled IRQ */ - continue; - } - - if (s->priority[irq_num] > max_prio) { - max_irq = irq_num; - max_prio = s->priority[irq_num]; - } - } - - if (max_irq) { - s->claim = max_irq; - return true; - } - - return false; -} - -static void ibex_plic_update(IbexPlicState *s) -{ - CPUState *cpu; - int level, i; - - for (i = 0; i < s->num_cpus; i++) { - cpu = qemu_get_cpu(i); - - if (!cpu) { - continue; - } - - level = ibex_plic_irqs_pending(s, 0); - - riscv_cpu_update_mip(RISCV_CPU(cpu), MIP_MEIP, BOOL_TO_MASK(level)); - } -} - -static void ibex_plic_reset(DeviceState *dev) -{ - IbexPlicState *s = IBEX_PLIC(dev); - - s->threshold = 0x00000000; - s->claim = 0x00000000; -} - -static uint64_t ibex_plic_read(void *opaque, hwaddr addr, - unsigned int size) -{ - IbexPlicState *s = opaque; - int offset; - uint32_t ret = 0; - - if (addr_between(addr, s->pending_base, s->pending_num)) { - offset = (addr - s->pending_base) / 4; - ret = s->pending[offset]; - } else if (addr_between(addr, s->source_base, s->source_num)) { - qemu_log_mask(LOG_UNIMP, - "%s: Interrupt source mode not supported\n", __func__); - } else if (addr_between(addr, s->priority_base, s->priority_num)) { - offset = (addr - s->priority_base) / 4; - ret = s->priority[offset]; - } else if (addr_between(addr, s->enable_base, s->enable_num)) { - offset = (addr - s->enable_base) / 4; - ret = s->enable[offset]; - } else if (addr_between(addr, s->threshold_base, 1)) { - ret = s->threshold; - } else if (addr_between(addr, s->claim_base, 1)) { - int pending_num = s->claim / 32; - 
s->pending[pending_num] &= ~(1 << (s->claim % 32)); - - /* Set the interrupt as claimed, but not completed */ - s->claimed[pending_num] |= 1 << (s->claim % 32); - - /* Return the current claimed interrupt */ - ret = s->claim; - - /* Clear the claimed interrupt */ - s->claim = 0x00000000; - - /* Update the interrupt status after the claim */ - ibex_plic_update(s); - } - - return ret; -} - -static void ibex_plic_write(void *opaque, hwaddr addr, - uint64_t value, unsigned int size) -{ - IbexPlicState *s = opaque; - - if (addr_between(addr, s->pending_base, s->pending_num)) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Pending registers are read only\n", __func__); - } else if (addr_between(addr, s->source_base, s->source_num)) { - qemu_log_mask(LOG_UNIMP, - "%s: Interrupt source mode not supported\n", __func__); - } else if (addr_between(addr, s->priority_base, s->priority_num)) { - uint32_t irq = ((addr - s->priority_base) >> 2) + 1; - s->priority[irq] = value & 7; - ibex_plic_update(s); - } else if (addr_between(addr, s->enable_base, s->enable_num)) { - uint32_t enable_reg = (addr - s->enable_base) / 4; - - s->enable[enable_reg] = value; - } else if (addr_between(addr, s->threshold_base, 1)) { - s->threshold = value & 3; - } else if (addr_between(addr, s->claim_base, 1)) { - if (s->claim == value) { - /* Interrupt was completed */ - s->claim = 0; - } - if (s->claimed[value / 32] & 1 << (value % 32)) { - int pending_num = value / 32; - - /* This value was already claimed, clear it. */ - s->claimed[pending_num] &= ~(1 << (value % 32)); - - if (s->hidden_pending[pending_num] & (1 << (value % 32))) { - /* - * If the bit in hidden_pending is set then that means we - * received an interrupt between claiming and completing - * the interrupt that hasn't since been de-asserted. - * On hardware this would trigger an interrupt, so let's - * trigger one here as well. 
- */ - s->pending[pending_num] |= 1 << (value % 32); - } - } - } - - ibex_plic_update(s); -} - -static const MemoryRegionOps ibex_plic_ops = { - .read = ibex_plic_read, - .write = ibex_plic_write, - .endianness = DEVICE_NATIVE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 4 - } -}; - -static void ibex_plic_irq_request(void *opaque, int irq, int level) -{ - IbexPlicState *s = opaque; - - ibex_plic_irqs_set_pending(s, irq, level > 0); - ibex_plic_update(s); -} - -static Property ibex_plic_properties[] = { - DEFINE_PROP_UINT32("num-cpus", IbexPlicState, num_cpus, 1), - DEFINE_PROP_UINT32("num-sources", IbexPlicState, num_sources, 176), - - DEFINE_PROP_UINT32("pending-base", IbexPlicState, pending_base, 0), - DEFINE_PROP_UINT32("pending-num", IbexPlicState, pending_num, 6), - - DEFINE_PROP_UINT32("source-base", IbexPlicState, source_base, 0x18), - DEFINE_PROP_UINT32("source-num", IbexPlicState, source_num, 6), - - DEFINE_PROP_UINT32("priority-base", IbexPlicState, priority_base, 0x30), - DEFINE_PROP_UINT32("priority-num", IbexPlicState, priority_num, 177), - - DEFINE_PROP_UINT32("enable-base", IbexPlicState, enable_base, 0x300), - DEFINE_PROP_UINT32("enable-num", IbexPlicState, enable_num, 6), - - DEFINE_PROP_UINT32("threshold-base", IbexPlicState, threshold_base, 0x318), - - DEFINE_PROP_UINT32("claim-base", IbexPlicState, claim_base, 0x31c), - DEFINE_PROP_END_OF_LIST(), -}; - -static void ibex_plic_init(Object *obj) -{ - IbexPlicState *s = IBEX_PLIC(obj); - - memory_region_init_io(&s->mmio, obj, &ibex_plic_ops, s, - TYPE_IBEX_PLIC, 0x400); - sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); -} - -static void ibex_plic_realize(DeviceState *dev, Error **errp) -{ - IbexPlicState *s = IBEX_PLIC(dev); - int i; - - s->pending = g_new0(uint32_t, s->pending_num); - s->hidden_pending = g_new0(uint32_t, s->pending_num); - s->claimed = g_new0(uint32_t, s->pending_num); - s->source = g_new0(uint32_t, s->source_num); - s->priority = g_new0(uint32_t, s->priority_num); - s->enable = g_new0(uint32_t, s->enable_num); - - qdev_init_gpio_in(dev, ibex_plic_irq_request, s->num_sources); - - /* - * We can't allow the supervisor to control SEIP as this would allow the - * supervisor to clear a pending external interrupt which will result in - * a lost interrupt in the case a PLIC is attached. The SEIP bit must be - * hardware controlled when a PLIC is attached. 
- */ - MachineState *ms = MACHINE(qdev_get_machine()); - unsigned int smp_cpus = ms->smp.cpus; - for (i = 0; i < smp_cpus; i++) { - RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(i)); - if (riscv_cpu_claim_interrupts(cpu, MIP_SEIP) < 0) { - error_report("SEIP already claimed"); - exit(1); - } - } - - msi_nonbroken = true; -} - -static void ibex_plic_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->reset = ibex_plic_reset; - device_class_set_props(dc, ibex_plic_properties); - dc->realize = ibex_plic_realize; -} - -static const TypeInfo ibex_plic_info = { - .name = TYPE_IBEX_PLIC, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(IbexPlicState), - .instance_init = ibex_plic_init, - .class_init = ibex_plic_class_init, -}; - -static void ibex_plic_register_types(void) -{ - type_register_static(&ibex_plic_info); -} - -type_init(ibex_plic_register_types) diff --git a/hw/intc/ioapic_common.c b/hw/intc/ioapic_common.c index 3cccfc1556..aa5f760871 100644 --- a/hw/intc/ioapic_common.c +++ b/hw/intc/ioapic_common.c @@ -76,7 +76,7 @@ static void ioapic_irr_dump(Monitor *mon, const char *name, uint32_t bitmap) monitor_printf(mon, "\n"); } -void ioapic_print_redtbl(Monitor *mon, IOAPICCommonState *s) +static void ioapic_print_redtbl(Monitor *mon, IOAPICCommonState *s) { static const char *delm_str[] = { "fixed", "lowest", "SMI", "...", "NMI", "INIT", "...", "extINT"}; diff --git a/hw/intc/loongarch_extioi.c b/hw/intc/loongarch_extioi.c new file mode 100644 index 0000000000..4b8ec3f28a --- /dev/null +++ b/hw/intc/loongarch_extioi.c @@ -0,0 +1,317 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Loongson 3A5000 ext interrupt controller emulation + * + * Copyright (C) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qemu/log.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "hw/loongarch/virt.h" +#include "hw/qdev-properties.h" +#include "exec/address-spaces.h" +#include "hw/intc/loongarch_extioi.h" +#include "migration/vmstate.h" +#include "trace.h" + + +static void extioi_update_irq(LoongArchExtIOI *s, int irq, int level) +{ + int ipnum, cpu, found, irq_index, irq_mask; + + ipnum = s->sw_ipmap[irq / 32]; + cpu = s->sw_coremap[irq]; + irq_index = irq / 32; + irq_mask = 1 << (irq & 0x1f); + + if (level) { + /* if not enable return false */ + if (((s->enable[irq_index]) & irq_mask) == 0) { + return; + } + s->coreisr[cpu][irq_index] |= irq_mask; + found = find_first_bit(s->sw_isr[cpu][ipnum], EXTIOI_IRQS); + set_bit(irq, s->sw_isr[cpu][ipnum]); + if (found < EXTIOI_IRQS) { + /* other irq is handling, need not update parent irq level */ + return; + } + } else { + s->coreisr[cpu][irq_index] &= ~irq_mask; + clear_bit(irq, s->sw_isr[cpu][ipnum]); + found = find_first_bit(s->sw_isr[cpu][ipnum], EXTIOI_IRQS); + if (found < EXTIOI_IRQS) { + /* other irq is handling, need not update parent irq level */ + return; + } + } + qemu_set_irq(s->parent_irq[cpu][ipnum], level); +} + +static void extioi_setirq(void *opaque, int irq, int level) +{ + LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque); + trace_loongarch_extioi_setirq(irq, level); + if (level) { + /* + * s->isr should be used in vmstate structure, + * but it not support 'unsigned long', + * so we have to switch it. 
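+ * (That is: s->isr is kept as a uint32_t array so it can be migrated with
+ * VMSTATE_UINT32_ARRAY, and is cast to unsigned long * here only for the
+ * set_bit()/clear_bit() helpers.)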
+ */ + set_bit(irq, (unsigned long *)s->isr); + } else { + clear_bit(irq, (unsigned long *)s->isr); + } + extioi_update_irq(s, irq, level); +} + +static MemTxResult extioi_readw(void *opaque, hwaddr addr, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque); + unsigned long offset = addr & 0xffff; + uint32_t index, cpu; + + switch (offset) { + case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END - 1: + index = (offset - EXTIOI_NODETYPE_START) >> 2; + *data = s->nodetype[index]; + break; + case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END - 1: + index = (offset - EXTIOI_IPMAP_START) >> 2; + *data = s->ipmap[index]; + break; + case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END - 1: + index = (offset - EXTIOI_ENABLE_START) >> 2; + *data = s->enable[index]; + break; + case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END - 1: + index = (offset - EXTIOI_BOUNCE_START) >> 2; + *data = s->bounce[index]; + break; + case EXTIOI_COREISR_START ... EXTIOI_COREISR_END - 1: + index = (offset - EXTIOI_COREISR_START) >> 2; + /* using attrs to get current cpu index */ + cpu = attrs.requester_id; + *data = s->coreisr[cpu][index]; + break; + case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END - 1: + index = (offset - EXTIOI_COREMAP_START) >> 2; + *data = s->coremap[index]; + break; + default: + break; + } + + trace_loongarch_extioi_readw(addr, *data); + return MEMTX_OK; +} + +static inline void extioi_enable_irq(LoongArchExtIOI *s, int index,\ + uint32_t mask, int level) +{ + uint32_t val; + int irq; + + val = mask & s->isr[index]; + irq = ctz32(val); + while (irq != 32) { + /* + * enable bit change from 0 to 1, + * need to update irq by pending bits + */ + extioi_update_irq(s, irq + index * 32, level); + val &= ~(1 << irq); + irq = ctz32(val); + } +} + +static MemTxResult extioi_writew(void *opaque, hwaddr addr, + uint64_t val, unsigned size, + MemTxAttrs attrs) +{ + LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque); + int i, cpu, index, old_data, irq; + uint32_t offset; + + trace_loongarch_extioi_writew(addr, val); + offset = addr & 0xffff; + + switch (offset) { + case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END - 1: + index = (offset - EXTIOI_NODETYPE_START) >> 2; + s->nodetype[index] = val; + break; + case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END - 1: + /* + * ipmap cannot be set at runtime, can be set only at the beginning + * of intr driver, need not update upper irq level + */ + index = (offset - EXTIOI_IPMAP_START) >> 2; + s->ipmap[index] = val; + /* + * loongarch only support little endian, + * so we paresd the value with little endian. + */ + val = cpu_to_le64(val); + for (i = 0; i < 4; i++) { + uint8_t ipnum; + ipnum = val & 0xff; + ipnum = ctz32(ipnum); + ipnum = (ipnum >= 4) ? 0 : ipnum; + s->sw_ipmap[index * 4 + i] = ipnum; + val = val >> 8; + } + + break; + case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END - 1: + index = (offset - EXTIOI_ENABLE_START) >> 2; + old_data = s->enable[index]; + s->enable[index] = val; + + /* unmask irq */ + val = s->enable[index] & ~old_data; + extioi_enable_irq(s, index, val, 1); + + /* mask irq */ + val = ~s->enable[index] & old_data; + extioi_enable_irq(s, index, val, 0); + break; + case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END - 1: + /* do not emulate hw bounced irq routing */ + index = (offset - EXTIOI_BOUNCE_START) >> 2; + s->bounce[index] = val; + break; + case EXTIOI_COREISR_START ... 
EXTIOI_COREISR_END - 1: + index = (offset - EXTIOI_COREISR_START) >> 2; + /* using attrs to get current cpu index */ + cpu = attrs.requester_id; + old_data = s->coreisr[cpu][index]; + s->coreisr[cpu][index] = old_data & ~val; + /* write 1 to clear interrrupt */ + old_data &= val; + irq = ctz32(old_data); + while (irq != 32) { + extioi_update_irq(s, irq + index * 32, 0); + old_data &= ~(1 << irq); + irq = ctz32(old_data); + } + break; + case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END - 1: + irq = offset - EXTIOI_COREMAP_START; + index = irq / 4; + s->coremap[index] = val; + /* + * loongarch only support little endian, + * so we paresd the value with little endian. + */ + val = cpu_to_le64(val); + + for (i = 0; i < 4; i++) { + cpu = val & 0xff; + cpu = ctz32(cpu); + cpu = (cpu >= 4) ? 0 : cpu; + val = val >> 8; + + if (s->sw_coremap[irq + i] == cpu) { + continue; + } + + if (test_bit(irq, (unsigned long *)s->isr)) { + /* + * lower irq at old cpu and raise irq at new cpu + */ + extioi_update_irq(s, irq + i, 0); + s->sw_coremap[irq + i] = cpu; + extioi_update_irq(s, irq + i, 1); + } else { + s->sw_coremap[irq + i] = cpu; + } + } + break; + default: + break; + } + return MEMTX_OK; +} + +static const MemoryRegionOps extioi_ops = { + .read_with_attrs = extioi_readw, + .write_with_attrs = extioi_writew, + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .valid.min_access_size = 4, + .valid.max_access_size = 8, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const VMStateDescription vmstate_loongarch_extioi = { + .name = TYPE_LOONGARCH_EXTIOI, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(bounce, LoongArchExtIOI, EXTIOI_IRQS_GROUP_COUNT), + VMSTATE_UINT32_2DARRAY(coreisr, LoongArchExtIOI, LOONGARCH_MAX_VCPUS, + EXTIOI_IRQS_GROUP_COUNT), + VMSTATE_UINT32_ARRAY(nodetype, LoongArchExtIOI, + EXTIOI_IRQS_NODETYPE_COUNT / 2), + VMSTATE_UINT32_ARRAY(enable, LoongArchExtIOI, EXTIOI_IRQS / 32), + VMSTATE_UINT32_ARRAY(isr, LoongArchExtIOI, EXTIOI_IRQS / 32), + VMSTATE_UINT32_ARRAY(ipmap, LoongArchExtIOI, EXTIOI_IRQS_IPMAP_SIZE / 4), + VMSTATE_UINT32_ARRAY(coremap, LoongArchExtIOI, EXTIOI_IRQS / 4), + VMSTATE_UINT8_ARRAY(sw_ipmap, LoongArchExtIOI, EXTIOI_IRQS_IPMAP_SIZE), + VMSTATE_UINT8_ARRAY(sw_coremap, LoongArchExtIOI, EXTIOI_IRQS), + + VMSTATE_END_OF_LIST() + } +}; + +static void loongarch_extioi_instance_init(Object *obj) +{ + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + LoongArchExtIOI *s = LOONGARCH_EXTIOI(obj); + int i, cpu, pin; + + for (i = 0; i < EXTIOI_IRQS; i++) { + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]); + } + + qdev_init_gpio_in(DEVICE(obj), extioi_setirq, EXTIOI_IRQS); + + for (cpu = 0; cpu < LOONGARCH_MAX_VCPUS; cpu++) { + memory_region_init_io(&s->extioi_iocsr_mem[cpu], OBJECT(s), &extioi_ops, + s, "extioi_iocsr", 0x900); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->extioi_iocsr_mem[cpu]); + for (pin = 0; pin < LS3A_INTC_IP; pin++) { + qdev_init_gpio_out(DEVICE(obj), &s->parent_irq[cpu][pin], 1); + } + } + memory_region_init_io(&s->extioi_system_mem, OBJECT(s), &extioi_ops, + s, "extioi_system_mem", 0x900); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->extioi_system_mem); +} + +static void loongarch_extioi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_loongarch_extioi; +} + +static const TypeInfo loongarch_extioi_info = { + .name = TYPE_LOONGARCH_EXTIOI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = loongarch_extioi_instance_init, + .instance_size = 
sizeof(struct LoongArchExtIOI), + .class_init = loongarch_extioi_class_init, +}; + +static void loongarch_extioi_register_types(void) +{ + type_register_static(&loongarch_extioi_info); +} + +type_init(loongarch_extioi_register_types) diff --git a/hw/intc/loongarch_ipi.c b/hw/intc/loongarch_ipi.c new file mode 100644 index 0000000000..aa4bf9eb74 --- /dev/null +++ b/hw/intc/loongarch_ipi.c @@ -0,0 +1,273 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch ipi interrupt support + * + * Copyright (C) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/intc/loongarch_ipi.h" +#include "hw/irq.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "exec/address-spaces.h" +#include "hw/loongarch/virt.h" +#include "migration/vmstate.h" +#include "target/loongarch/internals.h" +#include "trace.h" + +static uint64_t loongarch_ipi_readl(void *opaque, hwaddr addr, unsigned size) +{ + IPICore *s = opaque; + uint64_t ret = 0; + int index = 0; + + addr &= 0xff; + switch (addr) { + case CORE_STATUS_OFF: + ret = s->status; + break; + case CORE_EN_OFF: + ret = s->en; + break; + case CORE_SET_OFF: + ret = 0; + break; + case CORE_CLEAR_OFF: + ret = 0; + break; + case CORE_BUF_20 ... CORE_BUF_38 + 4: + index = (addr - CORE_BUF_20) >> 2; + ret = s->buf[index]; + break; + default: + qemu_log_mask(LOG_UNIMP, "invalid read: %x", (uint32_t)addr); + break; + } + + trace_loongarch_ipi_read(size, (uint64_t)addr, ret); + return ret; +} + +static void send_ipi_data(CPULoongArchState *env, target_ulong val, target_ulong addr) +{ + int i, mask = 0, data = 0; + + /* + * bit 27-30 is mask for byte writing, + * if the mask is 0, we need not to do anything. + */ + if ((val >> 27) & 0xf) { + data = address_space_ldl(&env->address_space_iocsr, addr, + MEMTXATTRS_UNSPECIFIED, NULL); + for (i = 0; i < 4; i++) { + /* get mask for byte writing */ + if (val & (0x1 << (27 + i))) { + mask |= 0xff << (i * 8); + } + } + } + + data &= mask; + data |= (val >> 32) & ~mask; + address_space_stl(&env->address_space_iocsr, addr, + data, MEMTXATTRS_UNSPECIFIED, NULL); +} + +static void ipi_send(uint64_t val) +{ + int cpuid, data; + CPULoongArchState *env; + CPUState *cs; + LoongArchCPU *cpu; + + cpuid = (val >> 16) & 0x3ff; + /* IPI status vector */ + data = 1 << (val & 0x1f); + cs = qemu_get_cpu(cpuid); + cpu = LOONGARCH_CPU(cs); + env = &cpu->env; + address_space_stl(&env->address_space_iocsr, 0x1008, + data, MEMTXATTRS_UNSPECIFIED, NULL); + +} + +static void mail_send(uint64_t val) +{ + int cpuid; + hwaddr addr; + CPULoongArchState *env; + CPUState *cs; + LoongArchCPU *cpu; + + cpuid = (val >> 16) & 0x3ff; + addr = 0x1020 + (val & 0x1c); + cs = qemu_get_cpu(cpuid); + cpu = LOONGARCH_CPU(cs); + env = &cpu->env; + send_ipi_data(env, val, addr); +} + +static void any_send(uint64_t val) +{ + int cpuid; + hwaddr addr; + CPULoongArchState *env; + + cpuid = (val >> 16) & 0x3ff; + addr = val & 0xffff; + CPUState *cs = qemu_get_cpu(cpuid); + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + env = &cpu->env; + send_ipi_data(env, val, addr); +} + +static void loongarch_ipi_writel(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + IPICore *s = opaque; + int index = 0; + + addr &= 0xff; + trace_loongarch_ipi_write(size, (uint64_t)addr, val); + switch (addr) { + case CORE_STATUS_OFF: + qemu_log_mask(LOG_GUEST_ERROR, "can not be written"); + break; + case CORE_EN_OFF: + s->en = val; + break; + case CORE_SET_OFF: + s->status |= val; + if (s->status != 0 && (s->status & 
s->en) != 0) { + qemu_irq_raise(s->irq); + } + break; + case CORE_CLEAR_OFF: + s->status &= ~val; + if (s->status == 0 && s->en != 0) { + qemu_irq_lower(s->irq); + } + break; + case CORE_BUF_20 ... CORE_BUF_38 + 4: + index = (addr - CORE_BUF_20) >> 2; + s->buf[index] = val; + break; + case IOCSR_IPI_SEND: + ipi_send(val); + break; + default: + qemu_log_mask(LOG_UNIMP, "invalid write: %x", (uint32_t)addr); + break; + } +} + +static const MemoryRegionOps loongarch_ipi_ops = { + .read = loongarch_ipi_readl, + .write = loongarch_ipi_writel, + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .valid.min_access_size = 4, + .valid.max_access_size = 8, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +/* mail send and any send only support writeq */ +static void loongarch_ipi_writeq(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + addr &= 0xfff; + switch (addr) { + case MAIL_SEND_OFFSET: + mail_send(val); + break; + case ANY_SEND_OFFSET: + any_send(val); + break; + default: + break; + } +} + +static const MemoryRegionOps loongarch_ipi64_ops = { + .write = loongarch_ipi_writeq, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void loongarch_ipi_init(Object *obj) +{ + int cpu; + LoongArchMachineState *lams; + LoongArchIPI *s = LOONGARCH_IPI(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + Object *machine = qdev_get_machine(); + ObjectClass *mc = object_get_class(machine); + /* 'lams' should be initialized */ + if (!strcmp(MACHINE_CLASS(mc)->name, "none")) { + return; + } + lams = LOONGARCH_MACHINE(machine); + for (cpu = 0; cpu < MAX_IPI_CORE_NUM; cpu++) { + memory_region_init_io(&s->ipi_iocsr_mem[cpu], obj, &loongarch_ipi_ops, + &lams->ipi_core[cpu], "loongarch_ipi_iocsr", 0x48); + sysbus_init_mmio(sbd, &s->ipi_iocsr_mem[cpu]); + + memory_region_init_io(&s->ipi64_iocsr_mem[cpu], obj, &loongarch_ipi64_ops, + &lams->ipi_core[cpu], "loongarch_ipi64_iocsr", 0x118); + sysbus_init_mmio(sbd, &s->ipi64_iocsr_mem[cpu]); + qdev_init_gpio_out(DEVICE(obj), &lams->ipi_core[cpu].irq, 1); + } +} + +static const VMStateDescription vmstate_ipi_core = { + .name = "ipi-single", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(status, IPICore), + VMSTATE_UINT32(en, IPICore), + VMSTATE_UINT32(set, IPICore), + VMSTATE_UINT32(clear, IPICore), + VMSTATE_UINT32_ARRAY(buf, IPICore, MAX_IPI_MBX_NUM * 2), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_loongarch_ipi = { + .name = TYPE_LOONGARCH_IPI, + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(ipi_core, LoongArchMachineState, + MAX_IPI_CORE_NUM, 0, + vmstate_ipi_core, IPICore), + VMSTATE_END_OF_LIST() + } +}; + +static void loongarch_ipi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_loongarch_ipi; +} + +static const TypeInfo loongarch_ipi_info = { + .name = TYPE_LOONGARCH_IPI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(LoongArchIPI), + .instance_init = loongarch_ipi_init, + .class_init = loongarch_ipi_class_init, +}; + +static void loongarch_ipi_register_types(void) +{ + type_register_static(&loongarch_ipi_info); +} + +type_init(loongarch_ipi_register_types) diff --git a/hw/intc/loongarch_pch_msi.c b/hw/intc/loongarch_pch_msi.c new file mode 100644 index 0000000000..b36d6d76e4 --- /dev/null +++ b/hw/intc/loongarch_pch_msi.c @@ -0,0 
+1,91 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU Loongson 7A1000 msi interrupt controller. + * + * Copyright (C) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/intc/loongarch_pch_msi.h" +#include "hw/intc/loongarch_pch_pic.h" +#include "hw/pci/msi.h" +#include "hw/misc/unimp.h" +#include "migration/vmstate.h" +#include "trace.h" + +static uint64_t loongarch_msi_mem_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + +static void loongarch_msi_mem_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + LoongArchPCHMSI *s = (LoongArchPCHMSI *)opaque; + int irq_num; + + /* + * vector number is irq number from upper extioi intc + * need subtract irq base to get msi vector offset + */ + irq_num = (val & 0xff) - s->irq_base; + trace_loongarch_msi_set_irq(irq_num); + assert(irq_num < PCH_MSI_IRQ_NUM); + qemu_set_irq(s->pch_msi_irq[irq_num], 1); +} + +static const MemoryRegionOps loongarch_pch_msi_ops = { + .read = loongarch_msi_mem_read, + .write = loongarch_msi_mem_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void pch_msi_irq_handler(void *opaque, int irq, int level) +{ + LoongArchPCHMSI *s = LOONGARCH_PCH_MSI(opaque); + + qemu_set_irq(s->pch_msi_irq[irq], level); +} + +static void loongarch_pch_msi_init(Object *obj) +{ + LoongArchPCHMSI *s = LOONGARCH_PCH_MSI(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->msi_mmio, obj, &loongarch_pch_msi_ops, + s, TYPE_LOONGARCH_PCH_MSI, 0x8); + sysbus_init_mmio(sbd, &s->msi_mmio); + msi_nonbroken = true; + + qdev_init_gpio_out(DEVICE(obj), s->pch_msi_irq, PCH_MSI_IRQ_NUM); + qdev_init_gpio_in(DEVICE(obj), pch_msi_irq_handler, PCH_MSI_IRQ_NUM); +} + +static Property loongarch_msi_properties[] = { + DEFINE_PROP_UINT32("msi_irq_base", LoongArchPCHMSI, irq_base, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void loongarch_pch_msi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, loongarch_msi_properties); +} + +static const TypeInfo loongarch_pch_msi_info = { + .name = TYPE_LOONGARCH_PCH_MSI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(LoongArchPCHMSI), + .instance_init = loongarch_pch_msi_init, + .class_init = loongarch_pch_msi_class_init, +}; + +static void loongarch_pch_msi_register_types(void) +{ + type_register_static(&loongarch_pch_msi_info); +} + +type_init(loongarch_pch_msi_register_types) diff --git a/hw/intc/loongarch_pch_pic.c b/hw/intc/loongarch_pch_pic.c new file mode 100644 index 0000000000..3380b09807 --- /dev/null +++ b/hw/intc/loongarch_pch_pic.c @@ -0,0 +1,431 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU Loongson 7A1000 I/O interrupt controller. 
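A minimal standalone sketch (not part of the patch itself) of the doorbell decode done by loongarch_msi_mem_write() above: the low byte of the written value carries the extioi vector number, and subtracting the controller's msi_irq_base property yields the local MSI line to raise. The vector count and base value used below are assumptions for the example only.

    /* Illustrative decode, mirroring loongarch_msi_mem_write(). */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_MSI_IRQ_NUM 192   /* assumed number of MSI vectors */

    static int msi_doorbell_to_line(uint64_t val, unsigned irq_base)
    {
        int irq_num = (int)(val & 0xff) - (int)irq_base;   /* low byte = extioi vector */

        assert(irq_num >= 0 && irq_num < EXAMPLE_MSI_IRQ_NUM);
        return irq_num;
    }

    int main(void)
    {
        /* With an assumed base of 64, a write of 0x47 raises local line 7. */
        printf("%d\n", msi_doorbell_to_line(0x47, 64));
        return 0;
    }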
+ * + * Copyright (C) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/loongarch/virt.h" +#include "hw/irq.h" +#include "hw/intc/loongarch_pch_pic.h" +#include "migration/vmstate.h" +#include "trace.h" + +static void pch_pic_update_irq(LoongArchPCHPIC *s, uint64_t mask, int level) +{ + uint64_t val; + int irq; + + if (level) { + val = mask & s->intirr & ~s->int_mask; + if (val) { + irq = ctz64(val); + s->intisr |= MAKE_64BIT_MASK(irq, 1); + qemu_set_irq(s->parent_irq[s->htmsi_vector[irq]], 1); + } + } else { + val = mask & s->intisr; + if (val) { + irq = ctz64(val); + s->intisr &= ~MAKE_64BIT_MASK(irq, 1); + qemu_set_irq(s->parent_irq[s->htmsi_vector[irq]], 0); + } + } +} + +static void pch_pic_irq_handler(void *opaque, int irq, int level) +{ + LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque); + uint64_t mask = 1ULL << irq; + + assert(irq < PCH_PIC_IRQ_NUM); + trace_loongarch_pch_pic_irq_handler(irq, level); + + if (s->intedge & mask) { + /* Edge triggered */ + if (level) { + if ((s->last_intirr & mask) == 0) { + s->intirr |= mask; + } + s->last_intirr |= mask; + } else { + s->last_intirr &= ~mask; + } + } else { + /* Level triggered */ + if (level) { + s->intirr |= mask; + s->last_intirr |= mask; + } else { + s->intirr &= ~mask; + s->last_intirr &= ~mask; + } + } + pch_pic_update_irq(s, mask, level); +} + +static uint64_t loongarch_pch_pic_low_readw(void *opaque, hwaddr addr, + unsigned size) +{ + LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque); + uint64_t val = 0; + uint32_t offset = addr & 0xfff; + + switch (offset) { + case PCH_PIC_INT_ID_LO: + val = PCH_PIC_INT_ID_VAL; + break; + case PCH_PIC_INT_ID_HI: + val = PCH_PIC_INT_ID_NUM; + break; + case PCH_PIC_INT_MASK_LO: + val = (uint32_t)s->int_mask; + break; + case PCH_PIC_INT_MASK_HI: + val = s->int_mask >> 32; + break; + case PCH_PIC_INT_EDGE_LO: + val = (uint32_t)s->intedge; + break; + case PCH_PIC_INT_EDGE_HI: + val = s->intedge >> 32; + break; + case PCH_PIC_HTMSI_EN_LO: + val = (uint32_t)s->htmsi_en; + break; + case PCH_PIC_HTMSI_EN_HI: + val = s->htmsi_en >> 32; + break; + case PCH_PIC_AUTO_CTRL0_LO: + case PCH_PIC_AUTO_CTRL0_HI: + case PCH_PIC_AUTO_CTRL1_LO: + case PCH_PIC_AUTO_CTRL1_HI: + break; + default: + break; + } + + trace_loongarch_pch_pic_low_readw(size, addr, val); + return val; +} + +static uint64_t get_writew_val(uint64_t value, uint32_t target, bool hi) +{ + uint64_t mask = 0xffffffff00000000; + uint64_t data = target; + + return hi ? 
(value & ~mask) | (data << 32) : (value & mask) | data; +} + +static void loongarch_pch_pic_low_writew(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque); + uint32_t offset, old_valid, data = (uint32_t)value; + uint64_t old, int_mask; + offset = addr & 0xfff; + + trace_loongarch_pch_pic_low_writew(size, addr, data); + + switch (offset) { + case PCH_PIC_INT_MASK_LO: + old = s->int_mask; + s->int_mask = get_writew_val(old, data, 0); + old_valid = (uint32_t)old; + if (old_valid & ~data) { + pch_pic_update_irq(s, (old_valid & ~data), 1); + } + if (~old_valid & data) { + pch_pic_update_irq(s, (~old_valid & data), 0); + } + break; + case PCH_PIC_INT_MASK_HI: + old = s->int_mask; + s->int_mask = get_writew_val(old, data, 1); + old_valid = (uint32_t)(old >> 32); + int_mask = old_valid & ~data; + if (int_mask) { + pch_pic_update_irq(s, int_mask << 32, 1); + } + int_mask = ~old_valid & data; + if (int_mask) { + pch_pic_update_irq(s, int_mask << 32, 0); + } + break; + case PCH_PIC_INT_EDGE_LO: + s->intedge = get_writew_val(s->intedge, data, 0); + break; + case PCH_PIC_INT_EDGE_HI: + s->intedge = get_writew_val(s->intedge, data, 1); + break; + case PCH_PIC_INT_CLEAR_LO: + if (s->intedge & data) { + s->intirr &= (~data); + pch_pic_update_irq(s, data, 0); + s->intisr &= (~data); + } + break; + case PCH_PIC_INT_CLEAR_HI: + value <<= 32; + if (s->intedge & value) { + s->intirr &= (~value); + pch_pic_update_irq(s, value, 0); + s->intisr &= (~value); + } + break; + case PCH_PIC_HTMSI_EN_LO: + s->htmsi_en = get_writew_val(s->htmsi_en, data, 0); + break; + case PCH_PIC_HTMSI_EN_HI: + s->htmsi_en = get_writew_val(s->htmsi_en, data, 1); + break; + case PCH_PIC_AUTO_CTRL0_LO: + case PCH_PIC_AUTO_CTRL0_HI: + case PCH_PIC_AUTO_CTRL1_LO: + case PCH_PIC_AUTO_CTRL1_HI: + break; + default: + break; + } +} + +static uint64_t loongarch_pch_pic_high_readw(void *opaque, hwaddr addr, + unsigned size) +{ + LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque); + uint64_t val = 0; + uint32_t offset = addr & 0xfff; + + switch (offset) { + case STATUS_LO_START: + val = (uint32_t)(s->intisr & (~s->int_mask)); + break; + case STATUS_HI_START: + val = (s->intisr & (~s->int_mask)) >> 32; + break; + case POL_LO_START: + val = (uint32_t)s->int_polarity; + break; + case POL_HI_START: + val = s->int_polarity >> 32; + break; + default: + break; + } + + trace_loongarch_pch_pic_high_readw(size, addr, val); + return val; +} + +static void loongarch_pch_pic_high_writew(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque); + uint32_t offset, data = (uint32_t)value; + offset = addr & 0xfff; + + trace_loongarch_pch_pic_high_writew(size, addr, data); + + switch (offset) { + case STATUS_LO_START: + s->intisr = get_writew_val(s->intisr, data, 0); + break; + case STATUS_HI_START: + s->intisr = get_writew_val(s->intisr, data, 1); + break; + case POL_LO_START: + s->int_polarity = get_writew_val(s->int_polarity, data, 0); + break; + case POL_HI_START: + s->int_polarity = get_writew_val(s->int_polarity, data, 1); + break; + default: + break; + } +} + +static uint64_t loongarch_pch_pic_readb(void *opaque, hwaddr addr, + unsigned size) +{ + LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque); + uint64_t val = 0; + uint32_t offset = (addr & 0xfff) + PCH_PIC_ROUTE_ENTRY_OFFSET; + int64_t offset_tmp; + + switch (offset) { + case PCH_PIC_HTMSI_VEC_OFFSET ... 
PCH_PIC_HTMSI_VEC_END: + offset_tmp = offset - PCH_PIC_HTMSI_VEC_OFFSET; + if (offset_tmp >= 0 && offset_tmp < 64) { + val = s->htmsi_vector[offset_tmp]; + } + break; + case PCH_PIC_ROUTE_ENTRY_OFFSET ... PCH_PIC_ROUTE_ENTRY_END: + offset_tmp = offset - PCH_PIC_ROUTE_ENTRY_OFFSET; + if (offset_tmp >= 0 && offset_tmp < 64) { + val = s->route_entry[offset_tmp]; + } + break; + default: + break; + } + + trace_loongarch_pch_pic_readb(size, addr, val); + return val; +} + +static void loongarch_pch_pic_writeb(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque); + int32_t offset_tmp; + uint32_t offset = (addr & 0xfff) + PCH_PIC_ROUTE_ENTRY_OFFSET; + + trace_loongarch_pch_pic_writeb(size, addr, data); + + switch (offset) { + case PCH_PIC_HTMSI_VEC_OFFSET ... PCH_PIC_HTMSI_VEC_END: + offset_tmp = offset - PCH_PIC_HTMSI_VEC_OFFSET; + if (offset_tmp >= 0 && offset_tmp < 64) { + s->htmsi_vector[offset_tmp] = (uint8_t)(data & 0xff); + } + break; + case PCH_PIC_ROUTE_ENTRY_OFFSET ... PCH_PIC_ROUTE_ENTRY_END: + offset_tmp = offset - PCH_PIC_ROUTE_ENTRY_OFFSET; + if (offset_tmp >= 0 && offset_tmp < 64) { + s->route_entry[offset_tmp] = (uint8_t)(data & 0xff); + } + break; + default: + break; + } +} + +static const MemoryRegionOps loongarch_pch_pic_reg32_low_ops = { + .read = loongarch_pch_pic_low_readw, + .write = loongarch_pch_pic_low_writew, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const MemoryRegionOps loongarch_pch_pic_reg32_high_ops = { + .read = loongarch_pch_pic_high_readw, + .write = loongarch_pch_pic_high_writew, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const MemoryRegionOps loongarch_pch_pic_reg8_ops = { + .read = loongarch_pch_pic_readb, + .write = loongarch_pch_pic_writeb, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void loongarch_pch_pic_reset(DeviceState *d) +{ + LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(d); + int i; + + s->int_mask = -1; + s->htmsi_en = 0x0; + s->intedge = 0x0; + s->intclr = 0x0; + s->auto_crtl0 = 0x0; + s->auto_crtl1 = 0x0; + for (i = 0; i < 64; i++) { + s->route_entry[i] = 0x1; + s->htmsi_vector[i] = 0x0; + } + s->intirr = 0x0; + s->intisr = 0x0; + s->last_intirr = 0x0; + s->int_polarity = 0x0; +} + +static void loongarch_pch_pic_init(Object *obj) +{ + LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem32_low, obj, + &loongarch_pch_pic_reg32_low_ops, + s, PCH_PIC_NAME(.reg32_part1), 0x100); + memory_region_init_io(&s->iomem8, obj, &loongarch_pch_pic_reg8_ops, + s, PCH_PIC_NAME(.reg8), 0x2a0); + memory_region_init_io(&s->iomem32_high, obj, + &loongarch_pch_pic_reg32_high_ops, + s, PCH_PIC_NAME(.reg32_part2), 0xc60); + sysbus_init_mmio(sbd, &s->iomem32_low); + sysbus_init_mmio(sbd, &s->iomem8); + sysbus_init_mmio(sbd, &s->iomem32_high); + + qdev_init_gpio_out(DEVICE(obj), s->parent_irq, PCH_PIC_IRQ_NUM); + qdev_init_gpio_in(DEVICE(obj), pch_pic_irq_handler, PCH_PIC_IRQ_NUM); +} + +static const VMStateDescription vmstate_loongarch_pch_pic = { + .name = TYPE_LOONGARCH_PCH_PIC, + .version_id = 1, + .minimum_version_id = 1, + .fields = 
(VMStateField[]) { + VMSTATE_UINT64(int_mask, LoongArchPCHPIC), + VMSTATE_UINT64(htmsi_en, LoongArchPCHPIC), + VMSTATE_UINT64(intedge, LoongArchPCHPIC), + VMSTATE_UINT64(intclr, LoongArchPCHPIC), + VMSTATE_UINT64(auto_crtl0, LoongArchPCHPIC), + VMSTATE_UINT64(auto_crtl1, LoongArchPCHPIC), + VMSTATE_UINT8_ARRAY(route_entry, LoongArchPCHPIC, 64), + VMSTATE_UINT8_ARRAY(htmsi_vector, LoongArchPCHPIC, 64), + VMSTATE_UINT64(last_intirr, LoongArchPCHPIC), + VMSTATE_UINT64(intirr, LoongArchPCHPIC), + VMSTATE_UINT64(intisr, LoongArchPCHPIC), + VMSTATE_UINT64(int_polarity, LoongArchPCHPIC), + VMSTATE_END_OF_LIST() + } +}; + +static void loongarch_pch_pic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = loongarch_pch_pic_reset; + dc->vmsd = &vmstate_loongarch_pch_pic; +} + +static const TypeInfo loongarch_pch_pic_info = { + .name = TYPE_LOONGARCH_PCH_PIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(LoongArchPCHPIC), + .instance_init = loongarch_pch_pic_init, + .class_init = loongarch_pch_pic_class_init, +}; + +static void loongarch_pch_pic_register_types(void) +{ + type_register_static(&loongarch_pch_pic_info); +} + +type_init(loongarch_pch_pic_register_types) diff --git a/hw/intc/m68k_irqc.c b/hw/intc/m68k_irqc.c index 2133d2a698..0c515e4ecb 100644 --- a/hw/intc/m68k_irqc.c +++ b/hw/intc/m68k_irqc.c @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * QEMU Motorola 680x0 IRQ Controller * diff --git a/hw/intc/meson.build b/hw/intc/meson.build index 6e52a166e3..bcbf22ff51 100644 --- a/hw/intc/meson.build +++ b/hw/intc/meson.build @@ -3,10 +3,13 @@ softmmu_ss.add(when: 'CONFIG_ARM_GIC', if_true: files( 'arm_gic.c', 'arm_gic_common.c', 'arm_gicv2m.c', - 'arm_gicv3.c', 'arm_gicv3_common.c', - 'arm_gicv3_dist.c', 'arm_gicv3_its_common.c', +)) +softmmu_ss.add(when: 'CONFIG_ARM_GICV3_TCG', if_true: files( + 'arm_gicv3.c', + 'arm_gicv3_dist.c', + 'arm_gicv3_its.c', 'arm_gicv3_redist.c', )) softmmu_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_pic.c')) @@ -24,14 +27,14 @@ softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP_PMU', if_true: files('xlnx-pmu-iomod-in specific_ss.add(when: 'CONFIG_ALLWINNER_A10_PIC', if_true: files('allwinner-a10-pic.c')) specific_ss.add(when: 'CONFIG_APIC', if_true: files('apic.c', 'apic_common.c')) -specific_ss.add(when: 'CONFIG_ARM_GIC', if_true: files('arm_gicv3_cpuif.c')) +specific_ss.add(when: 'CONFIG_ARM_GIC', if_true: files('arm_gicv3_cpuif_common.c')) +specific_ss.add(when: 'CONFIG_ARM_GICV3_TCG', if_true: files('arm_gicv3_cpuif.c')) specific_ss.add(when: 'CONFIG_ARM_GIC_KVM', if_true: files('arm_gic_kvm.c')) specific_ss.add(when: ['CONFIG_ARM_GIC_KVM', 'TARGET_AARCH64'], if_true: files('arm_gicv3_kvm.c', 'arm_gicv3_its_kvm.c')) specific_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m_nvic.c')) specific_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_vic.c')) specific_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_gic.c', 'exynos4210_combiner.c')) specific_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_irqmp.c')) -specific_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_plic.c')) specific_ss.add(when: 'CONFIG_IOAPIC', if_true: files('ioapic.c')) specific_ss.add(when: 'CONFIG_LOONGSON_LIOINTC', if_true: files('loongson_liointc.c')) specific_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('mips_gic.c')) @@ -39,16 +42,18 @@ specific_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_intc.c')) specific_ss.add(when: 
'CONFIG_OMPIC', if_true: files('ompic.c')) specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_OPENPIC'], if_true: files('openpic_kvm.c')) -specific_ss.add(when: 'CONFIG_POWERNV', if_true: files('xics_pnv.c', 'pnv_xive.c')) +specific_ss.add(when: 'CONFIG_POWERNV', if_true: files('xics_pnv.c', 'pnv_xive.c', 'pnv_xive2.c')) specific_ss.add(when: 'CONFIG_PPC_UIC', if_true: files('ppc-uic.c')) specific_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_ic.c', 'bcm2836_control.c')) specific_ss.add(when: 'CONFIG_RX_ICU', if_true: files('rx_icu.c')) specific_ss.add(when: 'CONFIG_S390_FLIC', if_true: files('s390_flic.c')) specific_ss.add(when: 'CONFIG_S390_FLIC_KVM', if_true: files('s390_flic_kvm.c')) specific_ss.add(when: 'CONFIG_SH_INTC', if_true: files('sh_intc.c')) -specific_ss.add(when: 'CONFIG_SIFIVE_CLINT', if_true: files('sifive_clint.c')) +specific_ss.add(when: 'CONFIG_RISCV_ACLINT', if_true: files('riscv_aclint.c')) +specific_ss.add(when: 'CONFIG_RISCV_APLIC', if_true: files('riscv_aplic.c')) +specific_ss.add(when: 'CONFIG_RISCV_IMSIC', if_true: files('riscv_imsic.c')) specific_ss.add(when: 'CONFIG_SIFIVE_PLIC', if_true: files('sifive_plic.c')) -specific_ss.add(when: 'CONFIG_XICS', if_true: files('xics.c')) +specific_ss.add(when: 'CONFIG_XICS', if_true: files('xics.c', 'xive2.c')) specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_XICS'], if_true: files('xics_kvm.c')) specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('xics_spapr.c', 'spapr_xive.c')) @@ -57,3 +62,8 @@ specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_XIVE'], if_true: files('spapr_xive_kvm.c')) specific_ss.add(when: 'CONFIG_GOLDFISH_PIC', if_true: files('goldfish_pic.c')) specific_ss.add(when: 'CONFIG_M68K_IRQC', if_true: files('m68k_irqc.c')) +specific_ss.add(when: 'CONFIG_NIOS2_VIC', if_true: files('nios2_vic.c')) +specific_ss.add(when: 'CONFIG_LOONGARCH_IPI', if_true: files('loongarch_ipi.c')) +specific_ss.add(when: 'CONFIG_LOONGARCH_PCH_PIC', if_true: files('loongarch_pch_pic.c')) +specific_ss.add(when: 'CONFIG_LOONGARCH_PCH_MSI', if_true: files('loongarch_pch_msi.c')) +specific_ss.add(when: 'CONFIG_LOONGARCH_EXTIOI', if_true: files('loongarch_extioi.c')) diff --git a/hw/intc/nios2_vic.c b/hw/intc/nios2_vic.c new file mode 100644 index 0000000000..cf63212a88 --- /dev/null +++ b/hw/intc/nios2_vic.c @@ -0,0 +1,313 @@ +/* + * Vectored Interrupt Controller for nios2 processor + * + * Copyright (c) 2022 Neuroblade + * + * Interface: + * QOM property "cpu": link to the Nios2 CPU (must be set) + * Unnamed GPIO inputs 0..NIOS2_VIC_MAX_IRQ-1: input IRQ lines + * IRQ should be connected to nios2 IRQ0. + * + * Reference: "Embedded Peripherals IP User Guide + * for Intel® Quartus® Prime Design Suite: 21.4" + * Chapter 38 "Vectored Interrupt Controller Core" + * See: https://www.intel.com/content/www/us/en/docs/programmable/683130/21-4/vectored-interrupt-controller-core.html + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
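A hedged usage sketch of the interface described in the nios2_vic header comment above, showing how a board model might wire the VIC: the mandatory "cpu" link is set before realize, the CSR block is mapped, and the VIC output feeds the CPU's IRQ0 input. This is illustration only, not patch content; the base address and the qemu_irq handed in for IRQ0 are left to the board code.

    /* Assumes the usual qdev/sysbus/error headers plus hw/intc/nios2_vic.h. */
    static void example_wire_nios2_vic(Nios2CPU *cpu, hwaddr base, qemu_irq cpu_irq0)
    {
        DeviceState *vic = qdev_new(TYPE_NIOS2_VIC);

        /* The "cpu" link must be set before realize, or realize fails. */
        object_property_set_link(OBJECT(vic), "cpu", OBJECT(cpu), &error_fatal);
        sysbus_realize_and_unref(SYS_BUS_DEVICE(vic), &error_fatal);

        /* Map the CSR window and route the VIC output into the CPU's IRQ0. */
        sysbus_mmio_map(SYS_BUS_DEVICE(vic), 0, base);
        sysbus_connect_irq(SYS_BUS_DEVICE(vic), 0, cpu_irq0);
    }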
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" + +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qom/object.h" +#include "hw/intc/nios2_vic.h" +#include "cpu.h" + + +enum { + INT_CONFIG0 = 0, + INT_CONFIG31 = 31, + INT_ENABLE = 32, + INT_ENABLE_SET = 33, + INT_ENABLE_CLR = 34, + INT_PENDING = 35, + INT_RAW_STATUS = 36, + SW_INTERRUPT = 37, + SW_INTERRUPT_SET = 38, + SW_INTERRUPT_CLR = 39, + VIC_CONFIG = 40, + VIC_STATUS = 41, + VEC_TBL_BASE = 42, + VEC_TBL_ADDR = 43, + CSR_COUNT /* Last! */ +}; + +/* Requested interrupt level (INT_CONFIG[0:5]) */ +static inline uint32_t vic_int_config_ril(const Nios2VIC *vic, int irq_num) +{ + return extract32(vic->int_config[irq_num], 0, 6); +} + +/* Requested NMI (INT_CONFIG[6]) */ +static inline uint32_t vic_int_config_rnmi(const Nios2VIC *vic, int irq_num) +{ + return extract32(vic->int_config[irq_num], 6, 1); +} + +/* Requested register set (INT_CONFIG[7:12]) */ +static inline uint32_t vic_int_config_rrs(const Nios2VIC *vic, int irq_num) +{ + return extract32(vic->int_config[irq_num], 7, 6); +} + +static inline uint32_t vic_config_vec_size(const Nios2VIC *vic) +{ + return 1 << (2 + extract32(vic->vic_config, 0, 3)); +} + +static inline uint32_t vic_int_pending(const Nios2VIC *vic) +{ + return (vic->int_raw_status | vic->sw_int) & vic->int_enable; +} + +static void vic_update_irq(Nios2VIC *vic) +{ + Nios2CPU *cpu = NIOS2_CPU(vic->cpu); + uint32_t pending = vic_int_pending(vic); + int irq = -1; + int max_ril = 0; + /* Note that if RIL is 0 for an interrupt it is effectively disabled */ + + vic->vec_tbl_addr = 0; + vic->vic_status = 0; + + if (pending == 0) { + qemu_irq_lower(vic->output_int); + return; + } + + for (int i = 0; i < NIOS2_VIC_MAX_IRQ; i++) { + if (pending & BIT(i)) { + int ril = vic_int_config_ril(vic, i); + if (ril > max_ril) { + irq = i; + max_ril = ril; + } + } + } + + if (irq < 0) { + qemu_irq_lower(vic->output_int); + return; + } + + vic->vec_tbl_addr = irq * vic_config_vec_size(vic) + vic->vec_tbl_base; + vic->vic_status = irq | BIT(31); + + /* + * In hardware, the interface between the VIC and the CPU is via the + * External Interrupt Controller interface, where the interrupt controller + * presents the CPU with a packet of data containing: + * - Requested Handler Address (RHA): 32 bits + * - Requested Register Set (RRS) : 6 bits + * - Requested Interrupt Level (RIL) : 6 bits + * - Requested NMI flag (RNMI) : 1 bit + * In our emulation, we implement this by writing the data directly to + * fields in the CPU object and then raising the IRQ line to tell + * the CPU that we've done so. 
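For reference only (not patch content): a standalone arithmetic check of the vec_tbl_addr computation in vic_update_irq() above. The Requested Handler Address is vec_tbl_base plus the winning IRQ number times the per-entry size selected by VIC_CONFIG[2:0]; the concrete values below are made up for illustration.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Entry size encoded in VIC_CONFIG[2:0]: 1 << (2 + n) bytes. */
    static uint32_t vec_entry_size(uint32_t vic_config)
    {
        return 1u << (2 + (vic_config & 0x7));
    }

    /* Requested Handler Address for a pending IRQ. */
    static uint32_t vec_tbl_addr(uint32_t vec_tbl_base, uint32_t vic_config, int irq)
    {
        return vec_tbl_base + irq * vec_entry_size(vic_config);
    }

    int main(void)
    {
        /* vic_config = 2 selects 16-byte entries, so IRQ 5 lands at base + 0x50. */
        printf("0x%08" PRIx32 "\n", vec_tbl_addr(0x20000000u, 2, 5));
        return 0;
    }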
+ */ + + cpu->rha = vic->vec_tbl_addr; + cpu->ril = max_ril; + cpu->rrs = vic_int_config_rrs(vic, irq); + cpu->rnmi = vic_int_config_rnmi(vic, irq); + + qemu_irq_raise(vic->output_int); +} + +static void vic_set_irq(void *opaque, int irq_num, int level) +{ + Nios2VIC *vic = opaque; + + vic->int_raw_status = deposit32(vic->int_raw_status, irq_num, 1, !!level); + vic_update_irq(vic); +} + +static void nios2_vic_reset(DeviceState *dev) +{ + Nios2VIC *vic = NIOS2_VIC(dev); + + memset(&vic->int_config, 0, sizeof(vic->int_config)); + vic->vic_config = 0; + vic->int_raw_status = 0; + vic->int_enable = 0; + vic->sw_int = 0; + vic->vic_status = 0; + vic->vec_tbl_base = 0; + vic->vec_tbl_addr = 0; +} + +static uint64_t nios2_vic_csr_read(void *opaque, hwaddr offset, unsigned size) +{ + Nios2VIC *vic = opaque; + int index = offset / 4; + + switch (index) { + case INT_CONFIG0 ... INT_CONFIG31: + return vic->int_config[index - INT_CONFIG0]; + case INT_ENABLE: + return vic->int_enable; + case INT_PENDING: + return vic_int_pending(vic); + case INT_RAW_STATUS: + return vic->int_raw_status; + case SW_INTERRUPT: + return vic->sw_int; + case VIC_CONFIG: + return vic->vic_config; + case VIC_STATUS: + return vic->vic_status; + case VEC_TBL_BASE: + return vic->vec_tbl_base; + case VEC_TBL_ADDR: + return vic->vec_tbl_addr; + default: + return 0; + } +} + +static void nios2_vic_csr_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + Nios2VIC *vic = opaque; + int index = offset / 4; + + switch (index) { + case INT_CONFIG0 ... INT_CONFIG31: + vic->int_config[index - INT_CONFIG0] = value; + break; + case INT_ENABLE: + vic->int_enable = value; + break; + case INT_ENABLE_SET: + vic->int_enable |= value; + break; + case INT_ENABLE_CLR: + vic->int_enable &= ~value; + break; + case SW_INTERRUPT: + vic->sw_int = value; + break; + case SW_INTERRUPT_SET: + vic->sw_int |= value; + break; + case SW_INTERRUPT_CLR: + vic->sw_int &= ~value; + break; + case VIC_CONFIG: + vic->vic_config = value; + break; + case VEC_TBL_BASE: + vic->vec_tbl_base = value; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "nios2-vic: write to invalid CSR address %#" + HWADDR_PRIx "\n", offset); + } + + vic_update_irq(vic); +} + +static const MemoryRegionOps nios2_vic_csr_ops = { + .read = nios2_vic_csr_read, + .write = nios2_vic_csr_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { .min_access_size = 4, .max_access_size = 4 } +}; + +static void nios2_vic_realize(DeviceState *dev, Error **errp) +{ + Nios2VIC *vic = NIOS2_VIC(dev); + + if (!vic->cpu) { + /* This is a programming error in the code using this device */ + error_setg(errp, "nios2-vic 'cpu' link property was not set"); + return; + } + + sysbus_init_irq(SYS_BUS_DEVICE(dev), &vic->output_int); + qdev_init_gpio_in(dev, vic_set_irq, NIOS2_VIC_MAX_IRQ); + + memory_region_init_io(&vic->csr, OBJECT(dev), &nios2_vic_csr_ops, vic, + "nios2.vic.csr", CSR_COUNT * sizeof(uint32_t)); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &vic->csr); +} + +static Property nios2_vic_properties[] = { + DEFINE_PROP_LINK("cpu", Nios2VIC, cpu, TYPE_CPU, CPUState *), + DEFINE_PROP_END_OF_LIST() +}; + +static const VMStateDescription nios2_vic_vmstate = { + .name = "nios2-vic", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]){ + VMSTATE_UINT32_ARRAY(int_config, Nios2VIC, 32), + VMSTATE_UINT32(vic_config, Nios2VIC), + VMSTATE_UINT32(int_raw_status, Nios2VIC), + VMSTATE_UINT32(int_enable, Nios2VIC), + VMSTATE_UINT32(sw_int, Nios2VIC), + VMSTATE_UINT32(vic_status, 
Nios2VIC), + VMSTATE_UINT32(vec_tbl_base, Nios2VIC), + VMSTATE_UINT32(vec_tbl_addr, Nios2VIC), + VMSTATE_END_OF_LIST() + }, +}; + +static void nios2_vic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = nios2_vic_reset; + dc->realize = nios2_vic_realize; + dc->vmsd = &nios2_vic_vmstate; + device_class_set_props(dc, nios2_vic_properties); +} + +static const TypeInfo nios2_vic_info = { + .name = TYPE_NIOS2_VIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Nios2VIC), + .class_init = nios2_vic_class_init, +}; + +static void nios2_vic_register_types(void) +{ + type_register_static(&nios2_vic_info); +} + +type_init(nios2_vic_register_types); diff --git a/hw/intc/openpic.c b/hw/intc/openpic.c index 9b4c17854d..c757adbe53 100644 --- a/hw/intc/openpic.c +++ b/hw/intc/openpic.c @@ -25,18 +25,13 @@ /* * * Based on OpenPic implementations: - * - Intel GW80314 I/O companion chip developer's manual * - Motorola MPC8245 & MPC8540 user manuals. - * - Motorola MCP750 (aka Raven) programmer manual. - * - Motorola Harrier programmer manuel - * - * Serial interrupts, as implemented in Raven chipset are not supported yet. + * - Motorola Harrier programmer manual * */ #include "qemu/osdep.h" #include "hw/irq.h" -#include "hw/ppc/mac.h" #include "hw/pci/pci.h" #include "hw/ppc/openpic.h" #include "hw/ppc/ppc_e500.h" @@ -51,7 +46,7 @@ #include "qemu/timer.h" #include "qemu/error-report.h" -//#define DEBUG_OPENPIC +/* #define DEBUG_OPENPIC */ #ifdef DEBUG_OPENPIC static const int debug_openpic = 1; @@ -122,7 +117,8 @@ static FslMpicInfo fsl_mpic_42 = { #define ILR_INTTGT_CINT 0x01 /* critical */ #define ILR_INTTGT_MCP 0x02 /* machine check */ -/* The currently supported INTTGT values happen to be the same as QEMU's +/* + * The currently supported INTTGT values happen to be the same as QEMU's * openpic output codes, but don't depend on this. The output codes * could change (unlikely, but...) or support could be added for * more INTTGT values. @@ -181,10 +177,11 @@ static void openpic_cpu_write_internal(void *opaque, hwaddr addr, uint32_t val, int idx); static void openpic_reset(DeviceState *d); -/* Convert between openpic clock ticks and nanosecs. In the hardware the clock - frequency is driven by board inputs to the PIC which the PIC would then - divide by 4 or 8. For now hard code to 25MZ. -*/ +/* + * Convert between openpic clock ticks and nanosecs. In the hardware the clock + * frequency is driven by board inputs to the PIC which the PIC would then + * divide by 4 or 8. For now hard code to 25MZ. + */ #define OPENPIC_TIMER_FREQ_MHZ 25 #define OPENPIC_TIMER_NS_PER_TICK (1000 / OPENPIC_TIMER_FREQ_MHZ) static inline uint64_t ns_to_ticks(uint64_t ns) @@ -257,7 +254,8 @@ static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ, __func__, src->output, n_IRQ, active, was_active, dst->outputs_active[src->output]); - /* On Freescale MPIC, critical interrupts ignore priority, + /* + * On Freescale MPIC, critical interrupts ignore priority, * IACK, EOI, etc. Before MPIC v4.1 they also ignore * masking. */ @@ -280,7 +278,8 @@ static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ, priority = IVPR_PRIORITY(src->ivpr); - /* Even if the interrupt doesn't have enough priority, + /* + * Even if the interrupt doesn't have enough priority, * it is still raised, in case ctpr is lowered later. 
*/ if (active) { @@ -412,7 +411,8 @@ static void openpic_set_irq(void *opaque, int n_IRQ, int level) } if (src->output != OPENPIC_OUTPUT_INT) { - /* Edge-triggered interrupts shouldn't be used + /* + * Edge-triggered interrupts shouldn't be used * with non-INT delivery, but just in case, * try to make it do something sane rather than * cause an interrupt storm. This is close to @@ -505,7 +505,8 @@ static inline void write_IRQreg_ivpr(OpenPICState *opp, int n_IRQ, uint32_t val) { uint32_t mask; - /* NOTE when implementing newer FSL MPIC models: starting with v4.0, + /* + * NOTE when implementing newer FSL MPIC models: starting with v4.0, * the polarity bit is read-only on internal interrupts. */ mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK | @@ -515,7 +516,8 @@ static inline void write_IRQreg_ivpr(OpenPICState *opp, int n_IRQ, uint32_t val) opp->src[n_IRQ].ivpr = (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask); - /* For FSL internal interrupts, The sense bit is reserved and zero, + /* + * For FSL internal interrupts, The sense bit is reserved and zero, * and the interrupt is always level-triggered. Timers and IPIs * have no sense or polarity bits, and are edge-triggered. */ @@ -699,16 +701,20 @@ static void qemu_timer_cb(void *opaque) openpic_set_irq(opp, n_IRQ, 0); } -/* If enabled is true, arranges for an interrupt to be raised val clocks into - the future, if enabled is false cancels the timer. */ +/* + * If enabled is true, arranges for an interrupt to be raised val clocks into + * the future, if enabled is false cancels the timer. + */ static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled) { uint64_t ns = ticks_to_ns(val & ~TCCR_TOG); - /* A count of zero causes a timer to be set to expire immediately. This - effectively stops the simulation since the timer is constantly expiring - which prevents guest code execution, so we don't honor that - configuration. On real hardware, this situation would generate an - interrupt on every clock cycle if the interrupt was unmasked. */ + /* + * A count of zero causes a timer to be set to expire immediately. This + * effectively stops the simulation since the timer is constantly expiring + * which prevents guest code execution, so we don't honor that + * configuration. On real hardware, this situation would generate an + * interrupt on every clock cycle if the interrupt was unmasked. + */ if ((ns == 0) || !enabled) { tmr->qemu_timer_active = false; tmr->tccr = tmr->tccr & TCCR_TOG; @@ -721,8 +727,10 @@ static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled) } } -/* Returns the currrent tccr value, i.e., timer value (in clocks) with - appropriate TOG. */ +/* + * Returns the current tccr value, i.e., timer value (in clocks) with + * appropriate TOG. 
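A quick numeric check (illustrative, not patch content) of the clock/ns conversion used by the openpic timer helpers above: with the hard-coded 25 MHz timer frequency each tick is 40 ns, so a count of 2,500,000 ticks corresponds to 100 ms.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TIMER_FREQ_MHZ 25                       /* matches OPENPIC_TIMER_FREQ_MHZ */
    #define NS_PER_TICK    (1000 / TIMER_FREQ_MHZ)  /* 40 ns per tick */

    static uint64_t ticks_to_ns(uint64_t ticks) { return ticks * NS_PER_TICK; }
    static uint64_t ns_to_ticks(uint64_t ns)    { return ns / NS_PER_TICK; }

    int main(void)
    {
        printf("%" PRIu64 " ns\n", ticks_to_ns(2500000));      /* 100000000 ns */
        printf("%" PRIu64 " ticks\n", ns_to_ticks(100000000)); /* 2500000 ticks */
        return 0;
    }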
+ */ static uint64_t openpic_tmr_get_timer(OpenPICTimer *tmr) { uint64_t retval; @@ -1276,6 +1284,15 @@ static void openpic_reset(DeviceState *d) break; } + /* Mask all IPI interrupts for Freescale OpenPIC */ + if ((opp->model == OPENPIC_MODEL_FSL_MPIC_20) || + (opp->model == OPENPIC_MODEL_FSL_MPIC_42)) { + if (i >= opp->irq_ipi0 && i < opp->irq_tim0) { + write_IRQreg_idr(opp, i, 0); + continue; + } + } + write_IRQreg_idr(opp, i, opp->idr_reset); } /* Initialise IRQ destinations */ @@ -1304,7 +1321,7 @@ static void openpic_reset(DeviceState *d) typedef struct MemReg { const char *name; MemoryRegionOps const *ops; - hwaddr start_addr; + hwaddr start_addr; ram_addr_t size; } MemReg; @@ -1555,28 +1572,6 @@ static void openpic_realize(DeviceState *dev, Error **errp) break; - case OPENPIC_MODEL_RAVEN: - opp->nb_irqs = RAVEN_MAX_EXT; - opp->vid = VID_REVISION_1_3; - opp->vir = VIR_GENERIC; - opp->vector_mask = 0xFF; - opp->tfrr_reset = 4160000; - opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK; - opp->idr_reset = 0; - opp->max_irq = RAVEN_MAX_IRQ; - opp->irq_ipi0 = RAVEN_IPI_IRQ; - opp->irq_tim0 = RAVEN_TMR_IRQ; - opp->brr1 = -1; - opp->mpic_mode_mask = GCR_MODE_MIXED; - - if (opp->nb_cpus != 1) { - error_setg(errp, "Only UP supported today"); - return; - } - - map_list(opp, list_le, &list_count); - break; - case OPENPIC_MODEL_KEYLARGO: opp->nb_irqs = KEYLARGO_MAX_EXT; opp->vid = VID_REVISION_1_2; diff --git a/hw/intc/openpic_kvm.c b/hw/intc/openpic_kvm.c index 21da680389..557dd0c2bf 100644 --- a/hw/intc/openpic_kvm.c +++ b/hw/intc/openpic_kvm.c @@ -234,6 +234,7 @@ static void kvm_openpic_realize(DeviceState *dev, Error **errp) opp->mem_listener.region_add = kvm_openpic_region_add; opp->mem_listener.region_del = kvm_openpic_region_del; + opp->mem_listener.name = "openpic-kvm"; memory_listener_register(&opp->mem_listener, &address_space_memory); /* indicate pic capabilities */ diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c index ad43483612..c7b75ed12e 100644 --- a/hw/intc/pnv_xive.c +++ b/hw/intc/pnv_xive.c @@ -66,26 +66,6 @@ static const XiveVstInfo vst_infos[] = { qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n", \ (xive)->chip->chip_id, ## __VA_ARGS__); -/* - * QEMU version of the GETFIELD/SETFIELD macros - * - * TODO: It might be better to use the existing extract64() and - * deposit64() but this means that all the register definitions will - * change and become incompatible with the ones found in skiboot. - * - * Keep it as it is for now until we find a common ground. - */ -static inline uint64_t GETFIELD(uint64_t mask, uint64_t word) -{ - return (word & mask) >> ctz64(mask); -} - -static inline uint64_t SETFIELD(uint64_t mask, uint64_t word, - uint64_t value) -{ - return (word & ~mask) | ((value << ctz64(mask)) & mask); -} - /* * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID * field overrides the hardwired chip ID in the Powerbus operations @@ -172,7 +152,12 @@ static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type, /* Get the page size of the indirect table. 
*/ vsd_addr = vsd & VSD_ADDRESS_MASK; - vsd = ldq_be_dma(&address_space_memory, vsd_addr); + if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd, + MEMTXATTRS_UNSPECIFIED)) { + xive_error(xive, "VST: failed to access %s entry %x @0x%" PRIx64, + info->name, idx, vsd_addr); + return 0; + } if (!(vsd & VSD_ADDRESS_MASK)) { #ifdef XIVE_DEBUG @@ -195,7 +180,12 @@ static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type, /* Load the VSD we are looking for, if not already done */ if (vsd_idx) { vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE; - vsd = ldq_be_dma(&address_space_memory, vsd_addr); + if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd, + MEMTXATTRS_UNSPECIFIED)) { + xive_error(xive, "VST: failed to access %s entry %x @0x%" + PRIx64, info->name, vsd_idx, vsd_addr); + return 0; + } if (!(vsd & VSD_ADDRESS_MASK)) { #ifdef XIVE_DEBUG @@ -393,6 +383,34 @@ static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx, return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas); } +static int pnv_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx, + uint8_t *pq) +{ + PnvXive *xive = PNV_XIVE(xrtr); + + if (pnv_xive_block_id(xive) != blk) { + xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx)); + return -1; + } + + *pq = xive_source_esb_get(&xive->ipi_source, idx); + return 0; +} + +static int pnv_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx, + uint8_t *pq) +{ + PnvXive *xive = PNV_XIVE(xrtr); + + if (pnv_xive_block_id(xive) != blk) { + xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx)); + return -1; + } + + *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq); + return 0; +} + /* * One bit per thread id. The first register PC_THREAD_EN_REG0 covers * the first cores 0-15 (normal) of the chip or 0-7 (fused). The @@ -489,12 +507,12 @@ static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu) * event notification to the Router. This is required on a multichip * system. */ -static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno) +static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked) { PnvXive *xive = PNV_XIVE(xn); uint8_t blk = pnv_xive_block_id(xive); - xive_router_notify(xn, XIVE_EAS(blk, srcno)); + xive_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked); } /* @@ -542,7 +560,12 @@ static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type) /* Get the page size of the indirect table. 
*/ vsd_addr = vsd & VSD_ADDRESS_MASK; - vsd = ldq_be_dma(&address_space_memory, vsd_addr); + if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd, + MEMTXATTRS_UNSPECIFIED)) { + xive_error(xive, "VST: failed to access %s entry @0x%" PRIx64, + info->name, vsd_addr); + return 0; + } if (!(vsd & VSD_ADDRESS_MASK)) { #ifdef XIVE_DEBUG @@ -1336,7 +1359,8 @@ static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val) blk = XIVE_EAS_BLOCK(val); idx = XIVE_EAS_INDEX(val); - xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx)); + xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx), + !!(val & XIVE_TRIGGER_PQ)); } static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val, @@ -1956,6 +1980,8 @@ static void pnv_xive_class_init(ObjectClass *klass, void *data) device_class_set_props(dc, pnv_xive_properties); xrc->get_eas = pnv_xive_get_eas; + xrc->get_pq = pnv_xive_get_pq; + xrc->set_pq = pnv_xive_set_pq; xrc->get_end = pnv_xive_get_end; xrc->write_end = pnv_xive_write_end; xrc->get_nvt = pnv_xive_get_nvt; diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c new file mode 100644 index 0000000000..f22ce5ca59 --- /dev/null +++ b/hw/intc/pnv_xive2.c @@ -0,0 +1,2115 @@ +/* + * QEMU PowerPC XIVE2 interrupt controller model (POWER10) + * + * Copyright (c) 2019-2022, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "target/ppc/cpu.h" +#include "sysemu/cpus.h" +#include "sysemu/dma.h" +#include "monitor/monitor.h" +#include "hw/ppc/fdt.h" +#include "hw/ppc/pnv.h" +#include "hw/ppc/pnv_core.h" +#include "hw/ppc/pnv_xscom.h" +#include "hw/ppc/xive2.h" +#include "hw/ppc/pnv_xive.h" +#include "hw/ppc/xive_regs.h" +#include "hw/ppc/xive2_regs.h" +#include "hw/ppc/ppc.h" +#include "hw/qdev-properties.h" +#include "sysemu/reset.h" + +#include + +#include "pnv_xive2_regs.h" + +#undef XIVE2_DEBUG + +/* + * Virtual structures table (VST) + */ +#define SBE_PER_BYTE 4 + +typedef struct XiveVstInfo { + const char *name; + uint32_t size; + uint32_t max_blocks; +} XiveVstInfo; + +static const XiveVstInfo vst_infos[] = { + + [VST_EAS] = { "EAT", sizeof(Xive2Eas), 16 }, + [VST_ESB] = { "ESB", 1, 16 }, + [VST_END] = { "ENDT", sizeof(Xive2End), 16 }, + + [VST_NVP] = { "NVPT", sizeof(Xive2Nvp), 16 }, + [VST_NVG] = { "NVGT", sizeof(Xive2Nvgc), 16 }, + [VST_NVC] = { "NVCT", sizeof(Xive2Nvgc), 16 }, + + [VST_IC] = { "IC", 1 /* ? */ , 16 }, /* Topology # */ + [VST_SYNC] = { "SYNC", 1 /* ? */ , 16 }, /* Topology # */ + + /* + * This table contains the backing store pages for the interrupt + * fifos of the VC sub-engine in case of overflow. + * + * 0 - IPI, + * 1 - HWD, + * 2 - NxC, + * 3 - INT, + * 4 - OS-Queue, + * 5 - Pool-Queue, + * 6 - Hard-Queue + */ + [VST_ERQ] = { "ERQ", 1, VC_QUEUE_COUNT }, +}; + +#define xive2_error(xive, fmt, ...) \ + qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n", \ + (xive)->chip->chip_id, ## __VA_ARGS__); + +/* + * TODO: Document block id override + */ +static uint32_t pnv_xive2_block_id(PnvXive2 *xive) +{ + uint8_t blk = xive->chip->chip_id; + uint64_t cfg_val = xive->cq_regs[CQ_XIVE_CFG >> 3]; + + if (cfg_val & CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE) { + blk = GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, cfg_val); + } + + return blk; +} + +/* + * Remote access to controllers. HW uses MMIOs. For now, a simple scan + * of the chips is good enough. 
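An earlier hunk removes the private GETFIELD()/SETFIELD() copies from pnv_xive.c while the new pnv_xive2.c code here keeps using them, so they presumably come from a shared header now (an assumption; the header is not shown in these hunks). As a reference, a self-contained sketch of their mask-based semantics, which follow skiboot's convention rather than the bit-offset style of extract64()/deposit64():

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for QEMU's ctz64(): position of the field's least significant bit. */
    static inline int field_shift(uint64_t mask) { return __builtin_ctzll(mask); }

    /* Extract the field selected by 'mask' from 'word'. */
    static inline uint64_t getfield(uint64_t mask, uint64_t word)
    {
        return (word & mask) >> field_shift(mask);
    }

    /* Replace the field selected by 'mask' in 'word' with 'value'. */
    static inline uint64_t setfield(uint64_t mask, uint64_t word, uint64_t value)
    {
        return (word & ~mask) | ((value << field_shift(mask)) & mask);
    }

    int main(void)
    {
        uint64_t word = setfield(0xf0, 0, 4);                  /* -> 0x40 */
        printf("0x%" PRIx64 " -> %" PRIu64 "\n", word, getfield(0xf0, word));
        return 0;
    }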
+ * + * TODO: Block scope support + */ +static PnvXive2 *pnv_xive2_get_remote(uint8_t blk) +{ + PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); + int i; + + for (i = 0; i < pnv->num_chips; i++) { + Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]); + PnvXive2 *xive = &chip10->xive; + + if (pnv_xive2_block_id(xive) == blk) { + return xive; + } + } + return NULL; +} + +/* + * VST accessors for ESB, EAT, ENDT, NVP + * + * Indirect VST tables are arrays of VSDs pointing to a page (of same + * size). Each page is a direct VST table. + */ + +#define XIVE_VSD_SIZE 8 + +/* Indirect page size can be 4K, 64K, 2M, 16M. */ +static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift) +{ + return page_shift == 12 || page_shift == 16 || + page_shift == 21 || page_shift == 24; +} + +static uint64_t pnv_xive2_vst_addr_direct(PnvXive2 *xive, uint32_t type, + uint64_t vsd, uint32_t idx) +{ + const XiveVstInfo *info = &vst_infos[type]; + uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; + uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12); + uint32_t idx_max; + + idx_max = vst_tsize / info->size - 1; + if (idx > idx_max) { +#ifdef XIVE2_DEBUG + xive2_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?", + info->name, idx, idx_max); +#endif + return 0; + } + + return vst_addr + idx * info->size; +} + +static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type, + uint64_t vsd, uint32_t idx) +{ + const XiveVstInfo *info = &vst_infos[type]; + uint64_t vsd_addr; + uint32_t vsd_idx; + uint32_t page_shift; + uint32_t vst_per_page; + + /* Get the page size of the indirect table. */ + vsd_addr = vsd & VSD_ADDRESS_MASK; + ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED); + + if (!(vsd & VSD_ADDRESS_MASK)) { + xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx); + return 0; + } + + page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; + + if (!pnv_xive2_vst_page_size_allowed(page_shift)) { + xive2_error(xive, "VST: invalid %s page shift %d", info->name, + page_shift); + return 0; + } + + vst_per_page = (1ull << page_shift) / info->size; + vsd_idx = idx / vst_per_page; + + /* Load the VSD we are looking for, if not already done */ + if (vsd_idx) { + vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE; + ldq_be_dma(&address_space_memory, vsd_addr, &vsd, + MEMTXATTRS_UNSPECIFIED); + + if (!(vsd & VSD_ADDRESS_MASK)) { + xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx); + return 0; + } + + /* + * Check that the pages have a consistent size across the + * indirect table + */ + if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) { + xive2_error(xive, "VST: %s entry %x indirect page size differ !?", + info->name, idx); + return 0; + } + } + + return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page)); +} + +static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk, + uint32_t idx) +{ + const XiveVstInfo *info = &vst_infos[type]; + uint64_t vsd; + + if (blk >= info->max_blocks) { + xive2_error(xive, "VST: invalid block id %d for VST %s %d !?", + blk, info->name, idx); + return 0; + } + + vsd = xive->vsds[type][blk]; + + /* Remote VST access */ + if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) { + xive = pnv_xive2_get_remote(blk); + + return xive ? 
pnv_xive2_vst_addr(xive, type, blk, idx) : 0; + } + + if (VSD_INDIRECT & vsd) { + return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx); + } + + return pnv_xive2_vst_addr_direct(xive, type, vsd, idx); +} + +static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk, + uint32_t idx, void *data) +{ + const XiveVstInfo *info = &vst_infos[type]; + uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx); + + if (!addr) { + return -1; + } + + cpu_physical_memory_read(addr, data, info->size); + return 0; +} + +#define XIVE_VST_WORD_ALL -1 + +static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk, + uint32_t idx, void *data, uint32_t word_number) +{ + const XiveVstInfo *info = &vst_infos[type]; + uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx); + + if (!addr) { + return -1; + } + + if (word_number == XIVE_VST_WORD_ALL) { + cpu_physical_memory_write(addr, data, info->size); + } else { + cpu_physical_memory_write(addr + word_number * 4, + data + word_number * 4, 4); + } + return 0; +} + +static int pnv_xive2_get_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx, + uint8_t *pq) +{ + PnvXive2 *xive = PNV_XIVE2(xrtr); + + if (pnv_xive2_block_id(xive) != blk) { + xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx)); + return -1; + } + + *pq = xive_source_esb_get(&xive->ipi_source, idx); + return 0; +} + +static int pnv_xive2_set_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx, + uint8_t *pq) +{ + PnvXive2 *xive = PNV_XIVE2(xrtr); + + if (pnv_xive2_block_id(xive) != blk) { + xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx)); + return -1; + } + + *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq); + return 0; +} + +static int pnv_xive2_get_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx, + Xive2End *end) +{ + return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_END, blk, idx, end); +} + +static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx, + Xive2End *end, uint8_t word_number) +{ + return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_END, blk, idx, end, + word_number); +} + +static int pnv_xive2_end_update(PnvXive2 *xive) +{ + uint8_t blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, + xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]); + uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX, + xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]); + int i; + uint64_t endc_watch[4]; + + for (i = 0; i < ARRAY_SIZE(endc_watch); i++) { + endc_watch[i] = + cpu_to_be64(xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i]); + } + + return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch, + XIVE_VST_WORD_ALL); +} + +static void pnv_xive2_end_cache_load(PnvXive2 *xive) +{ + uint8_t blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, + xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]); + uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX, + xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]); + uint64_t endc_watch[4] = { 0 }; + int i; + + if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) { + xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx); + } + + for (i = 0; i < ARRAY_SIZE(endc_watch); i++) { + xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i] = + be64_to_cpu(endc_watch[i]); + } +} + +static int pnv_xive2_get_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx, + Xive2Nvp *nvp) +{ + return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp); +} + +static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx, + Xive2Nvp *nvp, uint8_t word_number) +{ + return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp, + word_number); +} + +static int 
pnv_xive2_nvp_update(PnvXive2 *xive) +{ + uint8_t blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, + xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]); + uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX, + xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]); + int i; + uint64_t nxc_watch[4]; + + for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) { + nxc_watch[i] = + cpu_to_be64(xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i]); + } + + return pnv_xive2_vst_write(xive, VST_NVP, blk, idx, nxc_watch, + XIVE_VST_WORD_ALL); +} + +static void pnv_xive2_nvp_cache_load(PnvXive2 *xive) +{ + uint8_t blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, + xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]); + uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX, + xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]); + uint64_t nxc_watch[4] = { 0 }; + int i; + + if (pnv_xive2_vst_read(xive, VST_NVP, blk, idx, nxc_watch)) { + xive2_error(xive, "VST: no NVP entry %x/%x !?", blk, idx); + } + + for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) { + xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i] = + be64_to_cpu(nxc_watch[i]); + } +} + +static int pnv_xive2_get_eas(Xive2Router *xrtr, uint8_t blk, uint32_t idx, + Xive2Eas *eas) +{ + PnvXive2 *xive = PNV_XIVE2(xrtr); + + if (pnv_xive2_block_id(xive) != blk) { + xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx)); + return -1; + } + + return pnv_xive2_vst_read(xive, VST_EAS, blk, idx, eas); +} + +static uint32_t pnv_xive2_get_config(Xive2Router *xrtr) +{ + PnvXive2 *xive = PNV_XIVE2(xrtr); + uint32_t cfg = 0; + + if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) { + cfg |= XIVE2_GEN1_TIMA_OS; + } + + if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) { + cfg |= XIVE2_VP_SAVE_RESTORE; + } + + if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE, + xive->cq_regs[CQ_XIVE_CFG >> 3]) == CQ_XIVE_CFG_THREADID_8BITS) { + cfg |= XIVE2_THREADID_8BITS; + } + + return cfg; +} + +static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu) +{ + int pir = ppc_cpu_pir(cpu); + uint32_t fc = PNV10_PIR2FUSEDCORE(pir); + uint64_t reg = fc < 8 ? TCTXT_EN0 : TCTXT_EN1; + uint32_t bit = pir & 0x3f; + + return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit); +} + +static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match) +{ + PnvXive2 *xive = PNV_XIVE2(xptr); + PnvChip *chip = xive->chip; + int count = 0; + int i, j; + bool gen1_tima_os = + xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS; + + for (i = 0; i < chip->nr_cores; i++) { + PnvCore *pc = chip->cores[i]; + CPUCore *cc = CPU_CORE(pc); + + for (j = 0; j < cc->nr_threads; j++) { + PowerPCCPU *cpu = pc->threads[j]; + XiveTCTX *tctx; + int ring; + + if (!pnv_xive2_is_cpu_enabled(xive, cpu)) { + continue; + } + + tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); + + if (gen1_tima_os) { + ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, + nvt_idx, cam_ignore, + logic_serv); + } else { + ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk, + nvt_idx, cam_ignore, + logic_serv); + } + + /* + * Save the context and follow on to catch duplicates, + * that we don't support yet. 
+ */ + if (ring != -1) { + if (match->tctx) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a " + "thread context NVT %x/%x\n", + nvt_blk, nvt_idx); + return false; + } + + match->ring = ring; + match->tctx = tctx; + count++; + } + } + } + + return count; +} + +static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr) +{ + return pnv_xive2_block_id(PNV_XIVE2(xrtr)); +} + +/* + * The TIMA MMIO space is shared among the chips and to identify the + * chip from which the access is being done, we extract the chip id + * from the PIR. + */ +static PnvXive2 *pnv_xive2_tm_get_xive(PowerPCCPU *cpu) +{ + int pir = ppc_cpu_pir(cpu); + XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr; + PnvXive2 *xive = PNV_XIVE2(xptr); + + if (!pnv_xive2_is_cpu_enabled(xive, cpu)) { + xive2_error(xive, "IC: CPU %x is not enabled", pir); + } + return xive; +} + +/* + * The internal sources of the interrupt controller have no knowledge + * of the XIVE2 chip on which they reside. Encode the block id in the + * source interrupt number before forwarding the source event + * notification to the Router. This is required on a multichip system. + */ +static void pnv_xive2_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked) +{ + PnvXive2 *xive = PNV_XIVE2(xn); + uint8_t blk = pnv_xive2_block_id(xive); + + xive2_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked); +} + +/* + * Set Translation Tables + * + * TODO add support for multiple sets + */ +static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val) +{ + uint8_t tsel = GETFIELD(CQ_TAR_SELECT, xive->cq_regs[CQ_TAR >> 3]); + uint8_t entry = GETFIELD(CQ_TAR_ENTRY_SELECT, + xive->cq_regs[CQ_TAR >> 3]); + + switch (tsel) { + case CQ_TAR_NVPG: + case CQ_TAR_ESB: + case CQ_TAR_END: + xive->tables[tsel][entry] = val; + break; + default: + xive2_error(xive, "IC: unsupported table %d", tsel); + return -1; + } + + if (xive->cq_regs[CQ_TAR >> 3] & CQ_TAR_AUTOINC) { + xive->cq_regs[CQ_TAR >> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT, + xive->cq_regs[CQ_TAR >> 3], ++entry); + } + + return 0; +} +/* + * Virtual Structure Tables (VST) configuration + */ +static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type, + uint8_t blk, uint64_t vsd) +{ + Xive2EndSource *end_xsrc = &xive->end_source; + XiveSource *xsrc = &xive->ipi_source; + const XiveVstInfo *info = &vst_infos[type]; + uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; + uint64_t vst_tsize = 1ull << page_shift; + uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; + + /* Basic checks */ + + if (VSD_INDIRECT & vsd) { + if (!pnv_xive2_vst_page_size_allowed(page_shift)) { + xive2_error(xive, "VST: invalid %s page shift %d", info->name, + page_shift); + return; + } + } + + if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) { + xive2_error(xive, "VST: %s table address 0x%"PRIx64 + " is not aligned with page shift %d", + info->name, vst_addr, page_shift); + return; + } + + /* Record the table configuration (in SRAM on HW) */ + xive->vsds[type][blk] = vsd; + + /* Now tune the models with the configuration provided by the FW */ + + switch (type) { + case VST_ESB: + /* + * Backing store pages for the source PQ bits. The model does + * not use these PQ bits backed in RAM because the XiveSource + * model has its own. + * + * If the table is direct, we can compute the number of PQ + * entries provisioned by FW (such as skiboot) and resize the + * ESB window accordingly. 
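+ * The resized ESB window covers (vst_tsize * SBE_PER_BYTE) sources, each with an MMIO range of (1ull << xsrc->esb_shift) bytes.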
+ */ + if (!(VSD_INDIRECT & vsd)) { + memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE + * (1ull << xsrc->esb_shift)); + } + + memory_region_add_subregion(&xive->esb_mmio, 0, &xsrc->esb_mmio); + break; + + case VST_EAS: /* Nothing to be done */ + break; + + case VST_END: + /* + * Backing store pages for the END. + */ + if (!(VSD_INDIRECT & vsd)) { + memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size) + * (1ull << end_xsrc->esb_shift)); + } + memory_region_add_subregion(&xive->end_mmio, 0, &end_xsrc->esb_mmio); + break; + + case VST_NVP: /* Not modeled */ + case VST_NVG: /* Not modeled */ + case VST_NVC: /* Not modeled */ + case VST_IC: /* Not modeled */ + case VST_SYNC: /* Not modeled */ + case VST_ERQ: /* Not modeled */ + break; + + default: + g_assert_not_reached(); + } +} + +/* + * Both PC and VC sub-engines are configured as each use the Virtual + * Structure Tables + */ +static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd) +{ + uint8_t mode = GETFIELD(VSD_MODE, vsd); + uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT, + xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]); + uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS, + xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]); + uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; + + if (type > VST_ERQ) { + xive2_error(xive, "VST: invalid table type %d", type); + return; + } + + if (blk >= vst_infos[type].max_blocks) { + xive2_error(xive, "VST: invalid block id %d for" + " %s table", blk, vst_infos[type].name); + return; + } + + if (!vst_addr) { + xive2_error(xive, "VST: invalid %s table address", + vst_infos[type].name); + return; + } + + switch (mode) { + case VSD_MODE_FORWARD: + xive->vsds[type][blk] = vsd; + break; + + case VSD_MODE_EXCLUSIVE: + pnv_xive2_vst_set_exclusive(xive, type, blk, vsd); + break; + + default: + xive2_error(xive, "VST: unsupported table mode %d", mode); + return; + } +} + +/* + * MMIO handlers + */ + + +/* + * IC BAR layout + * + * Page 0: Internal CQ register accesses (reads & writes) + * Page 1: Internal PC register accesses (reads & writes) + * Page 2: Internal VC register accesses (reads & writes) + * Page 3: Internal TCTXT (TIMA) reg accesses (read & writes) + * Page 4: Notify Port page (writes only, w/data), + * Page 5: Reserved + * Page 6: Sync Poll page (writes only, dataless) + * Page 7: Sync Inject page (writes only, dataless) + * Page 8: LSI Trigger page (writes only, dataless) + * Page 9: LSI SB Management page (reads & writes dataless) + * Pages 10-255: Reserved + * Pages 256-383: Direct mapped Thread Context Area (reads & writes) + * covering the 128 threads in P10. + * Pages 384-511: Reserved + */ +typedef struct PnvXive2Region { + const char *name; + uint32_t pgoff; + uint32_t pgsize; + const MemoryRegionOps *ops; +} PnvXive2Region; + +static const MemoryRegionOps pnv_xive2_ic_cq_ops; +static const MemoryRegionOps pnv_xive2_ic_pc_ops; +static const MemoryRegionOps pnv_xive2_ic_vc_ops; +static const MemoryRegionOps pnv_xive2_ic_tctxt_ops; +static const MemoryRegionOps pnv_xive2_ic_notify_ops; +static const MemoryRegionOps pnv_xive2_ic_sync_ops; +static const MemoryRegionOps pnv_xive2_ic_lsi_ops; +static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops; + +/* 512 pages. 
4K: 2M range, 64K: 32M range */ +static const PnvXive2Region pnv_xive2_ic_regions[] = { + { "xive-ic-cq", 0, 1, &pnv_xive2_ic_cq_ops }, + { "xive-ic-vc", 1, 1, &pnv_xive2_ic_vc_ops }, + { "xive-ic-pc", 2, 1, &pnv_xive2_ic_pc_ops }, + { "xive-ic-tctxt", 3, 1, &pnv_xive2_ic_tctxt_ops }, + { "xive-ic-notify", 4, 1, &pnv_xive2_ic_notify_ops }, + /* page 5 reserved */ + { "xive-ic-sync", 6, 2, &pnv_xive2_ic_sync_ops }, + { "xive-ic-lsi", 8, 2, &pnv_xive2_ic_lsi_ops }, + /* pages 10-255 reserved */ + { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops }, + /* pages 384-511 reserved */ +}; + +/* + * CQ operations + */ + +static uint64_t pnv_xive2_ic_cq_read(void *opaque, hwaddr offset, + unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + uint32_t reg = offset >> 3; + uint64_t val = 0; + + switch (offset) { + case CQ_XIVE_CAP: /* Set at reset */ + case CQ_XIVE_CFG: + val = xive->cq_regs[reg]; + break; + case CQ_MSGSND: /* TODO check the #cores of the machine */ + val = 0xffffffff00000000; + break; + case CQ_CFG_PB_GEN: + val = CQ_CFG_PB_GEN_PB_INIT; /* TODO: fix CQ_CFG_PB_GEN default value */ + break; + default: + xive2_error(xive, "CQ: invalid read @%"HWADDR_PRIx, offset); + } + + return val; +} + +static uint64_t pnv_xive2_bar_size(uint64_t val) +{ + return 1ull << (GETFIELD(CQ_BAR_RANGE, val) + 24); +} + +static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + MemoryRegion *sysmem = get_system_memory(); + uint32_t reg = offset >> 3; + int i; + + switch (offset) { + case CQ_XIVE_CFG: + case CQ_RST_CTL: /* TODO: reset all BARs */ + break; + + case CQ_IC_BAR: + xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12; + if (!(val & CQ_IC_BAR_VALID)) { + xive->ic_base = 0; + if (xive->cq_regs[reg] & CQ_IC_BAR_VALID) { + for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) { + memory_region_del_subregion(&xive->ic_mmio, + &xive->ic_mmios[i]); + } + memory_region_del_subregion(sysmem, &xive->ic_mmio); + } + } else { + xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K); + if (!(xive->cq_regs[reg] & CQ_IC_BAR_VALID)) { + for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) { + memory_region_add_subregion(&xive->ic_mmio, + pnv_xive2_ic_regions[i].pgoff << xive->ic_shift, + &xive->ic_mmios[i]); + } + memory_region_add_subregion(sysmem, xive->ic_base, + &xive->ic_mmio); + } + } + break; + + case CQ_TM_BAR: + xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12; + if (!(val & CQ_TM_BAR_VALID)) { + xive->tm_base = 0; + if (xive->cq_regs[reg] & CQ_TM_BAR_VALID) { + memory_region_del_subregion(sysmem, &xive->tm_mmio); + } + } else { + xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K); + if (!(xive->cq_regs[reg] & CQ_TM_BAR_VALID)) { + memory_region_add_subregion(sysmem, xive->tm_base, + &xive->tm_mmio); + } + } + break; + + case CQ_ESB_BAR: + xive->esb_shift = val & CQ_BAR_64K ? 16 : 12; + if (!(val & CQ_BAR_VALID)) { + xive->esb_base = 0; + if (xive->cq_regs[reg] & CQ_BAR_VALID) { + memory_region_del_subregion(sysmem, &xive->esb_mmio); + } + } else { + xive->esb_base = val & CQ_BAR_ADDR; + if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) { + memory_region_set_size(&xive->esb_mmio, + pnv_xive2_bar_size(val)); + memory_region_add_subregion(sysmem, xive->esb_base, + &xive->esb_mmio); + } + } + break; + + case CQ_END_BAR: + xive->end_shift = val & CQ_BAR_64K ? 
16 : 12; + if (!(val & CQ_BAR_VALID)) { + xive->end_base = 0; + if (xive->cq_regs[reg] & CQ_BAR_VALID) { + memory_region_del_subregion(sysmem, &xive->end_mmio); + } + } else { + xive->end_base = val & CQ_BAR_ADDR; + if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) { + memory_region_set_size(&xive->end_mmio, + pnv_xive2_bar_size(val)); + memory_region_add_subregion(sysmem, xive->end_base, + &xive->end_mmio); + } + } + break; + + case CQ_NVC_BAR: + xive->nvc_shift = val & CQ_BAR_64K ? 16 : 12; + if (!(val & CQ_BAR_VALID)) { + xive->nvc_base = 0; + if (xive->cq_regs[reg] & CQ_BAR_VALID) { + memory_region_del_subregion(sysmem, &xive->nvc_mmio); + } + } else { + xive->nvc_base = val & CQ_BAR_ADDR; + if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) { + memory_region_set_size(&xive->nvc_mmio, + pnv_xive2_bar_size(val)); + memory_region_add_subregion(sysmem, xive->nvc_base, + &xive->nvc_mmio); + } + } + break; + + case CQ_NVPG_BAR: + xive->nvpg_shift = val & CQ_BAR_64K ? 16 : 12; + if (!(val & CQ_BAR_VALID)) { + xive->nvpg_base = 0; + if (xive->cq_regs[reg] & CQ_BAR_VALID) { + memory_region_del_subregion(sysmem, &xive->nvpg_mmio); + } + } else { + xive->nvpg_base = val & CQ_BAR_ADDR; + if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) { + memory_region_set_size(&xive->nvpg_mmio, + pnv_xive2_bar_size(val)); + memory_region_add_subregion(sysmem, xive->nvpg_base, + &xive->nvpg_mmio); + } + } + break; + + case CQ_TAR: /* Set Translation Table Address */ + break; + case CQ_TDR: /* Set Translation Table Data */ + pnv_xive2_stt_set_data(xive, val); + break; + case CQ_FIRMASK_OR: /* FIR error reporting */ + break; + default: + xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx, offset); + return; + } + + xive->cq_regs[reg] = val; +} + +static const MemoryRegionOps pnv_xive2_ic_cq_ops = { + .read = pnv_xive2_ic_cq_read, + .write = pnv_xive2_ic_cq_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset, + unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + uint64_t val = 0; + uint32_t reg = offset >> 3; + + switch (offset) { + /* + * VSD table settings. + */ + case VC_VSD_TABLE_ADDR: + case VC_VSD_TABLE_DATA: + val = xive->vc_regs[reg]; + break; + + /* + * ESB cache updates (not modeled) + */ + case VC_ESBC_FLUSH_CTRL: + xive->vc_regs[reg] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID; + val = xive->vc_regs[reg]; + break; + + /* + * EAS cache updates (not modeled) + */ + case VC_EASC_FLUSH_CTRL: + xive->vc_regs[reg] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID; + val = xive->vc_regs[reg]; + break; + + /* + * END cache updates + */ + case VC_ENDC_WATCH0_SPEC: + xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT); + val = xive->vc_regs[reg]; + break; + + case VC_ENDC_WATCH0_DATA0: + /* + * Load DATA registers from cache with data requested by the + * SPEC register + */ + pnv_xive2_end_cache_load(xive); + val = xive->vc_regs[reg]; + break; + + case VC_ENDC_WATCH0_DATA1 ... 
VC_ENDC_WATCH0_DATA3: + val = xive->vc_regs[reg]; + break; + + case VC_ENDC_FLUSH_CTRL: + xive->vc_regs[reg] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID; + val = xive->vc_regs[reg]; + break; + + /* + * Indirect invalidation + */ + case VC_AT_MACRO_KILL_MASK: + val = xive->vc_regs[reg]; + break; + + case VC_AT_MACRO_KILL: + xive->vc_regs[reg] &= ~VC_AT_MACRO_KILL_VALID; + val = xive->vc_regs[reg]; + break; + + /* + * Interrupt fifo overflow in memory backing store (Not modeled) + */ + case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6: + val = xive->vc_regs[reg]; + break; + + /* + * Synchronisation + */ + case VC_ENDC_SYNC_DONE: + val = VC_ENDC_SYNC_POLL_DONE; + break; + default: + xive2_error(xive, "VC: invalid read @%"HWADDR_PRIx, offset); + } + + return val; +} + +static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + uint32_t reg = offset >> 3; + + switch (offset) { + /* + * VSD table settings. + */ + case VC_VSD_TABLE_ADDR: + break; + case VC_VSD_TABLE_DATA: + pnv_xive2_vst_set_data(xive, val); + break; + + /* + * ESB cache updates (not modeled) + */ + /* case VC_ESBC_FLUSH_CTRL: */ + case VC_ESBC_FLUSH_POLL: + xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID; + /* ESB update */ + break; + + /* + * EAS cache updates (not modeled) + */ + /* case VC_EASC_FLUSH_CTRL: */ + case VC_EASC_FLUSH_POLL: + xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID; + /* EAS update */ + break; + + /* + * END cache updates + */ + case VC_ENDC_WATCH0_SPEC: + val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */ + break; + + case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3: + break; + case VC_ENDC_WATCH0_DATA0: + /* writing to DATA0 triggers the cache write */ + xive->vc_regs[reg] = val; + pnv_xive2_end_update(xive); + break; + + + /* case VC_ENDC_FLUSH_CTRL: */ + case VC_ENDC_FLUSH_POLL: + xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID; + break; + + /* + * Indirect invalidation + */ + case VC_AT_MACRO_KILL: + case VC_AT_MACRO_KILL_MASK: + break; + + /* + * Interrupt fifo overflow in memory backing store (Not modeled) + */ + case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6: + break; + + /* + * Synchronisation + */ + case VC_ENDC_SYNC_DONE: + break; + + default: + xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset); + return; + } + + xive->vc_regs[reg] = val; +} + +static const MemoryRegionOps pnv_xive2_ic_vc_ops = { + .read = pnv_xive2_ic_vc_read, + .write = pnv_xive2_ic_vc_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset, + unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + uint64_t val = -1; + uint32_t reg = offset >> 3; + + switch (offset) { + /* + * VSD table settings. + */ + case PC_VSD_TABLE_ADDR: + case PC_VSD_TABLE_DATA: + val = xive->pc_regs[reg]; + break; + + /* + * cache updates + */ + case PC_NXC_WATCH0_SPEC: + xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT); + val = xive->pc_regs[reg]; + break; + + case PC_NXC_WATCH0_DATA0: + /* + * Load DATA registers from cache with data requested by the + * SPEC register + */ + pnv_xive2_nvp_cache_load(xive); + val = xive->pc_regs[reg]; + break; + + case PC_NXC_WATCH0_DATA1 ... 
PC_NXC_WATCH0_DATA3: + val = xive->pc_regs[reg]; + break; + + case PC_NXC_FLUSH_CTRL: + xive->pc_regs[reg] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID; + val = xive->pc_regs[reg]; + break; + + /* + * Indirect invalidation + */ + case PC_AT_KILL: + xive->pc_regs[reg] &= ~PC_AT_KILL_VALID; + val = xive->pc_regs[reg]; + break; + + default: + xive2_error(xive, "PC: invalid read @%"HWADDR_PRIx, offset); + } + + return val; +} + +static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + uint32_t reg = offset >> 3; + + switch (offset) { + + /* + * VSD table settings. Only taken into account in the VC + * sub-engine because the Xive2Router model combines both VC and PC + * sub-engines + */ + case PC_VSD_TABLE_ADDR: + case PC_VSD_TABLE_DATA: + break; + + /* + * cache updates + */ + case PC_NXC_WATCH0_SPEC: + val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */ + break; + + case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3: + break; + case PC_NXC_WATCH0_DATA0: + /* writing to DATA0 triggers the cache write */ + xive->pc_regs[reg] = val; + pnv_xive2_nvp_update(xive); + break; + + /* case PC_NXC_FLUSH_CTRL: */ + case PC_NXC_FLUSH_POLL: + xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID; + break; + + /* + * Indirect invalidation + */ + case PC_AT_KILL: + case PC_AT_KILL_MASK: + break; + + default: + xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset); + return; + } + + xive->pc_regs[reg] = val; +} + +static const MemoryRegionOps pnv_xive2_ic_pc_ops = { + .read = pnv_xive2_ic_pc_read, + .write = pnv_xive2_ic_pc_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + + +static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset, + unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + uint64_t val = -1; + uint32_t reg = offset >> 3; + + switch (offset) { + /* + * XIVE2 hardware thread enablement + */ + case TCTXT_EN0: + case TCTXT_EN1: + val = xive->tctxt_regs[reg]; + break; + + case TCTXT_EN0_SET: + case TCTXT_EN0_RESET: + val = xive->tctxt_regs[TCTXT_EN0 >> 3]; + break; + case TCTXT_EN1_SET: + case TCTXT_EN1_RESET: + val = xive->tctxt_regs[TCTXT_EN1 >> 3]; + break; + default: + xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset); + } + + return val; +} + +static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + + switch (offset) { + /* + * XIVE2 hardware thread enablement + */ + case TCTXT_EN0: /* Physical Thread Enable */ + case TCTXT_EN1: /* Physical Thread Enable (fused core) */ + break; + + case TCTXT_EN0_SET: + xive->tctxt_regs[TCTXT_EN0 >> 3] |= val; + break; + case TCTXT_EN1_SET: + xive->tctxt_regs[TCTXT_EN1 >> 3] |= val; + break; + case TCTXT_EN0_RESET: + xive->tctxt_regs[TCTXT_EN0 >> 3] &= ~val; + break; + case TCTXT_EN1_RESET: + xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val; + break; + + default: + xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset); + return; + } +} + +static const MemoryRegionOps pnv_xive2_ic_tctxt_ops = { + .read = pnv_xive2_ic_tctxt_read, + .write = pnv_xive2_ic_tctxt_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +/* + * Redirect XSCOM to MMIO handlers + */ +static uint64_t pnv_xive2_xscom_read(void *opaque, 
hwaddr offset, + unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + uint64_t val = -1; + uint32_t xscom_reg = offset >> 3; + uint32_t mmio_offset = (xscom_reg & 0xFF) << 3; + + switch (xscom_reg) { + case 0x000 ... 0x0FF: + val = pnv_xive2_ic_cq_read(opaque, mmio_offset, size); + break; + case 0x100 ... 0x1FF: + val = pnv_xive2_ic_vc_read(opaque, mmio_offset, size); + break; + case 0x200 ... 0x2FF: + val = pnv_xive2_ic_pc_read(opaque, mmio_offset, size); + break; + case 0x300 ... 0x3FF: + val = pnv_xive2_ic_tctxt_read(opaque, mmio_offset, size); + break; + default: + xive2_error(xive, "XSCOM: invalid read @%"HWADDR_PRIx, offset); + } + + return val; +} + +static void pnv_xive2_xscom_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + uint32_t xscom_reg = offset >> 3; + uint32_t mmio_offset = (xscom_reg & 0xFF) << 3; + + switch (xscom_reg) { + case 0x000 ... 0x0FF: + pnv_xive2_ic_cq_write(opaque, mmio_offset, val, size); + break; + case 0x100 ... 0x1FF: + pnv_xive2_ic_vc_write(opaque, mmio_offset, val, size); + break; + case 0x200 ... 0x2FF: + pnv_xive2_ic_pc_write(opaque, mmio_offset, val, size); + break; + case 0x300 ... 0x3FF: + pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size); + break; + default: + xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset); + } +} + +static const MemoryRegionOps pnv_xive2_xscom_ops = { + .read = pnv_xive2_xscom_read, + .write = pnv_xive2_xscom_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +/* + * Notify port page. The layout is compatible between 4K and 64K pages : + * + * Page 1 Notify page (writes only) + * 0x000 - 0x7FF IPI interrupt (NPU) + * 0x800 - 0xFFF HW interrupt triggers (PSI, PHB) + */ + +static void pnv_xive2_ic_hw_trigger(PnvXive2 *xive, hwaddr addr, + uint64_t val) +{ + uint8_t blk; + uint32_t idx; + + if (val & XIVE_TRIGGER_END) { + xive2_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64, + addr, val); + return; + } + + /* + * Forward the source event notification directly to the Router. + * The source interrupt number should already be correctly encoded + * with the chip block id by the sending device (PHB, PSI). + */ + blk = XIVE_EAS_BLOCK(val); + idx = XIVE_EAS_INDEX(val); + + xive2_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx), + !!(val & XIVE_TRIGGER_PQ)); +} + +static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + + /* VC: IPI triggers */ + switch (offset) { + case 0x000 ... 0x7FF: + /* TODO: check IPI notify sub-page routing */ + pnv_xive2_ic_hw_trigger(opaque, offset, val); + break; + + /* VC: HW triggers */ + case 0x800 ... 
0xFFF: + pnv_xive2_ic_hw_trigger(opaque, offset, val); + break; + + default: + xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset); + } +} + +static uint64_t pnv_xive2_ic_notify_read(void *opaque, hwaddr offset, + unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + + /* loads are invalid */ + xive2_error(xive, "NOTIFY: invalid read @%"HWADDR_PRIx, offset); + return -1; +} + +static const MemoryRegionOps pnv_xive2_ic_notify_ops = { + .read = pnv_xive2_ic_notify_read, + .write = pnv_xive2_ic_notify_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +static uint64_t pnv_xive2_ic_lsi_read(void *opaque, hwaddr offset, + unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + + xive2_error(xive, "LSI: invalid read @%"HWADDR_PRIx, offset); + return -1; +} + +static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + + xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset); +} + +static const MemoryRegionOps pnv_xive2_ic_lsi_ops = { + .read = pnv_xive2_ic_lsi_read, + .write = pnv_xive2_ic_lsi_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +/* + * Sync MMIO page (write only) + */ +#define PNV_XIVE2_SYNC_IPI 0x000 +#define PNV_XIVE2_SYNC_HW 0x080 +#define PNV_XIVE2_SYNC_NxC 0x100 +#define PNV_XIVE2_SYNC_INT 0x180 +#define PNV_XIVE2_SYNC_OS_ESC 0x200 +#define PNV_XIVE2_SYNC_POOL_ESC 0x280 +#define PNV_XIVE2_SYNC_HARD_ESC 0x300 + +static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset, + unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + + /* loads are invalid */ + xive2_error(xive, "SYNC: invalid read @%"HWADDR_PRIx, offset); + return -1; +} + +static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + + switch (offset) { + case PNV_XIVE2_SYNC_IPI: + case PNV_XIVE2_SYNC_HW: + case PNV_XIVE2_SYNC_NxC: + case PNV_XIVE2_SYNC_INT: + case PNV_XIVE2_SYNC_OS_ESC: + case PNV_XIVE2_SYNC_POOL_ESC: + case PNV_XIVE2_SYNC_HARD_ESC: + break; + default: + xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset); + } +} + +static const MemoryRegionOps pnv_xive2_ic_sync_ops = { + .read = pnv_xive2_ic_sync_read, + .write = pnv_xive2_ic_sync_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +/* + * When the TM direct pages of the IC controller are accessed, the + * target HW thread is deduced from the page offset. 
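+ * Page 'n' of this region maps to the HW thread whose PIR is (chip_id << 8) | n, following the P10 PIR layout.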
+ */ +static uint32_t pnv_xive2_ic_tm_get_pir(PnvXive2 *xive, hwaddr offset) +{ + /* On P10, the node ID shift in the PIR register is 8 bits */ + return xive->chip->chip_id << 8 | offset >> xive->ic_shift; +} + +static XiveTCTX *pnv_xive2_get_indirect_tctx(PnvXive2 *xive, uint32_t pir) +{ + PnvChip *chip = xive->chip; + PowerPCCPU *cpu = NULL; + + cpu = pnv_chip_find_cpu(chip, pir); + if (!cpu) { + xive2_error(xive, "IC: invalid PIR %x for indirect access", pir); + return NULL; + } + + if (!pnv_xive2_is_cpu_enabled(xive, cpu)) { + xive2_error(xive, "IC: CPU %x is not enabled", pir); + } + + return XIVE_TCTX(pnv_cpu_state(cpu)->intc); +} + +static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque, hwaddr offset, + unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + uint32_t pir; + XiveTCTX *tctx; + uint64_t val = -1; + + pir = pnv_xive2_ic_tm_get_pir(xive, offset); + tctx = pnv_xive2_get_indirect_tctx(xive, pir); + if (tctx) { + val = xive_tctx_tm_read(NULL, tctx, offset, size); + } + + return val; +} + +static void pnv_xive2_ic_tm_indirect_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + uint32_t pir; + XiveTCTX *tctx; + + pir = pnv_xive2_ic_tm_get_pir(xive, offset); + tctx = pnv_xive2_get_indirect_tctx(xive, pir); + if (tctx) { + xive_tctx_tm_write(NULL, tctx, offset, val, size); + } +} + +static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = { + .read = pnv_xive2_ic_tm_indirect_read, + .write = pnv_xive2_ic_tm_indirect_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +/* + * TIMA ops + */ + +/* + * Special TIMA offsets to handle accesses in a POWER10 way. + * + * Only the CAM line updates done by the hypervisor should be handled + * specifically. + */ +#define HV_PAGE_OFFSET (XIVE_TM_HV_PAGE << TM_SHIFT) +#define HV_PUSH_OS_CTX_OFFSET (HV_PAGE_OFFSET | (TM_QW1_OS + TM_WORD2)) +#define HV_PULL_OS_CTX_OFFSET (HV_PAGE_OFFSET | TM_SPC_PULL_OS_CTX) + +static void pnv_xive2_tm_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PowerPCCPU *cpu = POWERPC_CPU(current_cpu); + PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu); + XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); + XivePresenter *xptr = XIVE_PRESENTER(xive); + bool gen1_tima_os = + xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS; + + /* TODO: should we switch the TM ops table instead ? */ + if (!gen1_tima_os && offset == HV_PUSH_OS_CTX_OFFSET) { + xive2_tm_push_os_ctx(xptr, tctx, offset, value, size); + return; + } + + /* Other TM ops are the same as XIVE1 */ + xive_tctx_tm_write(xptr, tctx, offset, value, size); +} + +static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size) +{ + PowerPCCPU *cpu = POWERPC_CPU(current_cpu); + PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu); + XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); + XivePresenter *xptr = XIVE_PRESENTER(xive); + bool gen1_tima_os = + xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS; + + /* TODO: should we switch the TM ops table instead ? 
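+ * For now, only the HV 'pull OS context' load below needs a XIVE2-specific handler; every other TM operation goes through the common XIVE1 path.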
*/ + if (!gen1_tima_os && offset == HV_PULL_OS_CTX_OFFSET) { + return xive2_tm_pull_os_ctx(xptr, tctx, offset, size); + } + + /* Other TM ops are the same as XIVE1 */ + return xive_tctx_tm_read(xptr, tctx, offset, size); +} + +static const MemoryRegionOps pnv_xive2_tm_ops = { + .read = pnv_xive2_tm_read, + .write = pnv_xive2_tm_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset, + unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + + xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset); + return -1; +} + +static void pnv_xive2_nvc_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + + xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset); +} + +static const MemoryRegionOps pnv_xive2_nvc_ops = { + .read = pnv_xive2_nvc_read, + .write = pnv_xive2_nvc_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset, + unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + + xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset); + return -1; +} + +static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + PnvXive2 *xive = PNV_XIVE2(opaque); + + xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset); +} + +static const MemoryRegionOps pnv_xive2_nvpg_ops = { + .read = pnv_xive2_nvpg_read, + .write = pnv_xive2_nvpg_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +/* + * POWER10 default capabilities: 0x2000120076f000FC + */ +#define PNV_XIVE2_CAPABILITIES 0x2000120076f000FC + +/* + * POWER10 default configuration: 0x0030000033000000 + * + * 8bits thread id was dropped for P10 + */ +#define PNV_XIVE2_CONFIGURATION 0x0030000033000000 + +static void pnv_xive2_reset(void *dev) +{ + PnvXive2 *xive = PNV_XIVE2(dev); + XiveSource *xsrc = &xive->ipi_source; + Xive2EndSource *end_xsrc = &xive->end_source; + + xive->cq_regs[CQ_XIVE_CAP >> 3] = xive->capabilities; + xive->cq_regs[CQ_XIVE_CFG >> 3] = xive->config; + + /* HW hardwires the #Topology of the chip in the block field */ + xive->cq_regs[CQ_XIVE_CFG >> 3] |= + SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id); + + /* Set default page size to 64k */ + xive->ic_shift = xive->esb_shift = xive->end_shift = 16; + xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16; + + /* Clear source MMIOs */ + if (memory_region_is_mapped(&xsrc->esb_mmio)) { + memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio); + } + + if (memory_region_is_mapped(&end_xsrc->esb_mmio)) { + memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio); + } +} + +/* + * Maximum number of IRQs and ENDs supported by HW. Will be tuned by + * software. 
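+ * The counts below divide the ESB and END window sizes by the per-source ESB MMIO footprint, (1ull << XIVE_ESB_64K_2PAGE).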
+ */ +#define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE)) +#define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE)) + +static void pnv_xive2_realize(DeviceState *dev, Error **errp) +{ + PnvXive2 *xive = PNV_XIVE2(dev); + PnvXive2Class *pxc = PNV_XIVE2_GET_CLASS(dev); + XiveSource *xsrc = &xive->ipi_source; + Xive2EndSource *end_xsrc = &xive->end_source; + Error *local_err = NULL; + int i; + + pxc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + assert(xive->chip); + + /* + * The XiveSource and Xive2EndSource objects are realized with the + * maximum allowed HW configuration. The ESB MMIO regions will be + * resized dynamically when the controller is configured by the FW + * to limit accesses to resources not provisioned. + */ + object_property_set_int(OBJECT(xsrc), "flags", XIVE_SRC_STORE_EOI, + &error_fatal); + object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE2_NR_IRQS, + &error_fatal); + object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), + &error_fatal); + qdev_realize(DEVICE(xsrc), NULL, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE2_NR_ENDS, + &error_fatal); + object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive), + &error_abort); + qdev_realize(DEVICE(end_xsrc), NULL, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* XSCOM region, used for initial configuration of the BARs */ + memory_region_init_io(&xive->xscom_regs, OBJECT(dev), + &pnv_xive2_xscom_ops, xive, "xscom-xive", + PNV10_XSCOM_XIVE2_SIZE << 3); + + /* Interrupt controller MMIO regions */ + xive->ic_shift = 16; + memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic", + PNV10_XIVE2_IC_SIZE); + + for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) { + memory_region_init_io(&xive->ic_mmios[i], OBJECT(dev), + pnv_xive2_ic_regions[i].ops, xive, + pnv_xive2_ic_regions[i].name, + pnv_xive2_ic_regions[i].pgsize << xive->ic_shift); + } + + /* + * VC MMIO regions. 
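+ * Created at their maximum size here; pnv_xive2_vst_set_exclusive() resizes them when the FW configures the backing tables.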
+ */ + xive->esb_shift = 16; + xive->end_shift = 16; + memory_region_init(&xive->esb_mmio, OBJECT(xive), "xive-esb", + PNV10_XIVE2_ESB_SIZE); + memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-end", + PNV10_XIVE2_END_SIZE); + + /* Presenter Controller MMIO region (not modeled) */ + xive->nvc_shift = 16; + xive->nvpg_shift = 16; + memory_region_init_io(&xive->nvc_mmio, OBJECT(dev), + &pnv_xive2_nvc_ops, xive, + "xive-nvc", PNV10_XIVE2_NVC_SIZE); + + memory_region_init_io(&xive->nvpg_mmio, OBJECT(dev), + &pnv_xive2_nvpg_ops, xive, + "xive-nvpg", PNV10_XIVE2_NVPG_SIZE); + + /* Thread Interrupt Management Area (Direct) */ + xive->tm_shift = 16; + memory_region_init_io(&xive->tm_mmio, OBJECT(dev), &pnv_xive2_tm_ops, + xive, "xive-tima", PNV10_XIVE2_TM_SIZE); + + qemu_register_reset(pnv_xive2_reset, dev); +} + +static Property pnv_xive2_properties[] = { + DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0), + DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0), + DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0), + DEFINE_PROP_UINT64("nvc-bar", PnvXive2, nvc_base, 0), + DEFINE_PROP_UINT64("nvpg-bar", PnvXive2, nvpg_base, 0), + DEFINE_PROP_UINT64("tm-bar", PnvXive2, tm_base, 0), + DEFINE_PROP_UINT64("capabilities", PnvXive2, capabilities, + PNV_XIVE2_CAPABILITIES), + DEFINE_PROP_UINT64("config", PnvXive2, config, + PNV_XIVE2_CONFIGURATION), + DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pnv_xive2_instance_init(Object *obj) +{ + PnvXive2 *xive = PNV_XIVE2(obj); + + object_initialize_child(obj, "ipi_source", &xive->ipi_source, + TYPE_XIVE_SOURCE); + object_initialize_child(obj, "end_source", &xive->end_source, + TYPE_XIVE2_END_SOURCE); +} + +static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt, + int xscom_offset) +{ + const char compat_p10[] = "ibm,power10-xive-x"; + char *name; + int offset; + uint32_t reg[] = { + cpu_to_be32(PNV10_XSCOM_XIVE2_BASE), + cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE) + }; + + name = g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE); + offset = fdt_add_subnode(fdt, xscom_offset, name); + _FDT(offset); + g_free(name); + + _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); + _FDT(fdt_setprop(fdt, offset, "compatible", compat_p10, + sizeof(compat_p10))); + return 0; +} + +static void pnv_xive2_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); + Xive2RouterClass *xrc = XIVE2_ROUTER_CLASS(klass); + XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); + XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass); + PnvXive2Class *pxc = PNV_XIVE2_CLASS(klass); + + xdc->dt_xscom = pnv_xive2_dt_xscom; + + dc->desc = "PowerNV XIVE2 Interrupt Controller (POWER10)"; + device_class_set_parent_realize(dc, pnv_xive2_realize, + &pxc->parent_realize); + device_class_set_props(dc, pnv_xive2_properties); + + xrc->get_eas = pnv_xive2_get_eas; + xrc->get_pq = pnv_xive2_get_pq; + xrc->set_pq = pnv_xive2_set_pq; + xrc->get_end = pnv_xive2_get_end; + xrc->write_end = pnv_xive2_write_end; + xrc->get_nvp = pnv_xive2_get_nvp; + xrc->write_nvp = pnv_xive2_write_nvp; + xrc->get_config = pnv_xive2_get_config; + xrc->get_block_id = pnv_xive2_get_block_id; + + xnc->notify = pnv_xive2_notify; + + xpc->match_nvt = pnv_xive2_match_nvt; +}; + +static const TypeInfo pnv_xive2_info = { + .name = TYPE_PNV_XIVE2, + .parent = TYPE_XIVE2_ROUTER, + .instance_init = pnv_xive2_instance_init, + .instance_size = 
sizeof(PnvXive2), + .class_init = pnv_xive2_class_init, + .class_size = sizeof(PnvXive2Class), + .interfaces = (InterfaceInfo[]) { + { TYPE_PNV_XSCOM_INTERFACE }, + { } + } +}; + +static void pnv_xive2_register_types(void) +{ + type_register_static(&pnv_xive2_info); +} + +type_init(pnv_xive2_register_types) + +static void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, + Monitor *mon) +{ + uint8_t eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5); + uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5); + + if (!xive2_nvp_is_valid(nvp)) { + return; + } + + monitor_printf(mon, " %08x end:%02x/%04x IPB:%02x", + nvp_idx, eq_blk, eq_idx, + xive_get_field32(NVP2_W2_IPB, nvp->w2)); + /* + * When the NVP is HW controlled, more fields are updated + */ + if (xive2_nvp_is_hw(nvp)) { + monitor_printf(mon, " CPPR:%02x", + xive_get_field32(NVP2_W2_CPPR, nvp->w2)); + if (xive2_nvp_is_co(nvp)) { + monitor_printf(mon, " CO:%04x", + xive_get_field32(NVP2_W1_CO_THRID, nvp->w1)); + } + } + monitor_printf(mon, "\n"); +} + +/* + * If the table is direct, we can compute the number of PQ entries + * provisioned by FW. + */ +static uint32_t pnv_xive2_nr_esbs(PnvXive2 *xive) +{ + uint8_t blk = pnv_xive2_block_id(xive); + uint64_t vsd = xive->vsds[VST_ESB][blk]; + uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12); + + return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE; +} + +/* + * Compute the number of entries per indirect subpage. + */ +static uint64_t pnv_xive2_vst_per_subpage(PnvXive2 *xive, uint32_t type) +{ + uint8_t blk = pnv_xive2_block_id(xive); + uint64_t vsd = xive->vsds[type][blk]; + const XiveVstInfo *info = &vst_infos[type]; + uint64_t vsd_addr; + uint32_t page_shift; + + /* For direct tables, fake a valid value */ + if (!(VSD_INDIRECT & vsd)) { + return 1; + } + + /* Get the page size of the indirect table. */ + vsd_addr = vsd & VSD_ADDRESS_MASK; + ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED); + + if (!(vsd & VSD_ADDRESS_MASK)) { +#ifdef XIVE2_DEBUG + xive2_error(xive, "VST: invalid %s entry!?", info->name); +#endif + return 0; + } + + page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; + + if (!pnv_xive2_vst_page_size_allowed(page_shift)) { + xive2_error(xive, "VST: invalid %s page shift %d", info->name, + page_shift); + return 0; + } + + return (1ull << page_shift) / info->size; +} + +void pnv_xive2_pic_print_info(PnvXive2 *xive, Monitor *mon) +{ + Xive2Router *xrtr = XIVE2_ROUTER(xive); + uint8_t blk = pnv_xive2_block_id(xive); + uint8_t chip_id = xive->chip->chip_id; + uint32_t srcno0 = XIVE_EAS(blk, 0); + uint32_t nr_esbs = pnv_xive2_nr_esbs(xive); + Xive2Eas eas; + Xive2End end; + Xive2Nvp nvp; + int i; + uint64_t xive_nvp_per_subpage; + + monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0, + srcno0 + nr_esbs - 1); + xive_source_pic_print_info(&xive->ipi_source, srcno0, mon); + + monitor_printf(mon, "XIVE[%x] EAT %08x .. 
%08x\n", blk, srcno0, + srcno0 + nr_esbs - 1); + for (i = 0; i < nr_esbs; i++) { + if (xive2_router_get_eas(xrtr, blk, i, &eas)) { + break; + } + if (!xive2_eas_is_masked(&eas)) { + xive2_eas_pic_print_info(&eas, i, mon); + } + } + + monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk); + i = 0; + while (!xive2_router_get_end(xrtr, blk, i, &end)) { + xive2_end_eas_pic_print_info(&end, i++, mon); + } + + monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk); + i = 0; + while (!xive2_router_get_end(xrtr, blk, i, &end)) { + xive2_end_pic_print_info(&end, i++, mon); + } + + monitor_printf(mon, "XIVE[%x] #%d NVPT %08x .. %08x\n", chip_id, blk, + 0, XIVE2_NVP_COUNT - 1); + xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP); + for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) { + while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) { + xive2_nvp_pic_print_info(&nvp, i++, mon); + } + } +} diff --git a/hw/intc/pnv_xive2_regs.h b/hw/intc/pnv_xive2_regs.h new file mode 100644 index 0000000000..0c096e4adb --- /dev/null +++ b/hw/intc/pnv_xive2_regs.h @@ -0,0 +1,442 @@ +/* + * QEMU PowerPC XIVE2 interrupt controller model (POWER10) + * + * Copyright (c) 2019-2022, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ + +#ifndef PPC_PNV_XIVE2_REGS_H +#define PPC_PNV_XIVE2_REGS_H + +/* + * CQ Common Queue (PowerBus bridge) Registers + */ + +/* XIVE2 Capabilities */ +#define X_CQ_XIVE_CAP 0x02 +#define CQ_XIVE_CAP 0x010 +#define CQ_XIVE_CAP_VERSION PPC_BITMASK(0, 3) +/* 4:6 reserved */ +#define CQ_XIVE_CAP_USER_INT_PRIO PPC_BITMASK(8, 9) +#define CQ_XIVE_CAP_USER_INT_PRIO_1 0 +#define CQ_XIVE_CAP_USER_INT_PRIO_1_2 1 +#define CQ_XIVE_CAP_USER_INT_PRIO_1_4 2 +#define CQ_XIVE_CAP_USER_INT_PRIO_1_8 3 +#define CQ_XIVE_CAP_VP_INT_PRIO PPC_BITMASK(10, 11) +#define CQ_XIVE_CAP_VP_INT_PRIO_1_8 0 +#define CQ_XIVE_CAP_VP_INT_PRIO_2_8 1 +#define CQ_XIVE_CAP_VP_INT_PRIO_4_8 2 +#define CQ_XIVE_CAP_VP_INT_PRIO_8 3 +#define CQ_XIVE_CAP_BLOCK_ID_WIDTH PPC_BITMASK(12, 13) +#define CQ_XIVE_CAP_VP_SAVE_RESTORE PPC_BIT(38) + +#define CQ_XIVE_CAP_PHB_PQ_DISABLE PPC_BIT(56) +#define CQ_XIVE_CAP_PHB_ABT PPC_BIT(57) +#define CQ_XIVE_CAP_EXPLOITATION_MODE PPC_BIT(58) +#define CQ_XIVE_CAP_STORE_EOI PPC_BIT(59) + +/* XIVE2 Configuration */ +#define X_CQ_XIVE_CFG 0x03 +#define CQ_XIVE_CFG 0x018 + +/* 0:7 reserved */ +#define CQ_XIVE_CFG_USER_INT_PRIO PPC_BITMASK(8, 9) +#define CQ_XIVE_CFG_VP_INT_PRIO PPC_BITMASK(10, 11) +#define CQ_XIVE_CFG_INT_PRIO_1 0 +#define CQ_XIVE_CFG_INT_PRIO_2 1 +#define CQ_XIVE_CFG_INT_PRIO_4 2 +#define CQ_XIVE_CFG_INT_PRIO_8 3 +#define CQ_XIVE_CFG_BLOCK_ID_WIDTH PPC_BITMASK(12, 13) +#define CQ_XIVE_CFG_BLOCK_ID_4BITS 0 +#define CQ_XIVE_CFG_BLOCK_ID_5BITS 1 +#define CQ_XIVE_CFG_BLOCK_ID_6BITS 2 +#define CQ_XIVE_CFG_BLOCK_ID_7BITS 3 +#define CQ_XIVE_CFG_HYP_HARD_RANGE PPC_BITMASK(14, 15) +#define CQ_XIVE_CFG_THREADID_7BITS 0 +#define CQ_XIVE_CFG_THREADID_8BITS 1 +#define CQ_XIVE_CFG_THREADID_9BITS 2 +#define CQ_XIVE_CFG_THREADID_10BITs 3 +#define CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE PPC_BIT(16) +#define CQ_XIVE_CFG_HYP_HARD_BLOCK_ID PPC_BITMASK(17, 23) + +#define CQ_XIVE_CFG_GEN1_TIMA_OS PPC_BIT(24) +#define CQ_XIVE_CFG_GEN1_TIMA_HYP PPC_BIT(25) +#define CQ_XIVE_CFG_GEN1_TIMA_HYP_BLK0 PPC_BIT(26) /* 0 if bit[25]=0 */ +#define CQ_XIVE_CFG_GEN1_TIMA_CROWD_DIS PPC_BIT(27) /* 0 if bit[25]=0 */ +#define CQ_XIVE_CFG_GEN1_END_ESX PPC_BIT(28) +#define CQ_XIVE_CFG_EN_VP_SAVE_RESTORE PPC_BIT(38) 
/* 0 if bit[25]=1 */ +#define CQ_XIVE_CFG_EN_VP_SAVE_REST_STRICT PPC_BIT(39) /* 0 if bit[25]=1 */ + +/* Interrupt Controller Base Address Register - 512 pages (32M) */ +#define X_CQ_IC_BAR 0x08 +#define CQ_IC_BAR 0x040 +#define CQ_IC_BAR_VALID PPC_BIT(0) +#define CQ_IC_BAR_64K PPC_BIT(1) +/* 2:7 reserved */ +#define CQ_IC_BAR_ADDR PPC_BITMASK(8, 42) +/* 43:63 reserved */ + +/* Thread Management Base Address Register - 4 pages */ +#define X_CQ_TM_BAR 0x09 +#define CQ_TM_BAR 0x048 +#define CQ_TM_BAR_VALID PPC_BIT(0) +#define CQ_TM_BAR_64K PPC_BIT(1) +#define CQ_TM_BAR_ADDR PPC_BITMASK(8, 49) + +/* ESB Base Address Register */ +#define X_CQ_ESB_BAR 0x0A +#define CQ_ESB_BAR 0x050 +#define CQ_BAR_VALID PPC_BIT(0) +#define CQ_BAR_64K PPC_BIT(1) +/* 2:7 reserved */ +#define CQ_BAR_ADDR PPC_BITMASK(8, 39) +#define CQ_BAR_SET_DIV PPC_BITMASK(56, 58) +#define CQ_BAR_RANGE PPC_BITMASK(59, 63) + /* 0 (16M) - 16 (16T) */ + +/* END Base Address Register */ +#define X_CQ_END_BAR 0x0B +#define CQ_END_BAR 0x058 + +/* NVPG Base Address Register */ +#define X_CQ_NVPG_BAR 0x0C +#define CQ_NVPG_BAR 0x060 + +/* NVC Base Address Register */ +#define X_CQ_NVC_BAR 0x0D +#define CQ_NVC_BAR 0x068 + +/* Table Address Register */ +#define X_CQ_TAR 0x0E +#define CQ_TAR 0x070 +#define CQ_TAR_AUTOINC PPC_BIT(0) +#define CQ_TAR_SELECT PPC_BITMASK(12, 15) +#define CQ_TAR_ESB 0 /* 0 - 15 */ +#define CQ_TAR_END 2 /* 0 - 15 */ +#define CQ_TAR_NVPG 3 /* 0 - 15 */ +#define CQ_TAR_NVC 5 /* 0 - 15 */ +#define CQ_TAR_ENTRY_SELECT PPC_BITMASK(28, 31) + +/* Table Data Register */ +#define X_CQ_TDR 0x0F +#define CQ_TDR 0x078 +/* for the NVPG, NVC, ESB, END Set Translation Tables */ +#define CQ_TDR_VALID PPC_BIT(0) +#define CQ_TDR_BLOCK_ID PPC_BITMASK(60, 63) + +/* + * Processor Cores Enabled for MsgSnd + * Identifies which of the 32 possible core chiplets are enabled and + * available to receive the MsgSnd command + */ +#define X_CQ_MSGSND 0x10 +#define CQ_MSGSND 0x080 + +/* Interrupt Unit Reset Control */ +#define X_CQ_RST_CTL 0x12 +#define CQ_RST_CTL 0x090 +#define CQ_RST_SYNC_RESET PPC_BIT(0) /* Write Only */ +#define CQ_RST_QUIESCE_PB PPC_BIT(1) /* RW */ +#define CQ_RST_MASTER_IDLE PPC_BIT(2) /* Read Only */ +#define CQ_RST_SAVE_IDLE PPC_BIT(3) /* Read Only */ +#define CQ_RST_PB_BAR_RESET PPC_BIT(4) /* Write Only */ + +/* PowerBus General Configuration */ +#define X_CQ_CFG_PB_GEN 0x14 +#define CQ_CFG_PB_GEN 0x0A0 +#define CQ_CFG_PB_GEN_PB_INIT PPC_BIT(45) + +/* + * FIR + * (And-Mask) + * (Or-Mask) + */ +#define X_CQ_FIR 0x30 +#define X_CQ_FIR_AND 0x31 +#define X_CQ_FIR_OR 0x32 +#define CQ_FIR 0x180 +#define CQ_FIR_AND 0x188 +#define CQ_FIR_OR 0x190 +#define CQ_FIR_PB_RCMDX_CI_ERR1 PPC_BIT(19) +#define CQ_FIR_VC_INFO_ERROR_0_2 PPC_BITMASK(61, 63) + +/* + * FIR Mask + * (And-Mask) + * (Or-Mask) + */ +#define X_CQ_FIRMASK 0x33 +#define X_CQ_FIRMASK_AND 0x34 +#define X_CQ_FIRMASK_OR 0x35 +#define CQ_FIRMASK 0x198 +#define CQ_FIRMASK_AND 0x1A0 +#define CQ_FIRMASK_OR 0x1A8 + +/* + * VC0 + */ + +/* VSD table address */ +#define X_VC_VSD_TABLE_ADDR 0x100 +#define VC_VSD_TABLE_ADDR 0x000 +#define VC_VSD_TABLE_AUTOINC PPC_BIT(0) +#define VC_VSD_TABLE_SELECT PPC_BITMASK(12, 15) +#define VC_VSD_TABLE_ADDRESS PPC_BITMASK(28, 31) + +/* VSD table data */ +#define X_VC_VSD_TABLE_DATA 0x101 +#define VC_VSD_TABLE_DATA 0x008 + +/* AIB AT macro indirect kill */ +#define X_VC_AT_MACRO_KILL 0x102 +#define VC_AT_MACRO_KILL 0x010 +#define VC_AT_MACRO_KILL_VALID PPC_BIT(0) +#define VC_AT_MACRO_KILL_VSD PPC_BITMASK(12, 15) +#define 
VC_AT_MACRO_KILL_BLOCK_ID PPC_BITMASK(28, 31) +#define VC_AT_MACRO_KILL_OFFSET PPC_BITMASK(48, 60) + +/* AIB AT macro indirect kill mask (same bit definitions) */ +#define X_VC_AT_MACRO_KILL_MASK 0x103 +#define VC_AT_MACRO_KILL_MASK 0x018 + +/* Remote IRQs and ERQs configuration [n] (n = 0:6) */ +#define X_VC_QUEUES_CFG_REM0 0x117 + +#define VC_QUEUES_CFG_REM0 0x0B8 +#define VC_QUEUES_CFG_REM1 0x0C0 +#define VC_QUEUES_CFG_REM2 0x0C8 +#define VC_QUEUES_CFG_REM3 0x0D0 +#define VC_QUEUES_CFG_REM4 0x0D8 +#define VC_QUEUES_CFG_REM5 0x0E0 +#define VC_QUEUES_CFG_REM6 0x0E8 +#define VC_QUEUES_CFG_MEMB_EN PPC_BIT(38) +#define VC_QUEUES_CFG_MEMB_SZ PPC_BITMASK(42, 47) + +/* + * VC1 + */ + +/* ESBC cache flush control trigger */ +#define X_VC_ESBC_FLUSH_CTRL 0x140 +#define VC_ESBC_FLUSH_CTRL 0x200 +#define VC_ESBC_FLUSH_CTRL_POLL_VALID PPC_BIT(0) +#define VC_ESBC_FLUSH_CTRL_WANT_CACHE_DISABLE PPC_BIT(2) + +/* ESBC cache flush poll trigger */ +#define X_VC_ESBC_FLUSH_POLL 0x141 +#define VC_ESBC_FLUSH_POLL 0x208 +#define VC_ESBC_FLUSH_POLL_BLOCK_ID PPC_BITMASK(0, 3) +#define VC_ESBC_FLUSH_POLL_OFFSET PPC_BITMASK(4, 31) /* 28-bit */ +#define VC_ESBC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(32, 35) +#define VC_ESBC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(36, 63) /* 28-bit */ + +/* EASC flush control register */ +#define X_VC_EASC_FLUSH_CTRL 0x160 +#define VC_EASC_FLUSH_CTRL 0x300 +#define VC_EASC_FLUSH_CTRL_POLL_VALID PPC_BIT(0) +#define VC_EASC_FLUSH_CTRL_WANT_CACHE_DISABLE PPC_BIT(2) + +/* EASC flush poll register */ +#define X_VC_EASC_FLUSH_POLL 0x161 +#define VC_EASC_FLUSH_POLL 0x308 +#define VC_EASC_FLUSH_POLL_BLOCK_ID PPC_BITMASK(0, 3) +#define VC_EASC_FLUSH_POLL_OFFSET PPC_BITMASK(4, 31) /* 28-bit */ +#define VC_EASC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(32, 35) +#define VC_EASC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(36, 63) /* 28-bit */ + +/* + * VC2 + */ + +/* ENDC flush control register */ +#define X_VC_ENDC_FLUSH_CTRL 0x180 +#define VC_ENDC_FLUSH_CTRL 0x400 +#define VC_ENDC_FLUSH_CTRL_POLL_VALID PPC_BIT(0) +#define VC_ENDC_FLUSH_CTRL_WANT_CACHE_DISABLE PPC_BIT(2) +#define VC_ENDC_FLUSH_CTRL_WANT_INVALIDATE PPC_BIT(3) +#define VC_ENDC_FLUSH_CTRL_INJECT_INVALIDATE PPC_BIT(7) + +/* ENDC flush poll register */ +#define X_VC_ENDC_FLUSH_POLL 0x181 +#define VC_ENDC_FLUSH_POLL 0x408 +#define VC_ENDC_FLUSH_POLL_BLOCK_ID PPC_BITMASK(4, 7) +#define VC_ENDC_FLUSH_POLL_OFFSET PPC_BITMASK(8, 31) /* 24-bit */ +#define VC_ENDC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(36, 39) +#define VC_ENDC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(40, 63) /* 24-bit */ + +/* ENDC Sync done */ +#define X_VC_ENDC_SYNC_DONE 0x184 +#define VC_ENDC_SYNC_DONE 0x420 +#define VC_ENDC_SYNC_POLL_DONE PPC_BITMASK(0, 6) +#define VC_ENDC_SYNC_QUEUE_IPI PPC_BIT(0) +#define VC_ENDC_SYNC_QUEUE_HWD PPC_BIT(1) +#define VC_ENDC_SYNC_QUEUE_NXC PPC_BIT(2) +#define VC_ENDC_SYNC_QUEUE_INT PPC_BIT(3) +#define VC_ENDC_SYNC_QUEUE_OS PPC_BIT(4) +#define VC_ENDC_SYNC_QUEUE_POOL PPC_BIT(5) +#define VC_ENDC_SYNC_QUEUE_HARD PPC_BIT(6) +#define VC_QUEUE_COUNT 7 + +/* ENDC cache watch specification 0 */ +#define X_VC_ENDC_WATCH0_SPEC 0x1A0 +#define VC_ENDC_WATCH0_SPEC 0x500 +#define VC_ENDC_WATCH_CONFLICT PPC_BIT(0) +#define VC_ENDC_WATCH_FULL PPC_BIT(8) +#define VC_ENDC_WATCH_BLOCK_ID PPC_BITMASK(28, 31) +#define VC_ENDC_WATCH_INDEX PPC_BITMASK(40, 63) + +/* ENDC cache watch data 0 */ +#define X_VC_ENDC_WATCH0_DATA0 0x1A4 +#define X_VC_ENDC_WATCH0_DATA1 0x1A5 +#define X_VC_ENDC_WATCH0_DATA2 0x1A6 +#define X_VC_ENDC_WATCH0_DATA3 0x1A7 + +#define VC_ENDC_WATCH0_DATA0 0x520 +#define 
VC_ENDC_WATCH0_DATA1 0x528 +#define VC_ENDC_WATCH0_DATA2 0x530 +#define VC_ENDC_WATCH0_DATA3 0x538 + +/* + * PC LSB1 + */ + +/* VSD table address register */ +#define X_PC_VSD_TABLE_ADDR 0x200 +#define PC_VSD_TABLE_ADDR 0x000 +#define PC_VSD_TABLE_AUTOINC PPC_BIT(0) +#define PC_VSD_TABLE_SELECT PPC_BITMASK(12, 15) +#define PC_VSD_TABLE_ADDRESS PPC_BITMASK(28, 31) + +/* VSD table data register */ +#define X_PC_VSD_TABLE_DATA 0x201 +#define PC_VSD_TABLE_DATA 0x008 + +/* AT indirect kill register */ +#define X_PC_AT_KILL 0x202 +#define PC_AT_KILL 0x010 +#define PC_AT_KILL_VALID PPC_BIT(0) +#define PC_AT_KILL_VSD_TYPE PPC_BITMASK(24, 27) +/* Only NVP, NVG, NVC */ +#define PC_AT_KILL_BLOCK_ID PPC_BITMASK(28, 31) +#define PC_AT_KILL_OFFSET PPC_BITMASK(48, 60) + +/* AT indirect kill mask register */ +#define X_PC_AT_KILL_MASK 0x203 +#define PC_AT_KILL_MASK 0x018 +#define PC_AT_KILL_MASK_VSD_TYPE PPC_BITMASK(24, 27) +#define PC_AT_KILL_MASK_BLOCK_ID PPC_BITMASK(28, 31) +#define PC_AT_KILL_MASK_OFFSET PPC_BITMASK(48, 60) + +/* + * PC LSB2 + */ + +/* NxC Cache flush control */ +#define X_PC_NXC_FLUSH_CTRL 0x280 +#define PC_NXC_FLUSH_CTRL 0x400 +#define PC_NXC_FLUSH_CTRL_POLL_VALID PPC_BIT(0) +#define PC_NXC_FLUSH_CTRL_WANT_CACHE_DISABLE PPC_BIT(2) +#define PC_NXC_FLUSH_CTRL_WANT_INVALIDATE PPC_BIT(3) +#define PC_NXC_FLUSH_CTRL_INJECT_INVALIDATE PPC_BIT(7) + +/* NxC Cache flush poll */ +#define X_PC_NXC_FLUSH_POLL 0x281 +#define PC_NXC_FLUSH_POLL 0x408 +#define PC_NXC_FLUSH_POLL_NXC_TYPE PPC_BITMASK(2, 3) +#define PC_NXC_FLUSH_POLL_NXC_TYPE_NVP 0 +#define PC_NXC_FLUSH_POLL_NXC_TYPE_NVG 2 +#define PC_NXC_FLUSH_POLL_NXC_TYPE_NVC 3 +#define PC_NXC_FLUSH_POLL_BLOCK_ID PPC_BITMASK(4, 7) +#define PC_NXC_FLUSH_POLL_OFFSET PPC_BITMASK(8, 31) /* 24-bit */ +#define PC_NXC_FLUSH_POLL_NXC_TYPE_MASK PPC_BITMASK(34, 35) /* 0: Ign */ +#define PC_NXC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(36, 39) +#define PC_NXC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(40, 63) /* 24-bit */ + +/* NxC Cache Watch 0 Specification */ +#define X_PC_NXC_WATCH0_SPEC 0x2A0 +#define PC_NXC_WATCH0_SPEC 0x500 +#define PC_NXC_WATCH_CONFLICT PPC_BIT(0) +#define PC_NXC_WATCH_FULL PPC_BIT(8) +#define PC_NXC_WATCH_NXC_TYPE PPC_BITMASK(26, 27) +#define PC_NXC_WATCH_NXC_NVP 0 +#define PC_NXC_WATCH_NXC_NVG 2 +#define PC_NXC_WATCH_NXC_NVC 3 +#define PC_NXC_WATCH_BLOCK_ID PPC_BITMASK(28, 31) +#define PC_NXC_WATCH_INDEX PPC_BITMASK(40, 63) + +/* NxC Cache Watch 0 Data */ +#define X_PC_NXC_WATCH0_DATA0 0x2A4 +#define X_PC_NXC_WATCH0_DATA1 0x2A5 +#define X_PC_NXC_WATCH0_DATA2 0x2A6 +#define X_PC_NXC_WATCH0_DATA3 0x2A7 + +#define PC_NXC_WATCH0_DATA0 0x520 +#define PC_NXC_WATCH0_DATA1 0x528 +#define PC_NXC_WATCH0_DATA2 0x530 +#define PC_NXC_WATCH0_DATA3 0x538 + +/* + * TCTXT Registers + */ + +/* Physical Thread Enable0 register */ +#define X_TCTXT_EN0 0x300 +#define TCTXT_EN0 0x000 + +/* Physical Thread Enable0 Set register */ +#define X_TCTXT_EN0_SET 0x302 +#define TCTXT_EN0_SET 0x010 + +/* Physical Thread Enable0 Reset register */ +#define X_TCTXT_EN0_RESET 0x303 +#define TCTXT_EN0_RESET 0x018 + +/* Physical Thread Enable1 register */ +#define X_TCTXT_EN1 0x304 +#define TCTXT_EN1 0x020 + +/* Physical Thread Enable1 Set register */ +#define X_TCTXT_EN1_SET 0x306 +#define TCTXT_EN1_SET 0x030 + +/* Physical Thread Enable1 Reset register */ +#define X_TCTXT_EN1_RESET 0x307 +#define TCTXT_EN1_RESET 0x038 + +/* + * VSD Tables + */ +#define VST_ESB 0 +#define VST_EAS 1 /* No used by PC */ +#define VST_END 2 +#define VST_NVP 3 +#define VST_NVG 4 +#define VST_NVC 5 
+#define VST_IC 6 /* No used by PC */ +#define VST_SYNC 7 +#define VST_ERQ 8 /* No used by PC */ + +/* + * Bits in a VSD entry. + * + * Note: the address is naturally aligned, we don't use a PPC_BITMASK, + * but just a mask to apply to the address before OR'ing it in. + * + * Note: VSD_FIRMWARE is a SW bit ! It hijacks an unused bit in the + * VSD and is only meant to be used in indirect mode ! + */ +#define VSD_MODE PPC_BITMASK(0, 1) +#define VSD_MODE_SHARED 1 +#define VSD_MODE_EXCLUSIVE 2 +#define VSD_MODE_FORWARD 3 +#define VSD_FIRMWARE PPC_BIT(2) /* Read warning */ +#define VSD_FIRMWARE2 PPC_BIT(3) /* unused */ +#define VSD_RESERVED PPC_BITMASK(4, 7) /* P10 reserved */ +#define VSD_ADDRESS_MASK 0x00fffffffffff000ull +#define VSD_MIGRATION_REG PPC_BITMASK(52, 55) +#define VSD_INDIRECT PPC_BIT(56) +#define VSD_TSIZE PPC_BITMASK(59, 63) + +#endif /* PPC_PNV_XIVE2_REGS_H */ diff --git a/hw/intc/ppc-uic.c b/hw/intc/ppc-uic.c index 60013f2dde..dcf5de5d43 100644 --- a/hw/intc/ppc-uic.c +++ b/hw/intc/ppc-uic.c @@ -25,11 +25,8 @@ #include "qemu/osdep.h" #include "hw/intc/ppc-uic.h" #include "hw/irq.h" -#include "cpu.h" -#include "hw/ppc/ppc.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" -#include "qapi/error.h" enum { DCR_UICSR = 0x000, @@ -105,10 +102,9 @@ static void ppcuic_trigger_irq(PPCUIC *uic) static void ppcuic_set_irq(void *opaque, int irq_num, int level) { - PPCUIC *uic; + PPCUIC *uic = opaque; uint32_t mask, sr; - uic = opaque; mask = 1U << (31 - irq_num); LOG_UIC("%s: irq %d level %d uicsr %08" PRIx32 " mask %08" PRIx32 " => %08" PRIx32 " %08" PRIx32 "\n", @@ -144,10 +140,9 @@ static void ppcuic_set_irq(void *opaque, int irq_num, int level) static uint32_t dcr_read_uic(void *opaque, int dcrn) { - PPCUIC *uic; + PPCUIC *uic = opaque; uint32_t ret; - uic = opaque; dcrn -= uic->dcr_base; switch (dcrn) { case DCR_UICSR: @@ -192,9 +187,8 @@ static uint32_t dcr_read_uic(void *opaque, int dcrn) static void dcr_write_uic(void *opaque, int dcrn, uint32_t val) { - PPCUIC *uic; + PPCUIC *uic = opaque; - uic = opaque; dcrn -= uic->dcr_base; LOG_UIC("%s: dcr %d val 0x%x\n", __func__, dcrn, val); switch (dcrn) { @@ -251,19 +245,12 @@ static void ppc_uic_reset(DeviceState *dev) static void ppc_uic_realize(DeviceState *dev, Error **errp) { PPCUIC *uic = PPC_UIC(dev); + Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - PowerPCCPU *cpu; int i; - if (!uic->cpu) { - /* This is a programming error in the code using this device */ - error_setg(errp, "ppc-uic 'cpu' link property was not set"); - return; - } - - cpu = POWERPC_CPU(uic->cpu); for (i = 0; i < DCR_UICMAX; i++) { - ppc_dcr_register(&cpu->env, uic->dcr_base + i, uic, + ppc4xx_dcr_register(dcr, uic->dcr_base + i, uic, &dcr_read_uic, &dcr_write_uic); } @@ -273,7 +260,6 @@ static void ppc_uic_realize(DeviceState *dev, Error **errp) } static Property ppc_uic_properties[] = { - DEFINE_PROP_LINK("cpu", PPCUIC, cpu, TYPE_CPU, CPUState *), DEFINE_PROP_UINT32("dcr-base", PPCUIC, dcr_base, 0xc0), DEFINE_PROP_BOOL("use-vectors", PPCUIC, use_vectors, true), DEFINE_PROP_END_OF_LIST() @@ -308,7 +294,7 @@ static void ppc_uic_class_init(ObjectClass *klass, void *data) static const TypeInfo ppc_uic_info = { .name = TYPE_PPC_UIC, - .parent = TYPE_SYS_BUS_DEVICE, + .parent = TYPE_PPC4xx_DCR_DEVICE, .instance_size = sizeof(PPCUIC), .class_init = ppc_uic_class_init, }; diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c new file mode 100644 index 0000000000..eee04643cb --- /dev/null +++ 
b/hw/intc/riscv_aclint.c @@ -0,0 +1,565 @@ +/* + * RISC-V ACLINT (Advanced Core Local Interruptor) + * URL: https://github.com/riscv/riscv-aclint + * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * Copyright (c) 2017 SiFive, Inc. + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * + * This provides real-time clock, timer and interprocessor interrupts. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/sysbus.h" +#include "target/riscv/cpu.h" +#include "hw/qdev-properties.h" +#include "hw/intc/riscv_aclint.h" +#include "qemu/timer.h" +#include "hw/irq.h" +#include "migration/vmstate.h" + +typedef struct riscv_aclint_mtimer_callback { + RISCVAclintMTimerState *s; + int num; +} riscv_aclint_mtimer_callback; + +static uint64_t cpu_riscv_read_rtc_raw(uint32_t timebase_freq) +{ + return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + timebase_freq, NANOSECONDS_PER_SECOND); +} + +static uint64_t cpu_riscv_read_rtc(void *opaque) +{ + RISCVAclintMTimerState *mtimer = opaque; + return cpu_riscv_read_rtc_raw(mtimer->timebase_freq) + mtimer->time_delta; +} + +/* + * Called when timecmp is written to update the QEMU timer or immediately + * trigger timer interrupt if mtimecmp <= current timer value. + */ +static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer, + RISCVCPU *cpu, + int hartid, + uint64_t value) +{ + uint32_t timebase_freq = mtimer->timebase_freq; + uint64_t next; + uint64_t diff; + + uint64_t rtc_r = cpu_riscv_read_rtc(mtimer); + + /* Compute the relative hartid w.r.t the socket */ + hartid = hartid - mtimer->hartid_base; + + mtimer->timecmp[hartid] = value; + if (mtimer->timecmp[hartid] <= rtc_r) { + /* + * If we're setting an MTIMECMP value in the "past", + * immediately raise the timer interrupt + */ + qemu_irq_raise(mtimer->timer_irqs[hartid]); + return; + } + + /* otherwise, set up the future timer interrupt */ + qemu_irq_lower(mtimer->timer_irqs[hartid]); + diff = mtimer->timecmp[hartid] - rtc_r; + /* back to ns (note args switched in muldiv64) */ + uint64_t ns_diff = muldiv64(diff, NANOSECONDS_PER_SECOND, timebase_freq); + + /* + * check if ns_diff overflowed and check if the addition would potentially + * overflow + */ + if ((NANOSECONDS_PER_SECOND > timebase_freq && ns_diff < diff) || + ns_diff > INT64_MAX) { + next = INT64_MAX; + } else { + /* + * as it is very unlikely qemu_clock_get_ns will return a value + * greater than INT64_MAX, no additional check is needed for an + * unsigned integer overflow. + */ + next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ns_diff; + /* + * if ns_diff is INT64_MAX next may still be outside the range + * of a signed integer. + */ + next = MIN(next, INT64_MAX); + } + + timer_mod(mtimer->timers[hartid], next); +} + +/* + * Callback used when the timer set using timer_mod expires. 
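The overflow handling in riscv_aclint_mtimer_write_timecmp() above is the subtle part: the tick delta is converted to nanoseconds with muldiv64() and clamped so the deadline never exceeds INT64_MAX. The same logic, restated as a standalone sketch for readability (not a separate function in the patch):

    /* Sketch: mirror of the deadline computation in the function above. */
    static int64_t aclint_deadline_ns(uint64_t diff_ticks, uint32_t timebase_freq)
    {
        uint64_t ns_diff = muldiv64(diff_ticks, NANOSECONDS_PER_SECOND, timebase_freq);
        uint64_t next;

        /* muldiv64() wrapped, or the result cannot be represented as int64_t */
        if ((NANOSECONDS_PER_SECOND > timebase_freq && ns_diff < diff_ticks) ||
            ns_diff > INT64_MAX) {
            return INT64_MAX;
        }
        /* absolute deadline, clamped the same way as in the patch */
        next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ns_diff;
        return MIN(next, INT64_MAX);
    }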
+ * Should raise the timer interrupt line + */ +static void riscv_aclint_mtimer_cb(void *opaque) +{ + riscv_aclint_mtimer_callback *state = opaque; + + qemu_irq_raise(state->s->timer_irqs[state->num]); +} + +/* CPU read MTIMER register */ +static uint64_t riscv_aclint_mtimer_read(void *opaque, hwaddr addr, + unsigned size) +{ + RISCVAclintMTimerState *mtimer = opaque; + + if (addr >= mtimer->timecmp_base && + addr < (mtimer->timecmp_base + (mtimer->num_harts << 3))) { + size_t hartid = mtimer->hartid_base + + ((addr - mtimer->timecmp_base) >> 3); + CPUState *cpu = qemu_get_cpu(hartid); + CPURISCVState *env = cpu ? cpu->env_ptr : NULL; + if (!env) { + qemu_log_mask(LOG_GUEST_ERROR, + "aclint-mtimer: invalid hartid: %zu", hartid); + } else if ((addr & 0x7) == 0) { + /* timecmp_lo for RV32/RV64 or timecmp for RV64 */ + uint64_t timecmp = mtimer->timecmp[hartid]; + return (size == 4) ? (timecmp & 0xFFFFFFFF) : timecmp; + } else if ((addr & 0x7) == 4) { + /* timecmp_hi */ + uint64_t timecmp = mtimer->timecmp[hartid]; + return (timecmp >> 32) & 0xFFFFFFFF; + } else { + qemu_log_mask(LOG_UNIMP, + "aclint-mtimer: invalid read: %08x", (uint32_t)addr); + return 0; + } + } else if (addr == mtimer->time_base) { + /* time_lo for RV32/RV64 or timecmp for RV64 */ + uint64_t rtc = cpu_riscv_read_rtc(mtimer); + return (size == 4) ? (rtc & 0xFFFFFFFF) : rtc; + } else if (addr == mtimer->time_base + 4) { + /* time_hi */ + return (cpu_riscv_read_rtc(mtimer) >> 32) & 0xFFFFFFFF; + } + + qemu_log_mask(LOG_UNIMP, + "aclint-mtimer: invalid read: %08x", (uint32_t)addr); + return 0; +} + +/* CPU write MTIMER register */ +static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + RISCVAclintMTimerState *mtimer = opaque; + int i; + + if (addr >= mtimer->timecmp_base && + addr < (mtimer->timecmp_base + (mtimer->num_harts << 3))) { + size_t hartid = mtimer->hartid_base + + ((addr - mtimer->timecmp_base) >> 3); + CPUState *cpu = qemu_get_cpu(hartid); + CPURISCVState *env = cpu ? 
cpu->env_ptr : NULL; + if (!env) { + qemu_log_mask(LOG_GUEST_ERROR, + "aclint-mtimer: invalid hartid: %zu", hartid); + } else if ((addr & 0x7) == 0) { + if (size == 4) { + /* timecmp_lo for RV32/RV64 */ + uint64_t timecmp_hi = mtimer->timecmp[hartid] >> 32; + riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid, + timecmp_hi << 32 | (value & 0xFFFFFFFF)); + } else { + /* timecmp for RV64 */ + riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid, + value); + } + } else if ((addr & 0x7) == 4) { + if (size == 4) { + /* timecmp_hi for RV32/RV64 */ + uint64_t timecmp_lo = mtimer->timecmp[hartid]; + riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid, + value << 32 | (timecmp_lo & 0xFFFFFFFF)); + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "aclint-mtimer: invalid timecmp_hi write: %08x", + (uint32_t)addr); + } + } else { + qemu_log_mask(LOG_UNIMP, + "aclint-mtimer: invalid timecmp write: %08x", + (uint32_t)addr); + } + return; + } else if (addr == mtimer->time_base || addr == mtimer->time_base + 4) { + uint64_t rtc_r = cpu_riscv_read_rtc_raw(mtimer->timebase_freq); + + if (addr == mtimer->time_base) { + if (size == 4) { + /* time_lo for RV32/RV64 */ + mtimer->time_delta = ((rtc_r & ~0xFFFFFFFFULL) | value) - rtc_r; + } else { + /* time for RV64 */ + mtimer->time_delta = value - rtc_r; + } + } else { + if (size == 4) { + /* time_hi for RV32/RV64 */ + mtimer->time_delta = (value << 32 | (rtc_r & 0xFFFFFFFF)) - rtc_r; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "aclint-mtimer: invalid time_hi write: %08x", + (uint32_t)addr); + return; + } + } + + /* Check if timer interrupt is triggered for each hart. */ + for (i = 0; i < mtimer->num_harts; i++) { + CPUState *cpu = qemu_get_cpu(mtimer->hartid_base + i); + CPURISCVState *env = cpu ? 
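The separate timecmp_lo/timecmp_hi paths above exist so that RV32 guests, which cannot update the 64-bit comparator atomically, never observe a transient value that fires early. A guest-side sketch of the conventional sequence (bare-metal; the pointer names are illustrative, not part of the patch):

    /*
     * Guest-side sketch (RV32): update a 64-bit mtimecmp with two 32-bit
     * stores. 'lo' and 'hi' are illustrative pointers to the per-hart
     * timecmp words handled by riscv_aclint_mtimer_write() above.
     */
    static void set_timecmp32(volatile uint32_t *lo, volatile uint32_t *hi,
                              uint64_t next)
    {
        *hi = 0xffffffff;              /* park the comparator in the far future */
        *lo = (uint32_t)next;          /* low word cannot trigger a spurious IRQ */
        *hi = (uint32_t)(next >> 32);  /* install the real high word last */
    }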
cpu->env_ptr : NULL; + if (!env) { + continue; + } + riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), + mtimer->hartid_base + i, + mtimer->timecmp[i]); + } + return; + } + + qemu_log_mask(LOG_UNIMP, + "aclint-mtimer: invalid write: %08x", (uint32_t)addr); +} + +static const MemoryRegionOps riscv_aclint_mtimer_ops = { + .read = riscv_aclint_mtimer_read, + .write = riscv_aclint_mtimer_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8 + }, + .impl = { + .min_access_size = 4, + .max_access_size = 8, + } +}; + +static Property riscv_aclint_mtimer_properties[] = { + DEFINE_PROP_UINT32("hartid-base", RISCVAclintMTimerState, + hartid_base, 0), + DEFINE_PROP_UINT32("num-harts", RISCVAclintMTimerState, num_harts, 1), + DEFINE_PROP_UINT32("timecmp-base", RISCVAclintMTimerState, + timecmp_base, RISCV_ACLINT_DEFAULT_MTIMECMP), + DEFINE_PROP_UINT32("time-base", RISCVAclintMTimerState, + time_base, RISCV_ACLINT_DEFAULT_MTIME), + DEFINE_PROP_UINT32("aperture-size", RISCVAclintMTimerState, + aperture_size, RISCV_ACLINT_DEFAULT_MTIMER_SIZE), + DEFINE_PROP_UINT32("timebase-freq", RISCVAclintMTimerState, + timebase_freq, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void riscv_aclint_mtimer_realize(DeviceState *dev, Error **errp) +{ + RISCVAclintMTimerState *s = RISCV_ACLINT_MTIMER(dev); + int i; + + memory_region_init_io(&s->mmio, OBJECT(dev), &riscv_aclint_mtimer_ops, + s, TYPE_RISCV_ACLINT_MTIMER, s->aperture_size); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio); + + s->timer_irqs = g_new(qemu_irq, s->num_harts); + qdev_init_gpio_out(dev, s->timer_irqs, s->num_harts); + + s->timers = g_new0(QEMUTimer *, s->num_harts); + s->timecmp = g_new0(uint64_t, s->num_harts); + /* Claim timer interrupt bits */ + for (i = 0; i < s->num_harts; i++) { + RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(s->hartid_base + i)); + if (riscv_cpu_claim_interrupts(cpu, MIP_MTIP) < 0) { + error_report("MTIP already claimed"); + exit(1); + } + } +} + +static void riscv_aclint_mtimer_reset_enter(Object *obj, ResetType type) +{ + /* + * According to RISC-V ACLINT spec: + * - On MTIMER device reset, the MTIME register is cleared to zero. + * - On MTIMER device reset, the MTIMECMP registers are in unknown state. + */ + RISCVAclintMTimerState *mtimer = RISCV_ACLINT_MTIMER(obj); + + /* + * Clear mtime register by writing to 0 it. + * Pending mtime interrupts will also be cleared at the same time. + */ + riscv_aclint_mtimer_write(mtimer, mtimer->time_base, 0, 8); +} + +static const VMStateDescription vmstate_riscv_mtimer = { + .name = "riscv_mtimer", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_VARRAY_UINT32(timecmp, RISCVAclintMTimerState, + num_harts, 0, + vmstate_info_uint64, uint64_t), + VMSTATE_END_OF_LIST() + } +}; + +static void riscv_aclint_mtimer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = riscv_aclint_mtimer_realize; + device_class_set_props(dc, riscv_aclint_mtimer_properties); + ResettableClass *rc = RESETTABLE_CLASS(klass); + rc->phases.enter = riscv_aclint_mtimer_reset_enter; + dc->vmsd = &vmstate_riscv_mtimer; +} + +static const TypeInfo riscv_aclint_mtimer_info = { + .name = TYPE_RISCV_ACLINT_MTIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(RISCVAclintMTimerState), + .class_init = riscv_aclint_mtimer_class_init, +}; + +/* + * Create ACLINT MTIMER device. 
+ */ +DeviceState *riscv_aclint_mtimer_create(hwaddr addr, hwaddr size, + uint32_t hartid_base, uint32_t num_harts, + uint32_t timecmp_base, uint32_t time_base, uint32_t timebase_freq, + bool provide_rdtime) +{ + int i; + DeviceState *dev = qdev_new(TYPE_RISCV_ACLINT_MTIMER); + RISCVAclintMTimerState *s = RISCV_ACLINT_MTIMER(dev); + + assert(num_harts <= RISCV_ACLINT_MAX_HARTS); + assert(!(addr & 0x7)); + assert(!(timecmp_base & 0x7)); + assert(!(time_base & 0x7)); + + qdev_prop_set_uint32(dev, "hartid-base", hartid_base); + qdev_prop_set_uint32(dev, "num-harts", num_harts); + qdev_prop_set_uint32(dev, "timecmp-base", timecmp_base); + qdev_prop_set_uint32(dev, "time-base", time_base); + qdev_prop_set_uint32(dev, "aperture-size", size); + qdev_prop_set_uint32(dev, "timebase-freq", timebase_freq); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr); + + for (i = 0; i < num_harts; i++) { + CPUState *cpu = qemu_get_cpu(hartid_base + i); + RISCVCPU *rvcpu = RISCV_CPU(cpu); + CPURISCVState *env = cpu ? cpu->env_ptr : NULL; + riscv_aclint_mtimer_callback *cb = + g_new0(riscv_aclint_mtimer_callback, 1); + + if (!env) { + g_free(cb); + continue; + } + if (provide_rdtime) { + riscv_cpu_set_rdtime_fn(env, cpu_riscv_read_rtc, dev); + } + + cb->s = s; + cb->num = i; + s->timers[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL, + &riscv_aclint_mtimer_cb, cb); + s->timecmp[i] = 0; + + qdev_connect_gpio_out(dev, i, + qdev_get_gpio_in(DEVICE(rvcpu), IRQ_M_TIMER)); + } + + return dev; +} + +/* CPU read [M|S]SWI register */ +static uint64_t riscv_aclint_swi_read(void *opaque, hwaddr addr, + unsigned size) +{ + RISCVAclintSwiState *swi = opaque; + + if (addr < (swi->num_harts << 2)) { + size_t hartid = swi->hartid_base + (addr >> 2); + CPUState *cpu = qemu_get_cpu(hartid); + CPURISCVState *env = cpu ? cpu->env_ptr : NULL; + if (!env) { + qemu_log_mask(LOG_GUEST_ERROR, + "aclint-swi: invalid hartid: %zu", hartid); + } else if ((addr & 0x3) == 0) { + return (swi->sswi) ? 0 : ((env->mip & MIP_MSIP) > 0); + } + } + + qemu_log_mask(LOG_UNIMP, + "aclint-swi: invalid read: %08x", (uint32_t)addr); + return 0; +} + +/* CPU write [M|S]SWI register */ +static void riscv_aclint_swi_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + RISCVAclintSwiState *swi = opaque; + + if (addr < (swi->num_harts << 2)) { + size_t hartid = swi->hartid_base + (addr >> 2); + CPUState *cpu = qemu_get_cpu(hartid); + CPURISCVState *env = cpu ? 
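riscv_aclint_mtimer_create() above hides the qdev property/realize/map boilerplate; a machine model calls it once per socket. A hedged usage sketch: the base address, hart count and 10 MHz timebase are placeholders, only the RISCV_ACLINT_* constants come from the device header.

    /* Board-side sketch (inside a machine init function): harts 0..3. */
    DeviceState *mtimer = riscv_aclint_mtimer_create(
            0x2004000,                        /* illustrative MMIO base */
            RISCV_ACLINT_DEFAULT_MTIMER_SIZE, /* aperture for timecmp + time */
            0, 4,                             /* hartid-base, num-harts */
            RISCV_ACLINT_DEFAULT_MTIMECMP,
            RISCV_ACLINT_DEFAULT_MTIME,
            10 * 1000 * 1000,                 /* timebase-freq (placeholder) */
            true);                            /* expose rdtime to the CPUs */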
cpu->env_ptr : NULL; + if (!env) { + qemu_log_mask(LOG_GUEST_ERROR, + "aclint-swi: invalid hartid: %zu", hartid); + } else if ((addr & 0x3) == 0) { + if (value & 0x1) { + qemu_irq_raise(swi->soft_irqs[hartid - swi->hartid_base]); + } else { + if (!swi->sswi) { + qemu_irq_lower(swi->soft_irqs[hartid - swi->hartid_base]); + } + } + return; + } + } + + qemu_log_mask(LOG_UNIMP, + "aclint-swi: invalid write: %08x", (uint32_t)addr); +} + +static const MemoryRegionOps riscv_aclint_swi_ops = { + .read = riscv_aclint_swi_read, + .write = riscv_aclint_swi_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } +}; + +static Property riscv_aclint_swi_properties[] = { + DEFINE_PROP_UINT32("hartid-base", RISCVAclintSwiState, hartid_base, 0), + DEFINE_PROP_UINT32("num-harts", RISCVAclintSwiState, num_harts, 1), + DEFINE_PROP_UINT32("sswi", RISCVAclintSwiState, sswi, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void riscv_aclint_swi_realize(DeviceState *dev, Error **errp) +{ + RISCVAclintSwiState *swi = RISCV_ACLINT_SWI(dev); + int i; + + memory_region_init_io(&swi->mmio, OBJECT(dev), &riscv_aclint_swi_ops, swi, + TYPE_RISCV_ACLINT_SWI, RISCV_ACLINT_SWI_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &swi->mmio); + + swi->soft_irqs = g_new(qemu_irq, swi->num_harts); + qdev_init_gpio_out(dev, swi->soft_irqs, swi->num_harts); + + /* Claim software interrupt bits */ + for (i = 0; i < swi->num_harts; i++) { + RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(swi->hartid_base + i)); + /* We don't claim mip.SSIP because it is writable by software */ + if (riscv_cpu_claim_interrupts(cpu, swi->sswi ? 0 : MIP_MSIP) < 0) { + error_report("MSIP already claimed"); + exit(1); + } + } +} + +static void riscv_aclint_swi_reset_enter(Object *obj, ResetType type) +{ + /* + * According to RISC-V ACLINT spec: + * - On MSWI device reset, each MSIP register is cleared to zero. + * + * p.s. SSWI device reset does nothing since SETSIP register always reads 0. + */ + RISCVAclintSwiState *swi = RISCV_ACLINT_SWI(obj); + int i; + + if (!swi->sswi) { + for (i = 0; i < swi->num_harts; i++) { + /* Clear MSIP registers by lowering software interrupts. */ + qemu_irq_lower(swi->soft_irqs[i]); + } + } +} + +static void riscv_aclint_swi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = riscv_aclint_swi_realize; + device_class_set_props(dc, riscv_aclint_swi_properties); + ResettableClass *rc = RESETTABLE_CLASS(klass); + rc->phases.enter = riscv_aclint_swi_reset_enter; +} + +static const TypeInfo riscv_aclint_swi_info = { + .name = TYPE_RISCV_ACLINT_SWI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(RISCVAclintSwiState), + .class_init = riscv_aclint_swi_class_init, +}; + +/* + * Create ACLINT [M|S]SWI device. + */ +DeviceState *riscv_aclint_swi_create(hwaddr addr, uint32_t hartid_base, + uint32_t num_harts, bool sswi) +{ + int i; + DeviceState *dev = qdev_new(TYPE_RISCV_ACLINT_SWI); + + assert(num_harts <= RISCV_ACLINT_MAX_HARTS); + assert(!(addr & 0x3)); + + qdev_prop_set_uint32(dev, "hartid-base", hartid_base); + qdev_prop_set_uint32(dev, "num-harts", num_harts); + qdev_prop_set_uint32(dev, "sswi", sswi ? true : false); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr); + + for (i = 0; i < num_harts; i++) { + CPUState *cpu = qemu_get_cpu(hartid_base + i); + RISCVCPU *rvcpu = RISCV_CPU(cpu); + + qdev_connect_gpio_out(dev, i, + qdev_get_gpio_in(DEVICE(rvcpu), + (sswi) ? 
IRQ_S_SOFT : IRQ_M_SOFT)); + } + + return dev; +} + +static void riscv_aclint_register_types(void) +{ + type_register_static(&riscv_aclint_mtimer_info); + type_register_static(&riscv_aclint_swi_info); +} + +type_init(riscv_aclint_register_types) diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c new file mode 100644 index 0000000000..cfd007e629 --- /dev/null +++ b/hw/intc/riscv_aplic.c @@ -0,0 +1,978 @@ +/* + * RISC-V APLIC (Advanced Platform Level Interrupt Controller) + * + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/error-report.h" +#include "qemu/bswap.h" +#include "exec/address-spaces.h" +#include "hw/sysbus.h" +#include "hw/pci/msi.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "hw/intc/riscv_aplic.h" +#include "hw/irq.h" +#include "target/riscv/cpu.h" +#include "sysemu/sysemu.h" +#include "migration/vmstate.h" + +#define APLIC_MAX_IDC (1UL << 14) +#define APLIC_MAX_SOURCE 1024 +#define APLIC_MIN_IPRIO_BITS 1 +#define APLIC_MAX_IPRIO_BITS 8 +#define APLIC_MAX_CHILDREN 1024 + +#define APLIC_DOMAINCFG 0x0000 +#define APLIC_DOMAINCFG_RDONLY 0x80000000 +#define APLIC_DOMAINCFG_IE (1 << 8) +#define APLIC_DOMAINCFG_DM (1 << 2) +#define APLIC_DOMAINCFG_BE (1 << 0) + +#define APLIC_SOURCECFG_BASE 0x0004 +#define APLIC_SOURCECFG_D (1 << 10) +#define APLIC_SOURCECFG_CHILDIDX_MASK 0x000003ff +#define APLIC_SOURCECFG_SM_MASK 0x00000007 +#define APLIC_SOURCECFG_SM_INACTIVE 0x0 +#define APLIC_SOURCECFG_SM_DETACH 0x1 +#define APLIC_SOURCECFG_SM_EDGE_RISE 0x4 +#define APLIC_SOURCECFG_SM_EDGE_FALL 0x5 +#define APLIC_SOURCECFG_SM_LEVEL_HIGH 0x6 +#define APLIC_SOURCECFG_SM_LEVEL_LOW 0x7 + +#define APLIC_MMSICFGADDR 0x1bc0 +#define APLIC_MMSICFGADDRH 0x1bc4 +#define APLIC_SMSICFGADDR 0x1bc8 +#define APLIC_SMSICFGADDRH 0x1bcc + +#define APLIC_xMSICFGADDRH_L (1UL << 31) +#define APLIC_xMSICFGADDRH_HHXS_MASK 0x1f +#define APLIC_xMSICFGADDRH_HHXS_SHIFT 24 +#define APLIC_xMSICFGADDRH_LHXS_MASK 0x7 +#define APLIC_xMSICFGADDRH_LHXS_SHIFT 20 +#define APLIC_xMSICFGADDRH_HHXW_MASK 0x7 +#define APLIC_xMSICFGADDRH_HHXW_SHIFT 16 +#define APLIC_xMSICFGADDRH_LHXW_MASK 0xf +#define APLIC_xMSICFGADDRH_LHXW_SHIFT 12 +#define APLIC_xMSICFGADDRH_BAPPN_MASK 0xfff + +#define APLIC_xMSICFGADDR_PPN_SHIFT 12 + +#define APLIC_xMSICFGADDR_PPN_HART(__lhxs) \ + ((1UL << (__lhxs)) - 1) + +#define APLIC_xMSICFGADDR_PPN_LHX_MASK(__lhxw) \ + ((1UL << (__lhxw)) - 1) +#define APLIC_xMSICFGADDR_PPN_LHX_SHIFT(__lhxs) \ + ((__lhxs)) +#define APLIC_xMSICFGADDR_PPN_LHX(__lhxw, __lhxs) \ + (APLIC_xMSICFGADDR_PPN_LHX_MASK(__lhxw) << \ + APLIC_xMSICFGADDR_PPN_LHX_SHIFT(__lhxs)) + +#define APLIC_xMSICFGADDR_PPN_HHX_MASK(__hhxw) \ + ((1UL << (__hhxw)) - 1) +#define APLIC_xMSICFGADDR_PPN_HHX_SHIFT(__hhxs) \ + ((__hhxs) + APLIC_xMSICFGADDR_PPN_SHIFT) +#define APLIC_xMSICFGADDR_PPN_HHX(__hhxw, __hhxs) \ 
+ (APLIC_xMSICFGADDR_PPN_HHX_MASK(__hhxw) << \ + APLIC_xMSICFGADDR_PPN_HHX_SHIFT(__hhxs)) + +#define APLIC_xMSICFGADDRH_VALID_MASK \ + (APLIC_xMSICFGADDRH_L | \ + (APLIC_xMSICFGADDRH_HHXS_MASK << APLIC_xMSICFGADDRH_HHXS_SHIFT) | \ + (APLIC_xMSICFGADDRH_LHXS_MASK << APLIC_xMSICFGADDRH_LHXS_SHIFT) | \ + (APLIC_xMSICFGADDRH_HHXW_MASK << APLIC_xMSICFGADDRH_HHXW_SHIFT) | \ + (APLIC_xMSICFGADDRH_LHXW_MASK << APLIC_xMSICFGADDRH_LHXW_SHIFT) | \ + APLIC_xMSICFGADDRH_BAPPN_MASK) + +#define APLIC_SETIP_BASE 0x1c00 +#define APLIC_SETIPNUM 0x1cdc + +#define APLIC_CLRIP_BASE 0x1d00 +#define APLIC_CLRIPNUM 0x1ddc + +#define APLIC_SETIE_BASE 0x1e00 +#define APLIC_SETIENUM 0x1edc + +#define APLIC_CLRIE_BASE 0x1f00 +#define APLIC_CLRIENUM 0x1fdc + +#define APLIC_SETIPNUM_LE 0x2000 +#define APLIC_SETIPNUM_BE 0x2004 + +#define APLIC_ISTATE_PENDING (1U << 0) +#define APLIC_ISTATE_ENABLED (1U << 1) +#define APLIC_ISTATE_ENPEND (APLIC_ISTATE_ENABLED | \ + APLIC_ISTATE_PENDING) +#define APLIC_ISTATE_INPUT (1U << 8) + +#define APLIC_GENMSI 0x3000 + +#define APLIC_TARGET_BASE 0x3004 +#define APLIC_TARGET_HART_IDX_SHIFT 18 +#define APLIC_TARGET_HART_IDX_MASK 0x3fff +#define APLIC_TARGET_GUEST_IDX_SHIFT 12 +#define APLIC_TARGET_GUEST_IDX_MASK 0x3f +#define APLIC_TARGET_IPRIO_MASK 0xff +#define APLIC_TARGET_EIID_MASK 0x7ff + +#define APLIC_IDC_BASE 0x4000 +#define APLIC_IDC_SIZE 32 + +#define APLIC_IDC_IDELIVERY 0x00 + +#define APLIC_IDC_IFORCE 0x04 + +#define APLIC_IDC_ITHRESHOLD 0x08 + +#define APLIC_IDC_TOPI 0x18 +#define APLIC_IDC_TOPI_ID_SHIFT 16 +#define APLIC_IDC_TOPI_ID_MASK 0x3ff +#define APLIC_IDC_TOPI_PRIO_MASK 0xff + +#define APLIC_IDC_CLAIMI 0x1c + +static uint32_t riscv_aplic_read_input_word(RISCVAPLICState *aplic, + uint32_t word) +{ + uint32_t i, irq, ret = 0; + + for (i = 0; i < 32; i++) { + irq = word * 32 + i; + if (!irq || aplic->num_irqs <= irq) { + continue; + } + + ret |= ((aplic->state[irq] & APLIC_ISTATE_INPUT) ? 1 : 0) << i; + } + + return ret; +} + +static uint32_t riscv_aplic_read_pending_word(RISCVAPLICState *aplic, + uint32_t word) +{ + uint32_t i, irq, ret = 0; + + for (i = 0; i < 32; i++) { + irq = word * 32 + i; + if (!irq || aplic->num_irqs <= irq) { + continue; + } + + ret |= ((aplic->state[irq] & APLIC_ISTATE_PENDING) ? 
1 : 0) << i; + } + + return ret; +} + +static void riscv_aplic_set_pending_raw(RISCVAPLICState *aplic, + uint32_t irq, bool pending) +{ + if (pending) { + aplic->state[irq] |= APLIC_ISTATE_PENDING; + } else { + aplic->state[irq] &= ~APLIC_ISTATE_PENDING; + } +} + +static void riscv_aplic_set_pending(RISCVAPLICState *aplic, + uint32_t irq, bool pending) +{ + uint32_t sourcecfg, sm; + + if ((irq <= 0) || (aplic->num_irqs <= irq)) { + return; + } + + sourcecfg = aplic->sourcecfg[irq]; + if (sourcecfg & APLIC_SOURCECFG_D) { + return; + } + + sm = sourcecfg & APLIC_SOURCECFG_SM_MASK; + if ((sm == APLIC_SOURCECFG_SM_INACTIVE) || + ((!aplic->msimode || (aplic->msimode && !pending)) && + ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) || + (sm == APLIC_SOURCECFG_SM_LEVEL_LOW)))) { + return; + } + + riscv_aplic_set_pending_raw(aplic, irq, pending); +} + +static void riscv_aplic_set_pending_word(RISCVAPLICState *aplic, + uint32_t word, uint32_t value, + bool pending) +{ + uint32_t i, irq; + + for (i = 0; i < 32; i++) { + irq = word * 32 + i; + if (!irq || aplic->num_irqs <= irq) { + continue; + } + + if (value & (1U << i)) { + riscv_aplic_set_pending(aplic, irq, pending); + } + } +} + +static uint32_t riscv_aplic_read_enabled_word(RISCVAPLICState *aplic, + int word) +{ + uint32_t i, irq, ret = 0; + + for (i = 0; i < 32; i++) { + irq = word * 32 + i; + if (!irq || aplic->num_irqs <= irq) { + continue; + } + + ret |= ((aplic->state[irq] & APLIC_ISTATE_ENABLED) ? 1 : 0) << i; + } + + return ret; +} + +static void riscv_aplic_set_enabled_raw(RISCVAPLICState *aplic, + uint32_t irq, bool enabled) +{ + if (enabled) { + aplic->state[irq] |= APLIC_ISTATE_ENABLED; + } else { + aplic->state[irq] &= ~APLIC_ISTATE_ENABLED; + } +} + +static void riscv_aplic_set_enabled(RISCVAPLICState *aplic, + uint32_t irq, bool enabled) +{ + uint32_t sourcecfg, sm; + + if ((irq <= 0) || (aplic->num_irqs <= irq)) { + return; + } + + sourcecfg = aplic->sourcecfg[irq]; + if (sourcecfg & APLIC_SOURCECFG_D) { + return; + } + + sm = sourcecfg & APLIC_SOURCECFG_SM_MASK; + if (sm == APLIC_SOURCECFG_SM_INACTIVE) { + return; + } + + riscv_aplic_set_enabled_raw(aplic, irq, enabled); +} + +static void riscv_aplic_set_enabled_word(RISCVAPLICState *aplic, + uint32_t word, uint32_t value, + bool enabled) +{ + uint32_t i, irq; + + for (i = 0; i < 32; i++) { + irq = word * 32 + i; + if (!irq || aplic->num_irqs <= irq) { + continue; + } + + if (value & (1U << i)) { + riscv_aplic_set_enabled(aplic, irq, enabled); + } + } +} + +static void riscv_aplic_msi_send(RISCVAPLICState *aplic, + uint32_t hart_idx, uint32_t guest_idx, + uint32_t eiid) +{ + uint64_t addr; + MemTxResult result; + RISCVAPLICState *aplic_m; + uint32_t lhxs, lhxw, hhxs, hhxw, group_idx, msicfgaddr, msicfgaddrH; + + aplic_m = aplic; + while (aplic_m && !aplic_m->mmode) { + aplic_m = aplic_m->parent; + } + if (!aplic_m) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: m-level APLIC not found\n", + __func__); + return; + } + + if (aplic->mmode) { + msicfgaddr = aplic_m->mmsicfgaddr; + msicfgaddrH = aplic_m->mmsicfgaddrH; + } else { + msicfgaddr = aplic_m->smsicfgaddr; + msicfgaddrH = aplic_m->smsicfgaddrH; + } + + lhxs = (msicfgaddrH >> APLIC_xMSICFGADDRH_LHXS_SHIFT) & + APLIC_xMSICFGADDRH_LHXS_MASK; + lhxw = (msicfgaddrH >> APLIC_xMSICFGADDRH_LHXW_SHIFT) & + APLIC_xMSICFGADDRH_LHXW_MASK; + hhxs = (msicfgaddrH >> APLIC_xMSICFGADDRH_HHXS_SHIFT) & + APLIC_xMSICFGADDRH_HHXS_MASK; + hhxw = (msicfgaddrH >> APLIC_xMSICFGADDRH_HHXW_SHIFT) & + APLIC_xMSICFGADDRH_HHXW_MASK; + + group_idx = hart_idx >> lhxw; + 
hart_idx &= APLIC_xMSICFGADDR_PPN_LHX_MASK(lhxw); + + addr = msicfgaddr; + addr |= ((uint64_t)(msicfgaddrH & APLIC_xMSICFGADDRH_BAPPN_MASK)) << 32; + addr |= ((uint64_t)(group_idx & APLIC_xMSICFGADDR_PPN_HHX_MASK(hhxw))) << + APLIC_xMSICFGADDR_PPN_HHX_SHIFT(hhxs); + addr |= ((uint64_t)(hart_idx & APLIC_xMSICFGADDR_PPN_LHX_MASK(lhxw))) << + APLIC_xMSICFGADDR_PPN_LHX_SHIFT(lhxs); + addr |= (uint64_t)(guest_idx & APLIC_xMSICFGADDR_PPN_HART(lhxs)); + addr <<= APLIC_xMSICFGADDR_PPN_SHIFT; + + address_space_stl_le(&address_space_memory, addr, + eiid, MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: MSI write failed for " + "hart_index=%d guest_index=%d eiid=%d\n", + __func__, hart_idx, guest_idx, eiid); + } +} + +static void riscv_aplic_msi_irq_update(RISCVAPLICState *aplic, uint32_t irq) +{ + uint32_t hart_idx, guest_idx, eiid; + + if (!aplic->msimode || (aplic->num_irqs <= irq) || + !(aplic->domaincfg & APLIC_DOMAINCFG_IE)) { + return; + } + + if ((aplic->state[irq] & APLIC_ISTATE_ENPEND) != APLIC_ISTATE_ENPEND) { + return; + } + + riscv_aplic_set_pending_raw(aplic, irq, false); + + hart_idx = aplic->target[irq] >> APLIC_TARGET_HART_IDX_SHIFT; + hart_idx &= APLIC_TARGET_HART_IDX_MASK; + if (aplic->mmode) { + /* M-level APLIC ignores guest_index */ + guest_idx = 0; + } else { + guest_idx = aplic->target[irq] >> APLIC_TARGET_GUEST_IDX_SHIFT; + guest_idx &= APLIC_TARGET_GUEST_IDX_MASK; + } + eiid = aplic->target[irq] & APLIC_TARGET_EIID_MASK; + riscv_aplic_msi_send(aplic, hart_idx, guest_idx, eiid); +} + +static uint32_t riscv_aplic_idc_topi(RISCVAPLICState *aplic, uint32_t idc) +{ + uint32_t best_irq, best_iprio; + uint32_t irq, iprio, ihartidx, ithres; + + if (aplic->num_harts <= idc) { + return 0; + } + + ithres = aplic->ithreshold[idc]; + best_irq = best_iprio = UINT32_MAX; + for (irq = 1; irq < aplic->num_irqs; irq++) { + if ((aplic->state[irq] & APLIC_ISTATE_ENPEND) != + APLIC_ISTATE_ENPEND) { + continue; + } + + ihartidx = aplic->target[irq] >> APLIC_TARGET_HART_IDX_SHIFT; + ihartidx &= APLIC_TARGET_HART_IDX_MASK; + if (ihartidx != idc) { + continue; + } + + iprio = aplic->target[irq] & aplic->iprio_mask; + if (ithres && iprio >= ithres) { + continue; + } + + if (iprio < best_iprio) { + best_irq = irq; + best_iprio = iprio; + } + } + + if (best_irq < aplic->num_irqs && best_iprio <= aplic->iprio_mask) { + return (best_irq << APLIC_IDC_TOPI_ID_SHIFT) | best_iprio; + } + + return 0; +} + +static void riscv_aplic_idc_update(RISCVAPLICState *aplic, uint32_t idc) +{ + uint32_t topi; + + if (aplic->msimode || aplic->num_harts <= idc) { + return; + } + + topi = riscv_aplic_idc_topi(aplic, idc); + if ((aplic->domaincfg & APLIC_DOMAINCFG_IE) && + aplic->idelivery[idc] && + (aplic->iforce[idc] || topi)) { + qemu_irq_raise(aplic->external_irqs[idc]); + } else { + qemu_irq_lower(aplic->external_irqs[idc]); + } +} + +static uint32_t riscv_aplic_idc_claimi(RISCVAPLICState *aplic, uint32_t idc) +{ + uint32_t irq, state, sm, topi = riscv_aplic_idc_topi(aplic, idc); + + if (!topi) { + aplic->iforce[idc] = 0; + return 0; + } + + irq = (topi >> APLIC_IDC_TOPI_ID_SHIFT) & APLIC_IDC_TOPI_ID_MASK; + sm = aplic->sourcecfg[irq] & APLIC_SOURCECFG_SM_MASK; + state = aplic->state[irq]; + riscv_aplic_set_pending_raw(aplic, irq, false); + if ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) && + (state & APLIC_ISTATE_INPUT)) { + riscv_aplic_set_pending_raw(aplic, irq, true); + } else if ((sm == APLIC_SOURCECFG_SM_LEVEL_LOW) && + !(state & APLIC_ISTATE_INPUT)) { + 
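The address arithmetic in riscv_aplic_msi_send() above composes the MSI target from the base PPN plus group, hart and guest fields. A worked example, with configuration values chosen only for illustration:

    /*
     * Worked example (values are illustrative, not from this patch):
     * machine-level IMSIC files start at physical 0x24000000, one 4 KiB
     * file per hart, no guest files, four harts in a single group:
     *
     *   lhxs = 0, lhxw = 2, hhxs = 0, hhxw = 0
     *   msicfgaddr = 0x24000000 >> 12 = 0x24000          (base PPN)
     *
     * Sending to hart_idx = 3, guest_idx = 0:
     *
     *   group_idx = 3 >> lhxw = 0
     *   hart bits = 3 & APLIC_xMSICFGADDR_PPN_LHX_MASK(2) = 3
     *   addr      = (0x24000 | 3) << APLIC_xMSICFGADDR_PPN_SHIFT = 0x24003000
     *
     * i.e. the EIID is stored little-endian into the fourth IMSIC page,
     * which the IMSIC model's MMIO write path latches as a pending bit.
     */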
riscv_aplic_set_pending_raw(aplic, irq, true); + } + riscv_aplic_idc_update(aplic, idc); + + return topi; +} + +static void riscv_aplic_request(void *opaque, int irq, int level) +{ + bool update = false; + RISCVAPLICState *aplic = opaque; + uint32_t sourcecfg, childidx, state, idc; + + assert((0 < irq) && (irq < aplic->num_irqs)); + + sourcecfg = aplic->sourcecfg[irq]; + if (sourcecfg & APLIC_SOURCECFG_D) { + childidx = sourcecfg & APLIC_SOURCECFG_CHILDIDX_MASK; + if (childidx < aplic->num_children) { + riscv_aplic_request(aplic->children[childidx], irq, level); + } + return; + } + + state = aplic->state[irq]; + switch (sourcecfg & APLIC_SOURCECFG_SM_MASK) { + case APLIC_SOURCECFG_SM_EDGE_RISE: + if ((level > 0) && !(state & APLIC_ISTATE_INPUT) && + !(state & APLIC_ISTATE_PENDING)) { + riscv_aplic_set_pending_raw(aplic, irq, true); + update = true; + } + break; + case APLIC_SOURCECFG_SM_EDGE_FALL: + if ((level <= 0) && (state & APLIC_ISTATE_INPUT) && + !(state & APLIC_ISTATE_PENDING)) { + riscv_aplic_set_pending_raw(aplic, irq, true); + update = true; + } + break; + case APLIC_SOURCECFG_SM_LEVEL_HIGH: + if ((level > 0) && !(state & APLIC_ISTATE_PENDING)) { + riscv_aplic_set_pending_raw(aplic, irq, true); + update = true; + } + break; + case APLIC_SOURCECFG_SM_LEVEL_LOW: + if ((level <= 0) && !(state & APLIC_ISTATE_PENDING)) { + riscv_aplic_set_pending_raw(aplic, irq, true); + update = true; + } + break; + default: + break; + } + + if (level <= 0) { + aplic->state[irq] &= ~APLIC_ISTATE_INPUT; + } else { + aplic->state[irq] |= APLIC_ISTATE_INPUT; + } + + if (update) { + if (aplic->msimode) { + riscv_aplic_msi_irq_update(aplic, irq); + } else { + idc = aplic->target[irq] >> APLIC_TARGET_HART_IDX_SHIFT; + idc &= APLIC_TARGET_HART_IDX_MASK; + riscv_aplic_idc_update(aplic, idc); + } + } +} + +static uint64_t riscv_aplic_read(void *opaque, hwaddr addr, unsigned size) +{ + uint32_t irq, word, idc; + RISCVAPLICState *aplic = opaque; + + /* Reads must be 4 byte words */ + if ((addr & 0x3) != 0) { + goto err; + } + + if (addr == APLIC_DOMAINCFG) { + return APLIC_DOMAINCFG_RDONLY | aplic->domaincfg | + (aplic->msimode ? APLIC_DOMAINCFG_DM : 0); + } else if ((APLIC_SOURCECFG_BASE <= addr) && + (addr < (APLIC_SOURCECFG_BASE + (aplic->num_irqs - 1) * 4))) { + irq = ((addr - APLIC_SOURCECFG_BASE) >> 2) + 1; + return aplic->sourcecfg[irq]; + } else if (aplic->mmode && aplic->msimode && + (addr == APLIC_MMSICFGADDR)) { + return aplic->mmsicfgaddr; + } else if (aplic->mmode && aplic->msimode && + (addr == APLIC_MMSICFGADDRH)) { + return aplic->mmsicfgaddrH; + } else if (aplic->mmode && aplic->msimode && + (addr == APLIC_SMSICFGADDR)) { + /* + * Registers SMSICFGADDR and SMSICFGADDRH are implemented only if: + * (a) the interrupt domain is at machine level + * (b) the domain's harts implement supervisor mode + * (c) the domain has one or more child supervisor-level domains + * that support MSI delivery mode (domaincfg.DM is not read- + * only zero in at least one of the supervisor-level child + * domains). + */ + return (aplic->num_children) ? aplic->smsicfgaddr : 0; + } else if (aplic->mmode && aplic->msimode && + (addr == APLIC_SMSICFGADDRH)) { + return (aplic->num_children) ? 
aplic->smsicfgaddrH : 0; + } else if ((APLIC_SETIP_BASE <= addr) && + (addr < (APLIC_SETIP_BASE + aplic->bitfield_words * 4))) { + word = (addr - APLIC_SETIP_BASE) >> 2; + return riscv_aplic_read_pending_word(aplic, word); + } else if (addr == APLIC_SETIPNUM) { + return 0; + } else if ((APLIC_CLRIP_BASE <= addr) && + (addr < (APLIC_CLRIP_BASE + aplic->bitfield_words * 4))) { + word = (addr - APLIC_CLRIP_BASE) >> 2; + return riscv_aplic_read_input_word(aplic, word); + } else if (addr == APLIC_CLRIPNUM) { + return 0; + } else if ((APLIC_SETIE_BASE <= addr) && + (addr < (APLIC_SETIE_BASE + aplic->bitfield_words * 4))) { + word = (addr - APLIC_SETIE_BASE) >> 2; + return riscv_aplic_read_enabled_word(aplic, word); + } else if (addr == APLIC_SETIENUM) { + return 0; + } else if ((APLIC_CLRIE_BASE <= addr) && + (addr < (APLIC_CLRIE_BASE + aplic->bitfield_words * 4))) { + return 0; + } else if (addr == APLIC_CLRIENUM) { + return 0; + } else if (addr == APLIC_SETIPNUM_LE) { + return 0; + } else if (addr == APLIC_SETIPNUM_BE) { + return 0; + } else if (addr == APLIC_GENMSI) { + return (aplic->msimode) ? aplic->genmsi : 0; + } else if ((APLIC_TARGET_BASE <= addr) && + (addr < (APLIC_TARGET_BASE + (aplic->num_irqs - 1) * 4))) { + irq = ((addr - APLIC_TARGET_BASE) >> 2) + 1; + return aplic->target[irq]; + } else if (!aplic->msimode && (APLIC_IDC_BASE <= addr) && + (addr < (APLIC_IDC_BASE + aplic->num_harts * APLIC_IDC_SIZE))) { + idc = (addr - APLIC_IDC_BASE) / APLIC_IDC_SIZE; + switch (addr - (APLIC_IDC_BASE + idc * APLIC_IDC_SIZE)) { + case APLIC_IDC_IDELIVERY: + return aplic->idelivery[idc]; + case APLIC_IDC_IFORCE: + return aplic->iforce[idc]; + case APLIC_IDC_ITHRESHOLD: + return aplic->ithreshold[idc]; + case APLIC_IDC_TOPI: + return riscv_aplic_idc_topi(aplic, idc); + case APLIC_IDC_CLAIMI: + return riscv_aplic_idc_claimi(aplic, idc); + default: + goto err; + }; + } + +err: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid register read 0x%" HWADDR_PRIx "\n", + __func__, addr); + return 0; +} + +static void riscv_aplic_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + RISCVAPLICState *aplic = opaque; + uint32_t irq, word, idc = UINT32_MAX; + + /* Writes must be 4 byte words */ + if ((addr & 0x3) != 0) { + goto err; + } + + if (addr == APLIC_DOMAINCFG) { + /* Only IE bit writable at the moment */ + value &= APLIC_DOMAINCFG_IE; + aplic->domaincfg = value; + } else if ((APLIC_SOURCECFG_BASE <= addr) && + (addr < (APLIC_SOURCECFG_BASE + (aplic->num_irqs - 1) * 4))) { + irq = ((addr - APLIC_SOURCECFG_BASE) >> 2) + 1; + if (!aplic->num_children && (value & APLIC_SOURCECFG_D)) { + value = 0; + } + if (value & APLIC_SOURCECFG_D) { + value &= (APLIC_SOURCECFG_D | APLIC_SOURCECFG_CHILDIDX_MASK); + } else { + value &= (APLIC_SOURCECFG_D | APLIC_SOURCECFG_SM_MASK); + } + aplic->sourcecfg[irq] = value; + if ((aplic->sourcecfg[irq] & APLIC_SOURCECFG_D) || + (aplic->sourcecfg[irq] == 0)) { + riscv_aplic_set_pending_raw(aplic, irq, false); + riscv_aplic_set_enabled_raw(aplic, irq, false); + } + } else if (aplic->mmode && aplic->msimode && + (addr == APLIC_MMSICFGADDR)) { + if (!(aplic->mmsicfgaddrH & APLIC_xMSICFGADDRH_L)) { + aplic->mmsicfgaddr = value; + } + } else if (aplic->mmode && aplic->msimode && + (addr == APLIC_MMSICFGADDRH)) { + if (!(aplic->mmsicfgaddrH & APLIC_xMSICFGADDRH_L)) { + aplic->mmsicfgaddrH = value & APLIC_xMSICFGADDRH_VALID_MASK; + } + } else if (aplic->mmode && aplic->msimode && + (addr == APLIC_SMSICFGADDR)) { + /* + * Registers SMSICFGADDR and SMSICFGADDRH are 
implemented only if: + * (a) the interrupt domain is at machine level + * (b) the domain's harts implement supervisor mode + * (c) the domain has one or more child supervisor-level domains + * that support MSI delivery mode (domaincfg.DM is not read- + * only zero in at least one of the supervisor-level child + * domains). + */ + if (aplic->num_children && + !(aplic->smsicfgaddrH & APLIC_xMSICFGADDRH_L)) { + aplic->smsicfgaddr = value; + } + } else if (aplic->mmode && aplic->msimode && + (addr == APLIC_SMSICFGADDRH)) { + if (aplic->num_children && + !(aplic->smsicfgaddrH & APLIC_xMSICFGADDRH_L)) { + aplic->smsicfgaddrH = value & APLIC_xMSICFGADDRH_VALID_MASK; + } + } else if ((APLIC_SETIP_BASE <= addr) && + (addr < (APLIC_SETIP_BASE + aplic->bitfield_words * 4))) { + word = (addr - APLIC_SETIP_BASE) >> 2; + riscv_aplic_set_pending_word(aplic, word, value, true); + } else if (addr == APLIC_SETIPNUM) { + riscv_aplic_set_pending(aplic, value, true); + } else if ((APLIC_CLRIP_BASE <= addr) && + (addr < (APLIC_CLRIP_BASE + aplic->bitfield_words * 4))) { + word = (addr - APLIC_CLRIP_BASE) >> 2; + riscv_aplic_set_pending_word(aplic, word, value, false); + } else if (addr == APLIC_CLRIPNUM) { + riscv_aplic_set_pending(aplic, value, false); + } else if ((APLIC_SETIE_BASE <= addr) && + (addr < (APLIC_SETIE_BASE + aplic->bitfield_words * 4))) { + word = (addr - APLIC_SETIE_BASE) >> 2; + riscv_aplic_set_enabled_word(aplic, word, value, true); + } else if (addr == APLIC_SETIENUM) { + riscv_aplic_set_enabled(aplic, value, true); + } else if ((APLIC_CLRIE_BASE <= addr) && + (addr < (APLIC_CLRIE_BASE + aplic->bitfield_words * 4))) { + word = (addr - APLIC_CLRIE_BASE) >> 2; + riscv_aplic_set_enabled_word(aplic, word, value, false); + } else if (addr == APLIC_CLRIENUM) { + riscv_aplic_set_enabled(aplic, value, false); + } else if (addr == APLIC_SETIPNUM_LE) { + riscv_aplic_set_pending(aplic, value, true); + } else if (addr == APLIC_SETIPNUM_BE) { + riscv_aplic_set_pending(aplic, bswap32(value), true); + } else if (addr == APLIC_GENMSI) { + if (aplic->msimode) { + aplic->genmsi = value & ~(APLIC_TARGET_GUEST_IDX_MASK << + APLIC_TARGET_GUEST_IDX_SHIFT); + riscv_aplic_msi_send(aplic, + value >> APLIC_TARGET_HART_IDX_SHIFT, + 0, + value & APLIC_TARGET_EIID_MASK); + } + } else if ((APLIC_TARGET_BASE <= addr) && + (addr < (APLIC_TARGET_BASE + (aplic->num_irqs - 1) * 4))) { + irq = ((addr - APLIC_TARGET_BASE) >> 2) + 1; + if (aplic->msimode) { + aplic->target[irq] = value; + } else { + aplic->target[irq] = (value & ~APLIC_TARGET_IPRIO_MASK) | + ((value & aplic->iprio_mask) ? 
+ (value & aplic->iprio_mask) : 1); + } + } else if (!aplic->msimode && (APLIC_IDC_BASE <= addr) && + (addr < (APLIC_IDC_BASE + aplic->num_harts * APLIC_IDC_SIZE))) { + idc = (addr - APLIC_IDC_BASE) / APLIC_IDC_SIZE; + switch (addr - (APLIC_IDC_BASE + idc * APLIC_IDC_SIZE)) { + case APLIC_IDC_IDELIVERY: + aplic->idelivery[idc] = value & 0x1; + break; + case APLIC_IDC_IFORCE: + aplic->iforce[idc] = value & 0x1; + break; + case APLIC_IDC_ITHRESHOLD: + aplic->ithreshold[idc] = value & aplic->iprio_mask; + break; + default: + goto err; + }; + } else { + goto err; + } + + if (aplic->msimode) { + for (irq = 1; irq < aplic->num_irqs; irq++) { + riscv_aplic_msi_irq_update(aplic, irq); + } + } else { + if (idc == UINT32_MAX) { + for (idc = 0; idc < aplic->num_harts; idc++) { + riscv_aplic_idc_update(aplic, idc); + } + } else { + riscv_aplic_idc_update(aplic, idc); + } + } + + return; + +err: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid register write 0x%" HWADDR_PRIx "\n", + __func__, addr); +} + +static const MemoryRegionOps riscv_aplic_ops = { + .read = riscv_aplic_read, + .write = riscv_aplic_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } +}; + +static void riscv_aplic_realize(DeviceState *dev, Error **errp) +{ + uint32_t i; + RISCVAPLICState *aplic = RISCV_APLIC(dev); + + aplic->bitfield_words = (aplic->num_irqs + 31) >> 5; + aplic->sourcecfg = g_new0(uint32_t, aplic->num_irqs); + aplic->state = g_new(uint32_t, aplic->num_irqs); + aplic->target = g_new0(uint32_t, aplic->num_irqs); + if (!aplic->msimode) { + for (i = 0; i < aplic->num_irqs; i++) { + aplic->target[i] = 1; + } + } + aplic->idelivery = g_new0(uint32_t, aplic->num_harts); + aplic->iforce = g_new0(uint32_t, aplic->num_harts); + aplic->ithreshold = g_new0(uint32_t, aplic->num_harts); + + memory_region_init_io(&aplic->mmio, OBJECT(dev), &riscv_aplic_ops, aplic, + TYPE_RISCV_APLIC, aplic->aperture_size); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &aplic->mmio); + + /* + * Only root APLICs have hardware IRQ lines. All non-root APLICs + * have IRQ lines delegated by their parent APLIC. + */ + if (!aplic->parent) { + qdev_init_gpio_in(dev, riscv_aplic_request, aplic->num_irqs); + } + + /* Create output IRQ lines for non-MSI mode */ + if (!aplic->msimode) { + aplic->external_irqs = g_malloc(sizeof(qemu_irq) * aplic->num_harts); + qdev_init_gpio_out(dev, aplic->external_irqs, aplic->num_harts); + + /* Claim the CPU interrupt to be triggered by this APLIC */ + for (i = 0; i < aplic->num_harts; i++) { + RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(aplic->hartid_base + i)); + if (riscv_cpu_claim_interrupts(cpu, + (aplic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) { + error_report("%s already claimed", + (aplic->mmode) ? 
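For direct (IDC) delivery, the MMIO write handler above is driven by a fairly fixed firmware sequence: configure the source mode, set its target and priority, enable it, then open the hart's delivery gate and the domain. A guest-side sketch; APLIC_BASE and aplic_write32() are illustrative helpers, only the register offsets and field macros come from this file.

    #define APLIC_BASE 0x0c000000UL   /* illustrative machine-level domain base */

    static void aplic_write32(unsigned long off, uint32_t val)
    {
        *(volatile uint32_t *)(APLIC_BASE + off) = val;
    }

    /* Route wired source 7 to hart 0 at priority 1, direct delivery mode. */
    static void aplic_route_source7(void)
    {
        aplic_write32(APLIC_SOURCECFG_BASE + (7 - 1) * 4,
                      APLIC_SOURCECFG_SM_LEVEL_HIGH);         /* active-high level */
        aplic_write32(APLIC_TARGET_BASE + (7 - 1) * 4,
                      (0 << APLIC_TARGET_HART_IDX_SHIFT) | 1); /* hart 0, iprio 1 */
        aplic_write32(APLIC_SETIENUM, 7);                      /* enable source 7 */
        aplic_write32(APLIC_IDC_BASE + 0 * APLIC_IDC_SIZE + APLIC_IDC_IDELIVERY, 1);
        aplic_write32(APLIC_DOMAINCFG, APLIC_DOMAINCFG_IE);    /* interrupts on */
    }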
"MEIP" : "SEIP"); + exit(1); + } + } + } + + msi_nonbroken = true; +} + +static Property riscv_aplic_properties[] = { + DEFINE_PROP_UINT32("aperture-size", RISCVAPLICState, aperture_size, 0), + DEFINE_PROP_UINT32("hartid-base", RISCVAPLICState, hartid_base, 0), + DEFINE_PROP_UINT32("num-harts", RISCVAPLICState, num_harts, 0), + DEFINE_PROP_UINT32("iprio-mask", RISCVAPLICState, iprio_mask, 0), + DEFINE_PROP_UINT32("num-irqs", RISCVAPLICState, num_irqs, 0), + DEFINE_PROP_BOOL("msimode", RISCVAPLICState, msimode, 0), + DEFINE_PROP_BOOL("mmode", RISCVAPLICState, mmode, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_riscv_aplic = { + .name = "riscv_aplic", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(domaincfg, RISCVAPLICState), + VMSTATE_UINT32(mmsicfgaddr, RISCVAPLICState), + VMSTATE_UINT32(mmsicfgaddrH, RISCVAPLICState), + VMSTATE_UINT32(smsicfgaddr, RISCVAPLICState), + VMSTATE_UINT32(smsicfgaddrH, RISCVAPLICState), + VMSTATE_UINT32(genmsi, RISCVAPLICState), + VMSTATE_VARRAY_UINT32(sourcecfg, RISCVAPLICState, + num_irqs, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_VARRAY_UINT32(state, RISCVAPLICState, + num_irqs, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_VARRAY_UINT32(target, RISCVAPLICState, + num_irqs, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_VARRAY_UINT32(idelivery, RISCVAPLICState, + num_harts, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_VARRAY_UINT32(iforce, RISCVAPLICState, + num_harts, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_VARRAY_UINT32(ithreshold, RISCVAPLICState, + num_harts, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_END_OF_LIST() + } +}; + +static void riscv_aplic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, riscv_aplic_properties); + dc->realize = riscv_aplic_realize; + dc->vmsd = &vmstate_riscv_aplic; +} + +static const TypeInfo riscv_aplic_info = { + .name = TYPE_RISCV_APLIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(RISCVAPLICState), + .class_init = riscv_aplic_class_init, +}; + +static void riscv_aplic_register_types(void) +{ + type_register_static(&riscv_aplic_info); +} + +type_init(riscv_aplic_register_types) + +/* + * Add a APLIC device to another APLIC device as child for + * interrupt delegation. + */ +void riscv_aplic_add_child(DeviceState *parent, DeviceState *child) +{ + RISCVAPLICState *caplic, *paplic; + + assert(parent && child); + caplic = RISCV_APLIC(child); + paplic = RISCV_APLIC(parent); + + assert(paplic->num_irqs == caplic->num_irqs); + assert(paplic->num_children <= QEMU_APLIC_MAX_CHILDREN); + + caplic->parent = paplic; + paplic->children[paplic->num_children] = caplic; + paplic->num_children++; +} + +/* + * Create APLIC device. 
+ */ +DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size, + uint32_t hartid_base, uint32_t num_harts, uint32_t num_sources, + uint32_t iprio_bits, bool msimode, bool mmode, DeviceState *parent) +{ + DeviceState *dev = qdev_new(TYPE_RISCV_APLIC); + uint32_t i; + + assert(num_harts < APLIC_MAX_IDC); + assert((APLIC_IDC_BASE + (num_harts * APLIC_IDC_SIZE)) <= size); + assert(num_sources < APLIC_MAX_SOURCE); + assert(APLIC_MIN_IPRIO_BITS <= iprio_bits); + assert(iprio_bits <= APLIC_MAX_IPRIO_BITS); + + qdev_prop_set_uint32(dev, "aperture-size", size); + qdev_prop_set_uint32(dev, "hartid-base", hartid_base); + qdev_prop_set_uint32(dev, "num-harts", num_harts); + qdev_prop_set_uint32(dev, "iprio-mask", ((1U << iprio_bits) - 1)); + qdev_prop_set_uint32(dev, "num-irqs", num_sources + 1); + qdev_prop_set_bit(dev, "msimode", msimode); + qdev_prop_set_bit(dev, "mmode", mmode); + + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr); + + if (parent) { + riscv_aplic_add_child(parent, dev); + } + + if (!msimode) { + for (i = 0; i < num_harts; i++) { + CPUState *cpu = qemu_get_cpu(hartid_base + i); + + qdev_connect_gpio_out_named(dev, NULL, i, + qdev_get_gpio_in(DEVICE(cpu), + (mmode) ? IRQ_M_EXT : IRQ_S_EXT)); + } + } + + return dev; +} diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c new file mode 100644 index 0000000000..4d4d5b50ca --- /dev/null +++ b/hw/intc/riscv_imsic.c @@ -0,0 +1,450 @@ +/* + * RISC-V IMSIC (Incoming Message Signaled Interrupt Controller) + * + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/error-report.h" +#include "qemu/bswap.h" +#include "exec/address-spaces.h" +#include "hw/sysbus.h" +#include "hw/pci/msi.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "hw/intc/riscv_imsic.h" +#include "hw/irq.h" +#include "target/riscv/cpu.h" +#include "target/riscv/cpu_bits.h" +#include "sysemu/sysemu.h" +#include "migration/vmstate.h" + +#define IMSIC_MMIO_PAGE_LE 0x00 +#define IMSIC_MMIO_PAGE_BE 0x04 + +#define IMSIC_MIN_ID ((IMSIC_EIPx_BITS * 2) - 1) +#define IMSIC_MAX_ID (IMSIC_TOPEI_IID_MASK) + +#define IMSIC_EISTATE_PENDING (1U << 0) +#define IMSIC_EISTATE_ENABLED (1U << 1) +#define IMSIC_EISTATE_ENPEND (IMSIC_EISTATE_ENABLED | \ + IMSIC_EISTATE_PENDING) + +static uint32_t riscv_imsic_topei(RISCVIMSICState *imsic, uint32_t page) +{ + uint32_t i, max_irq, base; + + base = page * imsic->num_irqs; + max_irq = (imsic->eithreshold[page] && + (imsic->eithreshold[page] <= imsic->num_irqs)) ? 
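riscv_aplic_create() above also takes care of delegation wiring through riscv_aplic_add_child(). A sketch of the common two-domain arrangement; the addresses, sizes and source count are illustrative.

    /* Board-side sketch: M-level root domain plus one delegated S-level child. */
    DeviceState *aplic_m = riscv_aplic_create(0x0c000000, 0x8000,
                                              0, 4,     /* hartid-base, num-harts */
                                              96,       /* wired sources */
                                              3,        /* iprio bits */
                                              false,    /* direct (IDC) delivery */
                                              true,     /* machine-level domain */
                                              NULL);    /* no parent: root */
    DeviceState *aplic_s = riscv_aplic_create(0x0d000000, 0x8000,
                                              0, 4, 96, 3,
                                              false,    /* direct delivery */
                                              false,    /* supervisor-level */
                                              aplic_m); /* child of the root */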
+ imsic->eithreshold[page] : imsic->num_irqs; + for (i = 1; i < max_irq; i++) { + if ((imsic->eistate[base + i] & IMSIC_EISTATE_ENPEND) == + IMSIC_EISTATE_ENPEND) { + return (i << IMSIC_TOPEI_IID_SHIFT) | i; + } + } + + return 0; +} + +static void riscv_imsic_update(RISCVIMSICState *imsic, uint32_t page) +{ + if (imsic->eidelivery[page] && riscv_imsic_topei(imsic, page)) { + qemu_irq_raise(imsic->external_irqs[page]); + } else { + qemu_irq_lower(imsic->external_irqs[page]); + } +} + +static int riscv_imsic_eidelivery_rmw(RISCVIMSICState *imsic, uint32_t page, + target_ulong *val, + target_ulong new_val, + target_ulong wr_mask) +{ + target_ulong old_val = imsic->eidelivery[page]; + + if (val) { + *val = old_val; + } + + wr_mask &= 0x1; + imsic->eidelivery[page] = (old_val & ~wr_mask) | (new_val & wr_mask); + + riscv_imsic_update(imsic, page); + return 0; +} + +static int riscv_imsic_eithreshold_rmw(RISCVIMSICState *imsic, uint32_t page, + target_ulong *val, + target_ulong new_val, + target_ulong wr_mask) +{ + target_ulong old_val = imsic->eithreshold[page]; + + if (val) { + *val = old_val; + } + + wr_mask &= IMSIC_MAX_ID; + imsic->eithreshold[page] = (old_val & ~wr_mask) | (new_val & wr_mask); + + riscv_imsic_update(imsic, page); + return 0; +} + +static int riscv_imsic_topei_rmw(RISCVIMSICState *imsic, uint32_t page, + target_ulong *val, target_ulong new_val, + target_ulong wr_mask) +{ + uint32_t base, topei = riscv_imsic_topei(imsic, page); + + /* Read pending and enabled interrupt with highest priority */ + if (val) { + *val = topei; + } + + /* Writes ignore value and clear top pending interrupt */ + if (topei && wr_mask) { + topei >>= IMSIC_TOPEI_IID_SHIFT; + base = page * imsic->num_irqs; + if (topei) { + imsic->eistate[base + topei] &= ~IMSIC_EISTATE_PENDING; + } + + riscv_imsic_update(imsic, page); + } + + return 0; +} + +static int riscv_imsic_eix_rmw(RISCVIMSICState *imsic, + uint32_t xlen, uint32_t page, + uint32_t num, bool pend, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) +{ + uint32_t i, base; + target_ulong mask; + uint32_t state = (pend) ? IMSIC_EISTATE_PENDING : IMSIC_EISTATE_ENABLED; + + if (xlen != 32) { + if (num & 0x1) { + return -EINVAL; + } + num >>= 1; + } + if (num >= (imsic->num_irqs / xlen)) { + return -EINVAL; + } + + base = (page * imsic->num_irqs) + (num * xlen); + + if (val) { + *val = 0; + for (i = 0; i < xlen; i++) { + mask = (target_ulong)1 << i; + *val |= (imsic->eistate[base + i] & state) ? 
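The eithreshold semantics implemented by riscv_imsic_topei() above are easy to misread: a non-zero threshold masks every identity greater than or equal to it, and among the survivors the lowest identity wins. A small worked example:

    /*
     * Worked example for riscv_imsic_topei(): identities 3 and 7 are both
     * enabled and pending in the selected interrupt file.
     *
     *   eithreshold = 0 : no masking, lowest identity wins
     *                     -> (3 << IMSIC_TOPEI_IID_SHIFT) | 3
     *   eithreshold = 5 : identities >= 5 masked, 3 still eligible
     *                     -> (3 << IMSIC_TOPEI_IID_SHIFT) | 3
     *   eithreshold = 3 : identities >= 3 masked, nothing left
     *                     -> 0 (no external interrupt is raised)
     */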
mask : 0; + } + } + + for (i = 0; i < xlen; i++) { + /* Bit0 of eip0 and eie0 are read-only zero */ + if (!num && !i) { + continue; + } + + mask = (target_ulong)1 << i; + if (wr_mask & mask) { + if (new_val & mask) { + imsic->eistate[base + i] |= state; + } else { + imsic->eistate[base + i] &= ~state; + } + } + } + + riscv_imsic_update(imsic, page); + return 0; +} + +static int riscv_imsic_rmw(void *arg, target_ulong reg, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) +{ + RISCVIMSICState *imsic = arg; + uint32_t isel, priv, virt, vgein, xlen, page; + + priv = AIA_IREG_PRIV(reg); + virt = AIA_IREG_VIRT(reg); + isel = AIA_IREG_ISEL(reg); + vgein = AIA_IREG_VGEIN(reg); + xlen = AIA_IREG_XLEN(reg); + + if (imsic->mmode) { + if (priv == PRV_M && !virt) { + page = 0; + } else { + goto err; + } + } else { + if (priv == PRV_S) { + if (virt) { + if (vgein && vgein < imsic->num_pages) { + page = vgein; + } else { + goto err; + } + } else { + page = 0; + } + } else { + goto err; + } + } + + switch (isel) { + case ISELECT_IMSIC_EIDELIVERY: + return riscv_imsic_eidelivery_rmw(imsic, page, val, + new_val, wr_mask); + case ISELECT_IMSIC_EITHRESHOLD: + return riscv_imsic_eithreshold_rmw(imsic, page, val, + new_val, wr_mask); + case ISELECT_IMSIC_TOPEI: + return riscv_imsic_topei_rmw(imsic, page, val, new_val, wr_mask); + case ISELECT_IMSIC_EIP0 ... ISELECT_IMSIC_EIP63: + return riscv_imsic_eix_rmw(imsic, xlen, page, + isel - ISELECT_IMSIC_EIP0, + true, val, new_val, wr_mask); + case ISELECT_IMSIC_EIE0 ... ISELECT_IMSIC_EIE63: + return riscv_imsic_eix_rmw(imsic, xlen, page, + isel - ISELECT_IMSIC_EIE0, + false, val, new_val, wr_mask); + default: + break; + }; + +err: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid register priv=%d virt=%d isel=%d vgein=%d\n", + __func__, priv, virt, isel, vgein); + return -EINVAL; +} + +static uint64_t riscv_imsic_read(void *opaque, hwaddr addr, unsigned size) +{ + RISCVIMSICState *imsic = opaque; + + /* Reads must be 4 byte words */ + if ((addr & 0x3) != 0) { + goto err; + } + + /* Reads cannot be out of range */ + if (addr > IMSIC_MMIO_SIZE(imsic->num_pages)) { + goto err; + } + + return 0; + +err: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid register read 0x%" HWADDR_PRIx "\n", + __func__, addr); + return 0; +} + +static void riscv_imsic_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + RISCVIMSICState *imsic = opaque; + uint32_t page; + + /* Writes must be 4 byte words */ + if ((addr & 0x3) != 0) { + goto err; + } + + /* Writes cannot be out of range */ + if (addr > IMSIC_MMIO_SIZE(imsic->num_pages)) { + goto err; + } + + /* Writes only supported for MSI little-endian registers */ + page = addr >> IMSIC_MMIO_PAGE_SHIFT; + if ((addr & (IMSIC_MMIO_PAGE_SZ - 1)) == IMSIC_MMIO_PAGE_LE) { + if (value && (value < imsic->num_irqs)) { + imsic->eistate[(page * imsic->num_irqs) + value] |= + IMSIC_EISTATE_PENDING; + } + } + + /* Update CPU external interrupt status */ + riscv_imsic_update(imsic, page); + + return; + +err: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid register write 0x%" HWADDR_PRIx "\n", + __func__, addr); +} + +static const MemoryRegionOps riscv_imsic_ops = { + .read = riscv_imsic_read, + .write = riscv_imsic_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } +}; + +static void riscv_imsic_realize(DeviceState *dev, Error **errp) +{ + RISCVIMSICState *imsic = RISCV_IMSIC(dev); + RISCVCPU *rcpu = RISCV_CPU(qemu_get_cpu(imsic->hartid)); + CPUState *cpu = 
qemu_get_cpu(imsic->hartid); + CPURISCVState *env = cpu ? cpu->env_ptr : NULL; + + imsic->num_eistate = imsic->num_pages * imsic->num_irqs; + imsic->eidelivery = g_new0(uint32_t, imsic->num_pages); + imsic->eithreshold = g_new0(uint32_t, imsic->num_pages); + imsic->eistate = g_new0(uint32_t, imsic->num_eistate); + + memory_region_init_io(&imsic->mmio, OBJECT(dev), &riscv_imsic_ops, + imsic, TYPE_RISCV_IMSIC, + IMSIC_MMIO_SIZE(imsic->num_pages)); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &imsic->mmio); + + /* Claim the CPU interrupt to be triggered by this IMSIC */ + if (riscv_cpu_claim_interrupts(rcpu, + (imsic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) { + error_setg(errp, "%s already claimed", + (imsic->mmode) ? "MEIP" : "SEIP"); + return; + } + + /* Create output IRQ lines */ + imsic->external_irqs = g_malloc(sizeof(qemu_irq) * imsic->num_pages); + qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages); + + /* Force select AIA feature and setup CSR read-modify-write callback */ + if (env) { + if (!imsic->mmode) { + rcpu->cfg.ext_ssaia = true; + riscv_cpu_set_geilen(env, imsic->num_pages - 1); + } else { + rcpu->cfg.ext_smaia = true; + } + riscv_cpu_set_aia_ireg_rmw_fn(env, (imsic->mmode) ? PRV_M : PRV_S, + riscv_imsic_rmw, imsic); + } + + msi_nonbroken = true; +} + +static Property riscv_imsic_properties[] = { + DEFINE_PROP_BOOL("mmode", RISCVIMSICState, mmode, 0), + DEFINE_PROP_UINT32("hartid", RISCVIMSICState, hartid, 0), + DEFINE_PROP_UINT32("num-pages", RISCVIMSICState, num_pages, 0), + DEFINE_PROP_UINT32("num-irqs", RISCVIMSICState, num_irqs, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_riscv_imsic = { + .name = "riscv_imsic", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_VARRAY_UINT32(eidelivery, RISCVIMSICState, + num_pages, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_VARRAY_UINT32(eithreshold, RISCVIMSICState, + num_pages, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_VARRAY_UINT32(eistate, RISCVIMSICState, + num_eistate, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_END_OF_LIST() + } +}; + +static void riscv_imsic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, riscv_imsic_properties); + dc->realize = riscv_imsic_realize; + dc->vmsd = &vmstate_riscv_imsic; +} + +static const TypeInfo riscv_imsic_info = { + .name = TYPE_RISCV_IMSIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(RISCVIMSICState), + .class_init = riscv_imsic_class_init, +}; + +static void riscv_imsic_register_types(void) +{ + type_register_static(&riscv_imsic_info); +} + +type_init(riscv_imsic_register_types) + +/* + * Create IMSIC device. 
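From the bus side, delivering an MSI to an IMSIC is just a 32-bit little-endian store of the interrupt identity to the target file's page, which riscv_imsic_write() above turns into a pending bit. A sketch follows; IMSIC_S_BASE and the one-page-per-hart layout are assumptions, since the real layout is chosen by the machine model.

    /* Sketch: post interrupt identity 10 to the file of hart 2. */
    static void imsic_post_msi_hart2(void)
    {
        MemTxResult res;
        hwaddr addr = IMSIC_S_BASE                /* illustrative group base */
                      + 2 * IMSIC_MMIO_PAGE_SZ    /* assumed one page per hart */
                      + IMSIC_MMIO_PAGE_LE;       /* little-endian seteipnum word */

        address_space_stl_le(&address_space_memory, addr, 10,
                             MEMTXATTRS_UNSPECIFIED, &res);
    }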
+ */ +DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode, + uint32_t num_pages, uint32_t num_ids) +{ + DeviceState *dev = qdev_new(TYPE_RISCV_IMSIC); + CPUState *cpu = qemu_get_cpu(hartid); + uint32_t i; + + assert(!(addr & (IMSIC_MMIO_PAGE_SZ - 1))); + if (mmode) { + assert(num_pages == 1); + } else { + assert(num_pages >= 1 && num_pages <= (IRQ_LOCAL_GUEST_MAX + 1)); + } + assert(IMSIC_MIN_ID <= num_ids); + assert(num_ids <= IMSIC_MAX_ID); + assert((num_ids & IMSIC_MIN_ID) == IMSIC_MIN_ID); + + qdev_prop_set_bit(dev, "mmode", mmode); + qdev_prop_set_uint32(dev, "hartid", hartid); + qdev_prop_set_uint32(dev, "num-pages", num_pages); + qdev_prop_set_uint32(dev, "num-irqs", num_ids + 1); + + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr); + + for (i = 0; i < num_pages; i++) { + if (!i) { + qdev_connect_gpio_out_named(dev, NULL, i, + qdev_get_gpio_in(DEVICE(cpu), + (mmode) ? IRQ_M_EXT : IRQ_S_EXT)); + } else { + qdev_connect_gpio_out_named(dev, NULL, i, + qdev_get_gpio_in(DEVICE(cpu), + IRQ_LOCAL_MAX + i - 1)); + } + } + + return dev; +} diff --git a/hw/intc/s390_flic_kvm.c b/hw/intc/s390_flic_kvm.c index efe5054182..4e86d2d436 100644 --- a/hw/intc/s390_flic_kvm.c +++ b/hw/intc/s390_flic_kvm.c @@ -24,7 +24,7 @@ #include "trace.h" #include "qom/object.h" -#define FLIC_SAVE_INITIAL_SIZE qemu_real_host_page_size +#define FLIC_SAVE_INITIAL_SIZE qemu_real_host_page_size() #define FLIC_FAILED (-1UL) #define FLIC_SAVEVM_VERSION 1 diff --git a/hw/intc/sh_intc.c b/hw/intc/sh_intc.c index 72a55e32dd..c9b0b0c1ec 100644 --- a/hw/intc/sh_intc.c +++ b/hw/intc/sh_intc.c @@ -9,40 +9,37 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "hw/sh4/sh_intc.h" #include "hw/irq.h" #include "hw/sh4/sh.h" - -//#define DEBUG_INTC -//#define DEBUG_INTC_SOURCES - -#define INTC_A7(x) ((x) & 0x1fffffff) +#include "trace.h" void sh_intc_toggle_source(struct intc_source *source, - int enable_adj, int assert_adj) + int enable_adj, int assert_adj) { int enable_changed = 0; int pending_changed = 0; int old_pending; - if ((source->enable_count == source->enable_max) && (enable_adj == -1)) + if (source->enable_count == source->enable_max && enable_adj == -1) { enable_changed = -1; - + } source->enable_count += enable_adj; - if (source->enable_count == source->enable_max) + if (source->enable_count == source->enable_max) { enable_changed = 1; - + } source->asserted += assert_adj; old_pending = source->pending; source->pending = source->asserted && (source->enable_count == source->enable_max); - if (old_pending != source->pending) + if (old_pending != source->pending) { pending_changed = 1; - + } if (pending_changed) { if (source->pending) { source->parent->pending++; @@ -54,35 +51,30 @@ void sh_intc_toggle_source(struct intc_source *source, if (source->parent->pending == 0) { cpu_reset_interrupt(first_cpu, CPU_INTERRUPT_HARD); } - } + } } - if (enable_changed || assert_adj || pending_changed) { -#ifdef DEBUG_INTC_SOURCES - printf("sh_intc: (%d/%d/%d/%d) interrupt source 0x%x %s%s%s\n", - source->parent->pending, - source->asserted, - source->enable_count, - source->enable_max, - source->vect, - source->asserted ? "asserted " : - assert_adj ? "deasserted" : "", - enable_changed == 1 ? "enabled " : - enable_changed == -1 ? "disabled " : "", - source->pending ? 
"pending" : ""); -#endif - } + if (enable_changed || assert_adj || pending_changed) { + trace_sh_intc_sources(source->parent->pending, source->asserted, + source->enable_count, source->enable_max, + source->vect, source->asserted ? "asserted " : + assert_adj ? "deasserted" : "", + enable_changed == 1 ? "enabled " : + enable_changed == -1 ? "disabled " : "", + source->pending ? "pending" : ""); + } } -static void sh_intc_set_irq (void *opaque, int n, int level) +static void sh_intc_set_irq(void *opaque, int n, int level) { - struct intc_desc *desc = opaque; - struct intc_source *source = &(desc->sources[n]); + struct intc_desc *desc = opaque; + struct intc_source *source = &desc->sources[n]; - if (level && !source->asserted) - sh_intc_toggle_source(source, 0, 1); - else if (!level && source->asserted) - sh_intc_toggle_source(source, 0, -1); + if (level && !source->asserted) { + sh_intc_toggle_source(source, 0, 1); + } else if (!level && source->asserted) { + sh_intc_toggle_source(source, 0, -1); + } } int sh_intc_get_pending_vector(struct intc_desc *desc, int imask) @@ -97,147 +89,124 @@ int sh_intc_get_pending_vector(struct intc_desc *desc, int imask) } for (i = 0; i < desc->nr_sources; i++) { - struct intc_source *source = desc->sources + i; + struct intc_source *source = &desc->sources[i]; - if (source->pending) { -#ifdef DEBUG_INTC_SOURCES - printf("sh_intc: (%d) returning interrupt source 0x%x\n", - desc->pending, source->vect); -#endif + if (source->pending) { + trace_sh_intc_pending(desc->pending, source->vect); return source->vect; - } + } } - - abort(); + g_assert_not_reached(); } -#define INTC_MODE_NONE 0 -#define INTC_MODE_DUAL_SET 1 -#define INTC_MODE_DUAL_CLR 2 -#define INTC_MODE_ENABLE_REG 3 -#define INTC_MODE_MASK_REG 4 -#define INTC_MODE_IS_PRIO 8 +typedef enum { + INTC_MODE_NONE, + INTC_MODE_DUAL_SET, + INTC_MODE_DUAL_CLR, + INTC_MODE_ENABLE_REG, + INTC_MODE_MASK_REG, +} SHIntCMode; +#define INTC_MODE_IS_PRIO 0x80 -static unsigned int sh_intc_mode(unsigned long address, - unsigned long set_reg, unsigned long clr_reg) +static SHIntCMode sh_intc_mode(unsigned long address, unsigned long set_reg, + unsigned long clr_reg) { - if ((address != INTC_A7(set_reg)) && - (address != INTC_A7(clr_reg))) + if (address != A7ADDR(set_reg) && address != A7ADDR(clr_reg)) { return INTC_MODE_NONE; - - if (set_reg && clr_reg) { - if (address == INTC_A7(set_reg)) - return INTC_MODE_DUAL_SET; - else - return INTC_MODE_DUAL_CLR; } - - if (set_reg) - return INTC_MODE_ENABLE_REG; - else - return INTC_MODE_MASK_REG; + if (set_reg && clr_reg) { + return address == A7ADDR(set_reg) ? + INTC_MODE_DUAL_SET : INTC_MODE_DUAL_CLR; + } + return set_reg ? 
INTC_MODE_ENABLE_REG : INTC_MODE_MASK_REG; } static void sh_intc_locate(struct intc_desc *desc, - unsigned long address, - unsigned long **datap, - intc_enum **enums, - unsigned int *first, - unsigned int *width, - unsigned int *modep) + unsigned long address, + unsigned long **datap, + intc_enum **enums, + unsigned int *first, + unsigned int *width, + unsigned int *modep) { - unsigned int i, mode; + SHIntCMode mode; + unsigned int i; /* this is slow but works for now */ if (desc->mask_regs) { for (i = 0; i < desc->nr_mask_regs; i++) { - struct intc_mask_reg *mr = desc->mask_regs + i; + struct intc_mask_reg *mr = &desc->mask_regs[i]; - mode = sh_intc_mode(address, mr->set_reg, mr->clr_reg); - if (mode == INTC_MODE_NONE) - continue; - - *modep = mode; - *datap = &mr->value; - *enums = mr->enum_ids; - *first = mr->reg_width - 1; - *width = 1; - return; - } + mode = sh_intc_mode(address, mr->set_reg, mr->clr_reg); + if (mode != INTC_MODE_NONE) { + *modep = mode; + *datap = &mr->value; + *enums = mr->enum_ids; + *first = mr->reg_width - 1; + *width = 1; + return; + } + } } if (desc->prio_regs) { for (i = 0; i < desc->nr_prio_regs; i++) { - struct intc_prio_reg *pr = desc->prio_regs + i; + struct intc_prio_reg *pr = &desc->prio_regs[i]; - mode = sh_intc_mode(address, pr->set_reg, pr->clr_reg); - if (mode == INTC_MODE_NONE) - continue; - - *modep = mode | INTC_MODE_IS_PRIO; - *datap = &pr->value; - *enums = pr->enum_ids; - *first = (pr->reg_width / pr->field_width) - 1; - *width = pr->field_width; - return; - } + mode = sh_intc_mode(address, pr->set_reg, pr->clr_reg); + if (mode != INTC_MODE_NONE) { + *modep = mode | INTC_MODE_IS_PRIO; + *datap = &pr->value; + *enums = pr->enum_ids; + *first = pr->reg_width / pr->field_width - 1; + *width = pr->field_width; + return; + } + } } - - abort(); + g_assert_not_reached(); } static void sh_intc_toggle_mask(struct intc_desc *desc, intc_enum id, - int enable, int is_group) + int enable, int is_group) { - struct intc_source *source = desc->sources + id; - - if (!id) - return; + struct intc_source *source = &desc->sources[id]; + if (!id) { + return; + } if (!source->next_enum_id && (!source->enable_max || !source->vect)) { -#ifdef DEBUG_INTC_SOURCES - printf("sh_intc: reserved interrupt source %d modified\n", id); -#endif - return; + qemu_log_mask(LOG_UNIMP, + "sh_intc: reserved interrupt source %d modified\n", id); + return; } - if (source->vect) + if (source->vect) { sh_intc_toggle_source(source, enable ? 
1 : -1, 0); - -#ifdef DEBUG_INTC - else { - printf("setting interrupt group %d to %d\n", id, !!enable); } -#endif if ((is_group || !source->vect) && source->next_enum_id) { sh_intc_toggle_mask(desc, source->next_enum_id, enable, 1); } -#ifdef DEBUG_INTC if (!source->vect) { - printf("setting interrupt group %d to %d - done\n", id, !!enable); + trace_sh_intc_set(id, !!enable); } -#endif } -static uint64_t sh_intc_read(void *opaque, hwaddr offset, - unsigned size) +static uint64_t sh_intc_read(void *opaque, hwaddr offset, unsigned size) { struct intc_desc *desc = opaque; - intc_enum *enum_ids = NULL; - unsigned int first = 0; - unsigned int width = 0; - unsigned int mode = 0; + intc_enum *enum_ids; + unsigned int first; + unsigned int width; + unsigned int mode; unsigned long *valuep; -#ifdef DEBUG_INTC - printf("sh_intc_read 0x%lx\n", (unsigned long) offset); -#endif - - sh_intc_locate(desc, (unsigned long)offset, &valuep, - &enum_ids, &first, &width, &mode); + sh_intc_locate(desc, (unsigned long)offset, &valuep, + &enum_ids, &first, &width, &mode); + trace_sh_intc_read(size, (uint64_t)offset, *valuep); return *valuep; } @@ -245,45 +214,40 @@ static void sh_intc_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { struct intc_desc *desc = opaque; - intc_enum *enum_ids = NULL; - unsigned int first = 0; - unsigned int width = 0; - unsigned int mode = 0; - unsigned int k; + intc_enum *enum_ids; + unsigned int first; + unsigned int width; + unsigned int mode; unsigned long *valuep; + unsigned int k; unsigned long mask; -#ifdef DEBUG_INTC - printf("sh_intc_write 0x%lx 0x%08x\n", (unsigned long) offset, value); -#endif - - sh_intc_locate(desc, (unsigned long)offset, &valuep, - &enum_ids, &first, &width, &mode); - + trace_sh_intc_write(size, (uint64_t)offset, value); + sh_intc_locate(desc, (unsigned long)offset, &valuep, + &enum_ids, &first, &width, &mode); switch (mode) { - case INTC_MODE_ENABLE_REG | INTC_MODE_IS_PRIO: break; - case INTC_MODE_DUAL_SET: value |= *valuep; break; - case INTC_MODE_DUAL_CLR: value = *valuep & ~value; break; - default: abort(); + case INTC_MODE_ENABLE_REG | INTC_MODE_IS_PRIO: + break; + case INTC_MODE_DUAL_SET: + value |= *valuep; + break; + case INTC_MODE_DUAL_CLR: + value = *valuep & ~value; + break; + default: + g_assert_not_reached(); } for (k = 0; k <= first; k++) { - mask = ((1 << width) - 1) << ((first - k) * width); + mask = (1 << width) - 1; + mask <<= (first - k) * width; - if ((*valuep & mask) == (value & mask)) - continue; -#if 0 - printf("k = %d, first = %d, enum = %d, mask = 0x%08x\n", - k, first, enum_ids[k], (unsigned int)mask); -#endif - sh_intc_toggle_mask(desc, enum_ids[k], value & mask, 0); + if ((*valuep & mask) != (value & mask)) { + sh_intc_toggle_mask(desc, enum_ids[k], value & mask, 0); + } } *valuep = value; - -#ifdef DEBUG_INTC - printf("sh_intc_write 0x%lx -> 0x%08x\n", (unsigned long) offset, value); -#endif } static const MemoryRegionOps sh_intc_ops = { @@ -292,20 +256,105 @@ static const MemoryRegionOps sh_intc_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -struct intc_source *sh_intc_source(struct intc_desc *desc, intc_enum id) +static void sh_intc_register_source(struct intc_desc *desc, + intc_enum source, + struct intc_group *groups, + int nr_groups) { - if (id) - return desc->sources + id; + unsigned int i, k; + intc_enum id; - return NULL; + if (desc->mask_regs) { + for (i = 0; i < desc->nr_mask_regs; i++) { + struct intc_mask_reg *mr = &desc->mask_regs[i]; + + for (k = 0; k < ARRAY_SIZE(mr->enum_ids); k++) { + id = 
mr->enum_ids[k]; + if (id && id == source) { + desc->sources[id].enable_max++; + } + } + } + } + + if (desc->prio_regs) { + for (i = 0; i < desc->nr_prio_regs; i++) { + struct intc_prio_reg *pr = &desc->prio_regs[i]; + + for (k = 0; k < ARRAY_SIZE(pr->enum_ids); k++) { + id = pr->enum_ids[k]; + if (id && id == source) { + desc->sources[id].enable_max++; + } + } + } + } + + if (groups) { + for (i = 0; i < nr_groups; i++) { + struct intc_group *gr = &groups[i]; + + for (k = 0; k < ARRAY_SIZE(gr->enum_ids); k++) { + id = gr->enum_ids[k]; + if (id && id == source) { + desc->sources[id].enable_max++; + } + } + } + } + +} + +void sh_intc_register_sources(struct intc_desc *desc, + struct intc_vect *vectors, + int nr_vectors, + struct intc_group *groups, + int nr_groups) +{ + unsigned int i, k; + intc_enum id; + struct intc_source *s; + + for (i = 0; i < nr_vectors; i++) { + struct intc_vect *vect = &vectors[i]; + + sh_intc_register_source(desc, vect->enum_id, groups, nr_groups); + id = vect->enum_id; + if (id) { + s = &desc->sources[id]; + s->vect = vect->vect; + trace_sh_intc_register("source", vect->enum_id, s->vect, + s->enable_count, s->enable_max); + } + } + + if (groups) { + for (i = 0; i < nr_groups; i++) { + struct intc_group *gr = &groups[i]; + + id = gr->enum_id; + s = &desc->sources[id]; + s->next_enum_id = gr->enum_ids[0]; + + for (k = 1; k < ARRAY_SIZE(gr->enum_ids); k++) { + if (gr->enum_ids[k]) { + id = gr->enum_ids[k - 1]; + s = &desc->sources[id]; + s->next_enum_id = gr->enum_ids[k]; + } + } + trace_sh_intc_register("group", gr->enum_id, 0xffff, + s->enable_count, s->enable_max); + } + } } static unsigned int sh_intc_register(MemoryRegion *sysmem, - struct intc_desc *desc, - const unsigned long address, - const char *type, - const char *action, - const unsigned int index) + struct intc_desc *desc, + const unsigned long address, + const char *type, + const char *action, + const unsigned int index) { char name[60]; MemoryRegion *iomem, *iomem_p4, *iomem_a7; @@ -315,132 +364,28 @@ static unsigned int sh_intc_register(MemoryRegion *sysmem, } iomem = &desc->iomem; - iomem_p4 = desc->iomem_aliases + index; + iomem_p4 = &desc->iomem_aliases[index]; iomem_a7 = iomem_p4 + 1; -#define SH_INTC_IOMEM_FORMAT "interrupt-controller-%s-%s-%s" - snprintf(name, sizeof(name), SH_INTC_IOMEM_FORMAT, type, action, "p4"); - memory_region_init_alias(iomem_p4, NULL, name, iomem, INTC_A7(address), 4); + snprintf(name, sizeof(name), "intc-%s-%s-%s", type, action, "p4"); + memory_region_init_alias(iomem_p4, NULL, name, iomem, A7ADDR(address), 4); memory_region_add_subregion(sysmem, P4ADDR(address), iomem_p4); - snprintf(name, sizeof(name), SH_INTC_IOMEM_FORMAT, type, action, "a7"); - memory_region_init_alias(iomem_a7, NULL, name, iomem, INTC_A7(address), 4); + snprintf(name, sizeof(name), "intc-%s-%s-%s", type, action, "a7"); + memory_region_init_alias(iomem_a7, NULL, name, iomem, A7ADDR(address), 4); memory_region_add_subregion(sysmem, A7ADDR(address), iomem_a7); -#undef SH_INTC_IOMEM_FORMAT /* used to increment aliases index */ return 2; } -static void sh_intc_register_source(struct intc_desc *desc, - intc_enum source, - struct intc_group *groups, - int nr_groups) -{ - unsigned int i, k; - struct intc_source *s; - - if (desc->mask_regs) { - for (i = 0; i < desc->nr_mask_regs; i++) { - struct intc_mask_reg *mr = desc->mask_regs + i; - - for (k = 0; k < ARRAY_SIZE(mr->enum_ids); k++) { - if (mr->enum_ids[k] != source) - continue; - - s = sh_intc_source(desc, mr->enum_ids[k]); - if (s) - s->enable_max++; - } 
- } - } - - if (desc->prio_regs) { - for (i = 0; i < desc->nr_prio_regs; i++) { - struct intc_prio_reg *pr = desc->prio_regs + i; - - for (k = 0; k < ARRAY_SIZE(pr->enum_ids); k++) { - if (pr->enum_ids[k] != source) - continue; - - s = sh_intc_source(desc, pr->enum_ids[k]); - if (s) - s->enable_max++; - } - } - } - - if (groups) { - for (i = 0; i < nr_groups; i++) { - struct intc_group *gr = groups + i; - - for (k = 0; k < ARRAY_SIZE(gr->enum_ids); k++) { - if (gr->enum_ids[k] != source) - continue; - - s = sh_intc_source(desc, gr->enum_ids[k]); - if (s) - s->enable_max++; - } - } - } - -} - -void sh_intc_register_sources(struct intc_desc *desc, - struct intc_vect *vectors, - int nr_vectors, - struct intc_group *groups, - int nr_groups) -{ - unsigned int i, k; - struct intc_source *s; - - for (i = 0; i < nr_vectors; i++) { - struct intc_vect *vect = vectors + i; - - sh_intc_register_source(desc, vect->enum_id, groups, nr_groups); - s = sh_intc_source(desc, vect->enum_id); - if (s) { - s->vect = vect->vect; - -#ifdef DEBUG_INTC_SOURCES - printf("sh_intc: registered source %d -> 0x%04x (%d/%d)\n", - vect->enum_id, s->vect, s->enable_count, s->enable_max); -#endif - } - } - - if (groups) { - for (i = 0; i < nr_groups; i++) { - struct intc_group *gr = groups + i; - - s = sh_intc_source(desc, gr->enum_id); - s->next_enum_id = gr->enum_ids[0]; - - for (k = 1; k < ARRAY_SIZE(gr->enum_ids); k++) { - if (!gr->enum_ids[k]) - continue; - - s = sh_intc_source(desc, gr->enum_ids[k - 1]); - s->next_enum_id = gr->enum_ids[k]; - } - -#ifdef DEBUG_INTC_SOURCES - printf("sh_intc: registered group %d (%d/%d)\n", - gr->enum_id, s->enable_count, s->enable_max); -#endif - } - } -} - int sh_intc_init(MemoryRegion *sysmem, - struct intc_desc *desc, - int nr_sources, - struct intc_mask_reg *mask_regs, - int nr_mask_regs, - struct intc_prio_reg *prio_regs, - int nr_prio_regs) + struct intc_desc *desc, + int nr_sources, + struct intc_mask_reg *mask_regs, + int nr_mask_regs, + struct intc_prio_reg *prio_regs, + int nr_prio_regs) { unsigned int i, j; @@ -450,65 +395,55 @@ int sh_intc_init(MemoryRegion *sysmem, desc->nr_mask_regs = nr_mask_regs; desc->prio_regs = prio_regs; desc->nr_prio_regs = nr_prio_regs; - /* Allocate 4 MemoryRegions per register (2 actions * 2 aliases). 
- **/ + /* Allocate 4 MemoryRegions per register (2 actions * 2 aliases) */ desc->iomem_aliases = g_new0(MemoryRegion, (nr_mask_regs + nr_prio_regs) * 4); - - j = 0; - i = sizeof(struct intc_source) * nr_sources; - desc->sources = g_malloc0(i); - - for (i = 0; i < desc->nr_sources; i++) { - struct intc_source *source = desc->sources + i; - - source->parent = desc; + desc->sources = g_new0(struct intc_source, nr_sources); + for (i = 0; i < nr_sources; i++) { + desc->sources[i].parent = desc; } - desc->irqs = qemu_allocate_irqs(sh_intc_set_irq, desc, nr_sources); - - memory_region_init_io(&desc->iomem, NULL, &sh_intc_ops, desc, - "interrupt-controller", 0x100000000ULL); - -#define INT_REG_PARAMS(reg_struct, type, action, j) \ - reg_struct->action##_reg, #type, #action, j + memory_region_init_io(&desc->iomem, NULL, &sh_intc_ops, desc, "intc", + 0x100000000ULL); + j = 0; if (desc->mask_regs) { for (i = 0; i < desc->nr_mask_regs; i++) { - struct intc_mask_reg *mr = desc->mask_regs + i; + struct intc_mask_reg *mr = &desc->mask_regs[i]; - j += sh_intc_register(sysmem, desc, - INT_REG_PARAMS(mr, mask, set, j)); - j += sh_intc_register(sysmem, desc, - INT_REG_PARAMS(mr, mask, clr, j)); - } + j += sh_intc_register(sysmem, desc, mr->set_reg, "mask", "set", j); + j += sh_intc_register(sysmem, desc, mr->clr_reg, "mask", "clr", j); + } } if (desc->prio_regs) { for (i = 0; i < desc->nr_prio_regs; i++) { - struct intc_prio_reg *pr = desc->prio_regs + i; + struct intc_prio_reg *pr = &desc->prio_regs[i]; - j += sh_intc_register(sysmem, desc, - INT_REG_PARAMS(pr, prio, set, j)); - j += sh_intc_register(sysmem, desc, - INT_REG_PARAMS(pr, prio, clr, j)); - } + j += sh_intc_register(sysmem, desc, pr->set_reg, "prio", "set", j); + j += sh_intc_register(sysmem, desc, pr->clr_reg, "prio", "clr", j); + } } -#undef INT_REG_PARAMS return 0; } -/* Assert level IRL interrupt. - 0:deassert. 1:lowest priority,... 15:highest priority. */ +/* + * Assert level IRL interrupt. + * 0:deassert. 1:lowest priority,... 15:highest priority + */ void sh_intc_set_irl(void *opaque, int n, int level) { struct intc_source *s = opaque; int i, irl = level ^ 15; - for (i = 0; (s = sh_intc_source(s->parent, s->next_enum_id)); i++) { - if (i == irl) - sh_intc_toggle_source(s, s->enable_count?0:1, s->asserted?0:1); - else - if (s->asserted) - sh_intc_toggle_source(s, 0, -1); + intc_enum id = s->next_enum_id; + + for (i = 0; id; id = s->next_enum_id, i++) { + s = &s->parent->sources[id]; + if (i == irl) { + sh_intc_toggle_source(s, s->enable_count ? 0 : 1, + s->asserted ? 0 : 1); + } else if (s->asserted) { + sh_intc_toggle_source(s, 0, -1); + } } } diff --git a/hw/intc/sifive_clint.c b/hw/intc/sifive_clint.c deleted file mode 100644 index 0f41e5ea1c..0000000000 --- a/hw/intc/sifive_clint.c +++ /dev/null @@ -1,266 +0,0 @@ -/* - * SiFive CLINT (Core Local Interruptor) - * - * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu - * Copyright (c) 2017 SiFive, Inc. - * - * This provides real-time clock, timer and interprocessor interrupts. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2 or later, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "qemu/error-report.h" -#include "qemu/module.h" -#include "hw/sysbus.h" -#include "target/riscv/cpu.h" -#include "hw/qdev-properties.h" -#include "hw/intc/sifive_clint.h" -#include "qemu/timer.h" - -static uint64_t cpu_riscv_read_rtc(uint32_t timebase_freq) -{ - return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), - timebase_freq, NANOSECONDS_PER_SECOND); -} - -/* - * Called when timecmp is written to update the QEMU timer or immediately - * trigger timer interrupt if mtimecmp <= current timer value. - */ -static void sifive_clint_write_timecmp(RISCVCPU *cpu, uint64_t value, - uint32_t timebase_freq) -{ - uint64_t next; - uint64_t diff; - - uint64_t rtc_r = cpu_riscv_read_rtc(timebase_freq); - - cpu->env.timecmp = value; - if (cpu->env.timecmp <= rtc_r) { - /* if we're setting an MTIMECMP value in the "past", - immediately raise the timer interrupt */ - riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(1)); - return; - } - - /* otherwise, set up the future timer interrupt */ - riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(0)); - diff = cpu->env.timecmp - rtc_r; - /* back to ns (note args switched in muldiv64) */ - next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - muldiv64(diff, NANOSECONDS_PER_SECOND, timebase_freq); - timer_mod(cpu->env.timer, next); -} - -/* - * Callback used when the timer set using timer_mod expires. - * Should raise the timer interrupt line - */ -static void sifive_clint_timer_cb(void *opaque) -{ - RISCVCPU *cpu = opaque; - riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(1)); -} - -/* CPU wants to read rtc or timecmp register */ -static uint64_t sifive_clint_read(void *opaque, hwaddr addr, unsigned size) -{ - SiFiveCLINTState *clint = opaque; - if (addr >= clint->sip_base && - addr < clint->sip_base + (clint->num_harts << 2)) { - size_t hartid = clint->hartid_base + ((addr - clint->sip_base) >> 2); - CPUState *cpu = qemu_get_cpu(hartid); - CPURISCVState *env = cpu ? cpu->env_ptr : NULL; - if (!env) { - error_report("clint: invalid timecmp hartid: %zu", hartid); - } else if ((addr & 0x3) == 0) { - return (env->mip & MIP_MSIP) > 0; - } else { - error_report("clint: invalid read: %08x", (uint32_t)addr); - return 0; - } - } else if (addr >= clint->timecmp_base && - addr < clint->timecmp_base + (clint->num_harts << 3)) { - size_t hartid = clint->hartid_base + - ((addr - clint->timecmp_base) >> 3); - CPUState *cpu = qemu_get_cpu(hartid); - CPURISCVState *env = cpu ? 
cpu->env_ptr : NULL; - if (!env) { - error_report("clint: invalid timecmp hartid: %zu", hartid); - } else if ((addr & 0x7) == 0) { - /* timecmp_lo */ - uint64_t timecmp = env->timecmp; - return timecmp & 0xFFFFFFFF; - } else if ((addr & 0x7) == 4) { - /* timecmp_hi */ - uint64_t timecmp = env->timecmp; - return (timecmp >> 32) & 0xFFFFFFFF; - } else { - error_report("clint: invalid read: %08x", (uint32_t)addr); - return 0; - } - } else if (addr == clint->time_base) { - /* time_lo */ - return cpu_riscv_read_rtc(clint->timebase_freq) & 0xFFFFFFFF; - } else if (addr == clint->time_base + 4) { - /* time_hi */ - return (cpu_riscv_read_rtc(clint->timebase_freq) >> 32) & 0xFFFFFFFF; - } - - error_report("clint: invalid read: %08x", (uint32_t)addr); - return 0; -} - -/* CPU wrote to rtc or timecmp register */ -static void sifive_clint_write(void *opaque, hwaddr addr, uint64_t value, - unsigned size) -{ - SiFiveCLINTState *clint = opaque; - - if (addr >= clint->sip_base && - addr < clint->sip_base + (clint->num_harts << 2)) { - size_t hartid = clint->hartid_base + ((addr - clint->sip_base) >> 2); - CPUState *cpu = qemu_get_cpu(hartid); - CPURISCVState *env = cpu ? cpu->env_ptr : NULL; - if (!env) { - error_report("clint: invalid timecmp hartid: %zu", hartid); - } else if ((addr & 0x3) == 0) { - riscv_cpu_update_mip(RISCV_CPU(cpu), MIP_MSIP, BOOL_TO_MASK(value)); - } else { - error_report("clint: invalid sip write: %08x", (uint32_t)addr); - } - return; - } else if (addr >= clint->timecmp_base && - addr < clint->timecmp_base + (clint->num_harts << 3)) { - size_t hartid = clint->hartid_base + - ((addr - clint->timecmp_base) >> 3); - CPUState *cpu = qemu_get_cpu(hartid); - CPURISCVState *env = cpu ? cpu->env_ptr : NULL; - if (!env) { - error_report("clint: invalid timecmp hartid: %zu", hartid); - } else if ((addr & 0x7) == 0) { - /* timecmp_lo */ - uint64_t timecmp_hi = env->timecmp >> 32; - sifive_clint_write_timecmp(RISCV_CPU(cpu), - timecmp_hi << 32 | (value & 0xFFFFFFFF), clint->timebase_freq); - return; - } else if ((addr & 0x7) == 4) { - /* timecmp_hi */ - uint64_t timecmp_lo = env->timecmp; - sifive_clint_write_timecmp(RISCV_CPU(cpu), - value << 32 | (timecmp_lo & 0xFFFFFFFF), clint->timebase_freq); - } else { - error_report("clint: invalid timecmp write: %08x", (uint32_t)addr); - } - return; - } else if (addr == clint->time_base) { - /* time_lo */ - error_report("clint: time_lo write not implemented"); - return; - } else if (addr == clint->time_base + 4) { - /* time_hi */ - error_report("clint: time_hi write not implemented"); - return; - } - - error_report("clint: invalid write: %08x", (uint32_t)addr); -} - -static const MemoryRegionOps sifive_clint_ops = { - .read = sifive_clint_read, - .write = sifive_clint_write, - .endianness = DEVICE_LITTLE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 8 - } -}; - -static Property sifive_clint_properties[] = { - DEFINE_PROP_UINT32("hartid-base", SiFiveCLINTState, hartid_base, 0), - DEFINE_PROP_UINT32("num-harts", SiFiveCLINTState, num_harts, 0), - DEFINE_PROP_UINT32("sip-base", SiFiveCLINTState, sip_base, 0), - DEFINE_PROP_UINT32("timecmp-base", SiFiveCLINTState, timecmp_base, 0), - DEFINE_PROP_UINT32("time-base", SiFiveCLINTState, time_base, 0), - DEFINE_PROP_UINT32("aperture-size", SiFiveCLINTState, aperture_size, 0), - DEFINE_PROP_UINT32("timebase-freq", SiFiveCLINTState, timebase_freq, 0), - DEFINE_PROP_END_OF_LIST(), -}; - -static void sifive_clint_realize(DeviceState *dev, Error **errp) -{ - SiFiveCLINTState *s = 
SIFIVE_CLINT(dev); - memory_region_init_io(&s->mmio, OBJECT(dev), &sifive_clint_ops, s, - TYPE_SIFIVE_CLINT, s->aperture_size); - sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio); -} - -static void sifive_clint_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - dc->realize = sifive_clint_realize; - device_class_set_props(dc, sifive_clint_properties); -} - -static const TypeInfo sifive_clint_info = { - .name = TYPE_SIFIVE_CLINT, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(SiFiveCLINTState), - .class_init = sifive_clint_class_init, -}; - -static void sifive_clint_register_types(void) -{ - type_register_static(&sifive_clint_info); -} - -type_init(sifive_clint_register_types) - - -/* - * Create CLINT device. - */ -DeviceState *sifive_clint_create(hwaddr addr, hwaddr size, - uint32_t hartid_base, uint32_t num_harts, uint32_t sip_base, - uint32_t timecmp_base, uint32_t time_base, uint32_t timebase_freq, - bool provide_rdtime) -{ - int i; - for (i = 0; i < num_harts; i++) { - CPUState *cpu = qemu_get_cpu(hartid_base + i); - CPURISCVState *env = cpu ? cpu->env_ptr : NULL; - if (!env) { - continue; - } - if (provide_rdtime) { - riscv_cpu_set_rdtime_fn(env, cpu_riscv_read_rtc, timebase_freq); - } - env->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, - &sifive_clint_timer_cb, cpu); - env->timecmp = 0; - } - - DeviceState *dev = qdev_new(TYPE_SIFIVE_CLINT); - qdev_prop_set_uint32(dev, "hartid-base", hartid_base); - qdev_prop_set_uint32(dev, "num-harts", num_harts); - qdev_prop_set_uint32(dev, "sip-base", sip_base); - qdev_prop_set_uint32(dev, "timecmp-base", timecmp_base); - qdev_prop_set_uint32(dev, "time-base", time_base); - qdev_prop_set_uint32(dev, "aperture-size", size); - qdev_prop_set_uint32(dev, "timebase-freq", timebase_freq); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr); - return dev; -} diff --git a/hw/intc/sifive_plic.c b/hw/intc/sifive_plic.c index 78903beb06..c2dfacf028 100644 --- a/hw/intc/sifive_plic.c +++ b/hw/intc/sifive_plic.c @@ -29,8 +29,13 @@ #include "hw/intc/sifive_plic.h" #include "target/riscv/cpu.h" #include "migration/vmstate.h" +#include "hw/irq.h" +#include "sysemu/kvm.h" -#define RISCV_DEBUG_PLIC 0 +static bool addr_between(uint32_t addr, uint32_t base, uint32_t num) +{ + return addr >= base && addr - base < num; +} static PLICMode char_to_mode(char c) { @@ -45,47 +50,6 @@ static PLICMode char_to_mode(char c) } } -static char mode_to_char(PLICMode m) -{ - switch (m) { - case PLICMode_U: return 'U'; - case PLICMode_S: return 'S'; - case PLICMode_H: return 'H'; - case PLICMode_M: return 'M'; - default: return '?'; - } -} - -static void sifive_plic_print_state(SiFivePLICState *plic) -{ - int i; - int addrid; - - /* pending */ - qemu_log("pending : "); - for (i = plic->bitfield_words - 1; i >= 0; i--) { - qemu_log("%08x", plic->pending[i]); - } - qemu_log("\n"); - - /* pending */ - qemu_log("claimed : "); - for (i = plic->bitfield_words - 1; i >= 0; i--) { - qemu_log("%08x", plic->claimed[i]); - } - qemu_log("\n"); - - for (addrid = 0; addrid < plic->num_addrs; addrid++) { - qemu_log("hart%d-%c enable: ", - plic->addr_config[addrid].hartid, - mode_to_char(plic->addr_config[addrid].mode)); - for (i = plic->bitfield_words - 1; i >= 0; i--) { - qemu_log("%08x", plic->enable[addrid * plic->bitfield_words + i]); - } - qemu_log("\n"); - } -} - static uint32_t atomic_set_masked(uint32_t *a, uint32_t mask, uint32_t value) { uint32_t old, new, cmp = qatomic_read(a); @@ 
-109,26 +73,34 @@ static void sifive_plic_set_claimed(SiFivePLICState *plic, int irq, bool level) atomic_set_masked(&plic->claimed[irq >> 5], 1 << (irq & 31), -!!level); } -static int sifive_plic_irqs_pending(SiFivePLICState *plic, uint32_t addrid) +static uint32_t sifive_plic_claimed(SiFivePLICState *plic, uint32_t addrid) { + uint32_t max_irq = 0; + uint32_t max_prio = plic->target_priority[addrid]; int i, j; + for (i = 0; i < plic->bitfield_words; i++) { uint32_t pending_enabled_not_claimed = - (plic->pending[i] & ~plic->claimed[i]) & - plic->enable[addrid * plic->bitfield_words + i]; + (plic->pending[i] & ~plic->claimed[i]) & + plic->enable[addrid * plic->bitfield_words + i]; + if (!pending_enabled_not_claimed) { continue; } + for (j = 0; j < 32; j++) { int irq = (i << 5) + j; uint32_t prio = plic->source_priority[irq]; int enabled = pending_enabled_not_claimed & (1 << j); - if (enabled && prio > plic->target_priority[addrid]) { - return 1; + + if (enabled && prio > max_prio) { + max_irq = irq; + max_prio = prio; } } } - return 0; + + return max_irq; } static void sifive_plic_update(SiFivePLICState *plic) @@ -139,128 +111,61 @@ static void sifive_plic_update(SiFivePLICState *plic) for (addrid = 0; addrid < plic->num_addrs; addrid++) { uint32_t hartid = plic->addr_config[addrid].hartid; PLICMode mode = plic->addr_config[addrid].mode; - CPUState *cpu = qemu_get_cpu(hartid); - CPURISCVState *env = cpu ? cpu->env_ptr : NULL; - if (!env) { - continue; - } - int level = sifive_plic_irqs_pending(plic, addrid); + bool level = !!sifive_plic_claimed(plic, addrid); + switch (mode) { case PLICMode_M: - riscv_cpu_update_mip(RISCV_CPU(cpu), MIP_MEIP, BOOL_TO_MASK(level)); + qemu_set_irq(plic->m_external_irqs[hartid - plic->hartid_base], level); break; case PLICMode_S: - riscv_cpu_update_mip(RISCV_CPU(cpu), MIP_SEIP, BOOL_TO_MASK(level)); + qemu_set_irq(plic->s_external_irqs[hartid - plic->hartid_base], level); break; default: break; } } - - if (RISCV_DEBUG_PLIC) { - sifive_plic_print_state(plic); - } -} - -static uint32_t sifive_plic_claim(SiFivePLICState *plic, uint32_t addrid) -{ - int i, j; - uint32_t max_irq = 0; - uint32_t max_prio = plic->target_priority[addrid]; - - for (i = 0; i < plic->bitfield_words; i++) { - uint32_t pending_enabled_not_claimed = - (plic->pending[i] & ~plic->claimed[i]) & - plic->enable[addrid * plic->bitfield_words + i]; - if (!pending_enabled_not_claimed) { - continue; - } - for (j = 0; j < 32; j++) { - int irq = (i << 5) + j; - uint32_t prio = plic->source_priority[irq]; - int enabled = pending_enabled_not_claimed & (1 << j); - if (enabled && prio > max_prio) { - max_irq = irq; - max_prio = prio; - } - } - } - - if (max_irq) { - sifive_plic_set_pending(plic, max_irq, false); - sifive_plic_set_claimed(plic, max_irq, true); - } - return max_irq; } static uint64_t sifive_plic_read(void *opaque, hwaddr addr, unsigned size) { SiFivePLICState *plic = opaque; - /* writes must be 4 byte words */ - if ((addr & 0x3) != 0) { - goto err; - } - - if (addr >= plic->priority_base && /* 4 bytes per source */ - addr < plic->priority_base + (plic->num_sources << 2)) - { + if (addr_between(addr, plic->priority_base, plic->num_sources << 2)) { uint32_t irq = ((addr - plic->priority_base) >> 2) + 1; - if (RISCV_DEBUG_PLIC) { - qemu_log("plic: read priority: irq=%d priority=%d\n", - irq, plic->source_priority[irq]); - } + return plic->source_priority[irq]; - } else if (addr >= plic->pending_base && /* 1 bit per source */ - addr < plic->pending_base + (plic->num_sources >> 3)) - { + } else 
if (addr_between(addr, plic->pending_base, plic->num_sources >> 3)) { uint32_t word = (addr - plic->pending_base) >> 2; - if (RISCV_DEBUG_PLIC) { - qemu_log("plic: read pending: word=%d value=%d\n", - word, plic->pending[word]); - } + return plic->pending[word]; - } else if (addr >= plic->enable_base && /* 1 bit per source */ - addr < plic->enable_base + plic->num_addrs * plic->enable_stride) - { + } else if (addr_between(addr, plic->enable_base, + plic->num_addrs * plic->enable_stride)) { uint32_t addrid = (addr - plic->enable_base) / plic->enable_stride; uint32_t wordid = (addr & (plic->enable_stride - 1)) >> 2; + if (wordid < plic->bitfield_words) { - if (RISCV_DEBUG_PLIC) { - qemu_log("plic: read enable: hart%d-%c word=%d value=%x\n", - plic->addr_config[addrid].hartid, - mode_to_char(plic->addr_config[addrid].mode), wordid, - plic->enable[addrid * plic->bitfield_words + wordid]); - } return plic->enable[addrid * plic->bitfield_words + wordid]; } - } else if (addr >= plic->context_base && /* 1 bit per source */ - addr < plic->context_base + plic->num_addrs * plic->context_stride) - { + } else if (addr_between(addr, plic->context_base, + plic->num_addrs * plic->context_stride)) { uint32_t addrid = (addr - plic->context_base) / plic->context_stride; uint32_t contextid = (addr & (plic->context_stride - 1)); + if (contextid == 0) { - if (RISCV_DEBUG_PLIC) { - qemu_log("plic: read priority: hart%d-%c priority=%x\n", - plic->addr_config[addrid].hartid, - mode_to_char(plic->addr_config[addrid].mode), - plic->target_priority[addrid]); - } return plic->target_priority[addrid]; } else if (contextid == 4) { - uint32_t value = sifive_plic_claim(plic, addrid); - if (RISCV_DEBUG_PLIC) { - qemu_log("plic: read claim: hart%d-%c irq=%x\n", - plic->addr_config[addrid].hartid, - mode_to_char(plic->addr_config[addrid].mode), - value); + uint32_t max_irq = sifive_plic_claimed(plic, addrid); + + if (max_irq) { + sifive_plic_set_pending(plic, max_irq, false); + sifive_plic_set_claimed(plic, max_irq, true); } + sifive_plic_update(plic); - return value; + return max_irq; } } -err: qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid register read 0x%" HWADDR_PRIx "\n", __func__, addr); @@ -272,80 +177,72 @@ static void sifive_plic_write(void *opaque, hwaddr addr, uint64_t value, { SiFivePLICState *plic = opaque; - /* writes must be 4 byte words */ - if ((addr & 0x3) != 0) { - goto err; - } - - if (addr >= plic->priority_base && /* 4 bytes per source */ - addr < plic->priority_base + (plic->num_sources << 2)) - { + if (addr_between(addr, plic->priority_base, plic->num_sources << 2)) { uint32_t irq = ((addr - plic->priority_base) >> 2) + 1; - plic->source_priority[irq] = value & 7; - if (RISCV_DEBUG_PLIC) { - qemu_log("plic: write priority: irq=%d priority=%d\n", - irq, plic->source_priority[irq]); + + if (((plic->num_priorities + 1) & plic->num_priorities) == 0) { + /* + * if "num_priorities + 1" is power-of-2, make each register bit of + * interrupt priority WARL (Write-Any-Read-Legal). Just filter + * out the access to unsupported priority bits. 
+ */ + plic->source_priority[irq] = value % (plic->num_priorities + 1); + sifive_plic_update(plic); + } else if (value <= plic->num_priorities) { + plic->source_priority[irq] = value; + sifive_plic_update(plic); } - sifive_plic_update(plic); - return; - } else if (addr >= plic->pending_base && /* 1 bit per source */ - addr < plic->pending_base + (plic->num_sources >> 3)) - { + } else if (addr_between(addr, plic->pending_base, + plic->num_sources >> 3)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid pending write: 0x%" HWADDR_PRIx "", __func__, addr); - return; - } else if (addr >= plic->enable_base && /* 1 bit per source */ - addr < plic->enable_base + plic->num_addrs * plic->enable_stride) - { + } else if (addr_between(addr, plic->enable_base, + plic->num_addrs * plic->enable_stride)) { uint32_t addrid = (addr - plic->enable_base) / plic->enable_stride; uint32_t wordid = (addr & (plic->enable_stride - 1)) >> 2; + if (wordid < plic->bitfield_words) { plic->enable[addrid * plic->bitfield_words + wordid] = value; - if (RISCV_DEBUG_PLIC) { - qemu_log("plic: write enable: hart%d-%c word=%d value=%x\n", - plic->addr_config[addrid].hartid, - mode_to_char(plic->addr_config[addrid].mode), wordid, - plic->enable[addrid * plic->bitfield_words + wordid]); - } - return; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid enable write 0x%" HWADDR_PRIx "\n", + __func__, addr); } - } else if (addr >= plic->context_base && /* 4 bytes per reg */ - addr < plic->context_base + plic->num_addrs * plic->context_stride) - { + } else if (addr_between(addr, plic->context_base, + plic->num_addrs * plic->context_stride)) { uint32_t addrid = (addr - plic->context_base) / plic->context_stride; uint32_t contextid = (addr & (plic->context_stride - 1)); + if (contextid == 0) { - if (RISCV_DEBUG_PLIC) { - qemu_log("plic: write priority: hart%d-%c priority=%x\n", - plic->addr_config[addrid].hartid, - mode_to_char(plic->addr_config[addrid].mode), - plic->target_priority[addrid]); - } - if (value <= plic->num_priorities) { + if (((plic->num_priorities + 1) & plic->num_priorities) == 0) { + /* + * if "num_priorities + 1" is power-of-2, each register bit of + * interrupt priority is WARL (Write-Any-Read-Legal). Just + * filter out the access to unsupported priority bits. 
+ */ + plic->target_priority[addrid] = value % + (plic->num_priorities + 1); + sifive_plic_update(plic); + } else if (value <= plic->num_priorities) { plic->target_priority[addrid] = value; sifive_plic_update(plic); } - return; } else if (contextid == 4) { - if (RISCV_DEBUG_PLIC) { - qemu_log("plic: write claim: hart%d-%c irq=%x\n", - plic->addr_config[addrid].hartid, - mode_to_char(plic->addr_config[addrid].mode), - (uint32_t)value); - } if (value < plic->num_sources) { sifive_plic_set_claimed(plic, value, false); sifive_plic_update(plic); } - return; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid context write 0x%" HWADDR_PRIx "\n", + __func__, addr); } + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid register write 0x%" HWADDR_PRIx "\n", + __func__, addr); } - -err: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Invalid register write 0x%" HWADDR_PRIx "\n", - __func__, addr); } static const MemoryRegionOps sifive_plic_ops = { @@ -358,20 +255,22 @@ static const MemoryRegionOps sifive_plic_ops = { } }; -static Property sifive_plic_properties[] = { - DEFINE_PROP_STRING("hart-config", SiFivePLICState, hart_config), - DEFINE_PROP_UINT32("hartid-base", SiFivePLICState, hartid_base, 0), - DEFINE_PROP_UINT32("num-sources", SiFivePLICState, num_sources, 0), - DEFINE_PROP_UINT32("num-priorities", SiFivePLICState, num_priorities, 0), - DEFINE_PROP_UINT32("priority-base", SiFivePLICState, priority_base, 0), - DEFINE_PROP_UINT32("pending-base", SiFivePLICState, pending_base, 0), - DEFINE_PROP_UINT32("enable-base", SiFivePLICState, enable_base, 0), - DEFINE_PROP_UINT32("enable-stride", SiFivePLICState, enable_stride, 0), - DEFINE_PROP_UINT32("context-base", SiFivePLICState, context_base, 0), - DEFINE_PROP_UINT32("context-stride", SiFivePLICState, context_stride, 0), - DEFINE_PROP_UINT32("aperture-size", SiFivePLICState, aperture_size, 0), - DEFINE_PROP_END_OF_LIST(), -}; +static void sifive_plic_reset(DeviceState *dev) +{ + SiFivePLICState *s = SIFIVE_PLIC(dev); + int i; + + memset(s->source_priority, 0, sizeof(uint32_t) * s->num_sources); + memset(s->target_priority, 0, sizeof(uint32_t) * s->num_addrs); + memset(s->pending, 0, sizeof(uint32_t) * s->bitfield_words); + memset(s->claimed, 0, sizeof(uint32_t) * s->bitfield_words); + memset(s->enable, 0, sizeof(uint32_t) * s->num_enables); + + for (i = 0; i < s->num_harts; i++) { + qemu_set_irq(s->m_external_irqs[i], 0); + qemu_set_irq(s->s_external_irqs[i], 0); + } +} /* * parse PLIC hart/mode address offset config @@ -430,39 +329,46 @@ static void parse_hart_config(SiFivePLICState *plic) static void sifive_plic_irq_request(void *opaque, int irq, int level) { - SiFivePLICState *plic = opaque; - if (RISCV_DEBUG_PLIC) { - qemu_log("sifive_plic_irq_request: irq=%d level=%d\n", irq, level); - } - sifive_plic_set_pending(plic, irq, level > 0); - sifive_plic_update(plic); + SiFivePLICState *s = opaque; + + sifive_plic_set_pending(s, irq, level > 0); + sifive_plic_update(s); } static void sifive_plic_realize(DeviceState *dev, Error **errp) { - SiFivePLICState *plic = SIFIVE_PLIC(dev); + SiFivePLICState *s = SIFIVE_PLIC(dev); int i; - memory_region_init_io(&plic->mmio, OBJECT(dev), &sifive_plic_ops, plic, - TYPE_SIFIVE_PLIC, plic->aperture_size); - parse_hart_config(plic); - plic->bitfield_words = (plic->num_sources + 31) >> 5; - plic->num_enables = plic->bitfield_words * plic->num_addrs; - plic->source_priority = g_new0(uint32_t, plic->num_sources); - plic->target_priority = g_new(uint32_t, plic->num_addrs); - plic->pending = g_new0(uint32_t, 
plic->bitfield_words); - plic->claimed = g_new0(uint32_t, plic->bitfield_words); - plic->enable = g_new0(uint32_t, plic->num_enables); - sysbus_init_mmio(SYS_BUS_DEVICE(dev), &plic->mmio); - qdev_init_gpio_in(dev, sifive_plic_irq_request, plic->num_sources); + memory_region_init_io(&s->mmio, OBJECT(dev), &sifive_plic_ops, s, + TYPE_SIFIVE_PLIC, s->aperture_size); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio); + + parse_hart_config(s); + + s->bitfield_words = (s->num_sources + 31) >> 5; + s->num_enables = s->bitfield_words * s->num_addrs; + s->source_priority = g_new0(uint32_t, s->num_sources); + s->target_priority = g_new(uint32_t, s->num_addrs); + s->pending = g_new0(uint32_t, s->bitfield_words); + s->claimed = g_new0(uint32_t, s->bitfield_words); + s->enable = g_new0(uint32_t, s->num_enables); + + qdev_init_gpio_in(dev, sifive_plic_irq_request, s->num_sources); + + s->s_external_irqs = g_malloc(sizeof(qemu_irq) * s->num_harts); + qdev_init_gpio_out(dev, s->s_external_irqs, s->num_harts); + + s->m_external_irqs = g_malloc(sizeof(qemu_irq) * s->num_harts); + qdev_init_gpio_out(dev, s->m_external_irqs, s->num_harts); /* We can't allow the supervisor to control SEIP as this would allow the * supervisor to clear a pending external interrupt which will result in * lost a interrupt in the case a PLIC is attached. The SEIP bit must be * hardware controlled when a PLIC is attached. */ - for (i = 0; i < plic->num_harts; i++) { - RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(plic->hartid_base + i)); + for (i = 0; i < s->num_harts; i++) { + RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(s->hartid_base + i)); if (riscv_cpu_claim_interrupts(cpu, MIP_SEIP) < 0) { error_report("SEIP already claimed"); exit(1); @@ -493,10 +399,26 @@ static const VMStateDescription vmstate_sifive_plic = { } }; +static Property sifive_plic_properties[] = { + DEFINE_PROP_STRING("hart-config", SiFivePLICState, hart_config), + DEFINE_PROP_UINT32("hartid-base", SiFivePLICState, hartid_base, 0), + DEFINE_PROP_UINT32("num-sources", SiFivePLICState, num_sources, 0), + DEFINE_PROP_UINT32("num-priorities", SiFivePLICState, num_priorities, 0), + DEFINE_PROP_UINT32("priority-base", SiFivePLICState, priority_base, 0), + DEFINE_PROP_UINT32("pending-base", SiFivePLICState, pending_base, 0), + DEFINE_PROP_UINT32("enable-base", SiFivePLICState, enable_base, 0), + DEFINE_PROP_UINT32("enable-stride", SiFivePLICState, enable_stride, 0), + DEFINE_PROP_UINT32("context-base", SiFivePLICState, context_base, 0), + DEFINE_PROP_UINT32("context-stride", SiFivePLICState, context_stride, 0), + DEFINE_PROP_UINT32("aperture-size", SiFivePLICState, aperture_size, 0), + DEFINE_PROP_END_OF_LIST(), +}; + static void sifive_plic_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + dc->reset = sifive_plic_reset; device_class_set_props(dc, sifive_plic_properties); dc->realize = sifive_plic_realize; dc->vmsd = &vmstate_sifive_plic; @@ -520,6 +442,7 @@ type_init(sifive_plic_register_types) * Create PLIC device. 
*/ DeviceState *sifive_plic_create(hwaddr addr, char *hart_config, + uint32_t num_harts, uint32_t hartid_base, uint32_t num_sources, uint32_t num_priorities, uint32_t priority_base, uint32_t pending_base, uint32_t enable_base, @@ -527,6 +450,9 @@ DeviceState *sifive_plic_create(hwaddr addr, char *hart_config, uint32_t context_stride, uint32_t aperture_size) { DeviceState *dev = qdev_new(TYPE_SIFIVE_PLIC); + int i; + SiFivePLICState *plic; + assert(enable_stride == (enable_stride & -enable_stride)); assert(context_stride == (context_stride & -context_stride)); qdev_prop_set_string(dev, "hart-config", hart_config); @@ -542,5 +468,22 @@ DeviceState *sifive_plic_create(hwaddr addr, char *hart_config, qdev_prop_set_uint32(dev, "aperture-size", aperture_size); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr); + + plic = SIFIVE_PLIC(dev); + + for (i = 0; i < plic->num_addrs; i++) { + int cpu_num = plic->addr_config[i].hartid; + CPUState *cpu = qemu_get_cpu(cpu_num); + + if (plic->addr_config[i].mode == PLICMode_M) { + qdev_connect_gpio_out(dev, num_harts - plic->hartid_base + cpu_num, + qdev_get_gpio_in(DEVICE(cpu), IRQ_M_EXT)); + } + if (plic->addr_config[i].mode == PLICMode_S) { + qdev_connect_gpio_out(dev, cpu_num, + qdev_get_gpio_in(DEVICE(cpu), IRQ_S_EXT)); + } + } + return dev; } diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c index 89cfa018f5..dc641cc604 100644 --- a/hw/intc/spapr_xive.c +++ b/hw/intc/spapr_xive.c @@ -185,7 +185,7 @@ static void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon) xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI", pq & XIVE_ESB_VAL_P ? 'P' : '-', pq & XIVE_ESB_VAL_Q ? 'Q' : '-', - xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ', + xive_source_is_asserted(xsrc, i) ? 'A' : ' ', xive_eas_is_masked(eas) ? 
"M" : " ", (int) xive_get_field64(EAS_END_DATA, eas->w)); @@ -480,6 +480,29 @@ static uint8_t spapr_xive_get_block_id(XiveRouter *xrtr) return SPAPR_XIVE_BLOCK_ID; } +static int spapr_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx, + uint8_t *pq) +{ + SpaprXive *xive = SPAPR_XIVE(xrtr); + + assert(SPAPR_XIVE_BLOCK_ID == blk); + + *pq = xive_source_esb_get(&xive->source, idx); + return 0; +} + +static int spapr_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx, + uint8_t *pq) +{ + SpaprXive *xive = SPAPR_XIVE(xrtr); + + assert(SPAPR_XIVE_BLOCK_ID == blk); + + *pq = xive_source_esb_set(&xive->source, idx, *pq); + return 0; +} + + static const VMStateDescription vmstate_spapr_xive_end = { .name = TYPE_SPAPR_XIVE "/end", .version_id = 1, @@ -788,6 +811,8 @@ static void spapr_xive_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_spapr_xive; xrc->get_eas = spapr_xive_get_eas; + xrc->get_pq = spapr_xive_get_pq; + xrc->set_pq = spapr_xive_set_pq; xrc->get_end = spapr_xive_get_end; xrc->write_end = spapr_xive_write_end; xrc->get_nvt = spapr_xive_get_nvt; @@ -1684,7 +1709,8 @@ static target_ulong h_int_esb(PowerPCCPU *cpu, mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset; if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8, - (flags & SPAPR_XIVE_ESB_STORE))) { + (flags & SPAPR_XIVE_ESB_STORE), + MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%" HWADDR_PRIx "\n", mmio_addr); return H_HARDWARE; diff --git a/hw/intc/spapr_xive_kvm.c b/hw/intc/spapr_xive_kvm.c index c008331160..61fe7bd2d3 100644 --- a/hw/intc/spapr_xive_kvm.c +++ b/hw/intc/spapr_xive_kvm.c @@ -236,11 +236,13 @@ int kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp) SpaprXive *xive = SPAPR_XIVE(xsrc->xive); uint64_t state = 0; + trace_kvm_xive_source_reset(srcno); + assert(xive->fd != -1); if (xive_source_irq_is_lsi(xsrc, srcno)) { state |= KVM_XIVE_LEVEL_SENSITIVE; - if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) { + if (xive_source_is_asserted(xsrc, srcno)) { state |= KVM_XIVE_LEVEL_ASSERTED; } } @@ -297,11 +299,9 @@ static uint8_t xive_esb_read(XiveSource *xsrc, int srcno, uint32_t offset) return xive_esb_rw(xsrc, srcno, offset, 0, 0) & 0x3; } -static void xive_esb_trigger(XiveSource *xsrc, int srcno) +static void kvmppc_xive_esb_trigger(XiveSource *xsrc, int srcno) { - uint64_t *addr = xsrc->esb_mmap + xive_source_esb_page(xsrc, srcno); - - *addr = 0x0; + xive_esb_rw(xsrc, srcno, 0, 0, true); } uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset, @@ -311,8 +311,6 @@ uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset, return xive_esb_rw(xsrc, srcno, offset, data, 1); } - trace_kvm_xive_source_reset(srcno); - /* * Special Load EOI handling for LSI sources. 
Q bit is never set * and the interrupt should be re-triggered if the level is still @@ -321,8 +319,8 @@ uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset, if (xive_source_irq_is_lsi(xsrc, srcno) && offset == XIVE_ESB_LOAD_EOI) { xive_esb_read(xsrc, srcno, XIVE_ESB_SET_PQ_00); - if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) { - xive_esb_trigger(xsrc, srcno); + if (xive_source_is_asserted(xsrc, srcno)) { + kvmppc_xive_esb_trigger(xsrc, srcno); } return 0; } else { @@ -359,14 +357,10 @@ void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val) return; } } else { - if (val) { - xsrc->status[srcno] |= XIVE_STATUS_ASSERTED; - } else { - xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED; - } + xive_source_set_asserted(xsrc, srcno, val); } - xive_esb_trigger(xsrc, srcno); + kvmppc_xive_esb_trigger(xsrc, srcno); } /* @@ -533,7 +527,7 @@ static void kvmppc_xive_change_state_handler(void *opaque, bool running, * generate a trigger. */ if (pq == XIVE_ESB_RESET && old_pq == XIVE_ESB_QUEUED) { - xive_esb_trigger(xsrc, i); + kvmppc_xive_esb_trigger(xsrc, i); } } diff --git a/hw/intc/trace-events b/hw/intc/trace-events index e56e7dd3b6..6fbc2045e6 100644 --- a/hw/intc/trace-events +++ b/hw/intc/trace-events @@ -151,8 +151,9 @@ gicv3_icv_hppir_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_HPPIR%d rea gicv3_icv_dir_write(uint32_t cpu, uint64_t val) "GICv3 ICV_DIR write cpu 0x%x value 0x%" PRIx64 gicv3_icv_iar_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_IAR%d read cpu 0x%x value 0x%" PRIx64 gicv3_icv_eoir_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_EOIR%d write cpu 0x%x value 0x%" PRIx64 -gicv3_cpuif_virt_update(uint32_t cpuid, int idx) "GICv3 CPU i/f 0x%x virt HPPI update LR index %d" -gicv3_cpuif_virt_set_irqs(uint32_t cpuid, int fiqlevel, int irqlevel, int maintlevel) "GICv3 CPU i/f 0x%x virt HPPI update: setting FIQ %d IRQ %d maintenance-irq %d" +gicv3_cpuif_virt_update(uint32_t cpuid, int idx, int hppvlpi, int grp, int prio) "GICv3 CPU i/f 0x%x virt HPPI update LR index %d HPPVLPI %d grp %d prio %d" +gicv3_cpuif_virt_set_irqs(uint32_t cpuid, int fiqlevel, int irqlevel) "GICv3 CPU i/f 0x%x virt HPPI update: setting FIQ %d IRQ %d" +gicv3_cpuif_virt_set_maint_irq(uint32_t cpuid, int maintlevel) "GICv3 CPU i/f 0x%x virt HPPI update: setting maintenance-irq %d" # arm_gicv3_dist.c gicv3_dist_read(uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 distributor read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d" @@ -169,6 +170,46 @@ gicv3_redist_badwrite(uint32_t cpu, uint64_t offset, uint64_t data, unsigned siz gicv3_redist_set_irq(uint32_t cpu, int irq, int level) "GICv3 redistributor 0x%x interrupt %d level changed to %d" gicv3_redist_send_sgi(uint32_t cpu, int irq) "GICv3 redistributor 0x%x pending SGI %d" +# arm_gicv3_its.c +gicv3_its_read(uint64_t offset, uint64_t data, unsigned size) "GICv3 ITS read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +gicv3_its_badread(uint64_t offset, unsigned size) "GICv3 ITS read: offset 0x%" PRIx64 " size %u: error" +gicv3_its_write(uint64_t offset, uint64_t data, unsigned size) "GICv3 ITS write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +gicv3_its_badwrite(uint64_t offset, uint64_t data, unsigned size) "GICv3 ITS write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u: error" +gicv3_its_translation_write(uint64_t offset, uint64_t data, unsigned size, uint32_t requester_id) "GICv3 ITS TRANSLATER write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u requester_id 0x%x" 
+gicv3_its_process_command(uint32_t rd_offset, uint8_t cmd) "GICv3 ITS: processing command at offset 0x%x: 0x%x" +gicv3_its_cmd_int(uint32_t devid, uint32_t eventid) "GICv3 ITS: command INT DeviceID 0x%x EventID 0x%x" +gicv3_its_cmd_clear(uint32_t devid, uint32_t eventid) "GICv3 ITS: command CLEAR DeviceID 0x%x EventID 0x%x" +gicv3_its_cmd_discard(uint32_t devid, uint32_t eventid) "GICv3 ITS: command DISCARD DeviceID 0x%x EventID 0x%x" +gicv3_its_cmd_sync(void) "GICv3 ITS: command SYNC" +gicv3_its_cmd_mapd(uint32_t devid, uint32_t size, uint64_t ittaddr, int valid) "GICv3 ITS: command MAPD DeviceID 0x%x Size 0x%x ITT_addr 0x%" PRIx64 " V %d" +gicv3_its_cmd_mapc(uint32_t icid, uint64_t rdbase, int valid) "GICv3 ITS: command MAPC ICID 0x%x RDbase 0x%" PRIx64 " V %d" +gicv3_its_cmd_mapi(uint32_t devid, uint32_t eventid, uint32_t icid) "GICv3 ITS: command MAPI DeviceID 0x%x EventID 0x%x ICID 0x%x" +gicv3_its_cmd_mapti(uint32_t devid, uint32_t eventid, uint32_t icid, uint32_t intid) "GICv3 ITS: command MAPTI DeviceID 0x%x EventID 0x%x ICID 0x%x pINTID 0x%x" +gicv3_its_cmd_inv(uint32_t devid, uint32_t eventid) "GICv3 ITS: command INV DeviceID 0x%x EventID 0x%x" +gicv3_its_cmd_invall(void) "GICv3 ITS: command INVALL" +gicv3_its_cmd_movall(uint64_t rd1, uint64_t rd2) "GICv3 ITS: command MOVALL RDbase1 0x%" PRIx64 " RDbase2 0x%" PRIx64 +gicv3_its_cmd_movi(uint32_t devid, uint32_t eventid, uint32_t icid) "GICv3 ITS: command MOVI DeviceID 0x%x EventID 0x%x ICID 0x%x" +gicv3_its_cmd_vmapi(uint32_t devid, uint32_t eventid, uint32_t vpeid, uint32_t doorbell) "GICv3 ITS: command VMAPI DeviceID 0x%x EventID 0x%x vPEID 0x%x Dbell_pINTID 0x%x" +gicv3_its_cmd_vmapti(uint32_t devid, uint32_t eventid, uint32_t vpeid, uint32_t vintid, uint32_t doorbell) "GICv3 ITS: command VMAPI DeviceID 0x%x EventID 0x%x vPEID 0x%x vINTID 0x%x Dbell_pINTID 0x%x" +gicv3_its_cmd_vmapp(uint32_t vpeid, uint64_t rdbase, int valid, uint64_t vptaddr, uint32_t vptsize) "GICv3 ITS: command VMAPP vPEID 0x%x RDbase 0x%" PRIx64 " V %d VPT_addr 0x%" PRIx64 " VPT_size 0x%x" +gicv3_its_cmd_vmovp(uint32_t vpeid, uint64_t rdbase) "GICv3 ITS: command VMOVP vPEID 0x%x RDbase 0x%" PRIx64 +gicv3_its_cmd_vsync(void) "GICv3 ITS: command VSYNC" +gicv3_its_cmd_vmovi(uint32_t devid, uint32_t eventid, uint32_t vpeid, int dbvalid, uint32_t doorbell) "GICv3 ITS: command VMOVI DeviceID 0x%x EventID 0x%x vPEID 0x%x D %d Dbell_pINTID 0x%x" +gicv3_its_cmd_vinvall(uint32_t vpeid) "GICv3 ITS: command VINVALL vPEID 0x%x" +gicv3_its_cmd_unknown(unsigned cmd) "GICv3 ITS: unknown command 0x%x" +gicv3_its_cte_read(uint32_t icid, int valid, uint32_t rdbase) "GICv3 ITS: Collection Table read for ICID 0x%x: valid %d RDBase 0x%x" +gicv3_its_cte_write(uint32_t icid, int valid, uint32_t rdbase) "GICv3 ITS: Collection Table write for ICID 0x%x: valid %d RDBase 0x%x" +gicv3_its_cte_read_fault(uint32_t icid) "GICv3 ITS: Collection Table read for ICID 0x%x: faulted" +gicv3_its_ite_read(uint64_t ittaddr, uint32_t eventid, int valid, int inttype, uint32_t intid, uint32_t icid, uint32_t vpeid, uint32_t doorbell) "GICv3 ITS: Interrupt Table read for ITTaddr 0x%" PRIx64 " EventID 0x%x: valid %d inttype %d intid 0x%x ICID 0x%x vPEID 0x%x doorbell 0x%x" +gicv3_its_ite_read_fault(uint64_t ittaddr, uint32_t eventid) "GICv3 ITS: Interrupt Table read for ITTaddr 0x%" PRIx64 " EventID 0x%x: faulted" +gicv3_its_ite_write(uint64_t ittaddr, uint32_t eventid, int valid, int inttype, uint32_t intid, uint32_t icid, uint32_t vpeid, uint32_t doorbell) "GICv3 ITS: Interrupt Table write for 
ITTaddr 0x%" PRIx64 " EventID 0x%x: valid %d inttype %d intid 0x%x ICID 0x%x vPEID 0x%x doorbell 0x%x" +gicv3_its_dte_read(uint32_t devid, int valid, uint32_t size, uint64_t ittaddr) "GICv3 ITS: Device Table read for DeviceID 0x%x: valid %d size 0x%x ITTaddr 0x%" PRIx64 +gicv3_its_dte_write(uint32_t devid, int valid, uint32_t size, uint64_t ittaddr) "GICv3 ITS: Device Table write for DeviceID 0x%x: valid %d size 0x%x ITTaddr 0x%" PRIx64 +gicv3_its_dte_read_fault(uint32_t devid) "GICv3 ITS: Device Table read for DeviceID 0x%x: faulted" +gicv3_its_vte_read(uint32_t vpeid, int valid, uint32_t vptsize, uint64_t vptaddr, uint32_t rdbase) "GICv3 ITS: vPE Table read for vPEID 0x%x: valid %d VPTsize 0x%x VPTaddr 0x%" PRIx64 " RDbase 0x%x" +gicv3_its_vte_read_fault(uint32_t vpeid) "GICv3 ITS: vPE Table read for vPEID 0x%x: faulted" +gicv3_its_vte_write(uint32_t vpeid, int valid, uint32_t vptsize, uint64_t vptaddr, uint32_t rdbase) "GICv3 ITS: vPE Table write for vPEID 0x%x: valid %d VPTsize 0x%x VPTaddr 0x%" PRIx64 " RDbase 0x%x" + # armv7m_nvic.c nvic_recompute_state(int vectpending, int vectpending_prio, int exception_prio) "NVIC state recomputed: vectpending %d vectpending_prio %d exception_prio %d" nvic_recompute_state_secure(int vectpending, bool vectpending_is_s_banked, int vectpending_prio, int exception_prio) "NVIC state recomputed: vectpending %d is_s_banked %d vectpending_prio %d exception_prio %d" @@ -219,14 +260,14 @@ kvm_xive_source_reset(uint32_t srcno) "IRQ 0x%x" xive_tctx_accept(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IBP=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x ACK" xive_tctx_notify(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IBP=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x raise !" 
xive_tctx_set_cppr(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IBP=0x%02x PIPR=0x%02x new CPPR=0x%02x NSR=0x%02x" -xive_source_esb_read(uint64_t addr, uint32_t srcno, uint64_t value) "@0x0x%"PRIx64" IRQ 0x%x val=0x0x%"PRIx64 -xive_source_esb_write(uint64_t addr, uint32_t srcno, uint64_t value) "@0x0x%"PRIx64" IRQ 0x%x val=0x0x%"PRIx64 +xive_source_esb_read(uint64_t addr, uint32_t srcno, uint64_t value) "@0x%"PRIx64" IRQ 0x%x val=0x%"PRIx64 +xive_source_esb_write(uint64_t addr, uint32_t srcno, uint64_t value) "@0x%"PRIx64" IRQ 0x%x val=0x%"PRIx64 xive_router_end_notify(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "END 0x%02x/0x%04x -> enqueue 0x%08x" xive_router_end_escalate(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t end_data) "END 0x%02x/0x%04x -> escalate END 0x%02x/0x%04x data 0x%08x" -xive_tctx_tm_write(uint64_t offset, unsigned int size, uint64_t value) "@0x0x%"PRIx64" sz=%d val=0x%" PRIx64 -xive_tctx_tm_read(uint64_t offset, unsigned int size, uint64_t value) "@0x0x%"PRIx64" sz=%d val=0x%" PRIx64 +xive_tctx_tm_write(uint64_t offset, unsigned int size, uint64_t value) "@0x%"PRIx64" sz=%d val=0x%" PRIx64 +xive_tctx_tm_read(uint64_t offset, unsigned int size, uint64_t value) "@0x%"PRIx64" sz=%d val=0x%" PRIx64 xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring) "found NVT 0x%x/0x%x ring=0x%x" -xive_end_source_read(uint8_t end_blk, uint32_t end_idx, uint64_t addr) "END 0x%x/0x%x @0x0x%"PRIx64 +xive_end_source_read(uint8_t end_blk, uint32_t end_idx, uint64_t addr) "END 0x%x/0x%x @0x%"PRIx64 # pnv_xive.c pnv_xive_ic_hw_trigger(uint64_t addr, uint64_t val) "@0x%"PRIx64" val=0x%"PRIx64 @@ -238,3 +279,32 @@ goldfish_pic_write(void *dev, int idx, unsigned int addr, unsigned int size, uin goldfish_pic_reset(void *dev, int idx) "pic: %p goldfish-irq.%d" goldfish_pic_realize(void *dev, int idx) "pic: %p goldfish-irq.%d" goldfish_pic_instance_init(void *dev) "pic: %p goldfish-irq" + +# sh_intc.c +sh_intc_sources(int p, int a, int c, int m, unsigned short v, const char *s1, const char *s2, const char *s3) "(%d/%d/%d/%d) interrupt source 0x%x %s%s%s" +sh_intc_pending(int p, unsigned short v) "(%d) returning interrupt source 0x%x" +sh_intc_register(const char *s, int id, unsigned short v, int c, int m) "%s %u -> 0x%04x (%d/%d)" +sh_intc_read(unsigned size, uint64_t offset, unsigned long val) "size %u 0x%" PRIx64 " -> 0x%lx" +sh_intc_write(unsigned size, uint64_t offset, unsigned long val) "size %u 0x%" PRIx64 " <- 0x%lx" +sh_intc_set(int id, int enable) "setting interrupt group %d to %d" + +# loongarch_ipi.c +loongarch_ipi_read(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%"PRIx64 +loongarch_ipi_write(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%"PRIx64 + +# loongarch_pch_pic.c +loongarch_pch_pic_irq_handler(int irq, int level) "irq %d level %d" +loongarch_pch_pic_low_readw(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 +loongarch_pch_pic_low_writew(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 +loongarch_pch_pic_high_readw(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 +loongarch_pch_pic_high_writew(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 +loongarch_pch_pic_readb(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 
"val: 0x%" PRIx64 +loongarch_pch_pic_writeb(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 + +# loongarch_pch_msi.c +loongarch_msi_set_irq(int irq_num) "set msi irq %d" + +# loongarch_extioi.c +loongarch_extioi_setirq(int irq, int level) "set extirq irq %d level %d" +loongarch_extioi_readw(uint64_t addr, uint64_t val) "addr: 0x%"PRIx64 "val: 0x%" PRIx64 +loongarch_extioi_writew(uint64_t addr, uint64_t val) "addr: 0x%"PRIx64 "val: 0x%" PRIx64 diff --git a/hw/intc/xics.c b/hw/intc/xics.c index 48a835eab7..dcd021af66 100644 --- a/hw/intc/xics.c +++ b/hw/intc/xics.c @@ -301,23 +301,25 @@ void icp_reset(ICPState *icp) static void icp_realize(DeviceState *dev, Error **errp) { ICPState *icp = ICP(dev); + PowerPCCPU *cpu; CPUPPCState *env; Error *err = NULL; assert(icp->xics); assert(icp->cs); - env = &POWERPC_CPU(icp->cs)->env; + cpu = POWERPC_CPU(icp->cs); + env = &cpu->env; switch (PPC_INPUT(env)) { case PPC_FLAGS_INPUT_POWER7: - icp->output = env->irq_inputs[POWER7_INPUT_INT]; + icp->output = qdev_get_gpio_in(DEVICE(cpu), POWER7_INPUT_INT); break; case PPC_FLAGS_INPUT_POWER9: /* For SPAPR xics emulation */ - icp->output = env->irq_inputs[POWER9_INPUT_INT]; + icp->output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_INT); break; case PPC_FLAGS_INPUT_970: - icp->output = env->irq_inputs[PPC970_INPUT_INT]; + icp->output = qdev_get_gpio_in(DEVICE(cpu), PPC970_INPUT_INT); break; default: @@ -565,8 +567,8 @@ static void ics_reset_irq(ICSIRQState *irq) static void ics_reset(DeviceState *dev) { ICSState *ics = ICS(dev); + g_autofree uint8_t *flags = g_malloc(ics->nr_irqs); int i; - uint8_t flags[ics->nr_irqs]; for (i = 0; i < ics->nr_irqs; i++) { flags[i] = ics->irqs[i].flags; @@ -604,7 +606,7 @@ static void ics_realize(DeviceState *dev, Error **errp) error_setg(errp, "Number of interrupts needs to be greater 0"); return; } - ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState)); + ics->irqs = g_new0(ICSIRQState, ics->nr_irqs); qemu_register_reset(ics_reset_handler, ics); } diff --git a/hw/intc/xics_kvm.c b/hw/intc/xics_kvm.c index f5bfc501bc..9719d98a17 100644 --- a/hw/intc/xics_kvm.c +++ b/hw/intc/xics_kvm.c @@ -27,7 +27,6 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qemu-common.h" #include "trace.h" #include "sysemu/kvm.h" #include "hw/ppc/spapr.h" diff --git a/hw/intc/xive.c b/hw/intc/xive.c index eeb4e62ba9..a986b96843 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -27,17 +27,6 @@ * XIVE Thread Interrupt Management context */ -/* - * Convert a priority number to an Interrupt Pending Buffer (IPB) - * register, which indicates a pending interrupt at the priority - * corresponding to the bit number - */ -static uint8_t priority_to_ipb(uint8_t priority) -{ - return priority > XIVE_PRIORITY_MAX ? - 0 : 1 << (XIVE_PRIORITY_MAX - priority); -} - /* * Convert an Interrupt Pending Buffer (IPB) register to a Pending * Interrupt Priority Register (PIPR), which contains the priority of @@ -89,7 +78,7 @@ static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) regs[TM_CPPR] = cppr; /* Reset the pending buffer bit */ - regs[TM_IPB] &= ~priority_to_ipb(cppr); + regs[TM_IPB] &= ~xive_priority_to_ipb(cppr); regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]); /* Drop Exception bit */ @@ -125,6 +114,17 @@ static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring) } } +void xive_tctx_reset_os_signal(XiveTCTX *tctx) +{ + /* + * Lower the External interrupt. Used when pulling an OS + * context. 
It is necessary to avoid catching it in the hypervisor + * context. It should be raised again when re-pushing the OS + * context. + */ + qemu_irq_lower(xive_tctx_output(tctx, TM_QW1_OS)); +} + static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) { uint8_t *regs = &tctx->regs[ring]; @@ -152,11 +152,6 @@ void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb) xive_tctx_notify(tctx, ring); } -static inline uint32_t xive_tctx_word2(uint8_t *ring) -{ - return *((uint32_t *) &ring[TM_WORD2]); -} - /* * XIVE Thread Interrupt Management Area (TIMA) */ @@ -353,7 +348,7 @@ static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size) { - xive_tctx_ipb_update(tctx, TM_QW1_OS, priority_to_ipb(value & 0xff)); + xive_tctx_ipb_update(tctx, TM_QW1_OS, xive_priority_to_ipb(value & 0xff)); } static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk, @@ -404,6 +399,8 @@ static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, /* Invalidate CAM line */ qw1w2_new = xive_set_field32(TM_QW1W2_VO, qw1w2, 0); xive_tctx_set_os_cam(tctx, qw1w2_new); + + xive_tctx_reset_os_signal(tctx); return qw1w2; } @@ -429,10 +426,16 @@ static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx, /* Reset the NVT value */ nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, 0); xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4); - - /* Merge in current context */ - xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb); } + /* + * Always call xive_tctx_ipb_update(). Even if there were no + * escalation triggered, there could be a pending interrupt which + * was saved when the context was pulled and that we need to take + * into account by recalculating the PIPR (which is not + * saved/restored). + * It will also raise the External interrupt signal if needed. 
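The IPB recalculation this comment refers to is plain bit arithmetic: xive_priority_to_ipb() (the former static priority_to_ipb() removed earlier in this file's diff) sets one bit per priority, and the PIPR is derived from the most-significant bit set in the IPB. A rough sketch of the mapping, assuming XIVE_PRIORITY_MAX is 7 and using illustrative helper names rather than the QEMU ones:

    /* priority 0 (highest) -> bit 7 (0x80) ... priority 7 (lowest) -> bit 0 (0x01) */
    static uint8_t example_priority_to_ipb(uint8_t priority)
    {
        return priority > 7 ? 0 : 1 << (7 - priority);
    }

    /* PIPR: priority of the most-significant pending bit in the IPB */
    static uint8_t example_ipb_to_pipr(uint8_t ipb)
    {
        uint8_t prio;

        for (prio = 0; prio < 8; prio++) {
            if (ipb & (0x80 >> prio)) {
                return prio;
            }
        }
        return 0xff; /* no interrupt pending */
    }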
+ */ + xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb); } /* @@ -692,8 +695,8 @@ static void xive_tctx_realize(DeviceState *dev, Error **errp) env = &cpu->env; switch (PPC_INPUT(env)) { case PPC_FLAGS_INPUT_POWER9: - tctx->hv_output = env->irq_inputs[POWER9_INPUT_HINT]; - tctx->os_output = env->irq_inputs[POWER9_INPUT_INT]; + tctx->hv_output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_HINT); + tctx->os_output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_INT); break; default: @@ -816,7 +819,7 @@ void xive_tctx_destroy(XiveTCTX *tctx) * XIVE ESB helpers */ -static uint8_t xive_esb_set(uint8_t *pq, uint8_t value) +uint8_t xive_esb_set(uint8_t *pq, uint8_t value) { uint8_t old_pq = *pq & 0x3; @@ -826,7 +829,7 @@ static uint8_t xive_esb_set(uint8_t *pq, uint8_t value) return old_pq; } -static bool xive_esb_trigger(uint8_t *pq) +bool xive_esb_trigger(uint8_t *pq) { uint8_t old_pq = *pq & 0x3; @@ -846,7 +849,7 @@ static bool xive_esb_trigger(uint8_t *pq) } } -static bool xive_esb_eoi(uint8_t *pq) +bool xive_esb_eoi(uint8_t *pq) { uint8_t old_pq = *pq & 0x3; @@ -891,7 +894,7 @@ static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno) { uint8_t old_pq = xive_source_esb_get(xsrc, srcno); - xsrc->status[srcno] |= XIVE_STATUS_ASSERTED; + xive_source_set_asserted(xsrc, srcno, true); switch (old_pq) { case XIVE_ESB_RESET: @@ -902,6 +905,16 @@ static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno) } } +/* + * Sources can be configured with PQ offloading in which case the check + * on the PQ state bits of MSIs is disabled + */ +static bool xive_source_esb_disabled(XiveSource *xsrc, uint32_t srcno) +{ + return (xsrc->esb_flags & XIVE_SRC_PQ_DISABLE) && + !xive_source_irq_is_lsi(xsrc, srcno); +} + /* * Returns whether the event notification should be forwarded. */ @@ -911,6 +924,10 @@ static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno) assert(srcno < xsrc->nr_irqs); + if (xive_source_esb_disabled(xsrc, srcno)) { + return true; + } + ret = xive_esb_trigger(&xsrc->status[srcno]); if (xive_source_irq_is_lsi(xsrc, srcno) && @@ -931,6 +948,11 @@ static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno) assert(srcno < xsrc->nr_irqs); + if (xive_source_esb_disabled(xsrc, srcno)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EOI for IRQ %d\n", srcno); + return false; + } + ret = xive_esb_eoi(&xsrc->status[srcno]); /* @@ -939,7 +961,7 @@ static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno) * notification */ if (xive_source_irq_is_lsi(xsrc, srcno) && - xsrc->status[srcno] & XIVE_STATUS_ASSERTED) { + xive_source_is_asserted(xsrc, srcno)) { ret = xive_source_lsi_trigger(xsrc, srcno); } @@ -952,9 +974,10 @@ static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno) static void xive_source_notify(XiveSource *xsrc, int srcno) { XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive); + bool pq_checked = !xive_source_esb_disabled(xsrc, srcno); if (xnc->notify) { - xnc->notify(xsrc->xive, srcno); + xnc->notify(xsrc->xive, srcno, pq_checked); } } @@ -1077,6 +1100,15 @@ static void xive_source_esb_write(void *opaque, hwaddr addr, notify = xive_source_esb_eoi(xsrc, srcno); break; + /* + * This is an internal offset used to inject triggers when the PQ + * state bits are not controlled locally. Such as for LSIs when + * under ABT mode. + */ + case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF: + notify = true; + break; + case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: case XIVE_ESB_SET_PQ_01 ... 
XIVE_ESB_SET_PQ_01 + 0x0FF: case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: @@ -1120,7 +1152,7 @@ void xive_source_set_irq(void *opaque, int srcno, int val) if (val) { notify = xive_source_lsi_trigger(xsrc, srcno); } else { - xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED; + xive_source_set_asserted(xsrc, srcno, false); } } else { if (val) { @@ -1149,7 +1181,7 @@ void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon) xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI", pq & XIVE_ESB_VAL_P ? 'P' : '-', pq & XIVE_ESB_VAL_Q ? 'Q' : '-', - xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' '); + xive_source_is_asserted(xsrc, i) ? 'A' : ' '); } } @@ -1262,8 +1294,8 @@ void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon) uint64_t qaddr = qaddr_base + (qindex << 2); uint32_t qdata = -1; - if (dma_memory_read(&address_space_memory, qaddr, &qdata, - sizeof(qdata))) { + if (dma_memory_read(&address_space_memory, qaddr, + &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%" HWADDR_PRIx "\n", qaddr); return; @@ -1327,7 +1359,8 @@ static void xive_end_enqueue(XiveEND *end, uint32_t data) uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff)); uint32_t qentries = 1 << (qsize + 10); - if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) { + if (dma_memory_write(&address_space_memory, qaddr, + &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%" HWADDR_PRIx "\n", qaddr); return; @@ -1376,6 +1409,24 @@ int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, return xrc->get_eas(xrtr, eas_blk, eas_idx, eas); } +static +int xive_router_get_pq(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, + uint8_t *pq) +{ + XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); + + return xrc->get_pq(xrtr, eas_blk, eas_idx, pq); +} + +static +int xive_router_set_pq(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, + uint8_t *pq) +{ + XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); + + return xrc->set_pq(xrtr, eas_blk, eas_idx, pq); +} + int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, XiveEND *end) { @@ -1514,10 +1565,10 @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, * * The parameters represent what is sent on the PowerBus */ -static bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool cam_ignore, uint8_t priority, - uint32_t logic_serv) +bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool cam_ignore, uint8_t priority, + uint32_t logic_serv) { XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb); XiveTCTXMatch match = { .tctx = NULL, .ring = 0 }; @@ -1535,7 +1586,8 @@ static bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, /* handle CPU exception delivery */ if (count) { trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring); - xive_tctx_ipb_update(match.tctx, match.ring, priority_to_ipb(priority)); + xive_tctx_ipb_update(match.tctx, match.ring, + xive_priority_to_ipb(priority)); } return !!count; @@ -1682,7 +1734,8 @@ static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk, * use. The presenter will resend the interrupt when the vCPU * is dispatched again on a HW thread. 
*/ - ipb = xive_get_field32(NVT_W4_IPB, nvt.w4) | priority_to_ipb(priority); + ipb = xive_get_field32(NVT_W4_IPB, nvt.w4) | + xive_priority_to_ipb(priority); nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, ipb); xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4); @@ -1725,7 +1778,7 @@ do_escalation: xive_get_field32(END_W5_ESC_END_DATA, end.w5)); } -void xive_router_notify(XiveNotifier *xn, uint32_t lisn) +void xive_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) { XiveRouter *xrtr = XIVE_ROUTER(xn); uint8_t eas_blk = XIVE_EAS_BLOCK(lisn); @@ -1738,11 +1791,27 @@ void xive_router_notify(XiveNotifier *xn, uint32_t lisn) return; } - /* - * The IVRE checks the State Bit Cache at this point. We skip the - * SBC lookup because the state bits of the sources are modeled - * internally in QEMU. - */ + if (!pq_checked) { + bool notify; + uint8_t pq; + + /* PQ cache lookup */ + if (xive_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) { + /* Set FIR */ + g_assert_not_reached(); + } + + notify = xive_esb_trigger(&pq); + + if (xive_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) { + /* Set FIR */ + g_assert_not_reached(); + } + + if (!notify) { + return; + } + } if (!xive_eas_is_valid(&eas)) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn); diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c new file mode 100644 index 0000000000..4d9ff41956 --- /dev/null +++ b/hw/intc/xive2.c @@ -0,0 +1,1023 @@ +/* + * QEMU PowerPC XIVE2 interrupt controller model (POWER10) + * + * Copyright (c) 2019-2022, IBM Corporation.. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "target/ppc/cpu.h" +#include "sysemu/cpus.h" +#include "sysemu/dma.h" +#include "hw/qdev-properties.h" +#include "monitor/monitor.h" +#include "hw/ppc/xive.h" +#include "hw/ppc/xive2.h" +#include "hw/ppc/xive2_regs.h" + +uint32_t xive2_router_get_config(Xive2Router *xrtr) +{ + Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); + + return xrc->get_config(xrtr); +} + +void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, Monitor *mon) +{ + if (!xive2_eas_is_valid(eas)) { + return; + } + + monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n", + lisn, xive2_eas_is_masked(eas) ? "M" : " ", + (uint8_t) xive_get_field64(EAS2_END_BLOCK, eas->w), + (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w), + (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w)); +} + +void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width, + Monitor *mon) +{ + uint64_t qaddr_base = xive2_end_qaddr(end); + uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3); + uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1); + uint32_t qentries = 1 << (qsize + 10); + int i; + + /* + * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window + */ + monitor_printf(mon, " [ "); + qindex = (qindex - (width - 1)) & (qentries - 1); + for (i = 0; i < width; i++) { + uint64_t qaddr = qaddr_base + (qindex << 2); + uint32_t qdata = -1; + + if (dma_memory_read(&address_space_memory, qaddr, &qdata, + sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%" + HWADDR_PRIx "\n", qaddr); + return; + } + monitor_printf(mon, "%s%08x ", i == width - 1 ? 
"^" : "", + be32_to_cpu(qdata)); + qindex = (qindex + 1) & (qentries - 1); + } + monitor_printf(mon, "]"); +} + +void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, Monitor *mon) +{ + uint64_t qaddr_base = xive2_end_qaddr(end); + uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1); + uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1); + uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3); + uint32_t qentries = 1 << (qsize + 10); + + uint32_t nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6); + uint32_t nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6); + uint8_t priority = xive_get_field32(END2_W7_F0_PRIORITY, end->w7); + uint8_t pq; + + if (!xive2_end_is_valid(end)) { + return; + } + + pq = xive_get_field32(END2_W1_ESn, end->w1); + + monitor_printf(mon, + " %08x %c%c %c%c%c%c%c%c%c%c%c%c prio:%d nvp:%02x/%04x", + end_idx, + pq & XIVE_ESB_VAL_P ? 'P' : '-', + pq & XIVE_ESB_VAL_Q ? 'Q' : '-', + xive2_end_is_valid(end) ? 'v' : '-', + xive2_end_is_enqueue(end) ? 'q' : '-', + xive2_end_is_notify(end) ? 'n' : '-', + xive2_end_is_backlog(end) ? 'b' : '-', + xive2_end_is_escalate(end) ? 'e' : '-', + xive2_end_is_escalate_end(end) ? 'N' : '-', + xive2_end_is_uncond_escalation(end) ? 'u' : '-', + xive2_end_is_silent_escalation(end) ? 's' : '-', + xive2_end_is_firmware1(end) ? 'f' : '-', + xive2_end_is_firmware2(end) ? 'F' : '-', + priority, nvp_blk, nvp_idx); + + if (qaddr_base) { + monitor_printf(mon, " eq:@%08"PRIx64"% 6d/%5d ^%d", + qaddr_base, qindex, qentries, qgen); + xive2_end_queue_pic_print_info(end, 6, mon); + } + monitor_printf(mon, "\n"); +} + +void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx, + Monitor *mon) +{ + Xive2Eas *eas = (Xive2Eas *) &end->w4; + uint8_t pq; + + if (!xive2_end_is_escalate(end)) { + return; + } + + pq = xive_get_field32(END2_W1_ESe, end->w1); + + monitor_printf(mon, " %08x %c%c %c%c end:%02x/%04x data:%08x\n", + end_idx, + pq & XIVE_ESB_VAL_P ? 'P' : '-', + pq & XIVE_ESB_VAL_Q ? 'Q' : '-', + xive2_eas_is_valid(eas) ? 'v' : ' ', + xive2_eas_is_masked(eas) ? 
'M' : ' ', + (uint8_t) xive_get_field64(EAS2_END_BLOCK, eas->w), + (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w), + (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w)); +} + +static void xive2_end_enqueue(Xive2End *end, uint32_t data) +{ + uint64_t qaddr_base = xive2_end_qaddr(end); + uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3); + uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1); + uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1); + + uint64_t qaddr = qaddr_base + (qindex << 2); + uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff)); + uint32_t qentries = 1 << (qsize + 10); + + if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata), + MEMTXATTRS_UNSPECIFIED)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%" + HWADDR_PRIx "\n", qaddr); + return; + } + + qindex = (qindex + 1) & (qentries - 1); + if (qindex == 0) { + qgen ^= 1; + end->w1 = xive_set_field32(END2_W1_GENERATION, end->w1, qgen); + + /* TODO(PowerNV): reset GF bit on a cache watch operation */ + end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, qgen); + } + end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex); +} + +/* + * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode + * + * TIMA Gen2 VP “save & restore” (S&R) indicated by H bit next to V bit + * + * - if a context is enabled with the H bit set, the VP context + * information is retrieved from the NVP structure (“check out”) + * and stored back on a context pull (“check in”), the SW receives + * the same context pull information as on P9 + * + * - the H bit cannot be changed while the V bit is set, i.e. a + * context cannot be set up in the TIMA and then be “pushed” into + * the NVP by changing the H bit while the context is enabled + */ + +static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, + uint8_t nvp_blk, uint32_t nvp_idx) +{ + CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; + uint32_t pir = env->spr_cb[SPR_PIR].default_value; + Xive2Nvp nvp; + uint8_t *regs = &tctx->regs[TM_QW1_OS]; + + if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", + nvp_blk, nvp_idx); + return; + } + + if (!xive2_nvp_is_valid(&nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", + nvp_blk, nvp_idx); + return; + } + + if (!xive2_nvp_is_hw(&nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n", + nvp_blk, nvp_idx); + return; + } + + if (!xive2_nvp_is_co(&nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not checkout\n", + nvp_blk, nvp_idx); + return; + } + + if (xive_get_field32(NVP2_W1_CO_THRID_VALID, nvp.w1) && + xive_get_field32(NVP2_W1_CO_THRID, nvp.w1) != pir) { + qemu_log_mask(LOG_GUEST_ERROR, + "XIVE: NVP %x/%x invalid checkout Thread %x\n", + nvp_blk, nvp_idx, pir); + return; + } + + nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]); + nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]); + nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]); + xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); + + nvp.w1 = xive_set_field32(NVP2_W1_CO, nvp.w1, 0); + /* NVP2_W1_CO_THRID_VALID only set once */ + nvp.w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp.w1, 0xFFFF); + xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1); +} + +static void xive2_os_cam_decode(uint32_t cam, uint8_t *nvp_blk, + uint32_t *nvp_idx, bool *vo, bool *ho) +{ + *nvp_blk = xive2_nvp_blk(cam); + *nvp_idx = xive2_nvp_idx(cam); + *vo = 
!!(cam & TM2_QW1W2_VO); + *ho = !!(cam & TM2_QW1W2_HO); +} + +uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size) +{ + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); + uint32_t qw1w2_new; + uint32_t cam = be32_to_cpu(qw1w2); + uint8_t nvp_blk; + uint32_t nvp_idx; + bool vo; + bool do_save; + + xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_save); + + if (!vo) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n", + nvp_blk, nvp_idx); + } + + /* Invalidate CAM line */ + qw1w2_new = xive_set_field32(TM2_QW1W2_VO, qw1w2, 0); + memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2_new, 4); + + if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) { + xive2_tctx_save_os_ctx(xrtr, tctx, nvp_blk, nvp_idx); + } + + xive_tctx_reset_os_signal(tctx); + return qw1w2; +} + +static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, + uint8_t nvp_blk, uint32_t nvp_idx, + Xive2Nvp *nvp) +{ + CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; + uint32_t pir = env->spr_cb[SPR_PIR].default_value; + uint8_t cppr; + + if (!xive2_nvp_is_hw(nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n", + nvp_blk, nvp_idx); + return 0; + } + + cppr = xive_get_field32(NVP2_W2_CPPR, nvp->w2); + nvp->w2 = xive_set_field32(NVP2_W2_CPPR, nvp->w2, 0); + xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2); + + tctx->regs[TM_QW1_OS + TM_CPPR] = cppr; + /* we don't model LSMFB */ + + nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1); + nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1); + nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp->w1, pir); + + /* + * Checkout privilege: 0:OS, 1:Pool, 2:Hard + * + * TODO: we only support OS push/pull + */ + nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0); + + xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 1); + + /* return restored CPPR to generate a CPU exception if needed */ + return cppr; +} + +static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, + uint8_t nvp_blk, uint32_t nvp_idx, + bool do_restore) +{ + Xive2Nvp nvp; + uint8_t ipb; + + /* + * Grab the associated thread interrupt context registers in the + * associated NVP + */ + if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", + nvp_blk, nvp_idx); + return; + } + + if (!xive2_nvp_is_valid(&nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", + nvp_blk, nvp_idx); + return; + } + + /* Automatically restore thread context registers */ + if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && + do_restore) { + xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp); + } + + ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2); + if (ipb) { + nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0); + xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); + } + /* + * Always call xive_tctx_ipb_update(). Even if there were no + * escalation triggered, there could be a pending interrupt which + * was saved when the context was pulled and that we need to take + * into account by recalculating the PIPR (which is not + * saved/restored). + * It will also raise the External interrupt signal if needed. 
+ */ + xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb); +} + +/* + * Updating the OS CAM line can trigger a resend of interrupt + */ +void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + uint32_t cam = value; + uint32_t qw1w2 = cpu_to_be32(cam); + uint8_t nvp_blk; + uint32_t nvp_idx; + bool vo; + bool do_restore; + + xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore); + + /* First update the thead context */ + memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4); + + /* Check the interrupt pending bits */ + if (vo) { + xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx, + do_restore); + } +} + +/* + * XIVE Router (aka. Virtualization Controller or IVRE) + */ + +int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx, + Xive2Eas *eas) +{ + Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); + + return xrc->get_eas(xrtr, eas_blk, eas_idx, eas); +} + +static +int xive2_router_get_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx, + uint8_t *pq) +{ + Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); + + return xrc->get_pq(xrtr, eas_blk, eas_idx, pq); +} + +static +int xive2_router_set_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx, + uint8_t *pq) +{ + Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); + + return xrc->set_pq(xrtr, eas_blk, eas_idx, pq); +} + +int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx, + Xive2End *end) +{ + Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); + + return xrc->get_end(xrtr, end_blk, end_idx, end); +} + +int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx, + Xive2End *end, uint8_t word_number) +{ + Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); + + return xrc->write_end(xrtr, end_blk, end_idx, end, word_number); +} + +int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx, + Xive2Nvp *nvp) +{ + Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); + + return xrc->get_nvp(xrtr, nvp_blk, nvp_idx, nvp); +} + +int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx, + Xive2Nvp *nvp, uint8_t word_number) +{ + Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); + + return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number); +} + +static int xive2_router_get_block_id(Xive2Router *xrtr) +{ + Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); + + return xrc->get_block_id(xrtr); +} + +/* + * Encode the HW CAM line with 7bit or 8bit thread id. The thread id + * width and block id width is configurable at the IC level. + * + * chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit) + * chipid << 24 | 0000 0000 0000 0001 threadid (8Bit) + */ +static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx) +{ + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; + uint32_t pir = env->spr_cb[SPR_PIR].default_value; + uint8_t blk = xive2_router_get_block_id(xrtr); + uint8_t tid_shift = + xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7; + uint8_t tid_mask = (1 << tid_shift) - 1; + + return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask)); +} + +/* + * The thread context register words are in big-endian format. 
+ */ +int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, + uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool cam_ignore, uint32_t logic_serv) +{ + uint32_t cam = xive2_nvp_cam_line(nvt_blk, nvt_idx); + uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]); + uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); + uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); + uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]); + + /* + * TODO (PowerNV): ignore mode. The low order bits of the NVT + * identifier are ignored in the "CAM" match. + */ + + if (format == 0) { + if (cam_ignore == true) { + /* + * F=0 & i=1: Logical server notification (bits ignored at + * the end of the NVT identifier) + */ + qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n", + nvt_blk, nvt_idx); + return -1; + } + + /* F=0 & i=0: Specific NVT notification */ + + /* PHYS ring */ + if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) && + cam == xive2_tctx_hw_cam_line(xptr, tctx)) { + return TM_QW3_HV_PHYS; + } + + /* HV POOL ring */ + if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) && + cam == xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2)) { + return TM_QW2_HV_POOL; + } + + /* OS ring */ + if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) && + cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) { + return TM_QW1_OS; + } + } else { + /* F=1 : User level Event-Based Branch (EBB) notification */ + + /* USER ring */ + if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) && + (cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) && + (be32_to_cpu(qw0w2) & TM2_QW0W2_VU) && + (logic_serv == xive_get_field32(TM2_QW0W2_LOGIC_SERV, qw0w2))) { + return TM_QW0_USER; + } + } + return -1; +} + +static void xive2_router_realize(DeviceState *dev, Error **errp) +{ + Xive2Router *xrtr = XIVE2_ROUTER(dev); + + assert(xrtr->xfb); +} + +/* + * Notification using the END ESe/ESn bit (Event State Buffer for + * escalation and notification). Profide futher coalescing in the + * Router. + */ +static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk, + uint32_t end_idx, Xive2End *end, + uint32_t end_esmask) +{ + uint8_t pq = xive_get_field32(end_esmask, end->w1); + bool notify = xive_esb_trigger(&pq); + + if (pq != xive_get_field32(end_esmask, end->w1)) { + end->w1 = xive_set_field32(end_esmask, end->w1, pq); + xive2_router_write_end(xrtr, end_blk, end_idx, end, 1); + } + + /* ESe/n[Q]=1 : end of notification */ + return notify; +} + +/* + * An END trigger can come from an event trigger (IPI or HW) or from + * another chip. We don't model the PowerBus but the END trigger + * message has the same parameters than in the function below. + */ +static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, + uint32_t end_idx, uint32_t end_data) +{ + Xive2End end; + uint8_t priority; + uint8_t format; + bool found; + Xive2Nvp nvp; + uint8_t nvp_blk; + uint32_t nvp_idx; + + /* END cache lookup */ + if (xive2_router_get_end(xrtr, end_blk, end_idx, &end)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk, + end_idx); + return; + } + + if (!xive2_end_is_valid(&end)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n", + end_blk, end_idx); + return; + } + + if (xive2_end_is_enqueue(&end)) { + xive2_end_enqueue(&end, end_data); + /* Enqueuing event data modifies the EQ toggle and index */ + xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1); + } + + /* + * When the END is silent, we skip the notification part. 
+ */ + if (xive2_end_is_silent_escalation(&end)) { + goto do_escalation; + } + + /* + * The W7 format depends on the F bit in W6. It defines the type + * of the notification : + * + * F=0 : single or multiple NVP notification + * F=1 : User level Event-Based Branch (EBB) notification, no + * priority + */ + format = xive_get_field32(END2_W6_FORMAT_BIT, end.w6); + priority = xive_get_field32(END2_W7_F0_PRIORITY, end.w7); + + /* The END is masked */ + if (format == 0 && priority == 0xff) { + return; + } + + /* + * Check the END ESn (Event State Buffer for notification) for + * even futher coalescing in the Router + */ + if (!xive2_end_is_notify(&end)) { + /* ESn[Q]=1 : end of notification */ + if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx, + &end, END2_W1_ESn)) { + return; + } + } + + /* + * Follows IVPE notification + */ + nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6); + nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6); + + /* NVP cache lookup */ + if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n", + nvp_blk, nvp_idx); + return; + } + + if (!xive2_nvp_is_valid(&nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n", + nvp_blk, nvp_idx); + return; + } + + found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx, + xive_get_field32(END2_W6_IGNORE, end.w7), + priority, + xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7)); + + /* TODO: Auto EOI. */ + + if (found) { + return; + } + + /* + * If no matching NVP is dispatched on a HW thread : + * - specific VP: update the NVP structure if backlog is activated + * - logical server : forward request to IVPE (not supported) + */ + if (xive2_end_is_backlog(&end)) { + uint8_t ipb; + + if (format == 1) { + qemu_log_mask(LOG_GUEST_ERROR, + "XIVE: END %x/%x invalid config: F1 & backlog\n", + end_blk, end_idx); + return; + } + + /* + * Record the IPB in the associated NVP structure for later + * use. The presenter will resend the interrupt when the vCPU + * is dispatched again on a HW thread. 
+ */ + ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) | + xive_priority_to_ipb(priority); + nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); + xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); + + /* + * On HW, follows a "Broadcast Backlog" to IVPEs + */ + } + +do_escalation: + /* + * If activated, escalate notification using the ESe PQ bits and + * the EAS in w4-5 + */ + if (!xive2_end_is_escalate(&end)) { + return; + } + + /* + * Check the END ESe (Event State Buffer for escalation) for even + * futher coalescing in the Router + */ + if (!xive2_end_is_uncond_escalation(&end)) { + /* ESe[Q]=1 : end of escalation notification */ + if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx, + &end, END2_W1_ESe)) { + return; + } + } + + /* + * The END trigger becomes an Escalation trigger + */ + xive2_router_end_notify(xrtr, + xive_get_field32(END2_W4_END_BLOCK, end.w4), + xive_get_field32(END2_W4_ESC_END_INDEX, end.w4), + xive_get_field32(END2_W5_ESC_END_DATA, end.w5)); +} + +void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) +{ + Xive2Router *xrtr = XIVE2_ROUTER(xn); + uint8_t eas_blk = XIVE_EAS_BLOCK(lisn); + uint32_t eas_idx = XIVE_EAS_INDEX(lisn); + Xive2Eas eas; + + /* EAS cache lookup */ + if (xive2_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn); + return; + } + + if (!pq_checked) { + bool notify; + uint8_t pq; + + /* PQ cache lookup */ + if (xive2_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) { + /* Set FIR */ + g_assert_not_reached(); + } + + notify = xive_esb_trigger(&pq); + + if (xive2_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) { + /* Set FIR */ + g_assert_not_reached(); + } + + if (!notify) { + return; + } + } + + if (!xive2_eas_is_valid(&eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN %x\n", lisn); + return; + } + + if (xive2_eas_is_masked(&eas)) { + /* Notification completed */ + return; + } + + /* + * The event trigger becomes an END trigger + */ + xive2_router_end_notify(xrtr, + xive_get_field64(EAS2_END_BLOCK, eas.w), + xive_get_field64(EAS2_END_INDEX, eas.w), + xive_get_field64(EAS2_END_DATA, eas.w)); +} + +static Property xive2_router_properties[] = { + DEFINE_PROP_LINK("xive-fabric", Xive2Router, xfb, + TYPE_XIVE_FABRIC, XiveFabric *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xive2_router_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); + + dc->desc = "XIVE2 Router Engine"; + device_class_set_props(dc, xive2_router_properties); + /* Parent is SysBusDeviceClass. 
No need to call its realize hook */ + dc->realize = xive2_router_realize; + xnc->notify = xive2_router_notify; +} + +static const TypeInfo xive2_router_info = { + .name = TYPE_XIVE2_ROUTER, + .parent = TYPE_SYS_BUS_DEVICE, + .abstract = true, + .instance_size = sizeof(Xive2Router), + .class_size = sizeof(Xive2RouterClass), + .class_init = xive2_router_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_XIVE_NOTIFIER }, + { TYPE_XIVE_PRESENTER }, + { } + } +}; + +static inline bool addr_is_even(hwaddr addr, uint32_t shift) +{ + return !((addr >> shift) & 1); +} + +static uint64_t xive2_end_source_read(void *opaque, hwaddr addr, unsigned size) +{ + Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque); + uint32_t offset = addr & 0xFFF; + uint8_t end_blk; + uint32_t end_idx; + Xive2End end; + uint32_t end_esmask; + uint8_t pq; + uint64_t ret; + + /* + * The block id should be deduced from the load address on the END + * ESB MMIO but our model only supports a single block per XIVE chip. + */ + end_blk = xive2_router_get_block_id(xsrc->xrtr); + end_idx = addr >> (xsrc->esb_shift + 1); + + if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk, + end_idx); + return -1; + } + + if (!xive2_end_is_valid(&end)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n", + end_blk, end_idx); + return -1; + } + + end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn : + END2_W1_ESe; + pq = xive_get_field32(end_esmask, end.w1); + + switch (offset) { + case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF: + ret = xive_esb_eoi(&pq); + + /* Forward the source event notification for routing ?? */ + break; + + case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF: + ret = pq; + break; + + case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: + case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: + case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: + case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF: + ret = xive_esb_set(&pq, (offset >> 8) & 0x3); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n", + offset); + return -1; + } + + if (pq != xive_get_field32(end_esmask, end.w1)) { + end.w1 = xive_set_field32(end_esmask, end.w1, pq); + xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1); + } + + return ret; +} + +static void xive2_end_source_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque); + uint32_t offset = addr & 0xFFF; + uint8_t end_blk; + uint32_t end_idx; + Xive2End end; + uint32_t end_esmask; + uint8_t pq; + bool notify = false; + + /* + * The block id should be deduced from the load address on the END + * ESB MMIO but our model only supports a single block per XIVE chip. + */ + end_blk = xive2_router_get_block_id(xsrc->xrtr); + end_idx = addr >> (xsrc->esb_shift + 1); + + if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk, + end_idx); + return; + } + + if (!xive2_end_is_valid(&end)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n", + end_blk, end_idx); + return; + } + + end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn : + END2_W1_ESe; + pq = xive_get_field32(end_esmask, end.w1); + + switch (offset) { + case 0 ... 0x3FF: + notify = xive_esb_trigger(&pq); + break; + + case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF: + /* TODO: can we check StoreEOI availability from the router ? 
*/ + notify = xive_esb_eoi(&pq); + break; + + case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF: + if (end_esmask == END2_W1_ESe) { + qemu_log_mask(LOG_GUEST_ERROR, + "XIVE: END %x/%x can not EQ inject on ESe\n", + end_blk, end_idx); + return; + } + notify = true; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB write addr %d\n", + offset); + return; + } + + if (pq != xive_get_field32(end_esmask, end.w1)) { + end.w1 = xive_set_field32(end_esmask, end.w1, pq); + xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1); + } + + /* TODO: Forward the source event notification for routing */ + if (notify) { + ; + } +} + +static const MemoryRegionOps xive2_end_source_ops = { + .read = xive2_end_source_read, + .write = xive2_end_source_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +static void xive2_end_source_realize(DeviceState *dev, Error **errp) +{ + Xive2EndSource *xsrc = XIVE2_END_SOURCE(dev); + + assert(xsrc->xrtr); + + if (!xsrc->nr_ends) { + error_setg(errp, "Number of interrupt needs to be greater than 0"); + return; + } + + if (xsrc->esb_shift != XIVE_ESB_4K && + xsrc->esb_shift != XIVE_ESB_64K) { + error_setg(errp, "Invalid ESB shift setting"); + return; + } + + /* + * Each END is assigned an even/odd pair of MMIO pages, the even page + * manages the ESn field while the odd page manages the ESe field. + */ + memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc), + &xive2_end_source_ops, xsrc, "xive.end", + (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends); +} + +static Property xive2_end_source_properties[] = { + DEFINE_PROP_UINT32("nr-ends", Xive2EndSource, nr_ends, 0), + DEFINE_PROP_UINT32("shift", Xive2EndSource, esb_shift, XIVE_ESB_64K), + DEFINE_PROP_LINK("xive", Xive2EndSource, xrtr, TYPE_XIVE2_ROUTER, + Xive2Router *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xive2_end_source_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "XIVE END Source"; + device_class_set_props(dc, xive2_end_source_properties); + dc->realize = xive2_end_source_realize; + dc->user_creatable = false; +} + +static const TypeInfo xive2_end_source_info = { + .name = TYPE_XIVE2_END_SOURCE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(Xive2EndSource), + .class_init = xive2_end_source_class_init, +}; + +static void xive2_register_types(void) +{ + type_register_static(&xive2_router_info); + type_register_static(&xive2_end_source_info); +} + +type_init(xive2_register_types) diff --git a/hw/ipack/ipack.c b/hw/ipack/ipack.c index f19ecaeb1c..ae20f36da6 100644 --- a/hw/ipack/ipack.c +++ b/hw/ipack/ipack.c @@ -30,12 +30,12 @@ IPackDevice *ipack_device_find(IPackBus *bus, int32_t slot) return NULL; } -void ipack_bus_new_inplace(IPackBus *bus, size_t bus_size, - DeviceState *parent, - const char *name, uint8_t n_slots, - qemu_irq_handler handler) +void ipack_bus_init(IPackBus *bus, size_t bus_size, + DeviceState *parent, + uint8_t n_slots, + qemu_irq_handler handler) { - qbus_create_inplace(bus, bus_size, TYPE_IPACK_BUS, parent, name); + qbus_init(bus, bus_size, TYPE_IPACK_BUS, parent, NULL); bus->n_slots = n_slots; bus->set_irq = handler; } diff --git a/hw/ipack/tpci200.c b/hw/ipack/tpci200.c index d107e134c4..1f764fc85b 100644 --- a/hw/ipack/tpci200.c +++ b/hw/ipack/tpci200.c @@ -611,8 +611,8 @@ static void tpci200_realize(PCIDevice *pci_dev, Error **errp) pci_register_bar(&s->dev, 4, 
PCI_BASE_ADDRESS_SPACE_MEMORY, &s->las2); pci_register_bar(&s->dev, 5, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->las3); - ipack_bus_new_inplace(&s->bus, sizeof(s->bus), DEVICE(pci_dev), NULL, - N_MODULES, tpci200_set_irq); + ipack_bus_init(&s->bus, sizeof(s->bus), DEVICE(pci_dev), + N_MODULES, tpci200_set_irq); } static const VMStateDescription vmstate_tpci200 = { diff --git a/hw/ipmi/ipmi.c b/hw/ipmi/ipmi.c index 8d35c9fdd6..bbb07b151e 100644 --- a/hw/ipmi/ipmi.c +++ b/hw/ipmi/ipmi.c @@ -85,7 +85,7 @@ static void ipmi_interface_class_init(ObjectClass *class, void *data) ik->do_hw_op = ipmi_do_hw_op; } -static TypeInfo ipmi_interface_type_info = { +static const TypeInfo ipmi_interface_type_info = { .name = TYPE_IPMI_INTERFACE, .parent = TYPE_INTERFACE, .class_size = sizeof(IPMIInterfaceClass), @@ -120,7 +120,7 @@ static void bmc_class_init(ObjectClass *oc, void *data) device_class_set_props(dc, ipmi_bmc_properties); } -static TypeInfo ipmi_bmc_type_info = { +static const TypeInfo ipmi_bmc_type_info = { .name = TYPE_IPMI_BMC, .parent = TYPE_DEVICE, .instance_size = sizeof(IPMIBmc), diff --git a/hw/ipmi/isa_ipmi_bt.c b/hw/ipmi/isa_ipmi_bt.c index 02625eb94e..a83e7243d6 100644 --- a/hw/ipmi/isa_ipmi_bt.c +++ b/hw/ipmi/isa_ipmi_bt.c @@ -31,6 +31,7 @@ #include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "qom/object.h" +#include "hw/acpi/ipmi.h" #define TYPE_ISA_IPMI_BT "isa-ipmi-bt" OBJECT_DECLARE_SIMPLE_TYPE(ISAIPMIBTDevice, ISA_IPMI_BT) @@ -92,7 +93,7 @@ static void isa_ipmi_bt_realize(DeviceState *dev, Error **errp) } if (iib->isairq > 0) { - isa_init_irq(isadev, &iib->irq, iib->isairq); + iib->irq = isa_get_irq(isadev, iib->isairq); iib->bt.use_irq = 1; iib->bt.raise_irq = isa_ipmi_bt_raise_irq; iib->bt.lower_irq = isa_ipmi_bt_lower_irq; @@ -144,6 +145,7 @@ static void isa_ipmi_bt_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); IPMIInterfaceClass *iic = IPMI_INTERFACE_CLASS(oc); + AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(oc); dc->realize = isa_ipmi_bt_realize; device_class_set_props(dc, ipmi_isa_properties); @@ -151,6 +153,7 @@ static void isa_ipmi_bt_class_init(ObjectClass *oc, void *data) iic->get_backend_data = isa_ipmi_bt_get_backend_data; ipmi_bt_class_init(iic); iic->get_fwinfo = isa_ipmi_bt_get_fwinfo; + adevc->build_dev_aml = build_ipmi_dev_aml; } static const TypeInfo isa_ipmi_bt_info = { @@ -161,6 +164,7 @@ static const TypeInfo isa_ipmi_bt_info = { .class_init = isa_ipmi_bt_class_init, .interfaces = (InterfaceInfo[]) { { TYPE_IPMI_INTERFACE }, + { TYPE_ACPI_DEV_AML_IF }, { } } }; diff --git a/hw/ipmi/isa_ipmi_kcs.c b/hw/ipmi/isa_ipmi_kcs.c index 3b23ad08b3..b2ed70b9da 100644 --- a/hw/ipmi/isa_ipmi_kcs.c +++ b/hw/ipmi/isa_ipmi_kcs.c @@ -31,6 +31,7 @@ #include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "qom/object.h" +#include "hw/acpi/ipmi.h" #define TYPE_ISA_IPMI_KCS "isa-ipmi-kcs" OBJECT_DECLARE_SIMPLE_TYPE(ISAIPMIKCSDevice, ISA_IPMI_KCS) @@ -91,7 +92,7 @@ static void ipmi_isa_realize(DeviceState *dev, Error **errp) } if (iik->isairq > 0) { - isa_init_irq(isadev, &iik->irq, iik->isairq); + iik->irq = isa_get_irq(isadev, iik->isairq); iik->kcs.use_irq = 1; iik->kcs.raise_irq = isa_ipmi_kcs_raise_irq; iik->kcs.lower_irq = isa_ipmi_kcs_lower_irq; @@ -151,6 +152,7 @@ static void isa_ipmi_kcs_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); IPMIInterfaceClass *iic = IPMI_INTERFACE_CLASS(oc); + AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(oc); dc->realize = ipmi_isa_realize; 
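The adevc->build_dev_aml assignments added here for KCS and in the neighbouring BT/SMBus IPMI hunks all follow the same TYPE_ACPI_DEV_AML_IF pattern: the device implements a build_dev_aml() hook and the owning bus or bridge walks its children with call_dev_aml_func() instead of the old ISADeviceClass::build_aml callback. A bare-bones sketch of how a device opts in, with MyDev/my_dev_* as placeholder names:

    static void my_dev_build_aml(AcpiDevAmlIf *adev, Aml *scope)
    {
        /* describe the device to the guest; here just an empty Device() node */
        aml_append(scope, aml_device("MYD0"));
    }

    static void my_dev_class_init(ObjectClass *oc, void *data)
    {
        AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(oc);

        adevc->build_dev_aml = my_dev_build_aml;
    }

    /*
     * The TypeInfo must also list { TYPE_ACPI_DEV_AML_IF } in .interfaces.
     * The bus side then simply does, for each child:
     *     call_dev_aml_func(DEVICE(kid->child), scope);
     * as isa_build_aml() and build_ich9_isa_aml() do in the hunks below.
     */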
device_class_set_props(dc, ipmi_isa_properties); @@ -158,6 +160,7 @@ static void isa_ipmi_kcs_class_init(ObjectClass *oc, void *data) iic->get_backend_data = isa_ipmi_kcs_get_backend_data; ipmi_kcs_class_init(iic); iic->get_fwinfo = isa_ipmi_kcs_get_fwinfo; + adevc->build_dev_aml = build_ipmi_dev_aml; } static const TypeInfo isa_ipmi_kcs_info = { @@ -168,6 +171,7 @@ static const TypeInfo isa_ipmi_kcs_info = { .class_init = isa_ipmi_kcs_class_init, .interfaces = (InterfaceInfo[]) { { TYPE_IPMI_INTERFACE }, + { TYPE_ACPI_DEV_AML_IF }, { } } }; diff --git a/hw/ipmi/smbus_ipmi.c b/hw/ipmi/smbus_ipmi.c index 1fdf0a66b6..d0991ab7f9 100644 --- a/hw/ipmi/smbus_ipmi.c +++ b/hw/ipmi/smbus_ipmi.c @@ -28,6 +28,7 @@ #include "qemu/error-report.h" #include "hw/ipmi/ipmi.h" #include "qom/object.h" +#include "hw/acpi/ipmi.h" #define TYPE_SMBUS_IPMI "smbus-ipmi" OBJECT_DECLARE_SIMPLE_TYPE(SMBusIPMIDevice, SMBUS_IPMI) @@ -280,7 +281,9 @@ static int ipmi_write_data(SMBusDevice *dev, uint8_t *buf, uint8_t len) */ send = true; } - memcpy(sid->inmsg + sid->inlen, buf, len); + if (len > 0) { + memcpy(sid->inmsg + sid->inlen, buf, len); + } sid->inlen += len; break; } @@ -353,6 +356,7 @@ static void smbus_ipmi_class_init(ObjectClass *oc, void *data) DeviceClass *dc = DEVICE_CLASS(oc); IPMIInterfaceClass *iic = IPMI_INTERFACE_CLASS(oc); SMBusDeviceClass *sc = SMBUS_DEVICE_CLASS(oc); + AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(oc); sc->receive_byte = ipmi_receive_byte; sc->write_data = ipmi_write_data; @@ -363,6 +367,7 @@ static void smbus_ipmi_class_init(ObjectClass *oc, void *data) iic->handle_if_event = smbus_ipmi_handle_event; iic->set_irq_enable = smbus_ipmi_set_irq_enable; iic->get_fwinfo = smbus_ipmi_get_fwinfo; + adevc->build_dev_aml = build_ipmi_dev_aml; } static const TypeInfo smbus_ipmi_info = { @@ -373,6 +378,7 @@ static const TypeInfo smbus_ipmi_info = { .class_init = smbus_ipmi_class_init, .interfaces = (InterfaceInfo[]) { { TYPE_IPMI_INTERFACE }, + { TYPE_ACPI_DEV_AML_IF }, { } } }; diff --git a/hw/isa/Kconfig b/hw/isa/Kconfig index d42143a991..18b5c6bf3f 100644 --- a/hw/isa/Kconfig +++ b/hw/isa/Kconfig @@ -33,13 +33,20 @@ config PC87312 config PIIX3 bool + select I8257 select ISA_BUS config PIIX4 bool # For historical reasons, SuperIO devices are created in the board # for PIIX4. + select ACPI_PIIX4 + select I8254 + select I8257 + select I8259 + select IDE_PIIX select ISA_BUS + select MC146818RTC select USB_UHCI config VT82C686 @@ -53,6 +60,7 @@ config VT82C686 select I8254 select I8257 select I8259 + select IDE_VIA select MC146818RTC select PARALLEL @@ -67,6 +75,7 @@ config LPC_ICH9 bool # For historical reasons, SuperIO devices are created in the board # for ICH9. 
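The select lines added for PIIX3, PIIX4 and (just below) LPC_ICH9 track the realize-time changes later in this series: once the south bridge models instantiate their embedded controllers directly (see the i8257_dma_init() calls added to lpc_ich9.c and piix3.c below), the device models they depend on must always be built whenever the bridge is, which is what Kconfig select expresses. A minimal sketch with a placeholder symbol:

    config MY_SOUTHBRIDGE
        bool
        select ISA_BUS   # provides the ISA bus the embedded devices sit on
        select I8257     # realize() creates the DMA controller itself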
+ select I8257 select ISA_BUS select ACPI_SMBUS select ACPI_X86_ICH diff --git a/hw/isa/isa-bus.c b/hw/isa/isa-bus.c index f4576b8e09..f0c035c2f5 100644 --- a/hw/isa/isa-bus.c +++ b/hw/isa/isa-bus.c @@ -21,21 +21,19 @@ #include "qemu/error-report.h" #include "qemu/module.h" #include "qapi/error.h" -#include "monitor/monitor.h" #include "hw/sysbus.h" #include "sysemu/sysemu.h" #include "hw/isa/isa.h" +#include "hw/acpi/acpi_aml_interface.h" static ISABus *isabus; -static void isabus_dev_print(Monitor *mon, DeviceState *dev, int indent); static char *isabus_get_fw_dev_path(DeviceState *dev); static void isa_bus_class_init(ObjectClass *klass, void *data) { BusClass *k = BUS_CLASS(klass); - k->print_dev = isabus_dev_print; k->get_fw_dev_path = isabus_get_fw_dev_path; } @@ -64,7 +62,7 @@ ISABus *isa_bus_new(DeviceState *dev, MemoryRegion* address_space, sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); } - isabus = ISA_BUS(qbus_create(TYPE_ISA_BUS, dev, NULL)); + isabus = ISA_BUS(qbus_new(TYPE_ISA_BUS, dev, NULL)); isabus->address_space = address_space; isabus->address_space_io = address_space_io; return isabus; @@ -88,19 +86,9 @@ qemu_irq isa_get_irq(ISADevice *dev, unsigned isairq) return isabus->irqs[isairq]; } -void isa_init_irq(ISADevice *dev, qemu_irq *p, unsigned isairq) -{ - assert(dev->nirqs < ARRAY_SIZE(dev->isairq)); - assert(isairq < ISA_NUM_IRQS); - dev->isairq[dev->nirqs] = isairq; - *p = isa_get_irq(dev, isairq); - dev->nirqs++; -} - void isa_connect_gpio_out(ISADevice *isadev, int gpioirq, unsigned isairq) { - qemu_irq irq; - isa_init_irq(isadev, &irq, isairq); + qemu_irq irq = isa_get_irq(isadev, isairq); qdev_connect_gpio_out(DEVICE(isadev), gpioirq, irq); } @@ -161,14 +149,6 @@ int isa_register_portio_list(ISADevice *dev, return 0; } -static void isa_device_init(Object *obj) -{ - ISADevice *dev = ISA_DEVICE(obj); - - dev->isairq[0] = -1; - dev->isairq[1] = -1; -} - ISADevice *isa_new(const char *name) { return ISA_DEVICE(qdev_new(name)); @@ -195,6 +175,7 @@ bool isa_realize_and_unref(ISADevice *dev, ISABus *bus, Error **errp) ISADevice *isa_vga_init(ISABus *bus) { + vga_interface_created = true; switch (vga_interface_type) { case VGA_CIRRUS: return isa_create_simple(bus, "isa-cirrus-vga"); @@ -218,28 +199,9 @@ ISADevice *isa_vga_init(ISABus *bus) void isa_build_aml(ISABus *bus, Aml *scope) { BusChild *kid; - ISADevice *dev; - ISADeviceClass *dc; QTAILQ_FOREACH(kid, &bus->parent_obj.children, sibling) { - dev = ISA_DEVICE(kid->child); - dc = ISA_DEVICE_GET_CLASS(dev); - if (dc->build_aml) { - dc->build_aml(dev, scope); - } - } -} - -static void isabus_dev_print(Monitor *mon, DeviceState *dev, int indent) -{ - ISADevice *d = ISA_DEVICE(dev); - - if (d->isairq[1] != -1) { - monitor_printf(mon, "%*sisa irqs %d,%d\n", indent, "", - d->isairq[0], d->isairq[1]); - } else if (d->isairq[0] != -1) { - monitor_printf(mon, "%*sisa irq %d\n", indent, "", - d->isairq[0]); + call_dev_aml_func(DEVICE(kid->child), scope); } } @@ -268,7 +230,6 @@ static const TypeInfo isa_device_type_info = { .name = TYPE_ISA_DEVICE, .parent = TYPE_DEVICE, .instance_size = sizeof(ISADevice), - .instance_init = isa_device_init, .abstract = true, .class_size = sizeof(ISADeviceClass), .class_init = isa_device_class_init, diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c index 5f9de0239c..6c44cc9767 100644 --- a/hw/isa/lpc_ich9.c +++ b/hw/isa/lpc_ich9.c @@ -31,8 +31,10 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "cpu.h" +#include "qapi/error.h" #include "qapi/visitor.h" #include 
"qemu/range.h" +#include "hw/dma/i8257.h" #include "hw/isa/isa.h" #include "migration/vmstate.h" #include "hw/irq.h" @@ -49,6 +51,7 @@ #include "hw/core/cpu.h" #include "hw/nvram/fw_cfg.h" #include "qemu/cutils.h" +#include "hw/acpi/acpi_aml_interface.h" /*****************************************************************************/ /* ICH9 LPC PCI to ISA bridge */ @@ -676,6 +679,18 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp) DeviceState *dev = DEVICE(d); ISABus *isa_bus; + if ((lpc->smi_host_features & BIT_ULL(ICH9_LPC_SMI_F_CPU_HOT_UNPLUG_BIT)) && + !(lpc->smi_host_features & BIT_ULL(ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT))) { + /* + * smi_features_ok_callback() throws an error on this. + * + * So bail out here instead of advertizing the invalid + * configuration and get obscure firmware failures from that. + */ + error_setg(errp, "cpu hot-unplug requires cpu hot-plug"); + return; + } + isa_bus = isa_bus_new(DEVICE(d), get_system_memory(), get_system_io(), errp); if (!isa_bus) { @@ -708,6 +723,8 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp) qdev_init_gpio_out_named(dev, lpc->gsi, ICH9_GPIO_GSI, GSI_NUM_PINS); isa_bus_irqs(isa_bus, lpc->gsi); + + i8257_dma_init(isa_bus, 0); } static bool ich9_rst_cnt_needed(void *opaque) @@ -790,12 +807,43 @@ static void ich9_send_gpe(AcpiDeviceIf *adev, AcpiEventStatusBits ev) acpi_send_gpe_event(&s->pm.acpi_regs, s->pm.irq, ev); } +static void build_ich9_isa_aml(AcpiDevAmlIf *adev, Aml *scope) +{ + Aml *field; + BusChild *kid; + ICH9LPCState *s = ICH9_LPC_DEVICE(adev); + BusState *bus = BUS(s->isa_bus); + Aml *sb_scope = aml_scope("\\_SB"); + + /* ICH9 PCI to ISA irq remapping */ + aml_append(scope, aml_operation_region("PIRQ", AML_PCI_CONFIG, + aml_int(0x60), 0x0C)); + /* Fields declarion has to happen *after* operation region */ + field = aml_field("PCI0.SF8.PIRQ", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); + aml_append(field, aml_named_field("PRQA", 8)); + aml_append(field, aml_named_field("PRQB", 8)); + aml_append(field, aml_named_field("PRQC", 8)); + aml_append(field, aml_named_field("PRQD", 8)); + aml_append(field, aml_reserved_field(0x20)); + aml_append(field, aml_named_field("PRQE", 8)); + aml_append(field, aml_named_field("PRQF", 8)); + aml_append(field, aml_named_field("PRQG", 8)); + aml_append(field, aml_named_field("PRQH", 8)); + aml_append(sb_scope, field); + aml_append(scope, sb_scope); + + QTAILQ_FOREACH(kid, &bus->children, sibling) { + call_dev_aml_func(DEVICE(kid->child), scope); + } +} + static void ich9_lpc_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_CLASS(klass); + AcpiDevAmlIfClass *amldevc = ACPI_DEV_AML_IF_CLASS(klass); set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); dc->reset = ich9_lpc_reset; @@ -820,6 +868,7 @@ static void ich9_lpc_class_init(ObjectClass *klass, void *data) adevc->ospm_status = ich9_pm_ospm_status; adevc->send_event = ich9_send_gpe; adevc->madt_cpu = pc_madt_cpu_entry; + amldevc->build_dev_aml = build_ich9_isa_aml; } static const TypeInfo ich9_lpc_info = { @@ -832,6 +881,7 @@ static const TypeInfo ich9_lpc_info = { { TYPE_HOTPLUG_HANDLER }, { TYPE_ACPI_DEVICE_IF }, { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { TYPE_ACPI_DEV_AML_IF }, { } } }; diff --git a/hw/isa/piix3.c b/hw/isa/piix3.c index dab901c9ad..eabad7ba58 100644 --- a/hw/isa/piix3.c +++ b/hw/isa/piix3.c @@ -24,20 +24,18 @@ #include "qemu/osdep.h" 
#include "qemu/range.h" +#include "qapi/error.h" +#include "hw/dma/i8257.h" #include "hw/southbridge/piix.h" #include "hw/irq.h" #include "hw/isa/isa.h" #include "hw/xen/xen.h" -#include "sysemu/xen.h" -#include "sysemu/reset.h" #include "sysemu/runstate.h" #include "migration/vmstate.h" +#include "hw/acpi/acpi_aml_interface.h" #define XEN_PIIX_NUM_PIRQS 128ULL -#define TYPE_PIIX3_DEVICE "PIIX3" -#define TYPE_PIIX3_XEN_DEVICE "PIIX3-xen" - static void piix3_set_irq_pic(PIIX3State *piix3, int pic_irq) { qemu_set_irq(piix3->pic[pic_irq], @@ -81,6 +79,17 @@ static void piix3_set_irq(void *opaque, int pirq, int level) piix3_set_irq_level(piix3, pirq, level); } +/* + * Return the global irq number corresponding to a given device irq + * pin. We could also use the bus number to have a more precise mapping. + */ +static int pci_slot_get_pirq(PCIDevice *pci_dev, int pci_intx) +{ + int slot_addend; + slot_addend = PCI_SLOT(pci_dev->devfn) - 1; + return (pci_intx + slot_addend) & 3; +} + static PCIINTxRoute piix3_route_intx_pin_to_irq(void *opaque, int pin) { PIIX3State *piix3 = opaque; @@ -128,13 +137,26 @@ static void piix3_write_config(PCIDevice *dev, static void piix3_write_config_xen(PCIDevice *dev, uint32_t address, uint32_t val, int len) { - xen_piix_pci_write_config_client(address, val, len); + int i; + + /* Scan for updates to PCI link routes (0x60-0x63). */ + for (i = 0; i < len; i++) { + uint8_t v = (val >> (8 * i)) & 0xff; + if (v & 0x80) { + v = 0; + } + v &= 0xf; + if (((address + i) >= PIIX_PIRQCA) && ((address + i) <= PIIX_PIRQCD)) { + xen_set_pci_link_route(address + i - PIIX_PIRQCA, v); + } + } + piix3_write_config(dev, address, val, len); } -static void piix3_reset(void *opaque) +static void piix3_reset(DeviceState *dev) { - PIIX3State *d = opaque; + PIIX3State *d = PIIX3_PCI_DEVICE(dev); uint8_t *pci_conf = d->dev.config; pci_conf[0x04] = 0x07; /* master, memory and I/O */ @@ -266,15 +288,21 @@ static uint64_t rcr_read(void *opaque, hwaddr addr, unsigned len) static const MemoryRegionOps rcr_ops = { .read = rcr_read, .write = rcr_write, - .endianness = DEVICE_LITTLE_ENDIAN + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, }; -static void piix3_realize(PCIDevice *dev, Error **errp) +static void pci_piix3_realize(PCIDevice *dev, Error **errp) { PIIX3State *d = PIIX3_PCI_DEVICE(dev); + ISABus *isa_bus; - if (!isa_bus_new(DEVICE(d), get_system_memory(), - pci_address_space_io(dev), errp)) { + isa_bus = isa_bus_new(DEVICE(d), pci_address_space(dev), + pci_address_space_io(dev), errp); + if (!isa_bus) { return; } @@ -283,18 +311,43 @@ static void piix3_realize(PCIDevice *dev, Error **errp) memory_region_add_subregion_overlap(pci_address_space_io(dev), PIIX_RCR_IOPORT, &d->rcr_mem, 1); - qemu_register_reset(piix3_reset, d); + i8257_dma_init(isa_bus, 0); +} + +static void build_pci_isa_aml(AcpiDevAmlIf *adev, Aml *scope) +{ + Aml *field; + BusChild *kid; + Aml *sb_scope = aml_scope("\\_SB"); + BusState *bus = qdev_get_child_bus(DEVICE(adev), "isa.0"); + + /* PIIX PCI to ISA irq remapping */ + aml_append(scope, aml_operation_region("P40C", AML_PCI_CONFIG, + aml_int(0x60), 0x04)); + /* Fields declarion has to happen *after* operation region */ + field = aml_field("PCI0.S08.P40C", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); + aml_append(field, aml_named_field("PRQ0", 8)); + aml_append(field, aml_named_field("PRQ1", 8)); + aml_append(field, aml_named_field("PRQ2", 8)); + aml_append(field, aml_named_field("PRQ3", 8)); + aml_append(sb_scope, 
field); + aml_append(scope, sb_scope); + + QTAILQ_FOREACH(kid, &bus->children, sibling) { + call_dev_aml_func(DEVICE(kid->child), scope); + } } static void pci_piix3_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); + dc->reset = piix3_reset; dc->desc = "ISA bridge"; dc->vmsd = &vmstate_piix3; dc->hotpluggable = false; - k->realize = piix3_realize; k->vendor_id = PCI_VENDOR_ID_INTEL; /* 82371SB PIIX3 PCI-to-ISA bridge (Step A1) */ k->device_id = PCI_DEVICE_ID_INTEL_82371SB_0; @@ -304,6 +357,7 @@ static void pci_piix3_class_init(ObjectClass *klass, void *data) * pc_piix.c's pc_init1() */ dc->user_creatable = false; + adevc->build_dev_aml = build_pci_isa_aml; } static const TypeInfo piix3_pci_type_info = { @@ -314,15 +368,33 @@ static const TypeInfo piix3_pci_type_info = { .class_init = pci_piix3_class_init, .interfaces = (InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { TYPE_ACPI_DEV_AML_IF }, { }, }, }; +static void piix3_realize(PCIDevice *dev, Error **errp) +{ + ERRP_GUARD(); + PIIX3State *piix3 = PIIX3_PCI_DEVICE(dev); + PCIBus *pci_bus = pci_get_bus(dev); + + pci_piix3_realize(dev, errp); + if (*errp) { + return; + } + + pci_bus_irqs(pci_bus, piix3_set_irq, pci_slot_get_pirq, + piix3, PIIX_NUM_PIRQS); + pci_bus_set_route_irq_fn(pci_bus, piix3_route_intx_pin_to_irq); +} + static void piix3_class_init(ObjectClass *klass, void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); k->config_write = piix3_write_config; + k->realize = piix3_realize; } static const TypeInfo piix3_info = { @@ -331,12 +403,34 @@ static const TypeInfo piix3_info = { .class_init = piix3_class_init, }; +static void piix3_xen_realize(PCIDevice *dev, Error **errp) +{ + ERRP_GUARD(); + PIIX3State *piix3 = PIIX3_PCI_DEVICE(dev); + PCIBus *pci_bus = pci_get_bus(dev); + + pci_piix3_realize(dev, errp); + if (*errp) { + return; + } + + /* + * Xen supports additional interrupt routes from the PCI devices to + * the IOAPIC: the four pins of each PCI device on the bus are also + * connected to the IOAPIC directly. + * These additional routes can be discovered through ACPI. + */ + pci_bus_irqs(pci_bus, xen_piix3_set_irq, xen_pci_slot_get_pirq, + piix3, XEN_PIIX_NUM_PIRQS); +} + static void piix3_xen_class_init(ObjectClass *klass, void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); k->config_write = piix3_write_config_xen; -}; + k->realize = piix3_xen_realize; +} static const TypeInfo piix3_xen_info = { .name = TYPE_PIIX3_XEN_DEVICE, @@ -352,44 +446,3 @@ static void piix3_register_types(void) } type_init(piix3_register_types) - -/* - * Return the global irq number corresponding to a given device irq - * pin. We could also use the bus number to have a more precise mapping. - */ -static int pci_slot_get_pirq(PCIDevice *pci_dev, int pci_intx) -{ - int slot_addend; - slot_addend = PCI_SLOT(pci_dev->devfn) - 1; - return (pci_intx + slot_addend) & 3; -} - -PIIX3State *piix3_create(PCIBus *pci_bus, ISABus **isa_bus) -{ - PIIX3State *piix3; - PCIDevice *pci_dev; - - /* - * Xen supports additional interrupt routes from the PCI devices to - * the IOAPIC: the four pins of each PCI device on the bus are also - * connected to the IOAPIC directly. - * These additional routes can be discovered through ACPI. 
- */ - if (xen_enabled()) { - pci_dev = pci_create_simple_multifunction(pci_bus, -1, true, - TYPE_PIIX3_XEN_DEVICE); - piix3 = PIIX3_PCI_DEVICE(pci_dev); - pci_bus_irqs(pci_bus, xen_piix3_set_irq, xen_pci_slot_get_pirq, - piix3, XEN_PIIX_NUM_PIRQS); - } else { - pci_dev = pci_create_simple_multifunction(pci_bus, -1, true, - TYPE_PIIX3_DEVICE); - piix3 = PIIX3_PCI_DEVICE(pci_dev); - pci_bus_irqs(pci_bus, piix3_set_irq, pci_slot_get_pirq, - piix3, PIIX_NUM_PIRQS); - pci_bus_set_route_irq_fn(pci_bus, piix3_route_intx_pin_to_irq); - } - *isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(piix3), "isa.0")); - - return piix3; -} diff --git a/hw/isa/piix4.c b/hw/isa/piix4.c index 0fe7b69bc4..8fc1db6dc9 100644 --- a/hw/isa/piix4.c +++ b/hw/isa/piix4.c @@ -28,25 +28,29 @@ #include "hw/irq.h" #include "hw/southbridge/piix.h" #include "hw/pci/pci.h" +#include "hw/ide/piix.h" #include "hw/isa/isa.h" #include "hw/intc/i8259.h" #include "hw/dma/i8257.h" #include "hw/timer/i8254.h" #include "hw/rtc/mc146818rtc.h" #include "hw/ide/pci.h" +#include "hw/acpi/piix4.h" +#include "hw/usb/hcd-uhci.h" #include "migration/vmstate.h" #include "sysemu/reset.h" #include "sysemu/runstate.h" #include "qom/object.h" -PCIDevice *piix4_dev; - struct PIIX4State { PCIDevice dev; qemu_irq cpu_intr; qemu_irq *isa; RTCState rtc; + PCIIDEState ide; + UHCIState uhci; + PIIX4PMState pm; /* Reset Control Register */ MemoryRegion rcr_mem; uint8_t rcr; @@ -54,6 +58,52 @@ struct PIIX4State { OBJECT_DECLARE_SIMPLE_TYPE(PIIX4State, PIIX4_PCI_DEVICE) +static void piix4_set_irq(void *opaque, int irq_num, int level) +{ + int i, pic_irq, pic_level; + PIIX4State *s = opaque; + PCIBus *bus = pci_get_bus(&s->dev); + + /* now we change the pic irq level according to the piix irq mappings */ + /* XXX: optimize */ + pic_irq = s->dev.config[PIIX_PIRQCA + irq_num]; + if (pic_irq < ISA_NUM_IRQS) { + /* The pic level is the logical OR of all the PCI irqs mapped to it. */ + pic_level = 0; + for (i = 0; i < PIIX_NUM_PIRQS; i++) { + if (pic_irq == s->dev.config[PIIX_PIRQCA + i]) { + pic_level |= pci_bus_get_irq_level(bus, i); + } + } + qemu_set_irq(s->isa[pic_irq], pic_level); + } +} + +static int pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num) +{ + int slot; + + slot = PCI_SLOT(pci_dev->devfn); + + switch (slot) { + /* PIIX4 USB */ + case 10: + return 3; + /* AMD 79C973 Ethernet */ + case 11: + return 1; + /* Crystal 4281 Sound */ + case 12: + return 2; + /* PCI slot 1 to 4 */ + case 18 ... 
21: + return ((slot - 18) + irq_num) & 0x03; + /* Unknown device, don't do any translation */ + default: + return irq_num; + } +} + static void piix4_isa_reset(DeviceState *dev) { PIIX4State *d = PIIX4_PCI_DEVICE(dev); @@ -90,9 +140,11 @@ static void piix4_isa_reset(DeviceState *dev) pci_conf[0xab] = 0x00; pci_conf[0xac] = 0x00; pci_conf[0xae] = 0x00; + + d->rcr = 0; } -static int piix4_ide_post_load(void *opaque, int version_id) +static int piix4_post_load(void *opaque, int version_id) { PIIX4State *s = opaque; @@ -107,7 +159,7 @@ static const VMStateDescription vmstate_piix4 = { .name = "PIIX4", .version_id = 3, .minimum_version_id = 2, - .post_load = piix4_ide_post_load, + .post_load = piix4_post_load, .fields = (VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PIIX4State), VMSTATE_UINT8_V(rcr, PIIX4State, 3), @@ -160,6 +212,7 @@ static const MemoryRegionOps piix4_rcr_ops = { static void piix4_realize(PCIDevice *dev, Error **errp) { PIIX4State *s = PIIX4_PCI_DEVICE(dev); + PCIBus *pci_bus = pci_get_bus(dev); ISABus *isa_bus; qemu_irq *i8259_out_irq; @@ -197,16 +250,41 @@ static void piix4_realize(PCIDevice *dev, Error **errp) if (!qdev_realize(DEVICE(&s->rtc), BUS(isa_bus), errp)) { return; } - isa_init_irq(ISA_DEVICE(&s->rtc), &s->rtc.irq, RTC_ISA_IRQ); + s->rtc.irq = isa_get_irq(ISA_DEVICE(&s->rtc), s->rtc.isairq); - piix4_dev = dev; + /* IDE */ + qdev_prop_set_int32(DEVICE(&s->ide), "addr", dev->devfn + 1); + if (!qdev_realize(DEVICE(&s->ide), BUS(pci_bus), errp)) { + return; + } + + /* USB */ + qdev_prop_set_int32(DEVICE(&s->uhci), "addr", dev->devfn + 2); + if (!qdev_realize(DEVICE(&s->uhci), BUS(pci_bus), errp)) { + return; + } + + /* ACPI controller */ + qdev_prop_set_int32(DEVICE(&s->pm), "addr", dev->devfn + 3); + if (!qdev_realize(DEVICE(&s->pm), BUS(pci_bus), errp)) { + return; + } + qdev_connect_gpio_out(DEVICE(&s->pm), 0, s->isa[9]); + + pci_bus_irqs(pci_bus, piix4_set_irq, pci_slot_get_pirq, s, PIIX_NUM_PIRQS); } static void piix4_init(Object *obj) { PIIX4State *s = PIIX4_PCI_DEVICE(obj); - object_initialize(&s->rtc, sizeof(s->rtc), TYPE_MC146818_RTC); + object_initialize_child(obj, "rtc", &s->rtc, TYPE_MC146818_RTC); + object_initialize_child(obj, "ide", &s->ide, TYPE_PIIX4_IDE); + object_initialize_child(obj, "uhci", &s->uhci, "piix4-usb-uhci"); + + object_initialize_child(obj, "pm", &s->pm, TYPE_PIIX4_PM); + qdev_prop_set_uint32(DEVICE(&s->pm), "smb_io_base", 0x1100); + qdev_prop_set_bit(DEVICE(&s->pm), "smm-enabled", 0); } static void piix4_class_init(ObjectClass *klass, void *data) @@ -247,29 +325,3 @@ static void piix4_register_types(void) } type_init(piix4_register_types) - -DeviceState *piix4_create(PCIBus *pci_bus, ISABus **isa_bus, I2CBus **smbus) -{ - PCIDevice *pci; - DeviceState *dev; - int devfn = PCI_DEVFN(10, 0); - - pci = pci_create_simple_multifunction(pci_bus, devfn, true, - TYPE_PIIX4_PCI_DEVICE); - dev = DEVICE(pci); - if (isa_bus) { - *isa_bus = ISA_BUS(qdev_get_child_bus(dev, "isa.0")); - } - - pci = pci_create_simple(pci_bus, devfn + 1, "piix4-ide"); - pci_ide_create_devs(pci); - - pci_create_simple(pci_bus, devfn + 2, "piix4-usb-uhci"); - if (smbus) { - *smbus = piix4_pm_init(pci_bus, devfn + 3, 0x1100, - qdev_get_gpio_in_named(dev, "isa", 9), - NULL, 0, NULL); - } - - return dev; -} diff --git a/hw/isa/vt82c686.c b/hw/isa/vt82c686.c index f57f3e7067..3f9bd0c04d 100644 --- a/hw/isa/vt82c686.c +++ b/hw/isa/vt82c686.c @@ -17,11 +17,13 @@ #include "hw/isa/vt82c686.h" #include "hw/pci/pci.h" #include "hw/qdev-properties.h" +#include "hw/ide/pci.h" #include 
"hw/isa/isa.h" #include "hw/isa/superio.h" #include "hw/intc/i8259.h" #include "hw/irq.h" #include "hw/dma/i8257.h" +#include "hw/usb/hcd-uhci.h" #include "hw/timer/i8254.h" #include "hw/rtc/mc146818rtc.h" #include "migration/vmstate.h" @@ -248,6 +250,8 @@ static const ViaPMInitInfo vt82c686b_pm_init_info = { .device_id = PCI_DEVICE_ID_VIA_82C686B_PM, }; +#define TYPE_VT82C686B_PM "vt82c686b-pm" + static const TypeInfo vt82c686b_pm_info = { .name = TYPE_VT82C686B_PM, .parent = TYPE_VIA_PM, @@ -259,6 +263,8 @@ static const ViaPMInitInfo vt8231_pm_init_info = { .device_id = PCI_DEVICE_ID_VIA_8231_PM, }; +#define TYPE_VT8231_PM "vt8231-pm" + static const TypeInfo vt8231_pm_info = { .name = TYPE_VT8231_PM, .parent = TYPE_VIA_PM, @@ -542,7 +548,14 @@ OBJECT_DECLARE_SIMPLE_TYPE(ViaISAState, VIA_ISA) struct ViaISAState { PCIDevice dev; qemu_irq cpu_intr; - ViaSuperIOState *via_sio; + qemu_irq *isa_irqs; + ViaSuperIOState via_sio; + RTCState rtc; + PCIIDEState ide; + UHCIState uhci[2]; + ViaPMState pm; + PCIDevice ac97; + PCIDevice mc97; }; static const VMStateDescription vmstate_via = { @@ -555,10 +568,23 @@ static const VMStateDescription vmstate_via = { } }; +static void via_isa_init(Object *obj) +{ + ViaISAState *s = VIA_ISA(obj); + + object_initialize_child(obj, "rtc", &s->rtc, TYPE_MC146818_RTC); + object_initialize_child(obj, "ide", &s->ide, TYPE_VIA_IDE); + object_initialize_child(obj, "uhci1", &s->uhci[0], TYPE_VT82C686B_USB_UHCI); + object_initialize_child(obj, "uhci2", &s->uhci[1], TYPE_VT82C686B_USB_UHCI); + object_initialize_child(obj, "ac97", &s->ac97, TYPE_VIA_AC97); + object_initialize_child(obj, "mc97", &s->mc97, TYPE_VIA_MC97); +} + static const TypeInfo via_isa_info = { .name = TYPE_VIA_ISA, .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(ViaISAState), + .instance_init = via_isa_init, .abstract = true, .interfaces = (InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, @@ -566,12 +592,92 @@ static const TypeInfo via_isa_info = { }, }; +void via_isa_set_irq(PCIDevice *d, int n, int level) +{ + ViaISAState *s = VIA_ISA(d); + qemu_set_irq(s->isa_irqs[n], level); +} + static void via_isa_request_i8259_irq(void *opaque, int irq, int level) { ViaISAState *s = opaque; qemu_set_irq(s->cpu_intr, level); } +static void via_isa_realize(PCIDevice *d, Error **errp) +{ + ViaISAState *s = VIA_ISA(d); + DeviceState *dev = DEVICE(d); + PCIBus *pci_bus = pci_get_bus(d); + qemu_irq *isa_irq; + ISABus *isa_bus; + int i; + + qdev_init_gpio_out(dev, &s->cpu_intr, 1); + isa_irq = qemu_allocate_irqs(via_isa_request_i8259_irq, s, 1); + isa_bus = isa_bus_new(dev, pci_address_space(d), pci_address_space_io(d), + errp); + + if (!isa_bus) { + return; + } + + s->isa_irqs = i8259_init(isa_bus, *isa_irq); + isa_bus_irqs(isa_bus, s->isa_irqs); + i8254_pit_init(isa_bus, 0x40, 0, NULL); + i8257_dma_init(isa_bus, 0); + + /* RTC */ + qdev_prop_set_int32(DEVICE(&s->rtc), "base_year", 2000); + if (!qdev_realize(DEVICE(&s->rtc), BUS(isa_bus), errp)) { + return; + } + isa_connect_gpio_out(ISA_DEVICE(&s->rtc), 0, s->rtc.isairq); + + for (i = 0; i < PCI_CONFIG_HEADER_SIZE; i++) { + if (i < PCI_COMMAND || i >= PCI_REVISION_ID) { + d->wmask[i] = 0; + } + } + + /* Super I/O */ + if (!qdev_realize(DEVICE(&s->via_sio), BUS(isa_bus), errp)) { + return; + } + + /* Function 1: IDE */ + qdev_prop_set_int32(DEVICE(&s->ide), "addr", d->devfn + 1); + if (!qdev_realize(DEVICE(&s->ide), BUS(pci_bus), errp)) { + return; + } + + /* Functions 2-3: USB Ports */ + for (i = 0; i < ARRAY_SIZE(s->uhci); i++) { + 
qdev_prop_set_int32(DEVICE(&s->uhci[i]), "addr", d->devfn + 2 + i); + if (!qdev_realize(DEVICE(&s->uhci[i]), BUS(pci_bus), errp)) { + return; + } + } + + /* Function 4: Power Management */ + qdev_prop_set_int32(DEVICE(&s->pm), "addr", d->devfn + 4); + if (!qdev_realize(DEVICE(&s->pm), BUS(pci_bus), errp)) { + return; + } + + /* Function 5: AC97 Audio */ + qdev_prop_set_int32(DEVICE(&s->ac97), "addr", d->devfn + 5); + if (!qdev_realize(DEVICE(&s->ac97), BUS(pci_bus), errp)) { + return; + } + + /* Function 6: MC97 Modem */ + qdev_prop_set_int32(DEVICE(&s->mc97), "addr", d->devfn + 6); + if (!qdev_realize(DEVICE(&s->mc97), BUS(pci_bus), errp)) { + return; + } +} + /* TYPE_VT82C686B_ISA */ static void vt82c686b_write_config(PCIDevice *d, uint32_t addr, @@ -583,7 +689,7 @@ static void vt82c686b_write_config(PCIDevice *d, uint32_t addr, pci_default_write_config(d, addr, val, len); if (addr == 0x85) { /* BIT(1): enable or disable superio config io ports */ - via_superio_io_enable(s->via_sio, val & BIT(1)); + via_superio_io_enable(&s->via_sio, val & BIT(1)); } } @@ -607,30 +713,12 @@ static void vt82c686b_isa_reset(DeviceState *dev) pci_conf[0x77] = 0x10; /* GPIO Control 1/2/3/4 */ } -static void vt82c686b_realize(PCIDevice *d, Error **errp) +static void vt82c686b_init(Object *obj) { - ViaISAState *s = VIA_ISA(d); - DeviceState *dev = DEVICE(d); - ISABus *isa_bus; - qemu_irq *isa_irq; - int i; + ViaISAState *s = VIA_ISA(obj); - qdev_init_gpio_out(dev, &s->cpu_intr, 1); - isa_irq = qemu_allocate_irqs(via_isa_request_i8259_irq, s, 1); - isa_bus = isa_bus_new(dev, get_system_memory(), pci_address_space_io(d), - &error_fatal); - isa_bus_irqs(isa_bus, i8259_init(isa_bus, *isa_irq)); - i8254_pit_init(isa_bus, 0x40, 0, NULL); - i8257_dma_init(isa_bus, 0); - s->via_sio = VIA_SUPERIO(isa_create_simple(isa_bus, - TYPE_VT82C686B_SUPERIO)); - mc146818_rtc_init(isa_bus, 2000, NULL); - - for (i = 0; i < PCI_CONFIG_HEADER_SIZE; i++) { - if (i < PCI_COMMAND || i >= PCI_REVISION_ID) { - d->wmask[i] = 0; - } - } + object_initialize_child(obj, "sio", &s->via_sio, TYPE_VT82C686B_SUPERIO); + object_initialize_child(obj, "pm", &s->pm, TYPE_VT82C686B_PM); } static void vt82c686b_class_init(ObjectClass *klass, void *data) @@ -638,7 +726,7 @@ static void vt82c686b_class_init(ObjectClass *klass, void *data) DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); - k->realize = vt82c686b_realize; + k->realize = via_isa_realize; k->config_write = vt82c686b_write_config; k->vendor_id = PCI_VENDOR_ID_VIA; k->device_id = PCI_DEVICE_ID_VIA_82C686B_ISA; @@ -655,6 +743,7 @@ static const TypeInfo vt82c686b_isa_info = { .name = TYPE_VT82C686B_ISA, .parent = TYPE_VIA_ISA, .instance_size = sizeof(ViaISAState), + .instance_init = vt82c686b_init, .class_init = vt82c686b_class_init, }; @@ -669,7 +758,7 @@ static void vt8231_write_config(PCIDevice *d, uint32_t addr, pci_default_write_config(d, addr, val, len); if (addr == 0x50) { /* BIT(2): enable or disable superio config io ports */ - via_superio_io_enable(s->via_sio, val & BIT(2)); + via_superio_io_enable(&s->via_sio, val & BIT(2)); } } @@ -688,29 +777,12 @@ static void vt8231_isa_reset(DeviceState *dev) pci_conf[0x6b] = 0x01; /* Fast IR I/O Base */ } -static void vt8231_realize(PCIDevice *d, Error **errp) +static void vt8231_init(Object *obj) { - ViaISAState *s = VIA_ISA(d); - DeviceState *dev = DEVICE(d); - ISABus *isa_bus; - qemu_irq *isa_irq; - int i; + ViaISAState *s = VIA_ISA(obj); - qdev_init_gpio_out(dev, &s->cpu_intr, 1); - isa_irq = 
qemu_allocate_irqs(via_isa_request_i8259_irq, s, 1); - isa_bus = isa_bus_new(dev, get_system_memory(), pci_address_space_io(d), - &error_fatal); - isa_bus_irqs(isa_bus, i8259_init(isa_bus, *isa_irq)); - i8254_pit_init(isa_bus, 0x40, 0, NULL); - i8257_dma_init(isa_bus, 0); - s->via_sio = VIA_SUPERIO(isa_create_simple(isa_bus, TYPE_VT8231_SUPERIO)); - mc146818_rtc_init(isa_bus, 2000, NULL); - - for (i = 0; i < PCI_CONFIG_HEADER_SIZE; i++) { - if (i < PCI_COMMAND || i >= PCI_REVISION_ID) { - d->wmask[i] = 0; - } - } + object_initialize_child(obj, "sio", &s->via_sio, TYPE_VT8231_SUPERIO); + object_initialize_child(obj, "pm", &s->pm, TYPE_VT8231_PM); } static void vt8231_class_init(ObjectClass *klass, void *data) @@ -718,7 +790,7 @@ static void vt8231_class_init(ObjectClass *klass, void *data) DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); - k->realize = vt8231_realize; + k->realize = via_isa_realize; k->config_write = vt8231_write_config; k->vendor_id = PCI_VENDOR_ID_VIA; k->device_id = PCI_DEVICE_ID_VIA_8231_ISA; @@ -735,6 +807,7 @@ static const TypeInfo vt8231_isa_info = { .name = TYPE_VT8231_ISA, .parent = TYPE_VIA_ISA, .instance_size = sizeof(ViaISAState), + .instance_init = vt8231_init, .class_init = vt8231_class_init, }; diff --git a/hw/loongarch/Kconfig b/hw/loongarch/Kconfig new file mode 100644 index 0000000000..17d15b6c90 --- /dev/null +++ b/hw/loongarch/Kconfig @@ -0,0 +1,22 @@ +config LOONGARCH_VIRT + bool + select PCI + select PCI_EXPRESS_GENERIC_BRIDGE + imply VIRTIO_VGA + imply PCI_DEVICES + imply NVDIMM + select ISA_BUS + select SERIAL + select SERIAL_ISA + select VIRTIO_PCI + select PLATFORM_BUS + select LOONGARCH_IPI + select LOONGARCH_PCH_PIC + select LOONGARCH_PCH_MSI + select LOONGARCH_EXTIOI + select LS7A_RTC + select SMBIOS + select ACPI_PCI + select ACPI_HW_REDUCED + select FW_CFG_DMA + select DIMM diff --git a/hw/loongarch/acpi-build.c b/hw/loongarch/acpi-build.c new file mode 100644 index 0000000000..7d5f5a757d --- /dev/null +++ b/hw/loongarch/acpi-build.c @@ -0,0 +1,549 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Support for generating ACPI tables and passing them to Guests + * + * Copyright (C) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/bitmap.h" +#include "hw/pci/pci.h" +#include "hw/core/cpu.h" +#include "target/loongarch/cpu.h" +#include "hw/acpi/acpi-defs.h" +#include "hw/acpi/acpi.h" +#include "hw/nvram/fw_cfg.h" +#include "hw/acpi/bios-linker-loader.h" +#include "migration/vmstate.h" +#include "hw/mem/memory-device.h" +#include "sysemu/reset.h" + +/* Supported chipsets: */ +#include "hw/pci-host/ls7a.h" +#include "hw/loongarch/virt.h" +#include "hw/acpi/aml-build.h" + +#include "hw/acpi/utils.h" +#include "hw/acpi/pci.h" + +#include "qom/qom-qobject.h" + +#include "hw/acpi/generic_event_device.h" +#include "hw/pci-host/gpex.h" +#include "sysemu/tpm.h" +#include "hw/platform-bus.h" +#include "hw/acpi/aml-build.h" + +#define ACPI_BUILD_ALIGN_SIZE 0x1000 +#define ACPI_BUILD_TABLE_SIZE 0x20000 + +#ifdef DEBUG_ACPI_BUILD +#define ACPI_BUILD_DPRINTF(fmt, ...) \ + do {printf("ACPI_BUILD: " fmt, ## __VA_ARGS__); } while (0) +#else +#define ACPI_BUILD_DPRINTF(fmt, ...) 
+#endif + +/* build FADT */ +static void init_common_fadt_data(AcpiFadtData *data) +{ + AcpiFadtData fadt = { + /* ACPI 5.0: 4.1 Hardware-Reduced ACPI */ + .rev = 5, + .flags = ((1 << ACPI_FADT_F_HW_REDUCED_ACPI) | + (1 << ACPI_FADT_F_RESET_REG_SUP)), + + /* ACPI 5.0: 4.8.3.7 Sleep Control and Status Registers */ + .sleep_ctl = { + .space_id = AML_AS_SYSTEM_MEMORY, + .bit_width = 8, + .address = VIRT_GED_REG_ADDR + ACPI_GED_REG_SLEEP_CTL, + }, + .sleep_sts = { + .space_id = AML_AS_SYSTEM_MEMORY, + .bit_width = 8, + .address = VIRT_GED_REG_ADDR + ACPI_GED_REG_SLEEP_STS, + }, + + /* ACPI 5.0: 4.8.3.6 Reset Register */ + .reset_reg = { + .space_id = AML_AS_SYSTEM_MEMORY, + .bit_width = 8, + .address = VIRT_GED_REG_ADDR + ACPI_GED_REG_RESET, + }, + .reset_val = ACPI_GED_RESET_VALUE, + }; + *data = fadt; +} + +static void acpi_align_size(GArray *blob, unsigned align) +{ + /* + * Align size to multiple of given size. This reduces the chance + * we need to change size in the future (breaking cross version migration). + */ + g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align)); +} + +/* build FACS */ +static void +build_facs(GArray *table_data) +{ + const char *sig = "FACS"; + const uint8_t reserved[40] = {}; + + g_array_append_vals(table_data, sig, 4); /* Signature */ + build_append_int_noprefix(table_data, 64, 4); /* Length */ + build_append_int_noprefix(table_data, 0, 4); /* Hardware Signature */ + build_append_int_noprefix(table_data, 0, 4); /* Firmware Waking Vector */ + build_append_int_noprefix(table_data, 0, 4); /* Global Lock */ + build_append_int_noprefix(table_data, 0, 4); /* Flags */ + g_array_append_vals(table_data, reserved, 40); /* Reserved */ +} + +/* build MADT */ +static void +build_madt(GArray *table_data, BIOSLinker *linker, LoongArchMachineState *lams) +{ + MachineState *ms = MACHINE(lams); + int i; + AcpiTable table = { .sig = "APIC", .rev = 1, .oem_id = lams->oem_id, + .oem_table_id = lams->oem_table_id }; + + acpi_table_begin(&table, table_data); + + /* Local APIC Address */ + build_append_int_noprefix(table_data, 0, 4); + build_append_int_noprefix(table_data, 1 /* PCAT_COMPAT */, 4); /* Flags */ + + for (i = 0; i < ms->smp.cpus; i++) { + /* Processor Core Interrupt Controller Structure */ + build_append_int_noprefix(table_data, 17, 1); /* Type */ + build_append_int_noprefix(table_data, 15, 1); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Version */ + build_append_int_noprefix(table_data, i + 1, 4); /* ACPI Processor ID */ + build_append_int_noprefix(table_data, i, 4); /* Core ID */ + build_append_int_noprefix(table_data, 1, 4); /* Flags */ + } + + /* Extend I/O Interrupt Controller Structure */ + build_append_int_noprefix(table_data, 20, 1); /* Type */ + build_append_int_noprefix(table_data, 13, 1); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Version */ + build_append_int_noprefix(table_data, 3, 1); /* Cascade */ + build_append_int_noprefix(table_data, 0, 1); /* Node */ + build_append_int_noprefix(table_data, 0xffff, 8); /* Node map */ + + /* MSI Interrupt Controller Structure */ + build_append_int_noprefix(table_data, 21, 1); /* Type */ + build_append_int_noprefix(table_data, 19, 1); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Version */ + build_append_int_noprefix(table_data, VIRT_PCH_MSI_ADDR_LOW, 8);/* Address */ + build_append_int_noprefix(table_data, 0x40, 4); /* Start */ + build_append_int_noprefix(table_data, 0xc0, 4); /* Count */ + + /* Bridge I/O Interrupt Controller Structure */ + 
build_append_int_noprefix(table_data, 22, 1); /* Type */ + build_append_int_noprefix(table_data, 17, 1); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Version */ + build_append_int_noprefix(table_data, VIRT_PCH_REG_BASE, 8);/* Address */ + build_append_int_noprefix(table_data, 0x1000, 2); /* Size */ + build_append_int_noprefix(table_data, 0, 2); /* Id */ + build_append_int_noprefix(table_data, 0x40, 2); /* Base */ + + acpi_table_end(linker, &table); +} + +/* build SRAT */ +static void +build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) +{ + uint64_t i; + LoongArchMachineState *lams = LOONGARCH_MACHINE(machine); + MachineState *ms = MACHINE(lams); + AcpiTable table = { .sig = "SRAT", .rev = 1, .oem_id = lams->oem_id, + .oem_table_id = lams->oem_table_id }; + + acpi_table_begin(&table, table_data); + build_append_int_noprefix(table_data, 1, 4); /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); /* Reserved */ + + for (i = 0; i < ms->smp.cpus; ++i) { + /* Processor Local APIC/SAPIC Affinity Structure */ + build_append_int_noprefix(table_data, 0, 1); /* Type */ + build_append_int_noprefix(table_data, 16, 1); /* Length */ + /* Proximity Domain [7:0] */ + build_append_int_noprefix(table_data, 0, 1); + build_append_int_noprefix(table_data, i, 1); /* APIC ID */ + /* Flags, Table 5-36 */ + build_append_int_noprefix(table_data, 1, 4); + build_append_int_noprefix(table_data, 0, 1); /* Local SAPIC EID */ + /* Proximity Domain [31:8] */ + build_append_int_noprefix(table_data, 0, 3); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + } + + build_srat_memory(table_data, VIRT_LOWMEM_BASE, VIRT_LOWMEM_SIZE, + 0, MEM_AFFINITY_ENABLED); + + build_srat_memory(table_data, VIRT_HIGHMEM_BASE, machine->ram_size - VIRT_LOWMEM_SIZE, + 0, MEM_AFFINITY_ENABLED); + + if (ms->device_memory) { + build_srat_memory(table_data, ms->device_memory->base, + memory_region_size(&ms->device_memory->mr), + 0, MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED); + } + + acpi_table_end(linker, &table); +} + +typedef +struct AcpiBuildState { + /* Copy of table in RAM (for patching). */ + MemoryRegion *table_mr; + /* Is table patched? 
*/ + uint8_t patched; + void *rsdp; + MemoryRegion *rsdp_mr; + MemoryRegion *linker_mr; +} AcpiBuildState; + +static void build_uart_device_aml(Aml *table) +{ + Aml *dev; + Aml *crs; + Aml *pkg0, *pkg1, *pkg2; + uint32_t uart_irq = VIRT_UART_IRQ; + + Aml *scope = aml_scope("_SB"); + dev = aml_device("COMA"); + aml_append(dev, aml_name_decl("_HID", aml_string("PNP0501"))); + aml_append(dev, aml_name_decl("_UID", aml_int(0))); + aml_append(dev, aml_name_decl("_CCA", aml_int(1))); + crs = aml_resource_template(); + aml_append(crs, + aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, + AML_NON_CACHEABLE, AML_READ_WRITE, + 0, VIRT_UART_BASE, VIRT_UART_BASE + VIRT_UART_SIZE - 1, + 0, VIRT_UART_SIZE)); + aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, + AML_SHARED, &uart_irq, 1)); + aml_append(dev, aml_name_decl("_CRS", crs)); + pkg0 = aml_package(0x2); + aml_append(pkg0, aml_int(0x05F5E100)); + aml_append(pkg0, aml_string("clock-frenquency")); + pkg1 = aml_package(0x1); + aml_append(pkg1, pkg0); + pkg2 = aml_package(0x2); + aml_append(pkg2, aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301")); + aml_append(pkg2, pkg1); + aml_append(dev, aml_name_decl("_DSD", pkg2)); + aml_append(scope, dev); + aml_append(table, scope); +} + +static void +build_la_ged_aml(Aml *dsdt, MachineState *machine) +{ + uint32_t event; + LoongArchMachineState *lams = LOONGARCH_MACHINE(machine); + + build_ged_aml(dsdt, "\\_SB."GED_DEVICE, + HOTPLUG_HANDLER(lams->acpi_ged), + VIRT_SCI_IRQ, AML_SYSTEM_MEMORY, + VIRT_GED_EVT_ADDR); + event = object_property_get_uint(OBJECT(lams->acpi_ged), + "ged-event", &error_abort); + if (event & ACPI_GED_MEM_HOTPLUG_EVT) { + build_memory_hotplug_aml(dsdt, machine->ram_slots, "\\_SB", NULL, + AML_SYSTEM_MEMORY, + VIRT_GED_MEM_ADDR); + } +} + +static void build_pci_device_aml(Aml *scope, LoongArchMachineState *lams) +{ + struct GPEXConfig cfg = { + .mmio64.base = VIRT_PCI_MEM_BASE, + .mmio64.size = VIRT_PCI_MEM_SIZE, + .pio.base = VIRT_PCI_IO_BASE, + .pio.size = VIRT_PCI_IO_SIZE, + .ecam.base = VIRT_PCI_CFG_BASE, + .ecam.size = VIRT_PCI_CFG_SIZE, + .irq = PCH_PIC_IRQ_OFFSET + VIRT_DEVICE_IRQS, + .bus = lams->pci_bus, + }; + + acpi_dsdt_add_gpex(scope, &cfg); +} + +#ifdef CONFIG_TPM +static void acpi_dsdt_add_tpm(Aml *scope, LoongArchMachineState *vms) +{ + PlatformBusDevice *pbus = PLATFORM_BUS_DEVICE(vms->platform_bus_dev); + hwaddr pbus_base = VIRT_PLATFORM_BUS_BASEADDRESS; + SysBusDevice *sbdev = SYS_BUS_DEVICE(tpm_find()); + MemoryRegion *sbdev_mr; + hwaddr tpm_base; + + if (!sbdev) { + return; + } + + tpm_base = platform_bus_get_mmio_addr(pbus, sbdev, 0); + assert(tpm_base != -1); + + tpm_base += pbus_base; + + sbdev_mr = sysbus_mmio_get_region(sbdev, 0); + + Aml *dev = aml_device("TPM0"); + aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101"))); + aml_append(dev, aml_name_decl("_STR", aml_string("TPM 2.0 Device"))); + aml_append(dev, aml_name_decl("_UID", aml_int(0))); + + Aml *crs = aml_resource_template(); + aml_append(crs, + aml_memory32_fixed(tpm_base, + (uint32_t)memory_region_size(sbdev_mr), + AML_READ_WRITE)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); +} +#endif + +/* build DSDT */ +static void +build_dsdt(GArray *table_data, BIOSLinker *linker, MachineState *machine) +{ + Aml *dsdt, *scope, *pkg; + LoongArchMachineState *lams = LOONGARCH_MACHINE(machine); + AcpiTable table = { .sig = "DSDT", .rev = 1, .oem_id = lams->oem_id, + .oem_table_id = lams->oem_table_id }; + + acpi_table_begin(&table, table_data); 
+ dsdt = init_aml_allocator(); + build_uart_device_aml(dsdt); + build_pci_device_aml(dsdt, lams); + build_la_ged_aml(dsdt, machine); +#ifdef CONFIG_TPM + acpi_dsdt_add_tpm(dsdt, lams); +#endif + /* System State Package */ + scope = aml_scope("\\"); + pkg = aml_package(4); + aml_append(pkg, aml_int(ACPI_GED_SLP_TYP_S5)); + aml_append(pkg, aml_int(0)); /* ignored */ + aml_append(pkg, aml_int(0)); /* reserved */ + aml_append(pkg, aml_int(0)); /* reserved */ + aml_append(scope, aml_name_decl("_S5", pkg)); + aml_append(dsdt, scope); + /* Copy AML table into ACPI tables blob and patch header there */ + g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); + acpi_table_end(linker, &table); + free_aml_allocator(); +} + +static void acpi_build(AcpiBuildTables *tables, MachineState *machine) +{ + LoongArchMachineState *lams = LOONGARCH_MACHINE(machine); + GArray *table_offsets; + AcpiFadtData fadt_data; + unsigned facs, rsdt, dsdt; + uint8_t *u; + GArray *tables_blob = tables->table_data; + + init_common_fadt_data(&fadt_data); + + table_offsets = g_array_new(false, true, sizeof(uint32_t)); + ACPI_BUILD_DPRINTF("init ACPI tables\n"); + + bios_linker_loader_alloc(tables->linker, + ACPI_BUILD_TABLE_FILE, tables_blob, + 64, false); + + /* + * FACS is pointed to by FADT. + * We place it first since it's the only table that has alignment + * requirements. + */ + facs = tables_blob->len; + build_facs(tables_blob); + + /* DSDT is pointed to by FADT */ + dsdt = tables_blob->len; + build_dsdt(tables_blob, tables->linker, machine); + + /* ACPI tables pointed to by RSDT */ + acpi_add_table(table_offsets, tables_blob); + fadt_data.facs_tbl_offset = &facs; + fadt_data.dsdt_tbl_offset = &dsdt; + fadt_data.xdsdt_tbl_offset = &dsdt; + build_fadt(tables_blob, tables->linker, &fadt_data, + lams->oem_id, lams->oem_table_id); + + acpi_add_table(table_offsets, tables_blob); + build_madt(tables_blob, tables->linker, lams); + + acpi_add_table(table_offsets, tables_blob); + build_srat(tables_blob, tables->linker, machine); + + acpi_add_table(table_offsets, tables_blob); + { + AcpiMcfgInfo mcfg = { + .base = cpu_to_le64(VIRT_PCI_CFG_BASE), + .size = cpu_to_le64(VIRT_PCI_CFG_SIZE), + }; + build_mcfg(tables_blob, tables->linker, &mcfg, lams->oem_id, + lams->oem_table_id); + } + +#ifdef CONFIG_TPM + /* TPM info */ + if (tpm_get_version(tpm_find()) == TPM_VERSION_2_0) { + acpi_add_table(table_offsets, tables_blob); + build_tpm2(tables_blob, tables->linker, + tables->tcpalog, lams->oem_id, + lams->oem_table_id); + } +#endif + /* Add tables supplied by user (if any) */ + for (u = acpi_table_first(); u; u = acpi_table_next(u)) { + unsigned len = acpi_table_len(u); + + acpi_add_table(table_offsets, tables_blob); + g_array_append_vals(tables_blob, u, len); + } + + /* RSDT is pointed to by RSDP */ + rsdt = tables_blob->len; + build_rsdt(tables_blob, tables->linker, table_offsets, + lams->oem_id, lams->oem_table_id); + + /* RSDP is in FSEG memory, so allocate it separately */ + { + AcpiRsdpData rsdp_data = { + .revision = 0, + .oem_id = lams->oem_id, + .xsdt_tbl_offset = NULL, + .rsdt_tbl_offset = &rsdt, + }; + build_rsdp(tables->rsdp, tables->linker, &rsdp_data); + } + + /* + * The align size is 128, warn if 64k is not enough therefore + * the align size could be resized. 
+ */ + if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) { + warn_report("ACPI table size %u exceeds %d bytes," + " migration may not work", + tables_blob->len, ACPI_BUILD_TABLE_SIZE / 2); + error_printf("Try removing CPUs, NUMA nodes, memory slots" + " or PCI bridges."); + } + + acpi_align_size(tables->linker->cmd_blob, ACPI_BUILD_ALIGN_SIZE); + + /* Cleanup memory that's no longer used. */ + g_array_free(table_offsets, true); +} + +static void acpi_ram_update(MemoryRegion *mr, GArray *data) +{ + uint32_t size = acpi_data_len(data); + + /* + * Make sure RAM size is correct - in case it got changed + * e.g. by migration + */ + memory_region_ram_resize(mr, size, &error_abort); + + memcpy(memory_region_get_ram_ptr(mr), data->data, size); + memory_region_set_dirty(mr, 0, size); +} + +static void acpi_build_update(void *build_opaque) +{ + AcpiBuildState *build_state = build_opaque; + AcpiBuildTables tables; + + /* No state to update or already patched? Nothing to do. */ + if (!build_state || build_state->patched) { + return; + } + build_state->patched = 1; + + acpi_build_tables_init(&tables); + + acpi_build(&tables, MACHINE(qdev_get_machine())); + + acpi_ram_update(build_state->table_mr, tables.table_data); + acpi_ram_update(build_state->rsdp_mr, tables.rsdp); + acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob); + + acpi_build_tables_cleanup(&tables, true); +} + +static void acpi_build_reset(void *build_opaque) +{ + AcpiBuildState *build_state = build_opaque; + build_state->patched = 0; +} + +static const VMStateDescription vmstate_acpi_build = { + .name = "acpi_build", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(patched, AcpiBuildState), + VMSTATE_END_OF_LIST() + }, +}; + +void loongarch_acpi_setup(LoongArchMachineState *lams) +{ + AcpiBuildTables tables; + AcpiBuildState *build_state; + + if (!lams->fw_cfg) { + ACPI_BUILD_DPRINTF("No fw cfg. Bailing out.\n"); + return; + } + + if (!loongarch_is_acpi_enabled(lams)) { + ACPI_BUILD_DPRINTF("ACPI disabled. Bailing out.\n"); + return; + } + + build_state = g_malloc0(sizeof *build_state); + + acpi_build_tables_init(&tables); + acpi_build(&tables, MACHINE(lams)); + + /* Now expose it all to Guest */ + build_state->table_mr = acpi_add_rom_blob(acpi_build_update, + build_state, tables.table_data, + ACPI_BUILD_TABLE_FILE); + assert(build_state->table_mr != NULL); + + build_state->linker_mr = + acpi_add_rom_blob(acpi_build_update, build_state, + tables.linker->cmd_blob, ACPI_BUILD_LOADER_FILE); + + build_state->rsdp_mr = acpi_add_rom_blob(acpi_build_update, + build_state, tables.rsdp, + ACPI_BUILD_RSDP_FILE); + + qemu_register_reset(acpi_build_reset, build_state); + acpi_build_reset(build_state); + vmstate_register(NULL, 0, &vmstate_acpi_build, build_state); + + /* + * Cleanup tables but don't free the memory: we track it + * in build_state. 
+ */ + acpi_build_tables_cleanup(&tables, false); +} diff --git a/hw/loongarch/fw_cfg.c b/hw/loongarch/fw_cfg.c new file mode 100644 index 0000000000..f15a17416c --- /dev/null +++ b/hw/loongarch/fw_cfg.c @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU fw_cfg helpers (LoongArch specific) + * + * Copyright (C) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "hw/loongarch/fw_cfg.h" +#include "hw/loongarch/virt.h" +#include "hw/nvram/fw_cfg.h" +#include "sysemu/sysemu.h" + +static void fw_cfg_boot_set(void *opaque, const char *boot_device, + Error **errp) +{ + fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); +} + +FWCfgState *loongarch_fw_cfg_init(ram_addr_t ram_size, MachineState *ms) +{ + FWCfgState *fw_cfg; + int max_cpus = ms->smp.max_cpus; + int smp_cpus = ms->smp.cpus; + + fw_cfg = fw_cfg_init_mem_wide(VIRT_FWCFG_BASE + 8, VIRT_FWCFG_BASE, 8, + VIRT_FWCFG_BASE + 16, &address_space_memory); + fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus); + fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus); + + qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); + return fw_cfg; +} diff --git a/hw/loongarch/fw_cfg.h b/hw/loongarch/fw_cfg.h new file mode 100644 index 0000000000..7c0de4db4a --- /dev/null +++ b/hw/loongarch/fw_cfg.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU fw_cfg helpers (LoongArch specific) + * + * Copyright (C) 2021 Loongson Technology Corporation Limited + */ + +#ifndef HW_LOONGARCH_FW_CFG_H +#define HW_LOONGARCH_FW_CFG_H + +#include "hw/boards.h" +#include "hw/nvram/fw_cfg.h" + +FWCfgState *loongarch_fw_cfg_init(ram_addr_t ram_size, MachineState *ms); +#endif diff --git a/hw/loongarch/meson.build b/hw/loongarch/meson.build new file mode 100644 index 0000000000..c0421502ab --- /dev/null +++ b/hw/loongarch/meson.build @@ -0,0 +1,8 @@ +loongarch_ss = ss.source_set() +loongarch_ss.add(files( + 'fw_cfg.c', +)) +loongarch_ss.add(when: 'CONFIG_LOONGARCH_VIRT', if_true: [files('virt.c'), fdt]) +loongarch_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi-build.c')) + +hw_arch += {'loongarch': loongarch_ss} diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c new file mode 100644 index 0000000000..958be74fa1 --- /dev/null +++ b/hw/loongarch/virt.c @@ -0,0 +1,985 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU loongson 3a5000 develop board emulation + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/datadir.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/char/serial.h" +#include "sysemu/sysemu.h" +#include "sysemu/qtest.h" +#include "sysemu/runstate.h" +#include "sysemu/reset.h" +#include "sysemu/rtc.h" +#include "hw/loongarch/virt.h" +#include "exec/address-spaces.h" +#include "hw/irq.h" +#include "net/net.h" +#include "hw/loader.h" +#include "elf.h" +#include "hw/intc/loongarch_ipi.h" +#include "hw/intc/loongarch_extioi.h" +#include "hw/intc/loongarch_pch_pic.h" +#include "hw/intc/loongarch_pch_msi.h" +#include "hw/pci-host/ls7a.h" +#include "hw/pci-host/gpex.h" +#include "hw/misc/unimp.h" +#include "hw/loongarch/fw_cfg.h" +#include "target/loongarch/cpu.h" +#include "hw/firmware/smbios.h" +#include "hw/acpi/aml-build.h" +#include "qapi/qapi-visit-common.h" +#include "hw/acpi/generic_event_device.h" +#include "hw/mem/nvdimm.h" +#include "sysemu/device_tree.h" +#include 
<libfdt.h>
+#include "hw/core/sysbus-fdt.h" +#include "hw/platform-bus.h" +#include "hw/display/ramfb.h" +#include "hw/mem/pc-dimm.h" +#include "sysemu/tpm.h" + +static void fdt_add_rtc_node(LoongArchMachineState *lams) +{ + char *nodename; + hwaddr base = VIRT_RTC_REG_BASE; + hwaddr size = VIRT_RTC_LEN; + MachineState *ms = MACHINE(lams); + + nodename = g_strdup_printf("/rtc@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "loongson,ls7a-rtc"); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", 2, base, 2, size); + g_free(nodename); +} + +static void fdt_add_uart_node(LoongArchMachineState *lams) +{ + char *nodename; + hwaddr base = VIRT_UART_BASE; + hwaddr size = VIRT_UART_SIZE; + MachineState *ms = MACHINE(lams); + + nodename = g_strdup_printf("/serial@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "ns16550a"); + qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", 0x0, base, 0x0, size); + qemu_fdt_setprop_cell(ms->fdt, nodename, "clock-frequency", 100000000); + qemu_fdt_setprop_string(ms->fdt, "/chosen", "stdout-path", nodename); + g_free(nodename); +} + +static void create_fdt(LoongArchMachineState *lams) +{ + MachineState *ms = MACHINE(lams); + + ms->fdt = create_device_tree(&lams->fdt_size); + if (!ms->fdt) { + error_report("create_device_tree() failed"); + exit(1); + } + + /* Header */ + qemu_fdt_setprop_string(ms->fdt, "/", "compatible", + "linux,dummy-loongson3"); + qemu_fdt_setprop_cell(ms->fdt, "/", "#address-cells", 0x2); + qemu_fdt_setprop_cell(ms->fdt, "/", "#size-cells", 0x2); + qemu_fdt_add_subnode(ms->fdt, "/chosen"); +} + +static void fdt_add_cpu_nodes(const LoongArchMachineState *lams) +{ + int num; + const MachineState *ms = MACHINE(lams); + int smp_cpus = ms->smp.cpus; + + qemu_fdt_add_subnode(ms->fdt, "/cpus"); + qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", 0x1); + qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0); + + /* cpu nodes */ + for (num = smp_cpus - 1; num >= 0; num--) { + char *nodename = g_strdup_printf("/cpus/cpu@%d", num); + LoongArchCPU *cpu = LOONGARCH_CPU(qemu_get_cpu(num)); + + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "cpu"); + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", + cpu->dtb_compatible); + qemu_fdt_setprop_cell(ms->fdt, nodename, "reg", num); + qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", + qemu_fdt_alloc_phandle(ms->fdt)); + g_free(nodename); + } + + /*cpu map */ + qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map"); + + for (num = smp_cpus - 1; num >= 0; num--) { + char *cpu_path = g_strdup_printf("/cpus/cpu@%d", num); + char *map_path; + + if (ms->smp.threads > 1) { + map_path = g_strdup_printf( + "/cpus/cpu-map/socket%d/core%d/thread%d", + num / (ms->smp.cores * ms->smp.threads), + (num / ms->smp.threads) % ms->smp.cores, + num % ms->smp.threads); + } else { + map_path = g_strdup_printf( + "/cpus/cpu-map/socket%d/core%d", + num / ms->smp.cores, + num % ms->smp.cores); + } + qemu_fdt_add_path(ms->fdt, map_path); + qemu_fdt_setprop_phandle(ms->fdt, map_path, "cpu", cpu_path); + + g_free(map_path); + g_free(cpu_path); + } +} + +static void fdt_add_fw_cfg_node(const LoongArchMachineState *lams) +{ + char *nodename; + hwaddr base = VIRT_FWCFG_BASE; + const MachineState *ms = MACHINE(lams); + + nodename = g_strdup_printf("/fw_cfg@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, nodename); + 
qemu_fdt_setprop_string(ms->fdt, nodename, + "compatible", "qemu,fw-cfg-mmio"); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, base, 2, 0x18); + qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0); + g_free(nodename); +} + +static void fdt_add_pcie_node(const LoongArchMachineState *lams) +{ + char *nodename; + hwaddr base_mmio = VIRT_PCI_MEM_BASE; + hwaddr size_mmio = VIRT_PCI_MEM_SIZE; + hwaddr base_pio = VIRT_PCI_IO_BASE; + hwaddr size_pio = VIRT_PCI_IO_SIZE; + hwaddr base_pcie = VIRT_PCI_CFG_BASE; + hwaddr size_pcie = VIRT_PCI_CFG_SIZE; + hwaddr base = base_pcie; + + const MachineState *ms = MACHINE(lams); + + nodename = g_strdup_printf("/pcie@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, + "compatible", "pci-host-ecam-generic"); + qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "pci"); + qemu_fdt_setprop_cell(ms->fdt, nodename, "#address-cells", 3); + qemu_fdt_setprop_cell(ms->fdt, nodename, "#size-cells", 2); + qemu_fdt_setprop_cell(ms->fdt, nodename, "linux,pci-domain", 0); + qemu_fdt_setprop_cells(ms->fdt, nodename, "bus-range", 0, + PCIE_MMCFG_BUS(VIRT_PCI_CFG_SIZE - 1)); + qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, base_pcie, 2, size_pcie); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "ranges", + 1, FDT_PCI_RANGE_IOPORT, 2, VIRT_PCI_IO_OFFSET, + 2, base_pio, 2, size_pio, + 1, FDT_PCI_RANGE_MMIO, 2, base_mmio, + 2, base_mmio, 2, size_mmio); + g_free(nodename); +} + +static void fdt_add_irqchip_node(LoongArchMachineState *lams) +{ + MachineState *ms = MACHINE(lams); + char *nodename; + uint32_t irqchip_phandle; + + irqchip_phandle = qemu_fdt_alloc_phandle(ms->fdt); + qemu_fdt_setprop_cell(ms->fdt, "/", "interrupt-parent", irqchip_phandle); + + nodename = g_strdup_printf("/intc@%lx", VIRT_IOAPIC_REG_BASE); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 3); + qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(ms->fdt, nodename, "#address-cells", 0x2); + qemu_fdt_setprop_cell(ms->fdt, nodename, "#size-cells", 0x2); + qemu_fdt_setprop(ms->fdt, nodename, "ranges", NULL, 0); + + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", + "loongarch,ls7a"); + + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, VIRT_IOAPIC_REG_BASE, + 2, PCH_PIC_ROUTE_ENTRY_OFFSET); + + qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", irqchip_phandle); + g_free(nodename); +} + +#define PM_BASE 0x10080000 +#define PM_SIZE 0x100 +#define PM_CTRL 0x10 + +static void virt_build_smbios(LoongArchMachineState *lams) +{ + MachineState *ms = MACHINE(lams); + MachineClass *mc = MACHINE_GET_CLASS(lams); + uint8_t *smbios_tables, *smbios_anchor; + size_t smbios_tables_len, smbios_anchor_len; + const char *product = "QEMU Virtual Machine"; + + if (!lams->fw_cfg) { + return; + } + + smbios_set_defaults("QEMU", product, mc->name, false, + true, SMBIOS_ENTRY_POINT_TYPE_64); + + smbios_get_tables(ms, NULL, 0, &smbios_tables, &smbios_tables_len, + &smbios_anchor, &smbios_anchor_len, &error_fatal); + + if (smbios_anchor) { + fw_cfg_add_file(lams->fw_cfg, "etc/smbios/smbios-tables", + smbios_tables, smbios_tables_len); + fw_cfg_add_file(lams->fw_cfg, "etc/smbios/smbios-anchor", + smbios_anchor, smbios_anchor_len); + } +} + +static void virt_machine_done(Notifier *notifier, void *data) +{ + LoongArchMachineState *lams = 
container_of(notifier, + LoongArchMachineState, machine_done); + virt_build_smbios(lams); + loongarch_acpi_setup(lams); +} + +struct memmap_entry { + uint64_t address; + uint64_t length; + uint32_t type; + uint32_t reserved; +}; + +static struct memmap_entry *memmap_table; +static unsigned memmap_entries; + +static void memmap_add_entry(uint64_t address, uint64_t length, uint32_t type) +{ + /* Ensure there are no duplicate entries. */ + for (unsigned i = 0; i < memmap_entries; i++) { + assert(memmap_table[i].address != address); + } + + memmap_table = g_renew(struct memmap_entry, memmap_table, + memmap_entries + 1); + memmap_table[memmap_entries].address = cpu_to_le64(address); + memmap_table[memmap_entries].length = cpu_to_le64(length); + memmap_table[memmap_entries].type = cpu_to_le32(type); + memmap_table[memmap_entries].reserved = 0; + memmap_entries++; +} + +/* + * This is a placeholder for missing ACPI, + * and will eventually be replaced. + */ +static uint64_t loongarch_virt_pm_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + +static void loongarch_virt_pm_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + if (addr != PM_CTRL) { + return; + } + + switch (val) { + case 0x00: + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + return; + case 0xff: + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + return; + default: + return; + } +} + +static const MemoryRegionOps loongarch_virt_pm_ops = { + .read = loongarch_virt_pm_read, + .write = loongarch_virt_pm_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1 + } +}; + +static struct _loaderparams { + uint64_t ram_size; + const char *kernel_filename; + const char *kernel_cmdline; + const char *initrd_filename; +} loaderparams; + +static uint64_t cpu_loongarch_virt_to_phys(void *opaque, uint64_t addr) +{ + return addr & 0x1fffffffll; +} + +static int64_t load_kernel_info(void) +{ + uint64_t kernel_entry, kernel_low, kernel_high; + ssize_t kernel_size; + + kernel_size = load_elf(loaderparams.kernel_filename, NULL, + cpu_loongarch_virt_to_phys, NULL, + &kernel_entry, &kernel_low, + &kernel_high, NULL, 0, + EM_LOONGARCH, 1, 0); + + if (kernel_size < 0) { + error_report("could not load kernel '%s': %s", + loaderparams.kernel_filename, + load_elf_strerror(kernel_size)); + exit(1); + } + return kernel_entry; +} + +static DeviceState *create_acpi_ged(DeviceState *pch_pic, LoongArchMachineState *lams) +{ + DeviceState *dev; + MachineState *ms = MACHINE(lams); + uint32_t event = ACPI_GED_PWR_DOWN_EVT; + + if (ms->ram_slots) { + event |= ACPI_GED_MEM_HOTPLUG_EVT; + } + dev = qdev_new(TYPE_ACPI_GED); + qdev_prop_set_uint32(dev, "ged-event", event); + + /* ged event */ + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, VIRT_GED_EVT_ADDR); + /* memory hotplug */ + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, VIRT_GED_MEM_ADDR); + /* ged regs used for reset and power down */ + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, VIRT_GED_REG_ADDR); + + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, + qdev_get_gpio_in(pch_pic, VIRT_SCI_IRQ - PCH_PIC_IRQ_OFFSET)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + return dev; +} + +static DeviceState *create_platform_bus(DeviceState *pch_pic) +{ + DeviceState *dev; + SysBusDevice *sysbus; + int i, irq; + MemoryRegion *sysmem = get_system_memory(); + + dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE); + dev->id = g_strdup(TYPE_PLATFORM_BUS_DEVICE); + qdev_prop_set_uint32(dev, "num_irqs", VIRT_PLATFORM_BUS_NUM_IRQS); + 
qdev_prop_set_uint32(dev, "mmio_size", VIRT_PLATFORM_BUS_SIZE); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + sysbus = SYS_BUS_DEVICE(dev); + for (i = 0; i < VIRT_PLATFORM_BUS_NUM_IRQS; i++) { + irq = VIRT_PLATFORM_BUS_IRQ - PCH_PIC_IRQ_OFFSET + i; + sysbus_connect_irq(sysbus, i, qdev_get_gpio_in(pch_pic, irq)); + } + + memory_region_add_subregion(sysmem, + VIRT_PLATFORM_BUS_BASEADDRESS, + sysbus_mmio_get_region(sysbus, 0)); + return dev; +} + +static void loongarch_devices_init(DeviceState *pch_pic, LoongArchMachineState *lams) +{ + DeviceState *gpex_dev; + SysBusDevice *d; + PCIBus *pci_bus; + MemoryRegion *ecam_alias, *ecam_reg, *pio_alias, *pio_reg; + MemoryRegion *mmio_alias, *mmio_reg, *pm_mem; + int i; + + gpex_dev = qdev_new(TYPE_GPEX_HOST); + d = SYS_BUS_DEVICE(gpex_dev); + sysbus_realize_and_unref(d, &error_fatal); + pci_bus = PCI_HOST_BRIDGE(gpex_dev)->bus; + lams->pci_bus = pci_bus; + + /* Map only part size_ecam bytes of ECAM space */ + ecam_alias = g_new0(MemoryRegion, 1); + ecam_reg = sysbus_mmio_get_region(d, 0); + memory_region_init_alias(ecam_alias, OBJECT(gpex_dev), "pcie-ecam", + ecam_reg, 0, VIRT_PCI_CFG_SIZE); + memory_region_add_subregion(get_system_memory(), VIRT_PCI_CFG_BASE, + ecam_alias); + + /* Map PCI mem space */ + mmio_alias = g_new0(MemoryRegion, 1); + mmio_reg = sysbus_mmio_get_region(d, 1); + memory_region_init_alias(mmio_alias, OBJECT(gpex_dev), "pcie-mmio", + mmio_reg, VIRT_PCI_MEM_BASE, VIRT_PCI_MEM_SIZE); + memory_region_add_subregion(get_system_memory(), VIRT_PCI_MEM_BASE, + mmio_alias); + + /* Map PCI IO port space. */ + pio_alias = g_new0(MemoryRegion, 1); + pio_reg = sysbus_mmio_get_region(d, 2); + memory_region_init_alias(pio_alias, OBJECT(gpex_dev), "pcie-io", pio_reg, + VIRT_PCI_IO_OFFSET, VIRT_PCI_IO_SIZE); + memory_region_add_subregion(get_system_memory(), VIRT_PCI_IO_BASE, + pio_alias); + + for (i = 0; i < GPEX_NUM_IRQS; i++) { + sysbus_connect_irq(d, i, + qdev_get_gpio_in(pch_pic, 16 + i)); + gpex_set_irq_num(GPEX_HOST(gpex_dev), i, 16 + i); + } + + serial_mm_init(get_system_memory(), VIRT_UART_BASE, 0, + qdev_get_gpio_in(pch_pic, + VIRT_UART_IRQ - PCH_PIC_IRQ_OFFSET), + 115200, serial_hd(0), DEVICE_LITTLE_ENDIAN); + fdt_add_uart_node(lams); + + /* Network init */ + for (i = 0; i < nb_nics; i++) { + NICInfo *nd = &nd_table[i]; + + if (!nd->model) { + nd->model = g_strdup("virtio"); + } + + pci_nic_init_nofail(nd, pci_bus, nd->model, NULL); + } + + /* + * There are some invalid guest memory access. + * Create some unimplemented devices to emulate this. 
+ */ + create_unimplemented_device("pci-dma-cfg", 0x1001041c, 0x4); + sysbus_create_simple("ls7a_rtc", VIRT_RTC_REG_BASE, + qdev_get_gpio_in(pch_pic, + VIRT_RTC_IRQ - PCH_PIC_IRQ_OFFSET)); + fdt_add_rtc_node(lams); + + pm_mem = g_new(MemoryRegion, 1); + memory_region_init_io(pm_mem, NULL, &loongarch_virt_pm_ops, + NULL, "loongarch_virt_pm", PM_SIZE); + memory_region_add_subregion(get_system_memory(), PM_BASE, pm_mem); + /* acpi ged */ + lams->acpi_ged = create_acpi_ged(pch_pic, lams); + /* platform bus */ + lams->platform_bus_dev = create_platform_bus(pch_pic); +} + +static void loongarch_irq_init(LoongArchMachineState *lams) +{ + MachineState *ms = MACHINE(lams); + DeviceState *pch_pic, *pch_msi, *cpudev; + DeviceState *ipi, *extioi; + SysBusDevice *d; + LoongArchCPU *lacpu; + CPULoongArchState *env; + CPUState *cpu_state; + int cpu, pin, i; + + ipi = qdev_new(TYPE_LOONGARCH_IPI); + sysbus_realize_and_unref(SYS_BUS_DEVICE(ipi), &error_fatal); + + extioi = qdev_new(TYPE_LOONGARCH_EXTIOI); + sysbus_realize_and_unref(SYS_BUS_DEVICE(extioi), &error_fatal); + + /* + * The connection of interrupts: + * +-----+ +---------+ +-------+ + * | IPI |--> | CPUINTC | <-- | Timer | + * +-----+ +---------+ +-------+ + * ^ + * | + * +---------+ + * | EIOINTC | + * +---------+ + * ^ ^ + * | | + * +---------+ +---------+ + * | PCH-PIC | | PCH-MSI | + * +---------+ +---------+ + * ^ ^ ^ + * | | | + * +--------+ +---------+ +---------+ + * | UARTs | | Devices | | Devices | + * +--------+ +---------+ +---------+ + */ + for (cpu = 0; cpu < ms->smp.cpus; cpu++) { + cpu_state = qemu_get_cpu(cpu); + cpudev = DEVICE(cpu_state); + lacpu = LOONGARCH_CPU(cpu_state); + env = &(lacpu->env); + + /* connect ipi irq to cpu irq */ + qdev_connect_gpio_out(ipi, cpu, qdev_get_gpio_in(cpudev, IRQ_IPI)); + /* IPI iocsr memory region */ + memory_region_add_subregion(&env->system_iocsr, SMP_IPI_MAILBOX, + sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), + cpu * 2)); + memory_region_add_subregion(&env->system_iocsr, MAIL_SEND_ADDR, + sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), + cpu * 2 + 1)); + /* extioi iocsr memory region */ + memory_region_add_subregion(&env->system_iocsr, APIC_BASE, + sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), + cpu)); + } + + /* + * connect ext irq to the cpu irq + * cpu_pin[9:2] <= intc_pin[7:0] + */ + for (cpu = 0; cpu < ms->smp.cpus; cpu++) { + cpudev = DEVICE(qemu_get_cpu(cpu)); + for (pin = 0; pin < LS3A_INTC_IP; pin++) { + qdev_connect_gpio_out(extioi, (cpu * 8 + pin), + qdev_get_gpio_in(cpudev, pin + 2)); + } + } + + pch_pic = qdev_new(TYPE_LOONGARCH_PCH_PIC); + d = SYS_BUS_DEVICE(pch_pic); + sysbus_realize_and_unref(d, &error_fatal); + memory_region_add_subregion(get_system_memory(), VIRT_IOAPIC_REG_BASE, + sysbus_mmio_get_region(d, 0)); + memory_region_add_subregion(get_system_memory(), + VIRT_IOAPIC_REG_BASE + PCH_PIC_ROUTE_ENTRY_OFFSET, + sysbus_mmio_get_region(d, 1)); + memory_region_add_subregion(get_system_memory(), + VIRT_IOAPIC_REG_BASE + PCH_PIC_INT_STATUS_LO, + sysbus_mmio_get_region(d, 2)); + + /* Connect 64 pch_pic irqs to extioi */ + for (int i = 0; i < PCH_PIC_IRQ_NUM; i++) { + qdev_connect_gpio_out(DEVICE(d), i, qdev_get_gpio_in(extioi, i)); + } + + pch_msi = qdev_new(TYPE_LOONGARCH_PCH_MSI); + qdev_prop_set_uint32(pch_msi, "msi_irq_base", PCH_MSI_IRQ_START); + d = SYS_BUS_DEVICE(pch_msi); + sysbus_realize_and_unref(d, &error_fatal); + sysbus_mmio_map(d, 0, VIRT_PCH_MSI_ADDR_LOW); + for (i = 0; i < PCH_MSI_IRQ_NUM; i++) { + /* Connect 192 pch_msi irqs to extioi */ + 
qdev_connect_gpio_out(DEVICE(d), i, + qdev_get_gpio_in(extioi, i + PCH_MSI_IRQ_START)); + } + + loongarch_devices_init(pch_pic, lams); +} + +static void loongarch_firmware_init(LoongArchMachineState *lams) +{ + char *filename = MACHINE(lams)->firmware; + char *bios_name = NULL; + int bios_size; + + lams->bios_loaded = false; + if (filename) { + bios_name = qemu_find_file(QEMU_FILE_TYPE_BIOS, filename); + if (!bios_name) { + error_report("Could not find ROM image '%s'", filename); + exit(1); + } + + bios_size = load_image_targphys(bios_name, VIRT_BIOS_BASE, VIRT_BIOS_SIZE); + if (bios_size < 0) { + error_report("Could not load ROM image '%s'", bios_name); + exit(1); + } + + g_free(bios_name); + + memory_region_init_ram(&lams->bios, NULL, "loongarch.bios", + VIRT_BIOS_SIZE, &error_fatal); + memory_region_set_readonly(&lams->bios, true); + memory_region_add_subregion(get_system_memory(), VIRT_BIOS_BASE, &lams->bios); + lams->bios_loaded = true; + } + +} + +static void reset_load_elf(void *opaque) +{ + LoongArchCPU *cpu = opaque; + CPULoongArchState *env = &cpu->env; + + cpu_reset(CPU(cpu)); + if (env->load_elf) { + cpu_set_pc(CPU(cpu), env->elf_address); + } +} + +static void fw_cfg_add_kernel_info(FWCfgState *fw_cfg) +{ + /* + * Expose the kernel, the command line, and the initrd in fw_cfg. + * We don't process them here at all, it's all left to the + * firmware. + */ + load_image_to_fw_cfg(fw_cfg, + FW_CFG_KERNEL_SIZE, FW_CFG_KERNEL_DATA, + loaderparams.kernel_filename, + false); + + if (loaderparams.initrd_filename) { + load_image_to_fw_cfg(fw_cfg, + FW_CFG_INITRD_SIZE, FW_CFG_INITRD_DATA, + loaderparams.initrd_filename, false); + } + + if (loaderparams.kernel_cmdline) { + fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, + strlen(loaderparams.kernel_cmdline) + 1); + fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, + loaderparams.kernel_cmdline); + } +} + +static void loongarch_firmware_boot(LoongArchMachineState *lams) +{ + fw_cfg_add_kernel_info(lams->fw_cfg); +} + +static void loongarch_direct_kernel_boot(LoongArchMachineState *lams) +{ + MachineState *machine = MACHINE(lams); + int64_t kernel_addr = 0; + LoongArchCPU *lacpu; + int i; + + kernel_addr = load_kernel_info(); + if (!machine->firmware) { + for (i = 0; i < machine->smp.cpus; i++) { + lacpu = LOONGARCH_CPU(qemu_get_cpu(i)); + lacpu->env.load_elf = true; + lacpu->env.elf_address = kernel_addr; + } + } +} + +static void loongarch_init(MachineState *machine) +{ + LoongArchCPU *lacpu; + const char *cpu_model = machine->cpu_type; + ram_addr_t offset = 0; + ram_addr_t ram_size = machine->ram_size; + uint64_t highram_size = 0; + MemoryRegion *address_space_mem = get_system_memory(); + LoongArchMachineState *lams = LOONGARCH_MACHINE(machine); + int i; + hwaddr fdt_base; + + if (!cpu_model) { + cpu_model = LOONGARCH_CPU_TYPE_NAME("la464"); + } + + if (!strstr(cpu_model, "la464")) { + error_report("LoongArch/TCG needs cpu type la464"); + exit(1); + } + + if (ram_size < 1 * GiB) { + error_report("ram_size must be greater than 1G."); + exit(1); + } + create_fdt(lams); + /* Init CPUs */ + for (i = 0; i < machine->smp.cpus; i++) { + cpu_create(machine->cpu_type); + } + fdt_add_cpu_nodes(lams); + /* Add memory region */ + memory_region_init_alias(&lams->lowmem, NULL, "loongarch.lowram", + machine->ram, 0, 256 * MiB); + memory_region_add_subregion(address_space_mem, offset, &lams->lowmem); + offset += 256 * MiB; + memmap_add_entry(0, 256 * MiB, 1); + highram_size = ram_size - 256 * MiB; + memory_region_init_alias(&lams->highmem, NULL, 
"loongarch.highmem", + machine->ram, offset, highram_size); + memory_region_add_subregion(address_space_mem, 0x90000000, &lams->highmem); + memmap_add_entry(0x90000000, highram_size, 1); + + /* initialize device memory address space */ + if (machine->ram_size < machine->maxram_size) { + machine->device_memory = g_malloc0(sizeof(*machine->device_memory)); + ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size; + + if (machine->ram_slots > ACPI_MAX_RAM_SLOTS) { + error_report("unsupported amount of memory slots: %"PRIu64, + machine->ram_slots); + exit(EXIT_FAILURE); + } + + if (QEMU_ALIGN_UP(machine->maxram_size, + TARGET_PAGE_SIZE) != machine->maxram_size) { + error_report("maximum memory size must by aligned to multiple of " + "%d bytes", TARGET_PAGE_SIZE); + exit(EXIT_FAILURE); + } + /* device memory base is the top of high memory address. */ + machine->device_memory->base = 0x90000000 + highram_size; + machine->device_memory->base = + ROUND_UP(machine->device_memory->base, 1 * GiB); + + memory_region_init(&machine->device_memory->mr, OBJECT(lams), + "device-memory", device_mem_size); + memory_region_add_subregion(address_space_mem, machine->device_memory->base, + &machine->device_memory->mr); + } + + /* Add isa io region */ + memory_region_init_alias(&lams->isa_io, NULL, "isa-io", + get_system_io(), 0, VIRT_ISA_IO_SIZE); + memory_region_add_subregion(address_space_mem, VIRT_ISA_IO_BASE, + &lams->isa_io); + /* load the BIOS image. */ + loongarch_firmware_init(lams); + + /* fw_cfg init */ + lams->fw_cfg = loongarch_fw_cfg_init(ram_size, machine); + rom_set_fw(lams->fw_cfg); + if (lams->fw_cfg != NULL) { + fw_cfg_add_file(lams->fw_cfg, "etc/memmap", + memmap_table, + sizeof(struct memmap_entry) * (memmap_entries)); + } + fdt_add_fw_cfg_node(lams); + loaderparams.ram_size = ram_size; + loaderparams.kernel_filename = machine->kernel_filename; + loaderparams.kernel_cmdline = machine->kernel_cmdline; + loaderparams.initrd_filename = machine->initrd_filename; + /* load the kernel. */ + if (loaderparams.kernel_filename) { + if (lams->bios_loaded) { + loongarch_firmware_boot(lams); + } else { + loongarch_direct_kernel_boot(lams); + } + } + /* register reset function */ + for (i = 0; i < machine->smp.cpus; i++) { + lacpu = LOONGARCH_CPU(qemu_get_cpu(i)); + qemu_register_reset(reset_load_elf, lacpu); + } + /* Initialize the IO interrupt subsystem */ + loongarch_irq_init(lams); + fdt_add_irqchip_node(lams); + platform_bus_add_all_fdt_nodes(machine->fdt, "/intc", + VIRT_PLATFORM_BUS_BASEADDRESS, + VIRT_PLATFORM_BUS_SIZE, + VIRT_PLATFORM_BUS_IRQ); + lams->machine_done.notify = virt_machine_done; + qemu_add_machine_init_done_notifier(&lams->machine_done); + fdt_add_pcie_node(lams); + /* + * Since lowmem region starts from 0 and Linux kernel legacy start address + * at 2 MiB, FDT base address is located at 1 MiB to avoid NULL pointer + * access. FDT size limit with 1 MiB. + * Put the FDT into the memory map as a ROM image: this will ensure + * the FDT is copied again upon reset, even if addr points into RAM. 
+ */ + fdt_base = 1 * MiB; + qemu_fdt_dumpdtb(machine->fdt, lams->fdt_size); + rom_add_blob_fixed("fdt", machine->fdt, lams->fdt_size, fdt_base); +} + +bool loongarch_is_acpi_enabled(LoongArchMachineState *lams) +{ + if (lams->acpi == ON_OFF_AUTO_OFF) { + return false; + } + return true; +} + +static void loongarch_get_acpi(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LoongArchMachineState *lams = LOONGARCH_MACHINE(obj); + OnOffAuto acpi = lams->acpi; + + visit_type_OnOffAuto(v, name, &acpi, errp); +} + +static void loongarch_set_acpi(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LoongArchMachineState *lams = LOONGARCH_MACHINE(obj); + + visit_type_OnOffAuto(v, name, &lams->acpi, errp); +} + +static void loongarch_machine_initfn(Object *obj) +{ + LoongArchMachineState *lams = LOONGARCH_MACHINE(obj); + + lams->acpi = ON_OFF_AUTO_AUTO; + lams->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6); + lams->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8); +} + +static bool memhp_type_supported(DeviceState *dev) +{ + /* we only support pc dimm now */ + return object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) && + !object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); +} + +static void virt_mem_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + pc_dimm_pre_plug(PC_DIMM(dev), MACHINE(hotplug_dev), NULL, errp); +} + +static void virt_machine_device_pre_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + if (memhp_type_supported(dev)) { + virt_mem_pre_plug(hotplug_dev, dev, errp); + } +} + +static void virt_mem_unplug_request(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + LoongArchMachineState *lams = LOONGARCH_MACHINE(hotplug_dev); + + /* the acpi ged is always exist */ + hotplug_handler_unplug_request(HOTPLUG_HANDLER(lams->acpi_ged), dev, + errp); +} + +static void virt_machine_device_unplug_request(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + if (memhp_type_supported(dev)) { + virt_mem_unplug_request(hotplug_dev, dev, errp); + } +} + +static void virt_mem_unplug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + LoongArchMachineState *lams = LOONGARCH_MACHINE(hotplug_dev); + + hotplug_handler_unplug(HOTPLUG_HANDLER(lams->acpi_ged), dev, errp); + pc_dimm_unplug(PC_DIMM(dev), MACHINE(lams)); + qdev_unrealize(dev); +} + +static void virt_machine_device_unplug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + if (memhp_type_supported(dev)) { + virt_mem_unplug(hotplug_dev, dev, errp); + } +} + +static void virt_mem_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + LoongArchMachineState *lams = LOONGARCH_MACHINE(hotplug_dev); + + pc_dimm_plug(PC_DIMM(dev), MACHINE(lams)); + hotplug_handler_plug(HOTPLUG_HANDLER(lams->acpi_ged), + dev, &error_abort); +} + +static void loongarch_machine_device_plug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + LoongArchMachineState *lams = LOONGARCH_MACHINE(hotplug_dev); + MachineClass *mc = MACHINE_GET_CLASS(lams); + + if (device_is_dynamic_sysbus(mc, dev)) { + if (lams->platform_bus_dev) { + platform_bus_link_device(PLATFORM_BUS_DEVICE(lams->platform_bus_dev), + SYS_BUS_DEVICE(dev)); + } + } else if (memhp_type_supported(dev)) { + virt_mem_plug(hotplug_dev, dev, errp); + } +} + +static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine, + DeviceState *dev) +{ + MachineClass *mc = MACHINE_GET_CLASS(machine); + + if 
(device_is_dynamic_sysbus(mc, dev) || + memhp_type_supported(dev)) { + return HOTPLUG_HANDLER(machine); + } + return NULL; +} + +static void loongarch_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); + + mc->desc = "Loongson-3A5000 LS7A1000 machine"; + mc->init = loongarch_init; + mc->default_ram_size = 1 * GiB; + mc->default_cpu_type = LOONGARCH_CPU_TYPE_NAME("la464"); + mc->default_ram_id = "loongarch.ram"; + mc->max_cpus = LOONGARCH_MAX_VCPUS; + mc->is_default = 1; + mc->default_kernel_irqchip_split = false; + mc->block_default_type = IF_VIRTIO; + mc->default_boot_order = "c"; + mc->no_cdrom = 1; + mc->get_hotplug_handler = virt_machine_get_hotplug_handler; + hc->plug = loongarch_machine_device_plug_cb; + hc->pre_plug = virt_machine_device_pre_plug; + hc->unplug_request = virt_machine_device_unplug_request; + hc->unplug = virt_machine_device_unplug; + + object_class_property_add(oc, "acpi", "OnOffAuto", + loongarch_get_acpi, loongarch_set_acpi, + NULL, NULL); + object_class_property_set_description(oc, "acpi", + "Enable ACPI"); + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE); +#ifdef CONFIG_TPM + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS); +#endif +} + +static const TypeInfo loongarch_machine_types[] = { + { + .name = TYPE_LOONGARCH_MACHINE, + .parent = TYPE_MACHINE, + .instance_size = sizeof(LoongArchMachineState), + .class_init = loongarch_class_init, + .instance_init = loongarch_machine_initfn, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + }, + } +}; + +DEFINE_TYPES(loongarch_machine_types) diff --git a/hw/m68k/bootinfo.h b/hw/m68k/bootinfo.h index adbf0c5521..a3d37e3c80 100644 --- a/hw/m68k/bootinfo.h +++ b/hw/m68k/bootinfo.h @@ -12,48 +12,66 @@ #ifndef HW_M68K_BOOTINFO_H #define HW_M68K_BOOTINFO_H -#define BOOTINFO0(as, base, id) \ +#define BOOTINFO0(base, id) \ do { \ - stw_phys(as, base, id); \ + stw_p(base, id); \ base += 2; \ - stw_phys(as, base, sizeof(struct bi_record)); \ + stw_p(base, sizeof(struct bi_record)); \ base += 2; \ } while (0) -#define BOOTINFO1(as, base, id, value) \ +#define BOOTINFO1(base, id, value) \ do { \ - stw_phys(as, base, id); \ + stw_p(base, id); \ base += 2; \ - stw_phys(as, base, sizeof(struct bi_record) + 4); \ + stw_p(base, sizeof(struct bi_record) + 4); \ base += 2; \ - stl_phys(as, base, value); \ + stl_p(base, value); \ base += 4; \ } while (0) -#define BOOTINFO2(as, base, id, value1, value2) \ +#define BOOTINFO2(base, id, value1, value2) \ do { \ - stw_phys(as, base, id); \ + stw_p(base, id); \ base += 2; \ - stw_phys(as, base, sizeof(struct bi_record) + 8); \ + stw_p(base, sizeof(struct bi_record) + 8); \ base += 2; \ - stl_phys(as, base, value1); \ + stl_p(base, value1); \ base += 4; \ - stl_phys(as, base, value2); \ + stl_p(base, value2); \ base += 4; \ } while (0) -#define BOOTINFOSTR(as, base, id, string) \ +#define BOOTINFOSTR(base, id, string) \ do { \ int i; \ - stw_phys(as, base, id); \ + stw_p(base, id); \ base += 2; \ - stw_phys(as, base, \ - (sizeof(struct bi_record) + strlen(string) + 2) & ~1); \ + stw_p(base, \ + (sizeof(struct bi_record) + strlen(string) + \ + 1 /* null termination */ + 3 /* padding */) & ~3); \ base += 2; \ for (i = 0; string[i]; i++) { \ - stb_phys(as, base++, string[i]); \ + stb_p(base++, string[i]); \ } \ - stb_phys(as, base++, 0); \ - base = (parameters_base + 1) & ~1; \ + stb_p(base++, 0); \ + base = QEMU_ALIGN_PTR_UP(base, 4); \ + } while (0) + +#define 
BOOTINFODATA(base, id, data, len) \ + do { \ + int i; \ + stw_p(base, id); \ + base += 2; \ + stw_p(base, \ + (sizeof(struct bi_record) + len + \ + 2 /* length field */ + 3 /* padding */) & ~3); \ + base += 2; \ + stw_p(base, len); \ + base += 2; \ + for (i = 0; i < len; ++i) { \ + stb_p(base++, data[i]); \ + } \ + base = QEMU_ALIGN_PTR_UP(base, 4); \ } while (0) #endif diff --git a/hw/m68k/mcf5206.c b/hw/m68k/mcf5206.c index 6d93d761a5..2ab1b4f059 100644 --- a/hw/m68k/mcf5206.c +++ b/hw/m68k/mcf5206.c @@ -152,7 +152,7 @@ static m5206_timer_state *m5206_timer_init(qemu_irq irq) m5206_timer_state *s; s = g_new0(m5206_timer_state, 1); - s->timer = ptimer_init(m5206_timer_trigger, s, PTIMER_POLICY_DEFAULT); + s->timer = ptimer_init(m5206_timer_trigger, s, PTIMER_POLICY_LEGACY); s->irq = irq; m5206_timer_reset(s); return s; diff --git a/hw/m68k/mcf5208.c b/hw/m68k/mcf5208.c index 93812ee206..be1033f84f 100644 --- a/hw/m68k/mcf5208.c +++ b/hw/m68k/mcf5208.c @@ -11,7 +11,6 @@ #include "qemu/error-report.h" #include "qemu/log.h" #include "qapi/error.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "cpu.h" #include "hw/irq.h" @@ -198,7 +197,7 @@ static void mcf5208_sys_init(MemoryRegion *address_space, qemu_irq *pic) /* Timers. */ for (i = 0; i < 2; i++) { s = g_new0(m5208_timer_state, 1); - s->timer = ptimer_init(m5208_timer_trigger, s, PTIMER_POLICY_DEFAULT); + s->timer = ptimer_init(m5208_timer_trigger, s, PTIMER_POLICY_LEGACY); memory_region_init_io(&s->iomem, NULL, &m5208_timer_ops, s, "m5208-timer", 0x00004000); memory_region_add_subregion(address_space, 0xfc080000 + 0x4000 * i, diff --git a/hw/m68k/q800.c b/hw/m68k/q800.c index ac0a13060b..9d52ca6613 100644 --- a/hw/m68k/q800.c +++ b/hw/m68k/q800.c @@ -22,12 +22,13 @@ #include "qemu/osdep.h" #include "qemu/units.h" -#include "qemu-common.h" #include "qemu/datadir.h" +#include "qemu/guest-random.h" #include "sysemu/sysemu.h" #include "cpu.h" #include "hw/boards.h" #include "hw/or-irq.h" +#include "hw/nmi.h" #include "elf.h" #include "hw/loader.h" #include "ui/console.h" @@ -67,9 +68,6 @@ #define ASC_BASE (IO_BASE + 0x14000) #define SWIM_BASE (IO_BASE + 0x1E000) -#define NUBUS_SUPER_SLOT_BASE 0x60000000 -#define NUBUS_SLOT_BASE 0xf0000000 - #define SONIC_PROM_SIZE 0x1000 /* @@ -77,10 +75,17 @@ * is needed by the kernel to have early display and * thus provided by the bootloader */ -#define VIDEO_BASE 0xf9001000 +#define VIDEO_BASE 0xf9000000 #define MAC_CLOCK 3686418 +/* + * Slot 0x9 is reserved for use by the in-built framebuffer whilst only + * slots 0xc, 0xd and 0xe physically exist on the Quadra 800 + */ +#define Q800_NUBUS_SLOTS_AVAILABLE (BIT(0x9) | BIT(0xc) | BIT(0xd) | \ + BIT(0xe)) + /* * The GLUE (General Logic Unit) is an Apple custom integrated circuit chip * that performs a variety of functions (RAM management, clock generation, ...). 
@@ -96,13 +101,110 @@ struct GLUEState { SysBusDevice parent_obj; M68kCPU *cpu; uint8_t ipr; + uint8_t auxmode; + qemu_irq irqs[1]; + QEMUTimer *nmi_release; }; +#define GLUE_IRQ_IN_VIA1 0 +#define GLUE_IRQ_IN_VIA2 1 +#define GLUE_IRQ_IN_SONIC 2 +#define GLUE_IRQ_IN_ESCC 3 +#define GLUE_IRQ_IN_NMI 4 + +#define GLUE_IRQ_NUBUS_9 0 + +/* + * The GLUE logic on the Quadra 800 supports 2 different IRQ routing modes + * controlled from the VIA1 auxmode GPIO (port B bit 6) which are documented + * in NetBSD as follows: + * + * A/UX mode (Linux, NetBSD, auxmode GPIO low) + * + * Level 0: Spurious: ignored + * Level 1: Software + * Level 2: VIA2 (except ethernet, sound) + * Level 3: Ethernet + * Level 4: Serial (SCC) + * Level 5: Sound + * Level 6: VIA1 + * Level 7: NMIs: parity errors, RESET button, YANCC error + * + * Classic mode (default: used by MacOS, A/UX 3.0.1, auxmode GPIO high) + * + * Level 0: Spurious: ignored + * Level 1: VIA1 (clock, ADB) + * Level 2: VIA2 (NuBus, SCSI) + * Level 3: + * Level 4: Serial (SCC) + * Level 5: + * Level 6: + * Level 7: Non-maskable: parity errors, RESET button + * + * Note that despite references to A/UX mode in Linux and NetBSD, at least + * A/UX 3.0.1 still uses Classic mode. + */ + static void GLUE_set_irq(void *opaque, int irq, int level) { GLUEState *s = opaque; int i; + if (s->auxmode) { + /* Classic mode */ + switch (irq) { + case GLUE_IRQ_IN_VIA1: + irq = 0; + break; + + case GLUE_IRQ_IN_VIA2: + irq = 1; + break; + + case GLUE_IRQ_IN_SONIC: + /* Route to VIA2 instead */ + qemu_set_irq(s->irqs[GLUE_IRQ_NUBUS_9], level); + return; + + case GLUE_IRQ_IN_ESCC: + irq = 3; + break; + + case GLUE_IRQ_IN_NMI: + irq = 6; + break; + + default: + g_assert_not_reached(); + } + } else { + /* A/UX mode */ + switch (irq) { + case GLUE_IRQ_IN_VIA1: + irq = 5; + break; + + case GLUE_IRQ_IN_VIA2: + irq = 1; + break; + + case GLUE_IRQ_IN_SONIC: + irq = 2; + break; + + case GLUE_IRQ_IN_ESCC: + irq = 3; + break; + + case GLUE_IRQ_IN_NMI: + irq = 6; + break; + + default: + g_assert_not_reached(); + } + } + if (level) { s->ipr |= 1 << irq; } else { @@ -118,11 +220,37 @@ static void GLUE_set_irq(void *opaque, int irq, int level) m68k_set_irq_level(s->cpu, 0, 0); } +static void glue_auxmode_set_irq(void *opaque, int irq, int level) +{ + GLUEState *s = GLUE(opaque); + + s->auxmode = level; +} + +static void glue_nmi(NMIState *n, int cpu_index, Error **errp) +{ + GLUEState *s = GLUE(n); + + /* Hold NMI active for 100ms */ + GLUE_set_irq(s, GLUE_IRQ_IN_NMI, 1); + timer_mod(s->nmi_release, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 100); +} + +static void glue_nmi_release(void *opaque) +{ + GLUEState *s = GLUE(opaque); + + GLUE_set_irq(s, GLUE_IRQ_IN_NMI, 0); +} + static void glue_reset(DeviceState *dev) { GLUEState *s = GLUE(dev); s->ipr = 0; + s->auxmode = 0; + + timer_del(s->nmi_release); } static const VMStateDescription vmstate_glue = { @@ -131,6 +259,8 @@ static const VMStateDescription vmstate_glue = { .minimum_version_id = 0, .fields = (VMStateField[]) { VMSTATE_UINT8(ipr, GLUEState), + VMSTATE_UINT8(auxmode, GLUEState), + VMSTATE_TIMER_PTR(nmi_release, GLUEState), VMSTATE_END_OF_LIST(), }, }; @@ -146,20 +276,36 @@ static Property glue_properties[] = { DEFINE_PROP_END_OF_LIST(), }; +static void glue_finalize(Object *obj) +{ + GLUEState *s = GLUE(obj); + + timer_free(s->nmi_release); +} + static void glue_init(Object *obj) { DeviceState *dev = DEVICE(obj); + GLUEState *s = GLUE(dev); qdev_init_gpio_in(dev, GLUE_set_irq, 8); + qdev_init_gpio_in_named(dev, glue_auxmode_set_irq, 
"auxmode", 1); + + qdev_init_gpio_out(dev, s->irqs, 1); + + /* NMI release timer */ + s->nmi_release = timer_new_ms(QEMU_CLOCK_VIRTUAL, glue_nmi_release, s); } static void glue_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + NMIClass *nc = NMI_CLASS(klass); dc->vmsd = &vmstate_glue; dc->reset = glue_reset; device_class_set_props(dc, glue_properties); + nc->nmi_monitor_handler = glue_nmi; } static const TypeInfo glue_info = { @@ -167,7 +313,12 @@ static const TypeInfo glue_info = { .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(GLUEState), .instance_init = glue_init, + .instance_finalize = glue_finalize, .class_init = glue_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_NMI }, + { } + }, }; static void main_cpu_reset(void *opaque) @@ -180,6 +331,13 @@ static void main_cpu_reset(void *opaque) cpu->env.pc = ldl_phys(cs->as, 4); } +static void rerandomize_rng_seed(void *opaque) +{ + struct bi_record *rng_seed = opaque; + qemu_guest_getrandom_nofail((void *)rng_seed->data + 2, + be16_to_cpu(*(uint16_t *)rng_seed->data)); +} + static uint8_t fake_mac_rom[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -217,6 +375,7 @@ static void q800_init(MachineState *machine) uint8_t *prom; const int io_slice_nb = (IO_SIZE / IO_SLICE) - 1; int i, checksum; + MacFbMode *macfb_mode; ram_addr_t ram_size = machine->ram_size; const char *kernel_filename = machine->kernel_filename; const char *initrd_filename = machine->initrd_filename; @@ -225,7 +384,7 @@ static void q800_init(MachineState *machine) hwaddr parameters_base; CPUState *cs; DeviceState *dev; - DeviceState *via_dev; + DeviceState *via1_dev, *via2_dev; DeviceState *escc_orgate; SysBusESPState *sysbus_esp; ESPState *esp; @@ -234,6 +393,7 @@ static void q800_init(MachineState *machine) NubusBus *nubus; DeviceState *glue; DriveInfo *dinfo; + uint8_t rng_seed[32]; linux_boot = (kernel_filename != NULL); @@ -270,28 +430,33 @@ static void q800_init(MachineState *machine) object_property_set_link(OBJECT(glue), "cpu", OBJECT(cpu), &error_abort); sysbus_realize_and_unref(SYS_BUS_DEVICE(glue), &error_fatal); - /* VIA */ - - via_dev = qdev_new(TYPE_MAC_VIA); + /* VIA 1 */ + via1_dev = qdev_new(TYPE_MOS6522_Q800_VIA1); dinfo = drive_get(IF_MTD, 0, 0); if (dinfo) { - qdev_prop_set_drive(via_dev, "drive", blk_by_legacy_dinfo(dinfo)); + qdev_prop_set_drive(via1_dev, "drive", blk_by_legacy_dinfo(dinfo)); } - sysbus = SYS_BUS_DEVICE(via_dev); + sysbus = SYS_BUS_DEVICE(via1_dev); sysbus_realize_and_unref(sysbus, &error_fatal); - sysbus_mmio_map(sysbus, 0, VIA_BASE); - qdev_connect_gpio_out_named(DEVICE(sysbus), "irq", 0, - qdev_get_gpio_in(glue, 0)); - qdev_connect_gpio_out_named(DEVICE(sysbus), "irq", 1, - qdev_get_gpio_in(glue, 1)); + sysbus_mmio_map(sysbus, 1, VIA_BASE); + sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(glue, GLUE_IRQ_IN_VIA1)); + /* A/UX mode */ + qdev_connect_gpio_out(via1_dev, 0, + qdev_get_gpio_in_named(glue, "auxmode", 0)); - - adb_bus = qdev_get_child_bus(via_dev, "adb.0"); + adb_bus = qdev_get_child_bus(via1_dev, "adb.0"); dev = qdev_new(TYPE_ADB_KEYBOARD); qdev_realize_and_unref(dev, adb_bus, &error_fatal); dev = qdev_new(TYPE_ADB_MOUSE); qdev_realize_and_unref(dev, adb_bus, &error_fatal); + /* VIA 2 */ + via2_dev = qdev_new(TYPE_MOS6522_Q800_VIA2); + sysbus = SYS_BUS_DEVICE(via2_dev); + sysbus_realize_and_unref(sysbus, &error_fatal); + sysbus_mmio_map(sysbus, 1, VIA_BASE + VIA_SIZE); + sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(glue, GLUE_IRQ_IN_VIA2)); + /* MACSONIC */ if (nb_nics > 1) { @@ 
-323,7 +488,7 @@ static void q800_init(MachineState *machine) sysbus = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(sysbus, &error_fatal); sysbus_mmio_map(sysbus, 0, SONIC_BASE); - sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(glue, 2)); + sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(glue, GLUE_IRQ_IN_SONIC)); memory_region_init_rom(dp8393x_prom, NULL, "dp8393x-q800.prom", SONIC_PROM_SIZE, &error_fatal); @@ -359,7 +524,8 @@ static void q800_init(MachineState *machine) qdev_realize_and_unref(escc_orgate, NULL, &error_fatal); sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(escc_orgate, 0)); sysbus_connect_irq(sysbus, 1, qdev_get_gpio_in(escc_orgate, 1)); - qdev_connect_gpio_out(DEVICE(escc_orgate), 0, qdev_get_gpio_in(glue, 3)); + qdev_connect_gpio_out(DEVICE(escc_orgate), 0, + qdev_get_gpio_in(glue, GLUE_IRQ_IN_ESCC)); sysbus_mmio_map(sysbus, 0, SCC_BASE); /* SCSI */ @@ -375,12 +541,11 @@ static void q800_init(MachineState *machine) sysbus = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(sysbus, &error_fatal); - sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in_named(via_dev, - "via2-irq", - VIA2_IRQ_SCSI_BIT)); - sysbus_connect_irq(sysbus, 1, - qdev_get_gpio_in_named(via_dev, "via2-irq", - VIA2_IRQ_SCSI_DATA_BIT)); + /* SCSI and SCSI data IRQs are negative edge triggered */ + sysbus_connect_irq(sysbus, 0, qemu_irq_invert(qdev_get_gpio_in(via2_dev, + VIA2_IRQ_SCSI_BIT))); + sysbus_connect_irq(sysbus, 1, qemu_irq_invert(qdev_get_gpio_in(via2_dev, + VIA2_IRQ_SCSI_DATA_BIT))); sysbus_mmio_map(sysbus, 0, ESP_BASE); sysbus_mmio_map(sysbus, 1, ESP_PDMA); @@ -395,23 +560,59 @@ static void q800_init(MachineState *machine) /* NuBus */ dev = qdev_new(TYPE_MAC_NUBUS_BRIDGE); + qdev_prop_set_uint32(dev, "slot-available-mask", + Q800_NUBUS_SLOTS_AVAILABLE); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, NUBUS_SUPER_SLOT_BASE); - sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, NUBUS_SLOT_BASE); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, + MAC_NUBUS_FIRST_SLOT * NUBUS_SUPER_SLOT_SIZE); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, NUBUS_SLOT_BASE + + MAC_NUBUS_FIRST_SLOT * NUBUS_SLOT_SIZE); + qdev_connect_gpio_out(dev, 9, + qdev_get_gpio_in_named(via2_dev, "nubus-irq", + VIA2_NUBUS_IRQ_INTVIDEO)); + for (i = 1; i < VIA2_NUBUS_IRQ_NB; i++) { + qdev_connect_gpio_out(dev, 9 + i, + qdev_get_gpio_in_named(via2_dev, "nubus-irq", + VIA2_NUBUS_IRQ_9 + i)); + } - nubus = MAC_NUBUS_BRIDGE(dev)->bus; + /* + * Since the framebuffer in slot 0x9 uses a separate IRQ, wire the unused + * IRQ via GLUE for use by SONIC Ethernet in classic mode + */ + qdev_connect_gpio_out(glue, GLUE_IRQ_NUBUS_9, + qdev_get_gpio_in_named(via2_dev, "nubus-irq", + VIA2_NUBUS_IRQ_9)); + + nubus = &NUBUS_BRIDGE(dev)->bus; /* framebuffer in nubus slot #9 */ dev = qdev_new(TYPE_NUBUS_MACFB); + qdev_prop_set_uint32(dev, "slot", 9); qdev_prop_set_uint32(dev, "width", graphic_width); qdev_prop_set_uint32(dev, "height", graphic_height); qdev_prop_set_uint8(dev, "depth", graphic_depth); + if (graphic_width == 1152 && graphic_height == 870) { + qdev_prop_set_uint8(dev, "display", MACFB_DISPLAY_APPLE_21_COLOR); + } else { + qdev_prop_set_uint8(dev, "display", MACFB_DISPLAY_VGA); + } qdev_realize_and_unref(dev, BUS(nubus), &error_fatal); + macfb_mode = (NUBUS_MACFB(dev)->macfb).mode; + cs = CPU(cpu); if (linux_boot) { uint64_t high; + void *param_blob, *param_ptr, *param_rng_seed; + + if (kernel_cmdline) { + param_blob = g_malloc(strlen(kernel_cmdline) + 1024); + } else { + param_blob = g_malloc(1024); + } + 
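/*
 * Illustrative sketch, not part of the patch: the BOOTINFO*() macros fill
 * param_blob with a stream of (tag, size) records that is later installed
 * at parameters_base via rom_add_blob_fixed_as().  A consumer walks the
 * stream record by record until it reaches the terminating record written
 * by BOOTINFO0(..., BI_LAST).  The struct layout and the zero terminator
 * value below are assumptions local to this example, and byte-order
 * conversion (the real stream is in m68k big-endian order) is omitted.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct example_bi_hdr {
    uint16_t tag;    /* record type, e.g. a BI_COMMAND_LINE-style tag */
    uint16_t size;   /* total record size, including this header */
};

static void walk_bootinfo(const uint8_t *p)
{
    for (;;) {
        struct example_bi_hdr h;

        memcpy(&h, p, sizeof(h));
        if (h.tag == 0) {                 /* assumed BI_LAST-style terminator */
            break;
        }
        printf("tag 0x%04x, %u payload bytes\n",
               (unsigned)h.tag, (unsigned)(h.size - sizeof(h)));
        p += h.size;                      /* the macros keep records aligned */
    }
}

int main(void)
{
    /* One fake record (tag 0x1234, 4 payload bytes), then a zero terminator. */
    uint8_t blob[16] = { 0 };
    struct example_bi_hdr h = { 0x1234, sizeof(h) + 4 };

    memcpy(blob, &h, sizeof(h));
    walk_bootinfo(blob);
    return 0;
}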
kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, &elf_entry, NULL, &high, NULL, 1, EM_68K, 0, 0); @@ -421,23 +622,24 @@ static void q800_init(MachineState *machine) } stl_phys(cs->as, 4, elf_entry); /* reset initial PC */ parameters_base = (high + 1) & ~1; + param_ptr = param_blob; - BOOTINFO1(cs->as, parameters_base, BI_MACHTYPE, MACH_MAC); - BOOTINFO1(cs->as, parameters_base, BI_FPUTYPE, FPU_68040); - BOOTINFO1(cs->as, parameters_base, BI_MMUTYPE, MMU_68040); - BOOTINFO1(cs->as, parameters_base, BI_CPUTYPE, CPU_68040); - BOOTINFO1(cs->as, parameters_base, BI_MAC_CPUID, CPUB_68040); - BOOTINFO1(cs->as, parameters_base, BI_MAC_MODEL, MAC_MODEL_Q800); - BOOTINFO1(cs->as, parameters_base, + BOOTINFO1(param_ptr, BI_MACHTYPE, MACH_MAC); + BOOTINFO1(param_ptr, BI_FPUTYPE, FPU_68040); + BOOTINFO1(param_ptr, BI_MMUTYPE, MMU_68040); + BOOTINFO1(param_ptr, BI_CPUTYPE, CPU_68040); + BOOTINFO1(param_ptr, BI_MAC_CPUID, CPUB_68040); + BOOTINFO1(param_ptr, BI_MAC_MODEL, MAC_MODEL_Q800); + BOOTINFO1(param_ptr, BI_MAC_MEMSIZE, ram_size >> 20); /* in MB */ - BOOTINFO2(cs->as, parameters_base, BI_MEMCHUNK, 0, ram_size); - BOOTINFO1(cs->as, parameters_base, BI_MAC_VADDR, VIDEO_BASE); - BOOTINFO1(cs->as, parameters_base, BI_MAC_VDEPTH, graphic_depth); - BOOTINFO1(cs->as, parameters_base, BI_MAC_VDIM, + BOOTINFO2(param_ptr, BI_MEMCHUNK, 0, ram_size); + BOOTINFO1(param_ptr, BI_MAC_VADDR, + VIDEO_BASE + macfb_mode->offset); + BOOTINFO1(param_ptr, BI_MAC_VDEPTH, graphic_depth); + BOOTINFO1(param_ptr, BI_MAC_VDIM, (graphic_height << 16) | graphic_width); - BOOTINFO1(cs->as, parameters_base, BI_MAC_VROW, - (graphic_width * graphic_depth + 7) / 8); - BOOTINFO1(cs->as, parameters_base, BI_MAC_SCCBASE, SCC_BASE); + BOOTINFO1(param_ptr, BI_MAC_VROW, macfb_mode->stride); + BOOTINFO1(param_ptr, BI_MAC_SCCBASE, SCC_BASE); rom = g_malloc(sizeof(*rom)); memory_region_init_ram_ptr(rom, NULL, "m68k_fake_mac.rom", @@ -446,10 +648,16 @@ static void q800_init(MachineState *machine) memory_region_add_subregion(get_system_memory(), MACROM_ADDR, rom); if (kernel_cmdline) { - BOOTINFOSTR(cs->as, parameters_base, BI_COMMAND_LINE, + BOOTINFOSTR(param_ptr, BI_COMMAND_LINE, kernel_cmdline); } + /* Pass seed to RNG. 
*/ + param_rng_seed = param_ptr; + qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed)); + BOOTINFODATA(param_ptr, BI_RNG_SEED, + rng_seed, sizeof(rng_seed)); + /* load initrd */ if (initrd_filename) { initrd_size = get_image_size(initrd_filename); @@ -462,13 +670,20 @@ static void q800_init(MachineState *machine) initrd_base = (ram_size - initrd_size) & TARGET_PAGE_MASK; load_image_targphys(initrd_filename, initrd_base, ram_size - initrd_base); - BOOTINFO2(cs->as, parameters_base, BI_RAMDISK, initrd_base, + BOOTINFO2(param_ptr, BI_RAMDISK, initrd_base, initrd_size); } else { initrd_base = 0; initrd_size = 0; } - BOOTINFO0(cs->as, parameters_base, BI_LAST); + BOOTINFO0(param_ptr, BI_LAST); + rom_add_blob_fixed_as("bootinfo", param_blob, param_ptr - param_blob, + parameters_base, cs->as); + qemu_register_reset_nosnapshotload(rerandomize_rng_seed, + rom_ptr_for_as(cs->as, parameters_base, + param_ptr - param_blob) + + (param_rng_seed - param_blob)); + g_free(param_blob); } else { uint8_t *ptr; /* allocate and load BIOS */ @@ -488,12 +703,13 @@ static void q800_init(MachineState *machine) /* Remove qtest_enabled() check once firmware files are in the tree */ if (!qtest_enabled()) { - if (bios_size < 0 || bios_size > MACROM_SIZE) { + if (bios_size <= 0 || bios_size > MACROM_SIZE) { error_report("could not load MacROM '%s'", bios_name); exit(1); } - ptr = rom_ptr(MACROM_ADDR, MACROM_SIZE); + ptr = rom_ptr(MACROM_ADDR, bios_size); + assert(ptr != NULL); stl_phys(cs->as, 0, ldl_p(ptr)); /* reset initial SP */ stl_phys(cs->as, 4, MACROM_ADDR + ldl_p(ptr + 4)); /* reset initial PC */ @@ -501,6 +717,21 @@ static void q800_init(MachineState *machine) } } +static GlobalProperty hw_compat_q800[] = { + { "scsi-hd", "quirk_mode_page_vendor_specific_apple", "on"}, + { "scsi-hd", "vendor", " SEAGATE" }, + { "scsi-hd", "product", " ST225N" }, + { "scsi-hd", "ver", "1.0 " }, + { "scsi-cd", "quirk_mode_page_apple_vendor", "on"}, + { "scsi-cd", "quirk_mode_sense_rom_use_dbd", "on"}, + { "scsi-cd", "quirk_mode_page_vendor_specific_apple", "on"}, + { "scsi-cd", "quirk_mode_page_truncated", "on"}, + { "scsi-cd", "vendor", "MATSHITA" }, + { "scsi-cd", "product", "CD-ROM CR-8005" }, + { "scsi-cd", "ver", "1.0k" }, +}; +static const size_t hw_compat_q800_len = G_N_ELEMENTS(hw_compat_q800); + static void q800_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -510,6 +741,7 @@ static void q800_machine_class_init(ObjectClass *oc, void *data) mc->max_cpus = 1; mc->block_default_type = IF_SCSI; mc->default_ram_id = "m68k_mac.ram"; + compat_props_add(mc->compat_props, hw_compat_q800, hw_compat_q800_len); } static const TypeInfo q800_machine_typeinfo = { diff --git a/hw/m68k/virt.c b/hw/m68k/virt.c index 4e8bce5aa6..da5eafd275 100644 --- a/hw/m68k/virt.c +++ b/hw/m68k/virt.c @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * QEMU Vitual M68K Machine * @@ -9,7 +9,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" -#include "qemu-common.h" +#include "qemu/guest-random.h" #include "sysemu/sysemu.h" #include "cpu.h" #include "hw/boards.h" @@ -85,14 +85,28 @@ #define VIRT_VIRTIO_MMIO_BASE 0xff010000 /* MMIO: 0xff010000 - 0xff01ffff */ #define VIRT_VIRTIO_IRQ_BASE PIC_IRQ(2, 1) /* PIC: 2, 3, 4, 5, IRQ: ALL */ +typedef struct { + M68kCPU *cpu; + hwaddr initial_pc; + hwaddr initial_stack; +} ResetInfo; + static void main_cpu_reset(void *opaque) { - M68kCPU *cpu = opaque; + ResetInfo *reset_info = opaque; + M68kCPU *cpu = 
reset_info->cpu; CPUState *cs = CPU(cpu); cpu_reset(cs); - cpu->env.aregs[7] = ldl_phys(cs->as, 0); - cpu->env.pc = ldl_phys(cs->as, 4); + cpu->env.aregs[7] = reset_info->initial_stack; + cpu->env.pc = reset_info->initial_pc; +} + +static void rerandomize_rng_seed(void *opaque) +{ + struct bi_record *rng_seed = opaque; + qemu_guest_getrandom_nofail((void *)rng_seed->data + 2, + be16_to_cpu(*(uint16_t *)rng_seed->data)); } static void virt_init(MachineState *machine) @@ -113,6 +127,8 @@ static void virt_init(MachineState *machine) SysBusDevice *sysbus; hwaddr io_base; int i; + ResetInfo *reset_info; + uint8_t rng_seed[32]; if (ram_size > 3399672 * KiB) { /* @@ -124,9 +140,13 @@ static void virt_init(MachineState *machine) exit(1); } + reset_info = g_new0(ResetInfo, 1); + /* init CPUs */ cpu = M68K_CPU(cpu_create(machine->cpu_type)); - qemu_register_reset(main_cpu_reset, cpu); + + reset_info->cpu = cpu; + qemu_register_reset(main_cpu_reset, reset_info); /* RAM */ memory_region_add_subregion(get_system_memory(), 0, machine->ram); @@ -160,6 +180,7 @@ static void virt_init(MachineState *machine) io_base = VIRT_GF_RTC_MMIO_BASE; for (i = 0; i < VIRT_GF_RTC_NB; i++) { dev = qdev_new(TYPE_GOLDFISH_RTC); + qdev_prop_set_bit(dev, "big-endian", true); sysbus = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(sysbus, &error_fatal); sysbus_mmio_map(sysbus, 0, io_base); @@ -198,6 +219,13 @@ static void virt_init(MachineState *machine) if (kernel_filename) { CPUState *cs = CPU(cpu); uint64_t high; + void *param_blob, *param_ptr, *param_rng_seed; + + if (kernel_cmdline) { + param_blob = g_malloc(strlen(kernel_cmdline) + 1024); + } else { + param_blob = g_malloc(1024); + } kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, &elf_entry, NULL, &high, NULL, 1, @@ -206,34 +234,41 @@ static void virt_init(MachineState *machine) error_report("could not load kernel '%s'", kernel_filename); exit(1); } - stl_phys(cs->as, 4, elf_entry); /* reset initial PC */ + reset_info->initial_pc = elf_entry; parameters_base = (high + 1) & ~1; + param_ptr = param_blob; - BOOTINFO1(cs->as, parameters_base, BI_MACHTYPE, MACH_VIRT); - BOOTINFO1(cs->as, parameters_base, BI_FPUTYPE, FPU_68040); - BOOTINFO1(cs->as, parameters_base, BI_MMUTYPE, MMU_68040); - BOOTINFO1(cs->as, parameters_base, BI_CPUTYPE, CPU_68040); - BOOTINFO2(cs->as, parameters_base, BI_MEMCHUNK, 0, ram_size); + BOOTINFO1(param_ptr, BI_MACHTYPE, MACH_VIRT); + BOOTINFO1(param_ptr, BI_FPUTYPE, FPU_68040); + BOOTINFO1(param_ptr, BI_MMUTYPE, MMU_68040); + BOOTINFO1(param_ptr, BI_CPUTYPE, CPU_68040); + BOOTINFO2(param_ptr, BI_MEMCHUNK, 0, ram_size); - BOOTINFO1(cs->as, parameters_base, BI_VIRT_QEMU_VERSION, + BOOTINFO1(param_ptr, BI_VIRT_QEMU_VERSION, ((QEMU_VERSION_MAJOR << 24) | (QEMU_VERSION_MINOR << 16) | (QEMU_VERSION_MICRO << 8))); - BOOTINFO2(cs->as, parameters_base, BI_VIRT_GF_PIC_BASE, + BOOTINFO2(param_ptr, BI_VIRT_GF_PIC_BASE, VIRT_GF_PIC_MMIO_BASE, VIRT_GF_PIC_IRQ_BASE); - BOOTINFO2(cs->as, parameters_base, BI_VIRT_GF_RTC_BASE, + BOOTINFO2(param_ptr, BI_VIRT_GF_RTC_BASE, VIRT_GF_RTC_MMIO_BASE, VIRT_GF_RTC_IRQ_BASE); - BOOTINFO2(cs->as, parameters_base, BI_VIRT_GF_TTY_BASE, + BOOTINFO2(param_ptr, BI_VIRT_GF_TTY_BASE, VIRT_GF_TTY_MMIO_BASE, VIRT_GF_TTY_IRQ_BASE); - BOOTINFO2(cs->as, parameters_base, BI_VIRT_CTRL_BASE, + BOOTINFO2(param_ptr, BI_VIRT_CTRL_BASE, VIRT_CTRL_MMIO_BASE, VIRT_CTRL_IRQ_BASE); - BOOTINFO2(cs->as, parameters_base, BI_VIRT_VIRTIO_BASE, + BOOTINFO2(param_ptr, BI_VIRT_VIRTIO_BASE, VIRT_VIRTIO_MMIO_BASE, VIRT_VIRTIO_IRQ_BASE); if 
(kernel_cmdline) { - BOOTINFOSTR(cs->as, parameters_base, BI_COMMAND_LINE, + BOOTINFOSTR(param_ptr, BI_COMMAND_LINE, kernel_cmdline); } + /* Pass seed to RNG. */ + param_rng_seed = param_ptr; + qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed)); + BOOTINFODATA(param_ptr, BI_RNG_SEED, + rng_seed, sizeof(rng_seed)); + /* load initrd */ if (initrd_filename) { initrd_size = get_image_size(initrd_filename); @@ -246,13 +281,20 @@ static void virt_init(MachineState *machine) initrd_base = (ram_size - initrd_size) & TARGET_PAGE_MASK; load_image_targphys(initrd_filename, initrd_base, ram_size - initrd_base); - BOOTINFO2(cs->as, parameters_base, BI_RAMDISK, initrd_base, + BOOTINFO2(param_ptr, BI_RAMDISK, initrd_base, initrd_size); } else { initrd_base = 0; initrd_size = 0; } - BOOTINFO0(cs->as, parameters_base, BI_LAST); + BOOTINFO0(param_ptr, BI_LAST); + rom_add_blob_fixed_as("bootinfo", param_blob, param_ptr - param_blob, + parameters_base, cs->as); + qemu_register_reset_nosnapshotload(rerandomize_rng_seed, + rom_ptr_for_as(cs->as, parameters_base, + param_ptr - param_blob) + + (param_rng_seed - param_blob)); + g_free(param_blob); } } @@ -304,7 +346,42 @@ type_init(virt_machine_register_types) } \ type_init(machvirt_machine_##major##_##minor##_init); -static void virt_machine_6_0_options(MachineClass *mc) +static void virt_machine_7_2_options(MachineClass *mc) { } -DEFINE_VIRT_MACHINE(6, 0, true) +DEFINE_VIRT_MACHINE(7, 2, true) + +static void virt_machine_7_1_options(MachineClass *mc) +{ + virt_machine_7_2_options(mc); + compat_props_add(mc->compat_props, hw_compat_7_1, hw_compat_7_1_len); +} +DEFINE_VIRT_MACHINE(7, 1, false) + +static void virt_machine_7_0_options(MachineClass *mc) +{ + virt_machine_7_1_options(mc); + compat_props_add(mc->compat_props, hw_compat_7_0, hw_compat_7_0_len); +} +DEFINE_VIRT_MACHINE(7, 0, false) + +static void virt_machine_6_2_options(MachineClass *mc) +{ + virt_machine_7_0_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len); +} +DEFINE_VIRT_MACHINE(6, 2, false) + +static void virt_machine_6_1_options(MachineClass *mc) +{ + virt_machine_6_2_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len); +} +DEFINE_VIRT_MACHINE(6, 1, false) + +static void virt_machine_6_0_options(MachineClass *mc) +{ + virt_machine_6_1_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len); +} +DEFINE_VIRT_MACHINE(6, 0, false) diff --git a/hw/mem/Kconfig b/hw/mem/Kconfig index 8b19fdc49f..73c5ae8ad9 100644 --- a/hw/mem/Kconfig +++ b/hw/mem/Kconfig @@ -8,3 +8,11 @@ config MEM_DEVICE config NVDIMM bool select MEM_DEVICE + +config SPARSE_MEM + bool + +config CXL_MEM_DEVICE + bool + default y if CXL + select MEM_DEVICE diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c new file mode 100644 index 0000000000..255590201a --- /dev/null +++ b/hw/mem/cxl_type3.c @@ -0,0 +1,647 @@ +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/error-report.h" +#include "hw/mem/memory-device.h" +#include "hw/mem/pc-dimm.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/pmem.h" +#include "qemu/range.h" +#include "qemu/rcu.h" +#include "sysemu/hostmem.h" +#include "sysemu/numa.h" +#include "hw/cxl/cxl.h" +#include "hw/pci/msix.h" + +#define DWORD_BYTE 4 + +/* Default CDAT entries for a memory region */ +enum { + CT3_CDAT_DSMAS, + CT3_CDAT_DSLBIS0, + CT3_CDAT_DSLBIS1, + CT3_CDAT_DSLBIS2, + CT3_CDAT_DSLBIS3, + 
CT3_CDAT_DSEMTS, + CT3_CDAT_NUM_ENTRIES +}; + +static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, + int dsmad_handle, MemoryRegion *mr) +{ + g_autofree CDATDsmas *dsmas = NULL; + g_autofree CDATDslbis *dslbis0 = NULL; + g_autofree CDATDslbis *dslbis1 = NULL; + g_autofree CDATDslbis *dslbis2 = NULL; + g_autofree CDATDslbis *dslbis3 = NULL; + g_autofree CDATDsemts *dsemts = NULL; + + dsmas = g_malloc(sizeof(*dsmas)); + if (!dsmas) { + return -ENOMEM; + } + *dsmas = (CDATDsmas) { + .header = { + .type = CDAT_TYPE_DSMAS, + .length = sizeof(*dsmas), + }, + .DSMADhandle = dsmad_handle, + .flags = CDAT_DSMAS_FLAG_NV, + .DPA_base = 0, + .DPA_length = int128_get64(mr->size), + }; + + /* For now, no memory side cache, plausiblish numbers */ + dslbis0 = g_malloc(sizeof(*dslbis0)); + if (!dslbis0) { + return -ENOMEM; + } + *dslbis0 = (CDATDslbis) { + .header = { + .type = CDAT_TYPE_DSLBIS, + .length = sizeof(*dslbis0), + }, + .handle = dsmad_handle, + .flags = HMAT_LB_MEM_MEMORY, + .data_type = HMAT_LB_DATA_READ_LATENCY, + .entry_base_unit = 10000, /* 10ns base */ + .entry[0] = 15, /* 150ns */ + }; + + dslbis1 = g_malloc(sizeof(*dslbis1)); + if (!dslbis1) { + return -ENOMEM; + } + *dslbis1 = (CDATDslbis) { + .header = { + .type = CDAT_TYPE_DSLBIS, + .length = sizeof(*dslbis1), + }, + .handle = dsmad_handle, + .flags = HMAT_LB_MEM_MEMORY, + .data_type = HMAT_LB_DATA_WRITE_LATENCY, + .entry_base_unit = 10000, + .entry[0] = 25, /* 250ns */ + }; + + dslbis2 = g_malloc(sizeof(*dslbis2)); + if (!dslbis2) { + return -ENOMEM; + } + *dslbis2 = (CDATDslbis) { + .header = { + .type = CDAT_TYPE_DSLBIS, + .length = sizeof(*dslbis2), + }, + .handle = dsmad_handle, + .flags = HMAT_LB_MEM_MEMORY, + .data_type = HMAT_LB_DATA_READ_BANDWIDTH, + .entry_base_unit = 1000, /* GB/s */ + .entry[0] = 16, + }; + + dslbis3 = g_malloc(sizeof(*dslbis3)); + if (!dslbis3) { + return -ENOMEM; + } + *dslbis3 = (CDATDslbis) { + .header = { + .type = CDAT_TYPE_DSLBIS, + .length = sizeof(*dslbis3), + }, + .handle = dsmad_handle, + .flags = HMAT_LB_MEM_MEMORY, + .data_type = HMAT_LB_DATA_WRITE_BANDWIDTH, + .entry_base_unit = 1000, /* GB/s */ + .entry[0] = 16, + }; + + dsemts = g_malloc(sizeof(*dsemts)); + if (!dsemts) { + return -ENOMEM; + } + *dsemts = (CDATDsemts) { + .header = { + .type = CDAT_TYPE_DSEMTS, + .length = sizeof(*dsemts), + }, + .DSMAS_handle = dsmad_handle, + /* Reserved - the non volatile from DSMAS matters */ + .EFI_memory_type_attr = 2, + .DPA_offset = 0, + .DPA_length = int128_get64(mr->size), + }; + + /* Header always at start of structure */ + cdat_table[CT3_CDAT_DSMAS] = g_steal_pointer(&dsmas); + cdat_table[CT3_CDAT_DSLBIS0] = g_steal_pointer(&dslbis0); + cdat_table[CT3_CDAT_DSLBIS1] = g_steal_pointer(&dslbis1); + cdat_table[CT3_CDAT_DSLBIS2] = g_steal_pointer(&dslbis2); + cdat_table[CT3_CDAT_DSLBIS3] = g_steal_pointer(&dslbis3); + cdat_table[CT3_CDAT_DSEMTS] = g_steal_pointer(&dsemts); + + return 0; +} + +static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv) +{ + g_autofree CDATSubHeader **table = NULL; + MemoryRegion *nonvolatile_mr; + CXLType3Dev *ct3d = priv; + int dsmad_handle = 0; + int rc; + + if (!ct3d->hostmem) { + return 0; + } + + nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostmem); + if (!nonvolatile_mr) { + return -EINVAL; + } + + table = g_malloc0(CT3_CDAT_NUM_ENTRIES * sizeof(*table)); + if (!table) { + return -ENOMEM; + } + + rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, nonvolatile_mr); + if (rc < 0) { + return rc; + } + + *cdat_table = 
g_steal_pointer(&table); + + return CT3_CDAT_NUM_ENTRIES; +} + +static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv) +{ + int i; + + for (i = 0; i < num; i++) { + g_free(cdat_table[i]); + } + g_free(cdat_table); +} + +static bool cxl_doe_cdat_rsp(DOECap *doe_cap) +{ + CDATObject *cdat = &CXL_TYPE3(doe_cap->pdev)->cxl_cstate.cdat; + uint16_t ent; + void *base; + uint32_t len; + CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap); + CDATRsp rsp; + + assert(cdat->entry_len); + + /* Discard if request length mismatched */ + if (pcie_doe_get_obj_len(req) < + DIV_ROUND_UP(sizeof(CDATReq), DWORD_BYTE)) { + return false; + } + + ent = req->entry_handle; + base = cdat->entry[ent].base; + len = cdat->entry[ent].length; + + rsp = (CDATRsp) { + .header = { + .vendor_id = CXL_VENDOR_ID, + .data_obj_type = CXL_DOE_TABLE_ACCESS, + .reserved = 0x0, + .length = DIV_ROUND_UP((sizeof(rsp) + len), DWORD_BYTE), + }, + .rsp_code = CXL_DOE_TAB_RSP, + .table_type = CXL_DOE_TAB_TYPE_CDAT, + .entry_handle = (ent < cdat->entry_len - 1) ? + ent + 1 : CXL_DOE_TAB_ENT_MAX, + }; + + memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp)); + memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE), + base, len); + + doe_cap->read_mbox_len += rsp.header.length; + + return true; +} + +static uint32_t ct3d_config_read(PCIDevice *pci_dev, uint32_t addr, int size) +{ + CXLType3Dev *ct3d = CXL_TYPE3(pci_dev); + uint32_t val; + + if (pcie_doe_read_config(&ct3d->doe_cdat, addr, size, &val)) { + return val; + } + + return pci_default_read_config(pci_dev, addr, size); +} + +static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val, + int size) +{ + CXLType3Dev *ct3d = CXL_TYPE3(pci_dev); + + pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size); + pci_default_write_config(pci_dev, addr, val, size); +} + +/* + * Null value of all Fs suggested by IEEE RA guidelines for use of + * EU, OUI and CID + */ +#define UI64_NULL ~(0ULL) + +static void build_dvsecs(CXLType3Dev *ct3d) +{ + CXLComponentState *cxl_cstate = &ct3d->cxl_cstate; + uint8_t *dvsec; + + dvsec = (uint8_t *)&(CXLDVSECDevice){ + .cap = 0x1e, + .ctrl = 0x2, + .status2 = 0x2, + .range1_size_hi = ct3d->hostmem->size >> 32, + .range1_size_lo = (2 << 5) | (2 << 2) | 0x3 | + (ct3d->hostmem->size & 0xF0000000), + .range1_base_hi = 0, + .range1_base_lo = 0, + }; + cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE, + PCIE_CXL_DEVICE_DVSEC_LENGTH, + PCIE_CXL_DEVICE_DVSEC, + PCIE_CXL2_DEVICE_DVSEC_REVID, dvsec); + + dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){ + .rsvd = 0, + .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX, + .reg0_base_hi = 0, + .reg1_base_lo = RBI_CXL_DEVICE_REG | CXL_DEVICE_REG_BAR_IDX, + .reg1_base_hi = 0, + }; + cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE, + REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC, + REG_LOC_DVSEC_REVID, dvsec); + dvsec = (uint8_t *)&(CXLDVSECDeviceGPF){ + .phase2_duration = 0x603, /* 3 seconds */ + .phase2_power = 0x33, /* 0x33 miliwatts */ + }; + cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE, + GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC, + GPF_DEVICE_DVSEC_REVID, dvsec); +} + +static void hdm_decoder_commit(CXLType3Dev *ct3d, int which) +{ + ComponentRegisters *cregs = &ct3d->cxl_cstate.crb; + uint32_t *cache_mem = cregs->cache_mem_registers; + + assert(which == 0); + + /* TODO: Sanity checks that the decoder is possible */ + ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0); + ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0); + + 
ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1); +} + +static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + CXLComponentState *cxl_cstate = opaque; + ComponentRegisters *cregs = &cxl_cstate->crb; + CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate); + uint32_t *cache_mem = cregs->cache_mem_registers; + bool should_commit = false; + int which_hdm = -1; + + assert(size == 4); + g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE); + + switch (offset) { + case A_CXL_HDM_DECODER0_CTRL: + should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT); + which_hdm = 0; + break; + default: + break; + } + + stl_le_p((uint8_t *)cache_mem + offset, value); + if (should_commit) { + hdm_decoder_commit(ct3d, which_hdm); + } +} + +static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp) +{ + DeviceState *ds = DEVICE(ct3d); + MemoryRegion *mr; + char *name; + + if (!ct3d->hostmem) { + error_setg(errp, "memdev property must be set"); + return false; + } + + mr = host_memory_backend_get_memory(ct3d->hostmem); + if (!mr) { + error_setg(errp, "memdev property must be set"); + return false; + } + memory_region_set_nonvolatile(mr, true); + memory_region_set_enabled(mr, true); + host_memory_backend_set_mapped(ct3d->hostmem, true); + + if (ds->id) { + name = g_strdup_printf("cxl-type3-dpa-space:%s", ds->id); + } else { + name = g_strdup("cxl-type3-dpa-space"); + } + address_space_init(&ct3d->hostmem_as, mr, name); + g_free(name); + + ct3d->cxl_dstate.pmem_size = ct3d->hostmem->size; + + if (!ct3d->lsa) { + error_setg(errp, "lsa property must be set"); + return false; + } + + return true; +} + +static DOEProtocol doe_cdat_prot[] = { + { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp }, + { } +}; + +static void ct3_realize(PCIDevice *pci_dev, Error **errp) +{ + CXLType3Dev *ct3d = CXL_TYPE3(pci_dev); + CXLComponentState *cxl_cstate = &ct3d->cxl_cstate; + ComponentRegisters *regs = &cxl_cstate->crb; + MemoryRegion *mr = ®s->component_registers; + uint8_t *pci_conf = pci_dev->config; + unsigned short msix_num = 1; + int i; + + if (!cxl_setup_memory(ct3d, errp)) { + return; + } + + pci_config_set_prog_interface(pci_conf, 0x10); + pci_config_set_class(pci_conf, PCI_CLASS_MEMORY_CXL); + + pcie_endpoint_cap_init(pci_dev, 0x80); + if (ct3d->sn != UI64_NULL) { + pcie_dev_ser_num_init(pci_dev, 0x100, ct3d->sn); + cxl_cstate->dvsec_offset = 0x100 + 0x0c; + } else { + cxl_cstate->dvsec_offset = 0x100; + } + + ct3d->cxl_cstate.pdev = pci_dev; + build_dvsecs(ct3d); + + regs->special_ops = g_new0(MemoryRegionOps, 1); + regs->special_ops->write = ct3d_reg_write; + + cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate, + TYPE_CXL_TYPE3); + + pci_register_bar( + pci_dev, CXL_COMPONENT_REG_BAR_IDX, + PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, mr); + + cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate); + pci_register_bar(pci_dev, CXL_DEVICE_REG_BAR_IDX, + PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64, + &ct3d->cxl_dstate.device_registers); + + /* MSI(-X) Initailization */ + msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL); + for (i = 0; i < msix_num; i++) { + msix_vector_use(pci_dev, i); + } + + /* DOE Initailization */ + pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0); + + cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table; + cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table; + cxl_cstate->cdat.private = ct3d; + cxl_doe_cdat_init(cxl_cstate, errp); 
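/*
 * Illustrative sketch, not part of the patch: cxl_type3_dpa() further below
 * strips the interleave-way select bits out of the host-physical offset to
 * recover a device-physical address.  With the encoded HDM decoder fields
 * ig and iw (granularity of 256 << ig bytes and 1 << iw ways for the
 * power-of-two encodings), the low 8 + ig bits are kept, the next iw bits
 * select the interleave target and are dropped, and the remaining high bits
 * shift down to sit directly above the kept low bits.  The helper here is a
 * plain-C restatement of that bit manipulation, with a worked example.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t hpa_offset_to_dpa(uint64_t hpa_offset, int ig, int iw)
{
    uint64_t low_mask = (1ULL << (8 + ig)) - 1;

    return (hpa_offset & low_mask) |
           ((hpa_offset >> (8 + ig + iw)) << (8 + ig));
}

int main(void)
{
    /*
     * ig = 0 (256-byte granularity), iw = 1 (2 ways): for host offset 0x300
     * the way-select bit is dropped and the remaining chunk index gives the
     * target device a DPA of 0x100.
     */
    assert(hpa_offset_to_dpa(0x300, 0, 1) == 0x100);
    /* iw = 0 (single way): the offset passes through unchanged. */
    assert(hpa_offset_to_dpa(0x12345, 0, 0) == 0x12345);
    return 0;
}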
+} + +static void ct3_exit(PCIDevice *pci_dev) +{ + CXLType3Dev *ct3d = CXL_TYPE3(pci_dev); + CXLComponentState *cxl_cstate = &ct3d->cxl_cstate; + ComponentRegisters *regs = &cxl_cstate->crb; + + cxl_doe_cdat_release(cxl_cstate); + g_free(regs->special_ops); + address_space_destroy(&ct3d->hostmem_as); +} + +/* TODO: Support multiple HDM decoders and DPA skip */ +static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa) +{ + uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers; + uint64_t decoder_base, decoder_size, hpa_offset; + uint32_t hdm0_ctrl; + int ig, iw; + + decoder_base = (((uint64_t)cache_mem[R_CXL_HDM_DECODER0_BASE_HI] << 32) | + cache_mem[R_CXL_HDM_DECODER0_BASE_LO]); + if ((uint64_t)host_addr < decoder_base) { + return false; + } + + hpa_offset = (uint64_t)host_addr - decoder_base; + + decoder_size = ((uint64_t)cache_mem[R_CXL_HDM_DECODER0_SIZE_HI] << 32) | + cache_mem[R_CXL_HDM_DECODER0_SIZE_LO]; + if (hpa_offset >= decoder_size) { + return false; + } + + hdm0_ctrl = cache_mem[R_CXL_HDM_DECODER0_CTRL]; + iw = FIELD_EX32(hdm0_ctrl, CXL_HDM_DECODER0_CTRL, IW); + ig = FIELD_EX32(hdm0_ctrl, CXL_HDM_DECODER0_CTRL, IG); + + *dpa = (MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) | + ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset) >> iw); + + return true; +} + +MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + CXLType3Dev *ct3d = CXL_TYPE3(d); + uint64_t dpa_offset; + MemoryRegion *mr; + + /* TODO support volatile region */ + mr = host_memory_backend_get_memory(ct3d->hostmem); + if (!mr) { + return MEMTX_ERROR; + } + + if (!cxl_type3_dpa(ct3d, host_addr, &dpa_offset)) { + return MEMTX_ERROR; + } + + if (dpa_offset > int128_get64(mr->size)) { + return MEMTX_ERROR; + } + + return address_space_read(&ct3d->hostmem_as, dpa_offset, attrs, data, size); +} + +MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data, + unsigned size, MemTxAttrs attrs) +{ + CXLType3Dev *ct3d = CXL_TYPE3(d); + uint64_t dpa_offset; + MemoryRegion *mr; + + mr = host_memory_backend_get_memory(ct3d->hostmem); + if (!mr) { + return MEMTX_OK; + } + + if (!cxl_type3_dpa(ct3d, host_addr, &dpa_offset)) { + return MEMTX_OK; + } + + if (dpa_offset > int128_get64(mr->size)) { + return MEMTX_OK; + } + return address_space_write(&ct3d->hostmem_as, dpa_offset, attrs, + &data, size); +} + +static void ct3d_reset(DeviceState *dev) +{ + CXLType3Dev *ct3d = CXL_TYPE3(dev); + uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers; + uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask; + + cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE); + cxl_device_register_init_common(&ct3d->cxl_dstate); +} + +static Property ct3_props[] = { + DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND, + HostMemoryBackend *), + DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND, + HostMemoryBackend *), + DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL), + DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename), + DEFINE_PROP_END_OF_LIST(), +}; + +static uint64_t get_lsa_size(CXLType3Dev *ct3d) +{ + MemoryRegion *mr; + + mr = host_memory_backend_get_memory(ct3d->lsa); + return memory_region_size(mr); +} + +static void validate_lsa_access(MemoryRegion *mr, uint64_t size, + uint64_t offset) +{ + assert(offset + size <= memory_region_size(mr)); + assert(offset + size > offset); +} + +static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, 
uint64_t size, + uint64_t offset) +{ + MemoryRegion *mr; + void *lsa; + + mr = host_memory_backend_get_memory(ct3d->lsa); + validate_lsa_access(mr, size, offset); + + lsa = memory_region_get_ram_ptr(mr) + offset; + memcpy(buf, lsa, size); + + return size; +} + +static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size, + uint64_t offset) +{ + MemoryRegion *mr; + void *lsa; + + mr = host_memory_backend_get_memory(ct3d->lsa); + validate_lsa_access(mr, size, offset); + + lsa = memory_region_get_ram_ptr(mr) + offset; + memcpy(lsa, buf, size); + memory_region_set_dirty(mr, offset, size); + + /* + * Just like the PMEM, if the guest is not allowed to exit gracefully, label + * updates will get lost. + */ +} + +static void ct3_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc); + CXLType3Class *cvc = CXL_TYPE3_CLASS(oc); + + pc->realize = ct3_realize; + pc->exit = ct3_exit; + pc->class_id = PCI_CLASS_STORAGE_EXPRESS; + pc->vendor_id = PCI_VENDOR_ID_INTEL; + pc->device_id = 0xd93; /* LVF for now */ + pc->revision = 1; + + pc->config_write = ct3d_config_write; + pc->config_read = ct3d_config_read; + + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + dc->desc = "CXL PMEM Device (Type 3)"; + dc->reset = ct3d_reset; + device_class_set_props(dc, ct3_props); + + cvc->get_lsa_size = get_lsa_size; + cvc->get_lsa = get_lsa; + cvc->set_lsa = set_lsa; +} + +static const TypeInfo ct3d_info = { + .name = TYPE_CXL_TYPE3, + .parent = TYPE_PCI_DEVICE, + .class_size = sizeof(struct CXLType3Class), + .class_init = ct3_class_init, + .instance_size = sizeof(CXLType3Dev), + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CXL_DEVICE }, + { INTERFACE_PCIE_DEVICE }, + {} + }, +}; + +static void ct3d_registers(void) +{ + type_register_static(&ct3d_info); +} + +type_init(ct3d_registers); diff --git a/hw/mem/meson.build b/hw/mem/meson.build index 3c8fdef9f9..609b2b36fc 100644 --- a/hw/mem/meson.build +++ b/hw/mem/meson.build @@ -3,7 +3,8 @@ mem_ss.add(files('memory-device.c')) mem_ss.add(when: 'CONFIG_DIMM', if_true: files('pc-dimm.c')) mem_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_mc.c')) mem_ss.add(when: 'CONFIG_NVDIMM', if_true: files('nvdimm.c')) +mem_ss.add(when: 'CONFIG_CXL_MEM_DEVICE', if_true: files('cxl_type3.c')) softmmu_ss.add_all(when: 'CONFIG_MEM_DEVICE', if_true: mem_ss) -softmmu_ss.add(when: 'CONFIG_FUZZ', if_true: files('sparse-mem.c')) +softmmu_ss.add(when: 'CONFIG_SPARSE_MEM', if_true: files('sparse-mem.c')) diff --git a/hw/mem/nvdimm.c b/hw/mem/nvdimm.c index 7397b67156..31080c22c9 100644 --- a/hw/mem/nvdimm.c +++ b/hw/mem/nvdimm.c @@ -149,7 +149,7 @@ static void nvdimm_prepare_memory_region(NVDIMMDevice *nvdimm, Error **errp) if (!nvdimm->unarmed && memory_region_is_rom(mr)) { HostMemoryBackend *hostmem = dimm->hostmem; - error_setg(errp, "'unarmed' property must be off since memdev %s " + error_setg(errp, "'unarmed' property must be 'on' since memdev %s " "is read-only", object_get_canonical_path_component(OBJECT(hostmem))); return; @@ -181,10 +181,25 @@ static MemoryRegion *nvdimm_md_get_memory_region(MemoryDeviceState *md, static void nvdimm_realize(PCDIMMDevice *dimm, Error **errp) { NVDIMMDevice *nvdimm = NVDIMM(dimm); + NVDIMMClass *ndc = NVDIMM_GET_CLASS(nvdimm); if (!nvdimm->nvdimm_mr) { nvdimm_prepare_memory_region(nvdimm, errp); } + + if (ndc->realize) { + ndc->realize(nvdimm, errp); + } +} + +static void nvdimm_unrealize(PCDIMMDevice *dimm) +{ + NVDIMMDevice *nvdimm = NVDIMM(dimm); + 
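/*
 * Illustrative sketch, not part of the patch: the unrealize paths added to
 * nvdimm and pc-dimm only call into a subclass if that subclass installed a
 * hook, so existing devices without one keep working.  The types and names
 * below are invented purely to show that optional-class-hook shape.
 */
#include <stdio.h>

typedef struct ExampleDimmClass {
    void (*realize)(void *dev);
    void (*unrealize)(void *dev);   /* may be NULL, like a plain NVDIMM */
} ExampleDimmClass;

static void example_dimm_unrealize(const ExampleDimmClass *klass, void *dev)
{
    if (klass->unrealize) {         /* mirrors: if (ndc->unrealize) ... */
        klass->unrealize(dev);
    }
}

static void noisy_unrealize(void *dev)
{
    printf("subclass unrealize for %p\n", dev);
}

int main(void)
{
    ExampleDimmClass with_hook = { .unrealize = noisy_unrealize };
    ExampleDimmClass without_hook = { 0 };
    int dummy;

    example_dimm_unrealize(&with_hook, &dummy);     /* hook runs */
    example_dimm_unrealize(&without_hook, &dummy);  /* silently skipped */
    return 0;
}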
NVDIMMClass *ndc = NVDIMM_GET_CLASS(nvdimm); + + if (ndc->unrealize) { + ndc->unrealize(nvdimm); + } } /* @@ -240,6 +255,7 @@ static void nvdimm_class_init(ObjectClass *oc, void *data) DeviceClass *dc = DEVICE_CLASS(oc); ddc->realize = nvdimm_realize; + ddc->unrealize = nvdimm_unrealize; mdc->get_memory_region = nvdimm_md_get_memory_region; device_class_set_props(dc, nvdimm_properties); @@ -248,7 +264,7 @@ static void nvdimm_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } -static TypeInfo nvdimm_info = { +static const TypeInfo nvdimm_info = { .name = TYPE_NVDIMM, .parent = TYPE_PC_DIMM, .class_size = sizeof(NVDIMMClass), diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c index a3a2560301..f27e1a11ba 100644 --- a/hw/mem/pc-dimm.c +++ b/hw/mem/pc-dimm.c @@ -181,7 +181,21 @@ static void pc_dimm_realize(DeviceState *dev, Error **errp) PCDIMMDevice *dimm = PC_DIMM(dev); PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm); MachineState *ms = MACHINE(qdev_get_machine()); - int nb_numa_nodes = ms->numa_state->num_nodes; + + if (ms->numa_state) { + int nb_numa_nodes = ms->numa_state->num_nodes; + + if (((nb_numa_nodes > 0) && (dimm->node >= nb_numa_nodes)) || + (!nb_numa_nodes && dimm->node)) { + error_setg(errp, "'DIMM property " PC_DIMM_NODE_PROP " has value %" + PRIu32 "' which exceeds the number of numa nodes: %d", + dimm->node, nb_numa_nodes ? nb_numa_nodes : 1); + return; + } + } else if (dimm->node > 0) { + error_setg(errp, "machine doesn't support NUMA"); + return; + } if (!dimm->hostmem) { error_setg(errp, "'" PC_DIMM_MEMDEV_PROP "' property is not set"); @@ -191,13 +205,6 @@ static void pc_dimm_realize(DeviceState *dev, Error **errp) object_get_canonical_path_component(OBJECT(dimm->hostmem))); return; } - if (((nb_numa_nodes > 0) && (dimm->node >= nb_numa_nodes)) || - (!nb_numa_nodes && dimm->node)) { - error_setg(errp, "'DIMM property " PC_DIMM_NODE_PROP " has value %" - PRIu32 "' which exceeds the number of numa nodes: %d", - dimm->node, nb_numa_nodes ? 
nb_numa_nodes : 1); - return; - } if (ddc->realize) { ddc->realize(dimm, errp); @@ -209,6 +216,11 @@ static void pc_dimm_realize(DeviceState *dev, Error **errp) static void pc_dimm_unrealize(DeviceState *dev) { PCDIMMDevice *dimm = PC_DIMM(dev); + PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm); + + if (ddc->unrealize) { + ddc->unrealize(dimm); + } host_memory_backend_set_mapped(dimm->hostmem, false); } @@ -279,7 +291,7 @@ static void pc_dimm_class_init(ObjectClass *oc, void *data) mdc->fill_device_info = pc_dimm_md_fill_device_info; } -static TypeInfo pc_dimm_info = { +static const TypeInfo pc_dimm_info = { .name = TYPE_PC_DIMM, .parent = TYPE_DEVICE, .instance_size = sizeof(PCDIMMDevice), diff --git a/hw/meson.build b/hw/meson.build index 00b49848c7..43ed36fa15 100644 --- a/hw/meson.build +++ b/hw/meson.build @@ -6,6 +6,7 @@ subdir('block') subdir('char') subdir('core') subdir('cpu') +subdir('cxl') subdir('display') subdir('dma') subdir('gpio') @@ -50,6 +51,7 @@ subdir('avr') subdir('cris') subdir('hppa') subdir('i386') +subdir('loongarch') subdir('m68k') subdir('microblaze') subdir('mips') diff --git a/hw/microblaze/boot.c b/hw/microblaze/boot.c index 8821d009f1..25ad54754e 100644 --- a/hw/microblaze/boot.c +++ b/hw/microblaze/boot.c @@ -25,12 +25,12 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "cpu.h" #include "qemu/option.h" #include "qemu/config-file.h" #include "qemu/error-report.h" +#include "qemu/guest-random.h" #include "sysemu/device_tree.h" #include "sysemu/reset.h" #include "hw/boards.h" @@ -76,6 +76,7 @@ static int microblaze_load_dtb(hwaddr addr, int fdt_size; void *fdt = NULL; int r; + uint8_t rng_seed[32]; if (dtb_filename) { fdt = load_device_tree(dtb_filename, &fdt_size); @@ -84,6 +85,9 @@ static int microblaze_load_dtb(hwaddr addr, return 0; } + qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed)); + qemu_fdt_setprop(fdt, "/chosen", "rng-seed", rng_seed, sizeof(rng_seed)); + if (kernel_cmdline) { r = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", kernel_cmdline); @@ -138,7 +142,7 @@ void microblaze_load_kernel(MicroBlazeCPU *cpu, hwaddr ddr_base, uint32_t base32; int big_endian = 0; -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN big_endian = 1; #endif diff --git a/hw/microblaze/petalogix_ml605_mmu.c b/hw/microblaze/petalogix_ml605_mmu.c index 159db6cbe2..a24fadddca 100644 --- a/hw/microblaze/petalogix_ml605_mmu.c +++ b/hw/microblaze/petalogix_ml605_mmu.c @@ -183,7 +183,7 @@ petalogix_ml605_init(MachineState *machine) spi = (SSIBus *)qdev_get_child_bus(dev, "spi"); for (i = 0; i < NUM_SPI_FLASHES; i++) { - DriveInfo *dinfo = drive_get_next(IF_MTD); + DriveInfo *dinfo = drive_get(IF_MTD, 0, i); qemu_irq cs_line; dev = qdev_new("n25q128"); diff --git a/hw/mips/Kconfig b/hw/mips/Kconfig index b4c5549ce8..725525358d 100644 --- a/hw/mips/Kconfig +++ b/hw/mips/Kconfig @@ -16,7 +16,7 @@ config JAZZ select I8254 select I8257 select PCSPK - select VGA_ISA_MM + select VGA_MMIO select G364FB select DP8393X select ESP diff --git a/hw/mips/bootloader.c b/hw/mips/bootloader.c index 6ec8314490..f5f42f2bf2 100644 --- a/hw/mips/bootloader.c +++ b/hw/mips/bootloader.c @@ -165,15 +165,29 @@ void bl_gen_jump_to(uint32_t **p, target_ulong jump_addr) bl_gen_nop(p); /* delay slot */ } -void bl_gen_jump_kernel(uint32_t **p, target_ulong sp, target_ulong a0, - target_ulong a1, target_ulong a2, target_ulong a3, +void bl_gen_jump_kernel(uint32_t **p, + bool set_sp, target_ulong sp, + bool set_a0, target_ulong a0, + bool set_a1, 
target_ulong a1, + bool set_a2, target_ulong a2, + bool set_a3, target_ulong a3, target_ulong kernel_addr) { - bl_gen_load_ulong(p, BL_REG_SP, sp); - bl_gen_load_ulong(p, BL_REG_A0, a0); - bl_gen_load_ulong(p, BL_REG_A1, a1); - bl_gen_load_ulong(p, BL_REG_A2, a2); - bl_gen_load_ulong(p, BL_REG_A3, a3); + if (set_sp) { + bl_gen_load_ulong(p, BL_REG_SP, sp); + } + if (set_a0) { + bl_gen_load_ulong(p, BL_REG_A0, a0); + } + if (set_a1) { + bl_gen_load_ulong(p, BL_REG_A1, a1); + } + if (set_a2) { + bl_gen_load_ulong(p, BL_REG_A2, a2); + } + if (set_a3) { + bl_gen_load_ulong(p, BL_REG_A3, a3); + } bl_gen_jump_to(p, kernel_addr); } @@ -182,7 +196,11 @@ void bl_gen_write_ulong(uint32_t **p, target_ulong addr, target_ulong val) { bl_gen_load_ulong(p, BL_REG_K0, val); bl_gen_load_ulong(p, BL_REG_K1, addr); - bl_gen_sd(p, BL_REG_K0, BL_REG_K1, 0x0); + if (bootcpu_supports_isa(ISA_MIPS3)) { + bl_gen_sd(p, BL_REG_K0, BL_REG_K1, 0x0); + } else { + bl_gen_sw(p, BL_REG_K0, BL_REG_K1, 0x0); + } } void bl_gen_write_u32(uint32_t **p, target_ulong addr, uint32_t val) diff --git a/hw/mips/boston.c b/hw/mips/boston.c index 20b06865b2..edda87e23c 100644 --- a/hw/mips/boston.c +++ b/hw/mips/boston.c @@ -20,6 +20,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" +#include "elf.h" #include "hw/boards.h" #include "hw/char/serial.h" #include "hw/ide/pci.h" @@ -33,12 +34,14 @@ #include "hw/qdev-properties.h" #include "qapi/error.h" #include "qemu/error-report.h" +#include "qemu/guest-random.h" #include "qemu/log.h" #include "chardev/char.h" #include "sysemu/device_tree.h" #include "sysemu/sysemu.h" #include "sysemu/qtest.h" #include "sysemu/runstate.h" +#include "sysemu/reset.h" #include #include "qom/object.h" @@ -48,6 +51,15 @@ typedef struct BostonState BostonState; DECLARE_INSTANCE_CHECKER(BostonState, BOSTON, TYPE_BOSTON) +#define FDT_IRQ_TYPE_NONE 0 +#define FDT_IRQ_TYPE_LEVEL_HIGH 4 +#define FDT_GIC_SHARED 0 +#define FDT_GIC_LOCAL 1 +#define FDT_BOSTON_CLK_SYS 1 +#define FDT_BOSTON_CLK_CPU 2 +#define FDT_PCI_IRQ_MAP_PINS 4 +#define FDT_PCI_IRQ_MAP_DESCS 6 + struct BostonState { SysBusDevice parent_obj; @@ -64,6 +76,44 @@ struct BostonState { hwaddr fdt_base; }; +enum { + BOSTON_LOWDDR, + BOSTON_PCIE0, + BOSTON_PCIE1, + BOSTON_PCIE2, + BOSTON_PCIE2_MMIO, + BOSTON_CM, + BOSTON_GIC, + BOSTON_CDMM, + BOSTON_CPC, + BOSTON_PLATREG, + BOSTON_UART, + BOSTON_LCD, + BOSTON_FLASH, + BOSTON_PCIE1_MMIO, + BOSTON_PCIE0_MMIO, + BOSTON_HIGHDDR, +}; + +static const MemMapEntry boston_memmap[] = { + [BOSTON_LOWDDR] = { 0x0, 0x10000000 }, + [BOSTON_PCIE0] = { 0x10000000, 0x2000000 }, + [BOSTON_PCIE1] = { 0x12000000, 0x2000000 }, + [BOSTON_PCIE2] = { 0x14000000, 0x2000000 }, + [BOSTON_PCIE2_MMIO] = { 0x16000000, 0x100000 }, + [BOSTON_CM] = { 0x16100000, 0x20000 }, + [BOSTON_GIC] = { 0x16120000, 0x20000 }, + [BOSTON_CDMM] = { 0x16140000, 0x8000 }, + [BOSTON_CPC] = { 0x16200000, 0x8000 }, + [BOSTON_PLATREG] = { 0x17ffd000, 0x1000 }, + [BOSTON_UART] = { 0x17ffe000, 0x20 }, + [BOSTON_LCD] = { 0x17fff000, 0x8 }, + [BOSTON_FLASH] = { 0x18000000, 0x8000000 }, + [BOSTON_PCIE1_MMIO] = { 0x20000000, 0x20000000 }, + [BOSTON_PCIE0_MMIO] = { 0x40000000, 0x40000000 }, + [BOSTON_HIGHDDR] = { 0x80000000, 0x0 }, +}; + enum boston_plat_reg { PLAT_FPGA_BUILD = 0x00, PLAT_CORE_CL = 0x04, @@ -275,24 +325,24 @@ type_init(boston_register_types) static void gen_firmware(uint32_t *p, hwaddr kernel_entry, hwaddr fdt_addr) { - const uint32_t cm_base = 0x16100000; - const uint32_t gic_base = 0x16120000; - const uint32_t cpc_base = 0x16200000; + uint64_t 
regaddr; /* Move CM GCRs */ - bl_gen_write_ulong(&p, - cpu_mips_phys_to_kseg1(NULL, GCR_BASE_ADDR + GCR_BASE_OFS), - cm_base); + regaddr = cpu_mips_phys_to_kseg1(NULL, GCR_BASE_ADDR + GCR_BASE_OFS), + bl_gen_write_ulong(&p, regaddr, + boston_memmap[BOSTON_CM].base); /* Move & enable GIC GCRs */ - bl_gen_write_ulong(&p, - cpu_mips_phys_to_kseg1(NULL, cm_base + GCR_GIC_BASE_OFS), - gic_base | GCR_GIC_BASE_GICEN_MSK); + regaddr = cpu_mips_phys_to_kseg1(NULL, boston_memmap[BOSTON_CM].base + + GCR_GIC_BASE_OFS), + bl_gen_write_ulong(&p, regaddr, + boston_memmap[BOSTON_GIC].base | GCR_GIC_BASE_GICEN_MSK); /* Move & enable CPC GCRs */ - bl_gen_write_ulong(&p, - cpu_mips_phys_to_kseg1(NULL, cm_base + GCR_CPC_BASE_OFS), - cpc_base | GCR_CPC_BASE_CPCEN_MSK); + regaddr = cpu_mips_phys_to_kseg1(NULL, boston_memmap[BOSTON_CM].base + + GCR_CPC_BASE_OFS), + bl_gen_write_ulong(&p, regaddr, + boston_memmap[BOSTON_CPC].base | GCR_CPC_BASE_CPCEN_MSK); /* * Setup argument registers to follow the UHI boot protocol: @@ -302,7 +352,10 @@ static void gen_firmware(uint32_t *p, hwaddr kernel_entry, hwaddr fdt_addr) * a2/$6 = 0 * a3/$7 = 0 */ - bl_gen_jump_kernel(&p, 0, (int32_t)-2, fdt_addr, 0, 0, kernel_entry); + bl_gen_jump_kernel(&p, + true, 0, true, (int32_t)-2, + true, fdt_addr, true, 0, true, 0, + kernel_entry); } static const void *boston_fdt_filter(void *opaque, const void *fdt_orig, @@ -315,6 +368,7 @@ static const void *boston_fdt_filter(void *opaque, const void *fdt_orig, size_t ram_low_sz, ram_high_sz; size_t fdt_sz = fdt_totalsize(fdt_orig) * 2; g_autofree void *fdt = g_malloc0(fdt_sz); + uint8_t rng_seed[32]; err = fdt_open_into(fdt_orig, fdt, fdt_sz); if (err) { @@ -322,6 +376,9 @@ static const void *boston_fdt_filter(void *opaque, const void *fdt_orig, return NULL; } + qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed)); + qemu_fdt_setprop(fdt, "/chosen", "rng-seed", rng_seed, sizeof(rng_seed)); + cmdline = (machine->kernel_cmdline && machine->kernel_cmdline[0]) ? 
machine->kernel_cmdline : " "; err = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline); @@ -333,8 +390,9 @@ static const void *boston_fdt_filter(void *opaque, const void *fdt_orig, ram_low_sz = MIN(256 * MiB, machine->ram_size); ram_high_sz = machine->ram_size - ram_low_sz; qemu_fdt_setprop_sized_cells(fdt, "/memory@0", "reg", - 1, 0x00000000, 1, ram_low_sz, - 1, 0x90000000, 1, ram_high_sz); + 1, boston_memmap[BOSTON_LOWDDR].base, 1, ram_low_sz, + 1, boston_memmap[BOSTON_HIGHDDR].base + ram_low_sz, + 1, ram_high_sz); fdt = g_realloc(fdt, fdt_totalsize(fdt)); qemu_fdt_dumpdtb(fdt, fdt_sz); @@ -370,7 +428,7 @@ static inline XilinxPCIEHost * xilinx_pcie_init(MemoryRegion *sys_mem, uint32_t bus_nr, hwaddr cfg_base, uint64_t cfg_size, hwaddr mmio_base, uint64_t mmio_size, - qemu_irq irq, bool link_up) + qemu_irq irq) { DeviceState *dev; MemoryRegion *cfg, *mmio; @@ -382,7 +440,6 @@ xilinx_pcie_init(MemoryRegion *sys_mem, uint32_t bus_nr, qdev_prop_set_uint64(dev, "cfg_size", cfg_size); qdev_prop_set_uint64(dev, "mmio_base", mmio_base); qdev_prop_set_uint64(dev, "mmio_size", mmio_size); - qdev_prop_set_bit(dev, "link_up", link_up); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); @@ -397,6 +454,222 @@ xilinx_pcie_init(MemoryRegion *sys_mem, uint32_t bus_nr, return XILINX_PCIE_HOST(dev); } + +static void fdt_create_pcie(void *fdt, int gic_ph, int irq, hwaddr reg_base, + hwaddr reg_size, hwaddr mmio_base, hwaddr mmio_size) +{ + int i; + char *name, *intc_name; + uint32_t intc_ph; + uint32_t interrupt_map[FDT_PCI_IRQ_MAP_PINS][FDT_PCI_IRQ_MAP_DESCS]; + + intc_ph = qemu_fdt_alloc_phandle(fdt); + name = g_strdup_printf("/soc/pci@%" HWADDR_PRIx, reg_base); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", + "xlnx,axi-pcie-host-1.00.a"); + qemu_fdt_setprop_string(fdt, name, "device_type", "pci"); + qemu_fdt_setprop_cells(fdt, name, "reg", reg_base, reg_size); + + qemu_fdt_setprop_cell(fdt, name, "#address-cells", 3); + qemu_fdt_setprop_cell(fdt, name, "#size-cells", 2); + qemu_fdt_setprop_cell(fdt, name, "#interrupt-cells", 1); + + qemu_fdt_setprop_cell(fdt, name, "interrupt-parent", gic_ph); + qemu_fdt_setprop_cells(fdt, name, "interrupts", FDT_GIC_SHARED, irq, + FDT_IRQ_TYPE_LEVEL_HIGH); + + qemu_fdt_setprop_cells(fdt, name, "ranges", 0x02000000, 0, mmio_base, + mmio_base, 0, mmio_size); + qemu_fdt_setprop_cells(fdt, name, "bus-range", 0x00, 0xff); + + + + intc_name = g_strdup_printf("%s/interrupt-controller", name); + qemu_fdt_add_subnode(fdt, intc_name); + qemu_fdt_setprop(fdt, intc_name, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(fdt, intc_name, "#address-cells", 0); + qemu_fdt_setprop_cell(fdt, intc_name, "#interrupt-cells", 1); + qemu_fdt_setprop_cell(fdt, intc_name, "phandle", intc_ph); + + qemu_fdt_setprop_cells(fdt, name, "interrupt-map-mask", 0, 0, 0, 7); + for (i = 0; i < FDT_PCI_IRQ_MAP_PINS; i++) { + uint32_t *irqmap = interrupt_map[i]; + + irqmap[0] = cpu_to_be32(0); + irqmap[1] = cpu_to_be32(0); + irqmap[2] = cpu_to_be32(0); + irqmap[3] = cpu_to_be32(i + 1); + irqmap[4] = cpu_to_be32(intc_ph); + irqmap[5] = cpu_to_be32(i + 1); + } + qemu_fdt_setprop(fdt, name, "interrupt-map", + &interrupt_map, sizeof(interrupt_map)); + + g_free(intc_name); + g_free(name); +} + +static const void *create_fdt(BostonState *s, + const MemMapEntry *memmap, int *dt_size) +{ + void *fdt; + int cpu; + MachineState *mc = s->mach; + uint32_t platreg_ph, gic_ph, clk_ph; + char *name, *gic_name, *platreg_name, *stdout_name; + static const 
char * const syscon_compat[2] = { + "img,boston-platform-regs", "syscon" + }; + + fdt = create_device_tree(dt_size); + if (!fdt) { + error_report("create_device_tree() failed"); + exit(1); + } + + platreg_ph = qemu_fdt_alloc_phandle(fdt); + gic_ph = qemu_fdt_alloc_phandle(fdt); + clk_ph = qemu_fdt_alloc_phandle(fdt); + + qemu_fdt_setprop_string(fdt, "/", "model", "img,boston"); + qemu_fdt_setprop_string(fdt, "/", "compatible", "img,boston"); + qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x1); + qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x1); + + + qemu_fdt_add_subnode(fdt, "/cpus"); + qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0); + qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1); + + for (cpu = 0; cpu < mc->smp.cpus; cpu++) { + name = g_strdup_printf("/cpus/cpu@%d", cpu); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "img,mips"); + qemu_fdt_setprop_string(fdt, name, "status", "okay"); + qemu_fdt_setprop_cell(fdt, name, "reg", cpu); + qemu_fdt_setprop_string(fdt, name, "device_type", "cpu"); + qemu_fdt_setprop_cells(fdt, name, "clocks", clk_ph, FDT_BOSTON_CLK_CPU); + g_free(name); + } + + qemu_fdt_add_subnode(fdt, "/soc"); + qemu_fdt_setprop(fdt, "/soc", "ranges", NULL, 0); + qemu_fdt_setprop_string(fdt, "/soc", "compatible", "simple-bus"); + qemu_fdt_setprop_cell(fdt, "/soc", "#size-cells", 0x1); + qemu_fdt_setprop_cell(fdt, "/soc", "#address-cells", 0x1); + + fdt_create_pcie(fdt, gic_ph, 2, + memmap[BOSTON_PCIE0].base, memmap[BOSTON_PCIE0].size, + memmap[BOSTON_PCIE0_MMIO].base, memmap[BOSTON_PCIE0_MMIO].size); + + fdt_create_pcie(fdt, gic_ph, 1, + memmap[BOSTON_PCIE1].base, memmap[BOSTON_PCIE1].size, + memmap[BOSTON_PCIE1_MMIO].base, memmap[BOSTON_PCIE1_MMIO].size); + + fdt_create_pcie(fdt, gic_ph, 0, + memmap[BOSTON_PCIE2].base, memmap[BOSTON_PCIE2].size, + memmap[BOSTON_PCIE2_MMIO].base, memmap[BOSTON_PCIE2_MMIO].size); + + /* GIC with its timer node */ + gic_name = g_strdup_printf("/soc/interrupt-controller@%" HWADDR_PRIx, + memmap[BOSTON_GIC].base); + qemu_fdt_add_subnode(fdt, gic_name); + qemu_fdt_setprop_string(fdt, gic_name, "compatible", "mti,gic"); + qemu_fdt_setprop_cells(fdt, gic_name, "reg", memmap[BOSTON_GIC].base, + memmap[BOSTON_GIC].size); + qemu_fdt_setprop(fdt, gic_name, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(fdt, gic_name, "#interrupt-cells", 3); + qemu_fdt_setprop_cell(fdt, gic_name, "phandle", gic_ph); + + name = g_strdup_printf("%s/timer", gic_name); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "mti,gic-timer"); + qemu_fdt_setprop_cells(fdt, name, "interrupts", FDT_GIC_LOCAL, 1, + FDT_IRQ_TYPE_NONE); + qemu_fdt_setprop_cells(fdt, name, "clocks", clk_ph, FDT_BOSTON_CLK_CPU); + g_free(name); + g_free(gic_name); + + /* CDMM node */ + name = g_strdup_printf("/soc/cdmm@%" HWADDR_PRIx, memmap[BOSTON_CDMM].base); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "mti,mips-cdmm"); + qemu_fdt_setprop_cells(fdt, name, "reg", memmap[BOSTON_CDMM].base, + memmap[BOSTON_CDMM].size); + g_free(name); + + /* CPC node */ + name = g_strdup_printf("/soc/cpc@%" HWADDR_PRIx, memmap[BOSTON_CPC].base); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "mti,mips-cpc"); + qemu_fdt_setprop_cells(fdt, name, "reg", memmap[BOSTON_CPC].base, + memmap[BOSTON_CPC].size); + g_free(name); + + /* platreg and its clk node */ + platreg_name = g_strdup_printf("/soc/system-controller@%" HWADDR_PRIx, +
memmap[BOSTON_PLATREG].base); + qemu_fdt_add_subnode(fdt, platreg_name); + qemu_fdt_setprop_string_array(fdt, platreg_name, "compatible", + (char **)&syscon_compat, + ARRAY_SIZE(syscon_compat)); + qemu_fdt_setprop_cells(fdt, platreg_name, "reg", + memmap[BOSTON_PLATREG].base, + memmap[BOSTON_PLATREG].size); + qemu_fdt_setprop_cell(fdt, platreg_name, "phandle", platreg_ph); + + name = g_strdup_printf("%s/clock", platreg_name); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "img,boston-clock"); + qemu_fdt_setprop_cell(fdt, name, "#clock-cells", 1); + qemu_fdt_setprop_cell(fdt, name, "phandle", clk_ph); + g_free(name); + g_free(platreg_name); + + /* reboot node */ + name = g_strdup_printf("/soc/reboot"); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "syscon-reboot"); + qemu_fdt_setprop_cell(fdt, name, "regmap", platreg_ph); + qemu_fdt_setprop_cell(fdt, name, "offset", 0x10); + qemu_fdt_setprop_cell(fdt, name, "mask", 0x10); + g_free(name); + + /* uart node */ + name = g_strdup_printf("/soc/uart@%" HWADDR_PRIx, memmap[BOSTON_UART].base); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "ns16550a"); + qemu_fdt_setprop_cells(fdt, name, "reg", memmap[BOSTON_UART].base, + memmap[BOSTON_UART].size); + qemu_fdt_setprop_cell(fdt, name, "reg-shift", 0x2); + qemu_fdt_setprop_cell(fdt, name, "interrupt-parent", gic_ph); + qemu_fdt_setprop_cells(fdt, name, "interrupts", FDT_GIC_SHARED, 3, + FDT_IRQ_TYPE_LEVEL_HIGH); + qemu_fdt_setprop_cells(fdt, name, "clocks", clk_ph, FDT_BOSTON_CLK_SYS); + + qemu_fdt_add_subnode(fdt, "/chosen"); + stdout_name = g_strdup_printf("%s:115200", name); + qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", stdout_name); + g_free(stdout_name); + g_free(name); + + /* lcd node */ + name = g_strdup_printf("/soc/lcd@%" HWADDR_PRIx, memmap[BOSTON_LCD].base); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "img,boston-lcd"); + qemu_fdt_setprop_cells(fdt, name, "reg", memmap[BOSTON_LCD].base, + memmap[BOSTON_LCD].size); + g_free(name); + + name = g_strdup_printf("/memory@0"); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "device_type", "memory"); + g_free(name); + + return fdt; +} + static void boston_mach_init(MachineState *machine) { DeviceState *dev; @@ -438,11 +711,15 @@ static void boston_mach_init(MachineState *machine) sysbus_mmio_map_overlap(SYS_BUS_DEVICE(&s->cps), 0, 0, 1); flash = g_new(MemoryRegion, 1); - memory_region_init_rom(flash, NULL, "boston.flash", 128 * MiB, - &error_fatal); - memory_region_add_subregion_overlap(sys_mem, 0x18000000, flash, 0); + memory_region_init_rom(flash, NULL, "boston.flash", + boston_memmap[BOSTON_FLASH].size, &error_fatal); + memory_region_add_subregion_overlap(sys_mem, + boston_memmap[BOSTON_FLASH].base, + flash, 0); - memory_region_add_subregion_overlap(sys_mem, 0x80000000, machine->ram, 0); + memory_region_add_subregion_overlap(sys_mem, + boston_memmap[BOSTON_HIGHDDR].base, + machine->ram, 0); ddr_low_alias = g_new(MemoryRegion, 1); memory_region_init_alias(ddr_low_alias, NULL, "boston_low.ddr", @@ -451,32 +728,41 @@ static void boston_mach_init(MachineState *machine) memory_region_add_subregion_overlap(sys_mem, 0, ddr_low_alias, 0); xilinx_pcie_init(sys_mem, 0, - 0x10000000, 32 * MiB, - 0x40000000, 1 * GiB, - get_cps_irq(&s->cps, 2), false); + boston_memmap[BOSTON_PCIE0].base, + boston_memmap[BOSTON_PCIE0].size, + boston_memmap[BOSTON_PCIE0_MMIO].base, + 
boston_memmap[BOSTON_PCIE0_MMIO].size, + get_cps_irq(&s->cps, 2)); xilinx_pcie_init(sys_mem, 1, - 0x12000000, 32 * MiB, - 0x20000000, 512 * MiB, - get_cps_irq(&s->cps, 1), false); + boston_memmap[BOSTON_PCIE1].base, + boston_memmap[BOSTON_PCIE1].size, + boston_memmap[BOSTON_PCIE1_MMIO].base, + boston_memmap[BOSTON_PCIE1_MMIO].size, + get_cps_irq(&s->cps, 1)); pcie2 = xilinx_pcie_init(sys_mem, 2, - 0x14000000, 32 * MiB, - 0x16000000, 1 * MiB, - get_cps_irq(&s->cps, 0), true); + boston_memmap[BOSTON_PCIE2].base, + boston_memmap[BOSTON_PCIE2].size, + boston_memmap[BOSTON_PCIE2_MMIO].base, + boston_memmap[BOSTON_PCIE2_MMIO].size, + get_cps_irq(&s->cps, 0)); platreg = g_new(MemoryRegion, 1); memory_region_init_io(platreg, NULL, &boston_platreg_ops, s, - "boston-platregs", 0x1000); - memory_region_add_subregion_overlap(sys_mem, 0x17ffd000, platreg, 0); + "boston-platregs", + boston_memmap[BOSTON_PLATREG].size); + memory_region_add_subregion_overlap(sys_mem, + boston_memmap[BOSTON_PLATREG].base, platreg, 0); - s->uart = serial_mm_init(sys_mem, 0x17ffe000, 2, + s->uart = serial_mm_init(sys_mem, boston_memmap[BOSTON_UART].base, 2, get_cps_irq(&s->cps, 3), 10000000, serial_hd(0), DEVICE_NATIVE_ENDIAN); lcd = g_new(MemoryRegion, 1); memory_region_init_io(lcd, NULL, &boston_lcd_ops, s, "boston-lcd", 0x8); - memory_region_add_subregion_overlap(sys_mem, 0x17fff000, lcd, 0); + memory_region_add_subregion_overlap(sys_mem, + boston_memmap[BOSTON_LCD].base, lcd, 0); chr = qemu_chr_new("lcd", "vc:320x240", NULL); qemu_chr_fe_init(&s->lcd_display, chr, NULL); @@ -499,10 +785,43 @@ static void boston_mach_init(MachineState *machine) exit(1); } } else if (machine->kernel_filename) { - fit_err = load_fit(&boston_fit_loader, machine->kernel_filename, s); - if (fit_err) { - error_report("unable to load FIT image"); - exit(1); + uint64_t kernel_entry, kernel_high; + ssize_t kernel_size; + + kernel_size = load_elf(machine->kernel_filename, NULL, + cpu_mips_kseg0_to_phys, NULL, + &kernel_entry, NULL, &kernel_high, + NULL, 0, EM_MIPS, 1, 0); + + if (kernel_size > 0) { + int dt_size; + g_autofree const void *dtb_file_data = NULL; + g_autofree const void *dtb_load_data = NULL; + hwaddr dtb_paddr = QEMU_ALIGN_UP(kernel_high, 64 * KiB); + hwaddr dtb_vaddr = cpu_mips_phys_to_kseg0(NULL, dtb_paddr); + + s->kernel_entry = kernel_entry; + if (machine->dtb) { + dtb_file_data = load_device_tree(machine->dtb, &dt_size); + } else { + dtb_file_data = create_fdt(s, boston_memmap, &dt_size); + } + + dtb_load_data = boston_fdt_filter(s, dtb_file_data, + NULL, &dtb_vaddr); + + /* Calculate real fdt size after filter */ + dt_size = fdt_totalsize(dtb_load_data); + rom_add_blob_fixed("dtb", dtb_load_data, dt_size, dtb_paddr); + qemu_register_reset_nosnapshotload(qemu_fdt_randomize_seeds, + rom_ptr(dtb_paddr, dt_size)); + } else { + /* Try to load file as FIT */ + fit_err = load_fit(&boston_fit_loader, machine->kernel_filename, s); + if (fit_err) { + error_report("unable to load kernel image"); + exit(1); + } } gen_firmware(memory_region_get_ram_ptr(flash) + 0x7c00000, diff --git a/hw/mips/fuloong2e.c b/hw/mips/fuloong2e.c index c1b8066a13..34befa5dd5 100644 --- a/hw/mips/fuloong2e.c +++ b/hw/mips/fuloong2e.c @@ -19,7 +19,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qemu/units.h" #include "qapi/error.h" @@ -50,7 +49,6 @@ /* Fuloong 2e has a 512k flash: Winbond W39L040AP70Z */ #define BIOS_SIZE (512 * KiB) -#define MAX_IDE_BUS 2 /* * PMON is not part of qemu and released with BSD license, 
anyone @@ -72,7 +70,7 @@ static struct _loaderparams { const char *initrd_filename; } loaderparams; -static void GCC_FMT_ATTR(3, 4) prom_set(uint32_t *prom_buf, int index, +static void G_GNUC_PRINTF(3, 4) prom_set(uint32_t *prom_buf, int index, const char *string, ...) { va_list ap; @@ -181,8 +179,12 @@ static void write_bootloader(CPUMIPSState *env, uint8_t *base, /* Second part of the bootloader */ p = (uint32_t *)(base + 0x040); - bl_gen_jump_kernel(&p, ENVP_VADDR - 64, 2, ENVP_VADDR, ENVP_VADDR + 8, - loaderparams.ram_size, kernel_addr); + bl_gen_jump_kernel(&p, + true, ENVP_VADDR - 64, + true, 2, true, ENVP_VADDR, + true, ENVP_VADDR + 8, + true, loaderparams.ram_size, + kernel_addr); } static void main_cpu_reset(void *opaque) @@ -197,29 +199,6 @@ static void main_cpu_reset(void *opaque) } } -static void vt82c686b_southbridge_init(PCIBus *pci_bus, int slot, qemu_irq intc, - I2CBus **i2c_bus) -{ - PCIDevice *dev; - - dev = pci_create_simple_multifunction(pci_bus, PCI_DEVFN(slot, 0), true, - TYPE_VT82C686B_ISA); - qdev_connect_gpio_out(DEVICE(dev), 0, intc); - - dev = pci_create_simple(pci_bus, PCI_DEVFN(slot, 1), "via-ide"); - pci_ide_create_devs(dev); - - pci_create_simple(pci_bus, PCI_DEVFN(slot, 2), "vt82c686b-usb-uhci"); - pci_create_simple(pci_bus, PCI_DEVFN(slot, 3), "vt82c686b-usb-uhci"); - - dev = pci_create_simple(pci_bus, PCI_DEVFN(slot, 4), TYPE_VT82C686B_PM); - *i2c_bus = I2C_BUS(qdev_get_child_bus(DEVICE(dev), "i2c")); - - /* Audio support */ - pci_create_simple(pci_bus, PCI_DEVFN(slot, 5), TYPE_VIA_AC97); - pci_create_simple(pci_bus, PCI_DEVFN(slot, 6), TYPE_VIA_MC97); -} - /* Network support */ static void network_init(PCIBus *pci_bus) { @@ -316,11 +295,24 @@ static void mips_fuloong2e_init(MachineState *machine) pci_bus = bonito_init((qemu_irq *)&(env->irq[2])); /* South bridge -> IP5 */ - vt82c686b_southbridge_init(pci_bus, FULOONG2E_VIA_SLOT, env->irq[5], - &smbus); + pci_dev = pci_create_simple_multifunction(pci_bus, + PCI_DEVFN(FULOONG2E_VIA_SLOT, 0), + true, TYPE_VT82C686B_ISA); + object_property_add_alias(OBJECT(machine), "rtc-time", + object_resolve_path_component(OBJECT(pci_dev), + "rtc"), + "date"); + qdev_connect_gpio_out(DEVICE(pci_dev), 0, env->irq[5]); + + dev = DEVICE(object_resolve_path_component(OBJECT(pci_dev), "ide")); + pci_ide_create_devs(PCI_DEVICE(dev)); + + dev = DEVICE(object_resolve_path_component(OBJECT(pci_dev), "pm")); + smbus = I2C_BUS(qdev_get_child_bus(dev, "i2c")); /* GPU */ if (vga_interface_type != VGA_NONE) { + vga_interface_created = true; pci_dev = pci_new(-1, "ati-vga"); dev = DEVICE(pci_dev); qdev_prop_set_uint32(dev, "vgamem_mb", 16); diff --git a/hw/mips/gt64xxx_pci.c b/hw/mips/gt64xxx_pci.c index c7480bd019..19d0d9889f 100644 --- a/hw/mips/gt64xxx_pci.c +++ b/hw/mips/gt64xxx_pci.c @@ -26,10 +26,8 @@ #include "qapi/error.h" #include "qemu/units.h" #include "qemu/log.h" -#include "hw/mips/mips.h" #include "hw/pci/pci.h" #include "hw/pci/pci_host.h" -#include "hw/southbridge/piix.h" #include "migration/vmstate.h" #include "hw/intc/i8259.h" #include "hw/irq.h" @@ -981,56 +979,6 @@ static const MemoryRegionOps isd_mem_ops = { }, }; -static int gt64120_pci_map_irq(PCIDevice *pci_dev, int irq_num) -{ - int slot; - - slot = PCI_SLOT(pci_dev->devfn); - - switch (slot) { - /* PIIX4 USB */ - case 10: - return 3; - /* AMD 79C973 Ethernet */ - case 11: - return 1; - /* Crystal 4281 Sound */ - case 12: - return 2; - /* PCI slot 1 to 4 */ - case 18 ... 
21: - return ((slot - 18) + irq_num) & 0x03; - /* Unknown device, don't do any translation */ - default: - return irq_num; - } -} - -static int pci_irq_levels[4]; - -static void gt64120_pci_set_irq(void *opaque, int irq_num, int level) -{ - int i, pic_irq, pic_level; - qemu_irq *pic = opaque; - - pci_irq_levels[irq_num] = level; - - /* now we change the pic irq level according to the piix irq mappings */ - /* XXX: optimize */ - pic_irq = piix4_dev->config[PIIX_PIRQCA + irq_num]; - if (pic_irq < 16) { - /* The pic level is the logical OR of all the PCI irqs mapped to it. */ - pic_level = 0; - for (i = 0; i < 4; i++) { - if (pic_irq == piix4_dev->config[PIIX_PIRQCA + i]) { - pic_level |= pci_irq_levels[i]; - } - } - qemu_set_irq(pic[pic_irq], pic_level); - } -} - - static void gt64120_reset(DeviceState *dev) { GT64120State *s = GT64120_PCI_HOST_BRIDGE(dev); @@ -1038,7 +986,7 @@ static void gt64120_reset(DeviceState *dev) /* FIXME: Malta specific hw assumptions ahead */ /* CPU Configuration */ -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN s->regs[GT_CPU] = 0x00000000; #else s->regs[GT_CPU] = 0x00001000; @@ -1149,7 +1097,7 @@ static void gt64120_reset(DeviceState *dev) s->regs[GT_TC_CONTROL] = 0x00000000; /* PCI Internal */ -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN s->regs[GT_PCI0_CMD] = 0x00000000; #else s->regs[GT_PCI0_CMD] = 0x00010001; @@ -1170,7 +1118,7 @@ static void gt64120_reset(DeviceState *dev) s->regs[GT_PCI0_SSCS10_BAR] = 0x00000000; s->regs[GT_PCI0_SSCS32_BAR] = 0x01000000; s->regs[GT_PCI0_SCS3BT_BAR] = 0x1f000000; -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN s->regs[GT_PCI1_CMD] = 0x00000000; #else s->regs[GT_PCI1_CMD] = 0x00010001; @@ -1202,32 +1150,18 @@ static void gt64120_reset(DeviceState *dev) static void gt64120_realize(DeviceState *dev, Error **errp) { GT64120State *s = GT64120_PCI_HOST_BRIDGE(dev); + PCIHostState *phb = PCI_HOST_BRIDGE(dev); memory_region_init_io(&s->ISD_mem, OBJECT(dev), &isd_mem_ops, s, "gt64120-isd", 0x1000); -} - -PCIBus *gt64120_register(qemu_irq *pic) -{ - GT64120State *d; - PCIHostState *phb; - DeviceState *dev; - - dev = qdev_new(TYPE_GT64120_PCI_HOST_BRIDGE); - d = GT64120_PCI_HOST_BRIDGE(dev); - phb = PCI_HOST_BRIDGE(dev); - memory_region_init(&d->pci0_mem, OBJECT(dev), "pci0-mem", 4 * GiB); - address_space_init(&d->pci0_mem_as, &d->pci0_mem, "pci0-mem"); - phb->bus = pci_register_root_bus(dev, "pci", - gt64120_pci_set_irq, gt64120_pci_map_irq, - pic, - &d->pci0_mem, - get_system_io(), - PCI_DEVFN(18, 0), 4, TYPE_PCI_BUS); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + memory_region_init(&s->pci0_mem, OBJECT(dev), "pci0-mem", 4 * GiB); + address_space_init(&s->pci0_mem_as, &s->pci0_mem, "pci0-mem"); + phb->bus = pci_root_bus_new(dev, "pci", + &s->pci0_mem, + get_system_io(), + PCI_DEVFN(18, 0), TYPE_PCI_BUS); pci_create_simple(phb->bus, PCI_DEVFN(0, 0), "gt64120_pci"); - return phb->bus; } static void gt64120_pci_realize(PCIDevice *d, Error **errp) diff --git a/hw/mips/jazz.c b/hw/mips/jazz.c index d6183e1882..6aefe9a61b 100644 --- a/hw/mips/jazz.c +++ b/hw/mips/jazz.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "hw/clock.h" #include "hw/mips/mips.h" @@ -35,7 +34,6 @@ #include "hw/isa/isa.h" #include "hw/block/fdc.h" #include "sysemu/sysemu.h" -#include "sysemu/arch_init.h" #include "hw/boards.h" #include "net/net.h" #include "hw/scsi/esp.h" @@ -44,6 +42,7 @@ #include "hw/rtc/mc146818rtc.h" #include "hw/timer/i8254.h" #include 
"hw/display/vga.h" +#include "hw/display/bochs-vbe.h" #include "hw/audio/pcspk.h" #include "hw/input/i8042.h" #include "hw/sysbus.h" @@ -137,11 +136,11 @@ static void mips_jazz_init(MachineState *machine, MemoryRegion *isa_mem = g_new(MemoryRegion, 1); MemoryRegion *isa_io = g_new(MemoryRegion, 1); MemoryRegion *rtc = g_new(MemoryRegion, 1); - MemoryRegion *i8042 = g_new(MemoryRegion, 1); MemoryRegion *dma_dummy = g_new(MemoryRegion, 1); MemoryRegion *dp8393x_prom = g_new(MemoryRegion, 1); NICInfo *nd; DeviceState *dev, *rc4030; + MMIOKBDState *i8042; SysBusDevice *sysbus; ISABus *isa_bus; ISADevice *pit; @@ -158,7 +157,7 @@ static void mips_jazz_init(MachineState *machine, [JAZZ_PICA61] = {33333333, 4}, }; -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN big_endian = 1; #else big_endian = 0; @@ -275,7 +274,13 @@ static void mips_jazz_init(MachineState *machine, } break; case JAZZ_PICA61: - isa_vga_mm_init(0x40000000, 0x60000000, 0, get_system_memory()); + dev = qdev_new(TYPE_VGA_MMIO); + qdev_prop_set_uint8(dev, "it_shift", 0); + sysbus = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sysbus, &error_fatal); + sysbus_mmio_map(sysbus, 0, 0x60000000); + sysbus_mmio_map(sysbus, 1, 0x400a0000); + sysbus_mmio_map(sysbus, 2, VBE_DISPI_LFB_PHYSICAL_ADDRESS); break; default: break; @@ -348,7 +353,7 @@ static void mips_jazz_init(MachineState *machine, fds[n] = drive_get(IF_FLOPPY, 0, n); } /* FIXME: we should enable DMA with a custom IsaDma device */ - fdctrl_init_sysbus(qdev_get_gpio_in(rc4030, 1), -1, 0x80003000, fds); + fdctrl_init_sysbus(qdev_get_gpio_in(rc4030, 1), 0x80003000, fds); /* Real time clock */ mc146818_rtc_init(isa_bus, 1980, NULL); @@ -356,9 +361,19 @@ static void mips_jazz_init(MachineState *machine, memory_region_add_subregion(address_space, 0x80004000, rtc); /* Keyboard (i8042) */ - i8042_mm_init(qdev_get_gpio_in(rc4030, 6), qdev_get_gpio_in(rc4030, 7), - i8042, 0x1000, 0x1); - memory_region_add_subregion(address_space, 0x80005000, i8042); + i8042 = I8042_MMIO(qdev_new(TYPE_I8042_MMIO)); + qdev_prop_set_uint64(DEVICE(i8042), "mask", 1); + qdev_prop_set_uint32(DEVICE(i8042), "size", 0x1000); + sysbus_realize_and_unref(SYS_BUS_DEVICE(i8042), &error_fatal); + + qdev_connect_gpio_out(DEVICE(i8042), I8042_KBD_IRQ, + qdev_get_gpio_in(rc4030, 6)); + qdev_connect_gpio_out(DEVICE(i8042), I8042_MOUSE_IRQ, + qdev_get_gpio_in(rc4030, 7)); + + memory_region_add_subregion(address_space, 0x80005000, + sysbus_mmio_get_region(SYS_BUS_DEVICE(i8042), + 0)); /* Serial ports */ serial_mm_init(address_space, 0x80006000, 0, diff --git a/hw/mips/loongson3_virt.c b/hw/mips/loongson3_virt.c index ae192db0c8..25534288dd 100644 --- a/hw/mips/loongson3_virt.c +++ b/hw/mips/loongson3_virt.c @@ -24,7 +24,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/units.h" #include "qemu/cutils.h" #include "qemu/datadir.h" diff --git a/hw/mips/malta.c b/hw/mips/malta.c index 7dcf175d72..c0a2e0ab04 100644 --- a/hw/mips/malta.c +++ b/hw/mips/malta.c @@ -25,8 +25,8 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qemu/bitops.h" -#include "qemu-common.h" #include "qemu/datadir.h" +#include "qemu/guest-random.h" #include "hw/clock.h" #include "hw/southbridge/piix.h" #include "hw/isa/superio.h" @@ -36,12 +36,12 @@ #include "hw/i2c/smbus_eeprom.h" #include "hw/block/flash.h" #include "hw/mips/mips.h" +#include "hw/mips/bootloader.h" #include "hw/mips/cpudevs.h" #include "hw/pci/pci.h" -#include "sysemu/arch_init.h" #include "qemu/log.h" #include "hw/mips/bios.h" -#include "hw/ide.h" 
+#include "hw/ide/pci.h" #include "hw/irq.h" #include "hw/loader.h" #include "elf.h" @@ -71,8 +71,6 @@ #define FLASH_SIZE 0x400000 -#define MAX_IDE_BUS 2 - typedef struct { MemoryRegion iomem; MemoryRegion iomem_lo; /* 0 - 0x900 */ @@ -98,7 +96,6 @@ struct MaltaState { Clock *cpuclk; MIPSCPSState cps; - qemu_irq i8259[ISA_NUM_IRQS]; }; static struct _loaderparams { @@ -369,7 +366,7 @@ static uint64_t malta_fpga_read(void *opaque, hwaddr addr, /* STATUS Register */ case 0x00208: -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN val = 0x00000012; #else val = 0x00000010; @@ -697,7 +694,7 @@ static void write_bootloader_nanomips(uint8_t *base, uint64_t run_addr, stw_p(p++, 0xe040); stw_p(p++, 0x0681); /* lui t1, %hi(0xb4000000) */ -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN stw_p(p++, 0xe020); stw_p(p++, 0x0be1); /* lui t0, %hi(0xdf000000) */ @@ -869,88 +866,63 @@ static void write_bootloader(uint8_t *base, uint64_t run_addr, /* Second part of the bootloader */ p = (uint32_t *) (base + 0x580); - if (semihosting_get_argc()) { - /* Preserve a0 content as arguments have been passed */ - stl_p(p++, 0x00000000); /* nop */ - } else { - stl_p(p++, 0x24040002); /* addiu a0, zero, 2 */ - } + /* + * Load BAR registers as done by YAMON: + * + * - set up PCI0 I/O BARs from 0x18000000 to 0x181fffff + * - set up PCI0 MEM0 at 0x10000000, size 0x7e00000 + * - set up PCI0 MEM1 at 0x18200000, size 0xbc00000 + * + */ - /* lui sp, high(ENVP_VADDR) */ - stl_p(p++, 0x3c1d0000 | (((ENVP_VADDR - 64) >> 16) & 0xffff)); - /* ori sp, sp, low(ENVP_VADDR) */ - stl_p(p++, 0x37bd0000 | ((ENVP_VADDR - 64) & 0xffff)); - /* lui a1, high(ENVP_VADDR) */ - stl_p(p++, 0x3c050000 | ((ENVP_VADDR >> 16) & 0xffff)); - /* ori a1, a1, low(ENVP_VADDR) */ - stl_p(p++, 0x34a50000 | (ENVP_VADDR & 0xffff)); - /* lui a2, high(ENVP_VADDR + 8) */ - stl_p(p++, 0x3c060000 | (((ENVP_VADDR + 8) >> 16) & 0xffff)); - /* ori a2, a2, low(ENVP_VADDR + 8) */ - stl_p(p++, 0x34c60000 | ((ENVP_VADDR + 8) & 0xffff)); - /* lui a3, high(ram_low_size) */ - stl_p(p++, 0x3c070000 | (loaderparams.ram_low_size >> 16)); - /* ori a3, a3, low(ram_low_size) */ - stl_p(p++, 0x34e70000 | (loaderparams.ram_low_size & 0xffff)); - - /* Load BAR registers as done by YAMON */ - stl_p(p++, 0x3c09b400); /* lui t1, 0xb400 */ - -#ifdef TARGET_WORDS_BIGENDIAN - stl_p(p++, 0x3c08df00); /* lui t0, 0xdf00 */ + /* Bus endianess is always reversed */ +#if TARGET_BIG_ENDIAN +#define cpu_to_gt32 cpu_to_le32 #else - stl_p(p++, 0x340800df); /* ori t0, r0, 0x00df */ +#define cpu_to_gt32 cpu_to_be32 #endif - stl_p(p++, 0xad280068); /* sw t0, 0x0068(t1) */ - stl_p(p++, 0x3c09bbe0); /* lui t1, 0xbbe0 */ + /* move GT64120 registers from 0x14000000 to 0x1be00000 */ + bl_gen_write_u32(&p, /* GT_ISD */ + cpu_mips_phys_to_kseg1(NULL, 0x14000000 + 0x68), + cpu_to_gt32(0x1be00000 << 3)); -#ifdef TARGET_WORDS_BIGENDIAN - stl_p(p++, 0x3c08c000); /* lui t0, 0xc000 */ -#else - stl_p(p++, 0x340800c0); /* ori t0, r0, 0x00c0 */ -#endif - stl_p(p++, 0xad280048); /* sw t0, 0x0048(t1) */ -#ifdef TARGET_WORDS_BIGENDIAN - stl_p(p++, 0x3c084000); /* lui t0, 0x4000 */ -#else - stl_p(p++, 0x34080040); /* ori t0, r0, 0x0040 */ -#endif - stl_p(p++, 0xad280050); /* sw t0, 0x0050(t1) */ + /* setup MEM-to-PCI0 mapping */ + /* setup PCI0 io window to 0x18000000-0x181fffff */ + bl_gen_write_u32(&p, /* GT_PCI0IOLD */ + cpu_mips_phys_to_kseg1(NULL, 0x1be00000 + 0x48), + cpu_to_gt32(0x18000000 << 3)); + bl_gen_write_u32(&p, /* GT_PCI0IOHD */ + cpu_mips_phys_to_kseg1(NULL, 0x1be00000 + 0x50), + 
cpu_to_gt32(0x08000000 << 3)); + /* setup PCI0 mem windows */ + bl_gen_write_u32(&p, /* GT_PCI0M0LD */ + cpu_mips_phys_to_kseg1(NULL, 0x1be00000 + 0x58), + cpu_to_gt32(0x10000000 << 3)); + bl_gen_write_u32(&p, /* GT_PCI0M0HD */ + cpu_mips_phys_to_kseg1(NULL, 0x1be00000 + 0x60), + cpu_to_gt32(0x07e00000 << 3)); -#ifdef TARGET_WORDS_BIGENDIAN - stl_p(p++, 0x3c088000); /* lui t0, 0x8000 */ -#else - stl_p(p++, 0x34080080); /* ori t0, r0, 0x0080 */ -#endif - stl_p(p++, 0xad280058); /* sw t0, 0x0058(t1) */ -#ifdef TARGET_WORDS_BIGENDIAN - stl_p(p++, 0x3c083f00); /* lui t0, 0x3f00 */ -#else - stl_p(p++, 0x3408003f); /* ori t0, r0, 0x003f */ -#endif - stl_p(p++, 0xad280060); /* sw t0, 0x0060(t1) */ + bl_gen_write_u32(&p, /* GT_PCI0M1LD */ + cpu_mips_phys_to_kseg1(NULL, 0x1be00000 + 0x80), + cpu_to_gt32(0x18200000 << 3)); + bl_gen_write_u32(&p, /* GT_PCI0M1HD */ + cpu_mips_phys_to_kseg1(NULL, 0x1be00000 + 0x88), + cpu_to_gt32(0x0bc00000 << 3)); -#ifdef TARGET_WORDS_BIGENDIAN - stl_p(p++, 0x3c08c100); /* lui t0, 0xc100 */ -#else - stl_p(p++, 0x340800c1); /* ori t0, r0, 0x00c1 */ -#endif - stl_p(p++, 0xad280080); /* sw t0, 0x0080(t1) */ -#ifdef TARGET_WORDS_BIGENDIAN - stl_p(p++, 0x3c085e00); /* lui t0, 0x5e00 */ -#else - stl_p(p++, 0x3408005e); /* ori t0, r0, 0x005e */ -#endif - stl_p(p++, 0xad280088); /* sw t0, 0x0088(t1) */ +#undef cpu_to_gt32 - /* Jump to kernel code */ - stl_p(p++, 0x3c1f0000 | - ((kernel_entry >> 16) & 0xffff)); /* lui ra, high(kernel_entry) */ - stl_p(p++, 0x37ff0000 | - (kernel_entry & 0xffff)); /* ori ra, ra, low(kernel_entry) */ - stl_p(p++, 0x03e00009); /* jalr ra */ - stl_p(p++, 0x00000000); /* nop */ + bl_gen_jump_kernel(&p, + true, ENVP_VADDR - 64, + /* + * If semihosting is used, arguments have already been + * passed, so we preserve $a0. + */ + !semihosting_get_argc(), 2, + true, ENVP_VADDR, + true, ENVP_VADDR + 8, + true, loaderparams.ram_low_size, + kernel_entry); /* YAMON subroutines */ p = (uint32_t *) (base + 0x800); @@ -997,7 +969,7 @@ static void write_bootloader(uint8_t *base, uint64_t run_addr, } -static void GCC_FMT_ATTR(3, 4) prom_set(uint32_t *prom_buf, int index, +static void G_GNUC_PRINTF(3, 4) prom_set(uint32_t *prom_buf, int index, const char *string, ...) 
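The preceding hunks repeatedly convert #ifdef TARGET_WORDS_BIGENDIAN into #if TARGET_BIG_ENDIAN, which relies on the renamed macro evaluating to 0 or 1 in the preprocessor rather than merely being defined or undefined. A minimal standalone sketch of the pattern; the hard-coded value below is purely for illustration and is not how the build system defines it:

#include <stdio.h>

/* Stand-in for the per-target definition; hard-coded here for the example. */
#define TARGET_BIG_ENDIAN 0

int main(void)
{
    int big_endian;

    /* '#if' evaluates the macro's value; '#ifdef' would only test existence. */
#if TARGET_BIG_ENDIAN
    big_endian = 1;
#else
    big_endian = 0;
#endif

    printf("big_endian = %d\n", big_endian);
    return 0;
}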
{ va_list ap; @@ -1020,6 +992,17 @@ static void GCC_FMT_ATTR(3, 4) prom_set(uint32_t *prom_buf, int index, va_end(ap); } +static void reinitialize_rng_seed(void *opaque) +{ + char *rng_seed_hex = opaque; + uint8_t rng_seed[32]; + + qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed)); + for (size_t i = 0; i < sizeof(rng_seed); ++i) { + sprintf(rng_seed_hex + i * 2, "%02x", rng_seed[i]); + } +} + /* Kernel */ static uint64_t load_kernel(void) { @@ -1031,8 +1014,11 @@ static uint64_t load_kernel(void) long prom_size; int prom_index = 0; uint64_t (*xlate_to_kseg0) (void *opaque, uint64_t addr); + uint8_t rng_seed[32]; + char rng_seed_hex[sizeof(rng_seed) * 2 + 1]; + size_t rng_seed_prom_offset; -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN big_endian = 1; #else big_endian = 0; @@ -1118,9 +1104,21 @@ static uint64_t load_kernel(void) prom_set(prom_buf, prom_index++, "modetty0"); prom_set(prom_buf, prom_index++, "38400n8r"); + + qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed)); + for (size_t i = 0; i < sizeof(rng_seed); ++i) { + sprintf(rng_seed_hex + i * 2, "%02x", rng_seed[i]); + } + prom_set(prom_buf, prom_index++, "rngseed"); + rng_seed_prom_offset = prom_index * ENVP_ENTRY_SIZE + + sizeof(uint32_t) * ENVP_NB_ENTRIES; + prom_set(prom_buf, prom_index++, "%s", rng_seed_hex); + prom_set(prom_buf, prom_index++, NULL); rom_add_blob_fixed("prom", prom_buf, prom_size, ENVP_PADDR); + qemu_register_reset_nosnapshotload(reinitialize_rng_seed, + rom_ptr(ENVP_PADDR, prom_size) + rng_seed_prom_offset); g_free(prom_buf); return kernel_entry; @@ -1240,6 +1238,7 @@ void mips_malta_init(MachineState *machine) int fl_idx = 0; int be; MaltaState *s; + PCIDevice *piix4; DeviceState *dev; s = MIPS_MALTA(qdev_new(TYPE_MIPS_MALTA)); @@ -1274,7 +1273,7 @@ void mips_malta_init(MachineState *machine) ram_low_postio); } -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN be = 1; #else be = 0; @@ -1355,7 +1354,7 @@ void mips_malta_init(MachineState *machine) * In little endian mode the 32bit words in the bios are swapped, * a neat trick which allows bi-endian firmware. */ -#ifndef TARGET_WORDS_BIGENDIAN +#if !TARGET_BIG_ENDIAN { uint32_t *end, *addr; const size_t swapsize = MIN(bios_size, 0x3e0000); @@ -1392,7 +1391,8 @@ void mips_malta_init(MachineState *machine) stl_p(memory_region_get_ram_ptr(bios_copy) + 0x10, 0x00000420); /* Northbridge */ - pci_bus = gt64120_register(s->i8259); + dev = sysbus_create_simple("gt64120", -1, NULL); + pci_bus = PCI_BUS(qdev_get_child_bus(dev, "pci")); /* * The whole address space decoded by the GT-64120A doesn't generate * exception when accessing invalid memory. 
Create an empty slot to @@ -1401,15 +1401,19 @@ void mips_malta_init(MachineState *machine) empty_slot_init("GT64120", 0, 0x20000000); /* Southbridge */ - dev = piix4_create(pci_bus, &isa_bus, &smbus); + piix4 = pci_create_simple_multifunction(pci_bus, PCI_DEVFN(10, 0), true, + TYPE_PIIX4_PCI_DEVICE); + isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(piix4), "isa.0")); + + dev = DEVICE(object_resolve_path_component(OBJECT(piix4), "ide")); + pci_ide_create_devs(PCI_DEVICE(dev)); /* Interrupt controller */ - qdev_connect_gpio_out_named(dev, "intr", 0, i8259_irq); - for (int i = 0; i < ISA_NUM_IRQS; i++) { - s->i8259[i] = qdev_get_gpio_in_named(dev, "isa", i); - } + qdev_connect_gpio_out_named(DEVICE(piix4), "intr", 0, i8259_irq); /* generate SPD EEPROM data */ + dev = DEVICE(object_resolve_path_component(OBJECT(piix4), "pm")); + smbus = I2C_BUS(qdev_get_child_bus(dev, "i2c")); generate_eeprom_spd(&smbus_eeprom_buf[0 * 256], ram_size); generate_eeprom_serial(&smbus_eeprom_buf[6 * 256]); smbus_eeprom_init(smbus, 8, smbus_eeprom_buf, smbus_eeprom_size); @@ -1440,6 +1444,14 @@ static const TypeInfo mips_malta_device = { .instance_init = mips_malta_instance_init, }; +GlobalProperty malta_compat[] = { + { "PIIX4_PM", "memory-hotplug-support", "off" }, + { "PIIX4_PM", "acpi-pci-hotplug-with-bridge-support", "off" }, + { "PIIX4_PM", "acpi-root-pci-hotplug", "off" }, + { "PIIX4_PM", "x-not-migrate-acpi-index", "true" }, +}; +const size_t malta_compat_len = G_N_ELEMENTS(malta_compat); + static void mips_malta_machine_init(MachineClass *mc) { mc->desc = "MIPS Malta Core LV"; @@ -1453,6 +1465,7 @@ static void mips_malta_machine_init(MachineClass *mc) mc->default_cpu_type = MIPS_CPU_TYPE_NAME("24Kf"); #endif mc->default_ram_id = "mips_malta.ram"; + compat_props_add(mc->compat_props, malta_compat, malta_compat_len); } DEFINE_MACHINE("malta", mips_malta_machine_init) diff --git a/hw/mips/mipssim.c b/hw/mips/mipssim.c index 2325e7e05a..39f64448f2 100644 --- a/hw/mips/mipssim.c +++ b/hw/mips/mipssim.c @@ -27,7 +27,6 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "hw/clock.h" #include "hw/mips/mips.h" @@ -65,7 +64,7 @@ static uint64_t load_kernel(void) ram_addr_t initrd_offset; int big_endian; -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN big_endian = 1; #else big_endian = 0; @@ -162,7 +161,7 @@ mips_mipssim_init(MachineState *machine) cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk); env = &cpu->env; - reset_info = g_malloc0(sizeof(ResetData)); + reset_info = g_new0(ResetData, 1); reset_info->cpu = cpu; reset_info->vector = env->active_tc.PC; qemu_register_reset(main_cpu_reset, reset_info); diff --git a/hw/misc/Kconfig b/hw/misc/Kconfig index 507058d8bf..cbabe9f78c 100644 --- a/hw/misc/Kconfig +++ b/hw/misc/Kconfig @@ -171,4 +171,7 @@ config SIFIVE_U_PRCI config VIRT_CTRL bool +config LASI + bool + source macio/Kconfig diff --git a/hw/misc/applesmc.c b/hw/misc/applesmc.c index 1b9acaf1d3..5f9c742e50 100644 --- a/hw/misc/applesmc.c +++ b/hw/misc/applesmc.c @@ -37,10 +37,14 @@ #include "qemu/module.h" #include "qemu/timer.h" #include "qom/object.h" +#include "hw/acpi/acpi_aml_interface.h" /* #define DEBUG_SMC */ #define APPLESMC_DEFAULT_IOBASE 0x300 +#define TYPE_APPLE_SMC "isa-applesmc" +#define APPLESMC_MAX_DATA_LENGTH 32 +#define APPLESMC_PROP_IO_BASE "iobase" enum { APPLESMC_DATA_PORT = 0x00, @@ -253,7 +257,7 @@ static void applesmc_add_key(AppleSMCState *s, const char *key, { struct AppleSMCData *def; - def = 
g_malloc0(sizeof(struct AppleSMCData)); + def = g_new0(struct AppleSMCData, 1); def->key = key; def->len = len; def->data = data; @@ -347,14 +351,35 @@ static Property applesmc_isa_properties[] = { DEFINE_PROP_END_OF_LIST(), }; +static void build_applesmc_aml(AcpiDevAmlIf *adev, Aml *scope) +{ + Aml *crs; + AppleSMCState *s = APPLE_SMC(adev); + uint32_t iobase = s->iobase; + Aml *dev = aml_device("SMC"); + + aml_append(dev, aml_name_decl("_HID", aml_eisaid("APP0001"))); + /* device present, functioning, decoding, not shown in UI */ + aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); + crs = aml_resource_template(); + aml_append(crs, + aml_io(AML_DECODE16, iobase, iobase, 0x01, APPLESMC_MAX_DATA_LENGTH) + ); + aml_append(crs, aml_irq_no_flags(6)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); +} + static void qdev_applesmc_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); dc->realize = applesmc_isa_realize; dc->reset = qdev_applesmc_isa_reset; device_class_set_props(dc, applesmc_isa_properties); set_bit(DEVICE_CATEGORY_MISC, dc->categories); + adevc->build_dev_aml = build_applesmc_aml; } static const TypeInfo applesmc_isa_info = { @@ -362,6 +387,10 @@ static const TypeInfo applesmc_isa_info = { .parent = TYPE_ISA_DEVICE, .instance_size = sizeof(AppleSMCState), .class_init = qdev_applesmc_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_ACPI_DEV_AML_IF }, + { }, + }, }; static void applesmc_register_types(void) diff --git a/hw/misc/armv7m_ras.c b/hw/misc/armv7m_ras.c new file mode 100644 index 0000000000..de24922c94 --- /dev/null +++ b/hw/misc/armv7m_ras.c @@ -0,0 +1,93 @@ +/* + * Arm M-profile RAS (Reliability, Availability and Serviceability) block + * + * Copyright (c) 2021 Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. 
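In build_applesmc_aml() above, the _STA value 0x0B encodes the standard ACPI status bits noted in the comment: device present, enabled and decoding its resources, functioning, but not shown in the UI. A small freestanding decoder of those bits for reference; the helper name is illustrative only:

#include <stdio.h>

/* ACPI _STA bits: 0 = present, 1 = enabled/decoding, 2 = shown in UI,
 * 3 = functioning properly. */
static void decode_sta(unsigned sta)
{
    printf("_STA = 0x%02x: present=%u enabled=%u shown_in_ui=%u functioning=%u\n",
           sta, sta & 1, (sta >> 1) & 1, (sta >> 2) & 1, (sta >> 3) & 1);
}

int main(void)
{
    decode_sta(0x0B);   /* the value the patch places in the SMC device */
    return 0;
}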
+ */ + +#include "qemu/osdep.h" +#include "hw/misc/armv7m_ras.h" +#include "qemu/log.h" + +static MemTxResult ras_read(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + if (attrs.user) { + return MEMTX_ERROR; + } + + switch (addr) { + case 0xe10: /* ERRIIDR */ + /* architect field = Arm; product/variant/revision 0 */ + *data = 0x43b; + break; + case 0xfc8: /* ERRDEVID */ + /* Minimal RAS: we implement 0 error record indexes */ + *data = 0; + break; + default: + qemu_log_mask(LOG_UNIMP, "Read RAS register offset 0x%x\n", + (uint32_t)addr); + *data = 0; + break; + } + return MEMTX_OK; +} + +static MemTxResult ras_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + if (attrs.user) { + return MEMTX_ERROR; + } + + switch (addr) { + default: + qemu_log_mask(LOG_UNIMP, "Write to RAS register offset 0x%x\n", + (uint32_t)addr); + break; + } + return MEMTX_OK; +} + +static const MemoryRegionOps ras_ops = { + .read_with_attrs = ras_read, + .write_with_attrs = ras_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + + +static void armv7m_ras_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + ARMv7MRAS *s = ARMV7M_RAS(obj); + + memory_region_init_io(&s->iomem, obj, &ras_ops, + s, "armv7m-ras", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); +} + +static void armv7m_ras_class_init(ObjectClass *klass, void *data) +{ + /* This device has no state: no need for vmstate or reset */ +} + +static const TypeInfo armv7m_ras_info = { + .name = TYPE_ARMV7M_RAS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ARMv7MRAS), + .instance_init = armv7m_ras_init, + .class_init = armv7m_ras_class_init, +}; + +static void armv7m_ras_register_types(void) +{ + type_register_static(&armv7m_ras_info); +} + +type_init(armv7m_ras_register_types); diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c index 10f00e65f4..69175e972d 100644 --- a/hw/misc/aspeed_hace.c +++ b/hw/misc/aspeed_hace.c @@ -27,6 +27,7 @@ #define R_HASH_SRC (0x20 / 4) #define R_HASH_DEST (0x24 / 4) +#define R_HASH_KEY_BUFF (0x28 / 4) #define R_HASH_SRC_LEN (0x2c / 4) #define R_HASH_CMD (0x30 / 4) @@ -64,7 +65,6 @@ #define SG_LIST_ADDR_SIZE 4 #define SG_LIST_ADDR_MASK 0x7FFFFFFF #define SG_LIST_ENTRY_SIZE (SG_LIST_LEN_SIZE + SG_LIST_ADDR_SIZE) -#define ASPEED_HACE_MAX_SG 256 /* max number of entries */ static const struct { uint32_t mask; @@ -94,11 +94,104 @@ static int hash_algo_lookup(uint32_t reg) return -1; } -static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode) +/** + * Check whether the request contains padding message. + * + * @param s aspeed hace state object + * @param iov iov of current request + * @param req_len length of the current request + * @param total_msg_len length of all acc_mode requests(excluding padding msg) + * @param pad_offset start offset of padding message + */ +static bool has_padding(AspeedHACEState *s, struct iovec *iov, + hwaddr req_len, uint32_t *total_msg_len, + uint32_t *pad_offset) +{ + *total_msg_len = (uint32_t)(ldq_be_p(iov->iov_base + req_len - 8) / 8); + /* + * SG_LIST_LEN_LAST asserted in the request length doesn't mean it is the + * last request. The last request should contain padding message. + * We check whether message contains padding by + * 1. Get total message length. If the current message contains + * padding, the last 8 bytes are total message length. + * 2. Check whether the total message length is valid. + * If it is valid, the value should less than or equal to + * total_req_len. + * 3. 
Current request len - padding_size to get padding offset. + * The padding message's first byte should be 0x80 + */ + if (*total_msg_len <= s->total_req_len) { + uint32_t padding_size = s->total_req_len - *total_msg_len; + uint8_t *padding = iov->iov_base; + *pad_offset = req_len - padding_size; + if (padding[*pad_offset] == 0x80) { + return true; + } + } + + return false; +} + +static int reconstruct_iov(AspeedHACEState *s, struct iovec *iov, int id, + uint32_t *pad_offset) +{ + int i, iov_count; + if (*pad_offset != 0) { + s->iov_cache[s->iov_count].iov_base = iov[id].iov_base; + s->iov_cache[s->iov_count].iov_len = *pad_offset; + ++s->iov_count; + } + for (i = 0; i < s->iov_count; i++) { + iov[i].iov_base = s->iov_cache[i].iov_base; + iov[i].iov_len = s->iov_cache[i].iov_len; + } + iov_count = s->iov_count; + s->iov_count = 0; + s->total_req_len = 0; + return iov_count; +} + +/** + * Generate iov for accumulative mode. + * + * @param s aspeed hace state object + * @param iov iov of the current request + * @param id index of the current iov + * @param req_len length of the current request + * + * @return count of iov + */ +static int gen_acc_mode_iov(AspeedHACEState *s, struct iovec *iov, int id, + hwaddr *req_len) +{ + uint32_t pad_offset; + uint32_t total_msg_len; + s->total_req_len += *req_len; + + if (has_padding(s, &iov[id], *req_len, &total_msg_len, &pad_offset)) { + if (s->iov_count) { + return reconstruct_iov(s, iov, id, &pad_offset); + } + + *req_len -= s->total_req_len - total_msg_len; + s->total_req_len = 0; + iov[id].iov_len = *req_len; + } else { + s->iov_cache[s->iov_count].iov_base = iov->iov_base; + s->iov_cache[s->iov_count].iov_len = *req_len; + ++s->iov_count; + } + + return id + 1; +} + +static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode, + bool acc_mode) { struct iovec iov[ASPEED_HACE_MAX_SG]; - g_autofree uint8_t *digest_buf; + g_autofree uint8_t *digest_buf = NULL; size_t digest_len = 0; + int niov = 0; int i; if (sg_mode) { @@ -123,10 +216,16 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode) MEMTXATTRS_UNSPECIFIED, NULL); addr &= SG_LIST_ADDR_MASK; - iov[i].iov_len = len & SG_LIST_LEN_MASK; - plen = iov[i].iov_len; + plen = len & SG_LIST_LEN_MASK; iov[i].iov_base = address_space_map(&s->dram_as, addr, &plen, false, MEMTXATTRS_UNSPECIFIED); + + if (acc_mode) { + niov = gen_acc_mode_iov(s, iov, i, &plen); + + } else { + iov[i].iov_len = plen; + } } } else { hwaddr len = s->regs[R_HASH_SRC_LEN]; @@ -136,6 +235,25 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode) &len, false, MEMTXATTRS_UNSPECIFIED); i = 1; + + if (s->iov_count) { + /* + * In aspeed sdk kernel driver, sg_mode is disabled in hash_final(). + * Thus if we received a request with sg_mode disabled, it is + * required to check whether cache is empty. If no, we should + * combine cached iov and the current iov. 
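The checks spelled out in the has_padding() comment above mirror the SHA padding layout: the last 8 bytes of a padded message hold the total message length in bits as a big-endian value, and the padding itself begins with a 0x80 byte. A freestanding sketch of the same test on a plain buffer; the names and the extra bounds checks are illustrative and not part of the patch:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Return true if 'buf' (the current request of 'req_len' bytes) ends with
 * SHA-style padding, given 'total_req_len' bytes accumulated so far. */
static bool buffer_has_sha_padding(const uint8_t *buf, size_t req_len,
                                   size_t total_req_len)
{
    uint64_t msg_bits = 0;
    size_t total_msg_len, padding_size, pad_offset;

    if (req_len < 8) {
        return false;
    }
    /* Last 8 bytes: total message length in bits, stored big endian. */
    for (int i = 0; i < 8; i++) {
        msg_bits = (msg_bits << 8) | buf[req_len - 8 + i];
    }
    total_msg_len = (size_t)(msg_bits / 8);

    if (total_msg_len > total_req_len) {
        return false;                 /* length field is implausible */
    }
    padding_size = total_req_len - total_msg_len;
    if (padding_size == 0 || padding_size > req_len) {
        return false;                 /* no padding inside this buffer */
    }
    pad_offset = req_len - padding_size;
    /* SHA padding always starts with a 0x80 byte. */
    return buf[pad_offset] == 0x80;
}

int main(void)
{
    uint8_t block[64] = { 'a', 'b', 'c', 0x80 };  /* "abc", 0x80, then zeros */

    block[63] = 24;   /* bit length of "abc", big endian, in the last 8 bytes */
    printf("padded: %d\n",
           buffer_has_sha_padding(block, sizeof(block), sizeof(block)));
    return 0;
}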
+ */ + uint32_t total_msg_len; + uint32_t pad_offset; + s->total_req_len += len; + if (has_padding(s, iov, len, &total_msg_len, &pad_offset)) { + niov = reconstruct_iov(s, iov, 0, &pad_offset); + } + } + } + + if (niov) { + i = niov; } if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, &digest_len, NULL) < 0) { @@ -210,6 +328,9 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data, case R_HASH_DEST: data &= ahc->dest_mask; break; + case R_HASH_KEY_BUFF: + data &= ahc->key_mask; + break; case R_HASH_SRC_LEN: data &= 0x0FFFFFFF; break; @@ -217,14 +338,14 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data, int algo; data &= ahc->hash_mask; - if ((data & HASH_HMAC_MASK)) { + if ((data & HASH_DIGEST_HMAC)) { qemu_log_mask(LOG_UNIMP, - "%s: HMAC engine command mode %"PRIx64" not implemented", - __func__, (data & HASH_HMAC_MASK) >> 8); + "%s: HMAC mode not implemented\n", + __func__); } if (data & BIT(1)) { qemu_log_mask(LOG_UNIMP, - "%s: Cascaded mode not implemented", + "%s: Cascaded mode not implemented\n", __func__); } algo = hash_algo_lookup(data); @@ -234,7 +355,8 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data, __func__, data & ahc->hash_mask); break; } - do_hash_operation(s, algo, data & HASH_SG_EN); + do_hash_operation(s, algo, data & HASH_SG_EN, + ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM)); if (data & HASH_IRQ_EN) { qemu_irq_raise(s->irq); @@ -267,6 +389,8 @@ static void aspeed_hace_reset(DeviceState *dev) struct AspeedHACEState *s = ASPEED_HACE(dev); memset(s->regs, 0, sizeof(s->regs)); + s->iov_count = 0; + s->total_req_len = 0; } static void aspeed_hace_realize(DeviceState *dev, Error **errp) @@ -302,6 +426,8 @@ static const VMStateDescription vmstate_aspeed_hace = { .minimum_version_id = 1, .fields = (VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS), + VMSTATE_UINT32(total_req_len, AspeedHACEState), + VMSTATE_UINT32(iov_count, AspeedHACEState), VMSTATE_END_OF_LIST(), } }; @@ -333,6 +459,7 @@ static void aspeed_ast2400_hace_class_init(ObjectClass *klass, void *data) ahc->src_mask = 0x0FFFFFFF; ahc->dest_mask = 0x0FFFFFF8; + ahc->key_mask = 0x0FFFFFC0; ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */ } @@ -351,6 +478,7 @@ static void aspeed_ast2500_hace_class_init(ObjectClass *klass, void *data) ahc->src_mask = 0x3fffffff; ahc->dest_mask = 0x3ffffff8; + ahc->key_mask = 0x3FFFFFC0; ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */ } @@ -369,6 +497,7 @@ static void aspeed_ast2600_hace_class_init(ObjectClass *klass, void *data) ahc->src_mask = 0x7FFFFFFF; ahc->dest_mask = 0x7FFFFFF8; + ahc->key_mask = 0x7FFFFFF8; ahc->hash_mask = 0x00147FFF; } @@ -378,11 +507,31 @@ static const TypeInfo aspeed_ast2600_hace_info = { .class_init = aspeed_ast2600_hace_class_init, }; +static void aspeed_ast1030_hace_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass); + + dc->desc = "AST1030 Hash and Crypto Engine"; + + ahc->src_mask = 0x7FFFFFFF; + ahc->dest_mask = 0x7FFFFFF8; + ahc->key_mask = 0x7FFFFFF8; + ahc->hash_mask = 0x00147FFF; +} + +static const TypeInfo aspeed_ast1030_hace_info = { + .name = TYPE_ASPEED_AST1030_HACE, + .parent = TYPE_ASPEED_HACE, + .class_init = aspeed_ast1030_hace_class_init, +}; + static void aspeed_hace_register_types(void) { type_register_static(&aspeed_ast2400_hace_info); type_register_static(&aspeed_ast2500_hace_info); type_register_static(&aspeed_ast2600_hace_info); + 
type_register_static(&aspeed_ast1030_hace_info); type_register_static(&aspeed_hace_info); } diff --git a/hw/misc/aspeed_i3c.c b/hw/misc/aspeed_i3c.c new file mode 100644 index 0000000000..f54f5da522 --- /dev/null +++ b/hw/misc/aspeed_i3c.c @@ -0,0 +1,384 @@ +/* + * ASPEED I3C Controller + * + * Copyright (C) 2021 ASPEED Technology Inc. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/error-report.h" +#include "hw/misc/aspeed_i3c.h" +#include "hw/registerfields.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "trace.h" + +/* I3C Controller Registers */ +REG32(I3C1_REG0, 0x10) +REG32(I3C1_REG1, 0x14) + FIELD(I3C1_REG1, I2C_MODE, 0, 1) + FIELD(I3C1_REG1, SA_EN, 15, 1) +REG32(I3C2_REG0, 0x20) +REG32(I3C2_REG1, 0x24) + FIELD(I3C2_REG1, I2C_MODE, 0, 1) + FIELD(I3C2_REG1, SA_EN, 15, 1) +REG32(I3C3_REG0, 0x30) +REG32(I3C3_REG1, 0x34) + FIELD(I3C3_REG1, I2C_MODE, 0, 1) + FIELD(I3C3_REG1, SA_EN, 15, 1) +REG32(I3C4_REG0, 0x40) +REG32(I3C4_REG1, 0x44) + FIELD(I3C4_REG1, I2C_MODE, 0, 1) + FIELD(I3C4_REG1, SA_EN, 15, 1) +REG32(I3C5_REG0, 0x50) +REG32(I3C5_REG1, 0x54) + FIELD(I3C5_REG1, I2C_MODE, 0, 1) + FIELD(I3C5_REG1, SA_EN, 15, 1) +REG32(I3C6_REG0, 0x60) +REG32(I3C6_REG1, 0x64) + FIELD(I3C6_REG1, I2C_MODE, 0, 1) + FIELD(I3C6_REG1, SA_EN, 15, 1) + +/* I3C Device Registers */ +REG32(DEVICE_CTRL, 0x00) +REG32(DEVICE_ADDR, 0x04) +REG32(HW_CAPABILITY, 0x08) +REG32(COMMAND_QUEUE_PORT, 0x0c) +REG32(RESPONSE_QUEUE_PORT, 0x10) +REG32(RX_TX_DATA_PORT, 0x14) +REG32(IBI_QUEUE_STATUS, 0x18) +REG32(IBI_QUEUE_DATA, 0x18) +REG32(QUEUE_THLD_CTRL, 0x1c) +REG32(DATA_BUFFER_THLD_CTRL, 0x20) +REG32(IBI_QUEUE_CTRL, 0x24) +REG32(IBI_MR_REQ_REJECT, 0x2c) +REG32(IBI_SIR_REQ_REJECT, 0x30) +REG32(RESET_CTRL, 0x34) +REG32(SLV_EVENT_CTRL, 0x38) +REG32(INTR_STATUS, 0x3c) +REG32(INTR_STATUS_EN, 0x40) +REG32(INTR_SIGNAL_EN, 0x44) +REG32(INTR_FORCE, 0x48) +REG32(QUEUE_STATUS_LEVEL, 0x4c) +REG32(DATA_BUFFER_STATUS_LEVEL, 0x50) +REG32(PRESENT_STATE, 0x54) +REG32(CCC_DEVICE_STATUS, 0x58) +REG32(DEVICE_ADDR_TABLE_POINTER, 0x5c) + FIELD(DEVICE_ADDR_TABLE_POINTER, DEPTH, 16, 16) + FIELD(DEVICE_ADDR_TABLE_POINTER, ADDR, 0, 16) +REG32(DEV_CHAR_TABLE_POINTER, 0x60) +REG32(VENDOR_SPECIFIC_REG_POINTER, 0x6c) +REG32(SLV_MIPI_PID_VALUE, 0x70) +REG32(SLV_PID_VALUE, 0x74) +REG32(SLV_CHAR_CTRL, 0x78) +REG32(SLV_MAX_LEN, 0x7c) +REG32(MAX_READ_TURNAROUND, 0x80) +REG32(MAX_DATA_SPEED, 0x84) +REG32(SLV_DEBUG_STATUS, 0x88) +REG32(SLV_INTR_REQ, 0x8c) +REG32(DEVICE_CTRL_EXTENDED, 0xb0) +REG32(SCL_I3C_OD_TIMING, 0xb4) +REG32(SCL_I3C_PP_TIMING, 0xb8) +REG32(SCL_I2C_FM_TIMING, 0xbc) +REG32(SCL_I2C_FMP_TIMING, 0xc0) +REG32(SCL_EXT_LCNT_TIMING, 0xc8) +REG32(SCL_EXT_TERMN_LCNT_TIMING, 0xcc) +REG32(BUS_FREE_TIMING, 0xd4) +REG32(BUS_IDLE_TIMING, 0xd8) +REG32(I3C_VER_ID, 0xe0) +REG32(I3C_VER_TYPE, 0xe4) +REG32(EXTENDED_CAPABILITY, 0xe8) +REG32(SLAVE_CONFIG, 0xec) + +static const uint32_t ast2600_i3c_device_resets[ASPEED_I3C_DEVICE_NR_REGS] = { + [R_HW_CAPABILITY] = 0x000e00bf, + [R_QUEUE_THLD_CTRL] = 0x01000101, + [R_I3C_VER_ID] = 0x3130302a, + [R_I3C_VER_TYPE] = 0x6c633033, + [R_DEVICE_ADDR_TABLE_POINTER] = 0x00080280, + [R_DEV_CHAR_TABLE_POINTER] = 0x00020200, + [A_VENDOR_SPECIFIC_REG_POINTER] = 0x000000b0, + [R_SLV_MAX_LEN] = 0x00ff00ff, +}; + +static uint64_t aspeed_i3c_device_read(void *opaque, hwaddr offset, + unsigned size) +{ + AspeedI3CDevice *s = 
ASPEED_I3C_DEVICE(opaque); + uint32_t addr = offset >> 2; + uint64_t value; + + switch (addr) { + case R_COMMAND_QUEUE_PORT: + value = 0; + break; + default: + value = s->regs[addr]; + break; + } + + trace_aspeed_i3c_device_read(s->id, offset, value); + + return value; +} + +static void aspeed_i3c_device_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + AspeedI3CDevice *s = ASPEED_I3C_DEVICE(opaque); + uint32_t addr = offset >> 2; + + trace_aspeed_i3c_device_write(s->id, offset, value); + + switch (addr) { + case R_HW_CAPABILITY: + case R_RESPONSE_QUEUE_PORT: + case R_IBI_QUEUE_DATA: + case R_QUEUE_STATUS_LEVEL: + case R_PRESENT_STATE: + case R_CCC_DEVICE_STATUS: + case R_DEVICE_ADDR_TABLE_POINTER: + case R_VENDOR_SPECIFIC_REG_POINTER: + case R_SLV_CHAR_CTRL: + case R_SLV_MAX_LEN: + case R_MAX_READ_TURNAROUND: + case R_I3C_VER_ID: + case R_I3C_VER_TYPE: + case R_EXTENDED_CAPABILITY: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to readonly register[0x%02" HWADDR_PRIx + "] = 0x%08" PRIx64 "\n", + __func__, offset, value); + break; + case R_RX_TX_DATA_PORT: + break; + case R_RESET_CTRL: + break; + default: + s->regs[addr] = value; + break; + } +} + +static const VMStateDescription aspeed_i3c_device_vmstate = { + .name = TYPE_ASPEED_I3C, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]){ + VMSTATE_UINT32_ARRAY(regs, AspeedI3CDevice, ASPEED_I3C_DEVICE_NR_REGS), + VMSTATE_END_OF_LIST(), + } +}; + +static const MemoryRegionOps aspeed_i3c_device_ops = { + .read = aspeed_i3c_device_read, + .write = aspeed_i3c_device_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void aspeed_i3c_device_reset(DeviceState *dev) +{ + AspeedI3CDevice *s = ASPEED_I3C_DEVICE(dev); + + memcpy(s->regs, ast2600_i3c_device_resets, sizeof(s->regs)); +} + +static void aspeed_i3c_device_realize(DeviceState *dev, Error **errp) +{ + AspeedI3CDevice *s = ASPEED_I3C_DEVICE(dev); + g_autofree char *name = g_strdup_printf(TYPE_ASPEED_I3C_DEVICE ".%d", + s->id); + + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq); + + memory_region_init_io(&s->mr, OBJECT(s), &aspeed_i3c_device_ops, + s, name, ASPEED_I3C_DEVICE_NR_REGS << 2); +} + +static uint64_t aspeed_i3c_read(void *opaque, hwaddr addr, unsigned int size) +{ + AspeedI3CState *s = ASPEED_I3C(opaque); + uint64_t val = 0; + + val = s->regs[addr >> 2]; + + trace_aspeed_i3c_read(addr, val); + + return val; +} + +static void aspeed_i3c_write(void *opaque, + hwaddr addr, + uint64_t data, + unsigned int size) +{ + AspeedI3CState *s = ASPEED_I3C(opaque); + + trace_aspeed_i3c_write(addr, data); + + addr >>= 2; + + /* I3C controller register */ + switch (addr) { + case R_I3C1_REG1: + case R_I3C2_REG1: + case R_I3C3_REG1: + case R_I3C4_REG1: + case R_I3C5_REG1: + case R_I3C6_REG1: + if (data & R_I3C1_REG1_I2C_MODE_MASK) { + qemu_log_mask(LOG_UNIMP, + "%s: Unsupported I2C mode [0x%08" HWADDR_PRIx + "]=%08" PRIx64 "\n", + __func__, addr << 2, data); + break; + } + if (data & R_I3C1_REG1_SA_EN_MASK) { + qemu_log_mask(LOG_UNIMP, + "%s: Unsupported slave mode [%08" HWADDR_PRIx + "]=0x%08" PRIx64 "\n", + __func__, addr << 2, data); + break; + } + s->regs[addr] = data; + break; + default: + s->regs[addr] = data; + break; + } +} + +static const MemoryRegionOps aspeed_i3c_ops = { + .read = aspeed_i3c_read, + .write = aspeed_i3c_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + } +}; + +static void aspeed_i3c_reset(DeviceState *dev) +{ + AspeedI3CState *s = ASPEED_I3C(dev); + 
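The two aspeed_i3c.c hunks above lean on QEMU's hw/registerfields.h helpers: REG32(NAME, offset) defines both A_NAME (the byte offset) and R_NAME (the word index into s->regs[]), while FIELD(REG, NAME, shift, length) generates the shift/length/mask constants behind FIELD_EX32()/FIELD_DP32() and mask names such as R_I3C1_REG1_I2C_MODE_MASK used in aspeed_i3c_write(). A minimal usage sketch with a made-up register, independent of the device above:

#include "qemu/osdep.h"
#include "hw/registerfields.h"

REG32(DEMO_CTRL, 0x14)            /* defines A_DEMO_CTRL = 0x14, R_DEMO_CTRL = 5 */
    FIELD(DEMO_CTRL, I2C_MODE, 0, 1)
    FIELD(DEMO_CTRL, SA_EN, 15, 1)

static void demo_enable_sa(uint32_t *regs)
{
    uint32_t val = regs[R_DEMO_CTRL];

    if (FIELD_EX32(val, DEMO_CTRL, I2C_MODE)) {
        return;                               /* leave legacy I2C mode alone */
    }
    regs[R_DEMO_CTRL] = FIELD_DP32(val, DEMO_CTRL, SA_EN, 1);
}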
memset(s->regs, 0, sizeof(s->regs)); +} + +static void aspeed_i3c_instance_init(Object *obj) +{ + AspeedI3CState *s = ASPEED_I3C(obj); + int i; + + for (i = 0; i < ASPEED_I3C_NR_DEVICES; ++i) { + object_initialize_child(obj, "device[*]", &s->devices[i], + TYPE_ASPEED_I3C_DEVICE); + } +} + +static void aspeed_i3c_realize(DeviceState *dev, Error **errp) +{ + int i; + AspeedI3CState *s = ASPEED_I3C(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + memory_region_init(&s->iomem_container, OBJECT(s), + TYPE_ASPEED_I3C ".container", 0x8000); + + sysbus_init_mmio(sbd, &s->iomem_container); + + memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_i3c_ops, s, + TYPE_ASPEED_I3C ".regs", ASPEED_I3C_NR_REGS << 2); + + memory_region_add_subregion(&s->iomem_container, 0x0, &s->iomem); + + for (i = 0; i < ASPEED_I3C_NR_DEVICES; ++i) { + Object *dev = OBJECT(&s->devices[i]); + + if (!object_property_set_uint(dev, "device-id", i, errp)) { + return; + } + + if (!sysbus_realize(SYS_BUS_DEVICE(dev), errp)) { + return; + } + + /* + * Register Address of I3CX Device = + * (Base Address of Global Register) + (Offset of I3CX) + Offset + * X = 0, 1, 2, 3, 4, 5 + * Offset of I3C0 = 0x2000 + * Offset of I3C1 = 0x3000 + * Offset of I3C2 = 0x4000 + * Offset of I3C3 = 0x5000 + * Offset of I3C4 = 0x6000 + * Offset of I3C5 = 0x7000 + */ + memory_region_add_subregion(&s->iomem_container, + 0x2000 + i * 0x1000, &s->devices[i].mr); + } + +} + +static Property aspeed_i3c_device_properties[] = { + DEFINE_PROP_UINT8("device-id", AspeedI3CDevice, id, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_i3c_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "Aspeed I3C Device"; + dc->realize = aspeed_i3c_device_realize; + dc->reset = aspeed_i3c_device_reset; + device_class_set_props(dc, aspeed_i3c_device_properties); +} + +static const TypeInfo aspeed_i3c_device_info = { + .name = TYPE_ASPEED_I3C_DEVICE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedI3CDevice), + .class_init = aspeed_i3c_device_class_init, +}; + +static const VMStateDescription vmstate_aspeed_i3c = { + .name = TYPE_ASPEED_I3C, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, AspeedI3CState, ASPEED_I3C_NR_REGS), + VMSTATE_STRUCT_ARRAY(devices, AspeedI3CState, ASPEED_I3C_NR_DEVICES, 1, + aspeed_i3c_device_vmstate, AspeedI3CDevice), + VMSTATE_END_OF_LIST(), + } +}; + +static void aspeed_i3c_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = aspeed_i3c_realize; + dc->reset = aspeed_i3c_reset; + dc->desc = "Aspeed I3C Controller"; + dc->vmsd = &vmstate_aspeed_i3c; +} + +static const TypeInfo aspeed_i3c_info = { + .name = TYPE_ASPEED_I3C, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = aspeed_i3c_instance_init, + .instance_size = sizeof(AspeedI3CState), + .class_init = aspeed_i3c_class_init, +}; + +static void aspeed_i3c_register_types(void) +{ + type_register_static(&aspeed_i3c_device_info); + type_register_static(&aspeed_i3c_info); +} + +type_init(aspeed_i3c_register_types); diff --git a/hw/misc/aspeed_peci.c b/hw/misc/aspeed_peci.c new file mode 100644 index 0000000000..93cc672e96 --- /dev/null +++ b/hw/misc/aspeed_peci.c @@ -0,0 +1,152 @@ +/* + * Aspeed PECI Controller + * + * Copyright (c) Meta Platforms, Inc. and affiliates. (http://www.meta.com) + * + * This code is licensed under the GPL version 2 or later. See the COPYING + * file in the top-level directory. 
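The I3C realize hunk above maps the shared global registers at the start of an 0x8000 container and gives each of the six device instances its own 0x1000 window from offset 0x2000 onwards, matching the comment. A small sketch of the resulting guest-visible address arithmetic; base stands for wherever the SoC maps the container and is left symbolic:

#include <assert.h>
#include <stdint.h>

/* byte address of register 'byte_off' in I3C device 'i' (0..5) */
static uint64_t i3c_device_reg_addr(uint64_t base, unsigned i, uint32_t byte_off)
{
    return base + 0x2000 + (uint64_t)i * 0x1000 + byte_off;
}

static void i3c_addr_demo(void)
{
    assert(i3c_device_reg_addr(0, 0, 0x00) == 0x2000);   /* I3C0 DEVICE_CTRL */
    assert(i3c_device_reg_addr(0, 5, 0xe0) == 0x70e0);   /* I3C5 I3C_VER_ID  */
}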
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/irq.h" +#include "hw/misc/aspeed_peci.h" +#include "hw/registerfields.h" +#include "trace.h" + +#define ASPEED_PECI_CC_RSP_SUCCESS (0x40U) + +/* Command Register */ +REG32(PECI_CMD, 0x08) + FIELD(PECI_CMD, FIRE, 0, 1) + +/* Interrupt Control Register */ +REG32(PECI_INT_CTRL, 0x18) + +/* Interrupt Status Register */ +REG32(PECI_INT_STS, 0x1C) + FIELD(PECI_INT_STS, CMD_DONE, 0, 1) + +/* Rx/Tx Data Buffer Registers */ +REG32(PECI_WR_DATA0, 0x20) +REG32(PECI_RD_DATA0, 0x30) + +static void aspeed_peci_raise_interrupt(AspeedPECIState *s, uint32_t status) +{ + trace_aspeed_peci_raise_interrupt(s->regs[R_PECI_INT_CTRL], status); + + s->regs[R_PECI_INT_STS] = s->regs[R_PECI_INT_CTRL] & status; + if (!s->regs[R_PECI_INT_STS]) { + return; + } + qemu_irq_raise(s->irq); +} + +static uint64_t aspeed_peci_read(void *opaque, hwaddr offset, unsigned size) +{ + AspeedPECIState *s = ASPEED_PECI(opaque); + uint64_t data; + + if (offset >= ASPEED_PECI_NR_REGS << 2) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return 0; + } + data = s->regs[offset >> 2]; + + trace_aspeed_peci_read(offset, data); + return data; +} + +static void aspeed_peci_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size) +{ + AspeedPECIState *s = ASPEED_PECI(opaque); + + trace_aspeed_peci_write(offset, data); + + if (offset >= ASPEED_PECI_NR_REGS << 2) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return; + } + + switch (offset) { + case A_PECI_INT_STS: + s->regs[R_PECI_INT_STS] &= ~data; + if (!s->regs[R_PECI_INT_STS]) { + qemu_irq_lower(s->irq); + } + break; + case A_PECI_CMD: + /* + * Only the FIRE bit is writable. Once the command is complete, it + * should be cleared. Since we complete the command immediately, the + * value is not stored in the register array. 
+ */ + if (!FIELD_EX32(data, PECI_CMD, FIRE)) { + break; + } + if (s->regs[R_PECI_INT_STS]) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Interrupt status must be " + "cleared before firing another command: 0x%08x\n", + __func__, s->regs[R_PECI_INT_STS]); + break; + } + s->regs[R_PECI_RD_DATA0] = ASPEED_PECI_CC_RSP_SUCCESS; + s->regs[R_PECI_WR_DATA0] = ASPEED_PECI_CC_RSP_SUCCESS; + aspeed_peci_raise_interrupt(s, + FIELD_DP32(0, PECI_INT_STS, CMD_DONE, 1)); + break; + default: + s->regs[offset / sizeof(s->regs[0])] = data; + break; + } +} + +static const MemoryRegionOps aspeed_peci_ops = { + .read = aspeed_peci_read, + .write = aspeed_peci_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void aspeed_peci_realize(DeviceState *dev, Error **errp) +{ + AspeedPECIState *s = ASPEED_PECI(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_peci_ops, s, + TYPE_ASPEED_PECI, 0x1000); + sysbus_init_mmio(sbd, &s->mmio); + sysbus_init_irq(sbd, &s->irq); +} + +static void aspeed_peci_reset(DeviceState *dev) +{ + AspeedPECIState *s = ASPEED_PECI(dev); + + memset(s->regs, 0, sizeof(s->regs)); +} + +static void aspeed_peci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = aspeed_peci_realize; + dc->reset = aspeed_peci_reset; + dc->desc = "Aspeed PECI Controller"; +} + +static const TypeInfo aspeed_peci_types[] = { + { + .name = TYPE_ASPEED_PECI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedPECIState), + .class_init = aspeed_peci_class_init, + .abstract = false, + }, +}; + +DEFINE_TYPES(aspeed_peci_types); diff --git a/hw/misc/aspeed_sbc.c b/hw/misc/aspeed_sbc.c new file mode 100644 index 0000000000..c6f328e3be --- /dev/null +++ b/hw/misc/aspeed_sbc.c @@ -0,0 +1,182 @@ +/* + * ASPEED Secure Boot Controller + * + * Copyright (C) 2021-2022 IBM Corp. 
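With the model above a PECI transaction completes immediately: the guest stages its request in the data buffer, sets PECI_CMD.FIRE, and, provided CMD_DONE is unmasked in PECI_INT_CTRL, finds CMD_DONE latched in PECI_INT_STS and a 0x40 completion code in PECI_RD_DATA0; writing the status bit back clears it and drops the IRQ. A hedged sketch of that driver-side sequence, using the register offsets from the hunk and hypothetical readl()/writel() MMIO helpers in place of whatever accessors a real guest uses:

#include <stdint.h>

extern uint32_t readl(uint64_t addr);              /* hypothetical accessors */
extern void writel(uint64_t addr, uint32_t val);

#define PECI_BASE       0x0      /* placeholder, SoC dependent */
#define PECI_CMD        0x08     /* bit 0 = FIRE               */
#define PECI_INT_CTRL   0x18
#define PECI_INT_STS    0x1c     /* bit 0 = CMD_DONE, W1C      */
#define PECI_WR_DATA0   0x20
#define PECI_RD_DATA0   0x30

static int peci_xfer_poll(uint32_t request)
{
    writel(PECI_BASE + PECI_INT_CTRL, 1);          /* unmask CMD_DONE        */
    writel(PECI_BASE + PECI_WR_DATA0, request);
    writel(PECI_BASE + PECI_CMD, 1);               /* fire the command       */

    while (!(readl(PECI_BASE + PECI_INT_STS) & 1)) {
        /* with this model the bit is already set on the first read */
    }

    uint32_t resp = readl(PECI_BASE + PECI_RD_DATA0);  /* 0x40 on success     */
    writel(PECI_BASE + PECI_INT_STS, 1);               /* ack before refiring */
    return resp == 0x40 ? 0 : -1;
}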
+ * + * Joel Stanley + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/error-report.h" +#include "hw/qdev-properties.h" +#include "hw/misc/aspeed_sbc.h" +#include "qapi/error.h" +#include "migration/vmstate.h" + +#define R_PROT (0x000 / 4) +#define R_STATUS (0x014 / 4) +#define R_QSR (0x040 / 4) + +/* R_STATUS */ +#define ABR_EN BIT(14) /* Mirrors SCU510[11] */ +#define ABR_IMAGE_SOURCE BIT(13) +#define SPI_ABR_IMAGE_SOURCE BIT(12) +#define SB_CRYPTO_KEY_EXP_DONE BIT(11) +#define SB_CRYPTO_BUSY BIT(10) +#define OTP_WP_EN BIT(9) +#define OTP_ADDR_WP_EN BIT(8) +#define LOW_SEC_KEY_EN BIT(7) +#define SECURE_BOOT_EN BIT(6) +#define UART_BOOT_EN BIT(5) +/* bit 4 reserved*/ +#define OTP_CHARGE_PUMP_READY BIT(3) +#define OTP_IDLE BIT(2) +#define OTP_MEM_IDLE BIT(1) +#define OTP_COMPARE_STATUS BIT(0) + +/* QSR */ +#define QSR_RSA_MASK (0x3 << 12) +#define QSR_HASH_MASK (0x3 << 10) + +static uint64_t aspeed_sbc_read(void *opaque, hwaddr addr, unsigned int size) +{ + AspeedSBCState *s = ASPEED_SBC(opaque); + + addr >>= 2; + + if (addr >= ASPEED_SBC_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n", + __func__, addr << 2); + return 0; + } + + return s->regs[addr]; +} + +static void aspeed_sbc_write(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ + AspeedSBCState *s = ASPEED_SBC(opaque); + + addr >>= 2; + + if (addr >= ASPEED_SBC_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n", + __func__, addr << 2); + return; + } + + switch (addr) { + case R_STATUS: + case R_QSR: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to read only register 0x%" HWADDR_PRIx "\n", + __func__, addr << 2); + return; + default: + break; + } + + s->regs[addr] = data; +} + +static const MemoryRegionOps aspeed_sbc_ops = { + .read = aspeed_sbc_read, + .write = aspeed_sbc_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static void aspeed_sbc_reset(DeviceState *dev) +{ + struct AspeedSBCState *s = ASPEED_SBC(dev); + + memset(s->regs, 0, sizeof(s->regs)); + + /* Set secure boot enabled with RSA4096_SHA256 and enable eMMC ABR */ + s->regs[R_STATUS] = OTP_IDLE | OTP_MEM_IDLE; + + if (s->emmc_abr) { + s->regs[R_STATUS] &= ABR_EN; + } + + if (s->signing_settings) { + s->regs[R_STATUS] &= SECURE_BOOT_EN; + } + + s->regs[R_QSR] = s->signing_settings; +} + +static void aspeed_sbc_realize(DeviceState *dev, Error **errp) +{ + AspeedSBCState *s = ASPEED_SBC(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_sbc_ops, s, + TYPE_ASPEED_SBC, 0x1000); + + sysbus_init_mmio(sbd, &s->iomem); +} + +static const VMStateDescription vmstate_aspeed_sbc = { + .name = TYPE_ASPEED_SBC, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, AspeedSBCState, ASPEED_SBC_NR_REGS), + VMSTATE_END_OF_LIST(), + } +}; + +static Property aspeed_sbc_properties[] = { + DEFINE_PROP_BOOL("emmc-abr", AspeedSBCState, emmc_abr, 0), + DEFINE_PROP_UINT32("signing-settings", AspeedSBCState, signing_settings, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_sbc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = aspeed_sbc_realize; + dc->reset = aspeed_sbc_reset; + dc->vmsd = &vmstate_aspeed_sbc; + device_class_set_props(dc, aspeed_sbc_properties); +} + 
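aspeed_sbc_reset() above derives STATUS and QSR from the two qdev properties, so whatever secure-boot state a board wants firmware to observe has to be configured before realize. A sketch of how an SoC might instantiate the AST2600 flavour; the property values are arbitrary examples and "sbc" is assumed to be a child field of the caller's state:

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "hw/qdev-properties.h"
#include "hw/misc/aspeed_sbc.h"

static void demo_sbc_wire_up(Object *parent, AspeedSBCState *sbc,
                             hwaddr base, Error **errp)
{
    object_initialize_child(parent, "sbc", sbc, TYPE_ASPEED_AST2600_SBC);

    /* both properties are declared in aspeed_sbc_properties[] above */
    qdev_prop_set_bit(DEVICE(sbc), "emmc-abr", true);
    qdev_prop_set_uint32(DEVICE(sbc), "signing-settings", 0x2);   /* example */

    if (!sysbus_realize(SYS_BUS_DEVICE(sbc), errp)) {
        return;
    }
    sysbus_mmio_map(SYS_BUS_DEVICE(sbc), 0, base);
}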
+static const TypeInfo aspeed_sbc_info = { + .name = TYPE_ASPEED_SBC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedSBCState), + .class_init = aspeed_sbc_class_init, + .class_size = sizeof(AspeedSBCClass) +}; + +static void aspeed_ast2600_sbc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "AST2600 Secure Boot Controller"; +} + +static const TypeInfo aspeed_ast2600_sbc_info = { + .name = TYPE_ASPEED_AST2600_SBC, + .parent = TYPE_ASPEED_SBC, + .class_init = aspeed_ast2600_sbc_class_init, +}; + +static void aspeed_sbc_register_types(void) +{ + type_register_static(&aspeed_ast2600_sbc_info); + type_register_static(&aspeed_sbc_info); +} + +type_init(aspeed_sbc_register_types); diff --git a/hw/misc/aspeed_scu.c b/hw/misc/aspeed_scu.c index 40a38ebd85..8335364906 100644 --- a/hw/misc/aspeed_scu.c +++ b/hw/misc/aspeed_scu.c @@ -101,14 +101,26 @@ #define AST2600_CLK_STOP_CTRL_CLR TO_REG(0x84) #define AST2600_CLK_STOP_CTRL2 TO_REG(0x90) #define AST2600_CLK_STOP_CTRL2_CLR TO_REG(0x94) +#define AST2600_DEBUG_CTRL TO_REG(0xC8) +#define AST2600_DEBUG_CTRL2 TO_REG(0xD8) #define AST2600_SDRAM_HANDSHAKE TO_REG(0x100) #define AST2600_HPLL_PARAM TO_REG(0x200) #define AST2600_HPLL_EXT TO_REG(0x204) +#define AST2600_APLL_PARAM TO_REG(0x210) +#define AST2600_APLL_EXT TO_REG(0x214) +#define AST2600_MPLL_PARAM TO_REG(0x220) #define AST2600_MPLL_EXT TO_REG(0x224) +#define AST2600_EPLL_PARAM TO_REG(0x240) #define AST2600_EPLL_EXT TO_REG(0x244) +#define AST2600_DPLL_PARAM TO_REG(0x260) +#define AST2600_DPLL_EXT TO_REG(0x264) #define AST2600_CLK_SEL TO_REG(0x300) #define AST2600_CLK_SEL2 TO_REG(0x304) -#define AST2600_CLK_SEL3 TO_REG(0x310) +#define AST2600_CLK_SEL3 TO_REG(0x308) +#define AST2600_CLK_SEL4 TO_REG(0x310) +#define AST2600_CLK_SEL5 TO_REG(0x314) +#define AST2600_UARTCLK TO_REG(0x338) +#define AST2600_HUARTCLK TO_REG(0x33C) #define AST2600_HW_STRAP1 TO_REG(0x500) #define AST2600_HW_STRAP1_CLR TO_REG(0x504) #define AST2600_HW_STRAP1_PROT TO_REG(0x508) @@ -201,6 +213,11 @@ static uint32_t aspeed_scu_get_random(void) } uint32_t aspeed_scu_get_apb_freq(AspeedSCUState *s) +{ + return ASPEED_SCU_GET_CLASS(s)->get_apb(s); +} + +static uint32_t aspeed_2400_scu_get_apb_freq(AspeedSCUState *s) { AspeedSCUClass *asc = ASPEED_SCU_GET_CLASS(s); uint32_t hpll = asc->calc_hpll(s, s->regs[HPLL_PARAM]); @@ -209,6 +226,24 @@ uint32_t aspeed_scu_get_apb_freq(AspeedSCUState *s) / asc->apb_divider; } +static uint32_t aspeed_2600_scu_get_apb_freq(AspeedSCUState *s) +{ + AspeedSCUClass *asc = ASPEED_SCU_GET_CLASS(s); + uint32_t hpll = asc->calc_hpll(s, s->regs[AST2600_HPLL_PARAM]); + + return hpll / (SCU_CLK_GET_PCLK_DIV(s->regs[AST2600_CLK_SEL]) + 1) + / asc->apb_divider; +} + +static uint32_t aspeed_1030_scu_get_apb_freq(AspeedSCUState *s) +{ + AspeedSCUClass *asc = ASPEED_SCU_GET_CLASS(s); + uint32_t hpll = asc->calc_hpll(s, s->regs[AST2600_HPLL_PARAM]); + + return hpll / (SCU_AST1030_CLK_GET_PCLK_DIV(s->regs[AST2600_CLK_SEL4]) + 1) + / asc->apb_divider; +} + static uint64_t aspeed_scu_read(void *opaque, hwaddr offset, unsigned size) { AspeedSCUState *s = ASPEED_SCU(opaque); @@ -235,6 +270,7 @@ static uint64_t aspeed_scu_read(void *opaque, hwaddr offset, unsigned size) break; } + trace_aspeed_scu_read(offset, size, s->regs[reg]); return s->regs[reg]; } @@ -345,7 +381,8 @@ static const MemoryRegionOps aspeed_ast2500_scu_ops = { static uint32_t aspeed_scu_get_clkin(AspeedSCUState *s) { - if (s->hw_strap1 & SCU_HW_STRAP_CLK_25M_IN) { + if (s->hw_strap1 & 
SCU_HW_STRAP_CLK_25M_IN || + ASPEED_SCU_GET_CLASS(s)->clkin_25Mhz) { return 25000000; } else if (s->hw_strap1 & SCU_HW_STRAP_CLK_48M_IN) { return 48000000; @@ -414,6 +451,26 @@ static uint32_t aspeed_2500_scu_calc_hpll(AspeedSCUState *s, uint32_t hpll_reg) return clkin * multiplier; } +static uint32_t aspeed_2600_scu_calc_hpll(AspeedSCUState *s, uint32_t hpll_reg) +{ + uint32_t multiplier = 1; + uint32_t clkin = aspeed_scu_get_clkin(s); + + if (hpll_reg & SCU_AST2600_H_PLL_OFF) { + return 0; + } + + if (!(hpll_reg & SCU_AST2600_H_PLL_BYPASS_EN)) { + uint32_t p = (hpll_reg >> 19) & 0xf; + uint32_t n = (hpll_reg >> 13) & 0x3f; + uint32_t m = hpll_reg & 0x1fff; + + multiplier = ((m + 1) / (n + 1)) / (p + 1); + } + + return clkin * multiplier; +} + static void aspeed_scu_reset(DeviceState *dev) { AspeedSCUState *s = ASPEED_SCU(dev); @@ -433,6 +490,10 @@ static uint32_t aspeed_silicon_revs[] = { AST2500_A1_SILICON_REV, AST2600_A0_SILICON_REV, AST2600_A1_SILICON_REV, + AST2600_A2_SILICON_REV, + AST2600_A3_SILICON_REV, + AST1030_A0_SILICON_REV, + AST1030_A1_SILICON_REV, }; bool is_supported_silicon_rev(uint32_t silicon_rev) @@ -511,8 +572,10 @@ static void aspeed_2400_scu_class_init(ObjectClass *klass, void *data) dc->desc = "ASPEED 2400 System Control Unit"; asc->resets = ast2400_a0_resets; asc->calc_hpll = aspeed_2400_scu_calc_hpll; + asc->get_apb = aspeed_2400_scu_get_apb_freq; asc->apb_divider = 2; asc->nr_regs = ASPEED_SCU_NR_REGS; + asc->clkin_25Mhz = false; asc->ops = &aspeed_ast2400_scu_ops; } @@ -531,8 +594,10 @@ static void aspeed_2500_scu_class_init(ObjectClass *klass, void *data) dc->desc = "ASPEED 2500 System Control Unit"; asc->resets = ast2500_a1_resets; asc->calc_hpll = aspeed_2500_scu_calc_hpll; + asc->get_apb = aspeed_2400_scu_get_apb_freq; asc->apb_divider = 4; asc->nr_regs = ASPEED_SCU_NR_REGS; + asc->clkin_25Mhz = false; asc->ops = &aspeed_ast2500_scu_ops; } @@ -573,6 +638,7 @@ static uint64_t aspeed_ast2600_scu_read(void *opaque, hwaddr offset, break; } + trace_aspeed_scu_read(offset, size, s->regs[reg]); return s->regs[reg]; } @@ -651,16 +717,28 @@ static const MemoryRegionOps aspeed_ast2600_scu_ops = { .valid.unaligned = false, }; -static const uint32_t ast2600_a1_resets[ASPEED_AST2600_SCU_NR_REGS] = { +static const uint32_t ast2600_a3_resets[ASPEED_AST2600_SCU_NR_REGS] = { [AST2600_SYS_RST_CTRL] = 0xF7C3FED8, - [AST2600_SYS_RST_CTRL2] = 0xFFFFFFFC, + [AST2600_SYS_RST_CTRL2] = 0x0DFFFFFC, [AST2600_CLK_STOP_CTRL] = 0xFFFF7F8A, [AST2600_CLK_STOP_CTRL2] = 0xFFF0FFF0, + [AST2600_DEBUG_CTRL] = 0x00000FFF, + [AST2600_DEBUG_CTRL2] = 0x000000FF, [AST2600_SDRAM_HANDSHAKE] = 0x00000000, - [AST2600_HPLL_PARAM] = 0x1000405F, + [AST2600_HPLL_PARAM] = 0x1000408F, + [AST2600_APLL_PARAM] = 0x1000405F, + [AST2600_MPLL_PARAM] = 0x1008405F, + [AST2600_EPLL_PARAM] = 0x1004077F, + [AST2600_DPLL_PARAM] = 0x1078405F, + [AST2600_CLK_SEL] = 0xF3940000, + [AST2600_CLK_SEL2] = 0x00700000, + [AST2600_CLK_SEL3] = 0x00000000, + [AST2600_CLK_SEL4] = 0xF3F40000, + [AST2600_CLK_SEL5] = 0x30000000, + [AST2600_UARTCLK] = 0x00014506, + [AST2600_HUARTCLK] = 0x000145C0, [AST2600_CHIP_ID0] = 0x1234ABCD, [AST2600_CHIP_ID1] = 0x88884444, - }; static void aspeed_ast2600_scu_reset(DeviceState *dev) @@ -675,7 +753,7 @@ static void aspeed_ast2600_scu_reset(DeviceState *dev) * of actual revision. QEMU and Linux only support A1 onwards so this is * sufficient. 
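aspeed_2600_scu_calc_hpll() above decodes the P/N/M divider fields and scales the fixed 25 MHz CLKIN that clkin_25Mhz selects for this SoC. Worked through for the A3 reset value 0x1000408F from the table above (neither the PLL-off nor the bypass bit is set in that value):

#include <stdint.h>

/*
 *   m = 0x1000408F & 0x1fff         = 143
 *   n = (0x1000408F >> 13) & 0x3f   = 2
 *   p = (0x1000408F >> 19) & 0xf    = 0
 *   multiplier = ((143 + 1) / (2 + 1)) / (0 + 1) = 48
 *   HPLL = 25 MHz * 48 = 1200 MHz
 */
static uint32_t ast2600_hpll_hz_example(void)
{
    const uint32_t reg = 0x1000408F;
    uint32_t m = reg & 0x1fff;
    uint32_t n = (reg >> 13) & 0x3f;
    uint32_t p = (reg >> 19) & 0xf;

    return 25000000 * (((m + 1) / (n + 1)) / (p + 1));   /* 1200000000 */
}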
*/ - s->regs[AST2600_SILICON_REV] = AST2600_A1_SILICON_REV; + s->regs[AST2600_SILICON_REV] = AST2600_A3_SILICON_REV; s->regs[AST2600_SILICON_REV2] = s->silicon_rev; s->regs[AST2600_HW_STRAP1] = s->hw_strap1; s->regs[AST2600_HW_STRAP2] = s->hw_strap2; @@ -689,10 +767,12 @@ static void aspeed_2600_scu_class_init(ObjectClass *klass, void *data) dc->desc = "ASPEED 2600 System Control Unit"; dc->reset = aspeed_ast2600_scu_reset; - asc->resets = ast2600_a1_resets; - asc->calc_hpll = aspeed_2500_scu_calc_hpll; /* No change since AST2500 */ + asc->resets = ast2600_a3_resets; + asc->calc_hpll = aspeed_2600_scu_calc_hpll; + asc->get_apb = aspeed_2600_scu_get_apb_freq; asc->apb_divider = 4; asc->nr_regs = ASPEED_AST2600_SCU_NR_REGS; + asc->clkin_25Mhz = true; asc->ops = &aspeed_ast2600_scu_ops; } @@ -703,12 +783,64 @@ static const TypeInfo aspeed_2600_scu_info = { .class_init = aspeed_2600_scu_class_init, }; +static const uint32_t ast1030_a1_resets[ASPEED_AST2600_SCU_NR_REGS] = { + [AST2600_SYS_RST_CTRL] = 0xFFC3FED8, + [AST2600_SYS_RST_CTRL2] = 0x09FFFFFC, + [AST2600_CLK_STOP_CTRL] = 0xFFFF7F8A, + [AST2600_CLK_STOP_CTRL2] = 0xFFF0FFF0, + [AST2600_DEBUG_CTRL2] = 0x00000000, + [AST2600_HPLL_PARAM] = 0x10004077, + [AST2600_HPLL_EXT] = 0x00000031, + [AST2600_CLK_SEL4] = 0x43F90900, + [AST2600_CLK_SEL5] = 0x40000000, + [AST2600_CHIP_ID0] = 0xDEADBEEF, + [AST2600_CHIP_ID1] = 0x0BADCAFE, +}; + +static void aspeed_ast1030_scu_reset(DeviceState *dev) +{ + AspeedSCUState *s = ASPEED_SCU(dev); + AspeedSCUClass *asc = ASPEED_SCU_GET_CLASS(dev); + + memcpy(s->regs, asc->resets, asc->nr_regs * 4); + + s->regs[AST2600_SILICON_REV] = AST1030_A1_SILICON_REV; + s->regs[AST2600_SILICON_REV2] = s->silicon_rev; + s->regs[AST2600_HW_STRAP1] = s->hw_strap1; + s->regs[AST2600_HW_STRAP2] = s->hw_strap2; + s->regs[PROT_KEY] = s->hw_prot_key; +} + +static void aspeed_1030_scu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSCUClass *asc = ASPEED_SCU_CLASS(klass); + + dc->desc = "ASPEED 1030 System Control Unit"; + dc->reset = aspeed_ast1030_scu_reset; + asc->resets = ast1030_a1_resets; + asc->calc_hpll = aspeed_2600_scu_calc_hpll; + asc->get_apb = aspeed_1030_scu_get_apb_freq; + asc->apb_divider = 2; + asc->nr_regs = ASPEED_AST2600_SCU_NR_REGS; + asc->clkin_25Mhz = true; + asc->ops = &aspeed_ast2600_scu_ops; +} + +static const TypeInfo aspeed_1030_scu_info = { + .name = TYPE_ASPEED_1030_SCU, + .parent = TYPE_ASPEED_SCU, + .instance_size = sizeof(AspeedSCUState), + .class_init = aspeed_1030_scu_class_init, +}; + static void aspeed_scu_register_types(void) { type_register_static(&aspeed_scu_info); type_register_static(&aspeed_2400_scu_info); type_register_static(&aspeed_2500_scu_info); type_register_static(&aspeed_2600_scu_info); + type_register_static(&aspeed_1030_scu_info); } type_init(aspeed_scu_register_types); diff --git a/hw/misc/aspeed_sdmc.c b/hw/misc/aspeed_sdmc.c index 08f856cbda..d2a3931033 100644 --- a/hw/misc/aspeed_sdmc.c +++ b/hw/misc/aspeed_sdmc.c @@ -130,6 +130,7 @@ static uint64_t aspeed_sdmc_read(void *opaque, hwaddr addr, unsigned size) return 0; } + trace_aspeed_sdmc_read(addr, s->regs[addr]); return s->regs[addr]; } @@ -148,6 +149,7 @@ static void aspeed_sdmc_write(void *opaque, hwaddr addr, uint64_t data, return; } + trace_aspeed_sdmc_write(addr, data); asc->write(s, addr, data); } diff --git a/hw/misc/auxbus.c b/hw/misc/auxbus.c index 434ff8d910..8a8012f5f0 100644 --- a/hw/misc/auxbus.c +++ b/hw/misc/auxbus.c @@ -65,7 +65,7 @@ AUXBus 
*aux_bus_init(DeviceState *parent, const char *name) AUXBus *bus; Object *auxtoi2c; - bus = AUX_BUS(qbus_create(TYPE_AUX_BUS, parent, name)); + bus = AUX_BUS(qbus_new(TYPE_AUX_BUS, parent, name)); auxtoi2c = object_new_with_props(TYPE_AUXTOI2C, OBJECT(bus), "i2c", &error_abort, NULL); diff --git a/hw/misc/bcm2835_mbox.c b/hw/misc/bcm2835_mbox.c index 9f73cbd5e4..1e4e061bc1 100644 --- a/hw/misc/bcm2835_mbox.c +++ b/hw/misc/bcm2835_mbox.c @@ -271,7 +271,6 @@ static const VMStateDescription vmstate_bcm2835_mbox = { .name = TYPE_BCM2835_MBOX, .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_BOOL_ARRAY(available, BCM2835MboxState, MBOX_CHAN_COUNT), VMSTATE_STRUCT_ARRAY(mbox, BCM2835MboxState, 2, 1, @@ -324,7 +323,7 @@ static void bcm2835_mbox_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_bcm2835_mbox; } -static TypeInfo bcm2835_mbox_info = { +static const TypeInfo bcm2835_mbox_info = { .name = TYPE_BCM2835_MBOX, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(BCM2835MboxState), diff --git a/hw/misc/bcm2835_powermgt.c b/hw/misc/bcm2835_powermgt.c index 25fa804cbd..976f3d34e5 100644 --- a/hw/misc/bcm2835_powermgt.c +++ b/hw/misc/bcm2835_powermgt.c @@ -144,7 +144,7 @@ static void bcm2835_powermgt_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_bcm2835_powermgt; } -static TypeInfo bcm2835_powermgt_info = { +static const TypeInfo bcm2835_powermgt_info = { .name = TYPE_BCM2835_POWERMGT, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(BCM2835PowerMgtState), diff --git a/hw/misc/bcm2835_property.c b/hw/misc/bcm2835_property.c index 73941bdae9..890ae7bae5 100644 --- a/hw/misc/bcm2835_property.c +++ b/hw/misc/bcm2835_property.c @@ -69,7 +69,8 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) break; case 0x00010003: /* Get board MAC address */ resplen = sizeof(s->macaddr.a); - dma_memory_write(&s->dma_as, value + 12, s->macaddr.a, resplen); + dma_memory_write(&s->dma_as, value + 12, s->macaddr.a, resplen, + MEMTXATTRS_UNSPECIFIED); break; case 0x00010004: /* Get board serial */ qemu_log_mask(LOG_UNIMP, @@ -269,6 +270,10 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) stl_le_phys(&s->dma_as, value + 12, 0); resplen = 4; break; + case 0x00040013: /* Get number of displays */ + stl_le_phys(&s->dma_as, value + 12, 1); + resplen = 4; + break; case 0x00060001: /* Get DMA channels */ /* channels 2-5 */ @@ -420,7 +425,7 @@ static void bcm2835_property_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_bcm2835_property; } -static TypeInfo bcm2835_property_info = { +static const TypeInfo bcm2835_property_info = { .name = TYPE_BCM2835_PROPERTY, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(BCM2835PropertyState), diff --git a/hw/misc/bcm2835_rng.c b/hw/misc/bcm2835_rng.c index d0c4e64e88..b3c80cf186 100644 --- a/hw/misc/bcm2835_rng.c +++ b/hw/misc/bcm2835_rng.c @@ -131,7 +131,7 @@ static void bcm2835_rng_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_bcm2835_rng; } -static TypeInfo bcm2835_rng_info = { +static const TypeInfo bcm2835_rng_info = { .name = TYPE_BCM2835_RNG, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(BCM2835RngState), diff --git a/hw/misc/cbus.c b/hw/misc/cbus.c index 3c3721ad2d..653e8ddcd5 100644 --- a/hw/misc/cbus.c +++ b/hw/misc/cbus.c @@ -133,7 +133,7 @@ static void cbus_sel(void *opaque, int line, int level) CBus *cbus_init(qemu_irq dat) { - CBusPriv *s = (CBusPriv *) 
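The new 0x00040013 tag in the bcm2835_property hunk above just reports a single display. On the guest side such a query travels through the mailbox property channel as a buffer of little-endian 32-bit words: total size, request code, one or more tags (tag id, value-buffer size, request/response word, value space), then a zero end tag. The layout below is reproduced from the Raspberry Pi firmware property convention and is meant as an illustration rather than a normative definition:

#include <stdint.h>

/* one-tag request: "get number of displays" (0x00040013) */
static uint32_t propbuf[8] __attribute__((aligned(16))) = {
    8 * sizeof(uint32_t),   /* [0] total buffer size in bytes             */
    0x00000000,             /* [1] process-request code                   */
    0x00040013,             /* [2] tag id                                 */
    4,                      /* [3] value buffer size in bytes             */
    0,                      /* [4] tag request code (0 = request)         */
    0,                      /* [5] value space, filled in by the firmware */
    0,                      /* [6] end tag                                */
    0,                      /* [7] padding                                */
};

/*
 * After the channel-8 mailbox write/read handshake, word [1] reads back
 * 0x80000000 on success and word [5] holds the display count, which the
 * model above always sets to 1.
 */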
g_malloc0(sizeof(*s)); + CBusPriv *s = g_malloc0(sizeof(*s)); s->dat_out = dat; s->cbus.clk = qemu_allocate_irq(cbus_clk, s, 0); @@ -388,7 +388,7 @@ static void retu_io(void *opaque, int rw, int reg, uint16_t *val) void *retu_init(qemu_irq irq, int vilma) { - CBusRetu *s = (CBusRetu *) g_malloc0(sizeof(*s)); + CBusRetu *s = g_malloc0(sizeof(*s)); s->irq = irq; s->irqen = 0xffff; @@ -604,7 +604,7 @@ static void tahvo_io(void *opaque, int rw, int reg, uint16_t *val) void *tahvo_init(qemu_irq irq, int betty) { - CBusTahvo *s = (CBusTahvo *) g_malloc0(sizeof(*s)); + CBusTahvo *s = g_malloc0(sizeof(*s)); s->irq = irq; s->irqen = 0xffff; diff --git a/hw/misc/grlib_ahb_apb_pnp.c b/hw/misc/grlib_ahb_apb_pnp.c index 43e001c3c7..5b05f15859 100644 --- a/hw/misc/grlib_ahb_apb_pnp.c +++ b/hw/misc/grlib_ahb_apb_pnp.c @@ -136,7 +136,8 @@ static uint64_t grlib_ahb_pnp_read(void *opaque, hwaddr offset, unsigned size) uint32_t val; val = ahb_pnp->regs[offset >> 2]; - trace_grlib_ahb_pnp_read(offset, val); + val = extract32(val, (4 - (offset & 3) - size) * 8, size * 8); + trace_grlib_ahb_pnp_read(offset, size, val); return val; } @@ -152,7 +153,7 @@ static const MemoryRegionOps grlib_ahb_pnp_ops = { .write = grlib_ahb_pnp_write, .endianness = DEVICE_BIG_ENDIAN, .impl = { - .min_access_size = 4, + .min_access_size = 1, .max_access_size = 4, }, }; @@ -247,7 +248,8 @@ static uint64_t grlib_apb_pnp_read(void *opaque, hwaddr offset, unsigned size) uint32_t val; val = apb_pnp->regs[offset >> 2]; - trace_grlib_apb_pnp_read(offset, val); + val = extract32(val, (4 - (offset & 3) - size) * 8, size * 8); + trace_grlib_apb_pnp_read(offset, size, val); return val; } @@ -263,7 +265,7 @@ static const MemoryRegionOps grlib_apb_pnp_ops = { .write = grlib_apb_pnp_write, .endianness = DEVICE_BIG_ENDIAN, .impl = { - .min_access_size = 4, + .min_access_size = 1, .max_access_size = 4, }, }; diff --git a/hw/misc/imx6_src.c b/hw/misc/imx6_src.c index 79f4375911..7b0e968804 100644 --- a/hw/misc/imx6_src.c +++ b/hw/misc/imx6_src.c @@ -151,7 +151,7 @@ static void imx6_defer_clear_reset_bit(int cpuid, return; } - ri = g_malloc(sizeof(struct SRCSCRResetInfo)); + ri = g_new(struct SRCSCRResetInfo, 1); ri->s = s; ri->reset_bit = reset_shift; diff --git a/hw/misc/iotkit-secctl.c b/hw/misc/iotkit-secctl.c index 7b41cfa8fc..b5a9e30a2c 100644 --- a/hw/misc/iotkit-secctl.c +++ b/hw/misc/iotkit-secctl.c @@ -114,7 +114,7 @@ static const uint8_t iotkit_secctl_ns_sse300_idregs[] = { * AHB expansion, APB expansion) are all set up so that they are * in 16-aligned blocks so offsets 0xN0, 0xN4, 0xN8, 0xNC are PPCs * 0, 1, 2, 3 of that type, so we can convert a register address offset - * into an an index into a PPC array easily. + * into an index into a PPC array easily. 
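The grlib PnP read hunks above add sub-word access on top of a word-wide register file: the whole 32-bit word is fetched and the requested bytes are carved out with extract32(), counting bytes big-endian (byte 0 is the most significant). Worked through for a register value of 0xAABBCCDD:

/*
 *   size 1, offset & 3 == 0:  extract32(val, (4 - 0 - 1) * 8, 8)   = 0xAA
 *   size 1, offset & 3 == 3:  extract32(val, (4 - 3 - 1) * 8, 8)   = 0xDD
 *   size 2, offset & 3 == 2:  extract32(val, (4 - 2 - 2) * 8, 16)  = 0xCCDD
 *   size 4, offset & 3 == 0:  extract32(val, (4 - 0 - 4) * 8, 32)  = 0xAABBCCDD
 */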
*/ static inline int offset_to_ppc_idx(uint32_t offset) { diff --git a/hw/misc/iotkit-sysctl.c b/hw/misc/iotkit-sysctl.c index 9ee8fe8495..7147e2f84e 100644 --- a/hw/misc/iotkit-sysctl.c +++ b/hw/misc/iotkit-sysctl.c @@ -237,7 +237,7 @@ static uint64_t iotkit_sysctl_read(void *opaque, hwaddr offset, r = s->ewctrl; break; case ARMSSE_SSE300: - /* In SSE300 this offset is is NMI_ENABLE */ + /* In SSE300 this offset is NMI_ENABLE */ r = s->nmi_enable; break; default: @@ -555,7 +555,7 @@ static void iotkit_sysctl_write(void *opaque, hwaddr offset, s->ewctrl = value; break; case ARMSSE_SSE300: - /* In SSE300 this offset is is NMI_ENABLE */ + /* In SSE300 this offset is NMI_ENABLE */ qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl NMI_ENABLE unimplemented\n"); s->nmi_enable = value; break; diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c index 1ba4a98377..8270db53cd 100644 --- a/hw/misc/ivshmem.c +++ b/hw/misc/ivshmem.c @@ -243,7 +243,7 @@ static uint64_t ivshmem_io_read(void *opaque, hwaddr addr, static const MemoryRegionOps ivshmem_mmio_ops = { .read = ivshmem_io_read, .write = ivshmem_io_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .impl = { .min_access_size = 4, .max_access_size = 4, @@ -411,7 +411,7 @@ static void resize_peers(IVShmemState *s, int nb_peers) assert(nb_peers > old_nb_peers); IVSHMEM_DPRINTF("bumping storage to %d peers\n", nb_peers); - s->peers = g_realloc(s->peers, nb_peers * sizeof(Peer)); + s->peers = g_renew(Peer, s->peers, nb_peers); s->nb_peers = nb_peers; for (i = old_nb_peers; i < nb_peers; i++) { @@ -424,16 +424,19 @@ static void ivshmem_add_kvm_msi_virq(IVShmemState *s, int vector, Error **errp) { PCIDevice *pdev = PCI_DEVICE(s); + KVMRouteChange c; int ret; IVSHMEM_DPRINTF("ivshmem_add_kvm_msi_virq vector:%d\n", vector); assert(!s->msi_vectors[vector].pdev); - ret = kvm_irqchip_add_msi_route(kvm_state, vector, pdev); + c = kvm_irqchip_begin_route_changes(kvm_state); + ret = kvm_irqchip_add_msi_route(&c, vector, pdev); if (ret < 0) { error_setg(errp, "kvm_irqchip_add_msi_route failed"); return; } + kvm_irqchip_commit_route_changes(&c); s->msi_vectors[vector].virq = ret; s->msi_vectors[vector].pdev = pdev; @@ -534,7 +537,7 @@ static void process_msg_connect(IVShmemState *s, uint16_t posn, int fd, IVSHMEM_DPRINTF("eventfds[%d][%d] = %d\n", posn, vector, fd); event_notifier_init_fd(&peer->eventfds[vector], fd); - fcntl_setfl(fd, O_NONBLOCK); /* msix/irqfd poll non block */ + g_unix_set_fd_nonblocking(fd, true, NULL); /* msix/irqfd poll non block */ if (posn == s->vm_id) { setup_interrupt(s, vector, errp); @@ -728,7 +731,7 @@ static void ivshmem_reset(DeviceState *d) static int ivshmem_setup_interrupts(IVShmemState *s, Error **errp) { /* allocate QEMU callback data for receiving interrupts */ - s->msi_vectors = g_malloc0(s->vectors * sizeof(MSIVector)); + s->msi_vectors = g_new0(MSIVector, s->vectors); if (ivshmem_has_feature(s, IVSHMEM_MSI)) { if (msix_init_exclusive_bar(PCI_DEVICE(s), s->vectors, 1, errp)) { diff --git a/hw/hppa/lasi.c b/hw/misc/lasi.c similarity index 60% rename from hw/hppa/lasi.c rename to hw/misc/lasi.c index 88c3791eb6..23a7634a8c 100644 --- a/hw/hppa/lasi.c +++ b/hw/misc/lasi.c @@ -17,57 +17,10 @@ #include "hw/irq.h" #include "sysemu/sysemu.h" #include "sysemu/runstate.h" -#include "hppa_sys.h" -#include "hw/net/lasi_82596.h" -#include "hw/char/parallel.h" -#include "hw/char/serial.h" -#include "hw/input/lasips2.h" #include "migration/vmstate.h" #include "qom/object.h" +#include "hw/misc/lasi.h" -#define 
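The ivshmem hunk above switches to the batched MSI routing API: routes are staged on a KVMRouteChange and pushed to the kernel with one commit rather than per allocation. A condensed sketch of that pattern as it would sit in ivshmem.c (error handling trimmed, so not a drop-in replacement for the function above):

static int demo_add_msi_route(IVShmemState *s, int vector, PCIDevice *pdev)
{
    KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state);
    int virq = kvm_irqchip_add_msi_route(&c, vector, pdev);

    if (virq < 0) {
        return virq;                       /* nothing staged gets committed */
    }
    kvm_irqchip_commit_route_changes(&c);  /* one update for all staged routes */

    s->msi_vectors[vector].virq = virq;
    s->msi_vectors[vector].pdev = pdev;
    return 0;
}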
TYPE_LASI_CHIP "lasi-chip" - -#define LASI_IRR 0x00 /* RO */ -#define LASI_IMR 0x04 -#define LASI_IPR 0x08 -#define LASI_ICR 0x0c -#define LASI_IAR 0x10 - -#define LASI_PCR 0x0C000 /* LASI Power Control register */ -#define LASI_ERRLOG 0x0C004 /* LASI Error Logging register */ -#define LASI_VER 0x0C008 /* LASI Version Control register */ -#define LASI_IORESET 0x0C00C /* LASI I/O Reset register */ -#define LASI_AMR 0x0C010 /* LASI Arbitration Mask register */ -#define LASI_IO_CONF 0x7FFFE /* LASI primary configuration register */ -#define LASI_IO_CONF2 0x7FFFF /* LASI secondary configuration register */ - -#define LASI_BIT(x) (1ul << (x)) -#define LASI_IRQ_BITS (LASI_BIT(5) | LASI_BIT(7) | LASI_BIT(8) | LASI_BIT(9) \ - | LASI_BIT(13) | LASI_BIT(14) | LASI_BIT(16) | LASI_BIT(17) \ - | LASI_BIT(18) | LASI_BIT(19) | LASI_BIT(20) | LASI_BIT(21) \ - | LASI_BIT(26)) - -#define ICR_BUS_ERROR_BIT LASI_BIT(8) /* bit 8 in ICR */ -#define ICR_TOC_BIT LASI_BIT(1) /* bit 1 in ICR */ - -OBJECT_DECLARE_SIMPLE_TYPE(LasiState, LASI_CHIP) - -struct LasiState { - PCIHostState parent_obj; - - uint32_t irr; - uint32_t imr; - uint32_t ipr; - uint32_t icr; - uint32_t iar; - - uint32_t errlog; - uint32_t amr; - uint32_t rtc; - time_t rtc_ref; - - MemoryRegion this_mem; -}; static bool lasi_chip_mem_valid(void *opaque, hwaddr addr, unsigned size, bool is_write, @@ -82,10 +35,10 @@ static bool lasi_chip_mem_valid(void *opaque, hwaddr addr, case LASI_ICR: case LASI_IAR: - case (LASI_LAN_HPA - LASI_HPA): - case (LASI_LPT_HPA - LASI_HPA): - case (LASI_UART_HPA - LASI_HPA): - case (LASI_RTC_HPA - LASI_HPA): + case LASI_LPT: + case LASI_UART: + case LASI_LAN: + case LASI_RTC: case LASI_PCR ... LASI_AMR: ret = true; @@ -122,12 +75,12 @@ static MemTxResult lasi_chip_read_with_attrs(void *opaque, hwaddr addr, val = s->iar; break; - case (LASI_LAN_HPA - LASI_HPA): - case (LASI_LPT_HPA - LASI_HPA): - case (LASI_UART_HPA - LASI_HPA): + case LASI_LPT: + case LASI_UART: + case LASI_LAN: val = 0; break; - case (LASI_RTC_HPA - LASI_HPA): + case LASI_RTC: val = time(NULL); val += s->rtc_ref; break; @@ -169,10 +122,11 @@ static MemTxResult lasi_chip_write_with_attrs(void *opaque, hwaddr addr, break; case LASI_IMR: s->imr = val; - if (((val & LASI_IRQ_BITS) != val) && (val != 0xffffffff)) + if (((val & LASI_IRQ_BITS) != val) && (val != 0xffffffff)) { qemu_log_mask(LOG_GUEST_ERROR, "LASI: tried to set invalid %lx IMR value.\n", (unsigned long) val); + } break; case LASI_IPR: /* Any write to IPR clears the register. 
*/ @@ -186,22 +140,23 @@ static MemTxResult lasi_chip_write_with_attrs(void *opaque, hwaddr addr, s->iar = val; break; - case (LASI_LAN_HPA - LASI_HPA): - /* XXX: reset LAN card */ - break; - case (LASI_LPT_HPA - LASI_HPA): + case LASI_LPT: /* XXX: reset parallel port */ break; - case (LASI_UART_HPA - LASI_HPA): + case LASI_UART: /* XXX: reset serial port */ break; - case (LASI_RTC_HPA - LASI_HPA): + case LASI_LAN: + /* XXX: reset LAN card */ + break; + case LASI_RTC: s->rtc_ref = val - time(NULL); break; case LASI_PCR: - if (val == 0x02) /* immediately power off */ + if (val == 0x02) { /* immediately power off */ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } break; case LASI_ERRLOG: s->errlog = val; @@ -271,90 +226,42 @@ static void lasi_set_irq(void *opaque, int irq, int level) } } -static int lasi_get_irq(unsigned long hpa) +static void lasi_reset(DeviceState *dev) { - switch (hpa) { - case LASI_HPA: - return 14; - case LASI_UART_HPA: - return 5; - case LASI_LPT_HPA: - return 7; - case LASI_LAN_HPA: - return 8; - case LASI_SCSI_HPA: - return 9; - case LASI_AUDIO_HPA: - return 13; - case LASI_PS2KBD_HPA: - case LASI_PS2MOU_HPA: - return 26; - default: - g_assert_not_reached(); - } -} + LasiState *s = LASI_CHIP(dev); -DeviceState *lasi_init(MemoryRegion *address_space) -{ - DeviceState *dev; - LasiState *s; - - dev = qdev_new(TYPE_LASI_CHIP); - s = LASI_CHIP(dev); - s->iar = CPU_HPA + 3; - - /* Lasi access from main memory. */ - memory_region_init_io(&s->this_mem, OBJECT(s), &lasi_chip_ops, - s, "lasi", 0x100000); - memory_region_add_subregion(address_space, LASI_HPA, &s->this_mem); - - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - - /* LAN */ - if (enable_lasi_lan()) { - qemu_irq lan_irq = qemu_allocate_irq(lasi_set_irq, s, - lasi_get_irq(LASI_LAN_HPA)); - lasi_82596_init(address_space, LASI_LAN_HPA, lan_irq); - } - - /* Parallel port */ - qemu_irq lpt_irq = qemu_allocate_irq(lasi_set_irq, s, - lasi_get_irq(LASI_LPT_HPA)); - parallel_mm_init(address_space, LASI_LPT_HPA + 0x800, 0, - lpt_irq, parallel_hds[0]); + s->iar = 0xFFFB0000 + 3; /* CPU_HPA + 3 */ /* Real time clock (RTC), it's only one 32-bit counter @9000 */ - s->rtc = time(NULL); s->rtc_ref = 0; +} - if (serial_hd(1)) { - /* Serial port */ - qemu_irq serial_irq = qemu_allocate_irq(lasi_set_irq, s, - lasi_get_irq(LASI_UART_HPA)); - serial_mm_init(address_space, LASI_UART_HPA + 0x800, 0, - serial_irq, 8000000 / 16, - serial_hd(0), DEVICE_NATIVE_ENDIAN); - } +static void lasi_init(Object *obj) +{ + LasiState *s = LASI_CHIP(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - /* PS/2 Keyboard/Mouse */ - qemu_irq ps2kbd_irq = qemu_allocate_irq(lasi_set_irq, s, - lasi_get_irq(LASI_PS2KBD_HPA)); - lasips2_init(address_space, LASI_PS2KBD_HPA, ps2kbd_irq); + memory_region_init_io(&s->this_mem, OBJECT(s), &lasi_chip_ops, + s, "lasi", 0x100000); - return dev; + sysbus_init_mmio(sbd, &s->this_mem); + + qdev_init_gpio_in(DEVICE(obj), lasi_set_irq, LASI_IRQS); } static void lasi_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + dc->reset = lasi_reset; dc->vmsd = &vmstate_lasi; } static const TypeInfo lasi_pcihost_info = { .name = TYPE_LASI_CHIP, .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = lasi_init, .instance_size = sizeof(LasiState), .class_init = lasi_class_init, }; diff --git a/hw/misc/mac_via.c b/hw/misc/mac_via.c index ff0156db76..f42c12755a 100644 --- a/hw/misc/mac_via.c +++ b/hw/misc/mac_via.c @@ -16,7 +16,6 @@ */ #include "qemu/osdep.h" -#include 
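With the rework above the LASI RTC stores nothing but an offset against host time: a guest write records rtc_ref = value - time(NULL) (this hunk) and every read returns time(NULL) + rtc_ref (previous hunk), so the clock keeps ticking without any timer. A worked example with round numbers:

/*
 *   host time at the write:   1700000000
 *   guest writes              1000000000  ->  rtc_ref = -700000000
 *   host time 60 s later:     1700000060
 *   guest read returns        1700000060 + (-700000000) = 1000000060
 */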
"qemu-common.h" #include "migration/vmstate.h" #include "hw/sysbus.h" #include "hw/irq.h" @@ -30,15 +29,14 @@ #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" #include "sysemu/block-backend.h" +#include "sysemu/rtc.h" #include "trace.h" #include "qemu/log.h" /* - * VIAs: There are two in every machine, + * VIAs: There are two in every machine */ -#define VIA_SIZE (0x2000) - /* * Not all of these are true post MacII I think. * CSA: probably the ones CHRP marks as 'unused' change purposes @@ -132,6 +130,10 @@ * On SE/30, vertical sync interrupt enable. * 0=enabled. This vSync interrupt shows up * as a slot $E interrupt. + * On Quadra 800 this bit toggles A/UX mode which + * configures the glue logic to deliver some IRQs + * at different levels compared to a classic + * Mac. */ #define VIA1B_vADBS2 0x20 /* ADB state input bit 1 (unused on IIfx) */ #define VIA1B_vADBS1 0x10 /* ADB state input bit 0 (unused on IIfx) */ @@ -323,10 +325,11 @@ static void via1_sixty_hz(void *opaque) { MOS6522Q800VIA1State *v1s = opaque; MOS6522State *s = MOS6522(v1s); - MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s); + qemu_irq irq = qdev_get_gpio_in(DEVICE(s), VIA1_IRQ_60HZ_BIT); - s->ifr |= VIA1_IRQ_60HZ; - mdc->update_irq(s); + /* Negative edge trigger */ + qemu_irq_lower(irq); + qemu_irq_raise(irq); via1_sixty_hz_update(v1s); } @@ -335,50 +338,20 @@ static void via1_one_second(void *opaque) { MOS6522Q800VIA1State *v1s = opaque; MOS6522State *s = MOS6522(v1s); - MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s); + qemu_irq irq = qdev_get_gpio_in(DEVICE(s), VIA1_IRQ_ONE_SECOND_BIT); - s->ifr |= VIA1_IRQ_ONE_SECOND; - mdc->update_irq(s); + /* Negative edge trigger */ + qemu_irq_lower(irq); + qemu_irq_raise(irq); via1_one_second_update(v1s); } -static void via1_irq_request(void *opaque, int irq, int level) + +static void pram_update(MOS6522Q800VIA1State *v1s) { - MOS6522Q800VIA1State *v1s = opaque; - MOS6522State *s = MOS6522(v1s); - MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s); - - if (level) { - s->ifr |= 1 << irq; - } else { - s->ifr &= ~(1 << irq); - } - - mdc->update_irq(s); -} - -static void via2_irq_request(void *opaque, int irq, int level) -{ - MOS6522Q800VIA2State *v2s = opaque; - MOS6522State *s = MOS6522(v2s); - MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s); - - if (level) { - s->ifr |= 1 << irq; - } else { - s->ifr &= ~(1 << irq); - } - - mdc->update_irq(s); -} - - -static void pram_update(MacVIAState *m) -{ - if (m->blk) { - if (blk_pwrite(m->blk, 0, m->mos6522_via1.PRAM, - sizeof(m->mos6522_via1.PRAM), 0) < 0) { + if (v1s->blk) { + if (blk_pwrite(v1s->blk, 0, sizeof(v1s->PRAM), v1s->PRAM, 0) < 0) { qemu_log("pram_update: cannot write to file\n"); } } @@ -434,9 +407,8 @@ static int via1_rtc_compact_cmd(uint8_t value) return REG_INVALID; } -static void via1_rtc_update(MacVIAState *m) +static void via1_rtc_update(MOS6522Q800VIA1State *v1s) { - MOS6522Q800VIA1State *v1s = &m->mos6522_via1; MOS6522State *s = MOS6522(v1s); int cmd, sector, addr; uint32_t time; @@ -448,40 +420,40 @@ static void via1_rtc_update(MacVIAState *m) if (s->dirb & VIA1B_vRTCData) { /* send bits to the RTC */ if (!(v1s->last_b & VIA1B_vRTCClk) && (s->b & VIA1B_vRTCClk)) { - m->data_out <<= 1; - m->data_out |= s->b & VIA1B_vRTCData; - m->data_out_cnt++; + v1s->data_out <<= 1; + v1s->data_out |= s->b & VIA1B_vRTCData; + v1s->data_out_cnt++; } - trace_via1_rtc_update_data_out(m->data_out_cnt, m->data_out); + trace_via1_rtc_update_data_out(v1s->data_out_cnt, v1s->data_out); } else { - 
trace_via1_rtc_update_data_in(m->data_in_cnt, m->data_in); + trace_via1_rtc_update_data_in(v1s->data_in_cnt, v1s->data_in); /* receive bits from the RTC */ if ((v1s->last_b & VIA1B_vRTCClk) && !(s->b & VIA1B_vRTCClk) && - m->data_in_cnt) { + v1s->data_in_cnt) { s->b = (s->b & ~VIA1B_vRTCData) | - ((m->data_in >> 7) & VIA1B_vRTCData); - m->data_in <<= 1; - m->data_in_cnt--; + ((v1s->data_in >> 7) & VIA1B_vRTCData); + v1s->data_in <<= 1; + v1s->data_in_cnt--; } return; } - if (m->data_out_cnt != 8) { + if (v1s->data_out_cnt != 8) { return; } - m->data_out_cnt = 0; + v1s->data_out_cnt = 0; - trace_via1_rtc_internal_status(m->cmd, m->alt, m->data_out); + trace_via1_rtc_internal_status(v1s->cmd, v1s->alt, v1s->data_out); /* first byte: it's a command */ - if (m->cmd == REG_EMPTY) { + if (v1s->cmd == REG_EMPTY) { - cmd = via1_rtc_compact_cmd(m->data_out); + cmd = via1_rtc_compact_cmd(v1s->data_out); trace_via1_rtc_internal_cmd(cmd); if (cmd == REG_INVALID) { - trace_via1_rtc_cmd_invalid(m->data_out); + trace_via1_rtc_cmd_invalid(v1s->data_out); return; } @@ -493,20 +465,20 @@ static void via1_rtc_update(MacVIAState *m) * register 3 is highest-order byte */ - time = m->tick_offset + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time = v1s->tick_offset + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / NANOSECONDS_PER_SECOND); trace_via1_rtc_internal_time(time); - m->data_in = (time >> ((cmd & 0x03) << 3)) & 0xff; - m->data_in_cnt = 8; + v1s->data_in = (time >> ((cmd & 0x03) << 3)) & 0xff; + v1s->data_in_cnt = 8; trace_via1_rtc_cmd_seconds_read((cmd & 0x7f) - REG_0, - m->data_in); + v1s->data_in); break; case REG_PRAM_ADDR...REG_PRAM_ADDR_LAST: /* PRAM address 0x00 -> 0x13 */ - m->data_in = v1s->PRAM[(cmd & 0x7f) - REG_PRAM_ADDR]; - m->data_in_cnt = 8; + v1s->data_in = v1s->PRAM[(cmd & 0x7f) - REG_PRAM_ADDR]; + v1s->data_in_cnt = 8; trace_via1_rtc_cmd_pram_read((cmd & 0x7f) - REG_PRAM_ADDR, - m->data_in); + v1s->data_in); break; case REG_PRAM_SECT...REG_PRAM_SECT_LAST: /* @@ -514,7 +486,7 @@ static void via1_rtc_update(MacVIAState *m) * the only two-byte read command */ trace_via1_rtc_internal_set_cmd(cmd); - m->cmd = cmd; + v1s->cmd = cmd; break; default: g_assert_not_reached(); @@ -524,9 +496,9 @@ static void via1_rtc_update(MacVIAState *m) } /* this is a write command, needs a parameter */ - if (cmd == REG_WPROTECT || !m->wprotect) { + if (cmd == REG_WPROTECT || !v1s->wprotect) { trace_via1_rtc_internal_set_cmd(cmd); - m->cmd = cmd; + v1s->cmd = cmd; } else { trace_via1_rtc_internal_ignore_cmd(cmd); } @@ -534,46 +506,47 @@ static void via1_rtc_update(MacVIAState *m) } /* second byte: it's a parameter */ - if (m->alt == REG_EMPTY) { - switch (m->cmd & 0x7f) { + if (v1s->alt == REG_EMPTY) { + switch (v1s->cmd & 0x7f) { case REG_0...REG_3: /* seconds register */ /* FIXME */ - trace_via1_rtc_cmd_seconds_write(m->cmd - REG_0, m->data_out); - m->cmd = REG_EMPTY; + trace_via1_rtc_cmd_seconds_write(v1s->cmd - REG_0, v1s->data_out); + v1s->cmd = REG_EMPTY; break; case REG_TEST: /* device control: nothing to do */ - trace_via1_rtc_cmd_test_write(m->data_out); - m->cmd = REG_EMPTY; + trace_via1_rtc_cmd_test_write(v1s->data_out); + v1s->cmd = REG_EMPTY; break; case REG_WPROTECT: /* Write Protect register */ - trace_via1_rtc_cmd_wprotect_write(m->data_out); - m->wprotect = !!(m->data_out & 0x80); - m->cmd = REG_EMPTY; + trace_via1_rtc_cmd_wprotect_write(v1s->data_out); + v1s->wprotect = !!(v1s->data_out & 0x80); + v1s->cmd = REG_EMPTY; break; case REG_PRAM_ADDR...REG_PRAM_ADDR_LAST: /* PRAM address 0x00 -> 0x13 */ - 
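Commands REG_0..REG_3 above return the 32-bit seconds counter one byte at a time; the low two bits of the command select the byte through the (cmd & 0x03) << 3 shift. Worked example for time = 0x12345678:

/*
 *   REG_0: (0x12345678 >> 0)  & 0xff = 0x78   lowest-order byte
 *   REG_1: (0x12345678 >> 8)  & 0xff = 0x56
 *   REG_2: (0x12345678 >> 16) & 0xff = 0x34
 *   REG_3: (0x12345678 >> 24) & 0xff = 0x12   highest-order byte
 */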
trace_via1_rtc_cmd_pram_write(m->cmd - REG_PRAM_ADDR, m->data_out); - v1s->PRAM[m->cmd - REG_PRAM_ADDR] = m->data_out; - pram_update(m); - m->cmd = REG_EMPTY; + trace_via1_rtc_cmd_pram_write(v1s->cmd - REG_PRAM_ADDR, + v1s->data_out); + v1s->PRAM[v1s->cmd - REG_PRAM_ADDR] = v1s->data_out; + pram_update(v1s); + v1s->cmd = REG_EMPTY; break; case REG_PRAM_SECT...REG_PRAM_SECT_LAST: - addr = (m->data_out >> 2) & 0x1f; - sector = (m->cmd & 0x7f) - REG_PRAM_SECT; - if (m->cmd & 0x80) { + addr = (v1s->data_out >> 2) & 0x1f; + sector = (v1s->cmd & 0x7f) - REG_PRAM_SECT; + if (v1s->cmd & 0x80) { /* it's a read */ - m->data_in = v1s->PRAM[sector * 32 + addr]; - m->data_in_cnt = 8; + v1s->data_in = v1s->PRAM[sector * 32 + addr]; + v1s->data_in_cnt = 8; trace_via1_rtc_cmd_pram_sect_read(sector, addr, sector * 32 + addr, - m->data_in); - m->cmd = REG_EMPTY; + v1s->data_in); + v1s->cmd = REG_EMPTY; } else { /* it's a write, we need one more parameter */ trace_via1_rtc_internal_set_alt(addr, sector, addr); - m->alt = addr; + v1s->alt = addr; } break; default: @@ -584,22 +557,21 @@ static void via1_rtc_update(MacVIAState *m) } /* third byte: it's the data of a REG_PRAM_SECT write */ - g_assert(REG_PRAM_SECT <= m->cmd && m->cmd <= REG_PRAM_SECT_LAST); - sector = m->cmd - REG_PRAM_SECT; - v1s->PRAM[sector * 32 + m->alt] = m->data_out; - pram_update(m); - trace_via1_rtc_cmd_pram_sect_write(sector, m->alt, sector * 32 + m->alt, - m->data_out); - m->alt = REG_EMPTY; - m->cmd = REG_EMPTY; + g_assert(REG_PRAM_SECT <= v1s->cmd && v1s->cmd <= REG_PRAM_SECT_LAST); + sector = v1s->cmd - REG_PRAM_SECT; + v1s->PRAM[sector * 32 + v1s->alt] = v1s->data_out; + pram_update(v1s); + trace_via1_rtc_cmd_pram_sect_write(sector, v1s->alt, sector * 32 + v1s->alt, + v1s->data_out); + v1s->alt = REG_EMPTY; + v1s->cmd = REG_EMPTY; } static void adb_via_poll(void *opaque) { - MacVIAState *m = opaque; - MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(&m->mos6522_via1); + MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(opaque); MOS6522State *s = MOS6522(v1s); - ADBBusState *adb_bus = &m->adb_bus; + ADBBusState *adb_bus = &v1s->adb_bus; uint8_t obuf[9]; uint8_t *data = &s->sr; int olen; @@ -611,50 +583,50 @@ static void adb_via_poll(void *opaque) */ adb_autopoll_block(adb_bus); - if (m->adb_data_in_size > 0 && m->adb_data_in_index == 0) { + if (v1s->adb_data_in_size > 0 && v1s->adb_data_in_index == 0) { /* * For older Linux kernels that switch to IDLE mode after sending the * ADB command, detect if there is an existing response and return that - * as a a "fake" autopoll reply or bus timeout accordingly + * as a "fake" autopoll reply or bus timeout accordingly */ - *data = m->adb_data_out[0]; - olen = m->adb_data_in_size; + *data = v1s->adb_data_out[0]; + olen = v1s->adb_data_in_size; s->b &= ~VIA1B_vADBInt; - qemu_irq_raise(m->adb_data_ready); + qemu_irq_raise(v1s->adb_data_ready); } else { /* * Otherwise poll as normal */ - m->adb_data_in_index = 0; - m->adb_data_out_index = 0; + v1s->adb_data_in_index = 0; + v1s->adb_data_out_index = 0; olen = adb_poll(adb_bus, obuf, adb_bus->autopoll_mask); if (olen > 0) { /* Autopoll response */ *data = obuf[0]; olen--; - memcpy(m->adb_data_in, &obuf[1], olen); - m->adb_data_in_size = olen; + memcpy(v1s->adb_data_in, &obuf[1], olen); + v1s->adb_data_in_size = olen; s->b &= ~VIA1B_vADBInt; - qemu_irq_raise(m->adb_data_ready); + qemu_irq_raise(v1s->adb_data_ready); } else { - *data = m->adb_autopoll_cmd; + *data = v1s->adb_autopoll_cmd; obuf[0] = 0xff; obuf[1] = 0xff; olen = 2; - memcpy(m->adb_data_in, 
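The extended PRAM commands above address the NVRAM in 32-byte sectors: the sector index comes from the command byte (cmd - REG_PRAM_SECT), the byte within the sector from bits 6..2 of the following parameter byte, and bit 7 of the command selects read or write. Worked example:

/*
 *   command byte    = REG_PRAM_SECT + 2           ->  sector 2
 *   parameter byte  = 0x4C: (0x4C >> 2) & 0x1f    ->  addr 19
 *   byte accessed   = PRAM[2 * 32 + 19] = PRAM[83]
 *   command bit 7 set -> read;  clear -> write (one more data byte follows)
 */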
obuf, olen); - m->adb_data_in_size = olen; + memcpy(v1s->adb_data_in, obuf, olen); + v1s->adb_data_in_size = olen; s->b &= ~VIA1B_vADBInt; - qemu_irq_raise(m->adb_data_ready); + qemu_irq_raise(v1s->adb_data_ready); } } trace_via1_adb_poll(*data, (s->b & VIA1B_vADBInt) ? "+" : "-", - adb_bus->status, m->adb_data_in_index, olen); + adb_bus->status, v1s->adb_data_in_index, olen); } static int adb_via_send_len(uint8_t data) @@ -687,11 +659,10 @@ static int adb_via_send_len(uint8_t data) } } -static void adb_via_send(MacVIAState *s, int state, uint8_t data) +static void adb_via_send(MOS6522Q800VIA1State *v1s, int state, uint8_t data) { - MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(&s->mos6522_via1); MOS6522State *ms = MOS6522(v1s); - ADBBusState *adb_bus = &s->adb_bus; + ADBBusState *adb_bus = &v1s->adb_bus; uint16_t autopoll_mask; switch (state) { @@ -707,22 +678,22 @@ static void adb_via_send(MacVIAState *s, int state, uint8_t data) ms->b &= ~VIA1B_vADBInt; } else { ms->b |= VIA1B_vADBInt; - s->adb_data_out_index = 0; - s->adb_data_out[s->adb_data_out_index++] = data; + v1s->adb_data_out_index = 0; + v1s->adb_data_out[v1s->adb_data_out_index++] = data; } trace_via1_adb_send(" NEW", data, (ms->b & VIA1B_vADBInt) ? "+" : "-"); - qemu_irq_raise(s->adb_data_ready); + qemu_irq_raise(v1s->adb_data_ready); break; case ADB_STATE_EVEN: case ADB_STATE_ODD: ms->b |= VIA1B_vADBInt; - s->adb_data_out[s->adb_data_out_index++] = data; + v1s->adb_data_out[v1s->adb_data_out_index++] = data; trace_via1_adb_send(state == ADB_STATE_EVEN ? "EVEN" : " ODD", data, (ms->b & VIA1B_vADBInt) ? "+" : "-"); - qemu_irq_raise(s->adb_data_ready); + qemu_irq_raise(v1s->adb_data_ready); break; case ADB_STATE_IDLE: @@ -730,40 +701,39 @@ static void adb_via_send(MacVIAState *s, int state, uint8_t data) } /* If the command is complete, execute it */ - if (s->adb_data_out_index == adb_via_send_len(s->adb_data_out[0])) { - s->adb_data_in_size = adb_request(adb_bus, s->adb_data_in, - s->adb_data_out, - s->adb_data_out_index); - s->adb_data_in_index = 0; + if (v1s->adb_data_out_index == adb_via_send_len(v1s->adb_data_out[0])) { + v1s->adb_data_in_size = adb_request(adb_bus, v1s->adb_data_in, + v1s->adb_data_out, + v1s->adb_data_out_index); + v1s->adb_data_in_index = 0; if (adb_bus->status & ADB_STATUS_BUSTIMEOUT) { /* * Bus timeout (but allow first EVEN and ODD byte to indicate * timeout via vADBInt and SRQ status) */ - s->adb_data_in[0] = 0xff; - s->adb_data_in[1] = 0xff; - s->adb_data_in_size = 2; + v1s->adb_data_in[0] = 0xff; + v1s->adb_data_in[1] = 0xff; + v1s->adb_data_in_size = 2; } /* * If last command is TALK, store it for use by autopoll and adjust * the autopoll mask accordingly */ - if ((s->adb_data_out[0] & 0xc) == 0xc) { - s->adb_autopoll_cmd = s->adb_data_out[0]; + if ((v1s->adb_data_out[0] & 0xc) == 0xc) { + v1s->adb_autopoll_cmd = v1s->adb_data_out[0]; - autopoll_mask = 1 << (s->adb_autopoll_cmd >> 4); + autopoll_mask = 1 << (v1s->adb_autopoll_cmd >> 4); adb_set_autopoll_mask(adb_bus, autopoll_mask); } } } -static void adb_via_receive(MacVIAState *s, int state, uint8_t *data) +static void adb_via_receive(MOS6522Q800VIA1State *v1s, int state, uint8_t *data) { - MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(&s->mos6522_via1); MOS6522State *ms = MOS6522(v1s); - ADBBusState *adb_bus = &s->adb_bus; + ADBBusState *adb_bus = &v1s->adb_bus; uint16_t pending; switch (state) { @@ -777,16 +747,16 @@ static void adb_via_receive(MacVIAState *s, int state, uint8_t *data) trace_via1_adb_receive("IDLE", *data, (ms->b & 
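The send path above watches for a completed TALK command: an ADB command byte carries the device address in its high nibble and the command code in bits 3..2 (0b11 means Talk), so (cmd & 0xc) == 0xc detects it and the address selects the autopoll mask bit. Worked example:

/*
 *   cmd = 0x3c:  high nibble 3 -> device 3,  bits 3..2 = 0b11 -> Talk reg 0
 *   (0x3c & 0xc) == 0xc              -> stored as adb_autopoll_cmd
 *   autopoll_mask = 1 << (0x3c >> 4) = 1 << 3 = 0x0008   (poll device 3 only)
 */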
VIA1B_vADBInt) ? "+" : "-", adb_bus->status, - s->adb_data_in_index, s->adb_data_in_size); + v1s->adb_data_in_index, v1s->adb_data_in_size); break; case ADB_STATE_EVEN: case ADB_STATE_ODD: - switch (s->adb_data_in_index) { + switch (v1s->adb_data_in_index) { case 0: /* First EVEN byte: vADBInt indicates bus timeout */ - *data = s->adb_data_in[s->adb_data_in_index]; + *data = v1s->adb_data_in[v1s->adb_data_in_index]; if (adb_bus->status & ADB_STATUS_BUSTIMEOUT) { ms->b &= ~VIA1B_vADBInt; } else { @@ -795,16 +765,16 @@ static void adb_via_receive(MacVIAState *s, int state, uint8_t *data) trace_via1_adb_receive(state == ADB_STATE_EVEN ? "EVEN" : " ODD", *data, (ms->b & VIA1B_vADBInt) ? "+" : "-", - adb_bus->status, s->adb_data_in_index, - s->adb_data_in_size); + adb_bus->status, v1s->adb_data_in_index, + v1s->adb_data_in_size); - s->adb_data_in_index++; + v1s->adb_data_in_index++; break; case 1: /* First ODD byte: vADBInt indicates SRQ */ - *data = s->adb_data_in[s->adb_data_in_index]; - pending = adb_bus->pending & ~(1 << (s->adb_autopoll_cmd >> 4)); + *data = v1s->adb_data_in[v1s->adb_data_in_index]; + pending = adb_bus->pending & ~(1 << (v1s->adb_autopoll_cmd >> 4)); if (pending) { ms->b &= ~VIA1B_vADBInt; } else { @@ -813,10 +783,10 @@ static void adb_via_receive(MacVIAState *s, int state, uint8_t *data) trace_via1_adb_receive(state == ADB_STATE_EVEN ? "EVEN" : " ODD", *data, (ms->b & VIA1B_vADBInt) ? "+" : "-", - adb_bus->status, s->adb_data_in_index, - s->adb_data_in_size); + adb_bus->status, v1s->adb_data_in_index, + v1s->adb_data_in_size); - s->adb_data_in_index++; + v1s->adb_data_in_index++; break; default: @@ -826,11 +796,11 @@ static void adb_via_receive(MacVIAState *s, int state, uint8_t *data) * end of the poll reply, so provide these extra bytes below to * keep it happy */ - if (s->adb_data_in_index < s->adb_data_in_size) { + if (v1s->adb_data_in_index < v1s->adb_data_in_size) { /* Next data byte */ - *data = s->adb_data_in[s->adb_data_in_index]; + *data = v1s->adb_data_in[v1s->adb_data_in_index]; ms->b |= VIA1B_vADBInt; - } else if (s->adb_data_in_index == s->adb_data_in_size) { + } else if (v1s->adb_data_in_index == v1s->adb_data_in_size) { if (adb_bus->status & ADB_STATUS_BUSTIMEOUT) { /* Bus timeout (no more data) */ *data = 0xff; @@ -849,23 +819,22 @@ static void adb_via_receive(MacVIAState *s, int state, uint8_t *data) trace_via1_adb_receive(state == ADB_STATE_EVEN ? "EVEN" : " ODD", *data, (ms->b & VIA1B_vADBInt) ? 
"+" : "-", - adb_bus->status, s->adb_data_in_index, - s->adb_data_in_size); + adb_bus->status, v1s->adb_data_in_index, + v1s->adb_data_in_size); - if (s->adb_data_in_index <= s->adb_data_in_size) { - s->adb_data_in_index++; + if (v1s->adb_data_in_index <= v1s->adb_data_in_size) { + v1s->adb_data_in_index++; } break; } - qemu_irq_raise(s->adb_data_ready); + qemu_irq_raise(v1s->adb_data_ready); break; } } -static void via1_adb_update(MacVIAState *m) +static void via1_adb_update(MOS6522Q800VIA1State *v1s) { - MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(&m->mos6522_via1); MOS6522State *s = MOS6522(v1s); int oldstate, state; @@ -875,14 +844,29 @@ static void via1_adb_update(MacVIAState *m) if (state != oldstate) { if (s->acr & VIA1ACR_vShiftOut) { /* output mode */ - adb_via_send(m, state, s->sr); + adb_via_send(v1s, state, s->sr); } else { /* input mode */ - adb_via_receive(m, state, &s->sr); + adb_via_receive(v1s, state, &s->sr); } } } +static void via1_auxmode_update(MOS6522Q800VIA1State *v1s) +{ + MOS6522State *s = MOS6522(v1s); + int oldirq, irq; + + oldirq = (v1s->last_b & VIA1B_vMystery) ? 1 : 0; + irq = (s->b & VIA1B_vMystery) ? 1 : 0; + + /* Check to see if the A/UX mode bit has changed */ + if (irq != oldirq) { + trace_via1_auxmode(irq); + qemu_set_irq(v1s->auxmode_irq, irq); + } +} + static uint64_t mos6522_q800_via1_read(void *opaque, hwaddr addr, unsigned size) { MOS6522Q800VIA1State *s = MOS6522_Q800_VIA1(opaque); @@ -896,7 +880,6 @@ static void mos6522_q800_via1_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(opaque); - MacVIAState *m = container_of(v1s, MacVIAState, mos6522_via1); MOS6522State *ms = MOS6522(v1s); addr = (addr >> 9) & 0xf; @@ -904,8 +887,9 @@ static void mos6522_q800_via1_write(void *opaque, hwaddr addr, uint64_t val, switch (addr) { case VIA_REG_B: - via1_rtc_update(m); - via1_adb_update(m); + via1_rtc_update(v1s); + via1_adb_update(v1s); + via1_auxmode_update(v1s); v1s->last_b = ms->b; break; @@ -926,9 +910,26 @@ static uint64_t mos6522_q800_via2_read(void *opaque, hwaddr addr, unsigned size) { MOS6522Q800VIA2State *s = MOS6522_Q800_VIA2(opaque); MOS6522State *ms = MOS6522(s); + uint64_t val; addr = (addr >> 9) & 0xf; - return mos6522_read(ms, addr, size); + val = mos6522_read(ms, addr, size); + + switch (addr) { + case VIA_REG_IFR: + /* + * On a Q800 an emulated VIA2 is integrated into the onboard logic. The + * expectation of most OSs is that the DRQ bit is live, rather than + * latched as it would be on a real VIA so do the same here. 
+ * + * Note: DRQ is negative edge triggered + */ + val &= ~VIA2_IRQ_SCSI_DATA; + val |= (~ms->last_irq_levels & VIA2_IRQ_SCSI_DATA); + break; + } + + return val; } static void mos6522_q800_via2_write(void *opaque, hwaddr addr, uint64_t val, @@ -951,196 +952,35 @@ static const MemoryRegionOps mos6522_q800_via2_ops = { }, }; -static void mac_via_reset(DeviceState *dev) +static void via1_postload_update_cb(void *opaque, bool running, RunState state) { - MacVIAState *m = MAC_VIA(dev); - ADBBusState *adb_bus = &m->adb_bus; + MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(opaque); - adb_set_autopoll_enabled(adb_bus, true); + qemu_del_vm_change_state_handler(v1s->vmstate); + v1s->vmstate = NULL; - m->cmd = REG_EMPTY; - m->alt = REG_EMPTY; + pram_update(v1s); } -static void mac_via_realize(DeviceState *dev, Error **errp) +static int via1_post_load(void *opaque, int version_id) { - MacVIAState *m = MAC_VIA(dev); - MOS6522State *ms; - ADBBusState *adb_bus = &m->adb_bus; - struct tm tm; - int ret; + MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(opaque); - /* Init VIAs 1 and 2 */ - object_initialize_child(OBJECT(dev), "via1", &m->mos6522_via1, - TYPE_MOS6522_Q800_VIA1); - - object_initialize_child(OBJECT(dev), "via2", &m->mos6522_via2, - TYPE_MOS6522_Q800_VIA2); - - /* Pass through mos6522 output IRQs */ - ms = MOS6522(&m->mos6522_via1); - object_property_add_alias(OBJECT(dev), "irq[0]", OBJECT(ms), - SYSBUS_DEVICE_GPIO_IRQ "[0]"); - ms = MOS6522(&m->mos6522_via2); - object_property_add_alias(OBJECT(dev), "irq[1]", OBJECT(ms), - SYSBUS_DEVICE_GPIO_IRQ "[0]"); - - sysbus_realize(SYS_BUS_DEVICE(&m->mos6522_via1), &error_abort); - sysbus_realize(SYS_BUS_DEVICE(&m->mos6522_via2), &error_abort); - - /* Pass through mos6522 input IRQs */ - qdev_pass_gpios(DEVICE(&m->mos6522_via1), dev, "via1-irq"); - qdev_pass_gpios(DEVICE(&m->mos6522_via2), dev, "via2-irq"); - - /* VIA 1 */ - m->mos6522_via1.one_second_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, - via1_one_second, - &m->mos6522_via1); - via1_one_second_update(&m->mos6522_via1); - m->mos6522_via1.sixty_hz_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, - via1_sixty_hz, - &m->mos6522_via1); - via1_sixty_hz_update(&m->mos6522_via1); - - qemu_get_timedate(&tm, 0); - m->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET; - - adb_register_autopoll_callback(adb_bus, adb_via_poll, m); - m->adb_data_ready = qdev_get_gpio_in_named(dev, "via1-irq", - VIA1_IRQ_ADB_READY_BIT); - - if (m->blk) { - int64_t len = blk_getlength(m->blk); - if (len < 0) { - error_setg_errno(errp, -len, - "could not get length of backing image"); - return; - } - ret = blk_set_perm(m->blk, - BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE, - BLK_PERM_ALL, errp); - if (ret < 0) { - return; - } - - len = blk_pread(m->blk, 0, m->mos6522_via1.PRAM, - sizeof(m->mos6522_via1.PRAM)); - if (len != sizeof(m->mos6522_via1.PRAM)) { - error_setg(errp, "can't read PRAM contents"); - return; - } - } -} - -static void mac_via_init(Object *obj) -{ - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - MacVIAState *m = MAC_VIA(obj); - - /* MMIO */ - memory_region_init(&m->mmio, obj, "mac-via", 2 * VIA_SIZE); - sysbus_init_mmio(sbd, &m->mmio); - - memory_region_init_io(&m->via1mem, obj, &mos6522_q800_via1_ops, - &m->mos6522_via1, "via1", VIA_SIZE); - memory_region_add_subregion(&m->mmio, 0x0, &m->via1mem); - - memory_region_init_io(&m->via2mem, obj, &mos6522_q800_via2_ops, - &m->mos6522_via2, "via2", VIA_SIZE); - memory_region_add_subregion(&m->mmio, VIA_SIZE, &m->via2mem); - - /* ADB */ - qbus_create_inplace((BusState *)&m->adb_bus, 
sizeof(m->adb_bus), - TYPE_ADB_BUS, DEVICE(obj), "adb.0"); -} - -static void postload_update_cb(void *opaque, bool running, RunState state) -{ - MacVIAState *m = MAC_VIA(opaque); - - qemu_del_vm_change_state_handler(m->vmstate); - m->vmstate = NULL; - - pram_update(m); -} - -static int mac_via_post_load(void *opaque, int version_id) -{ - MacVIAState *m = MAC_VIA(opaque); - - if (m->blk) { - m->vmstate = qemu_add_vm_change_state_handler(postload_update_cb, - m); + if (v1s->blk) { + v1s->vmstate = qemu_add_vm_change_state_handler( + via1_postload_update_cb, v1s); } return 0; } -static const VMStateDescription vmstate_mac_via = { - .name = "mac-via", - .version_id = 2, - .minimum_version_id = 2, - .post_load = mac_via_post_load, - .fields = (VMStateField[]) { - /* VIAs */ - VMSTATE_STRUCT(mos6522_via1.parent_obj, MacVIAState, 0, vmstate_mos6522, - MOS6522State), - VMSTATE_UINT8(mos6522_via1.last_b, MacVIAState), - VMSTATE_BUFFER(mos6522_via1.PRAM, MacVIAState), - VMSTATE_TIMER_PTR(mos6522_via1.one_second_timer, MacVIAState), - VMSTATE_INT64(mos6522_via1.next_second, MacVIAState), - VMSTATE_TIMER_PTR(mos6522_via1.sixty_hz_timer, MacVIAState), - VMSTATE_INT64(mos6522_via1.next_sixty_hz, MacVIAState), - VMSTATE_STRUCT(mos6522_via2.parent_obj, MacVIAState, 0, vmstate_mos6522, - MOS6522State), - /* RTC */ - VMSTATE_UINT32(tick_offset, MacVIAState), - VMSTATE_UINT8(data_out, MacVIAState), - VMSTATE_INT32(data_out_cnt, MacVIAState), - VMSTATE_UINT8(data_in, MacVIAState), - VMSTATE_UINT8(data_in_cnt, MacVIAState), - VMSTATE_UINT8(cmd, MacVIAState), - VMSTATE_INT32(wprotect, MacVIAState), - VMSTATE_INT32(alt, MacVIAState), - /* ADB */ - VMSTATE_INT32(adb_data_in_size, MacVIAState), - VMSTATE_INT32(adb_data_in_index, MacVIAState), - VMSTATE_INT32(adb_data_out_index, MacVIAState), - VMSTATE_BUFFER(adb_data_in, MacVIAState), - VMSTATE_BUFFER(adb_data_out, MacVIAState), - VMSTATE_UINT8(adb_autopoll_cmd, MacVIAState), - VMSTATE_END_OF_LIST() - } -}; - -static Property mac_via_properties[] = { - DEFINE_PROP_DRIVE("drive", MacVIAState, blk), - DEFINE_PROP_END_OF_LIST(), -}; - -static void mac_via_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - - dc->realize = mac_via_realize; - dc->reset = mac_via_reset; - dc->vmsd = &vmstate_mac_via; - device_class_set_props(dc, mac_via_properties); -} - -static TypeInfo mac_via_info = { - .name = TYPE_MAC_VIA, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(MacVIAState), - .instance_init = mac_via_init, - .class_init = mac_via_class_init, -}; - /* VIA 1 */ static void mos6522_q800_via1_reset(DeviceState *dev) { - MOS6522State *ms = MOS6522(dev); + MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(dev); + MOS6522State *ms = MOS6522(v1s); MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms); + ADBBusState *adb_bus = &v1s->adb_bus; mdc->parent_reset(dev); @@ -1148,19 +988,122 @@ static void mos6522_q800_via1_reset(DeviceState *dev) ms->timers[1].frequency = VIA_TIMER_FREQ; ms->b = VIA1B_vADB_StateMask | VIA1B_vADBInt | VIA1B_vRTCEnb; + + /* ADB/RTC */ + adb_set_autopoll_enabled(adb_bus, true); + v1s->cmd = REG_EMPTY; + v1s->alt = REG_EMPTY; +} + +static void mos6522_q800_via1_realize(DeviceState *dev, Error **errp) +{ + MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(dev); + ADBBusState *adb_bus = &v1s->adb_bus; + struct tm tm; + int ret; + + v1s->one_second_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, via1_one_second, + v1s); + via1_one_second_update(v1s); + v1s->sixty_hz_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, via1_sixty_hz, + v1s); + 
via1_sixty_hz_update(v1s); + + qemu_get_timedate(&tm, 0); + v1s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET; + + adb_register_autopoll_callback(adb_bus, adb_via_poll, v1s); + v1s->adb_data_ready = qdev_get_gpio_in(dev, VIA1_IRQ_ADB_READY_BIT); + + if (v1s->blk) { + int64_t len = blk_getlength(v1s->blk); + if (len < 0) { + error_setg_errno(errp, -len, + "could not get length of backing image"); + return; + } + ret = blk_set_perm(v1s->blk, + BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE, + BLK_PERM_ALL, errp); + if (ret < 0) { + return; + } + + ret = blk_pread(v1s->blk, 0, sizeof(v1s->PRAM), v1s->PRAM, 0); + if (ret < 0) { + error_setg(errp, "can't read PRAM contents"); + return; + } + } } static void mos6522_q800_via1_init(Object *obj) { - qdev_init_gpio_in_named(DEVICE(obj), via1_irq_request, "via1-irq", - VIA1_IRQ_NB); + MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(v1s); + + memory_region_init_io(&v1s->via_mem, obj, &mos6522_q800_via1_ops, v1s, + "via1", VIA_SIZE); + sysbus_init_mmio(sbd, &v1s->via_mem); + + /* ADB */ + qbus_init((BusState *)&v1s->adb_bus, sizeof(v1s->adb_bus), + TYPE_ADB_BUS, DEVICE(v1s), "adb.0"); + + /* A/UX mode */ + qdev_init_gpio_out(DEVICE(obj), &v1s->auxmode_irq, 1); } +static const VMStateDescription vmstate_q800_via1 = { + .name = "q800-via1", + .version_id = 0, + .minimum_version_id = 0, + .post_load = via1_post_load, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(parent_obj, MOS6522Q800VIA1State, 0, vmstate_mos6522, + MOS6522State), + VMSTATE_UINT8(last_b, MOS6522Q800VIA1State), + /* RTC */ + VMSTATE_BUFFER(PRAM, MOS6522Q800VIA1State), + VMSTATE_UINT32(tick_offset, MOS6522Q800VIA1State), + VMSTATE_UINT8(data_out, MOS6522Q800VIA1State), + VMSTATE_INT32(data_out_cnt, MOS6522Q800VIA1State), + VMSTATE_UINT8(data_in, MOS6522Q800VIA1State), + VMSTATE_UINT8(data_in_cnt, MOS6522Q800VIA1State), + VMSTATE_UINT8(cmd, MOS6522Q800VIA1State), + VMSTATE_INT32(wprotect, MOS6522Q800VIA1State), + VMSTATE_INT32(alt, MOS6522Q800VIA1State), + /* ADB */ + VMSTATE_INT32(adb_data_in_size, MOS6522Q800VIA1State), + VMSTATE_INT32(adb_data_in_index, MOS6522Q800VIA1State), + VMSTATE_INT32(adb_data_out_index, MOS6522Q800VIA1State), + VMSTATE_BUFFER(adb_data_in, MOS6522Q800VIA1State), + VMSTATE_BUFFER(adb_data_out, MOS6522Q800VIA1State), + VMSTATE_UINT8(adb_autopoll_cmd, MOS6522Q800VIA1State), + /* Timers */ + VMSTATE_TIMER_PTR(one_second_timer, MOS6522Q800VIA1State), + VMSTATE_INT64(next_second, MOS6522Q800VIA1State), + VMSTATE_TIMER_PTR(sixty_hz_timer, MOS6522Q800VIA1State), + VMSTATE_INT64(next_sixty_hz, MOS6522Q800VIA1State), + VMSTATE_END_OF_LIST() + } +}; + +static Property mos6522_q800_via1_properties[] = { + DEFINE_PROP_DRIVE("drive", MOS6522Q800VIA1State, blk), + DEFINE_PROP_END_OF_LIST(), +}; + static void mos6522_q800_via1_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); + MOS6522DeviceClass *mdc = MOS6522_CLASS(oc); - dc->reset = mos6522_q800_via1_reset; + dc->realize = mos6522_q800_via1_realize; + device_class_set_parent_reset(dc, mos6522_q800_via1_reset, + &mdc->parent_reset); + dc->vmsd = &vmstate_q800_via1; + device_class_set_props(dc, mos6522_q800_via1_properties); } static const TypeInfo mos6522_q800_via1_type_info = { @@ -1192,20 +1135,59 @@ static void mos6522_q800_via2_reset(DeviceState *dev) ms->dirb = 0; ms->b = 0; + ms->dira = 0; + ms->a = 0x7f; +} + +static void via2_nubus_irq_request(void *opaque, int n, int level) +{ + MOS6522Q800VIA2State *v2s = opaque; + MOS6522State *s = MOS6522(v2s); 
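/*
 * Descriptive note on the handler below: each Nubus slot interrupt arrives
 * as a GPIO input. The slot lines are active low on VIA2 port A and the
 * combined Nubus interrupt is negative edge triggered, so the handler clears
 * the corresponding port A bit while the slot asserts its line and forwards
 * the inverted level to the VIA2 Nubus IRQ input.
 */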
+ qemu_irq irq = qdev_get_gpio_in(DEVICE(s), VIA2_IRQ_NUBUS_BIT); + + if (level) { + /* Port A nubus IRQ inputs are active LOW */ + s->a &= ~(1 << n); + } else { + s->a |= (1 << n); + } + + /* Negative edge trigger */ + qemu_set_irq(irq, !level); } static void mos6522_q800_via2_init(Object *obj) { - qdev_init_gpio_in_named(DEVICE(obj), via2_irq_request, "via2-irq", - VIA2_IRQ_NB); + MOS6522Q800VIA2State *v2s = MOS6522_Q800_VIA2(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(v2s); + + memory_region_init_io(&v2s->via_mem, obj, &mos6522_q800_via2_ops, v2s, + "via2", VIA_SIZE); + sysbus_init_mmio(sbd, &v2s->via_mem); + + qdev_init_gpio_in_named(DEVICE(obj), via2_nubus_irq_request, "nubus-irq", + VIA2_NUBUS_IRQ_NB); } +static const VMStateDescription vmstate_q800_via2 = { + .name = "q800-via2", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(parent_obj, MOS6522Q800VIA2State, 0, vmstate_mos6522, + MOS6522State), + VMSTATE_END_OF_LIST() + } +}; + static void mos6522_q800_via2_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); MOS6522DeviceClass *mdc = MOS6522_CLASS(oc); - dc->reset = mos6522_q800_via2_reset; + device_class_set_parent_reset(dc, mos6522_q800_via2_reset, + &mdc->parent_reset); + dc->vmsd = &vmstate_q800_via2; mdc->portB_write = mos6522_q800_via2_portB_write; } @@ -1221,7 +1203,6 @@ static void mac_via_register_types(void) { type_register_static(&mos6522_q800_via1_type_info); type_register_static(&mos6522_q800_via2_type_info); - type_register_static(&mac_via_info); } type_init(mac_via_register_types); diff --git a/hw/misc/macio/cuda.c b/hw/misc/macio/cuda.c index edbd4186b2..0d4c13319a 100644 --- a/hw/misc/macio/cuda.c +++ b/hw/misc/macio/cuda.c @@ -24,8 +24,7 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" -#include "hw/ppc/mac.h" +#include "hw/irq.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "hw/input/adb.h" @@ -34,6 +33,7 @@ #include "qapi/error.h" #include "qemu/timer.h" #include "sysemu/runstate.h" +#include "sysemu/rtc.h" #include "qapi/error.h" #include "qemu/cutils.h" #include "qemu/log.h" @@ -96,9 +96,9 @@ static void cuda_set_sr_int(void *opaque) CUDAState *s = opaque; MOS6522CUDAState *mcs = &s->mos6522_cuda; MOS6522State *ms = MOS6522(mcs); - MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms); + qemu_irq irq = qdev_get_gpio_in(DEVICE(ms), SR_INT_BIT); - mdc->set_sr_int(ms); + qemu_set_irq(irq, 1); } static void cuda_delay_set_sr_int(CUDAState *s) @@ -553,8 +553,8 @@ static void cuda_init(Object *obj) memory_region_init_io(&s->mem, obj, &mos6522_cuda_ops, s, "cuda", 0x2000); sysbus_init_mmio(sbd, &s->mem); - qbus_create_inplace(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS, - DEVICE(obj), "adb.0"); + qbus_init(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS, + DEVICE(obj), "adb.0"); } static Property cuda_properties[] = { @@ -605,7 +605,8 @@ static void mos6522_cuda_class_init(ObjectClass *oc, void *data) DeviceClass *dc = DEVICE_CLASS(oc); MOS6522DeviceClass *mdc = MOS6522_CLASS(oc); - dc->reset = mos6522_cuda_reset; + device_class_set_parent_reset(dc, mos6522_cuda_reset, + &mdc->parent_reset); mdc->portB_write = mos6522_cuda_portB_write; mdc->get_timer1_counter_value = cuda_get_counter_value; mdc->get_timer2_counter_value = cuda_get_counter_value; diff --git a/hw/misc/macio/gpio.c b/hw/misc/macio/gpio.c index b1bcf830c3..c8ac5633b2 100644 --- a/hw/misc/macio/gpio.c +++ b/hw/misc/macio/gpio.c @@ -24,7 +24,6 @@ */ #include "qemu/osdep.h" -#include "hw/ppc/mac.h" #include 
"hw/qdev-properties.h" #include "migration/vmstate.h" #include "hw/misc/macio/macio.h" diff --git a/hw/misc/macio/mac_dbdma.c b/hw/misc/macio/mac_dbdma.c index e220f1a927..efcc02609f 100644 --- a/hw/misc/macio/mac_dbdma.c +++ b/hw/misc/macio/mac_dbdma.c @@ -94,7 +94,7 @@ static void dbdma_cmdptr_load(DBDMA_channel *ch) DBDMA_DPRINTFCH(ch, "dbdma_cmdptr_load 0x%08x\n", ch->regs[DBDMA_CMDPTR_LO]); dma_memory_read(&address_space_memory, ch->regs[DBDMA_CMDPTR_LO], - &ch->current, sizeof(dbdma_cmd)); + &ch->current, sizeof(dbdma_cmd), MEMTXATTRS_UNSPECIFIED); } static void dbdma_cmdptr_save(DBDMA_channel *ch) @@ -104,7 +104,7 @@ static void dbdma_cmdptr_save(DBDMA_channel *ch) le16_to_cpu(ch->current.xfer_status), le16_to_cpu(ch->current.res_count)); dma_memory_write(&address_space_memory, ch->regs[DBDMA_CMDPTR_LO], - &ch->current, sizeof(dbdma_cmd)); + &ch->current, sizeof(dbdma_cmd), MEMTXATTRS_UNSPECIFIED); } static void kill_channel(DBDMA_channel *ch) @@ -371,7 +371,8 @@ static void load_word(DBDMA_channel *ch, int key, uint32_t addr, return; } - dma_memory_read(&address_space_memory, addr, ¤t->cmd_dep, len); + dma_memory_read(&address_space_memory, addr, ¤t->cmd_dep, len, + MEMTXATTRS_UNSPECIFIED); if (conditional_wait(ch)) goto wait; @@ -403,7 +404,8 @@ static void store_word(DBDMA_channel *ch, int key, uint32_t addr, return; } - dma_memory_write(&address_space_memory, addr, ¤t->cmd_dep, len); + dma_memory_write(&address_space_memory, addr, ¤t->cmd_dep, len, + MEMTXATTRS_UNSPECIFIED); if (conditional_wait(ch)) goto wait; diff --git a/hw/misc/macio/macio.c b/hw/misc/macio/macio.c index 4515296e66..08dbdd7fc0 100644 --- a/hw/misc/macio/macio.c +++ b/hw/misc/macio/macio.c @@ -26,7 +26,6 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/module.h" -#include "hw/ppc/mac.h" #include "hw/misc/macio/cuda.h" #include "hw/pci/pci.h" #include "hw/ppc/mac_dbdma.h" @@ -37,8 +36,9 @@ #include "hw/intc/heathrow_pic.h" #include "trace.h" -/* Note: this code is strongly inspirated from the corresponding code - * in PearPC */ +#define ESCC_CLOCK 3686400 + +/* Note: this code is strongly inspired by the corresponding code in PearPC */ /* * The mac-io has two interfaces to the ESCC. 
One is called "escc-legacy", @@ -226,7 +226,7 @@ static void macio_oldworld_init(Object *obj) object_initialize_child(OBJECT(s), "nvram", &os->nvram, TYPE_MACIO_NVRAM); dev = DEVICE(&os->nvram); - qdev_prop_set_uint32(dev, "size", 0x2000); + qdev_prop_set_uint32(dev, "size", MACIO_NVRAM_SIZE); qdev_prop_set_uint32(dev, "it_shift", 4); for (i = 0; i < 2; i++) { @@ -387,8 +387,8 @@ static void macio_instance_init(Object *obj) memory_region_init(&s->bar, obj, "macio", 0x80000); - qbus_create_inplace(&s->macio_bus, sizeof(s->macio_bus), TYPE_MACIO_BUS, - DEVICE(obj), "macio.0"); + qbus_init(&s->macio_bus, sizeof(s->macio_bus), TYPE_MACIO_BUS, + DEVICE(obj), "macio.0"); object_initialize_child(OBJECT(s), "dbdma", &s->dbdma, TYPE_MAC_DBDMA); diff --git a/hw/misc/macio/pmu.c b/hw/misc/macio/pmu.c index 71924d4768..70562ed8d0 100644 --- a/hw/misc/macio/pmu.c +++ b/hw/misc/macio/pmu.c @@ -29,8 +29,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" -#include "hw/ppc/mac.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "hw/input/adb.h" @@ -41,6 +39,7 @@ #include "qapi/error.h" #include "qemu/timer.h" #include "sysemu/runstate.h" +#include "sysemu/rtc.h" #include "qapi/error.h" #include "qemu/cutils.h" #include "qemu/log.h" @@ -57,27 +56,14 @@ #define VIA_TIMER_FREQ (4700000 / 6) -static void via_update_irq(PMUState *s) -{ - MOS6522PMUState *mps = MOS6522_PMU(&s->mos6522_pmu); - MOS6522State *ms = MOS6522(mps); - - bool new_state = !!(ms->ifr & ms->ier & (SR_INT | T1_INT | T2_INT)); - - if (new_state != s->via_irq_state) { - s->via_irq_state = new_state; - qemu_set_irq(s->via_irq, new_state); - } -} - static void via_set_sr_int(void *opaque) { PMUState *s = opaque; MOS6522PMUState *mps = MOS6522_PMU(&s->mos6522_pmu); MOS6522State *ms = MOS6522(mps); - MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms); + qemu_irq irq = qdev_get_gpio_in(DEVICE(ms), SR_INT_BIT); - mdc->set_sr_int(ms); + qemu_set_irq(irq, 1); } static void pmu_update_extirq(PMUState *s) @@ -718,6 +704,7 @@ static const VMStateDescription vmstate_pmu = { }, .subsections = (const VMStateDescription * []) { &vmstate_pmu_adb, + NULL } }; @@ -754,8 +741,8 @@ static void pmu_realize(DeviceState *dev, Error **errp) timer_mod(s->one_sec_timer, s->one_sec_target); if (s->has_adb) { - qbus_create_inplace(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS, - dev, "adb.0"); + qbus_init(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS, + dev, "adb.0"); adb_register_autopoll_callback(adb_bus, pmu_adb_poll, s); } } @@ -807,28 +794,9 @@ static void mos6522_pmu_portB_write(MOS6522State *s) MOS6522PMUState *mps = container_of(s, MOS6522PMUState, parent_obj); PMUState *ps = container_of(mps, PMUState, mos6522_pmu); - if ((s->pcr & 0xe0) == 0x20 || (s->pcr & 0xe0) == 0x60) { - s->ifr &= ~CB2_INT; - } - s->ifr &= ~CB1_INT; - - via_update_irq(ps); pmu_update(ps); } -static void mos6522_pmu_portA_write(MOS6522State *s) -{ - MOS6522PMUState *mps = container_of(s, MOS6522PMUState, parent_obj); - PMUState *ps = container_of(mps, PMUState, mos6522_pmu); - - if ((s->pcr & 0x0e) == 0x02 || (s->pcr & 0x0e) == 0x06) { - s->ifr &= ~CA2_INT; - } - s->ifr &= ~CA1_INT; - - via_update_irq(ps); -} - static void mos6522_pmu_reset(DeviceState *dev) { MOS6522State *ms = MOS6522(dev); @@ -849,9 +817,9 @@ static void mos6522_pmu_class_init(ObjectClass *oc, void *data) DeviceClass *dc = DEVICE_CLASS(oc); MOS6522DeviceClass *mdc = MOS6522_CLASS(oc); - dc->reset = mos6522_pmu_reset; + device_class_set_parent_reset(dc, mos6522_pmu_reset, + &mdc->parent_reset); 
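/*
 * device_class_set_parent_reset() stashes the base mos6522 reset handler in
 * MOS6522DeviceClass::parent_reset so that a subclass reset can chain to it,
 * as the Q800 VIA1 reset handler earlier in this series does with
 * mdc->parent_reset(dev). A minimal sketch of the pattern for a hypothetical
 * subclass (names illustrative only, not part of this series):
 *
 *     static void foo_via_reset(DeviceState *dev)
 *     {
 *         MOS6522State *ms = MOS6522(dev);
 *         MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms);
 *
 *         mdc->parent_reset(dev);    // run the common mos6522 reset first
 *         // ...then apply subclass-specific reset state
 *     }
 */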
mdc->portB_write = mos6522_pmu_portB_write; - mdc->portA_write = mos6522_pmu_portA_write; } static const TypeInfo mos6522_pmu_type_info = { diff --git a/hw/misc/meson.build b/hw/misc/meson.build index a53b849a5a..95268eddc0 100644 --- a/hw/misc/meson.build +++ b/hw/misc/meson.build @@ -17,6 +17,8 @@ softmmu_ss.add(when: 'CONFIG_INTEGRATOR_DEBUG', if_true: files('arm_integrator_d softmmu_ss.add(when: 'CONFIG_A9SCU', if_true: files('a9scu.c')) softmmu_ss.add(when: 'CONFIG_ARM11SCU', if_true: files('arm11scu.c')) +softmmu_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m_ras.c')) + # Mac devices softmmu_ss.add(when: 'CONFIG_MOS6522', if_true: files('mos6522.c')) @@ -82,7 +84,13 @@ softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files( )) softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_misc.c')) softmmu_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq_slcr.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-xramc.c')) +specific_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp-crf.c')) +specific_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp-apu-ctrl.c')) +specific_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-crl.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files( + 'xlnx-versal-xramc.c', + 'xlnx-versal-pmc-iou-slcr.c', +)) softmmu_ss.add(when: 'CONFIG_STM32F2XX_SYSCFG', if_true: files('stm32f2xx_syscfg.c')) softmmu_ss.add(when: 'CONFIG_STM32F4XX_SYSCFG', if_true: files('stm32f4xx_syscfg.c')) softmmu_ss.add(when: 'CONFIG_STM32F4XX_EXTI', if_true: files('stm32f4xx_exti.c')) @@ -103,10 +111,13 @@ softmmu_ss.add(when: 'CONFIG_PVPANIC_PCI', if_true: files('pvpanic-pci.c')) softmmu_ss.add(when: 'CONFIG_AUX', if_true: files('auxbus.c')) softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files( 'aspeed_hace.c', + 'aspeed_i3c.c', 'aspeed_lpc.c', 'aspeed_scu.c', + 'aspeed_sbc.c', 'aspeed_sdmc.c', - 'aspeed_xdma.c')) + 'aspeed_xdma.c', + 'aspeed_peci.c')) softmmu_ss.add(when: 'CONFIG_MSF2', if_true: files('msf2-sysreg.c')) softmmu_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_rng.c')) @@ -124,3 +135,6 @@ specific_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('mips_cmgcr.c', 'mips_cp specific_ss.add(when: 'CONFIG_MIPS_ITU', if_true: files('mips_itu.c')) specific_ss.add(when: 'CONFIG_SBSA_REF', if_true: files('sbsa_ec.c')) + +# HPPA devices +softmmu_ss.add(when: 'CONFIG_LASI', if_true: files('lasi.c')) diff --git a/hw/misc/mips_itu.c b/hw/misc/mips_itu.c index 80683fed31..badef5c214 100644 --- a/hw/misc/mips_itu.c +++ b/hw/misc/mips_itu.c @@ -189,7 +189,8 @@ static void wake_blocked_threads(ITCStorageCell *c) c->blocked_threads = 0; } -static void QEMU_NORETURN block_thread_and_exit(ITCStorageCell *c) +static G_NORETURN +void block_thread_and_exit(ITCStorageCell *c) { c->blocked_threads |= 1ULL << current_cpu->cpu_index; current_cpu->halted = 1; diff --git a/hw/misc/mos6522.c b/hw/misc/mos6522.c index 1c57332b40..fe38c44426 100644 --- a/hw/misc/mos6522.c +++ b/hw/misc/mos6522.c @@ -30,12 +30,21 @@ #include "hw/misc/mos6522.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" +#include "monitor/monitor.h" +#include "monitor/hmp.h" +#include "qapi/type-helpers.h" #include "qemu/timer.h" #include "qemu/cutils.h" #include "qemu/log.h" #include "qemu/module.h" #include "trace.h" + +static const char *mos6522_reg_names[MOS6522_NUM_REGS] = { + "ORB", "ORA", "DDRB", "DDRA", "T1CL", "T1CH", "T1LL", "T1LH", + "T2CL", "T2CH", "SR", "ACR", "PCR", "IFR", "IER", "ANH" +}; + /* XXX: 
implement all timer modes */ static void mos6522_timer1_update(MOS6522State *s, MOS6522Timer *ti, @@ -52,6 +61,73 @@ static void mos6522_update_irq(MOS6522State *s) } } +static void mos6522_set_irq(void *opaque, int n, int level) +{ + MOS6522State *s = MOS6522(opaque); + int last_level = !!(s->last_irq_levels & (1 << n)); + uint8_t last_ifr = s->ifr; + bool positive_edge = true; + int ctrl; + + /* + * SR_INT is managed by mos6522 instances and cleared upon SR + * read. It is only the external CA1/2 and CB1/2 lines that + * are edge-triggered and latched in IFR + */ + if (n != SR_INT_BIT && level == last_level) { + return; + } + + /* Detect negative edge trigger */ + if (last_level == 1 && level == 0) { + positive_edge = false; + } + + switch (n) { + case CA2_INT_BIT: + ctrl = (s->pcr & CA2_CTRL_MASK) >> CA2_CTRL_SHIFT; + if ((positive_edge && (ctrl & C2_POS)) || + (!positive_edge && !(ctrl & C2_POS))) { + s->ifr |= 1 << n; + } + break; + case CA1_INT_BIT: + ctrl = (s->pcr & CA1_CTRL_MASK) >> CA1_CTRL_SHIFT; + if ((positive_edge && (ctrl & C1_POS)) || + (!positive_edge && !(ctrl & C1_POS))) { + s->ifr |= 1 << n; + } + break; + case SR_INT_BIT: + s->ifr |= 1 << n; + break; + case CB2_INT_BIT: + ctrl = (s->pcr & CB2_CTRL_MASK) >> CB2_CTRL_SHIFT; + if ((positive_edge && (ctrl & C2_POS)) || + (!positive_edge && !(ctrl & C2_POS))) { + s->ifr |= 1 << n; + } + break; + case CB1_INT_BIT: + ctrl = (s->pcr & CB1_CTRL_MASK) >> CB1_CTRL_SHIFT; + if ((positive_edge && (ctrl & C1_POS)) || + (!positive_edge && !(ctrl & C1_POS))) { + s->ifr |= 1 << n; + } + break; + } + + if (s->ifr != last_ifr) { + mos6522_update_irq(s); + } + + if (level) { + s->last_irq_levels |= 1 << n; + } else { + s->last_irq_levels &= ~(1 << n); + } +} + static uint64_t get_counter_value(MOS6522State *s, MOS6522Timer *ti) { MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s); @@ -195,13 +271,6 @@ static void mos6522_timer2(void *opaque) mos6522_update_irq(s); } -static void mos6522_set_sr_int(MOS6522State *s) -{ - trace_mos6522_set_sr_int(); - s->ifr |= SR_INT; - mos6522_update_irq(s); -} - static uint64_t mos6522_get_counter_value(MOS6522State *s, MOS6522Timer *ti) { return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - ti->load_time, @@ -229,6 +298,7 @@ uint64_t mos6522_read(void *opaque, hwaddr addr, unsigned size) { MOS6522State *s = opaque; uint32_t val; + int ctrl; int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); if (now >= s->timers[0].next_irq_time) { @@ -242,12 +312,24 @@ uint64_t mos6522_read(void *opaque, hwaddr addr, unsigned size) switch (addr) { case VIA_REG_B: val = s->b; + ctrl = (s->pcr & CB2_CTRL_MASK) >> CB2_CTRL_SHIFT; + if (!(ctrl & C2_IND)) { + s->ifr &= ~CB2_INT; + } + s->ifr &= ~CB1_INT; + mos6522_update_irq(s); break; case VIA_REG_A: qemu_log_mask(LOG_UNIMP, "Read access to register A with handshake"); /* fall through */ case VIA_REG_ANH: val = s->a; + ctrl = (s->pcr & CA2_CTRL_MASK) >> CA2_CTRL_SHIFT; + if (!(ctrl & C2_IND)) { + s->ifr &= ~CA2_INT; + } + s->ifr &= ~CA1_INT; + mos6522_update_irq(s); break; case VIA_REG_DIRB: val = s->dirb; @@ -304,7 +386,7 @@ uint64_t mos6522_read(void *opaque, hwaddr addr, unsigned size) } if (addr != VIA_REG_IFR || val != 0) { - trace_mos6522_read(addr, val); + trace_mos6522_read(addr, mos6522_reg_names[addr], val); } return val; @@ -314,13 +396,20 @@ void mos6522_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { MOS6522State *s = opaque; MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s); + int ctrl; - trace_mos6522_write(addr, val); + trace_mos6522_write(addr, 
mos6522_reg_names[addr], val); switch (addr) { case VIA_REG_B: s->b = (s->b & ~s->dirb) | (val & s->dirb); mdc->portB_write(s); + ctrl = (s->pcr & CB2_CTRL_MASK) >> CB2_CTRL_SHIFT; + if (!(ctrl & C2_IND)) { + s->ifr &= ~CB2_INT; + } + s->ifr &= ~CB1_INT; + mos6522_update_irq(s); break; case VIA_REG_A: qemu_log_mask(LOG_UNIMP, "Write access to register A with handshake"); @@ -328,6 +417,12 @@ void mos6522_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) case VIA_REG_ANH: s->a = (s->a & ~s->dira) | (val & s->dira); mdc->portA_write(s); + ctrl = (s->pcr & CA2_CTRL_MASK) >> CA2_CTRL_SHIFT; + if (!(ctrl & C2_IND)) { + s->ifr &= ~CA2_INT; + } + s->ifr &= ~CA1_INT; + mos6522_update_irq(s); break; case VIA_REG_DIRB: s->dirb = val; @@ -403,6 +498,106 @@ void mos6522_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) } } +static int qmp_x_query_via_foreach(Object *obj, void *opaque) +{ + GString *buf = opaque; + + if (object_dynamic_cast(obj, TYPE_MOS6522)) { + MOS6522State *s = MOS6522(obj); + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + uint16_t t1counter = get_counter(s, &s->timers[0]); + uint16_t t2counter = get_counter(s, &s->timers[1]); + + g_string_append_printf(buf, "%s:\n", object_get_typename(obj)); + + g_string_append_printf(buf, " Registers:\n"); + g_string_append_printf(buf, " %-*s: 0x%x\n", 4, + mos6522_reg_names[0], s->b); + g_string_append_printf(buf, " %-*s: 0x%x\n", 4, + mos6522_reg_names[1], s->a); + g_string_append_printf(buf, " %-*s: 0x%x\n", 4, + mos6522_reg_names[2], s->dirb); + g_string_append_printf(buf, " %-*s: 0x%x\n", 4, + mos6522_reg_names[3], s->dira); + g_string_append_printf(buf, " %-*s: 0x%x\n", 4, + mos6522_reg_names[4], t1counter & 0xff); + g_string_append_printf(buf, " %-*s: 0x%x\n", 4, + mos6522_reg_names[5], t1counter >> 8); + g_string_append_printf(buf, " %-*s: 0x%x\n", 4, + mos6522_reg_names[6], + s->timers[0].latch & 0xff); + g_string_append_printf(buf, " %-*s: 0x%x\n", 4, + mos6522_reg_names[7], + s->timers[0].latch >> 8); + g_string_append_printf(buf, " %-*s: 0x%x\n", 4, + mos6522_reg_names[8], t2counter & 0xff); + g_string_append_printf(buf, " %-*s: 0x%x\n", 4, + mos6522_reg_names[9], t2counter >> 8); + g_string_append_printf(buf, " %-*s: 0x%x\n", 4, + mos6522_reg_names[10], s->sr); + g_string_append_printf(buf, " %-*s: 0x%x\n", 4, + mos6522_reg_names[11], s->acr); + g_string_append_printf(buf, " %-*s: 0x%x\n", 4, + mos6522_reg_names[12], s->pcr); + g_string_append_printf(buf, " %-*s: 0x%x\n", 4, + mos6522_reg_names[13], s->ifr); + g_string_append_printf(buf, " %-*s: 0x%x\n", 4, + mos6522_reg_names[14], s->ier); + + g_string_append_printf(buf, " Timers:\n"); + g_string_append_printf(buf, " Using current time now(ns)=%"PRId64 + "\n", now); + g_string_append_printf(buf, " T1 freq(hz)=%"PRId64 + " mode=%s" + " counter=0x%x" + " latch=0x%x\n" + " load_time(ns)=%"PRId64 + " next_irq_time(ns)=%"PRId64 "\n", + s->timers[0].frequency, + ((s->acr & T1MODE) == T1MODE_CONT) ? 
"continuous" + : "one-shot", + t1counter, + s->timers[0].latch, + s->timers[0].load_time, + get_next_irq_time(s, &s->timers[0], now)); + g_string_append_printf(buf, " T2 freq(hz)=%"PRId64 + " mode=%s" + " counter=0x%x" + " latch=0x%x\n" + " load_time(ns)=%"PRId64 + " next_irq_time(ns)=%"PRId64 "\n", + s->timers[1].frequency, + "one-shot", + t2counter, + s->timers[1].latch, + s->timers[1].load_time, + get_next_irq_time(s, &s->timers[1], now)); + } + + return 0; +} + +static HumanReadableText *qmp_x_query_via(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + + object_child_foreach_recursive(object_get_root(), + qmp_x_query_via_foreach, buf); + + return human_readable_text_from_str(buf); +} + +void hmp_info_via(Monitor *mon, const QDict *qdict) +{ + Error *err = NULL; + g_autoptr(HumanReadableText) info = qmp_x_query_via(&err); + + if (hmp_handle_error(mon, err)) { + return; + } + monitor_puts(mon, info->human_readable_text); +} + static const MemoryRegionOps mos6522_ops = { .read = mos6522_read, .write = mos6522_write, @@ -429,8 +624,8 @@ static const VMStateDescription vmstate_mos6522_timer = { const VMStateDescription vmstate_mos6522 = { .name = "mos6522", - .version_id = 0, - .minimum_version_id = 0, + .version_id = 1, + .minimum_version_id = 1, .fields = (VMStateField[]) { VMSTATE_UINT8(a, MOS6522State), VMSTATE_UINT8(b, MOS6522State), @@ -441,6 +636,7 @@ const VMStateDescription vmstate_mos6522 = { VMSTATE_UINT8(pcr, MOS6522State), VMSTATE_UINT8(ifr, MOS6522State), VMSTATE_UINT8(ier, MOS6522State), + VMSTATE_UINT8(last_irq_levels, MOS6522State), VMSTATE_STRUCT_ARRAY(timers, MOS6522State, 2, 0, vmstate_mos6522_timer, MOS6522Timer), VMSTATE_END_OF_LIST() @@ -478,7 +674,8 @@ static void mos6522_init(Object *obj) MOS6522State *s = MOS6522(obj); int i; - memory_region_init_io(&s->mem, obj, &mos6522_ops, s, "mos6522", 0x10); + memory_region_init_io(&s->mem, obj, &mos6522_ops, s, "mos6522", + MOS6522_NUM_REGS); sysbus_init_mmio(sbd, &s->mem); sysbus_init_irq(sbd, &s->irq); @@ -488,6 +685,8 @@ static void mos6522_init(Object *obj) s->timers[0].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, mos6522_timer1, s); s->timers[1].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, mos6522_timer2, s); + + qdev_init_gpio_in(DEVICE(obj), mos6522_set_irq, VIA_NUM_INTS); } static void mos6522_finalize(Object *obj) @@ -511,11 +710,8 @@ static void mos6522_class_init(ObjectClass *oc, void *data) dc->reset = mos6522_reset; dc->vmsd = &vmstate_mos6522; device_class_set_props(dc, mos6522_properties); - mdc->parent_reset = dc->reset; - mdc->set_sr_int = mos6522_set_sr_int; mdc->portB_write = mos6522_portB_write; mdc->portA_write = mos6522_portA_write; - mdc->update_irq = mos6522_update_irq; mdc->get_timer1_counter_value = mos6522_get_counter_value; mdc->get_timer2_counter_value = mos6522_get_counter_value; mdc->get_timer1_load_time = mos6522_get_load_time; diff --git a/hw/misc/npcm7xx_clk.c b/hw/misc/npcm7xx_clk.c index 0b61070c52..bc2b879feb 100644 --- a/hw/misc/npcm7xx_clk.c +++ b/hw/misc/npcm7xx_clk.c @@ -612,8 +612,8 @@ static void npcm7xx_clk_sel_init(Object *obj) NPCM7xxClockSELState *sel = NPCM7XX_CLOCK_SEL(obj); for (i = 0; i < NPCM7XX_CLK_SEL_MAX_INPUT; ++i) { - sel->clock_in[i] = qdev_init_clock_in(DEVICE(sel), - g_strdup_printf("clock-in[%d]", i), + g_autofree char *s = g_strdup_printf("clock-in[%d]", i); + sel->clock_in[i] = qdev_init_clock_in(DEVICE(sel), s, npcm7xx_clk_update_sel_cb, sel, ClockUpdate); } sel->clock_out = qdev_init_clock_out(DEVICE(sel), "clock-out"); diff --git a/hw/misc/pca9552.c 
b/hw/misc/pca9552.c index b7686e27d7..fff19e369a 100644 --- a/hw/misc/pca9552.c +++ b/hw/misc/pca9552.c @@ -272,7 +272,7 @@ static void pca955x_get_led(Object *obj, Visitor *v, const char *name, * reading the INPUTx reg */ reg = PCA9552_LS0 + led / 4; - state = (pca955x_read(s, reg) >> (led % 8)) & 0x3; + state = (pca955x_read(s, reg) >> ((led % 4) * 2)) & 0x3; visit_type_str(v, name, (char **)&led_state[state], errp); } diff --git a/hw/misc/pvpanic-isa.c b/hw/misc/pvpanic-isa.c index 7b66d58acc..ccec50f61b 100644 --- a/hw/misc/pvpanic-isa.c +++ b/hw/misc/pvpanic-isa.c @@ -21,6 +21,8 @@ #include "hw/misc/pvpanic.h" #include "qom/object.h" #include "hw/isa/isa.h" +#include "standard-headers/linux/pvpanic.h" +#include "hw/acpi/acpi_aml_interface.h" OBJECT_DECLARE_SIMPLE_TYPE(PVPanicISAState, PVPANIC_ISA_DEVICE) @@ -62,27 +64,69 @@ static void pvpanic_isa_realizefn(DeviceState *dev, Error **errp) isa_register_ioport(d, &ps->mr, s->ioport); } +static void build_pvpanic_isa_aml(AcpiDevAmlIf *adev, Aml *scope) +{ + Aml *crs, *field, *method; + PVPanicISAState *s = PVPANIC_ISA_DEVICE(adev); + Aml *dev = aml_device("PEVT"); + + aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0001"))); + + crs = aml_resource_template(); + aml_append(crs, + aml_io(AML_DECODE16, s->ioport, s->ioport, 1, 1) + ); + aml_append(dev, aml_name_decl("_CRS", crs)); + + aml_append(dev, aml_operation_region("PEOR", AML_SYSTEM_IO, + aml_int(s->ioport), 1)); + field = aml_field("PEOR", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); + aml_append(field, aml_named_field("PEPT", 8)); + aml_append(dev, field); + + /* device present, functioning, decoding, shown in UI */ + aml_append(dev, aml_name_decl("_STA", aml_int(0xF))); + + method = aml_method("RDPT", 0, AML_NOTSERIALIZED); + aml_append(method, aml_store(aml_name("PEPT"), aml_local(0))); + aml_append(method, aml_return(aml_local(0))); + aml_append(dev, method); + + method = aml_method("WRPT", 1, AML_NOTSERIALIZED); + aml_append(method, aml_store(aml_arg(0), aml_name("PEPT"))); + aml_append(dev, method); + + aml_append(scope, dev); +} + static Property pvpanic_isa_properties[] = { DEFINE_PROP_UINT16(PVPANIC_IOPORT_PROP, PVPanicISAState, ioport, 0x505), - DEFINE_PROP_UINT8("events", PVPanicISAState, pvpanic.events, PVPANIC_PANICKED | PVPANIC_CRASHLOADED), + DEFINE_PROP_UINT8("events", PVPanicISAState, pvpanic.events, + PVPANIC_PANICKED | PVPANIC_CRASH_LOADED), DEFINE_PROP_END_OF_LIST(), }; static void pvpanic_isa_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); dc->realize = pvpanic_isa_realizefn; device_class_set_props(dc, pvpanic_isa_properties); set_bit(DEVICE_CATEGORY_MISC, dc->categories); + adevc->build_dev_aml = build_pvpanic_isa_aml; } -static TypeInfo pvpanic_isa_info = { +static const TypeInfo pvpanic_isa_info = { .name = TYPE_PVPANIC_ISA_DEVICE, .parent = TYPE_ISA_DEVICE, .instance_size = sizeof(PVPanicISAState), .instance_init = pvpanic_isa_initfn, .class_init = pvpanic_isa_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_ACPI_DEV_AML_IF }, + { }, + }, }; static void pvpanic_register_types(void) diff --git a/hw/misc/pvpanic-pci.c b/hw/misc/pvpanic-pci.c index af8cbe2830..99cf7e2041 100644 --- a/hw/misc/pvpanic-pci.c +++ b/hw/misc/pvpanic-pci.c @@ -21,6 +21,7 @@ #include "hw/misc/pvpanic.h" #include "qom/object.h" #include "hw/pci/pci.h" +#include "standard-headers/linux/pvpanic.h" OBJECT_DECLARE_SIMPLE_TYPE(PVPanicPCIState, PVPANIC_PCI_DEVICE) @@ -53,7 +54,8 @@ static 
void pvpanic_pci_realizefn(PCIDevice *dev, Error **errp) } static Property pvpanic_pci_properties[] = { - DEFINE_PROP_UINT8("events", PVPanicPCIState, pvpanic.events, PVPANIC_PANICKED | PVPANIC_CRASHLOADED), + DEFINE_PROP_UINT8("events", PVPanicPCIState, pvpanic.events, + PVPANIC_PANICKED | PVPANIC_CRASH_LOADED), DEFINE_PROP_END_OF_LIST(), }; @@ -74,7 +76,7 @@ static void pvpanic_pci_class_init(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_MISC, dc->categories); } -static TypeInfo pvpanic_pci_info = { +static const TypeInfo pvpanic_pci_info = { .name = TYPE_PVPANIC_PCI_DEVICE, .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PVPanicPCIState), diff --git a/hw/misc/pvpanic.c b/hw/misc/pvpanic.c index e2cb4a5d28..1540e9091a 100644 --- a/hw/misc/pvpanic.c +++ b/hw/misc/pvpanic.c @@ -21,12 +21,13 @@ #include "hw/qdev-properties.h" #include "hw/misc/pvpanic.h" #include "qom/object.h" +#include "standard-headers/linux/pvpanic.h" static void handle_event(int event) { static bool logged; - if (event & ~(PVPANIC_PANICKED | PVPANIC_CRASHLOADED) && !logged) { + if (event & ~(PVPANIC_PANICKED | PVPANIC_CRASH_LOADED) && !logged) { qemu_log_mask(LOG_GUEST_ERROR, "pvpanic: unknown event %#x.\n", event); logged = true; } @@ -36,7 +37,7 @@ static void handle_event(int event) return; } - if (event & PVPANIC_CRASHLOADED) { + if (event & PVPANIC_CRASH_LOADED) { qemu_system_guest_crashloaded(NULL); return; } diff --git a/hw/misc/sbsa_ec.c b/hw/misc/sbsa_ec.c index 83020fe9ac..8d939fe31b 100644 --- a/hw/misc/sbsa_ec.c +++ b/hw/misc/sbsa_ec.c @@ -11,7 +11,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/log.h" #include "hw/sysbus.h" #include "sysemu/runstate.h" diff --git a/hw/misc/sga.c b/hw/misc/sga.c index 4dbe6d78f9..1d04672b01 100644 --- a/hw/misc/sga.c +++ b/hw/misc/sga.c @@ -30,6 +30,7 @@ #include "hw/loader.h" #include "qemu/module.h" #include "qom/object.h" +#include "qemu/error-report.h" #define SGABIOS_FILENAME "sgabios.bin" @@ -42,6 +43,7 @@ struct ISASGAState { static void sga_realizefn(DeviceState *dev, Error **errp) { + warn_report("-device sga is deprecated, use -machine graphics=off"); rom_add_vga(SGABIOS_FILENAME); } diff --git a/hw/misc/sifive_u_otp.c b/hw/misc/sifive_u_otp.c index 18aa0bd55d..6d7fdb040a 100644 --- a/hw/misc/sifive_u_otp.c +++ b/hw/misc/sifive_u_otp.c @@ -64,8 +64,8 @@ static uint64_t sifive_u_otp_read(void *opaque, hwaddr addr, unsigned int size) if (s->blk) { int32_t buf; - if (blk_pread(s->blk, s->pa * SIFIVE_U_OTP_FUSE_WORD, &buf, - SIFIVE_U_OTP_FUSE_WORD) < 0) { + if (blk_pread(s->blk, s->pa * SIFIVE_U_OTP_FUSE_WORD, + SIFIVE_U_OTP_FUSE_WORD, &buf, 0) < 0) { error_report("read error index<%d>", s->pa); return 0xff; } @@ -167,8 +167,8 @@ static void sifive_u_otp_write(void *opaque, hwaddr addr, /* write to backend */ if (s->blk) { if (blk_pwrite(s->blk, s->pa * SIFIVE_U_OTP_FUSE_WORD, - &s->fuse[s->pa], SIFIVE_U_OTP_FUSE_WORD, - 0) < 0) { + SIFIVE_U_OTP_FUSE_WORD, &s->fuse[s->pa], 0) + < 0) { error_report("write error index<%d>", s->pa); } } @@ -209,7 +209,14 @@ static void sifive_u_otp_realize(DeviceState *dev, Error **errp) TYPE_SIFIVE_U_OTP, SIFIVE_U_OTP_REG_SIZE); sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio); - dinfo = drive_get_next(IF_NONE); + dinfo = drive_get(IF_PFLASH, 0, 0); + if (!dinfo) { + dinfo = drive_get(IF_NONE, 0, 0); + if (dinfo) { + warn_report("using \"-drive if=none\" for the OTP is deprecated, " + "use \"-drive if=pflash\" instead."); + } + } if (dinfo) { int ret; uint64_t perm; @@ -233,16 +240,12 @@ static void 
sifive_u_otp_realize(DeviceState *dev, Error **errp) return; } - if (blk_pread(s->blk, 0, s->fuse, filesize) != filesize) { + if (blk_pread(s->blk, 0, filesize, s->fuse, 0) < 0) { error_setg(errp, "failed to read the initial flash content"); + return; } } } -} - -static void sifive_u_otp_reset(DeviceState *dev) -{ - SiFiveUOTPState *s = SIFIVE_U_OTP(dev); /* Initialize all fuses' initial value to 0xFFs */ memset(s->fuse, 0xff, sizeof(s->fuse)); @@ -258,14 +261,16 @@ static void sifive_u_otp_reset(DeviceState *dev) serial_data = s->serial; if (blk_pwrite(s->blk, index * SIFIVE_U_OTP_FUSE_WORD, - &serial_data, SIFIVE_U_OTP_FUSE_WORD, 0) < 0) { - error_report("write error index<%d>", index); + SIFIVE_U_OTP_FUSE_WORD, &serial_data, 0) < 0) { + error_setg(errp, "failed to write index<%d>", index); + return; } serial_data = ~(s->serial); if (blk_pwrite(s->blk, (index + 1) * SIFIVE_U_OTP_FUSE_WORD, - &serial_data, SIFIVE_U_OTP_FUSE_WORD, 0) < 0) { - error_report("write error index<%d>", index + 1); + SIFIVE_U_OTP_FUSE_WORD, &serial_data, 0) < 0) { + error_setg(errp, "failed to write index<%d>", index + 1); + return; } } @@ -279,7 +284,6 @@ static void sifive_u_otp_class_init(ObjectClass *klass, void *data) device_class_set_props(dc, sifive_u_otp_properties); dc->realize = sifive_u_otp_realize; - dc->reset = sifive_u_otp_reset; } static const TypeInfo sifive_u_otp_info = { diff --git a/hw/misc/trace-events b/hw/misc/trace-events index ede413965b..c18bc0605e 100644 --- a/hw/misc/trace-events +++ b/hw/misc/trace-events @@ -1,7 +1,7 @@ # See docs/devel/tracing.rst for syntax documentation. # allwinner-cpucfg.c -allwinner_cpucfg_cpu_reset(uint8_t cpu_id, uint32_t reset_addr) "id %u, reset_addr 0x%" PRIu32 +allwinner_cpucfg_cpu_reset(uint8_t cpu_id, uint32_t reset_addr) "id %u, reset_addr 0x%" PRIx32 allwinner_cpucfg_read(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 allwinner_cpucfg_write(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 @@ -69,6 +69,7 @@ slavio_led_mem_readw(uint32_t ret) "Read diagnostic LED 0x%04x" # aspeed_scu.c aspeed_scu_write(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32 +aspeed_scu_read(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32 # mps2-scc.c mps2_scc_read(uint64_t offset, uint64_t data, unsigned size) "MPS2 SCC read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" @@ -93,10 +94,10 @@ imx7_gpr_write(uint64_t offset, uint64_t value) "addr 0x%08" PRIx64 "value 0x%08 # mos6522.c mos6522_set_counter(int index, unsigned int val) "T%d.counter=%d" -mos6522_get_next_irq_time(uint16_t latch, int64_t d, int64_t delta) "latch=%d counter=0x%"PRId64 " delta_next=0x%"PRId64 +mos6522_get_next_irq_time(uint16_t latch, int64_t d, int64_t delta) "latch=%d counter=0x%"PRIx64 " delta_next=0x%"PRIx64 mos6522_set_sr_int(void) "set sr_int" -mos6522_write(uint64_t addr, uint64_t val) "reg=0x%"PRIx64 " val=0x%"PRIx64 -mos6522_read(uint64_t addr, unsigned val) "reg=0x%"PRIx64 " val=0x%x" +mos6522_write(uint64_t addr, const char *name, uint64_t val) "reg=0x%"PRIx64 " [%s] val=0x%"PRIx64 +mos6522_read(uint64_t addr, const char *name, unsigned val) "reg=0x%"PRIx64 " [%s] val=0x%x" # npcm7xx_clk.c npcm7xx_clk_read(uint64_t offset, uint32_t value) " offset: 0x%04" PRIx64 " value: 0x%08" PRIx32 @@ -199,6 +200,21 @@ armsse_mhu_write(uint64_t offset, uint64_t data, unsigned size) "SSE-200 MHU wri # aspeed_xdma.c 
aspeed_xdma_write(uint64_t offset, uint64_t data) "XDMA write: offset 0x%" PRIx64 " data 0x%" PRIx64 +# aspeed_i3c.c +aspeed_i3c_read(uint64_t offset, uint64_t data) "I3C read: offset 0x%" PRIx64 " data 0x%" PRIx64 +aspeed_i3c_write(uint64_t offset, uint64_t data) "I3C write: offset 0x%" PRIx64 " data 0x%" PRIx64 +aspeed_i3c_device_read(uint32_t deviceid, uint64_t offset, uint64_t data) "I3C Dev[%u] read: offset 0x%" PRIx64 " data 0x%" PRIx64 +aspeed_i3c_device_write(uint32_t deviceid, uint64_t offset, uint64_t data) "I3C Dev[%u] write: offset 0x%" PRIx64 " data 0x%" PRIx64 + +# aspeed_sdmc.c +aspeed_sdmc_write(uint64_t reg, uint64_t data) "reg @0x%" PRIx64 " data: 0x%" PRIx64 +aspeed_sdmc_read(uint64_t reg, uint64_t data) "reg @0x%" PRIx64 " data: 0x%" PRIx64 + +# aspeed_peci.c +aspeed_peci_read(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64 +aspeed_peci_write(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64 +aspeed_peci_raise_interrupt(uint32_t ctrl, uint32_t status) "ctrl 0x%" PRIx32 " status 0x%" PRIx32 + # bcm2835_property.c bcm2835_mbox_property(uint32_t tag, uint32_t bufsize, size_t resplen) "mbox property tag:0x%08x in_sz:%u out_sz:%zu" @@ -228,10 +244,11 @@ via1_rtc_cmd_pram_sect_write(int sector, int offset, int addr, int value) "secto via1_adb_send(const char *state, uint8_t data, const char *vadbint) "state %s data=0x%02x vADBInt=%s" via1_adb_receive(const char *state, uint8_t data, const char *vadbint, int status, int index, int size) "state %s data=0x%02x vADBInt=%s status=0x%x index=%d size=%d" via1_adb_poll(uint8_t data, const char *vadbint, int status, int index, int size) "data=0x%02x vADBInt=%s status=0x%x index=%d size=%d" +via1_auxmode(int mode) "setting auxmode to %d" # grlib_ahb_apb_pnp.c -grlib_ahb_pnp_read(uint64_t addr, uint32_t value) "AHB PnP read addr:0x%03"PRIx64" data:0x%08x" -grlib_apb_pnp_read(uint64_t addr, uint32_t value) "APB PnP read addr:0x%03"PRIx64" data:0x%08x" +grlib_ahb_pnp_read(uint64_t addr, unsigned size, uint32_t value) "AHB PnP read addr:0x%03"PRIx64" size:%u data:0x%08x" +grlib_apb_pnp_read(uint64_t addr, unsigned size, uint32_t value) "APB PnP read addr:0x%03"PRIx64" size:%u data:0x%08x" # led.c led_set_intensity(const char *color, const char *desc, uint8_t intensity_percent) "LED desc:'%s' color:%s intensity: %u%%" @@ -252,3 +269,8 @@ virt_ctrl_write(void *dev, unsigned int addr, unsigned int size, uint64_t value) virt_ctrl_reset(void *dev) "ctrl: %p" virt_ctrl_realize(void *dev) "ctrl: %p" virt_ctrl_instance_init(void *dev) "ctrl: %p" + +# lasi.c +lasi_chip_mem_valid(uint64_t addr, uint32_t val) "access to addr 0x%"PRIx64" is %d" +lasi_chip_read(uint64_t addr, uint32_t val) "addr 0x%"PRIx64" val 0x%08x" +lasi_chip_write(uint64_t addr, uint32_t val) "addr 0x%"PRIx64" val 0x%08x" diff --git a/hw/misc/virt_ctrl.c b/hw/misc/virt_ctrl.c index 3552d7a09a..e75d1e7e17 100644 --- a/hw/misc/virt_ctrl.c +++ b/hw/misc/virt_ctrl.c @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * Virt system Controller */ diff --git a/hw/misc/xlnx-versal-crl.c b/hw/misc/xlnx-versal-crl.c new file mode 100644 index 0000000000..767106b7a3 --- /dev/null +++ b/hw/misc/xlnx-versal-crl.c @@ -0,0 +1,421 @@ +/* + * QEMU model of the Clock-Reset-LPD (CRL). + * + * Copyright (c) 2022 Advanced Micro Devices, Inc. + * SPDX-License-Identifier: GPL-2.0-or-later + * + * Written by Edgar E. 
Iglesias + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/bitops.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/register.h" +#include "hw/resettable.h" + +#include "target/arm/arm-powerctl.h" +#include "hw/misc/xlnx-versal-crl.h" + +#ifndef XLNX_VERSAL_CRL_ERR_DEBUG +#define XLNX_VERSAL_CRL_ERR_DEBUG 0 +#endif + +static void crl_update_irq(XlnxVersalCRL *s) +{ + bool pending = s->regs[R_IR_STATUS] & ~s->regs[R_IR_MASK]; + qemu_set_irq(s->irq, pending); +} + +static void crl_status_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCRL *s = XLNX_VERSAL_CRL(reg->opaque); + crl_update_irq(s); +} + +static uint64_t crl_enable_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCRL *s = XLNX_VERSAL_CRL(reg->opaque); + uint32_t val = val64; + + s->regs[R_IR_MASK] &= ~val; + crl_update_irq(s); + return 0; +} + +static uint64_t crl_disable_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCRL *s = XLNX_VERSAL_CRL(reg->opaque); + uint32_t val = val64; + + s->regs[R_IR_MASK] |= val; + crl_update_irq(s); + return 0; +} + +static void crl_reset_dev(XlnxVersalCRL *s, DeviceState *dev, + bool rst_old, bool rst_new) +{ + device_cold_reset(dev); +} + +static void crl_reset_cpu(XlnxVersalCRL *s, ARMCPU *armcpu, + bool rst_old, bool rst_new) +{ + if (rst_new) { + arm_set_cpu_off(armcpu->mp_affinity); + } else { + arm_set_cpu_on_and_reset(armcpu->mp_affinity); + } +} + +#define REGFIELD_RESET(type, s, reg, f, new_val, dev) { \ + bool old_f = ARRAY_FIELD_EX32((s)->regs, reg, f); \ + bool new_f = FIELD_EX32(new_val, reg, f); \ + \ + /* Detect edges. */ \ + if (dev && old_f != new_f) { \ + crl_reset_ ## type(s, dev, old_f, new_f); \ + } \ +} + +static uint64_t crl_rst_r5_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCRL *s = XLNX_VERSAL_CRL(reg->opaque); + + REGFIELD_RESET(cpu, s, RST_CPU_R5, RESET_CPU0, val64, s->cfg.cpu_r5[0]); + REGFIELD_RESET(cpu, s, RST_CPU_R5, RESET_CPU1, val64, s->cfg.cpu_r5[1]); + return val64; +} + +static uint64_t crl_rst_adma_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCRL *s = XLNX_VERSAL_CRL(reg->opaque); + int i; + + /* A single register fans out to all ADMA reset inputs. 
*/ + for (i = 0; i < ARRAY_SIZE(s->cfg.adma); i++) { + REGFIELD_RESET(dev, s, RST_ADMA, RESET, val64, s->cfg.adma[i]); + } + return val64; +} + +static uint64_t crl_rst_uart0_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCRL *s = XLNX_VERSAL_CRL(reg->opaque); + + REGFIELD_RESET(dev, s, RST_UART0, RESET, val64, s->cfg.uart[0]); + return val64; +} + +static uint64_t crl_rst_uart1_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCRL *s = XLNX_VERSAL_CRL(reg->opaque); + + REGFIELD_RESET(dev, s, RST_UART1, RESET, val64, s->cfg.uart[1]); + return val64; +} + +static uint64_t crl_rst_gem0_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCRL *s = XLNX_VERSAL_CRL(reg->opaque); + + REGFIELD_RESET(dev, s, RST_GEM0, RESET, val64, s->cfg.gem[0]); + return val64; +} + +static uint64_t crl_rst_gem1_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCRL *s = XLNX_VERSAL_CRL(reg->opaque); + + REGFIELD_RESET(dev, s, RST_GEM1, RESET, val64, s->cfg.gem[1]); + return val64; +} + +static uint64_t crl_rst_usb_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCRL *s = XLNX_VERSAL_CRL(reg->opaque); + + REGFIELD_RESET(dev, s, RST_USB0, RESET, val64, s->cfg.usb); + return val64; +} + +static const RegisterAccessInfo crl_regs_info[] = { + { .name = "ERR_CTRL", .addr = A_ERR_CTRL, + },{ .name = "IR_STATUS", .addr = A_IR_STATUS, + .w1c = 0x1, + .post_write = crl_status_postw, + },{ .name = "IR_MASK", .addr = A_IR_MASK, + .reset = 0x1, + .ro = 0x1, + },{ .name = "IR_ENABLE", .addr = A_IR_ENABLE, + .pre_write = crl_enable_prew, + },{ .name = "IR_DISABLE", .addr = A_IR_DISABLE, + .pre_write = crl_disable_prew, + },{ .name = "WPROT", .addr = A_WPROT, + },{ .name = "PLL_CLK_OTHER_DMN", .addr = A_PLL_CLK_OTHER_DMN, + .reset = 0x1, + .rsvd = 0xe, + },{ .name = "RPLL_CTRL", .addr = A_RPLL_CTRL, + .reset = 0x24809, + .rsvd = 0xf88c00f6, + },{ .name = "RPLL_CFG", .addr = A_RPLL_CFG, + .reset = 0x2000000, + .rsvd = 0x1801210, + },{ .name = "RPLL_FRAC_CFG", .addr = A_RPLL_FRAC_CFG, + .rsvd = 0x7e330000, + },{ .name = "PLL_STATUS", .addr = A_PLL_STATUS, + .reset = R_PLL_STATUS_RPLL_STABLE_MASK | + R_PLL_STATUS_RPLL_LOCK_MASK, + .rsvd = 0xfa, + .ro = 0x5, + },{ .name = "RPLL_TO_XPD_CTRL", .addr = A_RPLL_TO_XPD_CTRL, + .reset = 0x2000100, + .rsvd = 0xfdfc00ff, + },{ .name = "LPD_TOP_SWITCH_CTRL", .addr = A_LPD_TOP_SWITCH_CTRL, + .reset = 0x6000300, + .rsvd = 0xf9fc00f8, + },{ .name = "LPD_LSBUS_CTRL", .addr = A_LPD_LSBUS_CTRL, + .reset = 0x2000800, + .rsvd = 0xfdfc00f8, + },{ .name = "CPU_R5_CTRL", .addr = A_CPU_R5_CTRL, + .reset = 0xe000300, + .rsvd = 0xe1fc00f8, + },{ .name = "IOU_SWITCH_CTRL", .addr = A_IOU_SWITCH_CTRL, + .reset = 0x2000500, + .rsvd = 0xfdfc00f8, + },{ .name = "GEM0_REF_CTRL", .addr = A_GEM0_REF_CTRL, + .reset = 0xe000a00, + .rsvd = 0xf1fc00f8, + },{ .name = "GEM1_REF_CTRL", .addr = A_GEM1_REF_CTRL, + .reset = 0xe000a00, + .rsvd = 0xf1fc00f8, + },{ .name = "GEM_TSU_REF_CTRL", .addr = A_GEM_TSU_REF_CTRL, + .reset = 0x300, + .rsvd = 0xfdfc00f8, + },{ .name = "USB0_BUS_REF_CTRL", .addr = A_USB0_BUS_REF_CTRL, + .reset = 0x2001900, + .rsvd = 0xfdfc00f8, + },{ .name = "UART0_REF_CTRL", .addr = A_UART0_REF_CTRL, + .reset = 0xc00, + .rsvd = 0xfdfc00f8, + },{ .name = "UART1_REF_CTRL", .addr = A_UART1_REF_CTRL, + .reset = 0xc00, + .rsvd = 0xfdfc00f8, + },{ .name = "SPI0_REF_CTRL", .addr = A_SPI0_REF_CTRL, + .reset = 0x600, + .rsvd = 0xfdfc00f8, + },{ .name = "SPI1_REF_CTRL", .addr = A_SPI1_REF_CTRL, + .reset = 0x600, + .rsvd = 0xfdfc00f8, + },{ .name = "CAN0_REF_CTRL", .addr = A_CAN0_REF_CTRL, 
+ .reset = 0xc00, + .rsvd = 0xfdfc00f8, + },{ .name = "CAN1_REF_CTRL", .addr = A_CAN1_REF_CTRL, + .reset = 0xc00, + .rsvd = 0xfdfc00f8, + },{ .name = "I2C0_REF_CTRL", .addr = A_I2C0_REF_CTRL, + .reset = 0xc00, + .rsvd = 0xfdfc00f8, + },{ .name = "I2C1_REF_CTRL", .addr = A_I2C1_REF_CTRL, + .reset = 0xc00, + .rsvd = 0xfdfc00f8, + },{ .name = "DBG_LPD_CTRL", .addr = A_DBG_LPD_CTRL, + .reset = 0x300, + .rsvd = 0xfdfc00f8, + },{ .name = "TIMESTAMP_REF_CTRL", .addr = A_TIMESTAMP_REF_CTRL, + .reset = 0x2000c00, + .rsvd = 0xfdfc00f8, + },{ .name = "CRL_SAFETY_CHK", .addr = A_CRL_SAFETY_CHK, + },{ .name = "PSM_REF_CTRL", .addr = A_PSM_REF_CTRL, + .reset = 0xf04, + .rsvd = 0xfffc00f8, + },{ .name = "DBG_TSTMP_CTRL", .addr = A_DBG_TSTMP_CTRL, + .reset = 0x300, + .rsvd = 0xfdfc00f8, + },{ .name = "CPM_TOPSW_REF_CTRL", .addr = A_CPM_TOPSW_REF_CTRL, + .reset = 0x300, + .rsvd = 0xfdfc00f8, + },{ .name = "USB3_DUAL_REF_CTRL", .addr = A_USB3_DUAL_REF_CTRL, + .reset = 0x3c00, + .rsvd = 0xfdfc00f8, + },{ .name = "RST_CPU_R5", .addr = A_RST_CPU_R5, + .reset = 0x17, + .rsvd = 0x8, + .pre_write = crl_rst_r5_prew, + },{ .name = "RST_ADMA", .addr = A_RST_ADMA, + .reset = 0x1, + .pre_write = crl_rst_adma_prew, + },{ .name = "RST_GEM0", .addr = A_RST_GEM0, + .reset = 0x1, + .pre_write = crl_rst_gem0_prew, + },{ .name = "RST_GEM1", .addr = A_RST_GEM1, + .reset = 0x1, + .pre_write = crl_rst_gem1_prew, + },{ .name = "RST_SPARE", .addr = A_RST_SPARE, + .reset = 0x1, + },{ .name = "RST_USB0", .addr = A_RST_USB0, + .reset = 0x1, + .pre_write = crl_rst_usb_prew, + },{ .name = "RST_UART0", .addr = A_RST_UART0, + .reset = 0x1, + .pre_write = crl_rst_uart0_prew, + },{ .name = "RST_UART1", .addr = A_RST_UART1, + .reset = 0x1, + .pre_write = crl_rst_uart1_prew, + },{ .name = "RST_SPI0", .addr = A_RST_SPI0, + .reset = 0x1, + },{ .name = "RST_SPI1", .addr = A_RST_SPI1, + .reset = 0x1, + },{ .name = "RST_CAN0", .addr = A_RST_CAN0, + .reset = 0x1, + },{ .name = "RST_CAN1", .addr = A_RST_CAN1, + .reset = 0x1, + },{ .name = "RST_I2C0", .addr = A_RST_I2C0, + .reset = 0x1, + },{ .name = "RST_I2C1", .addr = A_RST_I2C1, + .reset = 0x1, + },{ .name = "RST_DBG_LPD", .addr = A_RST_DBG_LPD, + .reset = 0x33, + .rsvd = 0xcc, + },{ .name = "RST_GPIO", .addr = A_RST_GPIO, + .reset = 0x1, + },{ .name = "RST_TTC", .addr = A_RST_TTC, + .reset = 0xf, + },{ .name = "RST_TIMESTAMP", .addr = A_RST_TIMESTAMP, + .reset = 0x1, + },{ .name = "RST_SWDT", .addr = A_RST_SWDT, + .reset = 0x1, + },{ .name = "RST_OCM", .addr = A_RST_OCM, + },{ .name = "RST_IPI", .addr = A_RST_IPI, + },{ .name = "RST_FPD", .addr = A_RST_FPD, + .reset = 0x3, + },{ .name = "PSM_RST_MODE", .addr = A_PSM_RST_MODE, + .reset = 0x1, + .rsvd = 0xf8, + } +}; + +static void crl_reset_enter(Object *obj, ResetType type) +{ + XlnxVersalCRL *s = XLNX_VERSAL_CRL(obj); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + register_reset(&s->regs_info[i]); + } +} + +static void crl_reset_hold(Object *obj) +{ + XlnxVersalCRL *s = XLNX_VERSAL_CRL(obj); + + crl_update_irq(s); +} + +static const MemoryRegionOps crl_ops = { + .read = register_read_memory, + .write = register_write_memory, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void crl_init(Object *obj) +{ + XlnxVersalCRL *s = XLNX_VERSAL_CRL(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + int i; + + s->reg_array = + register_init_block32(DEVICE(obj), crl_regs_info, + ARRAY_SIZE(crl_regs_info), + s->regs_info, s->regs, + &crl_ops, + 
XLNX_VERSAL_CRL_ERR_DEBUG, + CRL_R_MAX * 4); + sysbus_init_mmio(sbd, &s->reg_array->mem); + sysbus_init_irq(sbd, &s->irq); + + for (i = 0; i < ARRAY_SIZE(s->cfg.cpu_r5); ++i) { + object_property_add_link(obj, "cpu_r5[*]", TYPE_ARM_CPU, + (Object **)&s->cfg.cpu_r5[i], + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); + } + + for (i = 0; i < ARRAY_SIZE(s->cfg.adma); ++i) { + object_property_add_link(obj, "adma[*]", TYPE_DEVICE, + (Object **)&s->cfg.adma[i], + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); + } + + for (i = 0; i < ARRAY_SIZE(s->cfg.uart); ++i) { + object_property_add_link(obj, "uart[*]", TYPE_DEVICE, + (Object **)&s->cfg.uart[i], + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); + } + + for (i = 0; i < ARRAY_SIZE(s->cfg.gem); ++i) { + object_property_add_link(obj, "gem[*]", TYPE_DEVICE, + (Object **)&s->cfg.gem[i], + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); + } + + object_property_add_link(obj, "usb", TYPE_DEVICE, + (Object **)&s->cfg.usb, + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); +} + +static void crl_finalize(Object *obj) +{ + XlnxVersalCRL *s = XLNX_VERSAL_CRL(obj); + register_finalize_block(s->reg_array); +} + +static const VMStateDescription vmstate_crl = { + .name = TYPE_XLNX_VERSAL_CRL, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxVersalCRL, CRL_R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static void crl_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_crl; + + rc->phases.enter = crl_reset_enter; + rc->phases.hold = crl_reset_hold; +} + +static const TypeInfo crl_info = { + .name = TYPE_XLNX_VERSAL_CRL, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxVersalCRL), + .class_init = crl_class_init, + .instance_init = crl_init, + .instance_finalize = crl_finalize, +}; + +static void crl_register_types(void) +{ + type_register_static(&crl_info); +} + +type_init(crl_register_types) diff --git a/hw/misc/xlnx-versal-pmc-iou-slcr.c b/hw/misc/xlnx-versal-pmc-iou-slcr.c new file mode 100644 index 0000000000..07b7ebc217 --- /dev/null +++ b/hw/misc/xlnx-versal-pmc-iou-slcr.c @@ -0,0 +1,1446 @@ +/* + * QEMU model of Versal's PMC IOU SLCR (system level control registers) + * + * Copyright (c) 2021 Xilinx Inc. + * Written by Edgar E. Iglesias + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/register.h" +#include "hw/irq.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" +#include "hw/misc/xlnx-versal-pmc-iou-slcr.h" + +#ifndef XILINX_VERSAL_PMC_IOU_SLCR_ERR_DEBUG +#define XILINX_VERSAL_PMC_IOU_SLCR_ERR_DEBUG 0 +#endif + +REG32(MIO_PIN_0, 0x0) + FIELD(MIO_PIN_0, L3_SEL, 7, 3) + FIELD(MIO_PIN_0, L2_SEL, 5, 2) + FIELD(MIO_PIN_0, L1_SEL, 3, 2) + FIELD(MIO_PIN_0, L0_SEL, 1, 2) +REG32(MIO_PIN_1, 0x4) + FIELD(MIO_PIN_1, L3_SEL, 7, 3) + FIELD(MIO_PIN_1, L2_SEL, 5, 2) + FIELD(MIO_PIN_1, L1_SEL, 3, 2) + FIELD(MIO_PIN_1, L0_SEL, 1, 2) +REG32(MIO_PIN_2, 0x8) + FIELD(MIO_PIN_2, L3_SEL, 7, 3) + FIELD(MIO_PIN_2, L2_SEL, 5, 2) + FIELD(MIO_PIN_2, L1_SEL, 3, 2) + FIELD(MIO_PIN_2, L0_SEL, 1, 2) +REG32(MIO_PIN_3, 0xc) + FIELD(MIO_PIN_3, L3_SEL, 7, 3) + FIELD(MIO_PIN_3, L2_SEL, 5, 2) + FIELD(MIO_PIN_3, L1_SEL, 3, 2) + FIELD(MIO_PIN_3, L0_SEL, 1, 2) +REG32(MIO_PIN_4, 0x10) + FIELD(MIO_PIN_4, L3_SEL, 7, 3) + FIELD(MIO_PIN_4, L2_SEL, 5, 2) + FIELD(MIO_PIN_4, L1_SEL, 3, 2) + FIELD(MIO_PIN_4, L0_SEL, 1, 2) +REG32(MIO_PIN_5, 0x14) + FIELD(MIO_PIN_5, L3_SEL, 7, 3) + FIELD(MIO_PIN_5, L2_SEL, 5, 2) + FIELD(MIO_PIN_5, L1_SEL, 3, 2) + FIELD(MIO_PIN_5, L0_SEL, 1, 2) +REG32(MIO_PIN_6, 0x18) + FIELD(MIO_PIN_6, L3_SEL, 7, 3) + FIELD(MIO_PIN_6, L2_SEL, 5, 2) + FIELD(MIO_PIN_6, L1_SEL, 3, 2) + FIELD(MIO_PIN_6, L0_SEL, 1, 2) +REG32(MIO_PIN_7, 0x1c) + FIELD(MIO_PIN_7, L3_SEL, 7, 3) + FIELD(MIO_PIN_7, L2_SEL, 5, 2) + FIELD(MIO_PIN_7, L1_SEL, 3, 2) + FIELD(MIO_PIN_7, L0_SEL, 1, 2) +REG32(MIO_PIN_8, 0x20) + FIELD(MIO_PIN_8, L3_SEL, 7, 3) + FIELD(MIO_PIN_8, L2_SEL, 5, 2) + FIELD(MIO_PIN_8, L1_SEL, 3, 2) + FIELD(MIO_PIN_8, L0_SEL, 1, 2) +REG32(MIO_PIN_9, 0x24) + FIELD(MIO_PIN_9, L3_SEL, 7, 3) + FIELD(MIO_PIN_9, L2_SEL, 5, 2) + FIELD(MIO_PIN_9, L1_SEL, 3, 2) + FIELD(MIO_PIN_9, L0_SEL, 1, 2) +REG32(MIO_PIN_10, 0x28) + FIELD(MIO_PIN_10, L3_SEL, 7, 3) + FIELD(MIO_PIN_10, L2_SEL, 5, 2) + FIELD(MIO_PIN_10, L1_SEL, 3, 2) + FIELD(MIO_PIN_10, L0_SEL, 1, 2) +REG32(MIO_PIN_11, 0x2c) + FIELD(MIO_PIN_11, L3_SEL, 7, 3) + FIELD(MIO_PIN_11, L2_SEL, 5, 2) + FIELD(MIO_PIN_11, L1_SEL, 3, 2) + FIELD(MIO_PIN_11, L0_SEL, 1, 2) +REG32(MIO_PIN_12, 0x30) + FIELD(MIO_PIN_12, L3_SEL, 7, 3) + FIELD(MIO_PIN_12, L2_SEL, 5, 2) + FIELD(MIO_PIN_12, L1_SEL, 3, 2) + FIELD(MIO_PIN_12, L0_SEL, 1, 2) +REG32(MIO_PIN_13, 0x34) + FIELD(MIO_PIN_13, L3_SEL, 7, 3) + FIELD(MIO_PIN_13, L2_SEL, 5, 2) + FIELD(MIO_PIN_13, L1_SEL, 3, 2) + FIELD(MIO_PIN_13, L0_SEL, 1, 2) +REG32(MIO_PIN_14, 0x38) + FIELD(MIO_PIN_14, L3_SEL, 7, 3) + FIELD(MIO_PIN_14, L2_SEL, 5, 2) + FIELD(MIO_PIN_14, L1_SEL, 3, 2) + FIELD(MIO_PIN_14, L0_SEL, 1, 2) +REG32(MIO_PIN_15, 0x3c) + FIELD(MIO_PIN_15, L3_SEL, 7, 3) + FIELD(MIO_PIN_15, L2_SEL, 5, 2) + FIELD(MIO_PIN_15, L1_SEL, 3, 2) + FIELD(MIO_PIN_15, L0_SEL, 1, 2) +REG32(MIO_PIN_16, 0x40) + FIELD(MIO_PIN_16, L3_SEL, 7, 3) + FIELD(MIO_PIN_16, L2_SEL, 5, 2) + FIELD(MIO_PIN_16, L1_SEL, 3, 2) + FIELD(MIO_PIN_16, L0_SEL, 1, 2) +REG32(MIO_PIN_17, 0x44) + FIELD(MIO_PIN_17, L3_SEL, 7, 3) + FIELD(MIO_PIN_17, L2_SEL, 5, 2) + FIELD(MIO_PIN_17, L1_SEL, 3, 2) + FIELD(MIO_PIN_17, L0_SEL, 1, 2) +REG32(MIO_PIN_18, 0x48) + FIELD(MIO_PIN_18, L3_SEL, 7, 3) + 
FIELD(MIO_PIN_18, L2_SEL, 5, 2) + FIELD(MIO_PIN_18, L1_SEL, 3, 2) + FIELD(MIO_PIN_18, L0_SEL, 1, 2) +REG32(MIO_PIN_19, 0x4c) + FIELD(MIO_PIN_19, L3_SEL, 7, 3) + FIELD(MIO_PIN_19, L2_SEL, 5, 2) + FIELD(MIO_PIN_19, L1_SEL, 3, 2) + FIELD(MIO_PIN_19, L0_SEL, 1, 2) +REG32(MIO_PIN_20, 0x50) + FIELD(MIO_PIN_20, L3_SEL, 7, 3) + FIELD(MIO_PIN_20, L2_SEL, 5, 2) + FIELD(MIO_PIN_20, L1_SEL, 3, 2) + FIELD(MIO_PIN_20, L0_SEL, 1, 2) +REG32(MIO_PIN_21, 0x54) + FIELD(MIO_PIN_21, L3_SEL, 7, 3) + FIELD(MIO_PIN_21, L2_SEL, 5, 2) + FIELD(MIO_PIN_21, L1_SEL, 3, 2) + FIELD(MIO_PIN_21, L0_SEL, 1, 2) +REG32(MIO_PIN_22, 0x58) + FIELD(MIO_PIN_22, L3_SEL, 7, 3) + FIELD(MIO_PIN_22, L2_SEL, 5, 2) + FIELD(MIO_PIN_22, L1_SEL, 3, 2) + FIELD(MIO_PIN_22, L0_SEL, 1, 2) +REG32(MIO_PIN_23, 0x5c) + FIELD(MIO_PIN_23, L3_SEL, 7, 3) + FIELD(MIO_PIN_23, L2_SEL, 5, 2) + FIELD(MIO_PIN_23, L1_SEL, 3, 2) + FIELD(MIO_PIN_23, L0_SEL, 1, 2) +REG32(MIO_PIN_24, 0x60) + FIELD(MIO_PIN_24, L3_SEL, 7, 3) + FIELD(MIO_PIN_24, L2_SEL, 5, 2) + FIELD(MIO_PIN_24, L1_SEL, 3, 2) + FIELD(MIO_PIN_24, L0_SEL, 1, 2) +REG32(MIO_PIN_25, 0x64) + FIELD(MIO_PIN_25, L3_SEL, 7, 3) + FIELD(MIO_PIN_25, L2_SEL, 5, 2) + FIELD(MIO_PIN_25, L1_SEL, 3, 2) + FIELD(MIO_PIN_25, L0_SEL, 1, 2) +REG32(MIO_PIN_26, 0x68) + FIELD(MIO_PIN_26, L3_SEL, 7, 3) + FIELD(MIO_PIN_26, L2_SEL, 5, 2) + FIELD(MIO_PIN_26, L1_SEL, 3, 2) + FIELD(MIO_PIN_26, L0_SEL, 1, 2) +REG32(MIO_PIN_27, 0x6c) + FIELD(MIO_PIN_27, L3_SEL, 7, 3) + FIELD(MIO_PIN_27, L2_SEL, 5, 2) + FIELD(MIO_PIN_27, L1_SEL, 3, 2) + FIELD(MIO_PIN_27, L0_SEL, 1, 2) +REG32(MIO_PIN_28, 0x70) + FIELD(MIO_PIN_28, L3_SEL, 7, 3) + FIELD(MIO_PIN_28, L2_SEL, 5, 2) + FIELD(MIO_PIN_28, L1_SEL, 3, 2) + FIELD(MIO_PIN_28, L0_SEL, 1, 2) +REG32(MIO_PIN_29, 0x74) + FIELD(MIO_PIN_29, L3_SEL, 7, 3) + FIELD(MIO_PIN_29, L2_SEL, 5, 2) + FIELD(MIO_PIN_29, L1_SEL, 3, 2) + FIELD(MIO_PIN_29, L0_SEL, 1, 2) +REG32(MIO_PIN_30, 0x78) + FIELD(MIO_PIN_30, L3_SEL, 7, 3) + FIELD(MIO_PIN_30, L2_SEL, 5, 2) + FIELD(MIO_PIN_30, L1_SEL, 3, 2) + FIELD(MIO_PIN_30, L0_SEL, 1, 2) +REG32(MIO_PIN_31, 0x7c) + FIELD(MIO_PIN_31, L3_SEL, 7, 3) + FIELD(MIO_PIN_31, L2_SEL, 5, 2) + FIELD(MIO_PIN_31, L1_SEL, 3, 2) + FIELD(MIO_PIN_31, L0_SEL, 1, 2) +REG32(MIO_PIN_32, 0x80) + FIELD(MIO_PIN_32, L3_SEL, 7, 3) + FIELD(MIO_PIN_32, L2_SEL, 5, 2) + FIELD(MIO_PIN_32, L1_SEL, 3, 2) + FIELD(MIO_PIN_32, L0_SEL, 1, 2) +REG32(MIO_PIN_33, 0x84) + FIELD(MIO_PIN_33, L3_SEL, 7, 3) + FIELD(MIO_PIN_33, L2_SEL, 5, 2) + FIELD(MIO_PIN_33, L1_SEL, 3, 2) + FIELD(MIO_PIN_33, L0_SEL, 1, 2) +REG32(MIO_PIN_34, 0x88) + FIELD(MIO_PIN_34, L3_SEL, 7, 3) + FIELD(MIO_PIN_34, L2_SEL, 5, 2) + FIELD(MIO_PIN_34, L1_SEL, 3, 2) + FIELD(MIO_PIN_34, L0_SEL, 1, 2) +REG32(MIO_PIN_35, 0x8c) + FIELD(MIO_PIN_35, L3_SEL, 7, 3) + FIELD(MIO_PIN_35, L2_SEL, 5, 2) + FIELD(MIO_PIN_35, L1_SEL, 3, 2) + FIELD(MIO_PIN_35, L0_SEL, 1, 2) +REG32(MIO_PIN_36, 0x90) + FIELD(MIO_PIN_36, L3_SEL, 7, 3) + FIELD(MIO_PIN_36, L2_SEL, 5, 2) + FIELD(MIO_PIN_36, L1_SEL, 3, 2) + FIELD(MIO_PIN_36, L0_SEL, 1, 2) +REG32(MIO_PIN_37, 0x94) + FIELD(MIO_PIN_37, L3_SEL, 7, 3) + FIELD(MIO_PIN_37, L2_SEL, 5, 2) + FIELD(MIO_PIN_37, L1_SEL, 3, 2) + FIELD(MIO_PIN_37, L0_SEL, 1, 2) +REG32(MIO_PIN_38, 0x98) + FIELD(MIO_PIN_38, L3_SEL, 7, 3) + FIELD(MIO_PIN_38, L2_SEL, 5, 2) + FIELD(MIO_PIN_38, L1_SEL, 3, 2) + FIELD(MIO_PIN_38, L0_SEL, 1, 2) +REG32(MIO_PIN_39, 0x9c) + FIELD(MIO_PIN_39, L3_SEL, 7, 3) + FIELD(MIO_PIN_39, L2_SEL, 5, 2) + FIELD(MIO_PIN_39, L1_SEL, 3, 2) + FIELD(MIO_PIN_39, L0_SEL, 1, 2) +REG32(MIO_PIN_40, 0xa0) + FIELD(MIO_PIN_40, L3_SEL, 7, 3) + 
FIELD(MIO_PIN_40, L2_SEL, 5, 2) + FIELD(MIO_PIN_40, L1_SEL, 3, 2) + FIELD(MIO_PIN_40, L0_SEL, 1, 2) +REG32(MIO_PIN_41, 0xa4) + FIELD(MIO_PIN_41, L3_SEL, 7, 3) + FIELD(MIO_PIN_41, L2_SEL, 5, 2) + FIELD(MIO_PIN_41, L1_SEL, 3, 2) + FIELD(MIO_PIN_41, L0_SEL, 1, 2) +REG32(MIO_PIN_42, 0xa8) + FIELD(MIO_PIN_42, L3_SEL, 7, 3) + FIELD(MIO_PIN_42, L2_SEL, 5, 2) + FIELD(MIO_PIN_42, L1_SEL, 3, 2) + FIELD(MIO_PIN_42, L0_SEL, 1, 2) +REG32(MIO_PIN_43, 0xac) + FIELD(MIO_PIN_43, L3_SEL, 7, 3) + FIELD(MIO_PIN_43, L2_SEL, 5, 2) + FIELD(MIO_PIN_43, L1_SEL, 3, 2) + FIELD(MIO_PIN_43, L0_SEL, 1, 2) +REG32(MIO_PIN_44, 0xb0) + FIELD(MIO_PIN_44, L3_SEL, 7, 3) + FIELD(MIO_PIN_44, L2_SEL, 5, 2) + FIELD(MIO_PIN_44, L1_SEL, 3, 2) + FIELD(MIO_PIN_44, L0_SEL, 1, 2) +REG32(MIO_PIN_45, 0xb4) + FIELD(MIO_PIN_45, L3_SEL, 7, 3) + FIELD(MIO_PIN_45, L2_SEL, 5, 2) + FIELD(MIO_PIN_45, L1_SEL, 3, 2) + FIELD(MIO_PIN_45, L0_SEL, 1, 2) +REG32(MIO_PIN_46, 0xb8) + FIELD(MIO_PIN_46, L3_SEL, 7, 3) + FIELD(MIO_PIN_46, L2_SEL, 5, 2) + FIELD(MIO_PIN_46, L1_SEL, 3, 2) + FIELD(MIO_PIN_46, L0_SEL, 1, 2) +REG32(MIO_PIN_47, 0xbc) + FIELD(MIO_PIN_47, L3_SEL, 7, 3) + FIELD(MIO_PIN_47, L2_SEL, 5, 2) + FIELD(MIO_PIN_47, L1_SEL, 3, 2) + FIELD(MIO_PIN_47, L0_SEL, 1, 2) +REG32(MIO_PIN_48, 0xc0) + FIELD(MIO_PIN_48, L3_SEL, 7, 3) + FIELD(MIO_PIN_48, L2_SEL, 5, 2) + FIELD(MIO_PIN_48, L1_SEL, 3, 2) + FIELD(MIO_PIN_48, L0_SEL, 1, 2) +REG32(MIO_PIN_49, 0xc4) + FIELD(MIO_PIN_49, L3_SEL, 7, 3) + FIELD(MIO_PIN_49, L2_SEL, 5, 2) + FIELD(MIO_PIN_49, L1_SEL, 3, 2) + FIELD(MIO_PIN_49, L0_SEL, 1, 2) +REG32(MIO_PIN_50, 0xc8) + FIELD(MIO_PIN_50, L3_SEL, 7, 3) + FIELD(MIO_PIN_50, L2_SEL, 5, 2) + FIELD(MIO_PIN_50, L1_SEL, 3, 2) + FIELD(MIO_PIN_50, L0_SEL, 1, 2) +REG32(MIO_PIN_51, 0xcc) + FIELD(MIO_PIN_51, L3_SEL, 7, 3) + FIELD(MIO_PIN_51, L2_SEL, 5, 2) + FIELD(MIO_PIN_51, L1_SEL, 3, 2) + FIELD(MIO_PIN_51, L0_SEL, 1, 2) +REG32(BNK0_EN_RX, 0x100) + FIELD(BNK0_EN_RX, BNK0_EN_RX, 0, 26) +REG32(BNK0_SEL_RX0, 0x104) +REG32(BNK0_SEL_RX1, 0x108) + FIELD(BNK0_SEL_RX1, BNK0_SEL_RX, 0, 20) +REG32(BNK0_EN_RX_SCHMITT_HYST, 0x10c) + FIELD(BNK0_EN_RX_SCHMITT_HYST, BNK0_EN_RX_SCHMITT_HYST, 0, 26) +REG32(BNK0_EN_WK_PD, 0x110) + FIELD(BNK0_EN_WK_PD, BNK0_EN_WK_PD, 0, 26) +REG32(BNK0_EN_WK_PU, 0x114) + FIELD(BNK0_EN_WK_PU, BNK0_EN_WK_PU, 0, 26) +REG32(BNK0_SEL_DRV0, 0x118) +REG32(BNK0_SEL_DRV1, 0x11c) + FIELD(BNK0_SEL_DRV1, BNK0_SEL_DRV, 0, 20) +REG32(BNK0_SEL_SLEW, 0x120) + FIELD(BNK0_SEL_SLEW, BNK0_SEL_SLEW, 0, 26) +REG32(BNK0_EN_DFT_OPT_INV, 0x124) + FIELD(BNK0_EN_DFT_OPT_INV, BNK0_EN_DFT_OPT_INV, 0, 26) +REG32(BNK0_EN_PAD2PAD_LOOPBACK, 0x128) + FIELD(BNK0_EN_PAD2PAD_LOOPBACK, BNK0_EN_PAD2PAD_LOOPBACK, 0, 13) +REG32(BNK0_RX_SPARE0, 0x12c) +REG32(BNK0_RX_SPARE1, 0x130) + FIELD(BNK0_RX_SPARE1, BNK0_RX_SPARE, 0, 20) +REG32(BNK0_TX_SPARE0, 0x134) +REG32(BNK0_TX_SPARE1, 0x138) + FIELD(BNK0_TX_SPARE1, BNK0_TX_SPARE, 0, 20) +REG32(BNK0_SEL_EN1P8, 0x13c) + FIELD(BNK0_SEL_EN1P8, BNK0_SEL_EN1P8, 0, 1) +REG32(BNK0_EN_B_POR_DETECT, 0x140) + FIELD(BNK0_EN_B_POR_DETECT, BNK0_EN_B_POR_DETECT, 0, 1) +REG32(BNK0_LPF_BYP_POR_DETECT, 0x144) + FIELD(BNK0_LPF_BYP_POR_DETECT, BNK0_LPF_BYP_POR_DETECT, 0, 1) +REG32(BNK0_EN_LATCH, 0x148) + FIELD(BNK0_EN_LATCH, BNK0_EN_LATCH, 0, 1) +REG32(BNK0_VBG_LPF_BYP_B, 0x14c) + FIELD(BNK0_VBG_LPF_BYP_B, BNK0_VBG_LPF_BYP_B, 0, 1) +REG32(BNK0_EN_AMP_B, 0x150) + FIELD(BNK0_EN_AMP_B, BNK0_EN_AMP_B, 0, 2) +REG32(BNK0_SPARE_BIAS, 0x154) + FIELD(BNK0_SPARE_BIAS, BNK0_SPARE_BIAS, 0, 4) +REG32(BNK0_DRIVER_BIAS, 0x158) + FIELD(BNK0_DRIVER_BIAS, BNK0_DRIVER_BIAS, 0, 15) 
+REG32(BNK0_VMODE, 0x15c) + FIELD(BNK0_VMODE, BNK0_VMODE, 0, 1) +REG32(BNK0_SEL_AUX_IO_RX, 0x160) + FIELD(BNK0_SEL_AUX_IO_RX, BNK0_SEL_AUX_IO_RX, 0, 26) +REG32(BNK0_EN_TX_HS_MODE, 0x164) + FIELD(BNK0_EN_TX_HS_MODE, BNK0_EN_TX_HS_MODE, 0, 26) +REG32(MIO_MST_TRI0, 0x200) + FIELD(MIO_MST_TRI0, PIN_25_TRI, 25, 1) + FIELD(MIO_MST_TRI0, PIN_24_TRI, 24, 1) + FIELD(MIO_MST_TRI0, PIN_23_TRI, 23, 1) + FIELD(MIO_MST_TRI0, PIN_22_TRI, 22, 1) + FIELD(MIO_MST_TRI0, PIN_21_TRI, 21, 1) + FIELD(MIO_MST_TRI0, PIN_20_TRI, 20, 1) + FIELD(MIO_MST_TRI0, PIN_19_TRI, 19, 1) + FIELD(MIO_MST_TRI0, PIN_18_TRI, 18, 1) + FIELD(MIO_MST_TRI0, PIN_17_TRI, 17, 1) + FIELD(MIO_MST_TRI0, PIN_16_TRI, 16, 1) + FIELD(MIO_MST_TRI0, PIN_15_TRI, 15, 1) + FIELD(MIO_MST_TRI0, PIN_14_TRI, 14, 1) + FIELD(MIO_MST_TRI0, PIN_13_TRI, 13, 1) + FIELD(MIO_MST_TRI0, PIN_12_TRI, 12, 1) + FIELD(MIO_MST_TRI0, PIN_11_TRI, 11, 1) + FIELD(MIO_MST_TRI0, PIN_10_TRI, 10, 1) + FIELD(MIO_MST_TRI0, PIN_09_TRI, 9, 1) + FIELD(MIO_MST_TRI0, PIN_08_TRI, 8, 1) + FIELD(MIO_MST_TRI0, PIN_07_TRI, 7, 1) + FIELD(MIO_MST_TRI0, PIN_06_TRI, 6, 1) + FIELD(MIO_MST_TRI0, PIN_05_TRI, 5, 1) + FIELD(MIO_MST_TRI0, PIN_04_TRI, 4, 1) + FIELD(MIO_MST_TRI0, PIN_03_TRI, 3, 1) + FIELD(MIO_MST_TRI0, PIN_02_TRI, 2, 1) + FIELD(MIO_MST_TRI0, PIN_01_TRI, 1, 1) + FIELD(MIO_MST_TRI0, PIN_00_TRI, 0, 1) +REG32(MIO_MST_TRI1, 0x204) + FIELD(MIO_MST_TRI1, PIN_51_TRI, 25, 1) + FIELD(MIO_MST_TRI1, PIN_50_TRI, 24, 1) + FIELD(MIO_MST_TRI1, PIN_49_TRI, 23, 1) + FIELD(MIO_MST_TRI1, PIN_48_TRI, 22, 1) + FIELD(MIO_MST_TRI1, PIN_47_TRI, 21, 1) + FIELD(MIO_MST_TRI1, PIN_46_TRI, 20, 1) + FIELD(MIO_MST_TRI1, PIN_45_TRI, 19, 1) + FIELD(MIO_MST_TRI1, PIN_44_TRI, 18, 1) + FIELD(MIO_MST_TRI1, PIN_43_TRI, 17, 1) + FIELD(MIO_MST_TRI1, PIN_42_TRI, 16, 1) + FIELD(MIO_MST_TRI1, PIN_41_TRI, 15, 1) + FIELD(MIO_MST_TRI1, PIN_40_TRI, 14, 1) + FIELD(MIO_MST_TRI1, PIN_39_TRI, 13, 1) + FIELD(MIO_MST_TRI1, PIN_38_TRI, 12, 1) + FIELD(MIO_MST_TRI1, PIN_37_TRI, 11, 1) + FIELD(MIO_MST_TRI1, PIN_36_TRI, 10, 1) + FIELD(MIO_MST_TRI1, PIN_35_TRI, 9, 1) + FIELD(MIO_MST_TRI1, PIN_34_TRI, 8, 1) + FIELD(MIO_MST_TRI1, PIN_33_TRI, 7, 1) + FIELD(MIO_MST_TRI1, PIN_32_TRI, 6, 1) + FIELD(MIO_MST_TRI1, PIN_31_TRI, 5, 1) + FIELD(MIO_MST_TRI1, PIN_30_TRI, 4, 1) + FIELD(MIO_MST_TRI1, PIN_29_TRI, 3, 1) + FIELD(MIO_MST_TRI1, PIN_28_TRI, 2, 1) + FIELD(MIO_MST_TRI1, PIN_27_TRI, 1, 1) + FIELD(MIO_MST_TRI1, PIN_26_TRI, 0, 1) +REG32(BNK1_EN_RX, 0x300) + FIELD(BNK1_EN_RX, BNK1_EN_RX, 0, 26) +REG32(BNK1_SEL_RX0, 0x304) +REG32(BNK1_SEL_RX1, 0x308) + FIELD(BNK1_SEL_RX1, BNK1_SEL_RX, 0, 20) +REG32(BNK1_EN_RX_SCHMITT_HYST, 0x30c) + FIELD(BNK1_EN_RX_SCHMITT_HYST, BNK1_EN_RX_SCHMITT_HYST, 0, 26) +REG32(BNK1_EN_WK_PD, 0x310) + FIELD(BNK1_EN_WK_PD, BNK1_EN_WK_PD, 0, 26) +REG32(BNK1_EN_WK_PU, 0x314) + FIELD(BNK1_EN_WK_PU, BNK1_EN_WK_PU, 0, 26) +REG32(BNK1_SEL_DRV0, 0x318) +REG32(BNK1_SEL_DRV1, 0x31c) + FIELD(BNK1_SEL_DRV1, BNK1_SEL_DRV, 0, 20) +REG32(BNK1_SEL_SLEW, 0x320) + FIELD(BNK1_SEL_SLEW, BNK1_SEL_SLEW, 0, 26) +REG32(BNK1_EN_DFT_OPT_INV, 0x324) + FIELD(BNK1_EN_DFT_OPT_INV, BNK1_EN_DFT_OPT_INV, 0, 26) +REG32(BNK1_EN_PAD2PAD_LOOPBACK, 0x328) + FIELD(BNK1_EN_PAD2PAD_LOOPBACK, BNK1_EN_PAD2PAD_LOOPBACK, 0, 13) +REG32(BNK1_RX_SPARE0, 0x32c) +REG32(BNK1_RX_SPARE1, 0x330) + FIELD(BNK1_RX_SPARE1, BNK1_RX_SPARE, 0, 20) +REG32(BNK1_TX_SPARE0, 0x334) +REG32(BNK1_TX_SPARE1, 0x338) + FIELD(BNK1_TX_SPARE1, BNK1_TX_SPARE, 0, 20) +REG32(BNK1_SEL_EN1P8, 0x33c) + FIELD(BNK1_SEL_EN1P8, BNK1_SEL_EN1P8, 0, 1) +REG32(BNK1_EN_B_POR_DETECT, 0x340) + FIELD(BNK1_EN_B_POR_DETECT, 
BNK1_EN_B_POR_DETECT, 0, 1) +REG32(BNK1_LPF_BYP_POR_DETECT, 0x344) + FIELD(BNK1_LPF_BYP_POR_DETECT, BNK1_LPF_BYP_POR_DETECT, 0, 1) +REG32(BNK1_EN_LATCH, 0x348) + FIELD(BNK1_EN_LATCH, BNK1_EN_LATCH, 0, 1) +REG32(BNK1_VBG_LPF_BYP_B, 0x34c) + FIELD(BNK1_VBG_LPF_BYP_B, BNK1_VBG_LPF_BYP_B, 0, 1) +REG32(BNK1_EN_AMP_B, 0x350) + FIELD(BNK1_EN_AMP_B, BNK1_EN_AMP_B, 0, 2) +REG32(BNK1_SPARE_BIAS, 0x354) + FIELD(BNK1_SPARE_BIAS, BNK1_SPARE_BIAS, 0, 4) +REG32(BNK1_DRIVER_BIAS, 0x358) + FIELD(BNK1_DRIVER_BIAS, BNK1_DRIVER_BIAS, 0, 15) +REG32(BNK1_VMODE, 0x35c) + FIELD(BNK1_VMODE, BNK1_VMODE, 0, 1) +REG32(BNK1_SEL_AUX_IO_RX, 0x360) + FIELD(BNK1_SEL_AUX_IO_RX, BNK1_SEL_AUX_IO_RX, 0, 26) +REG32(BNK1_EN_TX_HS_MODE, 0x364) + FIELD(BNK1_EN_TX_HS_MODE, BNK1_EN_TX_HS_MODE, 0, 26) +REG32(SD0_CLK_CTRL, 0x400) + FIELD(SD0_CLK_CTRL, SDIO0_FBCLK_SEL, 2, 1) + FIELD(SD0_CLK_CTRL, SDIO0_RX_SRC_SEL, 0, 2) +REG32(SD0_CTRL_REG, 0x404) + FIELD(SD0_CTRL_REG, SD0_EMMC_SEL, 0, 1) +REG32(SD0_CONFIG_REG1, 0x410) + FIELD(SD0_CONFIG_REG1, SD0_BASECLK, 7, 8) + FIELD(SD0_CONFIG_REG1, SD0_TUNIGCOUNT, 1, 6) + FIELD(SD0_CONFIG_REG1, SD0_ASYNCWKPENA, 0, 1) +REG32(SD0_CONFIG_REG2, 0x414) + FIELD(SD0_CONFIG_REG2, SD0_SLOTTYPE, 12, 2) + FIELD(SD0_CONFIG_REG2, SD0_ASYCINTR, 11, 1) + FIELD(SD0_CONFIG_REG2, SD0_64BIT, 10, 1) + FIELD(SD0_CONFIG_REG2, SD0_1P8V, 9, 1) + FIELD(SD0_CONFIG_REG2, SD0_3P0V, 8, 1) + FIELD(SD0_CONFIG_REG2, SD0_3P3V, 7, 1) + FIELD(SD0_CONFIG_REG2, SD0_SUSPRES, 6, 1) + FIELD(SD0_CONFIG_REG2, SD0_SDMA, 5, 1) + FIELD(SD0_CONFIG_REG2, SD0_HIGHSPEED, 4, 1) + FIELD(SD0_CONFIG_REG2, SD0_ADMA2, 3, 1) + FIELD(SD0_CONFIG_REG2, SD0_8BIT, 2, 1) + FIELD(SD0_CONFIG_REG2, SD0_MAXBLK, 0, 2) +REG32(SD0_CONFIG_REG3, 0x418) + FIELD(SD0_CONFIG_REG3, SD0_TUNINGSDR50, 10, 1) + FIELD(SD0_CONFIG_REG3, SD0_RETUNETMR, 6, 4) + FIELD(SD0_CONFIG_REG3, SD0_DDRIVER, 5, 1) + FIELD(SD0_CONFIG_REG3, SD0_CDRIVER, 4, 1) + FIELD(SD0_CONFIG_REG3, SD0_ADRIVER, 3, 1) + FIELD(SD0_CONFIG_REG3, SD0_DDR50, 2, 1) + FIELD(SD0_CONFIG_REG3, SD0_SDR104, 1, 1) + FIELD(SD0_CONFIG_REG3, SD0_SDR50, 0, 1) +REG32(SD0_INITPRESET, 0x41c) + FIELD(SD0_INITPRESET, SD0_INITPRESET, 0, 13) +REG32(SD0_DSPPRESET, 0x420) + FIELD(SD0_DSPPRESET, SD0_DSPPRESET, 0, 13) +REG32(SD0_HSPDPRESET, 0x424) + FIELD(SD0_HSPDPRESET, SD0_HSPDPRESET, 0, 13) +REG32(SD0_SDR12PRESET, 0x428) + FIELD(SD0_SDR12PRESET, SD0_SDR12PRESET, 0, 13) +REG32(SD0_SDR25PRESET, 0x42c) + FIELD(SD0_SDR25PRESET, SD0_SDR25PRESET, 0, 13) +REG32(SD0_SDR50PRSET, 0x430) + FIELD(SD0_SDR50PRSET, SD0_SDR50PRESET, 0, 13) +REG32(SD0_SDR104PRST, 0x434) + FIELD(SD0_SDR104PRST, SD0_SDR104PRESET, 0, 13) +REG32(SD0_DDR50PRESET, 0x438) + FIELD(SD0_DDR50PRESET, SD0_DDR50PRESET, 0, 13) +REG32(SD0_MAXCUR1P8, 0x43c) + FIELD(SD0_MAXCUR1P8, SD0_MAXCUR1P8, 0, 8) +REG32(SD0_MAXCUR3P0, 0x440) + FIELD(SD0_MAXCUR3P0, SD0_MAXCUR3P0, 0, 8) +REG32(SD0_MAXCUR3P3, 0x444) + FIELD(SD0_MAXCUR3P3, SD0_MAXCUR3P3, 0, 8) +REG32(SD0_DLL_CTRL, 0x448) + FIELD(SD0_DLL_CTRL, SD0_CLKSTABLE_CFG, 9, 1) + FIELD(SD0_DLL_CTRL, SD0_DLL_CFG, 5, 4) + FIELD(SD0_DLL_CTRL, SD0_DLL_PSDONE, 4, 1) + FIELD(SD0_DLL_CTRL, SD0_DLL_OVF, 3, 1) + FIELD(SD0_DLL_CTRL, SD0_DLL_RST, 2, 1) + FIELD(SD0_DLL_CTRL, SD0_DLL_TESTMODE, 1, 1) + FIELD(SD0_DLL_CTRL, SD0_DLL_LOCK, 0, 1) +REG32(SD0_CDN_CTRL, 0x44c) + FIELD(SD0_CDN_CTRL, SD0_CDN_CTRL, 0, 1) +REG32(SD0_DLL_TEST, 0x450) + FIELD(SD0_DLL_TEST, DLL_DIV, 16, 8) + FIELD(SD0_DLL_TEST, DLL_TX_SEL, 9, 7) + FIELD(SD0_DLL_TEST, DLL_RX_SEL, 0, 9) +REG32(SD0_RX_TUNING_SEL, 0x454) + FIELD(SD0_RX_TUNING_SEL, SD0_RX_SEL, 0, 9) +REG32(SD0_DLL_DIV_MAP0, 
0x458) + FIELD(SD0_DLL_DIV_MAP0, DIV_3, 24, 8) + FIELD(SD0_DLL_DIV_MAP0, DIV_2, 16, 8) + FIELD(SD0_DLL_DIV_MAP0, DIV_1, 8, 8) + FIELD(SD0_DLL_DIV_MAP0, DIV_0, 0, 8) +REG32(SD0_DLL_DIV_MAP1, 0x45c) + FIELD(SD0_DLL_DIV_MAP1, DIV_7, 24, 8) + FIELD(SD0_DLL_DIV_MAP1, DIV_6, 16, 8) + FIELD(SD0_DLL_DIV_MAP1, DIV_5, 8, 8) + FIELD(SD0_DLL_DIV_MAP1, DIV_4, 0, 8) +REG32(SD0_IOU_COHERENT_CTRL, 0x460) + FIELD(SD0_IOU_COHERENT_CTRL, SD0_AXI_COH, 0, 4) +REG32(SD0_IOU_INTERCONNECT_ROUTE, 0x464) + FIELD(SD0_IOU_INTERCONNECT_ROUTE, SD0, 0, 1) +REG32(SD0_IOU_RAM, 0x468) + FIELD(SD0_IOU_RAM, EMASA0, 6, 1) + FIELD(SD0_IOU_RAM, EMAB0, 3, 3) + FIELD(SD0_IOU_RAM, EMAA0, 0, 3) +REG32(SD0_IOU_INTERCONNECT_QOS, 0x46c) + FIELD(SD0_IOU_INTERCONNECT_QOS, SD0_QOS, 0, 4) +REG32(SD1_CLK_CTRL, 0x480) + FIELD(SD1_CLK_CTRL, SDIO1_FBCLK_SEL, 1, 1) + FIELD(SD1_CLK_CTRL, SDIO1_RX_SRC_SEL, 0, 1) +REG32(SD1_CTRL_REG, 0x484) + FIELD(SD1_CTRL_REG, SD1_EMMC_SEL, 0, 1) +REG32(SD1_CONFIG_REG1, 0x490) + FIELD(SD1_CONFIG_REG1, SD1_BASECLK, 7, 8) + FIELD(SD1_CONFIG_REG1, SD1_TUNIGCOUNT, 1, 6) + FIELD(SD1_CONFIG_REG1, SD1_ASYNCWKPENA, 0, 1) +REG32(SD1_CONFIG_REG2, 0x494) + FIELD(SD1_CONFIG_REG2, SD1_SLOTTYPE, 12, 2) + FIELD(SD1_CONFIG_REG2, SD1_ASYCINTR, 11, 1) + FIELD(SD1_CONFIG_REG2, SD1_64BIT, 10, 1) + FIELD(SD1_CONFIG_REG2, SD1_1P8V, 9, 1) + FIELD(SD1_CONFIG_REG2, SD1_3P0V, 8, 1) + FIELD(SD1_CONFIG_REG2, SD1_3P3V, 7, 1) + FIELD(SD1_CONFIG_REG2, SD1_SUSPRES, 6, 1) + FIELD(SD1_CONFIG_REG2, SD1_SDMA, 5, 1) + FIELD(SD1_CONFIG_REG2, SD1_HIGHSPEED, 4, 1) + FIELD(SD1_CONFIG_REG2, SD1_ADMA2, 3, 1) + FIELD(SD1_CONFIG_REG2, SD1_8BIT, 2, 1) + FIELD(SD1_CONFIG_REG2, SD1_MAXBLK, 0, 2) +REG32(SD1_CONFIG_REG3, 0x498) + FIELD(SD1_CONFIG_REG3, SD1_TUNINGSDR50, 10, 1) + FIELD(SD1_CONFIG_REG3, SD1_RETUNETMR, 6, 4) + FIELD(SD1_CONFIG_REG3, SD1_DDRIVER, 5, 1) + FIELD(SD1_CONFIG_REG3, SD1_CDRIVER, 4, 1) + FIELD(SD1_CONFIG_REG3, SD1_ADRIVER, 3, 1) + FIELD(SD1_CONFIG_REG3, SD1_DDR50, 2, 1) + FIELD(SD1_CONFIG_REG3, SD1_SDR104, 1, 1) + FIELD(SD1_CONFIG_REG3, SD1_SDR50, 0, 1) +REG32(SD1_INITPRESET, 0x49c) + FIELD(SD1_INITPRESET, SD1_INITPRESET, 0, 13) +REG32(SD1_DSPPRESET, 0x4a0) + FIELD(SD1_DSPPRESET, SD1_DSPPRESET, 0, 13) +REG32(SD1_HSPDPRESET, 0x4a4) + FIELD(SD1_HSPDPRESET, SD1_HSPDPRESET, 0, 13) +REG32(SD1_SDR12PRESET, 0x4a8) + FIELD(SD1_SDR12PRESET, SD1_SDR12PRESET, 0, 13) +REG32(SD1_SDR25PRESET, 0x4ac) + FIELD(SD1_SDR25PRESET, SD1_SDR25PRESET, 0, 13) +REG32(SD1_SDR50PRSET, 0x4b0) + FIELD(SD1_SDR50PRSET, SD1_SDR50PRESET, 0, 13) +REG32(SD1_SDR104PRST, 0x4b4) + FIELD(SD1_SDR104PRST, SD1_SDR104PRESET, 0, 13) +REG32(SD1_DDR50PRESET, 0x4b8) + FIELD(SD1_DDR50PRESET, SD1_DDR50PRESET, 0, 13) +REG32(SD1_MAXCUR1P8, 0x4bc) + FIELD(SD1_MAXCUR1P8, SD1_MAXCUR1P8, 0, 8) +REG32(SD1_MAXCUR3P0, 0x4c0) + FIELD(SD1_MAXCUR3P0, SD1_MAXCUR3P0, 0, 8) +REG32(SD1_MAXCUR3P3, 0x4c4) + FIELD(SD1_MAXCUR3P3, SD1_MAXCUR3P3, 0, 8) +REG32(SD1_DLL_CTRL, 0x4c8) + FIELD(SD1_DLL_CTRL, SD1_CLKSTABLE_CFG, 9, 1) + FIELD(SD1_DLL_CTRL, SD1_DLL_CFG, 5, 4) + FIELD(SD1_DLL_CTRL, SD1_DLL_PSDONE, 4, 1) + FIELD(SD1_DLL_CTRL, SD1_DLL_OVF, 3, 1) + FIELD(SD1_DLL_CTRL, SD1_DLL_RST, 2, 1) + FIELD(SD1_DLL_CTRL, SD1_DLL_TESTMODE, 1, 1) + FIELD(SD1_DLL_CTRL, SD1_DLL_LOCK, 0, 1) +REG32(SD1_CDN_CTRL, 0x4cc) + FIELD(SD1_CDN_CTRL, SD1_CDN_CTRL, 0, 1) +REG32(SD1_DLL_TEST, 0x4d0) + FIELD(SD1_DLL_TEST, DLL_DIV, 16, 8) + FIELD(SD1_DLL_TEST, DLL_TX_SEL, 9, 7) + FIELD(SD1_DLL_TEST, DLL_RX_SEL, 0, 9) +REG32(SD1_RX_TUNING_SEL, 0x4d4) + FIELD(SD1_RX_TUNING_SEL, SD1_RX_SEL, 0, 9) +REG32(SD1_DLL_DIV_MAP0, 0x4d8) + 
FIELD(SD1_DLL_DIV_MAP0, DIV_3, 24, 8) + FIELD(SD1_DLL_DIV_MAP0, DIV_2, 16, 8) + FIELD(SD1_DLL_DIV_MAP0, DIV_1, 8, 8) + FIELD(SD1_DLL_DIV_MAP0, DIV_0, 0, 8) +REG32(SD1_DLL_DIV_MAP1, 0x4dc) + FIELD(SD1_DLL_DIV_MAP1, DIV_7, 24, 8) + FIELD(SD1_DLL_DIV_MAP1, DIV_6, 16, 8) + FIELD(SD1_DLL_DIV_MAP1, DIV_5, 8, 8) + FIELD(SD1_DLL_DIV_MAP1, DIV_4, 0, 8) +REG32(SD1_IOU_COHERENT_CTRL, 0x4e0) + FIELD(SD1_IOU_COHERENT_CTRL, SD1_AXI_COH, 0, 4) +REG32(SD1_IOU_INTERCONNECT_ROUTE, 0x4e4) + FIELD(SD1_IOU_INTERCONNECT_ROUTE, SD1, 0, 1) +REG32(SD1_IOU_RAM, 0x4e8) + FIELD(SD1_IOU_RAM, EMASA0, 6, 1) + FIELD(SD1_IOU_RAM, EMAB0, 3, 3) + FIELD(SD1_IOU_RAM, EMAA0, 0, 3) +REG32(SD1_IOU_INTERCONNECT_QOS, 0x4ec) + FIELD(SD1_IOU_INTERCONNECT_QOS, SD1_QOS, 0, 4) +REG32(OSPI_QSPI_IOU_AXI_MUX_SEL, 0x504) + FIELD(OSPI_QSPI_IOU_AXI_MUX_SEL, OSPI_MUX_SEL, 1, 1) + FIELD(OSPI_QSPI_IOU_AXI_MUX_SEL, QSPI_OSPI_MUX_SEL, 0, 1) +REG32(QSPI_IOU_COHERENT_CTRL, 0x508) + FIELD(QSPI_IOU_COHERENT_CTRL, QSPI_AXI_COH, 0, 4) +REG32(QSPI_IOU_INTERCONNECT_ROUTE, 0x50c) + FIELD(QSPI_IOU_INTERCONNECT_ROUTE, QSPI, 0, 1) +REG32(QSPI_IOU_RAM, 0x510) + FIELD(QSPI_IOU_RAM, EMASA1, 13, 1) + FIELD(QSPI_IOU_RAM, EMAB1, 10, 3) + FIELD(QSPI_IOU_RAM, EMAA1, 7, 3) + FIELD(QSPI_IOU_RAM, EMASA0, 6, 1) + FIELD(QSPI_IOU_RAM, EMAB0, 3, 3) + FIELD(QSPI_IOU_RAM, EMAA0, 0, 3) +REG32(QSPI_IOU_INTERCONNECT_QOS, 0x514) + FIELD(QSPI_IOU_INTERCONNECT_QOS, QSPI_QOS, 0, 4) +REG32(OSPI_IOU_COHERENT_CTRL, 0x530) + FIELD(OSPI_IOU_COHERENT_CTRL, OSPI_AXI_COH, 0, 4) +REG32(OSPI_IOU_INTERCONNECT_ROUTE, 0x534) + FIELD(OSPI_IOU_INTERCONNECT_ROUTE, OSPI, 0, 1) +REG32(OSPI_IOU_RAM, 0x538) + FIELD(OSPI_IOU_RAM, EMAS0, 5, 1) + FIELD(OSPI_IOU_RAM, EMAW0, 3, 2) + FIELD(OSPI_IOU_RAM, EMA0, 0, 3) +REG32(OSPI_IOU_INTERCONNECT_QOS, 0x53c) + FIELD(OSPI_IOU_INTERCONNECT_QOS, OSPI_QOS, 0, 4) +REG32(OSPI_REFCLK_DLY_CTRL, 0x540) + FIELD(OSPI_REFCLK_DLY_CTRL, DLY1, 3, 2) + FIELD(OSPI_REFCLK_DLY_CTRL, DLY0, 0, 3) +REG32(CUR_PWR_ST, 0x600) + FIELD(CUR_PWR_ST, U2PMU, 0, 2) +REG32(CONNECT_ST, 0x604) + FIELD(CONNECT_ST, U2PMU, 0, 1) +REG32(PW_STATE_REQ, 0x608) + FIELD(PW_STATE_REQ, BIT_1_0, 0, 2) +REG32(HOST_U2_PORT_DISABLE, 0x60c) + FIELD(HOST_U2_PORT_DISABLE, BIT_0, 0, 1) +REG32(DBG_U2PMU, 0x610) +REG32(DBG_U2PMU_EXT1, 0x614) +REG32(DBG_U2PMU_EXT2, 0x618) + FIELD(DBG_U2PMU_EXT2, BIT_67_64, 0, 4) +REG32(PME_GEN_U2PMU, 0x61c) + FIELD(PME_GEN_U2PMU, BIT_0, 0, 1) +REG32(PWR_CONFIG_USB2, 0x620) + FIELD(PWR_CONFIG_USB2, STRAP, 0, 30) +REG32(PHY_HUB, 0x624) + FIELD(PHY_HUB, VBUS_CTRL, 1, 1) + FIELD(PHY_HUB, OVER_CURRENT, 0, 1) +REG32(CTRL, 0x700) + FIELD(CTRL, SLVERR_ENABLE, 0, 1) +REG32(ISR, 0x800) + FIELD(ISR, ADDR_DECODE_ERR, 0, 1) +REG32(IMR, 0x804) + FIELD(IMR, ADDR_DECODE_ERR, 0, 1) +REG32(IER, 0x808) + FIELD(IER, ADDR_DECODE_ERR, 0, 1) +REG32(IDR, 0x80c) + FIELD(IDR, ADDR_DECODE_ERR, 0, 1) +REG32(ITR, 0x810) + FIELD(ITR, ADDR_DECODE_ERR, 0, 1) +REG32(PARITY_ISR, 0x814) + FIELD(PARITY_ISR, PERR_AXI_SD1_IOU, 12, 1) + FIELD(PARITY_ISR, PERR_AXI_SD0_IOU, 11, 1) + FIELD(PARITY_ISR, PERR_AXI_QSPI_IOU, 10, 1) + FIELD(PARITY_ISR, PERR_AXI_OSPI_IOU, 9, 1) + FIELD(PARITY_ISR, PERR_IOU_SD1, 8, 1) + FIELD(PARITY_ISR, PERR_IOU_SD0, 7, 1) + FIELD(PARITY_ISR, PERR_IOU_QSPI1, 6, 1) + FIELD(PARITY_ISR, PERR_IOUSLCR_SECURE_APB, 5, 1) + FIELD(PARITY_ISR, PERR_IOUSLCR_APB, 4, 1) + FIELD(PARITY_ISR, PERR_QSPI0_APB, 3, 1) + FIELD(PARITY_ISR, PERR_OSPI_APB, 2, 1) + FIELD(PARITY_ISR, PERR_I2C_APB, 1, 1) + FIELD(PARITY_ISR, PERR_GPIO_APB, 0, 1) +REG32(PARITY_IMR, 0x818) + FIELD(PARITY_IMR, PERR_AXI_SD1_IOU, 12, 1) + 
FIELD(PARITY_IMR, PERR_AXI_SD0_IOU, 11, 1) + FIELD(PARITY_IMR, PERR_AXI_QSPI_IOU, 10, 1) + FIELD(PARITY_IMR, PERR_AXI_OSPI_IOU, 9, 1) + FIELD(PARITY_IMR, PERR_IOU_SD1, 8, 1) + FIELD(PARITY_IMR, PERR_IOU_SD0, 7, 1) + FIELD(PARITY_IMR, PERR_IOU_QSPI1, 6, 1) + FIELD(PARITY_IMR, PERR_IOUSLCR_SECURE_APB, 5, 1) + FIELD(PARITY_IMR, PERR_IOUSLCR_APB, 4, 1) + FIELD(PARITY_IMR, PERR_QSPI0_APB, 3, 1) + FIELD(PARITY_IMR, PERR_OSPI_APB, 2, 1) + FIELD(PARITY_IMR, PERR_I2C_APB, 1, 1) + FIELD(PARITY_IMR, PERR_GPIO_APB, 0, 1) +REG32(PARITY_IER, 0x81c) + FIELD(PARITY_IER, PERR_AXI_SD1_IOU, 12, 1) + FIELD(PARITY_IER, PERR_AXI_SD0_IOU, 11, 1) + FIELD(PARITY_IER, PERR_AXI_QSPI_IOU, 10, 1) + FIELD(PARITY_IER, PERR_AXI_OSPI_IOU, 9, 1) + FIELD(PARITY_IER, PERR_IOU_SD1, 8, 1) + FIELD(PARITY_IER, PERR_IOU_SD0, 7, 1) + FIELD(PARITY_IER, PERR_IOU_QSPI1, 6, 1) + FIELD(PARITY_IER, PERR_IOUSLCR_SECURE_APB, 5, 1) + FIELD(PARITY_IER, PERR_IOUSLCR_APB, 4, 1) + FIELD(PARITY_IER, PERR_QSPI0_APB, 3, 1) + FIELD(PARITY_IER, PERR_OSPI_APB, 2, 1) + FIELD(PARITY_IER, PERR_I2C_APB, 1, 1) + FIELD(PARITY_IER, PERR_GPIO_APB, 0, 1) +REG32(PARITY_IDR, 0x820) + FIELD(PARITY_IDR, PERR_AXI_SD1_IOU, 12, 1) + FIELD(PARITY_IDR, PERR_AXI_SD0_IOU, 11, 1) + FIELD(PARITY_IDR, PERR_AXI_QSPI_IOU, 10, 1) + FIELD(PARITY_IDR, PERR_AXI_OSPI_IOU, 9, 1) + FIELD(PARITY_IDR, PERR_IOU_SD1, 8, 1) + FIELD(PARITY_IDR, PERR_IOU_SD0, 7, 1) + FIELD(PARITY_IDR, PERR_IOU_QSPI1, 6, 1) + FIELD(PARITY_IDR, PERR_IOUSLCR_SECURE_APB, 5, 1) + FIELD(PARITY_IDR, PERR_IOUSLCR_APB, 4, 1) + FIELD(PARITY_IDR, PERR_QSPI0_APB, 3, 1) + FIELD(PARITY_IDR, PERR_OSPI_APB, 2, 1) + FIELD(PARITY_IDR, PERR_I2C_APB, 1, 1) + FIELD(PARITY_IDR, PERR_GPIO_APB, 0, 1) +REG32(PARITY_ITR, 0x824) + FIELD(PARITY_ITR, PERR_AXI_SD1_IOU, 12, 1) + FIELD(PARITY_ITR, PERR_AXI_SD0_IOU, 11, 1) + FIELD(PARITY_ITR, PERR_AXI_QSPI_IOU, 10, 1) + FIELD(PARITY_ITR, PERR_AXI_OSPI_IOU, 9, 1) + FIELD(PARITY_ITR, PERR_IOU_SD1, 8, 1) + FIELD(PARITY_ITR, PERR_IOU_SD0, 7, 1) + FIELD(PARITY_ITR, PERR_IOU_QSPI1, 6, 1) + FIELD(PARITY_ITR, PERR_IOUSLCR_SECURE_APB, 5, 1) + FIELD(PARITY_ITR, PERR_IOUSLCR_APB, 4, 1) + FIELD(PARITY_ITR, PERR_QSPI0_APB, 3, 1) + FIELD(PARITY_ITR, PERR_OSPI_APB, 2, 1) + FIELD(PARITY_ITR, PERR_I2C_APB, 1, 1) + FIELD(PARITY_ITR, PERR_GPIO_APB, 0, 1) +REG32(WPROT0, 0x828) + FIELD(WPROT0, ACTIVE, 0, 1) + +static void parity_imr_update_irq(XlnxVersalPmcIouSlcr *s) +{ + bool pending = s->regs[R_PARITY_ISR] & ~s->regs[R_PARITY_IMR]; + qemu_set_irq(s->irq_parity_imr, pending); +} + +static void parity_isr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque); + parity_imr_update_irq(s); +} + +static uint64_t parity_ier_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque); + uint32_t val = val64; + + s->regs[R_PARITY_IMR] &= ~val; + parity_imr_update_irq(s); + return 0; +} + +static uint64_t parity_idr_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque); + uint32_t val = val64; + + s->regs[R_PARITY_IMR] |= val; + parity_imr_update_irq(s); + return 0; +} + +static uint64_t parity_itr_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque); + uint32_t val = val64; + + s->regs[R_PARITY_ISR] |= val; + parity_imr_update_irq(s); + return 0; +} + +static void imr_update_irq(XlnxVersalPmcIouSlcr *s) +{ + bool pending = s->regs[R_ISR] & ~s->regs[R_IMR]; + qemu_set_irq(s->irq_imr, pending); 
+} + +static void isr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque); + imr_update_irq(s); +} + +static uint64_t ier_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque); + uint32_t val = val64; + + s->regs[R_IMR] &= ~val; + imr_update_irq(s); + return 0; +} + +static uint64_t idr_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque); + uint32_t val = val64; + + s->regs[R_IMR] |= val; + imr_update_irq(s); + return 0; +} + +static uint64_t itr_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque); + uint32_t val = val64; + + s->regs[R_ISR] |= val; + imr_update_irq(s); + return 0; +} + +static uint64_t sd0_ctrl_reg_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque); + uint32_t prev = ARRAY_FIELD_EX32(s->regs, SD0_CTRL_REG, SD0_EMMC_SEL); + + if (prev != (val64 & R_SD0_CTRL_REG_SD0_EMMC_SEL_MASK)) { + qemu_set_irq(s->sd_emmc_sel[0], !!val64); + } + + return val64; +} + +static uint64_t sd1_ctrl_reg_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque); + uint32_t prev = ARRAY_FIELD_EX32(s->regs, SD1_CTRL_REG, SD1_EMMC_SEL); + + if (prev != (val64 & R_SD1_CTRL_REG_SD1_EMMC_SEL_MASK)) { + qemu_set_irq(s->sd_emmc_sel[1], !!val64); + } + + return val64; +} + +static uint64_t ospi_qspi_iou_axi_mux_sel_prew(RegisterInfo *reg, + uint64_t val64) +{ + XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque); + uint32_t val32 = (uint32_t) val64; + uint8_t ospi_mux_sel = FIELD_EX32(val32, OSPI_QSPI_IOU_AXI_MUX_SEL, + OSPI_MUX_SEL); + uint8_t qspi_ospi_mux_sel = FIELD_EX32(val32, OSPI_QSPI_IOU_AXI_MUX_SEL, + QSPI_OSPI_MUX_SEL); + + if (ospi_mux_sel != + ARRAY_FIELD_EX32(s->regs, OSPI_QSPI_IOU_AXI_MUX_SEL, OSPI_MUX_SEL)) { + qemu_set_irq(s->ospi_mux_sel, !!ospi_mux_sel); + } + + if (qspi_ospi_mux_sel != + ARRAY_FIELD_EX32(s->regs, OSPI_QSPI_IOU_AXI_MUX_SEL, + QSPI_OSPI_MUX_SEL)) { + qemu_set_irq(s->qspi_ospi_mux_sel, !!qspi_ospi_mux_sel); + } + + return val64; +} + +static RegisterAccessInfo pmc_iou_slcr_regs_info[] = { + { .name = "MIO_PIN_0", .addr = A_MIO_PIN_0, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_1", .addr = A_MIO_PIN_1, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_2", .addr = A_MIO_PIN_2, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_3", .addr = A_MIO_PIN_3, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_4", .addr = A_MIO_PIN_4, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_5", .addr = A_MIO_PIN_5, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_6", .addr = A_MIO_PIN_6, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_7", .addr = A_MIO_PIN_7, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_8", .addr = A_MIO_PIN_8, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_9", .addr = A_MIO_PIN_9, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_10", .addr = A_MIO_PIN_10, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_11", .addr = A_MIO_PIN_11, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_12", .addr = A_MIO_PIN_12, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_13", .addr = A_MIO_PIN_13, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_14", .addr = A_MIO_PIN_14, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_15", .addr = A_MIO_PIN_15, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_16", .addr = A_MIO_PIN_16, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_17", .addr = A_MIO_PIN_17, + 
.rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_18", .addr = A_MIO_PIN_18, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_19", .addr = A_MIO_PIN_19, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_20", .addr = A_MIO_PIN_20, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_21", .addr = A_MIO_PIN_21, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_22", .addr = A_MIO_PIN_22, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_23", .addr = A_MIO_PIN_23, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_24", .addr = A_MIO_PIN_24, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_25", .addr = A_MIO_PIN_25, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_26", .addr = A_MIO_PIN_26, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_27", .addr = A_MIO_PIN_27, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_28", .addr = A_MIO_PIN_28, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_29", .addr = A_MIO_PIN_29, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_30", .addr = A_MIO_PIN_30, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_31", .addr = A_MIO_PIN_31, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_32", .addr = A_MIO_PIN_32, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_33", .addr = A_MIO_PIN_33, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_34", .addr = A_MIO_PIN_34, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_35", .addr = A_MIO_PIN_35, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_36", .addr = A_MIO_PIN_36, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_37", .addr = A_MIO_PIN_37, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_38", .addr = A_MIO_PIN_38, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_39", .addr = A_MIO_PIN_39, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_40", .addr = A_MIO_PIN_40, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_41", .addr = A_MIO_PIN_41, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_42", .addr = A_MIO_PIN_42, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_43", .addr = A_MIO_PIN_43, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_44", .addr = A_MIO_PIN_44, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_45", .addr = A_MIO_PIN_45, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_46", .addr = A_MIO_PIN_46, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_47", .addr = A_MIO_PIN_47, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_48", .addr = A_MIO_PIN_48, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_49", .addr = A_MIO_PIN_49, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_50", .addr = A_MIO_PIN_50, + .rsvd = 0xfffffc01, + },{ .name = "MIO_PIN_51", .addr = A_MIO_PIN_51, + .rsvd = 0xfffffc01, + },{ .name = "BNK0_EN_RX", .addr = A_BNK0_EN_RX, + .reset = 0x3ffffff, + .rsvd = 0xfc000000, + },{ .name = "BNK0_SEL_RX0", .addr = A_BNK0_SEL_RX0, + .reset = 0xffffffff, + },{ .name = "BNK0_SEL_RX1", .addr = A_BNK0_SEL_RX1, + .reset = 0xfffff, + .rsvd = 0xfff00000, + },{ .name = "BNK0_EN_RX_SCHMITT_HYST", .addr = A_BNK0_EN_RX_SCHMITT_HYST, + .rsvd = 0xfc000000, + },{ .name = "BNK0_EN_WK_PD", .addr = A_BNK0_EN_WK_PD, + .rsvd = 0xfc000000, + },{ .name = "BNK0_EN_WK_PU", .addr = A_BNK0_EN_WK_PU, + .reset = 0x3ffffff, + .rsvd = 0xfc000000, + },{ .name = "BNK0_SEL_DRV0", .addr = A_BNK0_SEL_DRV0, + .reset = 0xffffffff, + },{ .name = "BNK0_SEL_DRV1", .addr = A_BNK0_SEL_DRV1, + .reset = 0xfffff, + .rsvd = 0xfff00000, + },{ .name = "BNK0_SEL_SLEW", .addr = A_BNK0_SEL_SLEW, + .rsvd = 0xfc000000, + },{ .name = "BNK0_EN_DFT_OPT_INV", .addr = A_BNK0_EN_DFT_OPT_INV, + .rsvd = 0xfc000000, + },{ .name = "BNK0_EN_PAD2PAD_LOOPBACK", + .addr = A_BNK0_EN_PAD2PAD_LOOPBACK, + .rsvd = 0xffffe000, + },{ .name = "BNK0_RX_SPARE0", .addr = A_BNK0_RX_SPARE0, + },{ .name = "BNK0_RX_SPARE1", .addr 
= A_BNK0_RX_SPARE1, + .rsvd = 0xfff00000, + },{ .name = "BNK0_TX_SPARE0", .addr = A_BNK0_TX_SPARE0, + },{ .name = "BNK0_TX_SPARE1", .addr = A_BNK0_TX_SPARE1, + .rsvd = 0xfff00000, + },{ .name = "BNK0_SEL_EN1P8", .addr = A_BNK0_SEL_EN1P8, + .rsvd = 0xfffffffe, + },{ .name = "BNK0_EN_B_POR_DETECT", .addr = A_BNK0_EN_B_POR_DETECT, + .rsvd = 0xfffffffe, + },{ .name = "BNK0_LPF_BYP_POR_DETECT", .addr = A_BNK0_LPF_BYP_POR_DETECT, + .reset = 0x1, + .rsvd = 0xfffffffe, + },{ .name = "BNK0_EN_LATCH", .addr = A_BNK0_EN_LATCH, + .rsvd = 0xfffffffe, + },{ .name = "BNK0_VBG_LPF_BYP_B", .addr = A_BNK0_VBG_LPF_BYP_B, + .reset = 0x1, + .rsvd = 0xfffffffe, + },{ .name = "BNK0_EN_AMP_B", .addr = A_BNK0_EN_AMP_B, + .rsvd = 0xfffffffc, + },{ .name = "BNK0_SPARE_BIAS", .addr = A_BNK0_SPARE_BIAS, + .rsvd = 0xfffffff0, + },{ .name = "BNK0_DRIVER_BIAS", .addr = A_BNK0_DRIVER_BIAS, + .rsvd = 0xffff8000, + },{ .name = "BNK0_VMODE", .addr = A_BNK0_VMODE, + .rsvd = 0xfffffffe, + .ro = 0x1, + },{ .name = "BNK0_SEL_AUX_IO_RX", .addr = A_BNK0_SEL_AUX_IO_RX, + .rsvd = 0xfc000000, + },{ .name = "BNK0_EN_TX_HS_MODE", .addr = A_BNK0_EN_TX_HS_MODE, + .rsvd = 0xfc000000, + },{ .name = "MIO_MST_TRI0", .addr = A_MIO_MST_TRI0, + .reset = 0x3ffffff, + .rsvd = 0xfc000000, + },{ .name = "MIO_MST_TRI1", .addr = A_MIO_MST_TRI1, + .reset = 0x3ffffff, + .rsvd = 0xfc000000, + },{ .name = "BNK1_EN_RX", .addr = A_BNK1_EN_RX, + .reset = 0x3ffffff, + .rsvd = 0xfc000000, + },{ .name = "BNK1_SEL_RX0", .addr = A_BNK1_SEL_RX0, + .reset = 0xffffffff, + },{ .name = "BNK1_SEL_RX1", .addr = A_BNK1_SEL_RX1, + .reset = 0xfffff, + .rsvd = 0xfff00000, + },{ .name = "BNK1_EN_RX_SCHMITT_HYST", .addr = A_BNK1_EN_RX_SCHMITT_HYST, + .rsvd = 0xfc000000, + },{ .name = "BNK1_EN_WK_PD", .addr = A_BNK1_EN_WK_PD, + .rsvd = 0xfc000000, + },{ .name = "BNK1_EN_WK_PU", .addr = A_BNK1_EN_WK_PU, + .reset = 0x3ffffff, + .rsvd = 0xfc000000, + },{ .name = "BNK1_SEL_DRV0", .addr = A_BNK1_SEL_DRV0, + .reset = 0xffffffff, + },{ .name = "BNK1_SEL_DRV1", .addr = A_BNK1_SEL_DRV1, + .reset = 0xfffff, + .rsvd = 0xfff00000, + },{ .name = "BNK1_SEL_SLEW", .addr = A_BNK1_SEL_SLEW, + .rsvd = 0xfc000000, + },{ .name = "BNK1_EN_DFT_OPT_INV", .addr = A_BNK1_EN_DFT_OPT_INV, + .rsvd = 0xfc000000, + },{ .name = "BNK1_EN_PAD2PAD_LOOPBACK", + .addr = A_BNK1_EN_PAD2PAD_LOOPBACK, + .rsvd = 0xffffe000, + },{ .name = "BNK1_RX_SPARE0", .addr = A_BNK1_RX_SPARE0, + },{ .name = "BNK1_RX_SPARE1", .addr = A_BNK1_RX_SPARE1, + .rsvd = 0xfff00000, + },{ .name = "BNK1_TX_SPARE0", .addr = A_BNK1_TX_SPARE0, + },{ .name = "BNK1_TX_SPARE1", .addr = A_BNK1_TX_SPARE1, + .rsvd = 0xfff00000, + },{ .name = "BNK1_SEL_EN1P8", .addr = A_BNK1_SEL_EN1P8, + .rsvd = 0xfffffffe, + },{ .name = "BNK1_EN_B_POR_DETECT", .addr = A_BNK1_EN_B_POR_DETECT, + .rsvd = 0xfffffffe, + },{ .name = "BNK1_LPF_BYP_POR_DETECT", .addr = A_BNK1_LPF_BYP_POR_DETECT, + .reset = 0x1, + .rsvd = 0xfffffffe, + },{ .name = "BNK1_EN_LATCH", .addr = A_BNK1_EN_LATCH, + .rsvd = 0xfffffffe, + },{ .name = "BNK1_VBG_LPF_BYP_B", .addr = A_BNK1_VBG_LPF_BYP_B, + .reset = 0x1, + .rsvd = 0xfffffffe, + },{ .name = "BNK1_EN_AMP_B", .addr = A_BNK1_EN_AMP_B, + .rsvd = 0xfffffffc, + },{ .name = "BNK1_SPARE_BIAS", .addr = A_BNK1_SPARE_BIAS, + .rsvd = 0xfffffff0, + },{ .name = "BNK1_DRIVER_BIAS", .addr = A_BNK1_DRIVER_BIAS, + .rsvd = 0xffff8000, + },{ .name = "BNK1_VMODE", .addr = A_BNK1_VMODE, + .rsvd = 0xfffffffe, + .ro = 0x1, + },{ .name = "BNK1_SEL_AUX_IO_RX", .addr = A_BNK1_SEL_AUX_IO_RX, + .rsvd = 0xfc000000, + },{ .name = "BNK1_EN_TX_HS_MODE", .addr = 
A_BNK1_EN_TX_HS_MODE, + .rsvd = 0xfc000000, + },{ .name = "SD0_CLK_CTRL", .addr = A_SD0_CLK_CTRL, + .rsvd = 0xfffffff8, + },{ .name = "SD0_CTRL_REG", .addr = A_SD0_CTRL_REG, + .rsvd = 0xfffffffe, + .pre_write = sd0_ctrl_reg_prew, + },{ .name = "SD0_CONFIG_REG1", .addr = A_SD0_CONFIG_REG1, + .reset = 0x3250, + .rsvd = 0xffff8000, + },{ .name = "SD0_CONFIG_REG2", .addr = A_SD0_CONFIG_REG2, + .reset = 0xffc, + .rsvd = 0xffffc000, + },{ .name = "SD0_CONFIG_REG3", .addr = A_SD0_CONFIG_REG3, + .reset = 0x407, + .rsvd = 0xfffff800, + },{ .name = "SD0_INITPRESET", .addr = A_SD0_INITPRESET, + .reset = 0x100, + .rsvd = 0xffffe000, + },{ .name = "SD0_DSPPRESET", .addr = A_SD0_DSPPRESET, + .reset = 0x4, + .rsvd = 0xffffe000, + },{ .name = "SD0_HSPDPRESET", .addr = A_SD0_HSPDPRESET, + .reset = 0x2, + .rsvd = 0xffffe000, + },{ .name = "SD0_SDR12PRESET", .addr = A_SD0_SDR12PRESET, + .reset = 0x4, + .rsvd = 0xffffe000, + },{ .name = "SD0_SDR25PRESET", .addr = A_SD0_SDR25PRESET, + .reset = 0x2, + .rsvd = 0xffffe000, + },{ .name = "SD0_SDR50PRSET", .addr = A_SD0_SDR50PRSET, + .reset = 0x1, + .rsvd = 0xffffe000, + },{ .name = "SD0_SDR104PRST", .addr = A_SD0_SDR104PRST, + .rsvd = 0xffffe000, + },{ .name = "SD0_DDR50PRESET", .addr = A_SD0_DDR50PRESET, + .reset = 0x2, + .rsvd = 0xffffe000, + },{ .name = "SD0_MAXCUR1P8", .addr = A_SD0_MAXCUR1P8, + .rsvd = 0xffffff00, + },{ .name = "SD0_MAXCUR3P0", .addr = A_SD0_MAXCUR3P0, + .rsvd = 0xffffff00, + },{ .name = "SD0_MAXCUR3P3", .addr = A_SD0_MAXCUR3P3, + .rsvd = 0xffffff00, + },{ .name = "SD0_DLL_CTRL", .addr = A_SD0_DLL_CTRL, + .reset = 0x1, + .rsvd = 0xfffffc00, + .ro = 0x19, + },{ .name = "SD0_CDN_CTRL", .addr = A_SD0_CDN_CTRL, + .rsvd = 0xfffffffe, + },{ .name = "SD0_DLL_TEST", .addr = A_SD0_DLL_TEST, + .rsvd = 0xff000000, + },{ .name = "SD0_RX_TUNING_SEL", .addr = A_SD0_RX_TUNING_SEL, + .rsvd = 0xfffffe00, + .ro = 0x1ff, + },{ .name = "SD0_DLL_DIV_MAP0", .addr = A_SD0_DLL_DIV_MAP0, + .reset = 0x50505050, + },{ .name = "SD0_DLL_DIV_MAP1", .addr = A_SD0_DLL_DIV_MAP1, + .reset = 0x50505050, + },{ .name = "SD0_IOU_COHERENT_CTRL", .addr = A_SD0_IOU_COHERENT_CTRL, + .rsvd = 0xfffffff0, + },{ .name = "SD0_IOU_INTERCONNECT_ROUTE", + .addr = A_SD0_IOU_INTERCONNECT_ROUTE, + .rsvd = 0xfffffffe, + },{ .name = "SD0_IOU_RAM", .addr = A_SD0_IOU_RAM, + .reset = 0x24, + .rsvd = 0xffffff80, + },{ .name = "SD0_IOU_INTERCONNECT_QOS", + .addr = A_SD0_IOU_INTERCONNECT_QOS, + .rsvd = 0xfffffff0, + },{ .name = "SD1_CLK_CTRL", .addr = A_SD1_CLK_CTRL, + .rsvd = 0xfffffffc, + },{ .name = "SD1_CTRL_REG", .addr = A_SD1_CTRL_REG, + .rsvd = 0xfffffffe, + .pre_write = sd1_ctrl_reg_prew, + },{ .name = "SD1_CONFIG_REG1", .addr = A_SD1_CONFIG_REG1, + .reset = 0x3250, + .rsvd = 0xffff8000, + },{ .name = "SD1_CONFIG_REG2", .addr = A_SD1_CONFIG_REG2, + .reset = 0xffc, + .rsvd = 0xffffc000, + },{ .name = "SD1_CONFIG_REG3", .addr = A_SD1_CONFIG_REG3, + .reset = 0x407, + .rsvd = 0xfffff800, + },{ .name = "SD1_INITPRESET", .addr = A_SD1_INITPRESET, + .reset = 0x100, + .rsvd = 0xffffe000, + },{ .name = "SD1_DSPPRESET", .addr = A_SD1_DSPPRESET, + .reset = 0x4, + .rsvd = 0xffffe000, + },{ .name = "SD1_HSPDPRESET", .addr = A_SD1_HSPDPRESET, + .reset = 0x2, + .rsvd = 0xffffe000, + },{ .name = "SD1_SDR12PRESET", .addr = A_SD1_SDR12PRESET, + .reset = 0x4, + .rsvd = 0xffffe000, + },{ .name = "SD1_SDR25PRESET", .addr = A_SD1_SDR25PRESET, + .reset = 0x2, + .rsvd = 0xffffe000, + },{ .name = "SD1_SDR50PRSET", .addr = A_SD1_SDR50PRSET, + .reset = 0x1, + .rsvd = 0xffffe000, + },{ .name = "SD1_SDR104PRST", .addr = 
A_SD1_SDR104PRST, + .rsvd = 0xffffe000, + },{ .name = "SD1_DDR50PRESET", .addr = A_SD1_DDR50PRESET, + .reset = 0x2, + .rsvd = 0xffffe000, + },{ .name = "SD1_MAXCUR1P8", .addr = A_SD1_MAXCUR1P8, + .rsvd = 0xffffff00, + },{ .name = "SD1_MAXCUR3P0", .addr = A_SD1_MAXCUR3P0, + .rsvd = 0xffffff00, + },{ .name = "SD1_MAXCUR3P3", .addr = A_SD1_MAXCUR3P3, + .rsvd = 0xffffff00, + },{ .name = "SD1_DLL_CTRL", .addr = A_SD1_DLL_CTRL, + .reset = 0x1, + .rsvd = 0xfffffc00, + .ro = 0x19, + },{ .name = "SD1_CDN_CTRL", .addr = A_SD1_CDN_CTRL, + .rsvd = 0xfffffffe, + },{ .name = "SD1_DLL_TEST", .addr = A_SD1_DLL_TEST, + .rsvd = 0xff000000, + },{ .name = "SD1_RX_TUNING_SEL", .addr = A_SD1_RX_TUNING_SEL, + .rsvd = 0xfffffe00, + .ro = 0x1ff, + },{ .name = "SD1_DLL_DIV_MAP0", .addr = A_SD1_DLL_DIV_MAP0, + .reset = 0x50505050, + },{ .name = "SD1_DLL_DIV_MAP1", .addr = A_SD1_DLL_DIV_MAP1, + .reset = 0x50505050, + },{ .name = "SD1_IOU_COHERENT_CTRL", .addr = A_SD1_IOU_COHERENT_CTRL, + .rsvd = 0xfffffff0, + },{ .name = "SD1_IOU_INTERCONNECT_ROUTE", + .addr = A_SD1_IOU_INTERCONNECT_ROUTE, + .rsvd = 0xfffffffe, + },{ .name = "SD1_IOU_RAM", .addr = A_SD1_IOU_RAM, + .reset = 0x24, + .rsvd = 0xffffff80, + },{ .name = "SD1_IOU_INTERCONNECT_QOS", + .addr = A_SD1_IOU_INTERCONNECT_QOS, + .rsvd = 0xfffffff0, + },{ .name = "OSPI_QSPI_IOU_AXI_MUX_SEL", + .addr = A_OSPI_QSPI_IOU_AXI_MUX_SEL, + .reset = 0x1, + .rsvd = 0xfffffffc, + .pre_write = ospi_qspi_iou_axi_mux_sel_prew, + },{ .name = "QSPI_IOU_COHERENT_CTRL", .addr = A_QSPI_IOU_COHERENT_CTRL, + .rsvd = 0xfffffff0, + },{ .name = "QSPI_IOU_INTERCONNECT_ROUTE", + .addr = A_QSPI_IOU_INTERCONNECT_ROUTE, + .rsvd = 0xfffffffe, + },{ .name = "QSPI_IOU_RAM", .addr = A_QSPI_IOU_RAM, + .reset = 0x1224, + .rsvd = 0xffffc000, + },{ .name = "QSPI_IOU_INTERCONNECT_QOS", + .addr = A_QSPI_IOU_INTERCONNECT_QOS, + .rsvd = 0xfffffff0, + },{ .name = "OSPI_IOU_COHERENT_CTRL", .addr = A_OSPI_IOU_COHERENT_CTRL, + .rsvd = 0xfffffff0, + },{ .name = "OSPI_IOU_INTERCONNECT_ROUTE", + .addr = A_OSPI_IOU_INTERCONNECT_ROUTE, + .rsvd = 0xfffffffe, + },{ .name = "OSPI_IOU_RAM", .addr = A_OSPI_IOU_RAM, + .reset = 0xa, + .rsvd = 0xffffffc0, + },{ .name = "OSPI_IOU_INTERCONNECT_QOS", + .addr = A_OSPI_IOU_INTERCONNECT_QOS, + .rsvd = 0xfffffff0, + },{ .name = "OSPI_REFCLK_DLY_CTRL", .addr = A_OSPI_REFCLK_DLY_CTRL, + .reset = 0x13, + .rsvd = 0xffffffe0, + },{ .name = "CUR_PWR_ST", .addr = A_CUR_PWR_ST, + .rsvd = 0xfffffffc, + .ro = 0x3, + },{ .name = "CONNECT_ST", .addr = A_CONNECT_ST, + .rsvd = 0xfffffffe, + .ro = 0x1, + },{ .name = "PW_STATE_REQ", .addr = A_PW_STATE_REQ, + .rsvd = 0xfffffffc, + },{ .name = "HOST_U2_PORT_DISABLE", .addr = A_HOST_U2_PORT_DISABLE, + .rsvd = 0xfffffffe, + },{ .name = "DBG_U2PMU", .addr = A_DBG_U2PMU, + .ro = 0xffffffff, + },{ .name = "DBG_U2PMU_EXT1", .addr = A_DBG_U2PMU_EXT1, + .ro = 0xffffffff, + },{ .name = "DBG_U2PMU_EXT2", .addr = A_DBG_U2PMU_EXT2, + .rsvd = 0xfffffff0, + .ro = 0xf, + },{ .name = "PME_GEN_U2PMU", .addr = A_PME_GEN_U2PMU, + .rsvd = 0xfffffffe, + .ro = 0x1, + },{ .name = "PWR_CONFIG_USB2", .addr = A_PWR_CONFIG_USB2, + .rsvd = 0xc0000000, + },{ .name = "PHY_HUB", .addr = A_PHY_HUB, + .rsvd = 0xfffffffc, + .ro = 0x2, + },{ .name = "CTRL", .addr = A_CTRL, + },{ .name = "ISR", .addr = A_ISR, + .w1c = 0x1, + .post_write = isr_postw, + },{ .name = "IMR", .addr = A_IMR, + .reset = 0x1, + .ro = 0x1, + },{ .name = "IER", .addr = A_IER, + .pre_write = ier_prew, + },{ .name = "IDR", .addr = A_IDR, + .pre_write = idr_prew, + },{ .name = "ITR", .addr = A_ITR, + .pre_write = 
itr_prew, + },{ .name = "PARITY_ISR", .addr = A_PARITY_ISR, + .w1c = 0x1fff, + .post_write = parity_isr_postw, + },{ .name = "PARITY_IMR", .addr = A_PARITY_IMR, + .reset = 0x1fff, + .ro = 0x1fff, + },{ .name = "PARITY_IER", .addr = A_PARITY_IER, + .pre_write = parity_ier_prew, + },{ .name = "PARITY_IDR", .addr = A_PARITY_IDR, + .pre_write = parity_idr_prew, + },{ .name = "PARITY_ITR", .addr = A_PARITY_ITR, + .pre_write = parity_itr_prew, + },{ .name = "WPROT0", .addr = A_WPROT0, + .reset = 0x1, + } +}; + +static void xlnx_versal_pmc_iou_slcr_reset_init(Object *obj, ResetType type) +{ + XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(obj); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + register_reset(&s->regs_info[i]); + } +} + +static void xlnx_versal_pmc_iou_slcr_reset_hold(Object *obj) +{ + XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(obj); + + parity_imr_update_irq(s); + imr_update_irq(s); + + /* + * Setup OSPI_QSPI mux + * By default axi slave interface is enabled for ospi-dma + */ + qemu_set_irq(s->ospi_mux_sel, 0); + qemu_set_irq(s->qspi_ospi_mux_sel, 1); +} + +static const MemoryRegionOps pmc_iou_slcr_ops = { + .read = register_read_memory, + .write = register_write_memory, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void xlnx_versal_pmc_iou_slcr_realize(DeviceState *dev, Error **errp) +{ + XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(dev); + + qdev_init_gpio_out_named(dev, s->sd_emmc_sel, "sd-emmc-sel", 2); + qdev_init_gpio_out_named(dev, &s->qspi_ospi_mux_sel, + "qspi-ospi-mux-sel", 1); + qdev_init_gpio_out_named(dev, &s->ospi_mux_sel, "ospi-mux-sel", 1); +} + +static void xlnx_versal_pmc_iou_slcr_init(Object *obj) +{ + XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RegisterInfoArray *reg_array; + + memory_region_init(&s->iomem, obj, TYPE_XILINX_VERSAL_PMC_IOU_SLCR, + XILINX_VERSAL_PMC_IOU_SLCR_R_MAX * 4); + reg_array = + register_init_block32(DEVICE(obj), pmc_iou_slcr_regs_info, + ARRAY_SIZE(pmc_iou_slcr_regs_info), + s->regs_info, s->regs, + &pmc_iou_slcr_ops, + XILINX_VERSAL_PMC_IOU_SLCR_ERR_DEBUG, + XILINX_VERSAL_PMC_IOU_SLCR_R_MAX * 4); + memory_region_add_subregion(&s->iomem, + 0x0, + ®_array->mem); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq_parity_imr); + sysbus_init_irq(sbd, &s->irq_imr); +} + +static const VMStateDescription vmstate_pmc_iou_slcr = { + .name = TYPE_XILINX_VERSAL_PMC_IOU_SLCR, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxVersalPmcIouSlcr, + XILINX_VERSAL_PMC_IOU_SLCR_R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static void xlnx_versal_pmc_iou_slcr_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + dc->realize = xlnx_versal_pmc_iou_slcr_realize; + dc->vmsd = &vmstate_pmc_iou_slcr; + rc->phases.enter = xlnx_versal_pmc_iou_slcr_reset_init; + rc->phases.hold = xlnx_versal_pmc_iou_slcr_reset_hold; +} + +static const TypeInfo xlnx_versal_pmc_iou_slcr_info = { + .name = TYPE_XILINX_VERSAL_PMC_IOU_SLCR, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxVersalPmcIouSlcr), + .class_init = xlnx_versal_pmc_iou_slcr_class_init, + .instance_init = xlnx_versal_pmc_iou_slcr_init, +}; + +static void xlnx_versal_pmc_iou_slcr_register_types(void) +{ + type_register_static(&xlnx_versal_pmc_iou_slcr_info); +} + 
+type_init(xlnx_versal_pmc_iou_slcr_register_types) diff --git a/hw/misc/xlnx-zynqmp-apu-ctrl.c b/hw/misc/xlnx-zynqmp-apu-ctrl.c new file mode 100644 index 0000000000..20de23cf67 --- /dev/null +++ b/hw/misc/xlnx-zynqmp-apu-ctrl.c @@ -0,0 +1,253 @@ +/* + * QEMU model of the ZynqMP APU Control. + * + * Copyright (c) 2013-2022 Xilinx Inc + * SPDX-License-Identifier: GPL-2.0-or-later + * + * Written by Peter Crosthwaite and + * Edgar E. Iglesias + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/register.h" + +#include "qemu/bitops.h" +#include "qapi/qmp/qerror.h" + +#include "hw/misc/xlnx-zynqmp-apu-ctrl.h" + +#ifndef XILINX_ZYNQMP_APU_ERR_DEBUG +#define XILINX_ZYNQMP_APU_ERR_DEBUG 0 +#endif + +static void update_wfi_out(void *opaque) +{ + XlnxZynqMPAPUCtrl *s = XLNX_ZYNQMP_APU_CTRL(opaque); + unsigned int i, wfi_pending; + + wfi_pending = s->cpu_pwrdwn_req & s->cpu_in_wfi; + for (i = 0; i < APU_MAX_CPU; i++) { + qemu_set_irq(s->wfi_out[i], !!(wfi_pending & (1 << i))); + } +} + +static void zynqmp_apu_rvbar_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqMPAPUCtrl *s = XLNX_ZYNQMP_APU_CTRL(reg->opaque); + int i; + + for (i = 0; i < APU_MAX_CPU; ++i) { + uint64_t rvbar = s->regs[R_RVBARADDR0L + 2 * i] + + ((uint64_t)s->regs[R_RVBARADDR0H + 2 * i] << 32); + if (s->cpus[i]) { + object_property_set_int(OBJECT(s->cpus[i]), "rvbar", rvbar, + &error_abort); + } + } +} + +static void zynqmp_apu_pwrctl_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqMPAPUCtrl *s = XLNX_ZYNQMP_APU_CTRL(reg->opaque); + unsigned int i, new; + + for (i = 0; i < APU_MAX_CPU; i++) { + new = val & (1 << i); + /* Check if CPU's CPUPWRDNREQ has changed. If yes, update GPIOs. 
*/ + if (new != (s->cpu_pwrdwn_req & (1 << i))) { + qemu_set_irq(s->cpu_power_status[i], !!new); + } + s->cpu_pwrdwn_req &= ~(1 << i); + s->cpu_pwrdwn_req |= new; + } + update_wfi_out(s); +} + +static void imr_update_irq(XlnxZynqMPAPUCtrl *s) +{ + bool pending = s->regs[R_ISR] & ~s->regs[R_IMR]; + qemu_set_irq(s->irq_imr, pending); +} + +static void isr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPAPUCtrl *s = XLNX_ZYNQMP_APU_CTRL(reg->opaque); + imr_update_irq(s); +} + +static uint64_t ien_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPAPUCtrl *s = XLNX_ZYNQMP_APU_CTRL(reg->opaque); + uint32_t val = val64; + + s->regs[R_IMR] &= ~val; + imr_update_irq(s); + return 0; +} + +static uint64_t ids_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPAPUCtrl *s = XLNX_ZYNQMP_APU_CTRL(reg->opaque); + uint32_t val = val64; + + s->regs[R_IMR] |= val; + imr_update_irq(s); + return 0; +} + +static const RegisterAccessInfo zynqmp_apu_regs_info[] = { +#define RVBAR_REGDEF(n) \ + { .name = "RVBAR CPU " #n " Low", .addr = A_RVBARADDR ## n ## L, \ + .reset = 0xffff0000ul, \ + .post_write = zynqmp_apu_rvbar_post_write, \ + },{ .name = "RVBAR CPU " #n " High", .addr = A_RVBARADDR ## n ## H, \ + .post_write = zynqmp_apu_rvbar_post_write, \ + } + { .name = "ERR_CTRL", .addr = A_APU_ERR_CTRL, + },{ .name = "ISR", .addr = A_ISR, + .w1c = 0x1, + .post_write = isr_postw, + },{ .name = "IMR", .addr = A_IMR, + .reset = 0x1, + .ro = 0x1, + },{ .name = "IEN", .addr = A_IEN, + .pre_write = ien_prew, + },{ .name = "IDS", .addr = A_IDS, + .pre_write = ids_prew, + },{ .name = "CONFIG_0", .addr = A_CONFIG_0, + .reset = 0xf0f, + },{ .name = "CONFIG_1", .addr = A_CONFIG_1, + }, + RVBAR_REGDEF(0), + RVBAR_REGDEF(1), + RVBAR_REGDEF(2), + RVBAR_REGDEF(3), + { .name = "ACE_CTRL", .addr = A_ACE_CTRL, + .reset = 0xf000f, + },{ .name = "SNOOP_CTRL", .addr = A_SNOOP_CTRL, + },{ .name = "PWRCTL", .addr = A_PWRCTL, + .post_write = zynqmp_apu_pwrctl_post_write, + },{ .name = "PWRSTAT", .addr = A_PWRSTAT, + .ro = 0x3000f, + } +}; + +static void zynqmp_apu_reset_enter(Object *obj, ResetType type) +{ + XlnxZynqMPAPUCtrl *s = XLNX_ZYNQMP_APU_CTRL(obj); + int i; + + for (i = 0; i < APU_R_MAX; ++i) { + register_reset(&s->regs_info[i]); + } + + s->cpu_pwrdwn_req = 0; + s->cpu_in_wfi = 0; +} + +static void zynqmp_apu_reset_hold(Object *obj) +{ + XlnxZynqMPAPUCtrl *s = XLNX_ZYNQMP_APU_CTRL(obj); + + update_wfi_out(s); + imr_update_irq(s); +} + +static const MemoryRegionOps zynqmp_apu_ops = { + .read = register_read_memory, + .write = register_write_memory, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static void zynqmp_apu_handle_wfi(void *opaque, int irq, int level) +{ + XlnxZynqMPAPUCtrl *s = XLNX_ZYNQMP_APU_CTRL(opaque); + + s->cpu_in_wfi = deposit32(s->cpu_in_wfi, irq, 1, level); + update_wfi_out(s); +} + +static void zynqmp_apu_init(Object *obj) +{ + XlnxZynqMPAPUCtrl *s = XLNX_ZYNQMP_APU_CTRL(obj); + int i; + + s->reg_array = + register_init_block32(DEVICE(obj), zynqmp_apu_regs_info, + ARRAY_SIZE(zynqmp_apu_regs_info), + s->regs_info, s->regs, + &zynqmp_apu_ops, + XILINX_ZYNQMP_APU_ERR_DEBUG, + APU_R_MAX * 4); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->reg_array->mem); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq_imr); + + for (i = 0; i < APU_MAX_CPU; ++i) { + g_autofree gchar *prop_name = g_strdup_printf("cpu%d", i); + object_property_add_link(obj, prop_name, TYPE_ARM_CPU, + (Object **)&s->cpus[i], + qdev_prop_allow_set_link_before_realize, + 
OBJ_PROP_LINK_STRONG); + } + + /* wfi_out is used to connect to PMU GPIs. */ + qdev_init_gpio_out_named(DEVICE(obj), s->wfi_out, "wfi_out", 4); + /* CPU_POWER_STATUS is used to connect to INTC redirect. */ + qdev_init_gpio_out_named(DEVICE(obj), s->cpu_power_status, + "CPU_POWER_STATUS", 4); + /* wfi_in is used as input from CPUs as wfi request. */ + qdev_init_gpio_in_named(DEVICE(obj), zynqmp_apu_handle_wfi, "wfi_in", 4); +} + +static void zynqmp_apu_finalize(Object *obj) +{ + XlnxZynqMPAPUCtrl *s = XLNX_ZYNQMP_APU_CTRL(obj); + register_finalize_block(s->reg_array); +} + +static const VMStateDescription vmstate_zynqmp_apu = { + .name = TYPE_XLNX_ZYNQMP_APU_CTRL, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPAPUCtrl, APU_R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static void zynqmp_apu_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_zynqmp_apu; + + rc->phases.enter = zynqmp_apu_reset_enter; + rc->phases.hold = zynqmp_apu_reset_hold; +} + +static const TypeInfo zynqmp_apu_info = { + .name = TYPE_XLNX_ZYNQMP_APU_CTRL, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxZynqMPAPUCtrl), + .class_init = zynqmp_apu_class_init, + .instance_init = zynqmp_apu_init, + .instance_finalize = zynqmp_apu_finalize, +}; + +static void zynqmp_apu_register_types(void) +{ + type_register_static(&zynqmp_apu_info); +} + +type_init(zynqmp_apu_register_types) diff --git a/hw/misc/xlnx-zynqmp-crf.c b/hw/misc/xlnx-zynqmp-crf.c new file mode 100644 index 0000000000..57bc8cf49a --- /dev/null +++ b/hw/misc/xlnx-zynqmp-crf.c @@ -0,0 +1,266 @@ +/* + * QEMU model of the CRF - Clock Reset FPD. + * + * Copyright (c) 2022 Xilinx Inc. + * SPDX-License-Identifier: GPL-2.0-or-later + * Written by Edgar E. 
Iglesias + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/register.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "migration/vmstate.h" +#include "hw/irq.h" +#include "hw/misc/xlnx-zynqmp-crf.h" +#include "target/arm/arm-powerctl.h" + +#ifndef XLNX_ZYNQMP_CRF_ERR_DEBUG +#define XLNX_ZYNQMP_CRF_ERR_DEBUG 0 +#endif + +#define CRF_MAX_CPU 4 + +static void ir_update_irq(XlnxZynqMPCRF *s) +{ + bool pending = s->regs[R_IR_STATUS] & ~s->regs[R_IR_MASK]; + qemu_set_irq(s->irq_ir, pending); +} + +static void ir_status_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPCRF *s = XLNX_ZYNQMP_CRF(reg->opaque); + ir_update_irq(s); +} + +static uint64_t ir_enable_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPCRF *s = XLNX_ZYNQMP_CRF(reg->opaque); + uint32_t val = val64; + + s->regs[R_IR_MASK] &= ~val; + ir_update_irq(s); + return 0; +} + +static uint64_t ir_disable_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPCRF *s = XLNX_ZYNQMP_CRF(reg->opaque); + uint32_t val = val64; + + s->regs[R_IR_MASK] |= val; + ir_update_irq(s); + return 0; +} + +static uint64_t rst_fpd_apu_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPCRF *s = XLNX_ZYNQMP_CRF(reg->opaque); + uint32_t val = val64; + uint32_t val_old = s->regs[R_RST_FPD_APU]; + unsigned int i; + + for (i = 0; i < CRF_MAX_CPU; i++) { + uint32_t mask = (1 << (R_RST_FPD_APU_ACPU0_RESET_SHIFT + i)); + + if ((val ^ val_old) & mask) { + if (val & mask) { + arm_set_cpu_off(i); + } else { + arm_set_cpu_on_and_reset(i); + } + } + } + return val64; +} + +static const RegisterAccessInfo crf_regs_info[] = { + { .name = "ERR_CTRL", .addr = A_ERR_CTRL, + },{ .name = "IR_STATUS", .addr = A_IR_STATUS, + .w1c = 0x1, + .post_write = ir_status_postw, + },{ .name = "IR_MASK", .addr = A_IR_MASK, + .reset = 0x1, + .ro = 0x1, + },{ .name = "IR_ENABLE", .addr = A_IR_ENABLE, + .pre_write = ir_enable_prew, + },{ .name = "IR_DISABLE", .addr = A_IR_DISABLE, + .pre_write = ir_disable_prew, + },{ .name = "CRF_WPROT", .addr = A_CRF_WPROT, + },{ .name = "APLL_CTRL", .addr = A_APLL_CTRL, + .reset = 0x12c09, + .rsvd = 0xf88c80f6, + },{ .name = "APLL_CFG", .addr = A_APLL_CFG, + .rsvd = 0x1801210, + },{ .name = "APLL_FRAC_CFG", .addr = A_APLL_FRAC_CFG, + .rsvd = 0x7e330000, + },{ .name = "DPLL_CTRL", .addr = A_DPLL_CTRL, + .reset = 0x2c09, + .rsvd = 0xf88c80f6, + },{ .name = "DPLL_CFG", .addr = A_DPLL_CFG, + .rsvd = 0x1801210, + },{ .name = "DPLL_FRAC_CFG", .addr = A_DPLL_FRAC_CFG, + .rsvd = 0x7e330000, + },{ .name = "VPLL_CTRL", .addr = A_VPLL_CTRL, + .reset = 0x12809, + .rsvd = 0xf88c80f6, + },{ .name = "VPLL_CFG", .addr = A_VPLL_CFG, + .rsvd = 0x1801210, + },{ .name = "VPLL_FRAC_CFG", .addr = A_VPLL_FRAC_CFG, + .rsvd = 0x7e330000, + },{ .name = "PLL_STATUS", .addr = A_PLL_STATUS, + .reset = 0x3f, + .rsvd = 0xc0, + .ro = 0x3f, + },{ .name = "APLL_TO_LPD_CTRL", .addr = A_APLL_TO_LPD_CTRL, + .reset = 0x400, + .rsvd = 0xc0ff, + },{ .name = "DPLL_TO_LPD_CTRL", .addr = A_DPLL_TO_LPD_CTRL, + .reset = 0x400, + .rsvd = 0xc0ff, + },{ .name = "VPLL_TO_LPD_CTRL", .addr = A_VPLL_TO_LPD_CTRL, + .reset = 0x400, + .rsvd = 0xc0ff, + },{ .name = "ACPU_CTRL", .addr = A_ACPU_CTRL, + .reset = 0x3000400, + .rsvd = 0xfcffc0f8, + },{ .name = "DBG_TRACE_CTRL", .addr = A_DBG_TRACE_CTRL, + .reset = 0x2500, + .rsvd = 0xfeffc0f8, + },{ .name = "DBG_FPD_CTRL", .addr = A_DBG_FPD_CTRL, + .reset = 0x1002500, + .rsvd = 0xfeffc0f8, + },{ .name = "DP_VIDEO_REF_CTRL", .addr = A_DP_VIDEO_REF_CTRL, + .reset = 0x1002300, + .rsvd = 0xfec0c0f8, + },{ .name = 
"DP_AUDIO_REF_CTRL", .addr = A_DP_AUDIO_REF_CTRL, + .reset = 0x1032300, + .rsvd = 0xfec0c0f8, + },{ .name = "DP_STC_REF_CTRL", .addr = A_DP_STC_REF_CTRL, + .reset = 0x1203200, + .rsvd = 0xfec0c0f8, + },{ .name = "DDR_CTRL", .addr = A_DDR_CTRL, + .reset = 0x1000500, + .rsvd = 0xfeffc0f8, + },{ .name = "GPU_REF_CTRL", .addr = A_GPU_REF_CTRL, + .reset = 0x1500, + .rsvd = 0xf8ffc0f8, + },{ .name = "SATA_REF_CTRL", .addr = A_SATA_REF_CTRL, + .reset = 0x1001600, + .rsvd = 0xfeffc0f8, + },{ .name = "PCIE_REF_CTRL", .addr = A_PCIE_REF_CTRL, + .reset = 0x1500, + .rsvd = 0xfeffc0f8, + },{ .name = "GDMA_REF_CTRL", .addr = A_GDMA_REF_CTRL, + .reset = 0x1000500, + .rsvd = 0xfeffc0f8, + },{ .name = "DPDMA_REF_CTRL", .addr = A_DPDMA_REF_CTRL, + .reset = 0x1000500, + .rsvd = 0xfeffc0f8, + },{ .name = "TOPSW_MAIN_CTRL", .addr = A_TOPSW_MAIN_CTRL, + .reset = 0x1000400, + .rsvd = 0xfeffc0f8, + },{ .name = "TOPSW_LSBUS_CTRL", .addr = A_TOPSW_LSBUS_CTRL, + .reset = 0x1000800, + .rsvd = 0xfeffc0f8, + },{ .name = "DBG_TSTMP_CTRL", .addr = A_DBG_TSTMP_CTRL, + .reset = 0xa00, + .rsvd = 0xffffc0f8, + }, + { .name = "RST_FPD_TOP", .addr = A_RST_FPD_TOP, + .reset = 0xf9ffe, + .rsvd = 0xf06001, + },{ .name = "RST_FPD_APU", .addr = A_RST_FPD_APU, + .reset = 0x3d0f, + .rsvd = 0xc2f0, + .pre_write = rst_fpd_apu_prew, + },{ .name = "RST_DDR_SS", .addr = A_RST_DDR_SS, + .reset = 0xf, + .rsvd = 0xf3, + } +}; + +static void crf_reset_enter(Object *obj, ResetType type) +{ + XlnxZynqMPCRF *s = XLNX_ZYNQMP_CRF(obj); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + register_reset(&s->regs_info[i]); + } +} + +static void crf_reset_hold(Object *obj) +{ + XlnxZynqMPCRF *s = XLNX_ZYNQMP_CRF(obj); + ir_update_irq(s); +} + +static const MemoryRegionOps crf_ops = { + .read = register_read_memory, + .write = register_write_memory, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void crf_init(Object *obj) +{ + XlnxZynqMPCRF *s = XLNX_ZYNQMP_CRF(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + s->reg_array = + register_init_block32(DEVICE(obj), crf_regs_info, + ARRAY_SIZE(crf_regs_info), + s->regs_info, s->regs, + &crf_ops, + XLNX_ZYNQMP_CRF_ERR_DEBUG, + CRF_R_MAX * 4); + sysbus_init_mmio(sbd, &s->reg_array->mem); + sysbus_init_irq(sbd, &s->irq_ir); +} + +static void crf_finalize(Object *obj) +{ + XlnxZynqMPCRF *s = XLNX_ZYNQMP_CRF(obj); + register_finalize_block(s->reg_array); +} + +static const VMStateDescription vmstate_crf = { + .name = TYPE_XLNX_ZYNQMP_CRF, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPCRF, CRF_R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static void crf_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_crf; + rc->phases.enter = crf_reset_enter; + rc->phases.hold = crf_reset_hold; +} + +static const TypeInfo crf_info = { + .name = TYPE_XLNX_ZYNQMP_CRF, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxZynqMPCRF), + .class_init = crf_class_init, + .instance_init = crf_init, + .instance_finalize = crf_finalize, +}; + +static void crf_register_types(void) +{ + type_register_static(&crf_info); +} + +type_init(crf_register_types) diff --git a/hw/misc/zynq_slcr.c b/hw/misc/zynq_slcr.c index 5086e6b7ed..8b70285961 100644 --- a/hw/misc/zynq_slcr.c +++ b/hw/misc/zynq_slcr.c @@ -269,6 +269,21 @@ static uint64_t zynq_slcr_compute_clock(const 
uint64_t periods[], zynq_slcr_compute_clock((plls), (state)->regs[reg], \ reg ## _ ## enable_field ## _SHIFT) +static void zynq_slcr_compute_clocks_internal(ZynqSLCRState *s, uint64_t ps_clk) +{ + uint64_t io_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_IO_PLL_CTRL]); + uint64_t arm_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_ARM_PLL_CTRL]); + uint64_t ddr_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_DDR_PLL_CTRL]); + + uint64_t uart_mux[4] = {io_pll, io_pll, arm_pll, ddr_pll}; + + /* compute uartX reference clocks */ + clock_set(s->uart0_ref_clk, + ZYNQ_COMPUTE_CLK(s, uart_mux, R_UART_CLK_CTRL, CLKACT0)); + clock_set(s->uart1_ref_clk, + ZYNQ_COMPUTE_CLK(s, uart_mux, R_UART_CLK_CTRL, CLKACT1)); +} + /** * Compute and set the ouputs clocks periods. * But do not propagate them further. Connected clocks @@ -283,17 +298,7 @@ static void zynq_slcr_compute_clocks(ZynqSLCRState *s) ps_clk = 0; } - uint64_t io_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_IO_PLL_CTRL]); - uint64_t arm_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_ARM_PLL_CTRL]); - uint64_t ddr_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_DDR_PLL_CTRL]); - - uint64_t uart_mux[4] = {io_pll, io_pll, arm_pll, ddr_pll}; - - /* compute uartX reference clocks */ - clock_set(s->uart0_ref_clk, - ZYNQ_COMPUTE_CLK(s, uart_mux, R_UART_CLK_CTRL, CLKACT0)); - clock_set(s->uart1_ref_clk, - ZYNQ_COMPUTE_CLK(s, uart_mux, R_UART_CLK_CTRL, CLKACT1)); + zynq_slcr_compute_clocks_internal(s, ps_clk); } /** @@ -416,7 +421,7 @@ static void zynq_slcr_reset_hold(Object *obj) ZynqSLCRState *s = ZYNQ_SLCR(obj); /* will disable all output clocks */ - zynq_slcr_compute_clocks(s); + zynq_slcr_compute_clocks_internal(s, 0); zynq_slcr_propagate_clocks(s); } @@ -425,7 +430,7 @@ static void zynq_slcr_reset_exit(Object *obj) ZynqSLCRState *s = ZYNQ_SLCR(obj); /* will compute output clocks according to ps_clk and registers */ - zynq_slcr_compute_clocks(s); + zynq_slcr_compute_clocks_internal(s, clock_get(s->ps_clk)); zynq_slcr_propagate_clocks(s); } diff --git a/hw/net/allwinner-sun8i-emac.c b/hw/net/allwinner-sun8i-emac.c index ff611f18fb..c3fed5fcbe 100644 --- a/hw/net/allwinner-sun8i-emac.c +++ b/hw/net/allwinner-sun8i-emac.c @@ -350,7 +350,13 @@ static void allwinner_sun8i_emac_get_desc(AwSun8iEmacState *s, FrameDescriptor *desc, uint32_t phys_addr) { - dma_memory_read(&s->dma_as, phys_addr, desc, sizeof(*desc)); + uint32_t desc_words[4]; + dma_memory_read(&s->dma_as, phys_addr, &desc_words, sizeof(desc_words), + MEMTXATTRS_UNSPECIFIED); + desc->status = le32_to_cpu(desc_words[0]); + desc->status2 = le32_to_cpu(desc_words[1]); + desc->addr = le32_to_cpu(desc_words[2]); + desc->next = le32_to_cpu(desc_words[3]); } static uint32_t allwinner_sun8i_emac_next_desc(AwSun8iEmacState *s, @@ -399,10 +405,16 @@ static uint32_t allwinner_sun8i_emac_tx_desc(AwSun8iEmacState *s, } static void allwinner_sun8i_emac_flush_desc(AwSun8iEmacState *s, - FrameDescriptor *desc, + const FrameDescriptor *desc, uint32_t phys_addr) { - dma_memory_write(&s->dma_as, phys_addr, desc, sizeof(*desc)); + uint32_t desc_words[4]; + desc_words[0] = cpu_to_le32(desc->status); + desc_words[1] = cpu_to_le32(desc->status2); + desc_words[2] = cpu_to_le32(desc->addr); + desc_words[3] = cpu_to_le32(desc->next); + dma_memory_write(&s->dma_as, phys_addr, &desc_words, sizeof(desc_words), + MEMTXATTRS_UNSPECIFIED); } static bool allwinner_sun8i_emac_can_receive(NetClientState *nc) @@ -460,7 +472,8 @@ static ssize_t allwinner_sun8i_emac_receive(NetClientState *nc, << RX_DESC_STATUS_FRM_LEN_SHIFT; } - 
dma_memory_write(&s->dma_as, desc.addr, buf, desc_bytes); + dma_memory_write(&s->dma_as, desc.addr, buf, desc_bytes, + MEMTXATTRS_UNSPECIFIED); allwinner_sun8i_emac_flush_desc(s, &desc, s->rx_desc_curr); trace_allwinner_sun8i_emac_receive(s->rx_desc_curr, desc.addr, desc_bytes); @@ -512,7 +525,8 @@ static void allwinner_sun8i_emac_transmit(AwSun8iEmacState *s) desc.status |= TX_DESC_STATUS_LENGTH_ERR; break; } - dma_memory_read(&s->dma_as, desc.addr, packet_buf + packet_bytes, bytes); + dma_memory_read(&s->dma_as, desc.addr, packet_buf + packet_bytes, + bytes, MEMTXATTRS_UNSPECIFIED); packet_bytes += bytes; desc.status &= ~DESC_STATUS_CTL; allwinner_sun8i_emac_flush_desc(s, &desc, s->tx_desc_curr); @@ -634,7 +648,7 @@ static uint64_t allwinner_sun8i_emac_read(void *opaque, hwaddr offset, break; case REG_TX_CUR_BUF: /* Transmit Current Buffer */ if (s->tx_desc_curr != 0) { - dma_memory_read(&s->dma_as, s->tx_desc_curr, &desc, sizeof(desc)); + allwinner_sun8i_emac_get_desc(s, &desc, s->tx_desc_curr); value = desc.addr; } else { value = 0; @@ -647,7 +661,7 @@ static uint64_t allwinner_sun8i_emac_read(void *opaque, hwaddr offset, break; case REG_RX_CUR_BUF: /* Receive Current Buffer */ if (s->rx_desc_curr != 0) { - dma_memory_read(&s->dma_as, s->rx_desc_curr, &desc, sizeof(desc)); + allwinner_sun8i_emac_get_desc(s, &desc, s->rx_desc_curr); value = desc.addr; } else { value = 0; diff --git a/hw/net/can/can_kvaser_pci.c b/hw/net/can/can_kvaser_pci.c index 168b3a620d..94b3a534f8 100644 --- a/hw/net/can/can_kvaser_pci.c +++ b/hw/net/can/can_kvaser_pci.c @@ -266,7 +266,6 @@ static const VMStateDescription vmstate_kvaser_pci = { .name = "kvaser_pci", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_PCI_DEVICE(dev, KvaserPCIState), /* Load this before sja_state. */ diff --git a/hw/net/can/can_mioe3680_pci.c b/hw/net/can/can_mioe3680_pci.c index 7a79e2605a..29dc696f7c 100644 --- a/hw/net/can/can_mioe3680_pci.c +++ b/hw/net/can/can_mioe3680_pci.c @@ -203,7 +203,6 @@ static const VMStateDescription vmstate_mioe3680_pci = { .name = "mioe3680_pci", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_PCI_DEVICE(dev, Mioe3680PCIState), VMSTATE_STRUCT(sja_state[0], Mioe3680PCIState, 0, vmstate_can_sja, diff --git a/hw/net/can/can_pcm3680_pci.c b/hw/net/can/can_pcm3680_pci.c index 8ef4e74af0..e8e57f4f33 100644 --- a/hw/net/can/can_pcm3680_pci.c +++ b/hw/net/can/can_pcm3680_pci.c @@ -204,7 +204,6 @@ static const VMStateDescription vmstate_pcm3680i_pci = { .name = "pcm3680i_pci", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_PCI_DEVICE(dev, Pcm3680iPCIState), VMSTATE_STRUCT(sja_state[0], Pcm3680iPCIState, 0, diff --git a/hw/net/can/can_sja1000.c b/hw/net/can/can_sja1000.c index 34eea684ce..73201f9139 100644 --- a/hw/net/can/can_sja1000.c +++ b/hw/net/can/can_sja1000.c @@ -247,21 +247,22 @@ int can_sja_accept_filter(CanSJA1000State *s, static void can_display_msg(const char *prefix, const qemu_can_frame *msg) { int i; - FILE *logfile = qemu_log_lock(); + FILE *logfile = qemu_log_trylock(); - qemu_log("%s%03X [%01d] %s %s", - prefix, - msg->can_id & QEMU_CAN_EFF_MASK, - msg->can_dlc, - msg->can_id & QEMU_CAN_EFF_FLAG ? "EFF" : "SFF", - msg->can_id & QEMU_CAN_RTR_FLAG ? 
"RTR" : "DAT"); + if (logfile) { + fprintf(logfile, "%s%03X [%01d] %s %s", + prefix, + msg->can_id & QEMU_CAN_EFF_MASK, + msg->can_dlc, + msg->can_id & QEMU_CAN_EFF_FLAG ? "EFF" : "SFF", + msg->can_id & QEMU_CAN_RTR_FLAG ? "RTR" : "DAT"); - for (i = 0; i < msg->can_dlc; i++) { - qemu_log(" %02X", msg->data[i]); + for (i = 0; i < msg->can_dlc; i++) { + fprintf(logfile, " %02X", msg->data[i]); + } + fprintf(logfile, "\n"); + qemu_log_unlock(logfile); } - qemu_log("\n"); - qemu_log_flush(); - qemu_log_unlock(logfile); } static void buff2frame_pel(const uint8_t *buff, qemu_can_frame *frame) @@ -430,7 +431,7 @@ void can_sja_mem_write(CanSJA1000State *s, hwaddr addr, uint64_t val, (unsigned long long)val, (unsigned int)addr); if (addr > CAN_SJA_MEM_SIZE) { - return ; + return; } if (s->clock & 0x80) { /* PeliCAN Mode */ @@ -928,7 +929,6 @@ const VMStateDescription vmstate_qemu_can_filter = { .name = "qemu_can_filter", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_UINT32(can_id, qemu_can_filter), VMSTATE_UINT32(can_mask, qemu_can_filter), @@ -952,7 +952,6 @@ const VMStateDescription vmstate_can_sja = { .name = "can_sja", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .post_load = can_sja_post_load, .fields = (VMStateField[]) { VMSTATE_UINT8(mode, CanSJA1000State), diff --git a/hw/net/can/ctu_can_fd_frame.h b/hw/net/can/ctu_can_fd_frame.h index 04d956c84e..8c43fd99dd 100644 --- a/hw/net/can/ctu_can_fd_frame.h +++ b/hw/net/can/ctu_can_fd_frame.h @@ -29,161 +29,161 @@ /* This file is autogenerated, DO NOT EDIT! */ -#ifndef __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__ -#define __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__ +#ifndef HW_CAN_CTU_CAN_FD_FRAME_H +#define HW_CAN_CTU_CAN_FD_FRAME_H /* CAN_Frame_format memory map */ enum ctu_can_fd_can_frame_format { - CTU_CAN_FD_FRAME_FORM_W = 0x0, - CTU_CAN_FD_IDENTIFIER_W = 0x4, - CTU_CAN_FD_TIMESTAMP_L_W = 0x8, - CTU_CAN_FD_TIMESTAMP_U_W = 0xc, - CTU_CAN_FD_DATA_1_4_W = 0x10, - CTU_CAN_FD_DATA_5_8_W = 0x14, - CTU_CAN_FD_DATA_61_64_W = 0x4c, + CTU_CAN_FD_FRAME_FORM_W = 0x0, + CTU_CAN_FD_IDENTIFIER_W = 0x4, + CTU_CAN_FD_TIMESTAMP_L_W = 0x8, + CTU_CAN_FD_TIMESTAMP_U_W = 0xc, + CTU_CAN_FD_DATA_1_4_W = 0x10, + CTU_CAN_FD_DATA_5_8_W = 0x14, + CTU_CAN_FD_DATA_61_64_W = 0x4c, }; /* Register descriptions: */ union ctu_can_fd_frame_form_w { - uint32_t u32; - struct ctu_can_fd_frame_form_w_s { + uint32_t u32; + struct ctu_can_fd_frame_form_w_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* FRAME_FORM_W */ - uint32_t dlc : 4; - uint32_t reserved_4 : 1; - uint32_t rtr : 1; - uint32_t ide : 1; - uint32_t fdf : 1; - uint32_t reserved_8 : 1; - uint32_t brs : 1; - uint32_t esi_rsv : 1; - uint32_t rwcnt : 5; - uint32_t reserved_31_16 : 16; + uint32_t dlc : 4; + uint32_t reserved_4 : 1; + uint32_t rtr : 1; + uint32_t ide : 1; + uint32_t fdf : 1; + uint32_t reserved_8 : 1; + uint32_t brs : 1; + uint32_t esi_rsv : 1; + uint32_t rwcnt : 5; + uint32_t reserved_31_16 : 16; #else - uint32_t reserved_31_16 : 16; - uint32_t rwcnt : 5; - uint32_t esi_rsv : 1; - uint32_t brs : 1; - uint32_t reserved_8 : 1; - uint32_t fdf : 1; - uint32_t ide : 1; - uint32_t rtr : 1; - uint32_t reserved_4 : 1; - uint32_t dlc : 4; + uint32_t reserved_31_16 : 16; + uint32_t rwcnt : 5; + uint32_t esi_rsv : 1; + uint32_t brs : 1; + uint32_t reserved_8 : 1; + uint32_t fdf : 1; + uint32_t ide : 1; + uint32_t rtr : 1; + uint32_t reserved_4 : 1; + uint32_t dlc : 4; #endif - } s; + } s; }; enum ctu_can_fd_frame_form_w_rtr { - NO_RTR_FRAME = 0x0, - 
RTR_FRAME = 0x1, + NO_RTR_FRAME = 0x0, + RTR_FRAME = 0x1, }; enum ctu_can_fd_frame_form_w_ide { - BASE = 0x0, - EXTENDED = 0x1, + BASE = 0x0, + EXTENDED = 0x1, }; enum ctu_can_fd_frame_form_w_fdf { - NORMAL_CAN = 0x0, - FD_CAN = 0x1, + NORMAL_CAN = 0x0, + FD_CAN = 0x1, }; enum ctu_can_fd_frame_form_w_brs { - BR_NO_SHIFT = 0x0, - BR_SHIFT = 0x1, + BR_NO_SHIFT = 0x0, + BR_SHIFT = 0x1, }; enum ctu_can_fd_frame_form_w_esi_rsv { - ESI_ERR_ACTIVE = 0x0, - ESI_ERR_PASIVE = 0x1, + ESI_ERR_ACTIVE = 0x0, + ESI_ERR_PASIVE = 0x1, }; union ctu_can_fd_identifier_w { - uint32_t u32; - struct ctu_can_fd_identifier_w_s { + uint32_t u32; + struct ctu_can_fd_identifier_w_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* IDENTIFIER_W */ - uint32_t identifier_ext : 18; - uint32_t identifier_base : 11; - uint32_t reserved_31_29 : 3; + uint32_t identifier_ext : 18; + uint32_t identifier_base : 11; + uint32_t reserved_31_29 : 3; #else - uint32_t reserved_31_29 : 3; - uint32_t identifier_base : 11; - uint32_t identifier_ext : 18; + uint32_t reserved_31_29 : 3; + uint32_t identifier_base : 11; + uint32_t identifier_ext : 18; #endif - } s; + } s; }; union ctu_can_fd_timestamp_l_w { - uint32_t u32; - struct ctu_can_fd_timestamp_l_w_s { + uint32_t u32; + struct ctu_can_fd_timestamp_l_w_s { /* TIMESTAMP_L_W */ - uint32_t time_stamp_31_0 : 32; - } s; + uint32_t time_stamp_31_0 : 32; + } s; }; union ctu_can_fd_timestamp_u_w { - uint32_t u32; - struct ctu_can_fd_timestamp_u_w_s { + uint32_t u32; + struct ctu_can_fd_timestamp_u_w_s { /* TIMESTAMP_U_W */ - uint32_t timestamp_l_w : 32; - } s; + uint32_t timestamp_l_w : 32; + } s; }; union ctu_can_fd_data_1_4_w { - uint32_t u32; - struct ctu_can_fd_data_1_4_w_s { + uint32_t u32; + struct ctu_can_fd_data_1_4_w_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* DATA_1_4_W */ - uint32_t data_1 : 8; - uint32_t data_2 : 8; - uint32_t data_3 : 8; - uint32_t data_4 : 8; + uint32_t data_1 : 8; + uint32_t data_2 : 8; + uint32_t data_3 : 8; + uint32_t data_4 : 8; #else - uint32_t data_4 : 8; - uint32_t data_3 : 8; - uint32_t data_2 : 8; - uint32_t data_1 : 8; + uint32_t data_4 : 8; + uint32_t data_3 : 8; + uint32_t data_2 : 8; + uint32_t data_1 : 8; #endif - } s; + } s; }; union ctu_can_fd_data_5_8_w { - uint32_t u32; - struct ctu_can_fd_data_5_8_w_s { + uint32_t u32; + struct ctu_can_fd_data_5_8_w_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* DATA_5_8_W */ - uint32_t data_5 : 8; - uint32_t data_6 : 8; - uint32_t data_7 : 8; - uint32_t data_8 : 8; + uint32_t data_5 : 8; + uint32_t data_6 : 8; + uint32_t data_7 : 8; + uint32_t data_8 : 8; #else - uint32_t data_8 : 8; - uint32_t data_7 : 8; - uint32_t data_6 : 8; - uint32_t data_5 : 8; + uint32_t data_8 : 8; + uint32_t data_7 : 8; + uint32_t data_6 : 8; + uint32_t data_5 : 8; #endif - } s; + } s; }; union ctu_can_fd_data_61_64_w { - uint32_t u32; - struct ctu_can_fd_data_61_64_w_s { + uint32_t u32; + struct ctu_can_fd_data_61_64_w_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* DATA_61_64_W */ - uint32_t data_61 : 8; - uint32_t data_62 : 8; - uint32_t data_63 : 8; - uint32_t data_64 : 8; + uint32_t data_61 : 8; + uint32_t data_62 : 8; + uint32_t data_63 : 8; + uint32_t data_64 : 8; #else - uint32_t data_64 : 8; - uint32_t data_63 : 8; - uint32_t data_62 : 8; - uint32_t data_61 : 8; + uint32_t data_64 : 8; + uint32_t data_63 : 8; + uint32_t data_62 : 8; + uint32_t data_61 : 8; #endif - } s; + } s; }; #endif diff --git a/hw/net/can/ctu_can_fd_regs.h b/hw/net/can/ctu_can_fd_regs.h index 450f4b9fb3..d2d88cdd1a 100644 --- a/hw/net/can/ctu_can_fd_regs.h +++ 
b/hw/net/can/ctu_can_fd_regs.h @@ -29,943 +29,943 @@ /* This file is autogenerated, DO NOT EDIT! */ -#ifndef __CTU_CAN_FD_CAN_FD_REGISTER_MAP__ -#define __CTU_CAN_FD_CAN_FD_REGISTER_MAP__ +#ifndef HW_CAN_CTU_CAN_FD_REGS_H +#define HW_CAN_CTU_CAN_FD_REGS_H /* CAN_Registers memory map */ enum ctu_can_fd_can_registers { - CTU_CAN_FD_DEVICE_ID = 0x0, - CTU_CAN_FD_VERSION = 0x2, - CTU_CAN_FD_MODE = 0x4, - CTU_CAN_FD_SETTINGS = 0x6, - CTU_CAN_FD_STATUS = 0x8, - CTU_CAN_FD_COMMAND = 0xc, - CTU_CAN_FD_INT_STAT = 0x10, - CTU_CAN_FD_INT_ENA_SET = 0x14, - CTU_CAN_FD_INT_ENA_CLR = 0x18, - CTU_CAN_FD_INT_MASK_SET = 0x1c, - CTU_CAN_FD_INT_MASK_CLR = 0x20, - CTU_CAN_FD_BTR = 0x24, - CTU_CAN_FD_BTR_FD = 0x28, - CTU_CAN_FD_EWL = 0x2c, - CTU_CAN_FD_ERP = 0x2d, - CTU_CAN_FD_FAULT_STATE = 0x2e, - CTU_CAN_FD_REC = 0x30, - CTU_CAN_FD_TEC = 0x32, - CTU_CAN_FD_ERR_NORM = 0x34, - CTU_CAN_FD_ERR_FD = 0x36, - CTU_CAN_FD_CTR_PRES = 0x38, - CTU_CAN_FD_FILTER_A_MASK = 0x3c, - CTU_CAN_FD_FILTER_A_VAL = 0x40, - CTU_CAN_FD_FILTER_B_MASK = 0x44, - CTU_CAN_FD_FILTER_B_VAL = 0x48, - CTU_CAN_FD_FILTER_C_MASK = 0x4c, - CTU_CAN_FD_FILTER_C_VAL = 0x50, - CTU_CAN_FD_FILTER_RAN_LOW = 0x54, - CTU_CAN_FD_FILTER_RAN_HIGH = 0x58, - CTU_CAN_FD_FILTER_CONTROL = 0x5c, - CTU_CAN_FD_FILTER_STATUS = 0x5e, - CTU_CAN_FD_RX_MEM_INFO = 0x60, - CTU_CAN_FD_RX_POINTERS = 0x64, - CTU_CAN_FD_RX_STATUS = 0x68, - CTU_CAN_FD_RX_SETTINGS = 0x6a, - CTU_CAN_FD_RX_DATA = 0x6c, - CTU_CAN_FD_TX_STATUS = 0x70, - CTU_CAN_FD_TX_COMMAND = 0x74, - CTU_CAN_FD_TX_PRIORITY = 0x78, - CTU_CAN_FD_ERR_CAPT = 0x7c, - CTU_CAN_FD_ALC = 0x7e, - CTU_CAN_FD_TRV_DELAY = 0x80, - CTU_CAN_FD_SSP_CFG = 0x82, - CTU_CAN_FD_RX_FR_CTR = 0x84, - CTU_CAN_FD_TX_FR_CTR = 0x88, - CTU_CAN_FD_DEBUG_REGISTER = 0x8c, - CTU_CAN_FD_YOLO_REG = 0x90, - CTU_CAN_FD_TIMESTAMP_LOW = 0x94, - CTU_CAN_FD_TIMESTAMP_HIGH = 0x98, - CTU_CAN_FD_TXTB1_DATA_1 = 0x100, - CTU_CAN_FD_TXTB1_DATA_2 = 0x104, - CTU_CAN_FD_TXTB1_DATA_20 = 0x14c, - CTU_CAN_FD_TXTB2_DATA_1 = 0x200, - CTU_CAN_FD_TXTB2_DATA_2 = 0x204, - CTU_CAN_FD_TXTB2_DATA_20 = 0x24c, - CTU_CAN_FD_TXTB3_DATA_1 = 0x300, - CTU_CAN_FD_TXTB3_DATA_2 = 0x304, - CTU_CAN_FD_TXTB3_DATA_20 = 0x34c, - CTU_CAN_FD_TXTB4_DATA_1 = 0x400, - CTU_CAN_FD_TXTB4_DATA_2 = 0x404, - CTU_CAN_FD_TXTB4_DATA_20 = 0x44c, + CTU_CAN_FD_DEVICE_ID = 0x0, + CTU_CAN_FD_VERSION = 0x2, + CTU_CAN_FD_MODE = 0x4, + CTU_CAN_FD_SETTINGS = 0x6, + CTU_CAN_FD_STATUS = 0x8, + CTU_CAN_FD_COMMAND = 0xc, + CTU_CAN_FD_INT_STAT = 0x10, + CTU_CAN_FD_INT_ENA_SET = 0x14, + CTU_CAN_FD_INT_ENA_CLR = 0x18, + CTU_CAN_FD_INT_MASK_SET = 0x1c, + CTU_CAN_FD_INT_MASK_CLR = 0x20, + CTU_CAN_FD_BTR = 0x24, + CTU_CAN_FD_BTR_FD = 0x28, + CTU_CAN_FD_EWL = 0x2c, + CTU_CAN_FD_ERP = 0x2d, + CTU_CAN_FD_FAULT_STATE = 0x2e, + CTU_CAN_FD_REC = 0x30, + CTU_CAN_FD_TEC = 0x32, + CTU_CAN_FD_ERR_NORM = 0x34, + CTU_CAN_FD_ERR_FD = 0x36, + CTU_CAN_FD_CTR_PRES = 0x38, + CTU_CAN_FD_FILTER_A_MASK = 0x3c, + CTU_CAN_FD_FILTER_A_VAL = 0x40, + CTU_CAN_FD_FILTER_B_MASK = 0x44, + CTU_CAN_FD_FILTER_B_VAL = 0x48, + CTU_CAN_FD_FILTER_C_MASK = 0x4c, + CTU_CAN_FD_FILTER_C_VAL = 0x50, + CTU_CAN_FD_FILTER_RAN_LOW = 0x54, + CTU_CAN_FD_FILTER_RAN_HIGH = 0x58, + CTU_CAN_FD_FILTER_CONTROL = 0x5c, + CTU_CAN_FD_FILTER_STATUS = 0x5e, + CTU_CAN_FD_RX_MEM_INFO = 0x60, + CTU_CAN_FD_RX_POINTERS = 0x64, + CTU_CAN_FD_RX_STATUS = 0x68, + CTU_CAN_FD_RX_SETTINGS = 0x6a, + CTU_CAN_FD_RX_DATA = 0x6c, + CTU_CAN_FD_TX_STATUS = 0x70, + CTU_CAN_FD_TX_COMMAND = 0x74, + CTU_CAN_FD_TX_PRIORITY = 0x78, + CTU_CAN_FD_ERR_CAPT = 0x7c, + CTU_CAN_FD_ALC = 0x7e, + 
CTU_CAN_FD_TRV_DELAY = 0x80, + CTU_CAN_FD_SSP_CFG = 0x82, + CTU_CAN_FD_RX_FR_CTR = 0x84, + CTU_CAN_FD_TX_FR_CTR = 0x88, + CTU_CAN_FD_DEBUG_REGISTER = 0x8c, + CTU_CAN_FD_YOLO_REG = 0x90, + CTU_CAN_FD_TIMESTAMP_LOW = 0x94, + CTU_CAN_FD_TIMESTAMP_HIGH = 0x98, + CTU_CAN_FD_TXTB1_DATA_1 = 0x100, + CTU_CAN_FD_TXTB1_DATA_2 = 0x104, + CTU_CAN_FD_TXTB1_DATA_20 = 0x14c, + CTU_CAN_FD_TXTB2_DATA_1 = 0x200, + CTU_CAN_FD_TXTB2_DATA_2 = 0x204, + CTU_CAN_FD_TXTB2_DATA_20 = 0x24c, + CTU_CAN_FD_TXTB3_DATA_1 = 0x300, + CTU_CAN_FD_TXTB3_DATA_2 = 0x304, + CTU_CAN_FD_TXTB3_DATA_20 = 0x34c, + CTU_CAN_FD_TXTB4_DATA_1 = 0x400, + CTU_CAN_FD_TXTB4_DATA_2 = 0x404, + CTU_CAN_FD_TXTB4_DATA_20 = 0x44c, }; /* Register descriptions: */ union ctu_can_fd_device_id_version { - uint32_t u32; - struct ctu_can_fd_device_id_version_s { + uint32_t u32; + struct ctu_can_fd_device_id_version_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* DEVICE_ID */ - uint32_t device_id : 16; + uint32_t device_id : 16; /* VERSION */ - uint32_t ver_minor : 8; - uint32_t ver_major : 8; + uint32_t ver_minor : 8; + uint32_t ver_major : 8; #else - uint32_t ver_major : 8; - uint32_t ver_minor : 8; - uint32_t device_id : 16; + uint32_t ver_major : 8; + uint32_t ver_minor : 8; + uint32_t device_id : 16; #endif - } s; + } s; }; enum ctu_can_fd_device_id_device_id { - CTU_CAN_FD_ID = 0xcafd, + CTU_CAN_FD_ID = 0xcafd, }; union ctu_can_fd_mode_settings { - uint32_t u32; - struct ctu_can_fd_mode_settings_s { + uint32_t u32; + struct ctu_can_fd_mode_settings_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* MODE */ - uint32_t rst : 1; - uint32_t lom : 1; - uint32_t stm : 1; - uint32_t afm : 1; - uint32_t fde : 1; - uint32_t reserved_6_5 : 2; - uint32_t acf : 1; - uint32_t tstm : 1; - uint32_t reserved_15_9 : 7; + uint32_t rst : 1; + uint32_t lom : 1; + uint32_t stm : 1; + uint32_t afm : 1; + uint32_t fde : 1; + uint32_t reserved_6_5 : 2; + uint32_t acf : 1; + uint32_t tstm : 1; + uint32_t reserved_15_9 : 7; /* SETTINGS */ - uint32_t rtrle : 1; - uint32_t rtrth : 4; - uint32_t ilbp : 1; - uint32_t ena : 1; - uint32_t nisofd : 1; - uint32_t pex : 1; - uint32_t reserved_31_25 : 7; + uint32_t rtrle : 1; + uint32_t rtrth : 4; + uint32_t ilbp : 1; + uint32_t ena : 1; + uint32_t nisofd : 1; + uint32_t pex : 1; + uint32_t reserved_31_25 : 7; #else - uint32_t reserved_31_25 : 7; - uint32_t pex : 1; - uint32_t nisofd : 1; - uint32_t ena : 1; - uint32_t ilbp : 1; - uint32_t rtrth : 4; - uint32_t rtrle : 1; - uint32_t reserved_15_9 : 7; - uint32_t tstm : 1; - uint32_t acf : 1; - uint32_t reserved_6_5 : 2; - uint32_t fde : 1; - uint32_t afm : 1; - uint32_t stm : 1; - uint32_t lom : 1; - uint32_t rst : 1; + uint32_t reserved_31_25 : 7; + uint32_t pex : 1; + uint32_t nisofd : 1; + uint32_t ena : 1; + uint32_t ilbp : 1; + uint32_t rtrth : 4; + uint32_t rtrle : 1; + uint32_t reserved_15_9 : 7; + uint32_t tstm : 1; + uint32_t acf : 1; + uint32_t reserved_6_5 : 2; + uint32_t fde : 1; + uint32_t afm : 1; + uint32_t stm : 1; + uint32_t lom : 1; + uint32_t rst : 1; #endif - } s; + } s; }; enum ctu_can_fd_mode_lom { - LOM_DISABLED = 0x0, - LOM_ENABLED = 0x1, + LOM_DISABLED = 0x0, + LOM_ENABLED = 0x1, }; enum ctu_can_fd_mode_stm { - STM_DISABLED = 0x0, - STM_ENABLED = 0x1, + STM_DISABLED = 0x0, + STM_ENABLED = 0x1, }; enum ctu_can_fd_mode_afm { - AFM_DISABLED = 0x0, - AFM_ENABLED = 0x1, + AFM_DISABLED = 0x0, + AFM_ENABLED = 0x1, }; enum ctu_can_fd_mode_fde { - FDE_DISABLE = 0x0, - FDE_ENABLE = 0x1, + FDE_DISABLE = 0x0, + FDE_ENABLE = 0x1, }; enum ctu_can_fd_mode_acf { - ACF_DISABLED = 0x0, - ACF_ENABLED 
= 0x1, + ACF_DISABLED = 0x0, + ACF_ENABLED = 0x1, }; enum ctu_can_fd_settings_rtrle { - RTRLE_DISABLED = 0x0, - RTRLE_ENABLED = 0x1, + RTRLE_DISABLED = 0x0, + RTRLE_ENABLED = 0x1, }; enum ctu_can_fd_settings_ilbp { - INT_LOOP_DISABLED = 0x0, - INT_LOOP_ENABLED = 0x1, + INT_LOOP_DISABLED = 0x0, + INT_LOOP_ENABLED = 0x1, }; enum ctu_can_fd_settings_ena { - CTU_CAN_DISABLED = 0x0, - CTU_CAN_ENABLED = 0x1, + CTU_CAN_DISABLED = 0x0, + CTU_CAN_ENABLED = 0x1, }; enum ctu_can_fd_settings_nisofd { - ISO_FD = 0x0, - NON_ISO_FD = 0x1, + ISO_FD = 0x0, + NON_ISO_FD = 0x1, }; enum ctu_can_fd_settings_pex { - PROTOCOL_EXCEPTION_DISABLED = 0x0, - PROTOCOL_EXCEPTION_ENABLED = 0x1, + PROTOCOL_EXCEPTION_DISABLED = 0x0, + PROTOCOL_EXCEPTION_ENABLED = 0x1, }; union ctu_can_fd_status { - uint32_t u32; - struct ctu_can_fd_status_s { + uint32_t u32; + struct ctu_can_fd_status_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* STATUS */ - uint32_t rxne : 1; - uint32_t dor : 1; - uint32_t txnf : 1; - uint32_t eft : 1; - uint32_t rxs : 1; - uint32_t txs : 1; - uint32_t ewl : 1; - uint32_t idle : 1; - uint32_t reserved_31_8 : 24; + uint32_t rxne : 1; + uint32_t dor : 1; + uint32_t txnf : 1; + uint32_t eft : 1; + uint32_t rxs : 1; + uint32_t txs : 1; + uint32_t ewl : 1; + uint32_t idle : 1; + uint32_t reserved_31_8 : 24; #else - uint32_t reserved_31_8 : 24; - uint32_t idle : 1; - uint32_t ewl : 1; - uint32_t txs : 1; - uint32_t rxs : 1; - uint32_t eft : 1; - uint32_t txnf : 1; - uint32_t dor : 1; - uint32_t rxne : 1; + uint32_t reserved_31_8 : 24; + uint32_t idle : 1; + uint32_t ewl : 1; + uint32_t txs : 1; + uint32_t rxs : 1; + uint32_t eft : 1; + uint32_t txnf : 1; + uint32_t dor : 1; + uint32_t rxne : 1; #endif - } s; + } s; }; union ctu_can_fd_command { - uint32_t u32; - struct ctu_can_fd_command_s { + uint32_t u32; + struct ctu_can_fd_command_s { #ifdef __LITTLE_ENDIAN_BITFIELD - uint32_t reserved_1_0 : 2; + uint32_t reserved_1_0 : 2; /* COMMAND */ - uint32_t rrb : 1; - uint32_t cdo : 1; - uint32_t ercrst : 1; - uint32_t rxfcrst : 1; - uint32_t txfcrst : 1; - uint32_t reserved_31_7 : 25; + uint32_t rrb : 1; + uint32_t cdo : 1; + uint32_t ercrst : 1; + uint32_t rxfcrst : 1; + uint32_t txfcrst : 1; + uint32_t reserved_31_7 : 25; #else - uint32_t reserved_31_7 : 25; - uint32_t txfcrst : 1; - uint32_t rxfcrst : 1; - uint32_t ercrst : 1; - uint32_t cdo : 1; - uint32_t rrb : 1; - uint32_t reserved_1_0 : 2; + uint32_t reserved_31_7 : 25; + uint32_t txfcrst : 1; + uint32_t rxfcrst : 1; + uint32_t ercrst : 1; + uint32_t cdo : 1; + uint32_t rrb : 1; + uint32_t reserved_1_0 : 2; #endif - } s; + } s; }; union ctu_can_fd_int_stat { - uint32_t u32; - struct ctu_can_fd_int_stat_s { + uint32_t u32; + struct ctu_can_fd_int_stat_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* INT_STAT */ - uint32_t rxi : 1; - uint32_t txi : 1; - uint32_t ewli : 1; - uint32_t doi : 1; - uint32_t fcsi : 1; - uint32_t ali : 1; - uint32_t bei : 1; - uint32_t ofi : 1; - uint32_t rxfi : 1; - uint32_t bsi : 1; - uint32_t rbnei : 1; - uint32_t txbhci : 1; - uint32_t reserved_31_12 : 20; + uint32_t rxi : 1; + uint32_t txi : 1; + uint32_t ewli : 1; + uint32_t doi : 1; + uint32_t fcsi : 1; + uint32_t ali : 1; + uint32_t bei : 1; + uint32_t ofi : 1; + uint32_t rxfi : 1; + uint32_t bsi : 1; + uint32_t rbnei : 1; + uint32_t txbhci : 1; + uint32_t reserved_31_12 : 20; #else - uint32_t reserved_31_12 : 20; - uint32_t txbhci : 1; - uint32_t rbnei : 1; - uint32_t bsi : 1; - uint32_t rxfi : 1; - uint32_t ofi : 1; - uint32_t bei : 1; - uint32_t ali : 1; - uint32_t fcsi : 1; - uint32_t 
doi : 1; - uint32_t ewli : 1; - uint32_t txi : 1; - uint32_t rxi : 1; + uint32_t reserved_31_12 : 20; + uint32_t txbhci : 1; + uint32_t rbnei : 1; + uint32_t bsi : 1; + uint32_t rxfi : 1; + uint32_t ofi : 1; + uint32_t bei : 1; + uint32_t ali : 1; + uint32_t fcsi : 1; + uint32_t doi : 1; + uint32_t ewli : 1; + uint32_t txi : 1; + uint32_t rxi : 1; #endif - } s; + } s; }; union ctu_can_fd_int_ena_set { - uint32_t u32; - struct ctu_can_fd_int_ena_set_s { + uint32_t u32; + struct ctu_can_fd_int_ena_set_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* INT_ENA_SET */ - uint32_t int_ena_set : 12; - uint32_t reserved_31_12 : 20; + uint32_t int_ena_set : 12; + uint32_t reserved_31_12 : 20; #else - uint32_t reserved_31_12 : 20; - uint32_t int_ena_set : 12; + uint32_t reserved_31_12 : 20; + uint32_t int_ena_set : 12; #endif - } s; + } s; }; union ctu_can_fd_int_ena_clr { - uint32_t u32; - struct ctu_can_fd_int_ena_clr_s { + uint32_t u32; + struct ctu_can_fd_int_ena_clr_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* INT_ENA_CLR */ - uint32_t int_ena_clr : 12; - uint32_t reserved_31_12 : 20; + uint32_t int_ena_clr : 12; + uint32_t reserved_31_12 : 20; #else - uint32_t reserved_31_12 : 20; - uint32_t int_ena_clr : 12; + uint32_t reserved_31_12 : 20; + uint32_t int_ena_clr : 12; #endif - } s; + } s; }; union ctu_can_fd_int_mask_set { - uint32_t u32; - struct ctu_can_fd_int_mask_set_s { + uint32_t u32; + struct ctu_can_fd_int_mask_set_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* INT_MASK_SET */ - uint32_t int_mask_set : 12; - uint32_t reserved_31_12 : 20; + uint32_t int_mask_set : 12; + uint32_t reserved_31_12 : 20; #else - uint32_t reserved_31_12 : 20; - uint32_t int_mask_set : 12; + uint32_t reserved_31_12 : 20; + uint32_t int_mask_set : 12; #endif - } s; + } s; }; union ctu_can_fd_int_mask_clr { - uint32_t u32; - struct ctu_can_fd_int_mask_clr_s { + uint32_t u32; + struct ctu_can_fd_int_mask_clr_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* INT_MASK_CLR */ - uint32_t int_mask_clr : 12; - uint32_t reserved_31_12 : 20; + uint32_t int_mask_clr : 12; + uint32_t reserved_31_12 : 20; #else - uint32_t reserved_31_12 : 20; - uint32_t int_mask_clr : 12; + uint32_t reserved_31_12 : 20; + uint32_t int_mask_clr : 12; #endif - } s; + } s; }; union ctu_can_fd_btr { - uint32_t u32; - struct ctu_can_fd_btr_s { + uint32_t u32; + struct ctu_can_fd_btr_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* BTR */ - uint32_t prop : 7; - uint32_t ph1 : 6; - uint32_t ph2 : 6; - uint32_t brp : 8; - uint32_t sjw : 5; + uint32_t prop : 7; + uint32_t ph1 : 6; + uint32_t ph2 : 6; + uint32_t brp : 8; + uint32_t sjw : 5; #else - uint32_t sjw : 5; - uint32_t brp : 8; - uint32_t ph2 : 6; - uint32_t ph1 : 6; - uint32_t prop : 7; + uint32_t sjw : 5; + uint32_t brp : 8; + uint32_t ph2 : 6; + uint32_t ph1 : 6; + uint32_t prop : 7; #endif - } s; + } s; }; union ctu_can_fd_btr_fd { - uint32_t u32; - struct ctu_can_fd_btr_fd_s { + uint32_t u32; + struct ctu_can_fd_btr_fd_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* BTR_FD */ - uint32_t prop_fd : 6; - uint32_t reserved_6 : 1; - uint32_t ph1_fd : 5; - uint32_t reserved_12 : 1; - uint32_t ph2_fd : 5; - uint32_t reserved_18 : 1; - uint32_t brp_fd : 8; - uint32_t sjw_fd : 5; + uint32_t prop_fd : 6; + uint32_t reserved_6 : 1; + uint32_t ph1_fd : 5; + uint32_t reserved_12 : 1; + uint32_t ph2_fd : 5; + uint32_t reserved_18 : 1; + uint32_t brp_fd : 8; + uint32_t sjw_fd : 5; #else - uint32_t sjw_fd : 5; - uint32_t brp_fd : 8; - uint32_t reserved_18 : 1; - uint32_t ph2_fd : 5; - uint32_t reserved_12 : 1; - uint32_t ph1_fd : 5; - uint32_t reserved_6 : 
1; - uint32_t prop_fd : 6; + uint32_t sjw_fd : 5; + uint32_t brp_fd : 8; + uint32_t reserved_18 : 1; + uint32_t ph2_fd : 5; + uint32_t reserved_12 : 1; + uint32_t ph1_fd : 5; + uint32_t reserved_6 : 1; + uint32_t prop_fd : 6; #endif - } s; + } s; }; union ctu_can_fd_ewl_erp_fault_state { - uint32_t u32; - struct ctu_can_fd_ewl_erp_fault_state_s { + uint32_t u32; + struct ctu_can_fd_ewl_erp_fault_state_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* EWL */ - uint32_t ew_limit : 8; + uint32_t ew_limit : 8; /* ERP */ - uint32_t erp_limit : 8; + uint32_t erp_limit : 8; /* FAULT_STATE */ - uint32_t era : 1; - uint32_t erp : 1; - uint32_t bof : 1; - uint32_t reserved_31_19 : 13; + uint32_t era : 1; + uint32_t erp : 1; + uint32_t bof : 1; + uint32_t reserved_31_19 : 13; #else - uint32_t reserved_31_19 : 13; - uint32_t bof : 1; - uint32_t erp : 1; - uint32_t era : 1; - uint32_t erp_limit : 8; - uint32_t ew_limit : 8; + uint32_t reserved_31_19 : 13; + uint32_t bof : 1; + uint32_t erp : 1; + uint32_t era : 1; + uint32_t erp_limit : 8; + uint32_t ew_limit : 8; #endif - } s; + } s; }; union ctu_can_fd_rec_tec { - uint32_t u32; - struct ctu_can_fd_rec_tec_s { + uint32_t u32; + struct ctu_can_fd_rec_tec_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* REC */ - uint32_t rec_val : 9; - uint32_t reserved_15_9 : 7; + uint32_t rec_val : 9; + uint32_t reserved_15_9 : 7; /* TEC */ - uint32_t tec_val : 9; - uint32_t reserved_31_25 : 7; + uint32_t tec_val : 9; + uint32_t reserved_31_25 : 7; #else - uint32_t reserved_31_25 : 7; - uint32_t tec_val : 9; - uint32_t reserved_15_9 : 7; - uint32_t rec_val : 9; + uint32_t reserved_31_25 : 7; + uint32_t tec_val : 9; + uint32_t reserved_15_9 : 7; + uint32_t rec_val : 9; #endif - } s; + } s; }; union ctu_can_fd_err_norm_err_fd { - uint32_t u32; - struct ctu_can_fd_err_norm_err_fd_s { + uint32_t u32; + struct ctu_can_fd_err_norm_err_fd_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* ERR_NORM */ - uint32_t err_norm_val : 16; + uint32_t err_norm_val : 16; /* ERR_FD */ - uint32_t err_fd_val : 16; + uint32_t err_fd_val : 16; #else - uint32_t err_fd_val : 16; - uint32_t err_norm_val : 16; + uint32_t err_fd_val : 16; + uint32_t err_norm_val : 16; #endif - } s; + } s; }; union ctu_can_fd_ctr_pres { - uint32_t u32; - struct ctu_can_fd_ctr_pres_s { + uint32_t u32; + struct ctu_can_fd_ctr_pres_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* CTR_PRES */ - uint32_t ctpv : 9; - uint32_t ptx : 1; - uint32_t prx : 1; - uint32_t enorm : 1; - uint32_t efd : 1; - uint32_t reserved_31_13 : 19; + uint32_t ctpv : 9; + uint32_t ptx : 1; + uint32_t prx : 1; + uint32_t enorm : 1; + uint32_t efd : 1; + uint32_t reserved_31_13 : 19; #else - uint32_t reserved_31_13 : 19; - uint32_t efd : 1; - uint32_t enorm : 1; - uint32_t prx : 1; - uint32_t ptx : 1; - uint32_t ctpv : 9; + uint32_t reserved_31_13 : 19; + uint32_t efd : 1; + uint32_t enorm : 1; + uint32_t prx : 1; + uint32_t ptx : 1; + uint32_t ctpv : 9; #endif - } s; + } s; }; union ctu_can_fd_filter_a_mask { - uint32_t u32; - struct ctu_can_fd_filter_a_mask_s { + uint32_t u32; + struct ctu_can_fd_filter_a_mask_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* FILTER_A_MASK */ - uint32_t bit_mask_a_val : 29; - uint32_t reserved_31_29 : 3; + uint32_t bit_mask_a_val : 29; + uint32_t reserved_31_29 : 3; #else - uint32_t reserved_31_29 : 3; - uint32_t bit_mask_a_val : 29; + uint32_t reserved_31_29 : 3; + uint32_t bit_mask_a_val : 29; #endif - } s; + } s; }; union ctu_can_fd_filter_a_val { - uint32_t u32; - struct ctu_can_fd_filter_a_val_s { + uint32_t u32; + struct ctu_can_fd_filter_a_val_s { #ifdef 
__LITTLE_ENDIAN_BITFIELD /* FILTER_A_VAL */ - uint32_t bit_val_a_val : 29; - uint32_t reserved_31_29 : 3; + uint32_t bit_val_a_val : 29; + uint32_t reserved_31_29 : 3; #else - uint32_t reserved_31_29 : 3; - uint32_t bit_val_a_val : 29; + uint32_t reserved_31_29 : 3; + uint32_t bit_val_a_val : 29; #endif - } s; + } s; }; union ctu_can_fd_filter_b_mask { - uint32_t u32; - struct ctu_can_fd_filter_b_mask_s { + uint32_t u32; + struct ctu_can_fd_filter_b_mask_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* FILTER_B_MASK */ - uint32_t bit_mask_b_val : 29; - uint32_t reserved_31_29 : 3; + uint32_t bit_mask_b_val : 29; + uint32_t reserved_31_29 : 3; #else - uint32_t reserved_31_29 : 3; - uint32_t bit_mask_b_val : 29; + uint32_t reserved_31_29 : 3; + uint32_t bit_mask_b_val : 29; #endif - } s; + } s; }; union ctu_can_fd_filter_b_val { - uint32_t u32; - struct ctu_can_fd_filter_b_val_s { + uint32_t u32; + struct ctu_can_fd_filter_b_val_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* FILTER_B_VAL */ - uint32_t bit_val_b_val : 29; - uint32_t reserved_31_29 : 3; + uint32_t bit_val_b_val : 29; + uint32_t reserved_31_29 : 3; #else - uint32_t reserved_31_29 : 3; - uint32_t bit_val_b_val : 29; + uint32_t reserved_31_29 : 3; + uint32_t bit_val_b_val : 29; #endif - } s; + } s; }; union ctu_can_fd_filter_c_mask { - uint32_t u32; - struct ctu_can_fd_filter_c_mask_s { + uint32_t u32; + struct ctu_can_fd_filter_c_mask_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* FILTER_C_MASK */ - uint32_t bit_mask_c_val : 29; - uint32_t reserved_31_29 : 3; + uint32_t bit_mask_c_val : 29; + uint32_t reserved_31_29 : 3; #else - uint32_t reserved_31_29 : 3; - uint32_t bit_mask_c_val : 29; + uint32_t reserved_31_29 : 3; + uint32_t bit_mask_c_val : 29; #endif - } s; + } s; }; union ctu_can_fd_filter_c_val { - uint32_t u32; - struct ctu_can_fd_filter_c_val_s { + uint32_t u32; + struct ctu_can_fd_filter_c_val_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* FILTER_C_VAL */ - uint32_t bit_val_c_val : 29; - uint32_t reserved_31_29 : 3; + uint32_t bit_val_c_val : 29; + uint32_t reserved_31_29 : 3; #else - uint32_t reserved_31_29 : 3; - uint32_t bit_val_c_val : 29; + uint32_t reserved_31_29 : 3; + uint32_t bit_val_c_val : 29; #endif - } s; + } s; }; union ctu_can_fd_filter_ran_low { - uint32_t u32; - struct ctu_can_fd_filter_ran_low_s { + uint32_t u32; + struct ctu_can_fd_filter_ran_low_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* FILTER_RAN_LOW */ - uint32_t bit_ran_low_val : 29; - uint32_t reserved_31_29 : 3; + uint32_t bit_ran_low_val : 29; + uint32_t reserved_31_29 : 3; #else - uint32_t reserved_31_29 : 3; - uint32_t bit_ran_low_val : 29; + uint32_t reserved_31_29 : 3; + uint32_t bit_ran_low_val : 29; #endif - } s; + } s; }; union ctu_can_fd_filter_ran_high { - uint32_t u32; - struct ctu_can_fd_filter_ran_high_s { + uint32_t u32; + struct ctu_can_fd_filter_ran_high_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* FILTER_RAN_HIGH */ - uint32_t bit_ran_high_val : 29; - uint32_t reserved_31_29 : 3; + uint32_t bit_ran_high_val : 29; + uint32_t reserved_31_29 : 3; #else - uint32_t reserved_31_29 : 3; - uint32_t bit_ran_high_val : 29; + uint32_t reserved_31_29 : 3; + uint32_t bit_ran_high_val : 29; #endif - } s; + } s; }; union ctu_can_fd_filter_control_filter_status { - uint32_t u32; - struct ctu_can_fd_filter_control_filter_status_s { + uint32_t u32; + struct ctu_can_fd_filter_control_filter_status_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* FILTER_CONTROL */ - uint32_t fanb : 1; - uint32_t fane : 1; - uint32_t fafb : 1; - uint32_t fafe : 1; - uint32_t fbnb : 1; - uint32_t fbne : 1; - 
uint32_t fbfb : 1; - uint32_t fbfe : 1; - uint32_t fcnb : 1; - uint32_t fcne : 1; - uint32_t fcfb : 1; - uint32_t fcfe : 1; - uint32_t frnb : 1; - uint32_t frne : 1; - uint32_t frfb : 1; - uint32_t frfe : 1; + uint32_t fanb : 1; + uint32_t fane : 1; + uint32_t fafb : 1; + uint32_t fafe : 1; + uint32_t fbnb : 1; + uint32_t fbne : 1; + uint32_t fbfb : 1; + uint32_t fbfe : 1; + uint32_t fcnb : 1; + uint32_t fcne : 1; + uint32_t fcfb : 1; + uint32_t fcfe : 1; + uint32_t frnb : 1; + uint32_t frne : 1; + uint32_t frfb : 1; + uint32_t frfe : 1; /* FILTER_STATUS */ - uint32_t sfa : 1; - uint32_t sfb : 1; - uint32_t sfc : 1; - uint32_t sfr : 1; - uint32_t reserved_31_20 : 12; + uint32_t sfa : 1; + uint32_t sfb : 1; + uint32_t sfc : 1; + uint32_t sfr : 1; + uint32_t reserved_31_20 : 12; #else - uint32_t reserved_31_20 : 12; - uint32_t sfr : 1; - uint32_t sfc : 1; - uint32_t sfb : 1; - uint32_t sfa : 1; - uint32_t frfe : 1; - uint32_t frfb : 1; - uint32_t frne : 1; - uint32_t frnb : 1; - uint32_t fcfe : 1; - uint32_t fcfb : 1; - uint32_t fcne : 1; - uint32_t fcnb : 1; - uint32_t fbfe : 1; - uint32_t fbfb : 1; - uint32_t fbne : 1; - uint32_t fbnb : 1; - uint32_t fafe : 1; - uint32_t fafb : 1; - uint32_t fane : 1; - uint32_t fanb : 1; + uint32_t reserved_31_20 : 12; + uint32_t sfr : 1; + uint32_t sfc : 1; + uint32_t sfb : 1; + uint32_t sfa : 1; + uint32_t frfe : 1; + uint32_t frfb : 1; + uint32_t frne : 1; + uint32_t frnb : 1; + uint32_t fcfe : 1; + uint32_t fcfb : 1; + uint32_t fcne : 1; + uint32_t fcnb : 1; + uint32_t fbfe : 1; + uint32_t fbfb : 1; + uint32_t fbne : 1; + uint32_t fbnb : 1; + uint32_t fafe : 1; + uint32_t fafb : 1; + uint32_t fane : 1; + uint32_t fanb : 1; #endif - } s; + } s; }; union ctu_can_fd_rx_mem_info { - uint32_t u32; - struct ctu_can_fd_rx_mem_info_s { + uint32_t u32; + struct ctu_can_fd_rx_mem_info_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* RX_MEM_INFO */ - uint32_t rx_buff_size : 13; - uint32_t reserved_15_13 : 3; - uint32_t rx_mem_free : 13; - uint32_t reserved_31_29 : 3; + uint32_t rx_buff_size : 13; + uint32_t reserved_15_13 : 3; + uint32_t rx_mem_free : 13; + uint32_t reserved_31_29 : 3; #else - uint32_t reserved_31_29 : 3; - uint32_t rx_mem_free : 13; - uint32_t reserved_15_13 : 3; - uint32_t rx_buff_size : 13; + uint32_t reserved_31_29 : 3; + uint32_t rx_mem_free : 13; + uint32_t reserved_15_13 : 3; + uint32_t rx_buff_size : 13; #endif - } s; + } s; }; union ctu_can_fd_rx_pointers { - uint32_t u32; - struct ctu_can_fd_rx_pointers_s { + uint32_t u32; + struct ctu_can_fd_rx_pointers_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* RX_POINTERS */ - uint32_t rx_wpp : 12; - uint32_t reserved_15_12 : 4; - uint32_t rx_rpp : 12; - uint32_t reserved_31_28 : 4; + uint32_t rx_wpp : 12; + uint32_t reserved_15_12 : 4; + uint32_t rx_rpp : 12; + uint32_t reserved_31_28 : 4; #else - uint32_t reserved_31_28 : 4; - uint32_t rx_rpp : 12; - uint32_t reserved_15_12 : 4; - uint32_t rx_wpp : 12; + uint32_t reserved_31_28 : 4; + uint32_t rx_rpp : 12; + uint32_t reserved_15_12 : 4; + uint32_t rx_wpp : 12; #endif - } s; + } s; }; union ctu_can_fd_rx_status_rx_settings { - uint32_t u32; - struct ctu_can_fd_rx_status_rx_settings_s { + uint32_t u32; + struct ctu_can_fd_rx_status_rx_settings_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* RX_STATUS */ - uint32_t rxe : 1; - uint32_t rxf : 1; - uint32_t reserved_3_2 : 2; - uint32_t rxfrc : 11; - uint32_t reserved_15 : 1; + uint32_t rxe : 1; + uint32_t rxf : 1; + uint32_t reserved_3_2 : 2; + uint32_t rxfrc : 11; + uint32_t reserved_15 : 1; /* RX_SETTINGS */ - uint32_t 
rtsop : 1; - uint32_t reserved_31_17 : 15; + uint32_t rtsop : 1; + uint32_t reserved_31_17 : 15; #else - uint32_t reserved_31_17 : 15; - uint32_t rtsop : 1; - uint32_t reserved_15 : 1; - uint32_t rxfrc : 11; - uint32_t reserved_3_2 : 2; - uint32_t rxf : 1; - uint32_t rxe : 1; + uint32_t reserved_31_17 : 15; + uint32_t rtsop : 1; + uint32_t reserved_15 : 1; + uint32_t rxfrc : 11; + uint32_t reserved_3_2 : 2; + uint32_t rxf : 1; + uint32_t rxe : 1; #endif - } s; + } s; }; enum ctu_can_fd_rx_settings_rtsop { - RTS_END = 0x0, - RTS_BEG = 0x1, + RTS_END = 0x0, + RTS_BEG = 0x1, }; union ctu_can_fd_rx_data { - uint32_t u32; - struct ctu_can_fd_rx_data_s { + uint32_t u32; + struct ctu_can_fd_rx_data_s { /* RX_DATA */ - uint32_t rx_data : 32; - } s; + uint32_t rx_data : 32; + } s; }; union ctu_can_fd_tx_status { - uint32_t u32; - struct ctu_can_fd_tx_status_s { + uint32_t u32; + struct ctu_can_fd_tx_status_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* TX_STATUS */ - uint32_t tx1s : 4; - uint32_t tx2s : 4; - uint32_t tx3s : 4; - uint32_t tx4s : 4; - uint32_t reserved_31_16 : 16; + uint32_t tx1s : 4; + uint32_t tx2s : 4; + uint32_t tx3s : 4; + uint32_t tx4s : 4; + uint32_t reserved_31_16 : 16; #else - uint32_t reserved_31_16 : 16; - uint32_t tx4s : 4; - uint32_t tx3s : 4; - uint32_t tx2s : 4; - uint32_t tx1s : 4; + uint32_t reserved_31_16 : 16; + uint32_t tx4s : 4; + uint32_t tx3s : 4; + uint32_t tx2s : 4; + uint32_t tx1s : 4; #endif - } s; + } s; }; enum ctu_can_fd_tx_status_tx1s { - TXT_RDY = 0x1, - TXT_TRAN = 0x2, - TXT_ABTP = 0x3, - TXT_TOK = 0x4, - TXT_ERR = 0x6, - TXT_ABT = 0x7, - TXT_ETY = 0x8, + TXT_RDY = 0x1, + TXT_TRAN = 0x2, + TXT_ABTP = 0x3, + TXT_TOK = 0x4, + TXT_ERR = 0x6, + TXT_ABT = 0x7, + TXT_ETY = 0x8, }; union ctu_can_fd_tx_command { - uint32_t u32; - struct ctu_can_fd_tx_command_s { + uint32_t u32; + struct ctu_can_fd_tx_command_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* TX_COMMAND */ - uint32_t txce : 1; - uint32_t txcr : 1; - uint32_t txca : 1; - uint32_t reserved_7_3 : 5; - uint32_t txb1 : 1; - uint32_t txb2 : 1; - uint32_t txb3 : 1; - uint32_t txb4 : 1; - uint32_t reserved_31_12 : 20; + uint32_t txce : 1; + uint32_t txcr : 1; + uint32_t txca : 1; + uint32_t reserved_7_3 : 5; + uint32_t txb1 : 1; + uint32_t txb2 : 1; + uint32_t txb3 : 1; + uint32_t txb4 : 1; + uint32_t reserved_31_12 : 20; #else - uint32_t reserved_31_12 : 20; - uint32_t txb4 : 1; - uint32_t txb3 : 1; - uint32_t txb2 : 1; - uint32_t txb1 : 1; - uint32_t reserved_7_3 : 5; - uint32_t txca : 1; - uint32_t txcr : 1; - uint32_t txce : 1; + uint32_t reserved_31_12 : 20; + uint32_t txb4 : 1; + uint32_t txb3 : 1; + uint32_t txb2 : 1; + uint32_t txb1 : 1; + uint32_t reserved_7_3 : 5; + uint32_t txca : 1; + uint32_t txcr : 1; + uint32_t txce : 1; #endif - } s; + } s; }; union ctu_can_fd_tx_priority { - uint32_t u32; - struct ctu_can_fd_tx_priority_s { + uint32_t u32; + struct ctu_can_fd_tx_priority_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* TX_PRIORITY */ - uint32_t txt1p : 3; - uint32_t reserved_3 : 1; - uint32_t txt2p : 3; - uint32_t reserved_7 : 1; - uint32_t txt3p : 3; - uint32_t reserved_11 : 1; - uint32_t txt4p : 3; - uint32_t reserved_31_15 : 17; + uint32_t txt1p : 3; + uint32_t reserved_3 : 1; + uint32_t txt2p : 3; + uint32_t reserved_7 : 1; + uint32_t txt3p : 3; + uint32_t reserved_11 : 1; + uint32_t txt4p : 3; + uint32_t reserved_31_15 : 17; #else - uint32_t reserved_31_15 : 17; - uint32_t txt4p : 3; - uint32_t reserved_11 : 1; - uint32_t txt3p : 3; - uint32_t reserved_7 : 1; - uint32_t txt2p : 3; - uint32_t reserved_3 : 1; - 
uint32_t txt1p : 3; + uint32_t reserved_31_15 : 17; + uint32_t txt4p : 3; + uint32_t reserved_11 : 1; + uint32_t txt3p : 3; + uint32_t reserved_7 : 1; + uint32_t txt2p : 3; + uint32_t reserved_3 : 1; + uint32_t txt1p : 3; #endif - } s; + } s; }; union ctu_can_fd_err_capt_alc { - uint32_t u32; - struct ctu_can_fd_err_capt_alc_s { + uint32_t u32; + struct ctu_can_fd_err_capt_alc_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* ERR_CAPT */ - uint32_t err_pos : 5; - uint32_t err_type : 3; - uint32_t reserved_15_8 : 8; + uint32_t err_pos : 5; + uint32_t err_type : 3; + uint32_t reserved_15_8 : 8; /* ALC */ - uint32_t alc_bit : 5; - uint32_t alc_id_field : 3; - uint32_t reserved_31_24 : 8; + uint32_t alc_bit : 5; + uint32_t alc_id_field : 3; + uint32_t reserved_31_24 : 8; #else - uint32_t reserved_31_24 : 8; - uint32_t alc_id_field : 3; - uint32_t alc_bit : 5; - uint32_t reserved_15_8 : 8; - uint32_t err_type : 3; - uint32_t err_pos : 5; + uint32_t reserved_31_24 : 8; + uint32_t alc_id_field : 3; + uint32_t alc_bit : 5; + uint32_t reserved_15_8 : 8; + uint32_t err_type : 3; + uint32_t err_pos : 5; #endif - } s; + } s; }; enum ctu_can_fd_err_capt_err_pos { - ERC_POS_SOF = 0x0, - ERC_POS_ARB = 0x1, - ERC_POS_CTRL = 0x2, - ERC_POS_DATA = 0x3, - ERC_POS_CRC = 0x4, - ERC_POS_ACK = 0x5, - ERC_POS_EOF = 0x6, - ERC_POS_ERR = 0x7, - ERC_POS_OVRL = 0x8, - ERC_POS_OTHER = 0x1f, + ERC_POS_SOF = 0x0, + ERC_POS_ARB = 0x1, + ERC_POS_CTRL = 0x2, + ERC_POS_DATA = 0x3, + ERC_POS_CRC = 0x4, + ERC_POS_ACK = 0x5, + ERC_POS_EOF = 0x6, + ERC_POS_ERR = 0x7, + ERC_POS_OVRL = 0x8, + ERC_POS_OTHER = 0x1f, }; enum ctu_can_fd_err_capt_err_type { - ERC_BIT_ERR = 0x0, - ERC_CRC_ERR = 0x1, - ERC_FRM_ERR = 0x2, - ERC_ACK_ERR = 0x3, - ERC_STUF_ERR = 0x4, + ERC_BIT_ERR = 0x0, + ERC_CRC_ERR = 0x1, + ERC_FRM_ERR = 0x2, + ERC_ACK_ERR = 0x3, + ERC_STUF_ERR = 0x4, }; enum ctu_can_fd_alc_alc_id_field { - ALC_RSVD = 0x0, - ALC_BASE_ID = 0x1, - ALC_SRR_RTR = 0x2, - ALC_IDE = 0x3, - ALC_EXTENSION = 0x4, - ALC_RTR = 0x5, + ALC_RSVD = 0x0, + ALC_BASE_ID = 0x1, + ALC_SRR_RTR = 0x2, + ALC_IDE = 0x3, + ALC_EXTENSION = 0x4, + ALC_RTR = 0x5, }; union ctu_can_fd_trv_delay_ssp_cfg { - uint32_t u32; - struct ctu_can_fd_trv_delay_ssp_cfg_s { + uint32_t u32; + struct ctu_can_fd_trv_delay_ssp_cfg_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* TRV_DELAY */ - uint32_t trv_delay_value : 7; - uint32_t reserved_15_7 : 9; + uint32_t trv_delay_value : 7; + uint32_t reserved_15_7 : 9; /* SSP_CFG */ - uint32_t ssp_offset : 8; - uint32_t ssp_src : 2; - uint32_t reserved_31_26 : 6; + uint32_t ssp_offset : 8; + uint32_t ssp_src : 2; + uint32_t reserved_31_26 : 6; #else - uint32_t reserved_31_26 : 6; - uint32_t ssp_src : 2; - uint32_t ssp_offset : 8; - uint32_t reserved_15_7 : 9; - uint32_t trv_delay_value : 7; + uint32_t reserved_31_26 : 6; + uint32_t ssp_src : 2; + uint32_t ssp_offset : 8; + uint32_t reserved_15_7 : 9; + uint32_t trv_delay_value : 7; #endif - } s; + } s; }; enum ctu_can_fd_ssp_cfg_ssp_src { - SSP_SRC_MEAS_N_OFFSET = 0x0, - SSP_SRC_NO_SSP = 0x1, - SSP_SRC_OFFSET = 0x2, + SSP_SRC_MEAS_N_OFFSET = 0x0, + SSP_SRC_NO_SSP = 0x1, + SSP_SRC_OFFSET = 0x2, }; union ctu_can_fd_rx_fr_ctr { - uint32_t u32; - struct ctu_can_fd_rx_fr_ctr_s { + uint32_t u32; + struct ctu_can_fd_rx_fr_ctr_s { /* RX_FR_CTR */ - uint32_t rx_fr_ctr_val : 32; - } s; + uint32_t rx_fr_ctr_val : 32; + } s; }; union ctu_can_fd_tx_fr_ctr { - uint32_t u32; - struct ctu_can_fd_tx_fr_ctr_s { + uint32_t u32; + struct ctu_can_fd_tx_fr_ctr_s { /* TX_FR_CTR */ - uint32_t tx_fr_ctr_val : 32; - } s; + uint32_t 
tx_fr_ctr_val : 32; + } s; }; union ctu_can_fd_debug_register { - uint32_t u32; - struct ctu_can_fd_debug_register_s { + uint32_t u32; + struct ctu_can_fd_debug_register_s { #ifdef __LITTLE_ENDIAN_BITFIELD /* DEBUG_REGISTER */ - uint32_t stuff_count : 3; - uint32_t destuff_count : 3; - uint32_t pc_arb : 1; - uint32_t pc_con : 1; - uint32_t pc_dat : 1; - uint32_t pc_stc : 1; - uint32_t pc_crc : 1; - uint32_t pc_crcd : 1; - uint32_t pc_ack : 1; - uint32_t pc_ackd : 1; - uint32_t pc_eof : 1; - uint32_t pc_int : 1; - uint32_t pc_susp : 1; - uint32_t pc_ovr : 1; - uint32_t pc_sof : 1; - uint32_t reserved_31_19 : 13; + uint32_t stuff_count : 3; + uint32_t destuff_count : 3; + uint32_t pc_arb : 1; + uint32_t pc_con : 1; + uint32_t pc_dat : 1; + uint32_t pc_stc : 1; + uint32_t pc_crc : 1; + uint32_t pc_crcd : 1; + uint32_t pc_ack : 1; + uint32_t pc_ackd : 1; + uint32_t pc_eof : 1; + uint32_t pc_int : 1; + uint32_t pc_susp : 1; + uint32_t pc_ovr : 1; + uint32_t pc_sof : 1; + uint32_t reserved_31_19 : 13; #else - uint32_t reserved_31_19 : 13; - uint32_t pc_sof : 1; - uint32_t pc_ovr : 1; - uint32_t pc_susp : 1; - uint32_t pc_int : 1; - uint32_t pc_eof : 1; - uint32_t pc_ackd : 1; - uint32_t pc_ack : 1; - uint32_t pc_crcd : 1; - uint32_t pc_crc : 1; - uint32_t pc_stc : 1; - uint32_t pc_dat : 1; - uint32_t pc_con : 1; - uint32_t pc_arb : 1; - uint32_t destuff_count : 3; - uint32_t stuff_count : 3; + uint32_t reserved_31_19 : 13; + uint32_t pc_sof : 1; + uint32_t pc_ovr : 1; + uint32_t pc_susp : 1; + uint32_t pc_int : 1; + uint32_t pc_eof : 1; + uint32_t pc_ackd : 1; + uint32_t pc_ack : 1; + uint32_t pc_crcd : 1; + uint32_t pc_crc : 1; + uint32_t pc_stc : 1; + uint32_t pc_dat : 1; + uint32_t pc_con : 1; + uint32_t pc_arb : 1; + uint32_t destuff_count : 3; + uint32_t stuff_count : 3; #endif - } s; + } s; }; union ctu_can_fd_yolo_reg { - uint32_t u32; - struct ctu_can_fd_yolo_reg_s { + uint32_t u32; + struct ctu_can_fd_yolo_reg_s { /* YOLO_REG */ - uint32_t yolo_val : 32; - } s; + uint32_t yolo_val : 32; + } s; }; union ctu_can_fd_timestamp_low { - uint32_t u32; - struct ctu_can_fd_timestamp_low_s { + uint32_t u32; + struct ctu_can_fd_timestamp_low_s { /* TIMESTAMP_LOW */ - uint32_t timestamp_low : 32; - } s; + uint32_t timestamp_low : 32; + } s; }; union ctu_can_fd_timestamp_high { - uint32_t u32; - struct ctu_can_fd_timestamp_high_s { + uint32_t u32; + struct ctu_can_fd_timestamp_high_s { /* TIMESTAMP_HIGH */ - uint32_t timestamp_high : 32; - } s; + uint32_t timestamp_high : 32; + } s; }; #endif diff --git a/hw/net/can/ctucan_core.c b/hw/net/can/ctucan_core.c index d171c372e0..f2c3b6a706 100644 --- a/hw/net/can/ctucan_core.c +++ b/hw/net/can/ctucan_core.c @@ -617,7 +617,6 @@ const VMStateDescription vmstate_qemu_ctucan_tx_buffer = { .name = "qemu_ctucan_tx_buffer", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_UINT8_ARRAY(data, CtuCanCoreMsgBuffer, CTUCAN_CORE_MSG_MAX_LEN), VMSTATE_END_OF_LIST() @@ -636,7 +635,6 @@ const VMStateDescription vmstate_ctucan = { .name = "ctucan", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .post_load = ctucan_post_load, .fields = (VMStateField[]) { VMSTATE_UINT32(mode_settings.u32, CtuCanCoreState), diff --git a/hw/net/can/ctucan_core.h b/hw/net/can/ctucan_core.h index bbc09ae067..608307a631 100644 --- a/hw/net/can/ctucan_core.h +++ b/hw/net/can/ctucan_core.h @@ -31,7 +31,7 @@ #include "exec/hwaddr.h" #include "net/can_emu.h" -#ifndef HOST_WORDS_BIGENDIAN +#if !HOST_BIG_ENDIAN 
#define __LITTLE_ENDIAN_BITFIELD 1 #endif diff --git a/hw/net/can/ctucan_pci.c b/hw/net/can/ctucan_pci.c index f1c86cd06a..50f4ea6cd6 100644 --- a/hw/net/can/ctucan_pci.c +++ b/hw/net/can/ctucan_pci.c @@ -215,7 +215,6 @@ static const VMStateDescription vmstate_ctucan_pci = { .name = "ctucan_pci", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_PCI_DEVICE(dev, CtuCanPCIState), VMSTATE_STRUCT(ctucan_state[0], CtuCanPCIState, 0, vmstate_ctucan, diff --git a/hw/net/can/xlnx-zynqmp-can.c b/hw/net/can/xlnx-zynqmp-can.c index 22bb8910fa..e93e6c5e19 100644 --- a/hw/net/can/xlnx-zynqmp-can.c +++ b/hw/net/can/xlnx-zynqmp-can.c @@ -696,30 +696,30 @@ static void update_rx_fifo(XlnxZynqMPCANState *s, const qemu_can_frame *frame) timestamp)); /* First 32 bit of the data. */ - fifo32_push(&s->rx_fifo, deposit32(0, R_TXFIFO_DATA1_DB3_SHIFT, - R_TXFIFO_DATA1_DB3_LENGTH, + fifo32_push(&s->rx_fifo, deposit32(0, R_RXFIFO_DATA1_DB3_SHIFT, + R_RXFIFO_DATA1_DB3_LENGTH, frame->data[0]) | - deposit32(0, R_TXFIFO_DATA1_DB2_SHIFT, - R_TXFIFO_DATA1_DB2_LENGTH, + deposit32(0, R_RXFIFO_DATA1_DB2_SHIFT, + R_RXFIFO_DATA1_DB2_LENGTH, frame->data[1]) | - deposit32(0, R_TXFIFO_DATA1_DB1_SHIFT, - R_TXFIFO_DATA1_DB1_LENGTH, + deposit32(0, R_RXFIFO_DATA1_DB1_SHIFT, + R_RXFIFO_DATA1_DB1_LENGTH, frame->data[2]) | - deposit32(0, R_TXFIFO_DATA1_DB0_SHIFT, - R_TXFIFO_DATA1_DB0_LENGTH, + deposit32(0, R_RXFIFO_DATA1_DB0_SHIFT, + R_RXFIFO_DATA1_DB0_LENGTH, frame->data[3])); /* Last 32 bit of the data. */ - fifo32_push(&s->rx_fifo, deposit32(0, R_TXFIFO_DATA2_DB7_SHIFT, - R_TXFIFO_DATA2_DB7_LENGTH, + fifo32_push(&s->rx_fifo, deposit32(0, R_RXFIFO_DATA2_DB7_SHIFT, + R_RXFIFO_DATA2_DB7_LENGTH, frame->data[4]) | - deposit32(0, R_TXFIFO_DATA2_DB6_SHIFT, - R_TXFIFO_DATA2_DB6_LENGTH, + deposit32(0, R_RXFIFO_DATA2_DB6_SHIFT, + R_RXFIFO_DATA2_DB6_LENGTH, frame->data[5]) | - deposit32(0, R_TXFIFO_DATA2_DB5_SHIFT, - R_TXFIFO_DATA2_DB5_LENGTH, + deposit32(0, R_RXFIFO_DATA2_DB5_SHIFT, + R_RXFIFO_DATA2_DB5_LENGTH, frame->data[6]) | - deposit32(0, R_TXFIFO_DATA2_DB4_SHIFT, - R_TXFIFO_DATA2_DB4_LENGTH, + deposit32(0, R_RXFIFO_DATA2_DB4_SHIFT, + R_RXFIFO_DATA2_DB4_LENGTH, frame->data[7])); ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOK, 1); @@ -1079,7 +1079,7 @@ static void xlnx_zynqmp_can_realize(DeviceState *dev, Error **errp) /* Allocate a new timer. 
*/ s->can_timer = ptimer_init(xlnx_zynqmp_can_ptimer_cb, s, - PTIMER_POLICY_DEFAULT); + PTIMER_POLICY_LEGACY); ptimer_transaction_begin(s->can_timer); diff --git a/hw/net/e1000.c b/hw/net/e1000.c index a30546c5d5..0dfdf47313 100644 --- a/hw/net/e1000.c +++ b/hw/net/e1000.c @@ -107,6 +107,7 @@ struct E1000State_st { e1000x_txd_props props; e1000x_txd_props tso_props; uint16_t tso_frames; + bool busy; } tx; struct { @@ -566,7 +567,7 @@ e1000_send_packet(E1000State *s, const uint8_t *buf, int size) qemu_send_packet(nc, buf, size); } inc_tx_bcast_or_mcast_count(s, buf); - e1000x_increase_size_stats(s->mac_reg, PTCregs, size); + e1000x_increase_size_stats(s->mac_reg, PTCregs, size + 4); } static void @@ -630,10 +631,9 @@ xmit_seg(E1000State *s) } e1000x_inc_reg_if_not_full(s->mac_reg, TPT); - e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size); - s->mac_reg[GPTC] = s->mac_reg[TPT]; - s->mac_reg[GOTCL] = s->mac_reg[TOTL]; - s->mac_reg[GOTCH] = s->mac_reg[TOTH]; + e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size + 4); + e1000x_inc_reg_if_not_full(s->mac_reg, GPTC); + e1000x_grow_8reg_if_not_full(s->mac_reg, GOTCL, s->tx.size + 4); } static void @@ -763,6 +763,11 @@ start_xmit(E1000State *s) return; } + if (s->tx.busy) { + return; + } + s->tx.busy = true; + while (s->mac_reg[TDH] != s->mac_reg[TDT]) { base = tx_desc_base(s) + sizeof(struct e1000_tx_desc) * s->mac_reg[TDH]; @@ -789,6 +794,7 @@ start_xmit(E1000State *s) break; } } + s->tx.busy = false; set_ics(s, 0, cause); } @@ -972,7 +978,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH]; pci_dma_read(d, base, &desc, sizeof(desc)); desc.special = vlan_special; - desc.status |= (vlan_status | E1000_RXD_STAT_DD); + desc.status &= ~E1000_RXD_STAT_DD; if (desc.buffer_addr) { if (desc_offset < size) { size_t iov_copy; @@ -1006,6 +1012,9 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) DBGOUT(RX, "Null RX descriptor!!\n"); } pci_dma_write(d, base, &desc, sizeof(desc)); + desc.status |= (vlan_status | E1000_RXD_STAT_DD); + pci_dma_write(d, base + offsetof(struct e1000_rx_desc, status), + &desc.status, sizeof(desc.status)); if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN]) s->mac_reg[RDH] = 0; diff --git a/hw/net/e1000_regs.h b/hw/net/e1000_regs.h index ae99f58bab..59e050742b 100644 --- a/hw/net/e1000_regs.h +++ b/hw/net/e1000_regs.h @@ -552,21 +552,21 @@ #define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ /* PHY Status Register */ -#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ -#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ -#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ -#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ -#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ -#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ -#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ -#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ -#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ -#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ -#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ -#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ -#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ -#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ -#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ /* PHY Link Partner Ability Register */ #define MII_LPAR_LPACK 0x4000 /* Acked by link partner */ @@ -793,6 +793,7 @@ #define E1000_CTRL_EXT_ASDCHK 0x00001000 /* auto speed detection check */ #define E1000_CTRL_EXT_EE_RST 0x00002000 /* EEPROM reset */ #define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ #define E1000_CTRL_EXT_EIAME 0x01000000 #define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ diff --git a/hw/net/e1000e.c b/hw/net/e1000e.c index ac96f7665a..7523e9f5d2 100644 --- a/hw/net/e1000e.c +++ b/hw/net/e1000e.c @@ -276,25 +276,18 @@ e1000e_unuse_msix_vectors(E1000EState *s, int num_vectors) } } -static bool +static void e1000e_use_msix_vectors(E1000EState *s, int num_vectors) { int i; for (i = 0; i < num_vectors; i++) { - int res = msix_vector_use(PCI_DEVICE(s), i); - if (res < 0) { - trace_e1000e_msix_use_vector_fail(i, res); - e1000e_unuse_msix_vectors(s, i); - return false; - } + msix_vector_use(PCI_DEVICE(s), i); } - return true; } static void e1000e_init_msix(E1000EState *s) { - PCIDevice *d = PCI_DEVICE(s); int res = msix_init(PCI_DEVICE(s), E1000E_MSIX_VEC_NUM, &s->msix, E1000E_MSIX_IDX, E1000E_MSIX_TABLE, @@ -305,9 +298,7 @@ e1000e_init_msix(E1000EState *s) if (res < 0) { trace_e1000e_msix_init_fail(res); } else { - if (!e1000e_use_msix_vectors(s, E1000E_MSIX_VEC_NUM)) { - msix_uninit(d, &s->msix, &s->msix); - } + e1000e_use_msix_vectors(s, E1000E_MSIX_VEC_NUM); } } diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c index 8ae6fb7e14..c71d82ce1d 100644 --- a/hw/net/e1000e_core.c +++ b/hw/net/e1000e_core.c @@ -159,6 +159,8 @@ e1000e_intrmgr_on_throttling_timer(void *opaque) if (msi_enabled(timer->core->owner)) { trace_e1000e_irq_msi_notify_postponed(); + /* Clear msi_causes_pending to fire MSI eventually */ + timer->core->msi_causes_pending = 0; e1000e_set_interrupt_cause(timer->core, 0); } else { trace_e1000e_irq_legacy_notify_postponed(); 
@@ -685,9 +687,8 @@ e1000e_on_tx_done_update_stats(E1000ECore *core, struct NetTxPkt *tx_pkt) g_assert_not_reached(); } - core->mac[GPTC] = core->mac[TPT]; - core->mac[GOTCL] = core->mac[TOTL]; - core->mac[GOTCH] = core->mac[TOTH]; + e1000x_inc_reg_if_not_full(core->mac, GPTC); + e1000x_grow_8reg_if_not_full(core->mac, GOTCL, tot_len); } static void @@ -1362,6 +1363,57 @@ struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info, } } +static inline void +e1000e_pci_dma_write_rx_desc(E1000ECore *core, dma_addr_t addr, + uint8_t *desc, dma_addr_t len) +{ + PCIDevice *dev = core->owner; + + if (e1000e_rx_use_legacy_descriptor(core)) { + struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc; + size_t offset = offsetof(struct e1000_rx_desc, status); + uint8_t status = d->status; + + d->status &= ~E1000_RXD_STAT_DD; + pci_dma_write(dev, addr, desc, len); + + if (status & E1000_RXD_STAT_DD) { + d->status = status; + pci_dma_write(dev, addr + offset, &status, sizeof(status)); + } + } else { + if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) { + union e1000_rx_desc_packet_split *d = + (union e1000_rx_desc_packet_split *) desc; + size_t offset = offsetof(union e1000_rx_desc_packet_split, + wb.middle.status_error); + uint32_t status = d->wb.middle.status_error; + + d->wb.middle.status_error &= ~E1000_RXD_STAT_DD; + pci_dma_write(dev, addr, desc, len); + + if (status & E1000_RXD_STAT_DD) { + d->wb.middle.status_error = status; + pci_dma_write(dev, addr + offset, &status, sizeof(status)); + } + } else { + union e1000_rx_desc_extended *d = + (union e1000_rx_desc_extended *) desc; + size_t offset = offsetof(union e1000_rx_desc_extended, + wb.upper.status_error); + uint32_t status = d->wb.upper.status_error; + + d->wb.upper.status_error &= ~E1000_RXD_STAT_DD; + pci_dma_write(dev, addr, desc, len); + + if (status & E1000_RXD_STAT_DD) { + d->wb.upper.status_error = status; + pci_dma_write(dev, addr + offset, &status, sizeof(status)); + } + } + } +} + typedef struct e1000e_ba_state_st { uint16_t written[MAX_PS_BUFFERS]; uint8_t cur_idx; @@ -1598,7 +1650,7 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, e1000e_write_rx_descr(core, desc, is_last ? core->rx_pkt : NULL, rss_info, do_ps ? ps_hdr_len : 0, &bastate.written); - pci_dma_write(d, base, &desc, core->rx_desc_len); + e1000e_pci_dma_write_rx_desc(core, base, desc, core->rx_desc_len); e1000e_ring_advance(core, rxi, core->rx_desc_len / E1000_MIN_RX_DESC_LEN); @@ -1620,15 +1672,16 @@ e1000e_rx_fix_l4_csum(E1000ECore *core, struct NetRxPkt *pkt) } } +/* Min. octets in an ethernet frame sans FCS */ +#define MIN_BUF_SIZE 60 + ssize_t e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt) { static const int maximum_ethernet_hdr_len = (14 + 4); - /* Min. 
octets in an ethernet frame sans FCS */ - static const int min_buf_size = 60; uint32_t n = 0; - uint8_t min_buf[min_buf_size]; + uint8_t min_buf[MIN_BUF_SIZE]; struct iovec min_iov; uint8_t *filter_buf; size_t size, orig_size; @@ -2607,6 +2660,11 @@ e1000e_mac_icr_read(E1000ECore *core, int index) core->mac[ICR] = 0; } + if (!msix_enabled(core->owner)) { + trace_e1000e_irq_icr_clear_nonmsix_icr_read(); + core->mac[ICR] = 0; + } + if ((core->mac[ICR] & E1000_ICR_ASSERTED) && (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) { trace_e1000e_irq_icr_clear_iame(); diff --git a/hw/net/e1000x_common.c b/hw/net/e1000x_common.c index a8d93870b5..3fdc34f753 100644 --- a/hw/net/e1000x_common.c +++ b/hw/net/e1000x_common.c @@ -217,15 +217,14 @@ e1000x_update_rx_total_stats(uint32_t *mac, e1000x_increase_size_stats(mac, PRCregs, data_fcs_size); e1000x_inc_reg_if_not_full(mac, TPR); - mac[GPRC] = mac[TPR]; + e1000x_inc_reg_if_not_full(mac, GPRC); /* TOR - Total Octets Received: * This register includes bytes received in a packet from the field through the field, inclusively. * Always include FCS length (4) in size. */ e1000x_grow_8reg_if_not_full(mac, TORL, data_size + 4); - mac[GORCL] = mac[TORL]; - mac[GORCH] = mac[TORH]; + e1000x_grow_8reg_if_not_full(mac, GORCL, data_size + 4); } void diff --git a/hw/net/eepro100.c b/hw/net/eepro100.c index 16e95ef9cc..679f52f80f 100644 --- a/hw/net/eepro100.c +++ b/hw/net/eepro100.c @@ -700,6 +700,8 @@ static void set_ru_state(EEPRO100State * s, ru_state_t state) static void dump_statistics(EEPRO100State * s) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; + /* Dump statistical data. Most data is never changed by the emulation * and always 0, so we first just copy the whole block and then those * values which really matter. @@ -707,16 +709,18 @@ static void dump_statistics(EEPRO100State * s) */ pci_dma_write(&s->dev, s->statsaddr, &s->statistics, s->stats_size); stl_le_pci_dma(&s->dev, s->statsaddr + 0, - s->statistics.tx_good_frames); + s->statistics.tx_good_frames, attrs); stl_le_pci_dma(&s->dev, s->statsaddr + 36, - s->statistics.rx_good_frames); + s->statistics.rx_good_frames, attrs); stl_le_pci_dma(&s->dev, s->statsaddr + 48, - s->statistics.rx_resource_errors); + s->statistics.rx_resource_errors, attrs); stl_le_pci_dma(&s->dev, s->statsaddr + 60, - s->statistics.rx_short_frame_errors); + s->statistics.rx_short_frame_errors, attrs); #if 0 - stw_le_pci_dma(&s->dev, s->statsaddr + 76, s->statistics.xmt_tco_frames); - stw_le_pci_dma(&s->dev, s->statsaddr + 78, s->statistics.rcv_tco_frames); + stw_le_pci_dma(&s->dev, s->statsaddr + 76, + s->statistics.xmt_tco_frames, attrs); + stw_le_pci_dma(&s->dev, s->statsaddr + 78, + s->statistics.rcv_tco_frames, attrs); missing("CU dump statistical counters"); #endif } @@ -733,6 +737,7 @@ static void read_cb(EEPRO100State *s) static void tx_command(EEPRO100State *s) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; uint32_t tbd_array = s->tx.tbd_array_addr; uint16_t tcb_bytes = s->tx.tcb_bytes & 0x3fff; /* Sends larger than MAX_ETH_FRAME_SIZE are allowed, up to 2600 bytes. */ @@ -764,15 +769,16 @@ static void tx_command(EEPRO100State *s) } else { /* Flexible mode. */ uint8_t tbd_count = 0; + uint32_t tx_buffer_address; + uint16_t tx_buffer_size; + uint16_t tx_buffer_el; + if (s->has_extended_tcb_support && !(s->configuration[6] & BIT(4))) { /* Extended Flexible TCB. 
*/ for (; tbd_count < 2; tbd_count++) { - uint32_t tx_buffer_address = ldl_le_pci_dma(&s->dev, - tbd_address); - uint16_t tx_buffer_size = lduw_le_pci_dma(&s->dev, - tbd_address + 4); - uint16_t tx_buffer_el = lduw_le_pci_dma(&s->dev, - tbd_address + 6); + ldl_le_pci_dma(&s->dev, tbd_address, &tx_buffer_address, attrs); + lduw_le_pci_dma(&s->dev, tbd_address + 4, &tx_buffer_size, attrs); + lduw_le_pci_dma(&s->dev, tbd_address + 6, &tx_buffer_el, attrs); tbd_address += 8; TRACE(RXTX, logout ("TBD (extended flexible mode): buffer address 0x%08x, size 0x%04x\n", @@ -788,9 +794,9 @@ static void tx_command(EEPRO100State *s) } tbd_address = tbd_array; for (; tbd_count < s->tx.tbd_count; tbd_count++) { - uint32_t tx_buffer_address = ldl_le_pci_dma(&s->dev, tbd_address); - uint16_t tx_buffer_size = lduw_le_pci_dma(&s->dev, tbd_address + 4); - uint16_t tx_buffer_el = lduw_le_pci_dma(&s->dev, tbd_address + 6); + ldl_le_pci_dma(&s->dev, tbd_address, &tx_buffer_address, attrs); + lduw_le_pci_dma(&s->dev, tbd_address + 4, &tx_buffer_size, attrs); + lduw_le_pci_dma(&s->dev, tbd_address + 6, &tx_buffer_el, attrs); tbd_address += 8; TRACE(RXTX, logout ("TBD (flexible mode): buffer address 0x%08x, size 0x%04x\n", @@ -833,6 +839,7 @@ static void set_multicast_list(EEPRO100State *s) static void action_command(EEPRO100State *s) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; /* The loop below won't stop if it gets special handcrafted data. Therefore we limit the number of iterations. */ unsigned max_loop_count = 16; @@ -911,7 +918,7 @@ static void action_command(EEPRO100State *s) } /* Write new status. */ stw_le_pci_dma(&s->dev, s->cb_address, - s->tx.status | ok_status | STATUS_C); + s->tx.status | ok_status | STATUS_C, attrs); if (bit_i) { /* CU completed action. */ eepro100_cx_interrupt(s); @@ -937,6 +944,7 @@ static void action_command(EEPRO100State *s) static void eepro100_cu_command(EEPRO100State * s, uint8_t val) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; cu_state_t cu_state; switch (val) { case CU_NOP: @@ -986,7 +994,7 @@ static void eepro100_cu_command(EEPRO100State * s, uint8_t val) /* Dump statistical counters. */ TRACE(OTHER, logout("val=0x%02x (dump stats)\n", val)); dump_statistics(s); - stl_le_pci_dma(&s->dev, s->statsaddr + s->stats_size, 0xa005); + stl_le_pci_dma(&s->dev, s->statsaddr + s->stats_size, 0xa005, attrs); break; case CU_CMD_BASE: /* Load CU base. */ @@ -997,7 +1005,7 @@ static void eepro100_cu_command(EEPRO100State * s, uint8_t val) /* Dump and reset statistical counters. */ TRACE(OTHER, logout("val=0x%02x (dump stats and reset)\n", val)); dump_statistics(s); - stl_le_pci_dma(&s->dev, s->statsaddr + s->stats_size, 0xa007); + stl_le_pci_dma(&s->dev, s->statsaddr + s->stats_size, 0xa007, attrs); memset(&s->statistics, 0, sizeof(s->statistics)); break; case CU_SRESUME: @@ -1612,6 +1620,7 @@ static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size) * - Magic packets should set bit 30 in power management driver register. * - Interesting packets should set bit 29 in power management driver register. 
*/ + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; EEPRO100State *s = qemu_get_nic_opaque(nc); uint16_t rfd_status = 0xa000; #if defined(CONFIG_PAD_RECEIVED_FRAMES) @@ -1726,9 +1735,9 @@ static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size) TRACE(OTHER, logout("command 0x%04x, link 0x%08x, addr 0x%08x, size %u\n", rfd_command, rx.link, rx.rx_buf_addr, rfd_size)); stw_le_pci_dma(&s->dev, s->ru_base + s->ru_offset + - offsetof(eepro100_rx_t, status), rfd_status); + offsetof(eepro100_rx_t, status), rfd_status, attrs); stw_le_pci_dma(&s->dev, s->ru_base + s->ru_offset + - offsetof(eepro100_rx_t, count), size); + offsetof(eepro100_rx_t, count), size, attrs); /* Early receive interrupt not supported. */ #if 0 eepro100_er_interrupt(s); diff --git a/hw/net/fsl_etsec/etsec.c b/hw/net/fsl_etsec/etsec.c index bd9d62b559..b75d8e3dce 100644 --- a/hw/net/fsl_etsec/etsec.c +++ b/hw/net/fsl_etsec/etsec.c @@ -27,7 +27,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "hw/sysbus.h" #include "hw/irq.h" #include "hw/ptimer.h" @@ -394,7 +393,7 @@ static void etsec_realize(DeviceState *dev, Error **errp) object_get_typename(OBJECT(dev)), dev->id, etsec); qemu_format_nic_info_str(qemu_get_queue(etsec->nic), etsec->conf.macaddr.a); - etsec->ptimer = ptimer_init(etsec_timer_hit, etsec, PTIMER_POLICY_DEFAULT); + etsec->ptimer = ptimer_init(etsec_timer_hit, etsec, PTIMER_POLICY_LEGACY); ptimer_transaction_begin(etsec->ptimer); ptimer_set_freq(etsec->ptimer, 100); ptimer_transaction_commit(etsec->ptimer); @@ -430,7 +429,7 @@ static void etsec_class_init(ObjectClass *klass, void *data) dc->user_creatable = true; } -static TypeInfo etsec_info = { +static const TypeInfo etsec_info = { .name = TYPE_ETSEC_COMMON, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(eTSEC), @@ -444,26 +443,3 @@ static void etsec_register_types(void) } type_init(etsec_register_types) - -DeviceState *etsec_create(hwaddr base, - MemoryRegion * mr, - NICInfo * nd, - qemu_irq tx_irq, - qemu_irq rx_irq, - qemu_irq err_irq) -{ - DeviceState *dev; - - dev = qdev_new("eTSEC"); - qdev_set_nic_properties(dev, nd); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, tx_irq); - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 1, rx_irq); - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 2, err_irq); - - memory_region_add_subregion(mr, base, - SYS_BUS_DEVICE(dev)->mmio[0].memory); - - return dev; -} diff --git a/hw/net/fsl_etsec/etsec.h b/hw/net/fsl_etsec/etsec.h index fddf551544..3c625c955c 100644 --- a/hw/net/fsl_etsec/etsec.h +++ b/hw/net/fsl_etsec/etsec.h @@ -155,13 +155,6 @@ OBJECT_DECLARE_SIMPLE_TYPE(eTSEC, ETSEC_COMMON) #define eTSEC_TRANSMIT 1 #define eTSEC_RECEIVE 2 -DeviceState *etsec_create(hwaddr base, - MemoryRegion *mr, - NICInfo *nd, - qemu_irq tx_irq, - qemu_irq rx_irq, - qemu_irq err_irq); - void etsec_update_irq(eTSEC *etsec); void etsec_walk_tx_ring(eTSEC *etsec, int ring_nbr); diff --git a/hw/net/fsl_etsec/rings.c b/hw/net/fsl_etsec/rings.c index 8f08446415..a32589e33b 100644 --- a/hw/net/fsl_etsec/rings.c +++ b/hw/net/fsl_etsec/rings.c @@ -22,7 +22,6 @@ * THE SOFTWARE. 
*/ #include "qemu/osdep.h" -#include "qemu-common.h" #include "net/checksum.h" #include "qemu/log.h" #include "etsec.h" diff --git a/hw/net/ftgmac100.c b/hw/net/ftgmac100.c index 25685ba3a9..83ef0a783e 100644 --- a/hw/net/ftgmac100.c +++ b/hw/net/ftgmac100.c @@ -453,7 +453,8 @@ static void do_phy_ctl(FTGMAC100State *s) static int ftgmac100_read_bd(FTGMAC100Desc *bd, dma_addr_t addr) { - if (dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd))) { + if (dma_memory_read(&address_space_memory, addr, + bd, sizeof(*bd), MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to read descriptor @ 0x%" HWADDR_PRIx "\n", __func__, addr); return -1; @@ -473,7 +474,8 @@ static int ftgmac100_write_bd(FTGMAC100Desc *bd, dma_addr_t addr) lebd.des1 = cpu_to_le32(bd->des1); lebd.des2 = cpu_to_le32(bd->des2); lebd.des3 = cpu_to_le32(bd->des3); - if (dma_memory_write(&address_space_memory, addr, &lebd, sizeof(lebd))) { + if (dma_memory_write(&address_space_memory, addr, + &lebd, sizeof(lebd), MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to write descriptor @ 0x%" HWADDR_PRIx "\n", __func__, addr); return -1; @@ -554,7 +556,8 @@ static void ftgmac100_do_tx(FTGMAC100State *s, uint32_t tx_ring, len = sizeof(s->frame) - frame_size; } - if (dma_memory_read(&address_space_memory, bd.des3, ptr, len)) { + if (dma_memory_read(&address_space_memory, bd.des3, + ptr, len, MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to read packet @ 0x%x\n", __func__, bd.des3); s->isr |= FTGMAC100_INT_AHB_ERR; @@ -1030,20 +1033,24 @@ static ssize_t ftgmac100_receive(NetClientState *nc, const uint8_t *buf, bd.des1 = lduw_be_p(buf + 14) | FTGMAC100_RXDES1_VLANTAG_AVAIL; if (s->maccr & FTGMAC100_MACCR_RM_VLAN) { - dma_memory_write(&address_space_memory, buf_addr, buf, 12); - dma_memory_write(&address_space_memory, buf_addr + 12, buf + 16, - buf_len - 16); + dma_memory_write(&address_space_memory, buf_addr, buf, 12, + MEMTXATTRS_UNSPECIFIED); + dma_memory_write(&address_space_memory, buf_addr + 12, + buf + 16, buf_len - 16, + MEMTXATTRS_UNSPECIFIED); } else { - dma_memory_write(&address_space_memory, buf_addr, buf, buf_len); + dma_memory_write(&address_space_memory, buf_addr, buf, + buf_len, MEMTXATTRS_UNSPECIFIED); } } else { bd.des1 = 0; - dma_memory_write(&address_space_memory, buf_addr, buf, buf_len); + dma_memory_write(&address_space_memory, buf_addr, buf, buf_len, + MEMTXATTRS_UNSPECIFIED); } buf += buf_len; if (size < 4) { dma_memory_write(&address_space_memory, buf_addr + buf_len, - crc_ptr, 4 - size); + crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED); crc_ptr += 4 - size; } diff --git a/hw/net/imx_fec.c b/hw/net/imx_fec.c index 9c7035bc94..8c11b237de 100644 --- a/hw/net/imx_fec.c +++ b/hw/net/imx_fec.c @@ -387,19 +387,22 @@ static void imx_phy_write(IMXFECState *s, int reg, uint32_t val) static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr) { - dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd)); + dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd), + MEMTXATTRS_UNSPECIFIED); trace_imx_fec_read_bd(addr, bd->flags, bd->length, bd->data); } static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr) { - dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd)); + dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd), + MEMTXATTRS_UNSPECIFIED); } static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr) { - dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd)); + dma_memory_read(&address_space_memory, 
addr, bd, sizeof(*bd), + MEMTXATTRS_UNSPECIFIED); trace_imx_enet_read_bd(addr, bd->flags, bd->length, bd->data, bd->option, bd->status); @@ -407,7 +410,8 @@ static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr) static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr) { - dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd)); + dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd), + MEMTXATTRS_UNSPECIFIED); } static void imx_eth_update(IMXFECState *s) @@ -434,7 +438,7 @@ static void imx_eth_update(IMXFECState *s) * assignment fail. * * To ensure that all versions of Linux work, generate ENET_INT_MAC - * interrrupts on both interrupt lines. This should be changed if and when + * interrupts on both interrupt lines. This should be changed if and when * qemu supports IOMUX. */ if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & @@ -474,7 +478,8 @@ static void imx_fec_do_tx(IMXFECState *s) len = ENET_MAX_FRAME_SIZE - frame_size; s->regs[ENET_EIR] |= ENET_INT_BABT; } - dma_memory_read(&address_space_memory, bd.data, ptr, len); + dma_memory_read(&address_space_memory, bd.data, ptr, len, + MEMTXATTRS_UNSPECIFIED); ptr += len; frame_size += len; if (bd.flags & ENET_BD_L) { @@ -555,7 +560,8 @@ static void imx_enet_do_tx(IMXFECState *s, uint32_t index) len = ENET_MAX_FRAME_SIZE - frame_size; s->regs[ENET_EIR] |= ENET_INT_BABT; } - dma_memory_read(&address_space_memory, bd.data, ptr, len); + dma_memory_read(&address_space_memory, bd.data, ptr, len, + MEMTXATTRS_UNSPECIFIED); ptr += len; frame_size += len; if (bd.flags & ENET_BD_L) { @@ -1103,11 +1109,12 @@ static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf, buf_len += size - 4; } buf_addr = bd.data; - dma_memory_write(&address_space_memory, buf_addr, buf, buf_len); + dma_memory_write(&address_space_memory, buf_addr, buf, buf_len, + MEMTXATTRS_UNSPECIFIED); buf += buf_len; if (size < 4) { dma_memory_write(&address_space_memory, buf_addr + buf_len, - crc_ptr, 4 - size); + crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED); crc_ptr += 4 - size; } bd.flags &= ~ENET_BD_E; @@ -1210,8 +1217,8 @@ static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf, */ const uint8_t zeros[2] = { 0 }; - dma_memory_write(&address_space_memory, buf_addr, - zeros, sizeof(zeros)); + dma_memory_write(&address_space_memory, buf_addr, zeros, + sizeof(zeros), MEMTXATTRS_UNSPECIFIED); buf_addr += sizeof(zeros); buf_len -= sizeof(zeros); @@ -1220,11 +1227,12 @@ static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf, shift16 = false; } - dma_memory_write(&address_space_memory, buf_addr, buf, buf_len); + dma_memory_write(&address_space_memory, buf_addr, buf, buf_len, + MEMTXATTRS_UNSPECIFIED); buf += buf_len; if (size < 4) { dma_memory_write(&address_space_memory, buf_addr + buf_len, - crc_ptr, 4 - size); + crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED); crc_ptr += 4 - size; } bd.flags &= ~ENET_BD_E; diff --git a/hw/net/lan9118.c b/hw/net/lan9118.c index 6aff424cbe..f1cba55967 100644 --- a/hw/net/lan9118.c +++ b/hw/net/lan9118.c @@ -696,6 +696,14 @@ static void do_tx_packet(lan9118_state *s) n = (s->tx_status_fifo_head + s->tx_status_fifo_used) & 511; s->tx_status_fifo[n] = status; s->tx_status_fifo_used++; + + /* + * Generate TSFL interrupt if TX FIFO level exceeds the level + * specified in the FIFO_INT TX Status Level field. + */ + if (s->tx_status_fifo_used > ((s->fifo_int >> 16) & 0xff)) { + s->int_sts |= TSFL_INT; + } if (s->tx_status_fifo_used == 512) { s->int_sts |= TSFF_INT; /* TODO: Stop transmission. 
*/ @@ -1363,7 +1371,7 @@ static void lan9118_realize(DeviceState *dev, Error **errp) s->pmt_ctrl = 1; s->txp = &s->tx_packet; - s->timer = ptimer_init(lan9118_tick, s, PTIMER_POLICY_DEFAULT); + s->timer = ptimer_init(lan9118_tick, s, PTIMER_POLICY_LEGACY); ptimer_transaction_begin(s->timer); ptimer_set_freq(s->timer, 10000); ptimer_set_limit(s->timer, 0xffff, 1); diff --git a/hw/net/mcf_fec.c b/hw/net/mcf_fec.c index 25e3e453ab..8aa27bd322 100644 --- a/hw/net/mcf_fec.c +++ b/hw/net/mcf_fec.c @@ -313,10 +313,10 @@ static void mcf_fec_reset(DeviceState *dev) s->rfsr = 0x500; } -#define MMFR_WRITE_OP (1 << 28) -#define MMFR_READ_OP (2 << 28) -#define MMFR_PHYADDR(v) (((v) >> 23) & 0x1f) -#define MMFR_REGNUM(v) (((v) >> 18) & 0x1f) +#define MMFR_WRITE_OP (1 << 28) +#define MMFR_READ_OP (2 << 28) +#define MMFR_PHYADDR(v) (((v) >> 23) & 0x1f) +#define MMFR_REGNUM(v) (((v) >> 18) & 0x1f) static uint64_t mcf_fec_read_mdio(mcf_fec_state *s) { diff --git a/hw/net/meson.build b/hw/net/meson.build index bdf71f1f40..ebac261542 100644 --- a/hw/net/meson.build +++ b/hw/net/meson.build @@ -26,6 +26,7 @@ softmmu_ss.add(when: 'CONFIG_ALLWINNER_EMAC', if_true: files('allwinner_emac.c') softmmu_ss.add(when: 'CONFIG_ALLWINNER_SUN8I_EMAC', if_true: files('allwinner-sun8i-emac.c')) softmmu_ss.add(when: 'CONFIG_IMX_FEC', if_true: files('imx_fec.c')) softmmu_ss.add(when: 'CONFIG_MSF2', if_true: files('msf2-emac.c')) +softmmu_ss.add(when: 'CONFIG_MARVELL_88W8618', if_true: files('mv88w8618_eth.c')) softmmu_ss.add(when: 'CONFIG_CADENCE', if_true: files('cadence_gem.c')) softmmu_ss.add(when: 'CONFIG_STELLARIS_ENET', if_true: files('stellaris_enet.c')) @@ -45,8 +46,12 @@ specific_ss.add(when: 'CONFIG_XILINX_ETHLITE', if_true: files('xilinx_ethlite.c' softmmu_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('net_rx_pkt.c')) specific_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('virtio-net.c')) -softmmu_ss.add(when: ['CONFIG_VIRTIO_NET', 'CONFIG_VHOST_NET'], if_true: files('vhost_net.c'), if_false: files('vhost_net-stub.c')) -softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost_net-stub.c')) +if have_vhost_net + softmmu_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost_net.c'), if_false: files('vhost_net-stub.c')) + softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost_net-stub.c')) +else + softmmu_ss.add(files('vhost_net-stub.c')) +endif softmmu_ss.add(when: 'CONFIG_ETSEC', if_true: files( 'fsl_etsec/etsec.c', diff --git a/hw/net/msf2-emac.c b/hw/net/msf2-emac.c index 9278fdce0b..db3a04deb1 100644 --- a/hw/net/msf2-emac.c +++ b/hw/net/msf2-emac.c @@ -29,7 +29,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/log.h" #include "qapi/error.h" #include "hw/registerfields.h" @@ -119,14 +118,18 @@ static void emac_load_desc(MSF2EmacState *s, EmacDesc *d, hwaddr desc) d->next = le32_to_cpu(d->next); } -static void emac_store_desc(MSF2EmacState *s, EmacDesc *d, hwaddr desc) +static void emac_store_desc(MSF2EmacState *s, const EmacDesc *d, hwaddr desc) { - /* Convert from host endianness into LE. */ - d->pktaddr = cpu_to_le32(d->pktaddr); - d->pktsize = cpu_to_le32(d->pktsize); - d->next = cpu_to_le32(d->next); + EmacDesc outd; + /* + * Convert from host endianness into LE. We use a local struct because + * calling code may still want to look at the fields afterwards. 
+ */ + outd.pktaddr = cpu_to_le32(d->pktaddr); + outd.pktsize = cpu_to_le32(d->pktsize); + outd.next = cpu_to_le32(d->next); - address_space_write(&s->dma_as, desc, MEMTXATTRS_UNSPECIFIED, d, sizeof *d); + address_space_write(&s->dma_as, desc, MEMTXATTRS_UNSPECIFIED, &outd, sizeof outd); } static void msf2_dma_tx(MSF2EmacState *s) diff --git a/hw/net/mv88w8618_eth.c b/hw/net/mv88w8618_eth.c new file mode 100644 index 0000000000..ef30b0d4a6 --- /dev/null +++ b/hw/net/mv88w8618_eth.c @@ -0,0 +1,403 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Marvell MV88W8618 / Freecom MusicPal emulation. + * + * Copyright (c) 2008 Jan Kiszka + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/net/mv88w8618_eth.h" +#include "migration/vmstate.h" +#include "sysemu/dma.h" +#include "net/net.h" + +#define MP_ETH_SIZE 0x00001000 + +/* Ethernet register offsets */ +#define MP_ETH_SMIR 0x010 +#define MP_ETH_PCXR 0x408 +#define MP_ETH_SDCMR 0x448 +#define MP_ETH_ICR 0x450 +#define MP_ETH_IMR 0x458 +#define MP_ETH_FRDP0 0x480 +#define MP_ETH_FRDP1 0x484 +#define MP_ETH_FRDP2 0x488 +#define MP_ETH_FRDP3 0x48C +#define MP_ETH_CRDP0 0x4A0 +#define MP_ETH_CRDP1 0x4A4 +#define MP_ETH_CRDP2 0x4A8 +#define MP_ETH_CRDP3 0x4AC +#define MP_ETH_CTDP0 0x4E0 +#define MP_ETH_CTDP1 0x4E4 + +/* MII PHY access */ +#define MP_ETH_SMIR_DATA 0x0000FFFF +#define MP_ETH_SMIR_ADDR 0x03FF0000 +#define MP_ETH_SMIR_OPCODE (1 << 26) /* Read value */ +#define MP_ETH_SMIR_RDVALID (1 << 27) + +/* PHY registers */ +#define MP_ETH_PHY1_BMSR 0x00210000 +#define MP_ETH_PHY1_PHYSID1 0x00410000 +#define MP_ETH_PHY1_PHYSID2 0x00610000 + +#define MP_PHY_BMSR_LINK 0x0004 +#define MP_PHY_BMSR_AUTONEG 0x0008 + +#define MP_PHY_88E3015 0x01410E20 + +/* TX descriptor status */ +#define MP_ETH_TX_OWN (1U << 31) + +/* RX descriptor status */ +#define MP_ETH_RX_OWN (1U << 31) + +/* Interrupt cause/mask bits */ +#define MP_ETH_IRQ_RX_BIT 0 +#define MP_ETH_IRQ_RX (1 << MP_ETH_IRQ_RX_BIT) +#define MP_ETH_IRQ_TXHI_BIT 2 +#define MP_ETH_IRQ_TXLO_BIT 3 + +/* Port config bits */ +#define MP_ETH_PCXR_2BSM_BIT 28 /* 2-byte incoming suffix */ + +/* SDMA command bits */ +#define MP_ETH_CMD_TXHI (1 << 23) +#define MP_ETH_CMD_TXLO (1 << 22) + +typedef struct mv88w8618_tx_desc { + uint32_t cmdstat; + uint16_t res; + uint16_t bytes; + uint32_t buffer; + uint32_t next; +} mv88w8618_tx_desc; + +typedef struct mv88w8618_rx_desc { + uint32_t cmdstat; + uint16_t bytes; + uint16_t buffer_size; + uint32_t buffer; + uint32_t next; +} mv88w8618_rx_desc; + +OBJECT_DECLARE_SIMPLE_TYPE(mv88w8618_eth_state, MV88W8618_ETH) + +struct mv88w8618_eth_state { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; + qemu_irq irq; + MemoryRegion *dma_mr; + AddressSpace dma_as; + uint32_t smir; + uint32_t icr; + uint32_t imr; + int mmio_index; + uint32_t vlan_header; + uint32_t tx_queue[2]; + uint32_t rx_queue[4]; + uint32_t frx_queue[4]; + uint32_t cur_rx[4]; + NICState *nic; + NICConf conf; +}; + +static void eth_rx_desc_put(AddressSpace *dma_as, uint32_t addr, + mv88w8618_rx_desc *desc) +{ + cpu_to_le32s(&desc->cmdstat); + cpu_to_le16s(&desc->bytes); + cpu_to_le16s(&desc->buffer_size); + cpu_to_le32s(&desc->buffer); + cpu_to_le32s(&desc->next); + dma_memory_write(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED); +} + +static void eth_rx_desc_get(AddressSpace *dma_as, uint32_t addr, + mv88w8618_rx_desc *desc) +{ + dma_memory_read(dma_as, addr, 
desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED); + le32_to_cpus(&desc->cmdstat); + le16_to_cpus(&desc->bytes); + le16_to_cpus(&desc->buffer_size); + le32_to_cpus(&desc->buffer); + le32_to_cpus(&desc->next); +} + +static ssize_t eth_receive(NetClientState *nc, const uint8_t *buf, size_t size) +{ + mv88w8618_eth_state *s = qemu_get_nic_opaque(nc); + uint32_t desc_addr; + mv88w8618_rx_desc desc; + int i; + + for (i = 0; i < 4; i++) { + desc_addr = s->cur_rx[i]; + if (!desc_addr) { + continue; + } + do { + eth_rx_desc_get(&s->dma_as, desc_addr, &desc); + if ((desc.cmdstat & MP_ETH_RX_OWN) && desc.buffer_size >= size) { + dma_memory_write(&s->dma_as, desc.buffer + s->vlan_header, + buf, size, MEMTXATTRS_UNSPECIFIED); + desc.bytes = size + s->vlan_header; + desc.cmdstat &= ~MP_ETH_RX_OWN; + s->cur_rx[i] = desc.next; + + s->icr |= MP_ETH_IRQ_RX; + if (s->icr & s->imr) { + qemu_irq_raise(s->irq); + } + eth_rx_desc_put(&s->dma_as, desc_addr, &desc); + return size; + } + desc_addr = desc.next; + } while (desc_addr != s->rx_queue[i]); + } + return size; +} + +static void eth_tx_desc_put(AddressSpace *dma_as, uint32_t addr, + mv88w8618_tx_desc *desc) +{ + cpu_to_le32s(&desc->cmdstat); + cpu_to_le16s(&desc->res); + cpu_to_le16s(&desc->bytes); + cpu_to_le32s(&desc->buffer); + cpu_to_le32s(&desc->next); + dma_memory_write(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED); +} + +static void eth_tx_desc_get(AddressSpace *dma_as, uint32_t addr, + mv88w8618_tx_desc *desc) +{ + dma_memory_read(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED); + le32_to_cpus(&desc->cmdstat); + le16_to_cpus(&desc->res); + le16_to_cpus(&desc->bytes); + le32_to_cpus(&desc->buffer); + le32_to_cpus(&desc->next); +} + +static void eth_send(mv88w8618_eth_state *s, int queue_index) +{ + uint32_t desc_addr = s->tx_queue[queue_index]; + mv88w8618_tx_desc desc; + uint32_t next_desc; + uint8_t buf[2048]; + int len; + + do { + eth_tx_desc_get(&s->dma_as, desc_addr, &desc); + next_desc = desc.next; + if (desc.cmdstat & MP_ETH_TX_OWN) { + len = desc.bytes; + if (len < 2048) { + dma_memory_read(&s->dma_as, desc.buffer, buf, len, + MEMTXATTRS_UNSPECIFIED); + qemu_send_packet(qemu_get_queue(s->nic), buf, len); + } + desc.cmdstat &= ~MP_ETH_TX_OWN; + s->icr |= 1 << (MP_ETH_IRQ_TXLO_BIT - queue_index); + eth_tx_desc_put(&s->dma_as, desc_addr, &desc); + } + desc_addr = next_desc; + } while (desc_addr != s->tx_queue[queue_index]); +} + +static uint64_t mv88w8618_eth_read(void *opaque, hwaddr offset, + unsigned size) +{ + mv88w8618_eth_state *s = opaque; + + switch (offset) { + case MP_ETH_SMIR: + if (s->smir & MP_ETH_SMIR_OPCODE) { + switch (s->smir & MP_ETH_SMIR_ADDR) { + case MP_ETH_PHY1_BMSR: + return MP_PHY_BMSR_LINK | MP_PHY_BMSR_AUTONEG | + MP_ETH_SMIR_RDVALID; + case MP_ETH_PHY1_PHYSID1: + return (MP_PHY_88E3015 >> 16) | MP_ETH_SMIR_RDVALID; + case MP_ETH_PHY1_PHYSID2: + return (MP_PHY_88E3015 & 0xFFFF) | MP_ETH_SMIR_RDVALID; + default: + return MP_ETH_SMIR_RDVALID; + } + } + return 0; + + case MP_ETH_ICR: + return s->icr; + + case MP_ETH_IMR: + return s->imr; + + case MP_ETH_FRDP0 ... MP_ETH_FRDP3: + return s->frx_queue[(offset - MP_ETH_FRDP0) / 4]; + + case MP_ETH_CRDP0 ... MP_ETH_CRDP3: + return s->rx_queue[(offset - MP_ETH_CRDP0) / 4]; + + case MP_ETH_CTDP0 ... 
MP_ETH_CTDP1: + return s->tx_queue[(offset - MP_ETH_CTDP0) / 4]; + + default: + return 0; + } +} + +static void mv88w8618_eth_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + mv88w8618_eth_state *s = opaque; + + switch (offset) { + case MP_ETH_SMIR: + s->smir = value; + break; + + case MP_ETH_PCXR: + s->vlan_header = ((value >> MP_ETH_PCXR_2BSM_BIT) & 1) * 2; + break; + + case MP_ETH_SDCMR: + if (value & MP_ETH_CMD_TXHI) { + eth_send(s, 1); + } + if (value & MP_ETH_CMD_TXLO) { + eth_send(s, 0); + } + if (value & (MP_ETH_CMD_TXHI | MP_ETH_CMD_TXLO) && s->icr & s->imr) { + qemu_irq_raise(s->irq); + } + break; + + case MP_ETH_ICR: + s->icr &= value; + break; + + case MP_ETH_IMR: + s->imr = value; + if (s->icr & s->imr) { + qemu_irq_raise(s->irq); + } + break; + + case MP_ETH_FRDP0 ... MP_ETH_FRDP3: + s->frx_queue[(offset - MP_ETH_FRDP0) / 4] = value; + break; + + case MP_ETH_CRDP0 ... MP_ETH_CRDP3: + s->rx_queue[(offset - MP_ETH_CRDP0) / 4] = + s->cur_rx[(offset - MP_ETH_CRDP0) / 4] = value; + break; + + case MP_ETH_CTDP0 ... MP_ETH_CTDP1: + s->tx_queue[(offset - MP_ETH_CTDP0) / 4] = value; + break; + } +} + +static const MemoryRegionOps mv88w8618_eth_ops = { + .read = mv88w8618_eth_read, + .write = mv88w8618_eth_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void eth_cleanup(NetClientState *nc) +{ + mv88w8618_eth_state *s = qemu_get_nic_opaque(nc); + + s->nic = NULL; +} + +static NetClientInfo net_mv88w8618_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .receive = eth_receive, + .cleanup = eth_cleanup, +}; + +static void mv88w8618_eth_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + DeviceState *dev = DEVICE(sbd); + mv88w8618_eth_state *s = MV88W8618_ETH(dev); + + sysbus_init_irq(sbd, &s->irq); + memory_region_init_io(&s->iomem, obj, &mv88w8618_eth_ops, s, + "mv88w8618-eth", MP_ETH_SIZE); + sysbus_init_mmio(sbd, &s->iomem); +} + +static void mv88w8618_eth_realize(DeviceState *dev, Error **errp) +{ + mv88w8618_eth_state *s = MV88W8618_ETH(dev); + + if (!s->dma_mr) { + error_setg(errp, TYPE_MV88W8618_ETH " 'dma-memory' link not set"); + return; + } + + address_space_init(&s->dma_as, s->dma_mr, "emac-dma"); + s->nic = qemu_new_nic(&net_mv88w8618_info, &s->conf, + object_get_typename(OBJECT(dev)), dev->id, s); +} + +static const VMStateDescription mv88w8618_eth_vmsd = { + .name = "mv88w8618_eth", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(smir, mv88w8618_eth_state), + VMSTATE_UINT32(icr, mv88w8618_eth_state), + VMSTATE_UINT32(imr, mv88w8618_eth_state), + VMSTATE_UINT32(vlan_header, mv88w8618_eth_state), + VMSTATE_UINT32_ARRAY(tx_queue, mv88w8618_eth_state, 2), + VMSTATE_UINT32_ARRAY(rx_queue, mv88w8618_eth_state, 4), + VMSTATE_UINT32_ARRAY(frx_queue, mv88w8618_eth_state, 4), + VMSTATE_UINT32_ARRAY(cur_rx, mv88w8618_eth_state, 4), + VMSTATE_END_OF_LIST() + } +}; + +static Property mv88w8618_eth_properties[] = { + DEFINE_NIC_PROPERTIES(mv88w8618_eth_state, conf), + DEFINE_PROP_LINK("dma-memory", mv88w8618_eth_state, dma_mr, + TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void mv88w8618_eth_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &mv88w8618_eth_vmsd; + device_class_set_props(dc, mv88w8618_eth_properties); + dc->realize = mv88w8618_eth_realize; +} + +static const TypeInfo mv88w8618_eth_info = { + .name = TYPE_MV88W8618_ETH, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = 
sizeof(mv88w8618_eth_state), + .instance_init = mv88w8618_eth_init, + .class_init = mv88w8618_eth_class_init, +}; + +static void musicpal_register_types(void) +{ + type_register_static(&mv88w8618_eth_info); +} + +type_init(musicpal_register_types) + diff --git a/hw/net/ne2000-isa.c b/hw/net/ne2000-isa.c index dd6f6e34d3..6ced6775ff 100644 --- a/hw/net/ne2000-isa.c +++ b/hw/net/ne2000-isa.c @@ -68,7 +68,7 @@ static void isa_ne2000_realizefn(DeviceState *dev, Error **errp) ne2000_setup_io(s, DEVICE(isadev), 0x20); isa_register_ioport(isadev, &s->io, isa->iobase); - isa_init_irq(isadev, &s->irq, isa->isairq); + s->irq = isa_get_irq(isadev, isa->isairq); qemu_macaddr_default_if_unset(&s->c.macaddr); ne2000_reset(s); diff --git a/hw/net/ne2000.c b/hw/net/ne2000.c index 6c17ee1ae2..3f31d04efb 100644 --- a/hw/net/ne2000.c +++ b/hw/net/ne2000.c @@ -36,89 +36,89 @@ #define MAX_ETH_FRAME_SIZE 1514 -#define E8390_CMD 0x00 /* The command register (for all pages) */ +#define E8390_CMD 0x00 /* The command register (for all pages) */ /* Page 0 register offsets. */ -#define EN0_CLDALO 0x01 /* Low byte of current local dma addr RD */ -#define EN0_STARTPG 0x01 /* Starting page of ring bfr WR */ -#define EN0_CLDAHI 0x02 /* High byte of current local dma addr RD */ -#define EN0_STOPPG 0x02 /* Ending page +1 of ring bfr WR */ -#define EN0_BOUNDARY 0x03 /* Boundary page of ring bfr RD WR */ -#define EN0_TSR 0x04 /* Transmit status reg RD */ -#define EN0_TPSR 0x04 /* Transmit starting page WR */ -#define EN0_NCR 0x05 /* Number of collision reg RD */ -#define EN0_TCNTLO 0x05 /* Low byte of tx byte count WR */ -#define EN0_FIFO 0x06 /* FIFO RD */ -#define EN0_TCNTHI 0x06 /* High byte of tx byte count WR */ -#define EN0_ISR 0x07 /* Interrupt status reg RD WR */ -#define EN0_CRDALO 0x08 /* low byte of current remote dma address RD */ -#define EN0_RSARLO 0x08 /* Remote start address reg 0 */ -#define EN0_CRDAHI 0x09 /* high byte, current remote dma address RD */ -#define EN0_RSARHI 0x09 /* Remote start address reg 1 */ -#define EN0_RCNTLO 0x0a /* Remote byte count reg WR */ -#define EN0_RTL8029ID0 0x0a /* Realtek ID byte #1 RD */ -#define EN0_RCNTHI 0x0b /* Remote byte count reg WR */ -#define EN0_RTL8029ID1 0x0b /* Realtek ID byte #2 RD */ -#define EN0_RSR 0x0c /* rx status reg RD */ -#define EN0_RXCR 0x0c /* RX configuration reg WR */ -#define EN0_TXCR 0x0d /* TX configuration reg WR */ -#define EN0_COUNTER0 0x0d /* Rcv alignment error counter RD */ -#define EN0_DCFG 0x0e /* Data configuration reg WR */ -#define EN0_COUNTER1 0x0e /* Rcv CRC error counter RD */ -#define EN0_IMR 0x0f /* Interrupt mask reg WR */ -#define EN0_COUNTER2 0x0f /* Rcv missed frame error counter RD */ +#define EN0_CLDALO 0x01 /* Low byte of current local dma addr RD */ +#define EN0_STARTPG 0x01 /* Starting page of ring bfr WR */ +#define EN0_CLDAHI 0x02 /* High byte of current local dma addr RD */ +#define EN0_STOPPG 0x02 /* Ending page +1 of ring bfr WR */ +#define EN0_BOUNDARY 0x03 /* Boundary page of ring bfr RD WR */ +#define EN0_TSR 0x04 /* Transmit status reg RD */ +#define EN0_TPSR 0x04 /* Transmit starting page WR */ +#define EN0_NCR 0x05 /* Number of collision reg RD */ +#define EN0_TCNTLO 0x05 /* Low byte of tx byte count WR */ +#define EN0_FIFO 0x06 /* FIFO RD */ +#define EN0_TCNTHI 0x06 /* High byte of tx byte count WR */ +#define EN0_ISR 0x07 /* Interrupt status reg RD WR */ +#define EN0_CRDALO 0x08 /* low byte of current remote dma address RD */ +#define EN0_RSARLO 0x08 /* Remote start address reg 0 */ +#define EN0_CRDAHI 0x09 
/* high byte, current remote dma address RD */ +#define EN0_RSARHI 0x09 /* Remote start address reg 1 */ +#define EN0_RCNTLO 0x0a /* Remote byte count reg WR */ +#define EN0_RTL8029ID0 0x0a /* Realtek ID byte #1 RD */ +#define EN0_RCNTHI 0x0b /* Remote byte count reg WR */ +#define EN0_RTL8029ID1 0x0b /* Realtek ID byte #2 RD */ +#define EN0_RSR 0x0c /* rx status reg RD */ +#define EN0_RXCR 0x0c /* RX configuration reg WR */ +#define EN0_TXCR 0x0d /* TX configuration reg WR */ +#define EN0_COUNTER0 0x0d /* Rcv alignment error counter RD */ +#define EN0_DCFG 0x0e /* Data configuration reg WR */ +#define EN0_COUNTER1 0x0e /* Rcv CRC error counter RD */ +#define EN0_IMR 0x0f /* Interrupt mask reg WR */ +#define EN0_COUNTER2 0x0f /* Rcv missed frame error counter RD */ #define EN1_PHYS 0x11 #define EN1_CURPAG 0x17 #define EN1_MULT 0x18 -#define EN2_STARTPG 0x21 /* Starting page of ring bfr RD */ -#define EN2_STOPPG 0x22 /* Ending page +1 of ring bfr RD */ +#define EN2_STARTPG 0x21 /* Starting page of ring bfr RD */ +#define EN2_STOPPG 0x22 /* Ending page +1 of ring bfr RD */ -#define EN3_CONFIG0 0x33 -#define EN3_CONFIG1 0x34 -#define EN3_CONFIG2 0x35 -#define EN3_CONFIG3 0x36 +#define EN3_CONFIG0 0x33 +#define EN3_CONFIG1 0x34 +#define EN3_CONFIG2 0x35 +#define EN3_CONFIG3 0x36 /* Register accessed at EN_CMD, the 8390 base addr. */ -#define E8390_STOP 0x01 /* Stop and reset the chip */ -#define E8390_START 0x02 /* Start the chip, clear reset */ -#define E8390_TRANS 0x04 /* Transmit a frame */ -#define E8390_RREAD 0x08 /* Remote read */ -#define E8390_RWRITE 0x10 /* Remote write */ -#define E8390_NODMA 0x20 /* Remote DMA */ -#define E8390_PAGE0 0x00 /* Select page chip registers */ -#define E8390_PAGE1 0x40 /* using the two high-order bits */ -#define E8390_PAGE2 0x80 /* Page 3 is invalid. */ +#define E8390_STOP 0x01 /* Stop and reset the chip */ +#define E8390_START 0x02 /* Start the chip, clear reset */ +#define E8390_TRANS 0x04 /* Transmit a frame */ +#define E8390_RREAD 0x08 /* Remote read */ +#define E8390_RWRITE 0x10 /* Remote write */ +#define E8390_NODMA 0x20 /* Remote DMA */ +#define E8390_PAGE0 0x00 /* Select page chip registers */ +#define E8390_PAGE1 0x40 /* using the two high-order bits */ +#define E8390_PAGE2 0x80 /* Page 3 is invalid. 
*/ /* Bits in EN0_ISR - Interrupt status register */ -#define ENISR_RX 0x01 /* Receiver, no error */ -#define ENISR_TX 0x02 /* Transmitter, no error */ -#define ENISR_RX_ERR 0x04 /* Receiver, with error */ -#define ENISR_TX_ERR 0x08 /* Transmitter, with error */ -#define ENISR_OVER 0x10 /* Receiver overwrote the ring */ -#define ENISR_COUNTERS 0x20 /* Counters need emptying */ -#define ENISR_RDC 0x40 /* remote dma complete */ -#define ENISR_RESET 0x80 /* Reset completed */ -#define ENISR_ALL 0x3f /* Interrupts we will enable */ +#define ENISR_RX 0x01 /* Receiver, no error */ +#define ENISR_TX 0x02 /* Transmitter, no error */ +#define ENISR_RX_ERR 0x04 /* Receiver, with error */ +#define ENISR_TX_ERR 0x08 /* Transmitter, with error */ +#define ENISR_OVER 0x10 /* Receiver overwrote the ring */ +#define ENISR_COUNTERS 0x20 /* Counters need emptying */ +#define ENISR_RDC 0x40 /* remote dma complete */ +#define ENISR_RESET 0x80 /* Reset completed */ +#define ENISR_ALL 0x3f /* Interrupts we will enable */ /* Bits in received packet status byte and EN0_RSR*/ -#define ENRSR_RXOK 0x01 /* Received a good packet */ -#define ENRSR_CRC 0x02 /* CRC error */ -#define ENRSR_FAE 0x04 /* frame alignment error */ -#define ENRSR_FO 0x08 /* FIFO overrun */ -#define ENRSR_MPA 0x10 /* missed pkt */ -#define ENRSR_PHY 0x20 /* physical/multicast address */ -#define ENRSR_DIS 0x40 /* receiver disable. set in monitor mode */ -#define ENRSR_DEF 0x80 /* deferring */ +#define ENRSR_RXOK 0x01 /* Received a good packet */ +#define ENRSR_CRC 0x02 /* CRC error */ +#define ENRSR_FAE 0x04 /* frame alignment error */ +#define ENRSR_FO 0x08 /* FIFO overrun */ +#define ENRSR_MPA 0x10 /* missed pkt */ +#define ENRSR_PHY 0x20 /* physical/multicast address */ +#define ENRSR_DIS 0x40 /* receiver disable. set in monitor mode */ +#define ENRSR_DEF 0x80 /* deferring */ /* Transmitted packet status, EN0_TSR. */ -#define ENTSR_PTX 0x01 /* Packet transmitted without error */ -#define ENTSR_ND 0x02 /* The transmit wasn't deferred. */ -#define ENTSR_COL 0x04 /* The transmit collided at least once. */ +#define ENTSR_PTX 0x01 /* Packet transmitted without error */ +#define ENTSR_ND 0x02 /* The transmit wasn't deferred. */ +#define ENTSR_COL 0x04 /* The transmit collided at least once. */ #define ENTSR_ABT 0x08 /* The transmit collided 16 times, and was deferred. */ -#define ENTSR_CRS 0x10 /* The carrier sense was lost. */ +#define ENTSR_CRS 0x10 /* The carrier sense was lost. */ #define ENTSR_FU 0x20 /* A "FIFO underrun" occurred during transmit. */ -#define ENTSR_CDH 0x40 /* The collision detect "heartbeat" signal was lost. */ +#define ENTSR_CDH 0x40 /* The collision detect "heartbeat" signal was lost. */ #define ENTSR_OWC 0x80 /* There was an out-of-window collision. 
*/ void ne2000_reset(NE2000State *s) @@ -425,13 +425,13 @@ static uint32_t ne2000_ioport_read(void *opaque, uint32_t addr) ret = 0x43; break; case EN3_CONFIG0: - ret = 0; /* 10baseT media */ + ret = 0; /* 10baseT media */ break; case EN3_CONFIG2: - ret = 0x40; /* 10baseT active */ + ret = 0x40; /* 10baseT active */ break; case EN3_CONFIG3: - ret = 0x40; /* Full duplex */ + ret = 0x40; /* Full duplex */ break; default: ret = 0x00; diff --git a/hw/net/npcm7xx_emc.c b/hw/net/npcm7xx_emc.c index 7c892f820f..7c86bb52e5 100644 --- a/hw/net/npcm7xx_emc.c +++ b/hw/net/npcm7xx_emc.c @@ -32,7 +32,6 @@ /* For crc32 */ #include -#include "qemu-common.h" #include "hw/irq.h" #include "hw/qdev-clock.h" #include "hw/qdev-properties.h" @@ -200,7 +199,8 @@ static void emc_update_irq_from_reg_change(NPCM7xxEMCState *emc) static int emc_read_tx_desc(dma_addr_t addr, NPCM7xxEMCTxDesc *desc) { - if (dma_memory_read(&address_space_memory, addr, desc, sizeof(*desc))) { + if (dma_memory_read(&address_space_memory, addr, desc, + sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%" HWADDR_PRIx "\n", __func__, addr); return -1; @@ -221,7 +221,7 @@ static int emc_write_tx_desc(const NPCM7xxEMCTxDesc *desc, dma_addr_t addr) le_desc.status_and_length = cpu_to_le32(desc->status_and_length); le_desc.ntxdsa = cpu_to_le32(desc->ntxdsa); if (dma_memory_write(&address_space_memory, addr, &le_desc, - sizeof(le_desc))) { + sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%" HWADDR_PRIx "\n", __func__, addr); return -1; @@ -231,7 +231,8 @@ static int emc_write_tx_desc(const NPCM7xxEMCTxDesc *desc, dma_addr_t addr) static int emc_read_rx_desc(dma_addr_t addr, NPCM7xxEMCRxDesc *desc) { - if (dma_memory_read(&address_space_memory, addr, desc, sizeof(*desc))) { + if (dma_memory_read(&address_space_memory, addr, desc, + sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%" HWADDR_PRIx "\n", __func__, addr); return -1; @@ -252,7 +253,7 @@ static int emc_write_rx_desc(const NPCM7xxEMCRxDesc *desc, dma_addr_t addr) le_desc.reserved = cpu_to_le32(desc->reserved); le_desc.nrxdsa = cpu_to_le32(desc->nrxdsa); if (dma_memory_write(&address_space_memory, addr, &le_desc, - sizeof(le_desc))) { + sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%" HWADDR_PRIx "\n", __func__, addr); return -1; @@ -284,6 +285,12 @@ static void emc_halt_rx(NPCM7xxEMCState *emc, uint32_t mista_flag) emc_set_mista(emc, mista_flag); } +static void emc_enable_rx_and_flush(NPCM7xxEMCState *emc) +{ + emc->rx_active = true; + qemu_flush_queued_packets(qemu_get_queue(emc->nic)); +} + static void emc_set_next_tx_descriptor(NPCM7xxEMCState *emc, const NPCM7xxEMCTxDesc *tx_desc, uint32_t desc_addr) @@ -360,7 +367,8 @@ static void emc_try_send_next_packet(NPCM7xxEMCState *emc) buf = malloced_buf; } - if (dma_memory_read(&address_space_memory, next_buf_addr, buf, length)) { + if (dma_memory_read(&address_space_memory, next_buf_addr, buf, + length, MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read packet @ 0x%x\n", __func__, next_buf_addr); emc_set_mista(emc, REG_MISTA_TXBERR); @@ -545,10 +553,11 @@ static ssize_t emc_receive(NetClientState *nc, const uint8_t *buf, size_t len1) buf_addr = rx_desc.rxbsa; emc->regs[REG_CRXBSA] = buf_addr; - if (dma_memory_write(&address_space_memory, buf_addr, buf, len) || + if 
(dma_memory_write(&address_space_memory, buf_addr, buf, + len, MEMTXATTRS_UNSPECIFIED) || (!(emc->regs[REG_MCMDR] & REG_MCMDR_SPCRC) && - dma_memory_write(&address_space_memory, buf_addr + len, crc_ptr, - 4))) { + dma_memory_write(&address_space_memory, buf_addr + len, + crc_ptr, 4, MEMTXATTRS_UNSPECIFIED))) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Bus error writing packet\n", __func__); emc_set_mista(emc, REG_MISTA_RXBERR); @@ -581,13 +590,6 @@ static ssize_t emc_receive(NetClientState *nc, const uint8_t *buf, size_t len1) return len; } -static void emc_try_receive_next_packet(NPCM7xxEMCState *emc) -{ - if (emc_can_receive(qemu_get_queue(emc->nic))) { - qemu_flush_queued_packets(qemu_get_queue(emc->nic)); - } -} - static uint64_t npcm7xx_emc_read(void *opaque, hwaddr offset, unsigned size) { NPCM7xxEMCState *emc = opaque; @@ -703,7 +705,7 @@ static void npcm7xx_emc_write(void *opaque, hwaddr offset, emc->regs[REG_MGSTA] |= REG_MGSTA_RXHA; } if (value & REG_MCMDR_RXON) { - emc->rx_active = true; + emc_enable_rx_and_flush(emc); } else { emc_halt_rx(emc, 0); } @@ -739,8 +741,7 @@ static void npcm7xx_emc_write(void *opaque, hwaddr offset, break; case REG_RSDR: if (emc->regs[REG_MCMDR] & REG_MCMDR_RXON) { - emc->rx_active = true; - emc_try_receive_next_packet(emc); + emc_enable_rx_and_flush(emc); } break; case REG_MIIDA: diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c index dcd3fc4948..e63e524913 100644 --- a/hw/net/pcnet.c +++ b/hw/net/pcnet.c @@ -370,7 +370,7 @@ static inline void pcnet_rmd_load(PCNetState *s, struct pcnet_RMD *rmd, uint32_t rbadr; int16_t buf_length; int16_t msg_length; - } rda; + } rda; s->phys_mem_read(s->dma_opaque, addr, (void *)&rda, sizeof(rda), 0); rmd->rbadr = le32_to_cpu(rda.rbadr) & 0xffffff; rmd->buf_length = le16_to_cpu(rda.buf_length); @@ -524,77 +524,77 @@ static inline void pcnet_rmd_store(PCNetState *s, struct pcnet_RMD *rmd, be16_to_cpu(hdr->ether_type)); \ } while (0) -#define CRC(crc, ch) (crc = (crc >> 8) ^ crctab[(crc ^ (ch)) & 0xff]) +#define CRC(crc, ch) (crc = (crc >> 8) ^ crctab[(crc ^ (ch)) & 0xff]) /* generated using the AUTODIN II polynomial - * x^32 + x^26 + x^23 + x^22 + x^16 + - * x^12 + x^11 + x^10 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + 1 + * x^32 + x^26 + x^23 + x^22 + x^16 + + * x^12 + x^11 + x^10 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + 1 */ static const uint32_t crctab[256] = { - 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, - 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, - 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, - 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, - 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, - 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, - 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, - 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, - 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, - 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, - 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, - 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, - 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, - 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, - 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, - 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, - 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, - 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, - 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, - 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, - 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, - 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, - 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, - 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 
0xfbd44c65, - 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, - 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, - 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, - 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, - 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, - 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, - 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, - 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, - 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, - 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, - 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, - 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, - 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, - 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, - 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, - 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, - 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, - 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, - 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, - 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, - 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, - 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, - 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, - 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, - 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, - 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, - 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, - 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, - 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, - 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, - 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, - 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, - 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, - 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, - 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, - 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, - 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, - 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, - 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, - 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d, + 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, + 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, + 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, + 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, + 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, + 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, + 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, + 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, + 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, + 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, + 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, + 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, + 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, + 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, + 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, + 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, + 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, + 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, + 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, + 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, + 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, + 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, + 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, + 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, + 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, + 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, + 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, + 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, + 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, + 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, + 0x5edef90e, 0x29d9c998, 0xb0d09822, 
0xc7d7a8b4, + 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, + 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, + 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, + 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, + 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, + 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, + 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, + 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, + 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, + 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, + 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, + 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, + 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, + 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, + 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, + 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, + 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, + 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, + 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, + 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, + 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, + 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, + 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, + 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, + 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, + 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, + 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, + 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, + 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, + 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, + 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, + 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, + 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d, }; static inline int padr_match(PCNetState *s, const uint8_t *buf, int size) diff --git a/hw/net/pcnet.h b/hw/net/pcnet.h index f49b213c57..eb7f46aab3 100644 --- a/hw/net/pcnet.h +++ b/hw/net/pcnet.h @@ -4,8 +4,8 @@ #define PCNET_IOPORT_SIZE 0x20 #define PCNET_PNPMMIO_SIZE 0x20 -#define PCNET_LOOPTEST_CRC 1 -#define PCNET_LOOPTEST_NOCRC 2 +#define PCNET_LOOPTEST_CRC 1 +#define PCNET_LOOPTEST_NOCRC 2 #include "exec/memory.h" #include "hw/irq.h" diff --git a/hw/net/rocker/rocker.c b/hw/net/rocker/rocker.c index 31f2340fb9..281d43e6cf 100644 --- a/hw/net/rocker/rocker.c +++ b/hw/net/rocker/rocker.c @@ -1010,7 +1010,7 @@ static uint64_t rocker_port_phys_link_status(Rocker *r) FpPort *port = r->fp_port[i]; if (fp_port_get_link_up(port)) { - status |= 1 << (i + 1); + status |= 1ULL << (i + 1); } } return status; @@ -1025,7 +1025,7 @@ static uint64_t rocker_port_phys_enable_read(Rocker *r) FpPort *port = r->fp_port[i]; if (fp_port_enabled(port)) { - ret |= 1 << (i + 1); + ret |= 1ULL << (i + 1); } } return ret; @@ -1212,24 +1212,14 @@ static void rocker_msix_vectors_unuse(Rocker *r, } } -static int rocker_msix_vectors_use(Rocker *r, - unsigned int num_vectors) +static void rocker_msix_vectors_use(Rocker *r, unsigned int num_vectors) { PCIDevice *dev = PCI_DEVICE(r); - int err; int i; for (i = 0; i < num_vectors; i++) { - err = msix_vector_use(dev, i); - if (err) { - goto rollback; - } + msix_vector_use(dev, i); } - return 0; - -rollback: - rocker_msix_vectors_unuse(r, i); - return err; } static int rocker_msix_init(Rocker *r, Error **errp) @@ -1247,16 +1237,9 @@ static int rocker_msix_init(Rocker *r, Error **errp) return err; } - err = rocker_msix_vectors_use(r, ROCKER_MSIX_VEC_COUNT(r->fp_ports)); - if (err) { - goto err_msix_vectors_use; - } + rocker_msix_vectors_use(r, ROCKER_MSIX_VEC_COUNT(r->fp_ports)); return 0; - -err_msix_vectors_use: - msix_uninit(dev, 
&r->msix_bar, &r->msix_bar); - return err; } static void rocker_msix_uninit(Rocker *r) diff --git a/hw/net/rocker/rocker.h b/hw/net/rocker/rocker.h index 412fa44d01..f85354d9d1 100644 --- a/hw/net/rocker/rocker.h +++ b/hw/net/rocker/rocker.h @@ -30,7 +30,7 @@ fprintf(stderr, "%s ROCKER: " fmt, nowstr, ## __VA_ARGS__);\ } while (0) #else -static inline GCC_FMT_ATTR(1, 2) int DPRINTF(const char *fmt, ...) +static inline G_GNUC_PRINTF(1, 2) int DPRINTF(const char *fmt, ...) { return 0; } diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c index 90b4fc63ce..eb679d7c40 100644 --- a/hw/net/rtl8139.c +++ b/hw/net/rtl8139.c @@ -77,7 +77,6 @@ ( ( input ) & ( size - 1 ) ) #define ETHER_TYPE_LEN 2 -#define ETH_MTU 1500 #define VLAN_TCI_LEN 2 #define VLAN_HLEN (ETHER_TYPE_LEN + VLAN_TCI_LEN) @@ -86,7 +85,7 @@ # define DPRINTF(fmt, ...) \ do { fprintf(stderr, "RTL8139: " fmt, ## __VA_ARGS__); } while (0) #else -static inline GCC_FMT_ATTR(1, 2) int DPRINTF(const char *fmt, ...) +static inline G_GNUC_PRINTF(1, 2) int DPRINTF(const char *fmt, ...) { return 0; } @@ -1934,8 +1933,9 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s) #define CP_TX_LS (1<<28) /* large send packet flag */ #define CP_TX_LGSEN (1<<27) -/* large send MSS mask, bits 16...25 */ -#define CP_TC_LGSEN_MSS_MASK ((1 << 12) - 1) +/* large send MSS mask, bits 16...26 */ +#define CP_TC_LGSEN_MSS_SHIFT 16 +#define CP_TC_LGSEN_MSS_MASK ((1 << 11) - 1) /* IP checksum offload flag */ #define CP_TX_IPCS (1<<18) @@ -2027,18 +2027,21 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s) s->currCPlusTxDesc = 0; } + /* Build the Tx Status Descriptor */ + uint32_t tx_status = txdw0; + /* transfer ownership to target */ - txdw0 &= ~CP_TX_OWN; + tx_status &= ~CP_TX_OWN; /* reset error indicator bits */ - txdw0 &= ~CP_TX_STATUS_UNF; - txdw0 &= ~CP_TX_STATUS_TES; - txdw0 &= ~CP_TX_STATUS_OWC; - txdw0 &= ~CP_TX_STATUS_LNKF; - txdw0 &= ~CP_TX_STATUS_EXC; + tx_status &= ~CP_TX_STATUS_UNF; + tx_status &= ~CP_TX_STATUS_TES; + tx_status &= ~CP_TX_STATUS_OWC; + tx_status &= ~CP_TX_STATUS_LNKF; + tx_status &= ~CP_TX_STATUS_EXC; /* update ring data */ - val = cpu_to_le32(txdw0); + val = cpu_to_le32(tx_status); pci_dma_write(d, cplus_tx_ring_desc, (uint8_t *)&val, 4); /* Now decide if descriptor being processed is holding the last segment of packet */ @@ -2132,7 +2135,7 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s) } ip_data_len -= hlen; - if (txdw0 & CP_TX_IPCS) + if (!(txdw0 & CP_TX_LGSEN) && (txdw0 & CP_TX_IPCS)) { DPRINTF("+++ C+ mode need IP checksum\n"); @@ -2149,14 +2152,17 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s) goto skip_offload; } - int large_send_mss = (txdw0 >> 16) & CP_TC_LGSEN_MSS_MASK; + int large_send_mss = (txdw0 >> CP_TC_LGSEN_MSS_SHIFT) & + CP_TC_LGSEN_MSS_MASK; + if (large_send_mss == 0) { + goto skip_offload; + } - DPRINTF("+++ C+ mode offloaded task TSO MTU=%d IP data %d " - "frame data %d specified MSS=%d\n", ETH_MTU, + DPRINTF("+++ C+ mode offloaded task TSO IP data %d " + "frame data %d specified MSS=%d\n", ip_data_len, saved_size - ETH_HLEN, large_send_mss); int tcp_send_offset = 0; - int send_count = 0; /* maximum IP header length is 60 bytes */ uint8_t saved_ip_header[60]; @@ -2178,25 +2184,22 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s) goto skip_offload; } - /* ETH_MTU = ip header len + tcp header len + payload */ int tcp_data_len = ip_data_len - tcp_hlen; - int tcp_chunk_size = ETH_MTU - hlen - tcp_hlen; DPRINTF("+++ C+ mode TSO IP data len %d TCP hlen %d TCP " - "data len %d TCP 
chunk size %d\n", ip_data_len, - tcp_hlen, tcp_data_len, tcp_chunk_size); + "data len %d\n", ip_data_len, tcp_hlen, tcp_data_len); /* note the cycle below overwrites IP header data, but restores it from saved_ip_header before sending packet */ int is_last_frame = 0; - for (tcp_send_offset = 0; tcp_send_offset < tcp_data_len; tcp_send_offset += tcp_chunk_size) + for (tcp_send_offset = 0; tcp_send_offset < tcp_data_len; tcp_send_offset += large_send_mss) { - uint16_t chunk_size = tcp_chunk_size; + uint16_t chunk_size = large_send_mss; /* check if this is the last frame */ - if (tcp_send_offset + tcp_chunk_size >= tcp_data_len) + if (tcp_send_offset + large_send_mss >= tcp_data_len) { is_last_frame = 1; chunk_size = tcp_data_len - tcp_send_offset; @@ -2245,7 +2248,7 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s) ip->ip_len = cpu_to_be16(hlen + tcp_hlen + chunk_size); /* increment IP id for subsequent frames */ - ip->ip_id = cpu_to_be16(tcp_send_offset/tcp_chunk_size + be16_to_cpu(ip->ip_id)); + ip->ip_id = cpu_to_be16(tcp_send_offset/large_send_mss + be16_to_cpu(ip->ip_id)); ip->ip_sum = 0; ip->ip_sum = ip_checksum(eth_payload_data, hlen); @@ -2261,13 +2264,12 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s) /* add transferred count to TCP sequence number */ stl_be_p(&p_tcp_hdr->th_seq, chunk_size + ldl_be_p(&p_tcp_hdr->th_seq)); - ++send_count; } /* Stop sending this frame */ saved_size = 0; } - else if (txdw0 & (CP_TX_TCPCS|CP_TX_UDPCS)) + else if (!(txdw0 & CP_TX_LGSEN) && (txdw0 & (CP_TX_TCPCS|CP_TX_UDPCS))) { DPRINTF("+++ C+ mode need TCP or UDP checksum\n"); diff --git a/hw/net/trace-events b/hw/net/trace-events index 643338f610..4c0ec3fda1 100644 --- a/hw/net/trace-events +++ b/hw/net/trace-events @@ -221,6 +221,7 @@ e1000e_irq_write_ics(uint32_t val) "Adding ICR bits 0x%x" e1000e_irq_icr_process_iame(void) "Clearing IMS bits due to IAME" e1000e_irq_read_ics(uint32_t ics) "Current ICS: 0x%x" e1000e_irq_read_ims(uint32_t ims) "Current IMS: 0x%x" +e1000e_irq_icr_clear_nonmsix_icr_read(void) "Clearing ICR on read due to non MSI-X int" e1000e_irq_icr_read_entry(uint32_t icr) "Starting ICR read. Current ICR: 0x%x" e1000e_irq_icr_read_exit(uint32_t icr) "Ending ICR read. 
Current ICR: 0x%x" e1000e_irq_icr_clear_zero_ims(void) "Clearing ICR on read due to zero IMS" diff --git a/hw/net/tulip.c b/hw/net/tulip.c index ca69f7ea5e..c2b3b1bdfa 100644 --- a/hw/net/tulip.c +++ b/hw/net/tulip.c @@ -70,32 +70,36 @@ static const VMStateDescription vmstate_pci_tulip = { static void tulip_desc_read(TULIPState *s, hwaddr p, struct tulip_descriptor *desc) { + const MemTxAttrs attrs = { .memory = true }; + if (s->csr[0] & CSR0_DBO) { - desc->status = ldl_be_pci_dma(&s->dev, p); - desc->control = ldl_be_pci_dma(&s->dev, p + 4); - desc->buf_addr1 = ldl_be_pci_dma(&s->dev, p + 8); - desc->buf_addr2 = ldl_be_pci_dma(&s->dev, p + 12); + ldl_be_pci_dma(&s->dev, p, &desc->status, attrs); + ldl_be_pci_dma(&s->dev, p + 4, &desc->control, attrs); + ldl_be_pci_dma(&s->dev, p + 8, &desc->buf_addr1, attrs); + ldl_be_pci_dma(&s->dev, p + 12, &desc->buf_addr2, attrs); } else { - desc->status = ldl_le_pci_dma(&s->dev, p); - desc->control = ldl_le_pci_dma(&s->dev, p + 4); - desc->buf_addr1 = ldl_le_pci_dma(&s->dev, p + 8); - desc->buf_addr2 = ldl_le_pci_dma(&s->dev, p + 12); + ldl_le_pci_dma(&s->dev, p, &desc->status, attrs); + ldl_le_pci_dma(&s->dev, p + 4, &desc->control, attrs); + ldl_le_pci_dma(&s->dev, p + 8, &desc->buf_addr1, attrs); + ldl_le_pci_dma(&s->dev, p + 12, &desc->buf_addr2, attrs); } } static void tulip_desc_write(TULIPState *s, hwaddr p, struct tulip_descriptor *desc) { + const MemTxAttrs attrs = { .memory = true }; + if (s->csr[0] & CSR0_DBO) { - stl_be_pci_dma(&s->dev, p, desc->status); - stl_be_pci_dma(&s->dev, p + 4, desc->control); - stl_be_pci_dma(&s->dev, p + 8, desc->buf_addr1); - stl_be_pci_dma(&s->dev, p + 12, desc->buf_addr2); + stl_be_pci_dma(&s->dev, p, desc->status, attrs); + stl_be_pci_dma(&s->dev, p + 4, desc->control, attrs); + stl_be_pci_dma(&s->dev, p + 8, desc->buf_addr1, attrs); + stl_be_pci_dma(&s->dev, p + 12, desc->buf_addr2, attrs); } else { - stl_le_pci_dma(&s->dev, p, desc->status); - stl_le_pci_dma(&s->dev, p + 4, desc->control); - stl_le_pci_dma(&s->dev, p + 8, desc->buf_addr1); - stl_le_pci_dma(&s->dev, p + 12, desc->buf_addr2); + stl_le_pci_dma(&s->dev, p, desc->status, attrs); + stl_le_pci_dma(&s->dev, p + 4, desc->control, attrs); + stl_le_pci_dma(&s->dev, p + 8, desc->buf_addr1, attrs); + stl_le_pci_dma(&s->dev, p + 12, desc->buf_addr2, attrs); } } @@ -866,11 +870,10 @@ static const MemoryRegionOps tulip_ops = { static void tulip_idblock_crc(TULIPState *s, uint16_t *srom) { - int word, n; + int word; int bit; unsigned char bitval, crc; const int len = 9; - n = 0; crc = -1; for (word = 0; word < len; word++) { @@ -883,7 +886,6 @@ static void tulip_idblock_crc(TULIPState *s, uint16_t *srom) srom[len - 1] = (srom[len - 1] & 0xff00) | (unsigned short)crc; break; } - n++; bitval = ((srom[word] >> bit) & 1) ^ ((crc >> 7) & 1); crc = crc << 1; if (bitval == 1) { @@ -963,6 +965,8 @@ static void pci_tulip_realize(PCIDevice *pci_dev, Error **errp) pci_conf = s->dev.config; pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */ + qemu_macaddr_default_if_unset(&s->c.macaddr); + s->eeprom = eeprom93xx_new(&pci_dev->qdev, 64); tulip_fill_eeprom(s); @@ -977,8 +981,6 @@ static void pci_tulip_realize(PCIDevice *pci_dev, Error **errp) s->irq = pci_allocate_irq(&s->dev); - qemu_macaddr_default_if_unset(&s->c.macaddr); - s->nic = qemu_new_nic(&net_tulip_info, &s->c, object_get_typename(OBJECT(pci_dev)), pci_dev->qdev.id, s); diff --git a/hw/net/vhost_net-stub.c b/hw/net/vhost_net-stub.c index a7f4252630..9f7daae99c 100644 --- a/hw/net/vhost_net-stub.c +++ 
b/hw/net/vhost_net-stub.c @@ -33,13 +33,13 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options) int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, - int total_queues) + int data_queue_pairs, int cvq) { return -ENOSYS; } void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, - int total_queues) + int data_queue_pairs, int cvq) { } @@ -101,3 +101,15 @@ int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu) { return 0; } + +void vhost_net_virtqueue_reset(VirtIODevice *vdev, NetClientState *nc, + int vq_index) +{ + +} + +int vhost_net_virtqueue_restart(VirtIODevice *vdev, NetClientState *nc, + int vq_index) +{ + return 0; +} diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c index 10a7780a13..043058ff43 100644 --- a/hw/net/vhost_net.c +++ b/hw/net/vhost_net.c @@ -34,6 +34,7 @@ #include "standard-headers/linux/virtio_ring.h" #include "hw/virtio/vhost.h" #include "hw/virtio/virtio-bus.h" +#include "linux-headers/linux/vhost.h" /* Features supported by host kernel. */ @@ -46,6 +47,7 @@ static const int kernel_feature_bits[] = { VIRTIO_NET_F_MTU, VIRTIO_F_IOMMU_PLATFORM, VIRTIO_F_RING_PACKED, + VIRTIO_F_RING_RESET, VIRTIO_NET_F_HASH_REPORT, VHOST_INVALID_FEATURE_BIT }; @@ -73,6 +75,7 @@ static const int user_feature_bits[] = { VIRTIO_NET_F_MTU, VIRTIO_F_IOMMU_PLATFORM, VIRTIO_F_RING_PACKED, + VIRTIO_F_RING_RESET, VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, @@ -165,9 +168,9 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options) goto fail; } net->nc = options->net_backend; + net->dev.nvqs = options->nvqs; net->dev.max_queues = 1; - net->dev.nvqs = 2; net->dev.vqs = net->vqs; if (backend_kernel) { @@ -201,7 +204,7 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options) net->dev.features &= ~(1ULL << VIRTIO_NET_F_MRG_RXBUF); } if (~net->dev.features & net->dev.backend_features) { - fprintf(stderr, "vhost lacks feature mask %" PRIu64 + fprintf(stderr, "vhost lacks feature mask 0x%" PRIx64 " for backend\n", (uint64_t)(~net->dev.features & net->dev.backend_features)); goto fail; @@ -213,7 +216,7 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options) if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) { features = vhost_user_get_acked_features(net->nc); if (~net->dev.features & features) { - fprintf(stderr, "vhost lacks feature mask %" PRIu64 + fprintf(stderr, "vhost lacks feature mask 0x%" PRIx64 " for backend\n", (uint64_t)(~net->dev.features & features)); goto fail; @@ -231,9 +234,11 @@ fail: return NULL; } -static void vhost_net_set_vq_index(struct vhost_net *net, int vq_index) +static void vhost_net_set_vq_index(struct vhost_net *net, int vq_index, + int vq_index_end) { net->dev.vq_index = vq_index; + net->dev.vq_index_end = vq_index_end; } static int vhost_net_start_one(struct vhost_net *net, @@ -242,15 +247,19 @@ static int vhost_net_start_one(struct vhost_net *net, struct vhost_vring_file file = { }; int r; - net->dev.nvqs = 2; - net->dev.vqs = net->vqs; + if (net->nc->info->start) { + r = net->nc->info->start(net->nc); + if (r < 0) { + return r; + } + } r = vhost_dev_enable_notifiers(&net->dev, dev); if (r < 0) { goto fail_notifiers; } - r = vhost_dev_start(&net->dev, dev); + r = vhost_dev_start(&net->dev, dev, false); if (r < 0) { goto fail_start; } @@ -275,6 +284,13 @@ static int vhost_net_start_one(struct vhost_net *net, } } } + + if (net->nc->info->load) { + r = net->nc->info->load(net->nc); + if (r < 0) { + goto fail; + } + } return 0; fail: file.fd = -1; @@ -292,7 +308,7 @@ fail: if (net->nc->info->poll) { net->nc->info->poll(net->nc, 
true); } - vhost_dev_stop(&net->dev, dev); + vhost_dev_stop(&net->dev, dev, false); fail_start: vhost_dev_disable_notifiers(&net->dev, dev); fail_notifiers: @@ -313,30 +329,45 @@ static void vhost_net_stop_one(struct vhost_net *net, if (net->nc->info->poll) { net->nc->info->poll(net->nc, true); } - vhost_dev_stop(&net->dev, dev); + vhost_dev_stop(&net->dev, dev, false); + if (net->nc->info->stop) { + net->nc->info->stop(net->nc); + } vhost_dev_disable_notifiers(&net->dev, dev); } int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, - int total_queues) + int data_queue_pairs, int cvq) { BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev))); VirtioBusState *vbus = VIRTIO_BUS(qbus); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus); + int total_notifiers = data_queue_pairs * 2 + cvq; + VirtIONet *n = VIRTIO_NET(dev); + int nvhosts = data_queue_pairs + cvq; struct vhost_net *net; - int r, e, i; + int r, e, i, index_end = data_queue_pairs * 2; NetClientState *peer; + if (cvq) { + index_end += 1; + } + if (!k->set_guest_notifiers) { error_report("binding does not support guest notifiers"); return -ENOSYS; } - for (i = 0; i < total_queues; i++) { + for (i = 0; i < nvhosts; i++) { + + if (i < data_queue_pairs) { + peer = qemu_get_peer(ncs, i); + } else { /* Control Virtqueue */ + peer = qemu_get_peer(ncs, n->max_queue_pairs); + } - peer = qemu_get_peer(ncs, i); net = get_vhost_net(peer); - vhost_net_set_vq_index(net, i * 2); + vhost_net_set_vq_index(net, i * 2, index_end); /* Suppress the masking guest notifiers on vhost user * because vhost user doesn't interrupt masking/unmasking @@ -347,18 +378,17 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, } } - r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true); + r = k->set_guest_notifiers(qbus->parent, total_notifiers, true); if (r < 0) { error_report("Error binding guest notifier: %d", -r); goto err; } - for (i = 0; i < total_queues; i++) { - peer = qemu_get_peer(ncs, i); - r = vhost_net_start_one(get_vhost_net(peer), dev); - - if (r < 0) { - goto err_start; + for (i = 0; i < nvhosts; i++) { + if (i < data_queue_pairs) { + peer = qemu_get_peer(ncs, i); + } else { + peer = qemu_get_peer(ncs, n->max_queue_pairs); } if (peer->vring_enable) { @@ -369,16 +399,22 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, goto err_start; } } + + r = vhost_net_start_one(get_vhost_net(peer), dev); + if (r < 0) { + goto err_start; + } } return 0; err_start: while (--i >= 0) { - peer = qemu_get_peer(ncs , i); + peer = qemu_get_peer(ncs, i < data_queue_pairs ? 
+ i : n->max_queue_pairs); vhost_net_stop_one(get_vhost_net(peer), dev); } - e = k->set_guest_notifiers(qbus->parent, total_queues * 2, false); + e = k->set_guest_notifiers(qbus->parent, total_notifiers, false); if (e < 0) { fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e); fflush(stderr); @@ -388,18 +424,27 @@ err: } void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, - int total_queues) + int data_queue_pairs, int cvq) { BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev))); VirtioBusState *vbus = VIRTIO_BUS(qbus); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus); + VirtIONet *n = VIRTIO_NET(dev); + NetClientState *peer; + int total_notifiers = data_queue_pairs * 2 + cvq; + int nvhosts = data_queue_pairs + cvq; int i, r; - for (i = 0; i < total_queues; i++) { - vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev); + for (i = 0; i < nvhosts; i++) { + if (i < data_queue_pairs) { + peer = qemu_get_peer(ncs, i); + } else { + peer = qemu_get_peer(ncs, n->max_queue_pairs); + } + vhost_net_stop_one(get_vhost_net(peer), dev); } - r = k->set_guest_notifiers(qbus->parent, total_queues * 2, false); + r = k->set_guest_notifiers(qbus->parent, total_notifiers, false); if (r < 0) { fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r); fflush(stderr); @@ -488,3 +533,80 @@ int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu) return vhost_ops->vhost_net_set_mtu(&net->dev, mtu); } + +void vhost_net_virtqueue_reset(VirtIODevice *vdev, NetClientState *nc, + int vq_index) +{ + VHostNetState *net = get_vhost_net(nc->peer); + const VhostOps *vhost_ops = net->dev.vhost_ops; + struct vhost_vring_file file = { .fd = -1 }; + int idx; + + /* should only be called after backend is connected */ + assert(vhost_ops); + + idx = vhost_ops->vhost_get_vq_index(&net->dev, vq_index); + + if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) { + file.index = idx; + int r = vhost_net_set_backend(&net->dev, &file); + assert(r >= 0); + } + + vhost_virtqueue_stop(&net->dev, + vdev, + net->dev.vqs + idx, + net->dev.vq_index + idx); +} + +int vhost_net_virtqueue_restart(VirtIODevice *vdev, NetClientState *nc, + int vq_index) +{ + VHostNetState *net = get_vhost_net(nc->peer); + const VhostOps *vhost_ops = net->dev.vhost_ops; + struct vhost_vring_file file = { }; + int idx, r; + + if (!net->dev.started) { + return -EBUSY; + } + + /* should only be called after backend is connected */ + assert(vhost_ops); + + idx = vhost_ops->vhost_get_vq_index(&net->dev, vq_index); + + r = vhost_virtqueue_start(&net->dev, + vdev, + net->dev.vqs + idx, + net->dev.vq_index + idx); + if (r < 0) { + goto err_start; + } + + if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) { + file.index = idx; + file.fd = net->backend; + r = vhost_net_set_backend(&net->dev, &file); + if (r < 0) { + r = -errno; + goto err_start; + } + } + + return 0; + +err_start: + error_report("Error when restarting the queue."); + + if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) { + file.fd = VHOST_FILE_UNBIND; + file.index = idx; + int r = vhost_net_set_backend(&net->dev, &file); + assert(r >= 0); + } + + vhost_dev_stop(&net->dev, vdev, false); + + return r; +} diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 16d20cdee5..4abd49e298 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -14,6 +14,7 @@ #include "qemu/osdep.h" #include "qemu/atomic.h" #include "qemu/iov.h" +#include "qemu/log.h" #include "qemu/main-loop.h" #include "qemu/module.h" #include "hw/virtio/virtio.h" @@ -44,17 +45,17 @@ #include "hw/pci/pci.h" #include 
"net_rx_pkt.h" #include "hw/virtio/vhost.h" +#include "sysemu/qtest.h" #define VIRTIO_NET_VM_VERSION 11 -#define MAC_TABLE_ENTRIES 64 #define MAX_VLAN (1 << 12) /* Per 802.1Q definition */ /* previously fixed value */ #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256 #define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256 -/* for now, only allow larger queues; with virtio-1, guest can downsize */ +/* for now, only allow larger queue_pairs; with virtio-1, guest can downsize */ #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE #define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE @@ -105,6 +106,12 @@ static const VirtIOFeature feature_sizes[] = { {} }; +static const VirtIOConfigSizeParams cfg_size_params = { + .min_size = endof(struct virtio_net_config, mac), + .max_size = sizeof(struct virtio_net_config), + .feature_sizes = feature_sizes +}; + static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc) { VirtIONet *n = qemu_get_nic_opaque(nc); @@ -117,6 +124,16 @@ static int vq2q(int queue_index) return queue_index / 2; } +static void flush_or_purge_queued_packets(NetClientState *nc) +{ + if (!nc->peer) { + return; + } + + qemu_flush_or_purge_queued_packets(nc->peer, true); + assert(!virtio_net_get_subqueue(nc)->async_tx.elem); +} + /* TODO * - we could suppress RX interrupt if we were so inclined. */ @@ -131,7 +148,7 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config) int ret = 0; memset(&netcfg, 0 , sizeof(struct virtio_net_config)); virtio_stw_p(vdev, &netcfg.status, n->status); - virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues); + virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queue_pairs); virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu); memcpy(netcfg.mac, n->mac, ETH_ALEN); virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed); @@ -243,7 +260,9 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) { VirtIODevice *vdev = VIRTIO_DEVICE(n); NetClientState *nc = qemu_get_queue(n->nic); - int queues = n->multiqueue ? n->max_queues : 1; + int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1; + int cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ? + n->max_ncs - n->max_queue_pairs : 0; if (!get_vhost_net(nc->peer)) { return; @@ -266,7 +285,7 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) /* Any packets outstanding? Purge them to avoid touching rings * when vhost is running. */ - for (i = 0; i < queues; i++) { + for (i = 0; i < queue_pairs; i++) { NetClientState *qnc = qemu_get_subqueue(n->nic, i); /* Purge both directions: TX and RX. 
*/ @@ -285,14 +304,14 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) } n->vhost_started = 1; - r = vhost_net_start(vdev, n->nic->ncs, queues); + r = vhost_net_start(vdev, n->nic->ncs, queue_pairs, cvq); if (r < 0) { error_report("unable to start vhost net: %d: " "falling back on userspace virtio", -r); n->vhost_started = 0; } } else { - vhost_net_stop(vdev, n->nic->ncs, queues); + vhost_net_stop(vdev, n->nic->ncs, queue_pairs, cvq); n->vhost_started = 0; } } @@ -309,11 +328,11 @@ static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev, } static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs, - int queues, bool enable) + int queue_pairs, bool enable) { int i; - for (i = 0; i < queues; i++) { + for (i = 0; i < queue_pairs; i++) { if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 && enable) { while (--i >= 0) { @@ -330,7 +349,7 @@ static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs, static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status) { VirtIODevice *vdev = VIRTIO_DEVICE(n); - int queues = n->multiqueue ? n->max_queues : 1; + int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1; if (virtio_net_started(n, status)) { /* Before using the device, we tell the network backend about the @@ -339,14 +358,14 @@ static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status) * virtio-net code. */ n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs, - queues, true); + queue_pairs, true); } else if (virtio_net_started(n, vdev->status)) { /* After using the device, we need to reset the network backend to * the default (guest native endianness), otherwise the guest may * lose network connectivity if it is rebooted into a different * endianness. 
*/ - virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false); + virtio_net_set_vnet_endian(vdev, n->nic->ncs, queue_pairs, false); } } @@ -368,12 +387,12 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status) virtio_net_vnet_endian_status(n, status); virtio_net_vhost_status(n, status); - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { NetClientState *ncs = qemu_get_subqueue(n->nic, i); bool queue_started; q = &n->vqs[i]; - if ((!n->multiqueue && i != 0) || i >= n->curr_queues) { + if ((!n->multiqueue && i != 0) || i >= n->curr_queue_pairs) { queue_status = 0; } else { queue_status = status; @@ -527,6 +546,57 @@ static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc) return info; } +static void virtio_net_queue_reset(VirtIODevice *vdev, uint32_t queue_index) +{ + VirtIONet *n = VIRTIO_NET(vdev); + NetClientState *nc; + + /* validate queue_index and skip for cvq */ + if (queue_index >= n->max_queue_pairs * 2) { + return; + } + + nc = qemu_get_subqueue(n->nic, vq2q(queue_index)); + + if (!nc->peer) { + return; + } + + if (get_vhost_net(nc->peer) && + nc->peer->info->type == NET_CLIENT_DRIVER_TAP) { + vhost_net_virtqueue_reset(vdev, nc, queue_index); + } + + flush_or_purge_queued_packets(nc); +} + +static void virtio_net_queue_enable(VirtIODevice *vdev, uint32_t queue_index) +{ + VirtIONet *n = VIRTIO_NET(vdev); + NetClientState *nc; + int r; + + /* validate queue_index and skip for cvq */ + if (queue_index >= n->max_queue_pairs * 2) { + return; + } + + nc = qemu_get_subqueue(n->nic, vq2q(queue_index)); + + if (!nc->peer || !vdev->vhost_started) { + return; + } + + if (get_vhost_net(nc->peer) && + nc->peer->info->type == NET_CLIENT_DRIVER_TAP) { + r = vhost_net_virtqueue_restart(vdev, nc, queue_index); + if (r < 0) { + error_report("unable to restart vhost net virtqueue: %d, " + "when resetting the queue", queue_index); + } + } +} + static void virtio_net_reset(VirtIODevice *vdev) { VirtIONet *n = VIRTIO_NET(vdev); @@ -540,7 +610,7 @@ static void virtio_net_reset(VirtIODevice *vdev) n->nouni = 0; n->nobcast = 0; /* multiqueue is disabled by default */ - n->curr_queues = 1; + n->curr_queue_pairs = 1; timer_del(n->announce_timer.tm); n->announce_timer.round = 0; n->status &= ~VIRTIO_NET_S_ANNOUNCE; @@ -556,13 +626,8 @@ static void virtio_net_reset(VirtIODevice *vdev) memset(n->vlans, 0, MAX_VLAN >> 3); /* Flush any async TX */ - for (i = 0; i < n->max_queues; i++) { - NetClientState *nc = qemu_get_subqueue(n->nic, i); - - if (nc->peer) { - qemu_flush_or_purge_queued_packets(nc->peer, true); - assert(!virtio_net_get_subqueue(nc)->async_tx.elem); - } + for (i = 0; i < n->max_queue_pairs; i++) { + flush_or_purge_queued_packets(qemu_get_subqueue(n->nic, i)); } } @@ -610,7 +675,7 @@ static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs, sizeof(struct virtio_net_hdr); } - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { nc = qemu_get_subqueue(n->nic, i); if (peer_has_vnet_hdr(n) && @@ -626,17 +691,20 @@ static int virtio_net_max_tx_queue_size(VirtIONet *n) NetClientState *peer = n->nic_conf.peers.ncs[0]; /* - * Backends other than vhost-user don't support max queue size. + * Backends other than vhost-user or vhost-vdpa don't support max queue + * size. 
*/ if (!peer) { return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE; } - if (peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) { + switch(peer->info->type) { + case NET_CLIENT_DRIVER_VHOST_USER: + case NET_CLIENT_DRIVER_VHOST_VDPA: + return VIRTQUEUE_MAX_SIZE; + default: return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE; - } - - return VIRTQUEUE_MAX_SIZE; + }; } static int peer_attach(VirtIONet *n, int index) @@ -655,7 +723,7 @@ static int peer_attach(VirtIONet *n, int index) return 0; } - if (n->max_queues == 1) { + if (n->max_queue_pairs == 1) { return 0; } @@ -681,7 +749,7 @@ static int peer_detach(VirtIONet *n, int index) return tap_disable(nc->peer); } -static void virtio_net_set_queues(VirtIONet *n) +static void virtio_net_set_queue_pairs(VirtIONet *n) { int i; int r; @@ -690,8 +758,8 @@ static void virtio_net_set_queues(VirtIONet *n) return; } - for (i = 0; i < n->max_queues; i++) { - if (i < n->curr_queues) { + for (i = 0; i < n->max_queue_pairs; i++) { + if (i < n->curr_queue_pairs) { r = peer_attach(n, i); assert(!r); } else { @@ -796,48 +864,34 @@ static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n) typedef struct { VirtIONet *n; - char *id; -} FailoverId; + DeviceState *dev; +} FailoverDevice; /** - * Set the id of the failover primary device + * Set the failover primary device * * @opaque: FailoverId to setup * @opts: opts for device we are handling * @errp: returns an error if this function fails */ -static int failover_set_primary(void *opaque, QemuOpts *opts, Error **errp) +static int failover_set_primary(DeviceState *dev, void *opaque) { - FailoverId *fid = opaque; - const char *standby_id = qemu_opt_get(opts, "failover_pair_id"); + FailoverDevice *fdev = opaque; + PCIDevice *pci_dev = (PCIDevice *) + object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE); - if (g_strcmp0(standby_id, fid->n->netclient_name) == 0) { - fid->id = g_strdup(opts->id); + if (!pci_dev) { + return 0; + } + + if (!g_strcmp0(pci_dev->failover_pair_id, fdev->n->netclient_name)) { + fdev->dev = dev; return 1; } return 0; } -/** - * Find the primary device id for this failover virtio-net - * - * @n: VirtIONet device - * @errp: returns an error if this function fails - */ -static char *failover_find_primary_device_id(VirtIONet *n) -{ - Error *err = NULL; - FailoverId fid; - - fid.n = n; - if (!qemu_opts_foreach(qemu_find_opts("device"), - failover_set_primary, &fid, &err)) { - return NULL; - } - return fid.id; -} - /** * Find the primary device for this failover virtio-net * @@ -846,39 +900,38 @@ static char *failover_find_primary_device_id(VirtIONet *n) */ static DeviceState *failover_find_primary_device(VirtIONet *n) { - char *id = failover_find_primary_device_id(n); + FailoverDevice fdev = { + .n = n, + }; - if (!id) { - return NULL; - } - - return qdev_find_recursive(sysbus_get_default(), id); + qbus_walk_children(sysbus_get_default(), failover_set_primary, NULL, + NULL, NULL, &fdev); + return fdev.dev; } static void failover_add_primary(VirtIONet *n, Error **errp) { Error *err = NULL; - QemuOpts *opts; - char *id; DeviceState *dev = failover_find_primary_device(n); if (dev) { return; } - id = failover_find_primary_device_id(n); - if (!id) { + if (!n->primary_opts) { error_setg(errp, "Primary device not found"); error_append_hint(errp, "Virtio-net failover will not work. 
Make " "sure primary device has parameter" " failover_pair_id=%s\n", n->netclient_name); return; } - opts = qemu_opts_find(qemu_find_opts("device"), id); - g_assert(opts); /* cannot be NULL because id was found using opts list */ - dev = qdev_device_add(opts, &err); + + dev = qdev_device_add_from_qdict(n->primary_opts, + n->primary_opts_from_json, + &err); if (err) { - qemu_opts_del(opts); + qobject_unref(n->primary_opts); + n->primary_opts = NULL; } else { object_unref(OBJECT(dev)); } @@ -920,7 +973,7 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features) virtio_net_apply_guest_offloads(n); } - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { NetClientState *nc = qemu_get_subqueue(n->nic, i); if (!get_vhost_net(nc->peer)) { @@ -940,7 +993,11 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features) qatomic_set(&n->failover_primary_hidden, false); failover_add_primary(n, &err); if (err) { - warn_report_err(err); + if (!qtest_enabled()) { + warn_report_err(err); + } else { + error_free(err); + } } } } @@ -1247,7 +1304,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n, VirtIODevice *vdev = VIRTIO_DEVICE(n); struct virtio_net_rss_config cfg; size_t s, offset = 0, size_get; - uint16_t queues, i; + uint16_t queue_pairs, i; struct { uint16_t us; uint8_t b; @@ -1289,7 +1346,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n, } n->rss_data.default_queue = do_rss ? virtio_lduw_p(vdev, &cfg.unclassified_queue) : 0; - if (n->rss_data.default_queue >= n->max_queues) { + if (n->rss_data.default_queue >= n->max_queue_pairs) { err_msg = "Invalid default queue"; err_value = n->rss_data.default_queue; goto error; @@ -1318,14 +1375,14 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n, size_get = sizeof(temp); s = iov_to_buf(iov, iov_cnt, offset, &temp, size_get); if (s != size_get) { - err_msg = "Can't get queues"; + err_msg = "Can't get queue_pairs"; err_value = (uint32_t)s; goto error; } - queues = do_rss ? virtio_lduw_p(vdev, &temp.us) : n->curr_queues; - if (queues == 0 || queues > n->max_queues) { - err_msg = "Invalid number of queues"; - err_value = queues; + queue_pairs = do_rss ? virtio_lduw_p(vdev, &temp.us) : n->curr_queue_pairs; + if (queue_pairs == 0 || queue_pairs > n->max_queue_pairs) { + err_msg = "Invalid number of queue_pairs"; + err_value = queue_pairs; goto error; } if (temp.b > VIRTIO_NET_RSS_MAX_KEY_SIZE) { @@ -1340,7 +1397,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n, } if (!temp.b && !n->rss_data.hash_types) { virtio_net_disable_rss(n); - return queues; + return queue_pairs; } offset += size_get; size_get = temp.b; @@ -1373,7 +1430,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n, trace_virtio_net_rss_enable(n->rss_data.hash_types, n->rss_data.indirections_len, temp.b); - return queues; + return queue_pairs; error: trace_virtio_net_rss_error(err_msg, err_value); virtio_net_disable_rss(n); @@ -1384,15 +1441,16 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd, struct iovec *iov, unsigned int iov_cnt) { VirtIODevice *vdev = VIRTIO_DEVICE(n); - uint16_t queues; + uint16_t queue_pairs; + NetClientState *nc = qemu_get_queue(n->nic); virtio_net_disable_rss(n); if (cmd == VIRTIO_NET_CTRL_MQ_HASH_CONFIG) { - queues = virtio_net_handle_rss(n, iov, iov_cnt, false); - return queues ? VIRTIO_NET_OK : VIRTIO_NET_ERR; + queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, false); + return queue_pairs ? 
VIRTIO_NET_OK : VIRTIO_NET_ERR; } if (cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) { - queues = virtio_net_handle_rss(n, iov, iov_cnt, true); + queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, true); } else if (cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) { struct virtio_net_ctrl_mq mq; size_t s; @@ -1403,78 +1461,100 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd, if (s != sizeof(mq)) { return VIRTIO_NET_ERR; } - queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs); + queue_pairs = virtio_lduw_p(vdev, &mq.virtqueue_pairs); } else { return VIRTIO_NET_ERR; } - if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || - queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || - queues > n->max_queues || + if (queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || + queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || + queue_pairs > n->max_queue_pairs || !n->multiqueue) { return VIRTIO_NET_ERR; } - n->curr_queues = queues; - /* stop the backend before changing the number of queues to avoid handling a + n->curr_queue_pairs = queue_pairs; + if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { + /* + * Avoid updating the backend for a vdpa device: We're only interested + * in updating the device model queues. + */ + return VIRTIO_NET_OK; + } + /* stop the backend before changing the number of queue_pairs to avoid handling a * disabled queue */ virtio_net_set_status(vdev, vdev->status); - virtio_net_set_queues(n); + virtio_net_set_queue_pairs(n); return VIRTIO_NET_OK; } -static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) +size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev, + const struct iovec *in_sg, unsigned in_num, + const struct iovec *out_sg, + unsigned out_num) { VirtIONet *n = VIRTIO_NET(vdev); struct virtio_net_ctrl_hdr ctrl; virtio_net_ctrl_ack status = VIRTIO_NET_ERR; - VirtQueueElement *elem; size_t s; struct iovec *iov, *iov2; - unsigned int iov_cnt; + + if (iov_size(in_sg, in_num) < sizeof(status) || + iov_size(out_sg, out_num) < sizeof(ctrl)) { + virtio_error(vdev, "virtio-net ctrl missing headers"); + return 0; + } + + iov2 = iov = g_memdup2(out_sg, sizeof(struct iovec) * out_num); + s = iov_to_buf(iov, out_num, 0, &ctrl, sizeof(ctrl)); + iov_discard_front(&iov, &out_num, sizeof(ctrl)); + if (s != sizeof(ctrl)) { + status = VIRTIO_NET_ERR; + } else if (ctrl.class == VIRTIO_NET_CTRL_RX) { + status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) { + status = virtio_net_handle_mac(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) { + status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) { + status = virtio_net_handle_announce(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) { + status = virtio_net_handle_mq(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) { + status = virtio_net_handle_offloads(n, ctrl.cmd, iov, out_num); + } + + s = iov_from_buf(in_sg, in_num, 0, &status, sizeof(status)); + assert(s == sizeof(status)); + + g_free(iov2); + return sizeof(status); +} + +static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtQueueElement *elem; for (;;) { + size_t written; elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); if (!elem) { break; } - if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) || - iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) { - virtio_error(vdev, "virtio-net ctrl missing headers"); + + written = 
virtio_net_handle_ctrl_iov(vdev, elem->in_sg, elem->in_num, + elem->out_sg, elem->out_num); + if (written > 0) { + virtqueue_push(vq, elem, written); + virtio_notify(vdev, vq); + g_free(elem); + } else { virtqueue_detach_element(vq, elem, 0); g_free(elem); break; } - - iov_cnt = elem->out_num; - iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num); - s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl)); - iov_discard_front(&iov, &iov_cnt, sizeof(ctrl)); - if (s != sizeof(ctrl)) { - status = VIRTIO_NET_ERR; - } else if (ctrl.class == VIRTIO_NET_CTRL_RX) { - status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) { - status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) { - status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) { - status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) { - status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) { - status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt); - } - - s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status)); - assert(s == sizeof(status)); - - virtqueue_push(vq, elem, sizeof(status)); - virtio_notify(vdev, vq); - g_free(iov2); - g_free(elem); } } @@ -1498,7 +1578,7 @@ static bool virtio_net_can_receive(NetClientState *nc) return false; } - if (nc->queue_index >= n->curr_queues) { + if (nc->queue_index >= n->curr_queue_pairs) { return false; } @@ -1746,10 +1826,13 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf, VirtIONet *n = qemu_get_nic_opaque(nc); VirtIONetQueue *q = virtio_net_get_subqueue(nc); VirtIODevice *vdev = VIRTIO_DEVICE(n); + VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE]; + size_t lens[VIRTQUEUE_MAX_SIZE]; struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE]; struct virtio_net_hdr_mrg_rxbuf mhdr; unsigned mhdr_cnt = 0; - size_t offset, i, guest_offset; + size_t offset, i, guest_offset, j; + ssize_t err; if (!virtio_net_can_receive(nc)) { return -1; @@ -1780,6 +1863,12 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf, total = 0; + if (i == VIRTQUEUE_MAX_SIZE) { + virtio_error(vdev, "virtio-net unexpected long buffer chain"); + err = size; + goto err; + } + elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement)); if (!elem) { if (i) { @@ -1791,7 +1880,8 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf, n->guest_hdr_len, n->host_hdr_len, vdev->guest_features); } - return -1; + err = -1; + goto err; } if (elem->in_num < 1) { @@ -1799,7 +1889,8 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf, "virtio-net receive queue contains no in buffers"); virtqueue_detach_element(q->rx_vq, elem, 0); g_free(elem); - return -1; + err = -1; + goto err; } sg = elem->in_sg; @@ -1836,12 +1927,13 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf, if (!n->mergeable_rx_bufs && offset < size) { virtqueue_unpop(q->rx_vq, elem, total); g_free(elem); - return size; + err = size; + goto err; } - /* signal other side */ - virtqueue_fill(q->rx_vq, elem, total, i++); - g_free(elem); + elems[i] = elem; + lens[i] = total; + i++; } if (mhdr_cnt) { @@ -1851,10 +1943,24 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf, &mhdr.num_buffers, sizeof 
mhdr.num_buffers); } + for (j = 0; j < i; j++) { + /* signal other side */ + virtqueue_fill(q->rx_vq, elems[j], lens[j], j); + g_free(elems[j]); + } + virtqueue_flush(q->rx_vq, i); virtio_notify(vdev, q->rx_vq); return size; + +err: + for (j = 0; j < i; j++) { + virtqueue_detach_element(q->rx_vq, elems[j], lens[j]); + g_free(elems[j]); + } + + return err; } static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf, @@ -1975,7 +2081,7 @@ static void virtio_net_rsc_cache_buf(VirtioNetRscChain *chain, VirtioNetRscSeg *seg; hdr_len = chain->n->guest_hdr_len; - seg = g_malloc(sizeof(VirtioNetRscSeg)); + seg = g_new(VirtioNetRscSeg, 1); seg->buf = g_malloc(hdr_len + sizeof(struct eth_header) + sizeof(struct ip6_header) + VIRTIO_NET_MAX_TCP_PAYLOAD); memcpy(seg->buf, buf, size); @@ -2476,6 +2582,7 @@ static void virtio_net_tx_complete(NetClientState *nc, ssize_t len) VirtIONet *n = qemu_get_nic_opaque(nc); VirtIONetQueue *q = virtio_net_get_subqueue(nc); VirtIODevice *vdev = VIRTIO_DEVICE(n); + int ret; virtqueue_push(q->tx_vq, q->async_tx.elem, 0); virtio_notify(vdev, q->tx_vq); @@ -2484,7 +2591,22 @@ static void virtio_net_tx_complete(NetClientState *nc, ssize_t len) q->async_tx.elem = NULL; virtio_queue_set_notification(q->tx_vq, 1); - virtio_net_flush_tx(q); + ret = virtio_net_flush_tx(q); + if (ret >= n->tx_burst) { + /* + * the flush has been stopped by tx_burst + * we will not receive notification for the + * remainining part, so re-schedule + */ + virtio_queue_set_notification(q->tx_vq, 0); + if (q->tx_bh) { + qemu_bh_schedule(q->tx_bh); + } else { + timer_mod(q->tx_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout); + } + q->tx_waiting = 1; + } } /* TX */ @@ -2583,6 +2705,8 @@ drop: return num_packets; } +static void virtio_net_tx_timer(void *opaque); + static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq) { VirtIONet *n = VIRTIO_NET(vdev); @@ -2600,15 +2724,13 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq) } if (q->tx_waiting) { - virtio_queue_set_notification(vq, 1); + /* We already have queued packets, immediately flush */ timer_del(q->tx_timer); - q->tx_waiting = 0; - if (virtio_net_flush_tx(q) == -EINVAL) { - return; - } + virtio_net_tx_timer(q); } else { + /* re-arm timer to flush it (and more) on next tick */ timer_mod(q->tx_timer, - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout); + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout); q->tx_waiting = 1; virtio_queue_set_notification(vq, 0); } @@ -2641,6 +2763,8 @@ static void virtio_net_tx_timer(void *opaque) VirtIONetQueue *q = opaque; VirtIONet *n = q->n; VirtIODevice *vdev = VIRTIO_DEVICE(n); + int ret; + /* This happens when device was stopped but BH wasn't. */ if (!vdev->vm_running) { /* Make sure tx waiting is set, so we'll run when restarted. */ @@ -2655,8 +2779,33 @@ static void virtio_net_tx_timer(void *opaque) return; } + ret = virtio_net_flush_tx(q); + if (ret == -EBUSY || ret == -EINVAL) { + return; + } + /* + * If we flush a full burst of packets, assume there are + * more coming and immediately rearm + */ + if (ret >= n->tx_burst) { + q->tx_waiting = 1; + timer_mod(q->tx_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout); + return; + } + /* + * If less than a full burst, re-enable notification and flush + * anything that may have come in while we weren't looking. 
If + * we find something, assume the guest is still active and rearm + */ virtio_queue_set_notification(q->tx_vq, 1); - virtio_net_flush_tx(q); + ret = virtio_net_flush_tx(q); + if (ret > 0) { + virtio_queue_set_notification(q->tx_vq, 0); + q->tx_waiting = 1; + timer_mod(q->tx_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout); + } } static void virtio_net_tx_bh(void *opaque) @@ -2753,11 +2902,11 @@ static void virtio_net_del_queue(VirtIONet *n, int index) virtio_del_queue(vdev, index * 2 + 1); } -static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues) +static void virtio_net_change_num_queue_pairs(VirtIONet *n, int new_max_queue_pairs) { VirtIODevice *vdev = VIRTIO_DEVICE(n); int old_num_queues = virtio_get_num_queues(vdev); - int new_num_queues = new_max_queues * 2 + 1; + int new_num_queues = new_max_queue_pairs * 2 + 1; int i; assert(old_num_queues >= 3); @@ -2790,12 +2939,12 @@ static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues) static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue) { - int max = multiqueue ? n->max_queues : 1; + int max = multiqueue ? n->max_queue_pairs : 1; n->multiqueue = multiqueue; - virtio_net_change_num_queues(n, max); + virtio_net_change_num_queue_pairs(n, max); - virtio_net_set_queues(n); + virtio_net_set_queue_pairs(n); } static int virtio_net_post_load_device(void *opaque, int version_id) @@ -2828,7 +2977,7 @@ static int virtio_net_post_load_device(void *opaque, int version_id) */ n->saved_guest_offloads = n->curr_guest_offloads; - virtio_net_set_queues(n); + virtio_net_set_queue_pairs(n); /* Find the first multicast entry in the saved MAC filter */ for (i = 0; i < n->mac_table.in_use; i++) { @@ -2841,7 +2990,7 @@ static int virtio_net_post_load_device(void *opaque, int version_id) /* nc.link_down can't be migrated, so infer link_down according * to link status bit in n->status */ link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0; - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { qemu_get_subqueue(n->nic, i)->link_down = link_down; } @@ -2906,9 +3055,9 @@ static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = { }, }; -static bool max_queues_gt_1(void *opaque, int version_id) +static bool max_queue_pairs_gt_1(void *opaque, int version_id) { - return VIRTIO_NET(opaque)->max_queues > 1; + return VIRTIO_NET(opaque)->max_queue_pairs > 1; } static bool has_ctrl_guest_offloads(void *opaque, int version_id) @@ -2933,13 +3082,13 @@ static bool mac_table_doesnt_fit(void *opaque, int version_id) struct VirtIONetMigTmp { VirtIONet *parent; VirtIONetQueue *vqs_1; - uint16_t curr_queues_1; + uint16_t curr_queue_pairs_1; uint8_t has_ufo; uint32_t has_vnet_hdr; }; /* The 2nd and subsequent tx_waiting flags are loaded later than - * the 1st entry in the queues and only if there's more than one + * the 1st entry in the queue_pairs and only if there's more than one * entry. We use the tmp mechanism to calculate a temporary * pointer and count and also validate the count. 
*/ @@ -2949,9 +3098,9 @@ static int virtio_net_tx_waiting_pre_save(void *opaque) struct VirtIONetMigTmp *tmp = opaque; tmp->vqs_1 = tmp->parent->vqs + 1; - tmp->curr_queues_1 = tmp->parent->curr_queues - 1; - if (tmp->parent->curr_queues == 0) { - tmp->curr_queues_1 = 0; + tmp->curr_queue_pairs_1 = tmp->parent->curr_queue_pairs - 1; + if (tmp->parent->curr_queue_pairs == 0) { + tmp->curr_queue_pairs_1 = 0; } return 0; @@ -2964,9 +3113,9 @@ static int virtio_net_tx_waiting_pre_load(void *opaque) /* Reuse the pointer setup from save */ virtio_net_tx_waiting_pre_save(opaque); - if (tmp->parent->curr_queues > tmp->parent->max_queues) { - error_report("virtio-net: curr_queues %x > max_queues %x", - tmp->parent->curr_queues, tmp->parent->max_queues); + if (tmp->parent->curr_queue_pairs > tmp->parent->max_queue_pairs) { + error_report("virtio-net: curr_queue_pairs %x > max_queue_pairs %x", + tmp->parent->curr_queue_pairs, tmp->parent->max_queue_pairs); return -EINVAL; } @@ -2980,7 +3129,7 @@ static const VMStateDescription vmstate_virtio_net_tx_waiting = { .pre_save = virtio_net_tx_waiting_pre_save, .fields = (VMStateField[]) { VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp, - curr_queues_1, + curr_queue_pairs_1, vmstate_virtio_net_queue_tx_waiting, struct VirtIONetQueue), VMSTATE_END_OF_LIST() @@ -3122,9 +3271,9 @@ static const VMStateDescription vmstate_virtio_net_device = { VMSTATE_UINT8(nobcast, VirtIONet), VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, vmstate_virtio_net_has_ufo), - VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0, + VMSTATE_SINGLE_TEST(max_queue_pairs, VirtIONet, max_queue_pairs_gt_1, 0, vmstate_info_uint16_equal, uint16_t), - VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1), + VMSTATE_UINT16_TEST(curr_queue_pairs, VirtIONet, max_queue_pairs_gt_1), VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, vmstate_virtio_net_tx_waiting), VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet, @@ -3150,8 +3299,22 @@ static NetClientInfo net_virtio_info = { static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx) { VirtIONet *n = VIRTIO_NET(vdev); - NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx)); + NetClientState *nc; assert(n->vhost_started); + if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ) && idx == 2) { + /* Must guard against invalid features and bogus queue index + * from being set by malicious guest, or penetrated through + * buggy migration stream. + */ + if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bogus vq index ignored\n", __func__); + return false; + } + nc = qemu_get_subqueue(n->nic, n->max_queue_pairs); + } else { + nc = qemu_get_subqueue(n->nic, vq2q(idx)); + } return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx); } @@ -3159,8 +3322,22 @@ static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) { VirtIONet *n = VIRTIO_NET(vdev); - NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx)); + NetClientState *nc; assert(n->vhost_started); + if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ) && idx == 2) { + /* Must guard against invalid features and bogus queue index + * from being set by malicious guest, or penetrated through + * buggy migration stream. 
+ */ + if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bogus vq index ignored\n", __func__); + return; + } + nc = qemu_get_subqueue(n->nic, n->max_queue_pairs); + } else { + nc = qemu_get_subqueue(n->nic, vq2q(idx)); + } vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask); } @@ -3169,8 +3346,7 @@ static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features) { virtio_add_feature(&host_features, VIRTIO_NET_F_MAC); - n->config_size = virtio_feature_get_config_size(feature_sizes, - host_features); + n->config_size = virtio_get_config_size(&cfg_size_params, host_features); } void virtio_net_set_netclient_name(VirtIONet *n, const char *name, @@ -3279,7 +3455,9 @@ static void virtio_net_migration_state_notifier(Notifier *notifier, void *data) } static bool failover_hide_primary_device(DeviceListener *listener, - QemuOpts *device_opts) + const QDict *device_opts, + bool from_json, + Error **errp) { VirtIONet *n = container_of(listener, VirtIONet, primary_listener); const char *standby_id; @@ -3287,11 +3465,42 @@ static bool failover_hide_primary_device(DeviceListener *listener, if (!device_opts) { return false; } - standby_id = qemu_opt_get(device_opts, "failover_pair_id"); + + if (!qdict_haskey(device_opts, "failover_pair_id")) { + return false; + } + + if (!qdict_haskey(device_opts, "id")) { + error_setg(errp, "Device with failover_pair_id needs to have id"); + return false; + } + + standby_id = qdict_get_str(device_opts, "failover_pair_id"); if (g_strcmp0(standby_id, n->netclient_name) != 0) { return false; } + /* + * The hide helper can be called several times for a given device. + * Check there is only one primary for a virtio-net device but + * don't duplicate the qdict several times if it's called for the same + * device. + */ + if (n->primary_opts) { + const char *old, *new; + /* devices with failover_pair_id always have an id */ + old = qdict_get_str(n->primary_opts, "id"); + new = qdict_get_str(device_opts, "id"); + if (strcmp(old, new) != 0) { + error_setg(errp, "Cannot attach more than one primary device to " + "'%s': '%s' and '%s'", n->netclient_name, old, new); + return false; + } + } else { + n->primary_opts = qdict_clone_shallow(device_opts); + n->primary_opts_from_json = from_json; + } + /* failover_primary_hidden is set during feature negotiation */ return qatomic_read(&n->failover_primary_hidden); } @@ -3339,7 +3548,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) } virtio_net_set_config_size(n, n->host_features); - virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size); + virtio_init(vdev, VIRTIO_ID_NET, n->config_size); /* * We set a lower limit on RX queue size to what it always was. @@ -3368,16 +3577,30 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) return; } - n->max_queues = MAX(n->nic_conf.peers.queues, 1); - if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) { - error_setg(errp, "Invalid number of queues (= %" PRIu32 "), " + n->max_ncs = MAX(n->nic_conf.peers.queues, 1); + + /* + * Figure out the datapath queue pairs since the backend could + * provide control queue via peers as well. 
+ */ + if (n->nic_conf.peers.queues) { + for (i = 0; i < n->max_ncs; i++) { + if (n->nic_conf.peers.ncs[i]->is_datapath) { + ++n->max_queue_pairs; + } + } + } + n->max_queue_pairs = MAX(n->max_queue_pairs, 1); + + if (n->max_queue_pairs * 2 + 1 > VIRTIO_QUEUE_MAX) { + error_setg(errp, "Invalid number of queue pairs (= %" PRIu32 "), " "must be a positive integer less than %d.", - n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2); + n->max_queue_pairs, (VIRTIO_QUEUE_MAX - 1) / 2); virtio_cleanup(vdev); return; } - n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues); - n->curr_queues = 1; + n->vqs = g_new0(VirtIONetQueue, n->max_queue_pairs); + n->curr_queue_pairs = 1; n->tx_timeout = n->net_conf.txtimer; if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer") @@ -3391,7 +3614,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n), n->net_conf.tx_queue_size); - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { virtio_net_add_queue(n, i); } @@ -3415,13 +3638,13 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) object_get_typename(OBJECT(dev)), dev->id, n); } - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { n->nic->ncs[i].do_not_pad = true; } peer_test_vnet_hdr(n); if (peer_has_vnet_hdr(n)) { - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true); } n->host_hdr_len = sizeof(struct virtio_net_hdr); @@ -3463,7 +3686,7 @@ static void virtio_net_device_unrealize(DeviceState *dev) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtIONet *n = VIRTIO_NET(dev); - int i, max_queues; + int i, max_queue_pairs; if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) { virtio_net_unload_ebpf(n); @@ -3481,16 +3704,19 @@ static void virtio_net_device_unrealize(DeviceState *dev) g_free(n->vlans); if (n->failover) { + qobject_unref(n->primary_opts); device_listener_unregister(&n->primary_listener); remove_migration_state_change_notifier(&n->migration_state); + } else { + assert(n->primary_opts == NULL); } - max_queues = n->multiqueue ? n->max_queues : 1; - for (i = 0; i < max_queues; i++) { + max_queue_pairs = n->multiqueue ? 
n->max_queue_pairs : 1; + for (i = 0; i < max_queue_pairs; i++) { virtio_net_del_queue(n, i); } /* delete also control vq */ - virtio_del_queue(vdev, max_queues * 2); + virtio_del_queue(vdev, max_queue_pairs * 2); qemu_announce_timer_del(&n->announce_timer, false); g_free(n->vqs); qemu_del_nic(n->nic); @@ -3549,6 +3775,14 @@ static bool dev_unplug_pending(void *opaque) return vdc->primary_unplug_pending(dev); } +static struct vhost_dev *virtio_net_get_vhost(VirtIODevice *vdev) +{ + VirtIONet *n = VIRTIO_NET(vdev); + NetClientState *nc = qemu_get_queue(n->nic); + struct vhost_net *net = get_vhost_net(nc->peer); + return &net->dev; +} + static const VMStateDescription vmstate_virtio_net = { .name = "virtio-net", .minimum_version_id = VIRTIO_NET_VM_VERSION, @@ -3644,6 +3878,8 @@ static void virtio_net_class_init(ObjectClass *klass, void *data) vdc->set_features = virtio_net_set_features; vdc->bad_features = virtio_net_bad_features; vdc->reset = virtio_net_reset; + vdc->queue_reset = virtio_net_queue_reset; + vdc->queue_enable = virtio_net_queue_enable; vdc->set_status = virtio_net_set_status; vdc->guest_notifier_mask = virtio_net_guest_notifier_mask; vdc->guest_notifier_pending = virtio_net_guest_notifier_pending; @@ -3651,6 +3887,7 @@ static void virtio_net_class_init(ObjectClass *klass, void *data) vdc->post_load = virtio_net_post_load_virtio; vdc->vmsd = &vmstate_virtio_net_device; vdc->primary_unplug_pending = primary_unplug_pending; + vdc->get_vhost = virtio_net_get_vhost; } static const TypeInfo virtio_net_info = { diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c index 41f796a247..56559cda24 100644 --- a/hw/net/vmxnet3.c +++ b/hw/net/vmxnet3.c @@ -1441,6 +1441,7 @@ static void vmxnet3_activate_device(VMXNET3State *s) vmxnet3_setup_rx_filtering(s); /* Cache fields from shared memory */ s->mtu = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.misc.mtu); + assert(VMXNET3_MIN_MTU <= s->mtu && s->mtu <= VMXNET3_MAX_MTU); VMW_CFPRN("MTU is %u", s->mtu); s->max_rx_frags = @@ -1486,6 +1487,9 @@ static void vmxnet3_activate_device(VMXNET3State *s) /* Read rings memory locations for TX queues */ pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.txRingBasePA); size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.txRingSize); + if (size > VMXNET3_TX_RING_MAX_SIZE) { + size = VMXNET3_TX_RING_MAX_SIZE; + } vmxnet3_ring_init(d, &s->txq_descr[i].tx_ring, pa, size, sizeof(struct Vmxnet3_TxDesc), false); @@ -1496,6 +1500,9 @@ static void vmxnet3_activate_device(VMXNET3State *s) /* TXC ring */ pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.compRingBasePA); size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.compRingSize); + if (size > VMXNET3_TC_RING_MAX_SIZE) { + size = VMXNET3_TC_RING_MAX_SIZE; + } vmxnet3_ring_init(d, &s->txq_descr[i].comp_ring, pa, size, sizeof(struct Vmxnet3_TxCompDesc), true); VMXNET3_RING_DUMP(VMW_CFPRN, "TXC", i, &s->txq_descr[i].comp_ring); @@ -1537,6 +1544,9 @@ static void vmxnet3_activate_device(VMXNET3State *s) /* RX rings */ pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.rxRingBasePA[j]); size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.rxRingSize[j]); + if (size > VMXNET3_RX_RING_MAX_SIZE) { + size = VMXNET3_RX_RING_MAX_SIZE; + } vmxnet3_ring_init(d, &s->rxq_descr[i].rx_ring[j], pa, size, sizeof(struct Vmxnet3_RxDesc), false); VMW_CFPRN("RX queue %d:%d: Base: %" PRIx64 ", Size: %d", @@ -1546,6 +1556,9 @@ static void vmxnet3_activate_device(VMXNET3State *s) /* RXC ring */ pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.compRingBasePA); size = 
VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.compRingSize); + if (size > VMXNET3_RC_RING_MAX_SIZE) { + size = VMXNET3_RC_RING_MAX_SIZE; + } vmxnet3_ring_init(d, &s->rxq_descr[i].comp_ring, pa, size, sizeof(struct Vmxnet3_RxCompDesc), true); VMW_CFPRN("RXC queue %d: Base: %" PRIx64 ", Size: %d", i, pa, size); @@ -1803,7 +1816,9 @@ vmxnet3_io_bar1_write(void *opaque, case VMXNET3_REG_ICR: VMW_CBPRN("Write BAR1 [VMXNET3_REG_ICR] = %" PRIx64 ", size %d", val, size); - g_assert_not_reached(); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to read-only register VMXNET3_REG_ICR\n", + TYPE_VMXNET3); break; /* Event Cause Register */ @@ -2095,20 +2110,14 @@ vmxnet3_unuse_msix_vectors(VMXNET3State *s, int num_vectors) } } -static bool +static void vmxnet3_use_msix_vectors(VMXNET3State *s, int num_vectors) { PCIDevice *d = PCI_DEVICE(s); int i; for (i = 0; i < num_vectors; i++) { - int res = msix_vector_use(d, i); - if (0 > res) { - VMW_WRPRN("Failed to use MSI-X vector %d, error %d", i, res); - vmxnet3_unuse_msix_vectors(s, i); - return false; - } + msix_vector_use(d, i); } - return true; } static bool @@ -2126,13 +2135,8 @@ vmxnet3_init_msix(VMXNET3State *s) VMW_WRPRN("Failed to initialize MSI-X, error %d", res); s->msix_used = false; } else { - if (!vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS)) { - VMW_WRPRN("Failed to use MSI-X vectors, error %d", res); - msix_uninit(d, &s->msix_bar, &s->msix_bar); - s->msix_used = false; - } else { - s->msix_used = true; - } + vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS); + s->msix_used = true; } return s->msix_used; } @@ -2397,19 +2401,13 @@ static const VMStateDescription vmstate_vmxnet3_rxq_descr = { static int vmxnet3_post_load(void *opaque, int version_id) { VMXNET3State *s = opaque; - PCIDevice *d = PCI_DEVICE(s); net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), s->max_tx_frags, s->peer_has_vhdr); net_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr); if (s->msix_used) { - if (!vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS)) { - VMW_WRPRN("Failed to re-use MSI-X vectors"); - msix_uninit(d, &s->msix_bar, &s->msix_bar); - s->msix_used = false; - return -1; - } + vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS); } if (!vmxnet3_validate_queues(s)) { diff --git a/hw/net/vmxnet3.h b/hw/net/vmxnet3.h index 5b3b76ba7a..bf4f6de74a 100644 --- a/hw/net/vmxnet3.h +++ b/hw/net/vmxnet3.h @@ -35,7 +35,7 @@ #define __le32 uint32_t #define __le64 uint64_t -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN #define __BIG_ENDIAN_BITFIELD #else #endif @@ -800,7 +800,7 @@ struct Vmxnet3_DriverShared { #undef __le16 #undef __le32 #undef __le64 -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN #undef __BIG_ENDIAN_BITFIELD #endif diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c index 5c815b4f0c..7d92c2d022 100644 --- a/hw/net/xen_nic.c +++ b/hw/net/xen_nic.c @@ -296,9 +296,8 @@ static int net_init(struct XenLegacyDevice *xendev) netdev->nic = qemu_new_nic(&net_xen_info, &netdev->conf, "xen", NULL, netdev); - snprintf(qemu_get_queue(netdev->nic)->info_str, - sizeof(qemu_get_queue(netdev->nic)->info_str), - "nic: xenbus vif macaddr=%s", netdev->mac); + qemu_set_info_str(qemu_get_queue(netdev->nic), + "nic: xenbus vif macaddr=%s", netdev->mac); /* fill info */ xenstore_write_be_int(&netdev->xendev, "feature-rx-copy", 1); diff --git a/hw/nios2/10m50_devboard.c b/hw/nios2/10m50_devboard.c index 3d1205b8bd..91383fb097 100644 --- a/hw/nios2/10m50_devboard.c +++ b/hw/nios2/10m50_devboard.c @@ -27,6 +27,7 @@ #include "hw/sysbus.h" #include "hw/char/serial.h" +#include "hw/intc/nios2_vic.h" 
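The virtio-net hunks above finish the max_queues/curr_queues to queue-pair renaming and now derive max_queue_pairs only from datapath peers, so a backend that also supplies a control queue no longer inflates the data queue-pair count. A minimal usage sketch that exercises this multiqueue path, assuming a tap backend (the netdev id, queue count and vector count are illustrative; vectors follows the usual 2*queues+2 convention and is not taken from this patch):

    -netdev tap,id=net0,queues=4,vhost=on \
    -device virtio-net-pci,netdev=net0,mq=on,vectors=10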
#include "hw/qdev-properties.h" #include "sysemu/sysemu.h" #include "hw/boards.h" @@ -36,17 +37,28 @@ #include "boot.h" +struct Nios2MachineState { + MachineState parent_obj; + + MemoryRegion phys_tcm; + MemoryRegion phys_tcm_alias; + MemoryRegion phys_ram; + MemoryRegion phys_ram_alias; + + bool vic; +}; + +#define TYPE_NIOS2_MACHINE MACHINE_TYPE_NAME("10m50-ghrd") +OBJECT_DECLARE_TYPE(Nios2MachineState, MachineClass, NIOS2_MACHINE) + #define BINARY_DEVICE_TREE_FILE "10m50-devboard.dtb" static void nios2_10m50_ghrd_init(MachineState *machine) { + Nios2MachineState *nms = NIOS2_MACHINE(machine); Nios2CPU *cpu; DeviceState *dev; MemoryRegion *address_space_mem = get_system_memory(); - MemoryRegion *phys_tcm = g_new(MemoryRegion, 1); - MemoryRegion *phys_tcm_alias = g_new(MemoryRegion, 1); - MemoryRegion *phys_ram = g_new(MemoryRegion, 1); - MemoryRegion *phys_ram_alias = g_new(MemoryRegion, 1); ram_addr_t tcm_base = 0x0; ram_addr_t tcm_size = 0x1000; /* 1 kiB, but QEMU limit is 4 kiB */ ram_addr_t ram_base = 0x08000000; @@ -55,27 +67,56 @@ static void nios2_10m50_ghrd_init(MachineState *machine) int i; /* Physical TCM (tb_ram_1k) with alias at 0xc0000000 */ - memory_region_init_ram(phys_tcm, NULL, "nios2.tcm", tcm_size, + memory_region_init_ram(&nms->phys_tcm, NULL, "nios2.tcm", tcm_size, &error_abort); - memory_region_init_alias(phys_tcm_alias, NULL, "nios2.tcm.alias", - phys_tcm, 0, tcm_size); - memory_region_add_subregion(address_space_mem, tcm_base, phys_tcm); + memory_region_init_alias(&nms->phys_tcm_alias, NULL, "nios2.tcm.alias", + &nms->phys_tcm, 0, tcm_size); + memory_region_add_subregion(address_space_mem, tcm_base, &nms->phys_tcm); memory_region_add_subregion(address_space_mem, 0xc0000000 + tcm_base, - phys_tcm_alias); + &nms->phys_tcm_alias); /* Physical DRAM with alias at 0xc0000000 */ - memory_region_init_ram(phys_ram, NULL, "nios2.ram", ram_size, + memory_region_init_ram(&nms->phys_ram, NULL, "nios2.ram", ram_size, &error_abort); - memory_region_init_alias(phys_ram_alias, NULL, "nios2.ram.alias", - phys_ram, 0, ram_size); - memory_region_add_subregion(address_space_mem, ram_base, phys_ram); + memory_region_init_alias(&nms->phys_ram_alias, NULL, "nios2.ram.alias", + &nms->phys_ram, 0, ram_size); + memory_region_add_subregion(address_space_mem, ram_base, &nms->phys_ram); memory_region_add_subregion(address_space_mem, 0xc0000000 + ram_base, - phys_ram_alias); + &nms->phys_ram_alias); - /* Create CPU -- FIXME */ - cpu = NIOS2_CPU(cpu_create(TYPE_NIOS2_CPU)); - for (i = 0; i < 32; i++) { - irq[i] = qdev_get_gpio_in_named(DEVICE(cpu), "IRQ", i); + /* Create CPU. We need to set eic_present between init and realize. */ + cpu = NIOS2_CPU(object_new(TYPE_NIOS2_CPU)); + + /* Enable the External Interrupt Controller within the CPU. */ + cpu->eic_present = nms->vic; + + /* Configure new exception vectors. 
*/ + cpu->reset_addr = 0xd4000000; + cpu->exception_addr = 0xc8000120; + cpu->fast_tlb_miss_addr = 0xc0000100; + + qdev_realize_and_unref(DEVICE(cpu), NULL, &error_fatal); + + if (nms->vic) { + DeviceState *dev = qdev_new(TYPE_NIOS2_VIC); + MemoryRegion *dev_mr; + qemu_irq cpu_irq; + + object_property_set_link(OBJECT(dev), "cpu", OBJECT(cpu), &error_fatal); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + cpu_irq = qdev_get_gpio_in_named(DEVICE(cpu), "EIC", 0); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, cpu_irq); + for (int i = 0; i < 32; i++) { + irq[i] = qdev_get_gpio_in(dev, i); + } + + dev_mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_add_subregion(address_space_mem, 0x18002000, dev_mr); + } else { + for (i = 0; i < 32; i++) { + irq[i] = qdev_get_gpio_in_named(DEVICE(cpu), "IRQ", i); + } } /* Register: Altera 16550 UART */ @@ -96,20 +137,44 @@ static void nios2_10m50_ghrd_init(MachineState *machine) sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xe0000880); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[5]); - /* Configure new exception vectors and reset CPU for it to take effect. */ - cpu->reset_addr = 0xd4000000; - cpu->exception_addr = 0xc8000120; - cpu->fast_tlb_miss_addr = 0xc0000100; - nios2_load_kernel(cpu, ram_base, ram_size, machine->initrd_filename, BINARY_DEVICE_TREE_FILE, NULL); } -static void nios2_10m50_ghrd_machine_init(struct MachineClass *mc) +static bool get_vic(Object *obj, Error **errp) { + Nios2MachineState *nms = NIOS2_MACHINE(obj); + return nms->vic; +} + +static void set_vic(Object *obj, bool value, Error **errp) +{ + Nios2MachineState *nms = NIOS2_MACHINE(obj); + nms->vic = value; +} + +static void nios2_10m50_ghrd_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + mc->desc = "Altera 10M50 GHRD Nios II design"; mc->init = nios2_10m50_ghrd_init; mc->is_default = true; + + object_class_property_add_bool(oc, "vic", get_vic, set_vic); + object_class_property_set_description(oc, "vic", + "Set on/off to enable/disable the Vectored Interrupt Controller"); } -DEFINE_MACHINE("10m50-ghrd", nios2_10m50_ghrd_machine_init); +static const TypeInfo nios2_10m50_ghrd_type_info = { + .name = TYPE_NIOS2_MACHINE, + .parent = TYPE_MACHINE, + .instance_size = sizeof(Nios2MachineState), + .class_init = nios2_10m50_ghrd_class_init, +}; + +static void nios2_10m50_ghrd_type_init(void) +{ + type_register_static(&nios2_10m50_ghrd_type_info); +} +type_init(nios2_10m50_ghrd_type_init); diff --git a/hw/nios2/Kconfig b/hw/nios2/Kconfig index b10ea640da..4748ae27b6 100644 --- a/hw/nios2/Kconfig +++ b/hw/nios2/Kconfig @@ -3,6 +3,7 @@ config NIOS2_10M50 select NIOS2 select SERIAL select ALTERA_TIMER + select NIOS2_VIC config NIOS2_GENERIC_NOMMU bool diff --git a/hw/nios2/boot.c b/hw/nios2/boot.c index 5b3e4efed5..b30a7b1efb 100644 --- a/hw/nios2/boot.c +++ b/hw/nios2/boot.c @@ -30,11 +30,11 @@ #include "qemu/osdep.h" #include "qemu/units.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qemu/option.h" #include "qemu/config-file.h" #include "qemu/error-report.h" +#include "qemu/guest-random.h" #include "sysemu/device_tree.h" #include "sysemu/reset.h" #include "hw/boards.h" @@ -43,6 +43,8 @@ #include "boot.h" +#include + #define NIOS2_MAGIC 0x534f494e static struct nios2_boot_info { @@ -81,9 +83,11 @@ static uint64_t translate_kernel_address(void *opaque, uint64_t addr) static int nios2_load_dtb(struct nios2_boot_info bi, const uint32_t ramsize, const char *kernel_cmdline, const char *dtb_filename) { + 
MachineState *machine = MACHINE(qdev_get_machine()); int fdt_size; void *fdt = NULL; int r; + uint8_t rng_seed[32]; if (dtb_filename) { fdt = load_device_tree(dtb_filename, &fdt_size); @@ -92,6 +96,9 @@ static int nios2_load_dtb(struct nios2_boot_info bi, const uint32_t ramsize, return 0; } + qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed)); + qemu_fdt_setprop(fdt, "/chosen", "rng-seed", rng_seed, sizeof(rng_seed)); + if (kernel_cmdline) { r = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", kernel_cmdline); @@ -109,7 +116,10 @@ static int nios2_load_dtb(struct nios2_boot_info bi, const uint32_t ramsize, } cpu_physical_memory_write(bi.fdt, fdt, fdt_size); - g_free(fdt); + + /* Set machine->fdt for 'dumpdtb' QMP/HMP command */ + machine->fdt = fdt; + return fdt_size; } @@ -140,7 +150,7 @@ void nios2_load_kernel(Nios2CPU *cpu, hwaddr ddr_base, uint64_t entry, high; int big_endian = 0; -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN big_endian = 1; #endif diff --git a/hw/nios2/generic_nommu.c b/hw/nios2/generic_nommu.c index fbc18dbd04..48edb3ae37 100644 --- a/hw/nios2/generic_nommu.c +++ b/hw/nios2/generic_nommu.c @@ -28,7 +28,6 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qemu-common.h" #include "hw/char/serial.h" #include "hw/boards.h" diff --git a/hw/nios2/meson.build b/hw/nios2/meson.build index 6c58e8082b..22277bd6c5 100644 --- a/hw/nios2/meson.build +++ b/hw/nios2/meson.build @@ -1,5 +1,5 @@ nios2_ss = ss.source_set() -nios2_ss.add(files('boot.c')) +nios2_ss.add(files('boot.c'), fdt) nios2_ss.add(when: 'CONFIG_NIOS2_10M50', if_true: files('10m50_devboard.c')) nios2_ss.add(when: 'CONFIG_NIOS2_GENERIC_NOMMU', if_true: files('generic_nommu.c')) diff --git a/hw/nubus/mac-nubus-bridge.c b/hw/nubus/mac-nubus-bridge.c index 7c329300b8..a0da5a8b2f 100644 --- a/hw/nubus/mac-nubus-bridge.c +++ b/hw/nubus/mac-nubus-bridge.c @@ -1,5 +1,7 @@ /* - * Copyright (c) 2013-2018 Laurent Vivier + * QEMU Macintosh Nubus + * + * Copyright (c) 2013-2018 Laurent Vivier * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. 
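The 10m50-ghrd and nios2 boot hunks above turn the board into its own machine type with a boolean "vic" property (wiring the Vectored Interrupt Controller and the CPU's EIC when enabled) and keep the loaded device tree in machine->fdt for the 'dumpdtb' QMP/HMP command. A hedged usage sketch, assuming the stock qemu-system-nios2 binary and an illustrative kernel path:

    qemu-system-nios2 -machine 10m50-ghrd,vic=on -kernel vmlinux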
@@ -13,13 +15,29 @@ static void mac_nubus_bridge_init(Object *obj) { - MacNubusState *s = MAC_NUBUS_BRIDGE(obj); + MacNubusBridge *s = MAC_NUBUS_BRIDGE(obj); + NubusBridge *nb = NUBUS_BRIDGE(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + NubusBus *bus = &nb->bus; - s->bus = NUBUS_BUS(qbus_create(TYPE_NUBUS_BUS, DEVICE(s), NULL)); + /* Macintosh only has slots 0x9 to 0xe available */ + bus->slot_available_mask = MAKE_64BIT_MASK(MAC_NUBUS_FIRST_SLOT, + MAC_NUBUS_SLOT_NB); - sysbus_init_mmio(sbd, &s->bus->super_slot_io); - sysbus_init_mmio(sbd, &s->bus->slot_io); + /* Aliases for slots 0x9 to 0xe */ + memory_region_init_alias(&s->super_slot_alias, obj, "super-slot-alias", + &bus->nubus_mr, + MAC_NUBUS_FIRST_SLOT * NUBUS_SUPER_SLOT_SIZE, + MAC_NUBUS_SLOT_NB * NUBUS_SUPER_SLOT_SIZE); + + memory_region_init_alias(&s->slot_alias, obj, "slot-alias", + &bus->nubus_mr, + NUBUS_SLOT_BASE + + MAC_NUBUS_FIRST_SLOT * NUBUS_SLOT_SIZE, + MAC_NUBUS_SLOT_NB * NUBUS_SLOT_SIZE); + + sysbus_init_mmio(sbd, &s->super_slot_alias); + sysbus_init_mmio(sbd, &s->slot_alias); } static void mac_nubus_bridge_class_init(ObjectClass *klass, void *data) @@ -33,7 +51,7 @@ static const TypeInfo mac_nubus_bridge_info = { .name = TYPE_MAC_NUBUS_BRIDGE, .parent = TYPE_NUBUS_BRIDGE, .instance_init = mac_nubus_bridge_init, - .instance_size = sizeof(MacNubusState), + .instance_size = sizeof(MacNubusBridge), .class_init = mac_nubus_bridge_class_init, }; diff --git a/hw/nubus/nubus-bridge.c b/hw/nubus/nubus-bridge.c index cd8c6a91eb..a42c86080f 100644 --- a/hw/nubus/nubus-bridge.c +++ b/hw/nubus/nubus-bridge.c @@ -1,5 +1,5 @@ /* - * QEMU Macintosh Nubus + * QEMU Nubus * * Copyright (c) 2013-2018 Laurent Vivier * @@ -12,17 +12,36 @@ #include "hw/sysbus.h" #include "hw/nubus/nubus.h" + +static void nubus_bridge_init(Object *obj) +{ + NubusBridge *s = NUBUS_BRIDGE(obj); + NubusBus *bus = &s->bus; + + qbus_init(bus, sizeof(s->bus), TYPE_NUBUS_BUS, DEVICE(s), NULL); + + qdev_init_gpio_out(DEVICE(s), bus->irqs, NUBUS_IRQS); +} + +static Property nubus_bridge_properties[] = { + DEFINE_PROP_UINT16("slot-available-mask", NubusBridge, + bus.slot_available_mask, 0xffff), + DEFINE_PROP_END_OF_LIST() +}; + static void nubus_bridge_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->fw_name = "nubus"; + device_class_set_props(dc, nubus_bridge_properties); } static const TypeInfo nubus_bridge_info = { .name = TYPE_NUBUS_BRIDGE, .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(SysBusDevice), + .instance_init = nubus_bridge_init, + .instance_size = sizeof(NubusBridge), .class_init = nubus_bridge_class_init, }; diff --git a/hw/nubus/nubus-bus.c b/hw/nubus/nubus-bus.c index 5c13452308..07c279bde5 100644 --- a/hw/nubus/nubus-bus.c +++ b/hw/nubus/nubus-bus.c @@ -8,9 +8,18 @@ * */ +/* + * References: + * Nubus Specification (TI) + * http://www.bitsavers.org/pdf/ti/nubus/2242825-0001_NuBus_Spec1983.pdf + * + * Designing Cards and Drivers for the Macintosh Family (Apple) + */ + #include "qemu/osdep.h" #include "hw/nubus/nubus.h" #include "qapi/error.h" +#include "trace.h" static NubusBus *nubus_find(void) @@ -19,72 +28,138 @@ static NubusBus *nubus_find(void) return NUBUS_BUS(object_resolve_path_type("", TYPE_NUBUS_BUS, NULL)); } -static void nubus_slot_write(void *opaque, hwaddr addr, uint64_t val, - unsigned int size) +static MemTxResult nubus_slot_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size, MemTxAttrs attrs) { - /* read only */ + trace_nubus_slot_write(addr, val, size); + return 
MEMTX_DECODE_ERROR; } - -static uint64_t nubus_slot_read(void *opaque, hwaddr addr, - unsigned int size) +static MemTxResult nubus_slot_read(void *opaque, hwaddr addr, uint64_t *data, + unsigned size, MemTxAttrs attrs) { - return 0; + trace_nubus_slot_read(addr, size); + return MEMTX_DECODE_ERROR; } static const MemoryRegionOps nubus_slot_ops = { - .read = nubus_slot_read, - .write = nubus_slot_write, + .read_with_attrs = nubus_slot_read, + .write_with_attrs = nubus_slot_write, .endianness = DEVICE_BIG_ENDIAN, .valid = { .min_access_size = 1, - .max_access_size = 1, + .max_access_size = 4, }, }; -static void nubus_super_slot_write(void *opaque, hwaddr addr, uint64_t val, - unsigned int size) +static MemTxResult nubus_super_slot_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size, + MemTxAttrs attrs) { - /* read only */ + trace_nubus_super_slot_write(addr, val, size); + return MEMTX_DECODE_ERROR; } -static uint64_t nubus_super_slot_read(void *opaque, hwaddr addr, - unsigned int size) +static MemTxResult nubus_super_slot_read(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, + MemTxAttrs attrs) { - return 0; + trace_nubus_super_slot_read(addr, size); + return MEMTX_DECODE_ERROR; } static const MemoryRegionOps nubus_super_slot_ops = { - .read = nubus_super_slot_read, - .write = nubus_super_slot_write, + .read_with_attrs = nubus_super_slot_read, + .write_with_attrs = nubus_super_slot_write, .endianness = DEVICE_BIG_ENDIAN, .valid = { .min_access_size = 1, - .max_access_size = 1, + .max_access_size = 4, }, }; +static void nubus_unrealize(BusState *bus) +{ + NubusBus *nubus = NUBUS_BUS(bus); + + address_space_destroy(&nubus->nubus_as); +} + static void nubus_realize(BusState *bus, Error **errp) { + NubusBus *nubus = NUBUS_BUS(bus); + if (!nubus_find()) { error_setg(errp, "at most one %s device is permitted", TYPE_NUBUS_BUS); return; } + + address_space_init(&nubus->nubus_as, &nubus->nubus_mr, "nubus"); } static void nubus_init(Object *obj) { NubusBus *nubus = NUBUS_BUS(obj); + memory_region_init(&nubus->nubus_mr, obj, "nubus", 0x100000000); + memory_region_init_io(&nubus->super_slot_io, obj, &nubus_super_slot_ops, nubus, "nubus-super-slots", - NUBUS_SUPER_SLOT_NB * NUBUS_SUPER_SLOT_SIZE); + (NUBUS_SUPER_SLOT_NB + 1) * NUBUS_SUPER_SLOT_SIZE); + memory_region_add_subregion(&nubus->nubus_mr, 0x0, &nubus->super_slot_io); memory_region_init_io(&nubus->slot_io, obj, &nubus_slot_ops, nubus, "nubus-slots", NUBUS_SLOT_NB * NUBUS_SLOT_SIZE); + memory_region_add_subregion(&nubus->nubus_mr, + (NUBUS_SUPER_SLOT_NB + 1) * + NUBUS_SUPER_SLOT_SIZE, &nubus->slot_io); - nubus->current_slot = NUBUS_FIRST_SLOT; + nubus->slot_available_mask = MAKE_64BIT_MASK(NUBUS_FIRST_SLOT, + NUBUS_SLOT_NB); +} + +static char *nubus_get_dev_path(DeviceState *dev) +{ + NubusDevice *nd = NUBUS_DEVICE(dev); + BusState *bus = qdev_get_parent_bus(dev); + char *p = qdev_get_dev_path(bus->parent); + + if (p) { + char *ret = g_strdup_printf("%s/%s/%02x", p, bus->name, nd->slot); + g_free(p); + return ret; + } else { + return g_strdup_printf("%s/%02x", bus->name, nd->slot); + } +} + +static bool nubus_check_address(BusState *bus, DeviceState *dev, Error **errp) +{ + NubusDevice *nd = NUBUS_DEVICE(dev); + NubusBus *nubus = NUBUS_BUS(bus); + + if (nd->slot == -1) { + /* No slot specified, find first available free slot */ + int s = ctz32(nubus->slot_available_mask); + if (s != 32) { + nd->slot = s; + } else { + error_setg(errp, "Cannot register nubus card, no free slot " + "available"); + return false; + } + } else { + 
/* Slot specified, make sure the slot is available */ + if (!(nubus->slot_available_mask & BIT(nd->slot))) { + error_setg(errp, "Cannot register nubus card, slot %d is " + "unavailable or already occupied", nd->slot); + return false; + } + } + + nubus->slot_available_mask &= ~BIT(nd->slot); + return true; } static void nubus_class_init(ObjectClass *oc, void *data) @@ -92,6 +167,9 @@ static void nubus_class_init(ObjectClass *oc, void *data) BusClass *bc = BUS_CLASS(oc); bc->realize = nubus_realize; + bc->unrealize = nubus_unrealize; + bc->check_address = nubus_check_address; + bc->get_dev_path = nubus_get_dev_path; } static const TypeInfo nubus_bus_info = { diff --git a/hw/nubus/nubus-device.c b/hw/nubus/nubus-device.c index ffe78a8823..0f1852f671 100644 --- a/hw/nubus/nubus-device.c +++ b/hw/nubus/nubus-device.c @@ -9,194 +9,99 @@ */ #include "qemu/osdep.h" +#include "qemu/datadir.h" +#include "hw/irq.h" +#include "hw/loader.h" #include "hw/nubus/nubus.h" #include "qapi/error.h" +#include "qemu/error-report.h" -/* The Format Block Structure */ - -#define FBLOCK_DIRECTORY_OFFSET 0 -#define FBLOCK_LENGTH 4 -#define FBLOCK_CRC 8 -#define FBLOCK_REVISION_LEVEL 12 -#define FBLOCK_FORMAT 13 -#define FBLOCK_TEST_PATTERN 14 -#define FBLOCK_RESERVED 18 -#define FBLOCK_BYTE_LANES 19 - -#define FBLOCK_SIZE 20 -#define FBLOCK_PATTERN_VAL 0x5a932bc7 - -static uint64_t nubus_fblock_read(void *opaque, hwaddr addr, unsigned int size) +void nubus_set_irq(NubusDevice *nd, int level) { - NubusDevice *dev = opaque; - uint64_t val; + NubusBus *nubus = NUBUS_BUS(qdev_get_parent_bus(DEVICE(nd))); -#define BYTE(v, b) (((v) >> (24 - 8 * (b))) & 0xff) - switch (addr) { - case FBLOCK_BYTE_LANES: - val = dev->byte_lanes; - val |= (val ^ 0xf) << 4; - break; - case FBLOCK_RESERVED: - val = 0x00; - break; - case FBLOCK_TEST_PATTERN...FBLOCK_TEST_PATTERN + 3: - val = BYTE(FBLOCK_PATTERN_VAL, addr - FBLOCK_TEST_PATTERN); - break; - case FBLOCK_FORMAT: - val = dev->rom_format; - break; - case FBLOCK_REVISION_LEVEL: - val = dev->rom_rev; - break; - case FBLOCK_CRC...FBLOCK_CRC + 3: - val = BYTE(dev->rom_crc, addr - FBLOCK_CRC); - break; - case FBLOCK_LENGTH...FBLOCK_LENGTH + 3: - val = BYTE(dev->rom_length, addr - FBLOCK_LENGTH); - break; - case FBLOCK_DIRECTORY_OFFSET...FBLOCK_DIRECTORY_OFFSET + 3: - val = BYTE(dev->directory_offset, addr - FBLOCK_DIRECTORY_OFFSET); - break; - default: - val = 0; - break; - } - return val; -} - -static void nubus_fblock_write(void *opaque, hwaddr addr, uint64_t val, - unsigned int size) -{ - /* read only */ -} - -static const MemoryRegionOps nubus_format_block_ops = { - .read = nubus_fblock_read, - .write = nubus_fblock_write, - .endianness = DEVICE_BIG_ENDIAN, - .valid = { - .min_access_size = 1, - .max_access_size = 1, - } -}; - -static void nubus_register_format_block(NubusDevice *dev) -{ - char *fblock_name; - - fblock_name = g_strdup_printf("nubus-slot-%d-format-block", - dev->slot_nb); - - hwaddr fblock_offset = memory_region_size(&dev->slot_mem) - FBLOCK_SIZE; - memory_region_init_io(&dev->fblock_io, NULL, &nubus_format_block_ops, - dev, fblock_name, FBLOCK_SIZE); - memory_region_add_subregion(&dev->slot_mem, fblock_offset, - &dev->fblock_io); - - g_free(fblock_name); -} - -static void mac_nubus_rom_write(void *opaque, hwaddr addr, uint64_t val, - unsigned int size) -{ - /* read only */ -} - -static uint64_t mac_nubus_rom_read(void *opaque, hwaddr addr, - unsigned int size) -{ - NubusDevice *dev = opaque; - - return dev->rom[addr]; -} - -static const MemoryRegionOps 
mac_nubus_rom_ops = { - .read = mac_nubus_rom_read, - .write = mac_nubus_rom_write, - .endianness = DEVICE_BIG_ENDIAN, - .valid = { - .min_access_size = 1, - .max_access_size = 1, - }, -}; - - -void nubus_register_rom(NubusDevice *dev, const uint8_t *rom, uint32_t size, - int revision, int format, uint8_t byte_lanes) -{ - hwaddr rom_offset; - char *rom_name; - - /* FIXME : really compute CRC */ - dev->rom_length = 0; - dev->rom_crc = 0; - - dev->rom_rev = revision; - dev->rom_format = format; - - dev->byte_lanes = byte_lanes; - dev->directory_offset = -size; - - /* ROM */ - - dev->rom = rom; - rom_name = g_strdup_printf("nubus-slot-%d-rom", dev->slot_nb); - memory_region_init_io(&dev->rom_io, NULL, &mac_nubus_rom_ops, - dev, rom_name, size); - memory_region_set_readonly(&dev->rom_io, true); - - rom_offset = memory_region_size(&dev->slot_mem) - FBLOCK_SIZE + - dev->directory_offset; - memory_region_add_subregion(&dev->slot_mem, rom_offset, &dev->rom_io); - - g_free(rom_name); + qemu_set_irq(nubus->irqs[nd->slot], level); } static void nubus_device_realize(DeviceState *dev, Error **errp) { NubusBus *nubus = NUBUS_BUS(qdev_get_parent_bus(dev)); NubusDevice *nd = NUBUS_DEVICE(dev); - char *name; + char *name, *path; hwaddr slot_offset; + int64_t size; + int ret; - if (nubus->current_slot < NUBUS_FIRST_SLOT || - nubus->current_slot > NUBUS_LAST_SLOT) { - error_setg(errp, "Cannot register nubus card, not enough slots"); - return; - } - - nd->slot_nb = nubus->current_slot++; - name = g_strdup_printf("nubus-slot-%d", nd->slot_nb); - - if (nd->slot_nb < NUBUS_FIRST_SLOT) { - /* Super */ - slot_offset = (nd->slot_nb - 6) * NUBUS_SUPER_SLOT_SIZE; - - memory_region_init(&nd->slot_mem, OBJECT(dev), name, - NUBUS_SUPER_SLOT_SIZE); - memory_region_add_subregion(&nubus->super_slot_io, slot_offset, - &nd->slot_mem); - } else { - /* Normal */ - slot_offset = nd->slot_nb * NUBUS_SLOT_SIZE; - - memory_region_init(&nd->slot_mem, OBJECT(dev), name, NUBUS_SLOT_SIZE); - memory_region_add_subregion(&nubus->slot_io, slot_offset, - &nd->slot_mem); - } + /* Super */ + slot_offset = nd->slot * NUBUS_SUPER_SLOT_SIZE; + name = g_strdup_printf("nubus-super-slot-%x", nd->slot); + memory_region_init(&nd->super_slot_mem, OBJECT(dev), name, + NUBUS_SUPER_SLOT_SIZE); + memory_region_add_subregion(&nubus->super_slot_io, slot_offset, + &nd->super_slot_mem); g_free(name); - nubus_register_format_block(nd); + + /* Normal */ + slot_offset = nd->slot * NUBUS_SLOT_SIZE; + + name = g_strdup_printf("nubus-slot-%x", nd->slot); + memory_region_init(&nd->slot_mem, OBJECT(dev), name, NUBUS_SLOT_SIZE); + memory_region_add_subregion(&nubus->slot_io, slot_offset, + &nd->slot_mem); + g_free(name); + + /* Declaration ROM */ + if (nd->romfile != NULL) { + path = qemu_find_file(QEMU_FILE_TYPE_BIOS, nd->romfile); + if (path == NULL) { + path = g_strdup(nd->romfile); + } + + size = get_image_size(path); + if (size < 0) { + error_setg(errp, "failed to find romfile \"%s\"", nd->romfile); + g_free(path); + return; + } else if (size == 0) { + error_setg(errp, "romfile \"%s\" is empty", nd->romfile); + g_free(path); + return; + } else if (size > NUBUS_DECL_ROM_MAX_SIZE) { + error_setg(errp, "romfile \"%s\" too large (maximum size 128K)", + nd->romfile); + g_free(path); + return; + } + + name = g_strdup_printf("nubus-slot-%x-declaration-rom", nd->slot); + memory_region_init_rom(&nd->decl_rom, OBJECT(dev), name, size, + &error_abort); + ret = load_image_mr(path, &nd->decl_rom); + g_free(path); + if (ret < 0) { + error_setg(errp, "could not load romfile 
\"%s\"", nd->romfile); + return; + } + memory_region_add_subregion(&nd->slot_mem, NUBUS_SLOT_SIZE - size, + &nd->decl_rom); + } } +static Property nubus_device_properties[] = { + DEFINE_PROP_INT32("slot", NubusDevice, slot, -1), + DEFINE_PROP_STRING("romfile", NubusDevice, romfile), + DEFINE_PROP_END_OF_LIST() +}; + static void nubus_device_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = nubus_device_realize; dc->bus_type = TYPE_NUBUS_BUS; + device_class_set_props(dc, nubus_device_properties); } static const TypeInfo nubus_device_type_info = { diff --git a/hw/nubus/trace-events b/hw/nubus/trace-events new file mode 100644 index 0000000000..e31833d694 --- /dev/null +++ b/hw/nubus/trace-events @@ -0,0 +1,7 @@ +# See docs/devel/tracing.txt for syntax documentation. + +# nubus-bus.c +nubus_slot_read(uint64_t addr, int size) "reading unassigned addr 0x%"PRIx64 " size %d" +nubus_slot_write(uint64_t addr, uint64_t val, int size) "writing unassigned addr 0x%"PRIx64 " value 0x%"PRIx64 " size %d" +nubus_super_slot_read(uint64_t addr, int size) "reading unassigned addr 0x%"PRIx64 " size %d" +nubus_super_slot_write(uint64_t addr, uint64_t val, int size) "writing unassigned addr 0x%"PRIx64 " value 0x%"PRIx64 " size %d" diff --git a/hw/nubus/trace.h b/hw/nubus/trace.h new file mode 100644 index 0000000000..3749420da1 --- /dev/null +++ b/hw/nubus/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_nubus.h" diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c index 6baf9e0420..749a6938dd 100644 --- a/hw/nvme/ctrl.c +++ b/hw/nvme/ctrl.c @@ -35,6 +35,11 @@ * mdts=,vsl=, \ * zoned.zasl=, \ * zoned.auto_transition=, \ + * sriov_max_vfs= \ + * sriov_vq_flexible= \ + * sriov_vi_flexible= \ + * sriov_max_vi_per_vf= \ + * sriov_max_vq_per_vf= \ * subsys= * -device nvme-ns,drive=,bus=,nsid=,\ * zoned=, \ @@ -71,7 +76,7 @@ * the SUBNQN field in the controller will report the NQN of the subsystem * device. This also enables multi controller capability represented in * Identify Controller data structure in CMIC (Controller Multi-path I/O and - * Namesapce Sharing Capabilities). + * Namespace Sharing Capabilities). * * - `aerl` * The Asynchronous Event Request Limit (AERL). Indicates the maximum number @@ -106,6 +111,35 @@ * transitioned to zone state closed for resource management purposes. * Defaults to 'on'. * + * - `sriov_max_vfs` + * Indicates the maximum number of PCIe virtual functions supported + * by the controller. The default value is 0. Specifying a non-zero value + * enables reporting of both SR-IOV and ARI capabilities by the NVMe device. + * Virtual function controllers will not report SR-IOV capability. + * + * NOTE: Single Root I/O Virtualization support is experimental. + * All the related parameters may be subject to change. + * + * - `sriov_vq_flexible` + * Indicates the total number of flexible queue resources assignable to all + * the secondary controllers. Implicitly sets the number of primary + * controller's private resources to `(max_ioqpairs - sriov_vq_flexible)`. + * + * - `sriov_vi_flexible` + * Indicates the total number of flexible interrupt resources assignable to + * all the secondary controllers. Implicitly sets the number of primary + * controller's private resources to `(msix_qsize - sriov_vi_flexible)`. + * + * - `sriov_max_vi_per_vf` + * Indicates the maximum number of virtual interrupt resources assignable + * to a secondary controller. The default 0 resolves to + * `(sriov_vi_flexible / sriov_max_vfs)`. 
+ * + * - `sriov_max_vq_per_vf` + * Indicates the maximum number of virtual queue resources assignable to + * a secondary controller. The default 0 resolves to + * `(sriov_vq_flexible / sriov_max_vfs)`. + * * nvme namespace device parameters * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * - `shared` @@ -154,15 +188,18 @@ #include "qemu/error-report.h" #include "qemu/log.h" #include "qemu/units.h" +#include "qemu/range.h" #include "qapi/error.h" #include "qapi/visitor.h" #include "sysemu/sysemu.h" #include "sysemu/block-backend.h" #include "sysemu/hostmem.h" #include "hw/pci/msix.h" +#include "hw/pci/pcie_sriov.h" #include "migration/vmstate.h" #include "nvme.h" +#include "dif.h" #include "trace.h" #define NVME_MAX_IOQPAIRS 0xffff @@ -175,6 +212,10 @@ #define NVME_TEMPERATURE_CRITICAL 0x175 #define NVME_NUM_FW_SLOTS 1 #define NVME_DEFAULT_MAX_ZA_SIZE (128 * KiB) +#define NVME_MAX_VFS 127 +#define NVME_VF_RES_GRANULARITY 1 +#define NVME_VF_OFFSET 0x1 +#define NVME_VF_STRIDE 1 #define NVME_GUEST_ERR(trace, fmt, ...) \ do { \ @@ -195,6 +236,7 @@ static const bool nvme_feature_support[NVME_FID_MAX] = { [NVME_WRITE_ATOMICITY] = true, [NVME_ASYNCHRONOUS_EVENT_CONF] = true, [NVME_TIMESTAMP] = true, + [NVME_HOST_BEHAVIOR_SUPPORT] = true, [NVME_COMMAND_SET_PROFILE] = true, }; @@ -205,6 +247,7 @@ static const uint32_t nvme_feature_cap[NVME_FID_MAX] = { [NVME_NUMBER_OF_QUEUES] = NVME_FEAT_CAP_CHANGE, [NVME_ASYNCHRONOUS_EVENT_CONF] = NVME_FEAT_CAP_CHANGE, [NVME_TIMESTAMP] = NVME_FEAT_CAP_CHANGE, + [NVME_HOST_BEHAVIOR_SUPPORT] = NVME_FEAT_CAP_CHANGE, [NVME_COMMAND_SET_PROFILE] = NVME_FEAT_CAP_CHANGE, }; @@ -220,6 +263,8 @@ static const uint32_t nvme_cse_acs[256] = { [NVME_ADM_CMD_GET_FEATURES] = NVME_CMD_EFF_CSUPP, [NVME_ADM_CMD_ASYNC_EV_REQ] = NVME_CMD_EFF_CSUPP, [NVME_ADM_CMD_NS_ATTACHMENT] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC, + [NVME_ADM_CMD_VIRT_MNGMT] = NVME_CMD_EFF_CSUPP, + [NVME_ADM_CMD_DBBUF_CONFIG] = NVME_CMD_EFF_CSUPP, [NVME_ADM_CMD_FORMAT_NVM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, }; @@ -251,6 +296,7 @@ static const uint32_t nvme_cse_iocs_zoned[256] = { }; static void nvme_process_sq(void *opaque); +static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst); static uint16_t nvme_sqid(NvmeRequest *req) { @@ -299,26 +345,37 @@ static void nvme_assign_zone_state(NvmeNamespace *ns, NvmeZone *zone, } } -/* - * Check if we can open a zone without exceeding open/active limits. - * AOR stands for "Active and Open Resources" (see TP 4053 section 2.5). - */ -static int nvme_aor_check(NvmeNamespace *ns, uint32_t act, uint32_t opn) +static uint16_t nvme_zns_check_resources(NvmeNamespace *ns, uint32_t act, + uint32_t opn, uint32_t zrwa) { if (ns->params.max_active_zones != 0 && ns->nr_active_zones + act > ns->params.max_active_zones) { trace_pci_nvme_err_insuff_active_res(ns->params.max_active_zones); return NVME_ZONE_TOO_MANY_ACTIVE | NVME_DNR; } + if (ns->params.max_open_zones != 0 && ns->nr_open_zones + opn > ns->params.max_open_zones) { trace_pci_nvme_err_insuff_open_res(ns->params.max_open_zones); return NVME_ZONE_TOO_MANY_OPEN | NVME_DNR; } + if (zrwa > ns->zns.numzrwa) { + return NVME_NOZRWA | NVME_DNR; + } + return NVME_SUCCESS; } +/* + * Check if we can open a zone without exceeding open/active limits. + * AOR stands for "Active and Open Resources" (see TP 4053 section 2.5). 
+ */ +static uint16_t nvme_aor_check(NvmeNamespace *ns, uint32_t act, uint32_t opn) +{ + return nvme_zns_check_resources(ns, act, opn, 0); +} + static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr) { hwaddr hi, lo; @@ -357,6 +414,24 @@ static inline void *nvme_addr_to_pmr(NvmeCtrl *n, hwaddr addr) return memory_region_get_ram_ptr(&n->pmr.dev->mr) + (addr - n->pmr.cba); } +static inline bool nvme_addr_is_iomem(NvmeCtrl *n, hwaddr addr) +{ + hwaddr hi, lo; + + /* + * The purpose of this check is to guard against invalid "local" access to + * the iomem (i.e. controller registers). Thus, we check against the range + * covered by the 'bar0' MemoryRegion since that is currently composed of + * two subregions (the NVMe "MBAR" and the MSI-X table/pba). Note, however, + * that if the device model is ever changed to allow the CMB to be located + * in BAR0 as well, then this must be changed. + */ + lo = n->bar0.addr; + hi = lo + int128_get64(n->bar0.size); + + return addr >= lo && addr < hi; +} + static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size) { hwaddr hi = addr + size - 1; @@ -377,7 +452,7 @@ static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size) return pci_dma_read(&n->parent_obj, addr, buf, size); } -static int nvme_addr_write(NvmeCtrl *n, hwaddr addr, void *buf, int size) +static int nvme_addr_write(NvmeCtrl *n, hwaddr addr, const void *buf, int size) { hwaddr hi = addr + size - 1; if (hi < addr) { @@ -405,12 +480,12 @@ static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid) static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid) { - return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1; + return sqid < n->conf_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1; } static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid) { - return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1; + return cqid < n->conf_ioqpairs + 1 && n->cq[cqid] != NULL ? 
0 : -1; } static void nvme_inc_cq_tail(NvmeCQueue *cq) @@ -614,6 +689,10 @@ static uint16_t nvme_map_addr(NvmeCtrl *n, NvmeSg *sg, hwaddr addr, size_t len) trace_pci_nvme_map_addr(addr, len); + if (nvme_addr_is_iomem(n, addr)) { + return NVME_DATA_TRAS_ERROR; + } + if (nvme_addr_is_cmb(n, addr)) { cmb = true; } else if (nvme_addr_is_pmr(n, addr)) { @@ -772,10 +851,6 @@ static uint16_t nvme_map_sgl_data(NvmeCtrl *n, NvmeSg *sg, uint8_t type = NVME_SGL_TYPE(segment[i].type); switch (type) { - case NVME_SGL_DESCR_TYPE_BIT_BUCKET: - if (cmd->opcode == NVME_CMD_WRITE) { - continue; - } case NVME_SGL_DESCR_TYPE_DATA_BLOCK: break; case NVME_SGL_DESCR_TYPE_SEGMENT: @@ -808,10 +883,6 @@ static uint16_t nvme_map_sgl_data(NvmeCtrl *n, NvmeSg *sg, trans_len = MIN(*len, dlen); - if (type == NVME_SGL_DESCR_TYPE_BIT_BUCKET) { - goto next; - } - addr = le64_to_cpu(segment[i].addr); if (UINT64_MAX - addr < dlen) { @@ -823,7 +894,6 @@ static uint16_t nvme_map_sgl_data(NvmeCtrl *n, NvmeSg *sg, return status; } -next: *len -= trans_len; } @@ -881,8 +951,7 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl, seg_len = le32_to_cpu(sgld->len); /* check the length of the (Last) Segment descriptor */ - if ((!seg_len || seg_len & 0xf) && - (NVME_SGL_TYPE(sgld->type) != NVME_SGL_DESCR_TYPE_BIT_BUCKET)) { + if (!seg_len || seg_len & 0xf) { return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR; } @@ -920,26 +989,20 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl, last_sgld = &segment[nsgld - 1]; /* - * If the segment ends with a Data Block or Bit Bucket Descriptor Type, - * then we are done. + * If the segment ends with a Data Block, then we are done. */ - switch (NVME_SGL_TYPE(last_sgld->type)) { - case NVME_SGL_DESCR_TYPE_DATA_BLOCK: - case NVME_SGL_DESCR_TYPE_BIT_BUCKET: + if (NVME_SGL_TYPE(last_sgld->type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) { status = nvme_map_sgl_data(n, sg, segment, nsgld, &len, cmd); if (status) { goto unmap; } goto out; - - default: - break; } /* - * If the last descriptor was not a Data Block or Bit Bucket, then the - * current segment must not be a Last Segment. + * If the last descriptor was not a Data Block, then the current + * segment must not be a Last Segment. 
*/ if (NVME_SGL_TYPE(sgld->type) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT) { status = NVME_INVALID_SGL_SEG_DESCR | NVME_DNR; @@ -1032,7 +1095,8 @@ static uint16_t nvme_map_data(NvmeCtrl *n, uint32_t nlb, NvmeRequest *req) size_t len = nvme_l2b(ns, nlb); uint16_t status; - if (nvme_ns_ext(ns) && !(pi && pract && ns->lbaf.ms == 8)) { + if (nvme_ns_ext(ns) && + !(pi && pract && ns->lbaf.ms == nvme_pi_tuple_size(ns))) { NvmeSg sg; len += nvme_m2b(ns, nlb); @@ -1140,18 +1204,19 @@ static uint16_t nvme_tx_interleaved(NvmeCtrl *n, NvmeSg *sg, uint8_t *ptr, return NVME_SUCCESS; } -static uint16_t nvme_tx(NvmeCtrl *n, NvmeSg *sg, uint8_t *ptr, uint32_t len, +static uint16_t nvme_tx(NvmeCtrl *n, NvmeSg *sg, void *ptr, uint32_t len, NvmeTxDirection dir) { assert(sg->flags & NVME_SG_ALLOC); if (sg->flags & NVME_SG_DMA) { - uint64_t residual; + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; + dma_addr_t residual; if (dir == NVME_TX_DIRECTION_TO_DEVICE) { - residual = dma_buf_write(ptr, len, &sg->qsg); + dma_buf_write(ptr, len, &residual, &sg->qsg, attrs); } else { - residual = dma_buf_read(ptr, len, &sg->qsg); + dma_buf_read(ptr, len, &residual, &sg->qsg, attrs); } if (unlikely(residual)) { @@ -1176,7 +1241,7 @@ static uint16_t nvme_tx(NvmeCtrl *n, NvmeSg *sg, uint8_t *ptr, uint32_t len, return NVME_SUCCESS; } -static inline uint16_t nvme_c2h(NvmeCtrl *n, uint8_t *ptr, uint32_t len, +static inline uint16_t nvme_c2h(NvmeCtrl *n, void *ptr, uint32_t len, NvmeRequest *req) { uint16_t status; @@ -1189,7 +1254,7 @@ static inline uint16_t nvme_c2h(NvmeCtrl *n, uint8_t *ptr, uint32_t len, return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_FROM_DEVICE); } -static inline uint16_t nvme_h2c(NvmeCtrl *n, uint8_t *ptr, uint32_t len, +static inline uint16_t nvme_h2c(NvmeCtrl *n, void *ptr, uint32_t len, NvmeRequest *req) { uint16_t status; @@ -1202,7 +1267,7 @@ static inline uint16_t nvme_h2c(NvmeCtrl *n, uint8_t *ptr, uint32_t len, return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_TO_DEVICE); } -uint16_t nvme_bounce_data(NvmeCtrl *n, uint8_t *ptr, uint32_t len, +uint16_t nvme_bounce_data(NvmeCtrl *n, void *ptr, uint32_t len, NvmeTxDirection dir, NvmeRequest *req) { NvmeNamespace *ns = req->ns; @@ -1210,7 +1275,8 @@ uint16_t nvme_bounce_data(NvmeCtrl *n, uint8_t *ptr, uint32_t len, bool pi = !!NVME_ID_NS_DPS_TYPE(ns->id_ns.dps); bool pract = !!(le16_to_cpu(rw->control) & NVME_RW_PRINFO_PRACT); - if (nvme_ns_ext(ns) && !(pi && pract && ns->lbaf.ms == 8)) { + if (nvme_ns_ext(ns) && + !(pi && pract && ns->lbaf.ms == nvme_pi_tuple_size(ns))) { return nvme_tx_interleaved(n, &req->sg, ptr, len, ns->lbasz, ns->lbaf.ms, 0, dir); } @@ -1218,7 +1284,7 @@ uint16_t nvme_bounce_data(NvmeCtrl *n, uint8_t *ptr, uint32_t len, return nvme_tx(n, &req->sg, ptr, len, dir); } -uint16_t nvme_bounce_mdata(NvmeCtrl *n, uint8_t *ptr, uint32_t len, +uint16_t nvme_bounce_mdata(NvmeCtrl *n, void *ptr, uint32_t len, NvmeTxDirection dir, NvmeRequest *req) { NvmeNamespace *ns = req->ns; @@ -1265,6 +1331,26 @@ static inline void nvme_blk_write(BlockBackend *blk, int64_t offset, } } +static void nvme_update_cq_eventidx(const NvmeCQueue *cq) +{ + uint32_t v = cpu_to_le32(cq->head); + + //not in 7.2: trace_pci_nvme_update_cq_eventidx(cq->cqid, cq->head); + + pci_dma_write(PCI_DEVICE(cq->ctrl), cq->ei_addr, &v, sizeof(v)); +} + +static void nvme_update_cq_head(NvmeCQueue *cq) +{ + uint32_t v; + + pci_dma_read(&cq->ctrl->parent_obj, cq->db_addr, &v, sizeof(v)); + + cq->head = le32_to_cpu(v); + + trace_pci_nvme_shadow_doorbell_cq(cq->cqid, 
cq->head); +} + static void nvme_post_cqes(void *opaque) { NvmeCQueue *cq = opaque; @@ -1277,6 +1363,11 @@ static void nvme_post_cqes(void *opaque) NvmeSQueue *sq; hwaddr addr; + if (n->dbbuf_enabled) { + nvme_update_cq_eventidx(cq); + nvme_update_cq_head(cq); + } + if (nvme_cq_full(cq)) { break; } @@ -1323,7 +1414,8 @@ static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req) QTAILQ_REMOVE(&req->sq->out_req_list, req, entry); QTAILQ_INSERT_TAIL(&cq->req_list, req, entry); - timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); + + qemu_bh_schedule(cq->bh); } static void nvme_process_aers(void *opaque) @@ -1605,9 +1697,19 @@ static uint16_t nvme_check_zone_write(NvmeNamespace *ns, NvmeZone *zone, return status; } - if (unlikely(slba != zone->w_ptr)) { - trace_pci_nvme_err_write_not_at_wp(slba, zone->d.zslba, zone->w_ptr); - return NVME_ZONE_INVALID_WRITE; + if (zone->d.za & NVME_ZA_ZRWA_VALID) { + uint64_t ezrwa = zone->w_ptr + 2 * ns->zns.zrwas; + + if (slba < zone->w_ptr || slba + nlb > ezrwa) { + trace_pci_nvme_err_zone_invalid_write(slba, zone->w_ptr); + return NVME_ZONE_INVALID_WRITE; + } + } else { + if (unlikely(slba != zone->w_ptr)) { + trace_pci_nvme_err_write_not_at_wp(slba, zone->d.zslba, + zone->w_ptr); + return NVME_ZONE_INVALID_WRITE; + } } if (unlikely((slba + nlb) > zcap)) { @@ -1687,6 +1789,14 @@ static uint16_t nvme_zrm_finish(NvmeNamespace *ns, NvmeZone *zone) /* fallthrough */ case NVME_ZONE_STATE_CLOSED: nvme_aor_dec_active(ns); + + if (zone->d.za & NVME_ZA_ZRWA_VALID) { + zone->d.za &= ~NVME_ZA_ZRWA_VALID; + if (ns->params.numzrwa) { + ns->zns.numzrwa++; + } + } + /* fallthrough */ case NVME_ZONE_STATE_EMPTY: nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_FULL); @@ -1722,6 +1832,13 @@ static uint16_t nvme_zrm_reset(NvmeNamespace *ns, NvmeZone *zone) /* fallthrough */ case NVME_ZONE_STATE_CLOSED: nvme_aor_dec_active(ns); + + if (zone->d.za & NVME_ZA_ZRWA_VALID) { + if (ns->params.numzrwa) { + ns->zns.numzrwa++; + } + } + /* fallthrough */ case NVME_ZONE_STATE_FULL: zone->w_ptr = zone->d.zslba; @@ -1755,6 +1872,7 @@ static void nvme_zrm_auto_transition_zone(NvmeNamespace *ns) enum { NVME_ZRM_AUTO = 1 << 0, + NVME_ZRM_ZRWA = 1 << 1, }; static uint16_t nvme_zrm_open_flags(NvmeCtrl *n, NvmeNamespace *ns, @@ -1773,7 +1891,8 @@ static uint16_t nvme_zrm_open_flags(NvmeCtrl *n, NvmeNamespace *ns, if (n->params.auto_transition_zones) { nvme_zrm_auto_transition_zone(ns); } - status = nvme_aor_check(ns, act, 1); + status = nvme_zns_check_resources(ns, act, 1, + (flags & NVME_ZRM_ZRWA) ? 
1 : 0); if (status) { return status; } @@ -1801,6 +1920,12 @@ static uint16_t nvme_zrm_open_flags(NvmeCtrl *n, NvmeNamespace *ns, /* fallthrough */ case NVME_ZONE_STATE_EXPLICITLY_OPEN: + if (flags & NVME_ZRM_ZRWA) { + ns->zns.numzrwa--; + + zone->d.za |= NVME_ZA_ZRWA_VALID; + } + return NVME_SUCCESS; default: @@ -1814,12 +1939,6 @@ static inline uint16_t nvme_zrm_auto(NvmeCtrl *n, NvmeNamespace *ns, return nvme_zrm_open_flags(n, ns, zone, NVME_ZRM_AUTO); } -static inline uint16_t nvme_zrm_open(NvmeCtrl *n, NvmeNamespace *ns, - NvmeZone *zone) -{ - return nvme_zrm_open_flags(n, ns, zone, 0); -} - static void nvme_advance_zone_wp(NvmeNamespace *ns, NvmeZone *zone, uint32_t nlb) { @@ -1830,6 +1949,20 @@ static void nvme_advance_zone_wp(NvmeNamespace *ns, NvmeZone *zone, } } +static void nvme_zoned_zrwa_implicit_flush(NvmeNamespace *ns, NvmeZone *zone, + uint32_t nlbc) +{ + uint16_t nzrwafgs = DIV_ROUND_UP(nlbc, ns->zns.zrwafg); + + nlbc = nzrwafgs * ns->zns.zrwafg; + + trace_pci_nvme_zoned_zrwa_implicit_flush(zone->d.zslba, nlbc); + + zone->w_ptr += nlbc; + + nvme_advance_zone_wp(ns, zone, nlbc); +} + static void nvme_finalize_zoned_write(NvmeNamespace *ns, NvmeRequest *req) { NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; @@ -1842,6 +1975,17 @@ static void nvme_finalize_zoned_write(NvmeNamespace *ns, NvmeRequest *req) zone = nvme_get_zone_by_slba(ns, slba); assert(zone); + if (zone->d.za & NVME_ZA_ZRWA_VALID) { + uint64_t ezrwa = zone->w_ptr + ns->zns.zrwas - 1; + uint64_t elba = slba + nlb - 1; + + if (elba > ezrwa) { + nvme_zoned_zrwa_implicit_flush(ns, zone, elba - ezrwa); + } + + return; + } + nvme_advance_zone_wp(ns, zone, nlb); } @@ -1959,9 +2103,12 @@ static void nvme_verify_cb(void *opaque, int ret) uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); uint16_t apptag = le16_to_cpu(rw->apptag); uint16_t appmask = le16_to_cpu(rw->appmask); - uint32_t reftag = le32_to_cpu(rw->reftag); + uint64_t reftag = le32_to_cpu(rw->reftag); + uint64_t cdw3 = le32_to_cpu(rw->cdw3); uint16_t status; + reftag |= cdw3 << 32; + trace_pci_nvme_verify_cb(nvme_cid(req), prinfo, apptag, appmask, reftag); if (ret) { @@ -2050,7 +2197,8 @@ static void nvme_compare_mdata_cb(void *opaque, int ret) uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); uint16_t apptag = le16_to_cpu(rw->apptag); uint16_t appmask = le16_to_cpu(rw->appmask); - uint32_t reftag = le32_to_cpu(rw->reftag); + uint64_t reftag = le32_to_cpu(rw->reftag); + uint64_t cdw3 = le32_to_cpu(rw->cdw3); struct nvme_compare_ctx *ctx = req->opaque; g_autofree uint8_t *buf = NULL; BlockBackend *blk = ns->blkconf.blk; @@ -2058,6 +2206,8 @@ static void nvme_compare_mdata_cb(void *opaque, int ret) BlockAcctStats *stats = blk_get_stats(blk); uint16_t status = NVME_SUCCESS; + reftag |= cdw3 << 32; + trace_pci_nvme_compare_mdata_cb(nvme_cid(req)); if (ret) { @@ -2095,7 +2245,7 @@ static void nvme_compare_mdata_cb(void *opaque, int ret) * tuple. */ if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) { - pil = ns->lbaf.ms - sizeof(NvmeDifTuple); + pil = ns->lbaf.ms - nvme_pi_tuple_size(ns); } for (bufp = buf; mbufp < end; bufp += ns->lbaf.ms, mbufp += ns->lbaf.ms) { @@ -2193,7 +2343,6 @@ typedef struct NvmeDSMAIOCB { BlockAIOCB common; BlockAIOCB *aiocb; NvmeRequest *req; - QEMUBH *bh; int ret; NvmeDsmRange *range; @@ -2215,7 +2364,7 @@ static void nvme_dsm_cancel(BlockAIOCB *aiocb) } else { /* * We only reach this if nvme_dsm_cancel() has already been called or - * the command ran to completion and nvme_dsm_bh is scheduled to run. 
+ * the command ran to completion. */ assert(iocb->idx == iocb->nr); } @@ -2226,17 +2375,6 @@ static const AIOCBInfo nvme_dsm_aiocb_info = { .cancel_async = nvme_dsm_cancel, }; -static void nvme_dsm_bh(void *opaque) -{ - NvmeDSMAIOCB *iocb = opaque; - - iocb->common.cb(iocb->common.opaque, iocb->ret); - - qemu_bh_delete(iocb->bh); - iocb->bh = NULL; - qemu_aio_unref(iocb); -} - static void nvme_dsm_cb(void *opaque, int ret); static void nvme_dsm_md_cb(void *opaque, int ret) @@ -2248,16 +2386,10 @@ static void nvme_dsm_md_cb(void *opaque, int ret) uint64_t slba; uint32_t nlb; - if (ret < 0) { - iocb->ret = ret; + if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) { goto done; } - if (!ns->lbaf.ms) { - nvme_dsm_cb(iocb, 0); - return; - } - range = &iocb->range[iocb->idx - 1]; slba = le64_to_cpu(range->slba); nlb = le32_to_cpu(range->nlb); @@ -2270,11 +2402,11 @@ static void nvme_dsm_md_cb(void *opaque, int ret) ret = nvme_block_status_all(ns, slba, nlb, BDRV_BLOCK_ZERO); if (ret) { if (ret < 0) { - iocb->ret = ret; goto done; } nvme_dsm_cb(iocb, 0); + return; } iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, nvme_moff(ns, slba), @@ -2283,8 +2415,7 @@ static void nvme_dsm_md_cb(void *opaque, int ret) return; done: - iocb->aiocb = NULL; - qemu_bh_schedule(iocb->bh); + nvme_dsm_cb(iocb, ret); } static void nvme_dsm_cb(void *opaque, int ret) @@ -2297,7 +2428,9 @@ static void nvme_dsm_cb(void *opaque, int ret) uint64_t slba; uint32_t nlb; - if (ret < 0) { + if (iocb->ret < 0) { + goto done; + } else if (ret < 0) { iocb->ret = ret; goto done; } @@ -2331,7 +2464,8 @@ next: done: iocb->aiocb = NULL; - qemu_bh_schedule(iocb->bh); + iocb->common.cb(iocb->common.opaque, iocb->ret); + qemu_aio_unref(iocb); } static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req) @@ -2349,7 +2483,6 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req) nvme_misc_cb, req); iocb->req = req; - iocb->bh = qemu_bh_new(nvme_dsm_bh, iocb); iocb->ret = 0; iocb->range = g_new(NvmeDsmRange, nr); iocb->nr = nr; @@ -2358,6 +2491,9 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req) status = nvme_h2c(n, (uint8_t *)iocb->range, sizeof(NvmeDsmRange) * nr, req); if (status) { + g_free(iocb->range); + qemu_aio_unref(iocb); + return status; } @@ -2433,10 +2569,10 @@ typedef struct NvmeCopyAIOCB { BlockAIOCB common; BlockAIOCB *aiocb; NvmeRequest *req; - QEMUBH *bh; int ret; - NvmeCopySourceRange *ranges; + void *ranges; + unsigned int format; int nr; int idx; @@ -2447,7 +2583,7 @@ typedef struct NvmeCopyAIOCB { BlockAcctCookie write; } acct; - uint32_t reftag; + uint64_t reftag; uint64_t slba; NvmeZone *zone; @@ -2470,9 +2606,8 @@ static const AIOCBInfo nvme_copy_aiocb_info = { .cancel_async = nvme_copy_cancel, }; -static void nvme_copy_bh(void *opaque) +static void nvme_copy_done(NvmeCopyAIOCB *iocb) { - NvmeCopyAIOCB *iocb = opaque; NvmeRequest *req = iocb->req; NvmeNamespace *ns = req->ns; BlockAcctStats *stats = blk_get_stats(ns->blkconf.blk); @@ -2484,9 +2619,6 @@ static void nvme_copy_bh(void *opaque) qemu_iovec_destroy(&iocb->iov); g_free(iocb->bounce); - qemu_bh_delete(iocb->bh); - iocb->bh = NULL; - if (iocb->ret < 0) { block_acct_failed(stats, &iocb->acct.read); block_acct_failed(stats, &iocb->acct.write); @@ -2499,15 +2631,103 @@ static void nvme_copy_bh(void *opaque) qemu_aio_unref(iocb); } -static void nvme_copy_cb(void *opaque, int ret); +static void nvme_do_copy(NvmeCopyAIOCB *iocb); + +static void nvme_copy_source_range_parse_format0(void *ranges, int idx, + uint64_t *slba, uint32_t *nlb, + uint16_t *apptag, + 
uint16_t *appmask, + uint64_t *reftag) +{ + NvmeCopySourceRangeFormat0 *_ranges = ranges; + + if (slba) { + *slba = le64_to_cpu(_ranges[idx].slba); + } + + if (nlb) { + *nlb = le16_to_cpu(_ranges[idx].nlb) + 1; + } + + if (apptag) { + *apptag = le16_to_cpu(_ranges[idx].apptag); + } + + if (appmask) { + *appmask = le16_to_cpu(_ranges[idx].appmask); + } + + if (reftag) { + *reftag = le32_to_cpu(_ranges[idx].reftag); + } +} + +static void nvme_copy_source_range_parse_format1(void *ranges, int idx, + uint64_t *slba, uint32_t *nlb, + uint16_t *apptag, + uint16_t *appmask, + uint64_t *reftag) +{ + NvmeCopySourceRangeFormat1 *_ranges = ranges; + + if (slba) { + *slba = le64_to_cpu(_ranges[idx].slba); + } + + if (nlb) { + *nlb = le16_to_cpu(_ranges[idx].nlb) + 1; + } + + if (apptag) { + *apptag = le16_to_cpu(_ranges[idx].apptag); + } + + if (appmask) { + *appmask = le16_to_cpu(_ranges[idx].appmask); + } + + if (reftag) { + *reftag = 0; + + *reftag |= (uint64_t)_ranges[idx].sr[4] << 40; + *reftag |= (uint64_t)_ranges[idx].sr[5] << 32; + *reftag |= (uint64_t)_ranges[idx].sr[6] << 24; + *reftag |= (uint64_t)_ranges[idx].sr[7] << 16; + *reftag |= (uint64_t)_ranges[idx].sr[8] << 8; + *reftag |= (uint64_t)_ranges[idx].sr[9]; + } +} + +static void nvme_copy_source_range_parse(void *ranges, int idx, uint8_t format, + uint64_t *slba, uint32_t *nlb, + uint16_t *apptag, uint16_t *appmask, + uint64_t *reftag) +{ + switch (format) { + case NVME_COPY_FORMAT_0: + nvme_copy_source_range_parse_format0(ranges, idx, slba, nlb, apptag, + appmask, reftag); + break; + + case NVME_COPY_FORMAT_1: + nvme_copy_source_range_parse_format1(ranges, idx, slba, nlb, apptag, + appmask, reftag); + break; + + default: + abort(); + } +} static void nvme_copy_out_completed_cb(void *opaque, int ret) { NvmeCopyAIOCB *iocb = opaque; NvmeRequest *req = iocb->req; NvmeNamespace *ns = req->ns; - NvmeCopySourceRange *range = &iocb->ranges[iocb->idx]; - uint32_t nlb = le32_to_cpu(range->nlb) + 1; + uint32_t nlb; + + nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL, + &nlb, NULL, NULL, NULL); if (ret < 0) { iocb->ret = ret; @@ -2523,7 +2743,7 @@ static void nvme_copy_out_completed_cb(void *opaque, int ret) iocb->idx++; iocb->slba += nlb; out: - nvme_copy_cb(iocb, iocb->ret); + nvme_do_copy(iocb); } static void nvme_copy_out_cb(void *opaque, int ret) @@ -2531,25 +2751,16 @@ static void nvme_copy_out_cb(void *opaque, int ret) NvmeCopyAIOCB *iocb = opaque; NvmeRequest *req = iocb->req; NvmeNamespace *ns = req->ns; - NvmeCopySourceRange *range; uint32_t nlb; size_t mlen; uint8_t *mbounce; - if (ret < 0) { - iocb->ret = ret; - goto out; - } else if (iocb->ret < 0) { + if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) { goto out; } - if (!ns->lbaf.ms) { - nvme_copy_out_completed_cb(iocb, 0); - return; - } - - range = &iocb->ranges[iocb->idx]; - nlb = le32_to_cpu(range->nlb) + 1; + nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL, + &nlb, NULL, NULL, NULL); mlen = nvme_m2b(ns, nlb); mbounce = iocb->bounce + nvme_l2b(ns, nlb); @@ -2564,7 +2775,7 @@ static void nvme_copy_out_cb(void *opaque, int ret) return; out: - nvme_copy_cb(iocb, ret); + nvme_copy_out_completed_cb(iocb, ret); } static void nvme_copy_in_completed_cb(void *opaque, int ret) @@ -2572,8 +2783,10 @@ static void nvme_copy_in_completed_cb(void *opaque, int ret) NvmeCopyAIOCB *iocb = opaque; NvmeRequest *req = iocb->req; NvmeNamespace *ns = req->ns; - NvmeCopySourceRange *range; uint32_t nlb; + uint64_t slba; + uint16_t apptag, appmask; + 
uint64_t reftag; size_t len; uint16_t status; @@ -2584,8 +2797,8 @@ static void nvme_copy_in_completed_cb(void *opaque, int ret) goto out; } - range = &iocb->ranges[iocb->idx]; - nlb = le32_to_cpu(range->nlb) + 1; + nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba, + &nlb, &apptag, &appmask, &reftag); len = nvme_l2b(ns, nlb); trace_pci_nvme_copy_out(iocb->slba, nlb); @@ -2596,14 +2809,13 @@ static void nvme_copy_in_completed_cb(void *opaque, int ret) uint16_t prinfor = ((copy->control[0] >> 4) & 0xf); uint16_t prinfow = ((copy->control[2] >> 2) & 0xf); - uint16_t apptag = le16_to_cpu(range->apptag); - uint16_t appmask = le16_to_cpu(range->appmask); - uint32_t reftag = le32_to_cpu(range->reftag); - - uint64_t slba = le64_to_cpu(range->slba); size_t mlen = nvme_m2b(ns, nlb); uint8_t *mbounce = iocb->bounce + nvme_l2b(ns, nlb); + status = nvme_dif_mangle_mdata(ns, mbounce, mlen, slba); + if (status) { + goto invalid; + } status = nvme_dif_check(ns, iocb->bounce, len, mbounce, mlen, prinfor, slba, apptag, appmask, &reftag); if (status) { @@ -2642,7 +2854,9 @@ static void nvme_copy_in_completed_cb(void *opaque, int ret) goto invalid; } - iocb->zone->w_ptr += nlb; + if (!(iocb->zone->d.za & NVME_ZA_ZRWA_VALID)) { + iocb->zone->w_ptr += nlb; + } } qemu_iovec_reset(&iocb->iov); @@ -2655,15 +2869,9 @@ static void nvme_copy_in_completed_cb(void *opaque, int ret) invalid: req->status = status; - iocb->aiocb = NULL; - if (iocb->bh) { - qemu_bh_schedule(iocb->bh); - } - - return; - + iocb->ret = -1; out: - nvme_copy_cb(iocb, ret); + nvme_do_copy(iocb); } static void nvme_copy_in_cb(void *opaque, int ret) @@ -2671,25 +2879,15 @@ static void nvme_copy_in_cb(void *opaque, int ret) NvmeCopyAIOCB *iocb = opaque; NvmeRequest *req = iocb->req; NvmeNamespace *ns = req->ns; - NvmeCopySourceRange *range; uint64_t slba; uint32_t nlb; - if (ret < 0) { - iocb->ret = ret; - goto out; - } else if (iocb->ret < 0) { + if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) { goto out; } - if (!ns->lbaf.ms) { - nvme_copy_in_completed_cb(iocb, 0); - return; - } - - range = &iocb->ranges[iocb->idx]; - slba = le64_to_cpu(range->slba); - nlb = le32_to_cpu(range->nlb) + 1; + nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba, + &nlb, NULL, NULL, NULL); qemu_iovec_reset(&iocb->iov); qemu_iovec_add(&iocb->iov, iocb->bounce + nvme_l2b(ns, nlb), @@ -2701,24 +2899,19 @@ static void nvme_copy_in_cb(void *opaque, int ret) return; out: - nvme_copy_cb(iocb, iocb->ret); + nvme_copy_in_completed_cb(iocb, ret); } -static void nvme_copy_cb(void *opaque, int ret) +static void nvme_do_copy(NvmeCopyAIOCB *iocb) { - NvmeCopyAIOCB *iocb = opaque; NvmeRequest *req = iocb->req; NvmeNamespace *ns = req->ns; - NvmeCopySourceRange *range; uint64_t slba; uint32_t nlb; size_t len; uint16_t status; - if (ret < 0) { - iocb->ret = ret; - goto done; - } else if (iocb->ret < 0) { + if (iocb->ret < 0) { goto done; } @@ -2726,9 +2919,8 @@ static void nvme_copy_cb(void *opaque, int ret) goto done; } - range = &iocb->ranges[iocb->idx]; - slba = le64_to_cpu(range->slba); - nlb = le32_to_cpu(range->nlb) + 1; + nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba, + &nlb, NULL, NULL, NULL); len = nvme_l2b(ns, nlb); trace_pci_nvme_copy_source_range(slba, nlb); @@ -2766,14 +2958,11 @@ static void nvme_copy_cb(void *opaque, int ret) invalid: req->status = status; + iocb->ret = -1; done: - iocb->aiocb = NULL; - if (iocb->bh) { - qemu_bh_schedule(iocb->bh); - } + nvme_copy_done(iocb); } - static uint16_t 
nvme_copy(NvmeCtrl *n, NvmeRequest *req) { NvmeNamespace *ns = req->ns; @@ -2784,6 +2973,7 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req) uint8_t format = copy->control[0] & 0xf; uint16_t prinfor = ((copy->control[0] >> 4) & 0xf); uint16_t prinfow = ((copy->control[2] >> 2) & 0xf); + size_t len = sizeof(NvmeCopySourceRangeFormat0); uint16_t status; @@ -2809,10 +2999,19 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req) goto invalid; } - iocb->ranges = g_new(NvmeCopySourceRange, nr); + if ((ns->pif == 0x0 && format != 0x0) || + (ns->pif != 0x0 && format != 0x1)) { + status = NVME_INVALID_FORMAT | NVME_DNR; + goto invalid; + } - status = nvme_h2c(n, (uint8_t *)iocb->ranges, - sizeof(NvmeCopySourceRange) * nr, req); + if (ns->pif) { + len = sizeof(NvmeCopySourceRangeFormat1); + } + + iocb->format = format; + iocb->ranges = g_malloc_n(nr, len); + status = nvme_h2c(n, (uint8_t *)iocb->ranges, len * nr, req); if (status) { goto invalid; } @@ -2833,11 +3032,11 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req) } iocb->req = req; - iocb->bh = qemu_bh_new(nvme_copy_bh, iocb); iocb->ret = 0; iocb->nr = nr; iocb->idx = 0; iocb->reftag = le32_to_cpu(copy->reftag); + iocb->reftag |= (uint64_t)le32_to_cpu(copy->cdw3) << 32; iocb->bounce = g_malloc_n(le16_to_cpu(ns->id_ns.mssrl), ns->lbasz + ns->lbaf.ms); @@ -2849,7 +3048,7 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req) BLOCK_ACCT_WRITE); req->aiocb = &iocb->common; - nvme_copy_cb(iocb, 0); + nvme_do_copy(iocb); return NVME_NO_COMPLETE; @@ -2925,7 +3124,6 @@ typedef struct NvmeFlushAIOCB { BlockAIOCB common; BlockAIOCB *aiocb; NvmeRequest *req; - QEMUBH *bh; int ret; NvmeNamespace *ns; @@ -2941,6 +3139,7 @@ static void nvme_flush_cancel(BlockAIOCB *acb) if (iocb->aiocb) { blk_aio_cancel_async(iocb->aiocb); + iocb->aiocb = NULL; } } @@ -2950,6 +3149,8 @@ static const AIOCBInfo nvme_flush_aiocb_info = { .get_aio_context = nvme_get_aio_context, }; +static void nvme_do_flush(NvmeFlushAIOCB *iocb); + static void nvme_flush_ns_cb(void *opaque, int ret) { NvmeFlushAIOCB *iocb = opaque; @@ -2971,13 +3172,11 @@ static void nvme_flush_ns_cb(void *opaque, int ret) } out: - iocb->aiocb = NULL; - qemu_bh_schedule(iocb->bh); + nvme_do_flush(iocb); } -static void nvme_flush_bh(void *opaque) +static void nvme_do_flush(NvmeFlushAIOCB *iocb) { - NvmeFlushAIOCB *iocb = opaque; NvmeRequest *req = iocb->req; NvmeCtrl *n = nvme_ctrl(req); int i; @@ -3004,14 +3203,8 @@ static void nvme_flush_bh(void *opaque) return; done: - qemu_bh_delete(iocb->bh); - iocb->bh = NULL; - iocb->common.cb(iocb->common.opaque, iocb->ret); - qemu_aio_unref(iocb); - - return; } static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req) @@ -3023,7 +3216,6 @@ static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req) iocb = qemu_aio_get(&nvme_flush_aiocb_info, NULL, nvme_misc_cb, req); iocb->req = req; - iocb->bh = qemu_bh_new(nvme_flush_bh, iocb); iocb->ret = 0; iocb->ns = NULL; iocb->nsid = 0; @@ -3045,13 +3237,11 @@ static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req) } req->aiocb = &iocb->common; - qemu_bh_schedule(iocb->bh); + nvme_do_flush(iocb); return NVME_NO_COMPLETE; out: - qemu_bh_delete(iocb->bh); - iocb->bh = NULL; qemu_aio_unref(iocb); return status; @@ -3076,7 +3266,7 @@ static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req) if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { bool pract = prinfo & NVME_PRINFO_PRACT; - if (pract && ns->lbaf.ms == 8) { + if (pract && ns->lbaf.ms == nvme_pi_tuple_size(ns)) { mapped_size = data_size; } } @@ -3153,7 
+3343,7 @@ static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append, if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { bool pract = prinfo & NVME_PRINFO_PRACT; - if (pract && ns->lbaf.ms == 8) { + if (pract && ns->lbaf.ms == nvme_pi_tuple_size(ns)) { mapped_size -= nvme_m2b(ns, nlb); } } @@ -3181,6 +3371,10 @@ static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append, if (append) { bool piremap = !!(ctrl & NVME_RW_PIREMAP); + if (unlikely(zone->d.za & NVME_ZA_ZRWA_VALID)) { + return NVME_INVALID_ZONE_OP | NVME_DNR; + } + if (unlikely(slba != zone->d.zslba)) { trace_pci_nvme_err_append_not_at_start(slba, zone->d.zslba); status = NVME_INVALID_FIELD; @@ -3232,7 +3426,9 @@ static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append, goto invalid; } - zone->w_ptr += nlb; + if (!(zone->d.za & NVME_ZA_ZRWA_VALID)) { + zone->w_ptr += nlb; + } } data_offset = nvme_l2b(ns, slba); @@ -3316,7 +3512,24 @@ enum NvmeZoneProcessingMask { static uint16_t nvme_open_zone(NvmeNamespace *ns, NvmeZone *zone, NvmeZoneState state, NvmeRequest *req) { - return nvme_zrm_open(nvme_ctrl(req), ns, zone); + NvmeZoneSendCmd *cmd = (NvmeZoneSendCmd *)&req->cmd; + int flags = 0; + + if (cmd->zsflags & NVME_ZSFLAG_ZRWA_ALLOC) { + uint16_t ozcs = le16_to_cpu(ns->id_ns_zoned->ozcs); + + if (!(ozcs & NVME_ID_NS_ZONED_OZCS_ZRWASUP)) { + return NVME_INVALID_ZONE_OP | NVME_DNR; + } + + if (zone->w_ptr % ns->zns.zrwafg) { + return NVME_NOZRWA | NVME_DNR; + } + + flags = NVME_ZRM_ZRWA; + } + + return nvme_zrm_open_flags(nvme_ctrl(req), ns, zone, flags); } static uint16_t nvme_close_zone(NvmeNamespace *ns, NvmeZone *zone, @@ -3463,7 +3676,6 @@ typedef struct NvmeZoneResetAIOCB { BlockAIOCB common; BlockAIOCB *aiocb; NvmeRequest *req; - QEMUBH *bh; int ret; bool all; @@ -3492,17 +3704,6 @@ static const AIOCBInfo nvme_zone_reset_aiocb_info = { .cancel_async = nvme_zone_reset_cancel, }; -static void nvme_zone_reset_bh(void *opaque) -{ - NvmeZoneResetAIOCB *iocb = opaque; - - iocb->common.cb(iocb->common.opaque, iocb->ret); - - qemu_bh_delete(iocb->bh); - iocb->bh = NULL; - qemu_aio_unref(iocb); -} - static void nvme_zone_reset_cb(void *opaque, int ret); static void nvme_zone_reset_epilogue_cb(void *opaque, int ret) @@ -3513,14 +3714,8 @@ static void nvme_zone_reset_epilogue_cb(void *opaque, int ret) int64_t moff; int count; - if (ret < 0) { - nvme_zone_reset_cb(iocb, ret); - return; - } - - if (!ns->lbaf.ms) { - nvme_zone_reset_cb(iocb, 0); - return; + if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) { + goto out; } moff = nvme_moff(ns, iocb->zone->d.zslba); @@ -3530,6 +3725,9 @@ static void nvme_zone_reset_epilogue_cb(void *opaque, int ret) BDRV_REQ_MAY_UNMAP, nvme_zone_reset_cb, iocb); return; + +out: + nvme_zone_reset_cb(iocb, ret); } static void nvme_zone_reset_cb(void *opaque, int ret) @@ -3538,7 +3736,9 @@ static void nvme_zone_reset_cb(void *opaque, int ret) NvmeRequest *req = iocb->req; NvmeNamespace *ns = req->ns; - if (ret < 0) { + if (iocb->ret < 0) { + goto done; + } else if (ret < 0) { iocb->ret = ret; goto done; } @@ -3586,40 +3786,76 @@ static void nvme_zone_reset_cb(void *opaque, int ret) done: iocb->aiocb = NULL; - if (iocb->bh) { - qemu_bh_schedule(iocb->bh); + + iocb->common.cb(iocb->common.opaque, iocb->ret); + qemu_aio_unref(iocb); +} + +static uint16_t nvme_zone_mgmt_send_zrwa_flush(NvmeCtrl *n, NvmeZone *zone, + uint64_t elba, NvmeRequest *req) +{ + NvmeNamespace *ns = req->ns; + uint16_t ozcs = le16_to_cpu(ns->id_ns_zoned->ozcs); + uint64_t wp = zone->d.wp; + uint32_t nlb = elba 
- wp + 1; + uint16_t status; + + + if (!(ozcs & NVME_ID_NS_ZONED_OZCS_ZRWASUP)) { + return NVME_INVALID_ZONE_OP | NVME_DNR; } + + if (!(zone->d.za & NVME_ZA_ZRWA_VALID)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + if (elba < wp || elba > wp + ns->zns.zrwas) { + return NVME_ZONE_BOUNDARY_ERROR | NVME_DNR; + } + + if (nlb % ns->zns.zrwafg) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + status = nvme_zrm_auto(n, ns, zone); + if (status) { + return status; + } + + zone->w_ptr += nlb; + + nvme_advance_zone_wp(ns, zone, nlb); + + return NVME_SUCCESS; } static uint16_t nvme_zone_mgmt_send(NvmeCtrl *n, NvmeRequest *req) { - NvmeCmd *cmd = (NvmeCmd *)&req->cmd; + NvmeZoneSendCmd *cmd = (NvmeZoneSendCmd *)&req->cmd; NvmeNamespace *ns = req->ns; NvmeZone *zone; NvmeZoneResetAIOCB *iocb; uint8_t *zd_ext; - uint32_t dw13 = le32_to_cpu(cmd->cdw13); uint64_t slba = 0; uint32_t zone_idx = 0; uint16_t status; - uint8_t action; + uint8_t action = cmd->zsa; bool all; enum NvmeZoneProcessingMask proc_mask = NVME_PROC_CURRENT_ZONE; - action = dw13 & 0xff; - all = !!(dw13 & 0x100); + all = cmd->zsflags & NVME_ZSFLAG_SELECT_ALL; req->status = NVME_SUCCESS; if (!all) { - status = nvme_get_mgmt_zone_slba_idx(ns, cmd, &slba, &zone_idx); + status = nvme_get_mgmt_zone_slba_idx(ns, &req->cmd, &slba, &zone_idx); if (status) { return status; } } zone = &ns->zone_array[zone_idx]; - if (slba != zone->d.zslba) { + if (slba != zone->d.zslba && action != NVME_ZONE_ACTION_ZRWA_FLUSH) { trace_pci_nvme_err_unaligned_zone_cmd(action, slba, zone->d.zslba); return NVME_INVALID_FIELD | NVME_DNR; } @@ -3657,7 +3893,6 @@ static uint16_t nvme_zone_mgmt_send(NvmeCtrl *n, NvmeRequest *req) nvme_misc_cb, req); iocb->req = req; - iocb->bh = qemu_bh_new(nvme_zone_reset_bh, iocb); iocb->ret = 0; iocb->all = all; iocb->idx = zone_idx; @@ -3695,6 +3930,13 @@ static uint16_t nvme_zone_mgmt_send(NvmeCtrl *n, NvmeRequest *req) } break; + case NVME_ZONE_ACTION_ZRWA_FLUSH: + if (all) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + return nvme_zone_mgmt_send_zrwa_flush(n, zone, slba, req); + default: trace_pci_nvme_err_invalid_mgmt_action(action); status = NVME_INVALID_FIELD; @@ -3893,6 +4135,10 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req) return ns->status; } + if (NVME_CMD_FLAGS_FUSE(req->cmd.flags)) { + return NVME_INVALID_FIELD; + } + req->ns = ns; switch (req->cmd.opcode) { @@ -3923,10 +4169,87 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req) return NVME_INVALID_OPCODE | NVME_DNR; } +static void nvme_cq_notifier(EventNotifier *e) +{ + NvmeCQueue *cq = container_of(e, NvmeCQueue, notifier); + NvmeCtrl *n = cq->ctrl; + + if (!event_notifier_test_and_clear(e)) { + return; + } + + nvme_update_cq_head(cq); + + if (cq->tail == cq->head) { + if (cq->irq_enabled) { + n->cq_pending--; + } + + nvme_irq_deassert(n, cq); + } + + qemu_bh_schedule(cq->bh); +} + +static int nvme_init_cq_ioeventfd(NvmeCQueue *cq) +{ + NvmeCtrl *n = cq->ctrl; + uint16_t offset = (cq->cqid << 3) + (1 << 2); + int ret; + + ret = event_notifier_init(&cq->notifier, 0); + if (ret < 0) { + return ret; + } + + event_notifier_set_handler(&cq->notifier, nvme_cq_notifier); + memory_region_add_eventfd(&n->iomem, + 0x1000 + offset, 4, false, 0, &cq->notifier); + + return 0; +} + +static void nvme_sq_notifier(EventNotifier *e) +{ + NvmeSQueue *sq = container_of(e, NvmeSQueue, notifier); + + if (!event_notifier_test_and_clear(e)) { + return; + } + + nvme_process_sq(sq); +} + +static int nvme_init_sq_ioeventfd(NvmeSQueue *sq) +{ + NvmeCtrl *n = 
sq->ctrl; + uint16_t offset = sq->sqid << 3; + int ret; + + ret = event_notifier_init(&sq->notifier, 0); + if (ret < 0) { + return ret; + } + + event_notifier_set_handler(&sq->notifier, nvme_sq_notifier); + memory_region_add_eventfd(&n->iomem, + 0x1000 + offset, 4, false, 0, &sq->notifier); + + return 0; +} + static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n) { + uint16_t offset = sq->sqid << 3; + n->sq[sq->sqid] = NULL; - timer_free(sq->timer); + qemu_bh_delete(sq->bh); + if (sq->ioeventfd_enabled) { + memory_region_del_eventfd(&n->iomem, + 0x1000 + offset, 4, false, 0, &sq->notifier); + event_notifier_set_handler(&sq->notifier, NULL); + event_notifier_cleanup(&sq->notifier); + } g_free(sq->io_req); if (sq->sqid) { g_free(sq); @@ -3994,7 +4317,19 @@ static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr, sq->io_req[i].sq = sq; QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry); } - sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq); + + sq->bh = qemu_bh_new(nvme_process_sq, sq); + + if (n->dbbuf_enabled) { + sq->db_addr = n->dbbuf_dbs + (sqid << 3); + sq->ei_addr = n->dbbuf_eis + (sqid << 3); + + if (n->params.ioeventfd && sq->sqid != 0) { + if (!nvme_init_sq_ioeventfd(sq)) { + sq->ioeventfd_enabled = true; + } + } + } assert(n->cq[cqid]); cq = n->cq[cqid]; @@ -4019,8 +4354,7 @@ static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req) trace_pci_nvme_err_invalid_create_sq_cqid(cqid); return NVME_INVALID_CQID | NVME_DNR; } - if (unlikely(!sqid || sqid > n->params.max_ioqpairs || - n->sq[sqid] != NULL)) { + if (unlikely(!sqid || sqid > n->conf_ioqpairs || n->sq[sqid] != NULL)) { trace_pci_nvme_err_invalid_create_sq_sqid(sqid); return NVME_INVALID_QID | NVME_DNR; } @@ -4164,6 +4498,11 @@ static uint16_t nvme_changed_nslist(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, int i = 0; uint32_t nsid; + if (off >= sizeof(nslist)) { + trace_pci_nvme_err_invalid_log_page_offset(off, sizeof(nslist)); + return NVME_INVALID_FIELD | NVME_DNR; + } + memset(nslist, 0x0, sizeof(nslist)); trans_len = MIN(sizeof(nslist) - off, buf_len); @@ -4293,8 +4632,16 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req) static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n) { + uint16_t offset = (cq->cqid << 3) + (1 << 2); + n->cq[cq->cqid] = NULL; - timer_free(cq->timer); + qemu_bh_delete(cq->bh); + if (cq->ioeventfd_enabled) { + memory_region_del_eventfd(&n->iomem, + 0x1000 + offset, 4, false, 0, &cq->notifier); + event_notifier_set_handler(&cq->notifier, NULL); + event_notifier_cleanup(&cq->notifier); + } if (msix_enabled(&n->parent_obj)) { msix_vector_unuse(&n->parent_obj, cq->vector); } @@ -4334,11 +4681,8 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr, uint16_t cqid, uint16_t vector, uint16_t size, uint16_t irq_enabled) { - int ret; - if (msix_enabled(&n->parent_obj)) { - ret = msix_vector_use(&n->parent_obj, vector); - assert(ret == 0); + msix_vector_use(&n->parent_obj, vector); } cq->ctrl = n; cq->cqid = cqid; @@ -4350,8 +4694,18 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr, cq->head = cq->tail = 0; QTAILQ_INIT(&cq->req_list); QTAILQ_INIT(&cq->sq_list); + if (n->dbbuf_enabled) { + cq->db_addr = n->dbbuf_dbs + (cqid << 3) + (1 << 2); + cq->ei_addr = n->dbbuf_eis + (cqid << 3) + (1 << 2); + + if (n->params.ioeventfd && cqid != 0) { + if (!nvme_init_cq_ioeventfd(cq)) { + cq->ioeventfd_enabled = true; + } + } + } n->cq[cqid] = cq; - cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq); + cq->bh = 
qemu_bh_new(nvme_post_cqes, cq); } static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req) @@ -4367,8 +4721,7 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req) trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags, NVME_CQ_FLAGS_IEN(qflags) != 0); - if (unlikely(!cqid || cqid > n->params.max_ioqpairs || - n->cq[cqid] != NULL)) { + if (unlikely(!cqid || cqid > n->conf_ioqpairs || n->cq[cqid] != NULL)) { trace_pci_nvme_err_invalid_create_cq_cqid(cqid); return NVME_INVALID_QID | NVME_DNR; } @@ -4384,7 +4737,7 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req) trace_pci_nvme_err_invalid_create_cq_vector(vector); return NVME_INVALID_IRQ_VECTOR | NVME_DNR; } - if (unlikely(vector >= n->params.msix_qsize)) { + if (unlikely(vector >= n->conf_msix_qsize)) { trace_pci_nvme_err_invalid_create_cq_vector(vector); return NVME_INVALID_IRQ_VECTOR | NVME_DNR; } @@ -4523,6 +4876,37 @@ static uint16_t nvme_identify_ctrl_list(NvmeCtrl *n, NvmeRequest *req, return nvme_c2h(n, (uint8_t *)list, sizeof(list), req); } +static uint16_t nvme_identify_pri_ctrl_cap(NvmeCtrl *n, NvmeRequest *req) +{ + trace_pci_nvme_identify_pri_ctrl_cap(le16_to_cpu(n->pri_ctrl_cap.cntlid)); + + return nvme_c2h(n, (uint8_t *)&n->pri_ctrl_cap, + sizeof(NvmePriCtrlCap), req); +} + +static uint16_t nvme_identify_sec_ctrl_list(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeIdentify *c = (NvmeIdentify *)&req->cmd; + uint16_t pri_ctrl_id = le16_to_cpu(n->pri_ctrl_cap.cntlid); + uint16_t min_id = le16_to_cpu(c->ctrlid); + uint8_t num_sec_ctrl = n->sec_ctrl_list.numcntl; + NvmeSecCtrlList list = {0}; + uint8_t i; + + for (i = 0; i < num_sec_ctrl; i++) { + if (n->sec_ctrl_list.sec[i].scid >= min_id) { + list.numcntl = num_sec_ctrl - i; + memcpy(&list.sec, n->sec_ctrl_list.sec + i, + list.numcntl * sizeof(NvmeSecCtrlEntry)); + break; + } + } + + trace_pci_nvme_identify_sec_ctrl_list(pri_ctrl_id, list.numcntl); + + return nvme_c2h(n, (uint8_t *)&list, sizeof(list), req); +} + static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req, bool active) { @@ -4549,7 +4933,8 @@ static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req, } if (c->csi == NVME_CSI_NVM) { - return nvme_rpt_empty_id_struct(n, req); + return nvme_c2h(n, (uint8_t *)&ns->id_ns_nvm, sizeof(NvmeIdNsNvm), + req); } else if (c->csi == NVME_CSI_ZONED && ns->csi == NVME_CSI_ZONED) { return nvme_c2h(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned), req); @@ -4684,16 +5069,13 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req) return NVME_INVALID_FIELD | NVME_DNR; } - /* - * If the EUI-64 field is 0 and the NGUID field is 0, the namespace must - * provide a valid Namespace UUID in the Namespace Identification Descriptor - * data structure. QEMU does not yet support setting NGUID. 
- */ - uuid.hdr.nidt = NVME_NIDT_UUID; - uuid.hdr.nidl = NVME_NIDL_UUID; - memcpy(uuid.v, ns->params.uuid.data, NVME_NIDL_UUID); - memcpy(pos, &uuid, sizeof(uuid)); - pos += sizeof(uuid); + if (!qemu_uuid_is_null(&ns->params.uuid)) { + uuid.hdr.nidt = NVME_NIDT_UUID; + uuid.hdr.nidl = NVME_NIDL_UUID; + memcpy(uuid.v, ns->params.uuid.data, NVME_NIDL_UUID); + memcpy(pos, &uuid, sizeof(uuid)); + pos += sizeof(uuid); + } if (ns->params.eui64) { eui64.hdr.nidt = NVME_NIDT_EUI64; @@ -4741,6 +5123,10 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req) return nvme_identify_ctrl_list(n, req, true); case NVME_ID_CNS_CTRL_LIST: return nvme_identify_ctrl_list(n, req, false); + case NVME_ID_CNS_PRIMARY_CTRL_CAP: + return nvme_identify_pri_ctrl_cap(n, req); + case NVME_ID_CNS_SECONDARY_CTRL_LIST: + return nvme_identify_sec_ctrl_list(n, req); case NVME_ID_CNS_CS_NS: return nvme_identify_ns_csi(n, req, true); case NVME_ID_CNS_CS_NS_PRESENT: @@ -4927,6 +5313,9 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req) goto out; case NVME_TIMESTAMP: return nvme_get_feature_timestamp(n, req); + case NVME_HOST_BEHAVIOR_SUPPORT: + return nvme_c2h(n, (uint8_t *)&n->features.hbs, + sizeof(n->features.hbs), req); default: break; } @@ -4946,13 +5335,12 @@ defaults: break; case NVME_NUMBER_OF_QUEUES: - result = (n->params.max_ioqpairs - 1) | - ((n->params.max_ioqpairs - 1) << 16); + result = (n->conf_ioqpairs - 1) | ((n->conf_ioqpairs - 1) << 16); trace_pci_nvme_getfeat_numq(result); break; case NVME_INTERRUPT_VECTOR_CONF: iv = dw11 & 0xffff; - if (iv >= n->params.max_ioqpairs + 1) { + if (iv >= n->conf_ioqpairs + 1) { return NVME_INVALID_FIELD | NVME_DNR; } @@ -4996,6 +5384,7 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req) uint32_t nsid = le32_to_cpu(cmd->nsid); uint8_t fid = NVME_GETSETFEAT_FID(dw10); uint8_t save = NVME_SETFEAT_SAVE(dw10); + uint16_t status; int i; trace_pci_nvme_setfeat(nvme_cid(req), nsid, fid, save, dw11); @@ -5050,7 +5439,7 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req) if ((n->temperature >= n->features.temp_thresh_hi) || (n->temperature <= n->features.temp_thresh_low)) { - nvme_smart_event(n, NVME_AER_INFO_SMART_TEMP_THRESH); + nvme_smart_event(n, NVME_SMART_TEMPERATURE); } break; @@ -5107,16 +5496,37 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req) trace_pci_nvme_setfeat_numq((dw11 & 0xffff) + 1, ((dw11 >> 16) & 0xffff) + 1, - n->params.max_ioqpairs, - n->params.max_ioqpairs); - req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) | - ((n->params.max_ioqpairs - 1) << 16)); + n->conf_ioqpairs, + n->conf_ioqpairs); + req->cqe.result = cpu_to_le32((n->conf_ioqpairs - 1) | + ((n->conf_ioqpairs - 1) << 16)); break; case NVME_ASYNCHRONOUS_EVENT_CONF: n->features.async_config = dw11; break; case NVME_TIMESTAMP: return nvme_set_feature_timestamp(n, req); + case NVME_HOST_BEHAVIOR_SUPPORT: + status = nvme_h2c(n, (uint8_t *)&n->features.hbs, + sizeof(n->features.hbs), req); + if (status) { + return status; + } + + for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { + ns = nvme_ns(n, i); + + if (!ns) { + continue; + } + + ns->id_ns.nlbaf = ns->nlbaf - 1; + if (!n->features.hbs.lbafee) { + ns->id_ns.nlbaf = MIN(ns->id_ns.nlbaf, 15); + } + } + + return status; case NVME_COMMAND_SET_PROFILE: if (dw11 & 0x1ff) { trace_pci_nvme_err_invalid_iocsci(dw11 & 0x1ff); @@ -5191,7 +5601,7 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req) uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {}; uint32_t nsid = 
le32_to_cpu(req->cmd.nsid); uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); - bool attach = !(dw10 & 0xf); + uint8_t sel = dw10 & 0xf; uint16_t *nr_ids = &list[0]; uint16_t *ids = &list[1]; uint16_t ret; @@ -5224,7 +5634,8 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req) return NVME_NS_CTRL_LIST_INVALID | NVME_DNR; } - if (attach) { + switch (sel) { + case NVME_NS_ATTACHMENT_ATTACH: if (nvme_ns(ctrl, nsid)) { return NVME_NS_ALREADY_ATTACHED | NVME_DNR; } @@ -5235,7 +5646,10 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req) nvme_attach_ns(ctrl, ns); nvme_select_iocs_ns(ctrl, ns); - } else { + + break; + + case NVME_NS_ATTACHMENT_DETACH: if (!nvme_ns(ctrl, nsid)) { return NVME_NS_NOT_ATTACHED | NVME_DNR; } @@ -5244,6 +5658,11 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req) ns->attached--; nvme_update_dmrsl(ctrl); + + break; + + default: + return NVME_INVALID_FIELD | NVME_DNR; } /* @@ -5263,7 +5682,6 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req) typedef struct NvmeFormatAIOCB { BlockAIOCB common; BlockAIOCB *aiocb; - QEMUBH *bh; NvmeRequest *req; int ret; @@ -5271,16 +5689,22 @@ typedef struct NvmeFormatAIOCB { uint32_t nsid; bool broadcast; int64_t offset; -} NvmeFormatAIOCB; -static void nvme_format_bh(void *opaque); + uint8_t lbaf; + uint8_t mset; + uint8_t pi; + uint8_t pil; +} NvmeFormatAIOCB; static void nvme_format_cancel(BlockAIOCB *aiocb) { NvmeFormatAIOCB *iocb = container_of(aiocb, NvmeFormatAIOCB, common); + iocb->ret = -ECANCELED; + if (iocb->aiocb) { blk_aio_cancel_async(iocb->aiocb); + iocb->aiocb = NULL; } } @@ -5290,30 +5714,31 @@ static const AIOCBInfo nvme_format_aiocb_info = { .get_aio_context = nvme_get_aio_context, }; -static void nvme_format_set(NvmeNamespace *ns, NvmeCmd *cmd) +static void nvme_format_set(NvmeNamespace *ns, uint8_t lbaf, uint8_t mset, + uint8_t pi, uint8_t pil) { - uint32_t dw10 = le32_to_cpu(cmd->cdw10); - uint8_t lbaf = dw10 & 0xf; - uint8_t pi = (dw10 >> 5) & 0x7; - uint8_t mset = (dw10 >> 4) & 0x1; - uint8_t pil = (dw10 >> 8) & 0x1; + uint8_t lbafl = lbaf & 0xf; + uint8_t lbafu = lbaf >> 4; trace_pci_nvme_format_set(ns->params.nsid, lbaf, mset, pi, pil); ns->id_ns.dps = (pil << 3) | pi; - ns->id_ns.flbas = lbaf | (mset << 4); + ns->id_ns.flbas = (lbafu << 5) | (mset << 4) | lbafl; nvme_ns_init_format(ns); } +static void nvme_do_format(NvmeFormatAIOCB *iocb); + static void nvme_format_ns_cb(void *opaque, int ret) { NvmeFormatAIOCB *iocb = opaque; - NvmeRequest *req = iocb->req; NvmeNamespace *ns = iocb->ns; int bytes; - if (ret < 0) { + if (iocb->ret < 0) { + goto done; + } else if (ret < 0) { iocb->ret = ret; goto done; } @@ -5331,14 +5756,13 @@ static void nvme_format_ns_cb(void *opaque, int ret) return; } - nvme_format_set(ns, &req->cmd); + nvme_format_set(ns, iocb->lbaf, iocb->mset, iocb->pi, iocb->pil); ns->status = 0x0; iocb->ns = NULL; iocb->offset = 0; done: - iocb->aiocb = NULL; - qemu_bh_schedule(iocb->bh); + nvme_do_format(iocb); } static uint16_t nvme_format_check(NvmeNamespace *ns, uint8_t lbaf, uint8_t pi) @@ -5351,7 +5775,7 @@ static uint16_t nvme_format_check(NvmeNamespace *ns, uint8_t lbaf, uint8_t pi) return NVME_INVALID_FORMAT | NVME_DNR; } - if (pi && (ns->id_ns.lbaf[lbaf].ms < sizeof(NvmeDifTuple))) { + if (pi && (ns->id_ns.lbaf[lbaf].ms < nvme_pi_tuple_size(ns))) { return NVME_INVALID_FORMAT | NVME_DNR; } @@ -5362,9 +5786,8 @@ static uint16_t nvme_format_check(NvmeNamespace *ns, uint8_t lbaf, uint8_t pi) return NVME_SUCCESS; } -static void 
nvme_format_bh(void *opaque) +static void nvme_do_format(NvmeFormatAIOCB *iocb) { - NvmeFormatAIOCB *iocb = opaque; NvmeRequest *req = iocb->req; NvmeCtrl *n = nvme_ctrl(req); uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); @@ -5402,11 +5825,7 @@ static void nvme_format_bh(void *opaque) return; done: - qemu_bh_delete(iocb->bh); - iocb->bh = NULL; - iocb->common.cb(iocb->common.opaque, iocb->ret); - qemu_aio_unref(iocb); } @@ -5414,18 +5833,31 @@ static uint16_t nvme_format(NvmeCtrl *n, NvmeRequest *req) { NvmeFormatAIOCB *iocb; uint32_t nsid = le32_to_cpu(req->cmd.nsid); + uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); + uint8_t lbaf = dw10 & 0xf; + uint8_t mset = (dw10 >> 4) & 0x1; + uint8_t pi = (dw10 >> 5) & 0x7; + uint8_t pil = (dw10 >> 8) & 0x1; + uint8_t lbafu = (dw10 >> 12) & 0x3; uint16_t status; iocb = qemu_aio_get(&nvme_format_aiocb_info, NULL, nvme_misc_cb, req); iocb->req = req; - iocb->bh = qemu_bh_new(nvme_format_bh, iocb); iocb->ret = 0; iocb->ns = NULL; iocb->nsid = 0; + iocb->lbaf = lbaf; + iocb->mset = mset; + iocb->pi = pi; + iocb->pil = pil; iocb->broadcast = (nsid == NVME_NSID_BROADCAST); iocb->offset = 0; + if (n->features.hbs.lbafee) { + iocb->lbaf |= lbafu << 4; + } + if (!iocb->broadcast) { if (!nvme_nsid_valid(n, nsid)) { status = NVME_INVALID_NSID | NVME_DNR; @@ -5440,17 +5872,235 @@ static uint16_t nvme_format(NvmeCtrl *n, NvmeRequest *req) } req->aiocb = &iocb->common; - qemu_bh_schedule(iocb->bh); + nvme_do_format(iocb); return NVME_NO_COMPLETE; out: - qemu_bh_delete(iocb->bh); - iocb->bh = NULL; qemu_aio_unref(iocb); + return status; } +static void nvme_get_virt_res_num(NvmeCtrl *n, uint8_t rt, int *num_total, + int *num_prim, int *num_sec) +{ + *num_total = le32_to_cpu(rt ? + n->pri_ctrl_cap.vifrt : n->pri_ctrl_cap.vqfrt); + *num_prim = le16_to_cpu(rt ? + n->pri_ctrl_cap.virfap : n->pri_ctrl_cap.vqrfap); + *num_sec = le16_to_cpu(rt ? n->pri_ctrl_cap.virfa : n->pri_ctrl_cap.vqrfa); +} + +static uint16_t nvme_assign_virt_res_to_prim(NvmeCtrl *n, NvmeRequest *req, + uint16_t cntlid, uint8_t rt, + int nr) +{ + int num_total, num_prim, num_sec; + + if (cntlid != n->cntlid) { + return NVME_INVALID_CTRL_ID | NVME_DNR; + } + + nvme_get_virt_res_num(n, rt, &num_total, &num_prim, &num_sec); + + if (nr > num_total) { + return NVME_INVALID_NUM_RESOURCES | NVME_DNR; + } + + if (nr > num_total - num_sec) { + return NVME_INVALID_RESOURCE_ID | NVME_DNR; + } + + if (rt) { + n->next_pri_ctrl_cap.virfap = cpu_to_le16(nr); + } else { + n->next_pri_ctrl_cap.vqrfap = cpu_to_le16(nr); + } + + req->cqe.result = cpu_to_le32(nr); + return req->status; +} + +static void nvme_update_virt_res(NvmeCtrl *n, NvmeSecCtrlEntry *sctrl, + uint8_t rt, int nr) +{ + int prev_nr, prev_total; + + if (rt) { + prev_nr = le16_to_cpu(sctrl->nvi); + prev_total = le32_to_cpu(n->pri_ctrl_cap.virfa); + sctrl->nvi = cpu_to_le16(nr); + n->pri_ctrl_cap.virfa = cpu_to_le32(prev_total + nr - prev_nr); + } else { + prev_nr = le16_to_cpu(sctrl->nvq); + prev_total = le32_to_cpu(n->pri_ctrl_cap.vqrfa); + sctrl->nvq = cpu_to_le16(nr); + n->pri_ctrl_cap.vqrfa = cpu_to_le32(prev_total + nr - prev_nr); + } +} + +static uint16_t nvme_assign_virt_res_to_sec(NvmeCtrl *n, NvmeRequest *req, + uint16_t cntlid, uint8_t rt, int nr) +{ + int num_total, num_prim, num_sec, num_free, diff, limit; + NvmeSecCtrlEntry *sctrl; + + sctrl = nvme_sctrl_for_cntlid(n, cntlid); + if (!sctrl) { + return NVME_INVALID_CTRL_ID | NVME_DNR; + } + + if (sctrl->scs) { + return NVME_INVALID_SEC_CTRL_STATE | NVME_DNR; + } + + limit = le16_to_cpu(rt ? 
n->pri_ctrl_cap.vifrsm : n->pri_ctrl_cap.vqfrsm); + if (nr > limit) { + return NVME_INVALID_NUM_RESOURCES | NVME_DNR; + } + + nvme_get_virt_res_num(n, rt, &num_total, &num_prim, &num_sec); + num_free = num_total - num_prim - num_sec; + diff = nr - le16_to_cpu(rt ? sctrl->nvi : sctrl->nvq); + + if (diff > num_free) { + return NVME_INVALID_RESOURCE_ID | NVME_DNR; + } + + nvme_update_virt_res(n, sctrl, rt, nr); + req->cqe.result = cpu_to_le32(nr); + + return req->status; +} + +static uint16_t nvme_virt_set_state(NvmeCtrl *n, uint16_t cntlid, bool online) +{ + NvmeCtrl *sn = NULL; + NvmeSecCtrlEntry *sctrl; + int vf_index; + + sctrl = nvme_sctrl_for_cntlid(n, cntlid); + if (!sctrl) { + return NVME_INVALID_CTRL_ID | NVME_DNR; + } + + if (!pci_is_vf(&n->parent_obj)) { + vf_index = le16_to_cpu(sctrl->vfn) - 1; + sn = NVME(pcie_sriov_get_vf_at_index(&n->parent_obj, vf_index)); + } + + if (online) { + if (!sctrl->nvi || (le16_to_cpu(sctrl->nvq) < 2) || !sn) { + return NVME_INVALID_SEC_CTRL_STATE | NVME_DNR; + } + + if (!sctrl->scs) { + sctrl->scs = 0x1; + nvme_ctrl_reset(sn, NVME_RESET_FUNCTION); + } + } else { + nvme_update_virt_res(n, sctrl, NVME_VIRT_RES_INTERRUPT, 0); + nvme_update_virt_res(n, sctrl, NVME_VIRT_RES_QUEUE, 0); + + if (sctrl->scs) { + sctrl->scs = 0x0; + if (sn) { + nvme_ctrl_reset(sn, NVME_RESET_FUNCTION); + } + } + } + + return NVME_SUCCESS; +} + +static uint16_t nvme_virt_mngmt(NvmeCtrl *n, NvmeRequest *req) +{ + uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); + uint32_t dw11 = le32_to_cpu(req->cmd.cdw11); + uint8_t act = dw10 & 0xf; + uint8_t rt = (dw10 >> 8) & 0x7; + uint16_t cntlid = (dw10 >> 16) & 0xffff; + int nr = dw11 & 0xffff; + + trace_pci_nvme_virt_mngmt(nvme_cid(req), act, cntlid, rt ? "VI" : "VQ", nr); + + if (rt != NVME_VIRT_RES_QUEUE && rt != NVME_VIRT_RES_INTERRUPT) { + return NVME_INVALID_RESOURCE_ID | NVME_DNR; + } + + switch (act) { + case NVME_VIRT_MNGMT_ACTION_SEC_ASSIGN: + return nvme_assign_virt_res_to_sec(n, req, cntlid, rt, nr); + case NVME_VIRT_MNGMT_ACTION_PRM_ALLOC: + return nvme_assign_virt_res_to_prim(n, req, cntlid, rt, nr); + case NVME_VIRT_MNGMT_ACTION_SEC_ONLINE: + return nvme_virt_set_state(n, cntlid, true); + case NVME_VIRT_MNGMT_ACTION_SEC_OFFLINE: + return nvme_virt_set_state(n, cntlid, false); + default: + return NVME_INVALID_FIELD | NVME_DNR; + } +} + +static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req) +{ + uint64_t dbs_addr = le64_to_cpu(req->cmd.dptr.prp1); + uint64_t eis_addr = le64_to_cpu(req->cmd.dptr.prp2); + int i; + + /* Address should be page aligned */ + if (dbs_addr & (n->page_size - 1) || eis_addr & (n->page_size - 1)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + /* Save shadow buffer base addr for use during queue creation */ + n->dbbuf_dbs = dbs_addr; + n->dbbuf_eis = eis_addr; + n->dbbuf_enabled = true; + + for (i = 0; i < n->params.max_ioqpairs + 1; i++) { + NvmeSQueue *sq = n->sq[i]; + NvmeCQueue *cq = n->cq[i]; + + if (sq) { + /* + * CAP.DSTRD is 0, so offset of ith sq db_addr is (i<<3) + * nvme_process_db() uses this hard-coded way to calculate + * doorbell offsets. Be consistent with that here. 
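+ * The queue's current tail is then written back to the freshly
+ * configured shadow entry, so host and device start from a
+ * consistent view of the submission queue.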
+ */ + sq->db_addr = dbs_addr + (i << 3); + sq->ei_addr = eis_addr + (i << 3); + pci_dma_write(&n->parent_obj, sq->db_addr, &sq->tail, + sizeof(sq->tail)); + + if (n->params.ioeventfd && sq->sqid != 0) { + if (!nvme_init_sq_ioeventfd(sq)) { + sq->ioeventfd_enabled = true; + } + } + } + + if (cq) { + /* CAP.DSTRD is 0, so offset of ith cq db_addr is (i<<3)+(1<<2) */ + cq->db_addr = dbs_addr + (i << 3) + (1 << 2); + cq->ei_addr = eis_addr + (i << 3) + (1 << 2); + pci_dma_write(&n->parent_obj, cq->db_addr, &cq->head, + sizeof(cq->head)); + + if (n->params.ioeventfd && cq->cqid != 0) { + if (!nvme_init_cq_ioeventfd(cq)) { + cq->ioeventfd_enabled = true; + } + } + } + } + + trace_pci_nvme_dbbuf_config(dbs_addr, eis_addr); + + return NVME_SUCCESS; +} + static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req) { trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode, @@ -5466,6 +6116,10 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req) return NVME_INVALID_FIELD | NVME_DNR; } + if (NVME_CMD_FLAGS_FUSE(req->cmd.flags)) { + return NVME_INVALID_FIELD; + } + switch (req->cmd.opcode) { case NVME_ADM_CMD_DELETE_SQ: return nvme_del_sq(n, req); @@ -5489,6 +6143,10 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req) return nvme_aer(n, req); case NVME_ADM_CMD_NS_ATTACHMENT: return nvme_ns_attachment(n, req); + case NVME_ADM_CMD_VIRT_MNGMT: + return nvme_virt_mngmt(n, req); + case NVME_ADM_CMD_DBBUF_CONFIG: + return nvme_dbbuf_config(n, req); case NVME_ADM_CMD_FORMAT_NVM: return nvme_format(n, req); default: @@ -5498,6 +6156,26 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req) return NVME_INVALID_OPCODE | NVME_DNR; } +static void nvme_update_sq_eventidx(const NvmeSQueue *sq) +{ + uint32_t v = cpu_to_le32(sq->tail); + + pci_dma_write(&sq->ctrl->parent_obj, sq->ei_addr, &v, sizeof(v)); + + trace_pci_nvme_eventidx_sq(sq->sqid, sq->tail); +} + +static void nvme_update_sq_tail(NvmeSQueue *sq) +{ + uint32_t v; + + pci_dma_read(&sq->ctrl->parent_obj, sq->db_addr, &v, sizeof(v)); + + sq->tail = le32_to_cpu(v); + + trace_pci_nvme_shadow_doorbell_sq(sq->sqid, sq->tail); +} + static void nvme_process_sq(void *opaque) { NvmeSQueue *sq = opaque; @@ -5509,6 +6187,10 @@ static void nvme_process_sq(void *opaque) NvmeCmd cmd; NvmeRequest *req; + if (n->dbbuf_enabled) { + nvme_update_sq_tail(sq); + } + while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) { addr = sq->dma_addr + sq->head * n->sqe_size; if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) { @@ -5532,11 +6214,56 @@ static void nvme_process_sq(void *opaque) req->status = status; nvme_enqueue_req_completion(cq, req); } + + if (n->dbbuf_enabled) { + nvme_update_sq_eventidx(sq); + nvme_update_sq_tail(sq); + } } } -static void nvme_ctrl_reset(NvmeCtrl *n) +static void nvme_update_msixcap_ts(PCIDevice *pci_dev, uint32_t table_size) { + uint8_t *config; + + if (!msix_present(pci_dev)) { + return; + } + + assert(table_size > 0 && table_size <= pci_dev->msix_entries_nr); + + config = pci_dev->config + pci_dev->msix_cap; + pci_set_word_by_mask(config + PCI_MSIX_FLAGS, PCI_MSIX_FLAGS_QSIZE, + table_size - 1); +} + +static void nvme_activate_virt_res(NvmeCtrl *n) +{ + PCIDevice *pci_dev = &n->parent_obj; + NvmePriCtrlCap *cap = &n->pri_ctrl_cap; + NvmeSecCtrlEntry *sctrl; + + /* -1 to account for the admin queue */ + if (pci_is_vf(pci_dev)) { + sctrl = nvme_sctrl(n); + cap->vqprt = sctrl->nvq; + cap->viprt = sctrl->nvi; + n->conf_ioqpairs = sctrl->nvq ? 
le16_to_cpu(sctrl->nvq) - 1 : 0; + n->conf_msix_qsize = sctrl->nvi ? le16_to_cpu(sctrl->nvi) : 1; + } else { + cap->vqrfap = n->next_pri_ctrl_cap.vqrfap; + cap->virfap = n->next_pri_ctrl_cap.virfap; + n->conf_ioqpairs = le16_to_cpu(cap->vqprt) + + le16_to_cpu(cap->vqrfap) - 1; + n->conf_msix_qsize = le16_to_cpu(cap->viprt) + + le16_to_cpu(cap->virfap); + } +} + +static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst) +{ + PCIDevice *pci_dev = &n->parent_obj; + NvmeSecCtrlEntry *sctrl; NvmeNamespace *ns; int i; @@ -5566,9 +6293,45 @@ static void nvme_ctrl_reset(NvmeCtrl *n) g_free(event); } + if (n->params.sriov_max_vfs) { + if (!pci_is_vf(pci_dev)) { + for (i = 0; i < n->sec_ctrl_list.numcntl; i++) { + sctrl = &n->sec_ctrl_list.sec[i]; + nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false); + } + + if (rst != NVME_RESET_CONTROLLER) { + pcie_sriov_pf_disable_vfs(pci_dev); + } + } + + if (rst != NVME_RESET_CONTROLLER) { + nvme_activate_virt_res(n); + } + } + n->aer_queued = 0; + n->aer_mask = 0; n->outstanding_aers = 0; n->qs_created = false; + + nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize); + + if (pci_is_vf(pci_dev)) { + sctrl = nvme_sctrl(n); + + stl_le_p(&n->bar.csts, sctrl->scs ? 0 : NVME_CSTS_FAILED); + } else { + stl_le_p(&n->bar.csts, 0); + } + + stl_le_p(&n->bar.intms, 0); + stl_le_p(&n->bar.intmc, 0); + stl_le_p(&n->bar.cc, 0); + + n->dbbuf_dbs = 0; + n->dbbuf_eis = 0; + n->dbbuf_enabled = false; } static void nvme_ctrl_shutdown(NvmeCtrl *n) @@ -5614,7 +6377,15 @@ static int nvme_start_ctrl(NvmeCtrl *n) uint64_t acq = ldq_le_p(&n->bar.acq); uint32_t page_bits = NVME_CC_MPS(cc) + 12; uint32_t page_size = 1 << page_bits; + NvmeSecCtrlEntry *sctrl = nvme_sctrl(n); + if (pci_is_vf(&n->parent_obj) && !sctrl->scs) { + trace_pci_nvme_err_startfail_virt_state(le16_to_cpu(sctrl->nvi), + le16_to_cpu(sctrl->nvq), + sctrl->scs ? 
"ONLINE" : + "OFFLINE"); + return -1; + } if (unlikely(n->cq[0])) { trace_pci_nvme_err_startfail_cq(); return -1; @@ -5623,14 +6394,6 @@ static int nvme_start_ctrl(NvmeCtrl *n) trace_pci_nvme_err_startfail_sq(); return -1; } - if (unlikely(!asq)) { - trace_pci_nvme_err_startfail_nbarasq(); - return -1; - } - if (unlikely(!acq)) { - trace_pci_nvme_err_startfail_nbaracq(); - return -1; - } if (unlikely(asq & (page_size - 1))) { trace_pci_nvme_err_startfail_asq_misaligned(asq); return -1; @@ -5703,8 +6466,6 @@ static int nvme_start_ctrl(NvmeCtrl *n) nvme_set_timestamp(n, 0ULL); - QTAILQ_INIT(&n->aer_queue); - nvme_select_iocs(n); return 0; @@ -5782,20 +6543,21 @@ static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data, nvme_irq_check(n); break; case NVME_REG_CC: + stl_le_p(&n->bar.cc, data); + trace_pci_nvme_mmio_cfg(data & 0xffffffff); - /* Windows first sends data, then sends enable bit */ - if (!NVME_CC_EN(data) && !NVME_CC_EN(cc) && - !NVME_CC_SHN(data) && !NVME_CC_SHN(cc)) - { - cc = data; + if (NVME_CC_SHN(data) && !(NVME_CC_SHN(cc))) { + trace_pci_nvme_mmio_shutdown_set(); + nvme_ctrl_shutdown(n); + csts &= ~(CSTS_SHST_MASK << CSTS_SHST_SHIFT); + csts |= NVME_CSTS_SHST_COMPLETE; + } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(cc)) { + trace_pci_nvme_mmio_shutdown_cleared(); + csts &= ~(CSTS_SHST_MASK << CSTS_SHST_SHIFT); } if (NVME_CC_EN(data) && !NVME_CC_EN(cc)) { - cc = data; - - /* flush CC since nvme_start_ctrl() needs the value */ - stl_le_p(&n->bar.cc, cc); if (unlikely(nvme_start_ctrl(n))) { trace_pci_nvme_err_startfail(); csts = NVME_CSTS_FAILED; @@ -5805,23 +6567,11 @@ static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data, } } else if (!NVME_CC_EN(data) && NVME_CC_EN(cc)) { trace_pci_nvme_mmio_stopped(); - nvme_ctrl_reset(n); - cc = 0; - csts &= ~NVME_CSTS_READY; + nvme_ctrl_reset(n, NVME_RESET_CONTROLLER); + + break; } - if (NVME_CC_SHN(data) && !(NVME_CC_SHN(cc))) { - trace_pci_nvme_mmio_shutdown_set(); - nvme_ctrl_shutdown(n); - cc = data; - csts |= NVME_CSTS_SHST_COMPLETE; - } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(cc)) { - trace_pci_nvme_mmio_shutdown_cleared(); - csts &= ~NVME_CSTS_SHST_COMPLETE; - cc = data; - } - - stl_le_p(&n->bar.cc, cc); stl_le_p(&n->bar.csts, csts); break; @@ -6005,6 +6755,12 @@ static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size) return 0; } + if (pci_is_vf(&n->parent_obj) && !nvme_sctrl(n)->scs && + addr != NVME_REG_CSTS) { + trace_pci_nvme_err_ignored_mmio_vf_offline(addr, size); + return 0; + } + /* * When PMRWBM bit 1 is set then read from * from PMRSTS should ensure prior writes @@ -6086,12 +6842,16 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val) start_sqs = nvme_cq_full(cq) ? 
1 : 0; cq->head = new_head; + if (!qid && n->dbbuf_enabled) { + pci_dma_write(&n->parent_obj, cq->db_addr, &cq->head, + sizeof(cq->head)); + } if (start_sqs) { NvmeSQueue *sq; QTAILQ_FOREACH(sq, &cq->sq_list, entry) { - timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); + qemu_bh_schedule(sq->bh); } - timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); + qemu_bh_schedule(cq->bh); } if (cq->tail == cq->head) { @@ -6143,7 +6903,25 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val) trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail); sq->tail = new_tail; - timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); + if (!qid && n->dbbuf_enabled) { + /* + * The spec states "the host shall also update the controller's + * corresponding doorbell property to match the value of that entry + * in the Shadow Doorbell buffer." + * + * Since this context is currently a VM trap, we can safely enforce + * the requirement from the device side in case the host is + * misbehaving. + * + * Note, we shouldn't have to do this, but various drivers + * including ones that run on Linux, are not updating Admin Queues, + * so we can't trust reading it for an appropriate sq tail. + */ + pci_dma_write(&n->parent_obj, sq->db_addr, &sq->tail, + sizeof(sq->tail)); + } + + qemu_bh_schedule(sq->bh); } } @@ -6154,6 +6932,12 @@ static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data, trace_pci_nvme_mmio_write(addr, data, size); + if (pci_is_vf(&n->parent_obj) && !nvme_sctrl(n)->scs && + addr != NVME_REG_CSTS) { + trace_pci_nvme_err_ignored_mmio_vf_offline(addr, size); + return; + } + if (addr < sizeof(n->bar)) { nvme_write_bar(n, addr, data, size); } else { @@ -6255,19 +7039,140 @@ static void nvme_check_constraints(NvmeCtrl *n, Error **errp) error_setg(errp, "vsl must be non-zero"); return; } + + if (params->sriov_max_vfs) { + if (!n->subsys) { + error_setg(errp, "subsystem is required for the use of SR-IOV"); + return; + } + + if (params->sriov_max_vfs > NVME_MAX_VFS) { + error_setg(errp, "sriov_max_vfs must be between 0 and %d", + NVME_MAX_VFS); + return; + } + + if (params->cmb_size_mb) { + error_setg(errp, "CMB is not supported with SR-IOV"); + return; + } + + if (n->pmr.dev) { + error_setg(errp, "PMR is not supported with SR-IOV"); + return; + } + + if (!params->sriov_vq_flexible || !params->sriov_vi_flexible) { + error_setg(errp, "both sriov_vq_flexible and sriov_vi_flexible" + " must be set for the use of SR-IOV"); + return; + } + + if (params->sriov_vq_flexible < params->sriov_max_vfs * 2) { + error_setg(errp, "sriov_vq_flexible must be greater than or equal" + " to %d (sriov_max_vfs * 2)", params->sriov_max_vfs * 2); + return; + } + + if (params->max_ioqpairs < params->sriov_vq_flexible + 2) { + error_setg(errp, "(max_ioqpairs - sriov_vq_flexible) must be" + " greater than or equal to 2"); + return; + } + + if (params->sriov_vi_flexible < params->sriov_max_vfs) { + error_setg(errp, "sriov_vi_flexible must be greater than or equal" + " to %d (sriov_max_vfs)", params->sriov_max_vfs); + return; + } + + if (params->msix_qsize < params->sriov_vi_flexible + 1) { + error_setg(errp, "(msix_qsize - sriov_vi_flexible) must be" + " greater than or equal to 1"); + return; + } + + if (params->sriov_max_vi_per_vf && + (params->sriov_max_vi_per_vf - 1) % NVME_VF_RES_GRANULARITY) { + error_setg(errp, "sriov_max_vi_per_vf must meet:" + " (sriov_max_vi_per_vf - 1) %% %d == 0 and" + " sriov_max_vi_per_vf >= 1", NVME_VF_RES_GRANULARITY); + return; + } + + if 
(params->sriov_max_vq_per_vf && + (params->sriov_max_vq_per_vf < 2 || + (params->sriov_max_vq_per_vf - 1) % NVME_VF_RES_GRANULARITY)) { + error_setg(errp, "sriov_max_vq_per_vf must meet:" + " (sriov_max_vq_per_vf - 1) %% %d == 0 and" + " sriov_max_vq_per_vf >= 2", NVME_VF_RES_GRANULARITY); + return; + } + } } static void nvme_init_state(NvmeCtrl *n) { - /* add one to max_ioqpairs to account for the admin queue pair */ - n->reg_size = pow2ceil(sizeof(NvmeBar) + - 2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE); + NvmePriCtrlCap *cap = &n->pri_ctrl_cap; + NvmeSecCtrlList *list = &n->sec_ctrl_list; + NvmeSecCtrlEntry *sctrl; + uint8_t max_vfs; + int i; + + if (pci_is_vf(&n->parent_obj)) { + sctrl = nvme_sctrl(n); + max_vfs = 0; + n->conf_ioqpairs = sctrl->nvq ? le16_to_cpu(sctrl->nvq) - 1 : 0; + n->conf_msix_qsize = sctrl->nvi ? le16_to_cpu(sctrl->nvi) : 1; + } else { + max_vfs = n->params.sriov_max_vfs; + n->conf_ioqpairs = n->params.max_ioqpairs; + n->conf_msix_qsize = n->params.msix_qsize; + } + n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1); n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1); n->temperature = NVME_TEMPERATURE; n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING; n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1); + QTAILQ_INIT(&n->aer_queue); + + list->numcntl = cpu_to_le16(max_vfs); + for (i = 0; i < max_vfs; i++) { + sctrl = &list->sec[i]; + sctrl->pcid = cpu_to_le16(n->cntlid); + sctrl->vfn = cpu_to_le16(i + 1); + } + + cap->cntlid = cpu_to_le16(n->cntlid); + cap->crt = NVME_CRT_VQ | NVME_CRT_VI; + + if (pci_is_vf(&n->parent_obj)) { + cap->vqprt = cpu_to_le16(1 + n->conf_ioqpairs); + } else { + cap->vqprt = cpu_to_le16(1 + n->params.max_ioqpairs - + n->params.sriov_vq_flexible); + cap->vqfrt = cpu_to_le32(n->params.sriov_vq_flexible); + cap->vqrfap = cap->vqfrt; + cap->vqgran = cpu_to_le16(NVME_VF_RES_GRANULARITY); + cap->vqfrsm = n->params.sriov_max_vq_per_vf ? + cpu_to_le16(n->params.sriov_max_vq_per_vf) : + cap->vqfrt / MAX(max_vfs, 1); + } + + if (pci_is_vf(&n->parent_obj)) { + cap->viprt = cpu_to_le16(n->conf_msix_qsize); + } else { + cap->viprt = cpu_to_le16(n->params.msix_qsize - + n->params.sriov_vi_flexible); + cap->vifrt = cpu_to_le32(n->params.sriov_vi_flexible); + cap->virfap = cap->vifrt; + cap->vigran = cpu_to_le16(NVME_VF_RES_GRANULARITY); + cap->vifrsm = n->params.sriov_max_vi_per_vf ? 
+ cpu_to_le16(n->params.sriov_max_vi_per_vf) : + cap->vifrt / MAX(max_vfs, 1); + } } static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev) @@ -6312,10 +7217,77 @@ static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev) memory_region_set_enabled(&n->pmr.dev->mr, false); } +static uint64_t nvme_bar_size(unsigned total_queues, unsigned total_irqs, + unsigned *msix_table_offset, + unsigned *msix_pba_offset) +{ + uint64_t bar_size, msix_table_size, msix_pba_size; + + bar_size = sizeof(NvmeBar) + 2 * total_queues * NVME_DB_SIZE; + bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB); + + if (msix_table_offset) { + *msix_table_offset = bar_size; + } + + msix_table_size = PCI_MSIX_ENTRY_SIZE * total_irqs; + bar_size += msix_table_size; + bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB); + + if (msix_pba_offset) { + *msix_pba_offset = bar_size; + } + + msix_pba_size = QEMU_ALIGN_UP(total_irqs, 64) / 8; + bar_size += msix_pba_size; + + bar_size = pow2ceil(bar_size); + return bar_size; +} + +static void nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset) +{ + uint16_t vf_dev_id = n->params.use_intel_id ? + PCI_DEVICE_ID_INTEL_NVME : PCI_DEVICE_ID_REDHAT_NVME; + NvmePriCtrlCap *cap = &n->pri_ctrl_cap; + uint64_t bar_size = nvme_bar_size(le16_to_cpu(cap->vqfrsm), + le16_to_cpu(cap->vifrsm), + NULL, NULL); + + pcie_sriov_pf_init(pci_dev, offset, "nvme", vf_dev_id, + n->params.sriov_max_vfs, n->params.sriov_max_vfs, + NVME_VF_OFFSET, NVME_VF_STRIDE); + + pcie_sriov_pf_init_vf_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64, bar_size); +} + +static int nvme_add_pm_capability(PCIDevice *pci_dev, uint8_t offset) +{ + Error *err = NULL; + int ret; + + ret = pci_add_capability(pci_dev, PCI_CAP_ID_PM, offset, + PCI_PM_SIZEOF, &err); + if (err) { + error_report_err(err); + return ret; + } + + pci_set_word(pci_dev->config + offset + PCI_PM_PMC, + PCI_PM_CAP_VER_1_2); + pci_set_word(pci_dev->config + offset + PCI_PM_CTRL, + PCI_PM_CTRL_NO_SOFT_RESET); + pci_set_word(pci_dev->wmask + offset + PCI_PM_CTRL, + PCI_PM_CTRL_STATE_MASK); + + return 0; +} + static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp) { uint8_t *pci_conf = pci_dev->config; - uint64_t bar_size, msix_table_size, msix_pba_size; + uint64_t bar_size; unsigned msix_table_offset, msix_pba_offset; int ret; @@ -6326,34 +7298,35 @@ static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp) if (n->params.use_intel_id) { pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL); - pci_config_set_device_id(pci_conf, 0x5845); + pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_INTEL_NVME); } else { pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REDHAT); pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_REDHAT_NVME); } pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS); + nvme_add_pm_capability(pci_dev, 0x60); pcie_endpoint_cap_init(pci_dev, 0x80); + pcie_cap_flr_init(pci_dev); + if (n->params.sriov_max_vfs) { + pcie_ari_init(pci_dev, 0x100, 1); + } - bar_size = QEMU_ALIGN_UP(n->reg_size, 4 * KiB); - msix_table_offset = bar_size; - msix_table_size = PCI_MSIX_ENTRY_SIZE * n->params.msix_qsize; - - bar_size += msix_table_size; - bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB); - msix_pba_offset = bar_size; - msix_pba_size = QEMU_ALIGN_UP(n->params.msix_qsize, 64) / 8; - - bar_size += msix_pba_size; - bar_size = pow2ceil(bar_size); + /* add one to max_ioqpairs to account for the admin queue pair */ + bar_size = nvme_bar_size(n->params.max_ioqpairs + 1, n->params.msix_qsize, + 
&msix_table_offset, &msix_pba_offset); memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size); memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme", - n->reg_size); + msix_table_offset); memory_region_add_subregion(&n->bar0, 0, &n->iomem); - pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | - PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0); + if (pci_is_vf(pci_dev)) { + pcie_sriov_vf_register_bar(pci_dev, 0, &n->bar0); + } else { + pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0); + } ret = msix_init(pci_dev, n->params.msix_qsize, &n->bar0, 0, msix_table_offset, &n->bar0, 0, msix_pba_offset, 0, &err); @@ -6366,6 +7339,8 @@ static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp) } } + nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize); + if (n->params.cmb_size_mb) { nvme_init_cmb(n, pci_dev); } @@ -6374,6 +7349,10 @@ static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp) nvme_init_pmr(n, pci_dev); } + if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs) { + nvme_init_sriov(n, pci_dev, 0x120); + } + return 0; } @@ -6395,16 +7374,18 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) NvmeIdCtrl *id = &n->id_ctrl; uint8_t *pci_conf = pci_dev->config; uint64_t cap = ldq_le_p(&n->bar.cap); + NvmeSecCtrlEntry *sctrl = nvme_sctrl(n); id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID)); id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID)); strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' '); - strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' '); + strpadcpy((char *)id->fr, sizeof(id->fr), QEMU_VERSION, ' '); strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' '); id->cntlid = cpu_to_le16(n->cntlid); id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR); + id->ctratt |= cpu_to_le32(NVME_CTRATT_ELBAS); id->rab = 6; @@ -6420,7 +7401,8 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) id->mdts = n->params.mdts; id->ver = cpu_to_le32(NVME_SPEC_VER); - id->oacs = cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT); + id->oacs = + cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT | NVME_OACS_DBBUF); id->cntrltype = 0x1; /* @@ -6459,9 +7441,8 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) */ id->vwc = NVME_VWC_NSID_BROADCAST_SUPPORT | NVME_VWC_PRESENT; - id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0); - id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN | - NVME_CTRL_SGLS_BITBUCKET); + id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0 | NVME_OCFS_COPY_FORMAT_1); + id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN); nvme_init_subnqn(n); @@ -6486,6 +7467,10 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) stl_le_p(&n->bar.vs, NVME_SPEC_VER); n->bar.intmc = n->bar.intms = 0; + + if (pci_is_vf(&n->parent_obj) && !sctrl->scs) { + stl_le_p(&n->bar.csts, NVME_CSTS_FAILED); + } } static int nvme_init_subsys(NvmeCtrl *n, Error **errp) @@ -6523,6 +7508,16 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp) NvmeCtrl *n = NVME(pci_dev); NvmeNamespace *ns; Error *local_err = NULL; + NvmeCtrl *pn = NVME(pcie_sriov_get_pf(pci_dev)); + + if (pci_is_vf(pci_dev)) { + /* + * VFs derive settings from the parent. PF's lifespan exceeds + * that of VF's, so it's safe to share params.serial. 
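+ * The VF also reuses the PF's subsystem below; its own queue and
+ * interrupt resources are assigned separately through the
+ * Virtualization Management command.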
+ */ + memcpy(&n->params, &pn->params, sizeof(NvmeParams)); + n->subsys = pn->subsys; + } nvme_check_constraints(n, &local_err); if (local_err) { @@ -6530,18 +7525,17 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp) return; } - qbus_create_inplace(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS, - &pci_dev->qdev, n->parent_obj.qdev.id); - - nvme_init_state(n); - if (nvme_init_pci(n, pci_dev, errp)) { - return; - } + qbus_init(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS, + &pci_dev->qdev, n->parent_obj.qdev.id); if (nvme_init_subsys(n, errp)) { error_propagate(errp, local_err); return; } + nvme_init_state(n); + if (nvme_init_pci(n, pci_dev, errp)) { + return; + } nvme_init_ctrl(n, pci_dev); /* setup a namespace if the controller drive property was given */ @@ -6563,7 +7557,7 @@ static void nvme_exit(PCIDevice *pci_dev) NvmeNamespace *ns; int i; - nvme_ctrl_reset(n); + nvme_ctrl_reset(n, NVME_RESET_FUNCTION); if (n->subsys) { for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { @@ -6587,6 +7581,11 @@ static void nvme_exit(PCIDevice *pci_dev) if (n->pmr.dev) { host_memory_backend_set_mapped(n->pmr.dev, false); } + + if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs) { + pcie_sriov_pf_exit(pci_dev); + } + msix_uninit(pci_dev, &n->bar0, &n->bar0); memory_region_del_subregion(&n->bar0, &n->iomem); } @@ -6608,9 +7607,19 @@ static Property nvme_props[] = { DEFINE_PROP_UINT8("vsl", NvmeCtrl, params.vsl, 7), DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false), DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false), + DEFINE_PROP_BOOL("ioeventfd", NvmeCtrl, params.ioeventfd, false), DEFINE_PROP_UINT8("zoned.zasl", NvmeCtrl, params.zasl, 0), DEFINE_PROP_BOOL("zoned.auto_transition", NvmeCtrl, params.auto_transition_zones, true), + DEFINE_PROP_UINT8("sriov_max_vfs", NvmeCtrl, params.sriov_max_vfs, 0), + DEFINE_PROP_UINT16("sriov_vq_flexible", NvmeCtrl, + params.sriov_vq_flexible, 0), + DEFINE_PROP_UINT16("sriov_vi_flexible", NvmeCtrl, + params.sriov_vi_flexible, 0), + DEFINE_PROP_UINT8("sriov_max_vi_per_vf", NvmeCtrl, + params.sriov_max_vi_per_vf, 0), + DEFINE_PROP_UINT8("sriov_max_vq_per_vf", NvmeCtrl, + params.sriov_max_vq_per_vf, 0), DEFINE_PROP_END_OF_LIST(), }; @@ -6656,6 +7665,47 @@ static void nvme_set_smart_warning(Object *obj, Visitor *v, const char *name, } } +static void nvme_pci_reset(DeviceState *qdev) +{ + PCIDevice *pci_dev = PCI_DEVICE(qdev); + NvmeCtrl *n = NVME(pci_dev); + + trace_pci_nvme_pci_reset(); + nvme_ctrl_reset(n, NVME_RESET_FUNCTION); +} + +static void nvme_sriov_pre_write_ctrl(PCIDevice *dev, uint32_t address, + uint32_t val, int len) +{ + NvmeCtrl *n = NVME(dev); + NvmeSecCtrlEntry *sctrl; + uint16_t sriov_cap = dev->exp.sriov_cap; + uint32_t off = address - sriov_cap; + int i, num_vfs; + + if (!sriov_cap) { + return; + } + + if (range_covers_byte(off, len, PCI_SRIOV_CTRL)) { + if (!(val & PCI_SRIOV_CTRL_VFE)) { + num_vfs = pci_get_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF); + for (i = 0; i < num_vfs; i++) { + sctrl = &n->sec_ctrl_list.sec[i]; + nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false); + } + } + } +} + +static void nvme_pci_write_config(PCIDevice *dev, uint32_t address, + uint32_t val, int len) +{ + nvme_sriov_pre_write_ctrl(dev, address, val, len); + pci_default_write_config(dev, address, val, len); + pcie_cap_flr_write_config(dev, address, val, len); +} + static const VMStateDescription nvme_vmstate = { .name = "nvme", .unmigratable = 1, @@ -6667,6 +7717,7 @@ static void nvme_class_init(ObjectClass *oc, void *data) 
PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc); pc->realize = nvme_realize; + pc->config_write = nvme_pci_write_config; pc->exit = nvme_exit; pc->class_id = PCI_CLASS_STORAGE_EXPRESS; pc->revision = 2; @@ -6675,6 +7726,7 @@ static void nvme_class_init(ObjectClass *oc, void *data) dc->desc = "Non-Volatile Memory Express"; device_class_set_props(dc, nvme_props); dc->vmsd = &nvme_vmstate; + dc->reset = nvme_pci_reset; } static void nvme_instance_init(Object *obj) diff --git a/hw/nvme/dif.c b/hw/nvme/dif.c index 5dbd18b2a4..63c44c86ab 100644 --- a/hw/nvme/dif.c +++ b/hw/nvme/dif.c @@ -13,57 +13,80 @@ #include "sysemu/block-backend.h" #include "nvme.h" +#include "dif.h" #include "trace.h" uint16_t nvme_check_prinfo(NvmeNamespace *ns, uint8_t prinfo, uint64_t slba, - uint32_t reftag) + uint64_t reftag) { + uint64_t mask = ns->pif ? 0xffffffffffff : 0xffffffff; + if ((NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) == NVME_ID_NS_DPS_TYPE_1) && - (prinfo & NVME_PRINFO_PRCHK_REF) && (slba & 0xffffffff) != reftag) { + (prinfo & NVME_PRINFO_PRCHK_REF) && (slba & mask) != reftag) { return NVME_INVALID_PROT_INFO | NVME_DNR; } + if ((NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) == NVME_ID_NS_DPS_TYPE_3) && + (prinfo & NVME_PRINFO_PRCHK_REF)) { + return NVME_INVALID_PROT_INFO; + } + return NVME_SUCCESS; } /* from Linux kernel (crypto/crct10dif_common.c) */ -static uint16_t crc_t10dif(uint16_t crc, const unsigned char *buffer, - size_t len) +static uint16_t crc16_t10dif(uint16_t crc, const unsigned char *buffer, + size_t len) { unsigned int i; for (i = 0; i < len; i++) { - crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff]; + crc = (crc << 8) ^ crc16_t10dif_table[((crc >> 8) ^ buffer[i]) & 0xff]; } return crc; } -void nvme_dif_pract_generate_dif(NvmeNamespace *ns, uint8_t *buf, size_t len, - uint8_t *mbuf, size_t mlen, uint16_t apptag, - uint32_t *reftag) +/* from Linux kernel (lib/crc64.c) */ +static uint64_t crc64_nvme(uint64_t crc, const unsigned char *buffer, + size_t len) +{ + size_t i; + + for (i = 0; i < len; i++) { + crc = (crc >> 8) ^ crc64_nvme_table[(crc & 0xff) ^ buffer[i]]; + } + + return crc ^ (uint64_t)~0; +} + +static void nvme_dif_pract_generate_dif_crc16(NvmeNamespace *ns, uint8_t *buf, + size_t len, uint8_t *mbuf, + size_t mlen, uint16_t apptag, + uint64_t *reftag) { uint8_t *end = buf + len; int16_t pil = 0; if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) { - pil = ns->lbaf.ms - sizeof(NvmeDifTuple); + pil = ns->lbaf.ms - nvme_pi_tuple_size(ns); } - trace_pci_nvme_dif_pract_generate_dif(len, ns->lbasz, ns->lbasz + pil, - apptag, *reftag); + trace_pci_nvme_dif_pract_generate_dif_crc16(len, ns->lbasz, + ns->lbasz + pil, apptag, + *reftag); for (; buf < end; buf += ns->lbasz, mbuf += ns->lbaf.ms) { NvmeDifTuple *dif = (NvmeDifTuple *)(mbuf + pil); - uint16_t crc = crc_t10dif(0x0, buf, ns->lbasz); + uint16_t crc = crc16_t10dif(0x0, buf, ns->lbasz); if (pil) { - crc = crc_t10dif(crc, mbuf, pil); + crc = crc16_t10dif(crc, mbuf, pil); } - dif->guard = cpu_to_be16(crc); - dif->apptag = cpu_to_be16(apptag); - dif->reftag = cpu_to_be32(*reftag); + dif->g16.guard = cpu_to_be16(crc); + dif->g16.apptag = cpu_to_be16(apptag); + dif->g16.reftag = cpu_to_be32(*reftag); if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) != NVME_ID_NS_DPS_TYPE_3) { (*reftag)++; @@ -71,57 +94,114 @@ void nvme_dif_pract_generate_dif(NvmeNamespace *ns, uint8_t *buf, size_t len, } } -static uint16_t nvme_dif_prchk(NvmeNamespace *ns, NvmeDifTuple *dif, - uint8_t *buf, uint8_t *mbuf, size_t pil, - uint8_t prinfo, uint16_t apptag, - uint16_t 
appmask, uint32_t reftag) +static void nvme_dif_pract_generate_dif_crc64(NvmeNamespace *ns, uint8_t *buf, + size_t len, uint8_t *mbuf, + size_t mlen, uint16_t apptag, + uint64_t *reftag) +{ + uint8_t *end = buf + len; + int16_t pil = 0; + + if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) { + pil = ns->lbaf.ms - 16; + } + + trace_pci_nvme_dif_pract_generate_dif_crc64(len, ns->lbasz, + ns->lbasz + pil, apptag, + *reftag); + + for (; buf < end; buf += ns->lbasz, mbuf += ns->lbaf.ms) { + NvmeDifTuple *dif = (NvmeDifTuple *)(mbuf + pil); + uint64_t crc = crc64_nvme(~0ULL, buf, ns->lbasz); + + if (pil) { + crc = crc64_nvme(crc, mbuf, pil); + } + + dif->g64.guard = cpu_to_be64(crc); + dif->g64.apptag = cpu_to_be16(apptag); + + dif->g64.sr[0] = *reftag >> 40; + dif->g64.sr[1] = *reftag >> 32; + dif->g64.sr[2] = *reftag >> 24; + dif->g64.sr[3] = *reftag >> 16; + dif->g64.sr[4] = *reftag >> 8; + dif->g64.sr[5] = *reftag; + + if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) != NVME_ID_NS_DPS_TYPE_3) { + (*reftag)++; + } + } +} + +void nvme_dif_pract_generate_dif(NvmeNamespace *ns, uint8_t *buf, size_t len, + uint8_t *mbuf, size_t mlen, uint16_t apptag, + uint64_t *reftag) +{ + switch (ns->pif) { + case NVME_PI_GUARD_16: + return nvme_dif_pract_generate_dif_crc16(ns, buf, len, mbuf, mlen, + apptag, reftag); + case NVME_PI_GUARD_64: + return nvme_dif_pract_generate_dif_crc64(ns, buf, len, mbuf, mlen, + apptag, reftag); + } + + abort(); +} + +static uint16_t nvme_dif_prchk_crc16(NvmeNamespace *ns, NvmeDifTuple *dif, + uint8_t *buf, uint8_t *mbuf, size_t pil, + uint8_t prinfo, uint16_t apptag, + uint16_t appmask, uint64_t reftag) { switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { case NVME_ID_NS_DPS_TYPE_3: - if (be32_to_cpu(dif->reftag) != 0xffffffff) { + if (be32_to_cpu(dif->g16.reftag) != 0xffffffff) { break; } /* fallthrough */ case NVME_ID_NS_DPS_TYPE_1: case NVME_ID_NS_DPS_TYPE_2: - if (be16_to_cpu(dif->apptag) != 0xffff) { + if (be16_to_cpu(dif->g16.apptag) != 0xffff) { break; } - trace_pci_nvme_dif_prchk_disabled(be16_to_cpu(dif->apptag), - be32_to_cpu(dif->reftag)); + trace_pci_nvme_dif_prchk_disabled_crc16(be16_to_cpu(dif->g16.apptag), + be32_to_cpu(dif->g16.reftag)); return NVME_SUCCESS; } if (prinfo & NVME_PRINFO_PRCHK_GUARD) { - uint16_t crc = crc_t10dif(0x0, buf, ns->lbasz); + uint16_t crc = crc16_t10dif(0x0, buf, ns->lbasz); if (pil) { - crc = crc_t10dif(crc, mbuf, pil); + crc = crc16_t10dif(crc, mbuf, pil); } - trace_pci_nvme_dif_prchk_guard(be16_to_cpu(dif->guard), crc); + trace_pci_nvme_dif_prchk_guard_crc16(be16_to_cpu(dif->g16.guard), crc); - if (be16_to_cpu(dif->guard) != crc) { + if (be16_to_cpu(dif->g16.guard) != crc) { return NVME_E2E_GUARD_ERROR; } } if (prinfo & NVME_PRINFO_PRCHK_APP) { - trace_pci_nvme_dif_prchk_apptag(be16_to_cpu(dif->apptag), apptag, + trace_pci_nvme_dif_prchk_apptag(be16_to_cpu(dif->g16.apptag), apptag, appmask); - if ((be16_to_cpu(dif->apptag) & appmask) != (apptag & appmask)) { + if ((be16_to_cpu(dif->g16.apptag) & appmask) != (apptag & appmask)) { return NVME_E2E_APP_ERROR; } } if (prinfo & NVME_PRINFO_PRCHK_REF) { - trace_pci_nvme_dif_prchk_reftag(be32_to_cpu(dif->reftag), reftag); + trace_pci_nvme_dif_prchk_reftag_crc16(be32_to_cpu(dif->g16.reftag), + reftag); - if (be32_to_cpu(dif->reftag) != reftag) { + if (be32_to_cpu(dif->g16.reftag) != reftag) { return NVME_E2E_REF_ERROR; } } @@ -129,12 +209,96 @@ static uint16_t nvme_dif_prchk(NvmeNamespace *ns, NvmeDifTuple *dif, return NVME_SUCCESS; } +static uint16_t nvme_dif_prchk_crc64(NvmeNamespace *ns, NvmeDifTuple 
*dif, + uint8_t *buf, uint8_t *mbuf, size_t pil, + uint8_t prinfo, uint16_t apptag, + uint16_t appmask, uint64_t reftag) +{ + uint64_t r = 0; + + r |= (uint64_t)dif->g64.sr[0] << 40; + r |= (uint64_t)dif->g64.sr[1] << 32; + r |= (uint64_t)dif->g64.sr[2] << 24; + r |= (uint64_t)dif->g64.sr[3] << 16; + r |= (uint64_t)dif->g64.sr[4] << 8; + r |= (uint64_t)dif->g64.sr[5]; + + switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { + case NVME_ID_NS_DPS_TYPE_3: + if (r != 0xffffffffffff) { + break; + } + + /* fallthrough */ + case NVME_ID_NS_DPS_TYPE_1: + case NVME_ID_NS_DPS_TYPE_2: + if (be16_to_cpu(dif->g64.apptag) != 0xffff) { + break; + } + + trace_pci_nvme_dif_prchk_disabled_crc64(be16_to_cpu(dif->g16.apptag), + r); + + return NVME_SUCCESS; + } + + if (prinfo & NVME_PRINFO_PRCHK_GUARD) { + uint64_t crc = crc64_nvme(~0ULL, buf, ns->lbasz); + + if (pil) { + crc = crc64_nvme(crc, mbuf, pil); + } + + trace_pci_nvme_dif_prchk_guard_crc64(be64_to_cpu(dif->g64.guard), crc); + + if (be64_to_cpu(dif->g64.guard) != crc) { + return NVME_E2E_GUARD_ERROR; + } + } + + if (prinfo & NVME_PRINFO_PRCHK_APP) { + trace_pci_nvme_dif_prchk_apptag(be16_to_cpu(dif->g64.apptag), apptag, + appmask); + + if ((be16_to_cpu(dif->g64.apptag) & appmask) != (apptag & appmask)) { + return NVME_E2E_APP_ERROR; + } + } + + if (prinfo & NVME_PRINFO_PRCHK_REF) { + trace_pci_nvme_dif_prchk_reftag_crc64(r, reftag); + + if (r != reftag) { + return NVME_E2E_REF_ERROR; + } + } + + return NVME_SUCCESS; +} + +static uint16_t nvme_dif_prchk(NvmeNamespace *ns, NvmeDifTuple *dif, + uint8_t *buf, uint8_t *mbuf, size_t pil, + uint8_t prinfo, uint16_t apptag, + uint16_t appmask, uint64_t reftag) +{ + switch (ns->pif) { + case NVME_PI_GUARD_16: + return nvme_dif_prchk_crc16(ns, dif, buf, mbuf, pil, prinfo, apptag, + appmask, reftag); + case NVME_PI_GUARD_64: + return nvme_dif_prchk_crc64(ns, dif, buf, mbuf, pil, prinfo, apptag, + appmask, reftag); + } + + abort(); +} + uint16_t nvme_dif_check(NvmeNamespace *ns, uint8_t *buf, size_t len, uint8_t *mbuf, size_t mlen, uint8_t prinfo, uint64_t slba, uint16_t apptag, - uint16_t appmask, uint32_t *reftag) + uint16_t appmask, uint64_t *reftag) { - uint8_t *end = buf + len; + uint8_t *bufp, *end = buf + len; int16_t pil = 0; uint16_t status; @@ -144,18 +308,34 @@ uint16_t nvme_dif_check(NvmeNamespace *ns, uint8_t *buf, size_t len, } if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) { - pil = ns->lbaf.ms - sizeof(NvmeDifTuple); + pil = ns->lbaf.ms - nvme_pi_tuple_size(ns); } trace_pci_nvme_dif_check(prinfo, ns->lbasz + pil); - for (; buf < end; buf += ns->lbasz, mbuf += ns->lbaf.ms) { + for (bufp = buf; bufp < end; bufp += ns->lbasz, mbuf += ns->lbaf.ms) { NvmeDifTuple *dif = (NvmeDifTuple *)(mbuf + pil); - - status = nvme_dif_prchk(ns, dif, buf, mbuf, pil, prinfo, apptag, + status = nvme_dif_prchk(ns, dif, bufp, mbuf, pil, prinfo, apptag, appmask, *reftag); if (status) { - return status; + /* + * The first block of a 'raw' image is always allocated, so we + * cannot reliably know if the block is all zeroes or not. For + * CRC16 this works fine because the T10 CRC16 is 0x0 for all + * zeroes, but the Rocksoft CRC64 is not. Thus, if a guard error is + * detected for the first block, check if it is zeroed and manually + * set the protection information to all ones to disable protection + * information checking. 
+ */ + if (status == NVME_E2E_GUARD_ERROR && slba == 0x0 && bufp == buf) { + g_autofree uint8_t *zeroes = g_malloc0(ns->lbasz); + + if (memcmp(bufp, zeroes, ns->lbasz) == 0) { + memset(mbuf + pil, 0xff, nvme_pi_tuple_size(ns)); + } + } else { + return status; + } } if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) != NVME_ID_NS_DPS_TYPE_3) { @@ -183,7 +363,7 @@ uint16_t nvme_dif_mangle_mdata(NvmeNamespace *ns, uint8_t *mbuf, size_t mlen, if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) { - pil = ns->lbaf.ms - sizeof(NvmeDifTuple); + pil = ns->lbaf.ms - nvme_pi_tuple_size(ns); } do { @@ -209,7 +389,7 @@ uint16_t nvme_dif_mangle_mdata(NvmeNamespace *ns, uint8_t *mbuf, size_t mlen, end = mbufp + mlen; for (; mbufp < end; mbufp += ns->lbaf.ms) { - memset(mbufp + pil, 0xff, sizeof(NvmeDifTuple)); + memset(mbufp + pil, 0xff, nvme_pi_tuple_size(ns)); } } @@ -251,9 +431,12 @@ static void nvme_dif_rw_check_cb(void *opaque, int ret) uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); uint16_t apptag = le16_to_cpu(rw->apptag); uint16_t appmask = le16_to_cpu(rw->appmask); - uint32_t reftag = le32_to_cpu(rw->reftag); + uint64_t reftag = le32_to_cpu(rw->reftag); + uint64_t cdw3 = le32_to_cpu(rw->cdw3); uint16_t status; + reftag |= cdw3 << 32; + trace_pci_nvme_dif_rw_check_cb(nvme_cid(req), prinfo, apptag, appmask, reftag); @@ -283,7 +466,7 @@ static void nvme_dif_rw_check_cb(void *opaque, int ret) goto out; } - if (prinfo & NVME_PRINFO_PRACT && ns->lbaf.ms == 8) { + if (prinfo & NVME_PRINFO_PRACT && ns->lbaf.ms == nvme_pi_tuple_size(ns)) { goto out; } @@ -367,11 +550,14 @@ uint16_t nvme_dif_rw(NvmeCtrl *n, NvmeRequest *req) uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); uint16_t apptag = le16_to_cpu(rw->apptag); uint16_t appmask = le16_to_cpu(rw->appmask); - uint32_t reftag = le32_to_cpu(rw->reftag); + uint64_t reftag = le32_to_cpu(rw->reftag); + uint64_t cdw3 = le32_to_cpu(rw->cdw3); bool pract = !!(prinfo & NVME_PRINFO_PRACT); NvmeBounceContext *ctx; uint16_t status; + reftag |= cdw3 << 32; + trace_pci_nvme_dif_rw(pract, prinfo); ctx = g_new0(NvmeBounceContext, 1); @@ -387,7 +573,7 @@ uint16_t nvme_dif_rw(NvmeCtrl *n, NvmeRequest *req) if (pract) { uint8_t *mbuf, *end; - int16_t pil = ns->lbaf.ms - sizeof(NvmeDifTuple); + int16_t pil = ns->lbaf.ms - nvme_pi_tuple_size(ns); status = nvme_check_prinfo(ns, prinfo, slba, reftag); if (status) { @@ -411,8 +597,29 @@ uint16_t nvme_dif_rw(NvmeCtrl *n, NvmeRequest *req) for (; mbuf < end; mbuf += ns->lbaf.ms) { NvmeDifTuple *dif = (NvmeDifTuple *)(mbuf + pil); - dif->apptag = cpu_to_be16(apptag); - dif->reftag = cpu_to_be32(reftag); + switch (ns->pif) { + case NVME_PI_GUARD_16: + dif->g16.apptag = cpu_to_be16(apptag); + dif->g16.reftag = cpu_to_be32(reftag); + + break; + + case NVME_PI_GUARD_64: + dif->g64.guard = cpu_to_be64(0x6482d367eb22b64e); + dif->g64.apptag = cpu_to_be16(apptag); + + dif->g64.sr[0] = reftag >> 40; + dif->g64.sr[1] = reftag >> 32; + dif->g64.sr[2] = reftag >> 24; + dif->g64.sr[3] = reftag >> 16; + dif->g64.sr[4] = reftag >> 8; + dif->g64.sr[5] = reftag; + + break; + + default: + abort(); + } switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { case NVME_ID_NS_DPS_TYPE_1: @@ -427,7 +634,7 @@ uint16_t nvme_dif_rw(NvmeCtrl *n, NvmeRequest *req) return NVME_NO_COMPLETE; } - if (nvme_ns_ext(ns) && !(pract && ns->lbaf.ms == 8)) { + if (nvme_ns_ext(ns) && !(pract && ns->lbaf.ms == nvme_pi_tuple_size(ns))) { mapped_len += mlen; } @@ -461,7 +668,7 @@ uint16_t nvme_dif_rw(NvmeCtrl *n, NvmeRequest *req) qemu_iovec_init(&ctx->mdata.iov, 1); 
qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); - if (!(pract && ns->lbaf.ms == 8)) { + if (!(pract && ns->lbaf.ms == nvme_pi_tuple_size(ns))) { status = nvme_bounce_mdata(n, ctx->mdata.bounce, ctx->mdata.iov.size, NVME_TX_DIRECTION_TO_DEVICE, req); if (status) { diff --git a/hw/nvme/dif.h b/hw/nvme/dif.h new file mode 100644 index 0000000000..f12e312250 --- /dev/null +++ b/hw/nvme/dif.h @@ -0,0 +1,191 @@ +#ifndef HW_NVME_DIF_H +#define HW_NVME_DIF_H + +/* from Linux kernel (crypto/crct10dif_common.c) */ +static const uint16_t crc16_t10dif_table[256] = { + 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B, + 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6, + 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6, + 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B, + 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1, + 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C, + 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C, + 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781, + 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8, + 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255, + 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925, + 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698, + 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472, + 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF, + 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF, + 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02, + 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA, + 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067, + 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17, + 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA, + 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640, + 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD, + 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D, + 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30, + 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759, + 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4, + 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394, + 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29, + 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3, + 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E, + 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E, + 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3 +}; + +#define CRC64_NVME_POLY 0x9A6C9329AC4BC9B5ULL + +static const uint64_t crc64_nvme_table[] = { + 0x0000000000000000ULL, 0x7F6EF0C830358979ULL, + 0xFEDDE190606B12F2ULL, 0x81B31158505E9B8BULL, + 0xC962E5739841B68FULL, 0xB60C15BBA8743FF6ULL, + 0x37BF04E3F82AA47DULL, 0x48D1F42BC81F2D04ULL, + 0xA61CECB46814FE75ULL, 0xD9721C7C5821770CULL, + 0x58C10D24087FEC87ULL, 0x27AFFDEC384A65FEULL, + 0x6F7E09C7F05548FAULL, 0x1010F90FC060C183ULL, + 0x91A3E857903E5A08ULL, 0xEECD189FA00BD371ULL, + 0x78E0FF3B88BE6F81ULL, 0x078E0FF3B88BE6F8ULL, + 0x863D1EABE8D57D73ULL, 0xF953EE63D8E0F40AULL, + 0xB1821A4810FFD90EULL, 0xCEECEA8020CA5077ULL, + 0x4F5FFBD87094CBFCULL, 0x30310B1040A14285ULL, + 0xDEFC138FE0AA91F4ULL, 0xA192E347D09F188DULL, + 0x2021F21F80C18306ULL, 0x5F4F02D7B0F40A7FULL, + 0x179EF6FC78EB277BULL, 0x68F0063448DEAE02ULL, + 0xE943176C18803589ULL, 0x962DE7A428B5BCF0ULL, + 
0xF1C1FE77117CDF02ULL, 0x8EAF0EBF2149567BULL, + 0x0F1C1FE77117CDF0ULL, 0x7072EF2F41224489ULL, + 0x38A31B04893D698DULL, 0x47CDEBCCB908E0F4ULL, + 0xC67EFA94E9567B7FULL, 0xB9100A5CD963F206ULL, + 0x57DD12C379682177ULL, 0x28B3E20B495DA80EULL, + 0xA900F35319033385ULL, 0xD66E039B2936BAFCULL, + 0x9EBFF7B0E12997F8ULL, 0xE1D10778D11C1E81ULL, + 0x606216208142850AULL, 0x1F0CE6E8B1770C73ULL, + 0x8921014C99C2B083ULL, 0xF64FF184A9F739FAULL, + 0x77FCE0DCF9A9A271ULL, 0x08921014C99C2B08ULL, + 0x4043E43F0183060CULL, 0x3F2D14F731B68F75ULL, + 0xBE9E05AF61E814FEULL, 0xC1F0F56751DD9D87ULL, + 0x2F3DEDF8F1D64EF6ULL, 0x50531D30C1E3C78FULL, + 0xD1E00C6891BD5C04ULL, 0xAE8EFCA0A188D57DULL, + 0xE65F088B6997F879ULL, 0x9931F84359A27100ULL, + 0x1882E91B09FCEA8BULL, 0x67EC19D339C963F2ULL, + 0xD75ADABD7A6E2D6FULL, 0xA8342A754A5BA416ULL, + 0x29873B2D1A053F9DULL, 0x56E9CBE52A30B6E4ULL, + 0x1E383FCEE22F9BE0ULL, 0x6156CF06D21A1299ULL, + 0xE0E5DE5E82448912ULL, 0x9F8B2E96B271006BULL, + 0x71463609127AD31AULL, 0x0E28C6C1224F5A63ULL, + 0x8F9BD7997211C1E8ULL, 0xF0F5275142244891ULL, + 0xB824D37A8A3B6595ULL, 0xC74A23B2BA0EECECULL, + 0x46F932EAEA507767ULL, 0x3997C222DA65FE1EULL, + 0xAFBA2586F2D042EEULL, 0xD0D4D54EC2E5CB97ULL, + 0x5167C41692BB501CULL, 0x2E0934DEA28ED965ULL, + 0x66D8C0F56A91F461ULL, 0x19B6303D5AA47D18ULL, + 0x980521650AFAE693ULL, 0xE76BD1AD3ACF6FEAULL, + 0x09A6C9329AC4BC9BULL, 0x76C839FAAAF135E2ULL, + 0xF77B28A2FAAFAE69ULL, 0x8815D86ACA9A2710ULL, + 0xC0C42C4102850A14ULL, 0xBFAADC8932B0836DULL, + 0x3E19CDD162EE18E6ULL, 0x41773D1952DB919FULL, + 0x269B24CA6B12F26DULL, 0x59F5D4025B277B14ULL, + 0xD846C55A0B79E09FULL, 0xA72835923B4C69E6ULL, + 0xEFF9C1B9F35344E2ULL, 0x90973171C366CD9BULL, + 0x1124202993385610ULL, 0x6E4AD0E1A30DDF69ULL, + 0x8087C87E03060C18ULL, 0xFFE938B633338561ULL, + 0x7E5A29EE636D1EEAULL, 0x0134D92653589793ULL, + 0x49E52D0D9B47BA97ULL, 0x368BDDC5AB7233EEULL, + 0xB738CC9DFB2CA865ULL, 0xC8563C55CB19211CULL, + 0x5E7BDBF1E3AC9DECULL, 0x21152B39D3991495ULL, + 0xA0A63A6183C78F1EULL, 0xDFC8CAA9B3F20667ULL, + 0x97193E827BED2B63ULL, 0xE877CE4A4BD8A21AULL, + 0x69C4DF121B863991ULL, 0x16AA2FDA2BB3B0E8ULL, + 0xF86737458BB86399ULL, 0x8709C78DBB8DEAE0ULL, + 0x06BAD6D5EBD3716BULL, 0x79D4261DDBE6F812ULL, + 0x3105D23613F9D516ULL, 0x4E6B22FE23CC5C6FULL, + 0xCFD833A67392C7E4ULL, 0xB0B6C36E43A74E9DULL, + 0x9A6C9329AC4BC9B5ULL, 0xE50263E19C7E40CCULL, + 0x64B172B9CC20DB47ULL, 0x1BDF8271FC15523EULL, + 0x530E765A340A7F3AULL, 0x2C608692043FF643ULL, + 0xADD397CA54616DC8ULL, 0xD2BD67026454E4B1ULL, + 0x3C707F9DC45F37C0ULL, 0x431E8F55F46ABEB9ULL, + 0xC2AD9E0DA4342532ULL, 0xBDC36EC59401AC4BULL, + 0xF5129AEE5C1E814FULL, 0x8A7C6A266C2B0836ULL, + 0x0BCF7B7E3C7593BDULL, 0x74A18BB60C401AC4ULL, + 0xE28C6C1224F5A634ULL, 0x9DE29CDA14C02F4DULL, + 0x1C518D82449EB4C6ULL, 0x633F7D4A74AB3DBFULL, + 0x2BEE8961BCB410BBULL, 0x548079A98C8199C2ULL, + 0xD53368F1DCDF0249ULL, 0xAA5D9839ECEA8B30ULL, + 0x449080A64CE15841ULL, 0x3BFE706E7CD4D138ULL, + 0xBA4D61362C8A4AB3ULL, 0xC52391FE1CBFC3CAULL, + 0x8DF265D5D4A0EECEULL, 0xF29C951DE49567B7ULL, + 0x732F8445B4CBFC3CULL, 0x0C41748D84FE7545ULL, + 0x6BAD6D5EBD3716B7ULL, 0x14C39D968D029FCEULL, + 0x95708CCEDD5C0445ULL, 0xEA1E7C06ED698D3CULL, + 0xA2CF882D2576A038ULL, 0xDDA178E515432941ULL, + 0x5C1269BD451DB2CAULL, 0x237C997575283BB3ULL, + 0xCDB181EAD523E8C2ULL, 0xB2DF7122E51661BBULL, + 0x336C607AB548FA30ULL, 0x4C0290B2857D7349ULL, + 0x04D364994D625E4DULL, 0x7BBD94517D57D734ULL, + 0xFA0E85092D094CBFULL, 0x856075C11D3CC5C6ULL, + 0x134D926535897936ULL, 0x6C2362AD05BCF04FULL, + 0xED9073F555E26BC4ULL, 0x92FE833D65D7E2BDULL, + 
0xDA2F7716ADC8CFB9ULL, 0xA54187DE9DFD46C0ULL, + 0x24F29686CDA3DD4BULL, 0x5B9C664EFD965432ULL, + 0xB5517ED15D9D8743ULL, 0xCA3F8E196DA80E3AULL, + 0x4B8C9F413DF695B1ULL, 0x34E26F890DC31CC8ULL, + 0x7C339BA2C5DC31CCULL, 0x035D6B6AF5E9B8B5ULL, + 0x82EE7A32A5B7233EULL, 0xFD808AFA9582AA47ULL, + 0x4D364994D625E4DAULL, 0x3258B95CE6106DA3ULL, + 0xB3EBA804B64EF628ULL, 0xCC8558CC867B7F51ULL, + 0x8454ACE74E645255ULL, 0xFB3A5C2F7E51DB2CULL, + 0x7A894D772E0F40A7ULL, 0x05E7BDBF1E3AC9DEULL, + 0xEB2AA520BE311AAFULL, 0x944455E88E0493D6ULL, + 0x15F744B0DE5A085DULL, 0x6A99B478EE6F8124ULL, + 0x224840532670AC20ULL, 0x5D26B09B16452559ULL, + 0xDC95A1C3461BBED2ULL, 0xA3FB510B762E37ABULL, + 0x35D6B6AF5E9B8B5BULL, 0x4AB846676EAE0222ULL, + 0xCB0B573F3EF099A9ULL, 0xB465A7F70EC510D0ULL, + 0xFCB453DCC6DA3DD4ULL, 0x83DAA314F6EFB4ADULL, + 0x0269B24CA6B12F26ULL, 0x7D0742849684A65FULL, + 0x93CA5A1B368F752EULL, 0xECA4AAD306BAFC57ULL, + 0x6D17BB8B56E467DCULL, 0x12794B4366D1EEA5ULL, + 0x5AA8BF68AECEC3A1ULL, 0x25C64FA09EFB4AD8ULL, + 0xA4755EF8CEA5D153ULL, 0xDB1BAE30FE90582AULL, + 0xBCF7B7E3C7593BD8ULL, 0xC399472BF76CB2A1ULL, + 0x422A5673A732292AULL, 0x3D44A6BB9707A053ULL, + 0x759552905F188D57ULL, 0x0AFBA2586F2D042EULL, + 0x8B48B3003F739FA5ULL, 0xF42643C80F4616DCULL, + 0x1AEB5B57AF4DC5ADULL, 0x6585AB9F9F784CD4ULL, + 0xE436BAC7CF26D75FULL, 0x9B584A0FFF135E26ULL, + 0xD389BE24370C7322ULL, 0xACE74EEC0739FA5BULL, + 0x2D545FB4576761D0ULL, 0x523AAF7C6752E8A9ULL, + 0xC41748D84FE75459ULL, 0xBB79B8107FD2DD20ULL, + 0x3ACAA9482F8C46ABULL, 0x45A459801FB9CFD2ULL, + 0x0D75ADABD7A6E2D6ULL, 0x721B5D63E7936BAFULL, + 0xF3A84C3BB7CDF024ULL, 0x8CC6BCF387F8795DULL, + 0x620BA46C27F3AA2CULL, 0x1D6554A417C62355ULL, + 0x9CD645FC4798B8DEULL, 0xE3B8B53477AD31A7ULL, + 0xAB69411FBFB21CA3ULL, 0xD407B1D78F8795DAULL, + 0x55B4A08FDFD90E51ULL, 0x2ADA5047EFEC8728ULL, +}; + +static inline size_t nvme_pi_tuple_size(NvmeNamespace *ns) +{ + return ns->pif ? 
16 : 8; +} + +uint16_t nvme_check_prinfo(NvmeNamespace *ns, uint8_t prinfo, uint64_t slba, + uint64_t reftag); +uint16_t nvme_dif_mangle_mdata(NvmeNamespace *ns, uint8_t *mbuf, size_t mlen, + uint64_t slba); +void nvme_dif_pract_generate_dif(NvmeNamespace *ns, uint8_t *buf, size_t len, + uint8_t *mbuf, size_t mlen, uint16_t apptag, + uint64_t *reftag); +uint16_t nvme_dif_check(NvmeNamespace *ns, uint8_t *buf, size_t len, + uint8_t *mbuf, size_t mlen, uint8_t prinfo, + uint64_t slba, uint16_t apptag, + uint16_t appmask, uint64_t *reftag); +uint16_t nvme_dif_rw(NvmeCtrl *n, NvmeRequest *req); + +#endif /* HW_NVME_DIF_H */ diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c index b7cf1494e7..62a1f97be0 100644 --- a/hw/nvme/ns.c +++ b/hw/nvme/ns.c @@ -29,7 +29,8 @@ void nvme_ns_init_format(NvmeNamespace *ns) { NvmeIdNs *id_ns = &ns->id_ns; BlockDriverInfo bdi; - int npdg, nlbas, ret; + int npdg, ret; + int64_t nlbas; ns->lbaf = id_ns->lbaf[NVME_ID_NS_FLBAS_INDEX(id_ns->flbas)]; ns->lbasz = 1 << ns->lbaf.ds; @@ -42,7 +43,7 @@ void nvme_ns_init_format(NvmeNamespace *ns) id_ns->ncap = id_ns->nsze; id_ns->nuse = id_ns->ncap; - ns->moff = (int64_t)nlbas << ns->lbaf.ds; + ns->moff = nlbas << ns->lbaf.ds; npdg = ns->blkconf.discard_granularity / ns->lbasz; @@ -58,6 +59,7 @@ static int nvme_ns_init(NvmeNamespace *ns, Error **errp) { static uint64_t ns_count; NvmeIdNs *id_ns = &ns->id_ns; + NvmeIdNsNvm *id_ns_nvm = &ns->id_ns_nvm; uint8_t ds; uint16_t ms; int i; @@ -101,6 +103,8 @@ static int nvme_ns_init(NvmeNamespace *ns, Error **errp) id_ns->dps |= NVME_ID_NS_DPS_FIRST_EIGHT; } + ns->pif = ns->params.pif; + static const NvmeLBAF lbaf[16] = { [0] = { .ds = 9 }, [1] = { .ds = 9, .ms = 8 }, @@ -112,10 +116,11 @@ static int nvme_ns_init(NvmeNamespace *ns, Error **errp) [7] = { .ds = 12, .ms = 64 }, }; - memcpy(&id_ns->lbaf, &lbaf, sizeof(lbaf)); - id_ns->nlbaf = 7; + ns->nlbaf = 8; - for (i = 0; i <= id_ns->nlbaf; i++) { + memcpy(&id_ns->lbaf, &lbaf, sizeof(lbaf)); + + for (i = 0; i < ns->nlbaf; i++) { NvmeLBAF *lbaf = &id_ns->lbaf[i]; if (lbaf->ds == ds) { if (lbaf->ms == ms) { @@ -126,12 +131,16 @@ static int nvme_ns_init(NvmeNamespace *ns, Error **errp) } /* add non-standard lba format */ - id_ns->nlbaf++; - id_ns->lbaf[id_ns->nlbaf].ds = ds; - id_ns->lbaf[id_ns->nlbaf].ms = ms; - id_ns->flbas |= id_ns->nlbaf; + id_ns->lbaf[ns->nlbaf].ds = ds; + id_ns->lbaf[ns->nlbaf].ms = ms; + ns->nlbaf++; + + id_ns->flbas |= i; + lbaf_found: + id_ns_nvm->elbaf[i] = (ns->pif & 0x3) << 7; + id_ns->nlbaf = ns->nlbaf - 1; nvme_ns_init_format(ns); return 0; @@ -260,13 +269,14 @@ static void nvme_ns_init_zoned(NvmeNamespace *ns) nvme_ns_zoned_init_state(ns); - id_ns_z = g_malloc0(sizeof(NvmeIdNsZoned)); + id_ns_z = g_new0(NvmeIdNsZoned, 1); /* MAR/MOR are zeroes-based, FFFFFFFFFh means no limit */ id_ns_z->mar = cpu_to_le32(ns->params.max_active_zones - 1); id_ns_z->mor = cpu_to_le32(ns->params.max_open_zones - 1); id_ns_z->zoc = 0; - id_ns_z->ozcs = ns->params.cross_zone_read ? 0x01 : 0x00; + id_ns_z->ozcs = ns->params.cross_zone_read ? + NVME_ID_NS_ZONED_OZCS_RAZB : 0x00; for (i = 0; i <= ns->id_ns.nlbaf; i++) { id_ns_z->lbafe[i].zsze = cpu_to_le64(ns->zone_size); @@ -274,6 +284,23 @@ static void nvme_ns_init_zoned(NvmeNamespace *ns) ns->params.zd_extension_size >> 6; /* Units of 64B */ } + if (ns->params.zrwas) { + ns->zns.numzrwa = ns->params.numzrwa ? 
+ ns->params.numzrwa : ns->num_zones; + + ns->zns.zrwas = ns->params.zrwas >> ns->lbaf.ds; + ns->zns.zrwafg = ns->params.zrwafg >> ns->lbaf.ds; + + id_ns_z->ozcs |= NVME_ID_NS_ZONED_OZCS_ZRWASUP; + id_ns_z->zrwacap = NVME_ID_NS_ZONED_ZRWACAP_EXPFLUSHSUP; + + id_ns_z->numzrwa = cpu_to_le32(ns->params.numzrwa); + id_ns_z->zrwas = cpu_to_le16(ns->zns.zrwas); + id_ns_z->zrwafg = cpu_to_le16(ns->zns.zrwafg); + } + + id_ns_z->ozcs = cpu_to_le16(id_ns_z->ozcs); + ns->csi = NVME_CSI_ZONED; ns->id_ns.nsze = cpu_to_le64(ns->num_zones * ns->zone_size); ns->id_ns.ncap = ns->id_ns.nsze; @@ -314,6 +341,10 @@ static void nvme_clear_zone(NvmeNamespace *ns, NvmeZone *zone) QTAILQ_INSERT_HEAD(&ns->closed_zones, zone, entry); } else { trace_pci_nvme_clear_ns_reset(state, zone->d.zslba); + if (zone->d.za & NVME_ZA_ZRWA_VALID) { + zone->d.za &= ~NVME_ZA_ZRWA_VALID; + ns->zns.numzrwa++; + } nvme_set_zone_state(zone, NVME_ZONE_STATE_EMPTY); } } @@ -348,15 +379,36 @@ static void nvme_zoned_ns_shutdown(NvmeNamespace *ns) static int nvme_ns_check_constraints(NvmeNamespace *ns, Error **errp) { + unsigned int pi_size; + if (!ns->blkconf.blk) { error_setg(errp, "block backend not configured"); return -1; } - if (ns->params.pi && ns->params.ms < 8) { - error_setg(errp, "at least 8 bytes of metadata required to enable " - "protection information"); - return -1; + if (ns->params.pi) { + if (ns->params.pi > NVME_ID_NS_DPS_TYPE_3) { + error_setg(errp, "invalid 'pi' value"); + return -1; + } + + switch (ns->params.pif) { + case NVME_PI_GUARD_16: + pi_size = 8; + break; + case NVME_PI_GUARD_64: + pi_size = 16; + break; + default: + error_setg(errp, "invalid 'pif'"); + return -1; + } + + if (ns->params.ms < pi_size) { + error_setg(errp, "at least %u bytes of metadata required to " + "enable protection information", pi_size); + return -1; + } } if (ns->params.nsid > NVME_MAX_NAMESPACES) { @@ -391,6 +443,40 @@ static int nvme_ns_check_constraints(NvmeNamespace *ns, Error **errp) return -1; } } + + if (ns->params.zrwas) { + if (ns->params.zrwas % ns->blkconf.logical_block_size) { + error_setg(errp, "zone random write area size (zoned.zrwas " + "%"PRIu64") must be a multiple of the logical " + "block size (logical_block_size %"PRIu32")", + ns->params.zrwas, ns->blkconf.logical_block_size); + return -1; + } + + if (ns->params.zrwafg == -1) { + ns->params.zrwafg = ns->blkconf.logical_block_size; + } + + if (ns->params.zrwas % ns->params.zrwafg) { + error_setg(errp, "zone random write area size (zoned.zrwas " + "%"PRIu64") must be a multiple of the zone random " + "write area flush granularity (zoned.zrwafg, " + "%"PRIu64")", ns->params.zrwas, ns->params.zrwafg); + return -1; + } + + if (ns->params.max_active_zones) { + if (ns->params.numzrwa > ns->params.max_active_zones) { + error_setg(errp, "number of zone random write area " + "resources (zoned.numzrwa, %d) must be less " + "than or equal to maximum active resources " + "(zoned.max_active_zones, %d)", + ns->params.numzrwa, + ns->params.max_active_zones); + return -1; + } + } + } } return 0; @@ -460,17 +546,13 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp) int i; if (!n->subsys) { + /* If no subsys, the ns cannot be attached to more than one ctrl. 
*/ + ns->params.shared = false; if (ns->params.detached) { error_setg(errp, "detached requires that the nvme device is " "linked to an nvme-subsys device"); return; } - - if (ns->params.shared) { - error_setg(errp, "shared requires that the nvme device is " - "linked to an nvme-subsys device"); - return; - } } else { /* * If this namespace belongs to a subsystem (through a link on the @@ -517,7 +599,7 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp) for (i = 0; i < ARRAY_SIZE(subsys->ctrls); i++) { NvmeCtrl *ctrl = subsys->ctrls[i]; - if (ctrl) { + if (ctrl && ctrl != SUBSYS_SLOT_RSVD) { nvme_attach_ns(ctrl, ns); } } @@ -532,14 +614,15 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp) static Property nvme_ns_props[] = { DEFINE_BLOCK_PROPERTIES(NvmeNamespace, blkconf), DEFINE_PROP_BOOL("detached", NvmeNamespace, params.detached, false), - DEFINE_PROP_BOOL("shared", NvmeNamespace, params.shared, false), + DEFINE_PROP_BOOL("shared", NvmeNamespace, params.shared, true), DEFINE_PROP_UINT32("nsid", NvmeNamespace, params.nsid, 0), - DEFINE_PROP_UUID("uuid", NvmeNamespace, params.uuid), + DEFINE_PROP_UUID_NODEFAULT("uuid", NvmeNamespace, params.uuid), DEFINE_PROP_UINT64("eui64", NvmeNamespace, params.eui64, 0), DEFINE_PROP_UINT16("ms", NvmeNamespace, params.ms, 0), DEFINE_PROP_UINT8("mset", NvmeNamespace, params.mset, 0), DEFINE_PROP_UINT8("pi", NvmeNamespace, params.pi, 0), DEFINE_PROP_UINT8("pil", NvmeNamespace, params.pil, 0), + DEFINE_PROP_UINT8("pif", NvmeNamespace, params.pif, 0), DEFINE_PROP_UINT16("mssrl", NvmeNamespace, params.mssrl, 128), DEFINE_PROP_UINT32("mcl", NvmeNamespace, params.mcl, 128), DEFINE_PROP_UINT8("msrc", NvmeNamespace, params.msrc, 127), @@ -556,8 +639,11 @@ static Property nvme_ns_props[] = { params.max_open_zones, 0), DEFINE_PROP_UINT32("zoned.descr_ext_size", NvmeNamespace, params.zd_extension_size, 0), + DEFINE_PROP_UINT32("zoned.numzrwa", NvmeNamespace, params.numzrwa, 0), + DEFINE_PROP_SIZE("zoned.zrwas", NvmeNamespace, params.zrwas, 0), + DEFINE_PROP_SIZE("zoned.zrwafg", NvmeNamespace, params.zrwafg, -1), DEFINE_PROP_BOOL("eui64-default", NvmeNamespace, params.eui64_default, - true), + false), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h index 83ffabade4..7adf042ec3 100644 --- a/hw/nvme/nvme.h +++ b/hw/nvme/nvme.h @@ -15,8 +15,8 @@ * This code is licensed under the GNU GPL v2 or later. 
*/ -#ifndef HW_NVME_INTERNAL_H -#define HW_NVME_INTERNAL_H +#ifndef HW_NVME_NVME_H +#define HW_NVME_NVME_H #include "qemu/uuid.h" #include "hw/pci/pci.h" @@ -24,7 +24,7 @@ #include "block/nvme.h" -#define NVME_MAX_CONTROLLERS 32 +#define NVME_MAX_CONTROLLERS 256 #define NVME_MAX_NAMESPACES 256 #define NVME_EUI64_DEFAULT ((uint64_t)0x5254000000000000) @@ -43,11 +43,13 @@ typedef struct NvmeBus { #define TYPE_NVME_SUBSYS "nvme-subsys" #define NVME_SUBSYS(obj) \ OBJECT_CHECK(NvmeSubsystem, (obj), TYPE_NVME_SUBSYS) +#define SUBSYS_SLOT_RSVD (void *)0xFFFF typedef struct NvmeSubsystem { DeviceState parent_obj; NvmeBus bus; uint8_t subnqn[256]; + char *serial; NvmeCtrl *ctrls[NVME_MAX_CONTROLLERS]; NvmeNamespace *namespaces[NVME_MAX_NAMESPACES + 1]; @@ -67,6 +69,10 @@ static inline NvmeCtrl *nvme_subsys_ctrl(NvmeSubsystem *subsys, return NULL; } + if (subsys->ctrls[cntlid] == SUBSYS_SLOT_RSVD) { + return NULL; + } + return subsys->ctrls[cntlid]; } @@ -102,6 +108,7 @@ typedef struct NvmeNamespaceParams { uint8_t mset; uint8_t pi; uint8_t pil; + uint8_t pif; uint16_t mssrl; uint32_t mcl; @@ -114,6 +121,10 @@ typedef struct NvmeNamespaceParams { uint32_t max_active_zones; uint32_t max_open_zones; uint32_t zd_extension_size; + + uint32_t numzrwa; + uint64_t zrwas; + uint64_t zrwafg; } NvmeNamespaceParams; typedef struct NvmeNamespace { @@ -123,12 +134,21 @@ typedef struct NvmeNamespace { int64_t size; int64_t moff; NvmeIdNs id_ns; + NvmeIdNsNvm id_ns_nvm; NvmeLBAF lbaf; + unsigned int nlbaf; size_t lbasz; const uint32_t *iocs; uint8_t csi; uint16_t status; int attached; + uint8_t pif; + + struct { + uint16_t zrwas; + uint16_t zrwafg; + uint32_t numzrwa; + } zns; QTAILQ_ENTRY(NvmeNamespace) entry; @@ -320,6 +340,8 @@ static inline const char *nvme_adm_opc_str(uint8_t opc) case NVME_ADM_CMD_GET_FEATURES: return "NVME_ADM_CMD_GET_FEATURES"; case NVME_ADM_CMD_ASYNC_EV_REQ: return "NVME_ADM_CMD_ASYNC_EV_REQ"; case NVME_ADM_CMD_NS_ATTACHMENT: return "NVME_ADM_CMD_NS_ATTACHMENT"; + case NVME_ADM_CMD_VIRT_MNGMT: return "NVME_ADM_CMD_VIRT_MNGMT"; + case NVME_ADM_CMD_DBBUF_CONFIG: return "NVME_ADM_CMD_DBBUF_CONFIG"; case NVME_ADM_CMD_FORMAT_NVM: return "NVME_ADM_CMD_FORMAT_NVM"; default: return "NVME_ADM_CMD_UNKNOWN"; } @@ -351,7 +373,11 @@ typedef struct NvmeSQueue { uint32_t tail; uint32_t size; uint64_t dma_addr; - QEMUTimer *timer; + uint64_t db_addr; + uint64_t ei_addr; + QEMUBH *bh; + EventNotifier notifier; + bool ioeventfd_enabled; NvmeRequest *io_req; QTAILQ_HEAD(, NvmeRequest) req_list; QTAILQ_HEAD(, NvmeRequest) out_req_list; @@ -368,7 +394,11 @@ typedef struct NvmeCQueue { uint32_t vector; uint32_t size; uint64_t dma_addr; - QEMUTimer *timer; + uint64_t db_addr; + uint64_t ei_addr; + QEMUBH *bh; + EventNotifier notifier; + bool ioeventfd_enabled; QTAILQ_HEAD(, NvmeSQueue) sq_list; QTAILQ_HEAD(, NvmeRequest) req_list; } NvmeCQueue; @@ -391,6 +421,12 @@ typedef struct NvmeParams { uint8_t zasl; bool auto_transition_zones; bool legacy_cmb; + bool ioeventfd; + uint8_t sriov_max_vfs; + uint16_t sriov_vq_flexible; + uint16_t sriov_vi_flexible; + uint8_t sriov_max_vq_per_vf; + uint8_t sriov_max_vi_per_vf; } NvmeParams; typedef struct NvmeCtrl { @@ -408,7 +444,6 @@ typedef struct NvmeCtrl { uint16_t max_prp_ents; uint16_t cqe_size; uint16_t sqe_size; - uint32_t reg_size; uint32_t max_q_ents; uint8_t outstanding_aers; uint32_t irq_status; @@ -418,6 +453,11 @@ typedef struct NvmeCtrl { uint64_t starttime_ms; uint16_t temperature; uint8_t smart_critical_warning; + uint32_t conf_msix_qsize; + uint32_t 
conf_ioqpairs; + uint64_t dbbuf_dbs; + uint64_t dbbuf_eis; + bool dbbuf_enabled; struct { MemoryRegion mem; @@ -458,10 +498,24 @@ typedef struct NvmeCtrl { uint16_t temp_thresh_hi; uint16_t temp_thresh_low; }; - uint32_t async_config; + + uint32_t async_config; + NvmeHostBehaviorSupport hbs; } features; + + NvmePriCtrlCap pri_ctrl_cap; + NvmeSecCtrlList sec_ctrl_list; + struct { + uint16_t vqrfap; + uint16_t virfap; + } next_pri_ctrl_cap; /* These override pri_ctrl_cap after reset */ } NvmeCtrl; +typedef enum NvmeResetType { + NVME_RESET_FUNCTION = 0, + NVME_RESET_CONTROLLER = 1, +} NvmeResetType; + static inline NvmeNamespace *nvme_ns(NvmeCtrl *n, uint32_t nsid) { if (!nsid || nsid > NVME_MAX_NAMESPACES) { @@ -494,63 +548,40 @@ static inline uint16_t nvme_cid(NvmeRequest *req) return le16_to_cpu(req->cqe.cid); } +static inline NvmeSecCtrlEntry *nvme_sctrl(NvmeCtrl *n) +{ + PCIDevice *pci_dev = &n->parent_obj; + NvmeCtrl *pf = NVME(pcie_sriov_get_pf(pci_dev)); + + if (pci_is_vf(pci_dev)) { + return &pf->sec_ctrl_list.sec[pcie_sriov_vf_number(pci_dev)]; + } + + return NULL; +} + +static inline NvmeSecCtrlEntry *nvme_sctrl_for_cntlid(NvmeCtrl *n, + uint16_t cntlid) +{ + NvmeSecCtrlList *list = &n->sec_ctrl_list; + uint8_t i; + + for (i = 0; i < list->numcntl; i++) { + if (le16_to_cpu(list->sec[i].scid) == cntlid) { + return &list->sec[i]; + } + } + + return NULL; +} + void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns); -uint16_t nvme_bounce_data(NvmeCtrl *n, uint8_t *ptr, uint32_t len, +uint16_t nvme_bounce_data(NvmeCtrl *n, void *ptr, uint32_t len, NvmeTxDirection dir, NvmeRequest *req); -uint16_t nvme_bounce_mdata(NvmeCtrl *n, uint8_t *ptr, uint32_t len, +uint16_t nvme_bounce_mdata(NvmeCtrl *n, void *ptr, uint32_t len, NvmeTxDirection dir, NvmeRequest *req); void nvme_rw_complete_cb(void *opaque, int ret); uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len, NvmeCmd *cmd); -/* from Linux kernel (crypto/crct10dif_common.c) */ -static const uint16_t t10_dif_crc_table[256] = { - 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B, - 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6, - 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6, - 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B, - 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1, - 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C, - 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C, - 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781, - 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8, - 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255, - 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925, - 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698, - 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472, - 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF, - 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF, - 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02, - 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA, - 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067, - 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17, - 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA, - 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640, - 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD, - 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D, - 0x285B, 0xA3EC, 
0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30, - 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759, - 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4, - 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394, - 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29, - 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3, - 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E, - 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E, - 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3 -}; - -uint16_t nvme_check_prinfo(NvmeNamespace *ns, uint8_t prinfo, uint64_t slba, - uint32_t reftag); -uint16_t nvme_dif_mangle_mdata(NvmeNamespace *ns, uint8_t *mbuf, size_t mlen, - uint64_t slba); -void nvme_dif_pract_generate_dif(NvmeNamespace *ns, uint8_t *buf, size_t len, - uint8_t *mbuf, size_t mlen, uint16_t apptag, - uint32_t *reftag); -uint16_t nvme_dif_check(NvmeNamespace *ns, uint8_t *buf, size_t len, - uint8_t *mbuf, size_t mlen, uint8_t prinfo, - uint64_t slba, uint16_t apptag, - uint16_t appmask, uint32_t *reftag); -uint16_t nvme_dif_rw(NvmeCtrl *n, NvmeRequest *req); - - -#endif /* HW_NVME_INTERNAL_H */ +#endif /* HW_NVME_NVME_H */ diff --git a/hw/nvme/subsys.c b/hw/nvme/subsys.c index 93c35950d6..9d2643678b 100644 --- a/hw/nvme/subsys.c +++ b/hw/nvme/subsys.c @@ -11,30 +11,102 @@ #include "nvme.h" -int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp) +static int nvme_subsys_reserve_cntlids(NvmeCtrl *n, int start, int num) { NvmeSubsystem *subsys = n->subsys; - int cntlid; + NvmeSecCtrlList *list = &n->sec_ctrl_list; + NvmeSecCtrlEntry *sctrl; + int i, cnt = 0; - for (cntlid = 0; cntlid < ARRAY_SIZE(subsys->ctrls); cntlid++) { - if (!subsys->ctrls[cntlid]) { - break; + for (i = start; i < ARRAY_SIZE(subsys->ctrls) && cnt < num; i++) { + if (!subsys->ctrls[i]) { + sctrl = &list->sec[cnt]; + sctrl->scid = cpu_to_le16(i); + subsys->ctrls[i] = SUBSYS_SLOT_RSVD; + cnt++; } } - if (cntlid == ARRAY_SIZE(subsys->ctrls)) { - error_setg(errp, "no more free controller id"); + return cnt; +} + +static void nvme_subsys_unreserve_cntlids(NvmeCtrl *n) +{ + NvmeSubsystem *subsys = n->subsys; + NvmeSecCtrlList *list = &n->sec_ctrl_list; + NvmeSecCtrlEntry *sctrl; + int i, cntlid; + + for (i = 0; i < n->params.sriov_max_vfs; i++) { + sctrl = &list->sec[i]; + cntlid = le16_to_cpu(sctrl->scid); + + if (cntlid) { + assert(subsys->ctrls[cntlid] == SUBSYS_SLOT_RSVD); + subsys->ctrls[cntlid] = NULL; + sctrl->scid = 0; + } + } +} + +int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp) +{ + NvmeSubsystem *subsys = n->subsys; + NvmeSecCtrlEntry *sctrl = nvme_sctrl(n); + int cntlid, nsid, num_rsvd, num_vfs = n->params.sriov_max_vfs; + + if (pci_is_vf(&n->parent_obj)) { + cntlid = le16_to_cpu(sctrl->scid); + } else { + for (cntlid = 0; cntlid < ARRAY_SIZE(subsys->ctrls); cntlid++) { + if (!subsys->ctrls[cntlid]) { + break; + } + } + + if (cntlid == ARRAY_SIZE(subsys->ctrls)) { + error_setg(errp, "no more free controller id"); + return -1; + } + + num_rsvd = nvme_subsys_reserve_cntlids(n, cntlid + 1, num_vfs); + if (num_rsvd != num_vfs) { + nvme_subsys_unreserve_cntlids(n); + error_setg(errp, + "no more free controller ids for secondary controllers"); + return -1; + } + } + + if (!subsys->serial) { + subsys->serial = g_strdup(n->params.serial); + } else if (strcmp(subsys->serial, n->params.serial)) { + error_setg(errp, "invalid controller serial"); return -1; } subsys->ctrls[cntlid] = n; + for (nsid = 1; nsid < 
ARRAY_SIZE(subsys->namespaces); nsid++) { + NvmeNamespace *ns = subsys->namespaces[nsid]; + if (ns && ns->params.shared && !ns->params.detached) { + nvme_attach_ns(n, ns); + } + } + return cntlid; } void nvme_subsys_unregister_ctrl(NvmeSubsystem *subsys, NvmeCtrl *n) { - subsys->ctrls[n->cntlid] = NULL; + if (pci_is_vf(&n->parent_obj)) { + subsys->ctrls[n->cntlid] = SUBSYS_SLOT_RSVD; + } else { + subsys->ctrls[n->cntlid] = NULL; + nvme_subsys_unreserve_cntlids(n); + } + + n->cntlid = -1; } static void nvme_subsys_setup(NvmeSubsystem *subsys) @@ -50,8 +122,7 @@ static void nvme_subsys_realize(DeviceState *dev, Error **errp) { NvmeSubsystem *subsys = NVME_SUBSYS(dev); - qbus_create_inplace(&subsys->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev, - dev->id); + qbus_init(&subsys->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev, dev->id); nvme_subsys_setup(subsys); } diff --git a/hw/nvme/trace-events b/hw/nvme/trace-events index 430eeb395b..fccb79f489 100644 --- a/hw/nvme/trace-events +++ b/hw/nvme/trace-events @@ -3,6 +3,7 @@ pci_nvme_irq_msix(uint32_t vector) "raising MSI-X IRQ vector %u" pci_nvme_irq_pin(void) "pulsing IRQ pin" pci_nvme_irq_masked(void) "IRQ is masked" pci_nvme_dma_read(uint64_t prp1, uint64_t prp2) "DMA read, prp1=0x%"PRIx64" prp2=0x%"PRIx64"" +pci_nvme_dbbuf_config(uint64_t dbs_addr, uint64_t eis_addr) "dbs_addr=0x%"PRIx64" eis_addr=0x%"PRIx64"" pci_nvme_map_addr(uint64_t addr, uint64_t len) "addr 0x%"PRIx64" len %"PRIu64"" pci_nvme_map_addr_cmb(uint64_t addr, uint64_t len) "addr 0x%"PRIx64" len %"PRIu64"" pci_nvme_map_prp(uint64_t trans_len, uint32_t len, uint64_t prp1, uint64_t prp2, int num_prps) "trans_len %"PRIu64" len %"PRIu32" prp1 0x%"PRIx64" prp2 0x%"PRIx64" num_prps %d" @@ -20,12 +21,16 @@ pci_nvme_dif_rw_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'" pci_nvme_dif_rw_mdata_in_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'" pci_nvme_dif_rw_mdata_out_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'" pci_nvme_dif_rw_check_cb(uint16_t cid, uint8_t prinfo, uint16_t apptag, uint16_t appmask, uint32_t reftag) "cid %"PRIu16" prinfo 0x%"PRIx8" apptag 0x%"PRIx16" appmask 0x%"PRIx16" reftag 0x%"PRIx32"" -pci_nvme_dif_pract_generate_dif(size_t len, size_t lba_size, size_t chksum_len, uint16_t apptag, uint32_t reftag) "len %zu lba_size %zu chksum_len %zu apptag 0x%"PRIx16" reftag 0x%"PRIx32"" +pci_nvme_dif_pract_generate_dif_crc16(size_t len, size_t lba_size, size_t chksum_len, uint16_t apptag, uint32_t reftag) "len %zu lba_size %zu chksum_len %zu apptag 0x%"PRIx16" reftag 0x%"PRIx32"" +pci_nvme_dif_pract_generate_dif_crc64(size_t len, size_t lba_size, size_t chksum_len, uint16_t apptag, uint64_t reftag) "len %zu lba_size %zu chksum_len %zu apptag 0x%"PRIx16" reftag 0x%"PRIx64"" pci_nvme_dif_check(uint8_t prinfo, uint16_t chksum_len) "prinfo 0x%"PRIx8" chksum_len %"PRIu16"" -pci_nvme_dif_prchk_disabled(uint16_t apptag, uint32_t reftag) "apptag 0x%"PRIx16" reftag 0x%"PRIx32"" -pci_nvme_dif_prchk_guard(uint16_t guard, uint16_t crc) "guard 0x%"PRIx16" crc 0x%"PRIx16"" +pci_nvme_dif_prchk_disabled_crc16(uint16_t apptag, uint32_t reftag) "apptag 0x%"PRIx16" reftag 0x%"PRIx32"" +pci_nvme_dif_prchk_disabled_crc64(uint16_t apptag, uint64_t reftag) "apptag 0x%"PRIx16" reftag 0x%"PRIx64"" +pci_nvme_dif_prchk_guard_crc16(uint16_t guard, uint16_t crc) "guard 0x%"PRIx16" crc 0x%"PRIx16"" +pci_nvme_dif_prchk_guard_crc64(uint64_t guard, uint64_t crc) "guard 0x%"PRIx64" crc 0x%"PRIx64"" pci_nvme_dif_prchk_apptag(uint16_t apptag, uint16_t elbat, uint16_t elbatm) 
"apptag 0x%"PRIx16" elbat 0x%"PRIx16" elbatm 0x%"PRIx16"" -pci_nvme_dif_prchk_reftag(uint32_t reftag, uint32_t elbrt) "reftag 0x%"PRIx32" elbrt 0x%"PRIx32"" +pci_nvme_dif_prchk_reftag_crc16(uint32_t reftag, uint32_t elbrt) "reftag 0x%"PRIx32" elbrt 0x%"PRIx32"" +pci_nvme_dif_prchk_reftag_crc64(uint64_t reftag, uint64_t elbrt) "reftag 0x%"PRIx64" elbrt 0x%"PRIx64"" pci_nvme_copy(uint16_t cid, uint32_t nsid, uint16_t nr, uint8_t format) "cid %"PRIu16" nsid %"PRIu32" nr %"PRIu16" format 0x%"PRIx8"" pci_nvme_copy_source_range(uint64_t slba, uint32_t nlb) "slba 0x%"PRIx64" nlb %"PRIu32"" pci_nvme_copy_out(uint64_t slba, uint32_t nlb) "slba 0x%"PRIx64" nlb %"PRIu32"" @@ -52,6 +57,8 @@ pci_nvme_identify_ctrl(void) "identify controller" pci_nvme_identify_ctrl_csi(uint8_t csi) "identify controller, csi=0x%"PRIx8"" pci_nvme_identify_ns(uint32_t ns) "nsid %"PRIu32"" pci_nvme_identify_ctrl_list(uint8_t cns, uint16_t cntid) "cns 0x%"PRIx8" cntid %"PRIu16"" +pci_nvme_identify_pri_ctrl_cap(uint16_t cntlid) "identify primary controller capabilities cntlid=%"PRIu16"" +pci_nvme_identify_sec_ctrl_list(uint16_t cntlid, uint8_t numcntl) "identify secondary controller list cntlid=%"PRIu16" numcntl=%"PRIu8"" pci_nvme_identify_ns_csi(uint32_t ns, uint8_t csi) "nsid=%"PRIu32", csi=0x%"PRIx8"" pci_nvme_identify_nslist(uint32_t ns) "nsid %"PRIu32"" pci_nvme_identify_nslist_csi(uint16_t ns, uint8_t csi) "nsid=%"PRIu16", csi=0x%"PRIx8"" @@ -77,6 +84,8 @@ pci_nvme_enqueue_event_noqueue(int queued) "queued %d" pci_nvme_enqueue_event_masked(uint8_t typ) "type 0x%"PRIx8"" pci_nvme_no_outstanding_aers(void) "ignoring event; no outstanding AERs" pci_nvme_enqueue_req_completion(uint16_t cid, uint16_t cqid, uint32_t dw0, uint32_t dw1, uint16_t status) "cid %"PRIu16" cqid %"PRIu16" dw0 0x%"PRIx32" dw1 0x%"PRIx32" status 0x%"PRIx16"" +pci_nvme_eventidx_cq(uint16_t cqid, uint16_t new_eventidx) "cqid %"PRIu16" new_eventidx %"PRIu16"" +pci_nvme_eventidx_sq(uint16_t sqid, uint16_t new_eventidx) "sqid %"PRIu16" new_eventidx %"PRIu16"" pci_nvme_mmio_read(uint64_t addr, unsigned size) "addr 0x%"PRIx64" size %d" pci_nvme_mmio_write(uint64_t addr, uint64_t data, unsigned size) "addr 0x%"PRIx64" data 0x%"PRIx64" size %d" pci_nvme_mmio_doorbell_cq(uint16_t cqid, uint16_t new_head) "cqid %"PRIu16" new_head %"PRIu16"" @@ -93,6 +102,8 @@ pci_nvme_mmio_start_success(void) "setting controller enable bit succeeded" pci_nvme_mmio_stopped(void) "cleared controller enable bit" pci_nvme_mmio_shutdown_set(void) "shutdown bit set" pci_nvme_mmio_shutdown_cleared(void) "shutdown bit cleared" +pci_nvme_shadow_doorbell_cq(uint16_t cqid, uint16_t new_shadow_doorbell) "cqid %"PRIu16" new_shadow_doorbell %"PRIu16"" +pci_nvme_shadow_doorbell_sq(uint16_t sqid, uint16_t new_shadow_doorbell) "sqid %"PRIu16" new_shadow_doorbell %"PRIu16"" pci_nvme_open_zone(uint64_t slba, uint32_t zone_idx, int all) "open zone, slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32"" pci_nvme_close_zone(uint64_t slba, uint32_t zone_idx, int all) "close zone, slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32"" pci_nvme_finish_zone(uint64_t slba, uint32_t zone_idx, int all) "finish zone, slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32"" @@ -103,6 +114,9 @@ pci_nvme_set_descriptor_extension(uint64_t slba, uint32_t zone_idx) "set zone de pci_nvme_zd_extension_set(uint32_t zone_idx) "set descriptor extension for zone_idx=%"PRIu32"" pci_nvme_clear_ns_close(uint32_t state, uint64_t slba) "zone state=%"PRIu32", slba=%"PRIu64" transitioned to Closed state" pci_nvme_clear_ns_reset(uint32_t state, uint64_t 
slba) "zone state=%"PRIu32", slba=%"PRIu64" transitioned to Empty state" +pci_nvme_zoned_zrwa_implicit_flush(uint64_t zslba, uint32_t nlb) "zslba 0x%"PRIx64" nlb %"PRIu32"" +pci_nvme_pci_reset(void) "PCI Function Level Reset" +pci_nvme_virt_mngmt(uint16_t cid, uint16_t act, uint16_t cntlid, const char* rt, uint16_t nr) "cid %"PRIu16", act=0x%"PRIx16", ctrlid=%"PRIu16" %s nr=%"PRIu16"" # error conditions pci_nvme_err_mdts(size_t len) "len %zu" @@ -159,8 +173,6 @@ pci_nvme_err_invalid_setfeat(uint32_t dw10) "invalid set features, dw10=0x%"PRIx pci_nvme_err_invalid_log_page(uint16_t cid, uint16_t lid) "cid %"PRIu16" lid 0x%"PRIx16"" pci_nvme_err_startfail_cq(void) "nvme_start_ctrl failed because there are non-admin completion queues" pci_nvme_err_startfail_sq(void) "nvme_start_ctrl failed because there are non-admin submission queues" -pci_nvme_err_startfail_nbarasq(void) "nvme_start_ctrl failed because the admin submission queue address is null" -pci_nvme_err_startfail_nbaracq(void) "nvme_start_ctrl failed because the admin completion queue address is null" pci_nvme_err_startfail_asq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin submission queue address is misaligned: 0x%"PRIx64"" pci_nvme_err_startfail_acq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin completion queue address is misaligned: 0x%"PRIx64"" pci_nvme_err_startfail_page_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the page size is too small: log2size=%u, min=%u" @@ -174,7 +186,9 @@ pci_nvme_err_startfail_asqent_sz_zero(void) "nvme_start_ctrl failed because the pci_nvme_err_startfail_acqent_sz_zero(void) "nvme_start_ctrl failed because the admin completion queue size is zero" pci_nvme_err_startfail_zasl_too_small(uint32_t zasl, uint32_t pagesz) "nvme_start_ctrl failed because zone append size limit %"PRIu32" is too small, needs to be >= %"PRIu32"" pci_nvme_err_startfail(void) "setting controller enable bit failed" +pci_nvme_err_startfail_virt_state(uint16_t vq, uint16_t vi, const char *state) "nvme_start_ctrl failed due to ctrl state: vi=%u vq=%u %s" pci_nvme_err_invalid_mgmt_action(uint8_t action) "action=0x%"PRIx8"" +pci_nvme_err_ignored_mmio_vf_offline(uint64_t addr, unsigned size) "addr 0x%"PRIx64" size %d" # undefined behavior pci_nvme_ub_mmiowr_misaligned32(uint64_t offset) "MMIO write not 32-bit aligned, offset=0x%"PRIx64"" diff --git a/hw/nvram/Kconfig b/hw/nvram/Kconfig index e872fcb194..24cfc18f8b 100644 --- a/hw/nvram/Kconfig +++ b/hw/nvram/Kconfig @@ -15,3 +15,22 @@ config NMC93XX_EEPROM config CHRP_NVRAM bool + +config XLNX_EFUSE_CRC + bool + +config XLNX_EFUSE + bool + select XLNX_EFUSE_CRC + +config XLNX_EFUSE_VERSAL + bool + select XLNX_EFUSE + +config XLNX_EFUSE_ZYNQMP + bool + select XLNX_EFUSE + +config XLNX_BBRAM + bool + select XLNX_EFUSE_CRC diff --git a/hw/nvram/eeprom93xx.c b/hw/nvram/eeprom93xx.c index a1b9c78844..1081e2cc0d 100644 --- a/hw/nvram/eeprom93xx.c +++ b/hw/nvram/eeprom93xx.c @@ -315,7 +315,7 @@ eeprom_t *eeprom93xx_new(DeviceState *dev, uint16_t nwords) addrbits = 6; } - eeprom = (eeprom_t *)g_malloc0(sizeof(*eeprom) + nwords * 2); + eeprom = g_malloc0(sizeof(*eeprom) + nwords * 2); eeprom->size = nwords; eeprom->addrbits = addrbits; /* Output DO is tristate, read results in 1. 
*/ diff --git a/hw/nvram/eeprom_at24c.c b/hw/nvram/eeprom_at24c.c index af6f5dbb99..2d4d8b952f 100644 --- a/hw/nvram/eeprom_at24c.c +++ b/hw/nvram/eeprom_at24c.c @@ -54,17 +54,18 @@ struct EEPROMState { static int at24c_eeprom_event(I2CSlave *s, enum i2c_event event) { - EEPROMState *ee = container_of(s, EEPROMState, parent_obj); + EEPROMState *ee = AT24C_EE(s); switch (event) { case I2C_START_SEND: - case I2C_START_RECV: case I2C_FINISH: ee->haveaddr = 0; + /* fallthrough */ + case I2C_START_RECV: DPRINTK("clear\n"); if (ee->blk && ee->changed) { - int len = blk_pwrite(ee->blk, 0, ee->mem, ee->rsize, 0); - if (len != ee->rsize) { + int ret = blk_pwrite(ee->blk, 0, ee->rsize, ee->mem, 0); + if (ret < 0) { ERR(TYPE_AT24C_EE " : failed to write backing file\n"); } @@ -74,6 +75,8 @@ int at24c_eeprom_event(I2CSlave *s, enum i2c_event event) break; case I2C_NACK: break; + default: + return -1; } return 0; } @@ -84,6 +87,10 @@ uint8_t at24c_eeprom_recv(I2CSlave *s) EEPROMState *ee = AT24C_EE(s); uint8_t ret; + if (ee->haveaddr == 1) { + return 0xff; + } + ret = ee->mem[ee->cur]; ee->cur = (ee->cur + 1u) % ee->rsize; @@ -158,9 +165,9 @@ void at24c_eeprom_reset(DeviceState *state) memset(ee->mem, 0, ee->rsize); if (ee->blk) { - int len = blk_pread(ee->blk, 0, ee->mem, ee->rsize); + int ret = blk_pread(ee->blk, 0, ee->rsize, ee->mem, 0); - if (len != ee->rsize) { + if (ret < 0) { ERR(TYPE_AT24C_EE " : Failed initial sync with backing file\n"); } diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c index 9b8dcca4ea..371a45dfe2 100644 --- a/hw/nvram/fw_cfg.c +++ b/hw/nvram/fw_cfg.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "sysemu/sysemu.h" #include "sysemu/dma.h" @@ -42,6 +41,7 @@ #include "qapi/error.h" #include "hw/acpi/aml-build.h" #include "hw/pci/pci_bus.h" +#include "hw/loader.h" #define FW_CFG_FILE_SLOTS_DFLT 0x20 @@ -179,21 +179,13 @@ error: static void fw_cfg_bootsplash(FWCfgState *s) { - const char *boot_splash_filename = NULL; - const char *boot_splash_time = NULL; char *filename, *file_data; gsize file_size; int file_type; - /* get user configuration */ - QemuOptsList *plist = qemu_find_opts("boot-opts"); - QemuOpts *opts = QTAILQ_FIRST(&plist->head); - boot_splash_filename = qemu_opt_get(opts, "splash"); - boot_splash_time = qemu_opt_get(opts, "splash-time"); - /* insert splash time if user configurated */ - if (boot_splash_time) { - int64_t bst_val = qemu_opt_get_number(opts, "splash-time", -1); + if (current_machine->boot_config.has_splash_time) { + int64_t bst_val = current_machine->boot_config.splash_time; uint16_t bst_le16; /* validate the input */ @@ -209,7 +201,8 @@ static void fw_cfg_bootsplash(FWCfgState *s) } /* insert splash file if user configurated */ - if (boot_splash_filename) { + if (current_machine->boot_config.has_splash) { + const char *boot_splash_filename = current_machine->boot_config.splash; filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, boot_splash_filename); if (filename == NULL) { error_report("failed to find file '%s'", boot_splash_filename); @@ -239,17 +232,11 @@ static void fw_cfg_bootsplash(FWCfgState *s) static void fw_cfg_reboot(FWCfgState *s) { - const char *reboot_timeout = NULL; uint64_t rt_val = -1; uint32_t rt_le32; - /* get user configuration */ - QemuOptsList *plist = qemu_find_opts("boot-opts"); - QemuOpts *opts = QTAILQ_FIRST(&plist->head); - reboot_timeout = qemu_opt_get(opts, "reboot-timeout"); - - if (reboot_timeout) { - rt_val = qemu_opt_get_number(opts, "reboot-timeout", -1); + if 
(current_machine->boot_config.has_reboot_timeout) { + rt_val = current_machine->boot_config.reboot_timeout; /* validate the input */ if (rt_val > 0xffff && rt_val != (uint64_t)-1) { @@ -357,9 +344,10 @@ static void fw_cfg_dma_transfer(FWCfgState *s) dma_addr = s->dma_addr; s->dma_addr = 0; - if (dma_memory_read(s->dma_as, dma_addr, &dma, sizeof(dma))) { + if (dma_memory_read(s->dma_as, dma_addr, + &dma, sizeof(dma), MEMTXATTRS_UNSPECIFIED)) { stl_be_dma(s->dma_as, dma_addr + offsetof(FWCfgDmaAccess, control), - FW_CFG_DMA_CTL_ERROR); + FW_CFG_DMA_CTL_ERROR, MEMTXATTRS_UNSPECIFIED); return; } @@ -399,7 +387,8 @@ static void fw_cfg_dma_transfer(FWCfgState *s) * tested before. */ if (read) { - if (dma_memory_set(s->dma_as, dma.address, 0, len)) { + if (dma_memory_set(s->dma_as, dma.address, 0, len, + MEMTXATTRS_UNSPECIFIED)) { dma.control |= FW_CFG_DMA_CTL_ERROR; } } @@ -418,7 +407,8 @@ static void fw_cfg_dma_transfer(FWCfgState *s) */ if (read) { if (dma_memory_write(s->dma_as, dma.address, - &e->data[s->cur_offset], len)) { + &e->data[s->cur_offset], len, + MEMTXATTRS_UNSPECIFIED)) { dma.control |= FW_CFG_DMA_CTL_ERROR; } } @@ -426,7 +416,8 @@ static void fw_cfg_dma_transfer(FWCfgState *s) if (!e->allow_write || len != dma.length || dma_memory_read(s->dma_as, dma.address, - &e->data[s->cur_offset], len)) { + &e->data[s->cur_offset], len, + MEMTXATTRS_UNSPECIFIED)) { dma.control |= FW_CFG_DMA_CTL_ERROR; } else if (e->write_cb) { e->write_cb(e->callback_opaque, s->cur_offset, len); @@ -442,7 +433,7 @@ static void fw_cfg_dma_transfer(FWCfgState *s) } stl_be_dma(s->dma_as, dma_addr + offsetof(FWCfgDmaAccess, control), - dma.control); + dma.control, MEMTXATTRS_UNSPECIFIED); trace_fw_cfg_read(s, 0); } @@ -618,9 +609,9 @@ static bool fw_cfg_acpi_mr_restore(void *opaque) FWCfgState *s = opaque; bool mr_aligned; - mr_aligned = QEMU_IS_ALIGNED(s->table_mr_size, qemu_real_host_page_size) && - QEMU_IS_ALIGNED(s->linker_mr_size, qemu_real_host_page_size) && - QEMU_IS_ALIGNED(s->rsdp_mr_size, qemu_real_host_page_size); + mr_aligned = QEMU_IS_ALIGNED(s->table_mr_size, qemu_real_host_page_size()) && + QEMU_IS_ALIGNED(s->linker_mr_size, qemu_real_host_page_size()) && + QEMU_IS_ALIGNED(s->rsdp_mr_size, qemu_real_host_page_size()); return s->acpi_mr_restore && !mr_aligned; } @@ -878,6 +869,7 @@ static struct { { "etc/tpm/log", 150 }, { "etc/acpi/rsdp", 160 }, { "bootorder", 170 }, + { "etc/msr_feature_control", 180 }, #define FW_CFG_ORDER_OVERRIDE_LAST 200 }; @@ -1129,7 +1121,7 @@ static void fw_cfg_common_realize(DeviceState *dev, Error **errp) fw_cfg_add_bytes(s, FW_CFG_SIGNATURE, (char *)"QEMU", 4); fw_cfg_add_bytes(s, FW_CFG_UUID, &qemu_uuid, 16); fw_cfg_add_i16(s, FW_CFG_NOGRAPHIC, (uint16_t)!machine->enable_graphics); - fw_cfg_add_i16(s, FW_CFG_BOOT_MENU, (uint16_t)boot_menu); + fw_cfg_add_i16(s, FW_CFG_BOOT_MENU, (uint16_t)(machine->boot_config.has_menu && machine->boot_config.menu)); fw_cfg_bootsplash(s); fw_cfg_reboot(s); @@ -1230,6 +1222,37 @@ FWCfgState *fw_cfg_find(void) return FW_CFG(object_resolve_path_type("", TYPE_FW_CFG, NULL)); } +void load_image_to_fw_cfg(FWCfgState *fw_cfg, uint16_t size_key, + uint16_t data_key, const char *image_name, + bool try_decompress) +{ + size_t size = -1; + uint8_t *data; + + if (image_name == NULL) { + return; + } + + if (try_decompress) { + size = load_image_gzipped_buffer(image_name, + LOAD_IMAGE_MAX_GUNZIP_BYTES, &data); + } + + if (size == (size_t)-1) { + gchar *contents; + gsize length; + + if (!g_file_get_contents(image_name, &contents, &length, NULL)) { + 
error_report("failed to load \"%s\"", image_name); + exit(1); + } + size = length; + data = (uint8_t *)contents; + } + + fw_cfg_add_i32(fw_cfg, size_key, size); + fw_cfg_add_bytes(fw_cfg, data_key, data, size); +} static void fw_cfg_class_init(ObjectClass *klass, void *data) { diff --git a/hw/nvram/mac_nvram.c b/hw/nvram/mac_nvram.c index 11f2d31cdb..3d9ddda217 100644 --- a/hw/nvram/mac_nvram.c +++ b/hw/nvram/mac_nvram.c @@ -25,7 +25,7 @@ #include "qemu/osdep.h" #include "hw/nvram/chrp_nvram.h" -#include "hw/ppc/mac.h" +#include "hw/nvram/mac_nvram.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "qemu/cutils.h" diff --git a/hw/nvram/meson.build b/hw/nvram/meson.build index fd2951a860..f5ee9f6b88 100644 --- a/hw/nvram/meson.build +++ b/hw/nvram/meson.build @@ -1,5 +1,7 @@ -# QOM interfaces must be available anytime QOM is used. -qom_ss.add(files('fw_cfg-interface.c')) +if have_system or have_tools + # QOM interfaces must be available anytime QOM is used. + qom_ss.add(files('fw_cfg-interface.c')) +endif softmmu_ss.add(files('fw_cfg.c')) softmmu_ss.add(when: 'CONFIG_CHRP_NVRAM', if_true: files('chrp_nvram.c')) @@ -9,5 +11,13 @@ softmmu_ss.add(when: 'CONFIG_AT24C', if_true: files('eeprom_at24c.c')) softmmu_ss.add(when: 'CONFIG_MAC_NVRAM', if_true: files('mac_nvram.c')) softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_otp.c')) softmmu_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_nvm.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE_CRC', if_true: files('xlnx-efuse-crc.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE', if_true: files('xlnx-efuse.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE_VERSAL', if_true: files( + 'xlnx-versal-efuse-cache.c', + 'xlnx-versal-efuse-ctrl.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE_ZYNQMP', if_true: files( + 'xlnx-zynqmp-efuse.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_BBRAM', if_true: files('xlnx-bbram.c')) specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_nvram.c')) diff --git a/hw/nvram/spapr_nvram.c b/hw/nvram/spapr_nvram.c index fbfdf47e26..2d72f30442 100644 --- a/hw/nvram/spapr_nvram.c +++ b/hw/nvram/spapr_nvram.c @@ -103,7 +103,7 @@ static void rtas_nvram_store(PowerPCCPU *cpu, SpaprMachineState *spapr, { SpaprNvram *nvram = spapr->nvram; hwaddr offset, buffer, len; - int alen; + int ret; void *membuf; if ((nargs != 3) || (nret != 2)) { @@ -128,9 +128,9 @@ static void rtas_nvram_store(PowerPCCPU *cpu, SpaprMachineState *spapr, membuf = cpu_physical_memory_map(buffer, &len, false); - alen = len; + ret = 0; if (nvram->blk) { - alen = blk_pwrite(nvram->blk, offset, membuf, len, 0); + ret = blk_pwrite(nvram->blk, offset, len, membuf, 0); } assert(nvram->buf); @@ -138,8 +138,8 @@ static void rtas_nvram_store(PowerPCCPU *cpu, SpaprMachineState *spapr, cpu_physical_memory_unmap(membuf, len, 0, len); - rtas_st(rets, 0, (alen < len) ? RTAS_OUT_HW_ERROR : RTAS_OUT_SUCCESS); - rtas_st(rets, 1, (alen < 0) ? 0 : alen); + rtas_st(rets, 0, (ret < 0) ? RTAS_OUT_HW_ERROR : RTAS_OUT_SUCCESS); + rtas_st(rets, 1, (ret < 0) ? 
0 : len); } static void spapr_nvram_realize(SpaprVioDevice *dev, Error **errp) @@ -179,9 +179,9 @@ static void spapr_nvram_realize(SpaprVioDevice *dev, Error **errp) } if (nvram->blk) { - int alen = blk_pread(nvram->blk, 0, nvram->buf, nvram->size); + ret = blk_pread(nvram->blk, 0, nvram->size, nvram->buf, 0); - if (alen != nvram->size) { + if (ret < 0) { error_setg(errp, "can't read spapr-nvram contents"); return; } @@ -219,12 +219,12 @@ static void postload_update_cb(void *opaque, bool running, RunState state) { SpaprNvram *nvram = opaque; - /* This is called after bdrv_invalidate_cache_all. */ + /* This is called after bdrv_activate_all. */ qemu_del_vm_change_state_handler(nvram->vmstate); nvram->vmstate = NULL; - blk_pwrite(nvram->blk, 0, nvram->buf, nvram->size, 0); + blk_pwrite(nvram->blk, 0, nvram->size, nvram->buf, 0); } static int spapr_nvram_post_load(void *opaque, int version_id) diff --git a/hw/nvram/xlnx-bbram.c b/hw/nvram/xlnx-bbram.c new file mode 100644 index 0000000000..c6b484cc85 --- /dev/null +++ b/hw/nvram/xlnx-bbram.c @@ -0,0 +1,545 @@ +/* + * QEMU model of the Xilinx BBRAM Battery Backed RAM + * + * Copyright (c) 2014-2021 Xilinx Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/nvram/xlnx-bbram.h" + +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "sysemu/blockdev.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/nvram/xlnx-efuse.h" + +#ifndef XLNX_BBRAM_ERR_DEBUG +#define XLNX_BBRAM_ERR_DEBUG 0 +#endif + +REG32(BBRAM_STATUS, 0x0) + FIELD(BBRAM_STATUS, AES_CRC_PASS, 9, 1) + FIELD(BBRAM_STATUS, AES_CRC_DONE, 8, 1) + FIELD(BBRAM_STATUS, BBRAM_ZEROIZED, 4, 1) + FIELD(BBRAM_STATUS, PGM_MODE, 0, 1) +REG32(BBRAM_CTRL, 0x4) + FIELD(BBRAM_CTRL, ZEROIZE, 0, 1) +REG32(PGM_MODE, 0x8) +REG32(BBRAM_AES_CRC, 0xc) +REG32(BBRAM_0, 0x10) +REG32(BBRAM_1, 0x14) +REG32(BBRAM_2, 0x18) +REG32(BBRAM_3, 0x1c) +REG32(BBRAM_4, 0x20) +REG32(BBRAM_5, 0x24) +REG32(BBRAM_6, 0x28) +REG32(BBRAM_7, 0x2c) +REG32(BBRAM_8, 0x30) +REG32(BBRAM_SLVERR, 0x34) + FIELD(BBRAM_SLVERR, ENABLE, 0, 1) +REG32(BBRAM_ISR, 0x38) + FIELD(BBRAM_ISR, APB_SLVERR, 0, 1) +REG32(BBRAM_IMR, 0x3c) + FIELD(BBRAM_IMR, APB_SLVERR, 0, 1) +REG32(BBRAM_IER, 0x40) + FIELD(BBRAM_IER, APB_SLVERR, 0, 1) +REG32(BBRAM_IDR, 0x44) + FIELD(BBRAM_IDR, APB_SLVERR, 0, 1) +REG32(BBRAM_MSW_LOCK, 0x4c) + FIELD(BBRAM_MSW_LOCK, VAL, 0, 1) + +#define R_MAX (R_BBRAM_MSW_LOCK + 1) + +#define RAM_MAX (A_BBRAM_8 + 4 - A_BBRAM_0) + +#define BBRAM_PGM_MAGIC 0x757bdf0d + +QEMU_BUILD_BUG_ON(R_MAX != ARRAY_SIZE(((XlnxBBRam *)0)->regs)); + +static bool bbram_msw_locked(XlnxBBRam *s) +{ + return ARRAY_FIELD_EX32(s->regs, BBRAM_MSW_LOCK, VAL) != 0; +} + +static bool bbram_pgm_enabled(XlnxBBRam *s) +{ + return ARRAY_FIELD_EX32(s->regs, BBRAM_STATUS, PGM_MODE) != 0; +} + +static void bbram_bdrv_error(XlnxBBRam *s, int rc, gchar *detail) +{ + Error *errp = NULL; + + error_setg_errno(&errp, -rc, "%s: BBRAM backstore %s failed.", + blk_name(s->blk), detail); + error_report("%s", error_get_pretty(errp)); + error_free(errp); + + g_free(detail); +} + +static void bbram_bdrv_read(XlnxBBRam *s, Error **errp) +{ + uint32_t *ram = &s->regs[R_BBRAM_0]; + int nr = RAM_MAX; + + if (!s->blk) { + return; + } + + s->blk_ro = !blk_supports_write_perm(s->blk); + if (!s->blk_ro) { + int rc; + + rc = blk_set_perm(s->blk, + (BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE), + BLK_PERM_ALL, NULL); + if (rc) { + s->blk_ro = true; + } + } + if (s->blk_ro) { + warn_report("%s: Skip saving updates to read-only BBRAM backstore.", + blk_name(s->blk)); + } + + if (blk_pread(s->blk, 0, nr, ram, 0) < 0) { + error_setg(errp, + "%s: Failed to read %u bytes from BBRAM backstore.", + blk_name(s->blk), nr); + return; + } + + /* Convert from little-endian backstore for each 32-bit word */ + nr /= 4; + while (nr--) { + ram[nr] = le32_to_cpu(ram[nr]); + } +} + +static void bbram_bdrv_sync(XlnxBBRam *s, uint64_t hwaddr) +{ + uint32_t le32; + unsigned offset; + int rc; + + assert(A_BBRAM_0 <= hwaddr && hwaddr <= A_BBRAM_8); + + /* Backstore is always in little-endian */ + le32 = cpu_to_le32(s->regs[hwaddr / 4]); + + /* Update zeroized flag */ + if (le32 && (hwaddr != A_BBRAM_8 || s->bbram8_wo)) { + ARRAY_FIELD_DP32(s->regs, BBRAM_STATUS, BBRAM_ZEROIZED, 0); + } + + if (!s->blk || s->blk_ro) { + return; + } + + offset = hwaddr - A_BBRAM_0; + rc = blk_pwrite(s->blk, offset, 4, &le32, 0); + if (rc < 0) { + bbram_bdrv_error(s, rc, g_strdup_printf("write to offset %u", offset)); + } +} + +static void bbram_bdrv_zero(XlnxBBRam *s) +{ + int rc; + + ARRAY_FIELD_DP32(s->regs, BBRAM_STATUS, BBRAM_ZEROIZED, 1); + + if (!s->blk || s->blk_ro) { + return; + } + + 
rc = blk_make_zero(s->blk, 0); + if (rc < 0) { + bbram_bdrv_error(s, rc, g_strdup("zeroizing")); + } + + /* Restore bbram8 if it is non-zero */ + if (s->regs[R_BBRAM_8]) { + bbram_bdrv_sync(s, A_BBRAM_8); + } +} + +static void bbram_zeroize(XlnxBBRam *s) +{ + int nr = RAM_MAX - (s->bbram8_wo ? 0 : 4); /* only wo bbram8 is cleared */ + + memset(&s->regs[R_BBRAM_0], 0, nr); + bbram_bdrv_zero(s); +} + +static void bbram_update_irq(XlnxBBRam *s) +{ + bool pending = s->regs[R_BBRAM_ISR] & ~s->regs[R_BBRAM_IMR]; + + qemu_set_irq(s->irq_bbram, pending); +} + +static void bbram_ctrl_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + uint32_t val = val64; + + if (val & R_BBRAM_CTRL_ZEROIZE_MASK) { + bbram_zeroize(s); + /* The bit is self clearing */ + s->regs[R_BBRAM_CTRL] &= ~R_BBRAM_CTRL_ZEROIZE_MASK; + } +} + +static void bbram_pgm_mode_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + uint32_t val = val64; + + if (val == BBRAM_PGM_MAGIC) { + bbram_zeroize(s); + + /* The status bit is cleared only by POR */ + ARRAY_FIELD_DP32(s->regs, BBRAM_STATUS, PGM_MODE, 1); + } +} + +static void bbram_aes_crc_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + uint32_t calc_crc; + + if (!bbram_pgm_enabled(s)) { + /* We are not in programming mode, don't do anything */ + return; + } + + /* Perform the AES integrity check */ + s->regs[R_BBRAM_STATUS] |= R_BBRAM_STATUS_AES_CRC_DONE_MASK; + + /* + * Set check status. + * + * ZynqMP BBRAM check has a zero-u32 prepended; see: + * https://github.com/Xilinx/embeddedsw/blob/release-2019.2/lib/sw_services/xilskey/src/xilskey_bbramps_zynqmp.c#L311 + */ + calc_crc = xlnx_efuse_calc_crc(&s->regs[R_BBRAM_0], + (R_BBRAM_8 - R_BBRAM_0), s->crc_zpads); + + ARRAY_FIELD_DP32(s->regs, BBRAM_STATUS, AES_CRC_PASS, + (s->regs[R_BBRAM_AES_CRC] == calc_crc)); +} + +static uint64_t bbram_key_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + uint32_t original_data = *(uint32_t *) reg->data; + + if (bbram_pgm_enabled(s)) { + return val64; + } else { + /* We are not in programming mode, don't do anything */ + qemu_log_mask(LOG_GUEST_ERROR, + "Not in programming mode, dropping the write\n"); + return original_data; + } +} + +static void bbram_key_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + + bbram_bdrv_sync(s, reg->access->addr); +} + +static uint64_t bbram_wo_postr(RegisterInfo *reg, uint64_t val) +{ + return 0; +} + +static uint64_t bbram_r8_postr(RegisterInfo *reg, uint64_t val) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + + return s->bbram8_wo ? 
bbram_wo_postr(reg, val) : val; +} + +static bool bbram_r8_readonly(XlnxBBRam *s) +{ + return !bbram_pgm_enabled(s) || bbram_msw_locked(s); +} + +static uint64_t bbram_r8_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + + if (bbram_r8_readonly(s)) { + val64 = *(uint32_t *)reg->data; + } + + return val64; +} + +static void bbram_r8_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + + if (!bbram_r8_readonly(s)) { + bbram_bdrv_sync(s, A_BBRAM_8); + } +} + +static uint64_t bbram_msw_lock_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + + /* Never lock if bbram8 is wo; and, only POR can clear the lock */ + if (s->bbram8_wo) { + val64 = 0; + } else { + val64 |= s->regs[R_BBRAM_MSW_LOCK]; + } + + return val64; +} + +static void bbram_isr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + + bbram_update_irq(s); +} + +static uint64_t bbram_ier_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + uint32_t val = val64; + + s->regs[R_BBRAM_IMR] &= ~val; + bbram_update_irq(s); + return 0; +} + +static uint64_t bbram_idr_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + uint32_t val = val64; + + s->regs[R_BBRAM_IMR] |= val; + bbram_update_irq(s); + return 0; +} + +static RegisterAccessInfo bbram_ctrl_regs_info[] = { + { .name = "BBRAM_STATUS", .addr = A_BBRAM_STATUS, + .rsvd = 0xee, + .ro = 0x3ff, + },{ .name = "BBRAM_CTRL", .addr = A_BBRAM_CTRL, + .post_write = bbram_ctrl_postw, + },{ .name = "PGM_MODE", .addr = A_PGM_MODE, + .post_write = bbram_pgm_mode_postw, + },{ .name = "BBRAM_AES_CRC", .addr = A_BBRAM_AES_CRC, + .post_write = bbram_aes_crc_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_0", .addr = A_BBRAM_0, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_1", .addr = A_BBRAM_1, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_2", .addr = A_BBRAM_2, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_3", .addr = A_BBRAM_3, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_4", .addr = A_BBRAM_4, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_5", .addr = A_BBRAM_5, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_6", .addr = A_BBRAM_6, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_7", .addr = A_BBRAM_7, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_8", .addr = A_BBRAM_8, + .pre_write = bbram_r8_prew, + .post_write = bbram_r8_postw, + .post_read = bbram_r8_postr, + },{ .name = "BBRAM_SLVERR", .addr = A_BBRAM_SLVERR, + .rsvd = ~1, + },{ .name = "BBRAM_ISR", .addr = A_BBRAM_ISR, + .w1c = 0x1, + .post_write = bbram_isr_postw, + },{ .name = "BBRAM_IMR", .addr = A_BBRAM_IMR, + .ro = 0x1, + },{ .name = "BBRAM_IER", .addr = A_BBRAM_IER, + .pre_write = bbram_ier_prew, + },{ .name = "BBRAM_IDR", .addr = A_BBRAM_IDR, + .pre_write = bbram_idr_prew, + },{ .name = "BBRAM_MSW_LOCK", .addr = A_BBRAM_MSW_LOCK, + .pre_write = bbram_msw_lock_prew, + .ro = 
~R_BBRAM_MSW_LOCK_VAL_MASK, + } +}; + +static void bbram_ctrl_reset(DeviceState *dev) +{ + XlnxBBRam *s = XLNX_BBRAM(dev); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + if (i < R_BBRAM_0 || i > R_BBRAM_8) { + register_reset(&s->regs_info[i]); + } + } + + bbram_update_irq(s); +} + +static const MemoryRegionOps bbram_ctrl_ops = { + .read = register_read_memory, + .write = register_write_memory, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void bbram_ctrl_realize(DeviceState *dev, Error **errp) +{ + XlnxBBRam *s = XLNX_BBRAM(dev); + + if (s->crc_zpads) { + s->bbram8_wo = true; + } + + bbram_bdrv_read(s, errp); +} + +static void bbram_ctrl_init(Object *obj) +{ + XlnxBBRam *s = XLNX_BBRAM(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RegisterInfoArray *reg_array; + + reg_array = + register_init_block32(DEVICE(obj), bbram_ctrl_regs_info, + ARRAY_SIZE(bbram_ctrl_regs_info), + s->regs_info, s->regs, + &bbram_ctrl_ops, + XLNX_BBRAM_ERR_DEBUG, + R_MAX * 4); + + sysbus_init_mmio(sbd, &reg_array->mem); + sysbus_init_irq(sbd, &s->irq_bbram); +} + +static void bbram_prop_set_drive(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + + qdev_prop_drive.set(obj, v, name, opaque, errp); + + /* Fill initial data if backend is attached after realized */ + if (dev->realized) { + bbram_bdrv_read(XLNX_BBRAM(obj), errp); + } +} + +static void bbram_prop_get_drive(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + qdev_prop_drive.get(obj, v, name, opaque, errp); +} + +static void bbram_prop_release_drive(Object *obj, const char *name, + void *opaque) +{ + qdev_prop_drive.release(obj, name, opaque); +} + +static const PropertyInfo bbram_prop_drive = { + .name = "str", + .description = "Node name or ID of a block device to use as BBRAM backend", + .realized_set_allowed = true, + .get = bbram_prop_get_drive, + .set = bbram_prop_set_drive, + .release = bbram_prop_release_drive, +}; + +static const VMStateDescription vmstate_bbram_ctrl = { + .name = TYPE_XLNX_BBRAM, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxBBRam, R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static Property bbram_ctrl_props[] = { + DEFINE_PROP("drive", XlnxBBRam, blk, bbram_prop_drive, BlockBackend *), + DEFINE_PROP_UINT32("crc-zpads", XlnxBBRam, crc_zpads, 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void bbram_ctrl_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = bbram_ctrl_reset; + dc->realize = bbram_ctrl_realize; + dc->vmsd = &vmstate_bbram_ctrl; + device_class_set_props(dc, bbram_ctrl_props); +} + +static const TypeInfo bbram_ctrl_info = { + .name = TYPE_XLNX_BBRAM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxBBRam), + .class_init = bbram_ctrl_class_init, + .instance_init = bbram_ctrl_init, +}; + +static void bbram_ctrl_register_types(void) +{ + type_register_static(&bbram_ctrl_info); +} + +type_init(bbram_ctrl_register_types) diff --git a/hw/nvram/xlnx-efuse-crc.c b/hw/nvram/xlnx-efuse-crc.c new file mode 100644 index 0000000000..5a5cc13f39 --- /dev/null +++ b/hw/nvram/xlnx-efuse-crc.c @@ -0,0 +1,119 @@ +/* + * Xilinx eFuse/bbram CRC calculator + * + * Copyright (c) 2021 Xilinx Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "hw/nvram/xlnx-efuse.h" + +static uint32_t xlnx_efuse_u37_crc(uint32_t prev_crc, uint32_t data, + uint32_t addr) +{ + /* A table for 7-bit slicing */ + static const uint32_t crc_tab[128] = { + 0x00000000, 0xe13b70f7, 0xc79a971f, 0x26a1e7e8, + 0x8ad958cf, 0x6be22838, 0x4d43cfd0, 0xac78bf27, + 0x105ec76f, 0xf165b798, 0xd7c45070, 0x36ff2087, + 0x9a879fa0, 0x7bbcef57, 0x5d1d08bf, 0xbc267848, + 0x20bd8ede, 0xc186fe29, 0xe72719c1, 0x061c6936, + 0xaa64d611, 0x4b5fa6e6, 0x6dfe410e, 0x8cc531f9, + 0x30e349b1, 0xd1d83946, 0xf779deae, 0x1642ae59, + 0xba3a117e, 0x5b016189, 0x7da08661, 0x9c9bf696, + 0x417b1dbc, 0xa0406d4b, 0x86e18aa3, 0x67dafa54, + 0xcba24573, 0x2a993584, 0x0c38d26c, 0xed03a29b, + 0x5125dad3, 0xb01eaa24, 0x96bf4dcc, 0x77843d3b, + 0xdbfc821c, 0x3ac7f2eb, 0x1c661503, 0xfd5d65f4, + 0x61c69362, 0x80fde395, 0xa65c047d, 0x4767748a, + 0xeb1fcbad, 0x0a24bb5a, 0x2c855cb2, 0xcdbe2c45, + 0x7198540d, 0x90a324fa, 0xb602c312, 0x5739b3e5, + 0xfb410cc2, 0x1a7a7c35, 0x3cdb9bdd, 0xdde0eb2a, + 0x82f63b78, 0x63cd4b8f, 0x456cac67, 0xa457dc90, + 0x082f63b7, 0xe9141340, 0xcfb5f4a8, 0x2e8e845f, + 0x92a8fc17, 0x73938ce0, 0x55326b08, 0xb4091bff, + 0x1871a4d8, 0xf94ad42f, 0xdfeb33c7, 0x3ed04330, + 0xa24bb5a6, 0x4370c551, 0x65d122b9, 0x84ea524e, + 0x2892ed69, 0xc9a99d9e, 0xef087a76, 0x0e330a81, + 0xb21572c9, 0x532e023e, 0x758fe5d6, 0x94b49521, + 0x38cc2a06, 0xd9f75af1, 0xff56bd19, 0x1e6dcdee, + 0xc38d26c4, 0x22b65633, 0x0417b1db, 0xe52cc12c, + 0x49547e0b, 0xa86f0efc, 0x8ecee914, 0x6ff599e3, + 0xd3d3e1ab, 0x32e8915c, 0x144976b4, 0xf5720643, + 0x590ab964, 0xb831c993, 0x9e902e7b, 0x7fab5e8c, + 0xe330a81a, 0x020bd8ed, 0x24aa3f05, 0xc5914ff2, + 0x69e9f0d5, 0x88d28022, 0xae7367ca, 0x4f48173d, + 0xf36e6f75, 0x12551f82, 0x34f4f86a, 0xd5cf889d, + 0x79b737ba, 0x988c474d, 0xbe2da0a5, 0x5f16d052 + }; + + /* + * eFuse calculation is shown here: + * https://github.com/Xilinx/embeddedsw/blob/release-2019.2/lib/sw_services/xilskey/src/xilskey_utils.c#L1496 + * + * Each u32 word is appended a 5-bit value, for a total of 37 bits; see: + * https://github.com/Xilinx/embeddedsw/blob/release-2019.2/lib/sw_services/xilskey/src/xilskey_utils.c#L1356 + */ + uint32_t crc = prev_crc; + const unsigned rshf = 7; + const uint32_t im = (1 << rshf) - 1; + const uint32_t rm = (1 << (32 - rshf)) - 1; + const uint32_t i2 = (1 << 2) - 1; + const uint32_t r2 = (1 << 30) - 1; + + unsigned j; + uint32_t i, r; + uint64_t w; + + w = 
(uint64_t)(addr) << 32; + w |= data; + + /* Feed 35 bits, in 5 rounds, each a slice of 7 bits */ + for (j = 0; j < 5; j++) { + r = rm & (crc >> rshf); + i = im & (crc ^ w); + crc = crc_tab[i] ^ r; + + w >>= rshf; + } + + /* Feed the remaining 2 bits */ + r = r2 & (crc >> 2); + i = i2 & (crc ^ w); + crc = crc_tab[i << (rshf - 2)] ^ r; + + return crc; +} + +uint32_t xlnx_efuse_calc_crc(const uint32_t *data, unsigned u32_cnt, + unsigned zpads) +{ + uint32_t crc = 0; + unsigned index; + + for (index = zpads; index; index--) { + crc = xlnx_efuse_u37_crc(crc, 0, (index + u32_cnt)); + } + + for (index = u32_cnt; index; index--) { + crc = xlnx_efuse_u37_crc(crc, data[index - 1], index); + } + + return crc; +} diff --git a/hw/nvram/xlnx-efuse.c b/hw/nvram/xlnx-efuse.c new file mode 100644 index 0000000000..fdfffaab99 --- /dev/null +++ b/hw/nvram/xlnx-efuse.c @@ -0,0 +1,283 @@ +/* + * QEMU model of the EFUSE eFuse + * + * Copyright (c) 2015 Xilinx Inc. + * + * Written by Edgar E. Iglesias + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/nvram/xlnx-efuse.h" + +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "sysemu/blockdev.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" + +#define TBIT0_OFFSET 28 +#define TBIT1_OFFSET 29 +#define TBIT2_OFFSET 30 +#define TBIT3_OFFSET 31 +#define TBITS_PATTERN (0x0AU << TBIT0_OFFSET) +#define TBITS_MASK (0x0FU << TBIT0_OFFSET) + +bool xlnx_efuse_get_bit(XlnxEFuse *s, unsigned int bit) +{ + bool b = s->fuse32[bit / 32] & (1 << (bit % 32)); + return b; +} + +static int efuse_bytes(XlnxEFuse *s) +{ + return ROUND_UP((s->efuse_nr * s->efuse_size) / 8, 4); +} + +static int efuse_bdrv_read(XlnxEFuse *s, Error **errp) +{ + uint32_t *ram = s->fuse32; + int nr = efuse_bytes(s); + + if (!s->blk) { + return 0; + } + + s->blk_ro = !blk_supports_write_perm(s->blk); + if (!s->blk_ro) { + int rc; + + rc = blk_set_perm(s->blk, + (BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE), + BLK_PERM_ALL, NULL); + if (rc) { + s->blk_ro = true; + } + } + if (s->blk_ro) { + warn_report("%s: Skip saving updates to read-only eFUSE backstore.", + blk_name(s->blk)); + } + + if (blk_pread(s->blk, 0, nr, ram, 0) < 0) { + error_setg(errp, "%s: Failed to read %u bytes from eFUSE backstore.", + blk_name(s->blk), nr); + return -1; + } + + /* Convert from little-endian backstore for each 32-bit row */ + nr /= 4; + while (nr--) { + ram[nr] = le32_to_cpu(ram[nr]); + } + + return 0; +} + +static void efuse_bdrv_sync(XlnxEFuse *s, unsigned int bit) +{ + unsigned int row_offset; + uint32_t le32; + + if (!s->blk || s->blk_ro) { + return; /* Silent on read-only backend to avoid message flood */ + } + + /* Backstore is always in little-endian */ + le32 = cpu_to_le32(xlnx_efuse_get_row(s, bit)); + + row_offset = (bit / 32) * 4; + if (blk_pwrite(s->blk, row_offset, 4, &le32, 0) < 0) { + error_report("%s: Failed to write offset %u of eFUSE backstore.", + blk_name(s->blk), row_offset); + } +} + +static int efuse_ro_bits_cmp(const void *a, const void *b) +{ + uint32_t i = *(const uint32_t *)a; + uint32_t j = *(const uint32_t *)b; + + return (i > j) - (i < j); +} + +static void efuse_ro_bits_sort(XlnxEFuse *s) +{ + uint32_t *ary = s->ro_bits; + const uint32_t cnt = s->ro_bits_cnt; + + if (ary && cnt > 1) { + qsort(ary, cnt, sizeof(ary[0]), efuse_ro_bits_cmp); + } +} + +static bool efuse_ro_bits_find(XlnxEFuse *s, uint32_t k) +{ + const uint32_t *ary = s->ro_bits; + const uint32_t cnt = s->ro_bits_cnt; + + if (!ary || !cnt) { + return false; + } + + return bsearch(&k, ary, cnt, sizeof(ary[0]), efuse_ro_bits_cmp) != NULL; +} + +bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit) +{ + if (efuse_ro_bits_find(s, bit)) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: WARN: " + "Ignored setting of readonly efuse bit<%u,%u>!\n", + path, (bit / 32), (bit % 32)); + return false; + } + + s->fuse32[bit / 32] |= 1 << (bit % 32); + efuse_bdrv_sync(s, bit); + return true; +} + +bool xlnx_efuse_k256_check(XlnxEFuse *s, uint32_t crc, unsigned start) +{ + uint32_t calc; + + /* A key always occupies multiple of whole rows */ + assert((start % 32) == 0); + + calc = xlnx_efuse_calc_crc(&s->fuse32[start / 32], (256 / 32), 0); + return calc == crc; +} + +uint32_t xlnx_efuse_tbits_check(XlnxEFuse *s) +{ + int nr; + uint32_t check = 0; + + for (nr = s->efuse_nr; nr-- > 0; ) { + int efuse_start_row_num = (s->efuse_size * nr) / 32; + uint32_t data = s->fuse32[efuse_start_row_num]; + + /* + * If the 
option is on, auto-init blank T-bits. + * (non-blank will still be reported as '0' in the check, e.g., + * for error-injection tests) + */ + if ((data & TBITS_MASK) == 0 && s->init_tbits) { + data |= TBITS_PATTERN; + + s->fuse32[efuse_start_row_num] = data; + efuse_bdrv_sync(s, (efuse_start_row_num * 32 + TBIT0_OFFSET)); + } + + check = (check << 1) | ((data & TBITS_MASK) == TBITS_PATTERN); + } + + return check; +} + +static void efuse_realize(DeviceState *dev, Error **errp) +{ + XlnxEFuse *s = XLNX_EFUSE(dev); + + /* Sort readonly-list for bsearch lookup */ + efuse_ro_bits_sort(s); + + if ((s->efuse_size % 32) != 0) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + error_setg(errp, + "%s.efuse-size: %u: property value not multiple of 32.", + path, s->efuse_size); + return; + } + + s->fuse32 = g_malloc0(efuse_bytes(s)); + if (efuse_bdrv_read(s, errp)) { + g_free(s->fuse32); + } +} + +static void efuse_prop_set_drive(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + + qdev_prop_drive.set(obj, v, name, opaque, errp); + + /* Fill initial data if backend is attached after realized */ + if (dev->realized) { + efuse_bdrv_read(XLNX_EFUSE(obj), errp); + } +} + +static void efuse_prop_get_drive(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + qdev_prop_drive.get(obj, v, name, opaque, errp); +} + +static void efuse_prop_release_drive(Object *obj, const char *name, + void *opaque) +{ + qdev_prop_drive.release(obj, name, opaque); +} + +static const PropertyInfo efuse_prop_drive = { + .name = "str", + .description = "Node name or ID of a block device to use as eFUSE backend", + .realized_set_allowed = true, + .get = efuse_prop_get_drive, + .set = efuse_prop_set_drive, + .release = efuse_prop_release_drive, +}; + +static Property efuse_properties[] = { + DEFINE_PROP("drive", XlnxEFuse, blk, efuse_prop_drive, BlockBackend *), + DEFINE_PROP_UINT8("efuse-nr", XlnxEFuse, efuse_nr, 3), + DEFINE_PROP_UINT32("efuse-size", XlnxEFuse, efuse_size, 64 * 32), + DEFINE_PROP_BOOL("init-factory-tbits", XlnxEFuse, init_tbits, true), + DEFINE_PROP_ARRAY("read-only", XlnxEFuse, ro_bits_cnt, ro_bits, + qdev_prop_uint32, uint32_t), + DEFINE_PROP_END_OF_LIST(), +}; + +static void efuse_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = efuse_realize; + device_class_set_props(dc, efuse_properties); +} + +static const TypeInfo efuse_info = { + .name = TYPE_XLNX_EFUSE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(XlnxEFuse), + .class_init = efuse_class_init, +}; + +static void efuse_register_types(void) +{ + type_register_static(&efuse_info); +} +type_init(efuse_register_types) diff --git a/hw/nvram/xlnx-versal-efuse-cache.c b/hw/nvram/xlnx-versal-efuse-cache.c new file mode 100644 index 0000000000..eaec64d785 --- /dev/null +++ b/hw/nvram/xlnx-versal-efuse-cache.c @@ -0,0 +1,114 @@ +/* + * QEMU model of the EFuse_Cache + * + * Copyright (c) 2017 Xilinx Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/nvram/xlnx-versal-efuse.h" + +#include "qemu/log.h" +#include "hw/qdev-properties.h" + +#define MR_SIZE 0xC00 + +static uint64_t efuse_cache_read(void *opaque, hwaddr addr, unsigned size) +{ + XlnxVersalEFuseCache *s = XLNX_VERSAL_EFUSE_CACHE(opaque); + unsigned int w0 = QEMU_ALIGN_DOWN(addr * 8, 32); + unsigned int w1 = QEMU_ALIGN_DOWN((addr + size - 1) * 8, 32); + + uint64_t ret; + + assert(w0 == w1 || (w0 + 32) == w1); + + ret = xlnx_versal_efuse_read_row(s->efuse, w1, NULL); + if (w0 < w1) { + ret <<= 32; + ret |= xlnx_versal_efuse_read_row(s->efuse, w0, NULL); + } + + /* If 'addr' unaligned, the guest is always assumed to be little-endian. 
*/ + addr &= 3; + if (addr) { + ret >>= 8 * addr; + } + + return ret; +} + +static void efuse_cache_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + /* No Register Writes allowed */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: efuse cache registers are read-only", + __func__); +} + +static const MemoryRegionOps efuse_cache_ops = { + .read = efuse_cache_read, + .write = efuse_cache_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static void efuse_cache_init(Object *obj) +{ + XlnxVersalEFuseCache *s = XLNX_VERSAL_EFUSE_CACHE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &efuse_cache_ops, s, + TYPE_XLNX_VERSAL_EFUSE_CACHE, MR_SIZE); + sysbus_init_mmio(sbd, &s->iomem); +} + +static Property efuse_cache_props[] = { + DEFINE_PROP_LINK("efuse", + XlnxVersalEFuseCache, efuse, + TYPE_XLNX_EFUSE, XlnxEFuse *), + + DEFINE_PROP_END_OF_LIST(), +}; + +static void efuse_cache_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, efuse_cache_props); +} + +static const TypeInfo efuse_cache_info = { + .name = TYPE_XLNX_VERSAL_EFUSE_CACHE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxVersalEFuseCache), + .class_init = efuse_cache_class_init, + .instance_init = efuse_cache_init, +}; + +static void efuse_cache_register_types(void) +{ + type_register_static(&efuse_cache_info); +} + +type_init(efuse_cache_register_types) diff --git a/hw/nvram/xlnx-versal-efuse-ctrl.c b/hw/nvram/xlnx-versal-efuse-ctrl.c new file mode 100644 index 0000000000..b35ba65ab5 --- /dev/null +++ b/hw/nvram/xlnx-versal-efuse-ctrl.c @@ -0,0 +1,793 @@ +/* + * QEMU model of the Versal eFuse controller + * + * Copyright (c) 2020 Xilinx Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/nvram/xlnx-versal-efuse.h" + +#include "qemu/log.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" + +#ifndef XLNX_VERSAL_EFUSE_CTRL_ERR_DEBUG +#define XLNX_VERSAL_EFUSE_CTRL_ERR_DEBUG 0 +#endif + +REG32(WR_LOCK, 0x0) + FIELD(WR_LOCK, LOCK, 0, 16) +REG32(CFG, 0x4) + FIELD(CFG, SLVERR_ENABLE, 5, 1) + FIELD(CFG, MARGIN_RD, 2, 1) + FIELD(CFG, PGM_EN, 1, 1) +REG32(STATUS, 0x8) + FIELD(STATUS, AES_USER_KEY_1_CRC_PASS, 11, 1) + FIELD(STATUS, AES_USER_KEY_1_CRC_DONE, 10, 1) + FIELD(STATUS, AES_USER_KEY_0_CRC_PASS, 9, 1) + FIELD(STATUS, AES_USER_KEY_0_CRC_DONE, 8, 1) + FIELD(STATUS, AES_CRC_PASS, 7, 1) + FIELD(STATUS, AES_CRC_DONE, 6, 1) + FIELD(STATUS, CACHE_DONE, 5, 1) + FIELD(STATUS, CACHE_LOAD, 4, 1) + FIELD(STATUS, EFUSE_2_TBIT, 2, 1) + FIELD(STATUS, EFUSE_1_TBIT, 1, 1) + FIELD(STATUS, EFUSE_0_TBIT, 0, 1) +REG32(EFUSE_PGM_ADDR, 0xc) + FIELD(EFUSE_PGM_ADDR, PAGE, 13, 4) + FIELD(EFUSE_PGM_ADDR, ROW, 5, 8) + FIELD(EFUSE_PGM_ADDR, COLUMN, 0, 5) +REG32(EFUSE_RD_ADDR, 0x10) + FIELD(EFUSE_RD_ADDR, PAGE, 13, 4) + FIELD(EFUSE_RD_ADDR, ROW, 5, 8) +REG32(EFUSE_RD_DATA, 0x14) +REG32(TPGM, 0x18) + FIELD(TPGM, VALUE, 0, 16) +REG32(TRD, 0x1c) + FIELD(TRD, VALUE, 0, 8) +REG32(TSU_H_PS, 0x20) + FIELD(TSU_H_PS, VALUE, 0, 8) +REG32(TSU_H_PS_CS, 0x24) + FIELD(TSU_H_PS_CS, VALUE, 0, 8) +REG32(TRDM, 0x28) + FIELD(TRDM, VALUE, 0, 8) +REG32(TSU_H_CS, 0x2c) + FIELD(TSU_H_CS, VALUE, 0, 8) +REG32(EFUSE_ISR, 0x30) + FIELD(EFUSE_ISR, APB_SLVERR, 31, 1) + FIELD(EFUSE_ISR, CACHE_PARITY_E2, 14, 1) + FIELD(EFUSE_ISR, CACHE_PARITY_E1, 13, 1) + FIELD(EFUSE_ISR, CACHE_PARITY_E0S, 12, 1) + FIELD(EFUSE_ISR, CACHE_PARITY_E0R, 11, 1) + FIELD(EFUSE_ISR, CACHE_APB_SLVERR, 10, 1) + FIELD(EFUSE_ISR, CACHE_REQ_ERROR, 9, 1) + FIELD(EFUSE_ISR, MAIN_REQ_ERROR, 8, 1) + FIELD(EFUSE_ISR, READ_ON_CACHE_LD, 7, 1) + FIELD(EFUSE_ISR, CACHE_FSM_ERROR, 6, 1) + FIELD(EFUSE_ISR, MAIN_FSM_ERROR, 5, 1) + FIELD(EFUSE_ISR, CACHE_ERROR, 4, 1) + FIELD(EFUSE_ISR, RD_ERROR, 3, 1) + FIELD(EFUSE_ISR, RD_DONE, 2, 1) + FIELD(EFUSE_ISR, PGM_ERROR, 1, 1) + FIELD(EFUSE_ISR, PGM_DONE, 0, 1) +REG32(EFUSE_IMR, 0x34) + FIELD(EFUSE_IMR, APB_SLVERR, 31, 1) + FIELD(EFUSE_IMR, CACHE_PARITY_E2, 14, 1) + FIELD(EFUSE_IMR, CACHE_PARITY_E1, 13, 1) + FIELD(EFUSE_IMR, CACHE_PARITY_E0S, 12, 1) + FIELD(EFUSE_IMR, CACHE_PARITY_E0R, 11, 1) + FIELD(EFUSE_IMR, CACHE_APB_SLVERR, 10, 1) + FIELD(EFUSE_IMR, CACHE_REQ_ERROR, 9, 1) + FIELD(EFUSE_IMR, MAIN_REQ_ERROR, 8, 1) + FIELD(EFUSE_IMR, READ_ON_CACHE_LD, 7, 1) + FIELD(EFUSE_IMR, CACHE_FSM_ERROR, 6, 1) + FIELD(EFUSE_IMR, MAIN_FSM_ERROR, 5, 1) + FIELD(EFUSE_IMR, CACHE_ERROR, 4, 1) + FIELD(EFUSE_IMR, RD_ERROR, 3, 1) + FIELD(EFUSE_IMR, RD_DONE, 2, 1) + FIELD(EFUSE_IMR, PGM_ERROR, 1, 1) + FIELD(EFUSE_IMR, PGM_DONE, 0, 1) +REG32(EFUSE_IER, 0x38) + FIELD(EFUSE_IER, APB_SLVERR, 31, 1) + FIELD(EFUSE_IER, CACHE_PARITY_E2, 14, 1) + FIELD(EFUSE_IER, CACHE_PARITY_E1, 13, 1) + FIELD(EFUSE_IER, CACHE_PARITY_E0S, 12, 1) + FIELD(EFUSE_IER, CACHE_PARITY_E0R, 11, 1) + FIELD(EFUSE_IER, CACHE_APB_SLVERR, 10, 1) + FIELD(EFUSE_IER, CACHE_REQ_ERROR, 9, 1) + FIELD(EFUSE_IER, MAIN_REQ_ERROR, 8, 1) + FIELD(EFUSE_IER, READ_ON_CACHE_LD, 7, 1) + FIELD(EFUSE_IER, CACHE_FSM_ERROR, 6, 1) + FIELD(EFUSE_IER, MAIN_FSM_ERROR, 5, 1) + FIELD(EFUSE_IER, CACHE_ERROR, 4, 1) + FIELD(EFUSE_IER, RD_ERROR, 3, 1) + FIELD(EFUSE_IER, RD_DONE, 2, 1) + FIELD(EFUSE_IER, PGM_ERROR, 1, 1) + FIELD(EFUSE_IER, PGM_DONE, 0, 1) +REG32(EFUSE_IDR, 0x3c) + FIELD(EFUSE_IDR, APB_SLVERR, 31, 1) + FIELD(EFUSE_IDR, 
CACHE_PARITY_E2, 14, 1) + FIELD(EFUSE_IDR, CACHE_PARITY_E1, 13, 1) + FIELD(EFUSE_IDR, CACHE_PARITY_E0S, 12, 1) + FIELD(EFUSE_IDR, CACHE_PARITY_E0R, 11, 1) + FIELD(EFUSE_IDR, CACHE_APB_SLVERR, 10, 1) + FIELD(EFUSE_IDR, CACHE_REQ_ERROR, 9, 1) + FIELD(EFUSE_IDR, MAIN_REQ_ERROR, 8, 1) + FIELD(EFUSE_IDR, READ_ON_CACHE_LD, 7, 1) + FIELD(EFUSE_IDR, CACHE_FSM_ERROR, 6, 1) + FIELD(EFUSE_IDR, MAIN_FSM_ERROR, 5, 1) + FIELD(EFUSE_IDR, CACHE_ERROR, 4, 1) + FIELD(EFUSE_IDR, RD_ERROR, 3, 1) + FIELD(EFUSE_IDR, RD_DONE, 2, 1) + FIELD(EFUSE_IDR, PGM_ERROR, 1, 1) + FIELD(EFUSE_IDR, PGM_DONE, 0, 1) +REG32(EFUSE_CACHE_LOAD, 0x40) + FIELD(EFUSE_CACHE_LOAD, LOAD, 0, 1) +REG32(EFUSE_PGM_LOCK, 0x44) + FIELD(EFUSE_PGM_LOCK, SPK_ID_LOCK, 0, 1) +REG32(EFUSE_AES_CRC, 0x48) +REG32(EFUSE_AES_USR_KEY0_CRC, 0x4c) +REG32(EFUSE_AES_USR_KEY1_CRC, 0x50) +REG32(EFUSE_PD, 0x54) +REG32(EFUSE_ANLG_OSC_SW_1LP, 0x60) +REG32(EFUSE_TEST_CTRL, 0x100) + +#define R_MAX (R_EFUSE_TEST_CTRL + 1) + +#define R_WR_LOCK_UNLOCK_PASSCODE (0xDF0D) + +/* + * eFuse layout references: + * https://github.com/Xilinx/embeddedsw/blob/release-2019.2/lib/sw_services/xilnvm/src/xnvm_efuse_hw.h + */ +#define BIT_POS_OF(A_) \ + ((uint32_t)((A_) & (R_EFUSE_PGM_ADDR_ROW_MASK | \ + R_EFUSE_PGM_ADDR_COLUMN_MASK))) + +#define BIT_POS(R_, C_) \ + ((uint32_t)((R_EFUSE_PGM_ADDR_ROW_MASK \ + & ((R_) << R_EFUSE_PGM_ADDR_ROW_SHIFT)) \ + | \ + (R_EFUSE_PGM_ADDR_COLUMN_MASK \ + & ((C_) << R_EFUSE_PGM_ADDR_COLUMN_SHIFT)))) + +#define EFUSE_TBIT_POS(A_) (BIT_POS_OF(A_) >= BIT_POS(0, 28)) + +#define EFUSE_ANCHOR_ROW (0) +#define EFUSE_ANCHOR_3_COL (27) +#define EFUSE_ANCHOR_1_COL (1) + +#define EFUSE_AES_KEY_START BIT_POS(12, 0) +#define EFUSE_AES_KEY_END BIT_POS(19, 31) +#define EFUSE_USER_KEY_0_START BIT_POS(20, 0) +#define EFUSE_USER_KEY_0_END BIT_POS(27, 31) +#define EFUSE_USER_KEY_1_START BIT_POS(28, 0) +#define EFUSE_USER_KEY_1_END BIT_POS(35, 31) + +#define EFUSE_RD_BLOCKED_START EFUSE_AES_KEY_START +#define EFUSE_RD_BLOCKED_END EFUSE_USER_KEY_1_END + +#define EFUSE_GLITCH_DET_WR_LK BIT_POS(4, 31) +#define EFUSE_PPK0_WR_LK BIT_POS(43, 6) +#define EFUSE_PPK1_WR_LK BIT_POS(43, 7) +#define EFUSE_PPK2_WR_LK BIT_POS(43, 8) +#define EFUSE_AES_WR_LK BIT_POS(43, 11) +#define EFUSE_USER_KEY_0_WR_LK BIT_POS(43, 13) +#define EFUSE_USER_KEY_1_WR_LK BIT_POS(43, 15) +#define EFUSE_PUF_SYN_LK BIT_POS(43, 16) +#define EFUSE_DNA_WR_LK BIT_POS(43, 27) +#define EFUSE_BOOT_ENV_WR_LK BIT_POS(43, 28) + +#define EFUSE_PGM_LOCKED_START BIT_POS(44, 0) +#define EFUSE_PGM_LOCKED_END BIT_POS(51, 31) + +#define EFUSE_PUF_PAGE (2) +#define EFUSE_PUF_SYN_START BIT_POS(129, 0) +#define EFUSE_PUF_SYN_END BIT_POS(255, 27) + +#define EFUSE_KEY_CRC_LK_ROW (43) +#define EFUSE_AES_KEY_CRC_LK_MASK ((1U << 9) | (1U << 10)) +#define EFUSE_USER_KEY_0_CRC_LK_MASK (1U << 12) +#define EFUSE_USER_KEY_1_CRC_LK_MASK (1U << 14) + +/* + * A handy macro to return value of an array element, + * or a specific default if given index is out of bound. + */ +#define ARRAY_GET(A_, I_, D_) \ + ((unsigned int)(I_) < ARRAY_SIZE(A_) ? 
(A_)[I_] : (D_)) + +QEMU_BUILD_BUG_ON(R_MAX != ARRAY_SIZE(((XlnxVersalEFuseCtrl *)0)->regs)); + +typedef struct XlnxEFuseLkSpec { + uint16_t row; + uint16_t lk_bit; +} XlnxEFuseLkSpec; + +static void efuse_imr_update_irq(XlnxVersalEFuseCtrl *s) +{ + bool pending = s->regs[R_EFUSE_ISR] & ~s->regs[R_EFUSE_IMR]; + qemu_set_irq(s->irq_efuse_imr, pending); +} + +static void efuse_isr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + efuse_imr_update_irq(s); +} + +static uint64_t efuse_ier_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + uint32_t val = val64; + + s->regs[R_EFUSE_IMR] &= ~val; + efuse_imr_update_irq(s); + return 0; +} + +static uint64_t efuse_idr_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + uint32_t val = val64; + + s->regs[R_EFUSE_IMR] |= val; + efuse_imr_update_irq(s); + return 0; +} + +static void efuse_status_tbits_sync(XlnxVersalEFuseCtrl *s) +{ + uint32_t check = xlnx_efuse_tbits_check(s->efuse); + uint32_t val = s->regs[R_STATUS]; + + val = FIELD_DP32(val, STATUS, EFUSE_0_TBIT, !!(check & (1 << 0))); + val = FIELD_DP32(val, STATUS, EFUSE_1_TBIT, !!(check & (1 << 1))); + val = FIELD_DP32(val, STATUS, EFUSE_2_TBIT, !!(check & (1 << 2))); + + s->regs[R_STATUS] = val; +} + +static void efuse_anchor_bits_check(XlnxVersalEFuseCtrl *s) +{ + unsigned page; + + if (!s->efuse || !s->efuse->init_tbits) { + return; + } + + for (page = 0; page < s->efuse->efuse_nr; page++) { + uint32_t row = 0, bit; + + row = FIELD_DP32(row, EFUSE_PGM_ADDR, PAGE, page); + row = FIELD_DP32(row, EFUSE_PGM_ADDR, ROW, EFUSE_ANCHOR_ROW); + + bit = FIELD_DP32(row, EFUSE_PGM_ADDR, COLUMN, EFUSE_ANCHOR_3_COL); + if (!xlnx_efuse_get_bit(s->efuse, bit)) { + xlnx_efuse_set_bit(s->efuse, bit); + } + + bit = FIELD_DP32(row, EFUSE_PGM_ADDR, COLUMN, EFUSE_ANCHOR_1_COL); + if (!xlnx_efuse_get_bit(s->efuse, bit)) { + xlnx_efuse_set_bit(s->efuse, bit); + } + } +} + +static void efuse_key_crc_check(RegisterInfo *reg, uint32_t crc, + uint32_t pass_mask, uint32_t done_mask, + unsigned first, uint32_t lk_mask) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + uint32_t r, lk_bits; + + /* + * To start, assume both DONE and PASS, and clear PASS by xor + * if CRC-check fails or CRC-check disabled by lock fuse. 
+ */ + r = s->regs[R_STATUS] | done_mask | pass_mask; + + lk_bits = xlnx_efuse_get_row(s->efuse, EFUSE_KEY_CRC_LK_ROW) & lk_mask; + if (lk_bits == 0 && xlnx_efuse_k256_check(s->efuse, crc, first)) { + pass_mask = 0; + } + + s->regs[R_STATUS] = r ^ pass_mask; +} + +static void efuse_data_sync(XlnxVersalEFuseCtrl *s) +{ + efuse_status_tbits_sync(s); +} + +static int efuse_lk_spec_cmp(const void *a, const void *b) +{ + uint16_t r1 = ((const XlnxEFuseLkSpec *)a)->row; + uint16_t r2 = ((const XlnxEFuseLkSpec *)b)->row; + + return (r1 > r2) - (r1 < r2); +} + +static void efuse_lk_spec_sort(XlnxVersalEFuseCtrl *s) +{ + XlnxEFuseLkSpec *ary = s->extra_pg0_lock_spec; + const uint32_t n8 = s->extra_pg0_lock_n16 * 2; + const uint32_t sz = sizeof(ary[0]); + const uint32_t cnt = n8 / sz; + + if (ary && cnt) { + qsort(ary, cnt, sz, efuse_lk_spec_cmp); + } +} + +static uint32_t efuse_lk_spec_find(XlnxVersalEFuseCtrl *s, uint32_t row) +{ + const XlnxEFuseLkSpec *ary = s->extra_pg0_lock_spec; + const uint32_t n8 = s->extra_pg0_lock_n16 * 2; + const uint32_t sz = sizeof(ary[0]); + const uint32_t cnt = n8 / sz; + const XlnxEFuseLkSpec *item = NULL; + + if (ary && cnt) { + XlnxEFuseLkSpec k = { .row = row, }; + + item = bsearch(&k, ary, cnt, sz, efuse_lk_spec_cmp); + } + + return item ? item->lk_bit : 0; +} + +static uint32_t efuse_bit_locked(XlnxVersalEFuseCtrl *s, uint32_t bit) +{ + /* Hard-coded locks */ + static const uint16_t pg0_hard_lock[] = { + [4] = EFUSE_GLITCH_DET_WR_LK, + [37] = EFUSE_BOOT_ENV_WR_LK, + + [8 ... 11] = EFUSE_DNA_WR_LK, + [12 ... 19] = EFUSE_AES_WR_LK, + [20 ... 27] = EFUSE_USER_KEY_0_WR_LK, + [28 ... 35] = EFUSE_USER_KEY_1_WR_LK, + [64 ... 71] = EFUSE_PPK0_WR_LK, + [72 ... 79] = EFUSE_PPK1_WR_LK, + [80 ... 87] = EFUSE_PPK2_WR_LK, + }; + + uint32_t row = FIELD_EX32(bit, EFUSE_PGM_ADDR, ROW); + uint32_t lk_bit = ARRAY_GET(pg0_hard_lock, row, 0); + + return lk_bit ? lk_bit : efuse_lk_spec_find(s, row); +} + +static bool efuse_pgm_locked(XlnxVersalEFuseCtrl *s, unsigned int bit) +{ + + unsigned int lock = 1; + + /* Global lock */ + if (!ARRAY_FIELD_EX32(s->regs, CFG, PGM_EN)) { + goto ret_lock; + } + + /* Row lock */ + switch (FIELD_EX32(bit, EFUSE_PGM_ADDR, PAGE)) { + case 0: + if (ARRAY_FIELD_EX32(s->regs, EFUSE_PGM_LOCK, SPK_ID_LOCK) && + bit >= EFUSE_PGM_LOCKED_START && bit <= EFUSE_PGM_LOCKED_END) { + goto ret_lock; + } + + lock = efuse_bit_locked(s, bit); + break; + case EFUSE_PUF_PAGE: + if (bit < EFUSE_PUF_SYN_START || bit > EFUSE_PUF_SYN_END) { + lock = 0; + goto ret_lock; + } + + lock = EFUSE_PUF_SYN_LK; + break; + default: + lock = 0; + goto ret_lock; + } + + /* Row lock by an efuse bit */ + if (lock) { + lock = xlnx_efuse_get_bit(s->efuse, lock); + } + + ret_lock: + return lock != 0; +} + +static void efuse_pgm_addr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + unsigned bit = val64; + bool ok = false; + + /* Always zero out PGM_ADDR because it is write-only */ + s->regs[R_EFUSE_PGM_ADDR] = 0; + + /* + * Indicate error if bit is write-protected (or read-only + * as guarded by efuse_set_bit()). + * + * Keep it simple by not modeling program timing. + * + * Note: model must NEVER clear the PGM_ERROR bit; it is + * up to guest to do so (or by reset). 
+ */ + if (efuse_pgm_locked(s, bit)) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Denied setting of efuse<%u, %u, %u>\n", + path, + FIELD_EX32(bit, EFUSE_PGM_ADDR, PAGE), + FIELD_EX32(bit, EFUSE_PGM_ADDR, ROW), + FIELD_EX32(bit, EFUSE_PGM_ADDR, COLUMN)); + } else if (xlnx_efuse_set_bit(s->efuse, bit)) { + ok = true; + if (EFUSE_TBIT_POS(bit)) { + efuse_status_tbits_sync(s); + } + } + + if (!ok) { + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, PGM_ERROR, 1); + } + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, PGM_DONE, 1); + efuse_imr_update_irq(s); +} + +static void efuse_rd_addr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + unsigned bit = val64; + bool denied; + + /* Always zero out RD_ADDR because it is write-only */ + s->regs[R_EFUSE_RD_ADDR] = 0; + + /* + * Indicate error if row is read-blocked. + * + * Note: model must NEVER clear the RD_ERROR bit; it is + * up to guest to do so (or by reset). + */ + s->regs[R_EFUSE_RD_DATA] = xlnx_versal_efuse_read_row(s->efuse, + bit, &denied); + if (denied) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Denied reading of efuse<%u, %u>\n", + path, + FIELD_EX32(bit, EFUSE_RD_ADDR, PAGE), + FIELD_EX32(bit, EFUSE_RD_ADDR, ROW)); + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_ERROR, 1); + } + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_DONE, 1); + efuse_imr_update_irq(s); + return; +} + +static uint64_t efuse_cache_load_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + + if (val64 & R_EFUSE_CACHE_LOAD_LOAD_MASK) { + efuse_data_sync(s); + + ARRAY_FIELD_DP32(s->regs, STATUS, CACHE_DONE, 1); + efuse_imr_update_irq(s); + } + + return 0; +} + +static uint64_t efuse_pgm_lock_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + + /* Ignore all other bits */ + val64 = FIELD_EX32(val64, EFUSE_PGM_LOCK, SPK_ID_LOCK); + + /* Once the bit is written 1, only reset will clear it to 0 */ + val64 |= ARRAY_FIELD_EX32(s->regs, EFUSE_PGM_LOCK, SPK_ID_LOCK); + + return val64; +} + +static void efuse_aes_crc_postw(RegisterInfo *reg, uint64_t val64) +{ + efuse_key_crc_check(reg, val64, + R_STATUS_AES_CRC_PASS_MASK, + R_STATUS_AES_CRC_DONE_MASK, + EFUSE_AES_KEY_START, + EFUSE_AES_KEY_CRC_LK_MASK); +} + +static void efuse_aes_u0_crc_postw(RegisterInfo *reg, uint64_t val64) +{ + efuse_key_crc_check(reg, val64, + R_STATUS_AES_USER_KEY_0_CRC_PASS_MASK, + R_STATUS_AES_USER_KEY_0_CRC_DONE_MASK, + EFUSE_USER_KEY_0_START, + EFUSE_USER_KEY_0_CRC_LK_MASK); +} + +static void efuse_aes_u1_crc_postw(RegisterInfo *reg, uint64_t val64) +{ + efuse_key_crc_check(reg, val64, + R_STATUS_AES_USER_KEY_1_CRC_PASS_MASK, + R_STATUS_AES_USER_KEY_1_CRC_DONE_MASK, + EFUSE_USER_KEY_1_START, + EFUSE_USER_KEY_1_CRC_LK_MASK); +} + +static uint64_t efuse_wr_lock_prew(RegisterInfo *reg, uint64_t val) +{ + return val != R_WR_LOCK_UNLOCK_PASSCODE; +} + +static const RegisterAccessInfo efuse_ctrl_regs_info[] = { + { .name = "WR_LOCK", .addr = A_WR_LOCK, + .reset = 0x1, + .pre_write = efuse_wr_lock_prew, + },{ .name = "CFG", .addr = A_CFG, + .rsvd = 0x9, + },{ .name = "STATUS", .addr = A_STATUS, + .rsvd = 0x8, + .ro = 0xfff, + },{ .name = "EFUSE_PGM_ADDR", .addr = A_EFUSE_PGM_ADDR, + .post_write = efuse_pgm_addr_postw, + },{ .name = "EFUSE_RD_ADDR", .addr = A_EFUSE_RD_ADDR, + .rsvd = 0x1f, + .post_write = efuse_rd_addr_postw, + },{ 
.name = "EFUSE_RD_DATA", .addr = A_EFUSE_RD_DATA, + .ro = 0xffffffff, + },{ .name = "TPGM", .addr = A_TPGM, + },{ .name = "TRD", .addr = A_TRD, + .reset = 0x19, + },{ .name = "TSU_H_PS", .addr = A_TSU_H_PS, + .reset = 0xff, + },{ .name = "TSU_H_PS_CS", .addr = A_TSU_H_PS_CS, + .reset = 0x11, + },{ .name = "TRDM", .addr = A_TRDM, + .reset = 0x3a, + },{ .name = "TSU_H_CS", .addr = A_TSU_H_CS, + .reset = 0x16, + },{ .name = "EFUSE_ISR", .addr = A_EFUSE_ISR, + .rsvd = 0x7fff8000, + .w1c = 0x80007fff, + .post_write = efuse_isr_postw, + },{ .name = "EFUSE_IMR", .addr = A_EFUSE_IMR, + .reset = 0x80007fff, + .rsvd = 0x7fff8000, + .ro = 0xffffffff, + },{ .name = "EFUSE_IER", .addr = A_EFUSE_IER, + .rsvd = 0x7fff8000, + .pre_write = efuse_ier_prew, + },{ .name = "EFUSE_IDR", .addr = A_EFUSE_IDR, + .rsvd = 0x7fff8000, + .pre_write = efuse_idr_prew, + },{ .name = "EFUSE_CACHE_LOAD", .addr = A_EFUSE_CACHE_LOAD, + .pre_write = efuse_cache_load_prew, + },{ .name = "EFUSE_PGM_LOCK", .addr = A_EFUSE_PGM_LOCK, + .pre_write = efuse_pgm_lock_prew, + },{ .name = "EFUSE_AES_CRC", .addr = A_EFUSE_AES_CRC, + .post_write = efuse_aes_crc_postw, + },{ .name = "EFUSE_AES_USR_KEY0_CRC", .addr = A_EFUSE_AES_USR_KEY0_CRC, + .post_write = efuse_aes_u0_crc_postw, + },{ .name = "EFUSE_AES_USR_KEY1_CRC", .addr = A_EFUSE_AES_USR_KEY1_CRC, + .post_write = efuse_aes_u1_crc_postw, + },{ .name = "EFUSE_PD", .addr = A_EFUSE_PD, + .ro = 0xfffffffe, + },{ .name = "EFUSE_ANLG_OSC_SW_1LP", .addr = A_EFUSE_ANLG_OSC_SW_1LP, + },{ .name = "EFUSE_TEST_CTRL", .addr = A_EFUSE_TEST_CTRL, + .reset = 0x8, + } +}; + +static void efuse_ctrl_reg_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + RegisterInfoArray *reg_array = opaque; + XlnxVersalEFuseCtrl *s; + Object *dev; + + assert(reg_array != NULL); + + dev = reg_array->mem.owner; + assert(dev); + + s = XLNX_VERSAL_EFUSE_CTRL(dev); + + if (addr != A_WR_LOCK && s->regs[R_WR_LOCK]) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, + "%s[reg_0x%02lx]: Attempt to write locked register.\n", + path, (long)addr); + } else { + register_write_memory(opaque, addr, data, size); + } +} + +static void efuse_ctrl_register_reset(RegisterInfo *reg) +{ + if (!reg->data || !reg->access) { + return; + } + + /* Reset must not trigger some registers' writers */ + switch (reg->access->addr) { + case A_EFUSE_AES_CRC: + case A_EFUSE_AES_USR_KEY0_CRC: + case A_EFUSE_AES_USR_KEY1_CRC: + *(uint32_t *)reg->data = reg->access->reset; + return; + } + + register_reset(reg); +} + +static void efuse_ctrl_reset(DeviceState *dev) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(dev); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + efuse_ctrl_register_reset(&s->regs_info[i]); + } + + efuse_anchor_bits_check(s); + efuse_data_sync(s); + efuse_imr_update_irq(s); +} + +static const MemoryRegionOps efuse_ctrl_ops = { + .read = register_read_memory, + .write = efuse_ctrl_reg_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void efuse_ctrl_realize(DeviceState *dev, Error **errp) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(dev); + const uint32_t lks_sz = sizeof(XlnxEFuseLkSpec) / 2; + + if (!s->efuse) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + error_setg(errp, "%s.efuse: link property not connected to XLNX-EFUSE", + path); + return; + } + + /* Sort property-defined pgm-locks for bsearch lookup */ + if ((s->extra_pg0_lock_n16 % 
lks_sz) != 0) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + error_setg(errp, + "%s.pg0-lock: array property item-count not multiple of %u", + path, lks_sz); + return; + } + + efuse_lk_spec_sort(s); +} + +static void efuse_ctrl_init(Object *obj) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RegisterInfoArray *reg_array; + + reg_array = + register_init_block32(DEVICE(obj), efuse_ctrl_regs_info, + ARRAY_SIZE(efuse_ctrl_regs_info), + s->regs_info, s->regs, + &efuse_ctrl_ops, + XLNX_VERSAL_EFUSE_CTRL_ERR_DEBUG, + R_MAX * 4); + + sysbus_init_mmio(sbd, &reg_array->mem); + sysbus_init_irq(sbd, &s->irq_efuse_imr); +} + +static const VMStateDescription vmstate_efuse_ctrl = { + .name = TYPE_XLNX_VERSAL_EFUSE_CTRL, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxVersalEFuseCtrl, R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static Property efuse_ctrl_props[] = { + DEFINE_PROP_LINK("efuse", + XlnxVersalEFuseCtrl, efuse, + TYPE_XLNX_EFUSE, XlnxEFuse *), + DEFINE_PROP_ARRAY("pg0-lock", + XlnxVersalEFuseCtrl, extra_pg0_lock_n16, + extra_pg0_lock_spec, qdev_prop_uint16, uint16_t), + + DEFINE_PROP_END_OF_LIST(), +}; + +static void efuse_ctrl_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = efuse_ctrl_reset; + dc->realize = efuse_ctrl_realize; + dc->vmsd = &vmstate_efuse_ctrl; + device_class_set_props(dc, efuse_ctrl_props); +} + +static const TypeInfo efuse_ctrl_info = { + .name = TYPE_XLNX_VERSAL_EFUSE_CTRL, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxVersalEFuseCtrl), + .class_init = efuse_ctrl_class_init, + .instance_init = efuse_ctrl_init, +}; + +static void efuse_ctrl_register_types(void) +{ + type_register_static(&efuse_ctrl_info); +} + +type_init(efuse_ctrl_register_types) + +/* + * Retrieve a row, with unreadable bits returned as 0. + */ +uint32_t xlnx_versal_efuse_read_row(XlnxEFuse *efuse, + uint32_t bit, bool *denied) +{ + bool dummy; + + if (!denied) { + denied = &dummy; + } + + if (bit >= EFUSE_RD_BLOCKED_START && bit <= EFUSE_RD_BLOCKED_END) { + *denied = true; + return 0; + } + + *denied = false; + return xlnx_efuse_get_row(efuse, bit); +} diff --git a/hw/nvram/xlnx-zynqmp-efuse.c b/hw/nvram/xlnx-zynqmp-efuse.c new file mode 100644 index 0000000000..228ba0bbfa --- /dev/null +++ b/hw/nvram/xlnx-zynqmp-efuse.c @@ -0,0 +1,861 @@ +/* + * QEMU model of the ZynqMP eFuse + * + * Copyright (c) 2015 Xilinx Inc. + * + * Written by Edgar E. Iglesias + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/nvram/xlnx-zynqmp-efuse.h" + +#include "qemu/log.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" + +#ifndef ZYNQMP_EFUSE_ERR_DEBUG +#define ZYNQMP_EFUSE_ERR_DEBUG 0 +#endif + +REG32(WR_LOCK, 0x0) + FIELD(WR_LOCK, LOCK, 0, 16) +REG32(CFG, 0x4) + FIELD(CFG, SLVERR_ENABLE, 5, 1) + FIELD(CFG, MARGIN_RD, 2, 2) + FIELD(CFG, PGM_EN, 1, 1) + FIELD(CFG, EFUSE_CLK_SEL, 0, 1) +REG32(STATUS, 0x8) + FIELD(STATUS, AES_CRC_PASS, 7, 1) + FIELD(STATUS, AES_CRC_DONE, 6, 1) + FIELD(STATUS, CACHE_DONE, 5, 1) + FIELD(STATUS, CACHE_LOAD, 4, 1) + FIELD(STATUS, EFUSE_3_TBIT, 2, 1) + FIELD(STATUS, EFUSE_2_TBIT, 1, 1) + FIELD(STATUS, EFUSE_0_TBIT, 0, 1) +REG32(EFUSE_PGM_ADDR, 0xc) + FIELD(EFUSE_PGM_ADDR, EFUSE, 11, 2) + FIELD(EFUSE_PGM_ADDR, ROW, 5, 6) + FIELD(EFUSE_PGM_ADDR, COLUMN, 0, 5) +REG32(EFUSE_RD_ADDR, 0x10) + FIELD(EFUSE_RD_ADDR, EFUSE, 11, 2) + FIELD(EFUSE_RD_ADDR, ROW, 5, 6) +REG32(EFUSE_RD_DATA, 0x14) +REG32(TPGM, 0x18) + FIELD(TPGM, VALUE, 0, 16) +REG32(TRD, 0x1c) + FIELD(TRD, VALUE, 0, 8) +REG32(TSU_H_PS, 0x20) + FIELD(TSU_H_PS, VALUE, 0, 8) +REG32(TSU_H_PS_CS, 0x24) + FIELD(TSU_H_PS_CS, VALUE, 0, 8) +REG32(TSU_H_CS, 0x2c) + FIELD(TSU_H_CS, VALUE, 0, 4) +REG32(EFUSE_ISR, 0x30) + FIELD(EFUSE_ISR, APB_SLVERR, 31, 1) + FIELD(EFUSE_ISR, CACHE_ERROR, 4, 1) + FIELD(EFUSE_ISR, RD_ERROR, 3, 1) + FIELD(EFUSE_ISR, RD_DONE, 2, 1) + FIELD(EFUSE_ISR, PGM_ERROR, 1, 1) + FIELD(EFUSE_ISR, PGM_DONE, 0, 1) +REG32(EFUSE_IMR, 0x34) + FIELD(EFUSE_IMR, APB_SLVERR, 31, 1) + FIELD(EFUSE_IMR, CACHE_ERROR, 4, 1) + FIELD(EFUSE_IMR, RD_ERROR, 3, 1) + FIELD(EFUSE_IMR, RD_DONE, 2, 1) + FIELD(EFUSE_IMR, PGM_ERROR, 1, 1) + FIELD(EFUSE_IMR, PGM_DONE, 0, 1) +REG32(EFUSE_IER, 0x38) + FIELD(EFUSE_IER, APB_SLVERR, 31, 1) + FIELD(EFUSE_IER, CACHE_ERROR, 4, 1) + FIELD(EFUSE_IER, RD_ERROR, 3, 1) + FIELD(EFUSE_IER, RD_DONE, 2, 1) + FIELD(EFUSE_IER, PGM_ERROR, 1, 1) + FIELD(EFUSE_IER, PGM_DONE, 0, 1) +REG32(EFUSE_IDR, 0x3c) + FIELD(EFUSE_IDR, APB_SLVERR, 31, 1) + FIELD(EFUSE_IDR, CACHE_ERROR, 4, 1) + FIELD(EFUSE_IDR, RD_ERROR, 3, 1) + FIELD(EFUSE_IDR, RD_DONE, 2, 1) + FIELD(EFUSE_IDR, PGM_ERROR, 1, 1) + FIELD(EFUSE_IDR, PGM_DONE, 0, 1) +REG32(EFUSE_CACHE_LOAD, 0x40) + FIELD(EFUSE_CACHE_LOAD, LOAD, 0, 1) +REG32(EFUSE_PGM_LOCK, 0x44) + FIELD(EFUSE_PGM_LOCK, SPK_ID_LOCK, 0, 1) +REG32(EFUSE_AES_CRC, 0x48) +REG32(EFUSE_TBITS_PRGRMG_EN, 0x100) + FIELD(EFUSE_TBITS_PRGRMG_EN, TBITS_PRGRMG_EN, 3, 1) +REG32(DNA_0, 0x100c) +REG32(DNA_1, 0x1010) +REG32(DNA_2, 0x1014) +REG32(IPDISABLE, 0x1018) + FIELD(IPDISABLE, VCU_DIS, 8, 1) + FIELD(IPDISABLE, GPU_DIS, 5, 1) + FIELD(IPDISABLE, APU3_DIS, 3, 1) + FIELD(IPDISABLE, APU2_DIS, 2, 1) + FIELD(IPDISABLE, APU1_DIS, 1, 1) + FIELD(IPDISABLE, APU0_DIS, 0, 1) +REG32(SYSOSC_CTRL, 0x101c) + FIELD(SYSOSC_CTRL, SYSOSC_EN, 0, 1) +REG32(USER_0, 0x1020) +REG32(USER_1, 0x1024) +REG32(USER_2, 0x1028) +REG32(USER_3, 0x102c) +REG32(USER_4, 0x1030) +REG32(USER_5, 0x1034) +REG32(USER_6, 0x1038) +REG32(USER_7, 0x103c) +REG32(MISC_USER_CTRL, 0x1040) + FIELD(MISC_USER_CTRL, FPD_SC_EN_0, 14, 1) + FIELD(MISC_USER_CTRL, LPD_SC_EN_0, 11, 1) + FIELD(MISC_USER_CTRL, LBIST_EN, 10, 1) + FIELD(MISC_USER_CTRL, USR_WRLK_7, 7, 1) + FIELD(MISC_USER_CTRL, USR_WRLK_6, 6, 1) + 
FIELD(MISC_USER_CTRL, USR_WRLK_5, 5, 1) + FIELD(MISC_USER_CTRL, USR_WRLK_4, 4, 1) + FIELD(MISC_USER_CTRL, USR_WRLK_3, 3, 1) + FIELD(MISC_USER_CTRL, USR_WRLK_2, 2, 1) + FIELD(MISC_USER_CTRL, USR_WRLK_1, 1, 1) + FIELD(MISC_USER_CTRL, USR_WRLK_0, 0, 1) +REG32(ROM_RSVD, 0x1044) + FIELD(ROM_RSVD, PBR_BOOT_ERROR, 0, 3) +REG32(PUF_CHASH, 0x1050) +REG32(PUF_MISC, 0x1054) + FIELD(PUF_MISC, REGISTER_DIS, 31, 1) + FIELD(PUF_MISC, SYN_WRLK, 30, 1) + FIELD(PUF_MISC, SYN_INVLD, 29, 1) + FIELD(PUF_MISC, TEST2_DIS, 28, 1) + FIELD(PUF_MISC, UNUSED27, 27, 1) + FIELD(PUF_MISC, UNUSED26, 26, 1) + FIELD(PUF_MISC, UNUSED25, 25, 1) + FIELD(PUF_MISC, UNUSED24, 24, 1) + FIELD(PUF_MISC, AUX, 0, 24) +REG32(SEC_CTRL, 0x1058) + FIELD(SEC_CTRL, PPK1_INVLD, 30, 2) + FIELD(SEC_CTRL, PPK1_WRLK, 29, 1) + FIELD(SEC_CTRL, PPK0_INVLD, 27, 2) + FIELD(SEC_CTRL, PPK0_WRLK, 26, 1) + FIELD(SEC_CTRL, RSA_EN, 11, 15) + FIELD(SEC_CTRL, SEC_LOCK, 10, 1) + FIELD(SEC_CTRL, PROG_GATE_2, 9, 1) + FIELD(SEC_CTRL, PROG_GATE_1, 8, 1) + FIELD(SEC_CTRL, PROG_GATE_0, 7, 1) + FIELD(SEC_CTRL, DFT_DIS, 6, 1) + FIELD(SEC_CTRL, JTAG_DIS, 5, 1) + FIELD(SEC_CTRL, ERROR_DIS, 4, 1) + FIELD(SEC_CTRL, BBRAM_DIS, 3, 1) + FIELD(SEC_CTRL, ENC_ONLY, 2, 1) + FIELD(SEC_CTRL, AES_WRLK, 1, 1) + FIELD(SEC_CTRL, AES_RDLK, 0, 1) +REG32(SPK_ID, 0x105c) +REG32(PPK0_0, 0x10a0) +REG32(PPK0_1, 0x10a4) +REG32(PPK0_2, 0x10a8) +REG32(PPK0_3, 0x10ac) +REG32(PPK0_4, 0x10b0) +REG32(PPK0_5, 0x10b4) +REG32(PPK0_6, 0x10b8) +REG32(PPK0_7, 0x10bc) +REG32(PPK0_8, 0x10c0) +REG32(PPK0_9, 0x10c4) +REG32(PPK0_10, 0x10c8) +REG32(PPK0_11, 0x10cc) +REG32(PPK1_0, 0x10d0) +REG32(PPK1_1, 0x10d4) +REG32(PPK1_2, 0x10d8) +REG32(PPK1_3, 0x10dc) +REG32(PPK1_4, 0x10e0) +REG32(PPK1_5, 0x10e4) +REG32(PPK1_6, 0x10e8) +REG32(PPK1_7, 0x10ec) +REG32(PPK1_8, 0x10f0) +REG32(PPK1_9, 0x10f4) +REG32(PPK1_10, 0x10f8) +REG32(PPK1_11, 0x10fc) + +#define BIT_POS(ROW, COLUMN) (ROW * 32 + COLUMN) +#define R_MAX (R_PPK1_11 + 1) + +/* #define EFUSE_XOSC 26 */ + +/* + * eFUSE layout references: + * ZynqMP: UG1085 (v2.1) August 21, 2019, p.277, Table 12-13 + */ +#define EFUSE_AES_RDLK BIT_POS(22, 0) +#define EFUSE_AES_WRLK BIT_POS(22, 1) +#define EFUSE_ENC_ONLY BIT_POS(22, 2) +#define EFUSE_BBRAM_DIS BIT_POS(22, 3) +#define EFUSE_ERROR_DIS BIT_POS(22, 4) +#define EFUSE_JTAG_DIS BIT_POS(22, 5) +#define EFUSE_DFT_DIS BIT_POS(22, 6) +#define EFUSE_PROG_GATE_0 BIT_POS(22, 7) +#define EFUSE_PROG_GATE_1 BIT_POS(22, 8) +#define EFUSE_PROG_GATE_2 BIT_POS(22, 9) +#define EFUSE_SEC_LOCK BIT_POS(22, 10) +#define EFUSE_RSA_EN BIT_POS(22, 11) +#define EFUSE_RSA_EN14 BIT_POS(22, 25) +#define EFUSE_PPK0_WRLK BIT_POS(22, 26) +#define EFUSE_PPK0_INVLD BIT_POS(22, 27) +#define EFUSE_PPK0_INVLD_1 BIT_POS(22, 28) +#define EFUSE_PPK1_WRLK BIT_POS(22, 29) +#define EFUSE_PPK1_INVLD BIT_POS(22, 30) +#define EFUSE_PPK1_INVLD_1 BIT_POS(22, 31) + +/* Areas.
*/ +#define EFUSE_TRIM_START BIT_POS(1, 0) +#define EFUSE_TRIM_END BIT_POS(1, 30) +#define EFUSE_DNA_START BIT_POS(3, 0) +#define EFUSE_DNA_END BIT_POS(5, 31) +#define EFUSE_AES_START BIT_POS(24, 0) +#define EFUSE_AES_END BIT_POS(31, 31) +#define EFUSE_ROM_START BIT_POS(17, 0) +#define EFUSE_ROM_END BIT_POS(17, 31) +#define EFUSE_IPDIS_START BIT_POS(6, 0) +#define EFUSE_IPDIS_END BIT_POS(6, 31) +#define EFUSE_USER_START BIT_POS(8, 0) +#define EFUSE_USER_END BIT_POS(15, 31) +#define EFUSE_BISR_START BIT_POS(32, 0) +#define EFUSE_BISR_END BIT_POS(39, 31) + +#define EFUSE_USER_CTRL_START BIT_POS(16, 0) +#define EFUSE_USER_CTRL_END BIT_POS(16, 16) +#define EFUSE_USER_CTRL_MASK ((uint32_t)MAKE_64BIT_MASK(0, 17)) + +#define EFUSE_PUF_CHASH_START BIT_POS(20, 0) +#define EFUSE_PUF_CHASH_END BIT_POS(20, 31) +#define EFUSE_PUF_MISC_START BIT_POS(21, 0) +#define EFUSE_PUF_MISC_END BIT_POS(21, 31) +#define EFUSE_PUF_SYN_WRLK BIT_POS(21, 30) + +#define EFUSE_SPK_START BIT_POS(23, 0) +#define EFUSE_SPK_END BIT_POS(23, 31) + +#define EFUSE_PPK0_START BIT_POS(40, 0) +#define EFUSE_PPK0_END BIT_POS(51, 31) +#define EFUSE_PPK1_START BIT_POS(52, 0) +#define EFUSE_PPK1_END BIT_POS(63, 31) + +#define EFUSE_CACHE_FLD(s, reg, field) \ + ARRAY_FIELD_DP32((s)->regs, reg, field, \ + (xlnx_efuse_get_row((s->efuse), EFUSE_ ## field) \ + >> (EFUSE_ ## field % 32))) + +#define EFUSE_CACHE_BIT(s, reg, field) \ + ARRAY_FIELD_DP32((s)->regs, reg, field, xlnx_efuse_get_bit((s->efuse), \ + EFUSE_ ## field)) + +#define FBIT_UNKNOWN (~0) + +QEMU_BUILD_BUG_ON(R_MAX != ARRAY_SIZE(((XlnxZynqMPEFuse *)0)->regs)); + +static void update_tbit_status(XlnxZynqMPEFuse *s) +{ + unsigned int check = xlnx_efuse_tbits_check(s->efuse); + uint32_t val = s->regs[R_STATUS]; + + val = FIELD_DP32(val, STATUS, EFUSE_0_TBIT, !!(check & (1 << 0))); + val = FIELD_DP32(val, STATUS, EFUSE_2_TBIT, !!(check & (1 << 1))); + val = FIELD_DP32(val, STATUS, EFUSE_3_TBIT, !!(check & (1 << 2))); + + s->regs[R_STATUS] = val; +} + +/* Update the u32 array from efuse bits. Slow but simple approach. */ +static void cache_sync_u32(XlnxZynqMPEFuse *s, unsigned int r_start, + unsigned int f_start, unsigned int f_end, + unsigned int f_written) +{ + uint32_t *u32 = &s->regs[r_start]; + unsigned int fbit, wbits = 0, u32_off = 0; + + /* Avoid working on bits that are not relevant. */ + if (f_written != FBIT_UNKNOWN + && (f_written < f_start || f_written > f_end)) { + return; + } + + for (fbit = f_start; fbit <= f_end; fbit++, wbits++) { + if (wbits == 32) { + /* Update the key offset. */ + u32_off += 1; + wbits = 0; + } + u32[u32_off] |= xlnx_efuse_get_bit(s->efuse, fbit) << wbits; + } +} + +/* + * Keep the syncs in bit order so we can bail out for the + * slower ones. + */ +static void zynqmp_efuse_sync_cache(XlnxZynqMPEFuse *s, unsigned int bit) +{ + EFUSE_CACHE_BIT(s, SEC_CTRL, AES_RDLK); + EFUSE_CACHE_BIT(s, SEC_CTRL, AES_WRLK); + EFUSE_CACHE_BIT(s, SEC_CTRL, ENC_ONLY); + EFUSE_CACHE_BIT(s, SEC_CTRL, BBRAM_DIS); + EFUSE_CACHE_BIT(s, SEC_CTRL, ERROR_DIS); + EFUSE_CACHE_BIT(s, SEC_CTRL, JTAG_DIS); + EFUSE_CACHE_BIT(s, SEC_CTRL, DFT_DIS); + EFUSE_CACHE_BIT(s, SEC_CTRL, PROG_GATE_0); + EFUSE_CACHE_BIT(s, SEC_CTRL, PROG_GATE_1); + EFUSE_CACHE_BIT(s, SEC_CTRL, PROG_GATE_2); + EFUSE_CACHE_BIT(s, SEC_CTRL, SEC_LOCK); + EFUSE_CACHE_BIT(s, SEC_CTRL, PPK0_WRLK); + EFUSE_CACHE_BIT(s, SEC_CTRL, PPK1_WRLK); + + EFUSE_CACHE_FLD(s, SEC_CTRL, RSA_EN); + EFUSE_CACHE_FLD(s, SEC_CTRL, PPK0_INVLD); + EFUSE_CACHE_FLD(s, SEC_CTRL, PPK1_INVLD); + + /* Update the tbits. 
*/ + update_tbit_status(s); + + /* Sync the various areas. */ + s->regs[R_MISC_USER_CTRL] = xlnx_efuse_get_row(s->efuse, + EFUSE_USER_CTRL_START) + & EFUSE_USER_CTRL_MASK; + s->regs[R_PUF_CHASH] = xlnx_efuse_get_row(s->efuse, EFUSE_PUF_CHASH_START); + s->regs[R_PUF_MISC] = xlnx_efuse_get_row(s->efuse, EFUSE_PUF_MISC_START); + + cache_sync_u32(s, R_DNA_0, EFUSE_DNA_START, EFUSE_DNA_END, bit); + + if (bit < EFUSE_AES_START) { + return; + } + + cache_sync_u32(s, R_ROM_RSVD, EFUSE_ROM_START, EFUSE_ROM_END, bit); + cache_sync_u32(s, R_IPDISABLE, EFUSE_IPDIS_START, EFUSE_IPDIS_END, bit); + cache_sync_u32(s, R_USER_0, EFUSE_USER_START, EFUSE_USER_END, bit); + cache_sync_u32(s, R_SPK_ID, EFUSE_SPK_START, EFUSE_SPK_END, bit); + cache_sync_u32(s, R_PPK0_0, EFUSE_PPK0_START, EFUSE_PPK0_END, bit); + cache_sync_u32(s, R_PPK1_0, EFUSE_PPK1_START, EFUSE_PPK1_END, bit); +} + +static void zynqmp_efuse_update_irq(XlnxZynqMPEFuse *s) +{ + bool pending = s->regs[R_EFUSE_ISR] & s->regs[R_EFUSE_IMR]; + qemu_set_irq(s->irq, pending); +} + +static void zynqmp_efuse_isr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque); + zynqmp_efuse_update_irq(s); +} + +static uint64_t zynqmp_efuse_ier_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque); + uint32_t val = val64; + + s->regs[R_EFUSE_IMR] |= val; + zynqmp_efuse_update_irq(s); + return 0; +} + +static uint64_t zynqmp_efuse_idr_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque); + uint32_t val = val64; + + s->regs[R_EFUSE_IMR] &= ~val; + zynqmp_efuse_update_irq(s); + return 0; +} + +static void zynqmp_efuse_pgm_addr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque); + unsigned bit = val64; + unsigned page = FIELD_EX32(bit, EFUSE_PGM_ADDR, EFUSE); + bool puf_prot = false; + const char *errmsg = NULL; + + /* Allow only valid array, and adjust for skipped array 1 */ + switch (page) { + case 0: + break; + case 2 ... 
3: + bit = FIELD_DP32(bit, EFUSE_PGM_ADDR, EFUSE, page - 1); + puf_prot = xlnx_efuse_get_bit(s->efuse, EFUSE_PUF_SYN_WRLK); + break; + default: + errmsg = "Invalid address"; + goto pgm_done; + } + + if (ARRAY_FIELD_EX32(s->regs, WR_LOCK, LOCK)) { + errmsg = "Array write-locked"; + goto pgm_done; + } + + if (!ARRAY_FIELD_EX32(s->regs, CFG, PGM_EN)) { + errmsg = "Array pgm-disabled"; + goto pgm_done; + } + + if (puf_prot) { + errmsg = "PUF_HD-store write-locked"; + goto pgm_done; + } + + if (ARRAY_FIELD_EX32(s->regs, SEC_CTRL, AES_WRLK) + && bit >= EFUSE_AES_START && bit <= EFUSE_AES_END) { + errmsg = "AES key-store Write-locked"; + goto pgm_done; + } + + if (!xlnx_efuse_set_bit(s->efuse, bit)) { + errmsg = "Write failed"; + } + + pgm_done: + if (!errmsg) { + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, PGM_ERROR, 0); + } else { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, PGM_ERROR, 1); + qemu_log_mask(LOG_GUEST_ERROR, + "%s - eFuse write error: %s; addr=0x%x\n", + path, errmsg, (unsigned)val64); + } + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, PGM_DONE, 1); + zynqmp_efuse_update_irq(s); +} + +static void zynqmp_efuse_rd_addr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque); + g_autofree char *path = NULL; + + /* + * Grant reads only to allowed bits; reference sources: + * 1/ XilSKey - XilSKey_ZynqMp_EfusePs_ReadRow() + * 2/ UG1085, v2.0, table 12-13 + * (note: enumerates the masks as per described in + * references to avoid mental translation). + */ +#define COL_MASK(L_, H_) \ + ((uint32_t)MAKE_64BIT_MASK((L_), (1 + (H_) - (L_)))) + + static const uint32_t ary0_col_mask[] = { + /* XilSKey - XSK_ZYNQMP_EFUSEPS_TBITS_ROW */ + [0] = COL_MASK(28, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_USR{0:7}_FUSE_ROW */ + [8] = COL_MASK(0, 31), [9] = COL_MASK(0, 31), + [10] = COL_MASK(0, 31), [11] = COL_MASK(0, 31), + [12] = COL_MASK(0, 31), [13] = COL_MASK(0, 31), + [14] = COL_MASK(0, 31), [15] = COL_MASK(0, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_MISC_USR_CTRL_ROW */ + [16] = COL_MASK(0, 7) | COL_MASK(10, 16), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_PBR_BOOT_ERR_ROW */ + [17] = COL_MASK(0, 2), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_PUF_CHASH_ROW */ + [20] = COL_MASK(0, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_PUF_AUX_ROW */ + [21] = COL_MASK(0, 23) | COL_MASK(29, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_SEC_CTRL_ROW */ + [22] = COL_MASK(0, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_SPK_ID_ROW */ + [23] = COL_MASK(0, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_PPK0_START_ROW */ + [40] = COL_MASK(0, 31), [41] = COL_MASK(0, 31), + [42] = COL_MASK(0, 31), [43] = COL_MASK(0, 31), + [44] = COL_MASK(0, 31), [45] = COL_MASK(0, 31), + [46] = COL_MASK(0, 31), [47] = COL_MASK(0, 31), + [48] = COL_MASK(0, 31), [49] = COL_MASK(0, 31), + [50] = COL_MASK(0, 31), [51] = COL_MASK(0, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_PPK1_START_ROW */ + [52] = COL_MASK(0, 31), [53] = COL_MASK(0, 31), + [54] = COL_MASK(0, 31), [55] = COL_MASK(0, 31), + [56] = COL_MASK(0, 31), [57] = COL_MASK(0, 31), + [58] = COL_MASK(0, 31), [59] = COL_MASK(0, 31), + [60] = COL_MASK(0, 31), [61] = COL_MASK(0, 31), + [62] = COL_MASK(0, 31), [63] = COL_MASK(0, 31), + }; + + uint32_t col_mask = COL_MASK(0, 31); +#undef COL_MASK + + uint32_t efuse_idx = s->regs[R_EFUSE_RD_ADDR]; + uint32_t efuse_ary = FIELD_EX32(efuse_idx, EFUSE_RD_ADDR, EFUSE); + uint32_t efuse_row = FIELD_EX32(efuse_idx, EFUSE_RD_ADDR, ROW); + + switch (efuse_ary) { + case 0: /* Various */ + if 
(efuse_row >= ARRAY_SIZE(ary0_col_mask)) { + goto denied; + } + + col_mask = ary0_col_mask[efuse_row]; + if (!col_mask) { + goto denied; + } + break; + case 2: /* PUF helper data, adjust for skipped array 1 */ + case 3: + val64 = FIELD_DP32(efuse_idx, EFUSE_RD_ADDR, EFUSE, efuse_ary - 1); + break; + default: + goto denied; + } + + s->regs[R_EFUSE_RD_DATA] = xlnx_efuse_get_row(s->efuse, val64) & col_mask; + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_ERROR, 0); + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_DONE, 1); + zynqmp_efuse_update_irq(s); + return; + + denied: + path = object_get_canonical_path(OBJECT(s)); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Denied efuse read from array %u, row %u\n", + path, efuse_ary, efuse_row); + + s->regs[R_EFUSE_RD_DATA] = 0; + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_ERROR, 1); + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_DONE, 0); + zynqmp_efuse_update_irq(s); +} + +static void zynqmp_efuse_aes_crc_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque); + bool ok; + + ok = xlnx_efuse_k256_check(s->efuse, (uint32_t)val64, EFUSE_AES_START); + + ARRAY_FIELD_DP32(s->regs, STATUS, AES_CRC_PASS, (ok ? 1 : 0)); + ARRAY_FIELD_DP32(s->regs, STATUS, AES_CRC_DONE, 1); + + s->regs[R_EFUSE_AES_CRC] = 0; /* crc value is write-only */ +} + +static uint64_t zynqmp_efuse_cache_load_prew(RegisterInfo *reg, + uint64_t valu64) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque); + + if (valu64 & R_EFUSE_CACHE_LOAD_LOAD_MASK) { + zynqmp_efuse_sync_cache(s, FBIT_UNKNOWN); + ARRAY_FIELD_DP32(s->regs, STATUS, CACHE_DONE, 1); + zynqmp_efuse_update_irq(s); + } + + return 0; +} + +static uint64_t zynqmp_efuse_wr_lock_prew(RegisterInfo *reg, uint64_t val) +{ + return val == 0xDF0D ? 0 : 1; +} + +static RegisterAccessInfo zynqmp_efuse_regs_info[] = { + { .name = "WR_LOCK", .addr = A_WR_LOCK, + .reset = 0x1, + .pre_write = zynqmp_efuse_wr_lock_prew, + },{ .name = "CFG", .addr = A_CFG, + },{ .name = "STATUS", .addr = A_STATUS, + .rsvd = 0x8, + .ro = 0xff, + },{ .name = "EFUSE_PGM_ADDR", .addr = A_EFUSE_PGM_ADDR, + .post_write = zynqmp_efuse_pgm_addr_postw + },{ .name = "EFUSE_RD_ADDR", .addr = A_EFUSE_RD_ADDR, + .rsvd = 0x1f, + .post_write = zynqmp_efuse_rd_addr_postw, + },{ .name = "EFUSE_RD_DATA", .addr = A_EFUSE_RD_DATA, + .ro = 0xffffffff, + },{ .name = "TPGM", .addr = A_TPGM, + },{ .name = "TRD", .addr = A_TRD, + .reset = 0x1b, + },{ .name = "TSU_H_PS", .addr = A_TSU_H_PS, + .reset = 0xff, + },{ .name = "TSU_H_PS_CS", .addr = A_TSU_H_PS_CS, + .reset = 0xb, + },{ .name = "TSU_H_CS", .addr = A_TSU_H_CS, + .reset = 0x7, + },{ .name = "EFUSE_ISR", .addr = A_EFUSE_ISR, + .rsvd = 0x7fffffe0, + .w1c = 0x8000001f, + .post_write = zynqmp_efuse_isr_postw, + },{ .name = "EFUSE_IMR", .addr = A_EFUSE_IMR, + .reset = 0x8000001f, + .rsvd = 0x7fffffe0, + .ro = 0xffffffff, + },{ .name = "EFUSE_IER", .addr = A_EFUSE_IER, + .rsvd = 0x7fffffe0, + .pre_write = zynqmp_efuse_ier_prew, + },{ .name = "EFUSE_IDR", .addr = A_EFUSE_IDR, + .rsvd = 0x7fffffe0, + .pre_write = zynqmp_efuse_idr_prew, + },{ .name = "EFUSE_CACHE_LOAD", .addr = A_EFUSE_CACHE_LOAD, + .pre_write = zynqmp_efuse_cache_load_prew, + },{ .name = "EFUSE_PGM_LOCK", .addr = A_EFUSE_PGM_LOCK, + },{ .name = "EFUSE_AES_CRC", .addr = A_EFUSE_AES_CRC, + .post_write = zynqmp_efuse_aes_crc_postw, + },{ .name = "EFUSE_TBITS_PRGRMG_EN", .addr = A_EFUSE_TBITS_PRGRMG_EN, + .reset = R_EFUSE_TBITS_PRGRMG_EN_TBITS_PRGRMG_EN_MASK, + },{ .name = "DNA_0", .addr = A_DNA_0, + .ro = 0xffffffff, + },{ .name = "DNA_1", .addr 
= A_DNA_1, + .ro = 0xffffffff, + },{ .name = "DNA_2", .addr = A_DNA_2, + .ro = 0xffffffff, + },{ .name = "IPDISABLE", .addr = A_IPDISABLE, + .ro = 0xffffffff, + },{ .name = "SYSOSC_CTRL", .addr = A_SYSOSC_CTRL, + .ro = 0xffffffff, + },{ .name = "USER_0", .addr = A_USER_0, + .ro = 0xffffffff, + },{ .name = "USER_1", .addr = A_USER_1, + .ro = 0xffffffff, + },{ .name = "USER_2", .addr = A_USER_2, + .ro = 0xffffffff, + },{ .name = "USER_3", .addr = A_USER_3, + .ro = 0xffffffff, + },{ .name = "USER_4", .addr = A_USER_4, + .ro = 0xffffffff, + },{ .name = "USER_5", .addr = A_USER_5, + .ro = 0xffffffff, + },{ .name = "USER_6", .addr = A_USER_6, + .ro = 0xffffffff, + },{ .name = "USER_7", .addr = A_USER_7, + .ro = 0xffffffff, + },{ .name = "MISC_USER_CTRL", .addr = A_MISC_USER_CTRL, + .ro = 0xffffffff, + },{ .name = "ROM_RSVD", .addr = A_ROM_RSVD, + .ro = 0xffffffff, + },{ .name = "PUF_CHASH", .addr = A_PUF_CHASH, + .ro = 0xffffffff, + },{ .name = "PUF_MISC", .addr = A_PUF_MISC, + .ro = 0xffffffff, + },{ .name = "SEC_CTRL", .addr = A_SEC_CTRL, + .ro = 0xffffffff, + },{ .name = "SPK_ID", .addr = A_SPK_ID, + .ro = 0xffffffff, + },{ .name = "PPK0_0", .addr = A_PPK0_0, + .ro = 0xffffffff, + },{ .name = "PPK0_1", .addr = A_PPK0_1, + .ro = 0xffffffff, + },{ .name = "PPK0_2", .addr = A_PPK0_2, + .ro = 0xffffffff, + },{ .name = "PPK0_3", .addr = A_PPK0_3, + .ro = 0xffffffff, + },{ .name = "PPK0_4", .addr = A_PPK0_4, + .ro = 0xffffffff, + },{ .name = "PPK0_5", .addr = A_PPK0_5, + .ro = 0xffffffff, + },{ .name = "PPK0_6", .addr = A_PPK0_6, + .ro = 0xffffffff, + },{ .name = "PPK0_7", .addr = A_PPK0_7, + .ro = 0xffffffff, + },{ .name = "PPK0_8", .addr = A_PPK0_8, + .ro = 0xffffffff, + },{ .name = "PPK0_9", .addr = A_PPK0_9, + .ro = 0xffffffff, + },{ .name = "PPK0_10", .addr = A_PPK0_10, + .ro = 0xffffffff, + },{ .name = "PPK0_11", .addr = A_PPK0_11, + .ro = 0xffffffff, + },{ .name = "PPK1_0", .addr = A_PPK1_0, + .ro = 0xffffffff, + },{ .name = "PPK1_1", .addr = A_PPK1_1, + .ro = 0xffffffff, + },{ .name = "PPK1_2", .addr = A_PPK1_2, + .ro = 0xffffffff, + },{ .name = "PPK1_3", .addr = A_PPK1_3, + .ro = 0xffffffff, + },{ .name = "PPK1_4", .addr = A_PPK1_4, + .ro = 0xffffffff, + },{ .name = "PPK1_5", .addr = A_PPK1_5, + .ro = 0xffffffff, + },{ .name = "PPK1_6", .addr = A_PPK1_6, + .ro = 0xffffffff, + },{ .name = "PPK1_7", .addr = A_PPK1_7, + .ro = 0xffffffff, + },{ .name = "PPK1_8", .addr = A_PPK1_8, + .ro = 0xffffffff, + },{ .name = "PPK1_9", .addr = A_PPK1_9, + .ro = 0xffffffff, + },{ .name = "PPK1_10", .addr = A_PPK1_10, + .ro = 0xffffffff, + },{ .name = "PPK1_11", .addr = A_PPK1_11, + .ro = 0xffffffff, + } +}; + +static void zynqmp_efuse_reg_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + RegisterInfoArray *reg_array = opaque; + XlnxZynqMPEFuse *s; + Object *dev; + + assert(reg_array != NULL); + + dev = reg_array->mem.owner; + assert(dev); + + s = XLNX_ZYNQMP_EFUSE(dev); + + if (addr != A_WR_LOCK && s->regs[R_WR_LOCK]) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, + "%s[reg_0x%02lx]: Attempt to write locked register.\n", + path, (long)addr); + } else { + register_write_memory(opaque, addr, data, size); + } +} + +static const MemoryRegionOps zynqmp_efuse_ops = { + .read = register_read_memory, + .write = zynqmp_efuse_reg_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void zynqmp_efuse_register_reset(RegisterInfo *reg) +{ + if (!reg->data || !reg->access) { + 
return; + } + + /* Reset must not trigger some registers' writers */ + switch (reg->access->addr) { + case A_EFUSE_AES_CRC: + *(uint32_t *)reg->data = reg->access->reset; + return; + } + + register_reset(reg); +} + +static void zynqmp_efuse_reset(DeviceState *dev) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(dev); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + zynqmp_efuse_register_reset(&s->regs_info[i]); + } + + zynqmp_efuse_sync_cache(s, FBIT_UNKNOWN); + ARRAY_FIELD_DP32(s->regs, STATUS, CACHE_DONE, 1); + zynqmp_efuse_update_irq(s); +} + +static void zynqmp_efuse_realize(DeviceState *dev, Error **errp) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(dev); + + if (!s->efuse) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + error_setg(errp, "%s.efuse: link property not connected to XLNX-EFUSE", + path); + return; + } + + s->efuse->dev = dev; +} + +static void zynqmp_efuse_init(Object *obj) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RegisterInfoArray *reg_array; + + reg_array = + register_init_block32(DEVICE(obj), zynqmp_efuse_regs_info, + ARRAY_SIZE(zynqmp_efuse_regs_info), + s->regs_info, s->regs, + &zynqmp_efuse_ops, + ZYNQMP_EFUSE_ERR_DEBUG, + R_MAX * 4); + + sysbus_init_mmio(sbd, &reg_array->mem); + sysbus_init_irq(sbd, &s->irq); +} + +static const VMStateDescription vmstate_efuse = { + .name = TYPE_XLNX_ZYNQMP_EFUSE, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPEFuse, R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static Property zynqmp_efuse_props[] = { + DEFINE_PROP_LINK("efuse", + XlnxZynqMPEFuse, efuse, + TYPE_XLNX_EFUSE, XlnxEFuse *), + + DEFINE_PROP_END_OF_LIST(), +}; + +static void zynqmp_efuse_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = zynqmp_efuse_reset; + dc->realize = zynqmp_efuse_realize; + dc->vmsd = &vmstate_efuse; + device_class_set_props(dc, zynqmp_efuse_props); +} + + +static const TypeInfo efuse_info = { + .name = TYPE_XLNX_ZYNQMP_EFUSE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxZynqMPEFuse), + .class_init = zynqmp_efuse_class_init, + .instance_init = zynqmp_efuse_init, +}; + +static void efuse_register_types(void) +{ + type_register_static(&efuse_info); +} + +type_init(efuse_register_types) diff --git a/hw/openrisc/Kconfig b/hw/openrisc/Kconfig index 8f284f3ba0..97af258b55 100644 --- a/hw/openrisc/Kconfig +++ b/hw/openrisc/Kconfig @@ -4,3 +4,15 @@ config OR1K_SIM select OPENCORES_ETH select OMPIC select SPLIT_IRQ + +config OR1K_VIRT + bool + imply PCI_DEVICES + imply VIRTIO_VGA + imply TEST_DEVICES + select PCI + select PCI_EXPRESS_GENERIC_BRIDGE + select GOLDFISH_RTC + select SERIAL + select SIFIVE_TEST + select VIRTIO_MMIO diff --git a/hw/openrisc/boot.c b/hw/openrisc/boot.c new file mode 100644 index 0000000000..007e80cd5a --- /dev/null +++ b/hw/openrisc/boot.c @@ -0,0 +1,119 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * QEMU OpenRISC boot helpers.
+ * + * (c) 2022 Stafford Horne + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/cpu-defs.h" +#include "elf.h" +#include "hw/loader.h" +#include "hw/openrisc/boot.h" +#include "sysemu/device_tree.h" +#include "sysemu/qtest.h" +#include "sysemu/reset.h" + +#include <libfdt.h> + +#define KERNEL_LOAD_ADDR 0x100 + +hwaddr openrisc_load_kernel(ram_addr_t ram_size, + const char *kernel_filename, + uint32_t *bootstrap_pc) +{ + long kernel_size; + uint64_t elf_entry; + uint64_t high_addr; + hwaddr entry; + + if (kernel_filename && !qtest_enabled()) { + kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, + &elf_entry, NULL, &high_addr, NULL, 1, + EM_OPENRISC, 1, 0); + entry = elf_entry; + if (kernel_size < 0) { + kernel_size = load_uimage(kernel_filename, + &entry, NULL, NULL, NULL, NULL); + high_addr = entry + kernel_size; + } + if (kernel_size < 0) { + kernel_size = load_image_targphys(kernel_filename, + KERNEL_LOAD_ADDR, + ram_size - KERNEL_LOAD_ADDR); + high_addr = KERNEL_LOAD_ADDR + kernel_size; + } + + if (entry <= 0) { + entry = KERNEL_LOAD_ADDR; + } + + if (kernel_size < 0) { + error_report("couldn't load the kernel '%s'", kernel_filename); + exit(1); + } + *bootstrap_pc = entry; + + return high_addr; + } + return 0; +} + +hwaddr openrisc_load_initrd(void *fdt, const char *filename, + hwaddr load_start, uint64_t mem_size) +{ + int size; + hwaddr start; + + /* We put the initrd right after the kernel; page aligned. */ + start = TARGET_PAGE_ALIGN(load_start); + + size = load_ramdisk(filename, start, mem_size - start); + if (size < 0) { + size = load_image_targphys(filename, start, mem_size - start); + if (size < 0) { + error_report("could not load ramdisk '%s'", filename); + exit(1); + } + } + + if (fdt) { + qemu_fdt_setprop_cell(fdt, "/chosen", + "linux,initrd-start", start); + qemu_fdt_setprop_cell(fdt, "/chosen", + "linux,initrd-end", start + size); + } + + return start + size; +} + +uint32_t openrisc_load_fdt(void *fdt, hwaddr load_start, + uint64_t mem_size) +{ + uint32_t fdt_addr; + int ret; + int fdtsize = fdt_totalsize(fdt); + + if (fdtsize <= 0) { + error_report("invalid device-tree"); + exit(1); + } + + /* We put fdt right after the kernel and/or initrd. */ + fdt_addr = TARGET_PAGE_ALIGN(load_start); + + ret = fdt_pack(fdt); + /* Should only fail if we've built a corrupted tree */ + g_assert(ret == 0); + /* copy in the device tree */ + qemu_fdt_dumpdtb(fdt, fdtsize); + + rom_add_blob_fixed_as("fdt", fdt, fdtsize, fdt_addr, + &address_space_memory); + qemu_register_reset_nosnapshotload(qemu_fdt_randomize_seeds, + rom_ptr_for_as(&address_space_memory, fdt_addr, fdtsize)); + + return fdt_addr; +} diff --git a/hw/openrisc/cputimer.c b/hw/openrisc/cputimer.c index 93268815d8..10163b391b 100644 --- a/hw/openrisc/cputimer.c +++ b/hw/openrisc/cputimer.c @@ -22,6 +22,7 @@ #include "cpu.h" #include "migration/vmstate.h" #include "qemu/timer.h" +#include "sysemu/reset.h" #define TIMER_PERIOD 50 /* 50 ns period for 20 MHz timer */ @@ -122,6 +123,24 @@ static void openrisc_timer_cb(void *opaque) qemu_cpu_kick(CPU(cpu)); } +/* Reset the per CPU counter state. */ +static void openrisc_count_reset(void *opaque) +{ + OpenRISCCPU *cpu = opaque; + + if (cpu->env.is_counting) { + cpu_openrisc_count_stop(cpu); + } + cpu->env.ttmr = 0x00000000; +} + +/* Reset the global timer state.
*/ +static void openrisc_timer_reset(void *opaque) +{ + or1k_timer->ttcr = 0x00000000; + or1k_timer->last_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); +} + static const VMStateDescription vmstate_or1k_timer = { .name = "or1k_timer", .version_id = 1, @@ -136,10 +155,11 @@ static const VMStateDescription vmstate_or1k_timer = { void cpu_openrisc_clock_init(OpenRISCCPU *cpu) { cpu->env.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &openrisc_timer_cb, cpu); - cpu->env.ttmr = 0x00000000; + qemu_register_reset(openrisc_count_reset, cpu); if (or1k_timer == NULL) { or1k_timer = g_new0(OR1KTimerState, 1); + qemu_register_reset(openrisc_timer_reset, cpu); vmstate_register(NULL, 0, &vmstate_or1k_timer, or1k_timer); } } diff --git a/hw/openrisc/meson.build b/hw/openrisc/meson.build index 947f63ee08..2dbc6365bb 100644 --- a/hw/openrisc/meson.build +++ b/hw/openrisc/meson.build @@ -1,5 +1,7 @@ openrisc_ss = ss.source_set() openrisc_ss.add(files('cputimer.c')) -openrisc_ss.add(when: 'CONFIG_OR1K_SIM', if_true: files('openrisc_sim.c')) +openrisc_ss.add(files('boot.c')) +openrisc_ss.add(when: 'CONFIG_OR1K_SIM', if_true: [files('openrisc_sim.c'), fdt]) +openrisc_ss.add(when: 'CONFIG_OR1K_VIRT', if_true: [files('virt.c'), fdt]) hw_arch += {'openrisc': openrisc_ss} diff --git a/hw/openrisc/openrisc_sim.c b/hw/openrisc/openrisc_sim.c index 73fe383c2d..35da123aef 100644 --- a/hw/openrisc/openrisc_sim.c +++ b/hw/openrisc/openrisc_sim.c @@ -24,21 +24,69 @@ #include "cpu.h" #include "hw/irq.h" #include "hw/boards.h" -#include "elf.h" #include "hw/char/serial.h" #include "net/net.h" -#include "hw/loader.h" +#include "hw/openrisc/boot.h" #include "hw/qdev-properties.h" +#include "exec/address-spaces.h" +#include "sysemu/device_tree.h" #include "sysemu/sysemu.h" #include "hw/sysbus.h" #include "sysemu/qtest.h" #include "sysemu/reset.h" #include "hw/core/split-irq.h" +#include <libfdt.h> + #define KERNEL_LOAD_ADDR 0x100 +#define OR1KSIM_CPUS_MAX 4 +#define OR1KSIM_CLK_MHZ 20000000 + +#define TYPE_OR1KSIM_MACHINE MACHINE_TYPE_NAME("or1k-sim") +#define OR1KSIM_MACHINE(obj) \ + OBJECT_CHECK(Or1ksimState, (obj), TYPE_OR1KSIM_MACHINE) + +typedef struct Or1ksimState { + /*< private >*/ + MachineState parent_obj; + + /*< public >*/ + void *fdt; + int fdt_size; + +} Or1ksimState; + +enum { + OR1KSIM_DRAM, + OR1KSIM_UART, + OR1KSIM_ETHOC, + OR1KSIM_OMPIC, +}; + +enum { + OR1KSIM_OMPIC_IRQ = 1, + OR1KSIM_UART_IRQ = 2, + OR1KSIM_ETHOC_IRQ = 4, +}; + +enum { + OR1KSIM_UART_COUNT = 4 +}; + +static const struct MemmapEntry { + hwaddr base; + hwaddr size; +} or1ksim_memmap[] = { + [OR1KSIM_DRAM] = { 0x00000000, 0 }, + [OR1KSIM_UART] = { 0x90000000, 0x100 }, + [OR1KSIM_ETHOC] = { 0x92000000, 0x800 }, + [OR1KSIM_OMPIC] = { 0x98000000, OR1KSIM_CPUS_MAX * 8 }, +}; + static struct openrisc_boot_info { uint32_t bootstrap_pc; + uint32_t fdt_addr; } boot_info; static void main_cpu_reset(void *opaque) @@ -49,6 +97,7 @@ static void main_cpu_reset(void *opaque) cpu_reset(CPU(cpu)); cpu_set_pc(cs, boot_info.bootstrap_pc); + cpu_set_gpr(&cpu->env, 3, boot_info.fdt_addr); } static qemu_irq get_cpu_irq(OpenRISCCPU *cpus[], int cpunum, int irq_pin) @@ -56,12 +105,77 @@ static qemu_irq get_cpu_irq(OpenRISCCPU *cpus[], int cpunum, int irq_pin) return qdev_get_gpio_in_named(DEVICE(cpus[cpunum]), "IRQ", irq_pin); } -static void openrisc_sim_net_init(hwaddr base, hwaddr descriptors, +static void openrisc_create_fdt(Or1ksimState *state, + const struct MemmapEntry *memmap, + int num_cpus, uint64_t mem_size, + const char *cmdline) +{ + void *fdt; + int cpu; + char
*nodename; + int pic_ph; + + fdt = state->fdt = create_device_tree(&state->fdt_size); + if (!fdt) { + error_report("create_device_tree() failed"); + exit(1); + } + + qemu_fdt_setprop_string(fdt, "/", "compatible", "opencores,or1ksim"); + qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x1); + qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x1); + + nodename = g_strdup_printf("/memory@%" HWADDR_PRIx, + memmap[OR1KSIM_DRAM].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_cells(fdt, nodename, "reg", + memmap[OR1KSIM_DRAM].base, mem_size); + qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory"); + g_free(nodename); + + qemu_fdt_add_subnode(fdt, "/cpus"); + qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0); + qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1); + + for (cpu = 0; cpu < num_cpus; cpu++) { + nodename = g_strdup_printf("/cpus/cpu@%d", cpu); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", + "opencores,or1200-rtlsvn481"); + qemu_fdt_setprop_cell(fdt, nodename, "reg", cpu); + qemu_fdt_setprop_cell(fdt, nodename, "clock-frequency", + OR1KSIM_CLK_MHZ); + g_free(nodename); + } + + nodename = (char *)"/pic"; + qemu_fdt_add_subnode(fdt, nodename); + pic_ph = qemu_fdt_alloc_phandle(fdt); + qemu_fdt_setprop_string(fdt, nodename, "compatible", + "opencores,or1k-pic-level"); + qemu_fdt_setprop_cell(fdt, nodename, "#interrupt-cells", 1); + qemu_fdt_setprop(fdt, nodename, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(fdt, nodename, "phandle", pic_ph); + + qemu_fdt_setprop_cell(fdt, "/", "interrupt-parent", pic_ph); + + qemu_fdt_add_subnode(fdt, "/chosen"); + if (cmdline) { + qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline); + } + + /* Create aliases node for use by devices. */ + qemu_fdt_add_subnode(fdt, "/aliases"); +} + +static void openrisc_sim_net_init(Or1ksimState *state, hwaddr base, hwaddr size, int num_cpus, OpenRISCCPU *cpus[], int irq_pin, NICInfo *nd) { + void *fdt = state->fdt; DeviceState *dev; SysBusDevice *s; + char *nodename; int i; dev = qdev_new("open_eth"); @@ -81,14 +195,28 @@ static void openrisc_sim_net_init(hwaddr base, hwaddr descriptors, sysbus_connect_irq(s, 0, get_cpu_irq(cpus, 0, irq_pin)); } sysbus_mmio_map(s, 0, base); - sysbus_mmio_map(s, 1, descriptors); + sysbus_mmio_map(s, 1, base + 0x400); + + /* Init device tree node for ethoc. */ + nodename = g_strdup_printf("/ethoc@%" HWADDR_PRIx, base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "opencores,ethoc"); + qemu_fdt_setprop_cells(fdt, nodename, "reg", base, size); + qemu_fdt_setprop_cell(fdt, nodename, "interrupts", irq_pin); + qemu_fdt_setprop(fdt, nodename, "big-endian", NULL, 0); + + qemu_fdt_setprop_string(fdt, "/aliases", "enet0", nodename); + g_free(nodename); } -static void openrisc_sim_ompic_init(hwaddr base, int num_cpus, +static void openrisc_sim_ompic_init(Or1ksimState *state, hwaddr base, + hwaddr size, int num_cpus, OpenRISCCPU *cpus[], int irq_pin) { + void *fdt = state->fdt; DeviceState *dev; SysBusDevice *s; + char *nodename; int i; dev = qdev_new("or1k-ompic"); @@ -100,53 +228,72 @@ static void openrisc_sim_ompic_init(hwaddr base, int num_cpus, sysbus_connect_irq(s, i, get_cpu_irq(cpus, i, irq_pin)); } sysbus_mmio_map(s, 0, base); + + /* Add device tree node for ompic. 
*/ + nodename = g_strdup_printf("/ompic@%" HWADDR_PRIx, base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "openrisc,ompic"); + qemu_fdt_setprop_cells(fdt, nodename, "reg", base, size); + qemu_fdt_setprop(fdt, nodename, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(fdt, nodename, "#interrupt-cells", 0); + qemu_fdt_setprop_cell(fdt, nodename, "interrupts", irq_pin); + g_free(nodename); } -static void openrisc_load_kernel(ram_addr_t ram_size, - const char *kernel_filename) +static void openrisc_sim_serial_init(Or1ksimState *state, hwaddr base, + hwaddr size, int num_cpus, + OpenRISCCPU *cpus[], int irq_pin, + int uart_idx) { - long kernel_size; - uint64_t elf_entry; - hwaddr entry; + void *fdt = state->fdt; + char *nodename; + qemu_irq serial_irq; + char alias[sizeof("uart0")]; + int i; - if (kernel_filename && !qtest_enabled()) { - kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, - &elf_entry, NULL, NULL, NULL, 1, EM_OPENRISC, - 1, 0); - entry = elf_entry; - if (kernel_size < 0) { - kernel_size = load_uimage(kernel_filename, - &entry, NULL, NULL, NULL, NULL); + if (num_cpus > 1) { + DeviceState *splitter = qdev_new(TYPE_SPLIT_IRQ); + qdev_prop_set_uint32(splitter, "num-lines", num_cpus); + qdev_realize_and_unref(splitter, NULL, &error_fatal); + for (i = 0; i < num_cpus; i++) { + qdev_connect_gpio_out(splitter, i, get_cpu_irq(cpus, i, irq_pin)); } - if (kernel_size < 0) { - kernel_size = load_image_targphys(kernel_filename, - KERNEL_LOAD_ADDR, - ram_size - KERNEL_LOAD_ADDR); - } - - if (entry <= 0) { - entry = KERNEL_LOAD_ADDR; - } - - if (kernel_size < 0) { - error_report("couldn't load the kernel '%s'", kernel_filename); - exit(1); - } - boot_info.bootstrap_pc = entry; + serial_irq = qdev_get_gpio_in(splitter, 0); + } else { + serial_irq = get_cpu_irq(cpus, 0, irq_pin); } + serial_mm_init(get_system_memory(), base, 0, serial_irq, 115200, + serial_hd(OR1KSIM_UART_COUNT - uart_idx - 1), + DEVICE_NATIVE_ENDIAN); + + /* Add device tree node for serial. */ + nodename = g_strdup_printf("/serial@%" HWADDR_PRIx, base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "ns16550a"); + qemu_fdt_setprop_cells(fdt, nodename, "reg", base, size); + qemu_fdt_setprop_cell(fdt, nodename, "interrupts", irq_pin); + qemu_fdt_setprop_cell(fdt, nodename, "clock-frequency", OR1KSIM_CLK_MHZ); + qemu_fdt_setprop(fdt, nodename, "big-endian", NULL, 0); + + /* The /chosen node is created during fdt creation. 
*/ + qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", nodename); + snprintf(alias, sizeof(alias), "uart%d", uart_idx); + qemu_fdt_setprop_string(fdt, "/aliases", alias, nodename); + g_free(nodename); } static void openrisc_sim_init(MachineState *machine) { ram_addr_t ram_size = machine->ram_size; const char *kernel_filename = machine->kernel_filename; - OpenRISCCPU *cpus[2] = {}; + OpenRISCCPU *cpus[OR1KSIM_CPUS_MAX] = {}; + Or1ksimState *state = OR1KSIM_MACHINE(machine); MemoryRegion *ram; - qemu_irq serial_irq; + hwaddr load_addr; int n; unsigned int smp_cpus = machine->smp.cpus; - assert(smp_cpus >= 1 && smp_cpus <= 2); + assert(smp_cpus >= 1 && smp_cpus <= OR1KSIM_CPUS_MAX); for (n = 0; n < smp_cpus; n++) { cpus[n] = OPENRISC_CPU(cpu_create(machine->cpu_type)); if (cpus[n] == NULL) { @@ -163,33 +310,62 @@ static void openrisc_sim_init(MachineState *machine) memory_region_init_ram(ram, NULL, "openrisc.ram", ram_size, &error_fatal); memory_region_add_subregion(get_system_memory(), 0, ram); + openrisc_create_fdt(state, or1ksim_memmap, smp_cpus, machine->ram_size, + machine->kernel_cmdline); + if (nd_table[0].used) { - openrisc_sim_net_init(0x92000000, 0x92000400, smp_cpus, - cpus, 4, nd_table); + openrisc_sim_net_init(state, or1ksim_memmap[OR1KSIM_ETHOC].base, + or1ksim_memmap[OR1KSIM_ETHOC].size, + smp_cpus, cpus, + OR1KSIM_ETHOC_IRQ, nd_table); } if (smp_cpus > 1) { - openrisc_sim_ompic_init(0x98000000, smp_cpus, cpus, 1); - - serial_irq = qemu_irq_split(get_cpu_irq(cpus, 0, 2), - get_cpu_irq(cpus, 1, 2)); - } else { - serial_irq = get_cpu_irq(cpus, 0, 2); + openrisc_sim_ompic_init(state, or1ksim_memmap[OR1KSIM_OMPIC].base, + or1ksim_memmap[OR1KSIM_OMPIC].size, + smp_cpus, cpus, OR1KSIM_OMPIC_IRQ); } - serial_mm_init(get_system_memory(), 0x90000000, 0, serial_irq, - 115200, serial_hd(0), DEVICE_NATIVE_ENDIAN); + for (n = 0; n < OR1KSIM_UART_COUNT; ++n) + openrisc_sim_serial_init(state, or1ksim_memmap[OR1KSIM_UART].base + + or1ksim_memmap[OR1KSIM_UART].size * n, + or1ksim_memmap[OR1KSIM_UART].size, + smp_cpus, cpus, OR1KSIM_UART_IRQ, n); - openrisc_load_kernel(ram_size, kernel_filename); + load_addr = openrisc_load_kernel(ram_size, kernel_filename, + &boot_info.bootstrap_pc); + if (load_addr > 0) { + if (machine->initrd_filename) { + load_addr = openrisc_load_initrd(state->fdt, + machine->initrd_filename, + load_addr, machine->ram_size); + } + boot_info.fdt_addr = openrisc_load_fdt(state->fdt, load_addr, + machine->ram_size); + } } -static void openrisc_sim_machine_init(MachineClass *mc) +static void openrisc_sim_machine_init(ObjectClass *oc, void *data) { + MachineClass *mc = MACHINE_CLASS(oc); + mc->desc = "or1k simulation"; mc->init = openrisc_sim_init; - mc->max_cpus = 2; + mc->max_cpus = OR1KSIM_CPUS_MAX; mc->is_default = true; mc->default_cpu_type = OPENRISC_CPU_TYPE_NAME("or1200"); } -DEFINE_MACHINE("or1k-sim", openrisc_sim_machine_init) +static const TypeInfo or1ksim_machine_typeinfo = { + .name = TYPE_OR1KSIM_MACHINE, + .parent = TYPE_MACHINE, + .class_init = openrisc_sim_machine_init, + .instance_size = sizeof(Or1ksimState), +}; + +static void or1ksim_machine_init_register_types(void) +{ + type_register_static(&or1ksim_machine_typeinfo); +} + +type_init(or1ksim_machine_init_register_types) diff --git a/hw/openrisc/virt.c b/hw/openrisc/virt.c new file mode 100644 index 0000000000..f8a68a6a6b --- /dev/null +++ b/hw/openrisc/virt.c @@ -0,0 +1,571 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * OpenRISC QEMU virtual machine. 
+ * + * (c) 2022 Stafford Horne + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/guest-random.h" +#include "qapi/error.h" +#include "cpu.h" +#include "exec/address-spaces.h" +#include "hw/irq.h" +#include "hw/boards.h" +#include "hw/char/serial.h" +#include "hw/core/split-irq.h" +#include "hw/openrisc/boot.h" +#include "hw/misc/sifive_test.h" +#include "hw/pci/pci.h" +#include "hw/pci-host/gpex.h" +#include "hw/qdev-properties.h" +#include "hw/rtc/goldfish_rtc.h" +#include "hw/sysbus.h" +#include "hw/virtio/virtio-mmio.h" +#include "sysemu/device_tree.h" +#include "sysemu/sysemu.h" +#include "sysemu/qtest.h" +#include "sysemu/reset.h" + +#include <libfdt.h> + +#define VIRT_CPUS_MAX 4 +#define VIRT_CLK_MHZ 20000000 + +#define TYPE_VIRT_MACHINE MACHINE_TYPE_NAME("virt") +#define VIRT_MACHINE(obj) \ + OBJECT_CHECK(OR1KVirtState, (obj), TYPE_VIRT_MACHINE) + +typedef struct OR1KVirtState { + /*< private >*/ + MachineState parent_obj; + + /*< public >*/ + void *fdt; + int fdt_size; + +} OR1KVirtState; + +enum { + VIRT_DRAM, + VIRT_ECAM, + VIRT_MMIO, + VIRT_PIO, + VIRT_TEST, + VIRT_RTC, + VIRT_VIRTIO, + VIRT_UART, + VIRT_OMPIC, +}; + +enum { + VIRT_OMPIC_IRQ = 1, + VIRT_UART_IRQ = 2, + VIRT_RTC_IRQ = 3, + VIRT_VIRTIO_IRQ = 4, /* to 12 */ + VIRTIO_COUNT = 8, + VIRT_PCI_IRQ_BASE = 13, /* to 17 */ +}; + +static const struct MemmapEntry { + hwaddr base; + hwaddr size; +} virt_memmap[] = { + [VIRT_DRAM] = { 0x00000000, 0 }, + [VIRT_UART] = { 0x90000000, 0x100 }, + [VIRT_TEST] = { 0x96000000, 0x8 }, + [VIRT_RTC] = { 0x96005000, 0x1000 }, + [VIRT_VIRTIO] = { 0x97000000, 0x1000 }, + [VIRT_OMPIC] = { 0x98000000, VIRT_CPUS_MAX * 8 }, + [VIRT_ECAM] = { 0x9e000000, 0x1000000 }, + [VIRT_PIO] = { 0x9f000000, 0x1000000 }, + [VIRT_MMIO] = { 0xa0000000, 0x10000000 }, +}; + +static struct openrisc_boot_info { + uint32_t bootstrap_pc; + uint32_t fdt_addr; +} boot_info; + +static void main_cpu_reset(void *opaque) +{ + OpenRISCCPU *cpu = opaque; + CPUState *cs = CPU(cpu); + + cpu_reset(CPU(cpu)); + + cpu_set_pc(cs, boot_info.bootstrap_pc); + cpu_set_gpr(&cpu->env, 3, boot_info.fdt_addr); +} + +static qemu_irq get_cpu_irq(OpenRISCCPU *cpus[], int cpunum, int irq_pin) +{ + return qdev_get_gpio_in_named(DEVICE(cpus[cpunum]), "IRQ", irq_pin); +} + +static qemu_irq get_per_cpu_irq(OpenRISCCPU *cpus[], int num_cpus, int irq_pin) +{ + int i; + + if (num_cpus > 1) { + DeviceState *splitter = qdev_new(TYPE_SPLIT_IRQ); + qdev_prop_set_uint32(splitter, "num-lines", num_cpus); + qdev_realize_and_unref(splitter, NULL, &error_fatal); + for (i = 0; i < num_cpus; i++) { + qdev_connect_gpio_out(splitter, i, get_cpu_irq(cpus, i, irq_pin)); + } + return qdev_get_gpio_in(splitter, 0); + } else { + return get_cpu_irq(cpus, 0, irq_pin); + } +} + +static void openrisc_create_fdt(OR1KVirtState *state, + const struct MemmapEntry *memmap, + int num_cpus, uint64_t mem_size, + const char *cmdline, + int32_t *pic_phandle) +{ + void *fdt; + int cpu; + char *nodename; + uint8_t rng_seed[32]; + + fdt = state->fdt = create_device_tree(&state->fdt_size); + if (!fdt) { + error_report("create_device_tree() failed"); + exit(1); + } + + qemu_fdt_setprop_string(fdt, "/", "compatible", "opencores,or1ksim"); + qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x1); + qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x1); + + qemu_fdt_add_subnode(fdt, "/soc"); + qemu_fdt_setprop(fdt, "/soc", "ranges", NULL, 0); + qemu_fdt_setprop_string(fdt, "/soc", "compatible", "simple-bus"); + qemu_fdt_setprop_cell(fdt, "/soc", "#address-cells", 0x1); +
qemu_fdt_setprop_cell(fdt, "/soc", "#size-cells", 0x1); + + nodename = g_strdup_printf("/memory@%" HWADDR_PRIx, + memmap[VIRT_DRAM].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_cells(fdt, nodename, "reg", + memmap[VIRT_DRAM].base, mem_size); + qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory"); + g_free(nodename); + + qemu_fdt_add_subnode(fdt, "/cpus"); + qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0); + qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1); + + for (cpu = 0; cpu < num_cpus; cpu++) { + nodename = g_strdup_printf("/cpus/cpu@%d", cpu); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", + "opencores,or1200-rtlsvn481"); + qemu_fdt_setprop_cell(fdt, nodename, "reg", cpu); + qemu_fdt_setprop_cell(fdt, nodename, "clock-frequency", + VIRT_CLK_MHZ); + g_free(nodename); + } + + nodename = (char *)"/pic"; + qemu_fdt_add_subnode(fdt, nodename); + *pic_phandle = qemu_fdt_alloc_phandle(fdt); + qemu_fdt_setprop_string(fdt, nodename, "compatible", + "opencores,or1k-pic-level"); + qemu_fdt_setprop_cell(fdt, nodename, "#interrupt-cells", 1); + qemu_fdt_setprop(fdt, nodename, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(fdt, nodename, "phandle", *pic_phandle); + + qemu_fdt_setprop_cell(fdt, "/", "interrupt-parent", *pic_phandle); + + qemu_fdt_add_subnode(fdt, "/chosen"); + if (cmdline) { + qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline); + } + + /* Pass seed to RNG. */ + qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed)); + qemu_fdt_setprop(fdt, "/chosen", "rng-seed", rng_seed, sizeof(rng_seed)); + + /* Create aliases node for use by devices. */ + qemu_fdt_add_subnode(fdt, "/aliases"); +} + +static void openrisc_virt_ompic_init(OR1KVirtState *state, hwaddr base, + hwaddr size, int num_cpus, + OpenRISCCPU *cpus[], int irq_pin) +{ + void *fdt = state->fdt; + DeviceState *dev; + SysBusDevice *s; + char *nodename; + int i; + + dev = qdev_new("or1k-ompic"); + qdev_prop_set_uint32(dev, "num-cpus", num_cpus); + + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + for (i = 0; i < num_cpus; i++) { + sysbus_connect_irq(s, i, get_cpu_irq(cpus, i, irq_pin)); + } + sysbus_mmio_map(s, 0, base); + + /* Add device tree node for ompic. */ + nodename = g_strdup_printf("/ompic@%" HWADDR_PRIx, base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "openrisc,ompic"); + qemu_fdt_setprop_cells(fdt, nodename, "reg", base, size); + qemu_fdt_setprop(fdt, nodename, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(fdt, nodename, "#interrupt-cells", 0); + qemu_fdt_setprop_cell(fdt, nodename, "interrupts", irq_pin); + g_free(nodename); +} + +static void openrisc_virt_serial_init(OR1KVirtState *state, hwaddr base, + hwaddr size, int num_cpus, + OpenRISCCPU *cpus[], int irq_pin) +{ + void *fdt = state->fdt; + char *nodename; + qemu_irq serial_irq = get_per_cpu_irq(cpus, num_cpus, irq_pin); + + serial_mm_init(get_system_memory(), base, 0, serial_irq, 115200, + serial_hd(0), DEVICE_NATIVE_ENDIAN); + + /* Add device tree node for serial. 
*/ + nodename = g_strdup_printf("/serial@%" HWADDR_PRIx, base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "ns16550a"); + qemu_fdt_setprop_cells(fdt, nodename, "reg", base, size); + qemu_fdt_setprop_cell(fdt, nodename, "interrupts", irq_pin); + qemu_fdt_setprop_cell(fdt, nodename, "clock-frequency", VIRT_CLK_MHZ); + qemu_fdt_setprop(fdt, nodename, "big-endian", NULL, 0); + + /* The /chosen node is created during fdt creation. */ + qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", nodename); + qemu_fdt_setprop_string(fdt, "/aliases", "uart0", nodename); + g_free(nodename); +} + +static void openrisc_virt_test_init(OR1KVirtState *state, hwaddr base, + hwaddr size) +{ + void *fdt = state->fdt; + int test_ph; + char *nodename; + + /* SiFive Test MMIO device */ + sifive_test_create(base); + + /* SiFive Test MMIO Reset device FDT */ + nodename = g_strdup_printf("/soc/test@%" HWADDR_PRIx, base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "syscon"); + test_ph = qemu_fdt_alloc_phandle(fdt); + qemu_fdt_setprop_cells(fdt, nodename, "reg", base, size); + qemu_fdt_setprop_cell(fdt, nodename, "phandle", test_ph); + qemu_fdt_setprop(fdt, nodename, "big-endian", NULL, 0); + g_free(nodename); + + nodename = g_strdup_printf("/soc/reboot"); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "syscon-reboot"); + qemu_fdt_setprop_cell(fdt, nodename, "regmap", test_ph); + qemu_fdt_setprop_cell(fdt, nodename, "offset", 0x0); + qemu_fdt_setprop_cell(fdt, nodename, "value", FINISHER_RESET); + g_free(nodename); + + nodename = g_strdup_printf("/soc/poweroff"); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "syscon-poweroff"); + qemu_fdt_setprop_cell(fdt, nodename, "regmap", test_ph); + qemu_fdt_setprop_cell(fdt, nodename, "offset", 0x0); + qemu_fdt_setprop_cell(fdt, nodename, "value", FINISHER_PASS); + g_free(nodename); + +} + +static void openrisc_virt_rtc_init(OR1KVirtState *state, hwaddr base, + hwaddr size, int num_cpus, + OpenRISCCPU *cpus[], int irq_pin) +{ + void *fdt = state->fdt; + char *nodename; + qemu_irq rtc_irq = get_per_cpu_irq(cpus, num_cpus, irq_pin); + + /* Goldfish RTC */ + sysbus_create_simple(TYPE_GOLDFISH_RTC, base, rtc_irq); + + /* Goldfish RTC FDT */ + nodename = g_strdup_printf("/soc/rtc@%" HWADDR_PRIx, base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", + "google,goldfish-rtc"); + qemu_fdt_setprop_cells(fdt, nodename, "reg", base, size); + qemu_fdt_setprop_cell(fdt, nodename, "interrupts", irq_pin); + g_free(nodename); + +} + +static void create_pcie_irq_map(void *fdt, char *nodename, int irq_base, + uint32_t irqchip_phandle) +{ + int pin, dev; + uint32_t irq_map_stride = 0; + uint32_t full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS * 6] = {}; + uint32_t *irq_map = full_irq_map; + + /* + * This code creates a standard swizzle of interrupts such that + * each device's first interrupt is based on it's PCI_SLOT number. + * (See pci_swizzle_map_irq_fn()) + * + * We only need one entry per interrupt in the table (not one per + * possible slot) seeing the interrupt-map-mask will allow the table + * to wrap to any number of devices. 
+ */ + for (dev = 0; dev < GPEX_NUM_IRQS; dev++) { + int devfn = dev << 3; + + for (pin = 0; pin < GPEX_NUM_IRQS; pin++) { + int irq_nr = irq_base + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS); + int i = 0; + + /* Fill PCI address cells */ + irq_map[i++] = cpu_to_be32(devfn << 8); + irq_map[i++] = 0; + irq_map[i++] = 0; + + /* Fill PCI Interrupt cells */ + irq_map[i++] = cpu_to_be32(pin + 1); + + /* Fill interrupt controller phandle and cells */ + irq_map[i++] = cpu_to_be32(irqchip_phandle); + irq_map[i++] = cpu_to_be32(irq_nr); + + if (!irq_map_stride) { + irq_map_stride = i; + } + irq_map += irq_map_stride; + } + } + + qemu_fdt_setprop(fdt, nodename, "interrupt-map", full_irq_map, + GPEX_NUM_IRQS * GPEX_NUM_IRQS * + irq_map_stride * sizeof(uint32_t)); + + qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask", + 0x1800, 0, 0, 0x7); +} + +static void openrisc_virt_pcie_init(OR1KVirtState *state, + hwaddr ecam_base, hwaddr ecam_size, + hwaddr pio_base, hwaddr pio_size, + hwaddr mmio_base, hwaddr mmio_size, + int num_cpus, OpenRISCCPU *cpus[], + int irq_base, int32_t pic_phandle) +{ + void *fdt = state->fdt; + char *nodename; + MemoryRegion *alias; + MemoryRegion *reg; + DeviceState *dev; + qemu_irq pcie_irq; + int i; + + dev = qdev_new(TYPE_GPEX_HOST); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + /* Map ECAM space. */ + alias = g_new0(MemoryRegion, 1); + reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_init_alias(alias, OBJECT(dev), "pcie-ecam", + reg, 0, ecam_size); + memory_region_add_subregion(get_system_memory(), ecam_base, alias); + + /* + * Map the MMIO window into system address space so as to expose + * the section of PCI MMIO space which starts at the same base address + * (ie 1:1 mapping for that part of PCI MMIO space visible through + * the window). + */ + alias = g_new0(MemoryRegion, 1); + reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1); + memory_region_init_alias(alias, OBJECT(dev), "pcie-mmio", + reg, mmio_base, mmio_size); + memory_region_add_subregion(get_system_memory(), mmio_base, alias); + + /* Map IO port space. */ + alias = g_new0(MemoryRegion, 1); + reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 2); + memory_region_init_alias(alias, OBJECT(dev), "pcie-pio", + reg, 0, pio_size); + memory_region_add_subregion(get_system_memory(), pio_base, alias); + + /* Connect IRQ lines. 
*/ + for (i = 0; i < GPEX_NUM_IRQS; i++) { + pcie_irq = get_per_cpu_irq(cpus, num_cpus, irq_base + i); + + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, pcie_irq); + gpex_set_irq_num(GPEX_HOST(dev), i, irq_base + i); + } + + nodename = g_strdup_printf("/soc/pci@%" HWADDR_PRIx, ecam_base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_cell(fdt, nodename, "#interrupt-cells", 1); + qemu_fdt_setprop_cell(fdt, nodename, "#address-cells", 3); + qemu_fdt_setprop_cell(fdt, nodename, "#size-cells", 2); + qemu_fdt_setprop_string(fdt, nodename, "compatible", + "pci-host-ecam-generic"); + qemu_fdt_setprop_string(fdt, nodename, "device_type", "pci"); + qemu_fdt_setprop_cell(fdt, nodename, "linux,pci-domain", 0); + qemu_fdt_setprop_cells(fdt, nodename, "bus-range", 0, + ecam_size / PCIE_MMCFG_SIZE_MIN - 1); + qemu_fdt_setprop(fdt, nodename, "dma-coherent", NULL, 0); + qemu_fdt_setprop_cells(fdt, nodename, "reg", ecam_base, ecam_size); + /* pci-address(3) cpu-address(1) pci-size(2) */ + qemu_fdt_setprop_cells(fdt, nodename, "ranges", + FDT_PCI_RANGE_IOPORT, 0, 0, + pio_base, 0, pio_size, + FDT_PCI_RANGE_MMIO, 0, mmio_base, + mmio_base, 0, mmio_size); + + create_pcie_irq_map(fdt, nodename, irq_base, pic_phandle); + g_free(nodename); +} + +static void openrisc_virt_virtio_init(OR1KVirtState *state, hwaddr base, + hwaddr size, int num_cpus, + OpenRISCCPU *cpus[], int irq_pin) +{ + void *fdt = state->fdt; + char *nodename; + DeviceState *dev; + SysBusDevice *sysbus; + qemu_irq virtio_irq = get_per_cpu_irq(cpus, num_cpus, irq_pin); + + /* VirtIO MMIO devices */ + dev = qdev_new(TYPE_VIRTIO_MMIO); + qdev_prop_set_bit(dev, "force-legacy", false); + sysbus = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sysbus, &error_fatal); + sysbus_connect_irq(sysbus, 0, virtio_irq); + sysbus_mmio_map(sysbus, 0, base); + + /* VirtIO MMIO devices FDT */ + nodename = g_strdup_printf("/soc/virtio_mmio@%" HWADDR_PRIx, base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "virtio,mmio"); + qemu_fdt_setprop_cells(fdt, nodename, "reg", base, size); + qemu_fdt_setprop_cell(fdt, nodename, "interrupts", irq_pin); + g_free(nodename); +} + +static void openrisc_virt_init(MachineState *machine) +{ + ram_addr_t ram_size = machine->ram_size; + const char *kernel_filename = machine->kernel_filename; + OpenRISCCPU *cpus[VIRT_CPUS_MAX] = {}; + OR1KVirtState *state = VIRT_MACHINE(machine); + MemoryRegion *ram; + hwaddr load_addr; + int n; + unsigned int smp_cpus = machine->smp.cpus; + int32_t pic_phandle; + + assert(smp_cpus >= 1 && smp_cpus <= VIRT_CPUS_MAX); + for (n = 0; n < smp_cpus; n++) { + cpus[n] = OPENRISC_CPU(cpu_create(machine->cpu_type)); + if (cpus[n] == NULL) { + fprintf(stderr, "Unable to find CPU definition!\n"); + exit(1); + } + + cpu_openrisc_clock_init(cpus[n]); + + qemu_register_reset(main_cpu_reset, cpus[n]); + } + + ram = g_malloc(sizeof(*ram)); + memory_region_init_ram(ram, NULL, "openrisc.ram", ram_size, &error_fatal); + memory_region_add_subregion(get_system_memory(), 0, ram); + + openrisc_create_fdt(state, virt_memmap, smp_cpus, machine->ram_size, + machine->kernel_cmdline, &pic_phandle); + + if (smp_cpus > 1) { + openrisc_virt_ompic_init(state, virt_memmap[VIRT_OMPIC].base, + virt_memmap[VIRT_OMPIC].size, + smp_cpus, cpus, VIRT_OMPIC_IRQ); + } + + openrisc_virt_serial_init(state, virt_memmap[VIRT_UART].base, + virt_memmap[VIRT_UART].size, + smp_cpus, cpus, VIRT_UART_IRQ); + + openrisc_virt_test_init(state, virt_memmap[VIRT_TEST].base, + 
virt_memmap[VIRT_TEST].size); + + openrisc_virt_rtc_init(state, virt_memmap[VIRT_RTC].base, + virt_memmap[VIRT_RTC].size, smp_cpus, cpus, + VIRT_RTC_IRQ); + + openrisc_virt_pcie_init(state, virt_memmap[VIRT_ECAM].base, + virt_memmap[VIRT_ECAM].size, + virt_memmap[VIRT_PIO].base, + virt_memmap[VIRT_PIO].size, + virt_memmap[VIRT_MMIO].base, + virt_memmap[VIRT_MMIO].size, + smp_cpus, cpus, + VIRT_PCI_IRQ_BASE, pic_phandle); + + for (n = 0; n < VIRTIO_COUNT; n++) { + openrisc_virt_virtio_init(state, virt_memmap[VIRT_VIRTIO].base + + n * virt_memmap[VIRT_VIRTIO].size, + virt_memmap[VIRT_VIRTIO].size, + smp_cpus, cpus, VIRT_VIRTIO_IRQ + n); + } + + load_addr = openrisc_load_kernel(ram_size, kernel_filename, + &boot_info.bootstrap_pc); + if (load_addr > 0) { + if (machine->initrd_filename) { + load_addr = openrisc_load_initrd(state->fdt, + machine->initrd_filename, + load_addr, machine->ram_size); + } + boot_info.fdt_addr = openrisc_load_fdt(state->fdt, load_addr, + machine->ram_size); + } +} + +static void openrisc_virt_machine_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "or1k virtual machine"; + mc->init = openrisc_virt_init; + mc->max_cpus = VIRT_CPUS_MAX; + mc->is_default = false; + mc->default_cpu_type = OPENRISC_CPU_TYPE_NAME("or1200"); +} + +static const TypeInfo or1ksim_machine_typeinfo = { + .name = TYPE_VIRT_MACHINE, + .parent = TYPE_MACHINE, + .class_init = openrisc_virt_machine_init, + .instance_size = sizeof(OR1KVirtState), +}; + +static void or1ksim_machine_init_register_types(void) +{ + type_register_static(&or1ksim_machine_typeinfo); +} + +type_init(or1ksim_machine_init_register_types) diff --git a/hw/pci-bridge/Kconfig b/hw/pci-bridge/Kconfig index f8df4315ba..02614f49aa 100644 --- a/hw/pci-bridge/Kconfig +++ b/hw/pci-bridge/Kconfig @@ -27,3 +27,8 @@ config DEC_PCI config SIMBA bool + +config CXL + bool + default y if PCI_EXPRESS && PXB + depends on PCI_EXPRESS && MSI_NONBROKEN && PXB diff --git a/hw/pci-bridge/cxl_downstream.c b/hw/pci-bridge/cxl_downstream.c new file mode 100644 index 0000000000..a361e519d0 --- /dev/null +++ b/hw/pci-bridge/cxl_downstream.c @@ -0,0 +1,249 @@ +/* + * Emulated CXL Switch Downstream Port + * + * Copyright (c) 2022 Huawei Technologies. 
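Before the CXL switch port code that follows, a usage sketch for the or1k "virt" board defined above. It assumes the machine type registers under the name "virt" (TYPE_VIRT_MACHINE is defined outside this hunk) and uses only standard QEMU options; kernel, initrd and SMP count are placeholders:

qemu-system-or1k -M virt -cpu or1200 -smp 2 \
    -kernel vmlinux -initrd rootfs.cpio \
    -append "console=ttyS0" \
    -device virtio-net-device,netdev=n0 -netdev user,id=n0 \
    -nographic

The serial node created above sets stdout-path, so the kernel console lands on the ns16550a UART, and each virtio device binds to one of the VIRTIO_COUNT virtio-mmio transports instantiated in openrisc_virt_init().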
+ * + * Based on xio3130_downstream.c + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/pci/msi.h" +#include "hw/pci/pcie.h" +#include "hw/pci/pcie_port.h" +#include "qapi/error.h" + +typedef struct CXLDownStreamPort { + /*< private >*/ + PCIESlot parent_obj; + + /*< public >*/ + CXLComponentState cxl_cstate; +} CXLDownstreamPort; + +#define TYPE_CXL_DSP "cxl-downstream" +DECLARE_INSTANCE_CHECKER(CXLDownstreamPort, CXL_DSP, TYPE_CXL_DSP) + +#define CXL_DOWNSTREAM_PORT_MSI_OFFSET 0x70 +#define CXL_DOWNSTREAM_PORT_MSI_NR_VECTOR 1 +#define CXL_DOWNSTREAM_PORT_EXP_OFFSET 0x90 +#define CXL_DOWNSTREAM_PORT_AER_OFFSET 0x100 +#define CXL_DOWNSTREAM_PORT_DVSEC_OFFSET \ + (CXL_DOWNSTREAM_PORT_AER_OFFSET + PCI_ERR_SIZEOF) + +static void latch_registers(CXLDownstreamPort *dsp) +{ + uint32_t *reg_state = dsp->cxl_cstate.crb.cache_mem_registers; + uint32_t *write_msk = dsp->cxl_cstate.crb.cache_mem_regs_write_mask; + + cxl_component_register_init_common(reg_state, write_msk, + CXL2_DOWNSTREAM_PORT); +} + +/* TODO: Look at sharing this code acorss all CXL port types */ +static void cxl_dsp_dvsec_write_config(PCIDevice *dev, uint32_t addr, + uint32_t val, int len) +{ + CXLDownstreamPort *dsp = CXL_DSP(dev); + CXLComponentState *cxl_cstate = &dsp->cxl_cstate; + + if (range_contains(&cxl_cstate->dvsecs[EXTENSIONS_PORT_DVSEC], addr)) { + uint8_t *reg = &dev->config[addr]; + addr -= cxl_cstate->dvsecs[EXTENSIONS_PORT_DVSEC].lob; + if (addr == PORT_CONTROL_OFFSET) { + if (pci_get_word(reg) & PORT_CONTROL_UNMASK_SBR) { + /* unmask SBR */ + qemu_log_mask(LOG_UNIMP, "SBR mask control is not supported\n"); + } + if (pci_get_word(reg) & PORT_CONTROL_ALT_MEMID_EN) { + /* Alt Memory & ID Space Enable */ + qemu_log_mask(LOG_UNIMP, + "Alt Memory & ID space is not supported\n"); + + } + } + } +} + +static void cxl_dsp_config_write(PCIDevice *d, uint32_t address, + uint32_t val, int len) +{ + uint16_t slt_ctl, slt_sta; + + pcie_cap_slot_get(d, &slt_ctl, &slt_sta); + pci_bridge_write_config(d, address, val, len); + pcie_cap_flr_write_config(d, address, val, len); + pcie_cap_slot_write_config(d, slt_ctl, slt_sta, address, val, len); + pcie_aer_write_config(d, address, val, len); + + cxl_dsp_dvsec_write_config(d, address, val, len); +} + +static void cxl_dsp_reset(DeviceState *qdev) +{ + PCIDevice *d = PCI_DEVICE(qdev); + CXLDownstreamPort *dsp = CXL_DSP(qdev); + + pcie_cap_deverr_reset(d); + pcie_cap_slot_reset(d); + pcie_cap_arifwd_reset(d); + pci_bridge_reset(qdev); + + latch_registers(dsp); +} + +static void build_dvsecs(CXLComponentState *cxl) +{ + uint8_t *dvsec; + + dvsec = (uint8_t *)&(CXLDVSECPortExtensions){ 0 }; + cxl_component_create_dvsec(cxl, CXL2_DOWNSTREAM_PORT, + EXTENSIONS_PORT_DVSEC_LENGTH, + EXTENSIONS_PORT_DVSEC, + EXTENSIONS_PORT_DVSEC_REVID, dvsec); + + dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){ + .cap = 0x27, /* Cache, IO, Mem, non-MLD */ + .ctrl = 0x02, /* IO always enabled */ + .status = 0x26, /* same */ + .rcvd_mod_ts_data_phase1 = 0xef, /* WTF? 
*/ + }; + cxl_component_create_dvsec(cxl, CXL2_DOWNSTREAM_PORT, + PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0, + PCIE_FLEXBUS_PORT_DVSEC, + PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec); + + dvsec = (uint8_t *)&(CXLDVSECPortGPF){ + .rsvd = 0, + .phase1_ctrl = 1, /* 1μs timeout */ + .phase2_ctrl = 1, /* 1μs timeout */ + }; + cxl_component_create_dvsec(cxl, CXL2_DOWNSTREAM_PORT, + GPF_PORT_DVSEC_LENGTH, GPF_PORT_DVSEC, + GPF_PORT_DVSEC_REVID, dvsec); + + dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){ + .rsvd = 0, + .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX, + .reg0_base_hi = 0, + }; + cxl_component_create_dvsec(cxl, CXL2_DOWNSTREAM_PORT, + REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC, + REG_LOC_DVSEC_REVID, dvsec); +} + +static void cxl_dsp_realize(PCIDevice *d, Error **errp) +{ + PCIEPort *p = PCIE_PORT(d); + PCIESlot *s = PCIE_SLOT(d); + CXLDownstreamPort *dsp = CXL_DSP(d); + CXLComponentState *cxl_cstate = &dsp->cxl_cstate; + ComponentRegisters *cregs = &cxl_cstate->crb; + MemoryRegion *component_bar = &cregs->component_registers; + int rc; + + pci_bridge_initfn(d, TYPE_PCIE_BUS); + pcie_port_init_reg(d); + + rc = msi_init(d, CXL_DOWNSTREAM_PORT_MSI_OFFSET, + CXL_DOWNSTREAM_PORT_MSI_NR_VECTOR, + true, true, errp); + if (rc) { + assert(rc == -ENOTSUP); + goto err_bridge; + } + + rc = pcie_cap_init(d, CXL_DOWNSTREAM_PORT_EXP_OFFSET, + PCI_EXP_TYPE_DOWNSTREAM, p->port, + errp); + if (rc < 0) { + goto err_msi; + } + + pcie_cap_flr_init(d); + pcie_cap_deverr_init(d); + pcie_cap_slot_init(d, s); + pcie_cap_arifwd_init(d); + + pcie_chassis_create(s->chassis); + rc = pcie_chassis_add_slot(s); + if (rc < 0) { + error_setg(errp, "Can't add chassis slot, error %d", rc); + goto err_pcie_cap; + } + + rc = pcie_aer_init(d, PCI_ERR_VER, CXL_DOWNSTREAM_PORT_AER_OFFSET, + PCI_ERR_SIZEOF, errp); + if (rc < 0) { + goto err_chassis; + } + + cxl_cstate->dvsec_offset = CXL_DOWNSTREAM_PORT_DVSEC_OFFSET; + cxl_cstate->pdev = d; + build_dvsecs(cxl_cstate); + cxl_component_register_block_init(OBJECT(d), cxl_cstate, TYPE_CXL_DSP); + pci_register_bar(d, CXL_COMPONENT_REG_BAR_IDX, + PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64, + component_bar); + + return; + + err_chassis: + pcie_chassis_del_slot(s); + err_pcie_cap: + pcie_cap_exit(d); + err_msi: + msi_uninit(d); + err_bridge: + pci_bridge_exitfn(d); +} + +static void cxl_dsp_exitfn(PCIDevice *d) +{ + PCIESlot *s = PCIE_SLOT(d); + + pcie_aer_exit(d); + pcie_chassis_del_slot(s); + pcie_cap_exit(d); + msi_uninit(d); + pci_bridge_exitfn(d); +} + +static void cxl_dsp_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PCIDeviceClass *k = PCI_DEVICE_CLASS(oc); + + k->is_bridge = true; + k->config_write = cxl_dsp_config_write; + k->realize = cxl_dsp_realize; + k->exit = cxl_dsp_exitfn; + k->vendor_id = 0x19e5; /* Huawei */ + k->device_id = 0xa129; /* Emulated CXL Switch Downstream Port */ + k->revision = 0; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->desc = "CXL Switch Downstream Port"; + dc->reset = cxl_dsp_reset; +} + +static const TypeInfo cxl_dsp_info = { + .name = TYPE_CXL_DSP, + .instance_size = sizeof(CXLDownstreamPort), + .parent = TYPE_PCIE_SLOT, + .class_init = cxl_dsp_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { INTERFACE_CXL_DEVICE }, + { } + }, +}; + +static void cxl_dsp_register_type(void) +{ + type_register_static(&cxl_dsp_info); +} + +type_init(cxl_dsp_register_type); diff --git a/hw/pci-bridge/cxl_root_port.c b/hw/pci-bridge/cxl_root_port.c new file mode 100644 
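With the switch downstream port model in place, a command-line sketch shows how the CXL pieces of this series are meant to stack. Device and property names other than cxl-downstream (pxb-cxl, cxl-rp, cxl-upstream, cxl-type3, and the cxl-fmw machine options) are defined elsewhere in the series, so treat the exact spellings as assumptions; sizes and paths are placeholders:

qemu-system-x86_64 -M q35,cxl=on -m 4G \
    -object memory-backend-file,id=cxl-mem0,share=on,mem-path=/tmp/cxl.raw,size=256M \
    -object memory-backend-file,id=cxl-lsa0,share=on,mem-path=/tmp/lsa.raw,size=256M \
    -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \
    -device cxl-rp,port=0,bus=cxl.1,id=rp0,chassis=0,slot=2 \
    -device cxl-upstream,bus=rp0,id=us0 \
    -device cxl-downstream,port=0,bus=us0,chassis=0,slot=4,id=swport0 \
    -device cxl-type3,bus=swport0,memdev=cxl-mem0,lsa=cxl-lsa0,id=cxl-pmem0 \
    -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G

The chassis/slot pairs must be unique because pcie_chassis_add_slot(), called from cxl_dsp_realize() above, registers every slot with its chassis and fails on duplicates.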
index 0000000000..fb213fa06e --- /dev/null +++ b/hw/pci-bridge/cxl_root_port.c @@ -0,0 +1,236 @@ +/* + * CXL 2.0 Root Port Implementation + * + * Copyright(C) 2020 Intel Corporation. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/range.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pcie_port.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "qapi/error.h" +#include "hw/cxl/cxl.h" + +#define CXL_ROOT_PORT_DID 0x7075 + +/* Copied from the gen root port which we derive */ +#define GEN_PCIE_ROOT_PORT_AER_OFFSET 0x100 +#define GEN_PCIE_ROOT_PORT_ACS_OFFSET \ + (GEN_PCIE_ROOT_PORT_AER_OFFSET + PCI_ERR_SIZEOF) +#define CXL_ROOT_PORT_DVSEC_OFFSET \ + (GEN_PCIE_ROOT_PORT_ACS_OFFSET + PCI_ACS_SIZEOF) + +typedef struct CXLRootPort { + /*< private >*/ + PCIESlot parent_obj; + + CXLComponentState cxl_cstate; + PCIResReserve res_reserve; +} CXLRootPort; + +#define TYPE_CXL_ROOT_PORT "cxl-rp" +DECLARE_INSTANCE_CHECKER(CXLRootPort, CXL_ROOT_PORT, TYPE_CXL_ROOT_PORT) + +static void latch_registers(CXLRootPort *crp) +{ + uint32_t *reg_state = crp->cxl_cstate.crb.cache_mem_registers; + uint32_t *write_msk = crp->cxl_cstate.crb.cache_mem_regs_write_mask; + + cxl_component_register_init_common(reg_state, write_msk, CXL2_ROOT_PORT); +} + +static void build_dvsecs(CXLComponentState *cxl) +{ + uint8_t *dvsec; + + dvsec = (uint8_t *)&(CXLDVSECPortExtensions){ 0 }; + cxl_component_create_dvsec(cxl, CXL2_ROOT_PORT, + EXTENSIONS_PORT_DVSEC_LENGTH, + EXTENSIONS_PORT_DVSEC, + EXTENSIONS_PORT_DVSEC_REVID, dvsec); + + dvsec = (uint8_t *)&(CXLDVSECPortGPF){ + .rsvd = 0, + .phase1_ctrl = 1, /* 1μs timeout */ + .phase2_ctrl = 1, /* 1μs timeout */ + }; + cxl_component_create_dvsec(cxl, CXL2_ROOT_PORT, + GPF_PORT_DVSEC_LENGTH, GPF_PORT_DVSEC, + GPF_PORT_DVSEC_REVID, dvsec); + + dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){ + .cap = 0x26, /* IO, Mem, non-MLD */ + .ctrl = 0x2, + .status = 0x26, /* same */ + .rcvd_mod_ts_data_phase1 = 0xef, + }; + cxl_component_create_dvsec(cxl, CXL2_ROOT_PORT, + PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0, + PCIE_FLEXBUS_PORT_DVSEC, + PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec); + + dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){ + .rsvd = 0, + .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX, + .reg0_base_hi = 0, + }; + cxl_component_create_dvsec(cxl, CXL2_ROOT_PORT, + REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC, + REG_LOC_DVSEC_REVID, dvsec); +} + +static void cxl_rp_realize(DeviceState *dev, Error **errp) +{ + PCIDevice *pci_dev = PCI_DEVICE(dev); + PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev); + CXLRootPort *crp = CXL_ROOT_PORT(dev); + CXLComponentState *cxl_cstate = &crp->cxl_cstate; + ComponentRegisters *cregs = &cxl_cstate->crb; + MemoryRegion *component_bar = &cregs->component_registers; + Error *local_err = NULL; + + rpc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, 
local_err); + return; + } + + int rc = + pci_bridge_qemu_reserve_cap_init(pci_dev, 0, crp->res_reserve, errp); + if (rc < 0) { + rpc->parent_class.exit(pci_dev); + return; + } + + if (!crp->res_reserve.io || crp->res_reserve.io == -1) { + pci_word_test_and_clear_mask(pci_dev->wmask + PCI_COMMAND, + PCI_COMMAND_IO); + pci_dev->wmask[PCI_IO_BASE] = 0; + pci_dev->wmask[PCI_IO_LIMIT] = 0; + } + + cxl_cstate->dvsec_offset = CXL_ROOT_PORT_DVSEC_OFFSET; + cxl_cstate->pdev = pci_dev; + build_dvsecs(&crp->cxl_cstate); + + cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate, + TYPE_CXL_ROOT_PORT); + + pci_register_bar(pci_dev, CXL_COMPONENT_REG_BAR_IDX, + PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64, + component_bar); +} + +static void cxl_rp_reset(DeviceState *dev) +{ + PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev); + CXLRootPort *crp = CXL_ROOT_PORT(dev); + + rpc->parent_reset(dev); + + latch_registers(crp); +} + +static Property gen_rp_props[] = { + DEFINE_PROP_UINT32("bus-reserve", CXLRootPort, res_reserve.bus, -1), + DEFINE_PROP_SIZE("io-reserve", CXLRootPort, res_reserve.io, -1), + DEFINE_PROP_SIZE("mem-reserve", CXLRootPort, res_reserve.mem_non_pref, -1), + DEFINE_PROP_SIZE("pref32-reserve", CXLRootPort, res_reserve.mem_pref_32, + -1), + DEFINE_PROP_SIZE("pref64-reserve", CXLRootPort, res_reserve.mem_pref_64, + -1), + DEFINE_PROP_END_OF_LIST() +}; + +static void cxl_rp_dvsec_write_config(PCIDevice *dev, uint32_t addr, + uint32_t val, int len) +{ + CXLRootPort *crp = CXL_ROOT_PORT(dev); + + if (range_contains(&crp->cxl_cstate.dvsecs[EXTENSIONS_PORT_DVSEC], addr)) { + uint8_t *reg = &dev->config[addr]; + addr -= crp->cxl_cstate.dvsecs[EXTENSIONS_PORT_DVSEC].lob; + if (addr == PORT_CONTROL_OFFSET) { + if (pci_get_word(reg) & PORT_CONTROL_UNMASK_SBR) { + /* unmask SBR */ + qemu_log_mask(LOG_UNIMP, "SBR mask control is not supported\n"); + } + if (pci_get_word(reg) & PORT_CONTROL_ALT_MEMID_EN) { + /* Alt Memory & ID Space Enable */ + qemu_log_mask(LOG_UNIMP, + "Alt Memory & ID space is not supported\n"); + } + } + } +} + +static void cxl_rp_write_config(PCIDevice *d, uint32_t address, uint32_t val, + int len) +{ + uint16_t slt_ctl, slt_sta; + + pcie_cap_slot_get(d, &slt_ctl, &slt_sta); + pci_bridge_write_config(d, address, val, len); + pcie_cap_flr_write_config(d, address, val, len); + pcie_cap_slot_write_config(d, slt_ctl, slt_sta, address, val, len); + pcie_aer_write_config(d, address, val, len); + + cxl_rp_dvsec_write_config(d, address, val, len); +} + +static void cxl_root_port_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PCIDeviceClass *k = PCI_DEVICE_CLASS(oc); + PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(oc); + + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->device_id = CXL_ROOT_PORT_DID; + dc->desc = "CXL Root Port"; + k->revision = 0; + device_class_set_props(dc, gen_rp_props); + k->config_write = cxl_rp_write_config; + + device_class_set_parent_realize(dc, cxl_rp_realize, &rpc->parent_realize); + device_class_set_parent_reset(dc, cxl_rp_reset, &rpc->parent_reset); + + rpc->aer_offset = GEN_PCIE_ROOT_PORT_AER_OFFSET; + rpc->acs_offset = GEN_PCIE_ROOT_PORT_ACS_OFFSET; + + dc->hotpluggable = false; +} + +static const TypeInfo cxl_root_port_info = { + .name = TYPE_CXL_ROOT_PORT, + .parent = TYPE_PCIE_ROOT_PORT, + .instance_size = sizeof(CXLRootPort), + .class_init = cxl_root_port_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CXL_DEVICE }, + { } + }, +}; + +static void cxl_register(void) +{ + 
type_register_static(&cxl_root_port_info); +} + +type_init(cxl_register); diff --git a/hw/pci-bridge/cxl_upstream.c b/hw/pci-bridge/cxl_upstream.c new file mode 100644 index 0000000000..9b8b57df9d --- /dev/null +++ b/hw/pci-bridge/cxl_upstream.c @@ -0,0 +1,409 @@ +/* + * Emulated CXL Switch Upstream Port + * + * Copyright (c) 2022 Huawei Technologies. + * + * Based on xio3130_upstream.c + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/qdev-properties.h" +#include "hw/pci/msi.h" +#include "hw/pci/pcie.h" +#include "hw/pci/pcie_port.h" + +#define CXL_UPSTREAM_PORT_MSI_NR_VECTOR 2 + +#define CXL_UPSTREAM_PORT_MSI_OFFSET 0x70 +#define CXL_UPSTREAM_PORT_PCIE_CAP_OFFSET 0x90 +#define CXL_UPSTREAM_PORT_AER_OFFSET 0x100 +#define CXL_UPSTREAM_PORT_DVSEC_OFFSET \ + (CXL_UPSTREAM_PORT_AER_OFFSET + PCI_ERR_SIZEOF) + +typedef struct CXLUpstreamPort { + /*< private >*/ + PCIEPort parent_obj; + + /*< public >*/ + CXLComponentState cxl_cstate; + DOECap doe_cdat; +} CXLUpstreamPort; + +CXLComponentState *cxl_usp_to_cstate(CXLUpstreamPort *usp) +{ + return &usp->cxl_cstate; +} + +static void cxl_usp_dvsec_write_config(PCIDevice *dev, uint32_t addr, + uint32_t val, int len) +{ + CXLUpstreamPort *usp = CXL_USP(dev); + + if (range_contains(&usp->cxl_cstate.dvsecs[EXTENSIONS_PORT_DVSEC], addr)) { + uint8_t *reg = &dev->config[addr]; + addr -= usp->cxl_cstate.dvsecs[EXTENSIONS_PORT_DVSEC].lob; + if (addr == PORT_CONTROL_OFFSET) { + if (pci_get_word(reg) & PORT_CONTROL_UNMASK_SBR) { + /* unmask SBR */ + qemu_log_mask(LOG_UNIMP, "SBR mask control is not supported\n"); + } + if (pci_get_word(reg) & PORT_CONTROL_ALT_MEMID_EN) { + /* Alt Memory & ID Space Enable */ + qemu_log_mask(LOG_UNIMP, + "Alt Memory & ID space is not supported\n"); + } + } + } +} + +static void cxl_usp_write_config(PCIDevice *d, uint32_t address, + uint32_t val, int len) +{ + CXLUpstreamPort *usp = CXL_USP(d); + + pcie_doe_write_config(&usp->doe_cdat, address, val, len); + pci_bridge_write_config(d, address, val, len); + pcie_cap_flr_write_config(d, address, val, len); + pcie_aer_write_config(d, address, val, len); + + cxl_usp_dvsec_write_config(d, address, val, len); +} + +static uint32_t cxl_usp_read_config(PCIDevice *d, uint32_t address, int len) +{ + CXLUpstreamPort *usp = CXL_USP(d); + uint32_t val; + + if (pcie_doe_read_config(&usp->doe_cdat, address, len, &val)) { + return val; + } + + return pci_default_read_config(d, address, len); +} + +static void latch_registers(CXLUpstreamPort *usp) +{ + uint32_t *reg_state = usp->cxl_cstate.crb.cache_mem_registers; + uint32_t *write_msk = usp->cxl_cstate.crb.cache_mem_regs_write_mask; + + cxl_component_register_init_common(reg_state, write_msk, + CXL2_UPSTREAM_PORT); + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 8); +} + +static void cxl_usp_reset(DeviceState *qdev) +{ + PCIDevice *d = PCI_DEVICE(qdev); + CXLUpstreamPort *usp = CXL_USP(qdev); + + pci_bridge_reset(qdev); + pcie_cap_deverr_reset(d); + latch_registers(usp); +} + +static void build_dvsecs(CXLComponentState *cxl) +{ + uint8_t *dvsec; + + dvsec = (uint8_t *)&(CXLDVSECPortExtensions){ + .status = 0x1, /* Port Power Management Init Complete */ + }; + cxl_component_create_dvsec(cxl, CXL2_UPSTREAM_PORT, + EXTENSIONS_PORT_DVSEC_LENGTH, + EXTENSIONS_PORT_DVSEC, + EXTENSIONS_PORT_DVSEC_REVID, dvsec); + dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){ + .cap = 0x27, /* Cache, IO, Mem, non-MLD */ + .ctrl = 0x27, /* Cache, IO, Mem */ + .status = 0x26, /* 
same */ + .rcvd_mod_ts_data_phase1 = 0xef, /* WTF? */ + }; + cxl_component_create_dvsec(cxl, CXL2_UPSTREAM_PORT, + PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0, + PCIE_FLEXBUS_PORT_DVSEC, + PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec); + + dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){ + .rsvd = 0, + .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX, + .reg0_base_hi = 0, + }; + cxl_component_create_dvsec(cxl, CXL2_UPSTREAM_PORT, + REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC, + REG_LOC_DVSEC_REVID, dvsec); +} + +static bool cxl_doe_cdat_rsp(DOECap *doe_cap) +{ + CDATObject *cdat = &CXL_USP(doe_cap->pdev)->cxl_cstate.cdat; + uint16_t ent; + void *base; + uint32_t len; + CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap); + CDATRsp rsp; + + cxl_doe_cdat_update(&CXL_USP(doe_cap->pdev)->cxl_cstate, &error_fatal); + assert(cdat->entry_len); + + /* Discard if request length mismatched */ + if (pcie_doe_get_obj_len(req) < + DIV_ROUND_UP(sizeof(CDATReq), sizeof(uint32_t))) { + return false; + } + + ent = req->entry_handle; + base = cdat->entry[ent].base; + len = cdat->entry[ent].length; + + rsp = (CDATRsp) { + .header = { + .vendor_id = CXL_VENDOR_ID, + .data_obj_type = CXL_DOE_TABLE_ACCESS, + .reserved = 0x0, + .length = DIV_ROUND_UP((sizeof(rsp) + len), sizeof(uint32_t)), + }, + .rsp_code = CXL_DOE_TAB_RSP, + .table_type = CXL_DOE_TAB_TYPE_CDAT, + .entry_handle = (ent < cdat->entry_len - 1) ? + ent + 1 : CXL_DOE_TAB_ENT_MAX, + }; + + memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp)); + memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), sizeof(uint32_t)), + base, len); + + doe_cap->read_mbox_len += rsp.header.length; + + return true; +} + +static DOEProtocol doe_cdat_prot[] = { + { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp }, + { } +}; + +enum { + CXL_USP_CDAT_SSLBIS_LAT, + CXL_USP_CDAT_SSLBIS_BW, + CXL_USP_CDAT_NUM_ENTRIES +}; + +static int build_cdat_table(CDATSubHeader ***cdat_table, void *priv) +{ + g_autofree CDATSslbis *sslbis_latency = NULL; + g_autofree CDATSslbis *sslbis_bandwidth = NULL; + CXLUpstreamPort *us = CXL_USP(priv); + PCIBus *bus = &PCI_BRIDGE(us)->sec_bus; + int devfn, sslbis_size, i; + int count = 0; + uint16_t port_ids[256]; + + for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { + PCIDevice *d = bus->devices[devfn]; + PCIEPort *port; + + if (!d || !pci_is_express(d) || !d->exp.exp_cap) { + continue; + } + + /* + * Whilst the PCI express spec doesn't allow anything other than + * downstream ports on this bus, let us be a little paranoid + */ + if (!object_dynamic_cast(OBJECT(d), TYPE_PCIE_PORT)) { + continue; + } + + port = PCIE_PORT(d); + port_ids[count] = port->port; + count++; + } + + /* May not yet have any ports - try again later */ + if (count == 0) { + return 0; + } + + sslbis_size = sizeof(CDATSslbis) + sizeof(*sslbis_latency->sslbe) * count; + sslbis_latency = g_malloc(sslbis_size); + if (!sslbis_latency) { + return -ENOMEM; + } + *sslbis_latency = (CDATSslbis) { + .sslbis_header = { + .header = { + .type = CDAT_TYPE_SSLBIS, + .length = sslbis_size, + }, + .data_type = HMATLB_DATA_TYPE_ACCESS_LATENCY, + .entry_base_unit = 10000, + }, + }; + + for (i = 0; i < count; i++) { + sslbis_latency->sslbe[i] = (CDATSslbe) { + .port_x_id = CDAT_PORT_ID_USP, + .port_y_id = port_ids[i], + .latency_bandwidth = 15, /* 150ns */ + }; + } + + sslbis_bandwidth = g_malloc(sslbis_size); + if (!sslbis_bandwidth) { + return 0; + } + *sslbis_bandwidth = (CDATSslbis) { + .sslbis_header = { + .header = { + .type = CDAT_TYPE_SSLBIS, + .length = sslbis_size, + }, + .data_type = 
HMATLB_DATA_TYPE_ACCESS_BANDWIDTH, + .entry_base_unit = 1000, + }, + }; + + for (i = 0; i < count; i++) { + sslbis_bandwidth->sslbe[i] = (CDATSslbe) { + .port_x_id = CDAT_PORT_ID_USP, + .port_y_id = port_ids[i], + .latency_bandwidth = 16, /* 16 GB/s */ + }; + } + + *cdat_table = g_malloc0(sizeof(*cdat_table) * CXL_USP_CDAT_NUM_ENTRIES); + if (!*cdat_table) { + return -ENOMEM; + } + + /* Header always at start of structure */ + (*cdat_table)[CXL_USP_CDAT_SSLBIS_LAT] = g_steal_pointer(&sslbis_latency); + (*cdat_table)[CXL_USP_CDAT_SSLBIS_BW] = g_steal_pointer(&sslbis_bandwidth); + + return CXL_USP_CDAT_NUM_ENTRIES; +} + +static void free_default_cdat_table(CDATSubHeader **cdat_table, int num, + void *priv) +{ + int i; + + for (i = 0; i < num; i++) { + g_free(cdat_table[i]); + } + g_free(cdat_table); +} + +static void cxl_usp_realize(PCIDevice *d, Error **errp) +{ + PCIEPort *p = PCIE_PORT(d); + CXLUpstreamPort *usp = CXL_USP(d); + CXLComponentState *cxl_cstate = &usp->cxl_cstate; + ComponentRegisters *cregs = &cxl_cstate->crb; + MemoryRegion *component_bar = &cregs->component_registers; + int rc; + + pci_bridge_initfn(d, TYPE_PCIE_BUS); + pcie_port_init_reg(d); + + rc = msi_init(d, CXL_UPSTREAM_PORT_MSI_OFFSET, + CXL_UPSTREAM_PORT_MSI_NR_VECTOR, true, true, errp); + if (rc) { + assert(rc == -ENOTSUP); + goto err_bridge; + } + + rc = pcie_cap_init(d, CXL_UPSTREAM_PORT_PCIE_CAP_OFFSET, + PCI_EXP_TYPE_UPSTREAM, p->port, errp); + if (rc < 0) { + goto err_msi; + } + + pcie_cap_flr_init(d); + pcie_cap_deverr_init(d); + rc = pcie_aer_init(d, PCI_ERR_VER, CXL_UPSTREAM_PORT_AER_OFFSET, + PCI_ERR_SIZEOF, errp); + if (rc) { + goto err_cap; + } + + cxl_cstate->dvsec_offset = CXL_UPSTREAM_PORT_DVSEC_OFFSET; + cxl_cstate->pdev = d; + build_dvsecs(cxl_cstate); + cxl_component_register_block_init(OBJECT(d), cxl_cstate, TYPE_CXL_USP); + pci_register_bar(d, CXL_COMPONENT_REG_BAR_IDX, + PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64, + component_bar); + + pcie_doe_init(d, &usp->doe_cdat, cxl_cstate->dvsec_offset, doe_cdat_prot, + true, 1); + + cxl_cstate->cdat.build_cdat_table = build_cdat_table; + cxl_cstate->cdat.free_cdat_table = free_default_cdat_table; + cxl_cstate->cdat.private = d; + cxl_doe_cdat_init(cxl_cstate, errp); + + return; + +err_cap: + pcie_cap_exit(d); +err_msi: + msi_uninit(d); +err_bridge: + pci_bridge_exitfn(d); +} + +static void cxl_usp_exitfn(PCIDevice *d) +{ + pcie_aer_exit(d); + pcie_cap_exit(d); + msi_uninit(d); + pci_bridge_exitfn(d); +} + +static Property cxl_upstream_props[] = { + DEFINE_PROP_STRING("cdat", CXLUpstreamPort, cxl_cstate.cdat.filename), + DEFINE_PROP_END_OF_LIST() +}; + +static void cxl_upstream_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PCIDeviceClass *k = PCI_DEVICE_CLASS(oc); + + k->is_bridge = true; + k->config_write = cxl_usp_write_config; + k->config_read = cxl_usp_read_config; + k->realize = cxl_usp_realize; + k->exit = cxl_usp_exitfn; + k->vendor_id = 0x19e5; /* Huawei */ + k->device_id = 0xa128; /* Emulated CXL Switch Upstream Port */ + k->revision = 0; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->desc = "CXL Switch Upstream Port"; + dc->reset = cxl_usp_reset; + device_class_set_props(dc, cxl_upstream_props); +} + +static const TypeInfo cxl_usp_info = { + .name = TYPE_CXL_USP, + .parent = TYPE_PCIE_PORT, + .instance_size = sizeof(CXLUpstreamPort), + .class_init = cxl_upstream_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { INTERFACE_CXL_DEVICE }, + { } + }, 
+}; + +static void cxl_usp_register_type(void) +{ + type_register_static(&cxl_usp_info); +} + +type_init(cxl_usp_register_type); diff --git a/hw/pci-bridge/meson.build b/hw/pci-bridge/meson.build index daab8acf2a..243ceeda50 100644 --- a/hw/pci-bridge/meson.build +++ b/hw/pci-bridge/meson.build @@ -3,8 +3,10 @@ pci_ss.add(files('pci_bridge_dev.c')) pci_ss.add(when: 'CONFIG_I82801B11', if_true: files('i82801b11.c')) pci_ss.add(when: 'CONFIG_IOH3420', if_true: files('ioh3420.c')) pci_ss.add(when: 'CONFIG_PCIE_PORT', if_true: files('pcie_root_port.c', 'gen_pcie_root_port.c', 'pcie_pci_bridge.c')) -pci_ss.add(when: 'CONFIG_PXB', if_true: files('pci_expander_bridge.c')) +pci_ss.add(when: 'CONFIG_PXB', if_true: files('pci_expander_bridge.c'), + if_false: files('pci_expander_bridge_stubs.c')) pci_ss.add(when: 'CONFIG_XIO3130', if_true: files('xio3130_upstream.c', 'xio3130_downstream.c')) +pci_ss.add(when: 'CONFIG_CXL', if_true: files('cxl_root_port.c', 'cxl_upstream.c', 'cxl_downstream.c')) # NewWorld PowerMac pci_ss.add(when: 'CONFIG_DEC_PCI', if_true: files('dec.c')) @@ -12,3 +14,5 @@ pci_ss.add(when: 'CONFIG_DEC_PCI', if_true: files('dec.c')) pci_ss.add(when: 'CONFIG_SIMBA', if_true: files('simba.c')) softmmu_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss) + +softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('pci_expander_bridge_stubs.c')) diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c index 7112dc3062..c9e817aa58 100644 --- a/hw/pci-bridge/pci_expander_bridge.c +++ b/hw/pci-bridge/pci_expander_bridge.c @@ -17,6 +17,8 @@ #include "hw/pci/pci_host.h" #include "hw/qdev-properties.h" #include "hw/pci/pci_bridge.h" +#include "hw/pci-bridge/pci_expander_bridge.h" +#include "hw/cxl/cxl.h" #include "qemu/range.h" #include "qemu/error-report.h" #include "qemu/module.h" @@ -24,6 +26,8 @@ #include "hw/boards.h" #include "qom/object.h" +enum BusType { PCI, PCIE, CXL }; + #define TYPE_PXB_BUS "pxb-bus" typedef struct PXBBus PXBBus; DECLARE_INSTANCE_CHECKER(PXBBus, PXB_BUS, @@ -33,6 +37,10 @@ DECLARE_INSTANCE_CHECKER(PXBBus, PXB_BUS, DECLARE_INSTANCE_CHECKER(PXBBus, PXB_PCIE_BUS, TYPE_PXB_PCIE_BUS) +#define TYPE_PXB_CXL_BUS "pxb-cxl-bus" +DECLARE_INSTANCE_CHECKER(PXBBus, PXB_CXL_BUS, + TYPE_PXB_CXL_BUS) + struct PXBBus { /*< private >*/ PCIBus parent_obj; @@ -50,18 +58,13 @@ DECLARE_INSTANCE_CHECKER(PXBDev, PXB_DEV, DECLARE_INSTANCE_CHECKER(PXBDev, PXB_PCIE_DEV, TYPE_PXB_PCIE_DEVICE) -struct PXBDev { - /*< private >*/ - PCIDevice parent_obj; - /*< public >*/ - - uint8_t bus_nr; - uint16_t numa_node; - bool bypass_iommu; -}; - static PXBDev *convert_to_pxb(PCIDevice *dev) { + /* A CXL PXB's parent bus is PCIe, so the normal check won't work */ + if (object_dynamic_cast(OBJECT(dev), TYPE_PXB_CXL_DEVICE)) { + return PXB_CXL_DEV(dev); + } + return pci_bus_is_express(pci_get_bus(dev)) ? 
PXB_PCIE_DEV(dev) : PXB_DEV(dev); } @@ -70,6 +73,13 @@ static GList *pxb_dev_list; #define TYPE_PXB_HOST "pxb-host" +CXLComponentState *cxl_get_hb_cstate(PCIHostState *hb) +{ + CXLHost *host = PXB_CXL_HOST(hb); + + return &host->cxl_cstate; +} + static int pxb_bus_num(PCIBus *bus) { PXBDev *pxb = convert_to_pxb(bus->parent_dev); @@ -106,11 +116,20 @@ static const TypeInfo pxb_pcie_bus_info = { .class_init = pxb_bus_class_init, }; +static const TypeInfo pxb_cxl_bus_info = { + .name = TYPE_PXB_CXL_BUS, + .parent = TYPE_CXL_BUS, + .instance_size = sizeof(PXBBus), + .class_init = pxb_bus_class_init, +}; + static const char *pxb_host_root_bus_path(PCIHostState *host_bridge, PCIBus *rootbus) { - PXBBus *bus = pci_bus_is_express(rootbus) ? - PXB_PCIE_BUS(rootbus) : PXB_BUS(rootbus); + PXBBus *bus = pci_bus_is_cxl(rootbus) ? + PXB_CXL_BUS(rootbus) : + pci_bus_is_express(rootbus) ? PXB_PCIE_BUS(rootbus) : + PXB_BUS(rootbus); snprintf(bus->bus_path, 8, "0000:%02x", pxb_bus_num(rootbus)); return bus->bus_path; @@ -166,6 +185,65 @@ static const TypeInfo pxb_host_info = { .class_init = pxb_host_class_init, }; +static void pxb_cxl_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + CXLHost *cxl = PXB_CXL_HOST(dev); + CXLComponentState *cxl_cstate = &cxl->cxl_cstate; + struct MemoryRegion *mr = &cxl_cstate->crb.component_registers; + + cxl_component_register_block_init(OBJECT(dev), cxl_cstate, + TYPE_PXB_CXL_HOST); + sysbus_init_mmio(sbd, mr); +} + +/* + * Host bridge realization has no means of knowing state associated + * with a particular machine. As such, it is necessary to delay + * final setup of the host bridge register space until later in the + * machine bring up. + */ +void pxb_cxl_hook_up_registers(CXLState *cxl_state, PCIBus *bus, Error **errp) +{ + PXBDev *pxb = PXB_CXL_DEV(pci_bridge_get_device(bus)); + CXLHost *cxl = pxb->cxl.cxl_host_bridge; + CXLComponentState *cxl_cstate = &cxl->cxl_cstate; + struct MemoryRegion *mr = &cxl_cstate->crb.component_registers; + hwaddr offset; + + offset = memory_region_size(mr) * cxl_state->next_mr_idx; + if (offset > memory_region_size(&cxl_state->host_mr)) { + error_setg(errp, "Insufficient space for pxb cxl host register space"); + return; + } + + memory_region_add_subregion(&cxl_state->host_mr, offset, mr); + cxl_state->next_mr_idx++; +} + +static void pxb_cxl_host_class_init(ObjectClass *class, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(class); + PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(class); + + hc->root_bus_path = pxb_host_root_bus_path; + dc->fw_name = "cxl"; + dc->realize = pxb_cxl_realize; + /* Reason: Internal part of the pxb/pxb-pcie device, not usable by itself */ + dc->user_creatable = false; +} + +/* + * This is a device to handle the MMIO for a CXL host bridge. It does nothing + * else. + */ +static const TypeInfo cxl_host_info = { + .name = TYPE_PXB_CXL_HOST, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(CXLHost), + .class_init = pxb_cxl_host_class_init, +}; + /* * Registers the PXB bus as a child of pci host root bus. */ @@ -192,6 +270,12 @@ static int pxb_map_irq_fn(PCIDevice *pci_dev, int pin) { PCIDevice *pxb = pci_get_bus(pci_dev)->parent_dev; + /* + * First carry out normal swizzle to handle + * multiple root ports on a pxb instance.
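A quick arithmetic check of what this combined swizzle gives the guest: per the pre-existing comment a few lines below, the slot subtraction done here is compensated when QEMU routes the interrupt through bus 0 and adds the pxb's slot back, so the two adjustments cancel. A standalone sketch, not part of the patch:

#include <stdio.h>

#define PCI_NUM_PINS 4

/*
 * Mirrors pxb_map_irq_fn(): swizzle by the device's slot, subtract the
 * pxb's own slot, then let the bus 0 routing add that slot back.
 */
static int pxb_guest_visible_pin(int dev_slot, int pxb_slot, int pin)
{
    int swizzled = (pin + dev_slot) % PCI_NUM_PINS; /* pci_swizzle_map_irq_fn() */
    int returned = swizzled - pxb_slot;             /* value returned below */

    return (returned + pxb_slot) % PCI_NUM_PINS;    /* bus 0 adds the slot back */
}

int main(void)
{
    /* Endpoint in slot 3 behind a pxb that sits in slot 1 of bus 0, pin INTA. */
    printf("effective pin %d\n", pxb_guest_visible_pin(3, 1, 0)); /* prints 3 */
    return 0;
}

The net result is the plain (slot + pin) % 4 swizzle the firmware already assumes, which is the whole point of subtracting PCI_SLOT(pxb->devfn) here.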
+ */ + pin = pci_swizzle_map_irq_fn(pci_dev, pin); + /* * The bios does not index the pxb slot number when * it computes the IRQ because it resides on bus 0 @@ -206,6 +290,17 @@ static int pxb_map_irq_fn(PCIDevice *pci_dev, int pin) return pin - PCI_SLOT(pxb->devfn); } +static void pxb_dev_reset(DeviceState *dev) +{ + CXLHost *cxl = PXB_CXL_DEV(dev)->cxl.cxl_host_bridge; + CXLComponentState *cxl_cstate = &cxl->cxl_cstate; + uint32_t *reg_state = cxl_cstate->crb.cache_mem_registers; + uint32_t *write_msk = cxl_cstate->crb.cache_mem_regs_write_mask; + + cxl_component_register_init_common(reg_state, write_msk, CXL2_ROOT_PORT); + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 8); +} + static gint pxb_compare(gconstpointer a, gconstpointer b) { const PXBDev *pxb_a = a, *pxb_b = b; @@ -215,7 +310,8 @@ static gint pxb_compare(gconstpointer a, gconstpointer b) 0; } -static void pxb_dev_realize_common(PCIDevice *dev, bool pcie, Error **errp) +static void pxb_dev_realize_common(PCIDevice *dev, enum BusType type, + Error **errp) { PXBDev *pxb = convert_to_pxb(dev); DeviceState *ds, *bds = NULL; @@ -239,13 +335,17 @@ static void pxb_dev_realize_common(PCIDevice *dev, bool pcie, Error **errp) dev_name = dev->qdev.id; } - ds = qdev_new(TYPE_PXB_HOST); - if (pcie) { + ds = qdev_new(type == CXL ? TYPE_PXB_CXL_HOST : TYPE_PXB_HOST); + if (type == PCIE) { bus = pci_root_bus_new(ds, dev_name, NULL, NULL, 0, TYPE_PXB_PCIE_BUS); + } else if (type == CXL) { + bus = pci_root_bus_new(ds, dev_name, NULL, NULL, 0, TYPE_PXB_CXL_BUS); + bus->flags |= PCI_BUS_CXL; + PXB_CXL_DEV(dev)->cxl.cxl_host_bridge = PXB_CXL_HOST(ds); } else { bus = pci_root_bus_new(ds, "pxb-internal", NULL, NULL, 0, TYPE_PXB_BUS); bds = qdev_new("pci-bridge"); - bds->id = dev_name; + bds->id = g_strdup(dev_name); qdev_prop_set_uint8(bds, PCI_BRIDGE_DEV_PROP_CHASSIS_NR, pxb->bus_nr); qdev_prop_set_bit(bds, PCI_BRIDGE_DEV_PROP_SHPC, false); } @@ -289,7 +389,7 @@ static void pxb_dev_realize(PCIDevice *dev, Error **errp) return; } - pxb_dev_realize_common(dev, false, errp); + pxb_dev_realize_common(dev, PCI, errp); } static void pxb_dev_exitfn(PCIDevice *pci_dev) @@ -342,7 +442,7 @@ static void pxb_pcie_dev_realize(PCIDevice *dev, Error **errp) return; } - pxb_dev_realize_common(dev, true, errp); + pxb_dev_realize_common(dev, PCIE, errp); } static void pxb_pcie_dev_class_init(ObjectClass *klass, void *data) @@ -373,13 +473,61 @@ static const TypeInfo pxb_pcie_dev_info = { }, }; +static void pxb_cxl_dev_realize(PCIDevice *dev, Error **errp) +{ + /* A CXL PXB's parent bus is still PCIe */ + if (!pci_bus_is_express(pci_get_bus(dev))) { + error_setg(errp, "pxb-cxl devices cannot reside on a PCI bus"); + return; + } + + pxb_dev_realize_common(dev, CXL, errp); + pxb_dev_reset(DEVICE(dev)); +} + +static void pxb_cxl_dev_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pxb_cxl_dev_realize; + k->exit = pxb_dev_exitfn; + /* + * XXX: These types of bridges don't actually show up in the hierarchy so + * vendor, device, class, etc. ids are intentionally left out. + */ + + dc->desc = "CXL Host Bridge"; + device_class_set_props(dc, pxb_dev_properties); + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + + /* Host bridges aren't hotpluggable. 
FIXME: spec reference */ + dc->hotpluggable = false; + dc->reset = pxb_dev_reset; +} + +static const TypeInfo pxb_cxl_dev_info = { + .name = TYPE_PXB_CXL_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PXBDev), + .class_init = pxb_cxl_dev_class_init, + .interfaces = + (InterfaceInfo[]){ + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + {}, + }, +}; + static void pxb_register_types(void) { type_register_static(&pxb_bus_info); type_register_static(&pxb_pcie_bus_info); + type_register_static(&pxb_cxl_bus_info); type_register_static(&pxb_host_info); + type_register_static(&cxl_host_info); type_register_static(&pxb_dev_info); type_register_static(&pxb_pcie_dev_info); + type_register_static(&pxb_cxl_dev_info); } type_init(pxb_register_types) diff --git a/hw/pci-bridge/pci_expander_bridge_stubs.c b/hw/pci-bridge/pci_expander_bridge_stubs.c new file mode 100644 index 0000000000..b35180311f --- /dev/null +++ b/hw/pci-bridge/pci_expander_bridge_stubs.c @@ -0,0 +1,14 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * Stubs for calls made from machines to handle the case where CONFIG_PXB + * is not enabled. + */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_bus.h" +#include "hw/pci-bridge/pci_expander_bridge.h" +#include "hw/cxl/cxl.h" + +void pxb_cxl_hook_up_registers(CXLState *state, PCIBus *bus, Error **errp) {}; diff --git a/hw/pci-bridge/pcie_root_port.c b/hw/pci-bridge/pcie_root_port.c index f1cfe9d14a..460e48269d 100644 --- a/hw/pci-bridge/pcie_root_port.c +++ b/hw/pci-bridge/pcie_root_port.c @@ -67,7 +67,11 @@ static void rp_realize(PCIDevice *d, Error **errp) int rc; pci_config_set_interrupt_pin(d->config, 1); - pci_bridge_initfn(d, TYPE_PCIE_BUS); + if (d->cap_present & QEMU_PCIE_CAP_CXL) { + pci_bridge_initfn(d, TYPE_CXL_BUS); + } else { + pci_bridge_initfn(d, TYPE_PCIE_BUS); + } pcie_port_init_reg(d); rc = pci_bridge_ssvid_init(d, rpc->ssvid_offset, dc->vendor_id, diff --git a/hw/pci-bridge/xio3130_downstream.c b/hw/pci-bridge/xio3130_downstream.c index 04aae72cd6..05e2b06c0c 100644 --- a/hw/pci-bridge/xio3130_downstream.c +++ b/hw/pci-bridge/xio3130_downstream.c @@ -28,6 +28,7 @@ #include "migration/vmstate.h" #include "qapi/error.h" #include "qemu/module.h" +#include "hw/pci-bridge/xio3130_downstream.h" #define PCI_DEVICE_ID_TI_XIO3130D 0x8233 /* downstream port */ #define XIO3130_REVISION 0x1 @@ -84,7 +85,7 @@ static void xio3130_downstream_realize(PCIDevice *d, Error **errp) XIO3130_SSVID_SVID, XIO3130_SSVID_SSID, errp); if (rc < 0) { - goto err_bridge; + goto err_msi; } rc = pcie_cap_init(d, XIO3130_EXP_OFFSET, PCI_EXP_TYPE_DOWNSTREAM, @@ -173,7 +174,7 @@ static void xio3130_downstream_class_init(ObjectClass *klass, void *data) } static const TypeInfo xio3130_downstream_info = { - .name = "xio3130-downstream", + .name = TYPE_XIO3130_DOWNSTREAM, .parent = TYPE_PCIE_SLOT, .class_init = xio3130_downstream_class_init, .interfaces = (InterfaceInfo[]) { diff --git a/hw/pci-bridge/xio3130_upstream.c b/hw/pci-bridge/xio3130_upstream.c index 5cd3af4fbc..5ff46ef050 100644 --- a/hw/pci-bridge/xio3130_upstream.c +++ b/hw/pci-bridge/xio3130_upstream.c @@ -75,7 +75,7 @@ static void xio3130_upstream_realize(PCIDevice *d, Error **errp) XIO3130_SSVID_SVID, XIO3130_SSVID_SSID, errp); if (rc < 0) { - goto err_bridge; + goto err_msi; } rc = pcie_cap_init(d, XIO3130_EXP_OFFSET, PCI_EXP_TYPE_UPSTREAM, diff --git a/hw/pci-host/Kconfig b/hw/pci-host/Kconfig index 2b5f7d58cc..38fd2ee8f3 100644 --- a/hw/pci-host/Kconfig +++ 
b/hw/pci-host/Kconfig @@ -77,3 +77,7 @@ config MV64361 bool select PCI select I8259 + +config DINO + bool + select PCI diff --git a/hw/hppa/dino.c b/hw/pci-host/dino.c similarity index 71% rename from hw/hppa/dino.c rename to hw/pci-host/dino.c index eab96dd84e..f257c24e64 100644 --- a/hw/hppa/dino.c +++ b/hw/pci-host/dino.c @@ -17,118 +17,13 @@ #include "hw/irq.h" #include "hw/pci/pci.h" #include "hw/pci/pci_bus.h" +#include "hw/qdev-properties.h" +#include "hw/pci-host/dino.h" #include "migration/vmstate.h" -#include "hppa_sys.h" #include "trace.h" #include "qom/object.h" -#define TYPE_DINO_PCI_HOST_BRIDGE "dino-pcihost" - -#define DINO_IAR0 0x004 -#define DINO_IODC 0x008 -#define DINO_IRR0 0x00C /* RO */ -#define DINO_IAR1 0x010 -#define DINO_IRR1 0x014 /* RO */ -#define DINO_IMR 0x018 -#define DINO_IPR 0x01C -#define DINO_TOC_ADDR 0x020 -#define DINO_ICR 0x024 -#define DINO_ILR 0x028 /* RO */ -#define DINO_IO_COMMAND 0x030 /* WO */ -#define DINO_IO_STATUS 0x034 /* RO */ -#define DINO_IO_CONTROL 0x038 -#define DINO_IO_GSC_ERR_RESP 0x040 /* RO */ -#define DINO_IO_ERR_INFO 0x044 /* RO */ -#define DINO_IO_PCI_ERR_RESP 0x048 /* RO */ -#define DINO_IO_FBB_EN 0x05c -#define DINO_IO_ADDR_EN 0x060 -#define DINO_PCI_CONFIG_ADDR 0x064 -#define DINO_PCI_CONFIG_DATA 0x068 -#define DINO_PCI_IO_DATA 0x06c -#define DINO_PCI_MEM_DATA 0x070 /* Dino 3.x only */ -#define DINO_GSC2X_CONFIG 0x7b4 /* RO */ -#define DINO_GMASK 0x800 -#define DINO_PAMR 0x804 -#define DINO_PAPR 0x808 -#define DINO_DAMODE 0x80c -#define DINO_PCICMD 0x810 -#define DINO_PCISTS 0x814 /* R/WC */ -#define DINO_MLTIM 0x81c -#define DINO_BRDG_FEAT 0x820 -#define DINO_PCIROR 0x824 -#define DINO_PCIWOR 0x828 -#define DINO_TLTIM 0x830 - -#define DINO_IRQS 11 /* bits 0-10 are architected */ -#define DINO_IRR_MASK 0x5ff /* only 10 bits are implemented */ -#define DINO_LOCAL_IRQS (DINO_IRQS + 1) -#define DINO_MASK_IRQ(x) (1 << (x)) - -#define PCIINTA 0x001 -#define PCIINTB 0x002 -#define PCIINTC 0x004 -#define PCIINTD 0x008 -#define PCIINTE 0x010 -#define PCIINTF 0x020 -#define GSCEXTINT 0x040 -/* #define xxx 0x080 - bit 7 is "default" */ -/* #define xxx 0x100 - bit 8 not used */ -/* #define xxx 0x200 - bit 9 not used */ -#define RS232INT 0x400 - -#define DINO_MEM_CHUNK_SIZE (8 * MiB) - -OBJECT_DECLARE_SIMPLE_TYPE(DinoState, DINO_PCI_HOST_BRIDGE) - -#define DINO800_REGS (1 + (DINO_TLTIM - DINO_GMASK) / 4) -static const uint32_t reg800_keep_bits[DINO800_REGS] = { - MAKE_64BIT_MASK(0, 1), /* GMASK */ - MAKE_64BIT_MASK(0, 7), /* PAMR */ - MAKE_64BIT_MASK(0, 7), /* PAPR */ - MAKE_64BIT_MASK(0, 8), /* DAMODE */ - MAKE_64BIT_MASK(0, 7), /* PCICMD */ - MAKE_64BIT_MASK(0, 9), /* PCISTS */ - MAKE_64BIT_MASK(0, 32), /* Undefined */ - MAKE_64BIT_MASK(0, 8), /* MLTIM */ - MAKE_64BIT_MASK(0, 30), /* BRDG_FEAT */ - MAKE_64BIT_MASK(0, 24), /* PCIROR */ - MAKE_64BIT_MASK(0, 22), /* PCIWOR */ - MAKE_64BIT_MASK(0, 32), /* Undocumented */ - MAKE_64BIT_MASK(0, 9), /* TLTIM */ -}; - -struct DinoState { - PCIHostState parent_obj; - - /* PCI_CONFIG_ADDR is parent_obj.config_reg, via pci_host_conf_be_ops, - so that we can map PCI_CONFIG_DATA to pci_host_data_be_ops. 
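The reg800_keep_bits[] table moved into the new header encodes how many bits of each register in the 0x800 range are actually implemented; on a guest write the model presumably keeps only those bits. A hedged sketch of that masking, with the helper name invented for illustration and the table values elided:

#include <stdint.h>

#define DINO_GMASK 0x800
#define DINO_TLTIM 0x830
#define DINO800_REGS (1 + (DINO_TLTIM - DINO_GMASK) / 4)

/* Same shape as the table above; values omitted in this sketch. */
extern const uint32_t reg800_keep_bits[DINO800_REGS];

/* Hypothetical helper: store a guest write, dropping unimplemented bits. */
static void dino_reg800_write(uint32_t *reg800, uint64_t addr, uint32_t val)
{
    int i = (addr - DINO_GMASK) / 4;

    reg800[i] = val & reg800_keep_bits[i];
}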
*/ - uint32_t config_reg_dino; /* keep original copy, including 2 lowest bits */ - - uint32_t iar0; - uint32_t iar1; - uint32_t imr; - uint32_t ipr; - uint32_t icr; - uint32_t ilr; - uint32_t io_fbb_en; - uint32_t io_addr_en; - uint32_t io_control; - uint32_t toc_addr; - - uint32_t reg800[DINO800_REGS]; - - MemoryRegion this_mem; - MemoryRegion pci_mem; - MemoryRegion pci_mem_alias[32]; - - AddressSpace bm_as; - MemoryRegion bm; - MemoryRegion bm_ram_alias; - MemoryRegion bm_pci_alias; - MemoryRegion bm_cpu_alias; -}; - /* * Dino can forward memory accesses from the CPU in the range between * 0xf0800000 and 0xff000000 to the PCI bus. @@ -200,6 +95,7 @@ static MemTxResult dino_chip_read_with_attrs(void *opaque, hwaddr addr, MemTxAttrs attrs) { DinoState *s = opaque; + PCIHostState *phb = PCI_HOST_BRIDGE(s); MemTxResult ret = MEMTX_OK; AddressSpace *io; uint16_t ioaddr; @@ -209,7 +105,7 @@ static MemTxResult dino_chip_read_with_attrs(void *opaque, hwaddr addr, case DINO_PCI_IO_DATA ... DINO_PCI_IO_DATA + 3: /* Read from PCI IO space. */ io = &address_space_io; - ioaddr = s->parent_obj.config_reg + (addr & 3); + ioaddr = phb->config_reg + (addr & 3); switch (size) { case 1: val = address_space_ldub(io, ioaddr, attrs, &ret); @@ -292,6 +188,7 @@ static MemTxResult dino_chip_write_with_attrs(void *opaque, hwaddr addr, MemTxAttrs attrs) { DinoState *s = opaque; + PCIHostState *phb = PCI_HOST_BRIDGE(s); AddressSpace *io; MemTxResult ret; uint16_t ioaddr; @@ -303,7 +200,7 @@ static MemTxResult dino_chip_write_with_attrs(void *opaque, hwaddr addr, case DINO_IO_DATA ... DINO_PCI_IO_DATA + 3: /* Write into PCI IO space. */ io = &address_space_io; - ioaddr = s->parent_obj.config_reg + (addr & 3); + ioaddr = phb->config_reg + (addr & 3); switch (size) { case 1: address_space_stb(io, ioaddr, val, attrs, &ret); @@ -501,52 +398,78 @@ static int dino_pci_map_irq(PCIDevice *d, int irq_num) return slot & 0x03; } -static void dino_set_timer_irq(void *opaque, int irq, int level) +static void dino_pcihost_reset(DeviceState *dev) { - /* ??? Not connected. */ -} + DinoState *s = DINO_PCI_HOST_BRIDGE(dev); -static void dino_set_serial_irq(void *opaque, int irq, int level) -{ - dino_set_irq(opaque, 10, level); -} - -PCIBus *dino_init(MemoryRegion *addr_space, - qemu_irq *p_rtc_irq, qemu_irq *p_ser_irq) -{ - DeviceState *dev; - DinoState *s; - PCIBus *b; - int i; - - dev = qdev_new(TYPE_DINO_PCI_HOST_BRIDGE); - s = DINO_PCI_HOST_BRIDGE(dev); - s->iar0 = s->iar1 = CPU_HPA + 3; + s->iar0 = s->iar1 = 0xFFFB0000 + 3; /* CPU_HPA + 3 */ s->toc_addr = 0xFFFA0030; /* IO_COMMAND of CPU */ +} + +static void dino_pcihost_realize(DeviceState *dev, Error **errp) +{ + DinoState *s = DINO_PCI_HOST_BRIDGE(dev); + + /* Set up PCI view of memory: Bus master address space. 
*/ + memory_region_init(&s->bm, OBJECT(s), "bm-dino", 4 * GiB); + memory_region_init_alias(&s->bm_ram_alias, OBJECT(s), + "bm-system", s->memory_as, 0, + 0xf0000000 + DINO_MEM_CHUNK_SIZE); + memory_region_init_alias(&s->bm_pci_alias, OBJECT(s), + "bm-pci", &s->pci_mem, + 0xf0000000 + DINO_MEM_CHUNK_SIZE, + 30 * DINO_MEM_CHUNK_SIZE); + memory_region_init_alias(&s->bm_cpu_alias, OBJECT(s), + "bm-cpu", s->memory_as, 0xfff00000, + 0xfffff); + memory_region_add_subregion(&s->bm, 0, + &s->bm_ram_alias); + memory_region_add_subregion(&s->bm, + 0xf0000000 + DINO_MEM_CHUNK_SIZE, + &s->bm_pci_alias); + memory_region_add_subregion(&s->bm, 0xfff00000, + &s->bm_cpu_alias); + + address_space_init(&s->bm_as, &s->bm, "pci-bm"); +} + +static void dino_pcihost_unrealize(DeviceState *dev) +{ + DinoState *s = DINO_PCI_HOST_BRIDGE(dev); + + address_space_destroy(&s->bm_as); +} + +static void dino_pcihost_init(Object *obj) +{ + DinoState *s = DINO_PCI_HOST_BRIDGE(obj); + PCIHostState *phb = PCI_HOST_BRIDGE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + int i; /* Dino PCI access from main memory. */ memory_region_init_io(&s->this_mem, OBJECT(s), &dino_chip_ops, s, "dino", 4096); - memory_region_add_subregion(addr_space, DINO_HPA, &s->this_mem); /* Dino PCI config. */ - memory_region_init_io(&s->parent_obj.conf_mem, OBJECT(&s->parent_obj), - &dino_config_addr_ops, dev, "pci-conf-idx", 4); - memory_region_init_io(&s->parent_obj.data_mem, OBJECT(&s->parent_obj), - &dino_config_data_ops, dev, "pci-conf-data", 4); + memory_region_init_io(&phb->conf_mem, OBJECT(phb), + &dino_config_addr_ops, DEVICE(s), + "pci-conf-idx", 4); + memory_region_init_io(&phb->data_mem, OBJECT(phb), + &dino_config_data_ops, DEVICE(s), + "pci-conf-data", 4); memory_region_add_subregion(&s->this_mem, DINO_PCI_CONFIG_ADDR, - &s->parent_obj.conf_mem); + &phb->conf_mem); memory_region_add_subregion(&s->this_mem, DINO_CONFIG_DATA, - &s->parent_obj.data_mem); + &phb->data_mem); /* Dino PCI bus memory. */ memory_region_init(&s->pci_mem, OBJECT(s), "pci-memory", 4 * GiB); - b = pci_register_root_bus(dev, "pci", dino_set_irq, dino_pci_map_irq, s, - &s->pci_mem, get_system_io(), - PCI_DEVFN(0, 0), 32, TYPE_PCI_BUS); - s->parent_obj.bus = b; - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + phb->bus = pci_register_root_bus(DEVICE(s), "pci", + dino_set_irq, dino_pci_map_irq, s, + &s->pci_mem, get_system_io(), + PCI_DEVFN(0, 0), 32, TYPE_PCI_BUS); /* Set up windows into PCI bus memory. */ for (i = 1; i < 31; i++) { @@ -558,44 +481,34 @@ PCIBus *dino_init(MemoryRegion *addr_space, g_free(name); } - /* Set up PCI view of memory: Bus master address space. 
*/ - memory_region_init(&s->bm, OBJECT(s), "bm-dino", 4 * GiB); - memory_region_init_alias(&s->bm_ram_alias, OBJECT(s), - "bm-system", addr_space, 0, - 0xf0000000 + DINO_MEM_CHUNK_SIZE); - memory_region_init_alias(&s->bm_pci_alias, OBJECT(s), - "bm-pci", &s->pci_mem, - 0xf0000000 + DINO_MEM_CHUNK_SIZE, - 30 * DINO_MEM_CHUNK_SIZE); - memory_region_init_alias(&s->bm_cpu_alias, OBJECT(s), - "bm-cpu", addr_space, 0xfff00000, - 0xfffff); - memory_region_add_subregion(&s->bm, 0, - &s->bm_ram_alias); - memory_region_add_subregion(&s->bm, - 0xf0000000 + DINO_MEM_CHUNK_SIZE, - &s->bm_pci_alias); - memory_region_add_subregion(&s->bm, 0xfff00000, - &s->bm_cpu_alias); - address_space_init(&s->bm_as, &s->bm, "pci-bm"); - pci_setup_iommu(b, dino_pcihost_set_iommu, s); + pci_setup_iommu(phb->bus, dino_pcihost_set_iommu, s); - *p_rtc_irq = qemu_allocate_irq(dino_set_timer_irq, s, 0); - *p_ser_irq = qemu_allocate_irq(dino_set_serial_irq, s, 0); + sysbus_init_mmio(sbd, &s->this_mem); - return b; + qdev_init_gpio_in(DEVICE(obj), dino_set_irq, DINO_IRQS); } +static Property dino_pcihost_properties[] = { + DEFINE_PROP_LINK("memory-as", DinoState, memory_as, TYPE_MEMORY_REGION, + MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + static void dino_pcihost_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + dc->reset = dino_pcihost_reset; + dc->realize = dino_pcihost_realize; + dc->unrealize = dino_pcihost_unrealize; + device_class_set_props(dc, dino_pcihost_properties); dc->vmsd = &vmstate_dino; } static const TypeInfo dino_pcihost_info = { .name = TYPE_DINO_PCI_HOST_BRIDGE, .parent = TYPE_PCI_HOST_BRIDGE, + .instance_init = dino_pcihost_init, .instance_size = sizeof(DinoState), .class_init = dino_pcihost_class_init, }; diff --git a/hw/pci-host/gpex-acpi.c b/hw/pci-host/gpex-acpi.c index e7e162a00a..7c7316bc96 100644 --- a/hw/pci-host/gpex-acpi.c +++ b/hw/pci-host/gpex-acpi.c @@ -5,6 +5,7 @@ #include "hw/pci/pci_bus.h" #include "hw/pci/pci_bridge.h" #include "hw/pci/pcie_host.h" +#include "hw/acpi/cxl.h" static void acpi_dsdt_add_pci_route_table(Aml *dev, uint32_t irq) { @@ -139,6 +140,7 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg) QLIST_FOREACH(bus, &bus->child, sibling) { uint8_t bus_num = pci_bus_num(bus); uint8_t numa_node = pci_bus_numa_node(bus); + bool is_cxl = pci_bus_is_cxl(bus); if (!pci_bus_is_root(bus)) { continue; @@ -154,8 +156,16 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg) } dev = aml_device("PC%.02X", bus_num); - aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A08"))); - aml_append(dev, aml_name_decl("_CID", aml_string("PNP0A03"))); + if (is_cxl) { + struct Aml *pkg = aml_package(2); + aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0016"))); + aml_append(pkg, aml_eisaid("PNP0A08")); + aml_append(pkg, aml_eisaid("PNP0A03")); + aml_append(dev, aml_name_decl("_CID", pkg)); + } else { + aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A08"))); + aml_append(dev, aml_name_decl("_CID", aml_string("PNP0A03"))); + } aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num))); aml_append(dev, aml_name_decl("_UID", aml_int(bus_num))); aml_append(dev, aml_name_decl("_STR", aml_unicode("pxb Device"))); @@ -175,7 +185,11 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg) cfg->pio.base, 0, 0, 0); aml_append(dev, aml_name_decl("_CRS", crs)); - acpi_dsdt_add_pci_osc(dev); + if (is_cxl) { + build_cxl_osc_method(dev); + } else { + acpi_dsdt_add_pci_osc(dev); + } aml_append(scope, dev); } diff --git 
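For a pxb-cxl root bus, the _HID/_CID handling above means the DSDT entry, once dumped and disassembled, comes out roughly as follows (bus number 0x0c chosen arbitrarily; the _BBN/_UID/_CRS/_OSC parts continue as in the code):

Device (PC0C)
{
    Name (_HID, "ACPI0016")             // CXL host bridge
    Name (_CID, Package (0x02)
    {
        EisaId ("PNP0A08"),             // PCIe fallback for older OSes
        EisaId ("PNP0A03")
    })
    Name (_BBN, 0x0C)
    ...
}

A CXL-aware OS binds on ACPI0016 and negotiates CXL support through the _OSC produced by build_cxl_osc_method(); an OS without CXL support falls back to the compatible PNP0A08/PNP0A03 IDs and treats it as an ordinary PCIe host bridge.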
a/hw/pci-host/grackle.c b/hw/pci-host/grackle.c index b05facf463..95945ac0f4 100644 --- a/hw/pci-host/grackle.c +++ b/hw/pci-host/grackle.c @@ -24,8 +24,6 @@ */ #include "qemu/osdep.h" -#include "hw/pci/pci_host.h" -#include "hw/ppc/mac.h" #include "hw/qdev-properties.h" #include "hw/pci/pci.h" #include "hw/irq.h" @@ -33,18 +31,7 @@ #include "qemu/module.h" #include "trace.h" #include "qom/object.h" - -OBJECT_DECLARE_SIMPLE_TYPE(GrackleState, GRACKLE_PCI_HOST_BRIDGE) - -struct GrackleState { - PCIHostState parent_obj; - - uint32_t ofw_addr; - qemu_irq irqs[4]; - MemoryRegion pci_mmio; - MemoryRegion pci_hole; - MemoryRegion pci_io; -}; +#include "hw/pci-host/grackle.h" /* Don't know if this matches real hardware, but it agrees with OHW. */ static int pci_grackle_map_irq(PCIDevice *pci_dev, int irq_num) diff --git a/hw/pci-host/i440fx.c b/hw/pci-host/i440fx.c index 28c9bae899..d5426ef4a5 100644 --- a/hw/pci-host/i440fx.c +++ b/hw/pci-host/i440fx.c @@ -237,8 +237,8 @@ static void i440fx_realize(PCIDevice *dev, Error **errp) } } -PCIBus *i440fx_init(const char *host_type, const char *pci_type, - PCII440FXState **pi440fx_state, +PCIBus *i440fx_init(const char *pci_type, + DeviceState *dev, MemoryRegion *address_space_mem, MemoryRegion *address_space_io, ram_addr_t ram_size, @@ -247,7 +247,6 @@ PCIBus *i440fx_init(const char *host_type, const char *pci_type, MemoryRegion *pci_address_space, MemoryRegion *ram_memory) { - DeviceState *dev; PCIBus *b; PCIDevice *d; PCIHostState *s; @@ -255,7 +254,6 @@ PCIBus *i440fx_init(const char *host_type, const char *pci_type, unsigned i; I440FXState *i440fx; - dev = qdev_new(host_type); s = PCI_HOST_BRIDGE(dev); b = pci_root_bus_new(dev, NULL, pci_address_space, address_space_io, 0, TYPE_PCI_BUS); @@ -264,8 +262,7 @@ PCIBus *i440fx_init(const char *host_type, const char *pci_type, sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); d = pci_create_simple(b, 0, pci_type); - *pi440fx_state = I440FX_PCI_DEVICE(d); - f = *pi440fx_state; + f = I440FX_PCI_DEVICE(d); f->system_memory = address_space_mem; f->pci_address_space = pci_address_space; f->ram_memory = ram_memory; @@ -314,14 +311,6 @@ PCIBus *i440fx_init(const char *host_type, const char *pci_type, return b; } -PCIBus *find_i440fx(void) -{ - PCIHostState *s = OBJECT_CHECK(PCIHostState, - object_resolve_path("/machine/i440fx", NULL), - TYPE_PCI_HOST_BRIDGE); - return s ? 
s->bus : NULL; -} - static void i440fx_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/pci-host/meson.build b/hw/pci-host/meson.build index 4c4f39c15c..e832babc9d 100644 --- a/hw/pci-host/meson.build +++ b/hw/pci-host/meson.build @@ -25,6 +25,9 @@ pci_ss.add(when: 'CONFIG_MV64361', if_true: files('mv64361.c')) # ARM devices pci_ss.add(when: 'CONFIG_VERSATILE_PCI', if_true: files('versatile.c')) +# HPPA devices +pci_ss.add(when: 'CONFIG_DINO', if_true: files('dino.c')) + softmmu_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss) specific_ss.add(when: 'CONFIG_PCI_POWERNV', if_true: files( @@ -32,5 +35,6 @@ specific_ss.add(when: 'CONFIG_PCI_POWERNV', if_true: files( 'pnv_phb3_msi.c', 'pnv_phb3_pbcq.c', 'pnv_phb4.c', - 'pnv_phb4_pec.c' + 'pnv_phb4_pec.c', + 'pnv_phb.c', )) diff --git a/hw/pci-host/mv64361.c b/hw/pci-host/mv64361.c index 92b0f5d047..cc9c4d6d3b 100644 --- a/hw/pci-host/mv64361.c +++ b/hw/pci-host/mv64361.c @@ -9,7 +9,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/units.h" #include "qapi/error.h" #include "hw/hw.h" @@ -869,6 +868,7 @@ static void mv64361_realize(DeviceState *dev, Error **errp) s->base_addr_enable = 0x1fffff; memory_region_init_io(&s->regs, OBJECT(s), &mv64361_ops, s, TYPE_MV64361, 0x10000); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->regs); for (i = 0; i < 2; i++) { g_autofree char *name = g_strdup_printf("pcihost%d", i); object_initialize_child(OBJECT(dev), name, &s->pci[i], diff --git a/hw/pci-host/pnv_phb.c b/hw/pci-host/pnv_phb.c new file mode 100644 index 0000000000..0b26b43736 --- /dev/null +++ b/hw/pci-host/pnv_phb.c @@ -0,0 +1,345 @@ +/* + * QEMU PowerPC PowerNV Proxy PHB model + * + * Copyright (c) 2022, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/visitor.h" +#include "qapi/error.h" +#include "hw/pci-host/pnv_phb.h" +#include "hw/pci-host/pnv_phb3.h" +#include "hw/pci-host/pnv_phb4.h" +#include "hw/ppc/pnv.h" +#include "hw/qdev-properties.h" +#include "qom/object.h" +#include "sysemu/sysemu.h" + + +/* + * Set the QOM parent and parent bus of an object child. If the device + * state associated with the child has an id, use it as QOM id. + * Otherwise use object_typename[index] as QOM id. + * + * This helper does both operations at the same time because setting + * a new QOM child will erase the bus parent of the device. This happens + * because object_unparent() will call object_property_del_child(), + * which in turn calls the property release callback prop->release if + * it's defined. In our case this callback is set to + * object_finalize_child_property(), which was assigned during the + * first object_property_add_child() call. This callback will end up + * calling device_unparent(), and this function removes the device + * from its parent bus. + * + * The QOM and parent bus to be set aren't necessarily related, so + * let's receive both as arguments. + */ +static bool pnv_parent_fixup(Object *parent, BusState *parent_bus, + Object *child, int index, + Error **errp) +{ + g_autofree char *default_id = + g_strdup_printf("%s[%d]", object_get_typename(child), index); + const char *dev_id = DEVICE(child)->id; + + if (child->parent == parent) { + return true; + } + + object_ref(child); + object_unparent(child); + object_property_add_child(parent, dev_id ?
dev_id : default_id, child); + object_unref(child); + + if (!qdev_set_parent_bus(DEVICE(child), parent_bus, errp)) { + return false; + } + + return true; +} + +/* + * User created devices won't have the initial setup that default + * devices have. This setup consists of assigning a parent device + * (chip for PHB3, PEC for PHB4/5) that will be the QOM/bus parent + * of the PHB. + */ +static bool pnv_phb_user_device_init(PnvPHB *phb, Error **errp) +{ + PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); + PnvChip *chip = pnv_get_chip(pnv, phb->chip_id); + Object *parent = NULL; + + if (!chip) { + error_setg(errp, "invalid chip id: %d", phb->chip_id); + return false; + } + + parent = pnv_chip_add_phb(chip, phb, errp); + if (!parent) { + return false; + } + + /* + * Reparent user created devices to the chip to build + * correctly the device tree. pnv_xscom_dt() needs every + * PHB to be a child of the chip to build the DT correctly. + */ + if (!pnv_parent_fixup(parent, qdev_get_parent_bus(DEVICE(chip)), + OBJECT(phb), phb->phb_id, errp)) { + return false; + } + + return true; +} + +static void pnv_phb_realize(DeviceState *dev, Error **errp) +{ + PnvPHB *phb = PNV_PHB(dev); + PCIHostState *pci = PCI_HOST_BRIDGE(dev); + g_autofree char *phb_typename = NULL; + + if (!phb->version) { + error_setg(errp, "version not specified"); + return; + } + + switch (phb->version) { + case 3: + phb_typename = g_strdup(TYPE_PNV_PHB3); + break; + case 4: + phb_typename = g_strdup(TYPE_PNV_PHB4); + break; + case 5: + phb_typename = g_strdup(TYPE_PNV_PHB5); + break; + default: + g_assert_not_reached(); + } + + phb->backend = object_new(phb_typename); + object_property_add_child(OBJECT(dev), "phb-backend", phb->backend); + + /* Passthrough child device properties to the proxy device */ + object_property_set_uint(phb->backend, "index", phb->phb_id, errp); + object_property_set_uint(phb->backend, "chip-id", phb->chip_id, errp); + object_property_set_link(phb->backend, "phb-base", OBJECT(phb), errp); + + /* + * Handle user created devices. User devices will not have a + * pointer to a chip (PHB3) and a PEC (PHB4/5). 
+ */ + if (!phb->chip && !phb->pec) { + if (!pnv_phb_user_device_init(phb, errp)) { + return; + } + } + + if (phb->version == 3) { + object_property_set_link(phb->backend, "chip", + OBJECT(phb->chip), errp); + } else { + object_property_set_link(phb->backend, "pec", OBJECT(phb->pec), errp); + } + + if (!qdev_realize(DEVICE(phb->backend), NULL, errp)) { + return; + } + + if (phb->version == 3) { + pnv_phb3_bus_init(dev, PNV_PHB3(phb->backend)); + } else { + pnv_phb4_bus_init(dev, PNV_PHB4(phb->backend)); + } + + if (defaults_enabled()) { + PCIDevice *root = pci_new(PCI_DEVFN(0, 0), TYPE_PNV_PHB_ROOT_PORT); + + pci_realize_and_unref(root, pci->bus, errp); + } +} + +static const char *pnv_phb_root_bus_path(PCIHostState *host_bridge, + PCIBus *rootbus) +{ + PnvPHB *phb = PNV_PHB(host_bridge); + + snprintf(phb->bus_path, sizeof(phb->bus_path), "00%02x:%02x", + phb->chip_id, phb->phb_id); + return phb->bus_path; +} + +static Property pnv_phb_properties[] = { + DEFINE_PROP_UINT32("index", PnvPHB, phb_id, 0), + DEFINE_PROP_UINT32("chip-id", PnvPHB, chip_id, 0), + DEFINE_PROP_UINT32("version", PnvPHB, version, 0), + + DEFINE_PROP_LINK("chip", PnvPHB, chip, TYPE_PNV_CHIP, PnvChip *), + + DEFINE_PROP_LINK("pec", PnvPHB, pec, TYPE_PNV_PHB4_PEC, + PnvPhb4PecState *), + + DEFINE_PROP_END_OF_LIST(), +}; + +static void pnv_phb_class_init(ObjectClass *klass, void *data) +{ + PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + hc->root_bus_path = pnv_phb_root_bus_path; + dc->realize = pnv_phb_realize; + device_class_set_props(dc, pnv_phb_properties); + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->user_creatable = true; +} + +static void pnv_phb_root_port_reset(DeviceState *dev) +{ + PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev); + PnvPHBRootPort *phb_rp = PNV_PHB_ROOT_PORT(dev); + PCIDevice *d = PCI_DEVICE(dev); + uint8_t *conf = d->config; + + rpc->parent_reset(dev); + + if (phb_rp->version == 3) { + return; + } + + /* PHB4 and later requires these extra reset steps */ + pci_byte_test_and_set_mask(conf + PCI_IO_BASE, + PCI_IO_RANGE_MASK & 0xff); + pci_byte_test_and_clear_mask(conf + PCI_IO_LIMIT, + PCI_IO_RANGE_MASK & 0xff); + pci_set_word(conf + PCI_MEMORY_BASE, 0); + pci_set_word(conf + PCI_MEMORY_LIMIT, 0xfff0); + pci_set_word(conf + PCI_PREF_MEMORY_BASE, 0x1); + pci_set_word(conf + PCI_PREF_MEMORY_LIMIT, 0xfff1); + pci_set_long(conf + PCI_PREF_BASE_UPPER32, 0x1); /* Hack */ + pci_set_long(conf + PCI_PREF_LIMIT_UPPER32, 0xffffffff); + pci_config_set_interrupt_pin(conf, 0); +} + +static void pnv_phb_root_port_realize(DeviceState *dev, Error **errp) +{ + PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev); + PnvPHBRootPort *phb_rp = PNV_PHB_ROOT_PORT(dev); + PCIBus *bus = PCI_BUS(qdev_get_parent_bus(dev)); + PCIDevice *pci = PCI_DEVICE(dev); + uint16_t device_id = 0; + Error *local_err = NULL; + int chip_id, index; + + /* + * 'index' will be used both as a PCIE slot value and to calculate + * QOM id. 'chip_id' is going to be used as PCIE chassis for the + * root port. 
+ */ + chip_id = object_property_get_int(OBJECT(bus), "chip-id", &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + index = object_property_get_int(OBJECT(bus), "phb-id", &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* Set unique chassis/slot values for the root port */ + qdev_prop_set_uint8(dev, "chassis", chip_id); + qdev_prop_set_uint16(dev, "slot", index); + + /* + * User created root ports are QOM parented to one of + * the peripheral containers but it's already at the right + * parent bus. Change the QOM parent to be the same as the + * parent bus it's already assigned to. + */ + if (!pnv_parent_fixup(OBJECT(bus), BUS(bus), OBJECT(dev), + index, errp)) { + return; + } + + rpc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + switch (phb_rp->version) { + case 3: + device_id = PNV_PHB3_DEVICE_ID; + break; + case 4: + device_id = PNV_PHB4_DEVICE_ID; + break; + case 5: + device_id = PNV_PHB5_DEVICE_ID; + break; + default: + g_assert_not_reached(); + } + + pci_config_set_device_id(pci->config, device_id); + pci_config_set_interrupt_pin(pci->config, 0); +} + +static Property pnv_phb_root_port_properties[] = { + DEFINE_PROP_UINT32("version", PnvPHBRootPort, version, 0), + + DEFINE_PROP_END_OF_LIST(), +}; + +static void pnv_phb_root_port_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(klass); + + dc->desc = "IBM PHB PCIE Root Port"; + + device_class_set_props(dc, pnv_phb_root_port_properties); + device_class_set_parent_realize(dc, pnv_phb_root_port_realize, + &rpc->parent_realize); + device_class_set_parent_reset(dc, pnv_phb_root_port_reset, + &rpc->parent_reset); + dc->reset = &pnv_phb_root_port_reset; + dc->user_creatable = true; + + k->vendor_id = PCI_VENDOR_ID_IBM; + /* device_id will be written during realize() */ + k->device_id = 0; + k->revision = 0; + + rpc->exp_offset = 0x48; + rpc->aer_offset = 0x100; +} + +static const TypeInfo pnv_phb_type_info = { + .name = TYPE_PNV_PHB, + .parent = TYPE_PCIE_HOST_BRIDGE, + .instance_size = sizeof(PnvPHB), + .class_init = pnv_phb_class_init, +}; + +static const TypeInfo pnv_phb_root_port_info = { + .name = TYPE_PNV_PHB_ROOT_PORT, + .parent = TYPE_PCIE_ROOT_PORT, + .instance_size = sizeof(PnvPHBRootPort), + .class_init = pnv_phb_root_port_class_init, +}; + +static void pnv_phb_register_types(void) +{ + type_register_static(&pnv_phb_type_info); + type_register_static(&pnv_phb_root_port_info); +} + +type_init(pnv_phb_register_types) diff --git a/hw/pci-host/pnv_phb.h b/hw/pci-host/pnv_phb.h new file mode 100644 index 0000000000..58ebd6dd0f --- /dev/null +++ b/hw/pci-host/pnv_phb.h @@ -0,0 +1,55 @@ +/* + * QEMU PowerPC PowerNV Proxy PHB model + * + * Copyright (c) 2022, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ + +#ifndef PCI_HOST_PNV_PHB_H +#define PCI_HOST_PNV_PHB_H + +#include "hw/pci/pcie_host.h" +#include "hw/pci/pcie_port.h" +#include "qom/object.h" + +typedef struct PnvChip PnvChip; +typedef struct PnvPhb4PecState PnvPhb4PecState; + +struct PnvPHB { + PCIExpressHost parent_obj; + + uint32_t chip_id; + uint32_t phb_id; + uint32_t version; + char bus_path[8]; + + PnvChip *chip; + + PnvPhb4PecState *pec; + + /* The PHB backend (PnvPHB3, PnvPHB4 ...) 
being used */ + Object *backend; +}; + +#define TYPE_PNV_PHB "pnv-phb" +OBJECT_DECLARE_SIMPLE_TYPE(PnvPHB, PNV_PHB) + +/* + * PHB PCIe Root port + */ +#define PNV_PHB3_DEVICE_ID 0x03dc +#define PNV_PHB4_DEVICE_ID 0x04c1 +#define PNV_PHB5_DEVICE_ID 0x0652 + +typedef struct PnvPHBRootPort { + PCIESlot parent_obj; + + uint32_t version; +} PnvPHBRootPort; + +#define TYPE_PNV_PHB_ROOT_PORT "pnv-phb-root-port" +OBJECT_DECLARE_SIMPLE_TYPE(PnvPHBRootPort, PNV_PHB_ROOT_PORT) + +#endif /* PCI_HOST_PNV_PHB_H */ diff --git a/hw/pci-host/pnv_phb3.c b/hw/pci-host/pnv_phb3.c index a7f9685005..9054c393a2 100644 --- a/hw/pci-host/pnv_phb3.c +++ b/hw/pci-host/pnv_phb3.c @@ -10,8 +10,8 @@ #include "qemu/log.h" #include "qapi/visitor.h" #include "qapi/error.h" -#include "qemu-common.h" #include "hw/pci-host/pnv_phb3_regs.h" +#include "hw/pci-host/pnv_phb.h" #include "hw/pci-host/pnv_phb3.h" #include "hw/pci/pcie_host.h" #include "hw/pci/pcie_port.h" @@ -19,6 +19,7 @@ #include "hw/irq.h" #include "hw/qdev-properties.h" #include "qom/object.h" +#include "sysemu/sysemu.h" #define phb3_error(phb, fmt, ...) \ qemu_log_mask(LOG_GUEST_ERROR, "phb3[%d:%d]: " fmt "\n", \ @@ -26,7 +27,7 @@ static PCIDevice *pnv_phb3_find_cfg_dev(PnvPHB3 *phb) { - PCIHostState *pci = PCI_HOST_BRIDGE(phb); + PCIHostState *pci = PCI_HOST_BRIDGE(phb->phb_base); uint64_t addr = phb->regs[PHB_CONFIG_ADDRESS >> 3]; uint8_t bus, devfn; @@ -590,7 +591,7 @@ void pnv_phb3_reg_write(void *opaque, hwaddr off, uint64_t val, unsigned size) uint64_t pnv_phb3_reg_read(void *opaque, hwaddr off, unsigned size) { PnvPHB3 *phb = opaque; - PCIHostState *pci = PCI_HOST_BRIDGE(phb); + PCIHostState *pci = PCI_HOST_BRIDGE(phb->phb_base); uint64_t val; if ((off & 0xfffc) == PHB_CONFIG_DATA) { @@ -715,7 +716,8 @@ static bool pnv_phb3_resolve_pe(PnvPhb3DMASpace *ds) bus_num = pci_bus_num(ds->bus); addr = rtt & PHB_RTT_BASE_ADDRESS_MASK; addr += 2 * ((bus_num << 8) | ds->devfn); - if (dma_memory_read(&address_space_memory, addr, &rte, sizeof(rte))) { + if (dma_memory_read(&address_space_memory, addr, &rte, + sizeof(rte), MEMTXATTRS_UNSPECIFIED)) { phb3_error(ds->phb, "Failed to read RTT entry at 0x%"PRIx64, addr); /* Set error bits ? fence ? ... */ return false; @@ -790,11 +792,13 @@ static void pnv_phb3_translate_tve(PnvPhb3DMASpace *ds, hwaddr addr, sh = tbl_shift * lev + tce_shift; /* TODO: Multi-level untested */ - while ((lev--) >= 0) { + do { + lev--; + /* Grab the TCE address */ taddr = base | (((addr >> sh) & ((1ul << tbl_shift) - 1)) << 3); if (dma_memory_read(&address_space_memory, taddr, &tce, - sizeof(tce))) { + sizeof(tce), MEMTXATTRS_UNSPECIFIED)) { phb3_error(phb, "Failed to read TCE at 0x%"PRIx64, taddr); return; } @@ -811,21 +815,22 @@ static void pnv_phb3_translate_tve(PnvPhb3DMASpace *ds, hwaddr addr, } sh -= tbl_shift; base = tce & ~0xfffull; - } + } while (lev >= 0); /* We exit the loop with TCE being the final TCE */ - tce_mask = ~((1ull << tce_shift) - 1); - tlb->iova = addr & tce_mask; - tlb->translated_addr = tce & tce_mask; - tlb->addr_mask = ~tce_mask; - tlb->perm = tce & 3; if ((is_write & !(tce & 2)) || ((!is_write) && !(tce & 1))) { phb3_error(phb, "TCE access fault at 0x%"PRIx64, taddr); phb3_error(phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr, is_write ? 
'W' : 'R', tve); phb3_error(phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d", tta, lev, tts, tps); + return; } + tce_mask = ~((1ull << tce_shift) - 1); + tlb->iova = addr & tce_mask; + tlb->translated_addr = tce & tce_mask; + tlb->addr_mask = ~tce_mask; + tlb->perm = tce & 3; } } @@ -941,7 +946,7 @@ static AddressSpace *pnv_phb3_dma_iommu(PCIBus *bus, void *opaque, int devfn) } if (ds == NULL) { - ds = g_malloc0(sizeof(PnvPhb3DMASpace)); + ds = g_new0(PnvPhb3DMASpace, 1); ds->bus = bus; ds->devfn = devfn; ds->pe_num = PHB_INVALID_PE; @@ -980,20 +985,42 @@ static void pnv_phb3_instance_init(Object *obj) /* Power Bus Common Queue */ object_initialize_child(obj, "pbcq", &phb->pbcq, TYPE_PNV_PBCQ); - /* Root Port */ - object_initialize_child(obj, "root", &phb->root, TYPE_PNV_PHB3_ROOT_PORT); - qdev_prop_set_int32(DEVICE(&phb->root), "addr", PCI_DEVFN(0, 0)); - qdev_prop_set_bit(DEVICE(&phb->root), "multifunction", false); +} + +void pnv_phb3_bus_init(DeviceState *dev, PnvPHB3 *phb) +{ + PCIHostState *pci = PCI_HOST_BRIDGE(dev); + + /* + * PHB3 doesn't support IO space. However, qemu gets very upset if + * we don't have an IO region to anchor IO BARs onto so we just + * initialize one which we never hook up to anything + */ + memory_region_init(&phb->pci_io, OBJECT(phb), "pci-io", 0x10000); + memory_region_init(&phb->pci_mmio, OBJECT(phb), "pci-mmio", + PCI_MMIO_TOTAL_SIZE); + + pci->bus = pci_register_root_bus(dev, + dev->id ? dev->id : NULL, + pnv_phb3_set_irq, pnv_phb3_map_irq, phb, + &phb->pci_mmio, &phb->pci_io, + 0, 4, TYPE_PNV_PHB3_ROOT_BUS); + + object_property_set_int(OBJECT(pci->bus), "phb-id", phb->phb_id, + &error_abort); + object_property_set_int(OBJECT(pci->bus), "chip-id", phb->chip_id, + &error_abort); + + pci_setup_iommu(pci->bus, pnv_phb3_dma_iommu, phb); } static void pnv_phb3_realize(DeviceState *dev, Error **errp) { PnvPHB3 *phb = PNV_PHB3(dev); - PCIHostState *pci = PCI_HOST_BRIDGE(dev); PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); int i; - if (phb->phb_id >= PNV8_CHIP_PHB3_MAX) { + if (phb->phb_id >= PNV_CHIP_GET_CLASS(phb->chip)->num_phbs) { error_setg(errp, "invalid PHB index: %d", phb->phb_id); return; } @@ -1034,27 +1061,6 @@ static void pnv_phb3_realize(DeviceState *dev, Error **errp) /* Controller Registers */ memory_region_init_io(&phb->mr_regs, OBJECT(phb), &pnv_phb3_reg_ops, phb, "phb3-regs", 0x1000); - - /* - * PHB3 doesn't support IO space. 
However, qemu gets very upset if - * we don't have an IO region to anchor IO BARs onto so we just - * initialize one which we never hook up to anything - */ - memory_region_init(&phb->pci_io, OBJECT(phb), "pci-io", 0x10000); - memory_region_init(&phb->pci_mmio, OBJECT(phb), "pci-mmio", - PCI_MMIO_TOTAL_SIZE); - - pci->bus = pci_register_root_bus(dev, "root-bus", - pnv_phb3_set_irq, pnv_phb3_map_irq, phb, - &phb->pci_mmio, &phb->pci_io, - 0, 4, TYPE_PNV_PHB3_ROOT_BUS); - - pci_setup_iommu(pci->bus, pnv_phb3_dma_iommu, phb); - - /* Add a single Root port */ - qdev_prop_set_uint8(DEVICE(&phb->root), "chassis", phb->chip_id); - qdev_prop_set_uint16(DEVICE(&phb->root), "slot", phb->phb_id); - qdev_realize(DEVICE(&phb->root), BUS(pci->bus), &error_fatal); } void pnv_phb3_update_regions(PnvPHB3 *phb) @@ -1079,46 +1085,80 @@ void pnv_phb3_update_regions(PnvPHB3 *phb) pnv_phb3_check_all_m64s(phb); } -static const char *pnv_phb3_root_bus_path(PCIHostState *host_bridge, - PCIBus *rootbus) -{ - PnvPHB3 *phb = PNV_PHB3(host_bridge); - - snprintf(phb->bus_path, sizeof(phb->bus_path), "00%02x:%02x", - phb->chip_id, phb->phb_id); - return phb->bus_path; -} - static Property pnv_phb3_properties[] = { - DEFINE_PROP_UINT32("index", PnvPHB3, phb_id, 0), - DEFINE_PROP_UINT32("chip-id", PnvPHB3, chip_id, 0), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_UINT32("index", PnvPHB3, phb_id, 0), + DEFINE_PROP_UINT32("chip-id", PnvPHB3, chip_id, 0), + DEFINE_PROP_LINK("chip", PnvPHB3, chip, TYPE_PNV_CHIP, PnvChip *), + DEFINE_PROP_LINK("phb-base", PnvPHB3, phb_base, TYPE_PNV_PHB, PnvPHB *), + DEFINE_PROP_END_OF_LIST(), }; static void pnv_phb3_class_init(ObjectClass *klass, void *data) { - PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); - hc->root_bus_path = pnv_phb3_root_bus_path; dc->realize = pnv_phb3_realize; device_class_set_props(dc, pnv_phb3_properties); - set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); dc->user_creatable = false; } static const TypeInfo pnv_phb3_type_info = { .name = TYPE_PNV_PHB3, - .parent = TYPE_PCIE_HOST_BRIDGE, + .parent = TYPE_DEVICE, .instance_size = sizeof(PnvPHB3), .class_init = pnv_phb3_class_init, .instance_init = pnv_phb3_instance_init, }; +static void pnv_phb3_root_bus_get_prop(Object *obj, Visitor *v, + const char *name, + void *opaque, Error **errp) +{ + PnvPHB3RootBus *bus = PNV_PHB3_ROOT_BUS(obj); + uint64_t value = 0; + + if (strcmp(name, "phb-id") == 0) { + value = bus->phb_id; + } else { + value = bus->chip_id; + } + + visit_type_size(v, name, &value, errp); +} + +static void pnv_phb3_root_bus_set_prop(Object *obj, Visitor *v, + const char *name, + void *opaque, Error **errp) + +{ + PnvPHB3RootBus *bus = PNV_PHB3_ROOT_BUS(obj); + uint64_t value; + + if (!visit_type_size(v, name, &value, errp)) { + return; + } + + if (strcmp(name, "phb-id") == 0) { + bus->phb_id = value; + } else { + bus->chip_id = value; + } +} + static void pnv_phb3_root_bus_class_init(ObjectClass *klass, void *data) { BusClass *k = BUS_CLASS(klass); + object_class_property_add(klass, "phb-id", "int", + pnv_phb3_root_bus_get_prop, + pnv_phb3_root_bus_set_prop, + NULL, NULL); + + object_class_property_add(klass, "chip-id", "int", + pnv_phb3_root_bus_get_prop, + pnv_phb3_root_bus_set_prop, + NULL, NULL); + /* * PHB3 has only a single root complex. 
Enforce the limit on the * parent bus @@ -1129,56 +1169,13 @@ static void pnv_phb3_root_bus_class_init(ObjectClass *klass, void *data) static const TypeInfo pnv_phb3_root_bus_info = { .name = TYPE_PNV_PHB3_ROOT_BUS, .parent = TYPE_PCIE_BUS, + .instance_size = sizeof(PnvPHB3RootBus), .class_init = pnv_phb3_root_bus_class_init, - .interfaces = (InterfaceInfo[]) { - { INTERFACE_PCIE_DEVICE }, - { } - }, -}; - -static void pnv_phb3_root_port_realize(DeviceState *dev, Error **errp) -{ - PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev); - Error *local_err = NULL; - - rpc->parent_realize(dev, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } -} - -static void pnv_phb3_root_port_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); - PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(klass); - - dc->desc = "IBM PHB3 PCIE Root Port"; - - device_class_set_parent_realize(dc, pnv_phb3_root_port_realize, - &rpc->parent_realize); - dc->user_creatable = false; - - k->vendor_id = PCI_VENDOR_ID_IBM; - k->device_id = 0x03dc; - k->revision = 0; - - rpc->exp_offset = 0x48; - rpc->aer_offset = 0x100; -} - -static const TypeInfo pnv_phb3_root_port_info = { - .name = TYPE_PNV_PHB3_ROOT_PORT, - .parent = TYPE_PCIE_ROOT_PORT, - .instance_size = sizeof(PnvPHB3RootPort), - .class_init = pnv_phb3_root_port_class_init, }; static void pnv_phb3_register_types(void) { type_register_static(&pnv_phb3_root_bus_info); - type_register_static(&pnv_phb3_root_port_info); type_register_static(&pnv_phb3_type_info); type_register_static(&pnv_phb3_iommu_memory_region_info); } diff --git a/hw/pci-host/pnv_phb3_msi.c b/hw/pci-host/pnv_phb3_msi.c index 099d2092a2..2f4112907b 100644 --- a/hw/pci-host/pnv_phb3_msi.c +++ b/hw/pci-host/pnv_phb3_msi.c @@ -9,7 +9,6 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "qapi/error.h" -#include "qemu-common.h" #include "hw/pci-host/pnv_phb3_regs.h" #include "hw/pci-host/pnv_phb3.h" #include "hw/ppc/pnv.h" @@ -53,7 +52,8 @@ static bool phb3_msi_read_ive(PnvPHB3 *phb, int srcno, uint64_t *out_ive) return false; } - if (dma_memory_read(&address_space_memory, ive_addr, &ive, sizeof(ive))) { + if (dma_memory_read(&address_space_memory, ive_addr, + &ive, sizeof(ive), MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "Failed to read IVE at 0x%" PRIx64, ive_addr); return false; @@ -73,7 +73,8 @@ static void phb3_msi_set_p(Phb3MsiState *msi, int srcno, uint8_t gen) return; } - if (dma_memory_write(&address_space_memory, ive_addr + 4, &p, 1)) { + if (dma_memory_write(&address_space_memory, ive_addr + 4, + &p, 1, MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "Failed to write IVE (set P) at 0x%" PRIx64, ive_addr); } @@ -89,7 +90,8 @@ static void phb3_msi_set_q(Phb3MsiState *msi, int srcno) return; } - if (dma_memory_write(&address_space_memory, ive_addr + 5, &q, 1)) { + if (dma_memory_write(&address_space_memory, ive_addr + 5, + &q, 1, MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "Failed to write IVE (set Q) at 0x%" PRIx64, ive_addr); } diff --git a/hw/pci-host/pnv_phb3_pbcq.c b/hw/pci-host/pnv_phb3_pbcq.c index a0526aa1ec..82f70efa43 100644 --- a/hw/pci-host/pnv_phb3_pbcq.c +++ b/hw/pci-host/pnv_phb3_pbcq.c @@ -8,7 +8,6 @@ */ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qemu-common.h" #include "qemu/log.h" #include "target/ppc/cpu.h" #include "hw/ppc/fdt.h" @@ -284,6 +283,17 @@ static void pnv_pbcq_realize(DeviceState *dev, Error **errp) 
pnv_xscom_region_init(&pbcq->xscom_spci_regs, OBJECT(dev), &pnv_pbcq_spci_xscom_ops, pbcq, name, PNV_XSCOM_PBCQ_SPCI_SIZE); + + /* Populate the XSCOM address space. */ + pnv_xscom_add_subregion(phb->chip, + PNV_XSCOM_PBCQ_NEST_BASE + 0x400 * phb->phb_id, + &pbcq->xscom_nest_regs); + pnv_xscom_add_subregion(phb->chip, + PNV_XSCOM_PBCQ_PCI_BASE + 0x400 * phb->phb_id, + &pbcq->xscom_pci_regs); + pnv_xscom_add_subregion(phb->chip, + PNV_XSCOM_PBCQ_SPCI_BASE + 0x040 * phb->phb_id, + &pbcq->xscom_spci_regs); } static int pnv_pbcq_dt_xscom(PnvXScomInterface *dev, void *fdt, diff --git a/hw/pci-host/pnv_phb4.c b/hw/pci-host/pnv_phb4.c index 5c375a9f28..ccbde841fc 100644 --- a/hw/pci-host/pnv_phb4.c +++ b/hw/pci-host/pnv_phb4.c @@ -10,7 +10,6 @@ #include "qemu/log.h" #include "qapi/visitor.h" #include "qapi/error.h" -#include "qemu-common.h" #include "monitor/monitor.h" #include "target/ppc/cpu.h" #include "hw/pci-host/pnv_phb4_regs.h" @@ -28,25 +27,13 @@ qemu_log_mask(LOG_GUEST_ERROR, "phb4[%d:%d]: " fmt "\n", \ (phb)->chip_id, (phb)->phb_id, ## __VA_ARGS__) -/* - * QEMU version of the GETFIELD/SETFIELD macros - * - * These are common with the PnvXive model. - */ -static inline uint64_t GETFIELD(uint64_t mask, uint64_t word) -{ - return (word & mask) >> ctz64(mask); -} - -static inline uint64_t SETFIELD(uint64_t mask, uint64_t word, - uint64_t value) -{ - return (word & ~mask) | ((value << ctz64(mask)) & mask); -} +#define phb_pec_error(pec, fmt, ...) \ + qemu_log_mask(LOG_GUEST_ERROR, "phb4_pec[%d:%d]: " fmt "\n", \ + (pec)->chip_id, (pec)->index, ## __VA_ARGS__) static PCIDevice *pnv_phb4_find_cfg_dev(PnvPHB4 *phb) { - PCIHostState *pci = PCI_HOST_BRIDGE(phb); + PCIHostState *pci = PCI_HOST_BRIDGE(phb->phb_base); uint64_t addr = phb->regs[PHB_CONFIG_ADDRESS >> 3]; uint8_t bus, devfn; @@ -142,7 +129,7 @@ static uint64_t pnv_phb4_config_read(PnvPHB4 *phb, unsigned off, static void pnv_phb4_rc_config_write(PnvPHB4 *phb, unsigned off, unsigned size, uint64_t val) { - PCIHostState *pci = PCI_HOST_BRIDGE(phb); + PCIHostState *pci = PCI_HOST_BRIDGE(phb->phb_base); PCIDevice *pdev; if (size != 4) { @@ -151,7 +138,10 @@ static void pnv_phb4_rc_config_write(PnvPHB4 *phb, unsigned off, } pdev = pci_find_device(pci->bus, 0, 0); - assert(pdev); + if (!pdev) { + phb_error(phb, "rc_config_write device not found\n"); + return; + } pci_host_config_write_common(pdev, off, PHB_RC_CONFIG_SIZE, bswap32(val), 4); @@ -160,7 +150,7 @@ static void pnv_phb4_rc_config_write(PnvPHB4 *phb, unsigned off, static uint64_t pnv_phb4_rc_config_read(PnvPHB4 *phb, unsigned off, unsigned size) { - PCIHostState *pci = PCI_HOST_BRIDGE(phb); + PCIHostState *pci = PCI_HOST_BRIDGE(phb->phb_base); PCIDevice *pdev; uint64_t val; @@ -170,7 +160,10 @@ static uint64_t pnv_phb4_rc_config_read(PnvPHB4 *phb, unsigned off, } pdev = pci_find_device(pci->bus, 0, 0); - assert(pdev); + if (!pdev) { + phb_error(phb, "rc_config_read device not found\n"); + return ~0ull; + } val = pci_host_config_read_common(pdev, off, PHB_RC_CONFIG_SIZE, 4); return bswap32(val); @@ -217,16 +210,16 @@ static void pnv_phb4_check_mbt(PnvPHB4 *phb, uint32_t index) /* TODO: Figure out how to implemet/decode AOMASK */ /* Check if it matches an enabled MMIO region in the PEC stack */ - if (memory_region_is_mapped(&phb->stack->mmbar0) && - base >= phb->stack->mmio0_base && - (base + size) <= (phb->stack->mmio0_base + phb->stack->mmio0_size)) { - parent = &phb->stack->mmbar0; - base -= phb->stack->mmio0_base; - } else if (memory_region_is_mapped(&phb->stack->mmbar1) && - base 
>= phb->stack->mmio1_base && - (base + size) <= (phb->stack->mmio1_base + phb->stack->mmio1_size)) { - parent = &phb->stack->mmbar1; - base -= phb->stack->mmio1_base; + if (memory_region_is_mapped(&phb->mmbar0) && + base >= phb->mmio0_base && + (base + size) <= (phb->mmio0_base + phb->mmio0_size)) { + parent = &phb->mmbar0; + base -= phb->mmio0_base; + } else if (memory_region_is_mapped(&phb->mmbar1) && + base >= phb->mmio1_base && + (base + size) <= (phb->mmio1_base + phb->mmio1_size)) { + parent = &phb->mmbar1; + base -= phb->mmio1_base; } else { phb_error(phb, "PHB MBAR %d out of parent bounds", index); return; @@ -475,6 +468,15 @@ static void pnv_phb4_update_xsrc(PnvPHB4 *phb) flags = 0; } + /* + * When the PQ disable configuration bit is set, the check on the + * PQ state bits is disabled on the PHB side (for MSI only) and it + * is performed on the IC side instead. + */ + if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_PQ_DISABLE) { + flags |= XIVE_SRC_PQ_DISABLE; + } + phb->xsrc.esb_shift = shift; phb->xsrc.esb_flags = flags; @@ -662,7 +664,7 @@ static uint64_t pnv_phb4_reg_read(void *opaque, hwaddr off, unsigned size) switch (off) { case PHB_VERSION: - return phb->version; + return PNV_PHB4_PEC_GET_CLASS(phb->pec)->version; /* Read-only */ case PHB_PHB4_GEN_CAP: @@ -847,6 +849,305 @@ const MemoryRegionOps pnv_phb4_xscom_ops = { .endianness = DEVICE_BIG_ENDIAN, }; +static uint64_t pnv_pec_stk_nest_xscom_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvPHB4 *phb = PNV_PHB4(opaque); + uint32_t reg = addr >> 3; + + /* TODO: add list of allowed registers and error out if not */ + return phb->nest_regs[reg]; +} + +/* + * Return the 'stack_no' of a PHB4. 'stack_no' is the order + * the PHB4 occupies in the PEC. This is the reverse of what + * pnv_phb4_pec_get_phb_id() does. + * + * E.g. a phb with phb_id = 4 and pec->index = 1 (PEC1) will + * be the second phb (stack_no = 1) of the PEC. + */ +static int pnv_phb4_get_phb_stack_no(PnvPHB4 *phb) +{ + PnvPhb4PecState *pec = phb->pec; + PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec); + int index = pec->index; + int stack_no = phb->phb_id; + + while (index--) { + stack_no -= pecc->num_phbs[index]; + } + + return stack_no; +} + +static void pnv_phb4_update_regions(PnvPHB4 *phb) +{ + /* Unmap first always */ + if (memory_region_is_mapped(&phb->mr_regs)) { + memory_region_del_subregion(&phb->phbbar, &phb->mr_regs); + } + if (memory_region_is_mapped(&phb->xsrc.esb_mmio)) { + memory_region_del_subregion(&phb->intbar, &phb->xsrc.esb_mmio); + } + + /* Map registers if enabled */ + if (memory_region_is_mapped(&phb->phbbar)) { + memory_region_add_subregion(&phb->phbbar, 0, &phb->mr_regs); + } + + /* Map ESB if enabled */ + if (memory_region_is_mapped(&phb->intbar)) { + memory_region_add_subregion(&phb->intbar, 0, &phb->xsrc.esb_mmio); + } + + /* Check/update m32 */ + pnv_phb4_check_all_mbt(phb); +} + +static void pnv_pec_phb_update_map(PnvPHB4 *phb) +{ + PnvPhb4PecState *pec = phb->pec; + MemoryRegion *sysmem = get_system_memory(); + uint64_t bar_en = phb->nest_regs[PEC_NEST_STK_BAR_EN]; + int stack_no = pnv_phb4_get_phb_stack_no(phb); + uint64_t bar, mask, size; + char name[64]; + + /* + * NOTE: This will really not work well if those are remapped + * after the PHB has created its sub regions. 
We could do better + * if we had a way to resize regions but we don't really care + * that much in practice as the stuff below really only happens + * once early during boot + */ + + /* Handle unmaps */ + if (memory_region_is_mapped(&phb->mmbar0) && + !(bar_en & PEC_NEST_STK_BAR_EN_MMIO0)) { + memory_region_del_subregion(sysmem, &phb->mmbar0); + } + if (memory_region_is_mapped(&phb->mmbar1) && + !(bar_en & PEC_NEST_STK_BAR_EN_MMIO1)) { + memory_region_del_subregion(sysmem, &phb->mmbar1); + } + if (memory_region_is_mapped(&phb->phbbar) && + !(bar_en & PEC_NEST_STK_BAR_EN_PHB)) { + memory_region_del_subregion(sysmem, &phb->phbbar); + } + if (memory_region_is_mapped(&phb->intbar) && + !(bar_en & PEC_NEST_STK_BAR_EN_INT)) { + memory_region_del_subregion(sysmem, &phb->intbar); + } + + /* Update PHB */ + pnv_phb4_update_regions(phb); + + /* Handle maps */ + if (!memory_region_is_mapped(&phb->mmbar0) && + (bar_en & PEC_NEST_STK_BAR_EN_MMIO0)) { + bar = phb->nest_regs[PEC_NEST_STK_MMIO_BAR0] >> 8; + mask = phb->nest_regs[PEC_NEST_STK_MMIO_BAR0_MASK]; + size = ((~mask) >> 8) + 1; + snprintf(name, sizeof(name), "pec-%d.%d-phb-%d-mmio0", + pec->chip_id, pec->index, stack_no); + memory_region_init(&phb->mmbar0, OBJECT(phb), name, size); + memory_region_add_subregion(sysmem, bar, &phb->mmbar0); + phb->mmio0_base = bar; + phb->mmio0_size = size; + } + if (!memory_region_is_mapped(&phb->mmbar1) && + (bar_en & PEC_NEST_STK_BAR_EN_MMIO1)) { + bar = phb->nest_regs[PEC_NEST_STK_MMIO_BAR1] >> 8; + mask = phb->nest_regs[PEC_NEST_STK_MMIO_BAR1_MASK]; + size = ((~mask) >> 8) + 1; + snprintf(name, sizeof(name), "pec-%d.%d-phb-%d-mmio1", + pec->chip_id, pec->index, stack_no); + memory_region_init(&phb->mmbar1, OBJECT(phb), name, size); + memory_region_add_subregion(sysmem, bar, &phb->mmbar1); + phb->mmio1_base = bar; + phb->mmio1_size = size; + } + if (!memory_region_is_mapped(&phb->phbbar) && + (bar_en & PEC_NEST_STK_BAR_EN_PHB)) { + bar = phb->nest_regs[PEC_NEST_STK_PHB_REGS_BAR] >> 8; + size = PNV_PHB4_NUM_REGS << 3; + snprintf(name, sizeof(name), "pec-%d.%d-phb-%d", + pec->chip_id, pec->index, stack_no); + memory_region_init(&phb->phbbar, OBJECT(phb), name, size); + memory_region_add_subregion(sysmem, bar, &phb->phbbar); + } + if (!memory_region_is_mapped(&phb->intbar) && + (bar_en & PEC_NEST_STK_BAR_EN_INT)) { + bar = phb->nest_regs[PEC_NEST_STK_INT_BAR] >> 8; + size = PNV_PHB4_MAX_INTs << 16; + snprintf(name, sizeof(name), "pec-%d.%d-phb-%d-int", + phb->pec->chip_id, phb->pec->index, stack_no); + memory_region_init(&phb->intbar, OBJECT(phb), name, size); + memory_region_add_subregion(sysmem, bar, &phb->intbar); + } + + /* Update PHB */ + pnv_phb4_update_regions(phb); +} + +static void pnv_pec_stk_nest_xscom_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvPHB4 *phb = PNV_PHB4(opaque); + PnvPhb4PecState *pec = phb->pec; + uint32_t reg = addr >> 3; + + switch (reg) { + case PEC_NEST_STK_PCI_NEST_FIR: + phb->nest_regs[PEC_NEST_STK_PCI_NEST_FIR] = val; + break; + case PEC_NEST_STK_PCI_NEST_FIR_CLR: + phb->nest_regs[PEC_NEST_STK_PCI_NEST_FIR] &= val; + break; + case PEC_NEST_STK_PCI_NEST_FIR_SET: + phb->nest_regs[PEC_NEST_STK_PCI_NEST_FIR] |= val; + break; + case PEC_NEST_STK_PCI_NEST_FIR_MSK: + phb->nest_regs[PEC_NEST_STK_PCI_NEST_FIR_MSK] = val; + break; + case PEC_NEST_STK_PCI_NEST_FIR_MSKC: + phb->nest_regs[PEC_NEST_STK_PCI_NEST_FIR_MSK] &= val; + break; + case PEC_NEST_STK_PCI_NEST_FIR_MSKS: + phb->nest_regs[PEC_NEST_STK_PCI_NEST_FIR_MSK] |= val; + break; + case 
PEC_NEST_STK_PCI_NEST_FIR_ACT0: + case PEC_NEST_STK_PCI_NEST_FIR_ACT1: + phb->nest_regs[reg] = val; + break; + case PEC_NEST_STK_PCI_NEST_FIR_WOF: + phb->nest_regs[reg] = 0; + break; + case PEC_NEST_STK_ERR_REPORT_0: + case PEC_NEST_STK_ERR_REPORT_1: + case PEC_NEST_STK_PBCQ_GNRL_STATUS: + /* Flag error ? */ + break; + case PEC_NEST_STK_PBCQ_MODE: + phb->nest_regs[reg] = val & 0xff00000000000000ull; + break; + case PEC_NEST_STK_MMIO_BAR0: + case PEC_NEST_STK_MMIO_BAR0_MASK: + case PEC_NEST_STK_MMIO_BAR1: + case PEC_NEST_STK_MMIO_BAR1_MASK: + if (phb->nest_regs[PEC_NEST_STK_BAR_EN] & + (PEC_NEST_STK_BAR_EN_MMIO0 | + PEC_NEST_STK_BAR_EN_MMIO1)) { + phb_pec_error(pec, "Changing enabled BAR unsupported\n"); + } + phb->nest_regs[reg] = val & 0xffffffffff000000ull; + break; + case PEC_NEST_STK_PHB_REGS_BAR: + if (phb->nest_regs[PEC_NEST_STK_BAR_EN] & PEC_NEST_STK_BAR_EN_PHB) { + phb_pec_error(pec, "Changing enabled BAR unsupported\n"); + } + phb->nest_regs[reg] = val & 0xffffffffffc00000ull; + break; + case PEC_NEST_STK_INT_BAR: + if (phb->nest_regs[PEC_NEST_STK_BAR_EN] & PEC_NEST_STK_BAR_EN_INT) { + phb_pec_error(pec, "Changing enabled BAR unsupported\n"); + } + phb->nest_regs[reg] = val & 0xfffffff000000000ull; + break; + case PEC_NEST_STK_BAR_EN: + phb->nest_regs[reg] = val & 0xf000000000000000ull; + pnv_pec_phb_update_map(phb); + break; + case PEC_NEST_STK_DATA_FRZ_TYPE: + case PEC_NEST_STK_PBCQ_TUN_BAR: + /* Not used for now */ + phb->nest_regs[reg] = val; + break; + default: + qemu_log_mask(LOG_UNIMP, "phb4_pec: nest_xscom_write 0x%"HWADDR_PRIx + "=%"PRIx64"\n", addr, val); + } +} + +static const MemoryRegionOps pnv_pec_stk_nest_xscom_ops = { + .read = pnv_pec_stk_nest_xscom_read, + .write = pnv_pec_stk_nest_xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static uint64_t pnv_pec_stk_pci_xscom_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvPHB4 *phb = PNV_PHB4(opaque); + uint32_t reg = addr >> 3; + + /* TODO: add list of allowed registers and error out if not */ + return phb->pci_regs[reg]; +} + +static void pnv_pec_stk_pci_xscom_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvPHB4 *phb = PNV_PHB4(opaque); + uint32_t reg = addr >> 3; + + switch (reg) { + case PEC_PCI_STK_PCI_FIR: + phb->pci_regs[reg] = val; + break; + case PEC_PCI_STK_PCI_FIR_CLR: + phb->pci_regs[PEC_PCI_STK_PCI_FIR] &= val; + break; + case PEC_PCI_STK_PCI_FIR_SET: + phb->pci_regs[PEC_PCI_STK_PCI_FIR] |= val; + break; + case PEC_PCI_STK_PCI_FIR_MSK: + phb->pci_regs[reg] = val; + break; + case PEC_PCI_STK_PCI_FIR_MSKC: + phb->pci_regs[PEC_PCI_STK_PCI_FIR_MSK] &= val; + break; + case PEC_PCI_STK_PCI_FIR_MSKS: + phb->pci_regs[PEC_PCI_STK_PCI_FIR_MSK] |= val; + break; + case PEC_PCI_STK_PCI_FIR_ACT0: + case PEC_PCI_STK_PCI_FIR_ACT1: + phb->pci_regs[reg] = val; + break; + case PEC_PCI_STK_PCI_FIR_WOF: + phb->pci_regs[reg] = 0; + break; + case PEC_PCI_STK_ETU_RESET: + phb->pci_regs[reg] = val & 0x8000000000000000ull; + /* TODO: Implement reset */ + break; + case PEC_PCI_STK_PBAIB_ERR_REPORT: + break; + case PEC_PCI_STK_PBAIB_TX_CMD_CRED: + case PEC_PCI_STK_PBAIB_TX_DAT_CRED: + phb->pci_regs[reg] = val; + break; + default: + qemu_log_mask(LOG_UNIMP, "phb4_pec_stk: pci_xscom_write 0x%"HWADDR_PRIx + "=%"PRIx64"\n", addr, val); + } +} + +static const MemoryRegionOps pnv_pec_stk_pci_xscom_ops = { + .read = pnv_pec_stk_pci_xscom_read, + .write = pnv_pec_stk_pci_xscom_write, + 
.valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + static int pnv_phb4_map_irq(PCIDevice *pci_dev, int irq_num) { /* Check that out properly ... */ @@ -891,7 +1192,8 @@ static bool pnv_phb4_resolve_pe(PnvPhb4DMASpace *ds) bus_num = pci_bus_num(ds->bus); addr = rtt & PHB_RTT_BASE_ADDRESS_MASK; addr += 2 * PCI_BUILD_BDF(bus_num, ds->devfn); - if (dma_memory_read(&address_space_memory, addr, &rte, sizeof(rte))) { + if (dma_memory_read(&address_space_memory, addr, &rte, + sizeof(rte), MEMTXATTRS_UNSPECIFIED)) { phb_error(ds->phb, "Failed to read RTT entry at 0x%"PRIx64, addr); /* Set error bits ? fence ? ... */ return false; @@ -957,11 +1259,13 @@ static void pnv_phb4_translate_tve(PnvPhb4DMASpace *ds, hwaddr addr, /* TODO: Limit to support IO page sizes */ /* TODO: Multi-level untested */ - while ((lev--) >= 0) { + do { + lev--; + /* Grab the TCE address */ taddr = base | (((addr >> sh) & ((1ul << tbl_shift) - 1)) << 3); if (dma_memory_read(&address_space_memory, taddr, &tce, - sizeof(tce))) { + sizeof(tce), MEMTXATTRS_UNSPECIFIED)) { phb_error(ds->phb, "Failed to read TCE at 0x%"PRIx64, taddr); return; } @@ -978,21 +1282,22 @@ static void pnv_phb4_translate_tve(PnvPhb4DMASpace *ds, hwaddr addr, } sh -= tbl_shift; base = tce & ~0xfffull; - } + } while (lev >= 0); /* We exit the loop with TCE being the final TCE */ - tce_mask = ~((1ull << tce_shift) - 1); - tlb->iova = addr & tce_mask; - tlb->translated_addr = tce & tce_mask; - tlb->addr_mask = ~tce_mask; - tlb->perm = tce & 3; if ((is_write & !(tce & 2)) || ((!is_write) && !(tce & 1))) { phb_error(ds->phb, "TCE access fault at 0x%"PRIx64, taddr); phb_error(ds->phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr, is_write ? 'W' : 'R', tve); phb_error(ds->phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d", tta, lev, tts, tps); + return; } + tce_mask = ~((1ull << tce_shift) - 1); + tlb->iova = addr & tce_mask; + tlb->translated_addr = tce & tce_mask; + tlb->addr_mask = ~tce_mask; + tlb->perm = tce & 3; } } @@ -1061,6 +1366,23 @@ static const TypeInfo pnv_phb4_iommu_memory_region_info = { .class_init = pnv_phb4_iommu_memory_region_class_init, }; +/* + * Return the index/phb-id of a PHB4 that belongs to a + * pec->stacks[stack_index] stack. + */ +int pnv_phb4_pec_get_phb_id(PnvPhb4PecState *pec, int stack_index) +{ + PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec); + int index = pec->index; + int offset = 0; + + while (index--) { + offset += pecc->num_phbs[index]; + } + + return offset + stack_index; +} + /* * MSI/MSIX memory region implementation. * The handler handles both MSI and MSIX. 
@@ -1127,7 +1449,7 @@ static AddressSpace *pnv_phb4_dma_iommu(PCIBus *bus, void *opaque, int devfn) ds = pnv_phb4_dma_find(phb, bus, devfn); if (ds == NULL) { - ds = g_malloc0(sizeof(PnvPhb4DMASpace)); + ds = g_new0(PnvPhb4DMASpace, 1); ds->bus = bus; ds->devfn = devfn; ds->pe_num = PHB_INVALID_PE; @@ -1150,6 +1472,52 @@ static AddressSpace *pnv_phb4_dma_iommu(PCIBus *bus, void *opaque, int devfn) return &ds->dma_as; } +static void pnv_phb4_xscom_realize(PnvPHB4 *phb) +{ + PnvPhb4PecState *pec = phb->pec; + PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec); + int stack_no = pnv_phb4_get_phb_stack_no(phb); + uint32_t pec_nest_base; + uint32_t pec_pci_base; + char name[64]; + + assert(pec); + + /* Initialize the XSCOM regions for the stack registers */ + snprintf(name, sizeof(name), "xscom-pec-%d.%d-nest-phb-%d", + pec->chip_id, pec->index, stack_no); + pnv_xscom_region_init(&phb->nest_regs_mr, OBJECT(phb), + &pnv_pec_stk_nest_xscom_ops, phb, name, + PHB4_PEC_NEST_STK_REGS_COUNT); + + snprintf(name, sizeof(name), "xscom-pec-%d.%d-pci-phb-%d", + pec->chip_id, pec->index, stack_no); + pnv_xscom_region_init(&phb->pci_regs_mr, OBJECT(phb), + &pnv_pec_stk_pci_xscom_ops, phb, name, + PHB4_PEC_PCI_STK_REGS_COUNT); + + /* PHB pass-through */ + snprintf(name, sizeof(name), "xscom-pec-%d.%d-pci-phb-%d", + pec->chip_id, pec->index, stack_no); + pnv_xscom_region_init(&phb->phb_regs_mr, OBJECT(phb), + &pnv_phb4_xscom_ops, phb, name, 0x40); + + pec_nest_base = pecc->xscom_nest_base(pec); + pec_pci_base = pecc->xscom_pci_base(pec); + + /* Populate the XSCOM address space. */ + pnv_xscom_add_subregion(pec->chip, + pec_nest_base + 0x40 * (stack_no + 1), + &phb->nest_regs_mr); + pnv_xscom_add_subregion(pec->chip, + pec_pci_base + 0x40 * (stack_no + 1), + &phb->pci_regs_mr); + pnv_xscom_add_subregion(pec->chip, + pec_pci_base + PNV9_XSCOM_PEC_PCI_STK0 + + 0x40 * stack_no, + &phb->phb_regs_mr); +} + static void pnv_phb4_instance_init(Object *obj) { PnvPHB4 *phb = PNV_PHB4(obj); @@ -1158,39 +1526,18 @@ static void pnv_phb4_instance_init(Object *obj) /* XIVE interrupt source object */ object_initialize_child(obj, "source", &phb->xsrc, TYPE_XIVE_SOURCE); - - /* Root Port */ - object_initialize_child(obj, "root", &phb->root, TYPE_PNV_PHB4_ROOT_PORT); - - qdev_prop_set_int32(DEVICE(&phb->root), "addr", PCI_DEVFN(0, 0)); - qdev_prop_set_bit(DEVICE(&phb->root), "multifunction", false); } -static void pnv_phb4_realize(DeviceState *dev, Error **errp) +void pnv_phb4_bus_init(DeviceState *dev, PnvPHB4 *phb) { - PnvPHB4 *phb = PNV_PHB4(dev); PCIHostState *pci = PCI_HOST_BRIDGE(dev); - XiveSource *xsrc = &phb->xsrc; - int nr_irqs; char name[32]; - assert(phb->stack); - - /* Set the "big_phb" flag */ - phb->big_phb = phb->phb_id == 0 || phb->phb_id == 3; - - /* Controller Registers */ - snprintf(name, sizeof(name), "phb4-%d.%d-regs", phb->chip_id, - phb->phb_id); - memory_region_init_io(&phb->mr_regs, OBJECT(phb), &pnv_phb4_reg_ops, phb, - name, 0x2000); - /* * PHB4 doesn't support IO space. 
However, qemu gets very upset if * we don't have an IO region to anchor IO BARs onto so we just * initialize one which we never hook up to anything */ - snprintf(name, sizeof(name), "phb4-%d.%d-pci-io", phb->chip_id, phb->phb_id); memory_region_init(&phb->pci_io, OBJECT(phb), name, 0x10000); @@ -1200,16 +1547,35 @@ static void pnv_phb4_realize(DeviceState *dev, Error **errp) memory_region_init(&phb->pci_mmio, OBJECT(phb), name, PCI_MMIO_TOTAL_SIZE); - pci->bus = pci_register_root_bus(dev, "root-bus", + pci->bus = pci_register_root_bus(dev, dev->id ? dev->id : NULL, pnv_phb4_set_irq, pnv_phb4_map_irq, phb, &phb->pci_mmio, &phb->pci_io, 0, 4, TYPE_PNV_PHB4_ROOT_BUS); - pci_setup_iommu(pci->bus, pnv_phb4_dma_iommu, phb); - /* Add a single Root port */ - qdev_prop_set_uint8(DEVICE(&phb->root), "chassis", phb->chip_id); - qdev_prop_set_uint16(DEVICE(&phb->root), "slot", phb->phb_id); - qdev_realize(DEVICE(&phb->root), BUS(pci->bus), &error_fatal); + object_property_set_int(OBJECT(pci->bus), "phb-id", phb->phb_id, + &error_abort); + object_property_set_int(OBJECT(pci->bus), "chip-id", phb->chip_id, + &error_abort); + + pci_setup_iommu(pci->bus, pnv_phb4_dma_iommu, phb); + pci->bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE; +} + +static void pnv_phb4_realize(DeviceState *dev, Error **errp) +{ + PnvPHB4 *phb = PNV_PHB4(dev); + XiveSource *xsrc = &phb->xsrc; + int nr_irqs; + char name[32]; + + /* Set the "big_phb" flag */ + phb->big_phb = phb->phb_id == 0 || phb->phb_id == 3; + + /* Controller Registers */ + snprintf(name, sizeof(name), "phb4-%d.%d-regs", phb->chip_id, + phb->phb_id); + memory_region_init_io(&phb->mr_regs, OBJECT(phb), &pnv_phb4_reg_ops, phb, + name, 0x2000); /* Setup XIVE Source */ if (phb->big_phb) { @@ -1226,39 +1592,68 @@ static void pnv_phb4_realize(DeviceState *dev, Error **errp) pnv_phb4_update_xsrc(phb); phb->qirqs = qemu_allocate_irqs(xive_source_set_irq, xsrc, xsrc->nr_irqs); + + pnv_phb4_xscom_realize(phb); } -static void pnv_phb4_reset(DeviceState *dev) +/* + * Address base trigger mode (POWER10) + * + * Trigger directly the IC ESB page + */ +static void pnv_phb4_xive_notify_abt(PnvPHB4 *phb, uint32_t srcno, + bool pq_checked) { - PnvPHB4 *phb = PNV_PHB4(dev); - PCIDevice *root_dev = PCI_DEVICE(&phb->root); + uint64_t notif_port = phb->regs[PHB_INT_NOTIFY_ADDR >> 3]; + uint64_t data = 0; /* trigger data : don't care */ + hwaddr addr; + MemTxResult result; + int esb_shift; + + if (notif_port & PHB_INT_NOTIFY_ADDR_64K) { + esb_shift = 16; + } else { + esb_shift = 12; + } + + /* Compute the address of the IC ESB management page */ + addr = (notif_port & ~PHB_INT_NOTIFY_ADDR_64K); + addr |= (1ull << (esb_shift + 1)) * srcno; + addr |= (1ull << esb_shift); /* - * Configure PCI device id at reset using a property. + * When the PQ state bits are checked on the PHB, the associated + * PQ state bits on the IC should be ignored. Use the unconditional + * trigger offset to inject a trigger on the IC. 
This is always + * the case for LSIs */ - pci_config_set_vendor_id(root_dev->config, PCI_VENDOR_ID_IBM); - pci_config_set_device_id(root_dev->config, phb->device_id); + if (pq_checked) { + addr |= XIVE_ESB_INJECT; + } + + trace_pnv_phb4_xive_notify_ic(addr, data); + + address_space_stq_be(&address_space_memory, addr, data, + MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + phb_error(phb, "trigger failed @%"HWADDR_PRIx "\n", addr); + return; + } } -static const char *pnv_phb4_root_bus_path(PCIHostState *host_bridge, - PCIBus *rootbus) +static void pnv_phb4_xive_notify_ic(PnvPHB4 *phb, uint32_t srcno, + bool pq_checked) { - PnvPHB4 *phb = PNV_PHB4(host_bridge); - - snprintf(phb->bus_path, sizeof(phb->bus_path), "00%02x:%02x", - phb->chip_id, phb->phb_id); - return phb->bus_path; -} - -static void pnv_phb4_xive_notify(XiveNotifier *xf, uint32_t srcno) -{ - PnvPHB4 *phb = PNV_PHB4(xf); uint64_t notif_port = phb->regs[PHB_INT_NOTIFY_ADDR >> 3]; uint32_t offset = phb->regs[PHB_INT_NOTIFY_INDEX >> 3]; - uint64_t data = XIVE_TRIGGER_PQ | offset | srcno; + uint64_t data = offset | srcno; MemTxResult result; - trace_pnv_phb4_xive_notify(notif_port, data); + if (pq_checked) { + data |= XIVE_TRIGGER_PQ; + } + + trace_pnv_phb4_xive_notify_ic(notif_port, data); address_space_stq_be(&address_space_memory, notif_port, data, MEMTXATTRS_UNSPECIFIED, &result); @@ -1268,35 +1663,42 @@ static void pnv_phb4_xive_notify(XiveNotifier *xf, uint32_t srcno) } } +static void pnv_phb4_xive_notify(XiveNotifier *xf, uint32_t srcno, + bool pq_checked) +{ + PnvPHB4 *phb = PNV_PHB4(xf); + + if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_ABT_MODE) { + pnv_phb4_xive_notify_abt(phb, srcno, pq_checked); + } else { + pnv_phb4_xive_notify_ic(phb, srcno, pq_checked); + } +} + static Property pnv_phb4_properties[] = { - DEFINE_PROP_UINT32("index", PnvPHB4, phb_id, 0), - DEFINE_PROP_UINT32("chip-id", PnvPHB4, chip_id, 0), - DEFINE_PROP_UINT64("version", PnvPHB4, version, 0), - DEFINE_PROP_UINT16("device-id", PnvPHB4, device_id, 0), - DEFINE_PROP_LINK("stack", PnvPHB4, stack, TYPE_PNV_PHB4_PEC_STACK, - PnvPhb4PecStack *), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_UINT32("index", PnvPHB4, phb_id, 0), + DEFINE_PROP_UINT32("chip-id", PnvPHB4, chip_id, 0), + DEFINE_PROP_LINK("pec", PnvPHB4, pec, TYPE_PNV_PHB4_PEC, + PnvPhb4PecState *), + DEFINE_PROP_LINK("phb-base", PnvPHB4, phb_base, TYPE_PNV_PHB, PnvPHB *), + DEFINE_PROP_END_OF_LIST(), }; static void pnv_phb4_class_init(ObjectClass *klass, void *data) { - PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); XiveNotifierClass *xfc = XIVE_NOTIFIER_CLASS(klass); - hc->root_bus_path = pnv_phb4_root_bus_path; dc->realize = pnv_phb4_realize; device_class_set_props(dc, pnv_phb4_properties); - set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); dc->user_creatable = false; - dc->reset = pnv_phb4_reset; xfc->notify = pnv_phb4_xive_notify; } static const TypeInfo pnv_phb4_type_info = { .name = TYPE_PNV_PHB4, - .parent = TYPE_PCIE_HOST_BRIDGE, + .parent = TYPE_DEVICE, .instance_init = pnv_phb4_instance_init, .instance_size = sizeof(PnvPHB4), .class_init = pnv_phb4_class_init, @@ -1306,10 +1708,61 @@ static const TypeInfo pnv_phb4_type_info = { } }; +static const TypeInfo pnv_phb5_type_info = { + .name = TYPE_PNV_PHB5, + .parent = TYPE_PNV_PHB4, + .instance_size = sizeof(PnvPHB4), +}; + +static void pnv_phb4_root_bus_get_prop(Object *obj, Visitor *v, + const char *name, + void *opaque, Error **errp) +{ + PnvPHB4RootBus *bus = 
PNV_PHB4_ROOT_BUS(obj); + uint64_t value = 0; + + if (strcmp(name, "phb-id") == 0) { + value = bus->phb_id; + } else { + value = bus->chip_id; + } + + visit_type_size(v, name, &value, errp); +} + +static void pnv_phb4_root_bus_set_prop(Object *obj, Visitor *v, + const char *name, + void *opaque, Error **errp) + +{ + PnvPHB4RootBus *bus = PNV_PHB4_ROOT_BUS(obj); + uint64_t value; + + if (!visit_type_size(v, name, &value, errp)) { + return; + } + + if (strcmp(name, "phb-id") == 0) { + bus->phb_id = value; + } else { + bus->chip_id = value; + } +} + static void pnv_phb4_root_bus_class_init(ObjectClass *klass, void *data) { BusClass *k = BUS_CLASS(klass); + object_class_property_add(klass, "phb-id", "int", + pnv_phb4_root_bus_get_prop, + pnv_phb4_root_bus_set_prop, + NULL, NULL); + + object_class_property_add(klass, "chip-id", "int", + pnv_phb4_root_bus_get_prop, + pnv_phb4_root_bus_set_prop, + NULL, NULL); + /* * PHB4 has only a single root complex. Enforce the limit on the * parent bus @@ -1320,118 +1773,31 @@ static void pnv_phb4_root_bus_class_init(ObjectClass *klass, void *data) static const TypeInfo pnv_phb4_root_bus_info = { .name = TYPE_PNV_PHB4_ROOT_BUS, .parent = TYPE_PCIE_BUS, + .instance_size = sizeof(PnvPHB4RootBus), .class_init = pnv_phb4_root_bus_class_init, - .interfaces = (InterfaceInfo[]) { - { INTERFACE_PCIE_DEVICE }, - { } - }, -}; - -static void pnv_phb4_root_port_reset(DeviceState *dev) -{ - PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev); - PCIDevice *d = PCI_DEVICE(dev); - uint8_t *conf = d->config; - - rpc->parent_reset(dev); - - pci_byte_test_and_set_mask(conf + PCI_IO_BASE, - PCI_IO_RANGE_MASK & 0xff); - pci_byte_test_and_clear_mask(conf + PCI_IO_LIMIT, - PCI_IO_RANGE_MASK & 0xff); - pci_set_word(conf + PCI_MEMORY_BASE, 0); - pci_set_word(conf + PCI_MEMORY_LIMIT, 0xfff0); - pci_set_word(conf + PCI_PREF_MEMORY_BASE, 0x1); - pci_set_word(conf + PCI_PREF_MEMORY_LIMIT, 0xfff1); - pci_set_long(conf + PCI_PREF_BASE_UPPER32, 0x1); /* Hack */ - pci_set_long(conf + PCI_PREF_LIMIT_UPPER32, 0xffffffff); -} - -static void pnv_phb4_root_port_realize(DeviceState *dev, Error **errp) -{ - PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev); - Error *local_err = NULL; - - rpc->parent_realize(dev, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } -} - -static void pnv_phb4_root_port_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); - PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(klass); - - dc->desc = "IBM PHB4 PCIE Root Port"; - dc->user_creatable = false; - - device_class_set_parent_realize(dc, pnv_phb4_root_port_realize, - &rpc->parent_realize); - device_class_set_parent_reset(dc, pnv_phb4_root_port_reset, - &rpc->parent_reset); - - k->vendor_id = PCI_VENDOR_ID_IBM; - k->device_id = PNV_PHB4_DEVICE_ID; - k->revision = 0; - - rpc->exp_offset = 0x48; - rpc->aer_offset = 0x100; - - dc->reset = &pnv_phb4_root_port_reset; -} - -static const TypeInfo pnv_phb4_root_port_info = { - .name = TYPE_PNV_PHB4_ROOT_PORT, - .parent = TYPE_PCIE_ROOT_PORT, - .instance_size = sizeof(PnvPHB4RootPort), - .class_init = pnv_phb4_root_port_class_init, }; static void pnv_phb4_register_types(void) { type_register_static(&pnv_phb4_root_bus_info); - type_register_static(&pnv_phb4_root_port_info); type_register_static(&pnv_phb4_type_info); + type_register_static(&pnv_phb5_type_info); type_register_static(&pnv_phb4_iommu_memory_region_info); } type_init(pnv_phb4_register_types); -void 
pnv_phb4_update_regions(PnvPhb4PecStack *stack) -{ - PnvPHB4 *phb = &stack->phb; - - /* Unmap first always */ - if (memory_region_is_mapped(&phb->mr_regs)) { - memory_region_del_subregion(&stack->phbbar, &phb->mr_regs); - } - if (memory_region_is_mapped(&phb->xsrc.esb_mmio)) { - memory_region_del_subregion(&stack->intbar, &phb->xsrc.esb_mmio); - } - - /* Map registers if enabled */ - if (memory_region_is_mapped(&stack->phbbar)) { - memory_region_add_subregion(&stack->phbbar, 0, &phb->mr_regs); - } - - /* Map ESB if enabled */ - if (memory_region_is_mapped(&stack->intbar)) { - memory_region_add_subregion(&stack->intbar, 0, &phb->xsrc.esb_mmio); - } - - /* Check/update m32 */ - pnv_phb4_check_all_mbt(phb); -} - void pnv_phb4_pic_print_info(PnvPHB4 *phb, Monitor *mon) { + uint64_t notif_port = + phb->regs[PHB_INT_NOTIFY_ADDR >> 3] & ~PHB_INT_NOTIFY_ADDR_64K; uint32_t offset = phb->regs[PHB_INT_NOTIFY_INDEX >> 3]; + bool abt = !!(phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_ABT_MODE); - monitor_printf(mon, "PHB4[%x:%x] Source %08x .. %08x\n", + monitor_printf(mon, "PHB4[%x:%x] Source %08x .. %08x %s @%"HWADDR_PRIx"\n", phb->chip_id, phb->phb_id, - offset, offset + phb->xsrc.nr_irqs - 1); + offset, offset + phb->xsrc.nr_irqs - 1, + abt ? "ABT" : "", + notif_port); xive_source_pic_print_info(&phb->xsrc, 0, mon); } diff --git a/hw/pci-host/pnv_phb4_pec.c b/hw/pci-host/pnv_phb4_pec.c index 741ddc90ed..9871f462cd 100644 --- a/hw/pci-host/pnv_phb4_pec.c +++ b/hw/pci-host/pnv_phb4_pec.c @@ -8,7 +8,6 @@ */ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qemu-common.h" #include "qemu/log.h" #include "target/ppc/cpu.h" #include "hw/ppc/fdt.h" @@ -19,6 +18,7 @@ #include "hw/pci/pci_bus.h" #include "hw/ppc/pnv.h" #include "hw/qdev-properties.h" +#include "sysemu/sysemu.h" #include @@ -111,290 +111,45 @@ static const MemoryRegionOps pnv_pec_pci_xscom_ops = { .endianness = DEVICE_BIG_ENDIAN, }; -static uint64_t pnv_pec_stk_nest_xscom_read(void *opaque, hwaddr addr, - unsigned size) +static void pnv_pec_default_phb_realize(PnvPhb4PecState *pec, + int stack_no, + Error **errp) { - PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(opaque); - uint32_t reg = addr >> 3; + PnvPHB *phb = PNV_PHB(qdev_new(TYPE_PNV_PHB)); + int phb_id = pnv_phb4_pec_get_phb_id(pec, stack_no); - /* TODO: add list of allowed registers and error out if not */ - return stack->nest_regs[reg]; -} + object_property_add_child(OBJECT(pec), "phb[*]", OBJECT(phb)); + object_property_set_link(OBJECT(phb), "pec", OBJECT(pec), + &error_abort); + object_property_set_int(OBJECT(phb), "chip-id", pec->chip_id, + &error_fatal); + object_property_set_int(OBJECT(phb), "index", phb_id, + &error_fatal); -static void pnv_pec_stk_update_map(PnvPhb4PecStack *stack) -{ - PnvPhb4PecState *pec = stack->pec; - MemoryRegion *sysmem = pec->system_memory; - uint64_t bar_en = stack->nest_regs[PEC_NEST_STK_BAR_EN]; - uint64_t bar, mask, size; - char name[64]; - - /* - * NOTE: This will really not work well if those are remapped - * after the PHB has created its sub regions. 
We could do better - * if we had a way to resize regions but we don't really care - * that much in practice as the stuff below really only happens - * once early during boot - */ - - /* Handle unmaps */ - if (memory_region_is_mapped(&stack->mmbar0) && - !(bar_en & PEC_NEST_STK_BAR_EN_MMIO0)) { - memory_region_del_subregion(sysmem, &stack->mmbar0); - } - if (memory_region_is_mapped(&stack->mmbar1) && - !(bar_en & PEC_NEST_STK_BAR_EN_MMIO1)) { - memory_region_del_subregion(sysmem, &stack->mmbar1); - } - if (memory_region_is_mapped(&stack->phbbar) && - !(bar_en & PEC_NEST_STK_BAR_EN_PHB)) { - memory_region_del_subregion(sysmem, &stack->phbbar); - } - if (memory_region_is_mapped(&stack->intbar) && - !(bar_en & PEC_NEST_STK_BAR_EN_INT)) { - memory_region_del_subregion(sysmem, &stack->intbar); - } - - /* Update PHB */ - pnv_phb4_update_regions(stack); - - /* Handle maps */ - if (!memory_region_is_mapped(&stack->mmbar0) && - (bar_en & PEC_NEST_STK_BAR_EN_MMIO0)) { - bar = stack->nest_regs[PEC_NEST_STK_MMIO_BAR0] >> 8; - mask = stack->nest_regs[PEC_NEST_STK_MMIO_BAR0_MASK]; - size = ((~mask) >> 8) + 1; - snprintf(name, sizeof(name), "pec-%d.%d-stack-%d-mmio0", - pec->chip_id, pec->index, stack->stack_no); - memory_region_init(&stack->mmbar0, OBJECT(stack), name, size); - memory_region_add_subregion(sysmem, bar, &stack->mmbar0); - stack->mmio0_base = bar; - stack->mmio0_size = size; - } - if (!memory_region_is_mapped(&stack->mmbar1) && - (bar_en & PEC_NEST_STK_BAR_EN_MMIO1)) { - bar = stack->nest_regs[PEC_NEST_STK_MMIO_BAR1] >> 8; - mask = stack->nest_regs[PEC_NEST_STK_MMIO_BAR1_MASK]; - size = ((~mask) >> 8) + 1; - snprintf(name, sizeof(name), "pec-%d.%d-stack-%d-mmio1", - pec->chip_id, pec->index, stack->stack_no); - memory_region_init(&stack->mmbar1, OBJECT(stack), name, size); - memory_region_add_subregion(sysmem, bar, &stack->mmbar1); - stack->mmio1_base = bar; - stack->mmio1_size = size; - } - if (!memory_region_is_mapped(&stack->phbbar) && - (bar_en & PEC_NEST_STK_BAR_EN_PHB)) { - bar = stack->nest_regs[PEC_NEST_STK_PHB_REGS_BAR] >> 8; - size = PNV_PHB4_NUM_REGS << 3; - snprintf(name, sizeof(name), "pec-%d.%d-stack-%d-phb", - pec->chip_id, pec->index, stack->stack_no); - memory_region_init(&stack->phbbar, OBJECT(stack), name, size); - memory_region_add_subregion(sysmem, bar, &stack->phbbar); - } - if (!memory_region_is_mapped(&stack->intbar) && - (bar_en & PEC_NEST_STK_BAR_EN_INT)) { - bar = stack->nest_regs[PEC_NEST_STK_INT_BAR] >> 8; - size = PNV_PHB4_MAX_INTs << 16; - snprintf(name, sizeof(name), "pec-%d.%d-stack-%d-int", - stack->pec->chip_id, stack->pec->index, stack->stack_no); - memory_region_init(&stack->intbar, OBJECT(stack), name, size); - memory_region_add_subregion(sysmem, bar, &stack->intbar); - } - - /* Update PHB */ - pnv_phb4_update_regions(stack); -} - -static void pnv_pec_stk_nest_xscom_write(void *opaque, hwaddr addr, - uint64_t val, unsigned size) -{ - PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(opaque); - PnvPhb4PecState *pec = stack->pec; - uint32_t reg = addr >> 3; - - switch (reg) { - case PEC_NEST_STK_PCI_NEST_FIR: - stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR] = val; - break; - case PEC_NEST_STK_PCI_NEST_FIR_CLR: - stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR] &= val; - break; - case PEC_NEST_STK_PCI_NEST_FIR_SET: - stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR] |= val; - break; - case PEC_NEST_STK_PCI_NEST_FIR_MSK: - stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR_MSK] = val; - break; - case PEC_NEST_STK_PCI_NEST_FIR_MSKC: - stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR_MSK] &= 
val; - break; - case PEC_NEST_STK_PCI_NEST_FIR_MSKS: - stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR_MSK] |= val; - break; - case PEC_NEST_STK_PCI_NEST_FIR_ACT0: - case PEC_NEST_STK_PCI_NEST_FIR_ACT1: - stack->nest_regs[reg] = val; - break; - case PEC_NEST_STK_PCI_NEST_FIR_WOF: - stack->nest_regs[reg] = 0; - break; - case PEC_NEST_STK_ERR_REPORT_0: - case PEC_NEST_STK_ERR_REPORT_1: - case PEC_NEST_STK_PBCQ_GNRL_STATUS: - /* Flag error ? */ - break; - case PEC_NEST_STK_PBCQ_MODE: - stack->nest_regs[reg] = val & 0xff00000000000000ull; - break; - case PEC_NEST_STK_MMIO_BAR0: - case PEC_NEST_STK_MMIO_BAR0_MASK: - case PEC_NEST_STK_MMIO_BAR1: - case PEC_NEST_STK_MMIO_BAR1_MASK: - if (stack->nest_regs[PEC_NEST_STK_BAR_EN] & - (PEC_NEST_STK_BAR_EN_MMIO0 | - PEC_NEST_STK_BAR_EN_MMIO1)) { - phb_pec_error(pec, "Changing enabled BAR unsupported\n"); - } - stack->nest_regs[reg] = val & 0xffffffffff000000ull; - break; - case PEC_NEST_STK_PHB_REGS_BAR: - if (stack->nest_regs[PEC_NEST_STK_BAR_EN] & PEC_NEST_STK_BAR_EN_PHB) { - phb_pec_error(pec, "Changing enabled BAR unsupported\n"); - } - stack->nest_regs[reg] = val & 0xffffffffffc00000ull; - break; - case PEC_NEST_STK_INT_BAR: - if (stack->nest_regs[PEC_NEST_STK_BAR_EN] & PEC_NEST_STK_BAR_EN_INT) { - phb_pec_error(pec, "Changing enabled BAR unsupported\n"); - } - stack->nest_regs[reg] = val & 0xfffffff000000000ull; - break; - case PEC_NEST_STK_BAR_EN: - stack->nest_regs[reg] = val & 0xf000000000000000ull; - pnv_pec_stk_update_map(stack); - break; - case PEC_NEST_STK_DATA_FRZ_TYPE: - case PEC_NEST_STK_PBCQ_TUN_BAR: - /* Not used for now */ - stack->nest_regs[reg] = val; - break; - default: - qemu_log_mask(LOG_UNIMP, "phb4_pec: nest_xscom_write 0x%"HWADDR_PRIx - "=%"PRIx64"\n", addr, val); - } -} - -static const MemoryRegionOps pnv_pec_stk_nest_xscom_ops = { - .read = pnv_pec_stk_nest_xscom_read, - .write = pnv_pec_stk_nest_xscom_write, - .valid.min_access_size = 8, - .valid.max_access_size = 8, - .impl.min_access_size = 8, - .impl.max_access_size = 8, - .endianness = DEVICE_BIG_ENDIAN, -}; - -static uint64_t pnv_pec_stk_pci_xscom_read(void *opaque, hwaddr addr, - unsigned size) -{ - PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(opaque); - uint32_t reg = addr >> 3; - - /* TODO: add list of allowed registers and error out if not */ - return stack->pci_regs[reg]; -} - -static void pnv_pec_stk_pci_xscom_write(void *opaque, hwaddr addr, - uint64_t val, unsigned size) -{ - PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(opaque); - uint32_t reg = addr >> 3; - - switch (reg) { - case PEC_PCI_STK_PCI_FIR: - stack->nest_regs[reg] = val; - break; - case PEC_PCI_STK_PCI_FIR_CLR: - stack->nest_regs[PEC_PCI_STK_PCI_FIR] &= val; - break; - case PEC_PCI_STK_PCI_FIR_SET: - stack->nest_regs[PEC_PCI_STK_PCI_FIR] |= val; - break; - case PEC_PCI_STK_PCI_FIR_MSK: - stack->nest_regs[reg] = val; - break; - case PEC_PCI_STK_PCI_FIR_MSKC: - stack->nest_regs[PEC_PCI_STK_PCI_FIR_MSK] &= val; - break; - case PEC_PCI_STK_PCI_FIR_MSKS: - stack->nest_regs[PEC_PCI_STK_PCI_FIR_MSK] |= val; - break; - case PEC_PCI_STK_PCI_FIR_ACT0: - case PEC_PCI_STK_PCI_FIR_ACT1: - stack->nest_regs[reg] = val; - break; - case PEC_PCI_STK_PCI_FIR_WOF: - stack->nest_regs[reg] = 0; - break; - case PEC_PCI_STK_ETU_RESET: - stack->nest_regs[reg] = val & 0x8000000000000000ull; - /* TODO: Implement reset */ - break; - case PEC_PCI_STK_PBAIB_ERR_REPORT: - break; - case PEC_PCI_STK_PBAIB_TX_CMD_CRED: - case PEC_PCI_STK_PBAIB_TX_DAT_CRED: - stack->nest_regs[reg] = val; - break; - default: - qemu_log_mask(LOG_UNIMP, 
"phb4_pec_stk: pci_xscom_write 0x%"HWADDR_PRIx - "=%"PRIx64"\n", addr, val); - } -} - -static const MemoryRegionOps pnv_pec_stk_pci_xscom_ops = { - .read = pnv_pec_stk_pci_xscom_read, - .write = pnv_pec_stk_pci_xscom_write, - .valid.min_access_size = 8, - .valid.max_access_size = 8, - .impl.min_access_size = 8, - .impl.max_access_size = 8, - .endianness = DEVICE_BIG_ENDIAN, -}; - -static void pnv_pec_instance_init(Object *obj) -{ - PnvPhb4PecState *pec = PNV_PHB4_PEC(obj); - int i; - - for (i = 0; i < PHB4_PEC_MAX_STACKS; i++) { - object_initialize_child(obj, "stack[*]", &pec->stacks[i], - TYPE_PNV_PHB4_PEC_STACK); + if (!sysbus_realize(SYS_BUS_DEVICE(phb), errp)) { + return; } } static void pnv_pec_realize(DeviceState *dev, Error **errp) { PnvPhb4PecState *pec = PNV_PHB4_PEC(dev); + PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec); char name[64]; int i; - assert(pec->system_memory); - - /* Create stacks */ - for (i = 0; i < pec->num_stacks; i++) { - PnvPhb4PecStack *stack = &pec->stacks[i]; - Object *stk_obj = OBJECT(stack); - - object_property_set_int(stk_obj, "stack-no", i, &error_abort); - object_property_set_link(stk_obj, "pec", OBJECT(pec), &error_abort); - if (!qdev_realize(DEVICE(stk_obj), NULL, errp)) { - return; - } + if (pec->index >= PNV_CHIP_GET_CLASS(pec->chip)->num_pecs) { + error_setg(errp, "invalid PEC index: %d", pec->index); + return; } - for (; i < PHB4_PEC_MAX_STACKS; i++) { - object_unparent(OBJECT(&pec->stacks[i])); + + pec->num_phbs = pecc->num_phbs[pec->index]; + + /* Create PHBs if running with defaults */ + if (defaults_enabled()) { + for (i = 0; i < pec->num_phbs; i++) { + pnv_pec_default_phb_realize(pec, i, errp); + } } /* Initialize the XSCOM regions for the PEC registers */ @@ -440,9 +195,8 @@ static int pnv_pec_dt_xscom(PnvXScomInterface *dev, void *fdt, _FDT((fdt_setprop(fdt, offset, "compatible", pecc->compat, pecc->compat_size))); - for (i = 0; i < pec->num_stacks; i++) { - PnvPhb4PecStack *stack = &pec->stacks[i]; - PnvPHB4 *phb = &stack->phb; + for (i = 0; i < pec->num_phbs; i++) { + int phb_id = pnv_phb4_pec_get_phb_id(pec, i); int stk_offset; name = g_strdup_printf("stack@%x", i); @@ -452,19 +206,18 @@ static int pnv_pec_dt_xscom(PnvXScomInterface *dev, void *fdt, _FDT((fdt_setprop(fdt, stk_offset, "compatible", pecc->stk_compat, pecc->stk_compat_size))); _FDT((fdt_setprop_cell(fdt, stk_offset, "reg", i))); - _FDT((fdt_setprop_cell(fdt, stk_offset, "ibm,phb-index", phb->phb_id))); + _FDT((fdt_setprop_cell(fdt, stk_offset, "ibm,phb-index", phb_id))); } return 0; } static Property pnv_pec_properties[] = { - DEFINE_PROP_UINT32("index", PnvPhb4PecState, index, 0), - DEFINE_PROP_UINT32("num-stacks", PnvPhb4PecState, num_stacks, 0), - DEFINE_PROP_UINT32("chip-id", PnvPhb4PecState, chip_id, 0), - DEFINE_PROP_LINK("system-memory", PnvPhb4PecState, system_memory, - TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_UINT32("index", PnvPhb4PecState, index, 0), + DEFINE_PROP_UINT32("chip-id", PnvPhb4PecState, chip_id, 0), + DEFINE_PROP_LINK("chip", PnvPhb4PecState, chip, TYPE_PNV_CHIP, + PnvChip *), + DEFINE_PROP_END_OF_LIST(), }; static uint32_t pnv_pec_xscom_pci_base(PnvPhb4PecState *pec) @@ -477,6 +230,13 @@ static uint32_t pnv_pec_xscom_nest_base(PnvPhb4PecState *pec) return PNV9_XSCOM_PEC_NEST_BASE + 0x400 * pec->index; } +/* + * PEC0 -> 1 phb + * PEC1 -> 2 phb + * PEC2 -> 3 phbs + */ +static const uint32_t pnv_pec_num_phbs[] = { 1, 2, 3 }; + static void pnv_pec_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = 
DEVICE_CLASS(klass); @@ -499,13 +259,15 @@ static void pnv_pec_class_init(ObjectClass *klass, void *data) pecc->compat_size = sizeof(compat); pecc->stk_compat = stk_compat; pecc->stk_compat_size = sizeof(stk_compat); + pecc->version = PNV_PHB4_VERSION; + pecc->phb_type = TYPE_PNV_PHB4; + pecc->num_phbs = pnv_pec_num_phbs; } static const TypeInfo pnv_pec_type_info = { .name = TYPE_PNV_PHB4_PEC, .parent = TYPE_DEVICE, .instance_size = sizeof(PnvPhb4PecState), - .instance_init = pnv_pec_instance_init, .class_init = pnv_pec_class_init, .class_size = sizeof(PnvPhb4PecClass), .interfaces = (InterfaceInfo[]) { @@ -514,70 +276,52 @@ static const TypeInfo pnv_pec_type_info = { } }; -static void pnv_pec_stk_instance_init(Object *obj) -{ - PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(obj); +/* + * POWER10 definitions + */ - object_initialize_child(obj, "phb", &stack->phb, TYPE_PNV_PHB4); +static uint32_t pnv_phb5_pec_xscom_pci_base(PnvPhb4PecState *pec) +{ + return PNV10_XSCOM_PEC_PCI_BASE + 0x1000000 * pec->index; } -static void pnv_pec_stk_realize(DeviceState *dev, Error **errp) +static uint32_t pnv_phb5_pec_xscom_nest_base(PnvPhb4PecState *pec) { - PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(dev); - PnvPhb4PecState *pec = stack->pec; - char name[64]; - - assert(pec); - - /* Initialize the XSCOM regions for the stack registers */ - snprintf(name, sizeof(name), "xscom-pec-%d.%d-nest-stack-%d", - pec->chip_id, pec->index, stack->stack_no); - pnv_xscom_region_init(&stack->nest_regs_mr, OBJECT(stack), - &pnv_pec_stk_nest_xscom_ops, stack, name, - PHB4_PEC_NEST_STK_REGS_COUNT); - - snprintf(name, sizeof(name), "xscom-pec-%d.%d-pci-stack-%d", - pec->chip_id, pec->index, stack->stack_no); - pnv_xscom_region_init(&stack->pci_regs_mr, OBJECT(stack), - &pnv_pec_stk_pci_xscom_ops, stack, name, - PHB4_PEC_PCI_STK_REGS_COUNT); - - /* PHB pass-through */ - snprintf(name, sizeof(name), "xscom-pec-%d.%d-pci-stack-%d-phb", - pec->chip_id, pec->index, stack->stack_no); - pnv_xscom_region_init(&stack->phb_regs_mr, OBJECT(&stack->phb), - &pnv_phb4_xscom_ops, &stack->phb, name, 0x40); - - /* - * Let the machine/chip realize the PHB object to customize more - * easily some fields - */ + /* index goes down ... */ + return PNV10_XSCOM_PEC_NEST_BASE - 0x1000000 * pec->index; } -static Property pnv_pec_stk_properties[] = { - DEFINE_PROP_UINT32("stack-no", PnvPhb4PecStack, stack_no, 0), - DEFINE_PROP_LINK("pec", PnvPhb4PecStack, pec, TYPE_PNV_PHB4_PEC, - PnvPhb4PecState *), - DEFINE_PROP_END_OF_LIST(), -}; +/* + * PEC0 -> 3 stacks + * PEC1 -> 3 stacks + */ +static const uint32_t pnv_phb5_pec_num_stacks[] = { 3, 3 }; -static void pnv_pec_stk_class_init(ObjectClass *klass, void *data) +static void pnv_phb5_pec_class_init(ObjectClass *klass, void *data) { - DeviceClass *dc = DEVICE_CLASS(klass); + PnvPhb4PecClass *pecc = PNV_PHB4_PEC_CLASS(klass); + static const char compat[] = "ibm,power10-pbcq"; + static const char stk_compat[] = "ibm,power10-phb-stack"; - device_class_set_props(dc, pnv_pec_stk_properties); - dc->realize = pnv_pec_stk_realize; - dc->user_creatable = false; - - /* TODO: reset regs ? 
*/ + pecc->xscom_nest_base = pnv_phb5_pec_xscom_nest_base; + pecc->xscom_pci_base = pnv_phb5_pec_xscom_pci_base; + pecc->xscom_nest_size = PNV10_XSCOM_PEC_NEST_SIZE; + pecc->xscom_pci_size = PNV10_XSCOM_PEC_PCI_SIZE; + pecc->compat = compat; + pecc->compat_size = sizeof(compat); + pecc->stk_compat = stk_compat; + pecc->stk_compat_size = sizeof(stk_compat); + pecc->version = PNV_PHB5_VERSION; + pecc->phb_type = TYPE_PNV_PHB5; + pecc->num_phbs = pnv_phb5_pec_num_stacks; } -static const TypeInfo pnv_pec_stk_type_info = { - .name = TYPE_PNV_PHB4_PEC_STACK, - .parent = TYPE_DEVICE, - .instance_size = sizeof(PnvPhb4PecStack), - .instance_init = pnv_pec_stk_instance_init, - .class_init = pnv_pec_stk_class_init, +static const TypeInfo pnv_phb5_pec_type_info = { + .name = TYPE_PNV_PHB5_PEC, + .parent = TYPE_PNV_PHB4_PEC, + .instance_size = sizeof(PnvPhb4PecState), + .class_init = pnv_phb5_pec_class_init, + .class_size = sizeof(PnvPhb4PecClass), .interfaces = (InterfaceInfo[]) { { TYPE_PNV_XSCOM_INTERFACE }, { } @@ -587,7 +331,7 @@ static const TypeInfo pnv_pec_stk_type_info = { static void pnv_pec_register_types(void) { type_register_static(&pnv_pec_type_info); - type_register_static(&pnv_pec_stk_type_info); + type_register_static(&pnv_phb5_pec_type_info); } type_init(pnv_pec_register_types); diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c index ab5a47aff5..20da121374 100644 --- a/hw/pci-host/q35.c +++ b/hw/pci-host/q35.c @@ -379,7 +379,8 @@ static void mch_update_smram(MCHPCIState *mch) memory_region_set_enabled(&mch->high_smram, false); } - if (pd->config[MCH_HOST_BRIDGE_ESMRAMC] & MCH_HOST_BRIDGE_ESMRAMC_T_EN) { + if ((pd->config[MCH_HOST_BRIDGE_ESMRAMC] & MCH_HOST_BRIDGE_ESMRAMC_T_EN) && + (pd->config[MCH_HOST_BRIDGE_SMRAM] & SMRAM_G_SMRAME)) { switch (pd->config[MCH_HOST_BRIDGE_ESMRAMC] & MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_MASK) { case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_1MB: diff --git a/hw/pci-host/raven.c b/hw/pci-host/raven.c index 3be27f0a14..7a105e4a63 100644 --- a/hw/pci-host/raven.c +++ b/hw/pci-host/raven.c @@ -24,7 +24,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qemu/units.h" #include "qemu/log.h" @@ -300,8 +299,8 @@ static void raven_pcihost_initfn(Object *obj) memory_region_add_subregion_overlap(address_space_mem, PCI_IO_BASE_ADDR, &s->pci_io_non_contiguous, 1); memory_region_add_subregion(address_space_mem, 0xc0000000, &s->pci_memory); - pci_root_bus_new_inplace(&s->pci_bus, sizeof(s->pci_bus), DEVICE(obj), NULL, - &s->pci_memory, &s->pci_io, 0, TYPE_PCI_BUS); + pci_root_bus_init(&s->pci_bus, sizeof(s->pci_bus), DEVICE(obj), NULL, + &s->pci_memory, &s->pci_io, 0, TYPE_PCI_BUS); /* Bus master address space */ memory_region_init(&s->bm, obj, "bm-raven", 4 * GiB); diff --git a/hw/pci-host/remote.c b/hw/pci-host/remote.c index eee45444ef..bfb25ef6af 100644 --- a/hw/pci-host/remote.c +++ b/hw/pci-host/remote.c @@ -22,7 +22,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "hw/pci/pci.h" #include "hw/pci/pci_host.h" diff --git a/hw/pci-host/sh_pci.c b/hw/pci-host/sh_pci.c index 08c1562e22..719d6ca2a6 100644 --- a/hw/pci-host/sh_pci.c +++ b/hw/pci-host/sh_pci.c @@ -49,13 +49,12 @@ struct SHPCIState { uint32_t iobr; }; -static void sh_pci_reg_write (void *p, hwaddr addr, uint64_t val, - unsigned size) +static void sh_pci_reg_write(void *p, hwaddr addr, uint64_t val, unsigned size) { SHPCIState *pcic = p; PCIHostState *phb = PCI_HOST_BRIDGE(pcic); - switch(addr) { + switch (addr) { case 0 ... 
0xfc: stl_le_p(pcic->dev->config + addr, val); break; @@ -75,13 +74,12 @@ static void sh_pci_reg_write (void *p, hwaddr addr, uint64_t val, } } -static uint64_t sh_pci_reg_read (void *p, hwaddr addr, - unsigned size) +static uint64_t sh_pci_reg_read(void *p, hwaddr addr, unsigned size) { SHPCIState *pcic = p; PCIHostState *phb = PCI_HOST_BRIDGE(pcic); - switch(addr) { + switch (addr) { case 0 ... 0xfc: return ldl_le_p(pcic->dev->config + addr); case 0x1c0: diff --git a/hw/pci-host/trace-events b/hw/pci-host/trace-events index 630e9fcc5e..437e66ff50 100644 --- a/hw/pci-host/trace-events +++ b/hw/pci-host/trace-events @@ -32,3 +32,10 @@ unin_read(uint64_t addr, uint64_t value) "addr=0x%" PRIx64 " val=0x%"PRIx64 # pnv_phb4.c pnv_phb4_xive_notify(uint64_t notif_port, uint64_t data) "notif=@0x%"PRIx64" data=0x%"PRIx64 +pnv_phb4_xive_notify_ic(uint64_t addr, uint64_t data) "addr=@0x%"PRIx64" data=0x%"PRIx64 +pnv_phb4_xive_notify_abt(uint64_t notif_port, uint64_t data) "notif=@0x%"PRIx64" data=0x%"PRIx64 + +# dino.c +dino_chip_mem_valid(uint64_t addr, uint32_t val) "access to addr 0x%"PRIx64" is %d" +dino_chip_read(uint64_t addr, uint32_t val) "addr 0x%"PRIx64" val 0x%08x" +dino_chip_write(uint64_t addr, uint32_t val) "addr 0x%"PRIx64" val 0x%08x" diff --git a/hw/pci-host/uninorth.c b/hw/pci-host/uninorth.c index d25b62d6a5..aebd44d265 100644 --- a/hw/pci-host/uninorth.c +++ b/hw/pci-host/uninorth.c @@ -24,7 +24,6 @@ #include "qemu/osdep.h" #include "hw/irq.h" -#include "hw/ppc/mac.h" #include "hw/qdev-properties.h" #include "qemu/module.h" #include "hw/pci/pci.h" diff --git a/hw/pci-host/versatile.c b/hw/pci-host/versatile.c index 3553277f94..f66384fa02 100644 --- a/hw/pci-host/versatile.c +++ b/hw/pci-host/versatile.c @@ -405,9 +405,9 @@ static void pci_vpb_realize(DeviceState *dev, Error **errp) memory_region_init(&s->pci_io_space, OBJECT(s), "pci_io", 4 * GiB); memory_region_init(&s->pci_mem_space, OBJECT(s), "pci_mem", 4 * GiB); - pci_root_bus_new_inplace(&s->pci_bus, sizeof(s->pci_bus), dev, "pci", - &s->pci_mem_space, &s->pci_io_space, - PCI_DEVFN(11, 0), TYPE_PCI_BUS); + pci_root_bus_init(&s->pci_bus, sizeof(s->pci_bus), dev, "pci", + &s->pci_mem_space, &s->pci_io_space, + PCI_DEVFN(11, 0), TYPE_PCI_BUS); h->bus = &s->pci_bus; object_initialize(&s->pci_dev, sizeof(s->pci_dev), TYPE_VERSATILE_PCI_HOST); diff --git a/hw/pci/meson.build b/hw/pci/meson.build index 5c4bbac817..5aff7ed1c6 100644 --- a/hw/pci/meson.build +++ b/hw/pci/meson.build @@ -5,6 +5,7 @@ pci_ss.add(files( 'pci.c', 'pci_bridge.c', 'pci_host.c', + 'pcie_sriov.c', 'shpc.c', 'slotid_cap.c' )) @@ -12,6 +13,7 @@ pci_ss.add(files( # allow plugging PCIe devices into PCI buses, include them even if # CONFIG_PCI_EXPRESS=n. 
pci_ss.add(files('pcie.c', 'pcie_aer.c')) +pci_ss.add(files('pcie_doe.c')) softmmu_ss.add(when: 'CONFIG_PCI_EXPRESS', if_true: files('pcie_port.c', 'pcie_host.c')) softmmu_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss) diff --git a/hw/pci/msi.c b/hw/pci/msi.c index 47d2b0f33c..058d1d1ef1 100644 --- a/hw/pci/msi.c +++ b/hw/pci/msi.c @@ -134,7 +134,7 @@ void msi_set_message(PCIDevice *dev, MSIMessage msg) pci_set_word(dev->config + msi_data_off(dev, msi64bit), msg.data); } -MSIMessage msi_get_message(PCIDevice *dev, unsigned int vector) +static MSIMessage msi_prepare_message(PCIDevice *dev, unsigned int vector) { uint16_t flags = pci_get_word(dev->config + msi_flags_off(dev)); bool msi64bit = flags & PCI_MSI_FLAGS_64BIT; @@ -159,6 +159,11 @@ MSIMessage msi_get_message(PCIDevice *dev, unsigned int vector) return msg; } +MSIMessage msi_get_message(PCIDevice *dev, unsigned int vector) +{ + return dev->msi_prepare_message(dev, vector); +} + bool msi_enabled(const PCIDevice *dev) { return msi_present(dev) && @@ -241,6 +246,8 @@ int msi_init(struct PCIDevice *dev, uint8_t offset, 0xffffffff >> (PCI_MSI_VECTORS_MAX - nr_vectors)); } + dev->msi_prepare_message = msi_prepare_message; + return 0; } @@ -256,6 +263,7 @@ void msi_uninit(struct PCIDevice *dev) cap_size = msi_cap_sizeof(flags); pci_del_capability(dev, PCI_CAP_ID_MSI, cap_size); dev->cap_present &= ~QEMU_PCI_CAP_MSI; + dev->msi_prepare_message = NULL; MSI_DEV_PRINTF(dev, "uninit\n"); } @@ -307,6 +315,39 @@ bool msi_is_masked(const PCIDevice *dev, unsigned int vector) return mask & (1U << vector); } +void msi_set_mask(PCIDevice *dev, int vector, bool mask, Error **errp) +{ + ERRP_GUARD(); + uint16_t flags = pci_get_word(dev->config + msi_flags_off(dev)); + bool msi64bit = flags & PCI_MSI_FLAGS_64BIT; + uint32_t irq_state, vector_mask, pending; + + if (vector >= PCI_MSI_VECTORS_MAX) { + error_setg(errp, "msi: vector %d not allocated. max vector is %d", + vector, (PCI_MSI_VECTORS_MAX - 1)); + return; + } + + vector_mask = (1U << vector); + + irq_state = pci_get_long(dev->config + msi_mask_off(dev, msi64bit)); + + if (mask) { + irq_state |= vector_mask; + } else { + irq_state &= ~vector_mask; + } + + pci_set_long(dev->config + msi_mask_off(dev, msi64bit), irq_state); + + pending = pci_get_long(dev->config + msi_pending_off(dev, msi64bit)); + if (!mask && (pending & vector_mask)) { + pending &= ~vector_mask; + pci_set_long(dev->config + msi_pending_off(dev, msi64bit), pending); + msi_notify(dev, vector); + } +} + void msi_notify(PCIDevice *dev, unsigned int vector) { uint16_t flags = pci_get_word(dev->config + msi_flags_off(dev)); @@ -334,11 +375,7 @@ void msi_notify(PCIDevice *dev, unsigned int vector) void msi_send_message(PCIDevice *dev, MSIMessage msg) { - MemTxAttrs attrs = {}; - - attrs.requester_id = pci_requester_id(dev); - address_space_stl_le(&dev->bus_master_as, msg.address, msg.data, - attrs, NULL); + dev->msi_trigger(dev, msg); } /* Normally called by pci_default_write_config(). 
*/ diff --git a/hw/pci/msix.c b/hw/pci/msix.c index ae9331cd0b..9e70fcd6fa 100644 --- a/hw/pci/msix.c +++ b/hw/pci/msix.c @@ -31,7 +31,7 @@ #define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8) #define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8) -MSIMessage msix_get_message(PCIDevice *dev, unsigned vector) +static MSIMessage msix_prepare_message(PCIDevice *dev, unsigned vector) { uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE; MSIMessage msg; @@ -41,6 +41,11 @@ MSIMessage msix_get_message(PCIDevice *dev, unsigned vector) return msg; } +MSIMessage msix_get_message(PCIDevice *dev, unsigned vector) +{ + return dev->msix_prepare_message(dev, vector); +} + /* * Special API for POWER to configure the vectors through * a side channel. Should never be used by devices. @@ -131,6 +136,26 @@ static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked) } } +void msix_set_mask(PCIDevice *dev, int vector, bool mask) +{ + unsigned offset; + bool was_masked; + + assert(vector < dev->msix_entries_nr); + + offset = vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL; + + was_masked = msix_is_masked(dev, vector); + + if (mask) { + dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT; + } else { + dev->msix_table[offset] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT; + } + + msix_handle_mask_update(dev, vector, was_masked); +} + static bool msix_masked(PCIDevice *dev) { return dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK; @@ -344,6 +369,8 @@ int msix_init(struct PCIDevice *dev, unsigned short nentries, "msix-pba", pba_size); memory_region_add_subregion(pba_bar, pba_offset, &dev->msix_pba_mmio); + dev->msix_prepare_message = msix_prepare_message; + return 0; } @@ -429,6 +456,7 @@ void msix_uninit(PCIDevice *dev, MemoryRegion *table_bar, MemoryRegion *pba_bar) g_free(dev->msix_entry_used); dev->msix_entry_used = NULL; dev->cap_present &= ~QEMU_PCI_CAP_MSIX; + dev->msix_prepare_message = NULL; } void msix_uninit_exclusive_bar(PCIDevice *dev) @@ -489,7 +517,9 @@ void msix_notify(PCIDevice *dev, unsigned vector) { MSIMessage msg; - if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) { + assert(vector < dev->msix_entries_nr); + + if (!dev->msix_entry_used[vector]) { return; } @@ -525,20 +555,17 @@ void msix_reset(PCIDevice *dev) * don't want to follow the spec suggestion can declare all vectors as used. */ /* Mark vector as used. */ -int msix_vector_use(PCIDevice *dev, unsigned vector) +void msix_vector_use(PCIDevice *dev, unsigned vector) { - if (vector >= dev->msix_entries_nr) { - return -EINVAL; - } - + assert(vector < dev->msix_entries_nr); dev->msix_entry_used[vector]++; - return 0; } /* Mark vector as unused. 
*/ void msix_vector_unuse(PCIDevice *dev, unsigned vector) { - if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) { + assert(vector < dev->msix_entries_nr); + if (!dev->msix_entry_used[vector]) { return; } if (--dev->msix_entry_used[vector]) { diff --git a/hw/pci/pci.c b/hw/pci/pci.c index 1a95d569ab..e358a2fb4c 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qemu/units.h" #include "hw/irq.h" @@ -201,6 +200,11 @@ static const TypeInfo pci_bus_info = { .class_init = pci_bus_class_init, }; +static const TypeInfo cxl_interface_info = { + .name = INTERFACE_CXL_DEVICE, + .parent = TYPE_INTERFACE, +}; + static const TypeInfo pcie_interface_info = { .name = INTERFACE_PCIE_DEVICE, .parent = TYPE_INTERFACE, @@ -224,6 +228,12 @@ static const TypeInfo pcie_bus_info = { .class_init = pcie_bus_class_init, }; +static const TypeInfo cxl_bus_info = { + .name = TYPE_CXL_BUS, + .parent = TYPE_PCIE_BUS, + .class_init = pcie_bus_class_init, +}; + static PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num); static void pci_update_mappings(PCIDevice *d); static void pci_irq_handler(void *opaque, int irq_num, int level); @@ -244,6 +254,9 @@ int pci_bar(PCIDevice *d, int reg) { uint8_t type; + /* PCIe virtual functions do not have their own BARs */ + assert(!pci_is_vf(d)); + if (reg != PCI_ROM_SLOT) return PCI_BASE_ADDRESS_0 + reg * 4; @@ -309,10 +322,39 @@ void pci_device_deassert_intx(PCIDevice *dev) } } -static void pci_do_device_reset(PCIDevice *dev) +static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg) +{ + MemTxAttrs attrs = {}; + + attrs.requester_id = pci_requester_id(dev); + address_space_stl_le(&dev->bus_master_as, msg.address, msg.data, + attrs, NULL); +} + +static void pci_reset_regions(PCIDevice *dev) { int r; + if (pci_is_vf(dev)) { + return; + } + for (r = 0; r < PCI_NUM_REGIONS; ++r) { + PCIIORegion *region = &dev->io_regions[r]; + if (!region->size) { + continue; + } + + if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) && + region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) { + pci_set_quad(dev->config + pci_bar(dev, r), region->type); + } else { + pci_set_long(dev->config + pci_bar(dev, r), region->type); + } + } +} + +static void pci_do_device_reset(PCIDevice *dev) +{ pci_device_deassert_intx(dev); assert(dev->irq_state == 0); @@ -328,19 +370,7 @@ static void pci_do_device_reset(PCIDevice *dev) pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) | pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE)); dev->config[PCI_CACHE_LINE_SIZE] = 0x0; - for (r = 0; r < PCI_NUM_REGIONS; ++r) { - PCIIORegion *region = &dev->io_regions[r]; - if (!region->size) { - continue; - } - - if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) && - region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) { - pci_set_quad(dev->config + pci_bar(dev, r), region->type); - } else { - pci_set_long(dev->config + pci_bar(dev, r), region->type); - } - } + pci_reset_regions(dev); pci_update_mappings(dev); msi_reset(dev); @@ -437,10 +467,10 @@ bool pci_bus_bypass_iommu(PCIBus *bus) return host_bridge->bypass_iommu; } -static void pci_root_bus_init(PCIBus *bus, DeviceState *parent, - MemoryRegion *address_space_mem, - MemoryRegion *address_space_io, - uint8_t devfn_min) +static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent, + MemoryRegion *address_space_mem, + MemoryRegion *address_space_io, + uint8_t devfn_min) { assert(PCI_FUNC(devfn_min) == 0); bus->devfn_min = devfn_min; @@ -465,15 +495,15 @@ bool 
pci_bus_is_express(PCIBus *bus) return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS); } -void pci_root_bus_new_inplace(PCIBus *bus, size_t bus_size, DeviceState *parent, - const char *name, - MemoryRegion *address_space_mem, - MemoryRegion *address_space_io, - uint8_t devfn_min, const char *typename) +void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent, + const char *name, + MemoryRegion *address_space_mem, + MemoryRegion *address_space_io, + uint8_t devfn_min, const char *typename) { - qbus_create_inplace(bus, bus_size, typename, parent, name); - pci_root_bus_init(bus, parent, address_space_mem, address_space_io, - devfn_min); + qbus_init(bus, bus_size, typename, parent, name); + pci_root_bus_internal_init(bus, parent, address_space_mem, + address_space_io, devfn_min); } PCIBus *pci_root_bus_new(DeviceState *parent, const char *name, @@ -483,9 +513,9 @@ PCIBus *pci_root_bus_new(DeviceState *parent, const char *name, { PCIBus *bus; - bus = PCI_BUS(qbus_create(typename, parent, name)); - pci_root_bus_init(bus, parent, address_space_mem, address_space_io, - devfn_min); + bus = PCI_BUS(qbus_new(typename, parent, name)); + pci_root_bus_internal_init(bus, parent, address_space_mem, + address_space_io, devfn_min); return bus; } @@ -889,6 +919,16 @@ static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp) dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION; } + /* + * With SR/IOV and ARI, a device at function 0 need not be a multifunction + * device, as it may just be a VF that ended up with function 0 in + * the legacy PCI interpretation. Avoid failing in such cases: + */ + if (pci_is_vf(dev) && + dev->exp.sriov_vf.pf->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { + return; + } + /* * multifunction bit is interpreted in two ways as follows. * - all functions must set the bit to 1. 
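/*
 * [Editor's note: illustrative sketch, not part of the patch.]
 * The SR-IOV handling added above relies on the fixed relationship between
 * a PF and its VFs given by the First VF Offset and VF Stride fields of the
 * SR-IOV capability: VF number n sits at
 *     vf_devfn = pf_devfn + vf_offset + n * vf_stride
 * which is the inverse of the vf_num computation used by
 * pci_config_get_bar_addr() further down. A minimal helper expressing that
 * mapping, assuming the usual hw/pci/pci.h context; the function name is
 * hypothetical and does not exist in the patch:
 */
static inline int sriov_vf_devfn(const PCIDevice *pf, uint16_t vf_offset,
                                 uint16_t vf_stride, uint16_t vf_num)
{
    /* devfn of the n-th VF, per the SR-IOV offset/stride rule */
    return pf->devfn + vf_offset + vf_num * vf_stride;
}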
@@ -1083,11 +1123,12 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, return NULL; } else if (!pci_bus_devfn_available(bus, devfn)) { error_setg(errp, "PCI: slot %d function %d not available for %s," - " in use by %s", + " in use by %s,id=%s", PCI_SLOT(devfn), PCI_FUNC(devfn), name, - bus->devices[devfn]->name); + bus->devices[devfn]->name, bus->devices[devfn]->qdev.id); return NULL; } else if (dev->hotplugged && + !pci_is_vf(pci_dev) && pci_get_function_0(pci_dev)) { error_setg(errp, "PCI: slot %d function 0 already occupied by %s," " new func %s cannot be exposed to guest.", @@ -1185,6 +1226,8 @@ static void pci_qdev_unrealize(DeviceState *dev) pci_device_deassert_intx(pci_dev); do_pci_unregister_device(pci_dev); + + pci_dev->msi_trigger = NULL; } void pci_register_bar(PCIDevice *pci_dev, int region_num, @@ -1196,6 +1239,7 @@ void pci_register_bar(PCIDevice *pci_dev, int region_num, pcibus_t size = memory_region_size(memory); uint8_t hdr_type; + assert(!pci_is_vf(pci_dev)); /* VFs must use pcie_sriov_vf_register_bar */ assert(region_num >= 0); assert(region_num < PCI_NUM_REGIONS); assert(is_power_of_2(size)); @@ -1299,11 +1343,45 @@ pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num) return pci_dev->io_regions[region_num].addr; } -static pcibus_t pci_bar_address(PCIDevice *d, - int reg, uint8_t type, pcibus_t size) +static pcibus_t pci_config_get_bar_addr(PCIDevice *d, int reg, + uint8_t type, pcibus_t size) +{ + pcibus_t new_addr; + if (!pci_is_vf(d)) { + int bar = pci_bar(d, reg); + if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) { + new_addr = pci_get_quad(d->config + bar); + } else { + new_addr = pci_get_long(d->config + bar); + } + } else { + PCIDevice *pf = d->exp.sriov_vf.pf; + uint16_t sriov_cap = pf->exp.sriov_cap; + int bar = sriov_cap + PCI_SRIOV_BAR + reg * 4; + uint16_t vf_offset = + pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET); + uint16_t vf_stride = + pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE); + uint32_t vf_num = (d->devfn - (pf->devfn + vf_offset)) / vf_stride; + + if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) { + new_addr = pci_get_quad(pf->config + bar); + } else { + new_addr = pci_get_long(pf->config + bar); + } + new_addr += vf_num * size; + } + /* The ROM slot has a specific enable bit, keep it intact */ + if (reg != PCI_ROM_SLOT) { + new_addr &= ~(size - 1); + } + return new_addr; +} + +pcibus_t pci_bar_address(PCIDevice *d, + int reg, uint8_t type, pcibus_t size) { pcibus_t new_addr, last_addr; - int bar = pci_bar(d, reg); uint16_t cmd = pci_get_word(d->config + PCI_COMMAND); Object *machine = qdev_get_machine(); ObjectClass *oc = object_get_class(machine); @@ -1314,7 +1392,7 @@ static pcibus_t pci_bar_address(PCIDevice *d, if (!(cmd & PCI_COMMAND_IO)) { return PCI_BAR_UNMAPPED; } - new_addr = pci_get_long(d->config + bar) & ~(size - 1); + new_addr = pci_config_get_bar_addr(d, reg, type, size); last_addr = new_addr + size - 1; /* Check if 32 bit BAR wraps around explicitly. * TODO: make priorities correct and remove this work around. 
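/*
 * [Editor's note: illustrative sketch, not part of the patch.]
 * pci_config_get_bar_addr() above derives a VF's BAR address from the PF's
 * SR-IOV BAR rather than from the VF's own config space:
 *     vf_bar_addr = sriov_bar_base + vf_num * bar_size
 * For example, a 16 KiB VF BAR based at 0xfe000000 places VF 0 at
 * 0xfe000000, VF 1 at 0xfe004000 and VF 2 at 0xfe008000. A hedged sketch of
 * that arithmetic (helper name is hypothetical, pcibus_t as in hw/pci/pci.h):
 */
static inline pcibus_t sriov_vf_bar_addr(pcibus_t sriov_bar_base,
                                         pcibus_t bar_size, uint32_t vf_num)
{
    /* base of the n-th VF's window inside the PF's SR-IOV BAR */
    return sriov_bar_base + vf_num * bar_size;
}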
@@ -1329,11 +1407,7 @@ static pcibus_t pci_bar_address(PCIDevice *d, if (!(cmd & PCI_COMMAND_MEMORY)) { return PCI_BAR_UNMAPPED; } - if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) { - new_addr = pci_get_quad(d->config + bar); - } else { - new_addr = pci_get_long(d->config + bar); - } + new_addr = pci_config_get_bar_addr(d, reg, type, size); /* the ROM slot has a specific enable bit */ if (reg == PCI_ROM_SLOT && !(new_addr & PCI_ROM_ADDRESS_ENABLE)) { return PCI_BAR_UNMAPPED; @@ -1385,6 +1459,9 @@ static void pci_update_mappings(PCIDevice *d) continue; new_addr = pci_bar_address(d, i, r->type, r->size); + if (!d->has_power) { + new_addr = PCI_BAR_UNMAPPED; + } /* This bar isn't changed */ if (new_addr == r->addr) @@ -1392,7 +1469,7 @@ static void pci_update_mappings(PCIDevice *d) /* now do the real mapping */ if (r->addr != PCI_BAR_UNMAPPED) { - trace_pci_update_mappings_del(d, pci_dev_bus_num(d), + trace_pci_update_mappings_del(d->name, pci_dev_bus_num(d), PCI_SLOT(d->devfn), PCI_FUNC(d->devfn), i, r->addr, r->size); @@ -1400,7 +1477,7 @@ static void pci_update_mappings(PCIDevice *d) } r->addr = new_addr; if (r->addr != PCI_BAR_UNMAPPED) { - trace_pci_update_mappings_add(d, pci_dev_bus_num(d), + trace_pci_update_mappings_add(d->name, pci_dev_bus_num(d), PCI_SLOT(d->devfn), PCI_FUNC(d->devfn), i, r->addr, r->size); @@ -1469,12 +1546,13 @@ void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int if (range_covers_byte(addr, l, PCI_COMMAND)) { pci_update_irq_disabled(d, was_irq_disabled); memory_region_set_enabled(&d->bus_master_enable_region, - pci_get_word(d->config + PCI_COMMAND) - & PCI_COMMAND_MASTER); + (pci_get_word(d->config + PCI_COMMAND) + & PCI_COMMAND_MASTER) && d->has_power); } msi_write_config(d, addr, val_in, l); msix_write_config(d, addr, val_in, l); + pcie_sriov_config_write(d, addr, val_in, l); } /***********************************************************/ @@ -1499,11 +1577,6 @@ static void pci_irq_handler(void *opaque, int irq_num, int level) pci_change_irq_level(pci_dev, irq_num, change); } -static inline int pci_intx(PCIDevice *pci_dev) -{ - return pci_get_byte(pci_dev->config + PCI_INTERRUPT_PIN) - 1; -} - qemu_irq pci_allocate_irq(PCIDevice *pci_dev) { int intx = pci_intx(pci_dev); @@ -1650,7 +1723,7 @@ static const pci_class_desc pci_class_descriptions[] = { 0x0902, "Mouse", "mouse"}, { 0x0A00, "Dock station", "dock", 0x00ff}, { 0x0B00, "i386 cpu", "cpu", 0x00ff}, - { 0x0c00, "Fireware contorller", "fireware"}, + { 0x0c00, "Firewire controller", "firewire"}, { 0x0c01, "Access bus controller", "access-bus"}, { 0x0c02, "SSA controller", "ssa"}, { 0x0c03, "USB controller", "usb"}, @@ -1659,11 +1732,9 @@ static const pci_class_desc pci_class_descriptions[] = { 0, NULL} }; -static void pci_for_each_device_under_bus_reverse(PCIBus *bus, - void (*fn)(PCIBus *b, - PCIDevice *d, - void *opaque), - void *opaque) +void pci_for_each_device_under_bus_reverse(PCIBus *bus, + pci_bus_dev_fn fn, + void *opaque) { PCIDevice *d; int devfn; @@ -1677,8 +1748,7 @@ static void pci_for_each_device_under_bus_reverse(PCIBus *bus, } void pci_for_each_device_reverse(PCIBus *bus, int bus_num, - void (*fn)(PCIBus *b, PCIDevice *d, void *opaque), - void *opaque) + pci_bus_dev_fn fn, void *opaque) { bus = pci_find_bus_nr(bus, bus_num); @@ -1687,10 +1757,8 @@ void pci_for_each_device_reverse(PCIBus *bus, int bus_num, } } -static void pci_for_each_device_under_bus(PCIBus *bus, - void (*fn)(PCIBus *b, PCIDevice *d, - void *opaque), - void *opaque) +void 
pci_for_each_device_under_bus(PCIBus *bus, + pci_bus_dev_fn fn, void *opaque) { PCIDevice *d; int devfn; @@ -1704,8 +1772,7 @@ static void pci_for_each_device_under_bus(PCIBus *bus, } void pci_for_each_device(PCIBus *bus, int bus_num, - void (*fn)(PCIBus *b, PCIDevice *d, void *opaque), - void *opaque) + pci_bus_dev_fn fn, void *opaque) { bus = pci_find_bus_nr(bus, bus_num); @@ -1997,6 +2064,7 @@ PCIDevice *pci_nic_init_nofail(NICInfo *nd, PCIBus *rootbus, PCIDevice *pci_vga_init(PCIBus *bus) { + vga_interface_created = true; switch (vga_interface_type) { case VGA_CIRRUS: return pci_create_simple(bus, -1, "cirrus-vga"); @@ -2083,10 +2151,8 @@ static PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num) return NULL; } -void pci_for_each_bus_depth_first(PCIBus *bus, - void *(*begin)(PCIBus *bus, void *parent_state), - void (*end)(PCIBus *bus, void *state), - void *parent_state) +void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin, + pci_bus_fn end, void *parent_state) { PCIBus *sec; void *state; @@ -2143,6 +2209,10 @@ static void pci_qdev_realize(DeviceState *qdev, Error **errp) pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; } + if (object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE)) { + pci_dev->cap_present |= QEMU_PCIE_CAP_CXL; + } + pci_dev = do_pci_register_device(pci_dev, object_get_typename(OBJECT(qdev)), pci_dev->devfn, errp); @@ -2195,6 +2265,10 @@ static void pci_qdev_realize(DeviceState *qdev, Error **errp) pci_qdev_unrealize(DEVICE(pci_dev)); return; } + + pci_set_power(pci_dev, true); + + pci_dev->msi_trigger = pci_msi_trigger; } PCIDevice *pci_new_multifunction(int devfn, bool multifunction, @@ -2584,15 +2658,15 @@ static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len) static char *pcibus_get_fw_dev_path(DeviceState *dev) { PCIDevice *d = (PCIDevice *)dev; - char path[50], name[33]; - int off; + char name[33]; + int has_func = !!PCI_FUNC(d->devfn); - off = snprintf(path, sizeof(path), "%s@%x", - pci_dev_fw_name(dev, name, sizeof name), - PCI_SLOT(d->devfn)); - if (PCI_FUNC(d->devfn)) - snprintf(path + off, sizeof(path) + off, ",%x", PCI_FUNC(d->devfn)); - return g_strdup(path); + return g_strdup_printf("%s@%x%s%.*x", + pci_dev_fw_name(dev, name, sizeof(name)), + PCI_SLOT(d->devfn), + has_func ? 
"," : "", + has_func, + PCI_FUNC(d->devfn)); } static char *pcibus_get_dev_path(DeviceState *dev) @@ -2706,7 +2780,9 @@ static void pci_device_class_base_init(ObjectClass *klass, void *data) object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE); ObjectClass *pcie = object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE); - assert(conventional || pcie); + ObjectClass *cxl = + object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE); + assert(conventional || pcie || cxl); } } @@ -2866,6 +2942,22 @@ MSIMessage pci_get_msi_message(PCIDevice *dev, int vector) return msg; } +void pci_set_power(PCIDevice *d, bool state) +{ + if (d->has_power == state) { + return; + } + + d->has_power = state; + pci_update_mappings(d); + memory_region_set_enabled(&d->bus_master_enable_region, + (pci_get_word(d->config + PCI_COMMAND) + & PCI_COMMAND_MASTER) && d->has_power); + if (!d->has_power) { + pci_device_reset(d); + } +} + static const TypeInfo pci_device_type_info = { .name = TYPE_PCI_DEVICE, .parent = TYPE_DEVICE, @@ -2880,7 +2972,9 @@ static void pci_register_types(void) { type_register_static(&pci_bus_info); type_register_static(&pcie_bus_info); + type_register_static(&cxl_bus_info); type_register_static(&conventional_pci_interface_info); + type_register_static(&cxl_interface_info); type_register_static(&pcie_interface_info); type_register_static(&pci_device_type_info); } diff --git a/hw/pci/pci_bridge.c b/hw/pci/pci_bridge.c index 3789c17edc..da34c8ebcd 100644 --- a/hw/pci/pci_bridge.c +++ b/hw/pci/pci_bridge.c @@ -374,8 +374,8 @@ void pci_bridge_initfn(PCIDevice *dev, const char *typename) br->bus_name = dev->qdev.id; } - qbus_create_inplace(sec_bus, sizeof(br->sec_bus), typename, DEVICE(dev), - br->bus_name); + qbus_init(sec_bus, sizeof(br->sec_bus), typename, DEVICE(dev), + br->bus_name); sec_bus->parent_dev = dev; sec_bus->map_irq = br->map_irq ? br->map_irq : pci_swizzle_map_irq_fn; sec_bus->address_space_mem = &br->address_space_mem; @@ -448,11 +448,11 @@ int pci_bridge_qemu_reserve_cap_init(PCIDevice *dev, int cap_offset, PCIBridgeQemuCap cap = { .len = cap_len, .type = REDHAT_PCI_CAP_RESOURCE_RESERVE, - .bus_res = res_reserve.bus, - .io = res_reserve.io, - .mem = res_reserve.mem_non_pref, - .mem_pref_32 = res_reserve.mem_pref_32, - .mem_pref_64 = res_reserve.mem_pref_64 + .bus_res = cpu_to_le32(res_reserve.bus), + .io = cpu_to_le64(res_reserve.io), + .mem = cpu_to_le32(res_reserve.mem_non_pref), + .mem_pref_32 = cpu_to_le32(res_reserve.mem_pref_32), + .mem_pref_64 = cpu_to_le64(res_reserve.mem_pref_64) }; int offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, diff --git a/hw/pci/pci_host.c b/hw/pci/pci_host.c index cf02f0d6a5..eaf217ff55 100644 --- a/hw/pci/pci_host.c +++ b/hw/pci/pci_host.c @@ -74,11 +74,13 @@ void pci_host_config_write_common(PCIDevice *pci_dev, uint32_t addr, /* non-zero functions are only exposed when function 0 is present, * allowing direct removal of unexposed functions. 
*/ - if (pci_dev->qdev.hotplugged && !pci_get_function_0(pci_dev)) { + if ((pci_dev->qdev.hotplugged && !pci_get_function_0(pci_dev)) || + !pci_dev->has_power) { return; } - trace_pci_cfg_write(pci_dev->name, PCI_SLOT(pci_dev->devfn), + trace_pci_cfg_write(pci_dev->name, pci_dev_bus_num(pci_dev), + PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn), addr, val); pci_dev->config_write(pci_dev, addr, val, MIN(len, limit - addr)); } @@ -97,12 +99,14 @@ uint32_t pci_host_config_read_common(PCIDevice *pci_dev, uint32_t addr, /* non-zero functions are only exposed when function 0 is present, * allowing direct removal of unexposed functions. */ - if (pci_dev->qdev.hotplugged && !pci_get_function_0(pci_dev)) { + if ((pci_dev->qdev.hotplugged && !pci_get_function_0(pci_dev)) || + !pci_dev->has_power) { return ~0x0; } ret = pci_dev->config_read(pci_dev, addr, MIN(len, limit - addr)); - trace_pci_cfg_read(pci_dev->name, PCI_SLOT(pci_dev->devfn), + trace_pci_cfg_read(pci_dev->name, pci_dev_bus_num(pci_dev), + PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn), addr, ret); return ret; diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c index 6e95d82903..68a62da0b5 100644 --- a/hw/pci/pcie.c +++ b/hw/pci/pcie.c @@ -353,7 +353,7 @@ static void hotplug_event_notify(PCIDevice *dev) msix_notify(dev, pcie_cap_flags_get_vector(dev)); } else if (msi_enabled(dev)) { msi_notify(dev, pcie_cap_flags_get_vector(dev)); - } else { + } else if (pci_intx(dev) != -1) { pci_set_irq(dev, dev->exp.hpev_notified); } } @@ -361,11 +361,46 @@ static void hotplug_event_notify(PCIDevice *dev) static void hotplug_event_clear(PCIDevice *dev) { hotplug_event_update_event_status(dev); - if (!msix_enabled(dev) && !msi_enabled(dev) && !dev->exp.hpev_notified) { + if (!msix_enabled(dev) && !msi_enabled(dev) && pci_intx(dev) != -1 && + !dev->exp.hpev_notified) { pci_irq_deassert(dev); } } +void pcie_cap_slot_enable_power(PCIDevice *dev) +{ + uint8_t *exp_cap = dev->config + dev->exp.exp_cap; + uint32_t sltcap = pci_get_long(exp_cap + PCI_EXP_SLTCAP); + + if (sltcap & PCI_EXP_SLTCAP_PCP) { + pci_set_word_by_mask(exp_cap + PCI_EXP_SLTCTL, + PCI_EXP_SLTCTL_PCC, PCI_EXP_SLTCTL_PWR_ON); + } +} + +static void pcie_set_power_device(PCIBus *bus, PCIDevice *dev, void *opaque) +{ + bool *power = opaque; + + pci_set_power(dev, *power); +} + +static void pcie_cap_update_power(PCIDevice *hotplug_dev) +{ + uint8_t *exp_cap = hotplug_dev->config + hotplug_dev->exp.exp_cap; + PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(hotplug_dev)); + uint32_t sltcap = pci_get_long(exp_cap + PCI_EXP_SLTCAP); + uint16_t sltctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL); + bool power = true; + + if (sltcap & PCI_EXP_SLTCAP_PCP) { + power = (sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_ON; + } + + pci_for_each_device(sec_bus, pci_bus_num(sec_bus), + pcie_set_power_device, &power); +} + /* * A PCI Express Hot-Plug Event has occurred, so update slot status register * and notify OS of the event if necessary. @@ -423,6 +458,11 @@ void pcie_cap_slot_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, PCIDevice *pci_dev = PCI_DEVICE(dev); uint32_t lnkcap = pci_get_long(exp_cap + PCI_EXP_LNKCAP); + if (pci_is_vf(pci_dev)) { + /* Virtual function cannot be physically disconnected */ + return; + } + /* Don't send event when device is enabled during qemu machine creation: * it is present on boot, no hotplug event is necessary. We do send an * event when the device is disabled later. 
*/ @@ -434,6 +474,7 @@ void pcie_cap_slot_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_DLLLA); } + pcie_cap_update_power(hotplug_pdev); return; } @@ -451,6 +492,7 @@ void pcie_cap_slot_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, } pcie_cap_slot_event(hotplug_pdev, PCI_EXP_HP_EV_PDC | PCI_EXP_HP_EV_ABP); + pcie_cap_update_power(hotplug_pdev); } } @@ -472,6 +514,25 @@ static void pcie_unplug_device(PCIBus *bus, PCIDevice *dev, void *opaque) object_unparent(OBJECT(dev)); } +static void pcie_cap_slot_do_unplug(PCIDevice *dev) +{ + PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev)); + uint8_t *exp_cap = dev->config + dev->exp.exp_cap; + uint32_t lnkcap = pci_get_long(exp_cap + PCI_EXP_LNKCAP); + + pci_for_each_device_under_bus(sec_bus, pcie_unplug_device, NULL); + + pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA, + PCI_EXP_SLTSTA_PDS); + if (dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA || + (lnkcap & PCI_EXP_LNKCAP_DLLLARC)) { + pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKSTA, + PCI_EXP_LNKSTA_DLLLA); + } + pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA, + PCI_EXP_SLTSTA_PDC); +} + void pcie_cap_slot_unplug_request_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { @@ -481,6 +542,7 @@ void pcie_cap_slot_unplug_request_cb(HotplugHandler *hotplug_dev, PCIDevice *hotplug_pdev = PCI_DEVICE(hotplug_dev); uint8_t *exp_cap = hotplug_pdev->config + hotplug_pdev->exp.exp_cap; uint32_t sltcap = pci_get_word(exp_cap + PCI_EXP_SLTCAP); + uint16_t sltctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL); /* Check if hot-unplug is disabled on the slot */ if ((sltcap & PCI_EXP_SLTCAP_HPC) == 0) { @@ -496,7 +558,15 @@ void pcie_cap_slot_unplug_request_cb(HotplugHandler *hotplug_dev, return; } + if ((sltctl & PCI_EXP_SLTCTL_PIC) == PCI_EXP_SLTCTL_PWR_IND_BLINK) { + error_setg(errp, "Hot-unplug failed: " + "guest is busy (power indicator blinking)"); + return; + } + dev->pending_deleted_event = true; + dev->pending_deleted_expires_ms = + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 5000; /* 5 secs */ /* In case user cancel the operation of multi-function hot-add, * remove the function that is unexposed to guest individually, @@ -509,6 +579,16 @@ void pcie_cap_slot_unplug_request_cb(HotplugHandler *hotplug_dev, return; } + if (((sltctl & PCI_EXP_SLTCTL_PIC) == PCI_EXP_SLTCTL_PWR_IND_OFF) && + ((sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_OFF)) { + /* slot is powered off -> unplug without round-trip to the guest */ + pcie_cap_slot_do_unplug(hotplug_pdev); + hotplug_event_notify(hotplug_pdev); + pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA, + PCI_EXP_SLTSTA_ABP); + return; + } + pcie_cap_slot_push_attention_button(hotplug_pdev); } @@ -625,6 +705,7 @@ void pcie_cap_slot_reset(PCIDevice *dev) PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_ABP); + pcie_cap_update_power(dev); hotplug_event_update_event_status(dev); } @@ -643,7 +724,6 @@ void pcie_cap_slot_write_config(PCIDevice *dev, uint32_t pos = dev->exp.exp_cap; uint8_t *exp_cap = dev->config + pos; uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA); - uint32_t lnkcap = pci_get_long(exp_cap + PCI_EXP_LNKCAP); if (ranges_overlap(addr, len, pos + PCI_EXP_SLTSTA, 2)) { /* @@ -693,20 +773,9 @@ void pcie_cap_slot_write_config(PCIDevice *dev, (val & PCI_EXP_SLTCTL_PIC_OFF) == PCI_EXP_SLTCTL_PIC_OFF && (!(old_slt_ctl & PCI_EXP_SLTCTL_PCC) || (old_slt_ctl & PCI_EXP_SLTCTL_PIC_OFF) != PCI_EXP_SLTCTL_PIC_OFF)) { - PCIBus *sec_bus = 
pci_bridge_get_sec_bus(PCI_BRIDGE(dev)); - pci_for_each_device(sec_bus, pci_bus_num(sec_bus), - pcie_unplug_device, NULL); - - pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA, - PCI_EXP_SLTSTA_PDS); - if (dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA || - (lnkcap & PCI_EXP_LNKCAP_DLLLARC)) { - pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKSTA, - PCI_EXP_LNKSTA_DLLLA); - } - pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA, - PCI_EXP_SLTSTA_PDC); + pcie_cap_slot_do_unplug(dev); } + pcie_cap_update_power(dev); hotplug_event_notify(dev); @@ -733,6 +802,7 @@ int pcie_cap_slot_post_load(void *opaque, int version_id) { PCIDevice *dev = opaque; hotplug_event_update_event_status(dev); + pcie_cap_update_power(dev); return 0; } @@ -876,8 +946,8 @@ void pcie_add_capability(PCIDevice *dev, uint16_t offset, uint16_t size) { assert(offset >= PCI_CONFIG_SPACE_SIZE); - assert(offset < offset + size); - assert(offset + size <= PCIE_CONFIG_SPACE_SIZE); + assert(offset < (uint16_t)(offset + size)); + assert((uint16_t)(offset + size) <= PCIE_CONFIG_SPACE_SIZE); assert(size >= 8); assert(pci_is_express(dev)); diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c index 27f9cc56af..eff62f3945 100644 --- a/hw/pci/pcie_aer.c +++ b/hw/pci/pcie_aer.c @@ -290,7 +290,7 @@ static void pcie_aer_root_notify(PCIDevice *dev) msix_notify(dev, pcie_aer_root_get_vector(dev)); } else if (msi_enabled(dev)) { msi_notify(dev, pcie_aer_root_get_vector(dev)); - } else { + } else if (pci_intx(dev) != -1) { pci_irq_assert(dev); } } @@ -323,7 +323,7 @@ static void pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg) */ } - /* Errro Message Received: Root Error Status register */ + /* Error Message Received: Root Error Status register */ switch (msg->severity) { case PCI_ERR_ROOT_CMD_COR_EN: if (root_status & PCI_ERR_ROOT_COR_RCV) { @@ -774,7 +774,9 @@ void pcie_aer_root_write_config(PCIDevice *dev, uint32_t root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND); /* 6.2.4.1.2 Interrupt Generation */ if (!msix_enabled(dev) && !msi_enabled(dev)) { - pci_set_irq(dev, !!(root_cmd & enabled_cmd)); + if (pci_intx(dev) != -1) { + pci_set_irq(dev, !!(root_cmd & enabled_cmd)); + } return; } diff --git a/hw/pci/pcie_doe.c b/hw/pci/pcie_doe.c new file mode 100644 index 0000000000..2210f86968 --- /dev/null +++ b/hw/pci/pcie_doe.c @@ -0,0 +1,367 @@ +/* + * PCIe Data Object Exchange + * + * Copyright (C) 2021 Avery Design Systems, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qemu/range.h" +#include "hw/pci/pci.h" +#include "hw/pci/pcie.h" +#include "hw/pci/pcie_doe.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" + +#define DWORD_BYTE 4 + +typedef struct DoeDiscoveryReq { + DOEHeader header; + uint8_t index; + uint8_t reserved[3]; +} QEMU_PACKED DoeDiscoveryReq; + +typedef struct DoeDiscoveryRsp { + DOEHeader header; + uint16_t vendor_id; + uint8_t data_obj_type; + uint8_t next_index; +} QEMU_PACKED DoeDiscoveryRsp; + +static bool pcie_doe_discovery(DOECap *doe_cap) +{ + DoeDiscoveryReq *req = pcie_doe_get_write_mbox_ptr(doe_cap); + DoeDiscoveryRsp rsp; + uint8_t index = req->index; + DOEProtocol *prot; + + /* Discard request if length does not match DoeDiscoveryReq */ + if (pcie_doe_get_obj_len(req) < + DIV_ROUND_UP(sizeof(DoeDiscoveryReq), DWORD_BYTE)) { + return false; + } + + rsp.header = (DOEHeader) { + .vendor_id = PCI_VENDOR_ID_PCI_SIG, + .data_obj_type = PCI_SIG_DOE_DISCOVERY, + .length = DIV_ROUND_UP(sizeof(DoeDiscoveryRsp), DWORD_BYTE), + }; + + /* Point to the requested protocol, index 0 must be Discovery */ + if (index == 0) { + rsp.vendor_id = PCI_VENDOR_ID_PCI_SIG; + rsp.data_obj_type = PCI_SIG_DOE_DISCOVERY; + } else { + if (index < doe_cap->protocol_num) { + prot = &doe_cap->protocols[index - 1]; + rsp.vendor_id = prot->vendor_id; + rsp.data_obj_type = prot->data_obj_type; + } else { + rsp.vendor_id = 0xFFFF; + rsp.data_obj_type = 0xFF; + } + } + + if (index + 1 == doe_cap->protocol_num) { + rsp.next_index = 0; + } else { + rsp.next_index = index + 1; + } + + pcie_doe_set_rsp(doe_cap, &rsp); + + return true; +} + +static void pcie_doe_reset_mbox(DOECap *st) +{ + st->read_mbox_idx = 0; + st->read_mbox_len = 0; + st->write_mbox_len = 0; + + memset(st->read_mbox, 0, PCI_DOE_DW_SIZE_MAX * DWORD_BYTE); + memset(st->write_mbox, 0, PCI_DOE_DW_SIZE_MAX * DWORD_BYTE); +} + +void pcie_doe_init(PCIDevice *dev, DOECap *doe_cap, uint16_t offset, + DOEProtocol *protocols, bool intr, uint16_t vec) +{ + pcie_add_capability(dev, PCI_EXT_CAP_ID_DOE, 0x1, offset, + PCI_DOE_SIZEOF); + + doe_cap->pdev = dev; + doe_cap->offset = offset; + + if (intr && (msi_present(dev) || msix_present(dev))) { + doe_cap->cap.intr = intr; + doe_cap->cap.vec = vec; + } + + doe_cap->write_mbox = g_malloc0(PCI_DOE_DW_SIZE_MAX * DWORD_BYTE); + doe_cap->read_mbox = g_malloc0(PCI_DOE_DW_SIZE_MAX * DWORD_BYTE); + + pcie_doe_reset_mbox(doe_cap); + + doe_cap->protocols = protocols; + for (; protocols->vendor_id; protocols++) { + doe_cap->protocol_num++; + } + assert(doe_cap->protocol_num < PCI_DOE_PROTOCOL_NUM_MAX); + + /* Increment to allow for the discovery protocol */ + doe_cap->protocol_num++; +} + +void pcie_doe_fini(DOECap *doe_cap) +{ + g_free(doe_cap->read_mbox); + g_free(doe_cap->write_mbox); + g_free(doe_cap); +} + +uint32_t pcie_doe_build_protocol(DOEProtocol *p) +{ + return DATA_OBJ_BUILD_HEADER1(p->vendor_id, p->data_obj_type); +} + +void *pcie_doe_get_write_mbox_ptr(DOECap *doe_cap) +{ + return doe_cap->write_mbox; +} + +/* + * Copy the response to read mailbox buffer + * This might be called in self-defined handle_request() if a DOE response is + * required in the corresponding protocol + */ +void pcie_doe_set_rsp(DOECap *doe_cap, void *rsp) +{ + uint32_t len = pcie_doe_get_obj_len(rsp); + + memcpy(doe_cap->read_mbox + doe_cap->read_mbox_len, rsp, len * DWORD_BYTE); + doe_cap->read_mbox_len += len; +} + +uint32_t pcie_doe_get_obj_len(void *obj) +{ + 
uint32_t len; + + if (!obj) { + return 0; + } + + /* Only lower 18 bits are valid */ + len = DATA_OBJ_LEN_MASK(((DOEHeader *)obj)->length); + + /* PCIe r6.0 Table 6.29: a value of 00000h indicates 2^18 DW */ + return (len) ? len : PCI_DOE_DW_SIZE_MAX; +} + +static void pcie_doe_irq_assert(DOECap *doe_cap) +{ + PCIDevice *dev = doe_cap->pdev; + + if (doe_cap->cap.intr && doe_cap->ctrl.intr) { + if (doe_cap->status.intr) { + return; + } + doe_cap->status.intr = 1; + + if (msix_enabled(dev)) { + msix_notify(dev, doe_cap->cap.vec); + } else if (msi_enabled(dev)) { + msi_notify(dev, doe_cap->cap.vec); + } + } +} + +static void pcie_doe_set_ready(DOECap *doe_cap, bool rdy) +{ + doe_cap->status.ready = rdy; + + if (rdy) { + pcie_doe_irq_assert(doe_cap); + } +} + +static void pcie_doe_set_error(DOECap *doe_cap, bool err) +{ + doe_cap->status.error = err; + + if (err) { + pcie_doe_irq_assert(doe_cap); + } +} + +/* + * Check incoming request in write_mbox for protocol format + */ +static void pcie_doe_prepare_rsp(DOECap *doe_cap) +{ + bool success = false; + int p; + bool (*handle_request)(DOECap *) = NULL; + + if (doe_cap->status.error) { + return; + } + + if (doe_cap->write_mbox[0] == + DATA_OBJ_BUILD_HEADER1(PCI_VENDOR_ID_PCI_SIG, PCI_SIG_DOE_DISCOVERY)) { + handle_request = pcie_doe_discovery; + } else { + for (p = 0; p < doe_cap->protocol_num - 1; p++) { + if (doe_cap->write_mbox[0] == + pcie_doe_build_protocol(&doe_cap->protocols[p])) { + handle_request = doe_cap->protocols[p].handle_request; + break; + } + } + } + + /* + * PCIe r6 DOE 6.30.1: + * If the number of DW transferred does not match the + * indicated Length for a data object, then the + * data object must be silently discarded. + */ + if (handle_request && (doe_cap->write_mbox_len == + pcie_doe_get_obj_len(pcie_doe_get_write_mbox_ptr(doe_cap)))) { + success = handle_request(doe_cap); + } + + if (success) { + pcie_doe_set_ready(doe_cap, 1); + } else { + pcie_doe_reset_mbox(doe_cap); + } +} + +/* + * Read from DOE config space. + * Return false if the address not within DOE_CAP range. 
+ */ +bool pcie_doe_read_config(DOECap *doe_cap, uint32_t addr, int size, + uint32_t *buf) +{ + uint32_t shift; + uint16_t doe_offset = doe_cap->offset; + + if (!range_covers_byte(doe_offset + PCI_EXP_DOE_CAP, + PCI_DOE_SIZEOF - 4, addr)) { + return false; + } + + addr -= doe_offset; + *buf = 0; + + if (range_covers_byte(PCI_EXP_DOE_CAP, DWORD_BYTE, addr)) { + *buf = FIELD_DP32(*buf, PCI_DOE_CAP_REG, INTR_SUPP, + doe_cap->cap.intr); + *buf = FIELD_DP32(*buf, PCI_DOE_CAP_REG, DOE_INTR_MSG_NUM, + doe_cap->cap.vec); + } else if (range_covers_byte(PCI_EXP_DOE_CTRL, DWORD_BYTE, addr)) { + /* Must return ABORT=0 and GO=0 */ + *buf = FIELD_DP32(*buf, PCI_DOE_CAP_CONTROL, DOE_INTR_EN, + doe_cap->ctrl.intr); + } else if (range_covers_byte(PCI_EXP_DOE_STATUS, DWORD_BYTE, addr)) { + *buf = FIELD_DP32(*buf, PCI_DOE_CAP_STATUS, DOE_BUSY, + doe_cap->status.busy); + *buf = FIELD_DP32(*buf, PCI_DOE_CAP_STATUS, DOE_INTR_STATUS, + doe_cap->status.intr); + *buf = FIELD_DP32(*buf, PCI_DOE_CAP_STATUS, DOE_ERROR, + doe_cap->status.error); + *buf = FIELD_DP32(*buf, PCI_DOE_CAP_STATUS, DATA_OBJ_RDY, + doe_cap->status.ready); + /* Mailbox should be DW accessed */ + } else if (addr == PCI_EXP_DOE_RD_DATA_MBOX && size == DWORD_BYTE) { + if (doe_cap->status.ready && !doe_cap->status.error) { + *buf = doe_cap->read_mbox[doe_cap->read_mbox_idx]; + } + } + + /* Process Alignment */ + shift = addr % DWORD_BYTE; + *buf = extract32(*buf, shift * 8, size * 8); + + return true; +} + +/* + * Write to DOE config space. + * Return if the address not within DOE_CAP range or receives an abort + */ +void pcie_doe_write_config(DOECap *doe_cap, + uint32_t addr, uint32_t val, int size) +{ + uint16_t doe_offset = doe_cap->offset; + uint32_t shift; + + if (!range_covers_byte(doe_offset + PCI_EXP_DOE_CAP, + PCI_DOE_SIZEOF - 4, addr)) { + return; + } + + /* Process Alignment */ + shift = addr % DWORD_BYTE; + addr -= (doe_offset + shift); + val = deposit32(val, shift * 8, size * 8, val); + + switch (addr) { + case PCI_EXP_DOE_CTRL: + if (FIELD_EX32(val, PCI_DOE_CAP_CONTROL, DOE_ABORT)) { + pcie_doe_set_ready(doe_cap, 0); + pcie_doe_set_error(doe_cap, 0); + pcie_doe_reset_mbox(doe_cap); + return; + } + + if (FIELD_EX32(val, PCI_DOE_CAP_CONTROL, DOE_GO)) { + pcie_doe_prepare_rsp(doe_cap); + } + + if (FIELD_EX32(val, PCI_DOE_CAP_CONTROL, DOE_INTR_EN)) { + doe_cap->ctrl.intr = 1; + /* Clear interrupt bit located within the first byte */ + } else if (shift == 0) { + doe_cap->ctrl.intr = 0; + } + break; + case PCI_EXP_DOE_STATUS: + if (FIELD_EX32(val, PCI_DOE_CAP_STATUS, DOE_INTR_STATUS)) { + doe_cap->status.intr = 0; + } + break; + case PCI_EXP_DOE_RD_DATA_MBOX: + /* Mailbox should be DW accessed */ + if (size != DWORD_BYTE) { + return; + } + doe_cap->read_mbox_idx++; + if (doe_cap->read_mbox_idx == doe_cap->read_mbox_len) { + pcie_doe_reset_mbox(doe_cap); + pcie_doe_set_ready(doe_cap, 0); + } else if (doe_cap->read_mbox_idx > doe_cap->read_mbox_len) { + /* Underflow */ + pcie_doe_set_error(doe_cap, 1); + } + break; + case PCI_EXP_DOE_WR_DATA_MBOX: + /* Mailbox should be DW accessed */ + if (size != DWORD_BYTE) { + return; + } + doe_cap->write_mbox[doe_cap->write_mbox_len] = val; + doe_cap->write_mbox_len++; + break; + case PCI_EXP_DOE_CAP: + /* fallthrough */ + default: + break; + } +} diff --git a/hw/pci/pcie_port.c b/hw/pci/pcie_port.c index da850e8dde..687e4e763a 100644 --- a/hw/pci/pcie_port.c +++ b/hw/pci/pcie_port.c @@ -136,6 +136,31 @@ static void pcie_port_class_init(ObjectClass *oc, void *data) device_class_set_props(dc, 
pcie_port_props); } +PCIDevice *pcie_find_port_by_pn(PCIBus *bus, uint8_t pn) +{ + int devfn; + + for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { + PCIDevice *d = bus->devices[devfn]; + PCIEPort *port; + + if (!d || !pci_is_express(d) || !d->exp.exp_cap) { + continue; + } + + if (!object_dynamic_cast(OBJECT(d), TYPE_PCIE_PORT)) { + continue; + } + + port = PCIE_PORT(d); + if (port->port == pn) { + return d; + } + } + + return NULL; +} + static const TypeInfo pcie_port_type_info = { .name = TYPE_PCIE_PORT, .parent = TYPE_PCI_BRIDGE, @@ -148,7 +173,7 @@ static Property pcie_slot_props[] = { DEFINE_PROP_UINT8("chassis", PCIESlot, chassis, 0), DEFINE_PROP_UINT16("slot", PCIESlot, slot, 0), DEFINE_PROP_BOOL("hotplug", PCIESlot, hotplug, true), - DEFINE_PROP_BOOL("native-hotplug", PCIESlot, native_hotplug, true), + DEFINE_PROP_BOOL("x-native-hotplug", PCIESlot, native_hotplug, true), DEFINE_PROP_END_OF_LIST() }; diff --git a/hw/pci/pcie_sriov.c b/hw/pci/pcie_sriov.c new file mode 100644 index 0000000000..8e3faf1f59 --- /dev/null +++ b/hw/pci/pcie_sriov.c @@ -0,0 +1,302 @@ +/* + * pcie_sriov.c: + * + * Implementation of SR/IOV emulation support. + * + * Copyright (c) 2015-2017 Knut Omang + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/pci/pcie.h" +#include "hw/pci/pci_bus.h" +#include "hw/qdev-properties.h" +#include "qemu/error-report.h" +#include "qemu/range.h" +#include "qapi/error.h" +#include "trace.h" + +static PCIDevice *register_vf(PCIDevice *pf, int devfn, + const char *name, uint16_t vf_num); +static void unregister_vfs(PCIDevice *dev); + +void pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset, + const char *vfname, uint16_t vf_dev_id, + uint16_t init_vfs, uint16_t total_vfs, + uint16_t vf_offset, uint16_t vf_stride) +{ + uint8_t *cfg = dev->config + offset; + uint8_t *wmask; + + pcie_add_capability(dev, PCI_EXT_CAP_ID_SRIOV, 1, + offset, PCI_EXT_CAP_SRIOV_SIZEOF); + dev->exp.sriov_cap = offset; + dev->exp.sriov_pf.num_vfs = 0; + dev->exp.sriov_pf.vfname = g_strdup(vfname); + dev->exp.sriov_pf.vf = NULL; + + pci_set_word(cfg + PCI_SRIOV_VF_OFFSET, vf_offset); + pci_set_word(cfg + PCI_SRIOV_VF_STRIDE, vf_stride); + + /* + * Mandatory page sizes to support. 
+ * Device implementations can call pcie_sriov_pf_add_sup_pgsize() + * to set more bits: + */ + pci_set_word(cfg + PCI_SRIOV_SUP_PGSIZE, SRIOV_SUP_PGSIZE_MINREQ); + + /* + * Default is to use 4K pages, software can modify it + * to any of the supported bits + */ + pci_set_word(cfg + PCI_SRIOV_SYS_PGSIZE, 0x1); + + /* Set up device ID and initial/total number of VFs available */ + pci_set_word(cfg + PCI_SRIOV_VF_DID, vf_dev_id); + pci_set_word(cfg + PCI_SRIOV_INITIAL_VF, init_vfs); + pci_set_word(cfg + PCI_SRIOV_TOTAL_VF, total_vfs); + pci_set_word(cfg + PCI_SRIOV_NUM_VF, 0); + + /* Write enable control bits */ + wmask = dev->wmask + offset; + pci_set_word(wmask + PCI_SRIOV_CTRL, + PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE | PCI_SRIOV_CTRL_ARI); + pci_set_word(wmask + PCI_SRIOV_NUM_VF, 0xffff); + pci_set_word(wmask + PCI_SRIOV_SYS_PGSIZE, 0x553); + + qdev_prop_set_bit(&dev->qdev, "multifunction", true); +} + +void pcie_sriov_pf_exit(PCIDevice *dev) +{ + unregister_vfs(dev); + g_free((char *)dev->exp.sriov_pf.vfname); + dev->exp.sriov_pf.vfname = NULL; +} + +void pcie_sriov_pf_init_vf_bar(PCIDevice *dev, int region_num, + uint8_t type, dma_addr_t size) +{ + uint32_t addr; + uint64_t wmask; + uint16_t sriov_cap = dev->exp.sriov_cap; + + assert(sriov_cap > 0); + assert(region_num >= 0); + assert(region_num < PCI_NUM_REGIONS); + assert(region_num != PCI_ROM_SLOT); + + wmask = ~(size - 1); + addr = sriov_cap + PCI_SRIOV_BAR + region_num * 4; + + pci_set_long(dev->config + addr, type); + if (!(type & PCI_BASE_ADDRESS_SPACE_IO) && + type & PCI_BASE_ADDRESS_MEM_TYPE_64) { + pci_set_quad(dev->wmask + addr, wmask); + pci_set_quad(dev->cmask + addr, ~0ULL); + } else { + pci_set_long(dev->wmask + addr, wmask & 0xffffffff); + pci_set_long(dev->cmask + addr, 0xffffffff); + } + dev->exp.sriov_pf.vf_bar_type[region_num] = type; +} + +void pcie_sriov_vf_register_bar(PCIDevice *dev, int region_num, + MemoryRegion *memory) +{ + PCIIORegion *r; + PCIBus *bus = pci_get_bus(dev); + uint8_t type; + pcibus_t size = memory_region_size(memory); + + assert(pci_is_vf(dev)); /* PFs must use pci_register_bar */ + assert(region_num >= 0); + assert(region_num < PCI_NUM_REGIONS); + type = dev->exp.sriov_vf.pf->exp.sriov_pf.vf_bar_type[region_num]; + + if (!is_power_of_2(size)) { + error_report("%s: PCI region size must be a power" + " of two - type=0x%x, size=0x%"FMT_PCIBUS, + __func__, type, size); + exit(1); + } + + r = &dev->io_regions[region_num]; + r->memory = memory; + r->address_space = + type & PCI_BASE_ADDRESS_SPACE_IO + ? 
bus->address_space_io + : bus->address_space_mem; + r->size = size; + r->type = type; + + r->addr = pci_bar_address(dev, region_num, r->type, r->size); + if (r->addr != PCI_BAR_UNMAPPED) { + memory_region_add_subregion_overlap(r->address_space, + r->addr, r->memory, 1); + } +} + +static PCIDevice *register_vf(PCIDevice *pf, int devfn, const char *name, + uint16_t vf_num) +{ + PCIDevice *dev = pci_new(devfn, name); + dev->exp.sriov_vf.pf = pf; + dev->exp.sriov_vf.vf_number = vf_num; + PCIBus *bus = pci_get_bus(pf); + Error *local_err = NULL; + + qdev_realize(&dev->qdev, &bus->qbus, &local_err); + if (local_err) { + error_report_err(local_err); + return NULL; + } + + /* set vid/did according to sr/iov spec - they are not used */ + pci_config_set_vendor_id(dev->config, 0xffff); + pci_config_set_device_id(dev->config, 0xffff); + + return dev; +} + +static void register_vfs(PCIDevice *dev) +{ + uint16_t num_vfs; + uint16_t i; + uint16_t sriov_cap = dev->exp.sriov_cap; + uint16_t vf_offset = + pci_get_word(dev->config + sriov_cap + PCI_SRIOV_VF_OFFSET); + uint16_t vf_stride = + pci_get_word(dev->config + sriov_cap + PCI_SRIOV_VF_STRIDE); + int32_t devfn = dev->devfn + vf_offset; + + assert(sriov_cap > 0); + num_vfs = pci_get_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF); + + dev->exp.sriov_pf.vf = g_new(PCIDevice *, num_vfs); + assert(dev->exp.sriov_pf.vf); + + trace_sriov_register_vfs(dev->name, PCI_SLOT(dev->devfn), + PCI_FUNC(dev->devfn), num_vfs); + for (i = 0; i < num_vfs; i++) { + dev->exp.sriov_pf.vf[i] = register_vf(dev, devfn, + dev->exp.sriov_pf.vfname, i); + if (!dev->exp.sriov_pf.vf[i]) { + num_vfs = i; + break; + } + devfn += vf_stride; + } + dev->exp.sriov_pf.num_vfs = num_vfs; +} + +static void unregister_vfs(PCIDevice *dev) +{ + Error *local_err = NULL; + uint16_t num_vfs = dev->exp.sriov_pf.num_vfs; + uint16_t i; + + trace_sriov_unregister_vfs(dev->name, PCI_SLOT(dev->devfn), + PCI_FUNC(dev->devfn), num_vfs); + for (i = 0; i < num_vfs; i++) { + PCIDevice *vf = dev->exp.sriov_pf.vf[i]; + object_property_set_bool(OBJECT(vf), "realized", false, &local_err); + if (local_err) { + fprintf(stderr, "Failed to unplug: %s\n", + error_get_pretty(local_err)); + error_free(local_err); + } + object_unparent(OBJECT(vf)); + } + g_free(dev->exp.sriov_pf.vf); + dev->exp.sriov_pf.vf = NULL; + dev->exp.sriov_pf.num_vfs = 0; + pci_set_word(dev->config + dev->exp.sriov_cap + PCI_SRIOV_NUM_VF, 0); +} + +void pcie_sriov_config_write(PCIDevice *dev, uint32_t address, + uint32_t val, int len) +{ + uint32_t off; + uint16_t sriov_cap = dev->exp.sriov_cap; + + if (!sriov_cap || address < sriov_cap) { + return; + } + off = address - sriov_cap; + if (off >= PCI_EXT_CAP_SRIOV_SIZEOF) { + return; + } + + trace_sriov_config_write(dev->name, PCI_SLOT(dev->devfn), + PCI_FUNC(dev->devfn), off, val, len); + + if (range_covers_byte(off, len, PCI_SRIOV_CTRL)) { + if (dev->exp.sriov_pf.num_vfs) { + if (!(val & PCI_SRIOV_CTRL_VFE)) { + unregister_vfs(dev); + } + } else { + if (val & PCI_SRIOV_CTRL_VFE) { + register_vfs(dev); + } + } + } +} + + +/* Reset SR/IOV VF Enable bit to trigger an unregister of all VFs */ +void pcie_sriov_pf_disable_vfs(PCIDevice *dev) +{ + uint16_t sriov_cap = dev->exp.sriov_cap; + if (sriov_cap) { + uint32_t val = pci_get_byte(dev->config + sriov_cap + PCI_SRIOV_CTRL); + if (val & PCI_SRIOV_CTRL_VFE) { + val &= ~PCI_SRIOV_CTRL_VFE; + pcie_sriov_config_write(dev, sriov_cap + PCI_SRIOV_CTRL, val, 1); + } + } +} + +/* Add optional supported page sizes to the mask of supported page sizes */ 
+void pcie_sriov_pf_add_sup_pgsize(PCIDevice *dev, uint16_t opt_sup_pgsize) +{ + uint8_t *cfg = dev->config + dev->exp.sriov_cap; + uint8_t *wmask = dev->wmask + dev->exp.sriov_cap; + + uint16_t sup_pgsize = pci_get_word(cfg + PCI_SRIOV_SUP_PGSIZE); + + sup_pgsize |= opt_sup_pgsize; + + /* + * Make sure the new bits are set, and that system page size + * also can be set to any of the new values according to spec: + */ + pci_set_word(cfg + PCI_SRIOV_SUP_PGSIZE, sup_pgsize); + pci_set_word(wmask + PCI_SRIOV_SYS_PGSIZE, sup_pgsize); +} + + +uint16_t pcie_sriov_vf_number(PCIDevice *dev) +{ + assert(pci_is_vf(dev)); + return dev->exp.sriov_vf.vf_number; +} + +PCIDevice *pcie_sriov_get_pf(PCIDevice *dev) +{ + return dev->exp.sriov_vf.pf; +} + +PCIDevice *pcie_sriov_get_vf_at_index(PCIDevice *dev, int n) +{ + assert(!pci_is_vf(dev)); + if (n < dev->exp.sriov_pf.num_vfs) { + return dev->exp.sriov_pf.vf[n]; + } + return NULL; +} diff --git a/hw/pci/shpc.c b/hw/pci/shpc.c index 28e62174c4..e71f3a7483 100644 --- a/hw/pci/shpc.c +++ b/hw/pci/shpc.c @@ -456,7 +456,7 @@ static int shpc_cap_add_config(PCIDevice *d, Error **errp) pci_set_byte(config + SHPC_CAP_CxP, 0); pci_set_long(config + SHPC_CAP_DWORD_DATA, 0); d->shpc->cap = config_offset; - /* Make dword select and data writeable. */ + /* Make dword select and data writable. */ pci_set_byte(d->wmask + config_offset + SHPC_CAP_DWORD_SELECT, 0xff); pci_set_long(d->wmask + config_offset + SHPC_CAP_DWORD_DATA, 0xffffffff); return 0; @@ -480,7 +480,8 @@ static const MemoryRegionOps shpc_mmio_ops = { .endianness = DEVICE_LITTLE_ENDIAN, .valid = { /* SHPC ECN requires dword accesses, but the original 1.0 spec doesn't. - * It's easier to suppport all sizes than worry about it. */ + * It's easier to support all sizes than worry about it. + */ .min_access_size = 1, .max_access_size = 4, }, diff --git a/hw/pci/trace-events b/hw/pci/trace-events index fc777d0b5e..aaf46bc92d 100644 --- a/hw/pci/trace-events +++ b/hw/pci/trace-events @@ -1,12 +1,17 @@ # See docs/devel/tracing.rst for syntax documentation. 
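(Editorial aside, not part of the patch.) Before the individual trace points, a short sketch of how the SR-IOV helpers added in hw/pci/pcie_sriov.c above are intended to be used by a PF/VF device pair. Everything prefixed EXAMPLE_/example_, the ExampleVFState type and the 64 KiB BAR size are assumptions for illustration only:

/* PF realize(): expose the SR-IOV capability and the shared VF BAR layout */
static void example_pf_init_sriov(PCIDevice *pf)
{
    pcie_sriov_pf_init(pf, EXAMPLE_SRIOV_CAP_OFFSET, "example-vf",
                       EXAMPLE_VF_DEVICE_ID,
                       2 /* InitialVFs */, 2 /* TotalVFs */,
                       EXAMPLE_VF_OFFSET, EXAMPLE_VF_STRIDE);
    pcie_sriov_pf_init_vf_bar(pf, 0,
                              PCI_BASE_ADDRESS_MEM_TYPE_64 |
                              PCI_BASE_ADDRESS_MEM_PREFETCH,
                              64 * KiB);
}

/*
 * VF realize(): VFs register BARs via the PF's template instead of
 * pci_register_bar()
 */
static void example_vf_realize(PCIDevice *vf, Error **errp)
{
    ExampleVFState *s = EXAMPLE_VF(vf);

    /* Backing region only; a real device would supply MMIO ops */
    memory_region_init(&s->mmio, OBJECT(vf), "example-vf-bar0", 64 * KiB);
    pcie_sriov_vf_register_bar(vf, 0, &s->mmio);
}

When the guest sets VF Enable in the SR-IOV control register, the config write is expected to reach pcie_sriov_config_write(), which registers or unregisters the VFs as shown above; pcie_sriov_pf_exit() tears the capability down again.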
# pci.c -pci_update_mappings_del(void *d, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "d=%p %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64 -pci_update_mappings_add(void *d, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "d=%p %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64 +pci_update_mappings_del(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "%s %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64 +pci_update_mappings_add(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "%s %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64 # pci_host.c -pci_cfg_read(const char *dev, unsigned devid, unsigned fnid, unsigned offs, unsigned val) "%s %02u:%u @0x%x -> 0x%x" -pci_cfg_write(const char *dev, unsigned devid, unsigned fnid, unsigned offs, unsigned val) "%s %02u:%u @0x%x <- 0x%x" +pci_cfg_read(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, unsigned offs, unsigned val) "%s %02x:%02x.%x @0x%x -> 0x%x" +pci_cfg_write(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, unsigned offs, unsigned val) "%s %02x:%02x.%x @0x%x <- 0x%x" # msix.c msix_write_config(char *name, bool enabled, bool masked) "dev %s enabled %d masked %d" + +# hw/pci/pcie_sriov.c +sriov_register_vfs(const char *name, int slot, int function, int num_vfs) "%s %02x:%x: creating %d vf devs" +sriov_unregister_vfs(const char *name, int slot, int function, int num_vfs) "%s %02x:%x: Unregistering %d vf devs" +sriov_config_write(const char *name, int slot, int fun, uint32_t offset, uint32_t val, uint32_t len) "%s %02x:%x: sriov offset 0x%x val 0x%x len %d" diff --git a/hw/ppc/Kconfig b/hw/ppc/Kconfig index 400511c6b7..b8d2522f45 100644 --- a/hw/ppc/Kconfig +++ b/hw/ppc/Kconfig @@ -58,7 +58,6 @@ config PPC4XX config SAM460EX bool - select PPC405 select PFLASH_CFI01 select IDE_SII3112 select M41T80 @@ -72,14 +71,13 @@ config SAM460EX config PEGASOS2 bool + imply ATI_VGA select MV64361 select VT82C686 - select IDE_VIA select SMBUS_EEPROM select VOF # This should come with VT82C686 select ACPI_X86 - imply ATI_VGA config PREP bool @@ -125,7 +123,9 @@ config E500 imply AT24C imply VIRTIO_PCI select ETSEC + select GPIO_MPC8XXX select OPENPIC + select PFLASH_CFI01 select PLATFORM_BUS select PPCE500_PCI select SERIAL @@ -133,6 +133,14 @@ config E500 select FDT_PPC select DS1338 +config E500PLAT + bool + select E500 + +config MPC8544DS + bool + select E500 + config VIRTEX bool select PPC4XX diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c index 95451414dd..2fe496677c 100644 --- a/hw/ppc/e500.c +++ b/hw/ppc/e500.c @@ -15,16 +15,18 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qemu/units.h" +#include "qemu/guest-random.h" #include "qapi/error.h" #include "e500.h" #include "e500-ccsr.h" #include "net/net.h" #include "qemu/config-file.h" +#include "hw/block/flash.h" #include "hw/char/serial.h" #include "hw/pci/pci.h" +#include "sysemu/block-backend-io.h" #include "sysemu/sysemu.h" #include "sysemu/kvm.h" #include "sysemu/reset.h" @@ -48,7 +50,6 @@ #include "hw/irq.h" #define EPAPR_MAGIC (0x45504150) -#define BINARY_DEVICE_TREE_FILE "mpc8544ds.dtb" #define DTC_LOAD_PAD 0x1800000 #define DTC_PAD_MASK 0xFFFFF #define DTB_MAX_SIZE (8 * MiB) @@ -268,6 +269,31 @@ static void sysbus_device_create_devtree(SysBusDevice *sbdev, void *opaque) } } +static void create_devtree_flash(SysBusDevice *sbdev, + PlatformDevtreeData *data) +{ + g_autofree char *name = NULL; + 
uint64_t num_blocks = object_property_get_uint(OBJECT(sbdev), + "num-blocks", + &error_fatal); + uint64_t sector_length = object_property_get_uint(OBJECT(sbdev), + "sector-length", + &error_fatal); + uint64_t bank_width = object_property_get_uint(OBJECT(sbdev), + "width", + &error_fatal); + hwaddr flashbase = 0; + hwaddr flashsize = num_blocks * sector_length; + void *fdt = data->fdt; + + name = g_strdup_printf("%s/nor@%" PRIx64, data->node, flashbase); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "cfi-flash"); + qemu_fdt_setprop_sized_cells(fdt, name, "reg", + 1, flashbase, 1, flashsize); + qemu_fdt_setprop_cell(fdt, name, "bank-width", bank_width); +} + static void platform_bus_create_devtree(PPCE500MachineState *pms, void *fdt, const char *mpic) { @@ -277,6 +303,8 @@ static void platform_bus_create_devtree(PPCE500MachineState *pms, uint64_t addr = pmc->platform_bus_base; uint64_t size = pmc->platform_bus_size; int irq_start = pmc->platform_bus_first_irq; + SysBusDevice *sbdev; + bool ambiguous; /* Create a /platform node that we can put all devices into */ @@ -303,6 +331,13 @@ static void platform_bus_create_devtree(PPCE500MachineState *pms, /* Loop through all dynamic sysbus devices and create nodes for them */ foreach_dynamic_sysbus_device(sysbus_device_create_devtree, &data); + sbdev = SYS_BUS_DEVICE(object_resolve_path_type("", TYPE_PFLASH_CFI01, + &ambiguous)); + if (sbdev) { + assert(!ambiguous); + create_devtree_flash(sbdev, &data); + } + g_free(node); } @@ -348,6 +383,7 @@ static int ppce500_load_device_tree(PPCE500MachineState *pms, }; const char *dtb_file = machine->dtb; const char *toplevel_compat = machine->dt_compatible; + uint8_t rng_seed[32]; if (dtb_file) { char *filename; @@ -405,6 +441,9 @@ static int ppce500_load_device_tree(PPCE500MachineState *pms, if (ret < 0) fprintf(stderr, "couldn't set /chosen/bootargs\n"); + qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed)); + qemu_fdt_setprop(fdt, "/chosen", "rng-seed", rng_seed, sizeof(rng_seed)); + if (kvm_enabled()) { /* Read out host's frequencies */ clock_freq = kvmppc_get_clockfreq(); @@ -853,6 +892,7 @@ void ppce500_init(MachineState *machine) unsigned int pci_irq_nrs[PCI_NUM_PINS] = {1, 2, 3, 4}; IrqLines *irqs; DeviceState *dev, *mpicdev; + DriveInfo *dinfo; CPUPPCState *firstenv = NULL; MemoryRegion *ccsr_addr_space; SysBusDevice *s; @@ -863,7 +903,6 @@ void ppce500_init(MachineState *machine) for (i = 0; i < smp_cpus; i++) { PowerPCCPU *cpu; CPUState *cs; - qemu_irq *input; cpu = POWERPC_CPU(object_new(machine->cpu_type)); env = &cpu->env; @@ -887,9 +926,10 @@ void ppce500_init(MachineState *machine) firstenv = env; } - input = (qemu_irq *)env->irq_inputs; - irqs[i].irq[OPENPIC_OUTPUT_INT] = input[PPCE500_INPUT_INT]; - irqs[i].irq[OPENPIC_OUTPUT_CINT] = input[PPCE500_INPUT_CINT]; + irqs[i].irq[OPENPIC_OUTPUT_INT] = + qdev_get_gpio_in(DEVICE(cpu), PPCE500_INPUT_INT); + irqs[i].irq[OPENPIC_OUTPUT_CINT] = + qdev_get_gpio_in(DEVICE(cpu), PPCE500_INPUT_CINT); env->spr_cb[SPR_BOOKE_PIR].default_value = cs->cpu_index = i; env->mpic_iack = pmc->ccsrbar_base + MPC8544_MPIC_REGS_OFFSET + 0xa0; @@ -899,7 +939,7 @@ void ppce500_init(MachineState *machine) if (!i) { /* Primary CPU */ struct boot_info *boot_info; - boot_info = g_malloc0(sizeof(struct boot_info)); + boot_info = g_new0(struct boot_info, 1); qemu_register_reset(ppce500_cpu_reset, cpu); env->load_info = boot_info; } else { @@ -1004,23 +1044,63 @@ void ppce500_init(MachineState *machine) } /* Platform Bus Device */ - if 
(pmc->has_platform_bus) { - dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE); - dev->id = TYPE_PLATFORM_BUS_DEVICE; - qdev_prop_set_uint32(dev, "num_irqs", pmc->platform_bus_num_irqs); - qdev_prop_set_uint32(dev, "mmio_size", pmc->platform_bus_size); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - pms->pbus_dev = PLATFORM_BUS_DEVICE(dev); + dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE); + dev->id = g_strdup(TYPE_PLATFORM_BUS_DEVICE); + qdev_prop_set_uint32(dev, "num_irqs", pmc->platform_bus_num_irqs); + qdev_prop_set_uint32(dev, "mmio_size", pmc->platform_bus_size); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + pms->pbus_dev = PLATFORM_BUS_DEVICE(dev); - s = SYS_BUS_DEVICE(pms->pbus_dev); - for (i = 0; i < pmc->platform_bus_num_irqs; i++) { - int irqn = pmc->platform_bus_first_irq + i; - sysbus_connect_irq(s, i, qdev_get_gpio_in(mpicdev, irqn)); + s = SYS_BUS_DEVICE(pms->pbus_dev); + for (i = 0; i < pmc->platform_bus_num_irqs; i++) { + int irqn = pmc->platform_bus_first_irq + i; + sysbus_connect_irq(s, i, qdev_get_gpio_in(mpicdev, irqn)); + } + + memory_region_add_subregion(address_space_mem, + pmc->platform_bus_base, + &pms->pbus_dev->mmio); + + dinfo = drive_get(IF_PFLASH, 0, 0); + if (dinfo) { + BlockBackend *blk = blk_by_legacy_dinfo(dinfo); + BlockDriverState *bs = blk_bs(blk); + uint64_t mmio_size = memory_region_size(&pms->pbus_dev->mmio); + uint64_t size = bdrv_getlength(bs); + uint32_t sector_len = 64 * KiB; + + if (!is_power_of_2(size)) { + error_report("Size of pflash file must be a power of two."); + exit(1); } - memory_region_add_subregion(address_space_mem, - pmc->platform_bus_base, - sysbus_mmio_get_region(s, 0)); + if (size > mmio_size) { + error_report("Size of pflash file must not be bigger than %" PRIu64 + " bytes.", mmio_size); + exit(1); + } + + if (!QEMU_IS_ALIGNED(size, sector_len)) { + error_report("Size of pflash file must be a multiple of %" PRIu32 + ".", sector_len); + exit(1); + } + + dev = qdev_new(TYPE_PFLASH_CFI01); + qdev_prop_set_drive(dev, "drive", blk); + qdev_prop_set_uint32(dev, "num-blocks", size / sector_len); + qdev_prop_set_uint64(dev, "sector-length", sector_len); + qdev_prop_set_uint8(dev, "width", 2); + qdev_prop_set_bit(dev, "big-endian", true); + qdev_prop_set_uint16(dev, "id0", 0x89); + qdev_prop_set_uint16(dev, "id1", 0x18); + qdev_prop_set_uint16(dev, "id2", 0x0000); + qdev_prop_set_uint16(dev, "id3", 0x0); + qdev_prop_set_string(dev, "name", "e500.flash"); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + memory_region_add_subregion(&pms->pbus_dev->mmio, 0, + pflash_cfi01_get_memory(PFLASH_CFI01(dev))); } /* diff --git a/hw/ppc/e500.h b/hw/ppc/e500.h index 1e5853b032..68f754ce50 100644 --- a/hw/ppc/e500.h +++ b/hw/ppc/e500.h @@ -27,7 +27,6 @@ struct PPCE500MachineClass { int mpic_version; bool has_mpc8xxx_gpio; - bool has_platform_bus; hwaddr platform_bus_base; hwaddr platform_bus_size; int platform_bus_first_irq; diff --git a/hw/ppc/e500plat.c b/hw/ppc/e500plat.c index fc911bbb7b..5bb1c603da 100644 --- a/hw/ppc/e500plat.c +++ b/hw/ppc/e500plat.c @@ -86,7 +86,6 @@ static void e500plat_machine_class_init(ObjectClass *oc, void *data) pmc->fixup_devtree = e500plat_fixup_devtree; pmc->mpic_version = OPENPIC_MODEL_FSL_MPIC_42; pmc->has_mpc8xxx_gpio = true; - pmc->has_platform_bus = true; pmc->platform_bus_base = 0xf00000000ULL; pmc->platform_bus_size = 128 * MiB; pmc->platform_bus_first_irq = 5; diff --git a/hw/ppc/mac.h b/hw/ppc/mac.h deleted file mode 100644 index 22c8408078..0000000000 --- 
a/hw/ppc/mac.h +++ /dev/null @@ -1,108 +0,0 @@ -/* - * QEMU PowerMac emulation shared definitions and prototypes - * - * Copyright (c) 2004-2007 Fabrice Bellard - * Copyright (c) 2007 Jocelyn Mayer - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#ifndef PPC_MAC_H -#define PPC_MAC_H - -#include "qemu/units.h" -#include "exec/memory.h" -#include "hw/boards.h" -#include "hw/sysbus.h" -#include "hw/input/adb.h" -#include "hw/misc/mos6522.h" -#include "hw/pci/pci_host.h" -#include "hw/pci-host/uninorth.h" -#include "qom/object.h" - -/* SMP is not enabled, for now */ -#define MAX_CPUS 1 - -#define NVRAM_SIZE 0x2000 -#define PROM_FILENAME "openbios-ppc" - -#define KERNEL_LOAD_ADDR 0x01000000 -#define KERNEL_GAP 0x00100000 - -#define ESCC_CLOCK 3686400 - -/* Old World IRQs */ -#define OLDWORLD_CUDA_IRQ 0x12 -#define OLDWORLD_ESCCB_IRQ 0x10 -#define OLDWORLD_ESCCA_IRQ 0xf -#define OLDWORLD_IDE0_IRQ 0xd -#define OLDWORLD_IDE0_DMA_IRQ 0x2 -#define OLDWORLD_IDE1_IRQ 0xe -#define OLDWORLD_IDE1_DMA_IRQ 0x3 - -/* New World IRQs */ -#define NEWWORLD_CUDA_IRQ 0x19 -#define NEWWORLD_PMU_IRQ 0x19 -#define NEWWORLD_ESCCB_IRQ 0x24 -#define NEWWORLD_ESCCA_IRQ 0x25 -#define NEWWORLD_IDE0_IRQ 0xd -#define NEWWORLD_IDE0_DMA_IRQ 0x2 -#define NEWWORLD_IDE1_IRQ 0xe -#define NEWWORLD_IDE1_DMA_IRQ 0x3 -#define NEWWORLD_EXTING_GPIO1 0x2f -#define NEWWORLD_EXTING_GPIO9 0x37 - -/* Core99 machine */ -#define TYPE_CORE99_MACHINE MACHINE_TYPE_NAME("mac99") -typedef struct Core99MachineState Core99MachineState; -DECLARE_INSTANCE_CHECKER(Core99MachineState, CORE99_MACHINE, - TYPE_CORE99_MACHINE) - -#define CORE99_VIA_CONFIG_CUDA 0x0 -#define CORE99_VIA_CONFIG_PMU 0x1 -#define CORE99_VIA_CONFIG_PMU_ADB 0x2 - -struct Core99MachineState { - /*< private >*/ - MachineState parent; - - uint8_t via_config; -}; - -/* Grackle PCI */ -#define TYPE_GRACKLE_PCI_HOST_BRIDGE "grackle-pcihost" - -/* Mac NVRAM */ -#define TYPE_MACIO_NVRAM "macio-nvram" -OBJECT_DECLARE_SIMPLE_TYPE(MacIONVRAMState, MACIO_NVRAM) - -struct MacIONVRAMState { - /*< private >*/ - SysBusDevice parent_obj; - /*< public >*/ - - uint32_t size; - uint32_t it_shift; - - MemoryRegion mem; - uint8_t *data; -}; - -void pmac_format_nvram_partition (MacIONVRAMState *nvr, int len); -#endif /* PPC_MAC_H */ diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c index 7bb7ac3997..601ea518f8 100644 --- a/hw/ppc/mac_newworld.c +++ b/hw/ppc/mac_newworld.c @@ -47,12 +47,14 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include 
"qemu/datadir.h" +#include "qemu/units.h" #include "qapi/error.h" #include "hw/ppc/ppc.h" #include "hw/qdev-properties.h" -#include "hw/ppc/mac.h" +#include "hw/nvram/mac_nvram.h" +#include "hw/boards.h" +#include "hw/pci-host/uninorth.h" #include "hw/input/adb.h" #include "hw/ppc/mac_dbdma.h" #include "hw/pci/pci.h" @@ -81,9 +83,31 @@ #define NDRV_VGA_FILENAME "qemu_vga.ndrv" +#define PROM_FILENAME "openbios-ppc" #define PROM_BASE 0xfff00000 #define PROM_SIZE (1 * MiB) +#define KERNEL_LOAD_ADDR 0x01000000 +#define KERNEL_GAP 0x00100000 + +#define TYPE_CORE99_MACHINE MACHINE_TYPE_NAME("mac99") +typedef struct Core99MachineState Core99MachineState; +DECLARE_INSTANCE_CHECKER(Core99MachineState, CORE99_MACHINE, + TYPE_CORE99_MACHINE) + +typedef enum { + CORE99_VIA_CONFIG_CUDA = 0, + CORE99_VIA_CONFIG_PMU, + CORE99_VIA_CONFIG_PMU_ADB +} Core99ViaConfig; + +struct Core99MachineState { + /*< private >*/ + MachineState parent; + + Core99ViaConfig via_config; +}; + static void fw_cfg_boot_set(void *opaque, const char *boot_device, Error **errp) { @@ -107,45 +131,32 @@ static void ppc_core99_reset(void *opaque) /* PowerPC Mac99 hardware initialisation */ static void ppc_core99_init(MachineState *machine) { - ram_addr_t ram_size = machine->ram_size; - const char *bios_name = machine->firmware ?: PROM_FILENAME; - const char *kernel_filename = machine->kernel_filename; - const char *kernel_cmdline = machine->kernel_cmdline; - const char *initrd_filename = machine->initrd_filename; - const char *boot_device = machine->boot_order; Core99MachineState *core99_machine = CORE99_MACHINE(machine); PowerPCCPU *cpu = NULL; CPUPPCState *env = NULL; char *filename; IrqLines *openpic_irqs; - int linux_boot, i, j, k; + int i, j, k, ppc_boot_device, machine_arch, bios_size = -1; + const char *bios_name = machine->firmware ?: PROM_FILENAME; MemoryRegion *bios = g_new(MemoryRegion, 1); - hwaddr kernel_base, initrd_base, cmdline_base = 0; - long kernel_size, initrd_size; - UNINHostState *uninorth_pci; + hwaddr kernel_base = 0, initrd_base = 0, cmdline_base = 0; + long kernel_size = 0, initrd_size = 0; PCIBus *pci_bus; - PCIDevice *macio; - ESCCState *escc; bool has_pmu, has_adb; + Object *macio; MACIOIDEState *macio_ide; BusState *adb_bus; MacIONVRAMState *nvr; - int bios_size; - int ppc_boot_device; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; void *fw_cfg; - int machine_arch; SysBusDevice *s; - DeviceState *dev, *pic_dev; + DeviceState *dev, *pic_dev, *uninorth_pci_dev; DeviceState *uninorth_internal_dev = NULL, *uninorth_agp_dev = NULL; hwaddr nvram_addr = 0xFFF04000; - uint64_t tbfreq; - unsigned int smp_cpus = machine->smp.cpus; - - linux_boot = (kernel_filename != NULL); + uint64_t tbfreq = kvm_enabled() ? 
kvmppc_get_tbfreq() : TBFREQ; /* init CPUs */ - for (i = 0; i < smp_cpus; i++) { + for (i = 0; i < machine->smp.cpus; i++) { cpu = POWERPC_CPU(cpu_create(machine->cpu_type)); env = &cpu->env; @@ -177,68 +188,62 @@ static void ppc_core99_init(MachineState *machine) bios_size = load_image_targphys(filename, PROM_BASE, PROM_SIZE); } g_free(filename); - } else { - bios_size = -1; } if (bios_size < 0 || bios_size > PROM_SIZE) { error_report("could not load PowerPC bios '%s'", bios_name); exit(1); } - if (linux_boot) { - int bswap_needed; + if (machine->kernel_filename) { + int bswap_needed = 0; #ifdef BSWAP_NEEDED bswap_needed = 1; -#else - bswap_needed = 0; #endif kernel_base = KERNEL_LOAD_ADDR; - - kernel_size = load_elf(kernel_filename, NULL, + kernel_size = load_elf(machine->kernel_filename, NULL, translate_kernel_address, NULL, NULL, NULL, NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0); - if (kernel_size < 0) - kernel_size = load_aout(kernel_filename, kernel_base, - ram_size - kernel_base, bswap_needed, - TARGET_PAGE_SIZE); - if (kernel_size < 0) - kernel_size = load_image_targphys(kernel_filename, - kernel_base, - ram_size - kernel_base); if (kernel_size < 0) { - error_report("could not load kernel '%s'", kernel_filename); + kernel_size = load_aout(machine->kernel_filename, kernel_base, + machine->ram_size - kernel_base, + bswap_needed, TARGET_PAGE_SIZE); + } + if (kernel_size < 0) { + kernel_size = load_image_targphys(machine->kernel_filename, + kernel_base, + machine->ram_size - kernel_base); + } + if (kernel_size < 0) { + error_report("could not load kernel '%s'", + machine->kernel_filename); exit(1); } /* load initrd */ - if (initrd_filename) { + if (machine->initrd_filename) { initrd_base = TARGET_PAGE_ALIGN(kernel_base + kernel_size + KERNEL_GAP); - initrd_size = load_image_targphys(initrd_filename, initrd_base, - ram_size - initrd_base); + initrd_size = load_image_targphys(machine->initrd_filename, + initrd_base, + machine->ram_size - initrd_base); if (initrd_size < 0) { error_report("could not load initial ram disk '%s'", - initrd_filename); + machine->initrd_filename); exit(1); } cmdline_base = TARGET_PAGE_ALIGN(initrd_base + initrd_size); } else { - initrd_base = 0; - initrd_size = 0; cmdline_base = TARGET_PAGE_ALIGN(kernel_base + kernel_size + KERNEL_GAP); } ppc_boot_device = 'm'; } else { - kernel_base = 0; - kernel_size = 0; - initrd_base = 0; - initrd_size = 0; ppc_boot_device = '\0'; /* We consider that NewWorld PowerMac never have any floppy drive * For now, OHW cannot boot from the network. 
*/ - for (i = 0; boot_device[i] != '\0'; i++) { - if (boot_device[i] >= 'c' && boot_device[i] <= 'f') { - ppc_boot_device = boot_device[i]; + for (i = 0; machine->boot_config.order[i] != '\0'; i++) { + if (machine->boot_config.order[i] >= 'c' && + machine->boot_config.order[i] <= 'f') { + ppc_boot_device = machine->boot_config.order[i]; break; } } @@ -248,45 +253,39 @@ static void ppc_core99_init(MachineState *machine) } } - /* UniN init */ - dev = qdev_new(TYPE_UNI_NORTH); - s = SYS_BUS_DEVICE(dev); - sysbus_realize_and_unref(s, &error_fatal); - memory_region_add_subregion(get_system_memory(), 0xf8000000, - sysbus_mmio_get_region(s, 0)); - - openpic_irqs = g_new0(IrqLines, smp_cpus); - for (i = 0; i < smp_cpus; i++) { + openpic_irqs = g_new0(IrqLines, machine->smp.cpus); + dev = DEVICE(cpu); + for (i = 0; i < machine->smp.cpus; i++) { /* Mac99 IRQ connection between OpenPIC outputs pins * and PowerPC input pins */ switch (PPC_INPUT(env)) { case PPC_FLAGS_INPUT_6xx: openpic_irqs[i].irq[OPENPIC_OUTPUT_INT] = - ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT]; + qdev_get_gpio_in(dev, PPC6xx_INPUT_INT); openpic_irqs[i].irq[OPENPIC_OUTPUT_CINT] = - ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT]; + qdev_get_gpio_in(dev, PPC6xx_INPUT_INT); openpic_irqs[i].irq[OPENPIC_OUTPUT_MCK] = - ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_MCP]; + qdev_get_gpio_in(dev, PPC6xx_INPUT_MCP); /* Not connected ? */ openpic_irqs[i].irq[OPENPIC_OUTPUT_DEBUG] = NULL; /* Check this */ openpic_irqs[i].irq[OPENPIC_OUTPUT_RESET] = - ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_HRESET]; + qdev_get_gpio_in(dev, PPC6xx_INPUT_HRESET); break; #if defined(TARGET_PPC64) case PPC_FLAGS_INPUT_970: openpic_irqs[i].irq[OPENPIC_OUTPUT_INT] = - ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_INT]; + qdev_get_gpio_in(dev, PPC970_INPUT_INT); openpic_irqs[i].irq[OPENPIC_OUTPUT_CINT] = - ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_INT]; + qdev_get_gpio_in(dev, PPC970_INPUT_INT); openpic_irqs[i].irq[OPENPIC_OUTPUT_MCK] = - ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_MCP]; + qdev_get_gpio_in(dev, PPC970_INPUT_MCP); /* Not connected ? 
*/ openpic_irqs[i].irq[OPENPIC_OUTPUT_DEBUG] = NULL; /* Check this */ openpic_irqs[i].irq[OPENPIC_OUTPUT_RESET] = - ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_HRESET]; + qdev_get_gpio_in(dev, PPC970_INPUT_HRESET); break; #endif /* defined(TARGET_PPC64) */ default: @@ -295,24 +294,29 @@ static void ppc_core99_init(MachineState *machine) } } + /* UniN init */ + s = SYS_BUS_DEVICE(qdev_new(TYPE_UNI_NORTH)); + sysbus_realize_and_unref(s, &error_fatal); + memory_region_add_subregion(get_system_memory(), 0xf8000000, + sysbus_mmio_get_region(s, 0)); + if (PPC_INPUT(env) == PPC_FLAGS_INPUT_970) { + machine_arch = ARCH_MAC99_U3; /* 970 gets a U3 bus */ /* Uninorth AGP bus */ - dev = qdev_new(TYPE_U3_AGP_HOST_BRIDGE); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - uninorth_pci = U3_AGP_HOST_BRIDGE(dev); - s = SYS_BUS_DEVICE(dev); + uninorth_pci_dev = qdev_new(TYPE_U3_AGP_HOST_BRIDGE); + s = SYS_BUS_DEVICE(uninorth_pci_dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, 0xf0800000); + sysbus_mmio_map(s, 1, 0xf0c00000); /* PCI hole */ - memory_region_add_subregion(get_system_memory(), 0x80000000ULL, + memory_region_add_subregion(get_system_memory(), 0x80000000, sysbus_mmio_get_region(s, 2)); /* Register 8 MB of ISA IO space */ memory_region_add_subregion(get_system_memory(), 0xf2000000, sysbus_mmio_get_region(s, 3)); - sysbus_mmio_map(s, 0, 0xf0800000); - sysbus_mmio_map(s, 1, 0xf0c00000); - - machine_arch = ARCH_MAC99_U3; } else { + machine_arch = ARCH_MAC99; /* Use values found on a real PowerMac */ /* Uninorth AGP bus */ uninorth_agp_dev = qdev_new(TYPE_UNI_NORTH_AGP_HOST_BRIDGE); @@ -329,22 +333,19 @@ static void ppc_core99_init(MachineState *machine) sysbus_mmio_map(s, 0, 0xf4800000); sysbus_mmio_map(s, 1, 0xf4c00000); - /* Uninorth main bus */ - dev = qdev_new(TYPE_UNI_NORTH_PCI_HOST_BRIDGE); - qdev_prop_set_uint32(dev, "ofw-addr", 0xf2000000); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - uninorth_pci = UNI_NORTH_PCI_HOST_BRIDGE(dev); - s = SYS_BUS_DEVICE(dev); + /* Uninorth main bus - this must be last to make it the default */ + uninorth_pci_dev = qdev_new(TYPE_UNI_NORTH_PCI_HOST_BRIDGE); + qdev_prop_set_uint32(uninorth_pci_dev, "ofw-addr", 0xf2000000); + s = SYS_BUS_DEVICE(uninorth_pci_dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, 0xf2800000); + sysbus_mmio_map(s, 1, 0xf2c00000); /* PCI hole */ - memory_region_add_subregion(get_system_memory(), 0x80000000ULL, + memory_region_add_subregion(get_system_memory(), 0x80000000, sysbus_mmio_get_region(s, 2)); /* Register 8 MB of ISA IO space */ memory_region_add_subregion(get_system_memory(), 0xf2000000, sysbus_mmio_get_region(s, 3)); - sysbus_mmio_map(s, 0, 0xf2800000); - sysbus_mmio_map(s, 1, 0xf2c00000); - - machine_arch = ARCH_MAC99; } machine->usb |= defaults_enabled() && !machine->usb_disabled; @@ -352,32 +353,25 @@ static void ppc_core99_init(MachineState *machine) has_adb = (core99_machine->via_config == CORE99_VIA_CONFIG_CUDA || core99_machine->via_config == CORE99_VIA_CONFIG_PMU_ADB); - /* Timebase Frequency */ - if (kvm_enabled()) { - tbfreq = kvmppc_get_tbfreq(); - } else { - tbfreq = TBFREQ; - } - /* init basic PC hardware */ - pci_bus = PCI_HOST_BRIDGE(uninorth_pci)->bus; + pci_bus = PCI_HOST_BRIDGE(uninorth_pci_dev)->bus; /* MacIO */ - macio = pci_new(-1, TYPE_NEWWORLD_MACIO); + macio = OBJECT(pci_new(-1, TYPE_NEWWORLD_MACIO)); dev = DEVICE(macio); qdev_prop_set_uint64(dev, "frequency", tbfreq); qdev_prop_set_bit(dev, "has-pmu", has_pmu); 
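(Editorial aside, not part of the patch.) One pattern repeats across the e500, Mac99 and Heathrow hunks: instead of dereferencing the CPU's private env->irq_inputs array, the boards now ask the CPU qdev for its named input GPIO and wire the interrupt controller output to it. Reduced to two lines (variable names illustrative):

    qemu_irq cpu_int = qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_INT);
    qdev_connect_gpio_out(pic_dev, 0, cpu_int); /* PIC output -> CPU INT pin */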
qdev_prop_set_bit(dev, "has-adb", has_adb); - escc = ESCC(object_resolve_path_component(OBJECT(macio), "escc")); - qdev_prop_set_chr(DEVICE(escc), "chrA", serial_hd(0)); - qdev_prop_set_chr(DEVICE(escc), "chrB", serial_hd(1)); + dev = DEVICE(object_resolve_path_component(macio, "escc")); + qdev_prop_set_chr(dev, "chrA", serial_hd(0)); + qdev_prop_set_chr(dev, "chrB", serial_hd(1)); - pci_realize_and_unref(macio, pci_bus, &error_fatal); + pci_realize_and_unref(PCI_DEVICE(macio), pci_bus, &error_fatal); - pic_dev = DEVICE(object_resolve_path_component(OBJECT(macio), "pic")); + pic_dev = DEVICE(object_resolve_path_component(macio, "pic")); for (i = 0; i < 4; i++) { - qdev_connect_gpio_out(DEVICE(uninorth_pci), i, + qdev_connect_gpio_out(uninorth_pci_dev, i, qdev_get_gpio_in(pic_dev, 0x1b + i)); } @@ -399,7 +393,7 @@ static void ppc_core99_init(MachineState *machine) /* OpenPIC */ s = SYS_BUS_DEVICE(pic_dev); k = 0; - for (i = 0; i < smp_cpus; i++) { + for (i = 0; i < machine->smp.cpus; i++) { for (j = 0; j < OPENPIC_OUTPUT_NB; j++) { sysbus_connect_irq(s, k++, openpic_irqs[i].irq[j]); } @@ -409,19 +403,17 @@ static void ppc_core99_init(MachineState *machine) /* We only emulate 2 out of 3 IDE controllers for now */ ide_drive_get(hd, ARRAY_SIZE(hd)); - macio_ide = MACIO_IDE(object_resolve_path_component(OBJECT(macio), - "ide[0]")); + macio_ide = MACIO_IDE(object_resolve_path_component(macio, "ide[0]")); macio_ide_init_drives(macio_ide, hd); - macio_ide = MACIO_IDE(object_resolve_path_component(OBJECT(macio), - "ide[1]")); + macio_ide = MACIO_IDE(object_resolve_path_component(macio, "ide[1]")); macio_ide_init_drives(macio_ide, &hd[MAX_IDE_DEVS]); if (has_adb) { if (has_pmu) { - dev = DEVICE(object_resolve_path_component(OBJECT(macio), "pmu")); + dev = DEVICE(object_resolve_path_component(macio, "pmu")); } else { - dev = DEVICE(object_resolve_path_component(OBJECT(macio), "cuda")); + dev = DEVICE(object_resolve_path_component(macio, "cuda")); } adb_bus = qdev_get_child_bus(dev, "adb.0"); @@ -456,18 +448,18 @@ static void ppc_core99_init(MachineState *machine) } /* The NewWorld NVRAM is not located in the MacIO device */ - if (kvm_enabled() && qemu_real_host_page_size > 4096) { + if (kvm_enabled() && qemu_real_host_page_size() > 4096) { /* We can't combine read-write and read-only in a single page, so move the NVRAM out of ROM again for KVM */ nvram_addr = 0xFFE00000; } dev = qdev_new(TYPE_MACIO_NVRAM); - qdev_prop_set_uint32(dev, "size", 0x2000); + qdev_prop_set_uint32(dev, "size", MACIO_NVRAM_SIZE); qdev_prop_set_uint32(dev, "it_shift", 1); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, nvram_addr); nvr = MACIO_NVRAM(dev); - pmac_format_nvram_partition(nvr, 0x2000); + pmac_format_nvram_partition(nvr, MACIO_NVRAM_SIZE); /* No PCI init: the BIOS will do it */ dev = qdev_new(TYPE_FW_CFG_MEM); @@ -481,15 +473,16 @@ static void ppc_core99_init(MachineState *machine) sysbus_mmio_map(s, 0, CFG_ADDR); sysbus_mmio_map(s, 1, CFG_ADDR + 2); - fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus); + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)machine->smp.cpus); fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)machine->smp.max_cpus); - fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); + fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)machine->ram_size); fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, machine_arch); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_base); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); - if 
(kernel_cmdline) { + if (machine->kernel_cmdline) { fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, cmdline_base); - pstrcpy_targphys("cmdline", cmdline_base, TARGET_PAGE_SIZE, kernel_cmdline); + pstrcpy_targphys("cmdline", cmdline_base, TARGET_PAGE_SIZE, + machine->kernel_cmdline); } else { fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, 0); } @@ -581,7 +574,8 @@ static void core99_machine_class_init(ObjectClass *oc, void *data) mc->desc = "Mac99 based PowerMAC"; mc->init = ppc_core99_init; mc->block_default_type = IF_IDE; - mc->max_cpus = MAX_CPUS; + /* SMP is not supported currently */ + mc->max_cpus = 1; mc->default_boot_order = "cd"; mc->default_display = "std"; mc->kvm_type = core99_kvm_type; diff --git a/hw/ppc/mac_oldworld.c b/hw/ppc/mac_oldworld.c index de2be960e6..558c639202 100644 --- a/hw/ppc/mac_oldworld.c +++ b/hw/ppc/mac_oldworld.c @@ -25,19 +25,19 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qemu/units.h" #include "qapi/error.h" #include "hw/ppc/ppc.h" #include "hw/qdev-properties.h" -#include "mac.h" +#include "hw/boards.h" #include "hw/input/adb.h" #include "sysemu/sysemu.h" #include "net/net.h" #include "hw/isa/isa.h" #include "hw/pci/pci.h" #include "hw/pci/pci_host.h" +#include "hw/pci-host/grackle.h" #include "hw/nvram/fw_cfg.h" #include "hw/char/escc.h" #include "hw/misc/macio/macio.h" @@ -57,10 +57,15 @@ #define NDRV_VGA_FILENAME "qemu_vga.ndrv" -#define GRACKLE_BASE 0xfec00000 +#define PROM_FILENAME "openbios-ppc" #define PROM_BASE 0xffc00000 #define PROM_SIZE (4 * MiB) +#define KERNEL_LOAD_ADDR 0x01000000 +#define KERNEL_GAP 0x00100000 + +#define GRACKLE_BASE 0xfec00000 + static void fw_cfg_boot_set(void *opaque, const char *boot_device, Error **errp) { @@ -81,33 +86,28 @@ static void ppc_heathrow_reset(void *opaque) static void ppc_heathrow_init(MachineState *machine) { - ram_addr_t ram_size = machine->ram_size; const char *bios_name = machine->firmware ?: PROM_FILENAME; - const char *boot_device = machine->boot_order; PowerPCCPU *cpu = NULL; CPUPPCState *env = NULL; char *filename; - int i; + int i, bios_size = -1; MemoryRegion *bios = g_new(MemoryRegion, 1); - uint32_t kernel_base, initrd_base, cmdline_base = 0; - int32_t kernel_size, initrd_size; + uint64_t bios_addr; + uint32_t kernel_base = 0, initrd_base = 0, cmdline_base = 0; + int32_t kernel_size = 0, initrd_size = 0; PCIBus *pci_bus; - PCIDevice *macio; + Object *macio; MACIOIDEState *macio_ide; - ESCCState *escc; SysBusDevice *s; DeviceState *dev, *pic_dev, *grackle_dev; BusState *adb_bus; - uint64_t bios_addr; - int bios_size; - unsigned int smp_cpus = machine->smp.cpus; uint16_t ppc_boot_device; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; void *fw_cfg; - uint64_t tbfreq; + uint64_t tbfreq = kvm_enabled() ? 
kvmppc_get_tbfreq() : TBFREQ; /* init CPUs */ - for (i = 0; i < smp_cpus; i++) { + for (i = 0; i < machine->smp.cpus; i++) { cpu = POWERPC_CPU(cpu_create(machine->cpu_type)); env = &cpu->env; @@ -117,9 +117,9 @@ static void ppc_heathrow_init(MachineState *machine) } /* allocate RAM */ - if (ram_size > 2047 * MiB) { + if (machine->ram_size > 2047 * MiB) { error_report("Too much memory for this machine: %" PRId64 " MB, " - "maximum 2047 MB", ram_size / MiB); + "maximum 2047 MB", machine->ram_size / MiB); exit(1); } @@ -144,8 +144,6 @@ static void ppc_heathrow_init(MachineState *machine) bios_addr = PROM_BASE; } g_free(filename); - } else { - bios_size = -1; } if (bios_size < 0 || bios_addr - PROM_BASE + bios_size > PROM_SIZE) { error_report("could not load PowerPC bios '%s'", bios_name); @@ -153,25 +151,25 @@ static void ppc_heathrow_init(MachineState *machine) } if (machine->kernel_filename) { - int bswap_needed; + int bswap_needed = 0; #ifdef BSWAP_NEEDED bswap_needed = 1; -#else - bswap_needed = 0; #endif kernel_base = KERNEL_LOAD_ADDR; kernel_size = load_elf(machine->kernel_filename, NULL, translate_kernel_address, NULL, NULL, NULL, NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0); - if (kernel_size < 0) + if (kernel_size < 0) { kernel_size = load_aout(machine->kernel_filename, kernel_base, - ram_size - kernel_base, bswap_needed, - TARGET_PAGE_SIZE); - if (kernel_size < 0) + machine->ram_size - kernel_base, + bswap_needed, TARGET_PAGE_SIZE); + } + if (kernel_size < 0) { kernel_size = load_image_targphys(machine->kernel_filename, kernel_base, - ram_size - kernel_base); + machine->ram_size - kernel_base); + } if (kernel_size < 0) { error_report("could not load kernel '%s'", machine->kernel_filename); @@ -183,7 +181,7 @@ static void ppc_heathrow_init(MachineState *machine) KERNEL_GAP); initrd_size = load_image_targphys(machine->initrd_filename, initrd_base, - ram_size - initrd_base); + machine->ram_size - initrd_base); if (initrd_size < 0) { error_report("could not load initial ram disk '%s'", machine->initrd_filename); @@ -191,30 +189,27 @@ static void ppc_heathrow_init(MachineState *machine) } cmdline_base = TARGET_PAGE_ALIGN(initrd_base + initrd_size); } else { - initrd_base = 0; - initrd_size = 0; cmdline_base = TARGET_PAGE_ALIGN(kernel_base + kernel_size + KERNEL_GAP); } ppc_boot_device = 'm'; } else { - kernel_base = 0; - kernel_size = 0; - initrd_base = 0; - initrd_size = 0; ppc_boot_device = '\0'; - for (i = 0; boot_device[i] != '\0'; i++) { - /* TOFIX: for now, the second IDE channel is not properly + for (i = 0; machine->boot_config.order[i] != '\0'; i++) { + /* + * TOFIX: for now, the second IDE channel is not properly * used by OHW. The Mac floppy disk are not emulated. * For now, OHW cannot boot from the network. 
*/ #if 0 - if (boot_device[i] >= 'a' && boot_device[i] <= 'f') { - ppc_boot_device = boot_device[i]; + if (machine->boot_config.order[i] >= 'a' && + machine->boot_config.order[i] <= 'f') { + ppc_boot_device = machine->boot_config.order[i]; break; } #else - if (boot_device[i] >= 'c' && boot_device[i] <= 'd') { - ppc_boot_device = boot_device[i]; + if (machine->boot_config.order[i] >= 'c' && + machine->boot_config.order[i] <= 'd') { + ppc_boot_device = machine->boot_config.order[i]; break; } #endif @@ -225,13 +220,6 @@ static void ppc_heathrow_init(MachineState *machine) } } - /* Timebase Frequency */ - if (kvm_enabled()) { - tbfreq = kvmppc_get_tbfreq(); - } else { - tbfreq = TBFREQ; - } - /* Grackle PCI host bridge */ grackle_dev = qdev_new(TYPE_GRACKLE_PCI_HOST_BRIDGE); qdev_prop_set_uint32(grackle_dev, "ofw-addr", 0x80000000); @@ -250,29 +238,28 @@ static void ppc_heathrow_init(MachineState *machine) pci_bus = PCI_HOST_BRIDGE(grackle_dev)->bus; /* MacIO */ - macio = pci_new(PCI_DEVFN(16, 0), TYPE_OLDWORLD_MACIO); - dev = DEVICE(macio); - qdev_prop_set_uint64(dev, "frequency", tbfreq); + macio = OBJECT(pci_new(PCI_DEVFN(16, 0), TYPE_OLDWORLD_MACIO)); + qdev_prop_set_uint64(DEVICE(macio), "frequency", tbfreq); - escc = ESCC(object_resolve_path_component(OBJECT(macio), "escc")); - qdev_prop_set_chr(DEVICE(escc), "chrA", serial_hd(0)); - qdev_prop_set_chr(DEVICE(escc), "chrB", serial_hd(1)); + dev = DEVICE(object_resolve_path_component(macio, "escc")); + qdev_prop_set_chr(dev, "chrA", serial_hd(0)); + qdev_prop_set_chr(dev, "chrB", serial_hd(1)); - pci_realize_and_unref(macio, pci_bus, &error_fatal); + pci_realize_and_unref(PCI_DEVICE(macio), pci_bus, &error_fatal); - pic_dev = DEVICE(object_resolve_path_component(OBJECT(macio), "pic")); + pic_dev = DEVICE(object_resolve_path_component(macio, "pic")); for (i = 0; i < 4; i++) { qdev_connect_gpio_out(grackle_dev, i, qdev_get_gpio_in(pic_dev, 0x15 + i)); } /* Connect the heathrow PIC outputs to the 6xx bus */ - for (i = 0; i < smp_cpus; i++) { + for (i = 0; i < machine->smp.cpus; i++) { switch (PPC_INPUT(env)) { case PPC_FLAGS_INPUT_6xx: /* XXX: we register only 1 output pin for heathrow PIC */ qdev_connect_gpio_out(pic_dev, 0, - ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT]); + qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_INT)); break; default: error_report("Bus model not supported on OldWorld Mac machine"); @@ -288,16 +275,14 @@ static void ppc_heathrow_init(MachineState *machine) /* MacIO IDE */ ide_drive_get(hd, ARRAY_SIZE(hd)); - macio_ide = MACIO_IDE(object_resolve_path_component(OBJECT(macio), - "ide[0]")); + macio_ide = MACIO_IDE(object_resolve_path_component(macio, "ide[0]")); macio_ide_init_drives(macio_ide, hd); - macio_ide = MACIO_IDE(object_resolve_path_component(OBJECT(macio), - "ide[1]")); + macio_ide = MACIO_IDE(object_resolve_path_component(macio, "ide[1]")); macio_ide_init_drives(macio_ide, &hd[MAX_IDE_DEVS]); /* MacIO CUDA/ADB */ - dev = DEVICE(object_resolve_path_component(OBJECT(macio), "cuda")); + dev = DEVICE(object_resolve_path_component(macio, "cuda")); adb_bus = qdev_get_child_bus(dev, "adb.0"); dev = qdev_new(TYPE_ADB_KEYBOARD); qdev_realize_and_unref(dev, adb_bus, &error_fatal); @@ -308,8 +293,9 @@ static void ppc_heathrow_init(MachineState *machine) pci_create_simple(pci_bus, -1, "pci-ohci"); } - if (graphic_depth != 15 && graphic_depth != 32 && graphic_depth != 8) + if (graphic_depth != 15 && graphic_depth != 32 && graphic_depth != 8) { graphic_depth = 15; + } /* No PCI init: the BIOS will do it */ @@ -324,9 +310,9 
@@ static void ppc_heathrow_init(MachineState *machine) sysbus_mmio_map(s, 0, CFG_ADDR); sysbus_mmio_map(s, 1, CFG_ADDR + 2); - fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus); + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)machine->smp.cpus); fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)machine->smp.max_cpus); - fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); + fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)machine->ram_size); fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, ARCH_HEATHROW); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_base); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); @@ -423,7 +409,8 @@ static void heathrow_class_init(ObjectClass *oc, void *data) mc->desc = "Heathrow based PowerMAC"; mc->init = ppc_heathrow_init; mc->block_default_type = IF_IDE; - mc->max_cpus = MAX_CPUS; + /* SMP is not supported currently */ + mc->max_cpus = 1; #ifndef TARGET_PPC64 mc->is_default = true; #endif diff --git a/hw/ppc/meson.build b/hw/ppc/meson.build index aa4c8e6a2e..c927337da0 100644 --- a/hw/ppc/meson.build +++ b/hw/ppc/meson.build @@ -46,6 +46,7 @@ ppc_ss.add(when: 'CONFIG_POWERNV', if_true: files( 'pnv_lpc.c', 'pnv_psi.c', 'pnv_occ.c', + 'pnv_sbe.c', 'pnv_bmc.c', 'pnv_homer.c', 'pnv_pnor.c', @@ -58,8 +59,9 @@ ppc_ss.add(when: 'CONFIG_PPC440', if_true: files( 'ppc440_bamboo.c', 'ppc440_pcix.c', 'ppc440_uc.c')) ppc_ss.add(when: 'CONFIG_PPC4XX', if_true: files( + 'ppc4xx_devs.c', 'ppc4xx_pci.c', - 'ppc4xx_devs.c')) + 'ppc4xx_sdram.c')) ppc_ss.add(when: 'CONFIG_SAM460EX', if_true: files('sam460ex.c')) # PReP ppc_ss.add(when: 'CONFIG_PREP', if_true: files('prep.c')) @@ -70,12 +72,10 @@ ppc_ss.add(when: 'CONFIG_MAC_OLDWORLD', if_true: files('mac_oldworld.c')) # NewWorld PowerMac ppc_ss.add(when: 'CONFIG_MAC_NEWWORLD', if_true: files('mac_newworld.c')) # e500 +ppc_ss.add(when: 'CONFIG_E500PLAT', if_true: files('e500plat.c')) +ppc_ss.add(when: 'CONFIG_MPC8544DS', if_true: files('mpc8544ds.c')) ppc_ss.add(when: 'CONFIG_E500', if_true: files( 'e500.c', - 'mpc8544ds.c', - 'e500plat.c' -)) -ppc_ss.add(when: 'CONFIG_E500', if_true: files( 'mpc8544_guts.c', 'ppce500_spin.c' )) diff --git a/hw/ppc/mpc8544_guts.c b/hw/ppc/mpc8544_guts.c index e8d2d51c20..a26e83d048 100644 --- a/hw/ppc/mpc8544_guts.c +++ b/hw/ppc/mpc8544_guts.c @@ -19,6 +19,7 @@ #include "qemu/osdep.h" #include "qemu/module.h" +#include "qemu/log.h" #include "sysemu/runstate.h" #include "cpu.h" #include "hw/sysbus.h" @@ -82,7 +83,9 @@ static uint64_t mpc8544_guts_read(void *opaque, hwaddr addr, value = env->spr[SPR_E500_SVR]; break; default: - fprintf(stderr, "guts: Unknown register read: %x\n", (int)addr); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Unknown register 0x%" HWADDR_PRIx "\n", + __func__, addr); break; } @@ -101,8 +104,8 @@ static void mpc8544_guts_write(void *opaque, hwaddr addr, } break; default: - fprintf(stderr, "guts: Unknown register write: %x = %x\n", - (int)addr, (unsigned)value); + qemu_log_mask(LOG_GUEST_ERROR, "%s: Unknown register 0x%" HWADDR_PRIx + " = 0x%" PRIx64 "\n", __func__, addr, value); break; } } diff --git a/hw/ppc/mpc8544ds.c b/hw/ppc/mpc8544ds.c index 81177505f0..7dd5219736 100644 --- a/hw/ppc/mpc8544ds.c +++ b/hw/ppc/mpc8544ds.c @@ -14,6 +14,7 @@ #include "sysemu/device_tree.h" #include "hw/ppc/openpic.h" #include "qemu/error-report.h" +#include "qemu/units.h" #include "cpu.h" static void mpc8544ds_fixup_devtree(void *fdt) @@ -36,7 +37,7 @@ static void mpc8544ds_init(MachineState *machine) ppce500_init(machine); } -static void 
e500plat_machine_class_init(ObjectClass *oc, void *data) +static void mpc8544ds_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); PPCE500MachineClass *pmc = PPCE500_MACHINE_CLASS(oc); @@ -45,6 +46,10 @@ static void e500plat_machine_class_init(ObjectClass *oc, void *data) pmc->pci_nr_slots = 2; pmc->fixup_devtree = mpc8544ds_fixup_devtree; pmc->mpic_version = OPENPIC_MODEL_FSL_MPIC_20; + pmc->platform_bus_base = 0xFF800000ULL; + pmc->platform_bus_size = 8 * MiB; + pmc->platform_bus_first_irq = 5; + pmc->platform_bus_num_irqs = 10; pmc->ccsrbar_base = 0xE0000000ULL; pmc->pci_mmio_base = 0xC0000000ULL; pmc->pci_mmio_bus_base = 0xC0000000ULL; @@ -63,7 +68,7 @@ static void e500plat_machine_class_init(ObjectClass *oc, void *data) static const TypeInfo mpc8544ds_info = { .name = TYPE_MPC8544DS_MACHINE, .parent = TYPE_PPCE500_MACHINE, - .class_init = e500plat_machine_class_init, + .class_init = mpc8544ds_machine_class_init, }; static void mpc8544ds_register_types(void) diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c index b8ce859f1a..f46d4bf51d 100644 --- a/hw/ppc/pegasos2.c +++ b/hw/ppc/pegasos2.c @@ -8,7 +8,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/units.h" #include "qapi/error.h" #include "hw/hw.h" @@ -22,6 +21,8 @@ #include "hw/i2c/smbus_eeprom.h" #include "hw/qdev-properties.h" #include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "sysemu/qtest.h" #include "hw/boards.h" #include "hw/loader.h" #include "hw/fw-path-provider.h" @@ -31,6 +32,8 @@ #include "sysemu/kvm.h" #include "kvm_ppc.h" #include "exec/address-spaces.h" +#include "qom/qom-qobject.h" +#include "qapi/qmp/qdict.h" #include "trace.h" #include "qemu/datadir.h" #include "sysemu/device_tree.h" @@ -52,11 +55,13 @@ #define BUS_FREQ_HZ 133333333 +#define PCI0_CFG_ADDR 0xcf8 #define PCI0_MEM_BASE 0xc0000000 #define PCI0_MEM_SIZE 0x20000000 #define PCI0_IO_BASE 0xf8000000 #define PCI0_IO_SIZE 0x10000 +#define PCI1_CFG_ADDR 0xc78 #define PCI1_MEM_BASE 0x80000000 #define PCI1_MEM_SIZE 0x40000000 #define PCI1_IO_BASE 0xfe000000 @@ -97,7 +102,7 @@ static void pegasos2_init(MachineState *machine) CPUPPCState *env; MemoryRegion *rom = g_new(MemoryRegion, 1); PCIBus *pci_bus; - PCIDevice *dev; + PCIDevice *dev, *via; I2CBus *i2c_bus; const char *fwname = machine->firmware ?: PROM_FILENAME; char *filename; @@ -117,6 +122,10 @@ static void pegasos2_init(MachineState *machine) qemu_register_reset(pegasos2_cpu_reset, pm->cpu); /* RAM */ + if (machine->ram_size > 2 * GiB) { + error_report("RAM size more than 2 GiB is not supported"); + exit(1); + } memory_region_add_subregion(get_system_memory(), 0, machine->ram); /* allocate and load firmware */ @@ -146,34 +155,27 @@ static void pegasos2_init(MachineState *machine) /* Marvell Discovery II system controller */ pm->mv = DEVICE(sysbus_create_simple(TYPE_MV64361, -1, - ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT])); + qdev_get_gpio_in(DEVICE(pm->cpu), PPC6xx_INPUT_INT))); pci_bus = mv64361_get_pci_bus(pm->mv, 1); /* VIA VT8231 South Bridge (multifunction PCI device) */ - /* VT8231 function 0: PCI-to-ISA Bridge */ - dev = pci_create_simple_multifunction(pci_bus, PCI_DEVFN(12, 0), true, + via = pci_create_simple_multifunction(pci_bus, PCI_DEVFN(12, 0), true, TYPE_VT8231_ISA); - qdev_connect_gpio_out(DEVICE(dev), 0, + object_property_add_alias(OBJECT(machine), "rtc-time", + object_resolve_path_component(OBJECT(via), + "rtc"), + "date"); + qdev_connect_gpio_out(DEVICE(via), 0, qdev_get_gpio_in_named(pm->mv, "gpp", 31)); - 
/* VT8231 function 1: IDE Controller */ - dev = pci_create_simple(pci_bus, PCI_DEVFN(12, 1), "via-ide"); + dev = PCI_DEVICE(object_resolve_path_component(OBJECT(via), "ide")); pci_ide_create_devs(dev); - /* VT8231 function 2-3: USB Ports */ - pci_create_simple(pci_bus, PCI_DEVFN(12, 2), "vt82c686b-usb-uhci"); - pci_create_simple(pci_bus, PCI_DEVFN(12, 3), "vt82c686b-usb-uhci"); - - /* VT8231 function 4: Power Management Controller */ - dev = pci_create_simple(pci_bus, PCI_DEVFN(12, 4), TYPE_VT8231_PM); + dev = PCI_DEVICE(object_resolve_path_component(OBJECT(via), "pm")); i2c_bus = I2C_BUS(qdev_get_child_bus(DEVICE(dev), "i2c")); spd_data = spd_data_generate(DDR, machine->ram_size); smbus_eeprom_init_one(i2c_bus, 0x57, spd_data); - /* VT8231 function 5-6: AC97 Audio & Modem */ - pci_create_simple(pci_bus, PCI_DEVFN(12, 5), TYPE_VIA_AC97); - pci_create_simple(pci_bus, PCI_DEVFN(12, 6), TYPE_VIA_MC97); - /* other PC hardware */ pci_vga_init(pci_bus); @@ -190,117 +192,113 @@ static void pegasos2_init(MachineState *machine) if (!pm->vof) { warn_report("Option -kernel may be ineffective with -bios."); } + } else if (pm->vof && !qtest_enabled()) { + warn_report("Using Virtual OpenFirmware but no -kernel option."); } + if (!pm->vof && machine->kernel_cmdline && machine->kernel_cmdline[0]) { warn_report("Option -append may be ineffective with -bios."); } } -static uint32_t pegasos2_pci_config_read(AddressSpace *as, int bus, +static uint32_t pegasos2_mv_reg_read(Pegasos2MachineState *pm, + uint32_t addr, uint32_t len) +{ + MemoryRegion *r = sysbus_mmio_get_region(SYS_BUS_DEVICE(pm->mv), 0); + uint64_t val = 0xffffffffULL; + memory_region_dispatch_read(r, addr, &val, size_memop(len) | MO_LE, + MEMTXATTRS_UNSPECIFIED); + return val; +} + +static void pegasos2_mv_reg_write(Pegasos2MachineState *pm, uint32_t addr, + uint32_t len, uint32_t val) +{ + MemoryRegion *r = sysbus_mmio_get_region(SYS_BUS_DEVICE(pm->mv), 0); + memory_region_dispatch_write(r, addr, val, size_memop(len) | MO_LE, + MEMTXATTRS_UNSPECIFIED); +} + +static uint32_t pegasos2_pci_config_read(Pegasos2MachineState *pm, int bus, uint32_t addr, uint32_t len) { - hwaddr pcicfg = (bus ? 0xf1000c78 : 0xf1000cf8); - uint32_t val = 0xffffffff; + hwaddr pcicfg = bus ? PCI1_CFG_ADDR : PCI0_CFG_ADDR; + uint64_t val = 0xffffffffULL; - stl_le_phys(as, pcicfg, addr | BIT(31)); - switch (len) { - case 4: - val = ldl_le_phys(as, pcicfg + 4); - break; - case 2: - val = lduw_le_phys(as, pcicfg + 4); - break; - case 1: - val = ldub_phys(as, pcicfg + 4); - break; - default: - qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid length\n", __func__); - break; + if (len <= 4) { + pegasos2_mv_reg_write(pm, pcicfg, 4, addr | BIT(31)); + val = pegasos2_mv_reg_read(pm, pcicfg + 4, len); } return val; } -static void pegasos2_pci_config_write(AddressSpace *as, int bus, uint32_t addr, - uint32_t len, uint32_t val) +static void pegasos2_pci_config_write(Pegasos2MachineState *pm, int bus, + uint32_t addr, uint32_t len, uint32_t val) { - hwaddr pcicfg = (bus ? 0xf1000c78 : 0xf1000cf8); + hwaddr pcicfg = bus ? 
PCI1_CFG_ADDR : PCI0_CFG_ADDR; - stl_le_phys(as, pcicfg, addr | BIT(31)); - switch (len) { - case 4: - stl_le_phys(as, pcicfg + 4, val); - break; - case 2: - stw_le_phys(as, pcicfg + 4, val); - break; - case 1: - stb_phys(as, pcicfg + 4, val); - break; - default: - qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid length\n", __func__); - break; - } + pegasos2_mv_reg_write(pm, pcicfg, 4, addr | BIT(31)); + pegasos2_mv_reg_write(pm, pcicfg + 4, len, val); } -static void pegasos2_machine_reset(MachineState *machine) +static void pegasos2_machine_reset(MachineState *machine, ShutdownCause reason) { Pegasos2MachineState *pm = PEGASOS2_MACHINE(machine); - AddressSpace *as = CPU(pm->cpu)->as; void *fdt; uint64_t d[2]; int sz; - qemu_devices_reset(); + qemu_devices_reset(reason); if (!pm->vof) { return; /* Firmware should set up machine so nothing to do */ } /* Otherwise, set up devices that board firmware would normally do */ - stl_le_phys(as, 0xf1000000, 0x28020ff); - stl_le_phys(as, 0xf1000278, 0xa31fc); - stl_le_phys(as, 0xf100f300, 0x11ff0400); - stl_le_phys(as, 0xf100f10c, 0x80000000); - stl_le_phys(as, 0xf100001c, 0x8000000); - pegasos2_pci_config_write(as, 0, PCI_COMMAND, 2, PCI_COMMAND_IO | + pegasos2_mv_reg_write(pm, 0, 4, 0x28020ff); + pegasos2_mv_reg_write(pm, 0x278, 4, 0xa31fc); + pegasos2_mv_reg_write(pm, 0xf300, 4, 0x11ff0400); + pegasos2_mv_reg_write(pm, 0xf10c, 4, 0x80000000); + pegasos2_mv_reg_write(pm, 0x1c, 4, 0x8000000); + pegasos2_pci_config_write(pm, 0, PCI_COMMAND, 2, PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); - pegasos2_pci_config_write(as, 1, PCI_COMMAND, 2, PCI_COMMAND_IO | + pegasos2_pci_config_write(pm, 1, PCI_COMMAND, 2, PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 0) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) | PCI_INTERRUPT_LINE, 2, 0x9); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 0) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) | 0x50, 1, 0x2); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 1) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 1) << 8) | PCI_INTERRUPT_LINE, 2, 0x109); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 1) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 1) << 8) | PCI_CLASS_PROG, 1, 0xf); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 1) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 1) << 8) | 0x40, 1, 0xb); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 1) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 1) << 8) | 0x50, 4, 0x17171717); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 1) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 1) << 8) | PCI_COMMAND, 2, 0x87); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 2) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 2) << 8) | PCI_INTERRUPT_LINE, 2, 0x409); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 3) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 3) << 8) | PCI_INTERRUPT_LINE, 2, 0x409); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 4) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 4) << 8) | PCI_INTERRUPT_LINE, 2, 0x9); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 4) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 4) << 8) | 0x48, 4, 0xf00); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 4) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 4) << 8) | 0x40, 4, 0x558020); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 4) << 8) | + 
pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 4) << 8) | 0x90, 4, 0xd00); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 5) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 5) << 8) | PCI_INTERRUPT_LINE, 2, 0x309); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 6) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 6) << 8) | PCI_INTERRUPT_LINE, 2, 0x309); /* Device tree and VOF set up */ @@ -326,6 +324,10 @@ static void pegasos2_machine_reset(MachineState *machine) vof_build_dt(fdt, pm->vof); vof_client_open_store(fdt, pm->vof, "/chosen", "stdout", "/failsafe"); + + /* Set machine->fdt for 'dumpdtb' QMP/HMP command */ + machine->fdt = fdt; + pm->cpu->vhyp = PPC_VIRTUAL_HYPERVISOR(machine); } @@ -362,6 +364,29 @@ static target_ulong pegasos2_rtas(PowerPCCPU *cpu, Pegasos2MachineState *pm, return H_PARAMETER; } switch (token) { + case RTAS_GET_TIME_OF_DAY: + { + QObject *qo = object_property_get_qobject(qdev_get_machine(), + "rtc-time", &error_fatal); + QDict *qd = qobject_to(QDict, qo); + + if (nargs != 0 || nrets != 8 || !qd) { + stl_be_phys(as, rets, -1); + qobject_unref(qo); + return H_PARAMETER; + } + + stl_be_phys(as, rets, 0); + stl_be_phys(as, rets + 4, qdict_get_int(qd, "tm_year") + 1900); + stl_be_phys(as, rets + 8, qdict_get_int(qd, "tm_mon") + 1); + stl_be_phys(as, rets + 12, qdict_get_int(qd, "tm_mday")); + stl_be_phys(as, rets + 16, qdict_get_int(qd, "tm_hour")); + stl_be_phys(as, rets + 20, qdict_get_int(qd, "tm_min")); + stl_be_phys(as, rets + 24, qdict_get_int(qd, "tm_sec")); + stl_be_phys(as, rets + 28, 0); + qobject_unref(qo); + return H_SUCCESS; + } case RTAS_READ_PCI_CONFIG: { uint32_t addr, len, val; @@ -372,7 +397,7 @@ static target_ulong pegasos2_rtas(PowerPCCPU *cpu, Pegasos2MachineState *pm, } addr = ldl_be_phys(as, args); len = ldl_be_phys(as, args + 4); - val = pegasos2_pci_config_read(as, !(addr >> 24), + val = pegasos2_pci_config_read(pm, !(addr >> 24), addr & 0x0fffffff, len); stl_be_phys(as, rets, 0); stl_be_phys(as, rets + 4, val); @@ -389,7 +414,7 @@ static target_ulong pegasos2_rtas(PowerPCCPU *cpu, Pegasos2MachineState *pm, addr = ldl_be_phys(as, args); len = ldl_be_phys(as, args + 4); val = ldl_be_phys(as, args + 8); - pegasos2_pci_config_write(as, !(addr >> 24), + pegasos2_pci_config_write(pm, !(addr >> 24), addr & 0x0fffffff, len, val); stl_be_phys(as, rets, 0); return H_SUCCESS; @@ -402,6 +427,16 @@ static target_ulong pegasos2_rtas(PowerPCCPU *cpu, Pegasos2MachineState *pm, qemu_log_mask(LOG_UNIMP, "%c", ldl_be_phys(as, args)); stl_be_phys(as, rets, 0); return H_SUCCESS; + case RTAS_POWER_OFF: + { + if (nargs != 2 || nrets != 1) { + stl_be_phys(as, rets, -1); + return H_PARAMETER; + } + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + stl_be_phys(as, rets, 0); + return H_SUCCESS; + } default: qemu_log_mask(LOG_UNIMP, "Unknown RTAS token %u (args=%u, rets=%u)\n", token, nargs, nrets); @@ -410,6 +445,11 @@ static target_ulong pegasos2_rtas(PowerPCCPU *cpu, Pegasos2MachineState *pm, } } +static bool pegasos2_cpu_in_nested(PowerPCCPU *cpu) +{ + return false; +} + static void pegasos2_hypercall(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) { Pegasos2MachineState *pm = PEGASOS2_MACHINE(vhyp); @@ -418,7 +458,7 @@ static void pegasos2_hypercall(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) /* The TCG path should also be holding the BQL at this point */ g_assert(qemu_mutex_iothread_locked()); - if (msr_pr) { + if (FIELD_EX64(env->msr, MSR, PR)) { qemu_log_mask(LOG_GUEST_ERROR, "Hypercall made with MSR[PR]=1\n"); env->gpr[3] 
= H_PRIVILEGE; } else if (env->gpr[3] == KVMPPC_H_RTAS) { @@ -465,6 +505,7 @@ static void pegasos2_machine_class_init(ObjectClass *oc, void *data) mc->default_ram_id = "pegasos2.ram"; mc->default_ram_size = 512 * MiB; + vhc->cpu_in_nested = pegasos2_cpu_in_nested; vhc->hypercall = pegasos2_hypercall; vhc->cpu_exec_enter = vhyp_nop; vhc->cpu_exec_exit = vhyp_nop; diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c index d16dd2d080..3d01e26f84 100644 --- a/hw/ppc/pnv.c +++ b/hw/ppc/pnv.c @@ -18,7 +18,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qemu/units.h" #include "qemu/cutils.h" @@ -44,6 +43,7 @@ #include "hw/ipmi/ipmi.h" #include "target/ppc/mmu-hash64.h" #include "hw/pci/msi.h" +#include "hw/pci-host/pnv_phb.h" #include "hw/ppc/xics.h" #include "hw/qdev-properties.h" @@ -138,7 +138,7 @@ static void pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt) int smt_threads = CPU_CORE(pc)->nr_threads; CPUPPCState *env = &cpu->env; PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs); - uint32_t servers_prop[smt_threads]; + g_autofree uint32_t *servers_prop = g_new(uint32_t, smt_threads); int i; uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40), 0xffffffff, 0xffffffff}; @@ -241,7 +241,7 @@ static void pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt) servers_prop[i] = cpu_to_be32(pc->pir + i); } _FDT((fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s", - servers_prop, sizeof(servers_prop)))); + servers_prop, sizeof(*servers_prop) * smt_threads))); } static void pnv_dt_icp(PnvChip *chip, void *fdt, uint32_t pir, @@ -281,6 +281,75 @@ static void pnv_dt_icp(PnvChip *chip, void *fdt, uint32_t pir, g_free(reg); } +static PnvPhb4PecState *pnv_phb4_get_pec(PnvChip *chip, PnvPHB4 *phb, + Error **errp) +{ + PnvPHB *phb_base = phb->phb_base; + PnvPhb4PecState *pecs = NULL; + int chip_id = phb->chip_id; + int index = phb->phb_id; + int i, j; + + if (phb_base->version == 4) { + Pnv9Chip *chip9 = PNV9_CHIP(chip); + + pecs = chip9->pecs; + } else if (phb_base->version == 5) { + Pnv10Chip *chip10 = PNV10_CHIP(chip); + + pecs = chip10->pecs; + } else { + g_assert_not_reached(); + } + + for (i = 0; i < chip->num_pecs; i++) { + /* + * For each PEC, check the amount of phbs it supports + * and see if the given phb4 index matches an index. + */ + PnvPhb4PecState *pec = &pecs[i]; + + for (j = 0; j < pec->num_phbs; j++) { + if (index == pnv_phb4_pec_get_phb_id(pec, j)) { + return pec; + } + } + } + error_setg(errp, + "pnv-phb4 chip-id %d index %d didn't match any existing PEC", + chip_id, index); + + return NULL; +} + +/* + * Adds a PnvPHB to the chip. Returns the parent obj of the + * PHB which varies with each version (phb version 3 is parented + * by the chip, version 4 and 5 are parented by the PEC + * device). + * + * TODO: for version 3 we're still parenting the PHB with the + * chip. We should parent with a (so far not implemented) + * PHB3 PEC device. 
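 * (Editor's note, not part of the patch: with the machine compat properties
 * added further down, a default pnv-phb gets version 3 on powernv8 and so
 * stays a child of the chip, while on powernv9/powernv10 it gets version 4
 * or 5 and is reparented under the PEC returned by pnv_phb4_get_pec().)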
+ */ +Object *pnv_chip_add_phb(PnvChip *chip, PnvPHB *phb, Error **errp) +{ + if (phb->version == 3) { + Pnv8Chip *chip8 = PNV8_CHIP(chip); + + phb->chip = chip; + + chip8->phbs[chip8->num_phbs] = phb; + chip8->num_phbs++; + + return OBJECT(chip); + } + + phb->pec = pnv_phb4_get_pec(chip, PNV_PHB4(phb->backend), errp); + + return OBJECT(phb->pec); +} + static void pnv_chip_power8_dt_populate(PnvChip *chip, void *fdt) { static const char compat[] = "ibm,power8-xscom\0ibm,xscom"; @@ -380,9 +449,12 @@ static void pnv_dt_serial(ISADevice *d, void *fdt, int lpc_off) cpu_to_be32(io_base), cpu_to_be32(8) }; + uint32_t irq; char *name; int node; + irq = object_property_get_uint(OBJECT(d), "irq", &error_fatal); + name = g_strdup_printf("%s@i%x", qdev_fw_name(DEVICE(d)), io_base); node = fdt_add_subnode(fdt, lpc_off, name); _FDT(node); @@ -394,7 +466,7 @@ static void pnv_dt_serial(ISADevice *d, void *fdt, int lpc_off) _FDT((fdt_setprop_cell(fdt, node, "clock-frequency", 1843200))); _FDT((fdt_setprop_cell(fdt, node, "current-speed", 115200))); - _FDT((fdt_setprop_cell(fdt, node, "interrupts", d->isairq[0]))); + _FDT((fdt_setprop_cell(fdt, node, "interrupts", irq))); _FDT((fdt_setprop_cell(fdt, node, "interrupt-parent", fdt_get_phandle(fdt, lpc_off)))); @@ -522,7 +594,7 @@ static void *pnv_dt_create(MachineState *machine) buf = qemu_uuid_unparse_strdup(&qemu_uuid); _FDT((fdt_setprop_string(fdt, 0, "vm,uuid", buf))); if (qemu_uuid_set) { - _FDT((fdt_property_string(fdt, "system-id", buf))); + _FDT((fdt_setprop_string(fdt, 0, "system-id", buf))); } g_free(buf); @@ -571,13 +643,13 @@ static void pnv_powerdown_notify(Notifier *n, void *opaque) } } -static void pnv_reset(MachineState *machine) +static void pnv_reset(MachineState *machine, ShutdownCause reason) { PnvMachineState *pnv = PNV_MACHINE(machine); IPMIBmc *bmc; void *fdt; - qemu_devices_reset(); + qemu_devices_reset(reason); /* * The machine should provide by default an internal BMC simulator. @@ -606,30 +678,48 @@ static void pnv_reset(MachineState *machine) qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt)); cpu_physical_memory_write(PNV_FDT_ADDR, fdt, fdt_totalsize(fdt)); - g_free(fdt); + /* + * Set machine->fdt for 'dumpdtb' QMP/HMP command. Free + * the existing machine->fdt to avoid leaking it during + * a reset. 
+ */ + g_free(machine->fdt); + machine->fdt = fdt; } static ISABus *pnv_chip_power8_isa_create(PnvChip *chip, Error **errp) { Pnv8Chip *chip8 = PNV8_CHIP(chip); + qemu_irq irq = qdev_get_gpio_in(DEVICE(&chip8->psi), PSIHB_IRQ_EXTERNAL); + + qdev_connect_gpio_out(DEVICE(&chip8->lpc), 0, irq); return pnv_lpc_isa_create(&chip8->lpc, true, errp); } static ISABus *pnv_chip_power8nvl_isa_create(PnvChip *chip, Error **errp) { Pnv8Chip *chip8 = PNV8_CHIP(chip); + qemu_irq irq = qdev_get_gpio_in(DEVICE(&chip8->psi), PSIHB_IRQ_LPC_I2C); + + qdev_connect_gpio_out(DEVICE(&chip8->lpc), 0, irq); return pnv_lpc_isa_create(&chip8->lpc, false, errp); } static ISABus *pnv_chip_power9_isa_create(PnvChip *chip, Error **errp) { Pnv9Chip *chip9 = PNV9_CHIP(chip); + qemu_irq irq = qdev_get_gpio_in(DEVICE(&chip9->psi), PSIHB9_IRQ_LPCHC); + + qdev_connect_gpio_out(DEVICE(&chip9->lpc), 0, irq); return pnv_lpc_isa_create(&chip9->lpc, false, errp); } static ISABus *pnv_chip_power10_isa_create(PnvChip *chip, Error **errp) { Pnv10Chip *chip10 = PNV10_CHIP(chip); + qemu_irq irq = qdev_get_gpio_in(DEVICE(&chip10->psi), PSIHB9_IRQ_LPCHC); + + qdev_connect_gpio_out(DEVICE(&chip10->lpc), 0, irq); return pnv_lpc_isa_create(&chip10->lpc, false, errp); } @@ -644,26 +734,39 @@ static void pnv_chip_power8_pic_print_info(PnvChip *chip, Monitor *mon) int i; ics_pic_print_info(&chip8->psi.ics, mon); - for (i = 0; i < chip->num_phbs; i++) { - pnv_phb3_msi_pic_print_info(&chip8->phbs[i].msis, mon); - ics_pic_print_info(&chip8->phbs[i].lsis, mon); + + for (i = 0; i < chip8->num_phbs; i++) { + PnvPHB *phb = chip8->phbs[i]; + PnvPHB3 *phb3 = PNV_PHB3(phb->backend); + + pnv_phb3_msi_pic_print_info(&phb3->msis, mon); + ics_pic_print_info(&phb3->lsis, mon); } } +static int pnv_chip_power9_pic_print_info_child(Object *child, void *opaque) +{ + Monitor *mon = opaque; + PnvPHB *phb = (PnvPHB *) object_dynamic_cast(child, TYPE_PNV_PHB); + + if (!phb) { + return 0; + } + + pnv_phb4_pic_print_info(PNV_PHB4(phb->backend), mon); + + return 0; +} + static void pnv_chip_power9_pic_print_info(PnvChip *chip, Monitor *mon) { Pnv9Chip *chip9 = PNV9_CHIP(chip); - int i, j; pnv_xive_pic_print_info(&chip9->xive, mon); pnv_psi_pic_print_info(&chip9->psi, mon); - for (i = 0; i < PNV9_CHIP_MAX_PEC; i++) { - PnvPhb4PecState *pec = &chip9->pecs[i]; - for (j = 0; j < pec->num_stacks; j++) { - pnv_phb4_pic_print_info(&pec->stacks[j].phb, mon); - } - } + object_child_foreach_recursive(OBJECT(chip), + pnv_chip_power9_pic_print_info_child, mon); } static uint64_t pnv_chip_power8_xscom_core_base(PnvChip *chip, @@ -691,7 +794,7 @@ static bool pnv_match_cpu(const char *default_type, const char *cpu_type) PowerPCCPUClass *ppc = POWERPC_CPU_CLASS(object_class_by_name(cpu_type)); - return ppc_default->pvr_match(ppc_default, ppc->pvr); + return ppc_default->pvr_match(ppc_default, ppc->pvr, false); } static void pnv_ipmi_bt_init(ISABus *bus, IPMIBmc *bmc, uint32_t irq) @@ -707,7 +810,30 @@ static void pnv_chip_power10_pic_print_info(PnvChip *chip, Monitor *mon) { Pnv10Chip *chip10 = PNV10_CHIP(chip); + pnv_xive2_pic_print_info(&chip10->xive, mon); pnv_psi_pic_print_info(&chip10->psi, mon); + + object_child_foreach_recursive(OBJECT(chip), + pnv_chip_power9_pic_print_info_child, mon); +} + +/* Always give the first 1GB to chip 0 else we won't boot */ +static uint64_t pnv_chip_get_ram_size(PnvMachineState *pnv, int chip_id) +{ + MachineState *machine = MACHINE(pnv); + uint64_t ram_per_chip; + + assert(machine->ram_size >= 1 * GiB); + + ram_per_chip = machine->ram_size / 
pnv->num_chips; + if (ram_per_chip >= 1 * GiB) { + return QEMU_ALIGN_DOWN(ram_per_chip, 1 * MiB); + } + + assert(pnv->num_chips > 1); + + ram_per_chip = (machine->ram_size - 1 * GiB) / (pnv->num_chips - 1); + return chip_id == 0 ? 1 * GiB : QEMU_ALIGN_DOWN(ram_per_chip, 1 * MiB); } static void pnv_init(MachineState *machine) @@ -717,11 +843,17 @@ static void pnv_init(MachineState *machine) MachineClass *mc = MACHINE_GET_CLASS(machine); char *fw_filename; long fw_size; + uint64_t chip_ram_start = 0; int i; char *chip_typename; DriveInfo *pnor = drive_get(IF_MTD, 0, 0); DeviceState *dev; + if (kvm_enabled()) { + error_report("The powernv machine does not work with KVM acceleration"); + exit(EXIT_FAILURE); + } + /* allocate RAM */ if (machine->ram_size < mc->default_ram_size) { char *sz = size_to_str(mc->default_ram_size); @@ -809,9 +941,10 @@ static void pnv_init(MachineState *machine) * TODO: should we decide on how many chips we can create based * on #cores and Venice vs. Murano vs. Naples chip type etc..., */ - if (!is_power_of_2(pnv->num_chips) || pnv->num_chips > 4) { + if (!is_power_of_2(pnv->num_chips) || pnv->num_chips > 16) { error_report("invalid number of chips: '%d'", pnv->num_chips); - error_printf("Try '-smp sockets=N'. Valid values are : 1, 2 or 4.\n"); + error_printf( + "Try '-smp sockets=N'. Valid values are : 1, 2, 4, 8 and 16.\n"); exit(1); } @@ -819,22 +952,20 @@ static void pnv_init(MachineState *machine) for (i = 0; i < pnv->num_chips; i++) { char chip_name[32]; Object *chip = OBJECT(qdev_new(chip_typename)); + uint64_t chip_ram_size = pnv_chip_get_ram_size(pnv, i); pnv->chips[i] = PNV_CHIP(chip); - /* - * TODO: put all the memory in one node on chip 0 until we find a - * way to specify different ranges for each chip - */ - if (i == 0) { - object_property_set_int(chip, "ram-size", machine->ram_size, - &error_fatal); - } - - snprintf(chip_name, sizeof(chip_name), "chip[%d]", PNV_CHIP_HWID(i)); - object_property_add_child(OBJECT(pnv), chip_name, chip); - object_property_set_int(chip, "chip-id", PNV_CHIP_HWID(i), + /* Distribute RAM among the chips */ + object_property_set_int(chip, "ram-start", chip_ram_start, &error_fatal); + object_property_set_int(chip, "ram-size", chip_ram_size, + &error_fatal); + chip_ram_start += chip_ram_size; + + snprintf(chip_name, sizeof(chip_name), "chip[%d]", i); + object_property_add_child(OBJECT(pnv), chip_name, chip); + object_property_set_int(chip, "chip-id", i, &error_fatal); object_property_set_int(chip, "nr-cores", machine->smp.cores, &error_fatal); object_property_set_int(chip, "nr-threads", machine->smp.threads, @@ -1005,27 +1136,45 @@ static void pnv_chip_power9_intc_print_info(PnvChip *chip, PowerPCCPU *cpu, static void pnv_chip_power10_intc_create(PnvChip *chip, PowerPCCPU *cpu, Error **errp) { + Pnv10Chip *chip10 = PNV10_CHIP(chip); + Error *local_err = NULL; + Object *obj; PnvCPUState *pnv_cpu = pnv_cpu_state(cpu); - /* Will be defined when the interrupt controller is */ - pnv_cpu->intc = NULL; + /* + * The core creates its interrupt presenter but the XIVE2 interrupt + * controller object is initialized afterwards. Hopefully, it's + * only used at runtime. 
+ */ + obj = xive_tctx_create(OBJECT(cpu), XIVE_PRESENTER(&chip10->xive), + &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + pnv_cpu->intc = obj; } static void pnv_chip_power10_intc_reset(PnvChip *chip, PowerPCCPU *cpu) { - ; + PnvCPUState *pnv_cpu = pnv_cpu_state(cpu); + + xive_tctx_reset(XIVE_TCTX(pnv_cpu->intc)); } static void pnv_chip_power10_intc_destroy(PnvChip *chip, PowerPCCPU *cpu) { PnvCPUState *pnv_cpu = pnv_cpu_state(cpu); + xive_tctx_destroy(XIVE_TCTX(pnv_cpu->intc)); pnv_cpu->intc = NULL; } static void pnv_chip_power10_intc_print_info(PnvChip *chip, PowerPCCPU *cpu, Monitor *mon) { + xive_tctx_pic_print_info(XIVE_TCTX(pnv_cpu_state(cpu)->intc), mon); } /* @@ -1060,7 +1209,6 @@ static void pnv_chip_power10_intc_print_info(PnvChip *chip, PowerPCCPU *cpu, static void pnv_chip_power8_instance_init(Object *obj) { - PnvChip *chip = PNV_CHIP(obj); Pnv8Chip *chip8 = PNV8_CHIP(obj); PnvChipClass *pcc = PNV_CHIP_GET_CLASS(obj); int i; @@ -1078,14 +1226,24 @@ static void pnv_chip_power8_instance_init(Object *obj) object_initialize_child(obj, "homer", &chip8->homer, TYPE_PNV8_HOMER); - for (i = 0; i < pcc->num_phbs; i++) { - object_initialize_child(obj, "phb[*]", &chip8->phbs[i], TYPE_PNV_PHB3); + if (defaults_enabled()) { + chip8->num_phbs = pcc->num_phbs; + + for (i = 0; i < chip8->num_phbs; i++) { + Object *phb = object_new(TYPE_PNV_PHB); + + /* + * We need the chip to parent the PHB to allow the DT + * to build correctly (via pnv_xscom_dt()). + * + * TODO: the PHB should be parented by a PEC device that, at + * this moment, is not modelled powernv8/phb3. + */ + object_property_add_child(obj, "phb[*]", phb); + chip8->phbs[i] = PNV_PHB(phb); + } } - /* - * Number of PHBs is the chip default - */ - chip->num_phbs = pcc->num_phbs; } static void pnv_chip_icp_realize(Pnv8Chip *chip8, Error **errp) @@ -1154,8 +1312,6 @@ static void pnv_chip_power8_realize(DeviceState *dev, Error **errp) &PNV_PSI(psi8)->xscom_regs); /* Create LPC controller */ - object_property_set_link(OBJECT(&chip8->lpc), "psi", OBJECT(&chip8->psi), - &error_abort); qdev_realize(DEVICE(&chip8->lpc), NULL, &error_fatal); pnv_xscom_add_subregion(chip, PNV_XSCOM_LPC_BASE, &chip8->lpc.xscom_regs); @@ -1175,12 +1331,12 @@ static void pnv_chip_power8_realize(DeviceState *dev, Error **errp) } /* Create the simplified OCC model */ - object_property_set_link(OBJECT(&chip8->occ), "psi", OBJECT(&chip8->psi), - &error_abort); if (!qdev_realize(DEVICE(&chip8->occ), NULL, errp)) { return; } pnv_xscom_add_subregion(chip, PNV_XSCOM_OCC_BASE, &chip8->occ.xscom_regs); + qdev_connect_gpio_out(DEVICE(&chip8->occ), 0, + qdev_get_gpio_in(DEVICE(&chip8->psi), PSIHB_IRQ_OCC)); /* OCC SRAM model */ memory_region_add_subregion(get_system_memory(), PNV_OCC_SENSOR_BASE(chip), @@ -1199,28 +1355,18 @@ static void pnv_chip_power8_realize(DeviceState *dev, Error **errp) memory_region_add_subregion(get_system_memory(), PNV_HOMER_BASE(chip), &chip8->homer.regs); - /* PHB3 controllers */ - for (i = 0; i < chip->num_phbs; i++) { - PnvPHB3 *phb = &chip8->phbs[i]; - PnvPBCQState *pbcq = &phb->pbcq; + /* PHB controllers */ + for (i = 0; i < chip8->num_phbs; i++) { + PnvPHB *phb = chip8->phbs[i]; object_property_set_int(OBJECT(phb), "index", i, &error_fatal); object_property_set_int(OBJECT(phb), "chip-id", chip->chip_id, &error_fatal); + object_property_set_link(OBJECT(phb), "chip", OBJECT(chip), + &error_fatal); if (!sysbus_realize(SYS_BUS_DEVICE(phb), errp)) { return; } - - /* Populate the XSCOM address space. 
*/ - pnv_xscom_add_subregion(chip, - PNV_XSCOM_PBCQ_NEST_BASE + 0x400 * phb->phb_id, - &pbcq->xscom_nest_regs); - pnv_xscom_add_subregion(chip, - PNV_XSCOM_PBCQ_PCI_BASE + 0x400 * phb->phb_id, - &pbcq->xscom_pci_regs); - pnv_xscom_add_subregion(chip, - PNV_XSCOM_PBCQ_SPCI_BASE + 0x040 * phb->phb_id, - &pbcq->xscom_spci_regs); } } @@ -1285,7 +1431,7 @@ static void pnv_chip_power8nvl_class_init(ObjectClass *klass, void *data) k->chip_cfam_id = 0x120d304980000000ull; /* P8 Naples DD1.0 */ k->cores_mask = POWER8_CORE_MASK; - k->num_phbs = 3; + k->num_phbs = 4; k->core_pir = pnv_chip_core_pir_p8; k->intc_create = pnv_chip_power8_intc_create; k->intc_reset = pnv_chip_power8_intc_reset; @@ -1319,17 +1465,32 @@ static void pnv_chip_power9_instance_init(Object *obj) object_initialize_child(obj, "occ", &chip9->occ, TYPE_PNV9_OCC); + object_initialize_child(obj, "sbe", &chip9->sbe, TYPE_PNV9_SBE); + object_initialize_child(obj, "homer", &chip9->homer, TYPE_PNV9_HOMER); - for (i = 0; i < PNV9_CHIP_MAX_PEC; i++) { + /* Number of PECs is the chip default */ + chip->num_pecs = pcc->num_pecs; + + for (i = 0; i < chip->num_pecs; i++) { object_initialize_child(obj, "pec[*]", &chip9->pecs[i], TYPE_PNV_PHB4_PEC); } +} - /* - * Number of PHBs is the chip default - */ - chip->num_phbs = pcc->num_phbs; +static void pnv_chip_quad_realize_one(PnvChip *chip, PnvQuad *eq, + PnvCore *pnv_core) +{ + char eq_name[32]; + int core_id = CPU_CORE(pnv_core)->core_id; + + snprintf(eq_name, sizeof(eq_name), "eq[%d]", core_id); + object_initialize_child_with_props(OBJECT(chip), eq_name, eq, + sizeof(*eq), TYPE_PNV_QUAD, + &error_fatal, NULL); + + object_property_set_int(OBJECT(eq), "quad-id", core_id, &error_fatal); + qdev_realize(DEVICE(eq), NULL, &error_fatal); } static void pnv_chip_quad_realize(Pnv9Chip *chip9, Error **errp) @@ -1341,48 +1502,31 @@ static void pnv_chip_quad_realize(Pnv9Chip *chip9, Error **errp) chip9->quads = g_new0(PnvQuad, chip9->nr_quads); for (i = 0; i < chip9->nr_quads; i++) { - char eq_name[32]; PnvQuad *eq = &chip9->quads[i]; - PnvCore *pnv_core = chip->cores[i * 4]; - int core_id = CPU_CORE(pnv_core)->core_id; - snprintf(eq_name, sizeof(eq_name), "eq[%d]", core_id); - object_initialize_child_with_props(OBJECT(chip), eq_name, eq, - sizeof(*eq), TYPE_PNV_QUAD, - &error_fatal, NULL); + pnv_chip_quad_realize_one(chip, eq, chip->cores[i * 4]); - object_property_set_int(OBJECT(eq), "id", core_id, &error_fatal); - qdev_realize(DEVICE(eq), NULL, &error_fatal); - - pnv_xscom_add_subregion(chip, PNV9_XSCOM_EQ_BASE(eq->id), + pnv_xscom_add_subregion(chip, PNV9_XSCOM_EQ_BASE(eq->quad_id), &eq->xscom_regs); } } -static void pnv_chip_power9_phb_realize(PnvChip *chip, Error **errp) +static void pnv_chip_power9_pec_realize(PnvChip *chip, Error **errp) { Pnv9Chip *chip9 = PNV9_CHIP(chip); - int i, j; - int phb_id = 0; + int i; - for (i = 0; i < PNV9_CHIP_MAX_PEC; i++) { + for (i = 0; i < chip->num_pecs; i++) { PnvPhb4PecState *pec = &chip9->pecs[i]; PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec); uint32_t pec_nest_base; uint32_t pec_pci_base; object_property_set_int(OBJECT(pec), "index", i, &error_fatal); - /* - * PEC0 -> 1 stack - * PEC1 -> 2 stacks - * PEC2 -> 3 stacks - */ - object_property_set_int(OBJECT(pec), "num-stacks", i + 1, - &error_fatal); object_property_set_int(OBJECT(pec), "chip-id", chip->chip_id, &error_fatal); - object_property_set_link(OBJECT(pec), "system-memory", - OBJECT(get_system_memory()), &error_abort); + object_property_set_link(OBJECT(pec), "chip", OBJECT(chip), + &error_fatal); if 
(!qdev_realize(DEVICE(pec), NULL, errp)) { return; } @@ -1392,37 +1536,6 @@ static void pnv_chip_power9_phb_realize(PnvChip *chip, Error **errp) pnv_xscom_add_subregion(chip, pec_nest_base, &pec->nest_regs_mr); pnv_xscom_add_subregion(chip, pec_pci_base, &pec->pci_regs_mr); - - for (j = 0; j < pec->num_stacks && phb_id < chip->num_phbs; - j++, phb_id++) { - PnvPhb4PecStack *stack = &pec->stacks[j]; - Object *obj = OBJECT(&stack->phb); - - object_property_set_int(obj, "index", phb_id, &error_fatal); - object_property_set_int(obj, "chip-id", chip->chip_id, - &error_fatal); - object_property_set_int(obj, "version", PNV_PHB4_VERSION, - &error_fatal); - object_property_set_int(obj, "device-id", PNV_PHB4_DEVICE_ID, - &error_fatal); - object_property_set_link(obj, "stack", OBJECT(stack), - &error_abort); - if (!sysbus_realize(SYS_BUS_DEVICE(obj), errp)) { - return; - } - - /* Populate the XSCOM address space. */ - pnv_xscom_add_subregion(chip, - pec_nest_base + 0x40 * (stack->stack_no + 1), - &stack->nest_regs_mr); - pnv_xscom_add_subregion(chip, - pec_pci_base + 0x40 * (stack->stack_no + 1), - &stack->pci_regs_mr); - pnv_xscom_add_subregion(chip, - pec_pci_base + PNV9_XSCOM_PEC_PCI_STK0 + - 0x40 * stack->stack_no, - &stack->phb_regs_mr); - } } } @@ -1474,6 +1587,9 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp) /* Processor Service Interface (PSI) Host Bridge */ object_property_set_int(OBJECT(&chip9->psi), "bar", PNV9_PSIHB_BASE(chip), &error_fatal); + /* This is the only device with 4k ESB pages */ + object_property_set_int(OBJECT(&chip9->psi), "shift", XIVE_ESB_4K, + &error_fatal); if (!qdev_realize(DEVICE(&chip9->psi), NULL, errp)) { return; } @@ -1481,8 +1597,6 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp) &PNV_PSI(psi9)->xscom_regs); /* LPC */ - object_property_set_link(OBJECT(&chip9->lpc), "psi", OBJECT(&chip9->psi), - &error_abort); if (!qdev_realize(DEVICE(&chip9->lpc), NULL, errp)) { return; } @@ -1494,17 +1608,28 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp) (uint64_t) PNV9_LPCM_BASE(chip)); /* Create the simplified OCC model */ - object_property_set_link(OBJECT(&chip9->occ), "psi", OBJECT(&chip9->psi), - &error_abort); if (!qdev_realize(DEVICE(&chip9->occ), NULL, errp)) { return; } pnv_xscom_add_subregion(chip, PNV9_XSCOM_OCC_BASE, &chip9->occ.xscom_regs); + qdev_connect_gpio_out(DEVICE(&chip9->occ), 0, qdev_get_gpio_in( + DEVICE(&chip9->psi), PSIHB9_IRQ_OCC)); /* OCC SRAM model */ memory_region_add_subregion(get_system_memory(), PNV9_OCC_SENSOR_BASE(chip), &chip9->occ.sram_regs); + /* SBE */ + if (!qdev_realize(DEVICE(&chip9->sbe), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV9_XSCOM_SBE_CTRL_BASE, + &chip9->sbe.xscom_ctrl_regs); + pnv_xscom_add_subregion(chip, PNV9_XSCOM_SBE_MBOX_BASE, + &chip9->sbe.xscom_mbox_regs); + qdev_connect_gpio_out(DEVICE(&chip9->sbe), 0, qdev_get_gpio_in( + DEVICE(&chip9->psi), PSIHB9_IRQ_PSU)); + /* HOMER */ object_property_set_link(OBJECT(&chip9->homer), "chip", OBJECT(chip), &error_abort); @@ -1518,8 +1643,8 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp) memory_region_add_subregion(get_system_memory(), PNV9_HOMER_BASE(chip), &chip9->homer.regs); - /* PHBs */ - pnv_chip_power9_phb_realize(chip, &local_err); + /* PEC PHBs */ + pnv_chip_power9_pec_realize(chip, &local_err); if (local_err) { error_propagate(errp, local_err); return; @@ -1550,7 +1675,7 @@ static void pnv_chip_power9_class_init(ObjectClass *klass, void *data) k->xscom_core_base 
= pnv_chip_power9_xscom_core_base; k->xscom_pcba = pnv_chip_power9_xscom_pcba; dc->desc = "PowerNV Chip POWER9"; - k->num_phbs = 6; + k->num_pecs = PNV9_CHIP_MAX_PEC; device_class_set_parent_realize(dc, pnv_chip_power9_realize, &k->parent_realize); @@ -1558,10 +1683,72 @@ static void pnv_chip_power9_class_init(ObjectClass *klass, void *data) static void pnv_chip_power10_instance_init(Object *obj) { + PnvChip *chip = PNV_CHIP(obj); Pnv10Chip *chip10 = PNV10_CHIP(obj); + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(obj); + int i; + object_initialize_child(obj, "xive", &chip10->xive, TYPE_PNV_XIVE2); + object_property_add_alias(obj, "xive-fabric", OBJECT(&chip10->xive), + "xive-fabric"); object_initialize_child(obj, "psi", &chip10->psi, TYPE_PNV10_PSI); object_initialize_child(obj, "lpc", &chip10->lpc, TYPE_PNV10_LPC); + object_initialize_child(obj, "occ", &chip10->occ, TYPE_PNV10_OCC); + object_initialize_child(obj, "sbe", &chip10->sbe, TYPE_PNV10_SBE); + object_initialize_child(obj, "homer", &chip10->homer, TYPE_PNV10_HOMER); + + chip->num_pecs = pcc->num_pecs; + + for (i = 0; i < chip->num_pecs; i++) { + object_initialize_child(obj, "pec[*]", &chip10->pecs[i], + TYPE_PNV_PHB5_PEC); + } +} + +static void pnv_chip_power10_quad_realize(Pnv10Chip *chip10, Error **errp) +{ + PnvChip *chip = PNV_CHIP(chip10); + int i; + + chip10->nr_quads = DIV_ROUND_UP(chip->nr_cores, 4); + chip10->quads = g_new0(PnvQuad, chip10->nr_quads); + + for (i = 0; i < chip10->nr_quads; i++) { + PnvQuad *eq = &chip10->quads[i]; + + pnv_chip_quad_realize_one(chip, eq, chip->cores[i * 4]); + + pnv_xscom_add_subregion(chip, PNV10_XSCOM_EQ_BASE(eq->quad_id), + &eq->xscom_regs); + } +} + +static void pnv_chip_power10_phb_realize(PnvChip *chip, Error **errp) +{ + Pnv10Chip *chip10 = PNV10_CHIP(chip); + int i; + + for (i = 0; i < chip->num_pecs; i++) { + PnvPhb4PecState *pec = &chip10->pecs[i]; + PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec); + uint32_t pec_nest_base; + uint32_t pec_pci_base; + + object_property_set_int(OBJECT(pec), "index", i, &error_fatal); + object_property_set_int(OBJECT(pec), "chip-id", chip->chip_id, + &error_fatal); + object_property_set_link(OBJECT(pec), "chip", OBJECT(chip), + &error_fatal); + if (!qdev_realize(DEVICE(pec), NULL, errp)) { + return; + } + + pec_nest_base = pecc->xscom_nest_base(pec); + pec_pci_base = pecc->xscom_pci_base(pec); + + pnv_xscom_add_subregion(chip, pec_nest_base, &pec->nest_regs_mr); + pnv_xscom_add_subregion(chip, pec_pci_base, &pec->pci_regs_mr); + } } static void pnv_chip_power10_realize(DeviceState *dev, Error **errp) @@ -1585,9 +1772,39 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp) return; } + pnv_chip_power10_quad_realize(chip10, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* XIVE2 interrupt controller (POWER10) */ + object_property_set_int(OBJECT(&chip10->xive), "ic-bar", + PNV10_XIVE2_IC_BASE(chip), &error_fatal); + object_property_set_int(OBJECT(&chip10->xive), "esb-bar", + PNV10_XIVE2_ESB_BASE(chip), &error_fatal); + object_property_set_int(OBJECT(&chip10->xive), "end-bar", + PNV10_XIVE2_END_BASE(chip), &error_fatal); + object_property_set_int(OBJECT(&chip10->xive), "nvpg-bar", + PNV10_XIVE2_NVPG_BASE(chip), &error_fatal); + object_property_set_int(OBJECT(&chip10->xive), "nvc-bar", + PNV10_XIVE2_NVC_BASE(chip), &error_fatal); + object_property_set_int(OBJECT(&chip10->xive), "tm-bar", + PNV10_XIVE2_TM_BASE(chip), &error_fatal); + object_property_set_link(OBJECT(&chip10->xive), "chip", OBJECT(chip), 
+ &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&chip10->xive), errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV10_XSCOM_XIVE2_BASE, + &chip10->xive.xscom_regs); + /* Processor Service Interface (PSI) Host Bridge */ object_property_set_int(OBJECT(&chip10->psi), "bar", PNV10_PSIHB_BASE(chip), &error_fatal); + /* PSI can now be configured to use 64k ESB pages on POWER10 */ + object_property_set_int(OBJECT(&chip10->psi), "shift", XIVE_ESB_64K, + &error_fatal); if (!qdev_realize(DEVICE(&chip10->psi), NULL, errp)) { return; } @@ -1595,8 +1812,6 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp) &PNV_PSI(&chip10->psi)->xscom_regs); /* LPC */ - object_property_set_link(OBJECT(&chip10->lpc), "psi", - OBJECT(&chip10->psi), &error_abort); if (!qdev_realize(DEVICE(&chip10->lpc), NULL, errp)) { return; } @@ -1606,6 +1821,52 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp) chip->fw_mr = &chip10->lpc.isa_fw; chip->dt_isa_nodename = g_strdup_printf("/lpcm-opb@%" PRIx64 "/lpc@0", (uint64_t) PNV10_LPCM_BASE(chip)); + + /* Create the simplified OCC model */ + if (!qdev_realize(DEVICE(&chip10->occ), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV10_XSCOM_OCC_BASE, + &chip10->occ.xscom_regs); + qdev_connect_gpio_out(DEVICE(&chip10->occ), 0, qdev_get_gpio_in( + DEVICE(&chip10->psi), PSIHB9_IRQ_OCC)); + + /* OCC SRAM model */ + memory_region_add_subregion(get_system_memory(), + PNV10_OCC_SENSOR_BASE(chip), + &chip10->occ.sram_regs); + + /* SBE */ + if (!qdev_realize(DEVICE(&chip10->sbe), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV10_XSCOM_SBE_CTRL_BASE, + &chip10->sbe.xscom_ctrl_regs); + pnv_xscom_add_subregion(chip, PNV10_XSCOM_SBE_MBOX_BASE, + &chip10->sbe.xscom_mbox_regs); + qdev_connect_gpio_out(DEVICE(&chip10->sbe), 0, qdev_get_gpio_in( + DEVICE(&chip10->psi), PSIHB9_IRQ_PSU)); + + /* HOMER */ + object_property_set_link(OBJECT(&chip10->homer), "chip", OBJECT(chip), + &error_abort); + if (!qdev_realize(DEVICE(&chip10->homer), NULL, errp)) { + return; + } + /* Homer Xscom region */ + pnv_xscom_add_subregion(chip, PNV10_XSCOM_PBA_BASE, + &chip10->homer.pba_regs); + + /* Homer mmio region */ + memory_region_add_subregion(get_system_memory(), PNV10_HOMER_BASE(chip), + &chip10->homer.regs); + + /* PHBs */ + pnv_chip_power10_phb_realize(chip, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } } static uint32_t pnv_chip_power10_xscom_pcba(PnvChip *chip, uint64_t addr) @@ -1632,6 +1893,7 @@ static void pnv_chip_power10_class_init(ObjectClass *klass, void *data) k->xscom_core_base = pnv_chip_power10_xscom_core_base; k->xscom_pcba = pnv_chip_power10_xscom_pcba; dc->desc = "PowerNV Chip POWER10"; + k->num_pecs = PNV10_CHIP_MAX_PEC; device_class_set_parent_realize(dc, pnv_chip_power10_realize, &k->parent_realize); @@ -1745,7 +2007,6 @@ static Property pnv_chip_properties[] = { DEFINE_PROP_UINT32("nr-cores", PnvChip, nr_cores, 1), DEFINE_PROP_UINT64("cores-mask", PnvChip, cores_mask, 0x0), DEFINE_PROP_UINT32("nr-threads", PnvChip, nr_threads, 1), - DEFINE_PROP_UINT32("num-phbs", PnvChip, num_phbs, 0), DEFINE_PROP_END_OF_LIST(), }; @@ -1782,37 +2043,57 @@ static ICSState *pnv_ics_get(XICSFabric *xi, int irq) int i, j; for (i = 0; i < pnv->num_chips; i++) { - PnvChip *chip = pnv->chips[i]; Pnv8Chip *chip8 = PNV8_CHIP(pnv->chips[i]); if (ics_valid_irq(&chip8->psi.ics, irq)) { return &chip8->psi.ics; } - for (j = 0; j < chip->num_phbs; j++) { - if (ics_valid_irq(&chip8->phbs[j].lsis, irq)) { - 
return &chip8->phbs[j].lsis; + + for (j = 0; j < chip8->num_phbs; j++) { + PnvPHB *phb = chip8->phbs[j]; + PnvPHB3 *phb3 = PNV_PHB3(phb->backend); + + if (ics_valid_irq(&phb3->lsis, irq)) { + return &phb3->lsis; } - if (ics_valid_irq(ICS(&chip8->phbs[j].msis), irq)) { - return ICS(&chip8->phbs[j].msis); + + if (ics_valid_irq(ICS(&phb3->msis), irq)) { + return ICS(&phb3->msis); } } } return NULL; } +PnvChip *pnv_get_chip(PnvMachineState *pnv, uint32_t chip_id) +{ + int i; + + for (i = 0; i < pnv->num_chips; i++) { + PnvChip *chip = pnv->chips[i]; + if (chip->chip_id == chip_id) { + return chip; + } + } + return NULL; +} + static void pnv_ics_resend(XICSFabric *xi) { PnvMachineState *pnv = PNV_MACHINE(xi); int i, j; for (i = 0; i < pnv->num_chips; i++) { - PnvChip *chip = pnv->chips[i]; Pnv8Chip *chip8 = PNV8_CHIP(pnv->chips[i]); ics_resend(&chip8->psi.ics); - for (j = 0; j < chip->num_phbs; j++) { - ics_resend(&chip8->phbs[j].lsis); - ics_resend(ICS(&chip8->phbs[j].msis)); + + for (j = 0; j < chip8->num_phbs; j++) { + PnvPHB *phb = chip8->phbs[j]; + PnvPHB3 *phb3 = PNV_PHB3(phb->backend); + + ics_resend(&phb3->lsis); + ics_resend(ICS(&phb3->msis)); } } } @@ -1873,6 +2154,35 @@ static int pnv_match_nvt(XiveFabric *xfb, uint8_t format, return total_count; } +static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool cam_ignore, uint8_t priority, + uint32_t logic_serv, + XiveTCTXMatch *match) +{ + PnvMachineState *pnv = PNV_MACHINE(xfb); + int total_count = 0; + int i; + + for (i = 0; i < pnv->num_chips; i++) { + Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]); + XivePresenter *xptr = XIVE_PRESENTER(&chip10->xive); + XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); + int count; + + count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, + priority, logic_serv, match); + + if (count < 0) { + return count; + } + + total_count += count; + } + + return total_count; +} + static void pnv_machine_power8_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -1880,8 +2190,14 @@ static void pnv_machine_power8_class_init(ObjectClass *oc, void *data) PnvMachineClass *pmc = PNV_MACHINE_CLASS(oc); static const char compat[] = "qemu,powernv8\0qemu,powernv\0ibm,powernv"; + static GlobalProperty phb_compat[] = { + { TYPE_PNV_PHB, "version", "3" }, + { TYPE_PNV_PHB_ROOT_PORT, "version", "3" }, + }; + mc->desc = "IBM PowerNV (Non-Virtualized) POWER8"; mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0"); + compat_props_add(mc->compat_props, phb_compat, G_N_ELEMENTS(phb_compat)); xic->icp_get = pnv_icp_get; xic->ics_get = pnv_ics_get; @@ -1889,6 +2205,8 @@ static void pnv_machine_power8_class_init(ObjectClass *oc, void *data) pmc->compat = compat; pmc->compat_size = sizeof(compat); + + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB); } static void pnv_machine_power9_class_init(ObjectClass *oc, void *data) @@ -1898,8 +2216,15 @@ static void pnv_machine_power9_class_init(ObjectClass *oc, void *data) PnvMachineClass *pmc = PNV_MACHINE_CLASS(oc); static const char compat[] = "qemu,powernv9\0ibm,powernv"; + static GlobalProperty phb_compat[] = { + { TYPE_PNV_PHB, "version", "4" }, + { TYPE_PNV_PHB_ROOT_PORT, "version", "4" }, + }; + mc->desc = "IBM PowerNV (Non-Virtualized) POWER9"; mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.0"); + compat_props_add(mc->compat_props, phb_compat, G_N_ELEMENTS(phb_compat)); + xfc->match_nvt = pnv_match_nvt; mc->alias = "powernv"; @@ -1907,20 +2232,33 @@ static 
void pnv_machine_power9_class_init(ObjectClass *oc, void *data) pmc->compat = compat; pmc->compat_size = sizeof(compat); pmc->dt_power_mgt = pnv_dt_power_mgt; + + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB); } static void pnv_machine_power10_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); PnvMachineClass *pmc = PNV_MACHINE_CLASS(oc); + XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc); static const char compat[] = "qemu,powernv10\0ibm,powernv"; + static GlobalProperty phb_compat[] = { + { TYPE_PNV_PHB, "version", "5" }, + { TYPE_PNV_PHB_ROOT_PORT, "version", "5" }, + }; + mc->desc = "IBM PowerNV (Non-Virtualized) POWER10"; - mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power10_v1.0"); + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power10_v2.0"); + compat_props_add(mc->compat_props, phb_compat, G_N_ELEMENTS(phb_compat)); pmc->compat = compat; pmc->compat_size = sizeof(compat); pmc->dt_power_mgt = pnv_dt_power_mgt; + + xfc->match_nvt = pnv10_xive_match_nvt; + + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB); } static bool pnv_machine_get_hb(Object *obj, Error **errp) @@ -2032,6 +2370,10 @@ static const TypeInfo types[] = { .name = MACHINE_TYPE_NAME("powernv10"), .parent = TYPE_PNV_MACHINE, .class_init = pnv_machine_power10_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_XIVE_FABRIC }, + { }, + }, }, { .name = MACHINE_TYPE_NAME("powernv9"), diff --git a/hw/ppc/pnv_bmc.c b/hw/ppc/pnv_bmc.c index 75a22ce50b..99f1e8d7f9 100644 --- a/hw/ppc/pnv_bmc.c +++ b/hw/ppc/pnv_bmc.c @@ -17,7 +17,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/error.h" #include "target/ppc/cpu.h" #include "qemu/log.h" diff --git a/hw/ppc/pnv_core.c b/hw/ppc/pnv_core.c index 8c2a15a0fb..9ee79192dd 100644 --- a/hw/ppc/pnv_core.c +++ b/hw/ppc/pnv_core.c @@ -58,6 +58,7 @@ static void pnv_core_cpu_reset(PnvCore *pc, PowerPCCPU *cpu) env->msr |= MSR_HVB; /* Hypervisor mode */ env->spr[SPR_HRMOR] = pc->hrmor; hreg_compute_hflags(env); + ppc_maybe_interrupt(env); pcc->intc_reset(pc->chip, cpu); } @@ -347,7 +348,7 @@ static const TypeInfo pnv_core_infos[] = { DEFINE_PNV_CORE_TYPE(power8, "power8_v2.0"), DEFINE_PNV_CORE_TYPE(power8, "power8nvl_v1.0"), DEFINE_PNV_CORE_TYPE(power9, "power9_v2.0"), - DEFINE_PNV_CORE_TYPE(power10, "power10_v1.0"), + DEFINE_PNV_CORE_TYPE(power10, "power10_v2.0"), }; DEFINE_TYPES(pnv_core_infos) @@ -407,13 +408,13 @@ static void pnv_quad_realize(DeviceState *dev, Error **errp) PnvQuad *eq = PNV_QUAD(dev); char name[32]; - snprintf(name, sizeof(name), "xscom-quad.%d", eq->id); + snprintf(name, sizeof(name), "xscom-quad.%d", eq->quad_id); pnv_xscom_region_init(&eq->xscom_regs, OBJECT(dev), &pnv_quad_xscom_ops, eq, name, PNV9_XSCOM_EQ_SIZE); } static Property pnv_quad_properties[] = { - DEFINE_PROP_UINT32("id", PnvQuad, id, 0), + DEFINE_PROP_UINT32("quad-id", PnvQuad, quad_id, 0), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/ppc/pnv_homer.c b/hw/ppc/pnv_homer.c index 9a262629b7..ea73919e54 100644 --- a/hw/ppc/pnv_homer.c +++ b/hw/ppc/pnv_homer.c @@ -332,6 +332,69 @@ static const TypeInfo pnv_homer_power9_type_info = { .class_init = pnv_homer_power9_class_init, }; +static uint64_t pnv_homer_power10_pba_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvHomer *homer = PNV_HOMER(opaque); + PnvChip *chip = homer->chip; + uint32_t reg = addr >> 3; + uint64_t val = 0; + + switch (reg) { + case PBA_BAR0: + val = PNV10_HOMER_BASE(chip); + break; + case PBA_BARMASK0: /* P10 homer region mask */ + val = (PNV10_HOMER_SIZE - 1) 
& 0x300000; + break; + case PBA_BAR2: /* P10 occ common area */ + val = PNV10_OCC_COMMON_AREA_BASE; + break; + case PBA_BARMASK2: /* P10 occ common area size */ + val = (PNV10_OCC_COMMON_AREA_SIZE - 1) & 0x700000; + break; + default: + qemu_log_mask(LOG_UNIMP, "PBA: read to unimplemented register: Ox%" + HWADDR_PRIx "\n", addr >> 3); + } + return val; +} + +static void pnv_homer_power10_pba_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + qemu_log_mask(LOG_UNIMP, "PBA: write to unimplemented register: Ox%" + HWADDR_PRIx "\n", addr >> 3); +} + +static const MemoryRegionOps pnv_homer_power10_pba_ops = { + .read = pnv_homer_power10_pba_read, + .write = pnv_homer_power10_pba_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void pnv_homer_power10_class_init(ObjectClass *klass, void *data) +{ + PnvHomerClass *homer = PNV_HOMER_CLASS(klass); + + homer->pba_size = PNV10_XSCOM_PBA_SIZE; + homer->pba_ops = &pnv_homer_power10_pba_ops; + homer->homer_size = PNV10_HOMER_SIZE; + homer->homer_ops = &pnv_power9_homer_ops; /* TODO */ + homer->core_max_base = PNV9_CORE_MAX_BASE; +} + +static const TypeInfo pnv_homer_power10_type_info = { + .name = TYPE_PNV10_HOMER, + .parent = TYPE_PNV_HOMER, + .instance_size = sizeof(PnvHomer), + .class_init = pnv_homer_power10_class_init, +}; + static void pnv_homer_realize(DeviceState *dev, Error **errp) { PnvHomer *homer = PNV_HOMER(dev); @@ -377,6 +440,7 @@ static void pnv_homer_register_types(void) type_register_static(&pnv_homer_type_info); type_register_static(&pnv_homer_power8_type_info); type_register_static(&pnv_homer_power9_type_info); + type_register_static(&pnv_homer_power10_type_info); } type_init(pnv_homer_register_types); diff --git a/hw/ppc/pnv_lpc.c b/hw/ppc/pnv_lpc.c index bcbca3db97..ee890e7ab4 100644 --- a/hw/ppc/pnv_lpc.c +++ b/hw/ppc/pnv_lpc.c @@ -422,7 +422,6 @@ static const MemoryRegionOps pnv_lpc_mmio_ops = { static void pnv_lpc_eval_irqs(PnvLpcController *lpc) { bool lpc_to_opb_irq = false; - PnvLpcClass *plc = PNV_LPC_GET_CLASS(lpc); /* Update LPC controller to OPB line */ if (lpc->lpc_hc_irqser_ctrl & LPC_HC_IRQSER_EN) { @@ -445,7 +444,7 @@ static void pnv_lpc_eval_irqs(PnvLpcController *lpc) lpc->opb_irq_stat |= lpc->opb_irq_input & lpc->opb_irq_mask; /* Reflect the interrupt */ - pnv_psi_irq_set(lpc->psi, plc->psi_irq, lpc->opb_irq_stat != 0); + qemu_set_irq(lpc->psi_irq, lpc->opb_irq_stat != 0); } static uint64_t lpc_hc_read(void *opaque, hwaddr addr, unsigned size) @@ -637,8 +636,6 @@ static void pnv_lpc_power8_class_init(ObjectClass *klass, void *data) xdc->dt_xscom = pnv_lpc_dt_xscom; - plc->psi_irq = PSIHB_IRQ_LPC_I2C; - device_class_set_parent_realize(dc, pnv_lpc_power8_realize, &plc->parent_realize); } @@ -677,8 +674,6 @@ static void pnv_lpc_power9_class_init(ObjectClass *klass, void *data) dc->desc = "PowerNV LPC Controller POWER9"; - plc->psi_irq = PSIHB9_IRQ_LPCHC; - device_class_set_parent_realize(dc, pnv_lpc_power9_realize, &plc->parent_realize); } @@ -706,8 +701,6 @@ static void pnv_lpc_realize(DeviceState *dev, Error **errp) { PnvLpcController *lpc = PNV_LPC(dev); - assert(lpc->psi); - /* Reg inits */ lpc->lpc_hc_fw_rd_acc_size = LPC_HC_FW_RD_4B; @@ -746,12 +739,9 @@ static void pnv_lpc_realize(DeviceState *dev, Error **errp) "lpc-hc", LPC_HC_REGS_OPB_SIZE); memory_region_add_subregion(&lpc->opb_mr, LPC_HC_REGS_OPB_ADDR, &lpc->lpc_hc_regs); -} -static Property pnv_lpc_properties[] = { - 
DEFINE_PROP_LINK("psi", PnvLpcController, psi, TYPE_PNV_PSI, PnvPsi *), - DEFINE_PROP_END_OF_LIST(), -}; + qdev_init_gpio_out(DEVICE(dev), &lpc->psi_irq, 1); +} static void pnv_lpc_class_init(ObjectClass *klass, void *data) { @@ -759,7 +749,6 @@ static void pnv_lpc_class_init(ObjectClass *klass, void *data) dc->realize = pnv_lpc_realize; dc->desc = "PowerNV LPC Controller"; - device_class_set_props(dc, pnv_lpc_properties); dc->user_creatable = false; } @@ -803,7 +792,7 @@ static void pnv_lpc_isa_irq_handler_cpld(void *opaque, int n, int level) } if (pnv->cpld_irqstate != old_state) { - pnv_psi_irq_set(lpc->psi, PSIHB_IRQ_EXTERNAL, pnv->cpld_irqstate != 0); + qemu_set_irq(lpc->psi_irq, pnv->cpld_irqstate != 0); } } diff --git a/hw/ppc/pnv_occ.c b/hw/ppc/pnv_occ.c index 5a716c256e..9fa6d91d31 100644 --- a/hw/ppc/pnv_occ.c +++ b/hw/ppc/pnv_occ.c @@ -21,6 +21,7 @@ #include "qapi/error.h" #include "qemu/log.h" #include "qemu/module.h" +#include "hw/irq.h" #include "hw/qdev-properties.h" #include "hw/ppc/pnv.h" #include "hw/ppc/pnv_xscom.h" @@ -51,13 +52,12 @@ static void pnv_occ_set_misc(PnvOCC *occ, uint64_t val) { bool irq_state; - PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ); val &= 0xffff000000000000ull; occ->occmisc = val; irq_state = !!(val >> 63); - pnv_psi_irq_set(occ->psi, poc->psi_irq, irq_state); + qemu_set_irq(occ->psi_irq, irq_state); } static uint64_t pnv_occ_power8_xscom_read(void *opaque, hwaddr addr, @@ -168,7 +168,6 @@ static void pnv_occ_power8_class_init(ObjectClass *klass, void *data) poc->xscom_size = PNV_XSCOM_OCC_SIZE; poc->xscom_ops = &pnv_occ_power8_xscom_ops; - poc->psi_irq = PSIHB_IRQ_OCC; } static const TypeInfo pnv_occ_power8_type_info = { @@ -236,10 +235,11 @@ static const MemoryRegionOps pnv_occ_power9_xscom_ops = { static void pnv_occ_power9_class_init(ObjectClass *klass, void *data) { PnvOCCClass *poc = PNV_OCC_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + dc->desc = "PowerNV OCC Controller (POWER9)"; poc->xscom_size = PNV9_XSCOM_OCC_SIZE; poc->xscom_ops = &pnv_occ_power9_xscom_ops; - poc->psi_irq = PSIHB9_IRQ_OCC; } static const TypeInfo pnv_occ_power9_type_info = { @@ -249,13 +249,24 @@ static const TypeInfo pnv_occ_power9_type_info = { .class_init = pnv_occ_power9_class_init, }; +static void pnv_occ_power10_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "PowerNV OCC Controller (POWER10)"; +} + +static const TypeInfo pnv_occ_power10_type_info = { + .name = TYPE_PNV10_OCC, + .parent = TYPE_PNV9_OCC, + .class_init = pnv_occ_power10_class_init, +}; + static void pnv_occ_realize(DeviceState *dev, Error **errp) { PnvOCC *occ = PNV_OCC(dev); PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ); - assert(occ->psi); - occ->occmisc = 0; /* XScom region for OCC registers */ @@ -266,12 +277,9 @@ static void pnv_occ_realize(DeviceState *dev, Error **errp) memory_region_init_io(&occ->sram_regs, OBJECT(dev), &pnv_occ_sram_ops, occ, "occ-common-area", PNV_OCC_SENSOR_DATA_BLOCK_SIZE); -} -static Property pnv_occ_properties[] = { - DEFINE_PROP_LINK("psi", PnvOCC, psi, TYPE_PNV_PSI, PnvPsi *), - DEFINE_PROP_END_OF_LIST(), -}; + qdev_init_gpio_out(DEVICE(dev), &occ->psi_irq, 1); +} static void pnv_occ_class_init(ObjectClass *klass, void *data) { @@ -279,7 +287,6 @@ static void pnv_occ_class_init(ObjectClass *klass, void *data) dc->realize = pnv_occ_realize; dc->desc = "PowerNV OCC Controller"; - device_class_set_props(dc, pnv_occ_properties); dc->user_creatable = false; } @@ -297,6 +304,7 @@ static void 
pnv_occ_register_types(void) type_register_static(&pnv_occ_type_info); type_register_static(&pnv_occ_power8_type_info); type_register_static(&pnv_occ_power9_type_info); + type_register_static(&pnv_occ_power10_type_info); } type_init(pnv_occ_register_types); diff --git a/hw/ppc/pnv_pnor.c b/hw/ppc/pnv_pnor.c index 5ef1cf2afb..6280408299 100644 --- a/hw/ppc/pnv_pnor.c +++ b/hw/ppc/pnv_pnor.c @@ -36,7 +36,7 @@ static void pnv_pnor_update(PnvPnor *s, int offset, int size) int offset_end; int ret; - if (s->blk) { + if (!s->blk || !blk_is_writable(s->blk)) { return; } @@ -44,8 +44,8 @@ static void pnv_pnor_update(PnvPnor *s, int offset, int size) offset = QEMU_ALIGN_DOWN(offset, BDRV_SECTOR_SIZE); offset_end = QEMU_ALIGN_UP(offset_end, BDRV_SECTOR_SIZE); - ret = blk_pwrite(s->blk, offset, s->storage + offset, - offset_end - offset, 0); + ret = blk_pwrite(s->blk, offset, offset_end - offset, s->storage + offset, + 0); if (ret < 0) { error_report("Could not update PNOR offset=0x%" PRIx32" : %s", offset, strerror(-ret)); @@ -99,7 +99,7 @@ static void pnv_pnor_realize(DeviceState *dev, Error **errp) s->storage = blk_blockalign(s->blk, s->size); - if (blk_pread(s->blk, 0, s->storage, s->size) != s->size) { + if (blk_pread(s->blk, 0, s->size, s->storage, 0) < 0) { error_setg(errp, "failed to read the initial flash content"); return; } diff --git a/hw/ppc/pnv_psi.c b/hw/ppc/pnv_psi.c index cd9a2c5952..98045ed3d2 100644 --- a/hw/ppc/pnv_psi.c +++ b/hw/ppc/pnv_psi.c @@ -184,8 +184,7 @@ static void pnv_psi_set_irsn(PnvPsi *psi, uint64_t val) /* * FSP and PSI interrupts are muxed under the same number. */ -static const uint32_t xivr_regs[] = { - [PSIHB_IRQ_PSI] = PSIHB_XSCOM_XIVR_FSP, +static const uint32_t xivr_regs[PSI_NUM_INTERRUPTS] = { [PSIHB_IRQ_FSP] = PSIHB_XSCOM_XIVR_FSP, [PSIHB_IRQ_OCC] = PSIHB_XSCOM_XIVR_OCC, [PSIHB_IRQ_FSI] = PSIHB_XSCOM_XIVR_FSI, @@ -194,8 +193,7 @@ static const uint32_t xivr_regs[] = { [PSIHB_IRQ_EXTERNAL] = PSIHB_XSCOM_XIVR_EXT, }; -static const uint32_t stat_regs[] = { - [PSIHB_IRQ_PSI] = PSIHB_XSCOM_CR, +static const uint32_t stat_regs[PSI_NUM_INTERRUPTS] = { [PSIHB_IRQ_FSP] = PSIHB_XSCOM_CR, [PSIHB_IRQ_OCC] = PSIHB_XSCOM_IRQ_STAT, [PSIHB_IRQ_FSI] = PSIHB_XSCOM_IRQ_STAT, @@ -204,8 +202,7 @@ static const uint32_t stat_regs[] = { [PSIHB_IRQ_EXTERNAL] = PSIHB_XSCOM_IRQ_STAT, }; -static const uint64_t stat_bits[] = { - [PSIHB_IRQ_PSI] = PSIHB_CR_PSI_IRQ, +static const uint64_t stat_bits[PSI_NUM_INTERRUPTS] = { [PSIHB_IRQ_FSP] = PSIHB_CR_FSP_IRQ, [PSIHB_IRQ_OCC] = PSIHB_IRQ_STAT_OCC, [PSIHB_IRQ_FSI] = PSIHB_IRQ_STAT_FSI, @@ -214,23 +211,14 @@ static const uint64_t stat_bits[] = { [PSIHB_IRQ_EXTERNAL] = PSIHB_IRQ_STAT_EXT, }; -void pnv_psi_irq_set(PnvPsi *psi, int irq, bool state) -{ - PNV_PSI_GET_CLASS(psi)->irq_set(psi, irq, state); -} - -static void pnv_psi_power8_irq_set(PnvPsi *psi, int irq, bool state) +static void pnv_psi_power8_set_irq(void *opaque, int irq, int state) { + PnvPsi *psi = opaque; uint32_t xivr_reg; uint32_t stat_reg; uint32_t src; bool masked; - if (irq > PSIHB_IRQ_EXTERNAL) { - qemu_log_mask(LOG_GUEST_ERROR, "PSI: Unsupported irq %d\n", irq); - return; - } - xivr_reg = xivr_regs[irq]; stat_reg = stat_regs[irq]; @@ -515,6 +503,8 @@ static void pnv_psi_power8_realize(DeviceState *dev, Error **errp) ics_set_irq_type(ics, i, true); } + qdev_init_gpio_in(dev, pnv_psi_power8_set_irq, ics->nr_irqs); + psi->qirqs = qemu_allocate_irqs(ics_set_irq, ics, ics->nr_irqs); /* XSCOM region for PSI registers */ @@ -576,7 +566,6 @@ static void 
pnv_psi_power8_class_init(ObjectClass *klass, void *data) ppc->xscom_pcba = PNV_XSCOM_PSIHB_BASE; ppc->xscom_size = PNV_XSCOM_PSIHB_SIZE; ppc->bar_mask = PSIHB_BAR_MASK; - ppc->irq_set = pnv_psi_power8_irq_set; ppc->compat = compat; ppc->compat_size = sizeof(compat); } @@ -601,7 +590,6 @@ static const TypeInfo pnv_psi_power8_info = { #define PSIHB9_IRQ_METHOD PPC_BIT(0) #define PSIHB9_IRQ_RESET PPC_BIT(1) #define PSIHB9_ESB_CI_BASE 0x60 -#define PSIHB9_ESB_CI_64K PPC_BIT(1) #define PSIHB9_ESB_CI_ADDR_MASK PPC_BITMASK(8, 47) #define PSIHB9_ESB_CI_VALID PPC_BIT(63) #define PSIHB9_ESB_NOTIF_ADDR 0x68 @@ -646,7 +634,15 @@ static const TypeInfo pnv_psi_power8_info = { #define PSIHB9_IRQ_STAT_DIO PPC_BIT(12) #define PSIHB9_IRQ_STAT_PSU PPC_BIT(13) -static void pnv_psi_notify(XiveNotifier *xf, uint32_t srcno) +/* P10 register extensions */ + +#define PSIHB10_CR PSIHB9_CR +#define PSIHB10_CR_STORE_EOI PPC_BIT(12) + +#define PSIHB10_ESB_CI_BASE PSIHB9_ESB_CI_BASE +#define PSIHB10_ESB_CI_64K PPC_BIT(1) + +static void pnv_psi_notify(XiveNotifier *xf, uint32_t srcno, bool pq_checked) { PnvPsi *psi = PNV_PSI(xf); uint64_t notif_port = psi->regs[PSIHB_REG(PSIHB9_ESB_NOTIF_ADDR)]; @@ -655,9 +651,13 @@ static void pnv_psi_notify(XiveNotifier *xf, uint32_t srcno) uint32_t offset = (psi->regs[PSIHB_REG(PSIHB9_IVT_OFFSET)] >> PSIHB9_IVT_OFF_SHIFT); - uint64_t data = XIVE_TRIGGER_PQ | offset | srcno; + uint64_t data = offset | srcno; MemTxResult result; + if (pq_checked) { + data |= XIVE_TRIGGER_PQ; + } + if (!valid) { return; } @@ -704,6 +704,13 @@ static void pnv_psi_p9_mmio_write(void *opaque, hwaddr addr, switch (addr) { case PSIHB9_CR: + if (val & PSIHB10_CR_STORE_EOI) { + psi9->source.esb_flags |= XIVE_SRC_STORE_EOI; + } else { + psi9->source.esb_flags &= ~XIVE_SRC_STORE_EOI; + } + break; + case PSIHB9_SEMR: /* FSP stuff */ break; @@ -715,15 +722,20 @@ static void pnv_psi_p9_mmio_write(void *opaque, hwaddr addr, break; case PSIHB9_ESB_CI_BASE: + if (val & PSIHB10_ESB_CI_64K) { + psi9->source.esb_shift = XIVE_ESB_64K; + } else { + psi9->source.esb_shift = XIVE_ESB_4K; + } if (!(val & PSIHB9_ESB_CI_VALID)) { if (psi->regs[reg] & PSIHB9_ESB_CI_VALID) { memory_region_del_subregion(sysmem, &psi9->source.esb_mmio); } } else { if (!(psi->regs[reg] & PSIHB9_ESB_CI_VALID)) { - memory_region_add_subregion(sysmem, - val & ~PSIHB9_ESB_CI_VALID, - &psi9->source.esb_mmio); + hwaddr addr = val & ~(PSIHB9_ESB_CI_VALID | PSIHB10_ESB_CI_64K); + memory_region_add_subregion(sysmem, addr, + &psi9->source.esb_mmio); } } psi->regs[reg] = val; @@ -791,15 +803,11 @@ static const MemoryRegionOps pnv_psi_p9_xscom_ops = { } }; -static void pnv_psi_power9_irq_set(PnvPsi *psi, int irq, bool state) +static void pnv_psi_power9_set_irq(void *opaque, int irq, int state) { + PnvPsi *psi = opaque; uint64_t irq_method = psi->regs[PSIHB_REG(PSIHB9_INTERRUPT_CONTROL)]; - if (irq > PSIHB9_NUM_IRQS) { - qemu_log_mask(LOG_GUEST_ERROR, "PSI: Unsupported irq %d\n", irq); - return; - } - if (irq_method & PSIHB9_IRQ_METHOD) { qemu_log_mask(LOG_GUEST_ERROR, "PSI: LSI IRQ method no supported\n"); return; @@ -831,6 +839,7 @@ static void pnv_psi_power9_instance_init(Object *obj) Pnv9Psi *psi = PNV9_PSI(obj); object_initialize_child(obj, "source", &psi->source, TYPE_XIVE_SOURCE); + object_property_add_alias(obj, "shift", OBJECT(&psi->source), "shift"); } static void pnv_psi_power9_realize(DeviceState *dev, Error **errp) @@ -839,8 +848,6 @@ static void pnv_psi_power9_realize(DeviceState *dev, Error **errp) XiveSource *xsrc = &PNV9_PSI(psi)->source; int 
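The PSIHB9_CR and PSIHB9_ESB_CI_BASE hunks above add the POWER10 extensions: CR bit 12 toggles store EOI on the XIVE source, and ESB CI bit 1 selects 64k instead of 4k ESB pages, which is why the hard-coded 4k "shift" is dropped from realize further down. A sketch of the firmware-side programming these checks assume (esb_base is an illustrative address, not something defined in the patch):

/* Hypothetical firmware-side value for the ESB CI window on a P10 chip. */
uint64_t val;

val  = esb_base & PSIHB9_ESB_CI_ADDR_MASK;   /* window base address */
val |= PSIHB10_ESB_CI_64K;                   /* request 64k ESB pages */
val |= PSIHB9_ESB_CI_VALID;                  /* map source.esb_mmio at that base */
/* an XSCOM write of 'val' to PSIHB9_ESB_CI_BASE applies both settings at once */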
i; - /* This is the only device with 4k ESB pages */ - object_property_set_int(OBJECT(xsrc), "shift", XIVE_ESB_4K, &error_fatal); object_property_set_int(OBJECT(xsrc), "nr-irqs", PSIHB9_NUM_IRQS, &error_fatal); object_property_set_link(OBJECT(xsrc), "xive", OBJECT(psi), &error_abort); @@ -854,6 +861,8 @@ static void pnv_psi_power9_realize(DeviceState *dev, Error **errp) psi->qirqs = qemu_allocate_irqs(xive_source_set_irq, xsrc, xsrc->nr_irqs); + qdev_init_gpio_in(dev, pnv_psi_power9_set_irq, xsrc->nr_irqs); + /* XSCOM region for PSI registers */ pnv_xscom_region_init(&psi->xscom_regs, OBJECT(dev), &pnv_psi_p9_xscom_ops, psi, "xscom-psi", PNV9_XSCOM_PSIHB_SIZE); @@ -879,7 +888,6 @@ static void pnv_psi_power9_class_init(ObjectClass *klass, void *data) ppc->xscom_pcba = PNV9_XSCOM_PSIHB_BASE; ppc->xscom_size = PNV9_XSCOM_PSIHB_SIZE; ppc->bar_mask = PSIHB9_BAR_MASK; - ppc->irq_set = pnv_psi_power9_irq_set; ppc->compat = compat; ppc->compat_size = sizeof(compat); diff --git a/hw/ppc/pnv_sbe.c b/hw/ppc/pnv_sbe.c new file mode 100644 index 0000000000..1c7812a135 --- /dev/null +++ b/hw/ppc/pnv_sbe.c @@ -0,0 +1,414 @@ +/* + * QEMU PowerPC PowerNV Emulation of some SBE behaviour + * + * Copyright (c) 2022, IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "target/ppc/cpu.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/ppc/pnv.h" +#include "hw/ppc/pnv_xscom.h" +#include "hw/ppc/pnv_sbe.h" +#include "trace.h" + +/* + * Most register and command definitions come from skiboot. + * + * xscom addresses are adjusted to be relative to xscom subregion bases + */ + +/* + * SBE MBOX register address + * Reg 0 - 3 : Host to send command packets to SBE + * Reg 4 - 7 : SBE to send response packets to Host + */ +#define PSU_HOST_SBE_MBOX_REG0 0x00000000 +#define PSU_HOST_SBE_MBOX_REG1 0x00000001 +#define PSU_HOST_SBE_MBOX_REG2 0x00000002 +#define PSU_HOST_SBE_MBOX_REG3 0x00000003 +#define PSU_HOST_SBE_MBOX_REG4 0x00000004 +#define PSU_HOST_SBE_MBOX_REG5 0x00000005 +#define PSU_HOST_SBE_MBOX_REG6 0x00000006 +#define PSU_HOST_SBE_MBOX_REG7 0x00000007 +#define PSU_SBE_DOORBELL_REG_RW 0x00000010 +#define PSU_SBE_DOORBELL_REG_AND 0x00000011 +#define PSU_SBE_DOORBELL_REG_OR 0x00000012 +#define PSU_HOST_DOORBELL_REG_RW 0x00000013 +#define PSU_HOST_DOORBELL_REG_AND 0x00000014 +#define PSU_HOST_DOORBELL_REG_OR 0x00000015 + +/* + * Doorbell register to trigger SBE interrupt. Set by OPAL to inform + * the SBE about a waiting message in the Host/SBE mailbox registers + */ +#define HOST_SBE_MSG_WAITING PPC_BIT(0) + +/* + * Doorbell register for host bridge interrupt. 
Set by the SBE to inform + * host about a response message in the Host/SBE mailbox registers + */ +#define SBE_HOST_RESPONSE_WAITING PPC_BIT(0) +#define SBE_HOST_MSG_READ PPC_BIT(1) +#define SBE_HOST_STOP15_EXIT PPC_BIT(2) +#define SBE_HOST_RESET PPC_BIT(3) +#define SBE_HOST_PASSTHROUGH PPC_BIT(4) +#define SBE_HOST_TIMER_EXPIRY PPC_BIT(14) +#define SBE_HOST_RESPONSE_MASK (PPC_BITMASK(0, 4) | \ + SBE_HOST_TIMER_EXPIRY) + +/* SBE Control Register */ +#define SBE_CONTROL_REG_RW 0x00000000 + +/* SBE interrupt s0/s1 bits */ +#define SBE_CONTROL_REG_S0 PPC_BIT(14) +#define SBE_CONTROL_REG_S1 PPC_BIT(15) + +struct sbe_msg { + uint64_t reg[4]; +}; + +static uint64_t pnv_sbe_power9_xscom_ctrl_read(void *opaque, hwaddr addr, + unsigned size) +{ + uint32_t offset = addr >> 3; + uint64_t val = 0; + + switch (offset) { + default: + qemu_log_mask(LOG_UNIMP, "SBE Unimplemented register: Ox%" + HWADDR_PRIx "\n", addr >> 3); + } + + trace_pnv_sbe_xscom_ctrl_read(addr, val); + + return val; +} + +static void pnv_sbe_power9_xscom_ctrl_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + uint32_t offset = addr >> 3; + + trace_pnv_sbe_xscom_ctrl_write(addr, val); + + switch (offset) { + default: + qemu_log_mask(LOG_UNIMP, "SBE Unimplemented register: Ox%" + HWADDR_PRIx "\n", addr >> 3); + } +} + +static const MemoryRegionOps pnv_sbe_power9_xscom_ctrl_ops = { + .read = pnv_sbe_power9_xscom_ctrl_read, + .write = pnv_sbe_power9_xscom_ctrl_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void pnv_sbe_set_host_doorbell(PnvSBE *sbe, uint64_t val) +{ + val &= SBE_HOST_RESPONSE_MASK; /* Is this right? What does HW do? */ + sbe->host_doorbell = val; + + trace_pnv_sbe_reg_set_host_doorbell(val); + qemu_set_irq(sbe->psi_irq, !!val); +} + +/* SBE Target Type */ +#define SBE_TARGET_TYPE_PROC 0x00 +#define SBE_TARGET_TYPE_EX 0x01 +#define SBE_TARGET_TYPE_PERV 0x02 +#define SBE_TARGET_TYPE_MCS 0x03 +#define SBE_TARGET_TYPE_EQ 0x04 +#define SBE_TARGET_TYPE_CORE 0x05 + +/* SBE MBOX command class */ +#define SBE_MCLASS_FIRST 0xD1 +#define SBE_MCLASS_CORE_STATE 0xD1 +#define SBE_MCLASS_SCOM 0xD2 +#define SBE_MCLASS_RING 0xD3 +#define SBE_MCLASS_TIMER 0xD4 +#define SBE_MCLASS_MPIPL 0xD5 +#define SBE_MCLASS_SECURITY 0xD6 +#define SBE_MCLASS_GENERIC 0xD7 +#define SBE_MCLASS_LAST 0xD7 + +/* + * Commands are provided in xxyy form where: + * - xx : command class + * - yy : command + * + * Both request and response message uses same seq ID, + * command class and command. 
+ */ +#define SBE_CMD_CTRL_DEADMAN_LOOP 0xD101 +#define SBE_CMD_MULTI_SCOM 0xD201 +#define SBE_CMD_PUT_RING_FORM_IMAGE 0xD301 +#define SBE_CMD_CONTROL_TIMER 0xD401 +#define SBE_CMD_GET_ARCHITECTED_REG 0xD501 +#define SBE_CMD_CLR_ARCHITECTED_REG 0xD502 +#define SBE_CMD_SET_UNSEC_MEM_WINDOW 0xD601 +#define SBE_CMD_GET_SBE_FFDC 0xD701 +#define SBE_CMD_GET_CAPABILITY 0xD702 +#define SBE_CMD_READ_SBE_SEEPROM 0xD703 +#define SBE_CMD_SET_FFDC_ADDR 0xD704 +#define SBE_CMD_QUIESCE_SBE 0xD705 +#define SBE_CMD_SET_FABRIC_ID_MAP 0xD706 +#define SBE_CMD_STASH_MPIPL_CONFIG 0xD707 + +/* SBE MBOX control flags */ + +/* Generic flags */ +#define SBE_CMD_CTRL_RESP_REQ 0x0100 +#define SBE_CMD_CTRL_ACK_REQ 0x0200 + +/* Deadman loop */ +#define CTRL_DEADMAN_LOOP_START 0x0001 +#define CTRL_DEADMAN_LOOP_STOP 0x0002 + +/* Control timer */ +#define CONTROL_TIMER_START 0x0001 +#define CONTROL_TIMER_STOP 0x0002 + +/* Stash MPIPL config */ +#define SBE_STASH_KEY_SKIBOOT_BASE 0x03 + +static void sbe_timer(void *opaque) +{ + PnvSBE *sbe = opaque; + + trace_pnv_sbe_cmd_timer_expired(); + + pnv_sbe_set_host_doorbell(sbe, sbe->host_doorbell | SBE_HOST_TIMER_EXPIRY); +} + +static void do_sbe_msg(PnvSBE *sbe) +{ + struct sbe_msg msg; + uint16_t cmd, ctrl_flags, seq_id; + int i; + + memset(&msg, 0, sizeof(msg)); + + for (i = 0; i < 4; i++) { + msg.reg[i] = sbe->mbox[i]; + } + + cmd = msg.reg[0]; + seq_id = msg.reg[0] >> 16; + ctrl_flags = msg.reg[0] >> 32; + + trace_pnv_sbe_msg_recv(cmd, seq_id, ctrl_flags); + + if (ctrl_flags & SBE_CMD_CTRL_ACK_REQ) { + pnv_sbe_set_host_doorbell(sbe, sbe->host_doorbell | SBE_HOST_MSG_READ); + } + + switch (cmd) { + case SBE_CMD_CONTROL_TIMER: + if (ctrl_flags & CONTROL_TIMER_START) { + uint64_t us = msg.reg[1]; + trace_pnv_sbe_cmd_timer_start(us); + timer_mod(sbe->timer, qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) + us); + } + if (ctrl_flags & CONTROL_TIMER_STOP) { + trace_pnv_sbe_cmd_timer_stop(); + timer_del(sbe->timer); + } + break; + default: + qemu_log_mask(LOG_UNIMP, "SBE Unimplemented command: 0x%x\n", cmd); + } +} + +static void pnv_sbe_set_sbe_doorbell(PnvSBE *sbe, uint64_t val) +{ + val &= HOST_SBE_MSG_WAITING; + sbe->sbe_doorbell = val; + + if (val & HOST_SBE_MSG_WAITING) { + sbe->sbe_doorbell &= ~HOST_SBE_MSG_WAITING; + do_sbe_msg(sbe); + } +} + +static uint64_t pnv_sbe_power9_xscom_mbox_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvSBE *sbe = PNV_SBE(opaque); + uint32_t offset = addr >> 3; + uint64_t val = 0; + + if (offset <= PSU_HOST_SBE_MBOX_REG7) { + uint32_t idx = offset - PSU_HOST_SBE_MBOX_REG0; + val = sbe->mbox[idx]; + } else { + switch (offset) { + case PSU_SBE_DOORBELL_REG_RW: + val = sbe->sbe_doorbell; + break; + case PSU_HOST_DOORBELL_REG_RW: + val = sbe->host_doorbell; + break; + default: + qemu_log_mask(LOG_UNIMP, "SBE Unimplemented register: Ox%" + HWADDR_PRIx "\n", addr >> 3); + } + } + + trace_pnv_sbe_xscom_mbox_read(addr, val); + + return val; +} + +static void pnv_sbe_power9_xscom_mbox_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvSBE *sbe = PNV_SBE(opaque); + uint32_t offset = addr >> 3; + + trace_pnv_sbe_xscom_mbox_write(addr, val); + + if (offset <= PSU_HOST_SBE_MBOX_REG7) { + uint32_t idx = offset - PSU_HOST_SBE_MBOX_REG0; + sbe->mbox[idx] = val; + } else { + switch (offset) { + case PSU_SBE_DOORBELL_REG_RW: + pnv_sbe_set_sbe_doorbell(sbe, val); + break; + case PSU_SBE_DOORBELL_REG_AND: + pnv_sbe_set_sbe_doorbell(sbe, sbe->sbe_doorbell & val); + break; + case PSU_SBE_DOORBELL_REG_OR: + pnv_sbe_set_sbe_doorbell(sbe, 
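do_sbe_msg() above unpacks mailbox register 0 as command (bits 0-15), sequence id (bits 16-31) and control flags (bits 32-47), with register 1 carrying the payload. A purely illustrative sketch of how a host would pack a "start the SBE timer" request under that layout (the XSCOM write path is summarised in comments rather than spelled out):

/* Illustrative host/OPAL-side packing, mirroring the layout read by do_sbe_msg() */
uint16_t seq_id = 1;                          /* echoed back in the response */
uint64_t mbox0  = SBE_CMD_CONTROL_TIMER       /* 0xD401: class 0xD4, command 0x01 */
                | ((uint64_t)seq_id << 16)
                | ((uint64_t)(SBE_CMD_CTRL_ACK_REQ | CONTROL_TIMER_START) << 32);
uint64_t mbox1  = 500;                        /* timeout, in microseconds */

/*
 * The host would XSCOM-write mbox0/mbox1 to PSU_HOST_SBE_MBOX_REG0/1, then set
 * HOST_SBE_MSG_WAITING via PSU_SBE_DOORBELL_REG_OR.  The model acknowledges
 * with SBE_HOST_MSG_READ and, once the timer fires, raises
 * SBE_HOST_TIMER_EXPIRY through its psi_irq output.
 */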
sbe->sbe_doorbell | val); + break; + + case PSU_HOST_DOORBELL_REG_RW: + pnv_sbe_set_host_doorbell(sbe, val); + break; + case PSU_HOST_DOORBELL_REG_AND: + pnv_sbe_set_host_doorbell(sbe, sbe->host_doorbell & val); + break; + case PSU_HOST_DOORBELL_REG_OR: + pnv_sbe_set_host_doorbell(sbe, sbe->host_doorbell | val); + break; + + default: + qemu_log_mask(LOG_UNIMP, "SBE Unimplemented register: Ox%" + HWADDR_PRIx "\n", addr >> 3); + } + } +} + +static const MemoryRegionOps pnv_sbe_power9_xscom_mbox_ops = { + .read = pnv_sbe_power9_xscom_mbox_read, + .write = pnv_sbe_power9_xscom_mbox_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void pnv_sbe_power9_class_init(ObjectClass *klass, void *data) +{ + PnvSBEClass *psc = PNV_SBE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "PowerNV SBE Controller (POWER9)"; + psc->xscom_ctrl_size = PNV9_XSCOM_SBE_CTRL_SIZE; + psc->xscom_ctrl_ops = &pnv_sbe_power9_xscom_ctrl_ops; + psc->xscom_mbox_size = PNV9_XSCOM_SBE_MBOX_SIZE; + psc->xscom_mbox_ops = &pnv_sbe_power9_xscom_mbox_ops; +} + +static const TypeInfo pnv_sbe_power9_type_info = { + .name = TYPE_PNV9_SBE, + .parent = TYPE_PNV_SBE, + .instance_size = sizeof(PnvSBE), + .class_init = pnv_sbe_power9_class_init, +}; + +static void pnv_sbe_power10_class_init(ObjectClass *klass, void *data) +{ + PnvSBEClass *psc = PNV_SBE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "PowerNV SBE Controller (POWER10)"; + psc->xscom_ctrl_size = PNV10_XSCOM_SBE_CTRL_SIZE; + psc->xscom_ctrl_ops = &pnv_sbe_power9_xscom_ctrl_ops; + psc->xscom_mbox_size = PNV10_XSCOM_SBE_MBOX_SIZE; + psc->xscom_mbox_ops = &pnv_sbe_power9_xscom_mbox_ops; +} + +static const TypeInfo pnv_sbe_power10_type_info = { + .name = TYPE_PNV10_SBE, + .parent = TYPE_PNV9_SBE, + .class_init = pnv_sbe_power10_class_init, +}; + +static void pnv_sbe_realize(DeviceState *dev, Error **errp) +{ + PnvSBE *sbe = PNV_SBE(dev); + PnvSBEClass *psc = PNV_SBE_GET_CLASS(sbe); + + /* XScom regions for SBE registers */ + pnv_xscom_region_init(&sbe->xscom_ctrl_regs, OBJECT(dev), + psc->xscom_ctrl_ops, sbe, "xscom-sbe-ctrl", + psc->xscom_ctrl_size); + pnv_xscom_region_init(&sbe->xscom_mbox_regs, OBJECT(dev), + psc->xscom_mbox_ops, sbe, "xscom-sbe-mbox", + psc->xscom_mbox_size); + + qdev_init_gpio_out(DEVICE(dev), &sbe->psi_irq, 1); + + sbe->timer = timer_new_us(QEMU_CLOCK_VIRTUAL, sbe_timer, sbe); +} + +static void pnv_sbe_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = pnv_sbe_realize; + dc->desc = "PowerNV SBE Controller"; + dc->user_creatable = false; +} + +static const TypeInfo pnv_sbe_type_info = { + .name = TYPE_PNV_SBE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(PnvSBE), + .class_init = pnv_sbe_class_init, + .class_size = sizeof(PnvSBEClass), + .abstract = true, +}; + +static void pnv_sbe_register_types(void) +{ + type_register_static(&pnv_sbe_type_info); + type_register_static(&pnv_sbe_power9_type_info); + type_register_static(&pnv_sbe_power10_type_info); +} + +type_init(pnv_sbe_register_types); diff --git a/hw/ppc/pnv_xscom.c b/hw/ppc/pnv_xscom.c index be7018e8ac..79f10de57f 100644 --- a/hw/ppc/pnv_xscom.c +++ b/hw/ppc/pnv_xscom.c @@ -284,11 +284,20 @@ int pnv_dt_xscom(PnvChip *chip, void *fdt, int root_offset, _FDT(xscom_offset); g_free(name); _FDT((fdt_setprop_cell(fdt, xscom_offset, "ibm,chip-id", chip->chip_id))); + /* + * On P10, the 
xscom bus id has been deprecated and the chip id is + * calculated from the "Primary topology table index". See skiboot. + */ + _FDT((fdt_setprop_cell(fdt, xscom_offset, "ibm,primary-topology-index", + chip->chip_id))); _FDT((fdt_setprop_cell(fdt, xscom_offset, "#address-cells", 1))); _FDT((fdt_setprop_cell(fdt, xscom_offset, "#size-cells", 1))); _FDT((fdt_setprop(fdt, xscom_offset, "reg", reg, sizeof(reg)))); _FDT((fdt_setprop(fdt, xscom_offset, "compatible", compat, compat_size))); _FDT((fdt_setprop(fdt, xscom_offset, "scom-controller", NULL, 0))); + if (chip->chip_id == 0) { + _FDT((fdt_setprop(fdt, xscom_offset, "primary", NULL, 0))); + } args.fdt = fdt; args.xscom_offset = xscom_offset; diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index 7375bf4fa9..fbdc48911e 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -37,28 +37,11 @@ #include "migration/vmstate.h" #include "trace.h" -//#define PPC_DEBUG_IRQ -//#define PPC_DEBUG_TB - -#ifdef PPC_DEBUG_IRQ -# define LOG_IRQ(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__) -#else -# define LOG_IRQ(...) do { } while (0) -#endif - - -#ifdef PPC_DEBUG_TB -# define LOG_TB(...) qemu_log(__VA_ARGS__) -#else -# define LOG_TB(...) do { } while (0) -#endif - static void cpu_ppc_tb_stop (CPUPPCState *env); static void cpu_ppc_tb_start (CPUPPCState *env); -void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level) +void ppc_set_irq(PowerPCCPU *cpu, int irq, int level) { - CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; unsigned int old_pending; bool locked = false; @@ -72,23 +55,18 @@ void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level) old_pending = env->pending_interrupts; if (level) { - env->pending_interrupts |= 1 << n_IRQ; - cpu_interrupt(cs, CPU_INTERRUPT_HARD); + env->pending_interrupts |= irq; } else { - env->pending_interrupts &= ~(1 << n_IRQ); - if (env->pending_interrupts == 0) { - cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); - } + env->pending_interrupts &= ~irq; } if (old_pending != env->pending_interrupts) { - kvmppc_set_interrupt(cpu, n_IRQ, level); + ppc_maybe_interrupt(env); + kvmppc_set_interrupt(cpu, irq, level); } - - LOG_IRQ("%s: %p n_IRQ %d level %d => pending %08" PRIx32 - "req %08x\n", __func__, env, n_IRQ, level, - env->pending_interrupts, CPU(cpu)->interrupt_request); + trace_ppc_irq_set_exit(env, irq, level, env->pending_interrupts, + CPU(cpu)->interrupt_request); if (locked) { qemu_mutex_unlock_iothread(); @@ -102,8 +80,8 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level) CPUPPCState *env = &cpu->env; int cur_level; - LOG_IRQ("%s: env %p pin %d level %d\n", __func__, - env, pin, level); + trace_ppc_irq_set(env, pin, level); + cur_level = (env->irq_input_state >> pin) & 1; /* Don't generate spurious events */ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { @@ -112,8 +90,7 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level) switch (pin) { case PPC6xx_INPUT_TBEN: /* Level sensitive - active high */ - LOG_IRQ("%s: %s the time base\n", - __func__, level ? 
"start" : "stop"); + trace_ppc_irq_set_state("time base", level); if (level) { cpu_ppc_tb_start(env); } else { @@ -122,14 +99,12 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level) break; case PPC6xx_INPUT_INT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the external IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("external IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case PPC6xx_INPUT_SMI: /* Level sensitive - active high */ - LOG_IRQ("%s: set the SMI IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("SMI IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level); break; case PPC6xx_INPUT_MCP: @@ -138,8 +113,7 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level) * 603/604/740/750: check HID0[EMCP] */ if (cur_level == 1 && level == 0) { - LOG_IRQ("%s: raise machine check state\n", - __func__); + trace_ppc_irq_set_state("machine check", 1); ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1); } break; @@ -148,26 +122,23 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level) /* XXX: TODO: relay the signal to CKSTP_OUT pin */ /* XXX: Note that the only way to restart the CPU is to reset it */ if (level) { - LOG_IRQ("%s: stop the CPU\n", __func__); + trace_ppc_irq_cpu("stop"); cs->halted = 1; } break; case PPC6xx_INPUT_HRESET: /* Level sensitive - active low */ if (level) { - LOG_IRQ("%s: reset the CPU\n", __func__); + trace_ppc_irq_reset("CPU"); cpu_interrupt(cs, CPU_INTERRUPT_RESET); } break; case PPC6xx_INPUT_SRESET: - LOG_IRQ("%s: set the RESET IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("RESET IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level); break; default: - /* Unknown pin - do nothing */ - LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); - return; + g_assert_not_reached(); } if (level) env->irq_input_state |= 1 << pin; @@ -178,10 +149,7 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level) void ppc6xx_irq_init(PowerPCCPU *cpu) { - CPUPPCState *env = &cpu->env; - - env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, cpu, - PPC6xx_INPUT_NB); + qdev_init_gpio_in(DEVICE(cpu), ppc6xx_set_irq, PPC6xx_INPUT_NB); } #if defined(TARGET_PPC64) @@ -192,8 +160,8 @@ static void ppc970_set_irq(void *opaque, int pin, int level) CPUPPCState *env = &cpu->env; int cur_level; - LOG_IRQ("%s: env %p pin %d level %d\n", __func__, - env, pin, level); + trace_ppc_irq_set(env, pin, level); + cur_level = (env->irq_input_state >> pin) & 1; /* Don't generate spurious events */ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { @@ -202,14 +170,12 @@ static void ppc970_set_irq(void *opaque, int pin, int level) switch (pin) { case PPC970_INPUT_INT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the external IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("external IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case PPC970_INPUT_THINT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the SMI IRQ state to %d\n", __func__, - level); + trace_ppc_irq_set_state("SMI IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level); break; case PPC970_INPUT_MCP: @@ -218,8 +184,7 @@ static void ppc970_set_irq(void *opaque, int pin, int level) * 603/604/740/750: check HID0[EMCP] */ if (cur_level == 1 && level == 0) { - LOG_IRQ("%s: raise machine check state\n", - __func__); + trace_ppc_irq_set_state("machine check", 1); ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1); } break; @@ -227,10 +192,10 @@ static void ppc970_set_irq(void 
*opaque, int pin, int level) /* Level sensitive - active low */ /* XXX: TODO: relay the signal to CKSTP_OUT pin */ if (level) { - LOG_IRQ("%s: stop the CPU\n", __func__); + trace_ppc_irq_cpu("stop"); cs->halted = 1; } else { - LOG_IRQ("%s: restart the CPU\n", __func__); + trace_ppc_irq_cpu("restart"); cs->halted = 0; qemu_cpu_kick(cs); } @@ -242,19 +207,15 @@ static void ppc970_set_irq(void *opaque, int pin, int level) } break; case PPC970_INPUT_SRESET: - LOG_IRQ("%s: set the RESET IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("RESET IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level); break; case PPC970_INPUT_TBEN: - LOG_IRQ("%s: set the TBEN state to %d\n", __func__, - level); + trace_ppc_irq_set_state("TBEN IRQ", level); /* XXX: TODO */ break; default: - /* Unknown pin - do nothing */ - LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); - return; + g_assert_not_reached(); } if (level) env->irq_input_state |= 1 << pin; @@ -265,10 +226,7 @@ static void ppc970_set_irq(void *opaque, int pin, int level) void ppc970_irq_init(PowerPCCPU *cpu) { - CPUPPCState *env = &cpu->env; - - env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, cpu, - PPC970_INPUT_NB); + qdev_init_gpio_in(DEVICE(cpu), ppc970_set_irq, PPC970_INPUT_NB); } /* POWER7 internal IRQ controller */ @@ -276,29 +234,22 @@ static void power7_set_irq(void *opaque, int pin, int level) { PowerPCCPU *cpu = opaque; - LOG_IRQ("%s: env %p pin %d level %d\n", __func__, - &cpu->env, pin, level); + trace_ppc_irq_set(&cpu->env, pin, level); switch (pin) { case POWER7_INPUT_INT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the external IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("external IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; default: - /* Unknown pin - do nothing */ - LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); - return; + g_assert_not_reached(); } } void ppcPOWER7_irq_init(PowerPCCPU *cpu) { - CPUPPCState *env = &cpu->env; - - env->irq_inputs = (void **)qemu_allocate_irqs(&power7_set_irq, cpu, - POWER7_INPUT_NB); + qdev_init_gpio_in(DEVICE(cpu), power7_set_irq, POWER7_INPUT_NB); } /* POWER9 internal IRQ controller */ @@ -306,35 +257,28 @@ static void power9_set_irq(void *opaque, int pin, int level) { PowerPCCPU *cpu = opaque; - LOG_IRQ("%s: env %p pin %d level %d\n", __func__, - &cpu->env, pin, level); + trace_ppc_irq_set(&cpu->env, pin, level); switch (pin) { case POWER9_INPUT_INT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the external IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("external IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case POWER9_INPUT_HINT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the external IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("HV external IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level); break; default: - /* Unknown pin - do nothing */ - LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); + g_assert_not_reached(); return; } } void ppcPOWER9_irq_init(PowerPCCPU *cpu) { - CPUPPCState *env = &cpu->env; - - env->irq_inputs = (void **)qemu_allocate_irqs(&power9_set_irq, cpu, - POWER9_INPUT_NB); + qdev_init_gpio_in(DEVICE(cpu), power9_set_irq, POWER9_INPUT_NB); } #endif /* defined(TARGET_PPC64) */ @@ -375,6 +319,8 @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val) { PowerPCCPU *cpu = env_archcpu(env); + qemu_mutex_lock_iothread(); + switch ((val >> 28) & 0x3) { case 0x0: /* No action */ @@ -392,6 +338,8 @@ void 
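With env->irq_inputs gone, every CPU model above now exposes its interrupt pins as qdev input GPIOs, so board code asserts a pin through the regular GPIO helpers; ppc_set_irq() likewise ORs its argument straight into pending_interrupts, which assumes the PPC_INTERRUPT_* constants are defined as mask bits elsewhere in the series. A minimal sketch of the board-side usage this implies:

/* Sketch (assumed board-side usage; 'cpu' is any realized PowerPCCPU) */
qemu_irq ext_pin = qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_INT);

qemu_set_irq(ext_pin, 1);    /* assert the level-sensitive external interrupt */
qemu_set_irq(ext_pin, 0);    /* and release it again */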
store_40x_dbcr0(CPUPPCState *env, uint32_t val) ppc40x_system_reset(cpu); break; } + + qemu_mutex_unlock_iothread(); } /* PowerPC 40x internal IRQ controller */ @@ -401,8 +349,8 @@ static void ppc40x_set_irq(void *opaque, int pin, int level) CPUPPCState *env = &cpu->env; int cur_level; - LOG_IRQ("%s: env %p pin %d level %d\n", __func__, - env, pin, level); + trace_ppc_irq_set(env, pin, level); + cur_level = (env->irq_input_state >> pin) & 1; /* Don't generate spurious events */ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { @@ -411,57 +359,51 @@ static void ppc40x_set_irq(void *opaque, int pin, int level) switch (pin) { case PPC40x_INPUT_RESET_SYS: if (level) { - LOG_IRQ("%s: reset the PowerPC system\n", - __func__); + trace_ppc_irq_reset("system"); ppc40x_system_reset(cpu); } break; case PPC40x_INPUT_RESET_CHIP: if (level) { - LOG_IRQ("%s: reset the PowerPC chip\n", __func__); + trace_ppc_irq_reset("chip"); ppc40x_chip_reset(cpu); } break; case PPC40x_INPUT_RESET_CORE: /* XXX: TODO: update DBSR[MRR] */ if (level) { - LOG_IRQ("%s: reset the PowerPC core\n", __func__); + trace_ppc_irq_reset("core"); ppc40x_core_reset(cpu); } break; case PPC40x_INPUT_CINT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the critical IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("critical IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level); break; case PPC40x_INPUT_INT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the external IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("external IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case PPC40x_INPUT_HALT: /* Level sensitive - active low */ if (level) { - LOG_IRQ("%s: stop the CPU\n", __func__); + trace_ppc_irq_cpu("stop"); cs->halted = 1; } else { - LOG_IRQ("%s: restart the CPU\n", __func__); + trace_ppc_irq_cpu("restart"); cs->halted = 0; qemu_cpu_kick(cs); } break; case PPC40x_INPUT_DEBUG: /* Level sensitive - active high */ - LOG_IRQ("%s: set the debug pin state to %d\n", - __func__, level); + trace_ppc_irq_set_state("debug pin", level); ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level); break; default: - /* Unknown pin - do nothing */ - LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); - return; + g_assert_not_reached(); } if (level) env->irq_input_state |= 1 << pin; @@ -472,10 +414,7 @@ static void ppc40x_set_irq(void *opaque, int pin, int level) void ppc40x_irq_init(PowerPCCPU *cpu) { - CPUPPCState *env = &cpu->env; - - env->irq_inputs = (void **)qemu_allocate_irqs(&ppc40x_set_irq, - cpu, PPC40x_INPUT_NB); + qdev_init_gpio_in(DEVICE(cpu), ppc40x_set_irq, PPC40x_INPUT_NB); } /* PowerPC E500 internal IRQ controller */ @@ -485,47 +424,41 @@ static void ppce500_set_irq(void *opaque, int pin, int level) CPUPPCState *env = &cpu->env; int cur_level; - LOG_IRQ("%s: env %p pin %d level %d\n", __func__, - env, pin, level); + trace_ppc_irq_set(env, pin, level); + cur_level = (env->irq_input_state >> pin) & 1; /* Don't generate spurious events */ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { switch (pin) { case PPCE500_INPUT_MCK: if (level) { - LOG_IRQ("%s: reset the PowerPC system\n", - __func__); + trace_ppc_irq_reset("system"); qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); } break; case PPCE500_INPUT_RESET_CORE: if (level) { - LOG_IRQ("%s: reset the PowerPC core\n", __func__); + trace_ppc_irq_reset("core"); ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level); } break; case PPCE500_INPUT_CINT: /* Level sensitive - active high */ - 
LOG_IRQ("%s: set the critical IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("critical IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level); break; case PPCE500_INPUT_INT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the core IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("core IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case PPCE500_INPUT_DEBUG: /* Level sensitive - active high */ - LOG_IRQ("%s: set the debug pin state to %d\n", - __func__, level); + trace_ppc_irq_set_state("debug pin", level); ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level); break; default: - /* Unknown pin - do nothing */ - LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); - return; + g_assert_not_reached(); } if (level) env->irq_input_state |= 1 << pin; @@ -536,10 +469,7 @@ static void ppce500_set_irq(void *opaque, int pin, int level) void ppce500_irq_init(PowerPCCPU *cpu) { - CPUPPCState *env = &cpu->env; - - env->irq_inputs = (void **)qemu_allocate_irqs(&ppce500_set_irq, - cpu, PPCE500_INPUT_NB); + qdev_init_gpio_in(DEVICE(cpu), ppce500_set_irq, PPCE500_INPUT_NB); } /* Enable or Disable the E500 EPR capability */ @@ -576,7 +506,7 @@ uint64_t cpu_ppc_load_tbl (CPUPPCState *env) } tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); - LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); + trace_ppc_tb_load(tb); return tb; } @@ -587,7 +517,7 @@ static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env) uint64_t tb; tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); - LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); + trace_ppc_tb_load(tb); return tb >> 32; } @@ -607,8 +537,7 @@ static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk, *tb_offsetp = value - muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND); - LOG_TB("%s: tb %016" PRIx64 " offset %08" PRIx64 "\n", - __func__, value, *tb_offsetp); + trace_ppc_tb_store(value, *tb_offsetp); } void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value) @@ -644,7 +573,7 @@ uint64_t cpu_ppc_load_atbl (CPUPPCState *env) uint64_t tb; tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset); - LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); + trace_ppc_tb_load(tb); return tb; } @@ -655,7 +584,7 @@ uint32_t cpu_ppc_load_atbu (CPUPPCState *env) uint64_t tb; tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset); - LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); + trace_ppc_tb_load(tb); return tb >> 32; } @@ -774,7 +703,7 @@ static inline int64_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next) } else { decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND); } - LOG_TB("%s: %016" PRIx64 "\n", __func__, decr); + trace_ppc_decr_load(decr); return decr; } @@ -833,7 +762,7 @@ uint64_t cpu_ppc_load_purr (CPUPPCState *env) static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu) { /* Raise it */ - LOG_TB("raise decrementer exception\n"); + trace_ppc_decr_excp("raise"); ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1); } @@ -847,7 +776,7 @@ static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu) CPUPPCState *env = &cpu->env; /* Raise it */ - LOG_TB("raise hv decrementer exception\n"); + trace_ppc_decr_excp("raise HV"); /* The architecture specifies that we don't deliver HDEC * interrupts in a PM state. 
Not only they don't cause a @@ -873,17 +802,16 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp, CPUPPCState *env = &cpu->env; ppc_tb_t *tb_env = env->tb_env; uint64_t now, next; - bool negative; + int64_t signed_value; + int64_t signed_decr; /* Truncate value to decr_width and sign extend for simplicity */ - value &= ((1ULL << nr_bits) - 1); - negative = !!(value & (1ULL << (nr_bits - 1))); - if (negative) { - value |= (0xFFFFFFFFULL << nr_bits); - } + value = extract64(value, 0, nr_bits); + decr = extract64(decr, 0, nr_bits); + signed_value = sextract64(value, 0, nr_bits); + signed_decr = sextract64(decr, 0, nr_bits); - LOG_TB("%s: " TARGET_FMT_lx " => " TARGET_FMT_lx "\n", __func__, - decr, value); + trace_ppc_decr_store(nr_bits, decr, value); if (kvm_enabled()) { /* KVM handles decrementer exceptions, we don't need our own timer */ @@ -904,15 +832,15 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp, * an edge interrupt, so raise it here too. */ if ((value < 3) || - ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && negative) || - ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && negative - && !(decr & (1ULL << (nr_bits - 1))))) { + ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && signed_value < 0) || + ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && signed_value < 0 + && signed_decr >= 0)) { (*raise_excp)(cpu); return; } /* On MSB level based systems a 0 for the MSB stops interrupt delivery */ - if (!negative && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) { + if (signed_value >= 0 && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) { (*lower_excp)(cpu); } @@ -1100,7 +1028,6 @@ const VMStateDescription vmstate_ppc_timebase = { .name = "timebase", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .pre_save = timebase_pre_save, .fields = (VMStateField []) { VMSTATE_UINT64(guest_timebase, PPCTimebase), @@ -1115,7 +1042,7 @@ clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq) PowerPCCPU *cpu = env_archcpu(env); ppc_tb_t *tb_env; - tb_env = g_malloc0(sizeof(ppc_tb_t)); + tb_env = g_new0(ppc_tb_t, 1); env->tb_env = tb_env; tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED; if (is_book3s_arch2x(env)) { @@ -1124,7 +1051,7 @@ clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq) } /* Create new timer */ tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu); - if (env->has_hv_mode) { + if (env->has_hv_mode && !cpu->vhyp) { tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb, cpu); } else { @@ -1135,25 +1062,32 @@ clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq) return &cpu_ppc_set_tb_clk; } -/* Specific helpers for POWER & PowerPC 601 RTC */ -void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value) +void cpu_ppc_tb_free(CPUPPCState *env) { - _cpu_ppc_store_tbu(env, value); + timer_free(env->tb_env->decr_timer); + timer_free(env->tb_env->hdecr_timer); + g_free(env->tb_env); } -uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env) +/* cpu_ppc_hdecr_init may be used if the timer is not used by HDEC emulation */ +void cpu_ppc_hdecr_init(CPUPPCState *env) { - return _cpu_ppc_load_tbu(env); + PowerPCCPU *cpu = env_archcpu(env); + + assert(env->tb_env->hdecr_timer == NULL); + + env->tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + &cpu_ppc_hdecr_cb, cpu); } -void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value) +void cpu_ppc_hdecr_exit(CPUPPCState *env) { - cpu_ppc_store_tbl(env, value & 0x3FFFFF80); -} + PowerPCCPU *cpu = env_archcpu(env); -uint32_t 
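The rewritten underflow test above replaces the hand-rolled sign handling with extract64()/sextract64(), so "negative" simply means the sign-extended value is below zero. A small worked sketch for a 32-bit decrementer, with illustrative values:

/* Worked sketch for nr_bits = 32 (values chosen only for illustration) */
uint64_t value        = 0xFFFFFFFFull;                    /* guest stores all-ones */
int64_t  signed_value = sextract64(value, 0, 32);         /* == -1 */
int64_t  signed_decr  = sextract64(0x00000005ull, 0, 32); /* previous DECR, == 5 */

/*
 * UNDERFLOW_LEVEL books raise DEC while signed_value < 0;
 * UNDERFLOW_TRIGGERED books raise it only on the >= 0 to < 0 edge,
 * i.e. signed_value < 0 && signed_decr >= 0, matching the hunk above.
 */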
cpu_ppc601_load_rtcl (CPUPPCState *env) -{ - return cpu_ppc_load_tbl(env) & 0x3FFFFF80; + timer_free(env->tb_env->hdecr_timer); + env->tb_env->hdecr_timer = NULL; + + cpu_ppc_hdecr_lower(cpu); } /*****************************************************************************/ @@ -1175,14 +1109,12 @@ struct ppc40x_timer_t { /* Fixed interval timer */ static void cpu_4xx_fit_cb (void *opaque) { - PowerPCCPU *cpu; - CPUPPCState *env; + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; ppc_tb_t *tb_env; ppc40x_timer_t *ppc40x_timer; uint64_t now, next; - env = opaque; - cpu = env_archcpu(env); tb_env = env->tb_env; ppc40x_timer = tb_env->opaque; now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); @@ -1211,9 +1143,8 @@ static void cpu_4xx_fit_cb (void *opaque) if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) { ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1); } - LOG_TB("%s: ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__, - (int)((env->spr[SPR_40x_TCR] >> 23) & 0x1), - env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); + trace_ppc4xx_fit((int)((env->spr[SPR_40x_TCR] >> 23) & 0x1), + env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); } /* Programmable interval timer */ @@ -1227,11 +1158,10 @@ static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp) !((env->spr[SPR_40x_TCR] >> 26) & 0x1) || (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) { /* Stop PIT */ - LOG_TB("%s: stop PIT\n", __func__); + trace_ppc4xx_pit_stop(); timer_del(tb_env->decr_timer); } else { - LOG_TB("%s: start PIT %016" PRIx64 "\n", - __func__, ppc40x_timer->pit_reload); + trace_ppc4xx_pit_start(ppc40x_timer->pit_reload); now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); next = now + muldiv64(ppc40x_timer->pit_reload, NANOSECONDS_PER_SECOND, tb_env->decr_freq); @@ -1246,13 +1176,11 @@ static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp) static void cpu_4xx_pit_cb (void *opaque) { - PowerPCCPU *cpu; - CPUPPCState *env; + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; ppc_tb_t *tb_env; ppc40x_timer_t *ppc40x_timer; - env = opaque; - cpu = env_archcpu(env); tb_env = env->tb_env; ppc40x_timer = tb_env->opaque; env->spr[SPR_40x_TSR] |= 1 << 27; @@ -1260,9 +1188,7 @@ static void cpu_4xx_pit_cb (void *opaque) ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1); } start_stop_pit(env, tb_env, 1); - LOG_TB("%s: ar %d ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx " " - "%016" PRIx64 "\n", __func__, - (int)((env->spr[SPR_40x_TCR] >> 22) & 0x1), + trace_ppc4xx_pit((int)((env->spr[SPR_40x_TCR] >> 22) & 0x1), (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1), env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR], ppc40x_timer->pit_reload); @@ -1271,14 +1197,12 @@ static void cpu_4xx_pit_cb (void *opaque) /* Watchdog timer */ static void cpu_4xx_wdt_cb (void *opaque) { - PowerPCCPU *cpu; - CPUPPCState *env; + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; ppc_tb_t *tb_env; ppc40x_timer_t *ppc40x_timer; uint64_t now, next; - env = opaque; - cpu = env_archcpu(env); tb_env = env->tb_env; ppc40x_timer = tb_env->opaque; now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); @@ -1302,8 +1226,7 @@ static void cpu_4xx_wdt_cb (void *opaque) next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq); if (next == now) next++; - LOG_TB("%s: TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__, - env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); + trace_ppc4xx_wdt(env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) { case 0x0: case 0x1: @@ -1346,7 +1269,7 @@ void store_40x_pit 
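start_stop_pit() above only arms the PIT when a non-zero reload value is programmed and the enable bit tested by the >> 26 shift is set, and only re-arms after an exception when the auto-reload bit tested by the >> 22 shift is also set. A sketch of the guest-visible sequence those checks assume, using the store_40x_tcr() helper added just below (bit numbers follow the shifts in the code, not the 40x manuals):

/* Illustrative programming sequence matching the checks in start_stop_pit() */
store_40x_pit(env, 0x100000);                   /* non-zero PIT reload value */
store_40x_tcr(env, env->spr[SPR_40x_TCR]
                   | (1u << 26)                 /* PIT enable bit tested above */
                   | (1u << 22));               /* auto-reload bit tested above */
/* cpu_4xx_pit_cb() will then set TSR and, with the enable bit set, raise decr_excp */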
(CPUPPCState *env, target_ulong val) tb_env = env->tb_env; ppc40x_timer = tb_env->opaque; - LOG_TB("%s val" TARGET_FMT_lx "\n", __func__, val); + trace_ppc40x_store_pit(val); ppc40x_timer->pit_reload = val; start_stop_pit(env, tb_env, 0); } @@ -1356,13 +1279,37 @@ target_ulong load_40x_pit (CPUPPCState *env) return cpu_ppc_load_decr(env); } +void store_40x_tsr(CPUPPCState *env, target_ulong val) +{ + PowerPCCPU *cpu = env_archcpu(env); + + trace_ppc40x_store_tcr(val); + + env->spr[SPR_40x_TSR] &= ~(val & 0xFC000000); + if (val & 0x80000000) { + ppc_set_irq(cpu, PPC_INTERRUPT_PIT, 0); + } +} + +void store_40x_tcr(CPUPPCState *env, target_ulong val) +{ + PowerPCCPU *cpu = env_archcpu(env); + ppc_tb_t *tb_env; + + trace_ppc40x_store_tsr(val); + + tb_env = env->tb_env; + env->spr[SPR_40x_TCR] = val & 0xFFC00000; + start_stop_pit(env, tb_env, 1); + cpu_4xx_wdt_cb(cpu); +} + static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq) { CPUPPCState *env = opaque; ppc_tb_t *tb_env = env->tb_env; - LOG_TB("%s set new frequency to %" PRIu32 "\n", __func__, - freq); + trace_ppc40x_set_tb_clk(freq); tb_env->tb_freq = freq; tb_env->decr_freq = freq; /* XXX: we should also update all timers */ @@ -1373,24 +1320,26 @@ clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq, { ppc_tb_t *tb_env; ppc40x_timer_t *ppc40x_timer; + PowerPCCPU *cpu = env_archcpu(env); + + trace_ppc40x_timers_init(freq); + + tb_env = g_new0(ppc_tb_t, 1); + ppc40x_timer = g_new0(ppc40x_timer_t, 1); - tb_env = g_malloc0(sizeof(ppc_tb_t)); env->tb_env = tb_env; tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED; - ppc40x_timer = g_malloc0(sizeof(ppc40x_timer_t)); tb_env->tb_freq = freq; tb_env->decr_freq = freq; tb_env->opaque = ppc40x_timer; - LOG_TB("%s freq %" PRIu32 "\n", __func__, freq); - if (ppc40x_timer != NULL) { - /* We use decr timer for PIT */ - tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, env); - ppc40x_timer->fit_timer = - timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, env); - ppc40x_timer->wdt_timer = - timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, env); - ppc40x_timer->decr_excp = decr_excp; - } + + /* We use decr timer for PIT */ + tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, cpu); + ppc40x_timer->fit_timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, cpu); + ppc40x_timer->wdt_timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, cpu); + ppc40x_timer->decr_excp = decr_excp; return &ppc_40x_set_tb_clk; } @@ -1424,6 +1373,7 @@ int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp) if (dcr->dcr_read == NULL) goto error; *valp = (*dcr->dcr_read)(dcr->opaque, dcrn); + trace_ppc_dcr_read(dcrn, *valp); return 0; @@ -1443,6 +1393,7 @@ int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val) dcr = &dcr_env->dcrn[dcrn]; if (dcr->dcr_write == NULL) goto error; + trace_ppc_dcr_write(dcrn, val); (*dcr->dcr_write)(dcr->opaque, dcrn, val); return 0; @@ -1482,7 +1433,7 @@ int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn), { ppc_dcr_t *dcr_env; - dcr_env = g_malloc0(sizeof(ppc_dcr_t)); + dcr_env = g_new0(ppc_dcr_t, 1); dcr_env->read_error = read_error; dcr_env->write_error = write_error; env->dcr_env = dcr_env; diff --git a/hw/ppc/ppc405.h b/hw/ppc/ppc405.h index c58f739886..9a4312691e 100644 --- a/hw/ppc/ppc405.h +++ b/hw/ppc/ppc405.h @@ -25,48 +25,162 @@ #ifndef PPC405_H #define PPC405_H +#include "qom/object.h" #include "hw/ppc/ppc4xx.h" +#include "hw/intc/ppc-uic.h" +#include "hw/i2c/ppc4xx_i2c.h" -/* Bootinfo as set-up by 
u-boot */ -typedef struct ppc4xx_bd_info_t ppc4xx_bd_info_t; -struct ppc4xx_bd_info_t { - uint32_t bi_memstart; - uint32_t bi_memsize; - uint32_t bi_flashstart; - uint32_t bi_flashsize; - uint32_t bi_flashoffset; /* 0x10 */ - uint32_t bi_sramstart; - uint32_t bi_sramsize; - uint32_t bi_bootflags; - uint32_t bi_ipaddr; /* 0x20 */ - uint8_t bi_enetaddr[6]; - uint16_t bi_ethspeed; - uint32_t bi_intfreq; - uint32_t bi_busfreq; /* 0x30 */ - uint32_t bi_baudrate; - uint8_t bi_s_version[4]; - uint8_t bi_r_version[32]; - uint32_t bi_procfreq; - uint32_t bi_plb_busfreq; - uint32_t bi_pci_busfreq; - uint8_t bi_pci_enetaddr[6]; - uint32_t bi_pci_enetaddr2[6]; - uint32_t bi_opbfreq; - uint32_t bi_iic_fast[2]; +/* PLB to OPB bridge */ +#define TYPE_PPC405_POB "ppc405-pob" +OBJECT_DECLARE_SIMPLE_TYPE(Ppc405PobState, PPC405_POB); +struct Ppc405PobState { + Ppc4xxDcrDeviceState parent_obj; + + uint32_t bear; + uint32_t besr0; + uint32_t besr1; }; -/* PowerPC 405 core */ -ram_addr_t ppc405_set_bootinfo (CPUPPCState *env, ppc4xx_bd_info_t *bd, - uint32_t flags); +/* OPB arbitrer */ +#define TYPE_PPC405_OPBA "ppc405-opba" +OBJECT_DECLARE_SIMPLE_TYPE(Ppc405OpbaState, PPC405_OPBA); +struct Ppc405OpbaState { + SysBusDevice parent_obj; -void ppc4xx_plb_init(CPUPPCState *env); -void ppc405_ebc_init(CPUPPCState *env); + MemoryRegion io; + uint8_t cr; + uint8_t pr; +}; -CPUPPCState *ppc405ep_init(MemoryRegion *address_space_mem, - MemoryRegion ram_memories[2], - hwaddr ram_bases[2], - hwaddr ram_sizes[2], - uint32_t sysclk, DeviceState **uicdev, - int do_init); +/* DMA controller */ +#define TYPE_PPC405_DMA "ppc405-dma" +OBJECT_DECLARE_SIMPLE_TYPE(Ppc405DmaState, PPC405_DMA); +struct Ppc405DmaState { + Ppc4xxDcrDeviceState parent_obj; + + qemu_irq irqs[4]; + uint32_t cr[4]; + uint32_t ct[4]; + uint32_t da[4]; + uint32_t sa[4]; + uint32_t sg[4]; + uint32_t sr; + uint32_t sgc; + uint32_t slp; + uint32_t pol; +}; + +/* GPIO */ +#define TYPE_PPC405_GPIO "ppc405-gpio" +OBJECT_DECLARE_SIMPLE_TYPE(Ppc405GpioState, PPC405_GPIO); +struct Ppc405GpioState { + SysBusDevice parent_obj; + + MemoryRegion io; + uint32_t or; + uint32_t tcr; + uint32_t osrh; + uint32_t osrl; + uint32_t tsrh; + uint32_t tsrl; + uint32_t odr; + uint32_t ir; + uint32_t rr1; + uint32_t isr1h; + uint32_t isr1l; +}; + +/* On Chip Memory */ +#define TYPE_PPC405_OCM "ppc405-ocm" +OBJECT_DECLARE_SIMPLE_TYPE(Ppc405OcmState, PPC405_OCM); +struct Ppc405OcmState { + Ppc4xxDcrDeviceState parent_obj; + + MemoryRegion ram; + MemoryRegion isarc_ram; + MemoryRegion dsarc_ram; + uint32_t isarc; + uint32_t isacntl; + uint32_t dsarc; + uint32_t dsacntl; +}; + +/* General purpose timers */ +#define TYPE_PPC405_GPT "ppc405-gpt" +OBJECT_DECLARE_SIMPLE_TYPE(Ppc405GptState, PPC405_GPT); +struct Ppc405GptState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + + int64_t tb_offset; + uint32_t tb_freq; + QEMUTimer *timer; + qemu_irq irqs[5]; + uint32_t oe; + uint32_t ol; + uint32_t im; + uint32_t is; + uint32_t ie; + uint32_t comp[5]; + uint32_t mask[5]; +}; + +#define TYPE_PPC405_CPC "ppc405-cpc" +OBJECT_DECLARE_SIMPLE_TYPE(Ppc405CpcState, PPC405_CPC); + +enum { + PPC405EP_CPU_CLK = 0, + PPC405EP_PLB_CLK = 1, + PPC405EP_OPB_CLK = 2, + PPC405EP_EBC_CLK = 3, + PPC405EP_MAL_CLK = 4, + PPC405EP_PCI_CLK = 5, + PPC405EP_UART0_CLK = 6, + PPC405EP_UART1_CLK = 7, + PPC405EP_CLK_NB = 8, +}; + +struct Ppc405CpcState { + Ppc4xxDcrDeviceState parent_obj; + + uint32_t sysclk; + clk_setup_t clk_setup[PPC405EP_CLK_NB]; + uint32_t boot; + uint32_t epctl; + uint32_t pllmr[2]; + uint32_t 
ucr; + uint32_t srr; + uint32_t jtagid; + uint32_t pci; + /* Clock and power management */ + uint32_t er; + uint32_t fr; + uint32_t sr; +}; + +#define TYPE_PPC405_SOC "ppc405-soc" +OBJECT_DECLARE_SIMPLE_TYPE(Ppc405SoCState, PPC405_SOC); + +struct Ppc405SoCState { + /* Private */ + DeviceState parent_obj; + + /* Public */ + PowerPCCPU cpu; + PPCUIC uic; + Ppc405CpcState cpc; + Ppc405GptState gpt; + Ppc405OcmState ocm; + Ppc405GpioState gpio; + Ppc405DmaState dma; + PPC4xxI2CState i2c; + Ppc4xxEbcState ebc; + Ppc405OpbaState opba; + Ppc405PobState pob; + Ppc4xxPlbState plb; + Ppc4xxMalState mal; + Ppc4xxSdramDdrState sdram; +}; #endif /* PPC405_H */ diff --git a/hw/ppc/ppc405_boards.c b/hw/ppc/ppc405_boards.c index 972a7a4a3e..4092ebc1ab 100644 --- a/hw/ppc/ppc405_boards.c +++ b/hw/ppc/ppc405_boards.c @@ -25,7 +25,6 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qapi/error.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "cpu.h" #include "hw/ppc/ppc.h" @@ -41,18 +40,330 @@ #include "qemu/error-report.h" #include "hw/loader.h" #include "qemu/cutils.h" +#include "elf.h" #define BIOS_FILENAME "ppc405_rom.bin" #define BIOS_SIZE (2 * MiB) -#define KERNEL_LOAD_ADDR 0x00000000 +#define KERNEL_LOAD_ADDR 0x01000000 #define INITRD_LOAD_ADDR 0x01800000 +#define PPC405EP_SDRAM_BASE 0x00000000 +#define PPC405EP_SRAM_BASE 0xFFF00000 +#define PPC405EP_SRAM_SIZE (512 * KiB) + #define USE_FLASH_BIOS +#define TYPE_PPC405_MACHINE MACHINE_TYPE_NAME("ppc405") +OBJECT_DECLARE_SIMPLE_TYPE(Ppc405MachineState, PPC405_MACHINE); + +struct Ppc405MachineState { + /* Private */ + MachineState parent_obj; + /* Public */ + + Ppc405SoCState soc; +}; + +/* CPU reset handler when booting directly from a loaded kernel */ +static struct boot_info { + uint32_t entry; + uint32_t bdloc; + uint32_t initrd_base; + uint32_t initrd_size; + uint32_t cmdline_base; + uint32_t cmdline_size; +} boot_info; + +static void main_cpu_reset(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + struct boot_info *bi = env->load_info; + + cpu_reset(CPU(cpu)); + + /* stack: top of sram */ + env->gpr[1] = PPC405EP_SRAM_BASE + PPC405EP_SRAM_SIZE - 8; + + /* Tune our boot state */ + env->gpr[3] = bi->bdloc; + env->gpr[4] = bi->initrd_base; + env->gpr[5] = bi->initrd_base + bi->initrd_size; + env->gpr[6] = bi->cmdline_base; + env->gpr[7] = bi->cmdline_size; + + env->nip = bi->entry; +} + +/* Bootinfo as set-up by u-boot */ +typedef struct { + uint32_t bi_memstart; + uint32_t bi_memsize; + uint32_t bi_flashstart; + uint32_t bi_flashsize; + uint32_t bi_flashoffset; /* 0x10 */ + uint32_t bi_sramstart; + uint32_t bi_sramsize; + uint32_t bi_bootflags; + uint32_t bi_ipaddr; /* 0x20 */ + uint8_t bi_enetaddr[6]; + uint16_t bi_ethspeed; + uint32_t bi_intfreq; + uint32_t bi_busfreq; /* 0x30 */ + uint32_t bi_baudrate; + uint8_t bi_s_version[4]; + uint8_t bi_r_version[32]; + uint32_t bi_procfreq; + uint32_t bi_plb_busfreq; + uint32_t bi_pci_busfreq; + uint8_t bi_pci_enetaddr[6]; + uint8_t bi_pci_enetaddr2[6]; /* PPC405EP specific */ + uint32_t bi_opbfreq; + uint32_t bi_iic_fast[2]; +} ppc4xx_bd_info_t; + +static void ppc405_set_default_bootinfo(ppc4xx_bd_info_t *bd, + ram_addr_t ram_size) +{ + memset(bd, 0, sizeof(*bd)); + + bd->bi_memstart = PPC405EP_SDRAM_BASE; + bd->bi_memsize = ram_size; + bd->bi_sramstart = PPC405EP_SRAM_BASE; + bd->bi_sramsize = PPC405EP_SRAM_SIZE; + bd->bi_bootflags = 0; + bd->bi_intfreq = 133333333; + bd->bi_busfreq = 33333333; + bd->bi_baudrate = 115200; + bd->bi_s_version[0] = 'Q'; + 
bd->bi_s_version[1] = 'M'; + bd->bi_s_version[2] = 'U'; + bd->bi_s_version[3] = '\0'; + bd->bi_r_version[0] = 'Q'; + bd->bi_r_version[1] = 'E'; + bd->bi_r_version[2] = 'M'; + bd->bi_r_version[3] = 'U'; + bd->bi_r_version[4] = '\0'; + bd->bi_procfreq = 133333333; + bd->bi_plb_busfreq = 33333333; + bd->bi_pci_busfreq = 33333333; + bd->bi_opbfreq = 33333333; +} + +static ram_addr_t __ppc405_set_bootinfo(CPUPPCState *env, ppc4xx_bd_info_t *bd) +{ + CPUState *cs = env_cpu(env); + ram_addr_t bdloc; + int i, n; + + /* We put the bd structure at the top of memory */ + if (bd->bi_memsize >= 0x01000000UL) { + bdloc = 0x01000000UL - sizeof(ppc4xx_bd_info_t); + } else { + bdloc = bd->bi_memsize - sizeof(ppc4xx_bd_info_t); + } + stl_be_phys(cs->as, bdloc + 0x00, bd->bi_memstart); + stl_be_phys(cs->as, bdloc + 0x04, bd->bi_memsize); + stl_be_phys(cs->as, bdloc + 0x08, bd->bi_flashstart); + stl_be_phys(cs->as, bdloc + 0x0C, bd->bi_flashsize); + stl_be_phys(cs->as, bdloc + 0x10, bd->bi_flashoffset); + stl_be_phys(cs->as, bdloc + 0x14, bd->bi_sramstart); + stl_be_phys(cs->as, bdloc + 0x18, bd->bi_sramsize); + stl_be_phys(cs->as, bdloc + 0x1C, bd->bi_bootflags); + stl_be_phys(cs->as, bdloc + 0x20, bd->bi_ipaddr); + for (i = 0; i < 6; i++) { + stb_phys(cs->as, bdloc + 0x24 + i, bd->bi_enetaddr[i]); + } + stw_be_phys(cs->as, bdloc + 0x2A, bd->bi_ethspeed); + stl_be_phys(cs->as, bdloc + 0x2C, bd->bi_intfreq); + stl_be_phys(cs->as, bdloc + 0x30, bd->bi_busfreq); + stl_be_phys(cs->as, bdloc + 0x34, bd->bi_baudrate); + for (i = 0; i < 4; i++) { + stb_phys(cs->as, bdloc + 0x38 + i, bd->bi_s_version[i]); + } + for (i = 0; i < 32; i++) { + stb_phys(cs->as, bdloc + 0x3C + i, bd->bi_r_version[i]); + } + stl_be_phys(cs->as, bdloc + 0x5C, bd->bi_procfreq); + stl_be_phys(cs->as, bdloc + 0x60, bd->bi_plb_busfreq); + stl_be_phys(cs->as, bdloc + 0x64, bd->bi_pci_busfreq); + for (i = 0; i < 6; i++) { + stb_phys(cs->as, bdloc + 0x68 + i, bd->bi_pci_enetaddr[i]); + } + n = 0x70; /* includes 2 bytes hole */ + for (i = 0; i < 6; i++) { + stb_phys(cs->as, bdloc + n++, bd->bi_pci_enetaddr2[i]); + } + stl_be_phys(cs->as, bdloc + n, bd->bi_opbfreq); + n += 4; + for (i = 0; i < 2; i++) { + stl_be_phys(cs->as, bdloc + n, bd->bi_iic_fast[i]); + n += 4; + } + + return bdloc; +} + +static ram_addr_t ppc405_set_bootinfo(CPUPPCState *env, ram_addr_t ram_size) +{ + ppc4xx_bd_info_t bd; + + memset(&bd, 0, sizeof(bd)); + + ppc405_set_default_bootinfo(&bd, ram_size); + + return __ppc405_set_bootinfo(env, &bd); +} + +static void boot_from_kernel(MachineState *machine, PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + hwaddr boot_entry; + hwaddr kernel_base; + int kernel_size; + hwaddr initrd_base; + int initrd_size; + ram_addr_t bdloc; + int len; + + bdloc = ppc405_set_bootinfo(env, machine->ram_size); + boot_info.bdloc = bdloc; + + kernel_size = load_elf(machine->kernel_filename, NULL, NULL, NULL, + &boot_entry, &kernel_base, NULL, NULL, + 1, PPC_ELF_MACHINE, 0, 0); + if (kernel_size < 0) { + error_report("Could not load kernel '%s' : %s", + machine->kernel_filename, load_elf_strerror(kernel_size)); + exit(1); + } + boot_info.entry = boot_entry; + + /* load initrd */ + if (machine->initrd_filename) { + initrd_base = INITRD_LOAD_ADDR; + initrd_size = load_image_targphys(machine->initrd_filename, initrd_base, + machine->ram_size - initrd_base); + if (initrd_size < 0) { + error_report("could not load initial ram disk '%s'", + machine->initrd_filename); + exit(1); + } + + boot_info.initrd_base = initrd_base; + boot_info.initrd_size = 
initrd_size; + } + + if (machine->kernel_cmdline) { + len = strlen(machine->kernel_cmdline); + bdloc -= ((len + 255) & ~255); + cpu_physical_memory_write(bdloc, machine->kernel_cmdline, len + 1); + boot_info.cmdline_base = bdloc; + boot_info.cmdline_size = bdloc + len; + } + + /* Install our custom reset handler to start from Linux */ + qemu_register_reset(main_cpu_reset, cpu); + env->load_info = &boot_info; +} + +static void ppc405_init(MachineState *machine) +{ + Ppc405MachineState *ppc405 = PPC405_MACHINE(machine); + const char *kernel_filename = machine->kernel_filename; + MemoryRegion *sysmem = get_system_memory(); + + object_initialize_child(OBJECT(machine), "soc", &ppc405->soc, + TYPE_PPC405_SOC); + object_property_set_link(OBJECT(&ppc405->soc), "dram", + OBJECT(machine->ram), &error_abort); + object_property_set_uint(OBJECT(&ppc405->soc), "sys-clk", 33333333, + &error_abort); + qdev_realize(DEVICE(&ppc405->soc), NULL, &error_fatal); + + /* allocate and load BIOS */ + if (machine->firmware) { + MemoryRegion *bios = g_new(MemoryRegion, 1); + g_autofree char *filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, + machine->firmware); + long bios_size; + + memory_region_init_rom(bios, NULL, "ef405ep.bios", BIOS_SIZE, + &error_fatal); + + if (!filename) { + error_report("Could not find firmware '%s'", machine->firmware); + exit(1); + } + + bios_size = load_image_size(filename, + memory_region_get_ram_ptr(bios), + BIOS_SIZE); + if (bios_size < 0) { + error_report("Could not load PowerPC BIOS '%s'", machine->firmware); + exit(1); + } + + bios_size = (bios_size + 0xfff) & ~0xfff; + memory_region_add_subregion(sysmem, (uint32_t)(-bios_size), bios); + } + + /* Load kernel and initrd using U-Boot images */ + if (kernel_filename && machine->firmware) { + target_ulong kernel_base, initrd_base; + long kernel_size, initrd_size; + + kernel_base = KERNEL_LOAD_ADDR; + kernel_size = load_image_targphys(kernel_filename, kernel_base, + machine->ram_size - kernel_base); + if (kernel_size < 0) { + error_report("could not load kernel '%s'", kernel_filename); + exit(1); + } + + /* load initrd */ + if (machine->initrd_filename) { + initrd_base = INITRD_LOAD_ADDR; + initrd_size = load_image_targphys(machine->initrd_filename, + initrd_base, + machine->ram_size - initrd_base); + if (initrd_size < 0) { + error_report("could not load initial ram disk '%s'", + machine->initrd_filename); + exit(1); + } + } + + /* Load ELF kernel and rootfs.cpio */ + } else if (kernel_filename && !machine->firmware) { + ppc4xx_sdram_ddr_enable(&ppc405->soc.sdram); + boot_from_kernel(machine, &ppc405->soc.cpu); + } +} + +static void ppc405_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "PPC405 generic machine"; + mc->init = ppc405_init; + mc->default_ram_size = 128 * MiB; + mc->default_ram_id = "ppc405.ram"; +} + +static const TypeInfo ppc405_machine_type = { + .name = TYPE_PPC405_MACHINE, + .parent = TYPE_MACHINE, + .instance_size = sizeof(Ppc405MachineState), + .class_init = ppc405_machine_class_init, + .abstract = true, +}; + /*****************************************************************************/ /* PPC405EP reference board (IBM) */ -/* Standalone board with: +/* + * Standalone board with: * - PowerPC 405EP CPU * - SDRAM (0x00000000) * - Flash (0xFFF80000) @@ -60,18 +371,27 @@ * - NVRAM (0xF0000000) * - FPGA (0xF0300000) */ -typedef struct ref405ep_fpga_t ref405ep_fpga_t; -struct ref405ep_fpga_t { + +#define PPC405EP_NVRAM_BASE 0xF0000000 +#define PPC405EP_FPGA_BASE 
0xF0300000 +#define PPC405EP_FLASH_BASE 0xFFF80000 + +#define TYPE_REF405EP_FPGA "ref405ep-fpga" +OBJECT_DECLARE_SIMPLE_TYPE(Ref405epFpgaState, REF405EP_FPGA); +struct Ref405epFpgaState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + uint8_t reg0; uint8_t reg1; }; static uint64_t ref405ep_fpga_readb(void *opaque, hwaddr addr, unsigned size) { - ref405ep_fpga_t *fpga; + Ref405epFpgaState *fpga = opaque; uint32_t ret; - fpga = opaque; switch (addr) { case 0x0: ret = fpga->reg0; @@ -90,9 +410,8 @@ static uint64_t ref405ep_fpga_readb(void *opaque, hwaddr addr, unsigned size) static void ref405ep_fpga_writeb(void *opaque, hwaddr addr, uint64_t value, unsigned size) { - ref405ep_fpga_t *fpga; + Ref405epFpgaState *fpga = opaque; - fpga = opaque; switch (addr) { case 0x0: /* Read only */ @@ -115,198 +434,65 @@ static const MemoryRegionOps ref405ep_fpga_ops = { .endianness = DEVICE_BIG_ENDIAN, }; -static void ref405ep_fpga_reset (void *opaque) +static void ref405ep_fpga_reset(DeviceState *dev) { - ref405ep_fpga_t *fpga; + Ref405epFpgaState *fpga = REF405EP_FPGA(dev); - fpga = opaque; fpga->reg0 = 0x00; fpga->reg1 = 0x0F; } -static void ref405ep_fpga_init(MemoryRegion *sysmem, uint32_t base) +static void ref405ep_fpga_realize(DeviceState *dev, Error **errp) { - ref405ep_fpga_t *fpga; - MemoryRegion *fpga_memory = g_new(MemoryRegion, 1); + Ref405epFpgaState *s = REF405EP_FPGA(dev); - fpga = g_malloc0(sizeof(ref405ep_fpga_t)); - memory_region_init_io(fpga_memory, NULL, &ref405ep_fpga_ops, fpga, + memory_region_init_io(&s->iomem, OBJECT(s), &ref405ep_fpga_ops, s, "fpga", 0x00000100); - memory_region_add_subregion(sysmem, base, fpga_memory); - qemu_register_reset(&ref405ep_fpga_reset, fpga); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); } +static void ref405ep_fpga_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = ref405ep_fpga_realize; + dc->reset = ref405ep_fpga_reset; + /* Reason: only works as part of a ppc405 board */ + dc->user_creatable = false; +} + +static const TypeInfo ref405ep_fpga_type = { + .name = TYPE_REF405EP_FPGA, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Ref405epFpgaState), + .class_init = ref405ep_fpga_class_init, +}; + static void ref405ep_init(MachineState *machine) { - MachineClass *mc = MACHINE_GET_CLASS(machine); - const char *bios_name = machine->firmware ?: BIOS_FILENAME; - const char *kernel_filename = machine->kernel_filename; - const char *kernel_cmdline = machine->kernel_cmdline; - const char *initrd_filename = machine->initrd_filename; - char *filename; - ppc4xx_bd_info_t bd; - CPUPPCState *env; DeviceState *dev; SysBusDevice *s; - MemoryRegion *bios; MemoryRegion *sram = g_new(MemoryRegion, 1); - ram_addr_t bdloc; - MemoryRegion *ram_memories = g_new(MemoryRegion, 2); - hwaddr ram_bases[2], ram_sizes[2]; - target_ulong sram_size; - long bios_size; - //int phy_addr = 0; - //static int phy_addr = 1; - target_ulong kernel_base, initrd_base; - long kernel_size, initrd_size; - int linux_boot; - int len; - DriveInfo *dinfo; - MemoryRegion *sysmem = get_system_memory(); - DeviceState *uicdev; - if (machine->ram_size != mc->default_ram_size) { - char *sz = size_to_str(mc->default_ram_size); - error_report("Invalid RAM size, should be %s", sz); - g_free(sz); - exit(EXIT_FAILURE); - } + ppc405_init(machine); - /* XXX: fix this */ - memory_region_init_alias(&ram_memories[0], NULL, "ef405ep.ram.alias", - machine->ram, 0, machine->ram_size); - ram_bases[0] = 0; - ram_sizes[0] = machine->ram_size; - 
memory_region_init(&ram_memories[1], NULL, "ef405ep.ram1", 0); - ram_bases[1] = 0x00000000; - ram_sizes[1] = 0x00000000; - env = ppc405ep_init(sysmem, ram_memories, ram_bases, ram_sizes, - 33333333, &uicdev, kernel_filename == NULL ? 0 : 1); /* allocate SRAM */ - sram_size = 512 * KiB; - memory_region_init_ram(sram, NULL, "ef405ep.sram", sram_size, + memory_region_init_ram(sram, NULL, "ref405ep.sram", PPC405EP_SRAM_SIZE, &error_fatal); - memory_region_add_subregion(sysmem, 0xFFF00000, sram); - /* allocate and load BIOS */ -#ifdef USE_FLASH_BIOS - dinfo = drive_get(IF_PFLASH, 0, 0); - if (dinfo) { - bios_size = 8 * MiB; - pflash_cfi02_register((uint32_t)(-bios_size), - "ef405ep.bios", bios_size, - blk_by_legacy_dinfo(dinfo), - 64 * KiB, 1, - 2, 0x0001, 0x22DA, 0x0000, 0x0000, 0x555, 0x2AA, - 1); - } else -#endif - { - bios = g_new(MemoryRegion, 1); - memory_region_init_rom(bios, NULL, "ef405ep.bios", BIOS_SIZE, - &error_fatal); + memory_region_add_subregion(get_system_memory(), PPC405EP_SRAM_BASE, sram); - filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); - if (filename) { - bios_size = load_image_size(filename, - memory_region_get_ram_ptr(bios), - BIOS_SIZE); - g_free(filename); - if (bios_size < 0) { - error_report("Could not load PowerPC BIOS '%s'", bios_name); - exit(1); - } - bios_size = (bios_size + 0xfff) & ~0xfff; - memory_region_add_subregion(sysmem, (uint32_t)(-bios_size), bios); - } else if (!qtest_enabled() || kernel_filename != NULL) { - error_report("Could not load PowerPC BIOS '%s'", bios_name); - exit(1); - } else { - /* Avoid an uninitialized variable warning */ - bios_size = -1; - } - } /* Register FPGA */ - ref405ep_fpga_init(sysmem, 0xF0300000); + dev = qdev_new(TYPE_REF405EP_FPGA); + object_property_add_child(OBJECT(machine), "fpga", OBJECT(dev)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, PPC405EP_FPGA_BASE); + /* Register NVRAM */ dev = qdev_new("sysbus-m48t08"); qdev_prop_set_int32(dev, "base-year", 1968); s = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(s, &error_fatal); - sysbus_mmio_map(s, 0, 0xF0000000); - /* Load kernel */ - linux_boot = (kernel_filename != NULL); - if (linux_boot) { - memset(&bd, 0, sizeof(bd)); - bd.bi_memstart = 0x00000000; - bd.bi_memsize = machine->ram_size; - bd.bi_flashstart = -bios_size; - bd.bi_flashsize = -bios_size; - bd.bi_flashoffset = 0; - bd.bi_sramstart = 0xFFF00000; - bd.bi_sramsize = sram_size; - bd.bi_bootflags = 0; - bd.bi_intfreq = 133333333; - bd.bi_busfreq = 33333333; - bd.bi_baudrate = 115200; - bd.bi_s_version[0] = 'Q'; - bd.bi_s_version[1] = 'M'; - bd.bi_s_version[2] = 'U'; - bd.bi_s_version[3] = '\0'; - bd.bi_r_version[0] = 'Q'; - bd.bi_r_version[1] = 'E'; - bd.bi_r_version[2] = 'M'; - bd.bi_r_version[3] = 'U'; - bd.bi_r_version[4] = '\0'; - bd.bi_procfreq = 133333333; - bd.bi_plb_busfreq = 33333333; - bd.bi_pci_busfreq = 33333333; - bd.bi_opbfreq = 33333333; - bdloc = ppc405_set_bootinfo(env, &bd, 0x00000001); - env->gpr[3] = bdloc; - kernel_base = KERNEL_LOAD_ADDR; - /* now we can load the kernel */ - kernel_size = load_image_targphys(kernel_filename, kernel_base, - machine->ram_size - kernel_base); - if (kernel_size < 0) { - error_report("could not load kernel '%s'", kernel_filename); - exit(1); - } - printf("Load kernel size %ld at " TARGET_FMT_lx, - kernel_size, kernel_base); - /* load initrd */ - if (initrd_filename) { - initrd_base = INITRD_LOAD_ADDR; - initrd_size = load_image_targphys(initrd_filename, initrd_base, - machine->ram_size - 
initrd_base); - if (initrd_size < 0) { - error_report("could not load initial ram disk '%s'", - initrd_filename); - exit(1); - } - } else { - initrd_base = 0; - initrd_size = 0; - } - env->gpr[4] = initrd_base; - env->gpr[5] = initrd_size; - if (kernel_cmdline != NULL) { - len = strlen(kernel_cmdline); - bdloc -= ((len + 255) & ~255); - cpu_physical_memory_write(bdloc, kernel_cmdline, len + 1); - env->gpr[6] = bdloc; - env->gpr[7] = bdloc + len; - } else { - env->gpr[6] = 0; - env->gpr[7] = 0; - } - env->nip = KERNEL_LOAD_ADDR; - } else { - kernel_base = 0; - kernel_size = 0; - initrd_base = 0; - initrd_size = 0; - bdloc = 0; - } + sysbus_mmio_map(s, 0, PPC405EP_NVRAM_BASE); } static void ref405ep_class_init(ObjectClass *oc, void *data) @@ -315,250 +501,19 @@ static void ref405ep_class_init(ObjectClass *oc, void *data) mc->desc = "ref405ep"; mc->init = ref405ep_init; - mc->default_ram_size = 0x08000000; - mc->default_ram_id = "ef405ep.ram"; } static const TypeInfo ref405ep_type = { .name = MACHINE_TYPE_NAME("ref405ep"), - .parent = TYPE_MACHINE, + .parent = TYPE_PPC405_MACHINE, .class_init = ref405ep_class_init, }; -/*****************************************************************************/ -/* AMCC Taihu evaluation board */ -/* - PowerPC 405EP processor - * - SDRAM 128 MB at 0x00000000 - * - Boot flash 2 MB at 0xFFE00000 - * - Application flash 32 MB at 0xFC000000 - * - 2 serial ports - * - 2 ethernet PHY - * - 1 USB 1.1 device 0x50000000 - * - 1 LCD display 0x50100000 - * - 1 CPLD 0x50100000 - * - 1 I2C EEPROM - * - 1 I2C thermal sensor - * - a set of LEDs - * - bit-bang SPI port using GPIOs - * - 1 EBC interface connector 0 0x50200000 - * - 1 cardbus controller + expansion slot. - * - 1 PCI expansion slot. - */ -typedef struct taihu_cpld_t taihu_cpld_t; -struct taihu_cpld_t { - uint8_t reg0; - uint8_t reg1; -}; - -static uint64_t taihu_cpld_read(void *opaque, hwaddr addr, unsigned size) -{ - taihu_cpld_t *cpld; - uint32_t ret; - - cpld = opaque; - switch (addr) { - case 0x0: - ret = cpld->reg0; - break; - case 0x1: - ret = cpld->reg1; - break; - default: - ret = 0; - break; - } - - return ret; -} - -static void taihu_cpld_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - taihu_cpld_t *cpld; - - cpld = opaque; - switch (addr) { - case 0x0: - /* Read only */ - break; - case 0x1: - cpld->reg1 = value; - break; - default: - break; - } -} - -static const MemoryRegionOps taihu_cpld_ops = { - .read = taihu_cpld_read, - .write = taihu_cpld_write, - .impl = { - .min_access_size = 1, - .max_access_size = 1, - }, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static void taihu_cpld_reset (void *opaque) -{ - taihu_cpld_t *cpld; - - cpld = opaque; - cpld->reg0 = 0x01; - cpld->reg1 = 0x80; -} - -static void taihu_cpld_init(MemoryRegion *sysmem, uint32_t base) -{ - taihu_cpld_t *cpld; - MemoryRegion *cpld_memory = g_new(MemoryRegion, 1); - - cpld = g_malloc0(sizeof(taihu_cpld_t)); - memory_region_init_io(cpld_memory, NULL, &taihu_cpld_ops, cpld, "cpld", 0x100); - memory_region_add_subregion(sysmem, base, cpld_memory); - qemu_register_reset(&taihu_cpld_reset, cpld); -} - -static void taihu_405ep_init(MachineState *machine) -{ - MachineClass *mc = MACHINE_GET_CLASS(machine); - const char *bios_name = machine->firmware ?: BIOS_FILENAME; - const char *kernel_filename = machine->kernel_filename; - const char *initrd_filename = machine->initrd_filename; - char *filename; - MemoryRegion *sysmem = get_system_memory(); - MemoryRegion *bios; - MemoryRegion *ram_memories = 
g_new(MemoryRegion, 2); - hwaddr ram_bases[2], ram_sizes[2]; - long bios_size; - target_ulong kernel_base, initrd_base; - long kernel_size, initrd_size; - int linux_boot; - int fl_idx; - DriveInfo *dinfo; - DeviceState *uicdev; - - if (machine->ram_size != mc->default_ram_size) { - char *sz = size_to_str(mc->default_ram_size); - error_report("Invalid RAM size, should be %s", sz); - g_free(sz); - exit(EXIT_FAILURE); - } - - ram_bases[0] = 0; - ram_sizes[0] = 0x04000000; - memory_region_init_alias(&ram_memories[0], NULL, - "taihu_405ep.ram-0", machine->ram, ram_bases[0], - ram_sizes[0]); - ram_bases[1] = 0x04000000; - ram_sizes[1] = 0x04000000; - memory_region_init_alias(&ram_memories[1], NULL, - "taihu_405ep.ram-1", machine->ram, ram_bases[1], - ram_sizes[1]); - ppc405ep_init(sysmem, ram_memories, ram_bases, ram_sizes, - 33333333, &uicdev, kernel_filename == NULL ? 0 : 1); - /* allocate and load BIOS */ - fl_idx = 0; -#if defined(USE_FLASH_BIOS) - dinfo = drive_get(IF_PFLASH, 0, fl_idx); - if (dinfo) { - bios_size = 2 * MiB; - pflash_cfi02_register(0xFFE00000, - "taihu_405ep.bios", bios_size, - blk_by_legacy_dinfo(dinfo), - 64 * KiB, 1, - 4, 0x0001, 0x22DA, 0x0000, 0x0000, 0x555, 0x2AA, - 1); - fl_idx++; - } else -#endif - { - bios = g_new(MemoryRegion, 1); - memory_region_init_rom(bios, NULL, "taihu_405ep.bios", BIOS_SIZE, - &error_fatal); - filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); - if (filename) { - bios_size = load_image_size(filename, - memory_region_get_ram_ptr(bios), - BIOS_SIZE); - g_free(filename); - if (bios_size < 0) { - error_report("Could not load PowerPC BIOS '%s'", bios_name); - exit(1); - } - bios_size = (bios_size + 0xfff) & ~0xfff; - memory_region_add_subregion(sysmem, (uint32_t)(-bios_size), bios); - } else if (!qtest_enabled()) { - error_report("Could not load PowerPC BIOS '%s'", bios_name); - exit(1); - } - } - /* Register Linux flash */ - dinfo = drive_get(IF_PFLASH, 0, fl_idx); - if (dinfo) { - bios_size = 32 * MiB; - pflash_cfi02_register(0xfc000000, "taihu_405ep.flash", bios_size, - blk_by_legacy_dinfo(dinfo), - 64 * KiB, 1, - 4, 0x0001, 0x22DA, 0x0000, 0x0000, 0x555, 0x2AA, - 1); - fl_idx++; - } - /* Register CLPD & LCD display */ - taihu_cpld_init(sysmem, 0x50100000); - /* Load kernel */ - linux_boot = (kernel_filename != NULL); - if (linux_boot) { - kernel_base = KERNEL_LOAD_ADDR; - /* now we can load the kernel */ - kernel_size = load_image_targphys(kernel_filename, kernel_base, - machine->ram_size - kernel_base); - if (kernel_size < 0) { - error_report("could not load kernel '%s'", kernel_filename); - exit(1); - } - /* load initrd */ - if (initrd_filename) { - initrd_base = INITRD_LOAD_ADDR; - initrd_size = load_image_targphys(initrd_filename, initrd_base, - machine->ram_size - initrd_base); - if (initrd_size < 0) { - error_report("could not load initial ram disk '%s'", - initrd_filename); - exit(1); - } - } else { - initrd_base = 0; - initrd_size = 0; - } - } else { - kernel_base = 0; - kernel_size = 0; - initrd_base = 0; - initrd_size = 0; - } -} - -static void taihu_class_init(ObjectClass *oc, void *data) -{ - MachineClass *mc = MACHINE_CLASS(oc); - - mc->desc = "taihu"; - mc->init = taihu_405ep_init; - mc->default_ram_size = 0x08000000; - mc->default_ram_id = "taihu_405ep.ram"; -} - -static const TypeInfo taihu_type = { - .name = MACHINE_TYPE_NAME("taihu"), - .parent = TYPE_MACHINE, - .class_init = taihu_class_init, -}; - static void ppc405_machine_init(void) { + type_register_static(&ppc405_machine_type); 
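/*
 * Illustrative sketch, not part of this patch: with ref405ep now derived
 * from the abstract TYPE_PPC405_MACHINE registered above, another
 * 405EP-based board needs little more than its own MachineClass.  The
 * "example405" name and its init hook are made up for illustration; the
 * RAM defaults (128 MiB, "ppc405.ram") are inherited from the base class.
 *
 *     static void example405_init(MachineState *machine)
 *     {
 *         ppc405_init(machine);    // shared SoC, firmware and kernel setup
 *         // map any board-specific devices here
 *     }
 *
 *     static void example405_class_init(ObjectClass *oc, void *data)
 *     {
 *         MachineClass *mc = MACHINE_CLASS(oc);
 *
 *         mc->desc = "example 405EP board";
 *         mc->init = example405_init;
 *     }
 *
 *     static const TypeInfo example405_type = {
 *         .name = MACHINE_TYPE_NAME("example405"),
 *         .parent = TYPE_PPC405_MACHINE,
 *         .class_init = example405_class_init,
 *     };
 *
 * with a matching type_register_static(&example405_type) added next to the
 * registrations here.
 */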
type_register_static(&ref405ep_type); - type_register_static(&taihu_type); + type_register_static(&ref405ep_fpga_type); } type_init(ppc405_machine_init) diff --git a/hw/ppc/ppc405_uc.c b/hw/ppc/ppc405_uc.c index e632c408bd..c973cfb04e 100644 --- a/hw/ppc/ppc405_uc.c +++ b/hw/ppc/ppc405_uc.c @@ -25,10 +25,12 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qapi/error.h" +#include "qemu/log.h" #include "cpu.h" #include "hw/ppc/ppc.h" #include "hw/i2c/ppc4xx_i2c.h" #include "hw/irq.h" +#include "hw/qdev-properties.h" #include "ppc405.h" #include "hw/char/serial.h" #include "qemu/timer.h" @@ -38,162 +40,11 @@ #include "hw/intc/ppc-uic.h" #include "hw/qdev-properties.h" #include "qapi/error.h" - -//#define DEBUG_OPBA -//#define DEBUG_SDRAM -//#define DEBUG_GPIO -//#define DEBUG_SERIAL -//#define DEBUG_OCM -//#define DEBUG_GPT -//#define DEBUG_CLOCKS -//#define DEBUG_CLOCKS_LL - -ram_addr_t ppc405_set_bootinfo (CPUPPCState *env, ppc4xx_bd_info_t *bd, - uint32_t flags) -{ - CPUState *cs = env_cpu(env); - ram_addr_t bdloc; - int i, n; - - /* We put the bd structure at the top of memory */ - if (bd->bi_memsize >= 0x01000000UL) - bdloc = 0x01000000UL - sizeof(struct ppc4xx_bd_info_t); - else - bdloc = bd->bi_memsize - sizeof(struct ppc4xx_bd_info_t); - stl_be_phys(cs->as, bdloc + 0x00, bd->bi_memstart); - stl_be_phys(cs->as, bdloc + 0x04, bd->bi_memsize); - stl_be_phys(cs->as, bdloc + 0x08, bd->bi_flashstart); - stl_be_phys(cs->as, bdloc + 0x0C, bd->bi_flashsize); - stl_be_phys(cs->as, bdloc + 0x10, bd->bi_flashoffset); - stl_be_phys(cs->as, bdloc + 0x14, bd->bi_sramstart); - stl_be_phys(cs->as, bdloc + 0x18, bd->bi_sramsize); - stl_be_phys(cs->as, bdloc + 0x1C, bd->bi_bootflags); - stl_be_phys(cs->as, bdloc + 0x20, bd->bi_ipaddr); - for (i = 0; i < 6; i++) { - stb_phys(cs->as, bdloc + 0x24 + i, bd->bi_enetaddr[i]); - } - stw_be_phys(cs->as, bdloc + 0x2A, bd->bi_ethspeed); - stl_be_phys(cs->as, bdloc + 0x2C, bd->bi_intfreq); - stl_be_phys(cs->as, bdloc + 0x30, bd->bi_busfreq); - stl_be_phys(cs->as, bdloc + 0x34, bd->bi_baudrate); - for (i = 0; i < 4; i++) { - stb_phys(cs->as, bdloc + 0x38 + i, bd->bi_s_version[i]); - } - for (i = 0; i < 32; i++) { - stb_phys(cs->as, bdloc + 0x3C + i, bd->bi_r_version[i]); - } - stl_be_phys(cs->as, bdloc + 0x5C, bd->bi_plb_busfreq); - stl_be_phys(cs->as, bdloc + 0x60, bd->bi_pci_busfreq); - for (i = 0; i < 6; i++) { - stb_phys(cs->as, bdloc + 0x64 + i, bd->bi_pci_enetaddr[i]); - } - n = 0x6A; - if (flags & 0x00000001) { - for (i = 0; i < 6; i++) - stb_phys(cs->as, bdloc + n++, bd->bi_pci_enetaddr2[i]); - } - stl_be_phys(cs->as, bdloc + n, bd->bi_opbfreq); - n += 4; - for (i = 0; i < 2; i++) { - stl_be_phys(cs->as, bdloc + n, bd->bi_iic_fast[i]); - n += 4; - } - - return bdloc; -} +#include "trace.h" /*****************************************************************************/ /* Shared peripherals */ -/*****************************************************************************/ -/* Peripheral local bus arbitrer */ -enum { - PLB3A0_ACR = 0x077, - PLB4A0_ACR = 0x081, - PLB0_BESR = 0x084, - PLB0_BEAR = 0x086, - PLB0_ACR = 0x087, - PLB4A1_ACR = 0x089, -}; - -typedef struct ppc4xx_plb_t ppc4xx_plb_t; -struct ppc4xx_plb_t { - uint32_t acr; - uint32_t bear; - uint32_t besr; -}; - -static uint32_t dcr_read_plb (void *opaque, int dcrn) -{ - ppc4xx_plb_t *plb; - uint32_t ret; - - plb = opaque; - switch (dcrn) { - case PLB0_ACR: - ret = plb->acr; - break; - case PLB0_BEAR: - ret = plb->bear; - break; - case PLB0_BESR: - ret = plb->besr; - break; - default: - /* 
Avoid gcc warning */ - ret = 0; - break; - } - - return ret; -} - -static void dcr_write_plb (void *opaque, int dcrn, uint32_t val) -{ - ppc4xx_plb_t *plb; - - plb = opaque; - switch (dcrn) { - case PLB0_ACR: - /* We don't care about the actual parameters written as - * we don't manage any priorities on the bus - */ - plb->acr = val & 0xF8000000; - break; - case PLB0_BEAR: - /* Read only */ - break; - case PLB0_BESR: - /* Write-clear */ - plb->besr &= ~val; - break; - } -} - -static void ppc4xx_plb_reset (void *opaque) -{ - ppc4xx_plb_t *plb; - - plb = opaque; - plb->acr = 0x00000000; - plb->bear = 0x00000000; - plb->besr = 0x00000000; -} - -void ppc4xx_plb_init(CPUPPCState *env) -{ - ppc4xx_plb_t *plb; - - plb = g_malloc0(sizeof(ppc4xx_plb_t)); - ppc_dcr_register(env, PLB3A0_ACR, plb, &dcr_read_plb, &dcr_write_plb); - ppc_dcr_register(env, PLB4A0_ACR, plb, &dcr_read_plb, &dcr_write_plb); - ppc_dcr_register(env, PLB0_ACR, plb, &dcr_read_plb, &dcr_write_plb); - ppc_dcr_register(env, PLB0_BEAR, plb, &dcr_read_plb, &dcr_write_plb); - ppc_dcr_register(env, PLB0_BESR, plb, &dcr_read_plb, &dcr_write_plb); - ppc_dcr_register(env, PLB4A1_ACR, plb, &dcr_read_plb, &dcr_write_plb); - qemu_register_reset(ppc4xx_plb_reset, plb); -} - /*****************************************************************************/ /* PLB to OPB bridge */ enum { @@ -202,19 +53,11 @@ enum { POB0_BEAR = 0x0A4, }; -typedef struct ppc4xx_pob_t ppc4xx_pob_t; -struct ppc4xx_pob_t { - uint32_t bear; - uint32_t besr0; - uint32_t besr1; -}; - -static uint32_t dcr_read_pob (void *opaque, int dcrn) +static uint32_t dcr_read_pob(void *opaque, int dcrn) { - ppc4xx_pob_t *pob; + Ppc405PobState *pob = opaque; uint32_t ret; - pob = opaque; switch (dcrn) { case POB0_BEAR: ret = pob->bear; @@ -234,11 +77,10 @@ static uint32_t dcr_read_pob (void *opaque, int dcrn) return ret; } -static void dcr_write_pob (void *opaque, int dcrn, uint32_t val) +static void dcr_write_pob(void *opaque, int dcrn, uint32_t val) { - ppc4xx_pob_t *pob; + Ppc405PobState *pob = opaque; - pob = opaque; switch (dcrn) { case POB0_BEAR: /* Read only */ @@ -254,46 +96,43 @@ static void dcr_write_pob (void *opaque, int dcrn, uint32_t val) } } -static void ppc4xx_pob_reset (void *opaque) +static void ppc405_pob_reset(DeviceState *dev) { - ppc4xx_pob_t *pob; + Ppc405PobState *pob = PPC405_POB(dev); - pob = opaque; /* No error */ pob->bear = 0x00000000; pob->besr0 = 0x0000000; pob->besr1 = 0x0000000; } -static void ppc4xx_pob_init(CPUPPCState *env) +static void ppc405_pob_realize(DeviceState *dev, Error **errp) { - ppc4xx_pob_t *pob; + Ppc405PobState *pob = PPC405_POB(dev); + Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev); - pob = g_malloc0(sizeof(ppc4xx_pob_t)); - ppc_dcr_register(env, POB0_BEAR, pob, &dcr_read_pob, &dcr_write_pob); - ppc_dcr_register(env, POB0_BESR0, pob, &dcr_read_pob, &dcr_write_pob); - ppc_dcr_register(env, POB0_BESR1, pob, &dcr_read_pob, &dcr_write_pob); - qemu_register_reset(ppc4xx_pob_reset, pob); + ppc4xx_dcr_register(dcr, POB0_BEAR, pob, &dcr_read_pob, &dcr_write_pob); + ppc4xx_dcr_register(dcr, POB0_BESR0, pob, &dcr_read_pob, &dcr_write_pob); + ppc4xx_dcr_register(dcr, POB0_BESR1, pob, &dcr_read_pob, &dcr_write_pob); +} + +static void ppc405_pob_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = ppc405_pob_realize; + dc->reset = ppc405_pob_reset; + /* Reason: only works as function of a ppc4xx SoC */ + dc->user_creatable = false; } 
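/*
 * Sketch of the consumer side (not part of this patch): the SoC model
 * further below owns one instance of each converted device and realizes
 * it onto the CPU's DCR bus.  The helper name here is invented purely for
 * illustration; its body mirrors the corresponding hunk in
 * ppc405_soc_realize().
 */
static bool ppc405_pob_attach_example(Ppc405SoCState *s, Error **errp)
{
    /*
     * The child was created in instance_init with
     * object_initialize_child(obj, "pob", &s->pob, TYPE_PPC405_POB).
     */
    if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->pob), &s->cpu, errp)) {
        return false;                       /* propagate realize failure */
    }
    /* ppc405_pob_realize() has now registered POB0_BEAR/BESR0/BESR1 */
    return true;
}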
/*****************************************************************************/ /* OPB arbitrer */ -typedef struct ppc4xx_opba_t ppc4xx_opba_t; -struct ppc4xx_opba_t { - MemoryRegion io; - uint8_t cr; - uint8_t pr; -}; - static uint64_t opba_readb(void *opaque, hwaddr addr, unsigned size) { - ppc4xx_opba_t *opba; + Ppc405OpbaState *opba = opaque; uint32_t ret; -#ifdef DEBUG_OPBA - printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr); -#endif - opba = opaque; switch (addr) { case 0x00: ret = opba->cr; @@ -306,19 +145,17 @@ static uint64_t opba_readb(void *opaque, hwaddr addr, unsigned size) break; } + trace_opba_readb(addr, ret); return ret; } static void opba_writeb(void *opaque, hwaddr addr, uint64_t value, unsigned size) { - ppc4xx_opba_t *opba; + Ppc405OpbaState *opba = opaque; + + trace_opba_writeb(addr, value); -#ifdef DEBUG_OPBA - printf("%s: addr " TARGET_FMT_plx " val %08" PRIx32 "\n", __func__, addr, - value); -#endif - opba = opaque; switch (addr) { case 0x00: opba->cr = value & 0xF8; @@ -340,225 +177,36 @@ static const MemoryRegionOps opba_ops = { .endianness = DEVICE_BIG_ENDIAN, }; -static void ppc4xx_opba_reset (void *opaque) +static void ppc405_opba_reset(DeviceState *dev) { - ppc4xx_opba_t *opba; + Ppc405OpbaState *opba = PPC405_OPBA(dev); - opba = opaque; opba->cr = 0x00; /* No dynamic priorities - park disabled */ opba->pr = 0x11; } -static void ppc4xx_opba_init(hwaddr base) +static void ppc405_opba_realize(DeviceState *dev, Error **errp) { - ppc4xx_opba_t *opba; + Ppc405OpbaState *s = PPC405_OPBA(dev); - opba = g_malloc0(sizeof(ppc4xx_opba_t)); -#ifdef DEBUG_OPBA - printf("%s: offset " TARGET_FMT_plx "\n", __func__, base); -#endif - memory_region_init_io(&opba->io, NULL, &opba_ops, opba, "opba", 0x002); - memory_region_add_subregion(get_system_memory(), base, &opba->io); - qemu_register_reset(ppc4xx_opba_reset, opba); + memory_region_init_io(&s->io, OBJECT(s), &opba_ops, s, "opba", 2); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->io); +} + +static void ppc405_opba_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = ppc405_opba_realize; + dc->reset = ppc405_opba_reset; + /* Reason: only works as function of a ppc4xx SoC */ + dc->user_creatable = false; } /*****************************************************************************/ /* Code decompression controller */ /* XXX: TODO */ -/*****************************************************************************/ -/* Peripheral controller */ -typedef struct ppc4xx_ebc_t ppc4xx_ebc_t; -struct ppc4xx_ebc_t { - uint32_t addr; - uint32_t bcr[8]; - uint32_t bap[8]; - uint32_t bear; - uint32_t besr0; - uint32_t besr1; - uint32_t cfg; -}; - -enum { - EBC0_CFGADDR = 0x012, - EBC0_CFGDATA = 0x013, -}; - -static uint32_t dcr_read_ebc (void *opaque, int dcrn) -{ - ppc4xx_ebc_t *ebc; - uint32_t ret; - - ebc = opaque; - switch (dcrn) { - case EBC0_CFGADDR: - ret = ebc->addr; - break; - case EBC0_CFGDATA: - switch (ebc->addr) { - case 0x00: /* B0CR */ - ret = ebc->bcr[0]; - break; - case 0x01: /* B1CR */ - ret = ebc->bcr[1]; - break; - case 0x02: /* B2CR */ - ret = ebc->bcr[2]; - break; - case 0x03: /* B3CR */ - ret = ebc->bcr[3]; - break; - case 0x04: /* B4CR */ - ret = ebc->bcr[4]; - break; - case 0x05: /* B5CR */ - ret = ebc->bcr[5]; - break; - case 0x06: /* B6CR */ - ret = ebc->bcr[6]; - break; - case 0x07: /* B7CR */ - ret = ebc->bcr[7]; - break; - case 0x10: /* B0AP */ - ret = ebc->bap[0]; - break; - case 0x11: /* B1AP */ - ret = ebc->bap[1]; - break; - case 0x12: /* B2AP */ - ret = 
ebc->bap[2]; - break; - case 0x13: /* B3AP */ - ret = ebc->bap[3]; - break; - case 0x14: /* B4AP */ - ret = ebc->bap[4]; - break; - case 0x15: /* B5AP */ - ret = ebc->bap[5]; - break; - case 0x16: /* B6AP */ - ret = ebc->bap[6]; - break; - case 0x17: /* B7AP */ - ret = ebc->bap[7]; - break; - case 0x20: /* BEAR */ - ret = ebc->bear; - break; - case 0x21: /* BESR0 */ - ret = ebc->besr0; - break; - case 0x22: /* BESR1 */ - ret = ebc->besr1; - break; - case 0x23: /* CFG */ - ret = ebc->cfg; - break; - default: - ret = 0x00000000; - break; - } - break; - default: - ret = 0x00000000; - break; - } - - return ret; -} - -static void dcr_write_ebc (void *opaque, int dcrn, uint32_t val) -{ - ppc4xx_ebc_t *ebc; - - ebc = opaque; - switch (dcrn) { - case EBC0_CFGADDR: - ebc->addr = val; - break; - case EBC0_CFGDATA: - switch (ebc->addr) { - case 0x00: /* B0CR */ - break; - case 0x01: /* B1CR */ - break; - case 0x02: /* B2CR */ - break; - case 0x03: /* B3CR */ - break; - case 0x04: /* B4CR */ - break; - case 0x05: /* B5CR */ - break; - case 0x06: /* B6CR */ - break; - case 0x07: /* B7CR */ - break; - case 0x10: /* B0AP */ - break; - case 0x11: /* B1AP */ - break; - case 0x12: /* B2AP */ - break; - case 0x13: /* B3AP */ - break; - case 0x14: /* B4AP */ - break; - case 0x15: /* B5AP */ - break; - case 0x16: /* B6AP */ - break; - case 0x17: /* B7AP */ - break; - case 0x20: /* BEAR */ - break; - case 0x21: /* BESR0 */ - break; - case 0x22: /* BESR1 */ - break; - case 0x23: /* CFG */ - break; - default: - break; - } - break; - default: - break; - } -} - -static void ebc_reset (void *opaque) -{ - ppc4xx_ebc_t *ebc; - int i; - - ebc = opaque; - ebc->addr = 0x00000000; - ebc->bap[0] = 0x7F8FFE80; - ebc->bcr[0] = 0xFFE28000; - for (i = 0; i < 8; i++) { - ebc->bap[i] = 0x00000000; - ebc->bcr[i] = 0x00000000; - } - ebc->besr0 = 0x00000000; - ebc->besr1 = 0x00000000; - ebc->cfg = 0x80400000; -} - -void ppc405_ebc_init(CPUPPCState *env) -{ - ppc4xx_ebc_t *ebc; - - ebc = g_malloc0(sizeof(ppc4xx_ebc_t)); - qemu_register_reset(&ebc_reset, ebc); - ppc_dcr_register(env, EBC0_CFGADDR, - ebc, &dcr_read_ebc, &dcr_write_ebc); - ppc_dcr_register(env, EBC0_CFGDATA, - ebc, &dcr_read_ebc, &dcr_write_ebc); -} - /*****************************************************************************/ /* DMA controller */ enum { @@ -588,35 +236,20 @@ enum { DMA0_POL = 0x126, }; -typedef struct ppc405_dma_t ppc405_dma_t; -struct ppc405_dma_t { - qemu_irq irqs[4]; - uint32_t cr[4]; - uint32_t ct[4]; - uint32_t da[4]; - uint32_t sa[4]; - uint32_t sg[4]; - uint32_t sr; - uint32_t sgc; - uint32_t slp; - uint32_t pol; -}; - -static uint32_t dcr_read_dma (void *opaque, int dcrn) +static uint32_t dcr_read_dma(void *opaque, int dcrn) { return 0; } -static void dcr_write_dma (void *opaque, int dcrn, uint32_t val) +static void dcr_write_dma(void *opaque, int dcrn, uint32_t val) { } -static void ppc405_dma_reset (void *opaque) +static void ppc405_dma_reset(DeviceState *dev) { - ppc405_dma_t *dma; + Ppc405DmaState *dma = PPC405_DMA(dev); int i; - dma = opaque; for (i = 0; i < 4; i++) { dma->cr[i] = 0x00000000; dma->ct[i] = 0x00000000; @@ -630,97 +263,64 @@ static void ppc405_dma_reset (void *opaque) dma->pol = 0x00000000; } -static void ppc405_dma_init(CPUPPCState *env, qemu_irq irqs[4]) +static void ppc405_dma_realize(DeviceState *dev, Error **errp) { - ppc405_dma_t *dma; + Ppc405DmaState *dma = PPC405_DMA(dev); + Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev); + int i; - dma = g_malloc0(sizeof(ppc405_dma_t)); - memcpy(dma->irqs, irqs, 4 * 
sizeof(qemu_irq)); - qemu_register_reset(&ppc405_dma_reset, dma); - ppc_dcr_register(env, DMA0_CR0, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_CT0, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_DA0, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_SA0, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_SG0, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_CR1, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_CT1, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_DA1, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_SA1, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_SG1, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_CR2, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_CT2, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_DA2, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_SA2, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_SG2, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_CR3, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_CT3, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_DA3, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_SA3, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_SG3, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_SR, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_SGC, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_SLP, - dma, &dcr_read_dma, &dcr_write_dma); - ppc_dcr_register(env, DMA0_POL, - dma, &dcr_read_dma, &dcr_write_dma); + for (i = 0; i < ARRAY_SIZE(dma->irqs); i++) { + sysbus_init_irq(SYS_BUS_DEVICE(dma), &dma->irqs[i]); + } + + ppc4xx_dcr_register(dcr, DMA0_CR0, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_CT0, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_DA0, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_SA0, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_SG0, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_CR1, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_CT1, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_DA1, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_SA1, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_SG1, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_CR2, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_CT2, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_DA2, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_SA2, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_SG2, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_CR3, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_CT3, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_DA3, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_SA3, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_SG3, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_SR, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_SGC, dma, &dcr_read_dma, &dcr_write_dma); + 
ppc4xx_dcr_register(dcr, DMA0_SLP, dma, &dcr_read_dma, &dcr_write_dma); + ppc4xx_dcr_register(dcr, DMA0_POL, dma, &dcr_read_dma, &dcr_write_dma); +} + +static void ppc405_dma_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = ppc405_dma_realize; + dc->reset = ppc405_dma_reset; + /* Reason: only works as function of a ppc4xx SoC */ + dc->user_creatable = false; } /*****************************************************************************/ /* GPIO */ -typedef struct ppc405_gpio_t ppc405_gpio_t; -struct ppc405_gpio_t { - MemoryRegion io; - uint32_t or; - uint32_t tcr; - uint32_t osrh; - uint32_t osrl; - uint32_t tsrh; - uint32_t tsrl; - uint32_t odr; - uint32_t ir; - uint32_t rr1; - uint32_t isr1h; - uint32_t isr1l; -}; - static uint64_t ppc405_gpio_read(void *opaque, hwaddr addr, unsigned size) { -#ifdef DEBUG_GPIO - printf("%s: addr " TARGET_FMT_plx " size %d\n", __func__, addr, size); -#endif - + trace_ppc405_gpio_read(addr, size); return 0; } static void ppc405_gpio_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) { -#ifdef DEBUG_GPIO - printf("%s: addr " TARGET_FMT_plx " size %d val %08" PRIx32 "\n", - __func__, addr, size, value); -#endif + trace_ppc405_gpio_write(addr, size, value); } static const MemoryRegionOps ppc405_gpio_ops = { @@ -729,21 +329,22 @@ static const MemoryRegionOps ppc405_gpio_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -static void ppc405_gpio_reset (void *opaque) +static void ppc405_gpio_realize(DeviceState *dev, Error **errp) { + Ppc405GpioState *s = PPC405_GPIO(dev); + + memory_region_init_io(&s->io, OBJECT(s), &ppc405_gpio_ops, s, "gpio", + 0x38); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->io); } -static void ppc405_gpio_init(hwaddr base) +static void ppc405_gpio_class_init(ObjectClass *oc, void *data) { - ppc405_gpio_t *gpio; + DeviceClass *dc = DEVICE_CLASS(oc); - gpio = g_malloc0(sizeof(ppc405_gpio_t)); -#ifdef DEBUG_GPIO - printf("%s: offset " TARGET_FMT_plx "\n", __func__, base); -#endif - memory_region_init_io(&gpio->io, NULL, &ppc405_gpio_ops, gpio, "pgio", 0x038); - memory_region_add_subregion(get_system_memory(), base, &gpio->io); - qemu_register_reset(&ppc405_gpio_reset, gpio); + dc->realize = ppc405_gpio_realize; + /* Reason: only works as function of a ppc4xx SoC */ + dc->user_creatable = false; } /*****************************************************************************/ @@ -755,40 +356,23 @@ enum { OCM0_DSACNTL = 0x01B, }; -typedef struct ppc405_ocm_t ppc405_ocm_t; -struct ppc405_ocm_t { - MemoryRegion ram; - MemoryRegion isarc_ram; - MemoryRegion dsarc_ram; - uint32_t isarc; - uint32_t isacntl; - uint32_t dsarc; - uint32_t dsacntl; -}; - -static void ocm_update_mappings (ppc405_ocm_t *ocm, - uint32_t isarc, uint32_t isacntl, - uint32_t dsarc, uint32_t dsacntl) +static void ocm_update_mappings(Ppc405OcmState *ocm, + uint32_t isarc, uint32_t isacntl, + uint32_t dsarc, uint32_t dsacntl) { -#ifdef DEBUG_OCM - printf("OCM update ISA %08" PRIx32 " %08" PRIx32 " (%08" PRIx32 - " %08" PRIx32 ") DSA %08" PRIx32 " %08" PRIx32 - " (%08" PRIx32 " %08" PRIx32 ")\n", - isarc, isacntl, dsarc, dsacntl, - ocm->isarc, ocm->isacntl, ocm->dsarc, ocm->dsacntl); -#endif + trace_ocm_update_mappings(isarc, isacntl, dsarc, dsacntl, ocm->isarc, + ocm->isacntl, ocm->dsarc, ocm->dsacntl); + if (ocm->isarc != isarc || (ocm->isacntl & 0x80000000) != (isacntl & 0x80000000)) { if (ocm->isacntl & 0x80000000) { /* Unmap previously assigned memory region */ - printf("OCM unmap ISA %08" PRIx32 "\n", ocm->isarc); 
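/*
 * Worked example of the mapping logic (illustrative aside; the addresses
 * are chosen arbitrarily): if the guest programs
 *
 *     mtdcr(OCM0_ISARC,   0xF8000000);    // instruction-side base address
 *     mtdcr(OCM0_ISACNTL, 0x80000000);    // bit 31 enables the mapping
 *
 * dcr_write_ocm() passes the new values through this function, which then
 * maps the 4 KiB "ppc405.ocm" RAM at 0xF8000000; clearing bit 31 unmaps it
 * again.  DSARC/DSACNTL do the same for the data side via the
 * "ppc405.dsarc" alias of the same backing RAM, and the extra checks below
 * avoid unmapping or remapping a region twice when both sides point at the
 * same address.
 */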
+ trace_ocm_unmap("ISA", ocm->isarc); memory_region_del_subregion(get_system_memory(), &ocm->isarc_ram); } if (isacntl & 0x80000000) { /* Map new instruction memory region */ -#ifdef DEBUG_OCM - printf("OCM map ISA %08" PRIx32 "\n", isarc); -#endif + trace_ocm_map("ISA", isarc); memory_region_add_subregion(get_system_memory(), isarc, &ocm->isarc_ram); } @@ -799,9 +383,7 @@ static void ocm_update_mappings (ppc405_ocm_t *ocm, /* Beware not to unmap the region we just mapped */ if (!(isacntl & 0x80000000) || ocm->dsarc != isarc) { /* Unmap previously assigned memory region */ -#ifdef DEBUG_OCM - printf("OCM unmap DSA %08" PRIx32 "\n", ocm->dsarc); -#endif + trace_ocm_unmap("DSA", ocm->dsarc); memory_region_del_subregion(get_system_memory(), &ocm->dsarc_ram); } @@ -810,9 +392,7 @@ static void ocm_update_mappings (ppc405_ocm_t *ocm, /* Beware not to remap the region we just mapped */ if (!(isacntl & 0x80000000) || dsarc != isarc) { /* Map new data memory region */ -#ifdef DEBUG_OCM - printf("OCM map DSA %08" PRIx32 "\n", dsarc); -#endif + trace_ocm_map("DSA", dsarc); memory_region_add_subregion(get_system_memory(), dsarc, &ocm->dsarc_ram); } @@ -820,12 +400,11 @@ static void ocm_update_mappings (ppc405_ocm_t *ocm, } } -static uint32_t dcr_read_ocm (void *opaque, int dcrn) +static uint32_t dcr_read_ocm(void *opaque, int dcrn) { - ppc405_ocm_t *ocm; + Ppc405OcmState *ocm = opaque; uint32_t ret; - ocm = opaque; switch (dcrn) { case OCM0_ISARC: ret = ocm->isarc; @@ -847,12 +426,11 @@ static uint32_t dcr_read_ocm (void *opaque, int dcrn) return ret; } -static void dcr_write_ocm (void *opaque, int dcrn, uint32_t val) +static void dcr_write_ocm(void *opaque, int dcrn, uint32_t val) { - ppc405_ocm_t *ocm; + Ppc405OcmState *ocm = opaque; uint32_t isarc, dsarc, isacntl, dsacntl; - ocm = opaque; isarc = ocm->isarc; dsarc = ocm->dsarc; isacntl = ocm->isacntl; @@ -878,12 +456,11 @@ static void dcr_write_ocm (void *opaque, int dcrn, uint32_t val) ocm->dsacntl = dsacntl; } -static void ocm_reset (void *opaque) +static void ppc405_ocm_reset(DeviceState *dev) { - ppc405_ocm_t *ocm; + Ppc405OcmState *ocm = PPC405_OCM(dev); uint32_t isarc, dsarc, isacntl, dsacntl; - ocm = opaque; isarc = 0x00000000; isacntl = 0x00000000; dsarc = 0x00000000; @@ -895,57 +472,47 @@ static void ocm_reset (void *opaque) ocm->dsacntl = dsacntl; } -static void ppc405_ocm_init(CPUPPCState *env) +static void ppc405_ocm_realize(DeviceState *dev, Error **errp) { - ppc405_ocm_t *ocm; + Ppc405OcmState *ocm = PPC405_OCM(dev); + Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev); - ocm = g_malloc0(sizeof(ppc405_ocm_t)); /* XXX: Size is 4096 or 0x04000000 */ - memory_region_init_ram(&ocm->isarc_ram, NULL, "ppc405.ocm", 4 * KiB, + memory_region_init_ram(&ocm->isarc_ram, OBJECT(ocm), "ppc405.ocm", 4 * KiB, &error_fatal); - memory_region_init_alias(&ocm->dsarc_ram, NULL, "ppc405.dsarc", + memory_region_init_alias(&ocm->dsarc_ram, OBJECT(ocm), "ppc405.dsarc", &ocm->isarc_ram, 0, 4 * KiB); - qemu_register_reset(&ocm_reset, ocm); - ppc_dcr_register(env, OCM0_ISARC, - ocm, &dcr_read_ocm, &dcr_write_ocm); - ppc_dcr_register(env, OCM0_ISACNTL, - ocm, &dcr_read_ocm, &dcr_write_ocm); - ppc_dcr_register(env, OCM0_DSARC, - ocm, &dcr_read_ocm, &dcr_write_ocm); - ppc_dcr_register(env, OCM0_DSACNTL, - ocm, &dcr_read_ocm, &dcr_write_ocm); + + ppc4xx_dcr_register(dcr, OCM0_ISARC, ocm, &dcr_read_ocm, &dcr_write_ocm); + ppc4xx_dcr_register(dcr, OCM0_ISACNTL, ocm, &dcr_read_ocm, &dcr_write_ocm); + ppc4xx_dcr_register(dcr, OCM0_DSARC, ocm, &dcr_read_ocm, 
&dcr_write_ocm); + ppc4xx_dcr_register(dcr, OCM0_DSACNTL, ocm, &dcr_read_ocm, &dcr_write_ocm); +} + +static void ppc405_ocm_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = ppc405_ocm_realize; + dc->reset = ppc405_ocm_reset; + /* Reason: only works as function of a ppc4xx SoC */ + dc->user_creatable = false; } /*****************************************************************************/ /* General purpose timers */ -typedef struct ppc4xx_gpt_t ppc4xx_gpt_t; -struct ppc4xx_gpt_t { - MemoryRegion iomem; - int64_t tb_offset; - uint32_t tb_freq; - QEMUTimer *timer; - qemu_irq irqs[5]; - uint32_t oe; - uint32_t ol; - uint32_t im; - uint32_t is; - uint32_t ie; - uint32_t comp[5]; - uint32_t mask[5]; -}; - -static int ppc4xx_gpt_compare (ppc4xx_gpt_t *gpt, int n) +static int ppc4xx_gpt_compare(Ppc405GptState *gpt, int n) { /* XXX: TODO */ return 0; } -static void ppc4xx_gpt_set_output (ppc4xx_gpt_t *gpt, int n, int level) +static void ppc4xx_gpt_set_output(Ppc405GptState *gpt, int n, int level) { /* XXX: TODO */ } -static void ppc4xx_gpt_set_outputs (ppc4xx_gpt_t *gpt) +static void ppc4xx_gpt_set_outputs(Ppc405GptState *gpt) { uint32_t mask; int i; @@ -966,36 +533,35 @@ static void ppc4xx_gpt_set_outputs (ppc4xx_gpt_t *gpt) } } -static void ppc4xx_gpt_set_irqs (ppc4xx_gpt_t *gpt) +static void ppc4xx_gpt_set_irqs(Ppc405GptState *gpt) { uint32_t mask; int i; mask = 0x00008000; for (i = 0; i < 5; i++) { - if (gpt->is & gpt->im & mask) + if (gpt->is & gpt->im & mask) { qemu_irq_raise(gpt->irqs[i]); - else + } else { qemu_irq_lower(gpt->irqs[i]); + } mask = mask >> 1; } } -static void ppc4xx_gpt_compute_timer (ppc4xx_gpt_t *gpt) +static void ppc4xx_gpt_compute_timer(Ppc405GptState *gpt) { /* XXX: TODO */ } static uint64_t ppc4xx_gpt_read(void *opaque, hwaddr addr, unsigned size) { - ppc4xx_gpt_t *gpt; + Ppc405GptState *gpt = opaque; uint32_t ret; int idx; -#ifdef DEBUG_GPT - printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr); -#endif - gpt = opaque; + trace_ppc4xx_gpt_read(addr, size); + switch (addr) { case 0x00: /* Time base counter */ @@ -1044,14 +610,11 @@ static uint64_t ppc4xx_gpt_read(void *opaque, hwaddr addr, unsigned size) static void ppc4xx_gpt_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) { - ppc4xx_gpt_t *gpt; + Ppc405GptState *gpt = opaque; int idx; -#ifdef DEBUG_I2C - printf("%s: addr " TARGET_FMT_plx " val %08" PRIx32 "\n", __func__, addr, - value); -#endif - gpt = opaque; + trace_ppc4xx_gpt_write(addr, size, value); + switch (addr) { case 0x00: /* Time base counter */ @@ -1111,22 +674,20 @@ static const MemoryRegionOps gpt_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -static void ppc4xx_gpt_cb (void *opaque) +static void ppc4xx_gpt_cb(void *opaque) { - ppc4xx_gpt_t *gpt; + Ppc405GptState *gpt = opaque; - gpt = opaque; ppc4xx_gpt_set_irqs(gpt); ppc4xx_gpt_set_outputs(gpt); ppc4xx_gpt_compute_timer(gpt); } -static void ppc4xx_gpt_reset (void *opaque) +static void ppc405_gpt_reset(DeviceState *dev) { - ppc4xx_gpt_t *gpt; + Ppc405GptState *gpt = PPC405_GPT(dev); int i; - gpt = opaque; timer_del(gpt->timer); gpt->oe = 0x00000000; gpt->ol = 0x00000000; @@ -1139,22 +700,37 @@ static void ppc4xx_gpt_reset (void *opaque) } } -static void ppc4xx_gpt_init(hwaddr base, qemu_irq irqs[5]) +static void ppc405_gpt_realize(DeviceState *dev, Error **errp) { - ppc4xx_gpt_t *gpt; + Ppc405GptState *s = PPC405_GPT(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); int i; - gpt = g_malloc0(sizeof(ppc4xx_gpt_t)); - for (i = 0; i < 5; 
i++) { - gpt->irqs[i] = irqs[i]; + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &ppc4xx_gpt_cb, s); + memory_region_init_io(&s->iomem, OBJECT(s), &gpt_ops, s, "gpt", 0xd4); + sysbus_init_mmio(sbd, &s->iomem); + + for (i = 0; i < ARRAY_SIZE(s->irqs); i++) { + sysbus_init_irq(sbd, &s->irqs[i]); } - gpt->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &ppc4xx_gpt_cb, gpt); -#ifdef DEBUG_GPT - printf("%s: offset " TARGET_FMT_plx "\n", __func__, base); -#endif - memory_region_init_io(&gpt->iomem, NULL, &gpt_ops, gpt, "gpt", 0x0d4); - memory_region_add_subregion(get_system_memory(), base, &gpt->iomem); - qemu_register_reset(ppc4xx_gpt_reset, gpt); +} + +static void ppc405_gpt_finalize(Object *obj) +{ + /* timer will be NULL if the GPT wasn't realized */ + if (PPC405_GPT(obj)->timer) { + timer_del(PPC405_GPT(obj)->timer); + } +} + +static void ppc405_gpt_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = ppc405_gpt_realize; + dc->reset = ppc405_gpt_reset; + /* Reason: only works as function of a ppc4xx SoC */ + dc->user_creatable = false; } /*****************************************************************************/ @@ -1176,36 +752,7 @@ enum { #endif }; -enum { - PPC405EP_CPU_CLK = 0, - PPC405EP_PLB_CLK = 1, - PPC405EP_OPB_CLK = 2, - PPC405EP_EBC_CLK = 3, - PPC405EP_MAL_CLK = 4, - PPC405EP_PCI_CLK = 5, - PPC405EP_UART0_CLK = 6, - PPC405EP_UART1_CLK = 7, - PPC405EP_CLK_NB = 8, -}; - -typedef struct ppc405ep_cpc_t ppc405ep_cpc_t; -struct ppc405ep_cpc_t { - uint32_t sysclk; - clk_setup_t clk_setup[PPC405EP_CLK_NB]; - uint32_t boot; - uint32_t epctl; - uint32_t pllmr[2]; - uint32_t ucr; - uint32_t srr; - uint32_t jtagid; - uint32_t pci; - /* Clock and power management */ - uint32_t er; - uint32_t fr; - uint32_t sr; -}; - -static void ppc405ep_compute_clocks (ppc405ep_cpc_t *cpc) +static void ppc405ep_compute_clocks(Ppc405CpcState *cpc) { uint32_t CPU_clk, PLB_clk, OPB_clk, EBC_clk, MAL_clk, PCI_clk; uint32_t UART0_clk, UART1_clk; @@ -1215,17 +762,14 @@ static void ppc405ep_compute_clocks (ppc405ep_cpc_t *cpc) VCO_out = 0; if ((cpc->pllmr[1] & 0x80000000) && !(cpc->pllmr[1] & 0x40000000)) { M = (((cpc->pllmr[1] >> 20) - 1) & 0xF) + 1; /* FBMUL */ -#ifdef DEBUG_CLOCKS_LL - printf("FBMUL %01" PRIx32 " %d\n", (cpc->pllmr[1] >> 20) & 0xF, M); -#endif + trace_ppc405ep_clocks_compute("FBMUL", (cpc->pllmr[1] >> 20) & 0xF, M); D = 8 - ((cpc->pllmr[1] >> 16) & 0x7); /* FWDA */ -#ifdef DEBUG_CLOCKS_LL - printf("FWDA %01" PRIx32 " %d\n", (cpc->pllmr[1] >> 16) & 0x7, D); -#endif + trace_ppc405ep_clocks_compute("FWDA", (cpc->pllmr[1] >> 16) & 0x7, D); VCO_out = (uint64_t)cpc->sysclk * M * D; if (VCO_out < 500000000UL || VCO_out > 1000000000UL) { /* Error - unlock the PLL */ - printf("VCO out of range %" PRIu64 "\n", VCO_out); + qemu_log_mask(LOG_GUEST_ERROR, "VCO out of range %" PRIu64 "\n", + VCO_out); #if 0 cpc->pllmr[1] &= ~0x80000000; goto pll_bypass; @@ -1246,54 +790,43 @@ static void ppc405ep_compute_clocks (ppc405ep_cpc_t *cpc) } /* Now, compute all other clocks */ D = ((cpc->pllmr[0] >> 20) & 0x3) + 1; /* CCDV */ -#ifdef DEBUG_CLOCKS_LL - printf("CCDV %01" PRIx32 " %d\n", (cpc->pllmr[0] >> 20) & 0x3, D); -#endif + trace_ppc405ep_clocks_compute("CCDV", (cpc->pllmr[0] >> 20) & 0x3, D); CPU_clk = PLL_out / D; D = ((cpc->pllmr[0] >> 16) & 0x3) + 1; /* CBDV */ -#ifdef DEBUG_CLOCKS_LL - printf("CBDV %01" PRIx32 " %d\n", (cpc->pllmr[0] >> 16) & 0x3, D); -#endif + trace_ppc405ep_clocks_compute("CBDV", (cpc->pllmr[0] >> 16) & 0x3, D); PLB_clk = CPU_clk / D; D = 
((cpc->pllmr[0] >> 12) & 0x3) + 1; /* OPDV */ -#ifdef DEBUG_CLOCKS_LL - printf("OPDV %01" PRIx32 " %d\n", (cpc->pllmr[0] >> 12) & 0x3, D); -#endif + trace_ppc405ep_clocks_compute("OPDV", (cpc->pllmr[0] >> 12) & 0x3, D); OPB_clk = PLB_clk / D; D = ((cpc->pllmr[0] >> 8) & 0x3) + 2; /* EPDV */ -#ifdef DEBUG_CLOCKS_LL - printf("EPDV %01" PRIx32 " %d\n", (cpc->pllmr[0] >> 8) & 0x3, D); -#endif + trace_ppc405ep_clocks_compute("EPDV", (cpc->pllmr[0] >> 8) & 0x3, D); EBC_clk = PLB_clk / D; D = ((cpc->pllmr[0] >> 4) & 0x3) + 1; /* MPDV */ -#ifdef DEBUG_CLOCKS_LL - printf("MPDV %01" PRIx32 " %d\n", (cpc->pllmr[0] >> 4) & 0x3, D); -#endif + trace_ppc405ep_clocks_compute("MPDV", (cpc->pllmr[0] >> 4) & 0x3, D); MAL_clk = PLB_clk / D; D = (cpc->pllmr[0] & 0x3) + 1; /* PPDV */ -#ifdef DEBUG_CLOCKS_LL - printf("PPDV %01" PRIx32 " %d\n", cpc->pllmr[0] & 0x3, D); -#endif + trace_ppc405ep_clocks_compute("PPDV", cpc->pllmr[0] & 0x3, D); PCI_clk = PLB_clk / D; D = ((cpc->ucr - 1) & 0x7F) + 1; /* U0DIV */ -#ifdef DEBUG_CLOCKS_LL - printf("U0DIV %01" PRIx32 " %d\n", cpc->ucr & 0x7F, D); -#endif + trace_ppc405ep_clocks_compute("U0DIV", cpc->ucr & 0x7F, D); UART0_clk = PLL_out / D; D = (((cpc->ucr >> 8) - 1) & 0x7F) + 1; /* U1DIV */ -#ifdef DEBUG_CLOCKS_LL - printf("U1DIV %01" PRIx32 " %d\n", (cpc->ucr >> 8) & 0x7F, D); -#endif + trace_ppc405ep_clocks_compute("U1DIV", (cpc->ucr >> 8) & 0x7F, D); UART1_clk = PLL_out / D; -#ifdef DEBUG_CLOCKS - printf("Setup PPC405EP clocks - sysclk %" PRIu32 " VCO %" PRIu64 - " PLL out %" PRIu64 " Hz\n", cpc->sysclk, VCO_out, PLL_out); - printf("CPU %" PRIu32 " PLB %" PRIu32 " OPB %" PRIu32 " EBC %" PRIu32 - " MAL %" PRIu32 " PCI %" PRIu32 " UART0 %" PRIu32 - " UART1 %" PRIu32 "\n", - CPU_clk, PLB_clk, OPB_clk, EBC_clk, MAL_clk, PCI_clk, - UART0_clk, UART1_clk); -#endif + + if (trace_event_get_state_backends(TRACE_PPC405EP_CLOCKS_SETUP)) { + g_autofree char *trace = g_strdup_printf( + "Setup PPC405EP clocks - sysclk %" PRIu32 " VCO %" PRIu64 + " PLL out %" PRIu64 " Hz\n" + "CPU %" PRIu32 " PLB %" PRIu32 " OPB %" PRIu32 " EBC %" PRIu32 + " MAL %" PRIu32 " PCI %" PRIu32 " UART0 %" PRIu32 + " UART1 %" PRIu32 "\n", + cpc->sysclk, VCO_out, PLL_out, + CPU_clk, PLB_clk, OPB_clk, EBC_clk, MAL_clk, PCI_clk, + UART0_clk, UART1_clk); + trace_ppc405ep_clocks_setup(trace); + } + /* Setup CPU clocks */ clk_setup(&cpc->clk_setup[PPC405EP_CPU_CLK], CPU_clk); /* Setup PLB clock */ @@ -1312,12 +845,11 @@ static void ppc405ep_compute_clocks (ppc405ep_cpc_t *cpc) clk_setup(&cpc->clk_setup[PPC405EP_UART1_CLK], UART1_clk); } -static uint32_t dcr_read_epcpc (void *opaque, int dcrn) +static uint32_t dcr_read_epcpc(void *opaque, int dcrn) { - ppc405ep_cpc_t *cpc; + Ppc405CpcState *cpc = opaque; uint32_t ret; - cpc = opaque; switch (dcrn) { case PPC405EP_CPC0_BOOT: ret = cpc->boot; @@ -1352,11 +884,10 @@ static uint32_t dcr_read_epcpc (void *opaque, int dcrn) return ret; } -static void dcr_write_epcpc (void *opaque, int dcrn, uint32_t val) +static void dcr_write_epcpc(void *opaque, int dcrn, uint32_t val) { - ppc405ep_cpc_t *cpc; + Ppc405CpcState *cpc = opaque; - cpc = opaque; switch (dcrn) { case PPC405EP_CPC0_BOOT: /* Read-only register */ @@ -1389,159 +920,300 @@ static void dcr_write_epcpc (void *opaque, int dcrn, uint32_t val) } } -static void ppc405ep_cpc_reset (void *opaque) +static void ppc405_cpc_reset(DeviceState *dev) { - ppc405ep_cpc_t *cpc = opaque; + Ppc405CpcState *cpc = PPC405_CPC(dev); cpc->boot = 0x00000010; /* Boot from PCI - IIC EEPROM disabled */ cpc->epctl = 0x00000000; - cpc->pllmr[0] 
= 0x00011010; - cpc->pllmr[1] = 0x40000000; - cpc->ucr = 0x00000000; + cpc->pllmr[0] = 0x00021002; + cpc->pllmr[1] = 0x80a552be; + cpc->ucr = 0x00004646; cpc->srr = 0x00040000; cpc->pci = 0x00000000; cpc->er = 0x00000000; cpc->fr = 0x00000000; cpc->sr = 0x00000000; + cpc->jtagid = 0x20267049; ppc405ep_compute_clocks(cpc); } /* XXX: sysclk should be between 25 and 100 MHz */ -static void ppc405ep_cpc_init (CPUPPCState *env, clk_setup_t clk_setup[8], - uint32_t sysclk) +static void ppc405_cpc_realize(DeviceState *dev, Error **errp) { - ppc405ep_cpc_t *cpc; + Ppc405CpcState *cpc = PPC405_CPC(dev); + Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev); - cpc = g_malloc0(sizeof(ppc405ep_cpc_t)); - memcpy(cpc->clk_setup, clk_setup, - PPC405EP_CLK_NB * sizeof(clk_setup_t)); - cpc->jtagid = 0x20267049; - cpc->sysclk = sysclk; - qemu_register_reset(&ppc405ep_cpc_reset, cpc); - ppc_dcr_register(env, PPC405EP_CPC0_BOOT, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); - ppc_dcr_register(env, PPC405EP_CPC0_EPCTL, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); - ppc_dcr_register(env, PPC405EP_CPC0_PLLMR0, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); - ppc_dcr_register(env, PPC405EP_CPC0_PLLMR1, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); - ppc_dcr_register(env, PPC405EP_CPC0_UCR, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); - ppc_dcr_register(env, PPC405EP_CPC0_SRR, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); - ppc_dcr_register(env, PPC405EP_CPC0_JTAGID, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); - ppc_dcr_register(env, PPC405EP_CPC0_PCI, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); -#if 0 - ppc_dcr_register(env, PPC405EP_CPC0_ER, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); - ppc_dcr_register(env, PPC405EP_CPC0_FR, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); - ppc_dcr_register(env, PPC405EP_CPC0_SR, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); -#endif + assert(dcr->cpu); + cpc->clk_setup[PPC405EP_CPU_CLK].cb = + ppc_40x_timers_init(&dcr->cpu->env, cpc->sysclk, PPC_INTERRUPT_PIT); + cpc->clk_setup[PPC405EP_CPU_CLK].opaque = &dcr->cpu->env; + + ppc4xx_dcr_register(dcr, PPC405EP_CPC0_BOOT, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); + ppc4xx_dcr_register(dcr, PPC405EP_CPC0_EPCTL, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); + ppc4xx_dcr_register(dcr, PPC405EP_CPC0_PLLMR0, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); + ppc4xx_dcr_register(dcr, PPC405EP_CPC0_PLLMR1, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); + ppc4xx_dcr_register(dcr, PPC405EP_CPC0_UCR, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); + ppc4xx_dcr_register(dcr, PPC405EP_CPC0_SRR, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); + ppc4xx_dcr_register(dcr, PPC405EP_CPC0_JTAGID, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); + ppc4xx_dcr_register(dcr, PPC405EP_CPC0_PCI, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); } -CPUPPCState *ppc405ep_init(MemoryRegion *address_space_mem, - MemoryRegion ram_memories[2], - hwaddr ram_bases[2], - hwaddr ram_sizes[2], - uint32_t sysclk, DeviceState **uicdevp, - int do_init) +static Property ppc405_cpc_properties[] = { + DEFINE_PROP_UINT32("sys-clk", Ppc405CpcState, sysclk, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ppc405_cpc_class_init(ObjectClass *oc, void *data) { - clk_setup_t clk_setup[PPC405EP_CLK_NB], tlb_clk_setup; - qemu_irq dma_irqs[4], gpt_irqs[5], mal_irqs[4]; - PowerPCCPU *cpu; + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = ppc405_cpc_realize; + dc->reset = ppc405_cpc_reset; + /* Reason: only works as function of a ppc4xx SoC */ + dc->user_creatable = false; + device_class_set_props(dc, ppc405_cpc_properties); +} + +/* 
PPC405_SOC */ + +static void ppc405_soc_instance_init(Object *obj) +{ + Ppc405SoCState *s = PPC405_SOC(obj); + + object_initialize_child(obj, "cpu", &s->cpu, + POWERPC_CPU_TYPE_NAME("405ep")); + + object_initialize_child(obj, "uic", &s->uic, TYPE_PPC_UIC); + + object_initialize_child(obj, "cpc", &s->cpc, TYPE_PPC405_CPC); + object_property_add_alias(obj, "sys-clk", OBJECT(&s->cpc), "sys-clk"); + + object_initialize_child(obj, "gpt", &s->gpt, TYPE_PPC405_GPT); + + object_initialize_child(obj, "ocm", &s->ocm, TYPE_PPC405_OCM); + + object_initialize_child(obj, "gpio", &s->gpio, TYPE_PPC405_GPIO); + + object_initialize_child(obj, "dma", &s->dma, TYPE_PPC405_DMA); + + object_initialize_child(obj, "i2c", &s->i2c, TYPE_PPC4xx_I2C); + + object_initialize_child(obj, "ebc", &s->ebc, TYPE_PPC4xx_EBC); + + object_initialize_child(obj, "opba", &s->opba, TYPE_PPC405_OPBA); + + object_initialize_child(obj, "pob", &s->pob, TYPE_PPC405_POB); + + object_initialize_child(obj, "plb", &s->plb, TYPE_PPC4xx_PLB); + + object_initialize_child(obj, "mal", &s->mal, TYPE_PPC4xx_MAL); + + object_initialize_child(obj, "sdram", &s->sdram, TYPE_PPC4xx_SDRAM_DDR); + object_property_add_alias(obj, "dram", OBJECT(&s->sdram), "dram"); +} + +static void ppc405_reset(void *opaque) +{ + cpu_reset(CPU(opaque)); +} + +static void ppc405_soc_realize(DeviceState *dev, Error **errp) +{ + Ppc405SoCState *s = PPC405_SOC(dev); CPUPPCState *env; - DeviceState *uicdev; - SysBusDevice *uicsbd; + SysBusDevice *sbd; + int i; - memset(clk_setup, 0, sizeof(clk_setup)); /* init CPUs */ - cpu = ppc4xx_init(POWERPC_CPU_TYPE_NAME("405ep"), - &clk_setup[PPC405EP_CPU_CLK], - &tlb_clk_setup, sysclk); - env = &cpu->env; - clk_setup[PPC405EP_CPU_CLK].cb = tlb_clk_setup.cb; - clk_setup[PPC405EP_CPU_CLK].opaque = tlb_clk_setup.opaque; - /* Internal devices init */ - /* Memory mapped devices registers */ + if (!qdev_realize(DEVICE(&s->cpu), NULL, errp)) { + return; + } + qemu_register_reset(ppc405_reset, &s->cpu); + + env = &s->cpu.env; + + ppc_dcr_init(env, NULL, NULL); + + /* CPU control */ + if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->cpc), &s->cpu, errp)) { + return; + } + /* PLB arbitrer */ - ppc4xx_plb_init(env); + if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->plb), &s->cpu, errp)) { + return; + } + /* PLB to OPB bridge */ - ppc4xx_pob_init(env); + if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->pob), &s->cpu, errp)) { + return; + } + /* OBP arbitrer */ - ppc4xx_opba_init(0xef600600); - /* Initialize timers */ - ppc_booke_timers_init(cpu, sysclk, 0); + sbd = SYS_BUS_DEVICE(&s->opba); + if (!sysbus_realize(sbd, errp)) { + return; + } + sysbus_mmio_map(sbd, 0, 0xef600600); + /* Universal interrupt controller */ - uicdev = qdev_new(TYPE_PPC_UIC); - uicsbd = SYS_BUS_DEVICE(uicdev); - - object_property_set_link(OBJECT(uicdev), "cpu", OBJECT(cpu), - &error_fatal); - sysbus_realize_and_unref(uicsbd, &error_fatal); - - sysbus_connect_irq(uicsbd, PPCUIC_OUTPUT_INT, - ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]); - sysbus_connect_irq(uicsbd, PPCUIC_OUTPUT_CINT, - ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT]); - - *uicdevp = uicdev; + if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->uic), &s->cpu, errp)) { + return; + } + sbd = SYS_BUS_DEVICE(&s->uic); + sysbus_connect_irq(sbd, PPCUIC_OUTPUT_INT, + qdev_get_gpio_in(DEVICE(&s->cpu), PPC40x_INPUT_INT)); + sysbus_connect_irq(sbd, PPCUIC_OUTPUT_CINT, + qdev_get_gpio_in(DEVICE(&s->cpu), PPC40x_INPUT_CINT)); /* SDRAM controller */ - /* XXX 405EP has no ECC interrupt */ - ppc4xx_sdram_init(env, 
qdev_get_gpio_in(uicdev, 17), 2, ram_memories, - ram_bases, ram_sizes, do_init); + /* + * We use the 440 DDR SDRAM controller which has more regs and features + * but it's compatible enough for now + */ + object_property_set_int(OBJECT(&s->sdram), "nbanks", 2, &error_abort); + if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->sdram), &s->cpu, errp)) { + return; + } + /* XXX 405EP has no ECC interrupt */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdram), 0, + qdev_get_gpio_in(DEVICE(&s->uic), 17)); + /* External bus controller */ - ppc405_ebc_init(env); + if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->ebc), &s->cpu, errp)) { + return; + } + /* DMA controller */ - dma_irqs[0] = qdev_get_gpio_in(uicdev, 5); - dma_irqs[1] = qdev_get_gpio_in(uicdev, 6); - dma_irqs[2] = qdev_get_gpio_in(uicdev, 7); - dma_irqs[3] = qdev_get_gpio_in(uicdev, 8); - ppc405_dma_init(env, dma_irqs); - /* IIC controller */ - sysbus_create_simple(TYPE_PPC4xx_I2C, 0xef600500, - qdev_get_gpio_in(uicdev, 2)); + if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->dma), &s->cpu, errp)) { + return; + } + sbd = SYS_BUS_DEVICE(&s->dma); + for (i = 0; i < ARRAY_SIZE(s->dma.irqs); i++) { + sysbus_connect_irq(sbd, i, qdev_get_gpio_in(DEVICE(&s->uic), 5 + i)); + } + + /* I2C controller */ + sbd = SYS_BUS_DEVICE(&s->i2c); + if (!sysbus_realize(sbd, errp)) { + return; + } + sysbus_mmio_map(sbd, 0, 0xef600500); + sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(DEVICE(&s->uic), 2)); + /* GPIO */ - ppc405_gpio_init(0xef600700); + sbd = SYS_BUS_DEVICE(&s->gpio); + if (!sysbus_realize(sbd, errp)) { + return; + } + sysbus_mmio_map(sbd, 0, 0xef600700); + /* Serial ports */ if (serial_hd(0) != NULL) { - serial_mm_init(address_space_mem, 0xef600300, 0, - qdev_get_gpio_in(uicdev, 0), + serial_mm_init(get_system_memory(), 0xef600300, 0, + qdev_get_gpio_in(DEVICE(&s->uic), 0), PPC_SERIAL_MM_BAUDBASE, serial_hd(0), DEVICE_BIG_ENDIAN); } if (serial_hd(1) != NULL) { - serial_mm_init(address_space_mem, 0xef600400, 0, - qdev_get_gpio_in(uicdev, 1), + serial_mm_init(get_system_memory(), 0xef600400, 0, + qdev_get_gpio_in(DEVICE(&s->uic), 1), PPC_SERIAL_MM_BAUDBASE, serial_hd(1), DEVICE_BIG_ENDIAN); } + /* OCM */ - ppc405_ocm_init(env); + if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->ocm), &s->cpu, errp)) { + return; + } + /* GPT */ - gpt_irqs[0] = qdev_get_gpio_in(uicdev, 19); - gpt_irqs[1] = qdev_get_gpio_in(uicdev, 20); - gpt_irqs[2] = qdev_get_gpio_in(uicdev, 21); - gpt_irqs[3] = qdev_get_gpio_in(uicdev, 22); - gpt_irqs[4] = qdev_get_gpio_in(uicdev, 23); - ppc4xx_gpt_init(0xef600000, gpt_irqs); - /* PCI */ - /* Uses UIC IRQs 3, 16, 18 */ + sbd = SYS_BUS_DEVICE(&s->gpt); + if (!sysbus_realize(sbd, errp)) { + return; + } + sysbus_mmio_map(sbd, 0, 0xef600000); + for (i = 0; i < ARRAY_SIZE(s->gpt.irqs); i++) { + sysbus_connect_irq(sbd, i, qdev_get_gpio_in(DEVICE(&s->uic), 19 + i)); + } + /* MAL */ - mal_irqs[0] = qdev_get_gpio_in(uicdev, 11); - mal_irqs[1] = qdev_get_gpio_in(uicdev, 12); - mal_irqs[2] = qdev_get_gpio_in(uicdev, 13); - mal_irqs[3] = qdev_get_gpio_in(uicdev, 14); - ppc4xx_mal_init(env, 4, 2, mal_irqs); + object_property_set_int(OBJECT(&s->mal), "txc-num", 4, &error_abort); + object_property_set_int(OBJECT(&s->mal), "rxc-num", 2, &error_abort); + if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->mal), &s->cpu, errp)) { + return; + } + sbd = SYS_BUS_DEVICE(&s->mal); + for (i = 0; i < ARRAY_SIZE(s->mal.irqs); i++) { + sysbus_connect_irq(sbd, i, qdev_get_gpio_in(DEVICE(&s->uic), 11 + i)); + } + /* Ethernet */ /* Uses UIC IRQs 9, 15, 17 */ - /* CPU control */ - 
ppc405ep_cpc_init(env, clk_setup, sysclk); - - return env; } + +static void ppc405_soc_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = ppc405_soc_realize; + /* Reason: only works as part of a ppc405 board/machine */ + dc->user_creatable = false; +} + +static const TypeInfo ppc405_types[] = { + { + .name = TYPE_PPC405_POB, + .parent = TYPE_PPC4xx_DCR_DEVICE, + .instance_size = sizeof(Ppc405PobState), + .class_init = ppc405_pob_class_init, + }, { + .name = TYPE_PPC405_OPBA, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Ppc405OpbaState), + .class_init = ppc405_opba_class_init, + }, { + .name = TYPE_PPC405_DMA, + .parent = TYPE_PPC4xx_DCR_DEVICE, + .instance_size = sizeof(Ppc405DmaState), + .class_init = ppc405_dma_class_init, + }, { + .name = TYPE_PPC405_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Ppc405GpioState), + .class_init = ppc405_gpio_class_init, + }, { + .name = TYPE_PPC405_OCM, + .parent = TYPE_PPC4xx_DCR_DEVICE, + .instance_size = sizeof(Ppc405OcmState), + .class_init = ppc405_ocm_class_init, + }, { + .name = TYPE_PPC405_GPT, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Ppc405GptState), + .instance_finalize = ppc405_gpt_finalize, + .class_init = ppc405_gpt_class_init, + }, { + .name = TYPE_PPC405_CPC, + .parent = TYPE_PPC4xx_DCR_DEVICE, + .instance_size = sizeof(Ppc405CpcState), + .class_init = ppc405_cpc_class_init, + }, { + .name = TYPE_PPC405_SOC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(Ppc405SoCState), + .instance_init = ppc405_soc_instance_init, + .class_init = ppc405_soc_class_init, + } +}; + +DEFINE_TYPES(ppc405_types) diff --git a/hw/ppc/ppc440.h b/hw/ppc/ppc440.h index 7cef936125..7c24db8504 100644 --- a/hw/ppc/ppc440.h +++ b/hw/ppc/ppc440.h @@ -16,10 +16,6 @@ void ppc4xx_l2sram_init(CPUPPCState *env); void ppc4xx_cpr_init(CPUPPCState *env); void ppc4xx_sdr_init(CPUPPCState *env); -void ppc440_sdram_init(CPUPPCState *env, int nbanks, - MemoryRegion *ram_memories, - hwaddr *ram_bases, hwaddr *ram_sizes, - int do_init); void ppc4xx_ahb_init(CPUPPCState *env); void ppc4xx_dma_init(CPUPPCState *env, int dcr_base); void ppc460ex_pcie_init(CPUPPCState *env); diff --git a/hw/ppc/ppc440_bamboo.c b/hw/ppc/ppc440_bamboo.c index 7fb620b9a0..81d71adf34 100644 --- a/hw/ppc/ppc440_bamboo.c +++ b/hw/ppc/ppc440_bamboo.c @@ -3,9 +3,9 @@ * * Copyright 2007 IBM Corporation. * Authors: - * Jerone Young - * Christian Ehrhardt - * Hollis Blanchard + * Jerone Young + * Christian Ehrhardt + * Hollis Blanchard * * This work is licensed under the GNU GPL license version 2 or later. 
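For orientation, the conversion above means a board no longer calls the old ppc405ep init helpers but embeds the new TYPE_PPC405_SOC object directly and wires it through the "dram" and "sys-clk" aliases added in ppc405_soc_instance_init(). A minimal board-side sketch follows; the example machine type, the header path, the 33.3 MHz value, and the assumption that "sys-clk" is a plain integer property are illustrative and not part of this patch.

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/boards.h"
#include "hw/ppc/ppc405.h"   /* assumed header declaring Ppc405SoCState */

#define TYPE_PPC405_EXAMPLE_MACHINE MACHINE_TYPE_NAME("ppc405-example")
OBJECT_DECLARE_SIMPLE_TYPE(Ppc405ExampleMachineState, PPC405_EXAMPLE_MACHINE)

struct Ppc405ExampleMachineState {
    MachineState parent_obj;
    Ppc405SoCState soc;
};

static void ppc405_example_init(MachineState *machine)
{
    Ppc405ExampleMachineState *m = PPC405_EXAMPLE_MACHINE(machine);

    /* Embed the SoC in the machine state and give it the machine RAM. */
    object_initialize_child(OBJECT(machine), "soc", &m->soc, TYPE_PPC405_SOC);
    object_property_set_link(OBJECT(&m->soc), "dram",
                             OBJECT(machine->ram), &error_abort);
    /* "sys-clk" is aliased to the CPC; assumed to be an integer property. */
    object_property_set_int(OBJECT(&m->soc), "sys-clk", 33333333,
                            &error_abort);
    /* The SoC has no parent bus (TYPE_DEVICE), so realize with a NULL bus. */
    qdev_realize(DEVICE(&m->soc), NULL, &error_fatal);
}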
* @@ -14,7 +14,6 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qemu/error-report.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qemu/error-report.h" #include "net/net.h" @@ -35,6 +34,8 @@ #include "hw/qdev-properties.h" #include "qapi/error.h" +#include + #define BINARY_DEVICE_TREE_FILE "bamboo.dtb" /* from u-boot */ @@ -49,22 +50,15 @@ #define PPC440EP_PCI_IO 0xe8000000 #define PPC440EP_PCI_IOLEN 0x00010000 -#define PPC440EP_SDRAM_NR_BANKS 4 - -static const ram_addr_t ppc440ep_sdram_bank_sizes[] = { - 256 * MiB, 128 * MiB, 64 * MiB, 32 * MiB, 16 * MiB, 8 * MiB, 0 -}; - static hwaddr entry; -static int bamboo_load_device_tree(hwaddr addr, - uint32_t ramsize, - hwaddr initrd_base, - hwaddr initrd_size, - const char *kernel_cmdline) +static int bamboo_load_device_tree(MachineState *machine, + hwaddr addr, + hwaddr initrd_base, + hwaddr initrd_size) { int ret = -1; - uint32_t mem_reg_property[] = { 0, 0, cpu_to_be32(ramsize) }; + uint32_t mem_reg_property[] = { 0, 0, cpu_to_be32(machine->ram_size) }; char *filename; int fdt_size; void *fdt; @@ -85,27 +79,30 @@ static int bamboo_load_device_tree(hwaddr addr, ret = qemu_fdt_setprop(fdt, "/memory", "reg", mem_reg_property, sizeof(mem_reg_property)); - if (ret < 0) + if (ret < 0) { fprintf(stderr, "couldn't set /memory/reg\n"); - + } ret = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start", initrd_base); - if (ret < 0) + if (ret < 0) { fprintf(stderr, "couldn't set /chosen/linux,initrd-start\n"); - + } ret = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end", (initrd_base + initrd_size)); - if (ret < 0) + if (ret < 0) { fprintf(stderr, "couldn't set /chosen/linux,initrd-end\n"); - + } ret = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", - kernel_cmdline); - if (ret < 0) + machine->kernel_cmdline); + if (ret < 0) { fprintf(stderr, "couldn't set /chosen/bootargs\n"); + } - /* Copy data from the host device tree into the guest. Since the guest can + /* + * Copy data from the host device tree into the guest. Since the guest can * directly access the timebase without host involvement, we must expose - * the correct frequencies. */ + * the correct frequencies. 
+ */ if (kvm_enabled()) { tb_freq = kvmppc_get_tbfreq(); clock_freq = kvmppc_get_clockfreq(); @@ -117,7 +114,10 @@ static int bamboo_load_device_tree(hwaddr addr, tb_freq); rom_add_blob_fixed(BINARY_DEVICE_TREE_FILE, fdt, fdt_size, addr); - g_free(fdt); + + /* Set ms->fdt for 'dumpdtb' QMP/HMP command */ + machine->fdt = fdt; + return 0; } @@ -161,14 +161,10 @@ static void main_cpu_reset(void *opaque) static void bamboo_init(MachineState *machine) { const char *kernel_filename = machine->kernel_filename; - const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; unsigned int pci_irq_nrs[4] = { 28, 27, 26, 25 }; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *isa = g_new(MemoryRegion, 1); - MemoryRegion *ram_memories = g_new(MemoryRegion, PPC440EP_SDRAM_NR_BANKS); - hwaddr ram_bases[PPC440EP_SDRAM_NR_BANKS]; - hwaddr ram_sizes[PPC440EP_SDRAM_NR_BANKS]; PCIBus *pcibus; PowerPCCPU *cpu; CPUPPCState *env; @@ -194,27 +190,24 @@ static void bamboo_init(MachineState *machine) /* interrupt controller */ uicdev = qdev_new(TYPE_PPC_UIC); + ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(uicdev), cpu, &error_fatal); + object_unref(OBJECT(uicdev)); uicsbd = SYS_BUS_DEVICE(uicdev); - - object_property_set_link(OBJECT(uicdev), "cpu", OBJECT(cpu), - &error_fatal); - sysbus_realize_and_unref(uicsbd, &error_fatal); - sysbus_connect_irq(uicsbd, PPCUIC_OUTPUT_INT, - ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]); + qdev_get_gpio_in(DEVICE(cpu), PPC40x_INPUT_INT)); sysbus_connect_irq(uicsbd, PPCUIC_OUTPUT_CINT, - ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT]); + qdev_get_gpio_in(DEVICE(cpu), PPC40x_INPUT_CINT)); /* SDRAM controller */ - memset(ram_bases, 0, sizeof(ram_bases)); - memset(ram_sizes, 0, sizeof(ram_sizes)); - ppc4xx_sdram_banks(machine->ram, PPC440EP_SDRAM_NR_BANKS, ram_memories, - ram_bases, ram_sizes, ppc440ep_sdram_bank_sizes); + dev = qdev_new(TYPE_PPC4xx_SDRAM_DDR); + object_property_set_link(OBJECT(dev), "dram", OBJECT(machine->ram), + &error_abort); + ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(dev), cpu, &error_fatal); + object_unref(OBJECT(dev)); /* XXX 440EP's ECC interrupts are on UIC1, but we've only created UIC0. */ - ppc4xx_sdram_init(env, - qdev_get_gpio_in(uicdev, 14), - PPC440EP_SDRAM_NR_BANKS, ram_memories, - ram_bases, ram_sizes, 1); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(uicdev, 14)); + /* Enable SDRAM memory regions, this should be done by the firmware */ + ppc4xx_sdram_ddr_enable(PPC4xx_SDRAM_DDR(dev)); /* PCI */ dev = sysbus_create_varargs(TYPE_PPC4xx_PCI_HOST_BRIDGE, @@ -250,8 +243,10 @@ static void bamboo_init(MachineState *machine) if (pcibus) { /* Register network interfaces. */ for (i = 0; i < nb_nics; i++) { - /* There are no PCI NICs on the Bamboo board, but there are - * PCI slots, so we can pick whatever default model we want. */ + /* + * There are no PCI NICs on the Bamboo board, but there are + * PCI slots, so we can pick whatever default model we want. + */ pci_nic_init_nofail(&nd_table[i], pcibus, "e1000", NULL); } } @@ -288,8 +283,8 @@ static void bamboo_init(MachineState *machine) /* If we're loading a kernel directly, we must load the device tree too. 
*/ if (kernel_filename) { - if (bamboo_load_device_tree(FDT_ADDR, machine->ram_size, RAMDISK_ADDR, - initrd_size, kernel_cmdline) < 0) { + if (bamboo_load_device_tree(machine, FDT_ADDR, + RAMDISK_ADDR, initrd_size) < 0) { error_report("couldn't load device tree"); exit(1); } diff --git a/hw/ppc/ppc440_uc.c b/hw/ppc/ppc440_uc.c index 993e3ba955..651263926e 100644 --- a/hw/ppc/ppc440_uc.c +++ b/hw/ppc/ppc440_uc.c @@ -10,19 +10,14 @@ #include "qemu/osdep.h" #include "qemu/units.h" -#include "qemu/error-report.h" #include "qapi/error.h" #include "qemu/log.h" -#include "qemu/module.h" #include "hw/irq.h" -#include "exec/memory.h" -#include "hw/ppc/ppc.h" +#include "hw/ppc/ppc4xx.h" #include "hw/qdev-properties.h" #include "hw/pci/pci.h" -#include "sysemu/block-backend.h" #include "sysemu/reset.h" #include "ppc440.h" -#include "qom/object.h" /*****************************************************************************/ /* L2 Cache as SRAM */ @@ -378,10 +373,6 @@ enum { PESDR1_RSTSTA = 0x365, }; -#define SDR0_DDR0_DDRM_ENCODE(n) ((((unsigned long)(n)) & 0x03) << 29) -#define SDR0_DDR0_DDRM_DDR1 0x20000000 -#define SDR0_DDR0_DDRM_DDR2 0x40000000 - static uint32_t dcr_read_sdr(void *opaque, int dcrn) { ppc4xx_sdr_t *sdr = opaque; @@ -480,262 +471,6 @@ void ppc4xx_sdr_init(CPUPPCState *env) sdr, &dcr_read_sdr, &dcr_write_sdr); } -/*****************************************************************************/ -/* SDRAM controller */ -typedef struct ppc440_sdram_t { - uint32_t addr; - int nbanks; - MemoryRegion containers[4]; /* used for clipping */ - MemoryRegion *ram_memories; - hwaddr ram_bases[4]; - hwaddr ram_sizes[4]; - uint32_t bcr[4]; -} ppc440_sdram_t; - -enum { - SDRAM0_CFGADDR = 0x10, - SDRAM0_CFGDATA, - SDRAM_R0BAS = 0x40, - SDRAM_R1BAS, - SDRAM_R2BAS, - SDRAM_R3BAS, - SDRAM_CONF1HB = 0x45, - SDRAM_PLBADDULL = 0x4a, - SDRAM_CONF1LL = 0x4b, - SDRAM_CONFPATHB = 0x4f, - SDRAM_PLBADDUHB = 0x50, -}; - -static uint32_t sdram_bcr(hwaddr ram_base, hwaddr ram_size) -{ - uint32_t bcr; - - switch (ram_size) { - case (8 * MiB): - bcr = 0xffc0; - break; - case (16 * MiB): - bcr = 0xff80; - break; - case (32 * MiB): - bcr = 0xff00; - break; - case (64 * MiB): - bcr = 0xfe00; - break; - case (128 * MiB): - bcr = 0xfc00; - break; - case (256 * MiB): - bcr = 0xf800; - break; - case (512 * MiB): - bcr = 0xf000; - break; - case (1 * GiB): - bcr = 0xe000; - break; - case (2 * GiB): - bcr = 0xc000; - break; - case (4 * GiB): - bcr = 0x8000; - break; - default: - error_report("invalid RAM size " TARGET_FMT_plx, ram_size); - return 0; - } - bcr |= ram_base >> 2 & 0xffe00000; - bcr |= 1; - - return bcr; -} - -static inline hwaddr sdram_base(uint32_t bcr) -{ - return (bcr & 0xffe00000) << 2; -} - -static uint64_t sdram_size(uint32_t bcr) -{ - uint64_t size; - int sh; - - sh = 1024 - ((bcr >> 6) & 0x3ff); - size = 8 * MiB * sh; - - return size; -} - -static void sdram_set_bcr(ppc440_sdram_t *sdram, int i, - uint32_t bcr, int enabled) -{ - if (sdram->bcr[i] & 1) { - /* First unmap RAM if enabled */ - memory_region_del_subregion(get_system_memory(), - &sdram->containers[i]); - memory_region_del_subregion(&sdram->containers[i], - &sdram->ram_memories[i]); - object_unparent(OBJECT(&sdram->containers[i])); - } - sdram->bcr[i] = bcr & 0xffe0ffc1; - if (enabled && (bcr & 1)) { - memory_region_init(&sdram->containers[i], NULL, "sdram-containers", - sdram_size(bcr)); - memory_region_add_subregion(&sdram->containers[i], 0, - &sdram->ram_memories[i]); - memory_region_add_subregion(get_system_memory(), - sdram_base(bcr), - 
&sdram->containers[i]); - } -} - -static void sdram_map_bcr(ppc440_sdram_t *sdram) -{ - int i; - - for (i = 0; i < sdram->nbanks; i++) { - if (sdram->ram_sizes[i] != 0) { - sdram_set_bcr(sdram, i, sdram_bcr(sdram->ram_bases[i], - sdram->ram_sizes[i]), 1); - } else { - sdram_set_bcr(sdram, i, 0, 0); - } - } -} - -static uint32_t dcr_read_sdram(void *opaque, int dcrn) -{ - ppc440_sdram_t *sdram = opaque; - uint32_t ret = 0; - - switch (dcrn) { - case SDRAM_R0BAS: - case SDRAM_R1BAS: - case SDRAM_R2BAS: - case SDRAM_R3BAS: - if (sdram->ram_sizes[dcrn - SDRAM_R0BAS]) { - ret = sdram_bcr(sdram->ram_bases[dcrn - SDRAM_R0BAS], - sdram->ram_sizes[dcrn - SDRAM_R0BAS]); - } - break; - case SDRAM_CONF1HB: - case SDRAM_CONF1LL: - case SDRAM_CONFPATHB: - case SDRAM_PLBADDULL: - case SDRAM_PLBADDUHB: - break; - case SDRAM0_CFGADDR: - ret = sdram->addr; - break; - case SDRAM0_CFGDATA: - switch (sdram->addr) { - case 0x14: /* SDRAM_MCSTAT (405EX) */ - case 0x1F: - ret = 0x80000000; - break; - case 0x21: /* SDRAM_MCOPT2 */ - ret = 0x08000000; - break; - case 0x40: /* SDRAM_MB0CF */ - ret = 0x00008001; - break; - case 0x7A: /* SDRAM_DLCR */ - ret = 0x02000000; - break; - case 0xE1: /* SDR0_DDR0 */ - ret = SDR0_DDR0_DDRM_ENCODE(1) | SDR0_DDR0_DDRM_DDR1; - break; - default: - break; - } - break; - default: - break; - } - - return ret; -} - -static void dcr_write_sdram(void *opaque, int dcrn, uint32_t val) -{ - ppc440_sdram_t *sdram = opaque; - - switch (dcrn) { - case SDRAM_R0BAS: - case SDRAM_R1BAS: - case SDRAM_R2BAS: - case SDRAM_R3BAS: - case SDRAM_CONF1HB: - case SDRAM_CONF1LL: - case SDRAM_CONFPATHB: - case SDRAM_PLBADDULL: - case SDRAM_PLBADDUHB: - break; - case SDRAM0_CFGADDR: - sdram->addr = val; - break; - case SDRAM0_CFGDATA: - switch (sdram->addr) { - case 0x00: /* B0CR */ - break; - default: - break; - } - break; - default: - break; - } -} - -static void sdram_reset(void *opaque) -{ - ppc440_sdram_t *sdram = opaque; - - sdram->addr = 0; -} - -void ppc440_sdram_init(CPUPPCState *env, int nbanks, - MemoryRegion *ram_memories, - hwaddr *ram_bases, hwaddr *ram_sizes, - int do_init) -{ - ppc440_sdram_t *sdram; - - sdram = g_malloc0(sizeof(*sdram)); - sdram->nbanks = nbanks; - sdram->ram_memories = ram_memories; - memcpy(sdram->ram_bases, ram_bases, nbanks * sizeof(hwaddr)); - memcpy(sdram->ram_sizes, ram_sizes, nbanks * sizeof(hwaddr)); - qemu_register_reset(&sdram_reset, sdram); - ppc_dcr_register(env, SDRAM0_CFGADDR, - sdram, &dcr_read_sdram, &dcr_write_sdram); - ppc_dcr_register(env, SDRAM0_CFGDATA, - sdram, &dcr_read_sdram, &dcr_write_sdram); - if (do_init) { - sdram_map_bcr(sdram); - } - - ppc_dcr_register(env, SDRAM_R0BAS, - sdram, &dcr_read_sdram, &dcr_write_sdram); - ppc_dcr_register(env, SDRAM_R1BAS, - sdram, &dcr_read_sdram, &dcr_write_sdram); - ppc_dcr_register(env, SDRAM_R2BAS, - sdram, &dcr_read_sdram, &dcr_write_sdram); - ppc_dcr_register(env, SDRAM_R3BAS, - sdram, &dcr_read_sdram, &dcr_write_sdram); - ppc_dcr_register(env, SDRAM_CONF1HB, - sdram, &dcr_read_sdram, &dcr_write_sdram); - ppc_dcr_register(env, SDRAM_PLBADDULL, - sdram, &dcr_read_sdram, &dcr_write_sdram); - ppc_dcr_register(env, SDRAM_CONF1LL, - sdram, &dcr_read_sdram, &dcr_write_sdram); - ppc_dcr_register(env, SDRAM_CONFPATHB, - sdram, &dcr_read_sdram, &dcr_write_sdram); - ppc_dcr_register(env, SDRAM_PLBADDUHB, - sdram, &dcr_read_sdram, &dcr_write_sdram); -} - /*****************************************************************************/ /* PLB to AHB bridge */ enum { @@ -904,14 +639,17 @@ static void dcr_write_dma(void 
*opaque, int dcrn, uint32_t val) int width, i, sidx, didx; uint8_t *rptr, *wptr; hwaddr rlen, wlen; + hwaddr xferlen; sidx = didx = 0; width = 1 << ((val & DMA0_CR_PW) >> 25); + xferlen = count * width; + wlen = rlen = xferlen; rptr = cpu_physical_memory_map(dma->ch[chnl].sa, &rlen, false); wptr = cpu_physical_memory_map(dma->ch[chnl].da, &wlen, true); - if (rptr && wptr) { + if (rptr && rlen == xferlen && wptr && wlen == xferlen) { if (!(val & DMA0_CR_DEC) && val & DMA0_CR_SAI && val & DMA0_CR_DAI) { /* optimise common case */ @@ -1025,7 +763,8 @@ void ppc4xx_dma_init(CPUPPCState *env, int dcr_base) /*****************************************************************************/ /* PCI Express controller */ -/* FIXME: This is not complete and does not work, only implemented partially +/* + * FIXME: This is not complete and does not work, only implemented partially * to allow firmware and guests to find an empty bus. Cards should use PCI. */ #include "hw/pci/pcie_host.h" @@ -1180,6 +919,14 @@ static void dcr_write_pcie(void *opaque, int dcrn, uint32_t val) case PEGPL_CFGMSK: s->cfg_mask = val; size = ~(val & 0xfffffffe) + 1; + /* + * Firmware sets this register to E0000001. Why we are not sure, + * but the current guess is anything above PCIE_MMCFG_SIZE_MAX is + * ignored. + */ + if (size > PCIE_MMCFG_SIZE_MAX) { + size = PCIE_MMCFG_SIZE_MAX; + } pcie_host_mmcfg_update(PCIE_HOST_BRIDGE(s), val & 1, s->cfg_base, size); break; case PEGPL_MSGBAH: diff --git a/hw/ppc/ppc4xx_devs.c b/hw/ppc/ppc4xx_devs.c index 980c48944f..c1d111465d 100644 --- a/hw/ppc/ppc4xx_devs.c +++ b/hw/ppc/ppc4xx_devs.c @@ -23,470 +23,11 @@ */ #include "qemu/osdep.h" -#include "qemu/units.h" -#include "sysemu/reset.h" #include "cpu.h" -#include "hw/irq.h" -#include "hw/ppc/ppc.h" #include "hw/ppc/ppc4xx.h" -#include "hw/intc/ppc-uic.h" #include "hw/qdev-properties.h" -#include "qemu/log.h" -#include "exec/address-spaces.h" -#include "qemu/error-report.h" #include "qapi/error.h" -/*#define DEBUG_UIC*/ - -#ifdef DEBUG_UIC -# define LOG_UIC(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__) -#else -# define LOG_UIC(...) 
do { } while (0) -#endif - -static void ppc4xx_reset(void *opaque) -{ - PowerPCCPU *cpu = opaque; - - cpu_reset(CPU(cpu)); -} - -/*****************************************************************************/ -/* Generic PowerPC 4xx processor instantiation */ -PowerPCCPU *ppc4xx_init(const char *cpu_type, - clk_setup_t *cpu_clk, clk_setup_t *tb_clk, - uint32_t sysclk) -{ - PowerPCCPU *cpu; - CPUPPCState *env; - - /* init CPUs */ - cpu = POWERPC_CPU(cpu_create(cpu_type)); - env = &cpu->env; - - cpu_clk->cb = NULL; /* We don't care about CPU clock frequency changes */ - cpu_clk->opaque = env; - /* Set time-base frequency to sysclk */ - tb_clk->cb = ppc_40x_timers_init(env, sysclk, PPC_INTERRUPT_PIT); - tb_clk->opaque = env; - ppc_dcr_init(env, NULL, NULL); - /* Register qemu callbacks */ - qemu_register_reset(ppc4xx_reset, cpu); - - return cpu; -} - -/*****************************************************************************/ -/* SDRAM controller */ -typedef struct ppc4xx_sdram_t ppc4xx_sdram_t; -struct ppc4xx_sdram_t { - uint32_t addr; - int nbanks; - MemoryRegion containers[4]; /* used for clipping */ - MemoryRegion *ram_memories; - hwaddr ram_bases[4]; - hwaddr ram_sizes[4]; - uint32_t besr0; - uint32_t besr1; - uint32_t bear; - uint32_t cfg; - uint32_t status; - uint32_t rtr; - uint32_t pmit; - uint32_t bcr[4]; - uint32_t tr; - uint32_t ecccfg; - uint32_t eccesr; - qemu_irq irq; -}; - -enum { - SDRAM0_CFGADDR = 0x010, - SDRAM0_CFGDATA = 0x011, -}; - -/* XXX: TOFIX: some patches have made this code become inconsistent: - * there are type inconsistencies, mixing hwaddr, target_ulong - * and uint32_t - */ -static uint32_t sdram_bcr (hwaddr ram_base, - hwaddr ram_size) -{ - uint32_t bcr; - - switch (ram_size) { - case 4 * MiB: - bcr = 0x00000000; - break; - case 8 * MiB: - bcr = 0x00020000; - break; - case 16 * MiB: - bcr = 0x00040000; - break; - case 32 * MiB: - bcr = 0x00060000; - break; - case 64 * MiB: - bcr = 0x00080000; - break; - case 128 * MiB: - bcr = 0x000A0000; - break; - case 256 * MiB: - bcr = 0x000C0000; - break; - default: - printf("%s: invalid RAM size " TARGET_FMT_plx "\n", __func__, - ram_size); - return 0x00000000; - } - bcr |= ram_base & 0xFF800000; - bcr |= 1; - - return bcr; -} - -static inline hwaddr sdram_base(uint32_t bcr) -{ - return bcr & 0xFF800000; -} - -static target_ulong sdram_size (uint32_t bcr) -{ - target_ulong size; - int sh; - - sh = (bcr >> 17) & 0x7; - if (sh == 7) - size = -1; - else - size = (4 * MiB) << sh; - - return size; -} - -static void sdram_set_bcr(ppc4xx_sdram_t *sdram, int i, - uint32_t bcr, int enabled) -{ - if (sdram->bcr[i] & 0x00000001) { - /* Unmap RAM */ -#ifdef DEBUG_SDRAM - printf("%s: unmap RAM area " TARGET_FMT_plx " " TARGET_FMT_lx "\n", - __func__, sdram_base(sdram->bcr[i]), sdram_size(sdram->bcr[i])); -#endif - memory_region_del_subregion(get_system_memory(), - &sdram->containers[i]); - memory_region_del_subregion(&sdram->containers[i], - &sdram->ram_memories[i]); - object_unparent(OBJECT(&sdram->containers[i])); - } - sdram->bcr[i] = bcr & 0xFFDEE001; - if (enabled && (bcr & 0x00000001)) { -#ifdef DEBUG_SDRAM - printf("%s: Map RAM area " TARGET_FMT_plx " " TARGET_FMT_lx "\n", - __func__, sdram_base(bcr), sdram_size(bcr)); -#endif - memory_region_init(&sdram->containers[i], NULL, "sdram-containers", - sdram_size(bcr)); - memory_region_add_subregion(&sdram->containers[i], 0, - &sdram->ram_memories[i]); - memory_region_add_subregion(get_system_memory(), - sdram_base(bcr), - &sdram->containers[i]); - } -} - -static void 
sdram_map_bcr (ppc4xx_sdram_t *sdram) -{ - int i; - - for (i = 0; i < sdram->nbanks; i++) { - if (sdram->ram_sizes[i] != 0) { - sdram_set_bcr(sdram, i, sdram_bcr(sdram->ram_bases[i], - sdram->ram_sizes[i]), 1); - } else { - sdram_set_bcr(sdram, i, 0x00000000, 0); - } - } -} - -static void sdram_unmap_bcr (ppc4xx_sdram_t *sdram) -{ - int i; - - for (i = 0; i < sdram->nbanks; i++) { -#ifdef DEBUG_SDRAM - printf("%s: Unmap RAM area " TARGET_FMT_plx " " TARGET_FMT_lx "\n", - __func__, sdram_base(sdram->bcr[i]), sdram_size(sdram->bcr[i])); -#endif - memory_region_del_subregion(get_system_memory(), - &sdram->ram_memories[i]); - } -} - -static uint32_t dcr_read_sdram (void *opaque, int dcrn) -{ - ppc4xx_sdram_t *sdram; - uint32_t ret; - - sdram = opaque; - switch (dcrn) { - case SDRAM0_CFGADDR: - ret = sdram->addr; - break; - case SDRAM0_CFGDATA: - switch (sdram->addr) { - case 0x00: /* SDRAM_BESR0 */ - ret = sdram->besr0; - break; - case 0x08: /* SDRAM_BESR1 */ - ret = sdram->besr1; - break; - case 0x10: /* SDRAM_BEAR */ - ret = sdram->bear; - break; - case 0x20: /* SDRAM_CFG */ - ret = sdram->cfg; - break; - case 0x24: /* SDRAM_STATUS */ - ret = sdram->status; - break; - case 0x30: /* SDRAM_RTR */ - ret = sdram->rtr; - break; - case 0x34: /* SDRAM_PMIT */ - ret = sdram->pmit; - break; - case 0x40: /* SDRAM_B0CR */ - ret = sdram->bcr[0]; - break; - case 0x44: /* SDRAM_B1CR */ - ret = sdram->bcr[1]; - break; - case 0x48: /* SDRAM_B2CR */ - ret = sdram->bcr[2]; - break; - case 0x4C: /* SDRAM_B3CR */ - ret = sdram->bcr[3]; - break; - case 0x80: /* SDRAM_TR */ - ret = -1; /* ? */ - break; - case 0x94: /* SDRAM_ECCCFG */ - ret = sdram->ecccfg; - break; - case 0x98: /* SDRAM_ECCESR */ - ret = sdram->eccesr; - break; - default: /* Error */ - ret = -1; - break; - } - break; - default: - /* Avoid gcc warning */ - ret = 0x00000000; - break; - } - - return ret; -} - -static void dcr_write_sdram (void *opaque, int dcrn, uint32_t val) -{ - ppc4xx_sdram_t *sdram; - - sdram = opaque; - switch (dcrn) { - case SDRAM0_CFGADDR: - sdram->addr = val; - break; - case SDRAM0_CFGDATA: - switch (sdram->addr) { - case 0x00: /* SDRAM_BESR0 */ - sdram->besr0 &= ~val; - break; - case 0x08: /* SDRAM_BESR1 */ - sdram->besr1 &= ~val; - break; - case 0x10: /* SDRAM_BEAR */ - sdram->bear = val; - break; - case 0x20: /* SDRAM_CFG */ - val &= 0xFFE00000; - if (!(sdram->cfg & 0x80000000) && (val & 0x80000000)) { -#ifdef DEBUG_SDRAM - printf("%s: enable SDRAM controller\n", __func__); -#endif - /* validate all RAM mappings */ - sdram_map_bcr(sdram); - sdram->status &= ~0x80000000; - } else if ((sdram->cfg & 0x80000000) && !(val & 0x80000000)) { -#ifdef DEBUG_SDRAM - printf("%s: disable SDRAM controller\n", __func__); -#endif - /* invalidate all RAM mappings */ - sdram_unmap_bcr(sdram); - sdram->status |= 0x80000000; - } - if (!(sdram->cfg & 0x40000000) && (val & 0x40000000)) - sdram->status |= 0x40000000; - else if ((sdram->cfg & 0x40000000) && !(val & 0x40000000)) - sdram->status &= ~0x40000000; - sdram->cfg = val; - break; - case 0x24: /* SDRAM_STATUS */ - /* Read-only register */ - break; - case 0x30: /* SDRAM_RTR */ - sdram->rtr = val & 0x3FF80000; - break; - case 0x34: /* SDRAM_PMIT */ - sdram->pmit = (val & 0xF8000000) | 0x07C00000; - break; - case 0x40: /* SDRAM_B0CR */ - sdram_set_bcr(sdram, 0, val, sdram->cfg & 0x80000000); - break; - case 0x44: /* SDRAM_B1CR */ - sdram_set_bcr(sdram, 1, val, sdram->cfg & 0x80000000); - break; - case 0x48: /* SDRAM_B2CR */ - sdram_set_bcr(sdram, 2, val, sdram->cfg & 0x80000000); - break; - 
case 0x4C: /* SDRAM_B3CR */ - sdram_set_bcr(sdram, 3, val, sdram->cfg & 0x80000000); - break; - case 0x80: /* SDRAM_TR */ - sdram->tr = val & 0x018FC01F; - break; - case 0x94: /* SDRAM_ECCCFG */ - sdram->ecccfg = val & 0x00F00000; - break; - case 0x98: /* SDRAM_ECCESR */ - val &= 0xFFF0F000; - if (sdram->eccesr == 0 && val != 0) - qemu_irq_raise(sdram->irq); - else if (sdram->eccesr != 0 && val == 0) - qemu_irq_lower(sdram->irq); - sdram->eccesr = val; - break; - default: /* Error */ - break; - } - break; - } -} - -static void sdram_reset (void *opaque) -{ - ppc4xx_sdram_t *sdram; - - sdram = opaque; - sdram->addr = 0x00000000; - sdram->bear = 0x00000000; - sdram->besr0 = 0x00000000; /* No error */ - sdram->besr1 = 0x00000000; /* No error */ - sdram->cfg = 0x00000000; - sdram->ecccfg = 0x00000000; /* No ECC */ - sdram->eccesr = 0x00000000; /* No error */ - sdram->pmit = 0x07C00000; - sdram->rtr = 0x05F00000; - sdram->tr = 0x00854009; - /* We pre-initialize RAM banks */ - sdram->status = 0x00000000; - sdram->cfg = 0x00800000; -} - -void ppc4xx_sdram_init (CPUPPCState *env, qemu_irq irq, int nbanks, - MemoryRegion *ram_memories, - hwaddr *ram_bases, - hwaddr *ram_sizes, - int do_init) -{ - ppc4xx_sdram_t *sdram; - - sdram = g_malloc0(sizeof(ppc4xx_sdram_t)); - sdram->irq = irq; - sdram->nbanks = nbanks; - sdram->ram_memories = ram_memories; - memset(sdram->ram_bases, 0, 4 * sizeof(hwaddr)); - memcpy(sdram->ram_bases, ram_bases, - nbanks * sizeof(hwaddr)); - memset(sdram->ram_sizes, 0, 4 * sizeof(hwaddr)); - memcpy(sdram->ram_sizes, ram_sizes, - nbanks * sizeof(hwaddr)); - qemu_register_reset(&sdram_reset, sdram); - ppc_dcr_register(env, SDRAM0_CFGADDR, - sdram, &dcr_read_sdram, &dcr_write_sdram); - ppc_dcr_register(env, SDRAM0_CFGDATA, - sdram, &dcr_read_sdram, &dcr_write_sdram); - if (do_init) - sdram_map_bcr(sdram); -} - -/* - * Split RAM between SDRAM banks. - * - * sdram_bank_sizes[] must be in descending order, that is sizes[i] > sizes[i+1] - * and must be 0-terminated. - * - * The 4xx SDRAM controller supports a small number of banks, and each bank - * must be one of a small set of sizes. The number of banks and the supported - * sizes varies by SoC. - */ -void ppc4xx_sdram_banks(MemoryRegion *ram, int nr_banks, - MemoryRegion ram_memories[], - hwaddr ram_bases[], hwaddr ram_sizes[], - const ram_addr_t sdram_bank_sizes[]) -{ - ram_addr_t size_left = memory_region_size(ram); - ram_addr_t base = 0; - ram_addr_t bank_size; - int i; - int j; - - for (i = 0; i < nr_banks; i++) { - for (j = 0; sdram_bank_sizes[j] != 0; j++) { - bank_size = sdram_bank_sizes[j]; - if (bank_size <= size_left) { - char name[32]; - - ram_bases[i] = base; - ram_sizes[i] = bank_size; - base += bank_size; - size_left -= bank_size; - snprintf(name, sizeof(name), "ppc4xx.sdram%d", i); - memory_region_init_alias(&ram_memories[i], NULL, name, ram, - ram_bases[i], ram_sizes[i]); - break; - } - } - if (!size_left) { - /* No need to use the remaining banks. */ - break; - } - } - - if (size_left) { - ram_addr_t used_size = memory_region_size(ram) - size_left; - GString *s = g_string_new(NULL); - - for (i = 0; sdram_bank_sizes[i]; i++) { - g_string_append_printf(s, "%" PRIi64 "%s", - sdram_bank_sizes[i] / MiB, - sdram_bank_sizes[i + 1] ? ", " : ""); - } - error_report("at most %d bank%s of %s MiB each supported", - nr_banks, nr_banks == 1 ? "" : "s", s->str); - error_printf("Possible valid RAM size: %" PRIi64 " MiB \n", - used_size ? 
used_size / MiB : sdram_bank_sizes[i - 1] / MiB); - - g_string_free(s, true); - exit(EXIT_FAILURE); - } -} - /*****************************************************************************/ /* MAL */ @@ -508,32 +49,10 @@ enum { MAL0_RCBS1 = 0x1E1, }; -typedef struct ppc4xx_mal_t ppc4xx_mal_t; -struct ppc4xx_mal_t { - qemu_irq irqs[4]; - uint32_t cfg; - uint32_t esr; - uint32_t ier; - uint32_t txcasr; - uint32_t txcarr; - uint32_t txeobisr; - uint32_t txdeir; - uint32_t rxcasr; - uint32_t rxcarr; - uint32_t rxeobisr; - uint32_t rxdeir; - uint32_t *txctpr; - uint32_t *rxctpr; - uint32_t *rcbs; - uint8_t txcnum; - uint8_t rxcnum; -}; - -static void ppc4xx_mal_reset(void *opaque) +static void ppc4xx_mal_reset(DeviceState *dev) { - ppc4xx_mal_t *mal; + Ppc4xxMalState *mal = PPC4xx_MAL(dev); - mal = opaque; mal->cfg = 0x0007C000; mal->esr = 0x00000000; mal->ier = 0x00000000; @@ -547,10 +66,9 @@ static void ppc4xx_mal_reset(void *opaque) static uint32_t dcr_read_mal(void *opaque, int dcrn) { - ppc4xx_mal_t *mal; + Ppc4xxMalState *mal = opaque; uint32_t ret; - mal = opaque; switch (dcrn) { case MAL0_CFG: ret = mal->cfg; @@ -604,13 +122,12 @@ static uint32_t dcr_read_mal(void *opaque, int dcrn) static void dcr_write_mal(void *opaque, int dcrn, uint32_t val) { - ppc4xx_mal_t *mal; + Ppc4xxMalState *mal = opaque; - mal = opaque; switch (dcrn) { case MAL0_CFG: if (val & 0x80000000) { - ppc4xx_mal_reset(mal); + ppc4xx_mal_reset(DEVICE(mal)); } mal->cfg = val & 0x00FFC087; break; @@ -661,55 +178,404 @@ static void dcr_write_mal(void *opaque, int dcrn, uint32_t val) } } -void ppc4xx_mal_init(CPUPPCState *env, uint8_t txcnum, uint8_t rxcnum, - qemu_irq irqs[4]) +static void ppc4xx_mal_realize(DeviceState *dev, Error **errp) { - ppc4xx_mal_t *mal; + Ppc4xxMalState *mal = PPC4xx_MAL(dev); + Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev); int i; - assert(txcnum <= 32 && rxcnum <= 32); - mal = g_malloc0(sizeof(*mal)); - mal->txcnum = txcnum; - mal->rxcnum = rxcnum; - mal->txctpr = g_new0(uint32_t, txcnum); - mal->rxctpr = g_new0(uint32_t, rxcnum); - mal->rcbs = g_new0(uint32_t, rxcnum); - for (i = 0; i < 4; i++) { - mal->irqs[i] = irqs[i]; + if (mal->txcnum > 32 || mal->rxcnum > 32) { + error_setg(errp, "invalid TXC/RXC number"); + return; } - qemu_register_reset(&ppc4xx_mal_reset, mal); - ppc_dcr_register(env, MAL0_CFG, - mal, &dcr_read_mal, &dcr_write_mal); - ppc_dcr_register(env, MAL0_ESR, - mal, &dcr_read_mal, &dcr_write_mal); - ppc_dcr_register(env, MAL0_IER, - mal, &dcr_read_mal, &dcr_write_mal); - ppc_dcr_register(env, MAL0_TXCASR, - mal, &dcr_read_mal, &dcr_write_mal); - ppc_dcr_register(env, MAL0_TXCARR, - mal, &dcr_read_mal, &dcr_write_mal); - ppc_dcr_register(env, MAL0_TXEOBISR, - mal, &dcr_read_mal, &dcr_write_mal); - ppc_dcr_register(env, MAL0_TXDEIR, - mal, &dcr_read_mal, &dcr_write_mal); - ppc_dcr_register(env, MAL0_RXCASR, - mal, &dcr_read_mal, &dcr_write_mal); - ppc_dcr_register(env, MAL0_RXCARR, - mal, &dcr_read_mal, &dcr_write_mal); - ppc_dcr_register(env, MAL0_RXEOBISR, - mal, &dcr_read_mal, &dcr_write_mal); - ppc_dcr_register(env, MAL0_RXDEIR, - mal, &dcr_read_mal, &dcr_write_mal); - for (i = 0; i < txcnum; i++) { - ppc_dcr_register(env, MAL0_TXCTP0R + i, - mal, &dcr_read_mal, &dcr_write_mal); + + mal->txctpr = g_new0(uint32_t, mal->txcnum); + mal->rxctpr = g_new0(uint32_t, mal->rxcnum); + mal->rcbs = g_new0(uint32_t, mal->rxcnum); + + for (i = 0; i < ARRAY_SIZE(mal->irqs); i++) { + sysbus_init_irq(SYS_BUS_DEVICE(dev), &mal->irqs[i]); } - for (i = 0; i < rxcnum; i++) { - 
ppc_dcr_register(env, MAL0_RXCTP0R + i, - mal, &dcr_read_mal, &dcr_write_mal); + + ppc4xx_dcr_register(dcr, MAL0_CFG, mal, &dcr_read_mal, &dcr_write_mal); + ppc4xx_dcr_register(dcr, MAL0_ESR, mal, &dcr_read_mal, &dcr_write_mal); + ppc4xx_dcr_register(dcr, MAL0_IER, mal, &dcr_read_mal, &dcr_write_mal); + ppc4xx_dcr_register(dcr, MAL0_TXCASR, mal, &dcr_read_mal, &dcr_write_mal); + ppc4xx_dcr_register(dcr, MAL0_TXCARR, mal, &dcr_read_mal, &dcr_write_mal); + ppc4xx_dcr_register(dcr, MAL0_TXEOBISR, mal, &dcr_read_mal, &dcr_write_mal); + ppc4xx_dcr_register(dcr, MAL0_TXDEIR, mal, &dcr_read_mal, &dcr_write_mal); + ppc4xx_dcr_register(dcr, MAL0_RXCASR, mal, &dcr_read_mal, &dcr_write_mal); + ppc4xx_dcr_register(dcr, MAL0_RXCARR, mal, &dcr_read_mal, &dcr_write_mal); + ppc4xx_dcr_register(dcr, MAL0_RXEOBISR, mal, &dcr_read_mal, &dcr_write_mal); + ppc4xx_dcr_register(dcr, MAL0_RXDEIR, mal, &dcr_read_mal, &dcr_write_mal); + for (i = 0; i < mal->txcnum; i++) { + ppc4xx_dcr_register(dcr, MAL0_TXCTP0R + i, + mal, &dcr_read_mal, &dcr_write_mal); } - for (i = 0; i < rxcnum; i++) { - ppc_dcr_register(env, MAL0_RCBS0 + i, - mal, &dcr_read_mal, &dcr_write_mal); + for (i = 0; i < mal->rxcnum; i++) { + ppc4xx_dcr_register(dcr, MAL0_RXCTP0R + i, + mal, &dcr_read_mal, &dcr_write_mal); + } + for (i = 0; i < mal->rxcnum; i++) { + ppc4xx_dcr_register(dcr, MAL0_RCBS0 + i, + mal, &dcr_read_mal, &dcr_write_mal); } } + +static void ppc4xx_mal_finalize(Object *obj) +{ + Ppc4xxMalState *mal = PPC4xx_MAL(obj); + + g_free(mal->rcbs); + g_free(mal->rxctpr); + g_free(mal->txctpr); +} + +static Property ppc4xx_mal_properties[] = { + DEFINE_PROP_UINT8("txc-num", Ppc4xxMalState, txcnum, 0), + DEFINE_PROP_UINT8("rxc-num", Ppc4xxMalState, rxcnum, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ppc4xx_mal_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = ppc4xx_mal_realize; + dc->reset = ppc4xx_mal_reset; + /* Reason: only works as function of a ppc4xx SoC */ + dc->user_creatable = false; + device_class_set_props(dc, ppc4xx_mal_properties); +} + +/*****************************************************************************/ +/* Peripheral local bus arbitrer */ +enum { + PLB3A0_ACR = 0x077, + PLB4A0_ACR = 0x081, + PLB0_BESR = 0x084, + PLB0_BEAR = 0x086, + PLB0_ACR = 0x087, + PLB4A1_ACR = 0x089, +}; + +static uint32_t dcr_read_plb(void *opaque, int dcrn) +{ + Ppc4xxPlbState *plb = opaque; + uint32_t ret; + + switch (dcrn) { + case PLB0_ACR: + ret = plb->acr; + break; + case PLB0_BEAR: + ret = plb->bear; + break; + case PLB0_BESR: + ret = plb->besr; + break; + default: + /* Avoid gcc warning */ + ret = 0; + break; + } + + return ret; +} + +static void dcr_write_plb(void *opaque, int dcrn, uint32_t val) +{ + Ppc4xxPlbState *plb = opaque; + + switch (dcrn) { + case PLB0_ACR: + /* + * We don't care about the actual parameters written as + * we don't manage any priorities on the bus + */ + plb->acr = val & 0xF8000000; + break; + case PLB0_BEAR: + /* Read only */ + break; + case PLB0_BESR: + /* Write-clear */ + plb->besr &= ~val; + break; + } +} + +static void ppc405_plb_reset(DeviceState *dev) +{ + Ppc4xxPlbState *plb = PPC4xx_PLB(dev); + + plb->acr = 0x00000000; + plb->bear = 0x00000000; + plb->besr = 0x00000000; +} + +static void ppc405_plb_realize(DeviceState *dev, Error **errp) +{ + Ppc4xxPlbState *plb = PPC4xx_PLB(dev); + Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev); + + ppc4xx_dcr_register(dcr, PLB3A0_ACR, plb, &dcr_read_plb, &dcr_write_plb); + ppc4xx_dcr_register(dcr, 
PLB4A0_ACR, plb, &dcr_read_plb, &dcr_write_plb); + ppc4xx_dcr_register(dcr, PLB0_ACR, plb, &dcr_read_plb, &dcr_write_plb); + ppc4xx_dcr_register(dcr, PLB0_BEAR, plb, &dcr_read_plb, &dcr_write_plb); + ppc4xx_dcr_register(dcr, PLB0_BESR, plb, &dcr_read_plb, &dcr_write_plb); + ppc4xx_dcr_register(dcr, PLB4A1_ACR, plb, &dcr_read_plb, &dcr_write_plb); +} + +static void ppc405_plb_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = ppc405_plb_realize; + dc->reset = ppc405_plb_reset; + /* Reason: only works as function of a ppc4xx SoC */ + dc->user_creatable = false; +} + +/*****************************************************************************/ +/* Peripheral controller */ +enum { + EBC0_CFGADDR = 0x012, + EBC0_CFGDATA = 0x013, +}; + +static uint32_t dcr_read_ebc(void *opaque, int dcrn) +{ + Ppc4xxEbcState *ebc = opaque; + uint32_t ret; + + switch (dcrn) { + case EBC0_CFGADDR: + ret = ebc->addr; + break; + case EBC0_CFGDATA: + switch (ebc->addr) { + case 0x00: /* B0CR */ + ret = ebc->bcr[0]; + break; + case 0x01: /* B1CR */ + ret = ebc->bcr[1]; + break; + case 0x02: /* B2CR */ + ret = ebc->bcr[2]; + break; + case 0x03: /* B3CR */ + ret = ebc->bcr[3]; + break; + case 0x04: /* B4CR */ + ret = ebc->bcr[4]; + break; + case 0x05: /* B5CR */ + ret = ebc->bcr[5]; + break; + case 0x06: /* B6CR */ + ret = ebc->bcr[6]; + break; + case 0x07: /* B7CR */ + ret = ebc->bcr[7]; + break; + case 0x10: /* B0AP */ + ret = ebc->bap[0]; + break; + case 0x11: /* B1AP */ + ret = ebc->bap[1]; + break; + case 0x12: /* B2AP */ + ret = ebc->bap[2]; + break; + case 0x13: /* B3AP */ + ret = ebc->bap[3]; + break; + case 0x14: /* B4AP */ + ret = ebc->bap[4]; + break; + case 0x15: /* B5AP */ + ret = ebc->bap[5]; + break; + case 0x16: /* B6AP */ + ret = ebc->bap[6]; + break; + case 0x17: /* B7AP */ + ret = ebc->bap[7]; + break; + case 0x20: /* BEAR */ + ret = ebc->bear; + break; + case 0x21: /* BESR0 */ + ret = ebc->besr0; + break; + case 0x22: /* BESR1 */ + ret = ebc->besr1; + break; + case 0x23: /* CFG */ + ret = ebc->cfg; + break; + default: + ret = 0x00000000; + break; + } + break; + default: + ret = 0x00000000; + break; + } + + return ret; +} + +static void dcr_write_ebc(void *opaque, int dcrn, uint32_t val) +{ + Ppc4xxEbcState *ebc = opaque; + + switch (dcrn) { + case EBC0_CFGADDR: + ebc->addr = val; + break; + case EBC0_CFGDATA: + switch (ebc->addr) { + case 0x00: /* B0CR */ + break; + case 0x01: /* B1CR */ + break; + case 0x02: /* B2CR */ + break; + case 0x03: /* B3CR */ + break; + case 0x04: /* B4CR */ + break; + case 0x05: /* B5CR */ + break; + case 0x06: /* B6CR */ + break; + case 0x07: /* B7CR */ + break; + case 0x10: /* B0AP */ + break; + case 0x11: /* B1AP */ + break; + case 0x12: /* B2AP */ + break; + case 0x13: /* B3AP */ + break; + case 0x14: /* B4AP */ + break; + case 0x15: /* B5AP */ + break; + case 0x16: /* B6AP */ + break; + case 0x17: /* B7AP */ + break; + case 0x20: /* BEAR */ + break; + case 0x21: /* BESR0 */ + break; + case 0x22: /* BESR1 */ + break; + case 0x23: /* CFG */ + break; + default: + break; + } + break; + default: + break; + } +} + +static void ppc405_ebc_reset(DeviceState *dev) +{ + Ppc4xxEbcState *ebc = PPC4xx_EBC(dev); + int i; + + ebc->addr = 0x00000000; + ebc->bap[0] = 0x7F8FFE80; + ebc->bcr[0] = 0xFFE28000; + for (i = 0; i < 8; i++) { + ebc->bap[i] = 0x00000000; + ebc->bcr[i] = 0x00000000; + } + ebc->besr0 = 0x00000000; + ebc->besr1 = 0x00000000; + ebc->cfg = 0x80400000; +} + +static void ppc405_ebc_realize(DeviceState *dev, Error 
**errp) +{ + Ppc4xxEbcState *ebc = PPC4xx_EBC(dev); + Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev); + + ppc4xx_dcr_register(dcr, EBC0_CFGADDR, ebc, &dcr_read_ebc, &dcr_write_ebc); + ppc4xx_dcr_register(dcr, EBC0_CFGDATA, ebc, &dcr_read_ebc, &dcr_write_ebc); +} + +static void ppc405_ebc_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = ppc405_ebc_realize; + dc->reset = ppc405_ebc_reset; + /* Reason: only works as function of a ppc4xx SoC */ + dc->user_creatable = false; +} + +/* PPC4xx_DCR_DEVICE */ + +void ppc4xx_dcr_register(Ppc4xxDcrDeviceState *dev, int dcrn, void *opaque, + dcr_read_cb dcr_read, dcr_write_cb dcr_write) +{ + assert(dev->cpu); + ppc_dcr_register(&dev->cpu->env, dcrn, opaque, dcr_read, dcr_write); +} + +bool ppc4xx_dcr_realize(Ppc4xxDcrDeviceState *dev, PowerPCCPU *cpu, + Error **errp) +{ + object_property_set_link(OBJECT(dev), "cpu", OBJECT(cpu), &error_abort); + return sysbus_realize(SYS_BUS_DEVICE(dev), errp); +} + +static Property ppc4xx_dcr_properties[] = { + DEFINE_PROP_LINK("cpu", Ppc4xxDcrDeviceState, cpu, TYPE_POWERPC_CPU, + PowerPCCPU *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ppc4xx_dcr_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + device_class_set_props(dc, ppc4xx_dcr_properties); +} + +static const TypeInfo ppc4xx_types[] = { + { + .name = TYPE_PPC4xx_MAL, + .parent = TYPE_PPC4xx_DCR_DEVICE, + .instance_size = sizeof(Ppc4xxMalState), + .instance_finalize = ppc4xx_mal_finalize, + .class_init = ppc4xx_mal_class_init, + }, { + .name = TYPE_PPC4xx_PLB, + .parent = TYPE_PPC4xx_DCR_DEVICE, + .instance_size = sizeof(Ppc4xxPlbState), + .class_init = ppc405_plb_class_init, + }, { + .name = TYPE_PPC4xx_EBC, + .parent = TYPE_PPC4xx_DCR_DEVICE, + .instance_size = sizeof(Ppc4xxEbcState), + .class_init = ppc405_ebc_class_init, + }, { + .name = TYPE_PPC4xx_DCR_DEVICE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Ppc4xxDcrDeviceState), + .class_init = ppc4xx_dcr_class_init, + .abstract = true, + } +}; + +DEFINE_TYPES(ppc4xx_types) diff --git a/hw/ppc/ppc4xx_pci.c b/hw/ppc/ppc4xx_pci.c index 8147ba6f94..8642b96455 100644 --- a/hw/ppc/ppc4xx_pci.c +++ b/hw/ppc/ppc4xx_pci.c @@ -16,10 +16,13 @@ * Authors: Hollis Blanchard */ -/* This file implements emulation of the 32-bit PCI controller found in some - * 4xx SoCs, such as the 440EP. */ +/* + * This file implements emulation of the 32-bit PCI controller found in some + * 4xx SoCs, such as the 440EP. + */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "hw/irq.h" #include "hw/ppc/ppc.h" #include "hw/ppc/ppc4xx.h" @@ -48,12 +51,14 @@ OBJECT_DECLARE_SIMPLE_TYPE(PPC4xxPCIState, PPC4xx_PCI_HOST_BRIDGE) #define PPC4xx_PCI_NR_PMMS 3 #define PPC4xx_PCI_NR_PTMS 2 +#define PPC4xx_PCI_NUM_DEVS 5 + struct PPC4xxPCIState { PCIHostState parent_obj; struct PCIMasterMap pmm[PPC4xx_PCI_NR_PMMS]; struct PCITargetMap ptm[PPC4xx_PCI_NR_PTMS]; - qemu_irq irq[PCI_NUM_PINS]; + qemu_irq irq[PPC4xx_PCI_NUM_DEVS]; MemoryRegion container; MemoryRegion iomem; @@ -62,8 +67,10 @@ struct PPC4xxPCIState { #define PCIC0_CFGADDR 0x0 #define PCIC0_CFGDATA 0x4 -/* PLB Memory Map (PMM) registers specify which PLB addresses are translated to - * PCI accesses. */ +/* + * PLB Memory Map (PMM) registers specify which PLB addresses are translated to + * PCI accesses. 
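The two helpers just introduced (ppc4xx_dcr_register() and ppc4xx_dcr_realize()) define the pattern every converted controller follows: derive from the abstract TYPE_PPC4xx_DCR_DEVICE, register DCRs in realize(), and let the SoC or board attach the device to a CPU. A condensed sketch of that pattern is below; the device name, DCR number, and register field are made up for illustration, and type registration (TypeInfo/DEFINE_TYPES) is omitted for brevity.

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/ppc/ppc4xx.h"

#define TYPE_PPC4xx_EXAMPLE "ppc4xx-example-dcr-dev"
OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxExampleState, PPC4xx_EXAMPLE)

struct Ppc4xxExampleState {
    Ppc4xxDcrDeviceState parent_obj;
    uint32_t reg;
};

static uint32_t example_dcr_read(void *opaque, int dcrn)
{
    Ppc4xxExampleState *s = opaque;
    return s->reg;
}

static void example_dcr_write(void *opaque, int dcrn, uint32_t val)
{
    Ppc4xxExampleState *s = opaque;
    s->reg = val;
}

static void example_realize(DeviceState *dev, Error **errp)
{
    Ppc4xxExampleState *s = PPC4xx_EXAMPLE(dev);

    /*
     * The "cpu" link is already set when this runs, because the device is
     * realized through ppc4xx_dcr_realize(). 0x123 is an arbitrary DCR
     * number chosen for the example.
     */
    ppc4xx_dcr_register(PPC4xx_DCR_DEVICE(dev), 0x123,
                        s, &example_dcr_read, &example_dcr_write);
}

/* Board/SoC side: set the CPU link and realize in one call. */
static void example_attach(Ppc4xxExampleState *s, PowerPCCPU *cpu)
{
    ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(s), cpu, &error_fatal);
}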
+ */ #define PCIL0_PMM0LA 0x0 #define PCIL0_PMM0MA 0x4 #define PCIL0_PMM0PCILA 0x8 @@ -77,8 +84,10 @@ struct PPC4xxPCIState { #define PCIL0_PMM2PCILA 0x28 #define PCIL0_PMM2PCIHA 0x2c -/* PCI Target Map (PTM) registers specify which PCI addresses are translated to - * PLB accesses. */ +/* + * PCI Target Map (PTM) registers specify which PCI addresses are translated to + * PLB accesses. + */ #define PCIL0_PTM1MS 0x30 #define PCIL0_PTM1LA 0x34 #define PCIL0_PTM2MS 0x38 @@ -93,9 +102,10 @@ static void ppc4xx_pci_reg_write4(void *opaque, hwaddr offset, { struct PPC4xxPCIState *pci = opaque; - /* We ignore all target attempts at PCI configuration, effectively - * assuming a bidirectional 1:1 mapping of PLB and PCI space. */ - + /* + * We ignore all target attempts at PCI configuration, effectively + * assuming a bidirectional 1:1 mapping of PLB and PCI space. + */ switch (offset) { case PCIL0_PMM0LA: pci->pmm[0].la = value; @@ -150,8 +160,9 @@ static void ppc4xx_pci_reg_write4(void *opaque, hwaddr offset, break; default: - printf("%s: unhandled PCI internal register 0x%lx\n", __func__, - (unsigned long)offset); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: unhandled PCI internal register 0x%" HWADDR_PRIx "\n", + __func__, offset); break; } } @@ -216,8 +227,9 @@ static uint64_t ppc4xx_pci_reg_read4(void *opaque, hwaddr offset, break; default: - printf("%s: invalid PCI internal register 0x%lx\n", __func__, - (unsigned long)offset); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid PCI internal register 0x%" HWADDR_PRIx "\n", + __func__, offset); value = 0; } @@ -238,15 +250,17 @@ static void ppc4xx_pci_reset(void *opaque) memset(pci->ptm, 0, sizeof(pci->ptm)); } -/* On Bamboo, all pins from each slot are tied to a single board IRQ. This - * may need further refactoring for other boards. */ +/* + * On Bamboo, all pins from each slot are tied to a single board IRQ. + * This may need further refactoring for other boards. + */ static int ppc4xx_pci_map_irq(PCIDevice *pci_dev, int irq_num) { int slot = PCI_SLOT(pci_dev->devfn); trace_ppc4xx_pci_map_irq(pci_dev->devfn, irq_num, slot); - return slot - 1; + return slot > 0 ? slot - 1 : PPC4xx_PCI_NUM_DEVS - 1; } static void ppc4xx_pci_set_irq(void *opaque, int irq_num, int level) @@ -254,7 +268,7 @@ static void ppc4xx_pci_set_irq(void *opaque, int irq_num, int level) qemu_irq *pci_irqs = opaque; trace_ppc4xx_pci_set_irq(irq_num); - assert(irq_num >= 0); + assert(irq_num >= 0 && irq_num < PPC4xx_PCI_NUM_DEVS); qemu_set_irq(pci_irqs[irq_num], level); } diff --git a/hw/ppc/ppc4xx_sdram.c b/hw/ppc/ppc4xx_sdram.c new file mode 100644 index 0000000000..8d7137faf3 --- /dev/null +++ b/hw/ppc/ppc4xx_sdram.c @@ -0,0 +1,757 @@ +/* + * QEMU PowerPC 4xx embedded processors SDRAM controller emulation + * + * DDR SDRAM controller: + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + * DDR2 SDRAM controller: + * Copyright (c) 2012 François Revol + * Copyright (c) 2016-2019 BALATON Zoltan + * + * This work is licensed under the GNU GPL license version 2 or later. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "exec/address-spaces.h" /* get_system_memory() */ +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/ppc/ppc4xx.h" +#include "trace.h" + +/*****************************************************************************/ +/* Shared functions */ + +/* + * Split RAM between SDRAM banks. + * + * sdram_bank_sizes[] must be in descending order, that is sizes[i] > sizes[i+1] + * and must be 0-terminated. + * + * The 4xx SDRAM controller supports a small number of banks, and each bank + * must be one of a small set of sizes. The number of banks and the supported + * sizes varies by SoC. + */ +static bool ppc4xx_sdram_banks(MemoryRegion *ram, int nr_banks, + Ppc4xxSdramBank ram_banks[], + const ram_addr_t sdram_bank_sizes[], + Error **errp) +{ + ERRP_GUARD(); + ram_addr_t size_left = memory_region_size(ram); + ram_addr_t base = 0; + ram_addr_t bank_size; + int i; + int j; + + for (i = 0; i < nr_banks; i++) { + for (j = 0; sdram_bank_sizes[j] != 0; j++) { + bank_size = sdram_bank_sizes[j]; + if (bank_size <= size_left) { + char name[32]; + + ram_banks[i].base = base; + ram_banks[i].size = bank_size; + base += bank_size; + size_left -= bank_size; + snprintf(name, sizeof(name), "ppc4xx.sdram%d", i); + memory_region_init_alias(&ram_banks[i].ram, NULL, name, ram, + ram_banks[i].base, ram_banks[i].size); + break; + } + } + if (!size_left) { + /* No need to use the remaining banks. */ + break; + } + } + + if (size_left) { + ram_addr_t used_size = memory_region_size(ram) - size_left; + GString *s = g_string_new(NULL); + + for (i = 0; sdram_bank_sizes[i]; i++) { + g_string_append_printf(s, "%" PRIi64 "%s", + sdram_bank_sizes[i] / MiB, + sdram_bank_sizes[i + 1] ? ", " : ""); + } + error_setg(errp, "Invalid SDRAM banks"); + error_append_hint(errp, "at most %d bank%s of %s MiB each supported\n", + nr_banks, nr_banks == 1 ? "" : "s", s->str); + error_append_hint(errp, "Possible valid RAM size: %" PRIi64 " MiB\n", + used_size ? 
used_size / MiB : sdram_bank_sizes[i - 1] / MiB); + + g_string_free(s, true); + return false; + } + return true; +} + +static void sdram_bank_map(Ppc4xxSdramBank *bank) +{ + trace_ppc4xx_sdram_map(bank->base, bank->size); + memory_region_init(&bank->container, NULL, "sdram-container", bank->size); + memory_region_add_subregion(&bank->container, 0, &bank->ram); + memory_region_add_subregion(get_system_memory(), bank->base, + &bank->container); +} + +static void sdram_bank_unmap(Ppc4xxSdramBank *bank) +{ + trace_ppc4xx_sdram_unmap(bank->base, bank->size); + memory_region_del_subregion(get_system_memory(), &bank->container); + memory_region_del_subregion(&bank->container, &bank->ram); + object_unparent(OBJECT(&bank->container)); +} + +static void sdram_bank_set_bcr(Ppc4xxSdramBank *bank, uint32_t bcr, + hwaddr base, hwaddr size, int enabled) +{ + if (memory_region_is_mapped(&bank->container)) { + sdram_bank_unmap(bank); + } + bank->bcr = bcr; + bank->base = base; + bank->size = size; + if (enabled && (bcr & 1)) { + sdram_bank_map(bank); + } +} + +enum { + SDRAM0_CFGADDR = 0x010, + SDRAM0_CFGDATA = 0x011, +}; + +/*****************************************************************************/ +/* DDR SDRAM controller */ +#define SDRAM_DDR_BCR_MASK 0xFFDEE001 + +static uint32_t sdram_ddr_bcr(hwaddr ram_base, hwaddr ram_size) +{ + uint32_t bcr; + + switch (ram_size) { + case 4 * MiB: + bcr = 0; + break; + case 8 * MiB: + bcr = 0x20000; + break; + case 16 * MiB: + bcr = 0x40000; + break; + case 32 * MiB: + bcr = 0x60000; + break; + case 64 * MiB: + bcr = 0x80000; + break; + case 128 * MiB: + bcr = 0xA0000; + break; + case 256 * MiB: + bcr = 0xC0000; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid RAM size 0x%" HWADDR_PRIx "\n", __func__, + ram_size); + return 0; + } + bcr |= ram_base & 0xFF800000; + bcr |= 1; + + return bcr; +} + +static inline hwaddr sdram_ddr_base(uint32_t bcr) +{ + return bcr & 0xFF800000; +} + +static hwaddr sdram_ddr_size(uint32_t bcr) +{ + hwaddr size; + int sh; + + sh = (bcr >> 17) & 0x7; + if (sh == 7) { + size = -1; + } else { + size = (4 * MiB) << sh; + } + + return size; +} + +static uint32_t sdram_ddr_dcr_read(void *opaque, int dcrn) +{ + Ppc4xxSdramDdrState *s = opaque; + uint32_t ret; + + switch (dcrn) { + case SDRAM0_CFGADDR: + ret = s->addr; + break; + case SDRAM0_CFGDATA: + switch (s->addr) { + case 0x00: /* SDRAM_BESR0 */ + ret = s->besr0; + break; + case 0x08: /* SDRAM_BESR1 */ + ret = s->besr1; + break; + case 0x10: /* SDRAM_BEAR */ + ret = s->bear; + break; + case 0x20: /* SDRAM_CFG */ + ret = s->cfg; + break; + case 0x24: /* SDRAM_STATUS */ + ret = s->status; + break; + case 0x30: /* SDRAM_RTR */ + ret = s->rtr; + break; + case 0x34: /* SDRAM_PMIT */ + ret = s->pmit; + break; + case 0x40: /* SDRAM_B0CR */ + ret = s->bank[0].bcr; + break; + case 0x44: /* SDRAM_B1CR */ + ret = s->bank[1].bcr; + break; + case 0x48: /* SDRAM_B2CR */ + ret = s->bank[2].bcr; + break; + case 0x4C: /* SDRAM_B3CR */ + ret = s->bank[3].bcr; + break; + case 0x80: /* SDRAM_TR */ + ret = -1; /* ? 
*/ + break; + case 0x94: /* SDRAM_ECCCFG */ + ret = s->ecccfg; + break; + case 0x98: /* SDRAM_ECCESR */ + ret = s->eccesr; + break; + default: /* Error */ + ret = -1; + break; + } + break; + default: + /* Avoid gcc warning */ + ret = 0; + break; + } + + return ret; +} + +static void sdram_ddr_dcr_write(void *opaque, int dcrn, uint32_t val) +{ + Ppc4xxSdramDdrState *s = opaque; + int i; + + switch (dcrn) { + case SDRAM0_CFGADDR: + s->addr = val; + break; + case SDRAM0_CFGDATA: + switch (s->addr) { + case 0x00: /* SDRAM_BESR0 */ + s->besr0 &= ~val; + break; + case 0x08: /* SDRAM_BESR1 */ + s->besr1 &= ~val; + break; + case 0x10: /* SDRAM_BEAR */ + s->bear = val; + break; + case 0x20: /* SDRAM_CFG */ + val &= 0xFFE00000; + if (!(s->cfg & 0x80000000) && (val & 0x80000000)) { + trace_ppc4xx_sdram_enable("enable"); + /* validate all RAM mappings */ + for (i = 0; i < s->nbanks; i++) { + if (s->bank[i].size) { + sdram_bank_set_bcr(&s->bank[i], s->bank[i].bcr, + s->bank[i].base, s->bank[i].size, + 1); + } + } + s->status &= ~0x80000000; + } else if ((s->cfg & 0x80000000) && !(val & 0x80000000)) { + trace_ppc4xx_sdram_enable("disable"); + /* invalidate all RAM mappings */ + for (i = 0; i < s->nbanks; i++) { + if (s->bank[i].size) { + sdram_bank_set_bcr(&s->bank[i], s->bank[i].bcr, + s->bank[i].base, s->bank[i].size, + 0); + } + } + s->status |= 0x80000000; + } + if (!(s->cfg & 0x40000000) && (val & 0x40000000)) { + s->status |= 0x40000000; + } else if ((s->cfg & 0x40000000) && !(val & 0x40000000)) { + s->status &= ~0x40000000; + } + s->cfg = val; + break; + case 0x24: /* SDRAM_STATUS */ + /* Read-only register */ + break; + case 0x30: /* SDRAM_RTR */ + s->rtr = val & 0x3FF80000; + break; + case 0x34: /* SDRAM_PMIT */ + s->pmit = (val & 0xF8000000) | 0x07C00000; + break; + case 0x40: /* SDRAM_B0CR */ + case 0x44: /* SDRAM_B1CR */ + case 0x48: /* SDRAM_B2CR */ + case 0x4C: /* SDRAM_B3CR */ + i = (s->addr - 0x40) / 4; + val &= SDRAM_DDR_BCR_MASK; + if (s->bank[i].size) { + sdram_bank_set_bcr(&s->bank[i], val, + sdram_ddr_base(val), sdram_ddr_size(val), + s->cfg & 0x80000000); + } + break; + case 0x80: /* SDRAM_TR */ + s->tr = val & 0x018FC01F; + break; + case 0x94: /* SDRAM_ECCCFG */ + s->ecccfg = val & 0x00F00000; + break; + case 0x98: /* SDRAM_ECCESR */ + val &= 0xFFF0F000; + if (s->eccesr == 0 && val != 0) { + qemu_irq_raise(s->irq); + } else if (s->eccesr != 0 && val == 0) { + qemu_irq_lower(s->irq); + } + s->eccesr = val; + break; + default: /* Error */ + break; + } + break; + } +} + +static void ppc4xx_sdram_ddr_reset(DeviceState *dev) +{ + Ppc4xxSdramDdrState *s = PPC4xx_SDRAM_DDR(dev); + + s->addr = 0; + s->bear = 0; + s->besr0 = 0; /* No error */ + s->besr1 = 0; /* No error */ + s->cfg = 0; + s->ecccfg = 0; /* No ECC */ + s->eccesr = 0; /* No error */ + s->pmit = 0x07C00000; + s->rtr = 0x05F00000; + s->tr = 0x00854009; + /* We pre-initialize RAM banks */ + s->status = 0; + s->cfg = 0x00800000; +} + +static void ppc4xx_sdram_ddr_realize(DeviceState *dev, Error **errp) +{ + Ppc4xxSdramDdrState *s = PPC4xx_SDRAM_DDR(dev); + Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev); + const ram_addr_t valid_bank_sizes[] = { + 256 * MiB, 128 * MiB, 64 * MiB, 32 * MiB, 16 * MiB, 8 * MiB, 4 * MiB, 0 + }; + int i; + + if (s->nbanks < 1 || s->nbanks > 4) { + error_setg(errp, "Invalid number of RAM banks"); + return; + } + if (!s->dram_mr) { + error_setg(errp, "Missing dram memory region"); + return; + } + if (!ppc4xx_sdram_banks(s->dram_mr, s->nbanks, s->bank, + valid_bank_sizes, errp)) { + return; + } + for 
(i = 0; i < s->nbanks; i++) { + if (s->bank[i].size) { + s->bank[i].bcr = sdram_ddr_bcr(s->bank[i].base, s->bank[i].size); + sdram_bank_set_bcr(&s->bank[i], s->bank[i].bcr, + s->bank[i].base, s->bank[i].size, 0); + } else { + sdram_bank_set_bcr(&s->bank[i], 0, 0, 0, 0); + } + trace_ppc4xx_sdram_init(sdram_ddr_base(s->bank[i].bcr), + sdram_ddr_size(s->bank[i].bcr), + s->bank[i].bcr); + } + + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq); + + ppc4xx_dcr_register(dcr, SDRAM0_CFGADDR, + s, &sdram_ddr_dcr_read, &sdram_ddr_dcr_write); + ppc4xx_dcr_register(dcr, SDRAM0_CFGDATA, + s, &sdram_ddr_dcr_read, &sdram_ddr_dcr_write); +} + +static Property ppc4xx_sdram_ddr_props[] = { + DEFINE_PROP_LINK("dram", Ppc4xxSdramDdrState, dram_mr, TYPE_MEMORY_REGION, + MemoryRegion *), + DEFINE_PROP_UINT32("nbanks", Ppc4xxSdramDdrState, nbanks, 4), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ppc4xx_sdram_ddr_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = ppc4xx_sdram_ddr_realize; + dc->reset = ppc4xx_sdram_ddr_reset; + /* Reason: only works as function of a ppc4xx SoC */ + dc->user_creatable = false; + device_class_set_props(dc, ppc4xx_sdram_ddr_props); +} + +void ppc4xx_sdram_ddr_enable(Ppc4xxSdramDdrState *s) +{ + sdram_ddr_dcr_write(s, SDRAM0_CFGADDR, 0x20); + sdram_ddr_dcr_write(s, SDRAM0_CFGDATA, 0x80000000); +} + +/*****************************************************************************/ +/* DDR2 SDRAM controller */ +#define SDRAM_DDR2_BCR_MASK 0xffe0ffc1 + +enum { + SDRAM_R0BAS = 0x40, + SDRAM_R1BAS, + SDRAM_R2BAS, + SDRAM_R3BAS, + SDRAM_CONF1HB = 0x45, + SDRAM_PLBADDULL = 0x4a, + SDRAM_CONF1LL = 0x4b, + SDRAM_CONFPATHB = 0x4f, + SDRAM_PLBADDUHB = 0x50, +}; + +static uint32_t sdram_ddr2_bcr(hwaddr ram_base, hwaddr ram_size) +{ + uint32_t bcr; + + switch (ram_size) { + case 8 * MiB: + bcr = 0xffc0; + break; + case 16 * MiB: + bcr = 0xff80; + break; + case 32 * MiB: + bcr = 0xff00; + break; + case 64 * MiB: + bcr = 0xfe00; + break; + case 128 * MiB: + bcr = 0xfc00; + break; + case 256 * MiB: + bcr = 0xf800; + break; + case 512 * MiB: + bcr = 0xf000; + break; + case 1 * GiB: + bcr = 0xe000; + break; + case 2 * GiB: + bcr = 0xc000; + break; + case 4 * GiB: + bcr = 0x8000; + break; + default: + error_report("invalid RAM size " TARGET_FMT_plx, ram_size); + return 0; + } + bcr |= ram_base >> 2 & 0xffe00000; + bcr |= 1; + + return bcr; +} + +static inline hwaddr sdram_ddr2_base(uint32_t bcr) +{ + return (bcr & 0xffe00000) << 2; +} + +static hwaddr sdram_ddr2_size(uint32_t bcr) +{ + hwaddr size; + int sh; + + sh = 1024 - ((bcr >> 6) & 0x3ff); + size = 8 * MiB * sh; + + return size; +} + +static uint32_t sdram_ddr2_dcr_read(void *opaque, int dcrn) +{ + Ppc4xxSdramDdr2State *s = opaque; + uint32_t ret = 0; + + switch (dcrn) { + case SDRAM_R0BAS: + case SDRAM_R1BAS: + case SDRAM_R2BAS: + case SDRAM_R3BAS: + if (s->bank[dcrn - SDRAM_R0BAS].size) { + ret = sdram_ddr2_bcr(s->bank[dcrn - SDRAM_R0BAS].base, + s->bank[dcrn - SDRAM_R0BAS].size); + } + break; + case SDRAM_CONF1HB: + case SDRAM_CONF1LL: + case SDRAM_CONFPATHB: + case SDRAM_PLBADDULL: + case SDRAM_PLBADDUHB: + break; + case SDRAM0_CFGADDR: + ret = s->addr; + break; + case SDRAM0_CFGDATA: + switch (s->addr) { + case 0x14: /* SDRAM_MCSTAT (405EX) */ + case 0x1F: + ret = 0x80000000; + break; + case 0x21: /* SDRAM_MCOPT2 */ + ret = s->mcopt2; + break; + case 0x40: /* SDRAM_MB0CF */ + ret = 0x00008001; + break; + case 0x7A: /* SDRAM_DLCR */ + ret = 0x02000000; + break; + case 0xE1: /* SDR0_DDR0 */ + ret = 
SDR0_DDR0_DDRM_ENCODE(1) | SDR0_DDR0_DDRM_DDR1; + break; + default: + break; + } + break; + default: + break; + } + + return ret; +} + +#define SDRAM_DDR2_MCOPT2_DCEN BIT(27) + +static void sdram_ddr2_dcr_write(void *opaque, int dcrn, uint32_t val) +{ + Ppc4xxSdramDdr2State *s = opaque; + int i; + + switch (dcrn) { + case SDRAM_R0BAS: + case SDRAM_R1BAS: + case SDRAM_R2BAS: + case SDRAM_R3BAS: + case SDRAM_CONF1HB: + case SDRAM_CONF1LL: + case SDRAM_CONFPATHB: + case SDRAM_PLBADDULL: + case SDRAM_PLBADDUHB: + break; + case SDRAM0_CFGADDR: + s->addr = val; + break; + case SDRAM0_CFGDATA: + switch (s->addr) { + case 0x00: /* B0CR */ + break; + case 0x21: /* SDRAM_MCOPT2 */ + if (!(s->mcopt2 & SDRAM_DDR2_MCOPT2_DCEN) && + (val & SDRAM_DDR2_MCOPT2_DCEN)) { + trace_ppc4xx_sdram_enable("enable"); + /* validate all RAM mappings */ + for (i = 0; i < s->nbanks; i++) { + if (s->bank[i].size) { + sdram_bank_set_bcr(&s->bank[i], s->bank[i].bcr, + s->bank[i].base, s->bank[i].size, + 1); + } + } + s->mcopt2 |= SDRAM_DDR2_MCOPT2_DCEN; + } else if ((s->mcopt2 & SDRAM_DDR2_MCOPT2_DCEN) && + !(val & SDRAM_DDR2_MCOPT2_DCEN)) { + trace_ppc4xx_sdram_enable("disable"); + /* invalidate all RAM mappings */ + for (i = 0; i < s->nbanks; i++) { + if (s->bank[i].size) { + sdram_bank_set_bcr(&s->bank[i], s->bank[i].bcr, + s->bank[i].base, s->bank[i].size, + 0); + } + } + s->mcopt2 &= ~SDRAM_DDR2_MCOPT2_DCEN; + } + break; + default: + break; + } + break; + default: + break; + } +} + +static void ppc4xx_sdram_ddr2_reset(DeviceState *dev) +{ + Ppc4xxSdramDdr2State *s = PPC4xx_SDRAM_DDR2(dev); + + s->addr = 0; + s->mcopt2 = 0; +} + +static void ppc4xx_sdram_ddr2_realize(DeviceState *dev, Error **errp) +{ + Ppc4xxSdramDdr2State *s = PPC4xx_SDRAM_DDR2(dev); + Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev); + /* + * SoC also has 4 GiB but that causes problem with 32 bit + * builds (4*GiB overflows the 32 bit ram_addr_t). 
+ */ + const ram_addr_t valid_bank_sizes[] = { + 2 * GiB, 1 * GiB, 512 * MiB, 256 * MiB, 128 * MiB, + 64 * MiB, 32 * MiB, 16 * MiB, 8 * MiB, 0 + }; + int i; + + if (s->nbanks < 1 || s->nbanks > 4) { + error_setg(errp, "Invalid number of RAM banks"); + return; + } + if (!s->dram_mr) { + error_setg(errp, "Missing dram memory region"); + return; + } + if (!ppc4xx_sdram_banks(s->dram_mr, s->nbanks, s->bank, + valid_bank_sizes, errp)) { + return; + } + for (i = 0; i < s->nbanks; i++) { + if (s->bank[i].size) { + s->bank[i].bcr = sdram_ddr2_bcr(s->bank[i].base, s->bank[i].size); + s->bank[i].bcr &= SDRAM_DDR2_BCR_MASK; + sdram_bank_set_bcr(&s->bank[i], s->bank[i].bcr, + s->bank[i].base, s->bank[i].size, 0); + } else { + sdram_bank_set_bcr(&s->bank[i], 0, 0, 0, 0); + } + trace_ppc4xx_sdram_init(sdram_ddr2_base(s->bank[i].bcr), + sdram_ddr2_size(s->bank[i].bcr), + s->bank[i].bcr); + } + + ppc4xx_dcr_register(dcr, SDRAM0_CFGADDR, + s, &sdram_ddr2_dcr_read, &sdram_ddr2_dcr_write); + ppc4xx_dcr_register(dcr, SDRAM0_CFGDATA, + s, &sdram_ddr2_dcr_read, &sdram_ddr2_dcr_write); + + ppc4xx_dcr_register(dcr, SDRAM_R0BAS, + s, &sdram_ddr2_dcr_read, &sdram_ddr2_dcr_write); + ppc4xx_dcr_register(dcr, SDRAM_R1BAS, + s, &sdram_ddr2_dcr_read, &sdram_ddr2_dcr_write); + ppc4xx_dcr_register(dcr, SDRAM_R2BAS, + s, &sdram_ddr2_dcr_read, &sdram_ddr2_dcr_write); + ppc4xx_dcr_register(dcr, SDRAM_R3BAS, + s, &sdram_ddr2_dcr_read, &sdram_ddr2_dcr_write); + ppc4xx_dcr_register(dcr, SDRAM_CONF1HB, + s, &sdram_ddr2_dcr_read, &sdram_ddr2_dcr_write); + ppc4xx_dcr_register(dcr, SDRAM_PLBADDULL, + s, &sdram_ddr2_dcr_read, &sdram_ddr2_dcr_write); + ppc4xx_dcr_register(dcr, SDRAM_CONF1LL, + s, &sdram_ddr2_dcr_read, &sdram_ddr2_dcr_write); + ppc4xx_dcr_register(dcr, SDRAM_CONFPATHB, + s, &sdram_ddr2_dcr_read, &sdram_ddr2_dcr_write); + ppc4xx_dcr_register(dcr, SDRAM_PLBADDUHB, + s, &sdram_ddr2_dcr_read, &sdram_ddr2_dcr_write); +} + +static Property ppc4xx_sdram_ddr2_props[] = { + DEFINE_PROP_LINK("dram", Ppc4xxSdramDdr2State, dram_mr, TYPE_MEMORY_REGION, + MemoryRegion *), + DEFINE_PROP_UINT32("nbanks", Ppc4xxSdramDdr2State, nbanks, 4), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ppc4xx_sdram_ddr2_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = ppc4xx_sdram_ddr2_realize; + dc->reset = ppc4xx_sdram_ddr2_reset; + /* Reason: only works as function of a ppc4xx SoC */ + dc->user_creatable = false; + device_class_set_props(dc, ppc4xx_sdram_ddr2_props); +} + +void ppc4xx_sdram_ddr2_enable(Ppc4xxSdramDdr2State *s) +{ + sdram_ddr2_dcr_write(s, SDRAM0_CFGADDR, 0x21); + sdram_ddr2_dcr_write(s, SDRAM0_CFGDATA, 0x08000000); +} + +static const TypeInfo ppc4xx_sdram_types[] = { + { + .name = TYPE_PPC4xx_SDRAM_DDR, + .parent = TYPE_PPC4xx_DCR_DEVICE, + .instance_size = sizeof(Ppc4xxSdramDdrState), + .class_init = ppc4xx_sdram_ddr_class_init, + }, { + .name = TYPE_PPC4xx_SDRAM_DDR2, + .parent = TYPE_PPC4xx_DCR_DEVICE, + .instance_size = sizeof(Ppc4xxSdramDdr2State), + .class_init = ppc4xx_sdram_ddr2_class_init, + } +}; + +DEFINE_TYPES(ppc4xx_sdram_types) diff --git a/hw/ppc/ppc_booke.c b/hw/ppc/ppc_booke.c index 10b643861f..ca22da196a 100644 --- a/hw/ppc/ppc_booke.c +++ b/hw/ppc/ppc_booke.c @@ -337,8 +337,8 @@ void ppc_booke_timers_init(PowerPCCPU *cpu, uint32_t freq, uint32_t flags) booke_timer_t *booke_timer; int ret = 0; - tb_env = g_malloc0(sizeof(ppc_tb_t)); - booke_timer = g_malloc0(sizeof(booke_timer_t)); + tb_env = g_new0(ppc_tb_t, 1); + booke_timer = g_new0(booke_timer_t, 1); 
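+    /* g_new0(T, 1) is the type-checked equivalent of g_malloc0(sizeof(T)); both return zeroed memory. */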
cpu->env.tb_env = tb_env; tb_env->flags = flags | PPC_TIMER_BOOKE | PPC_DECR_ZERO_TRIGGERED; diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c index acfc2a91d8..ec8d9584fb 100644 --- a/hw/ppc/prep.c +++ b/hw/ppc/prep.c @@ -40,7 +40,6 @@ #include "hw/rtc/mc146818rtc.h" #include "hw/isa/pc87312.h" #include "hw/qdev-properties.h" -#include "sysemu/arch_init.h" #include "sysemu/kvm.h" #include "sysemu/reset.h" #include "trace.h" @@ -51,8 +50,6 @@ /* SMP is not enabled, for now */ #define MAX_CPUS 1 -#define MAX_IDE_BUS 2 - #define CFG_ADDR 0xf0000510 #define KERNEL_LOAD_ADDR 0x01000000 @@ -256,13 +253,8 @@ static void ibm_40p_init(MachineState *machine) exit(1); } - if (env->flags & POWERPC_FLAG_RTC_CLK) { - /* POWER / PowerPC 601 RTC clock frequency is 7.8125 MHz */ - cpu_ppc_tb_init(env, 7812500UL); - } else { - /* Set time-base frequency to 100 Mhz */ - cpu_ppc_tb_init(env, 100UL * 1000UL * 1000UL); - } + /* Set time-base frequency to 100 Mhz */ + cpu_ppc_tb_init(env, 100UL * 1000UL * 1000UL); qemu_register_reset(ppc_prep_reset, cpu); /* PCI host */ @@ -279,9 +271,11 @@ static void ibm_40p_init(MachineState *machine) } /* PCI -> ISA bridge */ - i82378_dev = DEVICE(pci_create_simple(pci_bus, PCI_DEVFN(11, 0), "i82378")); + i82378_dev = DEVICE(pci_new(PCI_DEVFN(11, 0), "i82378")); qdev_connect_gpio_out(i82378_dev, 0, - cpu->env.irq_inputs[PPC6xx_INPUT_INT]); + qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_INT)); + qdev_realize_and_unref(i82378_dev, BUS(pci_bus), &error_fatal); + sysbus_connect_irq(pcihost, 0, qdev_get_gpio_in(i82378_dev, 15)); isa_bus = ISA_BUS(qdev_get_child_bus(i82378_dev, "isa.0")); @@ -387,7 +381,7 @@ static void ibm_40p_init(MachineState *machine) } boot_device = 'm'; } else { - boot_device = machine->boot_order[0]; + boot_device = machine->boot_config.order[0]; } fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)machine->smp.max_cpus); diff --git a/hw/ppc/prep_systemio.c b/hw/ppc/prep_systemio.c index b2bd783248..5a56f155f5 100644 --- a/hw/ppc/prep_systemio.c +++ b/hw/ppc/prep_systemio.c @@ -262,7 +262,7 @@ static void prep_systemio_realize(DeviceState *dev, Error **errp) qemu_set_irq(s->non_contiguous_io_map_irq, s->iomap_type & PORT0850_IOMAP_NONCONTIGUOUS); cpu = POWERPC_CPU(first_cpu); - s->softreset_irq = cpu->env.irq_inputs[PPC6xx_INPUT_HRESET]; + s->softreset_irq = qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_HRESET); isa_register_portio_list(isa, &s->portio, 0x0, ppc_io800_port_list, s, "systemio800"); @@ -300,7 +300,7 @@ static void prep_systemio_class_initfn(ObjectClass *klass, void *data) device_class_set_props(dc, prep_systemio_properties); } -static TypeInfo prep_systemio800_info = { +static const TypeInfo prep_systemio800_info = { .name = TYPE_PREP_SYSTEMIO, .parent = TYPE_ISA_DEVICE, .instance_size = sizeof(PrepSystemIoState), diff --git a/hw/ppc/sam460ex.c b/hw/ppc/sam460ex.c index 0737234d66..4a22ce3761 100644 --- a/hw/ppc/sam460ex.c +++ b/hw/ppc/sam460ex.c @@ -13,7 +13,6 @@ #include "qemu/osdep.h" #include "qemu/units.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qemu/error-report.h" #include "qapi/error.h" @@ -26,7 +25,6 @@ #include "elf.h" #include "exec/memory.h" #include "ppc440.h" -#include "ppc405.h" #include "hw/block/flash.h" #include "sysemu/sysemu.h" #include "sysemu/reset.h" @@ -75,14 +73,6 @@ #define OPB_FREQ 115000000 #define EBC_FREQ 115000000 #define UART_FREQ 11059200 -#define SDRAM_NR_BANKS 4 - -/* The SoC could also handle 4 GiB but firmware does not work with that. */ -/* Maybe it overflows a signed 32 bit number somewhere? 
*/ -static const ram_addr_t ppc460ex_sdram_bank_sizes[] = { - 2 * GiB, 1 * GiB, 512 * MiB, 256 * MiB, 128 * MiB, 64 * MiB, - 32 * MiB, 0 -}; struct boot_info { uint32_t dt_base; @@ -133,13 +123,12 @@ static int sam460ex_load_uboot(void) return 0; } -static int sam460ex_load_device_tree(hwaddr addr, - uint32_t ramsize, +static int sam460ex_load_device_tree(MachineState *machine, + hwaddr addr, hwaddr initrd_base, - hwaddr initrd_size, - const char *kernel_cmdline) + hwaddr initrd_size) { - uint32_t mem_reg_property[] = { 0, 0, cpu_to_be32(ramsize) }; + uint32_t mem_reg_property[] = { 0, 0, cpu_to_be32(machine->ram_size) }; char *filename; int fdt_size; void *fdt; @@ -173,7 +162,8 @@ static int sam460ex_load_device_tree(hwaddr addr, qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end", (initrd_base + initrd_size)); - qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", kernel_cmdline); + qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", + machine->kernel_cmdline); /* Copy data from the host device tree into the guest. Since the guest can * directly access the timebase without host involvement, we must expose @@ -210,7 +200,9 @@ static int sam460ex_load_device_tree(hwaddr addr, EBC_FREQ); rom_add_blob_fixed(BINARY_DEVICE_TREE_FILE, fdt, fdt_size, addr); - g_free(fdt); + + /* Set machine->fdt for 'dumpdtb' QMP/HMP command */ + machine->fdt = fdt; return fdt_size; } @@ -276,12 +268,8 @@ static void sam460ex_init(MachineState *machine) { MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *isa = g_new(MemoryRegion, 1); - MemoryRegion *ram_memories = g_new(MemoryRegion, SDRAM_NR_BANKS); - hwaddr ram_bases[SDRAM_NR_BANKS] = {0}; - hwaddr ram_sizes[SDRAM_NR_BANKS] = {0}; MemoryRegion *l2cache_ram = g_new(MemoryRegion, 1); DeviceState *uic[4]; - qemu_irq mal_irqs[4]; int i; PCIBus *pci_bus; PowerPCCPU *cpu; @@ -310,11 +298,12 @@ static void sam460ex_init(MachineState *machine) ppc_dcr_init(env, NULL, NULL); /* PLB arbitrer */ - ppc4xx_plb_init(env); + dev = qdev_new(TYPE_PPC4xx_PLB); + ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(dev), cpu, &error_fatal); + object_unref(OBJECT(dev)); /* interrupt controllers */ for (i = 0; i < ARRAY_SIZE(uic); i++) { - SysBusDevice *sbd; /* * UICs 1, 2 and 3 are cascaded through UIC 0. 
* input_ints[n] is the interrupt number on UIC 0 which @@ -326,43 +315,56 @@ static void sam460ex_init(MachineState *machine) const int input_ints[] = { -1, 30, 10, 16 }; uic[i] = qdev_new(TYPE_PPC_UIC); - sbd = SYS_BUS_DEVICE(uic[i]); - qdev_prop_set_uint32(uic[i], "dcr-base", 0xc0 + i * 0x10); - object_property_set_link(OBJECT(uic[i]), "cpu", OBJECT(cpu), - &error_fatal); - sysbus_realize_and_unref(sbd, &error_fatal); + ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(uic[i]), cpu, &error_fatal); + object_unref(OBJECT(uic[i])); + sbdev = SYS_BUS_DEVICE(uic[i]); if (i == 0) { - sysbus_connect_irq(sbd, PPCUIC_OUTPUT_INT, - ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]); - sysbus_connect_irq(sbd, PPCUIC_OUTPUT_CINT, - ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT]); + sysbus_connect_irq(sbdev, PPCUIC_OUTPUT_INT, + qdev_get_gpio_in(DEVICE(cpu), PPC40x_INPUT_INT)); + sysbus_connect_irq(sbdev, PPCUIC_OUTPUT_CINT, + qdev_get_gpio_in(DEVICE(cpu), PPC40x_INPUT_CINT)); } else { - sysbus_connect_irq(sbd, PPCUIC_OUTPUT_INT, + sysbus_connect_irq(sbdev, PPCUIC_OUTPUT_INT, qdev_get_gpio_in(uic[0], input_ints[i])); - sysbus_connect_irq(sbd, PPCUIC_OUTPUT_CINT, + sysbus_connect_irq(sbdev, PPCUIC_OUTPUT_CINT, qdev_get_gpio_in(uic[0], input_ints[i] + 1)); } } /* SDRAM controller */ - /* put all RAM on first bank because board has one slot - * and firmware only checks that */ - ppc4xx_sdram_banks(machine->ram, 1, ram_memories, ram_bases, ram_sizes, - ppc460ex_sdram_bank_sizes); - + /* The SoC could also handle 4 GiB but firmware does not work with that. */ + if (machine->ram_size > 2 * GiB) { + error_report("Memory over 2 GiB is not supported"); + exit(1); + } + /* Firmware needs at least 64 MiB */ + if (machine->ram_size < 64 * MiB) { + error_report("Memory below 64 MiB is not supported"); + exit(1); + } + dev = qdev_new(TYPE_PPC4xx_SDRAM_DDR2); + object_property_set_link(OBJECT(dev), "dram", OBJECT(machine->ram), + &error_abort); + /* + * Put all RAM on first bank because board has one slot + * and firmware only checks that + */ + object_property_set_int(OBJECT(dev), "nbanks", 1, &error_abort); + ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(dev), cpu, &error_fatal); + object_unref(OBJECT(dev)); /* FIXME: does 460EX have ECC interrupts? */ - ppc440_sdram_init(env, SDRAM_NR_BANKS, ram_memories, - ram_bases, ram_sizes, 1); + /* Enable SDRAM memory regions as we may boot without firmware */ + ppc4xx_sdram_ddr2_enable(PPC4xx_SDRAM_DDR2(dev)); /* IIC controllers and devices */ dev = sysbus_create_simple(TYPE_PPC4xx_I2C, 0x4ef600700, qdev_get_gpio_in(uic[0], 2)); i2c = PPC4xx_I2C(dev)->bus; /* SPD EEPROM on RAM module */ - spd_data = spd_data_generate(ram_sizes[0] < 128 * MiB ? DDR : DDR2, - ram_sizes[0]); + spd_data = spd_data_generate(machine->ram_size < 128 * MiB ? 
DDR : DDR2, + machine->ram_size); spd_data[20] = 4; /* SO-DIMM module */ smbus_eeprom_init_one(i2c, 0x50, spd_data); /* RTC */ @@ -372,7 +374,9 @@ static void sam460ex_init(MachineState *machine) qdev_get_gpio_in(uic[0], 3)); /* External bus controller */ - ppc405_ebc_init(env); + dev = qdev_new(TYPE_PPC4xx_EBC); + ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(dev), cpu, &error_fatal); + object_unref(OBJECT(dev)); /* CPR */ ppc4xx_cpr_init(env); @@ -384,10 +388,15 @@ static void sam460ex_init(MachineState *machine) ppc4xx_sdr_init(env); /* MAL */ - for (i = 0; i < ARRAY_SIZE(mal_irqs); i++) { - mal_irqs[0] = qdev_get_gpio_in(uic[2], 3 + i); + dev = qdev_new(TYPE_PPC4xx_MAL); + qdev_prop_set_uint32(dev, "txc-num", 4); + qdev_prop_set_uint32(dev, "rxc-num", 16); + ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(dev), cpu, &error_fatal); + object_unref(OBJECT(dev)); + sbdev = SYS_BUS_DEVICE(dev); + for (i = 0; i < ARRAY_SIZE(PPC4xx_MAL(dev)->irqs); i++) { + sysbus_connect_irq(sbdev, i, qdev_get_gpio_in(uic[2], 3 + i)); } - ppc4xx_mal_init(env, 4, 16, mal_irqs); /* DMA */ ppc4xx_dma_init(env, 0x200); @@ -493,9 +502,8 @@ static void sam460ex_init(MachineState *machine) if (machine->kernel_filename) { int dt_size; - dt_size = sam460ex_load_device_tree(FDT_ADDR, machine->ram_size, - RAMDISK_ADDR, initrd_size, - machine->kernel_cmdline); + dt_size = sam460ex_load_device_tree(machine, FDT_ADDR, + RAMDISK_ADDR, initrd_size); boot_info->dt_base = FDT_ADDR; boot_info->dt_size = dt_size; diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 81699d4f8b..66b414d2e9 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -25,10 +25,12 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" +#include "qemu/memalign.h" +#include "qemu/guest-random.h" #include "qapi/error.h" #include "qapi/qapi-events-machine.h" +#include "qapi/qapi-events-qdev.h" #include "qapi/visitor.h" #include "sysemu/sysemu.h" #include "sysemu/hostmem.h" @@ -175,8 +177,8 @@ static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu, int smt_threads) { int i, ret = 0; - uint32_t servers_prop[smt_threads]; - uint32_t gservers_prop[smt_threads * 2]; + g_autofree uint32_t *servers_prop = g_new(uint32_t, smt_threads); + g_autofree uint32_t *gservers_prop = g_new(uint32_t, smt_threads * 2); int index = spapr_get_vcpu_id(cpu); if (cpu->compat_pvr) { @@ -194,12 +196,12 @@ static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu, gservers_prop[i*2 + 1] = 0; } ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s", - servers_prop, sizeof(servers_prop)); + servers_prop, sizeof(*servers_prop) * smt_threads); if (ret < 0) { return ret; } ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s", - gservers_prop, sizeof(gservers_prop)); + gservers_prop, sizeof(*gservers_prop) * smt_threads * 2); return ret; } @@ -722,10 +724,12 @@ static void spapr_dt_cpu(CPUState *cs, void *fdt, int offset, * * Only CPUs for which we create core types in spapr_cpu_core.c * are possible, and all of those have VMX */ - if (spapr_get_cap(spapr, SPAPR_CAP_VSX) != 0) { - _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 2))); - } else { - _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 1))); + if (env->insns_flags & PPC_ALTIVEC) { + if (spapr_get_cap(spapr, SPAPR_CAP_VSX) != 0) { + _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 2))); + } else { + _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 1))); + } } /* Advertise DFP (Decimal Floating Point) if available @@ -895,6 +899,8 @@ static void spapr_dt_rtas(SpaprMachineState *spapr, void 
*fdt) add_str(hypertas, "hcall-hpt-resize"); } + add_str(hypertas, "hcall-watchdog"); + _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions", hypertas->str, hypertas->len)); g_string_free(hypertas, TRUE); @@ -1009,15 +1015,16 @@ static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset) { MachineState *machine = MACHINE(spapr); SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); + uint8_t rng_seed[32]; int chosen; _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen")); if (reset) { const char *boot_device = spapr->boot_device; - char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus); + g_autofree char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus); size_t cb = 0; - char *bootlist = get_boot_devices_list(&cb); + g_autofree char *bootlist = get_boot_devices_list(&cb); if (machine->kernel_cmdline && machine->kernel_cmdline[0]) { _FDT(fdt_setprop_string(fdt, chosen, "bootargs", @@ -1041,8 +1048,8 @@ static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset) _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0)); } } - if (boot_menu) { - _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", boot_menu))); + if (machine->boot_config.has_menu && machine->boot_config.menu) { + _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", true))); } _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width)); _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height)); @@ -1063,7 +1070,7 @@ static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset) _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device)); } - if (!spapr->has_graphics && stdout_path) { + if (spapr->want_stdout_path && stdout_path) { /* * "linux,stdout-path" and "stdout" properties are * deprecated by linux kernel. 
New platforms should only @@ -1084,11 +1091,11 @@ static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset) } spapr_dt_ov5_platform_support(spapr, fdt, chosen); - - g_free(stdout_path); - g_free(bootlist); } + qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed)); + _FDT(fdt_setprop(fdt, chosen, "rng-seed", rng_seed, sizeof(rng_seed))); + _FDT(spapr_dt_ovec(fdt, chosen, spapr->ov5_cas, "ibm,architecture-vec-5")); } @@ -1267,7 +1274,9 @@ static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp, /* The TCG path should also be holding the BQL at this point */ g_assert(qemu_mutex_iothread_locked()); - if (msr_pr) { + g_assert(!vhyp_cpu_in_nested(cpu)); + + if (FIELD_EX64(env->msr, MSR, PR)) { hcall_dprintf("Hypercall made with MSR[PR]=1\n"); env->gpr[3] = H_PRIVILEGE; } else { @@ -1306,13 +1315,45 @@ void spapr_set_all_lpcrs(target_ulong value, target_ulong mask) } } -static void spapr_get_pate(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry) +static bool spapr_get_pate(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu, + target_ulong lpid, ppc_v3_pate_t *entry) { SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); - /* Copy PATE1:GR into PATE0:HR */ - entry->dw0 = spapr->patb_entry & PATE0_HR; - entry->dw1 = spapr->patb_entry; + if (!spapr_cpu->in_nested) { + assert(lpid == 0); + + /* Copy PATE1:GR into PATE0:HR */ + entry->dw0 = spapr->patb_entry & PATE0_HR; + entry->dw1 = spapr->patb_entry; + + } else { + uint64_t patb, pats; + + assert(lpid != 0); + + patb = spapr->nested_ptcr & PTCR_PATB; + pats = spapr->nested_ptcr & PTCR_PATS; + + /* Check if partition table is properly aligned */ + if (patb & MAKE_64BIT_MASK(0, pats + 12)) { + return false; + } + + /* Calculate number of entries */ + pats = 1ull << (pats + 12 - 4); + if (pats <= lpid) { + return false; + } + + /* Grab entry */ + patb += 16 * lpid; + entry->dw0 = ldq_phys(CPU(cpu)->as, patb); + entry->dw1 = ldq_phys(CPU(cpu)->as, patb + 8); + } + + return true; } #define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2)) @@ -1413,7 +1454,7 @@ void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex, kvmppc_write_hpte(ptex, pte0, pte1); } else { if (pte0 & HPTE64_V_VALID) { - stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1); + stq_p(spapr->htab + offset + HPTE64_DW1, pte1); /* * When setting valid, we write PTE1 first. 
This ensures * proper synchronization with the reading code in @@ -1429,7 +1470,7 @@ void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex, * ppc_hash64_pteg_search() */ smp_wmb(); - stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1); + stq_p(spapr->htab + offset + HPTE64_DW1, pte1); } } } @@ -1437,7 +1478,7 @@ void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex, static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1) { - hwaddr offset = ptex * HASH_PTE_SIZE_64 + 15; + hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C; SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); if (!spapr->htab) { @@ -1453,7 +1494,7 @@ static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex, static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1) { - hwaddr offset = ptex * HASH_PTE_SIZE_64 + 14; + hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R; SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); if (!spapr->htab) { @@ -1481,7 +1522,7 @@ int spapr_hpt_shift_for_ramsize(uint64_t ramsize) void spapr_free_hpt(SpaprMachineState *spapr) { - g_free(spapr->htab); + qemu_vfree(spapr->htab); spapr->htab = NULL; spapr->htab_shift = 0; close_htab_fd(spapr); @@ -1582,7 +1623,7 @@ void spapr_check_mmu_mode(bool guest_radix) } } -static void spapr_machine_reset(MachineState *machine) +static void spapr_machine_reset(MachineState *machine, ShutdownCause reason) { SpaprMachineState *spapr = SPAPR_MACHINE(machine); PowerPCCPU *first_ppc_cpu; @@ -1608,7 +1649,7 @@ static void spapr_machine_reset(MachineState *machine) spapr_setup_hpt(spapr); } - qemu_devices_reset(); + qemu_devices_reset(reason); spapr_ovec_cleanup(spapr->ov5_cas); spapr->ov5_cas = spapr_ovec_new(); @@ -1631,6 +1672,8 @@ static void spapr_machine_reset(MachineState *machine) spapr->ov5_cas = spapr_ovec_clone(spapr->ov5); } + spapr_nvdimm_finish_flushes(); + /* DRC reset may cause a device to be unplugged. This will cause troubles * if this device is used by another device (eg, a running vhost backend * will crash QEMU if the DIMM holding the vring goes away). To avoid such @@ -1670,6 +1713,9 @@ static void spapr_machine_reset(MachineState *machine) spapr->fdt_initial_size = spapr->fdt_size; spapr->fdt_blob = fdt; + /* Set machine->fdt for 'dumpdtb' QMP/HMP command */ + machine->fdt = fdt; + /* Set up the entry state */ first_ppc_cpu->env.gpr[5] = 0; @@ -1711,6 +1757,7 @@ static void spapr_rtc_create(SpaprMachineState *spapr) /* Returns whether we want to use VGA or not */ static bool spapr_vga_init(PCIBus *pci_bus, Error **errp) { + vga_interface_created = true; switch (vga_interface_type) { case VGA_NONE: return false; @@ -2676,15 +2723,26 @@ static void spapr_machine_init(MachineState *machine) MachineClass *mc = MACHINE_GET_CLASS(machine); const char *bios_default = spapr->vof ? 
FW_FILE_NAME_VOF : FW_FILE_NAME; const char *bios_name = machine->firmware ?: bios_default; + g_autofree char *filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); const char *kernel_filename = machine->kernel_filename; const char *initrd_filename = machine->initrd_filename; PCIHostState *phb; + bool has_vga; int i; MemoryRegion *sysmem = get_system_memory(); long load_limit, fw_size; - char *filename; Error *resize_hpt_err = NULL; + if (!filename) { + error_report("Could not find LPAR firmware '%s'", bios_name); + exit(1); + } + fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE); + if (fw_size <= 0) { + error_report("Could not load LPAR firmware '%s'", filename); + exit(1); + } + /* * if Secure VM (PEF) support is configured, then initialize it */ @@ -2752,6 +2810,11 @@ static void spapr_machine_init(MachineState *machine) spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY); + /* Do not advertise FORM2 NUMA support for pseries-6.1 and older */ + if (!smc->pre_6_2_numa_affinity) { + spapr_ovec_set(spapr->ov5, OV5_FORM2_AFFINITY); + } + /* advertise support for dedicated HP event source to guests */ if (spapr->use_hotplug_event_source) { spapr_ovec_set(spapr->ov5, OV5_HP_EVT); @@ -2773,39 +2836,6 @@ static void spapr_machine_init(MachineState *machine) /* init CPUs */ spapr_init_cpus(spapr); - /* - * check we don't have a memory-less/cpu-less NUMA node - * Firmware relies on the existing memory/cpu topology to provide the - * NUMA topology to the kernel. - * And the linux kernel needs to know the NUMA topology at start - * to be able to hotplug CPUs later. - */ - if (machine->numa_state->num_nodes) { - for (i = 0; i < machine->numa_state->num_nodes; ++i) { - /* check for memory-less node */ - if (machine->numa_state->nodes[i].node_mem == 0) { - CPUState *cs; - int found = 0; - /* check for cpu-less node */ - CPU_FOREACH(cs) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - if (cpu->node_id == i) { - found = 1; - break; - } - } - /* memory-less and cpu-less node */ - if (!found) { - error_report( - "Memory-less/cpu-less nodes are not supported (node %d)", - i); - exit(1); - } - } - } - - } - spapr->gpu_numa_id = spapr_numa_initial_nvgpu_numa_id(machine); /* Init numa_assoc_array */ @@ -2936,9 +2966,12 @@ static void spapr_machine_init(MachineState *machine) } /* Graphics */ - if (spapr_vga_init(phb->bus, &error_fatal)) { - spapr->has_graphics = true; + has_vga = spapr_vga_init(phb->bus, &error_fatal); + if (has_vga) { + spapr->want_stdout_path = !machine->enable_graphics; machine->usb |= defaults_enabled() && !machine->usb_disabled; + } else { + spapr->want_stdout_path = true; } if (machine->usb) { @@ -2948,7 +2981,7 @@ static void spapr_machine_init(MachineState *machine) pci_create_simple(phb->bus, -1, "nec-usb-xhci"); } - if (spapr->has_graphics) { + if (has_vga) { USBBus *usb_bus = usb_bus_find(-1); usb_create_simple(usb_bus, "usb-kbd"); @@ -2957,14 +2990,16 @@ static void spapr_machine_init(MachineState *machine) } if (kernel_filename) { + uint64_t loaded_addr = 0; + spapr->kernel_size = load_elf(kernel_filename, NULL, translate_kernel_address, spapr, - NULL, NULL, NULL, NULL, 1, + NULL, &loaded_addr, NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0); if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) { spapr->kernel_size = load_elf(kernel_filename, NULL, translate_kernel_address, spapr, - NULL, NULL, NULL, NULL, 0, + NULL, &loaded_addr, NULL, NULL, 0, PPC_ELF_MACHINE, 0, 0); spapr->kernel_le = spapr->kernel_size > 0; } @@ -2974,6 +3009,13 @@ static void spapr_machine_init(MachineState *machine) 
exit(1); } + if (spapr->kernel_addr != loaded_addr) { + warn_report("spapr: kernel_addr changed from 0x%"PRIx64 + " to 0x%"PRIx64, + spapr->kernel_addr, loaded_addr); + spapr->kernel_addr = loaded_addr; + } + /* load initrd */ if (initrd_filename) { /* Try to locate the initrd in the gap between the kernel @@ -2993,18 +3035,6 @@ static void spapr_machine_init(MachineState *machine) } } - filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); - if (!filename) { - error_report("Could not find LPAR firmware '%s'", bios_name); - exit(1); - } - fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE); - if (fw_size <= 0) { - error_report("Could not load LPAR firmware '%s'", filename); - exit(1); - } - g_free(filename); - /* FIXME: Should register things through the MachineState's qdev * interface, this is a legacy from the sPAPREnvironment structure * which predated MachineState but had a similar function */ @@ -3036,6 +3066,8 @@ static void spapr_machine_init(MachineState *machine) spapr->vof->fw_size = fw_size; /* for claim() on itself */ spapr_register_hypercall(KVMPPC_H_VOF_CLIENT, spapr_h_vof_client); } + + spapr_watchdog_init(spapr); } #define DEFAULT_KVM_TYPE "auto" @@ -3078,7 +3110,7 @@ static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus, VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON); PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE); - if (d) { + if (d && bus) { void *spapr = CAST(void, bus->parent, "spapr-vscsi"); VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI); USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE); @@ -3599,7 +3631,7 @@ static SpaprDimmState *spapr_pending_dimm_unplugs_add(SpaprMachineState *spapr, */ ds = spapr_pending_dimm_unplugs_find(spapr, dimm); if (!ds) { - ds = g_malloc0(sizeof(SpaprDimmState)); + ds = g_new0(SpaprDimmState, 1); ds->nr_lmbs = nr_lmbs; ds->dimm = dimm; QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, ds, next); @@ -3686,11 +3718,18 @@ void spapr_memory_unplug_rollback(SpaprMachineState *spapr, DeviceState *dev) /* * Tell QAPI that something happened and the memory - * hotunplug wasn't successful. + * hotunplug wasn't successful. Keep sending + * MEM_UNPLUG_ERROR even while sending + * DEVICE_UNPLUG_GUEST_ERROR until the deprecation of + * MEM_UNPLUG_ERROR is due. */ qapi_error = g_strdup_printf("Memory hotunplug rejected by the guest " "for device %s", dev->id); - qapi_event_send_mem_unplug_error(dev->id, qapi_error); + + qapi_event_send_mem_unplug_error(dev->id ? : "", qapi_error); + + qapi_event_send_device_unplug_guest_error(!!dev->id, dev->id, + dev->canonical_path); } /* Callback to be called during DRC release. 
*/ @@ -4483,6 +4522,13 @@ PowerPCCPU *spapr_find_cpu(int vcpu_id) return NULL; } +static bool spapr_cpu_in_nested(PowerPCCPU *cpu) +{ + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + + return spapr_cpu->in_nested; +} + static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) { SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); @@ -4591,6 +4637,8 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) fwc->get_dev_path = spapr_get_fw_dev_path; nc->nmi_monitor_handler = spapr_nmi; smc->phb_placement = spapr_phb_placement; + vhc->cpu_in_nested = spapr_cpu_in_nested; + vhc->deliver_hv_excp = spapr_exit_nested; vhc->hypercall = emulate_spapr_hypercall; vhc->hpt_mask = spapr_hpt_mask; vhc->map_hptes = spapr_map_hptes; @@ -4686,14 +4734,62 @@ static void spapr_machine_latest_class_options(MachineClass *mc) type_init(spapr_machine_register_##suffix) /* - * pseries-6.1 + * pseries-7.2 */ -static void spapr_machine_6_1_class_options(MachineClass *mc) +static void spapr_machine_7_2_class_options(MachineClass *mc) { /* Defaults for the latest behaviour inherited from the base class */ } -DEFINE_SPAPR_MACHINE(6_1, "6.1", true); +DEFINE_SPAPR_MACHINE(7_2, "7.2", true); + +/* + * pseries-7.1 + */ +static void spapr_machine_7_1_class_options(MachineClass *mc) +{ + spapr_machine_7_2_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_7_1, hw_compat_7_1_len); +} + +DEFINE_SPAPR_MACHINE(7_1, "7.1", false); + +/* + * pseries-7.0 + */ +static void spapr_machine_7_0_class_options(MachineClass *mc) +{ + spapr_machine_7_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_7_0, hw_compat_7_0_len); +} + +DEFINE_SPAPR_MACHINE(7_0, "7.0", false); + +/* + * pseries-6.2 + */ +static void spapr_machine_6_2_class_options(MachineClass *mc) +{ + spapr_machine_7_0_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len); +} + +DEFINE_SPAPR_MACHINE(6_2, "6.2", false); + +/* + * pseries-6.1 + */ +static void spapr_machine_6_1_class_options(MachineClass *mc) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + + spapr_machine_6_2_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len); + smc->pre_6_2_numa_affinity = true; + mc->smp_props.prefer_sockets = true; +} + +DEFINE_SPAPR_MACHINE(6_1, "6.1", false); /* * pseries-6.0 diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c index ed7c077a0d..b4283055c1 100644 --- a/hw/ppc/spapr_caps.c +++ b/hw/ppc/spapr_caps.c @@ -95,12 +95,12 @@ static void spapr_cap_set_bool(Object *obj, Visitor *v, const char *name, } -static void spapr_cap_get_string(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) +static void spapr_cap_get_string(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) { SpaprCapabilityInfo *cap = opaque; SpaprMachineState *spapr = SPAPR_MACHINE(obj); - char *val = NULL; + g_autofree char *val = NULL; uint8_t value = spapr_get_cap(spapr, cap->index); if (value >= cap->possible->num) { @@ -111,7 +111,6 @@ static void spapr_cap_get_string(Object *obj, Visitor *v, const char *name, val = g_strdup(cap->possible->vals[value]); visit_type_str(v, name, &val, errp); - g_free(val); } static void spapr_cap_set_string(Object *obj, Visitor *v, const char *name, @@ -120,7 +119,7 @@ static void spapr_cap_set_string(Object *obj, Visitor *v, const char *name, SpaprCapabilityInfo *cap = opaque; SpaprMachineState *spapr = SPAPR_MACHINE(obj); uint8_t i; - char *val; + g_autofree char *val = NULL; if 
(!visit_type_str(v, name, &val, errp)) { return; @@ -128,20 +127,18 @@ static void spapr_cap_set_string(Object *obj, Visitor *v, const char *name, if (!strcmp(val, "?")) { error_setg(errp, "%s", cap->possible->help); - goto out; + return; } for (i = 0; i < cap->possible->num; i++) { if (!strcasecmp(val, cap->possible->vals[i])) { spapr->cmd_line_caps[cap->index] = true; spapr->eff.caps[cap->index] = i; - goto out; + return; } } error_setg(errp, "Invalid capability mode \"%s\" for cap-%s", val, cap->name); -out: - g_free(val); } static void spapr_cap_get_pagesize(Object *obj, Visitor *v, const char *name, @@ -444,19 +441,23 @@ static void cap_nested_kvm_hv_apply(SpaprMachineState *spapr, { ERRP_GUARD(); PowerPCCPU *cpu = POWERPC_CPU(first_cpu); + CPUPPCState *env = &cpu->env; if (!val) { /* capability disabled by default */ return; } - if (tcg_enabled()) { - error_setg(errp, "No Nested KVM-HV support in TCG"); + if (!(env->insns_flags2 & PPC2_ISA300)) { + error_setg(errp, "Nested-HV only supported on POWER9 and later"); error_append_hint(errp, "Try appending -machine cap-nested-hv=off\n"); - } else if (kvm_enabled()) { + return; + } + + if (kvm_enabled()) { if (!ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, spapr->max_compat_pvr)) { - error_setg(errp, "Nested KVM-HV only supported on POWER9"); + error_setg(errp, "Nested-HV only supported on POWER9 and later"); error_append_hint(errp, "Try appending -machine max-cpu-compat=power9\n"); return; @@ -464,7 +465,7 @@ static void cap_nested_kvm_hv_apply(SpaprMachineState *spapr, if (!kvmppc_has_cap_nested_kvm_hv()) { error_setg(errp, - "KVM implementation does not support Nested KVM-HV"); + "KVM implementation does not support Nested-HV"); error_append_hint(errp, "Try appending -machine cap-nested-hv=off\n"); } else if (kvmppc_set_cap_nested_kvm_hv(val) < 0) { @@ -552,7 +553,7 @@ static void cap_ccf_assist_apply(SpaprMachineState *spapr, uint8_t val, * instruction is a harmless no-op. It won't correctly * implement the cache count flush *but* if we have * count-cache-disabled in the host, that flush is - * unnnecessary. So, specifically allow this case. This + * unnecessary. So, specifically allow this case. This * allows us to have better performance on POWER9 DD2.3, * while still working on POWER9 DD2.2 and POWER8 host * cpus. @@ -929,16 +930,13 @@ void spapr_caps_add_properties(SpaprMachineClass *smc) for (i = 0; i < ARRAY_SIZE(capability_table); i++) { SpaprCapabilityInfo *cap = &capability_table[i]; - char *name = g_strdup_printf("cap-%s", cap->name); - char *desc; + g_autofree char *name = g_strdup_printf("cap-%s", cap->name); + g_autofree char *desc = g_strdup_printf("%s", cap->description); object_class_property_add(klass, name, cap->type, cap->get, cap->set, NULL, cap); - desc = g_strdup_printf("%s", cap->description); object_class_property_set_description(klass, name, desc); - g_free(name); - g_free(desc); } } diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c index 4f316a6f9d..8a4861f45a 100644 --- a/hw/ppc/spapr_cpu_core.c +++ b/hw/ppc/spapr_cpu_core.c @@ -20,6 +20,7 @@ #include "target/ppc/kvm_ppc.h" #include "hw/ppc/ppc.h" #include "target/ppc/mmu-hash64.h" +#include "target/ppc/power8-pmu.h" #include "sysemu/numa.h" #include "sysemu/reset.h" #include "sysemu/hw_accel.h" @@ -36,6 +37,11 @@ static void spapr_reset_vcpu(PowerPCCPU *cpu) cpu_reset(cs); + /* + * "PowerPC Processor binding to IEEE 1275" defines the initial MSR state + * as 32bit (MSR_SF=0) in "8.2.1. Initial Register Values". 
+ */ + env->msr &= ~(1ULL << MSR_SF); env->spr[SPR_HIOR] = 0; lpcr = env->spr[SPR_LPCR]; @@ -183,10 +189,13 @@ static const VMStateDescription vmstate_spapr_cpu_state = { static void spapr_unrealize_vcpu(PowerPCCPU *cpu, SpaprCpuCore *sc) { + CPUPPCState *env = &cpu->env; + if (!sc->pre_3_0_migration) { vmstate_unregister(NULL, &vmstate_spapr_cpu_state, cpu->machine_data); } spapr_irq_cpu_intc_destroy(SPAPR_MACHINE(qdev_get_machine()), cpu); + cpu_ppc_tb_free(env); qdev_unrealize(DEVICE(cpu)); } @@ -255,12 +264,12 @@ static bool spapr_realize_vcpu(PowerPCCPU *cpu, SpaprMachineState *spapr, return false; } - /* Set time-base frequency to 512 MHz */ - cpu_ppc_tb_init(env, SPAPR_TIMEBASE_FREQ); - cpu_ppc_set_vhyp(cpu, PPC_VIRTUAL_HYPERVISOR(spapr)); kvmppc_set_papr(cpu); + /* Set time-base frequency to 512 MHz. vhyp must be set first. */ + cpu_ppc_tb_init(env, SPAPR_TIMEBASE_FREQ); + if (spapr_irq_cpu_intc_create(spapr, cpu, errp) < 0) { qdev_unrealize(DEVICE(cpu)); return false; @@ -382,6 +391,7 @@ static const TypeInfo spapr_cpu_core_type_infos[] = { DEFINE_SPAPR_CPU_CORE_TYPE("power9_v1.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power10_v1.0"), + DEFINE_SPAPR_CPU_CORE_TYPE("power10_v2.0"), #ifdef CONFIG_KVM DEFINE_SPAPR_CPU_CORE_TYPE("host"), #endif diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c index a2f2634601..76bc5d42a0 100644 --- a/hw/ppc/spapr_drc.c +++ b/hw/ppc/spapr_drc.c @@ -17,6 +17,8 @@ #include "hw/ppc/spapr_drc.h" #include "qom/object.h" #include "migration/vmstate.h" +#include "qapi/error.h" +#include "qapi/qapi-events-qdev.h" #include "qapi/visitor.h" #include "qemu/error-report.h" #include "hw/ppc/spapr.h" /* for RTAS return codes */ @@ -167,13 +169,15 @@ static uint32_t drc_unisolate_logical(SpaprDrc *drc) } drc->unplug_requested = false; - error_report("Device hotunplug rejected by the guest " - "for device %s", drc->dev->id); - /* - * TODO: send a QAPI DEVICE_UNPLUG_ERROR event when - * it is implemented. 
- */ + if (drc->dev->id) { + error_report("Device hotunplug rejected by the guest " + "for device %s", drc->dev->id); + } + + qapi_event_send_device_unplug_guest_error(!!drc->dev->id, + drc->dev->id, + drc->dev->canonical_path); } return RTAS_OUT_SUCCESS; /* Nothing to do */ @@ -515,8 +519,8 @@ static const VMStateDescription vmstate_spapr_drc = { static void drc_realize(DeviceState *d, Error **errp) { SpaprDrc *drc = SPAPR_DR_CONNECTOR(d); + g_autofree gchar *link_name = g_strdup_printf("%x", spapr_drc_index(drc)); Object *root_container; - gchar *link_name; const char *child_name; trace_spapr_drc_realize(spapr_drc_index(drc)); @@ -528,12 +532,10 @@ static void drc_realize(DeviceState *d, Error **errp) * existing in the composition tree */ root_container = container_get(object_get_root(), DRC_CONTAINER_PATH); - link_name = g_strdup_printf("%x", spapr_drc_index(drc)); child_name = object_get_canonical_path_component(OBJECT(drc)); trace_spapr_drc_realize_child(spapr_drc_index(drc), child_name); object_property_add_alias(root_container, link_name, drc->owner, child_name); - g_free(link_name); vmstate_register(VMSTATE_IF(drc), spapr_drc_index(drc), &vmstate_spapr_drc, drc); trace_spapr_drc_realize_complete(spapr_drc_index(drc)); @@ -542,22 +544,20 @@ static void drc_realize(DeviceState *d, Error **errp) static void drc_unrealize(DeviceState *d) { SpaprDrc *drc = SPAPR_DR_CONNECTOR(d); + g_autofree gchar *name = g_strdup_printf("%x", spapr_drc_index(drc)); Object *root_container; - gchar *name; trace_spapr_drc_unrealize(spapr_drc_index(drc)); vmstate_unregister(VMSTATE_IF(drc), &vmstate_spapr_drc, drc); root_container = container_get(object_get_root(), DRC_CONTAINER_PATH); - name = g_strdup_printf("%x", spapr_drc_index(drc)); object_property_del(root_container, name); - g_free(name); } SpaprDrc *spapr_dr_connector_new(Object *owner, const char *type, uint32_t id) { SpaprDrc *drc = SPAPR_DR_CONNECTOR(object_new(type)); - char *prop_name; + g_autofree char *prop_name = NULL; drc->id = id; drc->owner = owner; @@ -566,7 +566,6 @@ SpaprDrc *spapr_dr_connector_new(Object *owner, const char *type, object_property_add_child(owner, prop_name, OBJECT(drc)); object_unref(OBJECT(drc)); qdev_realize(DEVICE(drc), NULL, NULL); - g_free(prop_name); return drc; } @@ -799,11 +798,9 @@ static const TypeInfo spapr_drc_pmem_info = { SpaprDrc *spapr_drc_by_index(uint32_t index) { Object *obj; - gchar *name; - - name = g_strdup_printf("%s/%x", DRC_CONTAINER_PATH, index); + g_autofree gchar *name = g_strdup_printf("%s/%x", DRC_CONTAINER_PATH, + index); obj = object_resolve_path(name, NULL); - g_free(name); return !obj ? NULL : SPAPR_DR_CONNECTOR(obj); } @@ -837,8 +834,14 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask) ObjectProperty *prop; ObjectPropertyIterator iter; uint32_t drc_count = 0; - GArray *drc_indexes, *drc_power_domains; - GString *drc_names, *drc_types; + g_autoptr(GArray) drc_indexes = g_array_new(false, true, + sizeof(uint32_t)); + g_autoptr(GArray) drc_power_domains = g_array_new(false, true, + sizeof(uint32_t)); + g_autoptr(GString) drc_names = g_string_set_size(g_string_new(NULL), + sizeof(uint32_t)); + g_autoptr(GString) drc_types = g_string_set_size(g_string_new(NULL), + sizeof(uint32_t)); int ret; /* @@ -853,12 +856,8 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask) * reserve the space now and set the offsets accordingly so we * can fill them in later. 
*/ - drc_indexes = g_array_new(false, true, sizeof(uint32_t)); drc_indexes = g_array_set_size(drc_indexes, 1); - drc_power_domains = g_array_new(false, true, sizeof(uint32_t)); drc_power_domains = g_array_set_size(drc_power_domains, 1); - drc_names = g_string_set_size(g_string_new(NULL), sizeof(uint32_t)); - drc_types = g_string_set_size(g_string_new(NULL), sizeof(uint32_t)); /* aliases for all DRConnector objects will be rooted in QOM * composition tree at DRC_CONTAINER_PATH @@ -870,7 +869,7 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask) Object *obj; SpaprDrc *drc; SpaprDrcClass *drck; - char *drc_name = NULL; + g_autofree char *drc_name = NULL; uint32_t drc_index, drc_power_domain; if (!strstart(prop->type, "link<", NULL)) { @@ -904,7 +903,6 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask) drc_name = spapr_drc_name(drc); drc_names = g_string_append(drc_names, drc_name); drc_names = g_string_insert_len(drc_names, -1, "\0", 1); - g_free(drc_name); /* ibm,drc-types */ drc_types = g_string_append(drc_types, drck->typename); @@ -924,7 +922,7 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask) drc_indexes->len * sizeof(uint32_t)); if (ret) { error_report("Couldn't create ibm,drc-indexes property"); - goto out; + return ret; } ret = fdt_setprop(fdt, offset, "ibm,drc-power-domains", @@ -932,29 +930,22 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask) drc_power_domains->len * sizeof(uint32_t)); if (ret) { error_report("Couldn't finalize ibm,drc-power-domains property"); - goto out; + return ret; } ret = fdt_setprop(fdt, offset, "ibm,drc-names", drc_names->str, drc_names->len); if (ret) { error_report("Couldn't finalize ibm,drc-names property"); - goto out; + return ret; } ret = fdt_setprop(fdt, offset, "ibm,drc-types", drc_types->str, drc_types->len); if (ret) { error_report("Couldn't finalize ibm,drc-types property"); - goto out; } -out: - g_array_free(drc_indexes, true); - g_array_free(drc_power_domains, true); - g_string_free(drc_names, true); - g_string_free(drc_types, true); - return ret; } diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c index 23e2e2fff1..4508e40814 100644 --- a/hw/ppc/spapr_events.c +++ b/hw/ppc/spapr_events.c @@ -594,7 +594,7 @@ static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action, struct rtas_event_log_v6_hp *hp; entry = g_new(SpaprEventLogEntry, 1); - new_hp = g_malloc0(sizeof(struct hp_extended_log)); + new_hp = g_new0(struct hp_extended_log, 1); entry->extended_log = new_hp; v6hdr = &new_hp->v6hdr; @@ -872,7 +872,6 @@ void spapr_mce_req_event(PowerPCCPU *cpu, bool recovered) SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); CPUState *cs = CPU(cpu); int ret; - Error *local_err = NULL; if (spapr->fwnmi_machine_check_addr == -1) { /* Non-FWNMI case, deliver it like an architected CPU interrupt. */ @@ -912,16 +911,17 @@ void spapr_mce_req_event(PowerPCCPU *cpu, bool recovered) } } - ret = migrate_add_blocker(spapr->fwnmi_migration_blocker, &local_err); + /* + * Try to block migration while FWNMI is being handled, so the + * machine check handler runs where the information passed to it + * actually makes sense. This shouldn't actually block migration, + * only delay it slightly, assuming migration is retried. If the + * attempt to block fails, carry on. Unfortunately, it always + * fails when running with -only-migrate. A proper interface to + * delay migration completion for a bit could avoid that. 
+ */ + ret = migrate_add_blocker(spapr->fwnmi_migration_blocker, NULL); if (ret == -EBUSY) { - /* - * We don't want to abort so we let the migration to continue. - * In a rare case, the machine check handler will run on the target. - * Though this is not preferable, it is better than aborting - * the migration or killing the VM. It is okay to call - * migrate_del_blocker on a blocker that was not added (which the - * nmi-interlock handler would do when it's called after this). - */ warn_report("Received a fwnmi while migration was in progress"); } diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index 0e9a5b2e40..925ff523cc 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -9,6 +9,7 @@ #include "qemu/error-report.h" #include "exec/exec-all.h" #include "helper_regs.h" +#include "hw/ppc/ppc.h" #include "hw/ppc/spapr.h" #include "hw/ppc/spapr_cpu_core.h" #include "mmu-hash64.h" @@ -17,6 +18,7 @@ #include "kvm_ppc.h" #include "hw/ppc/fdt.h" #include "hw/ppc/spapr_ovec.h" +#include "hw/ppc/spapr_numa.h" #include "mmu-book3s-v3.h" #include "hw/mem/memory-device.h" @@ -488,6 +490,7 @@ static target_ulong h_cede(PowerPCCPU *cpu, SpaprMachineState *spapr, env->msr |= (1ULL << MSR_EE); hreg_compute_hflags(env); + ppc_maybe_interrupt(env); if (spapr_cpu->prod) { spapr_cpu->prod = false; @@ -498,6 +501,7 @@ static target_ulong h_cede(PowerPCCPU *cpu, SpaprMachineState *spapr, cs->halted = 1; cs->exception_index = EXCP_HLT; cs->exit_request = 1; + ppc_maybe_interrupt(env); } return H_SUCCESS; @@ -519,6 +523,7 @@ static target_ulong h_confer_self(PowerPCCPU *cpu) cs->halted = 1; cs->exception_index = EXCP_HALTED; cs->exit_request = 1; + ppc_maybe_interrupt(&cpu->env); return H_SUCCESS; } @@ -631,6 +636,7 @@ static target_ulong h_prod(PowerPCCPU *cpu, SpaprMachineState *spapr, spapr_cpu = spapr_cpu_state(tcpu); spapr_cpu->prod = true; cs->halted = 0; + ppc_maybe_interrupt(&cpu->env); qemu_cpu_kick(cs); return H_SUCCESS; @@ -918,6 +924,7 @@ static target_ulong h_register_process_table(PowerPCCPU *cpu, target_ulong page_size = args[2]; target_ulong table_size = args[3]; target_ulong update_lpcr = 0; + target_ulong table_byte_size; uint64_t cproc; if (flags & ~FLAGS_MASK) { /* Check no reserved bits are set */ @@ -925,6 +932,14 @@ static target_ulong h_register_process_table(PowerPCCPU *cpu, } if (flags & FLAG_MODIFY) { if (flags & FLAG_REGISTER) { + /* Check process table alignment */ + table_byte_size = 1ULL << (table_size + 12); + if (proc_tbl & (table_byte_size - 1)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: process table not properly aligned: proc_tbl 0x" + TARGET_FMT_lx" proc_tbl_size 0x"TARGET_FMT_lx"\n", + __func__, proc_tbl, table_byte_size); + } if (flags & FLAG_RADIX) { /* Register new RADIX process table */ if (proc_tbl & 0xfff || proc_tbl >> 60) { return H_P2; @@ -1197,6 +1212,12 @@ target_ulong do_client_architecture_support(PowerPCCPU *cpu, spapr->cas_pre_isa3_guest = !spapr_ovec_test(ov1_guest, OV1_PPC_3_00); spapr_ovec_cleanup(ov1_guest); + /* + * Check for NUMA affinity conditions now that we know which NUMA + * affinity the guest will use. + */ + spapr_numa_associativity_check(spapr); + /* * Ensure the guest asks for an interrupt mode we support; * otherwise terminate the boot. @@ -1239,6 +1260,14 @@ target_ulong do_client_architecture_support(PowerPCCPU *cpu, spapr->fdt_initial_size = spapr->fdt_size; spapr->fdt_blob = fdt; + /* + * Set the machine->fdt pointer again since we just freed + * it above (by freeing spapr->fdt_blob). 
We set this + * pointer to enable support for the 'dumpdtb' QMP/HMP + * command. + */ + MACHINE(spapr)->fdt = fdt; + return H_SUCCESS; } @@ -1465,13 +1494,364 @@ target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode, return H_FUNCTION; } -#ifndef CONFIG_TCG +#ifdef CONFIG_TCG +#define PRTS_MASK 0x1f + +static target_ulong h_set_ptbl(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + target_ulong ptcr = args[0]; + + if (!spapr_get_cap(spapr, SPAPR_CAP_NESTED_KVM_HV)) { + return H_FUNCTION; + } + + if ((ptcr & PRTS_MASK) + 12 - 4 > 12) { + return H_PARAMETER; + } + + spapr->nested_ptcr = ptcr; /* Save new partition table */ + + return H_SUCCESS; +} + +static target_ulong h_tlb_invalidate(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + /* + * The spapr virtual hypervisor nested HV implementation retains no L2 + * translation state except for TLB. And the TLB is always invalidated + * across L1<->L2 transitions, so nothing is required here. + */ + + return H_SUCCESS; +} + +static target_ulong h_copy_tofrom_guest(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + /* + * This HCALL is not required, L1 KVM will take a slow path and walk the + * page tables manually to do the data copy. + */ + return H_FUNCTION; +} + +/* + * When this handler returns, the environment is switched to the L2 guest + * and TCG begins running that. spapr_exit_nested() performs the switch from + * L2 back to L1 and returns from the H_ENTER_NESTED hcall. + */ +static target_ulong h_enter_nested(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + target_ulong hv_ptr = args[0]; + target_ulong regs_ptr = args[1]; + target_ulong hdec, now = cpu_ppc_load_tbl(env); + target_ulong lpcr, lpcr_mask; + struct kvmppc_hv_guest_state *hvstate; + struct kvmppc_hv_guest_state hv_state; + struct kvmppc_pt_regs *regs; + hwaddr len; + uint64_t cr; + int i; + + if (spapr->nested_ptcr == 0) { + return H_NOT_AVAILABLE; + } + + len = sizeof(*hvstate); + hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, false, + MEMTXATTRS_UNSPECIFIED); + if (len != sizeof(*hvstate)) { + address_space_unmap(CPU(cpu)->as, hvstate, len, 0, false); + return H_PARAMETER; + } + + memcpy(&hv_state, hvstate, len); + + address_space_unmap(CPU(cpu)->as, hvstate, len, len, false); + + /* + * We accept versions 1 and 2. Version 2 fields are unused because TCG + * does not implement DAWR*. 
+ */ + if (hv_state.version > HV_GUEST_STATE_VERSION) { + return H_PARAMETER; + } + + spapr_cpu->nested_host_state = g_try_new(CPUPPCState, 1); + if (!spapr_cpu->nested_host_state) { + return H_NO_MEM; + } + + memcpy(spapr_cpu->nested_host_state, env, sizeof(CPUPPCState)); + + len = sizeof(*regs); + regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, false, + MEMTXATTRS_UNSPECIFIED); + if (!regs || len != sizeof(*regs)) { + address_space_unmap(CPU(cpu)->as, regs, len, 0, false); + g_free(spapr_cpu->nested_host_state); + return H_P2; + } + + len = sizeof(env->gpr); + assert(len == sizeof(regs->gpr)); + memcpy(env->gpr, regs->gpr, len); + + env->lr = regs->link; + env->ctr = regs->ctr; + cpu_write_xer(env, regs->xer); + + cr = regs->ccr; + for (i = 7; i >= 0; i--) { + env->crf[i] = cr & 15; + cr >>= 4; + } + + env->msr = regs->msr; + env->nip = regs->nip; + + address_space_unmap(CPU(cpu)->as, regs, len, len, false); + + env->cfar = hv_state.cfar; + + assert(env->spr[SPR_LPIDR] == 0); + env->spr[SPR_LPIDR] = hv_state.lpid; + + lpcr_mask = LPCR_DPFD | LPCR_ILE | LPCR_AIL | LPCR_LD | LPCR_MER; + lpcr = (env->spr[SPR_LPCR] & ~lpcr_mask) | (hv_state.lpcr & lpcr_mask); + lpcr |= LPCR_HR | LPCR_UPRT | LPCR_GTSE | LPCR_HVICE | LPCR_HDICE; + lpcr &= ~LPCR_LPES0; + env->spr[SPR_LPCR] = lpcr & pcc->lpcr_mask; + + env->spr[SPR_PCR] = hv_state.pcr; + /* hv_state.amor is not used */ + env->spr[SPR_DPDES] = hv_state.dpdes; + env->spr[SPR_HFSCR] = hv_state.hfscr; + hdec = hv_state.hdec_expiry - now; + spapr_cpu->nested_tb_offset = hv_state.tb_offset; + /* TCG does not implement DAWR*, CIABR, PURR, SPURR, IC, VTB, HEIR SPRs*/ + env->spr[SPR_SRR0] = hv_state.srr0; + env->spr[SPR_SRR1] = hv_state.srr1; + env->spr[SPR_SPRG0] = hv_state.sprg[0]; + env->spr[SPR_SPRG1] = hv_state.sprg[1]; + env->spr[SPR_SPRG2] = hv_state.sprg[2]; + env->spr[SPR_SPRG3] = hv_state.sprg[3]; + env->spr[SPR_BOOKS_PID] = hv_state.pidr; + env->spr[SPR_PPR] = hv_state.ppr; + + cpu_ppc_hdecr_init(env); + cpu_ppc_store_hdecr(env, hdec); + + /* + * The hv_state.vcpu_token is not needed. It is used by the KVM + * implementation to remember which L2 vCPU last ran on which physical + * CPU so as to invalidate process scope translations if it is moved + * between physical CPUs. For now TLBs are always flushed on L1<->L2 + * transitions so this is not a problem. + * + * Could validate that the same vcpu_token does not attempt to run on + * different L1 vCPUs at the same time, but that would be a L1 KVM bug + * and it's not obviously worth a new data structure to do it. + */ + + env->tb_env->tb_offset += spapr_cpu->nested_tb_offset; + spapr_cpu->in_nested = true; + + hreg_compute_hflags(env); + ppc_maybe_interrupt(env); + tlb_flush(cs); + env->reserve_addr = -1; /* Reset the reservation */ + + /* + * The spapr hcall helper sets env->gpr[3] to the return value, but at + * this point the L1 is not returning from the hcall but rather we + * start running the L2, so r3 must not be clobbered, so return env->gpr[3] + * to leave it unchanged. 
+ */ + return env->gpr[3]; +} + +void spapr_exit_nested(PowerPCCPU *cpu, int excp) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + target_ulong r3_return = env->excp_vectors[excp]; /* hcall return value */ + target_ulong hv_ptr = spapr_cpu->nested_host_state->gpr[4]; + target_ulong regs_ptr = spapr_cpu->nested_host_state->gpr[5]; + struct kvmppc_hv_guest_state *hvstate; + struct kvmppc_pt_regs *regs; + hwaddr len; + uint64_t cr; + int i; + + assert(spapr_cpu->in_nested); + + cpu_ppc_hdecr_exit(env); + + len = sizeof(*hvstate); + hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, true, + MEMTXATTRS_UNSPECIFIED); + if (len != sizeof(*hvstate)) { + address_space_unmap(CPU(cpu)->as, hvstate, len, 0, true); + r3_return = H_PARAMETER; + goto out_restore_l1; + } + + hvstate->cfar = env->cfar; + hvstate->lpcr = env->spr[SPR_LPCR]; + hvstate->pcr = env->spr[SPR_PCR]; + hvstate->dpdes = env->spr[SPR_DPDES]; + hvstate->hfscr = env->spr[SPR_HFSCR]; + + if (excp == POWERPC_EXCP_HDSI) { + hvstate->hdar = env->spr[SPR_HDAR]; + hvstate->hdsisr = env->spr[SPR_HDSISR]; + hvstate->asdr = env->spr[SPR_ASDR]; + } else if (excp == POWERPC_EXCP_HISI) { + hvstate->asdr = env->spr[SPR_ASDR]; + } + + /* HEIR should be implemented for HV mode and saved here. */ + hvstate->srr0 = env->spr[SPR_SRR0]; + hvstate->srr1 = env->spr[SPR_SRR1]; + hvstate->sprg[0] = env->spr[SPR_SPRG0]; + hvstate->sprg[1] = env->spr[SPR_SPRG1]; + hvstate->sprg[2] = env->spr[SPR_SPRG2]; + hvstate->sprg[3] = env->spr[SPR_SPRG3]; + hvstate->pidr = env->spr[SPR_BOOKS_PID]; + hvstate->ppr = env->spr[SPR_PPR]; + + /* Is it okay to specify write length larger than actual data written? */ + address_space_unmap(CPU(cpu)->as, hvstate, len, len, true); + + len = sizeof(*regs); + regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, true, + MEMTXATTRS_UNSPECIFIED); + if (!regs || len != sizeof(*regs)) { + address_space_unmap(CPU(cpu)->as, regs, len, 0, true); + r3_return = H_P2; + goto out_restore_l1; + } + + len = sizeof(env->gpr); + assert(len == sizeof(regs->gpr)); + memcpy(regs->gpr, env->gpr, len); + + regs->link = env->lr; + regs->ctr = env->ctr; + regs->xer = cpu_read_xer(env); + + cr = 0; + for (i = 0; i < 8; i++) { + cr |= (env->crf[i] & 15) << (4 * (7 - i)); + } + regs->ccr = cr; + + if (excp == POWERPC_EXCP_MCHECK || + excp == POWERPC_EXCP_RESET || + excp == POWERPC_EXCP_SYSCALL) { + regs->nip = env->spr[SPR_SRR0]; + regs->msr = env->spr[SPR_SRR1] & env->msr_mask; + } else { + regs->nip = env->spr[SPR_HSRR0]; + regs->msr = env->spr[SPR_HSRR1] & env->msr_mask; + } + + /* Is it okay to specify write length larger than actual data written? 
*/ + address_space_unmap(CPU(cpu)->as, regs, len, len, true); + +out_restore_l1: + memcpy(env->gpr, spapr_cpu->nested_host_state->gpr, sizeof(env->gpr)); + env->lr = spapr_cpu->nested_host_state->lr; + env->ctr = spapr_cpu->nested_host_state->ctr; + memcpy(env->crf, spapr_cpu->nested_host_state->crf, sizeof(env->crf)); + env->cfar = spapr_cpu->nested_host_state->cfar; + env->xer = spapr_cpu->nested_host_state->xer; + env->so = spapr_cpu->nested_host_state->so; + env->ov = spapr_cpu->nested_host_state->ov; + env->ov32 = spapr_cpu->nested_host_state->ov32; + env->ca32 = spapr_cpu->nested_host_state->ca32; + env->msr = spapr_cpu->nested_host_state->msr; + env->nip = spapr_cpu->nested_host_state->nip; + + assert(env->spr[SPR_LPIDR] != 0); + env->spr[SPR_LPCR] = spapr_cpu->nested_host_state->spr[SPR_LPCR]; + env->spr[SPR_LPIDR] = spapr_cpu->nested_host_state->spr[SPR_LPIDR]; + env->spr[SPR_PCR] = spapr_cpu->nested_host_state->spr[SPR_PCR]; + env->spr[SPR_DPDES] = 0; + env->spr[SPR_HFSCR] = spapr_cpu->nested_host_state->spr[SPR_HFSCR]; + env->spr[SPR_SRR0] = spapr_cpu->nested_host_state->spr[SPR_SRR0]; + env->spr[SPR_SRR1] = spapr_cpu->nested_host_state->spr[SPR_SRR1]; + env->spr[SPR_SPRG0] = spapr_cpu->nested_host_state->spr[SPR_SPRG0]; + env->spr[SPR_SPRG1] = spapr_cpu->nested_host_state->spr[SPR_SPRG1]; + env->spr[SPR_SPRG2] = spapr_cpu->nested_host_state->spr[SPR_SPRG2]; + env->spr[SPR_SPRG3] = spapr_cpu->nested_host_state->spr[SPR_SPRG3]; + env->spr[SPR_BOOKS_PID] = spapr_cpu->nested_host_state->spr[SPR_BOOKS_PID]; + env->spr[SPR_PPR] = spapr_cpu->nested_host_state->spr[SPR_PPR]; + + /* + * Return the interrupt vector address from H_ENTER_NESTED to the L1 + * (or error code). + */ + env->gpr[3] = r3_return; + + env->tb_env->tb_offset -= spapr_cpu->nested_tb_offset; + spapr_cpu->in_nested = false; + + hreg_compute_hflags(env); + ppc_maybe_interrupt(env); + tlb_flush(cs); + env->reserve_addr = -1; /* Reset the reservation */ + + g_free(spapr_cpu->nested_host_state); + spapr_cpu->nested_host_state = NULL; +} + +static void hypercall_register_nested(void) +{ + spapr_register_hypercall(KVMPPC_H_SET_PARTITION_TABLE, h_set_ptbl); + spapr_register_hypercall(KVMPPC_H_ENTER_NESTED, h_enter_nested); + spapr_register_hypercall(KVMPPC_H_TLB_INVALIDATE, h_tlb_invalidate); + spapr_register_hypercall(KVMPPC_H_COPY_TOFROM_GUEST, h_copy_tofrom_guest); +} + +static void hypercall_register_softmmu(void) +{ + /* DO NOTHING */ +} +#else +void spapr_exit_nested(PowerPCCPU *cpu, int excp) +{ + g_assert_not_reached(); +} + static target_ulong h_softmmu(PowerPCCPU *cpu, SpaprMachineState *spapr, target_ulong opcode, target_ulong *args) { g_assert_not_reached(); } +static void hypercall_register_nested(void) +{ + /* DO NOTHING */ +} + static void hypercall_register_softmmu(void) { /* hcall-pft */ @@ -1483,11 +1863,6 @@ static void hypercall_register_softmmu(void) /* hcall-bulk */ spapr_register_hypercall(H_BULK_REMOVE, h_softmmu); } -#else -static void hypercall_register_softmmu(void) -{ - /* DO NOTHING */ -} #endif static void hypercall_register_types(void) @@ -1545,6 +1920,8 @@ static void hypercall_register_types(void) spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support); spapr_register_hypercall(KVMPPC_H_UPDATE_DT, h_update_dt); + + hypercall_register_nested(); } type_init(hypercall_register_types) diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c index db01071858..63e34d457a 100644 --- a/hw/ppc/spapr_iommu.c +++ b/hw/ppc/spapr_iommu.c @@ -279,7 +279,7 @@ static const VMStateDescription 
vmstate_spapr_tce_table_ex = { static const VMStateDescription vmstate_spapr_tce_table = { .name = "spapr_iommu", - .version_id = 2, + .version_id = 3, .minimum_version_id = 2, .pre_save = spapr_tce_table_pre_save, .post_load = spapr_tce_table_post_load, @@ -292,6 +292,7 @@ static const VMStateDescription vmstate_spapr_tce_table = { VMSTATE_BOOL(bypass, SpaprTceTable), VMSTATE_VARRAY_UINT32_ALLOC(mig_table, SpaprTceTable, mig_nb_table, 0, vmstate_info_uint64, uint64_t), + VMSTATE_BOOL_V(def_win, SpaprTceTable, 3), VMSTATE_END_OF_LIST() }, @@ -685,7 +686,7 @@ static void spapr_tce_table_class_init(ObjectClass *klass, void *data) spapr_register_hypercall(H_STUFF_TCE, h_stuff_tce); } -static TypeInfo spapr_tce_table_info = { +static const TypeInfo spapr_tce_table_info = { .name = TYPE_SPAPR_TCE_TABLE, .parent = TYPE_DEVICE, .instance_size = sizeof(SpaprTceTable), diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c index 779f18b994..a64098c375 100644 --- a/hw/ppc/spapr_numa.c +++ b/hw/ppc/spapr_numa.c @@ -11,7 +11,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "hw/ppc/spapr_numa.h" #include "hw/pci-host/spapr.h" #include "hw/ppc/fdt.h" @@ -19,25 +18,88 @@ /* Moved from hw/ppc/spapr_pci_nvlink2.c */ #define SPAPR_GPU_NUMA_ID (cpu_to_be32(1)) -static bool spapr_machine_using_legacy_numa(SpaprMachineState *spapr) +/* + * Retrieves max_dist_ref_points of the current NUMA affinity. + */ +static int get_max_dist_ref_points(SpaprMachineState *spapr) { - MachineState *machine = MACHINE(spapr); - SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + return FORM2_DIST_REF_POINTS; + } - return smc->pre_5_2_numa_associativity || - machine->numa_state->num_nodes <= 1; + return FORM1_DIST_REF_POINTS; +} + +/* + * Retrieves numa_assoc_size of the current NUMA affinity. + */ +static int get_numa_assoc_size(SpaprMachineState *spapr) +{ + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + return FORM2_NUMA_ASSOC_SIZE; + } + + return FORM1_NUMA_ASSOC_SIZE; +} + +/* + * Retrieves vcpu_assoc_size of the current NUMA affinity. + * + * vcpu_assoc_size is the size of ibm,associativity array + * for CPUs, which has an extra element (vcpu_id) in the end. + */ +static int get_vcpu_assoc_size(SpaprMachineState *spapr) +{ + return get_numa_assoc_size(spapr) + 1; +} + +/* + * Retrieves the ibm,associativity array of NUMA node 'node_id' + * for the current NUMA affinity. + */ +static const uint32_t *get_associativity(SpaprMachineState *spapr, int node_id) +{ + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + return spapr->FORM2_assoc_array[node_id]; + } + return spapr->FORM1_assoc_array[node_id]; +} + +/* + * Wrapper that returns node distance from ms->numa_state->nodes + * after handling edge cases where the distance might be absent. + */ +static int get_numa_distance(MachineState *ms, int src, int dst) +{ + NodeInfo *numa_info = ms->numa_state->nodes; + int ret = numa_info[src].distance[dst]; + + if (ret != 0) { + return ret; + } + + /* + * In case QEMU adds a default NUMA single node when the user + * did not add any, or where the user did not supply distances, + * the distance will be absent (zero). Return local/remote + * distance in this case. 
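     * For example, a guest started with a single implicit NUMA node, or with
     * "-numa node" options but no "-numa dist" settings, has all distances
     * zeroed here; this helper then reports NUMA_DISTANCE_MIN (10) for
     * src == dst and NUMA_DISTANCE_DEFAULT (20) for any other pair.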
+ */ + if (src == dst) { + return NUMA_DISTANCE_MIN; + } + + return NUMA_DISTANCE_DEFAULT; } static bool spapr_numa_is_symmetrical(MachineState *ms) { - int src, dst; int nb_numa_nodes = ms->numa_state->num_nodes; - NodeInfo *numa_info = ms->numa_state->nodes; + int src, dst; for (src = 0; src < nb_numa_nodes; src++) { for (dst = src; dst < nb_numa_nodes; dst++) { - if (numa_info[src].distance[dst] != - numa_info[dst].distance[src]) { + if (get_numa_distance(ms, src, dst) != + get_numa_distance(ms, dst, src)) { return false; } } @@ -92,12 +154,22 @@ static uint8_t spapr_numa_get_numa_level(uint8_t distance) return 0; } -static void spapr_numa_define_associativity_domains(SpaprMachineState *spapr) +static void spapr_numa_define_FORM1_domains(SpaprMachineState *spapr) { MachineState *ms = MACHINE(spapr); - NodeInfo *numa_info = ms->numa_state->nodes; int nb_numa_nodes = ms->numa_state->num_nodes; - int src, dst, i; + int src, dst, i, j; + + /* + * Fill all associativity domains of non-zero NUMA nodes with + * node_id. This is required because the default value (0) is + * considered a match with associativity domains of node 0. + */ + for (i = 1; i < nb_numa_nodes; i++) { + for (j = 1; j < FORM1_DIST_REF_POINTS; j++) { + spapr->FORM1_assoc_array[i][j] = cpu_to_be32(i); + } + } for (src = 0; src < nb_numa_nodes; src++) { for (dst = src; dst < nb_numa_nodes; dst++) { @@ -121,7 +193,7 @@ static void spapr_numa_define_associativity_domains(SpaprMachineState *spapr) * The PPC kernel expects the associativity domains of node 0 to * be always 0, and this algorithm will grant that by default. */ - uint8_t distance = numa_info[src].distance[dst]; + uint8_t distance = get_numa_distance(ms, src, dst); uint8_t n_level = spapr_numa_get_numa_level(distance); uint32_t assoc_src; @@ -132,7 +204,7 @@ static void spapr_numa_define_associativity_domains(SpaprMachineState *spapr) * * The Linux kernel will assume that the distance between src and * dst, in this case of no match, is 10 (local distance) doubled - * for each NUMA it didn't match. We have MAX_DISTANCE_REF_POINTS + * for each NUMA it didn't match. We have FORM1_DIST_REF_POINTS * levels (4), so this gives us 10*2*2*2*2 = 160. * * This logic can be seen in the Linux kernel source code, as of @@ -147,25 +219,69 @@ static void spapr_numa_define_associativity_domains(SpaprMachineState *spapr) * and going up to 0x1. */ for (i = n_level; i > 0; i--) { - assoc_src = spapr->numa_assoc_array[src][i]; - spapr->numa_assoc_array[dst][i] = assoc_src; + assoc_src = spapr->FORM1_assoc_array[src][i]; + spapr->FORM1_assoc_array[dst][i] = assoc_src; } } } } -void spapr_numa_associativity_init(SpaprMachineState *spapr, - MachineState *machine) +static void spapr_numa_FORM1_affinity_check(MachineState *machine) +{ + int i; + + /* + * Check we don't have a memory-less/cpu-less NUMA node + * Firmware relies on the existing memory/cpu topology to provide the + * NUMA topology to the kernel. + * And the linux kernel needs to know the NUMA topology at start + * to be able to hotplug CPUs later. 
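     *
     * For example (hypothetical command line), "-smp 4
     * -numa node,nodeid=0,cpus=0-3,memdev=ram0 -numa node,nodeid=1" leaves
     * node 1 with neither memory nor CPUs and is rejected below when the
     * guest negotiates FORM1 affinity.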
+ */ + if (machine->numa_state->num_nodes) { + for (i = 0; i < machine->numa_state->num_nodes; ++i) { + /* check for memory-less node */ + if (machine->numa_state->nodes[i].node_mem == 0) { + CPUState *cs; + int found = 0; + /* check for cpu-less node */ + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + if (cpu->node_id == i) { + found = 1; + break; + } + } + /* memory-less and cpu-less node */ + if (!found) { + error_report( +"Memory-less/cpu-less nodes are not supported with FORM1 NUMA (node %d)", i); + exit(EXIT_FAILURE); + } + } + } + } + + if (!spapr_numa_is_symmetrical(machine)) { + error_report( +"Asymmetrical NUMA topologies aren't supported in the pSeries machine using FORM1 NUMA"); + exit(EXIT_FAILURE); + } +} + +/* + * Set NUMA machine state data based on FORM1 affinity semantics. + */ +static void spapr_numa_FORM1_affinity_init(SpaprMachineState *spapr, + MachineState *machine) { SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); int nb_numa_nodes = machine->numa_state->num_nodes; int i, j, max_nodes_with_gpus; - bool using_legacy_numa = spapr_machine_using_legacy_numa(spapr); /* * For all associativity arrays: first position is the size, - * position MAX_DISTANCE_REF_POINTS is always the numa_id, + * position FORM1_DIST_REF_POINTS is always the numa_id, * represented by the index 'i'. * * This will break on sparse NUMA setups, when/if QEMU starts @@ -173,19 +289,8 @@ void spapr_numa_associativity_init(SpaprMachineState *spapr, * 'i' will be a valid node_id set by the user. */ for (i = 0; i < nb_numa_nodes; i++) { - spapr->numa_assoc_array[i][0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS); - spapr->numa_assoc_array[i][MAX_DISTANCE_REF_POINTS] = cpu_to_be32(i); - - /* - * Fill all associativity domains of non-zero NUMA nodes with - * node_id. This is required because the default value (0) is - * considered a match with associativity domains of node 0. - */ - if (!using_legacy_numa && i != 0) { - for (j = 1; j < MAX_DISTANCE_REF_POINTS; j++) { - spapr->numa_assoc_array[i][j] = cpu_to_be32(i); - } - } + spapr->FORM1_assoc_array[i][0] = cpu_to_be32(FORM1_DIST_REF_POINTS); + spapr->FORM1_assoc_array[i][FORM1_DIST_REF_POINTS] = cpu_to_be32(i); } /* @@ -199,47 +304,95 @@ void spapr_numa_associativity_init(SpaprMachineState *spapr, max_nodes_with_gpus = nb_numa_nodes + NVGPU_MAX_NUM; for (i = nb_numa_nodes; i < max_nodes_with_gpus; i++) { - spapr->numa_assoc_array[i][0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS); + spapr->FORM1_assoc_array[i][0] = cpu_to_be32(FORM1_DIST_REF_POINTS); - for (j = 1; j < MAX_DISTANCE_REF_POINTS; j++) { + for (j = 1; j < FORM1_DIST_REF_POINTS; j++) { uint32_t gpu_assoc = smc->pre_5_1_assoc_refpoints ? SPAPR_GPU_NUMA_ID : cpu_to_be32(i); - spapr->numa_assoc_array[i][j] = gpu_assoc; + spapr->FORM1_assoc_array[i][j] = gpu_assoc; } - spapr->numa_assoc_array[i][MAX_DISTANCE_REF_POINTS] = cpu_to_be32(i); + spapr->FORM1_assoc_array[i][FORM1_DIST_REF_POINTS] = cpu_to_be32(i); } /* - * Legacy NUMA guests (pseries-5.1 and older, or guests with only - * 1 NUMA node) will not benefit from anything we're going to do - * after this point. + * Guests pseries-5.1 and older uses zeroed associativity domains, + * i.e. no domain definition based on NUMA distance input. + * + * Same thing with guests that have only one NUMA node. 
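     *
     * In that case a node such as node 1 simply keeps the array initialized
     * above, e.g. ibm,associativity = { 4, 0, 0, 0, 1 }, instead of domains
     * derived from the user-supplied distance matrix.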
*/ - if (using_legacy_numa) { + if (smc->pre_5_2_numa_associativity || + machine->numa_state->num_nodes <= 1) { return; } - if (!spapr_numa_is_symmetrical(machine)) { - error_report("Asymmetrical NUMA topologies aren't supported " - "in the pSeries machine"); - exit(EXIT_FAILURE); + spapr_numa_define_FORM1_domains(spapr); +} + +/* + * Init NUMA FORM2 machine state data + */ +static void spapr_numa_FORM2_affinity_init(SpaprMachineState *spapr) +{ + int i; + + /* + * For all resources but CPUs, FORM2 associativity arrays will + * be a size 2 array with the following format: + * + * ibm,associativity = {1, numa_id} + * + * CPUs will write an additional 'vcpu_id' on top of the arrays + * being initialized here. 'numa_id' is represented by the + * index 'i' of the loop. + * + * Given that this initialization is also valid for GPU associativity + * arrays, handle everything in one single step by populating the + * arrays up to NUMA_NODES_MAX_NUM. + */ + for (i = 0; i < NUMA_NODES_MAX_NUM; i++) { + spapr->FORM2_assoc_array[i][0] = cpu_to_be32(1); + spapr->FORM2_assoc_array[i][1] = cpu_to_be32(i); + } +} + +void spapr_numa_associativity_init(SpaprMachineState *spapr, + MachineState *machine) +{ + spapr_numa_FORM1_affinity_init(spapr, machine); + spapr_numa_FORM2_affinity_init(spapr); +} + +void spapr_numa_associativity_check(SpaprMachineState *spapr) +{ + /* + * FORM2 does not have any restrictions we need to handle + * at CAS time, for now. + */ + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + return; } - spapr_numa_define_associativity_domains(spapr); + spapr_numa_FORM1_affinity_check(MACHINE(spapr)); } void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt, int offset, int nodeid) { + const uint32_t *associativity = get_associativity(spapr, nodeid); + _FDT((fdt_setprop(fdt, offset, "ibm,associativity", - spapr->numa_assoc_array[nodeid], - sizeof(spapr->numa_assoc_array[nodeid])))); + associativity, + get_numa_assoc_size(spapr) * sizeof(uint32_t)))); } static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr, PowerPCCPU *cpu) { - uint32_t *vcpu_assoc = g_new(uint32_t, VCPU_ASSOC_SIZE); + const uint32_t *associativity = get_associativity(spapr, cpu->node_id); + int max_distance_ref_points = get_max_dist_ref_points(spapr); + int vcpu_assoc_size = get_vcpu_assoc_size(spapr); + uint32_t *vcpu_assoc = g_new(uint32_t, vcpu_assoc_size); int index = spapr_get_vcpu_id(cpu); /* @@ -248,10 +401,10 @@ static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr, * 0, put cpu_id last, then copy the remaining associativity * domains. 
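     *
     * For example, under FORM1 (max_distance_ref_points == 4) the result is
     * { 5, dom1, dom2, dom3, numa_id, vcpu_id }; under FORM2 it is simply
     * { 2, numa_id, vcpu_id }.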
*/ - vcpu_assoc[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS + 1); - vcpu_assoc[VCPU_ASSOC_SIZE - 1] = cpu_to_be32(index); - memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id] + 1, - (VCPU_ASSOC_SIZE - 2) * sizeof(uint32_t)); + vcpu_assoc[0] = cpu_to_be32(max_distance_ref_points + 1); + vcpu_assoc[vcpu_assoc_size - 1] = cpu_to_be32(index); + memcpy(vcpu_assoc + 1, associativity + 1, + (vcpu_assoc_size - 2) * sizeof(uint32_t)); return vcpu_assoc; } @@ -260,12 +413,13 @@ int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt, int offset, PowerPCCPU *cpu) { g_autofree uint32_t *vcpu_assoc = NULL; + int vcpu_assoc_size = get_vcpu_assoc_size(spapr); vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, cpu); /* Advertise NUMA via ibm,associativity */ return fdt_setprop(fdt, offset, "ibm,associativity", vcpu_assoc, - VCPU_ASSOC_SIZE * sizeof(uint32_t)); + vcpu_assoc_size * sizeof(uint32_t)); } @@ -273,41 +427,37 @@ int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt, int offset) { MachineState *machine = MACHINE(spapr); + int max_distance_ref_points = get_max_dist_ref_points(spapr); int nb_numa_nodes = machine->numa_state->num_nodes; int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1; - uint32_t *int_buf, *cur_index, buf_len; - int ret, i; + g_autofree uint32_t *int_buf = NULL; + uint32_t *cur_index; + int i; /* ibm,associativity-lookup-arrays */ - buf_len = (nr_nodes * MAX_DISTANCE_REF_POINTS + 2) * sizeof(uint32_t); - cur_index = int_buf = g_malloc0(buf_len); + int_buf = g_new0(uint32_t, nr_nodes * max_distance_ref_points + 2); + cur_index = int_buf; int_buf[0] = cpu_to_be32(nr_nodes); /* Number of entries per associativity list */ - int_buf[1] = cpu_to_be32(MAX_DISTANCE_REF_POINTS); + int_buf[1] = cpu_to_be32(max_distance_ref_points); cur_index += 2; for (i = 0; i < nr_nodes; i++) { /* - * For the lookup-array we use the ibm,associativity array, - * from numa_assoc_array. without the first element (size). + * For the lookup-array we use the ibm,associativity array of the + * current NUMA affinity, without the first element (size). */ - uint32_t *associativity = spapr->numa_assoc_array[i]; + const uint32_t *associativity = get_associativity(spapr, i); memcpy(cur_index, ++associativity, - sizeof(uint32_t) * MAX_DISTANCE_REF_POINTS); - cur_index += MAX_DISTANCE_REF_POINTS; + sizeof(uint32_t) * max_distance_ref_points); + cur_index += max_distance_ref_points; } - ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf, - (cur_index - int_buf) * sizeof(uint32_t)); - g_free(int_buf); - return ret; + return fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", + int_buf, (cur_index - int_buf) * sizeof(uint32_t)); } -/* - * Helper that writes ibm,associativity-reference-points and - * max-associativity-domains in the RTAS pointed by @rtas - * in the DT @fdt. 
- */ -void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas) +static void spapr_numa_FORM1_write_rtas_dt(SpaprMachineState *spapr, + void *fdt, int rtas) { MachineState *ms = MACHINE(spapr); SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); @@ -329,7 +479,8 @@ void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas) cpu_to_be32(maxdomain) }; - if (spapr_machine_using_legacy_numa(spapr)) { + if (smc->pre_5_2_numa_associativity || + ms->numa_state->num_nodes <= 1) { uint32_t legacy_refpoints[] = { cpu_to_be32(0x4), cpu_to_be32(0x4), @@ -365,6 +516,113 @@ void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas) maxdomains, sizeof(maxdomains))); } +static void spapr_numa_FORM2_write_rtas_tables(SpaprMachineState *spapr, + void *fdt, int rtas) +{ + MachineState *ms = MACHINE(spapr); + int nb_numa_nodes = ms->numa_state->num_nodes; + int distance_table_entries = nb_numa_nodes * nb_numa_nodes; + g_autofree uint32_t *lookup_index_table = NULL; + g_autofree uint8_t *distance_table = NULL; + int src, dst, i, distance_table_size; + + /* + * ibm,numa-lookup-index-table: array with length and a + * list of NUMA ids present in the guest. + */ + lookup_index_table = g_new0(uint32_t, nb_numa_nodes + 1); + lookup_index_table[0] = cpu_to_be32(nb_numa_nodes); + + for (i = 0; i < nb_numa_nodes; i++) { + lookup_index_table[i + 1] = cpu_to_be32(i); + } + + _FDT(fdt_setprop(fdt, rtas, "ibm,numa-lookup-index-table", + lookup_index_table, + (nb_numa_nodes + 1) * sizeof(uint32_t))); + + /* + * ibm,numa-distance-table: contains all node distances. First + * element is the size of the table as uint32, followed up + * by all the uint8 distances from the first NUMA node, then all + * distances from the second NUMA node and so on. + * + * ibm,numa-lookup-index-table is used by guest to navigate this + * array because NUMA ids can be sparse (node 0 is the first, + * node 8 is the second ...). + */ + distance_table_size = distance_table_entries * sizeof(uint8_t) + + sizeof(uint32_t); + distance_table = g_new0(uint8_t, distance_table_size); + stl_be_p(distance_table, distance_table_entries); + + /* Skip the uint32_t array length at the start */ + i = sizeof(uint32_t); + + for (src = 0; src < nb_numa_nodes; src++) { + for (dst = 0; dst < nb_numa_nodes; dst++) { + distance_table[i++] = get_numa_distance(ms, src, dst); + } + } + + _FDT(fdt_setprop(fdt, rtas, "ibm,numa-distance-table", + distance_table, distance_table_size)); +} + +/* + * This helper could be compressed in a single function with + * FORM1 logic since we're setting the same DT values, with the + * difference being a call to spapr_numa_FORM2_write_rtas_tables() + * in the end. The separation was made to avoid clogging FORM1 code + * which already has to deal with compat modes from previous + * QEMU machine types. + */ +static void spapr_numa_FORM2_write_rtas_dt(SpaprMachineState *spapr, + void *fdt, int rtas) +{ + MachineState *ms = MACHINE(spapr); + uint32_t number_nvgpus_nodes = spapr->gpu_numa_id - + spapr_numa_initial_nvgpu_numa_id(ms); + + /* + * In FORM2, ibm,associativity-reference-points will point to + * the element in the ibm,associativity array that contains the + * primary domain index (for FORM2, the first element). + * + * This value (in our case, the numa-id) is then used as an index + * to retrieve all other attributes of the node (distance, + * bandwidth, latency) via ibm,numa-lookup-index-table and other + * ibm,numa-*-table properties. 
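     *
     * As a worked example, a two-node guest with default distances ends up
     * with ibm,numa-lookup-index-table = { 2, 0, 1 } and
     * ibm,numa-distance-table = { 4, 10, 20, 20, 10 } (a length word followed
     * by byte-sized distances in row-major order).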
+ */ + uint32_t refpoints[] = { cpu_to_be32(1) }; + + uint32_t maxdomain = ms->numa_state->num_nodes + number_nvgpus_nodes; + uint32_t maxdomains[] = { cpu_to_be32(1), cpu_to_be32(maxdomain) }; + + _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points", + refpoints, sizeof(refpoints))); + + _FDT(fdt_setprop(fdt, rtas, "ibm,max-associativity-domains", + maxdomains, sizeof(maxdomains))); + + spapr_numa_FORM2_write_rtas_tables(spapr, fdt, rtas); +} + +/* + * Helper that writes ibm,associativity-reference-points and + * max-associativity-domains in the RTAS pointed by @rtas + * in the DT @fdt. + */ +void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas) +{ + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + spapr_numa_FORM2_write_rtas_dt(spapr, fdt, rtas); + return; + } + + spapr_numa_FORM1_write_rtas_dt(spapr, fdt, rtas); +} + static target_ulong h_home_node_associativity(PowerPCCPU *cpu, SpaprMachineState *spapr, target_ulong opcode, @@ -375,6 +633,7 @@ static target_ulong h_home_node_associativity(PowerPCCPU *cpu, target_ulong procno = args[1]; PowerPCCPU *tcpu; int idx, assoc_idx; + int vcpu_assoc_size = get_vcpu_assoc_size(spapr); /* only support procno from H_REGISTER_VPA */ if (flags != 0x1) { @@ -393,7 +652,7 @@ static target_ulong h_home_node_associativity(PowerPCCPU *cpu, * 12 associativity domains for vcpus. Assert and bail if that's * not the case. */ - G_STATIC_ASSERT((VCPU_ASSOC_SIZE - 1) <= 12); + g_assert((vcpu_assoc_size - 1) <= 12); vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, tcpu); /* assoc_idx starts at 1 to skip associativity size */ @@ -414,9 +673,9 @@ static target_ulong h_home_node_associativity(PowerPCCPU *cpu, * macro. The ternary will fill the remaining registers with -1 * after we went through vcpu_assoc[]. */ - a = assoc_idx < VCPU_ASSOC_SIZE ? + a = assoc_idx < vcpu_assoc_size ? be32_to_cpu(vcpu_assoc[assoc_idx++]) : -1; - b = assoc_idx < VCPU_ASSOC_SIZE ? + b = assoc_idx < vcpu_assoc_size ? be32_to_cpu(vcpu_assoc[assoc_idx++]) : -1; args[idx] = ASSOCIATIVITY(a, b); diff --git a/hw/ppc/spapr_nvdimm.c b/hw/ppc/spapr_nvdimm.c index 91de1052f2..04a64cada3 100644 --- a/hw/ppc/spapr_nvdimm.c +++ b/hw/ppc/spapr_nvdimm.c @@ -22,6 +22,7 @@ * THE SOFTWARE. */ #include "qemu/osdep.h" +#include "qemu/cutils.h" #include "qapi/error.h" #include "hw/ppc/spapr_drc.h" #include "hw/ppc/spapr_nvdimm.h" @@ -30,6 +31,10 @@ #include "hw/ppc/fdt.h" #include "qemu/range.h" #include "hw/ppc/spapr_numa.h" +#include "block/thread-pool.h" +#include "migration/vmstate.h" +#include "qemu/pmem.h" +#include "hw/qdev-properties.h" /* DIMM health bitmap bitmap indicators. 
Taken from kernel's papr_scm.c */ /* SCM device is unable to persist memory contents */ @@ -47,11 +52,25 @@ /* Have an explicit check for alignment */ QEMU_BUILD_BUG_ON(SPAPR_MINIMUM_SCM_BLOCK_SIZE % SPAPR_MEMORY_BLOCK_SIZE); +#define TYPE_SPAPR_NVDIMM "spapr-nvdimm" +OBJECT_DECLARE_TYPE(SpaprNVDIMMDevice, SPAPRNVDIMMClass, SPAPR_NVDIMM) + +struct SPAPRNVDIMMClass { + /* private */ + NVDIMMClass parent_class; + + /* public */ + void (*realize)(NVDIMMDevice *dimm, Error **errp); + void (*unrealize)(NVDIMMDevice *dimm, Error **errp); +}; + bool spapr_nvdimm_validate(HotplugHandler *hotplug_dev, NVDIMMDevice *nvdimm, uint64_t size, Error **errp) { const MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev); const MachineState *ms = MACHINE(hotplug_dev); + PCDIMMDevice *dimm = PC_DIMM(nvdimm); + MemoryRegion *mr = host_memory_backend_get_memory(dimm->hostmem); g_autofree char *uuidstr = NULL; QemuUUID uuid; int ret; @@ -89,6 +108,14 @@ bool spapr_nvdimm_validate(HotplugHandler *hotplug_dev, NVDIMMDevice *nvdimm, return false; } + if (object_dynamic_cast(OBJECT(nvdimm), TYPE_SPAPR_NVDIMM) && + (memory_region_get_fd(mr) < 0)) { + error_setg(errp, "spapr-nvdimm device requires the " + "memdev %s to be of memory-backend-file type", + object_get_canonical_path_component(OBJECT(dimm->hostmem))); + return false; + } + return true; } @@ -160,6 +187,20 @@ static int spapr_dt_nvdimm(SpaprMachineState *spapr, void *fdt, "operating-system"))); _FDT(fdt_setprop(fdt, child_offset, "ibm,cache-flush-required", NULL, 0)); + if (object_dynamic_cast(OBJECT(nvdimm), TYPE_SPAPR_NVDIMM)) { + bool is_pmem = false, pmem_override = false; + PCDIMMDevice *dimm = PC_DIMM(nvdimm); + HostMemoryBackend *hostmem = dimm->hostmem; + + is_pmem = object_property_get_bool(OBJECT(hostmem), "pmem", NULL); + pmem_override = object_property_get_bool(OBJECT(nvdimm), + "pmem-override", NULL); + if (!is_pmem || pmem_override) { + _FDT(fdt_setprop(fdt, child_offset, "ibm,hcall-flush-required", + NULL, 0)); + } + } + return child_offset; } @@ -375,6 +416,303 @@ static target_ulong h_scm_bind_mem(PowerPCCPU *cpu, SpaprMachineState *spapr, return H_SUCCESS; } +typedef struct SpaprNVDIMMDeviceFlushState { + uint64_t continue_token; + int64_t hcall_ret; + uint32_t drcidx; + + QLIST_ENTRY(SpaprNVDIMMDeviceFlushState) node; +} SpaprNVDIMMDeviceFlushState; + +typedef struct SpaprNVDIMMDevice SpaprNVDIMMDevice; +struct SpaprNVDIMMDevice { + /* private */ + NVDIMMDevice parent_obj; + + bool hcall_flush_required; + uint64_t nvdimm_flush_token; + QLIST_HEAD(, SpaprNVDIMMDeviceFlushState) pending_nvdimm_flush_states; + QLIST_HEAD(, SpaprNVDIMMDeviceFlushState) completed_nvdimm_flush_states; + + /* public */ + + /* + * The 'on' value for this property forced the qemu to enable the hcall + * flush for the nvdimm device even if the backend is a pmem + */ + bool pmem_override; +}; + +static int flush_worker_cb(void *opaque) +{ + SpaprNVDIMMDeviceFlushState *state = opaque; + SpaprDrc *drc = spapr_drc_by_index(state->drcidx); + PCDIMMDevice *dimm; + HostMemoryBackend *backend; + int backend_fd; + + g_assert(drc != NULL); + + dimm = PC_DIMM(drc->dev); + backend = MEMORY_BACKEND(dimm->hostmem); + backend_fd = memory_region_get_fd(&backend->mr); + + if (object_property_get_bool(OBJECT(backend), "pmem", NULL)) { + MemoryRegion *mr = host_memory_backend_get_memory(dimm->hostmem); + void *ptr = memory_region_get_ram_ptr(mr); + size_t size = object_property_get_uint(OBJECT(dimm), PC_DIMM_SIZE_PROP, + NULL); + + /* flush pmem backend */ + pmem_persist(ptr, size); + 
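        /*
         * pmem_persist() flushes the CPU caches for the mapped range, which
         * should be enough for a real persistent-memory backend; the
         * qemu_fdatasync() fallback below is only needed when the backing
         * object is a regular file.
         */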
} else { + /* flush raw backing image */ + if (qemu_fdatasync(backend_fd) < 0) { + error_report("papr_scm: Could not sync nvdimm to backend file: %s", + strerror(errno)); + return H_HARDWARE; + } + } + + return H_SUCCESS; +} + +static void spapr_nvdimm_flush_completion_cb(void *opaque, int hcall_ret) +{ + SpaprNVDIMMDeviceFlushState *state = opaque; + SpaprDrc *drc = spapr_drc_by_index(state->drcidx); + SpaprNVDIMMDevice *s_nvdimm; + + g_assert(drc != NULL); + + s_nvdimm = SPAPR_NVDIMM(drc->dev); + + state->hcall_ret = hcall_ret; + QLIST_REMOVE(state, node); + QLIST_INSERT_HEAD(&s_nvdimm->completed_nvdimm_flush_states, state, node); +} + +static int spapr_nvdimm_flush_post_load(void *opaque, int version_id) +{ + SpaprNVDIMMDevice *s_nvdimm = (SpaprNVDIMMDevice *)opaque; + SpaprNVDIMMDeviceFlushState *state; + ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context()); + HostMemoryBackend *backend = MEMORY_BACKEND(PC_DIMM(s_nvdimm)->hostmem); + bool is_pmem = object_property_get_bool(OBJECT(backend), "pmem", NULL); + bool pmem_override = object_property_get_bool(OBJECT(s_nvdimm), + "pmem-override", NULL); + bool dest_hcall_flush_required = pmem_override || !is_pmem; + + if (!s_nvdimm->hcall_flush_required && dest_hcall_flush_required) { + error_report("The file backend for the spapr-nvdimm device %s at " + "source is a pmem, use pmem=on and pmem-override=off to " + "continue.", DEVICE(s_nvdimm)->id); + return -EINVAL; + } + if (s_nvdimm->hcall_flush_required && !dest_hcall_flush_required) { + error_report("The guest expects hcall-flush support for the " + "spapr-nvdimm device %s, use pmem_override=on to " + "continue.", DEVICE(s_nvdimm)->id); + return -EINVAL; + } + + QLIST_FOREACH(state, &s_nvdimm->pending_nvdimm_flush_states, node) { + thread_pool_submit_aio(pool, flush_worker_cb, state, + spapr_nvdimm_flush_completion_cb, state); + } + + return 0; +} + +static const VMStateDescription vmstate_spapr_nvdimm_flush_state = { + .name = "spapr_nvdimm_flush_state", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(continue_token, SpaprNVDIMMDeviceFlushState), + VMSTATE_INT64(hcall_ret, SpaprNVDIMMDeviceFlushState), + VMSTATE_UINT32(drcidx, SpaprNVDIMMDeviceFlushState), + VMSTATE_END_OF_LIST() + }, +}; + +const VMStateDescription vmstate_spapr_nvdimm_states = { + .name = "spapr_nvdimm_states", + .version_id = 1, + .minimum_version_id = 1, + .post_load = spapr_nvdimm_flush_post_load, + .fields = (VMStateField[]) { + VMSTATE_BOOL(hcall_flush_required, SpaprNVDIMMDevice), + VMSTATE_UINT64(nvdimm_flush_token, SpaprNVDIMMDevice), + VMSTATE_QLIST_V(completed_nvdimm_flush_states, SpaprNVDIMMDevice, 1, + vmstate_spapr_nvdimm_flush_state, + SpaprNVDIMMDeviceFlushState, node), + VMSTATE_QLIST_V(pending_nvdimm_flush_states, SpaprNVDIMMDevice, 1, + vmstate_spapr_nvdimm_flush_state, + SpaprNVDIMMDeviceFlushState, node), + VMSTATE_END_OF_LIST() + }, +}; + +/* + * Assign a token and reserve it for the new flush state. + */ +static SpaprNVDIMMDeviceFlushState *spapr_nvdimm_init_new_flush_state( + SpaprNVDIMMDevice *spapr_nvdimm) +{ + SpaprNVDIMMDeviceFlushState *state; + + state = g_malloc0(sizeof(*state)); + + spapr_nvdimm->nvdimm_flush_token++; + /* Token zero is presumed as no job pending. 
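A wrapped token would alias that "no job" value and make a live request untrackable; a monotonically increasing 64-bit counter is not expected to wrap in practice.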
Assert on overflow to zero */ + g_assert(spapr_nvdimm->nvdimm_flush_token != 0); + + state->continue_token = spapr_nvdimm->nvdimm_flush_token; + + QLIST_INSERT_HEAD(&spapr_nvdimm->pending_nvdimm_flush_states, state, node); + + return state; +} + +/* + * spapr_nvdimm_finish_flushes + * Waits for all pending flush requests to complete + * their execution and free the states + */ +void spapr_nvdimm_finish_flushes(void) +{ + SpaprNVDIMMDeviceFlushState *state, *next; + GSList *list, *nvdimms; + + /* + * Called on reset path, the main loop thread which calls + * the pending BHs has gotten out running in the reset path, + * finally reaching here. Other code path being guest + * h_client_architecture_support, thats early boot up. + */ + nvdimms = nvdimm_get_device_list(); + for (list = nvdimms; list; list = list->next) { + NVDIMMDevice *nvdimm = list->data; + if (object_dynamic_cast(OBJECT(nvdimm), TYPE_SPAPR_NVDIMM)) { + SpaprNVDIMMDevice *s_nvdimm = SPAPR_NVDIMM(nvdimm); + while (!QLIST_EMPTY(&s_nvdimm->pending_nvdimm_flush_states)) { + aio_poll(qemu_get_aio_context(), true); + } + + QLIST_FOREACH_SAFE(state, &s_nvdimm->completed_nvdimm_flush_states, + node, next) { + QLIST_REMOVE(state, node); + g_free(state); + } + } + } + g_slist_free(nvdimms); +} + +/* + * spapr_nvdimm_get_flush_status + * Fetches the status of the hcall worker and returns + * H_LONG_BUSY_ORDER_10_MSEC if the worker is still running. + */ +static int spapr_nvdimm_get_flush_status(SpaprNVDIMMDevice *s_nvdimm, + uint64_t token) +{ + SpaprNVDIMMDeviceFlushState *state, *node; + + QLIST_FOREACH(state, &s_nvdimm->pending_nvdimm_flush_states, node) { + if (state->continue_token == token) { + return H_LONG_BUSY_ORDER_10_MSEC; + } + } + + QLIST_FOREACH_SAFE(state, &s_nvdimm->completed_nvdimm_flush_states, + node, node) { + if (state->continue_token == token) { + int ret = state->hcall_ret; + QLIST_REMOVE(state, node); + g_free(state); + return ret; + } + } + + /* If not found in complete list too, invalid token */ + return H_P2; +} + +/* + * H_SCM_FLUSH + * Input: drc_index, continue-token + * Out: continue-token + * Return Value: H_SUCCESS, H_Parameter, H_P2, H_LONG_BUSY_ORDER_10_MSEC, + * H_UNSUPPORTED + * + * Given a DRC Index Flush the data to backend NVDIMM device. The hcall returns + * H_LONG_BUSY_ORDER_10_MSEC when the flush takes longer time and the hcall + * needs to be issued multiple times in order to be completely serviced. The + * continue-token from the output to be passed in the argument list of + * subsequent hcalls until the hcall is completely serviced at which point + * H_SUCCESS or other error is returned. 
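 *
 * A typical guest-side sequence is therefore (sketch, not PAPR text):
 *
 *   rc = H_SCM_FLUSH(drc_index, 0);
 *   while (H_IS_LONG_BUSY(rc)) {
 *       /* the continue-token is returned in r4 */
 *       rc = H_SCM_FLUSH(drc_index, continue_token);
 *   }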
+ */ +static target_ulong h_scm_flush(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + int ret; + uint32_t drc_index = args[0]; + uint64_t continue_token = args[1]; + SpaprDrc *drc = spapr_drc_by_index(drc_index); + PCDIMMDevice *dimm; + HostMemoryBackend *backend = NULL; + SpaprNVDIMMDeviceFlushState *state; + ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context()); + int fd; + + if (!drc || !drc->dev || + spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) { + return H_PARAMETER; + } + + dimm = PC_DIMM(drc->dev); + if (!object_dynamic_cast(OBJECT(dimm), TYPE_SPAPR_NVDIMM)) { + return H_PARAMETER; + } + if (continue_token == 0) { + bool is_pmem = false, pmem_override = false; + backend = MEMORY_BACKEND(dimm->hostmem); + fd = memory_region_get_fd(&backend->mr); + + if (fd < 0) { + return H_UNSUPPORTED; + } + + is_pmem = object_property_get_bool(OBJECT(backend), "pmem", NULL); + pmem_override = object_property_get_bool(OBJECT(dimm), + "pmem-override", NULL); + if (is_pmem && !pmem_override) { + return H_UNSUPPORTED; + } + + state = spapr_nvdimm_init_new_flush_state(SPAPR_NVDIMM(dimm)); + if (!state) { + return H_HARDWARE; + } + + state->drcidx = drc_index; + + thread_pool_submit_aio(pool, flush_worker_cb, state, + spapr_nvdimm_flush_completion_cb, state); + + continue_token = state->continue_token; + } + + ret = spapr_nvdimm_get_flush_status(SPAPR_NVDIMM(dimm), continue_token); + if (H_IS_LONG_BUSY(ret)) { + args[0] = continue_token; + } + + return ret; +} + static target_ulong h_scm_unbind_mem(PowerPCCPU *cpu, SpaprMachineState *spapr, target_ulong opcode, target_ulong *args) { @@ -523,6 +861,70 @@ static void spapr_scm_register_types(void) spapr_register_hypercall(H_SCM_UNBIND_MEM, h_scm_unbind_mem); spapr_register_hypercall(H_SCM_UNBIND_ALL, h_scm_unbind_all); spapr_register_hypercall(H_SCM_HEALTH, h_scm_health); + spapr_register_hypercall(H_SCM_FLUSH, h_scm_flush); } type_init(spapr_scm_register_types) + +static void spapr_nvdimm_realize(NVDIMMDevice *dimm, Error **errp) +{ + SpaprNVDIMMDevice *s_nvdimm = SPAPR_NVDIMM(dimm); + HostMemoryBackend *backend = MEMORY_BACKEND(PC_DIMM(dimm)->hostmem); + bool is_pmem = object_property_get_bool(OBJECT(backend), "pmem", NULL); + bool pmem_override = object_property_get_bool(OBJECT(dimm), "pmem-override", + NULL); + if (!is_pmem || pmem_override) { + s_nvdimm->hcall_flush_required = true; + } + + vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY, + &vmstate_spapr_nvdimm_states, dimm); +} + +static void spapr_nvdimm_unrealize(NVDIMMDevice *dimm) +{ + vmstate_unregister(NULL, &vmstate_spapr_nvdimm_states, dimm); +} + +static Property spapr_nvdimm_properties[] = { +#ifdef CONFIG_LIBPMEM + DEFINE_PROP_BOOL("pmem-override", SpaprNVDIMMDevice, pmem_override, false), +#endif + DEFINE_PROP_END_OF_LIST(), +}; + +static void spapr_nvdimm_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + NVDIMMClass *nvc = NVDIMM_CLASS(oc); + + nvc->realize = spapr_nvdimm_realize; + nvc->unrealize = spapr_nvdimm_unrealize; + + device_class_set_props(dc, spapr_nvdimm_properties); +} + +static void spapr_nvdimm_init(Object *obj) +{ + SpaprNVDIMMDevice *s_nvdimm = SPAPR_NVDIMM(obj); + + s_nvdimm->hcall_flush_required = false; + QLIST_INIT(&s_nvdimm->pending_nvdimm_flush_states); + QLIST_INIT(&s_nvdimm->completed_nvdimm_flush_states); +} + +static TypeInfo spapr_nvdimm_info = { + .name = TYPE_SPAPR_NVDIMM, + .parent = TYPE_NVDIMM, + .class_init = spapr_nvdimm_class_init, + .class_size = 
sizeof(SPAPRNVDIMMClass), + .instance_size = sizeof(SpaprNVDIMMDevice), + .instance_init = spapr_nvdimm_init, +}; + +static void spapr_nvdimm_register_types(void) +{ + type_register_static(&spapr_nvdimm_info); +} + +type_init(spapr_nvdimm_register_types) diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index 7a725855f9..7b7618d5da 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -782,33 +782,30 @@ static AddressSpace *spapr_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn) static char *spapr_phb_vfio_get_loc_code(SpaprPhbState *sphb, PCIDevice *pdev) { - char *path = NULL, *buf = NULL, *host = NULL; + g_autofree char *path = NULL; + g_autofree char *host = NULL; + g_autofree char *devspec = NULL; + char *buf = NULL; /* Get the PCI VFIO host id */ host = object_property_get_str(OBJECT(pdev), "host", NULL); if (!host) { - goto err_out; + return NULL; } /* Construct the path of the file that will give us the DT location */ path = g_strdup_printf("/sys/bus/pci/devices/%s/devspec", host); - g_free(host); - if (!g_file_get_contents(path, &buf, NULL, NULL)) { - goto err_out; + if (!g_file_get_contents(path, &devspec, NULL, NULL)) { + return NULL; } - g_free(path); /* Construct and read from host device tree the loc-code */ - path = g_strdup_printf("/proc/device-tree%s/ibm,loc-code", buf); - g_free(buf); + g_free(path); + path = g_strdup_printf("/proc/device-tree%s/ibm,loc-code", devspec); if (!g_file_get_contents(path, &buf, NULL, NULL)) { - goto err_out; + return NULL; } return buf; - -err_out: - g_free(path); - return NULL; } static char *spapr_phb_get_loc_code(SpaprPhbState *sphb, PCIDevice *pdev) @@ -1321,8 +1318,7 @@ static int spapr_dt_pci_bus(SpaprPhbState *sphb, PCIBus *bus, RESOURCE_CELLS_SIZE)); assert(bus); - pci_for_each_device_reverse(bus, pci_bus_num(bus), - spapr_dt_pci_device_cb, &cbinfo); + pci_for_each_device_under_bus_reverse(bus, spapr_dt_pci_device_cb, &cbinfo); if (cbinfo.err) { return cbinfo.err; } @@ -1983,7 +1979,7 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp) * our memory slot is of page size granularity. 
*/ if (kvm_enabled()) { - msi_window_size = qemu_real_host_page_size; + msi_window_size = qemu_real_host_page_size(); } memory_region_init_io(&sphb->msiwindow, OBJECT(sphb), &spapr_msi_ops, spapr, @@ -2049,7 +2045,7 @@ static int spapr_phb_children_reset(Object *child, void *opaque) DeviceState *dev = (DeviceState *) object_dynamic_cast(child, TYPE_DEVICE); if (dev) { - device_legacy_reset(dev); + device_cold_reset(dev); } return 0; @@ -2072,6 +2068,7 @@ void spapr_phb_dma_reset(SpaprPhbState *sphb) tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[0]); spapr_tce_table_enable(tcet, SPAPR_TCE_PAGE_SHIFT, sphb->dma_win_addr, sphb->dma_win_size >> SPAPR_TCE_PAGE_SHIFT); + tcet->def_win = true; } static void spapr_phb_reset(DeviceState *qdev) @@ -2310,8 +2307,8 @@ static void spapr_phb_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev, return; } - pci_for_each_device(sec_bus, pci_bus_num(sec_bus), - spapr_phb_pci_enumerate_bridge, bus_no); + pci_for_each_device_under_bus(sec_bus, spapr_phb_pci_enumerate_bridge, + bus_no); pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1); } @@ -2320,9 +2317,8 @@ static void spapr_phb_pci_enumerate(SpaprPhbState *phb) PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus; unsigned int bus_no = 0; - pci_for_each_device(bus, pci_bus_num(bus), - spapr_phb_pci_enumerate_bridge, - &bus_no); + pci_for_each_device_under_bus(bus, spapr_phb_pci_enumerate_bridge, + &bus_no); } @@ -2365,8 +2361,9 @@ int spapr_dt_phb(SpaprMachineState *spapr, SpaprPhbState *phb, cpu_to_be32(RTAS_IBM_REMOVE_PE_DMA_WINDOW) }; uint32_t ddw_extensions[] = { - cpu_to_be32(1), - cpu_to_be32(RTAS_IBM_RESET_PE_DMA_WINDOW) + cpu_to_be32(2), + cpu_to_be32(RTAS_IBM_RESET_PE_DMA_WINDOW), + cpu_to_be32(1), /* 1: ibm,query-pe-dma-window 6 outputs, PAPR 2.8 */ }; SpaprTceTable *tcet; SpaprDrc *drc; diff --git a/hw/ppc/spapr_pci_nvlink2.c b/hw/ppc/spapr_pci_nvlink2.c index 8ef9b40a18..2a8a11be1d 100644 --- a/hw/ppc/spapr_pci_nvlink2.c +++ b/hw/ppc/spapr_pci_nvlink2.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qemu-common.h" #include "hw/pci/pci.h" #include "hw/pci-host/spapr.h" #include "hw/ppc/spapr_numa.h" @@ -164,8 +163,7 @@ static void spapr_phb_pci_collect_nvgpu(PCIBus *bus, PCIDevice *pdev, return; } - pci_for_each_device(sec_bus, pci_bus_num(sec_bus), - spapr_phb_pci_collect_nvgpu, opaque); + pci_for_each_device_under_bus(sec_bus, spapr_phb_pci_collect_nvgpu, opaque); } void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp) @@ -183,8 +181,8 @@ void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp) sphb->nvgpus->nv2_atsd_current = sphb->nv2_atsd_win_addr; bus = PCI_HOST_BRIDGE(sphb)->bus; - pci_for_each_device(bus, pci_bus_num(bus), - spapr_phb_pci_collect_nvgpu, sphb->nvgpus); + pci_for_each_device_under_bus(bus, spapr_phb_pci_collect_nvgpu, + sphb->nvgpus); if (sphb->nvgpus->err) { error_propagate(errp, sphb->nvgpus->err); @@ -321,7 +319,7 @@ void spapr_phb_nvgpu_populate_dt(SpaprPhbState *sphb, void *fdt, int bus_off, void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt) { int i, j, linkidx, npuoff; - char *npuname; + g_autofree char *npuname = NULL; if (!sphb->nvgpus) { return; @@ -334,11 +332,10 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt) _FDT(fdt_setprop_cell(fdt, npuoff, "#size-cells", 0)); /* Advertise NPU as POWER9 so the guest can enable NPU2 contexts */ _FDT((fdt_setprop_string(fdt, npuoff, "compatible", "ibm,power9-npu"))); - g_free(npuname); for (i = 0, linkidx = 0; i < sphb->nvgpus->num; ++i) { 
for (j = 0; j < sphb->nvgpus->slots[i].linknum; ++j) { - char *linkname = g_strdup_printf("link@%d", linkidx); + g_autofree char *linkname = g_strdup_printf("link@%d", linkidx); int off = fdt_add_subnode(fdt, npuoff, linkname); _FDT(off); @@ -348,7 +345,6 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt) _FDT((fdt_setprop_cell(fdt, off, "phandle", PHANDLE_NVLINK(sphb, i, j)))); _FDT((fdt_setprop_cell(fdt, off, "ibm,npu-link-index", linkidx))); - g_free(linkname); ++linkidx; } } @@ -361,7 +357,8 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt) &error_abort); uint64_t size = object_property_get_uint(nv_mrobj, "size", NULL); uint64_t mem_reg[2] = { cpu_to_be64(nvslot->gpa), cpu_to_be64(size) }; - char *mem_name = g_strdup_printf("memory@%"PRIx64, nvslot->gpa); + g_autofree char *mem_name = g_strdup_printf("memory@%"PRIx64, + nvslot->gpa); int off = fdt_add_subnode(fdt, 0, mem_name); _FDT(off); @@ -379,7 +376,6 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt) sizeof(mem_reg)))); _FDT((fdt_setprop_cell(fdt, off, "phandle", PHANDLE_GPURAM(sphb, i)))); - g_free(mem_name); } } @@ -401,7 +397,7 @@ void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, int offset, continue; } if (dev == nvslot->gpdev) { - uint32_t npus[nvslot->linknum]; + g_autofree uint32_t *npus = g_new(uint32_t, nvslot->linknum); for (j = 0; j < nvslot->linknum; ++j) { PCIDevice *npdev = nvslot->links[j].npdev; diff --git a/hw/ppc/spapr_pci_vfio.c b/hw/ppc/spapr_pci_vfio.c index f3b37df8ea..2a76b4e0b5 100644 --- a/hw/ppc/spapr_pci_vfio.c +++ b/hw/ppc/spapr_pci_vfio.c @@ -164,8 +164,8 @@ static void spapr_phb_vfio_eeh_clear_dev_msix(PCIBus *bus, static void spapr_phb_vfio_eeh_clear_bus_msix(PCIBus *bus, void *opaque) { - pci_for_each_device(bus, pci_bus_num(bus), - spapr_phb_vfio_eeh_clear_dev_msix, NULL); + pci_for_each_device_under_bus(bus, spapr_phb_vfio_eeh_clear_dev_msix, + NULL); } static void spapr_phb_vfio_eeh_pre_reset(SpaprPhbState *sphb) diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c index b476382ae6..3f664ea02c 100644 --- a/hw/ppc/spapr_rtas.c +++ b/hw/ppc/spapr_rtas.c @@ -214,9 +214,9 @@ static void rtas_stop_self(PowerPCCPU *cpu, SpaprMachineState *spapr, * guest. * For the same reason, set PSSCR_EC. */ - ppc_store_lpcr(cpu, env->spr[SPR_LPCR] & ~pcc->lpcr_pm); env->spr[SPR_PSSCR] |= PSSCR_EC; cs->halted = 1; + ppc_store_lpcr(cpu, env->spr[SPR_LPCR] & ~pcc->lpcr_pm); kvmppc_set_reg_ppc_online(cpu, 0); qemu_cpu_kick(cs); } @@ -279,30 +279,29 @@ static void rtas_ibm_get_system_parameter(PowerPCCPU *cpu, switch (parameter) { case RTAS_SYSPARM_SPLPAR_CHARACTERISTICS: { - char *param_val = g_strdup_printf("MaxEntCap=%d," - "DesMem=%" PRIu64 "," - "DesProcs=%d," - "MaxPlatProcs=%d", - ms->smp.max_cpus, - ms->ram_size / MiB, - ms->smp.cpus, - ms->smp.max_cpus); + g_autofree char *param_val = g_strdup_printf("MaxEntCap=%d," + "DesMem=%" PRIu64 "," + "DesProcs=%d," + "MaxPlatProcs=%d", + ms->smp.max_cpus, + ms->ram_size / MiB, + ms->smp.cpus, + ms->smp.max_cpus); if (pcc->n_host_threads > 0) { - char *hostthr_val, *old = param_val; - /* * Add HostThrs property. This property is not present in PAPR but * is expected by some guests to communicate the number of physical * host threads per core on the system so that they can scale * information which varies based on the thread configuration. 
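     *
     * In other words, rets[2]:rets[3] carry the 64-bit count 0x20_0000_0000
     * (2^37) split into high and low 32-bit words; 2^37 TCEs of 2 MiB each
     * cover a 2^58-byte DMA window.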
*/ - hostthr_val = g_strdup_printf(",HostThrs=%d", pcc->n_host_threads); + g_autofree char *hostthr_val = g_strdup_printf(",HostThrs=%d", + pcc->n_host_threads); + char *old = param_val; + param_val = g_strconcat(param_val, hostthr_val, NULL); - g_free(hostthr_val); g_free(old); } ret = sysparm_st(buffer, length, param_val, strlen(param_val) + 1); - g_free(param_val); break; } case RTAS_SYSPARM_DIAGNOSTICS_RUN_MODE: { @@ -475,16 +474,16 @@ static void rtas_ibm_nmi_interlock(PowerPCCPU *cpu, if (spapr->fwnmi_machine_check_interlock != cpu->vcpu_id) { /* - * The vCPU that hit the NMI should invoke "ibm,nmi-interlock" + * The vCPU that hit the NMI should invoke "ibm,nmi-interlock" * This should be PARAM_ERROR, but Linux calls "ibm,nmi-interlock" - * for system reset interrupts, despite them not being interlocked. - * PowerVM silently ignores this and returns success here. Returning - * failure causes Linux to print the error "FWNMI: nmi-interlock - * failed: -3", although no other apparent ill effects, this is a - * regression for the user when enabling FWNMI. So for now, match - * PowerVM. When most Linux clients are fixed, this could be - * changed. - */ + * for system reset interrupts, despite them not being interlocked. + * PowerVM silently ignores this and returns success here. Returning + * failure causes Linux to print the error "FWNMI: nmi-interlock + * failed: -3", although no other apparent ill effects, this is a + * regression for the user when enabling FWNMI. So for now, match + * PowerVM. When most Linux clients are fixed, this could be + * changed. + */ rtas_st(rets, 0, RTAS_OUT_SUCCESS); return; } diff --git a/hw/ppc/spapr_rtas_ddw.c b/hw/ppc/spapr_rtas_ddw.c index 3e826e1308..7ba11382bc 100644 --- a/hw/ppc/spapr_rtas_ddw.c +++ b/hw/ppc/spapr_rtas_ddw.c @@ -72,6 +72,7 @@ static uint32_t spapr_page_mask_to_query_mask(uint64_t page_mask) const struct { int shift; uint32_t mask; } masks[] = { { 12, RTAS_DDW_PGSIZE_4K }, { 16, RTAS_DDW_PGSIZE_64K }, + { 21, RTAS_DDW_PGSIZE_2M }, { 24, RTAS_DDW_PGSIZE_16M }, { 25, RTAS_DDW_PGSIZE_32M }, { 26, RTAS_DDW_PGSIZE_64M }, @@ -99,7 +100,7 @@ static void rtas_ibm_query_pe_dma_window(PowerPCCPU *cpu, uint64_t buid; uint32_t avail, addr, pgmask = 0; - if ((nargs != 3) || (nret != 5)) { + if ((nargs != 3) || ((nret != 5) && (nret != 6))) { goto param_error_exit; } @@ -117,9 +118,20 @@ static void rtas_ibm_query_pe_dma_window(PowerPCCPU *cpu, rtas_st(rets, 0, RTAS_OUT_SUCCESS); rtas_st(rets, 1, avail); - rtas_st(rets, 2, 0x80000000); /* The largest window we can possibly have */ - rtas_st(rets, 3, pgmask); - rtas_st(rets, 4, 0); /* DMA migration mask, not supported */ + if (nret == 6) { + /* + * Set the Max TCE number as 1<<(58-21) = 0x20.0000.0000 + * 1<<59 is the huge window start and 21 is 2M page shift. 
+ */ + rtas_st(rets, 2, 0x00000020); + rtas_st(rets, 3, 0x00000000); + rtas_st(rets, 4, pgmask); + rtas_st(rets, 5, 0); /* DMA migration mask, not supported */ + } else { + rtas_st(rets, 2, 0x80000000); + rtas_st(rets, 3, pgmask); + rtas_st(rets, 4, 0); /* DMA migration mask, not supported */ + } trace_spapr_iommu_ddw_query(buid, addr, avail, 0x80000000, pgmask); return; @@ -214,6 +226,7 @@ static void rtas_ibm_remove_pe_dma_window(PowerPCCPU *cpu, SpaprPhbState *sphb; SpaprTceTable *tcet; uint32_t liobn; + bool def_win_removed; if ((nargs != 1) || (nret != 1)) { goto param_error_exit; @@ -230,9 +243,23 @@ static void rtas_ibm_remove_pe_dma_window(PowerPCCPU *cpu, goto param_error_exit; } + def_win_removed = tcet->def_win; spapr_tce_table_disable(tcet); trace_spapr_iommu_ddw_remove(liobn); + /* + * PAPR+/LoPAPR says: + * The platform must restore the default DMA window for the PE on a call + * to the ibm,remove-pe-dma-window RTAS call when all of the following + * are true: + * a. The call removes the last DMA window remaining for the PE. + * b. The DMA window being removed is not the default window + */ + if (spapr_phb_get_active_win_num(sphb) == 0 && !def_win_removed) { + spapr_phb_dma_reset(sphb); + trace_spapr_iommu_ddw_reset(sphb->buid, 0); + } + rtas_st(rets, 0, RTAS_OUT_SUCCESS); return; diff --git a/hw/ppc/spapr_rtc.c b/hw/ppc/spapr_rtc.c index fba4dfca35..d55b4b0c50 100644 --- a/hw/ppc/spapr_rtc.c +++ b/hw/ppc/spapr_rtc.c @@ -26,13 +26,13 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/timer.h" #include "sysemu/sysemu.h" +#include "sysemu/rtc.h" #include "hw/ppc/spapr.h" #include "migration/vmstate.h" #include "qapi/error.h" -#include "qapi/qapi-events-misc-target.h" +#include "qapi/qapi-events-misc.h" #include "qemu/cutils.h" #include "qemu/module.h" @@ -97,6 +97,7 @@ static void rtas_set_time_of_day(PowerPCCPU *cpu, SpaprMachineState *spapr, uint32_t nret, target_ulong rets) { SpaprRtcState *rtc = &spapr->rtc; + g_autofree const char *qom_path = NULL; struct tm tm; time_t new_s; int64_t host_ns; @@ -120,7 +121,8 @@ static void rtas_set_time_of_day(PowerPCCPU *cpu, SpaprMachineState *spapr, } /* Generate a monitor event for the change */ - qapi_event_send_rtc_change(qemu_timedate_diff(&tm)); + qom_path = object_get_canonical_path(OBJECT(rtc)); + qapi_event_send_rtc_change(qemu_timedate_diff(&tm), qom_path); host_ns = qemu_clock_get_ns(rtc_clock); diff --git a/hw/ppc/spapr_softmmu.c b/hw/ppc/spapr_softmmu.c index 6c6b86dd3c..5170a33369 100644 --- a/hw/ppc/spapr_softmmu.c +++ b/hw/ppc/spapr_softmmu.c @@ -1,25 +1,11 @@ #include "qemu/osdep.h" #include "qemu/cutils.h" -#include "qapi/error.h" -#include "sysemu/hw_accel.h" -#include "sysemu/runstate.h" -#include "qemu/log.h" -#include "qemu/main-loop.h" -#include "qemu/module.h" -#include "qemu/error-report.h" +#include "qemu/memalign.h" #include "cpu.h" -#include "exec/exec-all.h" #include "helper_regs.h" #include "hw/ppc/spapr.h" -#include "hw/ppc/spapr_cpu_core.h" #include "mmu-hash64.h" -#include "cpu-models.h" -#include "trace.h" -#include "kvm_ppc.h" -#include "hw/ppc/fdt.h" -#include "hw/ppc/spapr_ovec.h" #include "mmu-book3s-v3.h" -#include "hw/mem/memory-device.h" static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex) { @@ -441,7 +427,7 @@ static void new_hpte_store(void *htab, uint64_t pteg, int slot, addr += slot * HASH_PTE_SIZE_64; stq_p(addr, pte0); - stq_p(addr + HASH_PTE_SIZE_64 / 2, pte1); + stq_p(addr + HPTE64_DW1, pte1); } static int rehash_hpte(PowerPCCPU *cpu, diff --git 
a/hw/ppc/spapr_tpm_proxy.c b/hw/ppc/spapr_tpm_proxy.c index 2454086744..e10af35a18 100644 --- a/hw/ppc/spapr_tpm_proxy.c +++ b/hw/ppc/spapr_tpm_proxy.c @@ -11,7 +11,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "sysemu/reset.h" diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c index b59452bcd6..9d4fec2c04 100644 --- a/hw/ppc/spapr_vio.c +++ b/hw/ppc/spapr_vio.c @@ -577,7 +577,7 @@ SpaprVioBus *spapr_vio_bus_init(void) sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); /* Create bus on bridge device */ - qbus = qbus_create(TYPE_SPAPR_VIO_BUS, dev, "spapr-vio"); + qbus = qbus_new(TYPE_SPAPR_VIO_BUS, dev, "spapr-vio"); bus = SPAPR_VIO_BUS(qbus); bus->next_reg = SPAPR_VIO_REG_BASE; @@ -726,7 +726,7 @@ void spapr_dt_vdevice(SpaprVioBus *bus, void *fdt) gchar *spapr_vio_stdout_path(SpaprVioBus *bus) { SpaprVioDevice *dev; - char *name, *path; + g_autofree char *name = NULL; dev = spapr_vty_get_default(bus); if (!dev) { @@ -734,8 +734,6 @@ gchar *spapr_vio_stdout_path(SpaprVioBus *bus) } name = spapr_vio_get_dev_name(DEVICE(dev)); - path = g_strdup_printf("/vdevice/%s", name); - g_free(name); - return path; + return g_strdup_printf("/vdevice/%s", name); } diff --git a/hw/ppc/spapr_vof.c b/hw/ppc/spapr_vof.c index 40ce8fe003..09f29be0b9 100644 --- a/hw/ppc/spapr_vof.c +++ b/hw/ppc/spapr_vof.c @@ -4,7 +4,6 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/error.h" #include "hw/ppc/spapr.h" #include "hw/ppc/spapr_vio.h" @@ -88,8 +87,6 @@ void spapr_vof_reset(SpaprMachineState *spapr, void *fdt, Error **errp) spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT, stack_ptr, spapr->initrd_base, spapr->initrd_size); - /* VOF is 32bit BE so enforce MSR here */ - first_ppc_cpu->env.msr &= ~((1ULL << MSR_SF) | (1ULL << MSR_LE)); /* * At this point the expected allocation map is: diff --git a/hw/ppc/trace-events b/hw/ppc/trace-events index da6e74b80d..f670e8906c 100644 --- a/hw/ppc/trace-events +++ b/hw/ppc/trace-events @@ -95,9 +95,45 @@ vof_write(uint32_t ih, unsigned cb, const char *msg) "ih=0x%x [%u] \"%s\"" vof_avail(uint64_t start, uint64_t end, uint64_t size) "0x%"PRIx64"..0x%"PRIx64" size=0x%"PRIx64 vof_claimed(uint64_t start, uint64_t end, uint64_t size) "0x%"PRIx64"..0x%"PRIx64" size=0x%"PRIx64 +# pnv_sbe.c +pnv_sbe_xscom_ctrl_read(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64 +pnv_sbe_xscom_ctrl_write(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64 +pnv_sbe_xscom_mbox_read(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64 +pnv_sbe_xscom_mbox_write(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64 +pnv_sbe_reg_set_host_doorbell(uint64_t val) "val 0x%" PRIx64 +pnv_sbe_cmd_timer_start(uint64_t ns) "ns 0x%" PRIu64 +pnv_sbe_cmd_timer_stop(void) "" +pnv_sbe_cmd_timer_expired(void) "" +pnv_sbe_msg_recv(uint16_t cmd, uint16_t seq, uint16_t ctrl_flags) "cmd 0x%" PRIx16 " seq %"PRIu16 " ctrl_flags 0x%" PRIx16 + # ppc.c ppc_tb_adjust(uint64_t offs1, uint64_t offs2, int64_t diff, int64_t seconds) "adjusted from 0x%"PRIx64" to 0x%"PRIx64", diff %"PRId64" (%"PRId64"s)" +ppc_tb_load(uint64_t tb) "tb 0x%016" PRIx64 +ppc_tb_store(uint64_t tb, uint64_t offset) "tb 0x%016" PRIx64 " offset 0x%08" PRIx64 +ppc_decr_load(uint64_t tb) "decr 0x%016" PRIx64 +ppc_decr_excp(const char *action) "%s decrementer" +ppc_decr_store(uint32_t nr_bits, uint64_t decr, uint64_t value) "%d-bit 0x%016" PRIx64 " => 
0x%016" PRIx64 + +ppc4xx_fit(uint32_t ir, uint64_t tcr, uint64_t tsr) "ir %d TCR 0x%" PRIx64 " TSR 0x%" PRIx64 +ppc4xx_pit_stop(void) "" +ppc4xx_pit_start(uint64_t reload) "PIT 0x%016" PRIx64 +ppc4xx_pit(uint32_t ar, uint32_t ir, uint64_t tcr, uint64_t tsr, uint64_t reload) "ar %d ir %d TCR 0x%" PRIx64 " TSR 0x%" PRIx64 " PIT 0x%016" PRIx64 +ppc4xx_wdt(uint64_t tcr, uint64_t tsr) "TCR 0x%" PRIx64 " TSR 0x%" PRIx64 +ppc40x_store_pit(uint64_t value) "val 0x%" PRIx64 +ppc40x_store_tcr(uint64_t value) "val 0x%" PRIx64 +ppc40x_store_tsr(uint64_t value) "val 0x%" PRIx64 +ppc40x_set_tb_clk(uint32_t value) "new frequency %" PRIu32 +ppc40x_timers_init(uint32_t value) "frequency %" PRIu32 + +ppc_irq_set(void *env, uint32_t pin, uint32_t level) "env [%p] pin %d level %d" +ppc_irq_set_exit(void *env, uint32_t irq, uint32_t level, uint32_t pending, uint32_t request) "env [%p] irq 0x%05" PRIx32 " level %d => pending 0x%08" PRIx32 " req 0x%08" PRIx32 +ppc_irq_set_state(const char *name, uint32_t level) "\"%s\" level %d" +ppc_irq_reset(const char *name) "%s" +ppc_irq_cpu(const char *action) "%s" + +ppc_dcr_read(uint32_t addr, uint32_t val) "DRCN[0x%x] -> 0x%x" +ppc_dcr_write(uint32_t addr, uint32_t val) "DRCN[0x%x] <- 0x%x" # prep_systemio.c prep_systemio_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x" @@ -121,3 +157,26 @@ ppc440_pcix_update_pim(int idx, uint64_t size, uint64_t la) "Added window %d of ppc440_pcix_update_pom(int idx, uint32_t size, uint64_t la, uint64_t pcia) "Added window %d of size=0x%x from CPU=0x%" PRIx64 " to PCI=0x%" PRIx64 ppc440_pcix_reg_read(uint64_t addr, uint32_t val) "addr 0x%" PRIx64 " = 0x%" PRIx32 ppc440_pcix_reg_write(uint64_t addr, uint32_t val, uint32_t size) "addr 0x%" PRIx64 " = 0x%" PRIx32 " size 0x%" PRIx32 + +# ppc405_boards.c +opba_readb(uint64_t addr, uint32_t val) "addr 0x%" PRIx64 " = 0x%" PRIx32 +opba_writeb(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " = 0x%" PRIx64 + +ppc405_gpio_read(uint64_t addr, uint32_t size) "addr 0x%" PRIx64 " size %d" +ppc405_gpio_write(uint64_t addr, uint32_t size, uint64_t val) "addr 0x%" PRIx64 " size %d = 0x%" PRIx64 + +ocm_update_mappings(uint32_t isarc, uint32_t isacntl, uint32_t dsarc, uint32_t dsacntl, uint32_t ocm_isarc, uint32_t ocm_isacntl, uint32_t ocm_dsarc, uint32_t ocm_dsacntl) "OCM update ISA 0x%08" PRIx32 " 0x%08" PRIx32 " (0x%08" PRIx32" 0x%08" PRIx32 ") DSA 0x%08" PRIx32 " 0x%08" PRIx32" (0x%08" PRIx32 " 0x%08" PRIx32 ")" +ocm_map(const char* prefix, uint32_t isarc) "OCM map %s 0x%08" PRIx32 +ocm_unmap(const char* prefix, uint32_t isarc) "OCM unmap %s 0x%08" PRIx32 + +ppc4xx_gpt_read(uint64_t addr, uint32_t size) "addr 0x%" PRIx64 " size %d" +ppc4xx_gpt_write(uint64_t addr, uint32_t size, uint64_t val) "addr 0x%" PRIx64 " size %d = 0x%" PRIx64 + +ppc405ep_clocks_compute(const char *param, uint32_t param2, uint32_t val) "%s 0x%1" PRIx32 " %d" +ppc405ep_clocks_setup(const char *trace) "%s" + +# ppc4xx_devs.c +ppc4xx_sdram_enable(const char *trace) "%s SDRAM controller" +ppc4xx_sdram_unmap(uint64_t addr, uint64_t size) "Unmap RAM area 0x%" PRIx64 " size 0x%" PRIx64 +ppc4xx_sdram_map(uint64_t addr, uint64_t size) "Map RAM area 0x%" PRIx64 " size 0x%" PRIx64 +ppc4xx_sdram_init(uint64_t base, uint64_t size, uint32_t bcr) "Init RAM area 0x%" PRIx64 " size 0x%" PRIx64 " bcr 0x%x" diff --git a/hw/ppc/virtex_ml507.c b/hw/ppc/virtex_ml507.c index 9c575403b8..13cace229b 100644 --- a/hw/ppc/virtex_ml507.c +++ b/hw/ppc/virtex_ml507.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include 
"qemu/datadir.h" #include "qemu/units.h" #include "cpu.h" @@ -46,6 +45,8 @@ #include "hw/qdev-properties.h" #include "ppc405.h" +#include + #define EPAPR_MAGIC (0x45504150) #define FLASH_SIZE (16 * MiB) @@ -105,16 +106,13 @@ static PowerPCCPU *ppc440_init_xilinx(const char *cpu_type, uint32_t sysclk) /* interrupt controller */ uicdev = qdev_new(TYPE_PPC_UIC); + ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(uicdev), cpu, &error_fatal); + object_unref(OBJECT(uicdev)); uicsbd = SYS_BUS_DEVICE(uicdev); - - object_property_set_link(OBJECT(uicdev), "cpu", OBJECT(cpu), - &error_fatal); - sysbus_realize_and_unref(uicsbd, &error_fatal); - sysbus_connect_irq(uicsbd, PPCUIC_OUTPUT_INT, - ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]); + qdev_get_gpio_in(DEVICE(cpu), PPC40x_INPUT_INT)); sysbus_connect_irq(uicsbd, PPCUIC_OUTPUT_CINT, - ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT]); + qdev_get_gpio_in(DEVICE(cpu), PPC40x_INPUT_CINT)); /* This board doesn't wire anything up to the inputs of the UIC. */ return cpu; @@ -148,11 +146,10 @@ static void main_cpu_reset(void *opaque) } #define BINARY_DEVICE_TREE_FILE "virtex-ml507.dtb" -static int xilinx_load_device_tree(hwaddr addr, - uint32_t ramsize, - hwaddr initrd_base, - hwaddr initrd_size, - const char *kernel_cmdline) +static int xilinx_load_device_tree(MachineState *machine, + hwaddr addr, + hwaddr initrd_base, + hwaddr initrd_size) { char *path; int fdt_size; @@ -194,18 +191,21 @@ static int xilinx_load_device_tree(hwaddr addr, error_report("couldn't set /chosen/linux,initrd-end"); } - r = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", kernel_cmdline); + r = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", + machine->kernel_cmdline); if (r < 0) fprintf(stderr, "couldn't set /chosen/bootargs\n"); cpu_physical_memory_write(addr, fdt, fdt_size); - g_free(fdt); + + /* Set machine->fdt for 'dumpdtb' QMP/HMP command */ + machine->fdt = fdt; + return fdt_size; } static void virtex_init(MachineState *machine) { const char *kernel_filename = machine->kernel_filename; - const char *kernel_cmdline = machine->kernel_cmdline; hwaddr initrd_base = 0; int initrd_size = 0; MemoryRegion *address_space_mem = get_system_memory(); @@ -214,7 +214,7 @@ static void virtex_init(MachineState *machine) CPUPPCState *env; hwaddr ram_base = 0; DriveInfo *dinfo; - qemu_irq irq[32], *cpu_irq; + qemu_irq irq[32], cpu_irq; int kernel_size; int i; @@ -237,12 +237,12 @@ static void virtex_init(MachineState *machine) dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL, 64 * KiB, 1, 0x89, 0x18, 0x0000, 0x0, 1); - cpu_irq = (qemu_irq *) &env->irq_inputs[PPC40x_INPUT_INT]; + cpu_irq = qdev_get_gpio_in(DEVICE(cpu), PPC40x_INPUT_INT); dev = qdev_new("xlnx.xps-intc"); qdev_prop_set_uint32(dev, "kind-of-intr", 0); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, INTC_BASEADDR); - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, cpu_irq[0]); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, cpu_irq); for (i = 0; i < 32; i++) { irq[i] = qdev_get_gpio_in(dev, i); } @@ -298,9 +298,8 @@ static void virtex_init(MachineState *machine) boot_info.fdt = high + (8192 * 2); boot_info.fdt &= ~8191; - xilinx_load_device_tree(boot_info.fdt, machine->ram_size, - initrd_base, initrd_size, - kernel_cmdline); + xilinx_load_device_tree(machine, boot_info.fdt, + initrd_base, initrd_size); } env->load_info = &boot_info; } diff --git a/hw/ppc/vof.c b/hw/ppc/vof.c index 73adc44ec2..18c3f92317 100644 --- a/hw/ppc/vof.c +++ b/hw/ppc/vof.c @@ -10,13 +10,11 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/timer.h" #include "qemu/range.h" #include "qemu/units.h" #include "qemu/log.h" #include "qapi/error.h" -#include "exec/ram_addr.h" #include "exec/address-spaces.h" #include "hw/ppc/vof.h" #include "hw/ppc/fdt.h" @@ -295,7 +293,7 @@ static uint32_t vof_setprop(MachineState *ms, void *fdt, Vof *vof, uint32_t nodeph, uint32_t pname, uint32_t valaddr, uint32_t vallen) { - char propname[OF_PROPNAME_LEN_MAX + 1]; + char propname[OF_PROPNAME_LEN_MAX + 1] = ""; uint32_t ret = PROM_ERROR; int offset, rc; char trval[64] = ""; diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c index 49141d4074..cfd85de3e6 100644 --- a/hw/rdma/rdma_rm.c +++ b/hw/rdma/rdma_rm.c @@ -27,58 +27,58 @@ #define PG_DIR_SZ { TARGET_PAGE_SIZE / sizeof(__u64) } #define PG_TBL_SZ { TARGET_PAGE_SIZE / sizeof(__u64) } -void rdma_dump_device_counters(Monitor *mon, RdmaDeviceResources *dev_res) +void rdma_format_device_counters(RdmaDeviceResources *dev_res, GString *buf) { - monitor_printf(mon, "\ttx : %" PRId64 "\n", - dev_res->stats.tx); - monitor_printf(mon, "\ttx_len : %" PRId64 "\n", - dev_res->stats.tx_len); - monitor_printf(mon, "\ttx_err : %" PRId64 "\n", - dev_res->stats.tx_err); - monitor_printf(mon, "\trx_bufs : %" PRId64 "\n", - dev_res->stats.rx_bufs); - monitor_printf(mon, "\trx_srq : %" PRId64 "\n", - dev_res->stats.rx_srq); - monitor_printf(mon, "\trx_bufs_len : %" PRId64 "\n", - dev_res->stats.rx_bufs_len); - monitor_printf(mon, "\trx_bufs_err : %" PRId64 "\n", - dev_res->stats.rx_bufs_err); - monitor_printf(mon, "\tcomps : %" PRId64 "\n", - dev_res->stats.completions); - monitor_printf(mon, "\tmissing_comps : %" PRId32 "\n", - dev_res->stats.missing_cqe); - monitor_printf(mon, "\tpoll_cq (bk) : %" PRId64 "\n", - dev_res->stats.poll_cq_from_bk); - monitor_printf(mon, "\tpoll_cq_ppoll_to : %" PRId64 "\n", - dev_res->stats.poll_cq_ppoll_to); - monitor_printf(mon, "\tpoll_cq (fe) : %" PRId64 "\n", - dev_res->stats.poll_cq_from_guest); - monitor_printf(mon, "\tpoll_cq_empty : %" PRId64 "\n", - dev_res->stats.poll_cq_from_guest_empty); - monitor_printf(mon, "\tmad_tx : %" PRId64 "\n", - dev_res->stats.mad_tx); - monitor_printf(mon, "\tmad_tx_err : %" PRId64 "\n", - dev_res->stats.mad_tx_err); - monitor_printf(mon, "\tmad_rx : %" PRId64 "\n", - dev_res->stats.mad_rx); - monitor_printf(mon, "\tmad_rx_err : %" PRId64 "\n", - dev_res->stats.mad_rx_err); - monitor_printf(mon, "\tmad_rx_bufs : %" PRId64 "\n", - 
dev_res->stats.mad_rx_bufs); - monitor_printf(mon, "\tmad_rx_bufs_err : %" PRId64 "\n", - dev_res->stats.mad_rx_bufs_err); - monitor_printf(mon, "\tPDs : %" PRId32 "\n", - dev_res->pd_tbl.used); - monitor_printf(mon, "\tMRs : %" PRId32 "\n", - dev_res->mr_tbl.used); - monitor_printf(mon, "\tUCs : %" PRId32 "\n", - dev_res->uc_tbl.used); - monitor_printf(mon, "\tQPs : %" PRId32 "\n", - dev_res->qp_tbl.used); - monitor_printf(mon, "\tCQs : %" PRId32 "\n", - dev_res->cq_tbl.used); - monitor_printf(mon, "\tCEQ_CTXs : %" PRId32 "\n", - dev_res->cqe_ctx_tbl.used); + g_string_append_printf(buf, "\ttx : %" PRId64 "\n", + dev_res->stats.tx); + g_string_append_printf(buf, "\ttx_len : %" PRId64 "\n", + dev_res->stats.tx_len); + g_string_append_printf(buf, "\ttx_err : %" PRId64 "\n", + dev_res->stats.tx_err); + g_string_append_printf(buf, "\trx_bufs : %" PRId64 "\n", + dev_res->stats.rx_bufs); + g_string_append_printf(buf, "\trx_srq : %" PRId64 "\n", + dev_res->stats.rx_srq); + g_string_append_printf(buf, "\trx_bufs_len : %" PRId64 "\n", + dev_res->stats.rx_bufs_len); + g_string_append_printf(buf, "\trx_bufs_err : %" PRId64 "\n", + dev_res->stats.rx_bufs_err); + g_string_append_printf(buf, "\tcomps : %" PRId64 "\n", + dev_res->stats.completions); + g_string_append_printf(buf, "\tmissing_comps : %" PRId32 "\n", + dev_res->stats.missing_cqe); + g_string_append_printf(buf, "\tpoll_cq (bk) : %" PRId64 "\n", + dev_res->stats.poll_cq_from_bk); + g_string_append_printf(buf, "\tpoll_cq_ppoll_to : %" PRId64 "\n", + dev_res->stats.poll_cq_ppoll_to); + g_string_append_printf(buf, "\tpoll_cq (fe) : %" PRId64 "\n", + dev_res->stats.poll_cq_from_guest); + g_string_append_printf(buf, "\tpoll_cq_empty : %" PRId64 "\n", + dev_res->stats.poll_cq_from_guest_empty); + g_string_append_printf(buf, "\tmad_tx : %" PRId64 "\n", + dev_res->stats.mad_tx); + g_string_append_printf(buf, "\tmad_tx_err : %" PRId64 "\n", + dev_res->stats.mad_tx_err); + g_string_append_printf(buf, "\tmad_rx : %" PRId64 "\n", + dev_res->stats.mad_rx); + g_string_append_printf(buf, "\tmad_rx_err : %" PRId64 "\n", + dev_res->stats.mad_rx_err); + g_string_append_printf(buf, "\tmad_rx_bufs : %" PRId64 "\n", + dev_res->stats.mad_rx_bufs); + g_string_append_printf(buf, "\tmad_rx_bufs_err : %" PRId64 "\n", + dev_res->stats.mad_rx_bufs_err); + g_string_append_printf(buf, "\tPDs : %" PRId32 "\n", + dev_res->pd_tbl.used); + g_string_append_printf(buf, "\tMRs : %" PRId32 "\n", + dev_res->mr_tbl.used); + g_string_append_printf(buf, "\tUCs : %" PRId32 "\n", + dev_res->uc_tbl.used); + g_string_append_printf(buf, "\tQPs : %" PRId32 "\n", + dev_res->qp_tbl.used); + g_string_append_printf(buf, "\tCQs : %" PRId32 "\n", + dev_res->cq_tbl.used); + g_string_append_printf(buf, "\tCEQ_CTXs : %" PRId32 "\n", + dev_res->cqe_ctx_tbl.used); } static inline void res_tbl_init(const char *name, RdmaRmResTbl *tbl, diff --git a/hw/rdma/rdma_rm.h b/hw/rdma/rdma_rm.h index e8639909cd..d69a917795 100644 --- a/hw/rdma/rdma_rm.h +++ b/hw/rdma/rdma_rm.h @@ -92,6 +92,6 @@ static inline union ibv_gid *rdma_rm_get_gid(RdmaDeviceResources *dev_res, { return &dev_res->port.gid_tbl[sgid_idx].gid; } -void rdma_dump_device_counters(Monitor *mon, RdmaDeviceResources *dev_res); +void rdma_format_device_counters(RdmaDeviceResources *dev_res, GString *buf); #endif diff --git a/hw/rdma/rdma_utils.c b/hw/rdma/rdma_utils.c index 98df58f689..5a7ef63ad2 100644 --- a/hw/rdma/rdma_utils.c +++ b/hw/rdma/rdma_utils.c @@ -17,29 +17,29 @@ #include "trace.h" #include "rdma_utils.h" -void 
*rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t plen) +void *rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t len) { void *p; - hwaddr len = plen; + dma_addr_t pci_len = len; if (!addr) { rdma_error_report("addr is NULL"); return NULL; } - p = pci_dma_map(dev, addr, &len, DMA_DIRECTION_TO_DEVICE); + p = pci_dma_map(dev, addr, &pci_len, DMA_DIRECTION_TO_DEVICE); if (!p) { rdma_error_report("pci_dma_map fail, addr=0x%"PRIx64", len=%"PRId64, - addr, len); + addr, pci_len); return NULL; } - if (len != plen) { - rdma_pci_dma_unmap(dev, p, len); + if (pci_len != len) { + rdma_pci_dma_unmap(dev, p, pci_len); return NULL; } - trace_rdma_pci_dma_map(addr, p, len); + trace_rdma_pci_dma_map(addr, p, pci_len); return p; } diff --git a/hw/rdma/rdma_utils.h b/hw/rdma/rdma_utils.h index 9fd0efd940..0c6414e7e0 100644 --- a/hw/rdma/rdma_utils.h +++ b/hw/rdma/rdma_utils.h @@ -38,7 +38,7 @@ typedef struct RdmaProtectedGSList { GSList *list; } RdmaProtectedGSList; -void *rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t plen); +void *rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t len); void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len); void rdma_protected_gqueue_init(RdmaProtectedGQueue *list); void rdma_protected_gqueue_destroy(RdmaProtectedGQueue *list); diff --git a/hw/rdma/trace-events b/hw/rdma/trace-events index 9accb14973..c23175120e 100644 --- a/hw/rdma/trace-events +++ b/hw/rdma/trace-events @@ -27,5 +27,5 @@ rdma_rm_alloc_qp(uint32_t rm_qpn, uint32_t backend_qpn, uint8_t qp_type) "rm_qpn rdma_rm_modify_qp(uint32_t qpn, uint32_t attr_mask, int qp_state, uint8_t sgid_idx) "qpn=0x%x, attr_mask=0x%x, qp_state=%d, sgid_idx=%d" # rdma_utils.c -rdma_pci_dma_map(uint64_t addr, void *vaddr, uint64_t len) "0x%"PRIx64" -> %p (len=%" PRId64")" +rdma_pci_dma_map(uint64_t addr, void *vaddr, uint64_t len) "0x%"PRIx64" -> %p (len=%" PRIu64")" rdma_pci_dma_unmap(void *vaddr) "%p" diff --git a/hw/rdma/vmw/pvrdma_cmd.c b/hw/rdma/vmw/pvrdma_cmd.c index da7ddfa548..89db963c46 100644 --- a/hw/rdma/vmw/pvrdma_cmd.c +++ b/hw/rdma/vmw/pvrdma_cmd.c @@ -796,6 +796,12 @@ int pvrdma_exec_cmd(PVRDMADev *dev) dsr_info = &dev->dsr_info; + if (!dsr_info->dsr) { + /* Buggy or malicious guest driver */ + rdma_error_report("Exec command without dsr, req or rsp buffers"); + goto out; + } + if (dsr_info->req->hdr.cmd >= sizeof(cmd_handlers) / sizeof(struct cmd_handler)) { rdma_error_report("Unsupported command"); diff --git a/hw/rdma/vmw/pvrdma_dev_ring.c b/hw/rdma/vmw/pvrdma_dev_ring.c index 42130667a7..598e6adc5e 100644 --- a/hw/rdma/vmw/pvrdma_dev_ring.c +++ b/hw/rdma/vmw/pvrdma_dev_ring.c @@ -41,7 +41,7 @@ int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev, qatomic_set(&ring->ring_state->cons_head, 0); */ ring->npages = npages; - ring->pages = g_malloc0(npages * sizeof(void *)); + ring->pages = g_new0(void *, npages); for (i = 0; i < npages; i++) { if (!tbl[i]) { diff --git a/hw/rdma/vmw/pvrdma_main.c b/hw/rdma/vmw/pvrdma_main.c index 7c0c3551a8..4fc6712025 100644 --- a/hw/rdma/vmw/pvrdma_main.c +++ b/hw/rdma/vmw/pvrdma_main.c @@ -58,24 +58,25 @@ static Property pvrdma_dev_properties[] = { DEFINE_PROP_END_OF_LIST(), }; -static void pvrdma_print_statistics(Monitor *mon, RdmaProvider *obj) +static void pvrdma_format_statistics(RdmaProvider *obj, GString *buf) { PVRDMADev *dev = PVRDMA_DEV(obj); PCIDevice *pdev = PCI_DEVICE(dev); - monitor_printf(mon, "%s, %x.%x\n", pdev->name, PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn)); - 
monitor_printf(mon, "\tcommands : %" PRId64 "\n", - dev->stats.commands); - monitor_printf(mon, "\tregs_reads : %" PRId64 "\n", - dev->stats.regs_reads); - monitor_printf(mon, "\tregs_writes : %" PRId64 "\n", - dev->stats.regs_writes); - monitor_printf(mon, "\tuar_writes : %" PRId64 "\n", - dev->stats.uar_writes); - monitor_printf(mon, "\tinterrupts : %" PRId64 "\n", - dev->stats.interrupts); - rdma_dump_device_counters(mon, &dev->rdma_dev_res); + g_string_append_printf(buf, "%s, %x.%x\n", + pdev->name, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + g_string_append_printf(buf, "\tcommands : %" PRId64 "\n", + dev->stats.commands); + g_string_append_printf(buf, "\tregs_reads : %" PRId64 "\n", + dev->stats.regs_reads); + g_string_append_printf(buf, "\tregs_writes : %" PRId64 "\n", + dev->stats.regs_writes); + g_string_append_printf(buf, "\tuar_writes : %" PRId64 "\n", + dev->stats.uar_writes); + g_string_append_printf(buf, "\tinterrupts : %" PRId64 "\n", + dev->stats.interrupts); + rdma_format_device_counters(&dev->rdma_dev_res, buf); } static void free_dev_ring(PCIDevice *pci_dev, PvrdmaRing *ring, @@ -158,13 +159,13 @@ static void free_dsr(PVRDMADev *dev) free_dev_ring(pci_dev, &dev->dsr_info.cq, dev->dsr_info.cq_ring_state); rdma_pci_dma_unmap(pci_dev, dev->dsr_info.req, - sizeof(union pvrdma_cmd_req)); + sizeof(union pvrdma_cmd_req)); rdma_pci_dma_unmap(pci_dev, dev->dsr_info.rsp, - sizeof(union pvrdma_cmd_resp)); + sizeof(union pvrdma_cmd_resp)); rdma_pci_dma_unmap(pci_dev, dev->dsr_info.dsr, - sizeof(struct pvrdma_device_shared_region)); + sizeof(struct pvrdma_device_shared_region)); dev->dsr_info.dsr = NULL; } @@ -248,7 +249,8 @@ static void init_dsr_dev_caps(PVRDMADev *dev) { struct pvrdma_device_shared_region *dsr; - if (dev->dsr_info.dsr == NULL) { + if (!dev->dsr_info.dsr) { + /* Buggy or malicious guest driver */ rdma_error_report("Can't initialized DSR"); return; } @@ -305,12 +307,7 @@ static int init_msix(PCIDevice *pdev) } for (i = 0; i < RDMA_MAX_INTRS; i++) { - rc = msix_vector_use(PCI_DEVICE(dev), i); - if (rc < 0) { - rdma_error_report("Fail mark MSI-X vector %d", i); - uninit_msix(pdev, i); - return rc; - } + msix_vector_use(PCI_DEVICE(dev), i); } return 0; @@ -607,7 +604,7 @@ static void pvrdma_realize(PCIDevice *pdev, Error **errp) rdma_info_report("Initializing device %s %x.%x", pdev->name, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); - if (TARGET_PAGE_SIZE != qemu_real_host_page_size) { + if (TARGET_PAGE_SIZE != qemu_real_host_page_size()) { error_setg(errp, "Target page size must be the same as host page size"); return; } @@ -699,7 +696,7 @@ static void pvrdma_class_init(ObjectClass *klass, void *data) device_class_set_props(dc, pvrdma_dev_properties); set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); - ir->print_statistics = pvrdma_print_statistics; + ir->format_statistics = pvrdma_format_statistics; } static const TypeInfo pvrdma_info = { diff --git a/hw/rdma/vmw/pvrdma_qp_ops.c b/hw/rdma/vmw/pvrdma_qp_ops.c index 8050287a6c..bd7cbf2bdf 100644 --- a/hw/rdma/vmw/pvrdma_qp_ops.c +++ b/hw/rdma/vmw/pvrdma_qp_ops.c @@ -154,7 +154,7 @@ void pvrdma_qp_send(PVRDMADev *dev, uint32_t qp_handle) CompHandlerCtx *comp_ctx; /* Prepare CQE */ - comp_ctx = g_malloc(sizeof(CompHandlerCtx)); + comp_ctx = g_new(CompHandlerCtx, 1); comp_ctx->dev = dev; comp_ctx->cq_handle = qp->send_cq_handle; comp_ctx->cqe.wr_id = wqe->hdr.wr_id; @@ -217,7 +217,7 @@ void pvrdma_qp_recv(PVRDMADev *dev, uint32_t qp_handle) CompHandlerCtx *comp_ctx; /* Prepare CQE */ - comp_ctx = 
g_malloc(sizeof(CompHandlerCtx)); + comp_ctx = g_new(CompHandlerCtx, 1); comp_ctx->dev = dev; comp_ctx->cq_handle = qp->recv_cq_handle; comp_ctx->cqe.wr_id = wqe->hdr.wr_id; @@ -259,7 +259,7 @@ void pvrdma_srq_recv(PVRDMADev *dev, uint32_t srq_handle) CompHandlerCtx *comp_ctx; /* Prepare CQE */ - comp_ctx = g_malloc(sizeof(CompHandlerCtx)); + comp_ctx = g_new(CompHandlerCtx, 1); comp_ctx->dev = dev; comp_ctx->cq_handle = srq->recv_cq_handle; comp_ctx->cqe.wr_id = wqe->hdr.wr_id; diff --git a/hw/remote/Kconfig b/hw/remote/Kconfig index 08c16e235f..2d6b4f4cf4 100644 --- a/hw/remote/Kconfig +++ b/hw/remote/Kconfig @@ -2,3 +2,7 @@ config MULTIPROCESS bool depends on PCI && PCI_EXPRESS && KVM select REMOTE_PCIHOST + +config VFIO_USER_SERVER + bool + depends on MULTIPROCESS diff --git a/hw/remote/iohub.c b/hw/remote/iohub.c index 547d597f0f..40dfee4bad 100644 --- a/hw/remote/iohub.c +++ b/hw/remote/iohub.c @@ -9,7 +9,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "hw/pci/pci.h" #include "hw/pci/pci_ids.h" diff --git a/hw/remote/iommu.c b/hw/remote/iommu.c new file mode 100644 index 0000000000..1391dd712c --- /dev/null +++ b/hw/remote/iommu.c @@ -0,0 +1,131 @@ +/** + * IOMMU for remote device + * + * Copyright © 2022 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" + +#include "hw/remote/iommu.h" +#include "hw/pci/pci_bus.h" +#include "hw/pci/pci.h" +#include "exec/memory.h" +#include "exec/address-spaces.h" +#include "trace.h" + +/** + * IOMMU for TYPE_REMOTE_MACHINE - manages DMA address space isolation + * for remote machine. It is used by TYPE_VFIO_USER_SERVER. + * + * - Each TYPE_VFIO_USER_SERVER instance handles one PCIDevice on a PCIBus. + * There is one RemoteIommu per PCIBus, so the RemoteIommu tracks multiple + * PCIDevices by maintaining a ->elem_by_devfn mapping. + * + * - memory_region_init_iommu() is not used because vfio-user MemoryRegions + * will be added to the elem->mr container instead. This is more natural + * than implementing the IOMMUMemoryRegionClass APIs since vfio-user + * provides something that is close to a full-fledged MemoryRegion and + * not like an IOMMU mapping. + * + * - When a device is hot unplugged, the elem->mr reference is dropped so + * all vfio-user MemoryRegions associated with this vfio-user server are + * destroyed. 
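+ *
+ * - A hedged sketch of the intended flow (it mirrors dma_register() later
+ *   in this patch; len, vaddr and iova stand in for the mapping
+ *   parameters): a vfio-user DMA window reaches the device's isolated
+ *   address space roughly as
+ *
+ *       AddressSpace *as = pci_device_iommu_address_space(pci_dev);
+ *       MemoryRegion *sub = g_new0(MemoryRegion, 1);
+ *       memory_region_init_ram_ptr(sub, NULL, "dma-window", len, vaddr);
+ *       memory_region_add_subregion(as->root, iova, sub);
+ *
+ *   where pci_device_iommu_address_space() resolves to the per-devfn
+ *   &elem->as returned by remote_iommu_find_add_as() below.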
+ */ + +static AddressSpace *remote_iommu_find_add_as(PCIBus *pci_bus, + void *opaque, int devfn) +{ + RemoteIommu *iommu = opaque; + RemoteIommuElem *elem = NULL; + + qemu_mutex_lock(&iommu->lock); + + elem = g_hash_table_lookup(iommu->elem_by_devfn, INT2VOIDP(devfn)); + + if (!elem) { + elem = g_new0(RemoteIommuElem, 1); + g_hash_table_insert(iommu->elem_by_devfn, INT2VOIDP(devfn), elem); + } + + if (!elem->mr) { + elem->mr = MEMORY_REGION(object_new(TYPE_MEMORY_REGION)); + memory_region_set_size(elem->mr, UINT64_MAX); + address_space_init(&elem->as, elem->mr, NULL); + } + + qemu_mutex_unlock(&iommu->lock); + + return &elem->as; +} + +void remote_iommu_unplug_dev(PCIDevice *pci_dev) +{ + AddressSpace *as = pci_device_iommu_address_space(pci_dev); + RemoteIommuElem *elem = NULL; + + if (as == &address_space_memory) { + return; + } + + elem = container_of(as, RemoteIommuElem, as); + + address_space_destroy(&elem->as); + + object_unref(elem->mr); + + elem->mr = NULL; +} + +static void remote_iommu_init(Object *obj) +{ + RemoteIommu *iommu = REMOTE_IOMMU(obj); + + iommu->elem_by_devfn = g_hash_table_new_full(NULL, NULL, NULL, g_free); + + qemu_mutex_init(&iommu->lock); +} + +static void remote_iommu_finalize(Object *obj) +{ + RemoteIommu *iommu = REMOTE_IOMMU(obj); + + qemu_mutex_destroy(&iommu->lock); + + g_hash_table_destroy(iommu->elem_by_devfn); + + iommu->elem_by_devfn = NULL; +} + +void remote_iommu_setup(PCIBus *pci_bus) +{ + RemoteIommu *iommu = NULL; + + g_assert(pci_bus); + + iommu = REMOTE_IOMMU(object_new(TYPE_REMOTE_IOMMU)); + + pci_setup_iommu(pci_bus, remote_iommu_find_add_as, iommu); + + object_property_add_child(OBJECT(pci_bus), "remote-iommu", OBJECT(iommu)); + + object_unref(OBJECT(iommu)); +} + +static const TypeInfo remote_iommu_info = { + .name = TYPE_REMOTE_IOMMU, + .parent = TYPE_OBJECT, + .instance_size = sizeof(RemoteIommu), + .instance_init = remote_iommu_init, + .instance_finalize = remote_iommu_finalize, +}; + +static void remote_iommu_register_types(void) +{ + type_register_static(&remote_iommu_info); +} + +type_init(remote_iommu_register_types) diff --git a/hw/remote/machine.c b/hw/remote/machine.c index 952105eab5..75d550daae 100644 --- a/hw/remote/machine.c +++ b/hw/remote/machine.c @@ -14,13 +14,17 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "hw/remote/machine.h" #include "exec/memory.h" #include "qapi/error.h" #include "hw/pci/pci_host.h" #include "hw/remote/iohub.h" +#include "hw/remote/iommu.h" +#include "hw/qdev-core.h" +#include "hw/remote/iommu.h" +#include "hw/remote/vfio-user-obj.h" +#include "hw/pci/msi.h" static void remote_machine_init(MachineState *machine) { @@ -50,25 +54,102 @@ static void remote_machine_init(MachineState *machine) pci_host = PCI_HOST_BRIDGE(rem_host); - remote_iohub_init(&s->iohub); + if (s->vfio_user) { + remote_iommu_setup(pci_host->bus); - pci_bus_irqs(pci_host->bus, remote_iohub_set_irq, remote_iohub_map_irq, - &s->iohub, REMOTE_IOHUB_NB_PIRQS); + msi_nonbroken = true; + + vfu_object_set_bus_irq(pci_host->bus); + } else { + remote_iohub_init(&s->iohub); + + pci_bus_irqs(pci_host->bus, remote_iohub_set_irq, remote_iohub_map_irq, + &s->iohub, REMOTE_IOHUB_NB_PIRQS); + } + + qbus_set_hotplug_handler(BUS(pci_host->bus), OBJECT(s)); +} + +static bool remote_machine_get_vfio_user(Object *obj, Error **errp) +{ + RemoteMachineState *s = REMOTE_MACHINE(obj); + + return s->vfio_user; +} + +static void remote_machine_set_vfio_user(Object *obj, bool value, Error **errp) +{ + RemoteMachineState *s = 
REMOTE_MACHINE(obj); + + if (phase_check(PHASE_MACHINE_CREATED)) { + error_setg(errp, "Error enabling vfio-user - machine already created"); + return; + } + + s->vfio_user = value; +} + +static bool remote_machine_get_auto_shutdown(Object *obj, Error **errp) +{ + RemoteMachineState *s = REMOTE_MACHINE(obj); + + return s->auto_shutdown; +} + +static void remote_machine_set_auto_shutdown(Object *obj, bool value, + Error **errp) +{ + RemoteMachineState *s = REMOTE_MACHINE(obj); + + s->auto_shutdown = value; +} + +static void remote_machine_instance_init(Object *obj) +{ + RemoteMachineState *s = REMOTE_MACHINE(obj); + + s->auto_shutdown = true; +} + +static void remote_machine_dev_unplug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + qdev_unrealize(dev); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + remote_iommu_unplug_dev(PCI_DEVICE(dev)); + } } static void remote_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); mc->init = remote_machine_init; mc->desc = "Experimental remote machine"; + + hc->unplug = remote_machine_dev_unplug_cb; + + object_class_property_add_bool(oc, "vfio-user", + remote_machine_get_vfio_user, + remote_machine_set_vfio_user); + + object_class_property_add_bool(oc, "auto-shutdown", + remote_machine_get_auto_shutdown, + remote_machine_set_auto_shutdown); } static const TypeInfo remote_machine = { .name = TYPE_REMOTE_MACHINE, .parent = TYPE_MACHINE, .instance_size = sizeof(RemoteMachineState), + .instance_init = remote_machine_instance_init, .class_init = remote_machine_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + } }; static void remote_machine_register_types(void) diff --git a/hw/remote/memory.c b/hw/remote/memory.c index 6e21ab1a45..6d60da91e0 100644 --- a/hw/remote/memory.c +++ b/hw/remote/memory.c @@ -9,7 +9,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "hw/remote/memory.h" #include "exec/ram_addr.h" diff --git a/hw/remote/meson.build b/hw/remote/meson.build index e6a5574242..ab25c04906 100644 --- a/hw/remote/meson.build +++ b/hw/remote/meson.build @@ -6,6 +6,10 @@ remote_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('message.c')) remote_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('remote-obj.c')) remote_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('proxy.c')) remote_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('iohub.c')) +remote_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('iommu.c')) +remote_ss.add(when: 'CONFIG_VFIO_USER_SERVER', if_true: files('vfio-user-obj.c')) + +remote_ss.add(when: 'CONFIG_VFIO_USER_SERVER', if_true: libvfio_user_dep) specific_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('memory.c')) specific_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('proxy-memory-listener.c')) diff --git a/hw/remote/message.c b/hw/remote/message.c index 11d729845c..50f6bf2d49 100644 --- a/hw/remote/message.c +++ b/hw/remote/message.c @@ -8,7 +8,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "hw/remote/machine.h" #include "io/channel.h" diff --git a/hw/remote/mpqemu-link.c b/hw/remote/mpqemu-link.c index e67a5de72c..9bd98e8219 100644 --- a/hw/remote/mpqemu-link.c +++ b/hw/remote/mpqemu-link.c @@ -9,7 +9,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/module.h" #include "hw/remote/mpqemu-link.h" @@ -34,7 +33,6 @@ */ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp) { - ERRP_GUARD(); 
bool iolock = qemu_mutex_iothread_locked(); bool iothread = qemu_in_iothread(); struct iovec send[2] = {}; @@ -70,7 +68,7 @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp) } if (!qio_channel_writev_full_all(ioc, send, G_N_ELEMENTS(send), - fds, nfds, errp)) { + fds, nfds, 0, errp)) { ret = true; } else { trace_mpqemu_send_io_error(msg->cmd, msg->size, nfds); @@ -97,7 +95,6 @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp) static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds, size_t *nfds, Error **errp) { - ERRP_GUARD(); struct iovec iov = { .iov_base = buf, .iov_len = len }; bool iolock = qemu_mutex_iothread_locked(); bool iothread = qemu_in_iothread(); @@ -192,7 +189,6 @@ fail: uint64_t mpqemu_msg_send_and_await_reply(MPQemuMsg *msg, PCIProxyDev *pdev, Error **errp) { - ERRP_GUARD(); MPQemuMsg msg_reply = {0}; uint64_t ret = UINT64_MAX; diff --git a/hw/remote/proxy-memory-listener.c b/hw/remote/proxy-memory-listener.c index 901dbf1357..eb9918fe72 100644 --- a/hw/remote/proxy-memory-listener.c +++ b/hw/remote/proxy-memory-listener.c @@ -7,7 +7,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/compiler.h" #include "qemu/int128.h" @@ -16,6 +15,7 @@ #include "exec/cpu-common.h" #include "exec/ram_addr.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "hw/remote/mpqemu-link.h" #include "hw/remote/proxy-memory-listener.h" @@ -219,6 +219,7 @@ void proxy_memory_listener_configure(ProxyMemoryListener *proxy_listener, proxy_listener->listener.region_add = proxy_memory_listener_region_addnop; proxy_listener->listener.region_nop = proxy_memory_listener_region_addnop; proxy_listener->listener.priority = 10; + proxy_listener->listener.name = "proxy"; memory_listener_register(&proxy_listener->listener, &address_space_memory); diff --git a/hw/remote/proxy.c b/hw/remote/proxy.c index 6dda705fc2..1c7786b52c 100644 --- a/hw/remote/proxy.c +++ b/hw/remote/proxy.c @@ -7,7 +7,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "hw/remote/proxy.h" #include "hw/pci/pci.h" @@ -102,10 +101,18 @@ static void pci_proxy_dev_realize(PCIDevice *device, Error **errp) } dev->ioc = qio_channel_new_fd(fd, errp); + if (!dev->ioc) { + close(fd); + return; + } error_setg(&dev->migration_blocker, "%s does not support migration", TYPE_PCI_PROXY_DEV); - migrate_add_blocker(dev->migration_blocker, errp); + if (migrate_add_blocker(dev->migration_blocker, errp) < 0) { + error_free(dev->migration_blocker); + object_unref(dev->ioc); + return; + } qemu_mutex_init(&dev->io_mutex); qio_channel_set_blocking(dev->ioc, true, NULL); @@ -316,6 +323,7 @@ static void probe_pci_info(PCIDevice *dev, Error **errp) set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); break; case PCI_BASE_CLASS_NETWORK: + case PCI_BASE_CLASS_WIRELESS: set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); break; case PCI_BASE_CLASS_INPUT: diff --git a/hw/remote/remote-obj.c b/hw/remote/remote-obj.c index 4f21254219..333e5ac443 100644 --- a/hw/remote/remote-obj.c +++ b/hw/remote/remote-obj.c @@ -8,7 +8,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/error-report.h" #include "qemu/notify.h" diff --git a/hw/remote/trace-events b/hw/remote/trace-events index 0b23974f90..0d1b7d56a5 100644 --- a/hw/remote/trace-events +++ b/hw/remote/trace-events @@ -2,3 +2,14 @@ mpqemu_send_io_error(int cmd, int size, int nfds) "send command %d size %d, %d file descriptors to remote process" mpqemu_recv_io_error(int cmd, int size, int nfds) "failed 
to receive %d size %d, %d file descriptors to remote process" + +# vfio-user-obj.c +vfu_prop(const char *prop, const char *val) "vfu: setting %s as %s" +vfu_cfg_read(uint32_t offset, uint32_t val) "vfu: cfg: 0x%x -> 0x%x" +vfu_cfg_write(uint32_t offset, uint32_t val) "vfu: cfg: 0x%x <- 0x%x" +vfu_dma_register(uint64_t gpa, size_t len) "vfu: registering GPA 0x%"PRIx64", %zu bytes" +vfu_dma_unregister(uint64_t gpa) "vfu: unregistering GPA 0x%"PRIx64"" +vfu_bar_register(int i, uint64_t addr, uint64_t size) "vfu: BAR %d: addr 0x%"PRIx64" size 0x%"PRIx64"" +vfu_bar_rw_enter(const char *op, uint64_t addr) "vfu: %s request for BAR address 0x%"PRIx64"" +vfu_bar_rw_exit(const char *op, uint64_t addr) "vfu: Finished %s of BAR address 0x%"PRIx64"" +vfu_interrupt(int pirq) "vfu: sending interrupt to device - PIRQ %d" diff --git a/hw/remote/vfio-user-obj.c b/hw/remote/vfio-user-obj.c new file mode 100644 index 0000000000..4e36bb8bcf --- /dev/null +++ b/hw/remote/vfio-user-obj.c @@ -0,0 +1,951 @@ +/** + * QEMU vfio-user-server server object + * + * Copyright © 2022 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL-v2, version 2 or later. + * + * See the COPYING file in the top-level directory. + * + */ + +/** + * Usage: add options: + * -machine x-remote,vfio-user=on,auto-shutdown=on + * -device ,id= + * -object x-vfio-user-server,id=,type=unix,path=, + * device= + * + * Note that x-vfio-user-server object must be used with x-remote machine only. + * This server could only support PCI devices for now. + * + * type - SocketAddress type - presently "unix" alone is supported. Required + * option + * + * path - named unix socket, it will be created by the server. It is + * a required option + * + * device - id of a device on the server, a required option. PCI devices + * alone are supported presently. + * + * notes - x-vfio-user-server could block IO and monitor during the + * initialization phase. + */ + +#include "qemu/osdep.h" + +#include "qom/object.h" +#include "qom/object_interfaces.h" +#include "qemu/error-report.h" +#include "trace.h" +#include "sysemu/runstate.h" +#include "hw/boards.h" +#include "hw/remote/machine.h" +#include "qapi/error.h" +#include "qapi/qapi-visit-sockets.h" +#include "qapi/qapi-events-misc.h" +#include "qemu/notify.h" +#include "qemu/thread.h" +#include "qemu/main-loop.h" +#include "sysemu/sysemu.h" +#include "libvfio-user.h" +#include "hw/qdev-core.h" +#include "hw/pci/pci.h" +#include "qemu/timer.h" +#include "exec/memory.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "hw/remote/vfio-user-obj.h" + +#define TYPE_VFU_OBJECT "x-vfio-user-server" +OBJECT_DECLARE_TYPE(VfuObject, VfuObjectClass, VFU_OBJECT) + +/** + * VFU_OBJECT_ERROR - reports an error message. If auto_shutdown + * is set, it aborts the machine on error. Otherwise, it logs an + * error message without aborting. + */ +#define VFU_OBJECT_ERROR(o, fmt, ...) 
\ + { \ + if (vfu_object_auto_shutdown()) { \ + error_setg(&error_abort, (fmt), ## __VA_ARGS__); \ + } else { \ + error_report((fmt), ## __VA_ARGS__); \ + } \ + } \ + +struct VfuObjectClass { + ObjectClass parent_class; + + unsigned int nr_devs; +}; + +struct VfuObject { + /* private */ + Object parent; + + SocketAddress *socket; + + char *device; + + Error *err; + + Notifier machine_done; + + vfu_ctx_t *vfu_ctx; + + PCIDevice *pci_dev; + + Error *unplug_blocker; + + int vfu_poll_fd; + + MSITriggerFunc *default_msi_trigger; + MSIPrepareMessageFunc *default_msi_prepare_message; + MSIxPrepareMessageFunc *default_msix_prepare_message; +}; + +static void vfu_object_init_ctx(VfuObject *o, Error **errp); + +static bool vfu_object_auto_shutdown(void) +{ + bool auto_shutdown = true; + Error *local_err = NULL; + + if (!current_machine) { + return auto_shutdown; + } + + auto_shutdown = object_property_get_bool(OBJECT(current_machine), + "auto-shutdown", + &local_err); + + /* + * local_err would be set if no such property exists - safe to ignore. + * Unlikely scenario as auto-shutdown is always defined for + * TYPE_REMOTE_MACHINE, and TYPE_VFU_OBJECT only works with + * TYPE_REMOTE_MACHINE + */ + if (local_err) { + auto_shutdown = true; + error_free(local_err); + } + + return auto_shutdown; +} + +static void vfu_object_set_socket(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + VfuObject *o = VFU_OBJECT(obj); + + if (o->vfu_ctx) { + error_setg(errp, "vfu: Unable to set socket property - server busy"); + return; + } + + qapi_free_SocketAddress(o->socket); + + o->socket = NULL; + + visit_type_SocketAddress(v, name, &o->socket, errp); + + if (o->socket->type != SOCKET_ADDRESS_TYPE_UNIX) { + error_setg(errp, "vfu: Unsupported socket type - %s", + SocketAddressType_str(o->socket->type)); + qapi_free_SocketAddress(o->socket); + o->socket = NULL; + return; + } + + trace_vfu_prop("socket", o->socket->u.q_unix.path); + + vfu_object_init_ctx(o, errp); +} + +static void vfu_object_set_device(Object *obj, const char *str, Error **errp) +{ + VfuObject *o = VFU_OBJECT(obj); + + if (o->vfu_ctx) { + error_setg(errp, "vfu: Unable to set device property - server busy"); + return; + } + + g_free(o->device); + + o->device = g_strdup(str); + + trace_vfu_prop("device", str); + + vfu_object_init_ctx(o, errp); +} + +static void vfu_object_ctx_run(void *opaque) +{ + VfuObject *o = opaque; + const char *vfu_id; + char *vfu_path, *pci_dev_path; + int ret = -1; + + while (ret != 0) { + ret = vfu_run_ctx(o->vfu_ctx); + if (ret < 0) { + if (errno == EINTR) { + continue; + } else if (errno == ENOTCONN) { + vfu_id = object_get_canonical_path_component(OBJECT(o)); + vfu_path = object_get_canonical_path(OBJECT(o)); + g_assert(o->pci_dev); + pci_dev_path = object_get_canonical_path(OBJECT(o->pci_dev)); + /* o->device is a required property and is non-NULL here */ + g_assert(o->device); + qapi_event_send_vfu_client_hangup(vfu_id, vfu_path, + o->device, pci_dev_path); + qemu_set_fd_handler(o->vfu_poll_fd, NULL, NULL, NULL); + o->vfu_poll_fd = -1; + object_unparent(OBJECT(o)); + g_free(vfu_path); + g_free(pci_dev_path); + break; + } else { + VFU_OBJECT_ERROR(o, "vfu: Failed to run device %s - %s", + o->device, strerror(errno)); + break; + } + } + } +} + +static void vfu_object_attach_ctx(void *opaque) +{ + VfuObject *o = opaque; + GPollFD pfds[1]; + int ret; + + qemu_set_fd_handler(o->vfu_poll_fd, NULL, NULL, NULL); + + pfds[0].fd = o->vfu_poll_fd; + pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR; + 
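+    /*
+     * The fd polled here is the pre-attach poll fd fetched in
+     * vfu_object_init_ctx(); the loop below retries vfu_attach_ctx() and
+     * sleeps in qemu_poll_ns() for up to 500ms per iteration so that a
+     * client which is slow to connect does not busy-spin this thread.
+     * After a successful attach the poll fd is re-fetched and handed to
+     * vfu_object_ctx_run() for message processing.
+     */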
+retry_attach: + ret = vfu_attach_ctx(o->vfu_ctx); + if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) { + /** + * vfu_object_attach_ctx can block QEMU's main loop + * during attach - the monitor and other IO + * could be unresponsive during this time. + */ + (void)qemu_poll_ns(pfds, 1, 500 * (int64_t)SCALE_MS); + goto retry_attach; + } else if (ret < 0) { + VFU_OBJECT_ERROR(o, "vfu: Failed to attach device %s to context - %s", + o->device, strerror(errno)); + return; + } + + o->vfu_poll_fd = vfu_get_poll_fd(o->vfu_ctx); + if (o->vfu_poll_fd < 0) { + VFU_OBJECT_ERROR(o, "vfu: Failed to get poll fd %s", o->device); + return; + } + + qemu_set_fd_handler(o->vfu_poll_fd, vfu_object_ctx_run, NULL, o); +} + +static ssize_t vfu_object_cfg_access(vfu_ctx_t *vfu_ctx, char * const buf, + size_t count, loff_t offset, + const bool is_write) +{ + VfuObject *o = vfu_get_private(vfu_ctx); + uint32_t pci_access_width = sizeof(uint32_t); + size_t bytes = count; + uint32_t val = 0; + char *ptr = buf; + int len; + + /* + * Writes to the BAR registers would trigger an update to the + * global Memory and IO AddressSpaces. But the remote device + * never uses the global AddressSpaces, therefore overlapping + * memory regions are not a problem + */ + while (bytes > 0) { + len = (bytes > pci_access_width) ? pci_access_width : bytes; + if (is_write) { + memcpy(&val, ptr, len); + pci_host_config_write_common(o->pci_dev, offset, + pci_config_size(o->pci_dev), + val, len); + trace_vfu_cfg_write(offset, val); + } else { + val = pci_host_config_read_common(o->pci_dev, offset, + pci_config_size(o->pci_dev), len); + memcpy(ptr, &val, len); + trace_vfu_cfg_read(offset, val); + } + offset += len; + ptr += len; + bytes -= len; + } + + return count; +} + +static void dma_register(vfu_ctx_t *vfu_ctx, vfu_dma_info_t *info) +{ + VfuObject *o = vfu_get_private(vfu_ctx); + AddressSpace *dma_as = NULL; + MemoryRegion *subregion = NULL; + g_autofree char *name = NULL; + struct iovec *iov = &info->iova; + + if (!info->vaddr) { + return; + } + + name = g_strdup_printf("mem-%s-%"PRIx64"", o->device, + (uint64_t)info->vaddr); + + subregion = g_new0(MemoryRegion, 1); + + memory_region_init_ram_ptr(subregion, NULL, name, + iov->iov_len, info->vaddr); + + dma_as = pci_device_iommu_address_space(o->pci_dev); + + memory_region_add_subregion(dma_as->root, (hwaddr)iov->iov_base, subregion); + + trace_vfu_dma_register((uint64_t)iov->iov_base, iov->iov_len); +} + +static void dma_unregister(vfu_ctx_t *vfu_ctx, vfu_dma_info_t *info) +{ + VfuObject *o = vfu_get_private(vfu_ctx); + AddressSpace *dma_as = NULL; + MemoryRegion *mr = NULL; + ram_addr_t offset; + + mr = memory_region_from_host(info->vaddr, &offset); + if (!mr) { + return; + } + + dma_as = pci_device_iommu_address_space(o->pci_dev); + + memory_region_del_subregion(dma_as->root, mr); + + object_unparent((OBJECT(mr))); + + trace_vfu_dma_unregister((uint64_t)info->iova.iov_base); +} + +static int vfu_object_mr_rw(MemoryRegion *mr, uint8_t *buf, hwaddr offset, + hwaddr size, const bool is_write) +{ + uint8_t *ptr = buf; + bool release_lock = false; + uint8_t *ram_ptr = NULL; + MemTxResult result; + int access_size; + uint64_t val; + + if (memory_access_is_direct(mr, is_write)) { + /** + * Some devices expose a PCI expansion ROM, which could be buffer + * based as compared to other regions which are primarily based on + * MemoryRegionOps. memory_region_find() would already check + * for buffer overflow, we don't need to repeat it here. 
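+     *
+     * (memory_access_is_direct() only holds for RAM- or ROM-backed
+     * regions, so the memcpy() below never touches an MMIO region;
+     * everything else goes through the dispatch loop further down.)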
+ */ + ram_ptr = memory_region_get_ram_ptr(mr); + + if (is_write) { + memcpy((ram_ptr + offset), buf, size); + } else { + memcpy(buf, (ram_ptr + offset), size); + } + + return 0; + } + + while (size) { + /** + * The read/write logic used below is similar to the ones in + * flatview_read/write_continue() + */ + release_lock = prepare_mmio_access(mr); + + access_size = memory_access_size(mr, size, offset); + + if (is_write) { + val = ldn_he_p(ptr, access_size); + + result = memory_region_dispatch_write(mr, offset, val, + size_memop(access_size), + MEMTXATTRS_UNSPECIFIED); + } else { + result = memory_region_dispatch_read(mr, offset, &val, + size_memop(access_size), + MEMTXATTRS_UNSPECIFIED); + + stn_he_p(ptr, access_size, val); + } + + if (release_lock) { + qemu_mutex_unlock_iothread(); + release_lock = false; + } + + if (result != MEMTX_OK) { + return -1; + } + + size -= access_size; + ptr += access_size; + offset += access_size; + } + + return 0; +} + +static size_t vfu_object_bar_rw(PCIDevice *pci_dev, int pci_bar, + hwaddr bar_offset, char * const buf, + hwaddr len, const bool is_write) +{ + MemoryRegionSection section = { 0 }; + uint8_t *ptr = (uint8_t *)buf; + MemoryRegion *section_mr = NULL; + uint64_t section_size; + hwaddr section_offset; + hwaddr size = 0; + + while (len) { + section = memory_region_find(pci_dev->io_regions[pci_bar].memory, + bar_offset, len); + + if (!section.mr) { + warn_report("vfu: invalid address 0x%"PRIx64"", bar_offset); + return size; + } + + section_mr = section.mr; + section_offset = section.offset_within_region; + section_size = int128_get64(section.size); + + if (is_write && section_mr->readonly) { + warn_report("vfu: attempting to write to readonly region in " + "bar %d - [0x%"PRIx64" - 0x%"PRIx64"]", + pci_bar, bar_offset, + (bar_offset + section_size)); + memory_region_unref(section_mr); + return size; + } + + if (vfu_object_mr_rw(section_mr, ptr, section_offset, + section_size, is_write)) { + warn_report("vfu: failed to %s " + "[0x%"PRIx64" - 0x%"PRIx64"] in bar %d", + is_write ? "write to" : "read from", bar_offset, + (bar_offset + section_size), pci_bar); + memory_region_unref(section_mr); + return size; + } + + size += section_size; + bar_offset += section_size; + ptr += section_size; + len -= section_size; + + memory_region_unref(section_mr); + } + + return size; +} + +/** + * VFU_OBJECT_BAR_HANDLER - macro for defining handlers for PCI BARs. 
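+ *
+ * Each generated handler simply forwards the access to vfu_object_bar_rw()
+ * with its own BAR number filled in.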
+ * + * To create handler for BAR number 2, VFU_OBJECT_BAR_HANDLER(2) would + * define vfu_object_bar2_handler + */ +#define VFU_OBJECT_BAR_HANDLER(BAR_NO) \ + static ssize_t vfu_object_bar##BAR_NO##_handler(vfu_ctx_t *vfu_ctx, \ + char * const buf, size_t count, \ + loff_t offset, const bool is_write) \ + { \ + VfuObject *o = vfu_get_private(vfu_ctx); \ + PCIDevice *pci_dev = o->pci_dev; \ + \ + return vfu_object_bar_rw(pci_dev, BAR_NO, offset, \ + buf, count, is_write); \ + } \ + +VFU_OBJECT_BAR_HANDLER(0) +VFU_OBJECT_BAR_HANDLER(1) +VFU_OBJECT_BAR_HANDLER(2) +VFU_OBJECT_BAR_HANDLER(3) +VFU_OBJECT_BAR_HANDLER(4) +VFU_OBJECT_BAR_HANDLER(5) +VFU_OBJECT_BAR_HANDLER(6) + +static vfu_region_access_cb_t *vfu_object_bar_handlers[PCI_NUM_REGIONS] = { + &vfu_object_bar0_handler, + &vfu_object_bar1_handler, + &vfu_object_bar2_handler, + &vfu_object_bar3_handler, + &vfu_object_bar4_handler, + &vfu_object_bar5_handler, + &vfu_object_bar6_handler, +}; + +/** + * vfu_object_register_bars - Identify active BAR regions of pdev and setup + * callbacks to handle read/write accesses + */ +static void vfu_object_register_bars(vfu_ctx_t *vfu_ctx, PCIDevice *pdev) +{ + int flags = VFU_REGION_FLAG_RW; + int i; + + for (i = 0; i < PCI_NUM_REGIONS; i++) { + if (!pdev->io_regions[i].size) { + continue; + } + + if ((i == VFU_PCI_DEV_ROM_REGION_IDX) || + pdev->io_regions[i].memory->readonly) { + flags &= ~VFU_REGION_FLAG_WRITE; + } + + vfu_setup_region(vfu_ctx, VFU_PCI_DEV_BAR0_REGION_IDX + i, + (size_t)pdev->io_regions[i].size, + vfu_object_bar_handlers[i], + flags, NULL, 0, -1, 0); + + trace_vfu_bar_register(i, pdev->io_regions[i].addr, + pdev->io_regions[i].size); + } +} + +static int vfu_object_map_irq(PCIDevice *pci_dev, int intx) +{ + int pci_bdf = PCI_BUILD_BDF(pci_bus_num(pci_get_bus(pci_dev)), + pci_dev->devfn); + + return pci_bdf; +} + +static void vfu_object_set_irq(void *opaque, int pirq, int level) +{ + PCIBus *pci_bus = opaque; + PCIDevice *pci_dev = NULL; + vfu_ctx_t *vfu_ctx = NULL; + int pci_bus_num, devfn; + + if (level) { + pci_bus_num = PCI_BUS_NUM(pirq); + devfn = PCI_BDF_TO_DEVFN(pirq); + + /* + * pci_find_device() performs at O(1) if the device is attached + * to the root PCI bus. 
Whereas, if the device is attached to a + * secondary PCI bus (such as when a root port is involved), + * finding the parent PCI bus could take O(n) + */ + pci_dev = pci_find_device(pci_bus, pci_bus_num, devfn); + + vfu_ctx = pci_dev->irq_opaque; + + g_assert(vfu_ctx); + + vfu_irq_trigger(vfu_ctx, 0); + } +} + +static MSIMessage vfu_object_msi_prepare_msg(PCIDevice *pci_dev, + unsigned int vector) +{ + MSIMessage msg; + + msg.address = 0; + msg.data = vector; + + return msg; +} + +static void vfu_object_msi_trigger(PCIDevice *pci_dev, MSIMessage msg) +{ + vfu_ctx_t *vfu_ctx = pci_dev->irq_opaque; + + vfu_irq_trigger(vfu_ctx, msg.data); +} + +static void vfu_object_setup_msi_cbs(VfuObject *o) +{ + o->default_msi_trigger = o->pci_dev->msi_trigger; + o->default_msi_prepare_message = o->pci_dev->msi_prepare_message; + o->default_msix_prepare_message = o->pci_dev->msix_prepare_message; + + o->pci_dev->msi_trigger = vfu_object_msi_trigger; + o->pci_dev->msi_prepare_message = vfu_object_msi_prepare_msg; + o->pci_dev->msix_prepare_message = vfu_object_msi_prepare_msg; +} + +static void vfu_object_restore_msi_cbs(VfuObject *o) +{ + o->pci_dev->msi_trigger = o->default_msi_trigger; + o->pci_dev->msi_prepare_message = o->default_msi_prepare_message; + o->pci_dev->msix_prepare_message = o->default_msix_prepare_message; +} + +static void vfu_msix_irq_state(vfu_ctx_t *vfu_ctx, uint32_t start, + uint32_t count, bool mask) +{ + VfuObject *o = vfu_get_private(vfu_ctx); + uint32_t vector; + + for (vector = start; vector < count; vector++) { + msix_set_mask(o->pci_dev, vector, mask); + } +} + +static void vfu_msi_irq_state(vfu_ctx_t *vfu_ctx, uint32_t start, + uint32_t count, bool mask) +{ + VfuObject *o = vfu_get_private(vfu_ctx); + Error *err = NULL; + uint32_t vector; + + for (vector = start; vector < count; vector++) { + msi_set_mask(o->pci_dev, vector, mask, &err); + if (err) { + VFU_OBJECT_ERROR(o, "vfu: %s: %s", o->device, + error_get_pretty(err)); + error_free(err); + err = NULL; + } + } +} + +static int vfu_object_setup_irqs(VfuObject *o, PCIDevice *pci_dev) +{ + vfu_ctx_t *vfu_ctx = o->vfu_ctx; + int ret; + + ret = vfu_setup_device_nr_irqs(vfu_ctx, VFU_DEV_INTX_IRQ, 1); + if (ret < 0) { + return ret; + } + + if (msix_nr_vectors_allocated(pci_dev)) { + ret = vfu_setup_device_nr_irqs(vfu_ctx, VFU_DEV_MSIX_IRQ, + msix_nr_vectors_allocated(pci_dev)); + vfu_setup_irq_state_callback(vfu_ctx, VFU_DEV_MSIX_IRQ, + &vfu_msix_irq_state); + } else if (msi_nr_vectors_allocated(pci_dev)) { + ret = vfu_setup_device_nr_irqs(vfu_ctx, VFU_DEV_MSI_IRQ, + msi_nr_vectors_allocated(pci_dev)); + vfu_setup_irq_state_callback(vfu_ctx, VFU_DEV_MSI_IRQ, + &vfu_msi_irq_state); + } + + if (ret < 0) { + return ret; + } + + vfu_object_setup_msi_cbs(o); + + pci_dev->irq_opaque = vfu_ctx; + + return 0; +} + +void vfu_object_set_bus_irq(PCIBus *pci_bus) +{ + int bus_num = pci_bus_num(pci_bus); + int max_bdf = PCI_BUILD_BDF(bus_num, PCI_DEVFN_MAX - 1); + + pci_bus_irqs(pci_bus, vfu_object_set_irq, vfu_object_map_irq, pci_bus, + max_bdf); +} + +static int vfu_object_device_reset(vfu_ctx_t *vfu_ctx, vfu_reset_type_t type) +{ + VfuObject *o = vfu_get_private(vfu_ctx); + + /* vfu_object_ctx_run() handles lost connection */ + if (type == VFU_RESET_LOST_CONN) { + return 0; + } + + qdev_reset_all(DEVICE(o->pci_dev)); + + return 0; +} + +/* + * TYPE_VFU_OBJECT depends on the availability of the 'socket' and 'device' + * properties. It also depends on devices instantiated in QEMU. 
These + * dependencies are not available during the instance_init phase of this + * object's life-cycle. As such, the server is initialized after the + * machine is setup. machine_init_done_notifier notifies TYPE_VFU_OBJECT + * when the machine is setup, and the dependencies are available. + */ +static void vfu_object_machine_done(Notifier *notifier, void *data) +{ + VfuObject *o = container_of(notifier, VfuObject, machine_done); + Error *err = NULL; + + vfu_object_init_ctx(o, &err); + + if (err) { + error_propagate(&error_abort, err); + } +} + +/** + * vfu_object_init_ctx: Create and initialize libvfio-user context. Add + * an unplug blocker for the associated PCI device. Setup a FD handler + * to process incoming messages in the context's socket. + * + * The socket and device properties are mandatory, and this function + * will not create the context without them - the setters for these + * properties should call this function when the property is set. The + * machine should also be ready when this function is invoked - it is + * because QEMU objects are initialized before devices, and the + * associated PCI device wouldn't be available at the object + * initialization time. Until these conditions are satisfied, this + * function would return early without performing any task. + */ +static void vfu_object_init_ctx(VfuObject *o, Error **errp) +{ + ERRP_GUARD(); + DeviceState *dev = NULL; + vfu_pci_type_t pci_type = VFU_PCI_TYPE_CONVENTIONAL; + int ret; + + if (o->vfu_ctx || !o->socket || !o->device || + !phase_check(PHASE_MACHINE_READY)) { + return; + } + + if (o->err) { + error_propagate(errp, o->err); + o->err = NULL; + return; + } + + o->vfu_ctx = vfu_create_ctx(VFU_TRANS_SOCK, o->socket->u.q_unix.path, + LIBVFIO_USER_FLAG_ATTACH_NB, + o, VFU_DEV_TYPE_PCI); + if (o->vfu_ctx == NULL) { + error_setg(errp, "vfu: Failed to create context - %s", strerror(errno)); + return; + } + + dev = qdev_find_recursive(sysbus_get_default(), o->device); + if (dev == NULL) { + error_setg(errp, "vfu: Device %s not found", o->device); + goto fail; + } + + if (!object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + error_setg(errp, "vfu: %s not a PCI device", o->device); + goto fail; + } + + o->pci_dev = PCI_DEVICE(dev); + + object_ref(OBJECT(o->pci_dev)); + + if (pci_is_express(o->pci_dev)) { + pci_type = VFU_PCI_TYPE_EXPRESS; + } + + ret = vfu_pci_init(o->vfu_ctx, pci_type, PCI_HEADER_TYPE_NORMAL, 0); + if (ret < 0) { + error_setg(errp, + "vfu: Failed to attach PCI device %s to context - %s", + o->device, strerror(errno)); + goto fail; + } + + error_setg(&o->unplug_blocker, + "vfu: %s for %s must be deleted before unplugging", + TYPE_VFU_OBJECT, o->device); + qdev_add_unplug_blocker(DEVICE(o->pci_dev), o->unplug_blocker); + + ret = vfu_setup_region(o->vfu_ctx, VFU_PCI_DEV_CFG_REGION_IDX, + pci_config_size(o->pci_dev), &vfu_object_cfg_access, + VFU_REGION_FLAG_RW | VFU_REGION_FLAG_ALWAYS_CB, + NULL, 0, -1, 0); + if (ret < 0) { + error_setg(errp, + "vfu: Failed to setup config space handlers for %s- %s", + o->device, strerror(errno)); + goto fail; + } + + ret = vfu_setup_device_dma(o->vfu_ctx, &dma_register, &dma_unregister); + if (ret < 0) { + error_setg(errp, "vfu: Failed to setup DMA handlers for %s", + o->device); + goto fail; + } + + vfu_object_register_bars(o->vfu_ctx, o->pci_dev); + + ret = vfu_object_setup_irqs(o, o->pci_dev); + if (ret < 0) { + error_setg(errp, "vfu: Failed to setup interrupts for %s", + o->device); + goto fail; + } + + ret = vfu_setup_device_reset_cb(o->vfu_ctx, 
&vfu_object_device_reset); + if (ret < 0) { + error_setg(errp, "vfu: Failed to setup reset callback"); + goto fail; + } + + ret = vfu_realize_ctx(o->vfu_ctx); + if (ret < 0) { + error_setg(errp, "vfu: Failed to realize device %s- %s", + o->device, strerror(errno)); + goto fail; + } + + o->vfu_poll_fd = vfu_get_poll_fd(o->vfu_ctx); + if (o->vfu_poll_fd < 0) { + error_setg(errp, "vfu: Failed to get poll fd %s", o->device); + goto fail; + } + + qemu_set_fd_handler(o->vfu_poll_fd, vfu_object_attach_ctx, NULL, o); + + return; + +fail: + vfu_destroy_ctx(o->vfu_ctx); + if (o->unplug_blocker && o->pci_dev) { + qdev_del_unplug_blocker(DEVICE(o->pci_dev), o->unplug_blocker); + error_free(o->unplug_blocker); + o->unplug_blocker = NULL; + } + if (o->pci_dev) { + vfu_object_restore_msi_cbs(o); + o->pci_dev->irq_opaque = NULL; + object_unref(OBJECT(o->pci_dev)); + o->pci_dev = NULL; + } + o->vfu_ctx = NULL; +} + +static void vfu_object_init(Object *obj) +{ + VfuObjectClass *k = VFU_OBJECT_GET_CLASS(obj); + VfuObject *o = VFU_OBJECT(obj); + + k->nr_devs++; + + if (!object_dynamic_cast(OBJECT(current_machine), TYPE_REMOTE_MACHINE)) { + error_setg(&o->err, "vfu: %s only compatible with %s machine", + TYPE_VFU_OBJECT, TYPE_REMOTE_MACHINE); + return; + } + + if (!phase_check(PHASE_MACHINE_READY)) { + o->machine_done.notify = vfu_object_machine_done; + qemu_add_machine_init_done_notifier(&o->machine_done); + } + + o->vfu_poll_fd = -1; +} + +static void vfu_object_finalize(Object *obj) +{ + VfuObjectClass *k = VFU_OBJECT_GET_CLASS(obj); + VfuObject *o = VFU_OBJECT(obj); + + k->nr_devs--; + + qapi_free_SocketAddress(o->socket); + + o->socket = NULL; + + if (o->vfu_poll_fd != -1) { + qemu_set_fd_handler(o->vfu_poll_fd, NULL, NULL, NULL); + o->vfu_poll_fd = -1; + } + + if (o->vfu_ctx) { + vfu_destroy_ctx(o->vfu_ctx); + o->vfu_ctx = NULL; + } + + g_free(o->device); + + o->device = NULL; + + if (o->unplug_blocker && o->pci_dev) { + qdev_del_unplug_blocker(DEVICE(o->pci_dev), o->unplug_blocker); + error_free(o->unplug_blocker); + o->unplug_blocker = NULL; + } + + if (o->pci_dev) { + vfu_object_restore_msi_cbs(o); + o->pci_dev->irq_opaque = NULL; + object_unref(OBJECT(o->pci_dev)); + o->pci_dev = NULL; + } + + if (!k->nr_devs && vfu_object_auto_shutdown()) { + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } + + if (o->machine_done.notify) { + qemu_remove_machine_init_done_notifier(&o->machine_done); + o->machine_done.notify = NULL; + } +} + +static void vfu_object_class_init(ObjectClass *klass, void *data) +{ + VfuObjectClass *k = VFU_OBJECT_CLASS(klass); + + k->nr_devs = 0; + + object_class_property_add(klass, "socket", "SocketAddress", NULL, + vfu_object_set_socket, NULL, NULL); + object_class_property_set_description(klass, "socket", + "SocketAddress " + "(ex: type=unix,path=/tmp/sock). 
" + "Only UNIX is presently supported"); + object_class_property_add_str(klass, "device", NULL, + vfu_object_set_device); + object_class_property_set_description(klass, "device", + "device ID - only PCI devices " + "are presently supported"); +} + +static const TypeInfo vfu_object_info = { + .name = TYPE_VFU_OBJECT, + .parent = TYPE_OBJECT, + .instance_size = sizeof(VfuObject), + .instance_init = vfu_object_init, + .instance_finalize = vfu_object_finalize, + .class_size = sizeof(VfuObjectClass), + .class_init = vfu_object_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_USER_CREATABLE }, + { } + } +}; + +static void vfu_register_types(void) +{ + type_register_static(&vfu_object_info); +} + +type_init(vfu_register_types); diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig index 0590f443fd..79ff61c464 100644 --- a/hw/riscv/Kconfig +++ b/hw/riscv/Kconfig @@ -12,7 +12,7 @@ config MICROCHIP_PFSOC select MCHP_PFSOC_MMUART select MCHP_PFSOC_SYSREG select MSI_NONBROKEN - select SIFIVE_CLINT + select RISCV_ACLINT select SIFIVE_PDMA select SIFIVE_PLIC select UNIMP @@ -22,14 +22,11 @@ config OPENTITAN select IBEX select UNIMP -config SHAKTI - bool - config SHAKTI_C bool select UNIMP - select SHAKTI - select SIFIVE_CLINT + select SHAKTI_UART + select RISCV_ACLINT select SIFIVE_PLIC config RISCV_VIRT @@ -37,6 +34,7 @@ config RISCV_VIRT imply PCI_DEVICES imply VIRTIO_VGA imply TEST_DEVICES + imply TPM_TIS_SYSBUS select RISCV_NUMA select GOLDFISH_RTC select MSI_NONBROKEN @@ -44,16 +42,19 @@ config RISCV_VIRT select PCI_EXPRESS_GENERIC_BRIDGE select PFLASH_CFI01 select SERIAL - select SIFIVE_CLINT + select RISCV_ACLINT + select RISCV_APLIC + select RISCV_IMSIC select SIFIVE_PLIC select SIFIVE_TEST select VIRTIO_MMIO select FW_CFG_DMA + select PLATFORM_BUS config SIFIVE_E bool select MSI_NONBROKEN - select SIFIVE_CLINT + select RISCV_ACLINT select SIFIVE_GPIO select SIFIVE_PLIC select SIFIVE_UART @@ -64,7 +65,7 @@ config SIFIVE_U bool select CADENCE select MSI_NONBROKEN - select SIFIVE_CLINT + select RISCV_ACLINT select SIFIVE_GPIO select SIFIVE_PDMA select SIFIVE_PLIC @@ -72,6 +73,7 @@ config SIFIVE_U select SIFIVE_UART select SIFIVE_U_OTP select SIFIVE_U_PRCI + select SIFIVE_PWM select SSI_M25P80 select SSI_SD select UNIMP @@ -81,5 +83,5 @@ config SPIKE select RISCV_NUMA select HTIF select MSI_NONBROKEN - select SIFIVE_CLINT + select RISCV_ACLINT select SIFIVE_PLIC diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c index 993bf89064..ebd351c840 100644 --- a/hw/riscv/boot.c +++ b/hw/riscv/boot.c @@ -18,7 +18,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qemu/units.h" #include "qemu/error-report.h" @@ -30,12 +29,41 @@ #include "elf.h" #include "sysemu/device_tree.h" #include "sysemu/qtest.h" +#include "sysemu/kvm.h" +#include "sysemu/reset.h" #include bool riscv_is_32bit(RISCVHartArrayState *harts) { - return riscv_cpu_is_32bit(&harts->harts[0].env); + return harts->harts[0].env.misa_mxl_max == MXL_RV32; +} + +/* + * Return the per-socket PLIC hart topology configuration string + * (caller must free with g_free()) + */ +char *riscv_plic_hart_config_string(int hart_count) +{ + g_autofree const char **vals = g_new(const char *, hart_count + 1); + int i; + + for (i = 0; i < hart_count; i++) { + CPUState *cs = qemu_get_cpu(i); + CPURISCVState *env = &RISCV_CPU(cs)->env; + + if (kvm_enabled()) { + vals[i] = "S"; + } else if (riscv_has_ext(env, RVS)) { + vals[i] = "MS"; + } else { + vals[i] = "M"; + } + } + vals[i] = NULL; + + /* g_strjoinv() obliges us to cast 
away const here */
+    return g_strjoinv(",", (char **)vals);
 }
 
 target_ulong riscv_calc_kernel_start_addr(RISCVHartArrayState *harts,
@@ -84,8 +112,8 @@ char *riscv_find_firmware(const char *firmware_filename)
     if (filename == NULL) {
         if (!qtest_enabled()) {
             /*
-             * We only ship plain binary bios images in the QEMU source.
-             * With Spike machine that uses ELF images as the default bios,
+             * We only ship OpenSBI binary bios images in the QEMU source.
+             * For machines that use images other than the default bios,
              * running QEMU test will complain hence let's suppress the error
              * report for QEMU testing.
              */
@@ -102,7 +130,8 @@ target_ulong riscv_load_firmware(const char *firmware_filename,
                                  hwaddr firmware_load_addr,
                                  symbol_fn_t sym_cb)
 {
-    uint64_t firmware_entry, firmware_size, firmware_end;
+    uint64_t firmware_entry, firmware_end;
+    ssize_t firmware_size;
 
     if (load_elf_ram_sym(firmware_filename, NULL, NULL, NULL,
                          &firmware_entry, NULL, &firmware_end, NULL,
@@ -126,12 +155,19 @@ target_ulong riscv_load_kernel(const char *kernel_filename,
                                target_ulong kernel_start_addr,
                                symbol_fn_t sym_cb)
 {
-    uint64_t kernel_entry;
+    uint64_t kernel_load_base, kernel_entry;
 
+    /*
+     * NB: Use low address not ELF entry point to ensure that the fw_dynamic
+     * behaviour when loading an ELF matches the fw_payload, fw_jump and BBL
+     * behaviour, as well as fw_dynamic with a raw binary, all of which jump to
+     * the (expected) load address. This allows kernels to have
+     * separate SBI and ELF entry points (used by FreeBSD, for example).
+     */
     if (load_elf_ram_sym(kernel_filename, NULL, NULL, NULL,
-                         &kernel_entry, NULL, NULL, NULL, 0,
+                         NULL, &kernel_load_base, NULL, NULL, 0,
                          EM_RISCV, 1, 0, NULL, true, sym_cb) > 0) {
-        return kernel_entry;
+        return kernel_load_base;
     }
 
     if (load_uimage_as(kernel_filename, &kernel_entry, NULL, NULL,
@@ -151,7 +187,7 @@ target_ulong riscv_load_kernel(const char *kernel_filename,
 hwaddr riscv_load_initrd(const char *filename, uint64_t mem_size,
                          uint64_t kernel_entry, hwaddr *start)
 {
-    int size;
+    ssize_t size;
 
     /*
      * We want to put the initrd far enough into RAM that when the
@@ -178,9 +214,9 @@ hwaddr riscv_load_initrd(const char *filename, uint64_t mem_size,
     return *start + size;
 }
 
-uint32_t riscv_load_fdt(hwaddr dram_base, uint64_t mem_size, void *fdt)
+uint64_t riscv_load_fdt(hwaddr dram_base, uint64_t mem_size, void *fdt)
 {
-    uint32_t temp, fdt_addr;
+    uint64_t temp, fdt_addr;
     hwaddr dram_end = dram_base + mem_size;
     int ret, fdtsize = fdt_totalsize(fdt);
 
@@ -192,11 +228,11 @@ uint32_t riscv_load_fdt(hwaddr dram_base, uint64_t mem_size, void *fdt)
     /*
      * We should put fdt as far as possible to avoid kernel/initrd overwriting
      * its content. But it should be addressable by 32 bit system as well.
-     * Thus, put it at an 16MB aligned address that less than fdt size from the
+     * Thus, put it at a 2MB aligned address that is less than fdt size from the
      * end of dram or 3GB whichever is lesser.
      */
-    temp = MIN(dram_end, 3072 * MiB);
-    fdt_addr = QEMU_ALIGN_DOWN(temp - fdtsize, 16 * MiB);
+    temp = (dram_base < 3072 * MiB) ?
MIN(dram_end, 3072 * MiB) : dram_end; + fdt_addr = QEMU_ALIGN_DOWN(temp - fdtsize, 2 * MiB); ret = fdt_pack(fdt); /* Should only fail if we've built a corrupted tree */ @@ -206,6 +242,8 @@ uint32_t riscv_load_fdt(hwaddr dram_base, uint64_t mem_size, void *fdt) rom_add_blob_fixed_as("fdt", fdt, fdtsize, fdt_addr, &address_space_memory); + qemu_register_reset_nosnapshotload(qemu_fdt_randomize_seeds, + rom_ptr_for_as(&address_space_memory, fdt_addr, fdtsize)); return fdt_addr; } @@ -251,13 +289,15 @@ void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts hwaddr start_addr, hwaddr rom_base, hwaddr rom_size, uint64_t kernel_entry, - uint32_t fdt_load_addr, void *fdt) + uint64_t fdt_load_addr) { int i; uint32_t start_addr_hi32 = 0x00000000; + uint32_t fdt_load_addr_hi32 = 0x00000000; if (!riscv_is_32bit(harts)) { start_addr_hi32 = start_addr >> 32; + fdt_load_addr_hi32 = fdt_load_addr >> 32; } /* reset vector */ uint32_t reset_vec[10] = { @@ -270,7 +310,7 @@ void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts start_addr, /* start: .dword */ start_addr_hi32, fdt_load_addr, /* fdt_laddr: .dword */ - 0x00000000, + fdt_load_addr_hi32, /* fw_dyn: */ }; if (riscv_is_32bit(harts)) { @@ -289,6 +329,44 @@ void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts rom_base, &address_space_memory); riscv_rom_copy_firmware_info(machine, rom_base, rom_size, sizeof(reset_vec), kernel_entry); - - return; +} + +void riscv_setup_direct_kernel(hwaddr kernel_addr, hwaddr fdt_addr) +{ + CPUState *cs; + + for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) { + RISCVCPU *riscv_cpu = RISCV_CPU(cs); + riscv_cpu->env.kernel_addr = kernel_addr; + riscv_cpu->env.fdt_addr = fdt_addr; + } +} + +void riscv_setup_firmware_boot(MachineState *machine) +{ + if (machine->kernel_filename) { + FWCfgState *fw_cfg; + fw_cfg = fw_cfg_find(); + + assert(fw_cfg); + /* + * Expose the kernel, the command line, and the initrd in fw_cfg. + * We don't process them here at all, it's all left to the + * firmware. 
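As a quick aside on the riscv_load_fdt() change above: a minimal standalone sketch of the new 2 MiB placement rule, under assumed example values (dram_base, mem_size and fdtsize are illustrative; EX_MiB and EX_ALIGN_DOWN stand in for QEMU's MiB and QEMU_ALIGN_DOWN and are not part of this patch):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define EX_MiB              (1024ULL * 1024)
#define EX_ALIGN_DOWN(n, m) ((n) / (m) * (m))

int main(void)
{
    uint64_t dram_base = 0x80000000ULL;        /* typical RISC-V DRAM base */
    uint64_t mem_size  = 2048 * EX_MiB;        /* assumed 2 GiB of RAM */
    uint64_t fdtsize   = 0x10000;              /* assumed 64 KiB device tree */
    uint64_t dram_end  = dram_base + mem_size; /* 0x100000000 */

    /* Keep the FDT 32-bit addressable when DRAM starts below 3 GiB. */
    uint64_t temp = (dram_base < 3072 * EX_MiB)
                  ? (dram_end < 3072 * EX_MiB ? dram_end : 3072 * EX_MiB)
                  : dram_end;

    /* 2 MiB alignment (was 16 MiB before this patch). */
    uint64_t fdt_addr = EX_ALIGN_DOWN(temp - fdtsize, 2 * EX_MiB);

    printf("fdt_addr = 0x%" PRIx64 "\n", fdt_addr); /* prints 0xbfe00000 */
    return 0;
}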
+ */ + load_image_to_fw_cfg(fw_cfg, + FW_CFG_KERNEL_SIZE, FW_CFG_KERNEL_DATA, + machine->kernel_filename, + true); + load_image_to_fw_cfg(fw_cfg, + FW_CFG_INITRD_SIZE, FW_CFG_INITRD_DATA, + machine->initrd_filename, false); + + if (machine->kernel_cmdline) { + fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, + strlen(machine->kernel_cmdline) + 1); + fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, + machine->kernel_cmdline); + } + } } diff --git a/hw/riscv/microchip_pfsoc.c b/hw/riscv/microchip_pfsoc.c index eb8e79e0a1..a821263d4f 100644 --- a/hw/riscv/microchip_pfsoc.c +++ b/hw/riscv/microchip_pfsoc.c @@ -49,7 +49,7 @@ #include "hw/riscv/boot.h" #include "hw/riscv/riscv_hart.h" #include "hw/riscv/microchip_pfsoc.h" -#include "hw/intc/sifive_clint.h" +#include "hw/intc/riscv_aclint.h" #include "hw/intc/sifive_plic.h" #include "sysemu/device_tree.h" #include "sysemu/sysemu.h" @@ -100,8 +100,11 @@ static const MemMapEntry microchip_pfsoc_memmap[] = { [MICROCHIP_PFSOC_L2LIM] = { 0x8000000, 0x2000000 }, [MICROCHIP_PFSOC_PLIC] = { 0xc000000, 0x4000000 }, [MICROCHIP_PFSOC_MMUART0] = { 0x20000000, 0x1000 }, + [MICROCHIP_PFSOC_WDOG0] = { 0x20001000, 0x1000 }, [MICROCHIP_PFSOC_SYSREG] = { 0x20002000, 0x2000 }, + [MICROCHIP_PFSOC_AXISW] = { 0x20004000, 0x1000 }, [MICROCHIP_PFSOC_MPUCFG] = { 0x20005000, 0x1000 }, + [MICROCHIP_PFSOC_FMETER] = { 0x20006000, 0x1000 }, [MICROCHIP_PFSOC_DDR_SGMII_PHY] = { 0x20007000, 0x1000 }, [MICROCHIP_PFSOC_EMMC_SD] = { 0x20008000, 0x1000 }, [MICROCHIP_PFSOC_DDR_CFG] = { 0x20080000, 0x40000 }, @@ -109,19 +112,28 @@ static const MemMapEntry microchip_pfsoc_memmap[] = { [MICROCHIP_PFSOC_MMUART2] = { 0x20102000, 0x1000 }, [MICROCHIP_PFSOC_MMUART3] = { 0x20104000, 0x1000 }, [MICROCHIP_PFSOC_MMUART4] = { 0x20106000, 0x1000 }, + [MICROCHIP_PFSOC_WDOG1] = { 0x20101000, 0x1000 }, + [MICROCHIP_PFSOC_WDOG2] = { 0x20103000, 0x1000 }, + [MICROCHIP_PFSOC_WDOG3] = { 0x20105000, 0x1000 }, + [MICROCHIP_PFSOC_WDOG4] = { 0x20106000, 0x1000 }, [MICROCHIP_PFSOC_SPI0] = { 0x20108000, 0x1000 }, [MICROCHIP_PFSOC_SPI1] = { 0x20109000, 0x1000 }, + [MICROCHIP_PFSOC_I2C0] = { 0x2010a000, 0x1000 }, [MICROCHIP_PFSOC_I2C1] = { 0x2010b000, 0x1000 }, + [MICROCHIP_PFSOC_CAN0] = { 0x2010c000, 0x1000 }, + [MICROCHIP_PFSOC_CAN1] = { 0x2010d000, 0x1000 }, [MICROCHIP_PFSOC_GEM0] = { 0x20110000, 0x2000 }, [MICROCHIP_PFSOC_GEM1] = { 0x20112000, 0x2000 }, [MICROCHIP_PFSOC_GPIO0] = { 0x20120000, 0x1000 }, [MICROCHIP_PFSOC_GPIO1] = { 0x20121000, 0x1000 }, [MICROCHIP_PFSOC_GPIO2] = { 0x20122000, 0x1000 }, + [MICROCHIP_PFSOC_RTC] = { 0x20124000, 0x1000 }, [MICROCHIP_PFSOC_ENVM_CFG] = { 0x20200000, 0x1000 }, [MICROCHIP_PFSOC_ENVM_DATA] = { 0x20220000, 0x20000 }, + [MICROCHIP_PFSOC_USB] = { 0x20201000, 0x1000 }, [MICROCHIP_PFSOC_QSPI_XIP] = { 0x21000000, 0x1000000 }, [MICROCHIP_PFSOC_IOSCB] = { 0x30000000, 0x10000000 }, - [MICROCHIP_PFSOC_EMMC_SD_MUX] = { 0x4f000000, 0x4 }, + [MICROCHIP_PFSOC_FABRIC_FIC3] = { 0x40000000, 0x20000000 }, [MICROCHIP_PFSOC_DRAM_LO] = { 0x80000000, 0x40000000 }, [MICROCHIP_PFSOC_DRAM_LO_ALIAS] = { 0xc0000000, 0x40000000 }, [MICROCHIP_PFSOC_DRAM_HI] = { 0x1000000000, 0x0 }, @@ -187,7 +199,6 @@ static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp) MemoryRegion *envm_data = g_new(MemoryRegion, 1); MemoryRegion *qspi_xip_mem = g_new(MemoryRegion, 1); char *plic_hart_config; - size_t plic_hart_config_len; NICInfo *nd; int i; @@ -234,9 +245,12 @@ static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp) memmap[MICROCHIP_PFSOC_BUSERR_UNIT4].size); /* CLINT */ - 
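The CLINT hunk that follows is the first of several places where a single sifive_clint_create() call is split into the two ACLINT helpers. A sketch of the recurring pattern, reusing the helper calls exactly as they appear in these hunks (example_create_aclint and its parameters are placeholder names; the includes assume a QEMU build tree, so this compiles only inside it):

/* Sketch only: MSWI sits at the old CLINT base, MTIMER follows it. */
#include "qemu/osdep.h"
#include "exec/hwaddr.h"
#include "hw/intc/riscv_aclint.h"

static void example_create_aclint(hwaddr base, uint32_t hartid_base,
                                  uint32_t num_harts, uint32_t timebase_freq)
{
    /* Software-interrupt (MSIP) half of the former CLINT */
    riscv_aclint_swi_create(base, hartid_base, num_harts, false);

    /* Timer (mtimecmp/mtime) half of the former CLINT */
    riscv_aclint_mtimer_create(base + RISCV_ACLINT_SWI_SIZE,
                               RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
                               hartid_base, num_harts,
                               RISCV_ACLINT_DEFAULT_MTIMECMP,
                               RISCV_ACLINT_DEFAULT_MTIME,
                               timebase_freq, false);
}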
sifive_clint_create(memmap[MICROCHIP_PFSOC_CLINT].base, - memmap[MICROCHIP_PFSOC_CLINT].size, 0, ms->smp.cpus, - SIFIVE_SIP_BASE, SIFIVE_TIMECMP_BASE, SIFIVE_TIME_BASE, + riscv_aclint_swi_create(memmap[MICROCHIP_PFSOC_CLINT].base, + 0, ms->smp.cpus, false); + riscv_aclint_mtimer_create( + memmap[MICROCHIP_PFSOC_CLINT].base + RISCV_ACLINT_SWI_SIZE, + RISCV_ACLINT_DEFAULT_MTIMER_SIZE, 0, ms->smp.cpus, + RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME, CLINT_TIMEBASE_FREQ, false); /* L2 cache controller */ @@ -259,22 +273,11 @@ static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp) l2lim_mem); /* create PLIC hart topology configuration string */ - plic_hart_config_len = (strlen(MICROCHIP_PFSOC_PLIC_HART_CONFIG) + 1) * - ms->smp.cpus; - plic_hart_config = g_malloc0(plic_hart_config_len); - for (i = 0; i < ms->smp.cpus; i++) { - if (i != 0) { - strncat(plic_hart_config, "," MICROCHIP_PFSOC_PLIC_HART_CONFIG, - plic_hart_config_len); - } else { - strncat(plic_hart_config, "M", plic_hart_config_len); - } - plic_hart_config_len -= (strlen(MICROCHIP_PFSOC_PLIC_HART_CONFIG) + 1); - } + plic_hart_config = riscv_plic_hart_config_string(ms->smp.cpus); /* PLIC */ s->plic = sifive_plic_create(memmap[MICROCHIP_PFSOC_PLIC].base, - plic_hart_config, 0, + plic_hart_config, ms->smp.cpus, 0, MICROCHIP_PFSOC_PLIC_NUM_SOURCES, MICROCHIP_PFSOC_PLIC_NUM_PRIORITIES, MICROCHIP_PFSOC_PLIC_PRIORITY_BASE, @@ -301,11 +304,21 @@ static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->sysreg), 0, memmap[MICROCHIP_PFSOC_SYSREG].base); + /* AXISW */ + create_unimplemented_device("microchip.pfsoc.axisw", + memmap[MICROCHIP_PFSOC_AXISW].base, + memmap[MICROCHIP_PFSOC_AXISW].size); + /* MPUCFG */ create_unimplemented_device("microchip.pfsoc.mpucfg", memmap[MICROCHIP_PFSOC_MPUCFG].base, memmap[MICROCHIP_PFSOC_MPUCFG].size); + /* FMETER */ + create_unimplemented_device("microchip.pfsoc.fmeter", + memmap[MICROCHIP_PFSOC_FMETER].base, + memmap[MICROCHIP_PFSOC_FMETER].size); + /* DDR SGMII PHY */ sysbus_realize(SYS_BUS_DEVICE(&s->ddr_sgmii_phy), errp); sysbus_mmio_map(SYS_BUS_DEVICE(&s->ddr_sgmii_phy), 0, @@ -345,6 +358,23 @@ static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp) qdev_get_gpio_in(DEVICE(s->plic), MICROCHIP_PFSOC_MMUART4_IRQ), serial_hd(4)); + /* Watchdogs */ + create_unimplemented_device("microchip.pfsoc.watchdog0", + memmap[MICROCHIP_PFSOC_WDOG0].base, + memmap[MICROCHIP_PFSOC_WDOG0].size); + create_unimplemented_device("microchip.pfsoc.watchdog1", + memmap[MICROCHIP_PFSOC_WDOG1].base, + memmap[MICROCHIP_PFSOC_WDOG1].size); + create_unimplemented_device("microchip.pfsoc.watchdog2", + memmap[MICROCHIP_PFSOC_WDOG2].base, + memmap[MICROCHIP_PFSOC_WDOG2].size); + create_unimplemented_device("microchip.pfsoc.watchdog3", + memmap[MICROCHIP_PFSOC_WDOG3].base, + memmap[MICROCHIP_PFSOC_WDOG3].size); + create_unimplemented_device("microchip.pfsoc.watchdog4", + memmap[MICROCHIP_PFSOC_WDOG4].base, + memmap[MICROCHIP_PFSOC_WDOG4].size); + /* SPI */ create_unimplemented_device("microchip.pfsoc.spi0", memmap[MICROCHIP_PFSOC_SPI0].base, @@ -353,11 +383,27 @@ static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp) memmap[MICROCHIP_PFSOC_SPI1].base, memmap[MICROCHIP_PFSOC_SPI1].size); - /* I2C1 */ + /* I2C */ + create_unimplemented_device("microchip.pfsoc.i2c0", + memmap[MICROCHIP_PFSOC_I2C0].base, + memmap[MICROCHIP_PFSOC_I2C0].size); create_unimplemented_device("microchip.pfsoc.i2c1", memmap[MICROCHIP_PFSOC_I2C1].base, 
memmap[MICROCHIP_PFSOC_I2C1].size); + /* CAN */ + create_unimplemented_device("microchip.pfsoc.can0", + memmap[MICROCHIP_PFSOC_CAN0].base, + memmap[MICROCHIP_PFSOC_CAN0].size); + create_unimplemented_device("microchip.pfsoc.can1", + memmap[MICROCHIP_PFSOC_CAN1].base, + memmap[MICROCHIP_PFSOC_CAN1].size); + + /* USB */ + create_unimplemented_device("microchip.pfsoc.usb", + memmap[MICROCHIP_PFSOC_USB].base, + memmap[MICROCHIP_PFSOC_USB].size); + /* GEMs */ nd = &nd_table[0]; @@ -411,10 +457,10 @@ static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->ioscb), 0, memmap[MICROCHIP_PFSOC_IOSCB].base); - /* eMMC/SD mux */ - create_unimplemented_device("microchip.pfsoc.emmc_sd_mux", - memmap[MICROCHIP_PFSOC_EMMC_SD_MUX].base, - memmap[MICROCHIP_PFSOC_EMMC_SD_MUX].size); + /* FPGA Fabric */ + create_unimplemented_device("microchip.pfsoc.fabricfic3", + memmap[MICROCHIP_PFSOC_FABRIC_FIC3].base, + memmap[MICROCHIP_PFSOC_FABRIC_FIC3].size); /* QSPI Flash */ memory_region_init_rom(qspi_xip_mem, OBJECT(dev), @@ -460,14 +506,14 @@ static void microchip_icicle_kit_machine_init(MachineState *machine) MemoryRegion *mem_low_alias = g_new(MemoryRegion, 1); MemoryRegion *mem_high = g_new(MemoryRegion, 1); MemoryRegion *mem_high_alias = g_new(MemoryRegion, 1); - uint64_t mem_high_size; + uint64_t mem_low_size, mem_high_size; hwaddr firmware_load_addr; const char *firmware_name; bool kernel_as_payload = false; target_ulong firmware_end_addr, kernel_start_addr; uint64_t kernel_entry; uint32_t fdt_load_addr; - DriveInfo *dinfo = drive_get_next(IF_SD); + DriveInfo *dinfo = drive_get(IF_SD, 0, 0); /* Sanity check on RAM size */ if (machine->ram_size < mc->default_ram_size) { @@ -480,33 +526,36 @@ static void microchip_icicle_kit_machine_init(MachineState *machine) /* Initialize SoC */ object_initialize_child(OBJECT(machine), "soc", &s->soc, TYPE_MICROCHIP_PFSOC); - qdev_realize(DEVICE(&s->soc), NULL, &error_abort); + qdev_realize(DEVICE(&s->soc), NULL, &error_fatal); + + /* Split RAM into low and high regions using aliases to machine->ram */ + mem_low_size = memmap[MICROCHIP_PFSOC_DRAM_LO].size; + mem_high_size = machine->ram_size - mem_low_size; + memory_region_init_alias(mem_low, NULL, + "microchip.icicle.kit.ram_low", machine->ram, + 0, mem_low_size); + memory_region_init_alias(mem_high, NULL, + "microchip.icicle.kit.ram_high", machine->ram, + mem_low_size, mem_high_size); /* Register RAM */ - memory_region_init_ram(mem_low, NULL, "microchip.icicle.kit.ram_low", - memmap[MICROCHIP_PFSOC_DRAM_LO].size, - &error_fatal); - memory_region_init_alias(mem_low_alias, NULL, - "microchip.icicle.kit.ram_low.alias", - mem_low, 0, - memmap[MICROCHIP_PFSOC_DRAM_LO_ALIAS].size); memory_region_add_subregion(system_memory, memmap[MICROCHIP_PFSOC_DRAM_LO].base, mem_low); + memory_region_add_subregion(system_memory, + memmap[MICROCHIP_PFSOC_DRAM_HI].base, + mem_high); + + /* Create aliases for the low and high RAM regions */ + memory_region_init_alias(mem_low_alias, NULL, + "microchip.icicle.kit.ram_low.alias", + mem_low, 0, mem_low_size); memory_region_add_subregion(system_memory, memmap[MICROCHIP_PFSOC_DRAM_LO_ALIAS].base, mem_low_alias); - - mem_high_size = machine->ram_size - 1 * GiB; - - memory_region_init_ram(mem_high, NULL, "microchip.icicle.kit.ram_high", - mem_high_size, &error_fatal); memory_region_init_alias(mem_high_alias, NULL, "microchip.icicle.kit.ram_high.alias", mem_high, 0, mem_high_size); - memory_region_add_subregion(system_memory, - 
memmap[MICROCHIP_PFSOC_DRAM_HI].base, - mem_high); memory_region_add_subregion(system_memory, memmap[MICROCHIP_PFSOC_DRAM_HI_ALIAS].base, mem_high_alias); @@ -577,7 +626,7 @@ static void microchip_icicle_kit_machine_init(MachineState *machine) "linux,initrd-end", end); } - if (machine->kernel_cmdline) { + if (machine->kernel_cmdline && *machine->kernel_cmdline) { qemu_fdt_setprop_string(machine->fdt, "/chosen", "bootargs", machine->kernel_cmdline); } @@ -589,7 +638,7 @@ static void microchip_icicle_kit_machine_init(MachineState *machine) riscv_setup_rom_reset_vec(machine, &s->soc.u_cpus, firmware_load_addr, memmap[MICROCHIP_PFSOC_ENVM_DATA].base, memmap[MICROCHIP_PFSOC_ENVM_DATA].size, - kernel_entry, fdt_load_addr, machine->fdt); + kernel_entry, fdt_load_addr); } } @@ -603,6 +652,7 @@ static void microchip_icicle_kit_machine_class_init(ObjectClass *oc, void *data) MICROCHIP_PFSOC_COMPUTE_CPU_COUNT; mc->min_cpus = MICROCHIP_PFSOC_MANAGEMENT_CPU_COUNT + 1; mc->default_cpus = mc->min_cpus; + mc->default_ram_id = "microchip.icicle.kit.ram"; /* * Map 513 MiB high memory, the mimimum required high memory size, because diff --git a/hw/riscv/numa.c b/hw/riscv/numa.c index 7fe92d402f..edf6750f54 100644 --- a/hw/riscv/numa.c +++ b/hw/riscv/numa.c @@ -207,6 +207,12 @@ int64_t riscv_numa_get_default_cpu_node_id(const MachineState *ms, int idx) { int64_t nidx = 0; + if (ms->numa_state->num_nodes > ms->smp.cpus) { + error_report("Number of NUMA nodes (%d)" + " cannot exceed the number of available CPUs (%d).", + ms->numa_state->num_nodes, ms->smp.max_cpus); + exit(EXIT_FAILURE); + } if (ms->numa_state->num_nodes) { nidx = idx / (ms->smp.cpus / ms->numa_state->num_nodes); if (ms->numa_state->num_nodes <= nidx) { diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c index 36a41c8b5b..be7ff1eea0 100644 --- a/hw/riscv/opentitan.c +++ b/hw/riscv/opentitan.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include "qemu/cutils.h" #include "hw/riscv/opentitan.h" #include "qapi/error.h" #include "hw/boards.h" @@ -28,56 +29,64 @@ #include "sysemu/sysemu.h" static const MemMapEntry ibex_memmap[] = { - [IBEX_DEV_ROM] = { 0x00008000, 16 * KiB }, - [IBEX_DEV_RAM] = { 0x10000000, 0x10000 }, - [IBEX_DEV_FLASH] = { 0x20000000, 0x80000 }, + [IBEX_DEV_ROM] = { 0x00008000, 0x8000 }, + [IBEX_DEV_RAM] = { 0x10000000, 0x20000 }, + [IBEX_DEV_FLASH] = { 0x20000000, 0x100000 }, [IBEX_DEV_UART] = { 0x40000000, 0x1000 }, [IBEX_DEV_GPIO] = { 0x40040000, 0x1000 }, - [IBEX_DEV_SPI] = { 0x40050000, 0x1000 }, + [IBEX_DEV_SPI_DEVICE] = { 0x40050000, 0x1000 }, [IBEX_DEV_I2C] = { 0x40080000, 0x1000 }, [IBEX_DEV_PATTGEN] = { 0x400e0000, 0x1000 }, [IBEX_DEV_TIMER] = { 0x40100000, 0x1000 }, [IBEX_DEV_SENSOR_CTRL] = { 0x40110000, 0x1000 }, [IBEX_DEV_OTP_CTRL] = { 0x40130000, 0x4000 }, + [IBEX_DEV_LC_CTRL] = { 0x40140000, 0x1000 }, + [IBEX_DEV_USBDEV] = { 0x40150000, 0x1000 }, + [IBEX_DEV_SPI_HOST0] = { 0x40300000, 0x1000 }, + [IBEX_DEV_SPI_HOST1] = { 0x40310000, 0x1000 }, [IBEX_DEV_PWRMGR] = { 0x40400000, 0x1000 }, [IBEX_DEV_RSTMGR] = { 0x40410000, 0x1000 }, [IBEX_DEV_CLKMGR] = { 0x40420000, 0x1000 }, [IBEX_DEV_PINMUX] = { 0x40460000, 0x1000 }, [IBEX_DEV_PADCTRL] = { 0x40470000, 0x1000 }, - [IBEX_DEV_USBDEV] = { 0x40500000, 0x1000 }, [IBEX_DEV_FLASH_CTRL] = { 0x41000000, 0x1000 }, - [IBEX_DEV_PLIC] = { 0x41010000, 0x1000 }, [IBEX_DEV_AES] = { 0x41100000, 0x1000 }, [IBEX_DEV_HMAC] = { 0x41110000, 0x1000 }, [IBEX_DEV_KMAC] = { 0x41120000, 0x1000 }, - [IBEX_DEV_KEYMGR] = { 0x41130000, 0x1000 }, + [IBEX_DEV_OTBN] = { 0x41130000, 0x10000 }, + 
[IBEX_DEV_KEYMGR] = { 0x41140000, 0x1000 }, [IBEX_DEV_CSRNG] = { 0x41150000, 0x1000 }, [IBEX_DEV_ENTROPY] = { 0x41160000, 0x1000 }, [IBEX_DEV_EDNO] = { 0x41170000, 0x1000 }, [IBEX_DEV_EDN1] = { 0x41180000, 0x1000 }, [IBEX_DEV_ALERT_HANDLER] = { 0x411b0000, 0x1000 }, [IBEX_DEV_NMI_GEN] = { 0x411c0000, 0x1000 }, - [IBEX_DEV_OTBN] = { 0x411d0000, 0x10000 }, [IBEX_DEV_PERI] = { 0x411f0000, 0x10000 }, + [IBEX_DEV_PLIC] = { 0x48000000, 0x4005000 }, [IBEX_DEV_FLASH_VIRTUAL] = { 0x80000000, 0x80000 }, }; static void opentitan_board_init(MachineState *machine) { + MachineClass *mc = MACHINE_GET_CLASS(machine); const MemMapEntry *memmap = ibex_memmap; OpenTitanState *s = g_new0(OpenTitanState, 1); MemoryRegion *sys_mem = get_system_memory(); - MemoryRegion *main_mem = g_new(MemoryRegion, 1); + + if (machine->ram_size != mc->default_ram_size) { + char *sz = size_to_str(mc->default_ram_size); + error_report("Invalid RAM size, should be %s", sz); + g_free(sz); + exit(EXIT_FAILURE); + } /* Initialize SoC */ object_initialize_child(OBJECT(machine), "soc", &s->soc, TYPE_RISCV_IBEX_SOC); - qdev_realize(DEVICE(&s->soc), NULL, &error_abort); + qdev_realize(DEVICE(&s->soc), NULL, &error_fatal); - memory_region_init_ram(main_mem, NULL, "riscv.lowrisc.ibex.ram", - memmap[IBEX_DEV_RAM].size, &error_fatal); memory_region_add_subregion(sys_mem, - memmap[IBEX_DEV_RAM].base, main_mem); + memmap[IBEX_DEV_RAM].base, machine->ram); if (machine->firmware) { riscv_load_firmware(machine->firmware, memmap[IBEX_DEV_RAM].base, NULL); @@ -95,6 +104,8 @@ static void opentitan_machine_init(MachineClass *mc) mc->init = opentitan_board_init; mc->max_cpus = 1; mc->default_cpu_type = TYPE_RISCV_CPU_IBEX; + mc->default_ram_id = "riscv.lowrisc.ibex.ram"; + mc->default_ram_size = ibex_memmap[IBEX_DEV_RAM].size; } DEFINE_MACHINE("opentitan", opentitan_machine_init) @@ -105,26 +116,35 @@ static void lowrisc_ibex_soc_init(Object *obj) object_initialize_child(obj, "cpus", &s->cpus, TYPE_RISCV_HART_ARRAY); - object_initialize_child(obj, "plic", &s->plic, TYPE_IBEX_PLIC); + object_initialize_child(obj, "plic", &s->plic, TYPE_SIFIVE_PLIC); object_initialize_child(obj, "uart", &s->uart, TYPE_IBEX_UART); object_initialize_child(obj, "timer", &s->timer, TYPE_IBEX_TIMER); + + for (int i = 0; i < OPENTITAN_NUM_SPI_HOSTS; i++) { + object_initialize_child(obj, "spi_host[*]", &s->spi_host[i], + TYPE_IBEX_SPI_HOST); + } } static void lowrisc_ibex_soc_realize(DeviceState *dev_soc, Error **errp) { const MemMapEntry *memmap = ibex_memmap; + DeviceState *dev; + SysBusDevice *busdev; MachineState *ms = MACHINE(qdev_get_machine()); LowRISCIbexSoCState *s = RISCV_IBEX_SOC(dev_soc); MemoryRegion *sys_mem = get_system_memory(); + int i; object_property_set_str(OBJECT(&s->cpus), "cpu-type", ms->cpu_type, &error_abort); object_property_set_int(OBJECT(&s->cpus), "num-harts", ms->smp.cpus, &error_abort); - object_property_set_int(OBJECT(&s->cpus), "resetvec", 0x8080, &error_abort); - sysbus_realize(SYS_BUS_DEVICE(&s->cpus), &error_abort); + object_property_set_int(OBJECT(&s->cpus), "resetvec", s->resetvec, + &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->cpus), &error_fatal); /* Boot ROM */ memory_region_init_rom(&s->rom, OBJECT(dev_soc), "riscv.lowrisc.ibex.rom", @@ -144,11 +164,30 @@ static void lowrisc_ibex_soc_realize(DeviceState *dev_soc, Error **errp) &s->flash_alias); /* PLIC */ + qdev_prop_set_string(DEVICE(&s->plic), "hart-config", "M"); + qdev_prop_set_uint32(DEVICE(&s->plic), "hartid-base", 0); + qdev_prop_set_uint32(DEVICE(&s->plic), 
"num-sources", 180); + qdev_prop_set_uint32(DEVICE(&s->plic), "num-priorities", 3); + qdev_prop_set_uint32(DEVICE(&s->plic), "priority-base", 0x00); + qdev_prop_set_uint32(DEVICE(&s->plic), "pending-base", 0x1000); + qdev_prop_set_uint32(DEVICE(&s->plic), "enable-base", 0x2000); + qdev_prop_set_uint32(DEVICE(&s->plic), "enable-stride", 32); + qdev_prop_set_uint32(DEVICE(&s->plic), "context-base", 0x200000); + qdev_prop_set_uint32(DEVICE(&s->plic), "context-stride", 8); + qdev_prop_set_uint32(DEVICE(&s->plic), "aperture-size", memmap[IBEX_DEV_PLIC].size); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->plic), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->plic), 0, memmap[IBEX_DEV_PLIC].base); + for (i = 0; i < ms->smp.cpus; i++) { + CPUState *cpu = qemu_get_cpu(i); + + qdev_connect_gpio_out(DEVICE(&s->plic), ms->smp.cpus + i, + qdev_get_gpio_in(DEVICE(cpu), IRQ_M_EXT)); + } + /* UART */ qdev_prop_set_chr(DEVICE(&(s->uart)), "chardev", serial_hd(0)); if (!sysbus_realize(SYS_BUS_DEVICE(&s->uart), errp)) { @@ -175,11 +214,39 @@ static void lowrisc_ibex_soc_realize(DeviceState *dev_soc, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->timer), 0, qdev_get_gpio_in(DEVICE(&s->plic), IBEX_TIMER_TIMEREXPIRED0_0)); + qdev_connect_gpio_out(DEVICE(&s->timer), 0, + qdev_get_gpio_in(DEVICE(qemu_get_cpu(0)), + IRQ_M_TIMER)); + + /* SPI-Hosts */ + for (int i = 0; i < OPENTITAN_NUM_SPI_HOSTS; ++i) { + dev = DEVICE(&(s->spi_host[i])); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi_host[i]), errp)) { + return; + } + busdev = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(busdev, 0, memmap[IBEX_DEV_SPI_HOST0 + i].base); + + switch (i) { + case OPENTITAN_SPI_HOST0: + sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(DEVICE(&s->plic), + IBEX_SPI_HOST0_ERR_IRQ)); + sysbus_connect_irq(busdev, 1, qdev_get_gpio_in(DEVICE(&s->plic), + IBEX_SPI_HOST0_SPI_EVENT_IRQ)); + break; + case OPENTITAN_SPI_HOST1: + sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(DEVICE(&s->plic), + IBEX_SPI_HOST1_ERR_IRQ)); + sysbus_connect_irq(busdev, 1, qdev_get_gpio_in(DEVICE(&s->plic), + IBEX_SPI_HOST1_SPI_EVENT_IRQ)); + break; + } + } create_unimplemented_device("riscv.lowrisc.ibex.gpio", memmap[IBEX_DEV_GPIO].base, memmap[IBEX_DEV_GPIO].size); - create_unimplemented_device("riscv.lowrisc.ibex.spi", - memmap[IBEX_DEV_SPI].base, memmap[IBEX_DEV_SPI].size); + create_unimplemented_device("riscv.lowrisc.ibex.spi_device", + memmap[IBEX_DEV_SPI_DEVICE].base, memmap[IBEX_DEV_SPI_DEVICE].size); create_unimplemented_device("riscv.lowrisc.ibex.i2c", memmap[IBEX_DEV_I2C].base, memmap[IBEX_DEV_I2C].size); create_unimplemented_device("riscv.lowrisc.ibex.pattgen", @@ -188,6 +255,8 @@ static void lowrisc_ibex_soc_realize(DeviceState *dev_soc, Error **errp) memmap[IBEX_DEV_SENSOR_CTRL].base, memmap[IBEX_DEV_SENSOR_CTRL].size); create_unimplemented_device("riscv.lowrisc.ibex.otp_ctrl", memmap[IBEX_DEV_OTP_CTRL].base, memmap[IBEX_DEV_OTP_CTRL].size); + create_unimplemented_device("riscv.lowrisc.ibex.lc_ctrl", + memmap[IBEX_DEV_LC_CTRL].base, memmap[IBEX_DEV_LC_CTRL].size); create_unimplemented_device("riscv.lowrisc.ibex.pwrmgr", memmap[IBEX_DEV_PWRMGR].base, memmap[IBEX_DEV_PWRMGR].size); create_unimplemented_device("riscv.lowrisc.ibex.rstmgr", @@ -228,10 +297,16 @@ static void lowrisc_ibex_soc_realize(DeviceState *dev_soc, Error **errp) memmap[IBEX_DEV_PERI].base, memmap[IBEX_DEV_PERI].size); } +static Property lowrisc_ibex_soc_props[] = { + DEFINE_PROP_UINT32("resetvec", LowRISCIbexSoCState, resetvec, 0x20000400), + DEFINE_PROP_END_OF_LIST() +}; + static void 
lowrisc_ibex_soc_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); + device_class_set_props(dc, lowrisc_ibex_soc_props); dc->realize = lowrisc_ibex_soc_realize; /* Reason: Uses serial_hds in realize function, thus can't be used twice */ dc->user_creatable = false; diff --git a/hw/riscv/shakti_c.c b/hw/riscv/shakti_c.c index 18f70fadaa..e43cc9445c 100644 --- a/hw/riscv/shakti_c.c +++ b/hw/riscv/shakti_c.c @@ -21,7 +21,7 @@ #include "hw/riscv/shakti_c.h" #include "qapi/error.h" #include "hw/intc/sifive_plic.h" -#include "hw/intc/sifive_clint.h" +#include "hw/intc/riscv_aclint.h" #include "sysemu/sysemu.h" #include "hw/qdev-properties.h" #include "exec/address-spaces.h" @@ -45,7 +45,6 @@ static void shakti_c_machine_state_init(MachineState *mstate) { ShaktiCMachineState *sms = RISCV_SHAKTI_MACHINE(mstate); MemoryRegion *system_memory = get_system_memory(); - MemoryRegion *main_mem = g_new(MemoryRegion, 1); /* Allow only Shakti C CPU for this platform */ if (strcmp(mstate->cpu_type, TYPE_RISCV_CPU_SHAKTI_C) != 0) { @@ -59,18 +58,15 @@ static void shakti_c_machine_state_init(MachineState *mstate) qdev_realize(DEVICE(&sms->soc), NULL, &error_abort); /* register RAM */ - memory_region_init_ram(main_mem, NULL, "riscv.shakti.c.ram", - mstate->ram_size, &error_fatal); memory_region_add_subregion(system_memory, shakti_c_memmap[SHAKTI_C_RAM].base, - main_mem); + mstate->ram); /* ROM reset vector */ riscv_setup_rom_reset_vec(mstate, &sms->soc.cpus, shakti_c_memmap[SHAKTI_C_RAM].base, shakti_c_memmap[SHAKTI_C_ROM].base, - shakti_c_memmap[SHAKTI_C_ROM].size, 0, 0, - NULL); + shakti_c_memmap[SHAKTI_C_ROM].size, 0, 0); if (mstate->firmware) { riscv_load_firmware(mstate->firmware, shakti_c_memmap[SHAKTI_C_RAM].base, @@ -88,6 +84,7 @@ static void shakti_c_machine_class_init(ObjectClass *klass, void *data) mc->desc = "RISC-V Board compatible with Shakti SDK"; mc->init = shakti_c_machine_state_init; mc->default_cpu_type = TYPE_RISCV_CPU_SHAKTI_C; + mc->default_ram_id = "riscv.shakti.c.ram"; } static const TypeInfo shakti_c_machine_type_info = { @@ -106,13 +103,14 @@ type_init(shakti_c_machine_type_info_register) static void shakti_c_soc_state_realize(DeviceState *dev, Error **errp) { + MachineState *ms = MACHINE(qdev_get_machine()); ShaktiCSoCState *sss = RISCV_SHAKTI_SOC(dev); MemoryRegion *system_memory = get_system_memory(); sysbus_realize(SYS_BUS_DEVICE(&sss->cpus), &error_abort); sss->plic = sifive_plic_create(shakti_c_memmap[SHAKTI_C_PLIC].base, - (char *)SHAKTI_C_PLIC_HART_CONFIG, 0, + (char *)SHAKTI_C_PLIC_HART_CONFIG, ms->smp.cpus, 0, SHAKTI_C_PLIC_NUM_SOURCES, SHAKTI_C_PLIC_NUM_PRIORITIES, SHAKTI_C_PLIC_PRIORITY_BASE, @@ -123,10 +121,13 @@ static void shakti_c_soc_state_realize(DeviceState *dev, Error **errp) SHAKTI_C_PLIC_CONTEXT_STRIDE, shakti_c_memmap[SHAKTI_C_PLIC].size); - sifive_clint_create(shakti_c_memmap[SHAKTI_C_CLINT].base, - shakti_c_memmap[SHAKTI_C_CLINT].size, 0, 1, - SIFIVE_SIP_BASE, SIFIVE_TIMECMP_BASE, SIFIVE_TIME_BASE, - SIFIVE_CLINT_TIMEBASE_FREQ, false); + riscv_aclint_swi_create(shakti_c_memmap[SHAKTI_C_CLINT].base, + 0, 1, false); + riscv_aclint_mtimer_create(shakti_c_memmap[SHAKTI_C_CLINT].base + + RISCV_ACLINT_SWI_SIZE, + RISCV_ACLINT_DEFAULT_MTIMER_SIZE, 0, 1, + RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME, + RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, false); qdev_prop_set_chr(DEVICE(&(sss->uart)), "chardev", serial_hd(0)); if (!sysbus_realize(SYS_BUS_DEVICE(&sss->uart), errp)) { @@ -146,6 +147,13 @@ static void 
shakti_c_soc_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = shakti_c_soc_state_realize; + /* + * Reasons: + * - Creates CPUS in riscv_hart_realize(), and can create unintended + * CPUs + * - Uses serial_hds in realize function, thus can't be used twice + */ + dc->user_creatable = false; } static void shakti_c_soc_instance_init(Object *obj) diff --git a/hw/riscv/sifive_e.c b/hw/riscv/sifive_e.c index ddc658c8d6..d65d2fd869 100644 --- a/hw/riscv/sifive_e.c +++ b/hw/riscv/sifive_e.c @@ -29,6 +29,7 @@ */ #include "qemu/osdep.h" +#include "qemu/cutils.h" #include "qemu/error-report.h" #include "qapi/error.h" #include "hw/boards.h" @@ -41,11 +42,10 @@ #include "hw/riscv/sifive_e.h" #include "hw/riscv/boot.h" #include "hw/char/sifive_uart.h" -#include "hw/intc/sifive_clint.h" +#include "hw/intc/riscv_aclint.h" #include "hw/intc/sifive_plic.h" #include "hw/misc/sifive_e_prci.h" #include "chardev/char.h" -#include "sysemu/arch_init.h" #include "sysemu/sysemu.h" static const MemMapEntry sifive_e_memmap[] = { @@ -72,22 +72,27 @@ static const MemMapEntry sifive_e_memmap[] = { static void sifive_e_machine_init(MachineState *machine) { + MachineClass *mc = MACHINE_GET_CLASS(machine); const MemMapEntry *memmap = sifive_e_memmap; SiFiveEState *s = RISCV_E_MACHINE(machine); MemoryRegion *sys_mem = get_system_memory(); - MemoryRegion *main_mem = g_new(MemoryRegion, 1); int i; + if (machine->ram_size != mc->default_ram_size) { + char *sz = size_to_str(mc->default_ram_size); + error_report("Invalid RAM size, should be %s", sz); + g_free(sz); + exit(EXIT_FAILURE); + } + /* Initialize SoC */ object_initialize_child(OBJECT(machine), "soc", &s->soc, TYPE_RISCV_E_SOC); - qdev_realize(DEVICE(&s->soc), NULL, &error_abort); + qdev_realize(DEVICE(&s->soc), NULL, &error_fatal); /* Data Tightly Integrated Memory */ - memory_region_init_ram(main_mem, NULL, "riscv.sifive.e.ram", - memmap[SIFIVE_E_DEV_DTIM].size, &error_fatal); memory_region_add_subregion(sys_mem, - memmap[SIFIVE_E_DEV_DTIM].base, main_mem); + memmap[SIFIVE_E_DEV_DTIM].base, machine->ram); /* Mask ROM reset vector */ uint32_t reset_vec[4]; @@ -143,6 +148,8 @@ static void sifive_e_machine_class_init(ObjectClass *oc, void *data) mc->init = sifive_e_machine_init; mc->max_cpus = 1; mc->default_cpu_type = SIFIVE_E_CPU; + mc->default_ram_id = "riscv.sifive.e.ram"; + mc->default_ram_size = sifive_e_memmap[SIFIVE_E_DEV_DTIM].size; object_class_property_add_bool(oc, "revb", sifive_e_machine_get_revb, sifive_e_machine_set_revb); @@ -188,7 +195,7 @@ static void sifive_e_soc_realize(DeviceState *dev, Error **errp) object_property_set_str(OBJECT(&s->cpus), "cpu-type", ms->cpu_type, &error_abort); - sysbus_realize(SYS_BUS_DEVICE(&s->cpus), &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->cpus), &error_fatal); /* Mask ROM */ memory_region_init_rom(&s->mask_rom, OBJECT(dev), "riscv.sifive.e.mrom", @@ -198,7 +205,7 @@ static void sifive_e_soc_realize(DeviceState *dev, Error **errp) /* MMIO */ s->plic = sifive_plic_create(memmap[SIFIVE_E_DEV_PLIC].base, - (char *)SIFIVE_E_PLIC_HART_CONFIG, 0, + (char *)SIFIVE_E_PLIC_HART_CONFIG, ms->smp.cpus, 0, SIFIVE_E_PLIC_NUM_SOURCES, SIFIVE_E_PLIC_NUM_PRIORITIES, SIFIVE_E_PLIC_PRIORITY_BASE, @@ -208,10 +215,13 @@ static void sifive_e_soc_realize(DeviceState *dev, Error **errp) SIFIVE_E_PLIC_CONTEXT_BASE, SIFIVE_E_PLIC_CONTEXT_STRIDE, memmap[SIFIVE_E_DEV_PLIC].size); - sifive_clint_create(memmap[SIFIVE_E_DEV_CLINT].base, - memmap[SIFIVE_E_DEV_CLINT].size, 0, ms->smp.cpus, - 
SIFIVE_SIP_BASE, SIFIVE_TIMECMP_BASE, SIFIVE_TIME_BASE, - SIFIVE_CLINT_TIMEBASE_FREQ, false); + riscv_aclint_swi_create(memmap[SIFIVE_E_DEV_CLINT].base, + 0, ms->smp.cpus, false); + riscv_aclint_mtimer_create(memmap[SIFIVE_E_DEV_CLINT].base + + RISCV_ACLINT_SWI_SIZE, + RISCV_ACLINT_DEFAULT_MTIMER_SIZE, 0, ms->smp.cpus, + RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME, + RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, false); create_unimplemented_device("riscv.sifive.e.aon", memmap[SIFIVE_E_DEV_AON].base, memmap[SIFIVE_E_DEV_AON].size); sifive_e_prci_create(memmap[SIFIVE_E_DEV_PRCI].base); diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c index 87bbd10b21..b139824aab 100644 --- a/hw/riscv/sifive_u.c +++ b/hw/riscv/sifive_u.c @@ -17,6 +17,7 @@ * 7) DMA (Direct Memory Access Controller) * 8) SPI0 connected to an SPI flash * 9) SPI2 connected to an SD card + * 10) PWM0 and PWM1 * * This board currently generates devicetree dynamically that indicates at least * two harts and up to five harts. @@ -45,17 +46,17 @@ #include "hw/char/serial.h" #include "hw/cpu/cluster.h" #include "hw/misc/unimp.h" +#include "hw/sd/sd.h" #include "hw/ssi/ssi.h" #include "target/riscv/cpu.h" #include "hw/riscv/riscv_hart.h" #include "hw/riscv/sifive_u.h" #include "hw/riscv/boot.h" #include "hw/char/sifive_uart.h" -#include "hw/intc/sifive_clint.h" +#include "hw/intc/riscv_aclint.h" #include "hw/intc/sifive_plic.h" #include "chardev/char.h" #include "net/eth.h" -#include "sysemu/arch_init.h" #include "sysemu/device_tree.h" #include "sysemu/runstate.h" #include "sysemu/sysemu.h" @@ -76,6 +77,8 @@ static const MemMapEntry sifive_u_memmap[] = { [SIFIVE_U_DEV_PRCI] = { 0x10000000, 0x1000 }, [SIFIVE_U_DEV_UART0] = { 0x10010000, 0x1000 }, [SIFIVE_U_DEV_UART1] = { 0x10011000, 0x1000 }, + [SIFIVE_U_DEV_PWM0] = { 0x10020000, 0x1000 }, + [SIFIVE_U_DEV_PWM1] = { 0x10021000, 0x1000 }, [SIFIVE_U_DEV_QSPI0] = { 0x10040000, 0x1000 }, [SIFIVE_U_DEV_QSPI2] = { 0x10050000, 0x1000 }, [SIFIVE_U_DEV_GPIO] = { 0x10060000, 0x1000 }, @@ -442,6 +445,38 @@ static void create_fdt(SiFiveUState *s, const MemMapEntry *memmap, qemu_fdt_setprop_cell(fdt, nodename, "reg", 0x0); g_free(nodename); + nodename = g_strdup_printf("/soc/pwm@%lx", + (long)memmap[SIFIVE_U_DEV_PWM0].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "sifive,pwm0"); + qemu_fdt_setprop_cells(fdt, nodename, "reg", + 0x0, memmap[SIFIVE_U_DEV_PWM0].base, + 0x0, memmap[SIFIVE_U_DEV_PWM0].size); + qemu_fdt_setprop_cell(fdt, nodename, "interrupt-parent", plic_phandle); + qemu_fdt_setprop_cells(fdt, nodename, "interrupts", + SIFIVE_U_PWM0_IRQ0, SIFIVE_U_PWM0_IRQ1, + SIFIVE_U_PWM0_IRQ2, SIFIVE_U_PWM0_IRQ3); + qemu_fdt_setprop_cells(fdt, nodename, "clocks", + prci_phandle, PRCI_CLK_TLCLK); + qemu_fdt_setprop_cell(fdt, nodename, "#pwm-cells", 0); + g_free(nodename); + + nodename = g_strdup_printf("/soc/pwm@%lx", + (long)memmap[SIFIVE_U_DEV_PWM1].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "sifive,pwm0"); + qemu_fdt_setprop_cells(fdt, nodename, "reg", + 0x0, memmap[SIFIVE_U_DEV_PWM1].base, + 0x0, memmap[SIFIVE_U_DEV_PWM1].size); + qemu_fdt_setprop_cell(fdt, nodename, "interrupt-parent", plic_phandle); + qemu_fdt_setprop_cells(fdt, nodename, "interrupts", + SIFIVE_U_PWM1_IRQ0, SIFIVE_U_PWM1_IRQ1, + SIFIVE_U_PWM1_IRQ2, SIFIVE_U_PWM1_IRQ3); + qemu_fdt_setprop_cells(fdt, nodename, "clocks", + prci_phandle, PRCI_CLK_TLCLK); + qemu_fdt_setprop_cell(fdt, nodename, "#pwm-cells", 0); + 
g_free(nodename); + nodename = g_strdup_printf("/soc/serial@%lx", (long)memmap[SIFIVE_U_DEV_UART1].base); qemu_fdt_add_subnode(fdt, nodename); @@ -476,7 +511,7 @@ static void create_fdt(SiFiveUState *s, const MemMapEntry *memmap, g_free(nodename); update_bootargs: - if (cmdline) { + if (cmdline && *cmdline) { qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline); } } @@ -494,7 +529,6 @@ static void sifive_u_machine_init(MachineState *machine) const MemMapEntry *memmap = sifive_u_memmap; SiFiveUState *s = RISCV_U_MACHINE(machine); MemoryRegion *system_memory = get_system_memory(); - MemoryRegion *main_mem = g_new(MemoryRegion, 1); MemoryRegion *flash0 = g_new(MemoryRegion, 1); target_ulong start_addr = memmap[SIFIVE_U_DEV_DRAM].base; target_ulong firmware_end_addr, kernel_start_addr; @@ -503,7 +537,8 @@ static void sifive_u_machine_init(MachineState *machine) uint32_t fdt_load_addr; uint64_t kernel_entry; DriveInfo *dinfo; - DeviceState *flash_dev, *sd_dev; + BlockBackend *blk; + DeviceState *flash_dev, *sd_dev, *card_dev; qemu_irq flash_cs, sd_cs; /* Initialize SoC */ @@ -512,13 +547,11 @@ static void sifive_u_machine_init(MachineState *machine) &error_abort); object_property_set_str(OBJECT(&s->soc), "cpu-type", machine->cpu_type, &error_abort); - qdev_realize(DEVICE(&s->soc), NULL, &error_abort); + qdev_realize(DEVICE(&s->soc), NULL, &error_fatal); /* register RAM */ - memory_region_init_ram(main_mem, NULL, "riscv.sifive.u.ram", - machine->ram_size, &error_fatal); memory_region_add_subregion(system_memory, memmap[SIFIVE_U_DEV_DRAM].base, - main_mem); + machine->ram); /* register QSPI0 Flash */ memory_region_init_ram(flash0, NULL, "riscv.sifive.u.flash0", @@ -601,6 +634,9 @@ static void sifive_u_machine_init(MachineState *machine) start_addr_hi32 = (uint64_t)start_addr >> 32; } + /* Set machine->fdt for 'dumpdtb' QMP/HMP command */ + machine->fdt = s->fdt; + /* reset vector */ uint32_t reset_vec[12] = { s->msel, /* MSEL pin state */ @@ -639,7 +675,7 @@ static void sifive_u_machine_init(MachineState *machine) /* Connect an SPI flash to SPI0 */ flash_dev = qdev_new("is25wp256"); - dinfo = drive_get_next(IF_MTD); + dinfo = drive_get(IF_MTD, 0, 0); if (dinfo) { qdev_prop_set_drive_err(flash_dev, "drive", blk_by_legacy_dinfo(dinfo), @@ -655,6 +691,15 @@ static void sifive_u_machine_init(MachineState *machine) sd_cs = qdev_get_gpio_in_named(sd_dev, SSI_GPIO_CS, 0); sysbus_connect_irq(SYS_BUS_DEVICE(&s->soc.spi2), 1, sd_cs); + + dinfo = drive_get(IF_SD, 0, 0); + blk = dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL; + card_dev = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(card_dev, "drive", blk, &error_fatal); + qdev_prop_set_bit(card_dev, "spi", true); + qdev_realize_and_unref(card_dev, + qdev_get_child_bus(sd_dev, "sd-bus"), + &error_fatal); } static bool sifive_u_machine_get_start_in_flash(Object *obj, Error **errp) @@ -671,36 +716,20 @@ static void sifive_u_machine_set_start_in_flash(Object *obj, bool value, Error * s->start_in_flash = value; } -static void sifive_u_machine_get_uint32_prop(Object *obj, Visitor *v, - const char *name, void *opaque, - Error **errp) -{ - visit_type_uint32(v, name, (uint32_t *)opaque, errp); -} - -static void sifive_u_machine_set_uint32_prop(Object *obj, Visitor *v, - const char *name, void *opaque, - Error **errp) -{ - visit_type_uint32(v, name, (uint32_t *)opaque, errp); -} - static void sifive_u_machine_instance_init(Object *obj) { SiFiveUState *s = RISCV_U_MACHINE(obj); s->start_in_flash = false; s->msel = 0; - object_property_add(obj, "msel", "uint32", - sifive_u_machine_get_uint32_prop, - sifive_u_machine_set_uint32_prop, NULL, &s->msel); + object_property_add_uint32_ptr(obj, "msel", &s->msel, + OBJ_PROP_FLAG_READWRITE); object_property_set_description(obj, "msel", "Mode Select (MSEL[3:0]) pin state"); s->serial = OTP_SERIAL; - object_property_add(obj, "serial", "uint32", - sifive_u_machine_get_uint32_prop, - sifive_u_machine_set_uint32_prop, NULL, &s->serial); + object_property_add_uint32_ptr(obj, "serial", &s->serial, + OBJ_PROP_FLAG_READWRITE); object_property_set_description(obj, "serial", "Board serial number"); } @@ -714,6 +743,7 @@ static void sifive_u_machine_class_init(ObjectClass *oc, void *data) mc->min_cpus = SIFIVE_U_MANAGEMENT_CPU_COUNT + 1; mc->default_cpu_type = SIFIVE_U_CPU; mc->default_cpus = mc->min_cpus; + mc->default_ram_id = "riscv.sifive.u.ram"; object_class_property_add_bool(oc, "start-in-flash", sifive_u_machine_get_start_in_flash, @@ -766,6 +796,8 @@ static void sifive_u_soc_instance_init(Object *obj) object_initialize_child(obj, "pdma", &s->dma, TYPE_SIFIVE_PDMA); object_initialize_child(obj, "spi0", &s->spi0, TYPE_SIFIVE_SPI); object_initialize_child(obj, "spi2", &s->spi2, TYPE_SIFIVE_SPI); + object_initialize_child(obj, "pwm0", &s->pwm[0], TYPE_SIFIVE_PWM); + object_initialize_child(obj, "pwm1", &s->pwm[1], TYPE_SIFIVE_PWM); } static void sifive_u_soc_realize(DeviceState *dev, Error **errp) @@ -777,8 +809,7 @@ static void sifive_u_soc_realize(DeviceState *dev, Error **errp) MemoryRegion *mask_rom = g_new(MemoryRegion, 1); MemoryRegion *l2lim_mem = g_new(MemoryRegion, 1); char *plic_hart_config; - size_t plic_hart_config_len; - int i; + int i, j; NICInfo *nd = &nd_table[0]; qdev_prop_set_uint32(DEVICE(&s->u_cpus), "num-harts", ms->smp.cpus - 1); @@ -786,8 +817,8 @@ static void sifive_u_soc_realize(DeviceState *dev, Error **errp) qdev_prop_set_string(DEVICE(&s->u_cpus), "cpu-type", s->cpu_type); qdev_prop_set_uint64(DEVICE(&s->u_cpus), "resetvec", 0x1004); - sysbus_realize(SYS_BUS_DEVICE(&s->e_cpus), &error_abort); - sysbus_realize(SYS_BUS_DEVICE(&s->u_cpus), &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->e_cpus), &error_fatal); + sysbus_realize(SYS_BUS_DEVICE(&s->u_cpus), &error_fatal); /* * The cluster must be realized after the RISC-V hart array container, * as the container's CPU object is only created on realize, and the @@ -818,22 +849,11 @@ static void sifive_u_soc_realize(DeviceState *dev, Error **errp) l2lim_mem); /* create PLIC hart topology configuration string */ - 
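For reference on the helper that replaces the removed strncat loop below (and the identical one in microchip_pfsoc.c earlier): riscv_plic_hart_config_string() emits one token per hart, "M" for M-mode-only harts, "MS" when S-mode is present, or "S" under KVM, joined with commas. A standalone sketch with an assumed four-hart, S-mode-capable configuration (hart_count and the "MS" token are illustrative):

#include <stdio.h>
#include <string.h>

int main(void)
{
    const int hart_count = 4;   /* assumed hart count */
    char config[64] = "";

    for (int i = 0; i < hart_count; i++) {
        if (i) {
            strcat(config, ",");
        }
        strcat(config, "MS");   /* per-hart token: "M", "MS" or "S" */
    }
    printf("%s\n", config);     /* prints MS,MS,MS,MS */
    return 0;
}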
plic_hart_config_len = (strlen(SIFIVE_U_PLIC_HART_CONFIG) + 1) * - ms->smp.cpus; - plic_hart_config = g_malloc0(plic_hart_config_len); - for (i = 0; i < ms->smp.cpus; i++) { - if (i != 0) { - strncat(plic_hart_config, "," SIFIVE_U_PLIC_HART_CONFIG, - plic_hart_config_len); - } else { - strncat(plic_hart_config, "M", plic_hart_config_len); - } - plic_hart_config_len -= (strlen(SIFIVE_U_PLIC_HART_CONFIG) + 1); - } + plic_hart_config = riscv_plic_hart_config_string(ms->smp.cpus); /* MMIO */ s->plic = sifive_plic_create(memmap[SIFIVE_U_DEV_PLIC].base, - plic_hart_config, 0, + plic_hart_config, ms->smp.cpus, 0, SIFIVE_U_PLIC_NUM_SOURCES, SIFIVE_U_PLIC_NUM_PRIORITIES, SIFIVE_U_PLIC_PRIORITY_BASE, @@ -848,9 +868,12 @@ static void sifive_u_soc_realize(DeviceState *dev, Error **errp) serial_hd(0), qdev_get_gpio_in(DEVICE(s->plic), SIFIVE_U_UART0_IRQ)); sifive_uart_create(system_memory, memmap[SIFIVE_U_DEV_UART1].base, serial_hd(1), qdev_get_gpio_in(DEVICE(s->plic), SIFIVE_U_UART1_IRQ)); - sifive_clint_create(memmap[SIFIVE_U_DEV_CLINT].base, - memmap[SIFIVE_U_DEV_CLINT].size, 0, ms->smp.cpus, - SIFIVE_SIP_BASE, SIFIVE_TIMECMP_BASE, SIFIVE_TIME_BASE, + riscv_aclint_swi_create(memmap[SIFIVE_U_DEV_CLINT].base, 0, + ms->smp.cpus, false); + riscv_aclint_mtimer_create(memmap[SIFIVE_U_DEV_CLINT].base + + RISCV_ACLINT_SWI_SIZE, + RISCV_ACLINT_DEFAULT_MTIMER_SIZE, 0, ms->smp.cpus, + RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME, CLINT_TIMEBASE_FREQ, false); if (!sysbus_realize(SYS_BUS_DEVICE(&s->prci), errp)) { @@ -905,6 +928,22 @@ static void sifive_u_soc_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->gem), 0, qdev_get_gpio_in(DEVICE(s->plic), SIFIVE_U_GEM_IRQ)); + /* PWM */ + for (i = 0; i < 2; i++) { + if (!sysbus_realize(SYS_BUS_DEVICE(&s->pwm[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->pwm[i]), 0, + memmap[SIFIVE_U_DEV_PWM0].base + (0x1000 * i)); + + /* Connect PWM interrupts to the PLIC */ + for (j = 0; j < SIFIVE_PWM_IRQS; j++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pwm[i]), j, + qdev_get_gpio_in(DEVICE(s->plic), + SIFIVE_U_PWM0_IRQ0 + (i * 4) + j)); + } + } + create_unimplemented_device("riscv.sifive.u.gem-mgmt", memmap[SIFIVE_U_DEV_GEM_MGMT].base, memmap[SIFIVE_U_DEV_GEM_MGMT].size); diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c index fead77f0c4..1e1d752c00 100644 --- a/hw/riscv/spike.c +++ b/hw/riscv/spike.c @@ -35,14 +35,16 @@ #include "hw/riscv/boot.h" #include "hw/riscv/numa.h" #include "hw/char/riscv_htif.h" -#include "hw/intc/sifive_clint.h" +#include "hw/intc/riscv_aclint.h" #include "chardev/char.h" -#include "sysemu/arch_init.h" #include "sysemu/device_tree.h" #include "sysemu/sysemu.h" +#include + static const MemMapEntry spike_memmap[] = { [SPIKE_MROM] = { 0x1000, 0xf000 }, + [SPIKE_HTIF] = { 0x1000000, 0x1000 }, [SPIKE_CLINT] = { 0x2000000, 0x10000 }, [SPIKE_DRAM] = { 0x80000000, 0x0 }, }; @@ -76,6 +78,10 @@ static void create_fdt(SpikeState *s, const MemMapEntry *memmap, qemu_fdt_add_subnode(fdt, "/htif"); qemu_fdt_setprop_string(fdt, "/htif", "compatible", "ucb,htif0"); + if (!htif_uses_elf_symbols()) { + qemu_fdt_setprop_cells(fdt, "/htif", "reg", + 0x0, memmap[SPIKE_HTIF].base, 0x0, memmap[SPIKE_HTIF].size); + } qemu_fdt_add_subnode(fdt, "/soc"); qemu_fdt_setprop(fdt, "/soc", "ranges", NULL, 0); @@ -85,7 +91,7 @@ static void create_fdt(SpikeState *s, const MemMapEntry *memmap, qemu_fdt_add_subnode(fdt, "/cpus"); qemu_fdt_setprop_cell(fdt, "/cpus", "timebase-frequency", - SIFIVE_CLINT_TIMEBASE_FREQ); + 
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ); qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0); qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1); qemu_fdt_add_subnode(fdt, "/cpus/cpu-map"); @@ -170,8 +176,10 @@ static void create_fdt(SpikeState *s, const MemMapEntry *memmap, riscv_socket_fdt_write_distance_matrix(mc, fdt); - if (cmdline) { - qemu_fdt_add_subnode(fdt, "/chosen"); + qemu_fdt_add_subnode(fdt, "/chosen"); + qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", "/htif"); + + if (cmdline && *cmdline) { qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline); } } @@ -181,7 +189,6 @@ static void spike_board_init(MachineState *machine) const MemMapEntry *memmap = spike_memmap; SpikeState *s = SPIKE_MACHINE(machine); MemoryRegion *system_memory = get_system_memory(); - MemoryRegion *main_mem = g_new(MemoryRegion, 1); MemoryRegion *mask_rom = g_new(MemoryRegion, 1); target_ulong firmware_end_addr, kernel_start_addr; uint32_t fdt_load_addr; @@ -225,25 +232,23 @@ static void spike_board_init(MachineState *machine) base_hartid, &error_abort); object_property_set_int(OBJECT(&s->soc[i]), "num-harts", hart_count, &error_abort); - sysbus_realize(SYS_BUS_DEVICE(&s->soc[i]), &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->soc[i]), &error_fatal); /* Core Local Interruptor (timer and IPI) for each socket */ - sifive_clint_create( + riscv_aclint_swi_create( memmap[SPIKE_CLINT].base + i * memmap[SPIKE_CLINT].size, - memmap[SPIKE_CLINT].size, base_hartid, hart_count, - SIFIVE_SIP_BASE, SIFIVE_TIMECMP_BASE, SIFIVE_TIME_BASE, - SIFIVE_CLINT_TIMEBASE_FREQ, false); + base_hartid, hart_count, false); + riscv_aclint_mtimer_create( + memmap[SPIKE_CLINT].base + i * memmap[SPIKE_CLINT].size + + RISCV_ACLINT_SWI_SIZE, + RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count, + RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME, + RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, false); } /* register system main memory (actual RAM) */ - memory_region_init_ram(main_mem, NULL, "riscv.spike.ram", - machine->ram_size, &error_fatal); memory_region_add_subregion(system_memory, memmap[SPIKE_DRAM].base, - main_mem); - - /* create device tree */ - create_fdt(s, memmap, machine->ram_size, machine->kernel_cmdline, - riscv_is_32bit(&s->soc[0])); + machine->ram); /* boot rom */ memory_region_init_rom(mask_rom, NULL, "riscv.spike.mrom", @@ -258,14 +263,15 @@ static void spike_board_init(MachineState *machine) */ if (riscv_is_32bit(&s->soc[0])) { firmware_end_addr = riscv_find_and_load_firmware(machine, - RISCV32_BIOS_ELF, memmap[SPIKE_DRAM].base, + RISCV32_BIOS_BIN, memmap[SPIKE_DRAM].base, htif_symbol_callback); } else { firmware_end_addr = riscv_find_and_load_firmware(machine, - RISCV64_BIOS_ELF, memmap[SPIKE_DRAM].base, + RISCV64_BIOS_BIN, memmap[SPIKE_DRAM].base, htif_symbol_callback); } + /* Load kernel */ if (machine->kernel_filename) { kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0], firmware_end_addr); @@ -273,17 +279,6 @@ static void spike_board_init(MachineState *machine) kernel_entry = riscv_load_kernel(machine->kernel_filename, kernel_start_addr, htif_symbol_callback); - - if (machine->initrd_filename) { - hwaddr start; - hwaddr end = riscv_load_initrd(machine->initrd_filename, - machine->ram_size, kernel_entry, - &start); - qemu_fdt_setprop_cell(s->fdt, "/chosen", - "linux,initrd-start", start); - qemu_fdt_setprop_cell(s->fdt, "/chosen", "linux,initrd-end", - end); - } } else { /* * If dynamic firmware is used, it doesn't know where is the next mode @@ -292,18 +287,39 @@ static void 
spike_board_init(MachineState *machine) kernel_entry = 0; } + /* Create device tree */ + create_fdt(s, memmap, machine->ram_size, machine->kernel_cmdline, + riscv_is_32bit(&s->soc[0])); + + /* Load initrd */ + if (machine->kernel_filename && machine->initrd_filename) { + hwaddr start; + hwaddr end = riscv_load_initrd(machine->initrd_filename, + machine->ram_size, kernel_entry, + &start); + qemu_fdt_setprop_cell(s->fdt, "/chosen", + "linux,initrd-start", start); + qemu_fdt_setprop_cell(s->fdt, "/chosen", "linux,initrd-end", + end); + } + /* Compute the fdt load address in dram */ fdt_load_addr = riscv_load_fdt(memmap[SPIKE_DRAM].base, machine->ram_size, s->fdt); + + /* Set machine->fdt for 'dumpdtb' QMP/HMP command */ + machine->fdt = s->fdt; + /* load the reset vector */ riscv_setup_rom_reset_vec(machine, &s->soc[0], memmap[SPIKE_DRAM].base, memmap[SPIKE_MROM].base, memmap[SPIKE_MROM].size, kernel_entry, - fdt_load_addr, s->fdt); + fdt_load_addr); /* initialize HTIF using symbols found in load_kernel */ htif_mm_init(system_memory, mask_rom, - &s->soc[0].harts[0].env, serial_hd(0)); + &s->soc[0].harts[0].env, serial_hd(0), + memmap[SPIKE_HTIF].base); } static void spike_machine_instance_init(Object *obj) @@ -323,6 +339,7 @@ static void spike_machine_class_init(ObjectClass *oc, void *data) mc->cpu_index_to_instance_props = riscv_numa_cpu_index_to_props; mc->get_default_cpu_node_id = riscv_numa_get_default_cpu_node_id; mc->numa_mem_supported = true; + mc->default_ram_id = "riscv.spike.ram"; } static const TypeInfo spike_machine_typeinfo = { diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c index 4a3cd2599a..a5bc7353b4 100644 --- a/hw/riscv/virt.c +++ b/hw/riscv/virt.c @@ -21,6 +21,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qemu/error-report.h" +#include "qemu/guest-random.h" #include "qapi/error.h" #include "hw/boards.h" #include "hw/loader.h" @@ -28,36 +29,70 @@ #include "hw/qdev-properties.h" #include "hw/char/serial.h" #include "target/riscv/cpu.h" +#include "hw/core/sysbus-fdt.h" +#include "target/riscv/pmu.h" #include "hw/riscv/riscv_hart.h" #include "hw/riscv/virt.h" #include "hw/riscv/boot.h" #include "hw/riscv/numa.h" -#include "hw/intc/sifive_clint.h" +#include "hw/intc/riscv_aclint.h" +#include "hw/intc/riscv_aplic.h" +#include "hw/intc/riscv_imsic.h" #include "hw/intc/sifive_plic.h" #include "hw/misc/sifive_test.h" +#include "hw/platform-bus.h" #include "chardev/char.h" -#include "sysemu/arch_init.h" #include "sysemu/device_tree.h" #include "sysemu/sysemu.h" +#include "sysemu/kvm.h" +#include "sysemu/tpm.h" #include "hw/pci/pci.h" #include "hw/pci-host/gpex.h" #include "hw/display/ramfb.h" +/* + * The virt machine physical address space used by some of the devices + * namely ACLINT, PLIC, APLIC, and IMSIC depend on number of Sockets, + * number of CPUs, and number of IMSIC guest files. + * + * Various limits defined by VIRT_SOCKETS_MAX_BITS, VIRT_CPUS_MAX_BITS, + * and VIRT_IRQCHIP_MAX_GUESTS_BITS are tuned for maximum utilization + * of virt machine physical address space. 
+ */ + +#define VIRT_IMSIC_GROUP_MAX_SIZE (1U << IMSIC_MMIO_GROUP_MIN_SHIFT) +#if VIRT_IMSIC_GROUP_MAX_SIZE < \ + IMSIC_GROUP_SIZE(VIRT_CPUS_MAX_BITS, VIRT_IRQCHIP_MAX_GUESTS_BITS) +#error "Can't accomodate single IMSIC group in address space" +#endif + +#define VIRT_IMSIC_MAX_SIZE (VIRT_SOCKETS_MAX * \ + VIRT_IMSIC_GROUP_MAX_SIZE) +#if 0x4000000 < VIRT_IMSIC_MAX_SIZE +#error "Can't accomodate all IMSIC groups in address space" +#endif + static const MemMapEntry virt_memmap[] = { - [VIRT_DEBUG] = { 0x0, 0x100 }, - [VIRT_MROM] = { 0x1000, 0xf000 }, - [VIRT_TEST] = { 0x100000, 0x1000 }, - [VIRT_RTC] = { 0x101000, 0x1000 }, - [VIRT_CLINT] = { 0x2000000, 0x10000 }, - [VIRT_PCIE_PIO] = { 0x3000000, 0x10000 }, - [VIRT_PLIC] = { 0xc000000, VIRT_PLIC_SIZE(VIRT_CPUS_MAX * 2) }, - [VIRT_UART0] = { 0x10000000, 0x100 }, - [VIRT_VIRTIO] = { 0x10001000, 0x1000 }, - [VIRT_FW_CFG] = { 0x10100000, 0x18 }, - [VIRT_FLASH] = { 0x20000000, 0x4000000 }, - [VIRT_PCIE_ECAM] = { 0x30000000, 0x10000000 }, - [VIRT_PCIE_MMIO] = { 0x40000000, 0x40000000 }, - [VIRT_DRAM] = { 0x80000000, 0x0 }, + [VIRT_DEBUG] = { 0x0, 0x100 }, + [VIRT_MROM] = { 0x1000, 0xf000 }, + [VIRT_TEST] = { 0x100000, 0x1000 }, + [VIRT_RTC] = { 0x101000, 0x1000 }, + [VIRT_CLINT] = { 0x2000000, 0x10000 }, + [VIRT_ACLINT_SSWI] = { 0x2F00000, 0x4000 }, + [VIRT_PCIE_PIO] = { 0x3000000, 0x10000 }, + [VIRT_PLATFORM_BUS] = { 0x4000000, 0x2000000 }, + [VIRT_PLIC] = { 0xc000000, VIRT_PLIC_SIZE(VIRT_CPUS_MAX * 2) }, + [VIRT_APLIC_M] = { 0xc000000, APLIC_SIZE(VIRT_CPUS_MAX) }, + [VIRT_APLIC_S] = { 0xd000000, APLIC_SIZE(VIRT_CPUS_MAX) }, + [VIRT_UART0] = { 0x10000000, 0x100 }, + [VIRT_VIRTIO] = { 0x10001000, 0x1000 }, + [VIRT_FW_CFG] = { 0x10100000, 0x18 }, + [VIRT_FLASH] = { 0x20000000, 0x4000000 }, + [VIRT_IMSIC_M] = { 0x24000000, VIRT_IMSIC_MAX_SIZE }, + [VIRT_IMSIC_S] = { 0x28000000, VIRT_IMSIC_MAX_SIZE }, + [VIRT_PCIE_ECAM] = { 0x30000000, 0x10000000 }, + [VIRT_PCIE_MMIO] = { 0x40000000, 0x40000000 }, + [VIRT_DRAM] = { 0x80000000, 0x0 }, }; /* PCIe high mmio is fixed for RV32 */ @@ -132,12 +167,13 @@ static void virt_flash_map(RISCVVirtState *s, sysmem); } -static void create_pcie_irq_map(void *fdt, char *nodename, - uint32_t plic_phandle) +static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename, + uint32_t irqchip_phandle) { int pin, dev; - uint32_t - full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS * FDT_INT_MAP_WIDTH] = {}; + uint32_t irq_map_stride = 0; + uint32_t full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS * + FDT_MAX_INT_MAP_WIDTH] = {}; uint32_t *irq_map = full_irq_map; /* This code creates a standard swizzle of interrupts such that @@ -155,236 +191,681 @@ static void create_pcie_irq_map(void *fdt, char *nodename, int irq_nr = PCIE_IRQ + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS); int i = 0; + /* Fill PCI address cells */ irq_map[i] = cpu_to_be32(devfn << 8); - i += FDT_PCI_ADDR_CELLS; + + /* Fill PCI Interrupt cells */ irq_map[i] = cpu_to_be32(pin + 1); - i += FDT_PCI_INT_CELLS; - irq_map[i++] = cpu_to_be32(plic_phandle); - i += FDT_PLIC_ADDR_CELLS; - irq_map[i] = cpu_to_be32(irq_nr); + /* Fill interrupt controller phandle and cells */ + irq_map[i++] = cpu_to_be32(irqchip_phandle); + irq_map[i++] = cpu_to_be32(irq_nr); + if (s->aia_type != VIRT_AIA_TYPE_NONE) { + irq_map[i++] = cpu_to_be32(0x4); + } - irq_map += FDT_INT_MAP_WIDTH; + if (!irq_map_stride) { + irq_map_stride = i; + } + irq_map += irq_map_stride; } } - qemu_fdt_setprop(fdt, nodename, "interrupt-map", - full_irq_map, sizeof(full_irq_map)); + qemu_fdt_setprop(fdt, nodename, 
"interrupt-map", full_irq_map, + GPEX_NUM_IRQS * GPEX_NUM_IRQS * + irq_map_stride * sizeof(uint32_t)); qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask", 0x1800, 0, 0, 0x7); } -static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap, - uint64_t mem_size, const char *cmdline, bool is_32_bit) +static void create_fdt_socket_cpus(RISCVVirtState *s, int socket, + char *clust_name, uint32_t *phandle, + bool is_32_bit, uint32_t *intc_phandles) { - void *fdt; - int i, cpu, socket; + int cpu; + uint32_t cpu_phandle; MachineState *mc = MACHINE(s); + char *name, *cpu_name, *core_name, *intc_name; + + for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) { + cpu_phandle = (*phandle)++; + + cpu_name = g_strdup_printf("/cpus/cpu@%d", + s->soc[socket].hartid_base + cpu); + qemu_fdt_add_subnode(mc->fdt, cpu_name); + if (riscv_feature(&s->soc[socket].harts[cpu].env, + RISCV_FEATURE_MMU)) { + qemu_fdt_setprop_string(mc->fdt, cpu_name, "mmu-type", + (is_32_bit) ? "riscv,sv32" : "riscv,sv48"); + } else { + qemu_fdt_setprop_string(mc->fdt, cpu_name, "mmu-type", + "riscv,none"); + } + name = riscv_isa_string(&s->soc[socket].harts[cpu]); + qemu_fdt_setprop_string(mc->fdt, cpu_name, "riscv,isa", name); + g_free(name); + qemu_fdt_setprop_string(mc->fdt, cpu_name, "compatible", "riscv"); + qemu_fdt_setprop_string(mc->fdt, cpu_name, "status", "okay"); + qemu_fdt_setprop_cell(mc->fdt, cpu_name, "reg", + s->soc[socket].hartid_base + cpu); + qemu_fdt_setprop_string(mc->fdt, cpu_name, "device_type", "cpu"); + riscv_socket_fdt_write_id(mc, mc->fdt, cpu_name, socket); + qemu_fdt_setprop_cell(mc->fdt, cpu_name, "phandle", cpu_phandle); + + intc_phandles[cpu] = (*phandle)++; + + intc_name = g_strdup_printf("%s/interrupt-controller", cpu_name); + qemu_fdt_add_subnode(mc->fdt, intc_name); + qemu_fdt_setprop_cell(mc->fdt, intc_name, "phandle", + intc_phandles[cpu]); + qemu_fdt_setprop_string(mc->fdt, intc_name, "compatible", + "riscv,cpu-intc"); + qemu_fdt_setprop(mc->fdt, intc_name, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(mc->fdt, intc_name, "#interrupt-cells", 1); + + core_name = g_strdup_printf("%s/core%d", clust_name, cpu); + qemu_fdt_add_subnode(mc->fdt, core_name); + qemu_fdt_setprop_cell(mc->fdt, core_name, "cpu", cpu_phandle); + + g_free(core_name); + g_free(intc_name); + g_free(cpu_name); + } +} + +static void create_fdt_socket_memory(RISCVVirtState *s, + const MemMapEntry *memmap, int socket) +{ + char *mem_name; uint64_t addr, size; - uint32_t *clint_cells, *plic_cells; - unsigned long clint_addr, plic_addr; - uint32_t plic_phandle[MAX_NODES]; - uint32_t cpu_phandle, intc_phandle, test_phandle; - uint32_t phandle = 1, plic_mmio_phandle = 1; - uint32_t plic_pcie_phandle = 1, plic_virtio_phandle = 1; - char *mem_name, *cpu_name, *core_name, *intc_name; - char *name, *clint_name, *plic_name, *clust_name; - hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2; - hwaddr flashbase = virt_memmap[VIRT_FLASH].base; + MachineState *mc = MACHINE(s); + + addr = memmap[VIRT_DRAM].base + riscv_socket_mem_offset(mc, socket); + size = riscv_socket_mem_size(mc, socket); + mem_name = g_strdup_printf("/memory@%lx", (long)addr); + qemu_fdt_add_subnode(mc->fdt, mem_name); + qemu_fdt_setprop_cells(mc->fdt, mem_name, "reg", + addr >> 32, addr, size >> 32, size); + qemu_fdt_setprop_string(mc->fdt, mem_name, "device_type", "memory"); + riscv_socket_fdt_write_id(mc, mc->fdt, mem_name, socket); + g_free(mem_name); +} + +static void create_fdt_socket_clint(RISCVVirtState *s, + const MemMapEntry 
*memmap, int socket, + uint32_t *intc_phandles) +{ + int cpu; + char *clint_name; + uint32_t *clint_cells; + unsigned long clint_addr; + MachineState *mc = MACHINE(s); static const char * const clint_compat[2] = { "sifive,clint0", "riscv,clint0" }; + + clint_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4); + + for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) { + clint_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandles[cpu]); + clint_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT); + clint_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandles[cpu]); + clint_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER); + } + + clint_addr = memmap[VIRT_CLINT].base + (memmap[VIRT_CLINT].size * socket); + clint_name = g_strdup_printf("/soc/clint@%lx", clint_addr); + qemu_fdt_add_subnode(mc->fdt, clint_name); + qemu_fdt_setprop_string_array(mc->fdt, clint_name, "compatible", + (char **)&clint_compat, + ARRAY_SIZE(clint_compat)); + qemu_fdt_setprop_cells(mc->fdt, clint_name, "reg", + 0x0, clint_addr, 0x0, memmap[VIRT_CLINT].size); + qemu_fdt_setprop(mc->fdt, clint_name, "interrupts-extended", + clint_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4); + riscv_socket_fdt_write_id(mc, mc->fdt, clint_name, socket); + g_free(clint_name); + + g_free(clint_cells); +} + +static void create_fdt_socket_aclint(RISCVVirtState *s, + const MemMapEntry *memmap, int socket, + uint32_t *intc_phandles) +{ + int cpu; + char *name; + unsigned long addr, size; + uint32_t aclint_cells_size; + uint32_t *aclint_mswi_cells; + uint32_t *aclint_sswi_cells; + uint32_t *aclint_mtimer_cells; + MachineState *mc = MACHINE(s); + + aclint_mswi_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2); + aclint_mtimer_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2); + aclint_sswi_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2); + + for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) { + aclint_mswi_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]); + aclint_mswi_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_SOFT); + aclint_mtimer_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]); + aclint_mtimer_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_TIMER); + aclint_sswi_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]); + aclint_sswi_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_SOFT); + } + aclint_cells_size = s->soc[socket].num_harts * sizeof(uint32_t) * 2; + + if (s->aia_type != VIRT_AIA_TYPE_APLIC_IMSIC) { + addr = memmap[VIRT_CLINT].base + (memmap[VIRT_CLINT].size * socket); + name = g_strdup_printf("/soc/mswi@%lx", addr); + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", + "riscv,aclint-mswi"); + qemu_fdt_setprop_cells(mc->fdt, name, "reg", + 0x0, addr, 0x0, RISCV_ACLINT_SWI_SIZE); + qemu_fdt_setprop(mc->fdt, name, "interrupts-extended", + aclint_mswi_cells, aclint_cells_size); + qemu_fdt_setprop(mc->fdt, name, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(mc->fdt, name, "#interrupt-cells", 0); + riscv_socket_fdt_write_id(mc, mc->fdt, name, socket); + g_free(name); + } + + if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) { + addr = memmap[VIRT_CLINT].base + + (RISCV_ACLINT_DEFAULT_MTIMER_SIZE * socket); + size = RISCV_ACLINT_DEFAULT_MTIMER_SIZE; + } else { + addr = memmap[VIRT_CLINT].base + RISCV_ACLINT_SWI_SIZE + + (memmap[VIRT_CLINT].size * socket); + size = memmap[VIRT_CLINT].size - RISCV_ACLINT_SWI_SIZE; + } + name = g_strdup_printf("/soc/mtimer@%lx", addr); + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", + "riscv,aclint-mtimer"); + 
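An illustrative aside, not part of the patch: the per-socket ACLINT placement used above is easier to follow with concrete numbers. The standalone sketch below mirrors the same address arithmetic, assuming the memmap values introduced by this patch (VIRT_CLINT at 0x2000000, size 0x10000; VIRT_ACLINT_SSWI at 0x2F00000, size 0x4000) and taking RISCV_ACLINT_SWI_SIZE to be 0x4000; those constant values are assumptions here, not quoted from the QEMU headers.

/* Standalone sketch of the per-socket MSWI/MTIMER/SSWI base addresses. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    const uint64_t clint_base = 0x2000000, clint_size = 0x10000;
    const uint64_t sswi_base = 0x2F00000, sswi_size = 0x4000;
    const uint64_t swi_size = 0x4000; /* assumed RISCV_ACLINT_SWI_SIZE */

    for (int socket = 0; socket < 2; socket++) {
        uint64_t mswi = clint_base + socket * clint_size;
        uint64_t mtimer = mswi + swi_size; /* non-IMSIC layout used above */
        uint64_t sswi = sswi_base + socket * sswi_size;
        printf("socket %d: mswi 0x%" PRIx64 ", mtimer 0x%" PRIx64
               ", sswi 0x%" PRIx64 "\n", socket, mswi, mtimer, sswi);
    }
    return 0;
}

For socket 0 this prints mswi 0x2000000, mtimer 0x2004000, sswi 0x2F00000, which is the layout the device tree nodes above describe.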
qemu_fdt_setprop_cells(mc->fdt, name, "reg", + 0x0, addr + RISCV_ACLINT_DEFAULT_MTIME, + 0x0, size - RISCV_ACLINT_DEFAULT_MTIME, + 0x0, addr + RISCV_ACLINT_DEFAULT_MTIMECMP, + 0x0, RISCV_ACLINT_DEFAULT_MTIME); + qemu_fdt_setprop(mc->fdt, name, "interrupts-extended", + aclint_mtimer_cells, aclint_cells_size); + riscv_socket_fdt_write_id(mc, mc->fdt, name, socket); + g_free(name); + + if (s->aia_type != VIRT_AIA_TYPE_APLIC_IMSIC) { + addr = memmap[VIRT_ACLINT_SSWI].base + + (memmap[VIRT_ACLINT_SSWI].size * socket); + name = g_strdup_printf("/soc/sswi@%lx", addr); + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", + "riscv,aclint-sswi"); + qemu_fdt_setprop_cells(mc->fdt, name, "reg", + 0x0, addr, 0x0, memmap[VIRT_ACLINT_SSWI].size); + qemu_fdt_setprop(mc->fdt, name, "interrupts-extended", + aclint_sswi_cells, aclint_cells_size); + qemu_fdt_setprop(mc->fdt, name, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(mc->fdt, name, "#interrupt-cells", 0); + riscv_socket_fdt_write_id(mc, mc->fdt, name, socket); + g_free(name); + } + + g_free(aclint_mswi_cells); + g_free(aclint_mtimer_cells); + g_free(aclint_sswi_cells); +} + +static void create_fdt_socket_plic(RISCVVirtState *s, + const MemMapEntry *memmap, int socket, + uint32_t *phandle, uint32_t *intc_phandles, + uint32_t *plic_phandles) +{ + int cpu; + char *plic_name; + uint32_t *plic_cells; + unsigned long plic_addr; + MachineState *mc = MACHINE(s); static const char * const plic_compat[2] = { "sifive,plic-1.0.0", "riscv,plic0" }; - if (mc->dtb) { - fdt = mc->fdt = load_device_tree(mc->dtb, &s->fdt_size); - if (!fdt) { - error_report("load_device_tree() failed"); - exit(1); - } - goto update_bootargs; + if (kvm_enabled()) { + plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2); } else { - fdt = mc->fdt = create_device_tree(&s->fdt_size); - if (!fdt) { - error_report("create_device_tree() failed"); - exit(1); - } - } - - qemu_fdt_setprop_string(fdt, "/", "model", "riscv-virtio,qemu"); - qemu_fdt_setprop_string(fdt, "/", "compatible", "riscv-virtio"); - qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x2); - qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x2); - - qemu_fdt_add_subnode(fdt, "/soc"); - qemu_fdt_setprop(fdt, "/soc", "ranges", NULL, 0); - qemu_fdt_setprop_string(fdt, "/soc", "compatible", "simple-bus"); - qemu_fdt_setprop_cell(fdt, "/soc", "#size-cells", 0x2); - qemu_fdt_setprop_cell(fdt, "/soc", "#address-cells", 0x2); - - qemu_fdt_add_subnode(fdt, "/cpus"); - qemu_fdt_setprop_cell(fdt, "/cpus", "timebase-frequency", - SIFIVE_CLINT_TIMEBASE_FREQ); - qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0); - qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1); - qemu_fdt_add_subnode(fdt, "/cpus/cpu-map"); - - for (socket = (riscv_socket_count(mc) - 1); socket >= 0; socket--) { - clust_name = g_strdup_printf("/cpus/cpu-map/cluster%d", socket); - qemu_fdt_add_subnode(fdt, clust_name); - plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4); - clint_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4); - - for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) { - cpu_phandle = phandle++; - - cpu_name = g_strdup_printf("/cpus/cpu@%d", - s->soc[socket].hartid_base + cpu); - qemu_fdt_add_subnode(fdt, cpu_name); - if (is_32_bit) { - qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type", "riscv,sv32"); - } else { - qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type", "riscv,sv48"); - } - name = riscv_isa_string(&s->soc[socket].harts[cpu]); - 
qemu_fdt_setprop_string(fdt, cpu_name, "riscv,isa", name); - g_free(name); - qemu_fdt_setprop_string(fdt, cpu_name, "compatible", "riscv"); - qemu_fdt_setprop_string(fdt, cpu_name, "status", "okay"); - qemu_fdt_setprop_cell(fdt, cpu_name, "reg", - s->soc[socket].hartid_base + cpu); - qemu_fdt_setprop_string(fdt, cpu_name, "device_type", "cpu"); - riscv_socket_fdt_write_id(mc, fdt, cpu_name, socket); - qemu_fdt_setprop_cell(fdt, cpu_name, "phandle", cpu_phandle); - - intc_name = g_strdup_printf("%s/interrupt-controller", cpu_name); - qemu_fdt_add_subnode(fdt, intc_name); - intc_phandle = phandle++; - qemu_fdt_setprop_cell(fdt, intc_name, "phandle", intc_phandle); - qemu_fdt_setprop_string(fdt, intc_name, "compatible", - "riscv,cpu-intc"); - qemu_fdt_setprop(fdt, intc_name, "interrupt-controller", NULL, 0); - qemu_fdt_setprop_cell(fdt, intc_name, "#interrupt-cells", 1); - - clint_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle); - clint_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT); - clint_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle); - clint_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER); - - plic_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle); - plic_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_EXT); - plic_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle); - plic_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_S_EXT); - - core_name = g_strdup_printf("%s/core%d", clust_name, cpu); - qemu_fdt_add_subnode(fdt, core_name); - qemu_fdt_setprop_cell(fdt, core_name, "cpu", cpu_phandle); - - g_free(core_name); - g_free(intc_name); - g_free(cpu_name); - } - - addr = memmap[VIRT_DRAM].base + riscv_socket_mem_offset(mc, socket); - size = riscv_socket_mem_size(mc, socket); - mem_name = g_strdup_printf("/memory@%lx", (long)addr); - qemu_fdt_add_subnode(fdt, mem_name); - qemu_fdt_setprop_cells(fdt, mem_name, "reg", - addr >> 32, addr, size >> 32, size); - qemu_fdt_setprop_string(fdt, mem_name, "device_type", "memory"); - riscv_socket_fdt_write_id(mc, fdt, mem_name, socket); - g_free(mem_name); - - clint_addr = memmap[VIRT_CLINT].base + - (memmap[VIRT_CLINT].size * socket); - clint_name = g_strdup_printf("/soc/clint@%lx", clint_addr); - qemu_fdt_add_subnode(fdt, clint_name); - qemu_fdt_setprop_string_array(fdt, clint_name, "compatible", - (char **)&clint_compat, ARRAY_SIZE(clint_compat)); - qemu_fdt_setprop_cells(fdt, clint_name, "reg", - 0x0, clint_addr, 0x0, memmap[VIRT_CLINT].size); - qemu_fdt_setprop(fdt, clint_name, "interrupts-extended", - clint_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4); - riscv_socket_fdt_write_id(mc, fdt, clint_name, socket); - g_free(clint_name); - - plic_phandle[socket] = phandle++; - plic_addr = memmap[VIRT_PLIC].base + (memmap[VIRT_PLIC].size * socket); - plic_name = g_strdup_printf("/soc/plic@%lx", plic_addr); - qemu_fdt_add_subnode(fdt, plic_name); - qemu_fdt_setprop_cell(fdt, plic_name, - "#address-cells", FDT_PLIC_ADDR_CELLS); - qemu_fdt_setprop_cell(fdt, plic_name, - "#interrupt-cells", FDT_PLIC_INT_CELLS); - qemu_fdt_setprop_string_array(fdt, plic_name, "compatible", - (char **)&plic_compat, ARRAY_SIZE(plic_compat)); - qemu_fdt_setprop(fdt, plic_name, "interrupt-controller", NULL, 0); - qemu_fdt_setprop(fdt, plic_name, "interrupts-extended", - plic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4); - qemu_fdt_setprop_cells(fdt, plic_name, "reg", - 0x0, plic_addr, 0x0, memmap[VIRT_PLIC].size); - qemu_fdt_setprop_cell(fdt, plic_name, "riscv,ndev", VIRTIO_NDEV); - riscv_socket_fdt_write_id(mc, fdt, plic_name, socket); - qemu_fdt_setprop_cell(fdt, plic_name, "phandle", 
plic_phandle[socket]); - g_free(plic_name); - - g_free(clint_cells); - g_free(plic_cells); - g_free(clust_name); } + for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) { + if (kvm_enabled()) { + plic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]); + plic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT); + } else { + plic_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandles[cpu]); + plic_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_EXT); + plic_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandles[cpu]); + plic_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_S_EXT); + } + } + + plic_phandles[socket] = (*phandle)++; + plic_addr = memmap[VIRT_PLIC].base + (memmap[VIRT_PLIC].size * socket); + plic_name = g_strdup_printf("/soc/plic@%lx", plic_addr); + qemu_fdt_add_subnode(mc->fdt, plic_name); + qemu_fdt_setprop_cell(mc->fdt, plic_name, + "#interrupt-cells", FDT_PLIC_INT_CELLS); + qemu_fdt_setprop_cell(mc->fdt, plic_name, + "#address-cells", FDT_PLIC_ADDR_CELLS); + qemu_fdt_setprop_string_array(mc->fdt, plic_name, "compatible", + (char **)&plic_compat, + ARRAY_SIZE(plic_compat)); + qemu_fdt_setprop(mc->fdt, plic_name, "interrupt-controller", NULL, 0); + qemu_fdt_setprop(mc->fdt, plic_name, "interrupts-extended", + plic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4); + qemu_fdt_setprop_cells(mc->fdt, plic_name, "reg", + 0x0, plic_addr, 0x0, memmap[VIRT_PLIC].size); + qemu_fdt_setprop_cell(mc->fdt, plic_name, "riscv,ndev", VIRTIO_NDEV); + riscv_socket_fdt_write_id(mc, mc->fdt, plic_name, socket); + qemu_fdt_setprop_cell(mc->fdt, plic_name, "phandle", + plic_phandles[socket]); + + if (!socket) { + platform_bus_add_all_fdt_nodes(mc->fdt, plic_name, + memmap[VIRT_PLATFORM_BUS].base, + memmap[VIRT_PLATFORM_BUS].size, + VIRT_PLATFORM_BUS_IRQ); + } + + g_free(plic_name); + + g_free(plic_cells); +} + +static uint32_t imsic_num_bits(uint32_t count) +{ + uint32_t ret = 0; + + while (BIT(ret) < count) { + ret++; + } + + return ret; +} + +static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap, + uint32_t *phandle, uint32_t *intc_phandles, + uint32_t *msi_m_phandle, uint32_t *msi_s_phandle) +{ + int cpu, socket; + char *imsic_name; + MachineState *mc = MACHINE(s); + uint32_t imsic_max_hart_per_socket, imsic_guest_bits; + uint32_t *imsic_cells, *imsic_regs, imsic_addr, imsic_size; + + *msi_m_phandle = (*phandle)++; + *msi_s_phandle = (*phandle)++; + imsic_cells = g_new0(uint32_t, mc->smp.cpus * 2); + imsic_regs = g_new0(uint32_t, riscv_socket_count(mc) * 4); + + /* M-level IMSIC node */ + for (cpu = 0; cpu < mc->smp.cpus; cpu++) { + imsic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]); + imsic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_EXT); + } + imsic_max_hart_per_socket = 0; + for (socket = 0; socket < riscv_socket_count(mc); socket++) { + imsic_addr = memmap[VIRT_IMSIC_M].base + + socket * VIRT_IMSIC_GROUP_MAX_SIZE; + imsic_size = IMSIC_HART_SIZE(0) * s->soc[socket].num_harts; + imsic_regs[socket * 4 + 0] = 0; + imsic_regs[socket * 4 + 1] = cpu_to_be32(imsic_addr); + imsic_regs[socket * 4 + 2] = 0; + imsic_regs[socket * 4 + 3] = cpu_to_be32(imsic_size); + if (imsic_max_hart_per_socket < s->soc[socket].num_harts) { + imsic_max_hart_per_socket = s->soc[socket].num_harts; + } + } + imsic_name = g_strdup_printf("/soc/imsics@%lx", + (unsigned long)memmap[VIRT_IMSIC_M].base); + qemu_fdt_add_subnode(mc->fdt, imsic_name); + qemu_fdt_setprop_string(mc->fdt, imsic_name, "compatible", + "riscv,imsics"); + qemu_fdt_setprop_cell(mc->fdt, imsic_name, "#interrupt-cells", + FDT_IMSIC_INT_CELLS); + 
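An illustrative aside, not part of the patch: imsic_num_bits() defined above is a ceil(log2(count)) helper, and its result feeds the riscv,hart-index-bits and riscv,group-index-bits properties written for the IMSIC nodes. A small self-contained sketch of the same loop, with BIT() spelled out:

#include <assert.h>

/* Same computation as imsic_num_bits(): smallest n with (1 << n) >= count. */
static unsigned int num_bits(unsigned int count)
{
    unsigned int ret = 0;

    while ((1U << ret) < count) {
        ret++;
    }
    return ret;
}

int main(void)
{
    assert(num_bits(1) == 0); /* one hart per socket -> 0 index bits */
    assert(num_bits(2) == 1);
    assert(num_bits(5) == 3); /* 5 harts -> 3 hart-index bits */
    assert(num_bits(8) == 3);
    return 0;
}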
qemu_fdt_setprop(mc->fdt, imsic_name, "interrupt-controller", + NULL, 0); + qemu_fdt_setprop(mc->fdt, imsic_name, "msi-controller", + NULL, 0); + qemu_fdt_setprop(mc->fdt, imsic_name, "interrupts-extended", + imsic_cells, mc->smp.cpus * sizeof(uint32_t) * 2); + qemu_fdt_setprop(mc->fdt, imsic_name, "reg", imsic_regs, + riscv_socket_count(mc) * sizeof(uint32_t) * 4); + qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,num-ids", + VIRT_IRQCHIP_NUM_MSIS); + qemu_fdt_setprop_cells(mc->fdt, imsic_name, "riscv,ipi-id", + VIRT_IRQCHIP_IPI_MSI); + if (riscv_socket_count(mc) > 1) { + qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,hart-index-bits", + imsic_num_bits(imsic_max_hart_per_socket)); + qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,group-index-bits", + imsic_num_bits(riscv_socket_count(mc))); + qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,group-index-shift", + IMSIC_MMIO_GROUP_MIN_SHIFT); + } + qemu_fdt_setprop_cell(mc->fdt, imsic_name, "phandle", *msi_m_phandle); + + g_free(imsic_name); + + /* S-level IMSIC node */ + for (cpu = 0; cpu < mc->smp.cpus; cpu++) { + imsic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]); + imsic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT); + } + imsic_guest_bits = imsic_num_bits(s->aia_guests + 1); + imsic_max_hart_per_socket = 0; + for (socket = 0; socket < riscv_socket_count(mc); socket++) { + imsic_addr = memmap[VIRT_IMSIC_S].base + + socket * VIRT_IMSIC_GROUP_MAX_SIZE; + imsic_size = IMSIC_HART_SIZE(imsic_guest_bits) * + s->soc[socket].num_harts; + imsic_regs[socket * 4 + 0] = 0; + imsic_regs[socket * 4 + 1] = cpu_to_be32(imsic_addr); + imsic_regs[socket * 4 + 2] = 0; + imsic_regs[socket * 4 + 3] = cpu_to_be32(imsic_size); + if (imsic_max_hart_per_socket < s->soc[socket].num_harts) { + imsic_max_hart_per_socket = s->soc[socket].num_harts; + } + } + imsic_name = g_strdup_printf("/soc/imsics@%lx", + (unsigned long)memmap[VIRT_IMSIC_S].base); + qemu_fdt_add_subnode(mc->fdt, imsic_name); + qemu_fdt_setprop_string(mc->fdt, imsic_name, "compatible", + "riscv,imsics"); + qemu_fdt_setprop_cell(mc->fdt, imsic_name, "#interrupt-cells", + FDT_IMSIC_INT_CELLS); + qemu_fdt_setprop(mc->fdt, imsic_name, "interrupt-controller", + NULL, 0); + qemu_fdt_setprop(mc->fdt, imsic_name, "msi-controller", + NULL, 0); + qemu_fdt_setprop(mc->fdt, imsic_name, "interrupts-extended", + imsic_cells, mc->smp.cpus * sizeof(uint32_t) * 2); + qemu_fdt_setprop(mc->fdt, imsic_name, "reg", imsic_regs, + riscv_socket_count(mc) * sizeof(uint32_t) * 4); + qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,num-ids", + VIRT_IRQCHIP_NUM_MSIS); + qemu_fdt_setprop_cells(mc->fdt, imsic_name, "riscv,ipi-id", + VIRT_IRQCHIP_IPI_MSI); + if (imsic_guest_bits) { + qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,guest-index-bits", + imsic_guest_bits); + } + if (riscv_socket_count(mc) > 1) { + qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,hart-index-bits", + imsic_num_bits(imsic_max_hart_per_socket)); + qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,group-index-bits", + imsic_num_bits(riscv_socket_count(mc))); + qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,group-index-shift", + IMSIC_MMIO_GROUP_MIN_SHIFT); + } + qemu_fdt_setprop_cell(mc->fdt, imsic_name, "phandle", *msi_s_phandle); + g_free(imsic_name); + + g_free(imsic_regs); + g_free(imsic_cells); +} + +static void create_fdt_socket_aplic(RISCVVirtState *s, + const MemMapEntry *memmap, int socket, + uint32_t msi_m_phandle, + uint32_t msi_s_phandle, + uint32_t *phandle, + uint32_t *intc_phandles, + uint32_t *aplic_phandles) +{ + 
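An illustrative aside, not part of the patch, placed here between the IMSIC and APLIC helpers: the S-level IMSIC geometry assembled just above scales with the number of guest interrupt files. The sketch assumes 4 KiB IMSIC interrupt files, so IMSIC_HART_SIZE(bits) is taken to be (1 << bits) * 0x1000; that page size is an assumption, not quoted from the headers.

#include <assert.h>
#include <stdint.h>

/* Assumed: one 4 KiB interrupt file for the S-file and for each guest file. */
#define EXAMPLE_IMSIC_PAGE_SZ 0x1000u

static uint32_t example_hart_size(uint32_t guest_bits)
{
    return (1u << guest_bits) * EXAMPLE_IMSIC_PAGE_SZ;
}

int main(void)
{
    /* aia-guests=1 -> imsic_num_bits(2) == 1 -> 2 files, 8 KiB per hart */
    assert(example_hart_size(1) == 0x2000);
    /* aia-guests=3 -> imsic_num_bits(4) == 2 -> 4 files, 16 KiB per hart */
    assert(example_hart_size(2) == 0x4000);
    return 0;
}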
int cpu; + char *aplic_name; + uint32_t *aplic_cells; + unsigned long aplic_addr; + MachineState *mc = MACHINE(s); + uint32_t aplic_m_phandle, aplic_s_phandle; + + aplic_m_phandle = (*phandle)++; + aplic_s_phandle = (*phandle)++; + aplic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2); + + /* M-level APLIC node */ + for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) { + aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]); + aplic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_EXT); + } + aplic_addr = memmap[VIRT_APLIC_M].base + + (memmap[VIRT_APLIC_M].size * socket); + aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr); + qemu_fdt_add_subnode(mc->fdt, aplic_name); + qemu_fdt_setprop_string(mc->fdt, aplic_name, "compatible", "riscv,aplic"); + qemu_fdt_setprop_cell(mc->fdt, aplic_name, + "#interrupt-cells", FDT_APLIC_INT_CELLS); + qemu_fdt_setprop(mc->fdt, aplic_name, "interrupt-controller", NULL, 0); + if (s->aia_type == VIRT_AIA_TYPE_APLIC) { + qemu_fdt_setprop(mc->fdt, aplic_name, "interrupts-extended", + aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2); + } else { + qemu_fdt_setprop_cell(mc->fdt, aplic_name, "msi-parent", + msi_m_phandle); + } + qemu_fdt_setprop_cells(mc->fdt, aplic_name, "reg", + 0x0, aplic_addr, 0x0, memmap[VIRT_APLIC_M].size); + qemu_fdt_setprop_cell(mc->fdt, aplic_name, "riscv,num-sources", + VIRT_IRQCHIP_NUM_SOURCES); + qemu_fdt_setprop_cell(mc->fdt, aplic_name, "riscv,children", + aplic_s_phandle); + qemu_fdt_setprop_cells(mc->fdt, aplic_name, "riscv,delegate", + aplic_s_phandle, 0x1, VIRT_IRQCHIP_NUM_SOURCES); + riscv_socket_fdt_write_id(mc, mc->fdt, aplic_name, socket); + qemu_fdt_setprop_cell(mc->fdt, aplic_name, "phandle", aplic_m_phandle); + g_free(aplic_name); + + /* S-level APLIC node */ + for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) { + aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]); + aplic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT); + } + aplic_addr = memmap[VIRT_APLIC_S].base + + (memmap[VIRT_APLIC_S].size * socket); + aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr); + qemu_fdt_add_subnode(mc->fdt, aplic_name); + qemu_fdt_setprop_string(mc->fdt, aplic_name, "compatible", "riscv,aplic"); + qemu_fdt_setprop_cell(mc->fdt, aplic_name, + "#interrupt-cells", FDT_APLIC_INT_CELLS); + qemu_fdt_setprop(mc->fdt, aplic_name, "interrupt-controller", NULL, 0); + if (s->aia_type == VIRT_AIA_TYPE_APLIC) { + qemu_fdt_setprop(mc->fdt, aplic_name, "interrupts-extended", + aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2); + } else { + qemu_fdt_setprop_cell(mc->fdt, aplic_name, "msi-parent", + msi_s_phandle); + } + qemu_fdt_setprop_cells(mc->fdt, aplic_name, "reg", + 0x0, aplic_addr, 0x0, memmap[VIRT_APLIC_S].size); + qemu_fdt_setprop_cell(mc->fdt, aplic_name, "riscv,num-sources", + VIRT_IRQCHIP_NUM_SOURCES); + riscv_socket_fdt_write_id(mc, mc->fdt, aplic_name, socket); + qemu_fdt_setprop_cell(mc->fdt, aplic_name, "phandle", aplic_s_phandle); + + if (!socket) { + platform_bus_add_all_fdt_nodes(mc->fdt, aplic_name, + memmap[VIRT_PLATFORM_BUS].base, + memmap[VIRT_PLATFORM_BUS].size, + VIRT_PLATFORM_BUS_IRQ); + } + + g_free(aplic_name); + + g_free(aplic_cells); + aplic_phandles[socket] = aplic_s_phandle; +} + +static void create_fdt_pmu(RISCVVirtState *s) +{ + char *pmu_name; + MachineState *mc = MACHINE(s); + RISCVCPU hart = s->soc[0].harts[0]; + + pmu_name = g_strdup_printf("/soc/pmu"); + qemu_fdt_add_subnode(mc->fdt, pmu_name); + qemu_fdt_setprop_string(mc->fdt, pmu_name, "compatible", "riscv,pmu"); + 
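A brief descriptive note on the APLIC wiring generated just above (an editorial gloss, not patch content):

/*
 * Hedged reading of the two APLIC nodes above: the M-level APLIC names the
 * S-level APLIC as its child ("riscv,children") and hands it the whole wired
 * source range 1..VIRT_IRQCHIP_NUM_SOURCES via "riscv,delegate", so a
 * supervisor-mode OS can service device interrupts directly. With aia=aplic
 * the APLICs are wired to the hart interrupt controllers through
 * "interrupts-extended"; with aia=aplic-imsic they deliver via "msi-parent"
 * to the IMSICs instead. Downstream wired devices (UART, RTC, virtio, PCIe)
 * then carry a second interrupt cell, 0x4, which in the usual dt-binding
 * IRQ_TYPE encoding means level triggered, active high; that interpretation
 * is an assumption here, not something this patch defines.
 */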
riscv_pmu_generate_fdt_node(mc->fdt, hart.cfg.pmu_num, pmu_name); + + g_free(pmu_name); +} + +static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap, + bool is_32_bit, uint32_t *phandle, + uint32_t *irq_mmio_phandle, + uint32_t *irq_pcie_phandle, + uint32_t *irq_virtio_phandle, + uint32_t *msi_pcie_phandle) +{ + char *clust_name; + int socket, phandle_pos; + MachineState *mc = MACHINE(s); + uint32_t msi_m_phandle = 0, msi_s_phandle = 0; + uint32_t *intc_phandles, xplic_phandles[MAX_NODES]; + + qemu_fdt_add_subnode(mc->fdt, "/cpus"); + qemu_fdt_setprop_cell(mc->fdt, "/cpus", "timebase-frequency", + RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ); + qemu_fdt_setprop_cell(mc->fdt, "/cpus", "#size-cells", 0x0); + qemu_fdt_setprop_cell(mc->fdt, "/cpus", "#address-cells", 0x1); + qemu_fdt_add_subnode(mc->fdt, "/cpus/cpu-map"); + + intc_phandles = g_new0(uint32_t, mc->smp.cpus); + + phandle_pos = mc->smp.cpus; + for (socket = (riscv_socket_count(mc) - 1); socket >= 0; socket--) { + phandle_pos -= s->soc[socket].num_harts; + + clust_name = g_strdup_printf("/cpus/cpu-map/cluster%d", socket); + qemu_fdt_add_subnode(mc->fdt, clust_name); + + create_fdt_socket_cpus(s, socket, clust_name, phandle, + is_32_bit, &intc_phandles[phandle_pos]); + + create_fdt_socket_memory(s, memmap, socket); + + g_free(clust_name); + + if (!kvm_enabled()) { + if (s->have_aclint) { + create_fdt_socket_aclint(s, memmap, socket, + &intc_phandles[phandle_pos]); + } else { + create_fdt_socket_clint(s, memmap, socket, + &intc_phandles[phandle_pos]); + } + } + } + + if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) { + create_fdt_imsic(s, memmap, phandle, intc_phandles, + &msi_m_phandle, &msi_s_phandle); + *msi_pcie_phandle = msi_s_phandle; + } + + phandle_pos = mc->smp.cpus; + for (socket = (riscv_socket_count(mc) - 1); socket >= 0; socket--) { + phandle_pos -= s->soc[socket].num_harts; + + if (s->aia_type == VIRT_AIA_TYPE_NONE) { + create_fdt_socket_plic(s, memmap, socket, phandle, + &intc_phandles[phandle_pos], xplic_phandles); + } else { + create_fdt_socket_aplic(s, memmap, socket, + msi_m_phandle, msi_s_phandle, phandle, + &intc_phandles[phandle_pos], xplic_phandles); + } + } + + g_free(intc_phandles); + for (socket = 0; socket < riscv_socket_count(mc); socket++) { if (socket == 0) { - plic_mmio_phandle = plic_phandle[socket]; - plic_virtio_phandle = plic_phandle[socket]; - plic_pcie_phandle = plic_phandle[socket]; + *irq_mmio_phandle = xplic_phandles[socket]; + *irq_virtio_phandle = xplic_phandles[socket]; + *irq_pcie_phandle = xplic_phandles[socket]; } if (socket == 1) { - plic_virtio_phandle = plic_phandle[socket]; - plic_pcie_phandle = plic_phandle[socket]; + *irq_virtio_phandle = xplic_phandles[socket]; + *irq_pcie_phandle = xplic_phandles[socket]; } if (socket == 2) { - plic_pcie_phandle = plic_phandle[socket]; + *irq_pcie_phandle = xplic_phandles[socket]; } } - riscv_socket_fdt_write_distance_matrix(mc, fdt); + riscv_socket_fdt_write_distance_matrix(mc, mc->fdt); +} + +static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap, + uint32_t irq_virtio_phandle) +{ + int i; + char *name; + MachineState *mc = MACHINE(s); for (i = 0; i < VIRTIO_COUNT; i++) { name = g_strdup_printf("/soc/virtio_mmio@%lx", (long)(memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size)); - qemu_fdt_add_subnode(fdt, name); - qemu_fdt_setprop_string(fdt, name, "compatible", "virtio,mmio"); - qemu_fdt_setprop_cells(fdt, name, "reg", + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", 
"virtio,mmio"); + qemu_fdt_setprop_cells(mc->fdt, name, "reg", 0x0, memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size, 0x0, memmap[VIRT_VIRTIO].size); - qemu_fdt_setprop_cell(fdt, name, "interrupt-parent", - plic_virtio_phandle); - qemu_fdt_setprop_cell(fdt, name, "interrupts", VIRTIO_IRQ + i); + qemu_fdt_setprop_cell(mc->fdt, name, "interrupt-parent", + irq_virtio_phandle); + if (s->aia_type == VIRT_AIA_TYPE_NONE) { + qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", + VIRTIO_IRQ + i); + } else { + qemu_fdt_setprop_cells(mc->fdt, name, "interrupts", + VIRTIO_IRQ + i, 0x4); + } g_free(name); } +} + +static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap, + uint32_t irq_pcie_phandle, + uint32_t msi_pcie_phandle) +{ + char *name; + MachineState *mc = MACHINE(s); name = g_strdup_printf("/soc/pci@%lx", (long) memmap[VIRT_PCIE_ECAM].base); - qemu_fdt_add_subnode(fdt, name); - qemu_fdt_setprop_cell(fdt, name, "#address-cells", FDT_PCI_ADDR_CELLS); - qemu_fdt_setprop_cell(fdt, name, "#interrupt-cells", FDT_PCI_INT_CELLS); - qemu_fdt_setprop_cell(fdt, name, "#size-cells", 0x2); - qemu_fdt_setprop_string(fdt, name, "compatible", "pci-host-ecam-generic"); - qemu_fdt_setprop_string(fdt, name, "device_type", "pci"); - qemu_fdt_setprop_cell(fdt, name, "linux,pci-domain", 0); - qemu_fdt_setprop_cells(fdt, name, "bus-range", 0, + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_cell(mc->fdt, name, "#address-cells", + FDT_PCI_ADDR_CELLS); + qemu_fdt_setprop_cell(mc->fdt, name, "#interrupt-cells", + FDT_PCI_INT_CELLS); + qemu_fdt_setprop_cell(mc->fdt, name, "#size-cells", 0x2); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", + "pci-host-ecam-generic"); + qemu_fdt_setprop_string(mc->fdt, name, "device_type", "pci"); + qemu_fdt_setprop_cell(mc->fdt, name, "linux,pci-domain", 0); + qemu_fdt_setprop_cells(mc->fdt, name, "bus-range", 0, memmap[VIRT_PCIE_ECAM].size / PCIE_MMCFG_SIZE_MIN - 1); - qemu_fdt_setprop(fdt, name, "dma-coherent", NULL, 0); - qemu_fdt_setprop_cells(fdt, name, "reg", 0, + qemu_fdt_setprop(mc->fdt, name, "dma-coherent", NULL, 0); + if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) { + qemu_fdt_setprop_cell(mc->fdt, name, "msi-parent", msi_pcie_phandle); + } + qemu_fdt_setprop_cells(mc->fdt, name, "reg", 0, memmap[VIRT_PCIE_ECAM].base, 0, memmap[VIRT_PCIE_ECAM].size); - qemu_fdt_setprop_sized_cells(fdt, name, "ranges", + qemu_fdt_setprop_sized_cells(mc->fdt, name, "ranges", 1, FDT_PCI_RANGE_IOPORT, 2, 0, 2, memmap[VIRT_PCIE_PIO].base, 2, memmap[VIRT_PCIE_PIO].size, 1, FDT_PCI_RANGE_MMIO, @@ -394,68 +875,106 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap, 2, virt_high_pcie_memmap.base, 2, virt_high_pcie_memmap.base, 2, virt_high_pcie_memmap.size); - create_pcie_irq_map(fdt, name, plic_pcie_phandle); + create_pcie_irq_map(s, mc->fdt, name, irq_pcie_phandle); g_free(name); +} - test_phandle = phandle++; +static void create_fdt_reset(RISCVVirtState *s, const MemMapEntry *memmap, + uint32_t *phandle) +{ + char *name; + uint32_t test_phandle; + MachineState *mc = MACHINE(s); + + test_phandle = (*phandle)++; name = g_strdup_printf("/soc/test@%lx", (long)memmap[VIRT_TEST].base); - qemu_fdt_add_subnode(fdt, name); + qemu_fdt_add_subnode(mc->fdt, name); { static const char * const compat[3] = { "sifive,test1", "sifive,test0", "syscon" }; - qemu_fdt_setprop_string_array(fdt, name, "compatible", (char **)&compat, - ARRAY_SIZE(compat)); + qemu_fdt_setprop_string_array(mc->fdt, name, "compatible", + (char **)&compat, ARRAY_SIZE(compat)); } - 
qemu_fdt_setprop_cells(fdt, name, "reg", - 0x0, memmap[VIRT_TEST].base, - 0x0, memmap[VIRT_TEST].size); - qemu_fdt_setprop_cell(fdt, name, "phandle", test_phandle); - test_phandle = qemu_fdt_get_phandle(fdt, name); + qemu_fdt_setprop_cells(mc->fdt, name, "reg", + 0x0, memmap[VIRT_TEST].base, 0x0, memmap[VIRT_TEST].size); + qemu_fdt_setprop_cell(mc->fdt, name, "phandle", test_phandle); + test_phandle = qemu_fdt_get_phandle(mc->fdt, name); g_free(name); - name = g_strdup_printf("/soc/reboot"); - qemu_fdt_add_subnode(fdt, name); - qemu_fdt_setprop_string(fdt, name, "compatible", "syscon-reboot"); - qemu_fdt_setprop_cell(fdt, name, "regmap", test_phandle); - qemu_fdt_setprop_cell(fdt, name, "offset", 0x0); - qemu_fdt_setprop_cell(fdt, name, "value", FINISHER_RESET); + name = g_strdup_printf("/reboot"); + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", "syscon-reboot"); + qemu_fdt_setprop_cell(mc->fdt, name, "regmap", test_phandle); + qemu_fdt_setprop_cell(mc->fdt, name, "offset", 0x0); + qemu_fdt_setprop_cell(mc->fdt, name, "value", FINISHER_RESET); g_free(name); - name = g_strdup_printf("/soc/poweroff"); - qemu_fdt_add_subnode(fdt, name); - qemu_fdt_setprop_string(fdt, name, "compatible", "syscon-poweroff"); - qemu_fdt_setprop_cell(fdt, name, "regmap", test_phandle); - qemu_fdt_setprop_cell(fdt, name, "offset", 0x0); - qemu_fdt_setprop_cell(fdt, name, "value", FINISHER_PASS); + name = g_strdup_printf("/poweroff"); + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", "syscon-poweroff"); + qemu_fdt_setprop_cell(mc->fdt, name, "regmap", test_phandle); + qemu_fdt_setprop_cell(mc->fdt, name, "offset", 0x0); + qemu_fdt_setprop_cell(mc->fdt, name, "value", FINISHER_PASS); g_free(name); +} - name = g_strdup_printf("/soc/uart@%lx", (long)memmap[VIRT_UART0].base); - qemu_fdt_add_subnode(fdt, name); - qemu_fdt_setprop_string(fdt, name, "compatible", "ns16550a"); - qemu_fdt_setprop_cells(fdt, name, "reg", +static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap, + uint32_t irq_mmio_phandle) +{ + char *name; + MachineState *mc = MACHINE(s); + + name = g_strdup_printf("/soc/serial@%lx", (long)memmap[VIRT_UART0].base); + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", "ns16550a"); + qemu_fdt_setprop_cells(mc->fdt, name, "reg", 0x0, memmap[VIRT_UART0].base, 0x0, memmap[VIRT_UART0].size); - qemu_fdt_setprop_cell(fdt, name, "clock-frequency", 3686400); - qemu_fdt_setprop_cell(fdt, name, "interrupt-parent", plic_mmio_phandle); - qemu_fdt_setprop_cell(fdt, name, "interrupts", UART0_IRQ); + qemu_fdt_setprop_cell(mc->fdt, name, "clock-frequency", 3686400); + qemu_fdt_setprop_cell(mc->fdt, name, "interrupt-parent", irq_mmio_phandle); + if (s->aia_type == VIRT_AIA_TYPE_NONE) { + qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", UART0_IRQ); + } else { + qemu_fdt_setprop_cells(mc->fdt, name, "interrupts", UART0_IRQ, 0x4); + } - qemu_fdt_add_subnode(fdt, "/chosen"); - qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", name); + qemu_fdt_add_subnode(mc->fdt, "/chosen"); + qemu_fdt_setprop_string(mc->fdt, "/chosen", "stdout-path", name); g_free(name); +} + +static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap, + uint32_t irq_mmio_phandle) +{ + char *name; + MachineState *mc = MACHINE(s); name = g_strdup_printf("/soc/rtc@%lx", (long)memmap[VIRT_RTC].base); - qemu_fdt_add_subnode(fdt, name); - qemu_fdt_setprop_string(fdt, name, "compatible", 
"google,goldfish-rtc"); - qemu_fdt_setprop_cells(fdt, name, "reg", - 0x0, memmap[VIRT_RTC].base, - 0x0, memmap[VIRT_RTC].size); - qemu_fdt_setprop_cell(fdt, name, "interrupt-parent", plic_mmio_phandle); - qemu_fdt_setprop_cell(fdt, name, "interrupts", RTC_IRQ); + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", + "google,goldfish-rtc"); + qemu_fdt_setprop_cells(mc->fdt, name, "reg", + 0x0, memmap[VIRT_RTC].base, 0x0, memmap[VIRT_RTC].size); + qemu_fdt_setprop_cell(mc->fdt, name, "interrupt-parent", + irq_mmio_phandle); + if (s->aia_type == VIRT_AIA_TYPE_NONE) { + qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", RTC_IRQ); + } else { + qemu_fdt_setprop_cells(mc->fdt, name, "interrupts", RTC_IRQ, 0x4); + } g_free(name); +} - name = g_strdup_printf("/soc/flash@%" PRIx64, flashbase); +static void create_fdt_flash(RISCVVirtState *s, const MemMapEntry *memmap) +{ + char *name; + MachineState *mc = MACHINE(s); + hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2; + hwaddr flashbase = virt_memmap[VIRT_FLASH].base; + + name = g_strdup_printf("/flash@%" PRIx64, flashbase); qemu_fdt_add_subnode(mc->fdt, name); qemu_fdt_setprop_string(mc->fdt, name, "compatible", "cfi-flash"); qemu_fdt_setprop_sized_cells(mc->fdt, name, "reg", @@ -463,11 +982,85 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap, 2, flashbase + flashsize, 2, flashsize); qemu_fdt_setprop_cell(mc->fdt, name, "bank-width", 4); g_free(name); +} + +static void create_fdt_fw_cfg(RISCVVirtState *s, const MemMapEntry *memmap) +{ + char *nodename; + MachineState *mc = MACHINE(s); + hwaddr base = memmap[VIRT_FW_CFG].base; + hwaddr size = memmap[VIRT_FW_CFG].size; + + nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base); + qemu_fdt_add_subnode(mc->fdt, nodename); + qemu_fdt_setprop_string(mc->fdt, nodename, + "compatible", "qemu,fw-cfg-mmio"); + qemu_fdt_setprop_sized_cells(mc->fdt, nodename, "reg", + 2, base, 2, size); + qemu_fdt_setprop(mc->fdt, nodename, "dma-coherent", NULL, 0); + g_free(nodename); +} + +static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap, + uint64_t mem_size, const char *cmdline, bool is_32_bit) +{ + MachineState *mc = MACHINE(s); + uint32_t phandle = 1, irq_mmio_phandle = 1, msi_pcie_phandle = 1; + uint32_t irq_pcie_phandle = 1, irq_virtio_phandle = 1; + uint8_t rng_seed[32]; + + if (mc->dtb) { + mc->fdt = load_device_tree(mc->dtb, &s->fdt_size); + if (!mc->fdt) { + error_report("load_device_tree() failed"); + exit(1); + } + goto update_bootargs; + } else { + mc->fdt = create_device_tree(&s->fdt_size); + if (!mc->fdt) { + error_report("create_device_tree() failed"); + exit(1); + } + } + + qemu_fdt_setprop_string(mc->fdt, "/", "model", "riscv-virtio,qemu"); + qemu_fdt_setprop_string(mc->fdt, "/", "compatible", "riscv-virtio"); + qemu_fdt_setprop_cell(mc->fdt, "/", "#size-cells", 0x2); + qemu_fdt_setprop_cell(mc->fdt, "/", "#address-cells", 0x2); + + qemu_fdt_add_subnode(mc->fdt, "/soc"); + qemu_fdt_setprop(mc->fdt, "/soc", "ranges", NULL, 0); + qemu_fdt_setprop_string(mc->fdt, "/soc", "compatible", "simple-bus"); + qemu_fdt_setprop_cell(mc->fdt, "/soc", "#size-cells", 0x2); + qemu_fdt_setprop_cell(mc->fdt, "/soc", "#address-cells", 0x2); + + create_fdt_sockets(s, memmap, is_32_bit, &phandle, + &irq_mmio_phandle, &irq_pcie_phandle, &irq_virtio_phandle, + &msi_pcie_phandle); + + create_fdt_virtio(s, memmap, irq_virtio_phandle); + + create_fdt_pcie(s, memmap, irq_pcie_phandle, msi_pcie_phandle); + + create_fdt_reset(s, memmap, &phandle); + + 
create_fdt_uart(s, memmap, irq_mmio_phandle); + + create_fdt_rtc(s, memmap, irq_mmio_phandle); + + create_fdt_flash(s, memmap); + create_fdt_fw_cfg(s, memmap); + create_fdt_pmu(s); update_bootargs: - if (cmdline) { - qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline); + if (cmdline && *cmdline) { + qemu_fdt_setprop_string(mc->fdt, "/chosen", "bootargs", cmdline); } + + /* Pass seed to RNG */ + qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed)); + qemu_fdt_setprop(mc->fdt, "/chosen", "rng-seed", rng_seed, sizeof(rng_seed)); } static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem, @@ -476,7 +1069,7 @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem, hwaddr high_mmio_base, hwaddr high_mmio_size, hwaddr pio_base, - DeviceState *plic) + DeviceState *irqchip) { DeviceState *dev; MemoryRegion *ecam_alias, *ecam_reg; @@ -510,7 +1103,7 @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem, sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, pio_base); for (i = 0; i < GPEX_NUM_IRQS; i++) { - irq = qdev_get_gpio_in(plic, PCIE_IRQ + i); + irq = qdev_get_gpio_in(irqchip, PCIE_IRQ + i); sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq); gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ + i); @@ -522,106 +1115,30 @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem, static FWCfgState *create_fw_cfg(const MachineState *mc) { hwaddr base = virt_memmap[VIRT_FW_CFG].base; - hwaddr size = virt_memmap[VIRT_FW_CFG].size; FWCfgState *fw_cfg; - char *nodename; fw_cfg = fw_cfg_init_mem_wide(base + 8, base, 8, base + 16, &address_space_memory); fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)mc->smp.cpus); - nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base); - qemu_fdt_add_subnode(mc->fdt, nodename); - qemu_fdt_setprop_string(mc->fdt, nodename, - "compatible", "qemu,fw-cfg-mmio"); - qemu_fdt_setprop_sized_cells(mc->fdt, nodename, "reg", - 2, base, 2, size); - qemu_fdt_setprop(mc->fdt, nodename, "dma-coherent", NULL, 0); - g_free(nodename); return fw_cfg; } -static void virt_machine_init(MachineState *machine) +static DeviceState *virt_create_plic(const MemMapEntry *memmap, int socket, + int base_hartid, int hart_count) { - const MemMapEntry *memmap = virt_memmap; - RISCVVirtState *s = RISCV_VIRT_MACHINE(machine); - MemoryRegion *system_memory = get_system_memory(); - MemoryRegion *main_mem = g_new(MemoryRegion, 1); - MemoryRegion *mask_rom = g_new(MemoryRegion, 1); - char *plic_hart_config, *soc_name; - size_t plic_hart_config_len; - target_ulong start_addr = memmap[VIRT_DRAM].base; - target_ulong firmware_end_addr, kernel_start_addr; - uint32_t fdt_load_addr; - uint64_t kernel_entry; - DeviceState *mmio_plic, *virtio_plic, *pcie_plic; - int i, j, base_hartid, hart_count; + DeviceState *ret; + char *plic_hart_config; - /* Check socket count limit */ - if (VIRT_SOCKETS_MAX < riscv_socket_count(machine)) { - error_report("number of sockets/nodes should be less than %d", - VIRT_SOCKETS_MAX); - exit(1); - } + /* Per-socket PLIC hart topology configuration string */ + plic_hart_config = riscv_plic_hart_config_string(hart_count); - /* Initialize sockets */ - mmio_plic = virtio_plic = pcie_plic = NULL; - for (i = 0; i < riscv_socket_count(machine); i++) { - if (!riscv_socket_check_hartids(machine, i)) { - error_report("discontinuous hartids in socket%d", i); - exit(1); - } - - base_hartid = riscv_socket_first_hartid(machine, i); - if (base_hartid < 0) { - error_report("can't find hartid base for socket%d", i); - exit(1); - } - - hart_count = 
riscv_socket_hart_count(machine, i); - if (hart_count < 0) { - error_report("can't find hart count for socket%d", i); - exit(1); - } - - soc_name = g_strdup_printf("soc%d", i); - object_initialize_child(OBJECT(machine), soc_name, &s->soc[i], - TYPE_RISCV_HART_ARRAY); - g_free(soc_name); - object_property_set_str(OBJECT(&s->soc[i]), "cpu-type", - machine->cpu_type, &error_abort); - object_property_set_int(OBJECT(&s->soc[i]), "hartid-base", - base_hartid, &error_abort); - object_property_set_int(OBJECT(&s->soc[i]), "num-harts", - hart_count, &error_abort); - sysbus_realize(SYS_BUS_DEVICE(&s->soc[i]), &error_abort); - - /* Per-socket CLINT */ - sifive_clint_create( - memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size, - memmap[VIRT_CLINT].size, base_hartid, hart_count, - SIFIVE_SIP_BASE, SIFIVE_TIMECMP_BASE, SIFIVE_TIME_BASE, - SIFIVE_CLINT_TIMEBASE_FREQ, true); - - /* Per-socket PLIC hart topology configuration string */ - plic_hart_config_len = - (strlen(VIRT_PLIC_HART_CONFIG) + 1) * hart_count; - plic_hart_config = g_malloc0(plic_hart_config_len); - for (j = 0; j < hart_count; j++) { - if (j != 0) { - strncat(plic_hart_config, ",", plic_hart_config_len); - } - strncat(plic_hart_config, VIRT_PLIC_HART_CONFIG, - plic_hart_config_len); - plic_hart_config_len -= (strlen(VIRT_PLIC_HART_CONFIG) + 1); - } - - /* Per-socket PLIC */ - s->plic[i] = sifive_plic_create( - memmap[VIRT_PLIC].base + i * memmap[VIRT_PLIC].size, - plic_hart_config, base_hartid, - VIRT_PLIC_NUM_SOURCES, - VIRT_PLIC_NUM_PRIORITIES, + /* Per-socket PLIC */ + ret = sifive_plic_create( + memmap[VIRT_PLIC].base + socket * memmap[VIRT_PLIC].size, + plic_hart_config, hart_count, base_hartid, + VIRT_IRQCHIP_NUM_SOURCES, + ((1U << VIRT_IRQCHIP_NUM_PRIO_BITS) - 1), VIRT_PLIC_PRIORITY_BASE, VIRT_PLIC_PENDING_BASE, VIRT_PLIC_ENABLE_BASE, @@ -629,55 +1146,118 @@ static void virt_machine_init(MachineState *machine) VIRT_PLIC_CONTEXT_BASE, VIRT_PLIC_CONTEXT_STRIDE, memmap[VIRT_PLIC].size); - g_free(plic_hart_config); - /* Try to use different PLIC instance based device type */ - if (i == 0) { - mmio_plic = s->plic[i]; - virtio_plic = s->plic[i]; - pcie_plic = s->plic[i]; + g_free(plic_hart_config); + + return ret; +} + +static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type, int aia_guests, + const MemMapEntry *memmap, int socket, + int base_hartid, int hart_count) +{ + int i; + hwaddr addr; + uint32_t guest_bits; + DeviceState *aplic_m; + bool msimode = (aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) ? 
true : false; + + if (msimode) { + /* Per-socket M-level IMSICs */ + addr = memmap[VIRT_IMSIC_M].base + socket * VIRT_IMSIC_GROUP_MAX_SIZE; + for (i = 0; i < hart_count; i++) { + riscv_imsic_create(addr + i * IMSIC_HART_SIZE(0), + base_hartid + i, true, 1, + VIRT_IRQCHIP_NUM_MSIS); } - if (i == 1) { - virtio_plic = s->plic[i]; - pcie_plic = s->plic[i]; - } - if (i == 2) { - pcie_plic = s->plic[i]; + + /* Per-socket S-level IMSICs */ + guest_bits = imsic_num_bits(aia_guests + 1); + addr = memmap[VIRT_IMSIC_S].base + socket * VIRT_IMSIC_GROUP_MAX_SIZE; + for (i = 0; i < hart_count; i++) { + riscv_imsic_create(addr + i * IMSIC_HART_SIZE(guest_bits), + base_hartid + i, false, 1 + aia_guests, + VIRT_IRQCHIP_NUM_MSIS); } } - if (riscv_is_32bit(&s->soc[0])) { -#if HOST_LONG_BITS == 64 - /* limit RAM size in a 32-bit system */ - if (machine->ram_size > 10 * GiB) { - machine->ram_size = 10 * GiB; - error_report("Limiting RAM size to 10 GiB"); - } -#endif - virt_high_pcie_memmap.base = VIRT32_HIGH_PCIE_MMIO_BASE; - virt_high_pcie_memmap.size = VIRT32_HIGH_PCIE_MMIO_SIZE; - } else { - virt_high_pcie_memmap.size = VIRT64_HIGH_PCIE_MMIO_SIZE; - virt_high_pcie_memmap.base = memmap[VIRT_DRAM].base + machine->ram_size; - virt_high_pcie_memmap.base = - ROUND_UP(virt_high_pcie_memmap.base, virt_high_pcie_memmap.size); + /* Per-socket M-level APLIC */ + aplic_m = riscv_aplic_create( + memmap[VIRT_APLIC_M].base + socket * memmap[VIRT_APLIC_M].size, + memmap[VIRT_APLIC_M].size, + (msimode) ? 0 : base_hartid, + (msimode) ? 0 : hart_count, + VIRT_IRQCHIP_NUM_SOURCES, + VIRT_IRQCHIP_NUM_PRIO_BITS, + msimode, true, NULL); + + if (aplic_m) { + /* Per-socket S-level APLIC */ + riscv_aplic_create( + memmap[VIRT_APLIC_S].base + socket * memmap[VIRT_APLIC_S].size, + memmap[VIRT_APLIC_S].size, + (msimode) ? 0 : base_hartid, + (msimode) ? 
0 : hart_count,
+        VIRT_IRQCHIP_NUM_SOURCES,
+        VIRT_IRQCHIP_NUM_PRIO_BITS,
+        msimode, false, aplic_m);
    }
-    /* register system main memory (actual RAM) */
-    memory_region_init_ram(main_mem, NULL, "riscv_virt_board.ram",
-                           machine->ram_size, &error_fatal);
-    memory_region_add_subregion(system_memory, memmap[VIRT_DRAM].base,
-        main_mem);
+    return aplic_m;
+}
-    /* create device tree */
-    create_fdt(s, memmap, machine->ram_size, machine->kernel_cmdline,
-               riscv_is_32bit(&s->soc[0]));
+static void create_platform_bus(RISCVVirtState *s, DeviceState *irqchip)
+{
+    DeviceState *dev;
+    SysBusDevice *sysbus;
+    const MemMapEntry *memmap = virt_memmap;
+    int i;
+    MemoryRegion *sysmem = get_system_memory();
-    /* boot rom */
-    memory_region_init_rom(mask_rom, NULL, "riscv_virt_board.mrom",
-                           memmap[VIRT_MROM].size, &error_fatal);
-    memory_region_add_subregion(system_memory, memmap[VIRT_MROM].base,
-                                mask_rom);
+    dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE);
+    dev->id = g_strdup(TYPE_PLATFORM_BUS_DEVICE);
+    qdev_prop_set_uint32(dev, "num_irqs", VIRT_PLATFORM_BUS_NUM_IRQS);
+    qdev_prop_set_uint32(dev, "mmio_size", memmap[VIRT_PLATFORM_BUS].size);
+    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+    s->platform_bus_dev = dev;
+
+    sysbus = SYS_BUS_DEVICE(dev);
+    for (i = 0; i < VIRT_PLATFORM_BUS_NUM_IRQS; i++) {
+        int irq = VIRT_PLATFORM_BUS_IRQ + i;
+        sysbus_connect_irq(sysbus, i, qdev_get_gpio_in(irqchip, irq));
+    }
+
+    memory_region_add_subregion(sysmem,
+                                memmap[VIRT_PLATFORM_BUS].base,
+                                sysbus_mmio_get_region(sysbus, 0));
+}
+
+static void virt_machine_done(Notifier *notifier, void *data)
+{
+    RISCVVirtState *s = container_of(notifier, RISCVVirtState,
+                                     machine_done);
+    const MemMapEntry *memmap = virt_memmap;
+    MachineState *machine = MACHINE(s);
+    target_ulong start_addr = memmap[VIRT_DRAM].base;
+    target_ulong firmware_end_addr, kernel_start_addr;
+    uint32_t fdt_load_addr;
+    uint64_t kernel_entry;
+
+    /*
+     * Only direct boot kernel is currently supported for KVM VM,
+     * so the "-bios" parameter is not supported when KVM is enabled.
+     */
+    if (kvm_enabled()) {
+        if (machine->firmware) {
+            if (strcmp(machine->firmware, "none")) {
+                error_report("Machine mode firmware is not supported in "
+                             "combination with KVM.");
+                exit(1);
+            }
+        } else {
+            machine->firmware = g_strdup("none");
+        }
+    }
     if (riscv_is_32bit(&s->soc[0])) {
         firmware_end_addr = riscv_find_and_load_firmware(machine,
@@ -687,7 +1267,30 @@ static void virt_machine_init(MachineState *machine)
                                     RISCV64_BIOS_BIN, start_addr, NULL);
     }
-    if (machine->kernel_filename) {
+    /*
+     * Init fw_cfg. Must be done before riscv_load_fdt, otherwise the device
+     * tree cannot be altered and we get FDT_ERR_NOSPACE.
+     */
+    s->fw_cfg = create_fw_cfg(machine);
+    rom_set_fw(s->fw_cfg);
+
+    if (drive_get(IF_PFLASH, 0, 1)) {
+        /*
+         * S-mode FW like EDK2 will be kept in the second pflash (unit 1).
+         * When kernel, initrd and pflash options are all provided on the
+         * command line, the kernel and initrd will be copied to the fw_cfg
+         * table and opensbi will jump to the flash address which is the
+         * entry point of S-mode FW. It is the job of the S-mode FW to load
+         * the kernel and initrd using the fw_cfg table.
+         *
+         * If only pflash is given but not -kernel, then it is the job of
+         * the S-mode firmware to locate and load the kernel.
+         * In either case, the next_addr for opensbi will be the flash address.
+ */ + riscv_setup_firmware_boot(machine); + kernel_entry = virt_memmap[VIRT_FLASH].base + + virt_memmap[VIRT_FLASH].size / 2; + } else if (machine->kernel_filename) { kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0], firmware_end_addr); @@ -720,13 +1323,6 @@ static void virt_machine_init(MachineState *machine) start_addr = virt_memmap[VIRT_FLASH].base; } - /* - * Init fw_cfg. Must be done before riscv_load_fdt, otherwise the device - * tree cannot be altered and we get FDT_ERR_NOSPACE. - */ - s->fw_cfg = create_fw_cfg(machine); - rom_set_fw(s->fw_cfg); - /* Compute the fdt load address in dram */ fdt_load_addr = riscv_load_fdt(memmap[VIRT_DRAM].base, machine->ram_size, machine->fdt); @@ -734,7 +1330,159 @@ static void virt_machine_init(MachineState *machine) riscv_setup_rom_reset_vec(machine, &s->soc[0], start_addr, virt_memmap[VIRT_MROM].base, virt_memmap[VIRT_MROM].size, kernel_entry, - fdt_load_addr, machine->fdt); + fdt_load_addr); + + /* + * Only direct boot kernel is currently supported for KVM VM, + * So here setup kernel start address and fdt address. + * TODO:Support firmware loading and integrate to TCG start + */ + if (kvm_enabled()) { + riscv_setup_direct_kernel(kernel_entry, fdt_load_addr); + } +} + +static void virt_machine_init(MachineState *machine) +{ + const MemMapEntry *memmap = virt_memmap; + RISCVVirtState *s = RISCV_VIRT_MACHINE(machine); + MemoryRegion *system_memory = get_system_memory(); + MemoryRegion *mask_rom = g_new(MemoryRegion, 1); + char *soc_name; + DeviceState *mmio_irqchip, *virtio_irqchip, *pcie_irqchip; + int i, base_hartid, hart_count; + + /* Check socket count limit */ + if (VIRT_SOCKETS_MAX < riscv_socket_count(machine)) { + error_report("number of sockets/nodes should be less than %d", + VIRT_SOCKETS_MAX); + exit(1); + } + + /* Initialize sockets */ + mmio_irqchip = virtio_irqchip = pcie_irqchip = NULL; + for (i = 0; i < riscv_socket_count(machine); i++) { + if (!riscv_socket_check_hartids(machine, i)) { + error_report("discontinuous hartids in socket%d", i); + exit(1); + } + + base_hartid = riscv_socket_first_hartid(machine, i); + if (base_hartid < 0) { + error_report("can't find hartid base for socket%d", i); + exit(1); + } + + hart_count = riscv_socket_hart_count(machine, i); + if (hart_count < 0) { + error_report("can't find hart count for socket%d", i); + exit(1); + } + + soc_name = g_strdup_printf("soc%d", i); + object_initialize_child(OBJECT(machine), soc_name, &s->soc[i], + TYPE_RISCV_HART_ARRAY); + g_free(soc_name); + object_property_set_str(OBJECT(&s->soc[i]), "cpu-type", + machine->cpu_type, &error_abort); + object_property_set_int(OBJECT(&s->soc[i]), "hartid-base", + base_hartid, &error_abort); + object_property_set_int(OBJECT(&s->soc[i]), "num-harts", + hart_count, &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->soc[i]), &error_fatal); + + if (!kvm_enabled()) { + if (s->have_aclint) { + if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) { + /* Per-socket ACLINT MTIMER */ + riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base + + i * RISCV_ACLINT_DEFAULT_MTIMER_SIZE, + RISCV_ACLINT_DEFAULT_MTIMER_SIZE, + base_hartid, hart_count, + RISCV_ACLINT_DEFAULT_MTIMECMP, + RISCV_ACLINT_DEFAULT_MTIME, + RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true); + } else { + /* Per-socket ACLINT MSWI, MTIMER, and SSWI */ + riscv_aclint_swi_create(memmap[VIRT_CLINT].base + + i * memmap[VIRT_CLINT].size, + base_hartid, hart_count, false); + riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base + + i * memmap[VIRT_CLINT].size + + RISCV_ACLINT_SWI_SIZE, + 
RISCV_ACLINT_DEFAULT_MTIMER_SIZE, + base_hartid, hart_count, + RISCV_ACLINT_DEFAULT_MTIMECMP, + RISCV_ACLINT_DEFAULT_MTIME, + RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true); + riscv_aclint_swi_create(memmap[VIRT_ACLINT_SSWI].base + + i * memmap[VIRT_ACLINT_SSWI].size, + base_hartid, hart_count, true); + } + } else { + /* Per-socket SiFive CLINT */ + riscv_aclint_swi_create( + memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size, + base_hartid, hart_count, false); + riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base + + i * memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE, + RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count, + RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME, + RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true); + } + } + + /* Per-socket interrupt controller */ + if (s->aia_type == VIRT_AIA_TYPE_NONE) { + s->irqchip[i] = virt_create_plic(memmap, i, + base_hartid, hart_count); + } else { + s->irqchip[i] = virt_create_aia(s->aia_type, s->aia_guests, + memmap, i, base_hartid, + hart_count); + } + + /* Try to use different IRQCHIP instance based device type */ + if (i == 0) { + mmio_irqchip = s->irqchip[i]; + virtio_irqchip = s->irqchip[i]; + pcie_irqchip = s->irqchip[i]; + } + if (i == 1) { + virtio_irqchip = s->irqchip[i]; + pcie_irqchip = s->irqchip[i]; + } + if (i == 2) { + pcie_irqchip = s->irqchip[i]; + } + } + + if (riscv_is_32bit(&s->soc[0])) { +#if HOST_LONG_BITS == 64 + /* limit RAM size in a 32-bit system */ + if (machine->ram_size > 10 * GiB) { + machine->ram_size = 10 * GiB; + error_report("Limiting RAM size to 10 GiB"); + } +#endif + virt_high_pcie_memmap.base = VIRT32_HIGH_PCIE_MMIO_BASE; + virt_high_pcie_memmap.size = VIRT32_HIGH_PCIE_MMIO_SIZE; + } else { + virt_high_pcie_memmap.size = VIRT64_HIGH_PCIE_MMIO_SIZE; + virt_high_pcie_memmap.base = memmap[VIRT_DRAM].base + machine->ram_size; + virt_high_pcie_memmap.base = + ROUND_UP(virt_high_pcie_memmap.base, virt_high_pcie_memmap.size); + } + + /* register system main memory (actual RAM) */ + memory_region_add_subregion(system_memory, memmap[VIRT_DRAM].base, + machine->ram); + + /* boot rom */ + memory_region_init_rom(mask_rom, NULL, "riscv_virt_board.mrom", + memmap[VIRT_MROM].size, &error_fatal); + memory_region_add_subregion(system_memory, memmap[VIRT_MROM].base, + mask_rom); /* SiFive Test MMIO device */ sifive_test_create(memmap[VIRT_TEST].base); @@ -743,7 +1491,7 @@ static void virt_machine_init(MachineState *machine) for (i = 0; i < VIRTIO_COUNT; i++) { sysbus_create_simple("virtio-mmio", memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size, - qdev_get_gpio_in(DEVICE(virtio_plic), VIRTIO_IRQ + i)); + qdev_get_gpio_in(DEVICE(virtio_irqchip), VIRTIO_IRQ + i)); } gpex_pcie_init(system_memory, @@ -754,14 +1502,16 @@ static void virt_machine_init(MachineState *machine) virt_high_pcie_memmap.base, virt_high_pcie_memmap.size, memmap[VIRT_PCIE_PIO].base, - DEVICE(pcie_plic)); + DEVICE(pcie_irqchip)); + + create_platform_bus(s, DEVICE(mmio_irqchip)); serial_mm_init(system_memory, memmap[VIRT_UART0].base, - 0, qdev_get_gpio_in(DEVICE(mmio_plic), UART0_IRQ), 399193, + 0, qdev_get_gpio_in(DEVICE(mmio_irqchip), UART0_IRQ), 399193, serial_hd(0), DEVICE_LITTLE_ENDIAN); sysbus_create_simple("goldfish_rtc", memmap[VIRT_RTC].base, - qdev_get_gpio_in(DEVICE(mmio_plic), RTC_IRQ)); + qdev_get_gpio_in(DEVICE(mmio_irqchip), RTC_IRQ)); virt_flash_create(s); @@ -771,15 +1521,124 @@ static void virt_machine_init(MachineState *machine) drive_get(IF_PFLASH, 0, i)); } virt_flash_map(s, system_memory); + + /* create device tree */ 
+ create_fdt(s, memmap, machine->ram_size, machine->kernel_cmdline, + riscv_is_32bit(&s->soc[0])); + + s->machine_done.notify = virt_machine_done; + qemu_add_machine_init_done_notifier(&s->machine_done); } static void virt_machine_instance_init(Object *obj) { } +static char *virt_get_aia_guests(Object *obj, Error **errp) +{ + RISCVVirtState *s = RISCV_VIRT_MACHINE(obj); + char val[32]; + + sprintf(val, "%d", s->aia_guests); + return g_strdup(val); +} + +static void virt_set_aia_guests(Object *obj, const char *val, Error **errp) +{ + RISCVVirtState *s = RISCV_VIRT_MACHINE(obj); + + s->aia_guests = atoi(val); + if (s->aia_guests < 0 || s->aia_guests > VIRT_IRQCHIP_MAX_GUESTS) { + error_setg(errp, "Invalid number of AIA IMSIC guests"); + error_append_hint(errp, "Valid values be between 0 and %d.\n", + VIRT_IRQCHIP_MAX_GUESTS); + } +} + +static char *virt_get_aia(Object *obj, Error **errp) +{ + RISCVVirtState *s = RISCV_VIRT_MACHINE(obj); + const char *val; + + switch (s->aia_type) { + case VIRT_AIA_TYPE_APLIC: + val = "aplic"; + break; + case VIRT_AIA_TYPE_APLIC_IMSIC: + val = "aplic-imsic"; + break; + default: + val = "none"; + break; + }; + + return g_strdup(val); +} + +static void virt_set_aia(Object *obj, const char *val, Error **errp) +{ + RISCVVirtState *s = RISCV_VIRT_MACHINE(obj); + + if (!strcmp(val, "none")) { + s->aia_type = VIRT_AIA_TYPE_NONE; + } else if (!strcmp(val, "aplic")) { + s->aia_type = VIRT_AIA_TYPE_APLIC; + } else if (!strcmp(val, "aplic-imsic")) { + s->aia_type = VIRT_AIA_TYPE_APLIC_IMSIC; + } else { + error_setg(errp, "Invalid AIA interrupt controller type"); + error_append_hint(errp, "Valid values are none, aplic, and " + "aplic-imsic.\n"); + } +} + +static bool virt_get_aclint(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + RISCVVirtState *s = RISCV_VIRT_MACHINE(ms); + + return s->have_aclint; +} + +static void virt_set_aclint(Object *obj, bool value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + RISCVVirtState *s = RISCV_VIRT_MACHINE(ms); + + s->have_aclint = value; +} + +static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine, + DeviceState *dev) +{ + MachineClass *mc = MACHINE_GET_CLASS(machine); + + if (device_is_dynamic_sysbus(mc, dev)) { + return HOTPLUG_HANDLER(machine); + } + return NULL; +} + +static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + RISCVVirtState *s = RISCV_VIRT_MACHINE(hotplug_dev); + + if (s->platform_bus_dev) { + MachineClass *mc = MACHINE_GET_CLASS(s); + + if (device_is_dynamic_sysbus(mc, dev)) { + platform_bus_link_device(PLATFORM_BUS_DEVICE(s->platform_bus_dev), + SYS_BUS_DEVICE(dev)); + } + } +} + static void virt_machine_class_init(ObjectClass *oc, void *data) { + char str[128]; MachineClass *mc = MACHINE_CLASS(oc); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); mc->desc = "RISC-V VirtIO board"; mc->init = virt_machine_init; @@ -790,8 +1649,36 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) mc->cpu_index_to_instance_props = riscv_numa_cpu_index_to_props; mc->get_default_cpu_node_id = riscv_numa_get_default_cpu_node_id; mc->numa_mem_supported = true; + mc->default_ram_id = "riscv_virt_board.ram"; + assert(!mc->get_hotplug_handler); + mc->get_hotplug_handler = virt_machine_get_hotplug_handler; + + hc->plug = virt_machine_device_plug_cb; machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE); +#ifdef CONFIG_TPM + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS); +#endif + + 
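A hedged usage note, not part of the patch: the class properties registered just below surface as -machine options, so the expected invocations look roughly like the following. The option spellings are inferred from the property names and should be treated as assumptions.

/*
 * Example command lines (illustrative only):
 *   qemu-system-riscv64 -M virt,aclint=on ...
 *   qemu-system-riscv64 -M virt,aia=aplic ...
 *   qemu-system-riscv64 -M virt,aia=aplic-imsic,aia-guests=1 ...
 */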
object_class_property_add_bool(oc, "aclint", virt_get_aclint, + virt_set_aclint); + object_class_property_set_description(oc, "aclint", + "Set on/off to enable/disable " + "emulating ACLINT devices"); + + object_class_property_add_str(oc, "aia", virt_get_aia, + virt_set_aia); + object_class_property_set_description(oc, "aia", + "Set type of AIA interrupt " + "conttoller. Valid values are " + "none, aplic, and aplic-imsic."); + + object_class_property_add_str(oc, "aia-guests", + virt_get_aia_guests, + virt_set_aia_guests); + sprintf(str, "Set number of guest MMIO pages for AIA IMSIC. Valid value " + "should be between 0 and %d.", VIRT_IRQCHIP_MAX_GUESTS); + object_class_property_set_description(oc, "aia-guests", str); } static const TypeInfo virt_machine_typeinfo = { @@ -800,6 +1687,10 @@ static const TypeInfo virt_machine_typeinfo = { .class_init = virt_machine_class_init, .instance_init = virt_machine_instance_init, .instance_size = sizeof(RISCVVirtState), + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + }, }; static void virt_machine_init_register_types(void) diff --git a/hw/rtc/Kconfig b/hw/rtc/Kconfig index f06e133b8a..d0d8dda084 100644 --- a/hw/rtc/Kconfig +++ b/hw/rtc/Kconfig @@ -1,10 +1,12 @@ config DS1338 bool depends on I2C + default y if I2C_DEVICES config M41T80 bool depends on I2C + default y if I2C_DEVICES config M48T59 bool @@ -25,3 +27,6 @@ config SUN4V_RTC config GOLDFISH_RTC bool + +config LS7A_RTC + bool diff --git a/hw/rtc/allwinner-rtc.c b/hw/rtc/allwinner-rtc.c index 5606a51d5c..7e493f0e79 100644 --- a/hw/rtc/allwinner-rtc.c +++ b/hw/rtc/allwinner-rtc.c @@ -23,9 +23,9 @@ #include "migration/vmstate.h" #include "qemu/log.h" #include "qemu/module.h" -#include "qemu-common.h" #include "hw/qdev-properties.h" #include "hw/rtc/allwinner-rtc.h" +#include "sysemu/rtc.h" #include "trace.h" /* RTC registers */ diff --git a/hw/rtc/aspeed_rtc.c b/hw/rtc/aspeed_rtc.c index 3ca1183558..f6da7b666d 100644 --- a/hw/rtc/aspeed_rtc.c +++ b/hw/rtc/aspeed_rtc.c @@ -7,11 +7,11 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "hw/rtc/aspeed_rtc.h" #include "migration/vmstate.h" #include "qemu/log.h" #include "qemu/timer.h" +#include "sysemu/rtc.h" #include "trace.h" diff --git a/hw/rtc/ds1338.c b/hw/rtc/ds1338.c index bc5ce1a9f4..36d8121ddd 100644 --- a/hw/rtc/ds1338.c +++ b/hw/rtc/ds1338.c @@ -11,12 +11,12 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "hw/i2c/i2c.h" #include "migration/vmstate.h" #include "qemu/bcd.h" #include "qemu/module.h" #include "qom/object.h" +#include "sysemu/rtc.h" /* Size of NVRAM including both the user-accessible area and the * secondary register area. 
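The "aclint", "aia", and "aia-guests" class properties registered above surface as ordinary -machine options on the RISC-V virt board (e.g. -machine virt,aclint=on,aia=aplic-imsic,aia-guests=2). Below is a minimal sketch of the same configuration driven through QOM; the helper name is invented for illustration, the (object, name, value) setter ordering is assumed from current QOM conventions, and the guest count of 2 assumes it falls within VIRT_IRQCHIP_MAX_GUESTS.

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "qom/object.h"
    #include "hw/boards.h"

    /* Illustrative only: mirrors -machine virt,aclint=on,aia=aplic-imsic,aia-guests=2 */
    static void configure_virt_aia(MachineState *ms)
    {
        Object *mobj = OBJECT(ms);

        object_property_set_bool(mobj, "aclint", true, &error_fatal);
        object_property_set_str(mobj, "aia", "aplic-imsic", &error_fatal);
        /* "aia-guests" is a string property; virt_set_aia_guests() parses it with atoi() */
        object_property_set_str(mobj, "aia-guests", "2", &error_fatal);
    }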
diff --git a/hw/rtc/exynos4210_rtc.c b/hw/rtc/exynos4210_rtc.c index 45c0a951c4..d1620c7a2a 100644 --- a/hw/rtc/exynos4210_rtc.c +++ b/hw/rtc/exynos4210_rtc.c @@ -26,7 +26,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/log.h" #include "qemu/module.h" #include "hw/sysbus.h" @@ -39,6 +38,7 @@ #include "hw/arm/exynos4210.h" #include "qom/object.h" +#include "sysemu/rtc.h" #define DEBUG_RTC 0 @@ -564,14 +564,14 @@ static void exynos4210_rtc_init(Object *obj) Exynos4210RTCState *s = EXYNOS4210_RTC(obj); SysBusDevice *dev = SYS_BUS_DEVICE(obj); - s->ptimer = ptimer_init(exynos4210_rtc_tick, s, PTIMER_POLICY_DEFAULT); + s->ptimer = ptimer_init(exynos4210_rtc_tick, s, PTIMER_POLICY_LEGACY); ptimer_transaction_begin(s->ptimer); ptimer_set_freq(s->ptimer, RTC_BASE_FREQ); exynos4210_rtc_update_freq(s, 0); ptimer_transaction_commit(s->ptimer); s->ptimer_1Hz = ptimer_init(exynos4210_rtc_1Hz_tick, - s, PTIMER_POLICY_DEFAULT); + s, PTIMER_POLICY_LEGACY); ptimer_transaction_begin(s->ptimer_1Hz); ptimer_set_freq(s->ptimer_1Hz, RTC_BASE_FREQ); ptimer_transaction_commit(s->ptimer_1Hz); diff --git a/hw/rtc/goldfish_rtc.c b/hw/rtc/goldfish_rtc.c index e07ff0164e..19a56402a0 100644 --- a/hw/rtc/goldfish_rtc.c +++ b/hw/rtc/goldfish_rtc.c @@ -20,7 +20,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "hw/rtc/goldfish_rtc.h" #include "migration/vmstate.h" #include "hw/irq.h" @@ -29,6 +28,7 @@ #include "qemu/bitops.h" #include "qemu/timer.h" #include "sysemu/sysemu.h" +#include "sysemu/rtc.h" #include "qemu/cutils.h" #include "qemu/log.h" @@ -216,14 +216,25 @@ static int goldfish_rtc_post_load(void *opaque, int version_id) return 0; } -static const MemoryRegionOps goldfish_rtc_ops = { - .read = goldfish_rtc_read, - .write = goldfish_rtc_write, - .endianness = DEVICE_NATIVE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 4 - } +static const MemoryRegionOps goldfish_rtc_ops[2] = { + [false] = { + .read = goldfish_rtc_read, + .write = goldfish_rtc_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } + }, + [true] = { + .read = goldfish_rtc_read, + .write = goldfish_rtc_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } + }, }; static const VMStateDescription goldfish_rtc_vmstate = { @@ -265,7 +276,8 @@ static void goldfish_rtc_realize(DeviceState *d, Error **errp) SysBusDevice *dev = SYS_BUS_DEVICE(d); GoldfishRTCState *s = GOLDFISH_RTC(d); - memory_region_init_io(&s->iomem, OBJECT(s), &goldfish_rtc_ops, s, + memory_region_init_io(&s->iomem, OBJECT(s), + &goldfish_rtc_ops[s->big_endian], s, "goldfish_rtc", 0x24); sysbus_init_mmio(dev, &s->iomem); @@ -274,10 +286,17 @@ static void goldfish_rtc_realize(DeviceState *d, Error **errp) s->timer = timer_new_ns(rtc_clock, goldfish_rtc_interrupt, s); } +static Property goldfish_rtc_properties[] = { + DEFINE_PROP_BOOL("big-endian", GoldfishRTCState, big_endian, + false), + DEFINE_PROP_END_OF_LIST(), +}; + static void goldfish_rtc_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + device_class_set_props(dc, goldfish_rtc_properties); dc->realize = goldfish_rtc_realize; dc->reset = goldfish_rtc_reset; dc->vmsd = &goldfish_rtc_vmstate; diff --git a/hw/rtc/ls7a_rtc.c b/hw/rtc/ls7a_rtc.c new file mode 100644 index 0000000000..1f9e38a735 --- /dev/null +++ b/hw/rtc/ls7a_rtc.c @@ -0,0 +1,488 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Loongarch LS7A Real Time Clock 
emulation + * + * Copyright (C) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "include/hw/register.h" +#include "qemu/timer.h" +#include "sysemu/sysemu.h" +#include "qemu/cutils.h" +#include "qemu/log.h" +#include "migration/vmstate.h" +#include "hw/misc/unimp.h" +#include "sysemu/rtc.h" +#include "hw/registerfields.h" + +#define SYS_TOYTRIM 0x20 +#define SYS_TOYWRITE0 0x24 +#define SYS_TOYWRITE1 0x28 +#define SYS_TOYREAD0 0x2C +#define SYS_TOYREAD1 0x30 +#define SYS_TOYMATCH0 0x34 +#define SYS_TOYMATCH1 0x38 +#define SYS_TOYMATCH2 0x3C +#define SYS_RTCCTRL 0x40 +#define SYS_RTCTRIM 0x60 +#define SYS_RTCWRTIE0 0x64 +#define SYS_RTCREAD0 0x68 +#define SYS_RTCMATCH0 0x6C +#define SYS_RTCMATCH1 0x70 +#define SYS_RTCMATCH2 0x74 + +#define LS7A_RTC_FREQ 32768 +#define TIMER_NUMS 3 +/* + * Shift bits and filed mask + */ + +FIELD(TOY, MON, 26, 6) +FIELD(TOY, DAY, 21, 5) +FIELD(TOY, HOUR, 16, 5) +FIELD(TOY, MIN, 10, 6) +FIELD(TOY, SEC, 4, 6) +FIELD(TOY, MSEC, 0, 4) + +FIELD(TOY_MATCH, YEAR, 26, 6) +FIELD(TOY_MATCH, MON, 22, 4) +FIELD(TOY_MATCH, DAY, 17, 5) +FIELD(TOY_MATCH, HOUR, 12, 5) +FIELD(TOY_MATCH, MIN, 6, 6) +FIELD(TOY_MATCH, SEC, 0, 6) + +FIELD(RTC_CTRL, RTCEN, 13, 1) +FIELD(RTC_CTRL, TOYEN, 11, 1) +FIELD(RTC_CTRL, EO, 8, 1) + +#define TYPE_LS7A_RTC "ls7a_rtc" +OBJECT_DECLARE_SIMPLE_TYPE(LS7ARtcState, LS7A_RTC) + +struct LS7ARtcState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + /* + * Needed to preserve the tick_count across migration, even if the + * absolute value of the rtc_clock is different on the source and + * destination. + */ + int64_t offset_toy; + int64_t offset_rtc; + int64_t data; + int tidx; + uint32_t toymatch[3]; + uint32_t toytrim; + uint32_t cntrctl; + uint32_t rtctrim; + uint32_t rtccount; + uint32_t rtcmatch[3]; + QEMUTimer *toy_timer[TIMER_NUMS]; + QEMUTimer *rtc_timer[TIMER_NUMS]; + qemu_irq irq; +}; + +/* switch nanoseconds time to rtc ticks */ +static uint64_t ls7a_rtc_ticks(void) +{ + return qemu_clock_get_ns(rtc_clock) * LS7A_RTC_FREQ / NANOSECONDS_PER_SECOND; +} + +/* switch rtc ticks to nanoseconds */ +static uint64_t ticks_to_ns(uint64_t ticks) +{ + return ticks * NANOSECONDS_PER_SECOND / LS7A_RTC_FREQ; +} + +static bool toy_enabled(LS7ARtcState *s) +{ + return FIELD_EX32(s->cntrctl, RTC_CTRL, TOYEN) && + FIELD_EX32(s->cntrctl, RTC_CTRL, EO); +} + +static bool rtc_enabled(LS7ARtcState *s) +{ + return FIELD_EX32(s->cntrctl, RTC_CTRL, RTCEN) && + FIELD_EX32(s->cntrctl, RTC_CTRL, EO); +} + +/* parse struct tm to toy value */ +static uint64_t toy_time_to_val_mon(const struct tm *tm) +{ + uint64_t val = 0; + + val = FIELD_DP32(val, TOY, MON, tm->tm_mon + 1); + val = FIELD_DP32(val, TOY, DAY, tm->tm_mday); + val = FIELD_DP32(val, TOY, HOUR, tm->tm_hour); + val = FIELD_DP32(val, TOY, MIN, tm->tm_min); + val = FIELD_DP32(val, TOY, SEC, tm->tm_sec); + return val; +} + +static void toymatch_val_to_time(LS7ARtcState *s, uint64_t val, struct tm *tm) +{ + qemu_get_timedate(tm, s->offset_toy); + tm->tm_sec = FIELD_EX32(val, TOY_MATCH, SEC); + tm->tm_min = FIELD_EX32(val, TOY_MATCH, MIN); + tm->tm_hour = FIELD_EX32(val, TOY_MATCH, HOUR); + tm->tm_mday = FIELD_EX32(val, TOY_MATCH, DAY); + tm->tm_mon = FIELD_EX32(val, TOY_MATCH, MON) - 1; + tm->tm_year += (FIELD_EX32(val, TOY_MATCH, YEAR) - (tm->tm_year & 0x3f)); +} + +static void toymatch_write(LS7ARtcState *s, uint64_t val, int num) +{ + int64_t now, expire_time; + struct tm tm = {}; + + /* it do not support write when toy disabled 
*/ + if (toy_enabled(s)) { + s->toymatch[num] = val; + /* calculate expire time */ + now = qemu_clock_get_ms(rtc_clock); + toymatch_val_to_time(s, val, &tm); + expire_time = now + (qemu_timedate_diff(&tm) - s->offset_toy) * 1000; + timer_mod(s->toy_timer[num], expire_time); + } +} + +static void rtcmatch_write(LS7ARtcState *s, uint64_t val, int num) +{ + uint64_t expire_ns; + + /* it do not support write when toy disabled */ + if (rtc_enabled(s)) { + s->rtcmatch[num] = val; + /* calculate expire time */ + expire_ns = ticks_to_ns(val) - ticks_to_ns(s->offset_rtc); + timer_mod_ns(s->rtc_timer[num], expire_ns); + } +} + +static void ls7a_toy_stop(LS7ARtcState *s) +{ + int i; + + /* delete timers, and when re-enabled, recalculate expire time */ + for (i = 0; i < TIMER_NUMS; i++) { + timer_del(s->toy_timer[i]); + } +} + +static void ls7a_rtc_stop(LS7ARtcState *s) +{ + int i; + + /* delete timers, and when re-enabled, recalculate expire time */ + for (i = 0; i < TIMER_NUMS; i++) { + timer_del(s->rtc_timer[i]); + } +} + +static void ls7a_toy_start(LS7ARtcState *s) +{ + int i; + uint64_t expire_time, now; + struct tm tm = {}; + + now = qemu_clock_get_ms(rtc_clock); + + /* recalculate expire time and enable timer */ + for (i = 0; i < TIMER_NUMS; i++) { + toymatch_val_to_time(s, s->toymatch[i], &tm); + expire_time = now + (qemu_timedate_diff(&tm) - s->offset_toy) * 1000; + timer_mod(s->toy_timer[i], expire_time); + } +} + +static void ls7a_rtc_start(LS7ARtcState *s) +{ + int i; + uint64_t expire_time; + + /* recalculate expire time and enable timer */ + for (i = 0; i < TIMER_NUMS; i++) { + expire_time = ticks_to_ns(s->rtcmatch[i]) - ticks_to_ns(s->offset_rtc); + timer_mod_ns(s->rtc_timer[i], expire_time); + } +} + +static uint64_t ls7a_rtc_read(void *opaque, hwaddr addr, unsigned size) +{ + LS7ARtcState *s = LS7A_RTC(opaque); + struct tm tm; + int val = 0; + + switch (addr) { + case SYS_TOYREAD0: + if (toy_enabled(s)) { + qemu_get_timedate(&tm, s->offset_toy); + val = toy_time_to_val_mon(&tm); + } else { + /* return 0 when toy disabled */ + val = 0; + } + break; + case SYS_TOYREAD1: + if (toy_enabled(s)) { + qemu_get_timedate(&tm, s->offset_toy); + val = tm.tm_year; + } else { + /* return 0 when toy disabled */ + val = 0; + } + break; + case SYS_TOYMATCH0: + val = s->toymatch[0]; + break; + case SYS_TOYMATCH1: + val = s->toymatch[1]; + break; + case SYS_TOYMATCH2: + val = s->toymatch[2]; + break; + case SYS_RTCCTRL: + val = s->cntrctl; + break; + case SYS_RTCREAD0: + if (rtc_enabled(s)) { + val = ls7a_rtc_ticks() + s->offset_rtc; + } else { + /* return 0 when rtc disabled */ + val = 0; + } + break; + case SYS_RTCMATCH0: + val = s->rtcmatch[0]; + break; + case SYS_RTCMATCH1: + val = s->rtcmatch[1]; + break; + case SYS_RTCMATCH2: + val = s->rtcmatch[2]; + break; + default: + val = 0; + break; + } + return val; +} + +static void ls7a_rtc_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + int old_toyen, old_rtcen, new_toyen, new_rtcen; + LS7ARtcState *s = LS7A_RTC(opaque); + struct tm tm; + + switch (addr) { + case SYS_TOYWRITE0: + /* it do not support write when toy disabled */ + if (toy_enabled(s)) { + qemu_get_timedate(&tm, s->offset_toy); + tm.tm_sec = FIELD_EX32(val, TOY, SEC); + tm.tm_min = FIELD_EX32(val, TOY, MIN); + tm.tm_hour = FIELD_EX32(val, TOY, HOUR); + tm.tm_mday = FIELD_EX32(val, TOY, DAY); + tm.tm_mon = FIELD_EX32(val, TOY, MON) - 1; + s->offset_toy = qemu_timedate_diff(&tm); + } + break; + case SYS_TOYWRITE1: + if (toy_enabled(s)) { + qemu_get_timedate(&tm, 
s->offset_toy); + tm.tm_year = val; + s->offset_toy = qemu_timedate_diff(&tm); + } + break; + case SYS_TOYMATCH0: + toymatch_write(s, val, 0); + break; + case SYS_TOYMATCH1: + toymatch_write(s, val, 1); + break; + case SYS_TOYMATCH2: + toymatch_write(s, val, 2); + break; + case SYS_RTCCTRL: + /* get old ctrl */ + old_toyen = toy_enabled(s); + old_rtcen = rtc_enabled(s); + + s->cntrctl = val; + /* get new ctrl */ + new_toyen = toy_enabled(s); + new_rtcen = rtc_enabled(s); + + /* + * we do not consider if EO changed, as it always set at most time. + * toy or rtc enabled should start timer. otherwise, stop timer + */ + if (old_toyen != new_toyen) { + if (new_toyen) { + ls7a_toy_start(s); + } else { + ls7a_toy_stop(s); + } + } + if (old_rtcen != new_rtcen) { + if (new_rtcen) { + ls7a_rtc_start(s); + } else { + ls7a_rtc_stop(s); + } + } + break; + case SYS_RTCWRTIE0: + if (rtc_enabled(s)) { + s->offset_rtc = val - ls7a_rtc_ticks(); + } + break; + case SYS_RTCMATCH0: + rtcmatch_write(s, val, 0); + break; + case SYS_RTCMATCH1: + rtcmatch_write(s, val, 1); + break; + case SYS_RTCMATCH2: + rtcmatch_write(s, val, 2); + break; + default: + break; + } +} + +static const MemoryRegionOps ls7a_rtc_ops = { + .read = ls7a_rtc_read, + .write = ls7a_rtc_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void toy_timer_cb(void *opaque) +{ + LS7ARtcState *s = opaque; + + if (toy_enabled(s)) { + qemu_irq_raise(s->irq); + } +} + +static void rtc_timer_cb(void *opaque) +{ + LS7ARtcState *s = opaque; + + if (rtc_enabled(s)) { + qemu_irq_raise(s->irq); + } +} + +static void ls7a_rtc_realize(DeviceState *dev, Error **errp) +{ + int i; + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + LS7ARtcState *d = LS7A_RTC(sbd); + memory_region_init_io(&d->iomem, NULL, &ls7a_rtc_ops, + (void *)d, "ls7a_rtc", 0x100); + + sysbus_init_irq(sbd, &d->irq); + + sysbus_init_mmio(sbd, &d->iomem); + for (i = 0; i < TIMER_NUMS; i++) { + d->toymatch[i] = 0; + d->rtcmatch[i] = 0; + d->toy_timer[i] = timer_new_ms(rtc_clock, toy_timer_cb, d); + d->rtc_timer[i] = timer_new_ms(rtc_clock, rtc_timer_cb, d); + } + d->offset_toy = 0; + d->offset_rtc = 0; + +} + +/* delete timer and clear reg when reset */ +static void ls7a_rtc_reset(DeviceState *dev) +{ + int i; + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + LS7ARtcState *d = LS7A_RTC(sbd); + for (i = 0; i < TIMER_NUMS; i++) { + if (toy_enabled(d)) { + timer_del(d->toy_timer[i]); + } + if (rtc_enabled(d)) { + timer_del(d->rtc_timer[i]); + } + d->toymatch[i] = 0; + d->rtcmatch[i] = 0; + } + d->cntrctl = 0; +} + +static int ls7a_rtc_pre_save(void *opaque) +{ + LS7ARtcState *s = LS7A_RTC(opaque); + + ls7a_toy_stop(s); + ls7a_rtc_stop(s); + + return 0; +} + +static int ls7a_rtc_post_load(void *opaque, int version_id) +{ + LS7ARtcState *s = LS7A_RTC(opaque); + if (toy_enabled(s)) { + ls7a_toy_start(s); + } + + if (rtc_enabled(s)) { + ls7a_rtc_start(s); + } + + return 0; +} + +static const VMStateDescription vmstate_ls7a_rtc = { + .name = "ls7a_rtc", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = ls7a_rtc_pre_save, + .post_load = ls7a_rtc_post_load, + .fields = (VMStateField[]) { + VMSTATE_INT64(offset_toy, LS7ARtcState), + VMSTATE_INT64(offset_rtc, LS7ARtcState), + VMSTATE_UINT32_ARRAY(toymatch, LS7ARtcState, TIMER_NUMS), + VMSTATE_UINT32_ARRAY(rtcmatch, LS7ARtcState, TIMER_NUMS), + VMSTATE_UINT32(cntrctl, LS7ARtcState), + VMSTATE_END_OF_LIST() + } +}; + +static void ls7a_rtc_class_init(ObjectClass *klass, void *data) +{ + 
DeviceClass *dc = DEVICE_CLASS(klass); + dc->vmsd = &vmstate_ls7a_rtc; + dc->realize = ls7a_rtc_realize; + dc->reset = ls7a_rtc_reset; + dc->desc = "ls7a rtc"; +} + +static const TypeInfo ls7a_rtc_info = { + .name = TYPE_LS7A_RTC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(LS7ARtcState), + .class_init = ls7a_rtc_class_init, +}; + +static void ls7a_rtc_register_types(void) +{ + type_register_static(&ls7a_rtc_info); +} + +type_init(ls7a_rtc_register_types) diff --git a/hw/rtc/m41t80.c b/hw/rtc/m41t80.c index 396d110ba2..e045c864bb 100644 --- a/hw/rtc/m41t80.c +++ b/hw/rtc/m41t80.c @@ -8,13 +8,13 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/log.h" #include "qemu/module.h" #include "qemu/timer.h" #include "qemu/bcd.h" #include "hw/i2c/i2c.h" #include "qom/object.h" +#include "sysemu/rtc.h" #define TYPE_M41T80 "m41t80" OBJECT_DECLARE_SIMPLE_TYPE(M41t80State, M41T80) @@ -47,7 +47,7 @@ static uint8_t m41t80_recv(I2CSlave *i2c) { M41t80State *s = M41T80(i2c); struct tm now; - qemu_timeval tv; + int64_t rt; if (s->addr < 0) { s->addr = 0; @@ -57,8 +57,8 @@ static uint8_t m41t80_recv(I2CSlave *i2c) } switch (s->addr++) { case 0: - qemu_gettimeofday(&tv); - return to_bcd(tv.tv_usec / 10000); + rt = g_get_real_time(); + return to_bcd((rt % G_USEC_PER_SEC) / 10000); case 1: return to_bcd(now.tm_sec); case 2: diff --git a/hw/rtc/m48t59-isa.c b/hw/rtc/m48t59-isa.c index dc21fb10a5..e61f7ec370 100644 --- a/hw/rtc/m48t59-isa.c +++ b/hw/rtc/m48t59-isa.c @@ -42,6 +42,7 @@ struct M48txxISAState { ISADevice parent_obj; M48t59State state; uint32_t io_base; + uint8_t isairq; MemoryRegion io; }; @@ -79,6 +80,7 @@ static void m48txx_isa_toggle_lock(Nvram *obj, int lock) static Property m48t59_isa_properties[] = { DEFINE_PROP_INT32("base-year", M48txxISAState, state.base_year, 0), DEFINE_PROP_UINT32("iobase", M48txxISAState, io_base, 0x74), + DEFINE_PROP_UINT8("irq", M48txxISAState, isairq, 8), DEFINE_PROP_END_OF_LIST(), }; @@ -97,9 +99,14 @@ static void m48t59_isa_realize(DeviceState *dev, Error **errp) M48txxISAState *d = M48TXX_ISA(dev); M48t59State *s = &d->state; + if (d->isairq >= ISA_NUM_IRQS) { + error_setg(errp, "Maximum value for \"irq\" is: %u", ISA_NUM_IRQS - 1); + return; + } + s->model = u->info.model; s->size = u->info.size; - isa_init_irq(isadev, &s->IRQ, 8); + s->IRQ = isa_get_irq(isadev, d->isairq); m48t59_realize_common(s, errp); memory_region_init_io(&d->io, OBJECT(dev), &m48t59_io_ops, s, "m48t59", 4); if (d->io_base != 0) { diff --git a/hw/rtc/m48t59.c b/hw/rtc/m48t59.c index 690f4e071a..74345d9d90 100644 --- a/hw/rtc/m48t59.c +++ b/hw/rtc/m48t59.c @@ -24,12 +24,12 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "hw/irq.h" #include "hw/qdev-properties.h" #include "hw/rtc/m48t59.h" #include "qemu/timer.h" #include "sysemu/runstate.h" +#include "sysemu/rtc.h" #include "sysemu/sysemu.h" #include "hw/sysbus.h" #include "qapi/error.h" diff --git a/hw/rtc/mc146818rtc.c b/hw/rtc/mc146818rtc.c index 090a87b425..af5607ad04 100644 --- a/hw/rtc/mc146818rtc.c +++ b/hw/rtc/mc146818rtc.c @@ -23,11 +23,10 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/cutils.h" #include "qemu/module.h" #include "qemu/bcd.h" -#include "hw/acpi/aml-build.h" +#include "hw/acpi/acpi_aml_interface.h" #include "hw/irq.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" @@ -36,11 +35,12 @@ #include "sysemu/replay.h" #include "sysemu/reset.h" #include "sysemu/runstate.h" +#include "sysemu/rtc.h" #include "hw/rtc/mc146818rtc.h" 
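Worked numbers for the 32768 Hz conversions in ls7a_rtc.c above: one second of rtc_clock time is exactly LS7A_RTC_FREQ (32768) ticks, while a single tick converts back to 1e9 / 32768 = 30517.578125 ns, which ticks_to_ns() truncates to 30517 ns. The standalone check below copies only the two constants from the patch and is not QEMU code.

    #include <assert.h>
    #include <stdint.h>

    #define NANOSECONDS_PER_SECOND 1000000000LL
    #define LS7A_RTC_FREQ          32768

    int main(void)
    {
        /* one second of clock time is exactly one full tick count */
        assert(NANOSECONDS_PER_SECOND * LS7A_RTC_FREQ / NANOSECONDS_PER_SECOND
               == LS7A_RTC_FREQ);
        /* a single tick truncates to 30517 ns when converted back */
        assert((uint64_t)1 * NANOSECONDS_PER_SECOND / LS7A_RTC_FREQ == 30517);
        return 0;
    }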
#include "hw/rtc/mc146818rtc_regs.h" #include "migration/vmstate.h" #include "qapi/error.h" -#include "qapi/qapi-events-misc-target.h" +#include "qapi/qapi-events-misc.h" #include "qapi/visitor.h" #include "hw/rtc/mc146818rtc_regs.h" @@ -74,6 +74,8 @@ #define RTC_CLOCK_RATE 32768 #define UIP_HOLD_LENGTH (8 * NANOSECONDS_PER_SECOND / 32768) +#define RTC_ISA_BASE 0x70 + static void rtc_set_time(RTCState *s); static void rtc_update_time(RTCState *s); static void rtc_set_cmos(RTCState *s, const struct tm *tm); @@ -621,12 +623,13 @@ static void rtc_get_time(RTCState *s, struct tm *tm) static void rtc_set_time(RTCState *s) { struct tm tm; + g_autofree const char *qom_path = object_get_canonical_path(OBJECT(s)); rtc_get_time(s, &tm); s->base_rtc = mktimegm(&tm); s->last_update = qemu_clock_get_ns(rtc_clock); - qapi_event_send_rtc_change(qemu_timedate_diff(&tm)); + qapi_event_send_rtc_change(qemu_timedate_diff(&tm), qom_path); } static void rtc_set_cmos(RTCState *s, const struct tm *tm) @@ -929,6 +932,11 @@ static void rtc_realizefn(DeviceState *dev, Error **errp) s->base_year = 0; } + if (s->isairq >= ISA_NUM_IRQS) { + error_setg(errp, "Maximum value for \"irq\" is: %u", ISA_NUM_IRQS - 1); + return; + } + rtc_set_date_from_host(isadev); switch (s->lost_tick_policy) { @@ -953,7 +961,7 @@ static void rtc_realizefn(DeviceState *dev, Error **errp) qemu_register_suspend_notifier(&s->suspend_notifier); memory_region_init_io(&s->io, OBJECT(s), &cmos_ops, s, "rtc", 4); - isa_register_ioport(isadev, &s->io, RTC_ISA_BASE); + isa_register_ioport(isadev, &s->io, s->io_base); /* register rtc 0x70 port for coalesced_pio */ memory_region_set_flush_coalesced(&s->io); @@ -962,7 +970,7 @@ static void rtc_realizefn(DeviceState *dev, Error **errp) memory_region_add_subregion(&s->io, 0, &s->coalesced_io); memory_region_add_coalescing(&s->coalesced_io, 0, 1); - qdev_set_legacy_instance_id(dev, RTC_ISA_BASE, 3); + qdev_set_legacy_instance_id(dev, s->io_base, 3); object_property_add_tm(OBJECT(s), "date", rtc_get_date); @@ -974,15 +982,17 @@ ISADevice *mc146818_rtc_init(ISABus *bus, int base_year, qemu_irq intercept_irq) { DeviceState *dev; ISADevice *isadev; + RTCState *s; isadev = isa_new(TYPE_MC146818_RTC); dev = DEVICE(isadev); + s = MC146818_RTC(isadev); qdev_prop_set_int32(dev, "base_year", base_year); isa_realize_and_unref(isadev, bus, &error_fatal); if (intercept_irq) { qdev_connect_gpio_out(dev, 0, intercept_irq); } else { - isa_connect_gpio_out(isadev, 0, RTC_ISA_IRQ); + isa_connect_gpio_out(isadev, 0, s->isairq); } object_property_add_alias(qdev_get_machine(), "rtc-time", OBJECT(isadev), @@ -993,6 +1003,8 @@ ISADevice *mc146818_rtc_init(ISABus *bus, int base_year, qemu_irq intercept_irq) static Property mc146818rtc_properties[] = { DEFINE_PROP_INT32("base_year", RTCState, base_year, 1980), + DEFINE_PROP_UINT16("iobase", RTCState, io_base, RTC_ISA_BASE), + DEFINE_PROP_UINT8("irq", RTCState, isairq, RTC_ISA_IRQ), DEFINE_PROP_LOSTTICKPOLICY("lost_tick_policy", RTCState, lost_tick_policy, LOST_TICK_POLICY_DISCARD), DEFINE_PROP_END_OF_LIST(), @@ -1026,8 +1038,9 @@ static void rtc_reset_hold(Object *obj) qemu_irq_lower(s->irq); } -static void rtc_build_aml(ISADevice *isadev, Aml *scope) +static void rtc_build_aml(AcpiDevAmlIf *adev, Aml *scope) { + RTCState *s = MC146818_RTC(adev); Aml *dev; Aml *crs; @@ -1036,9 +1049,9 @@ static void rtc_build_aml(ISADevice *isadev, Aml *scope) * does, even though qemu only responds to the first two ports. 
*/ crs = aml_resource_template(); - aml_append(crs, aml_io(AML_DECODE16, RTC_ISA_BASE, RTC_ISA_BASE, + aml_append(crs, aml_io(AML_DECODE16, s->io_base, s->io_base, 0x01, 0x08)); - aml_append(crs, aml_irq_no_flags(RTC_ISA_IRQ)); + aml_append(crs, aml_irq_no_flags(s->isairq)); dev = aml_device("RTC"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0B00"))); @@ -1051,13 +1064,13 @@ static void rtc_class_initfn(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); - ISADeviceClass *isa = ISA_DEVICE_CLASS(klass); + AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); dc->realize = rtc_realizefn; dc->vmsd = &vmstate_rtc; rc->phases.enter = rtc_reset_enter; rc->phases.hold = rtc_reset_hold; - isa->build_aml = rtc_build_aml; + adevc->build_dev_aml = rtc_build_aml; device_class_set_props(dc, mc146818rtc_properties); set_bit(DEVICE_CATEGORY_MISC, dc->categories); } @@ -1067,6 +1080,10 @@ static const TypeInfo mc146818rtc_info = { .parent = TYPE_ISA_DEVICE, .instance_size = sizeof(RTCState), .class_init = rtc_class_initfn, + .interfaces = (InterfaceInfo[]) { + { TYPE_ACPI_DEV_AML_IF }, + { }, + }, }; static void mc146818rtc_register_types(void) diff --git a/hw/rtc/meson.build b/hw/rtc/meson.build index 7cecdee5dd..dc33973384 100644 --- a/hw/rtc/meson.build +++ b/hw/rtc/meson.build @@ -11,6 +11,7 @@ softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_rtc.c')) softmmu_ss.add(when: 'CONFIG_SUN4V_RTC', if_true: files('sun4v-rtc.c')) softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_rtc.c')) softmmu_ss.add(when: 'CONFIG_GOLDFISH_RTC', if_true: files('goldfish_rtc.c')) +softmmu_ss.add(when: 'CONFIG_LS7A_RTC', if_true: files('ls7a_rtc.c')) softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-rtc.c')) specific_ss.add(when: 'CONFIG_MC146818RTC', if_true: files('mc146818rtc.c')) diff --git a/hw/rtc/pl031.c b/hw/rtc/pl031.c index 2bbb2062ac..b01d0e75d1 100644 --- a/hw/rtc/pl031.c +++ b/hw/rtc/pl031.c @@ -12,7 +12,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "hw/rtc/pl031.h" #include "migration/vmstate.h" #include "hw/irq.h" @@ -20,10 +19,12 @@ #include "hw/sysbus.h" #include "qemu/timer.h" #include "sysemu/sysemu.h" +#include "sysemu/rtc.h" #include "qemu/cutils.h" #include "qemu/log.h" #include "qemu/module.h" #include "trace.h" +#include "qapi/qapi-events-misc.h" #define RTC_DR 0x00 /* Data read register */ #define RTC_MR 0x04 /* Match register */ @@ -136,10 +137,18 @@ static void pl031_write(void * opaque, hwaddr offset, trace_pl031_write(offset, value); switch (offset) { - case RTC_LR: + case RTC_LR: { + g_autofree const char *qom_path = object_get_canonical_path(opaque); + struct tm tm; + s->tick_offset += value - pl031_get_count(s); + + qemu_get_timedate(&tm, s->tick_offset); + qapi_event_send_rtc_change(qemu_timedate_diff(&tm), qom_path); + pl031_set_alarm(s); break; + } case RTC_MR: s->mr = value; pl031_set_alarm(s); diff --git a/hw/rtc/twl92230.c b/hw/rtc/twl92230.c index 0922df5ad3..e8d5eda3fc 100644 --- a/hw/rtc/twl92230.c +++ b/hw/rtc/twl92230.c @@ -20,13 +20,13 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/timer.h" #include "hw/i2c/i2c.h" #include "hw/irq.h" #include "migration/qemu-file-types.h" #include "migration/vmstate.h" #include "sysemu/sysemu.h" +#include "sysemu/rtc.h" #include "qemu/bcd.h" #include "qemu/module.h" #include "qom/object.h" diff --git a/hw/rtc/xlnx-zynqmp-rtc.c b/hw/rtc/xlnx-zynqmp-rtc.c index 
2bcd14d779..3e7d61a41c 100644 --- a/hw/rtc/xlnx-zynqmp-rtc.c +++ b/hw/rtc/xlnx-zynqmp-rtc.c @@ -25,7 +25,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "hw/sysbus.h" #include "hw/register.h" #include "qemu/bitops.h" @@ -34,6 +33,7 @@ #include "hw/irq.h" #include "qemu/cutils.h" #include "sysemu/sysemu.h" +#include "sysemu/rtc.h" #include "trace.h" #include "hw/rtc/xlnx-zynqmp-rtc.h" #include "migration/vmstate.h" diff --git a/hw/rx/rx-gdbsim.c b/hw/rx/rx-gdbsim.c index 75d1fec6ca..47c17026c7 100644 --- a/hw/rx/rx-gdbsim.c +++ b/hw/rx/rx-gdbsim.c @@ -19,12 +19,13 @@ #include "qemu/osdep.h" #include "qemu/cutils.h" #include "qemu/error-report.h" +#include "qemu/guest-random.h" #include "qapi/error.h" -#include "qemu-common.h" #include "hw/loader.h" #include "hw/rx/rx62n.h" #include "sysemu/qtest.h" #include "sysemu/device_tree.h" +#include "sysemu/reset.h" #include "hw/boards.h" #include "qom/object.h" @@ -84,6 +85,7 @@ static void rx_gdbsim_init(MachineState *machine) MemoryRegion *sysmem = get_system_memory(); const char *kernel_filename = machine->kernel_filename; const char *dtb_filename = machine->dtb; + uint8_t rng_seed[32]; if (machine->ram_size < mc->default_ram_size) { char *sz = size_to_str(mc->default_ram_size); @@ -141,10 +143,14 @@ static void rx_gdbsim_init(MachineState *machine) error_report("Couldn't set /chosen/bootargs"); exit(1); } + qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed)); + qemu_fdt_setprop(dtb, "/chosen", "rng-seed", rng_seed, sizeof(rng_seed)); /* DTB is located at the end of SDRAM space. */ - dtb_offset = machine->ram_size - dtb_size; + dtb_offset = ROUND_DOWN(machine->ram_size - dtb_size, 16); rom_add_blob_fixed("dtb", dtb, dtb_size, SDRAM_BASE + dtb_offset); + qemu_register_reset_nosnapshotload(qemu_fdt_randomize_seeds, + rom_ptr(SDRAM_BASE + dtb_offset, dtb_size)); /* Set dtb address to R1 */ RX_CPU(first_cpu)->env.regs[1] = SDRAM_BASE + dtb_offset; } diff --git a/hw/s390x/ap-bridge.c b/hw/s390x/ap-bridge.c index 8bcf8ece9d..ef8fa2b15b 100644 --- a/hw/s390x/ap-bridge.c +++ b/hw/s390x/ap-bridge.c @@ -55,7 +55,7 @@ void s390_init_ap(void) sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); /* Create bus on bridge device */ - bus = qbus_create(TYPE_AP_BUS, dev, TYPE_AP_BUS); + bus = qbus_new(TYPE_AP_BUS, dev, TYPE_AP_BUS); /* Enable hotplugging */ qbus_set_hotplug_handler(bus, OBJECT(dev)); diff --git a/hw/s390x/css-bridge.c b/hw/s390x/css-bridge.c index 191b29f077..4017081d49 100644 --- a/hw/s390x/css-bridge.c +++ b/hw/s390x/css-bridge.c @@ -106,7 +106,7 @@ VirtualCssBus *virtual_css_bus_init(void) sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); /* Create bus on bridge device */ - bus = qbus_create(TYPE_VIRTUAL_CSS_BUS, dev, "virtual-css"); + bus = qbus_new(TYPE_VIRTUAL_CSS_BUS, dev, "virtual-css"); cbus = VIRTUAL_CSS_BUS(bus); /* Enable hotplugging */ diff --git a/hw/s390x/css.c b/hw/s390x/css.c index 133ddea575..95d1b3a3ce 100644 --- a/hw/s390x/css.c +++ b/hw/s390x/css.c @@ -1206,23 +1206,53 @@ static void sch_handle_start_func_virtual(SubchDev *sch) } -static void sch_handle_halt_func_passthrough(SubchDev *sch) +static IOInstEnding sch_handle_halt_func_passthrough(SubchDev *sch) { int ret; ret = s390_ccw_halt(sch); if (ret == -ENOSYS) { sch_handle_halt_func(sch); + return IOINST_CC_EXPECTED; + } + /* + * Some conditions may have been detected prior to starting the halt + * function; map them to the correct cc. 
+ * Note that we map both -ENODEV and -EACCES to cc 3 (there's not really + * anything else we can do.) + */ + switch (ret) { + case -EBUSY: + return IOINST_CC_BUSY; + case -ENODEV: + case -EACCES: + return IOINST_CC_NOT_OPERATIONAL; + default: + return IOINST_CC_EXPECTED; } } -static void sch_handle_clear_func_passthrough(SubchDev *sch) +static IOInstEnding sch_handle_clear_func_passthrough(SubchDev *sch) { int ret; ret = s390_ccw_clear(sch); if (ret == -ENOSYS) { sch_handle_clear_func(sch); + return IOINST_CC_EXPECTED; + } + /* + * Some conditions may have been detected prior to starting the clear + * function; map them to the correct cc. + * Note that we map both -ENODEV and -EACCES to cc 3 (there's not really + * anything else we can do.) + */ + switch (ret) { + case -ENODEV: + case -EACCES: + return IOINST_CC_NOT_OPERATIONAL; + default: + return IOINST_CC_EXPECTED; } } @@ -1265,9 +1295,9 @@ IOInstEnding do_subchannel_work_passthrough(SubchDev *sch) SCHIB *schib = &sch->curr_status; if (schib->scsw.ctrl & SCSW_FCTL_CLEAR_FUNC) { - sch_handle_clear_func_passthrough(sch); + return sch_handle_clear_func_passthrough(sch); } else if (schib->scsw.ctrl & SCSW_FCTL_HALT_FUNC) { - sch_handle_halt_func_passthrough(sch); + return sch_handle_halt_func_passthrough(sch); } else if (schib->scsw.ctrl & SCSW_FCTL_START_FUNC) { return sch_handle_start_func_passthrough(sch); } @@ -1492,21 +1522,37 @@ IOInstEnding css_do_xsch(SubchDev *sch) IOInstEnding css_do_csch(SubchDev *sch) { SCHIB *schib = &sch->curr_status; + uint16_t old_scsw_ctrl; + IOInstEnding ccode; if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { return IOINST_CC_NOT_OPERATIONAL; } + /* + * Save the current scsw.ctrl in case CSCH fails and we need + * to revert the scsw to the status quo ante. + */ + old_scsw_ctrl = schib->scsw.ctrl; + /* Trigger the clear function. */ schib->scsw.ctrl &= ~(SCSW_CTRL_MASK_FCTL | SCSW_CTRL_MASK_ACTL); schib->scsw.ctrl |= SCSW_FCTL_CLEAR_FUNC | SCSW_ACTL_CLEAR_PEND; - return do_subchannel_work(sch); + ccode = do_subchannel_work(sch); + + if (ccode != IOINST_CC_EXPECTED) { + schib->scsw.ctrl = old_scsw_ctrl; + } + + return ccode; } IOInstEnding css_do_hsch(SubchDev *sch) { SCHIB *schib = &sch->curr_status; + uint16_t old_scsw_ctrl; + IOInstEnding ccode; if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { return IOINST_CC_NOT_OPERATIONAL; @@ -1523,6 +1569,12 @@ IOInstEnding css_do_hsch(SubchDev *sch) return IOINST_CC_BUSY; } + /* + * Save the current scsw.ctrl in case HSCH fails and we need + * to revert the scsw to the status quo ante. + */ + old_scsw_ctrl = schib->scsw.ctrl; + /* Trigger the halt function. 
*/ schib->scsw.ctrl |= SCSW_FCTL_HALT_FUNC; schib->scsw.ctrl &= ~SCSW_FCTL_START_FUNC; @@ -1534,7 +1586,13 @@ IOInstEnding css_do_hsch(SubchDev *sch) } schib->scsw.ctrl |= SCSW_ACTL_HALT_PEND; - return do_subchannel_work(sch); + ccode = do_subchannel_work(sch); + + if (ccode != IOINST_CC_EXPECTED) { + schib->scsw.ctrl = old_scsw_ctrl; + } + + return ccode; } static void css_update_chnmon(SubchDev *sch) @@ -1575,6 +1633,8 @@ static void css_update_chnmon(SubchDev *sch) IOInstEnding css_do_ssch(SubchDev *sch, ORB *orb) { SCHIB *schib = &sch->curr_status; + uint16_t old_scsw_ctrl, old_scsw_flags; + IOInstEnding ccode; if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { return IOINST_CC_NOT_OPERATIONAL; @@ -1596,11 +1656,26 @@ IOInstEnding css_do_ssch(SubchDev *sch, ORB *orb) } sch->orb = *orb; sch->channel_prog = orb->cpa; + + /* + * Save the current scsw.ctrl and scsw.flags in case SSCH fails and we need + * to revert the scsw to the status quo ante. + */ + old_scsw_ctrl = schib->scsw.ctrl; + old_scsw_flags = schib->scsw.flags; + /* Trigger the start function. */ schib->scsw.ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND); schib->scsw.flags &= ~SCSW_FLAGS_MASK_PNO; - return do_subchannel_work(sch); + ccode = do_subchannel_work(sch); + + if (ccode != IOINST_CC_EXPECTED) { + schib->scsw.ctrl = old_scsw_ctrl; + schib->scsw.flags = old_scsw_flags; + } + + return ccode; } static void copy_irb_to_guest(IRB *dest, const IRB *src, const PMCW *pmcw, diff --git a/hw/s390x/event-facility.c b/hw/s390x/event-facility.c index ed92ce510d..faa51aa4c7 100644 --- a/hw/s390x/event-facility.c +++ b/hw/s390x/event-facility.c @@ -28,7 +28,7 @@ typedef struct SCLPEventsBus { } SCLPEventsBus; /* we need to save 32 bit chunks for compatibility */ -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN #define RECV_MASK_LOWER 1 #define RECV_MASK_UPPER 0 #else /* little endian host */ @@ -427,8 +427,8 @@ static void init_event_facility(Object *obj) sclp_event_set_allow_all_mask_sizes); /* Spawn a new bus for SCLP events */ - qbus_create_inplace(&event_facility->sbus, sizeof(event_facility->sbus), - TYPE_SCLP_EVENTS_BUS, sdev, NULL); + qbus_init(&event_facility->sbus, sizeof(event_facility->sbus), + TYPE_SCLP_EVENTS_BUS, sdev, NULL); object_initialize_child(obj, TYPE_SCLP_QUIESCE, &event_facility->quiesce, diff --git a/hw/s390x/ipl.c b/hw/s390x/ipl.c index 8c863cf386..8612684d48 100644 --- a/hw/s390x/ipl.c +++ b/hw/s390x/ipl.c @@ -13,7 +13,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qapi/error.h" #include "sysemu/reset.h" @@ -28,16 +27,21 @@ #include "hw/s390x/css.h" #include "hw/s390x/ebcdic.h" #include "hw/s390x/pv.h" +#include "hw/scsi/scsi.h" +#include "hw/virtio/virtio-net.h" #include "ipl.h" #include "qemu/error-report.h" #include "qemu/config-file.h" #include "qemu/cutils.h" #include "qemu/option.h" +#include "standard-headers/linux/virtio_ids.h" #include "exec/exec-all.h" #define KERN_IMAGE_START 0x010000UL #define LINUX_MAGIC_ADDR 0x010008UL +#define KERN_PARM_AREA_SIZE_ADDR 0x010430UL #define KERN_PARM_AREA 0x010480UL +#define LEGACY_KERN_PARM_AREA_SIZE 0x000380UL #define INITRD_START 0x800000UL #define INITRD_PARM_START 0x010408UL #define PARMFILE_START 0x001000UL @@ -109,6 +113,21 @@ static uint64_t bios_translate_addr(void *opaque, uint64_t srcaddr) return srcaddr + dstaddr; } +static uint64_t get_max_kernel_cmdline_size(void) +{ + uint64_t *size_ptr = rom_ptr(KERN_PARM_AREA_SIZE_ADDR, sizeof(*size_ptr)); + + if (size_ptr) { + uint64_t 
size; + + size = be64_to_cpu(*size_ptr); + if (size) { + return size; + } + } + return LEGACY_KERN_PARM_AREA_SIZE; +} + static void s390_ipl_realize(DeviceState *dev, Error **errp) { MachineState *ms = MACHINE(qdev_get_machine()); @@ -190,10 +209,22 @@ static void s390_ipl_realize(DeviceState *dev, Error **errp) * loader) and it won't work. For this case we force it to 0x10000, too. */ if (pentry == KERN_IMAGE_START || pentry == 0x800) { - char *parm_area = rom_ptr(KERN_PARM_AREA, strlen(ipl->cmdline) + 1); + size_t cmdline_size = strlen(ipl->cmdline) + 1; + char *parm_area = rom_ptr(KERN_PARM_AREA, cmdline_size); + ipl->start_addr = KERN_IMAGE_START; /* Overwrite parameters in the kernel image, which are "rom" */ if (parm_area) { + uint64_t max_cmdline_size = get_max_kernel_cmdline_size(); + + if (cmdline_size > max_cmdline_size) { + error_setg(errp, + "kernel command line exceeds maximum size:" + " %zu > %" PRIu64, + cmdline_size, max_cmdline_size); + return; + } + strcpy(parm_area, ipl->cmdline); } } else { @@ -259,13 +290,10 @@ static Property s390_ipl_properties[] = { static void s390_ipl_set_boot_menu(S390IPLState *ipl) { - QemuOptsList *plist = qemu_find_opts("boot-opts"); - QemuOpts *opts = QTAILQ_FIRST(&plist->head); - const char *tmp; unsigned long splash_time = 0; if (!get_boot_device(0)) { - if (boot_menu) { + if (current_machine->boot_config.has_menu && current_machine->boot_config.menu) { error_report("boot menu requires a bootindex to be specified for " "the IPL device"); } @@ -275,7 +303,7 @@ static void s390_ipl_set_boot_menu(S390IPLState *ipl) switch (ipl->iplb.pbt) { case S390_IPL_TYPE_CCW: /* In the absence of -boot menu, use zipl parameters */ - if (!qemu_opt_get(opts, "menu")) { + if (!current_machine->boot_config.has_menu) { ipl->qipl.qipl_flags |= QIPL_FLAG_BM_OPTS_ZIPL; return; } @@ -283,26 +311,21 @@ static void s390_ipl_set_boot_menu(S390IPLState *ipl) case S390_IPL_TYPE_QEMU_SCSI: break; default: - if (boot_menu) { + if (current_machine->boot_config.has_menu && current_machine->boot_config.menu) { error_report("boot menu is not supported for this device type"); } return; } - if (!boot_menu) { + if (!current_machine->boot_config.has_menu || !current_machine->boot_config.menu) { return; } ipl->qipl.qipl_flags |= QIPL_FLAG_BM_OPTS_CMD; - tmp = qemu_opt_get(opts, "splash-time"); - - if (tmp && qemu_strtoul(tmp, NULL, 10, &splash_time)) { - error_report("splash-time is invalid, forcing it to 0"); - ipl->qipl.boot_menu_timeout = 0; - return; + if (current_machine->boot_config.has_splash_time) { + splash_time = current_machine->boot_config.splash_time; } - if (splash_time > 0xffffffff) { error_report("splash-time is too large, forcing it to max value"); ipl->qipl.boot_menu_timeout = 0xffffffff; @@ -347,14 +370,18 @@ static CcwDevice *s390_get_ccw_device(DeviceState *dev_st, int *devtype) object_dynamic_cast(OBJECT(dev_st), TYPE_SCSI_DEVICE); if (sd) { - SCSIBus *bus = scsi_bus_from_device(sd); - VirtIOSCSI *vdev = container_of(bus, VirtIOSCSI, bus); - VirtIOSCSICcw *scsi_ccw = container_of(vdev, VirtIOSCSICcw, - vdev); - - ccw_dev = (CcwDevice *)object_dynamic_cast(OBJECT(scsi_ccw), - TYPE_CCW_DEVICE); - tmp_dt = CCW_DEVTYPE_SCSI; + SCSIBus *sbus = scsi_bus_from_device(sd); + VirtIODevice *vdev = (VirtIODevice *) + object_dynamic_cast(OBJECT(sbus->qbus.parent), + TYPE_VIRTIO_DEVICE); + if (vdev) { + ccw_dev = (CcwDevice *) + object_dynamic_cast(OBJECT(qdev_get_parent_bus(DEVICE(vdev))->parent), + TYPE_CCW_DEVICE); + if (ccw_dev) { + tmp_dt = CCW_DEVTYPE_SCSI; + } + } } 
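The css_do_csch()/css_do_hsch()/css_do_ssch() hunks above all follow one idiom: snapshot the control bits, set the function and activity bits, run do_subchannel_work(), and restore the snapshot unless the instruction ended with IOINST_CC_EXPECTED, so the guest never observes a half-started function. A generic sketch of that shape follows; the helper name and parameters are illustrative and not part of the patch, and css_do_ssch() additionally snapshots and restores scsw.flags.

    /* Illustrative consolidation of the save/attempt/revert pattern. */
    static IOInstEnding run_subchannel_func(SubchDev *sch,
                                            uint16_t set_bits,
                                            uint16_t clear_bits)
    {
        SCHIB *schib = &sch->curr_status;
        uint16_t old_ctrl = schib->scsw.ctrl;
        IOInstEnding cc;

        schib->scsw.ctrl &= ~clear_bits;
        schib->scsw.ctrl |= set_bits;

        cc = do_subchannel_work(sch);
        if (cc != IOINST_CC_EXPECTED) {
            /* revert to the status quo ante on busy/not-operational */
            schib->scsw.ctrl = old_ctrl;
        }
        return cc;
    }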
} } @@ -711,7 +738,6 @@ int s390_ipl_pv_unpack(void) void s390_ipl_prepare_cpu(S390CPU *cpu) { S390IPLState *ipl = get_ipl_device(); - Error *err = NULL; cpu->env.psw.addr = ipl->start_addr; cpu->env.psw.mask = IPL_PSW_MASK; @@ -723,10 +749,7 @@ void s390_ipl_prepare_cpu(S390CPU *cpu) } } if (ipl->netboot) { - if (load_netboot_image(&err) < 0) { - error_report_err(err); - exit(1); - } + load_netboot_image(&error_fatal); ipl->qipl.netboot_start_addr = cpu_to_be64(ipl->start_addr); } s390_ipl_set_boot_menu(ipl); diff --git a/hw/s390x/ipl.h b/hw/s390x/ipl.h index dfc6dfd89c..7fc86e7905 100644 --- a/hw/s390x/ipl.h +++ b/hw/s390x/ipl.h @@ -140,7 +140,7 @@ void s390_ipl_clear_reset_request(void); * have an offset of 4 + n * 8 bytes within the struct in order * to keep it double-word aligned. * The total size of the struct must never exceed 28 bytes. - * This definition must be kept in sync with the defininition + * This definition must be kept in sync with the definition * in pc-bios/s390-ccw/iplb.h. */ struct QemuIplParameters { diff --git a/hw/s390x/meson.build b/hw/s390x/meson.build index 28484256ec..f291016fee 100644 --- a/hw/s390x/meson.build +++ b/hw/s390x/meson.build @@ -23,6 +23,7 @@ s390x_ss.add(when: 'CONFIG_KVM', if_true: files( 's390-skeys-kvm.c', 's390-stattrib-kvm.c', 'pv.c', + 's390-pci-kvm.c', )) s390x_ss.add(when: 'CONFIG_TCG', if_true: files( 'tod-tcg.c', @@ -44,6 +45,7 @@ virtio_ss.add(when: 'CONFIG_VIRTIO_SERIAL', if_true: files('virtio-ccw-serial.c' if have_virtfs virtio_ss.add(when: 'CONFIG_VIRTIO_9P', if_true: files('virtio-ccw-9p.c')) endif +virtio_ss.add(when: 'CONFIG_VHOST_SCSI', if_true: files('vhost-scsi-ccw.c')) virtio_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-ccw.c')) virtio_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs-ccw.c')) s390x_ss.add_all(when: 'CONFIG_VIRTIO_CCW', if_true: virtio_ss) diff --git a/hw/s390x/pv.c b/hw/s390x/pv.c index 401b63d6cb..8dfe92d8df 100644 --- a/hw/s390x/pv.c +++ b/hw/s390x/pv.c @@ -20,6 +20,11 @@ #include "exec/confidential-guest-support.h" #include "hw/s390x/ipl.h" #include "hw/s390x/pv.h" +#include "target/s390x/kvm/kvm_s390x.h" + +static bool info_valid; +static struct kvm_s390_pv_info_vm info_vm; +static struct kvm_s390_pv_info_dump info_dump; static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data) { @@ -45,7 +50,7 @@ static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data) * This macro lets us pass the command as a string to the function so * we can print it on an error. 
*/ -#define s390_pv_cmd(cmd, data) __s390_pv_cmd(cmd, #cmd, data); +#define s390_pv_cmd(cmd, data) __s390_pv_cmd(cmd, #cmd, data) #define s390_pv_cmd_exit(cmd, data) \ { \ int rc; \ @@ -56,6 +61,42 @@ static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data) } \ } +int s390_pv_query_info(void) +{ + struct kvm_s390_pv_info info = { + .header.id = KVM_PV_INFO_VM, + .header.len_max = sizeof(info.header) + sizeof(info.vm), + }; + int rc; + + /* Info API's first user is dump so they are bundled */ + if (!kvm_s390_get_protected_dump()) { + return 0; + } + + rc = s390_pv_cmd(KVM_PV_INFO, &info); + if (rc) { + error_report("KVM PV INFO cmd %x failed: %s", + info.header.id, strerror(-rc)); + return rc; + } + memcpy(&info_vm, &info.vm, sizeof(info.vm)); + + info.header.id = KVM_PV_INFO_DUMP; + info.header.len_max = sizeof(info.header) + sizeof(info.dump); + rc = s390_pv_cmd(KVM_PV_INFO, &info); + if (rc) { + error_report("KVM PV INFO cmd %x failed: %s", + info.header.id, strerror(-rc)); + return rc; + } + + memcpy(&info_dump, &info.dump, sizeof(info.dump)); + info_valid = true; + + return rc; +} + int s390_pv_vm_enable(void) { return s390_pv_cmd(KVM_PV_ENABLE, NULL); @@ -114,6 +155,77 @@ void s390_pv_inject_reset_error(CPUState *cs) env->regs[r1 + 1] = DIAG_308_RC_INVAL_FOR_PV; } +uint64_t kvm_s390_pv_dmp_get_size_cpu(void) +{ + return info_dump.dump_cpu_buffer_len; +} + +uint64_t kvm_s390_pv_dmp_get_size_completion_data(void) +{ + return info_dump.dump_config_finalize_len; +} + +uint64_t kvm_s390_pv_dmp_get_size_mem_state(void) +{ + return info_dump.dump_config_mem_buffer_per_1m; +} + +bool kvm_s390_pv_info_basic_valid(void) +{ + return info_valid; +} + +static int s390_pv_dump_cmd(uint64_t subcmd, uint64_t uaddr, uint64_t gaddr, + uint64_t len) +{ + struct kvm_s390_pv_dmp dmp = { + .subcmd = subcmd, + .buff_addr = uaddr, + .buff_len = len, + .gaddr = gaddr, + }; + int ret; + + ret = s390_pv_cmd(KVM_PV_DUMP, (void *)&dmp); + if (ret) { + error_report("KVM DUMP command %ld failed", subcmd); + } + return ret; +} + +int kvm_s390_dump_cpu(S390CPU *cpu, void *buff) +{ + struct kvm_s390_pv_dmp dmp = { + .subcmd = KVM_PV_DUMP_CPU, + .buff_addr = (uint64_t)buff, + .gaddr = 0, + .buff_len = info_dump.dump_cpu_buffer_len, + }; + struct kvm_pv_cmd pv = { + .cmd = KVM_PV_DUMP, + .data = (uint64_t)&dmp, + }; + + return kvm_vcpu_ioctl(CPU(cpu), KVM_S390_PV_CPU_COMMAND, &pv); +} + +int kvm_s390_dump_init(void) +{ + return s390_pv_dump_cmd(KVM_PV_DUMP_INIT, 0, 0, 0); +} + +int kvm_s390_dump_mem_state(uint64_t gaddr, size_t len, void *dest) +{ + return s390_pv_dump_cmd(KVM_PV_DUMP_CONFIG_STOR_STATE, (uint64_t)dest, + gaddr, len); +} + +int kvm_s390_dump_completion_data(void *buff) +{ + return s390_pv_dump_cmd(KVM_PV_DUMP_COMPLETE, (uint64_t)buff, 0, + info_dump.dump_config_finalize_len); +} + #define TYPE_S390_PV_GUEST "s390-pv-guest" OBJECT_DECLARE_SIMPLE_TYPE(S390PVGuest, S390_PV_GUEST) diff --git a/hw/s390x/s390-ccw.c b/hw/s390x/s390-ccw.c index 2fc8bb9c23..e2d86d96e7 100644 --- a/hw/s390x/s390-ccw.c +++ b/hw/s390x/s390-ccw.c @@ -57,7 +57,7 @@ IOInstEnding s390_ccw_store(SubchDev *sch) /* * This code is called for both virtual and passthrough devices, - * but only applies to to the latter. This ugly check makes that + * but only applies to the latter. This ugly check makes that * distinction for us. 
*/ if (object_dynamic_cast(OBJECT(sch->driver_data), TYPE_S390_CCW)) { diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c index 7db1c5943f..977e7daa15 100644 --- a/hw/s390x/s390-pci-bus.c +++ b/hw/s390x/s390-pci-bus.c @@ -16,6 +16,7 @@ #include "qapi/visitor.h" #include "hw/s390x/s390-pci-bus.h" #include "hw/s390x/s390-pci-inst.h" +#include "hw/s390x/s390-pci-kvm.h" #include "hw/s390x/s390-pci-vfio.h" #include "hw/pci/pci_bus.h" #include "hw/qdev-properties.h" @@ -189,7 +190,10 @@ void s390_pci_sclp_deconfigure(SCCB *sccb) rc = SCLP_RC_NO_ACTION_REQUIRED; break; default: - if (pbdev->summary_ind) { + if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) { + /* Interpreted devices were using interrupt forwarding */ + s390_pci_kvm_aif_disable(pbdev); + } else if (pbdev->summary_ind) { pci_dereg_irqs(pbdev); } if (pbdev->iommu->enabled) { @@ -330,7 +334,7 @@ static unsigned int calc_sx(dma_addr_t ptr) static unsigned int calc_px(dma_addr_t ptr) { - return ((unsigned long) ptr >> PAGE_SHIFT) & ZPCI_PT_MASK; + return ((unsigned long) ptr >> TARGET_PAGE_BITS) & ZPCI_PT_MASK; } static uint64_t get_rt_sto(uint64_t entry) @@ -506,7 +510,7 @@ uint16_t s390_guest_io_table_walk(uint64_t g_iota, hwaddr addr, int8_t ett = 1; uint16_t error = 0; - entry->iova = addr & PAGE_MASK; + entry->iova = addr & TARGET_PAGE_MASK; entry->translated_addr = 0; entry->perm = IOMMU_RW; @@ -526,7 +530,7 @@ static IOMMUTLBEntry s390_translate_iommu(IOMMUMemoryRegion *mr, hwaddr addr, { S390PCIIOMMU *iommu = container_of(mr, S390PCIIOMMU, iommu_mr); S390IOTLBEntry *entry; - uint64_t iova = addr & PAGE_MASK; + uint64_t iova = addr & TARGET_PAGE_MASK; uint16_t error = 0; IOMMUTLBEntry ret = { .target_as = &address_space_memory, @@ -562,7 +566,7 @@ static IOMMUTLBEntry s390_translate_iommu(IOMMUMemoryRegion *mr, hwaddr addr, ret.perm = entry->perm; } else { ret.iova = iova; - ret.addr_mask = ~PAGE_MASK; + ret.addr_mask = ~TARGET_PAGE_MASK; ret.perm = IOMMU_NONE; } @@ -744,13 +748,14 @@ static void s390_pci_iommu_free(S390pciState *s, PCIBus *bus, int32_t devfn) object_unref(OBJECT(iommu)); } -S390PCIGroup *s390_group_create(int id) +S390PCIGroup *s390_group_create(int id, int host_id) { S390PCIGroup *group; S390pciState *s = s390_get_phb(); group = g_new0(S390PCIGroup, 1); group->id = id; + group->host_id = host_id; QTAILQ_INSERT_TAIL(&s->zpci_groups, group, link); return group; } @@ -768,12 +773,25 @@ S390PCIGroup *s390_group_find(int id) return NULL; } +S390PCIGroup *s390_group_find_host_sim(int host_id) +{ + S390PCIGroup *group; + S390pciState *s = s390_get_phb(); + + QTAILQ_FOREACH(group, &s->zpci_groups, link) { + if (group->id >= ZPCI_SIM_GRP_START && group->host_id == host_id) { + return group; + } + } + return NULL; +} + static void s390_pci_init_default_group(void) { S390PCIGroup *group; ClpRspQueryPciGrp *resgrp; - group = s390_group_create(ZPCI_DEFAULT_FN_GRP); + group = s390_group_create(ZPCI_DEFAULT_FN_GRP, ZPCI_DEFAULT_FN_GRP); resgrp = &group->zpci_group; resgrp->fr = 1; resgrp->dasm = 0; @@ -782,6 +800,7 @@ static void s390_pci_init_default_group(void) resgrp->i = 128; resgrp->maxstbl = 128; resgrp->version = 0; + resgrp->dtsm = ZPCI_DTSM; } static void set_pbdev_info(S390PCIBusDevice *pbdev) @@ -813,13 +832,14 @@ static void s390_pcihost_realize(DeviceState *dev, Error **errp) qbus_set_hotplug_handler(bus, OBJECT(dev)); phb->bus = b; - s->bus = S390_PCI_BUS(qbus_create(TYPE_S390_PCI_BUS, dev, NULL)); + s->bus = S390_PCI_BUS(qbus_new(TYPE_S390_PCI_BUS, dev, NULL)); 
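The calc_px()/s390_guest_io_table_walk()/s390_translate_iommu() hunks above replace the host-derived PAGE_SHIFT/PAGE_MASK/PAGE_SIZE with the guest-visible TARGET_PAGE_BITS/TARGET_PAGE_MASK/TARGET_PAGE_SIZE, so the zPCI IOMMU math follows the guest's 4 KiB page geometry regardless of the host page size. A standalone illustration with the s390x value (TARGET_PAGE_BITS == 12) hard-coded, not QEMU code:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const unsigned page_bits = 12;                          /* s390x TARGET_PAGE_BITS */
        const uint64_t page_mask = ~((1ULL << page_bits) - 1);  /* ...TARGET_PAGE_MASK    */
        const uint64_t iova      = 0x8001234ULL;

        assert((iova & page_mask)  == 0x8001000ULL); /* 4 KiB frame base */
        assert((iova >> page_bits) == 0x8001ULL);    /* frame index      */
        return 0;
    }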
qbus_set_hotplug_handler(BUS(s->bus), OBJECT(dev)); s->iommu_table = g_hash_table_new_full(g_int64_hash, g_int64_equal, NULL, g_free); s->zpci_table = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, NULL); s->bus_no = 0; + s->next_sim_grp = ZPCI_SIM_GRP_START; QTAILQ_INIT(&s->pending_sei); QTAILQ_INIT(&s->zpci_devs); QTAILQ_INIT(&s->zpci_dma_limit); @@ -868,7 +888,7 @@ static int s390_pci_msix_init(S390PCIBusDevice *pbdev) name = g_strdup_printf("msix-s390-%04x", pbdev->uid); memory_region_init_io(&pbdev->msix_notify_mr, OBJECT(pbdev), - &s390_msi_ctrl_ops, pbdev, name, PAGE_SIZE); + &s390_msi_ctrl_ops, pbdev, name, TARGET_PAGE_SIZE); memory_region_add_subregion(&pbdev->iommu->mr, pbdev->pci_group->zpci_group.msia, &pbdev->msix_notify_mr); @@ -879,6 +899,10 @@ static int s390_pci_msix_init(S390PCIBusDevice *pbdev) static void s390_pci_msix_free(S390PCIBusDevice *pbdev) { + if (pbdev->msix.entries == 0) { + return; + } + memory_region_del_subregion(&pbdev->iommu->mr, &pbdev->msix_notify_mr); object_unparent(OBJECT(&pbdev->msix_notify_mr)); } @@ -970,12 +994,51 @@ static void s390_pci_update_subordinate(PCIDevice *dev, uint32_t nr) } } +static int s390_pci_interp_plug(S390pciState *s, S390PCIBusDevice *pbdev) +{ + uint32_t idx, fh; + + if (!s390_pci_get_host_fh(pbdev, &fh)) { + return -EPERM; + } + + /* + * The host device is already in an enabled state, but we always present + * the initial device state to the guest as disabled (ZPCI_FS_DISABLED). + * Therefore, mask off the enable bit from the passthrough handle until + * the guest issues a CLP SET PCI FN later to enable the device. + */ + pbdev->fh = fh & ~FH_MASK_ENABLE; + + /* Next, see if the idx is already in-use */ + idx = pbdev->fh & FH_MASK_INDEX; + if (pbdev->idx != idx) { + if (s390_pci_find_dev_by_idx(s, idx)) { + return -EINVAL; + } + /* + * Update the idx entry with the passed through idx + * If the relinquished idx is lower than next_idx, use it + * to replace next_idx + */ + g_hash_table_remove(s->zpci_table, &pbdev->idx); + if (idx < s->next_idx) { + s->next_idx = idx; + } + pbdev->idx = idx; + g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev); + } + + return 0; +} + static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev); PCIDevice *pdev = NULL; S390PCIBusDevice *pbdev = NULL; + int rc; if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) { PCIBridge *pb = PCI_BRIDGE(dev); @@ -1021,15 +1084,41 @@ static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev, set_pbdev_info(pbdev); if (object_dynamic_cast(OBJECT(dev), "vfio-pci")) { - pbdev->fh |= FH_SHM_VFIO; + /* + * By default, interpretation is always requested; if the available + * facilities indicate it is not available, fallback to the + * interception model. 
+ */ + if (pbdev->interp) { + if (s390_pci_kvm_interp_allowed()) { + rc = s390_pci_interp_plug(s, pbdev); + if (rc) { + error_setg(errp, "Plug failed for zPCI device in " + "interpretation mode: %d", rc); + return; + } + } else { + DPRINTF("zPCI interpretation facilities missing.\n"); + pbdev->interp = false; + pbdev->forwarding_assist = false; + } + } pbdev->iommu->dma_limit = s390_pci_start_dma_count(s, pbdev); /* Fill in CLP information passed via the vfio region */ s390_pci_get_clp_info(pbdev); + if (!pbdev->interp) { + /* Do vfio passthrough but intercept for I/O */ + pbdev->fh |= FH_SHM_VFIO; + pbdev->forwarding_assist = false; + } } else { pbdev->fh |= FH_SHM_EMUL; + /* Always intercept emulated devices */ + pbdev->interp = false; + pbdev->forwarding_assist = false; } - if (s390_pci_msix_init(pbdev)) { + if (s390_pci_msix_init(pbdev) && !pbdev->interp) { error_setg(errp, "MSI-X support is mandatory " "in the S390 architecture"); return; @@ -1163,8 +1252,7 @@ static void s390_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev, } /* Assign numbers to all child bridges. The last is the highest number. */ - pci_for_each_device(sec_bus, pci_bus_num(sec_bus), - s390_pci_enumerate_bridge, s); + pci_for_each_device_under_bus(sec_bus, s390_pci_enumerate_bridge, s); pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1); } @@ -1177,7 +1265,10 @@ static void s390_pcihost_reset(DeviceState *dev) /* Process all pending unplug requests */ QTAILQ_FOREACH_SAFE(pbdev, &s->zpci_devs, link, next) { if (pbdev->unplug_requested) { - if (pbdev->summary_ind) { + if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) { + /* Interpreted devices were using interrupt forwarding */ + s390_pci_kvm_aif_disable(pbdev); + } else if (pbdev->summary_ind) { pci_dereg_irqs(pbdev); } if (pbdev->iommu->enabled) { @@ -1193,7 +1284,7 @@ static void s390_pcihost_reset(DeviceState *dev) * on every system reset, we also have to reassign numbers. 
*/ s->bus_no = 0; - pci_for_each_device(bus, pci_bus_num(bus), s390_pci_enumerate_bridge, s); + pci_for_each_device_under_bus(bus, s390_pci_enumerate_bridge, s); } static void s390_pcihost_class_init(ObjectClass *klass, void *data) @@ -1315,7 +1406,10 @@ static void s390_pci_device_reset(DeviceState *dev) break; } - if (pbdev->summary_ind) { + if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) { + /* Interpreted devices were using interrupt forwarding */ + s390_pci_kvm_aif_disable(pbdev); + } else if (pbdev->summary_ind) { pci_dereg_irqs(pbdev); } if (pbdev->iommu->enabled) { @@ -1360,6 +1454,9 @@ static Property s390_pci_device_properties[] = { DEFINE_PROP_UINT16("uid", S390PCIBusDevice, uid, UID_UNDEFINED), DEFINE_PROP_S390_PCI_FID("fid", S390PCIBusDevice, fid), DEFINE_PROP_STRING("target", S390PCIBusDevice, target), + DEFINE_PROP_BOOL("interpret", S390PCIBusDevice, interp, true), + DEFINE_PROP_BOOL("forwarding-assist", S390PCIBusDevice, forwarding_assist, + true), DEFINE_PROP_END_OF_LIST(), }; @@ -1392,7 +1489,7 @@ static const TypeInfo s390_pci_device_info = { .class_init = s390_pci_device_class_init, }; -static TypeInfo s390_pci_iommu_info = { +static const TypeInfo s390_pci_iommu_info = { .name = TYPE_S390_PCI_IOMMU, .parent = TYPE_OBJECT, .instance_size = sizeof(S390PCIIOMMU), diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c index 9ec277d50e..7cc4bcf850 100644 --- a/hw/s390x/s390-pci-inst.c +++ b/hw/s390x/s390-pci-inst.c @@ -18,6 +18,8 @@ #include "sysemu/hw_accel.h" #include "hw/s390x/s390-pci-inst.h" #include "hw/s390x/s390-pci-bus.h" +#include "hw/s390x/s390-pci-kvm.h" +#include "hw/s390x/s390-pci-vfio.h" #include "hw/s390x/tod.h" #ifndef DEBUG_S390PCI_INST @@ -246,6 +248,20 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra) goto out; } + /* + * Take this opportunity to make sure we still have an accurate + * host fh. It's possible part of the handle changed while the + * device was disabled to the guest (e.g. 
vfio hot reset for + * ISM during plug) + */ + if (pbdev->interp) { + /* Take this opportunity to make sure we are sync'd with host */ + if (!s390_pci_get_host_fh(pbdev, &pbdev->fh) || + !(pbdev->fh & FH_MASK_ENABLE)) { + stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH); + goto out; + } + } pbdev->fh |= FH_MASK_ENABLE; pbdev->state = ZPCI_FS_ENABLED; stl_p(&ressetpci->fh, pbdev->fh); @@ -329,6 +345,7 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra) stw_p(&resgrp->i, group->zpci_group.i); stw_p(&resgrp->maxstbl, group->zpci_group.maxstbl); resgrp->version = group->zpci_group.version; + resgrp->dtsm = group->zpci_group.dtsm; stw_p(&resgrp->hdr.rsp, CLP_RC_OK); break; } @@ -613,7 +630,7 @@ static uint32_t s390_pci_update_iotlb(S390PCIIOMMU *iommu, .iova = entry->iova, .translated_addr = entry->translated_addr, .perm = entry->perm, - .addr_mask = ~PAGE_MASK, + .addr_mask = ~TARGET_PAGE_MASK, }, }; @@ -640,7 +657,7 @@ static uint32_t s390_pci_update_iotlb(S390PCIIOMMU *iommu, cache = g_new(S390IOTLBEntry, 1); cache->iova = entry->iova; cache->translated_addr = entry->translated_addr; - cache->len = PAGE_SIZE; + cache->len = TARGET_PAGE_SIZE; cache->perm = entry->perm; g_hash_table_replace(iommu->iotlb, &cache->iova, cache); dec_dma_avail(iommu); @@ -660,8 +677,9 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra) S390PCIBusDevice *pbdev; S390PCIIOMMU *iommu; S390IOTLBEntry entry; - hwaddr start, end; + hwaddr start, end, sstart; uint32_t dma_avail; + bool again; if (env->psw.mask & PSW_MASK_PSTATE) { s390_program_interrupt(env, PGM_PRIVILEGED, ra); @@ -674,7 +692,7 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra) } fh = env->regs[r1] >> 32; - start = env->regs[r2]; + sstart = start = env->regs[r2]; end = start + env->regs[r2 + 1]; pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh); @@ -715,6 +733,9 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra) goto err; } + retry: + start = sstart; + again = false; while (start < end) { error = s390_guest_io_table_walk(iommu->g_iota, start, &entry); if (error) { @@ -722,13 +743,24 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra) } start += entry.len; - while (entry.iova < start && entry.iova < end && - (dma_avail > 0 || entry.perm == IOMMU_NONE)) { - dma_avail = s390_pci_update_iotlb(iommu, &entry); - entry.iova += PAGE_SIZE; - entry.translated_addr += PAGE_SIZE; + while (entry.iova < start && entry.iova < end) { + if (dma_avail > 0 || entry.perm == IOMMU_NONE) { + dma_avail = s390_pci_update_iotlb(iommu, &entry); + entry.iova += TARGET_PAGE_SIZE; + entry.translated_addr += TARGET_PAGE_SIZE; + } else { + /* + * We are unable to make a new mapping at this time, continue + * on and hopefully free up more space. Then attempt another + * pass. 
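/*
 * ---- Editor's sketch, not part of the patch ----
 * A stand-alone model of the two-pass RPCIT refresh introduced above:
 * mappings are applied while the DMA quota lasts, anything that cannot be
 * mapped right now is deferred, and the range is walked again once unmaps
 * later in the same range have freed quota. The page states, quota counter
 * and all names are illustrative, not QEMU code.
 */
#include <stdbool.h>
#include <stdio.h>

#define NPAGES 6

/* Pages 3 and 4 start out mapped; the guest asks to unmap them below. */
static bool mapped[NPAGES] = { false, false, false, true, true, false };
static unsigned quota = 2;              /* stand-in for dma_avail */

static void refresh(const bool *unmap)
{
    bool again;

retry:
    again = false;
    for (int i = 0; i < NPAGES; i++) {
        if (unmap[i]) {
            if (mapped[i]) {            /* unmapping frees quota */
                mapped[i] = false;
                quota++;
            }
        } else if (!mapped[i]) {
            if (quota > 0) {            /* map while quota lasts */
                mapped[i] = true;
                quota--;
            } else {
                again = true;           /* out of quota: defer, keep scanning */
            }
        }
    }
    if (again && quota > 0) {
        goto retry;                     /* later unmaps freed quota: one more pass */
    }
}

int main(void)
{
    const bool unmap[NPAGES] = { false, false, false, true, true, false };

    refresh(unmap);
    for (int i = 0; i < NPAGES; i++) {
        printf("page %d: %s\n", i, mapped[i] ? "mapped" : "unmapped");
    }
    return 0;
}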
+ */ + again = true; + break; + } } } + if (again && dma_avail > 0) + goto retry; err: if (error) { pbdev->state = ZPCI_FS_ERROR; @@ -916,9 +948,10 @@ int pci_dereg_irqs(S390PCIBusDevice *pbdev) return 0; } -static int reg_ioat(CPUS390XState *env, S390PCIIOMMU *iommu, ZpciFib fib, +static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib, uintptr_t ra) { + S390PCIIOMMU *iommu = pbdev->iommu; uint64_t pba = ldq_p(&fib.pba); uint64_t pal = ldq_p(&fib.pal); uint64_t g_iota = ldq_p(&fib.iota); @@ -927,7 +960,7 @@ static int reg_ioat(CPUS390XState *env, S390PCIIOMMU *iommu, ZpciFib fib, pba &= ~0xfff; pal |= 0xfff; - if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) { + if (pba > pal || pba < pbdev->zpci_fn.sdma || pal > pbdev->zpci_fn.edma) { s390_program_interrupt(env, PGM_OPERAND, ra); return -EINVAL; } @@ -1045,7 +1078,33 @@ static void fmb_update(void *opaque) sizeof(pbdev->fmb.last_update))) { return; } - timer_mod(pbdev->fmb_timer, t + DEFAULT_MUI); + timer_mod(pbdev->fmb_timer, t + pbdev->pci_group->zpci_group.mui); +} + +static int mpcifc_reg_int_interp(S390PCIBusDevice *pbdev, ZpciFib *fib) +{ + int rc; + + rc = s390_pci_kvm_aif_enable(pbdev, fib, pbdev->forwarding_assist); + if (rc) { + DPRINTF("Failed to enable interrupt forwarding\n"); + return rc; + } + + return 0; +} + +static int mpcifc_dereg_int_interp(S390PCIBusDevice *pbdev, ZpciFib *fib) +{ + int rc; + + rc = s390_pci_kvm_aif_disable(pbdev); + if (rc) { + DPRINTF("Failed to disable interrupt forwarding\n"); + return rc; + } + + return 0; } int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar, @@ -1102,7 +1161,12 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar, switch (oc) { case ZPCI_MOD_FC_REG_INT: - if (pbdev->summary_ind) { + if (pbdev->interp) { + if (mpcifc_reg_int_interp(pbdev, &fib)) { + cc = ZPCI_PCI_LS_ERR; + s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); + } + } else if (pbdev->summary_ind) { cc = ZPCI_PCI_LS_ERR; s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); } else if (reg_irqs(env, pbdev, fib)) { @@ -1111,7 +1175,12 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar, } break; case ZPCI_MOD_FC_DEREG_INT: - if (!pbdev->summary_ind) { + if (pbdev->interp) { + if (mpcifc_dereg_int_interp(pbdev, &fib)) { + cc = ZPCI_PCI_LS_ERR; + s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); + } + } else if (!pbdev->summary_ind) { cc = ZPCI_PCI_LS_ERR; s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); } else { @@ -1125,7 +1194,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar, } else if (pbdev->iommu->enabled) { cc = ZPCI_PCI_LS_ERR; s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); - } else if (reg_ioat(env, pbdev->iommu, fib, ra)) { + } else if (reg_ioat(env, pbdev, fib, ra)) { cc = ZPCI_PCI_LS_ERR; s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES); } @@ -1150,7 +1219,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar, s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); } else { pci_dereg_ioat(pbdev->iommu); - if (reg_ioat(env, pbdev->iommu, fib, ra)) { + if (reg_ioat(env, pbdev, fib, ra)) { cc = ZPCI_PCI_LS_ERR; s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES); } @@ -1203,7 +1272,8 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar, } pbdev->fmb_addr = fmb_addr; timer_mod(pbdev->fmb_timer, - qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + DEFAULT_MUI); + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + + 
pbdev->pci_group->zpci_group.mui); break; } default: diff --git a/hw/s390x/s390-pci-kvm.c b/hw/s390x/s390-pci-kvm.c new file mode 100644 index 0000000000..9134fe185f --- /dev/null +++ b/hw/s390x/s390-pci-kvm.c @@ -0,0 +1,52 @@ +/* + * s390 zPCI KVM interfaces + * + * Copyright 2022 IBM Corp. + * Author(s): Matthew Rosato + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#include "qemu/osdep.h" + +#include + +#include "kvm/kvm_s390x.h" +#include "hw/s390x/pv.h" +#include "hw/s390x/s390-pci-bus.h" +#include "hw/s390x/s390-pci-kvm.h" +#include "hw/s390x/s390-pci-inst.h" +#include "cpu_models.h" + +bool s390_pci_kvm_interp_allowed(void) +{ + return kvm_s390_get_zpci_op() && !s390_is_pv(); +} + +int s390_pci_kvm_aif_enable(S390PCIBusDevice *pbdev, ZpciFib *fib, bool assist) +{ + struct kvm_s390_zpci_op args = { + .fh = pbdev->fh, + .op = KVM_S390_ZPCIOP_REG_AEN, + .u.reg_aen.ibv = fib->aibv, + .u.reg_aen.sb = fib->aisb, + .u.reg_aen.noi = FIB_DATA_NOI(fib->data), + .u.reg_aen.isc = FIB_DATA_ISC(fib->data), + .u.reg_aen.sbo = FIB_DATA_AISBO(fib->data), + .u.reg_aen.flags = (assist) ? 0 : KVM_S390_ZPCIOP_REGAEN_HOST + }; + + return kvm_vm_ioctl(kvm_state, KVM_S390_ZPCI_OP, &args); +} + +int s390_pci_kvm_aif_disable(S390PCIBusDevice *pbdev) +{ + struct kvm_s390_zpci_op args = { + .fh = pbdev->fh, + .op = KVM_S390_ZPCIOP_DEREG_AEN + }; + + return kvm_vm_ioctl(kvm_state, KVM_S390_ZPCI_OP, &args); +} diff --git a/hw/s390x/s390-pci-vfio.c b/hw/s390x/s390-pci-vfio.c index 2a153fa8c9..5f0adb0b4a 100644 --- a/hw/s390x/s390-pci-vfio.c +++ b/hw/s390x/s390-pci-vfio.c @@ -124,18 +124,44 @@ static void s390_pci_read_base(S390PCIBusDevice *pbdev, pbdev->zpci_fn.pft = 0; } +static bool get_host_fh(S390PCIBusDevice *pbdev, struct vfio_device_info *info, + uint32_t *fh) +{ + struct vfio_info_cap_header *hdr; + struct vfio_device_info_cap_zpci_base *cap; + VFIOPCIDevice *vpci = container_of(pbdev->pdev, VFIOPCIDevice, pdev); + + hdr = vfio_get_device_info_cap(info, VFIO_DEVICE_INFO_CAP_ZPCI_BASE); + + /* Can only get the host fh with version 2 or greater */ + if (hdr == NULL || hdr->version < 2) { + trace_s390_pci_clp_cap(vpci->vbasedev.name, + VFIO_DEVICE_INFO_CAP_ZPCI_BASE); + return false; + } + cap = (void *) hdr; + + *fh = cap->fh; + return true; +} + static void s390_pci_read_group(S390PCIBusDevice *pbdev, struct vfio_device_info *info) { struct vfio_info_cap_header *hdr; struct vfio_device_info_cap_zpci_group *cap; + S390pciState *s = s390_get_phb(); ClpRspQueryPciGrp *resgrp; VFIOPCIDevice *vpci = container_of(pbdev->pdev, VFIOPCIDevice, pdev); + uint8_t start_gid = pbdev->zpci_fn.pfgid; hdr = vfio_get_device_info_cap(info, VFIO_DEVICE_INFO_CAP_ZPCI_GROUP); - /* If capability not provided, just use the default group */ - if (hdr == NULL) { + /* + * If capability not provided or the underlying hostdev is simulated, just + * use the default group. + */ + if (hdr == NULL || pbdev->zpci_fn.pfgid >= ZPCI_SIM_GRP_START) { trace_s390_pci_clp_cap(vpci->vbasedev.name, VFIO_DEVICE_INFO_CAP_ZPCI_GROUP); pbdev->zpci_fn.pfgid = ZPCI_DEFAULT_FN_GRP; @@ -144,11 +170,40 @@ static void s390_pci_read_group(S390PCIBusDevice *pbdev, } cap = (void *) hdr; + /* + * For an intercept device, let's use an existing simulated group if one + * one was already created for other intercept devices in this group. + * If not, create a new simulated group if any are still available. 
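/*
 * ---- Editor's sketch, not part of the patch ----
 * The group selection described in the comment above, reduced to a single
 * helper: an intercept device reuses a simulated group already created for
 * its host group, otherwise takes the next free simulated ID, otherwise
 * falls back to the default group. The constants and the lookup callback
 * are illustrative, not the real ZPCI_* values.
 */
#include <stdbool.h>
#include <stdint.h>

#define SIM_GRP_START 0xf0
#define DEFAULT_GRP   0xff

/* Stand-in for s390_group_find_host_sim(): does a simulated group already
 * back this host group ID? If so, return its ID via *sim_gid. */
typedef bool (*find_sim_fn)(uint8_t host_gid, uint8_t *sim_gid);

uint8_t pick_group(uint8_t host_gid, uint8_t *next_sim_grp, find_sim_fn find_sim)
{
    uint8_t sim_gid;

    if (find_sim(host_gid, &sim_gid)) {
        return sim_gid;                 /* reuse the existing simulated group */
    }
    if (*next_sim_grp == DEFAULT_GRP) {
        return DEFAULT_GRP;             /* simulated IDs exhausted: use default */
    }
    return (*next_sim_grp)++;           /* allocate a fresh simulated group */
}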
+ * If all else fails, just fall back on the default group. + */ + if (!pbdev->interp) { + pbdev->pci_group = s390_group_find_host_sim(pbdev->zpci_fn.pfgid); + if (pbdev->pci_group) { + /* Use existing simulated group */ + pbdev->zpci_fn.pfgid = pbdev->pci_group->id; + return; + } else { + if (s->next_sim_grp == ZPCI_DEFAULT_FN_GRP) { + /* All out of simulated groups, use default */ + trace_s390_pci_clp_cap(vpci->vbasedev.name, + VFIO_DEVICE_INFO_CAP_ZPCI_GROUP); + pbdev->zpci_fn.pfgid = ZPCI_DEFAULT_FN_GRP; + pbdev->pci_group = s390_group_find(ZPCI_DEFAULT_FN_GRP); + return; + } else { + /* We can assign a new simulated group */ + pbdev->zpci_fn.pfgid = s->next_sim_grp; + s->next_sim_grp++; + /* Fall through to create the new sim group using CLP info */ + } + } + } + /* See if the PCI group is already defined, create if not */ pbdev->pci_group = s390_group_find(pbdev->zpci_fn.pfgid); if (!pbdev->pci_group) { - pbdev->pci_group = s390_group_create(pbdev->zpci_fn.pfgid); + pbdev->pci_group = s390_group_create(pbdev->zpci_fn.pfgid, start_gid); resgrp = &pbdev->pci_group->zpci_group; if (cap->flags & VFIO_DEVICE_INFO_ZPCI_FLAG_REFRESH) { @@ -158,8 +213,13 @@ static void s390_pci_read_group(S390PCIBusDevice *pbdev, resgrp->msia = cap->msi_addr; resgrp->mui = cap->mui; resgrp->i = cap->noi; - resgrp->maxstbl = cap->maxstbl; + if (pbdev->interp && hdr->version >= 2) { + resgrp->maxstbl = cap->imaxstbl; + } else { + resgrp->maxstbl = cap->maxstbl; + } resgrp->version = cap->version; + resgrp->dtsm = ZPCI_DTSM; } } @@ -216,25 +276,13 @@ static void s390_pci_read_pfip(S390PCIBusDevice *pbdev, memcpy(pbdev->zpci_fn.pfip, cap->pfip, CLP_PFIP_NR_SEGMENTS); } -/* - * This function will issue the VFIO_DEVICE_GET_INFO ioctl and look for - * capabilities that contain information about CLP features provided by the - * underlying host. - * On entry, defaults have already been placed into the guest CLP response - * buffers. On exit, defaults will have been overwritten for any CLP features - * found in the capability chain; defaults will remain for any CLP features not - * found in the chain. - */ -void s390_pci_get_clp_info(S390PCIBusDevice *pbdev) +static struct vfio_device_info *get_device_info(S390PCIBusDevice *pbdev, + uint32_t argsz) { - g_autofree struct vfio_device_info *info = NULL; + struct vfio_device_info *info = g_malloc0(argsz); VFIOPCIDevice *vfio_pci; - uint32_t argsz; int fd; - argsz = sizeof(*info); - info = g_malloc0(argsz); - vfio_pci = container_of(pbdev->pdev, VFIOPCIDevice, pdev); fd = vfio_pci->vbasedev.fd; @@ -249,7 +297,8 @@ retry: if (ioctl(fd, VFIO_DEVICE_GET_INFO, info)) { trace_s390_pci_clp_dev_info(vfio_pci->vbasedev.name); - return; + g_free(info); + return NULL; } if (info->argsz > argsz) { @@ -258,6 +307,47 @@ retry: goto retry; } + return info; +} + +/* + * Get the host function handle from the vfio CLP capabilities chain. Returns + * true if a fh value was placed into the provided buffer. Returns false + * if a fh could not be obtained (ioctl failed or capability version does + * not include the fh) + */ +bool s390_pci_get_host_fh(S390PCIBusDevice *pbdev, uint32_t *fh) +{ + g_autofree struct vfio_device_info *info = NULL; + + assert(fh); + + info = get_device_info(pbdev, sizeof(*info)); + if (!info) { + return false; + } + + return get_host_fh(pbdev, info, fh); +} + +/* + * This function will issue the VFIO_DEVICE_GET_INFO ioctl and look for + * capabilities that contain information about CLP features provided by the + * underlying host. 
+ * On entry, defaults have already been placed into the guest CLP response + * buffers. On exit, defaults will have been overwritten for any CLP features + * found in the capability chain; defaults will remain for any CLP features not + * found in the chain. + */ +void s390_pci_get_clp_info(S390PCIBusDevice *pbdev) +{ + g_autofree struct vfio_device_info *info = NULL; + + info = get_device_info(pbdev, sizeof(*info)); + if (!info) { + return; + } + /* * Find the CLP features provided and fill in the guest CLP responses. * Always call s390_pci_read_base first as information from this could diff --git a/hw/s390x/s390-skeys-kvm.c b/hw/s390x/s390-skeys-kvm.c index 1c4d805ad8..3ff9d94b80 100644 --- a/hw/s390x/s390-skeys-kvm.c +++ b/hw/s390x/s390-skeys-kvm.c @@ -15,7 +15,7 @@ #include "qemu/error-report.h" #include "qemu/module.h" -static int kvm_s390_skeys_enabled(S390SKeysState *ss) +static bool kvm_s390_skeys_are_enabled(S390SKeysState *ss) { S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); uint8_t single_key; @@ -57,7 +57,7 @@ static void kvm_s390_skeys_class_init(ObjectClass *oc, void *data) S390SKeysClass *skeyclass = S390_SKEYS_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); - skeyclass->skeys_enabled = kvm_s390_skeys_enabled; + skeyclass->skeys_are_enabled = kvm_s390_skeys_are_enabled; skeyclass->get_skeys = kvm_s390_skeys_get; skeyclass->set_skeys = kvm_s390_skeys_set; diff --git a/hw/s390x/s390-skeys.c b/hw/s390x/s390-skeys.c index 9a8d60d1d9..5024faf411 100644 --- a/hw/s390x/s390-skeys.c +++ b/hw/s390x/s390-skeys.c @@ -17,6 +17,8 @@ #include "qapi/qapi-commands-misc-target.h" #include "qapi/qmp/qdict.h" #include "qemu/error-report.h" +#include "sysemu/memory_mapping.h" +#include "exec/address-spaces.h" #include "sysemu/kvm.h" #include "migration/qemu-file-types.h" #include "migration/register.h" @@ -80,11 +82,18 @@ void hmp_info_skeys(Monitor *mon, const QDict *qdict) int r; /* Quick check to see if guest is using storage keys*/ - if (!skeyclass->skeys_enabled(ss)) { + if (!skeyclass->skeys_are_enabled(ss)) { monitor_printf(mon, "Error: This guest is not using storage keys\n"); return; } + if (!address_space_access_valid(&address_space_memory, + addr & TARGET_PAGE_MASK, TARGET_PAGE_SIZE, + false, MEMTXATTRS_UNSPECIFIED)) { + monitor_printf(mon, "Error: The given address is not valid\n"); + return; + } + r = skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key); if (r < 0) { monitor_printf(mon, "Error: %s\n", strerror(-r)); @@ -109,18 +118,17 @@ void qmp_dump_skeys(const char *filename, Error **errp) { S390SKeysState *ss = s390_get_skeys_device(); S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); - MachineState *ms = MACHINE(qdev_get_machine()); - const uint64_t total_count = ms->ram_size / TARGET_PAGE_SIZE; - uint64_t handled_count = 0, cur_count; + GuestPhysBlockList guest_phys_blocks; + GuestPhysBlock *block; + uint64_t pages, gfn; Error *lerr = NULL; - vaddr cur_gfn = 0; uint8_t *buf; int ret; int fd; FILE *f; /* Quick check to see if guest is using storage keys*/ - if (!skeyclass->skeys_enabled(ss)) { + if (!skeyclass->skeys_are_enabled(ss)) { error_setg(errp, "This guest is not using storage keys - " "nothing to dump"); return; @@ -144,53 +152,86 @@ void qmp_dump_skeys(const char *filename, Error **errp) goto out; } - /* we'll only dump initial memory for now */ - while (handled_count < total_count) { - /* Calculate how many keys to ask for & handle overflow case */ - cur_count = MIN(total_count - handled_count, S390_SKEYS_BUFFER_SIZE); + 
assert(qemu_mutex_iothread_locked()); + guest_phys_blocks_init(&guest_phys_blocks); + guest_phys_blocks_append(&guest_phys_blocks); - ret = skeyclass->get_skeys(ss, cur_gfn, cur_count, buf); - if (ret < 0) { - error_setg(errp, "get_keys error %d", ret); - goto out_free; + QTAILQ_FOREACH(block, &guest_phys_blocks.head, next) { + assert(QEMU_IS_ALIGNED(block->target_start, TARGET_PAGE_SIZE)); + assert(QEMU_IS_ALIGNED(block->target_end, TARGET_PAGE_SIZE)); + + gfn = block->target_start / TARGET_PAGE_SIZE; + pages = (block->target_end - block->target_start) / TARGET_PAGE_SIZE; + + while (pages) { + const uint64_t cur_pages = MIN(pages, S390_SKEYS_BUFFER_SIZE); + + ret = skeyclass->get_skeys(ss, gfn, cur_pages, buf); + if (ret < 0) { + error_setg_errno(errp, -ret, "get_keys error"); + goto out_free; + } + + /* write keys to stream */ + write_keys(f, buf, gfn, cur_pages, &lerr); + if (lerr) { + goto out_free; + } + + gfn += cur_pages; + pages -= cur_pages; } - - /* write keys to stream */ - write_keys(f, buf, cur_gfn, cur_count, &lerr); - if (lerr) { - goto out_free; - } - - cur_gfn += cur_count; - handled_count += cur_count; } out_free: + guest_phys_blocks_free(&guest_phys_blocks); error_propagate(errp, lerr); g_free(buf); out: fclose(f); } -static void qemu_s390_skeys_init(Object *obj) +static bool qemu_s390_skeys_are_enabled(S390SKeysState *ss) { - QEMUS390SKeysState *skeys = QEMU_S390_SKEYS(obj); - MachineState *machine = MACHINE(qdev_get_machine()); + QEMUS390SKeysState *skeys = QEMU_S390_SKEYS(ss); - skeys->key_count = machine->ram_size / TARGET_PAGE_SIZE; - skeys->keydata = g_malloc0(skeys->key_count); + /* Lockless check is sufficient. */ + return !!skeys->keydata; } -static int qemu_s390_skeys_enabled(S390SKeysState *ss) +static bool qemu_s390_enable_skeys(S390SKeysState *ss) { - return 1; + QEMUS390SKeysState *skeys = QEMU_S390_SKEYS(ss); + static gsize initialized; + + if (likely(skeys->keydata)) { + return true; + } + + /* + * TODO: Modern Linux doesn't use storage keys unless running KVM guests + * that use storage keys. Therefore, we keep it simple for now. + * + * 1) We should initialize to "referenced+changed" for an initial + * over-indication. Let's avoid touching megabytes of data for now and + * assume that any sane user will issue a storage key instruction before + * actually relying on this data. + * 2) Relying on ram_size and allocating a big array is ugly. We should + * allocate and manage storage key data per RAMBlock or optimally using + * some sparse data structure. + * 3) We only ever have a single S390SKeysState, so relying on + * g_once_init_enter() is good enough. + */ + if (g_once_init_enter(&initialized)) { + MachineState *machine = MACHINE(qdev_get_machine()); + + skeys->key_count = machine->ram_size / TARGET_PAGE_SIZE; + skeys->keydata = g_malloc0(skeys->key_count); + g_once_init_leave(&initialized, 1); + } + return false; } -/* - * TODO: for memory hotplug support qemu_s390_skeys_set and qemu_s390_skeys_get - * will have to make sure that the given gfn belongs to a memory region and not - * a memory hole. 
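/*
 * ---- Editor's sketch, not part of the patch ----
 * The lazy, one-time allocation idiom used above for the storage-key
 * array: nothing is allocated until a guest actually touches storage keys,
 * and g_once_init_enter()/g_once_init_leave() guarantee the allocation
 * happens exactly once. The struct and sizes here are illustrative.
 */
#include <glib.h>

typedef struct {
    guint64 key_count;
    guint8 *keydata;
} SKeys;

gboolean skeys_enable(SKeys *s, guint64 nr_pages)
{
    static gsize initialized;

    if (s->keydata) {
        return TRUE;                    /* already backed by memory */
    }
    if (g_once_init_enter(&initialized)) {
        s->key_count = nr_pages;        /* first caller allocates */
        s->keydata = g_malloc0(nr_pages);
        g_once_init_leave(&initialized, 1);
    }
    return FALSE;                       /* keys were not enabled before this call */
}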
- */ static int qemu_s390_skeys_set(S390SKeysState *ss, uint64_t start_gfn, uint64_t count, uint8_t *keys) { @@ -198,9 +239,10 @@ static int qemu_s390_skeys_set(S390SKeysState *ss, uint64_t start_gfn, int i; /* Check for uint64 overflow and access beyond end of key data */ - if (start_gfn + count > skeydev->key_count || start_gfn + count < count) { - error_report("Error: Setting storage keys for page beyond the end " - "of memory: gfn=%" PRIx64 " count=%" PRId64, + if (unlikely(!skeydev->keydata || start_gfn + count > skeydev->key_count || + start_gfn + count < count)) { + error_report("Error: Setting storage keys for pages with unallocated " + "storage key memory: gfn=%" PRIx64 " count=%" PRId64, start_gfn, count); return -EINVAL; } @@ -218,9 +260,10 @@ static int qemu_s390_skeys_get(S390SKeysState *ss, uint64_t start_gfn, int i; /* Check for uint64 overflow and access beyond end of key data */ - if (start_gfn + count > skeydev->key_count || start_gfn + count < count) { - error_report("Error: Getting storage keys for page beyond the end " - "of memory: gfn=%" PRIx64 " count=%" PRId64, + if (unlikely(!skeydev->keydata || start_gfn + count > skeydev->key_count || + start_gfn + count < count)) { + error_report("Error: Getting storage keys for pages with unallocated " + "storage key memory: gfn=%" PRIx64 " count=%" PRId64, start_gfn, count); return -EINVAL; } @@ -236,7 +279,8 @@ static void qemu_s390_skeys_class_init(ObjectClass *oc, void *data) S390SKeysClass *skeyclass = S390_SKEYS_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); - skeyclass->skeys_enabled = qemu_s390_skeys_enabled; + skeyclass->skeys_are_enabled = qemu_s390_skeys_are_enabled; + skeyclass->enable_skeys = qemu_s390_enable_skeys; skeyclass->get_skeys = qemu_s390_skeys_get; skeyclass->set_skeys = qemu_s390_skeys_set; @@ -247,7 +291,6 @@ static void qemu_s390_skeys_class_init(ObjectClass *oc, void *data) static const TypeInfo qemu_s390_skeys_info = { .name = TYPE_QEMU_S390_SKEYS, .parent = TYPE_S390_SKEYS, - .instance_init = qemu_s390_skeys_init, .instance_size = sizeof(QEMUS390SKeysState), .class_init = qemu_s390_skeys_class_init, .class_size = sizeof(S390SKeysClass), @@ -257,14 +300,13 @@ static void s390_storage_keys_save(QEMUFile *f, void *opaque) { S390SKeysState *ss = S390_SKEYS(opaque); S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); - MachineState *ms = MACHINE(qdev_get_machine()); - uint64_t pages_left = ms->ram_size / TARGET_PAGE_SIZE; - uint64_t read_count, eos = S390_SKEYS_SAVE_FLAG_EOS; - vaddr cur_gfn = 0; + GuestPhysBlockList guest_phys_blocks; + GuestPhysBlock *block; + uint64_t pages, gfn; int error = 0; uint8_t *buf; - if (!skeyclass->skeys_enabled(ss)) { + if (!skeyclass->skeys_are_enabled(ss)) { goto end_stream; } @@ -274,36 +316,52 @@ static void s390_storage_keys_save(QEMUFile *f, void *opaque) goto end_stream; } - /* We only support initial memory. Standby memory is not handled yet. */ - qemu_put_be64(f, (cur_gfn * TARGET_PAGE_SIZE) | S390_SKEYS_SAVE_FLAG_SKEYS); - qemu_put_be64(f, pages_left); + guest_phys_blocks_init(&guest_phys_blocks); + guest_phys_blocks_append(&guest_phys_blocks); - while (pages_left) { - read_count = MIN(pages_left, S390_SKEYS_BUFFER_SIZE); + /* Send each contiguous physical memory range separately. 
*/ + QTAILQ_FOREACH(block, &guest_phys_blocks.head, next) { + assert(QEMU_IS_ALIGNED(block->target_start, TARGET_PAGE_SIZE)); + assert(QEMU_IS_ALIGNED(block->target_end, TARGET_PAGE_SIZE)); - if (!error) { - error = skeyclass->get_skeys(ss, cur_gfn, read_count, buf); - if (error) { - /* - * If error: we want to fill the stream with valid data instead - * of stopping early so we pad the stream with 0x00 values and - * use S390_SKEYS_SAVE_FLAG_ERROR to indicate failure to the - * reading side. - */ - error_report("S390_GET_KEYS error %d", error); - memset(buf, 0, S390_SKEYS_BUFFER_SIZE); - eos = S390_SKEYS_SAVE_FLAG_ERROR; + gfn = block->target_start / TARGET_PAGE_SIZE; + pages = (block->target_end - block->target_start) / TARGET_PAGE_SIZE; + qemu_put_be64(f, block->target_start | S390_SKEYS_SAVE_FLAG_SKEYS); + qemu_put_be64(f, pages); + + while (pages) { + const uint64_t cur_pages = MIN(pages, S390_SKEYS_BUFFER_SIZE); + + if (!error) { + error = skeyclass->get_skeys(ss, gfn, cur_pages, buf); + if (error) { + /* + * Create a valid stream with all 0x00 and indicate + * S390_SKEYS_SAVE_FLAG_ERROR to the destination. + */ + error_report("S390_GET_KEYS error %d", error); + memset(buf, 0, S390_SKEYS_BUFFER_SIZE); + } } + + qemu_put_buffer(f, buf, cur_pages); + gfn += cur_pages; + pages -= cur_pages; } - qemu_put_buffer(f, buf, read_count); - cur_gfn += read_count; - pages_left -= read_count; + if (error) { + break; + } } + guest_phys_blocks_free(&guest_phys_blocks); g_free(buf); end_stream: - qemu_put_be64(f, eos); + if (error) { + qemu_put_be64(f, S390_SKEYS_SAVE_FLAG_ERROR); + } else { + qemu_put_be64(f, S390_SKEYS_SAVE_FLAG_EOS); + } } static int s390_storage_keys_load(QEMUFile *f, void *opaque, int version_id) @@ -312,6 +370,14 @@ static int s390_storage_keys_load(QEMUFile *f, void *opaque, int version_id) S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); int ret = 0; + /* + * Make sure to lazy-enable if required to be done explicitly. No need to + * flush any TLB as the VM is not running yet. 
+ */ + if (skeyclass->enable_skeys) { + skeyclass->enable_skeys(ss); + } + while (!ret) { ram_addr_t addr; int flags; diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index e4b18aef49..2e64ffab45 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ b/hw/s390x/s390-virtio-ccw.c @@ -25,6 +25,7 @@ #include "qemu/error-report.h" #include "qemu/option.h" #include "qemu/qemu-print.h" +#include "qemu/units.h" #include "hw/s390x/s390-pci-bus.h" #include "sysemu/reset.h" #include "hw/s390x/storage-keys.h" @@ -42,6 +43,7 @@ #include "sysemu/sysemu.h" #include "hw/s390x/pv.h" #include "migration/blocker.h" +#include "qapi/visitor.h" static Error *pv_mig_blocker; @@ -83,8 +85,15 @@ out: static void s390_init_cpus(MachineState *machine) { MachineClass *mc = MACHINE_GET_CLASS(machine); + S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc); int i; + if (machine->smp.threads > s390mc->max_threads) { + error_report("S390 does not support more than %d threads.", + s390mc->max_threads); + exit(1); + } + /* initialize possible_cpus */ mc->possible_cpu_arch_ids(machine); @@ -345,7 +354,7 @@ static int s390_machine_protect(S390CcwMachineState *ms) } error_setg(&pv_mig_blocker, - "protected VMs are currently not migrateable."); + "protected VMs are currently not migratable."); rc = migrate_add_blocker(pv_mig_blocker, &local_err); if (rc) { ram_block_discard_disable(false); @@ -365,6 +374,12 @@ static int s390_machine_protect(S390CcwMachineState *ms) ms->pv = true; + /* Will return 0 if API is not available since it's not vital */ + rc = s390_pv_query_info(); + if (rc) { + goto out_err; + } + /* Set SE header and unpack */ rc = s390_ipl_prepare_pv_header(); if (rc) { @@ -404,7 +419,7 @@ static void s390_pv_prepare_reset(S390CcwMachineState *ms) s390_pv_prep_reset(); } -static void s390_machine_reset(MachineState *machine) +static void s390_machine_reset(MachineState *machine, ShutdownCause reason) { S390CcwMachineState *ms = S390_CCW_MACHINE(machine); enum s390_reset reset_type; @@ -426,7 +441,7 @@ static void s390_machine_reset(MachineState *machine) s390_machine_unprotect(ms); } - qemu_devices_reset(); + qemu_devices_reset(reason); s390_crypto_reset(); /* configure and start the ipl CPU only */ @@ -434,7 +449,7 @@ static void s390_machine_reset(MachineState *machine) break; case S390_RESET_MODIFIED_CLEAR: /* - * Susbsystem reset needs to be done before we unshare memory + * Subsystem reset needs to be done before we unshare memory * and lose access to VIRTIO structures in guest memory. */ subsystem_reset(); @@ -447,7 +462,7 @@ static void s390_machine_reset(MachineState *machine) break; case S390_RESET_LOAD_NORMAL: /* - * Susbsystem reset needs to be done before we unshare memory + * Subsystem reset needs to be done before we unshare memory * and lose access to VIRTIO structures in guest memory. 
*/ subsystem_reset(); @@ -582,38 +597,6 @@ static ram_addr_t s390_fixup_ram_size(ram_addr_t sz) return newsz; } -static void ccw_machine_class_init(ObjectClass *oc, void *data) -{ - MachineClass *mc = MACHINE_CLASS(oc); - NMIClass *nc = NMI_CLASS(oc); - HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); - S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc); - - s390mc->ri_allowed = true; - s390mc->cpu_model_allowed = true; - s390mc->css_migration_enabled = true; - s390mc->hpage_1m_allowed = true; - mc->init = ccw_init; - mc->reset = s390_machine_reset; - mc->block_default_type = IF_VIRTIO; - mc->no_cdrom = 1; - mc->no_floppy = 1; - mc->no_parallel = 1; - mc->no_sdcard = 1; - mc->max_cpus = S390_MAX_CPUS; - mc->has_hotpluggable_cpus = true; - assert(!mc->get_hotplug_handler); - mc->get_hotplug_handler = s390_get_hotplug_handler; - mc->cpu_index_to_instance_props = s390_cpu_index_to_props; - mc->possible_cpu_arch_ids = s390_possible_cpu_arch_ids; - /* it is overridden with 'host' cpu *in kvm_arch_init* */ - mc->default_cpu_type = S390_CPU_TYPE_NAME("qemu"); - hc->plug = s390_machine_device_plug; - hc->unplug_request = s390_machine_device_unplug_request; - nc->nmi_monitor_handler = s390_nmi; - mc->default_ram_id = "s390.ram"; -} - static inline bool machine_get_aes_key_wrap(Object *obj, Error **errp) { S390CcwMachineState *ms = S390_CCW_MACHINE(obj); @@ -688,19 +671,29 @@ bool hpage_1m_allowed(void) return get_machine_class()->hpage_1m_allowed; } -static char *machine_get_loadparm(Object *obj, Error **errp) +static void machine_get_loadparm(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) { S390CcwMachineState *ms = S390_CCW_MACHINE(obj); + char *str = g_strndup((char *) ms->loadparm, sizeof(ms->loadparm)); - /* make a NUL-terminated string */ - return g_strndup((char *) ms->loadparm, sizeof(ms->loadparm)); + visit_type_str(v, name, &str, errp); + g_free(str); } -static void machine_set_loadparm(Object *obj, const char *val, Error **errp) +static void machine_set_loadparm(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) { S390CcwMachineState *ms = S390_CCW_MACHINE(obj); + char *val; int i; + if (!visit_type_str(v, name, &val, errp)) { + return; + } + for (i = 0; i < sizeof(ms->loadparm) && val[i]; i++) { uint8_t c = qemu_toupper(val[i]); /* mimic HMC */ @@ -718,29 +711,68 @@ static void machine_set_loadparm(Object *obj, const char *val, Error **errp) ms->loadparm[i] = ' '; /* pad right with spaces */ } } -static inline void s390_machine_initfn(Object *obj) -{ - object_property_add_bool(obj, "aes-key-wrap", - machine_get_aes_key_wrap, - machine_set_aes_key_wrap); - object_property_set_description(obj, "aes-key-wrap", - "enable/disable AES key wrapping using the CPACF wrapping key"); - object_property_set_bool(obj, "aes-key-wrap", true, NULL); - object_property_add_bool(obj, "dea-key-wrap", - machine_get_dea_key_wrap, - machine_set_dea_key_wrap); - object_property_set_description(obj, "dea-key-wrap", +static void ccw_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + NMIClass *nc = NMI_CLASS(oc); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); + S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc); + + s390mc->ri_allowed = true; + s390mc->cpu_model_allowed = true; + s390mc->css_migration_enabled = true; + s390mc->hpage_1m_allowed = true; + s390mc->max_threads = 1; + mc->init = ccw_init; + mc->reset = s390_machine_reset; + mc->block_default_type = IF_VIRTIO; + mc->no_cdrom = 1; + 
mc->no_floppy = 1; + mc->no_parallel = 1; + mc->no_sdcard = 1; + mc->max_cpus = S390_MAX_CPUS; + mc->has_hotpluggable_cpus = true; + assert(!mc->get_hotplug_handler); + mc->get_hotplug_handler = s390_get_hotplug_handler; + mc->cpu_index_to_instance_props = s390_cpu_index_to_props; + mc->possible_cpu_arch_ids = s390_possible_cpu_arch_ids; + /* it is overridden with 'host' cpu *in kvm_arch_init* */ + mc->default_cpu_type = S390_CPU_TYPE_NAME("qemu"); + hc->plug = s390_machine_device_plug; + hc->unplug_request = s390_machine_device_unplug_request; + nc->nmi_monitor_handler = s390_nmi; + mc->default_ram_id = "s390.ram"; + + object_class_property_add_bool(oc, "aes-key-wrap", + machine_get_aes_key_wrap, + machine_set_aes_key_wrap); + object_class_property_set_description(oc, "aes-key-wrap", + "enable/disable AES key wrapping using the CPACF wrapping key"); + + object_class_property_add_bool(oc, "dea-key-wrap", + machine_get_dea_key_wrap, + machine_set_dea_key_wrap); + object_class_property_set_description(oc, "dea-key-wrap", "enable/disable DEA key wrapping using the CPACF wrapping key"); - object_property_set_bool(obj, "dea-key-wrap", true, NULL); - object_property_add_str(obj, "loadparm", - machine_get_loadparm, machine_set_loadparm); - object_property_set_description(obj, "loadparm", + + object_class_property_add(oc, "loadparm", "loadparm", + machine_get_loadparm, machine_set_loadparm, + NULL, NULL); + object_class_property_set_description(oc, "loadparm", "Up to 8 chars in set of [A-Za-z0-9. ] (lower case chars converted" " to upper case) to pass to machine loader, boot manager," " and guest kernel"); } +static inline void s390_machine_initfn(Object *obj) +{ + S390CcwMachineState *ms = S390_CCW_MACHINE(obj); + + ms->aes_key_wrap = true; + ms->dea_key_wrap = true; +} + static const TypeInfo ccw_machine_info = { .name = TYPE_S390_CCW_MACHINE, .parent = TYPE_MACHINE, @@ -767,7 +799,7 @@ bool css_migration_enabled(void) { \ MachineClass *mc = MACHINE_CLASS(oc); \ ccw_machine_##suffix##_class_options(mc); \ - mc->desc = "VirtIO-ccw based S390 machine v" verstr; \ + mc->desc = "Virtual s390x machine (version " verstr ")"; \ if (latest) { \ mc->alias = "s390-ccw-virtio"; \ mc->is_default = true; \ @@ -791,14 +823,86 @@ bool css_migration_enabled(void) } \ type_init(ccw_machine_register_##suffix) +static void ccw_machine_7_2_instance_options(MachineState *machine) +{ +} + +static void ccw_machine_7_2_class_options(MachineClass *mc) +{ +} +DEFINE_CCW_MACHINE(7_2, "7.2", true); + +static void ccw_machine_7_1_instance_options(MachineState *machine) +{ + static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V7_1 }; + + ccw_machine_7_2_instance_options(machine); + s390_cpudef_featoff_greater(16, 1, S390_FEAT_PAIE); + s390_set_qemu_cpu_model(0x8561, 15, 1, qemu_cpu_feat); +} + +static void ccw_machine_7_1_class_options(MachineClass *mc) +{ + S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc); + static GlobalProperty compat[] = { + { TYPE_S390_PCI_DEVICE, "interpret", "off", }, + { TYPE_S390_PCI_DEVICE, "forwarding-assist", "off", }, + }; + + ccw_machine_7_2_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_7_1, hw_compat_7_1_len); + compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); + s390mc->max_threads = S390_MAX_CPUS; +} +DEFINE_CCW_MACHINE(7_1, "7.1", false); + +static void ccw_machine_7_0_instance_options(MachineState *machine) +{ + static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V7_0 }; + + ccw_machine_7_1_instance_options(machine); + 
s390_set_qemu_cpu_model(0x8561, 15, 1, qemu_cpu_feat); +} + +static void ccw_machine_7_0_class_options(MachineClass *mc) +{ + ccw_machine_7_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_7_0, hw_compat_7_0_len); +} +DEFINE_CCW_MACHINE(7_0, "7.0", false); + +static void ccw_machine_6_2_instance_options(MachineState *machine) +{ + static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V6_2 }; + + ccw_machine_7_0_instance_options(machine); + s390_set_qemu_cpu_model(0x3906, 14, 2, qemu_cpu_feat); +} + +static void ccw_machine_6_2_class_options(MachineClass *mc) +{ + ccw_machine_7_0_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len); +} +DEFINE_CCW_MACHINE(6_2, "6.2", false); + static void ccw_machine_6_1_instance_options(MachineState *machine) { + ccw_machine_6_2_instance_options(machine); + s390_cpudef_featoff_greater(16, 1, S390_FEAT_NNPA); + s390_cpudef_featoff_greater(16, 1, S390_FEAT_VECTOR_PACKED_DECIMAL_ENH2); + s390_cpudef_featoff_greater(16, 1, S390_FEAT_BEAR_ENH); + s390_cpudef_featoff_greater(16, 1, S390_FEAT_RDP); + s390_cpudef_featoff_greater(16, 1, S390_FEAT_PAI); } static void ccw_machine_6_1_class_options(MachineClass *mc) { + ccw_machine_6_2_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len); + mc->smp_props.prefer_sockets = true; } -DEFINE_CCW_MACHINE(6_1, "6.1", true); +DEFINE_CCW_MACHINE(6_1, "6.1", false); static void ccw_machine_6_0_instance_options(MachineState *machine) { diff --git a/hw/s390x/sclp.c b/hw/s390x/sclp.c index edb6e3ea01..eff74479f4 100644 --- a/hw/s390x/sclp.c +++ b/hw/s390x/sclp.c @@ -51,7 +51,7 @@ static bool sccb_verify_boundary(uint64_t sccb_addr, uint16_t sccb_len, uint32_t code) { uint64_t sccb_max_addr = sccb_addr + sccb_len - 1; - uint64_t sccb_boundary = (sccb_addr & PAGE_MASK) + PAGE_SIZE; + uint64_t sccb_boundary = (sccb_addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; switch (code & SCLP_CMD_CODE_MASK) { case SCLP_CMDW_READ_SCP_INFO: @@ -460,7 +460,7 @@ static void sclp_class_init(ObjectClass *oc, void *data) sc->service_interrupt = service_interrupt; } -static TypeInfo sclp_info = { +static const TypeInfo sclp_info = { .name = TYPE_SCLP, .parent = TYPE_DEVICE, .instance_init = sclp_init, diff --git a/hw/s390x/tod-kvm.c b/hw/s390x/tod-kvm.c index ec855811ae..e2202dae2d 100644 --- a/hw/s390x/tod-kvm.c +++ b/hw/s390x/tod-kvm.c @@ -13,6 +13,7 @@ #include "qemu/module.h" #include "sysemu/runstate.h" #include "hw/s390x/tod.h" +#include "hw/s390x/pv.h" #include "kvm/kvm_s390x.h" static void kvm_s390_get_tod_raw(S390TOD *tod, Error **errp) @@ -84,6 +85,14 @@ static void kvm_s390_tod_vm_state_change(void *opaque, bool running, S390TODState *td = opaque; Error *local_err = NULL; + /* + * Under PV, the clock is under ultravisor control, hence we cannot restore + * it on resume. + */ + if (s390_is_pv()) { + return; + } + if (running && td->stopped) { /* Set the old TOD when running the VM - start the TOD clock. 
*/ kvm_s390_set_tod_raw(&td->base, &local_err); @@ -147,7 +156,7 @@ static void kvm_s390_tod_init(Object *obj) td->stopped = false; } -static TypeInfo kvm_s390_tod_info = { +static const TypeInfo kvm_s390_tod_info = { .name = TYPE_KVM_S390_TOD, .parent = TYPE_S390_TOD, .instance_size = sizeof(S390TODState), diff --git a/hw/s390x/tod-tcg.c b/hw/s390x/tod-tcg.c index 9bb94ff72b..2d540dba65 100644 --- a/hw/s390x/tod-tcg.c +++ b/hw/s390x/tod-tcg.c @@ -9,7 +9,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/error.h" #include "hw/s390x/tod.h" #include "qemu/timer.h" @@ -17,6 +16,7 @@ #include "qemu/module.h" #include "cpu.h" #include "tcg/tcg_s390x.h" +#include "sysemu/rtc.h" static void qemu_s390_tod_get(const S390TODState *td, S390TOD *tod, Error **errp) @@ -73,7 +73,7 @@ static void qemu_s390_tod_init(Object *obj) } } -static TypeInfo qemu_s390_tod_info = { +static const TypeInfo qemu_s390_tod_info = { .name = TYPE_QEMU_S390_TOD, .parent = TYPE_S390_TOD, .instance_size = sizeof(S390TODState), diff --git a/hw/s390x/tod.c b/hw/s390x/tod.c index fd5a36bf24..c81b1c0338 100644 --- a/hw/s390x/tod.c +++ b/hw/s390x/tod.c @@ -123,7 +123,7 @@ static void s390_tod_class_init(ObjectClass *oc, void *data) dc->user_creatable = false; } -static TypeInfo s390_tod_info = { +static const TypeInfo s390_tod_info = { .name = TYPE_S390_TOD, .parent = TYPE_DEVICE, .instance_size = sizeof(S390TODState), diff --git a/hw/s390x/vhost-scsi-ccw.c b/hw/s390x/vhost-scsi-ccw.c new file mode 100644 index 0000000000..40dc14bbc7 --- /dev/null +++ b/hw/s390x/vhost-scsi-ccw.c @@ -0,0 +1,73 @@ +/* + * vhost ccw scsi implementation + * + * Copyright 2012, 2015 IBM Corp. + * Author(s): Cornelia Huck + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "virtio-ccw.h" +#include "hw/virtio/vhost-scsi.h" + +#define TYPE_VHOST_SCSI_CCW "vhost-scsi-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VHostSCSICcw, VHOST_SCSI_CCW) + +struct VHostSCSICcw { + VirtioCcwDevice parent_obj; + VHostSCSI vdev; +}; + +static void vhost_ccw_scsi_realize(VirtioCcwDevice *ccw_dev, Error **errp) +{ + VHostSCSICcw *dev = VHOST_SCSI_CCW(ccw_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + qdev_realize(vdev, BUS(&ccw_dev->bus), errp); +} + +static void vhost_ccw_scsi_instance_init(Object *obj) +{ + VHostSCSICcw *dev = VHOST_SCSI_CCW(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_SCSI); +} + +static Property vhost_ccw_scsi_properties[] = { + DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, + VIRTIO_CCW_MAX_REV), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vhost_ccw_scsi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); + + k->realize = vhost_ccw_scsi_realize; + device_class_set_props(dc, vhost_ccw_scsi_properties); + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo vhost_ccw_scsi = { + .name = TYPE_VHOST_SCSI_CCW, + .parent = TYPE_VIRTIO_CCW_DEVICE, + .instance_size = sizeof(VHostSCSICcw), + .instance_init = vhost_ccw_scsi_instance_init, + .class_init = vhost_ccw_scsi_class_init, +}; + +static void virtio_ccw_scsi_register(void) +{ + type_register_static(&vhost_ccw_scsi); +} + +type_init(virtio_ccw_scsi_register) diff --git a/hw/s390x/vhost-vsock-ccw.c b/hw/s390x/vhost-vsock-ccw.c index 246416a8f9..07845a9a00 100644 --- a/hw/s390x/vhost-vsock-ccw.c +++ b/hw/s390x/vhost-vsock-ccw.c @@ -12,6 +12,15 @@ #include "qapi/error.h" #include "qemu/module.h" #include "virtio-ccw.h" +#include "hw/virtio/vhost-vsock.h" + +#define TYPE_VHOST_VSOCK_CCW "vhost-vsock-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VHostVSockCCWState, VHOST_VSOCK_CCW) + +struct VHostVSockCCWState { + VirtioCcwDevice parent_obj; + VHostVSock vdev; +}; static Property vhost_vsock_ccw_properties[] = { DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, diff --git a/hw/s390x/virtio-ccw-9p.c b/hw/s390x/virtio-ccw-9p.c index 88c8884fc5..6f931f5994 100644 --- a/hw/s390x/virtio-ccw-9p.c +++ b/hw/s390x/virtio-ccw-9p.c @@ -15,6 +15,15 @@ #include "qapi/error.h" #include "qemu/module.h" #include "virtio-ccw.h" +#include "hw/9pfs/virtio-9p.h" + +#define TYPE_VIRTIO_9P_CCW "virtio-9p-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(V9fsCCWState, VIRTIO_9P_CCW) + +struct V9fsCCWState { + VirtioCcwDevice parent_obj; + V9fsVirtioState vdev; +}; static void virtio_ccw_9p_realize(VirtioCcwDevice *ccw_dev, Error **errp) { diff --git a/hw/s390x/virtio-ccw-balloon.c b/hw/s390x/virtio-ccw-balloon.c index 4c7631a433..44287b9bbe 100644 --- a/hw/s390x/virtio-ccw-balloon.c +++ b/hw/s390x/virtio-ccw-balloon.c @@ -15,6 +15,15 @@ #include "qapi/error.h" #include "qemu/module.h" #include "virtio-ccw.h" +#include "hw/virtio/virtio-balloon.h" + +#define TYPE_VIRTIO_BALLOON_CCW "virtio-balloon-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBalloonCcw, VIRTIO_BALLOON_CCW) + +struct VirtIOBalloonCcw { + VirtioCcwDevice parent_obj; + VirtIOBalloon vdev; +}; static void virtio_ccw_balloon_realize(VirtioCcwDevice *ccw_dev, Error **errp) { diff --git a/hw/s390x/virtio-ccw-blk.c b/hw/s390x/virtio-ccw-blk.c index 2294ce1ce4..8e0e58b77d 100644 --- 
a/hw/s390x/virtio-ccw-blk.c +++ b/hw/s390x/virtio-ccw-blk.c @@ -15,6 +15,15 @@ #include "qapi/error.h" #include "qemu/module.h" #include "virtio-ccw.h" +#include "hw/virtio/virtio-blk.h" + +#define TYPE_VIRTIO_BLK_CCW "virtio-blk-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBlkCcw, VIRTIO_BLK_CCW) + +struct VirtIOBlkCcw { + VirtioCcwDevice parent_obj; + VirtIOBlock vdev; +}; static void virtio_ccw_blk_realize(VirtioCcwDevice *ccw_dev, Error **errp) { diff --git a/hw/s390x/virtio-ccw-crypto.c b/hw/s390x/virtio-ccw-crypto.c index 358c74fb4b..0fa2f89443 100644 --- a/hw/s390x/virtio-ccw-crypto.c +++ b/hw/s390x/virtio-ccw-crypto.c @@ -14,6 +14,15 @@ #include "qapi/error.h" #include "qemu/module.h" #include "virtio-ccw.h" +#include "hw/virtio/virtio-crypto.h" + +#define TYPE_VIRTIO_CRYPTO_CCW "virtio-crypto-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIOCryptoCcw, VIRTIO_CRYPTO_CCW) + +struct VirtIOCryptoCcw { + VirtioCcwDevice parent_obj; + VirtIOCrypto vdev; +}; static void virtio_ccw_crypto_realize(VirtioCcwDevice *ccw_dev, Error **errp) { diff --git a/hw/s390x/virtio-ccw-gpu.c b/hw/s390x/virtio-ccw-gpu.c index 5868a2a070..0642c5281d 100644 --- a/hw/s390x/virtio-ccw-gpu.c +++ b/hw/s390x/virtio-ccw-gpu.c @@ -14,6 +14,15 @@ #include "qapi/error.h" #include "qemu/module.h" #include "virtio-ccw.h" +#include "hw/virtio/virtio-gpu.h" + +#define TYPE_VIRTIO_GPU_CCW "virtio-gpu-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIOGPUCcw, VIRTIO_GPU_CCW) + +struct VirtIOGPUCcw { + VirtioCcwDevice parent_obj; + VirtIOGPU vdev; +}; static void virtio_ccw_gpu_realize(VirtioCcwDevice *ccw_dev, Error **errp) { @@ -60,6 +69,7 @@ static const TypeInfo virtio_ccw_gpu = { .class_init = virtio_ccw_gpu_class_init, }; module_obj(TYPE_VIRTIO_GPU_CCW); +module_kconfig(VIRTIO_CCW); static void virtio_ccw_gpu_register(void) { diff --git a/hw/s390x/virtio-ccw-input.c b/hw/s390x/virtio-ccw-input.c index 83136fbba1..61a07ba38d 100644 --- a/hw/s390x/virtio-ccw-input.c +++ b/hw/s390x/virtio-ccw-input.c @@ -14,6 +14,26 @@ #include "qapi/error.h" #include "qemu/module.h" #include "virtio-ccw.h" +#include "hw/virtio/virtio-input.h" + +#define TYPE_VIRTIO_INPUT_CCW "virtio-input-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIOInputCcw, VIRTIO_INPUT_CCW) + +struct VirtIOInputCcw { + VirtioCcwDevice parent_obj; + VirtIOInput vdev; +}; + +#define TYPE_VIRTIO_INPUT_HID_CCW "virtio-input-hid-ccw" +#define TYPE_VIRTIO_KEYBOARD_CCW "virtio-keyboard-ccw" +#define TYPE_VIRTIO_MOUSE_CCW "virtio-mouse-ccw" +#define TYPE_VIRTIO_TABLET_CCW "virtio-tablet-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIOInputHIDCcw, VIRTIO_INPUT_HID_CCW) + +struct VirtIOInputHIDCcw { + VirtioCcwDevice parent_obj; + VirtIOInputHID vdev; +}; static void virtio_ccw_input_realize(VirtioCcwDevice *ccw_dev, Error **errp) { diff --git a/hw/s390x/virtio-ccw-net.c b/hw/s390x/virtio-ccw-net.c index 3860d4e6ea..484e617659 100644 --- a/hw/s390x/virtio-ccw-net.c +++ b/hw/s390x/virtio-ccw-net.c @@ -15,6 +15,15 @@ #include "qapi/error.h" #include "qemu/module.h" #include "virtio-ccw.h" +#include "hw/virtio/virtio-net.h" + +#define TYPE_VIRTIO_NET_CCW "virtio-net-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIONetCcw, VIRTIO_NET_CCW) + +struct VirtIONetCcw { + VirtioCcwDevice parent_obj; + VirtIONet vdev; +}; static void virtio_ccw_net_realize(VirtioCcwDevice *ccw_dev, Error **errp) { diff --git a/hw/s390x/virtio-ccw-rng.c b/hw/s390x/virtio-ccw-rng.c index 2e3a9da5e8..a3fffb5138 100644 --- a/hw/s390x/virtio-ccw-rng.c +++ b/hw/s390x/virtio-ccw-rng.c @@ -15,6 +15,15 @@ #include "qapi/error.h" #include "qemu/module.h" #include 
"virtio-ccw.h" +#include "hw/virtio/virtio-rng.h" + +#define TYPE_VIRTIO_RNG_CCW "virtio-rng-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIORNGCcw, VIRTIO_RNG_CCW) + +struct VirtIORNGCcw { + VirtioCcwDevice parent_obj; + VirtIORNG vdev; +}; static void virtio_ccw_rng_realize(VirtioCcwDevice *ccw_dev, Error **errp) { diff --git a/hw/s390x/virtio-ccw-scsi.c b/hw/s390x/virtio-ccw-scsi.c index 6e4beef700..d003f89f43 100644 --- a/hw/s390x/virtio-ccw-scsi.c +++ b/hw/s390x/virtio-ccw-scsi.c @@ -15,6 +15,15 @@ #include "qapi/error.h" #include "qemu/module.h" #include "virtio-ccw.h" +#include "hw/virtio/virtio-scsi.h" + +#define TYPE_VIRTIO_SCSI_CCW "virtio-scsi-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIOSCSICcw, VIRTIO_SCSI_CCW) + +struct VirtIOSCSICcw { + VirtioCcwDevice parent_obj; + VirtIOSCSI vdev; +}; static void virtio_ccw_scsi_realize(VirtioCcwDevice *ccw_dev, Error **errp) { @@ -70,56 +79,9 @@ static const TypeInfo virtio_ccw_scsi = { .class_init = virtio_ccw_scsi_class_init, }; -#ifdef CONFIG_VHOST_SCSI - -static void vhost_ccw_scsi_realize(VirtioCcwDevice *ccw_dev, Error **errp) -{ - VHostSCSICcw *dev = VHOST_SCSI_CCW(ccw_dev); - DeviceState *vdev = DEVICE(&dev->vdev); - - qdev_realize(vdev, BUS(&ccw_dev->bus), errp); -} - -static void vhost_ccw_scsi_instance_init(Object *obj) -{ - VHostSCSICcw *dev = VHOST_SCSI_CCW(obj); - - virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), - TYPE_VHOST_SCSI); -} - -static Property vhost_ccw_scsi_properties[] = { - DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, - VIRTIO_CCW_MAX_REV), - DEFINE_PROP_END_OF_LIST(), -}; - -static void vhost_ccw_scsi_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); - - k->realize = vhost_ccw_scsi_realize; - device_class_set_props(dc, vhost_ccw_scsi_properties); - set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); -} - -static const TypeInfo vhost_ccw_scsi = { - .name = TYPE_VHOST_SCSI_CCW, - .parent = TYPE_VIRTIO_CCW_DEVICE, - .instance_size = sizeof(VHostSCSICcw), - .instance_init = vhost_ccw_scsi_instance_init, - .class_init = vhost_ccw_scsi_class_init, -}; - -#endif - static void virtio_ccw_scsi_register(void) { type_register_static(&virtio_ccw_scsi); -#ifdef CONFIG_VHOST_SCSI - type_register_static(&vhost_ccw_scsi); -#endif } type_init(virtio_ccw_scsi_register) diff --git a/hw/s390x/virtio-ccw-serial.c b/hw/s390x/virtio-ccw-serial.c index 61958228d1..bf8057880f 100644 --- a/hw/s390x/virtio-ccw-serial.c +++ b/hw/s390x/virtio-ccw-serial.c @@ -15,6 +15,15 @@ #include "hw/qdev-properties.h" #include "hw/virtio/virtio-serial.h" #include "virtio-ccw.h" +#include "hw/virtio/virtio-serial.h" + +#define TYPE_VIRTIO_SERIAL_CCW "virtio-serial-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtioSerialCcw, VIRTIO_SERIAL_CCW) + +struct VirtioSerialCcw { + VirtioCcwDevice parent_obj; + VirtIOSerial vdev; +}; static void virtio_ccw_serial_realize(VirtioCcwDevice *ccw_dev, Error **errp) { diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c index 6a2df1c1e9..e33e5207ab 100644 --- a/hw/s390x/virtio-ccw.c +++ b/hw/s390x/virtio-ccw.c @@ -12,6 +12,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" +#include "exec/address-spaces.h" #include "sysemu/kvm.h" #include "net/net.h" #include "hw/virtio/virtio.h" @@ -19,6 +20,7 @@ #include "hw/virtio/virtio-net.h" #include "qemu/bitops.h" #include "qemu/error-report.h" +#include "qemu/log.h" #include "qemu/module.h" #include "hw/virtio/virtio-access.h" #include "hw/virtio/virtio-bus.h" @@ 
-247,12 +249,11 @@ static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info, return 0; } -static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev, VirtIODevice *vdev) +static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev) { CcwDevice *ccw_dev = CCW_DEVICE(dev); - virtio_ccw_stop_ioeventfd(dev); - virtio_reset(vdev); + virtio_bus_reset(&dev->bus); if (dev->indicators) { release_indicator(&dev->routes.adapter, dev->indicators); dev->indicators = NULL; @@ -357,7 +358,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw) ret = virtio_ccw_handle_set_vq(sch, ccw, check_len, dev->revision < 1); break; case CCW_CMD_VDEV_RESET: - virtio_ccw_reset_virtio(dev, vdev); + virtio_ccw_reset_virtio(dev); ret = 0; break; case CCW_CMD_READ_FEAT: @@ -534,7 +535,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw) } if (virtio_set_status(vdev, status) == 0) { if (vdev->status == 0) { - virtio_ccw_reset_virtio(dev, vdev); + virtio_ccw_reset_virtio(dev); } if (status & VIRTIO_CONFIG_S_DRIVER_OK) { virtio_ccw_start_ioeventfd(dev); @@ -919,10 +920,9 @@ static void virtio_ccw_notify(DeviceState *d, uint16_t vector) static void virtio_ccw_reset(DeviceState *d) { VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); - VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev); - virtio_ccw_reset_virtio(dev, vdev); + virtio_ccw_reset_virtio(dev); if (vdc->parent_reset) { vdc->parent_reset(d); } @@ -1261,8 +1261,7 @@ static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size, DeviceState *qdev = DEVICE(dev); char virtio_bus_name[] = "virtio-bus"; - qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_CCW_BUS, - qdev, virtio_bus_name); + qbus_init(bus, bus_size, TYPE_VIRTIO_CCW_BUS, qdev, virtio_bus_name); } static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data) diff --git a/hw/s390x/virtio-ccw.h b/hw/s390x/virtio-ccw.h index 0168232e3b..fac186c8f6 100644 --- a/hw/s390x/virtio-ccw.h +++ b/hw/s390x/virtio-ccw.h @@ -13,24 +13,8 @@ #ifndef HW_S390X_VIRTIO_CCW_H #define HW_S390X_VIRTIO_CCW_H -#include "hw/virtio/virtio-blk.h" -#include "hw/virtio/virtio-net.h" -#include "hw/virtio/virtio-serial.h" -#include "hw/virtio/virtio-scsi.h" #include "qom/object.h" -#ifdef CONFIG_VHOST_SCSI -#include "hw/virtio/vhost-scsi.h" -#endif -#include "hw/virtio/virtio-balloon.h" -#include "hw/virtio/virtio-rng.h" -#include "hw/virtio/virtio-crypto.h" #include "hw/virtio/virtio-bus.h" -#ifdef CONFIG_VHOST_VSOCK -#include "hw/virtio/vhost-vsock.h" -#endif /* CONFIG_VHOST_VSOCK */ -#include "hw/virtio/virtio-gpu.h" -#include "hw/virtio/virtio-input.h" - #include "hw/s390x/s390_flic.h" #include "hw/s390x/css.h" #include "ccw-device.h" @@ -104,139 +88,6 @@ static inline int virtio_ccw_rev_max(VirtioCcwDevice *dev) return dev->max_rev; } -/* virtio-scsi-ccw */ - -#define TYPE_VIRTIO_SCSI_CCW "virtio-scsi-ccw" -OBJECT_DECLARE_SIMPLE_TYPE(VirtIOSCSICcw, VIRTIO_SCSI_CCW) - -struct VirtIOSCSICcw { - VirtioCcwDevice parent_obj; - VirtIOSCSI vdev; -}; - -#ifdef CONFIG_VHOST_SCSI -/* vhost-scsi-ccw */ - -#define TYPE_VHOST_SCSI_CCW "vhost-scsi-ccw" -OBJECT_DECLARE_SIMPLE_TYPE(VHostSCSICcw, VHOST_SCSI_CCW) - -struct VHostSCSICcw { - VirtioCcwDevice parent_obj; - VHostSCSI vdev; -}; -#endif - -/* virtio-blk-ccw */ - -#define TYPE_VIRTIO_BLK_CCW "virtio-blk-ccw" -OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBlkCcw, VIRTIO_BLK_CCW) - -struct VirtIOBlkCcw { - VirtioCcwDevice parent_obj; - VirtIOBlock vdev; -}; - -/* virtio-balloon-ccw */ - -#define TYPE_VIRTIO_BALLOON_CCW 
"virtio-balloon-ccw" -OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBalloonCcw, VIRTIO_BALLOON_CCW) - -struct VirtIOBalloonCcw { - VirtioCcwDevice parent_obj; - VirtIOBalloon vdev; -}; - -/* virtio-serial-ccw */ - -#define TYPE_VIRTIO_SERIAL_CCW "virtio-serial-ccw" -OBJECT_DECLARE_SIMPLE_TYPE(VirtioSerialCcw, VIRTIO_SERIAL_CCW) - -struct VirtioSerialCcw { - VirtioCcwDevice parent_obj; - VirtIOSerial vdev; -}; - -/* virtio-net-ccw */ - -#define TYPE_VIRTIO_NET_CCW "virtio-net-ccw" -OBJECT_DECLARE_SIMPLE_TYPE(VirtIONetCcw, VIRTIO_NET_CCW) - -struct VirtIONetCcw { - VirtioCcwDevice parent_obj; - VirtIONet vdev; -}; - -/* virtio-rng-ccw */ - -#define TYPE_VIRTIO_RNG_CCW "virtio-rng-ccw" -OBJECT_DECLARE_SIMPLE_TYPE(VirtIORNGCcw, VIRTIO_RNG_CCW) - -struct VirtIORNGCcw { - VirtioCcwDevice parent_obj; - VirtIORNG vdev; -}; - -/* virtio-crypto-ccw */ - -#define TYPE_VIRTIO_CRYPTO_CCW "virtio-crypto-ccw" -OBJECT_DECLARE_SIMPLE_TYPE(VirtIOCryptoCcw, VIRTIO_CRYPTO_CCW) - -struct VirtIOCryptoCcw { - VirtioCcwDevice parent_obj; - VirtIOCrypto vdev; -}; - VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch); -#ifdef CONFIG_VIRTFS -#include "hw/9pfs/virtio-9p.h" - -#define TYPE_VIRTIO_9P_CCW "virtio-9p-ccw" -OBJECT_DECLARE_SIMPLE_TYPE(V9fsCCWState, VIRTIO_9P_CCW) - -struct V9fsCCWState { - VirtioCcwDevice parent_obj; - V9fsVirtioState vdev; -}; - -#endif /* CONFIG_VIRTFS */ - -#ifdef CONFIG_VHOST_VSOCK -#define TYPE_VHOST_VSOCK_CCW "vhost-vsock-ccw" -OBJECT_DECLARE_SIMPLE_TYPE(VHostVSockCCWState, VHOST_VSOCK_CCW) - -struct VHostVSockCCWState { - VirtioCcwDevice parent_obj; - VHostVSock vdev; -}; - -#endif /* CONFIG_VHOST_VSOCK */ - -#define TYPE_VIRTIO_GPU_CCW "virtio-gpu-ccw" -OBJECT_DECLARE_SIMPLE_TYPE(VirtIOGPUCcw, VIRTIO_GPU_CCW) - -struct VirtIOGPUCcw { - VirtioCcwDevice parent_obj; - VirtIOGPU vdev; -}; - -#define TYPE_VIRTIO_INPUT_CCW "virtio-input-ccw" -OBJECT_DECLARE_SIMPLE_TYPE(VirtIOInputCcw, VIRTIO_INPUT_CCW) - -struct VirtIOInputCcw { - VirtioCcwDevice parent_obj; - VirtIOInput vdev; -}; - -#define TYPE_VIRTIO_INPUT_HID_CCW "virtio-input-hid-ccw" -#define TYPE_VIRTIO_KEYBOARD_CCW "virtio-keyboard-ccw" -#define TYPE_VIRTIO_MOUSE_CCW "virtio-mouse-ccw" -#define TYPE_VIRTIO_TABLET_CCW "virtio-tablet-ccw" -OBJECT_DECLARE_SIMPLE_TYPE(VirtIOInputHIDCcw, VIRTIO_INPUT_HID_CCW) - -struct VirtIOInputHIDCcw { - VirtioCcwDevice parent_obj; - VirtIOInputHID vdev; -}; - #endif diff --git a/hw/scsi/Kconfig b/hw/scsi/Kconfig index 77d397c949..e7b34dc8e2 100644 --- a/hw/scsi/Kconfig +++ b/hw/scsi/Kconfig @@ -48,6 +48,11 @@ config VIRTIO_SCSI depends on VIRTIO select SCSI +config VHOST_SCSI + bool + default y + depends on VIRTIO && VHOST_KERNEL + config VHOST_USER_SCSI bool # Only PCI devices are provided for now diff --git a/hw/scsi/esp-pci.c b/hw/scsi/esp-pci.c index 9db10b1a48..1792f84cea 100644 --- a/hw/scsi/esp-pci.c +++ b/hw/scsi/esp-pci.c @@ -280,7 +280,7 @@ static void esp_pci_dma_memory_rw(PCIESPState *pci, uint8_t *buf, int len, len = pci->dma_regs[DMA_WBC]; } - pci_dma_rw(PCI_DEVICE(pci), addr, buf, len, dir); + pci_dma_rw(PCI_DEVICE(pci), addr, buf, len, dir, MEMTXATTRS_UNSPECIFIED); /* update status registers */ pci->dma_regs[DMA_WBC] -= len; @@ -388,7 +388,7 @@ static void esp_pci_scsi_realize(PCIDevice *dev, Error **errp) pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->io); s->irq = pci_allocate_irq(dev); - scsi_bus_new(&s->bus, sizeof(s->bus), d, &esp_pci_scsi_info, NULL); + scsi_bus_init(&s->bus, sizeof(s->bus), d, &esp_pci_scsi_info); } static void esp_pci_scsi_exit(PCIDevice *d) diff --git 
a/hw/scsi/esp.c b/hw/scsi/esp.c index 4ac2114788..e52188d022 100644 --- a/hw/scsi/esp.c +++ b/hw/scsi/esp.c @@ -195,6 +195,11 @@ static void esp_pdma_write(ESPState *s, uint8_t val) esp_set_tc(s, dmalen); } +static void esp_set_pdma_cb(ESPState *s, enum pdma_cb cb) +{ + s->pdma_cb = cb; +} + static int esp_select(ESPState *s) { int target; @@ -204,11 +209,6 @@ static int esp_select(ESPState *s) s->ti_size = 0; fifo8_reset(&s->fifo); - if (s->current_req) { - /* Started a new command before the old one finished. Cancel it. */ - scsi_req_cancel(s->current_req); - } - s->current_dev = scsi_device_find(&s->bus, 0, target, 0); if (!s->current_dev) { /* No such drive */ @@ -235,6 +235,11 @@ static uint32_t get_cmd(ESPState *s, uint32_t maxlen) uint32_t dmalen, n; int target; + if (s->current_req) { + /* Started a new command before the old one finished. Cancel it. */ + scsi_req_cancel(s->current_req); + } + target = s->wregs[ESP_WBUSID] & BUSID_DID; if (s->dma) { dmalen = MIN(esp_get_tc(s), maxlen); @@ -287,7 +292,7 @@ static void do_command_phase(ESPState *s) esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen); current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun); - s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, s); + s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s); datalen = scsi_req_enqueue(s->current_req); s->ti_size = datalen; fifo8_reset(&s->cmdfifo); @@ -356,7 +361,7 @@ static void handle_satn(ESPState *s) s->dma_cb = handle_satn; return; } - s->pdma_cb = satn_pdma_cb; + esp_set_pdma_cb(s, SATN_PDMA_CB); cmdlen = get_cmd(s, ESP_CMDFIFO_SZ); if (cmdlen > 0) { s->cmdfifo_cdb_offset = 1; @@ -387,7 +392,7 @@ static void handle_s_without_atn(ESPState *s) s->dma_cb = handle_s_without_atn; return; } - s->pdma_cb = s_without_satn_pdma_cb; + esp_set_pdma_cb(s, S_WITHOUT_SATN_PDMA_CB); cmdlen = get_cmd(s, ESP_CMDFIFO_SZ); if (cmdlen > 0) { s->cmdfifo_cdb_offset = 0; @@ -422,7 +427,7 @@ static void handle_satn_stop(ESPState *s) s->dma_cb = handle_satn_stop; return; } - s->pdma_cb = satn_stop_pdma_cb; + esp_set_pdma_cb(s, SATN_STOP_PDMA_CB); cmdlen = get_cmd(s, 1); if (cmdlen > 0) { trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo)); @@ -464,7 +469,7 @@ static void write_response(ESPState *s) s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC; s->rregs[ESP_RSEQ] = SEQ_CD; } else { - s->pdma_cb = write_response_pdma_cb; + esp_set_pdma_cb(s, WRITE_RESPONSE_PDMA_CB); esp_raise_drq(s); return; } @@ -510,7 +515,7 @@ static void do_dma_pdma_cb(ESPState *s) } else { /* * Extra message out bytes received: update cmdfifo_cdb_offset - * and then switch to commmand phase + * and then switch to command phase */ s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo); s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD; @@ -604,7 +609,7 @@ static void esp_do_dma(ESPState *s) s->dma_memory_read(s->dma_opaque, buf, len); fifo8_push_all(&s->cmdfifo, buf, len); } else { - s->pdma_cb = do_dma_pdma_cb; + esp_set_pdma_cb(s, DO_DMA_PDMA_CB); esp_raise_drq(s); return; } @@ -622,7 +627,7 @@ static void esp_do_dma(ESPState *s) } else { /* * Extra message out bytes received: update cmdfifo_cdb_offset - * and then switch to commmand phase + * and then switch to command phase */ s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo); s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD; @@ -646,7 +651,7 @@ static void esp_do_dma(ESPState *s) if (s->dma_memory_read) { s->dma_memory_read(s->dma_opaque, s->async_buf, len); } else { - s->pdma_cb = do_dma_pdma_cb; + esp_set_pdma_cb(s, DO_DMA_PDMA_CB); esp_raise_drq(s); 
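The esp.c hunks above replace the raw pdma_cb function pointer with an enum value recorded through esp_set_pdma_cb() and dispatched later from a single switch; storing a small integer instead of a pointer is what lets the callback be carried as a plain VMSTATE_UINT8 in the esp/pdma subsection added further down. A minimal standalone sketch of that pattern, using hypothetical demo_* names rather than the actual QEMU types:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Callback identifiers: small integers serialize trivially, unlike
 * function pointers, which are meaningless once a saved state is reloaded. */
enum demo_cb {
    DEMO_CB_NONE = 0,
    DEMO_CB_FOO,
    DEMO_CB_BAR,
};

struct demo_state {
    uint8_t cb;   /* would be saved/restored as a plain byte */
};

static void demo_foo(struct demo_state *s) { (void)s; puts("foo"); }
static void demo_bar(struct demo_state *s) { (void)s; puts("bar"); }

/* Record *which* callback is pending, not its address. */
static void demo_set_cb(struct demo_state *s, enum demo_cb cb)
{
    s->cb = cb;
}

/* Central dispatcher, analogous in shape to esp_pdma_cb() above. */
static void demo_run_cb(struct demo_state *s)
{
    switch (s->cb) {
    case DEMO_CB_FOO:
        demo_foo(s);
        break;
    case DEMO_CB_BAR:
        demo_bar(s);
        break;
    default:
        assert(0);
    }
}

int main(void)
{
    struct demo_state s = { DEMO_CB_NONE };

    demo_set_cb(&s, DEMO_CB_BAR);
    demo_run_cb(&s);   /* prints "bar" */
    return 0;
}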
return; } @@ -678,7 +683,7 @@ static void esp_do_dma(ESPState *s) } esp_set_tc(s, esp_get_tc(s) - len); - s->pdma_cb = do_dma_pdma_cb; + esp_set_pdma_cb(s, DO_DMA_PDMA_CB); esp_raise_drq(s); /* Indicate transfer to FIFO is complete */ @@ -733,7 +738,7 @@ static void esp_do_nodma(ESPState *s) } else { /* * Extra message out bytes received: update cmdfifo_cdb_offset - * and then switch to commmand phase + * and then switch to command phase */ s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo); s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD; @@ -777,6 +782,29 @@ static void esp_do_nodma(ESPState *s) esp_raise_irq(s); } +static void esp_pdma_cb(ESPState *s) +{ + switch (s->pdma_cb) { + case SATN_PDMA_CB: + satn_pdma_cb(s); + break; + case S_WITHOUT_SATN_PDMA_CB: + s_without_satn_pdma_cb(s); + break; + case SATN_STOP_PDMA_CB: + satn_stop_pdma_cb(s); + break; + case WRITE_RESPONSE_PDMA_CB: + write_response_pdma_cb(s); + break; + case DO_DMA_PDMA_CB: + do_dma_pdma_cb(s); + break; + default: + g_assert_not_reached(); + } +} + void esp_command_complete(SCSIRequest *req, size_t resid) { ESPState *s = req->hba_private; @@ -894,6 +922,7 @@ void esp_hard_reset(ESPState *s) memset(s->wregs, 0, ESP_REGS); s->tchi_written = 0; s->ti_size = 0; + s->async_len = 0; fifo8_reset(&s->fifo); fifo8_reset(&s->cmdfifo); s->dma = 0; @@ -910,6 +939,11 @@ static void esp_soft_reset(ESPState *s) esp_hard_reset(s); } +static void esp_bus_reset(ESPState *s) +{ + bus_cold_reset(BUS(&s->bus)); +} + static void parent_esp_reset(ESPState *s, int irq, int level) { if (level) { @@ -1038,6 +1072,7 @@ void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val) break; case CMD_BUSRESET: trace_esp_mem_writeb_cmd_bus_reset(val); + esp_bus_reset(s); if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) { s->rregs[ESP_RINTR] |= INTR_RST; esp_raise_irq(s); @@ -1180,6 +1215,33 @@ static int esp_post_load(void *opaque, int version_id) return 0; } +/* + * PDMA (or pseudo-DMA) is only used on the Macintosh and requires the + * guest CPU to perform the transfers between the SCSI bus and memory + * itself. This is indicated by the dma_memory_read and dma_memory_write + * functions being NULL (in contrast to the ESP PCI device) whilst + * dma_enabled is still set. 
+ */ + +static bool esp_pdma_needed(void *opaque) +{ + ESPState *s = ESP(opaque); + + return s->dma_memory_read == NULL && s->dma_memory_write == NULL && + s->dma_enabled; +} + +static const VMStateDescription vmstate_esp_pdma = { + .name = "esp/pdma", + .version_id = 0, + .minimum_version_id = 0, + .needed = esp_pdma_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(pdma_cb, ESPState), + VMSTATE_END_OF_LIST() + } +}; + const VMStateDescription vmstate_esp = { .name = "esp", .version_id = 6, @@ -1214,6 +1276,10 @@ const VMStateDescription vmstate_esp = { VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6), VMSTATE_END_OF_LIST() }, + .subsections = (const VMStateDescription * []) { + &vmstate_esp_pdma, + NULL + } }; static void sysbus_esp_mem_write(void *opaque, hwaddr addr, @@ -1262,7 +1328,7 @@ static void sysbus_esp_pdma_write(void *opaque, hwaddr addr, esp_pdma_write(s, val); break; } - s->pdma_cb(s); + esp_pdma_cb(s); } static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr, @@ -1284,11 +1350,20 @@ static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr, break; } if (fifo8_num_used(&s->fifo) < 2) { - s->pdma_cb(s); + esp_pdma_cb(s); } return val; } +static void *esp_load_request(QEMUFile *f, SCSIRequest *req) +{ + ESPState *s = container_of(req->bus, ESPState, bus); + + scsi_req_ref(req); + s->current_req = req; + return s; +} + static const MemoryRegionOps sysbus_esp_pdma_ops = { .read = sysbus_esp_pdma_read, .write = sysbus_esp_pdma_write, @@ -1304,6 +1379,7 @@ static const struct SCSIBusInfo esp_scsi_info = { .max_target = ESP_MAX_DEVS, .max_lun = 7, + .load_request = esp_load_request, .transfer_data = esp_transfer_data, .complete = esp_command_complete, .cancel = esp_request_cancelled @@ -1348,7 +1424,7 @@ static void sysbus_esp_realize(DeviceState *dev, Error **errp) qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2); - scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL); + scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info); } static void sysbus_esp_hard_reset(DeviceState *dev) diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c index e2c19180a0..42532c4744 100644 --- a/hw/scsi/lsi53c895a.c +++ b/hw/scsi/lsi53c895a.c @@ -621,8 +621,7 @@ static void lsi_do_dma(LSIState *s, int out) dma_addr_t addr; SCSIDevice *dev; - assert(s->current); - if (!s->current->dma_len) { + if (!s->current || !s->current->dma_len) { /* Wait until data is available. */ trace_lsi_do_dma_unavailable(); return; @@ -865,7 +864,7 @@ static void lsi_do_command(LSIState *s) s->current = g_new0(lsi_request, 1); s->current->tag = s->select_tag; s->current->req = scsi_req_new(dev, s->current->tag, s->current_lun, buf, - s->current); + s->dbc, s->current); n = scsi_req_enqueue(s->current->req); if (n) { @@ -1029,8 +1028,9 @@ static void lsi_do_msgout(LSIState *s) case 0x0d: /* The ABORT TAG message clears the current I/O process only. 
*/ trace_lsi_do_msgout_abort(current_tag); - if (current_req) { + if (current_req && current_req->req) { scsi_req_cancel(current_req->req); + current_req = NULL; } lsi_disconnect(s); break; @@ -1056,6 +1056,7 @@ static void lsi_do_msgout(LSIState *s) /* clear the current I/O process */ if (s->current) { scsi_req_cancel(s->current->req); + current_req = NULL; } /* As the current implemented devices scsi_disk and scsi_generic @@ -1133,15 +1134,24 @@ static void lsi_execute_script(LSIState *s) uint32_t addr, addr_high; int opcode; int insn_processed = 0; + static int reentrancy_level; + + reentrancy_level++; s->istat1 |= LSI_ISTAT1_SRUN; again: - if (++insn_processed > LSI_MAX_INSN) { - /* Some windows drivers make the device spin waiting for a memory - location to change. If we have been executed a lot of code then - assume this is the case and force an unexpected device disconnect. - This is apparently sufficient to beat the drivers into submission. - */ + /* + * Some windows drivers make the device spin waiting for a memory location + * to change. If we have executed more than LSI_MAX_INSN instructions then + * assume this is the case and force an unexpected device disconnect. This + * is apparently sufficient to beat the drivers into submission. + * + * Another issue (CVE-2023-0330) can occur if the script is programmed to + * trigger itself again and again. Avoid this problem by stopping after + * being called multiple times in a reentrant way (8 is an arbitrary value + * which should be enough for all valid use cases). + */ + if (++insn_processed > LSI_MAX_INSN || reentrancy_level > 8) { if (!(s->sien0 & LSI_SIST0_UDC)) { qemu_log_mask(LOG_GUEST_ERROR, "lsi_scsi: inf. loop with UDC masked"); @@ -1595,6 +1605,8 @@ again: } } trace_lsi_execute_script_stop(); + + reentrancy_level--; } static uint8_t lsi_reg_readb(LSIState *s, int offset) @@ -1867,7 +1879,7 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val) } if (val & LSI_SCNTL1_RST) { if (!(s->sstat0 & LSI_SSTAT0_RST)) { - qbus_reset_all(BUS(&s->bus)); + bus_cold_reset(BUS(&s->bus)); s->sstat0 |= LSI_SSTAT0_RST; lsi_script_scsi_interrupt(s, LSI_SIST0_RST, 0); } @@ -1925,7 +1937,7 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val) lsi_execute_script(s); } if (val & LSI_ISTAT0_SRST) { - qdev_reset_all(DEVICE(s)); + device_cold_reset(DEVICE(s)); } break; case 0x16: /* MBOX0 */ @@ -2309,7 +2321,7 @@ static void lsi_scsi_realize(PCIDevice *dev, Error **errp) pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->ram_io); QTAILQ_INIT(&s->queue); - scsi_bus_new(&s->bus, sizeof(s->bus), d, &lsi_scsi_info, NULL); + scsi_bus_init(&s->bus, sizeof(s->bus), d, &lsi_scsi_info); } static void lsi_scsi_exit(PCIDevice *dev) @@ -2353,7 +2365,7 @@ static void lsi53c810_class_init(ObjectClass *klass, void *data) k->device_id = PCI_DEVICE_ID_LSI_53C810; } -static TypeInfo lsi53c810_info = { +static const TypeInfo lsi53c810_info = { .name = TYPE_LSI53C810, .parent = TYPE_LSI53C895A, .class_init = lsi53c810_class_init, diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c index 8f2389d2c6..9cbbb16121 100644 --- a/hw/scsi/megasas.c +++ b/hw/scsi/megasas.c @@ -19,15 +19,16 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "hw/pci/pci.h" #include "hw/qdev-properties.h" #include "sysemu/dma.h" #include "sysemu/block-backend.h" +#include "sysemu/rtc.h" #include "hw/pci/msi.h" #include "hw/pci/msix.h" #include "qemu/iov.h" #include "qemu/module.h" +#include "qemu/hw-version.h" #include "hw/scsi/scsi.h" #include 
"scsi/constants.h" #include "trace.h" @@ -109,8 +110,8 @@ struct MegasasState { uint64_t reply_queue_pa; void *reply_queue; uint16_t reply_queue_len; - uint16_t reply_queue_head; - uint16_t reply_queue_tail; + uint32_t reply_queue_head; + uint32_t reply_queue_tail; uint64_t consumer_pa; uint64_t producer_pa; @@ -168,14 +169,16 @@ static void megasas_frame_set_cmd_status(MegasasState *s, unsigned long frame, uint8_t v) { PCIDevice *pci = &s->parent_obj; - stb_pci_dma(pci, frame + offsetof(struct mfi_frame_header, cmd_status), v); + stb_pci_dma(pci, frame + offsetof(struct mfi_frame_header, cmd_status), + v, MEMTXATTRS_UNSPECIFIED); } static void megasas_frame_set_scsi_status(MegasasState *s, unsigned long frame, uint8_t v) { PCIDevice *pci = &s->parent_obj; - stb_pci_dma(pci, frame + offsetof(struct mfi_frame_header, scsi_status), v); + stb_pci_dma(pci, frame + offsetof(struct mfi_frame_header, scsi_status), + v, MEMTXATTRS_UNSPECIFIED); } static inline const char *mfi_frame_desc(unsigned int cmd) @@ -200,7 +203,12 @@ static uint64_t megasas_frame_get_context(MegasasState *s, unsigned long frame) { PCIDevice *pci = &s->parent_obj; - return ldq_le_pci_dma(pci, frame + offsetof(struct mfi_frame_header, context)); + uint64_t val; + + ldq_le_pci_dma(pci, frame + offsetof(struct mfi_frame_header, context), + &val, MEMTXATTRS_UNSPECIFIED); + + return val; } static bool megasas_frame_is_ieee_sgl(MegasasCmd *cmd) @@ -303,6 +311,7 @@ static int megasas_map_sgl(MegasasState *s, MegasasCmd *cmd, union mfi_sgl *sgl) } if (cmd->iov_size > iov_size) { trace_megasas_iovec_overflow(cmd->index, iov_size, cmd->iov_size); + goto unmap; } else if (cmd->iov_size < iov_size) { trace_megasas_iovec_underflow(cmd->index, iov_size, cmd->iov_size); } @@ -375,8 +384,7 @@ static int megasas_setup_inquiry(uint8_t *cdb, int pg, int len) cdb[1] = 0x1; cdb[2] = pg; } - cdb[3] = (len >> 8) & 0xff; - cdb[4] = (len & 0xff); + stw_be_p(&cdb[3], len); return len; } @@ -392,18 +400,8 @@ static void megasas_encode_lba(uint8_t *cdb, uint64_t lba, } else { cdb[0] = READ_16; } - cdb[2] = (lba >> 56) & 0xff; - cdb[3] = (lba >> 48) & 0xff; - cdb[4] = (lba >> 40) & 0xff; - cdb[5] = (lba >> 32) & 0xff; - cdb[6] = (lba >> 24) & 0xff; - cdb[7] = (lba >> 16) & 0xff; - cdb[8] = (lba >> 8) & 0xff; - cdb[9] = (lba) & 0xff; - cdb[10] = (len >> 24) & 0xff; - cdb[11] = (len >> 16) & 0xff; - cdb[12] = (len >> 8) & 0xff; - cdb[13] = (len) & 0xff; + stq_be_p(&cdb[2], lba); + stl_be_p(&cdb[2 + 8], len); } /* @@ -531,7 +529,8 @@ static MegasasCmd *megasas_enqueue_frame(MegasasState *s, s->busy++; if (s->consumer_pa) { - s->reply_queue_tail = ldl_le_pci_dma(pcid, s->consumer_pa); + ldl_le_pci_dma(pcid, s->consumer_pa, &s->reply_queue_tail, + MEMTXATTRS_UNSPECIFIED); } trace_megasas_qf_enqueue(cmd->index, cmd->count, cmd->context, s->reply_queue_head, s->reply_queue_tail, s->busy); @@ -541,6 +540,7 @@ static MegasasCmd *megasas_enqueue_frame(MegasasState *s, static void megasas_complete_frame(MegasasState *s, uint64_t context) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; PCIDevice *pci_dev = PCI_DEVICE(s); int tail, queue_offset; @@ -554,24 +554,26 @@ static void megasas_complete_frame(MegasasState *s, uint64_t context) */ if (megasas_use_queue64(s)) { queue_offset = s->reply_queue_head * sizeof(uint64_t); - stq_le_pci_dma(pci_dev, s->reply_queue_pa + queue_offset, context); + stq_le_pci_dma(pci_dev, s->reply_queue_pa + queue_offset, + context, attrs); } else { queue_offset = s->reply_queue_head * sizeof(uint32_t); - stl_le_pci_dma(pci_dev, 
s->reply_queue_pa + queue_offset, context); + stl_le_pci_dma(pci_dev, s->reply_queue_pa + queue_offset, + context, attrs); } - s->reply_queue_tail = ldl_le_pci_dma(pci_dev, s->consumer_pa); + ldl_le_pci_dma(pci_dev, s->consumer_pa, &s->reply_queue_tail, attrs); trace_megasas_qf_complete(context, s->reply_queue_head, s->reply_queue_tail, s->busy); } if (megasas_intr_enabled(s)) { /* Update reply queue pointer */ - s->reply_queue_tail = ldl_le_pci_dma(pci_dev, s->consumer_pa); + ldl_le_pci_dma(pci_dev, s->consumer_pa, &s->reply_queue_tail, attrs); tail = s->reply_queue_head; s->reply_queue_head = megasas_next_index(s, tail, s->fw_cmds); trace_megasas_qf_update(s->reply_queue_head, s->reply_queue_tail, s->busy); - stl_le_pci_dma(pci_dev, s->producer_pa, s->reply_queue_head); + stl_le_pci_dma(pci_dev, s->producer_pa, s->reply_queue_head, attrs); /* Notify HBA */ if (msix_enabled(pci_dev)) { trace_megasas_msix_raise(0); @@ -631,6 +633,7 @@ static void megasas_abort_command(MegasasCmd *cmd) static int megasas_init_firmware(MegasasState *s, MegasasCmd *cmd) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; PCIDevice *pcid = PCI_DEVICE(s); uint32_t pa_hi, pa_lo; hwaddr iq_pa, initq_size = sizeof(struct mfi_init_qinfo); @@ -669,9 +672,9 @@ static int megasas_init_firmware(MegasasState *s, MegasasCmd *cmd) pa_lo = le32_to_cpu(initq->pi_addr_lo); pa_hi = le32_to_cpu(initq->pi_addr_hi); s->producer_pa = ((uint64_t) pa_hi << 32) | pa_lo; - s->reply_queue_head = ldl_le_pci_dma(pcid, s->producer_pa); + ldl_le_pci_dma(pcid, s->producer_pa, &s->reply_queue_head, attrs); s->reply_queue_head %= MEGASAS_MAX_FRAMES; - s->reply_queue_tail = ldl_le_pci_dma(pcid, s->consumer_pa); + ldl_le_pci_dma(pcid, s->consumer_pa, &s->reply_queue_tail, attrs); s->reply_queue_tail %= MEGASAS_MAX_FRAMES; flags = le32_to_cpu(initq->flags); if (flags & MFI_QUEUE_FLAG_CONTEXT64) { @@ -737,6 +740,7 @@ static int megasas_ctrl_get_info(MegasasState *s, MegasasCmd *cmd) size_t dcmd_size = sizeof(info); BusChild *kid; int num_pd_disks = 0; + dma_addr_t residual; memset(&info, 0x0, dcmd_size); if (cmd->iov_size < dcmd_size) { @@ -847,7 +851,9 @@ static int megasas_ctrl_get_info(MegasasState *s, MegasasCmd *cmd) MFI_INFO_PDMIX_SATA | MFI_INFO_PDMIX_LD); - cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + dma_buf_read(&info, dcmd_size, &residual, &cmd->qsg, + MEMTXATTRS_UNSPECIFIED); + cmd->iov_size -= residual; return MFI_STAT_OK; } @@ -855,6 +861,7 @@ static int megasas_mfc_get_defaults(MegasasState *s, MegasasCmd *cmd) { struct mfi_defaults info; size_t dcmd_size = sizeof(struct mfi_defaults); + dma_addr_t residual; memset(&info, 0x0, dcmd_size); if (cmd->iov_size < dcmd_size) { @@ -877,7 +884,9 @@ static int megasas_mfc_get_defaults(MegasasState *s, MegasasCmd *cmd) info.disable_preboot_cli = 1; info.cluster_disable = 1; - cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + dma_buf_read(&info, dcmd_size, &residual, &cmd->qsg, + MEMTXATTRS_UNSPECIFIED); + cmd->iov_size -= residual; return MFI_STAT_OK; } @@ -885,6 +894,7 @@ static int megasas_dcmd_get_bios_info(MegasasState *s, MegasasCmd *cmd) { struct mfi_bios_data info; size_t dcmd_size = sizeof(info); + dma_addr_t residual; memset(&info, 0x0, dcmd_size); if (cmd->iov_size < dcmd_size) { @@ -898,7 +908,9 @@ static int megasas_dcmd_get_bios_info(MegasasState *s, MegasasCmd *cmd) info.expose_all_drives = 1; } - cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + dma_buf_read(&info, dcmd_size, &residual, &cmd->qsg, + 
MEMTXATTRS_UNSPECIFIED); + cmd->iov_size -= residual; return MFI_STAT_OK; } @@ -906,10 +918,13 @@ static int megasas_dcmd_get_fw_time(MegasasState *s, MegasasCmd *cmd) { uint64_t fw_time; size_t dcmd_size = sizeof(fw_time); + dma_addr_t residual; fw_time = cpu_to_le64(megasas_fw_time()); - cmd->iov_size -= dma_buf_read((uint8_t *)&fw_time, dcmd_size, &cmd->qsg); + dma_buf_read(&fw_time, dcmd_size, &residual, &cmd->qsg, + MEMTXATTRS_UNSPECIFIED); + cmd->iov_size -= residual; return MFI_STAT_OK; } @@ -929,6 +944,7 @@ static int megasas_event_info(MegasasState *s, MegasasCmd *cmd) { struct mfi_evt_log_state info; size_t dcmd_size = sizeof(info); + dma_addr_t residual; memset(&info, 0, dcmd_size); @@ -936,7 +952,9 @@ static int megasas_event_info(MegasasState *s, MegasasCmd *cmd) info.shutdown_seq_num = cpu_to_le32(s->shutdown_event); info.boot_seq_num = cpu_to_le32(s->boot_event); - cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + dma_buf_read(&info, dcmd_size, &residual, &cmd->qsg, + MEMTXATTRS_UNSPECIFIED); + cmd->iov_size -= residual; return MFI_STAT_OK; } @@ -966,6 +984,7 @@ static int megasas_dcmd_pd_get_list(MegasasState *s, MegasasCmd *cmd) size_t dcmd_size = sizeof(info); BusChild *kid; uint32_t offset, dcmd_limit, num_pd_disks = 0, max_pd_disks; + dma_addr_t residual; memset(&info, 0, dcmd_size); offset = 8; @@ -1005,7 +1024,9 @@ static int megasas_dcmd_pd_get_list(MegasasState *s, MegasasCmd *cmd) info.size = cpu_to_le32(offset); info.count = cpu_to_le32(num_pd_disks); - cmd->iov_size -= dma_buf_read((uint8_t *)&info, offset, &cmd->qsg); + dma_buf_read(&info, offset, &residual, &cmd->qsg, + MEMTXATTRS_UNSPECIFIED); + cmd->iov_size -= residual; return MFI_STAT_OK; } @@ -1032,7 +1053,8 @@ static int megasas_pd_get_info_submit(SCSIDevice *sdev, int lun, uint64_t pd_size; uint16_t pd_id = ((sdev->id & 0xFF) << 8) | (lun & 0xFF); uint8_t cmdbuf[6]; - size_t len, resid; + size_t len; + dma_addr_t residual; if (!cmd->iov_buf) { cmd->iov_buf = g_malloc0(dcmd_size); @@ -1040,7 +1062,7 @@ static int megasas_pd_get_info_submit(SCSIDevice *sdev, int lun, info->inquiry_data[0] = 0x7f; /* Force PQual 0x3, PType 0x1f */ info->vpd_page83[0] = 0x7f; megasas_setup_inquiry(cmdbuf, 0, sizeof(info->inquiry_data)); - cmd->req = scsi_req_new(sdev, cmd->index, lun, cmdbuf, cmd); + cmd->req = scsi_req_new(sdev, cmd->index, lun, cmdbuf, sizeof(cmdbuf), cmd); if (!cmd->req) { trace_megasas_dcmd_req_alloc_failed(cmd->index, "PD get info std inquiry"); @@ -1058,7 +1080,7 @@ static int megasas_pd_get_info_submit(SCSIDevice *sdev, int lun, return MFI_STAT_INVALID_STATUS; } else if (info->inquiry_data[0] != 0x7f && info->vpd_page83[0] == 0x7f) { megasas_setup_inquiry(cmdbuf, 0x83, sizeof(info->vpd_page83)); - cmd->req = scsi_req_new(sdev, cmd->index, lun, cmdbuf, cmd); + cmd->req = scsi_req_new(sdev, cmd->index, lun, cmdbuf, sizeof(cmdbuf), cmd); if (!cmd->req) { trace_megasas_dcmd_req_alloc_failed(cmd->index, "PD get info vpd inquiry"); @@ -1099,9 +1121,11 @@ static int megasas_pd_get_info_submit(SCSIDevice *sdev, int lun, info->connected_port_bitmap = 0x1; info->device_speed = 1; info->link_speed = 1; - resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg); + dma_buf_read(cmd->iov_buf, dcmd_size, &residual, &cmd->qsg, + MEMTXATTRS_UNSPECIFIED); + cmd->iov_size -= residual; g_free(cmd->iov_buf); - cmd->iov_size = dcmd_size - resid; + cmd->iov_size = dcmd_size - residual; cmd->iov_buf = NULL; return MFI_STAT_OK; } @@ -1136,7 +1160,8 @@ static int megasas_dcmd_pd_get_info(MegasasState *s, 
MegasasCmd *cmd) static int megasas_dcmd_ld_get_list(MegasasState *s, MegasasCmd *cmd) { struct mfi_ld_list info; - size_t dcmd_size = sizeof(info), resid; + size_t dcmd_size = sizeof(info); + dma_addr_t residual; uint32_t num_ld_disks = 0, max_ld_disks; uint64_t ld_size; BusChild *kid; @@ -1171,8 +1196,9 @@ static int megasas_dcmd_ld_get_list(MegasasState *s, MegasasCmd *cmd) info.ld_count = cpu_to_le32(num_ld_disks); trace_megasas_dcmd_ld_get_list(cmd->index, num_ld_disks, max_ld_disks); - resid = dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); - cmd->iov_size = dcmd_size - resid; + dma_buf_read(&info, dcmd_size, &residual, &cmd->qsg, + MEMTXATTRS_UNSPECIFIED); + cmd->iov_size = dcmd_size - residual; return MFI_STAT_OK; } @@ -1180,7 +1206,8 @@ static int megasas_dcmd_ld_list_query(MegasasState *s, MegasasCmd *cmd) { uint16_t flags; struct mfi_ld_targetid_list info; - size_t dcmd_size = sizeof(info), resid; + size_t dcmd_size = sizeof(info); + dma_addr_t residual; uint32_t num_ld_disks = 0, max_ld_disks = s->fw_luns; BusChild *kid; @@ -1220,8 +1247,9 @@ static int megasas_dcmd_ld_list_query(MegasasState *s, MegasasCmd *cmd) info.size = dcmd_size; trace_megasas_dcmd_ld_get_list(cmd->index, num_ld_disks, max_ld_disks); - resid = dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); - cmd->iov_size = dcmd_size - resid; + dma_buf_read(&info, dcmd_size, &residual, &cmd->qsg, + MEMTXATTRS_UNSPECIFIED); + cmd->iov_size = dcmd_size - residual; return MFI_STAT_OK; } @@ -1231,7 +1259,8 @@ static int megasas_ld_get_info_submit(SCSIDevice *sdev, int lun, struct mfi_ld_info *info = cmd->iov_buf; size_t dcmd_size = sizeof(struct mfi_ld_info); uint8_t cdb[6]; - ssize_t len, resid; + ssize_t len; + dma_addr_t residual; uint16_t sdev_id = ((sdev->id & 0xFF) << 8) | (lun & 0xFF); uint64_t ld_size; @@ -1239,7 +1268,7 @@ static int megasas_ld_get_info_submit(SCSIDevice *sdev, int lun, cmd->iov_buf = g_malloc0(dcmd_size); info = cmd->iov_buf; megasas_setup_inquiry(cdb, 0x83, sizeof(info->vpd_page83)); - cmd->req = scsi_req_new(sdev, cmd->index, lun, cdb, cmd); + cmd->req = scsi_req_new(sdev, cmd->index, lun, cdb, sizeof(cdb), cmd); if (!cmd->req) { trace_megasas_dcmd_req_alloc_failed(cmd->index, "LD get info vpd inquiry"); @@ -1270,9 +1299,10 @@ static int megasas_ld_get_info_submit(SCSIDevice *sdev, int lun, info->ld_config.span[0].num_blocks = info->size; info->ld_config.span[0].array_ref = cpu_to_le16(sdev_id); - resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg); + dma_buf_read(cmd->iov_buf, dcmd_size, &residual, &cmd->qsg, + MEMTXATTRS_UNSPECIFIED); g_free(cmd->iov_buf); - cmd->iov_size = dcmd_size - resid; + cmd->iov_size = dcmd_size - residual; cmd->iov_buf = NULL; return MFI_STAT_OK; } @@ -1315,6 +1345,7 @@ static int megasas_dcmd_cfg_read(MegasasState *s, MegasasCmd *cmd) struct mfi_config_data *info; int num_pd_disks = 0, array_offset, ld_offset; BusChild *kid; + dma_addr_t residual; if (cmd->iov_size > 4096) { return MFI_STAT_INVALID_PARAMETER; @@ -1389,7 +1420,9 @@ static int megasas_dcmd_cfg_read(MegasasState *s, MegasasCmd *cmd) ld_offset += sizeof(struct mfi_ld_config); } - cmd->iov_size -= dma_buf_read((uint8_t *)data, info->size, &cmd->qsg); + dma_buf_read(data, info->size, &residual, &cmd->qsg, + MEMTXATTRS_UNSPECIFIED); + cmd->iov_size -= residual; return MFI_STAT_OK; } @@ -1397,6 +1430,7 @@ static int megasas_dcmd_get_properties(MegasasState *s, MegasasCmd *cmd) { struct mfi_ctrl_props info; size_t dcmd_size = sizeof(info); + dma_addr_t residual; memset(&info, 0x0, dcmd_size); 
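The repeated megasas.c conversions above follow the new dma_buf_read()/dma_buf_write() calling convention visible at the call sites: the helpers now take a MemTxAttrs argument and report the residual (untransferred) byte count through an out-parameter rather than the return value, so each caller subtracts an explicit residual variable from cmd->iov_size. A small standalone sketch of that out-parameter shape, with a hypothetical demo_copy() standing in for the DMA helper:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Sketch of the new shape: the residual comes back through an out-parameter
 * so the return value is free to carry a transaction status instead. */
static int demo_copy(void *dst, size_t len, size_t *residual,
                     const void *src, size_t src_len)
{
    size_t n = len < src_len ? len : src_len;

    memcpy(dst, src, n);
    if (residual) {
        *residual = len - n;   /* bytes that could not be transferred */
    }
    return 0;                  /* stand-in for a MemTx-style result */
}

int main(void)
{
    char src[8] = "payload";
    char dst[16] = { 0 };
    size_t residual = 0;
    size_t iov_size = sizeof(dst);   /* plays the role of cmd->iov_size */

    demo_copy(dst, sizeof(dst), &residual, src, sizeof(src));
    iov_size -= residual;            /* mirrors cmd->iov_size -= residual */
    printf("transferred %zu bytes, residual %zu\n", iov_size, residual);
    return 0;
}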
if (cmd->iov_size < dcmd_size) { @@ -1419,7 +1453,9 @@ static int megasas_dcmd_get_properties(MegasasState *s, MegasasCmd *cmd) info.ecc_bucket_leak_rate = cpu_to_le16(1440); info.expose_encl_devices = 1; - cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + dma_buf_read(&info, dcmd_size, &residual, &cmd->qsg, + MEMTXATTRS_UNSPECIFIED); + cmd->iov_size -= residual; return MFI_STAT_OK; } @@ -1448,7 +1484,7 @@ static int megasas_cluster_reset_ld(MegasasState *s, MegasasCmd *cmd) MegasasCmd *tmp_cmd = &s->frames[i]; if (tmp_cmd->req && tmp_cmd->req->dev->id == target_id) { SCSIDevice *d = tmp_cmd->req->dev; - qdev_reset_all(&d->qdev); + device_cold_reset(&d->qdev); } } return MFI_STAT_OK; @@ -1464,7 +1500,7 @@ static int megasas_dcmd_set_properties(MegasasState *s, MegasasCmd *cmd) dcmd_size); return MFI_STAT_INVALID_PARAMETER; } - dma_buf_write((uint8_t *)&info, dcmd_size, &cmd->qsg); + dma_buf_write(&info, dcmd_size, NULL, &cmd->qsg, MEMTXATTRS_UNSPECIFIED); trace_megasas_dcmd_unsupported(cmd->index, cmd->iov_size); return MFI_STAT_OK; } @@ -1604,13 +1640,13 @@ static int megasas_handle_dcmd(MegasasState *s, MegasasCmd *cmd) } static int megasas_finish_internal_dcmd(MegasasCmd *cmd, - SCSIRequest *req, size_t resid) + SCSIRequest *req, dma_addr_t residual) { int retval = MFI_STAT_OK; int lun = req->lun; trace_megasas_dcmd_internal_finish(cmd->index, cmd->dcmd_opcode, lun); - cmd->iov_size -= resid; + cmd->iov_size -= residual; switch (cmd->dcmd_opcode) { case MFI_DCMD_PD_GET_INFO: retval = megasas_pd_get_info_submit(req->dev, lun, cmd); @@ -1712,7 +1748,7 @@ static int megasas_handle_scsi(MegasasState *s, MegasasCmd *cmd, return MFI_STAT_SCSI_DONE_WITH_ERROR; } - cmd->req = scsi_req_new(sdev, cmd->index, lun_id, cdb, cmd); + cmd->req = scsi_req_new(sdev, cmd->index, lun_id, cdb, cdb_len, cmd); if (!cmd->req) { trace_megasas_scsi_req_alloc_failed( mfi_frame_desc(frame_cmd), target_id, lun_id); @@ -1787,7 +1823,7 @@ static int megasas_handle_io(MegasasState *s, MegasasCmd *cmd, int frame_cmd) megasas_encode_lba(cdb, lba_start, lba_count, is_write); cmd->req = scsi_req_new(sdev, cmd->index, - lun_id, cdb, cmd); + lun_id, cdb, cdb_len, cmd); if (!cmd->req) { trace_megasas_scsi_req_alloc_failed( mfi_frame_desc(frame_cmd), target_id, lun_id); @@ -1852,12 +1888,12 @@ static void megasas_xfer_complete(SCSIRequest *req, uint32_t len) } } -static void megasas_command_complete(SCSIRequest *req, size_t resid) +static void megasas_command_complete(SCSIRequest *req, size_t residual) { MegasasCmd *cmd = req->hba_private; uint8_t cmd_status = MFI_STAT_OK; - trace_megasas_command_complete(cmd->index, req->status, resid); + trace_megasas_command_complete(cmd->index, req->status, residual); if (req->io_canceled) { return; @@ -1867,7 +1903,7 @@ static void megasas_command_complete(SCSIRequest *req, size_t resid) /* * Internal command complete */ - cmd_status = megasas_finish_internal_dcmd(cmd, req, resid); + cmd_status = megasas_finish_internal_dcmd(cmd, req, residual); if (cmd_status == MFI_STAT_INVALID_STATUS) { return; } @@ -2280,7 +2316,6 @@ static const VMStateDescription vmstate_megasas_gen2 = { .name = "megasas-gen2", .version_id = 0, .minimum_version_id = 0, - .minimum_version_id_old = 0, .fields = (VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, MegasasState), VMSTATE_MSIX(parent_obj, MegasasState), @@ -2416,8 +2451,7 @@ static void megasas_scsi_realize(PCIDevice *dev, Error **errp) s->frames[i].state = s; } - scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(dev), - 
&megasas_scsi_info, NULL); + scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &megasas_scsi_info); } static Property megasas_properties_gen1[] = { diff --git a/hw/scsi/mfi.h b/hw/scsi/mfi.h index e67a5c0b47..0b4ee53dfc 100644 --- a/hw/scsi/mfi.h +++ b/hw/scsi/mfi.h @@ -633,7 +633,7 @@ struct mfi_ctrl_props { * metadata and user data * 1=5%, 2=10%, 3=15% and so on */ - uint8_t viewSpace; /* snapshot writeable VIEWs + uint8_t viewSpace; /* snapshot writable VIEWs * capacity as a % of source LD * capacity. 0=READ only * 1=5%, 2=10%, 3=15% and so on diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c index db3219e7d2..c485da792c 100644 --- a/hw/scsi/mptsas.c +++ b/hw/scsi/mptsas.c @@ -172,14 +172,21 @@ static const int mpi_request_sizes[] = { static dma_addr_t mptsas_ld_sg_base(MPTSASState *s, uint32_t flags_and_length, dma_addr_t *sgaddr) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; PCIDevice *pci = (PCIDevice *) s; dma_addr_t addr; if (flags_and_length & MPI_SGE_FLAGS_64_BIT_ADDRESSING) { - addr = ldq_le_pci_dma(pci, *sgaddr + 4); + uint64_t addr64; + + ldq_le_pci_dma(pci, *sgaddr + 4, &addr64, attrs); + addr = addr64; *sgaddr += 12; } else { - addr = ldl_le_pci_dma(pci, *sgaddr + 4); + uint32_t addr32; + + ldl_le_pci_dma(pci, *sgaddr + 4, &addr32, attrs); + addr = addr32; *sgaddr += 8; } return addr; @@ -203,7 +210,7 @@ static int mptsas_build_sgl(MPTSASState *s, MPTSASRequest *req, hwaddr addr) dma_addr_t addr, len; uint32_t flags_and_length; - flags_and_length = ldl_le_pci_dma(pci, sgaddr); + ldl_le_pci_dma(pci, sgaddr, &flags_and_length, MEMTXATTRS_UNSPECIFIED); len = flags_and_length & MPI_SGE_LENGTH_MASK; if ((flags_and_length & MPI_SGE_FLAGS_ELEMENT_TYPE_MASK) != MPI_SGE_FLAGS_SIMPLE_ELEMENT || @@ -234,7 +241,8 @@ static int mptsas_build_sgl(MPTSASState *s, MPTSASRequest *req, hwaddr addr) break; } - flags_and_length = ldl_le_pci_dma(pci, next_chain_addr); + ldl_le_pci_dma(pci, next_chain_addr, &flags_and_length, + MEMTXATTRS_UNSPECIFIED); if ((flags_and_length & MPI_SGE_FLAGS_ELEMENT_TYPE_MASK) != MPI_SGE_FLAGS_CHAIN_ELEMENT) { return MPI_IOCSTATUS_INVALID_SGL; @@ -316,7 +324,8 @@ static int mptsas_process_scsi_io_request(MPTSASState *s, } req->sreq = scsi_req_new(sdev, scsi_io->MsgContext, - scsi_io->LUN[1], scsi_io->CDB, req); + scsi_io->LUN[1], scsi_io->CDB, + scsi_io->CDBLength, req); if (req->sreq->cmd.xfer > scsi_io->DataLength) { goto overrun; @@ -513,7 +522,7 @@ reply_maybe_async: reply.ResponseCode = MPI_SCSITASKMGMT_RSP_TM_INVALID_LUN; goto out; } - qdev_reset_all(&sdev->qdev); + device_cold_reset(&sdev->qdev); break; case MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET: @@ -529,13 +538,13 @@ reply_maybe_async: QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { sdev = SCSI_DEVICE(kid->child); if (sdev->channel == 0 && sdev->id == req->TargetID) { - qdev_reset_all(kid->child); + device_cold_reset(kid->child); } } break; case MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS: - qbus_reset_all(BUS(&s->bus)); + bus_cold_reset(BUS(&s->bus)); break; default: @@ -798,7 +807,7 @@ static void mptsas_soft_reset(MPTSASState *s) s->intr_mask = MPI_HIM_DIM | MPI_HIM_RIM; mptsas_update_interrupt(s); - qbus_reset_all(BUS(&s->bus)); + bus_cold_reset(BUS(&s->bus)); s->intr_status = 0; s->intr_mask = save_mask; @@ -1315,7 +1324,7 @@ static void mptsas_scsi_realize(PCIDevice *dev, Error **errp) s->request_bh = qemu_bh_new(mptsas_fetch_requests, s); - scsi_bus_new(&s->bus, sizeof(s->bus), &dev->qdev, &mptsas_scsi_info, NULL); + scsi_bus_init(&s->bus, sizeof(s->bus), &dev->qdev, &mptsas_scsi_info); } 
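The HBA realize functions above all drop the trailing NULL bus-name argument by switching from scsi_bus_new() to scsi_bus_init(); the scsi-bus.c hunk below renames the underlying implementation to scsi_bus_init_named(), so the unnamed variant is presumably just a thin convenience wrapper around it. A rough standalone sketch of that API shape, with illustrative demo_* names rather than the real QEMU declarations:

#include <stddef.h>
#include <stdio.h>

struct demo_bus {
    const char *name;
    int busnr;
};

/* Full-control initializer: the caller may pass an explicit bus name. */
static void demo_bus_init_named(struct demo_bus *bus, const char *name)
{
    static int next_busnr;

    bus->name = name ? name : "scsi.0";   /* fall back to a default name */
    bus->busnr = next_busnr++;
}

/* Convenience wrapper for the common case with no explicit name, which is
 * what the former scsi_bus_new(..., NULL) call sites collapse into. */
static void demo_bus_init(struct demo_bus *bus)
{
    demo_bus_init_named(bus, NULL);
}

int main(void)
{
    struct demo_bus bus;

    demo_bus_init(&bus);
    printf("bus %s nr %d\n", bus.name, bus.busnr);
    return 0;
}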
static void mptsas_scsi_uninit(PCIDevice *dev) @@ -1355,7 +1364,6 @@ static const VMStateDescription vmstate_mptsas = { .name = "mptsas", .version_id = 0, .minimum_version_id = 0, - .minimum_version_id_old = 0, .post_load = mptsas_post_load, .fields = (VMStateField[]) { VMSTATE_PCI_DEVICE(dev, MPTSASState), diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c index 2a0a98cac9..ceceafb2cd 100644 --- a/hw/scsi/scsi-bus.c +++ b/hw/scsi/scsi-bus.c @@ -3,6 +3,7 @@ #include "qemu/error-report.h" #include "qemu/module.h" #include "qemu/option.h" +#include "qemu/hw-version.h" #include "hw/qdev-properties.h" #include "hw/scsi/scsi.h" #include "migration/qemu-file-types.h" @@ -101,15 +102,15 @@ static void scsi_device_unrealize(SCSIDevice *s) } int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf, - void *hba_private) + size_t buf_len, void *hba_private) { SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus); int rc; assert(cmd->len == 0); - rc = scsi_req_parse_cdb(dev, cmd, buf); + rc = scsi_req_parse_cdb(dev, cmd, buf, buf_len); if (bus->info->parse_cdb) { - rc = bus->info->parse_cdb(dev, cmd, buf, hba_private); + rc = bus->info->parse_cdb(dev, cmd, buf, buf_len, hba_private); } return rc; } @@ -134,10 +135,10 @@ void scsi_device_unit_attention_reported(SCSIDevice *s) } /* Create a scsi bus, and attach devices to it. */ -void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host, - const SCSIBusInfo *info, const char *bus_name) +void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host, + const SCSIBusInfo *info, const char *bus_name) { - qbus_create_inplace(bus, bus_size, TYPE_SCSI_BUS, host, bus_name); + qbus_init(bus, bus_size, TYPE_SCSI_BUS, host, bus_name); bus->busnr = next_scsi_bus++; bus->info = info; qbus_set_bus_hotplug_handler(BUS(bus)); @@ -702,7 +703,7 @@ SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d, } SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun, - uint8_t *buf, void *hba_private) + uint8_t *buf, size_t buf_len, void *hba_private) { SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus); const SCSIReqOps *ops; @@ -711,6 +712,11 @@ SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun, SCSICommand cmd = { .len = 0 }; int ret; + if (buf_len == 0) { + trace_scsi_req_parse_bad(d->id, lun, tag, 0); + goto invalid_opcode; + } + if ((d->unit_attention.key == UNIT_ATTENTION || bus->unit_attention.key == UNIT_ATTENTION) && (buf[0] != INQUIRY && @@ -733,13 +739,14 @@ SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun, } if (ops != NULL || !sc->parse_cdb) { - ret = scsi_req_parse_cdb(d, &cmd, buf); + ret = scsi_req_parse_cdb(d, &cmd, buf, buf_len); } else { - ret = sc->parse_cdb(d, &cmd, buf, hba_private); + ret = sc->parse_cdb(d, &cmd, buf, buf_len, hba_private); } if (ret != 0) { trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]); +invalid_opcode: req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private); } else { assert(cmd.len != 0); @@ -760,7 +767,7 @@ SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun, } req->cmd = cmd; - req->resid = req->cmd.xfer; + req->residual = req->cmd.xfer; switch (buf[0]) { case INQUIRY: @@ -1307,14 +1314,15 @@ static void scsi_cmd_xfer_mode(SCSICommand *cmd) } } -int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf) +int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf, + size_t buf_len) { int rc; int len; cmd->lba = -1; len = scsi_cdb_length(buf); - if (len < 0) { + 
if (len < 0) { +
if (len < 0 || len > buf_len) { return -1; } @@ -1408,7 +1416,7 @@ void scsi_req_data(SCSIRequest *req, int len) trace_scsi_req_data(req->dev->id, req->lun, req->tag, len); assert(req->cmd.mode != SCSI_XFER_NONE); if (!req->sg) { - req->resid -= len; + req->residual -= len; req->bus->info->transfer_data(req, len); return; } @@ -1421,9 +1429,11 @@ void scsi_req_data(SCSIRequest *req, int len) buf = scsi_req_get_buf(req); if (req->cmd.mode == SCSI_XFER_FROM_DEV) { - req->resid = dma_buf_read(buf, len, req->sg); + dma_buf_read(buf, len, &req->residual, req->sg, + MEMTXATTRS_UNSPECIFIED); } else { - req->resid = dma_buf_write(buf, len, req->sg); + dma_buf_write(buf, len, &req->residual, req->sg, + MEMTXATTRS_UNSPECIFIED); } scsi_req_continue(req); } @@ -1512,7 +1522,7 @@ void scsi_req_complete(SCSIRequest *req, int status) scsi_req_ref(req); scsi_req_dequeue(req); - req->bus->info->complete(req, req->resid); + req->bus->info->complete(req, req->residual); /* Cancelled requests might end up being completed instead of cancelled */ notifier_list_notify(&req->cancel_notifiers, req); @@ -1606,6 +1616,24 @@ static int scsi_ua_precedence(SCSISense sense) return (sense.asc << 8) | sense.ascq; } +void scsi_bus_set_ua(SCSIBus *bus, SCSISense sense) +{ + int prec1, prec2; + if (sense.key != UNIT_ATTENTION) { + return; + } + + /* + * Override a pre-existing unit attention condition, except for a more + * important reset condition. + */ + prec1 = scsi_ua_precedence(bus->unit_attention); + prec2 = scsi_ua_precedence(sense); + if (prec2 < prec1) { + bus->unit_attention = sense; + } +} + void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense) { int prec1, prec2; @@ -1710,7 +1738,11 @@ static int get_scsi_requests(QEMUFile *f, void *pv, size_t size, qemu_get_buffer(f, buf, sizeof(buf)); qemu_get_be32s(f, &tag); qemu_get_be32s(f, &lun); - req = scsi_req_new(s, tag, lun, buf, NULL); + /* + * A too-short CDB would have been rejected by scsi_req_new, so just use + * SCSI_CMD_BUF_SIZE as the CDB length. 
+ */ + req = scsi_req_new(s, tag, lun, buf, sizeof(buf), NULL); req->retry = (sbyte == 1); if (bus->info->load_request) { req->hba_private = bus->info->load_request(f, req); diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c index e8a547dbb7..e493c28814 100644 --- a/hw/scsi/scsi-disk.c +++ b/hw/scsi/scsi-disk.c @@ -25,6 +25,8 @@ #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "qemu/module.h" +#include "qemu/hw-version.h" +#include "qemu/memalign.h" #include "hw/scsi/scsi.h" #include "migration/qemu-file-types.h" #include "migration/vmstate.h" @@ -92,6 +94,7 @@ struct SCSIDiskState { uint16_t port_index; uint64_t max_unmap_size; uint64_t max_io_size; + uint32_t quirks; QEMUBH *bh; char *version; char *serial; @@ -420,7 +423,7 @@ static void scsi_do_read(SCSIDiskReq *r, int ret) if (r->req.sg) { dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ); - r->req.resid -= r->req.sg->size; + r->req.residual -= r->req.sg->size; r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), r->req.sg, r->sector << BDRV_SECTOR_BITS, BDRV_SECTOR_SIZE, @@ -580,7 +583,7 @@ static void scsi_write_data(SCSIRequest *req) if (r->req.sg) { dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE); - r->req.resid -= r->req.sg->size; + r->req.residual -= r->req.sg->size; r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), r->req.sg, r->sector << BDRV_SECTOR_BITS, BDRV_SECTOR_SIZE, @@ -1076,17 +1079,20 @@ static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf, int page_control) { static const int mode_sense_valid[0x3f] = { + [MODE_PAGE_VENDOR_SPECIFIC] = (1 << TYPE_DISK) | (1 << TYPE_ROM), [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK), [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK), [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM), [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM), [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM), [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM), + [MODE_PAGE_APPLE_VENDOR] = (1 << TYPE_ROM), }; uint8_t *p = *p_outbuf + 2; int length; + assert(page < ARRAY_SIZE(mode_sense_valid)); if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) { return -1; } @@ -1182,6 +1188,10 @@ static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf, case MODE_PAGE_R_W_ERROR: length = 10; if (page_control == 1) { /* Changeable Values */ + if (s->qdev.type == TYPE_ROM) { + /* Automatic Write Reallocation Enabled */ + p[0] = 0x80; + } break; } p[0] = 0x80; /* Automatic Write Reallocation Enabled */ @@ -1225,6 +1235,36 @@ static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf, p[19] = (16 * 176) & 0xff; break; + case MODE_PAGE_APPLE_VENDOR: + if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR)) { + length = 0x1e; + if (page_control == 1) { /* Changeable Values */ + break; + } + + memset(p, 0, length); + strcpy((char *)p + 8, "APPLE COMPUTER, INC "); + break; + } else { + return -1; + } + + case MODE_PAGE_VENDOR_SPECIFIC: + if (s->qdev.type == TYPE_DISK && (s->quirks & + (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE))) { + length = 0x2; + if (page_control == 1) { /* Changeable Values */ + p[0] = 0xff; + p[1] = 0xff; + break; + } + p[0] = 0; + p[1] = 0; + break; + } else { + return -1; + } + default: return -1; } @@ -1260,10 +1300,27 @@ static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf) dev_specific_param |= 0x80; /* Readonly. 
*/ } } else { - /* MMC prescribes that CD/DVD drives have no block descriptors, - * and defines no device-specific parameter. */ - dev_specific_param = 0x00; - dbd = true; + if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD)) { + /* Use DBD from the request... */ + dev_specific_param = 0x00; + + /* + * ... unless we receive a request for MODE_PAGE_APPLE_VENDOR + * which should never return a block descriptor even though DBD is + * not set, otherwise CDROM detection fails in MacOS + */ + if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR) && + page == MODE_PAGE_APPLE_VENDOR) { + dbd = true; + } + } else { + /* + * MMC prescribes that CD/DVD drives have no block descriptors, + * and defines no device-specific parameter. + */ + dev_specific_param = 0x00; + dbd = true; + } } if (r->req.cmd.buf[0] == MODE_SENSE) { @@ -1428,6 +1485,11 @@ static int scsi_disk_check_mode_select(SCSIDiskState *s, int page, return -1; } + /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */ + if (page == MODE_PAGE_ALLS) { + return -1; + } + p = mode_current; memset(mode_current, 0, inlen + 2); len = mode_sense_page(s, page, &p, 0); @@ -1494,7 +1556,10 @@ static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change) goto invalid_param; } if (page_len > len) { - goto invalid_param_len; + if (!(s->quirks & SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED)) { + goto invalid_param_len; + } + trace_scsi_disk_mode_select_page_truncated(page, page_len, len); } if (!change) { @@ -1526,12 +1591,15 @@ static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf) int cmd = r->req.cmd.buf[0]; int len = r->req.cmd.xfer; int hdr_len = (cmd == MODE_SELECT ? 4 : 8); - int bd_len; + int bd_len, bs; int pass; - /* We only support PF=1, SP=0. */ if ((r->req.cmd.buf[1] & 0x11) != 0x10) { - goto invalid_field; + if (!(s->quirks & + (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE))) { + /* We only support PF=1, SP=0. */ + goto invalid_field; + } } if (len < hdr_len) { @@ -1548,6 +1616,22 @@ static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf) goto invalid_param; } + /* Allow changing the block size */ + if (bd_len) { + bs = p[5] << 16 | p[6] << 8 | p[7]; + + /* + * Since the existing code only checks/updates bits 8-15 of the block + * size, restrict ourselves to the same requirement for now to ensure + * that a block size set by a block descriptor and then read back by + * a subsequent SCSI command will be the same + */ + if (bs && !(bs & ~0xff00) && bs != s->qdev.blocksize) { + s->qdev.blocksize = bs; + trace_scsi_disk_mode_select_set_blocksize(s->qdev.blocksize); + } + } + len -= bd_len; p += bd_len; @@ -1775,7 +1859,7 @@ static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf) uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf); WriteSameCBData *data; uint8_t *buf; - int i; + int i, l; /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. 
*/ if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) { @@ -1817,8 +1901,9 @@ static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf) data->iov.iov_len); qemu_iovec_init_external(&data->qiov, &data->iov, 1); - for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) { - memcpy(&buf[i], inbuf, s->qdev.blocksize); + for (i = 0; i < data->iov.iov_len; i += l) { + l = MIN(s->qdev.blocksize, data->iov.iov_len - i); + memcpy(&buf[i], inbuf, l); } scsi_req_ref(&r->req); @@ -2119,6 +2204,9 @@ static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf) trace_scsi_disk_emulate_command_WRITE_SAME( req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer); break; + case FORMAT_UNIT: + trace_scsi_disk_emulate_command_FORMAT_UNIT(r->req.cmd.xfer); + break; default: trace_scsi_disk_emulate_command_UNKNOWN(buf[0], scsi_command_name(buf[0])); @@ -2411,7 +2499,6 @@ static void scsi_realize(SCSIDevice *dev, Error **errp) } else { blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s); } - blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize); blk_iostatus_enable(s->qdev.conf.blk); @@ -2457,6 +2544,7 @@ static void scsi_cd_realize(SCSIDevice *dev, Error **errp) SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); AioContext *ctx; int ret; + uint32_t blocksize = 2048; if (!dev->conf.blk) { /* Anonymous BlockBackend for an empty drive. As we put it into @@ -2466,9 +2554,13 @@ static void scsi_cd_realize(SCSIDevice *dev, Error **errp) assert(ret == 0); } + if (dev->conf.physical_block_size != 0) { + blocksize = dev->conf.physical_block_size; + } + ctx = blk_get_aio_context(dev->conf.blk); aio_context_acquire(ctx); - s->qdev.blocksize = 2048; + s->qdev.blocksize = blocksize; s->qdev.type = TYPE_ROM; s->features |= 1 << SCSI_DISK_F_REMOVABLE; if (!s->product) { @@ -2525,6 +2617,7 @@ static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = { [VERIFY_10] = &scsi_disk_emulate_reqops, [VERIFY_12] = &scsi_disk_emulate_reqops, [VERIFY_16] = &scsi_disk_emulate_reqops, + [FORMAT_UNIT] = &scsi_disk_emulate_reqops, [READ_6] = &scsi_disk_dma_reqops, [READ_10] = &scsi_disk_dma_reqops, @@ -2942,14 +3035,15 @@ static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, } static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd, - uint8_t *buf, void *hba_private) + uint8_t *buf, size_t buf_len, + void *hba_private) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); if (scsi_block_is_passthrough(s, buf)) { - return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private); + return scsi_bus_parse_cdb(&s->qdev, cmd, buf, buf_len, hba_private); } else { - return scsi_req_parse_cdb(&s->qdev, cmd, buf); + return scsi_req_parse_cdb(&s->qdev, cmd, buf, buf_len); } } @@ -3029,6 +3123,9 @@ static Property scsi_hd_properties[] = { DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 5), + DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState, + quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE, + 0), DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf), DEFINE_PROP_END_OF_LIST(), }; @@ -3077,6 +3174,15 @@ static Property scsi_cd_properties[] = { DEFAULT_MAX_IO_SIZE), DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 5), + DEFINE_PROP_BIT("quirk_mode_page_apple_vendor", SCSIDiskState, quirks, + SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR, 0), + DEFINE_PROP_BIT("quirk_mode_sense_rom_use_dbd", SCSIDiskState, quirks, + 
SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD, 0), + DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState, + quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE, + 0), + DEFINE_PROP_BIT("quirk_mode_page_truncated", SCSIDiskState, quirks, + SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED, 0), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c index 665baf900e..d513870181 100644 --- a/hw/scsi/scsi-generic.c +++ b/hw/scsi/scsi-generic.c @@ -147,6 +147,18 @@ static int execute_command(BlockBackend *blk, return 0; } +static uint64_t calculate_max_transfer(SCSIDevice *s) +{ + uint64_t max_transfer = blk_get_max_hw_transfer(s->conf.blk); + uint32_t max_iov = blk_get_max_hw_iov(s->conf.blk); + + assert(max_transfer); + max_transfer = MIN_NON_ZERO(max_transfer, + max_iov * qemu_real_host_page_size()); + + return max_transfer / s->blocksize; +} + static int scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s, int len) { uint8_t page, page_idx; @@ -178,17 +190,16 @@ static int scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s, int len) if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) && (r->req.cmd.buf[1] & 0x01)) { page = r->req.cmd.buf[2]; - if (page == 0xb0) { - uint64_t max_transfer = blk_get_max_hw_transfer(s->conf.blk); - uint32_t max_iov = blk_get_max_iov(s->conf.blk); + if (page == 0xb0 && r->buflen >= 8) { + uint8_t buf[16] = {}; + uint8_t buf_used = MIN(r->buflen, 16); + uint64_t max_transfer = calculate_max_transfer(s); + + memcpy(buf, r->buf, buf_used); + stl_be_p(&buf[8], max_transfer); + stl_be_p(&buf[12], MIN_NON_ZERO(max_transfer, ldl_be_p(&buf[12]))); + memcpy(r->buf + 8, buf + 8, buf_used - 8); - assert(max_transfer); - max_transfer = MIN_NON_ZERO(max_transfer, max_iov * qemu_real_host_page_size) - / s->blocksize; - stl_be_p(&r->buf[8], max_transfer); - /* Also take care of the opt xfer len. 
*/ - stl_be_p(&r->buf[12], - MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12]))); } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) { /* * Now we're capable of supplying the VPD Block Limits @@ -230,7 +241,7 @@ static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s) uint8_t buf[64]; SCSIBlockLimits bl = { - .max_io_sectors = blk_get_max_transfer(s->conf.blk) / s->blocksize + .max_io_sectors = calculate_max_transfer(s), }; memset(r->buf, 0, r->buflen); @@ -321,7 +332,6 @@ static void scsi_read_complete(void * opaque, int ret) s->blocksize = ldl_be_p(&r->buf[8]); s->max_lba = ldq_be_p(&r->buf[0]); } - blk_set_guest_block_size(s->conf.blk, s->blocksize); /* * Patch MODE SENSE device specific parameters if the BDS is opened @@ -785,9 +795,10 @@ static Property scsi_generic_properties[] = { }; static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, - uint8_t *buf, void *hba_private) + uint8_t *buf, size_t buf_len, + void *hba_private) { - return scsi_bus_parse_cdb(dev, cmd, buf, hba_private); + return scsi_bus_parse_cdb(dev, cmd, buf, buf_len, hba_private); } static void scsi_generic_class_initfn(ObjectClass *klass, void *data) diff --git a/hw/scsi/spapr_vscsi.c b/hw/scsi/spapr_vscsi.c index c210262484..5bbbef64ef 100644 --- a/hw/scsi/spapr_vscsi.c +++ b/hw/scsi/spapr_vscsi.c @@ -783,6 +783,7 @@ static int vscsi_queue_cmd(VSCSIState *s, vscsi_req *req) union srp_iu *srp = &req_iu(req)->srp; SCSIDevice *sdev; int n, lun; + size_t cdb_len = sizeof (srp->cmd.cdb) + (srp->cmd.add_cdb_len & ~3); if ((srp->cmd.lun == 0 || be64_to_cpu(srp->cmd.lun) == SRP_REPORT_LUNS_WLUN) && srp->cmd.cdb[0] == REPORT_LUNS) { @@ -801,7 +802,7 @@ static int vscsi_queue_cmd(VSCSIState *s, vscsi_req *req) } return 1; } - req->sreq = scsi_req_new(sdev, req->qtag, lun, srp->cmd.cdb, req); + req->sreq = scsi_req_new(sdev, req->qtag, lun, srp->cmd.cdb, cdb_len, req); n = scsi_req_enqueue(req->sreq); trace_spapr_vscsi_queue_cmd(req->qtag, srp->cmd.cdb[0], @@ -864,7 +865,7 @@ static int vscsi_process_tsk_mgmt(VSCSIState *s, vscsi_req *req) break; } - qdev_reset_all(&d->qdev); + device_cold_reset(&d->qdev); break; case SRP_TSK_ABORT_TASK_SET: @@ -1013,7 +1014,7 @@ static int vscsi_send_capabilities(VSCSIState *s, vscsi_req *req) } /* - * Current implementation does not suppport any migration or + * Current implementation does not support any migration or * reservation capabilities. Construct the response telling the * guest not to use them. */ @@ -1216,8 +1217,7 @@ static void spapr_vscsi_realize(SpaprVioDevice *dev, Error **errp) dev->crq.SendFunc = vscsi_do_crq; - scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(dev), - &vscsi_scsi_info, NULL); + scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &vscsi_scsi_info); /* ibmvscsi SCSI bus does not allow hotplug. 
*/ qbus_set_hotplug_handler(BUS(&s->bus), NULL); diff --git a/hw/scsi/trace-events b/hw/scsi/trace-events index 92d5b40f89..ab238293f0 100644 --- a/hw/scsi/trace-events +++ b/hw/scsi/trace-events @@ -42,18 +42,18 @@ mptsas_config_sas_phy(void *dev, int address, int port, int phy_handle, int dev_ # megasas.c megasas_init_firmware(uint64_t pa) "pa 0x%" PRIx64 " " -megasas_init_queue(uint64_t queue_pa, int queue_len, uint64_t head, uint64_t tail, uint32_t flags) "queue at 0x%" PRIx64 " len %d head 0x%" PRIx64 " tail 0x%" PRIx64 " flags 0x%x" +megasas_init_queue(uint64_t queue_pa, int queue_len, uint32_t head, uint32_t tail, uint32_t flags) "queue at 0x%" PRIx64 " len %d head 0x%" PRIx32 " tail 0x%" PRIx32 " flags 0x%x" megasas_initq_map_failed(int frame) "scmd %d: failed to map queue" megasas_initq_mapped(uint64_t pa) "queue already mapped at 0x%" PRIx64 megasas_initq_mismatch(int queue_len, int fw_cmds) "queue size %d max fw cmds %d" megasas_qf_mapped(unsigned int index) "skip mapped frame 0x%x" megasas_qf_new(unsigned int index, uint64_t frame) "frame 0x%x addr 0x%" PRIx64 megasas_qf_busy(unsigned long pa) "all frames busy for frame 0x%lx" -megasas_qf_enqueue(unsigned int index, unsigned int count, uint64_t context, unsigned int head, unsigned int tail, int busy) "frame 0x%x count %d context 0x%" PRIx64 " head 0x%x tail 0x%x busy %d" -megasas_qf_update(unsigned int head, unsigned int tail, unsigned int busy) "head 0x%x tail 0x%x busy %d" +megasas_qf_enqueue(unsigned int index, unsigned int count, uint64_t context, uint32_t head, uint32_t tail, unsigned int busy) "frame 0x%x count %d context 0x%" PRIx64 " head 0x%" PRIx32 " tail 0x%" PRIx32 " busy %u" +megasas_qf_update(uint32_t head, uint32_t tail, unsigned int busy) "head 0x%" PRIx32 " tail 0x%" PRIx32 " busy %u" megasas_qf_map_failed(int cmd, unsigned long frame) "scmd %d: frame %lu" megasas_qf_complete_noirq(uint64_t context) "context 0x%" PRIx64 " " -megasas_qf_complete(uint64_t context, unsigned int head, unsigned int tail, int busy) "context 0x%" PRIx64 " head 0x%x tail 0x%x busy %d" +megasas_qf_complete(uint64_t context, uint32_t head, uint32_t tail, int busy) "context 0x%" PRIx64 " head 0x%" PRIx32 " tail 0x%" PRIx32 " busy %u" megasas_frame_busy(uint64_t addr) "frame 0x%" PRIx64 " busy" megasas_unhandled_frame_cmd(int cmd, uint8_t frame_cmd) "scmd %d: MFI cmd 0x%x" megasas_handle_scsi(const char *frame, int bus, int dev, int lun, void *sdev, unsigned long size) "%s dev %x/%x/%x sdev %p xfer %lu" @@ -249,7 +249,7 @@ lsi_bad_phase_interrupt(void) "Phase mismatch interrupt" lsi_bad_selection(uint32_t id) "Selected absent target %"PRIu32 lsi_do_dma_unavailable(void) "DMA no data available" lsi_do_dma(uint64_t addr, int len) "DMA addr=0x%"PRIx64" len=%d" -lsi_queue_command(uint32_t tag) "Queueing tag=0x%"PRId32 +lsi_queue_command(uint32_t tag) "Queueing tag=0x%"PRIx32 lsi_add_msg_byte_error(void) "MSG IN data too long" lsi_add_msg_byte(uint8_t data) "MSG IN 0x%02x" lsi_reselect(int id) "Reselected target %d" @@ -267,7 +267,7 @@ lsi_do_msgout_noop(void) "MSG: No Operation" lsi_do_msgout_extended(uint8_t msg, uint8_t len) "Extended message 0x%x (len %d)" lsi_do_msgout_ignored(const char *msg) "%s (ignored)" lsi_do_msgout_simplequeue(uint8_t select_tag) "SIMPLE queue tag=0x%x" -lsi_do_msgout_abort(uint32_t tag) "MSG: ABORT TAG tag=0x%"PRId32 +lsi_do_msgout_abort(uint32_t tag) "MSG: ABORT TAG tag=0x%"PRIx32 lsi_do_msgout_clearqueue(uint32_t tag) "MSG: CLEAR QUEUE tag=0x%"PRIx32 lsi_do_msgout_busdevicereset(uint32_t tag) "MSG: BUS DEVICE 
RESET tag=0x%"PRIx32 lsi_do_msgout_select(int id) "Select LUN %d" @@ -334,10 +334,13 @@ scsi_disk_emulate_command_UNMAP(size_t xfer) "Unmap (len %zd)" scsi_disk_emulate_command_VERIFY(int bytchk) "Verify (bytchk %d)" scsi_disk_emulate_command_WRITE_SAME(int cmd, size_t xfer) "WRITE SAME %d (len %zd)" scsi_disk_emulate_command_UNKNOWN(int cmd, const char *name) "Unknown SCSI command (0x%2.2x=%s)" +scsi_disk_emulate_command_FORMAT_UNIT(size_t xfer) "Format Unit (len %zu)" scsi_disk_dma_command_READ(uint64_t lba, uint32_t len) "Read (sector %" PRId64 ", count %u)" scsi_disk_dma_command_WRITE(const char *cmd, uint64_t lba, int len) "Write %s(sector %" PRId64 ", count %u)" scsi_disk_new_request(uint32_t lun, uint32_t tag, const char *line) "Command: lun=%d tag=0x%x data=%s" scsi_disk_aio_sgio_command(uint32_t tag, uint8_t cmd, uint64_t lba, int len, uint32_t timeout) "disk aio sgio: tag=0x%x cmd=0x%x (sector %" PRId64 ", count %d) timeout=%u" +scsi_disk_mode_select_page_truncated(int page, int len, int page_len) "page %d expected length %d but received length %d" +scsi_disk_mode_select_set_blocksize(int blocksize) "set block size to %d" # scsi-generic.c scsi_generic_command_complete_noio(void *req, uint32_t tag, int statuc) "Command complete %p tag=0x%x status=%d" diff --git a/hw/scsi/vhost-scsi-common.c b/hw/scsi/vhost-scsi-common.c index 767f827e55..18ea5dcfa1 100644 --- a/hw/scsi/vhost-scsi-common.c +++ b/hw/scsi/vhost-scsi-common.c @@ -68,7 +68,7 @@ int vhost_scsi_common_start(VHostSCSICommon *vsc) goto err_guest_notifiers; } - ret = vhost_dev_start(&vsc->dev, vdev); + ret = vhost_dev_start(&vsc->dev, vdev, true); if (ret < 0) { error_report("Error start vhost dev"); goto err_guest_notifiers; @@ -101,7 +101,7 @@ void vhost_scsi_common_stop(VHostSCSICommon *vsc) VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); int ret = 0; - vhost_dev_stop(&vsc->dev, vdev); + vhost_dev_stop(&vsc->dev, vdev, true); if (k->set_guest_notifiers) { ret = k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, false); diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c index 8c611bfd2d..6a0fd0dfb1 100644 --- a/hw/scsi/vhost-scsi.c +++ b/hw/scsi/vhost-scsi.c @@ -38,6 +38,7 @@ static const int kernel_feature_bits[] = { VIRTIO_RING_F_INDIRECT_DESC, VIRTIO_RING_F_EVENT_IDX, VIRTIO_SCSI_F_HOTPLUG, + VIRTIO_F_RING_RESET, VHOST_INVALID_FEATURE_BIT }; @@ -120,7 +121,7 @@ static void vhost_scsi_set_status(VirtIODevice *vdev, uint8_t val) start = false; } - if (vsc->dev.started == start) { + if (vhost_dev_is_started(&vsc->dev) == start) { return; } @@ -147,7 +148,7 @@ static int vhost_scsi_pre_save(void *opaque) /* At this point, backend must be stopped, otherwise * it might keep writing to memory. 
*/ - assert(!vsc->dev.started); + assert(!vhost_dev_is_started(&vsc->dev)); return 0; } @@ -170,6 +171,7 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp) Error *err = NULL; int vhostfd = -1; int ret; + struct vhost_virtqueue *vqs = NULL; if (!vs->conf.wwpn) { error_setg(errp, "vhost-scsi: missing wwpn"); @@ -208,19 +210,24 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp) "target SCSI device state or use shared storage over network), " "set 'migratable' property to true to enable migration."); if (migrate_add_blocker(vsc->migration_blocker, errp) < 0) { - error_free(vsc->migration_blocker); goto free_virtio; } } vsc->dev.nvqs = VHOST_SCSI_VQ_NUM_FIXED + vs->conf.num_queues; - vsc->dev.vqs = g_new0(struct vhost_virtqueue, vsc->dev.nvqs); + vqs = g_new0(struct vhost_virtqueue, vsc->dev.nvqs); + vsc->dev.vqs = vqs; vsc->dev.vq_index = 0; vsc->dev.backend_features = 0; ret = vhost_dev_init(&vsc->dev, (void *)(uintptr_t)vhostfd, VHOST_BACKEND_TYPE_KERNEL, 0, errp); if (ret < 0) { + /* + * vhost_dev_init calls vhost_dev_cleanup on error, which closes + * vhostfd, don't double close it. + */ + vhostfd = -1; goto free_vqs; } @@ -233,14 +240,17 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp) return; free_vqs: + g_free(vqs); if (!vsc->migratable) { migrate_del_blocker(vsc->migration_blocker); } - g_free(vsc->dev.vqs); free_virtio: + error_free(vsc->migration_blocker); virtio_scsi_common_unrealize(dev); close_fd: - close(vhostfd); + if (vhostfd >= 0) { + close(vhostfd); + } return; } @@ -264,6 +274,13 @@ static void vhost_scsi_unrealize(DeviceState *dev) virtio_scsi_common_unrealize(dev); } +static struct vhost_dev *vhost_scsi_get_vhost(VirtIODevice *vdev) +{ + VHostSCSI *s = VHOST_SCSI(vdev); + VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); + return &vsc->dev; +} + static Property vhost_scsi_properties[] = { DEFINE_PROP_STRING("vhostfd", VirtIOSCSICommon, conf.vhostfd), DEFINE_PROP_STRING("wwpn", VirtIOSCSICommon, conf.wwpn), @@ -298,6 +315,7 @@ static void vhost_scsi_class_init(ObjectClass *klass, void *data) vdc->get_features = vhost_scsi_common_get_features; vdc->set_config = vhost_scsi_common_set_config; vdc->set_status = vhost_scsi_set_status; + vdc->get_vhost = vhost_scsi_get_vhost; fwc->get_dev_path = vhost_scsi_common_get_fw_dev_path; } diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c index 1b2f7eed98..b7a71a802c 100644 --- a/hw/scsi/vhost-user-scsi.c +++ b/hw/scsi/vhost-user-scsi.c @@ -36,6 +36,7 @@ static const int user_feature_bits[] = { VIRTIO_RING_F_INDIRECT_DESC, VIRTIO_RING_F_EVENT_IDX, VIRTIO_SCSI_F_HOTPLUG, + VIRTIO_F_RING_RESET, VHOST_INVALID_FEATURE_BIT }; @@ -49,7 +50,7 @@ static void vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status) VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); bool start = (status & VIRTIO_CONFIG_S_DRIVER_OK) && vdev->vm_running; - if (vsc->dev.started == start) { + if (vhost_dev_is_started(&vsc->dev) == start) { return; } diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c index 18eb824c97..20bb91766e 100644 --- a/hw/scsi/virtio-scsi-dataplane.c +++ b/hw/scsi/virtio-scsi-dataplane.c @@ -49,51 +49,6 @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp) } } -static bool virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev, - VirtQueue *vq) -{ - bool progress = false; - VirtIOSCSI *s = VIRTIO_SCSI(vdev); - - virtio_scsi_acquire(s); - if (!s->dataplane_fenced) { - assert(s->ctx && s->dataplane_started); - progress = virtio_scsi_handle_cmd_vq(s, vq); 
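The vhost-scsi realize() fix above relies on an ownership rule spelled out in its new comment: when vhost_dev_init() fails it has already run vhost_dev_cleanup(), which closes the fd it was given, so the caller must forget the descriptor rather than close it a second time. A minimal sketch of that idiom with a stand-in callee (do_init_owning_fd() is invented purely to keep the example self-contained):

#include <unistd.h>

/* Stand-in for an init routine that, like vhost_dev_init(), takes ownership
 * of the descriptor and closes it itself on failure. */
static int do_init_owning_fd(int fd)
{
    close(fd);              /* failure path of the callee */
    return -1;
}

static int open_and_init(int fd)
{
    if (do_init_owning_fd(fd) < 0) {
        /* the callee already closed fd; forget it so the shared error
         * path below cannot close it again */
        fd = -1;
        goto fail;
    }
    return 0;

fail:
    if (fd >= 0) {
        close(fd);
    }
    return -1;
}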
- } - virtio_scsi_release(s); - return progress; -} - -static bool virtio_scsi_data_plane_handle_ctrl(VirtIODevice *vdev, - VirtQueue *vq) -{ - bool progress = false; - VirtIOSCSI *s = VIRTIO_SCSI(vdev); - - virtio_scsi_acquire(s); - if (!s->dataplane_fenced) { - assert(s->ctx && s->dataplane_started); - progress = virtio_scsi_handle_ctrl_vq(s, vq); - } - virtio_scsi_release(s); - return progress; -} - -static bool virtio_scsi_data_plane_handle_event(VirtIODevice *vdev, - VirtQueue *vq) -{ - bool progress = false; - VirtIOSCSI *s = VIRTIO_SCSI(vdev); - - virtio_scsi_acquire(s); - if (!s->dataplane_fenced) { - assert(s->ctx && s->dataplane_started); - progress = virtio_scsi_handle_event_vq(s, vq); - } - virtio_scsi_release(s); - return progress; -} - static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n) { BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s))); @@ -118,10 +73,10 @@ static void virtio_scsi_dataplane_stop_bh(void *opaque) VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); int i; - virtio_queue_aio_set_host_notifier_handler(vs->ctrl_vq, s->ctx, NULL); - virtio_queue_aio_set_host_notifier_handler(vs->event_vq, s->ctx, NULL); + virtio_queue_aio_detach_host_notifier(vs->ctrl_vq, s->ctx); + virtio_queue_aio_detach_host_notifier(vs->event_vq, s->ctx); for (i = 0; i < vs->conf.num_queues; i++) { - virtio_queue_aio_set_host_notifier_handler(vs->cmd_vqs[i], s->ctx, NULL); + virtio_queue_aio_detach_host_notifier(vs->cmd_vqs[i], s->ctx); } } @@ -181,19 +136,21 @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev) memory_region_transaction_commit(); - aio_context_acquire(s->ctx); - virtio_queue_aio_set_host_notifier_handler(vs->ctrl_vq, s->ctx, - virtio_scsi_data_plane_handle_ctrl); - virtio_queue_aio_set_host_notifier_handler(vs->event_vq, s->ctx, - virtio_scsi_data_plane_handle_event); - - for (i = 0; i < vs->conf.num_queues; i++) { - virtio_queue_aio_set_host_notifier_handler(vs->cmd_vqs[i], s->ctx, - virtio_scsi_data_plane_handle_cmd); - } - + /* + * These fields are visible to the IOThread so we rely on implicit barriers + * in aio_context_acquire() on the write side and aio_notify_accept() on + * the read side. + */ s->dataplane_starting = false; s->dataplane_started = true; + + aio_context_acquire(s->ctx); + virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx); + virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx); + + for (i = 0; i < vs->conf.num_queues; i++) { + virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx); + } aio_context_release(s->ctx); return 0; diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c index 6d80730287..6f6e2e32ba 100644 --- a/hw/scsi/virtio-scsi.c +++ b/hw/scsi/virtio-scsi.c @@ -29,6 +29,43 @@ #include "hw/virtio/virtio-access.h" #include "trace.h" +typedef struct VirtIOSCSIReq { + /* + * Note: + * - fields up to resp_iov are initialized by virtio_scsi_init_req; + * - fields starting at vring are zeroed by virtio_scsi_init_req. 
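The request-initialisation note above, and the virtio_scsi_init_req() implementation just below it, use the partial-memset idiom: the struct is laid out so that everything from a chosen member onwards can be cleared in one call while the earlier, already-initialised members survive. A standalone illustration of the pattern with an invented structure:

#include <stddef.h>
#include <string.h>

struct demo_req {
    int   keep_tag;         /* initialised up front, must survive reuse */
    void *keep_vq;
    int   scratch_status;   /* everything from here down is per-use state */
    char  scratch_sense[16];
};

static void demo_req_recycle(struct demo_req *r)
{
    /* clear only the per-use tail of the structure */
    const size_t zero_from = offsetof(struct demo_req, scratch_status);

    memset((char *)r + zero_from, 0, sizeof(*r) - zero_from);
}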
+ */ + VirtQueueElement elem; + + VirtIOSCSI *dev; + VirtQueue *vq; + QEMUSGList qsgl; + QEMUIOVector resp_iov; + + union { + /* Used for two-stage request submission */ + QTAILQ_ENTRY(VirtIOSCSIReq) next; + + /* Used for cancellation of request during TMFs */ + int remaining; + }; + + SCSIRequest *sreq; + size_t resp_size; + enum SCSIXferMode mode; + union { + VirtIOSCSICmdResp cmd; + VirtIOSCSICtrlTMFResp tmf; + VirtIOSCSICtrlANResp an; + VirtIOSCSIEvent event; + } resp; + union { + VirtIOSCSICmdReq cmd; + VirtIOSCSICtrlTMFReq tmf; + VirtIOSCSICtrlANReq an; + } req; +} VirtIOSCSIReq; + static inline int virtio_scsi_get_lun(uint8_t *lun) { return ((lun[2] << 8) | lun[3]) & 0x3FFF; @@ -45,7 +82,7 @@ static inline SCSIDevice *virtio_scsi_device_get(VirtIOSCSI *s, uint8_t *lun) return scsi_device_get(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun)); } -void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req) +static void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req) { VirtIODevice *vdev = VIRTIO_DEVICE(s); const size_t zero_skip = @@ -58,7 +95,7 @@ void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req) memset((uint8_t *)req + zero_skip, 0, sizeof(*req) - zero_skip); } -void virtio_scsi_free_req(VirtIOSCSIReq *req) +static void virtio_scsi_free_req(VirtIOSCSIReq *req) { qemu_iovec_destroy(&req->resp_iov); qemu_sglist_destroy(&req->qsgl); @@ -328,7 +365,7 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req) goto incorrect_lun; } s->resetting++; - qdev_reset_all(&d->qdev); + device_cold_reset(&d->qdev); s->resetting--; break; @@ -380,7 +417,7 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req) QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) { SCSIDevice *d1 = SCSI_DEVICE(kid->child); if (d1->channel == 0 && d1->id == target) { - qdev_reset_all(&d1->qdev); + device_cold_reset(&d1->qdev); } } rcu_read_unlock(); @@ -460,28 +497,41 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req) } } -bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq) +static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq) { VirtIOSCSIReq *req; - bool progress = false; while ((req = virtio_scsi_pop_req(s, vq))) { - progress = true; virtio_scsi_handle_ctrl_req(s, req); } - return progress; +} + +/* + * If dataplane is configured but not yet started, do so now and return true on + * success. + * + * Dataplane is started by the core virtio code but virtqueue handler functions + * can also be invoked when a guest kicks before DRIVER_OK, so this helper + * function helps us deal with manually starting ioeventfd in that case. 
+ */ +static bool virtio_scsi_defer_to_dataplane(VirtIOSCSI *s) +{ + if (!s->ctx || s->dataplane_started) { + return false; + } + + virtio_device_start_ioeventfd(&s->parent_obj.parent_obj); + return !s->dataplane_fenced; } static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) { VirtIOSCSI *s = (VirtIOSCSI *)vdev; - if (s->ctx) { - virtio_device_start_ioeventfd(vdev); - if (!s->dataplane_fenced) { - return; - } + if (virtio_scsi_defer_to_dataplane(s)) { + return; } + virtio_scsi_acquire(s); virtio_scsi_handle_ctrl_vq(s, vq); virtio_scsi_release(s); @@ -572,7 +622,8 @@ static void virtio_scsi_command_complete(SCSIRequest *r, size_t resid) } static int virtio_scsi_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, - uint8_t *buf, void *hba_private) + uint8_t *buf, size_t buf_len, + void *hba_private) { VirtIOSCSIReq *req = hba_private; @@ -646,7 +697,7 @@ static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req) virtio_scsi_ctx_check(s, d); req->sreq = scsi_req_new(d, req->req.cmd.tag, virtio_scsi_get_lun(req->req.cmd.lun), - req->req.cmd.cdb, req); + req->req.cmd.cdb, vs->cdb_size, req); if (req->sreq->cmd.mode != SCSI_XFER_NONE && (req->sreq->cmd.mode != req->mode || @@ -672,12 +723,11 @@ static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req) scsi_req_unref(sreq); } -bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq) +static void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq) { VirtIOSCSIReq *req, *next; int ret = 0; bool suppress_notifications = virtio_queue_get_notification(vq); - bool progress = false; QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs); @@ -687,7 +737,6 @@ bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq) } while ((req = virtio_scsi_pop_req(s, vq))) { - progress = true; ret = virtio_scsi_handle_cmd_req_prepare(s, req); if (!ret) { QTAILQ_INSERT_TAIL(&reqs, req, next); @@ -712,7 +761,6 @@ bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq) QTAILQ_FOREACH_SAFE(req, &reqs, next, next) { virtio_scsi_handle_cmd_req_submit(s, req); } - return progress; } static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq) @@ -720,12 +768,10 @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq) /* use non-QOM casts in the data path */ VirtIOSCSI *s = (VirtIOSCSI *)vdev; - if (s->ctx) { - virtio_device_start_ioeventfd(vdev); - if (!s->dataplane_fenced) { - return; - } + if (virtio_scsi_defer_to_dataplane(s)) { + return; } + virtio_scsi_acquire(s); virtio_scsi_handle_cmd_vq(s, vq); virtio_scsi_release(s); @@ -785,7 +831,7 @@ static void virtio_scsi_reset(VirtIODevice *vdev) assert(!s->dataplane_started); s->resetting++; - qbus_reset_all(BUS(&s->bus)); + bus_cold_reset(BUS(&s->bus)); s->resetting--; vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE; @@ -793,8 +839,8 @@ static void virtio_scsi_reset(VirtIODevice *vdev) s->events_dropped = false; } -void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev, - uint32_t event, uint32_t reason) +static void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev, + uint32_t event, uint32_t reason) { VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); VirtIOSCSIReq *req; @@ -842,25 +888,21 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev, virtio_scsi_complete_req(req); } -bool virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq) +static void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq) { if (s->events_dropped) { virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); - return 
true; } - return false; } static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq) { VirtIOSCSI *s = VIRTIO_SCSI(vdev); - if (s->ctx) { - virtio_device_start_ioeventfd(vdev); - if (!s->dataplane_fenced) { - return; - } + if (virtio_scsi_defer_to_dataplane(s)) { + return; } + virtio_scsi_acquire(s); virtio_scsi_handle_event_vq(s, vq); virtio_scsi_release(s); @@ -914,6 +956,7 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev, virtio_scsi_push_event(s, sd, VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_RESCAN); + scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED)); virtio_scsi_release(s); } } @@ -931,6 +974,7 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev, virtio_scsi_push_event(s, sd, VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_REMOVED); + scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED)); virtio_scsi_release(s); } @@ -972,8 +1016,7 @@ void virtio_scsi_common_realize(DeviceState *dev, VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(dev); int i; - virtio_init(vdev, "virtio-scsi", VIRTIO_ID_SCSI, - sizeof(VirtIOSCSIConfig)); + virtio_init(vdev, VIRTIO_ID_SCSI, sizeof(VirtIOSCSIConfig)); if (s->conf.num_queues == VIRTIO_SCSI_AUTO_NUM_QUEUES) { s->conf.num_queues = 1; @@ -1019,8 +1062,8 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp) return; } - scsi_bus_new(&s->bus, sizeof(s->bus), dev, - &virtio_scsi_scsi_info, vdev->bus_name); + scsi_bus_init_named(&s->bus, sizeof(s->bus), dev, + &virtio_scsi_scsi_info, vdev->bus_name); /* override default SCSI bus hotplug-handler, with virtio-scsi's one */ qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(dev)); diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c index 1f30cb020a..fa76696855 100644 --- a/hw/scsi/vmw_pvscsi.c +++ b/hw/scsi/vmw_pvscsi.c @@ -50,12 +50,14 @@ #define PVSCSI_MAX_CMD_DATA_WORDS \ (sizeof(PVSCSICmdDescSetupRings)/sizeof(uint32_t)) -#define RS_GET_FIELD(m, field) \ - (ldl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \ - (m)->rs_pa + offsetof(struct PVSCSIRingsState, field))) +#define RS_GET_FIELD(pval, m, field) \ + ldl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \ + (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), \ + pval, MEMTXATTRS_UNSPECIFIED) #define RS_SET_FIELD(m, field, val) \ (stl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \ - (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), val)) + (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), val, \ + MEMTXATTRS_UNSPECIFIED)) struct PVSCSIClass { PCIDeviceClass parent_class; @@ -247,10 +249,11 @@ pvscsi_ring_cleanup(PVSCSIRingInfo *mgr) static hwaddr pvscsi_ring_pop_req_descr(PVSCSIRingInfo *mgr) { - uint32_t ready_ptr = RS_GET_FIELD(mgr, reqProdIdx); + uint32_t ready_ptr; uint32_t ring_size = PVSCSI_MAX_NUM_PAGES_REQ_RING * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; + RS_GET_FIELD(&ready_ptr, mgr, reqProdIdx); if (ready_ptr != mgr->consumed_ptr && ready_ptr - mgr->consumed_ptr < ring_size) { uint32_t next_ready_ptr = @@ -321,8 +324,11 @@ pvscsi_ring_flush_cmp(PVSCSIRingInfo *mgr) static bool pvscsi_ring_msg_has_room(PVSCSIRingInfo *mgr) { - uint32_t prodIdx = RS_GET_FIELD(mgr, msgProdIdx); - uint32_t consIdx = RS_GET_FIELD(mgr, msgConsIdx); + uint32_t prodIdx; + uint32_t consIdx; + + RS_GET_FIELD(&prodIdx, mgr, msgProdIdx); + RS_GET_FIELD(&consIdx, mgr, msgConsIdx); return (prodIdx - consIdx) < (mgr->msg_len_mask + 1); } @@ -439,7 +445,7 @@ static void pvscsi_reset_adapter(PVSCSIState *s) { 
s->resetting++; - qbus_reset_all(BUS(&s->bus)); + bus_cold_reset(BUS(&s->bus)); s->resetting--; pvscsi_process_completion_queue(s); assert(QTAILQ_EMPTY(&s->pending_queue)); @@ -724,7 +730,7 @@ pvscsi_process_request_descriptor(PVSCSIState *s, r->sg.elemAddr = descr->dataAddr; } - r->sreq = scsi_req_new(d, descr->context, r->lun, descr->cdb, r); + r->sreq = scsi_req_new(d, descr->context, r->lun, descr->cdb, descr->cdbLen, r); if (r->sreq->cmd.mode == SCSI_XFER_FROM_DEV && (descr->flags & PVSCSI_FLAG_CMD_DIR_TODEVICE)) { r->cmp.hostStatus = BTSTAT_BADMSG; @@ -874,7 +880,7 @@ pvscsi_on_cmd_reset_device(PVSCSIState *s) if (sdev != NULL) { s->resetting++; - device_legacy_reset(&sdev->qdev); + device_cold_reset(&sdev->qdev); s->resetting--; return PVSCSI_COMMAND_PROCESSING_SUCCEEDED; } @@ -888,7 +894,7 @@ pvscsi_on_cmd_reset_bus(PVSCSIState *s) trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_RESET_BUS"); s->resetting++; - qbus_reset_all(BUS(&s->bus)); + bus_cold_reset(BUS(&s->bus)); s->resetting--; return PVSCSI_COMMAND_PROCESSING_SUCCEEDED; } @@ -1180,8 +1186,7 @@ pvscsi_realizefn(PCIDevice *pci_dev, Error **errp) s->completion_worker = qemu_bh_new(pvscsi_process_completion_queue, s); - scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(pci_dev), - &pvscsi_scsi_info, NULL); + scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(pci_dev), &pvscsi_scsi_info); /* override default SCSI bus hotplug-handler, with pvscsi's one */ qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(s)); pvscsi_reset_state(s); diff --git a/hw/sd/allwinner-sdhost.c b/hw/sd/allwinner-sdhost.c index bea6d97ef8..92a0f42708 100644 --- a/hw/sd/allwinner-sdhost.c +++ b/hw/sd/allwinner-sdhost.c @@ -65,7 +65,7 @@ enum { REG_SD_DLBA = 0x84, /* Descriptor List Base Address */ REG_SD_IDST = 0x88, /* Internal DMA Controller Status */ REG_SD_IDIE = 0x8C, /* Internal DMA Controller IRQ Enable */ - REG_SD_THLDC = 0x100, /* Card Threshold Control */ + REG_SD_THLDC = 0x100, /* Card Threshold Control / FIFO (sun4i only)*/ REG_SD_DSBD = 0x10C, /* eMMC DDR Start Bit Detection Control */ REG_SD_RES_CRC = 0x110, /* Response CRC from card/eMMC */ REG_SD_DATA7_CRC = 0x114, /* CRC Data 7 from card/eMMC */ @@ -114,7 +114,9 @@ enum { }; enum { + SD_STAR_FIFO_EMPTY = (1 << 2), SD_STAR_CARD_PRESENT = (1 << 8), + SD_STAR_FIFO_LEVEL_1 = (1 << 17), }; enum { @@ -300,6 +302,30 @@ static void allwinner_sdhost_auto_stop(AwSdHostState *s) } } +static void read_descriptor(AwSdHostState *s, hwaddr desc_addr, + TransferDescriptor *desc) +{ + uint32_t desc_words[4]; + dma_memory_read(&s->dma_as, desc_addr, &desc_words, sizeof(desc_words), + MEMTXATTRS_UNSPECIFIED); + desc->status = le32_to_cpu(desc_words[0]); + desc->size = le32_to_cpu(desc_words[1]); + desc->addr = le32_to_cpu(desc_words[2]); + desc->next = le32_to_cpu(desc_words[3]); +} + +static void write_descriptor(AwSdHostState *s, hwaddr desc_addr, + const TransferDescriptor *desc) +{ + uint32_t desc_words[4]; + desc_words[0] = cpu_to_le32(desc->status); + desc_words[1] = cpu_to_le32(desc->size); + desc_words[2] = cpu_to_le32(desc->addr); + desc_words[3] = cpu_to_le32(desc->next); + dma_memory_write(&s->dma_as, desc_addr, &desc_words, sizeof(desc_words), + MEMTXATTRS_UNSPECIFIED); +} + static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s, hwaddr desc_addr, TransferDescriptor *desc, @@ -310,8 +336,7 @@ static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s, uint32_t num_bytes = max_bytes; uint8_t buf[1024]; - /* Read descriptor */ - dma_memory_read(&s->dma_as, desc_addr, desc, sizeof(*desc)); + 
read_descriptor(s, desc_addr, desc); if (desc->size == 0) { desc->size = klass->max_desc_size; } else if (desc->size > klass->max_desc_size) { @@ -337,23 +362,23 @@ static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s, /* Write to SD bus */ if (is_write) { dma_memory_read(&s->dma_as, - (desc->addr & DESC_SIZE_MASK) + num_done, - buf, buf_bytes); + (desc->addr & DESC_SIZE_MASK) + num_done, buf, + buf_bytes, MEMTXATTRS_UNSPECIFIED); sdbus_write_data(&s->sdbus, buf, buf_bytes); /* Read from SD bus */ } else { sdbus_read_data(&s->sdbus, buf, buf_bytes); dma_memory_write(&s->dma_as, - (desc->addr & DESC_SIZE_MASK) + num_done, - buf, buf_bytes); + (desc->addr & DESC_SIZE_MASK) + num_done, buf, + buf_bytes, MEMTXATTRS_UNSPECIFIED); } num_done += buf_bytes; } /* Clear hold flag and flush descriptor */ desc->status &= ~DESC_STATUS_HOLD; - dma_memory_write(&s->dma_as, desc_addr, desc, sizeof(*desc)); + write_descriptor(s, desc_addr, desc); return num_done; } @@ -411,10 +436,29 @@ static void allwinner_sdhost_dma(AwSdHostState *s) } } +static uint32_t allwinner_sdhost_fifo_read(AwSdHostState *s) +{ + uint32_t res = 0; + + if (sdbus_data_ready(&s->sdbus)) { + sdbus_read_data(&s->sdbus, &res, sizeof(uint32_t)); + le32_to_cpus(&res); + allwinner_sdhost_update_transfer_cnt(s, sizeof(uint32_t)); + allwinner_sdhost_auto_stop(s); + allwinner_sdhost_update_irq(s); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "%s: no data ready on SD bus\n", + __func__); + } + + return res; +} + static uint64_t allwinner_sdhost_read(void *opaque, hwaddr offset, unsigned size) { AwSdHostState *s = AW_SDHOST(opaque); + AwSdHostClass *sc = AW_SDHOST_GET_CLASS(s); uint32_t res = 0; switch (offset) { @@ -465,6 +509,11 @@ static uint64_t allwinner_sdhost_read(void *opaque, hwaddr offset, break; case REG_SD_STAR: /* Status */ res = s->status; + if (sdbus_data_ready(&s->sdbus)) { + res |= SD_STAR_FIFO_LEVEL_1; + } else { + res |= SD_STAR_FIFO_EMPTY; + } break; case REG_SD_FWLR: /* FIFO Water Level */ res = s->fifo_wlevel; @@ -499,8 +548,12 @@ static uint64_t allwinner_sdhost_read(void *opaque, hwaddr offset, case REG_SD_IDIE: /* Internal DMA Controller Interrupt Enable */ res = s->dmac_irq; break; - case REG_SD_THLDC: /* Card Threshold Control */ - res = s->card_threshold; + case REG_SD_THLDC: /* Card Threshold Control or FIFO register (sun4i) */ + if (sc->is_sun4i) { + res = allwinner_sdhost_fifo_read(s); + } else { + res = s->card_threshold; + } break; case REG_SD_DSBD: /* eMMC DDR Start Bit Detection Control */ res = s->startbit_detect; @@ -522,16 +575,7 @@ static uint64_t allwinner_sdhost_read(void *opaque, hwaddr offset, res = s->status_crc; break; case REG_SD_FIFO: /* Read/Write FIFO */ - if (sdbus_data_ready(&s->sdbus)) { - sdbus_read_data(&s->sdbus, &res, sizeof(uint32_t)); - le32_to_cpus(&res); - allwinner_sdhost_update_transfer_cnt(s, sizeof(uint32_t)); - allwinner_sdhost_auto_stop(s); - allwinner_sdhost_update_irq(s); - } else { - qemu_log_mask(LOG_GUEST_ERROR, "%s: no data ready on SD bus\n", - __func__); - } + res = allwinner_sdhost_fifo_read(s); break; default: qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset %" @@ -544,11 +588,20 @@ static uint64_t allwinner_sdhost_read(void *opaque, hwaddr offset, return res; } +static void allwinner_sdhost_fifo_write(AwSdHostState *s, uint64_t value) +{ + uint32_t u32 = cpu_to_le32(value); + sdbus_write_data(&s->sdbus, &u32, sizeof(u32)); + allwinner_sdhost_update_transfer_cnt(s, sizeof(u32)); + allwinner_sdhost_auto_stop(s); + allwinner_sdhost_update_irq(s); +} + 
static void allwinner_sdhost_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { AwSdHostState *s = AW_SDHOST(opaque); - uint32_t u32; + AwSdHostClass *sc = AW_SDHOST_GET_CLASS(s); trace_allwinner_sdhost_write(offset, value, size); @@ -648,18 +701,18 @@ static void allwinner_sdhost_write(void *opaque, hwaddr offset, s->dmac_irq = value; allwinner_sdhost_update_irq(s); break; - case REG_SD_THLDC: /* Card Threshold Control */ - s->card_threshold = value; + case REG_SD_THLDC: /* Card Threshold Control or FIFO (sun4i) */ + if (sc->is_sun4i) { + allwinner_sdhost_fifo_write(s, value); + } else { + s->card_threshold = value; + } break; case REG_SD_DSBD: /* eMMC DDR Start Bit Detection Control */ s->startbit_detect = value; break; case REG_SD_FIFO: /* Read/Write FIFO */ - u32 = cpu_to_le32(value); - sdbus_write_data(&s->sdbus, &u32, sizeof(u32)); - allwinner_sdhost_update_transfer_cnt(s, sizeof(u32)); - allwinner_sdhost_auto_stop(s); - allwinner_sdhost_update_irq(s); + allwinner_sdhost_fifo_write(s, value); break; case REG_SD_RES_CRC: /* Response CRC from card/eMMC */ case REG_SD_DATA7_CRC: /* CRC Data 7 from card/eMMC */ @@ -738,8 +791,8 @@ static void allwinner_sdhost_init(Object *obj) { AwSdHostState *s = AW_SDHOST(obj); - qbus_create_inplace(&s->sdbus, sizeof(s->sdbus), - TYPE_AW_SDHOST_BUS, DEVICE(s), "sd-bus"); + qbus_init(&s->sdbus, sizeof(s->sdbus), + TYPE_AW_SDHOST_BUS, DEVICE(s), "sd-bus"); memory_region_init_io(&s->iomem, obj, &allwinner_sdhost_ops, s, TYPE_AW_SDHOST, 4 * KiB); @@ -825,15 +878,17 @@ static void allwinner_sdhost_sun4i_class_init(ObjectClass *klass, void *data) { AwSdHostClass *sc = AW_SDHOST_CLASS(klass); sc->max_desc_size = 8 * KiB; + sc->is_sun4i = true; } static void allwinner_sdhost_sun5i_class_init(ObjectClass *klass, void *data) { AwSdHostClass *sc = AW_SDHOST_CLASS(klass); sc->max_desc_size = 64 * KiB; + sc->is_sun4i = false; } -static TypeInfo allwinner_sdhost_info = { +static const TypeInfo allwinner_sdhost_info = { .name = TYPE_AW_SDHOST, .parent = TYPE_SYS_BUS_DEVICE, .instance_init = allwinner_sdhost_init, diff --git a/hw/sd/aspeed_sdhci.c b/hw/sd/aspeed_sdhci.c index 3299844de6..be8cafd65f 100644 --- a/hw/sd/aspeed_sdhci.c +++ b/hw/sd/aspeed_sdhci.c @@ -14,6 +14,7 @@ #include "hw/irq.h" #include "migration/vmstate.h" #include "hw/qdev-properties.h" +#include "trace.h" #define ASPEED_SDHCI_INFO 0x00 #define ASPEED_SDHCI_INFO_SLOT1 (1 << 17) @@ -60,6 +61,8 @@ static uint64_t aspeed_sdhci_read(void *opaque, hwaddr addr, unsigned int size) } } + trace_aspeed_sdhci_read(addr, size, (uint64_t) val); + return (uint64_t)val; } @@ -68,6 +71,8 @@ static void aspeed_sdhci_write(void *opaque, hwaddr addr, uint64_t val, { AspeedSDHCIState *sdhci = opaque; + trace_aspeed_sdhci_write(addr, size, val); + switch (addr) { case ASPEED_SDHCI_INFO: /* The RESET bit automatically clears. 
*/ @@ -193,7 +198,7 @@ static void aspeed_sdhci_class_init(ObjectClass *classp, void *data) device_class_set_props(dc, aspeed_sdhci_properties); } -static TypeInfo aspeed_sdhci_info = { +static const TypeInfo aspeed_sdhci_info = { .name = TYPE_ASPEED_SDHCI, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(AspeedSDHCIState), diff --git a/hw/sd/bcm2835_sdhost.c b/hw/sd/bcm2835_sdhost.c index 50f5fdb88b..9431c35914 100644 --- a/hw/sd/bcm2835_sdhost.c +++ b/hw/sd/bcm2835_sdhost.c @@ -403,8 +403,8 @@ static void bcm2835_sdhost_init(Object *obj) { BCM2835SDHostState *s = BCM2835_SDHOST(obj); - qbus_create_inplace(&s->sdbus, sizeof(s->sdbus), - TYPE_BCM2835_SDHOST_BUS, DEVICE(s), "sd-bus"); + qbus_init(&s->sdbus, sizeof(s->sdbus), + TYPE_BCM2835_SDHOST_BUS, DEVICE(s), "sd-bus"); memory_region_init_io(&s->iomem, obj, &bcm2835_sdhost_ops, s, TYPE_BCM2835_SDHOST, 0x1000); @@ -436,7 +436,7 @@ static void bcm2835_sdhost_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_bcm2835_sdhost; } -static TypeInfo bcm2835_sdhost_info = { +static const TypeInfo bcm2835_sdhost_info = { .name = TYPE_BCM2835_SDHOST, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(BCM2835SDHostState), diff --git a/hw/sd/cadence_sdhci.c b/hw/sd/cadence_sdhci.c index 56b8bae1c3..75db34befe 100644 --- a/hw/sd/cadence_sdhci.c +++ b/hw/sd/cadence_sdhci.c @@ -175,7 +175,7 @@ static void cadence_sdhci_class_init(ObjectClass *classp, void *data) dc->vmsd = &vmstate_cadence_sdhci; } -static TypeInfo cadence_sdhci_info = { +static const TypeInfo cadence_sdhci_info = { .name = TYPE_CADENCE_SDHCI, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(CadenceSDHCIState), diff --git a/hw/sd/meson.build b/hw/sd/meson.build index f1ce357a3b..807ca07b7c 100644 --- a/hw/sd/meson.build +++ b/hw/sd/meson.build @@ -9,4 +9,5 @@ softmmu_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_mmci.c')) softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_sdhost.c')) softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_sdhci.c')) softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-sdhost.c')) +softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_sdhci.c')) softmmu_ss.add(when: 'CONFIG_CADENCE_SDHCI', if_true: files('cadence_sdhci.c')) diff --git a/hw/sd/npcm7xx_sdhci.c b/hw/sd/npcm7xx_sdhci.c new file mode 100644 index 0000000000..b2f5b4a542 --- /dev/null +++ b/hw/sd/npcm7xx_sdhci.c @@ -0,0 +1,182 @@ +/* + * NPCM7xx SD-3.0 / eMMC-4.51 Host Controller + * + * Copyright (c) 2021 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" + +#include "hw/sd/npcm7xx_sdhci.h" +#include "migration/vmstate.h" +#include "sdhci-internal.h" +#include "qemu/log.h" + +static uint64_t npcm7xx_sdhci_read(void *opaque, hwaddr addr, unsigned int size) +{ + NPCM7xxSDHCIState *s = opaque; + uint64_t val = 0; + + switch (addr) { + case NPCM7XX_PRSTVALS_0: + case NPCM7XX_PRSTVALS_1: + case NPCM7XX_PRSTVALS_2: + case NPCM7XX_PRSTVALS_3: + case NPCM7XX_PRSTVALS_4: + case NPCM7XX_PRSTVALS_5: + val = s->regs.prstvals[(addr - NPCM7XX_PRSTVALS_0) / 2]; + break; + case NPCM7XX_BOOTTOCTRL: + val = s->regs.boottoctrl; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "SDHCI read of nonexistent reg: 0x%02" + HWADDR_PRIx, addr); + break; + } + + return val; +} + +static void npcm7xx_sdhci_write(void *opaque, hwaddr addr, uint64_t val, + unsigned int size) +{ + NPCM7xxSDHCIState *s = opaque; + + switch (addr) { + case NPCM7XX_BOOTTOCTRL: + s->regs.boottoctrl = val; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "SDHCI write of nonexistent reg: 0x%02" + HWADDR_PRIx, addr); + break; + } +} + +static bool npcm7xx_sdhci_check_mem_op(void *opaque, hwaddr addr, + unsigned size, bool is_write, + MemTxAttrs attrs) +{ + switch (addr) { + case NPCM7XX_PRSTVALS_0: + case NPCM7XX_PRSTVALS_1: + case NPCM7XX_PRSTVALS_2: + case NPCM7XX_PRSTVALS_3: + case NPCM7XX_PRSTVALS_4: + case NPCM7XX_PRSTVALS_5: + /* RO Word */ + return !is_write && size == 2; + case NPCM7XX_BOOTTOCTRL: + /* R/W Dword */ + return size == 4; + default: + return false; + } +} + +static const MemoryRegionOps npcm7xx_sdhci_ops = { + .read = npcm7xx_sdhci_read, + .write = npcm7xx_sdhci_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + .unaligned = false, + .accepts = npcm7xx_sdhci_check_mem_op, + }, +}; + +static void npcm7xx_sdhci_realize(DeviceState *dev, Error **errp) +{ + NPCM7xxSDHCIState *s = NPCM7XX_SDHCI(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + SysBusDevice *sbd_sdhci = SYS_BUS_DEVICE(&s->sdhci); + + memory_region_init(&s->container, OBJECT(s), + "npcm7xx.sdhci-container", 0x1000); + sysbus_init_mmio(sbd, &s->container); + + memory_region_init_io(&s->iomem, OBJECT(s), &npcm7xx_sdhci_ops, s, + TYPE_NPCM7XX_SDHCI, NPCM7XX_SDHCI_REGSIZE); + memory_region_add_subregion_overlap(&s->container, NPCM7XX_PRSTVALS, + &s->iomem, 1); + + sysbus_realize(sbd_sdhci, errp); + memory_region_add_subregion(&s->container, 0, + sysbus_mmio_get_region(sbd_sdhci, 0)); + + /* propagate irq and "sd-bus" from generic-sdhci */ + sysbus_pass_irq(sbd, sbd_sdhci); + s->bus = qdev_get_child_bus(DEVICE(sbd_sdhci), "sd-bus"); + + /* Set the read only preset values. 
*/ + memset(s->regs.prstvals, 0, sizeof(s->regs.prstvals)); + s->regs.prstvals[0] = NPCM7XX_PRSTVALS_0_RESET; + s->regs.prstvals[1] = NPCM7XX_PRSTVALS_1_RESET; + s->regs.prstvals[3] = NPCM7XX_PRSTVALS_3_RESET; +} + +static void npcm7xx_sdhci_reset(DeviceState *dev) +{ + NPCM7xxSDHCIState *s = NPCM7XX_SDHCI(dev); + device_cold_reset(DEVICE(&s->sdhci)); + s->regs.boottoctrl = 0; + + s->sdhci.prnsts = NPCM7XX_PRSNTS_RESET; + s->sdhci.blkgap = NPCM7XX_BLKGAP_RESET; + s->sdhci.capareg = NPCM7XX_CAPAB_RESET; + s->sdhci.maxcurr = NPCM7XX_MAXCURR_RESET; + s->sdhci.version = NPCM7XX_HCVER_RESET; +} + +static const VMStateDescription vmstate_npcm7xx_sdhci = { + .name = TYPE_NPCM7XX_SDHCI, + .version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(regs.boottoctrl, NPCM7xxSDHCIState), + VMSTATE_END_OF_LIST(), + }, +}; + +static void npcm7xx_sdhci_class_init(ObjectClass *classp, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(classp); + + dc->desc = "NPCM7xx SD/eMMC Host Controller"; + dc->realize = npcm7xx_sdhci_realize; + dc->reset = npcm7xx_sdhci_reset; + dc->vmsd = &vmstate_npcm7xx_sdhci; +} + +static void npcm7xx_sdhci_instance_init(Object *obj) +{ + NPCM7xxSDHCIState *s = NPCM7XX_SDHCI(obj); + + object_initialize_child(OBJECT(s), "generic-sdhci", &s->sdhci, + TYPE_SYSBUS_SDHCI); +} + +static const TypeInfo npcm7xx_sdhci_info = { + .name = TYPE_NPCM7XX_SDHCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCM7xxSDHCIState), + .instance_init = npcm7xx_sdhci_instance_init, + .class_init = npcm7xx_sdhci_class_init, +}; + +static void npcm7xx_sdhci_register_types(void) +{ + type_register_static(&npcm7xx_sdhci_info); +} + +type_init(npcm7xx_sdhci_register_types) diff --git a/hw/sd/pl181.c b/hw/sd/pl181.c index 960f155098..5e554bd467 100644 --- a/hw/sd/pl181.c +++ b/hw/sd/pl181.c @@ -506,8 +506,7 @@ static void pl181_init(Object *obj) qdev_init_gpio_out_named(dev, &s->card_readonly, "card-read-only", 1); qdev_init_gpio_out_named(dev, &s->card_inserted, "card-inserted", 1); - qbus_create_inplace(&s->sdbus, sizeof(s->sdbus), - TYPE_PL181_BUS, dev, "sd-bus"); + qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_PL181_BUS, dev, "sd-bus"); } static void pl181_class_init(ObjectClass *klass, void *data) diff --git a/hw/sd/pxa2xx_mmci.c b/hw/sd/pxa2xx_mmci.c index 3dd2fc7a83..124fbf8bbd 100644 --- a/hw/sd/pxa2xx_mmci.c +++ b/hw/sd/pxa2xx_mmci.c @@ -560,8 +560,8 @@ static void pxa2xx_mmci_instance_init(Object *obj) qdev_init_gpio_out_named(dev, &s->rx_dma, "rx-dma", 1); qdev_init_gpio_out_named(dev, &s->tx_dma, "tx-dma", 1); - qbus_create_inplace(&s->sdbus, sizeof(s->sdbus), - TYPE_PXA2XX_MMCI_BUS, DEVICE(obj), "sd-bus"); + qbus_init(&s->sdbus, sizeof(s->sdbus), + TYPE_PXA2XX_MMCI_BUS, DEVICE(obj), "sd-bus"); } static void pxa2xx_mmci_class_init(ObjectClass *klass, void *data) diff --git a/hw/sd/sd.c b/hw/sd/sd.c index bb5dbff68c..da5bdd134a 100644 --- a/hw/sd/sd.c +++ b/hw/sd/sd.c @@ -47,7 +47,6 @@ #include "qemu/timer.h" #include "qemu/log.h" #include "qemu/module.h" -#include "qemu-common.h" #include "sdmmc-internal.h" #include "trace.h" @@ -116,8 +115,8 @@ struct SDState { int32_t state; /* current card state, one of SDCardStates */ uint32_t vhs; bool wp_switch; - unsigned long *wp_groups; - int32_t wpgrps_size; + unsigned long *wp_group_bmap; + int32_t wp_group_bits; uint64_t size; uint32_t blk_len; uint32_t multi_blk_cnt; @@ -290,12 +289,6 @@ FIELD(OCR, CARD_POWER_UP, 31, 1) | R_OCR_CARD_CAPACITY_MASK \ | R_OCR_CARD_POWER_UP_MASK) -static void sd_set_ocr(SDState *sd) -{ - /* All voltages OK 
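The sd.c hunks below rename wp_groups/wpgrps_size to wp_group_bmap/wp_group_bits; underneath, the write-protect state is still QEMU's generic bitmap, one bit per protect group. A tiny usage sketch of those helpers (the group count and index are invented):

#include "qemu/osdep.h"
#include "qemu/bitmap.h"

static void demo_wp_bitmap(void)
{
    long nbits = 64;                        /* number of WP groups, invented */
    unsigned long *bmap = bitmap_new(nbits);

    set_bit(3, bmap);                       /* protect group 3 */
    if (test_bit(3, bmap)) {
        clear_bit(3, bmap);                 /* ...and release it again */
    }
    g_free(bmap);
}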
*/ - sd->ocr = R_OCR_VDD_VOLTAGE_WIN_HI_MASK; -} - static void sd_ocr_powerup(void *opaque) { SDState *sd = opaque; @@ -311,6 +304,22 @@ static void sd_ocr_powerup(void *opaque) } } +static void sd_set_ocr(SDState *sd) +{ + /* All voltages OK */ + sd->ocr = R_OCR_VDD_VOLTAGE_WIN_HI_MASK; + + if (sd->spi) { + /* + * We don't need to emulate power up sequence in SPI-mode. + * Thus, the card's power up status bit should be set to 1 when reset. + * The card's capacity status bit should also be set if SD card size + * is larger than 2GB for SDHC support. + */ + sd_ocr_powerup(sd); + } +} + static void sd_set_scr(SDState *sd) { sd->scr[0] = 0 << 4; /* SCR structure version 1.0 */ @@ -560,6 +569,7 @@ static void sd_reset(DeviceState *dev) sd->state = sd_idle_state; sd->rca = 0x0000; + sd->size = size; sd_set_ocr(sd); sd_set_scr(sd); sd_set_cid(sd); @@ -567,14 +577,13 @@ static void sd_reset(DeviceState *dev) sd_set_cardstatus(sd); sd_set_sdstatus(sd); - g_free(sd->wp_groups); + g_free(sd->wp_group_bmap); sd->wp_switch = sd->blk ? !blk_is_writable(sd->blk) : false; - sd->wpgrps_size = sect; - sd->wp_groups = bitmap_new(sd->wpgrps_size); + sd->wp_group_bits = sect; + sd->wp_group_bmap = bitmap_new(sd->wp_group_bits); memset(sd->function_group, 0, sizeof(sd->function_group)); sd->erase_start = INVALID_ADDRESS; sd->erase_end = INVALID_ADDRESS; - sd->size = size; sd->blk_len = 0x200; sd->pwd_len = 0; sd->expecting_acmd = false; @@ -673,7 +682,7 @@ static const VMStateDescription sd_vmstate = { VMSTATE_UINT32(card_status, SDState), VMSTATE_PARTIAL_BUFFER(sd_status, SDState, 1), VMSTATE_UINT32(vhs, SDState), - VMSTATE_BITMAP(wp_groups, SDState, 0, wpgrps_size), + VMSTATE_BITMAP(wp_group_bmap, SDState, 0, wp_group_bits), VMSTATE_UINT32(blk_len, SDState), VMSTATE_UINT32(multi_blk_cnt, SDState), VMSTATE_UINT32(erase_start, SDState), @@ -743,7 +752,7 @@ void sd_set_cb(SDState *sd, qemu_irq readonly, qemu_irq insert) static void sd_blk_read(SDState *sd, uint64_t addr, uint32_t len) { trace_sdcard_read_block(addr, len); - if (!sd->blk || blk_pread(sd->blk, addr, sd->data, len) < 0) { + if (!sd->blk || blk_pread(sd->blk, addr, len, sd->data, 0) < 0) { fprintf(stderr, "sd_blk_read: read error on host side\n"); } } @@ -751,7 +760,7 @@ static void sd_blk_read(SDState *sd, uint64_t addr, uint32_t len) static void sd_blk_write(SDState *sd, uint64_t addr, uint32_t len) { trace_sdcard_write_block(addr, len); - if (!sd->blk || blk_pwrite(sd->blk, addr, sd->data, len, 0) < 0) { + if (!sd->blk || blk_pwrite(sd->blk, addr, len, sd->data, 0) < 0) { fprintf(stderr, "sd_blk_write: write error on host side\n"); } } @@ -803,8 +812,8 @@ static void sd_erase(SDState *sd) if (sdsc) { /* Only SDSC cards support write protect groups */ wpnum = sd_addr_to_wpnum(erase_addr); - assert(wpnum < sd->wpgrps_size); - if (test_bit(wpnum, sd->wp_groups)) { + assert(wpnum < sd->wp_group_bits); + if (test_bit(wpnum, sd->wp_group_bmap)) { sd->card_status |= WP_ERASE_SKIP; continue; } @@ -828,8 +837,8 @@ static uint32_t sd_wpbits(SDState *sd, uint64_t addr) */ continue; } - assert(wpnum < sd->wpgrps_size); - if (test_bit(wpnum, sd->wp_groups)) { + assert(wpnum < sd->wp_group_bits); + if (test_bit(wpnum, sd->wp_group_bmap)) { ret |= (1 << i); } } @@ -869,7 +878,7 @@ static void sd_function_switch(SDState *sd, uint32_t arg) static inline bool sd_wp_addr(SDState *sd, uint64_t addr) { - return test_bit(sd_addr_to_wpnum(addr), sd->wp_groups); + return test_bit(sd_addr_to_wpnum(addr), sd->wp_group_bmap); } static void sd_lock_command(SDState *sd) @@ 
-897,7 +906,7 @@ static void sd_lock_command(SDState *sd) sd->card_status |= LOCK_UNLOCK_FAILED; return; } - bitmap_zero(sd->wp_groups, sd->wpgrps_size); + bitmap_zero(sd->wp_group_bmap, sd->wp_group_bits); sd->csd[14] &= ~0x10; sd->card_status &= ~CARD_IS_LOCKED; sd->pwd_len = 0; @@ -1348,7 +1357,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) } sd->state = sd_programming_state; - set_bit(sd_addr_to_wpnum(addr), sd->wp_groups); + set_bit(sd_addr_to_wpnum(addr), sd->wp_group_bmap); /* Bzzzzzzztt .... Operation complete. */ sd->state = sd_transfer_state; return sd_r1b; @@ -1370,7 +1379,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) } sd->state = sd_programming_state; - clear_bit(sd_addr_to_wpnum(addr), sd->wp_groups); + clear_bit(sd_addr_to_wpnum(addr), sd->wp_group_bmap); /* Bzzzzzzztt .... Operation complete. */ sd->state = sd_transfer_state; return sd_r1b; diff --git a/hw/sd/sdhci-internal.h b/hw/sd/sdhci-internal.h index e8c753d6d1..964570f8e8 100644 --- a/hw/sd/sdhci-internal.h +++ b/hw/sd/sdhci-internal.h @@ -288,26 +288,6 @@ enum { extern const VMStateDescription sdhci_vmstate; - -#define ESDHC_MIX_CTRL 0x48 - -#define ESDHC_VENDOR_SPEC 0xc0 -#define ESDHC_IMX_FRC_SDCLK_ON (1 << 8) - -#define ESDHC_DLL_CTRL 0x60 - -#define ESDHC_TUNING_CTRL 0xcc -#define ESDHC_TUNE_CTRL_STATUS 0x68 -#define ESDHC_WTMK_LVL 0x44 - -/* Undocumented register used by guests working around erratum ERR004536 */ -#define ESDHC_UNDOCUMENTED_REG27 0x6c - -#define ESDHC_CTRL_4BITBUS (0x1 << 1) -#define ESDHC_CTRL_8BITBUS (0x2 << 1) - -#define ESDHC_PRNSTS_SDSTB (1 << 3) - /* * Default SD/MMC host controller features information, which will be * presented in CAPABILITIES register of generic SD host controller at reset. diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c index 5b8678110b..306070c872 100644 --- a/hw/sd/sdhci.c +++ b/hw/sd/sdhci.c @@ -616,8 +616,8 @@ static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s) s->blkcnt--; } } - dma_memory_write(s->dma_as, s->sdmasysad, - &s->fifo_buffer[begin], s->data_count - begin); + dma_memory_write(s->dma_as, s->sdmasysad, &s->fifo_buffer[begin], + s->data_count - begin, MEMTXATTRS_UNSPECIFIED); s->sdmasysad += s->data_count - begin; if (s->data_count == block_size) { s->data_count = 0; @@ -637,8 +637,8 @@ static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s) s->data_count = block_size; boundary_count -= block_size - begin; } - dma_memory_read(s->dma_as, s->sdmasysad, - &s->fifo_buffer[begin], s->data_count - begin); + dma_memory_read(s->dma_as, s->sdmasysad, &s->fifo_buffer[begin], + s->data_count - begin, MEMTXATTRS_UNSPECIFIED); s->sdmasysad += s->data_count - begin; if (s->data_count == block_size) { sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size); @@ -670,9 +670,11 @@ static void sdhci_sdma_transfer_single_block(SDHCIState *s) if (s->trnmod & SDHC_TRNS_READ) { sdbus_read_data(&s->sdbus, s->fifo_buffer, datacnt); - dma_memory_write(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt); + dma_memory_write(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt, + MEMTXATTRS_UNSPECIFIED); } else { - dma_memory_read(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt); + dma_memory_read(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt, + MEMTXATTRS_UNSPECIFIED); sdbus_write_data(&s->sdbus, s->fifo_buffer, datacnt); } s->blkcnt--; @@ -694,7 +696,8 @@ static void get_adma_description(SDHCIState *s, ADMADescr *dscr) hwaddr entry_addr = (hwaddr)s->admasysaddr; switch (SDHC_DMA_TYPE(s->hostctl1)) { case SDHC_CTRL_ADMA2_32: - 
dma_memory_read(s->dma_as, entry_addr, &adma2, sizeof(adma2)); + dma_memory_read(s->dma_as, entry_addr, &adma2, sizeof(adma2), + MEMTXATTRS_UNSPECIFIED); adma2 = le64_to_cpu(adma2); /* The spec does not specify endianness of descriptor table. * We currently assume that it is LE. @@ -705,7 +708,8 @@ static void get_adma_description(SDHCIState *s, ADMADescr *dscr) dscr->incr = 8; break; case SDHC_CTRL_ADMA1_32: - dma_memory_read(s->dma_as, entry_addr, &adma1, sizeof(adma1)); + dma_memory_read(s->dma_as, entry_addr, &adma1, sizeof(adma1), + MEMTXATTRS_UNSPECIFIED); adma1 = le32_to_cpu(adma1); dscr->addr = (hwaddr)(adma1 & 0xFFFFF000); dscr->attr = (uint8_t)extract32(adma1, 0, 7); @@ -717,10 +721,13 @@ static void get_adma_description(SDHCIState *s, ADMADescr *dscr) } break; case SDHC_CTRL_ADMA2_64: - dma_memory_read(s->dma_as, entry_addr, &dscr->attr, 1); - dma_memory_read(s->dma_as, entry_addr + 2, &dscr->length, 2); + dma_memory_read(s->dma_as, entry_addr, &dscr->attr, 1, + MEMTXATTRS_UNSPECIFIED); + dma_memory_read(s->dma_as, entry_addr + 2, &dscr->length, 2, + MEMTXATTRS_UNSPECIFIED); dscr->length = le16_to_cpu(dscr->length); - dma_memory_read(s->dma_as, entry_addr + 4, &dscr->addr, 8); + dma_memory_read(s->dma_as, entry_addr + 4, &dscr->addr, 8, + MEMTXATTRS_UNSPECIFIED); dscr->addr = le64_to_cpu(dscr->addr); dscr->attr &= (uint8_t) ~0xC0; dscr->incr = 12; @@ -734,7 +741,9 @@ static void sdhci_do_adma(SDHCIState *s) { unsigned int begin, length; const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK; + const MemTxAttrs attrs = { .memory = true }; ADMADescr dscr = {}; + MemTxResult res; int i; if (s->trnmod & SDHC_TRNS_BLK_CNT_EN && !s->blkcnt) { @@ -783,9 +792,13 @@ static void sdhci_do_adma(SDHCIState *s) s->data_count = block_size; length -= block_size - begin; } - dma_memory_write(s->dma_as, dscr.addr, - &s->fifo_buffer[begin], - s->data_count - begin); + res = dma_memory_write(s->dma_as, dscr.addr, + &s->fifo_buffer[begin], + s->data_count - begin, + attrs); + if (res != MEMTX_OK) { + break; + } dscr.addr += s->data_count - begin; if (s->data_count == block_size) { s->data_count = 0; @@ -808,9 +821,13 @@ static void sdhci_do_adma(SDHCIState *s) s->data_count = block_size; length -= block_size - begin; } - dma_memory_read(s->dma_as, dscr.addr, - &s->fifo_buffer[begin], - s->data_count - begin); + res = dma_memory_read(s->dma_as, dscr.addr, + &s->fifo_buffer[begin], + s->data_count - begin, + attrs); + if (res != MEMTX_OK) { + break; + } dscr.addr += s->data_count - begin; if (s->data_count == block_size) { sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size); @@ -824,7 +841,16 @@ static void sdhci_do_adma(SDHCIState *s) } } } - s->admasysaddr += dscr.incr; + if (res != MEMTX_OK) { + if (s->errintstsen & SDHC_EISEN_ADMAERR) { + trace_sdhci_error("Set ADMA error flag"); + s->errintsts |= SDHC_EIS_ADMAERR; + s->norintsts |= SDHC_NIS_ERR; + } + sdhci_update_irq(s); + } else { + s->admasysaddr += dscr.incr; + } break; case SDHC_ADMA_ATTR_ACT_LINK: /* link to next descriptor table */ s->admasysaddr = dscr.addr; @@ -1337,8 +1363,7 @@ static void sdhci_init_readonly_registers(SDHCIState *s, Error **errp) void sdhci_initfn(SDHCIState *s) { - qbus_create_inplace(&s->sdbus, sizeof(s->sdbus), - TYPE_SDHCI_BUS, DEVICE(s), "sd-bus"); + qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SDHCI_BUS, DEVICE(s), "sd-bus"); s->insert_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_raise_insertion_irq, s); s->transfer_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_data_transfer, s); @@ -1552,6 +1577,25 @@ 
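A pattern that recurs through the sdhci and allwinner-sdhost hunks above is that dma_memory_read()/dma_memory_write() now take explicit MemTxAttrs and their MemTxResult is worth checking, as the ADMA path does when it latches SDHC_EIS_ADMAERR. A minimal sketch of the checked-read shape, independent of any particular device (names are invented):

#include "qemu/osdep.h"
#include "sysemu/dma.h"

static bool demo_dma_read_checked(AddressSpace *as, dma_addr_t addr,
                                  void *buf, dma_addr_t len)
{
    MemTxResult res = dma_memory_read(as, addr, buf, len,
                                      MEMTXATTRS_UNSPECIFIED);

    /* MEMTX_OK is zero; anything else is a bus error the device model
     * should surface to the guest rather than silently ignore */
    return res == MEMTX_OK;
}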
static const TypeInfo sdhci_bus_info = { /* --- qdev i.MX eSDHC --- */ +#define USDHC_MIX_CTRL 0x48 + +#define USDHC_VENDOR_SPEC 0xc0 +#define USDHC_IMX_FRC_SDCLK_ON (1 << 8) + +#define USDHC_DLL_CTRL 0x60 + +#define USDHC_TUNING_CTRL 0xcc +#define USDHC_TUNE_CTRL_STATUS 0x68 +#define USDHC_WTMK_LVL 0x44 + +/* Undocumented register used by guests working around erratum ERR004536 */ +#define USDHC_UNDOCUMENTED_REG27 0x6c + +#define USDHC_CTRL_4BITBUS (0x1 << 1) +#define USDHC_CTRL_8BITBUS (0x2 << 1) + +#define USDHC_PRNSTS_SDSTB (1 << 3) + static uint64_t usdhc_read(void *opaque, hwaddr offset, unsigned size) { SDHCIState *s = SYSBUS_SDHCI(opaque); @@ -1571,11 +1615,11 @@ static uint64_t usdhc_read(void *opaque, hwaddr offset, unsigned size) hostctl1 = SDHC_DMA_TYPE(s->hostctl1) << (8 - 3); if (s->hostctl1 & SDHC_CTRL_8BITBUS) { - hostctl1 |= ESDHC_CTRL_8BITBUS; + hostctl1 |= USDHC_CTRL_8BITBUS; } if (s->hostctl1 & SDHC_CTRL_4BITBUS) { - hostctl1 |= ESDHC_CTRL_4BITBUS; + hostctl1 |= USDHC_CTRL_4BITBUS; } ret = hostctl1; @@ -1586,21 +1630,21 @@ static uint64_t usdhc_read(void *opaque, hwaddr offset, unsigned size) case SDHC_PRNSTS: /* Add SDSTB (SD Clock Stable) bit to PRNSTS */ - ret = sdhci_read(opaque, offset, size) & ~ESDHC_PRNSTS_SDSTB; + ret = sdhci_read(opaque, offset, size) & ~USDHC_PRNSTS_SDSTB; if (s->clkcon & SDHC_CLOCK_INT_STABLE) { - ret |= ESDHC_PRNSTS_SDSTB; + ret |= USDHC_PRNSTS_SDSTB; } break; - case ESDHC_VENDOR_SPEC: + case USDHC_VENDOR_SPEC: ret = s->vendor_spec; break; - case ESDHC_DLL_CTRL: - case ESDHC_TUNE_CTRL_STATUS: - case ESDHC_UNDOCUMENTED_REG27: - case ESDHC_TUNING_CTRL: - case ESDHC_MIX_CTRL: - case ESDHC_WTMK_LVL: + case USDHC_DLL_CTRL: + case USDHC_TUNE_CTRL_STATUS: + case USDHC_UNDOCUMENTED_REG27: + case USDHC_TUNING_CTRL: + case USDHC_MIX_CTRL: + case USDHC_WTMK_LVL: ret = 0; break; } @@ -1616,18 +1660,18 @@ usdhc_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) uint32_t value = (uint32_t)val; switch (offset) { - case ESDHC_DLL_CTRL: - case ESDHC_TUNE_CTRL_STATUS: - case ESDHC_UNDOCUMENTED_REG27: - case ESDHC_TUNING_CTRL: - case ESDHC_WTMK_LVL: + case USDHC_DLL_CTRL: + case USDHC_TUNE_CTRL_STATUS: + case USDHC_UNDOCUMENTED_REG27: + case USDHC_TUNING_CTRL: + case USDHC_WTMK_LVL: break; - case ESDHC_VENDOR_SPEC: + case USDHC_VENDOR_SPEC: s->vendor_spec = value; switch (s->vendor) { case SDHCI_VENDOR_IMX: - if (value & ESDHC_IMX_FRC_SDCLK_ON) { + if (value & USDHC_IMX_FRC_SDCLK_ON) { s->prnsts &= ~SDHC_IMX_CLOCK_GATE_OFF; } else { s->prnsts |= SDHC_IMX_CLOCK_GATE_OFF; @@ -1696,12 +1740,12 @@ usdhc_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) * Second, split "Data Transfer Width" from bits 2 and 1 in to * bits 5 and 1 */ - if (value & ESDHC_CTRL_8BITBUS) { + if (value & USDHC_CTRL_8BITBUS) { hostctl1 |= SDHC_CTRL_8BITBUS; } - if (value & ESDHC_CTRL_4BITBUS) { - hostctl1 |= ESDHC_CTRL_4BITBUS; + if (value & USDHC_CTRL_4BITBUS) { + hostctl1 |= USDHC_CTRL_4BITBUS; } /* @@ -1724,7 +1768,7 @@ usdhc_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) sdhci_write(opaque, offset, value, size); break; - case ESDHC_MIX_CTRL: + case USDHC_MIX_CTRL: /* * So, when SD/MMC stack in Linux tries to write to "Transfer * Mode Register", ESDHC i.MX quirk code will translate it diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c index 97ee58e20c..167c03b780 100644 --- a/hw/sd/ssi-sd.c +++ b/hw/sd/ssi-sd.c @@ -368,37 +368,9 @@ static const VMStateDescription vmstate_ssi_sd = { static void ssi_sd_realize(SSIPeripheral *d, Error **errp) { - 
ERRP_GUARD(); ssi_sd_state *s = SSI_SD(d); - DeviceState *carddev; - DriveInfo *dinfo; - qbus_create_inplace(&s->sdbus, sizeof(s->sdbus), TYPE_SD_BUS, - DEVICE(d), "sd-bus"); - - /* Create and plug in the sd card */ - /* FIXME use a qdev drive property instead of drive_get_next() */ - dinfo = drive_get_next(IF_SD); - carddev = qdev_new(TYPE_SD_CARD); - if (dinfo) { - if (!qdev_prop_set_drive_err(carddev, "drive", - blk_by_legacy_dinfo(dinfo), errp)) { - goto fail; - } - } - - if (!object_property_set_bool(OBJECT(carddev), "spi", true, errp)) { - goto fail; - } - - if (!qdev_realize_and_unref(carddev, BUS(&s->sdbus), errp)) { - goto fail; - } - - return; - -fail: - error_prepend(errp, "failed to init SD card: "); + qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SD_BUS, DEVICE(d), "sd-bus"); } static void ssi_sd_reset(DeviceState *dev) @@ -427,7 +399,7 @@ static void ssi_sd_class_init(ObjectClass *klass, void *data) k->cs_polarity = SSI_CS_LOW; dc->vmsd = &vmstate_ssi_sd; dc->reset = ssi_sd_reset; - /* Reason: init() method uses drive_get_next() */ + /* Reason: GPIO chip-select line should be wired up */ dc->user_creatable = false; } diff --git a/hw/sd/trace-events b/hw/sd/trace-events index 3cc2ef89ba..94a00557b2 100644 --- a/hw/sd/trace-events +++ b/hw/sd/trace-events @@ -68,3 +68,7 @@ pl181_fifo_push(uint32_t data) "FIFO push 0x%08" PRIx32 pl181_fifo_pop(uint32_t data) "FIFO pop 0x%08" PRIx32 pl181_fifo_transfer_complete(void) "FIFO transfer complete" pl181_data_engine_idle(void) "data engine idle" + +# aspeed_sdhci.c +aspeed_sdhci_read(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size %u: 0x%" PRIx64 +aspeed_sdhci_write(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size %u: 0x%" PRIx64 diff --git a/hw/sensor/Kconfig b/hw/sensor/Kconfig index a2b55a4fdb..e03bd09b50 100644 --- a/hw/sensor/Kconfig +++ b/hw/sensor/Kconfig @@ -1,14 +1,22 @@ config TMP105 bool depends on I2C + default y if I2C_DEVICES config TMP421 bool depends on I2C + default y if I2C_DEVICES + +config DPS310 + bool + depends on I2C + default y if I2C_DEVICES config EMC141X bool depends on I2C + default y if I2C_DEVICES config ADM1272 bool @@ -17,3 +25,16 @@ config ADM1272 config MAX34451 bool depends on I2C + +config LSM303DLHC_MAG + bool + depends on I2C + default y if I2C_DEVICES + +config ISL_PMBUS_VR + bool + depends on PMBUS + +config MAX31785 + bool + depends on PMBUS diff --git a/hw/sensor/dps310.c b/hw/sensor/dps310.c new file mode 100644 index 0000000000..d60a18ac41 --- /dev/null +++ b/hw/sensor/dps310.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2017-2021 Joel Stanley , IBM Corporation + * + * Infineon DPS310 temperature and humidity sensor + * + * https://www.infineon.com/cms/en/product/sensor/pressure-sensors/pressure-sensors-for-iot/dps310/ + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/hw.h" +#include "hw/i2c/i2c.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "migration/vmstate.h" + +#define NUM_REGISTERS 0x33 + +typedef struct DPS310State { + /*< private >*/ + I2CSlave i2c; + + /*< public >*/ + uint8_t regs[NUM_REGISTERS]; + + uint8_t len; + uint8_t pointer; + +} DPS310State; + +#define TYPE_DPS310 "dps310" +#define DPS310(obj) OBJECT_CHECK(DPS310State, (obj), TYPE_DPS310) + +#define DPS310_PRS_B2 0x00 +#define DPS310_PRS_B1 0x01 +#define DPS310_PRS_B0 0x02 +#define DPS310_TMP_B2 0x03 +#define DPS310_TMP_B1 0x04 +#define DPS310_TMP_B0 0x05 +#define DPS310_PRS_CFG 0x06 +#define DPS310_TMP_CFG 0x07 
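The register map above is reached through the usual I2C pointer-register protocol, which the event/send/recv handlers further down implement: the first byte of a write selects a register, and subsequent writes or a repeated-start read access it with auto-increment. A sketch of the guest-visible sequence using QEMU's I2C core helpers, assuming the i2c_start_send()/i2c_start_recv() wrappers from hw/i2c/core.c are available; the bus pointer and the 0x76 address are placeholders that depend on how the board wires the sensor:

#include "qemu/osdep.h"
#include "hw/i2c/i2c.h"

#define DEMO_DPS310_ADDR 0x76   /* assumed wiring; board dependent */

/* Read one byte of MEAS_CFG (0x08): write the register pointer, then do a
 * repeated-start read.  Sketch only; error handling is minimal. */
static uint8_t demo_dps310_read_meas_cfg(I2CBus *bus)
{
    uint8_t val = 0xFF;

    if (i2c_start_send(bus, DEMO_DPS310_ADDR) == 0) {
        i2c_send(bus, 0x08);                     /* register pointer */
        if (i2c_start_recv(bus, DEMO_DPS310_ADDR) == 0) {
            val = i2c_recv(bus);                 /* served by dps310_rx() */
        }
    }
    i2c_end_transfer(bus);

    return val;
}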
+#define DPS310_TMP_RATE_BITS (0x70) +#define DPS310_MEAS_CFG 0x08 +#define DPS310_MEAS_CTRL_BITS (0x07) +#define DPS310_PRESSURE_EN BIT(0) +#define DPS310_TEMP_EN BIT(1) +#define DPS310_BACKGROUND BIT(2) +#define DPS310_PRS_RDY BIT(4) +#define DPS310_TMP_RDY BIT(5) +#define DPS310_SENSOR_RDY BIT(6) +#define DPS310_COEF_RDY BIT(7) +#define DPS310_CFG_REG 0x09 +#define DPS310_RESET 0x0c +#define DPS310_RESET_MAGIC (BIT(0) | BIT(3)) +#define DPS310_COEF_BASE 0x10 +#define DPS310_COEF_LAST 0x21 +#define DPS310_COEF_SRC 0x28 + +static void dps310_reset(DeviceState *dev) +{ + DPS310State *s = DPS310(dev); + + static const uint8_t regs_reset_state[sizeof(s->regs)] = { + 0xfe, 0x2f, 0xee, 0x02, 0x69, 0xa6, 0x00, 0x80, 0xc7, 0x00, 0x00, 0x00, + 0x00, 0x10, 0x00, 0x00, 0x0e, 0x1e, 0xdd, 0x13, 0xca, 0x5f, 0x21, 0x52, + 0xf9, 0xc6, 0x04, 0xd1, 0xdb, 0x47, 0x00, 0x5b, 0xfb, 0x3a, 0x00, 0x00, + 0x20, 0x49, 0x4e, 0xa5, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x60, 0x15, 0x02 + }; + + memcpy(s->regs, regs_reset_state, sizeof(s->regs)); + s->pointer = 0; + + /* TODO: assert these after some timeout ? */ + s->regs[DPS310_MEAS_CFG] = DPS310_COEF_RDY | DPS310_SENSOR_RDY + | DPS310_TMP_RDY | DPS310_PRS_RDY; +} + +static uint8_t dps310_read(DPS310State *s, uint8_t reg) +{ + if (reg >= sizeof(s->regs)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: register 0x%02x out of bounds\n", + __func__, s->pointer); + return 0xFF; + } + + switch (reg) { + case DPS310_PRS_B2: + case DPS310_PRS_B1: + case DPS310_PRS_B0: + case DPS310_TMP_B2: + case DPS310_TMP_B1: + case DPS310_TMP_B0: + case DPS310_PRS_CFG: + case DPS310_TMP_CFG: + case DPS310_MEAS_CFG: + case DPS310_CFG_REG: + case DPS310_COEF_BASE...DPS310_COEF_LAST: + case DPS310_COEF_SRC: + case 0x32: /* Undocumented register to indicate workaround not required */ + return s->regs[reg]; + default: + qemu_log_mask(LOG_UNIMP, "%s: register 0x%02x unimplemented\n", + __func__, reg); + return 0xFF; + } +} + +static void dps310_write(DPS310State *s, uint8_t reg, uint8_t data) +{ + if (reg >= sizeof(s->regs)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: register %d out of bounds\n", + __func__, s->pointer); + return; + } + + switch (reg) { + case DPS310_RESET: + if (data == DPS310_RESET_MAGIC) { + device_cold_reset(DEVICE(s)); + } + break; + case DPS310_PRS_CFG: + case DPS310_TMP_CFG: + case DPS310_MEAS_CFG: + case DPS310_CFG_REG: + s->regs[reg] = data; + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: register 0x%02x unimplemented\n", + __func__, reg); + return; + } +} + +static uint8_t dps310_rx(I2CSlave *i2c) +{ + DPS310State *s = DPS310(i2c); + + if (s->len == 1) { + return dps310_read(s, s->pointer++); + } else { + return 0xFF; + } +} + +static int dps310_tx(I2CSlave *i2c, uint8_t data) +{ + DPS310State *s = DPS310(i2c); + + if (s->len == 0) { + /* + * first byte is the register pointer for a read or write + * operation + */ + s->pointer = data; + s->len++; + } else if (s->len == 1) { + dps310_write(s, s->pointer++, data); + } + + return 0; +} + +static int dps310_event(I2CSlave *i2c, enum i2c_event event) +{ + DPS310State *s = DPS310(i2c); + + switch (event) { + case I2C_START_SEND: + s->pointer = 0xFF; + s->len = 0; + break; + case I2C_START_RECV: + if (s->len != 1) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid recv sequence\n", + __func__); + } + break; + default: + break; + } + + return 0; +} + +static const VMStateDescription vmstate_dps310 = { + .name = "DPS310", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT8(len, 
DPS310State), + VMSTATE_UINT8_ARRAY(regs, DPS310State, NUM_REGISTERS), + VMSTATE_UINT8(pointer, DPS310State), + VMSTATE_I2C_SLAVE(i2c, DPS310State), + VMSTATE_END_OF_LIST() + } +}; + +static void dps310_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + + k->event = dps310_event; + k->recv = dps310_rx; + k->send = dps310_tx; + dc->reset = dps310_reset; + dc->vmsd = &vmstate_dps310; +} + +static const TypeInfo dps310_info = { + .name = TYPE_DPS310, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(DPS310State), + .class_init = dps310_class_init, +}; + +static void dps310_register_types(void) +{ + type_register_static(&dps310_info); +} + +type_init(dps310_register_types) diff --git a/hw/sensor/isl_pmbus_vr.c b/hw/sensor/isl_pmbus_vr.c new file mode 100644 index 0000000000..eb344dd5a9 --- /dev/null +++ b/hw/sensor/isl_pmbus_vr.c @@ -0,0 +1,319 @@ +/* + * PMBus device for Renesas Digital Multiphase Voltage Regulators + * + * Copyright 2021 Google LLC + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/sensor/isl_pmbus_vr.h" +#include "hw/qdev-properties.h" +#include "qapi/visitor.h" +#include "qemu/log.h" +#include "qemu/module.h" + +static uint8_t isl_pmbus_vr_read_byte(PMBusDevice *pmdev) +{ + ISLState *s = ISL69260(pmdev); + + switch (pmdev->code) { + case PMBUS_IC_DEVICE_ID: + if (!s->ic_device_id_len) { + break; + } + pmbus_send(pmdev, s->ic_device_id, s->ic_device_id_len); + pmbus_idle(pmdev); + return 0; + } + + qemu_log_mask(LOG_GUEST_ERROR, + "%s: reading from unsupported register: 0x%02x\n", + __func__, pmdev->code); + return PMBUS_ERR_BYTE; +} + +static int isl_pmbus_vr_write_data(PMBusDevice *pmdev, const uint8_t *buf, + uint8_t len) +{ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to unsupported register: 0x%02x\n", + __func__, pmdev->code); + return PMBUS_ERR_BYTE; +} + +/* TODO: Implement coefficients support in pmbus_device.c for qmp */ +static void isl_pmbus_vr_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + visit_type_uint16(v, name, (uint16_t *)opaque, errp); +} + +static void isl_pmbus_vr_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + PMBusDevice *pmdev = PMBUS_DEVICE(obj); + uint16_t *internal = opaque; + uint16_t value; + if (!visit_type_uint16(v, name, &value, errp)) { + return; + } + + *internal = value; + pmbus_check_limits(pmdev); +} + +static void isl_pmbus_vr_exit_reset(Object *obj) +{ + PMBusDevice *pmdev = PMBUS_DEVICE(obj); + + pmdev->page = 0; + pmdev->capability = ISL_CAPABILITY_DEFAULT; + for (int i = 0; i < pmdev->num_pages; i++) { + pmdev->pages[i].operation = ISL_OPERATION_DEFAULT; + pmdev->pages[i].on_off_config = ISL_ON_OFF_CONFIG_DEFAULT; + pmdev->pages[i].vout_mode = ISL_VOUT_MODE_DEFAULT; + pmdev->pages[i].vout_command = ISL_VOUT_COMMAND_DEFAULT; + pmdev->pages[i].vout_max = ISL_VOUT_MAX_DEFAULT; + pmdev->pages[i].vout_margin_high = ISL_VOUT_MARGIN_HIGH_DEFAULT; + pmdev->pages[i].vout_margin_low = ISL_VOUT_MARGIN_LOW_DEFAULT; + pmdev->pages[i].vout_transition_rate = ISL_VOUT_TRANSITION_RATE_DEFAULT; + pmdev->pages[i].vout_ov_fault_limit = ISL_VOUT_OV_FAULT_LIMIT_DEFAULT; + pmdev->pages[i].ot_fault_limit = ISL_OT_FAULT_LIMIT_DEFAULT; + pmdev->pages[i].ot_warn_limit = ISL_OT_WARN_LIMIT_DEFAULT; + pmdev->pages[i].vin_ov_warn_limit = ISL_VIN_OV_WARN_LIMIT_DEFAULT; + pmdev->pages[i].vin_uv_warn_limit = ISL_VIN_UV_WARN_LIMIT_DEFAULT; + 
pmdev->pages[i].iin_oc_fault_limit = ISL_IIN_OC_FAULT_LIMIT_DEFAULT; + pmdev->pages[i].ton_delay = ISL_TON_DELAY_DEFAULT; + pmdev->pages[i].ton_rise = ISL_TON_RISE_DEFAULT; + pmdev->pages[i].toff_fall = ISL_TOFF_FALL_DEFAULT; + pmdev->pages[i].revision = ISL_REVISION_DEFAULT; + + pmdev->pages[i].read_vout = ISL_READ_VOUT_DEFAULT; + pmdev->pages[i].read_iout = ISL_READ_IOUT_DEFAULT; + pmdev->pages[i].read_pout = ISL_READ_POUT_DEFAULT; + pmdev->pages[i].read_vin = ISL_READ_VIN_DEFAULT; + pmdev->pages[i].read_iin = ISL_READ_IIN_DEFAULT; + pmdev->pages[i].read_pin = ISL_READ_PIN_DEFAULT; + pmdev->pages[i].read_temperature_1 = ISL_READ_TEMP_DEFAULT; + pmdev->pages[i].read_temperature_2 = ISL_READ_TEMP_DEFAULT; + pmdev->pages[i].read_temperature_3 = ISL_READ_TEMP_DEFAULT; + } +} + +/* The raa228000 uses different direct mode coefficents from most isl devices */ +static void raa228000_exit_reset(Object *obj) +{ + PMBusDevice *pmdev = PMBUS_DEVICE(obj); + + isl_pmbus_vr_exit_reset(obj); + + pmdev->pages[0].read_iout = 0; + pmdev->pages[0].read_pout = 0; + pmdev->pages[0].read_vout = 0; + pmdev->pages[0].read_vin = 0; + pmdev->pages[0].read_iin = 0; + pmdev->pages[0].read_pin = 0; + pmdev->pages[0].read_temperature_1 = 0; + pmdev->pages[0].read_temperature_2 = 0; + pmdev->pages[0].read_temperature_3 = 0; +} + +static void isl69259_exit_reset(Object *obj) +{ + ISLState *s = ISL69260(obj); + static const uint8_t ic_device_id[] = {0x04, 0x00, 0x81, 0xD2, 0x49, 0x3c}; + g_assert(sizeof(ic_device_id) <= sizeof(s->ic_device_id)); + + isl_pmbus_vr_exit_reset(obj); + + s->ic_device_id_len = sizeof(ic_device_id); + memcpy(s->ic_device_id, ic_device_id, sizeof(ic_device_id)); +} + +static void isl_pmbus_vr_add_props(Object *obj, uint64_t *flags, uint8_t pages) +{ + PMBusDevice *pmdev = PMBUS_DEVICE(obj); + for (int i = 0; i < pages; i++) { + if (flags[i] & PB_HAS_VIN) { + object_property_add(obj, "vin[*]", "uint16", + isl_pmbus_vr_get, + isl_pmbus_vr_set, + NULL, &pmdev->pages[i].read_vin); + } + + if (flags[i] & PB_HAS_VOUT) { + object_property_add(obj, "vout[*]", "uint16", + isl_pmbus_vr_get, + isl_pmbus_vr_set, + NULL, &pmdev->pages[i].read_vout); + } + + if (flags[i] & PB_HAS_IIN) { + object_property_add(obj, "iin[*]", "uint16", + isl_pmbus_vr_get, + isl_pmbus_vr_set, + NULL, &pmdev->pages[i].read_iin); + } + + if (flags[i] & PB_HAS_IOUT) { + object_property_add(obj, "iout[*]", "uint16", + isl_pmbus_vr_get, + isl_pmbus_vr_set, + NULL, &pmdev->pages[i].read_iout); + } + + if (flags[i] & PB_HAS_PIN) { + object_property_add(obj, "pin[*]", "uint16", + isl_pmbus_vr_get, + isl_pmbus_vr_set, + NULL, &pmdev->pages[i].read_pin); + } + + if (flags[i] & PB_HAS_POUT) { + object_property_add(obj, "pout[*]", "uint16", + isl_pmbus_vr_get, + isl_pmbus_vr_set, + NULL, &pmdev->pages[i].read_pout); + } + + if (flags[i] & PB_HAS_TEMPERATURE) { + object_property_add(obj, "temp1[*]", "uint16", + isl_pmbus_vr_get, + isl_pmbus_vr_set, + NULL, &pmdev->pages[i].read_temperature_1); + } + + if (flags[i] & PB_HAS_TEMP2) { + object_property_add(obj, "temp2[*]", "uint16", + isl_pmbus_vr_get, + isl_pmbus_vr_set, + NULL, &pmdev->pages[i].read_temperature_2); + } + + if (flags[i] & PB_HAS_TEMP3) { + object_property_add(obj, "temp3[*]", "uint16", + isl_pmbus_vr_get, + isl_pmbus_vr_set, + NULL, &pmdev->pages[i].read_temperature_3); + } + } +} + +static void raa22xx_init(Object *obj) +{ + PMBusDevice *pmdev = PMBUS_DEVICE(obj); + uint64_t flags[2]; + + flags[0] = PB_HAS_VIN | PB_HAS_VOUT | PB_HAS_VOUT_MODE | + PB_HAS_VOUT_RATING | 
PB_HAS_VOUT_MARGIN | PB_HAS_IIN | + PB_HAS_IOUT | PB_HAS_PIN | PB_HAS_POUT | PB_HAS_TEMPERATURE | + PB_HAS_TEMP2 | PB_HAS_TEMP3 | PB_HAS_STATUS_MFR_SPECIFIC; + flags[1] = PB_HAS_IIN | PB_HAS_PIN | PB_HAS_TEMPERATURE | PB_HAS_TEMP3 | + PB_HAS_VOUT | PB_HAS_VOUT_MODE | PB_HAS_VOUT_MARGIN | + PB_HAS_VOUT_RATING | PB_HAS_IOUT | PB_HAS_POUT | + PB_HAS_STATUS_MFR_SPECIFIC; + + pmbus_page_config(pmdev, 0, flags[0]); + pmbus_page_config(pmdev, 1, flags[1]); + isl_pmbus_vr_add_props(obj, flags, ARRAY_SIZE(flags)); +} + +static void raa228000_init(Object *obj) +{ + PMBusDevice *pmdev = PMBUS_DEVICE(obj); + uint64_t flags[1]; + + flags[0] = PB_HAS_VIN | PB_HAS_VOUT | PB_HAS_VOUT_MODE | + PB_HAS_VOUT_RATING | PB_HAS_VOUT_MARGIN | PB_HAS_IIN | + PB_HAS_IOUT | PB_HAS_PIN | PB_HAS_POUT | PB_HAS_TEMPERATURE | + PB_HAS_TEMP2 | PB_HAS_TEMP3 | PB_HAS_STATUS_MFR_SPECIFIC; + + pmbus_page_config(pmdev, 0, flags[0]); + isl_pmbus_vr_add_props(obj, flags, 1); +} + +static void isl_pmbus_vr_class_init(ObjectClass *klass, void *data, + uint8_t pages) +{ + PMBusDeviceClass *k = PMBUS_DEVICE_CLASS(klass); + k->write_data = isl_pmbus_vr_write_data; + k->receive_byte = isl_pmbus_vr_read_byte; + k->device_num_pages = pages; +} + +static void isl69260_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + dc->desc = "Renesas ISL69260 Digital Multiphase Voltage Regulator"; + rc->phases.exit = isl_pmbus_vr_exit_reset; + isl_pmbus_vr_class_init(klass, data, 2); +} + +static void raa228000_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + dc->desc = "Renesas 228000 Digital Multiphase Voltage Regulator"; + rc->phases.exit = raa228000_exit_reset; + isl_pmbus_vr_class_init(klass, data, 1); +} + +static void raa229004_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + dc->desc = "Renesas 229004 Digital Multiphase Voltage Regulator"; + rc->phases.exit = isl_pmbus_vr_exit_reset; + isl_pmbus_vr_class_init(klass, data, 2); +} + +static void isl69259_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + dc->desc = "Renesas ISL69259 Digital Multiphase Voltage Regulator"; + rc->phases.exit = isl69259_exit_reset; + isl_pmbus_vr_class_init(klass, data, 2); +} + +static const TypeInfo isl69259_info = { + .name = TYPE_ISL69259, + .parent = TYPE_ISL69260, + .class_init = isl69259_class_init, +}; + +static const TypeInfo isl69260_info = { + .name = TYPE_ISL69260, + .parent = TYPE_PMBUS_DEVICE, + .instance_size = sizeof(ISLState), + .instance_init = raa22xx_init, + .class_init = isl69260_class_init, +}; + +static const TypeInfo raa229004_info = { + .name = TYPE_RAA229004, + .parent = TYPE_PMBUS_DEVICE, + .instance_size = sizeof(ISLState), + .instance_init = raa22xx_init, + .class_init = raa229004_class_init, +}; + +static const TypeInfo raa228000_info = { + .name = TYPE_RAA228000, + .parent = TYPE_PMBUS_DEVICE, + .instance_size = sizeof(ISLState), + .instance_init = raa228000_init, + .class_init = raa228000_class_init, +}; + +static void isl_pmbus_vr_register_types(void) +{ + type_register_static(&isl69259_info); + type_register_static(&isl69260_info); + type_register_static(&raa228000_info); + type_register_static(&raa229004_info); +} + +type_init(isl_pmbus_vr_register_types) diff --git 
a/hw/sensor/lsm303dlhc_mag.c b/hw/sensor/lsm303dlhc_mag.c new file mode 100644 index 0000000000..bb8d48b2fd --- /dev/null +++ b/hw/sensor/lsm303dlhc_mag.c @@ -0,0 +1,558 @@ +/* + * LSM303DLHC I2C magnetometer. + * + * Copyright (C) 2021 Linaro Ltd. + * Written by Kevin Townsend + * + * Based on: https://www.st.com/resource/en/datasheet/lsm303dlhc.pdf + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +/* + * The I2C address associated with this device is set on the command-line when + * initialising the machine, but the following address is standard: 0x1E. + * + * Get and set functions for 'mag-x', 'mag-y' and 'mag-z' assume that + * 1 = 0.001 uT. (NOTE the 1 gauss = 100 uT, so setting a value of 100,000 + * would be equal to 1 gauss or 100 uT.) + * + * Get and set functions for 'temperature' assume that 1 = 0.001 C, so 23.6 C + * would be equal to 23600. + */ + +#include "qemu/osdep.h" +#include "hw/i2c/i2c.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qemu/module.h" +#include "qemu/log.h" +#include "qemu/bswap.h" + +enum LSM303DLHCMagReg { + LSM303DLHC_MAG_REG_CRA = 0x00, + LSM303DLHC_MAG_REG_CRB = 0x01, + LSM303DLHC_MAG_REG_MR = 0x02, + LSM303DLHC_MAG_REG_OUT_X_H = 0x03, + LSM303DLHC_MAG_REG_OUT_X_L = 0x04, + LSM303DLHC_MAG_REG_OUT_Z_H = 0x05, + LSM303DLHC_MAG_REG_OUT_Z_L = 0x06, + LSM303DLHC_MAG_REG_OUT_Y_H = 0x07, + LSM303DLHC_MAG_REG_OUT_Y_L = 0x08, + LSM303DLHC_MAG_REG_SR = 0x09, + LSM303DLHC_MAG_REG_IRA = 0x0A, + LSM303DLHC_MAG_REG_IRB = 0x0B, + LSM303DLHC_MAG_REG_IRC = 0x0C, + LSM303DLHC_MAG_REG_TEMP_OUT_H = 0x31, + LSM303DLHC_MAG_REG_TEMP_OUT_L = 0x32 +}; + +typedef struct LSM303DLHCMagState { + I2CSlave parent_obj; + uint8_t cra; + uint8_t crb; + uint8_t mr; + int16_t x; + int16_t z; + int16_t y; + int16_t x_lock; + int16_t z_lock; + int16_t y_lock; + uint8_t sr; + uint8_t ira; + uint8_t irb; + uint8_t irc; + int16_t temperature; + int16_t temperature_lock; + uint8_t len; + uint8_t buf; + uint8_t pointer; +} LSM303DLHCMagState; + +#define TYPE_LSM303DLHC_MAG "lsm303dlhc_mag" +OBJECT_DECLARE_SIMPLE_TYPE(LSM303DLHCMagState, LSM303DLHC_MAG) + +/* + * Conversion factor from Gauss to sensor values for each GN gain setting, + * in units "lsb per Gauss" (see data sheet table 3). There is no documented + * behaviour if the GN setting in CRB is incorrectly set to 0b000; + * we arbitrarily make it the same as 0b001. + */ +uint32_t xy_gain[] = { 1100, 1100, 855, 670, 450, 400, 330, 230 }; +uint32_t z_gain[] = { 980, 980, 760, 600, 400, 355, 295, 205 }; + +static void lsm303dlhc_mag_get_x(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LSM303DLHCMagState *s = LSM303DLHC_MAG(obj); + int gm = extract32(s->crb, 5, 3); + + /* Convert to uT where 1000 = 1 uT. Conversion factor depends on gain. */ + int64_t value = muldiv64(s->x, 100000, xy_gain[gm]); + visit_type_int(v, name, &value, errp); +} + +static void lsm303dlhc_mag_get_y(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LSM303DLHCMagState *s = LSM303DLHC_MAG(obj); + int gm = extract32(s->crb, 5, 3); + + /* Convert to uT where 1000 = 1 uT. Conversion factor depends on gain. */ + int64_t value = muldiv64(s->y, 100000, xy_gain[gm]); + visit_type_int(v, name, &value, errp); +} + +static void lsm303dlhc_mag_get_z(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LSM303DLHCMagState *s = LSM303DLHC_MAG(obj); + int gm = extract32(s->crb, 5, 3); + + /* Convert to uT where 1000 = 1 uT. 
Conversion factor depends on gain. */ + int64_t value = muldiv64(s->z, 100000, z_gain[gm]); + visit_type_int(v, name, &value, errp); +} + +static void lsm303dlhc_mag_set_x(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LSM303DLHCMagState *s = LSM303DLHC_MAG(obj); + int64_t value; + int64_t reg; + int gm = extract32(s->crb, 5, 3); + + if (!visit_type_int(v, name, &value, errp)) { + return; + } + + reg = muldiv64(value, xy_gain[gm], 100000); + + /* Make sure we are within a 12-bit limit. */ + if (reg > 2047 || reg < -2048) { + error_setg(errp, "value %" PRId64 " out of register's range", value); + return; + } + + s->x = (int16_t)reg; +} + +static void lsm303dlhc_mag_set_y(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LSM303DLHCMagState *s = LSM303DLHC_MAG(obj); + int64_t value; + int64_t reg; + int gm = extract32(s->crb, 5, 3); + + if (!visit_type_int(v, name, &value, errp)) { + return; + } + + reg = muldiv64(value, xy_gain[gm], 100000); + + /* Make sure we are within a 12-bit limit. */ + if (reg > 2047 || reg < -2048) { + error_setg(errp, "value %" PRId64 " out of register's range", value); + return; + } + + s->y = (int16_t)reg; +} + +static void lsm303dlhc_mag_set_z(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LSM303DLHCMagState *s = LSM303DLHC_MAG(obj); + int64_t value; + int64_t reg; + int gm = extract32(s->crb, 5, 3); + + if (!visit_type_int(v, name, &value, errp)) { + return; + } + + reg = muldiv64(value, z_gain[gm], 100000); + + /* Make sure we are within a 12-bit limit. */ + if (reg > 2047 || reg < -2048) { + error_setg(errp, "value %" PRId64 " out of register's range", value); + return; + } + + s->z = (int16_t)reg; +} + +/* + * Get handler for the temperature property. + */ +static void lsm303dlhc_mag_get_temperature(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + LSM303DLHCMagState *s = LSM303DLHC_MAG(obj); + int64_t value; + + /* Convert to 1 lsb = 0.125 C to 1 = 0.001 C for 'temperature' property. */ + value = s->temperature * 125; + + visit_type_int(v, name, &value, errp); +} + +/* + * Set handler for the temperature property. + */ +static void lsm303dlhc_mag_set_temperature(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + LSM303DLHCMagState *s = LSM303DLHC_MAG(obj); + int64_t value; + + if (!visit_type_int(v, name, &value, errp)) { + return; + } + + /* Input temperature is in 0.001 C units. Convert to 1 lsb = 0.125 C. */ + value /= 125; + + if (value > 2047 || value < -2048) { + error_setg(errp, "value %" PRId64 " lsb is out of range", value); + return; + } + + s->temperature = (int16_t)value; +} + +/* + * Callback handler whenever a 'I2C_START_RECV' (read) event is received. + */ +static void lsm303dlhc_mag_read(LSM303DLHCMagState *s) +{ + /* + * Set the LOCK bit whenever a new read attempt is made. This will be + * cleared in I2C_FINISH. Note that DRDY is always set to 1 in this driver. + */ + s->sr = 0x3; + + /* + * Copy the current X/Y/Z and temp. values into the locked registers so + * that 'mag-x', 'mag-y', 'mag-z' and 'temperature' can continue to be + * updated via QOM, etc., without corrupting the current read event. + */ + s->x_lock = s->x; + s->z_lock = s->z; + s->y_lock = s->y; + s->temperature_lock = s->temperature; +} + +/* + * Callback handler whenever a 'I2C_FINISH' event is received. 
+ */ +static void lsm303dlhc_mag_finish(LSM303DLHCMagState *s) +{ + /* + * Clear the LOCK bit when the read attempt terminates. + * This bit is initially set in the I2C_START_RECV handler. + */ + s->sr = 0x1; +} + +/* + * Callback handler when a device attempts to write to a register. + */ +static void lsm303dlhc_mag_write(LSM303DLHCMagState *s) +{ + switch (s->pointer) { + case LSM303DLHC_MAG_REG_CRA: + s->cra = s->buf; + break; + case LSM303DLHC_MAG_REG_CRB: + /* Make sure gain is at least 1, falling back to 1 on an error. */ + if (s->buf >> 5 == 0) { + s->buf = 1 << 5; + } + s->crb = s->buf; + break; + case LSM303DLHC_MAG_REG_MR: + s->mr = s->buf; + break; + case LSM303DLHC_MAG_REG_SR: + s->sr = s->buf; + break; + case LSM303DLHC_MAG_REG_IRA: + s->ira = s->buf; + break; + case LSM303DLHC_MAG_REG_IRB: + s->irb = s->buf; + break; + case LSM303DLHC_MAG_REG_IRC: + s->irc = s->buf; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "reg is read-only: 0x%02X", s->buf); + break; + } +} + +/* + * Low-level master-to-slave transaction handler. + */ +static int lsm303dlhc_mag_send(I2CSlave *i2c, uint8_t data) +{ + LSM303DLHCMagState *s = LSM303DLHC_MAG(i2c); + + if (s->len == 0) { + /* First byte is the reg pointer */ + s->pointer = data; + s->len++; + } else if (s->len == 1) { + /* Second byte is the new register value. */ + s->buf = data; + lsm303dlhc_mag_write(s); + } else { + g_assert_not_reached(); + } + + return 0; +} + +/* + * Low-level slave-to-master transaction handler (read attempts). + */ +static uint8_t lsm303dlhc_mag_recv(I2CSlave *i2c) +{ + LSM303DLHCMagState *s = LSM303DLHC_MAG(i2c); + uint8_t resp; + + switch (s->pointer) { + case LSM303DLHC_MAG_REG_CRA: + resp = s->cra; + break; + case LSM303DLHC_MAG_REG_CRB: + resp = s->crb; + break; + case LSM303DLHC_MAG_REG_MR: + resp = s->mr; + break; + case LSM303DLHC_MAG_REG_OUT_X_H: + resp = (uint8_t)(s->x_lock >> 8); + break; + case LSM303DLHC_MAG_REG_OUT_X_L: + resp = (uint8_t)(s->x_lock); + break; + case LSM303DLHC_MAG_REG_OUT_Z_H: + resp = (uint8_t)(s->z_lock >> 8); + break; + case LSM303DLHC_MAG_REG_OUT_Z_L: + resp = (uint8_t)(s->z_lock); + break; + case LSM303DLHC_MAG_REG_OUT_Y_H: + resp = (uint8_t)(s->y_lock >> 8); + break; + case LSM303DLHC_MAG_REG_OUT_Y_L: + resp = (uint8_t)(s->y_lock); + break; + case LSM303DLHC_MAG_REG_SR: + resp = s->sr; + break; + case LSM303DLHC_MAG_REG_IRA: + resp = s->ira; + break; + case LSM303DLHC_MAG_REG_IRB: + resp = s->irb; + break; + case LSM303DLHC_MAG_REG_IRC: + resp = s->irc; + break; + case LSM303DLHC_MAG_REG_TEMP_OUT_H: + /* Check if the temperature sensor is enabled or not (CRA & 0x80). */ + if (s->cra & 0x80) { + resp = (uint8_t)(s->temperature_lock >> 8); + } else { + resp = 0; + } + break; + case LSM303DLHC_MAG_REG_TEMP_OUT_L: + if (s->cra & 0x80) { + resp = (uint8_t)(s->temperature_lock & 0xff); + } else { + resp = 0; + } + break; + default: + resp = 0; + break; + } + + /* + * The address pointer on the LSM303DLHC auto-increments whenever a byte + * is read, without the master device having to request the next address. + * + * The auto-increment process has the following logic: + * + * - if (s->pointer == 8) then s->pointer = 3 + * - else: if (s->pointer == 12) then s->pointer = 0 + * - else: s->pointer += 1 + * + * Reading an invalid address return 0. 
+ */ + if (s->pointer == LSM303DLHC_MAG_REG_OUT_Y_L) { + s->pointer = LSM303DLHC_MAG_REG_OUT_X_H; + } else if (s->pointer == LSM303DLHC_MAG_REG_IRC) { + s->pointer = LSM303DLHC_MAG_REG_CRA; + } else { + s->pointer++; + } + + return resp; +} + +/* + * Bus state change handler. + */ +static int lsm303dlhc_mag_event(I2CSlave *i2c, enum i2c_event event) +{ + LSM303DLHCMagState *s = LSM303DLHC_MAG(i2c); + + switch (event) { + case I2C_START_SEND: + break; + case I2C_START_RECV: + lsm303dlhc_mag_read(s); + break; + case I2C_FINISH: + lsm303dlhc_mag_finish(s); + break; + case I2C_NACK: + break; + default: + return -1; + } + + s->len = 0; + return 0; +} + +/* + * Device data description using VMSTATE macros. + */ +static const VMStateDescription vmstate_lsm303dlhc_mag = { + .name = "LSM303DLHC_MAG", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + + VMSTATE_I2C_SLAVE(parent_obj, LSM303DLHCMagState), + VMSTATE_UINT8(len, LSM303DLHCMagState), + VMSTATE_UINT8(buf, LSM303DLHCMagState), + VMSTATE_UINT8(pointer, LSM303DLHCMagState), + VMSTATE_UINT8(cra, LSM303DLHCMagState), + VMSTATE_UINT8(crb, LSM303DLHCMagState), + VMSTATE_UINT8(mr, LSM303DLHCMagState), + VMSTATE_INT16(x, LSM303DLHCMagState), + VMSTATE_INT16(z, LSM303DLHCMagState), + VMSTATE_INT16(y, LSM303DLHCMagState), + VMSTATE_INT16(x_lock, LSM303DLHCMagState), + VMSTATE_INT16(z_lock, LSM303DLHCMagState), + VMSTATE_INT16(y_lock, LSM303DLHCMagState), + VMSTATE_UINT8(sr, LSM303DLHCMagState), + VMSTATE_UINT8(ira, LSM303DLHCMagState), + VMSTATE_UINT8(irb, LSM303DLHCMagState), + VMSTATE_UINT8(irc, LSM303DLHCMagState), + VMSTATE_INT16(temperature, LSM303DLHCMagState), + VMSTATE_INT16(temperature_lock, LSM303DLHCMagState), + VMSTATE_END_OF_LIST() + } +}; + +/* + * Put the device into post-reset default state. + */ +static void lsm303dlhc_mag_default_cfg(LSM303DLHCMagState *s) +{ + /* Set the device into is default reset state. */ + s->len = 0; + s->pointer = 0; /* Current register. */ + s->buf = 0; /* Shared buffer. */ + s->cra = 0x10; /* Temp Enabled = 0, Data Rate = 15.0 Hz. */ + s->crb = 0x20; /* Gain = +/- 1.3 Gauss. */ + s->mr = 0x3; /* Operating Mode = Sleep. */ + s->x = 0; + s->z = 0; + s->y = 0; + s->x_lock = 0; + s->z_lock = 0; + s->y_lock = 0; + s->sr = 0x1; /* DRDY = 1. */ + s->ira = 0x48; + s->irb = 0x34; + s->irc = 0x33; + s->temperature = 0; /* Default to 0 degrees C (0/8 lsb = 0 C). */ + s->temperature_lock = 0; +} + +/* + * Callback handler when DeviceState 'reset' is set to true. + */ +static void lsm303dlhc_mag_reset(DeviceState *dev) +{ + I2CSlave *i2c = I2C_SLAVE(dev); + LSM303DLHCMagState *s = LSM303DLHC_MAG(i2c); + + /* Set the device into its default reset state. */ + lsm303dlhc_mag_default_cfg(s); +} + +/* + * Initialisation of any public properties. + */ +static void lsm303dlhc_mag_initfn(Object *obj) +{ + object_property_add(obj, "mag-x", "int", + lsm303dlhc_mag_get_x, + lsm303dlhc_mag_set_x, NULL, NULL); + + object_property_add(obj, "mag-y", "int", + lsm303dlhc_mag_get_y, + lsm303dlhc_mag_set_y, NULL, NULL); + + object_property_add(obj, "mag-z", "int", + lsm303dlhc_mag_get_z, + lsm303dlhc_mag_set_z, NULL, NULL); + + object_property_add(obj, "temperature", "int", + lsm303dlhc_mag_get_temperature, + lsm303dlhc_mag_set_temperature, NULL, NULL); +} + +/* + * Set the virtual method pointers (bus state change, tx/rx, etc.). 
+ */ +static void lsm303dlhc_mag_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + + dc->reset = lsm303dlhc_mag_reset; + dc->vmsd = &vmstate_lsm303dlhc_mag; + k->event = lsm303dlhc_mag_event; + k->recv = lsm303dlhc_mag_recv; + k->send = lsm303dlhc_mag_send; +} + +static const TypeInfo lsm303dlhc_mag_info = { + .name = TYPE_LSM303DLHC_MAG, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(LSM303DLHCMagState), + .instance_init = lsm303dlhc_mag_initfn, + .class_init = lsm303dlhc_mag_class_init, +}; + +static void lsm303dlhc_mag_register_types(void) +{ + type_register_static(&lsm303dlhc_mag_info); +} + +type_init(lsm303dlhc_mag_register_types) diff --git a/hw/sensor/max31785.c b/hw/sensor/max31785.c new file mode 100644 index 0000000000..8b95e32481 --- /dev/null +++ b/hw/sensor/max31785.c @@ -0,0 +1,573 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Maxim MAX31785 PMBus 6-Channel Fan Controller + * + * Datasheet: + * https://datasheets.maximintegrated.com/en/ds/MAX31785.pdf + * + * Copyright(c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "qemu/osdep.h" +#include "hw/i2c/pmbus_device.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#define TYPE_MAX31785 "max31785" +#define MAX31785(obj) OBJECT_CHECK(MAX31785State, (obj), TYPE_MAX31785) + +/* MAX31785 mfr specific PMBus commands */ +#define MAX31785_MFR_MODE 0xD1 +#define MAX31785_MFR_PSEN_CONFIG 0xD2 +#define MAX31785_MFR_VOUT_PEAK 0xD4 +#define MAX31785_MFR_TEMPERATURE_PEAK 0xD6 +#define MAX31785_MFR_VOUT_MIN 0xD7 +#define MAX31785_MFR_FAULT_RESPONSE 0xD9 +#define MAX31785_MFR_NV_FAULT_LOG 0xDC +#define MAX31785_MFR_TIME_COUNT 0xDD +#define MAX31785_MFR_TEMP_SENSOR_CONFIG 0xF0 +#define MAX31785_MFR_FAN_CONFIG 0xF1 +#define MAX31785_MFR_FAN_LUT 0xF2 +#define MAX31785_MFR_READ_FAN_PWM 0xF3 +#define MAX31785_MFR_FAN_FAULT_LIMIT 0xF5 +#define MAX31785_MFR_FAN_WARN_LIMIT 0xF6 +#define MAX31785_MFR_FAN_RUN_TIME 0xF7 +#define MAX31785_MFR_FAN_PWM_AVG 0xF8 +#define MAX31785_MFR_FAN_PWM2RPM 0xF9 + +/* defaults as per the data sheet */ +#define MAX31785_DEFAULT_CAPABILITY 0x10 +#define MAX31785_DEFAULT_VOUT_MODE 0x40 +#define MAX31785_DEFAULT_VOUT_SCALE_MONITOR 0x7FFF +#define MAX31785_DEFAULT_FAN_COMMAND_1 0x7FFF +#define MAX31785_DEFAULT_OV_FAULT_LIMIT 0x7FFF +#define MAX31785_DEFAULT_OV_WARN_LIMIT 0x7FFF +#define MAX31785_DEFAULT_OT_FAULT_LIMIT 0x7FFF +#define MAX31785_DEFAULT_OT_WARN_LIMIT 0x7FFF +#define MAX31785_DEFAULT_PMBUS_REVISION 0x11 +#define MAX31785_DEFAULT_MFR_ID 0x4D +#define MAX31785_DEFAULT_MFR_MODEL 0x53 +#define MAX31785_DEFAULT_MFR_REVISION 0x3030 +#define MAX31785A_DEFAULT_MFR_REVISION 0x3040 +#define MAX31785B_DEFAULT_MFR_REVISION 0x3061 +#define MAX31785B_DEFAULT_MFR_TEMPERATURE_PEAK 0x8000 +#define MAX31785B_DEFAULT_MFR_VOUT_MIN 0x7FFF +#define MAX31785_DEFAULT_TEXT 0x3130313031303130 + +/* MAX31785 pages */ +#define MAX31785_TOTAL_NUM_PAGES 23 +#define MAX31785_FAN_PAGES 6 +#define MAX31785_MIN_FAN_PAGE 0 +#define MAX31785_MAX_FAN_PAGE 5 +#define MAX31785_MIN_TEMP_PAGE 6 +#define MAX31785_MAX_TEMP_PAGE 16 +#define MAX31785_MIN_ADC_VOLTAGE_PAGE 17 +#define MAX31785_MAX_ADC_VOLTAGE_PAGE 22 + +/* FAN_CONFIG_1_2 */ +#define MAX31785_MFR_FAN_CONFIG 0xF1 +#define MAX31785_FAN_CONFIG_ENABLE BIT(7) +#define MAX31785_FAN_CONFIG_RPM_PWM BIT(6) +#define MAX31785_FAN_CONFIG_PULSE(pulse) (pulse << 4) 
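The FAN_CONFIG_1_2 helpers just defined encode the per-fan configuration byte: MAX31785_FAN_CONFIG_ENABLE (bit 7) enables the channel, MAX31785_FAN_CONFIG_RPM_PWM (bit 6) selects RPM rather than PWM control, and MAX31785_FAN_CONFIG_PULSE() shifts the tach-pulse encoding into bits 5:4. A minimal composition sketch, purely illustrative and not part of the patch:

/* Hypothetical example: an enabled fan channel in RPM mode, pulse field = 2. */
static inline uint8_t max31785_example_fan_config(void)
{
    /* 0x80 | 0x40 | (2 << 4) == 0xe0 */
    return MAX31785_FAN_CONFIG_ENABLE |
           MAX31785_FAN_CONFIG_RPM_PWM |
           MAX31785_FAN_CONFIG_PULSE(2);
}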
+#define MAX31785_DEFAULT_FAN_CONFIG_1_2(pulse) \ + (MAX31785_FAN_CONFIG_ENABLE | MAX31785_FAN_CONFIG_PULSE(pulse)) +#define MAX31785_DEFAULT_MFR_FAN_CONFIG 0x0000 + +/* fan speed in RPM */ +#define MAX31785_DEFAULT_FAN_SPEED 0x7fff +#define MAX31785_DEFAULT_FAN_STATUS 0x00 + +#define MAX31785_DEFAULT_FAN_MAX_PWM 0x2710 + +/* + * MAX31785State: + * @code: The command code received + * @page: Each page corresponds to a device monitored by the Max 31785 + * The page register determines the available commands depending on device + * _____________________________________________________________________________ + * | 0 | Fan Connected to PWM0 | + * |_______|___________________________________________________________________| + * | 1 | Fan Connected to PWM1 | + * |_______|___________________________________________________________________| + * | 2 | Fan Connected to PWM2 | + * |_______|___________________________________________________________________| + * | 3 | Fan Connected to PWM3 | + * |_______|___________________________________________________________________| + * | 4 | Fan Connected to PWM4 | + * |_______|___________________________________________________________________| + * | 5 | Fan Connected to PWM5 | + * |_______|___________________________________________________________________| + * | 6 | Remote Thermal Diode Connected to ADC 0 | + * |_______|___________________________________________________________________| + * | 7 | Remote Thermal Diode Connected to ADC 1 | + * |_______|___________________________________________________________________| + * | 8 | Remote Thermal Diode Connected to ADC 2 | + * |_______|___________________________________________________________________| + * | 9 | Remote Thermal Diode Connected to ADC 3 | + * |_______|___________________________________________________________________| + * | 10 | Remote Thermal Diode Connected to ADC 4 | + * |_______|___________________________________________________________________| + * | 11 | Remote Thermal Diode Connected to ADC 5 | + * |_______|___________________________________________________________________| + * | 12 | Internal Temperature Sensor | + * |_______|___________________________________________________________________| + * | 13 | Remote I2C Temperature Sensor with Address 0 | + * |_______|___________________________________________________________________| + * | 14 | Remote I2C Temperature Sensor with Address 1 | + * |_______|___________________________________________________________________| + * | 15 | Remote I2C Temperature Sensor with Address 2 | + * |_______|___________________________________________________________________| + * | 16 | Remote I2C Temperature Sensor with Address 3 | + * |_______|___________________________________________________________________| + * | 17 | Remote I2C Temperature Sensor with Address 4 | + * |_______|___________________________________________________________________| + * | 17 | Remote Voltage Connected to ADC0 | + * |_______|___________________________________________________________________| + * | 18 | Remote Voltage Connected to ADC1 | + * |_______|___________________________________________________________________| + * | 19 | Remote Voltage Connected to ADC2 | + * |_______|___________________________________________________________________| + * | 20 | Remote Voltage Connected to ADC3 | + * |_______|___________________________________________________________________| + * | 21 | Remote Voltage Connected to ADC4 | + * 
|_______|___________________________________________________________________| + * | 22 | Remote Voltage Connected to ADC5 | + * |_______|___________________________________________________________________| + * |23-254 | Reserved | + * |_______|___________________________________________________________________| + * | 255 | Applies to all pages | + * |_______|___________________________________________________________________| + */ + +/* Place holder to save the max31785 mfr specific registers */ +typedef struct MAX31785State { + PMBusDevice parent; + uint16_t mfr_mode[MAX31785_TOTAL_NUM_PAGES]; + uint16_t vout_peak[MAX31785_TOTAL_NUM_PAGES]; + uint16_t temperature_peak[MAX31785_TOTAL_NUM_PAGES]; + uint16_t vout_min[MAX31785_TOTAL_NUM_PAGES]; + uint8_t fault_response[MAX31785_TOTAL_NUM_PAGES]; + uint32_t time_count[MAX31785_TOTAL_NUM_PAGES]; + uint16_t temp_sensor_config[MAX31785_TOTAL_NUM_PAGES]; + uint16_t fan_config[MAX31785_TOTAL_NUM_PAGES]; + uint16_t read_fan_pwm[MAX31785_TOTAL_NUM_PAGES]; + uint16_t fan_fault_limit[MAX31785_TOTAL_NUM_PAGES]; + uint16_t fan_warn_limit[MAX31785_TOTAL_NUM_PAGES]; + uint16_t fan_run_time[MAX31785_TOTAL_NUM_PAGES]; + uint16_t fan_pwm_avg[MAX31785_TOTAL_NUM_PAGES]; + uint64_t fan_pwm2rpm[MAX31785_TOTAL_NUM_PAGES]; + uint64_t mfr_location; + uint64_t mfr_date; + uint64_t mfr_serial; + uint16_t mfr_revision; +} MAX31785State; + +static uint8_t max31785_read_byte(PMBusDevice *pmdev) +{ + MAX31785State *s = MAX31785(pmdev); + switch (pmdev->code) { + + case PMBUS_FAN_CONFIG_1_2: + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + pmbus_send8(pmdev, pmdev->pages[pmdev->page].fan_config_1_2); + } + break; + + case PMBUS_FAN_COMMAND_1: + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + pmbus_send16(pmdev, pmdev->pages[pmdev->page].fan_command_1); + } + break; + + case PMBUS_READ_FAN_SPEED_1: + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + pmbus_send16(pmdev, pmdev->pages[pmdev->page].read_fan_speed_1); + } + break; + + case PMBUS_STATUS_FANS_1_2: + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + pmbus_send16(pmdev, pmdev->pages[pmdev->page].status_fans_1_2); + } + break; + + case PMBUS_MFR_REVISION: + pmbus_send16(pmdev, MAX31785_DEFAULT_MFR_REVISION); + break; + + case PMBUS_MFR_ID: + pmbus_send8(pmdev, 0x4d); /* Maxim */ + break; + + case PMBUS_MFR_MODEL: + pmbus_send8(pmdev, 0x53); + break; + + case PMBUS_MFR_LOCATION: + pmbus_send64(pmdev, s->mfr_location); + break; + + case PMBUS_MFR_DATE: + pmbus_send64(pmdev, s->mfr_date); + break; + + case PMBUS_MFR_SERIAL: + pmbus_send64(pmdev, s->mfr_serial); + break; + + case MAX31785_MFR_MODE: + pmbus_send16(pmdev, s->mfr_mode[pmdev->page]); + break; + + case MAX31785_MFR_VOUT_PEAK: + if ((pmdev->page >= MAX31785_MIN_ADC_VOLTAGE_PAGE) && + (pmdev->page <= MAX31785_MAX_ADC_VOLTAGE_PAGE)) { + pmbus_send16(pmdev, s->vout_peak[pmdev->page]); + } + break; + + case MAX31785_MFR_TEMPERATURE_PEAK: + if ((pmdev->page >= MAX31785_MIN_TEMP_PAGE) && + (pmdev->page <= MAX31785_MAX_TEMP_PAGE)) { + pmbus_send16(pmdev, s->temperature_peak[pmdev->page]); + } + break; + + case MAX31785_MFR_VOUT_MIN: + if ((pmdev->page >= MAX31785_MIN_ADC_VOLTAGE_PAGE) && + (pmdev->page <= MAX31785_MAX_ADC_VOLTAGE_PAGE)) { + pmbus_send16(pmdev, s->vout_min[pmdev->page]); + } + break; + + case MAX31785_MFR_FAULT_RESPONSE: + pmbus_send8(pmdev, s->fault_response[pmdev->page]); + break; + + case MAX31785_MFR_TIME_COUNT: /* R/W 32 */ + pmbus_send32(pmdev, s->time_count[pmdev->page]); + break; + + case MAX31785_MFR_TEMP_SENSOR_CONFIG: /* R/W 16 */ + if ((pmdev->page 
>= MAX31785_MIN_TEMP_PAGE) && + (pmdev->page <= MAX31785_MAX_TEMP_PAGE)) { + pmbus_send16(pmdev, s->temp_sensor_config[pmdev->page]); + } + break; + + case MAX31785_MFR_FAN_CONFIG: /* R/W 16 */ + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + pmbus_send16(pmdev, s->fan_config[pmdev->page]); + } + break; + + case MAX31785_MFR_READ_FAN_PWM: /* R/W 16 */ + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + pmbus_send16(pmdev, s->read_fan_pwm[pmdev->page]); + } + break; + + case MAX31785_MFR_FAN_FAULT_LIMIT: /* R/W 16 */ + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + pmbus_send16(pmdev, s->fan_fault_limit[pmdev->page]); + } + break; + + case MAX31785_MFR_FAN_WARN_LIMIT: /* R/W 16 */ + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + pmbus_send16(pmdev, s->fan_warn_limit[pmdev->page]); + } + break; + + case MAX31785_MFR_FAN_RUN_TIME: /* R/W 16 */ + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + pmbus_send16(pmdev, s->fan_run_time[pmdev->page]); + } + break; + + case MAX31785_MFR_FAN_PWM_AVG: /* R/W 16 */ + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + pmbus_send16(pmdev, s->fan_pwm_avg[pmdev->page]); + } + break; + + case MAX31785_MFR_FAN_PWM2RPM: /* R/W 64 */ + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + pmbus_send64(pmdev, s->fan_pwm2rpm[pmdev->page]); + } + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: reading from unsupported register: 0x%02x\n", + __func__, pmdev->code); + break; + } + + return 0xFF; +} + +static int max31785_write_data(PMBusDevice *pmdev, const uint8_t *buf, + uint8_t len) +{ + MAX31785State *s = MAX31785(pmdev); + if (len == 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: writing empty data\n", __func__); + return -1; + } + + pmdev->code = buf[0]; /* PMBus command code */ + + if (len == 1) { + return 0; + } + + /* Exclude command code from buffer */ + buf++; + len--; + + switch (pmdev->code) { + + case PMBUS_FAN_CONFIG_1_2: + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + pmdev->pages[pmdev->page].fan_config_1_2 = pmbus_receive8(pmdev); + } + break; + + case PMBUS_FAN_COMMAND_1: + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + pmdev->pages[pmdev->page].fan_command_1 = pmbus_receive16(pmdev); + pmdev->pages[pmdev->page].read_fan_speed_1 = + ((MAX31785_DEFAULT_FAN_SPEED / MAX31785_DEFAULT_FAN_MAX_PWM) * + pmdev->pages[pmdev->page].fan_command_1); + } + break; + + case PMBUS_MFR_LOCATION: /* R/W 64 */ + s->mfr_location = pmbus_receive64(pmdev); + break; + + case PMBUS_MFR_DATE: /* R/W 64 */ + s->mfr_date = pmbus_receive64(pmdev); + break; + + case PMBUS_MFR_SERIAL: /* R/W 64 */ + s->mfr_serial = pmbus_receive64(pmdev); + break; + + case MAX31785_MFR_MODE: /* R/W word */ + s->mfr_mode[pmdev->page] = pmbus_receive16(pmdev); + break; + + case MAX31785_MFR_VOUT_PEAK: /* R/W word */ + if ((pmdev->page >= MAX31785_MIN_ADC_VOLTAGE_PAGE) && + (pmdev->page <= MAX31785_MAX_ADC_VOLTAGE_PAGE)) { + s->vout_peak[pmdev->page] = pmbus_receive16(pmdev); + } + break; + + case MAX31785_MFR_TEMPERATURE_PEAK: /* R/W word */ + if ((pmdev->page >= 6) && (pmdev->page <= 16)) { + s->temperature_peak[pmdev->page] = pmbus_receive16(pmdev); + } + break; + + case MAX31785_MFR_VOUT_MIN: /* R/W word */ + if ((pmdev->page >= MAX31785_MIN_ADC_VOLTAGE_PAGE) && + (pmdev->page <= MAX31785_MAX_ADC_VOLTAGE_PAGE)) { + s->vout_min[pmdev->page] = pmbus_receive16(pmdev); + } + break; + + case MAX31785_MFR_FAULT_RESPONSE: /* R/W 8 */ + s->fault_response[pmdev->page] = pmbus_receive8(pmdev); + break; + + case MAX31785_MFR_TIME_COUNT: /* R/W 32 */ + s->time_count[pmdev->page] = pmbus_receive32(pmdev); + break; + + 
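In the PMBUS_FAN_COMMAND_1 write case above, READ_FAN_SPEED_1 is derived with integer arithmetic: MAX31785_DEFAULT_FAN_SPEED / MAX31785_DEFAULT_FAN_MAX_PWM is 32767 / 10000, which truncates to 3, so the model reports three RPM per command unit rather than the exact 3.2767 ratio. A worked sketch of that scaling (editorial illustration, not part of the patch):

static uint16_t max31785_example_fan_speed(uint16_t fan_command_1)
{
    /*
     * (0x7fff / 0x2710) truncates to 3, so e.g. a FAN_COMMAND_1 of 10000
     * reads back as 30000 via READ_FAN_SPEED_1.
     */
    return (MAX31785_DEFAULT_FAN_SPEED / MAX31785_DEFAULT_FAN_MAX_PWM) *
           fan_command_1;
}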
case MAX31785_MFR_TEMP_SENSOR_CONFIG: /* R/W 16 */ + if ((pmdev->page >= MAX31785_MIN_TEMP_PAGE) && + (pmdev->page <= MAX31785_MAX_TEMP_PAGE)) { + s->temp_sensor_config[pmdev->page] = pmbus_receive16(pmdev); + } + break; + + case MAX31785_MFR_FAN_CONFIG: /* R/W 16 */ + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + s->fan_config[pmdev->page] = pmbus_receive16(pmdev); + } + break; + + case MAX31785_MFR_FAN_FAULT_LIMIT: /* R/W 16 */ + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + s->fan_fault_limit[pmdev->page] = pmbus_receive16(pmdev); + } + break; + + case MAX31785_MFR_FAN_WARN_LIMIT: /* R/W 16 */ + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + s->fan_warn_limit[pmdev->page] = pmbus_receive16(pmdev); + } + break; + + case MAX31785_MFR_FAN_RUN_TIME: /* R/W 16 */ + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + s->fan_run_time[pmdev->page] = pmbus_receive16(pmdev); + } + break; + + case MAX31785_MFR_FAN_PWM_AVG: /* R/W 16 */ + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + s->fan_pwm_avg[pmdev->page] = pmbus_receive16(pmdev); + } + break; + + case MAX31785_MFR_FAN_PWM2RPM: /* R/W 64 */ + if (pmdev->page <= MAX31785_MAX_FAN_PAGE) { + s->fan_pwm2rpm[pmdev->page] = pmbus_receive64(pmdev); + } + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: writing to unsupported register: 0x%02x\n", + __func__, pmdev->code); + break; + } + + return 0; +} + +static void max31785_exit_reset(Object *obj) +{ + PMBusDevice *pmdev = PMBUS_DEVICE(obj); + MAX31785State *s = MAX31785(obj); + + pmdev->capability = MAX31785_DEFAULT_CAPABILITY; + + for (int i = MAX31785_MIN_FAN_PAGE; i <= MAX31785_MAX_FAN_PAGE; i++) { + pmdev->pages[i].vout_mode = MAX31785_DEFAULT_VOUT_MODE; + pmdev->pages[i].fan_command_1 = MAX31785_DEFAULT_FAN_COMMAND_1; + pmdev->pages[i].revision = MAX31785_DEFAULT_PMBUS_REVISION; + pmdev->pages[i].fan_config_1_2 = MAX31785_DEFAULT_FAN_CONFIG_1_2(0); + pmdev->pages[i].read_fan_speed_1 = MAX31785_DEFAULT_FAN_SPEED; + pmdev->pages[i].status_fans_1_2 = MAX31785_DEFAULT_FAN_STATUS; + } + + for (int i = MAX31785_MIN_TEMP_PAGE; i <= MAX31785_MAX_TEMP_PAGE; i++) { + pmdev->pages[i].vout_mode = MAX31785_DEFAULT_VOUT_MODE; + pmdev->pages[i].revision = MAX31785_DEFAULT_PMBUS_REVISION; + pmdev->pages[i].ot_fault_limit = MAX31785_DEFAULT_OT_FAULT_LIMIT; + pmdev->pages[i].ot_warn_limit = MAX31785_DEFAULT_OT_WARN_LIMIT; + } + + for (int i = MAX31785_MIN_ADC_VOLTAGE_PAGE; + i <= MAX31785_MAX_ADC_VOLTAGE_PAGE; + i++) { + pmdev->pages[i].vout_mode = MAX31785_DEFAULT_VOUT_MODE; + pmdev->pages[i].revision = MAX31785_DEFAULT_PMBUS_REVISION; + pmdev->pages[i].vout_scale_monitor = + MAX31785_DEFAULT_VOUT_SCALE_MONITOR; + pmdev->pages[i].vout_ov_fault_limit = MAX31785_DEFAULT_OV_FAULT_LIMIT; + pmdev->pages[i].vout_ov_warn_limit = MAX31785_DEFAULT_OV_WARN_LIMIT; + } + + s->mfr_location = MAX31785_DEFAULT_TEXT; + s->mfr_date = MAX31785_DEFAULT_TEXT; + s->mfr_serial = MAX31785_DEFAULT_TEXT; +} + +static const VMStateDescription vmstate_max31785 = { + .name = TYPE_MAX31785, + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]){ + VMSTATE_PMBUS_DEVICE(parent, MAX31785State), + VMSTATE_UINT16_ARRAY(mfr_mode, MAX31785State, + MAX31785_TOTAL_NUM_PAGES), + VMSTATE_UINT16_ARRAY(vout_peak, MAX31785State, + MAX31785_TOTAL_NUM_PAGES), + VMSTATE_UINT16_ARRAY(temperature_peak, MAX31785State, + MAX31785_TOTAL_NUM_PAGES), + VMSTATE_UINT16_ARRAY(vout_min, MAX31785State, + MAX31785_TOTAL_NUM_PAGES), + VMSTATE_UINT8_ARRAY(fault_response, MAX31785State, + MAX31785_TOTAL_NUM_PAGES), + 
VMSTATE_UINT32_ARRAY(time_count, MAX31785State, + MAX31785_TOTAL_NUM_PAGES), + VMSTATE_UINT16_ARRAY(temp_sensor_config, MAX31785State, + MAX31785_TOTAL_NUM_PAGES), + VMSTATE_UINT16_ARRAY(fan_config, MAX31785State, + MAX31785_TOTAL_NUM_PAGES), + VMSTATE_UINT16_ARRAY(read_fan_pwm, MAX31785State, + MAX31785_TOTAL_NUM_PAGES), + VMSTATE_UINT16_ARRAY(fan_fault_limit, MAX31785State, + MAX31785_TOTAL_NUM_PAGES), + VMSTATE_UINT16_ARRAY(fan_warn_limit, MAX31785State, + MAX31785_TOTAL_NUM_PAGES), + VMSTATE_UINT16_ARRAY(fan_run_time, MAX31785State, + MAX31785_TOTAL_NUM_PAGES), + VMSTATE_UINT16_ARRAY(fan_pwm_avg, MAX31785State, + MAX31785_TOTAL_NUM_PAGES), + VMSTATE_UINT64_ARRAY(fan_pwm2rpm, MAX31785State, + MAX31785_TOTAL_NUM_PAGES), + VMSTATE_UINT64(mfr_location, MAX31785State), + VMSTATE_UINT64(mfr_date, MAX31785State), + VMSTATE_UINT64(mfr_serial, MAX31785State), + VMSTATE_END_OF_LIST() + } +}; + +static void max31785_init(Object *obj) +{ + PMBusDevice *pmdev = PMBUS_DEVICE(obj); + + for (int i = MAX31785_MIN_FAN_PAGE; i <= MAX31785_MAX_FAN_PAGE; i++) { + pmbus_page_config(pmdev, i, PB_HAS_VOUT_MODE); + } + + for (int i = MAX31785_MIN_TEMP_PAGE; i <= MAX31785_MAX_TEMP_PAGE; i++) { + pmbus_page_config(pmdev, i, PB_HAS_VOUT_MODE | PB_HAS_TEMPERATURE); + } + + for (int i = MAX31785_MIN_ADC_VOLTAGE_PAGE; + i <= MAX31785_MAX_ADC_VOLTAGE_PAGE; + i++) { + pmbus_page_config(pmdev, i, PB_HAS_VOUT_MODE | PB_HAS_VOUT | + PB_HAS_VOUT_RATING); + } +} + +static void max31785_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + PMBusDeviceClass *k = PMBUS_DEVICE_CLASS(klass); + dc->desc = "Maxim MAX31785 6-Channel Fan Controller"; + dc->vmsd = &vmstate_max31785; + k->write_data = max31785_write_data; + k->receive_byte = max31785_read_byte; + k->device_num_pages = MAX31785_TOTAL_NUM_PAGES; + rc->phases.exit = max31785_exit_reset; +} + +static const TypeInfo max31785_info = { + .name = TYPE_MAX31785, + .parent = TYPE_PMBUS_DEVICE, + .instance_size = sizeof(MAX31785State), + .instance_init = max31785_init, + .class_init = max31785_class_init, +}; + +static void max31785_register_types(void) +{ + type_register_static(&max31785_info); +} + +type_init(max31785_register_types) diff --git a/hw/sensor/meson.build b/hw/sensor/meson.build index 034e3e0207..9e9be602c3 100644 --- a/hw/sensor/meson.build +++ b/hw/sensor/meson.build @@ -1,5 +1,9 @@ softmmu_ss.add(when: 'CONFIG_TMP105', if_true: files('tmp105.c')) softmmu_ss.add(when: 'CONFIG_TMP421', if_true: files('tmp421.c')) +softmmu_ss.add(when: 'CONFIG_DPS310', if_true: files('dps310.c')) softmmu_ss.add(when: 'CONFIG_EMC141X', if_true: files('emc141x.c')) softmmu_ss.add(when: 'CONFIG_ADM1272', if_true: files('adm1272.c')) softmmu_ss.add(when: 'CONFIG_MAX34451', if_true: files('max34451.c')) +softmmu_ss.add(when: 'CONFIG_LSM303DLHC_MAG', if_true: files('lsm303dlhc_mag.c')) +softmmu_ss.add(when: 'CONFIG_ISL_PMBUS_VR', if_true: files('isl_pmbus_vr.c')) +softmmu_ss.add(when: 'CONFIG_MAX31785', if_true: files('max31785.c')) diff --git a/hw/sh4/r2d.c b/hw/sh4/r2d.c index 006010f30a..39fc4f19d9 100644 --- a/hw/sh4/r2d.c +++ b/hw/sh4/r2d.c @@ -26,6 +26,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "cpu.h" #include "hw/sysbus.h" #include "hw/sh4/sh.h" @@ -56,10 +57,10 @@ #define LINUX_LOAD_OFFSET 0x0800000 #define INITRD_LOAD_OFFSET 0x1800000 -#define PA_IRLMSK 0x00 -#define PA_POWOFF 0x30 -#define PA_VERREG 0x32 -#define 
PA_OUTPORT 0x36 +#define PA_IRLMSK 0x00 +#define PA_POWOFF 0x30 +#define PA_VERREG 0x32 +#define PA_OUTPORT 0x36 typedef struct { uint16_t bcr; @@ -96,38 +97,41 @@ enum r2d_fpga_irq { }; static const struct { short irl; uint16_t msk; } irqtab[NR_IRQS] = { - [CF_IDE] = { 1, 1<<9 }, - [CF_CD] = { 2, 1<<8 }, - [PCI_INTA] = { 9, 1<<14 }, - [PCI_INTB] = { 10, 1<<13 }, - [PCI_INTC] = { 3, 1<<12 }, - [PCI_INTD] = { 0, 1<<11 }, - [SM501] = { 4, 1<<10 }, - [KEY] = { 5, 1<<6 }, - [RTC_A] = { 6, 1<<5 }, - [RTC_T] = { 7, 1<<4 }, - [SDCARD] = { 8, 1<<7 }, - [EXT] = { 11, 1<<0 }, - [TP] = { 12, 1<<15 }, + [CF_IDE] = { 1, 1 << 9 }, + [CF_CD] = { 2, 1 << 8 }, + [PCI_INTA] = { 9, 1 << 14 }, + [PCI_INTB] = { 10, 1 << 13 }, + [PCI_INTC] = { 3, 1 << 12 }, + [PCI_INTD] = { 0, 1 << 11 }, + [SM501] = { 4, 1 << 10 }, + [KEY] = { 5, 1 << 6 }, + [RTC_A] = { 6, 1 << 5 }, + [RTC_T] = { 7, 1 << 4 }, + [SDCARD] = { 8, 1 << 7 }, + [EXT] = { 11, 1 << 0 }, + [TP] = { 12, 1 << 15 }, }; static void update_irl(r2d_fpga_t *fpga) { int i, irl = 15; - for (i = 0; i < NR_IRQS; i++) - if (fpga->irlmon & fpga->irlmsk & irqtab[i].msk) - if (irqtab[i].irl < irl) - irl = irqtab[i].irl; + for (i = 0; i < NR_IRQS; i++) { + if ((fpga->irlmon & fpga->irlmsk & irqtab[i].msk) && + irqtab[i].irl < irl) { + irl = irqtab[i].irl; + } + } qemu_set_irq(fpga->irl, irl ^ 15); } static void r2d_fpga_irq_set(void *opaque, int n, int level) { r2d_fpga_t *fpga = opaque; - if (level) + if (level) { fpga->irlmon |= irqtab[n].msk; - else + } else { fpga->irlmon &= ~irqtab[n].msk; + } update_irl(fpga); } @@ -186,7 +190,7 @@ static qemu_irq *r2d_fpga_init(MemoryRegion *sysmem, { r2d_fpga_t *s; - s = g_malloc0(sizeof(r2d_fpga_t)); + s = g_new0(r2d_fpga_t, 1); s->irl = irl; @@ -244,7 +248,7 @@ static void r2d_init(MachineState *machine) cpu = SUPERH_CPU(cpu_create(machine->cpu_type)); env = &cpu->env; - reset_info = g_malloc0(sizeof(ResetData)); + reset_info = g_new0(ResetData, 1); reset_info->cpu = cpu; reset_info->vector = env->pc; qemu_register_reset(main_cpu_reset, reset_info); @@ -306,7 +310,7 @@ static void r2d_init(MachineState *machine) /* NIC: rtl8139 on-board, and 2 slots. */ for (i = 0; i < nb_nics; i++) pci_nic_init_nofail(&nd_table[i], pci_bus, - "rtl8139", i==0 ? "2" : NULL); + "rtl8139", i == 0 ? 
"2" : NULL); /* USB keyboard */ usb_create_simple(usb_bus_find(-1), "usb-kbd"); @@ -321,8 +325,8 @@ static void r2d_init(MachineState *machine) SDRAM_BASE + LINUX_LOAD_OFFSET, INITRD_LOAD_OFFSET - LINUX_LOAD_OFFSET); if (kernel_size < 0) { - fprintf(stderr, "qemu: could not load kernel '%s'\n", kernel_filename); - exit(1); + error_report("qemu: could not load kernel '%s'", kernel_filename); + exit(1); } /* initialization which should be done by firmware */ @@ -330,7 +334,8 @@ static void r2d_init(MachineState *machine) MEMTXATTRS_UNSPECIFIED, NULL); /* cs3 SDRAM */ address_space_stw(&address_space_memory, SH7750_BCR2, 3 << (3 * 2), MEMTXATTRS_UNSPECIFIED, NULL); /* cs3 32bit */ - reset_info->vector = (SDRAM_BASE + LINUX_LOAD_OFFSET) | 0xa0000000; /* Start from P2 area */ + /* Start from P2 area */ + reset_info->vector = (SDRAM_BASE + LINUX_LOAD_OFFSET) | 0xa0000000; } if (initrd_filename) { @@ -341,8 +346,8 @@ static void r2d_init(MachineState *machine) SDRAM_SIZE - INITRD_LOAD_OFFSET); if (initrd_size < 0) { - fprintf(stderr, "qemu: could not load initrd '%s'\n", initrd_filename); - exit(1); + error_report("qemu: could not load initrd '%s'", initrd_filename); + exit(1); } /* initialization which should be done by firmware */ @@ -352,8 +357,10 @@ static void r2d_init(MachineState *machine) } if (kernel_cmdline) { - /* I see no evidence that this .kernel_cmdline buffer requires - NUL-termination, so using strncpy should be ok. */ + /* + * I see no evidence that this .kernel_cmdline buffer requires + * NUL-termination, so using strncpy should be ok. + */ strncpy(boot_params.kernel_cmdline, kernel_cmdline, sizeof(boot_params.kernel_cmdline)); } diff --git a/hw/sh4/sh7750.c b/hw/sh4/sh7750.c index d53a436d8c..c77792d150 100644 --- a/hw/sh4/sh7750.c +++ b/hw/sh4/sh7750.c @@ -24,14 +24,19 @@ */ #include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/sysbus.h" #include "hw/irq.h" #include "hw/sh4/sh.h" #include "sysemu/sysemu.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" #include "sh7750_regs.h" #include "sh7750_regnames.h" #include "hw/sh4/sh_intc.h" #include "hw/timer/tmu012.h" #include "exec/exec-all.h" +#include "trace.h" #define NB_DEVICES 4 @@ -60,17 +65,17 @@ typedef struct SH7750State { uint16_t gpioic; uint32_t pctra; uint32_t pctrb; - uint16_t portdira; /* Cached */ - uint16_t portpullupa; /* Cached */ - uint16_t portdirb; /* Cached */ - uint16_t portpullupb; /* Cached */ + uint16_t portdira; /* Cached */ + uint16_t portpullupa; /* Cached */ + uint16_t portdirb; /* Cached */ + uint16_t portpullupb; /* Cached */ uint16_t pdtra; uint16_t pdtrb; - uint16_t periph_pdtra; /* Imposed by the peripherals */ - uint16_t periph_portdira; /* Direction seen from the peripherals */ - uint16_t periph_pdtrb; /* Imposed by the peripherals */ - uint16_t periph_portdirb; /* Direction seen from the peripherals */ - sh7750_io_device *devices[NB_DEVICES]; /* External peripherals */ + uint16_t periph_pdtra; /* Imposed by the peripherals */ + uint16_t periph_portdira; /* Direction seen from the peripherals */ + uint16_t periph_pdtrb; /* Imposed by the peripherals */ + uint16_t periph_portdirb; /* Direction seen from the peripherals */ + sh7750_io_device *devices[NB_DEVICES]; /* External peripherals */ /* Cache */ uint32_t ccr; @@ -78,143 +83,145 @@ typedef struct SH7750State { struct intc_desc intc; } SH7750State; -static inline int has_bcr3_and_bcr4(SH7750State * s) +static inline int has_bcr3_and_bcr4(SH7750State *s) { return s->cpu->env.features & 
SH_FEATURE_BCR3_AND_BCR4; } -/********************************************************************** - I/O ports -**********************************************************************/ -int sh7750_register_io_device(SH7750State * s, sh7750_io_device * device) +/* + * I/O ports + */ + +int sh7750_register_io_device(SH7750State *s, sh7750_io_device *device) { int i; for (i = 0; i < NB_DEVICES; i++) { - if (s->devices[i] == NULL) { - s->devices[i] = device; - return 0; - } + if (s->devices[i] == NULL) { + s->devices[i] = device; + return 0; + } } return -1; } static uint16_t portdir(uint32_t v) { -#define EVENPORTMASK(n) ((v & (1<<((n)<<1))) >> (n)) +#define EVENPORTMASK(n) ((v & (1 << ((n) << 1))) >> (n)) return - EVENPORTMASK(15) | EVENPORTMASK(14) | EVENPORTMASK(13) | - EVENPORTMASK(12) | EVENPORTMASK(11) | EVENPORTMASK(10) | - EVENPORTMASK(9) | EVENPORTMASK(8) | EVENPORTMASK(7) | - EVENPORTMASK(6) | EVENPORTMASK(5) | EVENPORTMASK(4) | - EVENPORTMASK(3) | EVENPORTMASK(2) | EVENPORTMASK(1) | - EVENPORTMASK(0); + EVENPORTMASK(15) | EVENPORTMASK(14) | EVENPORTMASK(13) | + EVENPORTMASK(12) | EVENPORTMASK(11) | EVENPORTMASK(10) | + EVENPORTMASK(9) | EVENPORTMASK(8) | EVENPORTMASK(7) | + EVENPORTMASK(6) | EVENPORTMASK(5) | EVENPORTMASK(4) | + EVENPORTMASK(3) | EVENPORTMASK(2) | EVENPORTMASK(1) | + EVENPORTMASK(0); } static uint16_t portpullup(uint32_t v) { -#define ODDPORTMASK(n) ((v & (1<<(((n)<<1)+1))) >> (n)) +#define ODDPORTMASK(n) ((v & (1 << (((n) << 1) + 1))) >> (n)) return - ODDPORTMASK(15) | ODDPORTMASK(14) | ODDPORTMASK(13) | - ODDPORTMASK(12) | ODDPORTMASK(11) | ODDPORTMASK(10) | - ODDPORTMASK(9) | ODDPORTMASK(8) | ODDPORTMASK(7) | ODDPORTMASK(6) | - ODDPORTMASK(5) | ODDPORTMASK(4) | ODDPORTMASK(3) | ODDPORTMASK(2) | - ODDPORTMASK(1) | ODDPORTMASK(0); + ODDPORTMASK(15) | ODDPORTMASK(14) | ODDPORTMASK(13) | + ODDPORTMASK(12) | ODDPORTMASK(11) | ODDPORTMASK(10) | + ODDPORTMASK(9) | ODDPORTMASK(8) | ODDPORTMASK(7) | ODDPORTMASK(6) | + ODDPORTMASK(5) | ODDPORTMASK(4) | ODDPORTMASK(3) | ODDPORTMASK(2) | + ODDPORTMASK(1) | ODDPORTMASK(0); } -static uint16_t porta_lines(SH7750State * s) +static uint16_t porta_lines(SH7750State *s) { - return (s->portdira & s->pdtra) | /* CPU */ - (s->periph_portdira & s->periph_pdtra) | /* Peripherals */ - (~(s->portdira | s->periph_portdira) & s->portpullupa); /* Pullups */ + return (s->portdira & s->pdtra) | /* CPU */ + (s->periph_portdira & s->periph_pdtra) | /* Peripherals */ + (~(s->portdira | s->periph_portdira) & s->portpullupa); /* Pullups */ } -static uint16_t portb_lines(SH7750State * s) +static uint16_t portb_lines(SH7750State *s) { - return (s->portdirb & s->pdtrb) | /* CPU */ - (s->periph_portdirb & s->periph_pdtrb) | /* Peripherals */ - (~(s->portdirb | s->periph_portdirb) & s->portpullupb); /* Pullups */ + return (s->portdirb & s->pdtrb) | /* CPU */ + (s->periph_portdirb & s->periph_pdtrb) | /* Peripherals */ + (~(s->portdirb | s->periph_portdirb) & s->portpullupb); /* Pullups */ } -static void gen_port_interrupts(SH7750State * s) +static void gen_port_interrupts(SH7750State *s) { /* XXXXX interrupts not generated */ } -static void porta_changed(SH7750State * s, uint16_t prev) +static void porta_changed(SH7750State *s, uint16_t prev) { uint16_t currenta, changes; int i, r = 0; -#if 0 - fprintf(stderr, "porta changed from 0x%04x to 0x%04x\n", - prev, porta_lines(s)); - fprintf(stderr, "pdtra=0x%04x, pctra=0x%08x\n", s->pdtra, s->pctra); -#endif currenta = porta_lines(s); - if (currenta == prev) - return; + if (currenta == prev) { + return; + } 
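For context on the port plumbing being reindented here: external peripherals attach to these GPIO lines by registering an sh7750_io_device whose port_change_cb runs whenever a line selected by portamask_trigger or portbmask_trigger toggles (see sh7750_register_io_device and porta_changed/portb_changed), and a nonzero return value requests a port interrupt (gen_port_interrupts(), currently a stub). A minimal registration sketch follows; the peripheral, masks and bit assignments are invented for illustration and are not part of this patch.

/* Hypothetical peripheral that watches port A bit 0 and drives bit 1 back. */
static int example_port_change(uint16_t porta, uint16_t portb,
                               uint16_t *periph_pdtra,
                               uint16_t *periph_portdira,
                               uint16_t *periph_pdtrb,
                               uint16_t *periph_portdirb)
{
    /* Declare PA1 as driven by the peripheral and mirror PA0 onto it. */
    *periph_portdira |= 0x0002;
    if (porta & 0x0001) {
        *periph_pdtra |= 0x0002;
    } else {
        *periph_pdtra &= ~0x0002;
    }
    return 0; /* no port interrupt requested */
}

static sh7750_io_device example_io_device = {
    .portamask_trigger = 0x0001, /* call back when PA0 changes */
    .portbmask_trigger = 0,
    .port_change_cb = example_port_change,
};

/*
 * During board setup, with the SH7750State *s for the SoC:
 *     sh7750_register_io_device(s, &example_io_device);
 */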
+ trace_sh7750_porta(prev, currenta, s->pdtra, s->pctra); changes = currenta ^ prev; for (i = 0; i < NB_DEVICES; i++) { - if (s->devices[i] && (s->devices[i]->portamask_trigger & changes)) { - r |= s->devices[i]->port_change_cb(currenta, portb_lines(s), - &s->periph_pdtra, - &s->periph_portdira, - &s->periph_pdtrb, - &s->periph_portdirb); - } + if (s->devices[i] && (s->devices[i]->portamask_trigger & changes)) { + r |= s->devices[i]->port_change_cb(currenta, portb_lines(s), + &s->periph_pdtra, + &s->periph_portdira, + &s->periph_pdtrb, + &s->periph_portdirb); + } } - if (r) - gen_port_interrupts(s); + if (r) { + gen_port_interrupts(s); + } } -static void portb_changed(SH7750State * s, uint16_t prev) +static void portb_changed(SH7750State *s, uint16_t prev) { uint16_t currentb, changes; int i, r = 0; currentb = portb_lines(s); - if (currentb == prev) - return; + if (currentb == prev) { + return; + } + trace_sh7750_portb(prev, currentb, s->pdtrb, s->pctrb); changes = currentb ^ prev; for (i = 0; i < NB_DEVICES; i++) { - if (s->devices[i] && (s->devices[i]->portbmask_trigger & changes)) { - r |= s->devices[i]->port_change_cb(portb_lines(s), currentb, - &s->periph_pdtra, - &s->periph_portdira, - &s->periph_pdtrb, - &s->periph_portdirb); - } + if (s->devices[i] && (s->devices[i]->portbmask_trigger & changes)) { + r |= s->devices[i]->port_change_cb(portb_lines(s), currentb, + &s->periph_pdtra, + &s->periph_portdira, + &s->periph_pdtrb, + &s->periph_portdirb); + } } - if (r) - gen_port_interrupts(s); + if (r) { + gen_port_interrupts(s); + } } -/********************************************************************** - Memory -**********************************************************************/ +/* + * Memory + */ static void error_access(const char *kind, hwaddr addr) { fprintf(stderr, "%s to %s (0x" TARGET_FMT_plx ") not supported\n", - kind, regname(addr), addr); + kind, regname(addr), addr); } static void ignore_access(const char *kind, hwaddr addr) { fprintf(stderr, "%s to %s (0x" TARGET_FMT_plx ") ignored\n", - kind, regname(addr), addr); + kind, regname(addr), addr); } static uint32_t sh7750_mem_readb(void *opaque, hwaddr addr) { switch (addr) { default: - error_access("byte read", addr); + error_access("byte read", addr); abort(); } } @@ -225,30 +232,31 @@ static uint32_t sh7750_mem_readw(void *opaque, hwaddr addr) switch (addr) { case SH7750_BCR2_A7: - return s->bcr2; + return s->bcr2; case SH7750_BCR3_A7: - if(!has_bcr3_and_bcr4(s)) - error_access("word read", addr); - return s->bcr3; + if (!has_bcr3_and_bcr4(s)) { + error_access("word read", addr); + } + return s->bcr3; case SH7750_FRQCR_A7: - return 0; + return 0; case SH7750_PCR_A7: - return s->pcr; + return s->pcr; case SH7750_RFCR_A7: - fprintf(stderr, - "Read access to refresh count register, incrementing\n"); - return s->rfcr++; + fprintf(stderr, + "Read access to refresh count register, incrementing\n"); + return s->rfcr++; case SH7750_PDTRA_A7: - return porta_lines(s); + return porta_lines(s); case SH7750_PDTRB_A7: - return portb_lines(s); + return portb_lines(s); case SH7750_RTCOR_A7: case SH7750_RTCNT_A7: case SH7750_RTCSR_A7: - ignore_access("word read", addr); - return 0; + ignore_access("word read", addr); + return 0; default: - error_access("word read", addr); + error_access("word read", addr); abort(); } } @@ -260,11 +268,12 @@ static uint32_t sh7750_mem_readl(void *opaque, hwaddr addr) switch (addr) { case SH7750_BCR1_A7: - return s->bcr1; + return s->bcr1; case SH7750_BCR4_A7: - if(!has_bcr3_and_bcr4(s)) - 
error_access("long read", addr); - return s->bcr4; + if (!has_bcr3_and_bcr4(s)) { + error_access("long read", addr); + } + return s->bcr4; case SH7750_WCR1_A7: case SH7750_WCR2_A7: case SH7750_WCR3_A7: @@ -288,31 +297,31 @@ static uint32_t sh7750_mem_readl(void *opaque, hwaddr addr) case SH7750_INTEVT_A7: return s->cpu->env.intevt; case SH7750_CCR_A7: - return s->ccr; - case 0x1f000030: /* Processor version */ + return s->ccr; + case 0x1f000030: /* Processor version */ scc = SUPERH_CPU_GET_CLASS(s->cpu); return scc->pvr; - case 0x1f000040: /* Cache version */ + case 0x1f000040: /* Cache version */ scc = SUPERH_CPU_GET_CLASS(s->cpu); return scc->cvr; - case 0x1f000044: /* Processor revision */ + case 0x1f000044: /* Processor revision */ scc = SUPERH_CPU_GET_CLASS(s->cpu); return scc->prr; default: - error_access("long read", addr); + error_access("long read", addr); abort(); } } #define is_in_sdrmx(a, x) (a >= SH7750_SDMR ## x ## _A7 \ - && a <= (SH7750_SDMR ## x ## _A7 + SH7750_SDMR ## x ## _REGNB)) + && a <= (SH7750_SDMR ## x ## _A7 + SH7750_SDMR ## x ## _REGNB)) static void sh7750_mem_writeb(void *opaque, hwaddr addr, - uint32_t mem_value) + uint32_t mem_value) { if (is_in_sdrmx(addr, 2) || is_in_sdrmx(addr, 3)) { - ignore_access("byte write", addr); - return; + ignore_access("byte write", addr); + return; } error_access("byte write", addr); @@ -320,94 +329,96 @@ static void sh7750_mem_writeb(void *opaque, hwaddr addr, } static void sh7750_mem_writew(void *opaque, hwaddr addr, - uint32_t mem_value) + uint32_t mem_value) { SH7750State *s = opaque; uint16_t temp; switch (addr) { - /* SDRAM controller */ + /* SDRAM controller */ case SH7750_BCR2_A7: s->bcr2 = mem_value; return; case SH7750_BCR3_A7: - if(!has_bcr3_and_bcr4(s)) - error_access("word write", addr); - s->bcr3 = mem_value; - return; + if (!has_bcr3_and_bcr4(s)) { + error_access("word write", addr); + } + s->bcr3 = mem_value; + return; case SH7750_PCR_A7: - s->pcr = mem_value; - return; + s->pcr = mem_value; + return; case SH7750_RTCNT_A7: case SH7750_RTCOR_A7: case SH7750_RTCSR_A7: - ignore_access("word write", addr); - return; - /* IO ports */ + ignore_access("word write", addr); + return; + /* IO ports */ case SH7750_PDTRA_A7: - temp = porta_lines(s); - s->pdtra = mem_value; - porta_changed(s, temp); - return; + temp = porta_lines(s); + s->pdtra = mem_value; + porta_changed(s, temp); + return; case SH7750_PDTRB_A7: - temp = portb_lines(s); - s->pdtrb = mem_value; - portb_changed(s, temp); - return; + temp = portb_lines(s); + s->pdtrb = mem_value; + portb_changed(s, temp); + return; case SH7750_RFCR_A7: - fprintf(stderr, "Write access to refresh count register\n"); - s->rfcr = mem_value; - return; + fprintf(stderr, "Write access to refresh count register\n"); + s->rfcr = mem_value; + return; case SH7750_GPIOIC_A7: - s->gpioic = mem_value; - if (mem_value != 0) { - fprintf(stderr, "I/O interrupts not implemented\n"); + s->gpioic = mem_value; + if (mem_value != 0) { + fprintf(stderr, "I/O interrupts not implemented\n"); abort(); - } - return; + } + return; default: - error_access("word write", addr); + error_access("word write", addr); abort(); } } static void sh7750_mem_writel(void *opaque, hwaddr addr, - uint32_t mem_value) + uint32_t mem_value) { SH7750State *s = opaque; uint16_t temp; switch (addr) { - /* SDRAM controller */ + /* SDRAM controller */ case SH7750_BCR1_A7: s->bcr1 = mem_value; return; case SH7750_BCR4_A7: - if(!has_bcr3_and_bcr4(s)) - error_access("long write", addr); - s->bcr4 = mem_value; - return; + if 
(!has_bcr3_and_bcr4(s)) { + error_access("long write", addr); + } + s->bcr4 = mem_value; + return; case SH7750_WCR1_A7: case SH7750_WCR2_A7: case SH7750_WCR3_A7: case SH7750_MCR_A7: - ignore_access("long write", addr); - return; - /* IO ports */ + ignore_access("long write", addr); + return; + /* IO ports */ case SH7750_PCTRA_A7: - temp = porta_lines(s); - s->pctra = mem_value; - s->portdira = portdir(mem_value); - s->portpullupa = portpullup(mem_value); - porta_changed(s, temp); - return; + temp = porta_lines(s); + s->pctra = mem_value; + s->portdira = portdir(mem_value); + s->portpullupa = portpullup(mem_value); + porta_changed(s, temp); + return; case SH7750_PCTRB_A7: - temp = portb_lines(s); - s->pctrb = mem_value; - s->portdirb = portdir(mem_value); - s->portpullupb = portpullup(mem_value); - portb_changed(s, temp); - return; + temp = portb_lines(s); + s->pctrb = mem_value; + s->portdirb = portdir(mem_value); + s->portpullupb = portpullup(mem_value); + portb_changed(s, temp); + return; case SH7750_MMUCR_A7: if (mem_value & MMUCR_TI) { cpu_sh4_invalidate_tlb(&s->cpu->env); @@ -443,10 +454,10 @@ static void sh7750_mem_writel(void *opaque, hwaddr addr, s->cpu->env.intevt = mem_value & 0x000007ff; return; case SH7750_CCR_A7: - s->ccr = mem_value; - return; + s->ccr = mem_value; + return; default: - error_access("long write", addr); + error_access("long write", addr); abort(); } } @@ -491,161 +502,161 @@ static const MemoryRegionOps sh7750_mem_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -/* sh775x interrupt controller tables for sh_intc.c +/* + * sh775x interrupt controller tables for sh_intc.c * stolen from linux/arch/sh/kernel/cpu/sh4/setup-sh7750.c */ enum { - UNUSED = 0, + UNUSED = 0, - /* interrupt sources */ - IRL_0, IRL_1, IRL_2, IRL_3, IRL_4, IRL_5, IRL_6, IRL_7, - IRL_8, IRL_9, IRL_A, IRL_B, IRL_C, IRL_D, IRL_E, - IRL0, IRL1, IRL2, IRL3, - HUDI, GPIOI, - DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, DMAC_DMTE3, - DMAC_DMTE4, DMAC_DMTE5, DMAC_DMTE6, DMAC_DMTE7, - DMAC_DMAE, - PCIC0_PCISERR, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, - PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3, - TMU3, TMU4, TMU0, TMU1, TMU2_TUNI, TMU2_TICPI, - RTC_ATI, RTC_PRI, RTC_CUI, - SCI1_ERI, SCI1_RXI, SCI1_TXI, SCI1_TEI, - SCIF_ERI, SCIF_RXI, SCIF_BRI, SCIF_TXI, - WDT, - REF_RCMI, REF_ROVI, + /* interrupt sources */ + IRL_0, IRL_1, IRL_2, IRL_3, IRL_4, IRL_5, IRL_6, IRL_7, + IRL_8, IRL_9, IRL_A, IRL_B, IRL_C, IRL_D, IRL_E, + IRL0, IRL1, IRL2, IRL3, + HUDI, GPIOI, + DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, DMAC_DMTE3, + DMAC_DMTE4, DMAC_DMTE5, DMAC_DMTE6, DMAC_DMTE7, + DMAC_DMAE, + PCIC0_PCISERR, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, + PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3, + TMU3, TMU4, TMU0, TMU1, TMU2_TUNI, TMU2_TICPI, + RTC_ATI, RTC_PRI, RTC_CUI, + SCI1_ERI, SCI1_RXI, SCI1_TXI, SCI1_TEI, + SCIF_ERI, SCIF_RXI, SCIF_BRI, SCIF_TXI, + WDT, + REF_RCMI, REF_ROVI, - /* interrupt groups */ - DMAC, PCIC1, TMU2, RTC, SCI1, SCIF, REF, - /* irl bundle */ - IRL, + /* interrupt groups */ + DMAC, PCIC1, TMU2, RTC, SCI1, SCIF, REF, + /* irl bundle */ + IRL, - NR_SOURCES, + NR_SOURCES, }; static struct intc_vect vectors[] = { - INTC_VECT(HUDI, 0x600), INTC_VECT(GPIOI, 0x620), - INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), - INTC_VECT(TMU2_TUNI, 0x440), INTC_VECT(TMU2_TICPI, 0x460), - INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0), - INTC_VECT(RTC_CUI, 0x4c0), - INTC_VECT(SCI1_ERI, 0x4e0), INTC_VECT(SCI1_RXI, 0x500), - INTC_VECT(SCI1_TXI, 0x520), INTC_VECT(SCI1_TEI, 0x540), - 
INTC_VECT(SCIF_ERI, 0x700), INTC_VECT(SCIF_RXI, 0x720), - INTC_VECT(SCIF_BRI, 0x740), INTC_VECT(SCIF_TXI, 0x760), - INTC_VECT(WDT, 0x560), - INTC_VECT(REF_RCMI, 0x580), INTC_VECT(REF_ROVI, 0x5a0), + INTC_VECT(HUDI, 0x600), INTC_VECT(GPIOI, 0x620), + INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2_TUNI, 0x440), INTC_VECT(TMU2_TICPI, 0x460), + INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0), + INTC_VECT(RTC_CUI, 0x4c0), + INTC_VECT(SCI1_ERI, 0x4e0), INTC_VECT(SCI1_RXI, 0x500), + INTC_VECT(SCI1_TXI, 0x520), INTC_VECT(SCI1_TEI, 0x540), + INTC_VECT(SCIF_ERI, 0x700), INTC_VECT(SCIF_RXI, 0x720), + INTC_VECT(SCIF_BRI, 0x740), INTC_VECT(SCIF_TXI, 0x760), + INTC_VECT(WDT, 0x560), + INTC_VECT(REF_RCMI, 0x580), INTC_VECT(REF_ROVI, 0x5a0), }; static struct intc_group groups[] = { - INTC_GROUP(TMU2, TMU2_TUNI, TMU2_TICPI), - INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI), - INTC_GROUP(SCI1, SCI1_ERI, SCI1_RXI, SCI1_TXI, SCI1_TEI), - INTC_GROUP(SCIF, SCIF_ERI, SCIF_RXI, SCIF_BRI, SCIF_TXI), - INTC_GROUP(REF, REF_RCMI, REF_ROVI), + INTC_GROUP(TMU2, TMU2_TUNI, TMU2_TICPI), + INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI), + INTC_GROUP(SCI1, SCI1_ERI, SCI1_RXI, SCI1_TXI, SCI1_TEI), + INTC_GROUP(SCIF, SCIF_ERI, SCIF_RXI, SCIF_BRI, SCIF_TXI), + INTC_GROUP(REF, REF_RCMI, REF_ROVI), }; static struct intc_prio_reg prio_registers[] = { - { 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, - { 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, SCI1, 0 } }, - { 0xffd0000c, 0, 16, 4, /* IPRC */ { GPIOI, DMAC, SCIF, HUDI } }, - { 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } }, - { 0xfe080000, 0, 32, 4, /* INTPRI00 */ { 0, 0, 0, 0, - TMU4, TMU3, - PCIC1, PCIC0_PCISERR } }, + { 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, + { 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, SCI1, 0 } }, + { 0xffd0000c, 0, 16, 4, /* IPRC */ { GPIOI, DMAC, SCIF, HUDI } }, + { 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } }, + { 0xfe080000, 0, 32, 4, /* INTPRI00 */ { 0, 0, 0, 0, TMU4, TMU3, + PCIC1, PCIC0_PCISERR } }, }; /* SH7750, SH7750S, SH7751 and SH7091 all have 4-channel DMA controllers */ static struct intc_vect vectors_dma4[] = { - INTC_VECT(DMAC_DMTE0, 0x640), INTC_VECT(DMAC_DMTE1, 0x660), - INTC_VECT(DMAC_DMTE2, 0x680), INTC_VECT(DMAC_DMTE3, 0x6a0), - INTC_VECT(DMAC_DMAE, 0x6c0), + INTC_VECT(DMAC_DMTE0, 0x640), INTC_VECT(DMAC_DMTE1, 0x660), + INTC_VECT(DMAC_DMTE2, 0x680), INTC_VECT(DMAC_DMTE3, 0x6a0), + INTC_VECT(DMAC_DMAE, 0x6c0), }; static struct intc_group groups_dma4[] = { - INTC_GROUP(DMAC, DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, - DMAC_DMTE3, DMAC_DMAE), + INTC_GROUP(DMAC, DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, + DMAC_DMTE3, DMAC_DMAE), }; /* SH7750R and SH7751R both have 8-channel DMA controllers */ static struct intc_vect vectors_dma8[] = { - INTC_VECT(DMAC_DMTE0, 0x640), INTC_VECT(DMAC_DMTE1, 0x660), - INTC_VECT(DMAC_DMTE2, 0x680), INTC_VECT(DMAC_DMTE3, 0x6a0), - INTC_VECT(DMAC_DMTE4, 0x780), INTC_VECT(DMAC_DMTE5, 0x7a0), - INTC_VECT(DMAC_DMTE6, 0x7c0), INTC_VECT(DMAC_DMTE7, 0x7e0), - INTC_VECT(DMAC_DMAE, 0x6c0), + INTC_VECT(DMAC_DMTE0, 0x640), INTC_VECT(DMAC_DMTE1, 0x660), + INTC_VECT(DMAC_DMTE2, 0x680), INTC_VECT(DMAC_DMTE3, 0x6a0), + INTC_VECT(DMAC_DMTE4, 0x780), INTC_VECT(DMAC_DMTE5, 0x7a0), + INTC_VECT(DMAC_DMTE6, 0x7c0), INTC_VECT(DMAC_DMTE7, 0x7e0), + INTC_VECT(DMAC_DMAE, 0x6c0), }; static struct intc_group groups_dma8[] = { - INTC_GROUP(DMAC, DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, - DMAC_DMTE3, DMAC_DMTE4, DMAC_DMTE5, - DMAC_DMTE6, DMAC_DMTE7, DMAC_DMAE), + 
INTC_GROUP(DMAC, DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, + DMAC_DMTE3, DMAC_DMTE4, DMAC_DMTE5, + DMAC_DMTE6, DMAC_DMTE7, DMAC_DMAE), }; /* SH7750R, SH7751 and SH7751R all have two extra timer channels */ static struct intc_vect vectors_tmu34[] = { - INTC_VECT(TMU3, 0xb00), INTC_VECT(TMU4, 0xb80), + INTC_VECT(TMU3, 0xb00), INTC_VECT(TMU4, 0xb80), }; static struct intc_mask_reg mask_registers[] = { - { 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */ - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, TMU4, TMU3, - PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, - PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, - PCIC1_PCIDMA3, PCIC0_PCISERR } }, + { 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, TMU4, TMU3, + PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, + PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, + PCIC1_PCIDMA3, PCIC0_PCISERR } }, }; /* SH7750S, SH7750R, SH7751 and SH7751R all have IRLM priority registers */ static struct intc_vect vectors_irlm[] = { - INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0), - INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360), + INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0), + INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360), }; /* SH7751 and SH7751R both have PCI */ static struct intc_vect vectors_pci[] = { - INTC_VECT(PCIC0_PCISERR, 0xa00), INTC_VECT(PCIC1_PCIERR, 0xae0), - INTC_VECT(PCIC1_PCIPWDWN, 0xac0), INTC_VECT(PCIC1_PCIPWON, 0xaa0), - INTC_VECT(PCIC1_PCIDMA0, 0xa80), INTC_VECT(PCIC1_PCIDMA1, 0xa60), - INTC_VECT(PCIC1_PCIDMA2, 0xa40), INTC_VECT(PCIC1_PCIDMA3, 0xa20), + INTC_VECT(PCIC0_PCISERR, 0xa00), INTC_VECT(PCIC1_PCIERR, 0xae0), + INTC_VECT(PCIC1_PCIPWDWN, 0xac0), INTC_VECT(PCIC1_PCIPWON, 0xaa0), + INTC_VECT(PCIC1_PCIDMA0, 0xa80), INTC_VECT(PCIC1_PCIDMA1, 0xa60), + INTC_VECT(PCIC1_PCIDMA2, 0xa40), INTC_VECT(PCIC1_PCIDMA3, 0xa20), }; static struct intc_group groups_pci[] = { - INTC_GROUP(PCIC1, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, - PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3), + INTC_GROUP(PCIC1, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, + PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3), }; static struct intc_vect vectors_irl[] = { - INTC_VECT(IRL_0, 0x200), - INTC_VECT(IRL_1, 0x220), - INTC_VECT(IRL_2, 0x240), - INTC_VECT(IRL_3, 0x260), - INTC_VECT(IRL_4, 0x280), - INTC_VECT(IRL_5, 0x2a0), - INTC_VECT(IRL_6, 0x2c0), - INTC_VECT(IRL_7, 0x2e0), - INTC_VECT(IRL_8, 0x300), - INTC_VECT(IRL_9, 0x320), - INTC_VECT(IRL_A, 0x340), - INTC_VECT(IRL_B, 0x360), - INTC_VECT(IRL_C, 0x380), - INTC_VECT(IRL_D, 0x3a0), - INTC_VECT(IRL_E, 0x3c0), + INTC_VECT(IRL_0, 0x200), + INTC_VECT(IRL_1, 0x220), + INTC_VECT(IRL_2, 0x240), + INTC_VECT(IRL_3, 0x260), + INTC_VECT(IRL_4, 0x280), + INTC_VECT(IRL_5, 0x2a0), + INTC_VECT(IRL_6, 0x2c0), + INTC_VECT(IRL_7, 0x2e0), + INTC_VECT(IRL_8, 0x300), + INTC_VECT(IRL_9, 0x320), + INTC_VECT(IRL_A, 0x340), + INTC_VECT(IRL_B, 0x360), + INTC_VECT(IRL_C, 0x380), + INTC_VECT(IRL_D, 0x3a0), + INTC_VECT(IRL_E, 0x3c0), }; static struct intc_group groups_irl[] = { - INTC_GROUP(IRL, IRL_0, IRL_1, IRL_2, IRL_3, IRL_4, IRL_5, IRL_6, - IRL_7, IRL_8, IRL_9, IRL_A, IRL_B, IRL_C, IRL_D, IRL_E), + INTC_GROUP(IRL, IRL_0, IRL_1, IRL_2, IRL_3, IRL_4, IRL_5, IRL_6, + IRL_7, IRL_8, IRL_9, IRL_A, IRL_B, IRL_C, IRL_D, IRL_E), }; -/********************************************************************** - Memory mapped cache and TLB -**********************************************************************/ +/* + * Memory mapped 
cache and TLB + */ #define MM_REGION_MASK 0x07000000 #define MM_ICACHE_ADDR (0) @@ -679,7 +690,7 @@ static uint64_t sh7750_mmct_read(void *opaque, hwaddr addr, case MM_ICACHE_ADDR: case MM_ICACHE_DATA: /* do nothing */ - break; + break; case MM_ITLB_ADDR: ret = cpu_sh4_read_mmaped_itlb_addr(&s->cpu->env, addr); break; @@ -689,7 +700,7 @@ static uint64_t sh7750_mmct_read(void *opaque, hwaddr addr, case MM_OCACHE_ADDR: case MM_OCACHE_DATA: /* do nothing */ - break; + break; case MM_UTLB_ADDR: ret = cpu_sh4_read_mmaped_utlb_addr(&s->cpu->env, addr); break; @@ -722,27 +733,27 @@ static void sh7750_mmct_write(void *opaque, hwaddr addr, case MM_ICACHE_ADDR: case MM_ICACHE_DATA: /* do nothing */ - break; + break; case MM_ITLB_ADDR: cpu_sh4_write_mmaped_itlb_addr(&s->cpu->env, addr, mem_value); break; case MM_ITLB_DATA: cpu_sh4_write_mmaped_itlb_data(&s->cpu->env, addr, mem_value); abort(); - break; + break; case MM_OCACHE_ADDR: case MM_OCACHE_DATA: /* do nothing */ - break; + break; case MM_UTLB_ADDR: cpu_sh4_write_mmaped_utlb_addr(&s->cpu->env, addr, mem_value); - break; + break; case MM_UTLB_DATA: cpu_sh4_write_mmaped_utlb_data(&s->cpu->env, addr, mem_value); - break; + break; default: abort(); - break; + break; } } @@ -755,10 +766,13 @@ static const MemoryRegionOps sh7750_mmct_ops = { SH7750State *sh7750_init(SuperHCPU *cpu, MemoryRegion *sysmem) { SH7750State *s; + DeviceState *dev; + SysBusDevice *sb; + MemoryRegion *mr, *alias; - s = g_malloc0(sizeof(SH7750State)); + s = g_new0(SH7750State, 1); s->cpu = cpu; - s->periph_freq = 60000000; /* 60MHz */ + s->periph_freq = 60000000; /* 60MHz */ memory_region_init_io(&s->iomem, NULL, &sh7750_mem_ops, s, "memory", 0x1fc01000); @@ -791,81 +805,100 @@ SH7750State *sh7750_init(SuperHCPU *cpu, MemoryRegion *sysmem) memory_region_add_subregion(sysmem, 0xf0000000, &s->mmct_iomem); sh_intc_init(sysmem, &s->intc, NR_SOURCES, - _INTC_ARRAY(mask_registers), - _INTC_ARRAY(prio_registers)); + _INTC_ARRAY(mask_registers), + _INTC_ARRAY(prio_registers)); sh_intc_register_sources(&s->intc, - _INTC_ARRAY(vectors), - _INTC_ARRAY(groups)); + _INTC_ARRAY(vectors), + _INTC_ARRAY(groups)); cpu->env.intc_handle = &s->intc; - sh_serial_init(sysmem, 0x1fe00000, - 0, s->periph_freq, serial_hd(0), - s->intc.irqs[SCI1_ERI], - s->intc.irqs[SCI1_RXI], - s->intc.irqs[SCI1_TXI], - s->intc.irqs[SCI1_TEI], - NULL); - sh_serial_init(sysmem, 0x1fe80000, - SH_SERIAL_FEAT_SCIF, - s->periph_freq, serial_hd(1), - s->intc.irqs[SCIF_ERI], - s->intc.irqs[SCIF_RXI], - s->intc.irqs[SCIF_TXI], - NULL, - s->intc.irqs[SCIF_BRI]); + /* SCI */ + dev = qdev_new(TYPE_SH_SERIAL); + dev->id = g_strdup("sci"); + qdev_prop_set_chr(dev, "chardev", serial_hd(0)); + sb = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sb, &error_fatal); + sysbus_mmio_map(sb, 0, 0xffe00000); + alias = g_malloc(sizeof(*alias)); + mr = sysbus_mmio_get_region(sb, 0); + memory_region_init_alias(alias, OBJECT(dev), "sci-a7", mr, + 0, memory_region_size(mr)); + memory_region_add_subregion(sysmem, A7ADDR(0xffe00000), alias); + qdev_connect_gpio_out_named(dev, "eri", 0, s->intc.irqs[SCI1_ERI]); + qdev_connect_gpio_out_named(dev, "rxi", 0, s->intc.irqs[SCI1_RXI]); + qdev_connect_gpio_out_named(dev, "txi", 0, s->intc.irqs[SCI1_TXI]); + qdev_connect_gpio_out_named(dev, "tei", 0, s->intc.irqs[SCI1_TEI]); + + /* SCIF */ + dev = qdev_new(TYPE_SH_SERIAL); + dev->id = g_strdup("scif"); + qdev_prop_set_chr(dev, "chardev", serial_hd(1)); + qdev_prop_set_uint8(dev, "features", SH_SERIAL_FEAT_SCIF); + sb = SYS_BUS_DEVICE(dev); + 
sysbus_realize_and_unref(sb, &error_fatal); + sysbus_mmio_map(sb, 0, 0xffe80000); + alias = g_malloc(sizeof(*alias)); + mr = sysbus_mmio_get_region(sb, 0); + memory_region_init_alias(alias, OBJECT(dev), "scif-a7", mr, + 0, memory_region_size(mr)); + memory_region_add_subregion(sysmem, A7ADDR(0xffe80000), alias); + qdev_connect_gpio_out_named(dev, "eri", 0, s->intc.irqs[SCIF_ERI]); + qdev_connect_gpio_out_named(dev, "rxi", 0, s->intc.irqs[SCIF_RXI]); + qdev_connect_gpio_out_named(dev, "txi", 0, s->intc.irqs[SCIF_TXI]); + qdev_connect_gpio_out_named(dev, "bri", 0, s->intc.irqs[SCIF_BRI]); tmu012_init(sysmem, 0x1fd80000, - TMU012_FEAT_TOCR | TMU012_FEAT_3CHAN | TMU012_FEAT_EXTCLK, - s->periph_freq, - s->intc.irqs[TMU0], - s->intc.irqs[TMU1], - s->intc.irqs[TMU2_TUNI], - s->intc.irqs[TMU2_TICPI]); + TMU012_FEAT_TOCR | TMU012_FEAT_3CHAN | TMU012_FEAT_EXTCLK, + s->periph_freq, + s->intc.irqs[TMU0], + s->intc.irqs[TMU1], + s->intc.irqs[TMU2_TUNI], + s->intc.irqs[TMU2_TICPI]); if (cpu->env.id & (SH_CPU_SH7750 | SH_CPU_SH7750S | SH_CPU_SH7751)) { sh_intc_register_sources(&s->intc, - _INTC_ARRAY(vectors_dma4), - _INTC_ARRAY(groups_dma4)); + _INTC_ARRAY(vectors_dma4), + _INTC_ARRAY(groups_dma4)); } if (cpu->env.id & (SH_CPU_SH7750R | SH_CPU_SH7751R)) { sh_intc_register_sources(&s->intc, - _INTC_ARRAY(vectors_dma8), - _INTC_ARRAY(groups_dma8)); + _INTC_ARRAY(vectors_dma8), + _INTC_ARRAY(groups_dma8)); } if (cpu->env.id & (SH_CPU_SH7750R | SH_CPU_SH7751 | SH_CPU_SH7751R)) { sh_intc_register_sources(&s->intc, - _INTC_ARRAY(vectors_tmu34), - NULL, 0); + _INTC_ARRAY(vectors_tmu34), + NULL, 0); tmu012_init(sysmem, 0x1e100000, 0, s->periph_freq, - s->intc.irqs[TMU3], - s->intc.irqs[TMU4], - NULL, NULL); + s->intc.irqs[TMU3], + s->intc.irqs[TMU4], + NULL, NULL); } if (cpu->env.id & (SH_CPU_SH7751_ALL)) { sh_intc_register_sources(&s->intc, - _INTC_ARRAY(vectors_pci), - _INTC_ARRAY(groups_pci)); + _INTC_ARRAY(vectors_pci), + _INTC_ARRAY(groups_pci)); } if (cpu->env.id & (SH_CPU_SH7750S | SH_CPU_SH7750R | SH_CPU_SH7751_ALL)) { sh_intc_register_sources(&s->intc, - _INTC_ARRAY(vectors_irlm), - NULL, 0); + _INTC_ARRAY(vectors_irlm), + NULL, 0); } sh_intc_register_sources(&s->intc, - _INTC_ARRAY(vectors_irl), - _INTC_ARRAY(groups_irl)); + _INTC_ARRAY(vectors_irl), + _INTC_ARRAY(groups_irl)); return s; } qemu_irq sh7750_irl(SH7750State *s) { - sh_intc_toggle_source(sh_intc_source(&s->intc, IRL), 1, 0); /* enable */ - return qemu_allocate_irq(sh_intc_set_irl, sh_intc_source(&s->intc, IRL), 0); + sh_intc_toggle_source(&s->intc.sources[IRL], 1, 0); /* enable */ + return qemu_allocate_irq(sh_intc_set_irl, &s->intc.sources[IRL], 0); } diff --git a/hw/sh4/sh7750_regnames.c b/hw/sh4/sh7750_regnames.c index 0630fe3cf4..e531d46a8e 100644 --- a/hw/sh4/sh7750_regnames.c +++ b/hw/sh4/sh7750_regnames.c @@ -12,85 +12,87 @@ typedef struct { static regname_t regnames[] = { REGNAME(SH7750_PTEH_A7) - REGNAME(SH7750_PTEL_A7) - REGNAME(SH7750_PTEA_A7) - REGNAME(SH7750_TTB_A7) - REGNAME(SH7750_TEA_A7) - REGNAME(SH7750_MMUCR_A7) - REGNAME(SH7750_CCR_A7) - REGNAME(SH7750_QACR0_A7) - REGNAME(SH7750_QACR1_A7) - REGNAME(SH7750_TRA_A7) - REGNAME(SH7750_EXPEVT_A7) - REGNAME(SH7750_INTEVT_A7) - REGNAME(SH7750_STBCR_A7) - REGNAME(SH7750_STBCR2_A7) - REGNAME(SH7750_FRQCR_A7) - REGNAME(SH7750_WTCNT_A7) - REGNAME(SH7750_WTCSR_A7) - REGNAME(SH7750_R64CNT_A7) - REGNAME(SH7750_RSECCNT_A7) - REGNAME(SH7750_RMINCNT_A7) - REGNAME(SH7750_RHRCNT_A7) - REGNAME(SH7750_RWKCNT_A7) - REGNAME(SH7750_RDAYCNT_A7) - REGNAME(SH7750_RMONCNT_A7) - 
REGNAME(SH7750_RYRCNT_A7) - REGNAME(SH7750_RSECAR_A7) - REGNAME(SH7750_RMINAR_A7) - REGNAME(SH7750_RHRAR_A7) - REGNAME(SH7750_RWKAR_A7) - REGNAME(SH7750_RDAYAR_A7) - REGNAME(SH7750_RMONAR_A7) - REGNAME(SH7750_RCR1_A7) - REGNAME(SH7750_RCR2_A7) - REGNAME(SH7750_BCR1_A7) - REGNAME(SH7750_BCR2_A7) - REGNAME(SH7750_WCR1_A7) - REGNAME(SH7750_WCR2_A7) - REGNAME(SH7750_WCR3_A7) - REGNAME(SH7750_MCR_A7) - REGNAME(SH7750_PCR_A7) - REGNAME(SH7750_RTCSR_A7) - REGNAME(SH7750_RTCNT_A7) - REGNAME(SH7750_RTCOR_A7) - REGNAME(SH7750_RFCR_A7) - REGNAME(SH7750_SAR0_A7) - REGNAME(SH7750_SAR1_A7) - REGNAME(SH7750_SAR2_A7) - REGNAME(SH7750_SAR3_A7) - REGNAME(SH7750_DAR0_A7) - REGNAME(SH7750_DAR1_A7) - REGNAME(SH7750_DAR2_A7) - REGNAME(SH7750_DAR3_A7) - REGNAME(SH7750_DMATCR0_A7) - REGNAME(SH7750_DMATCR1_A7) - REGNAME(SH7750_DMATCR2_A7) - REGNAME(SH7750_DMATCR3_A7) - REGNAME(SH7750_CHCR0_A7) - REGNAME(SH7750_CHCR1_A7) - REGNAME(SH7750_CHCR2_A7) - REGNAME(SH7750_CHCR3_A7) - REGNAME(SH7750_DMAOR_A7) - REGNAME(SH7750_PCTRA_A7) - REGNAME(SH7750_PDTRA_A7) - REGNAME(SH7750_PCTRB_A7) - REGNAME(SH7750_PDTRB_A7) - REGNAME(SH7750_GPIOIC_A7) - REGNAME(SH7750_ICR_A7) - REGNAME(SH7750_BCR3_A7) - REGNAME(SH7750_BCR4_A7) - REGNAME(SH7750_SDMR2_A7) - REGNAME(SH7750_SDMR3_A7) {(uint32_t) - 1, NULL} + REGNAME(SH7750_PTEL_A7) + REGNAME(SH7750_PTEA_A7) + REGNAME(SH7750_TTB_A7) + REGNAME(SH7750_TEA_A7) + REGNAME(SH7750_MMUCR_A7) + REGNAME(SH7750_CCR_A7) + REGNAME(SH7750_QACR0_A7) + REGNAME(SH7750_QACR1_A7) + REGNAME(SH7750_TRA_A7) + REGNAME(SH7750_EXPEVT_A7) + REGNAME(SH7750_INTEVT_A7) + REGNAME(SH7750_STBCR_A7) + REGNAME(SH7750_STBCR2_A7) + REGNAME(SH7750_FRQCR_A7) + REGNAME(SH7750_WTCNT_A7) + REGNAME(SH7750_WTCSR_A7) + REGNAME(SH7750_R64CNT_A7) + REGNAME(SH7750_RSECCNT_A7) + REGNAME(SH7750_RMINCNT_A7) + REGNAME(SH7750_RHRCNT_A7) + REGNAME(SH7750_RWKCNT_A7) + REGNAME(SH7750_RDAYCNT_A7) + REGNAME(SH7750_RMONCNT_A7) + REGNAME(SH7750_RYRCNT_A7) + REGNAME(SH7750_RSECAR_A7) + REGNAME(SH7750_RMINAR_A7) + REGNAME(SH7750_RHRAR_A7) + REGNAME(SH7750_RWKAR_A7) + REGNAME(SH7750_RDAYAR_A7) + REGNAME(SH7750_RMONAR_A7) + REGNAME(SH7750_RCR1_A7) + REGNAME(SH7750_RCR2_A7) + REGNAME(SH7750_BCR1_A7) + REGNAME(SH7750_BCR2_A7) + REGNAME(SH7750_WCR1_A7) + REGNAME(SH7750_WCR2_A7) + REGNAME(SH7750_WCR3_A7) + REGNAME(SH7750_MCR_A7) + REGNAME(SH7750_PCR_A7) + REGNAME(SH7750_RTCSR_A7) + REGNAME(SH7750_RTCNT_A7) + REGNAME(SH7750_RTCOR_A7) + REGNAME(SH7750_RFCR_A7) + REGNAME(SH7750_SAR0_A7) + REGNAME(SH7750_SAR1_A7) + REGNAME(SH7750_SAR2_A7) + REGNAME(SH7750_SAR3_A7) + REGNAME(SH7750_DAR0_A7) + REGNAME(SH7750_DAR1_A7) + REGNAME(SH7750_DAR2_A7) + REGNAME(SH7750_DAR3_A7) + REGNAME(SH7750_DMATCR0_A7) + REGNAME(SH7750_DMATCR1_A7) + REGNAME(SH7750_DMATCR2_A7) + REGNAME(SH7750_DMATCR3_A7) + REGNAME(SH7750_CHCR0_A7) + REGNAME(SH7750_CHCR1_A7) + REGNAME(SH7750_CHCR2_A7) + REGNAME(SH7750_CHCR3_A7) + REGNAME(SH7750_DMAOR_A7) + REGNAME(SH7750_PCTRA_A7) + REGNAME(SH7750_PDTRA_A7) + REGNAME(SH7750_PCTRB_A7) + REGNAME(SH7750_PDTRB_A7) + REGNAME(SH7750_GPIOIC_A7) + REGNAME(SH7750_ICR_A7) + REGNAME(SH7750_BCR3_A7) + REGNAME(SH7750_BCR4_A7) + REGNAME(SH7750_SDMR2_A7) + REGNAME(SH7750_SDMR3_A7) + { (uint32_t)-1, NULL } }; const char *regname(uint32_t addr) { unsigned int i; - for (i = 0; regnames[i].regaddr != (uint32_t) - 1; i++) { - if (regnames[i].regaddr == addr) - return regnames[i].regname; + for (i = 0; regnames[i].regaddr != (uint32_t)-1; i++) { + if (regnames[i].regaddr == addr) { + return regnames[i].regname; + } } return ""; diff --git a/hw/sh4/sh7750_regs.h 
b/hw/sh4/sh7750_regs.h index ab073dadc7..beb571d5e9 100644 --- a/hw/sh4/sh7750_regs.h +++ b/hw/sh4/sh7750_regs.h @@ -43,9 +43,8 @@ * All register has 2 addresses: in 0xff000000 - 0xffffffff (P4 address) and * in 0x1f000000 - 0x1fffffff (area 7 address) */ -#define SH7750_P4_BASE 0xff000000 /* Accessible only in - privileged mode */ -#define SH7750_A7_BASE 0x1f000000 /* Accessible only using TLB */ +#define SH7750_P4_BASE 0xff000000 /* Accessible only in privileged mode */ +#define SH7750_A7_BASE 0x1f000000 /* Accessible only using TLB */ #define SH7750_P4_REG32(ofs) (SH7750_P4_BASE + (ofs)) #define SH7750_A7_REG32(ofs) (SH7750_A7_BASE + (ofs)) @@ -55,84 +54,84 @@ */ /* Page Table Entry High register - PTEH */ -#define SH7750_PTEH_REGOFS 0x000000 /* offset */ +#define SH7750_PTEH_REGOFS 0x000000 /* offset */ #define SH7750_PTEH SH7750_P4_REG32(SH7750_PTEH_REGOFS) #define SH7750_PTEH_A7 SH7750_A7_REG32(SH7750_PTEH_REGOFS) -#define SH7750_PTEH_VPN 0xfffffd00 /* Virtual page number */ +#define SH7750_PTEH_VPN 0xfffffd00 /* Virtual page number */ #define SH7750_PTEH_VPN_S 10 -#define SH7750_PTEH_ASID 0x000000ff /* Address space identifier */ +#define SH7750_PTEH_ASID 0x000000ff /* Address space identifier */ #define SH7750_PTEH_ASID_S 0 /* Page Table Entry Low register - PTEL */ -#define SH7750_PTEL_REGOFS 0x000004 /* offset */ +#define SH7750_PTEL_REGOFS 0x000004 /* offset */ #define SH7750_PTEL SH7750_P4_REG32(SH7750_PTEL_REGOFS) #define SH7750_PTEL_A7 SH7750_A7_REG32(SH7750_PTEL_REGOFS) -#define SH7750_PTEL_PPN 0x1ffffc00 /* Physical page number */ +#define SH7750_PTEL_PPN 0x1ffffc00 /* Physical page number */ #define SH7750_PTEL_PPN_S 10 -#define SH7750_PTEL_V 0x00000100 /* Validity (0-entry is invalid) */ -#define SH7750_PTEL_SZ1 0x00000080 /* Page size bit 1 */ -#define SH7750_PTEL_SZ0 0x00000010 /* Page size bit 0 */ -#define SH7750_PTEL_SZ_1KB 0x00000000 /* 1-kbyte page */ -#define SH7750_PTEL_SZ_4KB 0x00000010 /* 4-kbyte page */ -#define SH7750_PTEL_SZ_64KB 0x00000080 /* 64-kbyte page */ -#define SH7750_PTEL_SZ_1MB 0x00000090 /* 1-Mbyte page */ -#define SH7750_PTEL_PR 0x00000060 /* Protection Key Data */ -#define SH7750_PTEL_PR_ROPO 0x00000000 /* read-only in priv mode */ -#define SH7750_PTEL_PR_RWPO 0x00000020 /* read-write in priv mode */ -#define SH7750_PTEL_PR_ROPU 0x00000040 /* read-only in priv or user mode */ -#define SH7750_PTEL_PR_RWPU 0x00000060 /* read-write in priv or user mode */ -#define SH7750_PTEL_C 0x00000008 /* Cacheability - (0 - page not cacheable) */ -#define SH7750_PTEL_D 0x00000004 /* Dirty bit (1 - write has been - performed to a page) */ -#define SH7750_PTEL_SH 0x00000002 /* Share Status bit (1 - page are - shared by processes) */ -#define SH7750_PTEL_WT 0x00000001 /* Write-through bit, specifies the - cache write mode: - 0 - Copy-back mode - 1 - Write-through mode */ +#define SH7750_PTEL_V 0x00000100 /* Validity (0-entry is invalid) */ +#define SH7750_PTEL_SZ1 0x00000080 /* Page size bit 1 */ +#define SH7750_PTEL_SZ0 0x00000010 /* Page size bit 0 */ +#define SH7750_PTEL_SZ_1KB 0x00000000 /* 1-kbyte page */ +#define SH7750_PTEL_SZ_4KB 0x00000010 /* 4-kbyte page */ +#define SH7750_PTEL_SZ_64KB 0x00000080 /* 64-kbyte page */ +#define SH7750_PTEL_SZ_1MB 0x00000090 /* 1-Mbyte page */ +#define SH7750_PTEL_PR 0x00000060 /* Protection Key Data */ +#define SH7750_PTEL_PR_ROPO 0x00000000 /* read-only in priv mode */ +#define SH7750_PTEL_PR_RWPO 0x00000020 /* read-write in priv mode */ +#define SH7750_PTEL_PR_ROPU 0x00000040 /* read-only in priv or user mode */ +#define 
SH7750_PTEL_PR_RWPU 0x00000060 /* read-write in priv or user mode */ +#define SH7750_PTEL_C 0x00000008 /* Cacheability */ + /* (0 - page not cacheable) */ +#define SH7750_PTEL_D 0x00000004 /* Dirty bit (1 - write has been */ + /* performed to a page) */ +#define SH7750_PTEL_SH 0x00000002 /* Share Status bit (1 - page are */ + /* shared by processes) */ +#define SH7750_PTEL_WT 0x00000001 /* Write-through bit, specifies the */ + /* cache write mode: */ + /* 0 - Copy-back mode */ + /* 1 - Write-through mode */ /* Page Table Entry Assistance register - PTEA */ -#define SH7750_PTEA_REGOFS 0x000034 /* offset */ +#define SH7750_PTEA_REGOFS 0x000034 /* offset */ #define SH7750_PTEA SH7750_P4_REG32(SH7750_PTEA_REGOFS) #define SH7750_PTEA_A7 SH7750_A7_REG32(SH7750_PTEA_REGOFS) -#define SH7750_PTEA_TC 0x00000008 /* Timing Control bit - 0 - use area 5 wait states - 1 - use area 6 wait states */ -#define SH7750_PTEA_SA 0x00000007 /* Space Attribute bits: */ -#define SH7750_PTEA_SA_UNDEF 0x00000000 /* 0 - undefined */ -#define SH7750_PTEA_SA_IOVAR 0x00000001 /* 1 - variable-size I/O space */ -#define SH7750_PTEA_SA_IO8 0x00000002 /* 2 - 8-bit I/O space */ -#define SH7750_PTEA_SA_IO16 0x00000003 /* 3 - 16-bit I/O space */ -#define SH7750_PTEA_SA_CMEM8 0x00000004 /* 4 - 8-bit common memory space */ -#define SH7750_PTEA_SA_CMEM16 0x00000005 /* 5 - 16-bit common memory space */ -#define SH7750_PTEA_SA_AMEM8 0x00000006 /* 6 - 8-bit attr memory space */ -#define SH7750_PTEA_SA_AMEM16 0x00000007 /* 7 - 16-bit attr memory space */ +#define SH7750_PTEA_TC 0x00000008 /* Timing Control bit */ + /* 0 - use area 5 wait states */ + /* 1 - use area 6 wait states */ +#define SH7750_PTEA_SA 0x00000007 /* Space Attribute bits: */ +#define SH7750_PTEA_SA_UNDEF 0x00000000 /* 0 - undefined */ +#define SH7750_PTEA_SA_IOVAR 0x00000001 /* 1 - variable-size I/O space */ +#define SH7750_PTEA_SA_IO8 0x00000002 /* 2 - 8-bit I/O space */ +#define SH7750_PTEA_SA_IO16 0x00000003 /* 3 - 16-bit I/O space */ +#define SH7750_PTEA_SA_CMEM8 0x00000004 /* 4 - 8-bit common memory space */ +#define SH7750_PTEA_SA_CMEM16 0x00000005 /* 5 - 16-bit common memory space */ +#define SH7750_PTEA_SA_AMEM8 0x00000006 /* 6 - 8-bit attr memory space */ +#define SH7750_PTEA_SA_AMEM16 0x00000007 /* 7 - 16-bit attr memory space */ /* Translation table base register */ -#define SH7750_TTB_REGOFS 0x000008 /* offset */ +#define SH7750_TTB_REGOFS 0x000008 /* offset */ #define SH7750_TTB SH7750_P4_REG32(SH7750_TTB_REGOFS) #define SH7750_TTB_A7 SH7750_A7_REG32(SH7750_TTB_REGOFS) /* TLB exeption address register - TEA */ -#define SH7750_TEA_REGOFS 0x00000c /* offset */ +#define SH7750_TEA_REGOFS 0x00000c /* offset */ #define SH7750_TEA SH7750_P4_REG32(SH7750_TEA_REGOFS) #define SH7750_TEA_A7 SH7750_A7_REG32(SH7750_TEA_REGOFS) /* MMU control register - MMUCR */ -#define SH7750_MMUCR_REGOFS 0x000010 /* offset */ +#define SH7750_MMUCR_REGOFS 0x000010 /* offset */ #define SH7750_MMUCR SH7750_P4_REG32(SH7750_MMUCR_REGOFS) #define SH7750_MMUCR_A7 SH7750_A7_REG32(SH7750_MMUCR_REGOFS) -#define SH7750_MMUCR_AT 0x00000001 /* Address translation bit */ -#define SH7750_MMUCR_TI 0x00000004 /* TLB invalidate */ -#define SH7750_MMUCR_SV 0x00000100 /* Single Virtual Mode bit */ -#define SH7750_MMUCR_SQMD 0x00000200 /* Store Queue Mode bit */ -#define SH7750_MMUCR_URC 0x0000FC00 /* UTLB Replace Counter */ +#define SH7750_MMUCR_AT 0x00000001 /* Address translation bit */ +#define SH7750_MMUCR_TI 0x00000004 /* TLB invalidate */ +#define SH7750_MMUCR_SV 0x00000100 /* Single Virtual 
Mode bit */ +#define SH7750_MMUCR_SQMD 0x00000200 /* Store Queue Mode bit */ +#define SH7750_MMUCR_URC 0x0000FC00 /* UTLB Replace Counter */ #define SH7750_MMUCR_URC_S 10 -#define SH7750_MMUCR_URB 0x00FC0000 /* UTLB Replace Boundary */ +#define SH7750_MMUCR_URB 0x00FC0000 /* UTLB Replace Boundary */ #define SH7750_MMUCR_URB_S 18 -#define SH7750_MMUCR_LRUI 0xFC000000 /* Least Recently Used ITLB */ +#define SH7750_MMUCR_LRUI 0xFC000000 /* Least Recently Used ITLB */ #define SH7750_MMUCR_LRUI_S 26 @@ -145,30 +144,30 @@ */ /* Cache Control Register - CCR */ -#define SH7750_CCR_REGOFS 0x00001c /* offset */ +#define SH7750_CCR_REGOFS 0x00001c /* offset */ #define SH7750_CCR SH7750_P4_REG32(SH7750_CCR_REGOFS) #define SH7750_CCR_A7 SH7750_A7_REG32(SH7750_CCR_REGOFS) -#define SH7750_CCR_IIX 0x00008000 /* IC index enable bit */ -#define SH7750_CCR_ICI 0x00000800 /* IC invalidation bit: - set it to clear IC */ -#define SH7750_CCR_ICE 0x00000100 /* IC enable bit */ -#define SH7750_CCR_OIX 0x00000080 /* OC index enable bit */ -#define SH7750_CCR_ORA 0x00000020 /* OC RAM enable bit - if you set OCE = 0, - you should set ORA = 0 */ -#define SH7750_CCR_OCI 0x00000008 /* OC invalidation bit */ -#define SH7750_CCR_CB 0x00000004 /* Copy-back bit for P1 area */ -#define SH7750_CCR_WT 0x00000002 /* Write-through bit for P0,U0,P3 area */ -#define SH7750_CCR_OCE 0x00000001 /* OC enable bit */ +#define SH7750_CCR_IIX 0x00008000 /* IC index enable bit */ +#define SH7750_CCR_ICI 0x00000800 /* IC invalidation bit: */ + /* set it to clear IC */ +#define SH7750_CCR_ICE 0x00000100 /* IC enable bit */ +#define SH7750_CCR_OIX 0x00000080 /* OC index enable bit */ +#define SH7750_CCR_ORA 0x00000020 /* OC RAM enable bit */ + /* if you set OCE = 0, */ + /* you should set ORA = 0 */ +#define SH7750_CCR_OCI 0x00000008 /* OC invalidation bit */ +#define SH7750_CCR_CB 0x00000004 /* Copy-back bit for P1 area */ +#define SH7750_CCR_WT 0x00000002 /* Write-through bit for P0,U0,P3 area */ +#define SH7750_CCR_OCE 0x00000001 /* OC enable bit */ /* Queue address control register 0 - QACR0 */ -#define SH7750_QACR0_REGOFS 0x000038 /* offset */ +#define SH7750_QACR0_REGOFS 0x000038 /* offset */ #define SH7750_QACR0 SH7750_P4_REG32(SH7750_QACR0_REGOFS) #define SH7750_QACR0_A7 SH7750_A7_REG32(SH7750_QACR0_REGOFS) /* Queue address control register 1 - QACR1 */ -#define SH7750_QACR1_REGOFS 0x00003c /* offset */ +#define SH7750_QACR1_REGOFS 0x00003c /* offset */ #define SH7750_QACR1 SH7750_P4_REG32(SH7750_QACR1_REGOFS) #define SH7750_QACR1_A7 SH7750_A7_REG32(SH7750_QACR1_REGOFS) @@ -178,11 +177,11 @@ */ /* Immediate data for TRAPA instruction - TRA */ -#define SH7750_TRA_REGOFS 0x000020 /* offset */ +#define SH7750_TRA_REGOFS 0x000020 /* offset */ #define SH7750_TRA SH7750_P4_REG32(SH7750_TRA_REGOFS) #define SH7750_TRA_A7 SH7750_A7_REG32(SH7750_TRA_REGOFS) -#define SH7750_TRA_IMM 0x000003fd /* Immediate data operand */ +#define SH7750_TRA_IMM 0x000003fd /* Immediate data operand */ #define SH7750_TRA_IMM_S 2 /* Exeption event register - EXPEVT */ @@ -190,14 +189,14 @@ #define SH7750_EXPEVT SH7750_P4_REG32(SH7750_EXPEVT_REGOFS) #define SH7750_EXPEVT_A7 SH7750_A7_REG32(SH7750_EXPEVT_REGOFS) -#define SH7750_EXPEVT_EX 0x00000fff /* Exeption code */ +#define SH7750_EXPEVT_EX 0x00000fff /* Exeption code */ #define SH7750_EXPEVT_EX_S 0 /* Interrupt event register */ #define SH7750_INTEVT_REGOFS 0x000028 #define SH7750_INTEVT SH7750_P4_REG32(SH7750_INTEVT_REGOFS) #define SH7750_INTEVT_A7 SH7750_A7_REG32(SH7750_INTEVT_REGOFS) -#define SH7750_INTEVT_EX 
0x00000fff /* Exeption code */ +#define SH7750_INTEVT_EX 0x00000fff /* Exeption code */ #define SH7750_INTEVT_EX_S 0 /* @@ -206,683 +205,684 @@ #define SH7750_EVT_TO_NUM(evt) ((evt) >> 5) /* Reset exception category */ -#define SH7750_EVT_POWER_ON_RST 0x000 /* Power-on reset */ -#define SH7750_EVT_MANUAL_RST 0x020 /* Manual reset */ -#define SH7750_EVT_TLB_MULT_HIT 0x140 /* TLB multiple-hit exception */ +#define SH7750_EVT_POWER_ON_RST 0x000 /* Power-on reset */ +#define SH7750_EVT_MANUAL_RST 0x020 /* Manual reset */ +#define SH7750_EVT_TLB_MULT_HIT 0x140 /* TLB multiple-hit exception */ /* General exception category */ -#define SH7750_EVT_USER_BREAK 0x1E0 /* User break */ -#define SH7750_EVT_IADDR_ERR 0x0E0 /* Instruction address error */ -#define SH7750_EVT_TLB_READ_MISS 0x040 /* ITLB miss exception / - DTLB miss exception (read) */ -#define SH7750_EVT_TLB_READ_PROTV 0x0A0 /* ITLB protection violation / - DTLB protection violation (read) */ -#define SH7750_EVT_ILLEGAL_INSTR 0x180 /* General Illegal Instruction - exception */ -#define SH7750_EVT_SLOT_ILLEGAL_INSTR 0x1A0 /* Slot Illegal Instruction - exception */ -#define SH7750_EVT_FPU_DISABLE 0x800 /* General FPU disable exception */ -#define SH7750_EVT_SLOT_FPU_DISABLE 0x820 /* Slot FPU disable exception */ -#define SH7750_EVT_DATA_READ_ERR 0x0E0 /* Data address error (read) */ -#define SH7750_EVT_DATA_WRITE_ERR 0x100 /* Data address error (write) */ -#define SH7750_EVT_DTLB_WRITE_MISS 0x060 /* DTLB miss exception (write) */ -#define SH7750_EVT_DTLB_WRITE_PROTV 0x0C0 /* DTLB protection violation - exception (write) */ -#define SH7750_EVT_FPU_EXCEPTION 0x120 /* FPU exception */ -#define SH7750_EVT_INITIAL_PGWRITE 0x080 /* Initial Page Write exception */ -#define SH7750_EVT_TRAPA 0x160 /* Unconditional trap (TRAPA) */ +#define SH7750_EVT_USER_BREAK 0x1E0 /* User break */ +#define SH7750_EVT_IADDR_ERR 0x0E0 /* Instruction address error */ +#define SH7750_EVT_TLB_READ_MISS 0x040 /* ITLB miss exception / */ + /* DTLB miss exception (read) */ +#define SH7750_EVT_TLB_READ_PROTV 0x0A0 /* ITLB protection violation, */ + /* DTLB protection violation */ + /* (read) */ +#define SH7750_EVT_ILLEGAL_INSTR 0x180 /* General Illegal Instruction */ + /* exception */ +#define SH7750_EVT_SLOT_ILLEGAL_INSTR 0x1A0 /* Slot Illegal Instruction */ + /* exception */ +#define SH7750_EVT_FPU_DISABLE 0x800 /* General FPU disable exception */ +#define SH7750_EVT_SLOT_FPU_DISABLE 0x820 /* Slot FPU disable exception */ +#define SH7750_EVT_DATA_READ_ERR 0x0E0 /* Data address error (read) */ +#define SH7750_EVT_DATA_WRITE_ERR 0x100 /* Data address error (write) */ +#define SH7750_EVT_DTLB_WRITE_MISS 0x060 /* DTLB miss exception (write) */ +#define SH7750_EVT_DTLB_WRITE_PROTV 0x0C0 /* DTLB protection violation */ + /* exception (write) */ +#define SH7750_EVT_FPU_EXCEPTION 0x120 /* FPU exception */ +#define SH7750_EVT_INITIAL_PGWRITE 0x080 /* Initial Page Write exception */ +#define SH7750_EVT_TRAPA 0x160 /* Unconditional trap (TRAPA) */ /* Interrupt exception category */ -#define SH7750_EVT_NMI 0x1C0 /* Non-maskable interrupt */ -#define SH7750_EVT_IRQ0 0x200 /* External Interrupt 0 */ -#define SH7750_EVT_IRQ1 0x220 /* External Interrupt 1 */ -#define SH7750_EVT_IRQ2 0x240 /* External Interrupt 2 */ -#define SH7750_EVT_IRQ3 0x260 /* External Interrupt 3 */ -#define SH7750_EVT_IRQ4 0x280 /* External Interrupt 4 */ -#define SH7750_EVT_IRQ5 0x2A0 /* External Interrupt 5 */ -#define SH7750_EVT_IRQ6 0x2C0 /* External Interrupt 6 */ -#define SH7750_EVT_IRQ7 0x2E0 /* External 
Interrupt 7 */ -#define SH7750_EVT_IRQ8 0x300 /* External Interrupt 8 */ -#define SH7750_EVT_IRQ9 0x320 /* External Interrupt 9 */ -#define SH7750_EVT_IRQA 0x340 /* External Interrupt A */ -#define SH7750_EVT_IRQB 0x360 /* External Interrupt B */ -#define SH7750_EVT_IRQC 0x380 /* External Interrupt C */ -#define SH7750_EVT_IRQD 0x3A0 /* External Interrupt D */ -#define SH7750_EVT_IRQE 0x3C0 /* External Interrupt E */ +#define SH7750_EVT_NMI 0x1C0 /* Non-maskable interrupt */ +#define SH7750_EVT_IRQ0 0x200 /* External Interrupt 0 */ +#define SH7750_EVT_IRQ1 0x220 /* External Interrupt 1 */ +#define SH7750_EVT_IRQ2 0x240 /* External Interrupt 2 */ +#define SH7750_EVT_IRQ3 0x260 /* External Interrupt 3 */ +#define SH7750_EVT_IRQ4 0x280 /* External Interrupt 4 */ +#define SH7750_EVT_IRQ5 0x2A0 /* External Interrupt 5 */ +#define SH7750_EVT_IRQ6 0x2C0 /* External Interrupt 6 */ +#define SH7750_EVT_IRQ7 0x2E0 /* External Interrupt 7 */ +#define SH7750_EVT_IRQ8 0x300 /* External Interrupt 8 */ +#define SH7750_EVT_IRQ9 0x320 /* External Interrupt 9 */ +#define SH7750_EVT_IRQA 0x340 /* External Interrupt A */ +#define SH7750_EVT_IRQB 0x360 /* External Interrupt B */ +#define SH7750_EVT_IRQC 0x380 /* External Interrupt C */ +#define SH7750_EVT_IRQD 0x3A0 /* External Interrupt D */ +#define SH7750_EVT_IRQE 0x3C0 /* External Interrupt E */ /* Peripheral Module Interrupts - Timer Unit (TMU) */ -#define SH7750_EVT_TUNI0 0x400 /* TMU Underflow Interrupt 0 */ -#define SH7750_EVT_TUNI1 0x420 /* TMU Underflow Interrupt 1 */ -#define SH7750_EVT_TUNI2 0x440 /* TMU Underflow Interrupt 2 */ -#define SH7750_EVT_TICPI2 0x460 /* TMU Input Capture Interrupt 2 */ +#define SH7750_EVT_TUNI0 0x400 /* TMU Underflow Interrupt 0 */ +#define SH7750_EVT_TUNI1 0x420 /* TMU Underflow Interrupt 1 */ +#define SH7750_EVT_TUNI2 0x440 /* TMU Underflow Interrupt 2 */ +#define SH7750_EVT_TICPI2 0x460 /* TMU Input Capture Interrupt 2 */ /* Peripheral Module Interrupts - Real-Time Clock (RTC) */ -#define SH7750_EVT_RTC_ATI 0x480 /* Alarm Interrupt Request */ -#define SH7750_EVT_RTC_PRI 0x4A0 /* Periodic Interrupt Request */ -#define SH7750_EVT_RTC_CUI 0x4C0 /* Carry Interrupt Request */ +#define SH7750_EVT_RTC_ATI 0x480 /* Alarm Interrupt Request */ +#define SH7750_EVT_RTC_PRI 0x4A0 /* Periodic Interrupt Request */ +#define SH7750_EVT_RTC_CUI 0x4C0 /* Carry Interrupt Request */ /* Peripheral Module Interrupts - Serial Communication Interface (SCI) */ -#define SH7750_EVT_SCI_ERI 0x4E0 /* Receive Error */ -#define SH7750_EVT_SCI_RXI 0x500 /* Receive Data Register Full */ -#define SH7750_EVT_SCI_TXI 0x520 /* Transmit Data Register Empty */ -#define SH7750_EVT_SCI_TEI 0x540 /* Transmit End */ +#define SH7750_EVT_SCI_ERI 0x4E0 /* Receive Error */ +#define SH7750_EVT_SCI_RXI 0x500 /* Receive Data Register Full */ +#define SH7750_EVT_SCI_TXI 0x520 /* Transmit Data Register Empty */ +#define SH7750_EVT_SCI_TEI 0x540 /* Transmit End */ /* Peripheral Module Interrupts - Watchdog Timer (WDT) */ -#define SH7750_EVT_WDT_ITI 0x560 /* Interval Timer Interrupt - (used when WDT operates in - interval timer mode) */ +#define SH7750_EVT_WDT_ITI 0x560 /* Interval Timer Interrupt */ + /* (used when WDT operates in */ + /* interval timer mode) */ /* Peripheral Module Interrupts - Memory Refresh Unit (REF) */ -#define SH7750_EVT_REF_RCMI 0x580 /* Compare-match Interrupt */ -#define SH7750_EVT_REF_ROVI 0x5A0 /* Refresh Counter Overflow - interrupt */ +#define SH7750_EVT_REF_RCMI 0x580 /* Compare-match Interrupt */ +#define SH7750_EVT_REF_ROVI 0x5A0 /* 
Refresh Counter Overflow */ + /* interrupt */ /* Peripheral Module Interrupts - Hitachi User Debug Interface (H-UDI) */ -#define SH7750_EVT_HUDI 0x600 /* UDI interrupt */ +#define SH7750_EVT_HUDI 0x600 /* UDI interrupt */ /* Peripheral Module Interrupts - General-Purpose I/O (GPIO) */ -#define SH7750_EVT_GPIO 0x620 /* GPIO Interrupt */ +#define SH7750_EVT_GPIO 0x620 /* GPIO Interrupt */ /* Peripheral Module Interrupts - DMA Controller (DMAC) */ -#define SH7750_EVT_DMAC_DMTE0 0x640 /* DMAC 0 Transfer End Interrupt */ -#define SH7750_EVT_DMAC_DMTE1 0x660 /* DMAC 1 Transfer End Interrupt */ -#define SH7750_EVT_DMAC_DMTE2 0x680 /* DMAC 2 Transfer End Interrupt */ -#define SH7750_EVT_DMAC_DMTE3 0x6A0 /* DMAC 3 Transfer End Interrupt */ -#define SH7750_EVT_DMAC_DMAE 0x6C0 /* DMAC Address Error Interrupt */ +#define SH7750_EVT_DMAC_DMTE0 0x640 /* DMAC 0 Transfer End Interrupt */ +#define SH7750_EVT_DMAC_DMTE1 0x660 /* DMAC 1 Transfer End Interrupt */ +#define SH7750_EVT_DMAC_DMTE2 0x680 /* DMAC 2 Transfer End Interrupt */ +#define SH7750_EVT_DMAC_DMTE3 0x6A0 /* DMAC 3 Transfer End Interrupt */ +#define SH7750_EVT_DMAC_DMAE 0x6C0 /* DMAC Address Error Interrupt */ -/* Peripheral Module Interrupts - Serial Communication Interface with FIFO */ -/* (SCIF) */ -#define SH7750_EVT_SCIF_ERI 0x700 /* Receive Error */ -#define SH7750_EVT_SCIF_RXI 0x720 /* Receive FIFO Data Full or - Receive Data ready interrupt */ -#define SH7750_EVT_SCIF_BRI 0x740 /* Break or overrun error */ -#define SH7750_EVT_SCIF_TXI 0x760 /* Transmit FIFO Data Empty */ +/* Peripheral Module Interrupts Serial Communication Interface w/ FIFO (SCIF) */ +#define SH7750_EVT_SCIF_ERI 0x700 /* Receive Error */ +#define SH7750_EVT_SCIF_RXI 0x720 /* Receive FIFO Data Full or */ + /* Receive Data ready interrupt */ +#define SH7750_EVT_SCIF_BRI 0x740 /* Break or overrun error */ +#define SH7750_EVT_SCIF_TXI 0x760 /* Transmit FIFO Data Empty */ /* * Power Management */ -#define SH7750_STBCR_REGOFS 0xC00004 /* offset */ +#define SH7750_STBCR_REGOFS 0xC00004 /* offset */ #define SH7750_STBCR SH7750_P4_REG32(SH7750_STBCR_REGOFS) #define SH7750_STBCR_A7 SH7750_A7_REG32(SH7750_STBCR_REGOFS) -#define SH7750_STBCR_STBY 0x80 /* Specifies a transition to standby mode: - 0 - Transition to SLEEP mode on SLEEP - 1 - Transition to STANDBY mode on SLEEP */ -#define SH7750_STBCR_PHZ 0x40 /* State of peripheral module pins in - standby mode: - 0 - normal state - 1 - high-impendance state */ +#define SH7750_STBCR_STBY 0x80 /* Specifies a transition to standby mode: */ + /* 0 Transition to SLEEP mode on SLEEP */ + /* 1 Transition to STANDBY mode on SLEEP */ +#define SH7750_STBCR_PHZ 0x40 /* State of peripheral module pins in */ + /* standby mode: */ + /* 0 normal state */ + /* 1 high-impendance state */ -#define SH7750_STBCR_PPU 0x20 /* Peripheral module pins pull-up controls */ -#define SH7750_STBCR_MSTP4 0x10 /* Stopping the clock supply to DMAC */ +#define SH7750_STBCR_PPU 0x20 /* Peripheral module pins pull-up controls */ +#define SH7750_STBCR_MSTP4 0x10 /* Stopping the clock supply to DMAC */ #define SH7750_STBCR_DMAC_STP SH7750_STBCR_MSTP4 -#define SH7750_STBCR_MSTP3 0x08 /* Stopping the clock supply to SCIF */ +#define SH7750_STBCR_MSTP3 0x08 /* Stopping the clock supply to SCIF */ #define SH7750_STBCR_SCIF_STP SH7750_STBCR_MSTP3 -#define SH7750_STBCR_MSTP2 0x04 /* Stopping the clock supply to TMU */ +#define SH7750_STBCR_MSTP2 0x04 /* Stopping the clock supply to TMU */ #define SH7750_STBCR_TMU_STP SH7750_STBCR_MSTP2 -#define SH7750_STBCR_MSTP1 0x02 /* 
Stopping the clock supply to RTC */ +#define SH7750_STBCR_MSTP1 0x02 /* Stopping the clock supply to RTC */ #define SH7750_STBCR_RTC_STP SH7750_STBCR_MSTP1 -#define SH7750_STBCR_MSPT0 0x01 /* Stopping the clock supply to SCI */ +#define SH7750_STBCR_MSPT0 0x01 /* Stopping the clock supply to SCI */ #define SH7750_STBCR_SCI_STP SH7750_STBCR_MSTP0 #define SH7750_STBCR_STBY 0x80 -#define SH7750_STBCR2_REGOFS 0xC00010 /* offset */ +#define SH7750_STBCR2_REGOFS 0xC00010 /* offset */ #define SH7750_STBCR2 SH7750_P4_REG32(SH7750_STBCR2_REGOFS) #define SH7750_STBCR2_A7 SH7750_A7_REG32(SH7750_STBCR2_REGOFS) -#define SH7750_STBCR2_DSLP 0x80 /* Specifies transition to deep sleep mode: - 0 - transition to sleep or standby mode - as it is specified in STBY bit - 1 - transition to deep sleep mode on - execution of SLEEP instruction */ -#define SH7750_STBCR2_MSTP6 0x02 /* Stopping the clock supply to Store Queue - in the cache controller */ +#define SH7750_STBCR2_DSLP 0x80 /* Specifies transition to deep sleep mode */ + /* 0 transition to sleep or standby mode */ + /* as it is specified in STBY bit */ + /* 1 transition to deep sleep mode on */ + /* execution of SLEEP instruction */ +#define SH7750_STBCR2_MSTP6 0x02 /* Stopping the clock supply to the */ + /* Store Queue in the cache controller */ #define SH7750_STBCR2_SQ_STP SH7750_STBCR2_MSTP6 -#define SH7750_STBCR2_MSTP5 0x01 /* Stopping the clock supply to the User - Break Controller (UBC) */ +#define SH7750_STBCR2_MSTP5 0x01 /* Stopping the clock supply to the */ + /* User Break Controller (UBC) */ #define SH7750_STBCR2_UBC_STP SH7750_STBCR2_MSTP5 /* * Clock Pulse Generator (CPG) */ -#define SH7750_FRQCR_REGOFS 0xC00000 /* offset */ +#define SH7750_FRQCR_REGOFS 0xC00000 /* offset */ #define SH7750_FRQCR SH7750_P4_REG32(SH7750_FRQCR_REGOFS) #define SH7750_FRQCR_A7 SH7750_A7_REG32(SH7750_FRQCR_REGOFS) -#define SH7750_FRQCR_CKOEN 0x0800 /* Clock Output Enable - 0 - CKIO pin goes to HiZ/pullup - 1 - Clock is output from CKIO */ -#define SH7750_FRQCR_PLL1EN 0x0400 /* PLL circuit 1 enable */ -#define SH7750_FRQCR_PLL2EN 0x0200 /* PLL circuit 2 enable */ +#define SH7750_FRQCR_CKOEN 0x0800 /* Clock Output Enable */ + /* 0 - CKIO pin goes to HiZ/pullup */ + /* 1 - Clock is output from CKIO */ +#define SH7750_FRQCR_PLL1EN 0x0400 /* PLL circuit 1 enable */ +#define SH7750_FRQCR_PLL2EN 0x0200 /* PLL circuit 2 enable */ -#define SH7750_FRQCR_IFC 0x01C0 /* CPU clock frequency division ratio: */ -#define SH7750_FRQCR_IFCDIV1 0x0000 /* 0 - * 1 */ -#define SH7750_FRQCR_IFCDIV2 0x0040 /* 1 - * 1/2 */ -#define SH7750_FRQCR_IFCDIV3 0x0080 /* 2 - * 1/3 */ -#define SH7750_FRQCR_IFCDIV4 0x00C0 /* 3 - * 1/4 */ -#define SH7750_FRQCR_IFCDIV6 0x0100 /* 4 - * 1/6 */ -#define SH7750_FRQCR_IFCDIV8 0x0140 /* 5 - * 1/8 */ +#define SH7750_FRQCR_IFC 0x01C0 /* CPU clock frequency division ratio: */ +#define SH7750_FRQCR_IFCDIV1 0x0000 /* 0 - * 1 */ +#define SH7750_FRQCR_IFCDIV2 0x0040 /* 1 - * 1/2 */ +#define SH7750_FRQCR_IFCDIV3 0x0080 /* 2 - * 1/3 */ +#define SH7750_FRQCR_IFCDIV4 0x00C0 /* 3 - * 1/4 */ +#define SH7750_FRQCR_IFCDIV6 0x0100 /* 4 - * 1/6 */ +#define SH7750_FRQCR_IFCDIV8 0x0140 /* 5 - * 1/8 */ -#define SH7750_FRQCR_BFC 0x0038 /* Bus clock frequency division ratio: */ -#define SH7750_FRQCR_BFCDIV1 0x0000 /* 0 - * 1 */ -#define SH7750_FRQCR_BFCDIV2 0x0008 /* 1 - * 1/2 */ -#define SH7750_FRQCR_BFCDIV3 0x0010 /* 2 - * 1/3 */ -#define SH7750_FRQCR_BFCDIV4 0x0018 /* 3 - * 1/4 */ -#define SH7750_FRQCR_BFCDIV6 0x0020 /* 4 - * 1/6 */ -#define SH7750_FRQCR_BFCDIV8 0x0028 /* 5 - * 
1/8 */ +#define SH7750_FRQCR_BFC 0x0038 /* Bus clock frequency division ratio: */ +#define SH7750_FRQCR_BFCDIV1 0x0000 /* 0 - * 1 */ +#define SH7750_FRQCR_BFCDIV2 0x0008 /* 1 - * 1/2 */ +#define SH7750_FRQCR_BFCDIV3 0x0010 /* 2 - * 1/3 */ +#define SH7750_FRQCR_BFCDIV4 0x0018 /* 3 - * 1/4 */ +#define SH7750_FRQCR_BFCDIV6 0x0020 /* 4 - * 1/6 */ +#define SH7750_FRQCR_BFCDIV8 0x0028 /* 5 - * 1/8 */ -#define SH7750_FRQCR_PFC 0x0007 /* Peripheral module clock frequency - division ratio: */ -#define SH7750_FRQCR_PFCDIV2 0x0000 /* 0 - * 1/2 */ -#define SH7750_FRQCR_PFCDIV3 0x0001 /* 1 - * 1/3 */ -#define SH7750_FRQCR_PFCDIV4 0x0002 /* 2 - * 1/4 */ -#define SH7750_FRQCR_PFCDIV6 0x0003 /* 3 - * 1/6 */ -#define SH7750_FRQCR_PFCDIV8 0x0004 /* 4 - * 1/8 */ +#define SH7750_FRQCR_PFC 0x0007 /* Peripheral module clock frequency */ + /* division ratio: */ +#define SH7750_FRQCR_PFCDIV2 0x0000 /* 0 - * 1/2 */ +#define SH7750_FRQCR_PFCDIV3 0x0001 /* 1 - * 1/3 */ +#define SH7750_FRQCR_PFCDIV4 0x0002 /* 2 - * 1/4 */ +#define SH7750_FRQCR_PFCDIV6 0x0003 /* 3 - * 1/6 */ +#define SH7750_FRQCR_PFCDIV8 0x0004 /* 4 - * 1/8 */ /* * Watchdog Timer (WDT) */ /* Watchdog Timer Counter register - WTCNT */ -#define SH7750_WTCNT_REGOFS 0xC00008 /* offset */ +#define SH7750_WTCNT_REGOFS 0xC00008 /* offset */ #define SH7750_WTCNT SH7750_P4_REG32(SH7750_WTCNT_REGOFS) #define SH7750_WTCNT_A7 SH7750_A7_REG32(SH7750_WTCNT_REGOFS) -#define SH7750_WTCNT_KEY 0x5A00 /* When WTCNT byte register written, - you have to set the upper byte to - 0x5A */ +#define SH7750_WTCNT_KEY 0x5A00 /* When WTCNT byte register written, you */ + /* have to set the upper byte to 0x5A */ /* Watchdog Timer Control/Status register - WTCSR */ -#define SH7750_WTCSR_REGOFS 0xC0000C /* offset */ +#define SH7750_WTCSR_REGOFS 0xC0000C /* offset */ #define SH7750_WTCSR SH7750_P4_REG32(SH7750_WTCSR_REGOFS) #define SH7750_WTCSR_A7 SH7750_A7_REG32(SH7750_WTCSR_REGOFS) -#define SH7750_WTCSR_KEY 0xA500 /* When WTCSR byte register written, - you have to set the upper byte to - 0xA5 */ -#define SH7750_WTCSR_TME 0x80 /* Timer enable (1-upcount start) */ -#define SH7750_WTCSR_MODE 0x40 /* Timer Mode Select: */ -#define SH7750_WTCSR_MODE_WT 0x40 /* Watchdog Timer Mode */ -#define SH7750_WTCSR_MODE_IT 0x00 /* Interval Timer Mode */ -#define SH7750_WTCSR_RSTS 0x20 /* Reset Select: */ -#define SH7750_WTCSR_RST_MAN 0x20 /* Manual Reset */ -#define SH7750_WTCSR_RST_PWR 0x00 /* Power-on Reset */ -#define SH7750_WTCSR_WOVF 0x10 /* Watchdog Timer Overflow Flag */ -#define SH7750_WTCSR_IOVF 0x08 /* Interval Timer Overflow Flag */ -#define SH7750_WTCSR_CKS 0x07 /* Clock Select: */ -#define SH7750_WTCSR_CKS_DIV32 0x00 /* 1/32 of frequency divider 2 input */ -#define SH7750_WTCSR_CKS_DIV64 0x01 /* 1/64 */ -#define SH7750_WTCSR_CKS_DIV128 0x02 /* 1/128 */ -#define SH7750_WTCSR_CKS_DIV256 0x03 /* 1/256 */ -#define SH7750_WTCSR_CKS_DIV512 0x04 /* 1/512 */ -#define SH7750_WTCSR_CKS_DIV1024 0x05 /* 1/1024 */ -#define SH7750_WTCSR_CKS_DIV2048 0x06 /* 1/2048 */ -#define SH7750_WTCSR_CKS_DIV4096 0x07 /* 1/4096 */ +#define SH7750_WTCSR_KEY 0xA500 /* When WTCSR byte register written, you */ + /* have to set the upper byte to 0xA5 */ +#define SH7750_WTCSR_TME 0x80 /* Timer enable (1-upcount start) */ +#define SH7750_WTCSR_MODE 0x40 /* Timer Mode Select: */ +#define SH7750_WTCSR_MODE_WT 0x40 /* Watchdog Timer Mode */ +#define SH7750_WTCSR_MODE_IT 0x00 /* Interval Timer Mode */ +#define SH7750_WTCSR_RSTS 0x20 /* Reset Select: */ +#define SH7750_WTCSR_RST_MAN 0x20 /* Manual Reset */ +#define 
SH7750_WTCSR_RST_PWR 0x00 /* Power-on Reset */ +#define SH7750_WTCSR_WOVF 0x10 /* Watchdog Timer Overflow Flag */ +#define SH7750_WTCSR_IOVF 0x08 /* Interval Timer Overflow Flag */ +#define SH7750_WTCSR_CKS 0x07 /* Clock Select: */ +#define SH7750_WTCSR_CKS_DIV32 0x00 /* 1/32 of frequency divider 2 input */ +#define SH7750_WTCSR_CKS_DIV64 0x01 /* 1/64 */ +#define SH7750_WTCSR_CKS_DIV128 0x02 /* 1/128 */ +#define SH7750_WTCSR_CKS_DIV256 0x03 /* 1/256 */ +#define SH7750_WTCSR_CKS_DIV512 0x04 /* 1/512 */ +#define SH7750_WTCSR_CKS_DIV1024 0x05 /* 1/1024 */ +#define SH7750_WTCSR_CKS_DIV2048 0x06 /* 1/2048 */ +#define SH7750_WTCSR_CKS_DIV4096 0x07 /* 1/4096 */ /* * Real-Time Clock (RTC) */ /* 64-Hz Counter Register (byte, read-only) - R64CNT */ -#define SH7750_R64CNT_REGOFS 0xC80000 /* offset */ +#define SH7750_R64CNT_REGOFS 0xC80000 /* offset */ #define SH7750_R64CNT SH7750_P4_REG32(SH7750_R64CNT_REGOFS) #define SH7750_R64CNT_A7 SH7750_A7_REG32(SH7750_R64CNT_REGOFS) /* Second Counter Register (byte, BCD-coded) - RSECCNT */ -#define SH7750_RSECCNT_REGOFS 0xC80004 /* offset */ +#define SH7750_RSECCNT_REGOFS 0xC80004 /* offset */ #define SH7750_RSECCNT SH7750_P4_REG32(SH7750_RSECCNT_REGOFS) #define SH7750_RSECCNT_A7 SH7750_A7_REG32(SH7750_RSECCNT_REGOFS) /* Minute Counter Register (byte, BCD-coded) - RMINCNT */ -#define SH7750_RMINCNT_REGOFS 0xC80008 /* offset */ +#define SH7750_RMINCNT_REGOFS 0xC80008 /* offset */ #define SH7750_RMINCNT SH7750_P4_REG32(SH7750_RMINCNT_REGOFS) #define SH7750_RMINCNT_A7 SH7750_A7_REG32(SH7750_RMINCNT_REGOFS) /* Hour Counter Register (byte, BCD-coded) - RHRCNT */ -#define SH7750_RHRCNT_REGOFS 0xC8000C /* offset */ +#define SH7750_RHRCNT_REGOFS 0xC8000C /* offset */ #define SH7750_RHRCNT SH7750_P4_REG32(SH7750_RHRCNT_REGOFS) #define SH7750_RHRCNT_A7 SH7750_A7_REG32(SH7750_RHRCNT_REGOFS) /* Day-of-Week Counter Register (byte) - RWKCNT */ -#define SH7750_RWKCNT_REGOFS 0xC80010 /* offset */ +#define SH7750_RWKCNT_REGOFS 0xC80010 /* offset */ #define SH7750_RWKCNT SH7750_P4_REG32(SH7750_RWKCNT_REGOFS) #define SH7750_RWKCNT_A7 SH7750_A7_REG32(SH7750_RWKCNT_REGOFS) -#define SH7750_RWKCNT_SUN 0 /* Sunday */ -#define SH7750_RWKCNT_MON 1 /* Monday */ -#define SH7750_RWKCNT_TUE 2 /* Tuesday */ -#define SH7750_RWKCNT_WED 3 /* Wednesday */ -#define SH7750_RWKCNT_THU 4 /* Thursday */ -#define SH7750_RWKCNT_FRI 5 /* Friday */ -#define SH7750_RWKCNT_SAT 6 /* Saturday */ +#define SH7750_RWKCNT_SUN 0 /* Sunday */ +#define SH7750_RWKCNT_MON 1 /* Monday */ +#define SH7750_RWKCNT_TUE 2 /* Tuesday */ +#define SH7750_RWKCNT_WED 3 /* Wednesday */ +#define SH7750_RWKCNT_THU 4 /* Thursday */ +#define SH7750_RWKCNT_FRI 5 /* Friday */ +#define SH7750_RWKCNT_SAT 6 /* Saturday */ /* Day Counter Register (byte, BCD-coded) - RDAYCNT */ -#define SH7750_RDAYCNT_REGOFS 0xC80014 /* offset */ +#define SH7750_RDAYCNT_REGOFS 0xC80014 /* offset */ #define SH7750_RDAYCNT SH7750_P4_REG32(SH7750_RDAYCNT_REGOFS) #define SH7750_RDAYCNT_A7 SH7750_A7_REG32(SH7750_RDAYCNT_REGOFS) /* Month Counter Register (byte, BCD-coded) - RMONCNT */ -#define SH7750_RMONCNT_REGOFS 0xC80018 /* offset */ +#define SH7750_RMONCNT_REGOFS 0xC80018 /* offset */ #define SH7750_RMONCNT SH7750_P4_REG32(SH7750_RMONCNT_REGOFS) #define SH7750_RMONCNT_A7 SH7750_A7_REG32(SH7750_RMONCNT_REGOFS) /* Year Counter Register (half, BCD-coded) - RYRCNT */ -#define SH7750_RYRCNT_REGOFS 0xC8001C /* offset */ +#define SH7750_RYRCNT_REGOFS 0xC8001C /* offset */ #define SH7750_RYRCNT SH7750_P4_REG32(SH7750_RYRCNT_REGOFS) #define SH7750_RYRCNT_A7 
SH7750_A7_REG32(SH7750_RYRCNT_REGOFS) /* Second Alarm Register (byte, BCD-coded) - RSECAR */ -#define SH7750_RSECAR_REGOFS 0xC80020 /* offset */ +#define SH7750_RSECAR_REGOFS 0xC80020 /* offset */ #define SH7750_RSECAR SH7750_P4_REG32(SH7750_RSECAR_REGOFS) #define SH7750_RSECAR_A7 SH7750_A7_REG32(SH7750_RSECAR_REGOFS) -#define SH7750_RSECAR_ENB 0x80 /* Second Alarm Enable */ +#define SH7750_RSECAR_ENB 0x80 /* Second Alarm Enable */ /* Minute Alarm Register (byte, BCD-coded) - RMINAR */ -#define SH7750_RMINAR_REGOFS 0xC80024 /* offset */ +#define SH7750_RMINAR_REGOFS 0xC80024 /* offset */ #define SH7750_RMINAR SH7750_P4_REG32(SH7750_RMINAR_REGOFS) #define SH7750_RMINAR_A7 SH7750_A7_REG32(SH7750_RMINAR_REGOFS) -#define SH7750_RMINAR_ENB 0x80 /* Minute Alarm Enable */ +#define SH7750_RMINAR_ENB 0x80 /* Minute Alarm Enable */ /* Hour Alarm Register (byte, BCD-coded) - RHRAR */ -#define SH7750_RHRAR_REGOFS 0xC80028 /* offset */ +#define SH7750_RHRAR_REGOFS 0xC80028 /* offset */ #define SH7750_RHRAR SH7750_P4_REG32(SH7750_RHRAR_REGOFS) #define SH7750_RHRAR_A7 SH7750_A7_REG32(SH7750_RHRAR_REGOFS) -#define SH7750_RHRAR_ENB 0x80 /* Hour Alarm Enable */ +#define SH7750_RHRAR_ENB 0x80 /* Hour Alarm Enable */ /* Day-of-Week Alarm Register (byte) - RWKAR */ -#define SH7750_RWKAR_REGOFS 0xC8002C /* offset */ +#define SH7750_RWKAR_REGOFS 0xC8002C /* offset */ #define SH7750_RWKAR SH7750_P4_REG32(SH7750_RWKAR_REGOFS) #define SH7750_RWKAR_A7 SH7750_A7_REG32(SH7750_RWKAR_REGOFS) -#define SH7750_RWKAR_ENB 0x80 /* Day-of-week Alarm Enable */ +#define SH7750_RWKAR_ENB 0x80 /* Day-of-week Alarm Enable */ -#define SH7750_RWKAR_SUN 0 /* Sunday */ -#define SH7750_RWKAR_MON 1 /* Monday */ -#define SH7750_RWKAR_TUE 2 /* Tuesday */ -#define SH7750_RWKAR_WED 3 /* Wednesday */ -#define SH7750_RWKAR_THU 4 /* Thursday */ -#define SH7750_RWKAR_FRI 5 /* Friday */ -#define SH7750_RWKAR_SAT 6 /* Saturday */ +#define SH7750_RWKAR_SUN 0 /* Sunday */ +#define SH7750_RWKAR_MON 1 /* Monday */ +#define SH7750_RWKAR_TUE 2 /* Tuesday */ +#define SH7750_RWKAR_WED 3 /* Wednesday */ +#define SH7750_RWKAR_THU 4 /* Thursday */ +#define SH7750_RWKAR_FRI 5 /* Friday */ +#define SH7750_RWKAR_SAT 6 /* Saturday */ /* Day Alarm Register (byte, BCD-coded) - RDAYAR */ -#define SH7750_RDAYAR_REGOFS 0xC80030 /* offset */ +#define SH7750_RDAYAR_REGOFS 0xC80030 /* offset */ #define SH7750_RDAYAR SH7750_P4_REG32(SH7750_RDAYAR_REGOFS) #define SH7750_RDAYAR_A7 SH7750_A7_REG32(SH7750_RDAYAR_REGOFS) -#define SH7750_RDAYAR_ENB 0x80 /* Day Alarm Enable */ +#define SH7750_RDAYAR_ENB 0x80 /* Day Alarm Enable */ /* Month Counter Register (byte, BCD-coded) - RMONAR */ -#define SH7750_RMONAR_REGOFS 0xC80034 /* offset */ +#define SH7750_RMONAR_REGOFS 0xC80034 /* offset */ #define SH7750_RMONAR SH7750_P4_REG32(SH7750_RMONAR_REGOFS) #define SH7750_RMONAR_A7 SH7750_A7_REG32(SH7750_RMONAR_REGOFS) -#define SH7750_RMONAR_ENB 0x80 /* Month Alarm Enable */ +#define SH7750_RMONAR_ENB 0x80 /* Month Alarm Enable */ /* RTC Control Register 1 (byte) - RCR1 */ -#define SH7750_RCR1_REGOFS 0xC80038 /* offset */ +#define SH7750_RCR1_REGOFS 0xC80038 /* offset */ #define SH7750_RCR1 SH7750_P4_REG32(SH7750_RCR1_REGOFS) #define SH7750_RCR1_A7 SH7750_A7_REG32(SH7750_RCR1_REGOFS) -#define SH7750_RCR1_CF 0x80 /* Carry Flag */ -#define SH7750_RCR1_CIE 0x10 /* Carry Interrupt Enable */ -#define SH7750_RCR1_AIE 0x08 /* Alarm Interrupt Enable */ -#define SH7750_RCR1_AF 0x01 /* Alarm Flag */ +#define SH7750_RCR1_CF 0x80 /* Carry Flag */ +#define SH7750_RCR1_CIE 0x10 /* Carry Interrupt 
Enable */ +#define SH7750_RCR1_AIE 0x08 /* Alarm Interrupt Enable */ +#define SH7750_RCR1_AF 0x01 /* Alarm Flag */ /* RTC Control Register 2 (byte) - RCR2 */ -#define SH7750_RCR2_REGOFS 0xC8003C /* offset */ +#define SH7750_RCR2_REGOFS 0xC8003C /* offset */ #define SH7750_RCR2 SH7750_P4_REG32(SH7750_RCR2_REGOFS) #define SH7750_RCR2_A7 SH7750_A7_REG32(SH7750_RCR2_REGOFS) -#define SH7750_RCR2_PEF 0x80 /* Periodic Interrupt Flag */ -#define SH7750_RCR2_PES 0x70 /* Periodic Interrupt Enable: */ -#define SH7750_RCR2_PES_DIS 0x00 /* Periodic Interrupt Disabled */ -#define SH7750_RCR2_PES_DIV256 0x10 /* Generated at 1/256 sec interval */ -#define SH7750_RCR2_PES_DIV64 0x20 /* Generated at 1/64 sec interval */ -#define SH7750_RCR2_PES_DIV16 0x30 /* Generated at 1/16 sec interval */ -#define SH7750_RCR2_PES_DIV4 0x40 /* Generated at 1/4 sec interval */ -#define SH7750_RCR2_PES_DIV2 0x50 /* Generated at 1/2 sec interval */ -#define SH7750_RCR2_PES_x1 0x60 /* Generated at 1 sec interval */ -#define SH7750_RCR2_PES_x2 0x70 /* Generated at 2 sec interval */ -#define SH7750_RCR2_RTCEN 0x08 /* RTC Crystal Oscillator is Operated */ -#define SH7750_RCR2_ADJ 0x04 /* 30-Second Adjastment */ -#define SH7750_RCR2_RESET 0x02 /* Frequency divider circuits are reset */ -#define SH7750_RCR2_START 0x01 /* 0 - sec, min, hr, day-of-week, month, - year counters are stopped - 1 - sec, min, hr, day-of-week, month, - year counters operate normally */ +#define SH7750_RCR2_PEF 0x80 /* Periodic Interrupt Flag */ +#define SH7750_RCR2_PES 0x70 /* Periodic Interrupt Enable: */ +#define SH7750_RCR2_PES_DIS 0x00 /* Periodic Interrupt Disabled */ +#define SH7750_RCR2_PES_DIV256 0x10 /* Generated at 1/256 sec interval */ +#define SH7750_RCR2_PES_DIV64 0x20 /* Generated at 1/64 sec interval */ +#define SH7750_RCR2_PES_DIV16 0x30 /* Generated at 1/16 sec interval */ +#define SH7750_RCR2_PES_DIV4 0x40 /* Generated at 1/4 sec interval */ +#define SH7750_RCR2_PES_DIV2 0x50 /* Generated at 1/2 sec interval */ +#define SH7750_RCR2_PES_x1 0x60 /* Generated at 1 sec interval */ +#define SH7750_RCR2_PES_x2 0x70 /* Generated at 2 sec interval */ +#define SH7750_RCR2_RTCEN 0x08 /* RTC Crystal Oscillator is Operated */ +#define SH7750_RCR2_ADJ 0x04 /* 30-Second Adjastment */ +#define SH7750_RCR2_RESET 0x02 /* Frequency divider circuits are reset */ +#define SH7750_RCR2_START 0x01 /* 0 - sec, min, hr, day-of-week, month, */ + /* year counters are stopped */ + /* 1 - sec, min, hr, day-of-week, month, */ + /* year counters operate normally */ /* * Bus State Controller - BSC */ /* Bus Control Register 1 - BCR1 */ -#define SH7750_BCR1_REGOFS 0x800000 /* offset */ +#define SH7750_BCR1_REGOFS 0x800000 /* offset */ #define SH7750_BCR1 SH7750_P4_REG32(SH7750_BCR1_REGOFS) #define SH7750_BCR1_A7 SH7750_A7_REG32(SH7750_BCR1_REGOFS) -#define SH7750_BCR1_ENDIAN 0x80000000 /* Endianness (1 - little endian) */ -#define SH7750_BCR1_MASTER 0x40000000 /* Master/Slave mode (1-master) */ -#define SH7750_BCR1_A0MPX 0x20000000 /* Area 0 Memory Type (0-SRAM,1-MPX) */ -#define SH7750_BCR1_IPUP 0x02000000 /* Input Pin Pull-up Control: - 0 - pull-up resistor is on for - control input pins - 1 - pull-up resistor is off */ -#define SH7750_BCR1_OPUP 0x01000000 /* Output Pin Pull-up Control: - 0 - pull-up resistor is on for - control output pins - 1 - pull-up resistor is off */ -#define SH7750_BCR1_A1MBC 0x00200000 /* Area 1 SRAM Byte Control Mode: - 0 - Area 1 SRAM is set to - normal mode - 1 - Area 1 SRAM is set to byte - control mode */ -#define SH7750_BCR1_A4MBC 
0x00100000 /* Area 4 SRAM Byte Control Mode: - 0 - Area 4 SRAM is set to - normal mode - 1 - Area 4 SRAM is set to byte - control mode */ -#define SH7750_BCR1_BREQEN 0x00080000 /* BREQ Enable: - 0 - External requests are not - accepted - 1 - External requests are - accepted */ -#define SH7750_BCR1_PSHR 0x00040000 /* Partial Sharing Bit: - 0 - Master Mode - 1 - Partial-sharing Mode */ -#define SH7750_BCR1_MEMMPX 0x00020000 /* Area 1 to 6 MPX Interface: - 0 - SRAM/burst ROM interface - 1 - MPX interface */ -#define SH7750_BCR1_HIZMEM 0x00008000 /* High Impendance Control. Specifies - the state of A[25:0], BS\, CSn\, - RD/WR\, CE2A\, CE2B\ in standby - mode and when bus is released: - 0 - signals go to High-Z mode - 1 - signals driven */ -#define SH7750_BCR1_HIZCNT 0x00004000 /* High Impendance Control. Specifies - the state of the RAS\, RAS2\, WEn\, - CASn\, DQMn, RD\, CASS\, FRAME\, - RD2\ signals in standby mode and - when bus is released: - 0 - signals go to High-Z mode - 1 - signals driven */ -#define SH7750_BCR1_A0BST 0x00003800 /* Area 0 Burst ROM Control */ -#define SH7750_BCR1_A0BST_SRAM 0x0000 /* Area 0 accessed as SRAM i/f */ -#define SH7750_BCR1_A0BST_ROM4 0x0800 /* Area 0 accessed as burst ROM - interface, 4 cosequtive access */ -#define SH7750_BCR1_A0BST_ROM8 0x1000 /* Area 0 accessed as burst ROM - interface, 8 cosequtive access */ -#define SH7750_BCR1_A0BST_ROM16 0x1800 /* Area 0 accessed as burst ROM - interface, 16 cosequtive access */ -#define SH7750_BCR1_A0BST_ROM32 0x2000 /* Area 0 accessed as burst ROM - interface, 32 cosequtive access */ +#define SH7750_BCR1_ENDIAN 0x80000000 /* Endianness (1 - little endian) */ +#define SH7750_BCR1_MASTER 0x40000000 /* Master/Slave mode (1-master) */ +#define SH7750_BCR1_A0MPX 0x20000000 /* Area 0 Memory Type (0-SRAM,1-MPX) */ +#define SH7750_BCR1_IPUP 0x02000000 /* Input Pin Pull-up Control: */ + /* 0 - pull-up resistor is on for */ + /* control input pins */ + /* 1 - pull-up resistor is off */ +#define SH7750_BCR1_OPUP 0x01000000 /* Output Pin Pull-up Control: */ + /* 0 - pull-up resistor is on for */ + /* control output pins */ + /* 1 - pull-up resistor is off */ +#define SH7750_BCR1_A1MBC 0x00200000 /* Area 1 SRAM Byte Control Mode: */ + /* 0 - Area 1 SRAM is set to */ + /* normal mode */ + /* 1 - Area 1 SRAM is set to byte */ + /* control mode */ +#define SH7750_BCR1_A4MBC 0x00100000 /* Area 4 SRAM Byte Control Mode: */ + /* 0 - Area 4 SRAM is set to */ + /* normal mode */ + /* 1 - Area 4 SRAM is set to byte */ + /* control mode */ +#define SH7750_BCR1_BREQEN 0x00080000 /* BREQ Enable: */ + /* 0 - External requests are not */ + /* accepted */ + /* 1 - External requests are */ + /* accepted */ +#define SH7750_BCR1_PSHR 0x00040000 /* Partial Sharing Bit: */ + /* 0 - Master Mode */ + /* 1 - Partial-sharing Mode */ +#define SH7750_BCR1_MEMMPX 0x00020000 /* Area 1 to 6 MPX Interface: */ + /* 0 - SRAM/burst ROM interface */ + /* 1 - MPX interface */ +#define SH7750_BCR1_HIZMEM 0x00008000 /* High Impendance Control. */ + /* Specifies the state of A[25:0], */ + /* BS\, CSn\, RD/WR\, CE2A\, CE2B\ */ + /* in standby mode and when bus is */ + /* released: */ + /* 0 - signals go to High-Z mode */ + /* 1 - signals driven */ +#define SH7750_BCR1_HIZCNT 0x00004000 /* High Impendance Control. 
*/ + /* Specifies the state of the */ + /* RAS\, RAS2\, WEn\, CASn\, DQMn, */ + /* RD\, CASS\, FRAME\, RD2\ */ + /* signals in standby mode and */ + /* when bus is released: */ + /* 0 - signals go to High-Z mode */ + /* 1 - signals driven */ +#define SH7750_BCR1_A0BST 0x00003800 /* Area 0 Burst ROM Control */ +#define SH7750_BCR1_A0BST_SRAM 0x0000 /* Area 0 accessed as SRAM i/f */ +#define SH7750_BCR1_A0BST_ROM4 0x0800 /* Area 0 accessed as burst ROM */ + /* interface, 4 cosequtive access */ +#define SH7750_BCR1_A0BST_ROM8 0x1000 /* Area 0 accessed as burst ROM */ + /* interface, 8 cosequtive access */ +#define SH7750_BCR1_A0BST_ROM16 0x1800 /* Area 0 accessed as burst ROM */ + /* interface, 16 cosequtive access */ +#define SH7750_BCR1_A0BST_ROM32 0x2000 /* Area 0 accessed as burst ROM */ + /* interface, 32 cosequtive access */ -#define SH7750_BCR1_A5BST 0x00000700 /* Area 5 Burst ROM Control */ -#define SH7750_BCR1_A5BST_SRAM 0x0000 /* Area 5 accessed as SRAM i/f */ -#define SH7750_BCR1_A5BST_ROM4 0x0100 /* Area 5 accessed as burst ROM - interface, 4 cosequtive access */ -#define SH7750_BCR1_A5BST_ROM8 0x0200 /* Area 5 accessed as burst ROM - interface, 8 cosequtive access */ -#define SH7750_BCR1_A5BST_ROM16 0x0300 /* Area 5 accessed as burst ROM - interface, 16 cosequtive access */ -#define SH7750_BCR1_A5BST_ROM32 0x0400 /* Area 5 accessed as burst ROM - interface, 32 cosequtive access */ +#define SH7750_BCR1_A5BST 0x00000700 /* Area 5 Burst ROM Control */ +#define SH7750_BCR1_A5BST_SRAM 0x0000 /* Area 5 accessed as SRAM i/f */ +#define SH7750_BCR1_A5BST_ROM4 0x0100 /* Area 5 accessed as burst ROM */ + /* interface, 4 cosequtive access */ +#define SH7750_BCR1_A5BST_ROM8 0x0200 /* Area 5 accessed as burst ROM */ + /* interface, 8 cosequtive access */ +#define SH7750_BCR1_A5BST_ROM16 0x0300 /* Area 5 accessed as burst ROM */ + /* interface, 16 cosequtive access */ +#define SH7750_BCR1_A5BST_ROM32 0x0400 /* Area 5 accessed as burst ROM */ + /* interface, 32 cosequtive access */ -#define SH7750_BCR1_A6BST 0x000000E0 /* Area 6 Burst ROM Control */ -#define SH7750_BCR1_A6BST_SRAM 0x0000 /* Area 6 accessed as SRAM i/f */ -#define SH7750_BCR1_A6BST_ROM4 0x0020 /* Area 6 accessed as burst ROM - interface, 4 cosequtive access */ -#define SH7750_BCR1_A6BST_ROM8 0x0040 /* Area 6 accessed as burst ROM - interface, 8 cosequtive access */ -#define SH7750_BCR1_A6BST_ROM16 0x0060 /* Area 6 accessed as burst ROM - interface, 16 cosequtive access */ -#define SH7750_BCR1_A6BST_ROM32 0x0080 /* Area 6 accessed as burst ROM - interface, 32 cosequtive access */ +#define SH7750_BCR1_A6BST 0x000000E0 /* Area 6 Burst ROM Control */ +#define SH7750_BCR1_A6BST_SRAM 0x0000 /* Area 6 accessed as SRAM i/f */ +#define SH7750_BCR1_A6BST_ROM4 0x0020 /* Area 6 accessed as burst ROM */ + /* interface, 4 cosequtive access */ +#define SH7750_BCR1_A6BST_ROM8 0x0040 /* Area 6 accessed as burst ROM */ + /* interface, 8 cosequtive access */ +#define SH7750_BCR1_A6BST_ROM16 0x0060 /* Area 6 accessed as burst ROM */ + /* interface, 16 cosequtive access */ +#define SH7750_BCR1_A6BST_ROM32 0x0080 /* Area 6 accessed as burst ROM */ + /* interface, 32 cosequtive access */ -#define SH7750_BCR1_DRAMTP 0x001C /* Area 2 and 3 Memory Type */ -#define SH7750_BCR1_DRAMTP_2SRAM_3SRAM 0x0000 /* Area 2 and 3 are SRAM or MPX - interface. 
*/ -#define SH7750_BCR1_DRAMTP_2SRAM_3SDRAM 0x0008 /* Area 2 - SRAM/MPX, Area 3 - - synchronous DRAM */ -#define SH7750_BCR1_DRAMTP_2SDRAM_3SDRAM 0x000C /* Area 2 and 3 are synchronous - DRAM interface */ -#define SH7750_BCR1_DRAMTP_2SRAM_3DRAM 0x0010 /* Area 2 - SRAM/MPX, Area 3 - - DRAM interface */ -#define SH7750_BCR1_DRAMTP_2DRAM_3DRAM 0x0014 /* Area 2 and 3 are DRAM - interface */ +#define SH7750_BCR1_DRAMTP 0x001C /* Area 2 and 3 Memory Type */ +#define SH7750_BCR1_DRAMTP_2SRAM_3SRAM 0x0000 /* Area 2 and 3 are SRAM or */ + /* MPX interface. */ +#define SH7750_BCR1_DRAMTP_2SRAM_3SDRAM 0x0008 /* Area 2 - SRAM/MPX, Area 3 */ + /* synchronous DRAM */ +#define SH7750_BCR1_DRAMTP_2SDRAM_3SDRAM 0x000C /* Area 2 and 3 are */ + /* synchronous DRAM interface */ +#define SH7750_BCR1_DRAMTP_2SRAM_3DRAM 0x0010 /* Area 2 - SRAM/MPX, Area 3 */ + /* DRAM interface */ +#define SH7750_BCR1_DRAMTP_2DRAM_3DRAM 0x0014 /* Area 2 and 3 are DRAM */ + /* interface */ -#define SH7750_BCR1_A56PCM 0x00000001 /* Area 5 and 6 Bus Type: - 0 - SRAM interface - 1 - PCMCIA interface */ +#define SH7750_BCR1_A56PCM 0x00000001 /* Area 5 and 6 Bus Type: */ + /* 0 - SRAM interface */ + /* 1 - PCMCIA interface */ /* Bus Control Register 2 (half) - BCR2 */ -#define SH7750_BCR2_REGOFS 0x800004 /* offset */ +#define SH7750_BCR2_REGOFS 0x800004 /* offset */ #define SH7750_BCR2 SH7750_P4_REG32(SH7750_BCR2_REGOFS) #define SH7750_BCR2_A7 SH7750_A7_REG32(SH7750_BCR2_REGOFS) -#define SH7750_BCR2_A0SZ 0xC000 /* Area 0 Bus Width */ +#define SH7750_BCR2_A0SZ 0xC000 /* Area 0 Bus Width */ #define SH7750_BCR2_A0SZ_S 14 -#define SH7750_BCR2_A6SZ 0x3000 /* Area 6 Bus Width */ +#define SH7750_BCR2_A6SZ 0x3000 /* Area 6 Bus Width */ #define SH7750_BCR2_A6SZ_S 12 -#define SH7750_BCR2_A5SZ 0x0C00 /* Area 5 Bus Width */ +#define SH7750_BCR2_A5SZ 0x0C00 /* Area 5 Bus Width */ #define SH7750_BCR2_A5SZ_S 10 -#define SH7750_BCR2_A4SZ 0x0300 /* Area 4 Bus Width */ +#define SH7750_BCR2_A4SZ 0x0300 /* Area 4 Bus Width */ #define SH7750_BCR2_A4SZ_S 8 -#define SH7750_BCR2_A3SZ 0x00C0 /* Area 3 Bus Width */ +#define SH7750_BCR2_A3SZ 0x00C0 /* Area 3 Bus Width */ #define SH7750_BCR2_A3SZ_S 6 -#define SH7750_BCR2_A2SZ 0x0030 /* Area 2 Bus Width */ +#define SH7750_BCR2_A2SZ 0x0030 /* Area 2 Bus Width */ #define SH7750_BCR2_A2SZ_S 4 -#define SH7750_BCR2_A1SZ 0x000C /* Area 1 Bus Width */ +#define SH7750_BCR2_A1SZ 0x000C /* Area 1 Bus Width */ #define SH7750_BCR2_A1SZ_S 2 -#define SH7750_BCR2_SZ_64 0 /* 64 bits */ -#define SH7750_BCR2_SZ_8 1 /* 8 bits */ -#define SH7750_BCR2_SZ_16 2 /* 16 bits */ -#define SH7750_BCR2_SZ_32 3 /* 32 bits */ -#define SH7750_BCR2_PORTEN 0x0001 /* Port Function Enable : - 0 - D51-D32 are not used as a port - 1 - D51-D32 are used as a port */ +#define SH7750_BCR2_SZ_64 0 /* 64 bits */ +#define SH7750_BCR2_SZ_8 1 /* 8 bits */ +#define SH7750_BCR2_SZ_16 2 /* 16 bits */ +#define SH7750_BCR2_SZ_32 3 /* 32 bits */ +#define SH7750_BCR2_PORTEN 0x0001 /* Port Function Enable */ + /* 0 - D51-D32 are not used as a port */ + /* 1 - D51-D32 are used as a port */ /* Wait Control Register 1 - WCR1 */ -#define SH7750_WCR1_REGOFS 0x800008 /* offset */ +#define SH7750_WCR1_REGOFS 0x800008 /* offset */ #define SH7750_WCR1 SH7750_P4_REG32(SH7750_WCR1_REGOFS) #define SH7750_WCR1_A7 SH7750_A7_REG32(SH7750_WCR1_REGOFS) -#define SH7750_WCR1_DMAIW 0x70000000 /* DACK Device Inter-Cycle Idle - specification */ +#define SH7750_WCR1_DMAIW 0x70000000 /* DACK Device Inter-Cycle Idle */ + /* specification */ #define SH7750_WCR1_DMAIW_S 28 -#define 
SH7750_WCR1_A6IW 0x07000000 /* Area 6 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A6IW 0x07000000 /* Area 6 Inter-Cycle Idle spec. */ #define SH7750_WCR1_A6IW_S 24 -#define SH7750_WCR1_A5IW 0x00700000 /* Area 5 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A5IW 0x00700000 /* Area 5 Inter-Cycle Idle spec. */ #define SH7750_WCR1_A5IW_S 20 -#define SH7750_WCR1_A4IW 0x00070000 /* Area 4 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A4IW 0x00070000 /* Area 4 Inter-Cycle Idle spec. */ #define SH7750_WCR1_A4IW_S 16 -#define SH7750_WCR1_A3IW 0x00007000 /* Area 3 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A3IW 0x00007000 /* Area 3 Inter-Cycle Idle spec. */ #define SH7750_WCR1_A3IW_S 12 -#define SH7750_WCR1_A2IW 0x00000700 /* Area 2 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A2IW 0x00000700 /* Area 2 Inter-Cycle Idle spec. */ #define SH7750_WCR1_A2IW_S 8 -#define SH7750_WCR1_A1IW 0x00000070 /* Area 1 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A1IW 0x00000070 /* Area 1 Inter-Cycle Idle spec. */ #define SH7750_WCR1_A1IW_S 4 -#define SH7750_WCR1_A0IW 0x00000007 /* Area 0 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A0IW 0x00000007 /* Area 0 Inter-Cycle Idle spec. */ #define SH7750_WCR1_A0IW_S 0 /* Wait Control Register 2 - WCR2 */ -#define SH7750_WCR2_REGOFS 0x80000C /* offset */ +#define SH7750_WCR2_REGOFS 0x80000C /* offset */ #define SH7750_WCR2 SH7750_P4_REG32(SH7750_WCR2_REGOFS) #define SH7750_WCR2_A7 SH7750_A7_REG32(SH7750_WCR2_REGOFS) -#define SH7750_WCR2_A6W 0xE0000000 /* Area 6 Wait Control */ +#define SH7750_WCR2_A6W 0xE0000000 /* Area 6 Wait Control */ #define SH7750_WCR2_A6W_S 29 -#define SH7750_WCR2_A6B 0x1C000000 /* Area 6 Burst Pitch */ +#define SH7750_WCR2_A6B 0x1C000000 /* Area 6 Burst Pitch */ #define SH7750_WCR2_A6B_S 26 -#define SH7750_WCR2_A5W 0x03800000 /* Area 5 Wait Control */ +#define SH7750_WCR2_A5W 0x03800000 /* Area 5 Wait Control */ #define SH7750_WCR2_A5W_S 23 -#define SH7750_WCR2_A5B 0x00700000 /* Area 5 Burst Pitch */ +#define SH7750_WCR2_A5B 0x00700000 /* Area 5 Burst Pitch */ #define SH7750_WCR2_A5B_S 20 -#define SH7750_WCR2_A4W 0x000E0000 /* Area 4 Wait Control */ +#define SH7750_WCR2_A4W 0x000E0000 /* Area 4 Wait Control */ #define SH7750_WCR2_A4W_S 17 -#define SH7750_WCR2_A3W 0x0000E000 /* Area 3 Wait Control */ +#define SH7750_WCR2_A3W 0x0000E000 /* Area 3 Wait Control */ #define SH7750_WCR2_A3W_S 13 -#define SH7750_WCR2_A2W 0x00000E00 /* Area 2 Wait Control */ +#define SH7750_WCR2_A2W 0x00000E00 /* Area 2 Wait Control */ #define SH7750_WCR2_A2W_S 9 -#define SH7750_WCR2_A1W 0x000001C0 /* Area 1 Wait Control */ +#define SH7750_WCR2_A1W 0x000001C0 /* Area 1 Wait Control */ #define SH7750_WCR2_A1W_S 6 -#define SH7750_WCR2_A0W 0x00000038 /* Area 0 Wait Control */ +#define SH7750_WCR2_A0W 0x00000038 /* Area 0 Wait Control */ #define SH7750_WCR2_A0W_S 3 -#define SH7750_WCR2_A0B 0x00000007 /* Area 0 Burst Pitch */ +#define SH7750_WCR2_A0B 0x00000007 /* Area 0 Burst Pitch */ #define SH7750_WCR2_A0B_S 0 -#define SH7750_WCR2_WS0 0 /* 0 wait states inserted */ -#define SH7750_WCR2_WS1 1 /* 1 wait states inserted */ -#define SH7750_WCR2_WS2 2 /* 2 wait states inserted */ -#define SH7750_WCR2_WS3 3 /* 3 wait states inserted */ -#define SH7750_WCR2_WS6 4 /* 6 wait states inserted */ -#define SH7750_WCR2_WS9 5 /* 9 wait states inserted */ -#define SH7750_WCR2_WS12 6 /* 12 wait states inserted */ -#define SH7750_WCR2_WS15 7 /* 15 wait states inserted */ +#define SH7750_WCR2_WS0 0 /* 0 wait states inserted */ +#define SH7750_WCR2_WS1 1 /* 1 wait states inserted 
*/ +#define SH7750_WCR2_WS2 2 /* 2 wait states inserted */ +#define SH7750_WCR2_WS3 3 /* 3 wait states inserted */ +#define SH7750_WCR2_WS6 4 /* 6 wait states inserted */ +#define SH7750_WCR2_WS9 5 /* 9 wait states inserted */ +#define SH7750_WCR2_WS12 6 /* 12 wait states inserted */ +#define SH7750_WCR2_WS15 7 /* 15 wait states inserted */ -#define SH7750_WCR2_BPWS0 0 /* 0 wait states inserted from 2nd access */ -#define SH7750_WCR2_BPWS1 1 /* 1 wait states inserted from 2nd access */ -#define SH7750_WCR2_BPWS2 2 /* 2 wait states inserted from 2nd access */ -#define SH7750_WCR2_BPWS3 3 /* 3 wait states inserted from 2nd access */ -#define SH7750_WCR2_BPWS4 4 /* 4 wait states inserted from 2nd access */ -#define SH7750_WCR2_BPWS5 5 /* 5 wait states inserted from 2nd access */ -#define SH7750_WCR2_BPWS6 6 /* 6 wait states inserted from 2nd access */ -#define SH7750_WCR2_BPWS7 7 /* 7 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS0 0 /* 0 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS1 1 /* 1 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS2 2 /* 2 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS3 3 /* 3 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS4 4 /* 4 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS5 5 /* 5 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS6 6 /* 6 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS7 7 /* 7 wait states inserted from 2nd access */ /* DRAM CAS\ Assertion Delay (area 3,2) */ -#define SH7750_WCR2_DRAM_CAS_ASW1 0 /* 1 cycle */ -#define SH7750_WCR2_DRAM_CAS_ASW2 1 /* 2 cycles */ -#define SH7750_WCR2_DRAM_CAS_ASW3 2 /* 3 cycles */ -#define SH7750_WCR2_DRAM_CAS_ASW4 3 /* 4 cycles */ -#define SH7750_WCR2_DRAM_CAS_ASW7 4 /* 7 cycles */ -#define SH7750_WCR2_DRAM_CAS_ASW10 5 /* 10 cycles */ -#define SH7750_WCR2_DRAM_CAS_ASW13 6 /* 13 cycles */ -#define SH7750_WCR2_DRAM_CAS_ASW16 7 /* 16 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW1 0 /* 1 cycle */ +#define SH7750_WCR2_DRAM_CAS_ASW2 1 /* 2 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW3 2 /* 3 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW4 3 /* 4 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW7 4 /* 7 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW10 5 /* 10 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW13 6 /* 13 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW16 7 /* 16 cycles */ /* SDRAM CAS\ Latency Cycles */ -#define SH7750_WCR2_SDRAM_CAS_LAT1 1 /* 1 cycle */ -#define SH7750_WCR2_SDRAM_CAS_LAT2 2 /* 2 cycles */ -#define SH7750_WCR2_SDRAM_CAS_LAT3 3 /* 3 cycles */ -#define SH7750_WCR2_SDRAM_CAS_LAT4 4 /* 4 cycles */ -#define SH7750_WCR2_SDRAM_CAS_LAT5 5 /* 5 cycles */ +#define SH7750_WCR2_SDRAM_CAS_LAT1 1 /* 1 cycle */ +#define SH7750_WCR2_SDRAM_CAS_LAT2 2 /* 2 cycles */ +#define SH7750_WCR2_SDRAM_CAS_LAT3 3 /* 3 cycles */ +#define SH7750_WCR2_SDRAM_CAS_LAT4 4 /* 4 cycles */ +#define SH7750_WCR2_SDRAM_CAS_LAT5 5 /* 5 cycles */ /* Wait Control Register 3 - WCR3 */ -#define SH7750_WCR3_REGOFS 0x800010 /* offset */ +#define SH7750_WCR3_REGOFS 0x800010 /* offset */ #define SH7750_WCR3 SH7750_P4_REG32(SH7750_WCR3_REGOFS) #define SH7750_WCR3_A7 SH7750_A7_REG32(SH7750_WCR3_REGOFS) -#define SH7750_WCR3_A6S 0x04000000 /* Area 6 Write Strobe Setup time */ -#define SH7750_WCR3_A6H 0x03000000 /* Area 6 Data Hold Time */ +#define SH7750_WCR3_A6S 0x04000000 /* Area 6 Write Strobe Setup time */ +#define SH7750_WCR3_A6H 0x03000000 /* Area 6 Data Hold Time */ #define SH7750_WCR3_A6H_S 24 -#define SH7750_WCR3_A5S 
0x00400000 /* Area 5 Write Strobe Setup time */ -#define SH7750_WCR3_A5H 0x00300000 /* Area 5 Data Hold Time */ +#define SH7750_WCR3_A5S 0x00400000 /* Area 5 Write Strobe Setup time */ +#define SH7750_WCR3_A5H 0x00300000 /* Area 5 Data Hold Time */ #define SH7750_WCR3_A5H_S 20 -#define SH7750_WCR3_A4S 0x00040000 /* Area 4 Write Strobe Setup time */ -#define SH7750_WCR3_A4H 0x00030000 /* Area 4 Data Hold Time */ +#define SH7750_WCR3_A4S 0x00040000 /* Area 4 Write Strobe Setup time */ +#define SH7750_WCR3_A4H 0x00030000 /* Area 4 Data Hold Time */ #define SH7750_WCR3_A4H_S 16 -#define SH7750_WCR3_A3S 0x00004000 /* Area 3 Write Strobe Setup time */ -#define SH7750_WCR3_A3H 0x00003000 /* Area 3 Data Hold Time */ +#define SH7750_WCR3_A3S 0x00004000 /* Area 3 Write Strobe Setup time */ +#define SH7750_WCR3_A3H 0x00003000 /* Area 3 Data Hold Time */ #define SH7750_WCR3_A3H_S 12 -#define SH7750_WCR3_A2S 0x00000400 /* Area 2 Write Strobe Setup time */ -#define SH7750_WCR3_A2H 0x00000300 /* Area 2 Data Hold Time */ +#define SH7750_WCR3_A2S 0x00000400 /* Area 2 Write Strobe Setup time */ +#define SH7750_WCR3_A2H 0x00000300 /* Area 2 Data Hold Time */ #define SH7750_WCR3_A2H_S 8 -#define SH7750_WCR3_A1S 0x00000040 /* Area 1 Write Strobe Setup time */ -#define SH7750_WCR3_A1H 0x00000030 /* Area 1 Data Hold Time */ +#define SH7750_WCR3_A1S 0x00000040 /* Area 1 Write Strobe Setup time */ +#define SH7750_WCR3_A1H 0x00000030 /* Area 1 Data Hold Time */ #define SH7750_WCR3_A1H_S 4 -#define SH7750_WCR3_A0S 0x00000004 /* Area 0 Write Strobe Setup time */ -#define SH7750_WCR3_A0H 0x00000003 /* Area 0 Data Hold Time */ +#define SH7750_WCR3_A0S 0x00000004 /* Area 0 Write Strobe Setup time */ +#define SH7750_WCR3_A0H 0x00000003 /* Area 0 Data Hold Time */ #define SH7750_WCR3_A0H_S 0 -#define SH7750_WCR3_DHWS_0 0 /* 0 wait states data hold time */ -#define SH7750_WCR3_DHWS_1 1 /* 1 wait states data hold time */ -#define SH7750_WCR3_DHWS_2 2 /* 2 wait states data hold time */ -#define SH7750_WCR3_DHWS_3 3 /* 3 wait states data hold time */ +#define SH7750_WCR3_DHWS_0 0 /* 0 wait states data hold time */ +#define SH7750_WCR3_DHWS_1 1 /* 1 wait states data hold time */ +#define SH7750_WCR3_DHWS_2 2 /* 2 wait states data hold time */ +#define SH7750_WCR3_DHWS_3 3 /* 3 wait states data hold time */ -#define SH7750_MCR_REGOFS 0x800014 /* offset */ +#define SH7750_MCR_REGOFS 0x800014 /* offset */ #define SH7750_MCR SH7750_P4_REG32(SH7750_MCR_REGOFS) #define SH7750_MCR_A7 SH7750_A7_REG32(SH7750_MCR_REGOFS) -#define SH7750_MCR_RASD 0x80000000 /* RAS Down mode */ -#define SH7750_MCR_MRSET 0x40000000 /* SDRAM Mode Register Set */ -#define SH7750_MCR_PALL 0x00000000 /* SDRAM Precharge All cmd. Mode */ -#define SH7750_MCR_TRC 0x38000000 /* RAS Precharge Time at End of - Refresh: */ -#define SH7750_MCR_TRC_0 0x00000000 /* 0 */ -#define SH7750_MCR_TRC_3 0x08000000 /* 3 */ -#define SH7750_MCR_TRC_6 0x10000000 /* 6 */ -#define SH7750_MCR_TRC_9 0x18000000 /* 9 */ -#define SH7750_MCR_TRC_12 0x20000000 /* 12 */ -#define SH7750_MCR_TRC_15 0x28000000 /* 15 */ -#define SH7750_MCR_TRC_18 0x30000000 /* 18 */ -#define SH7750_MCR_TRC_21 0x38000000 /* 21 */ +#define SH7750_MCR_RASD 0x80000000 /* RAS Down mode */ +#define SH7750_MCR_MRSET 0x40000000 /* SDRAM Mode Register Set */ +#define SH7750_MCR_PALL 0x00000000 /* SDRAM Precharge All cmd. 
Mode */ +#define SH7750_MCR_TRC 0x38000000 /* RAS Precharge Time at End of */ + /* Refresh: */ +#define SH7750_MCR_TRC_0 0x00000000 /* 0 */ +#define SH7750_MCR_TRC_3 0x08000000 /* 3 */ +#define SH7750_MCR_TRC_6 0x10000000 /* 6 */ +#define SH7750_MCR_TRC_9 0x18000000 /* 9 */ +#define SH7750_MCR_TRC_12 0x20000000 /* 12 */ +#define SH7750_MCR_TRC_15 0x28000000 /* 15 */ +#define SH7750_MCR_TRC_18 0x30000000 /* 18 */ +#define SH7750_MCR_TRC_21 0x38000000 /* 21 */ -#define SH7750_MCR_TCAS 0x00800000 /* CAS Negation Period */ -#define SH7750_MCR_TCAS_1 0x00000000 /* 1 */ -#define SH7750_MCR_TCAS_2 0x00800000 /* 2 */ +#define SH7750_MCR_TCAS 0x00800000 /* CAS Negation Period */ +#define SH7750_MCR_TCAS_1 0x00000000 /* 1 */ +#define SH7750_MCR_TCAS_2 0x00800000 /* 2 */ -#define SH7750_MCR_TPC 0x00380000 /* DRAM: RAS Precharge Period - SDRAM: minimum number of cycles - until the next bank active cmd - is output after precharging */ +#define SH7750_MCR_TPC 0x00380000 /* DRAM: RAS Precharge Period */ + /* SDRAM: minimum number of cycles */ + /* until the next bank active cmd */ + /* is output after precharging */ #define SH7750_MCR_TPC_S 19 -#define SH7750_MCR_TPC_SDRAM_1 0x00000000 /* 1 cycle */ -#define SH7750_MCR_TPC_SDRAM_2 0x00080000 /* 2 cycles */ -#define SH7750_MCR_TPC_SDRAM_3 0x00100000 /* 3 cycles */ -#define SH7750_MCR_TPC_SDRAM_4 0x00180000 /* 4 cycles */ -#define SH7750_MCR_TPC_SDRAM_5 0x00200000 /* 5 cycles */ -#define SH7750_MCR_TPC_SDRAM_6 0x00280000 /* 6 cycles */ -#define SH7750_MCR_TPC_SDRAM_7 0x00300000 /* 7 cycles */ -#define SH7750_MCR_TPC_SDRAM_8 0x00380000 /* 8 cycles */ +#define SH7750_MCR_TPC_SDRAM_1 0x00000000 /* 1 cycle */ +#define SH7750_MCR_TPC_SDRAM_2 0x00080000 /* 2 cycles */ +#define SH7750_MCR_TPC_SDRAM_3 0x00100000 /* 3 cycles */ +#define SH7750_MCR_TPC_SDRAM_4 0x00180000 /* 4 cycles */ +#define SH7750_MCR_TPC_SDRAM_5 0x00200000 /* 5 cycles */ +#define SH7750_MCR_TPC_SDRAM_6 0x00280000 /* 6 cycles */ +#define SH7750_MCR_TPC_SDRAM_7 0x00300000 /* 7 cycles */ +#define SH7750_MCR_TPC_SDRAM_8 0x00380000 /* 8 cycles */ -#define SH7750_MCR_RCD 0x00030000 /* DRAM: RAS-CAS Assertion Delay time - SDRAM: bank active-read/write cmd - delay time */ -#define SH7750_MCR_RCD_DRAM_2 0x00000000 /* DRAM delay 2 clocks */ -#define SH7750_MCR_RCD_DRAM_3 0x00010000 /* DRAM delay 3 clocks */ -#define SH7750_MCR_RCD_DRAM_4 0x00020000 /* DRAM delay 4 clocks */ -#define SH7750_MCR_RCD_DRAM_5 0x00030000 /* DRAM delay 5 clocks */ -#define SH7750_MCR_RCD_SDRAM_2 0x00010000 /* DRAM delay 2 clocks */ -#define SH7750_MCR_RCD_SDRAM_3 0x00020000 /* DRAM delay 3 clocks */ -#define SH7750_MCR_RCD_SDRAM_4 0x00030000 /* DRAM delay 4 clocks */ +#define SH7750_MCR_RCD 0x00030000 /* DRAM: RAS-CAS Assertion Delay */ + /* time */ + /* SDRAM: bank active-read/write */ + /* command delay time */ +#define SH7750_MCR_RCD_DRAM_2 0x00000000 /* DRAM delay 2 clocks */ +#define SH7750_MCR_RCD_DRAM_3 0x00010000 /* DRAM delay 3 clocks */ +#define SH7750_MCR_RCD_DRAM_4 0x00020000 /* DRAM delay 4 clocks */ +#define SH7750_MCR_RCD_DRAM_5 0x00030000 /* DRAM delay 5 clocks */ +#define SH7750_MCR_RCD_SDRAM_2 0x00010000 /* DRAM delay 2 clocks */ +#define SH7750_MCR_RCD_SDRAM_3 0x00020000 /* DRAM delay 3 clocks */ +#define SH7750_MCR_RCD_SDRAM_4 0x00030000 /* DRAM delay 4 clocks */ -#define SH7750_MCR_TRWL 0x0000E000 /* SDRAM Write Precharge Delay */ -#define SH7750_MCR_TRWL_1 0x00000000 /* 1 */ -#define SH7750_MCR_TRWL_2 0x00002000 /* 2 */ -#define SH7750_MCR_TRWL_3 0x00004000 /* 3 */ -#define SH7750_MCR_TRWL_4 0x00006000 /* 
4 */ -#define SH7750_MCR_TRWL_5 0x00008000 /* 5 */ +#define SH7750_MCR_TRWL 0x0000E000 /* SDRAM Write Precharge Delay */ +#define SH7750_MCR_TRWL_1 0x00000000 /* 1 */ +#define SH7750_MCR_TRWL_2 0x00002000 /* 2 */ +#define SH7750_MCR_TRWL_3 0x00004000 /* 3 */ +#define SH7750_MCR_TRWL_4 0x00006000 /* 4 */ +#define SH7750_MCR_TRWL_5 0x00008000 /* 5 */ -#define SH7750_MCR_TRAS 0x00001C00 /* DRAM: CAS-Before-RAS Refresh RAS - asserting period - SDRAM: Command interval after - synchronous DRAM refresh */ -#define SH7750_MCR_TRAS_DRAM_2 0x00000000 /* 2 */ -#define SH7750_MCR_TRAS_DRAM_3 0x00000400 /* 3 */ -#define SH7750_MCR_TRAS_DRAM_4 0x00000800 /* 4 */ -#define SH7750_MCR_TRAS_DRAM_5 0x00000C00 /* 5 */ -#define SH7750_MCR_TRAS_DRAM_6 0x00001000 /* 6 */ -#define SH7750_MCR_TRAS_DRAM_7 0x00001400 /* 7 */ -#define SH7750_MCR_TRAS_DRAM_8 0x00001800 /* 8 */ -#define SH7750_MCR_TRAS_DRAM_9 0x00001C00 /* 9 */ +#define SH7750_MCR_TRAS 0x00001C00 /* DRAM: CAS-Before-RAS Refresh RAS */ + /* asserting period */ + /* SDRAM: Command interval after */ + /* synchronous DRAM refresh */ +#define SH7750_MCR_TRAS_DRAM_2 0x00000000 /* 2 */ +#define SH7750_MCR_TRAS_DRAM_3 0x00000400 /* 3 */ +#define SH7750_MCR_TRAS_DRAM_4 0x00000800 /* 4 */ +#define SH7750_MCR_TRAS_DRAM_5 0x00000C00 /* 5 */ +#define SH7750_MCR_TRAS_DRAM_6 0x00001000 /* 6 */ +#define SH7750_MCR_TRAS_DRAM_7 0x00001400 /* 7 */ +#define SH7750_MCR_TRAS_DRAM_8 0x00001800 /* 8 */ +#define SH7750_MCR_TRAS_DRAM_9 0x00001C00 /* 9 */ -#define SH7750_MCR_TRAS_SDRAM_TRC_4 0x00000000 /* 4 + TRC */ -#define SH7750_MCR_TRAS_SDRAM_TRC_5 0x00000400 /* 5 + TRC */ -#define SH7750_MCR_TRAS_SDRAM_TRC_6 0x00000800 /* 6 + TRC */ -#define SH7750_MCR_TRAS_SDRAM_TRC_7 0x00000C00 /* 7 + TRC */ -#define SH7750_MCR_TRAS_SDRAM_TRC_8 0x00001000 /* 8 + TRC */ -#define SH7750_MCR_TRAS_SDRAM_TRC_9 0x00001400 /* 9 + TRC */ -#define SH7750_MCR_TRAS_SDRAM_TRC_10 0x00001800 /* 10 + TRC */ -#define SH7750_MCR_TRAS_SDRAM_TRC_11 0x00001C00 /* 11 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_4 0x00000000 /* 4 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_5 0x00000400 /* 5 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_6 0x00000800 /* 6 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_7 0x00000C00 /* 7 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_8 0x00001000 /* 8 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_9 0x00001400 /* 9 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_10 0x00001800 /* 10 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_11 0x00001C00 /* 11 + TRC */ -#define SH7750_MCR_BE 0x00000200 /* Burst Enable */ -#define SH7750_MCR_SZ 0x00000180 /* Memory Data Size */ -#define SH7750_MCR_SZ_64 0x00000000 /* 64 bits */ -#define SH7750_MCR_SZ_16 0x00000100 /* 16 bits */ -#define SH7750_MCR_SZ_32 0x00000180 /* 32 bits */ +#define SH7750_MCR_BE 0x00000200 /* Burst Enable */ +#define SH7750_MCR_SZ 0x00000180 /* Memory Data Size */ +#define SH7750_MCR_SZ_64 0x00000000 /* 64 bits */ +#define SH7750_MCR_SZ_16 0x00000100 /* 16 bits */ +#define SH7750_MCR_SZ_32 0x00000180 /* 32 bits */ -#define SH7750_MCR_AMX 0x00000078 /* Address Multiplexing */ +#define SH7750_MCR_AMX 0x00000078 /* Address Multiplexing */ #define SH7750_MCR_AMX_S 3 -#define SH7750_MCR_AMX_DRAM_8BIT_COL 0x00000000 /* 8-bit column addr */ -#define SH7750_MCR_AMX_DRAM_9BIT_COL 0x00000008 /* 9-bit column addr */ -#define SH7750_MCR_AMX_DRAM_10BIT_COL 0x00000010 /* 10-bit column addr */ -#define SH7750_MCR_AMX_DRAM_11BIT_COL 0x00000018 /* 11-bit column addr */ -#define SH7750_MCR_AMX_DRAM_12BIT_COL 0x00000020 /* 12-bit column addr */ +#define 
SH7750_MCR_AMX_DRAM_8BIT_COL 0x00000000 /* 8-bit column addr */ +#define SH7750_MCR_AMX_DRAM_9BIT_COL 0x00000008 /* 9-bit column addr */ +#define SH7750_MCR_AMX_DRAM_10BIT_COL 0x00000010 /* 10-bit column addr */ +#define SH7750_MCR_AMX_DRAM_11BIT_COL 0x00000018 /* 11-bit column addr */ +#define SH7750_MCR_AMX_DRAM_12BIT_COL 0x00000020 /* 12-bit column addr */ /* See SH7750 Hardware Manual for SDRAM address multiplexor selection */ -#define SH7750_MCR_RFSH 0x00000004 /* Refresh Control */ -#define SH7750_MCR_RMODE 0x00000002 /* Refresh Mode: */ -#define SH7750_MCR_RMODE_NORMAL 0x00000000 /* Normal Refresh Mode */ -#define SH7750_MCR_RMODE_SELF 0x00000002 /* Self-Refresh Mode */ -#define SH7750_MCR_RMODE_EDO 0x00000001 /* EDO Mode */ +#define SH7750_MCR_RFSH 0x00000004 /* Refresh Control */ +#define SH7750_MCR_RMODE 0x00000002 /* Refresh Mode: */ +#define SH7750_MCR_RMODE_NORMAL 0x00000000 /* Normal Refresh Mode */ +#define SH7750_MCR_RMODE_SELF 0x00000002 /* Self-Refresh Mode */ +#define SH7750_MCR_RMODE_EDO 0x00000001 /* EDO Mode */ /* SDRAM Mode Set address */ #define SH7750_SDRAM_MODE_A2_BASE 0xFF900000 @@ -894,119 +894,119 @@ /* PCMCIA Control Register (half) - PCR */ -#define SH7750_PCR_REGOFS 0x800018 /* offset */ +#define SH7750_PCR_REGOFS 0x800018 /* offset */ #define SH7750_PCR SH7750_P4_REG32(SH7750_PCR_REGOFS) #define SH7750_PCR_A7 SH7750_A7_REG32(SH7750_PCR_REGOFS) -#define SH7750_PCR_A5PCW 0xC000 /* Area 5 PCMCIA Wait - Number of wait - states to be added to the number of - waits specified by WCR2 in a low-speed - PCMCIA wait cycle */ -#define SH7750_PCR_A5PCW_0 0x0000 /* 0 waits inserted */ -#define SH7750_PCR_A5PCW_15 0x4000 /* 15 waits inserted */ -#define SH7750_PCR_A5PCW_30 0x8000 /* 30 waits inserted */ -#define SH7750_PCR_A5PCW_50 0xC000 /* 50 waits inserted */ +#define SH7750_PCR_A5PCW 0xC000 /* Area 5 PCMCIA Wait - Number of wait */ + /* states to be added to the number of */ + /* waits specified by WCR2 in a */ + /* low-speed PCMCIA wait cycle */ +#define SH7750_PCR_A5PCW_0 0x0000 /* 0 waits inserted */ +#define SH7750_PCR_A5PCW_15 0x4000 /* 15 waits inserted */ +#define SH7750_PCR_A5PCW_30 0x8000 /* 30 waits inserted */ +#define SH7750_PCR_A5PCW_50 0xC000 /* 50 waits inserted */ -#define SH7750_PCR_A6PCW 0x3000 /* Area 6 PCMCIA Wait - Number of wait - states to be added to the number of - waits specified by WCR2 in a low-speed - PCMCIA wait cycle */ -#define SH7750_PCR_A6PCW_0 0x0000 /* 0 waits inserted */ -#define SH7750_PCR_A6PCW_15 0x1000 /* 15 waits inserted */ -#define SH7750_PCR_A6PCW_30 0x2000 /* 30 waits inserted */ -#define SH7750_PCR_A6PCW_50 0x3000 /* 50 waits inserted */ +#define SH7750_PCR_A6PCW 0x3000 /* Area 6 PCMCIA Wait - Number of wait */ + /* states to be added to the number of */ + /* waits specified by WCR2 in a */ + /* low-speed PCMCIA wait cycle */ +#define SH7750_PCR_A6PCW_0 0x0000 /* 0 waits inserted */ +#define SH7750_PCR_A6PCW_15 0x1000 /* 15 waits inserted */ +#define SH7750_PCR_A6PCW_30 0x2000 /* 30 waits inserted */ +#define SH7750_PCR_A6PCW_50 0x3000 /* 50 waits inserted */ -#define SH7750_PCR_A5TED 0x0E00 /* Area 5 Address-OE\/WE\ Assertion Delay, - delay time from address output to - OE\/WE\ assertion on the connected - PCMCIA interface */ +#define SH7750_PCR_A5TED 0x0E00 /* Area 5 Addr-OE\/WE\ Assertion Delay */ + /* delay time from address output to */ + /* OE\/WE\ assertion on the connected */ + /* PCMCIA interface */ #define SH7750_PCR_A5TED_S 9 -#define SH7750_PCR_A6TED 0x01C0 /* Area 6 Address-OE\/WE\ Assertion Delay */ +#define 
SH7750_PCR_A6TED 0x01C0 /* Area 6 Addr-OE\/WE\ Assertion Delay */ #define SH7750_PCR_A6TED_S 6 -#define SH7750_PCR_TED_0WS 0 /* 0 Waits inserted */ -#define SH7750_PCR_TED_1WS 1 /* 1 Waits inserted */ -#define SH7750_PCR_TED_2WS 2 /* 2 Waits inserted */ -#define SH7750_PCR_TED_3WS 3 /* 3 Waits inserted */ -#define SH7750_PCR_TED_6WS 4 /* 6 Waits inserted */ -#define SH7750_PCR_TED_9WS 5 /* 9 Waits inserted */ -#define SH7750_PCR_TED_12WS 6 /* 12 Waits inserted */ -#define SH7750_PCR_TED_15WS 7 /* 15 Waits inserted */ +#define SH7750_PCR_TED_0WS 0 /* 0 Waits inserted */ +#define SH7750_PCR_TED_1WS 1 /* 1 Waits inserted */ +#define SH7750_PCR_TED_2WS 2 /* 2 Waits inserted */ +#define SH7750_PCR_TED_3WS 3 /* 3 Waits inserted */ +#define SH7750_PCR_TED_6WS 4 /* 6 Waits inserted */ +#define SH7750_PCR_TED_9WS 5 /* 9 Waits inserted */ +#define SH7750_PCR_TED_12WS 6 /* 12 Waits inserted */ +#define SH7750_PCR_TED_15WS 7 /* 15 Waits inserted */ -#define SH7750_PCR_A5TEH 0x0038 /* Area 5 OE\/WE\ Negation Address delay, - address hold delay time from OE\/WE\ - negation in a write on the connected - PCMCIA interface */ +#define SH7750_PCR_A5TEH 0x0038 /* Area 5 OE\/WE\ Negation Addr delay, */ + /* address hold delay time from OE\/WE\ */ + /* negation in a write on the connected */ + /* PCMCIA interface */ #define SH7750_PCR_A5TEH_S 3 -#define SH7750_PCR_A6TEH 0x0007 /* Area 6 OE\/WE\ Negation Address delay */ +#define SH7750_PCR_A6TEH 0x0007 /* Area 6 OE\/WE\ Negation Address delay */ #define SH7750_PCR_A6TEH_S 0 -#define SH7750_PCR_TEH_0WS 0 /* 0 Waits inserted */ -#define SH7750_PCR_TEH_1WS 1 /* 1 Waits inserted */ -#define SH7750_PCR_TEH_2WS 2 /* 2 Waits inserted */ -#define SH7750_PCR_TEH_3WS 3 /* 3 Waits inserted */ -#define SH7750_PCR_TEH_6WS 4 /* 6 Waits inserted */ -#define SH7750_PCR_TEH_9WS 5 /* 9 Waits inserted */ -#define SH7750_PCR_TEH_12WS 6 /* 12 Waits inserted */ -#define SH7750_PCR_TEH_15WS 7 /* 15 Waits inserted */ +#define SH7750_PCR_TEH_0WS 0 /* 0 Waits inserted */ +#define SH7750_PCR_TEH_1WS 1 /* 1 Waits inserted */ +#define SH7750_PCR_TEH_2WS 2 /* 2 Waits inserted */ +#define SH7750_PCR_TEH_3WS 3 /* 3 Waits inserted */ +#define SH7750_PCR_TEH_6WS 4 /* 6 Waits inserted */ +#define SH7750_PCR_TEH_9WS 5 /* 9 Waits inserted */ +#define SH7750_PCR_TEH_12WS 6 /* 12 Waits inserted */ +#define SH7750_PCR_TEH_15WS 7 /* 15 Waits inserted */ /* Refresh Timer Control/Status Register (half) - RTSCR */ -#define SH7750_RTCSR_REGOFS 0x80001C /* offset */ +#define SH7750_RTCSR_REGOFS 0x80001C /* offset */ #define SH7750_RTCSR SH7750_P4_REG32(SH7750_RTCSR_REGOFS) #define SH7750_RTCSR_A7 SH7750_A7_REG32(SH7750_RTCSR_REGOFS) -#define SH7750_RTCSR_KEY 0xA500 /* RTCSR write key */ -#define SH7750_RTCSR_CMF 0x0080 /* Compare-Match Flag (indicates a - match between the refresh timer - counter and refresh time constant) */ -#define SH7750_RTCSR_CMIE 0x0040 /* Compare-Match Interrupt Enable */ -#define SH7750_RTCSR_CKS 0x0038 /* Refresh Counter Clock Selects */ -#define SH7750_RTCSR_CKS_DIS 0x0000 /* Clock Input Disabled */ -#define SH7750_RTCSR_CKS_CKIO_DIV4 0x0008 /* Bus Clock / 4 */ -#define SH7750_RTCSR_CKS_CKIO_DIV16 0x0010 /* Bus Clock / 16 */ -#define SH7750_RTCSR_CKS_CKIO_DIV64 0x0018 /* Bus Clock / 64 */ -#define SH7750_RTCSR_CKS_CKIO_DIV256 0x0020 /* Bus Clock / 256 */ -#define SH7750_RTCSR_CKS_CKIO_DIV1024 0x0028 /* Bus Clock / 1024 */ -#define SH7750_RTCSR_CKS_CKIO_DIV2048 0x0030 /* Bus Clock / 2048 */ -#define SH7750_RTCSR_CKS_CKIO_DIV4096 0x0038 /* Bus Clock / 4096 */ +#define 
SH7750_RTCSR_KEY 0xA500 /* RTCSR write key */ +#define SH7750_RTCSR_CMF 0x0080 /* Compare-Match Flag (indicates a */ + /* match between the refresh timer */ + /* counter and refresh time constant) */ +#define SH7750_RTCSR_CMIE 0x0040 /* Compare-Match Interrupt Enable */ +#define SH7750_RTCSR_CKS 0x0038 /* Refresh Counter Clock Selects */ +#define SH7750_RTCSR_CKS_DIS 0x0000 /* Clock Input Disabled */ +#define SH7750_RTCSR_CKS_CKIO_DIV4 0x0008 /* Bus Clock / 4 */ +#define SH7750_RTCSR_CKS_CKIO_DIV16 0x0010 /* Bus Clock / 16 */ +#define SH7750_RTCSR_CKS_CKIO_DIV64 0x0018 /* Bus Clock / 64 */ +#define SH7750_RTCSR_CKS_CKIO_DIV256 0x0020 /* Bus Clock / 256 */ +#define SH7750_RTCSR_CKS_CKIO_DIV1024 0x0028 /* Bus Clock / 1024 */ +#define SH7750_RTCSR_CKS_CKIO_DIV2048 0x0030 /* Bus Clock / 2048 */ +#define SH7750_RTCSR_CKS_CKIO_DIV4096 0x0038 /* Bus Clock / 4096 */ -#define SH7750_RTCSR_OVF 0x0004 /* Refresh Count Overflow Flag */ -#define SH7750_RTCSR_OVIE 0x0002 /* Refresh Count Overflow Interrupt - Enable */ -#define SH7750_RTCSR_LMTS 0x0001 /* Refresh Count Overflow Limit Select */ -#define SH7750_RTCSR_LMTS_1024 0x0000 /* Count Limit is 1024 */ -#define SH7750_RTCSR_LMTS_512 0x0001 /* Count Limit is 512 */ +#define SH7750_RTCSR_OVF 0x0004 /* Refresh Count Overflow Flag */ +#define SH7750_RTCSR_OVIE 0x0002 /* Refresh Count Overflow Interrupt */ + /* Enable */ +#define SH7750_RTCSR_LMTS 0x0001 /* Refresh Count Overflow Limit Select */ +#define SH7750_RTCSR_LMTS_1024 0x0000 /* Count Limit is 1024 */ +#define SH7750_RTCSR_LMTS_512 0x0001 /* Count Limit is 512 */ /* Refresh Timer Counter (half) - RTCNT */ -#define SH7750_RTCNT_REGOFS 0x800020 /* offset */ +#define SH7750_RTCNT_REGOFS 0x800020 /* offset */ #define SH7750_RTCNT SH7750_P4_REG32(SH7750_RTCNT_REGOFS) #define SH7750_RTCNT_A7 SH7750_A7_REG32(SH7750_RTCNT_REGOFS) -#define SH7750_RTCNT_KEY 0xA500 /* RTCNT write key */ +#define SH7750_RTCNT_KEY 0xA500 /* RTCNT write key */ /* Refresh Time Constant Register (half) - RTCOR */ -#define SH7750_RTCOR_REGOFS 0x800024 /* offset */ +#define SH7750_RTCOR_REGOFS 0x800024 /* offset */ #define SH7750_RTCOR SH7750_P4_REG32(SH7750_RTCOR_REGOFS) #define SH7750_RTCOR_A7 SH7750_A7_REG32(SH7750_RTCOR_REGOFS) -#define SH7750_RTCOR_KEY 0xA500 /* RTCOR write key */ +#define SH7750_RTCOR_KEY 0xA500 /* RTCOR write key */ /* Refresh Count Register (half) - RFCR */ -#define SH7750_RFCR_REGOFS 0x800028 /* offset */ +#define SH7750_RFCR_REGOFS 0x800028 /* offset */ #define SH7750_RFCR SH7750_P4_REG32(SH7750_RFCR_REGOFS) #define SH7750_RFCR_A7 SH7750_A7_REG32(SH7750_RFCR_REGOFS) -#define SH7750_RFCR_KEY 0xA400 /* RFCR write key */ +#define SH7750_RFCR_KEY 0xA400 /* RFCR write key */ /* Synchronous DRAM mode registers - SDMR */ -#define SH7750_SDMR2_REGOFS 0x900000 /* base offset */ -#define SH7750_SDMR2_REGNB 0x0FFC /* nb of register */ +#define SH7750_SDMR2_REGOFS 0x900000 /* base offset */ +#define SH7750_SDMR2_REGNB 0x0FFC /* nb of register */ #define SH7750_SDMR2 SH7750_P4_REG32(SH7750_SDMR2_REGOFS) #define SH7750_SDMR2_A7 SH7750_A7_REG32(SH7750_SDMR2_REGOFS) -#define SH7750_SDMR3_REGOFS 0x940000 /* offset */ -#define SH7750_SDMR3_REGNB 0x0FFC /* nb of register */ +#define SH7750_SDMR3_REGOFS 0x940000 /* offset */ +#define SH7750_SDMR3_REGNB 0x0FFC /* nb of register */ #define SH7750_SDMR3 SH7750_P4_REG32(SH7750_SDMR3_REGOFS) #define SH7750_SDMR3_A7 SH7750_A7_REG32(SH7750_SDMR3_REGOFS) @@ -1015,7 +1015,7 @@ */ /* DMA Source Address Register - SAR0, SAR1, SAR2, SAR3 */ -#define SH7750_SAR_REGOFS(n) (0xA00000 + 
((n)*16)) /* offset */ +#define SH7750_SAR_REGOFS(n) (0xA00000 + ((n) * 16)) /* offset */ #define SH7750_SAR(n) SH7750_P4_REG32(SH7750_SAR_REGOFS(n)) #define SH7750_SAR_A7(n) SH7750_A7_REG32(SH7750_SAR_REGOFS(n)) #define SH7750_SAR0 SH7750_SAR(0) @@ -1028,7 +1028,7 @@ #define SH7750_SAR3_A7 SH7750_SAR_A7(3) /* DMA Destination Address Register - DAR0, DAR1, DAR2, DAR3 */ -#define SH7750_DAR_REGOFS(n) (0xA00004 + ((n)*16)) /* offset */ +#define SH7750_DAR_REGOFS(n) (0xA00004 + ((n) * 16)) /* offset */ #define SH7750_DAR(n) SH7750_P4_REG32(SH7750_DAR_REGOFS(n)) #define SH7750_DAR_A7(n) SH7750_A7_REG32(SH7750_DAR_REGOFS(n)) #define SH7750_DAR0 SH7750_DAR(0) @@ -1041,7 +1041,7 @@ #define SH7750_DAR3_A7 SH7750_DAR_A7(3) /* DMA Transfer Count Register - DMATCR0, DMATCR1, DMATCR2, DMATCR3 */ -#define SH7750_DMATCR_REGOFS(n) (0xA00008 + ((n)*16)) /* offset */ +#define SH7750_DMATCR_REGOFS(n) (0xA00008 + ((n) * 16)) /* offset */ #define SH7750_DMATCR(n) SH7750_P4_REG32(SH7750_DMATCR_REGOFS(n)) #define SH7750_DMATCR_A7(n) SH7750_A7_REG32(SH7750_DMATCR_REGOFS(n)) #define SH7750_DMATCR0_P4 SH7750_DMATCR(0) @@ -1054,7 +1054,7 @@ #define SH7750_DMATCR3_A7 SH7750_DMATCR_A7(3) /* DMA Channel Control Register - CHCR0, CHCR1, CHCR2, CHCR3 */ -#define SH7750_CHCR_REGOFS(n) (0xA0000C + ((n)*16)) /* offset */ +#define SH7750_CHCR_REGOFS(n) (0xA0000C + ((n) * 16)) /* offset */ #define SH7750_CHCR(n) SH7750_P4_REG32(SH7750_CHCR_REGOFS(n)) #define SH7750_CHCR_A7(n) SH7750_A7_REG32(SH7750_CHCR_REGOFS(n)) #define SH7750_CHCR0 SH7750_CHCR(0) @@ -1066,227 +1066,227 @@ #define SH7750_CHCR2_A7 SH7750_CHCR_A7(2) #define SH7750_CHCR3_A7 SH7750_CHCR_A7(3) -#define SH7750_CHCR_SSA 0xE0000000 /* Source Address Space Attribute */ -#define SH7750_CHCR_SSA_PCMCIA 0x00000000 /* Reserved in PCMCIA access */ -#define SH7750_CHCR_SSA_DYNBSZ 0x20000000 /* Dynamic Bus Sizing I/O space */ -#define SH7750_CHCR_SSA_IO8 0x40000000 /* 8-bit I/O space */ -#define SH7750_CHCR_SSA_IO16 0x60000000 /* 16-bit I/O space */ -#define SH7750_CHCR_SSA_CMEM8 0x80000000 /* 8-bit common memory space */ -#define SH7750_CHCR_SSA_CMEM16 0xA0000000 /* 16-bit common memory space */ -#define SH7750_CHCR_SSA_AMEM8 0xC0000000 /* 8-bit attribute memory space */ -#define SH7750_CHCR_SSA_AMEM16 0xE0000000 /* 16-bit attribute memory space */ +#define SH7750_CHCR_SSA 0xE0000000 /* Source Address Space Attribute */ +#define SH7750_CHCR_SSA_PCMCIA 0x00000000 /* Reserved in PCMCIA access */ +#define SH7750_CHCR_SSA_DYNBSZ 0x20000000 /* Dynamic Bus Sizing I/O space */ +#define SH7750_CHCR_SSA_IO8 0x40000000 /* 8-bit I/O space */ +#define SH7750_CHCR_SSA_IO16 0x60000000 /* 16-bit I/O space */ +#define SH7750_CHCR_SSA_CMEM8 0x80000000 /* 8-bit common memory space */ +#define SH7750_CHCR_SSA_CMEM16 0xA0000000 /* 16-bit common memory space */ +#define SH7750_CHCR_SSA_AMEM8 0xC0000000 /* 8-bit attribute memory space */ +#define SH7750_CHCR_SSA_AMEM16 0xE0000000 /* 16-bit attribute memory space */ -#define SH7750_CHCR_STC 0x10000000 /* Source Address Wait Control Select, - specifies CS5 or CS6 space wait - control for PCMCIA access */ +#define SH7750_CHCR_STC 0x10000000 /* Source Addr Wait Control Select */ + /* specifies CS5 or CS6 space wait */ + /* control for PCMCIA access */ -#define SH7750_CHCR_DSA 0x0E000000 /* Source Address Space Attribute */ -#define SH7750_CHCR_DSA_PCMCIA 0x00000000 /* Reserved in PCMCIA access */ -#define SH7750_CHCR_DSA_DYNBSZ 0x02000000 /* Dynamic Bus Sizing I/O space */ -#define SH7750_CHCR_DSA_IO8 0x04000000 /* 8-bit I/O space */ -#define 
SH7750_CHCR_DSA_IO16 0x06000000 /* 16-bit I/O space */ -#define SH7750_CHCR_DSA_CMEM8 0x08000000 /* 8-bit common memory space */ -#define SH7750_CHCR_DSA_CMEM16 0x0A000000 /* 16-bit common memory space */ -#define SH7750_CHCR_DSA_AMEM8 0x0C000000 /* 8-bit attribute memory space */ -#define SH7750_CHCR_DSA_AMEM16 0x0E000000 /* 16-bit attribute memory space */ +#define SH7750_CHCR_DSA 0x0E000000 /* Source Address Space Attribute */ +#define SH7750_CHCR_DSA_PCMCIA 0x00000000 /* Reserved in PCMCIA access */ +#define SH7750_CHCR_DSA_DYNBSZ 0x02000000 /* Dynamic Bus Sizing I/O space */ +#define SH7750_CHCR_DSA_IO8 0x04000000 /* 8-bit I/O space */ +#define SH7750_CHCR_DSA_IO16 0x06000000 /* 16-bit I/O space */ +#define SH7750_CHCR_DSA_CMEM8 0x08000000 /* 8-bit common memory space */ +#define SH7750_CHCR_DSA_CMEM16 0x0A000000 /* 16-bit common memory space */ +#define SH7750_CHCR_DSA_AMEM8 0x0C000000 /* 8-bit attribute memory space */ +#define SH7750_CHCR_DSA_AMEM16 0x0E000000 /* 16-bit attribute memory space */ -#define SH7750_CHCR_DTC 0x01000000 /* Destination Address Wait Control - Select, specifies CS5 or CS6 - space wait control for PCMCIA - access */ +#define SH7750_CHCR_DTC 0x01000000 /* Destination Address Wait Control */ + /* Select, specifies CS5 or CS6 */ + /* space wait control for PCMCIA */ + /* access */ -#define SH7750_CHCR_DS 0x00080000 /* DREQ\ Select : */ -#define SH7750_CHCR_DS_LOWLVL 0x00000000 /* Low Level Detection */ -#define SH7750_CHCR_DS_FALL 0x00080000 /* Falling Edge Detection */ +#define SH7750_CHCR_DS 0x00080000 /* DREQ\ Select : */ +#define SH7750_CHCR_DS_LOWLVL 0x00000000 /* Low Level Detection */ +#define SH7750_CHCR_DS_FALL 0x00080000 /* Falling Edge Detection */ -#define SH7750_CHCR_RL 0x00040000 /* Request Check Level: */ -#define SH7750_CHCR_RL_ACTH 0x00000000 /* DRAK is an active high out */ -#define SH7750_CHCR_RL_ACTL 0x00040000 /* DRAK is an active low out */ +#define SH7750_CHCR_RL 0x00040000 /* Request Check Level: */ +#define SH7750_CHCR_RL_ACTH 0x00000000 /* DRAK is an active high out */ +#define SH7750_CHCR_RL_ACTL 0x00040000 /* DRAK is an active low out */ -#define SH7750_CHCR_AM 0x00020000 /* Acknowledge Mode: */ -#define SH7750_CHCR_AM_RD 0x00000000 /* DACK is output in read cycle */ -#define SH7750_CHCR_AM_WR 0x00020000 /* DACK is output in write cycle */ +#define SH7750_CHCR_AM 0x00020000 /* Acknowledge Mode: */ +#define SH7750_CHCR_AM_RD 0x00000000 /* DACK is output in read cycle */ +#define SH7750_CHCR_AM_WR 0x00020000 /* DACK is output in write cycle */ -#define SH7750_CHCR_AL 0x00010000 /* Acknowledge Level: */ -#define SH7750_CHCR_AL_ACTH 0x00000000 /* DACK is an active high out */ -#define SH7750_CHCR_AL_ACTL 0x00010000 /* DACK is an active low out */ +#define SH7750_CHCR_AL 0x00010000 /* Acknowledge Level: */ +#define SH7750_CHCR_AL_ACTH 0x00000000 /* DACK is an active high out */ +#define SH7750_CHCR_AL_ACTL 0x00010000 /* DACK is an active low out */ -#define SH7750_CHCR_DM 0x0000C000 /* Destination Address Mode: */ -#define SH7750_CHCR_DM_FIX 0x00000000 /* Destination Addr Fixed */ -#define SH7750_CHCR_DM_INC 0x00004000 /* Destination Addr Incremented */ -#define SH7750_CHCR_DM_DEC 0x00008000 /* Destination Addr Decremented */ +#define SH7750_CHCR_DM 0x0000C000 /* Destination Address Mode: */ +#define SH7750_CHCR_DM_FIX 0x00000000 /* Destination Addr Fixed */ +#define SH7750_CHCR_DM_INC 0x00004000 /* Destination Addr Incremented */ +#define SH7750_CHCR_DM_DEC 0x00008000 /* Destination Addr Decremented */ -#define SH7750_CHCR_SM 0x00003000 
/* Source Address Mode: */ -#define SH7750_CHCR_SM_FIX 0x00000000 /* Source Addr Fixed */ -#define SH7750_CHCR_SM_INC 0x00001000 /* Source Addr Incremented */ -#define SH7750_CHCR_SM_DEC 0x00002000 /* Source Addr Decremented */ +#define SH7750_CHCR_SM 0x00003000 /* Source Address Mode: */ +#define SH7750_CHCR_SM_FIX 0x00000000 /* Source Addr Fixed */ +#define SH7750_CHCR_SM_INC 0x00001000 /* Source Addr Incremented */ +#define SH7750_CHCR_SM_DEC 0x00002000 /* Source Addr Decremented */ -#define SH7750_CHCR_RS 0x00000F00 /* Request Source Select: */ -#define SH7750_CHCR_RS_ER_DA_EA_TO_EA 0x000 /* External Request, Dual Address - Mode (External Addr Space-> - External Addr Space) */ -#define SH7750_CHCR_RS_ER_SA_EA_TO_ED 0x200 /* External Request, Single - Address Mode (External Addr - Space -> External Device) */ -#define SH7750_CHCR_RS_ER_SA_ED_TO_EA 0x300 /* External Request, Single - Address Mode, (External - Device -> External Addr - Space) */ -#define SH7750_CHCR_RS_AR_EA_TO_EA 0x400 /* Auto-Request (External Addr - Space -> External Addr Space) */ +#define SH7750_CHCR_RS 0x00000F00 /* Request Source Select: */ +#define SH7750_CHCR_RS_ER_DA_EA_TO_EA 0x000 /* External Request, Dual Addr */ + /* Mode, External Addr Space */ + /* -> External Addr Space) */ +#define SH7750_CHCR_RS_ER_SA_EA_TO_ED 0x200 /* External Request, Single */ + /* Address Mode (Ext. Addr */ + /* Space -> External Device) */ +#define SH7750_CHCR_RS_ER_SA_ED_TO_EA 0x300 /* External Request, Single */ + /* Address Mode, (External */ + /* Device -> External Addr */ + /* Space) */ +#define SH7750_CHCR_RS_AR_EA_TO_EA 0x400 /* Auto-Request (External Addr */ + /* Space -> Ext. Addr Space) */ -#define SH7750_CHCR_RS_AR_EA_TO_OCP 0x500 /* Auto-Request (External Addr - Space -> On-chip Peripheral - Module) */ -#define SH7750_CHCR_RS_AR_OCP_TO_EA 0x600 /* Auto-Request (On-chip - Peripheral Module -> - External Addr Space */ -#define SH7750_CHCR_RS_SCITX_EA_TO_SC 0x800 /* SCI Transmit-Data-Empty intr - transfer request (external - address space -> SCTDR1) */ -#define SH7750_CHCR_RS_SCIRX_SC_TO_EA 0x900 /* SCI Receive-Data-Full intr - transfer request (SCRDR1 -> - External Addr Space) */ -#define SH7750_CHCR_RS_SCIFTX_EA_TO_SC 0xA00 /* SCIF Transmit-Data-Empty intr - transfer request (external - address space -> SCFTDR1) */ -#define SH7750_CHCR_RS_SCIFRX_SC_TO_EA 0xB00 /* SCIF Receive-Data-Full intr - transfer request (SCFRDR2 -> - External Addr Space) */ -#define SH7750_CHCR_RS_TMU2_EA_TO_EA 0xC00 /* TMU Channel 2 (input capture - interrupt), (external address - space -> external address - space) */ -#define SH7750_CHCR_RS_TMU2_EA_TO_OCP 0xD00 /* TMU Channel 2 (input capture - interrupt), (external address - space -> on-chip peripheral - module) */ -#define SH7750_CHCR_RS_TMU2_OCP_TO_EA 0xE00 /* TMU Channel 2 (input capture - interrupt), (on-chip - peripheral module -> external - address space) */ +#define SH7750_CHCR_RS_AR_EA_TO_OCP 0x500 /* Auto-Request (External Addr */ + /* Space -> On-chip */ + /* Peripheral Module) */ +#define SH7750_CHCR_RS_AR_OCP_TO_EA 0x600 /* Auto-Request (On-chip */ + /* Peripheral Module -> */ + /* External Addr Space */ +#define SH7750_CHCR_RS_SCITX_EA_TO_SC 0x800 /* SCI Transmit-Data-Empty intr */ + /* transfer request (external */ + /* address space -> SCTDR1) */ +#define SH7750_CHCR_RS_SCIRX_SC_TO_EA 0x900 /* SCI Receive-Data-Full intr */ + /* transfer request (SCRDR1 */ + /* -> External Addr Space) */ +#define SH7750_CHCR_RS_SCIFTX_EA_TO_SC 0xA00 /* SCIF TX-Data-Empty intr */ + /* transfer 
request (external */ + /* address space -> SCFTDR1) */ +#define SH7750_CHCR_RS_SCIFRX_SC_TO_EA 0xB00 /* SCIF Receive-Data-Full intr */ + /* transfer request (SCFRDR2 */ + /* -> External Addr Space) */ +#define SH7750_CHCR_RS_TMU2_EA_TO_EA 0xC00 /* TMU Channel 2 (input capture */ + /* interrupt), (external */ + /* address space -> external */ + /* address space) */ +#define SH7750_CHCR_RS_TMU2_EA_TO_OCP 0xD00 /* TMU Channel 2 (input capture */ + /* interrupt), (external */ + /* address space -> on-chip */ + /* peripheral module) */ +#define SH7750_CHCR_RS_TMU2_OCP_TO_EA 0xE00 /* TMU Channel 2 (input capture */ + /* interrupt), (on-chip */ + /* peripheral module -> */ + /* external address space) */ -#define SH7750_CHCR_TM 0x00000080 /* Transmit mode: */ -#define SH7750_CHCR_TM_CSTEAL 0x00000000 /* Cycle Steal Mode */ -#define SH7750_CHCR_TM_BURST 0x00000080 /* Burst Mode */ +#define SH7750_CHCR_TM 0x00000080 /* Transmit mode: */ +#define SH7750_CHCR_TM_CSTEAL 0x00000000 /* Cycle Steal Mode */ +#define SH7750_CHCR_TM_BURST 0x00000080 /* Burst Mode */ -#define SH7750_CHCR_TS 0x00000070 /* Transmit Size: */ -#define SH7750_CHCR_TS_QUAD 0x00000000 /* Quadword Size (64 bits) */ -#define SH7750_CHCR_TS_BYTE 0x00000010 /* Byte Size (8 bit) */ -#define SH7750_CHCR_TS_WORD 0x00000020 /* Word Size (16 bit) */ -#define SH7750_CHCR_TS_LONG 0x00000030 /* Longword Size (32 bit) */ -#define SH7750_CHCR_TS_BLOCK 0x00000040 /* 32-byte block transfer */ +#define SH7750_CHCR_TS 0x00000070 /* Transmit Size: */ +#define SH7750_CHCR_TS_QUAD 0x00000000 /* Quadword Size (64 bits) */ +#define SH7750_CHCR_TS_BYTE 0x00000010 /* Byte Size (8 bit) */ +#define SH7750_CHCR_TS_WORD 0x00000020 /* Word Size (16 bit) */ +#define SH7750_CHCR_TS_LONG 0x00000030 /* Longword Size (32 bit) */ +#define SH7750_CHCR_TS_BLOCK 0x00000040 /* 32-byte block transfer */ -#define SH7750_CHCR_IE 0x00000004 /* Interrupt Enable */ -#define SH7750_CHCR_TE 0x00000002 /* Transfer End */ -#define SH7750_CHCR_DE 0x00000001 /* DMAC Enable */ +#define SH7750_CHCR_IE 0x00000004 /* Interrupt Enable */ +#define SH7750_CHCR_TE 0x00000002 /* Transfer End */ +#define SH7750_CHCR_DE 0x00000001 /* DMAC Enable */ /* DMA Operation Register - DMAOR */ -#define SH7750_DMAOR_REGOFS 0xA00040 /* offset */ +#define SH7750_DMAOR_REGOFS 0xA00040 /* offset */ #define SH7750_DMAOR SH7750_P4_REG32(SH7750_DMAOR_REGOFS) #define SH7750_DMAOR_A7 SH7750_A7_REG32(SH7750_DMAOR_REGOFS) -#define SH7750_DMAOR_DDT 0x00008000 /* On-Demand Data Transfer Mode */ +#define SH7750_DMAOR_DDT 0x00008000 /* On-Demand Data Transfer Mode */ -#define SH7750_DMAOR_PR 0x00000300 /* Priority Mode: */ -#define SH7750_DMAOR_PR_0123 0x00000000 /* CH0 > CH1 > CH2 > CH3 */ -#define SH7750_DMAOR_PR_0231 0x00000100 /* CH0 > CH2 > CH3 > CH1 */ -#define SH7750_DMAOR_PR_2013 0x00000200 /* CH2 > CH0 > CH1 > CH3 */ -#define SH7750_DMAOR_PR_RR 0x00000300 /* Round-robin mode */ +#define SH7750_DMAOR_PR 0x00000300 /* Priority Mode: */ +#define SH7750_DMAOR_PR_0123 0x00000000 /* CH0 > CH1 > CH2 > CH3 */ +#define SH7750_DMAOR_PR_0231 0x00000100 /* CH0 > CH2 > CH3 > CH1 */ +#define SH7750_DMAOR_PR_2013 0x00000200 /* CH2 > CH0 > CH1 > CH3 */ +#define SH7750_DMAOR_PR_RR 0x00000300 /* Round-robin mode */ -#define SH7750_DMAOR_COD 0x00000010 /* Check Overrun for DREQ\ */ -#define SH7750_DMAOR_AE 0x00000004 /* Address Error flag */ -#define SH7750_DMAOR_NMIF 0x00000002 /* NMI Flag */ -#define SH7750_DMAOR_DME 0x00000001 /* DMAC Master Enable */ +#define SH7750_DMAOR_COD 0x00000010 /* Check Overrun for DREQ\ */ 
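The CHCR field values above are intended to be OR-ed together when a DMA channel is set up, with the DMAC as a whole additionally gated by the DMAOR master-enable bit. A minimal sketch, using only the macros defined above (illustrative only, not taken from the patched sources), of composing a channel control value for an auto-request, longword transfer with an incrementing source and a fixed destination:

    /* Sketch only: assumes the SH7750 register header above is included. */
    uint32_t chcr = SH7750_CHCR_RS_AR_EA_TO_EA  /* auto-request, ext. addr -> ext. addr */
                  | SH7750_CHCR_TS_LONG         /* 32-bit transfer units                */
                  | SH7750_CHCR_SM_INC          /* source address increments            */
                  | SH7750_CHCR_DM_FIX          /* destination address fixed            */
                  | SH7750_CHCR_IE              /* interrupt at transfer end            */
                  | SH7750_CHCR_DE;             /* enable this channel                  */

Such a value would then be written to one of SH7750_CHCR0..SH7750_CHCR3, and SH7750_DMAOR_DME must also be set in DMAOR before any channel actually transfers.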
+#define SH7750_DMAOR_AE 0x00000004 /* Address Error flag */ +#define SH7750_DMAOR_NMIF 0x00000002 /* NMI Flag */ +#define SH7750_DMAOR_DME 0x00000001 /* DMAC Master Enable */ /* * I/O Ports */ /* Port Control Register A - PCTRA */ -#define SH7750_PCTRA_REGOFS 0x80002C /* offset */ +#define SH7750_PCTRA_REGOFS 0x80002C /* offset */ #define SH7750_PCTRA SH7750_P4_REG32(SH7750_PCTRA_REGOFS) #define SH7750_PCTRA_A7 SH7750_A7_REG32(SH7750_PCTRA_REGOFS) -#define SH7750_PCTRA_PBPUP(n) 0 /* Bit n is pulled up */ -#define SH7750_PCTRA_PBNPUP(n) (1 << ((n)*2+1)) /* Bit n is not pulled up */ -#define SH7750_PCTRA_PBINP(n) 0 /* Bit n is an input */ -#define SH7750_PCTRA_PBOUT(n) (1 << ((n)*2)) /* Bit n is an output */ +#define SH7750_PCTRA_PBPUP(n) 0 /* Bit n is pulled up */ +#define SH7750_PCTRA_PBNPUP(n) (1 << ((n) * 2 + 1)) /* Bit n is not pulled up */ +#define SH7750_PCTRA_PBINP(n) 0 /* Bit n is an input */ +#define SH7750_PCTRA_PBOUT(n) (1 << ((n) * 2)) /* Bit n is an output */ /* Port Data Register A - PDTRA(half) */ -#define SH7750_PDTRA_REGOFS 0x800030 /* offset */ +#define SH7750_PDTRA_REGOFS 0x800030 /* offset */ #define SH7750_PDTRA SH7750_P4_REG32(SH7750_PDTRA_REGOFS) #define SH7750_PDTRA_A7 SH7750_A7_REG32(SH7750_PDTRA_REGOFS) #define SH7750_PDTRA_BIT(n) (1 << (n)) /* Port Control Register B - PCTRB */ -#define SH7750_PCTRB_REGOFS 0x800040 /* offset */ +#define SH7750_PCTRB_REGOFS 0x800040 /* offset */ #define SH7750_PCTRB SH7750_P4_REG32(SH7750_PCTRB_REGOFS) #define SH7750_PCTRB_A7 SH7750_A7_REG32(SH7750_PCTRB_REGOFS) -#define SH7750_PCTRB_PBPUP(n) 0 /* Bit n is pulled up */ -#define SH7750_PCTRB_PBNPUP(n) (1 << ((n-16)*2+1)) /* Bit n is not pulled up */ -#define SH7750_PCTRB_PBINP(n) 0 /* Bit n is an input */ -#define SH7750_PCTRB_PBOUT(n) (1 << ((n-16)*2)) /* Bit n is an output */ +#define SH7750_PCTRB_PBPUP(n) 0 /* Bit n is pulled up */ +#define SH7750_PCTRB_PBNPUP(n) (1 << ((n - 16) * 2 + 1)) /* Bit n is not pulled up */ +#define SH7750_PCTRB_PBINP(n) 0 /* Bit n is an input */ +#define SH7750_PCTRB_PBOUT(n) (1 << ((n - 16) * 2)) /* Bit n is an output */ /* Port Data Register B - PDTRB(half) */ -#define SH7750_PDTRB_REGOFS 0x800044 /* offset */ +#define SH7750_PDTRB_REGOFS 0x800044 /* offset */ #define SH7750_PDTRB SH7750_P4_REG32(SH7750_PDTRB_REGOFS) #define SH7750_PDTRB_A7 SH7750_A7_REG32(SH7750_PDTRB_REGOFS) -#define SH7750_PDTRB_BIT(n) (1 << ((n)-16)) +#define SH7750_PDTRB_BIT(n) (1 << ((n) - 16)) /* GPIO Interrupt Control Register - GPIOIC(half) */ -#define SH7750_GPIOIC_REGOFS 0x800048 /* offset */ +#define SH7750_GPIOIC_REGOFS 0x800048 /* offset */ #define SH7750_GPIOIC SH7750_P4_REG32(SH7750_GPIOIC_REGOFS) #define SH7750_GPIOIC_A7 SH7750_A7_REG32(SH7750_GPIOIC_REGOFS) -#define SH7750_GPIOIC_PTIREN(n) (1 << (n)) /* Port n is used as a GPIO int */ +#define SH7750_GPIOIC_PTIREN(n) (1 << (n)) /* Port n is used as a GPIO int */ /* * Interrupt Controller - INTC */ /* Interrupt Control Register - ICR (half) */ -#define SH7750_ICR_REGOFS 0xD00000 /* offset */ +#define SH7750_ICR_REGOFS 0xD00000 /* offset */ #define SH7750_ICR SH7750_P4_REG32(SH7750_ICR_REGOFS) #define SH7750_ICR_A7 SH7750_A7_REG32(SH7750_ICR_REGOFS) -#define SH7750_ICR_NMIL 0x8000 /* NMI Input Level */ -#define SH7750_ICR_MAI 0x4000 /* NMI Interrupt Mask */ +#define SH7750_ICR_NMIL 0x8000 /* NMI Input Level */ +#define SH7750_ICR_MAI 0x4000 /* NMI Interrupt Mask */ -#define SH7750_ICR_NMIB 0x0200 /* NMI Block Mode: */ -#define SH7750_ICR_NMIB_BLK 0x0000 /* NMI requests held pending while - SR.BL bit is set to 1 */ 
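The port-control macros above pack two control bits per pin (output enable and pull-up disable) into PCTRA/PCTRB, while PDTRA/PDTRB hold the pin data itself. A brief sketch, again illustrative only and using nothing beyond the macros defined above (the actual register writes are left to the surrounding code), of the values needed to drive port A pin 3 as an output with its pull-up turned off:

    /* Sketch only: control and data values for port A pin 3. */
    uint32_t pctra_val = SH7750_PCTRA_PBOUT(3)    /* pin 3 configured as an output   */
                       | SH7750_PCTRA_PBNPUP(3);  /* pin 3 pull-up resistor off      */
    uint16_t pdtra_val = SH7750_PDTRA_BIT(3);     /* set pin 3 high via PDTRA (half) */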
-#define SH7750_ICR_NMIB_NBLK 0x0200 /* NMI requests detected when SR.BL bit - set to 1 */ +#define SH7750_ICR_NMIB 0x0200 /* NMI Block Mode: */ +#define SH7750_ICR_NMIB_BLK 0x0000 /* NMI requests held pending while */ + /* SR.BL bit is set to 1 */ +#define SH7750_ICR_NMIB_NBLK 0x0200 /* NMI requests detected when SR.BL */ + /* bit set to 1 */ -#define SH7750_ICR_NMIE 0x0100 /* NMI Edge Select: */ -#define SH7750_ICR_NMIE_FALL 0x0000 /* Interrupt request detected on falling - edge of NMI input */ -#define SH7750_ICR_NMIE_RISE 0x0100 /* Interrupt request detected on rising - edge of NMI input */ +#define SH7750_ICR_NMIE 0x0100 /* NMI Edge Select: */ +#define SH7750_ICR_NMIE_FALL 0x0000 /* Interrupt request detected on */ + /* falling edge of NMI input */ +#define SH7750_ICR_NMIE_RISE 0x0100 /* Interrupt request detected on */ + /* rising edge of NMI input */ -#define SH7750_ICR_IRLM 0x0080 /* IRL Pin Mode: */ -#define SH7750_ICR_IRLM_ENC 0x0000 /* IRL\ pins used as a level-encoded - interrupt requests */ -#define SH7750_ICR_IRLM_RAW 0x0080 /* IRL\ pins used as a four independent - interrupt requests */ +#define SH7750_ICR_IRLM 0x0080 /* IRL Pin Mode: */ +#define SH7750_ICR_IRLM_ENC 0x0000 /* IRL\ pins used as a level-encoded */ + /* interrupt requests */ +#define SH7750_ICR_IRLM_RAW 0x0080 /* IRL\ pins used as a four */ + /* independent interrupt requests */ /* * User Break Controller registers */ -#define SH7750_BARA 0x200000 /* Break address regiser A */ -#define SH7750_BAMRA 0x200004 /* Break address mask regiser A */ -#define SH7750_BBRA 0x200008 /* Break bus cycle regiser A */ -#define SH7750_BARB 0x20000c /* Break address regiser B */ -#define SH7750_BAMRB 0x200010 /* Break address mask regiser B */ -#define SH7750_BBRB 0x200014 /* Break bus cycle regiser B */ -#define SH7750_BASRB 0x000018 /* Break ASID regiser B */ -#define SH7750_BDRB 0x200018 /* Break data regiser B */ -#define SH7750_BDMRB 0x20001c /* Break data mask regiser B */ -#define SH7750_BRCR 0x200020 /* Break control register */ +#define SH7750_BARA 0x200000 /* Break address regiser A */ +#define SH7750_BAMRA 0x200004 /* Break address mask regiser A */ +#define SH7750_BBRA 0x200008 /* Break bus cycle regiser A */ +#define SH7750_BARB 0x20000c /* Break address regiser B */ +#define SH7750_BAMRB 0x200010 /* Break address mask regiser B */ +#define SH7750_BBRB 0x200014 /* Break bus cycle regiser B */ +#define SH7750_BASRB 0x000018 /* Break ASID regiser B */ +#define SH7750_BDRB 0x200018 /* Break data regiser B */ +#define SH7750_BDMRB 0x20001c /* Break data mask regiser B */ +#define SH7750_BRCR 0x200020 /* Break control register */ -#define SH7750_BRCR_UDBE 0x0001 /* User break debug enable bit */ +#define SH7750_BRCR_UDBE 0x0001 /* User break debug enable bit */ /* * Missing in RTEMS, added for QEMU diff --git a/hw/sh4/shix.c b/hw/sh4/shix.c index b0579aa0f1..aa812512f0 100644 --- a/hw/sh4/shix.c +++ b/hw/sh4/shix.c @@ -22,11 +22,11 @@ * THE SOFTWARE. 
*/ /* - Shix 2.0 board by Alexis Polti, described at - https://web.archive.org/web/20070917001736/perso.enst.fr/~polti/realisations/shix20 - - More information in target/sh4/README.sh4 -*/ + * Shix 2.0 board by Alexis Polti, described at + * https://web.archive.org/web/20070917001736/perso.enst.fr/~polti/realisations/shix20 + * + * More information in target/sh4/README.sh4 + */ #include "qemu/osdep.h" #include "qapi/error.h" #include "cpu.h" @@ -48,7 +48,7 @@ static void shix_init(MachineState *machine) MemoryRegion *rom = g_new(MemoryRegion, 1); MemoryRegion *sdram = g_new(MemoryRegion, 2); const char *bios_name = machine->firmware ?: BIOS_FILENAME; - + cpu = SUPERH_CPU(cpu_create(machine->cpu_type)); /* Allocate memory space */ diff --git a/hw/sh4/trace-events b/hw/sh4/trace-events new file mode 100644 index 0000000000..4b61cd56c8 --- /dev/null +++ b/hw/sh4/trace-events @@ -0,0 +1,3 @@ +# sh7750.c +sh7750_porta(uint16_t prev, uint16_t cur, uint16_t pdtr, uint16_t pctr) "porta changed from 0x%04x to 0x%04x\npdtra=0x%04x, pctra=0x%08x" +sh7750_portb(uint16_t prev, uint16_t cur, uint16_t pdtr, uint16_t pctr) "portb changed from 0x%04x to 0x%04x\npdtrb=0x%04x, pctrb=0x%08x" diff --git a/hw/sh4/trace.h b/hw/sh4/trace.h new file mode 100644 index 0000000000..e2c13323b7 --- /dev/null +++ b/hw/sh4/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_sh4.h" diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c index 7397e56737..66a020999b 100644 --- a/hw/smbios/smbios.c +++ b/hw/smbios/smbios.c @@ -62,7 +62,7 @@ uint8_t *smbios_tables; size_t smbios_tables_len; unsigned smbios_table_max; unsigned smbios_table_cnt; -static SmbiosEntryPointType smbios_ep_type = SMBIOS_ENTRY_POINT_21; +static SmbiosEntryPointType smbios_ep_type = SMBIOS_ENTRY_POINT_TYPE_32; static SmbiosEntryPoint ep; @@ -104,11 +104,20 @@ static struct { const char *sock_pfx, *manufacturer, *version, *serial, *asset, *part; uint64_t max_speed; uint64_t current_speed; + uint64_t processor_id; } type4 = { .max_speed = DEFAULT_CPU_SPEED, - .current_speed = DEFAULT_CPU_SPEED + .current_speed = DEFAULT_CPU_SPEED, + .processor_id = 0, }; +struct type8_instance { + const char *internal_reference, *external_reference; + uint8_t connector_type, port_type; + QTAILQ_ENTRY(type8_instance) next; +}; +static QTAILQ_HEAD(, type8_instance) type8 = QTAILQ_HEAD_INITIALIZER(type8); + static struct { size_t nvalues; char **values; @@ -327,10 +336,37 @@ static const QemuOptDesc qemu_smbios_type4_opts[] = { .name = "part", .type = QEMU_OPT_STRING, .help = "part number", + }, { + .name = "processor-id", + .type = QEMU_OPT_NUMBER, + .help = "processor id", }, { /* end of list */ } }; +static const QemuOptDesc qemu_smbios_type8_opts[] = { + { + .name = "internal_reference", + .type = QEMU_OPT_STRING, + .help = "internal reference designator", + }, + { + .name = "external_reference", + .type = QEMU_OPT_STRING, + .help = "external reference designator", + }, + { + .name = "connector_type", + .type = QEMU_OPT_NUMBER, + .help = "connector type", + }, + { + .name = "port_type", + .type = QEMU_OPT_NUMBER, + .help = "port type", + }, +}; + static const QemuOptDesc qemu_smbios_type11_opts[] = { { .name = "value", @@ -432,7 +468,7 @@ static void smbios_validate_table(MachineState *ms) exit(1); } - if (smbios_ep_type == SMBIOS_ENTRY_POINT_21 && + if (smbios_ep_type == SMBIOS_ENTRY_POINT_TYPE_32 && smbios_tables_len > SMBIOS_21_MAX_TABLES_LEN) { error_report("SMBIOS 2.1 table length %zu exceeds %d", smbios_tables_len, SMBIOS_21_MAX_TABLES_LEN); @@ -549,9 +585,23 @@ bool 
smbios_skip_table(uint8_t type, bool required_table) return true; } +#define T0_BASE 0x000 +#define T1_BASE 0x100 +#define T2_BASE 0x200 +#define T3_BASE 0x300 +#define T4_BASE 0x400 +#define T11_BASE 0xe00 + +#define T16_BASE 0x1000 +#define T17_BASE 0x1100 +#define T19_BASE 0x1300 +#define T32_BASE 0x2000 +#define T41_BASE 0x2900 +#define T127_BASE 0x7F00 + static void smbios_build_type_0_table(void) { - SMBIOS_BUILD_TABLE_PRE(0, 0x000, false); /* optional, leave up to BIOS */ + SMBIOS_BUILD_TABLE_PRE(0, T0_BASE, false); /* optional, leave up to BIOS */ SMBIOS_TABLE_SET_STR(0, vendor_str, type0.vendor); SMBIOS_TABLE_SET_STR(0, bios_version_str, type0.version); @@ -599,7 +649,7 @@ static void smbios_encode_uuid(struct smbios_uuid *uuid, QemuUUID *in) static void smbios_build_type_1_table(void) { - SMBIOS_BUILD_TABLE_PRE(1, 0x100, true); /* required */ + SMBIOS_BUILD_TABLE_PRE(1, T1_BASE, true); /* required */ SMBIOS_TABLE_SET_STR(1, manufacturer_str, type1.manufacturer); SMBIOS_TABLE_SET_STR(1, product_name_str, type1.product); @@ -619,7 +669,7 @@ static void smbios_build_type_1_table(void) static void smbios_build_type_2_table(void) { - SMBIOS_BUILD_TABLE_PRE(2, 0x200, false); /* optional */ + SMBIOS_BUILD_TABLE_PRE(2, T2_BASE, false); /* optional */ SMBIOS_TABLE_SET_STR(2, manufacturer_str, type2.manufacturer); SMBIOS_TABLE_SET_STR(2, product_str, type2.product); @@ -637,7 +687,7 @@ static void smbios_build_type_2_table(void) static void smbios_build_type_3_table(void) { - SMBIOS_BUILD_TABLE_PRE(3, 0x300, true); /* required */ + SMBIOS_BUILD_TABLE_PRE(3, T3_BASE, true); /* required */ SMBIOS_TABLE_SET_STR(3, manufacturer_str, type3.manufacturer); t->type = 0x01; /* Other */ @@ -661,16 +711,27 @@ static void smbios_build_type_3_table(void) static void smbios_build_type_4_table(MachineState *ms, unsigned instance) { char sock_str[128]; + size_t tbl_len = SMBIOS_TYPE_4_LEN_V28; - SMBIOS_BUILD_TABLE_PRE(4, 0x400 + instance, true); /* required */ + if (smbios_ep_type == SMBIOS_ENTRY_POINT_TYPE_64) { + tbl_len = SMBIOS_TYPE_4_LEN_V30; + } + + SMBIOS_BUILD_TABLE_PRE_SIZE(4, T4_BASE + instance, + true, tbl_len); /* required */ snprintf(sock_str, sizeof(sock_str), "%s%2x", type4.sock_pfx, instance); SMBIOS_TABLE_SET_STR(4, socket_designation_str, sock_str); t->processor_type = 0x03; /* CPU */ t->processor_family = 0x01; /* Other */ SMBIOS_TABLE_SET_STR(4, processor_manufacturer_str, type4.manufacturer); - t->processor_id[0] = cpu_to_le32(smbios_cpuid_version); - t->processor_id[1] = cpu_to_le32(smbios_cpuid_features); + if (type4.processor_id == 0) { + t->processor_id[0] = cpu_to_le32(smbios_cpuid_version); + t->processor_id[1] = cpu_to_le32(smbios_cpuid_features); + } else { + t->processor_id[0] = cpu_to_le32((uint32_t)type4.processor_id); + t->processor_id[1] = cpu_to_le32(type4.processor_id >> 32); + } SMBIOS_TABLE_SET_STR(4, processor_version_str, type4.version); t->voltage = 0; t->external_clock = cpu_to_le16(0); /* Unknown */ @@ -684,15 +745,44 @@ static void smbios_build_type_4_table(MachineState *ms, unsigned instance) SMBIOS_TABLE_SET_STR(4, serial_number_str, type4.serial); SMBIOS_TABLE_SET_STR(4, asset_tag_number_str, type4.asset); SMBIOS_TABLE_SET_STR(4, part_number_str, type4.part); - t->core_count = t->core_enabled = ms->smp.cores; - t->thread_count = ms->smp.threads; + + t->core_count = (ms->smp.cores > 255) ? 0xFF : ms->smp.cores; + t->core_enabled = t->core_count; + + t->thread_count = (ms->smp.threads > 255) ? 
0xFF : ms->smp.threads; + t->processor_characteristics = cpu_to_le16(0x02); /* Unknown */ t->processor_family2 = cpu_to_le16(0x01); /* Other */ + if (tbl_len == SMBIOS_TYPE_4_LEN_V30) { + t->core_count2 = t->core_enabled2 = cpu_to_le16(ms->smp.cores); + t->thread_count2 = cpu_to_le16(ms->smp.threads); + } + SMBIOS_BUILD_TABLE_POST; smbios_type4_count++; } +static void smbios_build_type_8_table(void) +{ + unsigned instance = 0; + struct type8_instance *t8; + + QTAILQ_FOREACH(t8, &type8, next) { + SMBIOS_BUILD_TABLE_PRE(8, T0_BASE + instance, true); + + SMBIOS_TABLE_SET_STR(8, internal_reference_str, t8->internal_reference); + SMBIOS_TABLE_SET_STR(8, external_reference_str, t8->external_reference); + /* most vendors seem to set this to None */ + t->internal_connector_type = 0x0; + t->external_connector_type = t8->connector_type; + t->port_type = t8->port_type; + + SMBIOS_BUILD_TABLE_POST; + instance++; + } +} + static void smbios_build_type_11_table(void) { char count_str[128]; @@ -702,7 +792,7 @@ static void smbios_build_type_11_table(void) return; } - SMBIOS_BUILD_TABLE_PRE(11, 0xe00, true); /* required */ + SMBIOS_BUILD_TABLE_PRE(11, T11_BASE, true); /* required */ snprintf(count_str, sizeof(count_str), "%zu", type11.nvalues); t->count = type11.nvalues; @@ -722,7 +812,7 @@ static void smbios_build_type_16_table(unsigned dimm_cnt) { uint64_t size_kb; - SMBIOS_BUILD_TABLE_PRE(16, 0x1000, true); /* required */ + SMBIOS_BUILD_TABLE_PRE(16, T16_BASE, true); /* required */ t->location = 0x01; /* Other */ t->use = 0x03; /* System memory */ @@ -749,7 +839,7 @@ static void smbios_build_type_17_table(unsigned instance, uint64_t size) char loc_str[128]; uint64_t size_mb; - SMBIOS_BUILD_TABLE_PRE(17, 0x1100 + instance, true); /* required */ + SMBIOS_BUILD_TABLE_PRE(17, T17_BASE + instance, true); /* required */ t->physical_memory_array_handle = cpu_to_le16(0x1000); /* Type 16 above */ t->memory_error_information_handle = cpu_to_le16(0xFFFE); /* Not provided */ @@ -785,12 +875,13 @@ static void smbios_build_type_17_table(unsigned instance, uint64_t size) SMBIOS_BUILD_TABLE_POST; } -static void smbios_build_type_19_table(unsigned instance, +static void smbios_build_type_19_table(unsigned instance, unsigned offset, uint64_t start, uint64_t size) { uint64_t end, start_kb, end_kb; - SMBIOS_BUILD_TABLE_PRE(19, 0x1300 + instance, true); /* required */ + SMBIOS_BUILD_TABLE_PRE(19, T19_BASE + offset + instance, + true); /* required */ end = start + size - 1; assert(end > start); @@ -814,7 +905,7 @@ static void smbios_build_type_19_table(unsigned instance, static void smbios_build_type_32_table(void) { - SMBIOS_BUILD_TABLE_PRE(32, 0x2000, true); /* required */ + SMBIOS_BUILD_TABLE_PRE(32, T32_BASE, true); /* required */ memset(t->reserved, 0, 6); t->boot_status = 0; /* No errors detected */ @@ -828,7 +919,7 @@ static void smbios_build_type_41_table(Error **errp) struct type41_instance *t41; QTAILQ_FOREACH(t41, &type41, next) { - SMBIOS_BUILD_TABLE_PRE(41, 0x2900 + instance, true); + SMBIOS_BUILD_TABLE_PRE(41, T41_BASE + instance, true); SMBIOS_TABLE_SET_STR(41, reference_designation_str, t41->designation); t->device_type = t41->kind; @@ -871,7 +962,7 @@ static void smbios_build_type_41_table(Error **errp) static void smbios_build_type_127_table(void) { - SMBIOS_BUILD_TABLE_PRE(127, 0x7F00, true); /* required */ + SMBIOS_BUILD_TABLE_PRE(127, T127_BASE, true); /* required */ SMBIOS_BUILD_TABLE_POST; } @@ -927,7 +1018,7 @@ void smbios_set_defaults(const char *manufacturer, const char *product, static void 
smbios_entry_point_setup(void) { switch (smbios_ep_type) { - case SMBIOS_ENTRY_POINT_21: + case SMBIOS_ENTRY_POINT_TYPE_32: memcpy(ep.ep21.anchor_string, "_SM_", 4); memcpy(ep.ep21.intermediate_anchor_string, "_DMI_", 5); ep.ep21.length = sizeof(struct smbios_21_entry_point); @@ -950,7 +1041,7 @@ static void smbios_entry_point_setup(void) ep.ep21.structure_table_address = cpu_to_le32(0); break; - case SMBIOS_ENTRY_POINT_30: + case SMBIOS_ENTRY_POINT_TYPE_64: memcpy(ep.ep30.anchor_string, "_SM3_", 5); ep.ep30.length = sizeof(struct smbios_30_entry_point); ep.ep30.entry_point_revision = 1; @@ -982,7 +1073,7 @@ void smbios_get_tables(MachineState *ms, uint8_t **anchor, size_t *anchor_len, Error **errp) { - unsigned i, dimm_cnt; + unsigned i, dimm_cnt, offset; if (smbios_legacy) { *tables = *anchor = NULL; @@ -1004,6 +1095,7 @@ void smbios_get_tables(MachineState *ms, smbios_build_type_4_table(ms, i); } + smbios_build_type_8_table(); smbios_build_type_11_table(); #define MAX_DIMM_SZ (16 * GiB) @@ -1012,6 +1104,16 @@ void smbios_get_tables(MachineState *ms, dimm_cnt = QEMU_ALIGN_UP(current_machine->ram_size, MAX_DIMM_SZ) / MAX_DIMM_SZ; + /* + * The offset determines if we need to keep additional space betweeen + * table 17 and table 19 header handle numbers so that they do + * not overlap. For example, for a VM with larger than 8 TB guest + * memory and DIMM like chunks of 16 GiB, the default space between + * the two tables (T19_BASE - T17_BASE = 512) is not enough. + */ + offset = (dimm_cnt > (T19_BASE - T17_BASE)) ? \ + dimm_cnt - (T19_BASE - T17_BASE) : 0; + smbios_build_type_16_table(dimm_cnt); for (i = 0; i < dimm_cnt; i++) { @@ -1019,10 +1121,16 @@ void smbios_get_tables(MachineState *ms, } for (i = 0; i < mem_array_size; i++) { - smbios_build_type_19_table(i, mem_array[i].address, + smbios_build_type_19_table(i, offset, mem_array[i].address, mem_array[i].length); } + /* + * make sure 16 bit handle numbers in the headers of tables 19 + * and 32 do not overlap. + */ + assert((mem_array_size + offset) < (T32_BASE - T19_BASE)); + smbios_build_type_32_table(); smbios_build_type_38_table(); smbios_build_type_41_table(errp); @@ -1163,13 +1271,15 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) return; } - if (test_bit(header->type, have_fields_bitmap)) { - error_setg(errp, - "can't load type %d struct, fields already specified!", - header->type); - return; + if (header->type <= SMBIOS_MAX_TYPE) { + if (test_bit(header->type, have_fields_bitmap)) { + error_setg(errp, + "can't load type %d struct, fields already specified!", + header->type); + return; + } + set_bit(header->type, have_binfile_bitmap); } - set_bit(header->type, have_binfile_bitmap); if (header->type == 4) { smbios_type4_count++; @@ -1292,6 +1402,8 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) save_opt(&type4.serial, opts, "serial"); save_opt(&type4.asset, opts, "asset"); save_opt(&type4.part, opts, "part"); + /* If the value is 0, it will take the value from the CPU model. 
*/ + type4.processor_id = qemu_opt_get_number(opts, "processor-id", 0); type4.max_speed = qemu_opt_get_number(opts, "max-speed", DEFAULT_CPU_SPEED); type4.current_speed = qemu_opt_get_number(opts, "current-speed", @@ -1302,6 +1414,18 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) UINT16_MAX); } return; + case 8: + if (!qemu_opts_validate(opts, qemu_smbios_type8_opts, errp)) { + return; + } + struct type8_instance *t; + t = g_new0(struct type8_instance, 1); + save_opt(&t->internal_reference, opts, "internal_reference"); + save_opt(&t->external_reference, opts, "external_reference"); + t->connector_type = qemu_opt_get_number(opts, "connector_type", 0); + t->port_type = qemu_opt_get_number(opts, "port_type", 0); + QTAILQ_INSERT_TAIL(&type8, t, next); + return; case 11: if (!qemu_opts_validate(opts, qemu_smbios_type11_opts, errp)) { return; diff --git a/hw/smbios/smbios_build.h b/hw/smbios/smbios_build.h index 56b5a1e3f3..351660024e 100644 --- a/hw/smbios/smbios_build.h +++ b/hw/smbios/smbios_build.h @@ -27,6 +27,11 @@ extern unsigned smbios_table_max; extern unsigned smbios_table_cnt; #define SMBIOS_BUILD_TABLE_PRE(tbl_type, tbl_handle, tbl_required) \ + SMBIOS_BUILD_TABLE_PRE_SIZE(tbl_type, tbl_handle, tbl_required, \ + sizeof(struct smbios_type_##tbl_type))\ + +#define SMBIOS_BUILD_TABLE_PRE_SIZE(tbl_type, tbl_handle, \ + tbl_required, tbl_len) \ struct smbios_type_##tbl_type *t; \ size_t t_off; /* table offset into smbios_tables */ \ int str_index = 0; \ @@ -39,12 +44,12 @@ extern unsigned smbios_table_cnt; /* use offset of table t within smbios_tables */ \ /* (pointer must be updated after each realloc) */ \ t_off = smbios_tables_len; \ - smbios_tables_len += sizeof(*t); \ + smbios_tables_len += tbl_len; \ smbios_tables = g_realloc(smbios_tables, smbios_tables_len); \ t = (struct smbios_type_##tbl_type *)(smbios_tables + t_off); \ \ t->header.type = tbl_type; \ - t->header.length = sizeof(*t); \ + t->header.length = tbl_len; \ t->header.handle = cpu_to_le16(tbl_handle); \ } while (0) diff --git a/hw/sparc/leon3.c b/hw/sparc/leon3.c index 7b4dec1721..1e39d2e2d0 100644 --- a/hw/sparc/leon3.c +++ b/hw/sparc/leon3.c @@ -26,7 +26,6 @@ #include "qemu/units.h" #include "qemu/error-report.h" #include "qapi/error.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "cpu.h" #include "hw/irq.h" @@ -241,7 +240,7 @@ static void leon3_generic_hw_init(MachineState *machine) cpu_sparc_set_id(env, 0); /* Reset data */ - reset_info = g_malloc0(sizeof(ResetData)); + reset_info = g_new0(ResetData, 1); reset_info->cpu = cpu; reset_info->sp = LEON3_RAM_OFFSET + ram_size; qemu_register_reset(main_cpu_reset, reset_info); diff --git a/hw/sparc/sun4m.c b/hw/sparc/sun4m.c index 42e139849e..d9288326d6 100644 --- a/hw/sparc/sun4m.c +++ b/hw/sparc/sun4m.c @@ -26,7 +26,6 @@ #include "qemu/units.h" #include "qapi/error.h" #include "qemu/datadir.h" -#include "qemu-common.h" #include "cpu.h" #include "hw/sysbus.h" #include "qemu/error-report.h" @@ -803,11 +802,11 @@ static void cpu_devinit(const char *cpu_type, unsigned int id, cpu = SPARC_CPU(object_new(cpu_type)); env = &cpu->env; - cpu_sparc_set_id(env, id); qemu_register_reset(sun4m_cpu_reset, cpu); object_property_set_bool(OBJECT(cpu), "start-powered-off", id != 0, &error_fatal); qdev_realize_and_unref(DEVICE(cpu), NULL, &error_fatal); + cpu_sparc_set_id(env, id); *cpu_irqs = qemu_allocate_irqs(cpu_set_irq, cpu, MAX_PILS); env->prom_addr = prom_addr; } @@ -832,8 +831,7 @@ static void sun4m_hw_init(MachineState *machine) SysBusDevice *s; unsigned 
int smp_cpus = machine->smp.cpus; unsigned int max_cpus = machine->smp.max_cpus; - Object *ram_memdev = object_resolve_path_type(machine->ram_memdev_id, - TYPE_MEMORY_BACKEND, NULL); + HostMemoryBackend *ram_memdev = machine->memdev; NICInfo *nd = &nd_table[0]; if (machine->ram_size > hwdef->max_mem) { @@ -853,7 +851,7 @@ static void sun4m_hw_init(MachineState *machine) /* Create and map RAM frontend */ dev = qdev_new("memory"); - object_property_set_link(OBJECT(dev), "memdev", ram_memdev, &error_fatal); + object_property_set_link(OBJECT(dev), "memdev", OBJECT(ram_memdev), &error_fatal); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0); @@ -921,6 +919,7 @@ static void sun4m_hw_init(MachineState *machine) /* sbus irq 5 */ cg3_init(hwdef->tcx_base, slavio_irq[11], 0x00100000, graphic_width, graphic_height, graphic_depth); + vga_interface_created = true; } else { /* If no display specified, default to TCX */ if (graphic_depth != 8 && graphic_depth != 24) { @@ -936,6 +935,7 @@ static void sun4m_hw_init(MachineState *machine) tcx_init(hwdef->tcx_base, slavio_irq[11], 0x00100000, graphic_width, graphic_height, graphic_depth); + vga_interface_created = true; } } @@ -1049,7 +1049,7 @@ static void sun4m_hw_init(MachineState *machine) machine->ram_size, &initrd_size); nvram_init(nvram, (uint8_t *)&nd->macaddr, machine->kernel_cmdline, - machine->boot_order, machine->ram_size, kernel_size, + machine->boot_config.order, machine->ram_size, kernel_size, graphic_width, graphic_height, graphic_depth, hwdef->nvram_machine_id, "Sun4m"); @@ -1090,7 +1090,7 @@ static void sun4m_hw_init(MachineState *machine) } fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, INITRD_LOAD_ADDR); fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); - fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, machine->boot_order[0]); + fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, machine->boot_config.order[0]); qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); } diff --git a/hw/sparc64/niagara.c b/hw/sparc64/niagara.c index f3e42d0326..ccad2c43a3 100644 --- a/hw/sparc64/niagara.c +++ b/hw/sparc64/niagara.c @@ -98,7 +98,7 @@ static void add_rom_or_fail(const char *file, const hwaddr addr) static void niagara_init(MachineState *machine) { NiagaraBoardState *s = g_new(NiagaraBoardState, 1); - DriveInfo *dinfo = drive_get_next(IF_PFLASH); + DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0); MemoryRegion *sysmem = get_system_memory(); /* init CPUs */ diff --git a/hw/sparc64/sparc64.c b/hw/sparc64/sparc64.c index 8654e955eb..72f0849f50 100644 --- a/hw/sparc64/sparc64.c +++ b/hw/sparc64/sparc64.c @@ -81,7 +81,7 @@ static CPUTimer *cpu_timer_create(const char *name, SPARCCPU *cpu, QEMUBHFunc *cb, uint32_t frequency, uint64_t disabled_mask, uint64_t npt_mask) { - CPUTimer *timer = g_malloc0(sizeof(CPUTimer)); + CPUTimer *timer = g_new0(CPUTimer, 1); timer->name = name; timer->frequency = frequency; @@ -288,7 +288,7 @@ SPARCCPU *sparc64_cpu_devinit(const char *cpu_type, uint64_t prom_addr) hstick_frequency, TICK_INT_DIS, TICK_NPT_MASK); - reset_info = g_malloc0(sizeof(ResetData)); + reset_info = g_new0(ResetData, 1); reset_info->cpu = cpu; reset_info->prom_addr = prom_addr; qemu_register_reset(main_cpu_reset, reset_info); diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c index cda7df36e3..387181ff77 100644 --- a/hw/sparc64/sun4u.c +++ b/hw/sparc64/sun4u.c @@ -26,7 +26,6 @@ #include "qemu/units.h" #include "qemu/error-report.h" #include "qapi/error.h" -#include "qemu-common.h" #include "qemu/datadir.h" 
#include "cpu.h" #include "hw/pci/pci.h" @@ -67,7 +66,6 @@ #define PBM_PCI_IO_BASE (PBM_SPECIAL_BASE + 0x02000000ULL) #define PROM_FILENAME "openbios-sparc64" #define NVRAM_SIZE 0x2000 -#define MAX_IDE_BUS 2 #define BIOS_CFG_IOPORT 0x510 #define FW_CFG_SPARC64_WIDTH (FW_CFG_ARCH_LOCAL + 0x00) #define FW_CFG_SPARC64_HEIGHT (FW_CFG_ARCH_LOCAL + 0x01) @@ -335,7 +333,7 @@ static void ebus_realize(PCIDevice *pci_dev, Error **errp) parallel_hds_isa_init(s->isa_bus, MAX_PARALLEL_PORTS); /* Keyboard */ - isa_create_simple(s->isa_bus, "i8042"); + isa_create_simple(s->isa_bus, TYPE_I8042); /* Floppy */ for (i = 0; i < MAX_FD; i++) { @@ -633,6 +631,7 @@ static void sun4uv_init(MemoryRegion *address_space_mem, switch (vga_interface_type) { case VGA_STD: pci_create_simple(pci_busA, PCI_DEVFN(2, 0), "VGA"); + vga_interface_created = true; break; case VGA_NONE: break; @@ -695,7 +694,7 @@ static void sun4uv_init(MemoryRegion *address_space_mem, &kernel_addr, &kernel_entry); sun4u_NVRAM_set_params(nvram, NVRAM_SIZE, "Sun4u", machine->ram_size, - machine->boot_order, + machine->boot_config.order, kernel_addr, kernel_size, machine->kernel_cmdline, initrd_addr, initrd_size, @@ -727,7 +726,7 @@ static void sun4uv_init(MemoryRegion *address_space_mem, } fw_cfg_add_i64(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr); fw_cfg_add_i64(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); - fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, machine->boot_order[0]); + fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, machine->boot_config.order[0]); fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_WIDTH, graphic_width); fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_HEIGHT, graphic_height); diff --git a/hw/sparc64/sun4u_iommu.c b/hw/sparc64/sun4u_iommu.c index 9178277f82..1c1dca712e 100644 --- a/hw/sparc64/sun4u_iommu.c +++ b/hw/sparc64/sun4u_iommu.c @@ -165,7 +165,7 @@ static IOMMUTLBEntry sun4u_translate_iommu(IOMMUMemoryRegion *iommu, } if (tte & IOMMU_TTE_DATA_W) { - /* Writeable */ + /* Writable */ ret.perm = IOMMU_RW; } else { ret.perm = IOMMU_RO; diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c index 331a2c5446..22df4be528 100644 --- a/hw/ssi/aspeed_smc.c +++ b/hw/ssi/aspeed_smc.c @@ -124,6 +124,13 @@ /* SPI dummy cycle data */ #define R_DUMMY_DATA (0x54 / 4) +/* FMC_WDT2 Control/Status Register for Alternate Boot (AST2600) */ +#define R_FMC_WDT2_CTRL (0x64 / 4) +#define FMC_WDT2_CTRL_ALT_BOOT_MODE BIT(6) /* O: 2 chips 1: 1 chip */ +#define FMC_WDT2_CTRL_SINGLE_BOOT_MODE BIT(5) +#define FMC_WDT2_CTRL_BOOT_SOURCE BIT(4) /* O: primary 1: alternate */ +#define FMC_WDT2_CTRL_EN BIT(0) + /* DMA Control/Status Register */ #define R_DMA_CTRL (0x80 / 4) #define DMA_CTRL_REQUEST (1 << 31) @@ -162,11 +169,6 @@ #define ASPEED_SMC_R_SPI_MAX (0x20 / 4) #define ASPEED_SMC_R_SMC_MAX (0x20 / 4) -#define ASPEED_SOC_SMC_FLASH_BASE 0x10000000 -#define ASPEED_SOC_FMC_FLASH_BASE 0x20000000 -#define ASPEED_SOC_SPI_FLASH_BASE 0x30000000 -#define ASPEED_SOC_SPI2_FLASH_BASE 0x38000000 - /* * DMA DRAM addresses should be 4 bytes aligned and the valid address * range is 0x40000000 - 0x5FFFFFFF (AST2400) @@ -179,8 +181,8 @@ * 0: 4 bytes * 0x7FFFFF: 32M bytes */ -#define DMA_DRAM_ADDR(s, val) ((val) & (s)->ctrl->dma_dram_mask) -#define DMA_FLASH_ADDR(s, val) ((val) & (s)->ctrl->dma_flash_mask) +#define DMA_DRAM_ADDR(asc, val) ((val) & (asc)->dma_dram_mask) +#define DMA_FLASH_ADDR(asc, val) ((val) & (asc)->dma_flash_mask) #define DMA_LENGTH(val) ((val) & 0x01FFFFFC) /* Flash opcodes. */ @@ -194,332 +196,48 @@ * controller. 
These can be changed when board is initialized with the * Segment Address Registers. */ -static const AspeedSegments aspeed_segments_legacy[] = { - { 0x10000000, 32 * 1024 * 1024 }, -}; - -static const AspeedSegments aspeed_segments_fmc[] = { - { 0x20000000, 64 * 1024 * 1024 }, /* start address is readonly */ - { 0x24000000, 32 * 1024 * 1024 }, - { 0x26000000, 32 * 1024 * 1024 }, - { 0x28000000, 32 * 1024 * 1024 }, - { 0x2A000000, 32 * 1024 * 1024 } -}; - -static const AspeedSegments aspeed_segments_spi[] = { - { 0x30000000, 64 * 1024 * 1024 }, -}; - -static const AspeedSegments aspeed_segments_ast2500_fmc[] = { - { 0x20000000, 128 * 1024 * 1024 }, /* start address is readonly */ - { 0x28000000, 32 * 1024 * 1024 }, - { 0x2A000000, 32 * 1024 * 1024 }, -}; - -static const AspeedSegments aspeed_segments_ast2500_spi1[] = { - { 0x30000000, 32 * 1024 * 1024 }, /* start address is readonly */ - { 0x32000000, 96 * 1024 * 1024 }, /* end address is readonly */ -}; - -static const AspeedSegments aspeed_segments_ast2500_spi2[] = { - { 0x38000000, 32 * 1024 * 1024 }, /* start address is readonly */ - { 0x3A000000, 96 * 1024 * 1024 }, /* end address is readonly */ -}; -static uint32_t aspeed_smc_segment_to_reg(const AspeedSMCState *s, - const AspeedSegments *seg); -static void aspeed_smc_reg_to_segment(const AspeedSMCState *s, uint32_t reg, - AspeedSegments *seg); -static void aspeed_smc_dma_ctrl(AspeedSMCState *s, uint32_t value); - -/* - * AST2600 definitions - */ -#define ASPEED26_SOC_FMC_FLASH_BASE 0x20000000 -#define ASPEED26_SOC_SPI_FLASH_BASE 0x30000000 -#define ASPEED26_SOC_SPI2_FLASH_BASE 0x50000000 - -static const AspeedSegments aspeed_segments_ast2600_fmc[] = { - { 0x0, 128 * MiB }, /* start address is readonly */ - { 128 * MiB, 128 * MiB }, /* default is disabled but needed for -kernel */ - { 0x0, 0 }, /* disabled */ -}; - -static const AspeedSegments aspeed_segments_ast2600_spi1[] = { - { 0x0, 128 * MiB }, /* start address is readonly */ - { 0x0, 0 }, /* disabled */ -}; - -static const AspeedSegments aspeed_segments_ast2600_spi2[] = { - { 0x0, 128 * MiB }, /* start address is readonly */ - { 0x0, 0 }, /* disabled */ - { 0x0, 0 }, /* disabled */ -}; - -static uint32_t aspeed_2600_smc_segment_to_reg(const AspeedSMCState *s, - const AspeedSegments *seg); -static void aspeed_2600_smc_reg_to_segment(const AspeedSMCState *s, - uint32_t reg, AspeedSegments *seg); -static void aspeed_2600_smc_dma_ctrl(AspeedSMCState *s, uint32_t value); +static const AspeedSegments aspeed_2500_spi1_segments[]; +static const AspeedSegments aspeed_2500_spi2_segments[]; #define ASPEED_SMC_FEATURE_DMA 0x1 #define ASPEED_SMC_FEATURE_DMA_GRANT 0x2 +#define ASPEED_SMC_FEATURE_WDT_CONTROL 0x4 -static inline bool aspeed_smc_has_dma(const AspeedSMCState *s) +static inline bool aspeed_smc_has_dma(const AspeedSMCClass *asc) { - return !!(s->ctrl->features & ASPEED_SMC_FEATURE_DMA); + return !!(asc->features & ASPEED_SMC_FEATURE_DMA); } -static const AspeedSMCController controllers[] = { - { - .name = "aspeed.smc-ast2400", - .r_conf = R_CONF, - .r_ce_ctrl = R_CE_CTRL, - .r_ctrl0 = R_CTRL0, - .r_timings = R_TIMINGS, - .nregs_timings = 1, - .conf_enable_w0 = CONF_ENABLE_W0, - .max_peripherals = 1, - .segments = aspeed_segments_legacy, - .flash_window_base = ASPEED_SOC_SMC_FLASH_BASE, - .flash_window_size = 0x6000000, - .features = 0x0, - .nregs = ASPEED_SMC_R_SMC_MAX, - .segment_to_reg = aspeed_smc_segment_to_reg, - .reg_to_segment = aspeed_smc_reg_to_segment, - .dma_ctrl = aspeed_smc_dma_ctrl, - }, { - .name = 
"aspeed.fmc-ast2400", - .r_conf = R_CONF, - .r_ce_ctrl = R_CE_CTRL, - .r_ctrl0 = R_CTRL0, - .r_timings = R_TIMINGS, - .nregs_timings = 1, - .conf_enable_w0 = CONF_ENABLE_W0, - .max_peripherals = 5, - .segments = aspeed_segments_fmc, - .flash_window_base = ASPEED_SOC_FMC_FLASH_BASE, - .flash_window_size = 0x10000000, - .features = ASPEED_SMC_FEATURE_DMA, - .dma_flash_mask = 0x0FFFFFFC, - .dma_dram_mask = 0x1FFFFFFC, - .nregs = ASPEED_SMC_R_MAX, - .segment_to_reg = aspeed_smc_segment_to_reg, - .reg_to_segment = aspeed_smc_reg_to_segment, - .dma_ctrl = aspeed_smc_dma_ctrl, - }, { - .name = "aspeed.spi1-ast2400", - .r_conf = R_SPI_CONF, - .r_ce_ctrl = 0xff, - .r_ctrl0 = R_SPI_CTRL0, - .r_timings = R_SPI_TIMINGS, - .nregs_timings = 1, - .conf_enable_w0 = SPI_CONF_ENABLE_W0, - .max_peripherals = 1, - .segments = aspeed_segments_spi, - .flash_window_base = ASPEED_SOC_SPI_FLASH_BASE, - .flash_window_size = 0x10000000, - .features = 0x0, - .nregs = ASPEED_SMC_R_SPI_MAX, - .segment_to_reg = aspeed_smc_segment_to_reg, - .reg_to_segment = aspeed_smc_reg_to_segment, - .dma_ctrl = aspeed_smc_dma_ctrl, - }, { - .name = "aspeed.fmc-ast2500", - .r_conf = R_CONF, - .r_ce_ctrl = R_CE_CTRL, - .r_ctrl0 = R_CTRL0, - .r_timings = R_TIMINGS, - .nregs_timings = 1, - .conf_enable_w0 = CONF_ENABLE_W0, - .max_peripherals = 3, - .segments = aspeed_segments_ast2500_fmc, - .flash_window_base = ASPEED_SOC_FMC_FLASH_BASE, - .flash_window_size = 0x10000000, - .features = ASPEED_SMC_FEATURE_DMA, - .dma_flash_mask = 0x0FFFFFFC, - .dma_dram_mask = 0x3FFFFFFC, - .nregs = ASPEED_SMC_R_MAX, - .segment_to_reg = aspeed_smc_segment_to_reg, - .reg_to_segment = aspeed_smc_reg_to_segment, - .dma_ctrl = aspeed_smc_dma_ctrl, - }, { - .name = "aspeed.spi1-ast2500", - .r_conf = R_CONF, - .r_ce_ctrl = R_CE_CTRL, - .r_ctrl0 = R_CTRL0, - .r_timings = R_TIMINGS, - .nregs_timings = 1, - .conf_enable_w0 = CONF_ENABLE_W0, - .max_peripherals = 2, - .segments = aspeed_segments_ast2500_spi1, - .flash_window_base = ASPEED_SOC_SPI_FLASH_BASE, - .flash_window_size = 0x8000000, - .features = 0x0, - .nregs = ASPEED_SMC_R_MAX, - .segment_to_reg = aspeed_smc_segment_to_reg, - .reg_to_segment = aspeed_smc_reg_to_segment, - .dma_ctrl = aspeed_smc_dma_ctrl, - }, { - .name = "aspeed.spi2-ast2500", - .r_conf = R_CONF, - .r_ce_ctrl = R_CE_CTRL, - .r_ctrl0 = R_CTRL0, - .r_timings = R_TIMINGS, - .nregs_timings = 1, - .conf_enable_w0 = CONF_ENABLE_W0, - .max_peripherals = 2, - .segments = aspeed_segments_ast2500_spi2, - .flash_window_base = ASPEED_SOC_SPI2_FLASH_BASE, - .flash_window_size = 0x8000000, - .features = 0x0, - .nregs = ASPEED_SMC_R_MAX, - .segment_to_reg = aspeed_smc_segment_to_reg, - .reg_to_segment = aspeed_smc_reg_to_segment, - .dma_ctrl = aspeed_smc_dma_ctrl, - }, { - .name = "aspeed.fmc-ast2600", - .r_conf = R_CONF, - .r_ce_ctrl = R_CE_CTRL, - .r_ctrl0 = R_CTRL0, - .r_timings = R_TIMINGS, - .nregs_timings = 1, - .conf_enable_w0 = CONF_ENABLE_W0, - .max_peripherals = 3, - .segments = aspeed_segments_ast2600_fmc, - .flash_window_base = ASPEED26_SOC_FMC_FLASH_BASE, - .flash_window_size = 0x10000000, - .features = ASPEED_SMC_FEATURE_DMA, - .dma_flash_mask = 0x0FFFFFFC, - .dma_dram_mask = 0x3FFFFFFC, - .nregs = ASPEED_SMC_R_MAX, - .segment_to_reg = aspeed_2600_smc_segment_to_reg, - .reg_to_segment = aspeed_2600_smc_reg_to_segment, - .dma_ctrl = aspeed_2600_smc_dma_ctrl, - }, { - .name = "aspeed.spi1-ast2600", - .r_conf = R_CONF, - .r_ce_ctrl = R_CE_CTRL, - .r_ctrl0 = R_CTRL0, - .r_timings = R_TIMINGS, - .nregs_timings = 2, - .conf_enable_w0 = 
CONF_ENABLE_W0, - .max_peripherals = 2, - .segments = aspeed_segments_ast2600_spi1, - .flash_window_base = ASPEED26_SOC_SPI_FLASH_BASE, - .flash_window_size = 0x10000000, - .features = ASPEED_SMC_FEATURE_DMA | - ASPEED_SMC_FEATURE_DMA_GRANT, - .dma_flash_mask = 0x0FFFFFFC, - .dma_dram_mask = 0x3FFFFFFC, - .nregs = ASPEED_SMC_R_MAX, - .segment_to_reg = aspeed_2600_smc_segment_to_reg, - .reg_to_segment = aspeed_2600_smc_reg_to_segment, - .dma_ctrl = aspeed_2600_smc_dma_ctrl, - }, { - .name = "aspeed.spi2-ast2600", - .r_conf = R_CONF, - .r_ce_ctrl = R_CE_CTRL, - .r_ctrl0 = R_CTRL0, - .r_timings = R_TIMINGS, - .nregs_timings = 3, - .conf_enable_w0 = CONF_ENABLE_W0, - .max_peripherals = 3, - .segments = aspeed_segments_ast2600_spi2, - .flash_window_base = ASPEED26_SOC_SPI2_FLASH_BASE, - .flash_window_size = 0x10000000, - .features = ASPEED_SMC_FEATURE_DMA | - ASPEED_SMC_FEATURE_DMA_GRANT, - .dma_flash_mask = 0x0FFFFFFC, - .dma_dram_mask = 0x3FFFFFFC, - .nregs = ASPEED_SMC_R_MAX, - .segment_to_reg = aspeed_2600_smc_segment_to_reg, - .reg_to_segment = aspeed_2600_smc_reg_to_segment, - .dma_ctrl = aspeed_2600_smc_dma_ctrl, - }, -}; - -/* - * The Segment Registers of the AST2400 and AST2500 have a 8MB - * unit. The address range of a flash SPI peripheral is encoded with - * absolute addresses which should be part of the overall controller - * window. - */ -static uint32_t aspeed_smc_segment_to_reg(const AspeedSMCState *s, - const AspeedSegments *seg) +static inline bool aspeed_smc_has_wdt_control(const AspeedSMCClass *asc) { - uint32_t reg = 0; - reg |= ((seg->addr >> 23) & SEG_START_MASK) << SEG_START_SHIFT; - reg |= (((seg->addr + seg->size) >> 23) & SEG_END_MASK) << SEG_END_SHIFT; - return reg; + return !!(asc->features & ASPEED_SMC_FEATURE_WDT_CONTROL); } -static void aspeed_smc_reg_to_segment(const AspeedSMCState *s, - uint32_t reg, AspeedSegments *seg) -{ - seg->addr = ((reg >> SEG_START_SHIFT) & SEG_START_MASK) << 23; - seg->size = (((reg >> SEG_END_SHIFT) & SEG_END_MASK) << 23) - seg->addr; -} - -/* - * The Segment Registers of the AST2600 have a 1MB unit. The address - * range of a flash SPI peripheral is encoded with offsets in the overall - * controller window. The previous SoC AST2400 and AST2500 used - * absolute addresses. Only bits [27:20] are relevant and the end - * address is an upper bound limit. - */ -#define AST2600_SEG_ADDR_MASK 0x0ff00000 - -static uint32_t aspeed_2600_smc_segment_to_reg(const AspeedSMCState *s, - const AspeedSegments *seg) -{ - uint32_t reg = 0; - - /* Disabled segments have a nil register */ - if (!seg->size) { - return 0; - } - - reg |= (seg->addr & AST2600_SEG_ADDR_MASK) >> 16; /* start offset */ - reg |= (seg->addr + seg->size - 1) & AST2600_SEG_ADDR_MASK; /* end offset */ - return reg; -} - -static void aspeed_2600_smc_reg_to_segment(const AspeedSMCState *s, - uint32_t reg, AspeedSegments *seg) -{ - uint32_t start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK; - uint32_t end_offset = reg & AST2600_SEG_ADDR_MASK; - - if (reg) { - seg->addr = s->ctrl->flash_window_base + start_offset; - seg->size = end_offset + MiB - start_offset; - } else { - seg->addr = s->ctrl->flash_window_base; - seg->size = 0; - } -} +#define aspeed_smc_error(fmt, ...) 
\ + qemu_log_mask(LOG_GUEST_ERROR, "%s: " fmt "\n", __func__, ## __VA_ARGS__) static bool aspeed_smc_flash_overlap(const AspeedSMCState *s, const AspeedSegments *new, int cs) { + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); AspeedSegments seg; int i; - for (i = 0; i < s->ctrl->max_peripherals; i++) { + for (i = 0; i < asc->cs_num_max; i++) { if (i == cs) { continue; } - s->ctrl->reg_to_segment(s, s->regs[R_SEG_ADDR0 + i], &seg); + asc->reg_to_segment(s, s->regs[R_SEG_ADDR0 + i], &seg); if (new->addr + new->size > seg.addr && new->addr < seg.addr + seg.size) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment CS%d [ 0x%" - HWADDR_PRIx" - 0x%"HWADDR_PRIx" ] overlaps with " - "CS%d [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n", - s->ctrl->name, cs, new->addr, new->addr + new->size, - i, seg.addr, seg.addr + seg.size); + aspeed_smc_error("new segment CS%d [ 0x%" + HWADDR_PRIx" - 0x%"HWADDR_PRIx" ] overlaps with " + "CS%d [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]", + cs, new->addr, new->addr + new->size, + i, seg.addr, seg.addr + seg.size); return true; } } @@ -529,70 +247,74 @@ static bool aspeed_smc_flash_overlap(const AspeedSMCState *s, static void aspeed_smc_flash_set_segment_region(AspeedSMCState *s, int cs, uint64_t regval) { + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); AspeedSMCFlash *fl = &s->flashes[cs]; AspeedSegments seg; - s->ctrl->reg_to_segment(s, regval, &seg); + asc->reg_to_segment(s, regval, &seg); memory_region_transaction_begin(); memory_region_set_size(&fl->mmio, seg.size); - memory_region_set_address(&fl->mmio, seg.addr - s->ctrl->flash_window_base); + memory_region_set_address(&fl->mmio, seg.addr - asc->flash_window_base); memory_region_set_enabled(&fl->mmio, !!seg.size); memory_region_transaction_commit(); + if (asc->segment_addr_mask) { + regval &= asc->segment_addr_mask; + } + s->regs[R_SEG_ADDR0 + cs] = regval; } static void aspeed_smc_flash_set_segment(AspeedSMCState *s, int cs, uint64_t new) { + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); AspeedSegments seg; - s->ctrl->reg_to_segment(s, new, &seg); + asc->reg_to_segment(s, new, &seg); trace_aspeed_smc_flash_set_segment(cs, new, seg.addr, seg.addr + seg.size); /* The start address of CS0 is read-only */ - if (cs == 0 && seg.addr != s->ctrl->flash_window_base) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Tried to change CS0 start address to 0x%" - HWADDR_PRIx "\n", s->ctrl->name, seg.addr); - seg.addr = s->ctrl->flash_window_base; - new = s->ctrl->segment_to_reg(s, &seg); + if (cs == 0 && seg.addr != asc->flash_window_base) { + aspeed_smc_error("Tried to change CS0 start address to 0x%" + HWADDR_PRIx, seg.addr); + seg.addr = asc->flash_window_base; + new = asc->segment_to_reg(s, &seg); } /* * The end address of the AST2500 spi controllers is also * read-only. 
*/ - if ((s->ctrl->segments == aspeed_segments_ast2500_spi1 || - s->ctrl->segments == aspeed_segments_ast2500_spi2) && - cs == s->ctrl->max_peripherals && - seg.addr + seg.size != s->ctrl->segments[cs].addr + - s->ctrl->segments[cs].size) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Tried to change CS%d end address to 0x%" - HWADDR_PRIx "\n", s->ctrl->name, cs, seg.addr + seg.size); - seg.size = s->ctrl->segments[cs].addr + s->ctrl->segments[cs].size - + if ((asc->segments == aspeed_2500_spi1_segments || + asc->segments == aspeed_2500_spi2_segments) && + cs == asc->cs_num_max && + seg.addr + seg.size != asc->segments[cs].addr + + asc->segments[cs].size) { + aspeed_smc_error("Tried to change CS%d end address to 0x%" + HWADDR_PRIx, cs, seg.addr + seg.size); + seg.size = asc->segments[cs].addr + asc->segments[cs].size - seg.addr; - new = s->ctrl->segment_to_reg(s, &seg); + new = asc->segment_to_reg(s, &seg); } /* Keep the segment in the overall flash window */ if (seg.size && - (seg.addr + seg.size <= s->ctrl->flash_window_base || - seg.addr > s->ctrl->flash_window_base + s->ctrl->flash_window_size)) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment for CS%d is invalid : " - "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n", - s->ctrl->name, cs, seg.addr, seg.addr + seg.size); + (seg.addr + seg.size <= asc->flash_window_base || + seg.addr > asc->flash_window_base + asc->flash_window_size)) { + aspeed_smc_error("new segment for CS%d is invalid : " + "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]", + cs, seg.addr, seg.addr + seg.size); return; } /* Check start address vs. alignment */ if (seg.size && !QEMU_IS_ALIGNED(seg.addr, seg.size)) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment for CS%d is not " - "aligned : [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n", - s->ctrl->name, cs, seg.addr, seg.addr + seg.size); + aspeed_smc_error("new segment for CS%d is not " + "aligned : [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]", + cs, seg.addr, seg.addr + seg.size); } /* And segments should not overlap (in the specs) */ @@ -605,16 +327,15 @@ static void aspeed_smc_flash_set_segment(AspeedSMCState *s, int cs, static uint64_t aspeed_smc_flash_default_read(void *opaque, hwaddr addr, unsigned size) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: To 0x%" HWADDR_PRIx " of size %u" - PRIx64 "\n", __func__, addr, size); + aspeed_smc_error("To 0x%" HWADDR_PRIx " of size %u", addr, size); return 0; } static void aspeed_smc_flash_default_write(void *opaque, hwaddr addr, uint64_t data, unsigned size) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: To 0x%" HWADDR_PRIx " of size %u: 0x%" - PRIx64 "\n", __func__, addr, size, data); + aspeed_smc_error("To 0x%" HWADDR_PRIx " of size %u: 0x%" PRIx64, + addr, size, data); } static const MemoryRegionOps aspeed_smc_flash_default_ops = { @@ -631,20 +352,20 @@ static inline int aspeed_smc_flash_mode(const AspeedSMCFlash *fl) { const AspeedSMCState *s = fl->controller; - return s->regs[s->r_ctrl0 + fl->id] & CTRL_CMD_MODE_MASK; + return s->regs[s->r_ctrl0 + fl->cs] & CTRL_CMD_MODE_MASK; } static inline bool aspeed_smc_is_writable(const AspeedSMCFlash *fl) { const AspeedSMCState *s = fl->controller; - return s->regs[s->r_conf] & (1 << (s->conf_enable_w0 + fl->id)); + return s->regs[s->r_conf] & (1 << (s->conf_enable_w0 + fl->cs)); } static inline int aspeed_smc_flash_cmd(const AspeedSMCFlash *fl) { const AspeedSMCState *s = fl->controller; - int cmd = (s->regs[s->r_ctrl0 + fl->id] >> CTRL_CMD_SHIFT) & CTRL_CMD_MASK; + int cmd = (s->regs[s->r_ctrl0 + fl->cs] >> CTRL_CMD_SHIFT) & CTRL_CMD_MASK; /* * In read 
mode, the default SPI command is READ (0x3). In other @@ -657,21 +378,22 @@ static inline int aspeed_smc_flash_cmd(const AspeedSMCFlash *fl) } if (!cmd) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: no command defined for mode %d\n", - __func__, aspeed_smc_flash_mode(fl)); + aspeed_smc_error("no command defined for mode %d", + aspeed_smc_flash_mode(fl)); } return cmd; } -static inline int aspeed_smc_flash_is_4byte(const AspeedSMCFlash *fl) +static inline int aspeed_smc_flash_addr_width(const AspeedSMCFlash *fl) { const AspeedSMCState *s = fl->controller; + AspeedSMCClass *asc = fl->asc; - if (s->ctrl->segments == aspeed_segments_spi) { - return s->regs[s->r_ctrl0] & CTRL_AST2400_SPI_4BYTE; + if (asc->addr_width) { + return asc->addr_width(s); } else { - return s->regs[s->r_ce_ctrl] & (1 << (CTRL_EXTENDED0 + fl->id)); + return s->regs[s->r_ce_ctrl] & (1 << (CTRL_EXTENDED0 + fl->cs)) ? 4 : 3; } } @@ -679,9 +401,9 @@ static void aspeed_smc_flash_do_select(AspeedSMCFlash *fl, bool unselect) { AspeedSMCState *s = fl->controller; - trace_aspeed_smc_flash_select(fl->id, unselect ? "un" : ""); + trace_aspeed_smc_flash_select(fl->cs, unselect ? "un" : ""); - qemu_set_irq(s->cs_lines[fl->id], unselect); + qemu_set_irq(s->cs_lines[fl->cs], unselect); } static void aspeed_smc_flash_select(AspeedSMCFlash *fl) @@ -698,15 +420,14 @@ static uint32_t aspeed_smc_check_segment_addr(const AspeedSMCFlash *fl, uint32_t addr) { const AspeedSMCState *s = fl->controller; + AspeedSMCClass *asc = fl->asc; AspeedSegments seg; - s->ctrl->reg_to_segment(s, s->regs[R_SEG_ADDR0 + fl->id], &seg); + asc->reg_to_segment(s, s->regs[R_SEG_ADDR0 + fl->cs], &seg); if ((addr % seg.size) != addr) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: invalid address 0x%08x for CS%d segment : " - "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n", - s->ctrl->name, addr, fl->id, seg.addr, - seg.addr + seg.size); + aspeed_smc_error("invalid address 0x%08x for CS%d segment : " + "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]", + addr, fl->cs, seg.addr, seg.addr + seg.size); addr %= seg.size; } @@ -716,7 +437,7 @@ static uint32_t aspeed_smc_check_segment_addr(const AspeedSMCFlash *fl, static int aspeed_smc_flash_dummies(const AspeedSMCFlash *fl) { const AspeedSMCState *s = fl->controller; - uint32_t r_ctrl0 = s->regs[s->r_ctrl0 + fl->id]; + uint32_t r_ctrl0 = s->regs[s->r_ctrl0 + fl->cs]; uint32_t dummy_high = (r_ctrl0 >> CTRL_DUMMY_HIGH_SHIFT) & 0x1; uint32_t dummy_low = (r_ctrl0 >> CTRL_DUMMY_LOW_SHIFT) & 0x3; uint32_t dummies = ((dummy_high << 2) | dummy_low) * 8; @@ -732,7 +453,7 @@ static void aspeed_smc_flash_setup(AspeedSMCFlash *fl, uint32_t addr) { const AspeedSMCState *s = fl->controller; uint8_t cmd = aspeed_smc_flash_cmd(fl); - int i = aspeed_smc_flash_is_4byte(fl) ? 
4 : 3; + int i = aspeed_smc_flash_addr_width(fl); /* Flash access can not exceed CS segment */ addr = aspeed_smc_check_segment_addr(fl, addr); @@ -767,7 +488,7 @@ static uint64_t aspeed_smc_flash_read(void *opaque, hwaddr addr, unsigned size) switch (aspeed_smc_flash_mode(fl)) { case CTRL_USERMODE: for (i = 0; i < size; i++) { - ret |= ssi_transfer(s->spi, 0x0) << (8 * i); + ret |= (uint64_t) ssi_transfer(s->spi, 0x0) << (8 * i); } break; case CTRL_READMODE: @@ -776,17 +497,16 @@ static uint64_t aspeed_smc_flash_read(void *opaque, hwaddr addr, unsigned size) aspeed_smc_flash_setup(fl, addr); for (i = 0; i < size; i++) { - ret |= ssi_transfer(s->spi, 0x0) << (8 * i); + ret |= (uint64_t) ssi_transfer(s->spi, 0x0) << (8 * i); } aspeed_smc_flash_unselect(fl); break; default: - qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid flash mode %d\n", - __func__, aspeed_smc_flash_mode(fl)); + aspeed_smc_error("invalid flash mode %d", aspeed_smc_flash_mode(fl)); } - trace_aspeed_smc_flash_read(fl->id, addr, size, ret, + trace_aspeed_smc_flash_read(fl->cs, addr, size, ret, aspeed_smc_flash_mode(fl)); return ret; } @@ -841,9 +561,9 @@ static bool aspeed_smc_do_snoop(AspeedSMCFlash *fl, uint64_t data, unsigned size) { AspeedSMCState *s = fl->controller; - uint8_t addr_width = aspeed_smc_flash_is_4byte(fl) ? 4 : 3; + uint8_t addr_width = aspeed_smc_flash_addr_width(fl); - trace_aspeed_smc_do_snoop(fl->id, s->snoop_index, s->snoop_dummies, + trace_aspeed_smc_do_snoop(fl->cs, s->snoop_index, s->snoop_dummies, (uint8_t) data & 0xff); if (s->snoop_index == SNOOP_OFF) { @@ -896,12 +616,11 @@ static void aspeed_smc_flash_write(void *opaque, hwaddr addr, uint64_t data, AspeedSMCState *s = fl->controller; int i; - trace_aspeed_smc_flash_write(fl->id, addr, size, data, + trace_aspeed_smc_flash_write(fl->cs, addr, size, data, aspeed_smc_flash_mode(fl)); if (!aspeed_smc_is_writable(fl)) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: flash is not writable at 0x%" - HWADDR_PRIx "\n", __func__, addr); + aspeed_smc_error("flash is not writable at 0x%" HWADDR_PRIx, addr); return; } @@ -926,8 +645,7 @@ static void aspeed_smc_flash_write(void *opaque, hwaddr addr, uint64_t data, aspeed_smc_flash_unselect(fl); break; default: - qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid flash mode %d\n", - __func__, aspeed_smc_flash_mode(fl)); + aspeed_smc_error("invalid flash mode %d", aspeed_smc_flash_mode(fl)); } } @@ -950,12 +668,12 @@ static void aspeed_smc_flash_update_ctrl(AspeedSMCFlash *fl, uint32_t value) unselect = (value & CTRL_CMD_MODE_MASK) != CTRL_USERMODE; /* A change of CTRL_CE_STOP_ACTIVE from 0 to 1, unselects the CS */ - if (!(s->regs[s->r_ctrl0 + fl->id] & CTRL_CE_STOP_ACTIVE) && + if (!(s->regs[s->r_ctrl0 + fl->cs] & CTRL_CE_STOP_ACTIVE) && value & CTRL_CE_STOP_ACTIVE) { unselect = true; } - s->regs[s->r_ctrl0 + fl->id] = value; + s->regs[s->r_ctrl0 + fl->cs] = value; s->snoop_index = unselect ? 
SNOOP_OFF : SNOOP_START; @@ -965,41 +683,25 @@ static void aspeed_smc_flash_update_ctrl(AspeedSMCFlash *fl, uint32_t value) static void aspeed_smc_reset(DeviceState *d) { AspeedSMCState *s = ASPEED_SMC(d); + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); int i; - memset(s->regs, 0, sizeof s->regs); + if (asc->resets) { + memcpy(s->regs, asc->resets, sizeof s->regs); + } else { + memset(s->regs, 0, sizeof s->regs); + } /* Unselect all peripherals */ - for (i = 0; i < s->num_cs; ++i) { + for (i = 0; i < asc->cs_num_max; ++i) { s->regs[s->r_ctrl0 + i] |= CTRL_CE_STOP_ACTIVE; qemu_set_irq(s->cs_lines[i], true); } /* setup the default segment register values and regions for all */ - for (i = 0; i < s->ctrl->max_peripherals; ++i) { + for (i = 0; i < asc->cs_num_max; ++i) { aspeed_smc_flash_set_segment_region(s, i, - s->ctrl->segment_to_reg(s, &s->ctrl->segments[i])); - } - - /* HW strapping flash type for the AST2600 controllers */ - if (s->ctrl->segments == aspeed_segments_ast2600_fmc) { - /* flash type is fixed to SPI for all */ - s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0); - s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1); - s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE2); - } - - /* HW strapping flash type for FMC controllers */ - if (s->ctrl->segments == aspeed_segments_ast2500_fmc) { - /* flash type is fixed to SPI for CE0 and CE1 */ - s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0); - s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1); - } - - /* HW strapping for AST2400 FMC controllers (SCU70). Let's use the - * configuration of the palmetto-bmc machine */ - if (s->ctrl->segments == aspeed_segments_fmc) { - s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0); + asc->segment_to_reg(s, &asc->segments[i])); } s->snoop_index = SNOOP_OFF; @@ -1009,26 +711,28 @@ static void aspeed_smc_reset(DeviceState *d) static uint64_t aspeed_smc_read(void *opaque, hwaddr addr, unsigned int size) { AspeedSMCState *s = ASPEED_SMC(opaque); + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(opaque); addr >>= 2; if (addr == s->r_conf || (addr >= s->r_timings && - addr < s->r_timings + s->ctrl->nregs_timings) || + addr < s->r_timings + asc->nregs_timings) || addr == s->r_ce_ctrl || addr == R_CE_CMD_CTRL || addr == R_INTR_CTRL || addr == R_DUMMY_DATA || - (aspeed_smc_has_dma(s) && addr == R_DMA_CTRL) || - (aspeed_smc_has_dma(s) && addr == R_DMA_FLASH_ADDR) || - (aspeed_smc_has_dma(s) && addr == R_DMA_DRAM_ADDR) || - (aspeed_smc_has_dma(s) && addr == R_DMA_LEN) || - (aspeed_smc_has_dma(s) && addr == R_DMA_CHECKSUM) || + (aspeed_smc_has_wdt_control(asc) && addr == R_FMC_WDT2_CTRL) || + (aspeed_smc_has_dma(asc) && addr == R_DMA_CTRL) || + (aspeed_smc_has_dma(asc) && addr == R_DMA_FLASH_ADDR) || + (aspeed_smc_has_dma(asc) && addr == R_DMA_DRAM_ADDR) || + (aspeed_smc_has_dma(asc) && addr == R_DMA_LEN) || + (aspeed_smc_has_dma(asc) && addr == R_DMA_CHECKSUM) || (addr >= R_SEG_ADDR0 && - addr < R_SEG_ADDR0 + s->ctrl->max_peripherals) || - (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->ctrl->max_peripherals)) { + addr < R_SEG_ADDR0 + asc->cs_num_max) || + (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + asc->cs_num_max)) { - trace_aspeed_smc_read(addr, size, s->regs[addr]); + trace_aspeed_smc_read(addr << 2, size, s->regs[addr]); return s->regs[addr]; } else { @@ -1052,7 +756,7 @@ static uint8_t aspeed_smc_hclk_divisor(uint8_t hclk_mask) } } - qemu_log_mask(LOG_GUEST_ERROR, "invalid HCLK mask %x", hclk_mask); + aspeed_smc_error("invalid HCLK 
mask %x", hclk_mask); return 0; } @@ -1132,8 +836,7 @@ static void aspeed_smc_dma_checksum(AspeedSMCState *s) uint32_t data; if (s->regs[R_DMA_CTRL] & DMA_CTRL_WRITE) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: invalid direction for DMA checksum\n", __func__); + aspeed_smc_error("invalid direction for DMA checksum"); return; } @@ -1145,8 +848,8 @@ static void aspeed_smc_dma_checksum(AspeedSMCState *s) data = address_space_ldl_le(&s->flash_as, s->regs[R_DMA_FLASH_ADDR], MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: Flash read failed @%08x\n", - __func__, s->regs[R_DMA_FLASH_ADDR]); + aspeed_smc_error("Flash read failed @%08x", + s->regs[R_DMA_FLASH_ADDR]); return; } trace_aspeed_smc_dma_checksum(s->regs[R_DMA_FLASH_ADDR], data); @@ -1181,32 +884,32 @@ static void aspeed_smc_dma_rw(AspeedSMCState *s) data = address_space_ldl_le(&s->dram_as, s->regs[R_DMA_DRAM_ADDR], MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM read failed @%08x\n", - __func__, s->regs[R_DMA_DRAM_ADDR]); + aspeed_smc_error("DRAM read failed @%08x", + s->regs[R_DMA_DRAM_ADDR]); return; } address_space_stl_le(&s->flash_as, s->regs[R_DMA_FLASH_ADDR], data, MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: Flash write failed @%08x\n", - __func__, s->regs[R_DMA_FLASH_ADDR]); + aspeed_smc_error("Flash write failed @%08x", + s->regs[R_DMA_FLASH_ADDR]); return; } } else { data = address_space_ldl_le(&s->flash_as, s->regs[R_DMA_FLASH_ADDR], MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: Flash read failed @%08x\n", - __func__, s->regs[R_DMA_FLASH_ADDR]); + aspeed_smc_error("Flash read failed @%08x", + s->regs[R_DMA_FLASH_ADDR]); return; } address_space_stl_le(&s->dram_as, s->regs[R_DMA_DRAM_ADDR], data, MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM write failed @%08x\n", - __func__, s->regs[R_DMA_DRAM_ADDR]); + aspeed_smc_error("DRAM write failed @%08x", + s->regs[R_DMA_DRAM_ADDR]); return; } } @@ -1266,7 +969,7 @@ static void aspeed_smc_dma_ctrl(AspeedSMCState *s, uint32_t dma_ctrl) } if (aspeed_smc_dma_in_progress(s)) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA in progress\n", __func__); + aspeed_smc_error("DMA in progress !"); return; } @@ -1283,12 +986,14 @@ static void aspeed_smc_dma_ctrl(AspeedSMCState *s, uint32_t dma_ctrl) static inline bool aspeed_smc_dma_granted(AspeedSMCState *s) { - if (!(s->ctrl->features & ASPEED_SMC_FEATURE_DMA_GRANT)) { + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + + if (!(asc->features & ASPEED_SMC_FEATURE_DMA_GRANT)) { return true; } if (!(s->regs[R_DMA_CTRL] & DMA_CTRL_GRANT)) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA not granted\n", __func__); + aspeed_smc_error("DMA not granted"); return false; } @@ -1313,7 +1018,7 @@ static void aspeed_2600_smc_dma_ctrl(AspeedSMCState *s, uint32_t dma_ctrl) } if (!aspeed_smc_dma_granted(s)) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA not granted\n", __func__); + aspeed_smc_error("DMA not granted"); return; } @@ -1325,22 +1030,23 @@ static void aspeed_smc_write(void *opaque, hwaddr addr, uint64_t data, unsigned int size) { AspeedSMCState *s = ASPEED_SMC(opaque); + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); uint32_t value = data; - addr >>= 2; - trace_aspeed_smc_write(addr, size, data); + addr >>= 2; + if (addr == s->r_conf || (addr >= s->r_timings && - addr < s->r_timings + 
s->ctrl->nregs_timings) || + addr < s->r_timings + asc->nregs_timings) || addr == s->r_ce_ctrl) { s->regs[addr] = value; - } else if (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->num_cs) { + } else if (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + asc->cs_num_max) { int cs = addr - s->r_ctrl0; aspeed_smc_flash_update_ctrl(&s->flashes[cs], value); } else if (addr >= R_SEG_ADDR0 && - addr < R_SEG_ADDR0 + s->ctrl->max_peripherals) { + addr < R_SEG_ADDR0 + asc->cs_num_max) { int cs = addr - R_SEG_ADDR0; if (value != s->regs[R_SEG_ADDR0 + cs]) { @@ -1350,17 +1056,19 @@ static void aspeed_smc_write(void *opaque, hwaddr addr, uint64_t data, s->regs[addr] = value & 0xff; } else if (addr == R_DUMMY_DATA) { s->regs[addr] = value & 0xff; + } else if (aspeed_smc_has_wdt_control(asc) && addr == R_FMC_WDT2_CTRL) { + s->regs[addr] = value & FMC_WDT2_CTRL_EN; } else if (addr == R_INTR_CTRL) { s->regs[addr] = value; - } else if (aspeed_smc_has_dma(s) && addr == R_DMA_CTRL) { - s->ctrl->dma_ctrl(s, value); - } else if (aspeed_smc_has_dma(s) && addr == R_DMA_DRAM_ADDR && + } else if (aspeed_smc_has_dma(asc) && addr == R_DMA_CTRL) { + asc->dma_ctrl(s, value); + } else if (aspeed_smc_has_dma(asc) && addr == R_DMA_DRAM_ADDR && aspeed_smc_dma_granted(s)) { - s->regs[addr] = DMA_DRAM_ADDR(s, value); - } else if (aspeed_smc_has_dma(s) && addr == R_DMA_FLASH_ADDR && + s->regs[addr] = DMA_DRAM_ADDR(asc, value); + } else if (aspeed_smc_has_dma(asc) && addr == R_DMA_FLASH_ADDR && aspeed_smc_dma_granted(s)) { - s->regs[addr] = DMA_FLASH_ADDR(s, value); - } else if (aspeed_smc_has_dma(s) && addr == R_DMA_LEN && + s->regs[addr] = DMA_FLASH_ADDR(asc, value); + } else if (aspeed_smc_has_dma(asc) && addr == R_DMA_LEN && aspeed_smc_dma_granted(s)) { s->regs[addr] = DMA_LENGTH(value); } else { @@ -1376,67 +1084,64 @@ static const MemoryRegionOps aspeed_smc_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; +static void aspeed_smc_instance_init(Object *obj) +{ + AspeedSMCState *s = ASPEED_SMC(obj); + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + int i; + + for (i = 0; i < asc->cs_num_max; i++) { + object_initialize_child(obj, "flash[*]", &s->flashes[i], + TYPE_ASPEED_SMC_FLASH); + } +} + /* * Initialize the custom address spaces for DMAs */ static void aspeed_smc_dma_setup(AspeedSMCState *s, Error **errp) { - char *name; - if (!s->dram_mr) { error_setg(errp, TYPE_ASPEED_SMC ": 'dram' link not set"); return; } - name = g_strdup_printf("%s-dma-flash", s->ctrl->name); - address_space_init(&s->flash_as, &s->mmio_flash, name); - g_free(name); - - name = g_strdup_printf("%s-dma-dram", s->ctrl->name); - address_space_init(&s->dram_as, s->dram_mr, name); - g_free(name); + address_space_init(&s->flash_as, &s->mmio_flash, + TYPE_ASPEED_SMC ".dma-flash"); + address_space_init(&s->dram_as, s->dram_mr, + TYPE_ASPEED_SMC ".dma-dram"); } static void aspeed_smc_realize(DeviceState *dev, Error **errp) { SysBusDevice *sbd = SYS_BUS_DEVICE(dev); AspeedSMCState *s = ASPEED_SMC(dev); - AspeedSMCClass *mc = ASPEED_SMC_GET_CLASS(s); + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); int i; - char name[32]; hwaddr offset = 0; - s->ctrl = mc->ctrl; - /* keep a copy under AspeedSMCState to speed up accesses */ - s->r_conf = s->ctrl->r_conf; - s->r_ce_ctrl = s->ctrl->r_ce_ctrl; - s->r_ctrl0 = s->ctrl->r_ctrl0; - s->r_timings = s->ctrl->r_timings; - s->conf_enable_w0 = s->ctrl->conf_enable_w0; - - /* Enforce some real HW limits */ - if (s->num_cs > s->ctrl->max_peripherals) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: num_cs cannot exceed: %d\n", - __func__, 
s->ctrl->max_peripherals); - s->num_cs = s->ctrl->max_peripherals; - } + s->r_conf = asc->r_conf; + s->r_ce_ctrl = asc->r_ce_ctrl; + s->r_ctrl0 = asc->r_ctrl0; + s->r_timings = asc->r_timings; + s->conf_enable_w0 = asc->conf_enable_w0; /* DMA irq. Keep it first for the initialization in the SoC */ sysbus_init_irq(sbd, &s->irq); - s->spi = ssi_create_bus(dev, "spi"); + s->spi = ssi_create_bus(dev, NULL); /* Setup cs_lines for peripherals */ - s->cs_lines = g_new0(qemu_irq, s->num_cs); + s->cs_lines = g_new0(qemu_irq, asc->cs_num_max); - for (i = 0; i < s->num_cs; ++i) { + for (i = 0; i < asc->cs_num_max; ++i) { sysbus_init_irq(sbd, &s->cs_lines[i]); } /* The memory region for the controller registers */ memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_smc_ops, s, - s->ctrl->name, s->ctrl->nregs * 4); + TYPE_ASPEED_SMC, asc->nregs * 4); sysbus_init_mmio(sbd, &s->mmio); /* @@ -1444,16 +1149,17 @@ static void aspeed_smc_realize(DeviceState *dev, Error **errp) * window in which the flash modules are mapped. The size and * address depends on the SoC model and controller type. */ - snprintf(name, sizeof(name), "%s.flash", s->ctrl->name); + memory_region_init(&s->mmio_flash_container, OBJECT(s), + TYPE_ASPEED_SMC ".container", + asc->flash_window_size); + sysbus_init_mmio(sbd, &s->mmio_flash_container); memory_region_init_io(&s->mmio_flash, OBJECT(s), - &aspeed_smc_flash_default_ops, s, name, - s->ctrl->flash_window_size); - memory_region_init_alias(&s->mmio_flash_alias, OBJECT(s), name, - &s->mmio_flash, 0, s->ctrl->flash_window_size); - sysbus_init_mmio(sbd, &s->mmio_flash_alias); - - s->flashes = g_new0(AspeedSMCFlash, s->ctrl->max_peripherals); + &aspeed_smc_flash_default_ops, s, + TYPE_ASPEED_SMC ".flash", + asc->flash_window_size); + memory_region_add_subregion(&s->mmio_flash_container, 0x0, + &s->mmio_flash); /* * Let's create a sub memory region for each possible peripheral. All @@ -1462,22 +1168,26 @@ static void aspeed_smc_realize(DeviceState *dev, Error **errp) * module behind to handle the memory accesses. This depends on * the board configuration. 
*/ - for (i = 0; i < s->ctrl->max_peripherals; ++i) { + for (i = 0; i < asc->cs_num_max; ++i) { AspeedSMCFlash *fl = &s->flashes[i]; - snprintf(name, sizeof(name), "%s.%d", s->ctrl->name, i); + if (!object_property_set_link(OBJECT(fl), "controller", OBJECT(s), + errp)) { + return; + } + if (!object_property_set_uint(OBJECT(fl), "cs", i, errp)) { + return; + } + if (!sysbus_realize(SYS_BUS_DEVICE(fl), errp)) { + return; + } - fl->id = i; - fl->controller = s; - fl->size = s->ctrl->segments[i].size; - memory_region_init_io(&fl->mmio, OBJECT(s), &aspeed_smc_flash_ops, - fl, name, fl->size); memory_region_add_subregion(&s->mmio_flash, offset, &fl->mmio); - offset += fl->size; + offset += asc->segments[i].size; } /* DMA support */ - if (aspeed_smc_has_dma(s)) { + if (aspeed_smc_has_dma(asc)) { aspeed_smc_dma_setup(s, errp); } } @@ -1495,7 +1205,6 @@ static const VMStateDescription vmstate_aspeed_smc = { }; static Property aspeed_smc_properties[] = { - DEFINE_PROP_UINT32("num-cs", AspeedSMCState, num_cs, 1), DEFINE_PROP_BOOL("inject-failure", AspeedSMCState, inject_failure, false), DEFINE_PROP_LINK("dram", AspeedSMCState, dram_mr, TYPE_MEMORY_REGION, MemoryRegion *), @@ -1505,37 +1214,657 @@ static Property aspeed_smc_properties[] = { static void aspeed_smc_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - AspeedSMCClass *mc = ASPEED_SMC_CLASS(klass); dc->realize = aspeed_smc_realize; dc->reset = aspeed_smc_reset; device_class_set_props(dc, aspeed_smc_properties); dc->vmsd = &vmstate_aspeed_smc; - mc->ctrl = data; } static const TypeInfo aspeed_smc_info = { .name = TYPE_ASPEED_SMC, .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = aspeed_smc_instance_init, .instance_size = sizeof(AspeedSMCState), .class_size = sizeof(AspeedSMCClass), + .class_init = aspeed_smc_class_init, .abstract = true, }; +static void aspeed_smc_flash_realize(DeviceState *dev, Error **errp) +{ + AspeedSMCFlash *s = ASPEED_SMC_FLASH(dev); + g_autofree char *name = g_strdup_printf(TYPE_ASPEED_SMC_FLASH ".%d", s->cs); + + if (!s->controller) { + error_setg(errp, TYPE_ASPEED_SMC_FLASH ": 'controller' link not set"); + return; + } + + s->asc = ASPEED_SMC_GET_CLASS(s->controller); + + /* + * Use the default segment value to size the memory region. This + * can be changed by FW at runtime. + */ + memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_smc_flash_ops, + s, name, s->asc->segments[s->cs].size); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio); +} + +static Property aspeed_smc_flash_properties[] = { + DEFINE_PROP_UINT8("cs", AspeedSMCFlash, cs, 0), + DEFINE_PROP_LINK("controller", AspeedSMCFlash, controller, TYPE_ASPEED_SMC, + AspeedSMCState *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_smc_flash_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "Aspeed SMC Flash device region"; + dc->realize = aspeed_smc_flash_realize; + device_class_set_props(dc, aspeed_smc_flash_properties); +} + +static const TypeInfo aspeed_smc_flash_info = { + .name = TYPE_ASPEED_SMC_FLASH, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedSMCFlash), + .class_init = aspeed_smc_flash_class_init, +}; + +/* + * The Segment Registers of the AST2400 and AST2500 have a 8MB + * unit. The address range of a flash SPI peripheral is encoded with + * absolute addresses which should be part of the overall controller + * window. 
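+ * For example, the 32 MiB segment at 0x24000000 below is encoded with a start field of 0x48 (0x24000000 >> 23) and an end field of 0x4c ((0x24000000 + 32 MiB) >> 23).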
+ */ +static uint32_t aspeed_smc_segment_to_reg(const AspeedSMCState *s, + const AspeedSegments *seg) +{ + uint32_t reg = 0; + reg |= ((seg->addr >> 23) & SEG_START_MASK) << SEG_START_SHIFT; + reg |= (((seg->addr + seg->size) >> 23) & SEG_END_MASK) << SEG_END_SHIFT; + return reg; +} + +static void aspeed_smc_reg_to_segment(const AspeedSMCState *s, + uint32_t reg, AspeedSegments *seg) +{ + seg->addr = ((reg >> SEG_START_SHIFT) & SEG_START_MASK) << 23; + seg->size = (((reg >> SEG_END_SHIFT) & SEG_END_MASK) << 23) - seg->addr; +} + +static const AspeedSegments aspeed_2400_smc_segments[] = { + { 0x10000000, 32 * MiB }, +}; + +static void aspeed_2400_smc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2400 SMC Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 1; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->cs_num_max = 1; + asc->segments = aspeed_2400_smc_segments; + asc->flash_window_base = 0x10000000; + asc->flash_window_size = 0x6000000; + asc->features = 0x0; + asc->nregs = ASPEED_SMC_R_SMC_MAX; + asc->segment_to_reg = aspeed_smc_segment_to_reg; + asc->reg_to_segment = aspeed_smc_reg_to_segment; + asc->dma_ctrl = aspeed_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2400_smc_info = { + .name = "aspeed.smc-ast2400", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2400_smc_class_init, +}; + +static const uint32_t aspeed_2400_fmc_resets[ASPEED_SMC_R_MAX] = { + /* + * CE0 and CE1 types are HW strapped in SCU70. Do it here to + * simplify the model. + */ + [R_CONF] = CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0, +}; + +static const AspeedSegments aspeed_2400_fmc_segments[] = { + { 0x20000000, 64 * MiB }, /* start address is readonly */ + { 0x24000000, 32 * MiB }, + { 0x26000000, 32 * MiB }, + { 0x28000000, 32 * MiB }, + { 0x2A000000, 32 * MiB } +}; + +static void aspeed_2400_fmc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2400 FMC Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 1; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->cs_num_max = 5; + asc->segments = aspeed_2400_fmc_segments; + asc->segment_addr_mask = 0xffff0000; + asc->resets = aspeed_2400_fmc_resets; + asc->flash_window_base = 0x20000000; + asc->flash_window_size = 0x10000000; + asc->features = ASPEED_SMC_FEATURE_DMA; + asc->dma_flash_mask = 0x0FFFFFFC; + asc->dma_dram_mask = 0x1FFFFFFC; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_smc_segment_to_reg; + asc->reg_to_segment = aspeed_smc_reg_to_segment; + asc->dma_ctrl = aspeed_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2400_fmc_info = { + .name = "aspeed.fmc-ast2400", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2400_fmc_class_init, +}; + +static const AspeedSegments aspeed_2400_spi1_segments[] = { + { 0x30000000, 64 * MiB }, +}; + +static int aspeed_2400_spi1_addr_width(const AspeedSMCState *s) +{ + return s->regs[R_SPI_CTRL0] & CTRL_AST2400_SPI_4BYTE ? 
4 : 3; +} + +static void aspeed_2400_spi1_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2400 SPI1 Controller"; + asc->r_conf = R_SPI_CONF; + asc->r_ce_ctrl = 0xff; + asc->r_ctrl0 = R_SPI_CTRL0; + asc->r_timings = R_SPI_TIMINGS; + asc->nregs_timings = 1; + asc->conf_enable_w0 = SPI_CONF_ENABLE_W0; + asc->cs_num_max = 1; + asc->segments = aspeed_2400_spi1_segments; + asc->flash_window_base = 0x30000000; + asc->flash_window_size = 0x10000000; + asc->features = 0x0; + asc->nregs = ASPEED_SMC_R_SPI_MAX; + asc->segment_to_reg = aspeed_smc_segment_to_reg; + asc->reg_to_segment = aspeed_smc_reg_to_segment; + asc->dma_ctrl = aspeed_smc_dma_ctrl; + asc->addr_width = aspeed_2400_spi1_addr_width; +} + +static const TypeInfo aspeed_2400_spi1_info = { + .name = "aspeed.spi1-ast2400", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2400_spi1_class_init, +}; + +static const uint32_t aspeed_2500_fmc_resets[ASPEED_SMC_R_MAX] = { + [R_CONF] = (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0 | + CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1), +}; + +static const AspeedSegments aspeed_2500_fmc_segments[] = { + { 0x20000000, 128 * MiB }, /* start address is readonly */ + { 0x28000000, 32 * MiB }, + { 0x2A000000, 32 * MiB }, +}; + +static void aspeed_2500_fmc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2500 FMC Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 1; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->cs_num_max = 3; + asc->segments = aspeed_2500_fmc_segments; + asc->segment_addr_mask = 0xffff0000; + asc->resets = aspeed_2500_fmc_resets; + asc->flash_window_base = 0x20000000; + asc->flash_window_size = 0x10000000; + asc->features = ASPEED_SMC_FEATURE_DMA; + asc->dma_flash_mask = 0x0FFFFFFC; + asc->dma_dram_mask = 0x3FFFFFFC; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_smc_segment_to_reg; + asc->reg_to_segment = aspeed_smc_reg_to_segment; + asc->dma_ctrl = aspeed_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2500_fmc_info = { + .name = "aspeed.fmc-ast2500", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2500_fmc_class_init, +}; + +static const AspeedSegments aspeed_2500_spi1_segments[] = { + { 0x30000000, 32 * MiB }, /* start address is readonly */ + { 0x32000000, 96 * MiB }, /* end address is readonly */ +}; + +static void aspeed_2500_spi1_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2500 SPI1 Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 1; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->cs_num_max = 2; + asc->segments = aspeed_2500_spi1_segments; + asc->segment_addr_mask = 0xffff0000; + asc->flash_window_base = 0x30000000; + asc->flash_window_size = 0x8000000; + asc->features = 0x0; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_smc_segment_to_reg; + asc->reg_to_segment = aspeed_smc_reg_to_segment; + asc->dma_ctrl = aspeed_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2500_spi1_info = { + .name = "aspeed.spi1-ast2500", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2500_spi1_class_init, +}; + +static const AspeedSegments 
aspeed_2500_spi2_segments[] = { + { 0x38000000, 32 * MiB }, /* start address is readonly */ + { 0x3A000000, 96 * MiB }, /* end address is readonly */ +}; + +static void aspeed_2500_spi2_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2500 SPI2 Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 1; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->cs_num_max = 2; + asc->segments = aspeed_2500_spi2_segments; + asc->segment_addr_mask = 0xffff0000; + asc->flash_window_base = 0x38000000; + asc->flash_window_size = 0x8000000; + asc->features = 0x0; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_smc_segment_to_reg; + asc->reg_to_segment = aspeed_smc_reg_to_segment; + asc->dma_ctrl = aspeed_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2500_spi2_info = { + .name = "aspeed.spi2-ast2500", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2500_spi2_class_init, +}; + +/* + * The Segment Registers of the AST2600 have a 1MB unit. The address + * range of a flash SPI peripheral is encoded with offsets in the overall + * controller window. The previous SoC AST2400 and AST2500 used + * absolute addresses. Only bits [27:20] are relevant and the end + * address is an upper bound limit. + */ +#define AST2600_SEG_ADDR_MASK 0x0ff00000 + +static uint32_t aspeed_2600_smc_segment_to_reg(const AspeedSMCState *s, + const AspeedSegments *seg) +{ + uint32_t reg = 0; + + /* Disabled segments have a nil register */ + if (!seg->size) { + return 0; + } + + reg |= (seg->addr & AST2600_SEG_ADDR_MASK) >> 16; /* start offset */ + reg |= (seg->addr + seg->size - 1) & AST2600_SEG_ADDR_MASK; /* end offset */ + return reg; +} + +static void aspeed_2600_smc_reg_to_segment(const AspeedSMCState *s, + uint32_t reg, AspeedSegments *seg) +{ + uint32_t start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK; + uint32_t end_offset = reg & AST2600_SEG_ADDR_MASK; + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + + if (reg) { + seg->addr = asc->flash_window_base + start_offset; + seg->size = end_offset + MiB - start_offset; + } else { + seg->addr = asc->flash_window_base; + seg->size = 0; + } +} + +static const uint32_t aspeed_2600_fmc_resets[ASPEED_SMC_R_MAX] = { + [R_CONF] = (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0 | + CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1 | + CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE2), +}; + +static const AspeedSegments aspeed_2600_fmc_segments[] = { + { 0x0, 128 * MiB }, /* start address is readonly */ + { 128 * MiB, 128 * MiB }, /* default is disabled but needed for -kernel */ + { 0x0, 0 }, /* disabled */ +}; + +static void aspeed_2600_fmc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2600 FMC Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 1; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->cs_num_max = 3; + asc->segments = aspeed_2600_fmc_segments; + asc->segment_addr_mask = 0x0ff00ff0; + asc->resets = aspeed_2600_fmc_resets; + asc->flash_window_base = 0x20000000; + asc->flash_window_size = 0x10000000; + asc->features = ASPEED_SMC_FEATURE_DMA | + ASPEED_SMC_FEATURE_WDT_CONTROL; + asc->dma_flash_mask = 0x0FFFFFFC; + asc->dma_dram_mask = 0x3FFFFFFC; + asc->nregs = ASPEED_SMC_R_MAX; + 
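/* The AST2600 uses its own segment decoding and DMA control handlers */ +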
asc->segment_to_reg = aspeed_2600_smc_segment_to_reg; + asc->reg_to_segment = aspeed_2600_smc_reg_to_segment; + asc->dma_ctrl = aspeed_2600_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2600_fmc_info = { + .name = "aspeed.fmc-ast2600", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2600_fmc_class_init, +}; + +static const AspeedSegments aspeed_2600_spi1_segments[] = { + { 0x0, 128 * MiB }, /* start address is readonly */ + { 0x0, 0 }, /* disabled */ +}; + +static void aspeed_2600_spi1_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2600 SPI1 Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 2; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->cs_num_max = 2; + asc->segments = aspeed_2600_spi1_segments; + asc->segment_addr_mask = 0x0ff00ff0; + asc->flash_window_base = 0x30000000; + asc->flash_window_size = 0x10000000; + asc->features = ASPEED_SMC_FEATURE_DMA | + ASPEED_SMC_FEATURE_DMA_GRANT; + asc->dma_flash_mask = 0x0FFFFFFC; + asc->dma_dram_mask = 0x3FFFFFFC; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_2600_smc_segment_to_reg; + asc->reg_to_segment = aspeed_2600_smc_reg_to_segment; + asc->dma_ctrl = aspeed_2600_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2600_spi1_info = { + .name = "aspeed.spi1-ast2600", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2600_spi1_class_init, +}; + +static const AspeedSegments aspeed_2600_spi2_segments[] = { + { 0x0, 128 * MiB }, /* start address is readonly */ + { 0x0, 0 }, /* disabled */ + { 0x0, 0 }, /* disabled */ +}; + +static void aspeed_2600_spi2_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2600 SPI2 Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 3; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->cs_num_max = 3; + asc->segments = aspeed_2600_spi2_segments; + asc->segment_addr_mask = 0x0ff00ff0; + asc->flash_window_base = 0x50000000; + asc->flash_window_size = 0x10000000; + asc->features = ASPEED_SMC_FEATURE_DMA | + ASPEED_SMC_FEATURE_DMA_GRANT; + asc->dma_flash_mask = 0x0FFFFFFC; + asc->dma_dram_mask = 0x3FFFFFFC; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_2600_smc_segment_to_reg; + asc->reg_to_segment = aspeed_2600_smc_reg_to_segment; + asc->dma_ctrl = aspeed_2600_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2600_spi2_info = { + .name = "aspeed.spi2-ast2600", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2600_spi2_class_init, +}; + +/* + * The FMC Segment Registers of the AST1030 have a 512KB unit. + * Only bits [27:19] are used for decoding. 
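+ * For example, a 128 MiB segment at offset 0x0 (as used below) is encoded with a zero start field and an end field of 0x07f80000, i.e. (128 MiB - 1) & 0x0ff80000.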
+ */ +#define AST1030_SEG_ADDR_MASK 0x0ff80000 + +static uint32_t aspeed_1030_smc_segment_to_reg(const AspeedSMCState *s, + const AspeedSegments *seg) +{ + uint32_t reg = 0; + + /* Disabled segments have a nil register */ + if (!seg->size) { + return 0; + } + + reg |= (seg->addr & AST1030_SEG_ADDR_MASK) >> 16; /* start offset */ + reg |= (seg->addr + seg->size - 1) & AST1030_SEG_ADDR_MASK; /* end offset */ + return reg; +} + +static void aspeed_1030_smc_reg_to_segment(const AspeedSMCState *s, + uint32_t reg, AspeedSegments *seg) +{ + uint32_t start_offset = (reg << 16) & AST1030_SEG_ADDR_MASK; + uint32_t end_offset = reg & AST1030_SEG_ADDR_MASK; + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + + if (reg) { + seg->addr = asc->flash_window_base + start_offset; + seg->size = end_offset + (512 * KiB) - start_offset; + } else { + seg->addr = asc->flash_window_base; + seg->size = 0; + } +} + +static const uint32_t aspeed_1030_fmc_resets[ASPEED_SMC_R_MAX] = { + [R_CONF] = (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0 | + CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1), +}; + +static const AspeedSegments aspeed_1030_fmc_segments[] = { + { 0x0, 128 * MiB }, /* start address is readonly */ + { 128 * MiB, 128 * MiB }, /* default is disabled but needed for -kernel */ + { 0x0, 0 }, /* disabled */ +}; + +static void aspeed_1030_fmc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 1030 FMC Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 2; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->cs_num_max = 2; + asc->segments = aspeed_1030_fmc_segments; + asc->segment_addr_mask = 0x0ff80ff8; + asc->resets = aspeed_1030_fmc_resets; + asc->flash_window_base = 0x80000000; + asc->flash_window_size = 0x10000000; + asc->features = ASPEED_SMC_FEATURE_DMA; + asc->dma_flash_mask = 0x0FFFFFFC; + asc->dma_dram_mask = 0x000BFFFC; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_1030_smc_segment_to_reg; + asc->reg_to_segment = aspeed_1030_smc_reg_to_segment; + asc->dma_ctrl = aspeed_2600_smc_dma_ctrl; +} + +static const TypeInfo aspeed_1030_fmc_info = { + .name = "aspeed.fmc-ast1030", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_1030_fmc_class_init, +}; + +static const AspeedSegments aspeed_1030_spi1_segments[] = { + { 0x0, 128 * MiB }, /* start address is readonly */ + { 0x0, 0 }, /* disabled */ +}; + +static void aspeed_1030_spi1_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 1030 SPI1 Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 2; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->cs_num_max = 2; + asc->segments = aspeed_1030_spi1_segments; + asc->segment_addr_mask = 0x0ff00ff0; + asc->flash_window_base = 0x90000000; + asc->flash_window_size = 0x10000000; + asc->features = ASPEED_SMC_FEATURE_DMA; + asc->dma_flash_mask = 0x0FFFFFFC; + asc->dma_dram_mask = 0x000BFFFC; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_2600_smc_segment_to_reg; + asc->reg_to_segment = aspeed_2600_smc_reg_to_segment; + asc->dma_ctrl = aspeed_2600_smc_dma_ctrl; +} + +static const TypeInfo aspeed_1030_spi1_info = { + .name = "aspeed.spi1-ast1030", + .parent = TYPE_ASPEED_SMC, + .class_init = 
aspeed_1030_spi1_class_init, +}; +static const AspeedSegments aspeed_1030_spi2_segments[] = { + { 0x0, 128 * MiB }, /* start address is readonly */ + { 0x0, 0 }, /* disabled */ +}; + +static void aspeed_1030_spi2_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 1030 SPI2 Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 2; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->cs_num_max = 2; + asc->segments = aspeed_1030_spi2_segments; + asc->segment_addr_mask = 0x0ff00ff0; + asc->flash_window_base = 0xb0000000; + asc->flash_window_size = 0x10000000; + asc->features = ASPEED_SMC_FEATURE_DMA; + asc->dma_flash_mask = 0x0FFFFFFC; + asc->dma_dram_mask = 0x000BFFFC; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_2600_smc_segment_to_reg; + asc->reg_to_segment = aspeed_2600_smc_reg_to_segment; + asc->dma_ctrl = aspeed_2600_smc_dma_ctrl; +} + +static const TypeInfo aspeed_1030_spi2_info = { + .name = "aspeed.spi2-ast1030", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_1030_spi2_class_init, +}; + static void aspeed_smc_register_types(void) { - int i; - + type_register_static(&aspeed_smc_flash_info); type_register_static(&aspeed_smc_info); - for (i = 0; i < ARRAY_SIZE(controllers); ++i) { - TypeInfo ti = { - .name = controllers[i].name, - .parent = TYPE_ASPEED_SMC, - .class_init = aspeed_smc_class_init, - .class_data = (void *)&controllers[i], - }; - type_register(&ti); - } + type_register_static(&aspeed_2400_smc_info); + type_register_static(&aspeed_2400_fmc_info); + type_register_static(&aspeed_2400_spi1_info); + type_register_static(&aspeed_2500_fmc_info); + type_register_static(&aspeed_2500_spi1_info); + type_register_static(&aspeed_2500_spi2_info); + type_register_static(&aspeed_2600_fmc_info); + type_register_static(&aspeed_2600_spi1_info); + type_register_static(&aspeed_2600_spi2_info); + type_register_static(&aspeed_1030_fmc_info); + type_register_static(&aspeed_1030_spi1_info); + type_register_static(&aspeed_1030_spi2_info); } type_init(aspeed_smc_register_types) diff --git a/hw/ssi/ibex_spi_host.c b/hw/ssi/ibex_spi_host.c new file mode 100644 index 0000000000..57df462e3c --- /dev/null +++ b/hw/ssi/ibex_spi_host.c @@ -0,0 +1,646 @@ +/* + * QEMU model of the Ibex SPI Controller + * SPEC Reference: https://docs.opentitan.org/hw/ip/spi_host/doc/ + * + * Copyright (C) 2022 Western Digital + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/ssi/ibex_spi_host.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "migration/vmstate.h" +#include "trace.h" + +REG32(INTR_STATE, 0x00) + FIELD(INTR_STATE, ERROR, 0, 1) + FIELD(INTR_STATE, SPI_EVENT, 1, 1) +REG32(INTR_ENABLE, 0x04) + FIELD(INTR_ENABLE, ERROR, 0, 1) + FIELD(INTR_ENABLE, SPI_EVENT, 1, 1) +REG32(INTR_TEST, 0x08) + FIELD(INTR_TEST, ERROR, 0, 1) + FIELD(INTR_TEST, SPI_EVENT, 1, 1) +REG32(ALERT_TEST, 0x0c) + FIELD(ALERT_TEST, FETAL_TEST, 0, 1) +REG32(CONTROL, 0x10) + FIELD(CONTROL, RX_WATERMARK, 0, 8) + FIELD(CONTROL, TX_WATERMARK, 1, 8) + FIELD(CONTROL, OUTPUT_EN, 29, 1) + FIELD(CONTROL, SW_RST, 30, 1) + FIELD(CONTROL, SPIEN, 31, 1) +REG32(STATUS, 0x14) + FIELD(STATUS, TXQD, 0, 8) + FIELD(STATUS, RXQD, 18, 8) + FIELD(STATUS, CMDQD, 16, 3) + FIELD(STATUS, RXWM, 20, 1) + FIELD(STATUS, BYTEORDER, 22, 1) + FIELD(STATUS, RXSTALL, 23, 1) + FIELD(STATUS, RXEMPTY, 24, 1) + FIELD(STATUS, RXFULL, 25, 1) + FIELD(STATUS, TXWM, 26, 1) + FIELD(STATUS, TXSTALL, 27, 1) + FIELD(STATUS, TXEMPTY, 28, 1) + FIELD(STATUS, TXFULL, 29, 1) + FIELD(STATUS, ACTIVE, 30, 1) + FIELD(STATUS, READY, 31, 1) +REG32(CONFIGOPTS, 0x18) + FIELD(CONFIGOPTS, CLKDIV_0, 0, 16) + FIELD(CONFIGOPTS, CSNIDLE_0, 16, 4) + FIELD(CONFIGOPTS, CSNTRAIL_0, 20, 4) + FIELD(CONFIGOPTS, CSNLEAD_0, 24, 4) + FIELD(CONFIGOPTS, FULLCYC_0, 29, 1) + FIELD(CONFIGOPTS, CPHA_0, 30, 1) + FIELD(CONFIGOPTS, CPOL_0, 31, 1) +REG32(CSID, 0x1c) + FIELD(CSID, CSID, 0, 32) +REG32(COMMAND, 0x20) + FIELD(COMMAND, LEN, 0, 8) + FIELD(COMMAND, CSAAT, 9, 1) + FIELD(COMMAND, SPEED, 10, 2) + FIELD(COMMAND, DIRECTION, 12, 2) +REG32(ERROR_ENABLE, 0x2c) + FIELD(ERROR_ENABLE, CMDBUSY, 0, 1) + FIELD(ERROR_ENABLE, OVERFLOW, 1, 1) + FIELD(ERROR_ENABLE, UNDERFLOW, 2, 1) + FIELD(ERROR_ENABLE, CMDINVAL, 3, 1) + FIELD(ERROR_ENABLE, CSIDINVAL, 4, 1) +REG32(ERROR_STATUS, 0x30) + FIELD(ERROR_STATUS, CMDBUSY, 0, 1) + FIELD(ERROR_STATUS, OVERFLOW, 1, 1) + FIELD(ERROR_STATUS, UNDERFLOW, 2, 1) + FIELD(ERROR_STATUS, CMDINVAL, 3, 1) + FIELD(ERROR_STATUS, CSIDINVAL, 4, 1) + FIELD(ERROR_STATUS, ACCESSINVAL, 5, 1) +REG32(EVENT_ENABLE, 0x34) + FIELD(EVENT_ENABLE, RXFULL, 0, 1) + FIELD(EVENT_ENABLE, TXEMPTY, 1, 1) + FIELD(EVENT_ENABLE, RXWM, 2, 1) + FIELD(EVENT_ENABLE, TXWM, 3, 1) + FIELD(EVENT_ENABLE, READY, 4, 1) + FIELD(EVENT_ENABLE, IDLE, 5, 1) + +static inline uint8_t div4_round_up(uint8_t dividend) +{ + return (dividend + 3) / 4; +} + +static void ibex_spi_rxfifo_reset(IbexSPIHostState *s) +{ + uint32_t data = s->regs[IBEX_SPI_HOST_STATUS]; + /* Empty the RX FIFO and assert RXEMPTY */ + fifo8_reset(&s->rx_fifo); + data = FIELD_DP32(data, STATUS, RXFULL, 0); + data = FIELD_DP32(data, STATUS, RXEMPTY, 1); + s->regs[IBEX_SPI_HOST_STATUS] = data; +} + +static void ibex_spi_txfifo_reset(IbexSPIHostState *s) +{ + uint32_t data = s->regs[IBEX_SPI_HOST_STATUS]; + /* Empty the TX FIFO and assert TXEMPTY */ + fifo8_reset(&s->tx_fifo); + data = FIELD_DP32(data, STATUS, TXFULL, 0); + data = FIELD_DP32(data, STATUS, TXEMPTY, 1); + s->regs[IBEX_SPI_HOST_STATUS] = data; +} + +static void ibex_spi_host_reset(DeviceState *dev) +{ + IbexSPIHostState *s = IBEX_SPI_HOST(dev); + 
trace_ibex_spi_host_reset("Resetting Ibex SPI"); + + /* SPI Host Register Reset */ + s->regs[IBEX_SPI_HOST_INTR_STATE] = 0x00; + s->regs[IBEX_SPI_HOST_INTR_ENABLE] = 0x00; + s->regs[IBEX_SPI_HOST_INTR_TEST] = 0x00; + s->regs[IBEX_SPI_HOST_ALERT_TEST] = 0x00; + s->regs[IBEX_SPI_HOST_CONTROL] = 0x7f; + s->regs[IBEX_SPI_HOST_STATUS] = 0x00; + s->regs[IBEX_SPI_HOST_CONFIGOPTS] = 0x00; + s->regs[IBEX_SPI_HOST_CSID] = 0x00; + s->regs[IBEX_SPI_HOST_COMMAND] = 0x00; + /* RX/TX Modelled by FIFO */ + s->regs[IBEX_SPI_HOST_RXDATA] = 0x00; + s->regs[IBEX_SPI_HOST_TXDATA] = 0x00; + + s->regs[IBEX_SPI_HOST_ERROR_ENABLE] = 0x1F; + s->regs[IBEX_SPI_HOST_ERROR_STATUS] = 0x00; + s->regs[IBEX_SPI_HOST_EVENT_ENABLE] = 0x00; + + ibex_spi_rxfifo_reset(s); + ibex_spi_txfifo_reset(s); + + s->init_status = true; + return; +} + +/* + * Check if we need to trigger an interrupt. + * The two interrupts lines (host_err and event) can + * be enabled separately in 'IBEX_SPI_HOST_INTR_ENABLE'. + * + * Interrupts are triggered based on the ones + * enabled in the `IBEX_SPI_HOST_EVENT_ENABLE` and `IBEX_SPI_HOST_ERROR_ENABLE`. + */ +static void ibex_spi_host_irq(IbexSPIHostState *s) +{ + uint32_t intr_test_reg = s->regs[IBEX_SPI_HOST_INTR_TEST]; + uint32_t intr_en_reg = s->regs[IBEX_SPI_HOST_INTR_ENABLE]; + uint32_t intr_state_reg = s->regs[IBEX_SPI_HOST_INTR_STATE]; + + uint32_t err_en_reg = s->regs[IBEX_SPI_HOST_ERROR_ENABLE]; + uint32_t event_en_reg = s->regs[IBEX_SPI_HOST_EVENT_ENABLE]; + uint32_t err_status_reg = s->regs[IBEX_SPI_HOST_ERROR_STATUS]; + uint32_t status_reg = s->regs[IBEX_SPI_HOST_STATUS]; + + + bool error_en = FIELD_EX32(intr_en_reg, INTR_ENABLE, ERROR); + bool event_en = FIELD_EX32(intr_en_reg, INTR_ENABLE, SPI_EVENT); + bool err_pending = FIELD_EX32(intr_state_reg, INTR_STATE, ERROR); + bool status_pending = FIELD_EX32(intr_state_reg, INTR_STATE, SPI_EVENT); + + int err_irq = 0, event_irq = 0; + + /* Error IRQ enabled and Error IRQ Cleared */ + if (error_en && !err_pending) { + /* Event enabled, Interrupt Test Error */ + if (FIELD_EX32(intr_test_reg, INTR_TEST, ERROR)) { + err_irq = 1; + } else if (FIELD_EX32(err_en_reg, ERROR_ENABLE, CMDBUSY) && + FIELD_EX32(err_status_reg, ERROR_STATUS, CMDBUSY)) { + /* Wrote to COMMAND when not READY */ + err_irq = 1; + } else if (FIELD_EX32(err_en_reg, ERROR_ENABLE, CMDINVAL) && + FIELD_EX32(err_status_reg, ERROR_STATUS, CMDINVAL)) { + /* Invalid command segment */ + err_irq = 1; + } else if (FIELD_EX32(err_en_reg, ERROR_ENABLE, CSIDINVAL) && + FIELD_EX32(err_status_reg, ERROR_STATUS, CSIDINVAL)) { + /* Invalid value for CSID */ + err_irq = 1; + } + if (err_irq) { + s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_ERROR_MASK; + } + qemu_set_irq(s->host_err, err_irq); + } + + /* Event IRQ Enabled and Event IRQ Cleared */ + if (event_en && !status_pending) { + if (FIELD_EX32(intr_test_reg, INTR_STATE, SPI_EVENT)) { + /* Event enabled, Interrupt Test Event */ + event_irq = 1; + } else if (FIELD_EX32(event_en_reg, EVENT_ENABLE, READY) && + FIELD_EX32(status_reg, STATUS, READY)) { + /* SPI Host ready for next command */ + event_irq = 1; + } else if (FIELD_EX32(event_en_reg, EVENT_ENABLE, TXEMPTY) && + FIELD_EX32(status_reg, STATUS, TXEMPTY)) { + /* SPI TXEMPTY, TXFIFO drained */ + event_irq = 1; + } else if (FIELD_EX32(event_en_reg, EVENT_ENABLE, RXFULL) && + FIELD_EX32(status_reg, STATUS, RXFULL)) { + /* SPI RXFULL, RXFIFO full */ + event_irq = 1; + } + if (event_irq) { + s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_SPI_EVENT_MASK; + } + qemu_set_irq(s->event, 
event_irq); + } +} + +static void ibex_spi_host_transfer(IbexSPIHostState *s) +{ + uint32_t rx, tx, data; + /* Get num of one byte transfers */ + uint8_t segment_len = FIELD_EX32(s->regs[IBEX_SPI_HOST_COMMAND], + COMMAND, LEN); + + while (segment_len > 0) { + if (fifo8_is_empty(&s->tx_fifo)) { + /* Assert Stall */ + s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXSTALL_MASK; + break; + } else if (fifo8_is_full(&s->rx_fifo)) { + /* Assert Stall */ + s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXSTALL_MASK; + break; + } else { + tx = fifo8_pop(&s->tx_fifo); + } + + rx = ssi_transfer(s->ssi, tx); + + trace_ibex_spi_host_transfer(tx, rx); + + if (!fifo8_is_full(&s->rx_fifo)) { + fifo8_push(&s->rx_fifo, rx); + } else { + /* Assert RXFULL */ + s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXFULL_MASK; + } + --segment_len; + } + + data = s->regs[IBEX_SPI_HOST_STATUS]; + /* Assert Ready */ + data = FIELD_DP32(data, STATUS, READY, 1); + /* Set RXQD */ + data = FIELD_DP32(data, STATUS, RXQD, div4_round_up(segment_len)); + /* Set TXQD */ + data = FIELD_DP32(data, STATUS, TXQD, fifo8_num_used(&s->tx_fifo) / 4); + /* Clear TXFULL */ + data = FIELD_DP32(data, STATUS, TXFULL, 0); + /* Reset RXEMPTY */ + data = FIELD_DP32(data, STATUS, RXEMPTY, 0); + /* Update register status */ + s->regs[IBEX_SPI_HOST_STATUS] = data; + /* Drop remaining bytes that exceed segment_len */ + ibex_spi_txfifo_reset(s); + + ibex_spi_host_irq(s); +} + +static uint64_t ibex_spi_host_read(void *opaque, hwaddr addr, + unsigned int size) +{ + IbexSPIHostState *s = opaque; + uint32_t rc = 0; + uint8_t rx_byte = 0; + + trace_ibex_spi_host_read(addr, size); + + /* Match reg index */ + addr = addr >> 2; + switch (addr) { + /* Skipping any W/O registers */ + case IBEX_SPI_HOST_INTR_STATE...IBEX_SPI_HOST_INTR_ENABLE: + case IBEX_SPI_HOST_CONTROL...IBEX_SPI_HOST_STATUS: + rc = s->regs[addr]; + break; + case IBEX_SPI_HOST_CSID: + rc = s->regs[addr]; + break; + case IBEX_SPI_HOST_CONFIGOPTS: + rc = s->config_opts[s->regs[IBEX_SPI_HOST_CSID]]; + break; + case IBEX_SPI_HOST_TXDATA: + rc = s->regs[addr]; + break; + case IBEX_SPI_HOST_RXDATA: + /* Clear RXFULL */ + s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXFULL_MASK; + + for (int i = 0; i < 4; ++i) { + if (fifo8_is_empty(&s->rx_fifo)) { + /* Assert RXEMPTY, no IRQ */ + s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXEMPTY_MASK; + s->regs[IBEX_SPI_HOST_ERROR_STATUS] |= + R_ERROR_STATUS_UNDERFLOW_MASK; + return rc; + } + rx_byte = fifo8_pop(&s->rx_fifo); + rc |= rx_byte << (i * 8); + } + break; + case IBEX_SPI_HOST_ERROR_ENABLE...IBEX_SPI_HOST_EVENT_ENABLE: + rc = s->regs[addr]; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "Bad offset 0x%" HWADDR_PRIx "\n", + addr << 2); + } + return rc; +} + + +static void ibex_spi_host_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + IbexSPIHostState *s = opaque; + uint32_t val32 = val64; + uint32_t shift_mask = 0xff, status = 0, data = 0; + uint8_t txqd_len; + + trace_ibex_spi_host_write(addr, size, val64); + + /* Match reg index */ + addr = addr >> 2; + + switch (addr) { + /* Skipping any R/O registers */ + case IBEX_SPI_HOST_INTR_STATE: + /* rw1c status register */ + if (FIELD_EX32(val32, INTR_STATE, ERROR)) { + data = FIELD_DP32(data, INTR_STATE, ERROR, 0); + } + if (FIELD_EX32(val32, INTR_STATE, SPI_EVENT)) { + data = FIELD_DP32(data, INTR_STATE, SPI_EVENT, 0); + } + s->regs[addr] = data; + break; + case IBEX_SPI_HOST_INTR_ENABLE: + s->regs[addr] = val32; + break; + case IBEX_SPI_HOST_INTR_TEST: + s->regs[addr] = val32; + 
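/* Test bits take effect immediately through the interrupt update below */ +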
ibex_spi_host_irq(s); + break; + case IBEX_SPI_HOST_ALERT_TEST: + s->regs[addr] = val32; + qemu_log_mask(LOG_UNIMP, + "%s: SPI_ALERT_TEST is not supported\n", __func__); + break; + case IBEX_SPI_HOST_CONTROL: + s->regs[addr] = val32; + + if (val32 & R_CONTROL_SW_RST_MASK) { + ibex_spi_host_reset((DeviceState *)s); + /* Clear active if any */ + s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_ACTIVE_MASK; + } + + if (val32 & R_CONTROL_OUTPUT_EN_MASK) { + qemu_log_mask(LOG_UNIMP, + "%s: CONTROL_OUTPUT_EN is not supported\n", __func__); + } + break; + case IBEX_SPI_HOST_CONFIGOPTS: + /* Update the respective config-opts register based on CSIDth index */ + s->config_opts[s->regs[IBEX_SPI_HOST_CSID]] = val32; + qemu_log_mask(LOG_UNIMP, + "%s: CONFIGOPTS Hardware settings not supported\n", + __func__); + break; + case IBEX_SPI_HOST_CSID: + if (val32 >= s->num_cs) { + /* CSID exceeds max num_cs */ + s->regs[IBEX_SPI_HOST_ERROR_STATUS] |= + R_ERROR_STATUS_CSIDINVAL_MASK; + ibex_spi_host_irq(s); + return; + } + s->regs[addr] = val32; + break; + case IBEX_SPI_HOST_COMMAND: + s->regs[addr] = val32; + + /* STALL, IP not enabled */ + if (!(FIELD_EX32(s->regs[IBEX_SPI_HOST_CONTROL], + CONTROL, SPIEN))) { + return; + } + + /* SPI not ready, IRQ Error */ + if (!(FIELD_EX32(s->regs[IBEX_SPI_HOST_STATUS], + STATUS, READY))) { + s->regs[IBEX_SPI_HOST_ERROR_STATUS] |= R_ERROR_STATUS_CMDBUSY_MASK; + ibex_spi_host_irq(s); + return; + } + + /* Assert Not Ready */ + s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_READY_MASK; + + if (FIELD_EX32(val32, COMMAND, DIRECTION) != BIDIRECTIONAL_TRANSFER) { + qemu_log_mask(LOG_UNIMP, + "%s: Rx Only/Tx Only are not supported\n", __func__); + } + + if (val32 & R_COMMAND_CSAAT_MASK) { + qemu_log_mask(LOG_UNIMP, + "%s: CSAAT is not supported\n", __func__); + } + if (val32 & R_COMMAND_SPEED_MASK) { + qemu_log_mask(LOG_UNIMP, + "%s: SPEED is not supported\n", __func__); + } + + /* Set Transfer Callback */ + timer_mod(s->fifo_trigger_handle, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + (TX_INTERRUPT_TRIGGER_DELAY_NS)); + + break; + case IBEX_SPI_HOST_TXDATA: + /* + * This is a hardware `feature` where + * the first word written to TXDATA after init is omitted entirely + */ + if (s->init_status) { + s->init_status = false; + return; + } + + for (int i = 0; i < 4; ++i) { + /* Attempting to write when TXFULL */ + if (fifo8_is_full(&s->tx_fifo)) { + /* Assert RXEMPTY, no IRQ */ + s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXFULL_MASK; + s->regs[IBEX_SPI_HOST_ERROR_STATUS] |= + R_ERROR_STATUS_OVERFLOW_MASK; + ibex_spi_host_irq(s); + return; + } + /* Byte ordering is set by the IP */ + status = s->regs[IBEX_SPI_HOST_STATUS]; + if (FIELD_EX32(status, STATUS, BYTEORDER) == 0) { + /* LE: LSB transmitted first (default for ibex processor) */ + shift_mask = 0xff << (i * 8); + } else { + /* BE: MSB transmitted first */ + qemu_log_mask(LOG_UNIMP, + "%s: Big endian is not supported\n", __func__); + } + + fifo8_push(&s->tx_fifo, (val32 & shift_mask) >> (i * 8)); + } + status = s->regs[IBEX_SPI_HOST_STATUS]; + /* Reset TXEMPTY */ + status = FIELD_DP32(status, STATUS, TXEMPTY, 0); + /* Update TXQD */ + txqd_len = FIELD_EX32(status, STATUS, TXQD); + /* Partial bytes (size < 4) are padded, in words. 
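Each 32-bit TXDATA write therefore counts as one full word in TXQD.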
*/ + txqd_len += 1; + status = FIELD_DP32(status, STATUS, TXQD, txqd_len); + /* Assert Ready */ + status = FIELD_DP32(status, STATUS, READY, 1); + /* Update register status */ + s->regs[IBEX_SPI_HOST_STATUS] = status; + break; + case IBEX_SPI_HOST_ERROR_ENABLE: + s->regs[addr] = val32; + + if (val32 & R_ERROR_ENABLE_CMDINVAL_MASK) { + qemu_log_mask(LOG_UNIMP, + "%s: Segment Length is not supported\n", __func__); + } + break; + case IBEX_SPI_HOST_ERROR_STATUS: + /* + * Indicates any errors that have occurred. + * When an error occurs, the corresponding bit must be cleared + * here before issuing any further commands + */ + status = s->regs[addr]; + /* rw1c status register */ + if (FIELD_EX32(val32, ERROR_STATUS, CMDBUSY)) { + status = FIELD_DP32(status, ERROR_STATUS, CMDBUSY, 0); + } + if (FIELD_EX32(val32, ERROR_STATUS, OVERFLOW)) { + status = FIELD_DP32(status, ERROR_STATUS, OVERFLOW, 0); + } + if (FIELD_EX32(val32, ERROR_STATUS, UNDERFLOW)) { + status = FIELD_DP32(status, ERROR_STATUS, UNDERFLOW, 0); + } + if (FIELD_EX32(val32, ERROR_STATUS, CMDINVAL)) { + status = FIELD_DP32(status, ERROR_STATUS, CMDINVAL, 0); + } + if (FIELD_EX32(val32, ERROR_STATUS, CSIDINVAL)) { + status = FIELD_DP32(status, ERROR_STATUS, CSIDINVAL, 0); + } + if (FIELD_EX32(val32, ERROR_STATUS, ACCESSINVAL)) { + status = FIELD_DP32(status, ERROR_STATUS, ACCESSINVAL, 0); + } + s->regs[addr] = status; + break; + case IBEX_SPI_HOST_EVENT_ENABLE: + /* Controls which classes of SPI events raise an interrupt. */ + s->regs[addr] = val32; + + if (val32 & R_EVENT_ENABLE_RXWM_MASK) { + qemu_log_mask(LOG_UNIMP, + "%s: RXWM is not supported\n", __func__); + } + if (val32 & R_EVENT_ENABLE_TXWM_MASK) { + qemu_log_mask(LOG_UNIMP, + "%s: TXWM is not supported\n", __func__); + } + + if (val32 & R_EVENT_ENABLE_IDLE_MASK) { + qemu_log_mask(LOG_UNIMP, + "%s: IDLE is not supported\n", __func__); + } + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "Bad offset 0x%" HWADDR_PRIx "\n", + addr << 2); + } +} + +static const MemoryRegionOps ibex_spi_ops = { + .read = ibex_spi_host_read, + .write = ibex_spi_host_write, + /* Ibex default LE */ + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static Property ibex_spi_properties[] = { + DEFINE_PROP_UINT32("num_cs", IbexSPIHostState, num_cs, 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_ibex = { + .name = TYPE_IBEX_SPI_HOST, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, IbexSPIHostState, IBEX_SPI_HOST_MAX_REGS), + VMSTATE_VARRAY_UINT32(config_opts, IbexSPIHostState, + num_cs, 0, vmstate_info_uint32, uint32_t), + VMSTATE_FIFO8(rx_fifo, IbexSPIHostState), + VMSTATE_FIFO8(tx_fifo, IbexSPIHostState), + VMSTATE_TIMER_PTR(fifo_trigger_handle, IbexSPIHostState), + VMSTATE_BOOL(init_status, IbexSPIHostState), + VMSTATE_END_OF_LIST() + } +}; + +static void fifo_trigger_update(void *opaque) +{ + IbexSPIHostState *s = opaque; + ibex_spi_host_transfer(s); +} + +static void ibex_spi_host_realize(DeviceState *dev, Error **errp) +{ + IbexSPIHostState *s = IBEX_SPI_HOST(dev); + int i; + + s->ssi = ssi_create_bus(dev, "ssi"); + s->cs_lines = g_new0(qemu_irq, s->num_cs); + + for (i = 0; i < s->num_cs; ++i) { + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->cs_lines[i]); + } + + /* Setup CONFIGOPTS Multi-register */ + s->config_opts = g_new0(uint32_t, s->num_cs); + + /* Setup FIFO Interrupt Timer */ + s->fifo_trigger_handle = timer_new_ns(QEMU_CLOCK_VIRTUAL, + fifo_trigger_update, s); + + /* FIFO sizes as per OT Spec */ + 
fifo8_create(&s->tx_fifo, IBEX_SPI_HOST_TXFIFO_LEN); + fifo8_create(&s->rx_fifo, IBEX_SPI_HOST_RXFIFO_LEN); +} + +static void ibex_spi_host_init(Object *obj) +{ + IbexSPIHostState *s = IBEX_SPI_HOST(obj); + + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->host_err); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->event); + + memory_region_init_io(&s->mmio, obj, &ibex_spi_ops, s, + TYPE_IBEX_SPI_HOST, 0x1000); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); +} + +static void ibex_spi_host_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = ibex_spi_host_realize; + dc->reset = ibex_spi_host_reset; + dc->vmsd = &vmstate_ibex; + device_class_set_props(dc, ibex_spi_properties); +} + +static const TypeInfo ibex_spi_host_info = { + .name = TYPE_IBEX_SPI_HOST, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IbexSPIHostState), + .instance_init = ibex_spi_host_init, + .class_init = ibex_spi_host_class_init, +}; + +static void ibex_spi_host_register_types(void) +{ + type_register_static(&ibex_spi_host_info); +} + +type_init(ibex_spi_host_register_types) diff --git a/hw/ssi/meson.build b/hw/ssi/meson.build index 3d6bc82ab1..702aa5e4df 100644 --- a/hw/ssi/meson.build +++ b/hw/ssi/meson.build @@ -7,5 +7,7 @@ softmmu_ss.add(when: 'CONFIG_SSI', if_true: files('ssi.c')) softmmu_ss.add(when: 'CONFIG_STM32F2XX_SPI', if_true: files('stm32f2xx_spi.c')) softmmu_ss.add(when: 'CONFIG_XILINX_SPI', if_true: files('xilinx_spi.c')) softmmu_ss.add(when: 'CONFIG_XILINX_SPIPS', if_true: files('xilinx_spips.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-ospi.c')) softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_spi.c')) softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_spi.c')) +softmmu_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_spi_host.c')) diff --git a/hw/ssi/ssi.c b/hw/ssi/ssi.c index e5d7ce9523..d54a109bee 100644 --- a/hw/ssi/ssi.c +++ b/hw/ssi/ssi.c @@ -38,9 +38,8 @@ static void ssi_cs_default(void *opaque, int n, int level) bool cs = !!level; assert(n == 0); if (s->cs != cs) { - SSIPeripheralClass *ssc = SSI_PERIPHERAL_GET_CLASS(s); - if (ssc->set_cs) { - ssc->set_cs(s, cs); + if (s->spc->set_cs) { + s->spc->set_cs(s, cs); } } s->cs = cs; @@ -48,11 +47,11 @@ static void ssi_cs_default(void *opaque, int n, int level) static uint32_t ssi_transfer_raw_default(SSIPeripheral *dev, uint32_t val) { - SSIPeripheralClass *ssc = SSI_PERIPHERAL_GET_CLASS(dev); + SSIPeripheralClass *ssc = dev->spc; if ((dev->cs && ssc->cs_polarity == SSI_CS_HIGH) || - (!dev->cs && ssc->cs_polarity == SSI_CS_LOW) || - ssc->cs_polarity == SSI_CS_NONE) { + (!dev->cs && ssc->cs_polarity == SSI_CS_LOW) || + ssc->cs_polarity == SSI_CS_NONE) { return ssc->transfer(dev, val); } return 0; @@ -67,6 +66,7 @@ static void ssi_peripheral_realize(DeviceState *dev, Error **errp) ssc->cs_polarity != SSI_CS_NONE) { qdev_init_gpio_in_named(dev, ssi_cs_default, SSI_GPIO_CS, 1); } + s->spc = ssc; ssc->realize(s, errp); } @@ -107,7 +107,7 @@ DeviceState *ssi_create_peripheral(SSIBus *bus, const char *name) SSIBus *ssi_create_bus(DeviceState *parent, const char *name) { BusState *bus; - bus = qbus_create(TYPE_SSI_BUS, parent, name); + bus = qbus_new(TYPE_SSI_BUS, parent, name); return SSI_BUS(bus); } @@ -115,13 +115,11 @@ uint32_t ssi_transfer(SSIBus *bus, uint32_t val) { BusState *b = BUS(bus); BusChild *kid; - SSIPeripheralClass *ssc; uint32_t r = 0; QTAILQ_FOREACH(kid, &b->children, sibling) { - SSIPeripheral *peripheral = SSI_PERIPHERAL(kid->child); - ssc = 
SSI_PERIPHERAL_GET_CLASS(peripheral); - r |= ssc->transfer_raw(peripheral, val); + SSIPeripheral *p = SSI_PERIPHERAL(kid->child); + r |= p->spc->transfer_raw(p, val); } return r; diff --git a/hw/ssi/trace-events b/hw/ssi/trace-events index 612d3d6087..c707d4aaba 100644 --- a/hw/ssi/trace-events +++ b/hw/ssi/trace-events @@ -20,3 +20,10 @@ npcm7xx_fiu_ctrl_read(const char *id, uint64_t addr, uint32_t data) "%s offset: npcm7xx_fiu_ctrl_write(const char *id, uint64_t addr, uint32_t data) "%s offset: 0x%04" PRIx64 " value: 0x%08" PRIx32 npcm7xx_fiu_flash_read(const char *id, int cs, uint64_t addr, unsigned int size, uint64_t value) "%s[%d] offset: 0x%08" PRIx64 " size: %u value: 0x%" PRIx64 npcm7xx_fiu_flash_write(const char *id, unsigned cs, uint64_t addr, unsigned int size, uint64_t value) "%s[%d] offset: 0x%08" PRIx64 " size: %u value: 0x%" PRIx64 + +# ibex_spi_host.c + +ibex_spi_host_reset(const char *msg) "%s" +ibex_spi_host_transfer(uint32_t tx_data, uint32_t rx_data) "tx_data: 0x%" PRIx32 " rx_data: @0x%" PRIx32 +ibex_spi_host_write(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size %u: 0x%" PRIx64 +ibex_spi_host_read(uint64_t addr, uint32_t size) "@0x%" PRIx64 " size %u:" diff --git a/hw/ssi/xlnx-versal-ospi.c b/hw/ssi/xlnx-versal-ospi.c new file mode 100644 index 0000000000..c762e0b367 --- /dev/null +++ b/hw/ssi/xlnx-versal-ospi.c @@ -0,0 +1,1852 @@ +/* + * QEMU model of Xilinx Versal's OSPI controller. + * + * Copyright (c) 2021 Xilinx Inc. + * Written by Francisco Iglesias + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "hw/irq.h" +#include "hw/ssi/xlnx-versal-ospi.h" + +#ifndef XILINX_VERSAL_OSPI_ERR_DEBUG +#define XILINX_VERSAL_OSPI_ERR_DEBUG 0 +#endif + +REG32(CONFIG_REG, 0x0) + FIELD(CONFIG_REG, IDLE_FLD, 31, 1) + FIELD(CONFIG_REG, DUAL_BYTE_OPCODE_EN_FLD, 30, 1) + FIELD(CONFIG_REG, CRC_ENABLE_FLD, 29, 1) + FIELD(CONFIG_REG, CONFIG_RESV2_FLD, 26, 3) + FIELD(CONFIG_REG, PIPELINE_PHY_FLD, 25, 1) + FIELD(CONFIG_REG, ENABLE_DTR_PROTOCOL_FLD, 24, 1) + FIELD(CONFIG_REG, ENABLE_AHB_DECODER_FLD, 23, 1) + FIELD(CONFIG_REG, MSTR_BAUD_DIV_FLD, 19, 4) + FIELD(CONFIG_REG, ENTER_XIP_MODE_IMM_FLD, 18, 1) + FIELD(CONFIG_REG, ENTER_XIP_MODE_FLD, 17, 1) + FIELD(CONFIG_REG, ENB_AHB_ADDR_REMAP_FLD, 16, 1) + FIELD(CONFIG_REG, ENB_DMA_IF_FLD, 15, 1) + FIELD(CONFIG_REG, WR_PROT_FLASH_FLD, 14, 1) + FIELD(CONFIG_REG, PERIPH_CS_LINES_FLD, 10, 4) + FIELD(CONFIG_REG, PERIPH_SEL_DEC_FLD, 9, 1) + FIELD(CONFIG_REG, ENB_LEGACY_IP_MODE_FLD, 8, 1) + FIELD(CONFIG_REG, ENB_DIR_ACC_CTLR_FLD, 7, 1) + FIELD(CONFIG_REG, RESET_CFG_FLD, 6, 1) + FIELD(CONFIG_REG, RESET_PIN_FLD, 5, 1) + FIELD(CONFIG_REG, HOLD_PIN_FLD, 4, 1) + FIELD(CONFIG_REG, PHY_MODE_ENABLE_FLD, 3, 1) + FIELD(CONFIG_REG, SEL_CLK_PHASE_FLD, 2, 1) + FIELD(CONFIG_REG, SEL_CLK_POL_FLD, 1, 1) + FIELD(CONFIG_REG, ENB_SPI_FLD, 0, 1) +REG32(DEV_INSTR_RD_CONFIG_REG, 0x4) + FIELD(DEV_INSTR_RD_CONFIG_REG, RD_INSTR_RESV5_FLD, 29, 3) + FIELD(DEV_INSTR_RD_CONFIG_REG, DUMMY_RD_CLK_CYCLES_FLD, 24, 5) + FIELD(DEV_INSTR_RD_CONFIG_REG, RD_INSTR_RESV4_FLD, 21, 3) + FIELD(DEV_INSTR_RD_CONFIG_REG, MODE_BIT_ENABLE_FLD, 20, 1) + FIELD(DEV_INSTR_RD_CONFIG_REG, RD_INSTR_RESV3_FLD, 18, 2) + FIELD(DEV_INSTR_RD_CONFIG_REG, DATA_XFER_TYPE_EXT_MODE_FLD, 16, 2) + FIELD(DEV_INSTR_RD_CONFIG_REG, RD_INSTR_RESV2_FLD, 14, 2) + FIELD(DEV_INSTR_RD_CONFIG_REG, ADDR_XFER_TYPE_STD_MODE_FLD, 12, 2) + FIELD(DEV_INSTR_RD_CONFIG_REG, PRED_DIS_FLD, 11, 1) + FIELD(DEV_INSTR_RD_CONFIG_REG, DDR_EN_FLD, 10, 1) + FIELD(DEV_INSTR_RD_CONFIG_REG, INSTR_TYPE_FLD, 8, 2) + FIELD(DEV_INSTR_RD_CONFIG_REG, RD_OPCODE_NON_XIP_FLD, 0, 8) +REG32(DEV_INSTR_WR_CONFIG_REG, 0x8) + FIELD(DEV_INSTR_WR_CONFIG_REG, WR_INSTR_RESV4_FLD, 29, 3) + FIELD(DEV_INSTR_WR_CONFIG_REG, DUMMY_WR_CLK_CYCLES_FLD, 24, 5) + FIELD(DEV_INSTR_WR_CONFIG_REG, WR_INSTR_RESV3_FLD, 18, 6) + FIELD(DEV_INSTR_WR_CONFIG_REG, DATA_XFER_TYPE_EXT_MODE_FLD, 16, 2) + FIELD(DEV_INSTR_WR_CONFIG_REG, WR_INSTR_RESV2_FLD, 14, 2) + FIELD(DEV_INSTR_WR_CONFIG_REG, ADDR_XFER_TYPE_STD_MODE_FLD, 12, 2) + FIELD(DEV_INSTR_WR_CONFIG_REG, WR_INSTR_RESV1_FLD, 9, 3) + FIELD(DEV_INSTR_WR_CONFIG_REG, WEL_DIS_FLD, 8, 1) + FIELD(DEV_INSTR_WR_CONFIG_REG, WR_OPCODE_FLD, 0, 8) +REG32(DEV_DELAY_REG, 0xc) + FIELD(DEV_DELAY_REG, D_NSS_FLD, 24, 8) + FIELD(DEV_DELAY_REG, D_BTWN_FLD, 16, 8) + FIELD(DEV_DELAY_REG, D_AFTER_FLD, 8, 8) + FIELD(DEV_DELAY_REG, D_INIT_FLD, 0, 8) +REG32(RD_DATA_CAPTURE_REG, 0x10) + FIELD(RD_DATA_CAPTURE_REG, RD_DATA_RESV3_FLD, 20, 12) + FIELD(RD_DATA_CAPTURE_REG, DDR_READ_DELAY_FLD, 16, 4) + FIELD(RD_DATA_CAPTURE_REG, RD_DATA_RESV2_FLD, 9, 7) + FIELD(RD_DATA_CAPTURE_REG, DQS_ENABLE_FLD, 8, 1) + FIELD(RD_DATA_CAPTURE_REG, RD_DATA_RESV1_FLD, 6, 2) + FIELD(RD_DATA_CAPTURE_REG, SAMPLE_EDGE_SEL_FLD, 5, 1) + FIELD(RD_DATA_CAPTURE_REG, DELAY_FLD, 1, 4) + FIELD(RD_DATA_CAPTURE_REG, BYPASS_FLD, 0, 1) +REG32(DEV_SIZE_CONFIG_REG, 0x14) + FIELD(DEV_SIZE_CONFIG_REG, DEV_SIZE_RESV_FLD, 29, 3) + FIELD(DEV_SIZE_CONFIG_REG, 
MEM_SIZE_ON_CS3_FLD, 27, 2) + FIELD(DEV_SIZE_CONFIG_REG, MEM_SIZE_ON_CS2_FLD, 25, 2) + FIELD(DEV_SIZE_CONFIG_REG, MEM_SIZE_ON_CS1_FLD, 23, 2) + FIELD(DEV_SIZE_CONFIG_REG, MEM_SIZE_ON_CS0_FLD, 21, 2) + FIELD(DEV_SIZE_CONFIG_REG, BYTES_PER_SUBSECTOR_FLD, 16, 5) + FIELD(DEV_SIZE_CONFIG_REG, BYTES_PER_DEVICE_PAGE_FLD, 4, 12) + FIELD(DEV_SIZE_CONFIG_REG, NUM_ADDR_BYTES_FLD, 0, 4) +REG32(SRAM_PARTITION_CFG_REG, 0x18) + FIELD(SRAM_PARTITION_CFG_REG, SRAM_PARTITION_RESV_FLD, 8, 24) + FIELD(SRAM_PARTITION_CFG_REG, ADDR_FLD, 0, 8) +REG32(IND_AHB_ADDR_TRIGGER_REG, 0x1c) +REG32(DMA_PERIPH_CONFIG_REG, 0x20) + FIELD(DMA_PERIPH_CONFIG_REG, DMA_PERIPH_RESV2_FLD, 12, 20) + FIELD(DMA_PERIPH_CONFIG_REG, NUM_BURST_REQ_BYTES_FLD, 8, 4) + FIELD(DMA_PERIPH_CONFIG_REG, DMA_PERIPH_RESV1_FLD, 4, 4) + FIELD(DMA_PERIPH_CONFIG_REG, NUM_SINGLE_REQ_BYTES_FLD, 0, 4) +REG32(REMAP_ADDR_REG, 0x24) +REG32(MODE_BIT_CONFIG_REG, 0x28) + FIELD(MODE_BIT_CONFIG_REG, RX_CRC_DATA_LOW_FLD, 24, 8) + FIELD(MODE_BIT_CONFIG_REG, RX_CRC_DATA_UP_FLD, 16, 8) + FIELD(MODE_BIT_CONFIG_REG, CRC_OUT_ENABLE_FLD, 15, 1) + FIELD(MODE_BIT_CONFIG_REG, MODE_BIT_RESV1_FLD, 11, 4) + FIELD(MODE_BIT_CONFIG_REG, CHUNK_SIZE_FLD, 8, 3) + FIELD(MODE_BIT_CONFIG_REG, MODE_FLD, 0, 8) +REG32(SRAM_FILL_REG, 0x2c) + FIELD(SRAM_FILL_REG, SRAM_FILL_INDAC_WRITE_FLD, 16, 16) + FIELD(SRAM_FILL_REG, SRAM_FILL_INDAC_READ_FLD, 0, 16) +REG32(TX_THRESH_REG, 0x30) + FIELD(TX_THRESH_REG, TX_THRESH_RESV_FLD, 5, 27) + FIELD(TX_THRESH_REG, LEVEL_FLD, 0, 5) +REG32(RX_THRESH_REG, 0x34) + FIELD(RX_THRESH_REG, RX_THRESH_RESV_FLD, 5, 27) + FIELD(RX_THRESH_REG, LEVEL_FLD, 0, 5) +REG32(WRITE_COMPLETION_CTRL_REG, 0x38) + FIELD(WRITE_COMPLETION_CTRL_REG, POLL_REP_DELAY_FLD, 24, 8) + FIELD(WRITE_COMPLETION_CTRL_REG, POLL_COUNT_FLD, 16, 8) + FIELD(WRITE_COMPLETION_CTRL_REG, ENABLE_POLLING_EXP_FLD, 15, 1) + FIELD(WRITE_COMPLETION_CTRL_REG, DISABLE_POLLING_FLD, 14, 1) + FIELD(WRITE_COMPLETION_CTRL_REG, POLLING_POLARITY_FLD, 13, 1) + FIELD(WRITE_COMPLETION_CTRL_REG, WR_COMP_CTRL_RESV1_FLD, 12, 1) + FIELD(WRITE_COMPLETION_CTRL_REG, POLLING_ADDR_EN_FLD, 11, 1) + FIELD(WRITE_COMPLETION_CTRL_REG, POLLING_BIT_INDEX_FLD, 8, 3) + FIELD(WRITE_COMPLETION_CTRL_REG, OPCODE_FLD, 0, 8) +REG32(NO_OF_POLLS_BEF_EXP_REG, 0x3c) +REG32(IRQ_STATUS_REG, 0x40) + FIELD(IRQ_STATUS_REG, IRQ_STAT_RESV_FLD, 20, 12) + FIELD(IRQ_STATUS_REG, ECC_FAIL_FLD, 19, 1) + FIELD(IRQ_STATUS_REG, TX_CRC_CHUNK_BRK_FLD, 18, 1) + FIELD(IRQ_STATUS_REG, RX_CRC_DATA_VAL_FLD, 17, 1) + FIELD(IRQ_STATUS_REG, RX_CRC_DATA_ERR_FLD, 16, 1) + FIELD(IRQ_STATUS_REG, IRQ_STAT_RESV1_FLD, 15, 1) + FIELD(IRQ_STATUS_REG, STIG_REQ_INT_FLD, 14, 1) + FIELD(IRQ_STATUS_REG, POLL_EXP_INT_FLD, 13, 1) + FIELD(IRQ_STATUS_REG, INDRD_SRAM_FULL_FLD, 12, 1) + FIELD(IRQ_STATUS_REG, RX_FIFO_FULL_FLD, 11, 1) + FIELD(IRQ_STATUS_REG, RX_FIFO_NOT_EMPTY_FLD, 10, 1) + FIELD(IRQ_STATUS_REG, TX_FIFO_FULL_FLD, 9, 1) + FIELD(IRQ_STATUS_REG, TX_FIFO_NOT_FULL_FLD, 8, 1) + FIELD(IRQ_STATUS_REG, RECV_OVERFLOW_FLD, 7, 1) + FIELD(IRQ_STATUS_REG, INDIRECT_XFER_LEVEL_BREACH_FLD, 6, 1) + FIELD(IRQ_STATUS_REG, ILLEGAL_ACCESS_DET_FLD, 5, 1) + FIELD(IRQ_STATUS_REG, PROT_WR_ATTEMPT_FLD, 4, 1) + FIELD(IRQ_STATUS_REG, INDIRECT_TRANSFER_REJECT_FLD, 3, 1) + FIELD(IRQ_STATUS_REG, INDIRECT_OP_DONE_FLD, 2, 1) + FIELD(IRQ_STATUS_REG, UNDERFLOW_DET_FLD, 1, 1) + FIELD(IRQ_STATUS_REG, MODE_M_FAIL_FLD, 0, 1) +REG32(IRQ_MASK_REG, 0x44) + FIELD(IRQ_MASK_REG, IRQ_MASK_RESV_FLD, 20, 12) + FIELD(IRQ_MASK_REG, ECC_FAIL_MASK_FLD, 19, 1) + FIELD(IRQ_MASK_REG, TX_CRC_CHUNK_BRK_MASK_FLD, 18, 1) + 
FIELD(IRQ_MASK_REG, RX_CRC_DATA_VAL_MASK_FLD, 17, 1) + FIELD(IRQ_MASK_REG, RX_CRC_DATA_ERR_MASK_FLD, 16, 1) + FIELD(IRQ_MASK_REG, IRQ_MASK_RESV1_FLD, 15, 1) + FIELD(IRQ_MASK_REG, STIG_REQ_MASK_FLD, 14, 1) + FIELD(IRQ_MASK_REG, POLL_EXP_INT_MASK_FLD, 13, 1) + FIELD(IRQ_MASK_REG, INDRD_SRAM_FULL_MASK_FLD, 12, 1) + FIELD(IRQ_MASK_REG, RX_FIFO_FULL_MASK_FLD, 11, 1) + FIELD(IRQ_MASK_REG, RX_FIFO_NOT_EMPTY_MASK_FLD, 10, 1) + FIELD(IRQ_MASK_REG, TX_FIFO_FULL_MASK_FLD, 9, 1) + FIELD(IRQ_MASK_REG, TX_FIFO_NOT_FULL_MASK_FLD, 8, 1) + FIELD(IRQ_MASK_REG, RECV_OVERFLOW_MASK_FLD, 7, 1) + FIELD(IRQ_MASK_REG, INDIRECT_XFER_LEVEL_BREACH_MASK_FLD, 6, 1) + FIELD(IRQ_MASK_REG, ILLEGAL_ACCESS_DET_MASK_FLD, 5, 1) + FIELD(IRQ_MASK_REG, PROT_WR_ATTEMPT_MASK_FLD, 4, 1) + FIELD(IRQ_MASK_REG, INDIRECT_TRANSFER_REJECT_MASK_FLD, 3, 1) + FIELD(IRQ_MASK_REG, INDIRECT_OP_DONE_MASK_FLD, 2, 1) + FIELD(IRQ_MASK_REG, UNDERFLOW_DET_MASK_FLD, 1, 1) + FIELD(IRQ_MASK_REG, MODE_M_FAIL_MASK_FLD, 0, 1) +REG32(LOWER_WR_PROT_REG, 0x50) +REG32(UPPER_WR_PROT_REG, 0x54) +REG32(WR_PROT_CTRL_REG, 0x58) + FIELD(WR_PROT_CTRL_REG, WR_PROT_CTRL_RESV_FLD, 2, 30) + FIELD(WR_PROT_CTRL_REG, ENB_FLD, 1, 1) + FIELD(WR_PROT_CTRL_REG, INV_FLD, 0, 1) +REG32(INDIRECT_READ_XFER_CTRL_REG, 0x60) + FIELD(INDIRECT_READ_XFER_CTRL_REG, INDIR_RD_XFER_RESV_FLD, 8, 24) + FIELD(INDIRECT_READ_XFER_CTRL_REG, NUM_IND_OPS_DONE_FLD, 6, 2) + FIELD(INDIRECT_READ_XFER_CTRL_REG, IND_OPS_DONE_STATUS_FLD, 5, 1) + FIELD(INDIRECT_READ_XFER_CTRL_REG, RD_QUEUED_FLD, 4, 1) + FIELD(INDIRECT_READ_XFER_CTRL_REG, SRAM_FULL_FLD, 3, 1) + FIELD(INDIRECT_READ_XFER_CTRL_REG, RD_STATUS_FLD, 2, 1) + FIELD(INDIRECT_READ_XFER_CTRL_REG, CANCEL_FLD, 1, 1) + FIELD(INDIRECT_READ_XFER_CTRL_REG, START_FLD, 0, 1) +REG32(INDIRECT_READ_XFER_WATERMARK_REG, 0x64) +REG32(INDIRECT_READ_XFER_START_REG, 0x68) +REG32(INDIRECT_READ_XFER_NUM_BYTES_REG, 0x6c) +REG32(INDIRECT_WRITE_XFER_CTRL_REG, 0x70) + FIELD(INDIRECT_WRITE_XFER_CTRL_REG, INDIR_WR_XFER_RESV2_FLD, 8, 24) + FIELD(INDIRECT_WRITE_XFER_CTRL_REG, NUM_IND_OPS_DONE_FLD, 6, 2) + FIELD(INDIRECT_WRITE_XFER_CTRL_REG, IND_OPS_DONE_STATUS_FLD, 5, 1) + FIELD(INDIRECT_WRITE_XFER_CTRL_REG, WR_QUEUED_FLD, 4, 1) + FIELD(INDIRECT_WRITE_XFER_CTRL_REG, INDIR_WR_XFER_RESV1_FLD, 3, 1) + FIELD(INDIRECT_WRITE_XFER_CTRL_REG, WR_STATUS_FLD, 2, 1) + FIELD(INDIRECT_WRITE_XFER_CTRL_REG, CANCEL_FLD, 1, 1) + FIELD(INDIRECT_WRITE_XFER_CTRL_REG, START_FLD, 0, 1) +REG32(INDIRECT_WRITE_XFER_WATERMARK_REG, 0x74) +REG32(INDIRECT_WRITE_XFER_START_REG, 0x78) +REG32(INDIRECT_WRITE_XFER_NUM_BYTES_REG, 0x7c) +REG32(INDIRECT_TRIGGER_ADDR_RANGE_REG, 0x80) + FIELD(INDIRECT_TRIGGER_ADDR_RANGE_REG, IND_RANGE_RESV1_FLD, 4, 28) + FIELD(INDIRECT_TRIGGER_ADDR_RANGE_REG, IND_RANGE_WIDTH_FLD, 0, 4) +REG32(FLASH_COMMAND_CTRL_MEM_REG, 0x8c) + FIELD(FLASH_COMMAND_CTRL_MEM_REG, FLASH_COMMAND_CTRL_MEM_RESV1_FLD, 29, 3) + FIELD(FLASH_COMMAND_CTRL_MEM_REG, MEM_BANK_ADDR_FLD, 20, 9) + FIELD(FLASH_COMMAND_CTRL_MEM_REG, FLASH_COMMAND_CTRL_MEM_RESV2_FLD, 19, 1) + FIELD(FLASH_COMMAND_CTRL_MEM_REG, NB_OF_STIG_READ_BYTES_FLD, 16, 3) + FIELD(FLASH_COMMAND_CTRL_MEM_REG, MEM_BANK_READ_DATA_FLD, 8, 8) + FIELD(FLASH_COMMAND_CTRL_MEM_REG, FLASH_COMMAND_CTRL_MEM_RESV3_FLD, 2, 6) + FIELD(FLASH_COMMAND_CTRL_MEM_REG, MEM_BANK_REQ_IN_PROGRESS_FLD, 1, 1) + FIELD(FLASH_COMMAND_CTRL_MEM_REG, TRIGGER_MEM_BANK_REQ_FLD, 0, 1) +REG32(FLASH_CMD_CTRL_REG, 0x90) + FIELD(FLASH_CMD_CTRL_REG, CMD_OPCODE_FLD, 24, 8) + FIELD(FLASH_CMD_CTRL_REG, ENB_READ_DATA_FLD, 23, 1) + FIELD(FLASH_CMD_CTRL_REG, NUM_RD_DATA_BYTES_FLD, 20, 3) + 
FIELD(FLASH_CMD_CTRL_REG, ENB_COMD_ADDR_FLD, 19, 1) + FIELD(FLASH_CMD_CTRL_REG, ENB_MODE_BIT_FLD, 18, 1) + FIELD(FLASH_CMD_CTRL_REG, NUM_ADDR_BYTES_FLD, 16, 2) + FIELD(FLASH_CMD_CTRL_REG, ENB_WRITE_DATA_FLD, 15, 1) + FIELD(FLASH_CMD_CTRL_REG, NUM_WR_DATA_BYTES_FLD, 12, 3) + FIELD(FLASH_CMD_CTRL_REG, NUM_DUMMY_CYCLES_FLD, 7, 5) + FIELD(FLASH_CMD_CTRL_REG, FLASH_CMD_CTRL_RESV1_FLD, 3, 4) + FIELD(FLASH_CMD_CTRL_REG, STIG_MEM_BANK_EN_FLD, 2, 1) + FIELD(FLASH_CMD_CTRL_REG, CMD_EXEC_STATUS_FLD, 1, 1) + FIELD(FLASH_CMD_CTRL_REG, CMD_EXEC_FLD, 0, 1) +REG32(FLASH_CMD_ADDR_REG, 0x94) +REG32(FLASH_RD_DATA_LOWER_REG, 0xa0) +REG32(FLASH_RD_DATA_UPPER_REG, 0xa4) +REG32(FLASH_WR_DATA_LOWER_REG, 0xa8) +REG32(FLASH_WR_DATA_UPPER_REG, 0xac) +REG32(POLLING_FLASH_STATUS_REG, 0xb0) + FIELD(POLLING_FLASH_STATUS_REG, DEVICE_STATUS_RSVD_FLD2, 21, 11) + FIELD(POLLING_FLASH_STATUS_REG, DEVICE_STATUS_NB_DUMMY, 16, 5) + FIELD(POLLING_FLASH_STATUS_REG, DEVICE_STATUS_RSVD_FLD1, 9, 7) + FIELD(POLLING_FLASH_STATUS_REG, DEVICE_STATUS_VALID_FLD, 8, 1) + FIELD(POLLING_FLASH_STATUS_REG, DEVICE_STATUS_FLD, 0, 8) +REG32(PHY_CONFIGURATION_REG, 0xb4) + FIELD(PHY_CONFIGURATION_REG, PHY_CONFIG_RESYNC_FLD, 31, 1) + FIELD(PHY_CONFIGURATION_REG, PHY_CONFIG_RESET_FLD, 30, 1) + FIELD(PHY_CONFIGURATION_REG, PHY_CONFIG_RX_DLL_BYPASS_FLD, 29, 1) + FIELD(PHY_CONFIGURATION_REG, PHY_CONFIG_RESV2_FLD, 23, 6) + FIELD(PHY_CONFIGURATION_REG, PHY_CONFIG_TX_DLL_DELAY_FLD, 16, 7) + FIELD(PHY_CONFIGURATION_REG, PHY_CONFIG_RESV1_FLD, 7, 9) + FIELD(PHY_CONFIGURATION_REG, PHY_CONFIG_RX_DLL_DELAY_FLD, 0, 7) +REG32(PHY_MASTER_CONTROL_REG, 0xb8) + FIELD(PHY_MASTER_CONTROL_REG, PHY_MASTER_CONTROL_RESV3_FLD, 25, 7) + FIELD(PHY_MASTER_CONTROL_REG, PHY_MASTER_LOCK_MODE_FLD, 24, 1) + FIELD(PHY_MASTER_CONTROL_REG, PHY_MASTER_BYPASS_MODE_FLD, 23, 1) + FIELD(PHY_MASTER_CONTROL_REG, PHY_MASTER_PHASE_DETECT_SELECTOR_FLD, 20, 3) + FIELD(PHY_MASTER_CONTROL_REG, PHY_MASTER_CONTROL_RESV2_FLD, 19, 1) + FIELD(PHY_MASTER_CONTROL_REG, PHY_MASTER_NB_INDICATIONS_FLD, 16, 3) + FIELD(PHY_MASTER_CONTROL_REG, PHY_MASTER_CONTROL_RESV1_FLD, 7, 9) + FIELD(PHY_MASTER_CONTROL_REG, PHY_MASTER_INITIAL_DELAY_FLD, 0, 7) +REG32(DLL_OBSERVABLE_LOWER_REG, 0xbc) + FIELD(DLL_OBSERVABLE_LOWER_REG, + DLL_OBSERVABLE_LOWER_DLL_LOCK_INC_FLD, 24, 8) + FIELD(DLL_OBSERVABLE_LOWER_REG, + DLL_OBSERVABLE_LOWER_DLL_LOCK_DEC_FLD, 16, 8) + FIELD(DLL_OBSERVABLE_LOWER_REG, + DLL_OBSERVABLE_LOWER_LOOPBACK_LOCK_FLD, 15, 1) + FIELD(DLL_OBSERVABLE_LOWER_REG, + DLL_OBSERVABLE_LOWER_LOCK_VALUE_FLD, 8, 7) + FIELD(DLL_OBSERVABLE_LOWER_REG, + DLL_OBSERVABLE_LOWER_UNLOCK_COUNTER_FLD, 3, 5) + FIELD(DLL_OBSERVABLE_LOWER_REG, + DLL_OBSERVABLE_LOWER_LOCK_MODE_FLD, 1, 2) + FIELD(DLL_OBSERVABLE_LOWER_REG, + DLL_OBSERVABLE_LOWER_DLL_LOCK_FLD, 0, 1) +REG32(DLL_OBSERVABLE_UPPER_REG, 0xc0) + FIELD(DLL_OBSERVABLE_UPPER_REG, + DLL_OBSERVABLE_UPPER_RESV2_FLD, 23, 9) + FIELD(DLL_OBSERVABLE_UPPER_REG, + DLL_OBSERVABLE_UPPER_TX_DECODER_OUTPUT_FLD, 16, 7) + FIELD(DLL_OBSERVABLE_UPPER_REG, + DLL_OBSERVABLE_UPPER_RESV1_FLD, 7, 9) + FIELD(DLL_OBSERVABLE_UPPER_REG, + DLL_OBSERVABLE__UPPER_RX_DECODER_OUTPUT_FLD, 0, 7) +REG32(OPCODE_EXT_LOWER_REG, 0xe0) + FIELD(OPCODE_EXT_LOWER_REG, EXT_READ_OPCODE_FLD, 24, 8) + FIELD(OPCODE_EXT_LOWER_REG, EXT_WRITE_OPCODE_FLD, 16, 8) + FIELD(OPCODE_EXT_LOWER_REG, EXT_POLL_OPCODE_FLD, 8, 8) + FIELD(OPCODE_EXT_LOWER_REG, EXT_STIG_OPCODE_FLD, 0, 8) +REG32(OPCODE_EXT_UPPER_REG, 0xe4) + FIELD(OPCODE_EXT_UPPER_REG, WEL_OPCODE_FLD, 24, 8) + FIELD(OPCODE_EXT_UPPER_REG, EXT_WEL_OPCODE_FLD, 16, 8) + 
FIELD(OPCODE_EXT_UPPER_REG, OPCODE_EXT_UPPER_RESV1_FLD, 0, 16) +REG32(MODULE_ID_REG, 0xfc) + FIELD(MODULE_ID_REG, FIX_PATCH_FLD, 24, 8) + FIELD(MODULE_ID_REG, MODULE_ID_FLD, 8, 16) + FIELD(MODULE_ID_REG, MODULE_ID_RESV_FLD, 2, 6) + FIELD(MODULE_ID_REG, CONF_FLD, 0, 2) + +#define RXFF_SZ 1024 +#define TXFF_SZ 1024 + +#define MAX_RX_DEC_OUT 8 + +#define SZ_512MBIT (512 * 1024 * 1024) +#define SZ_1GBIT (1024 * 1024 * 1024) +#define SZ_2GBIT (2ULL * SZ_1GBIT) +#define SZ_4GBIT (4ULL * SZ_1GBIT) + +#define IS_IND_DMA_START(op) (op->done_bytes == 0) +/* + * Bit field size of R_INDIRECT_WRITE_XFER_CTRL_REG_NUM_IND_OPS_DONE_FLD + * is 2 bits, which can record max of 3 indac operations. + */ +#define IND_OPS_DONE_MAX 3 + +typedef enum { + WREN = 0x6, +} FlashCMD; + +static unsigned int ospi_stig_addr_len(XlnxVersalOspi *s) +{ + /* Num address bytes is NUM_ADDR_BYTES_FLD + 1 */ + return ARRAY_FIELD_EX32(s->regs, + FLASH_CMD_CTRL_REG, NUM_ADDR_BYTES_FLD) + 1; +} + +static unsigned int ospi_stig_wr_data_len(XlnxVersalOspi *s) +{ + /* Num write data bytes is NUM_WR_DATA_BYTES_FLD + 1 */ + return ARRAY_FIELD_EX32(s->regs, + FLASH_CMD_CTRL_REG, NUM_WR_DATA_BYTES_FLD) + 1; +} + +static unsigned int ospi_stig_rd_data_len(XlnxVersalOspi *s) +{ + /* Num read data bytes is NUM_RD_DATA_BYTES_FLD + 1 */ + return ARRAY_FIELD_EX32(s->regs, + FLASH_CMD_CTRL_REG, NUM_RD_DATA_BYTES_FLD) + 1; +} + +/* + * Status bits in R_IRQ_STATUS_REG are set when the event occurs and the + * interrupt is enabled in the mask register ([1] Section 2.3.17) + */ +static void set_irq(XlnxVersalOspi *s, uint32_t set_mask) +{ + s->regs[R_IRQ_STATUS_REG] |= s->regs[R_IRQ_MASK_REG] & set_mask; +} + +static void ospi_update_irq_line(XlnxVersalOspi *s) +{ + qemu_set_irq(s->irq, !!(s->regs[R_IRQ_STATUS_REG] & + s->regs[R_IRQ_MASK_REG])); +} + +static uint8_t ospi_get_wr_opcode(XlnxVersalOspi *s) +{ + return ARRAY_FIELD_EX32(s->regs, + DEV_INSTR_WR_CONFIG_REG, WR_OPCODE_FLD); +} + +static uint8_t ospi_get_rd_opcode(XlnxVersalOspi *s) +{ + return ARRAY_FIELD_EX32(s->regs, + DEV_INSTR_RD_CONFIG_REG, RD_OPCODE_NON_XIP_FLD); +} + +static uint32_t ospi_get_num_addr_bytes(XlnxVersalOspi *s) +{ + /* Num address bytes is NUM_ADDR_BYTES_FLD + 1 */ + return ARRAY_FIELD_EX32(s->regs, + DEV_SIZE_CONFIG_REG, NUM_ADDR_BYTES_FLD) + 1; +} + +static void ospi_stig_membank_req(XlnxVersalOspi *s) +{ + int idx = ARRAY_FIELD_EX32(s->regs, + FLASH_COMMAND_CTRL_MEM_REG, MEM_BANK_ADDR_FLD); + + ARRAY_FIELD_DP32(s->regs, FLASH_COMMAND_CTRL_MEM_REG, + MEM_BANK_READ_DATA_FLD, s->stig_membank[idx]); +} + +static int ospi_stig_membank_rd_bytes(XlnxVersalOspi *s) +{ + int rd_data_fld = ARRAY_FIELD_EX32(s->regs, FLASH_COMMAND_CTRL_MEM_REG, + NB_OF_STIG_READ_BYTES_FLD); + static const int sizes[6] = { 16, 32, 64, 128, 256, 512 }; + return (rd_data_fld < 6) ? 
sizes[rd_data_fld] : 0; +} + +static uint32_t ospi_get_page_sz(XlnxVersalOspi *s) +{ + return ARRAY_FIELD_EX32(s->regs, + DEV_SIZE_CONFIG_REG, BYTES_PER_DEVICE_PAGE_FLD); +} + +static bool ospi_ind_rd_watermark_enabled(XlnxVersalOspi *s) +{ + return s->regs[R_INDIRECT_READ_XFER_WATERMARK_REG]; +} + +static void ind_op_advance(IndOp *op, unsigned int len) +{ + op->done_bytes += len; + assert(op->done_bytes <= op->num_bytes); + if (op->done_bytes == op->num_bytes) { + op->completed = true; + } +} + +static uint32_t ind_op_next_byte(IndOp *op) +{ + return op->flash_addr + op->done_bytes; +} + +static uint32_t ind_op_end_byte(IndOp *op) +{ + return op->flash_addr + op->num_bytes; +} + +static void ospi_ind_op_next(IndOp *op) +{ + op[0] = op[1]; + op[1].completed = true; +} + +static void ind_op_setup(IndOp *op, uint32_t flash_addr, uint32_t num_bytes) +{ + if (num_bytes & 0x3) { + qemu_log_mask(LOG_GUEST_ERROR, + "OSPI indirect op num bytes not word aligned\n"); + } + op->flash_addr = flash_addr; + op->num_bytes = num_bytes; + op->done_bytes = 0; + op->completed = false; +} + +static bool ospi_ind_op_completed(IndOp *op) +{ + return op->completed; +} + +static bool ospi_ind_op_all_completed(XlnxVersalOspi *s) +{ + return s->rd_ind_op[0].completed && s->wr_ind_op[0].completed; +} + +static void ospi_ind_op_cancel(IndOp *op) +{ + op[0].completed = true; + op[1].completed = true; +} + +static bool ospi_ind_op_add(IndOp *op, Fifo8 *fifo, + uint32_t flash_addr, uint32_t num_bytes) +{ + /* Check if first indirect op has been completed */ + if (op->completed) { + fifo8_reset(fifo); + ind_op_setup(op, flash_addr, num_bytes); + return false; + } + + /* Check if second indirect op has been completed */ + op++; + if (op->completed) { + ind_op_setup(op, flash_addr, num_bytes); + return false; + } + return true; +} + +static void ospi_ind_op_queue_up_rd(XlnxVersalOspi *s) +{ + uint32_t num_bytes = s->regs[R_INDIRECT_READ_XFER_NUM_BYTES_REG]; + uint32_t flash_addr = s->regs[R_INDIRECT_READ_XFER_START_REG]; + bool failed; + + failed = ospi_ind_op_add(s->rd_ind_op, &s->rx_sram, flash_addr, num_bytes); + /* If two reads are already queued, set the transfer reject interrupt */ + if (failed) { + set_irq(s, R_IRQ_STATUS_REG_INDIRECT_TRANSFER_REJECT_FLD_MASK); + } +} + +static void ospi_ind_op_queue_up_wr(XlnxVersalOspi *s) +{ + uint32_t num_bytes = s->regs[R_INDIRECT_WRITE_XFER_NUM_BYTES_REG]; + uint32_t flash_addr = s->regs[R_INDIRECT_WRITE_XFER_START_REG]; + bool failed; + + failed = ospi_ind_op_add(s->wr_ind_op, &s->tx_sram, flash_addr, num_bytes); + /* If two writes are already queued, set the transfer reject interrupt */ + if (failed) { + set_irq(s, R_IRQ_STATUS_REG_INDIRECT_TRANSFER_REJECT_FLD_MASK); + } +} + +static uint64_t flash_sz(XlnxVersalOspi *s, unsigned int cs) +{ + /* Flash sizes in bytes */ + static const uint64_t sizes[4] = { SZ_512MBIT / 8, SZ_1GBIT / 8, + SZ_2GBIT / 8, SZ_4GBIT / 8 }; + uint32_t v = s->regs[R_DEV_SIZE_CONFIG_REG]; + + v >>= cs * R_DEV_SIZE_CONFIG_REG_MEM_SIZE_ON_CS0_FLD_LENGTH; + return sizes[FIELD_EX32(v, DEV_SIZE_CONFIG_REG, MEM_SIZE_ON_CS0_FLD)]; +} + +static unsigned int ospi_get_block_sz(XlnxVersalOspi *s) +{ + unsigned int block_fld = ARRAY_FIELD_EX32(s->regs, + DEV_SIZE_CONFIG_REG, + BYTES_PER_SUBSECTOR_FLD); + return 1 << block_fld; +} + +static unsigned int flash_blocks(XlnxVersalOspi *s, unsigned int cs) +{ + unsigned int b_sz = ospi_get_block_sz(s); + unsigned int f_sz = flash_sz(s, cs); + + return f_sz / b_sz; +} + +static int ospi_ahb_decoder_cs(XlnxVersalOspi *s, hwaddr addr) +{ + uint64_t end_addr = 0; + 
int cs; + + for (cs = 0; cs < s->num_cs; cs++) { + end_addr += flash_sz(s, cs); + if (addr < end_addr) { + break; + } + } + + if (cs == s->num_cs) { + /* Address is out of range */ + qemu_log_mask(LOG_GUEST_ERROR, + "OSPI flash address does not fit in configuration\n"); + return -1; + } + return cs; +} + +static void ospi_ahb_decoder_enable_cs(XlnxVersalOspi *s, hwaddr addr) +{ + int cs = ospi_ahb_decoder_cs(s, addr); + + if (cs >= 0) { + for (int i = 0; i < s->num_cs; i++) { + qemu_set_irq(s->cs_lines[i], cs != i); + } + } +} + +static unsigned int single_cs(XlnxVersalOspi *s) +{ + unsigned int field = ARRAY_FIELD_EX32(s->regs, + CONFIG_REG, PERIPH_CS_LINES_FLD); + + /* + * Below one liner is a trick that finds the rightmost zero and makes sure + * all other bits are turned to 1. It is a variant of the 'Isolate the + * rightmost 0-bit' trick found below at the time of writing: + * + * https://emre.me/computer-science/bit-manipulation-tricks/ + * + * 4'bXXX0 -> 4'b1110 + * 4'bXX01 -> 4'b1101 + * 4'bX011 -> 4'b1011 + * 4'b0111 -> 4'b0111 + * 4'b1111 -> 4'b1111 + */ + return (field | ~(field + 1)) & 0xf; +} + +static void ospi_update_cs_lines(XlnxVersalOspi *s) +{ + unsigned int all_cs; + int i; + + if (ARRAY_FIELD_EX32(s->regs, CONFIG_REG, PERIPH_SEL_DEC_FLD)) { + all_cs = ARRAY_FIELD_EX32(s->regs, CONFIG_REG, PERIPH_CS_LINES_FLD); + } else { + all_cs = single_cs(s); + } + + for (i = 0; i < s->num_cs; i++) { + bool cs = (all_cs >> i) & 1; + + qemu_set_irq(s->cs_lines[i], cs); + } +} + +static void ospi_dac_cs(XlnxVersalOspi *s, hwaddr addr) +{ + if (ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENABLE_AHB_DECODER_FLD)) { + ospi_ahb_decoder_enable_cs(s, addr); + } else { + ospi_update_cs_lines(s); + } +} + +static void ospi_disable_cs(XlnxVersalOspi *s) +{ + int i; + + for (i = 0; i < s->num_cs; i++) { + qemu_set_irq(s->cs_lines[i], 1); + } +} + +static void ospi_flush_txfifo(XlnxVersalOspi *s) +{ + while (!fifo8_is_empty(&s->tx_fifo)) { + uint32_t tx_rx = fifo8_pop(&s->tx_fifo); + + tx_rx = ssi_transfer(s->spi, tx_rx); + fifo8_push(&s->rx_fifo, tx_rx); + } +} + +static void ospi_tx_fifo_push_address_raw(XlnxVersalOspi *s, + uint32_t flash_addr, + unsigned int addr_bytes) +{ + /* Push write address */ + if (addr_bytes == 4) { + fifo8_push(&s->tx_fifo, flash_addr >> 24); + } + if (addr_bytes >= 3) { + fifo8_push(&s->tx_fifo, flash_addr >> 16); + } + if (addr_bytes >= 2) { + fifo8_push(&s->tx_fifo, flash_addr >> 8); + } + fifo8_push(&s->tx_fifo, flash_addr); +} + +static void ospi_tx_fifo_push_address(XlnxVersalOspi *s, uint32_t flash_addr) +{ + /* Push write address */ + int addr_bytes = ospi_get_num_addr_bytes(s); + + ospi_tx_fifo_push_address_raw(s, flash_addr, addr_bytes); +} + +static void ospi_tx_fifo_push_stig_addr(XlnxVersalOspi *s) +{ + uint32_t flash_addr = s->regs[R_FLASH_CMD_ADDR_REG]; + unsigned int addr_bytes = ospi_stig_addr_len(s); + + ospi_tx_fifo_push_address_raw(s, flash_addr, addr_bytes); +} + +static void ospi_tx_fifo_push_rd_op_addr(XlnxVersalOspi *s, uint32_t flash_addr) +{ + uint8_t inst_code = ospi_get_rd_opcode(s); + + fifo8_reset(&s->tx_fifo); + + /* Push read opcode */ + fifo8_push(&s->tx_fifo, inst_code); + + /* Push read address */ + ospi_tx_fifo_push_address(s, flash_addr); +} + +static void ospi_tx_fifo_push_stig_wr_data(XlnxVersalOspi *s) +{ + uint64_t data = s->regs[R_FLASH_WR_DATA_LOWER_REG]; + int wr_data_len = ospi_stig_wr_data_len(s); + int i; + + data |= (uint64_t) s->regs[R_FLASH_WR_DATA_UPPER_REG] << 32; + for (i = 0; i < wr_data_len; i++) { + int shift = i * 
8; + fifo8_push(&s->tx_fifo, data >> shift); + } +} + +static void ospi_tx_fifo_push_stig_rd_data(XlnxVersalOspi *s) +{ + int rd_data_len; + int i; + + if (ARRAY_FIELD_EX32(s->regs, FLASH_CMD_CTRL_REG, STIG_MEM_BANK_EN_FLD)) { + rd_data_len = ospi_stig_membank_rd_bytes(s); + } else { + rd_data_len = ospi_stig_rd_data_len(s); + } + + /* transmit second part (data) */ + for (i = 0; i < rd_data_len; ++i) { + fifo8_push(&s->tx_fifo, 0); + } +} + +static void ospi_rx_fifo_pop_stig_rd_data(XlnxVersalOspi *s) +{ + int size = ospi_stig_rd_data_len(s); + uint8_t bytes[8] = {}; + int i; + + size = MIN(fifo8_num_used(&s->rx_fifo), size); + + assert(size <= 8); + + for (i = 0; i < size; i++) { + bytes[i] = fifo8_pop(&s->rx_fifo); + } + + s->regs[R_FLASH_RD_DATA_LOWER_REG] = ldl_le_p(bytes); + s->regs[R_FLASH_RD_DATA_UPPER_REG] = ldl_le_p(bytes + 4); +} + +static void ospi_ind_read(XlnxVersalOspi *s, uint32_t flash_addr, uint32_t len) +{ + int i; + + /* Create first section of read cmd */ + ospi_tx_fifo_push_rd_op_addr(s, flash_addr); + + /* transmit first part */ + ospi_update_cs_lines(s); + ospi_flush_txfifo(s); + + fifo8_reset(&s->rx_fifo); + + /* transmit second part (data) */ + for (i = 0; i < len; ++i) { + fifo8_push(&s->tx_fifo, 0); + } + ospi_flush_txfifo(s); + + for (i = 0; i < len; ++i) { + fifo8_push(&s->rx_sram, fifo8_pop(&s->rx_fifo)); + } + + /* done */ + ospi_disable_cs(s); +} + +static unsigned int ospi_dma_burst_size(XlnxVersalOspi *s) +{ + return 1 << ARRAY_FIELD_EX32(s->regs, + DMA_PERIPH_CONFIG_REG, + NUM_BURST_REQ_BYTES_FLD); +} + +static unsigned int ospi_dma_single_size(XlnxVersalOspi *s) +{ + return 1 << ARRAY_FIELD_EX32(s->regs, + DMA_PERIPH_CONFIG_REG, + NUM_SINGLE_REQ_BYTES_FLD); +} + +static void ind_rd_inc_num_done(XlnxVersalOspi *s) +{ + unsigned int done = ARRAY_FIELD_EX32(s->regs, + INDIRECT_READ_XFER_CTRL_REG, + NUM_IND_OPS_DONE_FLD); + if (done < IND_OPS_DONE_MAX) { + done++; + } + done &= 0x3; + ARRAY_FIELD_DP32(s->regs, INDIRECT_READ_XFER_CTRL_REG, + NUM_IND_OPS_DONE_FLD, done); +} + +static void ospi_ind_rd_completed(XlnxVersalOspi *s) +{ + ARRAY_FIELD_DP32(s->regs, INDIRECT_READ_XFER_CTRL_REG, + IND_OPS_DONE_STATUS_FLD, 1); + + ind_rd_inc_num_done(s); + ospi_ind_op_next(s->rd_ind_op); + if (ospi_ind_op_all_completed(s)) { + set_irq(s, R_IRQ_STATUS_REG_INDIRECT_OP_DONE_FLD_MASK); + } +} + +static void ospi_dma_read(XlnxVersalOspi *s) +{ + IndOp *op = s->rd_ind_op; + uint32_t dma_len = op->num_bytes; + uint32_t burst_sz = ospi_dma_burst_size(s); + uint32_t single_sz = ospi_dma_single_size(s); + uint32_t ind_trig_range; + uint32_t remainder; + XlnxCSUDMAClass *xcdc = XLNX_CSU_DMA_GET_CLASS(s->dma_src); + + ind_trig_range = (1 << ARRAY_FIELD_EX32(s->regs, + INDIRECT_TRIGGER_ADDR_RANGE_REG, + IND_RANGE_WIDTH_FLD)); + remainder = dma_len % burst_sz; + remainder = remainder % single_sz; + if (burst_sz > ind_trig_range || single_sz > ind_trig_range || + remainder != 0) { + qemu_log_mask(LOG_GUEST_ERROR, + "OSPI DMA burst size / single size config error\n"); + } + + s->src_dma_inprog = true; + if (xcdc->read(s->dma_src, 0, dma_len) != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "OSPI DMA configuration error\n"); + } + s->src_dma_inprog = false; +} + +static void ospi_do_ind_read(XlnxVersalOspi *s) +{ + IndOp *op = s->rd_ind_op; + uint32_t next_b; + uint32_t end_b; + uint32_t len; + bool start_dma = IS_IND_DMA_START(op) && !s->src_dma_inprog; + + /* Continue to read flash until we run out of space in sram */ + while (!ospi_ind_op_completed(op) && + 
!fifo8_is_full(&s->rx_sram)) { + /* Read requested number of bytes, max bytes limited to size of sram */ + next_b = ind_op_next_byte(op); + end_b = next_b + fifo8_num_free(&s->rx_sram); + end_b = MIN(end_b, ind_op_end_byte(op)); + + len = end_b - next_b; + ospi_ind_read(s, next_b, len); + ind_op_advance(op, len); + + if (ospi_ind_rd_watermark_enabled(s)) { + ARRAY_FIELD_DP32(s->regs, IRQ_STATUS_REG, + INDIRECT_XFER_LEVEL_BREACH_FLD, 1); + set_irq(s, + R_IRQ_STATUS_REG_INDIRECT_XFER_LEVEL_BREACH_FLD_MASK); + } + + if (!s->src_dma_inprog && + ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENB_DMA_IF_FLD)) { + ospi_dma_read(s); + } + } + + /* Set sram full */ + if (fifo8_num_used(&s->rx_sram) == RXFF_SZ) { + ARRAY_FIELD_DP32(s->regs, + INDIRECT_READ_XFER_CTRL_REG, SRAM_FULL_FLD, 1); + set_irq(s, R_IRQ_STATUS_REG_INDRD_SRAM_FULL_FLD_MASK); + } + + /* Signal completion if done, unless inside recursion via ospi_dma_read */ + if (!ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENB_DMA_IF_FLD) || start_dma) { + if (ospi_ind_op_completed(op)) { + ospi_ind_rd_completed(s); + } + } +} + +/* Transmit write enable instruction */ +static void ospi_transmit_wel(XlnxVersalOspi *s, bool ahb_decoder_cs, + hwaddr addr) +{ + fifo8_reset(&s->tx_fifo); + fifo8_push(&s->tx_fifo, WREN); + + if (ahb_decoder_cs) { + ospi_ahb_decoder_enable_cs(s, addr); + } else { + ospi_update_cs_lines(s); + } + + ospi_flush_txfifo(s); + ospi_disable_cs(s); + + fifo8_reset(&s->rx_fifo); +} + +static void ospi_ind_write(XlnxVersalOspi *s, uint32_t flash_addr, uint32_t len) +{ + bool ahb_decoder_cs = false; + uint8_t inst_code; + int i; + + assert(fifo8_num_used(&s->tx_sram) >= len); + + if (!ARRAY_FIELD_EX32(s->regs, DEV_INSTR_WR_CONFIG_REG, WEL_DIS_FLD)) { + ospi_transmit_wel(s, ahb_decoder_cs, 0); + } + + /* reset fifos */ + fifo8_reset(&s->tx_fifo); + fifo8_reset(&s->rx_fifo); + + /* Push write opcode */ + inst_code = ospi_get_wr_opcode(s); + fifo8_push(&s->tx_fifo, inst_code); + + /* Push write address */ + ospi_tx_fifo_push_address(s, flash_addr); + + /* data */ + for (i = 0; i < len; i++) { + fifo8_push(&s->tx_fifo, fifo8_pop(&s->tx_sram)); + } + + /* transmit */ + ospi_update_cs_lines(s); + ospi_flush_txfifo(s); + + /* done */ + ospi_disable_cs(s); + fifo8_reset(&s->rx_fifo); +} + +static void ind_wr_inc_num_done(XlnxVersalOspi *s) +{ + unsigned int done = ARRAY_FIELD_EX32(s->regs, INDIRECT_WRITE_XFER_CTRL_REG, + NUM_IND_OPS_DONE_FLD); + if (done < IND_OPS_DONE_MAX) { + done++; + } + done &= 0x3; + ARRAY_FIELD_DP32(s->regs, INDIRECT_WRITE_XFER_CTRL_REG, + NUM_IND_OPS_DONE_FLD, done); +} + +static void ospi_ind_wr_completed(XlnxVersalOspi *s) +{ + ARRAY_FIELD_DP32(s->regs, INDIRECT_WRITE_XFER_CTRL_REG, + IND_OPS_DONE_STATUS_FLD, 1); + ind_wr_inc_num_done(s); + ospi_ind_op_next(s->wr_ind_op); + /* Set indirect op done interrupt if enabled */ + if (ospi_ind_op_all_completed(s)) { + set_irq(s, R_IRQ_STATUS_REG_INDIRECT_OP_DONE_FLD_MASK); + } +} + +static void ospi_do_indirect_write(XlnxVersalOspi *s) +{ + uint32_t write_watermark = s->regs[R_INDIRECT_WRITE_XFER_WATERMARK_REG]; + uint32_t pagesz = ospi_get_page_sz(s); + uint32_t page_mask = ~(pagesz - 1); + IndOp *op = s->wr_ind_op; + uint32_t next_b; + uint32_t end_b; + uint32_t len; + + /* Write out tx_fifo in maximum page sz chunks */ + while (!ospi_ind_op_completed(op) && fifo8_num_used(&s->tx_sram) > 0) { + next_b = ind_op_next_byte(op); + end_b = next_b + MIN(fifo8_num_used(&s->tx_sram), pagesz); + + /* Don't cross page boundary */ + if ((end_b & page_mask) > next_b) { + end_b &= page_mask; + } 
+ + len = end_b - next_b; + len = MIN(len, op->num_bytes - op->done_bytes); + ospi_ind_write(s, next_b, len); + ind_op_advance(op, len); + } + + /* + * Always set indirect transfer level breached interrupt if enabled + * (write watermark > 0) since the tx_sram always will be emptied + */ + if (write_watermark > 0) { + set_irq(s, R_IRQ_STATUS_REG_INDIRECT_XFER_LEVEL_BREACH_FLD_MASK); + } + + /* Signal completions if done */ + if (ospi_ind_op_completed(op)) { + ospi_ind_wr_completed(s); + } +} + +static void ospi_stig_fill_membank(XlnxVersalOspi *s) +{ + int num_rd_bytes = ospi_stig_membank_rd_bytes(s); + int idx = num_rd_bytes - 8; /* first of last 8 */ + int i; + + for (i = 0; i < num_rd_bytes; i++) { + s->stig_membank[i] = fifo8_pop(&s->rx_fifo); + } + + g_assert((idx + 4) < ARRAY_SIZE(s->stig_membank)); + + /* Fill in lower upper regs */ + s->regs[R_FLASH_RD_DATA_LOWER_REG] = ldl_le_p(&s->stig_membank[idx]); + s->regs[R_FLASH_RD_DATA_UPPER_REG] = ldl_le_p(&s->stig_membank[idx + 4]); +} + +static void ospi_stig_cmd_exec(XlnxVersalOspi *s) +{ + uint8_t inst_code; + + /* Reset fifos */ + fifo8_reset(&s->tx_fifo); + fifo8_reset(&s->rx_fifo); + + /* Push write opcode */ + inst_code = ARRAY_FIELD_EX32(s->regs, FLASH_CMD_CTRL_REG, CMD_OPCODE_FLD); + fifo8_push(&s->tx_fifo, inst_code); + + /* Push address if enabled */ + if (ARRAY_FIELD_EX32(s->regs, FLASH_CMD_CTRL_REG, ENB_COMD_ADDR_FLD)) { + ospi_tx_fifo_push_stig_addr(s); + } + + /* Enable cs */ + ospi_update_cs_lines(s); + + /* Data */ + if (ARRAY_FIELD_EX32(s->regs, FLASH_CMD_CTRL_REG, ENB_WRITE_DATA_FLD)) { + ospi_tx_fifo_push_stig_wr_data(s); + } else if (ARRAY_FIELD_EX32(s->regs, + FLASH_CMD_CTRL_REG, ENB_READ_DATA_FLD)) { + /* transmit first part */ + ospi_flush_txfifo(s); + fifo8_reset(&s->rx_fifo); + ospi_tx_fifo_push_stig_rd_data(s); + } + + /* Transmit */ + ospi_flush_txfifo(s); + ospi_disable_cs(s); + + if (ARRAY_FIELD_EX32(s->regs, FLASH_CMD_CTRL_REG, ENB_READ_DATA_FLD)) { + if (ARRAY_FIELD_EX32(s->regs, + FLASH_CMD_CTRL_REG, STIG_MEM_BANK_EN_FLD)) { + ospi_stig_fill_membank(s); + } else { + ospi_rx_fifo_pop_stig_rd_data(s); + } + } +} + +static uint32_t ospi_block_address(XlnxVersalOspi *s, unsigned int block) +{ + unsigned int block_sz = ospi_get_block_sz(s); + unsigned int cs = 0; + uint32_t addr = 0; + + while (cs < s->num_cs && block >= flash_blocks(s, cs)) { + block -= flash_blocks(s, 0); + addr += flash_sz(s, cs); + } + addr += block * block_sz; + return addr; +} + +static uint32_t ospi_get_wr_prot_addr_low(XlnxVersalOspi *s) +{ + unsigned int block = s->regs[R_LOWER_WR_PROT_REG]; + + return ospi_block_address(s, block); +} + +static uint32_t ospi_get_wr_prot_addr_upper(XlnxVersalOspi *s) +{ + unsigned int block = s->regs[R_UPPER_WR_PROT_REG]; + + /* Get address of first block out of defined range */ + return ospi_block_address(s, block + 1); +} + +static bool ospi_is_write_protected(XlnxVersalOspi *s, hwaddr addr) +{ + uint32_t wr_prot_addr_upper = ospi_get_wr_prot_addr_upper(s); + uint32_t wr_prot_addr_low = ospi_get_wr_prot_addr_low(s); + bool in_range = false; + + if (addr >= wr_prot_addr_low && addr < wr_prot_addr_upper) { + in_range = true; + } + + if (ARRAY_FIELD_EX32(s->regs, WR_PROT_CTRL_REG, INV_FLD)) { + in_range = !in_range; + } + return in_range; +} + +static uint64_t ospi_rx_sram_read(XlnxVersalOspi *s, unsigned int size) +{ + uint8_t bytes[8] = {}; + int i; + + if (size < 4 && fifo8_num_used(&s->rx_sram) >= 4) { + qemu_log_mask(LOG_GUEST_ERROR, + "OSPI only last read of internal " + "sram is allowed to be < 
32 bits\n"); + } + + size = MIN(fifo8_num_used(&s->rx_sram), size); + + assert(size <= 8); + + for (i = 0; i < size; i++) { + bytes[i] = fifo8_pop(&s->rx_sram); + } + + return ldq_le_p(bytes); +} + +static void ospi_tx_sram_write(XlnxVersalOspi *s, uint64_t value, + unsigned int size) +{ + int i; + for (i = 0; i < size && !fifo8_is_full(&s->tx_sram); i++) { + fifo8_push(&s->tx_sram, value >> 8 * i); + } +} + +static uint64_t ospi_do_dac_read(void *opaque, hwaddr addr, unsigned int size) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(opaque); + uint8_t bytes[8] = {}; + int i; + + /* Create first section of read cmd */ + ospi_tx_fifo_push_rd_op_addr(s, (uint32_t) addr); + + /* Enable cs and transmit first part */ + ospi_dac_cs(s, addr); + ospi_flush_txfifo(s); + + fifo8_reset(&s->rx_fifo); + + /* transmit second part (data) */ + for (i = 0; i < size; ++i) { + fifo8_push(&s->tx_fifo, 0); + } + ospi_flush_txfifo(s); + + /* fill in result */ + size = MIN(fifo8_num_used(&s->rx_fifo), size); + + assert(size <= 8); + + for (i = 0; i < size; i++) { + bytes[i] = fifo8_pop(&s->rx_fifo); + } + + /* done */ + ospi_disable_cs(s); + + return ldq_le_p(bytes); +} + +static void ospi_do_dac_write(void *opaque, + hwaddr addr, + uint64_t value, + unsigned int size) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(opaque); + bool ahb_decoder_cs = ARRAY_FIELD_EX32(s->regs, CONFIG_REG, + ENABLE_AHB_DECODER_FLD); + uint8_t inst_code; + unsigned int i; + + if (!ARRAY_FIELD_EX32(s->regs, DEV_INSTR_WR_CONFIG_REG, WEL_DIS_FLD)) { + ospi_transmit_wel(s, ahb_decoder_cs, addr); + } + + /* reset fifos */ + fifo8_reset(&s->tx_fifo); + fifo8_reset(&s->rx_fifo); + + /* Push write opcode */ + inst_code = ospi_get_wr_opcode(s); + fifo8_push(&s->tx_fifo, inst_code); + + /* Push write address */ + ospi_tx_fifo_push_address(s, addr); + + /* data */ + for (i = 0; i < size; i++) { + fifo8_push(&s->tx_fifo, value >> 8 * i); + } + + /* Enable cs and transmit */ + ospi_dac_cs(s, addr); + ospi_flush_txfifo(s); + ospi_disable_cs(s); + + fifo8_reset(&s->rx_fifo); +} + +static void flash_cmd_ctrl_mem_reg_post_write(RegisterInfo *reg, + uint64_t val) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque); + if (ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENB_SPI_FLD)) { + if (ARRAY_FIELD_EX32(s->regs, + FLASH_COMMAND_CTRL_MEM_REG, + TRIGGER_MEM_BANK_REQ_FLD)) { + ospi_stig_membank_req(s); + ARRAY_FIELD_DP32(s->regs, FLASH_COMMAND_CTRL_MEM_REG, + TRIGGER_MEM_BANK_REQ_FLD, 0); + } + } +} + +static void flash_cmd_ctrl_reg_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque); + + if (ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENB_SPI_FLD) && + ARRAY_FIELD_EX32(s->regs, FLASH_CMD_CTRL_REG, CMD_EXEC_FLD)) { + ospi_stig_cmd_exec(s); + set_irq(s, R_IRQ_STATUS_REG_STIG_REQ_INT_FLD_MASK); + ARRAY_FIELD_DP32(s->regs, FLASH_CMD_CTRL_REG, CMD_EXEC_FLD, 0); + } +} + +static uint64_t ind_wr_dec_num_done(XlnxVersalOspi *s, uint64_t val) +{ + unsigned int done = ARRAY_FIELD_EX32(s->regs, INDIRECT_WRITE_XFER_CTRL_REG, + NUM_IND_OPS_DONE_FLD); + done--; + done &= 0x3; + val = FIELD_DP32(val, INDIRECT_WRITE_XFER_CTRL_REG, + NUM_IND_OPS_DONE_FLD, done); + return val; +} + +static bool ind_wr_clearing_op_done(XlnxVersalOspi *s, uint64_t new_val) +{ + bool set_in_reg = ARRAY_FIELD_EX32(s->regs, INDIRECT_WRITE_XFER_CTRL_REG, + IND_OPS_DONE_STATUS_FLD); + bool set_in_new_val = FIELD_EX32(new_val, INDIRECT_WRITE_XFER_CTRL_REG, + IND_OPS_DONE_STATUS_FLD); + /* return true if clearing bit */ + return set_in_reg && !set_in_new_val; +} + 
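The helpers above implement a two-slot queue per transfer direction (rd_ind_op[2]/wr_ind_op[2]): a new indirect request takes the first free slot, a third request is rejected and raises the transfer-reject interrupt, and the 2-bit NUM_IND_OPS_DONE field saturates at IND_OPS_DONE_MAX on completion and is decremented when the guest clears IND_OPS_DONE_STATUS. The standalone sketch below is a simplified model of that bookkeeping only, not part of the patch; the names IndSlot, ind_queue_add, ops_done_inc and ops_done_dec are illustrative, and it omits details such as the SRAM FIFO reset done on the first slot.

/*
 * Simplified, self-contained model of the two-slot indirect-op queue and
 * the 2-bit "ops done" counter. Names here are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define IND_OPS_DONE_MAX 3   /* 2-bit field: counts at most 3 completions */

typedef struct {
    unsigned flash_addr;
    unsigned num_bytes;
    bool completed;
} IndSlot;

/* Returns false on success, true if both slots are busy (request rejected). */
static bool ind_queue_add(IndSlot op[2], unsigned addr, unsigned len)
{
    for (int i = 0; i < 2; i++) {
        if (op[i].completed) {          /* free slot */
            op[i] = (IndSlot){ .flash_addr = addr, .num_bytes = len };
            return false;
        }
    }
    return true;                        /* both busy -> reject interrupt */
}

/* Completion counter saturates at IND_OPS_DONE_MAX but stays within 2 bits. */
static unsigned ops_done_inc(unsigned done)
{
    if (done < IND_OPS_DONE_MAX) {
        done++;
    }
    return done & 0x3;
}

static unsigned ops_done_dec(unsigned done)
{
    return (done - 1) & 0x3;            /* guest clears done status -> decrement */
}

int main(void)
{
    IndSlot wr[2] = { { .completed = true }, { .completed = true } };
    unsigned done = 0;

    printf("queue #1 rejected=%d\n", ind_queue_add(wr, 0x000, 256));
    printf("queue #2 rejected=%d\n", ind_queue_add(wr, 0x100, 256));
    printf("queue #3 rejected=%d\n", ind_queue_add(wr, 0x200, 256)); /* rejected */

    done = ops_done_inc(done);          /* first op completes */
    done = ops_done_dec(done);          /* guest acks by clearing status */
    printf("ops done counter=%u\n", done);
    return 0;
}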
+static uint64_t ind_wr_xfer_ctrl_reg_pre_write(RegisterInfo *reg, + uint64_t val) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque); + + if (ind_wr_clearing_op_done(s, val)) { + val = ind_wr_dec_num_done(s, val); + } + return val; +} + +static void ind_wr_xfer_ctrl_reg_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque); + + if (s->ind_write_disabled) { + return; + } + + if (ARRAY_FIELD_EX32(s->regs, INDIRECT_WRITE_XFER_CTRL_REG, START_FLD)) { + ospi_ind_op_queue_up_wr(s); + ospi_do_indirect_write(s); + ARRAY_FIELD_DP32(s->regs, INDIRECT_WRITE_XFER_CTRL_REG, START_FLD, 0); + } + + if (ARRAY_FIELD_EX32(s->regs, INDIRECT_WRITE_XFER_CTRL_REG, CANCEL_FLD)) { + ospi_ind_op_cancel(s->wr_ind_op); + fifo8_reset(&s->tx_sram); + ARRAY_FIELD_DP32(s->regs, INDIRECT_WRITE_XFER_CTRL_REG, CANCEL_FLD, 0); + } +} + +static uint64_t ind_wr_xfer_ctrl_reg_post_read(RegisterInfo *reg, + uint64_t val) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque); + IndOp *op = s->wr_ind_op; + + /* Check if ind ops is ongoing */ + if (!ospi_ind_op_completed(&op[0])) { + /* Check if two ind ops are queued */ + if (!ospi_ind_op_completed(&op[1])) { + val = FIELD_DP32(val, INDIRECT_WRITE_XFER_CTRL_REG, + WR_QUEUED_FLD, 1); + } + val = FIELD_DP32(val, INDIRECT_WRITE_XFER_CTRL_REG, WR_STATUS_FLD, 1); + } + return val; +} + +static uint64_t ind_rd_dec_num_done(XlnxVersalOspi *s, uint64_t val) +{ + unsigned int done = ARRAY_FIELD_EX32(s->regs, INDIRECT_READ_XFER_CTRL_REG, + NUM_IND_OPS_DONE_FLD); + done--; + done &= 0x3; + val = FIELD_DP32(val, INDIRECT_READ_XFER_CTRL_REG, + NUM_IND_OPS_DONE_FLD, done); + return val; +} + +static uint64_t ind_rd_xfer_ctrl_reg_pre_write(RegisterInfo *reg, + uint64_t val) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque); + + if (FIELD_EX32(val, INDIRECT_READ_XFER_CTRL_REG, + IND_OPS_DONE_STATUS_FLD)) { + val = ind_rd_dec_num_done(s, val); + val &= ~R_INDIRECT_READ_XFER_CTRL_REG_IND_OPS_DONE_STATUS_FLD_MASK; + } + return val; +} + +static void ind_rd_xfer_ctrl_reg_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque); + + if (ARRAY_FIELD_EX32(s->regs, INDIRECT_READ_XFER_CTRL_REG, START_FLD)) { + ospi_ind_op_queue_up_rd(s); + ospi_do_ind_read(s); + ARRAY_FIELD_DP32(s->regs, INDIRECT_READ_XFER_CTRL_REG, START_FLD, 0); + } + + if (ARRAY_FIELD_EX32(s->regs, INDIRECT_READ_XFER_CTRL_REG, CANCEL_FLD)) { + ospi_ind_op_cancel(s->rd_ind_op); + fifo8_reset(&s->rx_sram); + ARRAY_FIELD_DP32(s->regs, INDIRECT_READ_XFER_CTRL_REG, CANCEL_FLD, 0); + } +} + +static uint64_t ind_rd_xfer_ctrl_reg_post_read(RegisterInfo *reg, + uint64_t val) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque); + IndOp *op = s->rd_ind_op; + + /* Check if ind ops is ongoing */ + if (!ospi_ind_op_completed(&op[0])) { + /* Check if two ind ops are queued */ + if (!ospi_ind_op_completed(&op[1])) { + val = FIELD_DP32(val, INDIRECT_READ_XFER_CTRL_REG, + RD_QUEUED_FLD, 1); + } + val = FIELD_DP32(val, INDIRECT_READ_XFER_CTRL_REG, RD_STATUS_FLD, 1); + } + return val; +} + +static uint64_t sram_fill_reg_post_read(RegisterInfo *reg, uint64_t val) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque); + val = ((fifo8_num_used(&s->tx_sram) & 0xFFFF) << 16) | + (fifo8_num_used(&s->rx_sram) & 0xFFFF); + return val; +} + +static uint64_t dll_obs_upper_reg_post_read(RegisterInfo *reg, uint64_t val) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque); + uint32_t rx_dec_out; + + rx_dec_out = FIELD_EX32(val, 
DLL_OBSERVABLE_UPPER_REG, + DLL_OBSERVABLE__UPPER_RX_DECODER_OUTPUT_FLD); + + if (rx_dec_out < MAX_RX_DEC_OUT) { + ARRAY_FIELD_DP32(s->regs, DLL_OBSERVABLE_UPPER_REG, + DLL_OBSERVABLE__UPPER_RX_DECODER_OUTPUT_FLD, + rx_dec_out + 1); + } + + return val; +} + + +static void xlnx_versal_ospi_reset(DeviceState *dev) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(dev); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + register_reset(&s->regs_info[i]); + } + + fifo8_reset(&s->rx_fifo); + fifo8_reset(&s->tx_fifo); + fifo8_reset(&s->rx_sram); + fifo8_reset(&s->tx_sram); + + s->rd_ind_op[0].completed = true; + s->rd_ind_op[1].completed = true; + s->wr_ind_op[0].completed = true; + s->wr_ind_op[1].completed = true; + ARRAY_FIELD_DP32(s->regs, DLL_OBSERVABLE_LOWER_REG, + DLL_OBSERVABLE_LOWER_DLL_LOCK_FLD, 1); + ARRAY_FIELD_DP32(s->regs, DLL_OBSERVABLE_LOWER_REG, + DLL_OBSERVABLE_LOWER_LOOPBACK_LOCK_FLD, 1); +} + +static RegisterAccessInfo ospi_regs_info[] = { + { .name = "CONFIG_REG", + .addr = A_CONFIG_REG, + .reset = 0x80780081, + .ro = 0x9c000000, + },{ .name = "DEV_INSTR_RD_CONFIG_REG", + .addr = A_DEV_INSTR_RD_CONFIG_REG, + .reset = 0x3, + .ro = 0xe0ecc800, + },{ .name = "DEV_INSTR_WR_CONFIG_REG", + .addr = A_DEV_INSTR_WR_CONFIG_REG, + .reset = 0x2, + .ro = 0xe0fcce00, + },{ .name = "DEV_DELAY_REG", + .addr = A_DEV_DELAY_REG, + },{ .name = "RD_DATA_CAPTURE_REG", + .addr = A_RD_DATA_CAPTURE_REG, + .reset = 0x1, + .ro = 0xfff0fec0, + },{ .name = "DEV_SIZE_CONFIG_REG", + .addr = A_DEV_SIZE_CONFIG_REG, + .reset = 0x101002, + .ro = 0xe0000000, + },{ .name = "SRAM_PARTITION_CFG_REG", + .addr = A_SRAM_PARTITION_CFG_REG, + .reset = 0x80, + .ro = 0xffffff00, + },{ .name = "IND_AHB_ADDR_TRIGGER_REG", + .addr = A_IND_AHB_ADDR_TRIGGER_REG, + },{ .name = "DMA_PERIPH_CONFIG_REG", + .addr = A_DMA_PERIPH_CONFIG_REG, + .ro = 0xfffff0f0, + },{ .name = "REMAP_ADDR_REG", + .addr = A_REMAP_ADDR_REG, + },{ .name = "MODE_BIT_CONFIG_REG", + .addr = A_MODE_BIT_CONFIG_REG, + .reset = 0x200, + .ro = 0xffff7800, + },{ .name = "SRAM_FILL_REG", + .addr = A_SRAM_FILL_REG, + .ro = 0xffffffff, + .post_read = sram_fill_reg_post_read, + },{ .name = "TX_THRESH_REG", + .addr = A_TX_THRESH_REG, + .reset = 0x1, + .ro = 0xffffffe0, + },{ .name = "RX_THRESH_REG", + .addr = A_RX_THRESH_REG, + .reset = 0x1, + .ro = 0xffffffe0, + },{ .name = "WRITE_COMPLETION_CTRL_REG", + .addr = A_WRITE_COMPLETION_CTRL_REG, + .reset = 0x10005, + .ro = 0x1800, + },{ .name = "NO_OF_POLLS_BEF_EXP_REG", + .addr = A_NO_OF_POLLS_BEF_EXP_REG, + .reset = 0xffffffff, + },{ .name = "IRQ_STATUS_REG", + .addr = A_IRQ_STATUS_REG, + .ro = 0xfff08000, + .w1c = 0xf7fff, + },{ .name = "IRQ_MASK_REG", + .addr = A_IRQ_MASK_REG, + .ro = 0xfff08000, + },{ .name = "LOWER_WR_PROT_REG", + .addr = A_LOWER_WR_PROT_REG, + },{ .name = "UPPER_WR_PROT_REG", + .addr = A_UPPER_WR_PROT_REG, + },{ .name = "WR_PROT_CTRL_REG", + .addr = A_WR_PROT_CTRL_REG, + .ro = 0xfffffffc, + },{ .name = "INDIRECT_READ_XFER_CTRL_REG", + .addr = A_INDIRECT_READ_XFER_CTRL_REG, + .ro = 0xffffffd4, + .w1c = 0x08, + .pre_write = ind_rd_xfer_ctrl_reg_pre_write, + .post_write = ind_rd_xfer_ctrl_reg_post_write, + .post_read = ind_rd_xfer_ctrl_reg_post_read, + },{ .name = "INDIRECT_READ_XFER_WATERMARK_REG", + .addr = A_INDIRECT_READ_XFER_WATERMARK_REG, + },{ .name = "INDIRECT_READ_XFER_START_REG", + .addr = A_INDIRECT_READ_XFER_START_REG, + },{ .name = "INDIRECT_READ_XFER_NUM_BYTES_REG", + .addr = A_INDIRECT_READ_XFER_NUM_BYTES_REG, + },{ .name = "INDIRECT_WRITE_XFER_CTRL_REG", + .addr = 
A_INDIRECT_WRITE_XFER_CTRL_REG, + .ro = 0xffffffdc, + .w1c = 0x20, + .pre_write = ind_wr_xfer_ctrl_reg_pre_write, + .post_write = ind_wr_xfer_ctrl_reg_post_write, + .post_read = ind_wr_xfer_ctrl_reg_post_read, + },{ .name = "INDIRECT_WRITE_XFER_WATERMARK_REG", + .addr = A_INDIRECT_WRITE_XFER_WATERMARK_REG, + .reset = 0xffffffff, + },{ .name = "INDIRECT_WRITE_XFER_START_REG", + .addr = A_INDIRECT_WRITE_XFER_START_REG, + },{ .name = "INDIRECT_WRITE_XFER_NUM_BYTES_REG", + .addr = A_INDIRECT_WRITE_XFER_NUM_BYTES_REG, + },{ .name = "INDIRECT_TRIGGER_ADDR_RANGE_REG", + .addr = A_INDIRECT_TRIGGER_ADDR_RANGE_REG, + .reset = 0x4, + .ro = 0xfffffff0, + },{ .name = "FLASH_COMMAND_CTRL_MEM_REG", + .addr = A_FLASH_COMMAND_CTRL_MEM_REG, + .ro = 0xe008fffe, + .post_write = flash_cmd_ctrl_mem_reg_post_write, + },{ .name = "FLASH_CMD_CTRL_REG", + .addr = A_FLASH_CMD_CTRL_REG, + .ro = 0x7a, + .post_write = flash_cmd_ctrl_reg_post_write, + },{ .name = "FLASH_CMD_ADDR_REG", + .addr = A_FLASH_CMD_ADDR_REG, + },{ .name = "FLASH_RD_DATA_LOWER_REG", + .addr = A_FLASH_RD_DATA_LOWER_REG, + .ro = 0xffffffff, + },{ .name = "FLASH_RD_DATA_UPPER_REG", + .addr = A_FLASH_RD_DATA_UPPER_REG, + .ro = 0xffffffff, + },{ .name = "FLASH_WR_DATA_LOWER_REG", + .addr = A_FLASH_WR_DATA_LOWER_REG, + },{ .name = "FLASH_WR_DATA_UPPER_REG", + .addr = A_FLASH_WR_DATA_UPPER_REG, + },{ .name = "POLLING_FLASH_STATUS_REG", + .addr = A_POLLING_FLASH_STATUS_REG, + .ro = 0xfff0ffff, + },{ .name = "PHY_CONFIGURATION_REG", + .addr = A_PHY_CONFIGURATION_REG, + .reset = 0x40000000, + .ro = 0x1f80ff80, + },{ .name = "PHY_MASTER_CONTROL_REG", + .addr = A_PHY_MASTER_CONTROL_REG, + .reset = 0x800000, + .ro = 0xfe08ff80, + },{ .name = "DLL_OBSERVABLE_LOWER_REG", + .addr = A_DLL_OBSERVABLE_LOWER_REG, + .ro = 0xffffffff, + },{ .name = "DLL_OBSERVABLE_UPPER_REG", + .addr = A_DLL_OBSERVABLE_UPPER_REG, + .ro = 0xffffffff, + .post_read = dll_obs_upper_reg_post_read, + },{ .name = "OPCODE_EXT_LOWER_REG", + .addr = A_OPCODE_EXT_LOWER_REG, + .reset = 0x13edfa00, + },{ .name = "OPCODE_EXT_UPPER_REG", + .addr = A_OPCODE_EXT_UPPER_REG, + .reset = 0x6f90000, + .ro = 0xffff, + },{ .name = "MODULE_ID_REG", + .addr = A_MODULE_ID_REG, + .reset = 0x300, + .ro = 0xffffffff, + } +}; + +/* Return dev-obj from reg-region created by register_init_block32 */ +static XlnxVersalOspi *xilinx_ospi_of_mr(void *mr_accessor) +{ + RegisterInfoArray *reg_array = mr_accessor; + Object *dev; + + dev = reg_array->mem.owner; + assert(dev); + + return XILINX_VERSAL_OSPI(dev); +} + +static void ospi_write(void *opaque, hwaddr addr, uint64_t value, + unsigned int size) +{ + XlnxVersalOspi *s = xilinx_ospi_of_mr(opaque); + + register_write_memory(opaque, addr, value, size); + ospi_update_irq_line(s); +} + +static const MemoryRegionOps ospi_ops = { + .read = register_read_memory, + .write = ospi_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static uint64_t ospi_indac_read(void *opaque, unsigned int size) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(opaque); + uint64_t ret = ospi_rx_sram_read(s, size); + + if (!ospi_ind_op_completed(s->rd_ind_op)) { + ospi_do_ind_read(s); + } + return ret; +} + +static void ospi_indac_write(void *opaque, uint64_t value, unsigned int size) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(opaque); + + g_assert(!s->ind_write_disabled); + + if (!ospi_ind_op_completed(s->wr_ind_op)) { + ospi_tx_sram_write(s, value, size); + ospi_do_indirect_write(s); + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "OSPI 
wr into indac area while no ongoing indac wr\n"); + } +} + +static bool is_inside_indac_range(XlnxVersalOspi *s, hwaddr addr) +{ + uint32_t range_start; + uint32_t range_end; + + if (ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENB_DMA_IF_FLD)) { + return true; + } + + range_start = s->regs[R_IND_AHB_ADDR_TRIGGER_REG]; + range_end = range_start + + (1 << ARRAY_FIELD_EX32(s->regs, + INDIRECT_TRIGGER_ADDR_RANGE_REG, + IND_RANGE_WIDTH_FLD)); + + addr += s->regs[R_IND_AHB_ADDR_TRIGGER_REG] & 0xF0000000; + + return addr >= range_start && addr < range_end; +} + +static bool ospi_is_indac_active(XlnxVersalOspi *s) +{ + /* + * When dac and indac cannot be active at the same time, + * return true when dac is disabled. + */ + return s->dac_with_indac || !s->dac_enable; +} + +static uint64_t ospi_dac_read(void *opaque, hwaddr addr, unsigned int size) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(opaque); + + if (ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENB_SPI_FLD)) { + if (ospi_is_indac_active(s) && + is_inside_indac_range(s, addr)) { + return ospi_indac_read(s, size); + } + if (ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENB_DIR_ACC_CTLR_FLD) + && s->dac_enable) { + if (ARRAY_FIELD_EX32(s->regs, + CONFIG_REG, ENB_AHB_ADDR_REMAP_FLD)) { + addr += s->regs[R_REMAP_ADDR_REG]; + } + return ospi_do_dac_read(opaque, addr, size); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "OSPI AHB rd while DAC disabled\n"); + } + } else { + qemu_log_mask(LOG_GUEST_ERROR, "OSPI AHB rd while OSPI disabled\n"); + } + + return 0; +} + +static void ospi_dac_write(void *opaque, hwaddr addr, uint64_t value, + unsigned int size) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(opaque); + + if (ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENB_SPI_FLD)) { + if (ospi_is_indac_active(s) && + !s->ind_write_disabled && + is_inside_indac_range(s, addr)) { + return ospi_indac_write(s, value, size); + } + if (ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENB_DIR_ACC_CTLR_FLD) && + s->dac_enable) { + if (ARRAY_FIELD_EX32(s->regs, + CONFIG_REG, ENB_AHB_ADDR_REMAP_FLD)) { + addr += s->regs[R_REMAP_ADDR_REG]; + } + /* Check if addr is write protected */ + if (ARRAY_FIELD_EX32(s->regs, WR_PROT_CTRL_REG, ENB_FLD) && + ospi_is_write_protected(s, addr)) { + set_irq(s, R_IRQ_STATUS_REG_PROT_WR_ATTEMPT_FLD_MASK); + ospi_update_irq_line(s); + qemu_log_mask(LOG_GUEST_ERROR, + "OSPI writing into write protected area\n"); + return; + } + ospi_do_dac_write(opaque, addr, value, size); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "OSPI AHB wr while DAC disabled\n"); + } + } else { + qemu_log_mask(LOG_GUEST_ERROR, "OSPI AHB wr while OSPI disabled\n"); + } +} + +static const MemoryRegionOps ospi_dac_ops = { + .read = ospi_dac_read, + .write = ospi_dac_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void ospi_update_dac_status(void *opaque, int n, int level) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(opaque); + + s->dac_enable = level; +} + +static void xlnx_versal_ospi_realize(DeviceState *dev, Error **errp) +{ + XlnxVersalOspi *s = XILINX_VERSAL_OSPI(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + s->num_cs = 4; + s->spi = ssi_create_bus(dev, "spi0"); + s->cs_lines = g_new0(qemu_irq, s->num_cs); + for (int i = 0; i < s->num_cs; ++i) { + sysbus_init_irq(sbd, &s->cs_lines[i]); + } + + fifo8_create(&s->rx_fifo, RXFF_SZ); + fifo8_create(&s->tx_fifo, TXFF_SZ); + fifo8_create(&s->rx_sram, RXFF_SZ); + fifo8_create(&s->tx_sram, TXFF_SZ); +} + +static void xlnx_versal_ospi_init(Object *obj) +{ + XlnxVersalOspi *s = 
XILINX_VERSAL_OSPI(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + DeviceState *dev = DEVICE(obj); + RegisterInfoArray *reg_array; + + memory_region_init(&s->iomem, obj, TYPE_XILINX_VERSAL_OSPI, + XILINX_VERSAL_OSPI_R_MAX * 4); + reg_array = + register_init_block32(DEVICE(obj), ospi_regs_info, + ARRAY_SIZE(ospi_regs_info), + s->regs_info, s->regs, + &ospi_ops, + XILINX_VERSAL_OSPI_ERR_DEBUG, + XILINX_VERSAL_OSPI_R_MAX * 4); + memory_region_add_subregion(&s->iomem, 0x0, &reg_array->mem); + sysbus_init_mmio(sbd, &s->iomem); + + memory_region_init_io(&s->iomem_dac, obj, &ospi_dac_ops, s, + TYPE_XILINX_VERSAL_OSPI "-dac", 0x20000000); + sysbus_init_mmio(sbd, &s->iomem_dac); + + sysbus_init_irq(sbd, &s->irq); + + object_property_add_link(obj, "dma-src", TYPE_XLNX_CSU_DMA, + (Object **)&s->dma_src, + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG); + + qdev_init_gpio_in_named(dev, ospi_update_dac_status, "ospi-mux-sel", 1); +} + +static const VMStateDescription vmstate_ind_op = { + .name = "OSPIIndOp", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(flash_addr, IndOp), + VMSTATE_UINT32(num_bytes, IndOp), + VMSTATE_UINT32(done_bytes, IndOp), + VMSTATE_BOOL(completed, IndOp), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_xlnx_versal_ospi = { + .name = TYPE_XILINX_VERSAL_OSPI, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_FIFO8(rx_fifo, XlnxVersalOspi), + VMSTATE_FIFO8(tx_fifo, XlnxVersalOspi), + VMSTATE_FIFO8(rx_sram, XlnxVersalOspi), + VMSTATE_FIFO8(tx_sram, XlnxVersalOspi), + VMSTATE_BOOL(ind_write_disabled, XlnxVersalOspi), + VMSTATE_BOOL(dac_with_indac, XlnxVersalOspi), + VMSTATE_BOOL(dac_enable, XlnxVersalOspi), + VMSTATE_BOOL(src_dma_inprog, XlnxVersalOspi), + VMSTATE_STRUCT_ARRAY(rd_ind_op, XlnxVersalOspi, 2, 1, + vmstate_ind_op, IndOp), + VMSTATE_STRUCT_ARRAY(wr_ind_op, XlnxVersalOspi, 2, 1, + vmstate_ind_op, IndOp), + VMSTATE_UINT32_ARRAY(regs, XlnxVersalOspi, XILINX_VERSAL_OSPI_R_MAX), + VMSTATE_UINT8_ARRAY(stig_membank, XlnxVersalOspi, 512), + VMSTATE_END_OF_LIST(), + } +}; + +static Property xlnx_versal_ospi_properties[] = { + DEFINE_PROP_BOOL("dac-with-indac", XlnxVersalOspi, dac_with_indac, false), + DEFINE_PROP_BOOL("indac-write-disabled", XlnxVersalOspi, + ind_write_disabled, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xlnx_versal_ospi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = xlnx_versal_ospi_reset; + dc->realize = xlnx_versal_ospi_realize; + dc->vmsd = &vmstate_xlnx_versal_ospi; + device_class_set_props(dc, xlnx_versal_ospi_properties); +} + +static const TypeInfo xlnx_versal_ospi_info = { + .name = TYPE_XILINX_VERSAL_OSPI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxVersalOspi), + .class_init = xlnx_versal_ospi_class_init, + .instance_init = xlnx_versal_ospi_init, +}; + +static void xlnx_versal_ospi_register_types(void) +{ + type_register_static(&xlnx_versal_ospi_info); +} + +type_init(xlnx_versal_ospi_register_types) diff --git a/hw/timer/Kconfig b/hw/timer/Kconfig index bac2511715..010be7ed1f 100644 --- a/hw/timer/Kconfig +++ b/hw/timer/Kconfig @@ -25,6 +25,9 @@ config ALLWINNER_A10_PIT bool select PTIMER +config SIFIVE_PWM + bool + config STM32F2XX_TIMER bool @@ -52,5 +55,8 @@ config SSE_COUNTER config SSE_TIMER bool +config STELLARIS_GPTM + bool + config AVR_TIMER16 bool diff --git a/hw/timer/a9gtimer.c b/hw/timer/a9gtimer.c index 7233068a37..5e959b6d09 100644 
--- a/hw/timer/a9gtimer.c +++ b/hw/timer/a9gtimer.c @@ -318,6 +318,12 @@ static void a9_gtimer_realize(DeviceState *dev, Error **errp) } } +static bool vmstate_a9_gtimer_control_needed(void *opaque) +{ + A9GTimerState *s = opaque; + return s->control != 0; +} + static const VMStateDescription vmstate_a9_gtimer_per_cpu = { .name = "arm.cortex-a9-global-timer.percpu", .version_id = 1, @@ -331,6 +337,17 @@ static const VMStateDescription vmstate_a9_gtimer_per_cpu = { } }; +static const VMStateDescription vmstate_a9_gtimer_control = { + .name = "arm.cortex-a9-global-timer.control", + .version_id = 1, + .minimum_version_id = 1, + .needed = vmstate_a9_gtimer_control_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT32(control, A9GTimerState), + VMSTATE_END_OF_LIST() + } +}; + static const VMStateDescription vmstate_a9_gtimer = { .name = "arm.cortex-a9-global-timer", .version_id = 1, @@ -344,6 +361,10 @@ static const VMStateDescription vmstate_a9_gtimer = { 1, vmstate_a9_gtimer_per_cpu, A9GTimerPerCPU), VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_a9_gtimer_control, + NULL } }; diff --git a/hw/timer/allwinner-a10-pit.c b/hw/timer/allwinner-a10-pit.c index c3fc2a4daa..971f78462a 100644 --- a/hw/timer/allwinner-a10-pit.c +++ b/hw/timer/allwinner-a10-pit.c @@ -275,7 +275,7 @@ static void a10_pit_init(Object *obj) tc->container = s; tc->index = i; - s->timer[i] = ptimer_init(a10_pit_timer_cb, tc, PTIMER_POLICY_DEFAULT); + s->timer[i] = ptimer_init(a10_pit_timer_cb, tc, PTIMER_POLICY_LEGACY); } } diff --git a/hw/timer/altera_timer.c b/hw/timer/altera_timer.c index c6e02d2b5a..0f1f54206a 100644 --- a/hw/timer/altera_timer.c +++ b/hw/timer/altera_timer.c @@ -185,7 +185,7 @@ static void altera_timer_realize(DeviceState *dev, Error **errp) return; } - t->ptimer = ptimer_init(timer_hit, t, PTIMER_POLICY_DEFAULT); + t->ptimer = ptimer_init(timer_hit, t, PTIMER_POLICY_LEGACY); ptimer_transaction_begin(t->ptimer); ptimer_set_freq(t->ptimer, t->freq_hz); ptimer_transaction_commit(t->ptimer); diff --git a/hw/timer/arm_timer.c b/hw/timer/arm_timer.c index 15caff0e41..69c8863472 100644 --- a/hw/timer/arm_timer.c +++ b/hw/timer/arm_timer.c @@ -176,11 +176,11 @@ static arm_timer_state *arm_timer_init(uint32_t freq) { arm_timer_state *s; - s = (arm_timer_state *)g_malloc0(sizeof(arm_timer_state)); + s = g_new0(arm_timer_state, 1); s->freq = freq; s->control = TIMER_CTRL_IE; - s->timer = ptimer_init(arm_timer_tick, s, PTIMER_POLICY_DEFAULT); + s->timer = ptimer_init(arm_timer_tick, s, PTIMER_POLICY_LEGACY); vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY, &vmstate_arm_timer, s); return s; } diff --git a/hw/timer/armv7m_systick.c b/hw/timer/armv7m_systick.c index 2f192011eb..5dfe39afe3 100644 --- a/hw/timer/armv7m_systick.c +++ b/hw/timer/armv7m_systick.c @@ -14,28 +14,32 @@ #include "migration/vmstate.h" #include "hw/irq.h" #include "hw/sysbus.h" +#include "hw/qdev-clock.h" #include "qemu/timer.h" #include "qemu/log.h" #include "qemu/module.h" +#include "qapi/error.h" #include "trace.h" -/* qemu timers run at 1GHz. We want something closer to 1MHz. */ -#define SYSTICK_SCALE 1000ULL - #define SYSTICK_ENABLE (1 << 0) #define SYSTICK_TICKINT (1 << 1) #define SYSTICK_CLKSOURCE (1 << 2) #define SYSTICK_COUNTFLAG (1 << 16) -int system_clock_scale; +#define SYSCALIB_NOREF (1U << 31) +#define SYSCALIB_SKEW (1U << 30) +#define SYSCALIB_TENMS ((1U << 24) - 1) -/* Conversion factor from qemu timer to SysTick frequencies. 
*/ -static inline int64_t systick_scale(SysTickState *s) +static void systick_set_period_from_clock(SysTickState *s) { + /* + * Set the ptimer period from whichever clock is selected. + * Must be called from within a ptimer transaction block. + */ if (s->control & SYSTICK_CLKSOURCE) { - return system_clock_scale; + ptimer_set_period_from_clock(s->ptimer, s->cpuclk, 1); } else { - return 1000; + ptimer_set_period_from_clock(s->ptimer, s->refclk, 1); } } @@ -82,7 +86,28 @@ static MemTxResult systick_read(void *opaque, hwaddr addr, uint64_t *data, val = ptimer_get_count(s->ptimer); break; case 0xc: /* SysTick Calibration Value. */ - val = 10000; + /* + * In real hardware it is possible to make this register report + * a different value from what the reference clock is actually + * running at. We don't model that (which usually happens due + * to integration errors in the real hardware) and instead always + * report the theoretical correct value as described in the + * knowledgebase article at + * https://developer.arm.com/documentation/ka001325/latest + * If necessary, we could implement an extra QOM property on this + * device to force the STCALIB value to something different from + * the "correct" value. + */ + if (!clock_has_source(s->refclk)) { + val = SYSCALIB_NOREF; + break; + } + val = clock_ns_to_ticks(s->refclk, 10 * SCALE_MS) - 1; + val &= SYSCALIB_TENMS; + if (clock_ticks_to_ns(s->refclk, val + 1) != 10 * SCALE_MS) { + /* report that tick count does not yield exactly 10ms */ + val |= SYSCALIB_SKEW; + } break; default: val = 0; @@ -114,26 +139,26 @@ static MemTxResult systick_write(void *opaque, hwaddr addr, { uint32_t oldval; + if (!clock_has_source(s->refclk)) { + /* This bit is always 1 if there is no external refclk */ + value |= SYSTICK_CLKSOURCE; + } + ptimer_transaction_begin(s->ptimer); oldval = s->control; s->control &= 0xfffffff8; s->control |= value & 7; + if ((oldval ^ value) & SYSTICK_CLKSOURCE) { + systick_set_period_from_clock(s); + } + if ((oldval ^ value) & SYSTICK_ENABLE) { if (value & SYSTICK_ENABLE) { - /* - * Always reload the period in case board code has - * changed system_clock_scale. If we ever replace that - * global with a more sensible API then we might be able - * to set the period only when it actually changes. - */ - ptimer_set_period(s->ptimer, systick_scale(s)); ptimer_run(s->ptimer, 0); } else { ptimer_stop(s->ptimer); } - } else if ((oldval ^ value) & SYSTICK_CLKSOURCE) { - ptimer_set_period(s->ptimer, systick_scale(s)); } ptimer_transaction_commit(s->ptimer); break; @@ -176,20 +201,42 @@ static void systick_reset(DeviceState *dev) { SysTickState *s = SYSTICK(dev); - /* - * Forgetting to set system_clock_scale is always a board code - * bug. We can't check this earlier because for some boards - * (like stellaris) it is not yet configured at the point where - * the systick device is realized. 
- */ - assert(system_clock_scale != 0); - ptimer_transaction_begin(s->ptimer); s->control = 0; + if (!clock_has_source(s->refclk)) { + /* This bit is always 1 if there is no external refclk */ + s->control |= SYSTICK_CLKSOURCE; + } ptimer_stop(s->ptimer); ptimer_set_count(s->ptimer, 0); ptimer_set_limit(s->ptimer, 0, 0); - ptimer_set_period(s->ptimer, systick_scale(s)); + systick_set_period_from_clock(s); + ptimer_transaction_commit(s->ptimer); +} + +static void systick_cpuclk_update(void *opaque, ClockEvent event) +{ + SysTickState *s = SYSTICK(opaque); + + if (!(s->control & SYSTICK_CLKSOURCE)) { + /* currently using refclk, we can ignore cpuclk changes */ + } + + ptimer_transaction_begin(s->ptimer); + ptimer_set_period_from_clock(s->ptimer, s->cpuclk, 1); + ptimer_transaction_commit(s->ptimer); +} + +static void systick_refclk_update(void *opaque, ClockEvent event) +{ + SysTickState *s = SYSTICK(opaque); + + if (s->control & SYSTICK_CLKSOURCE) { + /* currently using cpuclk, we can ignore refclk changes */ + } + + ptimer_transaction_begin(s->ptimer); + ptimer_set_period_from_clock(s->ptimer, s->refclk, 1); ptimer_transaction_commit(s->ptimer); } @@ -201,6 +248,11 @@ static void systick_instance_init(Object *obj) memory_region_init_io(&s->iomem, obj, &systick_ops, s, "systick", 0xe0); sysbus_init_mmio(sbd, &s->iomem); sysbus_init_irq(sbd, &s->irq); + + s->refclk = qdev_init_clock_in(DEVICE(obj), "refclk", + systick_refclk_update, s, ClockUpdate); + s->cpuclk = qdev_init_clock_in(DEVICE(obj), "cpuclk", + systick_cpuclk_update, s, ClockUpdate); } static void systick_realize(DeviceState *dev, Error **errp) @@ -211,13 +263,21 @@ static void systick_realize(DeviceState *dev, Error **errp) PTIMER_POLICY_NO_COUNTER_ROUND_DOWN | PTIMER_POLICY_NO_IMMEDIATE_RELOAD | PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT); + + if (!clock_has_source(s->cpuclk)) { + error_setg(errp, "systick: cpuclk must be connected"); + return; + } + /* It's OK not to connect the refclk */ } static const VMStateDescription vmstate_systick = { .name = "armv7m_systick", - .version_id = 2, - .minimum_version_id = 2, + .version_id = 3, + .minimum_version_id = 3, .fields = (VMStateField[]) { + VMSTATE_CLOCK(refclk, SysTickState), + VMSTATE_CLOCK(cpuclk, SysTickState), VMSTATE_UINT32(control, SysTickState), VMSTATE_INT64(tick, SysTickState), VMSTATE_PTIMER(ptimer, SysTickState), diff --git a/hw/timer/aspeed_timer.c b/hw/timer/aspeed_timer.c index 42c47d2ce6..9c20b3d6ad 100644 --- a/hw/timer/aspeed_timer.c +++ b/hw/timer/aspeed_timer.c @@ -745,12 +745,29 @@ static const TypeInfo aspeed_2600_timer_info = { .class_init = aspeed_2600_timer_class_init, }; +static void aspeed_1030_timer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedTimerClass *awc = ASPEED_TIMER_CLASS(klass); + + dc->desc = "ASPEED 1030 Timer"; + awc->read = aspeed_2600_timer_read; + awc->write = aspeed_2600_timer_write; +} + +static const TypeInfo aspeed_1030_timer_info = { + .name = TYPE_ASPEED_1030_TIMER, + .parent = TYPE_ASPEED_TIMER, + .class_init = aspeed_1030_timer_class_init, +}; + static void aspeed_timer_register_types(void) { type_register_static(&aspeed_timer_info); type_register_static(&aspeed_2400_timer_info); type_register_static(&aspeed_2500_timer_info); type_register_static(&aspeed_2600_timer_info); + type_register_static(&aspeed_1030_timer_info); } type_init(aspeed_timer_register_types) diff --git a/hw/timer/cadence_ttc.c b/hw/timer/cadence_ttc.c index 64108241ba..e57a0f5f09 100644 --- a/hw/timer/cadence_ttc.c 
+++ b/hw/timer/cadence_ttc.c @@ -24,6 +24,8 @@ #include "qemu/timer.h" #include "qom/object.h" +#include "hw/timer/cadence_ttc.h" + #ifdef CADENCE_TTC_ERR_DEBUG #define DB_PRINT(...) do { \ fprintf(stderr, ": %s: ", __func__); \ @@ -49,36 +51,6 @@ #define CLOCK_CTRL_PS_EN 0x00000001 #define CLOCK_CTRL_PS_V 0x0000001e -typedef struct { - QEMUTimer *timer; - int freq; - - uint32_t reg_clock; - uint32_t reg_count; - uint32_t reg_value; - uint16_t reg_interval; - uint16_t reg_match[3]; - uint32_t reg_intr; - uint32_t reg_intr_en; - uint32_t reg_event_ctrl; - uint32_t reg_event; - - uint64_t cpu_time; - unsigned int cpu_time_valid; - - qemu_irq irq; -} CadenceTimerState; - -#define TYPE_CADENCE_TTC "cadence_ttc" -OBJECT_DECLARE_SIMPLE_TYPE(CadenceTTCState, CADENCE_TTC) - -struct CadenceTTCState { - SysBusDevice parent_obj; - - MemoryRegion iomem; - CadenceTimerState timer[3]; -}; - static void cadence_timer_update(CadenceTimerState *s) { qemu_set_irq(s->irq, !!(s->reg_intr & s->reg_intr_en)); diff --git a/hw/timer/digic-timer.c b/hw/timer/digic-timer.c index e3aae4a45a..d5186f4454 100644 --- a/hw/timer/digic-timer.c +++ b/hw/timer/digic-timer.c @@ -139,7 +139,7 @@ static void digic_timer_init(Object *obj) { DigicTimerState *s = DIGIC_TIMER(obj); - s->ptimer = ptimer_init(digic_timer_tick, NULL, PTIMER_POLICY_DEFAULT); + s->ptimer = ptimer_init(digic_timer_tick, NULL, PTIMER_POLICY_LEGACY); /* * FIXME: there is no documentation on Digic timer diff --git a/hw/timer/etraxfs_timer.c b/hw/timer/etraxfs_timer.c index 4ba662190d..ecc2831baf 100644 --- a/hw/timer/etraxfs_timer.c +++ b/hw/timer/etraxfs_timer.c @@ -26,6 +26,7 @@ #include "hw/sysbus.h" #include "sysemu/reset.h" #include "sysemu/runstate.h" +#include "migration/vmstate.h" #include "qemu/module.h" #include "qemu/timer.h" #include "hw/irq.h" @@ -64,7 +65,7 @@ struct ETRAXTimerState { ptimer_state *ptimer_t1; ptimer_state *ptimer_wd; - int wd_hits; + uint32_t wd_hits; /* Control registers. 
*/ uint32_t rw_tmr0_div; @@ -83,6 +84,36 @@ struct ETRAXTimerState { uint32_t r_masked_intr; }; +static const VMStateDescription vmstate_etraxfs = { + .name = "etraxfs", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_PTIMER(ptimer_t0, ETRAXTimerState), + VMSTATE_PTIMER(ptimer_t1, ETRAXTimerState), + VMSTATE_PTIMER(ptimer_wd, ETRAXTimerState), + + VMSTATE_UINT32(wd_hits, ETRAXTimerState), + + VMSTATE_UINT32(rw_tmr0_div, ETRAXTimerState), + VMSTATE_UINT32(r_tmr0_data, ETRAXTimerState), + VMSTATE_UINT32(rw_tmr0_ctrl, ETRAXTimerState), + + VMSTATE_UINT32(rw_tmr1_div, ETRAXTimerState), + VMSTATE_UINT32(r_tmr1_data, ETRAXTimerState), + VMSTATE_UINT32(rw_tmr1_ctrl, ETRAXTimerState), + + VMSTATE_UINT32(rw_wd_ctrl, ETRAXTimerState), + + VMSTATE_UINT32(rw_intr_mask, ETRAXTimerState), + VMSTATE_UINT32(rw_ack_intr, ETRAXTimerState), + VMSTATE_UINT32(r_intr, ETRAXTimerState), + VMSTATE_UINT32(r_masked_intr, ETRAXTimerState), + + VMSTATE_END_OF_LIST() + } +}; + static uint64_t timer_read(void *opaque, hwaddr addr, unsigned int size) { @@ -339,9 +370,9 @@ static void etraxfs_timer_realize(DeviceState *dev, Error **errp) ETRAXTimerState *t = ETRAX_TIMER(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - t->ptimer_t0 = ptimer_init(timer0_hit, t, PTIMER_POLICY_DEFAULT); - t->ptimer_t1 = ptimer_init(timer1_hit, t, PTIMER_POLICY_DEFAULT); - t->ptimer_wd = ptimer_init(watchdog_hit, t, PTIMER_POLICY_DEFAULT); + t->ptimer_t0 = ptimer_init(timer0_hit, t, PTIMER_POLICY_LEGACY); + t->ptimer_t1 = ptimer_init(timer1_hit, t, PTIMER_POLICY_LEGACY); + t->ptimer_wd = ptimer_init(watchdog_hit, t, PTIMER_POLICY_LEGACY); sysbus_init_irq(sbd, &t->irq); sysbus_init_irq(sbd, &t->nmi); @@ -357,6 +388,7 @@ static void etraxfs_timer_class_init(ObjectClass *klass, void *data) ResettableClass *rc = RESETTABLE_CLASS(klass); dc->realize = etraxfs_timer_realize; + dc->vmsd = &vmstate_etraxfs; rc->phases.enter = etraxfs_timer_reset_enter; rc->phases.hold = etraxfs_timer_reset_hold; } diff --git a/hw/timer/exynos4210_mct.c b/hw/timer/exynos4210_mct.c index d0e5343996..e175a9f5b9 100644 --- a/hw/timer/exynos4210_mct.c +++ b/hw/timer/exynos4210_mct.c @@ -1503,17 +1503,17 @@ static void exynos4210_mct_init(Object *obj) /* Global timer */ s->g_timer.ptimer_frc = ptimer_init(exynos4210_gfrc_event, s, - PTIMER_POLICY_DEFAULT); + PTIMER_POLICY_LEGACY); memset(&s->g_timer.reg, 0, sizeof(struct gregs)); /* Local timers */ for (i = 0; i < 2; i++) { s->l_timer[i].tick_timer.ptimer_tick = ptimer_init(exynos4210_ltick_event, &s->l_timer[i], - PTIMER_POLICY_DEFAULT); + PTIMER_POLICY_LEGACY); s->l_timer[i].ptimer_frc = ptimer_init(exynos4210_lfrc_event, &s->l_timer[i], - PTIMER_POLICY_DEFAULT); + PTIMER_POLICY_LEGACY); s->l_timer[i].id = i; } diff --git a/hw/timer/exynos4210_pwm.c b/hw/timer/exynos4210_pwm.c index 220088120e..02924a9e5b 100644 --- a/hw/timer/exynos4210_pwm.c +++ b/hw/timer/exynos4210_pwm.c @@ -400,7 +400,7 @@ static void exynos4210_pwm_init(Object *obj) sysbus_init_irq(dev, &s->timer[i].irq); s->timer[i].ptimer = ptimer_init(exynos4210_pwm_tick, &s->timer[i], - PTIMER_POLICY_DEFAULT); + PTIMER_POLICY_LEGACY); s->timer[i].id = i; s->timer[i].parent = s; } diff --git a/hw/timer/grlib_gptimer.c b/hw/timer/grlib_gptimer.c index d511890109..5c4923c1e0 100644 --- a/hw/timer/grlib_gptimer.c +++ b/hw/timer/grlib_gptimer.c @@ -383,7 +383,7 @@ static void grlib_gptimer_realize(DeviceState *dev, Error **errp) timer->unit = unit; timer->ptimer = ptimer_init(grlib_gptimer_hit, timer, - PTIMER_POLICY_DEFAULT); 
+ PTIMER_POLICY_LEGACY); timer->id = i; /* One IRQ line for each timer */ diff --git a/hw/timer/hpet.c b/hw/timer/hpet.c index 9520471be2..5f88ffdef8 100644 --- a/hw/timer/hpet.c +++ b/hw/timer/hpet.c @@ -352,6 +352,16 @@ static const VMStateDescription vmstate_hpet = { } }; +static void hpet_arm(HPETTimer *t, uint64_t ticks) +{ + if (ticks < ns_to_ticks(INT64_MAX / 2)) { + timer_mod(t->qemu_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ticks_to_ns(ticks)); + } else { + timer_del(t->qemu_timer); + } +} + /* * timer expiration callback */ @@ -374,13 +384,11 @@ static void hpet_timer(void *opaque) } } diff = hpet_calculate_diff(t, cur_tick); - timer_mod(t->qemu_timer, - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (int64_t)ticks_to_ns(diff)); + hpet_arm(t, diff); } else if (t->config & HPET_TN_32BIT && !timer_is_periodic(t)) { if (t->wrap_flag) { diff = hpet_calculate_diff(t, cur_tick); - timer_mod(t->qemu_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - (int64_t)ticks_to_ns(diff)); + hpet_arm(t, diff); t->wrap_flag = 0; } } @@ -407,8 +415,7 @@ static void hpet_set_timer(HPETTimer *t) t->wrap_flag = 1; } } - timer_mod(t->qemu_timer, - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (int64_t)ticks_to_ns(diff)); + hpet_arm(t, diff); } static void hpet_del_timer(HPETTimer *t) diff --git a/hw/timer/ibex_timer.c b/hw/timer/ibex_timer.c index 5befb53506..d8b8e4e1f6 100644 --- a/hw/timer/ibex_timer.c +++ b/hw/timer/ibex_timer.c @@ -34,7 +34,9 @@ #include "target/riscv/cpu.h" #include "migration/vmstate.h" -REG32(CTRL, 0x00) +REG32(ALERT_TEST, 0x00) + FIELD(ALERT_TEST, FATAL_FAULT, 0, 1) +REG32(CTRL, 0x04) FIELD(CTRL, ACTIVE, 0, 1) REG32(CFG0, 0x100) FIELD(CFG0, PRESCALE, 0, 12) @@ -58,8 +60,6 @@ static uint64_t cpu_riscv_read_rtc(uint32_t timebase_freq) static void ibex_timer_update_irqs(IbexTimerState *s) { - CPUState *cs = qemu_get_cpu(0); - RISCVCPU *cpu = RISCV_CPU(cs); uint64_t value = s->timer_compare_lower0 | ((uint64_t)s->timer_compare_upper0 << 32); uint64_t next, diff; @@ -71,13 +71,13 @@ static void ibex_timer_update_irqs(IbexTimerState *s) } /* Update the CPUs mtimecmp */ - cpu->env.timecmp = value; + s->mtimecmp = value; - if (cpu->env.timecmp <= now) { + if (s->mtimecmp <= now) { /* * If the mtimecmp was in the past raise the interrupt now. 
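The ibex change here moves the machine-timer bookkeeping out of the CPU and into the device: compare mtimecmp with the current tick count, raise the timer IRQ if the compare value is already in the past, otherwise lower it and program a QEMU_CLOCK_VIRTUAL deadline. The core of that compare-and-arm step, reduced to a free-standing helper (names are illustrative):

#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "qemu/host-utils.h"
#include "hw/irq.h"

static void demo_mtimer_update(QEMUTimer *mtimer, qemu_irq irq,
                               uint64_t now, uint64_t cmp,
                               uint32_t timebase_freq)
{
    int64_t next;

    if (cmp <= now) {
        /* Compare value already reached: assert the interrupt right away */
        qemu_irq_raise(irq);
        return;
    }

    /* Not due yet: deassert and schedule the expiry, converted to ns */
    qemu_irq_lower(irq);
    next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
           muldiv64(cmp - now, NANOSECONDS_PER_SECOND, timebase_freq);
    if (next < qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)) {
        /* The ns conversion overflowed: push the deadline out as far as we can */
        timer_mod(mtimer, INT64_MAX);
    } else {
        timer_mod(mtimer, next);
    }
}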
*/ - riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(1)); + qemu_irq_raise(s->m_timer_irq); if (s->timer_intr_enable & R_INTR_ENABLE_IE_0_MASK) { s->timer_intr_state |= R_INTR_STATE_IS_0_MASK; qemu_set_irq(s->irq, true); @@ -86,10 +86,10 @@ static void ibex_timer_update_irqs(IbexTimerState *s) } /* Setup a timer to trigger the interrupt in the future */ - riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(0)); + qemu_irq_lower(s->m_timer_irq); qemu_set_irq(s->irq, false); - diff = cpu->env.timecmp - now; + diff = s->mtimecmp - now; next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + muldiv64(diff, NANOSECONDS_PER_SECOND, @@ -97,19 +97,17 @@ static void ibex_timer_update_irqs(IbexTimerState *s) if (next < qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)) { /* We overflowed the timer, just set it as large as we can */ - timer_mod(cpu->env.timer, 0x7FFFFFFFFFFFFFFF); + timer_mod(s->mtimer, 0x7FFFFFFFFFFFFFFF); } else { - timer_mod(cpu->env.timer, next); + timer_mod(s->mtimer, next); } } static void ibex_timer_cb(void *opaque) { IbexTimerState *s = opaque; - CPUState *cs = qemu_get_cpu(0); - RISCVCPU *cpu = RISCV_CPU(cs); - riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(1)); + qemu_irq_raise(s->m_timer_irq); if (s->timer_intr_enable & R_INTR_ENABLE_IE_0_MASK) { s->timer_intr_state |= R_INTR_STATE_IS_0_MASK; qemu_set_irq(s->irq, true); @@ -120,11 +118,9 @@ static void ibex_timer_reset(DeviceState *dev) { IbexTimerState *s = IBEX_TIMER(dev); - CPUState *cpu = qemu_get_cpu(0); - CPURISCVState *env = cpu->env_ptr; - env->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + s->mtimer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &ibex_timer_cb, s); - env->timecmp = 0; + s->mtimecmp = 0; s->timer_ctrl = 0x00000000; s->timer_cfg0 = 0x00010000; @@ -132,7 +128,6 @@ static void ibex_timer_reset(DeviceState *dev) s->timer_compare_upper0 = 0xFFFFFFFF; s->timer_intr_enable = 0x00000000; s->timer_intr_state = 0x00000000; - s->timer_intr_test = 0x00000000; ibex_timer_update_irqs(s); } @@ -145,6 +140,10 @@ static uint64_t ibex_timer_read(void *opaque, hwaddr addr, uint64_t retvalue = 0; switch (addr >> 2) { + case R_ALERT_TEST: + qemu_log_mask(LOG_GUEST_ERROR, + "Attempted to read ALERT_TEST, a write only register"); + break; case R_CTRL: retvalue = s->timer_ctrl; break; @@ -170,7 +169,8 @@ static uint64_t ibex_timer_read(void *opaque, hwaddr addr, retvalue = s->timer_intr_state; break; case R_INTR_TEST: - retvalue = s->timer_intr_test; + qemu_log_mask(LOG_GUEST_ERROR, + "Attempted to read INTR_TEST, a write only register"); break; default: qemu_log_mask(LOG_GUEST_ERROR, @@ -188,6 +188,9 @@ static void ibex_timer_write(void *opaque, hwaddr addr, uint32_t val = val64; switch (addr >> 2) { + case R_ALERT_TEST: + qemu_log_mask(LOG_UNIMP, "Alert triggering not supported"); + break; case R_CTRL: s->timer_ctrl = val; break; @@ -217,10 +220,7 @@ static void ibex_timer_write(void *opaque, hwaddr addr, s->timer_intr_state &= ~val; break; case R_INTR_TEST: - s->timer_intr_test = val; - if (s->timer_intr_enable & - s->timer_intr_test & - R_INTR_ENABLE_IE_0_MASK) { + if (s->timer_intr_enable & val & R_INTR_ENABLE_IE_0_MASK) { s->timer_intr_state |= R_INTR_STATE_IS_0_MASK; qemu_set_irq(s->irq, true); } @@ -249,8 +249,8 @@ static int ibex_timer_post_load(void *opaque, int version_id) static const VMStateDescription vmstate_ibex_timer = { .name = TYPE_IBEX_TIMER, - .version_id = 1, - .minimum_version_id = 1, + .version_id = 2, + .minimum_version_id = 2, .post_load = ibex_timer_post_load, .fields = (VMStateField[]) { VMSTATE_UINT32(timer_ctrl, IbexTimerState), 
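Dropping timer_intr_test from the migration stream is why version_id and minimum_version_id jump from 1 to 2 here: an older stream would otherwise be parsed against the wrong field layout. The same convention in a stripped-down form (device and fields are hypothetical):

#include "qemu/osdep.h"
#include "migration/vmstate.h"

typedef struct DemoState {
    uint32_t ctrl;
    uint32_t status;
    /* a former 'scratch' field is no longer migrated */
} DemoState;

static const VMStateDescription vmstate_demo = {
    .name = "demo-device",
    .version_id = 2,          /* bumped when 'scratch' was removed */
    .minimum_version_id = 2,  /* refuse incoming version-1 streams */
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ctrl, DemoState),
        VMSTATE_UINT32(status, DemoState),
        VMSTATE_END_OF_LIST()
    }
};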
@@ -259,7 +259,6 @@ static const VMStateDescription vmstate_ibex_timer = { VMSTATE_UINT32(timer_compare_upper0, IbexTimerState), VMSTATE_UINT32(timer_intr_enable, IbexTimerState), VMSTATE_UINT32(timer_intr_state, IbexTimerState), - VMSTATE_UINT32(timer_intr_test, IbexTimerState), VMSTATE_END_OF_LIST() } }; @@ -280,12 +279,21 @@ static void ibex_timer_init(Object *obj) sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); } +static void ibex_timer_realize(DeviceState *dev, Error **errp) +{ + IbexTimerState *s = IBEX_TIMER(dev); + + qdev_init_gpio_out(dev, &s->m_timer_irq, 1); +} + + static void ibex_timer_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->reset = ibex_timer_reset; dc->vmsd = &vmstate_ibex_timer; + dc->realize = ibex_timer_realize; device_class_set_props(dc, ibex_timer_properties); } diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c index ebd58254d1..ec0fa440d7 100644 --- a/hw/timer/imx_epit.c +++ b/hw/timer/imx_epit.c @@ -275,10 +275,15 @@ static void imx_epit_write(void *opaque, hwaddr offset, uint64_t value, /* If IOVW bit is set then set the timer value */ ptimer_set_count(s->timer_reload, s->lr); } - + /* + * Commit the change to s->timer_reload, so it can propagate. Otherwise + * the timer interrupt may not fire properly. The commit must happen + * before calling imx_epit_reload_compare_timer(), which reads + * s->timer_reload internally again. + */ + ptimer_transaction_commit(s->timer_reload); imx_epit_reload_compare_timer(s); ptimer_transaction_commit(s->timer_cmp); - ptimer_transaction_commit(s->timer_reload); break; case 3: /* CMP */ @@ -347,9 +352,9 @@ static void imx_epit_realize(DeviceState *dev, Error **errp) 0x00001000); sysbus_init_mmio(sbd, &s->iomem); - s->timer_reload = ptimer_init(imx_epit_reload, s, PTIMER_POLICY_DEFAULT); + s->timer_reload = ptimer_init(imx_epit_reload, s, PTIMER_POLICY_LEGACY); - s->timer_cmp = ptimer_init(imx_epit_cmp, s, PTIMER_POLICY_DEFAULT); + s->timer_cmp = ptimer_init(imx_epit_cmp, s, PTIMER_POLICY_LEGACY); } static void imx_epit_class_init(ObjectClass *klass, void *data) diff --git a/hw/timer/imx_gpt.c b/hw/timer/imx_gpt.c index 5c0d9a269c..80b8302639 100644 --- a/hw/timer/imx_gpt.c +++ b/hw/timer/imx_gpt.c @@ -505,7 +505,7 @@ static void imx_gpt_realize(DeviceState *dev, Error **errp) 0x00001000); sysbus_init_mmio(sbd, &s->iomem); - s->timer = ptimer_init(imx_gpt_timeout, s, PTIMER_POLICY_DEFAULT); + s->timer = ptimer_init(imx_gpt_timeout, s, PTIMER_POLICY_LEGACY); } static void imx_gpt_class_init(ObjectClass *klass, void *data) diff --git a/hw/timer/meson.build b/hw/timer/meson.build index 1aa3cd2284..03092e2ceb 100644 --- a/hw/timer/meson.build +++ b/hw/timer/meson.build @@ -31,8 +31,10 @@ softmmu_ss.add(when: 'CONFIG_SH_TIMER', if_true: files('sh_timer.c')) softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_timer.c')) softmmu_ss.add(when: 'CONFIG_SSE_COUNTER', if_true: files('sse-counter.c')) softmmu_ss.add(when: 'CONFIG_SSE_TIMER', if_true: files('sse-timer.c')) +softmmu_ss.add(when: 'CONFIG_STELLARIS_GPTM', if_true: files('stellaris-gptm.c')) softmmu_ss.add(when: 'CONFIG_STM32F2XX_TIMER', if_true: files('stm32f2xx_timer.c')) softmmu_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_timer.c')) specific_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_timer.c')) +softmmu_ss.add(when: 'CONFIG_SIFIVE_PWM', if_true: files('sifive_pwm.c')) specific_ss.add(when: 'CONFIG_AVR_TIMER16', if_true: files('avr_timer16.c')) diff --git a/hw/timer/mss-timer.c b/hw/timer/mss-timer.c index 
fe0ca905f3..ee7438f168 100644 --- a/hw/timer/mss-timer.c +++ b/hw/timer/mss-timer.c @@ -232,7 +232,7 @@ static void mss_timer_init(Object *obj) for (i = 0; i < NUM_TIMERS; i++) { struct Msf2Timer *st = &t->timers[i]; - st->ptimer = ptimer_init(timer_hit, st, PTIMER_POLICY_DEFAULT); + st->ptimer = ptimer_init(timer_hit, st, PTIMER_POLICY_LEGACY); ptimer_transaction_begin(st->ptimer); ptimer_set_freq(st->ptimer, t->freq_hz); ptimer_transaction_commit(st->ptimer); diff --git a/hw/timer/nrf51_timer.c b/hw/timer/nrf51_timer.c index 42be79c736..50c6772383 100644 --- a/hw/timer/nrf51_timer.c +++ b/hw/timer/nrf51_timer.c @@ -45,7 +45,12 @@ static uint32_t update_counter(NRF51TimerState *s, int64_t now) uint32_t ticks = ns_to_ticks(s, now - s->update_counter_ns); s->counter = (s->counter + ticks) % BIT(bitwidths[s->bitmode]); - s->update_counter_ns = now; + /* + * Only advance the sync time to the timestamp of the last tick, + * not all the way to 'now', so we don't lose time if we do + * multiple resyncs in a single tick. + */ + s->update_counter_ns += ticks_to_ns(s, ticks); return ticks; } diff --git a/hw/timer/renesas_cmt.c b/hw/timer/renesas_cmt.c index 2e0fd21a36..69eabc678a 100644 --- a/hw/timer/renesas_cmt.c +++ b/hw/timer/renesas_cmt.c @@ -57,7 +57,7 @@ static void update_events(RCMTState *cmt, int ch) if ((cmt->cmstr & (1 << ch)) == 0) { /* count disable, so not happened next event. */ - return ; + return; } next_time = cmt->cmcor[ch] - cmt->cmcnt[ch]; next_time *= NANOSECONDS_PER_SECOND; diff --git a/hw/timer/renesas_tmr.c b/hw/timer/renesas_tmr.c index d96002e1ee..c15f654738 100644 --- a/hw/timer/renesas_tmr.c +++ b/hw/timer/renesas_tmr.c @@ -67,18 +67,18 @@ static void update_events(RTMRState *tmr, int ch) int i, event; if (tmr->tccr[ch] == 0) { - return ; + return; } if (FIELD_EX8(tmr->tccr[ch], TCCR, CSS) == 0) { /* external clock mode */ /* event not happened */ - return ; + return; } if (FIELD_EX8(tmr->tccr[0], TCCR, CSS) == CSS_CASCADING) { /* cascading mode */ if (ch == 1) { tmr->next[ch] = none; - return ; + return; } diff[cmia] = concat_reg(tmr->tcora) - concat_reg(tmr->tcnt); diff[cmib] = concat_reg(tmr->tcorb) - concat_reg(tmr->tcnt); @@ -384,7 +384,7 @@ static void timer_events(RTMRState *tmr, int ch) tmr->tcorb[ch]) & 0xff; } else { if (ch == 1) { - return ; + return; } tcnt = issue_event(tmr, ch, 16, concat_reg(tmr->tcnt), diff --git a/hw/timer/sh_timer.c b/hw/timer/sh_timer.c index 58af1a1edb..7788939766 100644 --- a/hw/timer/sh_timer.c +++ b/hw/timer/sh_timer.c @@ -10,13 +10,12 @@ #include "qemu/osdep.h" #include "exec/memory.h" -#include "hw/hw.h" +#include "qemu/log.h" #include "hw/irq.h" #include "hw/sh4/sh.h" #include "hw/timer/tmu012.h" #include "hw/ptimer.h" - -//#define DEBUG_TIMER +#include "trace.h" #define TIMER_TCR_TPSC (7 << 0) #define TIMER_TCR_CKEG (3 << 3) @@ -46,24 +45,24 @@ typedef struct { int feat; int enabled; qemu_irq irq; -} sh_timer_state; +} SHTimerState; /* Check all active timers, and schedule the next timer interrupt. 
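The nrf51_timer hunk above fixes a classic resync bug: advancing the "last synced" timestamp all the way to now throws away the fraction of a tick that has not elapsed yet, and repeated resyncs make the counter run slow. The fix, isolated into a toy counter (field names are illustrative):

#include "qemu/osdep.h"

typedef struct DemoCounter {
    uint64_t counter;
    int64_t last_sync_ns;
    uint64_t period_ns;     /* nanoseconds per counter tick */
} DemoCounter;

static uint64_t demo_counter_sync(DemoCounter *c, int64_t now)
{
    uint64_t ticks = (now - c->last_sync_ns) / c->period_ns;

    c->counter += ticks;
    /* Advance only by whole ticks so the sub-tick remainder is kept */
    c->last_sync_ns += ticks * c->period_ns;
    return ticks;
}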
*/ -static void sh_timer_update(sh_timer_state *s) +static void sh_timer_update(SHTimerState *s) { int new_level = s->int_level && (s->tcr & TIMER_TCR_UNIE); - if (new_level != s->old_level) - qemu_set_irq (s->irq, new_level); - + if (new_level != s->old_level) { + qemu_set_irq(s->irq, new_level); + } s->old_level = s->int_level; s->int_level = new_level; } static uint32_t sh_timer_read(void *opaque, hwaddr offset) { - sh_timer_state *s = (sh_timer_state *)opaque; + SHTimerState *s = opaque; switch (offset >> 2) { case OFFSET_TCOR: @@ -73,19 +72,18 @@ static uint32_t sh_timer_read(void *opaque, hwaddr offset) case OFFSET_TCR: return s->tcr | (s->int_level ? TIMER_TCR_UNF : 0); case OFFSET_TCPR: - if (s->feat & TIMER_FEAT_CAPT) + if (s->feat & TIMER_FEAT_CAPT) { return s->tcpr; - /* fall through */ - default: - hw_error("sh_timer_read: Bad offset %x\n", (int)offset); - return 0; + } } + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return 0; } -static void sh_timer_write(void *opaque, hwaddr offset, - uint32_t value) +static void sh_timer_write(void *opaque, hwaddr offset, uint32_t value) { - sh_timer_state *s = (sh_timer_state *)opaque; + SHTimerState *s = opaque; int freq; switch (offset >> 2) { @@ -104,19 +102,30 @@ static void sh_timer_write(void *opaque, hwaddr offset, case OFFSET_TCR: ptimer_transaction_begin(s->timer); if (s->enabled) { - /* Pause the timer if it is running. This may cause some - inaccuracy dure to rounding, but avoids a whole lot of other - messyness. */ + /* + * Pause the timer if it is running. This may cause some inaccuracy + * due to rounding, but avoids a whole lot of other messiness + */ ptimer_stop(s->timer); } freq = s->freq; /* ??? Need to recalculate expiry time after changing divisor. 
*/ switch (value & TIMER_TCR_TPSC) { - case 0: freq >>= 2; break; - case 1: freq >>= 4; break; - case 2: freq >>= 6; break; - case 3: freq >>= 8; break; - case 4: freq >>= 10; break; + case 0: + freq >>= 2; + break; + case 1: + freq >>= 4; + break; + case 2: + freq >>= 6; + break; + case 3: + freq >>= 8; + break; + case 4: + freq >>= 10; + break; case 6: case 7: if (s->feat & TIMER_FEAT_EXTCLK) { @@ -124,7 +133,8 @@ static void sh_timer_write(void *opaque, hwaddr offset, } /* fallthrough */ default: - hw_error("sh_timer_write: Reserved TPSC value\n"); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Reserved TPSC value\n", __func__); } switch ((value & TIMER_TCR_CKEG) >> 3) { case 0: @@ -137,7 +147,8 @@ static void sh_timer_write(void *opaque, hwaddr offset, } /* fallthrough */ default: - hw_error("sh_timer_write: Reserved CKEG value\n"); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Reserved CKEG value\n", __func__); } switch ((value & TIMER_TCR_ICPE) >> 6) { case 0: @@ -149,7 +160,8 @@ static void sh_timer_write(void *opaque, hwaddr offset, } /* fallthrough */ default: - hw_error("sh_timer_write: Reserved ICPE value\n"); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Reserved ICPE value\n", __func__); } if ((value & TIMER_TCR_UNF) == 0) { s->int_level = 0; @@ -158,13 +170,15 @@ static void sh_timer_write(void *opaque, hwaddr offset, value &= ~TIMER_TCR_UNF; if ((value & TIMER_TCR_ICPF) && (!(s->feat & TIMER_FEAT_CAPT))) { - hw_error("sh_timer_write: Reserved ICPF value\n"); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Reserved ICPF value\n", __func__); } value &= ~TIMER_TCR_ICPF; /* capture not supported */ if (value & TIMER_TCR_RESERVED) { - hw_error("sh_timer_write: Reserved TCR bits set\n"); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Reserved TCR bits set\n", __func__); } s->tcr = value; ptimer_set_limit(s->timer, s->tcor, 0); @@ -182,19 +196,17 @@ static void sh_timer_write(void *opaque, hwaddr offset, } /* fallthrough */ default: - hw_error("sh_timer_write: Bad offset %x\n", (int)offset); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, offset); } sh_timer_update(s); } static void sh_timer_start_stop(void *opaque, int enable) { - sh_timer_state *s = (sh_timer_state *)opaque; - -#ifdef DEBUG_TIMER - printf("sh_timer_start_stop %d (%d)\n", enable, s->enabled); -#endif + SHTimerState *s = opaque; + trace_sh_timer_start_stop(enable, s->enabled); ptimer_transaction_begin(s->timer); if (s->enabled && !enable) { ptimer_stop(s->timer); @@ -204,24 +216,20 @@ static void sh_timer_start_stop(void *opaque, int enable) } ptimer_transaction_commit(s->timer); s->enabled = !!enable; - -#ifdef DEBUG_TIMER - printf("sh_timer_start_stop done %d\n", s->enabled); -#endif } static void sh_timer_tick(void *opaque) { - sh_timer_state *s = (sh_timer_state *)opaque; + SHTimerState *s = opaque; s->int_level = s->enabled; sh_timer_update(s); } static void *sh_timer_init(uint32_t freq, int feat, qemu_irq irq) { - sh_timer_state *s; + SHTimerState *s; - s = (sh_timer_state *)g_malloc0(sizeof(sh_timer_state)); + s = g_malloc0(sizeof(*s)); s->freq = freq; s->feat = feat; s->tcor = 0xffffffff; @@ -231,7 +239,7 @@ static void *sh_timer_init(uint32_t freq, int feat, qemu_irq irq) s->enabled = 0; s->irq = irq; - s->timer = ptimer_init(sh_timer_tick, s, PTIMER_POLICY_DEFAULT); + s->timer = ptimer_init(sh_timer_tick, s, PTIMER_POLICY_LEGACY); sh_timer_write(s, OFFSET_TCOR >> 2, s->tcor); sh_timer_write(s, OFFSET_TCNT >> 2, s->tcnt); @@ -252,50 +260,49 @@ typedef struct { int feat; } tmu012_state; -static uint64_t 
tmu012_read(void *opaque, hwaddr offset, - unsigned size) +static uint64_t tmu012_read(void *opaque, hwaddr offset, unsigned size) { - tmu012_state *s = (tmu012_state *)opaque; - -#ifdef DEBUG_TIMER - printf("tmu012_read 0x%lx\n", (unsigned long) offset); -#endif + tmu012_state *s = opaque; + trace_sh_timer_read(offset); if (offset >= 0x20) { if (!(s->feat & TMU012_FEAT_3CHAN)) { - hw_error("tmu012_write: Bad channel offset %x\n", (int)offset); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad channel offset 0x%" HWADDR_PRIx "\n", + __func__, offset); } return sh_timer_read(s->timer[2], offset - 0x20); } - if (offset >= 0x14) + if (offset >= 0x14) { return sh_timer_read(s->timer[1], offset - 0x14); - - if (offset >= 0x08) + } + if (offset >= 0x08) { return sh_timer_read(s->timer[0], offset - 0x08); - - if (offset == 4) + } + if (offset == 4) { return s->tstr; - - if ((s->feat & TMU012_FEAT_TOCR) && offset == 0) + } + if ((s->feat & TMU012_FEAT_TOCR) && offset == 0) { return s->tocr; + } - hw_error("tmu012_write: Bad offset %x\n", (int)offset); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, offset); return 0; } static void tmu012_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { - tmu012_state *s = (tmu012_state *)opaque; - -#ifdef DEBUG_TIMER - printf("tmu012_write 0x%lx 0x%08x\n", (unsigned long) offset, value); -#endif + tmu012_state *s = opaque; + trace_sh_timer_write(offset, value); if (offset >= 0x20) { if (!(s->feat & TMU012_FEAT_3CHAN)) { - hw_error("tmu012_write: Bad channel offset %x\n", (int)offset); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad channel offset 0x%" HWADDR_PRIx "\n", + __func__, offset); } sh_timer_write(s->timer[2], offset - 0x20, value); return; @@ -318,7 +325,7 @@ static void tmu012_write(void *opaque, hwaddr offset, sh_timer_start_stop(s->timer[2], value & (1 << 2)); } else { if (value & (1 << 2)) { - hw_error("tmu012_write: Bad channel\n"); + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad channel\n", __func__); } } @@ -337,15 +344,14 @@ static const MemoryRegionOps tmu012_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -void tmu012_init(MemoryRegion *sysmem, hwaddr base, - int feat, uint32_t freq, +void tmu012_init(MemoryRegion *sysmem, hwaddr base, int feat, uint32_t freq, qemu_irq ch0_irq, qemu_irq ch1_irq, qemu_irq ch2_irq0, qemu_irq ch2_irq1) { tmu012_state *s; int timer_feat = (feat & TMU012_FEAT_EXTCLK) ? TIMER_FEAT_EXTCLK : 0; - s = (tmu012_state *)g_malloc0(sizeof(tmu012_state)); + s = g_malloc0(sizeof(*s)); s->feat = feat; s->timer[0] = sh_timer_init(freq, timer_feat, ch0_irq); s->timer[1] = sh_timer_init(freq, timer_feat, ch1_irq); @@ -354,15 +360,14 @@ void tmu012_init(MemoryRegion *sysmem, hwaddr base, ch2_irq0); /* ch2_irq1 not supported */ } - memory_region_init_io(&s->iomem, NULL, &tmu012_ops, s, - "timer", 0x100000000ULL); + memory_region_init_io(&s->iomem, NULL, &tmu012_ops, s, "timer", 0x30); memory_region_init_alias(&s->iomem_p4, NULL, "timer-p4", - &s->iomem, 0, 0x1000); + &s->iomem, 0, memory_region_size(&s->iomem)); memory_region_add_subregion(sysmem, P4ADDR(base), &s->iomem_p4); memory_region_init_alias(&s->iomem_a7, NULL, "timer-a7", - &s->iomem, 0, 0x1000); + &s->iomem, 0, memory_region_size(&s->iomem)); memory_region_add_subregion(sysmem, A7ADDR(base), &s->iomem_a7); /* ??? Save/restore. 
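Throughout the sh_timer/tmu012 conversion above, fatal hw_error() calls on bad guest accesses are replaced with qemu_log_mask(LOG_GUEST_ERROR, ...) plus a benign return value, so a misbehaving guest can no longer abort the whole VM. The resulting handler shape, with a made-up register map:

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "exec/hwaddr.h"

static uint64_t demo_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    switch (offset) {
    case 0x00:  /* ID */
        return 0x4d4d4943;
    case 0x04:  /* STATUS */
        return 0;
    default:
        /* Log the guest error and return something harmless */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Bad offset 0x%" HWADDR_PRIx "\n",
                      __func__, offset);
        return 0;
    }
}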
*/ } diff --git a/hw/timer/sifive_pwm.c b/hw/timer/sifive_pwm.c new file mode 100644 index 0000000000..c664480ccf --- /dev/null +++ b/hw/timer/sifive_pwm.c @@ -0,0 +1,468 @@ +/* + * SiFive PWM + * + * Copyright (c) 2020 Western Digital + * + * Author: Alistair Francis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "trace.h" +#include "hw/irq.h" +#include "hw/timer/sifive_pwm.h" +#include "hw/qdev-properties.h" +#include "hw/registerfields.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#define HAS_PWM_EN_BITS(cfg) ((cfg & R_CONFIG_ENONESHOT_MASK) || \ + (cfg & R_CONFIG_ENALWAYS_MASK)) + +#define PWMCMP_MASK 0xFFFF +#define PWMCOUNT_MASK 0x7FFFFFFF + +REG32(CONFIG, 0x00) + FIELD(CONFIG, SCALE, 0, 4) + FIELD(CONFIG, STICKY, 8, 1) + FIELD(CONFIG, ZEROCMP, 9, 1) + FIELD(CONFIG, DEGLITCH, 10, 1) + FIELD(CONFIG, ENALWAYS, 12, 1) + FIELD(CONFIG, ENONESHOT, 13, 1) + FIELD(CONFIG, CMP0CENTER, 16, 1) + FIELD(CONFIG, CMP1CENTER, 17, 1) + FIELD(CONFIG, CMP2CENTER, 18, 1) + FIELD(CONFIG, CMP3CENTER, 19, 1) + FIELD(CONFIG, CMP0GANG, 24, 1) + FIELD(CONFIG, CMP1GANG, 25, 1) + FIELD(CONFIG, CMP2GANG, 26, 1) + FIELD(CONFIG, CMP3GANG, 27, 1) + FIELD(CONFIG, CMP0IP, 28, 1) + FIELD(CONFIG, CMP1IP, 29, 1) + FIELD(CONFIG, CMP2IP, 30, 1) + FIELD(CONFIG, CMP3IP, 31, 1) +REG32(COUNT, 0x08) +REG32(PWMS, 0x10) +REG32(PWMCMP0, 0x20) +REG32(PWMCMP1, 0x24) +REG32(PWMCMP2, 0x28) +REG32(PWMCMP3, 0x2C) + +static inline uint64_t sifive_pwm_ns_to_ticks(SiFivePwmState *s, + uint64_t time) +{ + return muldiv64(time, s->freq_hz, NANOSECONDS_PER_SECOND); +} + +static inline uint64_t sifive_pwm_ticks_to_ns(SiFivePwmState *s, + uint64_t ticks) +{ + return muldiv64(ticks, NANOSECONDS_PER_SECOND, s->freq_hz); +} + +static inline uint64_t sifive_pwm_compute_scale(SiFivePwmState *s) +{ + return s->pwmcfg & R_CONFIG_SCALE_MASK; +} + +static void sifive_pwm_set_alarms(SiFivePwmState *s) +{ + uint64_t now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + if (HAS_PWM_EN_BITS(s->pwmcfg)) { + /* + * Subtract ticks from number of ticks when the timer was zero + * and mask to the register width. 
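The PWM model keeps all of its state in ticks of the input frequency and converts to and from QEMU_CLOCK_VIRTUAL nanoseconds with muldiv64(); masking with PWMCOUNT_MASK then models the 31-bit hardware counter. Those conversions written out on their own (freq_hz stands in for SiFivePwmState::freq_hz):

#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "qemu/host-utils.h"

#define DEMO_COUNT_MASK 0x7FFFFFFF   /* mirrors PWMCOUNT_MASK above */

static inline uint64_t demo_ns_to_ticks(uint64_t freq_hz, uint64_t ns)
{
    return muldiv64(ns, freq_hz, NANOSECONDS_PER_SECOND);
}

static inline uint64_t demo_ticks_to_ns(uint64_t freq_hz, uint64_t ticks)
{
    return muldiv64(ticks, NANOSECONDS_PER_SECOND, freq_hz);
}

/* Counter value of a running timer whose count was zero at 'tick_offset' */
static inline uint64_t demo_pwmcount(uint64_t freq_hz, uint64_t tick_offset,
                                     int64_t now_ns)
{
    return (demo_ns_to_ticks(freq_hz, now_ns) - tick_offset) & DEMO_COUNT_MASK;
}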
+ */ + uint64_t pwmcount = (sifive_pwm_ns_to_ticks(s, now_ns) - + s->tick_offset) & PWMCOUNT_MASK; + uint64_t scale = sifive_pwm_compute_scale(s); + /* PWMs only contains PWMCMP_MASK bits starting at scale */ + uint64_t pwms = (pwmcount & (PWMCMP_MASK << scale)) >> scale; + + for (int i = 0; i < SIFIVE_PWM_CHANS; i++) { + uint64_t pwmcmp = s->pwmcmp[i] & PWMCMP_MASK; + uint64_t pwmcmp_ticks = pwmcmp << scale; + + /* + * Per circuit diagram and spec, both cases raises corresponding + * IP bit one clock cycle after time expires. + */ + if (pwmcmp > pwms) { + uint64_t offset = pwmcmp_ticks - pwmcount + 1; + uint64_t when_to_fire = now_ns + + sifive_pwm_ticks_to_ns(s, offset); + + trace_sifive_pwm_set_alarm(when_to_fire, now_ns); + timer_mod(&s->timer[i], when_to_fire); + } else { + /* Schedule interrupt for next cycle */ + trace_sifive_pwm_set_alarm(now_ns + 1, now_ns); + timer_mod(&s->timer[i], now_ns + 1); + } + + } + } else { + /* + * If timer incrementing disabled, just do pwms > pwmcmp check since + * a write may have happened to PWMs. + */ + uint64_t pwmcount = (s->tick_offset) & PWMCOUNT_MASK; + uint64_t scale = sifive_pwm_compute_scale(s); + uint64_t pwms = (pwmcount & (PWMCMP_MASK << scale)) >> scale; + + for (int i = 0; i < SIFIVE_PWM_CHANS; i++) { + uint64_t pwmcmp = s->pwmcmp[i] & PWMCMP_MASK; + + if (pwms >= pwmcmp) { + trace_sifive_pwm_set_alarm(now_ns + 1, now_ns); + timer_mod(&s->timer[i], now_ns + 1); + } else { + /* Effectively disable timer by scheduling far in future. */ + trace_sifive_pwm_set_alarm(0xFFFFFFFFFFFFFF, now_ns); + timer_mod(&s->timer[i], 0xFFFFFFFFFFFFFF); + } + } + } +} + +static void sifive_pwm_interrupt(SiFivePwmState *s, int num) +{ + uint64_t now = sifive_pwm_ns_to_ticks(s, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + bool was_incrementing = HAS_PWM_EN_BITS(s->pwmcfg); + + trace_sifive_pwm_interrupt(num); + + s->pwmcfg |= R_CONFIG_CMP0IP_MASK << num; + qemu_irq_raise(s->irqs[num]); + + /* + * If the zerocmp is set and pwmcmp0 raised the interrupt + * reset the zero ticks. + */ + if ((s->pwmcfg & R_CONFIG_ZEROCMP_MASK) && (num == 0)) { + /* If reset signal conditions, disable ENONESHOT. */ + s->pwmcfg &= ~R_CONFIG_ENONESHOT_MASK; + + if (was_incrementing) { + /* If incrementing, time in ticks is when pwmcount is zero */ + s->tick_offset = now; + } else { + /* If not incrementing, pwmcount = 0 */ + s->tick_offset = 0; + } + } + + /* + * If carryout bit set, which we discern via looking for overflow, + * also reset ENONESHOT. 
+ */ + if (was_incrementing && + ((now & PWMCOUNT_MASK) < (s->tick_offset & PWMCOUNT_MASK))) { + s->pwmcfg &= ~R_CONFIG_ENONESHOT_MASK; + } + + /* Schedule or disable interrupts */ + sifive_pwm_set_alarms(s); + + /* If was enabled, and now not enabled, switch tick rep */ + if (was_incrementing && !HAS_PWM_EN_BITS(s->pwmcfg)) { + s->tick_offset = (now - s->tick_offset) & PWMCOUNT_MASK; + } +} + +static void sifive_pwm_interrupt_0(void *opaque) +{ + SiFivePwmState *s = opaque; + + sifive_pwm_interrupt(s, 0); +} + +static void sifive_pwm_interrupt_1(void *opaque) +{ + SiFivePwmState *s = opaque; + + sifive_pwm_interrupt(s, 1); +} + +static void sifive_pwm_interrupt_2(void *opaque) +{ + SiFivePwmState *s = opaque; + + sifive_pwm_interrupt(s, 2); +} + +static void sifive_pwm_interrupt_3(void *opaque) +{ + SiFivePwmState *s = opaque; + + sifive_pwm_interrupt(s, 3); +} + +static uint64_t sifive_pwm_read(void *opaque, hwaddr addr, + unsigned int size) +{ + SiFivePwmState *s = opaque; + uint64_t cur_time, scale; + uint64_t now = sifive_pwm_ns_to_ticks(s, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + + trace_sifive_pwm_read(addr); + + switch (addr) { + case A_CONFIG: + return s->pwmcfg; + case A_COUNT: + cur_time = s->tick_offset; + + if (HAS_PWM_EN_BITS(s->pwmcfg)) { + cur_time = now - cur_time; + } + + /* + * Return the value in the counter with bit 31 always 0 + * This is allowed to wrap around so we don't need to check that. + */ + return cur_time & PWMCOUNT_MASK; + case A_PWMS: + cur_time = s->tick_offset; + scale = sifive_pwm_compute_scale(s); + + if (HAS_PWM_EN_BITS(s->pwmcfg)) { + cur_time = now - cur_time; + } + + return ((cur_time & PWMCOUNT_MASK) >> scale) & PWMCMP_MASK; + case A_PWMCMP0: + return s->pwmcmp[0] & PWMCMP_MASK; + case A_PWMCMP1: + return s->pwmcmp[1] & PWMCMP_MASK; + case A_PWMCMP2: + return s->pwmcmp[2] & PWMCMP_MASK; + case A_PWMCMP3: + return s->pwmcmp[3] & PWMCMP_MASK; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); + return 0; + } + + return 0; +} + +static void sifive_pwm_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + SiFivePwmState *s = opaque; + uint32_t value = val64; + uint64_t new_offset, scale; + uint64_t now = sifive_pwm_ns_to_ticks(s, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + + trace_sifive_pwm_write(value, addr); + + switch (addr) { + case A_CONFIG: + if (value & (R_CONFIG_CMP0CENTER_MASK | R_CONFIG_CMP1CENTER_MASK | + R_CONFIG_CMP2CENTER_MASK | R_CONFIG_CMP3CENTER_MASK)) { + qemu_log_mask(LOG_UNIMP, "%s: CMPxCENTER is not supported\n", + __func__); + } + + if (value & (R_CONFIG_CMP0GANG_MASK | R_CONFIG_CMP1GANG_MASK | + R_CONFIG_CMP2GANG_MASK | R_CONFIG_CMP3GANG_MASK)) { + qemu_log_mask(LOG_UNIMP, "%s: CMPxGANG is not supported\n", + __func__); + } + + if (value & (R_CONFIG_CMP0IP_MASK | R_CONFIG_CMP1IP_MASK | + R_CONFIG_CMP2IP_MASK | R_CONFIG_CMP3IP_MASK)) { + qemu_log_mask(LOG_UNIMP, "%s: CMPxIP is not supported\n", + __func__); + } + + if (!(value & R_CONFIG_CMP0IP_MASK)) { + qemu_irq_lower(s->irqs[0]); + } + + if (!(value & R_CONFIG_CMP1IP_MASK)) { + qemu_irq_lower(s->irqs[1]); + } + + if (!(value & R_CONFIG_CMP2IP_MASK)) { + qemu_irq_lower(s->irqs[2]); + } + + if (!(value & R_CONFIG_CMP3IP_MASK)) { + qemu_irq_lower(s->irqs[3]); + } + + /* + * If this write enables the timer increment + * set the time when pwmcount was zero to be cur_time - pwmcount. + * If this write disables the timer increment + * convert back from pwmcount to the time in ticks + * when pwmcount was zero. 
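The comment above describes tick_offset doing double duty, a common trick for counters that can be stopped and restarted: while counting it holds the tick at which the counter read zero, while stopped it holds the frozen count, and the same expression converts between the two on every enable/disable edge. The idea in isolation (hypothetical names, 31-bit counter as in this device):

#include "qemu/osdep.h"

#define DEMO_COUNT_MASK 0x7FFFFFFF

/* Counter value, valid in both the running and the stopped representation */
static uint64_t demo_read_count(bool running, uint64_t tick_offset,
                                uint64_t now_ticks)
{
    return (running ? now_ticks - tick_offset : tick_offset) & DEMO_COUNT_MASK;
}

/* Called on every start/stop edge; the same expression works both ways */
static uint64_t demo_toggle_running(uint64_t tick_offset, uint64_t now_ticks)
{
    return (now_ticks - tick_offset) & DEMO_COUNT_MASK;
}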
+ */ + if ((!HAS_PWM_EN_BITS(s->pwmcfg) && HAS_PWM_EN_BITS(value)) || + (HAS_PWM_EN_BITS(s->pwmcfg) && !HAS_PWM_EN_BITS(value))) { + s->tick_offset = (now - s->tick_offset) & PWMCOUNT_MASK; + } + + s->pwmcfg = value; + break; + case A_COUNT: + /* The guest changed the counter, updated the offset value. */ + new_offset = value; + + if (HAS_PWM_EN_BITS(s->pwmcfg)) { + new_offset = now - new_offset; + } + + s->tick_offset = new_offset; + break; + case A_PWMS: + scale = sifive_pwm_compute_scale(s); + new_offset = (((value & PWMCMP_MASK) << scale) & PWMCOUNT_MASK); + + if (HAS_PWM_EN_BITS(s->pwmcfg)) { + new_offset = now - new_offset; + } + + s->tick_offset = new_offset; + break; + case A_PWMCMP0: + s->pwmcmp[0] = value & PWMCMP_MASK; + break; + case A_PWMCMP1: + s->pwmcmp[1] = value & PWMCMP_MASK; + break; + case A_PWMCMP2: + s->pwmcmp[2] = value & PWMCMP_MASK; + break; + case A_PWMCMP3: + s->pwmcmp[3] = value & PWMCMP_MASK; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); + } + + /* Update the alarms to reflect possible updated values */ + sifive_pwm_set_alarms(s); +} + +static void sifive_pwm_reset(DeviceState *dev) +{ + SiFivePwmState *s = SIFIVE_PWM(dev); + uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + s->pwmcfg = 0x00000000; + s->pwmcmp[0] = 0x00000000; + s->pwmcmp[1] = 0x00000000; + s->pwmcmp[2] = 0x00000000; + s->pwmcmp[3] = 0x00000000; + + s->tick_offset = sifive_pwm_ns_to_ticks(s, now); +} + +static const MemoryRegionOps sifive_pwm_ops = { + .read = sifive_pwm_read, + .write = sifive_pwm_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_sifive_pwm = { + .name = TYPE_SIFIVE_PWM, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_TIMER_ARRAY(timer, SiFivePwmState, 4), + VMSTATE_UINT64(tick_offset, SiFivePwmState), + VMSTATE_UINT32(pwmcfg, SiFivePwmState), + VMSTATE_UINT32_ARRAY(pwmcmp, SiFivePwmState, 4), + VMSTATE_END_OF_LIST() + } +}; + +static Property sifive_pwm_properties[] = { + /* 0.5Ghz per spec after FSBL */ + DEFINE_PROP_UINT64("clock-frequency", struct SiFivePwmState, + freq_hz, 500000000ULL), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sifive_pwm_init(Object *obj) +{ + SiFivePwmState *s = SIFIVE_PWM(obj); + int i; + + for (i = 0; i < SIFIVE_PWM_IRQS; i++) { + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irqs[i]); + } + + memory_region_init_io(&s->mmio, obj, &sifive_pwm_ops, s, + TYPE_SIFIVE_PWM, 0x100); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); +} + +static void sifive_pwm_realize(DeviceState *dev, Error **errp) +{ + SiFivePwmState *s = SIFIVE_PWM(dev); + + timer_init_ns(&s->timer[0], QEMU_CLOCK_VIRTUAL, + sifive_pwm_interrupt_0, s); + + timer_init_ns(&s->timer[1], QEMU_CLOCK_VIRTUAL, + sifive_pwm_interrupt_1, s); + + timer_init_ns(&s->timer[2], QEMU_CLOCK_VIRTUAL, + sifive_pwm_interrupt_2, s); + + timer_init_ns(&s->timer[3], QEMU_CLOCK_VIRTUAL, + sifive_pwm_interrupt_3, s); +} + +static void sifive_pwm_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = sifive_pwm_reset; + device_class_set_props(dc, sifive_pwm_properties); + dc->vmsd = &vmstate_sifive_pwm; + dc->realize = sifive_pwm_realize; +} + +static const TypeInfo sifive_pwm_info = { + .name = TYPE_SIFIVE_PWM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SiFivePwmState), + .instance_init = sifive_pwm_init, + .class_init = sifive_pwm_class_init, +}; + +static void sifive_pwm_register_types(void) +{ + 
type_register_static(&sifive_pwm_info); +} + +type_init(sifive_pwm_register_types) diff --git a/hw/timer/slavio_timer.c b/hw/timer/slavio_timer.c index 03e33fc592..8c4f6eb06b 100644 --- a/hw/timer/slavio_timer.c +++ b/hw/timer/slavio_timer.c @@ -400,12 +400,12 @@ static void slavio_timer_init(Object *obj) uint64_t size; char timer_name[20]; - tc = g_malloc0(sizeof(TimerContext)); + tc = g_new0(TimerContext, 1); tc->s = s; tc->timer_index = i; s->cputimer[i].timer = ptimer_init(slavio_timer_irq, tc, - PTIMER_POLICY_DEFAULT); + PTIMER_POLICY_LEGACY); ptimer_transaction_begin(s->cputimer[i].timer); ptimer_set_period(s->cputimer[i].timer, TIMER_PERIOD); ptimer_transaction_commit(s->cputimer[i].timer); diff --git a/hw/timer/sse-timer.c b/hw/timer/sse-timer.c index f959cb9d60..e92e83747d 100644 --- a/hw/timer/sse-timer.c +++ b/hw/timer/sse-timer.c @@ -324,7 +324,7 @@ static void sse_timer_write(void *opaque, hwaddr offset, uint64_t value, { uint32_t old_ctl = s->cntp_aival_ctl; - /* EN bit is writeable; CLR bit is write-0-to-clear, write-1-ignored */ + /* EN bit is writable; CLR bit is write-0-to-clear, write-1-ignored */ s->cntp_aival_ctl &= ~R_CNTP_AIVAL_CTL_EN_MASK; s->cntp_aival_ctl |= value & R_CNTP_AIVAL_CTL_EN_MASK; if (!(value & R_CNTP_AIVAL_CTL_CLR_MASK)) { diff --git a/hw/timer/stellaris-gptm.c b/hw/timer/stellaris-gptm.c new file mode 100644 index 0000000000..fd71c79be4 --- /dev/null +++ b/hw/timer/stellaris-gptm.c @@ -0,0 +1,332 @@ +/* + * Luminary Micro Stellaris General Purpose Timer Module + * + * Copyright (c) 2006 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/timer.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "hw/qdev-clock.h" +#include "hw/timer/stellaris-gptm.h" + +static void gptm_update_irq(gptm_state *s) +{ + int level; + level = (s->state & s->mask) != 0; + qemu_set_irq(s->irq, level); +} + +static void gptm_stop(gptm_state *s, int n) +{ + timer_del(s->timer[n]); +} + +static void gptm_reload(gptm_state *s, int n, int reset) +{ + int64_t tick; + if (reset) { + tick = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + } else { + tick = s->tick[n]; + } + + if (s->config == 0) { + /* 32-bit CountDown. */ + uint32_t count; + count = s->load[0] | (s->load[1] << 16); + tick += clock_ticks_to_ns(s->clk, count); + } else if (s->config == 1) { + /* 32-bit RTC. 1Hz tick. */ + tick += NANOSECONDS_PER_SECOND; + } else if (s->mode[n] == 0xa) { + /* PWM mode. Not implemented. */ + } else { + qemu_log_mask(LOG_UNIMP, + "GPTM: 16-bit timer mode unimplemented: 0x%x\n", + s->mode[n]); + return; + } + s->tick[n] = tick; + timer_mod(s->timer[n], tick); +} + +static void gptm_tick(void *opaque) +{ + gptm_state **p = (gptm_state **)opaque; + gptm_state *s; + int n; + + s = *p; + n = p - s->opaque; + if (s->config == 0) { + s->state |= 1; + if ((s->control & 0x20)) { + /* Output trigger. */ + qemu_irq_pulse(s->trigger); + } + if (s->mode[0] & 1) { + /* One-shot. */ + s->control &= ~1; + } else { + /* Periodic. */ + gptm_reload(s, 0, 0); + } + } else if (s->config == 1) { + /* RTC. */ + uint32_t match; + s->rtc++; + match = s->match[0] | (s->match[1] << 16); + if (s->rtc > match) + s->rtc = 0; + if (s->rtc == 0) { + s->state |= 8; + } + gptm_reload(s, 0, 0); + } else if (s->mode[n] == 0xa) { + /* PWM mode. Not implemented. 
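gptm_tick() above receives &s->opaque[n] rather than the device pointer itself, so one callback can recover both the device and which of the two sub-timers expired with plain pointer arithmetic. The trick on its own (structure and names are illustrative):

#include "qemu/osdep.h"
#include "qemu/timer.h"

typedef struct DemoPair DemoPair;

struct DemoPair {
    QEMUTimer *timer[2];
    DemoPair *opaque[2];    /* both entries point back at this structure */
};

static void demo_pair_tick(void *opaque)
{
    DemoPair **p = opaque;
    DemoPair *s = *p;
    int n = p - s->opaque;  /* 0 or 1: index of the timer that fired */

    /* Stop the sub-timer that fired; it could equally be re-armed here */
    timer_del(s->timer[n]);
}

static void demo_pair_setup(DemoPair *s)
{
    s->opaque[0] = s->opaque[1] = s;
    s->timer[0] = timer_new_ns(QEMU_CLOCK_VIRTUAL, demo_pair_tick,
                               &s->opaque[0]);
    s->timer[1] = timer_new_ns(QEMU_CLOCK_VIRTUAL, demo_pair_tick,
                               &s->opaque[1]);
}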
*/ + } else { + qemu_log_mask(LOG_UNIMP, + "GPTM: 16-bit timer mode unimplemented: 0x%x\n", + s->mode[n]); + } + gptm_update_irq(s); +} + +static uint64_t gptm_read(void *opaque, hwaddr offset, + unsigned size) +{ + gptm_state *s = (gptm_state *)opaque; + + switch (offset) { + case 0x00: /* CFG */ + return s->config; + case 0x04: /* TAMR */ + return s->mode[0]; + case 0x08: /* TBMR */ + return s->mode[1]; + case 0x0c: /* CTL */ + return s->control; + case 0x18: /* IMR */ + return s->mask; + case 0x1c: /* RIS */ + return s->state; + case 0x20: /* MIS */ + return s->state & s->mask; + case 0x24: /* CR */ + return 0; + case 0x28: /* TAILR */ + return s->load[0] | ((s->config < 4) ? (s->load[1] << 16) : 0); + case 0x2c: /* TBILR */ + return s->load[1]; + case 0x30: /* TAMARCHR */ + return s->match[0] | ((s->config < 4) ? (s->match[1] << 16) : 0); + case 0x34: /* TBMATCHR */ + return s->match[1]; + case 0x38: /* TAPR */ + return s->prescale[0]; + case 0x3c: /* TBPR */ + return s->prescale[1]; + case 0x40: /* TAPMR */ + return s->match_prescale[0]; + case 0x44: /* TBPMR */ + return s->match_prescale[1]; + case 0x48: /* TAR */ + if (s->config == 1) { + return s->rtc; + } + qemu_log_mask(LOG_UNIMP, + "GPTM: read of TAR but timer read not supported\n"); + return 0; + case 0x4c: /* TBR */ + qemu_log_mask(LOG_UNIMP, + "GPTM: read of TBR but timer read not supported\n"); + return 0; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "GPTM: read at bad offset 0x02%" HWADDR_PRIx "\n", + offset); + return 0; + } +} + +static void gptm_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + gptm_state *s = (gptm_state *)opaque; + uint32_t oldval; + + /* + * The timers should be disabled before changing the configuration. + * We take advantage of this and defer everything until the timer + * is enabled. + */ + switch (offset) { + case 0x00: /* CFG */ + s->config = value; + break; + case 0x04: /* TAMR */ + s->mode[0] = value; + break; + case 0x08: /* TBMR */ + s->mode[1] = value; + break; + case 0x0c: /* CTL */ + oldval = s->control; + s->control = value; + /* TODO: Implement pause. 
*/ + if ((oldval ^ value) & 1) { + if (value & 1) { + gptm_reload(s, 0, 1); + } else { + gptm_stop(s, 0); + } + } + if (((oldval ^ value) & 0x100) && s->config >= 4) { + if (value & 0x100) { + gptm_reload(s, 1, 1); + } else { + gptm_stop(s, 1); + } + } + break; + case 0x18: /* IMR */ + s->mask = value & 0x77; + gptm_update_irq(s); + break; + case 0x24: /* CR */ + s->state &= ~value; + break; + case 0x28: /* TAILR */ + s->load[0] = value & 0xffff; + if (s->config < 4) { + s->load[1] = value >> 16; + } + break; + case 0x2c: /* TBILR */ + s->load[1] = value & 0xffff; + break; + case 0x30: /* TAMARCHR */ + s->match[0] = value & 0xffff; + if (s->config < 4) { + s->match[1] = value >> 16; + } + break; + case 0x34: /* TBMATCHR */ + s->match[1] = value >> 16; + break; + case 0x38: /* TAPR */ + s->prescale[0] = value; + break; + case 0x3c: /* TBPR */ + s->prescale[1] = value; + break; + case 0x40: /* TAPMR */ + s->match_prescale[0] = value; + break; + case 0x44: /* TBPMR */ + s->match_prescale[0] = value; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "GPTM: write at bad offset 0x02%" HWADDR_PRIx "\n", + offset); + } + gptm_update_irq(s); +} + +static const MemoryRegionOps gptm_ops = { + .read = gptm_read, + .write = gptm_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_stellaris_gptm = { + .name = "stellaris_gptm", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32(config, gptm_state), + VMSTATE_UINT32_ARRAY(mode, gptm_state, 2), + VMSTATE_UINT32(control, gptm_state), + VMSTATE_UINT32(state, gptm_state), + VMSTATE_UINT32(mask, gptm_state), + VMSTATE_UNUSED(8), + VMSTATE_UINT32_ARRAY(load, gptm_state, 2), + VMSTATE_UINT32_ARRAY(match, gptm_state, 2), + VMSTATE_UINT32_ARRAY(prescale, gptm_state, 2), + VMSTATE_UINT32_ARRAY(match_prescale, gptm_state, 2), + VMSTATE_UINT32(rtc, gptm_state), + VMSTATE_INT64_ARRAY(tick, gptm_state, 2), + VMSTATE_TIMER_PTR_ARRAY(timer, gptm_state, 2), + VMSTATE_CLOCK(clk, gptm_state), + VMSTATE_END_OF_LIST() + } +}; + +static void stellaris_gptm_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + gptm_state *s = STELLARIS_GPTM(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + sysbus_init_irq(sbd, &s->irq); + qdev_init_gpio_out(dev, &s->trigger, 1); + + memory_region_init_io(&s->iomem, obj, &gptm_ops, s, + "gptm", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + + s->opaque[0] = s->opaque[1] = s; + + /* + * TODO: in an ideal world we would model the effects of changing + * the input clock frequency while the countdown timer is active. + * The best way to do this would be to convert the device to use + * ptimer instead of hand-rolling its own timer. This would also + * make it easy to implement reading the current count from the + * TAR and TBR registers. 
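One possible shape of the ptimer-based rework that the TODO above alludes to; this is not part of this patch, only a sketch under the assumption that the load value and input clock are wired as in the code above:

#include "qemu/osdep.h"
#include "hw/clock.h"
#include "hw/ptimer.h"

static void demo_gptm_start(ptimer_state *pt, Clock *clk, uint32_t load)
{
    ptimer_transaction_begin(pt);
    /* The period tracks the input clock, so rate changes could be modelled */
    ptimer_set_period_from_clock(pt, clk, 1);
    ptimer_set_limit(pt, load, 1);  /* set reload value and current count */
    ptimer_run(pt, 0);              /* 0 = periodic countdown */
    ptimer_transaction_commit(pt);
}

static uint64_t demo_gptm_read_count(ptimer_state *pt)
{
    /* Live count for TAR/TBR, which a hand-rolled QEMUTimer cannot provide */
    return ptimer_get_count(pt);
}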
+ */ + s->clk = qdev_init_clock_in(dev, "clk", NULL, NULL, 0); +} + +static void stellaris_gptm_realize(DeviceState *dev, Error **errp) +{ + gptm_state *s = STELLARIS_GPTM(dev); + + if (!clock_has_source(s->clk)) { + error_setg(errp, "stellaris-gptm: clk must be connected"); + return; + } + + s->timer[0] = timer_new_ns(QEMU_CLOCK_VIRTUAL, gptm_tick, &s->opaque[0]); + s->timer[1] = timer_new_ns(QEMU_CLOCK_VIRTUAL, gptm_tick, &s->opaque[1]); +} + +static void stellaris_gptm_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_stellaris_gptm; + dc->realize = stellaris_gptm_realize; +} + +static const TypeInfo stellaris_gptm_info = { + .name = TYPE_STELLARIS_GPTM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(gptm_state), + .instance_init = stellaris_gptm_init, + .class_init = stellaris_gptm_class_init, +}; + +static void stellaris_gptm_register_types(void) +{ + type_register_static(&stellaris_gptm_info); +} + +type_init(stellaris_gptm_register_types) diff --git a/hw/timer/trace-events b/hw/timer/trace-events index 5234c0ea9e..3eccef8385 100644 --- a/hw/timer/trace-events +++ b/hw/timer/trace-events @@ -88,3 +88,14 @@ sse_counter_reset(void) "SSE system counter: reset" sse_timer_read(uint64_t offset, uint64_t data, unsigned size) "SSE system timer read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" sse_timer_write(uint64_t offset, uint64_t data, unsigned size) "SSE system timer write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" sse_timer_reset(void) "SSE system timer: reset" + +# sifive_pwm.c +sifive_pwm_set_alarm(uint64_t alarm, uint64_t now) "Setting alarm to: 0x%" PRIx64 ", now: 0x%" PRIx64 +sifive_pwm_interrupt(int num) "Interrupt %d" +sifive_pwm_read(uint64_t offset) "Read at address: 0x%" PRIx64 +sifive_pwm_write(uint64_t data, uint64_t offset) "Write 0x%" PRIx64 " at address: 0x%" PRIx64 + +# sh_timer.c +sh_timer_start_stop(int enable, int current) "%d (%d)" +sh_timer_read(uint64_t offset) "tmu012_read 0x%" PRIx64 +sh_timer_write(uint64_t offset, uint64_t value) "tmu012_write 0x%" PRIx64 " 0x%08" PRIx64 diff --git a/hw/timer/xilinx_timer.c b/hw/timer/xilinx_timer.c index 1eb927eb84..c7f17cd646 100644 --- a/hw/timer/xilinx_timer.c +++ b/hw/timer/xilinx_timer.c @@ -223,7 +223,7 @@ static void xilinx_timer_realize(DeviceState *dev, Error **errp) xt->parent = t; xt->nr = i; - xt->ptimer = ptimer_init(timer_hit, xt, PTIMER_POLICY_DEFAULT); + xt->ptimer = ptimer_init(timer_hit, xt, PTIMER_POLICY_LEGACY); ptimer_transaction_begin(xt->ptimer); ptimer_set_freq(xt->ptimer, t->freq_hz); ptimer_transaction_commit(xt->ptimer); diff --git a/hw/tpm/tpm_crb.c b/hw/tpm/tpm_crb.c index 58ebd1469c..ea930da545 100644 --- a/hw/tpm/tpm_crb.c +++ b/hw/tpm/tpm_crb.c @@ -18,6 +18,7 @@ #include "qemu/module.h" #include "qapi/error.h" +#include "exec/address-spaces.h" #include "hw/qdev-properties.h" #include "hw/pci/pci_ids.h" #include "hw/acpi/tpm.h" @@ -25,6 +26,7 @@ #include "sysemu/tpm_backend.h" #include "sysemu/tpm_util.h" #include "sysemu/reset.h" +#include "sysemu/xen.h" #include "tpm_prop.h" #include "tpm_ppi.h" #include "trace.h" @@ -196,6 +198,7 @@ static void tpm_crb_request_completed(TPMIf *ti, int ret) ARRAY_FIELD_DP32(s->regs, CRB_CTRL_STS, tpmSts, 1); /* fatal error */ } + memory_region_set_dirty(&s->cmdmem, 0, CRB_CTRL_CMD_SIZE); } static enum TPMVersion tpm_crb_get_version(TPMIf *ti) @@ -306,7 +309,11 @@ static void tpm_crb_realize(DeviceState *dev, Error **errp) TPM_PPI_ADDR_BASE, OBJECT(s)); } - 
qemu_register_reset(tpm_crb_reset, dev); + if (xen_enabled()) { + tpm_crb_reset(dev); + } else { + qemu_register_reset(tpm_crb_reset, dev); + } } static void tpm_crb_class_init(ObjectClass *klass, void *data) diff --git a/hw/tpm/tpm_ppi.c b/hw/tpm/tpm_ppi.c index 362edcc5c9..7f74e26ec6 100644 --- a/hw/tpm/tpm_ppi.c +++ b/hw/tpm/tpm_ppi.c @@ -12,11 +12,11 @@ */ #include "qemu/osdep.h" - +#include "qemu/memalign.h" #include "qapi/error.h" -#include "cpu.h" #include "sysemu/memory_mapping.h" #include "migration/vmstate.h" +#include "hw/qdev-core.h" #include "hw/acpi/tpm.h" #include "tpm_ppi.h" #include "trace.h" @@ -30,21 +30,24 @@ void tpm_ppi_reset(TPMPPI *tpmppi) guest_phys_blocks_init(&guest_phys_blocks); guest_phys_blocks_append(&guest_phys_blocks); QTAILQ_FOREACH(block, &guest_phys_blocks.head, next) { + hwaddr mr_offs = block->host_addr - + (uint8_t *)memory_region_get_ram_ptr(block->mr); + trace_tpm_ppi_memset(block->host_addr, block->target_end - block->target_start); memset(block->host_addr, 0, block->target_end - block->target_start); - memory_region_set_dirty(block->mr, 0, + memory_region_set_dirty(block->mr, mr_offs, block->target_end - block->target_start); } guest_phys_blocks_free(&guest_phys_blocks); } } -void tpm_ppi_init(TPMPPI *tpmppi, struct MemoryRegion *m, +void tpm_ppi_init(TPMPPI *tpmppi, MemoryRegion *m, hwaddr addr, Object *obj) { - tpmppi->buf = qemu_memalign(qemu_real_host_page_size, + tpmppi->buf = qemu_memalign(qemu_real_host_page_size(), HOST_PAGE_ALIGN(TPM_PPI_ADDR_SIZE)); memory_region_init_ram_device_ptr(&tpmppi->ram, obj, "tpm-ppi", TPM_PPI_ADDR_SIZE, tpmppi->buf); diff --git a/hw/tpm/tpm_ppi.h b/hw/tpm/tpm_ppi.h index 6f773c25a0..bf5d4a300f 100644 --- a/hw/tpm/tpm_ppi.h +++ b/hw/tpm/tpm_ppi.h @@ -12,7 +12,7 @@ #ifndef TPM_TPM_PPI_H #define TPM_TPM_PPI_H -#include "exec/address-spaces.h" +#include "exec/memory.h" typedef struct TPMPPI { MemoryRegion ram; @@ -29,7 +29,7 @@ typedef struct TPMPPI { * Register the TPM PPI memory region at @addr on the given address * space for the object @obj. 
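The tpm_ppi_reset() hunk above fixes the second argument of memory_region_set_dirty(): it must be the offset inside the MemoryRegion, derived from the host pointer and the region's RAM base, not 0. The same computation as a free-standing helper (host_ptr is assumed to point inside mr's RAM block):

#include "qemu/osdep.h"
#include "exec/memory.h"

static void demo_memset_and_dirty(MemoryRegion *mr, uint8_t *host_ptr,
                                  hwaddr len)
{
    /* Offset of host_ptr relative to the start of mr's RAM */
    hwaddr mr_offs = host_ptr - (uint8_t *)memory_region_get_ram_ptr(mr);

    memset(host_ptr, 0, len);
    memory_region_set_dirty(mr, mr_offs, len);
}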
**/ -void tpm_ppi_init(TPMPPI *tpmppi, struct MemoryRegion *m, +void tpm_ppi_init(TPMPPI *tpmppi, MemoryRegion *m, hwaddr addr, Object *obj); /** diff --git a/hw/tpm/tpm_tis_common.c b/hw/tpm/tpm_tis_common.c index e700d82181..503be2a541 100644 --- a/hw/tpm/tpm_tis_common.c +++ b/hw/tpm/tpm_tis_common.c @@ -50,7 +50,12 @@ static uint64_t tpm_tis_mmio_read(void *opaque, hwaddr addr, static uint8_t tpm_tis_locality_from_addr(hwaddr addr) { - return (uint8_t)((addr >> TPM_TIS_LOCALITY_SHIFT) & 0x7); + uint8_t locty; + + locty = (uint8_t)((addr >> TPM_TIS_LOCALITY_SHIFT) & 0x7); + assert(TPM_TIS_IS_VALID_LOCTY(locty)); + + return locty; } diff --git a/hw/tpm/tpm_tis_isa.c b/hw/tpm/tpm_tis_isa.c index 10d8a14f19..91e3792248 100644 --- a/hw/tpm/tpm_tis_isa.c +++ b/hw/tpm/tpm_tis_isa.c @@ -30,6 +30,7 @@ #include "tpm_prop.h" #include "tpm_tis.h" #include "qom/object.h" +#include "hw/acpi/acpi_aml_interface.h" struct TPMStateISA { /*< private >*/ @@ -127,7 +128,7 @@ static void tpm_tis_isa_realizefn(DeviceState *dev, Error **errp) return; } - isa_init_irq(ISA_DEVICE(dev), &s->irq, s->irq_num); + s->irq = isa_get_irq(ISA_DEVICE(dev), s->irq_num); memory_region_add_subregion(isa_address_space(ISA_DEVICE(dev)), TPM_TIS_ADDR_BASE, &s->mmio); @@ -138,10 +139,39 @@ static void tpm_tis_isa_realizefn(DeviceState *dev, Error **errp) } } +static void build_tpm_tis_isa_aml(AcpiDevAmlIf *adev, Aml *scope) +{ + Aml *dev, *crs; + TPMStateISA *isadev = TPM_TIS_ISA(adev); + TPMIf *ti = TPM_IF(isadev); + + dev = aml_device("TPM"); + if (tpm_tis_isa_get_tpm_version(ti) == TPM_VERSION_2_0) { + aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101"))); + aml_append(dev, aml_name_decl("_STR", aml_string("TPM 2.0 Device"))); + } else { + aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C31"))); + } + aml_append(dev, aml_name_decl("_UID", aml_int(1))); + aml_append(dev, aml_name_decl("_STA", aml_int(0xF))); + crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE, TPM_TIS_ADDR_SIZE, + AML_READ_WRITE)); + /* + * FIXME: TPM_TIS_IRQ=5 conflicts with PNP0C0F irqs, + * fix default TPM_TIS_IRQ value there to use some unused IRQ + */ + /* aml_append(crs, aml_irq_no_flags(isadev->state.irq_num)); */ + aml_append(dev, aml_name_decl("_CRS", crs)); + tpm_build_ppi_acpi(ti, dev); + aml_append(scope, dev); +} + static void tpm_tis_isa_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); TPMIfClass *tc = TPM_IF_CLASS(klass); + AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); device_class_set_props(dc, tpm_tis_isa_properties); dc->vmsd = &vmstate_tpm_tis_isa; @@ -151,6 +181,7 @@ static void tpm_tis_isa_class_init(ObjectClass *klass, void *data) tc->request_completed = tpm_tis_isa_request_completed; tc->get_version = tpm_tis_isa_get_tpm_version; set_bit(DEVICE_CATEGORY_MISC, dc->categories); + adevc->build_dev_aml = build_tpm_tis_isa_aml; } static const TypeInfo tpm_tis_isa_info = { @@ -161,6 +192,7 @@ static const TypeInfo tpm_tis_isa_info = { .class_init = tpm_tis_isa_class_init, .interfaces = (InterfaceInfo[]) { { TYPE_TPM_IF }, + { TYPE_ACPI_DEV_AML_IF }, { } } }; diff --git a/hw/usb/Kconfig b/hw/usb/Kconfig index 53f8283ffd..ce4f433976 100644 --- a/hw/usb/Kconfig +++ b/hw/usb/Kconfig @@ -119,6 +119,11 @@ config USB_U2F default y depends on USB +config USB_CANOKEY + bool + default y + depends on USB + config IMX_USBPHY bool default y diff --git a/hw/usb/bus.c b/hw/usb/bus.c index 07083349f5..92d6ed5626 100644 --- a/hw/usb/bus.c +++ b/hw/usb/bus.c @@ 
-2,6 +2,8 @@ #include "hw/qdev-properties.h" #include "hw/usb.h" #include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" +#include "qapi/type-helpers.h" #include "qemu/error-report.h" #include "qemu/module.h" #include "sysemu/sysemu.h" @@ -82,7 +84,7 @@ const VMStateDescription vmstate_usb_device = { void usb_bus_new(USBBus *bus, size_t bus_size, USBBusOps *ops, DeviceState *host) { - qbus_create_inplace(bus, bus_size, TYPE_USB_BUS, host, NULL); + qbus_init(bus, bus_size, TYPE_USB_BUS, host, NULL); qbus_set_bus_hotplug_handler(BUS(bus)); bus->ops = ops; bus->busnr = next_usb_bus++; @@ -631,15 +633,16 @@ static char *usb_get_fw_dev_path(DeviceState *qdev) return fw_path; } -void hmp_info_usb(Monitor *mon, const QDict *qdict) +HumanReadableText *qmp_x_query_usb(Error **errp) { + g_autoptr(GString) buf = g_string_new(""); USBBus *bus; USBDevice *dev; USBPort *port; if (QTAILQ_EMPTY(&busses)) { - monitor_printf(mon, "USB support not enabled\n"); - return; + error_setg(errp, "USB support not enabled"); + return NULL; } QTAILQ_FOREACH(bus, &busses, next) { @@ -647,14 +650,17 @@ void hmp_info_usb(Monitor *mon, const QDict *qdict) dev = port->dev; if (!dev) continue; - monitor_printf(mon, " Device %d.%d, Port %s, Speed %s Mb/s, " - "Product %s%s%s\n", - bus->busnr, dev->addr, port->path, - usb_speed(dev->speed), dev->product_desc, - dev->qdev.id ? ", ID: " : "", - dev->qdev.id ?: ""); + g_string_append_printf(buf, + " Device %d.%d, Port %s, Speed %s Mb/s, " + "Product %s%s%s\n", + bus->busnr, dev->addr, port->path, + usb_speed(dev->speed), dev->product_desc, + dev->qdev.id ? ", ID: " : "", + dev->qdev.id ?: ""); } } + + return human_readable_text_from_str(buf); } /* handle legacy -usbdevice cmd line option */ diff --git a/hw/usb/canokey.c b/hw/usb/canokey.c new file mode 100644 index 0000000000..bbc5da07b5 --- /dev/null +++ b/hw/usb/canokey.c @@ -0,0 +1,334 @@ +/* + * CanoKey QEMU device implementation. + * + * Copyright (c) 2021-2022 Canokeys.org + * Written by Hongren (Zenithal) Zheng + * + * This code is licensed under the Apache-2.0. 
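The hmp_info_usb to qmp_x_query_usb rewrite above follows the general recipe for turning an HMP "info" handler into a QMP query: accumulate the text in a GString, report failures through Error instead of printing to the monitor, and wrap the result in HumanReadableText. A skeletal version of that shape (the helper and its availability check are illustrative):

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"

static HumanReadableText *demo_query(bool available, Error **errp)
{
    g_autoptr(GString) buf = g_string_new("");

    if (!available) {
        /* QMP reports errors via Error rather than monitor_printf() */
        error_setg(errp, "demo subsystem not enabled");
        return NULL;
    }

    g_string_append_printf(buf, "  Demo device %d.%d\n", 1, 0);

    return human_readable_text_from_str(buf);
}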
+ */
+
+#include "qemu/osdep.h"
+#include <canokey-emu.h>
+
+#include "qemu/module.h"
+#include "qapi/error.h"
+#include "hw/usb.h"
+#include "hw/qdev-properties.h"
+#include "trace.h"
+#include "desc.h"
+#include "canokey.h"
+
+#define CANOKEY_EP_IN(ep) ((ep) & 0x7F)
+
+#define CANOKEY_VENDOR_NUM 0x20a0
+#define CANOKEY_PRODUCT_NUM 0x42d2
+
+/*
+ * placeholder, canokey-qemu implements its own usb desc
+ * Namely we do not use usb_desc_handle_control
+ */
+enum {
+    STR_MANUFACTURER = 1,
+    STR_PRODUCT,
+    STR_SERIALNUMBER
+};
+
+static const USBDescStrings desc_strings = {
+    [STR_MANUFACTURER] = "canokeys.org",
+    [STR_PRODUCT] = "CanoKey QEMU",
+    [STR_SERIALNUMBER] = "0"
+};
+
+static const USBDescDevice desc_device_canokey = {
+    .bcdUSB = 0x0,
+    .bMaxPacketSize0 = 16,
+    .bNumConfigurations = 0,
+    .confs = NULL,
+};
+
+static const USBDesc desc_canokey = {
+    .id = {
+        .idVendor = CANOKEY_VENDOR_NUM,
+        .idProduct = CANOKEY_PRODUCT_NUM,
+        .bcdDevice = 0x0100,
+        .iManufacturer = STR_MANUFACTURER,
+        .iProduct = STR_PRODUCT,
+        .iSerialNumber = STR_SERIALNUMBER,
+    },
+    .full = &desc_device_canokey,
+    .str = desc_strings,
+};
+
+
+/*
+ * libcanokey-qemu.so side functions
+ * All functions are called from canokey_emu_device_loop
+ */
+int canokey_emu_stall_ep(void *base, uint8_t ep)
+{
+    trace_canokey_emu_stall_ep(ep);
+    CanoKeyState *key = base;
+    uint8_t ep_in = CANOKEY_EP_IN(ep); /* INTR IN has ep 129 */
+    key->ep_in_size[ep_in] = 0;
+    key->ep_in_state[ep_in] = CANOKEY_EP_IN_STALL;
+    return 0;
+}
+
+int canokey_emu_set_address(void *base, uint8_t addr)
+{
+    trace_canokey_emu_set_address(addr);
+    CanoKeyState *key = base;
+    key->dev.addr = addr;
+    return 0;
+}
+
+int canokey_emu_prepare_receive(
+    void *base, uint8_t ep, uint8_t *pbuf, uint16_t size)
+{
+    trace_canokey_emu_prepare_receive(ep, size);
+    CanoKeyState *key = base;
+    key->ep_out[ep] = pbuf;
+    key->ep_out_size[ep] = size;
+    return 0;
+}
+
+int canokey_emu_transmit(
+    void *base, uint8_t ep, const uint8_t *pbuf, uint16_t size)
+{
+    trace_canokey_emu_transmit(ep, size);
+    CanoKeyState *key = base;
+    uint8_t ep_in = CANOKEY_EP_IN(ep); /* INTR IN has ep 129 */
+    memcpy(key->ep_in[ep_in] + key->ep_in_size[ep_in],
+           pbuf, size);
+    key->ep_in_size[ep_in] += size;
+    key->ep_in_state[ep_in] = CANOKEY_EP_IN_READY;
+    /*
+     * wake up controller if we NAKed IN token before
+     * Note: this is a quirk for CanoKey CTAPHID
+     */
+    if (ep_in == CANOKEY_EMU_EP_CTAPHID) {
+        usb_wakeup(usb_ep_get(&key->dev, USB_TOKEN_IN, ep_in), 0);
+    }
+    /*
+     * ready for more data in device loop
+     *
+     * Note: this is a quirk for CanoKey CTAPHID
+     * because it calls multiple emu_transmit in one device_loop
+     * but w/o data_in it would get stuck in device_loop
+     * This has a side effect for CCID since CCID can send ZLP
+     * This also has a side effect for Control transfers
+     */
+    if (ep_in == CANOKEY_EMU_EP_CTAPHID) {
+        canokey_emu_data_in(ep_in);
+    }
+    return 0;
+}
+
+uint32_t canokey_emu_get_rx_data_size(void *base, uint8_t ep)
+{
+    CanoKeyState *key = base;
+    return key->ep_out_size[ep];
+}
+
+/*
+ * QEMU side functions
+ */
+static void canokey_handle_reset(USBDevice *dev)
+{
+    trace_canokey_handle_reset();
+    CanoKeyState *key = CANOKEY(dev);
+    for (int i = 0; i != CANOKEY_EP_NUM; ++i) {
+        key->ep_in_state[i] = CANOKEY_EP_IN_WAIT;
+        key->ep_in_pos[i] = 0;
+        key->ep_in_size[i] = 0;
+    }
+    canokey_emu_reset();
+}
+
+static void canokey_handle_control(USBDevice *dev, USBPacket *p,
+        int request, int value, int index, int length, uint8_t *data)
+{
+    trace_canokey_handle_control_setup(request, value,
index, length); + CanoKeyState *key = CANOKEY(dev); + + canokey_emu_setup(request, value, index, length); + + uint32_t dir_in = request & DeviceRequest; + if (!dir_in) { + /* OUT */ + trace_canokey_handle_control_out(); + if (key->ep_out[0] != NULL) { + memcpy(key->ep_out[0], data, length); + } + canokey_emu_data_out(p->ep->nr, data); + } + + canokey_emu_device_loop(); + + /* IN */ + switch (key->ep_in_state[0]) { + case CANOKEY_EP_IN_WAIT: + p->status = USB_RET_NAK; + break; + case CANOKEY_EP_IN_STALL: + p->status = USB_RET_STALL; + break; + case CANOKEY_EP_IN_READY: + memcpy(data, key->ep_in[0], key->ep_in_size[0]); + p->actual_length = key->ep_in_size[0]; + trace_canokey_handle_control_in(p->actual_length); + /* reset state */ + key->ep_in_state[0] = CANOKEY_EP_IN_WAIT; + key->ep_in_size[0] = 0; + key->ep_in_pos[0] = 0; + break; + } +} + +static void canokey_handle_data(USBDevice *dev, USBPacket *p) +{ + CanoKeyState *key = CANOKEY(dev); + + uint8_t ep_in = CANOKEY_EP_IN(p->ep->nr); + uint8_t ep_out = p->ep->nr; + uint32_t in_len; + uint32_t out_pos; + uint32_t out_len; + switch (p->pid) { + case USB_TOKEN_OUT: + trace_canokey_handle_data_out(ep_out, p->iov.size); + usb_packet_copy(p, key->ep_out_buffer[ep_out], p->iov.size); + out_pos = 0; + while (out_pos != p->iov.size) { + /* + * key->ep_out[ep_out] set by prepare_receive + * to be a buffer inside libcanokey-qemu.so + * key->ep_out_size[ep_out] set by prepare_receive + * to be the buffer length + */ + out_len = MIN(p->iov.size - out_pos, key->ep_out_size[ep_out]); + memcpy(key->ep_out[ep_out], + key->ep_out_buffer[ep_out] + out_pos, out_len); + out_pos += out_len; + /* update ep_out_size to actual len */ + key->ep_out_size[ep_out] = out_len; + canokey_emu_data_out(ep_out, NULL); + } + /* + * Note: this is a quirk for CanoKey CTAPHID + * + * There is one code path that uses this device loop + * INTR IN -> useful data_in and useless device_loop -> NAKed + * INTR OUT -> useful device loop -> transmit -> wakeup + * (useful thanks to both data_in and data_out having been called) + * the next INTR IN -> actual data to guest + * + * if there is no such device loop, there would be no further + * INTR IN, no device loop, no transmit hence no usb_wakeup + * then qemu would hang + */ + if (ep_in == CANOKEY_EMU_EP_CTAPHID) { + canokey_emu_device_loop(); /* may call transmit multiple times */ + } + break; + case USB_TOKEN_IN: + if (key->ep_in_pos[ep_in] == 0) { /* first time IN */ + canokey_emu_data_in(ep_in); + canokey_emu_device_loop(); /* may call transmit multiple times */ + } + switch (key->ep_in_state[ep_in]) { + case CANOKEY_EP_IN_WAIT: + /* NAK for early INTR IN */ + p->status = USB_RET_NAK; + break; + case CANOKEY_EP_IN_STALL: + p->status = USB_RET_STALL; + break; + case CANOKEY_EP_IN_READY: + /* submit part of ep_in buffer to USBPacket */ + in_len = MIN(key->ep_in_size[ep_in] - key->ep_in_pos[ep_in], + p->iov.size); + usb_packet_copy(p, + key->ep_in[ep_in] + key->ep_in_pos[ep_in], in_len); + key->ep_in_pos[ep_in] += in_len; + /* reset state if all data submitted */ + if (key->ep_in_pos[ep_in] == key->ep_in_size[ep_in]) { + key->ep_in_state[ep_in] = CANOKEY_EP_IN_WAIT; + key->ep_in_size[ep_in] = 0; + key->ep_in_pos[ep_in] = 0; + } + trace_canokey_handle_data_in(ep_in, in_len); + break; + } + break; + default: + p->status = USB_RET_STALL; + break; + } +} + +static void canokey_realize(USBDevice *base, Error **errp) +{ + trace_canokey_realize(); + CanoKeyState *key = CANOKEY(base); + + if (key->file == NULL) { + error_setg(errp, "You must 
provide file=/path/to/canokey-file"); + return; + } + + usb_desc_init(base); + + for (int i = 0; i != CANOKEY_EP_NUM; ++i) { + key->ep_in_state[i] = CANOKEY_EP_IN_WAIT; + key->ep_in_size[i] = 0; + key->ep_in_pos[i] = 0; + } + + if (canokey_emu_init(key, key->file)) { + error_setg(errp, "canokey can not create or read %s", key->file); + return; + } +} + +static void canokey_unrealize(USBDevice *base) +{ + trace_canokey_unrealize(); +} + +static Property canokey_properties[] = { + DEFINE_PROP_STRING("file", CanoKeyState, file), + DEFINE_PROP_END_OF_LIST(), +}; + +static void canokey_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + + uc->product_desc = "CanoKey QEMU"; + uc->usb_desc = &desc_canokey; + uc->handle_reset = canokey_handle_reset; + uc->handle_control = canokey_handle_control; + uc->handle_data = canokey_handle_data; + uc->handle_attach = usb_desc_attach; + uc->realize = canokey_realize; + uc->unrealize = canokey_unrealize; + dc->desc = "CanoKey QEMU"; + device_class_set_props(dc, canokey_properties); + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo canokey_info = { + .name = TYPE_CANOKEY, + .parent = TYPE_USB_DEVICE, + .instance_size = sizeof(CanoKeyState), + .class_init = canokey_class_init +}; + +static void canokey_register_types(void) +{ + type_register_static(&canokey_info); +} + +type_init(canokey_register_types) diff --git a/hw/usb/canokey.h b/hw/usb/canokey.h new file mode 100644 index 0000000000..24cf304203 --- /dev/null +++ b/hw/usb/canokey.h @@ -0,0 +1,69 @@ +/* + * CanoKey QEMU device header. + * + * Copyright (c) 2021-2022 Canokeys.org + * Written by Hongren (Zenithal) Zheng + * + * This code is licensed under the Apache-2.0. + */ + +#ifndef CANOKEY_H +#define CANOKEY_H + +#include "hw/qdev-core.h" + +#define TYPE_CANOKEY "canokey" +#define CANOKEY(obj) \ + OBJECT_CHECK(CanoKeyState, (obj), TYPE_CANOKEY) + +/* + * State of Canokey (i.e. hw/canokey.c) + */ + +/* CTRL INTR BULK */ +#define CANOKEY_EP_NUM 3 +/* BULK/INTR IN can be up to 1352 bytes, e.g. get key info */ +#define CANOKEY_EP_IN_BUFFER_SIZE 2048 +/* BULK OUT can be up to 270 bytes, e.g. 
PIV import cert */ +#define CANOKEY_EP_OUT_BUFFER_SIZE 512 + +typedef enum { + CANOKEY_EP_IN_WAIT, + CANOKEY_EP_IN_READY, + CANOKEY_EP_IN_STALL +} CanoKeyEPState; + +typedef struct CanoKeyState { + USBDevice dev; + + /* IN packets from canokey device loop */ + uint8_t ep_in[CANOKEY_EP_NUM][CANOKEY_EP_IN_BUFFER_SIZE]; + /* + * See canokey_emu_transmit + * + * For large INTR IN, receive multiple data from canokey device loop + * in this case ep_in_size would increase with every call + */ + uint32_t ep_in_size[CANOKEY_EP_NUM]; + /* + * Used in canokey_handle_data + * for IN larger than p->iov.size, we would do multiple handle_data() + * + * The difference between ep_in_pos and ep_in_size: + * We first increase ep_in_size to fill ep_in buffer in device_loop, + * then use ep_in_pos to submit data from ep_in buffer in handle_data + */ + uint32_t ep_in_pos[CANOKEY_EP_NUM]; + CanoKeyEPState ep_in_state[CANOKEY_EP_NUM]; + + /* OUT pointer to canokey recv buffer */ + uint8_t *ep_out[CANOKEY_EP_NUM]; + uint32_t ep_out_size[CANOKEY_EP_NUM]; + /* For large BULK OUT, multiple write to ep_out is needed */ + uint8_t ep_out_buffer[CANOKEY_EP_NUM][CANOKEY_EP_OUT_BUFFER_SIZE]; + + /* Properties */ + char *file; /* canokey-file */ +} CanoKeyState; + +#endif /* CANOKEY_H */ diff --git a/hw/usb/ccid-card-emulated.c b/hw/usb/ccid-card-emulated.c index 6c8c0355e0..ee41a81801 100644 --- a/hw/usb/ccid-card-emulated.c +++ b/hw/usb/ccid-card-emulated.c @@ -140,7 +140,7 @@ static void emulated_apdu_from_guest(CCIDCardState *base, const uint8_t *apdu, uint32_t len) { EmulatedState *card = EMULATED_CCID_CARD(base); - EmulEvent *event = (EmulEvent *)g_malloc(sizeof(EmulEvent) + len); + EmulEvent *event = g_malloc(sizeof(EmulEvent) + len); assert(event); event->p.data.type = EMUL_GUEST_APDU; @@ -613,6 +613,7 @@ static const TypeInfo emulated_card_info = { .class_init = emulated_class_initfn, }; module_obj(TYPE_EMULATED_CCID); +module_kconfig(USB); static void ccid_card_emulated_register_types(void) { diff --git a/hw/usb/ccid-card-passthru.c b/hw/usb/ccid-card-passthru.c index fa3040fb71..07ee42f304 100644 --- a/hw/usb/ccid-card-passthru.c +++ b/hw/usb/ccid-card-passthru.c @@ -9,7 +9,7 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" +#include "qemu/cutils.h" #include "qemu/units.h" #include #include "chardev/char-fe.h" @@ -415,6 +415,7 @@ static const TypeInfo passthru_card_info = { .class_init = passthru_class_initfn, }; module_obj(TYPE_CCID_PASSTHRU); +module_kconfig(USB); static void ccid_card_passthru_register_types(void) { diff --git a/hw/usb/desc-msos.c b/hw/usb/desc-msos.c index 836e38c67e..c72c65b650 100644 --- a/hw/usb/desc-msos.c +++ b/hw/usb/desc-msos.c @@ -5,12 +5,12 @@ /* * Microsoft OS Descriptors * - * Windows tries to fetch some special descriptors with informations + * Windows tries to fetch some special descriptors with information * specifically for windows. Presence is indicated using a special * string @ index 0xee. There are two kinds of descriptors: * * compatid descriptor - * Used to bind drivers, if usb class isn't specific enougth. + * Used to bind drivers, if usb class isn't specific enough. * Used for PTP/MTP for example (both share the same usb class). * * properties descriptor @@ -23,7 +23,7 @@ * HLM\SYSTEM\CurrentControlSet\Control\usbflags * HLM\SYSTEM\CurrentControlSet\Enum\USB * Windows will complain it can't delete entries on the second one. 
- * It has deleted everything it had permissions too, which is enouth + * It has deleted everything it had permissions too, which is enough * as this includes "Device Parameters". * * http://msdn.microsoft.com/en-us/library/windows/hardware/ff537430.aspx @@ -192,8 +192,8 @@ static int usb_desc_msos_prop(const USBDesc *desc, uint8_t *dest) if (desc->msos->SelectiveSuspendEnabled) { /* * Signaling remote wakeup capability in the standard usb - * descriptors isn't enouth to make windows actually use it. - * This is the "Yes, we really mean it" registy entry to flip + * descriptors isn't enough to make windows actually use it. + * This is the "Yes, we really mean it" registry entry to flip * the switch in the windows drivers. */ length += usb_desc_msos_prop_dword(dest+length, diff --git a/hw/usb/desc.c b/hw/usb/desc.c index 8b6eaea407..7f6cc2f99b 100644 --- a/hw/usb/desc.c +++ b/hw/usb/desc.c @@ -632,7 +632,8 @@ int usb_desc_get_descriptor(USBDevice *dev, USBPacket *p, bool msos = (dev->flags & (1 << USB_DEV_FLAG_MSOS_DESC_IN_USE)); const USBDesc *desc = usb_device_get_usb_desc(dev); const USBDescDevice *other_dev; - uint8_t buf[256]; + size_t buflen = USB_DESC_MAX_LEN; + g_autofree uint8_t *buf = g_malloc(buflen); uint8_t type = value >> 8; uint8_t index = value & 0xff; int flags, ret = -1; @@ -650,36 +651,36 @@ int usb_desc_get_descriptor(USBDevice *dev, USBPacket *p, switch(type) { case USB_DT_DEVICE: - ret = usb_desc_device(&desc->id, dev->device, msos, buf, sizeof(buf)); + ret = usb_desc_device(&desc->id, dev->device, msos, buf, buflen); trace_usb_desc_device(dev->addr, len, ret); break; case USB_DT_CONFIG: if (index < dev->device->bNumConfigurations) { ret = usb_desc_config(dev->device->confs + index, flags, - buf, sizeof(buf)); + buf, buflen); } trace_usb_desc_config(dev->addr, index, len, ret); break; case USB_DT_STRING: - ret = usb_desc_string(dev, index, buf, sizeof(buf)); + ret = usb_desc_string(dev, index, buf, buflen); trace_usb_desc_string(dev->addr, index, len, ret); break; case USB_DT_DEVICE_QUALIFIER: if (other_dev != NULL) { - ret = usb_desc_device_qualifier(other_dev, buf, sizeof(buf)); + ret = usb_desc_device_qualifier(other_dev, buf, buflen); } trace_usb_desc_device_qualifier(dev->addr, len, ret); break; case USB_DT_OTHER_SPEED_CONFIG: if (other_dev != NULL && index < other_dev->bNumConfigurations) { ret = usb_desc_config(other_dev->confs + index, flags, - buf, sizeof(buf)); + buf, buflen); buf[0x01] = USB_DT_OTHER_SPEED_CONFIG; } trace_usb_desc_other_speed_config(dev->addr, index, len, ret); break; case USB_DT_BOS: - ret = usb_desc_bos(desc, buf, sizeof(buf)); + ret = usb_desc_bos(desc, buf, buflen); trace_usb_desc_bos(dev->addr, len, ret); break; diff --git a/hw/usb/desc.h b/hw/usb/desc.h index 4d81c68e0e..35babdeff6 100644 --- a/hw/usb/desc.h +++ b/hw/usb/desc.h @@ -133,7 +133,7 @@ struct USBDescConfig { const USBDescIface *ifs; }; -/* conceptually an Interface Association Descriptor, and releated interfaces */ +/* conceptually an Interface Association Descriptor, and related interfaces */ struct USBDescIfaceAssoc { uint8_t bFirstInterface; uint8_t bInterfaceCount; @@ -199,6 +199,7 @@ struct USBDesc { const USBDescMSOS *msos; }; +#define USB_DESC_MAX_LEN 8192 #define USB_DESC_FLAG_SUPER (1 << 1) /* little helpers */ diff --git a/hw/usb/dev-audio.c b/hw/usb/dev-audio.c index f5cb246792..8748c1ba04 100644 --- a/hw/usb/dev-audio.c +++ b/hw/usb/dev-audio.c @@ -168,7 +168,7 @@ static const USBDescIface desc_iface[] = { STRING_FEATURE_UNIT, /* u8 iFeature */ } },{ - /* 
Headphone Ouptut Terminal ID3 Descriptor */ + /* Headphone Output Terminal ID3 Descriptor */ .data = (uint8_t[]) { 0x09, /* u8 bLength */ USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ @@ -332,7 +332,7 @@ static const USBDescIface desc_iface_multi[] = { STRING_FEATURE_UNIT, /* u8 iFeature */ } },{ - /* Headphone Ouptut Terminal ID3 Descriptor */ + /* Headphone Output Terminal ID3 Descriptor */ .data = (uint8_t[]) { 0x09, /* u8 bLength */ USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ diff --git a/hw/usb/dev-hid.c b/hw/usb/dev-hid.c index 1c7ae97c30..bdd6d1ffaf 100644 --- a/hw/usb/dev-hid.c +++ b/hw/usb/dev-hid.c @@ -461,14 +461,14 @@ static const uint8_t qemu_mouse_hid_report_descriptor[] = { 0xa1, 0x00, /* Collection (Physical) */ 0x05, 0x09, /* Usage Page (Button) */ 0x19, 0x01, /* Usage Minimum (1) */ - 0x29, 0x03, /* Usage Maximum (3) */ + 0x29, 0x05, /* Usage Maximum (5) */ 0x15, 0x00, /* Logical Minimum (0) */ 0x25, 0x01, /* Logical Maximum (1) */ - 0x95, 0x03, /* Report Count (3) */ + 0x95, 0x05, /* Report Count (5) */ 0x75, 0x01, /* Report Size (1) */ 0x81, 0x02, /* Input (Data, Variable, Absolute) */ 0x95, 0x01, /* Report Count (1) */ - 0x75, 0x05, /* Report Size (5) */ + 0x75, 0x03, /* Report Size (3) */ 0x81, 0x01, /* Input (Constant) */ 0x05, 0x01, /* Usage Page (Generic Desktop) */ 0x09, 0x30, /* Usage (X) */ diff --git a/hw/usb/dev-hub.c b/hw/usb/dev-hub.c index 66c410f4b2..4aebdf9ed5 100644 --- a/hw/usb/dev-hub.c +++ b/hw/usb/dev-hub.c @@ -54,44 +54,44 @@ struct USBHubState { #define TYPE_USB_HUB "usb-hub" OBJECT_DECLARE_SIMPLE_TYPE(USBHubState, USB_HUB) -#define ClearHubFeature (0x2000 | USB_REQ_CLEAR_FEATURE) -#define ClearPortFeature (0x2300 | USB_REQ_CLEAR_FEATURE) -#define GetHubDescriptor (0xa000 | USB_REQ_GET_DESCRIPTOR) -#define GetHubStatus (0xa000 | USB_REQ_GET_STATUS) -#define GetPortStatus (0xa300 | USB_REQ_GET_STATUS) -#define SetHubFeature (0x2000 | USB_REQ_SET_FEATURE) -#define SetPortFeature (0x2300 | USB_REQ_SET_FEATURE) +#define ClearHubFeature (0x2000 | USB_REQ_CLEAR_FEATURE) +#define ClearPortFeature (0x2300 | USB_REQ_CLEAR_FEATURE) +#define GetHubDescriptor (0xa000 | USB_REQ_GET_DESCRIPTOR) +#define GetHubStatus (0xa000 | USB_REQ_GET_STATUS) +#define GetPortStatus (0xa300 | USB_REQ_GET_STATUS) +#define SetHubFeature (0x2000 | USB_REQ_SET_FEATURE) +#define SetPortFeature (0x2300 | USB_REQ_SET_FEATURE) -#define PORT_STAT_CONNECTION 0x0001 -#define PORT_STAT_ENABLE 0x0002 -#define PORT_STAT_SUSPEND 0x0004 -#define PORT_STAT_OVERCURRENT 0x0008 -#define PORT_STAT_RESET 0x0010 -#define PORT_STAT_POWER 0x0100 -#define PORT_STAT_LOW_SPEED 0x0200 +#define PORT_STAT_CONNECTION 0x0001 +#define PORT_STAT_ENABLE 0x0002 +#define PORT_STAT_SUSPEND 0x0004 +#define PORT_STAT_OVERCURRENT 0x0008 +#define PORT_STAT_RESET 0x0010 +#define PORT_STAT_POWER 0x0100 +#define PORT_STAT_LOW_SPEED 0x0200 #define PORT_STAT_HIGH_SPEED 0x0400 #define PORT_STAT_TEST 0x0800 #define PORT_STAT_INDICATOR 0x1000 -#define PORT_STAT_C_CONNECTION 0x0001 -#define PORT_STAT_C_ENABLE 0x0002 -#define PORT_STAT_C_SUSPEND 0x0004 -#define PORT_STAT_C_OVERCURRENT 0x0008 -#define PORT_STAT_C_RESET 0x0010 +#define PORT_STAT_C_CONNECTION 0x0001 +#define PORT_STAT_C_ENABLE 0x0002 +#define PORT_STAT_C_SUSPEND 0x0004 +#define PORT_STAT_C_OVERCURRENT 0x0008 +#define PORT_STAT_C_RESET 0x0010 -#define PORT_CONNECTION 0 -#define PORT_ENABLE 1 -#define PORT_SUSPEND 2 -#define PORT_OVERCURRENT 3 -#define PORT_RESET 4 -#define PORT_POWER 8 -#define PORT_LOWSPEED 9 -#define PORT_HIGHSPEED 10 -#define PORT_C_CONNECTION 
16 -#define PORT_C_ENABLE 17 -#define PORT_C_SUSPEND 18 -#define PORT_C_OVERCURRENT 19 -#define PORT_C_RESET 20 +#define PORT_CONNECTION 0 +#define PORT_ENABLE 1 +#define PORT_SUSPEND 2 +#define PORT_OVERCURRENT 3 +#define PORT_RESET 4 +#define PORT_POWER 8 +#define PORT_LOWSPEED 9 +#define PORT_HIGHSPEED 10 +#define PORT_C_CONNECTION 16 +#define PORT_C_ENABLE 17 +#define PORT_C_SUSPEND 18 +#define PORT_C_OVERCURRENT 19 +#define PORT_C_RESET 20 #define PORT_TEST 21 #define PORT_INDICATOR 22 @@ -155,13 +155,13 @@ static const USBDesc desc_hub = { static const uint8_t qemu_hub_hub_descriptor[] = { - 0x00, /* u8 bLength; patched in later */ - 0x29, /* u8 bDescriptorType; Hub-descriptor */ - 0x00, /* u8 bNbrPorts; (patched later) */ - 0x0a, /* u16 wHubCharacteristics; */ - 0x00, /* (per-port OC, no power switching) */ - 0x01, /* u8 bPwrOn2pwrGood; 2ms */ - 0x00 /* u8 bHubContrCurrent; 0 mA */ + 0x00, /* u8 bLength; patched in later */ + 0x29, /* u8 bDescriptorType; Hub-descriptor */ + 0x00, /* u8 bNbrPorts; (patched later) */ + 0x0a, /* u16 wHubCharacteristics; */ + 0x00, /* (per-port OC, no power switching) */ + 0x01, /* u8 bPwrOn2pwrGood; 2ms */ + 0x00 /* u8 bHubContrCurrent; 0 mA */ /* DeviceRemovable and PortPwrCtrlMask patched in later */ }; diff --git a/hw/usb/dev-mtp.c b/hw/usb/dev-mtp.c index c1d1694fd0..1cac1cd435 100644 --- a/hw/usb/dev-mtp.c +++ b/hw/usb/dev-mtp.c @@ -10,12 +10,11 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/error.h" #include "qemu/error-report.h" #include #include - +#include #include @@ -1607,7 +1606,7 @@ static void usb_mtp_write_data(MTPState *s, uint32_t handle) usb_mtp_object_lookup(s, s->dataset.parent_handle); char *path = NULL; uint64_t rc; - mode_t mask = 0644; + mode_t mask = 0755; int ret = 0; assert(d != NULL); @@ -1623,7 +1622,7 @@ static void usb_mtp_write_data(MTPState *s, uint32_t handle) if (s->dataset.filename) { path = g_strdup_printf("%s/%s", parent->path, s->dataset.filename); if (s->dataset.format == FMT_ASSOCIATION) { - ret = mkdir(path, mask); + ret = g_mkdir(path, mask); if (!ret) { usb_mtp_queue_result(s, RES_OK, d->trans, 3, QEMU_STORAGE_ID, @@ -1635,7 +1634,7 @@ static void usb_mtp_write_data(MTPState *s, uint32_t handle) } d->fd = open(path, O_CREAT | O_WRONLY | - O_CLOEXEC | O_NOFOLLOW, mask); + O_CLOEXEC | O_NOFOLLOW, mask & 0666); if (d->fd == -1) { ret = 1; goto done; @@ -2106,7 +2105,7 @@ static void usb_mtp_class_initfn(ObjectClass *klass, void *data) device_class_set_props(dc, mtp_properties); } -static TypeInfo mtp_info = { +static const TypeInfo mtp_info = { .name = TYPE_USB_MTP, .parent = TYPE_USB_DEVICE, .instance_size = sizeof(MTPState), diff --git a/hw/usb/dev-network.c b/hw/usb/dev-network.c index 6c49c16015..5fff487ee5 100644 --- a/hw/usb/dev-network.c +++ b/hw/usb/dev-network.c @@ -52,7 +52,7 @@ #define RNDIS_PRODUCT_NUM 0xa4a2 /* Ethernet/RNDIS Gadget */ enum usbstring_idx { - STRING_MANUFACTURER = 1, + STRING_MANUFACTURER = 1, STRING_PRODUCT, STRING_ETHADDR, STRING_DATA, @@ -64,37 +64,39 @@ enum usbstring_idx { STRING_SERIALNUMBER, }; -#define DEV_CONFIG_VALUE 1 /* CDC or a subset */ -#define DEV_RNDIS_CONFIG_VALUE 2 /* RNDIS; optional */ +#define DEV_CONFIG_VALUE 1 /* CDC or a subset */ +#define DEV_RNDIS_CONFIG_VALUE 2 /* RNDIS; optional */ -#define USB_CDC_SUBCLASS_ACM 0x02 -#define USB_CDC_SUBCLASS_ETHERNET 0x06 +#define USB_CDC_SUBCLASS_ACM 0x02 +#define USB_CDC_SUBCLASS_ETHERNET 0x06 -#define USB_CDC_PROTO_NONE 0 -#define USB_CDC_ACM_PROTO_VENDOR 0xff +#define USB_CDC_PROTO_NONE 0 
+#define USB_CDC_ACM_PROTO_VENDOR 0xff -#define USB_CDC_HEADER_TYPE 0x00 /* header_desc */ -#define USB_CDC_CALL_MANAGEMENT_TYPE 0x01 /* call_mgmt_descriptor */ -#define USB_CDC_ACM_TYPE 0x02 /* acm_descriptor */ -#define USB_CDC_UNION_TYPE 0x06 /* union_desc */ -#define USB_CDC_ETHERNET_TYPE 0x0f /* ether_desc */ +#define USB_CDC_HEADER_TYPE 0x00 /* header_desc */ +#define USB_CDC_CALL_MANAGEMENT_TYPE 0x01 /* call_mgmt_descriptor */ +#define USB_CDC_ACM_TYPE 0x02 /* acm_descriptor */ +#define USB_CDC_UNION_TYPE 0x06 /* union_desc */ +#define USB_CDC_ETHERNET_TYPE 0x0f /* ether_desc */ -#define USB_CDC_SEND_ENCAPSULATED_COMMAND 0x00 -#define USB_CDC_GET_ENCAPSULATED_RESPONSE 0x01 -#define USB_CDC_REQ_SET_LINE_CODING 0x20 -#define USB_CDC_REQ_GET_LINE_CODING 0x21 -#define USB_CDC_REQ_SET_CONTROL_LINE_STATE 0x22 -#define USB_CDC_REQ_SEND_BREAK 0x23 -#define USB_CDC_SET_ETHERNET_MULTICAST_FILTERS 0x40 -#define USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER 0x41 -#define USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER 0x42 -#define USB_CDC_SET_ETHERNET_PACKET_FILTER 0x43 -#define USB_CDC_GET_ETHERNET_STATISTIC 0x44 +#define USB_CDC_SEND_ENCAPSULATED_COMMAND 0x00 +#define USB_CDC_GET_ENCAPSULATED_RESPONSE 0x01 +#define USB_CDC_REQ_SET_LINE_CODING 0x20 +#define USB_CDC_REQ_GET_LINE_CODING 0x21 +#define USB_CDC_REQ_SET_CONTROL_LINE_STATE 0x22 +#define USB_CDC_REQ_SEND_BREAK 0x23 +#define USB_CDC_SET_ETHERNET_MULTICAST_FILTERS 0x40 +#define USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER 0x41 +#define USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER 0x42 +#define USB_CDC_SET_ETHERNET_PACKET_FILTER 0x43 +#define USB_CDC_GET_ETHERNET_STATISTIC 0x44 -#define LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */ -#define STATUS_BYTECOUNT 16 /* 8 byte header + data */ +#define USB_CDC_NETWORK_CONNECTION 0x00 -#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */ +#define LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */ +#define STATUS_BYTECOUNT 16 /* 8 byte header + data */ + +#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */ static const USBDescStrings usb_net_stringtable = { [STRING_MANUFACTURER] = "QEMU", @@ -304,57 +306,57 @@ static const USBDesc desc_net = { /* * RNDIS Definitions - in theory not specific to USB. 
*/ -#define RNDIS_MAXIMUM_FRAME_SIZE 1518 -#define RNDIS_MAX_TOTAL_SIZE 1558 +#define RNDIS_MAXIMUM_FRAME_SIZE 1518 +#define RNDIS_MAX_TOTAL_SIZE 1558 /* Remote NDIS Versions */ -#define RNDIS_MAJOR_VERSION 1 -#define RNDIS_MINOR_VERSION 0 +#define RNDIS_MAJOR_VERSION 1 +#define RNDIS_MINOR_VERSION 0 /* Status Values */ -#define RNDIS_STATUS_SUCCESS 0x00000000U /* Success */ -#define RNDIS_STATUS_FAILURE 0xc0000001U /* Unspecified error */ -#define RNDIS_STATUS_INVALID_DATA 0xc0010015U /* Invalid data */ -#define RNDIS_STATUS_NOT_SUPPORTED 0xc00000bbU /* Unsupported request */ -#define RNDIS_STATUS_MEDIA_CONNECT 0x4001000bU /* Device connected */ -#define RNDIS_STATUS_MEDIA_DISCONNECT 0x4001000cU /* Device disconnected */ +#define RNDIS_STATUS_SUCCESS 0x00000000U /* Success */ +#define RNDIS_STATUS_FAILURE 0xc0000001U /* Unspecified error */ +#define RNDIS_STATUS_INVALID_DATA 0xc0010015U /* Invalid data */ +#define RNDIS_STATUS_NOT_SUPPORTED 0xc00000bbU /* Unsupported request */ +#define RNDIS_STATUS_MEDIA_CONNECT 0x4001000bU /* Device connected */ +#define RNDIS_STATUS_MEDIA_DISCONNECT 0x4001000cU /* Device disconnected */ /* Message Set for Connectionless (802.3) Devices */ enum { - RNDIS_PACKET_MSG = 1, - RNDIS_INITIALIZE_MSG = 2, /* Initialize device */ - RNDIS_HALT_MSG = 3, - RNDIS_QUERY_MSG = 4, - RNDIS_SET_MSG = 5, - RNDIS_RESET_MSG = 6, - RNDIS_INDICATE_STATUS_MSG = 7, - RNDIS_KEEPALIVE_MSG = 8, + RNDIS_PACKET_MSG = 1, + RNDIS_INITIALIZE_MSG = 2, /* Initialize device */ + RNDIS_HALT_MSG = 3, + RNDIS_QUERY_MSG = 4, + RNDIS_SET_MSG = 5, + RNDIS_RESET_MSG = 6, + RNDIS_INDICATE_STATUS_MSG = 7, + RNDIS_KEEPALIVE_MSG = 8, }; /* Message completion */ enum { - RNDIS_INITIALIZE_CMPLT = 0x80000002U, - RNDIS_QUERY_CMPLT = 0x80000004U, - RNDIS_SET_CMPLT = 0x80000005U, - RNDIS_RESET_CMPLT = 0x80000006U, - RNDIS_KEEPALIVE_CMPLT = 0x80000008U, + RNDIS_INITIALIZE_CMPLT = 0x80000002U, + RNDIS_QUERY_CMPLT = 0x80000004U, + RNDIS_SET_CMPLT = 0x80000005U, + RNDIS_RESET_CMPLT = 0x80000006U, + RNDIS_KEEPALIVE_CMPLT = 0x80000008U, }; /* Device Flags */ enum { - RNDIS_DF_CONNECTIONLESS = 1, - RNDIS_DF_CONNECTIONORIENTED = 2, + RNDIS_DF_CONNECTIONLESS = 1, + RNDIS_DF_CONNECTIONORIENTED = 2, }; -#define RNDIS_MEDIUM_802_3 0x00000000U +#define RNDIS_MEDIUM_802_3 0x00000000U /* from drivers/net/sk98lin/h/skgepnmi.h */ -#define OID_PNP_CAPABILITIES 0xfd010100 -#define OID_PNP_SET_POWER 0xfd010101 -#define OID_PNP_QUERY_POWER 0xfd010102 -#define OID_PNP_ADD_WAKE_UP_PATTERN 0xfd010103 -#define OID_PNP_REMOVE_WAKE_UP_PATTERN 0xfd010104 -#define OID_PNP_ENABLE_WAKE_UP 0xfd010106 +#define OID_PNP_CAPABILITIES 0xfd010100 +#define OID_PNP_SET_POWER 0xfd010101 +#define OID_PNP_QUERY_POWER 0xfd010102 +#define OID_PNP_ADD_WAKE_UP_PATTERN 0xfd010103 +#define OID_PNP_REMOVE_WAKE_UP_PATTERN 0xfd010104 +#define OID_PNP_ENABLE_WAKE_UP 0xfd010106 typedef uint32_t le32; @@ -492,88 +494,88 @@ enum rndis_state /* from ndis.h */ enum ndis_oid { /* Required Object IDs (OIDs) */ - OID_GEN_SUPPORTED_LIST = 0x00010101, - OID_GEN_HARDWARE_STATUS = 0x00010102, - OID_GEN_MEDIA_SUPPORTED = 0x00010103, - OID_GEN_MEDIA_IN_USE = 0x00010104, - OID_GEN_MAXIMUM_LOOKAHEAD = 0x00010105, - OID_GEN_MAXIMUM_FRAME_SIZE = 0x00010106, - OID_GEN_LINK_SPEED = 0x00010107, - OID_GEN_TRANSMIT_BUFFER_SPACE = 0x00010108, - OID_GEN_RECEIVE_BUFFER_SPACE = 0x00010109, - OID_GEN_TRANSMIT_BLOCK_SIZE = 0x0001010a, - OID_GEN_RECEIVE_BLOCK_SIZE = 0x0001010b, - OID_GEN_VENDOR_ID = 0x0001010c, - OID_GEN_VENDOR_DESCRIPTION = 0x0001010d, - OID_GEN_CURRENT_PACKET_FILTER = 
0x0001010e, - OID_GEN_CURRENT_LOOKAHEAD = 0x0001010f, - OID_GEN_DRIVER_VERSION = 0x00010110, - OID_GEN_MAXIMUM_TOTAL_SIZE = 0x00010111, - OID_GEN_PROTOCOL_OPTIONS = 0x00010112, - OID_GEN_MAC_OPTIONS = 0x00010113, - OID_GEN_MEDIA_CONNECT_STATUS = 0x00010114, - OID_GEN_MAXIMUM_SEND_PACKETS = 0x00010115, - OID_GEN_VENDOR_DRIVER_VERSION = 0x00010116, - OID_GEN_SUPPORTED_GUIDS = 0x00010117, - OID_GEN_NETWORK_LAYER_ADDRESSES = 0x00010118, - OID_GEN_TRANSPORT_HEADER_OFFSET = 0x00010119, - OID_GEN_MACHINE_NAME = 0x0001021a, - OID_GEN_RNDIS_CONFIG_PARAMETER = 0x0001021b, - OID_GEN_VLAN_ID = 0x0001021c, + OID_GEN_SUPPORTED_LIST = 0x00010101, + OID_GEN_HARDWARE_STATUS = 0x00010102, + OID_GEN_MEDIA_SUPPORTED = 0x00010103, + OID_GEN_MEDIA_IN_USE = 0x00010104, + OID_GEN_MAXIMUM_LOOKAHEAD = 0x00010105, + OID_GEN_MAXIMUM_FRAME_SIZE = 0x00010106, + OID_GEN_LINK_SPEED = 0x00010107, + OID_GEN_TRANSMIT_BUFFER_SPACE = 0x00010108, + OID_GEN_RECEIVE_BUFFER_SPACE = 0x00010109, + OID_GEN_TRANSMIT_BLOCK_SIZE = 0x0001010a, + OID_GEN_RECEIVE_BLOCK_SIZE = 0x0001010b, + OID_GEN_VENDOR_ID = 0x0001010c, + OID_GEN_VENDOR_DESCRIPTION = 0x0001010d, + OID_GEN_CURRENT_PACKET_FILTER = 0x0001010e, + OID_GEN_CURRENT_LOOKAHEAD = 0x0001010f, + OID_GEN_DRIVER_VERSION = 0x00010110, + OID_GEN_MAXIMUM_TOTAL_SIZE = 0x00010111, + OID_GEN_PROTOCOL_OPTIONS = 0x00010112, + OID_GEN_MAC_OPTIONS = 0x00010113, + OID_GEN_MEDIA_CONNECT_STATUS = 0x00010114, + OID_GEN_MAXIMUM_SEND_PACKETS = 0x00010115, + OID_GEN_VENDOR_DRIVER_VERSION = 0x00010116, + OID_GEN_SUPPORTED_GUIDS = 0x00010117, + OID_GEN_NETWORK_LAYER_ADDRESSES = 0x00010118, + OID_GEN_TRANSPORT_HEADER_OFFSET = 0x00010119, + OID_GEN_MACHINE_NAME = 0x0001021a, + OID_GEN_RNDIS_CONFIG_PARAMETER = 0x0001021b, + OID_GEN_VLAN_ID = 0x0001021c, /* Optional OIDs */ - OID_GEN_MEDIA_CAPABILITIES = 0x00010201, - OID_GEN_PHYSICAL_MEDIUM = 0x00010202, + OID_GEN_MEDIA_CAPABILITIES = 0x00010201, + OID_GEN_PHYSICAL_MEDIUM = 0x00010202, /* Required statistics OIDs */ - OID_GEN_XMIT_OK = 0x00020101, - OID_GEN_RCV_OK = 0x00020102, - OID_GEN_XMIT_ERROR = 0x00020103, - OID_GEN_RCV_ERROR = 0x00020104, - OID_GEN_RCV_NO_BUFFER = 0x00020105, + OID_GEN_XMIT_OK = 0x00020101, + OID_GEN_RCV_OK = 0x00020102, + OID_GEN_XMIT_ERROR = 0x00020103, + OID_GEN_RCV_ERROR = 0x00020104, + OID_GEN_RCV_NO_BUFFER = 0x00020105, /* Optional statistics OIDs */ - OID_GEN_DIRECTED_BYTES_XMIT = 0x00020201, - OID_GEN_DIRECTED_FRAMES_XMIT = 0x00020202, - OID_GEN_MULTICAST_BYTES_XMIT = 0x00020203, - OID_GEN_MULTICAST_FRAMES_XMIT = 0x00020204, - OID_GEN_BROADCAST_BYTES_XMIT = 0x00020205, - OID_GEN_BROADCAST_FRAMES_XMIT = 0x00020206, - OID_GEN_DIRECTED_BYTES_RCV = 0x00020207, - OID_GEN_DIRECTED_FRAMES_RCV = 0x00020208, - OID_GEN_MULTICAST_BYTES_RCV = 0x00020209, - OID_GEN_MULTICAST_FRAMES_RCV = 0x0002020a, - OID_GEN_BROADCAST_BYTES_RCV = 0x0002020b, - OID_GEN_BROADCAST_FRAMES_RCV = 0x0002020c, - OID_GEN_RCV_CRC_ERROR = 0x0002020d, - OID_GEN_TRANSMIT_QUEUE_LENGTH = 0x0002020e, - OID_GEN_GET_TIME_CAPS = 0x0002020f, - OID_GEN_GET_NETCARD_TIME = 0x00020210, - OID_GEN_NETCARD_LOAD = 0x00020211, - OID_GEN_DEVICE_PROFILE = 0x00020212, - OID_GEN_INIT_TIME_MS = 0x00020213, - OID_GEN_RESET_COUNTS = 0x00020214, - OID_GEN_MEDIA_SENSE_COUNTS = 0x00020215, - OID_GEN_FRIENDLY_NAME = 0x00020216, - OID_GEN_MINIPORT_INFO = 0x00020217, - OID_GEN_RESET_VERIFY_PARAMETERS = 0x00020218, + OID_GEN_DIRECTED_BYTES_XMIT = 0x00020201, + OID_GEN_DIRECTED_FRAMES_XMIT = 0x00020202, + OID_GEN_MULTICAST_BYTES_XMIT = 0x00020203, + OID_GEN_MULTICAST_FRAMES_XMIT = 0x00020204, + 
OID_GEN_BROADCAST_BYTES_XMIT = 0x00020205, + OID_GEN_BROADCAST_FRAMES_XMIT = 0x00020206, + OID_GEN_DIRECTED_BYTES_RCV = 0x00020207, + OID_GEN_DIRECTED_FRAMES_RCV = 0x00020208, + OID_GEN_MULTICAST_BYTES_RCV = 0x00020209, + OID_GEN_MULTICAST_FRAMES_RCV = 0x0002020a, + OID_GEN_BROADCAST_BYTES_RCV = 0x0002020b, + OID_GEN_BROADCAST_FRAMES_RCV = 0x0002020c, + OID_GEN_RCV_CRC_ERROR = 0x0002020d, + OID_GEN_TRANSMIT_QUEUE_LENGTH = 0x0002020e, + OID_GEN_GET_TIME_CAPS = 0x0002020f, + OID_GEN_GET_NETCARD_TIME = 0x00020210, + OID_GEN_NETCARD_LOAD = 0x00020211, + OID_GEN_DEVICE_PROFILE = 0x00020212, + OID_GEN_INIT_TIME_MS = 0x00020213, + OID_GEN_RESET_COUNTS = 0x00020214, + OID_GEN_MEDIA_SENSE_COUNTS = 0x00020215, + OID_GEN_FRIENDLY_NAME = 0x00020216, + OID_GEN_MINIPORT_INFO = 0x00020217, + OID_GEN_RESET_VERIFY_PARAMETERS = 0x00020218, /* IEEE 802.3 (Ethernet) OIDs */ - OID_802_3_PERMANENT_ADDRESS = 0x01010101, - OID_802_3_CURRENT_ADDRESS = 0x01010102, - OID_802_3_MULTICAST_LIST = 0x01010103, - OID_802_3_MAXIMUM_LIST_SIZE = 0x01010104, - OID_802_3_MAC_OPTIONS = 0x01010105, - OID_802_3_RCV_ERROR_ALIGNMENT = 0x01020101, - OID_802_3_XMIT_ONE_COLLISION = 0x01020102, - OID_802_3_XMIT_MORE_COLLISIONS = 0x01020103, - OID_802_3_XMIT_DEFERRED = 0x01020201, - OID_802_3_XMIT_MAX_COLLISIONS = 0x01020202, - OID_802_3_RCV_OVERRUN = 0x01020203, - OID_802_3_XMIT_UNDERRUN = 0x01020204, - OID_802_3_XMIT_HEARTBEAT_FAILURE = 0x01020205, - OID_802_3_XMIT_TIMES_CRS_LOST = 0x01020206, - OID_802_3_XMIT_LATE_COLLISIONS = 0x01020207, + OID_802_3_PERMANENT_ADDRESS = 0x01010101, + OID_802_3_CURRENT_ADDRESS = 0x01010102, + OID_802_3_MULTICAST_LIST = 0x01010103, + OID_802_3_MAXIMUM_LIST_SIZE = 0x01010104, + OID_802_3_MAC_OPTIONS = 0x01010105, + OID_802_3_RCV_ERROR_ALIGNMENT = 0x01020101, + OID_802_3_XMIT_ONE_COLLISION = 0x01020102, + OID_802_3_XMIT_MORE_COLLISIONS = 0x01020103, + OID_802_3_XMIT_DEFERRED = 0x01020201, + OID_802_3_XMIT_MAX_COLLISIONS = 0x01020202, + OID_802_3_RCV_OVERRUN = 0x01020203, + OID_802_3_XMIT_UNDERRUN = 0x01020204, + OID_802_3_XMIT_HEARTBEAT_FAILURE = 0x01020205, + OID_802_3_XMIT_TIMES_CRS_LOST = 0x01020206, + OID_802_3_XMIT_LATE_COLLISIONS = 0x01020207, }; static const uint32_t oid_supported_list[] = @@ -616,13 +618,13 @@ static const uint32_t oid_supported_list[] = OID_802_3_XMIT_MORE_COLLISIONS, }; -#define NDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA (1 << 0) -#define NDIS_MAC_OPTION_RECEIVE_SERIALIZED (1 << 1) -#define NDIS_MAC_OPTION_TRANSFERS_NOT_PEND (1 << 2) -#define NDIS_MAC_OPTION_NO_LOOPBACK (1 << 3) -#define NDIS_MAC_OPTION_FULL_DUPLEX (1 << 4) -#define NDIS_MAC_OPTION_EOTX_INDICATION (1 << 5) -#define NDIS_MAC_OPTION_8021P_PRIORITY (1 << 6) +#define NDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA (1 << 0) +#define NDIS_MAC_OPTION_RECEIVE_SERIALIZED (1 << 1) +#define NDIS_MAC_OPTION_TRANSFERS_NOT_PEND (1 << 2) +#define NDIS_MAC_OPTION_NO_LOOPBACK (1 << 3) +#define NDIS_MAC_OPTION_FULL_DUPLEX (1 << 4) +#define NDIS_MAC_OPTION_EOTX_INDICATION (1 << 5) +#define NDIS_MAC_OPTION_8021P_PRIORITY (1 << 6) struct rndis_response { QTAILQ_ENTRY(rndis_response) entries; @@ -640,6 +642,8 @@ struct USBNetState { uint16_t filter; uint32_t vendorid; + uint16_t connection; + unsigned int out_ptr; uint8_t out_buf[2048]; @@ -647,6 +651,7 @@ struct USBNetState { uint8_t in_buf[2048]; USBEndpoint *intr; + USBEndpoint *bulk_in; char usbstring_mac[13]; NICState *nic; @@ -1121,6 +1126,12 @@ static void usb_net_handle_control(USBDevice *dev, USBPacket *p, #endif break; + case ClassInterfaceOutRequest | USB_CDC_SET_ETHERNET_PACKET_FILTER: + 
if (is_rndis(s)) { + goto fail; + } + break; + default: fail: fprintf(stderr, "usbnet: failed control transaction: " @@ -1133,18 +1144,28 @@ static void usb_net_handle_control(USBDevice *dev, USBPacket *p, static void usb_net_handle_statusin(USBNetState *s, USBPacket *p) { - le32 buf[2]; + le32 rbuf[2]; + uint16_t ebuf[4]; if (p->iov.size < 8) { p->status = USB_RET_STALL; return; } - buf[0] = cpu_to_le32(1); - buf[1] = cpu_to_le32(0); - usb_packet_copy(p, buf, 8); - if (!s->rndis_resp.tqh_first) { - p->status = USB_RET_NAK; + if (is_rndis(s)) { + rbuf[0] = cpu_to_le32(1); + rbuf[1] = cpu_to_le32(0); + usb_packet_copy(p, rbuf, 8); + if (!s->rndis_resp.tqh_first) { + p->status = USB_RET_NAK; + } + } else { + ebuf[0] = + cpu_to_be16(ClassInterfaceRequest | USB_CDC_NETWORK_CONNECTION); + ebuf[1] = cpu_to_le16(s->connection); + ebuf[2] = cpu_to_le16(1); + ebuf[3] = cpu_to_le16(0); + usb_packet_copy(p, ebuf, 8); } #ifdef TRAFFIC_DEBUG @@ -1204,7 +1225,7 @@ static void usb_net_handle_dataout(USBNetState *s, USBPacket *p) s->out_ptr += sz; if (!is_rndis(s)) { - if (p->iov.size < 64) { + if (p->iov.size % 64 || p->iov.size == 0) { qemu_send_packet(qemu_get_queue(s->nic), s->out_buf, s->out_ptr); s->out_ptr = 0; } @@ -1317,6 +1338,7 @@ static ssize_t usbnet_receive(NetClientState *nc, const uint8_t *buf, size_t siz memcpy(in_buf, buf, size); s->in_len = total_size; s->in_ptr = 0; + usb_wakeup(s->bulk_in, 0); return size; } @@ -1353,12 +1375,14 @@ static void usb_net_realize(USBDevice *dev, Error **errp) s->rndis_state = RNDIS_UNINITIALIZED; QTAILQ_INIT(&s->rndis_resp); - s->medium = 0; /* NDIS_MEDIUM_802_3 */ + s->medium = 0; /* NDIS_MEDIUM_802_3 */ s->speed = 1000000; /* 100MBps, in 100Bps units */ - s->media_state = 0; /* NDIS_MEDIA_STATE_CONNECTED */; + s->media_state = 0; /* NDIS_MEDIA_STATE_CONNECTED */; s->filter = 0; s->vendorid = 0x1234; + s->connection = 1; /* Connected */ s->intr = usb_ep_get(dev, USB_TOKEN_IN, 1); + s->bulk_in = usb_ep_get(dev, USB_TOKEN_IN, 2); qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_usbnet_info, &s->conf, diff --git a/hw/usb/dev-smartcard-reader.c b/hw/usb/dev-smartcard-reader.c index f7043075be..28164d89be 100644 --- a/hw/usb/dev-smartcard-reader.c +++ b/hw/usb/dev-smartcard-reader.c @@ -37,7 +37,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qapi/error.h" -#include "qemu-common.h" +#include "qemu/cutils.h" #include "qemu/error-report.h" #include "qemu/module.h" #include "hw/qdev-properties.h" @@ -1320,8 +1320,7 @@ static void ccid_realize(USBDevice *dev, Error **errp) usb_desc_create_serial(dev); usb_desc_init(dev); - qbus_create_inplace(&s->bus, sizeof(s->bus), TYPE_CCID_BUS, DEVICE(dev), - NULL); + qbus_init(&s->bus, sizeof(s->bus), TYPE_CCID_BUS, DEVICE(dev), NULL); qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(dev)); s->intr = usb_ep_get(dev, USB_TOKEN_IN, CCID_INT_IN_EP); s->bulk = usb_ep_get(dev, USB_TOKEN_IN, CCID_BULK_IN_EP); diff --git a/hw/usb/dev-storage-bot.c b/hw/usb/dev-storage-bot.c index 68ebaca10c..b24b3148c2 100644 --- a/hw/usb/dev-storage-bot.c +++ b/hw/usb/dev-storage-bot.c @@ -37,8 +37,7 @@ static void usb_msd_bot_realize(USBDevice *dev, Error **errp) s->dev.auto_attach = 0; } - scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(dev), - &usb_msd_scsi_info_bot, NULL); + scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &usb_msd_scsi_info_bot); usb_msd_handle_reset(dev); } diff --git a/hw/usb/dev-storage-classic.c b/hw/usb/dev-storage-classic.c index 3d017a4e67..00f25bade2 100644 --- 
a/hw/usb/dev-storage-classic.c
+++ b/hw/usb/dev-storage-classic.c
@@ -65,8 +65,8 @@ static void usb_msd_storage_realize(USBDevice *dev, Error **errp)
     usb_desc_create_serial(dev);
     usb_desc_init(dev);
     dev->flags |= (1 << USB_DEV_FLAG_IS_SCSI_STORAGE);
-    scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(dev),
-                 &usb_msd_scsi_info_storage, NULL);
+    scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev),
+                  &usb_msd_scsi_info_storage);
     scsi_dev = scsi_bus_legacy_add_drive(&s->bus, blk, 0, !!s->removable,
                                          s->conf.bootindex, s->conf.share_rw,
                                          s->conf.rerror, s->conf.werror,
diff --git a/hw/usb/dev-storage.c b/hw/usb/dev-storage.c
index dca62d544f..e3bcffb3e0 100644
--- a/hw/usb/dev-storage.c
+++ b/hw/usb/dev-storage.c
@@ -177,6 +177,37 @@ static const USBDesc desc = {
     .str = desc_strings,
 };
 
+static void usb_msd_packet_complete(MSDState *s)
+{
+    USBPacket *p = s->packet;
+
+    /*
+     * Set s->packet to NULL before calling usb_packet_complete
+     * because another request may be issued before
+     * usb_packet_complete returns.
+     */
+    trace_usb_msd_packet_complete();
+    s->packet = NULL;
+    usb_packet_complete(&s->dev, p);
+}
+
+static void usb_msd_fatal_error(MSDState *s)
+{
+    trace_usb_msd_fatal_error();
+
+    if (s->packet) {
+        s->packet->status = USB_RET_STALL;
+        usb_msd_packet_complete(s);
+    }
+
+    /*
+     * Guest messed up device state with illegal requests. Go
+     * ignore any requests until the guest resets the device (and
+     * brings it into a known state that way).
+     */
+    s->needs_reset = true;
+}
+
 static void usb_msd_copy_data(MSDState *s, USBPacket *p)
 {
     uint32_t len;
@@ -208,24 +239,16 @@ static void usb_msd_send_status(MSDState *s, USBPacket *p)
     memset(&s->csw, 0, sizeof(s->csw));
 }
 
-static void usb_msd_packet_complete(MSDState *s)
-{
-    USBPacket *p = s->packet;
-
-    /* Set s->packet to NULL before calling usb_packet_complete
-       because another request may be issued before
-       usb_packet_complete returns.
*/ - trace_usb_msd_packet_complete(); - s->packet = NULL; - usb_packet_complete(&s->dev, p); -} - void usb_msd_transfer_data(SCSIRequest *req, uint32_t len) { MSDState *s = DO_UPCAST(MSDState, dev.qdev, req->bus->qbus.parent); USBPacket *p = s->packet; - assert((s->mode == USB_MSDM_DATAOUT) == (req->cmd.mode == SCSI_XFER_TO_DEV)); + if ((s->mode == USB_MSDM_DATAOUT) != (req->cmd.mode == SCSI_XFER_TO_DEV)) { + usb_msd_fatal_error(s); + return; + } + s->scsi_len = len; s->scsi_off = 0; if (p) { @@ -315,6 +338,8 @@ void usb_msd_handle_reset(USBDevice *dev) memset(&s->csw, 0, sizeof(s->csw)); s->mode = USB_MSDM_CBW; + + s->needs_reset = false; } static void usb_msd_handle_control(USBDevice *dev, USBPacket *p, @@ -380,6 +405,11 @@ static void usb_msd_handle_data(USBDevice *dev, USBPacket *p) SCSIDevice *scsi_dev; uint32_t len; + if (s->needs_reset) { + p->status = USB_RET_STALL; + return; + } + switch (p->pid) { case USB_TOKEN_OUT: if (devep != 2) @@ -415,7 +445,7 @@ static void usb_msd_handle_data(USBDevice *dev, USBPacket *p) cbw.cmd_len, s->data_len); assert(le32_to_cpu(s->csw.residue) == 0); s->scsi_len = 0; - s->req = scsi_req_new(scsi_dev, tag, cbw.lun, cbw.cmd, NULL); + s->req = scsi_req_new(scsi_dev, tag, cbw.lun, cbw.cmd, cbw.cmd_len, NULL); if (s->commandlog) { scsi_req_print(s->req); } diff --git a/hw/usb/dev-uas.c b/hw/usb/dev-uas.c index 263056231c..5192b062d6 100644 --- a/hw/usb/dev-uas.c +++ b/hw/usb/dev-uas.c @@ -71,7 +71,7 @@ typedef struct { uint8_t reserved_2; uint64_t lun; uint8_t cdb[16]; - uint8_t add_cdb[1]; /* not supported by QEMU */ + uint8_t add_cdb[1]; } QEMU_PACKED uas_iu_command; typedef struct { @@ -699,6 +699,7 @@ static void usb_uas_command(UASDevice *uas, uas_iu *iu) UASRequest *req; uint32_t len; uint16_t tag = be16_to_cpu(iu->hdr.tag); + size_t cdb_len = sizeof(iu->command.cdb) + iu->command.add_cdb_length; if (iu->command.add_cdb_length > 0) { qemu_log_mask(LOG_UNIMP, "additional adb length not yet supported\n"); @@ -729,7 +730,7 @@ static void usb_uas_command(UASDevice *uas, uas_iu *iu) req->req = scsi_req_new(req->dev, req->tag, usb_uas_get_lun(req->lun), - iu->command.cdb, req); + iu->command.cdb, cdb_len, req); if (uas->requestlog) { scsi_req_print(req->req); } @@ -840,6 +841,9 @@ static void usb_uas_handle_data(USBDevice *dev, USBPacket *p) } break; case UAS_PIPE_ID_STATUS: + if (p->stream > UAS_MAX_STREAMS) { + goto err_stream; + } if (p->stream) { QTAILQ_FOREACH(st, &uas->results, next) { if (st->stream == p->stream) { @@ -867,6 +871,9 @@ static void usb_uas_handle_data(USBDevice *dev, USBPacket *p) break; case UAS_PIPE_ID_DATA_IN: case UAS_PIPE_ID_DATA_OUT: + if (p->stream > UAS_MAX_STREAMS) { + goto err_stream; + } if (p->stream) { req = usb_uas_find_request(uas, p->stream); } else { @@ -902,6 +909,12 @@ static void usb_uas_handle_data(USBDevice *dev, USBPacket *p) p->status = USB_RET_STALL; break; } + return; + +err_stream: + error_report("%s: invalid stream %d", __func__, p->stream); + p->status = USB_RET_STALL; + return; } static void usb_uas_unrealize(USBDevice *dev) @@ -927,8 +940,7 @@ static void usb_uas_realize(USBDevice *dev, Error **errp) uas->status_bh = qemu_bh_new(usb_uas_send_status_bh, uas); dev->flags |= (1 << USB_DEV_FLAG_IS_SCSI_STORAGE); - scsi_bus_new(&uas->bus, sizeof(uas->bus), DEVICE(dev), - &usb_uas_scsi_info, NULL); + scsi_bus_init(&uas->bus, sizeof(uas->bus), DEVICE(dev), &usb_uas_scsi_info); } static const VMStateDescription vmstate_usb_uas = { diff --git a/hw/usb/dev-wacom.c b/hw/usb/dev-wacom.c index 
ed687bc9f1..7177c17f03 100644 --- a/hw/usb/dev-wacom.c +++ b/hw/usb/dev-wacom.c @@ -36,8 +36,8 @@ #include "qom/object.h" /* Interface requests */ -#define WACOM_GET_REPORT 0x2101 -#define WACOM_SET_REPORT 0x2109 +#define WACOM_GET_REPORT 0x2101 +#define WACOM_SET_REPORT 0x2109 struct USBWacomState { USBDevice dev; @@ -69,6 +69,65 @@ static const USBDescStrings desc_strings = { [STR_SERIALNUMBER] = "1", }; +static const uint8_t qemu_wacom_hid_report_descriptor[] = { + 0x05, 0x01, /* Usage Page (Desktop) */ + 0x09, 0x02, /* Usage (Mouse) */ + 0xa1, 0x01, /* Collection (Application) */ + 0x85, 0x01, /* Report ID (1) */ + 0x09, 0x01, /* Usage (Pointer) */ + 0xa1, 0x00, /* Collection (Physical) */ + 0x05, 0x09, /* Usage Page (Button) */ + 0x19, 0x01, /* Usage Minimum (01h) */ + 0x29, 0x03, /* Usage Maximum (03h) */ + 0x15, 0x00, /* Logical Minimum (0) */ + 0x25, 0x01, /* Logical Maximum (1) */ + 0x95, 0x03, /* Report Count (3) */ + 0x75, 0x01, /* Report Size (1) */ + 0x81, 0x02, /* Input (Data, Variable, Absolute) */ + 0x95, 0x01, /* Report Count (1) */ + 0x75, 0x05, /* Report Size (5) */ + 0x81, 0x01, /* Input (Constant) */ + 0x05, 0x01, /* Usage Page (Desktop) */ + 0x09, 0x30, /* Usage (X) */ + 0x09, 0x31, /* Usage (Y) */ + 0x09, 0x38, /* Usage (Wheel) */ + 0x15, 0x81, /* Logical Minimum (-127) */ + 0x25, 0x7f, /* Logical Maximum (127) */ + 0x75, 0x08, /* Report Size (8) */ + 0x95, 0x03, /* Report Count (3) */ + 0x81, 0x06, /* Input (Data, Variable, Relative) */ + 0x95, 0x03, /* Report Count (3) */ + 0x81, 0x01, /* Input (Constant) */ + 0xc0, /* End Collection */ + 0xc0, /* End Collection */ + 0x05, 0x0d, /* Usage Page (Digitizer) */ + 0x09, 0x01, /* Usage (Digitizer) */ + 0xa1, 0x01, /* Collection (Application) */ + 0x85, 0x02, /* Report ID (2) */ + 0xa1, 0x00, /* Collection (Physical) */ + 0x06, 0x00, 0xff,/* Usage Page (ff00h), vendor-defined */ + 0x09, 0x01, /* Usage (01h) */ + 0x15, 0x00, /* Logical Minimum (0) */ + 0x26, 0xff, 0x00,/* Logical Maximum (255) */ + 0x75, 0x08, /* Report Size (8) */ + 0x95, 0x07, /* Report Count (7) */ + 0x81, 0x02, /* Input (Data, Variable, Absolute) */ + 0xc0, /* End Collection */ + 0x09, 0x01, /* Usage (01h) */ + 0x85, 0x63, /* Report ID (99) */ + 0x95, 0x07, /* Report Count (7) */ + 0x81, 0x02, /* Input (Data, Variable, Absolute) */ + 0x09, 0x01, /* Usage (01h) */ + 0x85, 0x02, /* Report ID (2) */ + 0x95, 0x01, /* Report Count (1) */ + 0xb1, 0x02, /* Feature (Variable) */ + 0x09, 0x01, /* Usage (01h) */ + 0x85, 0x03, /* Report ID (3) */ + 0x95, 0x01, /* Report Count (1) */ + 0xb1, 0x02, /* Feature (Variable) */ + 0xc0 /* End Collection */ +}; + static const USBDescIface desc_iface_wacom = { .bInterfaceNumber = 0, .bNumEndpoints = 1, @@ -86,7 +145,7 @@ static const USBDescIface desc_iface_wacom = { 0x00, /* u8 country_code */ 0x01, /* u8 num_descriptors */ USB_DT_REPORT, /* u8 type: Report */ - 0x6e, 0, /* u16 len */ + sizeof(qemu_wacom_hid_report_descriptor), 0, /* u16 len */ }, }, }, @@ -266,6 +325,17 @@ static void usb_wacom_handle_control(USBDevice *dev, USBPacket *p, } switch (request) { + case InterfaceRequest | USB_REQ_GET_DESCRIPTOR: + switch (value >> 8) { + case 0x22: + memcpy(data, qemu_wacom_hid_report_descriptor, + sizeof(qemu_wacom_hid_report_descriptor)); + p->actual_length = sizeof(qemu_wacom_hid_report_descriptor); + break; + default: + return; + } + break; case WACOM_SET_REPORT: if (s->mouse_grabbed) { qemu_remove_mouse_event_handler(s->eh_entry); diff --git a/hw/usb/hcd-dwc2.c b/hw/usb/hcd-dwc2.c index e1d96acf7e..8755e9cbb0 100644 --- 
a/hw/usb/hcd-dwc2.c +++ b/hw/usb/hcd-dwc2.c @@ -272,8 +272,8 @@ static void dwc2_handle_packet(DWC2State *s, uint32_t devadr, USBDevice *dev, if (pid != USB_TOKEN_IN) { trace_usb_dwc2_memory_read(hcdma, tlen); - if (dma_memory_read(&s->dma_as, hcdma, - s->usb_buf[chan], tlen) != MEMTX_OK) { + if (dma_memory_read(&s->dma_as, hcdma, s->usb_buf[chan], tlen, + MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { qemu_log_mask(LOG_GUEST_ERROR, "%s: dma_memory_read failed\n", __func__); } @@ -328,8 +328,8 @@ babble: if (pid == USB_TOKEN_IN) { trace_usb_dwc2_memory_write(hcdma, actual); - if (dma_memory_write(&s->dma_as, hcdma, s->usb_buf[chan], - actual) != MEMTX_OK) { + if (dma_memory_write(&s->dma_as, hcdma, s->usb_buf[chan], actual, + MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { qemu_log_mask(LOG_GUEST_ERROR, "%s: dma_memory_write failed\n", __func__); } diff --git a/hw/usb/hcd-dwc2.h b/hw/usb/hcd-dwc2.h index 6998b04706..9c3d88ea14 100644 --- a/hw/usb/hcd-dwc2.h +++ b/hw/usb/hcd-dwc2.h @@ -16,8 +16,8 @@ * GNU General Public License for more details. */ -#ifndef HW_USB_DWC2_H -#define HW_USB_DWC2_H +#ifndef HW_USB_HCD_DWC2_H +#define HW_USB_HCD_DWC2_H #include "qemu/timer.h" #include "hw/irq.h" diff --git a/hw/usb/hcd-ehci.c b/hw/usb/hcd-ehci.c index 6caa7ac6c2..d4da8dcb8d 100644 --- a/hw/usb/hcd-ehci.c +++ b/hw/usb/hcd-ehci.c @@ -383,7 +383,8 @@ static inline int get_dwords(EHCIState *ehci, uint32_t addr, } for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { - dma_memory_read(ehci->as, addr, buf, sizeof(*buf)); + dma_memory_read(ehci->as, addr, buf, sizeof(*buf), + MEMTXATTRS_UNSPECIFIED); *buf = le32_to_cpu(*buf); } @@ -405,7 +406,8 @@ static inline int put_dwords(EHCIState *ehci, uint32_t addr, for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { uint32_t tmp = cpu_to_le32(*buf); - dma_memory_write(ehci->as, addr, &tmp, sizeof(tmp)); + dma_memory_write(ehci->as, addr, &tmp, sizeof(tmp), + MEMTXATTRS_UNSPECIFIED); } return num; @@ -2009,7 +2011,10 @@ static int ehci_state_writeback(EHCIQueue *q) ehci_trace_qtd(q, NLPTR_GET(p->qtdaddr), (EHCIqtd *) &q->qh.next_qtd); qtd = (uint32_t *) &q->qh.next_qtd; addr = NLPTR_GET(p->qtdaddr); - put_dwords(q->ehci, addr + 2 * sizeof(uint32_t), qtd + 2, 2); + /* First write back the offset */ + put_dwords(q->ehci, addr + 3 * sizeof(uint32_t), qtd + 3, 1); + /* Then write back the token, clearing the 'active' bit */ + put_dwords(q->ehci, addr + 2 * sizeof(uint32_t), qtd + 2, 1); ehci_free_packet(p); /* diff --git a/hw/usb/hcd-musb.c b/hw/usb/hcd-musb.c index 85f5ff5bd4..6dca373cb1 100644 --- a/hw/usb/hcd-musb.c +++ b/hw/usb/hcd-musb.c @@ -28,227 +28,227 @@ #include "hw/hw.h" /* Common USB registers */ -#define MUSB_HDRC_FADDR 0x00 /* 8-bit */ -#define MUSB_HDRC_POWER 0x01 /* 8-bit */ +#define MUSB_HDRC_FADDR 0x00 /* 8-bit */ +#define MUSB_HDRC_POWER 0x01 /* 8-bit */ -#define MUSB_HDRC_INTRTX 0x02 /* 16-bit */ -#define MUSB_HDRC_INTRRX 0x04 -#define MUSB_HDRC_INTRTXE 0x06 -#define MUSB_HDRC_INTRRXE 0x08 -#define MUSB_HDRC_INTRUSB 0x0a /* 8 bit */ -#define MUSB_HDRC_INTRUSBE 0x0b /* 8 bit */ -#define MUSB_HDRC_FRAME 0x0c /* 16-bit */ -#define MUSB_HDRC_INDEX 0x0e /* 8 bit */ -#define MUSB_HDRC_TESTMODE 0x0f /* 8 bit */ +#define MUSB_HDRC_INTRTX 0x02 /* 16-bit */ +#define MUSB_HDRC_INTRRX 0x04 +#define MUSB_HDRC_INTRTXE 0x06 +#define MUSB_HDRC_INTRRXE 0x08 +#define MUSB_HDRC_INTRUSB 0x0a /* 8 bit */ +#define MUSB_HDRC_INTRUSBE 0x0b /* 8 bit */ +#define MUSB_HDRC_FRAME 0x0c /* 16-bit */ +#define MUSB_HDRC_INDEX 0x0e /* 8 bit */ +#define MUSB_HDRC_TESTMODE 0x0f /* 8 bit 
*/ /* Per-EP registers in indexed mode */ -#define MUSB_HDRC_EP_IDX 0x10 /* 8-bit */ +#define MUSB_HDRC_EP_IDX 0x10 /* 8-bit */ /* EP FIFOs */ -#define MUSB_HDRC_FIFO 0x20 +#define MUSB_HDRC_FIFO 0x20 /* Additional Control Registers */ -#define MUSB_HDRC_DEVCTL 0x60 /* 8 bit */ +#define MUSB_HDRC_DEVCTL 0x60 /* 8 bit */ /* These are indexed */ -#define MUSB_HDRC_TXFIFOSZ 0x62 /* 8 bit (see masks) */ -#define MUSB_HDRC_RXFIFOSZ 0x63 /* 8 bit (see masks) */ -#define MUSB_HDRC_TXFIFOADDR 0x64 /* 16 bit offset shifted right 3 */ -#define MUSB_HDRC_RXFIFOADDR 0x66 /* 16 bit offset shifted right 3 */ +#define MUSB_HDRC_TXFIFOSZ 0x62 /* 8 bit (see masks) */ +#define MUSB_HDRC_RXFIFOSZ 0x63 /* 8 bit (see masks) */ +#define MUSB_HDRC_TXFIFOADDR 0x64 /* 16 bit offset shifted right 3 */ +#define MUSB_HDRC_RXFIFOADDR 0x66 /* 16 bit offset shifted right 3 */ /* Some more registers */ -#define MUSB_HDRC_VCTRL 0x68 /* 8 bit */ -#define MUSB_HDRC_HWVERS 0x6c /* 8 bit */ +#define MUSB_HDRC_VCTRL 0x68 /* 8 bit */ +#define MUSB_HDRC_HWVERS 0x6c /* 8 bit */ /* Added in HDRC 1.9(?) & MHDRC 1.4 */ /* ULPI pass-through */ -#define MUSB_HDRC_ULPI_VBUSCTL 0x70 -#define MUSB_HDRC_ULPI_REGDATA 0x74 -#define MUSB_HDRC_ULPI_REGADDR 0x75 -#define MUSB_HDRC_ULPI_REGCTL 0x76 +#define MUSB_HDRC_ULPI_VBUSCTL 0x70 +#define MUSB_HDRC_ULPI_REGDATA 0x74 +#define MUSB_HDRC_ULPI_REGADDR 0x75 +#define MUSB_HDRC_ULPI_REGCTL 0x76 /* Extended config & PHY control */ -#define MUSB_HDRC_ENDCOUNT 0x78 /* 8 bit */ -#define MUSB_HDRC_DMARAMCFG 0x79 /* 8 bit */ -#define MUSB_HDRC_PHYWAIT 0x7a /* 8 bit */ -#define MUSB_HDRC_PHYVPLEN 0x7b /* 8 bit */ -#define MUSB_HDRC_HS_EOF1 0x7c /* 8 bit, units of 546.1 us */ -#define MUSB_HDRC_FS_EOF1 0x7d /* 8 bit, units of 533.3 ns */ -#define MUSB_HDRC_LS_EOF1 0x7e /* 8 bit, units of 1.067 us */ +#define MUSB_HDRC_ENDCOUNT 0x78 /* 8 bit */ +#define MUSB_HDRC_DMARAMCFG 0x79 /* 8 bit */ +#define MUSB_HDRC_PHYWAIT 0x7a /* 8 bit */ +#define MUSB_HDRC_PHYVPLEN 0x7b /* 8 bit */ +#define MUSB_HDRC_HS_EOF1 0x7c /* 8 bit, units of 546.1 us */ +#define MUSB_HDRC_FS_EOF1 0x7d /* 8 bit, units of 533.3 ns */ +#define MUSB_HDRC_LS_EOF1 0x7e /* 8 bit, units of 1.067 us */ /* Per-EP BUSCTL registers */ -#define MUSB_HDRC_BUSCTL 0x80 +#define MUSB_HDRC_BUSCTL 0x80 /* Per-EP registers in flat mode */ -#define MUSB_HDRC_EP 0x100 +#define MUSB_HDRC_EP 0x100 /* offsets to registers in flat model */ -#define MUSB_HDRC_TXMAXP 0x00 /* 16 bit apparently */ -#define MUSB_HDRC_TXCSR 0x02 /* 16 bit apparently */ -#define MUSB_HDRC_CSR0 MUSB_HDRC_TXCSR /* re-used for EP0 */ -#define MUSB_HDRC_RXMAXP 0x04 /* 16 bit apparently */ -#define MUSB_HDRC_RXCSR 0x06 /* 16 bit apparently */ -#define MUSB_HDRC_RXCOUNT 0x08 /* 16 bit apparently */ -#define MUSB_HDRC_COUNT0 MUSB_HDRC_RXCOUNT /* re-used for EP0 */ -#define MUSB_HDRC_TXTYPE 0x0a /* 8 bit apparently */ -#define MUSB_HDRC_TYPE0 MUSB_HDRC_TXTYPE /* re-used for EP0 */ -#define MUSB_HDRC_TXINTERVAL 0x0b /* 8 bit apparently */ -#define MUSB_HDRC_NAKLIMIT0 MUSB_HDRC_TXINTERVAL /* re-used for EP0 */ -#define MUSB_HDRC_RXTYPE 0x0c /* 8 bit apparently */ -#define MUSB_HDRC_RXINTERVAL 0x0d /* 8 bit apparently */ -#define MUSB_HDRC_FIFOSIZE 0x0f /* 8 bit apparently */ -#define MUSB_HDRC_CONFIGDATA MGC_O_HDRC_FIFOSIZE /* re-used for EP0 */ +#define MUSB_HDRC_TXMAXP 0x00 /* 16 bit apparently */ +#define MUSB_HDRC_TXCSR 0x02 /* 16 bit apparently */ +#define MUSB_HDRC_CSR0 MUSB_HDRC_TXCSR /* re-used for EP0 */ +#define MUSB_HDRC_RXMAXP 0x04 /* 16 bit apparently */ +#define MUSB_HDRC_RXCSR 
0x06 /* 16 bit apparently */ +#define MUSB_HDRC_RXCOUNT 0x08 /* 16 bit apparently */ +#define MUSB_HDRC_COUNT0 MUSB_HDRC_RXCOUNT /* re-used for EP0 */ +#define MUSB_HDRC_TXTYPE 0x0a /* 8 bit apparently */ +#define MUSB_HDRC_TYPE0 MUSB_HDRC_TXTYPE /* re-used for EP0 */ +#define MUSB_HDRC_TXINTERVAL 0x0b /* 8 bit apparently */ +#define MUSB_HDRC_NAKLIMIT0 MUSB_HDRC_TXINTERVAL /* re-used for EP0 */ +#define MUSB_HDRC_RXTYPE 0x0c /* 8 bit apparently */ +#define MUSB_HDRC_RXINTERVAL 0x0d /* 8 bit apparently */ +#define MUSB_HDRC_FIFOSIZE 0x0f /* 8 bit apparently */ +#define MUSB_HDRC_CONFIGDATA MGC_O_HDRC_FIFOSIZE /* re-used for EP0 */ /* "Bus control" registers */ -#define MUSB_HDRC_TXFUNCADDR 0x00 -#define MUSB_HDRC_TXHUBADDR 0x02 -#define MUSB_HDRC_TXHUBPORT 0x03 +#define MUSB_HDRC_TXFUNCADDR 0x00 +#define MUSB_HDRC_TXHUBADDR 0x02 +#define MUSB_HDRC_TXHUBPORT 0x03 -#define MUSB_HDRC_RXFUNCADDR 0x04 -#define MUSB_HDRC_RXHUBADDR 0x06 -#define MUSB_HDRC_RXHUBPORT 0x07 +#define MUSB_HDRC_RXFUNCADDR 0x04 +#define MUSB_HDRC_RXHUBADDR 0x06 +#define MUSB_HDRC_RXHUBPORT 0x07 /* * MUSBHDRC Register bit masks */ /* POWER */ -#define MGC_M_POWER_ISOUPDATE 0x80 -#define MGC_M_POWER_SOFTCONN 0x40 -#define MGC_M_POWER_HSENAB 0x20 -#define MGC_M_POWER_HSMODE 0x10 -#define MGC_M_POWER_RESET 0x08 -#define MGC_M_POWER_RESUME 0x04 -#define MGC_M_POWER_SUSPENDM 0x02 -#define MGC_M_POWER_ENSUSPEND 0x01 +#define MGC_M_POWER_ISOUPDATE 0x80 +#define MGC_M_POWER_SOFTCONN 0x40 +#define MGC_M_POWER_HSENAB 0x20 +#define MGC_M_POWER_HSMODE 0x10 +#define MGC_M_POWER_RESET 0x08 +#define MGC_M_POWER_RESUME 0x04 +#define MGC_M_POWER_SUSPENDM 0x02 +#define MGC_M_POWER_ENSUSPEND 0x01 /* INTRUSB */ -#define MGC_M_INTR_SUSPEND 0x01 -#define MGC_M_INTR_RESUME 0x02 -#define MGC_M_INTR_RESET 0x04 -#define MGC_M_INTR_BABBLE 0x04 -#define MGC_M_INTR_SOF 0x08 -#define MGC_M_INTR_CONNECT 0x10 -#define MGC_M_INTR_DISCONNECT 0x20 -#define MGC_M_INTR_SESSREQ 0x40 -#define MGC_M_INTR_VBUSERROR 0x80 /* FOR SESSION END */ -#define MGC_M_INTR_EP0 0x01 /* FOR EP0 INTERRUPT */ +#define MGC_M_INTR_SUSPEND 0x01 +#define MGC_M_INTR_RESUME 0x02 +#define MGC_M_INTR_RESET 0x04 +#define MGC_M_INTR_BABBLE 0x04 +#define MGC_M_INTR_SOF 0x08 +#define MGC_M_INTR_CONNECT 0x10 +#define MGC_M_INTR_DISCONNECT 0x20 +#define MGC_M_INTR_SESSREQ 0x40 +#define MGC_M_INTR_VBUSERROR 0x80 /* FOR SESSION END */ +#define MGC_M_INTR_EP0 0x01 /* FOR EP0 INTERRUPT */ /* DEVCTL */ -#define MGC_M_DEVCTL_BDEVICE 0x80 -#define MGC_M_DEVCTL_FSDEV 0x40 -#define MGC_M_DEVCTL_LSDEV 0x20 -#define MGC_M_DEVCTL_VBUS 0x18 -#define MGC_S_DEVCTL_VBUS 3 -#define MGC_M_DEVCTL_HM 0x04 -#define MGC_M_DEVCTL_HR 0x02 -#define MGC_M_DEVCTL_SESSION 0x01 +#define MGC_M_DEVCTL_BDEVICE 0x80 +#define MGC_M_DEVCTL_FSDEV 0x40 +#define MGC_M_DEVCTL_LSDEV 0x20 +#define MGC_M_DEVCTL_VBUS 0x18 +#define MGC_S_DEVCTL_VBUS 3 +#define MGC_M_DEVCTL_HM 0x04 +#define MGC_M_DEVCTL_HR 0x02 +#define MGC_M_DEVCTL_SESSION 0x01 /* TESTMODE */ -#define MGC_M_TEST_FORCE_HOST 0x80 -#define MGC_M_TEST_FIFO_ACCESS 0x40 -#define MGC_M_TEST_FORCE_FS 0x20 -#define MGC_M_TEST_FORCE_HS 0x10 -#define MGC_M_TEST_PACKET 0x08 -#define MGC_M_TEST_K 0x04 -#define MGC_M_TEST_J 0x02 -#define MGC_M_TEST_SE0_NAK 0x01 +#define MGC_M_TEST_FORCE_HOST 0x80 +#define MGC_M_TEST_FIFO_ACCESS 0x40 +#define MGC_M_TEST_FORCE_FS 0x20 +#define MGC_M_TEST_FORCE_HS 0x10 +#define MGC_M_TEST_PACKET 0x08 +#define MGC_M_TEST_K 0x04 +#define MGC_M_TEST_J 0x02 +#define MGC_M_TEST_SE0_NAK 0x01 /* CSR0 */ -#define MGC_M_CSR0_FLUSHFIFO 0x0100 -#define 
MGC_M_CSR0_TXPKTRDY 0x0002 -#define MGC_M_CSR0_RXPKTRDY 0x0001 +#define MGC_M_CSR0_FLUSHFIFO 0x0100 +#define MGC_M_CSR0_TXPKTRDY 0x0002 +#define MGC_M_CSR0_RXPKTRDY 0x0001 /* CSR0 in Peripheral mode */ -#define MGC_M_CSR0_P_SVDSETUPEND 0x0080 -#define MGC_M_CSR0_P_SVDRXPKTRDY 0x0040 -#define MGC_M_CSR0_P_SENDSTALL 0x0020 -#define MGC_M_CSR0_P_SETUPEND 0x0010 -#define MGC_M_CSR0_P_DATAEND 0x0008 -#define MGC_M_CSR0_P_SENTSTALL 0x0004 +#define MGC_M_CSR0_P_SVDSETUPEND 0x0080 +#define MGC_M_CSR0_P_SVDRXPKTRDY 0x0040 +#define MGC_M_CSR0_P_SENDSTALL 0x0020 +#define MGC_M_CSR0_P_SETUPEND 0x0010 +#define MGC_M_CSR0_P_DATAEND 0x0008 +#define MGC_M_CSR0_P_SENTSTALL 0x0004 /* CSR0 in Host mode */ -#define MGC_M_CSR0_H_NO_PING 0x0800 -#define MGC_M_CSR0_H_WR_DATATOGGLE 0x0400 /* set to allow setting: */ -#define MGC_M_CSR0_H_DATATOGGLE 0x0200 /* data toggle control */ -#define MGC_M_CSR0_H_NAKTIMEOUT 0x0080 -#define MGC_M_CSR0_H_STATUSPKT 0x0040 -#define MGC_M_CSR0_H_REQPKT 0x0020 -#define MGC_M_CSR0_H_ERROR 0x0010 -#define MGC_M_CSR0_H_SETUPPKT 0x0008 -#define MGC_M_CSR0_H_RXSTALL 0x0004 +#define MGC_M_CSR0_H_NO_PING 0x0800 +#define MGC_M_CSR0_H_WR_DATATOGGLE 0x0400 /* set to allow setting: */ +#define MGC_M_CSR0_H_DATATOGGLE 0x0200 /* data toggle control */ +#define MGC_M_CSR0_H_NAKTIMEOUT 0x0080 +#define MGC_M_CSR0_H_STATUSPKT 0x0040 +#define MGC_M_CSR0_H_REQPKT 0x0020 +#define MGC_M_CSR0_H_ERROR 0x0010 +#define MGC_M_CSR0_H_SETUPPKT 0x0008 +#define MGC_M_CSR0_H_RXSTALL 0x0004 /* CONFIGDATA */ -#define MGC_M_CONFIGDATA_MPRXE 0x80 /* auto bulk pkt combining */ -#define MGC_M_CONFIGDATA_MPTXE 0x40 /* auto bulk pkt splitting */ -#define MGC_M_CONFIGDATA_BIGENDIAN 0x20 -#define MGC_M_CONFIGDATA_HBRXE 0x10 /* HB-ISO for RX */ -#define MGC_M_CONFIGDATA_HBTXE 0x08 /* HB-ISO for TX */ -#define MGC_M_CONFIGDATA_DYNFIFO 0x04 /* dynamic FIFO sizing */ -#define MGC_M_CONFIGDATA_SOFTCONE 0x02 /* SoftConnect */ -#define MGC_M_CONFIGDATA_UTMIDW 0x01 /* Width, 0 => 8b, 1 => 16b */ +#define MGC_M_CONFIGDATA_MPRXE 0x80 /* auto bulk pkt combining */ +#define MGC_M_CONFIGDATA_MPTXE 0x40 /* auto bulk pkt splitting */ +#define MGC_M_CONFIGDATA_BIGENDIAN 0x20 +#define MGC_M_CONFIGDATA_HBRXE 0x10 /* HB-ISO for RX */ +#define MGC_M_CONFIGDATA_HBTXE 0x08 /* HB-ISO for TX */ +#define MGC_M_CONFIGDATA_DYNFIFO 0x04 /* dynamic FIFO sizing */ +#define MGC_M_CONFIGDATA_SOFTCONE 0x02 /* SoftConnect */ +#define MGC_M_CONFIGDATA_UTMIDW 0x01 /* Width, 0 => 8b, 1 => 16b */ /* TXCSR in Peripheral and Host mode */ -#define MGC_M_TXCSR_AUTOSET 0x8000 -#define MGC_M_TXCSR_ISO 0x4000 -#define MGC_M_TXCSR_MODE 0x2000 -#define MGC_M_TXCSR_DMAENAB 0x1000 -#define MGC_M_TXCSR_FRCDATATOG 0x0800 -#define MGC_M_TXCSR_DMAMODE 0x0400 -#define MGC_M_TXCSR_CLRDATATOG 0x0040 -#define MGC_M_TXCSR_FLUSHFIFO 0x0008 -#define MGC_M_TXCSR_FIFONOTEMPTY 0x0002 -#define MGC_M_TXCSR_TXPKTRDY 0x0001 +#define MGC_M_TXCSR_AUTOSET 0x8000 +#define MGC_M_TXCSR_ISO 0x4000 +#define MGC_M_TXCSR_MODE 0x2000 +#define MGC_M_TXCSR_DMAENAB 0x1000 +#define MGC_M_TXCSR_FRCDATATOG 0x0800 +#define MGC_M_TXCSR_DMAMODE 0x0400 +#define MGC_M_TXCSR_CLRDATATOG 0x0040 +#define MGC_M_TXCSR_FLUSHFIFO 0x0008 +#define MGC_M_TXCSR_FIFONOTEMPTY 0x0002 +#define MGC_M_TXCSR_TXPKTRDY 0x0001 /* TXCSR in Peripheral mode */ -#define MGC_M_TXCSR_P_INCOMPTX 0x0080 -#define MGC_M_TXCSR_P_SENTSTALL 0x0020 -#define MGC_M_TXCSR_P_SENDSTALL 0x0010 -#define MGC_M_TXCSR_P_UNDERRUN 0x0004 +#define MGC_M_TXCSR_P_INCOMPTX 0x0080 +#define MGC_M_TXCSR_P_SENTSTALL 0x0020 +#define MGC_M_TXCSR_P_SENDSTALL 0x0010 
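The hunks above only re-align the MUSB (Mentor Graphics) register and bit-mask macros; the values themselves are unchanged. For readers following the MGC_M_*/MGC_S_* naming convention (M_ = mask, S_ = shift), here is a minimal standalone sketch of the usual mask-and-shift decode. The mask and shift values are copied from the defines above; the helper, the sample readback value and the printed fields are purely illustrative and are not code from this patch.

#include <stdint.h>
#include <stdio.h>

/* Values mirrored from the MUSB DEVCTL defines above; the helper and
 * the sample register value are illustrative only. */
#define MGC_M_DEVCTL_VBUS    0x18   /* M_ = bit mask */
#define MGC_S_DEVCTL_VBUS    3      /* S_ = shift    */
#define MGC_M_DEVCTL_HM      0x04
#define MGC_M_DEVCTL_SESSION 0x01

static unsigned devctl_vbus_level(uint8_t devctl)
{
    /* Extract the two-bit VBUS level field (0..3). */
    return (devctl & MGC_M_DEVCTL_VBUS) >> MGC_S_DEVCTL_VBUS;
}

int main(void)
{
    uint8_t devctl = 0x1d;   /* hypothetical readback value */

    printf("VBUS level: %u\n", devctl_vbus_level(devctl));
    printf("host mode:  %s\n", (devctl & MGC_M_DEVCTL_HM) ? "yes" : "no");
    printf("session:    %s\n", (devctl & MGC_M_DEVCTL_SESSION) ? "yes" : "no");
    return 0;
}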
+#define MGC_M_TXCSR_P_UNDERRUN 0x0004 /* TXCSR in Host mode */ -#define MGC_M_TXCSR_H_WR_DATATOGGLE 0x0200 -#define MGC_M_TXCSR_H_DATATOGGLE 0x0100 -#define MGC_M_TXCSR_H_NAKTIMEOUT 0x0080 -#define MGC_M_TXCSR_H_RXSTALL 0x0020 -#define MGC_M_TXCSR_H_ERROR 0x0004 +#define MGC_M_TXCSR_H_WR_DATATOGGLE 0x0200 +#define MGC_M_TXCSR_H_DATATOGGLE 0x0100 +#define MGC_M_TXCSR_H_NAKTIMEOUT 0x0080 +#define MGC_M_TXCSR_H_RXSTALL 0x0020 +#define MGC_M_TXCSR_H_ERROR 0x0004 /* RXCSR in Peripheral and Host mode */ -#define MGC_M_RXCSR_AUTOCLEAR 0x8000 -#define MGC_M_RXCSR_DMAENAB 0x2000 -#define MGC_M_RXCSR_DISNYET 0x1000 -#define MGC_M_RXCSR_DMAMODE 0x0800 -#define MGC_M_RXCSR_INCOMPRX 0x0100 -#define MGC_M_RXCSR_CLRDATATOG 0x0080 -#define MGC_M_RXCSR_FLUSHFIFO 0x0010 -#define MGC_M_RXCSR_DATAERROR 0x0008 -#define MGC_M_RXCSR_FIFOFULL 0x0002 -#define MGC_M_RXCSR_RXPKTRDY 0x0001 +#define MGC_M_RXCSR_AUTOCLEAR 0x8000 +#define MGC_M_RXCSR_DMAENAB 0x2000 +#define MGC_M_RXCSR_DISNYET 0x1000 +#define MGC_M_RXCSR_DMAMODE 0x0800 +#define MGC_M_RXCSR_INCOMPRX 0x0100 +#define MGC_M_RXCSR_CLRDATATOG 0x0080 +#define MGC_M_RXCSR_FLUSHFIFO 0x0010 +#define MGC_M_RXCSR_DATAERROR 0x0008 +#define MGC_M_RXCSR_FIFOFULL 0x0002 +#define MGC_M_RXCSR_RXPKTRDY 0x0001 /* RXCSR in Peripheral mode */ -#define MGC_M_RXCSR_P_ISO 0x4000 -#define MGC_M_RXCSR_P_SENTSTALL 0x0040 -#define MGC_M_RXCSR_P_SENDSTALL 0x0020 -#define MGC_M_RXCSR_P_OVERRUN 0x0004 +#define MGC_M_RXCSR_P_ISO 0x4000 +#define MGC_M_RXCSR_P_SENTSTALL 0x0040 +#define MGC_M_RXCSR_P_SENDSTALL 0x0020 +#define MGC_M_RXCSR_P_OVERRUN 0x0004 /* RXCSR in Host mode */ -#define MGC_M_RXCSR_H_AUTOREQ 0x4000 -#define MGC_M_RXCSR_H_WR_DATATOGGLE 0x0400 -#define MGC_M_RXCSR_H_DATATOGGLE 0x0200 -#define MGC_M_RXCSR_H_RXSTALL 0x0040 -#define MGC_M_RXCSR_H_REQPKT 0x0020 -#define MGC_M_RXCSR_H_ERROR 0x0004 +#define MGC_M_RXCSR_H_AUTOREQ 0x4000 +#define MGC_M_RXCSR_H_WR_DATATOGGLE 0x0400 +#define MGC_M_RXCSR_H_DATATOGGLE 0x0200 +#define MGC_M_RXCSR_H_RXSTALL 0x0040 +#define MGC_M_RXCSR_H_REQPKT 0x0020 +#define MGC_M_RXCSR_H_ERROR 0x0004 /* HUBADDR */ -#define MGC_M_HUBADDR_MULTI_TT 0x80 +#define MGC_M_HUBADDR_MULTI_TT 0x80 /* ULPI: Added in HDRC 1.9(?) & MHDRC 1.4 */ -#define MGC_M_ULPI_VBCTL_USEEXTVBUSIND 0x02 -#define MGC_M_ULPI_VBCTL_USEEXTVBUS 0x01 -#define MGC_M_ULPI_REGCTL_INT_ENABLE 0x08 -#define MGC_M_ULPI_REGCTL_READNOTWRITE 0x04 -#define MGC_M_ULPI_REGCTL_COMPLETE 0x02 -#define MGC_M_ULPI_REGCTL_REG 0x01 +#define MGC_M_ULPI_VBCTL_USEEXTVBUSIND 0x02 +#define MGC_M_ULPI_VBCTL_USEEXTVBUS 0x01 +#define MGC_M_ULPI_REGCTL_INT_ENABLE 0x08 +#define MGC_M_ULPI_REGCTL_READNOTWRITE 0x04 +#define MGC_M_ULPI_REGCTL_COMPLETE 0x02 +#define MGC_M_ULPI_REGCTL_REG 0x01 /* #define MUSB_DEBUG */ @@ -296,7 +296,7 @@ struct MUSBEndPoint { uint8_t interval[2]; uint8_t config; uint8_t fifosize; - int timeout[2]; /* Always in microframes */ + int timeout[2]; /* Always in microframes */ uint8_t *buf[2]; int fifolen[2]; @@ -542,7 +542,7 @@ static void musb_cb_tick1(void *opaque) ep->delayed_cb[1](&ep->packey[1].p, opaque); } -#define musb_cb_tick (dir ? musb_cb_tick1 : musb_cb_tick0) +#define musb_cb_tick (dir ? musb_cb_tick1 : musb_cb_tick0) static void musb_schedule_cb(USBPort *port, USBPacket *packey) { @@ -1323,7 +1323,7 @@ static void musb_writeb(void *opaque, hwaddr addr, uint32_t value) /* Negotiate high-speed operation if MGC_M_POWER_HSENAB is set. 
*/ if ((value & MGC_M_POWER_HSENAB) && s->port.dev->speed == USB_SPEED_HIGH) - s->power |= MGC_M_POWER_HSMODE; /* Success */ + s->power |= MGC_M_POWER_HSMODE; /* Success */ /* Restart frame counting. */ } if (value & MGC_M_POWER_SUSPENDM) { diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c index 8fb4e437bd..8306824961 100644 --- a/hw/usb/hcd-ohci.c +++ b/hw/usb/hcd-ohci.c @@ -58,8 +58,6 @@ struct ohci_hcca { #define ED_WBACK_OFFSET offsetof(struct ohci_ed, head) #define ED_WBACK_SIZE 4 -static void ohci_async_cancel_device(OHCIState *ohci, USBDevice *dev); - /* Bitfields for the first word of an Endpoint Desciptor. */ #define OHCI_ED_FA_SHIFT 0 #define OHCI_ED_FA_MASK (0x7f<opaque; - OHCIPort *port = &s->rhport[port1->index]; - uint32_t old_state = port->ctrl; - - /* set connect status */ - port->ctrl |= OHCI_PORT_CCS | OHCI_PORT_CSC; - - /* update speed */ - if (port->port.dev->speed == USB_SPEED_LOW) { - port->ctrl |= OHCI_PORT_LSDA; - } else { - port->ctrl &= ~OHCI_PORT_LSDA; - } - - /* notify of remote-wakeup */ - if ((s->ctl & OHCI_CTL_HCFS) == OHCI_USB_SUSPEND) { - ohci_set_interrupt(s, OHCI_INTR_RD); - } - - trace_usb_ohci_port_attach(port1->index); - - if (old_state != port->ctrl) { - ohci_set_interrupt(s, OHCI_INTR_RHSC); - } -} - -static void ohci_detach(USBPort *port1) -{ - OHCIState *s = port1->opaque; - OHCIPort *port = &s->rhport[port1->index]; - uint32_t old_state = port->ctrl; - - ohci_async_cancel_device(s, port1->dev); - - /* set connect status */ - if (port->ctrl & OHCI_PORT_CCS) { - port->ctrl &= ~OHCI_PORT_CCS; - port->ctrl |= OHCI_PORT_CSC; - } - /* disable port */ - if (port->ctrl & OHCI_PORT_PES) { - port->ctrl &= ~OHCI_PORT_PES; - port->ctrl |= OHCI_PORT_PESC; - } - trace_usb_ohci_port_detach(port1->index); - - if (old_state != port->ctrl) { - ohci_set_interrupt(s, OHCI_INTR_RHSC); - } -} - -static void ohci_wakeup(USBPort *port1) -{ - OHCIState *s = port1->opaque; - OHCIPort *port = &s->rhport[port1->index]; - uint32_t intr = 0; - if (port->ctrl & OHCI_PORT_PSS) { - trace_usb_ohci_port_wakeup(port1->index); - port->ctrl |= OHCI_PORT_PSSC; - port->ctrl &= ~OHCI_PORT_PSS; - intr = OHCI_INTR_RHSC; - } - /* Note that the controller can be suspended even if this port is not */ - if ((s->ctl & OHCI_CTL_HCFS) == OHCI_USB_SUSPEND) { - trace_usb_ohci_remote_wakeup(s->name); - /* This is the one state transition the controller can do by itself */ - s->ctl &= ~OHCI_CTL_HCFS; - s->ctl |= OHCI_USB_RESUME; - /* In suspend mode only ResumeDetected is possible, not RHSC: - * see the OHCI spec 5.1.2.3. 
- */ - intr = OHCI_INTR_RD; - } - ohci_set_interrupt(s, intr); -} - -static void ohci_child_detach(USBPort *port1, USBDevice *child) -{ - OHCIState *s = port1->opaque; - - ohci_async_cancel_device(s, child); -} - static USBDevice *ohci_find_device(OHCIState *ohci, uint8_t addr) { USBDevice *dev; @@ -369,6 +281,10 @@ void ohci_stop_endpoints(OHCIState *ohci) USBDevice *dev; int i, j; + if (ohci->async_td) { + usb_cancel_packet(&ohci->usb_packet); + ohci->async_td = 0; + } for (i = 0; i < ohci->num_ports; i++) { dev = ohci->rhport[i].port.dev; if (dev && dev->attached) { @@ -398,10 +314,6 @@ static void ohci_roothub_reset(OHCIState *ohci) usb_port_reset(&port->port); } } - if (ohci->async_td) { - usb_cancel_packet(&ohci->usb_packet); - ohci->async_td = 0; - } ohci_stop_endpoints(ohci); } @@ -452,7 +364,8 @@ static inline int get_dwords(OHCIState *ohci, addr += ohci->localmem_base; for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { - if (dma_memory_read(ohci->as, addr, buf, sizeof(*buf))) { + if (dma_memory_read(ohci->as, addr, + buf, sizeof(*buf), MEMTXATTRS_UNSPECIFIED)) { return -1; } *buf = le32_to_cpu(*buf); @@ -471,7 +384,8 @@ static inline int put_dwords(OHCIState *ohci, for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { uint32_t tmp = cpu_to_le32(*buf); - if (dma_memory_write(ohci->as, addr, &tmp, sizeof(tmp))) { + if (dma_memory_write(ohci->as, addr, + &tmp, sizeof(tmp), MEMTXATTRS_UNSPECIFIED)) { return -1; } } @@ -488,7 +402,8 @@ static inline int get_words(OHCIState *ohci, addr += ohci->localmem_base; for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { - if (dma_memory_read(ohci->as, addr, buf, sizeof(*buf))) { + if (dma_memory_read(ohci->as, addr, + buf, sizeof(*buf), MEMTXATTRS_UNSPECIFIED)) { return -1; } *buf = le16_to_cpu(*buf); @@ -507,7 +422,8 @@ static inline int put_words(OHCIState *ohci, for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { uint16_t tmp = cpu_to_le16(*buf); - if (dma_memory_write(ohci->as, addr, &tmp, sizeof(tmp))) { + if (dma_memory_write(ohci->as, addr, + &tmp, sizeof(tmp), MEMTXATTRS_UNSPECIFIED)) { return -1; } } @@ -537,8 +453,8 @@ static inline int ohci_read_iso_td(OHCIState *ohci, static inline int ohci_read_hcca(OHCIState *ohci, dma_addr_t addr, struct ohci_hcca *hcca) { - return dma_memory_read(ohci->as, addr + ohci->localmem_base, - hcca, sizeof(*hcca)); + return dma_memory_read(ohci->as, addr + ohci->localmem_base, hcca, + sizeof(*hcca), MEMTXATTRS_UNSPECIFIED); } static inline int ohci_put_ed(OHCIState *ohci, @@ -572,7 +488,7 @@ static inline int ohci_put_hcca(OHCIState *ohci, return dma_memory_write(ohci->as, addr + ohci->localmem_base + HCCA_WRITEBACK_OFFSET, (char *)hcca + HCCA_WRITEBACK_OFFSET, - HCCA_WRITEBACK_SIZE); + HCCA_WRITEBACK_SIZE, MEMTXATTRS_UNSPECIFIED); } /* Read/Write the contents of a TD from/to main memory. 
*/ @@ -586,7 +502,8 @@ static int ohci_copy_td(OHCIState *ohci, struct ohci_td *td, if (n > len) n = len; - if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, n, dir)) { + if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, + n, dir, MEMTXATTRS_UNSPECIFIED)) { return -1; } if (n == len) { @@ -595,7 +512,7 @@ static int ohci_copy_td(OHCIState *ohci, struct ohci_td *td, ptr = td->be & ~0xfffu; buf += n; if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, - len - n, dir)) { + len - n, dir, MEMTXATTRS_UNSPECIFIED)) { return -1; } return 0; @@ -613,7 +530,8 @@ static int ohci_copy_iso_td(OHCIState *ohci, if (n > len) n = len; - if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, n, dir)) { + if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, + n, dir, MEMTXATTRS_UNSPECIFIED)) { return -1; } if (n == len) { @@ -622,27 +540,15 @@ static int ohci_copy_iso_td(OHCIState *ohci, ptr = end_addr & ~0xfffu; buf += n; if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, - len - n, dir)) { + len - n, dir, MEMTXATTRS_UNSPECIFIED)) { return -1; } return 0; } -static void ohci_process_lists(OHCIState *ohci, int completion); - -static void ohci_async_complete_packet(USBPort *port, USBPacket *packet) -{ - OHCIState *ohci = container_of(packet, OHCIState, usb_packet); - - trace_usb_ohci_async_complete(); - ohci->async_complete = true; - ohci_process_lists(ohci, 1); -} - #define USUB(a, b) ((int16_t)((uint16_t)(a) - (uint16_t)(b))) -static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed, - int completion) +static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed) { int dir; size_t len = 0; @@ -652,6 +558,9 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed, int i; USBDevice *dev; USBEndpoint *ep; + USBPacket *pkt; + uint8_t buf[8192]; + bool int_req; struct ohci_iso_td iso_td; uint32_t addr; uint16_t starting_frame; @@ -662,6 +571,11 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed, addr = ed->head & OHCI_DPTR_MASK; + if (addr == 0) { + ohci_die(ohci); + return 1; + } + if (ohci_read_iso_td(ohci, addr, &iso_td)) { trace_usb_ohci_iso_td_read_failed(addr); ohci_die(ohci); @@ -786,40 +700,42 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed, } else { len = end_addr - start_addr + 1; } - if (len > sizeof(ohci->usb_buf)) { - len = sizeof(ohci->usb_buf); + if (len > sizeof(buf)) { + len = sizeof(buf); } if (len && dir != OHCI_TD_DIR_IN) { - if (ohci_copy_iso_td(ohci, start_addr, end_addr, ohci->usb_buf, len, + if (ohci_copy_iso_td(ohci, start_addr, end_addr, buf, len, DMA_DIRECTION_TO_DEVICE)) { ohci_die(ohci); return 1; } } - if (!completion) { - bool int_req = relative_frame_number == frame_count && - OHCI_BM(iso_td.flags, TD_DI) == 0; - dev = ohci_find_device(ohci, OHCI_BM(ed->flags, ED_FA)); - if (dev == NULL) { - trace_usb_ohci_td_dev_error(); - return 1; - } - ep = usb_ep_get(dev, pid, OHCI_BM(ed->flags, ED_EN)); - usb_packet_setup(&ohci->usb_packet, pid, ep, 0, addr, false, int_req); - usb_packet_addbuf(&ohci->usb_packet, ohci->usb_buf, len); - usb_handle_packet(dev, &ohci->usb_packet); - if (ohci->usb_packet.status == USB_RET_ASYNC) { - usb_device_flush_ep_queue(dev, ep); - return 1; - } + dev = ohci_find_device(ohci, OHCI_BM(ed->flags, ED_FA)); + if (dev == NULL) { + trace_usb_ohci_td_dev_error(); + return 1; } - if (ohci->usb_packet.status == USB_RET_SUCCESS) { - ret = ohci->usb_packet.actual_length; + ep = usb_ep_get(dev, pid, OHCI_BM(ed->flags, ED_EN)); + pkt = g_new0(USBPacket, 
1); + usb_packet_init(pkt); + int_req = relative_frame_number == frame_count && + OHCI_BM(iso_td.flags, TD_DI) == 0; + usb_packet_setup(pkt, pid, ep, 0, addr, false, int_req); + usb_packet_addbuf(pkt, buf, len); + usb_handle_packet(dev, pkt); + if (pkt->status == USB_RET_ASYNC) { + usb_device_flush_ep_queue(dev, ep); + g_free(pkt); + return 1; + } + if (pkt->status == USB_RET_SUCCESS) { + ret = pkt->actual_length; } else { - ret = ohci->usb_packet.status; + ret = pkt->status; } + g_free(pkt); trace_usb_ohci_iso_td_so(start_offset, end_offset, start_addr, end_addr, str, len, ret); @@ -827,7 +743,7 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed, /* Writeback */ if (dir == OHCI_TD_DIR_IN && ret >= 0 && ret <= len) { /* IN transfer succeeded */ - if (ohci_copy_iso_td(ohci, start_addr, end_addr, ohci->usb_buf, ret, + if (ohci_copy_iso_td(ohci, start_addr, end_addr, buf, ret, DMA_DIRECTION_FROM_DEVICE)) { ohci_die(ohci); return 1; @@ -894,13 +810,14 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed, return 1; } +#define HEX_CHAR_PER_LINE 16 + static void ohci_td_pkt(const char *msg, const uint8_t *buf, size_t len) { bool print16; bool printall; - const int width = 16; int i; - char tmp[3 * width + 1]; + char tmp[3 * HEX_CHAR_PER_LINE + 1]; char *p = tmp; print16 = !!trace_event_get_state_backends(TRACE_USB_OHCI_TD_PKT_SHORT); @@ -911,7 +828,7 @@ static void ohci_td_pkt(const char *msg, const uint8_t *buf, size_t len) } for (i = 0; ; i++) { - if (i && (!(i % width) || (i == len))) { + if (i && (!(i % HEX_CHAR_PER_LINE) || (i == len))) { if (!printall) { trace_usb_ohci_td_pkt_short(msg, tmp); break; @@ -947,6 +864,11 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed) int completion; addr = ed->head & OHCI_DPTR_MASK; + if (addr == 0) { + ohci_die(ohci); + return 1; + } + /* See if this TD has already been submitted to the device. */ completion = (addr == ohci->async_td); if (completion && !ohci->async_complete) { @@ -1027,21 +949,21 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed) ohci->async_td = 0; ohci->async_complete = false; } else { - if (ohci->async_td) { - /* ??? The hardware should allow one active packet per - endpoint. We only allow one active packet per controller. - This should be sufficient as long as devices respond in a - timely manner. - */ - trace_usb_ohci_td_too_many_pending(); - return 1; - } dev = ohci_find_device(ohci, OHCI_BM(ed->flags, ED_FA)); if (dev == NULL) { trace_usb_ohci_td_dev_error(); return 1; } ep = usb_ep_get(dev, pid, OHCI_BM(ed->flags, ED_EN)); + if (ohci->async_td) { + /* ??? The hardware should allow one active packet per + endpoint. We only allow one active packet per controller. + This should be sufficient as long as devices respond in a + timely manner. + */ + trace_usb_ohci_td_too_many_pending(ep->nr); + return 1; + } usb_packet_setup(&ohci->usb_packet, pid, ep, 0, addr, !flag_r, OHCI_BM(td.flags, TD_DI) == 0); usb_packet_addbuf(&ohci->usb_packet, ohci->usb_buf, pktlen); @@ -1154,7 +1076,7 @@ exit_no_retire: } /* Service an endpoint list. Returns nonzero if active TD were found. 
*/ -static int ohci_service_ed_list(OHCIState *ohci, uint32_t head, int completion) +static int ohci_service_ed_list(OHCIState *ohci, uint32_t head) { struct ohci_ed ed; uint32_t next_ed; @@ -1205,8 +1127,9 @@ static int ohci_service_ed_list(OHCIState *ohci, uint32_t head, int completion) break; } else { /* Handle isochronous endpoints */ - if (ohci_service_iso_td(ohci, &ed, completion)) + if (ohci_service_iso_td(ohci, &ed)) { break; + } } } @@ -1233,20 +1156,20 @@ static void ohci_sof(OHCIState *ohci) } /* Process Control and Bulk lists. */ -static void ohci_process_lists(OHCIState *ohci, int completion) +static void ohci_process_lists(OHCIState *ohci) { if ((ohci->ctl & OHCI_CTL_CLE) && (ohci->status & OHCI_STATUS_CLF)) { if (ohci->ctrl_cur && ohci->ctrl_cur != ohci->ctrl_head) { trace_usb_ohci_process_lists(ohci->ctrl_head, ohci->ctrl_cur); } - if (!ohci_service_ed_list(ohci, ohci->ctrl_head, completion)) { + if (!ohci_service_ed_list(ohci, ohci->ctrl_head)) { ohci->ctrl_cur = 0; ohci->status &= ~OHCI_STATUS_CLF; } } if ((ohci->ctl & OHCI_CTL_BLE) && (ohci->status & OHCI_STATUS_BLF)) { - if (!ohci_service_ed_list(ohci, ohci->bulk_head, completion)) { + if (!ohci_service_ed_list(ohci, ohci->bulk_head)) { ohci->bulk_cur = 0; ohci->status &= ~OHCI_STATUS_BLF; } @@ -1270,19 +1193,15 @@ static void ohci_frame_boundary(void *opaque) int n; n = ohci->frame_number & 0x1f; - ohci_service_ed_list(ohci, le32_to_cpu(hcca.intr[n]), 0); + ohci_service_ed_list(ohci, le32_to_cpu(hcca.intr[n])); } /* Cancel all pending packets if either of the lists has been disabled. */ if (ohci->old_ctl & (~ohci->ctl) & (OHCI_CTL_BLE | OHCI_CTL_CLE)) { - if (ohci->async_td) { - usb_cancel_packet(&ohci->usb_packet); - ohci->async_td = 0; - } ohci_stop_endpoints(ohci); } ohci->old_ctl = ohci->ctl; - ohci_process_lists(ohci, 0); + ohci_process_lists(ohci); /* Stop if UnrecoverableError happened or ohci_sof will crash */ if (ohci->intr_status & OHCI_INTR_UE) { @@ -1295,6 +1214,8 @@ static void ohci_frame_boundary(void *opaque) /* Increment frame number and take care of endianness. */ ohci->frame_number = (ohci->frame_number + 1) & 0xffff; hcca.frame = cpu_to_le16(ohci->frame_number); + /* When the HC updates frame number, set pad to 0. 
Ref OHCI Spec 4.4.1*/ + hcca.pad = 0; if (ohci->done_count == 0 && !(ohci->intr_status & OHCI_INTR_WD)) { if (!ohci->done) @@ -1791,8 +1712,45 @@ static void ohci_mem_write(void *opaque, } } -static void ohci_async_cancel_device(OHCIState *ohci, USBDevice *dev) +static const MemoryRegionOps ohci_mem_ops = { + .read = ohci_mem_read, + .write = ohci_mem_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +/* USBPortOps */ +static void ohci_attach(USBPort *port1) { + OHCIState *s = port1->opaque; + OHCIPort *port = &s->rhport[port1->index]; + uint32_t old_state = port->ctrl; + + /* set connect status */ + port->ctrl |= OHCI_PORT_CCS | OHCI_PORT_CSC; + + /* update speed */ + if (port->port.dev->speed == USB_SPEED_LOW) { + port->ctrl |= OHCI_PORT_LSDA; + } else { + port->ctrl &= ~OHCI_PORT_LSDA; + } + + /* notify of remote-wakeup */ + if ((s->ctl & OHCI_CTL_HCFS) == OHCI_USB_SUSPEND) { + ohci_set_interrupt(s, OHCI_INTR_RD); + } + + trace_usb_ohci_port_attach(port1->index); + + if (old_state != port->ctrl) { + ohci_set_interrupt(s, OHCI_INTR_RHSC); + } +} + +static void ohci_child_detach(USBPort *port1, USBDevice *dev) +{ + OHCIState *ohci = port1->opaque; + if (ohci->async_td && usb_packet_is_inflight(&ohci->usb_packet) && ohci->usb_packet.ep->dev == dev) { @@ -1801,11 +1759,65 @@ static void ohci_async_cancel_device(OHCIState *ohci, USBDevice *dev) } } -static const MemoryRegionOps ohci_mem_ops = { - .read = ohci_mem_read, - .write = ohci_mem_write, - .endianness = DEVICE_LITTLE_ENDIAN, -}; +static void ohci_detach(USBPort *port1) +{ + OHCIState *s = port1->opaque; + OHCIPort *port = &s->rhport[port1->index]; + uint32_t old_state = port->ctrl; + + ohci_child_detach(port1, port1->dev); + + /* set connect status */ + if (port->ctrl & OHCI_PORT_CCS) { + port->ctrl &= ~OHCI_PORT_CCS; + port->ctrl |= OHCI_PORT_CSC; + } + /* disable port */ + if (port->ctrl & OHCI_PORT_PES) { + port->ctrl &= ~OHCI_PORT_PES; + port->ctrl |= OHCI_PORT_PESC; + } + trace_usb_ohci_port_detach(port1->index); + + if (old_state != port->ctrl) { + ohci_set_interrupt(s, OHCI_INTR_RHSC); + } +} + +static void ohci_wakeup(USBPort *port1) +{ + OHCIState *s = port1->opaque; + OHCIPort *port = &s->rhport[port1->index]; + uint32_t intr = 0; + if (port->ctrl & OHCI_PORT_PSS) { + trace_usb_ohci_port_wakeup(port1->index); + port->ctrl |= OHCI_PORT_PSSC; + port->ctrl &= ~OHCI_PORT_PSS; + intr = OHCI_INTR_RHSC; + } + /* Note that the controller can be suspended even if this port is not */ + if ((s->ctl & OHCI_CTL_HCFS) == OHCI_USB_SUSPEND) { + trace_usb_ohci_remote_wakeup(s->name); + /* This is the one state transition the controller can do by itself */ + s->ctl &= ~OHCI_CTL_HCFS; + s->ctl |= OHCI_USB_RESUME; + /* + * In suspend mode only ResumeDetected is possible, not RHSC: + * see the OHCI spec 5.1.2.3. 
+ */ + intr = OHCI_INTR_RD; + } + ohci_set_interrupt(s, intr); +} + +static void ohci_async_complete_packet(USBPort *port, USBPacket *packet) +{ + OHCIState *ohci = container_of(packet, OHCIState, usb_packet); + + trace_usb_ohci_async_complete(); + ohci->async_complete = true; + ohci_process_lists(ohci); +} static USBPortOps ohci_port_ops = { .attach = ohci_attach, diff --git a/hw/usb/hcd-uhci.c b/hw/usb/hcd-uhci.c index 0cb02a6432..d1b5657d72 100644 --- a/hw/usb/hcd-uhci.c +++ b/hw/usb/hcd-uhci.c @@ -31,6 +31,7 @@ #include "hw/usb/uhci-regs.h" #include "migration/vmstate.h" #include "hw/pci/pci.h" +#include "hw/irq.h" #include "hw/qdev-properties.h" #include "qapi/error.h" #include "qemu/timer.h" @@ -290,7 +291,7 @@ static UHCIAsync *uhci_async_find_td(UHCIState *s, uint32_t td_addr) static void uhci_update_irq(UHCIState *s) { - int level; + int level = 0; if (((s->status2 & 1) && (s->intr & (1 << 2))) || ((s->status2 & 2) && (s->intr & (1 << 3))) || ((s->status & UHCI_STS_USBERR) && (s->intr & (1 << 0))) || @@ -298,10 +299,8 @@ static void uhci_update_irq(UHCIState *s) (s->status & UHCI_STS_HSERR) || (s->status & UHCI_STS_HCPERR)) { level = 1; - } else { - level = 0; } - pci_set_irq(&s->dev, level); + qemu_set_irq(s->irq, level); } static void uhci_reset(DeviceState *dev) @@ -1170,9 +1169,9 @@ void usb_uhci_common_realize(PCIDevice *dev, Error **errp) pci_conf[PCI_CLASS_PROG] = 0x00; /* TODO: reset value should be 0. */ - pci_conf[USB_SBRN] = USB_RELEASE_1; // release number - + pci_conf[USB_SBRN] = USB_RELEASE_1; /* release number */ pci_config_set_interrupt_pin(pci_conf, u->info.irq_pin + 1); + s->irq = pci_allocate_irq(dev); if (s->masterbus) { USBPort *ports[NB_PORTS]; @@ -1285,6 +1284,9 @@ void uhci_data_class_init(ObjectClass *klass, void *data) } else { device_class_set_props(dc, uhci_properties_standalone); } + if (info->notuser) { + dc->user_creatable = false; + } u->info = *info; } diff --git a/hw/usb/hcd-uhci.h b/hw/usb/hcd-uhci.h index e61d8fcb19..c85ab7868e 100644 --- a/hw/usb/hcd-uhci.h +++ b/hw/usb/hcd-uhci.h @@ -60,7 +60,7 @@ typedef struct UHCIState { uint32_t frame_bandwidth; bool completions_only; UHCIPort ports[NB_PORTS]; - + qemu_irq irq; /* Interrupts that should be raised at the end of the current frame. 
*/ uint32_t pending_int_mask; @@ -85,6 +85,7 @@ typedef struct UHCIInfo { uint8_t irq_pin; void (*realize)(PCIDevice *dev, Error **errp); bool unplug; + bool notuser; /* disallow user_creatable */ } UHCIInfo; void uhci_data_class_init(ObjectClass *klass, void *data); diff --git a/hw/usb/hcd-xhci-pci.c b/hw/usb/hcd-xhci-pci.c index e934b1a5b1..643d4643e4 100644 --- a/hw/usb/hcd-xhci-pci.c +++ b/hw/usb/hcd-xhci-pci.c @@ -85,7 +85,7 @@ static void xhci_pci_reset(DeviceState *dev) { XHCIPciState *s = XHCI_PCI(dev); - device_legacy_reset(DEVICE(&s->xhci)); + device_cold_reset(DEVICE(&s->xhci)); } static int xhci_pci_vmstate_post_load(void *opaque, int version_id) diff --git a/hw/usb/hcd-xhci-sysbus.c b/hw/usb/hcd-xhci-sysbus.c index a14e438196..faf57b4797 100644 --- a/hw/usb/hcd-xhci-sysbus.c +++ b/hw/usb/hcd-xhci-sysbus.c @@ -29,7 +29,7 @@ void xhci_sysbus_reset(DeviceState *dev) { XHCISysbusState *s = XHCI_SYSBUS(dev); - device_legacy_reset(DEVICE(&s->xhci)); + device_cold_reset(DEVICE(&s->xhci)); } static void xhci_sysbus_realize(DeviceState *dev, Error **errp) diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c index e01700039b..b89b618ec2 100644 --- a/hw/usb/hcd-xhci.c +++ b/hw/usb/hcd-xhci.c @@ -21,6 +21,7 @@ #include "qemu/osdep.h" #include "qemu/timer.h" +#include "qemu/log.h" #include "qemu/module.h" #include "qemu/queue.h" #include "migration/vmstate.h" @@ -462,6 +463,12 @@ static void xhci_mfwrap_timer(void *opaque) xhci_mfwrap_update(xhci); } +static void xhci_die(XHCIState *xhci) +{ + xhci->usbsts |= USBSTS_HCE; + DPRINTF("xhci: asserted controller error\n"); +} + static inline dma_addr_t xhci_addr64(uint32_t low, uint32_t high) { if (sizeof(dma_addr_t) == 4) { @@ -487,7 +494,14 @@ static inline void xhci_dma_read_u32s(XHCIState *xhci, dma_addr_t addr, assert((len % sizeof(uint32_t)) == 0); - dma_memory_read(xhci->as, addr, buf, len); + if (dma_memory_read(xhci->as, addr, buf, len, + MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA memory access failed!\n", + __func__); + memset(buf, 0xff, len); + xhci_die(xhci); + return; + } for (i = 0; i < (len / sizeof(uint32_t)); i++) { buf[i] = le32_to_cpu(buf[i]); @@ -495,7 +509,7 @@ static inline void xhci_dma_read_u32s(XHCIState *xhci, dma_addr_t addr, } static inline void xhci_dma_write_u32s(XHCIState *xhci, dma_addr_t addr, - uint32_t *buf, size_t len) + const uint32_t *buf, size_t len) { int i; uint32_t tmp[5]; @@ -507,7 +521,13 @@ static inline void xhci_dma_write_u32s(XHCIState *xhci, dma_addr_t addr, for (i = 0; i < n; i++) { tmp[i] = cpu_to_le32(buf[i]); } - dma_memory_write(xhci->as, addr, tmp, len); + if (dma_memory_write(xhci->as, addr, tmp, len, + MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA memory access failed!\n", + __func__); + xhci_die(xhci); + return; + } } static XHCIPort *xhci_lookup_port(XHCIState *xhci, struct USBPort *uport) @@ -592,12 +612,6 @@ static inline int xhci_running(XHCIState *xhci) return !(xhci->usbsts & USBSTS_HCH); } -static void xhci_die(XHCIState *xhci) -{ - xhci->usbsts |= USBSTS_HCE; - DPRINTF("xhci: asserted controller error\n"); -} - static void xhci_write_event(XHCIState *xhci, XHCIEvent *event, int v) { XHCIInterrupter *intr = &xhci->intr[v]; @@ -618,7 +632,12 @@ static void xhci_write_event(XHCIState *xhci, XHCIEvent *event, int v) ev_trb.status, ev_trb.control); addr = intr->er_start + TRB_SIZE*intr->er_ep_idx; - dma_memory_write(xhci->as, addr, &ev_trb, TRB_SIZE); + if (dma_memory_write(xhci->as, addr, &ev_trb, TRB_SIZE, + 
MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA memory access failed!\n", + __func__); + xhci_die(xhci); + } intr->er_ep_idx++; if (intr->er_ep_idx >= intr->er_size) { @@ -679,7 +698,12 @@ static TRBType xhci_ring_fetch(XHCIState *xhci, XHCIRing *ring, XHCITRB *trb, while (1) { TRBType type; - dma_memory_read(xhci->as, ring->dequeue, trb, TRB_SIZE); + if (dma_memory_read(xhci->as, ring->dequeue, trb, TRB_SIZE, + MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA memory access failed!\n", + __func__); + return 0; + } trb->addr = ring->dequeue; trb->ccs = ring->ccs; le64_to_cpus(&trb->parameter); @@ -724,9 +748,14 @@ static int xhci_ring_chain_length(XHCIState *xhci, const XHCIRing *ring) bool control_td_set = 0; uint32_t link_cnt = 0; - while (1) { + do { TRBType type; - dma_memory_read(xhci->as, dequeue, &trb, TRB_SIZE); + if (dma_memory_read(xhci->as, dequeue, &trb, TRB_SIZE, + MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA memory access failed!\n", + __func__); + return -1; + } le64_to_cpus(&trb.parameter); le32_to_cpus(&trb.status); le32_to_cpus(&trb.control); @@ -760,7 +789,17 @@ static int xhci_ring_chain_length(XHCIState *xhci, const XHCIRing *ring) if (!control_td_set && !(trb.control & TRB_TR_CH)) { return length; } - } + + /* + * According to the xHCI spec, Transfer Ring segments should have + * a maximum size of 64 kB (see chapter "6 Data Structures") + */ + } while (length < TRB_LINK_LIMIT * 65536 / TRB_SIZE); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: exceeded maximum transfer ring size!\n", + __func__); + + return -1; } static void xhci_er_reset(XHCIState *xhci, int v) @@ -781,7 +820,14 @@ static void xhci_er_reset(XHCIState *xhci, int v) xhci_die(xhci); return; } - dma_memory_read(xhci->as, erstba, &seg, sizeof(seg)); + if (dma_memory_read(xhci->as, erstba, &seg, sizeof(seg), + MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA memory access failed!\n", + __func__); + xhci_die(xhci); + return; + } + le32_to_cpus(&seg.addr_low); le32_to_cpus(&seg.addr_high); le32_to_cpus(&seg.size); @@ -974,7 +1020,9 @@ static XHCIStreamContext *xhci_find_stream(XHCIEPContext *epctx, } sctx = epctx->pstreams + streamid; } else { - FIXME("secondary streams not implemented yet"); + fprintf(stderr, "xhci: FIXME: secondary streams not implemented yet"); + *cc_error = CC_INVALID_STREAM_TYPE_ERROR; + return NULL; } if (sctx->sct == -1) { @@ -2059,7 +2107,7 @@ static TRBCCode xhci_address_slot(XHCIState *xhci, unsigned int slotid, assert(slotid >= 1 && slotid <= xhci->numslots); dcbaap = xhci_addr64(xhci->dcbaap_low, xhci->dcbaap_high); - poctx = ldq_le_dma(xhci->as, dcbaap + 8 * slotid); + ldq_le_dma(xhci->as, dcbaap + 8 * slotid, &poctx, MEMTXATTRS_UNSPECIFIED); ictx = xhci_mask64(pictx); octx = xhci_mask64(poctx); @@ -2397,7 +2445,12 @@ static TRBCCode xhci_get_port_bandwidth(XHCIState *xhci, uint64_t pctx) /* TODO: actually implement real values here */ bw_ctx[0] = 0; memset(&bw_ctx[1], 80, xhci->numports); /* 80% */ - dma_memory_write(xhci->as, ctx, bw_ctx, sizeof(bw_ctx)); + if (dma_memory_write(xhci->as, ctx, bw_ctx, sizeof(bw_ctx), + MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA memory write failed!\n", + __func__); + return CC_TRB_ERROR; + } return CC_SUCCESS; } @@ -2519,7 +2572,7 @@ static void xhci_process_commands(XHCIState *xhci) case CR_VENDOR_NEC_FIRMWARE_REVISION: if (xhci->nec_quirks) { event.type = 48; /* NEC reply */ - 
event.length = 0x3025; + event.length = 0x3034; } else { event.ccode = CC_TRB_ERROR; } @@ -3265,7 +3318,8 @@ static void xhci_wakeup_endpoint(USBBus *bus, USBEndpoint *ep, DPRINTF("%s\n", __func__); slotid = ep->dev->addr; - if (slotid == 0 || !xhci->slots[slotid-1].enabled) { + if (slotid == 0 || slotid > xhci->numslots || + !xhci->slots[slotid - 1].enabled) { DPRINTF("%s: oops, no slot for dev %d\n", __func__, ep->dev->addr); return; } @@ -3425,6 +3479,7 @@ static int usb_xhci_post_load(void *opaque, int version_id) uint32_t slot_ctx[4]; uint32_t ep_ctx[5]; int slotid, epid, state; + uint64_t addr; dcbaap = xhci_addr64(xhci->dcbaap_low, xhci->dcbaap_high); @@ -3433,8 +3488,9 @@ static int usb_xhci_post_load(void *opaque, int version_id) if (!slot->addressed) { continue; } - slot->ctx = - xhci_mask64(ldq_le_dma(xhci->as, dcbaap + 8 * slotid)); + ldq_le_dma(xhci->as, dcbaap + 8 * slotid, &addr, MEMTXATTRS_UNSPECIFIED); + slot->ctx = xhci_mask64(addr); + xhci_dma_read_u32s(xhci, slot->ctx, slot_ctx, sizeof(slot_ctx)); slot->uport = xhci_lookup_uport(xhci, slot_ctx); if (!slot->uport) { diff --git a/hw/usb/host-libusb.c b/hw/usb/host-libusb.c index 00f6fbb29b..176868d345 100644 --- a/hw/usb/host-libusb.c +++ b/hw/usb/host-libusb.c @@ -1706,7 +1706,7 @@ static void usb_host_free_streams(USBDevice *udev, USBEndpoint **eps, /* * This is *NOT* about restoring state. We have absolutely no idea * what state the host device is in at the moment and whenever it is - * still present in the first place. Attemping to contine where we + * still present in the first place. Attempting to continue where we * left off is impossible. * * What we are going to do here is emulate a surprise removal of @@ -1801,7 +1801,7 @@ static void usb_host_class_initfn(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); } -static TypeInfo usb_host_dev_info = { +static const TypeInfo usb_host_dev_info = { .name = TYPE_USB_HOST_DEVICE, .parent = TYPE_USB_DEVICE, .instance_size = sizeof(USBHostDevice), @@ -1809,6 +1809,7 @@ static TypeInfo usb_host_dev_info = { .instance_init = usb_host_instance_init, }; module_obj(TYPE_USB_HOST_DEVICE); +module_kconfig(USB); static void usb_host_register_types(void) { @@ -1836,7 +1837,6 @@ static void usb_host_auto_check(void *unused) struct USBAutoFilter *f; libusb_device **devs = NULL; struct libusb_device_descriptor ddesc; - int unconnected = 0; int i, n; if (usb_host_init() != 0) { @@ -1896,9 +1896,6 @@ static void usb_host_auto_check(void *unused) libusb_free_device_list(devs, 1); QTAILQ_FOREACH(s, &hostdevs, next) { - if (s->dh == NULL) { - unconnected++; - } if (s->seen == 0) { if (s->dh) { usb_host_close(s); @@ -1907,17 +1904,6 @@ static void usb_host_auto_check(void *unused) } s->seen = 0; } - -#if 0 - if (unconnected == 0) { - /* nothing to watch */ - if (usb_auto_timer) { - timer_del(usb_auto_timer); - trace_usb_host_auto_scan_disabled(); - } - return; - } -#endif } if (!usb_vmstate) { diff --git a/hw/usb/libhw.c b/hw/usb/libhw.c index 9c33a1640f..f350eae443 100644 --- a/hw/usb/libhw.c +++ b/hw/usb/libhw.c @@ -36,7 +36,8 @@ int usb_packet_map(USBPacket *p, QEMUSGList *sgl) while (len) { dma_addr_t xlen = len; - mem = dma_memory_map(sgl->as, base, &xlen, dir); + mem = dma_memory_map(sgl->as, base, &xlen, dir, + MEMTXATTRS_UNSPECIFIED); if (!mem) { goto err; } diff --git a/hw/usb/meson.build b/hw/usb/meson.build index de853d780d..793df42e21 100644 --- a/hw/usb/meson.build +++ b/hw/usb/meson.build @@ -63,6 +63,11 @@ if u2f.found() softmmu_ss.add(when: 
'CONFIG_USB_U2F', if_true: [u2f, files('u2f-emulated.c')]) endif +# CanoKey +if canokey.found() + softmmu_ss.add(when: 'CONFIG_USB_CANOKEY', if_true: [canokey, files('canokey.c')]) +endif + # usb redirect if usbredir.found() usbredir_ss = ss.source_set() diff --git a/hw/usb/quirks-ftdi-ids.h b/hw/usb/quirks-ftdi-ids.h index 01aca55ca7..f3cb157d6f 100644 --- a/hw/usb/quirks-ftdi-ids.h +++ b/hw/usb/quirks-ftdi-ids.h @@ -625,9 +625,9 @@ * Definitions for Icom Inc. devices */ #define ICOM_VID 0x0C26 /* Icom vendor ID */ -/* Note: ID-1 is a communications tranceiver for HAM-radio operators */ +/* Note: ID-1 is a communications transceiver for HAM-radio operators */ #define ICOM_ID_1_PID 0x0004 /* ID-1 USB to RS-232 */ -/* Note: OPC is an Optional cable to connect an Icom Tranceiver */ +/* Note: OPC is an Optional cable to connect an Icom Transceiver */ #define ICOM_OPC_U_UC_PID 0x0018 /* OPC-478UC, OPC-1122U cloning cable */ /* Note: ID-RP* devices are Icom Repeater Devices for HAM-radio */ #define ICOM_ID_RP2C1_PID 0x0009 /* ID-RP2C Asset 1 to RS-232 */ diff --git a/hw/usb/quirks-pl2303-ids.h b/hw/usb/quirks-pl2303-ids.h index 8dbdb46ffe..28dd8da61c 100644 --- a/hw/usb/quirks-pl2303-ids.h +++ b/hw/usb/quirks-pl2303-ids.h @@ -1,150 +1,150 @@ /* * Prolific PL2303 USB to serial adaptor driver header file * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
* */ -#define BENQ_VENDOR_ID 0x04a5 -#define BENQ_PRODUCT_ID_S81 0x4027 +#define BENQ_VENDOR_ID 0x04a5 +#define BENQ_PRODUCT_ID_S81 0x4027 -#define PL2303_VENDOR_ID 0x067b -#define PL2303_PRODUCT_ID 0x2303 -#define PL2303_PRODUCT_ID_RSAQ2 0x04bb -#define PL2303_PRODUCT_ID_DCU11 0x1234 -#define PL2303_PRODUCT_ID_PHAROS 0xaaa0 -#define PL2303_PRODUCT_ID_RSAQ3 0xaaa2 -#define PL2303_PRODUCT_ID_ALDIGA 0x0611 -#define PL2303_PRODUCT_ID_MMX 0x0612 -#define PL2303_PRODUCT_ID_GPRS 0x0609 -#define PL2303_PRODUCT_ID_HCR331 0x331a -#define PL2303_PRODUCT_ID_MOTOROLA 0x0307 +#define PL2303_VENDOR_ID 0x067b +#define PL2303_PRODUCT_ID 0x2303 +#define PL2303_PRODUCT_ID_RSAQ2 0x04bb +#define PL2303_PRODUCT_ID_DCU11 0x1234 +#define PL2303_PRODUCT_ID_PHAROS 0xaaa0 +#define PL2303_PRODUCT_ID_RSAQ3 0xaaa2 +#define PL2303_PRODUCT_ID_ALDIGA 0x0611 +#define PL2303_PRODUCT_ID_MMX 0x0612 +#define PL2303_PRODUCT_ID_GPRS 0x0609 +#define PL2303_PRODUCT_ID_HCR331 0x331a +#define PL2303_PRODUCT_ID_MOTOROLA 0x0307 -#define ATEN_VENDOR_ID 0x0557 -#define ATEN_VENDOR_ID2 0x0547 -#define ATEN_PRODUCT_ID 0x2008 +#define ATEN_VENDOR_ID 0x0557 +#define ATEN_VENDOR_ID2 0x0547 +#define ATEN_PRODUCT_ID 0x2008 -#define IODATA_VENDOR_ID 0x04bb -#define IODATA_PRODUCT_ID 0x0a03 -#define IODATA_PRODUCT_ID_RSAQ5 0x0a0e +#define IODATA_VENDOR_ID 0x04bb +#define IODATA_PRODUCT_ID 0x0a03 +#define IODATA_PRODUCT_ID_RSAQ5 0x0a0e -#define ELCOM_VENDOR_ID 0x056e -#define ELCOM_PRODUCT_ID 0x5003 -#define ELCOM_PRODUCT_ID_UCSGT 0x5004 +#define ELCOM_VENDOR_ID 0x056e +#define ELCOM_PRODUCT_ID 0x5003 +#define ELCOM_PRODUCT_ID_UCSGT 0x5004 -#define ITEGNO_VENDOR_ID 0x0eba -#define ITEGNO_PRODUCT_ID 0x1080 -#define ITEGNO_PRODUCT_ID_2080 0x2080 +#define ITEGNO_VENDOR_ID 0x0eba +#define ITEGNO_PRODUCT_ID 0x1080 +#define ITEGNO_PRODUCT_ID_2080 0x2080 -#define MA620_VENDOR_ID 0x0df7 -#define MA620_PRODUCT_ID 0x0620 +#define MA620_VENDOR_ID 0x0df7 +#define MA620_PRODUCT_ID 0x0620 -#define RATOC_VENDOR_ID 0x0584 -#define RATOC_PRODUCT_ID 0xb000 +#define RATOC_VENDOR_ID 0x0584 +#define RATOC_PRODUCT_ID 0xb000 -#define TRIPP_VENDOR_ID 0x2478 -#define TRIPP_PRODUCT_ID 0x2008 +#define TRIPP_VENDOR_ID 0x2478 +#define TRIPP_PRODUCT_ID 0x2008 -#define RADIOSHACK_VENDOR_ID 0x1453 -#define RADIOSHACK_PRODUCT_ID 0x4026 +#define RADIOSHACK_VENDOR_ID 0x1453 +#define RADIOSHACK_PRODUCT_ID 0x4026 -#define DCU10_VENDOR_ID 0x0731 -#define DCU10_PRODUCT_ID 0x0528 +#define DCU10_VENDOR_ID 0x0731 +#define DCU10_PRODUCT_ID 0x0528 -#define SITECOM_VENDOR_ID 0x6189 -#define SITECOM_PRODUCT_ID 0x2068 +#define SITECOM_VENDOR_ID 0x6189 +#define SITECOM_PRODUCT_ID 0x2068 /* Alcatel OT535/735 USB cable */ -#define ALCATEL_VENDOR_ID 0x11f7 -#define ALCATEL_PRODUCT_ID 0x02df +#define ALCATEL_VENDOR_ID 0x11f7 +#define ALCATEL_PRODUCT_ID 0x02df /* Samsung I330 phone cradle */ -#define SAMSUNG_VENDOR_ID 0x04e8 -#define SAMSUNG_PRODUCT_ID 0x8001 +#define SAMSUNG_VENDOR_ID 0x04e8 +#define SAMSUNG_PRODUCT_ID 0x8001 -#define SIEMENS_VENDOR_ID 0x11f5 -#define SIEMENS_PRODUCT_ID_SX1 0x0001 -#define SIEMENS_PRODUCT_ID_X65 0x0003 -#define SIEMENS_PRODUCT_ID_X75 0x0004 -#define SIEMENS_PRODUCT_ID_EF81 0x0005 +#define SIEMENS_VENDOR_ID 0x11f5 +#define SIEMENS_PRODUCT_ID_SX1 0x0001 +#define SIEMENS_PRODUCT_ID_X65 0x0003 +#define SIEMENS_PRODUCT_ID_X75 0x0004 +#define SIEMENS_PRODUCT_ID_EF81 0x0005 -#define SYNTECH_VENDOR_ID 0x0745 -#define SYNTECH_PRODUCT_ID 0x0001 +#define SYNTECH_VENDOR_ID 0x0745 +#define SYNTECH_PRODUCT_ID 0x0001 /* Nokia CA-42 Cable */ -#define NOKIA_CA42_VENDOR_ID 0x078b 
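Like the header hunk above, the quirks-pl2303-ids.h changes are whitespace-only re-alignment: the vendor/product ID values are untouched. As a minimal sketch of how such (VID, PID) pairs are typically consulted, the snippet below does a plain table lookup; the three ID pairs are copied from the defines above, while the table, the helper name and the main() driver are illustrative assumptions and not the quirk-matching code QEMU actually uses.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A few (vendor, product) pairs copied from the defines above;
 * the table and the lookup helper are an illustrative sketch only. */
struct usb_id { uint16_t vid, pid; };

static const struct usb_id pl2303_ids[] = {
    { 0x067b, 0x2303 },   /* PL2303_VENDOR_ID / PL2303_PRODUCT_ID   */
    { 0x04a5, 0x4027 },   /* BENQ_VENDOR_ID   / BENQ_PRODUCT_ID_S81 */
    { 0x0557, 0x2008 },   /* ATEN_VENDOR_ID   / ATEN_PRODUCT_ID     */
};

static bool is_pl2303_device(uint16_t vid, uint16_t pid)
{
    for (size_t i = 0; i < sizeof(pl2303_ids) / sizeof(pl2303_ids[0]); i++) {
        if (pl2303_ids[i].vid == vid && pl2303_ids[i].pid == pid) {
            return true;
        }
    }
    return false;
}

int main(void)
{
    printf("%d\n", is_pl2303_device(0x067b, 0x2303));  /* prints 1 */
    printf("%d\n", is_pl2303_device(0x1234, 0x5678));  /* prints 0 */
    return 0;
}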
-#define NOKIA_CA42_PRODUCT_ID 0x1234 +#define NOKIA_CA42_VENDOR_ID 0x078b +#define NOKIA_CA42_PRODUCT_ID 0x1234 /* CA-42 CLONE Cable www.ca-42.com chipset: Prolific Technology Inc */ -#define CA_42_CA42_VENDOR_ID 0x10b5 -#define CA_42_CA42_PRODUCT_ID 0xac70 +#define CA_42_CA42_VENDOR_ID 0x10b5 +#define CA_42_CA42_PRODUCT_ID 0xac70 -#define SAGEM_VENDOR_ID 0x079b -#define SAGEM_PRODUCT_ID 0x0027 +#define SAGEM_VENDOR_ID 0x079b +#define SAGEM_PRODUCT_ID 0x0027 /* Leadtek GPS 9531 (ID 0413:2101) */ -#define LEADTEK_VENDOR_ID 0x0413 -#define LEADTEK_9531_PRODUCT_ID 0x2101 +#define LEADTEK_VENDOR_ID 0x0413 +#define LEADTEK_9531_PRODUCT_ID 0x2101 /* USB GSM cable from Speed Dragon Multimedia, Ltd */ -#define SPEEDDRAGON_VENDOR_ID 0x0e55 -#define SPEEDDRAGON_PRODUCT_ID 0x110b +#define SPEEDDRAGON_VENDOR_ID 0x0e55 +#define SPEEDDRAGON_PRODUCT_ID 0x110b /* DATAPILOT Universal-2 Phone Cable */ -#define DATAPILOT_U2_VENDOR_ID 0x0731 -#define DATAPILOT_U2_PRODUCT_ID 0x2003 +#define DATAPILOT_U2_VENDOR_ID 0x0731 +#define DATAPILOT_U2_PRODUCT_ID 0x2003 /* Belkin "F5U257" Serial Adapter */ -#define BELKIN_VENDOR_ID 0x050d -#define BELKIN_PRODUCT_ID 0x0257 +#define BELKIN_VENDOR_ID 0x050d +#define BELKIN_PRODUCT_ID 0x0257 /* Alcor Micro Corp. USB 2.0 TO RS-232 */ -#define ALCOR_VENDOR_ID 0x058F -#define ALCOR_PRODUCT_ID 0x9720 +#define ALCOR_VENDOR_ID 0x058F +#define ALCOR_PRODUCT_ID 0x9720 /* Willcom WS002IN Data Driver (by NetIndex Inc.) */ -#define WS002IN_VENDOR_ID 0x11f6 -#define WS002IN_PRODUCT_ID 0x2001 +#define WS002IN_VENDOR_ID 0x11f6 +#define WS002IN_PRODUCT_ID 0x2001 /* Corega CG-USBRS232R Serial Adapter */ -#define COREGA_VENDOR_ID 0x07aa -#define COREGA_PRODUCT_ID 0x002a +#define COREGA_VENDOR_ID 0x07aa +#define COREGA_PRODUCT_ID 0x002a /* Y.C. Cable U.S.A., Inc - USB to RS-232 */ -#define YCCABLE_VENDOR_ID 0x05ad -#define YCCABLE_PRODUCT_ID 0x0fba +#define YCCABLE_VENDOR_ID 0x05ad +#define YCCABLE_PRODUCT_ID 0x0fba /* "Superial" USB - Serial */ -#define SUPERIAL_VENDOR_ID 0x5372 -#define SUPERIAL_PRODUCT_ID 0x2303 +#define SUPERIAL_VENDOR_ID 0x5372 +#define SUPERIAL_PRODUCT_ID 0x2303 /* Hewlett-Packard LD220-HP POS Pole Display */ -#define HP_VENDOR_ID 0x03f0 -#define HP_LD220_PRODUCT_ID 0x3524 +#define HP_VENDOR_ID 0x03f0 +#define HP_LD220_PRODUCT_ID 0x3524 /* Cressi Edy (diving computer) PC interface */ -#define CRESSI_VENDOR_ID 0x04b8 -#define CRESSI_EDY_PRODUCT_ID 0x0521 +#define CRESSI_VENDOR_ID 0x04b8 +#define CRESSI_EDY_PRODUCT_ID 0x0521 /* Zeagle dive computer interface */ -#define ZEAGLE_VENDOR_ID 0x04b8 -#define ZEAGLE_N2ITION3_PRODUCT_ID 0x0522 +#define ZEAGLE_VENDOR_ID 0x04b8 +#define ZEAGLE_N2ITION3_PRODUCT_ID 0x0522 /* Sony, USB data cable for CMD-Jxx mobile phones */ -#define SONY_VENDOR_ID 0x054c -#define SONY_QN3USB_PRODUCT_ID 0x0437 +#define SONY_VENDOR_ID 0x054c +#define SONY_QN3USB_PRODUCT_ID 0x0437 /* Sanwa KB-USB2 multimeter cable (ID: 11ad:0001) */ -#define SANWA_VENDOR_ID 0x11ad -#define SANWA_PRODUCT_ID 0x0001 +#define SANWA_VENDOR_ID 0x11ad +#define SANWA_PRODUCT_ID 0x0001 /* ADLINK ND-6530 RS232,RS485 and RS422 adapter */ -#define ADLINK_VENDOR_ID 0x0b63 -#define ADLINK_ND6530_PRODUCT_ID 0x6530 +#define ADLINK_VENDOR_ID 0x0b63 +#define ADLINK_ND6530_PRODUCT_ID 0x6530 /* SMART USB Serial Adapter */ -#define SMART_VENDOR_ID 0x0b8c -#define SMART_PRODUCT_ID 0x2303 +#define SMART_VENDOR_ID 0x0b8c +#define SMART_PRODUCT_ID 0x2303 diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c index 5f0ef9cb3b..fd7df599bc 100644 --- a/hw/usb/redirect.c +++ b/hw/usb/redirect.c @@ 
-26,7 +26,7 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" +#include "qemu/cutils.h" #include "qemu/units.h" #include "qapi/error.h" #include "qemu/timer.h" @@ -1239,7 +1239,11 @@ static void usbredir_create_parser(USBRedirDevice *dev) DPRINTF("creating usbredirparser\n"); - dev->parser = qemu_oom_check(usbredirparser_create()); + dev->parser = usbredirparser_create(); + if (!dev->parser) { + error_report("usbredirparser_create() failed"); + exit(1); + } dev->parser->priv = dev; dev->parser->log_func = usbredir_log; dev->parser->read_func = usbredir_read; @@ -2239,7 +2243,10 @@ static int usbredir_put_parser(QEMUFile *f, void *priv, size_t unused, } usbredirparser_serialize(dev->parser, &data, &len); - qemu_oom_check(data); + if (!data) { + error_report("usbredirparser_serialize failed"); + exit(1); + } qemu_put_be32(f, len); qemu_put_buffer(f, data, len); @@ -2330,7 +2337,11 @@ static int usbredir_get_bufpq(QEMUFile *f, void *priv, size_t unused, bufp->len = qemu_get_be32(f); bufp->status = qemu_get_be32(f); bufp->offset = 0; - bufp->data = qemu_oom_check(malloc(bufp->len)); /* regular malloc! */ + bufp->data = malloc(bufp->len); /* regular malloc! */ + if (!bufp->data) { + error_report("usbredir_get_bufpq: out of memory"); + exit(1); + } bufp->free_on_destroy = bufp->data; qemu_get_buffer(f, bufp->data, bufp->len); QTAILQ_INSERT_TAIL(&endp->bufpq, bufp, next); @@ -2609,6 +2620,7 @@ static const TypeInfo usbredir_dev_info = { .instance_init = usbredir_instance_init, }; module_obj(TYPE_USB_REDIR); +module_kconfig(USB); static void usbredir_register_types(void) { diff --git a/hw/usb/trace-events b/hw/usb/trace-events index b8287b63f1..b65269892c 100644 --- a/hw/usb/trace-events +++ b/hw/usb/trace-events @@ -51,7 +51,7 @@ usb_ohci_td_skip_async(void) "" usb_ohci_td_pkt_hdr(uint32_t addr, int64_t pktlen, int64_t len, const char *s, int flag_r, uint32_t cbp, uint32_t be) " TD @ 0x%.8x %" PRId64 " of %" PRId64 " bytes %s r=%d cbp=0x%.8x be=0x%.8x" usb_ohci_td_pkt_short(const char *dir, const char *buf) "%s data: %s" usb_ohci_td_pkt_full(const char *dir, const char *buf) "%s data: %s" -usb_ohci_td_too_many_pending(void) "" +usb_ohci_td_too_many_pending(int ep) "ep=%d" usb_ohci_td_packet_status(int status) "status=%d" usb_ohci_ed_read_error(uint32_t addr) "ED read error at 0x%x" usb_ohci_ed_pkt(uint32_t cur, int h, int c, uint32_t head, uint32_t tail, uint32_t next) "ED @ 0x%.8x h=%u c=%u\n head=0x%.8x tailp=0x%.8x next=0x%.8x" @@ -263,6 +263,7 @@ usb_msd_packet_complete(void) "" usb_msd_cmd_submit(unsigned lun, unsigned tag, unsigned flags, unsigned len, unsigned data_len) "lun %u, tag 0x%x, flags 0x%08x, len %d, data-len %d" usb_msd_cmd_complete(unsigned status, unsigned tag) "status %d, tag 0x%x" usb_msd_cmd_cancel(unsigned tag) "tag 0x%x" +usb_msd_fatal_error(void) "" # dev-uas.c usb_uas_reset(int addr) "dev %d" @@ -345,3 +346,19 @@ usb_serial_set_baud(int bus, int addr, int baud) "dev %d:%u baud rate %d" usb_serial_set_data(int bus, int addr, int parity, int data, int stop) "dev %d:%u parity %c, data bits %d, stop bits %d" usb_serial_set_flow_control(int bus, int addr, int index) "dev %d:%u flow control %d" usb_serial_set_xonxoff(int bus, int addr, uint8_t xon, uint8_t xoff) "dev %d:%u xon 0x%x xoff 0x%x" + +# canokey.c +canokey_emu_stall_ep(uint8_t ep) "ep %d" +canokey_emu_set_address(uint8_t addr) "addr %d" +canokey_emu_prepare_receive(uint8_t ep, uint16_t size) "ep %d size %d" +canokey_emu_transmit(uint8_t ep, uint16_t size) "ep %d size %d" +canokey_thread_start(void) 
+canokey_thread_stop(void) +canokey_handle_reset(void) +canokey_handle_control_setup(int request, int value, int index, int length) "request 0x%04X value 0x%04X index 0x%04X length 0x%04X" +canokey_handle_control_out(void) +canokey_handle_control_in(int actual_len) "len %d" +canokey_handle_data_out(uint8_t ep_out, uint32_t out_len) "ep %d len %d" +canokey_handle_data_in(uint8_t ep_in, uint32_t in_len) "ep %d len %d" +canokey_realize(void) +canokey_unrealize(void) diff --git a/hw/usb/u2f-emulated.c b/hw/usb/u2f-emulated.c index 9151feb63d..63cceaa5fc 100644 --- a/hw/usb/u2f-emulated.c +++ b/hw/usb/u2f-emulated.c @@ -307,7 +307,7 @@ static void u2f_emulated_realize(U2FKeyState *base, Error **errp) rc = u2f_emulated_setup_vdev_manualy(key); } else { error_setg(errp, "%s: cert, priv, entropy and counter " - "parameters must be provided to manualy configure " + "parameters must be provided to manually configure " "the emulated device", TYPE_U2F_EMULATED); return; } diff --git a/hw/usb/u2f.h b/hw/usb/u2f.h index db30f3586b..a408a82927 100644 --- a/hw/usb/u2f.h +++ b/hw/usb/u2f.h @@ -74,7 +74,7 @@ typedef struct U2FKeyState { /* * API to be used by the U2F key device variants (i.e. hw/u2f-*.c) - * to interact with the the U2F key base device (i.e. hw/u2f.c) + * to interact with the U2F key base device (i.e. hw/u2f.c) */ void u2f_send_to_guest(U2FKeyState *key, const uint8_t packet[U2FHID_PACKET_SIZE]); diff --git a/hw/usb/vt82c686-uhci-pci.c b/hw/usb/vt82c686-uhci-pci.c index b109c21603..46a901f56f 100644 --- a/hw/usb/vt82c686-uhci-pci.c +++ b/hw/usb/vt82c686-uhci-pci.c @@ -1,6 +1,17 @@ #include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/isa/vt82c686.h" #include "hcd-uhci.h" +static void uhci_isa_set_irq(void *opaque, int irq_num, int level) +{ + UHCIState *s = opaque; + uint8_t irq = pci_get_byte(s->dev.config + PCI_INTERRUPT_LINE); + if (irq > 0 && irq < 15) { + via_isa_set_irq(pci_get_function_0(&s->dev), irq, level); + } +} + static void usb_uhci_vt82c686b_realize(PCIDevice *dev, Error **errp) { UHCIState *s = UHCI(dev); @@ -14,23 +25,27 @@ static void usb_uhci_vt82c686b_realize(PCIDevice *dev, Error **errp) pci_set_long(pci_conf + 0xc0, 0x00002000); usb_uhci_common_realize(dev, errp); + object_unref(s->irq); + s->irq = qemu_allocate_irq(uhci_isa_set_irq, s, 0); } static UHCIInfo uhci_info[] = { { - .name = "vt82c686b-usb-uhci", + .name = TYPE_VT82C686B_USB_UHCI, .vendor_id = PCI_VENDOR_ID_VIA, .device_id = PCI_DEVICE_ID_VIA_UHCI, .revision = 0x01, .irq_pin = 3, .realize = usb_uhci_vt82c686b_realize, .unplug = true, + /* Reason: only works as USB function of VT82xx superio chips */ + .notuser = true, } }; static const TypeInfo vt82c686b_usb_uhci_type_info = { .parent = TYPE_UHCI, - .name = "vt82c686b-usb-uhci", + .name = TYPE_VT82C686B_USB_UHCI, .class_init = uhci_data_class_init, .class_data = uhci_info, }; diff --git a/hw/vfio/ccw.c b/hw/vfio/ccw.c index 000992fb9f..0354737666 100644 --- a/hw/vfio/ccw.c +++ b/hw/vfio/ccw.c @@ -199,7 +199,7 @@ again: case 0: case -ENODEV: case -EACCES: - return 0; + return ret; case -EFAULT: default: sch_gen_unit_exception(sch); @@ -240,7 +240,7 @@ again: case -EBUSY: case -ENODEV: case -EACCES: - return 0; + return ret; case -EFAULT: default: sch_gen_unit_exception(sch); diff --git a/hw/vfio/common.c b/hw/vfio/common.c index 8728d4d5c2..130e5d1dc7 100644 --- a/hw/vfio/common.c +++ b/hw/vfio/common.c @@ -40,6 +40,7 @@ #include "trace.h" #include "qapi/error.h" #include "migration/migration.h" +#include "sysemu/tpm.h" VFIOGroupList vfio_group_list = 
QLIST_HEAD_INITIALIZER(vfio_group_list); @@ -354,7 +355,7 @@ static bool vfio_devices_all_dirty_tracking(VFIOContainer *container) } if ((vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF) - && (migration->device_state & VFIO_DEVICE_STATE_RUNNING)) { + && (migration->device_state & VFIO_DEVICE_STATE_V1_RUNNING)) { return false; } } @@ -380,8 +381,8 @@ static bool vfio_devices_all_running_and_saving(VFIOContainer *container) return false; } - if ((migration->device_state & VFIO_DEVICE_STATE_SAVING) && - (migration->device_state & VFIO_DEVICE_STATE_RUNNING)) { + if ((migration->device_state & VFIO_DEVICE_STATE_V1_SAVING) && + (migration->device_state & VFIO_DEVICE_STATE_V1_RUNNING)) { continue; } else { return false; @@ -397,7 +398,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container, { struct vfio_iommu_type1_dma_unmap *unmap; struct vfio_bitmap *bitmap; - uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size; + uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size(); int ret; unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap)); @@ -414,7 +415,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container, * to qemu_real_host_page_size. */ - bitmap->pgsize = qemu_real_host_page_size; + bitmap->pgsize = qemu_real_host_page_size(); bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) / BITS_PER_BYTE; @@ -551,6 +552,7 @@ static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova, QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) { QLIST_REMOVE(hostwin, hostwin_next); + g_free(hostwin); return 0; } } @@ -562,6 +564,7 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section) { return (!memory_region_is_ram(section->mr) && !memory_region_is_iommu(section->mr)) || + memory_region_is_protected(section->mr) || /* * Sizing an enabled 64-bit BAR can cause spurious mappings to * addresses in the upper part of the 64-bit address space. These @@ -575,45 +578,11 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section) static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr, ram_addr_t *ram_addr, bool *read_only) { - MemoryRegion *mr; - hwaddr xlat; - hwaddr len = iotlb->addr_mask + 1; - bool writable = iotlb->perm & IOMMU_WO; - - /* - * The IOMMU TLB entry we have just covers translation through - * this IOMMU to its immediate target. We need to translate - * it the rest of the way through to memory. - */ - mr = address_space_translate(&address_space_memory, - iotlb->translated_addr, - &xlat, &len, writable, - MEMTXATTRS_UNSPECIFIED); - if (!memory_region_is_ram(mr)) { - error_report("iommu map to non memory area %"HWADDR_PRIx"", - xlat); - return false; - } else if (memory_region_has_ram_discard_manager(mr)) { - RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr); - MemoryRegionSection tmp = { - .mr = mr, - .offset_within_region = xlat, - .size = int128_make64(len), - }; - - /* - * Malicious VMs can map memory into the IOMMU, which is expected - * to remain discarded. vfio will pin all pages, populating memory. - * Disallow that. vmstate priorities make sure any RamDiscardManager - * were already restored before IOMMUs are restored. 
- */ - if (!ram_discard_manager_is_populated(rdm, &tmp)) { - error_report("iommu map to discarded memory (e.g., unplugged via" - " virtio-mem): %"HWADDR_PRIx"", - iotlb->translated_addr); - return false; - } + bool ret, mr_has_discard_manager; + ret = memory_get_xlat_addr(iotlb, vaddr, ram_addr, read_only, + &mr_has_discard_manager); + if (ret && mr_has_discard_manager) { /* * Malicious VMs might trigger discarding of IOMMU-mapped memory. The * pages will remain pinned inside vfio until unmapped, resulting in a @@ -632,29 +601,7 @@ static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr, " intended via an IOMMU. It's possible to mitigate " " by setting/adjusting RLIMIT_MEMLOCK."); } - - /* - * Translation truncates length to the IOMMU page size, - * check that it did not truncate too much. - */ - if (len & iotlb->addr_mask) { - error_report("iommu has granularity incompatible with target AS"); - return false; - } - - if (vaddr) { - *vaddr = memory_region_get_ram_ptr(mr) + xlat; - } - - if (ram_addr) { - *ram_addr = memory_region_get_ram_addr(mr) + xlat; - } - - if (read_only) { - *read_only = !writable || mr->readonly; - } - - return true; + return ret; } static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) @@ -859,6 +806,22 @@ static void vfio_unregister_ram_discard_listener(VFIOContainer *container, g_free(vrdl); } +static bool vfio_known_safe_misalignment(MemoryRegionSection *section) +{ + MemoryRegion *mr = section->mr; + + if (!TPM_IS_CRB(mr->owner)) { + return false; + } + + /* this is a known safe misaligned region, just trace for debug purpose */ + trace_vfio_known_safe_misalignment(memory_region_name(mr), + section->offset_within_address_space, + section->offset_within_region, + qemu_real_host_page_size()); + return true; +} + static void vfio_listener_region_add(MemoryListener *listener, MemoryRegionSection *section) { @@ -880,18 +843,33 @@ static void vfio_listener_region_add(MemoryListener *listener, } if (unlikely((section->offset_within_address_space & - ~qemu_real_host_page_mask) != - (section->offset_within_region & ~qemu_real_host_page_mask))) { - error_report("%s received unaligned region", __func__); + ~qemu_real_host_page_mask()) != + (section->offset_within_region & ~qemu_real_host_page_mask()))) { + if (!vfio_known_safe_misalignment(section)) { + error_report("%s received unaligned region %s iova=0x%"PRIx64 + " offset_within_region=0x%"PRIx64 + " qemu_real_host_page_size=0x%"PRIxPTR, + __func__, memory_region_name(section->mr), + section->offset_within_address_space, + section->offset_within_region, + qemu_real_host_page_size()); + } return; } iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space); llend = int128_make64(section->offset_within_address_space); llend = int128_add(llend, section->size); - llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask)); + llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask())); if (int128_ge(int128_make64(iova), llend)) { + if (memory_region_is_ram_device(section->mr)) { + trace_vfio_listener_region_add_no_dma_map( + memory_region_name(section->mr), + section->offset_within_address_space, + int128_getlo(section->size), + qemu_real_host_page_size()); + } return; } end = int128_get64(int128_sub(llend, int128_one())); @@ -983,7 +961,7 @@ static void vfio_listener_region_add(MemoryListener *listener, * device emulation the VFIO iommu handles to use). 
*/ giommu = g_malloc0(sizeof(*giommu)); - giommu->iommu = iommu_mr; + giommu->iommu_mr = iommu_mr; giommu->iommu_offset = section->offset_within_address_space - section->offset_within_region; giommu->container = container; @@ -998,7 +976,7 @@ static void vfio_listener_region_add(MemoryListener *listener, int128_get64(llend), iommu_idx); - ret = memory_region_iommu_set_page_size_mask(giommu->iommu, + ret = memory_region_iommu_set_page_size_mask(giommu->iommu_mr, container->pgsizes, &err); if (ret) { @@ -1013,7 +991,7 @@ static void vfio_listener_region_add(MemoryListener *listener, goto fail; } QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next); - memory_region_iommu_replay(giommu->iommu, &giommu->n); + memory_region_iommu_replay(giommu->iommu_mr, &giommu->n); return; } @@ -1109,9 +1087,17 @@ static void vfio_listener_region_del(MemoryListener *listener, } if (unlikely((section->offset_within_address_space & - ~qemu_real_host_page_mask) != - (section->offset_within_region & ~qemu_real_host_page_mask))) { - error_report("%s received unaligned region", __func__); + ~qemu_real_host_page_mask()) != + (section->offset_within_region & ~qemu_real_host_page_mask()))) { + if (!vfio_known_safe_misalignment(section)) { + error_report("%s received unaligned region %s iova=0x%"PRIx64 + " offset_within_region=0x%"PRIx64 + " qemu_real_host_page_size=0x%"PRIxPTR, + __func__, memory_region_name(section->mr), + section->offset_within_address_space, + section->offset_within_region, + qemu_real_host_page_size()); + } return; } @@ -1119,7 +1105,7 @@ static void vfio_listener_region_del(MemoryListener *listener, VFIOGuestIOMMU *giommu; QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) { - if (MEMORY_REGION(giommu->iommu) == section->mr && + if (MEMORY_REGION(giommu->iommu_mr) == section->mr && giommu->n.start == section->offset_within_region) { memory_region_unregister_iommu_notifier(section->mr, &giommu->n); @@ -1141,7 +1127,7 @@ static void vfio_listener_region_del(MemoryListener *listener, iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space); llend = int128_make64(section->offset_within_address_space); llend = int128_add(llend, section->size); - llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask)); + llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask())); if (int128_ge(int128_make64(iova), llend)) { return; @@ -1263,9 +1249,9 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize * to qemu_real_host_page_size. 
*/ - range->bitmap.pgsize = qemu_real_host_page_size; + range->bitmap.pgsize = qemu_real_host_page_size(); - pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size; + pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size(); range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) / BITS_PER_BYTE; range->bitmap.data = g_try_malloc0(range->bitmap.size); @@ -1384,11 +1370,11 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *container, VFIOGuestIOMMU *giommu; QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) { - if (MEMORY_REGION(giommu->iommu) == section->mr && + if (MEMORY_REGION(giommu->iommu_mr) == section->mr && giommu->n.start == section->offset_within_region) { Int128 llend; vfio_giommu_dirty_notifier gdn = { .giommu = giommu }; - int idx = memory_region_iommu_attrs_to_index(giommu->iommu, + int idx = memory_region_iommu_attrs_to_index(giommu->iommu_mr, MEMTXATTRS_UNSPECIFIED); llend = int128_add(int128_make64(section->offset_within_region), @@ -1401,7 +1387,7 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *container, section->offset_within_region, int128_get64(llend), idx); - memory_region_iommu_replay(giommu->iommu, &gdn.n); + memory_region_iommu_replay(giommu->iommu_mr, &gdn.n); break; } } @@ -1434,6 +1420,7 @@ static void vfio_listener_log_sync(MemoryListener *listener, } static const MemoryListener vfio_memory_listener = { + .name = "vfio", .region_add = vfio_listener_region_add, .region_del = vfio_listener_region_del, .log_global_start = vfio_listener_log_global_start, @@ -1534,11 +1521,10 @@ static int vfio_setup_region_sparse_mmaps(VFIORegion *region, region->mmaps = g_new0(VFIOMmap, sparse->nr_areas); for (i = 0, j = 0; i < sparse->nr_areas; i++) { - trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset, - sparse->areas[i].offset + - sparse->areas[i].size); - if (sparse->areas[i].size) { + trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset, + sparse->areas[i].offset + + sparse->areas[i].size - 1); region->mmaps[j].offset = sparse->areas[i].offset; region->mmaps[j].size = sparse->areas[i].size; j++; @@ -1960,7 +1946,7 @@ static void vfio_get_iommu_info_migration(VFIOContainer *container, * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of * qemu_real_host_page_size to mark those dirty. */ - if (cap_mig->pgsize_bitmap & qemu_real_host_page_size) { + if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) { container->dirty_pages_supported = true; container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size; container->dirty_pgsizes = cap_mig->pgsize_bitmap; @@ -2069,29 +2055,31 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, { struct vfio_iommu_type1_info *info; - /* - * FIXME: This assumes that a Type1 IOMMU can map any 64-bit - * IOVA whatsoever. That's not actually true, but the current - * kernel interface doesn't tell us what it can map, and the - * existing Type1 IOMMUs generally support any IOVA we're - * going to actually try in practice. - */ ret = vfio_get_iommu_info(container, &info); - - if (ret || !(info->flags & VFIO_IOMMU_INFO_PGSIZES)) { - /* Assume 4k IOVA page size */ - info->iova_pgsizes = 4096; + if (ret) { + error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info"); + goto enable_discards_exit; } - vfio_host_win_add(container, 0, (hwaddr)-1, info->iova_pgsizes); - container->pgsizes = info->iova_pgsizes; - /* The default in the kernel ("dma_entry_limit") is 65535. 
*/ - container->dma_max_mappings = 65535; - if (!ret) { - vfio_get_info_dma_avail(info, &container->dma_max_mappings); - vfio_get_iommu_info_migration(container, info); + if (info->flags & VFIO_IOMMU_INFO_PGSIZES) { + container->pgsizes = info->iova_pgsizes; + } else { + container->pgsizes = qemu_real_host_page_size(); } + + if (!vfio_get_info_dma_avail(info, &container->dma_max_mappings)) { + container->dma_max_mappings = 65535; + } + vfio_get_iommu_info_migration(container, info); g_free(info); + + /* + * FIXME: We should parse VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE + * information to get the actual window extent rather than assume + * a 64-bit IOVA address space. + */ + vfio_host_win_add(container, 0, (hwaddr)-1, container->pgsizes); + break; } case VFIO_SPAPR_TCE_v2_IOMMU: @@ -2230,16 +2218,23 @@ static void vfio_disconnect_container(VFIOGroup *group) if (QLIST_EMPTY(&container->group_list)) { VFIOAddressSpace *space = container->space; VFIOGuestIOMMU *giommu, *tmp; + VFIOHostDMAWindow *hostwin, *next; QLIST_REMOVE(container, next); QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) { memory_region_unregister_iommu_notifier( - MEMORY_REGION(giommu->iommu), &giommu->n); + MEMORY_REGION(giommu->iommu_mr), &giommu->n); QLIST_REMOVE(giommu, giommu_next); g_free(giommu); } + QLIST_FOREACH_SAFE(hostwin, &container->hostwin_list, hostwin_next, + next) { + QLIST_REMOVE(hostwin, hostwin_next); + g_free(hostwin); + } + trace_vfio_disconnect_container(container->fd); close(container->fd); g_free(container); diff --git a/hw/vfio/display.c b/hw/vfio/display.c index 89bc90508f..78f4d82c1c 100644 --- a/hw/vfio/display.c +++ b/hw/vfio/display.c @@ -106,14 +106,14 @@ err: return; } -static int vfio_display_edid_ui_info(void *opaque, uint32_t idx, - QemuUIInfo *info) +static void vfio_display_edid_ui_info(void *opaque, uint32_t idx, + QemuUIInfo *info) { VFIOPCIDevice *vdev = opaque; VFIODisplay *dpy = vdev->dpy; if (!dpy->edid_regs) { - return 0; + return; } if (info->width && info->height) { @@ -121,8 +121,6 @@ static int vfio_display_edid_ui_info(void *opaque, uint32_t idx, } else { vfio_display_edid_update(vdev, false, 0, 0); } - - return 0; } static void vfio_display_edid_init(VFIOPCIDevice *vdev) diff --git a/hw/vfio/igd.c b/hw/vfio/igd.c index 470205f487..afe3fe7efc 100644 --- a/hw/vfio/igd.c +++ b/hw/vfio/igd.c @@ -199,7 +199,7 @@ static void vfio_pci_igd_lpc_bridge_class_init(ObjectClass *klass, void *data) k->class_id = PCI_CLASS_BRIDGE_ISA; } -static TypeInfo vfio_pci_igd_lpc_bridge_info = { +static const TypeInfo vfio_pci_igd_lpc_bridge_info = { .name = "vfio-pci-igd-lpc-bridge", .parent = TYPE_PCI_DEVICE, .class_init = vfio_pci_igd_lpc_bridge_class_init, @@ -557,7 +557,7 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr) * must allocate a 1MB aligned reserved memory region below 4GB with * the requested size (in bytes) for use by the Intel PCI class VGA * device at VM address 00:02.0. The base address of this reserved - * memory region must be written to the device BDSM regsiter at PCI + * memory region must be written to the device BDSM register at PCI * config offset 0x5C.
*/ bdsm_size = g_malloc(sizeof(*bdsm_size)); diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c index 82f654afb6..c74453e0b5 100644 --- a/hw/vfio/migration.c +++ b/hw/vfio/migration.c @@ -432,7 +432,7 @@ static int vfio_save_setup(QEMUFile *f, void *opaque) } ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_MASK, - VFIO_DEVICE_STATE_SAVING); + VFIO_DEVICE_STATE_V1_SAVING); if (ret) { error_report("%s: Failed to set state SAVING", vbasedev->name); return ret; @@ -531,8 +531,8 @@ static int vfio_save_complete_precopy(QEMUFile *f, void *opaque) uint64_t data_size; int ret; - ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_RUNNING, - VFIO_DEVICE_STATE_SAVING); + ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_V1_RUNNING, + VFIO_DEVICE_STATE_V1_SAVING); if (ret) { error_report("%s: Failed to set state STOP and SAVING", vbasedev->name); @@ -569,7 +569,7 @@ static int vfio_save_complete_precopy(QEMUFile *f, void *opaque) return ret; } - ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_SAVING, 0); + ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_V1_SAVING, 0); if (ret) { error_report("%s: Failed to set state STOPPED", vbasedev->name); return ret; @@ -609,7 +609,7 @@ static int vfio_load_setup(QEMUFile *f, void *opaque) } ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_MASK, - VFIO_DEVICE_STATE_RESUMING); + VFIO_DEVICE_STATE_V1_RESUMING); if (ret) { error_report("%s: Failed to set state RESUMING", vbasedev->name); if (migration->region.mmaps) { @@ -717,20 +717,20 @@ static void vfio_vmstate_change(void *opaque, bool running, RunState state) * In both the above cases, set _RUNNING bit. */ mask = ~VFIO_DEVICE_STATE_MASK; - value = VFIO_DEVICE_STATE_RUNNING; + value = VFIO_DEVICE_STATE_V1_RUNNING; } else { /* * Here device state could be either _RUNNING or _SAVING|_RUNNING. Reset * _RUNNING bit */ - mask = ~VFIO_DEVICE_STATE_RUNNING; + mask = ~VFIO_DEVICE_STATE_V1_RUNNING; /* * When VM state transition to stop for savevm command, device should * start saving data. 
*/ if (state == RUN_STATE_SAVE_VM) { - value = VFIO_DEVICE_STATE_SAVING; + value = VFIO_DEVICE_STATE_V1_SAVING; } else { value = 0; } @@ -768,8 +768,9 @@ static void vfio_migration_state_notifier(Notifier *notifier, void *data) case MIGRATION_STATUS_FAILED: bytes_transferred = 0; ret = vfio_migration_set_state(vbasedev, - ~(VFIO_DEVICE_STATE_SAVING | VFIO_DEVICE_STATE_RESUMING), - VFIO_DEVICE_STATE_RUNNING); + ~(VFIO_DEVICE_STATE_V1_SAVING | + VFIO_DEVICE_STATE_V1_RESUMING), + VFIO_DEVICE_STATE_V1_RUNNING); if (ret) { error_report("%s: Failed to set state RUNNING", vbasedev->name); } @@ -805,6 +806,8 @@ static int vfio_migration_init(VFIODevice *vbasedev, } vbasedev->migration = g_new0(VFIOMigration, 1); + vbasedev->migration->device_state = VFIO_DEVICE_STATE_V1_RUNNING; + vbasedev->migration->vm_running = runstate_is_running(); ret = vfio_region_setup(obj, vbasedev, &vbasedev->migration->region, info->index, "migration"); @@ -858,15 +861,16 @@ int vfio_migration_probe(VFIODevice *vbasedev, Error **errp) { VFIOContainer *container = vbasedev->group->container; struct vfio_region_info *info = NULL; - Error *local_err = NULL; int ret = -ENOTSUP; if (!vbasedev->enable_migration || !container->dirty_pages_supported) { goto add_blocker; } - ret = vfio_get_dev_region_info(vbasedev, VFIO_REGION_TYPE_MIGRATION, - VFIO_REGION_SUBTYPE_MIGRATION, &info); + ret = vfio_get_dev_region_info(vbasedev, + VFIO_REGION_TYPE_MIGRATION_DEPRECATED, + VFIO_REGION_SUBTYPE_MIGRATION_DEPRECATED, + &info); if (ret) { goto add_blocker; } @@ -885,9 +889,8 @@ add_blocker: "VFIO device doesn't support migration"); g_free(info); - ret = migrate_add_blocker(vbasedev->migration_blocker, &local_err); - if (local_err) { - error_propagate(errp, local_err); + ret = migrate_add_blocker(vbasedev->migration_blocker, errp); + if (ret < 0) { error_free(vbasedev->migration_blocker); vbasedev->migration_blocker = NULL; } diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c index e21a6ede11..f0147a050a 100644 --- a/hw/vfio/pci-quirks.c +++ b/hw/vfio/pci-quirks.c @@ -1356,7 +1356,7 @@ static bool vfio_radeon_smc_is_running(VFIOPCIDevice *vdev) /* * The scope of a config reset is controlled by a mode bit in the misc register * and a fuse, exposed as a bit in another register. The fuse is the default - * (0 = GFX, 1 = whole GPU), the misc bit is a toggle, with the forumula + * (0 = GFX, 1 = whole GPU), the misc bit is a toggle, with the formula * scope = !(misc ^ fuse), where the resulting scope is defined the same as * the fuse. A truth table therefore tells us that if misc == fuse, we need * to flip the value of the bit in the misc register. 
@@ -1565,22 +1565,6 @@ static int vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp) return 0; } -static void vfio_pci_nvlink2_get_tgt(Object *obj, Visitor *v, - const char *name, - void *opaque, Error **errp) -{ - uint64_t tgt = (uintptr_t) opaque; - visit_type_uint64(v, name, &tgt, errp); -} - -static void vfio_pci_nvlink2_get_link_speed(Object *obj, Visitor *v, - const char *name, - void *opaque, Error **errp) -{ - uint32_t link_speed = (uint32_t)(uintptr_t) opaque; - visit_type_uint32(v, name, &link_speed, errp); -} - int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp) { int ret; @@ -1618,9 +1602,9 @@ int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp) nv2reg->size, p); QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next); - object_property_add(OBJECT(vdev), "nvlink2-tgt", "uint64", - vfio_pci_nvlink2_get_tgt, NULL, NULL, - (void *) (uintptr_t) cap->tgt); + object_property_add_uint64_ptr(OBJECT(vdev), "nvlink2-tgt", + (uint64_t *) &cap->tgt, + OBJ_PROP_FLAG_READ); trace_vfio_pci_nvidia_gpu_setup_quirk(vdev->vbasedev.name, cap->tgt, nv2reg->size); free_exit: @@ -1679,15 +1663,15 @@ int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp) QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next); } - object_property_add(OBJECT(vdev), "nvlink2-tgt", "uint64", - vfio_pci_nvlink2_get_tgt, NULL, NULL, - (void *) (uintptr_t) captgt->tgt); + object_property_add_uint64_ptr(OBJECT(vdev), "nvlink2-tgt", + (uint64_t *) &captgt->tgt, + OBJ_PROP_FLAG_READ); trace_vfio_pci_nvlink2_setup_quirk_ssatgt(vdev->vbasedev.name, captgt->tgt, atsdreg->size); - object_property_add(OBJECT(vdev), "nvlink2-link-speed", "uint32", - vfio_pci_nvlink2_get_link_speed, NULL, NULL, - (void *) (uintptr_t) capspeed->link_speed); + object_property_add_uint32_ptr(OBJECT(vdev), "nvlink2-link-speed", + &capspeed->link_speed, + OBJ_PROP_FLAG_READ); trace_vfio_pci_nvlink2_setup_quirk_lnkspd(vdev->vbasedev.name, capspeed->link_speed); free_exit: diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index e1ea1d8a23..92a45de4c3 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -29,10 +29,10 @@ #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" #include "migration/vmstate.h" +#include "qapi/qmp/qdict.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "qemu/module.h" -#include "qemu/option.h" #include "qemu/range.h" #include "qemu/units.h" #include "sysemu/kvm.h" @@ -45,8 +45,12 @@ #define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug" +/* Protected by BQL */ +static KVMRouteChange vfio_route_change; + static void vfio_disable_interrupts(VFIOPCIDevice *vdev); static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled); +static void vfio_msi_disable_common(VFIOPCIDevice *vdev); /* * Disabling BAR mmaping can be slow, but toggling it around INTx can @@ -412,30 +416,36 @@ static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix) static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector, int vector_n, bool msix) { - int virq; - if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) { return; } - if (event_notifier_init(&vector->kvm_interrupt, 0)) { + vector->virq = kvm_irqchip_add_msi_route(&vfio_route_change, + vector_n, &vdev->pdev); +} + +static void vfio_connect_kvm_msi_virq(VFIOMSIVector *vector) +{ + if (vector->virq < 0) { return; } - virq = kvm_irqchip_add_msi_route(kvm_state, vector_n, &vdev->pdev); - if (virq < 0) { - event_notifier_cleanup(&vector->kvm_interrupt); - return; + if 
(event_notifier_init(&vector->kvm_interrupt, 0)) { + goto fail_notifier; } if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt, - NULL, virq) < 0) { - kvm_irqchip_release_virq(kvm_state, virq); - event_notifier_cleanup(&vector->kvm_interrupt); - return; + NULL, vector->virq) < 0) { + goto fail_kvm; } - vector->virq = virq; + return; + +fail_kvm: + event_notifier_cleanup(&vector->kvm_interrupt); +fail_notifier: + kvm_irqchip_release_virq(kvm_state, vector->virq); + vector->virq = -1; } static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector) @@ -490,7 +500,14 @@ static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr, } } else { if (msg) { - vfio_add_kvm_msi_virq(vdev, vector, nr, true); + if (vdev->defer_kvm_irq_routing) { + vfio_add_kvm_msi_virq(vdev, vector, nr, true); + } else { + vfio_route_change = kvm_irqchip_begin_route_changes(kvm_state); + vfio_add_kvm_msi_virq(vdev, vector, nr, true); + kvm_irqchip_commit_route_changes(&vfio_route_change); + vfio_connect_kvm_msi_virq(vector); + } } } @@ -500,11 +517,13 @@ static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr, * increase them as needed. */ if (vdev->nr_vectors < nr + 1) { - vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX); vdev->nr_vectors = nr + 1; - ret = vfio_enable_vectors(vdev, true); - if (ret) { - error_report("vfio: failed to enable vectors, %d", ret); + if (!vdev->defer_kvm_irq_routing) { + vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX); + ret = vfio_enable_vectors(vdev, true); + if (ret) { + error_report("vfio: failed to enable vectors, %d", ret); + } } } else { Error *err = NULL; @@ -566,11 +585,29 @@ static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr) } } +static void vfio_prepare_kvm_msi_virq_batch(VFIOPCIDevice *vdev) +{ + assert(!vdev->defer_kvm_irq_routing); + vdev->defer_kvm_irq_routing = true; + vfio_route_change = kvm_irqchip_begin_route_changes(kvm_state); +} + +static void vfio_commit_kvm_msi_virq_batch(VFIOPCIDevice *vdev) +{ + int i; + + assert(vdev->defer_kvm_irq_routing); + vdev->defer_kvm_irq_routing = false; + + kvm_irqchip_commit_route_changes(&vfio_route_change); + + for (i = 0; i < vdev->nr_vectors; i++) { + vfio_connect_kvm_msi_virq(&vdev->msi_vectors[i]); + } +} + static void vfio_msix_enable(VFIOPCIDevice *vdev) { - PCIDevice *pdev = &vdev->pdev; - unsigned int nr, max_vec = 0; - vfio_disable_interrupts(vdev); vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries); @@ -578,37 +615,45 @@ static void vfio_msix_enable(VFIOPCIDevice *vdev) vdev->interrupt = VFIO_INT_MSIX; /* - * Some communication channels between VF & PF or PF & fw rely on the - * physical state of the device and expect that enabling MSI-X from the - * guest enables the same on the host. When our guest is Linux, the - * guest driver call to pci_enable_msix() sets the enabling bit in the - * MSI-X capability, but leaves the vector table masked. We therefore - * can't rely on a vector_use callback (from request_irq() in the guest) - * to switch the physical device into MSI-X mode because that may come a - * long time after pci_enable_msix(). This code enables vector 0 with - * triggering to userspace, then immediately release the vector, leaving - * the physical device with no vectors enabled, but MSI-X enabled, just - * like the guest view. 
- * If there are already unmasked vectors (in migration resume phase and - * some guest startups) which will be enabled soon, we can allocate all - * of them here to avoid inefficiently disabling and enabling vectors - * repeatedly later. + * Setting vector notifiers triggers synchronous vector-use + * callbacks for each active vector. Deferring to commit the KVM + * routes once rather than per vector provides a substantial + * performance improvement. */ - if (!pdev->msix_function_masked) { - for (nr = 0; nr < msix_nr_vectors_allocated(pdev); nr++) { - if (!msix_is_masked(pdev, nr)) { - max_vec = nr; - } - } - } - vfio_msix_vector_do_use(pdev, max_vec, NULL, NULL); - vfio_msix_vector_release(pdev, max_vec); + vfio_prepare_kvm_msi_virq_batch(vdev); - if (msix_set_vector_notifiers(pdev, vfio_msix_vector_use, + if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use, vfio_msix_vector_release, NULL)) { error_report("vfio: msix_set_vector_notifiers failed"); } + vfio_commit_kvm_msi_virq_batch(vdev); + + if (vdev->nr_vectors) { + int ret; + + ret = vfio_enable_vectors(vdev, true); + if (ret) { + error_report("vfio: failed to enable vectors, %d", ret); + } + } else { + /* + * Some communication channels between VF & PF or PF & fw rely on the + * physical state of the device and expect that enabling MSI-X from the + * guest enables the same on the host. When our guest is Linux, the + * guest driver call to pci_enable_msix() sets the enabling bit in the + * MSI-X capability, but leaves the vector table masked. We therefore + * can't rely on a vector_use callback (from request_irq() in the guest) + * to switch the physical device into MSI-X mode because that may come a + * long time after pci_enable_msix(). This code enables vector 0 with + * triggering to userspace, then immediately release the vector, leaving + * the physical device with no vectors enabled, but MSI-X enabled, just + * like the guest view. + */ + vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL); + vfio_msix_vector_release(&vdev->pdev, 0); + } + trace_vfio_msix_enable(vdev->vbasedev.name); } @@ -620,6 +665,13 @@ static void vfio_msi_enable(VFIOPCIDevice *vdev) vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev); retry: + /* + * Setting vector notifiers needs to enable route for each vector. + * Deferring to commit the KVM routes once rather than per vector + * provides a substantial performance improvement. 
+ */ + vfio_prepare_kvm_msi_virq_batch(vdev); + vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors); for (i = 0; i < vdev->nr_vectors; i++) { @@ -643,6 +695,8 @@ retry: vfio_add_kvm_msi_virq(vdev, vector, i, false); } + vfio_commit_kvm_msi_virq_batch(vdev); + /* Set interrupt type prior to possible interrupts */ vdev->interrupt = VFIO_INT_MSI; @@ -650,29 +704,17 @@ retry: if (ret) { if (ret < 0) { error_report("vfio: Error: Failed to setup MSI fds: %m"); - } else if (ret != vdev->nr_vectors) { + } else { error_report("vfio: Error: Failed to enable %d " "MSI vectors, retry with %d", vdev->nr_vectors, ret); } - for (i = 0; i < vdev->nr_vectors; i++) { - VFIOMSIVector *vector = &vdev->msi_vectors[i]; - if (vector->virq >= 0) { - vfio_remove_kvm_msi_virq(vector); - } - qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), - NULL, NULL, NULL); - event_notifier_cleanup(&vector->interrupt); - } + vfio_msi_disable_common(vdev); - g_free(vdev->msi_vectors); - vdev->msi_vectors = NULL; - - if (ret > 0 && ret != vdev->nr_vectors) { + if (ret > 0) { vdev->nr_vectors = ret; goto retry; } - vdev->nr_vectors = 0; /* * Failing to setup MSI doesn't really fall within any specification. @@ -680,7 +722,6 @@ retry: * out to fall back to INTx for this device. */ error_report("vfio: Error: Failed to enable MSI"); - vdev->interrupt = VFIO_INT_NONE; return; } @@ -690,7 +731,6 @@ retry: static void vfio_msi_disable_common(VFIOPCIDevice *vdev) { - Error *err = NULL; int i; for (i = 0; i < vdev->nr_vectors; i++) { @@ -709,15 +749,11 @@ static void vfio_msi_disable_common(VFIOPCIDevice *vdev) vdev->msi_vectors = NULL; vdev->nr_vectors = 0; vdev->interrupt = VFIO_INT_NONE; - - vfio_intx_enable(vdev, &err); - if (err) { - error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); - } } static void vfio_msix_disable(VFIOPCIDevice *vdev) { + Error *err = NULL; int i; msix_unset_vector_notifiers(&vdev->pdev); @@ -738,6 +774,10 @@ static void vfio_msix_disable(VFIOPCIDevice *vdev) } vfio_msi_disable_common(vdev); + vfio_intx_enable(vdev, &err); + if (err) { + error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); + } memset(vdev->msix->pending, 0, BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long)); @@ -747,8 +787,14 @@ static void vfio_msix_disable(VFIOPCIDevice *vdev) static void vfio_msi_disable(VFIOPCIDevice *vdev) { + Error *err = NULL; + vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX); vfio_msi_disable_common(vdev); + vfio_intx_enable(vdev, &err); + if (err) { + error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); + } trace_vfio_msi_disable(vdev->vbasedev.name); } @@ -941,7 +987,7 @@ static void vfio_pci_size_rom(VFIOPCIDevice *vdev) } if (vfio_opt_rom_in_denylist(vdev)) { - if (dev->opts && qemu_opt_get(dev->opts, "rombar")) { + if (dev->opts && qdict_haskey(dev->opts, "rombar")) { warn_report("Device at %s is known to cause system instability" " issues during option rom execution", vdev->vbasedev.name); @@ -1084,8 +1130,8 @@ static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar) /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */ if (bar_addr != PCI_BAR_UNMAPPED && - !(bar_addr & ~qemu_real_host_page_mask)) { - size = qemu_real_host_page_size; + !(bar_addr & ~qemu_real_host_page_mask())) { + size = qemu_real_host_page_size(); } memory_region_transaction_begin(); @@ -1201,7 +1247,7 @@ void vfio_pci_write_config(PCIDevice *pdev, for (bar = 0; bar < PCI_ROM_SLOT; bar++) { if (old_addr[bar] != pdev->io_regions[bar].addr && 
vdev->bars[bar].region.size > 0 && - vdev->bars[bar].region.size < qemu_real_host_page_size) { + vdev->bars[bar].region.size < qemu_real_host_page_size()) { vfio_sub_page_bar_update_mapping(pdev, bar); } } @@ -1289,7 +1335,7 @@ static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev) } /* MSI-X table start and end aligned to host page size */ - start = vdev->msix->table_offset & qemu_real_host_page_mask; + start = vdev->msix->table_offset & qemu_real_host_page_mask(); end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset + (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE)); @@ -1364,7 +1410,7 @@ static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp) * TODO: Lookup table for known devices. * * Logically we might use an algorithm here to select the BAR adding - * the least additional MMIO space, but we cannot programatically + * the least additional MMIO space, but we cannot programmatically * predict the driver dependency on BAR ordering or sizing, therefore * 'auto' becomes a lookup for combinations reported to work. */ @@ -1529,8 +1575,8 @@ static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp) int ret; Error *err = NULL; - vdev->msix->pending = g_malloc0(BITS_TO_LONGS(vdev->msix->entries) * - sizeof(unsigned long)); + vdev->msix->pending = g_new0(unsigned long, + BITS_TO_LONGS(vdev->msix->entries)); ret = msix_init(&vdev->pdev, vdev->msix->entries, vdev->bars[vdev->msix->table_bar].mr, vdev->msix->table_bar, vdev->msix->table_offset, @@ -2158,7 +2204,7 @@ static void vfio_pci_pre_reset(VFIOPCIDevice *vdev) } /* - * Stop any ongoing DMA by disconecting I/O, MMIO, and bus master. + * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master. * Also put INTx Disable in known state. */ cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2); @@ -2334,7 +2380,7 @@ static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single) g_free(reset); trace_vfio_pci_hot_reset_result(vdev->vbasedev.name, - ret ? "%m" : "Success"); + ret ? strerror(errno) : "Success"); out: /* Re-enable INTx on affected devices */ @@ -2384,7 +2430,7 @@ out_single: } /* - * We want to differentiate hot reset of mulitple in-use devices vs hot reset + * We want to differentiate hot reset of multiple in-use devices vs hot reset * of a single in-use device. VFIO_DEVICE_RESET will already handle the case * of doing hot resets when there is only a single device per bus. The in-use * here refers to how many VFIODevices are affected. A hot reset that affects @@ -2453,7 +2499,12 @@ static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f) { VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); PCIDevice *pdev = &vdev->pdev; - int ret; + pcibus_t old_addr[PCI_NUM_REGIONS - 1]; + int bar, ret; + + for (bar = 0; bar < PCI_ROM_SLOT; bar++) { + old_addr[bar] = pdev->io_regions[bar].addr; + } ret = vmstate_load_state(f, &vmstate_vfio_pci_config, vdev, 1); if (ret) { @@ -2463,6 +2514,18 @@ static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f) vfio_pci_write_config(pdev, PCI_COMMAND, pci_get_word(pdev->config + PCI_COMMAND), 2); + for (bar = 0; bar < PCI_ROM_SLOT; bar++) { + /* + * The address may not be changed in some scenarios + * (e.g. the VF driver isn't loaded in VM). 
+ */ + if (old_addr[bar] != pdev->io_regions[bar].addr && + vdev->bars[bar].region.size > 0 && + vdev->bars[bar].region.size < qemu_real_host_page_size()) { + vfio_sub_page_bar_update_mapping(pdev, bar); + } + } + if (msi_enabled(pdev)) { vfio_msi_enable(vdev); } else if (msix_enabled(pdev)) { @@ -2783,6 +2846,7 @@ static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev) static void vfio_realize(PCIDevice *pdev, Error **errp) { VFIOPCIDevice *vdev = VFIO_PCI(pdev); + VFIODevice *vbasedev = &vdev->vbasedev; VFIODevice *vbasedev_iter; VFIOGroup *group; char *tmp, *subsys, group_path[PATH_MAX], *group_name; @@ -2793,7 +2857,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) int i, ret; bool is_mdev; - if (!vdev->vbasedev.sysfsdev) { + if (!vbasedev->sysfsdev) { if (!(~vdev->host.domain || ~vdev->host.bus || ~vdev->host.slot || ~vdev->host.function)) { error_setg(errp, "No provided host device"); @@ -2801,24 +2865,24 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n"); return; } - vdev->vbasedev.sysfsdev = + vbasedev->sysfsdev = g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x", vdev->host.domain, vdev->host.bus, vdev->host.slot, vdev->host.function); } - if (stat(vdev->vbasedev.sysfsdev, &st) < 0) { + if (stat(vbasedev->sysfsdev, &st) < 0) { error_setg_errno(errp, errno, "no such host device"); - error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.sysfsdev); + error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->sysfsdev); return; } - vdev->vbasedev.name = g_path_get_basename(vdev->vbasedev.sysfsdev); - vdev->vbasedev.ops = &vfio_pci_ops; - vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI; - vdev->vbasedev.dev = DEVICE(vdev); + vbasedev->name = g_path_get_basename(vbasedev->sysfsdev); + vbasedev->ops = &vfio_pci_ops; + vbasedev->type = VFIO_DEVICE_TYPE_PCI; + vbasedev->dev = DEVICE(vdev); - tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev); + tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev); len = readlink(tmp, group_path, sizeof(group_path)); g_free(tmp); @@ -2836,7 +2900,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) goto error; } - trace_vfio_realize(vdev->vbasedev.name, groupid); + trace_vfio_realize(vbasedev->name, groupid); group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev), errp); if (!group) { @@ -2844,7 +2908,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) } QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { - if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) { + if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) { error_setg(errp, "device is already attached"); vfio_put_group(group); goto error; @@ -2857,22 +2921,22 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) * stays in sync with the active working set of the guest driver. Prevent * the x-balloon-allowed option unless this is minimally an mdev device. 
*/ - tmp = g_strdup_printf("%s/subsystem", vdev->vbasedev.sysfsdev); + tmp = g_strdup_printf("%s/subsystem", vbasedev->sysfsdev); subsys = realpath(tmp, NULL); g_free(tmp); is_mdev = subsys && (strcmp(subsys, "/sys/bus/mdev") == 0); free(subsys); - trace_vfio_mdev(vdev->vbasedev.name, is_mdev); + trace_vfio_mdev(vbasedev->name, is_mdev); - if (vdev->vbasedev.ram_block_discard_allowed && !is_mdev) { + if (vbasedev->ram_block_discard_allowed && !is_mdev) { error_setg(errp, "x-balloon-allowed only potentially compatible " "with mdev devices"); vfio_put_group(group); goto error; } - ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev, errp); + ret = vfio_get_device(group, vbasedev->name, vbasedev, errp); if (ret) { vfio_put_group(group); goto error; @@ -2885,7 +2949,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) } /* Get a copy of config space */ - ret = pread(vdev->vbasedev.fd, vdev->pdev.config, + ret = pread(vbasedev->fd, vdev->pdev.config, MIN(pci_config_size(&vdev->pdev), vdev->config_size), vdev->config_offset); if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) { @@ -2913,7 +2977,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) goto error; } vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0); - trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id); + trace_vfio_pci_emulated_vendor_id(vbasedev->name, vdev->vendor_id); } else { vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID); } @@ -2924,7 +2988,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) goto error; } vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0); - trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id); + trace_vfio_pci_emulated_device_id(vbasedev->name, vdev->device_id); } else { vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID); } @@ -2936,7 +3000,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) } vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID, vdev->sub_vendor_id, ~0); - trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name, + trace_vfio_pci_emulated_sub_vendor_id(vbasedev->name, vdev->sub_vendor_id); } @@ -2946,7 +3010,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) goto error; } vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0); - trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name, + trace_vfio_pci_emulated_sub_device_id(vbasedev->name, vdev->sub_device_id); } @@ -3005,7 +3069,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) goto out_teardown; } - ret = vfio_get_dev_region_info(&vdev->vbasedev, + ret = vfio_get_dev_region_info(vbasedev, VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL, VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion); if (ret) { @@ -3081,9 +3145,9 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) } if (!pdev->failover_pair_id) { - ret = vfio_migration_probe(&vdev->vbasedev, errp); + ret = vfio_migration_probe(vbasedev, errp); if (ret) { - error_report("%s: Migration disabled", vdev->vbasedev.name); + error_report("%s: Migration disabled", vbasedev->name); } } @@ -3095,12 +3159,14 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) out_deregister: pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); - kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier); + if (vdev->irqchip_change_notifier.notify) { + kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier); + } out_teardown: vfio_teardown_msi(vdev); vfio_bars_exit(vdev); 
error: - error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name); + error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->name); } static void vfio_instance_finalize(Object *obj) diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h index 64777516d1..7c236a52f4 100644 --- a/hw/vfio/pci.h +++ b/hw/vfio/pci.h @@ -19,6 +19,7 @@ #include "qemu/queue.h" #include "qemu/timer.h" #include "qom/object.h" +#include "sysemu/kvm.h" #define PCI_ANY_ID (~0) @@ -171,6 +172,7 @@ struct VFIOPCIDevice { bool no_kvm_ioeventfd; bool no_vfio_ioeventfd; bool enable_ramfb; + bool defer_kvm_irq_routing; VFIODisplay *dpy; Notifier irqchip_change_notifier; }; diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c index cc3f66f7e4..5af73f9287 100644 --- a/hw/vfio/platform.c +++ b/hw/vfio/platform.c @@ -71,7 +71,7 @@ static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev, sysbus_init_irq(sbdev, &intp->qemuirq); /* Get an eventfd for trigger */ - intp->interrupt = g_malloc0(sizeof(EventNotifier)); + intp->interrupt = g_new0(EventNotifier, 1); ret = event_notifier_init(intp->interrupt, 0); if (ret) { g_free(intp->interrupt); @@ -82,7 +82,7 @@ static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev, } if (vfio_irq_is_automasked(intp)) { /* Get an eventfd for resample/unmask */ - intp->unmask = g_malloc0(sizeof(EventNotifier)); + intp->unmask = g_new0(EventNotifier, 1); ret = event_notifier_init(intp->unmask, 0); if (ret) { g_free(intp->interrupt); @@ -156,7 +156,7 @@ static void vfio_mmap_set_enabled(VFIOPlatformDevice *vdev, bool enabled) * if there is no more active IRQ * @opaque: actually points to the VFIO platform device * - * Called on mmap timer timout, this function checks whether the + * Called on mmap timer timeout, this function checks whether the * IRQ is still active and if not, restores the fast path. * by construction a single eventfd is handled at a time. * if the IRQ is still active, the timer is re-programmed. diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c index ea3f70bd2f..9ec1e95f6d 100644 --- a/hw/vfio/spapr.c +++ b/hw/vfio/spapr.c @@ -44,7 +44,7 @@ static void vfio_prereg_listener_region_add(MemoryListener *listener, const hwaddr gpa = section->offset_within_address_space; hwaddr end; int ret; - hwaddr page_mask = qemu_real_host_page_mask; + hwaddr page_mask = qemu_real_host_page_mask(); struct vfio_iommu_spapr_register_memory reg = { .argsz = sizeof(reg), .flags = 0, @@ -102,7 +102,7 @@ static void vfio_prereg_listener_region_del(MemoryListener *listener, const hwaddr gpa = section->offset_within_address_space; hwaddr end; int ret; - hwaddr page_mask = qemu_real_host_page_mask; + hwaddr page_mask = qemu_real_host_page_mask(); struct vfio_iommu_spapr_register_memory reg = { .argsz = sizeof(reg), .flags = 0, @@ -136,6 +136,7 @@ static void vfio_prereg_listener_region_del(MemoryListener *listener, } const MemoryListener vfio_prereg_listener = { + .name = "vfio-pre-reg", .region_add = vfio_prereg_listener_region_add, .region_del = vfio_prereg_listener_region_del, }; @@ -198,12 +199,12 @@ int vfio_spapr_create_window(VFIOContainer *container, * Below we look at qemu_real_host_page_size as TCEs are allocated from * system pages. 
*/ - bits_per_level = ctz64(qemu_real_host_page_size) + 8; + bits_per_level = ctz64(qemu_real_host_page_size()) + 8; create.levels = bits_total / bits_per_level; if (bits_total % bits_per_level) { ++create.levels; } - max_levels = (64 - create.page_shift) / ctz64(qemu_real_host_page_size); + max_levels = (64 - create.page_shift) / ctz64(qemu_real_host_page_size()); for ( ; create.levels <= max_levels; ++create.levels) { ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create); if (!ret) { diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events index 0ef1b5f4a6..73dffe9e00 100644 --- a/hw/vfio/trace-events +++ b/hw/vfio/trace-events @@ -100,6 +100,7 @@ vfio_listener_region_add_skip(uint64_t start, uint64_t end) "SKIPPING region_add vfio_spapr_group_attach(int groupfd, int tablefd) "Attached groupfd %d to liobn fd %d" vfio_listener_region_add_iommu(uint64_t start, uint64_t end) "region_add [iommu] 0x%"PRIx64" - 0x%"PRIx64 vfio_listener_region_add_ram(uint64_t iova_start, uint64_t iova_end, void *vaddr) "region_add [ram] 0x%"PRIx64" - 0x%"PRIx64" [%p]" +vfio_known_safe_misalignment(const char *name, uint64_t iova, uint64_t offset_within_region, uintptr_t page_size) "Region \"%s\" iova=0x%"PRIx64" offset_within_region=0x%"PRIx64" qemu_real_host_page_size=0x%"PRIxPTR vfio_listener_region_add_no_dma_map(const char *name, uint64_t iova, uint64_t size, uint64_t page_size) "Region \"%s\" 0x%"PRIx64" size=0x%"PRIx64" is not aligned to 0x%"PRIx64" and cannot be mapped for DMA" vfio_listener_region_del_skip(uint64_t start, uint64_t end) "SKIPPING region_del 0x%"PRIx64" - 0x%"PRIx64 vfio_listener_region_del(uint64_t start, uint64_t end) "region_del 0x%"PRIx64" - 0x%"PRIx64 diff --git a/hw/virtio/Kconfig b/hw/virtio/Kconfig index 35ab45e209..cbfd8c7173 100644 --- a/hw/virtio/Kconfig +++ b/hw/virtio/Kconfig @@ -1,6 +1,3 @@ -config VHOST - bool - config VIRTIO bool @@ -59,7 +56,32 @@ config VIRTIO_MEM depends on VIRTIO_MEM_SUPPORTED select MEM_DEVICE +config VHOST_VSOCK + bool + default y + depends on VIRTIO && VHOST_KERNEL + +config VHOST_USER_VSOCK + bool + default y + depends on VIRTIO && VHOST_USER + config VHOST_USER_I2C bool default y depends on VIRTIO && VHOST_USER + +config VHOST_USER_RNG + bool + default y + depends on VIRTIO && VHOST_USER + +config VHOST_USER_FS + bool + default y + depends on VIRTIO && VHOST_USER + +config VHOST_USER_GPIO + bool + default y + depends on VIRTIO && VHOST_USER diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build index bc352a6009..dfed1e7af5 100644 --- a/hw/virtio/meson.build +++ b/hw/virtio/meson.build @@ -2,23 +2,25 @@ softmmu_virtio_ss = ss.source_set() softmmu_virtio_ss.add(files('virtio-bus.c')) softmmu_virtio_ss.add(when: 'CONFIG_VIRTIO_PCI', if_true: files('virtio-pci.c')) softmmu_virtio_ss.add(when: 'CONFIG_VIRTIO_MMIO', if_true: files('virtio-mmio.c')) -softmmu_virtio_ss.add(when: 'CONFIG_VHOST', if_false: files('vhost-stub.c')) - -softmmu_ss.add_all(when: 'CONFIG_VIRTIO', if_true: softmmu_virtio_ss) -softmmu_ss.add(when: 'CONFIG_VIRTIO', if_false: files('vhost-stub.c')) - -softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-stub.c')) virtio_ss = ss.source_set() virtio_ss.add(files('virtio.c')) -virtio_ss.add(when: 'CONFIG_VHOST', if_true: files('vhost.c', 'vhost-backend.c')) -virtio_ss.add(when: 'CONFIG_VHOST_USER', if_true: files('vhost-user.c')) -virtio_ss.add(when: 'CONFIG_VHOST_VDPA', if_true: files('vhost-vdpa.c')) + +if have_vhost + virtio_ss.add(files('vhost.c', 'vhost-backend.c', 'vhost-iova-tree.c')) + if have_vhost_user + 
virtio_ss.add(files('vhost-user.c')) + endif + if have_vhost_vdpa + virtio_ss.add(files('vhost-vdpa.c', 'vhost-shadow-virtqueue.c')) + endif +else + softmmu_virtio_ss.add(files('vhost-stub.c')) +endif + virtio_ss.add(when: 'CONFIG_VIRTIO_BALLOON', if_true: files('virtio-balloon.c')) virtio_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('virtio-crypto.c')) -virtio_ss.add(when: ['CONFIG_VIRTIO_CRYPTO', 'CONFIG_VIRTIO_PCI'], if_true: files('virtio-crypto-pci.c')) virtio_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs.c')) -virtio_ss.add(when: ['CONFIG_VHOST_USER_FS', 'CONFIG_VIRTIO_PCI'], if_true: files('vhost-user-fs-pci.c')) virtio_ss.add(when: 'CONFIG_VIRTIO_PMEM', if_true: files('virtio-pmem.c')) virtio_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock.c', 'vhost-vsock-common.c')) virtio_ss.add(when: 'CONFIG_VHOST_USER_VSOCK', if_true: files('vhost-user-vsock.c', 'vhost-vsock-common.c')) @@ -26,15 +28,22 @@ virtio_ss.add(when: 'CONFIG_VIRTIO_RNG', if_true: files('virtio-rng.c')) virtio_ss.add(when: 'CONFIG_VIRTIO_IOMMU', if_true: files('virtio-iommu.c')) virtio_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem.c')) virtio_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c.c')) -virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_I2C'], if_true: files('vhost-user-i2c-pci.c')) +virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c')) +virtio_ss.add(when: 'CONFIG_VHOST_USER_GPIO', if_true: files('vhost-user-gpio.c')) +virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_GPIO'], if_true: files('vhost-user-gpio-pci.c')) virtio_pci_ss = ss.source_set() virtio_pci_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_VSOCK', if_true: files('vhost-user-vsock-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_INPUT', if_true: files('vhost-user-input-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_SCSI', if_true: files('vhost-user-scsi-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VHOST_SCSI', if_true: files('vhost-scsi-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs-pci.c')) + +virtio_pci_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('virtio-crypto-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VIRTIO_INPUT_HOST', if_true: files('virtio-input-host-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-input-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VIRTIO_RNG', if_true: files('virtio-rng-pci.c')) @@ -51,3 +60,8 @@ virtio_pci_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem-pci.c')) virtio_ss.add_all(when: 'CONFIG_VIRTIO_PCI', if_true: virtio_pci_ss) specific_ss.add_all(when: 'CONFIG_VIRTIO', if_true: virtio_ss) +softmmu_ss.add_all(when: 'CONFIG_VIRTIO', if_true: softmmu_virtio_ss) +softmmu_ss.add(when: 'CONFIG_VIRTIO', if_false: files('vhost-stub.c')) +softmmu_ss.add(when: 'CONFIG_VIRTIO', if_false: files('virtio-stub.c')) +softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-stub.c')) +softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('virtio-stub.c')) diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events index 8ed19e9d0c..14fc5b9bb2 100644 --- a/hw/virtio/trace-events +++ 
b/hw/virtio/trace-events @@ -8,6 +8,10 @@ vhost_region_add_section_aligned(const char *name, uint64_t gpa, uint64_t size, vhost_section(const char *name) "%s" vhost_reject_section(const char *name, int d) "%s:%d" vhost_iotlb_miss(void *dev, int step) "%p step %d" +vhost_dev_cleanup(void *dev) "%p" +vhost_dev_start(void *dev, const char *name, bool vrings) "%p:%s vrings:%d" +vhost_dev_stop(void *dev, const char *name, bool vrings) "%p:%s vrings:%d" + # vhost-user.c vhost_user_postcopy_end_entry(void) "" @@ -21,10 +25,15 @@ vhost_user_set_mem_table_withfd(int index, const char *name, uint64_t memory_siz vhost_user_postcopy_waker(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64 vhost_user_postcopy_waker_found(uint64_t client_addr) "0x%"PRIx64 vhost_user_postcopy_waker_nomatch(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64 +vhost_user_read(uint32_t req, uint32_t flags) "req:%d flags:0x%"PRIx32"" +vhost_user_write(uint32_t req, uint32_t flags) "req:%d flags:0x%"PRIx32"" +vhost_user_create_notifier(int idx, void *n) "idx:%d n:%p" # vhost-vdpa.c vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8 vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8 +vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8 +vhost_vdpa_listener_commit(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8 vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d" vhost_vdpa_listener_region_del(void *vdpa, uint64_t iova, uint64_t llend) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64 vhost_vdpa_add_status(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8 @@ -52,6 +61,7 @@ vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd) "dev: %p index: vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64 vhost_vdpa_set_owner(void *dev) "dev: %p" vhost_vdpa_vq_get_addr(void *dev, void *vq, uint64_t desc_user_addr, uint64_t avail_user_addr, uint64_t used_user_addr) "dev: %p vq: %p desc_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64 +vhost_vdpa_get_iova_range(void *dev, uint64_t first, uint64_t last) "dev: %p first: 0x%"PRIx64" last: 0x%"PRIx64 # virtio.c virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u" @@ -86,12 +96,18 @@ virtio_mmio_guest_page(uint64_t size, int shift) "guest page size 0x%" PRIx64 " virtio_mmio_queue_write(uint64_t value, int max_size) "mmio_queue write 0x%" PRIx64 " max %d" virtio_mmio_setting_irq(int level) "virtio_mmio setting IRQ %d" -# virtio-iommu.c +# virtio-pci.c +virtio_pci_notify(uint16_t vector) "virtio_pci_notify vec 0x%x" +virtio_pci_notify_write(uint64_t addr, uint64_t val, unsigned int size) "0x%" PRIx64" = 0x%" PRIx64 " (%d)" +virtio_pci_notify_write_pio(uint64_t addr, uint64_t val, unsigned int size) "0x%" PRIx64" = 0x%" PRIx64 " (%d)" + +# hw/virtio/virtio-iommu.c virtio_iommu_device_reset(void) "reset!" +virtio_iommu_system_reset(void) "system reset!" 
virtio_iommu_get_features(uint64_t features) "device supports features=0x%"PRIx64 virtio_iommu_device_status(uint8_t status) "driver status = %d" -virtio_iommu_get_config(uint64_t page_size_mask, uint64_t start, uint64_t end, uint32_t domain_range, uint32_t probe_size) "page_size_mask=0x%"PRIx64" start=0x%"PRIx64" end=0x%"PRIx64" domain_range=%d probe_size=0x%x" -virtio_iommu_set_config(uint64_t page_size_mask, uint64_t start, uint64_t end, uint32_t domain_range, uint32_t probe_size) "page_size_mask=0x%"PRIx64" start=0x%"PRIx64" end=0x%"PRIx64" domain_bits=%d probe_size=0x%x" +virtio_iommu_get_config(uint64_t page_size_mask, uint64_t start, uint64_t end, uint32_t domain_start, uint32_t domain_end, uint32_t probe_size, uint8_t bypass) "page_size_mask=0x%"PRIx64" input range start=0x%"PRIx64" input range end=0x%"PRIx64" domain range start=%d domain range end=%d probe_size=0x%x bypass=0x%x" +virtio_iommu_set_config(uint8_t bypass) "bypass=0x%x" virtio_iommu_attach(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d" virtio_iommu_detach(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d" virtio_iommu_map(uint32_t domain_id, uint64_t virt_start, uint64_t virt_end, uint64_t phys_start, uint32_t flags) "domain=%d virt_start=0x%"PRIx64" virt_end=0x%"PRIx64 " phys_start=0x%"PRIx64" flags=%d" @@ -112,6 +128,7 @@ virtio_iommu_remap(const char *name, uint64_t virt_start, uint64_t virt_end, uin virtio_iommu_set_page_size_mask(const char *name, uint64_t old, uint64_t new) "mr=%s old_mask=0x%"PRIx64" new_mask=0x%"PRIx64 virtio_iommu_notify_flag_add(const char *name) "add notifier to mr %s" virtio_iommu_notify_flag_del(const char *name) "del notifier from mr %s" +virtio_iommu_switch_address_space(uint8_t bus, uint8_t slot, uint8_t fn, bool on) "Device %02x:%02x.%x switching address space (iommu enabled=%d)" # virtio-mem.c virtio_mem_send_response(uint16_t type) "type=%" PRIu16 @@ -127,3 +144,8 @@ virtio_mem_state_response(uint16_t state) "state=%" PRIu16 virtio_pmem_flush_request(void) "flush request" virtio_pmem_response(void) "flush response" virtio_pmem_flush_done(int type) "fsync return=%d" + +# virtio-gpio.c +virtio_gpio_start(void) "start" +virtio_gpio_stop(void) "stop" +virtio_gpio_set_status(uint8_t status) "0x%x" diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c index 594d770b75..8e581575c9 100644 --- a/hw/virtio/vhost-backend.c +++ b/hw/virtio/vhost-backend.c @@ -47,7 +47,7 @@ static int vhost_kernel_cleanup(struct vhost_dev *dev) assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL); - return close(fd); + return close(fd) < 0 ? 
-errno : 0; } static int vhost_kernel_memslots_limit(struct vhost_dev *dev) @@ -58,7 +58,7 @@ static int vhost_kernel_memslots_limit(struct vhost_dev *dev) if (g_file_get_contents("/sys/module/vhost/parameters/max_mem_regions", &s, NULL, NULL)) { uint64_t val = g_ascii_strtoull(s, NULL, 10); - if (!((val == G_MAXUINT64 || !val) && errno)) { + if (val < INT_MAX && val > 0) { g_free(s); return val; } @@ -146,6 +146,12 @@ static int vhost_kernel_set_vring_call(struct vhost_dev *dev, return vhost_kernel_call(dev, VHOST_SET_VRING_CALL, file); } +static int vhost_kernel_set_vring_err(struct vhost_dev *dev, + struct vhost_vring_file *file) +{ + return vhost_kernel_call(dev, VHOST_SET_VRING_ERR, file); +} + static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev, struct vhost_vring_state *s) { @@ -203,7 +209,6 @@ static int vhost_kernel_get_vq_index(struct vhost_dev *dev, int idx) return idx - dev->vq_index; } -#ifdef CONFIG_VHOST_VSOCK static int vhost_kernel_vsock_set_guest_cid(struct vhost_dev *dev, uint64_t guest_cid) { @@ -214,7 +219,6 @@ static int vhost_kernel_vsock_set_running(struct vhost_dev *dev, int start) { return vhost_kernel_call(dev, VHOST_VSOCK_SET_RUNNING, &start); } -#endif /* CONFIG_VHOST_VSOCK */ static void vhost_kernel_iotlb_read(void *opaque) { @@ -293,7 +297,7 @@ static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev, qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL); } -static const VhostOps kernel_ops = { +const VhostOps kernel_ops = { .backend_type = VHOST_BACKEND_TYPE_KERNEL, .vhost_backend_init = vhost_kernel_init, .vhost_backend_cleanup = vhost_kernel_cleanup, @@ -311,6 +315,7 @@ static const VhostOps kernel_ops = { .vhost_get_vring_base = vhost_kernel_get_vring_base, .vhost_set_vring_kick = vhost_kernel_set_vring_kick, .vhost_set_vring_call = vhost_kernel_set_vring_call, + .vhost_set_vring_err = vhost_kernel_set_vring_err, .vhost_set_vring_busyloop_timeout = vhost_kernel_set_vring_busyloop_timeout, .vhost_set_features = vhost_kernel_set_features, @@ -319,43 +324,13 @@ static const VhostOps kernel_ops = { .vhost_set_owner = vhost_kernel_set_owner, .vhost_reset_device = vhost_kernel_reset_device, .vhost_get_vq_index = vhost_kernel_get_vq_index, -#ifdef CONFIG_VHOST_VSOCK .vhost_vsock_set_guest_cid = vhost_kernel_vsock_set_guest_cid, .vhost_vsock_set_running = vhost_kernel_vsock_set_running, -#endif /* CONFIG_VHOST_VSOCK */ .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback, .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg, }; #endif -int vhost_set_backend_type(struct vhost_dev *dev, VhostBackendType backend_type) -{ - int r = 0; - - switch (backend_type) { -#ifdef CONFIG_VHOST_KERNEL - case VHOST_BACKEND_TYPE_KERNEL: - dev->vhost_ops = &kernel_ops; - break; -#endif -#ifdef CONFIG_VHOST_USER - case VHOST_BACKEND_TYPE_USER: - dev->vhost_ops = &user_ops; - break; -#endif -#ifdef CONFIG_VHOST_VDPA - case VHOST_BACKEND_TYPE_VDPA: - dev->vhost_ops = &vdpa_ops; - break; -#endif - default: - error_report("Unknown vhost backend type"); - r = -1; - } - - return r; -} - int vhost_backend_update_device_iotlb(struct vhost_dev *dev, uint64_t iova, uint64_t uaddr, uint64_t len, diff --git a/hw/virtio/vhost-iova-tree.c b/hw/virtio/vhost-iova-tree.c new file mode 100644 index 0000000000..3d03395a77 --- /dev/null +++ b/hw/virtio/vhost-iova-tree.c @@ -0,0 +1,110 @@ +/* + * vhost software live migration iova tree + * + * SPDX-FileCopyrightText: Red Hat, Inc. 
2021 + * SPDX-FileContributor: Author: Eugenio Pérez + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/iova-tree.h" +#include "vhost-iova-tree.h" + +#define iova_min_addr qemu_real_host_page_size() + +/** + * VhostIOVATree, able to: + * - Translate iova address + * - Reverse translate iova address (from translated to iova) + * - Allocate IOVA regions for translated range (linear operation) + */ +struct VhostIOVATree { + /* First addressable iova address in the device */ + uint64_t iova_first; + + /* Last addressable iova address in the device */ + uint64_t iova_last; + + /* IOVA address to qemu memory maps. */ + IOVATree *iova_taddr_map; +}; + +/** + * Create a new IOVA tree + * + * Returns the new IOVA tree + */ +VhostIOVATree *vhost_iova_tree_new(hwaddr iova_first, hwaddr iova_last) +{ + VhostIOVATree *tree = g_new(VhostIOVATree, 1); + + /* Some devices do not like 0 addresses */ + tree->iova_first = MAX(iova_first, iova_min_addr); + tree->iova_last = iova_last; + + tree->iova_taddr_map = iova_tree_new(); + return tree; +} + +/** + * Delete an iova tree + */ +void vhost_iova_tree_delete(VhostIOVATree *iova_tree) +{ + iova_tree_destroy(iova_tree->iova_taddr_map); + g_free(iova_tree); +} + +/** + * Find the IOVA address stored from a memory address + * + * @tree: The iova tree + * @map: The map with the memory address + * + * Return the stored mapping, or NULL if not found. + */ +const DMAMap *vhost_iova_tree_find_iova(const VhostIOVATree *tree, + const DMAMap *map) +{ + return iova_tree_find_iova(tree->iova_taddr_map, map); +} + +/** + * Allocate a new mapping + * + * @tree: The iova tree + * @map: The iova map + * + * Returns: + * - IOVA_OK if the map fits in the container + * - IOVA_ERR_INVALID if the map does not make sense (like size overflow) + * - IOVA_ERR_NOMEM if tree cannot allocate more space. + * + * It returns assignated iova in map->iova if return value is VHOST_DMA_MAP_OK. + */ +int vhost_iova_tree_map_alloc(VhostIOVATree *tree, DMAMap *map) +{ + /* Some vhost devices do not like addr 0. Skip first page */ + hwaddr iova_first = tree->iova_first ?: qemu_real_host_page_size(); + + if (map->translated_addr + map->size < map->translated_addr || + map->perm == IOMMU_NONE) { + return IOVA_ERR_INVALID; + } + + /* Allocate a node in IOVA address */ + return iova_tree_alloc_map(tree->iova_taddr_map, map, iova_first, + tree->iova_last); +} + +/** + * Remove existing mappings from iova tree + * + * @iova_tree: The vhost iova tree + * @map: The map to remove + */ +void vhost_iova_tree_remove(VhostIOVATree *iova_tree, DMAMap map) +{ + iova_tree_remove(iova_tree->iova_taddr_map, map); +} diff --git a/hw/virtio/vhost-iova-tree.h b/hw/virtio/vhost-iova-tree.h new file mode 100644 index 0000000000..4adfd79ff0 --- /dev/null +++ b/hw/virtio/vhost-iova-tree.h @@ -0,0 +1,27 @@ +/* + * vhost software live migration iova tree + * + * SPDX-FileCopyrightText: Red Hat, Inc. 
2021 + * SPDX-FileContributor: Author: Eugenio Pérez + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_VIRTIO_VHOST_IOVA_TREE_H +#define HW_VIRTIO_VHOST_IOVA_TREE_H + +#include "qemu/iova-tree.h" +#include "exec/memory.h" + +typedef struct VhostIOVATree VhostIOVATree; + +VhostIOVATree *vhost_iova_tree_new(uint64_t iova_first, uint64_t iova_last); +void vhost_iova_tree_delete(VhostIOVATree *iova_tree); +G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostIOVATree, vhost_iova_tree_delete); + +const DMAMap *vhost_iova_tree_find_iova(const VhostIOVATree *iova_tree, + const DMAMap *map); +int vhost_iova_tree_map_alloc(VhostIOVATree *iova_tree, DMAMap *map); +void vhost_iova_tree_remove(VhostIOVATree *iova_tree, DMAMap map); + +#endif diff --git a/hw/virtio/vhost-scsi-pci.c b/hw/virtio/vhost-scsi-pci.c index cb71a294fa..08980bc23b 100644 --- a/hw/virtio/vhost-scsi-pci.c +++ b/hw/virtio/vhost-scsi-pci.c @@ -21,7 +21,7 @@ #include "hw/virtio/vhost-scsi.h" #include "qapi/error.h" #include "qemu/module.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" #include "qom/object.h" typedef struct VHostSCSIPCI VHostSCSIPCI; diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c new file mode 100644 index 0000000000..d422418f2d --- /dev/null +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -0,0 +1,772 @@ +/* + * vhost shadow virtqueue + * + * SPDX-FileCopyrightText: Red Hat, Inc. 2021 + * SPDX-FileContributor: Author: Eugenio Pérez + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/virtio/vhost-shadow-virtqueue.h" + +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qemu/main-loop.h" +#include "qemu/log.h" +#include "qemu/memalign.h" +#include "linux-headers/linux/vhost.h" + +/** + * Validate the transport device features that both guests can use with the SVQ + * and SVQs can use with the device. + * + * @dev_features: The features + * @errp: Error pointer + */ +bool vhost_svq_valid_features(uint64_t features, Error **errp) +{ + bool ok = true; + uint64_t svq_features = features; + + for (uint64_t b = VIRTIO_TRANSPORT_F_START; b <= VIRTIO_TRANSPORT_F_END; + ++b) { + switch (b) { + case VIRTIO_F_ANY_LAYOUT: + case VIRTIO_RING_F_EVENT_IDX: + continue; + + case VIRTIO_F_ACCESS_PLATFORM: + /* SVQ trust in the host's IOMMU to translate addresses */ + case VIRTIO_F_VERSION_1: + /* SVQ trust that the guest vring is little endian */ + if (!(svq_features & BIT_ULL(b))) { + svq_features |= BIT_ULL(b); + ok = false; + } + continue; + + default: + if (svq_features & BIT_ULL(b)) { + svq_features &= ~BIT_ULL(b); + ok = false; + } + } + } + + if (!ok) { + error_setg(errp, "SVQ Invalid device feature flags, offer: 0x%"PRIx64 + ", ok: 0x%"PRIx64, features, svq_features); + } + return ok; +} + +/** + * Number of descriptors that the SVQ can make available from the guest. 
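+ * That is, the number of free descriptor slots left in the shadow vring (svq->num_free).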
+ * + * @svq: The svq + */ +static uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq) +{ + return svq->num_free; +} + +/** + * Translate addresses between the qemu's virtual address and the SVQ IOVA + * + * @svq: Shadow VirtQueue + * @vaddr: Translated IOVA addresses + * @iovec: Source qemu's VA addresses + * @num: Length of iovec and minimum length of vaddr + */ +static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq, + hwaddr *addrs, const struct iovec *iovec, + size_t num) +{ + if (num == 0) { + return true; + } + + for (size_t i = 0; i < num; ++i) { + DMAMap needle = { + .translated_addr = (hwaddr)(uintptr_t)iovec[i].iov_base, + .size = iovec[i].iov_len, + }; + Int128 needle_last, map_last; + size_t off; + + const DMAMap *map = vhost_iova_tree_find_iova(svq->iova_tree, &needle); + /* + * Map cannot be NULL since iova map contains all guest space and + * qemu already has a physical address mapped + */ + if (unlikely(!map)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Invalid address 0x%"HWADDR_PRIx" given by guest", + needle.translated_addr); + return false; + } + + off = needle.translated_addr - map->translated_addr; + addrs[i] = map->iova + off; + + needle_last = int128_add(int128_make64(needle.translated_addr), + int128_make64(iovec[i].iov_len)); + map_last = int128_make64(map->translated_addr + map->size); + if (unlikely(int128_gt(needle_last, map_last))) { + qemu_log_mask(LOG_GUEST_ERROR, + "Guest buffer expands over iova range"); + return false; + } + } + + return true; +} + +/** + * Write descriptors to SVQ vring + * + * @svq: The shadow virtqueue + * @sg: Cache for hwaddr + * @iovec: The iovec from the guest + * @num: iovec length + * @more_descs: True if more descriptors come in the chain + * @write: True if they are writeable descriptors + * + * Return true if success, false otherwise and print error. + */ +static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg, + const struct iovec *iovec, size_t num, + bool more_descs, bool write) +{ + uint16_t i = svq->free_head, last = svq->free_head; + unsigned n; + uint16_t flags = write ? 
cpu_to_le16(VRING_DESC_F_WRITE) : 0; + vring_desc_t *descs = svq->vring.desc; + bool ok; + + if (num == 0) { + return true; + } + + ok = vhost_svq_translate_addr(svq, sg, iovec, num); + if (unlikely(!ok)) { + return false; + } + + for (n = 0; n < num; n++) { + if (more_descs || (n + 1 < num)) { + descs[i].flags = flags | cpu_to_le16(VRING_DESC_F_NEXT); + descs[i].next = cpu_to_le16(svq->desc_next[i]); + } else { + descs[i].flags = flags; + } + descs[i].addr = cpu_to_le64(sg[n]); + descs[i].len = cpu_to_le32(iovec[n].iov_len); + + last = i; + i = cpu_to_le16(svq->desc_next[i]); + } + + svq->free_head = le16_to_cpu(svq->desc_next[last]); + return true; +} + +static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, + const struct iovec *out_sg, size_t out_num, + const struct iovec *in_sg, size_t in_num, + unsigned *head) +{ + unsigned avail_idx; + vring_avail_t *avail = svq->vring.avail; + bool ok; + g_autofree hwaddr *sgs = g_new(hwaddr, MAX(out_num, in_num)); + + *head = svq->free_head; + + /* We need some descriptors here */ + if (unlikely(!out_num && !in_num)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Guest provided element with no descriptors"); + return false; + } + + ok = vhost_svq_vring_write_descs(svq, sgs, out_sg, out_num, in_num > 0, + false); + if (unlikely(!ok)) { + return false; + } + + ok = vhost_svq_vring_write_descs(svq, sgs, in_sg, in_num, false, true); + if (unlikely(!ok)) { + return false; + } + + /* + * Put the entry in the available array (but don't update avail->idx until + * they do sync). + */ + avail_idx = svq->shadow_avail_idx & (svq->vring.num - 1); + avail->ring[avail_idx] = cpu_to_le16(*head); + svq->shadow_avail_idx++; + + /* Update the avail index after write the descriptor */ + smp_wmb(); + avail->idx = cpu_to_le16(svq->shadow_avail_idx); + + return true; +} + +static void vhost_svq_kick(VhostShadowVirtqueue *svq) +{ + bool needs_kick; + + /* + * We need to expose the available array entries before checking the used + * flags + */ + smp_mb(); + + if (virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) { + uint16_t avail_event = *(uint16_t *)(&svq->vring.used->ring[svq->vring.num]); + needs_kick = vring_need_event(avail_event, svq->shadow_avail_idx, svq->shadow_avail_idx - 1); + } else { + needs_kick = !(svq->vring.used->flags & VRING_USED_F_NO_NOTIFY); + } + + if (!needs_kick) { + return; + } + + event_notifier_set(&svq->hdev_kick); +} + +/** + * Add an element to a SVQ. + * + * Return -EINVAL if element is invalid, -ENOSPC if dev queue is full + */ +int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, + size_t out_num, const struct iovec *in_sg, size_t in_num, + VirtQueueElement *elem) +{ + unsigned qemu_head; + unsigned ndescs = in_num + out_num; + bool ok; + + if (unlikely(ndescs > vhost_svq_available_slots(svq))) { + return -ENOSPC; + } + + ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head); + if (unlikely(!ok)) { + return -EINVAL; + } + + svq->num_free -= ndescs; + svq->desc_state[qemu_head].elem = elem; + svq->desc_state[qemu_head].ndescs = ndescs; + vhost_svq_kick(svq); + return 0; +} + +/* Convenience wrapper to add a guest's element to SVQ */ +static int vhost_svq_add_element(VhostShadowVirtqueue *svq, + VirtQueueElement *elem) +{ + return vhost_svq_add(svq, elem->out_sg, elem->out_num, elem->in_sg, + elem->in_num, elem); +} + +/** + * Forward available buffers. 
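+ * Pops elements from the guest's VirtQueue, adds them to the shadow vring and kicks the device as needed.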
+ * + * @svq: Shadow VirtQueue + * + * Note that this function does not guarantee that all guest's available + * buffers are available to the device in SVQ avail ring. The guest may have + * exposed a GPA / GIOVA contiguous buffer, but it may not be contiguous in + * qemu vaddr. + * + * If that happens, guest's kick notifications will be disabled until the + * device uses some buffers. + */ +static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq) +{ + /* Clear event notifier */ + event_notifier_test_and_clear(&svq->svq_kick); + + /* Forward to the device as many available buffers as possible */ + do { + virtio_queue_set_notification(svq->vq, false); + + while (true) { + g_autofree VirtQueueElement *elem = NULL; + int r; + + if (svq->next_guest_avail_elem) { + elem = g_steal_pointer(&svq->next_guest_avail_elem); + } else { + elem = virtqueue_pop(svq->vq, sizeof(*elem)); + } + + if (!elem) { + break; + } + + if (svq->ops) { + r = svq->ops->avail_handler(svq, elem, svq->ops_opaque); + } else { + r = vhost_svq_add_element(svq, elem); + } + if (unlikely(r != 0)) { + if (r == -ENOSPC) { + /* + * This condition is possible since a contiguous buffer in + * GPA does not imply a contiguous buffer in qemu's VA + * scatter-gather segments. If that happens, the buffer + * exposed to the device needs to be a chain of descriptors + * at this moment. + * + * SVQ cannot hold more available buffers if we are here: + * queue the current guest descriptor and ignore kicks + * until some elements are used. + */ + svq->next_guest_avail_elem = g_steal_pointer(&elem); + } + + /* VQ is full or broken, just return and ignore kicks */ + return; + } + /* elem belongs to SVQ or external caller now */ + elem = NULL; + } + + virtio_queue_set_notification(svq->vq, true); + } while (!virtio_queue_empty(svq->vq)); +} + +/** + * Handle guest's kick. + * + * @n: guest kick event notifier, the one that guest set to notify svq. + */ +static void vhost_handle_guest_kick_notifier(EventNotifier *n) +{ + VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue, svq_kick); + event_notifier_test_and_clear(n); + vhost_handle_guest_kick(svq); +} + +static bool vhost_svq_more_used(VhostShadowVirtqueue *svq) +{ + uint16_t *used_idx = &svq->vring.used->idx; + if (svq->last_used_idx != svq->shadow_used_idx) { + return true; + } + + svq->shadow_used_idx = cpu_to_le16(*(volatile uint16_t *)used_idx); + + return svq->last_used_idx != svq->shadow_used_idx; +} + +/** + * Enable vhost device calls after disable them. + * + * @svq: The svq + * + * It returns false if there are pending used buffers from the vhost device, + * avoiding the possible races between SVQ checking for more work and enabling + * callbacks. True if SVQ used vring has no more pending buffers. + */ +static bool vhost_svq_enable_notification(VhostShadowVirtqueue *svq) +{ + if (virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) { + uint16_t *used_event = (uint16_t *)&svq->vring.avail->ring[svq->vring.num]; + *used_event = svq->shadow_used_idx; + } else { + svq->vring.avail->flags &= ~cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT); + } + + /* Make sure the event is enabled before the read of used_idx */ + smp_mb(); + return !vhost_svq_more_used(svq); +} + +static void vhost_svq_disable_notification(VhostShadowVirtqueue *svq) +{ + /* + * No need to disable notification in the event idx case, since used event + * index is already an index too far away. 
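+ * The device will not send another call until the used event index is advanced again by vhost_svq_enable_notification().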
+ */ + if (!virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) { + svq->vring.avail->flags |= cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT); + } +} + +static uint16_t vhost_svq_last_desc_of_chain(const VhostShadowVirtqueue *svq, + uint16_t num, uint16_t i) +{ + for (uint16_t j = 0; j < (num - 1); ++j) { + i = le16_to_cpu(svq->desc_next[i]); + } + + return i; +} + +static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, + uint32_t *len) +{ + const vring_used_t *used = svq->vring.used; + vring_used_elem_t used_elem; + uint16_t last_used, last_used_chain, num; + + if (!vhost_svq_more_used(svq)) { + return NULL; + } + + /* Only get used array entries after they have been exposed by dev */ + smp_rmb(); + last_used = svq->last_used_idx & (svq->vring.num - 1); + used_elem.id = le32_to_cpu(used->ring[last_used].id); + used_elem.len = le32_to_cpu(used->ring[last_used].len); + + svq->last_used_idx++; + if (unlikely(used_elem.id >= svq->vring.num)) { + qemu_log_mask(LOG_GUEST_ERROR, "Device %s says index %u is used", + svq->vdev->name, used_elem.id); + return NULL; + } + + if (unlikely(!svq->desc_state[used_elem.id].ndescs)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Device %s says index %u is used, but it was not available", + svq->vdev->name, used_elem.id); + return NULL; + } + + num = svq->desc_state[used_elem.id].ndescs; + svq->desc_state[used_elem.id].ndescs = 0; + last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id); + svq->desc_next[last_used_chain] = svq->free_head; + svq->free_head = used_elem.id; + svq->num_free += num; + + *len = used_elem.len; + return g_steal_pointer(&svq->desc_state[used_elem.id].elem); +} + +/** + * Push an element to SVQ, returning it to the guest. + */ +void vhost_svq_push_elem(VhostShadowVirtqueue *svq, + const VirtQueueElement *elem, uint32_t len) +{ + virtqueue_push(svq->vq, elem, len); + if (svq->next_guest_avail_elem) { + /* + * Avail ring was full when vhost_svq_flush was called, so it's a + * good moment to make more descriptors available if possible. + */ + vhost_handle_guest_kick(svq); + } +} + +static void vhost_svq_flush(VhostShadowVirtqueue *svq, + bool check_for_avail_queue) +{ + VirtQueue *vq = svq->vq; + + /* Forward as many used buffers as possible. */ + do { + unsigned i = 0; + + vhost_svq_disable_notification(svq); + while (true) { + uint32_t len; + g_autofree VirtQueueElement *elem = vhost_svq_get_buf(svq, &len); + if (!elem) { + break; + } + + if (unlikely(i >= svq->vring.num)) { + qemu_log_mask(LOG_GUEST_ERROR, + "More than %u used buffers obtained in a %u size SVQ", + i, svq->vring.num); + virtqueue_fill(vq, elem, len, i); + virtqueue_flush(vq, i); + return; + } + virtqueue_fill(vq, elem, len, i++); + } + + virtqueue_flush(vq, i); + event_notifier_set(&svq->svq_call); + + if (check_for_avail_queue && svq->next_guest_avail_elem) { + /* + * Avail ring was full when vhost_svq_flush was called, so it's a + * good moment to make more descriptors available if possible. + */ + vhost_handle_guest_kick(svq); + } + } while (!vhost_svq_enable_notification(svq)); +} + +/** + * Poll the SVQ for one device used buffer. + * + * This function race with main event loop SVQ polling, so extra + * synchronization is needed. + * + * Return the length written by the device. 
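+ * Returns 0 if the device has not used any buffer after roughly 10 seconds of polling.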
+ */ +size_t vhost_svq_poll(VhostShadowVirtqueue *svq) +{ + int64_t start_us = g_get_monotonic_time(); + uint32_t len = 0; + + do { + if (vhost_svq_more_used(svq)) { + break; + } + + if (unlikely(g_get_monotonic_time() - start_us > 10e6)) { + return 0; + } + } while (true); + + vhost_svq_get_buf(svq, &len); + return len; +} + +/** + * Forward used buffers. + * + * @n: hdev call event notifier, the one that device set to notify svq. + * + * Note that we are not making any buffers available in the loop, there is no + * way that it runs more than virtqueue size times. + */ +static void vhost_svq_handle_call(EventNotifier *n) +{ + VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue, + hdev_call); + event_notifier_test_and_clear(n); + vhost_svq_flush(svq, true); +} + +/** + * Set the call notifier for the SVQ to call the guest + * + * @svq: Shadow virtqueue + * @call_fd: call notifier + * + * Called on BQL context. + */ +void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd) +{ + if (call_fd == VHOST_FILE_UNBIND) { + /* + * Fail event_notifier_set if called handling device call. + * + * SVQ still needs device notifications, since it needs to keep + * forwarding used buffers even with the unbind. + */ + memset(&svq->svq_call, 0, sizeof(svq->svq_call)); + } else { + event_notifier_init_fd(&svq->svq_call, call_fd); + } +} + +/** + * Get the shadow vq vring address. + * @svq: Shadow virtqueue + * @addr: Destination to store address + */ +void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq, + struct vhost_vring_addr *addr) +{ + addr->desc_user_addr = (uint64_t)(uintptr_t)svq->vring.desc; + addr->avail_user_addr = (uint64_t)(uintptr_t)svq->vring.avail; + addr->used_user_addr = (uint64_t)(uintptr_t)svq->vring.used; +} + +size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq) +{ + size_t desc_size = sizeof(vring_desc_t) * svq->vring.num; + size_t avail_size = offsetof(vring_avail_t, ring[svq->vring.num]) + + sizeof(uint16_t); + + return ROUND_UP(desc_size + avail_size, qemu_real_host_page_size()); +} + +size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq) +{ + size_t used_size = offsetof(vring_used_t, ring[svq->vring.num]) + + sizeof(uint16_t); + return ROUND_UP(used_size, qemu_real_host_page_size()); +} + +/** + * Set a new file descriptor for the guest to kick the SVQ and notify for avail + * + * @svq: The svq + * @svq_kick_fd: The svq kick fd + * + * Note that the SVQ will never close the old file descriptor. + */ +void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd) +{ + EventNotifier *svq_kick = &svq->svq_kick; + bool poll_stop = VHOST_FILE_UNBIND != event_notifier_get_fd(svq_kick); + bool poll_start = svq_kick_fd != VHOST_FILE_UNBIND; + + if (poll_stop) { + event_notifier_set_handler(svq_kick, NULL); + } + + event_notifier_init_fd(svq_kick, svq_kick_fd); + /* + * event_notifier_set_handler already checks for guest's notifications if + * they arrive at the new file descriptor in the switch, so there is no + * need to explicitly check for them. + */ + if (poll_start) { + event_notifier_set(svq_kick); + event_notifier_set_handler(svq_kick, vhost_handle_guest_kick_notifier); + } +} + +/** + * Start the shadow virtqueue operation. 
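+ * Allocates page-aligned driver and device vring areas, zeroes them, and resets the SVQ bookkeeping state.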
+ * + * @svq: Shadow Virtqueue + * @vdev: VirtIO device + * @vq: Virtqueue to shadow + */ +void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev, + VirtQueue *vq) +{ + size_t desc_size, driver_size, device_size; + + svq->next_guest_avail_elem = NULL; + svq->shadow_avail_idx = 0; + svq->shadow_used_idx = 0; + svq->last_used_idx = 0; + svq->vdev = vdev; + svq->vq = vq; + + svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq)); + svq->num_free = svq->vring.num; + driver_size = vhost_svq_driver_area_size(svq); + device_size = vhost_svq_device_area_size(svq); + svq->vring.desc = qemu_memalign(qemu_real_host_page_size(), driver_size); + desc_size = sizeof(vring_desc_t) * svq->vring.num; + svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size); + memset(svq->vring.desc, 0, driver_size); + svq->vring.used = qemu_memalign(qemu_real_host_page_size(), device_size); + memset(svq->vring.used, 0, device_size); + svq->desc_state = g_new0(SVQDescState, svq->vring.num); + svq->desc_next = g_new0(uint16_t, svq->vring.num); + for (unsigned i = 0; i < svq->vring.num - 1; i++) { + svq->desc_next[i] = cpu_to_le16(i + 1); + } +} + +/** + * Stop the shadow virtqueue operation. + * @svq: Shadow Virtqueue + */ +void vhost_svq_stop(VhostShadowVirtqueue *svq) +{ + vhost_svq_set_svq_kick_fd(svq, VHOST_FILE_UNBIND); + g_autofree VirtQueueElement *next_avail_elem = NULL; + + if (!svq->vq) { + return; + } + + /* Send all pending used descriptors to guest */ + vhost_svq_flush(svq, false); + + for (unsigned i = 0; i < svq->vring.num; ++i) { + g_autofree VirtQueueElement *elem = NULL; + elem = g_steal_pointer(&svq->desc_state[i].elem); + if (elem) { + virtqueue_detach_element(svq->vq, elem, 0); + } + } + + next_avail_elem = g_steal_pointer(&svq->next_guest_avail_elem); + if (next_avail_elem) { + virtqueue_detach_element(svq->vq, next_avail_elem, 0); + } + svq->vq = NULL; + g_free(svq->desc_next); + g_free(svq->desc_state); + qemu_vfree(svq->vring.desc); + qemu_vfree(svq->vring.used); +} + +/** + * Creates vhost shadow virtqueue, and instructs the vhost device to use the + * shadow methods and file descriptors. + * + * @iova_tree: Tree to perform descriptors translations + * @ops: SVQ owner callbacks + * @ops_opaque: ops opaque pointer + * + * Returns the new virtqueue or NULL. + * + * In case of error, reason is reported through error_report. + */ +VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree, + const VhostShadowVirtqueueOps *ops, + void *ops_opaque) +{ + g_autofree VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1); + int r; + + r = event_notifier_init(&svq->hdev_kick, 0); + if (r != 0) { + error_report("Couldn't create kick event notifier: %s (%d)", + g_strerror(errno), errno); + goto err_init_hdev_kick; + } + + r = event_notifier_init(&svq->hdev_call, 0); + if (r != 0) { + error_report("Couldn't create call event notifier: %s (%d)", + g_strerror(errno), errno); + goto err_init_hdev_call; + } + + event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND); + event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call); + svq->iova_tree = iova_tree; + svq->ops = ops; + svq->ops_opaque = ops_opaque; + return g_steal_pointer(&svq); + +err_init_hdev_call: + event_notifier_cleanup(&svq->hdev_kick); + +err_init_hdev_kick: + return NULL; +} + +/** + * Free the resources of the shadow virtqueue. + * + * @pvq: gpointer to SVQ so it can be used by autofree functions. 
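+ * Stops the queue and cleans up its kick/call event notifiers before freeing it.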
+ */ +void vhost_svq_free(gpointer pvq) +{ + VhostShadowVirtqueue *vq = pvq; + vhost_svq_stop(vq); + event_notifier_cleanup(&vq->hdev_kick); + event_notifier_set_handler(&vq->hdev_call, NULL); + event_notifier_cleanup(&vq->hdev_call); + g_free(vq); +} diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h new file mode 100644 index 0000000000..328a7fc075 --- /dev/null +++ b/hw/virtio/vhost-shadow-virtqueue.h @@ -0,0 +1,142 @@ +/* + * vhost shadow virtqueue + * + * SPDX-FileCopyrightText: Red Hat, Inc. 2021 + * SPDX-FileContributor: Author: Eugenio Pérez + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef VHOST_SHADOW_VIRTQUEUE_H +#define VHOST_SHADOW_VIRTQUEUE_H + +#include "qemu/event_notifier.h" +#include "hw/virtio/virtio.h" +#include "standard-headers/linux/vhost_types.h" +#include "hw/virtio/vhost-iova-tree.h" + +typedef struct SVQDescState { + VirtQueueElement *elem; + + /* + * Number of descriptors exposed to the device. May or may not match + * guest's + */ + unsigned int ndescs; +} SVQDescState; + +typedef struct VhostShadowVirtqueue VhostShadowVirtqueue; + +/** + * Callback to handle an avail buffer. + * + * @svq: Shadow virtqueue + * @elem: Element placed in the queue by the guest + * @vq_callback_opaque: Opaque + * + * Returns 0 if the vq is running as expected. + * + * Note that ownership of elem is transferred to the callback. + */ +typedef int (*VirtQueueAvailCallback)(VhostShadowVirtqueue *svq, + VirtQueueElement *elem, + void *vq_callback_opaque); + +typedef struct VhostShadowVirtqueueOps { + VirtQueueAvailCallback avail_handler; +} VhostShadowVirtqueueOps; + +/* Shadow virtqueue to relay notifications */ +typedef struct VhostShadowVirtqueue { + /* Shadow vring */ + struct vring vring; + + /* Shadow kick notifier, sent to vhost */ + EventNotifier hdev_kick; + /* Shadow call notifier, sent to vhost */ + EventNotifier hdev_call; + + /* + * Borrowed virtqueue's guest to host notifier. To borrow it in this event + * notifier allows to recover the VhostShadowVirtqueue from the event loop + * easily. If we use the VirtQueue's one, we don't have an easy way to + * retrieve VhostShadowVirtqueue. + * + * So shadow virtqueue must not clean it, or we would lose VirtQueue one. + */ + EventNotifier svq_kick; + + /* Guest's call notifier, where the SVQ calls guest. */ + EventNotifier svq_call; + + /* Virtio queue shadowing */ + VirtQueue *vq; + + /* Virtio device */ + VirtIODevice *vdev; + + /* IOVA mapping */ + VhostIOVATree *iova_tree; + + /* SVQ vring descriptors state */ + SVQDescState *desc_state; + + /* Next VirtQueue element that guest made available */ + VirtQueueElement *next_guest_avail_elem; + + /* + * Backup next field for each descriptor so we can recover securely, not + * needing to trust the device access. 
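+ * Each entry holds the SVQ's own copy of that descriptor's next index.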
+ */ + uint16_t *desc_next; + + /* Caller callbacks */ + const VhostShadowVirtqueueOps *ops; + + /* Caller callbacks opaque */ + void *ops_opaque; + + /* Next head to expose to the device */ + uint16_t shadow_avail_idx; + + /* Next free descriptor */ + uint16_t free_head; + + /* Last seen used idx */ + uint16_t shadow_used_idx; + + /* Next head to consume from the device */ + uint16_t last_used_idx; + + /* Size of SVQ vring free descriptors */ + uint16_t num_free; +} VhostShadowVirtqueue; + +bool vhost_svq_valid_features(uint64_t features, Error **errp); + +void vhost_svq_push_elem(VhostShadowVirtqueue *svq, + const VirtQueueElement *elem, uint32_t len); +int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, + size_t out_num, const struct iovec *in_sg, size_t in_num, + VirtQueueElement *elem); +size_t vhost_svq_poll(VhostShadowVirtqueue *svq); + +void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd); +void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd); +void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq, + struct vhost_vring_addr *addr); +size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq); +size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq); + +void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev, + VirtQueue *vq); +void vhost_svq_stop(VhostShadowVirtqueue *svq); + +VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree, + const VhostShadowVirtqueueOps *ops, + void *ops_opaque); + +void vhost_svq_free(gpointer vq); +G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostShadowVirtqueue, vhost_svq_free); + +#endif diff --git a/hw/virtio/vhost-user-blk-pci.c b/hw/virtio/vhost-user-blk-pci.c index 33b404d8a2..eef8641a98 100644 --- a/hw/virtio/vhost-user-blk-pci.c +++ b/hw/virtio/vhost-user-blk-pci.c @@ -26,7 +26,7 @@ #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/module.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" #include "qom/object.h" typedef struct VHostUserBlkPCI VHostUserBlkPCI; diff --git a/hw/virtio/vhost-user-fs-pci.c b/hw/virtio/vhost-user-fs-pci.c index 2ed8492b3f..6829b8b743 100644 --- a/hw/virtio/vhost-user-fs-pci.c +++ b/hw/virtio/vhost-user-fs-pci.c @@ -14,7 +14,7 @@ #include "qemu/osdep.h" #include "hw/qdev-properties.h" #include "hw/virtio/vhost-user-fs.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" #include "qom/object.h" struct VHostUserFSPCI { diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c index c595957983..d97b179e6f 100644 --- a/hw/virtio/vhost-user-fs.c +++ b/hw/virtio/vhost-user-fs.c @@ -20,6 +20,7 @@ #include "hw/virtio/virtio-bus.h" #include "hw/virtio/virtio-access.h" #include "qemu/error-report.h" +#include "hw/virtio/vhost.h" #include "hw/virtio/vhost-user-fs.h" #include "monitor/monitor.h" #include "sysemu/sysemu.h" @@ -31,6 +32,7 @@ static const int user_feature_bits[] = { VIRTIO_F_NOTIFY_ON_EMPTY, VIRTIO_F_RING_PACKED, VIRTIO_F_IOMMU_PLATFORM, + VIRTIO_F_RING_RESET, VHOST_INVALID_FEATURE_BIT }; @@ -74,7 +76,7 @@ static void vuf_start(VirtIODevice *vdev) } fs->vhost_dev.acked_features = vdev->guest_features; - ret = vhost_dev_start(&fs->vhost_dev, vdev); + ret = vhost_dev_start(&fs->vhost_dev, vdev, true); if (ret < 0) { error_report("Error starting vhost: %d", -ret); goto err_guest_notifiers; @@ -108,7 +110,7 @@ static void vuf_stop(VirtIODevice *vdev) return; } - vhost_dev_stop(&fs->vhost_dev, vdev); + vhost_dev_stop(&fs->vhost_dev, vdev, true); ret = 
k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, false); if (ret < 0) { @@ -122,13 +124,9 @@ static void vuf_stop(VirtIODevice *vdev) static void vuf_set_status(VirtIODevice *vdev, uint8_t status) { VHostUserFS *fs = VHOST_USER_FS(vdev); - bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK; + bool should_start = virtio_device_should_start(vdev, status); - if (!vdev->vm_running) { - should_start = false; - } - - if (fs->vhost_dev.started == should_start) { + if (vhost_dev_is_started(&fs->vhost_dev) == should_start) { return; } @@ -219,8 +217,7 @@ static void vuf_device_realize(DeviceState *dev, Error **errp) return; } - virtio_init(vdev, "vhost-user-fs", VIRTIO_ID_FS, - sizeof(struct virtio_fs_config)); + virtio_init(vdev, VIRTIO_ID_FS, sizeof(struct virtio_fs_config)); /* Hiprio queue */ fs->hiprio_vq = virtio_add_queue(vdev, fs->conf.queue_size, vuf_handle_output); @@ -277,6 +274,12 @@ static void vuf_device_unrealize(DeviceState *dev) fs->vhost_dev.vqs = NULL; } +static struct vhost_dev *vuf_get_vhost(VirtIODevice *vdev) +{ + VHostUserFS *fs = VHOST_USER_FS(vdev); + return &fs->vhost_dev; +} + static const VMStateDescription vuf_vmstate = { .name = "vhost-user-fs", .unmigratable = 1, @@ -314,6 +317,7 @@ static void vuf_class_init(ObjectClass *klass, void *data) vdc->set_status = vuf_set_status; vdc->guest_notifier_mask = vuf_guest_notifier_mask; vdc->guest_notifier_pending = vuf_guest_notifier_pending; + vdc->get_vhost = vuf_get_vhost; } static const TypeInfo vuf_info = { diff --git a/hw/virtio/vhost-user-gpio-pci.c b/hw/virtio/vhost-user-gpio-pci.c new file mode 100644 index 0000000000..b3028a24a1 --- /dev/null +++ b/hw/virtio/vhost-user-gpio-pci.c @@ -0,0 +1,69 @@ +/* + * Vhost-user gpio virtio device PCI glue + * + * Copyright (c) 2022 Viresh Kumar + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/vhost-user-gpio.h" +#include "hw/virtio/virtio-pci.h" + +struct VHostUserGPIOPCI { + VirtIOPCIProxy parent_obj; + VHostUserGPIO vdev; +}; + +typedef struct VHostUserGPIOPCI VHostUserGPIOPCI; + +#define TYPE_VHOST_USER_GPIO_PCI "vhost-user-gpio-pci-base" + +DECLARE_INSTANCE_CHECKER(VHostUserGPIOPCI, VHOST_USER_GPIO_PCI, + TYPE_VHOST_USER_GPIO_PCI) + +static void vhost_user_gpio_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VHostUserGPIOPCI *dev = VHOST_USER_GPIO_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + vpci_dev->nvectors = 1; + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void vhost_user_gpio_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + k->realize = vhost_user_gpio_pci_realize; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = 0; /* Set by virtio-pci based on virtio id */ + pcidev_k->revision = 0x00; + pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER; +} + +static void vhost_user_gpio_pci_instance_init(Object *obj) +{ + VHostUserGPIOPCI *dev = VHOST_USER_GPIO_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_USER_GPIO); +} + +static const VirtioPCIDeviceTypeInfo vhost_user_gpio_pci_info = { + .base_name = TYPE_VHOST_USER_GPIO_PCI, + .non_transitional_name = "vhost-user-gpio-pci", + .instance_size = sizeof(VHostUserGPIOPCI), + .instance_init = vhost_user_gpio_pci_instance_init, + 
.class_init = vhost_user_gpio_pci_class_init, +}; + +static void vhost_user_gpio_pci_register(void) +{ + virtio_pci_types_register(&vhost_user_gpio_pci_info); +} + +type_init(vhost_user_gpio_pci_register); diff --git a/hw/virtio/vhost-user-gpio.c b/hw/virtio/vhost-user-gpio.c new file mode 100644 index 0000000000..97145376eb --- /dev/null +++ b/hw/virtio/vhost-user-gpio.c @@ -0,0 +1,420 @@ +/* + * Vhost-user GPIO virtio device + * + * Copyright (c) 2022 Viresh Kumar + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/vhost-user-gpio.h" +#include "qemu/error-report.h" +#include "standard-headers/linux/virtio_ids.h" +#include "trace.h" + +#define REALIZE_CONNECTION_RETRIES 3 +#define VHOST_NVQS 2 + +/* Features required from VirtIO */ +static const int feature_bits[] = { + VIRTIO_F_VERSION_1, + VIRTIO_F_NOTIFY_ON_EMPTY, + VIRTIO_RING_F_INDIRECT_DESC, + VIRTIO_RING_F_EVENT_IDX, + VIRTIO_GPIO_F_IRQ, + VIRTIO_F_RING_RESET, + VHOST_INVALID_FEATURE_BIT +}; + +static void vu_gpio_get_config(VirtIODevice *vdev, uint8_t *config) +{ + VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + + memcpy(config, &gpio->config, sizeof(gpio->config)); +} + +static int vu_gpio_config_notifier(struct vhost_dev *dev) +{ + VHostUserGPIO *gpio = VHOST_USER_GPIO(dev->vdev); + + memcpy(dev->vdev->config, &gpio->config, sizeof(gpio->config)); + virtio_notify_config(dev->vdev); + + return 0; +} + +const VhostDevConfigOps gpio_ops = { + .vhost_dev_config_notifier = vu_gpio_config_notifier, +}; + +static int vu_gpio_start(VirtIODevice *vdev) +{ + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + struct vhost_dev *vhost_dev = &gpio->vhost_dev; + int ret, i; + + if (!k->set_guest_notifiers) { + error_report("binding does not support guest notifiers"); + return -ENOSYS; + } + + ret = vhost_dev_enable_notifiers(vhost_dev, vdev); + if (ret < 0) { + error_report("Error enabling host notifiers: %d", ret); + return ret; + } + + ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, true); + if (ret < 0) { + error_report("Error binding guest notifier: %d", ret); + goto err_host_notifiers; + } + + /* + * Before we start up we need to ensure we have the final feature + * set needed for the vhost configuration. The backend may also + * apply backend_features when the feature set is sent. + */ + vhost_ack_features(&gpio->vhost_dev, feature_bits, vdev->guest_features); + + ret = vhost_dev_start(&gpio->vhost_dev, vdev, false); + if (ret < 0) { + error_report("Error starting vhost-user-gpio: %d", ret); + goto err_guest_notifiers; + } + gpio->started_vu = true; + + /* + * guest_notifier_mask/pending not used yet, so just unmask + * everything here. virtio-pci will do the right thing by + * enabling/disabling irqfd. + */ + for (i = 0; i < gpio->vhost_dev.nvqs; i++) { + vhost_virtqueue_mask(&gpio->vhost_dev, vdev, i, false); + } + + /* + * As we must have VHOST_USER_F_PROTOCOL_FEATURES (because + * VHOST_USER_GET_CONFIG requires it) we need to explicitly enable + * the vrings. 
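+ * With VHOST_USER_F_PROTOCOL_FEATURES negotiated the rings start disabled, so the backend
+ * processes nothing until VHOST_USER_SET_VRING_ENABLE is sent.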
+ */ + g_assert(vhost_dev->vhost_ops && + vhost_dev->vhost_ops->vhost_set_vring_enable); + ret = vhost_dev->vhost_ops->vhost_set_vring_enable(vhost_dev, true); + if (ret == 0) { + return 0; + } + + error_report("Failed to start vrings for vhost-user-gpio: %d", ret); + +err_guest_notifiers: + k->set_guest_notifiers(qbus->parent, gpio->vhost_dev.nvqs, false); +err_host_notifiers: + vhost_dev_disable_notifiers(&gpio->vhost_dev, vdev); + + return ret; +} + +static void vu_gpio_stop(VirtIODevice *vdev) +{ + VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + struct vhost_dev *vhost_dev = &gpio->vhost_dev; + int ret; + + if (!gpio->started_vu) { + return; + } + gpio->started_vu = false; + + if (!k->set_guest_notifiers) { + return; + } + + vhost_dev_stop(vhost_dev, vdev, false); + + ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, false); + if (ret < 0) { + error_report("vhost guest notifier cleanup failed: %d", ret); + return; + } + + vhost_dev_disable_notifiers(vhost_dev, vdev); +} + +static void vu_gpio_set_status(VirtIODevice *vdev, uint8_t status) +{ + VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + bool should_start = virtio_device_should_start(vdev, status); + + trace_virtio_gpio_set_status(status); + + if (!gpio->connected) { + return; + } + + if (vhost_dev_is_started(&gpio->vhost_dev) == should_start) { + return; + } + + if (should_start) { + if (vu_gpio_start(vdev)) { + qemu_chr_fe_disconnect(&gpio->chardev); + } + } else { + vu_gpio_stop(vdev); + } +} + +static uint64_t vu_gpio_get_features(VirtIODevice *vdev, uint64_t features, + Error **errp) +{ + VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + + return vhost_get_features(&gpio->vhost_dev, feature_bits, features); +} + +static void vu_gpio_handle_output(VirtIODevice *vdev, VirtQueue *vq) +{ + /* + * Not normally called; it's the daemon that handles the queue; + * however virtio's cleanup path can call this. 
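+ * Nothing to do here: the queues are serviced by the vhost-user daemon.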
+ */ +} + +static void vu_gpio_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) +{ + VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + + vhost_virtqueue_mask(&gpio->vhost_dev, vdev, idx, mask); +} + +static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserGPIO *gpio) +{ + virtio_delete_queue(gpio->command_vq); + virtio_delete_queue(gpio->interrupt_vq); + g_free(gpio->vhost_vqs); + virtio_cleanup(vdev); + vhost_user_cleanup(&gpio->vhost_user); +} + +static int vu_gpio_connect(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + struct vhost_dev *vhost_dev = &gpio->vhost_dev; + int ret; + + if (gpio->connected) { + return 0; + } + gpio->connected = true; + + vhost_dev_set_config_notifier(vhost_dev, &gpio_ops); + gpio->vhost_user.supports_config = true; + + gpio->vhost_dev.nvqs = VHOST_NVQS; + gpio->vhost_dev.vqs = gpio->vhost_vqs; + + ret = vhost_dev_init(vhost_dev, &gpio->vhost_user, + VHOST_BACKEND_TYPE_USER, 0, errp); + if (ret < 0) { + return ret; + } + + /* restore vhost state */ + if (virtio_device_started(vdev, vdev->status)) { + vu_gpio_start(vdev); + } + + return 0; +} + +static void vu_gpio_event(void *opaque, QEMUChrEvent event); + +static void vu_gpio_disconnect(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + + if (!gpio->connected) { + return; + } + gpio->connected = false; + + vu_gpio_stop(vdev); + vhost_dev_cleanup(&gpio->vhost_dev); + + /* Re-instate the event handler for new connections */ + qemu_chr_fe_set_handlers(&gpio->chardev, + NULL, NULL, vu_gpio_event, + NULL, dev, NULL, true); +} + +static void vu_gpio_event(void *opaque, QEMUChrEvent event) +{ + DeviceState *dev = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + Error *local_err = NULL; + + switch (event) { + case CHR_EVENT_OPENED: + if (vu_gpio_connect(dev, &local_err) < 0) { + qemu_chr_fe_disconnect(&gpio->chardev); + return; + } + break; + case CHR_EVENT_CLOSED: + /* defer close until later to avoid circular close */ + vhost_user_async_close(dev, &gpio->chardev, &gpio->vhost_dev, + vu_gpio_disconnect); + break; + case CHR_EVENT_BREAK: + case CHR_EVENT_MUX_IN: + case CHR_EVENT_MUX_OUT: + /* Ignore */ + break; + } +} + +static int vu_gpio_realize_connect(VHostUserGPIO *gpio, Error **errp) +{ + VirtIODevice *vdev = &gpio->parent_obj; + DeviceState *dev = &vdev->parent_obj; + struct vhost_dev *vhost_dev = &gpio->vhost_dev; + int ret; + + ret = qemu_chr_fe_wait_connected(&gpio->chardev, errp); + if (ret < 0) { + return ret; + } + + /* + * vu_gpio_connect() may have already connected (via the event + * callback) in which case it will just report success. 
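+ * (vu_gpio_connect() returns 0 immediately when gpio->connected is already set.)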
+ */ + ret = vu_gpio_connect(dev, errp); + if (ret < 0) { + qemu_chr_fe_disconnect(&gpio->chardev); + return ret; + } + g_assert(gpio->connected); + + ret = vhost_dev_get_config(vhost_dev, (uint8_t *)&gpio->config, + sizeof(gpio->config), errp); + + if (ret < 0) { + error_report("vhost-user-gpio: get config failed"); + + qemu_chr_fe_disconnect(&gpio->chardev); + vhost_dev_cleanup(vhost_dev); + return ret; + } + + return 0; +} + +static void vu_gpio_device_realize(DeviceState *dev, Error **errp) +{ + ERRP_GUARD(); + + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserGPIO *gpio = VHOST_USER_GPIO(dev); + int retries, ret; + + if (!gpio->chardev.chr) { + error_setg(errp, "vhost-user-gpio: chardev is mandatory"); + return; + } + + if (!vhost_user_init(&gpio->vhost_user, &gpio->chardev, errp)) { + return; + } + + virtio_init(vdev, VIRTIO_ID_GPIO, sizeof(gpio->config)); + + gpio->command_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output); + gpio->interrupt_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output); + gpio->vhost_vqs = g_new0(struct vhost_virtqueue, VHOST_NVQS); + + gpio->connected = false; + + qemu_chr_fe_set_handlers(&gpio->chardev, NULL, NULL, vu_gpio_event, NULL, + dev, NULL, true); + + retries = REALIZE_CONNECTION_RETRIES; + g_assert(!*errp); + do { + if (*errp) { + error_prepend(errp, "Reconnecting after error: "); + error_report_err(*errp); + *errp = NULL; + } + ret = vu_gpio_realize_connect(gpio, errp); + } while (ret < 0 && retries--); + + if (ret < 0) { + do_vhost_user_cleanup(vdev, gpio); + } + + return; +} + +static void vu_gpio_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserGPIO *gpio = VHOST_USER_GPIO(dev); + + vu_gpio_set_status(vdev, 0); + qemu_chr_fe_set_handlers(&gpio->chardev, NULL, NULL, NULL, NULL, NULL, NULL, + false); + vhost_dev_cleanup(&gpio->vhost_dev); + do_vhost_user_cleanup(vdev, gpio); +} + +static const VMStateDescription vu_gpio_vmstate = { + .name = "vhost-user-gpio", + .unmigratable = 1, +}; + +static Property vu_gpio_properties[] = { + DEFINE_PROP_CHR("chardev", VHostUserGPIO, chardev), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vu_gpio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, vu_gpio_properties); + dc->vmsd = &vu_gpio_vmstate; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + vdc->realize = vu_gpio_device_realize; + vdc->unrealize = vu_gpio_device_unrealize; + vdc->get_features = vu_gpio_get_features; + vdc->get_config = vu_gpio_get_config; + vdc->set_status = vu_gpio_set_status; + vdc->guest_notifier_mask = vu_gpio_guest_notifier_mask; +} + +static const TypeInfo vu_gpio_info = { + .name = TYPE_VHOST_USER_GPIO, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VHostUserGPIO), + .class_init = vu_gpio_class_init, +}; + +static void vu_gpio_register_types(void) +{ + type_register_static(&vu_gpio_info); +} + +type_init(vu_gpio_register_types) diff --git a/hw/virtio/vhost-user-i2c-pci.c b/hw/virtio/vhost-user-i2c-pci.c index 70b7b65fd9..00ac10941f 100644 --- a/hw/virtio/vhost-user-i2c-pci.c +++ b/hw/virtio/vhost-user-i2c-pci.c @@ -9,7 +9,7 @@ #include "qemu/osdep.h" #include "hw/qdev-properties.h" #include "hw/virtio/vhost-user-i2c.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" struct VHostUserI2CPCI { VirtIOPCIProxy parent_obj; diff --git a/hw/virtio/vhost-user-i2c.c b/hw/virtio/vhost-user-i2c.c index d172632bb0..60eaf0d95b 100644 --- 
a/hw/virtio/vhost-user-i2c.c +++ b/hw/virtio/vhost-user-i2c.c @@ -14,10 +14,11 @@ #include "qemu/error-report.h" #include "standard-headers/linux/virtio_ids.h" -/* Remove this once the header is updated in Linux kernel */ -#ifndef VIRTIO_ID_I2C_ADAPTER -#define VIRTIO_ID_I2C_ADAPTER 34 -#endif +static const int feature_bits[] = { + VIRTIO_I2C_F_ZERO_LENGTH_REQUEST, + VIRTIO_F_RING_RESET, + VHOST_INVALID_FEATURE_BIT +}; static void vu_i2c_start(VirtIODevice *vdev) { @@ -45,7 +46,7 @@ static void vu_i2c_start(VirtIODevice *vdev) i2c->vhost_dev.acked_features = vdev->guest_features; - ret = vhost_dev_start(&i2c->vhost_dev, vdev); + ret = vhost_dev_start(&i2c->vhost_dev, vdev, true); if (ret < 0) { error_report("Error starting vhost-user-i2c: %d", -ret); goto err_guest_notifiers; @@ -79,7 +80,7 @@ static void vu_i2c_stop(VirtIODevice *vdev) return; } - vhost_dev_stop(&i2c->vhost_dev, vdev); + vhost_dev_stop(&i2c->vhost_dev, vdev, true); ret = k->set_guest_notifiers(qbus->parent, i2c->vhost_dev.nvqs, false); if (ret < 0) { @@ -93,13 +94,9 @@ static void vu_i2c_stop(VirtIODevice *vdev) static void vu_i2c_set_status(VirtIODevice *vdev, uint8_t status) { VHostUserI2C *i2c = VHOST_USER_I2C(vdev); - bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK; + bool should_start = virtio_device_should_start(vdev, status); - if (!vdev->vm_running) { - should_start = false; - } - - if (i2c->vhost_dev.started == should_start) { + if (vhost_dev_is_started(&i2c->vhost_dev) == should_start) { return; } @@ -113,8 +110,10 @@ static void vu_i2c_set_status(VirtIODevice *vdev, uint8_t status) static uint64_t vu_i2c_get_features(VirtIODevice *vdev, uint64_t requested_features, Error **errp) { - /* No feature bits used yet */ - return requested_features; + VHostUserI2C *i2c = VHOST_USER_I2C(vdev); + + virtio_add_feature(&requested_features, VIRTIO_I2C_F_ZERO_LENGTH_REQUEST); + return vhost_get_features(&i2c->vhost_dev, feature_bits, requested_features); } static void vu_i2c_handle_output(VirtIODevice *vdev, VirtQueue *vq) @@ -144,8 +143,6 @@ static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserI2C *i2c) vhost_user_cleanup(&i2c->vhost_user); virtio_delete_queue(i2c->vq); virtio_cleanup(vdev); - g_free(i2c->vhost_dev.vqs); - i2c->vhost_dev.vqs = NULL; } static int vu_i2c_connect(DeviceState *dev) @@ -176,7 +173,7 @@ static void vu_i2c_disconnect(DeviceState *dev) } i2c->connected = false; - if (i2c->vhost_dev.started) { + if (vhost_dev_is_started(&i2c->vhost_dev)) { vu_i2c_stop(vdev); } } @@ -220,7 +217,7 @@ static void vu_i2c_device_realize(DeviceState *dev, Error **errp) return; } - virtio_init(vdev, "vhost-user-i2c", VIRTIO_ID_I2C_ADAPTER, 0); + virtio_init(vdev, VIRTIO_ID_I2C_ADAPTER, 0); i2c->vhost_dev.nvqs = 1; i2c->vq = virtio_add_queue(vdev, 4, vu_i2c_handle_output); @@ -229,6 +226,7 @@ static void vu_i2c_device_realize(DeviceState *dev, Error **errp) ret = vhost_dev_init(&i2c->vhost_dev, &i2c->vhost_user, VHOST_BACKEND_TYPE_USER, 0, errp); if (ret < 0) { + g_free(i2c->vhost_dev.vqs); do_vhost_user_cleanup(vdev, i2c); } @@ -240,10 +238,12 @@ static void vu_i2c_device_unrealize(DeviceState *dev) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VHostUserI2C *i2c = VHOST_USER_I2C(dev); + struct vhost_virtqueue *vhost_vqs = i2c->vhost_dev.vqs; /* This will stop vhost backend if appropriate. 
*/ vu_i2c_set_status(vdev, 0); vhost_dev_cleanup(&i2c->vhost_dev); + g_free(vhost_vqs); do_vhost_user_cleanup(vdev, i2c); } diff --git a/hw/virtio/vhost-user-input-pci.c b/hw/virtio/vhost-user-input-pci.c index c9d3e9113a..b858898a36 100644 --- a/hw/virtio/vhost-user-input-pci.c +++ b/hw/virtio/vhost-user-input-pci.c @@ -9,7 +9,7 @@ #include "hw/virtio/virtio-input.h" #include "qapi/error.h" #include "qemu/error-report.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" #include "qom/object.h" typedef struct VHostUserInputPCI VHostUserInputPCI; diff --git a/hw/virtio/vhost-user-rng-pci.c b/hw/virtio/vhost-user-rng-pci.c new file mode 100644 index 0000000000..f64935453b --- /dev/null +++ b/hw/virtio/vhost-user-rng-pci.c @@ -0,0 +1,79 @@ +/* + * Vhost-user RNG virtio device PCI glue + * + * Copyright (c) 2021 Mathieu Poirier + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/vhost-user-rng.h" +#include "hw/virtio/virtio-pci.h" + +struct VHostUserRNGPCI { + VirtIOPCIProxy parent_obj; + VHostUserRNG vdev; +}; + +typedef struct VHostUserRNGPCI VHostUserRNGPCI; + +#define TYPE_VHOST_USER_RNG_PCI "vhost-user-rng-pci-base" + +DECLARE_INSTANCE_CHECKER(VHostUserRNGPCI, VHOST_USER_RNG_PCI, + TYPE_VHOST_USER_RNG_PCI) + +static Property vhost_user_rng_pci_properties[] = { + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, + DEV_NVECTORS_UNSPECIFIED), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vhost_user_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VHostUserRNGPCI *dev = VHOST_USER_RNG_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { + vpci_dev->nvectors = 1; + } + + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void vhost_user_rng_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + k->realize = vhost_user_rng_pci_realize; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + device_class_set_props(dc, vhost_user_rng_pci_properties); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = 0; /* Set by virtio-pci based on virtio id */ + pcidev_k->revision = 0x00; + pcidev_k->class_id = PCI_CLASS_OTHERS; +} + +static void vhost_user_rng_pci_instance_init(Object *obj) +{ + VHostUserRNGPCI *dev = VHOST_USER_RNG_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_USER_RNG); +} + +static const VirtioPCIDeviceTypeInfo vhost_user_rng_pci_info = { + .base_name = TYPE_VHOST_USER_RNG_PCI, + .non_transitional_name = "vhost-user-rng-pci", + .instance_size = sizeof(VHostUserRNGPCI), + .instance_init = vhost_user_rng_pci_instance_init, + .class_init = vhost_user_rng_pci_class_init, +}; + +static void vhost_user_rng_pci_register(void) +{ + virtio_pci_types_register(&vhost_user_rng_pci_info); +} + +type_init(vhost_user_rng_pci_register); diff --git a/hw/virtio/vhost-user-rng.c b/hw/virtio/vhost-user-rng.c new file mode 100644 index 0000000000..efc54cd3fb --- /dev/null +++ b/hw/virtio/vhost-user-rng.c @@ -0,0 +1,300 @@ +/* + * Vhost-user RNG virtio device + * + * Copyright (c) 2021 Mathieu Poirier + * + * Implementation seriously tailored on vhost-user-i2c.c + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include 
"hw/virtio/virtio-bus.h" +#include "hw/virtio/vhost-user-rng.h" +#include "qemu/error-report.h" +#include "standard-headers/linux/virtio_ids.h" + +static const int feature_bits[] = { + VIRTIO_F_RING_RESET, + VHOST_INVALID_FEATURE_BIT +}; + +static void vu_rng_start(VirtIODevice *vdev) +{ + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int ret; + int i; + + if (!k->set_guest_notifiers) { + error_report("binding does not support guest notifiers"); + return; + } + + ret = vhost_dev_enable_notifiers(&rng->vhost_dev, vdev); + if (ret < 0) { + error_report("Error enabling host notifiers: %d", -ret); + return; + } + + ret = k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, true); + if (ret < 0) { + error_report("Error binding guest notifier: %d", -ret); + goto err_host_notifiers; + } + + rng->vhost_dev.acked_features = vdev->guest_features; + ret = vhost_dev_start(&rng->vhost_dev, vdev, true); + if (ret < 0) { + error_report("Error starting vhost-user-rng: %d", -ret); + goto err_guest_notifiers; + } + + /* + * guest_notifier_mask/pending not used yet, so just unmask + * everything here. virtio-pci will do the right thing by + * enabling/disabling irqfd. + */ + for (i = 0; i < rng->vhost_dev.nvqs; i++) { + vhost_virtqueue_mask(&rng->vhost_dev, vdev, i, false); + } + + return; + +err_guest_notifiers: + k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, false); +err_host_notifiers: + vhost_dev_disable_notifiers(&rng->vhost_dev, vdev); +} + +static void vu_rng_stop(VirtIODevice *vdev) +{ + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int ret; + + if (!k->set_guest_notifiers) { + return; + } + + vhost_dev_stop(&rng->vhost_dev, vdev, true); + + ret = k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, false); + if (ret < 0) { + error_report("vhost guest notifier cleanup failed: %d", ret); + return; + } + + vhost_dev_disable_notifiers(&rng->vhost_dev, vdev); +} + +static void vu_rng_set_status(VirtIODevice *vdev, uint8_t status) +{ + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + bool should_start = virtio_device_should_start(vdev, status); + + if (vhost_dev_is_started(&rng->vhost_dev) == should_start) { + return; + } + + if (should_start) { + vu_rng_start(vdev); + } else { + vu_rng_stop(vdev); + } +} + +static uint64_t vu_rng_get_features(VirtIODevice *vdev, + uint64_t requested_features, Error **errp) +{ + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + + return vhost_get_features(&rng->vhost_dev, feature_bits, + requested_features); +} + +static void vu_rng_handle_output(VirtIODevice *vdev, VirtQueue *vq) +{ + /* + * Not normally called; it's the daemon that handles the queue; + * however virtio's cleanup path can call this. 
+ */ +} + +static void vu_rng_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) +{ + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + + vhost_virtqueue_mask(&rng->vhost_dev, vdev, idx, mask); +} + +static bool vu_rng_guest_notifier_pending(VirtIODevice *vdev, int idx) +{ + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + + return vhost_virtqueue_pending(&rng->vhost_dev, idx); +} + +static void vu_rng_connect(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + + if (rng->connected) { + return; + } + + rng->connected = true; + + /* restore vhost state */ + if (virtio_device_started(vdev, vdev->status)) { + vu_rng_start(vdev); + } +} + +static void vu_rng_disconnect(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + + if (!rng->connected) { + return; + } + + rng->connected = false; + + if (vhost_dev_is_started(&rng->vhost_dev)) { + vu_rng_stop(vdev); + } +} + +static void vu_rng_event(void *opaque, QEMUChrEvent event) +{ + DeviceState *dev = opaque; + + switch (event) { + case CHR_EVENT_OPENED: + vu_rng_connect(dev); + break; + case CHR_EVENT_CLOSED: + vu_rng_disconnect(dev); + break; + case CHR_EVENT_BREAK: + case CHR_EVENT_MUX_IN: + case CHR_EVENT_MUX_OUT: + /* Ignore */ + break; + } +} + +static void vu_rng_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserRNG *rng = VHOST_USER_RNG(dev); + int ret; + + if (!rng->chardev.chr) { + error_setg(errp, "missing chardev"); + return; + } + + if (!vhost_user_init(&rng->vhost_user, &rng->chardev, errp)) { + return; + } + + virtio_init(vdev, VIRTIO_ID_RNG, 0); + + rng->req_vq = virtio_add_queue(vdev, 4, vu_rng_handle_output); + if (!rng->req_vq) { + error_setg_errno(errp, -1, "virtio_add_queue() failed"); + goto virtio_add_queue_failed; + } + + rng->vhost_dev.nvqs = 1; + rng->vhost_dev.vqs = g_new0(struct vhost_virtqueue, rng->vhost_dev.nvqs); + ret = vhost_dev_init(&rng->vhost_dev, &rng->vhost_user, + VHOST_BACKEND_TYPE_USER, 0, errp); + if (ret < 0) { + error_setg_errno(errp, -ret, "vhost_dev_init() failed"); + goto vhost_dev_init_failed; + } + + qemu_chr_fe_set_handlers(&rng->chardev, NULL, NULL, vu_rng_event, NULL, + dev, NULL, true); + + return; + +vhost_dev_init_failed: + g_free(rng->vhost_dev.vqs); + virtio_delete_queue(rng->req_vq); +virtio_add_queue_failed: + virtio_cleanup(vdev); + vhost_user_cleanup(&rng->vhost_user); +} + +static void vu_rng_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserRNG *rng = VHOST_USER_RNG(dev); + struct vhost_virtqueue *vhost_vqs = rng->vhost_dev.vqs; + + vu_rng_set_status(vdev, 0); + + vhost_dev_cleanup(&rng->vhost_dev); + g_free(vhost_vqs); + virtio_delete_queue(rng->req_vq); + virtio_cleanup(vdev); + vhost_user_cleanup(&rng->vhost_user); +} + +static struct vhost_dev *vu_rng_get_vhost(VirtIODevice *vdev) +{ + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + return &rng->vhost_dev; +} + +static const VMStateDescription vu_rng_vmstate = { + .name = "vhost-user-rng", + .unmigratable = 1, +}; + +static Property vu_rng_properties[] = { + DEFINE_PROP_CHR("chardev", VHostUserRNG, chardev), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vu_rng_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, vu_rng_properties); + dc->vmsd = &vu_rng_vmstate; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); 
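+ /* vhost-user-rng has no config space (virtio_init() is called with size 0), so no get_config hook is set. */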
+ + vdc->realize = vu_rng_device_realize; + vdc->unrealize = vu_rng_device_unrealize; + vdc->get_features = vu_rng_get_features; + vdc->set_status = vu_rng_set_status; + vdc->guest_notifier_mask = vu_rng_guest_notifier_mask; + vdc->guest_notifier_pending = vu_rng_guest_notifier_pending; + vdc->get_vhost = vu_rng_get_vhost; +} + +static const TypeInfo vu_rng_info = { + .name = TYPE_VHOST_USER_RNG, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VHostUserRNG), + .class_init = vu_rng_class_init, +}; + +static void vu_rng_register_types(void) +{ + type_register_static(&vu_rng_info); +} + +type_init(vu_rng_register_types) diff --git a/hw/virtio/vhost-user-scsi-pci.c b/hw/virtio/vhost-user-scsi-pci.c index d5343412a1..75882e3cf9 100644 --- a/hw/virtio/vhost-user-scsi-pci.c +++ b/hw/virtio/vhost-user-scsi-pci.c @@ -30,7 +30,7 @@ #include "hw/pci/msix.h" #include "hw/loader.h" #include "sysemu/kvm.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" #include "qom/object.h" typedef struct VHostUserSCSIPCI VHostUserSCSIPCI; diff --git a/hw/virtio/vhost-user-vsock-pci.c b/hw/virtio/vhost-user-vsock-pci.c index 72a96199cd..e5a86e8013 100644 --- a/hw/virtio/vhost-user-vsock-pci.c +++ b/hw/virtio/vhost-user-vsock-pci.c @@ -10,7 +10,7 @@ #include "qemu/osdep.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" #include "hw/qdev-properties.h" #include "hw/virtio/vhost-user-vsock.h" #include "qom/object.h" diff --git a/hw/virtio/vhost-user-vsock.c b/hw/virtio/vhost-user-vsock.c index 6095ed7349..9431b9792c 100644 --- a/hw/virtio/vhost-user-vsock.c +++ b/hw/virtio/vhost-user-vsock.c @@ -55,13 +55,9 @@ const VhostDevConfigOps vsock_ops = { static void vuv_set_status(VirtIODevice *vdev, uint8_t status) { VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); - bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK; + bool should_start = virtio_device_should_start(vdev, status); - if (!vdev->vm_running) { - should_start = false; - } - - if (vvc->vhost_dev.started == should_start) { + if (vhost_dev_is_started(&vvc->vhost_dev) == should_start) { return; } @@ -81,7 +77,9 @@ static uint64_t vuv_get_features(VirtIODevice *vdev, { VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); - return vhost_get_features(&vvc->vhost_dev, user_feature_bits, features); + features = vhost_get_features(&vvc->vhost_dev, user_feature_bits, features); + + return vhost_vsock_common_get_features(vdev, features, errp); } static const VMStateDescription vuv_vmstate = { @@ -105,7 +103,7 @@ static void vuv_device_realize(DeviceState *dev, Error **errp) return; } - vhost_vsock_common_realize(vdev, "vhost-user-vsock"); + vhost_vsock_common_realize(vdev); vhost_dev_set_config_notifier(&vvc->vhost_dev, &vsock_ops); diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index aec6cc1990..d92b026e1c 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -21,10 +21,12 @@ #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "qemu/sockets.h" +#include "sysemu/runstate.h" #include "sysemu/cryptodev.h" #include "migration/migration.h" #include "migration/postcopy-ram.h" #include "trace.h" +#include "exec/ramblock.h" #include #include @@ -50,7 +52,7 @@ #include "hw/acpi/acpi.h" #define VHOST_USER_MAX_RAM_SLOTS ACPI_MAX_RAM_SLOTS -#elif defined(TARGET_PPC) || defined(TARGET_PPC_64) +#elif defined(TARGET_PPC) || defined(TARGET_PPC64) #include "hw/ppc/spapr.h" #define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS @@ -80,6 +82,7 @@ enum VhostUserProtocolFeature { VHOST_USER_PROTOCOL_F_RESET_DEVICE = 
13, /* Feature 14 reserved for VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. */ VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15, + VHOST_USER_PROTOCOL_F_STATUS = 16, VHOST_USER_PROTOCOL_F_MAX }; @@ -125,6 +128,8 @@ typedef enum VhostUserRequest { VHOST_USER_GET_MAX_MEM_SLOTS = 36, VHOST_USER_ADD_MEM_REG = 37, VHOST_USER_REM_MEM_REG = 38, + VHOST_USER_SET_STATUS = 39, + VHOST_USER_GET_STATUS = 40, VHOST_USER_MAX } VhostUserRequest; @@ -199,7 +204,7 @@ typedef struct { VhostUserRequest request; #define VHOST_USER_VERSION_MASK (0x3) -#define VHOST_USER_REPLY_MASK (0x1<<2) +#define VHOST_USER_REPLY_MASK (0x1 << 2) #define VHOST_USER_NEED_REPLY_MASK (0x1 << 3) uint32_t flags; uint32_t size; /* the following payload size */ @@ -207,7 +212,7 @@ typedef struct { typedef union { #define VHOST_USER_VRING_IDX_MASK (0xff) -#define VHOST_USER_VRING_NOFD_MASK (0x1<<8) +#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8) uint64_t u64; struct vhost_vring_state state; struct vhost_vring_addr addr; @@ -247,7 +252,8 @@ struct vhost_user { size_t region_rb_len; /* RAMBlock associated with a given region */ RAMBlock **region_rb; - /* The offset from the start of the RAMBlock to the start of the + /* + * The offset from the start of the RAMBlock to the start of the * vhost region. */ ram_addr_t *region_rb_offset; @@ -280,9 +286,10 @@ static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg) r = qemu_chr_fe_read_all(chr, p, size); if (r != size) { + int saved_errno = errno; error_report("Failed to read msg header. Read %d instead of %d." " Original request %d.", r, size, msg->hdr.request); - return -1; + return r < 0 ? -saved_errno : -EIO; } /* validate received flags */ @@ -290,33 +297,24 @@ static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg) error_report("Failed to read msg header." " Flags 0x%x instead of 0x%x.", msg->hdr.flags, VHOST_USER_REPLY_MASK | VHOST_USER_VERSION); - return -1; + return -EPROTO; } + trace_vhost_user_read(msg->hdr.request, msg->hdr.flags); + return 0; } -struct vhost_user_read_cb_data { - struct vhost_dev *dev; - VhostUserMsg *msg; - GMainLoop *loop; - int ret; -}; - -static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition, - gpointer opaque) +static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) { - struct vhost_user_read_cb_data *data = opaque; - struct vhost_dev *dev = data->dev; - VhostUserMsg *msg = data->msg; struct vhost_user *u = dev->opaque; CharBackend *chr = u->user->chr; uint8_t *p = (uint8_t *) msg; int r, size; - if (vhost_user_read_header(dev, msg) < 0) { - data->ret = -1; - goto end; + r = vhost_user_read_header(dev, msg); + if (r < 0) { + return r; } /* validate message size is sane */ @@ -324,8 +322,7 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition, error_report("Failed to read msg header." " Size %d exceeds the maximum %zu.", msg->hdr.size, VHOST_USER_PAYLOAD_SIZE); - data->ret = -1; - goto end; + return -EPROTO; } if (msg->hdr.size) { @@ -333,109 +330,39 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition, size = msg->hdr.size; r = qemu_chr_fe_read_all(chr, p, size); if (r != size) { + int saved_errno = errno; error_report("Failed to read msg payload." " Read %d instead of %d.", r, msg->hdr.size); - data->ret = -1; - goto end; + return r < 0 ? 
-saved_errno : -EIO; } } -end: - g_main_loop_quit(data->loop); - return G_SOURCE_REMOVE; -} - -static gboolean slave_read(QIOChannel *ioc, GIOCondition condition, - gpointer opaque); - -/* - * This updates the read handler to use a new event loop context. - * Event sources are removed from the previous context : this ensures - * that events detected in the previous context are purged. They will - * be re-detected and processed in the new context. - */ -static void slave_update_read_handler(struct vhost_dev *dev, - GMainContext *ctxt) -{ - struct vhost_user *u = dev->opaque; - - if (!u->slave_ioc) { - return; - } - - if (u->slave_src) { - g_source_destroy(u->slave_src); - g_source_unref(u->slave_src); - } - - u->slave_src = qio_channel_add_watch_source(u->slave_ioc, - G_IO_IN | G_IO_HUP, - slave_read, dev, NULL, - ctxt); -} - -static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) -{ - struct vhost_user *u = dev->opaque; - CharBackend *chr = u->user->chr; - GMainContext *prev_ctxt = chr->chr->gcontext; - GMainContext *ctxt = g_main_context_new(); - GMainLoop *loop = g_main_loop_new(ctxt, FALSE); - struct vhost_user_read_cb_data data = { - .dev = dev, - .loop = loop, - .msg = msg, - .ret = 0 - }; - - /* - * We want to be able to monitor the slave channel fd while waiting - * for chr I/O. This requires an event loop, but we can't nest the - * one to which chr is currently attached : its fd handlers might not - * be prepared for re-entrancy. So we create a new one and switch chr - * to use it. - */ - slave_update_read_handler(dev, ctxt); - qemu_chr_be_update_read_handlers(chr->chr, ctxt); - qemu_chr_fe_add_watch(chr, G_IO_IN | G_IO_HUP, vhost_user_read_cb, &data); - - g_main_loop_run(loop); - - /* - * Restore the previous event loop context. This also destroys/recreates - * event sources : this guarantees that all pending events in the original - * context that have been processed by the nested loop are purged. - */ - qemu_chr_be_update_read_handlers(chr->chr, prev_ctxt); - slave_update_read_handler(dev, NULL); - - g_main_loop_unref(loop); - g_main_context_unref(ctxt); - - return data.ret; + return 0; } static int process_message_reply(struct vhost_dev *dev, const VhostUserMsg *msg) { + int ret; VhostUserMsg msg_reply; if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) { return 0; } - if (vhost_user_read(dev, &msg_reply) < 0) { - return -1; + ret = vhost_user_read(dev, &msg_reply); + if (ret < 0) { + return ret; } if (msg_reply.hdr.request != msg->hdr.request) { - error_report("Received unexpected msg type." + error_report("Received unexpected msg type. " "Expected %d received %d", msg->hdr.request, msg_reply.hdr.request); - return -1; + return -EPROTO; } - return msg_reply.payload.u64 ? -1 : 0; + return msg_reply.payload.u64 ? -EIO : 0; } static bool vhost_user_one_time_request(VhostUserRequest request) @@ -472,16 +399,19 @@ static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg, if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) { error_report("Failed to set msg fds."); - return -1; + return -EINVAL; } ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size); if (ret != size) { + int saved_errno = errno; error_report("Failed to write msg." " Wrote %d instead of %d.", ret, size); - return -1; + return ret < 0 ? 
-saved_errno : -EIO; } + trace_vhost_user_write(msg->hdr.request, msg->hdr.flags); + return 0; } @@ -502,6 +432,7 @@ static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base, size_t fd_num = 0; bool shmfd = virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_LOG_SHMFD); + int ret; VhostUserMsg msg = { .hdr.request = VHOST_USER_SET_LOG_BASE, .hdr.flags = VHOST_USER_VERSION, @@ -514,21 +445,23 @@ static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base, fds[fd_num++] = log->fd; } - if (vhost_user_write(dev, &msg, fds, fd_num) < 0) { - return -1; + ret = vhost_user_write(dev, &msg, fds, fd_num); + if (ret < 0) { + return ret; } if (shmfd) { msg.hdr.size = 0; - if (vhost_user_read(dev, &msg) < 0) { - return -1; + ret = vhost_user_read(dev, &msg); + if (ret < 0) { + return ret; } if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) { error_report("Received unexpected msg type. " "Expected %d received %d", VHOST_USER_SET_LOG_BASE, msg.hdr.request); - return -1; + return -EPROTO; } } @@ -588,7 +521,7 @@ static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u, u->region_rb[i] = mr->ram_block; } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) { error_report("Failed preparing vhost-user memory table msg"); - return -1; + return -ENOBUFS; } vhost_user_fill_msg_region(®ion_buffer, reg, offset); msg->payload.memory.regions[*fd_num] = region_buffer; @@ -604,14 +537,14 @@ static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u, if (!*fd_num) { error_report("Failed initializing vhost-user memory map, " "consider using -object memory-backend-file share=on"); - return -1; + return -EINVAL; } msg->hdr.size = sizeof(msg->payload.memory.nregions); msg->hdr.size += sizeof(msg->payload.memory.padding); msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion); - return 1; + return 0; } static inline bool reg_equal(struct vhost_memory_region *shadow_reg, @@ -741,8 +674,9 @@ static int send_remove_regions(struct vhost_dev *dev, vhost_user_fill_msg_region(®ion_buffer, shadow_reg, 0); msg->payload.mem_reg.region = region_buffer; - if (vhost_user_write(dev, msg, &fd, 1) < 0) { - return -1; + ret = vhost_user_write(dev, msg, NULL, 0); + if (ret < 0) { + return ret; } if (reply_supported) { @@ -801,15 +735,17 @@ static int send_add_regions(struct vhost_dev *dev, vhost_user_fill_msg_region(®ion_buffer, reg, offset); msg->payload.mem_reg.region = region_buffer; - if (vhost_user_write(dev, msg, &fd, 1) < 0) { - return -1; + ret = vhost_user_write(dev, msg, &fd, 1); + if (ret < 0) { + return ret; } if (track_ramblocks) { uint64_t reply_gpa; - if (vhost_user_read(dev, &msg_reply) < 0) { - return -1; + ret = vhost_user_read(dev, &msg_reply); + if (ret < 0) { + return ret; } reply_gpa = msg_reply.payload.mem_reg.region.guest_phys_addr; @@ -819,7 +755,7 @@ static int send_add_regions(struct vhost_dev *dev, "Expected %d received %d", __func__, VHOST_USER_ADD_MEM_REG, msg_reply.hdr.request); - return -1; + return -EPROTO; } /* @@ -830,7 +766,7 @@ static int send_add_regions(struct vhost_dev *dev, error_report("%s: Unexpected size for postcopy reply " "%d vs %d", __func__, msg_reply.hdr.size, msg->hdr.size); - return -1; + return -EPROTO; } /* Get the postcopy client base from the backend's reply. 
*/ @@ -846,7 +782,7 @@ static int send_add_regions(struct vhost_dev *dev, "Got guest physical address %" PRIX64 ", expected " "%" PRIX64, __func__, reply_gpa, dev->mem->regions[reg_idx].guest_phys_addr); - return -1; + return -EPROTO; } } else if (reply_supported) { ret = process_message_reply(dev, msg); @@ -887,6 +823,7 @@ static int vhost_user_add_remove_regions(struct vhost_dev *dev, struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS]; uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {}; int nr_add_reg, nr_rem_reg; + int ret; msg->hdr.size = sizeof(msg->payload.mem_reg); @@ -894,16 +831,20 @@ static int vhost_user_add_remove_regions(struct vhost_dev *dev, scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg, shadow_pcb, track_ramblocks); - if (nr_rem_reg && send_remove_regions(dev, rem_reg, nr_rem_reg, msg, - reply_supported) < 0) - { - goto err; + if (nr_rem_reg) { + ret = send_remove_regions(dev, rem_reg, nr_rem_reg, msg, + reply_supported); + if (ret < 0) { + goto err; + } } - if (nr_add_reg && send_add_regions(dev, add_reg, nr_add_reg, msg, - shadow_pcb, reply_supported, track_ramblocks) < 0) - { - goto err; + if (nr_add_reg) { + ret = send_add_regions(dev, add_reg, nr_add_reg, msg, shadow_pcb, + reply_supported, track_ramblocks); + if (ret < 0) { + goto err; + } } if (track_ramblocks) { @@ -918,8 +859,9 @@ static int vhost_user_add_remove_regions(struct vhost_dev *dev, msg->hdr.size = sizeof(msg->payload.u64); msg->payload.u64 = 0; /* OK */ - if (vhost_user_write(dev, msg, NULL, 0) < 0) { - return -1; + ret = vhost_user_write(dev, msg, NULL, 0); + if (ret < 0) { + return ret; } } @@ -931,7 +873,7 @@ err: sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS); } - return -1; + return ret; } static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev, @@ -944,6 +886,7 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev, size_t fd_num = 0; VhostUserMsg msg_reply; int region_i, msg_i; + int ret; VhostUserMsg msg = { .hdr.flags = VHOST_USER_VERSION, @@ -961,29 +904,32 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev, } if (config_mem_slots) { - if (vhost_user_add_remove_regions(dev, &msg, reply_supported, - true) < 0) { - return -1; + ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, true); + if (ret < 0) { + return ret; } } else { - if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num, - true) < 0) { - return -1; + ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num, + true); + if (ret < 0) { + return ret; } - if (vhost_user_write(dev, &msg, fds, fd_num) < 0) { - return -1; + ret = vhost_user_write(dev, &msg, fds, fd_num); + if (ret < 0) { + return ret; } - if (vhost_user_read(dev, &msg_reply) < 0) { - return -1; + ret = vhost_user_read(dev, &msg_reply); + if (ret < 0) { + return ret; } if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) { error_report("%s: Received unexpected msg type." 
"Expected %d received %d", __func__, VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request); - return -1; + return -EPROTO; } /* @@ -994,7 +940,7 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev, error_report("%s: Unexpected size for postcopy reply " "%d vs %d", __func__, msg_reply.hdr.size, msg.hdr.size); - return -1; + return -EPROTO; } memset(u->postcopy_client_bases, 0, @@ -1024,7 +970,7 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev, error_report("%s: postcopy reply not fully consumed " "%d vs %zd", __func__, msg_i, fd_num); - return -1; + return -EIO; } /* @@ -1035,8 +981,9 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev, /* TODO: Use this for failure cases as well with a bad value. */ msg.hdr.size = sizeof(msg.payload.u64); msg.payload.u64 = 0; /* OK */ - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + return ret; } } @@ -1055,6 +1002,7 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev, bool config_mem_slots = virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS); + int ret; if (do_postcopy) { /* @@ -1074,17 +1022,20 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev, } if (config_mem_slots) { - if (vhost_user_add_remove_regions(dev, &msg, reply_supported, - false) < 0) { - return -1; + ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, false); + if (ret < 0) { + return ret; } } else { - if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num, - false) < 0) { - return -1; + ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num, + false); + if (ret < 0) { + return ret; } - if (vhost_user_write(dev, &msg, fds, fd_num) < 0) { - return -1; + + ret = vhost_user_write(dev, &msg, fds, fd_num); + if (ret < 0) { + return ret; } if (reply_supported) { @@ -1095,23 +1046,6 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev, return 0; } -static int vhost_user_set_vring_addr(struct vhost_dev *dev, - struct vhost_vring_addr *addr) -{ - VhostUserMsg msg = { - .hdr.request = VHOST_USER_SET_VRING_ADDR, - .hdr.flags = VHOST_USER_VERSION, - .payload.addr = *addr, - .hdr.size = sizeof(msg.payload.addr), - }; - - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; - } - - return 0; -} - static int vhost_user_set_vring_endian(struct vhost_dev *dev, struct vhost_vring_state *ring) { @@ -1126,14 +1060,10 @@ static int vhost_user_set_vring_endian(struct vhost_dev *dev, if (!cross_endian) { error_report("vhost-user trying to send unhandled ioctl"); - return -1; + return -ENOTSUP; } - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; - } - - return 0; + return vhost_user_write(dev, &msg, NULL, 0); } static int vhost_set_vring(struct vhost_dev *dev, @@ -1147,11 +1077,7 @@ static int vhost_set_vring(struct vhost_dev *dev, .hdr.size = sizeof(msg.payload.state), }; - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; - } - - return 0; + return vhost_user_write(dev, &msg, NULL, 0); } static int vhost_user_set_vring_num(struct vhost_dev *dev, @@ -1160,37 +1086,34 @@ static int vhost_user_set_vring_num(struct vhost_dev *dev, return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring); } -static void vhost_user_host_notifier_restore(struct vhost_dev *dev, - int queue_idx) +static void vhost_user_host_notifier_free(VhostUserHostNotifier *n) { - struct vhost_user *u = dev->opaque; - VhostUserHostNotifier *n = &u->user->notifier[queue_idx]; - VirtIODevice 
*vdev = dev->vdev; - - if (n->addr && !n->set) { - virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true); - n->set = true; - } + assert(n && n->unmap_addr); + munmap(n->unmap_addr, qemu_real_host_page_size()); + n->unmap_addr = NULL; } -static void vhost_user_host_notifier_remove(struct vhost_dev *dev, - int queue_idx) +/* + * clean-up function for notifier, will finally free the structure + * under rcu. + */ +static void vhost_user_host_notifier_remove(VhostUserHostNotifier *n, + VirtIODevice *vdev) { - struct vhost_user *u = dev->opaque; - VhostUserHostNotifier *n = &u->user->notifier[queue_idx]; - VirtIODevice *vdev = dev->vdev; - - if (n->addr && n->set) { - virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false); - n->set = false; + if (n->addr) { + if (vdev) { + virtio_queue_set_host_notifier_mr(vdev, n->idx, &n->mr, false); + } + assert(!n->unmap_addr); + n->unmap_addr = n->addr; + n->addr = NULL; + call_rcu(n, vhost_user_host_notifier_free, rcu); } } static int vhost_user_set_vring_base(struct vhost_dev *dev, struct vhost_vring_state *ring) { - vhost_user_host_notifier_restore(dev, ring->index); - return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring); } @@ -1199,50 +1122,75 @@ static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable) int i; if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) { - return -1; + return -EINVAL; } for (i = 0; i < dev->nvqs; ++i) { + int ret; struct vhost_vring_state state = { .index = dev->vq_index + i, .num = enable, }; - vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state); + ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state); + if (ret < 0) { + /* + * Restoring the previous state is likely infeasible, as well as + * proceeding regardless the error, so just bail out and hope for + * the device-level recovery. + */ + return ret; + } } return 0; } +static VhostUserHostNotifier *fetch_notifier(VhostUserState *u, + int idx) +{ + if (idx >= u->notifiers->len) { + return NULL; + } + return g_ptr_array_index(u->notifiers, idx); +} + static int vhost_user_get_vring_base(struct vhost_dev *dev, struct vhost_vring_state *ring) { + int ret; VhostUserMsg msg = { .hdr.request = VHOST_USER_GET_VRING_BASE, .hdr.flags = VHOST_USER_VERSION, .payload.state = *ring, .hdr.size = sizeof(msg.payload.state), }; + struct vhost_user *u = dev->opaque; - vhost_user_host_notifier_remove(dev, ring->index); - - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; + VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index); + if (n) { + vhost_user_host_notifier_remove(n, dev->vdev); } - if (vhost_user_read(dev, &msg) < 0) { - return -1; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + return ret; + } + + ret = vhost_user_read(dev, &msg); + if (ret < 0) { + return ret; } if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) { error_report("Received unexpected msg type. 
Expected %d received %d", VHOST_USER_GET_VRING_BASE, msg.hdr.request); - return -1; + return -EPROTO; } if (msg.hdr.size != sizeof(msg.payload.state)) { error_report("Received bad msg size."); - return -1; + return -EPROTO; } *ring = msg.payload.state; @@ -1269,11 +1217,7 @@ static int vhost_set_vring_file(struct vhost_dev *dev, msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK; } - if (vhost_user_write(dev, &msg, fds, fd_num) < 0) { - return -1; - } - - return 0; + return vhost_user_write(dev, &msg, fds, fd_num); } static int vhost_user_set_vring_kick(struct vhost_dev *dev, @@ -1288,36 +1232,15 @@ static int vhost_user_set_vring_call(struct vhost_dev *dev, return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file); } -static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64) +static int vhost_user_set_vring_err(struct vhost_dev *dev, + struct vhost_vring_file *file) { - VhostUserMsg msg = { - .hdr.request = request, - .hdr.flags = VHOST_USER_VERSION, - .payload.u64 = u64, - .hdr.size = sizeof(msg.payload.u64), - }; - - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; - } - - return 0; -} - -static int vhost_user_set_features(struct vhost_dev *dev, - uint64_t features) -{ - return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features); -} - -static int vhost_user_set_protocol_features(struct vhost_dev *dev, - uint64_t features) -{ - return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features); + return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_ERR, file); } static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64) { + int ret; VhostUserMsg msg = { .hdr.request = request, .hdr.flags = VHOST_USER_VERSION, @@ -1327,23 +1250,25 @@ static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64) return 0; } - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + return ret; } - if (vhost_user_read(dev, &msg) < 0) { - return -1; + ret = vhost_user_read(dev, &msg); + if (ret < 0) { + return ret; } if (msg.hdr.request != request) { error_report("Received unexpected msg type. Expected %d received %d", request, msg.hdr.request); - return -1; + return -EPROTO; } if (msg.hdr.size != sizeof(msg.payload.u64)) { error_report("Received bad msg size."); - return -1; + return -EPROTO; } *u64 = msg.payload.u64; @@ -1360,6 +1285,165 @@ static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features) return 0; } +static int enforce_reply(struct vhost_dev *dev, + const VhostUserMsg *msg) +{ + uint64_t dummy; + + if (msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) { + return process_message_reply(dev, msg); + } + + /* + * We need to wait for a reply but the backend does not + * support replies for the command we just sent. + * Send VHOST_USER_GET_FEATURES which makes all backends + * send a reply. 
+ */ + return vhost_user_get_features(dev, &dummy); +} + +static int vhost_user_set_vring_addr(struct vhost_dev *dev, + struct vhost_vring_addr *addr) +{ + int ret; + VhostUserMsg msg = { + .hdr.request = VHOST_USER_SET_VRING_ADDR, + .hdr.flags = VHOST_USER_VERSION, + .payload.addr = *addr, + .hdr.size = sizeof(msg.payload.addr), + }; + + bool reply_supported = virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_REPLY_ACK); + + /* + * wait for a reply if logging is enabled to make sure + * backend is actually logging changes + */ + bool wait_for_reply = addr->flags & (1 << VHOST_VRING_F_LOG); + + if (reply_supported && wait_for_reply) { + msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; + } + + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + return ret; + } + + if (wait_for_reply) { + return enforce_reply(dev, &msg); + } + + return 0; +} + +static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64, + bool wait_for_reply) +{ + VhostUserMsg msg = { + .hdr.request = request, + .hdr.flags = VHOST_USER_VERSION, + .payload.u64 = u64, + .hdr.size = sizeof(msg.payload.u64), + }; + int ret; + + if (wait_for_reply) { + bool reply_supported = virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_REPLY_ACK); + if (reply_supported) { + msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; + } + } + + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + return ret; + } + + if (wait_for_reply) { + return enforce_reply(dev, &msg); + } + + return 0; +} + +static int vhost_user_set_status(struct vhost_dev *dev, uint8_t status) +{ + return vhost_user_set_u64(dev, VHOST_USER_SET_STATUS, status, false); +} + +static int vhost_user_get_status(struct vhost_dev *dev, uint8_t *status) +{ + uint64_t value; + int ret; + + ret = vhost_user_get_u64(dev, VHOST_USER_GET_STATUS, &value); + if (ret < 0) { + return ret; + } + *status = value; + + return 0; +} + +static int vhost_user_add_status(struct vhost_dev *dev, uint8_t status) +{ + uint8_t s; + int ret; + + ret = vhost_user_get_status(dev, &s); + if (ret < 0) { + return ret; + } + + if ((s & status) == status) { + return 0; + } + s |= status; + + return vhost_user_set_status(dev, s); +} + +static int vhost_user_set_features(struct vhost_dev *dev, + uint64_t features) +{ + /* + * wait for a reply if logging is enabled to make sure + * backend is actually logging changes + */ + bool log_enabled = features & (0x1ULL << VHOST_F_LOG_ALL); + int ret; + + /* + * We need to include any extra backend only feature bits that + * might be needed by our device. Currently this includes the + * VHOST_USER_F_PROTOCOL_FEATURES bit for enabling protocol + * features. 
+ */ + ret = vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, + features | dev->backend_features, + log_enabled); + + if (virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_STATUS)) { + if (!ret) { + return vhost_user_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK); + } + } + + return ret; +} + +static int vhost_user_set_protocol_features(struct vhost_dev *dev, + uint64_t features) +{ + return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features, + false); +} + static int vhost_user_set_owner(struct vhost_dev *dev) { VhostUserMsg msg = { @@ -1367,11 +1451,7 @@ static int vhost_user_set_owner(struct vhost_dev *dev) .hdr.flags = VHOST_USER_VERSION, }; - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -EPROTO; - } - - return 0; + return vhost_user_write(dev, &msg, NULL, 0); } static int vhost_user_get_max_memslots(struct vhost_dev *dev, @@ -1402,26 +1482,44 @@ static int vhost_user_reset_device(struct vhost_dev *dev) ? VHOST_USER_RESET_DEVICE : VHOST_USER_RESET_OWNER; - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; - } - - return 0; + return vhost_user_write(dev, &msg, NULL, 0); } static int vhost_user_slave_handle_config_change(struct vhost_dev *dev) { - int ret = -1; - - if (!dev->config_ops) { - return -1; + if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) { + return -ENOSYS; } - if (dev->config_ops->vhost_dev_config_notifier) { - ret = dev->config_ops->vhost_dev_config_notifier(dev); + return dev->config_ops->vhost_dev_config_notifier(dev); +} + +/* + * Fetch or create the notifier for a given idx. Newly created + * notifiers are added to the pointer array that tracks them. + */ +static VhostUserHostNotifier *fetch_or_create_notifier(VhostUserState *u, + int idx) +{ + VhostUserHostNotifier *n = NULL; + if (idx >= u->notifiers->len) { + g_ptr_array_set_size(u->notifiers, idx + 1); } - return ret; + n = g_ptr_array_index(u->notifiers, idx); + if (!n) { + /* + * In case notification arrive out-of-order, + * make room for current index. + */ + g_ptr_array_remove_index(u->notifiers, idx); + n = g_new0(VhostUserHostNotifier, 1); + n->idx = idx; + g_ptr_array_insert(u->notifiers, idx, n); + trace_vhost_user_create_notifier(idx, n); + } + + return n; } static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev, @@ -1429,7 +1527,7 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev, int fd) { int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK; - size_t page_size = qemu_real_host_page_size; + size_t page_size = qemu_real_host_page_size(); struct vhost_user *u = dev->opaque; VhostUserState *user = u->user; VirtIODevice *vdev = dev->vdev; @@ -1440,17 +1538,15 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev, if (!virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) || vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) { - return -1; + return -EINVAL; } - n = &user->notifier[queue_idx]; - - if (n->addr) { - virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false); - object_unparent(OBJECT(&n->mr)); - munmap(n->addr, page_size); - n->addr = NULL; - } + /* + * Fetch notifier and invalidate any old data before setting up + * new mapped address. + */ + n = fetch_or_create_notifier(user, queue_idx); + vhost_user_host_notifier_remove(n, vdev); if (area->u64 & VHOST_USER_VRING_NOFD_MASK) { return 0; @@ -1458,28 +1554,32 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev, /* Sanity check. 
*/ if (area->size != page_size) { - return -1; + return -EINVAL; } addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, area->offset); if (addr == MAP_FAILED) { - return -1; + return -EFAULT; } name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]", user, queue_idx); - memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name, - page_size, addr); + if (!n->mr.ram) { /* Don't init again after suspend. */ + memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name, + page_size, addr); + } else { + n->mr.ram_block->host = addr; + } g_free(name); if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) { + object_unparent(OBJECT(&n->mr)); munmap(addr, page_size); - return -1; + return -ENXIO; } n->addr = addr; - n->set = true; return 0; } @@ -1604,18 +1704,21 @@ static int vhost_setup_slave_channel(struct vhost_dev *dev) return 0; } - if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) { + if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) { + int saved_errno = errno; error_report("socketpair() failed"); - return -1; + return -saved_errno; } ioc = QIO_CHANNEL(qio_channel_socket_new_fd(sv[0], &local_err)); if (!ioc) { error_report_err(local_err); - return -1; + return -ECONNREFUSED; } u->slave_ioc = ioc; - slave_update_read_handler(dev, NULL); + u->slave_src = qio_channel_add_watch_source(u->slave_ioc, + G_IO_IN | G_IO_HUP, + slave_read, dev, NULL, NULL); if (reply_supported) { msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; @@ -1719,37 +1822,40 @@ static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp) struct vhost_user *u = dev->opaque; CharBackend *chr = u->user->chr; int ufd; + int ret; VhostUserMsg msg = { .hdr.request = VHOST_USER_POSTCOPY_ADVISE, .hdr.flags = VHOST_USER_VERSION, }; - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { error_setg(errp, "Failed to send postcopy_advise to vhost"); - return -1; + return ret; } - if (vhost_user_read(dev, &msg) < 0) { + ret = vhost_user_read(dev, &msg); + if (ret < 0) { error_setg(errp, "Failed to get postcopy_advise reply from vhost"); - return -1; + return ret; } if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) { error_setg(errp, "Unexpected msg type. 
Expected %d received %d", VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request); - return -1; + return -EPROTO; } if (msg.hdr.size) { error_setg(errp, "Received bad msg size."); - return -1; + return -EPROTO; } ufd = qemu_chr_fe_get_msgfd(chr); if (ufd < 0) { error_setg(errp, "%s: Failed to get ufd", __func__); - return -1; + return -EIO; } - qemu_set_nonblock(ufd); + qemu_socket_set_nonblock(ufd); /* register ufd with userfault thread */ u->postcopy_fd.fd = ufd; @@ -1761,7 +1867,7 @@ static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp) return 0; #else error_setg(errp, "Postcopy not supported on non-Linux systems"); - return -1; + return -ENOSYS; #endif } @@ -1777,10 +1883,13 @@ static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp) .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK, }; u->postcopy_listen = true; + trace_vhost_user_postcopy_listen(); - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { error_setg(errp, "Failed to send postcopy_listen to vhost"); - return -1; + return ret; } ret = process_message_reply(dev, &msg); @@ -1805,9 +1914,11 @@ static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp) struct vhost_user *u = dev->opaque; trace_vhost_user_postcopy_end_entry(); - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { error_setg(errp, "Failed to send postcopy_end to vhost"); - return -1; + return ret; } ret = process_message_reply(dev, &msg); @@ -1863,46 +1974,67 @@ static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier, static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque, Error **errp) { - uint64_t features, protocol_features, ram_slots; + uint64_t features, ram_slots; struct vhost_user *u; + VhostUserState *vus = (VhostUserState *) opaque; int err; assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); u = g_new0(struct vhost_user, 1); - u->user = opaque; + u->user = vus; u->dev = dev; dev->opaque = u; err = vhost_user_get_features(dev, &features); if (err < 0) { + error_setg_errno(errp, -err, "vhost_backend_init failed"); return err; } if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) { + bool supports_f_config = vus->supports_config || + (dev->config_ops && dev->config_ops->vhost_dev_config_notifier); + uint64_t protocol_features; + dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES; err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES, &protocol_features); if (err < 0) { + error_setg_errno(errp, EPROTO, "vhost_backend_init failed"); return -EPROTO; } - dev->protocol_features = - protocol_features & VHOST_USER_PROTOCOL_FEATURE_MASK; + /* + * We will use all the protocol features we support - although + * we suppress F_CONFIG if we know QEMUs internal code can not support + * it. 
+ */ + protocol_features &= VHOST_USER_PROTOCOL_FEATURE_MASK; - if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) { - /* Don't acknowledge CONFIG feature if device doesn't support it */ - dev->protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG); - } else if (!(protocol_features & - (1ULL << VHOST_USER_PROTOCOL_F_CONFIG))) { - error_setg(errp, "Device expects VHOST_USER_PROTOCOL_F_CONFIG " - "but backend does not support it."); - return -EINVAL; + if (supports_f_config) { + if (!virtio_has_feature(protocol_features, + VHOST_USER_PROTOCOL_F_CONFIG)) { + error_setg(errp, "vhost-user device expecting " + "VHOST_USER_PROTOCOL_F_CONFIG but the vhost-user backend does " + "not support it."); + return -EPROTO; + } + } else { + if (virtio_has_feature(protocol_features, + VHOST_USER_PROTOCOL_F_CONFIG)) { + warn_report("vhost-user backend supports " + "VHOST_USER_PROTOCOL_F_CONFIG but QEMU does not."); + protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG); + } } + /* final set of protocol features */ + dev->protocol_features = protocol_features; err = vhost_user_set_protocol_features(dev, dev->protocol_features); if (err < 0) { + error_setg_errno(errp, EPROTO, "vhost_backend_init failed"); return -EPROTO; } @@ -1911,6 +2043,7 @@ static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque, err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM, &dev->max_queues); if (err < 0) { + error_setg_errno(errp, EPROTO, "vhost_backend_init failed"); return -EPROTO; } } else { @@ -1940,6 +2073,7 @@ static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque, } else { err = vhost_user_get_max_memslots(dev, &ram_slots); if (err < 0) { + error_setg_errno(errp, EPROTO, "vhost_backend_init failed"); return -EPROTO; } @@ -1966,6 +2100,7 @@ static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque, if (dev->vq_index == 0) { err = vhost_setup_slave_channel(dev); if (err < 0) { + error_setg_errno(errp, EPROTO, "vhost_backend_init failed"); return -EPROTO; } } @@ -2050,7 +2185,7 @@ static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr) return vhost_user_write(dev, &msg, NULL, 0); } - return -1; + return -ENOTSUP; } static bool vhost_user_can_merge(struct vhost_dev *dev, @@ -2071,6 +2206,7 @@ static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu) VhostUserMsg msg; bool reply_supported = virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_REPLY_ACK); + int ret; if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) { return 0; @@ -2084,8 +2220,9 @@ static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu) msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; } - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + return ret; } /* If reply_ack supported, slave has to ack specified MTU is valid */ @@ -2099,6 +2236,7 @@ static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu) static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev, struct vhost_iotlb_msg *imsg) { + int ret; VhostUserMsg msg = { .hdr.request = VHOST_USER_IOTLB_MSG, .hdr.size = sizeof(msg.payload.iotlb), @@ -2106,8 +2244,9 @@ static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev, .payload.iotlb = *imsg, }; - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -EFAULT; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + return ret; } return process_message_reply(dev, &msg); @@ -2122,6 +2261,7 @@ 
static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled) static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config, uint32_t config_len, Error **errp) { + int ret; VhostUserMsg msg = { .hdr.request = VHOST_USER_GET_CONFIG, .hdr.flags = VHOST_USER_VERSION, @@ -2138,24 +2278,28 @@ static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config, msg.payload.config.offset = 0; msg.payload.config.size = config_len; - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -EPROTO; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + error_setg_errno(errp, -ret, "vhost_get_config failed"); + return ret; } - if (vhost_user_read(dev, &msg) < 0) { - return -EPROTO; + ret = vhost_user_read(dev, &msg); + if (ret < 0) { + error_setg_errno(errp, -ret, "vhost_get_config failed"); + return ret; } if (msg.hdr.request != VHOST_USER_GET_CONFIG) { error_setg(errp, "Received unexpected msg type. Expected %d received %d", VHOST_USER_GET_CONFIG, msg.hdr.request); - return -EINVAL; + return -EPROTO; } if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) { error_setg(errp, "Received bad msg size."); - return -EINVAL; + return -EPROTO; } memcpy(config, msg.payload.config.region, config_len); @@ -2166,6 +2310,7 @@ static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config, static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data, uint32_t offset, uint32_t size, uint32_t flags) { + int ret; uint8_t *p; bool reply_supported = virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_REPLY_ACK); @@ -2178,7 +2323,7 @@ static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data, if (!virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_CONFIG)) { - return -1; + return -ENOTSUP; } if (reply_supported) { @@ -2186,7 +2331,7 @@ static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data, } if (size > VHOST_USER_MAX_CONFIG_SIZE) { - return -1; + return -EINVAL; } msg.payload.config.offset = offset, @@ -2195,8 +2340,9 @@ static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data, p = msg.payload.config.region; memcpy(p, data, size); - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + return ret; } if (reply_supported) { @@ -2210,6 +2356,7 @@ static int vhost_user_crypto_create_session(struct vhost_dev *dev, void *session_info, uint64_t *session_id) { + int ret; bool crypto_session = virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_CRYPTO_SESSION); CryptoDevBackendSymSessionInfo *sess_info = session_info; @@ -2223,7 +2370,7 @@ static int vhost_user_crypto_create_session(struct vhost_dev *dev, if (!crypto_session) { error_report("vhost-user trying to send unhandled ioctl"); - return -1; + return -ENOTSUP; } memcpy(&msg.payload.session.session_setup_data, sess_info, @@ -2236,31 +2383,35 @@ static int vhost_user_crypto_create_session(struct vhost_dev *dev, memcpy(&msg.payload.session.auth_key, sess_info->auth_key, sess_info->auth_key_len); } - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - error_report("vhost_user_write() return -1, create session failed"); - return -1; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + error_report("vhost_user_write() return %d, create session failed", + ret); + return ret; } - if (vhost_user_read(dev, &msg) < 0) { - error_report("vhost_user_read() return -1, create session failed"); - return -1; + ret = vhost_user_read(dev, 
&msg); + if (ret < 0) { + error_report("vhost_user_read() return %d, create session failed", + ret); + return ret; } if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) { error_report("Received unexpected msg type. Expected %d received %d", VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request); - return -1; + return -EPROTO; } if (msg.hdr.size != sizeof(msg.payload.session)) { error_report("Received bad msg size."); - return -1; + return -EPROTO; } if (msg.payload.session.session_id < 0) { error_report("Bad session id: %" PRId64 "", msg.payload.session.session_id); - return -1; + return -EINVAL; } *session_id = msg.payload.session.session_id; @@ -2270,6 +2421,7 @@ static int vhost_user_crypto_create_session(struct vhost_dev *dev, static int vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id) { + int ret; bool crypto_session = virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_CRYPTO_SESSION); VhostUserMsg msg = { @@ -2281,12 +2433,14 @@ vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id) if (!crypto_session) { error_report("vhost-user trying to send unhandled ioctl"); - return -1; + return -ENOTSUP; } - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - error_report("vhost_user_write() return -1, close session failed"); - return -1; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + error_report("vhost_user_write() return %d, close session failed", + ret); + return ret; } return 0; @@ -2308,6 +2462,7 @@ static int vhost_user_get_inflight_fd(struct vhost_dev *dev, { void *addr; int fd; + int ret; struct vhost_user *u = dev->opaque; CharBackend *chr = u->user->chr; VhostUserMsg msg = { @@ -2323,24 +2478,26 @@ static int vhost_user_get_inflight_fd(struct vhost_dev *dev, return 0; } - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + return ret; } - if (vhost_user_read(dev, &msg) < 0) { - return -1; + ret = vhost_user_read(dev, &msg); + if (ret < 0) { + return ret; } if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) { error_report("Received unexpected msg type. " "Expected %d received %d", VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request); - return -1; + return -EPROTO; } if (msg.hdr.size != sizeof(msg.payload.inflight)) { error_report("Received bad msg size."); - return -1; + return -EPROTO; } if (!msg.payload.inflight.mmap_size) { @@ -2350,7 +2507,7 @@ static int vhost_user_get_inflight_fd(struct vhost_dev *dev, fd = qemu_chr_fe_get_msgfd(chr); if (fd < 0) { error_report("Failed to get mem fd"); - return -1; + return -EIO; } addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE, @@ -2359,7 +2516,7 @@ static int vhost_user_get_inflight_fd(struct vhost_dev *dev, if (addr == MAP_FAILED) { error_report("Failed to mmap mem fd"); close(fd); - return -1; + return -EFAULT; } inflight->addr = addr; @@ -2389,11 +2546,21 @@ static int vhost_user_set_inflight_fd(struct vhost_dev *dev, return 0; } - if (vhost_user_write(dev, &msg, &inflight->fd, 1) < 0) { - return -1; - } + return vhost_user_write(dev, &msg, &inflight->fd, 1); +} - return 0; +static void vhost_user_state_destroy(gpointer data) +{ + VhostUserHostNotifier *n = (VhostUserHostNotifier *) data; + if (n) { + vhost_user_host_notifier_remove(n, NULL); + object_unparent(OBJECT(&n->mr)); + /* + * We can't free until vhost_user_host_notifier_remove has + * done it's thing so schedule the free with RCU. 
+ */ + g_free_rcu(n, rcu); + } } bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp) @@ -2404,27 +2571,113 @@ bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp) } user->chr = chr; user->memory_slots = 0; + user->notifiers = g_ptr_array_new_full(VIRTIO_QUEUE_MAX / 4, + &vhost_user_state_destroy); return true; } void vhost_user_cleanup(VhostUserState *user) { - int i; - if (!user->chr) { return; } - - for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { - if (user->notifier[i].addr) { - object_unparent(OBJECT(&user->notifier[i].mr)); - munmap(user->notifier[i].addr, qemu_real_host_page_size); - user->notifier[i].addr = NULL; - } - } + memory_region_transaction_begin(); + user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true); + memory_region_transaction_commit(); user->chr = NULL; } + +typedef struct { + vu_async_close_fn cb; + DeviceState *dev; + CharBackend *cd; + struct vhost_dev *vhost; +} VhostAsyncCallback; + +static void vhost_user_async_close_bh(void *opaque) +{ + VhostAsyncCallback *data = opaque; + struct vhost_dev *vhost = data->vhost; + + /* + * If the vhost_dev has been cleared in the meantime there is + * nothing left to do as some other path has completed the + * cleanup. + */ + if (vhost->vdev) { + data->cb(data->dev); + } + + g_free(data); +} + +/* + * We only schedule the work if the machine is running. If suspended + * we want to keep all the in-flight data as is for migration + * purposes. + */ +void vhost_user_async_close(DeviceState *d, + CharBackend *chardev, struct vhost_dev *vhost, + vu_async_close_fn cb) +{ + if (!runstate_check(RUN_STATE_SHUTDOWN)) { + /* + * A close event may happen during a read/write, but vhost + * code assumes the vhost_dev remains setup, so delay the + * stop & clear. + */ + AioContext *ctx = qemu_get_current_aio_context(); + VhostAsyncCallback *data = g_new0(VhostAsyncCallback, 1); + + /* Save data for the callback */ + data->cb = cb; + data->dev = d; + data->cd = chardev; + data->vhost = vhost; + + /* Disable any further notifications on the chardev */ + qemu_chr_fe_set_handlers(chardev, + NULL, NULL, NULL, NULL, NULL, NULL, + false); + + aio_bh_schedule_oneshot(ctx, vhost_user_async_close_bh, data); + + /* + * Move vhost device to the stopped state. The vhost-user device + * will be clean up and disconnected in BH. This can be useful in + * the vhost migration code. If disconnect was caught there is an + * option for the general vhost code to get the dev state without + * knowing its type (in this case vhost-user). + * + * Note if the vhost device is fully cleared by the time we + * execute the bottom half we won't continue with the cleanup. 
+ */ + vhost->started = false; + } +} + +static int vhost_user_dev_start(struct vhost_dev *dev, bool started) +{ + if (!virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_STATUS)) { + return 0; + } + + /* Set device status only for last queue pair */ + if (dev->vq_index + dev->nvqs != dev->vq_index_end) { + return 0; + } + + if (started) { + return vhost_user_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE | + VIRTIO_CONFIG_S_DRIVER | + VIRTIO_CONFIG_S_DRIVER_OK); + } else { + return vhost_user_set_status(dev, 0); + } +} + const VhostOps user_ops = { .backend_type = VHOST_BACKEND_TYPE_USER, .vhost_backend_init = vhost_user_backend_init, @@ -2439,6 +2692,7 @@ const VhostOps user_ops = { .vhost_get_vring_base = vhost_user_get_vring_base, .vhost_set_vring_kick = vhost_user_set_vring_kick, .vhost_set_vring_call = vhost_user_set_vring_call, + .vhost_set_vring_err = vhost_user_set_vring_err, .vhost_set_features = vhost_user_set_features, .vhost_get_features = vhost_user_get_features, .vhost_set_owner = vhost_user_set_owner, @@ -2458,4 +2712,5 @@ const VhostOps user_ops = { .vhost_backend_mem_section_filter = vhost_user_mem_section_filter, .vhost_get_inflight_fd = vhost_user_get_inflight_fd, .vhost_set_inflight_fd = vhost_user_set_inflight_fd, + .vhost_dev_start = vhost_user_dev_start, }; diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c index 4fa414feea..03c78d25d8 100644 --- a/hw/virtio/vhost-vdpa.c +++ b/hw/virtio/vhost-vdpa.c @@ -17,30 +17,63 @@ #include "hw/virtio/vhost.h" #include "hw/virtio/vhost-backend.h" #include "hw/virtio/virtio-net.h" +#include "hw/virtio/vhost-shadow-virtqueue.h" #include "hw/virtio/vhost-vdpa.h" #include "exec/address-spaces.h" +#include "migration/blocker.h" +#include "qemu/cutils.h" #include "qemu/main-loop.h" #include "cpu.h" #include "trace.h" -#include "qemu-common.h" +#include "qapi/error.h" -static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section) +/* + * Return one past the end of the end of section. Be careful with uint64_t + * conversions! + */ +static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section) { - return (!memory_region_is_ram(section->mr) && - !memory_region_is_iommu(section->mr)) || - /* vhost-vDPA doesn't allow MMIO to be mapped */ - memory_region_is_ram_device(section->mr) || - /* - * Sizing an enabled 64-bit BAR can cause spurious mappings to - * addresses in the upper part of the 64-bit address space. These - * are never accessed by the CPU and beyond the address width of - * some IOMMU hardware. TODO: VDPA should tell us the IOMMU width. 
- */ - section->offset_within_address_space & (1ULL << 63); + Int128 llend = int128_make64(section->offset_within_address_space); + llend = int128_add(llend, section->size); + llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK)); + + return llend; } -static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size, - void *vaddr, bool readonly) +static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section, + uint64_t iova_min, + uint64_t iova_max) +{ + Int128 llend; + + if ((!memory_region_is_ram(section->mr) && + !memory_region_is_iommu(section->mr)) || + memory_region_is_protected(section->mr) || + /* vhost-vDPA doesn't allow MMIO to be mapped */ + memory_region_is_ram_device(section->mr)) { + return true; + } + + if (section->offset_within_address_space < iova_min) { + error_report("RAM section out of device range (min=0x%" PRIx64 + ", addr=0x%" HWADDR_PRIx ")", + iova_min, section->offset_within_address_space); + return true; + } + + llend = vhost_vdpa_section_end(section); + if (int128_gt(llend, int128_make64(iova_max))) { + error_report("RAM section out of device range (max=0x%" PRIx64 + ", end addr=0x%" PRIx64 ")", + iova_max, int128_get64(llend)); + return true; + } + + return false; +} + +int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size, + void *vaddr, bool readonly) { struct vhost_msg_v2 msg = {}; int fd = v->device_fd; @@ -65,8 +98,7 @@ static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size, return ret; } -static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, - hwaddr size) +int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size) { struct vhost_msg_v2 msg = {}; int fd = v->device_fd; @@ -89,26 +121,31 @@ static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, return ret; } -static void vhost_vdpa_listener_begin(MemoryListener *listener) +static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v) { - struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener); - struct vhost_dev *dev = v->dev; - struct vhost_msg_v2 msg = {}; int fd = v->device_fd; + struct vhost_msg_v2 msg = { + .type = v->msg_type, + .iotlb.type = VHOST_IOTLB_BATCH_BEGIN, + }; - if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) { - return; - } - - msg.type = v->msg_type; - msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN; - + trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.iotlb.type); if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) { error_report("failed to write, fd=%d, errno=%d (%s)", fd, errno, strerror(errno)); } } +static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v) +{ + if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) && + !v->iotlb_batch_begin_sent) { + vhost_vdpa_listener_begin_batch(v); + } + + v->iotlb_batch_begin_sent = true; +} + static void vhost_vdpa_listener_commit(MemoryListener *listener) { struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener); @@ -120,25 +157,34 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener) return; } + if (!v->iotlb_batch_begin_sent) { + return; + } + msg.type = v->msg_type; msg.iotlb.type = VHOST_IOTLB_BATCH_END; + trace_vhost_vdpa_listener_commit(v, fd, msg.type, msg.iotlb.type); if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) { error_report("failed to write, fd=%d, errno=%d (%s)", fd, errno, strerror(errno)); } + + v->iotlb_batch_begin_sent = false; } static void vhost_vdpa_listener_region_add(MemoryListener *listener, MemoryRegionSection 
*section) { + DMAMap mem_region = {}; struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener); hwaddr iova; Int128 llend, llsize; void *vaddr; int ret; - if (vhost_vdpa_listener_skipped_section(section)) { + if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first, + v->iova_range.last)) { return; } @@ -149,10 +195,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, } iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); - llend = int128_make64(section->offset_within_address_space); - llend = int128_add(llend, section->size); - llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK)); - + llend = vhost_vdpa_section_end(section); if (int128_ge(int128_make64(iova), llend)) { return; } @@ -169,16 +212,37 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, vaddr, section->readonly); llsize = int128_sub(llend, int128_make64(iova)); + if (v->shadow_vqs_enabled) { + int r; + mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr, + mem_region.size = int128_get64(llsize) - 1, + mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly), + + r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region); + if (unlikely(r != IOVA_OK)) { + error_report("Can't allocate a mapping (%d)", r); + goto fail; + } + + iova = mem_region.iova; + } + + vhost_vdpa_iotlb_batch_begin_once(v); ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize), vaddr, section->readonly); if (ret) { error_report("vhost vdpa map fail!"); - goto fail; + goto fail_map; } return; +fail_map: + if (v->shadow_vqs_enabled) { + vhost_iova_tree_remove(v->iova_tree, mem_region); + } + fail: /* * On the initfn path, store the first error in the container so we @@ -198,7 +262,8 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, Int128 llend, llsize; int ret; - if (vhost_vdpa_listener_skipped_section(section)) { + if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first, + v->iova_range.last)) { return; } @@ -209,9 +274,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, } iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); - llend = int128_make64(section->offset_within_address_space); - llend = int128_add(llend, section->size); - llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK)); + llend = vhost_vdpa_section_end(section); trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend)); @@ -221,6 +284,25 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, llsize = int128_sub(llend, int128_make64(iova)); + if (v->shadow_vqs_enabled) { + const DMAMap *result; + const void *vaddr = memory_region_get_ram_ptr(section->mr) + + section->offset_within_region + + (iova - section->offset_within_address_space); + DMAMap mem_region = { + .translated_addr = (hwaddr)(uintptr_t)vaddr, + .size = int128_get64(llsize) - 1, + }; + + result = vhost_iova_tree_find_iova(v->iova_tree, &mem_region); + if (!result) { + /* The memory listener map wasn't mapped */ + return; + } + iova = result->iova; + vhost_iova_tree_remove(v->iova_tree, *result); + } + vhost_vdpa_iotlb_batch_begin_once(v); ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize)); if (ret) { error_report("vhost_vdpa dma unmap error!"); @@ -229,12 +311,12 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, memory_region_unref(section->mr); } /* - * IOTLB API is used by vhost-vpda which requires incremental updating + * IOTLB API is used by vhost-vdpa which requires incremental updating * of the mapping. 
So we can not use generic vhost memory listener which
  * depends on the addnop().
  */
 static const MemoryListener vhost_vdpa_memory_listener = {
-    .begin = vhost_vdpa_listener_begin,
+    .name = "vhost-vdpa",
     .commit = vhost_vdpa_listener_commit,
     .region_add = vhost_vdpa_listener_region_add,
     .region_del = vhost_vdpa_listener_region_del,
@@ -253,18 +335,112 @@ static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
     return ret < 0 ? -errno : ret;
 }
 
-static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
+static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
 {
     uint8_t s;
+    int ret;
 
     trace_vhost_vdpa_add_status(dev, status);
-    if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
-        return;
+    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
+    if (ret < 0) {
+        return ret;
     }
 
     s |= status;
-    vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
+    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
+    if (ret < 0) {
+        return ret;
+    }
+
+    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
+    if (ret < 0) {
+        return ret;
+    }
+
+    if (!(s & status)) {
+        return -EIO;
+    }
+
+    return 0;
+}
+
+static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
+{
+    int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
+                              &v->iova_range);
+    if (ret != 0) {
+        v->iova_range.first = 0;
+        v->iova_range.last = UINT64_MAX;
+    }
+
+    trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
+                                    v->iova_range.last);
+}
+
+/*
+ * The use of this function is for requests that only need to be
+ * applied once. Typically such requests occur at the beginning
+ * of operation, and before setting up queues. It should not be
+ * used for requests that perform operations until all queues are
+ * set, which would need to check dev->vq_index_end instead.
+ */ +static bool vhost_vdpa_first_dev(struct vhost_dev *dev) +{ + struct vhost_vdpa *v = dev->opaque; + + return v->index == 0; +} + +static int vhost_vdpa_get_dev_features(struct vhost_dev *dev, + uint64_t *features) +{ + int ret; + + ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features); + trace_vhost_vdpa_get_features(dev, *features); + return ret; +} + +static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v, + Error **errp) +{ + g_autoptr(GPtrArray) shadow_vqs = NULL; + uint64_t dev_features, svq_features; + int r; + bool ok; + + if (!v->shadow_vqs_enabled) { + return 0; + } + + r = vhost_vdpa_get_dev_features(hdev, &dev_features); + if (r != 0) { + error_setg_errno(errp, -r, "Can't get vdpa device features"); + return r; + } + + svq_features = dev_features; + ok = vhost_svq_valid_features(svq_features, errp); + if (unlikely(!ok)) { + return -1; + } + + shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free); + for (unsigned n = 0; n < hdev->nvqs; ++n) { + g_autoptr(VhostShadowVirtqueue) svq; + + svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops, + v->shadow_vq_ops_opaque); + if (unlikely(!svq)) { + error_setg(errp, "Cannot create svq %u", n); + return -1; + } + g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq)); + } + + v->shadow_vqs = g_steal_pointer(&shadow_vqs); + return 0; } static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp) @@ -272,23 +448,48 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp) struct vhost_vdpa *v; assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); trace_vhost_vdpa_init(dev, opaque); + int ret; + + /* + * Similar to VFIO, we end up pinning all guest memory and have to + * disable discarding of RAM. + */ + ret = ram_block_discard_disable(true); + if (ret) { + error_report("Cannot set discarding of RAM broken"); + return ret; + } v = opaque; v->dev = dev; dev->opaque = opaque ; v->listener = vhost_vdpa_memory_listener; v->msg_type = VHOST_IOTLB_MSG_V2; + ret = vhost_vdpa_init_svq(dev, v, errp); + if (ret) { + goto err; + } + + vhost_vdpa_get_iova_range(v); + + if (!vhost_vdpa_first_dev(dev)) { + return 0; + } vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER); return 0; + +err: + ram_block_discard_disable(false); + return ret; } static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev, int queue_index) { - size_t page_size = qemu_real_host_page_size; + size_t page_size = qemu_real_host_page_size(); struct vhost_vdpa *v = dev->opaque; VirtIODevice *vdev = dev->vdev; VhostVDPAHostNotifier *n; @@ -303,18 +504,9 @@ static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev, } } -static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n) -{ - int i; - - for (i = 0; i < n; i++) { - vhost_vdpa_host_notifier_uninit(dev, i); - } -} - static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index) { - size_t page_size = qemu_real_host_page_size; + size_t page_size = qemu_real_host_page_size(); struct vhost_vdpa *v = dev->opaque; VirtIODevice *vdev = dev->vdev; VhostVDPAHostNotifier *n; @@ -339,6 +531,7 @@ static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index) g_free(name); if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) { + object_unparent(OBJECT(&n->mr)); munmap(addr, page_size); goto err; } @@ -350,10 +543,25 @@ err: return -1; } -static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev) +static void 
vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n) { int i; + for (i = dev->vq_index; i < dev->vq_index + n; i++) { + vhost_vdpa_host_notifier_uninit(dev, i); + } +} + +static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev) +{ + struct vhost_vdpa *v = dev->opaque; + int i; + + if (v->shadow_vqs_enabled) { + /* FIXME SVQ is not compatible with host notifiers mr */ + return; + } + for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) { if (vhost_vdpa_host_notifier_init(dev, i)) { goto err; @@ -363,10 +571,25 @@ static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev) return; err: - vhost_vdpa_host_notifiers_uninit(dev, i); + vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index); return; } +static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev) +{ + struct vhost_vdpa *v = dev->opaque; + size_t idx; + + if (!v->shadow_vqs) { + return; + } + + for (idx = 0; idx < v->shadow_vqs->len; ++idx) { + vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx)); + } + g_ptr_array_free(v->shadow_vqs, true); +} + static int vhost_vdpa_cleanup(struct vhost_dev *dev) { struct vhost_vdpa *v; @@ -375,8 +598,11 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev) trace_vhost_vdpa_cleanup(dev, v); vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); memory_listener_unregister(&v->listener); + vhost_vdpa_svq_cleanup(dev); dev->opaque = NULL; + ram_block_discard_disable(false); + return 0; } @@ -389,6 +615,10 @@ static int vhost_vdpa_memslots_limit(struct vhost_dev *dev) static int vhost_vdpa_set_mem_table(struct vhost_dev *dev, struct vhost_memory *mem) { + if (!vhost_vdpa_first_dev(dev)) { + return 0; + } + trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding); if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) && trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) { @@ -402,7 +632,7 @@ static int vhost_vdpa_set_mem_table(struct vhost_dev *dev, } } if (mem->padding) { - return -1; + return -EINVAL; } return 0; @@ -411,17 +641,36 @@ static int vhost_vdpa_set_mem_table(struct vhost_dev *dev, static int vhost_vdpa_set_features(struct vhost_dev *dev, uint64_t features) { + struct vhost_vdpa *v = dev->opaque; int ret; + + if (!vhost_vdpa_first_dev(dev)) { + return 0; + } + + if (v->shadow_vqs_enabled) { + if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) { + /* + * QEMU is just trying to enable or disable logging. SVQ handles + * this sepparately, so no need to forward this. 
+ */ + v->acked_features = features; + return 0; + } + + v->acked_features = features; + + /* We must not ack _F_LOG if SVQ is enabled */ + features &= ~BIT_ULL(VHOST_F_LOG_ALL); + } + trace_vhost_vdpa_set_features(dev, features); ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features); - uint8_t status = 0; if (ret) { return ret; } - vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK); - vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status); - return !(status & VIRTIO_CONFIG_S_FEATURES_OK); + return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK); } static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev) @@ -432,13 +681,16 @@ static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev) int r; if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) { - return 0; + return -EFAULT; } features &= f; - r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features); - if (r) { - return 0; + + if (vhost_vdpa_first_dev(dev)) { + r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features); + if (r) { + return -EFAULT; + } } dev->backend_cap = features; @@ -469,8 +721,8 @@ static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx) { assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs); - trace_vhost_vdpa_get_vq_index(dev, idx, idx - dev->vq_index); - return idx - dev->vq_index; + trace_vhost_vdpa_get_vq_index(dev, idx, idx); + return idx; } static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev) @@ -543,24 +795,319 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config, return ret; } +static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev, + struct vhost_vring_state *ring) +{ + trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num); + return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring); +} + +static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev, + struct vhost_vring_file *file) +{ + trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd); + return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file); +} + +static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev, + struct vhost_vring_file *file) +{ + trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd); + return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file); +} + +static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev, + struct vhost_vring_addr *addr) +{ + trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags, + addr->desc_user_addr, addr->used_user_addr, + addr->avail_user_addr, + addr->log_guest_addr); + + return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr); + +} + +/** + * Set the shadow virtqueue descriptors to the device + * + * @dev: The vhost device model + * @svq: The shadow virtqueue + * @idx: The index of the virtqueue in the vhost device + * @errp: Error + * + * Note that this function does not rewind kick file descriptor if cannot set + * call one. 
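+ *
+ * On such a partial failure the error is simply returned; it is
+ * propagated up to vhost_vdpa_svqs_start(), which aborts the SVQ
+ * start and tears down the shadow virtqueues already set up.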
+ */ +static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev, + VhostShadowVirtqueue *svq, unsigned idx, + Error **errp) +{ + struct vhost_vring_file file = { + .index = dev->vq_index + idx, + }; + const EventNotifier *event_notifier = &svq->hdev_kick; + int r; + + file.fd = event_notifier_get_fd(event_notifier); + r = vhost_vdpa_set_vring_dev_kick(dev, &file); + if (unlikely(r != 0)) { + error_setg_errno(errp, -r, "Can't set device kick fd"); + return r; + } + + event_notifier = &svq->hdev_call; + file.fd = event_notifier_get_fd(event_notifier); + r = vhost_vdpa_set_vring_dev_call(dev, &file); + if (unlikely(r != 0)) { + error_setg_errno(errp, -r, "Can't set device call fd"); + } + + return r; +} + +/** + * Unmap a SVQ area in the device + */ +static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr) +{ + const DMAMap needle = { + .translated_addr = addr, + }; + const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, &needle); + hwaddr size; + int r; + + if (unlikely(!result)) { + error_report("Unable to find SVQ address to unmap"); + return; + } + + size = ROUND_UP(result->size, qemu_real_host_page_size()); + r = vhost_vdpa_dma_unmap(v, result->iova, size); + if (unlikely(r < 0)) { + error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r); + return; + } + + vhost_iova_tree_remove(v->iova_tree, *result); +} + +static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev, + const VhostShadowVirtqueue *svq) +{ + struct vhost_vdpa *v = dev->opaque; + struct vhost_vring_addr svq_addr; + + vhost_svq_get_vring_addr(svq, &svq_addr); + + vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr); + + vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr); +} + +/** + * Map the SVQ area in the device + * + * @v: Vhost-vdpa device + * @needle: The area to search iova + * @errorp: Error pointer + */ +static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle, + Error **errp) +{ + int r; + + r = vhost_iova_tree_map_alloc(v->iova_tree, needle); + if (unlikely(r != IOVA_OK)) { + error_setg(errp, "Cannot allocate iova (%d)", r); + return false; + } + + r = vhost_vdpa_dma_map(v, needle->iova, needle->size + 1, + (void *)(uintptr_t)needle->translated_addr, + needle->perm == IOMMU_RO); + if (unlikely(r != 0)) { + error_setg_errno(errp, -r, "Cannot map region to device"); + vhost_iova_tree_remove(v->iova_tree, *needle); + } + + return r == 0; +} + +/** + * Map the shadow virtqueue rings in the device + * + * @dev: The vhost device + * @svq: The shadow virtqueue + * @addr: Assigned IOVA addresses + * @errp: Error pointer + */ +static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev, + const VhostShadowVirtqueue *svq, + struct vhost_vring_addr *addr, + Error **errp) +{ + DMAMap device_region, driver_region; + struct vhost_vring_addr svq_addr; + struct vhost_vdpa *v = dev->opaque; + size_t device_size = vhost_svq_device_area_size(svq); + size_t driver_size = vhost_svq_driver_area_size(svq); + size_t avail_offset; + bool ok; + + ERRP_GUARD(); + vhost_svq_get_vring_addr(svq, &svq_addr); + + driver_region = (DMAMap) { + .translated_addr = svq_addr.desc_user_addr, + .size = driver_size - 1, + .perm = IOMMU_RO, + }; + ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp); + if (unlikely(!ok)) { + error_prepend(errp, "Cannot create vq driver region: "); + return false; + } + addr->desc_user_addr = driver_region.iova; + avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr; + addr->avail_user_addr = driver_region.iova + avail_offset; + + device_region = 
(DMAMap) { + .translated_addr = svq_addr.used_user_addr, + .size = device_size - 1, + .perm = IOMMU_RW, + }; + ok = vhost_vdpa_svq_map_ring(v, &device_region, errp); + if (unlikely(!ok)) { + error_prepend(errp, "Cannot create vq device region: "); + vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr); + } + addr->used_user_addr = device_region.iova; + + return ok; +} + +static bool vhost_vdpa_svq_setup(struct vhost_dev *dev, + VhostShadowVirtqueue *svq, unsigned idx, + Error **errp) +{ + uint16_t vq_index = dev->vq_index + idx; + struct vhost_vring_state s = { + .index = vq_index, + }; + int r; + + r = vhost_vdpa_set_dev_vring_base(dev, &s); + if (unlikely(r)) { + error_setg_errno(errp, -r, "Cannot set vring base"); + return false; + } + + r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp); + return r == 0; +} + +static bool vhost_vdpa_svqs_start(struct vhost_dev *dev) +{ + struct vhost_vdpa *v = dev->opaque; + Error *err = NULL; + unsigned i; + + if (!v->shadow_vqs) { + return true; + } + + for (i = 0; i < v->shadow_vqs->len; ++i) { + VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i); + VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i); + struct vhost_vring_addr addr = { + .index = dev->vq_index + i, + }; + int r; + bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err); + if (unlikely(!ok)) { + goto err; + } + + vhost_svq_start(svq, dev->vdev, vq); + ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err); + if (unlikely(!ok)) { + goto err_map; + } + + /* Override vring GPA set by vhost subsystem */ + r = vhost_vdpa_set_vring_dev_addr(dev, &addr); + if (unlikely(r != 0)) { + error_setg_errno(&err, -r, "Cannot set device address"); + goto err_set_addr; + } + } + + return true; + +err_set_addr: + vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i)); + +err_map: + vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i)); + +err: + error_reportf_err(err, "Cannot setup SVQ %u: ", i); + for (unsigned j = 0; j < i; ++j) { + VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j); + vhost_vdpa_svq_unmap_rings(dev, svq); + vhost_svq_stop(svq); + } + + return false; +} + +static void vhost_vdpa_svqs_stop(struct vhost_dev *dev) +{ + struct vhost_vdpa *v = dev->opaque; + + if (!v->shadow_vqs) { + return; + } + + for (unsigned i = 0; i < v->shadow_vqs->len; ++i) { + VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i); + + vhost_svq_stop(svq); + vhost_vdpa_svq_unmap_rings(dev, svq); + } +} + static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started) { struct vhost_vdpa *v = dev->opaque; + bool ok; trace_vhost_vdpa_dev_start(dev, started); - if (started) { - uint8_t status = 0; - memory_listener_register(&v->listener, &address_space_memory); - vhost_vdpa_host_notifiers_init(dev); - vhost_vdpa_set_vring_ready(dev); - vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); - vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status); - return !(status & VIRTIO_CONFIG_S_DRIVER_OK); + if (started) { + vhost_vdpa_host_notifiers_init(dev); + ok = vhost_vdpa_svqs_start(dev); + if (unlikely(!ok)) { + return -1; + } + vhost_vdpa_set_vring_ready(dev); + } else { + vhost_vdpa_svqs_stop(dev); + vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); + } + + if (dev->vq_index + dev->nvqs != dev->vq_index_end) { + return 0; + } + + if (started) { + memory_listener_register(&v->listener, &address_space_memory); + return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); } else { vhost_vdpa_reset_device(dev); vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE 
| VIRTIO_CONFIG_S_DRIVER); - vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); memory_listener_unregister(&v->listener); return 0; @@ -570,6 +1117,11 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started) static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base, struct vhost_log *log) { + struct vhost_vdpa *v = dev->opaque; + if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) { + return 0; + } + trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd, log->log); return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base); @@ -578,11 +1130,17 @@ static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base, static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev, struct vhost_vring_addr *addr) { - trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags, - addr->desc_user_addr, addr->used_user_addr, - addr->avail_user_addr, - addr->log_guest_addr); - return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr); + struct vhost_vdpa *v = dev->opaque; + + if (v->shadow_vqs_enabled) { + /* + * Device vring addr was set at device start. SVQ base is handled by + * VirtQueue code. + */ + return 0; + } + + return vhost_vdpa_set_vring_dev_addr(dev, addr); } static int vhost_vdpa_set_vring_num(struct vhost_dev *dev, @@ -595,15 +1153,41 @@ static int vhost_vdpa_set_vring_num(struct vhost_dev *dev, static int vhost_vdpa_set_vring_base(struct vhost_dev *dev, struct vhost_vring_state *ring) { - trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num); - return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring); + struct vhost_vdpa *v = dev->opaque; + VirtQueue *vq = virtio_get_queue(dev->vdev, ring->index); + + /* + * vhost-vdpa devices does not support in-flight requests. Set all of them + * as available. + * + * TODO: This is ok for networking, but other kinds of devices might + * have problems with these retransmissions. + */ + while (virtqueue_rewind(vq, 1)) { + continue; + } + if (v->shadow_vqs_enabled) { + /* + * Device vring base was set at device start. SVQ base is handled by + * VirtQueue code. 
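+ * (The base is programmed by vhost_vdpa_svq_setup() via
+ * vhost_vdpa_set_dev_vring_base() when the shadow virtqueues are
+ * started.)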
+ */ + return 0; + } + + return vhost_vdpa_set_dev_vring_base(dev, ring); } static int vhost_vdpa_get_vring_base(struct vhost_dev *dev, struct vhost_vring_state *ring) { + struct vhost_vdpa *v = dev->opaque; int ret; + if (v->shadow_vqs_enabled) { + ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index); + return 0; + } + ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring); trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num); return ret; @@ -612,29 +1196,54 @@ static int vhost_vdpa_get_vring_base(struct vhost_dev *dev, static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev, struct vhost_vring_file *file) { - trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd); - return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file); + struct vhost_vdpa *v = dev->opaque; + int vdpa_idx = file->index - dev->vq_index; + + if (v->shadow_vqs_enabled) { + VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx); + vhost_svq_set_svq_kick_fd(svq, file->fd); + return 0; + } else { + return vhost_vdpa_set_vring_dev_kick(dev, file); + } } static int vhost_vdpa_set_vring_call(struct vhost_dev *dev, struct vhost_vring_file *file) { - trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd); - return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file); + struct vhost_vdpa *v = dev->opaque; + + if (v->shadow_vqs_enabled) { + int vdpa_idx = file->index - dev->vq_index; + VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx); + + vhost_svq_set_svq_call_fd(svq, file->fd); + return 0; + } else { + return vhost_vdpa_set_vring_dev_call(dev, file); + } } static int vhost_vdpa_get_features(struct vhost_dev *dev, uint64_t *features) { - int ret; + struct vhost_vdpa *v = dev->opaque; + int ret = vhost_vdpa_get_dev_features(dev, features); + + if (ret == 0 && v->shadow_vqs_enabled) { + /* Add SVQ logging capabilities */ + *features |= BIT_ULL(VHOST_F_LOG_ALL); + } - ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features); - trace_vhost_vdpa_get_features(dev, *features); return ret; } static int vhost_vdpa_set_owner(struct vhost_dev *dev) { + if (!vhost_vdpa_first_dev(dev)) { + return 0; + } + trace_vhost_vdpa_set_owner(dev); return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL); } diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c index 4ad6e234ad..d21c72b401 100644 --- a/hw/virtio/vhost-vsock-common.c +++ b/hw/virtio/vhost-vsock-common.c @@ -14,10 +14,36 @@ #include "hw/virtio/virtio-access.h" #include "qemu/error-report.h" #include "hw/qdev-properties.h" +#include "hw/virtio/vhost.h" #include "hw/virtio/vhost-vsock.h" #include "qemu/iov.h" #include "monitor/monitor.h" +const int feature_bits[] = { + VIRTIO_VSOCK_F_SEQPACKET, + VIRTIO_F_RING_RESET, + VHOST_INVALID_FEATURE_BIT +}; + +uint64_t vhost_vsock_common_get_features(VirtIODevice *vdev, uint64_t features, + Error **errp) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); + + if (vvc->seqpacket != ON_OFF_AUTO_OFF) { + virtio_add_feature(&features, VIRTIO_VSOCK_F_SEQPACKET); + } + + features = vhost_get_features(&vvc->vhost_dev, feature_bits, features); + + if (vvc->seqpacket == ON_OFF_AUTO_ON && + !virtio_has_feature(features, VIRTIO_VSOCK_F_SEQPACKET)) { + error_setg(errp, "vhost-vsock backend doesn't support seqpacket"); + } + + return features; +} + int vhost_vsock_common_start(VirtIODevice *vdev) { VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); @@ -44,7 +70,7 @@ int vhost_vsock_common_start(VirtIODevice *vdev) } vvc->vhost_dev.acked_features = vdev->guest_features; - ret = 
vhost_dev_start(&vvc->vhost_dev, vdev); + ret = vhost_dev_start(&vvc->vhost_dev, vdev, true); if (ret < 0) { error_report("Error starting vhost: %d", -ret); goto err_guest_notifiers; @@ -79,7 +105,7 @@ void vhost_vsock_common_stop(VirtIODevice *vdev) return; } - vhost_dev_stop(&vvc->vhost_dev, vdev); + vhost_dev_stop(&vvc->vhost_dev, vdev, true); ret = k->set_guest_notifiers(qbus->parent, vvc->vhost_dev.nvqs, false); if (ret < 0) { @@ -129,19 +155,23 @@ static void vhost_vsock_common_send_transport_reset(VHostVSockCommon *vvc) if (elem->out_num) { error_report("invalid vhost-vsock event virtqueue element with " "out buffers"); - goto out; + goto err; } if (iov_from_buf(elem->in_sg, elem->in_num, 0, &event, sizeof(event)) != sizeof(event)) { error_report("vhost-vsock event virtqueue element is too short"); - goto out; + goto err; } virtqueue_push(vq, elem, sizeof(event)); virtio_notify(VIRTIO_DEVICE(vvc), vq); -out: + g_free(elem); + return; + +err: + virtqueue_detach_element(vq, elem, 0); g_free(elem); } @@ -171,7 +201,7 @@ int vhost_vsock_common_pre_save(void *opaque) * At this point, backend must be stopped, otherwise * it might keep writing to memory. */ - assert(!vvc->vhost_dev.started); + assert(!vhost_dev_is_started(&vvc->vhost_dev)); return 0; } @@ -196,12 +226,11 @@ int vhost_vsock_common_post_load(void *opaque, int version_id) return 0; } -void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name) +void vhost_vsock_common_realize(VirtIODevice *vdev) { VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); - virtio_init(vdev, name, VIRTIO_ID_VSOCK, - sizeof(struct virtio_vsock_config)); + virtio_init(vdev, VIRTIO_ID_VSOCK, sizeof(struct virtio_vsock_config)); /* Receive and transmit queues belong to vhost */ vvc->recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE, @@ -231,14 +260,28 @@ void vhost_vsock_common_unrealize(VirtIODevice *vdev) virtio_cleanup(vdev); } +static struct vhost_dev *vhost_vsock_common_get_vhost(VirtIODevice *vdev) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); + return &vvc->vhost_dev; +} + +static Property vhost_vsock_common_properties[] = { + DEFINE_PROP_ON_OFF_AUTO("seqpacket", VHostVSockCommon, seqpacket, + ON_OFF_AUTO_AUTO), + DEFINE_PROP_END_OF_LIST(), +}; + static void vhost_vsock_common_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + device_class_set_props(dc, vhost_vsock_common_properties); set_bit(DEVICE_CATEGORY_MISC, dc->categories); vdc->guest_notifier_mask = vhost_vsock_common_guest_notifier_mask; vdc->guest_notifier_pending = vhost_vsock_common_guest_notifier_pending; + vdc->get_vhost = vhost_vsock_common_get_vhost; } static const TypeInfo vhost_vsock_common_info = { diff --git a/hw/virtio/vhost-vsock-pci.c b/hw/virtio/vhost-vsock-pci.c index 205da8d1f5..9f34414d38 100644 --- a/hw/virtio/vhost-vsock-pci.c +++ b/hw/virtio/vhost-vsock-pci.c @@ -13,7 +13,7 @@ #include "qemu/osdep.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" #include "hw/qdev-properties.h" #include "hw/virtio/vhost-vsock.h" #include "qemu/module.h" diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c index 1b1a5c70ed..aa16d584ee 100644 --- a/hw/virtio/vhost-vsock.c +++ b/hw/virtio/vhost-vsock.c @@ -21,11 +21,6 @@ #include "hw/virtio/vhost-vsock.h" #include "monitor/monitor.h" -const int feature_bits[] = { - VIRTIO_VSOCK_F_SEQPACKET, - VHOST_INVALID_FEATURE_BIT -}; - static void vhost_vsock_get_config(VirtIODevice *vdev, uint8_t *config) { 
VHostVSock *vsock = VHOST_VSOCK(vdev); @@ -75,14 +70,10 @@ static int vhost_vsock_set_running(VirtIODevice *vdev, int start) static void vhost_vsock_set_status(VirtIODevice *vdev, uint8_t status) { VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); - bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK; + bool should_start = virtio_device_should_start(vdev, status); int ret; - if (!vdev->vm_running) { - should_start = false; - } - - if (vvc->vhost_dev.started == should_start) { + if (vhost_dev_is_started(&vvc->vhost_dev) == should_start) { return; } @@ -113,11 +104,7 @@ static uint64_t vhost_vsock_get_features(VirtIODevice *vdev, uint64_t requested_features, Error **errp) { - VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); - - virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_SEQPACKET); - return vhost_get_features(&vvc->vhost_dev, feature_bits, - requested_features); + return vhost_vsock_common_get_features(vdev, requested_features, errp); } static const VMStateDescription vmstate_virtio_vhost_vsock = { @@ -158,9 +145,8 @@ static void vhost_vsock_device_realize(DeviceState *dev, Error **errp) return; } - ret = qemu_try_set_nonblock(vhostfd); - if (ret < 0) { - error_setg_errno(errp, -ret, + if (!g_unix_set_fd_nonblocking(vhostfd, true, NULL)) { + error_setg_errno(errp, errno, "vhost-vsock: unable to set non-blocking mode"); return; } @@ -172,14 +158,22 @@ static void vhost_vsock_device_realize(DeviceState *dev, Error **errp) return; } - qemu_set_nonblock(vhostfd); + if (!g_unix_set_fd_nonblocking(vhostfd, true, NULL)) { + error_setg_errno(errp, errno, + "Failed to set FD nonblocking"); + return; + } } - vhost_vsock_common_realize(vdev, "vhost-vsock"); + vhost_vsock_common_realize(vdev); ret = vhost_dev_init(&vvc->vhost_dev, (void *)(uintptr_t)vhostfd, VHOST_BACKEND_TYPE_KERNEL, 0, errp); if (ret < 0) { + /* + * vhostfd is closed by vhost_dev_cleanup, which is called + * by vhost_dev_init on initialization error. + */ goto err_virtio; } @@ -192,15 +186,10 @@ static void vhost_vsock_device_realize(DeviceState *dev, Error **errp) return; err_vhost_dev: - vhost_dev_cleanup(&vvc->vhost_dev); /* vhost_dev_cleanup() closes the vhostfd passed to vhost_dev_init() */ - vhostfd = -1; + vhost_dev_cleanup(&vvc->vhost_dev); err_virtio: vhost_vsock_common_unrealize(vdev); - if (vhostfd >= 0) { - close(vhostfd); - } - return; } static void vhost_vsock_device_unrealize(DeviceState *dev) diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index e8f85a5d2d..f38997b3f6 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -20,24 +20,26 @@ #include "qemu/range.h" #include "qemu/error-report.h" #include "qemu/memfd.h" +#include "qemu/log.h" #include "standard-headers/linux/vhost_types.h" #include "hw/virtio/virtio-bus.h" #include "hw/virtio/virtio-access.h" #include "migration/blocker.h" #include "migration/qemu-file-types.h" #include "sysemu/dma.h" -#include "sysemu/tcg.h" #include "trace.h" /* enabled until disconnected backend stabilizes */ #define _VHOST_DEBUG 1 #ifdef _VHOST_DEBUG -#define VHOST_OPS_DEBUG(fmt, ...) \ - do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \ - strerror(errno), errno); } while (0) +#define VHOST_OPS_DEBUG(retval, fmt, ...) \ + do { \ + error_report(fmt ": %s (%d)", ## __VA_ARGS__, \ + strerror(-retval), -retval); \ + } while (0) #else -#define VHOST_OPS_DEBUG(fmt, ...) \ +#define VHOST_OPS_DEBUG(retval, fmt, ...) 
\ do { } while (0) #endif @@ -105,6 +107,24 @@ static void vhost_dev_sync_region(struct vhost_dev *dev, } } +static bool vhost_dev_has_iommu(struct vhost_dev *dev) +{ + VirtIODevice *vdev = dev->vdev; + + /* + * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support + * incremental memory mapping API via IOTLB API. For platform that + * does not have IOMMU, there's no need to enable this feature + * which may cause unnecessary IOTLB miss/update transactions. + */ + if (vdev) { + return virtio_bus_device_iommu_enabled(vdev) && + virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); + } else { + return false; + } +} + static int vhost_sync_dirty_bitmap(struct vhost_dev *dev, MemoryRegionSection *section, hwaddr first, @@ -136,8 +156,51 @@ static int vhost_sync_dirty_bitmap(struct vhost_dev *dev, continue; } - vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys, - range_get_last(vq->used_phys, vq->used_size)); + if (vhost_dev_has_iommu(dev)) { + IOMMUTLBEntry iotlb; + hwaddr used_phys = vq->used_phys, used_size = vq->used_size; + hwaddr phys, s, offset; + + while (used_size) { + rcu_read_lock(); + iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as, + used_phys, + true, + MEMTXATTRS_UNSPECIFIED); + rcu_read_unlock(); + + if (!iotlb.target_as) { + qemu_log_mask(LOG_GUEST_ERROR, "translation " + "failure for used_iova %"PRIx64"\n", + used_phys); + return -EINVAL; + } + + offset = used_phys & iotlb.addr_mask; + phys = iotlb.translated_addr + offset; + + /* + * Distance from start of used ring until last byte of + * IOMMU page. + */ + s = iotlb.addr_mask - offset; + /* + * Size of used ring, or of the part of it until end + * of IOMMU page. To avoid zero result, do the adding + * outside of MIN(). + */ + s = MIN(s, used_size - 1) + 1; + + vhost_dev_sync_region(dev, section, start_addr, end_addr, phys, + range_get_last(phys, s)); + used_size -= s; + used_phys += s; + } + } else { + vhost_dev_sync_region(dev, section, start_addr, + end_addr, vq->used_phys, + range_get_last(vq->used_phys, vq->used_size)); + } } return 0; } @@ -174,6 +237,35 @@ static uint64_t vhost_get_log_size(struct vhost_dev *dev) return log_size; } +static int vhost_set_backend_type(struct vhost_dev *dev, + VhostBackendType backend_type) +{ + int r = 0; + + switch (backend_type) { +#ifdef CONFIG_VHOST_KERNEL + case VHOST_BACKEND_TYPE_KERNEL: + dev->vhost_ops = &kernel_ops; + break; +#endif +#ifdef CONFIG_VHOST_USER + case VHOST_BACKEND_TYPE_USER: + dev->vhost_ops = &user_ops; + break; +#endif +#ifdef CONFIG_VHOST_VDPA + case VHOST_BACKEND_TYPE_VDPA: + dev->vhost_ops = &vdpa_ops; + break; +#endif + default: + error_report("Unknown vhost backend type"); + r = -1; + } + + return r; +} + static struct vhost_log *vhost_log_alloc(uint64_t size, bool share) { Error *err = NULL; @@ -268,7 +360,7 @@ static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size) releasing the current log, to ensure no logging is lost */ r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log); if (r < 0) { - VHOST_OPS_DEBUG("vhost_set_log_base failed"); + VHOST_OPS_DEBUG(r, "vhost_set_log_base failed"); } vhost_log_put(dev, true); @@ -276,20 +368,6 @@ static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size) dev->log_size = size; } -static int vhost_dev_has_iommu(struct vhost_dev *dev) -{ - VirtIODevice *vdev = dev->vdev; - - /* - * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support - * incremental memory mapping API via IOTLB API. 
For platform that - * does not have IOMMU, there's no need to enable this feature - * which may cause unnecessary IOTLB miss/update trnasactions. - */ - return vdev->dma_as != &address_space_memory && - virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); -} - static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr, hwaddr *plen, bool is_write) { @@ -521,7 +599,7 @@ static void vhost_commit(MemoryListener *listener) if (!dev->log_enabled) { r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem); if (r < 0) { - VHOST_OPS_DEBUG("vhost_set_mem_table failed"); + VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed"); } goto out; } @@ -535,7 +613,7 @@ static void vhost_commit(MemoryListener *listener) } r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem); if (r < 0) { - VHOST_OPS_DEBUG("vhost_set_mem_table failed"); + VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed"); } /* To log less, can only decrease log size after table update. */ if (dev->log_size > log_size + VHOST_LOG_BUFFER) { @@ -774,8 +852,8 @@ static int vhost_virtqueue_set_addr(struct vhost_dev *dev, if (dev->vhost_ops->vhost_vq_get_addr) { r = dev->vhost_ops->vhost_vq_get_addr(dev, &addr, vq); if (r < 0) { - VHOST_OPS_DEBUG("vhost_vq_get_addr failed"); - return -errno; + VHOST_OPS_DEBUG(r, "vhost_vq_get_addr failed"); + return r; } } else { addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc; @@ -787,10 +865,9 @@ static int vhost_virtqueue_set_addr(struct vhost_dev *dev, addr.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0; r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr); if (r < 0) { - VHOST_OPS_DEBUG("vhost_set_vring_addr failed"); - return -errno; + VHOST_OPS_DEBUG(r, "vhost_set_vring_addr failed"); } - return 0; + return r; } static int vhost_dev_set_features(struct vhost_dev *dev, @@ -811,19 +888,19 @@ static int vhost_dev_set_features(struct vhost_dev *dev, } r = dev->vhost_ops->vhost_set_features(dev, features); if (r < 0) { - VHOST_OPS_DEBUG("vhost_set_features failed"); + VHOST_OPS_DEBUG(r, "vhost_set_features failed"); goto out; } if (dev->vhost_ops->vhost_set_backend_cap) { r = dev->vhost_ops->vhost_set_backend_cap(dev); if (r < 0) { - VHOST_OPS_DEBUG("vhost_set_backend_cap failed"); + VHOST_OPS_DEBUG(r, "vhost_set_backend_cap failed"); goto out; } } out: - return r < 0 ? 
-errno : 0; + return r; } static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log) @@ -857,6 +934,10 @@ static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log) err_vq: for (; i >= 0; --i) { idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i); + addr = virtio_queue_get_desc_addr(dev->vdev, idx); + if (!addr) { + continue; + } vhost_virtqueue_set_addr(dev, dev->vqs + i, idx, dev->log_enabled); } @@ -959,7 +1040,7 @@ static inline bool vhost_needs_vring_endian(VirtIODevice *vdev) if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { return false; } -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE; #else return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG; @@ -970,22 +1051,17 @@ static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev, bool is_big_endian, int vhost_vq_index) { + int r; struct vhost_vring_state s = { .index = vhost_vq_index, .num = is_big_endian }; - if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) { - return 0; + r = dev->vhost_ops->vhost_set_vring_endian(dev, &s); + if (r < 0) { + VHOST_OPS_DEBUG(r, "vhost_set_vring_endian failed"); } - - VHOST_OPS_DEBUG("vhost_set_vring_endian failed"); - if (errno == ENOTTY) { - error_report("vhost does not support cross-endian"); - return -ENOSYS; - } - - return -errno; + return r; } static int vhost_memory_region_lookup(struct vhost_dev *hdev, @@ -1049,10 +1125,10 @@ out: return ret; } -static int vhost_virtqueue_start(struct vhost_dev *dev, - struct VirtIODevice *vdev, - struct vhost_virtqueue *vq, - unsigned idx) +int vhost_virtqueue_start(struct vhost_dev *dev, + struct VirtIODevice *vdev, + struct vhost_virtqueue *vq, + unsigned idx) { BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); VirtioBusState *vbus = VIRTIO_BUS(qbus); @@ -1077,15 +1153,15 @@ static int vhost_virtqueue_start(struct vhost_dev *dev, vq->num = state.num = virtio_queue_get_num(vdev, idx); r = dev->vhost_ops->vhost_set_vring_num(dev, &state); if (r) { - VHOST_OPS_DEBUG("vhost_set_vring_num failed"); - return -errno; + VHOST_OPS_DEBUG(r, "vhost_set_vring_num failed"); + return r; } state.num = virtio_queue_get_last_avail_idx(vdev, idx); r = dev->vhost_ops->vhost_set_vring_base(dev, &state); if (r) { - VHOST_OPS_DEBUG("vhost_set_vring_base failed"); - return -errno; + VHOST_OPS_DEBUG(r, "vhost_set_vring_base failed"); + return r; } if (vhost_needs_vring_endian(vdev)) { @@ -1093,7 +1169,7 @@ static int vhost_virtqueue_start(struct vhost_dev *dev, virtio_is_big_endian(vdev), vhost_vq_index); if (r) { - return -errno; + return r; } } @@ -1121,15 +1197,13 @@ static int vhost_virtqueue_start(struct vhost_dev *dev, r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled); if (r < 0) { - r = -errno; goto fail_alloc; } file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq)); r = dev->vhost_ops->vhost_set_vring_kick(dev, &file); if (r) { - VHOST_OPS_DEBUG("vhost_set_vring_kick failed"); - r = -errno; + VHOST_OPS_DEBUG(r, "vhost_set_vring_kick failed"); goto fail_kick; } @@ -1171,10 +1245,10 @@ fail_alloc_desc: return r; } -static void vhost_virtqueue_stop(struct vhost_dev *dev, - struct VirtIODevice *vdev, - struct vhost_virtqueue *vq, - unsigned idx) +void vhost_virtqueue_stop(struct vhost_dev *dev, + struct VirtIODevice *vdev, + struct vhost_virtqueue *vq, + unsigned idx) { int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx); struct vhost_vring_state state = { @@ -1189,7 +1263,7 @@ static void 
vhost_virtqueue_stop(struct vhost_dev *dev, r = dev->vhost_ops->vhost_get_vring_base(dev, &state); if (r < 0) { - VHOST_OPS_DEBUG("vhost VQ %u ring restore failed: %d", idx, r); + VHOST_OPS_DEBUG(r, "vhost VQ %u ring restore failed: %d", idx, r); /* Connection to the backend is broken, so let's sync internal * last avail idx to the device used idx. */ @@ -1245,13 +1319,26 @@ static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev, r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state); if (r) { - VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed"); + VHOST_OPS_DEBUG(r, "vhost_set_vring_busyloop_timeout failed"); return r; } return 0; } +static void vhost_virtqueue_error_notifier(EventNotifier *n) +{ + struct vhost_virtqueue *vq = container_of(n, struct vhost_virtqueue, + error_notifier); + struct vhost_dev *dev = vq->dev; + int index = vq - dev->vqs; + + if (event_notifier_test_and_clear(n) && dev->vdev) { + VHOST_OPS_DEBUG(-EINVAL, "vhost vring error in virtqueue %d", + dev->vq_index + index); + } +} + static int vhost_virtqueue_init(struct vhost_dev *dev, struct vhost_virtqueue *vq, int n) { @@ -1264,17 +1351,36 @@ static int vhost_virtqueue_init(struct vhost_dev *dev, return r; } - file.fd = event_notifier_get_fd(&vq->masked_notifier); + file.fd = event_notifier_get_wfd(&vq->masked_notifier); r = dev->vhost_ops->vhost_set_vring_call(dev, &file); if (r) { - VHOST_OPS_DEBUG("vhost_set_vring_call failed"); - r = -errno; + VHOST_OPS_DEBUG(r, "vhost_set_vring_call failed"); goto fail_call; } vq->dev = dev; + if (dev->vhost_ops->vhost_set_vring_err) { + r = event_notifier_init(&vq->error_notifier, 0); + if (r < 0) { + goto fail_call; + } + + file.fd = event_notifier_get_fd(&vq->error_notifier); + r = dev->vhost_ops->vhost_set_vring_err(dev, &file); + if (r) { + VHOST_OPS_DEBUG(r, "vhost_set_vring_err failed"); + goto fail_err; + } + + event_notifier_set_handler(&vq->error_notifier, + vhost_virtqueue_error_notifier); + } + return 0; + +fail_err: + event_notifier_cleanup(&vq->error_notifier); fail_call: event_notifier_cleanup(&vq->masked_notifier); return r; @@ -1283,13 +1389,16 @@ fail_call: static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq) { event_notifier_cleanup(&vq->masked_notifier); + if (vq->dev->vhost_ops->vhost_set_vring_err) { + event_notifier_set_handler(&vq->error_notifier, NULL); + event_notifier_cleanup(&vq->error_notifier); + } } int vhost_dev_init(struct vhost_dev *hdev, void *opaque, VhostBackendType backend_type, uint32_t busyloop_timeout, Error **errp) { - ERRP_GUARD(); uint64_t features; int i, r, n_initialized_vqs = 0; @@ -1301,9 +1410,6 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, r = hdev->vhost_ops->vhost_backend_init(hdev, opaque, errp); if (r < 0) { - if (!*errp) { - error_setg_errno(errp, -r, "vhost_backend_init failed"); - } goto fail; } @@ -1341,6 +1447,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, hdev->features = features; hdev->memory_listener = (MemoryListener) { + .name = "vhost", .begin = vhost_begin, .commit = vhost_commit, .region_add = vhost_region_addnop, @@ -1356,6 +1463,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, }; hdev->iommu_listener = (MemoryListener) { + .name = "vhost-iommu", .region_add = vhost_iommu_region_add, .region_del = vhost_iommu_region_del, }; @@ -1372,7 +1480,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, if (hdev->migration_blocker != NULL) { r = migrate_add_blocker(hdev->migration_blocker, errp); - if (*errp) { + if (r < 0) { 
error_free(hdev->migration_blocker); goto fail_busyloop; } @@ -1413,6 +1521,8 @@ void vhost_dev_cleanup(struct vhost_dev *hdev) { int i; + trace_vhost_dev_cleanup(hdev); + for (i = 0; i < hdev->nvqs; ++i) { vhost_virtqueue_cleanup(hdev->vqs + i); } @@ -1522,15 +1632,15 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n, if (mask) { assert(vdev->use_guest_notifier_mask); - file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier); + file.fd = event_notifier_get_wfd(&hdev->vqs[index].masked_notifier); } else { - file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq)); + file.fd = event_notifier_get_wfd(virtio_queue_get_guest_notifier(vvq)); } file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n); r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file); if (r < 0) { - VHOST_OPS_DEBUG("vhost_set_vring_call failed"); + VHOST_OPS_DEBUG(r, "vhost_set_vring_call failed"); } } @@ -1564,21 +1674,15 @@ void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits, int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config, uint32_t config_len, Error **errp) { - ERRP_GUARD(); - int ret; - assert(hdev->vhost_ops); if (hdev->vhost_ops->vhost_get_config) { - ret = hdev->vhost_ops->vhost_get_config(hdev, config, config_len, errp); - if (ret < 0 && !*errp) { - error_setg_errno(errp, -ret, "vhost_get_config failed"); - } - return ret; + return hdev->vhost_ops->vhost_get_config(hdev, config, config_len, + errp); } error_setg(errp, "vhost_get_config not implemented"); - return -ENOTSUP; + return -ENOSYS; } int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data, @@ -1591,7 +1695,7 @@ int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data, size, flags); } - return -1; + return -ENOSYS; } void vhost_dev_set_config_notifier(struct vhost_dev *hdev, @@ -1620,7 +1724,7 @@ static int vhost_dev_resize_inflight(struct vhost_inflight *inflight, if (err) { error_report_err(err); - return -1; + return -ENOMEM; } vhost_dev_free_inflight(inflight); @@ -1653,8 +1757,9 @@ int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f) } if (inflight->size != size) { - if (vhost_dev_resize_inflight(inflight, size)) { - return -1; + int ret = vhost_dev_resize_inflight(inflight, size); + if (ret < 0) { + return ret; } } inflight->queue_size = qemu_get_be16(f); @@ -1677,7 +1782,7 @@ int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev) r = vhost_dev_set_features(hdev, hdev->log_enabled); if (r < 0) { - VHOST_OPS_DEBUG("vhost_dev_prepare_inflight failed"); + VHOST_OPS_DEBUG(r, "vhost_dev_prepare_inflight failed"); return r; } @@ -1692,8 +1797,8 @@ int vhost_dev_set_inflight(struct vhost_dev *dev, if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) { r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight); if (r) { - VHOST_OPS_DEBUG("vhost_set_inflight_fd failed"); - return -errno; + VHOST_OPS_DEBUG(r, "vhost_set_inflight_fd failed"); + return r; } } @@ -1708,22 +1813,46 @@ int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size, if (dev->vhost_ops->vhost_get_inflight_fd) { r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight); if (r) { - VHOST_OPS_DEBUG("vhost_get_inflight_fd failed"); - return -errno; + VHOST_OPS_DEBUG(r, "vhost_get_inflight_fd failed"); + return r; } } return 0; } +static int vhost_dev_set_vring_enable(struct vhost_dev *hdev, int enable) +{ + if (!hdev->vhost_ops->vhost_set_vring_enable) { + return 0; + } + + /* + * For vhost-user devices, if 
VHOST_USER_F_PROTOCOL_FEATURES has not + * been negotiated, the rings start directly in the enabled state, and + * .vhost_set_vring_enable callback will fail since + * VHOST_USER_SET_VRING_ENABLE is not supported. + */ + if (hdev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER && + !virtio_has_feature(hdev->backend_features, + VHOST_USER_F_PROTOCOL_FEATURES)) { + return 0; + } + + return hdev->vhost_ops->vhost_set_vring_enable(hdev, enable); +} + /* Host notifiers must be enabled at this point. */ -int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) +int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) { int i, r; /* should only be called after backend is connected */ assert(hdev->vhost_ops); + trace_vhost_dev_start(hdev, vdev->name, vrings); + + vdev->vhost_started = true; hdev->started = true; hdev->vdev = vdev; @@ -1738,8 +1867,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem); if (r < 0) { - VHOST_OPS_DEBUG("vhost_set_mem_table failed"); - r = -errno; + VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed"); goto fail_mem; } for (i = 0; i < hdev->nvqs; ++i) { @@ -1763,15 +1891,20 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) hdev->log_size ? log_base : 0, hdev->log); if (r < 0) { - VHOST_OPS_DEBUG("vhost_set_log_base failed"); - r = -errno; + VHOST_OPS_DEBUG(r, "vhost_set_log_base failed"); + goto fail_log; + } + } + if (vrings) { + r = vhost_dev_set_vring_enable(hdev, true); + if (r) { goto fail_log; } } if (hdev->vhost_ops->vhost_dev_start) { r = hdev->vhost_ops->vhost_dev_start(hdev, true); if (r) { - goto fail_log; + goto fail_start; } } if (vhost_dev_has_iommu(hdev) && @@ -1786,6 +1919,10 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) } } return 0; +fail_start: + if (vrings) { + vhost_dev_set_vring_enable(hdev, false); + } fail_log: vhost_log_put(hdev, false); fail_vq: @@ -1797,23 +1934,31 @@ fail_vq: } fail_mem: + if (vhost_dev_has_iommu(hdev)) { + memory_listener_unregister(&hdev->iommu_listener); + } fail_features: - + vdev->vhost_started = false; hdev->started = false; return r; } /* Host notifiers must be enabled at this point. 
*/ -void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev) +void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) { int i; /* should only be called after backend is connected */ assert(hdev->vhost_ops); + trace_vhost_dev_stop(hdev, vdev->name, vrings); + if (hdev->vhost_ops->vhost_dev_start) { hdev->vhost_ops->vhost_dev_start(hdev, false); } + if (vrings) { + vhost_dev_set_vring_enable(hdev, false); + } for (i = 0; i < hdev->nvqs; ++i) { vhost_virtqueue_stop(hdev, vdev, @@ -1829,6 +1974,7 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev) } vhost_log_put(hdev, true); hdev->started = false; + vdev->vhost_started = false; hdev->vdev = NULL; } @@ -1839,5 +1985,5 @@ int vhost_net_set_backend(struct vhost_dev *hdev, return hdev->vhost_ops->vhost_net_set_backend(hdev, file); } - return -1; + return -ENOSYS; } diff --git a/hw/virtio/virtio-9p-pci.c b/hw/virtio/virtio-9p-pci.c index e07adcd9ea..94c14f0b98 100644 --- a/hw/virtio/virtio-9p-pci.c +++ b/hw/virtio/virtio-9p-pci.c @@ -15,7 +15,7 @@ #include "qemu/osdep.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" #include "hw/9pfs/virtio-9p.h" #include "hw/qdev-properties.h" #include "qemu/module.h" diff --git a/hw/virtio/virtio-balloon-pci.c b/hw/virtio/virtio-balloon-pci.c index 79a3ba979a..ce2645ba71 100644 --- a/hw/virtio/virtio-balloon-pci.c +++ b/hw/virtio/virtio-balloon-pci.c @@ -14,7 +14,7 @@ #include "qemu/osdep.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" #include "hw/qdev-properties.h" #include "hw/virtio/virtio-balloon.h" #include "qapi/error.h" diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c index 4b5d9e5e50..73ac5eb675 100644 --- a/hw/virtio/virtio-balloon.c +++ b/hw/virtio/virtio-balloon.c @@ -17,6 +17,7 @@ #include "qemu/iov.h" #include "qemu/module.h" #include "qemu/timer.h" +#include "qemu/madvise.h" #include "hw/virtio/virtio.h" #include "hw/mem/pc-dimm.h" #include "hw/qdev-properties.h" @@ -30,6 +31,7 @@ #include "trace.h" #include "qemu/error-report.h" #include "migration/misc.h" +#include "migration/migration.h" #include "hw/virtio/virtio-bus.h" #include "hw/virtio/virtio-access.h" @@ -230,7 +232,7 @@ static void balloon_stats_poll_cb(void *opaque) return; } - virtqueue_push(s->svq, s->stats_vq_elem, s->stats_vq_offset); + virtqueue_push(s->svq, s->stats_vq_elem, 0); virtio_notify(vdev, s->svq); g_free(s->stats_vq_elem); s->stats_vq_elem = NULL; @@ -240,7 +242,7 @@ static void balloon_stats_get_all(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { Error *err = NULL; - VirtIOBalloon *s = opaque; + VirtIOBalloon *s = VIRTIO_BALLOON(obj); int i; if (!visit_start_struct(v, name, NULL, 0, &err)) { @@ -275,7 +277,7 @@ static void balloon_stats_get_poll_interval(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - VirtIOBalloon *s = opaque; + VirtIOBalloon *s = VIRTIO_BALLOON(obj); visit_type_int(v, name, &s->stats_poll_interval, errp); } @@ -283,7 +285,7 @@ static void balloon_stats_set_poll_interval(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - VirtIOBalloon *s = opaque; + VirtIOBalloon *s = VIRTIO_BALLOON(obj); int64_t value; if (!visit_type_int(v, name, &value, errp)) { @@ -437,7 +439,7 @@ static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq) memory_region_unref(section.mr); } - virtqueue_push(vq, elem, offset); + virtqueue_push(vq, elem, 0); virtio_notify(vdev, vq); g_free(elem); virtio_balloon_pbp_free(&pbp); @@ -450,7 +452,6 @@ static 
void virtio_balloon_receive_stats(VirtIODevice *vdev, VirtQueue *vq) VirtQueueElement *elem; VirtIOBalloonStat stat; size_t offset = 0; - qemu_timeval tv; elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); if (!elem) { @@ -482,13 +483,7 @@ static void virtio_balloon_receive_stats(VirtIODevice *vdev, VirtQueue *vq) s->stats[tag] = val; } s->stats_vq_offset = offset; - - if (qemu_gettimeofday(&tv) < 0) { - warn_report("%s: failed to get time of day", __func__); - goto out; - } - - s->stats_last_update = tv.tv_sec; + s->stats_last_update = g_get_real_time() / G_USEC_PER_SEC; out: if (balloon_stats_enabled(s)) { @@ -509,6 +504,7 @@ static bool get_free_page_hints(VirtIOBalloon *dev) VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtQueue *vq = dev->free_page_vq; bool ret = true; + int i; while (dev->block_iothread) { qemu_cond_wait(&dev->free_page_cond, &dev->free_page_lock); @@ -533,26 +529,24 @@ static bool get_free_page_hints(VirtIOBalloon *dev) if (dev->free_page_hint_status == FREE_PAGE_HINT_S_REQUESTED && id == dev->free_page_hint_cmd_id) { dev->free_page_hint_status = FREE_PAGE_HINT_S_START; - } else { + } else if (dev->free_page_hint_status == FREE_PAGE_HINT_S_START) { /* * Stop the optimization only when it has started. This * avoids a stale stop sign for the previous command. */ - if (dev->free_page_hint_status == FREE_PAGE_HINT_S_START) { - dev->free_page_hint_status = FREE_PAGE_HINT_S_STOP; - } + dev->free_page_hint_status = FREE_PAGE_HINT_S_STOP; } } - if (elem->in_num) { - if (dev->free_page_hint_status == FREE_PAGE_HINT_S_START) { - qemu_guest_free_page_hint(elem->in_sg[0].iov_base, - elem->in_sg[0].iov_len); + if (elem->in_num && dev->free_page_hint_status == FREE_PAGE_HINT_S_START) { + for (i = 0; i < elem->in_num; i++) { + qemu_guest_free_page_hint(elem->in_sg[i].iov_base, + elem->in_sg[i].iov_len); } } out: - virtqueue_push(vq, elem, 1); + virtqueue_push(vq, elem, 0); g_free(elem); return ret; } @@ -591,16 +585,10 @@ static void virtio_balloon_free_page_start(VirtIOBalloon *s) { VirtIODevice *vdev = VIRTIO_DEVICE(s); - /* For the stop and copy phase, we don't need to start the optimization */ - if (!vdev->vm_running) { - return; - } - qemu_mutex_lock(&s->free_page_lock); if (s->free_page_hint_cmd_id == UINT_MAX) { - s->free_page_hint_cmd_id = - VIRTIO_BALLOON_FREE_PAGE_HINT_CMD_ID_MIN; + s->free_page_hint_cmd_id = VIRTIO_BALLOON_FREE_PAGE_HINT_CMD_ID_MIN; } else { s->free_page_hint_cmd_id++; } @@ -648,8 +636,7 @@ static void virtio_balloon_free_page_done(VirtIOBalloon *s) static int virtio_balloon_free_page_hint_notify(NotifierWithReturn *n, void *data) { - VirtIOBalloon *dev = container_of(n, VirtIOBalloon, - free_page_hint_notify); + VirtIOBalloon *dev = container_of(n, VirtIOBalloon, free_page_hint_notify); VirtIODevice *vdev = VIRTIO_DEVICE(dev); PrecopyNotifyData *pnd = data; @@ -662,6 +649,18 @@ virtio_balloon_free_page_hint_notify(NotifierWithReturn *n, void *data) return 0; } + /* + * Pages hinted via qemu_guest_free_page_hint() are cleared from the dirty + * bitmap and will not get migrated, especially also not when the postcopy + * destination starts using them and requests migration from the source; the + * faulting thread will stall until postcopy migration finishes and + * all threads are woken up. Let's not start free page hinting if postcopy + * is possible. 
+ */ + if (migrate_postcopy_ram()) { + return 0; + } + switch (pnd->reason) { case PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC: virtio_balloon_free_page_stop(dev); @@ -850,7 +849,7 @@ static const VMStateDescription vmstate_virtio_balloon_free_page_hint = { }; static const VMStateDescription vmstate_virtio_balloon_page_poison = { - .name = "vitio-balloon-device/page-poison", + .name = "virtio-balloon-device/page-poison", .version_id = 1, .minimum_version_id = 1, .needed = virtio_balloon_page_poison_support, @@ -883,8 +882,7 @@ static void virtio_balloon_device_realize(DeviceState *dev, Error **errp) VirtIOBalloon *s = VIRTIO_BALLOON(dev); int ret; - virtio_init(vdev, "virtio-balloon", VIRTIO_ID_BALLOON, - virtio_balloon_config_size(s)); + virtio_init(vdev, VIRTIO_ID_BALLOON, virtio_balloon_config_size(s)); ret = qemu_add_balloon_handler(virtio_balloon_to_target, virtio_balloon_stat, s); @@ -906,8 +904,7 @@ static void virtio_balloon_device_realize(DeviceState *dev, Error **errp) s->dvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output); s->svq = virtio_add_queue(vdev, 128, virtio_balloon_receive_stats); - if (virtio_has_feature(s->host_features, - VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { + if (virtio_has_feature(s->host_features, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { s->free_page_vq = virtio_add_queue(vdev, VIRTQUEUE_MAX_SIZE, virtio_balloon_handle_free_page_vq); precopy_add_notifier(&s->free_page_hint_notify); @@ -1010,12 +1007,12 @@ static void virtio_balloon_instance_init(Object *obj) s->free_page_hint_notify.notify = virtio_balloon_free_page_hint_notify; object_property_add(obj, "guest-stats", "guest statistics", - balloon_stats_get_all, NULL, NULL, s); + balloon_stats_get_all, NULL, NULL, NULL); object_property_add(obj, "guest-stats-polling-interval", "int", balloon_stats_get_poll_interval, balloon_stats_set_poll_interval, - NULL, s); + NULL, NULL); } static const VMStateDescription vmstate_virtio_balloon = { diff --git a/hw/virtio/virtio-blk-pci.c b/hw/virtio/virtio-blk-pci.c index 9d5795810c..9743bee965 100644 --- a/hw/virtio/virtio-blk-pci.c +++ b/hw/virtio/virtio-blk-pci.c @@ -19,7 +19,7 @@ #include "hw/qdev-properties.h" #include "hw/virtio/virtio-blk.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" #include "qapi/error.h" #include "qemu/module.h" #include "qom/object.h" diff --git a/hw/virtio/virtio-bus.c b/hw/virtio/virtio-bus.c index 859978d248..896feb37a1 100644 --- a/hw/virtio/virtio-bus.c +++ b/hw/virtio/virtio-bus.c @@ -48,6 +48,7 @@ void virtio_bus_device_plugged(VirtIODevice *vdev, Error **errp) VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(bus); VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); bool has_iommu = virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); + bool vdev_has_iommu; Error *local_err = NULL; DPRINTF("%s: plug device.\n", qbus->name); @@ -69,11 +70,6 @@ void virtio_bus_device_plugged(VirtIODevice *vdev, Error **errp) return; } - if (has_iommu && !virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) { - error_setg(errp, "iommu_platform=true is not supported by the device"); - return; - } - if (klass->device_plugged != NULL) { klass->device_plugged(qbus->parent, &local_err); } @@ -82,11 +78,23 @@ void virtio_bus_device_plugged(VirtIODevice *vdev, Error **errp) return; } - if (klass->get_dma_as != NULL && has_iommu) { + vdev->dma_as = &address_space_memory; + if (has_iommu) { + vdev_has_iommu = virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); + /* + * Present IOMMU_PLATFORM to the driver iff iommu_plattform=on and + * device 
operational. If the driver does not accept IOMMU_PLATFORM + * we fail the device. + */ virtio_add_feature(&vdev->host_features, VIRTIO_F_IOMMU_PLATFORM); - vdev->dma_as = klass->get_dma_as(qbus->parent); - } else { - vdev->dma_as = &address_space_memory; + if (klass->get_dma_as) { + vdev->dma_as = klass->get_dma_as(qbus->parent); + if (!vdev_has_iommu && vdev->dma_as != &address_space_memory) { + error_setg(errp, + "iommu_platform=true is not supported by the device"); + return; + } + } } } @@ -96,6 +104,7 @@ void virtio_bus_reset(VirtioBusState *bus) VirtIODevice *vdev = virtio_bus_get_device(bus); DPRINTF("%s: reset device.\n", BUS(bus)->name); + virtio_bus_stop_ioeventfd(bus); if (vdev != NULL) { virtio_reset(vdev); } @@ -325,6 +334,20 @@ static char *virtio_bus_get_fw_dev_path(DeviceState *dev) return NULL; } +bool virtio_bus_device_iommu_enabled(VirtIODevice *vdev) +{ + DeviceState *qdev = DEVICE(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(qdev)); + VirtioBusState *bus = VIRTIO_BUS(qbus); + VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(bus); + + if (!klass->iommu_enabled) { + return false; + } + + return klass->iommu_enabled(qbus->parent); +} + static void virtio_bus_class_init(ObjectClass *klass, void *data) { BusClass *bus_class = BUS_CLASS(klass); diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c index 54f9bbb789..a6dbdd32da 100644 --- a/hw/virtio/virtio-crypto.c +++ b/hw/virtio/virtio-crypto.c @@ -27,6 +27,39 @@ #define VIRTIO_CRYPTO_VM_VERSION 1 +typedef struct VirtIOCryptoSessionReq { + VirtIODevice *vdev; + VirtQueue *vq; + VirtQueueElement *elem; + CryptoDevBackendSessionInfo info; + CryptoDevCompletionFunc cb; +} VirtIOCryptoSessionReq; + +static void virtio_crypto_free_create_session_req(VirtIOCryptoSessionReq *sreq) +{ + switch (sreq->info.op_code) { + case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION: + g_free(sreq->info.u.sym_sess_info.cipher_key); + g_free(sreq->info.u.sym_sess_info.auth_key); + break; + + case VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION: + g_free(sreq->info.u.asym_sess_info.key); + break; + + case VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION: + case VIRTIO_CRYPTO_HASH_DESTROY_SESSION: + case VIRTIO_CRYPTO_MAC_DESTROY_SESSION: + case VIRTIO_CRYPTO_AEAD_DESTROY_SESSION: + case VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION: + break; + + default: + error_report("Unknown opcode: %u", sreq->info.op_code); + } + g_free(sreq); +} + /* * Transfer virtqueue index to crypto queue index. 
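
/*
 * Illustrative sketch only (all names below are made up, not QEMU API): the
 * control-queue rework in this hunk replaces synchronous session create/destroy
 * calls with an asynchronous model in which each request carries a completion
 * callback that reports the result and frees the request, roughly like this:
 */
#include <stdio.h>
#include <stdlib.h>

typedef void (*completion_fn)(void *opaque, int ret);

struct session_req {
    int opcode;          /* create or destroy session */
    completion_fn cb;    /* invoked once the backend finishes */
};

static void create_session_done(void *opaque, int ret)
{
    struct session_req *req = opaque;

    printf("opcode %d completed with %d\n", req->opcode, ret);
    free(req);           /* the completion path owns and releases the request */
}

/* Backend entry point: may complete immediately or later from another context. */
static int backend_create_session(struct session_req *req)
{
    req->cb(req, 0);     /* pretend the backend finished successfully */
    return 0;
}

int main(void)
{
    struct session_req *req = calloc(1, sizeof(*req));

    req->opcode = 1;
    req->cb = create_session_done;
    if (backend_create_session(req) < 0) {
        create_session_done(req, -1);   /* error path mirrors the patch */
    }
    return 0;
}
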
* The control virtqueue is after the data virtqueues @@ -75,140 +108,232 @@ virtio_crypto_cipher_session_helper(VirtIODevice *vdev, return 0; } -static int64_t +static int virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto, struct virtio_crypto_sym_create_session_req *sess_req, uint32_t queue_id, uint32_t opcode, - struct iovec *iov, unsigned int out_num) + struct iovec *iov, unsigned int out_num, + VirtIOCryptoSessionReq *sreq) { VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto); - CryptoDevBackendSymSessionInfo info; - int64_t session_id; + CryptoDevBackendSymSessionInfo *sym_info = &sreq->info.u.sym_sess_info; int queue_index; uint32_t op_type; - Error *local_err = NULL; int ret; - memset(&info, 0, sizeof(info)); op_type = ldl_le_p(&sess_req->op_type); - info.op_type = op_type; - info.op_code = opcode; + sreq->info.op_code = opcode; + + sym_info = &sreq->info.u.sym_sess_info; + sym_info->op_type = op_type; if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) { - ret = virtio_crypto_cipher_session_helper(vdev, &info, + ret = virtio_crypto_cipher_session_helper(vdev, sym_info, &sess_req->u.cipher.para, &iov, &out_num); if (ret < 0) { - goto err; + return ret; } } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) { size_t s; /* cipher part */ - ret = virtio_crypto_cipher_session_helper(vdev, &info, + ret = virtio_crypto_cipher_session_helper(vdev, sym_info, &sess_req->u.chain.para.cipher_param, &iov, &out_num); if (ret < 0) { - goto err; + return ret; } /* hash part */ - info.alg_chain_order = ldl_le_p( + sym_info->alg_chain_order = ldl_le_p( &sess_req->u.chain.para.alg_chain_order); - info.add_len = ldl_le_p(&sess_req->u.chain.para.aad_len); - info.hash_mode = ldl_le_p(&sess_req->u.chain.para.hash_mode); - if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH) { - info.hash_alg = ldl_le_p(&sess_req->u.chain.para.u.mac_param.algo); - info.auth_key_len = ldl_le_p( + sym_info->add_len = ldl_le_p(&sess_req->u.chain.para.aad_len); + sym_info->hash_mode = ldl_le_p(&sess_req->u.chain.para.hash_mode); + if (sym_info->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH) { + sym_info->hash_alg = + ldl_le_p(&sess_req->u.chain.para.u.mac_param.algo); + sym_info->auth_key_len = ldl_le_p( &sess_req->u.chain.para.u.mac_param.auth_key_len); - info.hash_result_len = ldl_le_p( + sym_info->hash_result_len = ldl_le_p( &sess_req->u.chain.para.u.mac_param.hash_result_len); - if (info.auth_key_len > vcrypto->conf.max_auth_key_len) { + if (sym_info->auth_key_len > vcrypto->conf.max_auth_key_len) { error_report("virtio-crypto length of auth key is too big: %u", - info.auth_key_len); - ret = -VIRTIO_CRYPTO_ERR; - goto err; + sym_info->auth_key_len); + return -VIRTIO_CRYPTO_ERR; } /* get auth key */ - if (info.auth_key_len > 0) { - DPRINTF("auth_keylen=%" PRIu32 "\n", info.auth_key_len); - info.auth_key = g_malloc(info.auth_key_len); - s = iov_to_buf(iov, out_num, 0, info.auth_key, - info.auth_key_len); - if (unlikely(s != info.auth_key_len)) { + if (sym_info->auth_key_len > 0) { + sym_info->auth_key = g_malloc(sym_info->auth_key_len); + s = iov_to_buf(iov, out_num, 0, sym_info->auth_key, + sym_info->auth_key_len); + if (unlikely(s != sym_info->auth_key_len)) { virtio_error(vdev, "virtio-crypto authenticated key incorrect"); - ret = -EFAULT; - goto err; + return -EFAULT; } - iov_discard_front(&iov, &out_num, info.auth_key_len); + iov_discard_front(&iov, &out_num, sym_info->auth_key_len); } - } else if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN) { - info.hash_alg = ldl_le_p( + } else if (sym_info->hash_mode == 
VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN) { + sym_info->hash_alg = ldl_le_p( &sess_req->u.chain.para.u.hash_param.algo); - info.hash_result_len = ldl_le_p( + sym_info->hash_result_len = ldl_le_p( &sess_req->u.chain.para.u.hash_param.hash_result_len); } else { /* VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */ error_report("unsupported hash mode"); - ret = -VIRTIO_CRYPTO_NOTSUPP; - goto err; + return -VIRTIO_CRYPTO_NOTSUPP; } } else { /* VIRTIO_CRYPTO_SYM_OP_NONE */ error_report("unsupported cipher op_type: VIRTIO_CRYPTO_SYM_OP_NONE"); - ret = -VIRTIO_CRYPTO_NOTSUPP; - goto err; + return -VIRTIO_CRYPTO_NOTSUPP; } queue_index = virtio_crypto_vq2q(queue_id); - session_id = cryptodev_backend_sym_create_session( - vcrypto->cryptodev, - &info, queue_index, &local_err); - if (session_id >= 0) { - DPRINTF("create session_id=%" PRIu64 " successfully\n", - session_id); - - ret = session_id; - } else { - if (local_err) { - error_report_err(local_err); - } - ret = -VIRTIO_CRYPTO_ERR; - } - -err: - g_free(info.cipher_key); - g_free(info.auth_key); - return ret; + return cryptodev_backend_create_session(vcrypto->cryptodev, &sreq->info, + queue_index, sreq->cb, sreq); } -static uint8_t +static int +virtio_crypto_create_asym_session(VirtIOCrypto *vcrypto, + struct virtio_crypto_akcipher_create_session_req *sess_req, + uint32_t queue_id, uint32_t opcode, + struct iovec *iov, unsigned int out_num, + VirtIOCryptoSessionReq *sreq) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto); + CryptoDevBackendAsymSessionInfo *asym_info = &sreq->info.u.asym_sess_info; + int queue_index; + uint32_t algo, keytype, keylen; + + algo = ldl_le_p(&sess_req->para.algo); + keytype = ldl_le_p(&sess_req->para.keytype); + keylen = ldl_le_p(&sess_req->para.keylen); + + if ((keytype != VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC) + && (keytype != VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE)) { + error_report("unsupported asym keytype: %d", keytype); + return -VIRTIO_CRYPTO_NOTSUPP; + } + + if (keylen) { + asym_info->key = g_malloc(keylen); + if (iov_to_buf(iov, out_num, 0, asym_info->key, keylen) != keylen) { + virtio_error(vdev, "virtio-crypto asym key incorrect"); + return -EFAULT; + } + iov_discard_front(&iov, &out_num, keylen); + } + + sreq->info.op_code = opcode; + asym_info = &sreq->info.u.asym_sess_info; + asym_info->algo = algo; + asym_info->keytype = keytype; + asym_info->keylen = keylen; + switch (asym_info->algo) { + case VIRTIO_CRYPTO_AKCIPHER_RSA: + asym_info->u.rsa.padding_algo = + ldl_le_p(&sess_req->para.u.rsa.padding_algo); + asym_info->u.rsa.hash_algo = + ldl_le_p(&sess_req->para.u.rsa.hash_algo); + break; + + /* TODO DSA&ECDSA handling */ + + default: + return -VIRTIO_CRYPTO_ERR; + } + + queue_index = virtio_crypto_vq2q(queue_id); + return cryptodev_backend_create_session(vcrypto->cryptodev, &sreq->info, + queue_index, sreq->cb, sreq); +} + +static int virtio_crypto_handle_close_session(VirtIOCrypto *vcrypto, struct virtio_crypto_destroy_session_req *close_sess_req, - uint32_t queue_id) + uint32_t queue_id, + VirtIOCryptoSessionReq *sreq) { - int ret; uint64_t session_id; - uint32_t status; - Error *local_err = NULL; session_id = ldq_le_p(&close_sess_req->session_id); DPRINTF("close session, id=%" PRIu64 "\n", session_id); - ret = cryptodev_backend_sym_close_session( - vcrypto->cryptodev, session_id, queue_id, &local_err); - if (ret == 0) { - status = VIRTIO_CRYPTO_OK; + return cryptodev_backend_close_session( + vcrypto->cryptodev, session_id, queue_id, sreq->cb, sreq); +} + +static void virtio_crypto_create_session_completion(void *opaque, 
int ret) +{ + VirtIOCryptoSessionReq *sreq = (VirtIOCryptoSessionReq *)opaque; + VirtQueue *vq = sreq->vq; + VirtQueueElement *elem = sreq->elem; + VirtIODevice *vdev = sreq->vdev; + struct virtio_crypto_session_input input; + struct iovec *in_iov = elem->in_sg; + unsigned in_num = elem->in_num; + size_t s; + + memset(&input, 0, sizeof(input)); + /* Serious errors, need to reset virtio crypto device */ + if (ret == -EFAULT) { + virtqueue_detach_element(vq, elem, 0); + goto out; + } else if (ret == -VIRTIO_CRYPTO_NOTSUPP) { + stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP); + } else if (ret == -VIRTIO_CRYPTO_KEY_REJECTED) { + stl_le_p(&input.status, VIRTIO_CRYPTO_KEY_REJECTED); + } else if (ret != VIRTIO_CRYPTO_OK) { + stl_le_p(&input.status, VIRTIO_CRYPTO_ERR); } else { - if (local_err) { - error_report_err(local_err); - } else { - error_report("destroy session failed"); - } - status = VIRTIO_CRYPTO_ERR; + /* Set the session id */ + stq_le_p(&input.session_id, sreq->info.session_id); + stl_le_p(&input.status, VIRTIO_CRYPTO_OK); } - return status; + s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input)); + if (unlikely(s != sizeof(input))) { + virtio_error(vdev, "virtio-crypto input incorrect"); + virtqueue_detach_element(vq, elem, 0); + goto out; + } + virtqueue_push(vq, elem, sizeof(input)); + virtio_notify(vdev, vq); + +out: + g_free(elem); + virtio_crypto_free_create_session_req(sreq); +} + +static void virtio_crypto_destroy_session_completion(void *opaque, int ret) +{ + VirtIOCryptoSessionReq *sreq = (VirtIOCryptoSessionReq *)opaque; + VirtQueue *vq = sreq->vq; + VirtQueueElement *elem = sreq->elem; + VirtIODevice *vdev = sreq->vdev; + struct iovec *in_iov = elem->in_sg; + unsigned in_num = elem->in_num; + uint8_t status; + size_t s; + + if (ret < 0) { + status = VIRTIO_CRYPTO_ERR; + } else { + status = VIRTIO_CRYPTO_OK; + } + s = iov_from_buf(in_iov, in_num, 0, &status, sizeof(status)); + if (unlikely(s != sizeof(status))) { + virtio_error(vdev, "virtio-crypto status incorrect"); + virtqueue_detach_element(vq, elem, 0); + goto out; + } + virtqueue_push(vq, elem, sizeof(status)); + virtio_notify(vdev, vq); + +out: + g_free(elem); + g_free(sreq); } static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) @@ -216,16 +341,16 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev); struct virtio_crypto_op_ctrl_req ctrl; VirtQueueElement *elem; - struct iovec *in_iov; - struct iovec *out_iov; - unsigned in_num; + VirtIOCryptoSessionReq *sreq; unsigned out_num; + unsigned in_num; uint32_t queue_id; uint32_t opcode; struct virtio_crypto_session_input input; - int64_t session_id; - uint8_t status; size_t s; + int ret; + struct iovec *out_iov; + struct iovec *in_iov; for (;;) { g_autofree struct iovec *out_iov_copy = NULL; @@ -242,7 +367,7 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) } out_num = elem->out_num; - out_iov_copy = g_memdup(elem->out_sg, sizeof(out_iov[0]) * out_num); + out_iov_copy = g_memdup2(elem->out_sg, sizeof(out_iov[0]) * out_num); out_iov = out_iov_copy; in_num = elem->in_num; @@ -260,72 +385,71 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) opcode = ldl_le_p(&ctrl.header.opcode); queue_id = ldl_le_p(&ctrl.header.queue_id); + sreq = g_new0(VirtIOCryptoSessionReq, 1); + sreq->vdev = vdev; + sreq->vq = vq; + sreq->elem = elem; + switch (opcode) { case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION: - memset(&input, 0, sizeof(input)); - session_id 
= virtio_crypto_create_sym_session(vcrypto, - &ctrl.u.sym_create_session, - queue_id, opcode, - out_iov, out_num); - /* Serious errors, need to reset virtio crypto device */ - if (session_id == -EFAULT) { - virtqueue_detach_element(vq, elem, 0); - break; - } else if (session_id == -VIRTIO_CRYPTO_NOTSUPP) { - stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP); - } else if (session_id == -VIRTIO_CRYPTO_ERR) { - stl_le_p(&input.status, VIRTIO_CRYPTO_ERR); - } else { - /* Set the session id */ - stq_le_p(&input.session_id, session_id); - stl_le_p(&input.status, VIRTIO_CRYPTO_OK); + sreq->cb = virtio_crypto_create_session_completion; + ret = virtio_crypto_create_sym_session(vcrypto, + &ctrl.u.sym_create_session, + queue_id, opcode, + out_iov, out_num, + sreq); + if (ret < 0) { + virtio_crypto_create_session_completion(sreq, ret); } - - s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input)); - if (unlikely(s != sizeof(input))) { - virtio_error(vdev, "virtio-crypto input incorrect"); - virtqueue_detach_element(vq, elem, 0); - break; - } - virtqueue_push(vq, elem, sizeof(input)); - virtio_notify(vdev, vq); break; + + case VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION: + sreq->cb = virtio_crypto_create_session_completion; + ret = virtio_crypto_create_asym_session(vcrypto, + &ctrl.u.akcipher_create_session, + queue_id, opcode, + out_iov, out_num, + sreq); + if (ret < 0) { + virtio_crypto_create_session_completion(sreq, ret); + } + break; + case VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION: case VIRTIO_CRYPTO_HASH_DESTROY_SESSION: case VIRTIO_CRYPTO_MAC_DESTROY_SESSION: case VIRTIO_CRYPTO_AEAD_DESTROY_SESSION: - status = virtio_crypto_handle_close_session(vcrypto, - &ctrl.u.destroy_session, queue_id); - /* The status only occupy one byte, we can directly use it */ - s = iov_from_buf(in_iov, in_num, 0, &status, sizeof(status)); - if (unlikely(s != sizeof(status))) { - virtio_error(vdev, "virtio-crypto status incorrect"); - virtqueue_detach_element(vq, elem, 0); - break; + case VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION: + sreq->cb = virtio_crypto_destroy_session_completion; + ret = virtio_crypto_handle_close_session(vcrypto, + &ctrl.u.destroy_session, queue_id, + sreq); + if (ret < 0) { + virtio_crypto_destroy_session_completion(sreq, ret); } - virtqueue_push(vq, elem, sizeof(status)); - virtio_notify(vdev, vq); break; + case VIRTIO_CRYPTO_HASH_CREATE_SESSION: case VIRTIO_CRYPTO_MAC_CREATE_SESSION: case VIRTIO_CRYPTO_AEAD_CREATE_SESSION: default: - error_report("virtio-crypto unsupported ctrl opcode: %d", opcode); memset(&input, 0, sizeof(input)); + error_report("virtio-crypto unsupported ctrl opcode: %d", opcode); stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP); s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input)); if (unlikely(s != sizeof(input))) { virtio_error(vdev, "virtio-crypto input incorrect"); virtqueue_detach_element(vq, elem, 0); - break; + } else { + virtqueue_push(vq, elem, sizeof(input)); + virtio_notify(vdev, vq); } - virtqueue_push(vq, elem, sizeof(input)); - virtio_notify(vdev, vq); + g_free(sreq); + g_free(elem); break; } /* end switch case */ - g_free(elem); } /* end for loop */ } @@ -339,16 +463,20 @@ static void virtio_crypto_init_request(VirtIOCrypto *vcrypto, VirtQueue *vq, req->in_num = 0; req->in_len = 0; req->flags = CRYPTODEV_BACKEND_ALG__MAX; - req->u.sym_op_info = NULL; + memset(&req->op_info, 0x00, sizeof(req->op_info)); } static void virtio_crypto_free_request(VirtIOCryptoReq *req) { - if (req) { - if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) { - size_t max_len; - 
CryptoDevBackendSymOpInfo *op_info = req->u.sym_op_info; + if (!req) { + return; + } + if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) { + size_t max_len; + CryptoDevBackendSymOpInfo *op_info = req->op_info.u.sym_op_info; + + if (op_info) { max_len = op_info->iv_len + op_info->aad_len + op_info->src_len + @@ -359,8 +487,18 @@ static void virtio_crypto_free_request(VirtIOCryptoReq *req) memset(op_info, 0, sizeof(*op_info) + max_len); g_free(op_info); } - g_free(req); + } else if (req->flags == CRYPTODEV_BACKEND_ALG_ASYM) { + CryptoDevBackendAsymOpInfo *op_info = req->op_info.u.asym_op_info; + if (op_info) { + g_free(op_info->src); + g_free(op_info->dst); + memset(op_info, 0, sizeof(*op_info)); + g_free(op_info); + } } + + g_free(req->in_iov); + g_free(req); } static void @@ -370,6 +508,7 @@ virtio_crypto_sym_input_data_helper(VirtIODevice *vdev, CryptoDevBackendSymOpInfo *sym_op_info) { size_t s, len; + struct iovec *in_iov = req->in_iov; if (status != VIRTIO_CRYPTO_OK) { return; @@ -377,18 +516,18 @@ virtio_crypto_sym_input_data_helper(VirtIODevice *vdev, len = sym_op_info->src_len; /* Save the cipher result */ - s = iov_from_buf(req->in_iov, req->in_num, 0, sym_op_info->dst, len); + s = iov_from_buf(in_iov, req->in_num, 0, sym_op_info->dst, len); if (s != len) { virtio_error(vdev, "virtio-crypto dest data incorrect"); return; } - iov_discard_front(&req->in_iov, &req->in_num, len); + iov_discard_front(&in_iov, &req->in_num, len); if (sym_op_info->op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) { /* Save the digest result */ - s = iov_from_buf(req->in_iov, req->in_num, 0, + s = iov_from_buf(in_iov, req->in_num, 0, sym_op_info->digest_result, sym_op_info->digest_result_len); if (s != sym_op_info->digest_result_len) { @@ -397,18 +536,53 @@ virtio_crypto_sym_input_data_helper(VirtIODevice *vdev, } } -static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint8_t status) +static void +virtio_crypto_akcipher_input_data_helper(VirtIODevice *vdev, + VirtIOCryptoReq *req, int32_t status, + CryptoDevBackendAsymOpInfo *asym_op_info) { + size_t s, len; + struct iovec *in_iov = req->in_iov; + + if (status != VIRTIO_CRYPTO_OK) { + return; + } + + len = asym_op_info->dst_len; + if (!len) { + return; + } + + s = iov_from_buf(in_iov, req->in_num, 0, asym_op_info->dst, len); + if (s != len) { + virtio_error(vdev, "virtio-crypto asym dest data incorrect"); + return; + } + + iov_discard_front(&in_iov, &req->in_num, len); + + /* For akcipher, dst_len may be changed after operation */ + req->in_len = sizeof(struct virtio_crypto_inhdr) + asym_op_info->dst_len; +} + +static void virtio_crypto_req_complete(void *opaque, int ret) +{ + VirtIOCryptoReq *req = (VirtIOCryptoReq *)opaque; VirtIOCrypto *vcrypto = req->vcrypto; VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto); + uint8_t status = -ret; if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) { virtio_crypto_sym_input_data_helper(vdev, req, status, - req->u.sym_op_info); + req->op_info.u.sym_op_info); + } else if (req->flags == CRYPTODEV_BACKEND_ALG_ASYM) { + virtio_crypto_akcipher_input_data_helper(vdev, req, status, + req->op_info.u.asym_op_info); } stb_p(&req->in->status, status); virtqueue_push(req->vq, &req->elem, req->in_len); virtio_notify(vdev, req->vq); + virtio_crypto_free_request(req); } static VirtIOCryptoReq * @@ -543,41 +717,100 @@ err: static int virtio_crypto_handle_sym_req(VirtIOCrypto *vcrypto, struct virtio_crypto_sym_data_req *req, - CryptoDevBackendSymOpInfo **sym_op_info, + CryptoDevBackendOpInfo *op_info, struct iovec *iov, unsigned int 
out_num) { VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto); + CryptoDevBackendSymOpInfo *sym_op_info; uint32_t op_type; - CryptoDevBackendSymOpInfo *op_info; op_type = ldl_le_p(&req->op_type); - if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) { - op_info = virtio_crypto_sym_op_helper(vdev, &req->u.cipher.para, + sym_op_info = virtio_crypto_sym_op_helper(vdev, &req->u.cipher.para, NULL, iov, out_num); - if (!op_info) { + if (!sym_op_info) { return -EFAULT; } - op_info->op_type = op_type; } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) { - op_info = virtio_crypto_sym_op_helper(vdev, NULL, + sym_op_info = virtio_crypto_sym_op_helper(vdev, NULL, &req->u.chain.para, iov, out_num); - if (!op_info) { + if (!sym_op_info) { return -EFAULT; } - op_info->op_type = op_type; } else { /* VIRTIO_CRYPTO_SYM_OP_NONE */ error_report("virtio-crypto unsupported cipher type"); return -VIRTIO_CRYPTO_NOTSUPP; } - *sym_op_info = op_info; + sym_op_info->op_type = op_type; + op_info->u.sym_op_info = sym_op_info; return 0; } +static int +virtio_crypto_handle_asym_req(VirtIOCrypto *vcrypto, + struct virtio_crypto_akcipher_data_req *req, + CryptoDevBackendOpInfo *op_info, + struct iovec *iov, unsigned int out_num) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto); + CryptoDevBackendAsymOpInfo *asym_op_info; + uint32_t src_len; + uint32_t dst_len; + uint32_t len; + uint8_t *src = NULL; + uint8_t *dst = NULL; + + asym_op_info = g_new0(CryptoDevBackendAsymOpInfo, 1); + src_len = ldl_le_p(&req->para.src_data_len); + dst_len = ldl_le_p(&req->para.dst_data_len); + + if (src_len > 0) { + src = g_malloc0(src_len); + len = iov_to_buf(iov, out_num, 0, src, src_len); + if (unlikely(len != src_len)) { + virtio_error(vdev, "virtio-crypto asym src data incorrect" + "expected %u, actual %u", src_len, len); + goto err; + } + + iov_discard_front(&iov, &out_num, src_len); + } + + if (dst_len > 0) { + dst = g_malloc0(dst_len); + + if (op_info->op_code == VIRTIO_CRYPTO_AKCIPHER_VERIFY) { + len = iov_to_buf(iov, out_num, 0, dst, dst_len); + if (unlikely(len != dst_len)) { + virtio_error(vdev, "virtio-crypto asym dst data incorrect" + "expected %u, actual %u", dst_len, len); + goto err; + } + + iov_discard_front(&iov, &out_num, dst_len); + } + } + + asym_op_info->src_len = src_len; + asym_op_info->dst_len = dst_len; + asym_op_info->src = src; + asym_op_info->dst = dst; + op_info->u.asym_op_info = asym_op_info; + + return 0; + + err: + g_free(asym_op_info); + g_free(src); + g_free(dst); + + return -EFAULT; +} + static int virtio_crypto_handle_request(VirtIOCryptoReq *request) { @@ -594,10 +827,7 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request) unsigned in_num; unsigned out_num; uint32_t opcode; - uint8_t status = VIRTIO_CRYPTO_ERR; - uint64_t session_id; - CryptoDevBackendSymOpInfo *sym_op_info = NULL; - Error *local_err = NULL; + CryptoDevBackendOpInfo *op_info = &request->op_info; if (elem->out_num < 1 || elem->in_num < 1) { virtio_error(vdev, "virtio-crypto dataq missing headers"); @@ -605,11 +835,11 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request) } out_num = elem->out_num; - out_iov_copy = g_memdup(elem->out_sg, sizeof(out_iov[0]) * out_num); + out_iov_copy = g_memdup2(elem->out_sg, sizeof(out_iov[0]) * out_num); out_iov = out_iov_copy; in_num = elem->in_num; - in_iov_copy = g_memdup(elem->in_sg, sizeof(in_iov[0]) * in_num); + in_iov_copy = g_memdup2(elem->in_sg, sizeof(in_iov[0]) * in_num); in_iov = in_iov_copy; if (unlikely(iov_to_buf(out_iov, out_num, 0, &req, sizeof(req)) @@ -637,43 +867,48 @@ 
virtio_crypto_handle_request(VirtIOCryptoReq *request) */ request->in_num = in_num; request->in_iov = in_iov; + /* now, we free the in_iov_copy inside virtio_crypto_free_request */ + in_iov_copy = NULL; opcode = ldl_le_p(&req.header.opcode); - session_id = ldq_le_p(&req.header.session_id); + op_info->session_id = ldq_le_p(&req.header.session_id); + op_info->op_code = opcode; switch (opcode) { case VIRTIO_CRYPTO_CIPHER_ENCRYPT: case VIRTIO_CRYPTO_CIPHER_DECRYPT: + op_info->algtype = request->flags = CRYPTODEV_BACKEND_ALG_SYM; ret = virtio_crypto_handle_sym_req(vcrypto, - &req.u.sym_req, - &sym_op_info, + &req.u.sym_req, op_info, out_iov, out_num); + goto check_result; + + case VIRTIO_CRYPTO_AKCIPHER_ENCRYPT: + case VIRTIO_CRYPTO_AKCIPHER_DECRYPT: + case VIRTIO_CRYPTO_AKCIPHER_SIGN: + case VIRTIO_CRYPTO_AKCIPHER_VERIFY: + op_info->algtype = request->flags = CRYPTODEV_BACKEND_ALG_ASYM; + ret = virtio_crypto_handle_asym_req(vcrypto, + &req.u.akcipher_req, op_info, + out_iov, out_num); + +check_result: /* Serious errors, need to reset virtio crypto device */ if (ret == -EFAULT) { return -1; } else if (ret == -VIRTIO_CRYPTO_NOTSUPP) { - virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP); - virtio_crypto_free_request(request); + virtio_crypto_req_complete(request, -VIRTIO_CRYPTO_NOTSUPP); } else { - sym_op_info->session_id = session_id; - - /* Set request's parameter */ - request->flags = CRYPTODEV_BACKEND_ALG_SYM; - request->u.sym_op_info = sym_op_info; ret = cryptodev_backend_crypto_operation(vcrypto->cryptodev, - request, queue_index, &local_err); + request, queue_index, + virtio_crypto_req_complete, + request); if (ret < 0) { - status = -ret; - if (local_err) { - error_report_err(local_err); - } - } else { /* ret == VIRTIO_CRYPTO_OK */ - status = ret; + virtio_crypto_req_complete(request, ret); } - virtio_crypto_req_complete(request, status); - virtio_crypto_free_request(request); } break; + case VIRTIO_CRYPTO_HASH: case VIRTIO_CRYPTO_MAC: case VIRTIO_CRYPTO_AEAD_ENCRYPT: @@ -681,8 +916,7 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request) default: error_report("virtio-crypto unsupported dataq opcode: %u", opcode); - virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP); - virtio_crypto_free_request(request); + virtio_crypto_req_complete(request, -VIRTIO_CRYPTO_NOTSUPP); } return 0; @@ -779,6 +1013,7 @@ static void virtio_crypto_init_config(VirtIODevice *vdev) vcrypto->conf.mac_algo_l = vcrypto->conf.cryptodev->conf.mac_algo_l; vcrypto->conf.mac_algo_h = vcrypto->conf.cryptodev->conf.mac_algo_h; vcrypto->conf.aead_algo = vcrypto->conf.cryptodev->conf.aead_algo; + vcrypto->conf.akcipher_algo = vcrypto->conf.cryptodev->conf.akcipher_algo; vcrypto->conf.max_cipher_key_len = vcrypto->conf.cryptodev->conf.max_cipher_key_len; vcrypto->conf.max_auth_key_len = @@ -810,9 +1045,9 @@ static void virtio_crypto_device_realize(DeviceState *dev, Error **errp) return; } - virtio_init(vdev, "virtio-crypto", VIRTIO_ID_CRYPTO, vcrypto->config_size); + virtio_init(vdev, VIRTIO_ID_CRYPTO, vcrypto->config_size); vcrypto->curr_queues = 1; - vcrypto->vqs = g_malloc0(sizeof(VirtIOCryptoQueue) * vcrypto->max_queues); + vcrypto->vqs = g_new0(VirtIOCryptoQueue, vcrypto->max_queues); for (i = 0; i < vcrypto->max_queues; i++) { vcrypto->vqs[i].dataq = virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq_bh); @@ -821,7 +1056,7 @@ static void virtio_crypto_device_realize(DeviceState *dev, Error **errp) vcrypto->vqs[i].vcrypto = vcrypto; } - vcrypto->ctrl_vq = virtio_add_queue(vdev, 64, 
virtio_crypto_handle_ctrl); + vcrypto->ctrl_vq = virtio_add_queue(vdev, 1024, virtio_crypto_handle_ctrl); if (!cryptodev_backend_is_ready(vcrypto->cryptodev)) { vcrypto->status &= ~VIRTIO_CRYPTO_S_HW_READY; } else { @@ -891,6 +1126,7 @@ static void virtio_crypto_get_config(VirtIODevice *vdev, uint8_t *config) stl_le_p(&crypto_cfg.max_cipher_key_len, c->conf.max_cipher_key_len); stl_le_p(&crypto_cfg.max_auth_key_len, c->conf.max_auth_key_len); stq_le_p(&crypto_cfg.max_size, c->conf.max_size); + stl_le_p(&crypto_cfg.akcipher_algo, c->conf.akcipher_algo); memcpy(config, &crypto_cfg, c->config_size); } @@ -961,6 +1197,15 @@ static bool virtio_crypto_guest_notifier_pending(VirtIODevice *vdev, int idx) return cryptodev_vhost_virtqueue_pending(vdev, queue, idx); } +static struct vhost_dev *virtio_crypto_get_vhost(VirtIODevice *vdev) +{ + VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev); + CryptoDevBackend *b = vcrypto->cryptodev; + CryptoDevBackendClient *cc = b->conf.peers.ccs[0]; + CryptoDevBackendVhost *vhost_crypto = cryptodev_get_vhost(cc, b, 0); + return &vhost_crypto->dev; +} + static void virtio_crypto_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -977,6 +1222,7 @@ static void virtio_crypto_class_init(ObjectClass *klass, void *data) vdc->set_status = virtio_crypto_set_status; vdc->guest_notifier_mask = virtio_crypto_guest_notifier_mask; vdc->guest_notifier_pending = virtio_crypto_guest_notifier_pending; + vdc->get_vhost = virtio_crypto_get_vhost; } static void virtio_crypto_instance_init(Object *obj) diff --git a/hw/virtio/virtio-input-host-pci.c b/hw/virtio/virtio-input-host-pci.c index 0ac360de4f..cf8a9cf9e8 100644 --- a/hw/virtio/virtio-input-host-pci.c +++ b/hw/virtio/virtio-input-host-pci.c @@ -8,7 +8,7 @@ #include "qemu/osdep.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" #include "hw/virtio/virtio-input.h" #include "qemu/module.h" #include "qom/object.h" diff --git a/hw/virtio/virtio-input-pci.c b/hw/virtio/virtio-input-pci.c index 48e9ff38e2..a9d0992389 100644 --- a/hw/virtio/virtio-input-pci.c +++ b/hw/virtio/virtio-input-pci.c @@ -8,7 +8,7 @@ #include "qemu/osdep.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" #include "hw/qdev-properties.h" #include "hw/virtio/virtio-input.h" #include "qemu/module.h" diff --git a/hw/virtio/virtio-iommu-pci.c b/hw/virtio/virtio-iommu-pci.c index 770c286be7..7ef2f9dcdb 100644 --- a/hw/virtio/virtio-iommu-pci.c +++ b/hw/virtio/virtio-iommu-pci.c @@ -11,12 +11,13 @@ #include "qemu/osdep.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" #include "hw/virtio/virtio-iommu.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" #include "qapi/error.h" #include "hw/boards.h" +#include "hw/pci/pci_bus.h" #include "qom/object.h" typedef struct VirtIOIOMMUPCI VirtIOIOMMUPCI; @@ -44,20 +45,13 @@ static Property virtio_iommu_pci_properties[] = { static void virtio_iommu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) { VirtIOIOMMUPCI *dev = VIRTIO_IOMMU_PCI(vpci_dev); + PCIBus *pbus = pci_get_bus(&vpci_dev->pci_dev); DeviceState *vdev = DEVICE(&dev->vdev); VirtIOIOMMU *s = VIRTIO_IOMMU(vdev); if (!qdev_get_machine_hotplug_handler(DEVICE(vpci_dev))) { - MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); - - error_setg(errp, - "%s machine fails to create iommu-map device tree bindings", - mc->name); - error_append_hint(errp, - "Check your machine implements a hotplug handler " - "for the virtio-iommu-pci device\n"); - error_append_hint(errp, "Check the 
guest is booted without FW or with " - "-no-acpi\n"); + error_setg(errp, "Check your machine implements a hotplug handler " + "for the virtio-iommu-pci device"); return; } for (int i = 0; i < s->nb_reserved_regions; i++) { @@ -65,11 +59,17 @@ static void virtio_iommu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) s->reserved_regions[i].type != VIRTIO_IOMMU_RESV_MEM_T_MSI) { error_setg(errp, "reserved region %d has an invalid type", i); error_append_hint(errp, "Valid values are 0 and 1\n"); + return; } } + if (!pci_bus_is_root(pbus)) { + error_setg(errp, "virtio-iommu-pci must be plugged on the root bus"); + return; + } + object_property_set_link(OBJECT(dev), "primary-bus", - OBJECT(pci_get_bus(&vpci_dev->pci_dev)), - &error_abort); + OBJECT(pbus), &error_abort); + virtio_pci_force_virtio_1(vpci_dev); qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } @@ -82,8 +82,6 @@ static void virtio_iommu_pci_class_init(ObjectClass *klass, void *data) k->realize = virtio_iommu_pci_realize; set_bit(DEVICE_CATEGORY_MISC, dc->categories); device_class_set_props(dc, virtio_iommu_pci_properties); - pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; - pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_IOMMU; pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; pcidev_k->class_id = PCI_CLASS_OTHERS; dc->hotpluggable = false; @@ -98,9 +96,7 @@ static void virtio_iommu_pci_instance_init(Object *obj) } static const VirtioPCIDeviceTypeInfo virtio_iommu_pci_info = { - .base_name = TYPE_VIRTIO_IOMMU_PCI, - .generic_name = "virtio-iommu-pci", - .non_transitional_name = "virtio-iommu-pci-non-transitional", + .generic_name = TYPE_VIRTIO_IOMMU_PCI, .instance_size = sizeof(VirtIOIOMMUPCI), .instance_init = virtio_iommu_pci_instance_init, .class_init = virtio_iommu_pci_class_init, diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c index 1b23e8e18c..62e07ec2e4 100644 --- a/hw/virtio/virtio-iommu.c +++ b/hw/virtio/virtio-iommu.c @@ -20,10 +20,10 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "qemu/iov.h" -#include "qemu-common.h" #include "hw/qdev-properties.h" #include "hw/virtio/virtio.h" #include "sysemu/kvm.h" +#include "sysemu/reset.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "trace.h" @@ -42,6 +42,7 @@ typedef struct VirtIOIOMMUDomain { uint32_t id; + bool bypass; GTree *mappings; QLIST_HEAD(, VirtIOIOMMUEndpoint) endpoint_list; } VirtIOIOMMUDomain; @@ -68,6 +69,77 @@ static inline uint16_t virtio_iommu_get_bdf(IOMMUDevice *dev) return PCI_BUILD_BDF(pci_bus_num(dev->bus), dev->devfn); } +static bool virtio_iommu_device_bypassed(IOMMUDevice *sdev) +{ + uint32_t sid; + bool bypassed; + VirtIOIOMMU *s = sdev->viommu; + VirtIOIOMMUEndpoint *ep; + + sid = virtio_iommu_get_bdf(sdev); + + qemu_rec_mutex_lock(&s->mutex); + /* need to check bypass before system reset */ + if (!s->endpoints) { + bypassed = s->config.bypass; + goto unlock; + } + + ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid)); + if (!ep || !ep->domain) { + bypassed = s->config.bypass; + } else { + bypassed = ep->domain->bypass; + } + +unlock: + qemu_rec_mutex_unlock(&s->mutex); + return bypassed; +} + +/* Return whether the device is using IOMMU translation. 
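
/*
 * Minimal sketch of why s->mutex becomes recursive in this patch, written
 * against plain GLib rather than the QEMU qemu_rec_mutex_* wrappers: the
 * bypass check takes the lock, and it is also reached from request handlers
 * that already hold it (attach -> switch_address_space -> bypassed), which a
 * non-recursive mutex would deadlock on.
 */
#include <glib.h>
#include <stdio.h>

static GRecMutex lock;

static gboolean device_bypassed(void)
{
    gboolean bypassed;

    g_rec_mutex_lock(&lock);     /* safe even if the caller already holds it */
    bypassed = TRUE;             /* stand-in for the endpoint/domain lookup */
    g_rec_mutex_unlock(&lock);
    return bypassed;
}

static void handle_attach(void)
{
    g_rec_mutex_lock(&lock);     /* outer lock, as in the command handler */
    printf("bypassed: %d\n", device_bypassed());
    g_rec_mutex_unlock(&lock);
}

int main(void)
{
    g_rec_mutex_init(&lock);
    handle_attach();
    g_rec_mutex_clear(&lock);
    return 0;
}
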
*/ +static bool virtio_iommu_switch_address_space(IOMMUDevice *sdev) +{ + bool use_remapping; + + assert(sdev); + + use_remapping = !virtio_iommu_device_bypassed(sdev); + + trace_virtio_iommu_switch_address_space(pci_bus_num(sdev->bus), + PCI_SLOT(sdev->devfn), + PCI_FUNC(sdev->devfn), + use_remapping); + + /* Turn off first then on the other */ + if (use_remapping) { + memory_region_set_enabled(&sdev->bypass_mr, false); + memory_region_set_enabled(MEMORY_REGION(&sdev->iommu_mr), true); + } else { + memory_region_set_enabled(MEMORY_REGION(&sdev->iommu_mr), false); + memory_region_set_enabled(&sdev->bypass_mr, true); + } + + return use_remapping; +} + +static void virtio_iommu_switch_address_space_all(VirtIOIOMMU *s) +{ + GHashTableIter iter; + IOMMUPciBus *iommu_pci_bus; + int i; + + g_hash_table_iter_init(&iter, s->as_by_busptr); + while (g_hash_table_iter_next(&iter, NULL, (void **)&iommu_pci_bus)) { + for (i = 0; i < PCI_DEVFN_MAX; i++) { + if (!iommu_pci_bus->pbdev[i]) { + continue; + } + virtio_iommu_switch_address_space(iommu_pci_bus->pbdev[i]); + } + } +} + /** * The bus number is used for lookup when SID based operations occur. * In that case we lazily populate the IOMMUPciBus array from the bus hash @@ -125,6 +197,32 @@ static gint interval_cmp(gconstpointer a, gconstpointer b, gpointer user_data) } } +static void virtio_iommu_notify_map_unmap(IOMMUMemoryRegion *mr, + IOMMUTLBEvent *event, + hwaddr virt_start, hwaddr virt_end) +{ + uint64_t delta = virt_end - virt_start; + + event->entry.iova = virt_start; + event->entry.addr_mask = delta; + + if (delta == UINT64_MAX) { + memory_region_notify_iommu(mr, 0, *event); + } + + while (virt_start != virt_end + 1) { + uint64_t mask = dma_aligned_pow2_mask(virt_start, virt_end, 64); + + event->entry.addr_mask = mask; + event->entry.iova = virt_start; + memory_region_notify_iommu(mr, 0, *event); + virt_start += mask + 1; + if (event->entry.perm != IOMMU_NONE) { + event->entry.translated_addr += mask + 1; + } + } +} + static void virtio_iommu_notify_map(IOMMUMemoryRegion *mr, hwaddr virt_start, hwaddr virt_end, hwaddr paddr, uint32_t flags) @@ -143,19 +241,16 @@ static void virtio_iommu_notify_map(IOMMUMemoryRegion *mr, hwaddr virt_start, event.type = IOMMU_NOTIFIER_MAP; event.entry.target_as = &address_space_memory; - event.entry.addr_mask = virt_end - virt_start; - event.entry.iova = virt_start; event.entry.perm = perm; event.entry.translated_addr = paddr; - memory_region_notify_iommu(mr, 0, event); + virtio_iommu_notify_map_unmap(mr, &event, virt_start, virt_end); } static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start, hwaddr virt_end) { IOMMUTLBEvent event; - uint64_t delta = virt_end - virt_start; if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_UNMAP)) { return; @@ -167,22 +262,8 @@ static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start, event.entry.target_as = &address_space_memory; event.entry.perm = IOMMU_NONE; event.entry.translated_addr = 0; - event.entry.addr_mask = delta; - event.entry.iova = virt_start; - if (delta == UINT64_MAX) { - memory_region_notify_iommu(mr, 0, event); - } - - - while (virt_start != virt_end + 1) { - uint64_t mask = dma_aligned_pow2_mask(virt_start, virt_end, 64); - - event.entry.addr_mask = mask; - event.entry.iova = virt_start; - memory_region_notify_iommu(mr, 0, event); - virt_start += mask + 1; - } + virtio_iommu_notify_map_unmap(mr, &event, virt_start, virt_end); } static gboolean virtio_iommu_notify_unmap_cb(gpointer key, gpointer value, @@ -212,6 
+293,7 @@ static gboolean virtio_iommu_notify_map_cb(gpointer key, gpointer value, static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep) { VirtIOIOMMUDomain *domain = ep->domain; + IOMMUDevice *sdev = container_of(ep->iommu_mr, IOMMUDevice, iommu_mr); if (!ep->domain) { return; @@ -220,6 +302,7 @@ static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep) ep->iommu_mr); QLIST_REMOVE(ep, next); ep->domain = NULL; + virtio_iommu_switch_address_space(sdev); } static VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s, @@ -257,12 +340,16 @@ static void virtio_iommu_put_endpoint(gpointer data) } static VirtIOIOMMUDomain *virtio_iommu_get_domain(VirtIOIOMMU *s, - uint32_t domain_id) + uint32_t domain_id, + bool bypass) { VirtIOIOMMUDomain *domain; domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id)); if (domain) { + if (domain->bypass != bypass) { + return NULL; + } return domain; } domain = g_malloc0(sizeof(*domain)); @@ -270,6 +357,7 @@ static VirtIOIOMMUDomain *virtio_iommu_get_domain(VirtIOIOMMU *s, domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp, NULL, (GDestroyNotify)g_free, (GDestroyNotify)g_free); + domain->bypass = bypass; g_tree_insert(s->domains, GUINT_TO_POINTER(domain_id), domain); QLIST_INIT(&domain->endpoint_list); trace_virtio_iommu_get_domain(domain_id); @@ -309,7 +397,7 @@ static AddressSpace *virtio_iommu_find_add_as(PCIBus *bus, void *opaque, char *name = g_strdup_printf("%s-%d-%d", TYPE_VIRTIO_IOMMU_MEMORY_REGION, mr_index++, devfn); - sdev = sbus->pbdev[devfn] = g_malloc0(sizeof(IOMMUDevice)); + sdev = sbus->pbdev[devfn] = g_new0(IOMMUDevice, 1); sdev->viommu = s; sdev->bus = bus; @@ -317,12 +405,39 @@ static AddressSpace *virtio_iommu_find_add_as(PCIBus *bus, void *opaque, trace_virtio_iommu_init_iommu_mr(name); + memory_region_init(&sdev->root, OBJECT(s), name, UINT64_MAX); + address_space_init(&sdev->as, &sdev->root, TYPE_VIRTIO_IOMMU); + + /* + * Build the IOMMU disabled container with aliases to the + * shared MRs. Note that aliasing to a shared memory region + * could help the memory API to detect same FlatViews so we + * can have devices to share the same FlatView when in bypass + * mode. (either by not configuring virtio-iommu driver or with + * "iommu=pt"). It will greatly reduce the total number of + * FlatViews of the system hence VM runs faster. 
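
/*
 * Self-contained sketch of the range splitting that
 * virtio_iommu_notify_map_unmap() performs above. aligned_pow2_mask() is my
 * own stand-in for QEMU's dma_aligned_pow2_mask(): an inclusive [start, end]
 * IOVA range is notified as a series of naturally aligned power-of-two chunks,
 * which is the form IOMMU notifier entries (iova + addr_mask) expect.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Largest mask (2^n - 1) such that start is 2^n-aligned and start + mask <= end. */
static uint64_t aligned_pow2_mask(uint64_t start, uint64_t end)
{
    uint64_t mask = UINT64_MAX;

    while (mask &&
           ((start & mask) || start + mask < start || start + mask > end)) {
        mask >>= 1;
    }
    return mask;
}

int main(void)
{
    uint64_t start = 0x1000, end = 0x57ff;   /* inclusive range, as in the patch */

    if (end - start == UINT64_MAX) {
        printf("whole address space: addr_mask 0x%" PRIx64 "\n", UINT64_MAX);
    }
    while (start != end + 1) {
        uint64_t mask = aligned_pow2_mask(start, end);

        printf("chunk: iova 0x%" PRIx64 " addr_mask 0x%" PRIx64 "\n", start, mask);
        start += mask + 1;
    }
    return 0;
}
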
+ */ + memory_region_init_alias(&sdev->bypass_mr, OBJECT(s), + "system", get_system_memory(), 0, + memory_region_size(get_system_memory())); + memory_region_init_iommu(&sdev->iommu_mr, sizeof(sdev->iommu_mr), TYPE_VIRTIO_IOMMU_MEMORY_REGION, OBJECT(s), name, UINT64_MAX); - address_space_init(&sdev->as, - MEMORY_REGION(&sdev->iommu_mr), TYPE_VIRTIO_IOMMU); + + /* + * Hook both the containers under the root container, we + * switch between iommu & bypass MRs by enable/disable + * corresponding sub-containers + */ + memory_region_add_subregion_overlap(&sdev->root, 0, + MEMORY_REGION(&sdev->iommu_mr), + 0); + memory_region_add_subregion_overlap(&sdev->root, 0, + &sdev->bypass_mr, 0); + + virtio_iommu_switch_address_space(sdev); g_free(name); } return &sdev->as; @@ -333,11 +448,17 @@ static int virtio_iommu_attach(VirtIOIOMMU *s, { uint32_t domain_id = le32_to_cpu(req->domain); uint32_t ep_id = le32_to_cpu(req->endpoint); + uint32_t flags = le32_to_cpu(req->flags); VirtIOIOMMUDomain *domain; VirtIOIOMMUEndpoint *ep; + IOMMUDevice *sdev; trace_virtio_iommu_attach(domain_id, ep_id); + if (flags & ~VIRTIO_IOMMU_ATTACH_F_BYPASS) { + return VIRTIO_IOMMU_S_INVAL; + } + ep = virtio_iommu_get_endpoint(s, ep_id); if (!ep) { return VIRTIO_IOMMU_S_NOENT; @@ -355,10 +476,17 @@ static int virtio_iommu_attach(VirtIOIOMMU *s, } } - domain = virtio_iommu_get_domain(s, domain_id); + domain = virtio_iommu_get_domain(s, domain_id, + flags & VIRTIO_IOMMU_ATTACH_F_BYPASS); + if (!domain) { + /* Incompatible bypass flag */ + return VIRTIO_IOMMU_S_INVAL; + } QLIST_INSERT_HEAD(&domain->endpoint_list, ep, next); ep->domain = domain; + sdev = container_of(ep->iommu_mr, IOMMUDevice, iommu_mr); + virtio_iommu_switch_address_space(sdev); /* Replay domain mappings on the associated memory region */ g_tree_foreach(domain->mappings, virtio_iommu_notify_map_cb, @@ -418,6 +546,10 @@ static int virtio_iommu_map(VirtIOIOMMU *s, return VIRTIO_IOMMU_S_NOENT; } + if (domain->bypass) { + return VIRTIO_IOMMU_S_INVAL; + } + interval = g_malloc0(sizeof(*interval)); interval->low = virt_start; @@ -463,6 +595,11 @@ static int virtio_iommu_unmap(VirtIOIOMMU *s, if (!domain) { return VIRTIO_IOMMU_S_NOENT; } + + if (domain->bypass) { + return VIRTIO_IOMMU_S_INVAL; + } + interval.low = virt_start; interval.high = virt_end; @@ -547,11 +684,10 @@ static int virtio_iommu_probe(VirtIOIOMMU *s, static int virtio_iommu_iov_to_req(struct iovec *iov, unsigned int iov_cnt, - void *req, size_t req_sz) + void *req, size_t payload_sz) { - size_t sz, payload_sz = req_sz - sizeof(struct virtio_iommu_req_tail); + size_t sz = iov_to_buf(iov, iov_cnt, 0, req, payload_sz); - sz = iov_to_buf(iov, iov_cnt, 0, req, payload_sz); if (unlikely(sz != payload_sz)) { return VIRTIO_IOMMU_S_INVAL; } @@ -564,7 +700,8 @@ static int virtio_iommu_handle_ ## __req(VirtIOIOMMU *s, \ unsigned int iov_cnt) \ { \ struct virtio_iommu_req_ ## __req req; \ - int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req)); \ + int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, \ + sizeof(req) - sizeof(struct virtio_iommu_req_tail));\ \ return ret ? 
ret : virtio_iommu_ ## __req(s, &req); \ } @@ -617,7 +754,7 @@ static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq) tail.status = VIRTIO_IOMMU_S_DEVERR; goto out; } - qemu_mutex_lock(&s->mutex); + qemu_rec_mutex_lock(&s->mutex); switch (head.type) { case VIRTIO_IOMMU_T_ATTACH: tail.status = virtio_iommu_handle_attach(s, iov, iov_cnt); @@ -646,7 +783,7 @@ static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq) default: tail.status = VIRTIO_IOMMU_S_UNSUPP; } - qemu_mutex_unlock(&s->mutex); + qemu_rec_mutex_unlock(&s->mutex); out: sz = iov_from_buf(elem->in_sg, elem->in_num, 0, @@ -657,6 +794,7 @@ out: virtio_notify(vdev, vq); g_free(elem); g_free(buf); + buf = NULL; } } @@ -728,15 +866,18 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr, .perm = IOMMU_NONE, }; - bypass_allowed = virtio_vdev_has_feature(&s->parent_obj, - VIRTIO_IOMMU_F_BYPASS); + bypass_allowed = s->config.bypass; sid = virtio_iommu_get_bdf(sdev); trace_virtio_iommu_translate(mr->parent_obj.name, sid, addr, flag); - qemu_mutex_lock(&s->mutex); + qemu_rec_mutex_lock(&s->mutex); ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid)); + + if (bypass_allowed) + assert(ep && ep->domain && !ep->domain->bypass); + if (!ep) { if (!bypass_allowed) { error_report_once("%s sid=%d is not known!!", __func__, sid); @@ -780,6 +921,9 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr, entry.perm = flag; } goto unlock; + } else if (ep->domain->bypass) { + entry.perm = flag; + goto unlock; } found = g_tree_lookup_extended(ep->domain->mappings, (gpointer)(&interval), @@ -815,34 +959,54 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr, trace_virtio_iommu_translate_out(addr, entry.translated_addr, sid); unlock: - qemu_mutex_unlock(&s->mutex); + qemu_rec_mutex_unlock(&s->mutex); return entry; } static void virtio_iommu_get_config(VirtIODevice *vdev, uint8_t *config_data) { VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev); - struct virtio_iommu_config *config = &dev->config; + struct virtio_iommu_config *dev_config = &dev->config; + struct virtio_iommu_config *out_config = (void *)config_data; - trace_virtio_iommu_get_config(config->page_size_mask, - config->input_range.start, - config->input_range.end, - config->domain_range.end, - config->probe_size); - memcpy(config_data, &dev->config, sizeof(struct virtio_iommu_config)); + out_config->page_size_mask = cpu_to_le64(dev_config->page_size_mask); + out_config->input_range.start = cpu_to_le64(dev_config->input_range.start); + out_config->input_range.end = cpu_to_le64(dev_config->input_range.end); + out_config->domain_range.start = cpu_to_le32(dev_config->domain_range.start); + out_config->domain_range.end = cpu_to_le32(dev_config->domain_range.end); + out_config->probe_size = cpu_to_le32(dev_config->probe_size); + out_config->bypass = dev_config->bypass; + + trace_virtio_iommu_get_config(dev_config->page_size_mask, + dev_config->input_range.start, + dev_config->input_range.end, + dev_config->domain_range.start, + dev_config->domain_range.end, + dev_config->probe_size, + dev_config->bypass); } static void virtio_iommu_set_config(VirtIODevice *vdev, - const uint8_t *config_data) + const uint8_t *config_data) { - struct virtio_iommu_config config; + VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev); + struct virtio_iommu_config *dev_config = &dev->config; + const struct virtio_iommu_config *in_config = (void *)config_data; - memcpy(&config, config_data, sizeof(struct virtio_iommu_config)); 
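
/*
 * Sketch of the idea behind the virtio_iommu_get_config() rework above, in
 * plain C rather than QEMU's cpu_to_le64()/cpu_to_le32(): the config fields
 * are encoded little-endian explicitly instead of memcpy()ing the host-endian
 * struct, so the wire encoding no longer depends on host byte order.
 */
#include <stdint.h>
#include <stdio.h>

static void store_le64(uint8_t *dst, uint64_t v)
{
    for (int i = 0; i < 8; i++) {
        dst[i] = (uint8_t)(v >> (8 * i));   /* least significant byte first */
    }
}

int main(void)
{
    uint8_t config[8];
    uint64_t page_size_mask = UINT64_C(0xfffffffffffff000);

    store_le64(config, page_size_mask);     /* identical on any host endianness */
    printf("first byte on the wire: 0x%02x\n", config[0]);
    return 0;
}
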
- trace_virtio_iommu_set_config(config.page_size_mask, - config.input_range.start, - config.input_range.end, - config.domain_range.end, - config.probe_size); + if (in_config->bypass != dev_config->bypass) { + if (!virtio_vdev_has_feature(vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) { + virtio_error(vdev, "cannot set config.bypass"); + return; + } else if (in_config->bypass != 0 && in_config->bypass != 1) { + virtio_error(vdev, "invalid config.bypass value '%u'", + in_config->bypass); + return; + } + dev_config->bypass = in_config->bypass; + virtio_iommu_switch_address_space_all(dev); + } + + trace_virtio_iommu_set_config(in_config->bypass); } static uint64_t virtio_iommu_get_features(VirtIODevice *vdev, uint64_t f, @@ -884,7 +1048,7 @@ static void virtio_iommu_replay(IOMMUMemoryRegion *mr, IOMMUNotifier *n) sid = virtio_iommu_get_bdf(sdev); - qemu_mutex_lock(&s->mutex); + qemu_rec_mutex_lock(&s->mutex); if (!s->endpoints) { goto unlock; @@ -898,7 +1062,7 @@ static void virtio_iommu_replay(IOMMUMemoryRegion *mr, IOMMUNotifier *n) g_tree_foreach(ep->domain->mappings, virtio_iommu_remap, mr); unlock: - qemu_mutex_unlock(&s->mutex); + qemu_rec_mutex_unlock(&s->mutex); } static int virtio_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu_mr, @@ -968,13 +1132,27 @@ static int virtio_iommu_set_page_size_mask(IOMMUMemoryRegion *mr, return 0; } +static void virtio_iommu_system_reset(void *opaque) +{ + VirtIOIOMMU *s = opaque; + + trace_virtio_iommu_system_reset(); + + /* + * config.bypass is sticky across device reset, but should be restored on + * system reset + */ + s->config.bypass = s->boot_bypass; + virtio_iommu_switch_address_space_all(s); + +} + static void virtio_iommu_device_realize(DeviceState *dev, Error **errp) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtIOIOMMU *s = VIRTIO_IOMMU(dev); - virtio_init(vdev, "virtio-iommu", VIRTIO_ID_IOMMU, - sizeof(struct virtio_iommu_config)); + virtio_init(vdev, VIRTIO_ID_IOMMU, sizeof(struct virtio_iommu_config)); memset(s->iommu_pcibus_by_bus_num, 0, sizeof(s->iommu_pcibus_by_bus_num)); @@ -982,9 +1160,14 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp) virtio_iommu_handle_command); s->event_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE, NULL); + /* + * config.bypass is needed to get initial address space early, such as + * in vfio realize + */ + s->config.bypass = s->boot_bypass; s->config.page_size_mask = TARGET_PAGE_MASK; - s->config.input_range.end = -1UL; - s->config.domain_range.end = 32; + s->config.input_range.end = UINT64_MAX; + s->config.domain_range.end = UINT32_MAX; s->config.probe_size = VIOMMU_PROBE_SIZE; virtio_add_feature(&s->features, VIRTIO_RING_F_EVENT_IDX); @@ -993,11 +1176,11 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp) virtio_add_feature(&s->features, VIRTIO_IOMMU_F_INPUT_RANGE); virtio_add_feature(&s->features, VIRTIO_IOMMU_F_DOMAIN_RANGE); virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MAP_UNMAP); - virtio_add_feature(&s->features, VIRTIO_IOMMU_F_BYPASS); virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MMIO); virtio_add_feature(&s->features, VIRTIO_IOMMU_F_PROBE); + virtio_add_feature(&s->features, VIRTIO_IOMMU_F_BYPASS_CONFIG); - qemu_mutex_init(&s->mutex); + qemu_rec_mutex_init(&s->mutex); s->as_by_busptr = g_hash_table_new_full(NULL, NULL, NULL, g_free); @@ -1006,6 +1189,8 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp) } else { error_setg(errp, "VIRTIO-IOMMU is not attached to any PCI bus!"); } + + 
qemu_register_reset(virtio_iommu_system_reset, s); } static void virtio_iommu_device_unrealize(DeviceState *dev) @@ -1013,6 +1198,8 @@ static void virtio_iommu_device_unrealize(DeviceState *dev) VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtIOIOMMU *s = VIRTIO_IOMMU(dev); + qemu_unregister_reset(virtio_iommu_system_reset, s); + g_hash_table_destroy(s->as_by_busptr); if (s->domains) { g_tree_destroy(s->domains); @@ -1021,6 +1208,8 @@ static void virtio_iommu_device_unrealize(DeviceState *dev) g_tree_destroy(s->endpoints); } + qemu_rec_mutex_destroy(&s->mutex); + virtio_delete_queue(s->req_vq); virtio_delete_queue(s->event_vq); virtio_cleanup(vdev); @@ -1103,8 +1292,8 @@ static const VMStateDescription vmstate_endpoint = { static const VMStateDescription vmstate_domain = { .name = "domain", - .version_id = 1, - .minimum_version_id = 1, + .version_id = 2, + .minimum_version_id = 2, .pre_load = domain_preload, .fields = (VMStateField[]) { VMSTATE_UINT32(id, VirtIOIOMMUDomain), @@ -1113,6 +1302,7 @@ static const VMStateDescription vmstate_domain = { VirtIOIOMMUInterval, VirtIOIOMMUMapping), VMSTATE_QLIST_V(endpoint_list, VirtIOIOMMUDomain, 1, vmstate_endpoint, VirtIOIOMMUEndpoint, next), + VMSTATE_BOOL_V(bypass, VirtIOIOMMUDomain, 2), VMSTATE_END_OF_LIST() } }; @@ -1141,26 +1331,35 @@ static int iommu_post_load(void *opaque, int version_id) VirtIOIOMMU *s = opaque; g_tree_foreach(s->domains, reconstruct_endpoints, s); + + /* + * Memory regions are dynamically turned on/off depending on + * 'config.bypass' and attached domain type if there is. After + * migration, we need to make sure the memory regions are + * still correct. + */ + virtio_iommu_switch_address_space_all(s); return 0; } static const VMStateDescription vmstate_virtio_iommu_device = { .name = "virtio-iommu-device", - .minimum_version_id = 1, - .version_id = 1, + .minimum_version_id = 2, + .version_id = 2, .post_load = iommu_post_load, .fields = (VMStateField[]) { - VMSTATE_GTREE_DIRECT_KEY_V(domains, VirtIOIOMMU, 1, + VMSTATE_GTREE_DIRECT_KEY_V(domains, VirtIOIOMMU, 2, &vmstate_domain, VirtIOIOMMUDomain), + VMSTATE_UINT8_V(config.bypass, VirtIOIOMMU, 2), VMSTATE_END_OF_LIST() }, }; static const VMStateDescription vmstate_virtio_iommu = { .name = "virtio-iommu", - .minimum_version_id = 1, + .minimum_version_id = 2, .priority = MIG_PRI_IOMMU, - .version_id = 1, + .version_id = 2, .fields = (VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() @@ -1169,6 +1368,7 @@ static const VMStateDescription vmstate_virtio_iommu = { static Property virtio_iommu_properties[] = { DEFINE_PROP_LINK("primary-bus", VirtIOIOMMU, primary_bus, "PCI", PCIBus *), + DEFINE_PROP_BOOL("boot-bypass", VirtIOIOMMU, boot_bypass, true), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/virtio/virtio-mem-pci.c b/hw/virtio/virtio-mem-pci.c index fa5395cd88..5c5c1e3ae3 100644 --- a/hw/virtio/virtio-mem-pci.c +++ b/hw/virtio/virtio-mem-pci.c @@ -87,14 +87,12 @@ static void virtio_mem_pci_size_change_notify(Notifier *notifier, void *data) VirtIOMEMPCI *pci_mem = container_of(notifier, VirtIOMEMPCI, size_change_notifier); DeviceState *dev = DEVICE(pci_mem); + char *qom_path = object_get_canonical_path(OBJECT(dev)); const uint64_t * const size_p = data; - const char *id = NULL; - if (dev->id) { - id = g_strdup(dev->id); - } - - qapi_event_send_memory_device_size_change(!!id, id, *size_p); + qapi_event_send_memory_device_size_change(!!dev->id, dev->id, *size_p, + qom_path); + g_free(qom_path); } static void virtio_mem_pci_class_init(ObjectClass *klass, void *data) @@ 
-106,8 +104,6 @@ static void virtio_mem_pci_class_init(ObjectClass *klass, void *data) k->realize = virtio_mem_pci_realize; set_bit(DEVICE_CATEGORY_MISC, dc->categories); - pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; - pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_MEM; pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; pcidev_k->class_id = PCI_CLASS_OTHERS; diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c index df91e454b2..56db586c89 100644 --- a/hw/virtio/virtio-mem.c +++ b/hw/virtio/virtio-mem.c @@ -11,7 +11,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/iov.h" #include "qemu/cutils.h" #include "qemu/error-report.h" @@ -32,20 +31,39 @@ #include CONFIG_DEVICES #include "trace.h" +/* + * We only had legacy x86 guests that did not support + * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE. Other targets don't have legacy guests. + */ +#if defined(TARGET_X86_64) || defined(TARGET_I386) +#define VIRTIO_MEM_HAS_LEGACY_GUESTS +#endif + /* * Let's not allow blocks smaller than 1 MiB, for example, to keep the tracking * bitmap small. */ #define VIRTIO_MEM_MIN_BLOCK_SIZE ((uint32_t)(1 * MiB)) -#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) || \ - defined(__powerpc64__) -#define VIRTIO_MEM_DEFAULT_THP_SIZE ((uint32_t)(2 * MiB)) -#else - /* fallback to 1 MiB (e.g., the THP size on s390x) */ -#define VIRTIO_MEM_DEFAULT_THP_SIZE VIRTIO_MEM_MIN_BLOCK_SIZE +static uint32_t virtio_mem_default_thp_size(void) +{ + uint32_t default_thp_size = VIRTIO_MEM_MIN_BLOCK_SIZE; + +#if defined(__x86_64__) || defined(__arm__) || defined(__powerpc64__) + default_thp_size = 2 * MiB; +#elif defined(__aarch64__) + if (qemu_real_host_page_size() == 4 * KiB) { + default_thp_size = 2 * MiB; + } else if (qemu_real_host_page_size() == 16 * KiB) { + default_thp_size = 32 * MiB; + } else if (qemu_real_host_page_size() == 64 * KiB) { + default_thp_size = 512 * MiB; + } #endif + return default_thp_size; +} + /* * We want to have a reasonable default block size such that * 1. We avoid splitting THPs when unplugging memory, which degrades @@ -78,11 +96,8 @@ static uint32_t virtio_mem_thp_size(void) if (g_file_get_contents(HPAGE_PMD_SIZE_PATH, &content, NULL, NULL) && !qemu_strtou64(content, &endptr, 0, &tmp) && (!endptr || *endptr == '\n')) { - /* - * Sanity-check the value, if it's too big (e.g., aarch64 with 64k base - * pages) or weird, fallback to something smaller. - */ - if (!tmp || !is_power_of_2(tmp) || tmp > 16 * MiB) { + /* Sanity-check the value and fallback to something reasonable. */ + if (!tmp || !is_power_of_2(tmp)) { warn_report("Read unsupported THP size: %" PRIx64, tmp); } else { thp_size = tmp; @@ -90,7 +105,7 @@ static uint32_t virtio_mem_thp_size(void) } if (!thp_size) { - thp_size = VIRTIO_MEM_DEFAULT_THP_SIZE; + thp_size = virtio_mem_default_thp_size(); warn_report("Could not detect THP size, falling back to %" PRIx64 " MiB.", thp_size / MiB); } @@ -104,12 +119,25 @@ static uint64_t virtio_mem_default_block_size(RAMBlock *rb) const uint64_t page_size = qemu_ram_pagesize(rb); /* We can have hugetlbfs with a page size smaller than the THP size. */ - if (page_size == qemu_real_host_page_size) { + if (page_size == qemu_real_host_page_size()) { return MAX(page_size, virtio_mem_thp_size()); } return MAX(page_size, VIRTIO_MEM_MIN_BLOCK_SIZE); } +#if defined(VIRTIO_MEM_HAS_LEGACY_GUESTS) +static bool virtio_mem_has_shared_zeropage(RAMBlock *rb) +{ + /* + * We only have a guaranteed shared zeropage on ordinary MAP_PRIVATE + * anonymous RAM. 
In any other case, reading unplugged *can* populate a + * fresh page, consuming actual memory. + */ + return !qemu_ram_is_shared(rb) && rb->fd < 0 && + qemu_ram_pagesize(rb) == qemu_real_host_page_size(); +} +#endif /* VIRTIO_MEM_HAS_LEGACY_GUESTS */ + /* * Size the usable region bigger than the requested size if possible. Esp. * Linux guests will only add (aligned) memory blocks in case they fully @@ -117,7 +145,7 @@ static uint64_t virtio_mem_default_block_size(RAMBlock *rb) * The memory block size corresponds mostly to the section size. * * This allows e.g., to add 20MB with a section size of 128MB on x86_64, and - * a section size of 1GB on arm64 (as long as the start address is properly + * a section size of 512MB on arm64 (as long as the start address is properly * aligned, similar to ordinary DIMMs). * * We can change this at any time and maybe even make it configurable if @@ -126,6 +154,8 @@ static uint64_t virtio_mem_default_block_size(RAMBlock *rb) */ #if defined(TARGET_X86_64) || defined(TARGET_I386) #define VIRTIO_MEM_USABLE_EXTENT (2 * (128 * MiB)) +#elif defined(TARGET_ARM) +#define VIRTIO_MEM_USABLE_EXTENT (2 * (512 * MiB)) #else #error VIRTIO_MEM_USABLE_EXTENT not defined #endif @@ -205,7 +235,7 @@ static int virtio_mem_for_each_plugged_section(const VirtIOMEM *vmem, uint64_t offset, size; int ret = 0; - first_bit = s->offset_within_region / vmem->bitmap_size; + first_bit = s->offset_within_region / vmem->block_size; first_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size, first_bit); while (first_bit < vmem->bitmap_size) { MemoryRegionSection tmp = *s; @@ -228,6 +258,38 @@ static int virtio_mem_for_each_plugged_section(const VirtIOMEM *vmem, return ret; } +static int virtio_mem_for_each_unplugged_section(const VirtIOMEM *vmem, + MemoryRegionSection *s, + void *arg, + virtio_mem_section_cb cb) +{ + unsigned long first_bit, last_bit; + uint64_t offset, size; + int ret = 0; + + first_bit = s->offset_within_region / vmem->block_size; + first_bit = find_next_zero_bit(vmem->bitmap, vmem->bitmap_size, first_bit); + while (first_bit < vmem->bitmap_size) { + MemoryRegionSection tmp = *s; + + offset = first_bit * vmem->block_size; + last_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size, + first_bit + 1) - 1; + size = (last_bit - first_bit + 1) * vmem->block_size; + + if (!virito_mem_intersect_memory_section(&tmp, offset, size)) { + break; + } + ret = cb(&tmp, arg); + if (ret) { + break; + } + first_bit = find_next_zero_bit(vmem->bitmap, vmem->bitmap_size, + last_bit + 2); + } + return ret; +} + static int virtio_mem_notify_populate_cb(MemoryRegionSection *s, void *arg) { RamDiscardListener *rdl = arg; @@ -279,7 +341,7 @@ static int virtio_mem_notify_plug(VirtIOMEM *vmem, uint64_t offset, if (ret) { /* Notify all already-notified listeners. */ QLIST_FOREACH(rdl2, &vmem->rdl_list, next) { - MemoryRegionSection tmp = *rdl->section; + MemoryRegionSection tmp = *rdl2->section; if (rdl2 == rdl) { break; @@ -397,10 +459,40 @@ static int virtio_mem_set_block_state(VirtIOMEM *vmem, uint64_t start_gpa, return -EBUSY; } virtio_mem_notify_unplug(vmem, offset, size); - } else if (virtio_mem_notify_plug(vmem, offset, size)) { - /* Could be a mapping attempt resulted in memory getting populated. 
*/ - ram_block_discard_range(vmem->memdev->mr.ram_block, offset, size); - return -EBUSY; + } else { + int ret = 0; + + if (vmem->prealloc) { + void *area = memory_region_get_ram_ptr(&vmem->memdev->mr) + offset; + int fd = memory_region_get_fd(&vmem->memdev->mr); + Error *local_err = NULL; + + qemu_prealloc_mem(fd, area, size, 1, NULL, &local_err); + if (local_err) { + static bool warned; + + /* + * Warn only once, we don't want to fill the log with these + * warnings. + */ + if (!warned) { + warn_report_err(local_err); + warned = true; + } else { + error_free(local_err); + } + ret = -EBUSY; + } + } + if (!ret) { + ret = virtio_mem_notify_plug(vmem, offset, size); + } + + if (ret) { + /* Could be preallocation or a notifier populated memory. */ + ram_block_discard_range(vmem->memdev->mr.ram_block, offset, size); + return -EBUSY; + } } virtio_mem_set_bitmap(vmem, start_gpa, size, plug); return 0; @@ -621,15 +713,29 @@ static uint64_t virtio_mem_get_features(VirtIODevice *vdev, uint64_t features, Error **errp) { MachineState *ms = MACHINE(qdev_get_machine()); + VirtIOMEM *vmem = VIRTIO_MEM(vdev); if (ms->numa_state) { #if defined(CONFIG_ACPI) virtio_add_feature(&features, VIRTIO_MEM_F_ACPI_PXM); #endif } + assert(vmem->unplugged_inaccessible != ON_OFF_AUTO_AUTO); + if (vmem->unplugged_inaccessible == ON_OFF_AUTO_ON) { + virtio_add_feature(&features, VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE); + } return features; } +static int virtio_mem_validate_features(VirtIODevice *vdev) +{ + if (virtio_host_has_feature(vdev, VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE) && + !virtio_vdev_has_feature(vdev, VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE)) { + return -EFAULT; + } + return 0; +} + static void virtio_mem_system_reset(void *opaque) { VirtIOMEM *vmem = VIRTIO_MEM(opaque); @@ -684,6 +790,29 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp) rb = vmem->memdev->mr.ram_block; page_size = qemu_ram_pagesize(rb); +#if defined(VIRTIO_MEM_HAS_LEGACY_GUESTS) + switch (vmem->unplugged_inaccessible) { + case ON_OFF_AUTO_AUTO: + if (virtio_mem_has_shared_zeropage(rb)) { + vmem->unplugged_inaccessible = ON_OFF_AUTO_OFF; + } else { + vmem->unplugged_inaccessible = ON_OFF_AUTO_ON; + } + break; + case ON_OFF_AUTO_OFF: + if (!virtio_mem_has_shared_zeropage(rb)) { + warn_report("'%s' property set to 'off' with a memdev that does" + " not support the shared zeropage.", + VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP); + } + break; + default: + break; + } +#else /* VIRTIO_MEM_HAS_LEGACY_GUESTS */ + vmem->unplugged_inaccessible = ON_OFF_AUTO_ON; +#endif /* VIRTIO_MEM_HAS_LEGACY_GUESTS */ + /* * If the block size wasn't configured by the user, use a sane default. 
This * allows using hugetlbfs backends of any page size without manual @@ -701,7 +830,8 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp) warn_report("'%s' property is smaller than the default block size (%" PRIx64 " MiB)", VIRTIO_MEM_BLOCK_SIZE_PROP, virtio_mem_default_block_size(rb) / MiB); - } else if (!QEMU_IS_ALIGNED(vmem->requested_size, vmem->block_size)) { + } + if (!QEMU_IS_ALIGNED(vmem->requested_size, vmem->block_size)) { error_setg(errp, "'%s' property has to be multiples of '%s' (0x%" PRIx64 ")", VIRTIO_MEM_REQUESTED_SIZE_PROP, VIRTIO_MEM_BLOCK_SIZE_PROP, vmem->block_size); @@ -737,14 +867,12 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp) vmem->block_size; vmem->bitmap = bitmap_new(vmem->bitmap_size); - virtio_init(vdev, TYPE_VIRTIO_MEM, VIRTIO_ID_MEM, - sizeof(struct virtio_mem_config)); + virtio_init(vdev, VIRTIO_ID_MEM, sizeof(struct virtio_mem_config)); vmem->vq = virtio_add_queue(vdev, 128, virtio_mem_handle_request); host_memory_backend_set_mapped(vmem->memdev, true); vmstate_register_ram(&vmem->memdev->mr, DEVICE(vmem)); qemu_register_reset(virtio_mem_system_reset, vmem); - precopy_add_notifier(&vmem->precopy_notifier); /* * Set ourselves as RamDiscardManager before the plug handler maps the @@ -764,7 +892,6 @@ static void virtio_mem_device_unrealize(DeviceState *dev) * found via an address space anymore. Unset ourselves. */ memory_region_set_ram_discard_manager(&vmem->memdev->mr, NULL); - precopy_remove_notifier(&vmem->precopy_notifier); qemu_unregister_reset(virtio_mem_system_reset, vmem); vmstate_unregister_ram(&vmem->memdev->mr, DEVICE(vmem)); host_memory_backend_set_mapped(vmem->memdev, false); @@ -1057,43 +1184,11 @@ static void virtio_mem_set_block_size(Object *obj, Visitor *v, const char *name, vmem->block_size = value; } -static int virtio_mem_precopy_exclude_range_cb(const VirtIOMEM *vmem, void *arg, - uint64_t offset, uint64_t size) -{ - void * const host = qemu_ram_get_host_addr(vmem->memdev->mr.ram_block); - - qemu_guest_free_page_hint(host + offset, size); - return 0; -} - -static void virtio_mem_precopy_exclude_unplugged(VirtIOMEM *vmem) -{ - virtio_mem_for_each_unplugged_range(vmem, NULL, - virtio_mem_precopy_exclude_range_cb); -} - -static int virtio_mem_precopy_notify(NotifierWithReturn *n, void *data) -{ - VirtIOMEM *vmem = container_of(n, VirtIOMEM, precopy_notifier); - PrecopyNotifyData *pnd = data; - - switch (pnd->reason) { - case PRECOPY_NOTIFY_AFTER_BITMAP_SYNC: - virtio_mem_precopy_exclude_unplugged(vmem); - break; - default: - break; - } - - return 0; -} - static void virtio_mem_instance_init(Object *obj) { VirtIOMEM *vmem = VIRTIO_MEM(obj); notifier_list_init(&vmem->size_change_notifiers); - vmem->precopy_notifier.notify = virtio_mem_precopy_notify; QLIST_INIT(&vmem->rdl_list); object_property_add(obj, VIRTIO_MEM_SIZE_PROP, "size", virtio_mem_get_size, @@ -1109,8 +1204,13 @@ static void virtio_mem_instance_init(Object *obj) static Property virtio_mem_properties[] = { DEFINE_PROP_UINT64(VIRTIO_MEM_ADDR_PROP, VirtIOMEM, addr, 0), DEFINE_PROP_UINT32(VIRTIO_MEM_NODE_PROP, VirtIOMEM, node, 0), + DEFINE_PROP_BOOL(VIRTIO_MEM_PREALLOC_PROP, VirtIOMEM, prealloc, false), DEFINE_PROP_LINK(VIRTIO_MEM_MEMDEV_PROP, VirtIOMEM, memdev, TYPE_MEMORY_BACKEND, HostMemoryBackend *), +#if defined(VIRTIO_MEM_HAS_LEGACY_GUESTS) + DEFINE_PROP_ON_OFF_AUTO(VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP, VirtIOMEM, + unplugged_inaccessible, ON_OFF_AUTO_AUTO), +#endif DEFINE_PROP_END_OF_LIST(), }; @@ -1170,6 +1270,31 @@ static int 
virtio_mem_rdm_replay_populated(const RamDiscardManager *rdm, virtio_mem_rdm_replay_populated_cb); } +static int virtio_mem_rdm_replay_discarded_cb(MemoryRegionSection *s, + void *arg) +{ + struct VirtIOMEMReplayData *data = arg; + + ((ReplayRamDiscard)data->fn)(s, data->opaque); + return 0; +} + +static void virtio_mem_rdm_replay_discarded(const RamDiscardManager *rdm, + MemoryRegionSection *s, + ReplayRamDiscard replay_fn, + void *opaque) +{ + const VirtIOMEM *vmem = VIRTIO_MEM(rdm); + struct VirtIOMEMReplayData data = { + .fn = replay_fn, + .opaque = opaque, + }; + + g_assert(s->mr == &vmem->memdev->mr); + virtio_mem_for_each_unplugged_section(vmem, s, &data, + virtio_mem_rdm_replay_discarded_cb); +} + static void virtio_mem_rdm_register_listener(RamDiscardManager *rdm, RamDiscardListener *rdl, MemoryRegionSection *s) @@ -1224,6 +1349,7 @@ static void virtio_mem_class_init(ObjectClass *klass, void *data) vdc->unrealize = virtio_mem_device_unrealize; vdc->get_config = virtio_mem_get_config; vdc->get_features = virtio_mem_get_features; + vdc->validate_features = virtio_mem_validate_features; vdc->vmsd = &vmstate_virtio_mem_device; vmc->fill_device_info = virtio_mem_fill_device_info; @@ -1234,6 +1360,7 @@ static void virtio_mem_class_init(ObjectClass *klass, void *data) rdmc->get_min_granularity = virtio_mem_rdm_get_min_granularity; rdmc->is_populated = virtio_mem_rdm_is_populated; rdmc->replay_populated = virtio_mem_rdm_replay_populated; + rdmc->replay_discarded = virtio_mem_rdm_replay_discarded; rdmc->register_listener = virtio_mem_rdm_register_listener; rdmc->unregister_listener = virtio_mem_rdm_unregister_listener; } diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c index 1af48a1b04..d240efef97 100644 --- a/hw/virtio/virtio-mmio.c +++ b/hw/virtio/virtio-mmio.c @@ -72,12 +72,12 @@ static void virtio_mmio_soft_reset(VirtIOMMIOProxy *proxy) { int i; - if (proxy->legacy) { - return; - } + virtio_bus_reset(&proxy->bus); - for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { - proxy->vqs[i].enabled = 0; + if (!proxy->legacy) { + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { + proxy->vqs[i].enabled = 0; + } } } @@ -376,7 +376,7 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value, return; } if (value == 0) { - virtio_reset(vdev); + virtio_mmio_soft_reset(proxy); } else { virtio_queue_set_addr(vdev, vdev->queue_sel, value << proxy->guest_page_shift); @@ -432,7 +432,6 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value, } if (vdev->status == 0) { - virtio_reset(vdev); virtio_mmio_soft_reset(proxy); } break; @@ -592,7 +591,6 @@ static const VMStateDescription vmstate_virtio_mmio = { .name = "virtio_mmio", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_END_OF_LIST() }, @@ -628,8 +626,8 @@ static void virtio_mmio_reset(DeviceState *d) VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); int i; - virtio_mmio_stop_ioeventfd(proxy); - virtio_bus_reset(&proxy->bus); + virtio_mmio_soft_reset(proxy); + proxy->host_features_sel = 0; proxy->guest_features_sel = 0; proxy->guest_page_shift = 0; @@ -638,7 +636,6 @@ static void virtio_mmio_reset(DeviceState *d) proxy->guest_features[0] = proxy->guest_features[1] = 0; for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { - proxy->vqs[i].enabled = 0; proxy->vqs[i].num = 0; proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0; proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0; @@ -733,8 +730,7 @@ static void virtio_mmio_realizefn(DeviceState *d, Error **errp) VirtIOMMIOProxy *proxy 
= VIRTIO_MMIO(d); SysBusDevice *sbd = SYS_BUS_DEVICE(d); - qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS, - d, NULL); + qbus_init(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS, d, NULL); sysbus_init_irq(sbd, &proxy->irq); if (!kvm_eventfds_enabled()) { @@ -818,6 +814,17 @@ static char *virtio_mmio_bus_get_dev_path(DeviceState *dev) return path; } +static void virtio_mmio_vmstate_change(DeviceState *d, bool running) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); + + if (running) { + virtio_mmio_start_ioeventfd(proxy); + } else { + virtio_mmio_stop_ioeventfd(proxy); + } +} + static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data) { BusClass *bus_class = BUS_CLASS(klass); @@ -833,6 +840,7 @@ static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data) k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled; k->ioeventfd_assign = virtio_mmio_ioeventfd_assign; k->pre_plugged = virtio_mmio_pre_plugged; + k->vmstate_change = virtio_mmio_vmstate_change; k->has_variable_vring_alignment = true; bus_class->max_dev = 1; bus_class->get_dev_path = virtio_mmio_bus_get_dev_path; diff --git a/hw/virtio/virtio-net-pci.c b/hw/virtio/virtio-net-pci.c index aa0b3caecb..e03543a70a 100644 --- a/hw/virtio/virtio-net-pci.c +++ b/hw/virtio/virtio-net-pci.c @@ -19,7 +19,7 @@ #include "hw/qdev-properties.h" #include "hw/virtio/virtio-net.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" #include "qapi/error.h" #include "qemu/module.h" #include "qom/object.h" diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index 433060ac02..a1c9dfa7bb 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -33,11 +33,12 @@ #include "hw/pci/msix.h" #include "hw/loader.h" #include "sysemu/kvm.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" #include "qemu/range.h" #include "hw/virtio/virtio-bus.h" #include "qapi/visitor.h" #include "sysemu/replay.h" +#include "trace.h" #define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev)) @@ -70,9 +71,11 @@ static void virtio_pci_notify(DeviceState *d, uint16_t vector) { VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d); - if (msix_enabled(&proxy->pci_dev)) - msix_notify(&proxy->pci_dev, vector); - else { + if (msix_enabled(&proxy->pci_dev)) { + if (vector != VIRTIO_NO_VECTOR) { + msix_notify(&proxy->pci_dev, vector); + } + } else { VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); pci_set_irq(&proxy->pci_dev, qatomic_read(&vdev->isr) & 1); } @@ -131,7 +134,6 @@ static const VMStateDescription vmstate_virtio_pci = { .name = "virtio_pci", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_END_OF_LIST() }, @@ -175,6 +177,7 @@ static int virtio_pci_load_config(DeviceState *d, QEMUFile *f) { VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + uint16_t vector; int ret; ret = pci_device_load(&proxy->pci_dev, f); @@ -184,12 +187,17 @@ static int virtio_pci_load_config(DeviceState *d, QEMUFile *f) msix_unuse_all_vectors(&proxy->pci_dev); msix_load(&proxy->pci_dev, f); if (msix_present(&proxy->pci_dev)) { - qemu_get_be16s(f, &vdev->config_vector); + qemu_get_be16s(f, &vector); + + if (vector != VIRTIO_NO_VECTOR && vector >= proxy->nvectors) { + return -EINVAL; + } } else { - vdev->config_vector = VIRTIO_NO_VECTOR; + vector = VIRTIO_NO_VECTOR; } - if (vdev->config_vector != VIRTIO_NO_VECTOR) { - return msix_vector_use(&proxy->pci_dev, vdev->config_vector); + 
vdev->config_vector = vector; + if (vector != VIRTIO_NO_VECTOR) { + msix_vector_use(&proxy->pci_dev, vector); } return 0; } @@ -202,12 +210,15 @@ static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f) uint16_t vector; if (msix_present(&proxy->pci_dev)) { qemu_get_be16s(f, &vector); + if (vector != VIRTIO_NO_VECTOR && vector >= proxy->nvectors) { + return -EINVAL; + } } else { vector = VIRTIO_NO_VECTOR; } virtio_queue_set_vector(vdev, n, vector); if (vector != VIRTIO_NO_VECTOR) { - return msix_vector_use(&proxy->pci_dev, vector); + msix_vector_use(&proxy->pci_dev, vector); } return 0; @@ -299,6 +310,7 @@ static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val) { VirtIOPCIProxy *proxy = opaque; VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + uint16_t vector; hwaddr pa; switch (addr) { @@ -352,18 +364,28 @@ static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val) } break; case VIRTIO_MSI_CONFIG_VECTOR: - msix_vector_unuse(&proxy->pci_dev, vdev->config_vector); + if (vdev->config_vector != VIRTIO_NO_VECTOR) { + msix_vector_unuse(&proxy->pci_dev, vdev->config_vector); + } /* Make it possible for guest to discover an error took place. */ - if (msix_vector_use(&proxy->pci_dev, val) < 0) + if (val < proxy->nvectors) { + msix_vector_use(&proxy->pci_dev, val); + } else { val = VIRTIO_NO_VECTOR; + } vdev->config_vector = val; break; case VIRTIO_MSI_QUEUE_VECTOR: - msix_vector_unuse(&proxy->pci_dev, - virtio_queue_vector(vdev, vdev->queue_sel)); + vector = virtio_queue_vector(vdev, vdev->queue_sel); + if (vector != VIRTIO_NO_VECTOR) { + msix_vector_unuse(&proxy->pci_dev, vector); + } /* Make it possible for guest to discover an error took place. */ - if (msix_vector_use(&proxy->pci_dev, val) < 0) + if (val < proxy->nvectors) { + msix_vector_use(&proxy->pci_dev, val); + } else { val = VIRTIO_NO_VECTOR; + } virtio_queue_set_vector(vdev, vdev->queue_sel, val); break; default: @@ -684,10 +706,12 @@ static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy, int ret; if (irqfd->users == 0) { - ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev); + KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state); + ret = kvm_irqchip_add_msi_route(&c, vector, &proxy->pci_dev); if (ret < 0) { return ret; } + kvm_irqchip_commit_route_changes(&c); irqfd->virq = ret; } irqfd->users++; @@ -994,9 +1018,14 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign) nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX); - /* When deassigning, pass a consistent nvqs value - * to avoid leaking notifiers. + /* + * When deassigning, pass a consistent nvqs value to avoid leaking + * notifiers. But first check we've actually been configured, exit + * early if we haven't. 
*/ + if (!assign && !proxy->nvqs_with_notifiers) { + return 0; + } assert(assign || nvqs == proxy->nvqs_with_notifiers); proxy->nvqs_with_notifiers = nvqs; @@ -1121,6 +1150,19 @@ static AddressSpace *virtio_pci_get_dma_as(DeviceState *d) return pci_get_address_space(dev); } +static bool virtio_pci_iommu_enabled(DeviceState *d) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(d); + PCIDevice *dev = &proxy->pci_dev; + AddressSpace *dma_as = pci_device_iommu_address_space(dev); + + if (dma_as == &address_space_memory) { + return false; + } + + return true; +} + static bool virtio_pci_queue_enabled(DeviceState *d, int n) { VirtIOPCIProxy *proxy = VIRTIO_PCI(d); @@ -1231,6 +1273,9 @@ static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr, case VIRTIO_PCI_COMMON_Q_USEDHI: val = proxy->vqs[vdev->queue_sel].used[1]; break; + case VIRTIO_PCI_COMMON_Q_RESET: + val = proxy->vqs[vdev->queue_sel].reset; + break; default: val = 0; } @@ -1243,6 +1288,7 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr, { VirtIOPCIProxy *proxy = opaque; VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + uint16_t vector; if (vdev == NULL) { return; @@ -1264,9 +1310,13 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr, } break; case VIRTIO_PCI_COMMON_MSIX: - msix_vector_unuse(&proxy->pci_dev, vdev->config_vector); + if (vdev->config_vector != VIRTIO_NO_VECTOR) { + msix_vector_unuse(&proxy->pci_dev, vdev->config_vector); + } /* Make it possible for guest to discover an error took place. */ - if (msix_vector_use(&proxy->pci_dev, val) < 0) { + if (val < proxy->nvectors) { + msix_vector_use(&proxy->pci_dev, val); + } else { val = VIRTIO_NO_VECTOR; } vdev->config_vector = val; @@ -1298,10 +1348,14 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr, proxy->vqs[vdev->queue_sel].num); break; case VIRTIO_PCI_COMMON_Q_MSIX: - msix_vector_unuse(&proxy->pci_dev, - virtio_queue_vector(vdev, vdev->queue_sel)); + vector = virtio_queue_vector(vdev, vdev->queue_sel); + if (vector != VIRTIO_NO_VECTOR) { + msix_vector_unuse(&proxy->pci_dev, vector); + } /* Make it possible for guest to discover an error took place. 
*/ - if (msix_vector_use(&proxy->pci_dev, val) < 0) { + if (val < proxy->nvectors) { + msix_vector_use(&proxy->pci_dev, val); + } else { val = VIRTIO_NO_VECTOR; } virtio_queue_set_vector(vdev, vdev->queue_sel, val); @@ -1318,6 +1372,8 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr, ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 | proxy->vqs[vdev->queue_sel].used[0]); proxy->vqs[vdev->queue_sel].enabled = 1; + proxy->vqs[vdev->queue_sel].reset = 0; + virtio_queue_enable(vdev, vdev->queue_sel); } else { virtio_error(vdev, "wrong value for queue_enable %"PRIx64, val); } @@ -1340,6 +1396,16 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr, case VIRTIO_PCI_COMMON_Q_USEDHI: proxy->vqs[vdev->queue_sel].used[1] = val; break; + case VIRTIO_PCI_COMMON_Q_RESET: + if (val == 1) { + proxy->vqs[vdev->queue_sel].reset = 1; + + virtio_queue_reset(vdev, vdev->queue_sel); + + proxy->vqs[vdev->queue_sel].reset = 0; + proxy->vqs[vdev->queue_sel].enabled = 0; + } + break; default: break; } @@ -1366,6 +1432,7 @@ static void virtio_pci_notify_write(void *opaque, hwaddr addr, unsigned queue = addr / virtio_pci_queue_mem_mult(proxy); if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) { + trace_virtio_pci_notify_write(addr, val, size); virtio_queue_notify(vdev, queue); } } @@ -1379,6 +1446,7 @@ static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr, unsigned queue = val; if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) { + trace_virtio_pci_notify_write_pio(addr, val, size); virtio_queue_notify(vdev, queue); } } @@ -1653,7 +1721,7 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp) if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) { error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by" " neither legacy nor transitional device"); - return ; + return; } /* * Legacy and transitional devices use specific subsystem IDs. 
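/*
 * Editor's sketch, not part of the patch above: the virtio-pci hunks keep
 * repeating one idiom whenever the guest reprograms an MSI-X vector --
 * release the vector previously in use (unless it was VIRTIO_NO_VECTOR) and
 * claim the new one only if it is below proxy->nvectors, otherwise hand
 * VIRTIO_NO_VECTOR back so the guest can notice the error instead of relying
 * on msix_vector_use()'s return value. A hypothetical helper wrapping that
 * idiom could look as follows; the name virtio_pci_swap_msix_vector() is an
 * illustration only and does not exist in the tree.
 */
#include "qemu/osdep.h"
#include "hw/pci/msix.h"
#include "hw/virtio/virtio-pci.h"

static uint16_t virtio_pci_swap_msix_vector(VirtIOPCIProxy *proxy,
                                            uint16_t old_vector,
                                            uint16_t new_vector)
{
    /* Drop the reference held on the old vector, if any. */
    if (old_vector != VIRTIO_NO_VECTOR) {
        msix_vector_unuse(&proxy->pci_dev, old_vector);
    }
    /* Out-of-range requests degrade to "no vector", visibly to the guest. */
    if (new_vector < proxy->nvectors) {
        msix_vector_use(&proxy->pci_dev, new_vector);
        return new_vector;
    }
    return VIRTIO_NO_VECTOR;
}

/*
 * With such a helper, the VIRTIO_MSI_CONFIG_VECTOR / VIRTIO_PCI_COMMON_MSIX
 * cases above would reduce to
 *     vdev->config_vector = virtio_pci_swap_msix_vector(proxy,
 *                                                       vdev->config_vector,
 *                                                       val);
 * and the per-queue cases to the same call on
 * virtio_queue_vector(vdev, vdev->queue_sel).
 */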
@@ -1666,7 +1734,7 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp) pci_set_word(config + PCI_VENDOR_ID, PCI_VENDOR_ID_REDHAT_QUMRANET); pci_set_word(config + PCI_DEVICE_ID, - 0x1040 + virtio_bus_get_vdev_id(bus)); + PCI_DEVICE_ID_VIRTIO_10_BASE + virtio_bus_get_vdev_id(bus)); pci_config_set_revision(config, 1); } config[PCI_INTERRUPT_PIN] = 1; @@ -1925,20 +1993,26 @@ static void virtio_pci_reset(DeviceState *qdev) { VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev); VirtioBusState *bus = VIRTIO_BUS(&proxy->bus); - PCIDevice *dev = PCI_DEVICE(qdev); int i; - virtio_pci_stop_ioeventfd(proxy); virtio_bus_reset(bus); msix_unuse_all_vectors(&proxy->pci_dev); for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { proxy->vqs[i].enabled = 0; + proxy->vqs[i].reset = 0; proxy->vqs[i].num = 0; proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0; proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0; proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0; } +} + +static void virtio_pci_bus_reset(DeviceState *qdev) +{ + PCIDevice *dev = PCI_DEVICE(qdev); + + virtio_pci_reset(qdev); if (pci_is_express(dev)) { pcie_cap_deverr_reset(dev); @@ -2006,7 +2080,7 @@ static void virtio_pci_class_init(ObjectClass *klass, void *data) k->class_id = PCI_CLASS_OTHERS; device_class_set_parent_realize(dc, virtio_pci_dc_realize, &vpciklass->parent_dc_realize); - dc->reset = virtio_pci_reset; + dc->reset = virtio_pci_bus_reset; } static const TypeInfo virtio_pci_info = { @@ -2174,8 +2248,7 @@ static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size, DeviceState *qdev = DEVICE(dev); char virtio_bus_name[] = "virtio-bus"; - qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev, - virtio_bus_name); + qbus_init(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev, virtio_bus_name); } static void virtio_pci_bus_class_init(ObjectClass *klass, void *data) @@ -2202,6 +2275,7 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data) k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled; k->ioeventfd_assign = virtio_pci_ioeventfd_assign; k->get_dma_as = virtio_pci_get_dma_as; + k->iommu_enabled = virtio_pci_iommu_enabled; k->queue_enabled = virtio_pci_queue_enabled; } diff --git a/hw/virtio/virtio-pmem-pci.c b/hw/virtio/virtio-pmem-pci.c index 2b2a0b1eae..7d9f4ec189 100644 --- a/hw/virtio/virtio-pmem-pci.c +++ b/hw/virtio/virtio-pmem-pci.c @@ -90,8 +90,6 @@ static void virtio_pmem_pci_class_init(ObjectClass *klass, void *data) k->realize = virtio_pmem_pci_realize; set_bit(DEVICE_CATEGORY_MISC, dc->categories); - pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; - pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_PMEM; pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; pcidev_k->class_id = PCI_CLASS_OTHERS; diff --git a/hw/virtio/virtio-pmem.c b/hw/virtio/virtio-pmem.c index d1aeb90a31..a1abfe0e1b 100644 --- a/hw/virtio/virtio-pmem.c +++ b/hw/virtio/virtio-pmem.c @@ -13,7 +13,6 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qemu-common.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "hw/virtio/virtio-pmem.h" @@ -123,8 +122,7 @@ static void virtio_pmem_realize(DeviceState *dev, Error **errp) } host_memory_backend_set_mapped(pmem->memdev, true); - virtio_init(vdev, TYPE_VIRTIO_PMEM, VIRTIO_ID_PMEM, - sizeof(struct virtio_pmem_config)); + virtio_init(vdev, VIRTIO_ID_PMEM, sizeof(struct virtio_pmem_config)); pmem->rq_vq = virtio_add_queue(vdev, 128, virtio_pmem_flush); } @@ -182,7 +180,7 @@ static void virtio_pmem_class_init(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_STORAGE, 
dc->categories); } -static TypeInfo virtio_pmem_info = { +static const TypeInfo virtio_pmem_info = { .name = TYPE_VIRTIO_PMEM, .parent = TYPE_VIRTIO_DEVICE, .class_size = sizeof(VirtIOPMEMClass), diff --git a/hw/virtio/virtio-rng-pci.c b/hw/virtio/virtio-rng-pci.c index c1f916268b..6e76f8b57b 100644 --- a/hw/virtio/virtio-rng-pci.c +++ b/hw/virtio/virtio-rng-pci.c @@ -11,8 +11,9 @@ #include "qemu/osdep.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" #include "hw/virtio/virtio-rng.h" +#include "hw/qdev-properties.h" #include "qapi/error.h" #include "qemu/module.h" #include "qom/object.h" @@ -31,11 +32,23 @@ struct VirtIORngPCI { VirtIORNG vdev; }; +static Property virtio_rng_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, + DEV_NVECTORS_UNSPECIFIED), + DEFINE_PROP_END_OF_LIST(), +}; + static void virtio_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) { VirtIORngPCI *vrng = VIRTIO_RNG_PCI(vpci_dev); DeviceState *vdev = DEVICE(&vrng->vdev); + if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { + vpci_dev->nvectors = 2; + } + if (!qdev_realize(vdev, BUS(&vpci_dev->bus), errp)) { return; } @@ -54,6 +67,7 @@ static void virtio_rng_pci_class_init(ObjectClass *klass, void *data) pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_RNG; pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; pcidev_k->class_id = PCI_CLASS_OTHERS; + device_class_set_props(dc, virtio_rng_properties); } static void virtio_rng_initfn(Object *obj) diff --git a/hw/virtio/virtio-rng.c b/hw/virtio/virtio-rng.c index cc8e9f775d..7e12fc03bf 100644 --- a/hw/virtio/virtio-rng.c +++ b/hw/virtio/virtio-rng.c @@ -215,7 +215,7 @@ static void virtio_rng_device_realize(DeviceState *dev, Error **errp) return; } - virtio_init(vdev, "virtio-rng", VIRTIO_ID_RNG, 0); + virtio_init(vdev, VIRTIO_ID_RNG, 0); vrng->vq = virtio_add_queue(vdev, 8, handle_input); vrng->quota_remaining = vrng->conf.max_bytes; diff --git a/hw/virtio/virtio-scsi-pci.c b/hw/virtio/virtio-scsi-pci.c index 97fab74236..e8e3442f38 100644 --- a/hw/virtio/virtio-scsi-pci.c +++ b/hw/virtio/virtio-scsi-pci.c @@ -18,7 +18,7 @@ #include "hw/qdev-properties.h" #include "hw/virtio/virtio-scsi.h" #include "qemu/module.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" #include "qom/object.h" typedef struct VirtIOSCSIPCI VirtIOSCSIPCI; diff --git a/hw/virtio/virtio-serial-pci.c b/hw/virtio/virtio-serial-pci.c index 35bcd961c9..cea31adcc4 100644 --- a/hw/virtio/virtio-serial-pci.c +++ b/hw/virtio/virtio-serial-pci.c @@ -20,7 +20,7 @@ #include "hw/qdev-properties.h" #include "hw/virtio/virtio-serial.h" #include "qemu/module.h" -#include "virtio-pci.h" +#include "hw/virtio/virtio-pci.h" #include "qom/object.h" typedef struct VirtIOSerialPCI VirtIOSerialPCI; diff --git a/hw/virtio/virtio-stub.c b/hw/virtio/virtio-stub.c new file mode 100644 index 0000000000..7ddb22cc5e --- /dev/null +++ b/hw/virtio/virtio-stub.c @@ -0,0 +1,42 @@ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-virtio.h" + +static void *qmp_virtio_unsupported(Error **errp) +{ + error_setg(errp, "Virtio is disabled"); + return NULL; +} + +VirtioInfoList *qmp_x_query_virtio(Error **errp) +{ + return qmp_virtio_unsupported(errp); +} + +VirtioStatus *qmp_x_query_virtio_status(const char *path, Error **errp) +{ + return qmp_virtio_unsupported(errp); +} + +VirtVhostQueueStatus *qmp_x_query_virtio_vhost_queue_status(const char *path, + uint16_t queue, + 
Error **errp) +{ + return qmp_virtio_unsupported(errp); +} + +VirtQueueStatus *qmp_x_query_virtio_queue_status(const char *path, + uint16_t queue, + Error **errp) +{ + return qmp_virtio_unsupported(errp); +} + +VirtioQueueElement *qmp_x_query_virtio_queue_element(const char *path, + uint16_t queue, + bool has_index, + uint16_t index, + Error **errp) +{ + return qmp_virtio_unsupported(errp); +} diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index 874377f37a..384c8f0f08 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -13,12 +13,18 @@ #include "qemu/osdep.h" #include "qapi/error.h" +#include "qapi/qmp/qdict.h" +#include "qapi/qapi-commands-virtio.h" +#include "qapi/qapi-commands-qom.h" +#include "qapi/qapi-visit-virtio.h" +#include "qapi/qmp/qjson.h" #include "cpu.h" #include "trace.h" #include "qemu/error-report.h" #include "qemu/log.h" #include "qemu/main-loop.h" #include "qemu/module.h" +#include "qom/object_interfaces.h" #include "hw/virtio/virtio.h" #include "migration/qemu-file-types.h" #include "qemu/atomic.h" @@ -28,6 +34,432 @@ #include "sysemu/dma.h" #include "sysemu/runstate.h" #include "standard-headers/linux/virtio_ids.h" +#include "standard-headers/linux/vhost_types.h" +#include "standard-headers/linux/virtio_blk.h" +#include "standard-headers/linux/virtio_console.h" +#include "standard-headers/linux/virtio_gpu.h" +#include "standard-headers/linux/virtio_net.h" +#include "standard-headers/linux/virtio_scsi.h" +#include "standard-headers/linux/virtio_i2c.h" +#include "standard-headers/linux/virtio_balloon.h" +#include "standard-headers/linux/virtio_iommu.h" +#include "standard-headers/linux/virtio_mem.h" +#include "standard-headers/linux/virtio_vsock.h" +#include CONFIG_DEVICES + +/* QAPI list of realized VirtIODevices */ +static QTAILQ_HEAD(, VirtIODevice) virtio_list; + +/* + * Maximum size of virtio device config space + */ +#define VHOST_USER_MAX_CONFIG_SIZE 256 + +#define FEATURE_ENTRY(name, desc) (qmp_virtio_feature_map_t) \ + { .virtio_bit = name, .feature_desc = desc } + +enum VhostUserProtocolFeature { + VHOST_USER_PROTOCOL_F_MQ = 0, + VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1, + VHOST_USER_PROTOCOL_F_RARP = 2, + VHOST_USER_PROTOCOL_F_REPLY_ACK = 3, + VHOST_USER_PROTOCOL_F_NET_MTU = 4, + VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5, + VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6, + VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7, + VHOST_USER_PROTOCOL_F_PAGEFAULT = 8, + VHOST_USER_PROTOCOL_F_CONFIG = 9, + VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10, + VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11, + VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12, + VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13, + VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14, + VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15, + VHOST_USER_PROTOCOL_F_MAX +}; + +/* Virtio transport features mapping */ +static qmp_virtio_feature_map_t virtio_transport_map[] = { + /* Virtio device transport features */ +#ifndef VIRTIO_CONFIG_NO_LEGACY + FEATURE_ENTRY(VIRTIO_F_NOTIFY_ON_EMPTY, \ + "VIRTIO_F_NOTIFY_ON_EMPTY: Notify when device runs out of avail. " + "descs. on VQ"), + FEATURE_ENTRY(VIRTIO_F_ANY_LAYOUT, \ + "VIRTIO_F_ANY_LAYOUT: Device accepts arbitrary desc. 
layouts"), +#endif /* !VIRTIO_CONFIG_NO_LEGACY */ + FEATURE_ENTRY(VIRTIO_F_VERSION_1, \ + "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)"), + FEATURE_ENTRY(VIRTIO_F_IOMMU_PLATFORM, \ + "VIRTIO_F_IOMMU_PLATFORM: Device can be used on IOMMU platform"), + FEATURE_ENTRY(VIRTIO_F_RING_PACKED, \ + "VIRTIO_F_RING_PACKED: Device supports packed VQ layout"), + FEATURE_ENTRY(VIRTIO_F_IN_ORDER, \ + "VIRTIO_F_IN_ORDER: Device uses buffers in same order as made " + "available by driver"), + FEATURE_ENTRY(VIRTIO_F_ORDER_PLATFORM, \ + "VIRTIO_F_ORDER_PLATFORM: Memory accesses ordered by platform"), + FEATURE_ENTRY(VIRTIO_F_SR_IOV, \ + "VIRTIO_F_SR_IOV: Device supports single root I/O virtualization"), + /* Virtio ring transport features */ + FEATURE_ENTRY(VIRTIO_RING_F_INDIRECT_DESC, \ + "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported"), + FEATURE_ENTRY(VIRTIO_RING_F_EVENT_IDX, \ + "VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled"), + { -1, "" } +}; + +/* Vhost-user protocol features mapping */ +static qmp_virtio_feature_map_t vhost_user_protocol_map[] = { + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_MQ, \ + "VHOST_USER_PROTOCOL_F_MQ: Multiqueue protocol supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_LOG_SHMFD, \ + "VHOST_USER_PROTOCOL_F_LOG_SHMFD: Shared log memory fd supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_RARP, \ + "VHOST_USER_PROTOCOL_F_RARP: Vhost-user back-end RARP broadcasting " + "supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_REPLY_ACK, \ + "VHOST_USER_PROTOCOL_F_REPLY_ACK: Requested operation status ack. " + "supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_NET_MTU, \ + "VHOST_USER_PROTOCOL_F_NET_MTU: Expose host MTU to guest supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SLAVE_REQ, \ + "VHOST_USER_PROTOCOL_F_SLAVE_REQ: Socket fd for back-end initiated " + "requests supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CROSS_ENDIAN, \ + "VHOST_USER_PROTOCOL_F_CROSS_ENDIAN: Endianness of VQs for legacy " + "devices supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CRYPTO_SESSION, \ + "VHOST_USER_PROTOCOL_F_CRYPTO_SESSION: Session creation for crypto " + "operations supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_PAGEFAULT, \ + "VHOST_USER_PROTOCOL_F_PAGEFAULT: Request servicing on userfaultfd " + "for accessed pages supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CONFIG, \ + "VHOST_USER_PROTOCOL_F_CONFIG: Vhost-user messaging for virtio " + "device configuration space supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD, \ + "VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD: Slave fd communication " + "channel supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_HOST_NOTIFIER, \ + "VHOST_USER_PROTOCOL_F_HOST_NOTIFIER: Host notifiers for specified " + "VQs supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD, \ + "VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD: Shared inflight I/O buffers " + "supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_RESET_DEVICE, \ + "VHOST_USER_PROTOCOL_F_RESET_DEVICE: Disabling all rings and " + "resetting internal device state supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS, \ + "VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS: In-band messaging " + "supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS, \ + "VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS: Configuration for " + "memory slots supported"), + { -1, "" } +}; + +/* virtio device configuration statuses */ +static qmp_virtio_feature_map_t virtio_config_status_map[] = { + FEATURE_ENTRY(VIRTIO_CONFIG_S_DRIVER_OK, \ + 
"VIRTIO_CONFIG_S_DRIVER_OK: Driver setup and ready"), + FEATURE_ENTRY(VIRTIO_CONFIG_S_FEATURES_OK, \ + "VIRTIO_CONFIG_S_FEATURES_OK: Feature negotiation complete"), + FEATURE_ENTRY(VIRTIO_CONFIG_S_DRIVER, \ + "VIRTIO_CONFIG_S_DRIVER: Guest OS compatible with device"), + FEATURE_ENTRY(VIRTIO_CONFIG_S_NEEDS_RESET, \ + "VIRTIO_CONFIG_S_NEEDS_RESET: Irrecoverable error, device needs " + "reset"), + FEATURE_ENTRY(VIRTIO_CONFIG_S_FAILED, \ + "VIRTIO_CONFIG_S_FAILED: Error in guest, device failed"), + FEATURE_ENTRY(VIRTIO_CONFIG_S_ACKNOWLEDGE, \ + "VIRTIO_CONFIG_S_ACKNOWLEDGE: Valid virtio device found"), + { -1, "" } +}; + +/* virtio-blk features mapping */ +qmp_virtio_feature_map_t virtio_blk_feature_map[] = { + FEATURE_ENTRY(VIRTIO_BLK_F_SIZE_MAX, \ + "VIRTIO_BLK_F_SIZE_MAX: Max segment size is size_max"), + FEATURE_ENTRY(VIRTIO_BLK_F_SEG_MAX, \ + "VIRTIO_BLK_F_SEG_MAX: Max segments in a request is seg_max"), + FEATURE_ENTRY(VIRTIO_BLK_F_GEOMETRY, \ + "VIRTIO_BLK_F_GEOMETRY: Legacy geometry available"), + FEATURE_ENTRY(VIRTIO_BLK_F_RO, \ + "VIRTIO_BLK_F_RO: Device is read-only"), + FEATURE_ENTRY(VIRTIO_BLK_F_BLK_SIZE, \ + "VIRTIO_BLK_F_BLK_SIZE: Block size of disk available"), + FEATURE_ENTRY(VIRTIO_BLK_F_TOPOLOGY, \ + "VIRTIO_BLK_F_TOPOLOGY: Topology information available"), + FEATURE_ENTRY(VIRTIO_BLK_F_MQ, \ + "VIRTIO_BLK_F_MQ: Multiqueue supported"), + FEATURE_ENTRY(VIRTIO_BLK_F_DISCARD, \ + "VIRTIO_BLK_F_DISCARD: Discard command supported"), + FEATURE_ENTRY(VIRTIO_BLK_F_WRITE_ZEROES, \ + "VIRTIO_BLK_F_WRITE_ZEROES: Write zeroes command supported"), +#ifndef VIRTIO_BLK_NO_LEGACY + FEATURE_ENTRY(VIRTIO_BLK_F_BARRIER, \ + "VIRTIO_BLK_F_BARRIER: Request barriers supported"), + FEATURE_ENTRY(VIRTIO_BLK_F_SCSI, \ + "VIRTIO_BLK_F_SCSI: SCSI packet commands supported"), + FEATURE_ENTRY(VIRTIO_BLK_F_FLUSH, \ + "VIRTIO_BLK_F_FLUSH: Flush command supported"), + FEATURE_ENTRY(VIRTIO_BLK_F_CONFIG_WCE, \ + "VIRTIO_BLK_F_CONFIG_WCE: Cache writeback and writethrough modes " + "supported"), +#endif /* !VIRTIO_BLK_NO_LEGACY */ + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ + "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features " + "negotiation supported"), + { -1, "" } +}; + +/* virtio-serial features mapping */ +qmp_virtio_feature_map_t virtio_serial_feature_map[] = { + FEATURE_ENTRY(VIRTIO_CONSOLE_F_SIZE, \ + "VIRTIO_CONSOLE_F_SIZE: Host providing console size"), + FEATURE_ENTRY(VIRTIO_CONSOLE_F_MULTIPORT, \ + "VIRTIO_CONSOLE_F_MULTIPORT: Multiple ports for device supported"), + FEATURE_ENTRY(VIRTIO_CONSOLE_F_EMERG_WRITE, \ + "VIRTIO_CONSOLE_F_EMERG_WRITE: Emergency write supported"), + { -1, "" } +}; + +/* virtio-gpu features mapping */ +qmp_virtio_feature_map_t virtio_gpu_feature_map[] = { + FEATURE_ENTRY(VIRTIO_GPU_F_VIRGL, \ + "VIRTIO_GPU_F_VIRGL: Virgl 3D mode supported"), + FEATURE_ENTRY(VIRTIO_GPU_F_EDID, \ + "VIRTIO_GPU_F_EDID: EDID metadata supported"), + FEATURE_ENTRY(VIRTIO_GPU_F_RESOURCE_UUID, \ + "VIRTIO_GPU_F_RESOURCE_UUID: Resource UUID assigning supported"), + FEATURE_ENTRY(VIRTIO_GPU_F_RESOURCE_BLOB, \ + "VIRTIO_GPU_F_RESOURCE_BLOB: Size-based blob resources supported"), + FEATURE_ENTRY(VIRTIO_GPU_F_CONTEXT_INIT, \ + "VIRTIO_GPU_F_CONTEXT_INIT: Context types and synchronization " + "timelines supported"), + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ + "VHOST_USER_F_PROTOCOL_FEATURES: 
Vhost-user protocol features " + "negotiation supported"), + { -1, "" } +}; + +/* virtio-input features mapping */ +qmp_virtio_feature_map_t virtio_input_feature_map[] = { + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ + "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features " + "negotiation supported"), + { -1, "" } +}; + +/* virtio-net features mapping */ +qmp_virtio_feature_map_t virtio_net_feature_map[] = { + FEATURE_ENTRY(VIRTIO_NET_F_CSUM, \ + "VIRTIO_NET_F_CSUM: Device handling packets with partial checksum " + "supported"), + FEATURE_ENTRY(VIRTIO_NET_F_GUEST_CSUM, \ + "VIRTIO_NET_F_GUEST_CSUM: Driver handling packets with partial " + "checksum supported"), + FEATURE_ENTRY(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \ + "VIRTIO_NET_F_CTRL_GUEST_OFFLOADS: Control channel offloading " + "reconfig. supported"), + FEATURE_ENTRY(VIRTIO_NET_F_MTU, \ + "VIRTIO_NET_F_MTU: Device max MTU reporting supported"), + FEATURE_ENTRY(VIRTIO_NET_F_MAC, \ + "VIRTIO_NET_F_MAC: Device has given MAC address"), + FEATURE_ENTRY(VIRTIO_NET_F_GUEST_TSO4, \ + "VIRTIO_NET_F_GUEST_TSO4: Driver can receive TSOv4"), + FEATURE_ENTRY(VIRTIO_NET_F_GUEST_TSO6, \ + "VIRTIO_NET_F_GUEST_TSO6: Driver can receive TSOv6"), + FEATURE_ENTRY(VIRTIO_NET_F_GUEST_ECN, \ + "VIRTIO_NET_F_GUEST_ECN: Driver can receive TSO with ECN"), + FEATURE_ENTRY(VIRTIO_NET_F_GUEST_UFO, \ + "VIRTIO_NET_F_GUEST_UFO: Driver can receive UFO"), + FEATURE_ENTRY(VIRTIO_NET_F_HOST_TSO4, \ + "VIRTIO_NET_F_HOST_TSO4: Device can receive TSOv4"), + FEATURE_ENTRY(VIRTIO_NET_F_HOST_TSO6, \ + "VIRTIO_NET_F_HOST_TSO6: Device can receive TSOv6"), + FEATURE_ENTRY(VIRTIO_NET_F_HOST_ECN, \ + "VIRTIO_NET_F_HOST_ECN: Device can receive TSO with ECN"), + FEATURE_ENTRY(VIRTIO_NET_F_HOST_UFO, \ + "VIRTIO_NET_F_HOST_UFO: Device can receive UFO"), + FEATURE_ENTRY(VIRTIO_NET_F_MRG_RXBUF, \ + "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers"), + FEATURE_ENTRY(VIRTIO_NET_F_STATUS, \ + "VIRTIO_NET_F_STATUS: Configuration status field available"), + FEATURE_ENTRY(VIRTIO_NET_F_CTRL_VQ, \ + "VIRTIO_NET_F_CTRL_VQ: Control channel available"), + FEATURE_ENTRY(VIRTIO_NET_F_CTRL_RX, \ + "VIRTIO_NET_F_CTRL_RX: Control channel RX mode supported"), + FEATURE_ENTRY(VIRTIO_NET_F_CTRL_VLAN, \ + "VIRTIO_NET_F_CTRL_VLAN: Control channel VLAN filtering supported"), + FEATURE_ENTRY(VIRTIO_NET_F_CTRL_RX_EXTRA, \ + "VIRTIO_NET_F_CTRL_RX_EXTRA: Extra RX mode control supported"), + FEATURE_ENTRY(VIRTIO_NET_F_GUEST_ANNOUNCE, \ + "VIRTIO_NET_F_GUEST_ANNOUNCE: Driver sending gratuitous packets " + "supported"), + FEATURE_ENTRY(VIRTIO_NET_F_MQ, \ + "VIRTIO_NET_F_MQ: Multiqueue with automatic receive steering " + "supported"), + FEATURE_ENTRY(VIRTIO_NET_F_CTRL_MAC_ADDR, \ + "VIRTIO_NET_F_CTRL_MAC_ADDR: MAC address set through control " + "channel"), + FEATURE_ENTRY(VIRTIO_NET_F_HASH_REPORT, \ + "VIRTIO_NET_F_HASH_REPORT: Hash reporting supported"), + FEATURE_ENTRY(VIRTIO_NET_F_RSS, \ + "VIRTIO_NET_F_RSS: RSS RX steering supported"), + FEATURE_ENTRY(VIRTIO_NET_F_RSC_EXT, \ + "VIRTIO_NET_F_RSC_EXT: Extended coalescing info supported"), + FEATURE_ENTRY(VIRTIO_NET_F_STANDBY, \ + "VIRTIO_NET_F_STANDBY: Device acting as standby for primary " + "device with same MAC addr. 
supported"), + FEATURE_ENTRY(VIRTIO_NET_F_SPEED_DUPLEX, \ + "VIRTIO_NET_F_SPEED_DUPLEX: Device set linkspeed and duplex"), +#ifndef VIRTIO_NET_NO_LEGACY + FEATURE_ENTRY(VIRTIO_NET_F_GSO, \ + "VIRTIO_NET_F_GSO: Handling GSO-type packets supported"), +#endif /* !VIRTIO_NET_NO_LEGACY */ + FEATURE_ENTRY(VHOST_NET_F_VIRTIO_NET_HDR, \ + "VHOST_NET_F_VIRTIO_NET_HDR: Virtio-net headers for RX and TX " + "packets supported"), + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ + "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features " + "negotiation supported"), + { -1, "" } +}; + +/* virtio-scsi features mapping */ +qmp_virtio_feature_map_t virtio_scsi_feature_map[] = { + FEATURE_ENTRY(VIRTIO_SCSI_F_INOUT, \ + "VIRTIO_SCSI_F_INOUT: Requests including read and writable data " + "buffers suppoted"), + FEATURE_ENTRY(VIRTIO_SCSI_F_HOTPLUG, \ + "VIRTIO_SCSI_F_HOTPLUG: Reporting and handling hot-plug events " + "supported"), + FEATURE_ENTRY(VIRTIO_SCSI_F_CHANGE, \ + "VIRTIO_SCSI_F_CHANGE: Reporting and handling LUN changes " + "supported"), + FEATURE_ENTRY(VIRTIO_SCSI_F_T10_PI, \ + "VIRTIO_SCSI_F_T10_PI: T10 info included in request header"), + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ + "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features " + "negotiation supported"), + { -1, "" } +}; + +/* virtio/vhost-user-fs features mapping */ +qmp_virtio_feature_map_t virtio_fs_feature_map[] = { + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ + "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features " + "negotiation supported"), + { -1, "" } +}; + +/* virtio/vhost-user-i2c features mapping */ +qmp_virtio_feature_map_t virtio_i2c_feature_map[] = { + FEATURE_ENTRY(VIRTIO_I2C_F_ZERO_LENGTH_REQUEST, \ + "VIRTIO_I2C_F_ZERO_LEGNTH_REQUEST: Zero length requests supported"), + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ + "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features " + "negotiation supported"), + { -1, "" } +}; + +/* virtio/vhost-vsock features mapping */ +qmp_virtio_feature_map_t virtio_vsock_feature_map[] = { + FEATURE_ENTRY(VIRTIO_VSOCK_F_SEQPACKET, \ + "VIRTIO_VSOCK_F_SEQPACKET: SOCK_SEQPACKET supported"), + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ + "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features " + "negotiation supported"), + { -1, "" } +}; + +/* virtio-balloon features mapping */ +qmp_virtio_feature_map_t virtio_balloon_feature_map[] = { + FEATURE_ENTRY(VIRTIO_BALLOON_F_MUST_TELL_HOST, \ + "VIRTIO_BALLOON_F_MUST_TELL_HOST: Tell host before reclaiming " + "pages"), + FEATURE_ENTRY(VIRTIO_BALLOON_F_STATS_VQ, \ + "VIRTIO_BALLOON_F_STATS_VQ: Guest memory stats VQ available"), + FEATURE_ENTRY(VIRTIO_BALLOON_F_DEFLATE_ON_OOM, \ + "VIRTIO_BALLOON_F_DEFLATE_ON_OOM: Deflate balloon when guest OOM"), + FEATURE_ENTRY(VIRTIO_BALLOON_F_FREE_PAGE_HINT, \ + "VIRTIO_BALLOON_F_FREE_PAGE_HINT: VQ reporting free pages enabled"), + FEATURE_ENTRY(VIRTIO_BALLOON_F_PAGE_POISON, \ + "VIRTIO_BALLOON_F_PAGE_POISON: Guest page poisoning enabled"), + FEATURE_ENTRY(VIRTIO_BALLOON_F_REPORTING, \ + 
"VIRTIO_BALLOON_F_REPORTING: Page reporting VQ enabled"), + { -1, "" } +}; + +/* virtio-crypto features mapping */ +qmp_virtio_feature_map_t virtio_crypto_feature_map[] = { + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + { -1, "" } +}; + +/* virtio-iommu features mapping */ +qmp_virtio_feature_map_t virtio_iommu_feature_map[] = { + FEATURE_ENTRY(VIRTIO_IOMMU_F_INPUT_RANGE, \ + "VIRTIO_IOMMU_F_INPUT_RANGE: Range of available virtual addrs. " + "available"), + FEATURE_ENTRY(VIRTIO_IOMMU_F_DOMAIN_RANGE, \ + "VIRTIO_IOMMU_F_DOMAIN_RANGE: Number of supported domains " + "available"), + FEATURE_ENTRY(VIRTIO_IOMMU_F_MAP_UNMAP, \ + "VIRTIO_IOMMU_F_MAP_UNMAP: Map and unmap requests available"), + FEATURE_ENTRY(VIRTIO_IOMMU_F_BYPASS, \ + "VIRTIO_IOMMU_F_BYPASS: Endpoints not attached to domains are in " + "bypass mode"), + FEATURE_ENTRY(VIRTIO_IOMMU_F_PROBE, \ + "VIRTIO_IOMMU_F_PROBE: Probe requests available"), + FEATURE_ENTRY(VIRTIO_IOMMU_F_MMIO, \ + "VIRTIO_IOMMU_F_MMIO: VIRTIO_IOMMU_MAP_F_MMIO flag available"), + FEATURE_ENTRY(VIRTIO_IOMMU_F_BYPASS_CONFIG, \ + "VIRTIO_IOMMU_F_BYPASS_CONFIG: Bypass field of IOMMU config " + "available"), + { -1, "" } +}; + +/* virtio-mem features mapping */ +qmp_virtio_feature_map_t virtio_mem_feature_map[] = { +#ifndef CONFIG_ACPI + FEATURE_ENTRY(VIRTIO_MEM_F_ACPI_PXM, \ + "VIRTIO_MEM_F_ACPI_PXM: node_id is an ACPI PXM and is valid"), +#endif /* !CONFIG_ACPI */ + FEATURE_ENTRY(VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, \ + "VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE: Unplugged memory cannot be " + "accessed"), + { -1, "" } +}; + +/* virtio-rng features mapping */ +qmp_virtio_feature_map_t virtio_rng_feature_map[] = { + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ + "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features " + "negotiation supported"), + { -1, "" } +}; /* * The alignment to use between consumer and producer parts of vring. 
@@ -125,7 +557,6 @@ struct VirtQueue uint16_t vector; VirtIOHandleOutput handle_output; - VirtIOHandleAIOOutput handle_aio_output; VirtIODevice *vdev; EventNotifier guest_notifier; EventNotifier host_notifier; @@ -133,12 +564,60 @@ struct VirtQueue QLIST_ENTRY(VirtQueue) node; }; +const char *virtio_device_names[] = { + [VIRTIO_ID_NET] = "virtio-net", + [VIRTIO_ID_BLOCK] = "virtio-blk", + [VIRTIO_ID_CONSOLE] = "virtio-serial", + [VIRTIO_ID_RNG] = "virtio-rng", + [VIRTIO_ID_BALLOON] = "virtio-balloon", + [VIRTIO_ID_IOMEM] = "virtio-iomem", + [VIRTIO_ID_RPMSG] = "virtio-rpmsg", + [VIRTIO_ID_SCSI] = "virtio-scsi", + [VIRTIO_ID_9P] = "virtio-9p", + [VIRTIO_ID_MAC80211_WLAN] = "virtio-mac-wlan", + [VIRTIO_ID_RPROC_SERIAL] = "virtio-rproc-serial", + [VIRTIO_ID_CAIF] = "virtio-caif", + [VIRTIO_ID_MEMORY_BALLOON] = "virtio-mem-balloon", + [VIRTIO_ID_GPU] = "virtio-gpu", + [VIRTIO_ID_CLOCK] = "virtio-clk", + [VIRTIO_ID_INPUT] = "virtio-input", + [VIRTIO_ID_VSOCK] = "vhost-vsock", + [VIRTIO_ID_CRYPTO] = "virtio-crypto", + [VIRTIO_ID_SIGNAL_DIST] = "virtio-signal", + [VIRTIO_ID_PSTORE] = "virtio-pstore", + [VIRTIO_ID_IOMMU] = "virtio-iommu", + [VIRTIO_ID_MEM] = "virtio-mem", + [VIRTIO_ID_SOUND] = "virtio-sound", + [VIRTIO_ID_FS] = "virtio-user-fs", + [VIRTIO_ID_PMEM] = "virtio-pmem", + [VIRTIO_ID_RPMB] = "virtio-rpmb", + [VIRTIO_ID_MAC80211_HWSIM] = "virtio-mac-hwsim", + [VIRTIO_ID_VIDEO_ENCODER] = "virtio-vid-encoder", + [VIRTIO_ID_VIDEO_DECODER] = "virtio-vid-decoder", + [VIRTIO_ID_SCMI] = "virtio-scmi", + [VIRTIO_ID_NITRO_SEC_MOD] = "virtio-nitro-sec-mod", + [VIRTIO_ID_I2C_ADAPTER] = "vhost-user-i2c", + [VIRTIO_ID_WATCHDOG] = "virtio-watchdog", + [VIRTIO_ID_CAN] = "virtio-can", + [VIRTIO_ID_DMABUF] = "virtio-dmabuf", + [VIRTIO_ID_PARAM_SERV] = "virtio-param-serv", + [VIRTIO_ID_AUDIO_POLICY] = "virtio-audio-pol", + [VIRTIO_ID_BT] = "virtio-bluetooth", + [VIRTIO_ID_GPIO] = "virtio-gpio" +}; + +static const char *virtio_id_to_name(uint16_t device_id) +{ + assert(device_id < G_N_ELEMENTS(virtio_device_names)); + const char *name = virtio_device_names[device_id]; + assert(name != NULL); + return name; +} + +/* Called within call_rcu(). 
*/ static void virtio_free_region_cache(VRingMemoryRegionCaches *caches) { - if (!caches) { - return; - } - + assert(caches != NULL); address_space_cache_destroy(&caches->desc); address_space_cache_destroy(&caches->avail); address_space_cache_destroy(&caches->used); @@ -249,13 +728,10 @@ static void vring_packed_event_read(VirtIODevice *vdev, hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap); hwaddr off_flags = offsetof(VRingPackedDescEvent, flags); - address_space_read_cached(cache, off_flags, &e->flags, - sizeof(e->flags)); + e->flags = virtio_lduw_phys_cached(vdev, cache, off_flags); /* Make sure flags is seen before off_wrap */ smp_rmb(); - address_space_read_cached(cache, off_off, &e->off_wrap, - sizeof(e->off_wrap)); - virtio_tswap16s(vdev, &e->off_wrap); + e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off); virtio_tswap16s(vdev, &e->flags); } @@ -265,8 +741,7 @@ static void vring_packed_off_wrap_write(VirtIODevice *vdev, { hwaddr off = offsetof(VRingPackedDescEvent, off_wrap); - virtio_tswap16s(vdev, &off_wrap); - address_space_write_cached(cache, off, &off_wrap, sizeof(off_wrap)); + virtio_stw_phys_cached(vdev, cache, off, off_wrap); address_space_cache_invalidate(cache, off, sizeof(off_wrap)); } @@ -275,8 +750,7 @@ static void vring_packed_flags_write(VirtIODevice *vdev, { hwaddr off = offsetof(VRingPackedDescEvent, flags); - virtio_tswap16s(vdev, &flags); - address_space_write_cached(cache, off, &flags, sizeof(flags)); + virtio_stw_phys_cached(vdev, cache, off, flags); address_space_cache_invalidate(cache, off, sizeof(flags)); } @@ -349,6 +823,19 @@ static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem, address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem)); } +/* Called within rcu_read_lock(). */ +static inline uint16_t vring_used_flags(VirtQueue *vq) +{ + VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); + hwaddr pa = offsetof(VRingUsed, flags); + + if (!caches) { + return 0; + } + + return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); +} + /* Called within rcu_read_lock(). */ static uint16_t vring_used_idx(VirtQueue *vq) { @@ -509,11 +996,9 @@ static void vring_packed_desc_read_flags(VirtIODevice *vdev, MemoryRegionCache *cache, int i) { - address_space_read_cached(cache, - i * sizeof(VRingPackedDesc) + - offsetof(VRingPackedDesc, flags), - flags, sizeof(*flags)); - virtio_tswap16s(vdev, flags); + hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags); + + *flags = virtio_lduw_phys_cached(vdev, cache, off); } static void vring_packed_desc_read(VirtIODevice *vdev, @@ -566,8 +1051,7 @@ static void vring_packed_desc_write_flags(VirtIODevice *vdev, { hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags); - virtio_tswap16s(vdev, &desc->flags); - address_space_write_cached(cache, off, &desc->flags, sizeof(desc->flags)); + virtio_stw_phys_cached(vdev, cache, off, desc->flags); address_space_cache_invalidate(cache, off, sizeof(desc->flags)); } @@ -634,6 +1118,7 @@ static int virtio_queue_split_empty(VirtQueue *vq) return empty; } +/* Called within rcu_read_lock(). 
*/ static int virtio_queue_packed_empty_rcu(VirtQueue *vq) { struct VRingPackedDesc desc; @@ -894,6 +1379,7 @@ static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count) if (vq->used_idx >= vq->vring.num) { vq->used_idx -= vq->vring.num; vq->used_wrap_counter ^= 1; + vq->signalled_used_valid = false; } } @@ -985,34 +1471,28 @@ static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc, return VIRTQUEUE_READ_DESC_MORE; } +/* Called within rcu_read_lock(). */ static void virtqueue_split_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, unsigned int *out_bytes, - unsigned max_in_bytes, unsigned max_out_bytes) + unsigned max_in_bytes, unsigned max_out_bytes, + VRingMemoryRegionCaches *caches) { VirtIODevice *vdev = vq->vdev; - unsigned int max, idx; + unsigned int idx; unsigned int total_bufs, in_total, out_total; - VRingMemoryRegionCaches *caches; MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; int64_t len = 0; int rc; - RCU_READ_LOCK_GUARD(); - idx = vq->last_avail_idx; total_bufs = in_total = out_total = 0; - max = vq->vring.num; - caches = vring_get_region_caches(vq); - if (!caches) { - goto err; - } - while ((rc = virtqueue_num_heads(vq, idx)) > 0) { MemoryRegionCache *desc_cache = &caches->desc; unsigned int num_bufs; VRingDesc desc; unsigned int i; + unsigned int max = vq->vring.num; num_bufs = total_bufs; @@ -1125,39 +1605,35 @@ static int virtqueue_packed_read_next_desc(VirtQueue *vq, return VIRTQUEUE_READ_DESC_MORE; } +/* Called within rcu_read_lock(). */ static void virtqueue_packed_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, unsigned int *out_bytes, unsigned max_in_bytes, - unsigned max_out_bytes) + unsigned max_out_bytes, + VRingMemoryRegionCaches *caches) { VirtIODevice *vdev = vq->vdev; - unsigned int max, idx; + unsigned int idx; unsigned int total_bufs, in_total, out_total; MemoryRegionCache *desc_cache; - VRingMemoryRegionCaches *caches; MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; int64_t len = 0; VRingPackedDesc desc; bool wrap_counter; - RCU_READ_LOCK_GUARD(); idx = vq->last_avail_idx; wrap_counter = vq->last_avail_wrap_counter; total_bufs = in_total = out_total = 0; - max = vq->vring.num; - caches = vring_get_region_caches(vq); - if (!caches) { - goto err; - } - for (;;) { unsigned int num_bufs = total_bufs; unsigned int i = idx; int rc; + unsigned int max = vq->vring.num; desc_cache = &caches->desc; + vring_packed_desc_read(vdev, &desc, desc_cache, idx, true); if (!is_desc_avail(desc.flags, wrap_counter)) { break; @@ -1251,6 +1727,8 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, uint16_t desc_size; VRingMemoryRegionCaches *caches; + RCU_READ_LOCK_GUARD(); + if (unlikely(!vq->vring.desc)) { goto err; } @@ -1269,10 +1747,12 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes, - max_in_bytes, max_out_bytes); + max_in_bytes, max_out_bytes, + caches); } else { virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes, - max_in_bytes, max_out_bytes); + max_in_bytes, max_out_bytes, + caches); } return; @@ -1320,7 +1800,8 @@ static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg, iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len, is_write ? 
DMA_DIRECTION_FROM_DEVICE : - DMA_DIRECTION_TO_DEVICE); + DMA_DIRECTION_TO_DEVICE, + MEMTXATTRS_UNSPECIFIED); if (!iov[num_sg].iov_base) { virtio_error(vdev, "virtio: bogus descriptor or out of resources"); goto out; @@ -1369,7 +1850,8 @@ static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg, sg[i].iov_base = dma_memory_map(vdev->dma_as, addr[i], &len, is_write ? DMA_DIRECTION_FROM_DEVICE : - DMA_DIRECTION_TO_DEVICE); + DMA_DIRECTION_TO_DEVICE, + MEMTXATTRS_UNSPECIFIED); if (!sg[i].iov_base) { error_report("virtio: error trying to map MMIO memory"); exit(1); @@ -1704,6 +2186,8 @@ static unsigned int virtqueue_packed_drop_all(VirtQueue *vq) VirtIODevice *vdev = vq->vdev; VRingPackedDesc desc; + RCU_READ_LOCK_GUARD(); + caches = vring_get_region_caches(vq); if (!caches) { return 0; @@ -1979,6 +2463,58 @@ static enum virtio_device_endian virtio_current_cpu_endian(void) } } +static void __virtio_queue_reset(VirtIODevice *vdev, uint32_t i) +{ + vdev->vq[i].vring.desc = 0; + vdev->vq[i].vring.avail = 0; + vdev->vq[i].vring.used = 0; + vdev->vq[i].last_avail_idx = 0; + vdev->vq[i].shadow_avail_idx = 0; + vdev->vq[i].used_idx = 0; + vdev->vq[i].last_avail_wrap_counter = true; + vdev->vq[i].shadow_avail_wrap_counter = true; + vdev->vq[i].used_wrap_counter = true; + virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR); + vdev->vq[i].signalled_used = 0; + vdev->vq[i].signalled_used_valid = false; + vdev->vq[i].notification = true; + vdev->vq[i].vring.num = vdev->vq[i].vring.num_default; + vdev->vq[i].inuse = 0; + virtio_virtqueue_reset_region_cache(&vdev->vq[i]); +} + +void virtio_queue_reset(VirtIODevice *vdev, uint32_t queue_index) +{ + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + + if (k->queue_reset) { + k->queue_reset(vdev, queue_index); + } + + __virtio_queue_reset(vdev, queue_index); +} + +void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index) +{ + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + + /* + * TODO: Seabios is currently out of spec and triggering this error. + * So this needs to be fixed in Seabios, then this can + * be re-enabled for new machine types only, and also after + * being converted to LOG_GUEST_ERROR. 
+ * + if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { + error_report("queue_enable is only suppported in devices of virtio " + "1.0 or later."); + } + */ + + if (k->queue_enable) { + k->queue_enable(vdev, queue_index); + } +} + void virtio_reset(void *opaque) { VirtIODevice *vdev = opaque; @@ -2010,22 +2546,7 @@ void virtio_reset(void *opaque) virtio_notify_vector(vdev, vdev->config_vector); for(i = 0; i < VIRTIO_QUEUE_MAX; i++) { - vdev->vq[i].vring.desc = 0; - vdev->vq[i].vring.avail = 0; - vdev->vq[i].vring.used = 0; - vdev->vq[i].last_avail_idx = 0; - vdev->vq[i].shadow_avail_idx = 0; - vdev->vq[i].used_idx = 0; - vdev->vq[i].last_avail_wrap_counter = true; - vdev->vq[i].shadow_avail_wrap_counter = true; - vdev->vq[i].used_wrap_counter = true; - virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR); - vdev->vq[i].signalled_used = 0; - vdev->vq[i].signalled_used_valid = false; - vdev->vq[i].notification = true; - vdev->vq[i].vring.num = vdev->vq[i].vring.num_default; - vdev->vq[i].inuse = 0; - virtio_virtqueue_reset_region_cache(&vdev->vq[i]); + __virtio_queue_reset(vdev, i); } } @@ -2312,24 +2833,6 @@ void virtio_queue_set_align(VirtIODevice *vdev, int n, int align) } } -static bool virtio_queue_notify_aio_vq(VirtQueue *vq) -{ - bool ret = false; - - if (vq->vring.desc && vq->handle_aio_output) { - VirtIODevice *vdev = vq->vdev; - - trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); - ret = vq->handle_aio_output(vdev, vq); - - if (unlikely(vdev->start_on_kick)) { - virtio_set_started(vdev, true); - } - } - - return ret; -} - static void virtio_queue_notify_vq(VirtQueue *vq) { if (vq->vring.desc && vq->handle_output) { @@ -2408,9 +2911,7 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size, vdev->vq[i].vring.num_default = queue_size; vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN; vdev->vq[i].handle_output = handle_output; - vdev->vq[i].handle_aio_output = NULL; - vdev->vq[i].used_elems = g_malloc0(sizeof(VirtQueueElement) * - queue_size); + vdev->vq[i].used_elems = g_new0(VirtQueueElement, queue_size); return &vdev->vq[i]; } @@ -2420,7 +2921,6 @@ void virtio_delete_queue(VirtQueue *vq) vq->vring.num = 0; vq->vring.num_default = 0; vq->handle_output = NULL; - vq->handle_aio_output = NULL; g_free(vq->used_elems); vq->used_elems = NULL; virtio_virtqueue_reset_region_cache(vq); @@ -2838,7 +3338,6 @@ static const VMStateDescription vmstate_virtio = { .name = "virtio", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_END_OF_LIST() }, @@ -2962,6 +3461,13 @@ int virtio_set_features(VirtIODevice *vdev, uint64_t val) if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) { return -EINVAL; } + + if (val & (1ull << VIRTIO_F_BAD_FEATURE)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: guest driver for %s has enabled UNUSED(30) feature bit!\n", + __func__, vdev->name); + } + ret = virtio_set_features_nocheck(vdev, val); if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. 
*/ @@ -2981,11 +3487,12 @@ int virtio_set_features(VirtIODevice *vdev, uint64_t val) return ret; } -size_t virtio_feature_get_config_size(const VirtIOFeature *feature_sizes, - uint64_t host_features) +size_t virtio_get_config_size(const VirtIOConfigSizeParams *params, + uint64_t host_features) { - size_t config_size = 0; - int i; + size_t config_size = params->min_size; + const VirtIOFeature *feature_sizes = params->feature_sizes; + size_t i; for (i = 0; feature_sizes[i].flags != 0; i++) { if (host_features & feature_sizes[i].flags) { @@ -2993,6 +3500,7 @@ size_t virtio_feature_get_config_size(const VirtIOFeature *feature_sizes, } } + assert(config_size <= params->max_size); return config_size; } @@ -3239,8 +3747,7 @@ void virtio_instance_init_common(Object *proxy_obj, void *data, qdev_alias_all_properties(vdev, proxy_obj); } -void virtio_init(VirtIODevice *vdev, const char *name, - uint16_t device_id, size_t config_size) +void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size) { BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); @@ -3254,12 +3761,13 @@ void virtio_init(VirtIODevice *vdev, const char *name, vdev->start_on_kick = false; vdev->started = false; + vdev->vhost_started = false; vdev->device_id = device_id; vdev->status = 0; qatomic_set(&vdev->isr, 0); vdev->queue_sel = 0; vdev->config_vector = VIRTIO_NO_VECTOR; - vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX); + vdev->vq = g_new0(VirtQueue, VIRTIO_QUEUE_MAX); vdev->vm_running = runstate_is_running(); vdev->broken = false; for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { @@ -3269,7 +3777,7 @@ void virtio_init(VirtIODevice *vdev, const char *name, vdev->vq[i].host_notifier_enabled = false; } - vdev->name = name; + vdev->name = virtio_id_to_name(device_id); vdev->config_len = config_size; if (vdev->config_len) { vdev->config = g_malloc0(config_size); @@ -3525,14 +4033,6 @@ EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq) return &vq->guest_notifier; } -static void virtio_queue_host_notifier_aio_read(EventNotifier *n) -{ - VirtQueue *vq = container_of(n, VirtQueue, host_notifier); - if (event_notifier_test_and_clear(n)) { - virtio_queue_notify_aio_vq(vq); - } -} - static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n) { VirtQueue *vq = container_of(n, VirtQueue, host_notifier); @@ -3545,11 +4045,14 @@ static bool virtio_queue_host_notifier_aio_poll(void *opaque) EventNotifier *n = opaque; VirtQueue *vq = container_of(n, VirtQueue, host_notifier); - if (!vq->vring.desc || virtio_queue_empty(vq)) { - return false; - } + return vq->vring.desc && !virtio_queue_empty(vq); +} - return virtio_queue_notify_aio_vq(vq); +static void virtio_queue_host_notifier_aio_poll_ready(EventNotifier *n) +{ + VirtQueue *vq = container_of(n, VirtQueue, host_notifier); + + virtio_queue_notify_vq(vq); } static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n) @@ -3560,24 +4063,36 @@ static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n) virtio_queue_set_notification(vq, 1); } -void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx, - VirtIOHandleAIOOutput handle_output) +void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx) { - if (handle_output) { - vq->handle_aio_output = handle_output; - aio_set_event_notifier(ctx, &vq->host_notifier, true, - virtio_queue_host_notifier_aio_read, - virtio_queue_host_notifier_aio_poll); - aio_set_event_notifier_poll(ctx, &vq->host_notifier, - 
virtio_queue_host_notifier_aio_poll_begin, - virtio_queue_host_notifier_aio_poll_end); - } else { - aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL); - /* Test and clear notifier before after disabling event, - * in case poll callback didn't have time to run. */ - virtio_queue_host_notifier_aio_read(&vq->host_notifier); - vq->handle_aio_output = NULL; - } + aio_set_event_notifier(ctx, &vq->host_notifier, true, + virtio_queue_host_notifier_read, + virtio_queue_host_notifier_aio_poll, + virtio_queue_host_notifier_aio_poll_ready); + aio_set_event_notifier_poll(ctx, &vq->host_notifier, + virtio_queue_host_notifier_aio_poll_begin, + virtio_queue_host_notifier_aio_poll_end); +} + +/* + * Same as virtio_queue_aio_attach_host_notifier() but without polling. Use + * this for rx virtqueues and similar cases where the virtqueue handler + * function does not pop all elements. When the virtqueue is left non-empty + * polling consumes CPU cycles and should not be used. + */ +void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx) +{ + aio_set_event_notifier(ctx, &vq->host_notifier, true, + virtio_queue_host_notifier_read, + NULL, NULL); +} + +void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx) +{ + aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL); + /* Test and clear the notifier after disabling the event, + * in case the poll callback didn't have time to run. */ + virtio_queue_host_notifier_read(&vq->host_notifier); } void virtio_queue_host_notifier_read(EventNotifier *n) @@ -3617,7 +4132,7 @@ void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name) vdev->bus_name = g_strdup(bus_name); } -void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...) +void G_GNUC_PRINTF(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...) 
{ va_list ap; @@ -3671,7 +4186,9 @@ static void virtio_device_realize(DeviceState *dev, Error **errp) } vdev->listener.commit = virtio_memory_listener_commit; + vdev->listener.name = "virtio"; memory_listener_register(&vdev->listener, vdev->dma_as); + QTAILQ_INSERT_TAIL(&virtio_list, vdev, next); } static void virtio_device_unrealize(DeviceState *dev) @@ -3686,6 +4203,7 @@ static void virtio_device_unrealize(DeviceState *dev) vdc->unrealize(dev); } + QTAILQ_REMOVE(&virtio_list, vdev, next); g_free(vdev->bus_name); vdev->bus_name = NULL; } @@ -3859,6 +4377,8 @@ static void virtio_device_class_init(ObjectClass *klass, void *data) vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl; vdc->legacy_features |= VIRTIO_LEGACY_FEATURES; + + QTAILQ_INIT(&virtio_list); } bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev) @@ -3869,6 +4389,589 @@ bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev) return virtio_bus_ioeventfd_enabled(vbus); } +VirtioInfoList *qmp_x_query_virtio(Error **errp) +{ + VirtioInfoList *list = NULL; + VirtioInfoList *node; + VirtIODevice *vdev; + + QTAILQ_FOREACH(vdev, &virtio_list, next) { + DeviceState *dev = DEVICE(vdev); + Error *err = NULL; + QObject *obj = qmp_qom_get(dev->canonical_path, "realized", &err); + + if (err == NULL) { + GString *is_realized = qobject_to_json_pretty(obj, true); + /* virtio device is NOT realized, remove it from list */ + if (!strncmp(is_realized->str, "false", 4)) { + QTAILQ_REMOVE(&virtio_list, vdev, next); + } else { + node = g_new0(VirtioInfoList, 1); + node->value = g_new(VirtioInfo, 1); + node->value->path = g_strdup(dev->canonical_path); + node->value->name = g_strdup(vdev->name); + QAPI_LIST_PREPEND(list, node->value); + } + g_string_free(is_realized, true); + } + qobject_unref(obj); + } + + return list; +} + +static VirtIODevice *virtio_device_find(const char *path) +{ + VirtIODevice *vdev; + + QTAILQ_FOREACH(vdev, &virtio_list, next) { + DeviceState *dev = DEVICE(vdev); + + if (strcmp(dev->canonical_path, path) != 0) { + continue; + } + + Error *err = NULL; + QObject *obj = qmp_qom_get(dev->canonical_path, "realized", &err); + if (err == NULL) { + GString *is_realized = qobject_to_json_pretty(obj, true); + /* virtio device is NOT realized, remove it from list */ + if (!strncmp(is_realized->str, "false", 4)) { + g_string_free(is_realized, true); + qobject_unref(obj); + QTAILQ_REMOVE(&virtio_list, vdev, next); + return NULL; + } + g_string_free(is_realized, true); + } else { + /* virtio device doesn't exist in QOM tree */ + QTAILQ_REMOVE(&virtio_list, vdev, next); + qobject_unref(obj); + return NULL; + } + /* device exists in QOM tree & is realized */ + qobject_unref(obj); + return vdev; + } + return NULL; +} + +#define CONVERT_FEATURES(type, map, is_status, bitmap) \ + ({ \ + type *list = NULL; \ + type *node; \ + for (i = 0; map[i].virtio_bit != -1; i++) { \ + if (is_status) { \ + bit = map[i].virtio_bit; \ + } \ + else { \ + bit = 1ULL << map[i].virtio_bit; \ + } \ + if ((bitmap & bit) == 0) { \ + continue; \ + } \ + node = g_new0(type, 1); \ + node->value = g_strdup(map[i].feature_desc); \ + node->next = list; \ + list = node; \ + bitmap ^= bit; \ + } \ + list; \ + }) + +static VirtioDeviceStatus *qmp_decode_status(uint8_t bitmap) +{ + VirtioDeviceStatus *status; + uint8_t bit; + int i; + + status = g_new0(VirtioDeviceStatus, 1); + status->statuses = CONVERT_FEATURES(strList, virtio_config_status_map, + 1, bitmap); + status->has_unknown_statuses = bitmap != 0; + if (status->has_unknown_statuses) { + 
status->unknown_statuses = bitmap; + } + + return status; +} + +static VhostDeviceProtocols *qmp_decode_protocols(uint64_t bitmap) +{ + VhostDeviceProtocols *vhu_protocols; + uint64_t bit; + int i; + + vhu_protocols = g_new0(VhostDeviceProtocols, 1); + vhu_protocols->protocols = + CONVERT_FEATURES(strList, + vhost_user_protocol_map, 0, bitmap); + vhu_protocols->has_unknown_protocols = bitmap != 0; + if (vhu_protocols->has_unknown_protocols) { + vhu_protocols->unknown_protocols = bitmap; + } + + return vhu_protocols; +} + +static VirtioDeviceFeatures *qmp_decode_features(uint16_t device_id, + uint64_t bitmap) +{ + VirtioDeviceFeatures *features; + uint64_t bit; + int i; + + features = g_new0(VirtioDeviceFeatures, 1); + features->has_dev_features = true; + + /* transport features */ + features->transports = CONVERT_FEATURES(strList, virtio_transport_map, 0, + bitmap); + + /* device features */ + switch (device_id) { +#ifdef CONFIG_VIRTIO_SERIAL + case VIRTIO_ID_CONSOLE: + features->dev_features = + CONVERT_FEATURES(strList, virtio_serial_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_BLK + case VIRTIO_ID_BLOCK: + features->dev_features = + CONVERT_FEATURES(strList, virtio_blk_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_GPU + case VIRTIO_ID_GPU: + features->dev_features = + CONVERT_FEATURES(strList, virtio_gpu_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_NET + case VIRTIO_ID_NET: + features->dev_features = + CONVERT_FEATURES(strList, virtio_net_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_SCSI + case VIRTIO_ID_SCSI: + features->dev_features = + CONVERT_FEATURES(strList, virtio_scsi_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_BALLOON + case VIRTIO_ID_BALLOON: + features->dev_features = + CONVERT_FEATURES(strList, virtio_balloon_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_IOMMU + case VIRTIO_ID_IOMMU: + features->dev_features = + CONVERT_FEATURES(strList, virtio_iommu_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_INPUT + case VIRTIO_ID_INPUT: + features->dev_features = + CONVERT_FEATURES(strList, virtio_input_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VHOST_USER_FS + case VIRTIO_ID_FS: + features->dev_features = + CONVERT_FEATURES(strList, virtio_fs_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VHOST_VSOCK + case VIRTIO_ID_VSOCK: + features->dev_features = + CONVERT_FEATURES(strList, virtio_vsock_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_CRYPTO + case VIRTIO_ID_CRYPTO: + features->dev_features = + CONVERT_FEATURES(strList, virtio_crypto_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_MEM + case VIRTIO_ID_MEM: + features->dev_features = + CONVERT_FEATURES(strList, virtio_mem_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_I2C_ADAPTER + case VIRTIO_ID_I2C_ADAPTER: + features->dev_features = + CONVERT_FEATURES(strList, virtio_i2c_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_RNG + case VIRTIO_ID_RNG: + features->dev_features = + CONVERT_FEATURES(strList, virtio_rng_feature_map, 0, bitmap); + break; +#endif + /* No features */ + case VIRTIO_ID_9P: + case VIRTIO_ID_PMEM: + case VIRTIO_ID_IOMEM: + case VIRTIO_ID_RPMSG: + case VIRTIO_ID_CLOCK: + case VIRTIO_ID_MAC80211_WLAN: + case VIRTIO_ID_MAC80211_HWSIM: + case VIRTIO_ID_RPROC_SERIAL: + case VIRTIO_ID_MEMORY_BALLOON: + case VIRTIO_ID_CAIF: + case VIRTIO_ID_SIGNAL_DIST: + case VIRTIO_ID_PSTORE: + case VIRTIO_ID_SOUND: + 
case VIRTIO_ID_BT: + case VIRTIO_ID_RPMB: + case VIRTIO_ID_VIDEO_ENCODER: + case VIRTIO_ID_VIDEO_DECODER: + case VIRTIO_ID_SCMI: + case VIRTIO_ID_NITRO_SEC_MOD: + case VIRTIO_ID_WATCHDOG: + case VIRTIO_ID_CAN: + case VIRTIO_ID_DMABUF: + case VIRTIO_ID_PARAM_SERV: + case VIRTIO_ID_AUDIO_POLICY: + case VIRTIO_ID_GPIO: + break; + default: + g_assert_not_reached(); + } + + features->has_unknown_dev_features = bitmap != 0; + if (features->has_unknown_dev_features) { + features->unknown_dev_features = bitmap; + } + + return features; +} + +VirtioStatus *qmp_x_query_virtio_status(const char *path, Error **errp) +{ + VirtIODevice *vdev; + VirtioStatus *status; + + vdev = virtio_device_find(path); + if (vdev == NULL) { + error_setg(errp, "Path %s is not a VirtIODevice", path); + return NULL; + } + + status = g_new0(VirtioStatus, 1); + status->name = g_strdup(vdev->name); + status->device_id = vdev->device_id; + status->vhost_started = vdev->vhost_started; + status->guest_features = qmp_decode_features(vdev->device_id, + vdev->guest_features); + status->host_features = qmp_decode_features(vdev->device_id, + vdev->host_features); + status->backend_features = qmp_decode_features(vdev->device_id, + vdev->backend_features); + + switch (vdev->device_endian) { + case VIRTIO_DEVICE_ENDIAN_LITTLE: + status->device_endian = g_strdup("little"); + break; + case VIRTIO_DEVICE_ENDIAN_BIG: + status->device_endian = g_strdup("big"); + break; + default: + status->device_endian = g_strdup("unknown"); + break; + } + + status->num_vqs = virtio_get_num_queues(vdev); + status->status = qmp_decode_status(vdev->status); + status->isr = vdev->isr; + status->queue_sel = vdev->queue_sel; + status->vm_running = vdev->vm_running; + status->broken = vdev->broken; + status->disabled = vdev->disabled; + status->use_started = vdev->use_started; + status->started = vdev->started; + status->start_on_kick = vdev->start_on_kick; + status->disable_legacy_check = vdev->disable_legacy_check; + status->bus_name = g_strdup(vdev->bus_name); + status->use_guest_notifier_mask = vdev->use_guest_notifier_mask; + status->has_vhost_dev = vdev->vhost_started; + + if (vdev->vhost_started) { + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + struct vhost_dev *hdev = vdc->get_vhost(vdev); + + status->vhost_dev = g_new0(VhostStatus, 1); + status->vhost_dev->n_mem_sections = hdev->n_mem_sections; + status->vhost_dev->n_tmp_sections = hdev->n_tmp_sections; + status->vhost_dev->nvqs = hdev->nvqs; + status->vhost_dev->vq_index = hdev->vq_index; + status->vhost_dev->features = + qmp_decode_features(vdev->device_id, hdev->features); + status->vhost_dev->acked_features = + qmp_decode_features(vdev->device_id, hdev->acked_features); + status->vhost_dev->backend_features = + qmp_decode_features(vdev->device_id, hdev->backend_features); + status->vhost_dev->protocol_features = + qmp_decode_protocols(hdev->protocol_features); + status->vhost_dev->max_queues = hdev->max_queues; + status->vhost_dev->backend_cap = hdev->backend_cap; + status->vhost_dev->log_enabled = hdev->log_enabled; + status->vhost_dev->log_size = hdev->log_size; + } + + return status; +} + +VirtVhostQueueStatus *qmp_x_query_virtio_vhost_queue_status(const char *path, + uint16_t queue, + Error **errp) +{ + VirtIODevice *vdev; + VirtVhostQueueStatus *status; + + vdev = virtio_device_find(path); + if (vdev == NULL) { + error_setg(errp, "Path %s is not a VirtIODevice", path); + return NULL; + } + + if (!vdev->vhost_started) { + error_setg(errp, "Error: vhost device has not started yet"); + 
return NULL; + } + + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + struct vhost_dev *hdev = vdc->get_vhost(vdev); + + if (queue < hdev->vq_index || queue >= hdev->vq_index + hdev->nvqs) { + error_setg(errp, "Invalid vhost virtqueue number %d", queue); + return NULL; + } + + status = g_new0(VirtVhostQueueStatus, 1); + status->name = g_strdup(vdev->name); + status->kick = hdev->vqs[queue].kick; + status->call = hdev->vqs[queue].call; + status->desc = (uintptr_t)hdev->vqs[queue].desc; + status->avail = (uintptr_t)hdev->vqs[queue].avail; + status->used = (uintptr_t)hdev->vqs[queue].used; + status->num = hdev->vqs[queue].num; + status->desc_phys = hdev->vqs[queue].desc_phys; + status->desc_size = hdev->vqs[queue].desc_size; + status->avail_phys = hdev->vqs[queue].avail_phys; + status->avail_size = hdev->vqs[queue].avail_size; + status->used_phys = hdev->vqs[queue].used_phys; + status->used_size = hdev->vqs[queue].used_size; + + return status; +} + +VirtQueueStatus *qmp_x_query_virtio_queue_status(const char *path, + uint16_t queue, + Error **errp) +{ + VirtIODevice *vdev; + VirtQueueStatus *status; + + vdev = virtio_device_find(path); + if (vdev == NULL) { + error_setg(errp, "Path %s is not a VirtIODevice", path); + return NULL; + } + + if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) { + error_setg(errp, "Invalid virtqueue number %d", queue); + return NULL; + } + + status = g_new0(VirtQueueStatus, 1); + status->name = g_strdup(vdev->name); + status->queue_index = vdev->vq[queue].queue_index; + status->inuse = vdev->vq[queue].inuse; + status->vring_num = vdev->vq[queue].vring.num; + status->vring_num_default = vdev->vq[queue].vring.num_default; + status->vring_align = vdev->vq[queue].vring.align; + status->vring_desc = vdev->vq[queue].vring.desc; + status->vring_avail = vdev->vq[queue].vring.avail; + status->vring_used = vdev->vq[queue].vring.used; + status->used_idx = vdev->vq[queue].used_idx; + status->signalled_used = vdev->vq[queue].signalled_used; + status->signalled_used_valid = vdev->vq[queue].signalled_used_valid; + + if (vdev->vhost_started) { + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + struct vhost_dev *hdev = vdc->get_vhost(vdev); + + /* check if vq index exists for vhost as well */ + if (queue >= hdev->vq_index && queue < hdev->vq_index + hdev->nvqs) { + status->has_last_avail_idx = true; + + int vhost_vq_index = + hdev->vhost_ops->vhost_get_vq_index(hdev, queue); + struct vhost_vring_state state = { + .index = vhost_vq_index, + }; + + status->last_avail_idx = + hdev->vhost_ops->vhost_get_vring_base(hdev, &state); + } + } else { + status->has_shadow_avail_idx = true; + status->has_last_avail_idx = true; + status->last_avail_idx = vdev->vq[queue].last_avail_idx; + status->shadow_avail_idx = vdev->vq[queue].shadow_avail_idx; + } + + return status; +} + +static strList *qmp_decode_vring_desc_flags(uint16_t flags) +{ + strList *list = NULL; + strList *node; + int i; + + struct { + uint16_t flag; + const char *value; + } map[] = { + { VRING_DESC_F_NEXT, "next" }, + { VRING_DESC_F_WRITE, "write" }, + { VRING_DESC_F_INDIRECT, "indirect" }, + { 1 << VRING_PACKED_DESC_F_AVAIL, "avail" }, + { 1 << VRING_PACKED_DESC_F_USED, "used" }, + { 0, "" } + }; + + for (i = 0; map[i].flag; i++) { + if ((map[i].flag & flags) == 0) { + continue; + } + node = g_malloc0(sizeof(strList)); + node->value = g_strdup(map[i].value); + node->next = list; + list = node; + } + + return list; +} + +VirtioQueueElement *qmp_x_query_virtio_queue_element(const char *path, + 
uint16_t queue, + bool has_index, + uint16_t index, + Error **errp) +{ + VirtIODevice *vdev; + VirtQueue *vq; + VirtioQueueElement *element = NULL; + + vdev = virtio_device_find(path); + if (vdev == NULL) { + error_setg(errp, "Path %s is not a VirtIO device", path); + return NULL; + } + + if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) { + error_setg(errp, "Invalid virtqueue number %d", queue); + return NULL; + } + vq = &vdev->vq[queue]; + + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + error_setg(errp, "Packed ring not supported"); + return NULL; + } else { + unsigned int head, i, max; + VRingMemoryRegionCaches *caches; + MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; + MemoryRegionCache *desc_cache; + VRingDesc desc; + VirtioRingDescList *list = NULL; + VirtioRingDescList *node; + int rc; int ndescs; + + RCU_READ_LOCK_GUARD(); + + max = vq->vring.num; + + if (!has_index) { + head = vring_avail_ring(vq, vq->last_avail_idx % vq->vring.num); + } else { + head = vring_avail_ring(vq, index % vq->vring.num); + } + i = head; + + caches = vring_get_region_caches(vq); + if (!caches) { + error_setg(errp, "Region caches not initialized"); + return NULL; + } + if (caches->desc.len < max * sizeof(VRingDesc)) { + error_setg(errp, "Cannot map descriptor ring"); + return NULL; + } + + desc_cache = &caches->desc; + vring_split_desc_read(vdev, &desc, desc_cache, i); + if (desc.flags & VRING_DESC_F_INDIRECT) { + int64_t len; + len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as, + desc.addr, desc.len, false); + desc_cache = &indirect_desc_cache; + if (len < desc.len) { + error_setg(errp, "Cannot map indirect buffer"); + goto done; + } + + max = desc.len / sizeof(VRingDesc); + i = 0; + vring_split_desc_read(vdev, &desc, desc_cache, i); + } + + element = g_new0(VirtioQueueElement, 1); + element->avail = g_new0(VirtioRingAvail, 1); + element->used = g_new0(VirtioRingUsed, 1); + element->name = g_strdup(vdev->name); + element->index = head; + element->avail->flags = vring_avail_flags(vq); + element->avail->idx = vring_avail_idx(vq); + element->avail->ring = head; + element->used->flags = vring_used_flags(vq); + element->used->idx = vring_used_idx(vq); + ndescs = 0; + + do { + /* A buggy driver may produce an infinite loop */ + if (ndescs >= max) { + break; + } + node = g_new0(VirtioRingDescList, 1); + node->value = g_new0(VirtioRingDesc, 1); + node->value->addr = desc.addr; + node->value->len = desc.len; + node->value->flags = qmp_decode_vring_desc_flags(desc.flags); + node->next = list; + list = node; + + ndescs++; + rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, + max, &i); + } while (rc == VIRTQUEUE_READ_DESC_MORE); + element->descs = list; +done: + address_space_cache_destroy(&indirect_desc_cache); + } + + return element; +} + static const TypeInfo virtio_device_info = { .name = TYPE_VIRTIO_DEVICE, .parent = TYPE_DEVICE, diff --git a/hw/watchdog/meson.build b/hw/watchdog/meson.build index 054c403dea..8974b5cf4c 100644 --- a/hw/watchdog/meson.build +++ b/hw/watchdog/meson.build @@ -6,3 +6,4 @@ softmmu_ss.add(when: 'CONFIG_WDT_DIAG288', if_true: files('wdt_diag288.c')) softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('wdt_aspeed.c')) softmmu_ss.add(when: 'CONFIG_WDT_IMX2', if_true: files('wdt_imx2.c')) softmmu_ss.add(when: 'CONFIG_WDT_SBSA', if_true: files('sbsa_gwdt.c')) +specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_watchdog.c')) diff --git a/hw/watchdog/sbsa_gwdt.c b/hw/watchdog/sbsa_gwdt.c index 
d0998f8489..7aa57a8c51 100644 --- a/hw/watchdog/sbsa_gwdt.c +++ b/hw/watchdog/sbsa_gwdt.c @@ -24,11 +24,6 @@ #include "qemu/log.h" #include "qemu/module.h" -static WatchdogTimerModel model = { - .wdt_name = TYPE_WDT_SBSA, - .wdt_description = "SBSA-compliant generic watchdog device", -}; - static const VMStateDescription vmstate_sbsa_gwdt = { .name = "sbsa-gwdt", .version_id = 1, @@ -273,8 +268,9 @@ static void wdt_sbsa_gwdt_class_init(ObjectClass *klass, void *data) dc->realize = wdt_sbsa_gwdt_realize; dc->reset = wdt_sbsa_gwdt_reset; dc->hotpluggable = false; - set_bit(DEVICE_CATEGORY_MISC, dc->categories); + set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); dc->vmsd = &vmstate_sbsa_gwdt; + dc->desc = "SBSA-compliant generic watchdog device"; } static const TypeInfo wdt_sbsa_gwdt_info = { @@ -286,7 +282,6 @@ static const TypeInfo wdt_sbsa_gwdt_info = { static void wdt_sbsa_gwdt_register_types(void) { - watchdog_add_model(&model); type_register_static(&wdt_sbsa_gwdt_info); } diff --git a/hw/watchdog/spapr_watchdog.c b/hw/watchdog/spapr_watchdog.c new file mode 100644 index 0000000000..55ff1f03c1 --- /dev/null +++ b/hw/watchdog/spapr_watchdog.c @@ -0,0 +1,274 @@ +/* + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "target/ppc/cpu.h" +#include "migration/vmstate.h" +#include "trace.h" + +#include "hw/ppc/spapr.h" + +#define FIELD_BE(reg, field, start, len) \ + FIELD(reg, field, 64 - (start + len), len) + +/* + * Bits 47: "leaveOtherWatchdogsRunningOnTimeout", specified on + * the "Start watchdog" operation, + * 0 - stop out-standing watchdogs on timeout, + * 1 - leave outstanding watchdogs running on timeout + */ +FIELD_BE(PSERIES_WDTF, LEAVE_OTHER, 47, 1) + +/* Bits 48-55: "operation" */ +FIELD_BE(PSERIES_WDTF, OP, 48, 8) +#define PSERIES_WDTF_OP_START 0x1 +#define PSERIES_WDTF_OP_STOP 0x2 +#define PSERIES_WDTF_OP_QUERY 0x3 +#define PSERIES_WDTF_OP_QUERY_LPM 0x4 + +/* Bits 56-63: "timeoutAction" */ +FIELD_BE(PSERIES_WDTF, ACTION, 56, 8) +#define PSERIES_WDTF_ACTION_HARD_POWER_OFF 0x1 +#define PSERIES_WDTF_ACTION_HARD_RESTART 0x2 +#define PSERIES_WDTF_ACTION_DUMP_RESTART 0x3 + +FIELD_BE(PSERIES_WDTF, RESERVED, 0, 47) + +/* Special watchdogNumber for the "stop all watchdogs" operation */ +#define PSERIES_WDT_STOP_ALL ((uint64_t)~0) + +/* + * For the "Query watchdog capabilities" operation, a uint64 structure + * defined as: + * Bits 0-15: The minimum supported timeout in milliseconds + * Bits 16-31: The number of watchdogs supported + * Bits 32-63: Reserved + */ +FIELD_BE(PSERIES_WDTQ, MIN_TIMEOUT, 0, 16) +FIELD_BE(PSERIES_WDTQ, NUM, 16, 16) + +/* + * For the "Query watchdog LPM requirement" operation: + * 1 = The given "watchdogNumber" must be stopped prior to suspending + * 2 = The given "watchdogNumber" does not have to be stopped prior to + * suspending + */ +#define PSERIES_WDTQL_STOPPED 1 +#define PSERIES_WDTQL_QUERY_NOT_STOPPED 2 + +#define WDT_MIN_TIMEOUT 1 /* 1ms */ + +static target_ulong watchdog_stop(unsigned watchdogNumber, SpaprWatchdog *w) +{ + target_ulong ret = H_NOOP; + + if (timer_pending(&w->timer)) { + timer_del(&w->timer); + ret = H_SUCCESS; + } + trace_spapr_watchdog_stop(watchdogNumber, ret); + + return ret; +} + +static target_ulong watchdog_stop_all(SpaprMachineState *spapr) +{ + target_ulong ret = H_NOOP; + int i; + + for (i = 1; i <= ARRAY_SIZE(spapr->wds); ++i) { + target_ulong r = watchdog_stop(i, &spapr->wds[i - 1]); + + if (r != H_NOOP && r != H_SUCCESS) { + ret = r; + } + } + + return ret; +} + +static void watchdog_expired(void *pw) +{ + SpaprWatchdog *w = pw; + CPUState *cs; + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + unsigned num = w - spapr->wds; + + g_assert(num < ARRAY_SIZE(spapr->wds)); + trace_spapr_watchdog_expired(num, w->action); + switch (w->action) { + case PSERIES_WDTF_ACTION_HARD_POWER_OFF: + qemu_system_vmstop_request(RUN_STATE_SHUTDOWN); + break; + case PSERIES_WDTF_ACTION_HARD_RESTART: + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + break; + case PSERIES_WDTF_ACTION_DUMP_RESTART: + CPU_FOREACH(cs) { + async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL); + } + break; + } + if (!w->leave_others) { + watchdog_stop_all(spapr); + } +} + +static target_ulong h_watchdog(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong ret = H_SUCCESS; + target_ulong flags = args[0]; + target_ulong watchdogNumber = args[1]; /* 1-Based per PAPR */ + target_ulong timeoutInMs = args[2]; + unsigned operation = FIELD_EX64(flags, PSERIES_WDTF, OP); + unsigned timeoutAction = FIELD_EX64(flags, PSERIES_WDTF, ACTION); + SpaprWatchdog *w; + + if (FIELD_EX64(flags, PSERIES_WDTF, RESERVED)) { + return H_PARAMETER; + } 
+ + switch (operation) { + case PSERIES_WDTF_OP_START: + if (watchdogNumber > ARRAY_SIZE(spapr->wds)) { + return H_P2; + } + if (timeoutInMs <= WDT_MIN_TIMEOUT) { + return H_P3; + } + + w = &spapr->wds[watchdogNumber - 1]; + switch (timeoutAction) { + case PSERIES_WDTF_ACTION_HARD_POWER_OFF: + case PSERIES_WDTF_ACTION_HARD_RESTART: + case PSERIES_WDTF_ACTION_DUMP_RESTART: + w->action = timeoutAction; + break; + default: + return H_PARAMETER; + } + w->leave_others = FIELD_EX64(flags, PSERIES_WDTF, LEAVE_OTHER); + timer_mod(&w->timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + timeoutInMs); + trace_spapr_watchdog_start(flags, watchdogNumber, timeoutInMs); + break; + case PSERIES_WDTF_OP_STOP: + if (watchdogNumber == PSERIES_WDT_STOP_ALL) { + ret = watchdog_stop_all(spapr); + } else if (watchdogNumber <= ARRAY_SIZE(spapr->wds)) { + ret = watchdog_stop(watchdogNumber, + &spapr->wds[watchdogNumber - 1]); + } else { + return H_P2; + } + break; + case PSERIES_WDTF_OP_QUERY: + args[0] = FIELD_DP64(0, PSERIES_WDTQ, MIN_TIMEOUT, WDT_MIN_TIMEOUT); + args[0] = FIELD_DP64(args[0], PSERIES_WDTQ, NUM, + ARRAY_SIZE(spapr->wds)); + trace_spapr_watchdog_query(args[0]); + break; + case PSERIES_WDTF_OP_QUERY_LPM: + if (watchdogNumber > ARRAY_SIZE(spapr->wds)) { + return H_P2; + } + args[0] = PSERIES_WDTQL_QUERY_NOT_STOPPED; + trace_spapr_watchdog_query_lpm(args[0]); + break; + default: + return H_PARAMETER; + } + + return ret; +} + +void spapr_watchdog_init(SpaprMachineState *spapr) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(spapr->wds); ++i) { + char name[16]; + SpaprWatchdog *w = &spapr->wds[i]; + + snprintf(name, sizeof(name) - 1, "wdt%d", i + 1); + object_initialize_child_with_props(OBJECT(spapr), name, w, + sizeof(SpaprWatchdog), + TYPE_SPAPR_WDT, + &error_fatal, NULL); + qdev_realize(DEVICE(w), NULL, &error_fatal); + } +} + +static bool watchdog_needed(void *opaque) +{ + SpaprWatchdog *w = opaque; + + return timer_pending(&w->timer); +} + +static const VMStateDescription vmstate_wdt = { + .name = "spapr_watchdog", + .version_id = 1, + .minimum_version_id = 1, + .needed = watchdog_needed, + .fields = (VMStateField[]) { + VMSTATE_TIMER(timer, SpaprWatchdog), + VMSTATE_UINT8(action, SpaprWatchdog), + VMSTATE_UINT8(leave_others, SpaprWatchdog), + VMSTATE_END_OF_LIST() + } +}; + +static void spapr_wdt_realize(DeviceState *dev, Error **errp) +{ + SpaprWatchdog *w = SPAPR_WDT(dev); + Object *o = OBJECT(dev); + + timer_init_ms(&w->timer, QEMU_CLOCK_VIRTUAL, watchdog_expired, w); + + object_property_add_uint64_ptr(o, "expire", + (uint64_t *)&w->timer.expire_time, + OBJ_PROP_FLAG_READ); + object_property_add_uint8_ptr(o, "action", &w->action, OBJ_PROP_FLAG_READ); + object_property_add_uint8_ptr(o, "leaveOtherWatchdogsRunningOnTimeout", + &w->leave_others, OBJ_PROP_FLAG_READ); +} + +static void spapr_wdt_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = spapr_wdt_realize; + dc->vmsd = &vmstate_wdt; + dc->user_creatable = false; +} + +static const TypeInfo spapr_wdt_info = { + .name = TYPE_SPAPR_WDT, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SpaprWatchdog), + .class_init = spapr_wdt_class_init, +}; + +static void spapr_watchdog_register_types(void) +{ + spapr_register_hypercall(H_WATCHDOG, h_watchdog); + type_register_static(&spapr_wdt_info); +} + +type_init(spapr_watchdog_register_types) diff --git a/hw/watchdog/trace-events b/hw/watchdog/trace-events index c3bafbffa9..89ccbcfdfd 100644 --- a/hw/watchdog/trace-events +++ b/hw/watchdog/trace-events @@ -5,3 
+5,14 @@ cmsdk_apb_watchdog_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK AP cmsdk_apb_watchdog_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB watchdog write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" cmsdk_apb_watchdog_reset(void) "CMSDK APB watchdog: reset" cmsdk_apb_watchdog_lock(uint32_t lock) "CMSDK APB watchdog: lock %" PRIu32 + +# wdt-aspeed.c +aspeed_wdt_read(uint64_t addr, uint32_t size) "@0x%" PRIx64 " size=%d" +aspeed_wdt_write(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size=%d value=0x%"PRIx64 + +# spapr_watchdog.c +spapr_watchdog_start(uint64_t flags, uint64_t num, uint64_t timeout) "Flags 0x%" PRIx64 " num=%" PRId64 " %" PRIu64 "ms" +spapr_watchdog_stop(uint64_t num, uint64_t ret) "num=%" PRIu64 " ret=%" PRId64 +spapr_watchdog_query(uint64_t caps) "caps=0x%" PRIx64 +spapr_watchdog_query_lpm(uint64_t caps) "caps=0x%" PRIx64 +spapr_watchdog_expired(uint64_t num, unsigned action) "num=%" PRIu64 " action=%u" diff --git a/hw/watchdog/watchdog.c b/hw/watchdog/watchdog.c index 0e98ffb73f..6c082a3263 100644 --- a/hw/watchdog/watchdog.c +++ b/hw/watchdog/watchdog.c @@ -32,63 +32,6 @@ #include "qemu/help_option.h" static WatchdogAction watchdog_action = WATCHDOG_ACTION_RESET; -static QLIST_HEAD(, WatchdogTimerModel) watchdog_list; - -void watchdog_add_model(WatchdogTimerModel *model) -{ - QLIST_INSERT_HEAD(&watchdog_list, model, entry); -} - -/* Returns: - * 0 = continue - * 1 = exit program with error - * 2 = exit program without error - */ -int select_watchdog(const char *p) -{ - WatchdogTimerModel *model; - QemuOpts *opts; - - /* -watchdog ? lists available devices and exits cleanly. */ - if (is_help_option(p)) { - QLIST_FOREACH(model, &watchdog_list, entry) { - fprintf(stderr, "\t%s\t%s\n", - model->wdt_name, model->wdt_description); - } - return 2; - } - - QLIST_FOREACH(model, &watchdog_list, entry) { - if (strcasecmp(model->wdt_name, p) == 0) { - /* add the device */ - opts = qemu_opts_create(qemu_find_opts("device"), NULL, 0, - &error_abort); - qemu_opt_set(opts, "driver", p, &error_abort); - return 0; - } - } - - fprintf(stderr, "Unknown -watchdog device. 
Supported devices are:\n"); - QLIST_FOREACH(model, &watchdog_list, entry) { - fprintf(stderr, "\t%s\t%s\n", - model->wdt_name, model->wdt_description); - } - return 1; -} - -int select_watchdog_action(const char *p) -{ - int action; - char *qapi_value; - - qapi_value = g_ascii_strdown(p, -1); - action = qapi_enum_parse(&WatchdogAction_lookup, qapi_value, -1, NULL); - g_free(qapi_value); - if (action < 0) - return -1; - qmp_watchdog_set_action(action, &error_abort); - return 0; -} WatchdogAction get_watchdog_action(void) { diff --git a/hw/watchdog/wdt_aspeed.c b/hw/watchdog/wdt_aspeed.c index 6352ba1b0e..d753693a2e 100644 --- a/hw/watchdog/wdt_aspeed.c +++ b/hw/watchdog/wdt_aspeed.c @@ -19,6 +19,7 @@ #include "hw/sysbus.h" #include "hw/watchdog/wdt_aspeed.h" #include "migration/vmstate.h" +#include "trace.h" #define WDT_STATUS (0x00 / 4) #define WDT_RELOAD_VALUE (0x04 / 4) @@ -60,6 +61,8 @@ static uint64_t aspeed_wdt_read(void *opaque, hwaddr offset, unsigned size) { AspeedWDTState *s = ASPEED_WDT(opaque); + trace_aspeed_wdt_read(offset, size); + offset >>= 2; switch (offset) { @@ -118,13 +121,29 @@ static void aspeed_wdt_reload_1mhz(AspeedWDTState *s) } } +static uint64_t aspeed_2400_sanitize_ctrl(uint64_t data) +{ + return data & 0xffff; +} + +static uint64_t aspeed_2500_sanitize_ctrl(uint64_t data) +{ + return (data & ~(0xfUL << 8)) | WDT_CTRL_1MHZ_CLK; +} + +static uint64_t aspeed_2600_sanitize_ctrl(uint64_t data) +{ + return data & ~(0x7UL << 7); +} static void aspeed_wdt_write(void *opaque, hwaddr offset, uint64_t data, unsigned size) { AspeedWDTState *s = ASPEED_WDT(opaque); AspeedWDTClass *awc = ASPEED_WDT_GET_CLASS(s); - bool enable = data & WDT_CTRL_ENABLE; + bool enable; + + trace_aspeed_wdt_write(offset, size, data); offset >>= 2; @@ -144,12 +163,16 @@ static void aspeed_wdt_write(void *opaque, hwaddr offset, uint64_t data, } break; case WDT_CTRL: + data = awc->sanitize_ctrl(data); + enable = data & WDT_CTRL_ENABLE; if (enable && !aspeed_wdt_is_enabled(s)) { s->regs[WDT_CTRL] = data; awc->wdt_reload(s); } else if (!enable && aspeed_wdt_is_enabled(s)) { s->regs[WDT_CTRL] = data; timer_del(s->timer); + } else { + s->regs[WDT_CTRL] = data; } break; case WDT_RESET_WIDTH: @@ -179,11 +202,6 @@ static void aspeed_wdt_write(void *opaque, hwaddr offset, uint64_t data, return; } -static WatchdogTimerModel model = { - .wdt_name = TYPE_ASPEED_WDT, - .wdt_description = "Aspeed watchdog device", -}; - static const VMStateDescription vmstate_aspeed_wdt = { .name = "vmstate_aspeed_wdt", .version_id = 0, @@ -207,11 +225,12 @@ static const MemoryRegionOps aspeed_wdt_ops = { static void aspeed_wdt_reset(DeviceState *dev) { AspeedWDTState *s = ASPEED_WDT(dev); + AspeedWDTClass *awc = ASPEED_WDT_GET_CLASS(s); - s->regs[WDT_STATUS] = 0x3EF1480; - s->regs[WDT_RELOAD_VALUE] = 0x03EF1480; + s->regs[WDT_STATUS] = awc->default_status; + s->regs[WDT_RELOAD_VALUE] = awc->default_reload_value; s->regs[WDT_RESTART] = 0; - s->regs[WDT_CTRL] = 0; + s->regs[WDT_CTRL] = awc->sanitize_ctrl(0); s->regs[WDT_RESET_WIDTH] = 0xFF; timer_del(s->timer); @@ -269,9 +288,10 @@ static void aspeed_wdt_class_init(ObjectClass *klass, void *data) dc->desc = "ASPEED Watchdog Controller"; dc->realize = aspeed_wdt_realize; dc->reset = aspeed_wdt_reset; - set_bit(DEVICE_CATEGORY_MISC, dc->categories); + set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); dc->vmsd = &vmstate_aspeed_wdt; device_class_set_props(dc, aspeed_wdt_properties); + dc->desc = "Aspeed watchdog device"; } static const TypeInfo aspeed_wdt_info = { @@ -293,6 +313,9 
@@ static void aspeed_2400_wdt_class_init(ObjectClass *klass, void *data) awc->ext_pulse_width_mask = 0xff; awc->reset_ctrl_reg = SCU_RESET_CONTROL1; awc->wdt_reload = aspeed_wdt_reload; + awc->sanitize_ctrl = aspeed_2400_sanitize_ctrl; + awc->default_status = 0x03EF1480; + awc->default_reload_value = 0x03EF1480; } static const TypeInfo aspeed_2400_wdt_info = { @@ -328,6 +351,9 @@ static void aspeed_2500_wdt_class_init(ObjectClass *klass, void *data) awc->reset_ctrl_reg = SCU_RESET_CONTROL1; awc->reset_pulse = aspeed_2500_wdt_reset_pulse; awc->wdt_reload = aspeed_wdt_reload_1mhz; + awc->sanitize_ctrl = aspeed_2500_sanitize_ctrl; + awc->default_status = 0x014FB180; + awc->default_reload_value = 0x014FB180; } static const TypeInfo aspeed_2500_wdt_info = { @@ -348,6 +374,9 @@ static void aspeed_2600_wdt_class_init(ObjectClass *klass, void *data) awc->reset_ctrl_reg = AST2600_SCU_RESET_CONTROL1; awc->reset_pulse = aspeed_2500_wdt_reset_pulse; awc->wdt_reload = aspeed_wdt_reload_1mhz; + awc->sanitize_ctrl = aspeed_2600_sanitize_ctrl; + awc->default_status = 0x014FB180; + awc->default_reload_value = 0x014FB180; } static const TypeInfo aspeed_2600_wdt_info = { @@ -357,13 +386,36 @@ static const TypeInfo aspeed_2600_wdt_info = { .class_init = aspeed_2600_wdt_class_init, }; +static void aspeed_1030_wdt_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedWDTClass *awc = ASPEED_WDT_CLASS(klass); + + dc->desc = "ASPEED 1030 Watchdog Controller"; + awc->offset = 0x80; + awc->ext_pulse_width_mask = 0xfffff; /* TODO */ + awc->reset_ctrl_reg = AST2600_SCU_RESET_CONTROL1; + awc->reset_pulse = aspeed_2500_wdt_reset_pulse; + awc->wdt_reload = aspeed_wdt_reload_1mhz; + awc->sanitize_ctrl = aspeed_2600_sanitize_ctrl; + awc->default_status = 0x014FB180; + awc->default_reload_value = 0x014FB180; +} + +static const TypeInfo aspeed_1030_wdt_info = { + .name = TYPE_ASPEED_1030_WDT, + .parent = TYPE_ASPEED_WDT, + .instance_size = sizeof(AspeedWDTState), + .class_init = aspeed_1030_wdt_class_init, +}; + static void wdt_aspeed_register_types(void) { - watchdog_add_model(&model); type_register_static(&aspeed_wdt_info); type_register_static(&aspeed_2400_wdt_info); type_register_static(&aspeed_2500_wdt_info); type_register_static(&aspeed_2600_wdt_info); + type_register_static(&aspeed_1030_wdt_info); } type_init(wdt_aspeed_register_types) diff --git a/hw/watchdog/wdt_diag288.c b/hw/watchdog/wdt_diag288.c index e135a4de8b..76d89fbf78 100644 --- a/hw/watchdog/wdt_diag288.c +++ b/hw/watchdog/wdt_diag288.c @@ -19,11 +19,6 @@ #include "migration/vmstate.h" #include "qemu/log.h" -static WatchdogTimerModel model = { - .wdt_name = TYPE_WDT_DIAG288, - .wdt_description = "diag288 device for s390x platform", -}; - static const VMStateDescription vmstate_diag288 = { .name = "vmstate_diag288", .version_id = 0, @@ -122,9 +117,10 @@ static void wdt_diag288_class_init(ObjectClass *klass, void *data) dc->unrealize = wdt_diag288_unrealize; dc->reset = wdt_diag288_reset; dc->hotpluggable = false; - set_bit(DEVICE_CATEGORY_MISC, dc->categories); + set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); dc->vmsd = &vmstate_diag288; diag288->handle_timer = wdt_diag288_handle_timer; + dc->desc = "diag288 device for s390x platform"; } static const TypeInfo wdt_diag288_info = { @@ -137,7 +133,6 @@ static const TypeInfo wdt_diag288_info = { static void wdt_diag288_register_types(void) { - watchdog_add_model(&model); type_register_static(&wdt_diag288_info); } diff --git a/hw/watchdog/wdt_i6300esb.c 
b/hw/watchdog/wdt_i6300esb.c index 4c52e3bb9e..5693ec6a09 100644 --- a/hw/watchdog/wdt_i6300esb.c +++ b/hw/watchdog/wdt_i6300esb.c @@ -457,11 +457,6 @@ static void i6300esb_exit(PCIDevice *dev) timer_free(d->timer); } -static WatchdogTimerModel model = { - .wdt_name = "i6300esb", - .wdt_description = "Intel 6300ESB", -}; - static void i6300esb_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -476,7 +471,8 @@ static void i6300esb_class_init(ObjectClass *klass, void *data) k->class_id = PCI_CLASS_SYSTEM_OTHER; dc->reset = i6300esb_reset; dc->vmsd = &vmstate_i6300esb; - set_bit(DEVICE_CATEGORY_MISC, dc->categories); + set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); + dc->desc = "Intel 6300ESB"; } static const TypeInfo i6300esb_info = { @@ -492,7 +488,6 @@ static const TypeInfo i6300esb_info = { static void i6300esb_register_types(void) { - watchdog_add_model(&model); type_register_static(&i6300esb_info); } diff --git a/hw/watchdog/wdt_ib700.c b/hw/watchdog/wdt_ib700.c index 177aaa503f..b116c3a3aa 100644 --- a/hw/watchdog/wdt_ib700.c +++ b/hw/watchdog/wdt_ib700.c @@ -128,11 +128,6 @@ static void wdt_ib700_reset(DeviceState *dev) timer_del(s->timer); } -static WatchdogTimerModel model = { - .wdt_name = "ib700", - .wdt_description = "iBASE 700", -}; - static void wdt_ib700_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -140,7 +135,8 @@ static void wdt_ib700_class_init(ObjectClass *klass, void *data) dc->realize = wdt_ib700_realize; dc->reset = wdt_ib700_reset; dc->vmsd = &vmstate_ib700; - set_bit(DEVICE_CATEGORY_MISC, dc->categories); + set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); + dc->desc = "iBASE 700"; } static const TypeInfo wdt_ib700_info = { @@ -152,7 +148,6 @@ static const TypeInfo wdt_ib700_info = { static void wdt_ib700_register_types(void) { - watchdog_add_model(&model); type_register_static(&wdt_ib700_info); } diff --git a/hw/watchdog/wdt_imx2.c b/hw/watchdog/wdt_imx2.c index a5fb76308f..e776a2fbd4 100644 --- a/hw/watchdog/wdt_imx2.c +++ b/hw/watchdog/wdt_imx2.c @@ -280,8 +280,8 @@ static void imx2_wdt_class_init(ObjectClass *klass, void *data) dc->realize = imx2_wdt_realize; dc->reset = imx2_wdt_reset; dc->vmsd = &vmstate_imx2_wdt; - dc->desc = "i.MX watchdog timer"; - set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->desc = "i.MX2 watchdog timer"; + set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); } static const TypeInfo imx2_wdt_info = { @@ -291,14 +291,8 @@ static const TypeInfo imx2_wdt_info = { .class_init = imx2_wdt_class_init, }; -static WatchdogTimerModel model = { - .wdt_name = "imx2-watchdog", - .wdt_description = "i.MX2 Watchdog", -}; - static void imx2_wdt_register_type(void) { - watchdog_add_model(&model); type_register_static(&imx2_wdt_info); } type_init(imx2_wdt_register_type) diff --git a/hw/xbox/mcpx/apu.c b/hw/xbox/mcpx/apu.c index 5918808804..e1301bc07c 100644 --- a/hw/xbox/mcpx/apu.c +++ b/hw/xbox/mcpx/apu.c @@ -2393,7 +2393,6 @@ const VMStateDescription vmstate_vp_dsp_dma_state = { .name = "mcpx-apu/dsp-state/dma", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_UINT32(configuration, DSPDMAState), VMSTATE_UINT32(control, DSPDMAState), @@ -2409,7 +2408,6 @@ const VMStateDescription vmstate_vp_dsp_core_state = { .name = "mcpx-apu/dsp-state/core", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { // FIXME: Remove unnecessary fields VMSTATE_UINT16(instr_cycle, 
dsp_core_t), @@ -2453,7 +2451,6 @@ const VMStateDescription vmstate_vp_dsp_state = { .name = "mcpx-apu/dsp-state", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_STRUCT(core, DSPState, 1, vmstate_vp_dsp_core_state, dsp_core_t), VMSTATE_STRUCT(dma, DSPState, 1, vmstate_vp_dsp_dma_state, DSPDMAState), @@ -2468,7 +2465,6 @@ const VMStateDescription vmstate_vp_ssl_data = { .name = "mcpx_apu_voice_data", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_UINT32_ARRAY(base, MCPXAPUVPSSLData, MCPX_HW_SSLS_PER_VOICE), VMSTATE_UINT8_ARRAY(count, MCPXAPUVPSSLData, MCPX_HW_SSLS_PER_VOICE), diff --git a/hw/xbox/mcpx/dsp/dsp.c b/hw/xbox/mcpx/dsp/dsp.c index ae0826d79c..2a08cbf320 100644 --- a/hw/xbox/mcpx/dsp/dsp.c +++ b/hw/xbox/mcpx/dsp/dsp.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include #include diff --git a/hw/xbox/nv2a/nv2a.c b/hw/xbox/nv2a/nv2a.c index e09fc8f890..15f06121e2 100644 --- a/hw/xbox/nv2a/nv2a.c +++ b/hw/xbox/nv2a/nv2a.c @@ -223,7 +223,7 @@ static void nv2a_init_vga(NV2AState *d) VGACommonState *vga = &d->vga; vga->vram_size_mb = memory_region_size(d->vram) / MiB; - vga_common_init(vga, OBJECT(d)); + vga_common_init(vga, OBJECT(d), &error_fatal); vga->get_bpp = nv2a_get_bpp; vga->get_offsets = nv2a_get_offsets; // vga->overlay_draw_line = nv2a_overlay_draw_line; @@ -417,7 +417,6 @@ const VMStateDescription vmstate_nv2a_pgraph_vertex_attributes = { .name = "nv2a/pgraph/vertex-attr", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { // FIXME VMSTATE_END_OF_LIST() diff --git a/hw/xbox/nv2a/nv2a_int.h b/hw/xbox/nv2a/nv2a_int.h index 65233ff265..31ab6d89ca 100644 --- a/hw/xbox/nv2a/nv2a_int.h +++ b/hw/xbox/nv2a/nv2a_int.h @@ -33,6 +33,8 @@ #include "qemu/error-report.h" #include "migration/vmstate.h" #include "sysemu/runstate.h" +#include "ui/console.h" +#include "hw/display/vga_int.h" #include "hw/hw.h" #include "hw/display/vga.h" diff --git a/hw/xbox/nv2a/shaders.c b/hw/xbox/nv2a/shaders.c index bef6c463cb..d0a1c0fa7c 100644 --- a/hw/xbox/nv2a/shaders.c +++ b/hw/xbox/nv2a/shaders.c @@ -20,7 +20,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include #include "shaders_common.h" diff --git a/hw/xbox/xbox.c b/hw/xbox/xbox.c index 4e393b1082..7a496118ab 100644 --- a/hw/xbox/xbox.c +++ b/hw/xbox/xbox.c @@ -98,7 +98,7 @@ static void xbox_flash_init(MachineState *ms, MemoryRegion *rom_memory) } if (failed_to_load_bios) { - fprintf(stderr, "Failed to load BIOS '%s'\n", filename); + fprintf(stderr, "Failed to load BIOS '%s'\n", filename ? filename : "(null)"); memset(bios_data, 0xff, bios_size); } if (filename != NULL) { @@ -278,7 +278,6 @@ void xbox_init_common(MachineState *machine, } /* init basic PC hardware */ - pcms->pit_enabled = 1; // XBOX_FIXME: What's the right way to do this? 
rtc_state = mc146818_rtc_init(isa_bus, 2000, NULL); if (kvm_pit_in_kernel()) { diff --git a/hw/xbox/xbox_pci.c b/hw/xbox/xbox_pci.c index d9ef8ce7a6..cd9d7d8048 100644 --- a/hw/xbox/xbox_pci.c +++ b/hw/xbox/xbox_pci.c @@ -38,7 +38,6 @@ #include "hw/pci/pci_bus.h" #include "hw/pci/pci_bridge.h" #include "exec/address-spaces.h" -#include "qemu-common.h" #include "qemu/option.h" #include "hw/xbox/acpi_xbox.h" #include "hw/xbox/amd_smbus.h" diff --git a/hw/xen/meson.build b/hw/xen/meson.build index 076954b89c..ae0ace3046 100644 --- a/hw/xen/meson.build +++ b/hw/xen/meson.build @@ -8,13 +8,17 @@ softmmu_ss.add(when: ['CONFIG_XEN', xen], if_true: files( )) xen_specific_ss = ss.source_set() -xen_specific_ss.add(when: 'CONFIG_XEN_PCI_PASSTHROUGH', if_true: files( - 'xen-host-pci-device.c', - 'xen_pt.c', - 'xen_pt_config_init.c', - 'xen_pt_graphics.c', - 'xen_pt_load_rom.c', - 'xen_pt_msi.c', -), if_false: files('xen_pt_stub.c')) +if have_xen_pci_passthrough + xen_specific_ss.add(files( + 'xen-host-pci-device.c', + 'xen_pt.c', + 'xen_pt_config_init.c', + 'xen_pt_graphics.c', + 'xen_pt_load_rom.c', + 'xen_pt_msi.c', + )) +else + xen_specific_ss.add(files('xen_pt_stub.c')) +endif specific_ss.add_all(when: ['CONFIG_XEN', xen], if_true: xen_specific_ss) diff --git a/hw/xen/xen-bus.c b/hw/xen/xen-bus.c index 8c588920d9..645a29a5a0 100644 --- a/hw/xen/xen-bus.c +++ b/hw/xen/xen-bus.c @@ -1115,11 +1115,11 @@ void xen_device_set_event_channel_context(XenDevice *xendev, if (channel->ctx) aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true, - NULL, NULL, NULL, NULL); + NULL, NULL, NULL, NULL, NULL); channel->ctx = ctx; aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true, - xen_device_event, NULL, xen_device_poll, channel); + xen_device_event, NULL, xen_device_poll, NULL, channel); } XenEventChannel *xen_device_bind_event_channel(XenDevice *xendev, @@ -1193,7 +1193,7 @@ void xen_device_unbind_event_channel(XenDevice *xendev, QLIST_REMOVE(channel, list); aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true, - NULL, NULL, NULL, NULL); + NULL, NULL, NULL, NULL, NULL); if (xenevtchn_unbind(channel->xeh, channel->local_port) < 0) { error_setg_errno(errp, errno, "xenevtchn_unbind failed"); @@ -1398,7 +1398,7 @@ type_init(xen_register_types) void xen_bus_init(void) { DeviceState *dev = qdev_new(TYPE_XEN_BRIDGE); - BusState *bus = qbus_create(TYPE_XEN_BUS, dev, NULL); + BusState *bus = qbus_new(TYPE_XEN_BUS, dev, NULL); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); qbus_set_bus_hotplug_handler(bus); diff --git a/hw/xen/xen-legacy-backend.c b/hw/xen/xen-legacy-backend.c index dd8ae1452d..085fd31ef7 100644 --- a/hw/xen/xen-legacy-backend.c +++ b/hw/xen/xen-legacy-backend.c @@ -276,7 +276,8 @@ static struct XenLegacyDevice *xen_be_get_xendev(const char *type, int dom, xendev = g_malloc0(ops->size); object_initialize(&xendev->qdev, ops->size, TYPE_XENBACKEND); OBJECT(xendev)->free = g_free; - qdev_set_id(DEVICE(xendev), g_strdup_printf("xen-%s-%d", type, dev)); + qdev_set_id(DEVICE(xendev), g_strdup_printf("xen-%s-%d", type, dev), + &error_fatal); qdev_realize(DEVICE(xendev), xen_sysbus, &error_fatal); object_unref(OBJECT(xendev)); @@ -702,7 +703,7 @@ int xen_be_init(void) xen_sysdev = qdev_new(TYPE_XENSYSDEV); sysbus_realize_and_unref(SYS_BUS_DEVICE(xen_sysdev), &error_fatal); - xen_sysbus = qbus_create(TYPE_XENSYSBUS, xen_sysdev, "xen-sysbus"); + xen_sysbus = qbus_new(TYPE_XENSYSBUS, xen_sysdev, "xen-sysbus"); qbus_set_bus_hotplug_handler(xen_sysbus); return 0; 
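Several hunks in this series (the virtio_queue_aio_attach/detach helpers earlier, and the xen-bus.c aio_set_fd_handler() calls just above, which each gain one extra NULL argument) reflect the io_poll_ready callback added to the AioContext handler-registration API. The sketch below shows the calling pattern those hunks imply; it is illustrative only, the demo_* names and the file descriptor are hypothetical, and only the argument order is taken from the diff.

#include "qemu/osdep.h"
#include "block/aio.h"

/*
 * Hypothetical callbacks for illustration; none of these names appear in
 * the patch.  Their shapes match IOHandler (void (*)(void *)) and
 * AioPollFn (bool (*)(void *)).
 */
static void demo_read(void *opaque)       { /* drain the fd and queue work */ }
static bool demo_poll(void *opaque)       { return false; /* true when work is pending */ }
static void demo_poll_ready(void *opaque) { /* process the work demo_poll() detected */ }

static void demo_attach(AioContext *ctx, int fd, void *dev)
{
    /* Argument order as in the updated xen-bus.c call:
     * io_read, io_write, io_poll, io_poll_ready, opaque. */
    aio_set_fd_handler(ctx, fd, true,
                       demo_read, NULL, demo_poll, demo_poll_ready, dev);
}

static void demo_detach(AioContext *ctx, int fd)
{
    /* Teardown now passes one more NULL, for io_poll_ready. */
    aio_set_fd_handler(ctx, fd, true, NULL, NULL, NULL, NULL, NULL);
}
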
diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c index 232482d65f..5dd706efbf 100644 --- a/hw/xen/xen_pt.c +++ b/hw/xen/xen_pt.c @@ -57,10 +57,10 @@ #include #include "hw/pci/pci.h" +#include "hw/pci/pci_bus.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" #include "hw/xen/xen.h" -#include "hw/i386/pc.h" #include "hw/xen/xen-legacy-backend.h" #include "xen_pt.h" #include "qemu/range.h" @@ -615,8 +615,8 @@ static void xen_pt_region_update(XenPCIPassthroughState *s, } args.type = d->io_regions[bar].type; - pci_for_each_device(pci_get_bus(d), pci_dev_bus_num(d), - xen_pt_check_bar_overlap, &args); + pci_for_each_device_under_bus(pci_get_bus(d), + xen_pt_check_bar_overlap, &args); if (args.rc) { XEN_PT_WARN(d, "Region: %d (addr: 0x%"FMT_PCIBUS ", len: 0x%"FMT_PCIBUS") is overlapped.\n", @@ -689,28 +689,19 @@ static void xen_pt_io_region_del(MemoryListener *l, MemoryRegionSection *sec) } static const MemoryListener xen_pt_memory_listener = { + .name = "xen-pt-mem", .region_add = xen_pt_region_add, .region_del = xen_pt_region_del, .priority = 10, }; static const MemoryListener xen_pt_io_listener = { + .name = "xen-pt-io", .region_add = xen_pt_io_region_add, .region_del = xen_pt_io_region_del, .priority = 10, }; -static void -xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s, - XenHostPCIDevice *dev) -{ - uint16_t gpu_dev_id; - PCIDevice *d = &s->dev; - - gpu_dev_id = dev->device_id; - igd_passthrough_isa_bridge_create(pci_get_bus(d), gpu_dev_id); -} - /* destroy. */ static void xen_pt_destroy(PCIDevice *d) { @@ -790,15 +781,6 @@ static void xen_pt_realize(PCIDevice *d, Error **errp) s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function, s->dev.devfn); - xen_host_pci_device_get(&s->real_device, - s->hostaddr.domain, s->hostaddr.bus, - s->hostaddr.slot, s->hostaddr.function, - errp); - if (*errp) { - error_append_hint(errp, "Failed to \"open\" the real pci device"); - return; - } - s->is_virtfn = s->real_device.is_virtfn; if (s->is_virtfn) { XEN_PT_LOG(d, "%04x:%02x:%02x.%d is a SR-IOV Virtual Function\n", @@ -813,8 +795,10 @@ static void xen_pt_realize(PCIDevice *d, Error **errp) s->io_listener = xen_pt_io_listener; /* Setup VGA bios for passthrough GFX */ - if ((s->real_device.domain == 0) && (s->real_device.bus == 0) && - (s->real_device.dev == 2) && (s->real_device.func == 0)) { + if ((s->real_device.domain == XEN_PCI_IGD_DOMAIN) && + (s->real_device.bus == XEN_PCI_IGD_BUS) && + (s->real_device.dev == XEN_PCI_IGD_DEV) && + (s->real_device.func == XEN_PCI_IGD_FN)) { if (!is_igd_vga_passthrough(&s->real_device)) { error_setg(errp, "Need to enable igd-passthru if you're trying" " to passthrough IGD GFX"); @@ -960,11 +944,58 @@ static void xen_pci_passthrough_instance_init(Object *obj) PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS; } +void xen_igd_reserve_slot(PCIBus *pci_bus) +{ + if (!xen_igd_gfx_pt_enabled()) { + return; + } + + XEN_PT_LOG(0, "Reserving PCI slot 2 for IGD\n"); + pci_bus->slot_reserved_mask |= XEN_PCI_IGD_SLOT_MASK; +} + +static void xen_igd_clear_slot(DeviceState *qdev, Error **errp) +{ + ERRP_GUARD(); + PCIDevice *pci_dev = (PCIDevice *)qdev; + XenPCIPassthroughState *s = XEN_PT_DEVICE(pci_dev); + XenPTDeviceClass *xpdc = XEN_PT_DEVICE_GET_CLASS(s); + PCIBus *pci_bus = pci_get_bus(pci_dev); + + xen_host_pci_device_get(&s->real_device, + s->hostaddr.domain, s->hostaddr.bus, + s->hostaddr.slot, s->hostaddr.function, + errp); + if (*errp) { + error_append_hint(errp, "Failed to \"open\" the real pci device"); + return; + } + + if 
(!(pci_bus->slot_reserved_mask & XEN_PCI_IGD_SLOT_MASK)) { + xpdc->pci_qdev_realize(qdev, errp); + return; + } + + if (is_igd_vga_passthrough(&s->real_device) && + s->real_device.domain == XEN_PCI_IGD_DOMAIN && + s->real_device.bus == XEN_PCI_IGD_BUS && + s->real_device.dev == XEN_PCI_IGD_DEV && + s->real_device.func == XEN_PCI_IGD_FN && + s->real_device.vendor_id == PCI_VENDOR_ID_INTEL) { + pci_bus->slot_reserved_mask &= ~XEN_PCI_IGD_SLOT_MASK; + XEN_PT_LOG(pci_dev, "Intel IGD found, using slot 2\n"); + } + xpdc->pci_qdev_realize(qdev, errp); +} + static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + XenPTDeviceClass *xpdc = XEN_PT_DEVICE_CLASS(klass); + xpdc->pci_qdev_realize = dc->realize; + dc->realize = xen_igd_clear_slot; k->realize = xen_pt_realize; k->exit = xen_pt_unregister_device; k->config_read = xen_pt_pci_read_config; @@ -987,6 +1018,7 @@ static const TypeInfo xen_pci_passthrough_info = { .instance_size = sizeof(XenPCIPassthroughState), .instance_finalize = xen_pci_passthrough_finalize, .class_init = xen_pci_passthrough_class_init, + .class_size = sizeof(XenPTDeviceClass), .instance_init = xen_pci_passthrough_instance_init, .interfaces = (InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, diff --git a/hw/xen/xen_pt.h b/hw/xen/xen_pt.h index c74c4678f2..292bdf7499 100644 --- a/hw/xen/xen_pt.h +++ b/hw/xen/xen_pt.h @@ -9,7 +9,7 @@ bool xen_igd_gfx_pt_enabled(void); void xen_igd_gfx_pt_set(bool value, Error **errp); -void xen_pt_log(const PCIDevice *d, const char *f, ...) GCC_FMT_ATTR(2, 3); +void xen_pt_log(const PCIDevice *d, const char *f, ...) G_GNUC_PRINTF(2, 3); #define XEN_PT_ERR(d, _f, _a...) xen_pt_log(d, "%s: Error: "_f, __func__, ##_a) @@ -41,8 +41,23 @@ typedef struct XenPTReg XenPTReg; #define TYPE_XEN_PT_DEVICE "xen-pci-passthrough" OBJECT_DECLARE_SIMPLE_TYPE(XenPCIPassthroughState, XEN_PT_DEVICE) +#define XEN_PT_DEVICE_CLASS(klass) \ + OBJECT_CLASS_CHECK(XenPTDeviceClass, klass, TYPE_XEN_PT_DEVICE) +#define XEN_PT_DEVICE_GET_CLASS(obj) \ + OBJECT_GET_CLASS(XenPTDeviceClass, obj, TYPE_XEN_PT_DEVICE) + +typedef void (*XenPTQdevRealize)(DeviceState *qdev, Error **errp); + +typedef struct XenPTDeviceClass { + PCIDeviceClass parent_class; + XenPTQdevRealize pci_qdev_realize; +} XenPTDeviceClass; + uint32_t igd_read_opregion(XenPCIPassthroughState *s); +void xen_igd_reserve_slot(PCIBus *pci_bus); void igd_write_opregion(XenPCIPassthroughState *s, uint32_t val); +void xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s, + XenHostPCIDevice *dev); /* function type for config reg */ typedef int (*xen_pt_conf_reg_init) @@ -74,6 +89,13 @@ typedef int (*xen_pt_conf_byte_read) #define XEN_PCI_INTEL_OPREGION 0xfc +#define XEN_PCI_IGD_DOMAIN 0 +#define XEN_PCI_IGD_BUS 0 +#define XEN_PCI_IGD_DEV 2 +#define XEN_PCI_IGD_FN 0 +#define XEN_PCI_IGD_SLOT_MASK \ + (1UL << PCI_SLOT(PCI_DEVFN(XEN_PCI_IGD_DEV, XEN_PCI_IGD_FN))) + typedef enum { XEN_PT_GRP_TYPE_HARDWIRED = 0, /* 0 Hardwired reg group */ XEN_PT_GRP_TYPE_EMU, /* emul reg group */ diff --git a/hw/xen/xen_pt_config_init.c b/hw/xen/xen_pt_config_init.c index c5c4e943a8..cde898b744 100644 --- a/hw/xen/xen_pt_config_init.c +++ b/hw/xen/xen_pt_config_init.c @@ -985,8 +985,8 @@ static XenPTRegInfo xen_pt_emu_reg_pcie[] = { .offset = 0x28, .size = 2, .init_val = 0x0000, - .ro_mask = 0xFFE0, - .emu_mask = 0xFFFF, + .ro_mask = 0xFFA0, + .emu_mask = 0xFFBF, .init = xen_pt_devctrl2_reg_init, .u.w.read = 
xen_pt_word_reg_read, .u.w.write = xen_pt_word_reg_write, @@ -1965,11 +1965,12 @@ static void xen_pt_config_reg_init(XenPCIPassthroughState *s, if ((data & host_mask) != (val & host_mask)) { uint32_t new_val; - - /* Mask out host (including past size). */ - new_val = val & host_mask; - /* Merge emulated ones (excluding the non-emulated ones). */ - new_val |= data & host_mask; + /* + * Merge the emulated bits (data) with the host bits (val) + * and mask out the bits past size to enable restoration + * of the proper value for logging below. + */ + new_val = XEN_PT_MERGE_VALUE(val, data, host_mask) & size_mask; /* Leave intact host and emulated values past the size - even though * we do not care as we write per reg->size granularity, but for the * logging below lets have the proper value. */ @@ -2031,12 +2032,16 @@ void xen_pt_config_init(XenPCIPassthroughState *s, Error **errp) } } - /* - * By default we will trap up to 0x40 in the cfg space. - * If an intel device is pass through we need to trap 0xfc, - * therefore the size should be 0xff. - */ if (xen_pt_emu_reg_grps[i].grp_id == XEN_PCI_INTEL_OPREGION) { + if (!is_igd_vga_passthrough(&s->real_device) || + s->real_device.vendor_id != PCI_VENDOR_ID_INTEL) { + continue; + } + /* + * By default we will trap up to 0x40 in the cfg space. + * If an intel device is pass through we need to trap 0xfc, + * therefore the size should be 0xff. + */ reg_grp_offset = XEN_PCI_INTEL_OPREGION; } diff --git a/hw/xen/xen_pt_graphics.c b/hw/xen/xen_pt_graphics.c index a3bc7e3921..f303f67c9c 100644 --- a/hw/xen/xen_pt_graphics.c +++ b/hw/xen/xen_pt_graphics.c @@ -289,3 +289,125 @@ void igd_write_opregion(XenPCIPassthroughState *s, uint32_t val) (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT), (unsigned long)(igd_guest_opregion >> XC_PAGE_SHIFT)); } + +typedef struct { + uint16_t gpu_device_id; + uint16_t pch_device_id; + uint8_t pch_revision_id; +} IGDDeviceIDInfo; + +/* + * In real world different GPU should have different PCH. But actually + * the different PCH DIDs likely map to different PCH SKUs. We do the + * same thing for the GPU. For PCH, the different SKUs are going to be + * all the same silicon design and implementation, just different + * features turn on and off with fuses. The SW interfaces should be + * consistent across all SKUs in a given family (eg LPT). But just same + * features may not be supported. + * + * Most of these different PCH features probably don't matter to the + * Gfx driver, but obviously any difference in display port connections + * will so it should be fine with any PCH in case of passthrough. + * + * So currently use one PCH version, 0x8c4e, to cover all HSW(Haswell) + * scenarios, 0x9cc3 for BDW(Broadwell). 
+ */ +static const IGDDeviceIDInfo igd_combo_id_infos[] = { + /* HSW Classic */ + {0x0402, 0x8c4e, 0x04}, /* HSWGT1D, HSWD_w7 */ + {0x0406, 0x8c4e, 0x04}, /* HSWGT1M, HSWM_w7 */ + {0x0412, 0x8c4e, 0x04}, /* HSWGT2D, HSWD_w7 */ + {0x0416, 0x8c4e, 0x04}, /* HSWGT2M, HSWM_w7 */ + {0x041E, 0x8c4e, 0x04}, /* HSWGT15D, HSWD_w7 */ + /* HSW ULT */ + {0x0A06, 0x8c4e, 0x04}, /* HSWGT1UT, HSWM_w7 */ + {0x0A16, 0x8c4e, 0x04}, /* HSWGT2UT, HSWM_w7 */ + {0x0A26, 0x8c4e, 0x06}, /* HSWGT3UT, HSWM_w7 */ + {0x0A2E, 0x8c4e, 0x04}, /* HSWGT3UT28W, HSWM_w7 */ + {0x0A1E, 0x8c4e, 0x04}, /* HSWGT2UX, HSWM_w7 */ + {0x0A0E, 0x8c4e, 0x04}, /* HSWGT1ULX, HSWM_w7 */ + /* HSW CRW */ + {0x0D26, 0x8c4e, 0x04}, /* HSWGT3CW, HSWM_w7 */ + {0x0D22, 0x8c4e, 0x04}, /* HSWGT3CWDT, HSWD_w7 */ + /* HSW Server */ + {0x041A, 0x8c4e, 0x04}, /* HSWSVGT2, HSWD_w7 */ + /* HSW SRVR */ + {0x040A, 0x8c4e, 0x04}, /* HSWSVGT1, HSWD_w7 */ + /* BSW */ + {0x1606, 0x9cc3, 0x03}, /* BDWULTGT1, BDWM_w7 */ + {0x1616, 0x9cc3, 0x03}, /* BDWULTGT2, BDWM_w7 */ + {0x1626, 0x9cc3, 0x03}, /* BDWULTGT3, BDWM_w7 */ + {0x160E, 0x9cc3, 0x03}, /* BDWULXGT1, BDWM_w7 */ + {0x161E, 0x9cc3, 0x03}, /* BDWULXGT2, BDWM_w7 */ + {0x1602, 0x9cc3, 0x03}, /* BDWHALOGT1, BDWM_w7 */ + {0x1612, 0x9cc3, 0x03}, /* BDWHALOGT2, BDWM_w7 */ + {0x1622, 0x9cc3, 0x03}, /* BDWHALOGT3, BDWM_w7 */ + {0x162B, 0x9cc3, 0x03}, /* BDWHALO28W, BDWM_w7 */ + {0x162A, 0x9cc3, 0x03}, /* BDWGT3WRKS, BDWM_w7 */ + {0x162D, 0x9cc3, 0x03}, /* BDWGT3SRVR, BDWM_w7 */ +}; + +static void isa_bridge_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + dc->desc = "ISA bridge faked to support IGD PT"; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->class_id = PCI_CLASS_BRIDGE_ISA; +}; + +static const TypeInfo isa_bridge_info = { + .name = "igd-passthrough-isa-bridge", + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIDevice), + .class_init = isa_bridge_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void pt_graphics_register_types(void) +{ + type_register_static(&isa_bridge_info); +} +type_init(pt_graphics_register_types) + +void xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s, + XenHostPCIDevice *dev) +{ + PCIBus *bus = pci_get_bus(&s->dev); + struct PCIDevice *bridge_dev; + int i, num; + const uint16_t gpu_dev_id = dev->device_id; + uint16_t pch_dev_id = 0xffff; + uint8_t pch_rev_id = 0; + + num = ARRAY_SIZE(igd_combo_id_infos); + for (i = 0; i < num; i++) { + if (gpu_dev_id == igd_combo_id_infos[i].gpu_device_id) { + pch_dev_id = igd_combo_id_infos[i].pch_device_id; + pch_rev_id = igd_combo_id_infos[i].pch_revision_id; + } + } + + if (pch_dev_id == 0xffff) { + return; + } + + /* Currently IGD drivers always need to access PCH by 1f.0. */ + bridge_dev = pci_create_simple(bus, PCI_DEVFN(0x1f, 0), + "igd-passthrough-isa-bridge"); + + /* + * Note that vendor id is always PCI_VENDOR_ID_INTEL. 
+ */ + if (!bridge_dev) { + fprintf(stderr, "set igd-passthrough-isa-bridge failed!\n"); + return; + } + pci_config_set_device_id(bridge_dev->config, pch_dev_id); + pci_config_set_revision(bridge_dev->config, pch_rev_id); +} diff --git a/hw/xen/xen_pt_stub.c b/hw/xen/xen_pt_stub.c index 2d8cac8d54..5c108446a8 100644 --- a/hw/xen/xen_pt_stub.c +++ b/hw/xen/xen_pt_stub.c @@ -20,3 +20,7 @@ void xen_igd_gfx_pt_set(bool value, Error **errp) error_setg(errp, "Xen PCI passthrough support not built in"); } } + +void xen_igd_reserve_slot(PCIBus *pci_bus) +{ +} diff --git a/hw/xen/xen_pvdev.c b/hw/xen/xen_pvdev.c index 8ab458922a..037152f063 100644 --- a/hw/xen/xen_pvdev.c +++ b/hw/xen/xen_pvdev.c @@ -196,37 +196,40 @@ const char *xenbus_strstate(enum xenbus_state state) * 2 == noisy debug messages (logfile only). * 3 == will flood your log (logfile only). */ +static void xen_pv_output_msg(struct XenLegacyDevice *xendev, + FILE *f, const char *fmt, va_list args) +{ + if (xendev) { + fprintf(f, "xen be: %s: ", xendev->name); + } else { + fprintf(f, "xen be core: "); + } + vfprintf(f, fmt, args); +} + void xen_pv_printf(struct XenLegacyDevice *xendev, int msg_level, const char *fmt, ...) { + FILE *logfile; va_list args; - if (xendev) { - if (msg_level > xendev->debug) { - return; - } - qemu_log("xen be: %s: ", xendev->name); - if (msg_level == 0) { - fprintf(stderr, "xen be: %s: ", xendev->name); - } - } else { - if (msg_level > debug) { - return; - } - qemu_log("xen be core: "); - if (msg_level == 0) { - fprintf(stderr, "xen be core: "); - } + if (msg_level > (xendev ? xendev->debug : debug)) { + return; } - va_start(args, fmt); - qemu_log_vprintf(fmt, args); - va_end(args); + + logfile = qemu_log_trylock(); + if (logfile) { + va_start(args, fmt); + xen_pv_output_msg(xendev, logfile, fmt, args); + va_end(args); + qemu_log_unlock(logfile); + } + if (msg_level == 0) { va_start(args, fmt); - vfprintf(stderr, fmt, args); + xen_pv_output_msg(xendev, stderr, fmt, args); va_end(args); } - qemu_log_flush(); } void xen_pv_evtchn_event(void *opaque) diff --git a/hw/xenpv/xen_machine_pv.c b/hw/xenpv/xen_machine_pv.c index 8df575a457..20c9611d71 100644 --- a/hw/xenpv/xen_machine_pv.c +++ b/hw/xenpv/xen_machine_pv.c @@ -63,6 +63,7 @@ static void xen_init_pv(MachineState *machine) if (vga_interface_type == VGA_XENFB) { xen_config_dev_vfb(0, "vnc"); xen_config_dev_vkbd(0); + vga_interface_created = true; } /* configure disks */ diff --git a/hw/xtensa/mx_pic.c b/hw/xtensa/mx_pic.c index d889f953d1..8211c993eb 100644 --- a/hw/xtensa/mx_pic.c +++ b/hw/xtensa/mx_pic.c @@ -334,7 +334,7 @@ void xtensa_mx_pic_reset(void *opaque) mx->miasg = 0; mx->mipipart = 0; for (i = 0; i < mx->n_irq; ++i) { - mx->mirout[i] = 1; + mx->mirout[i] = 0; } for (i = 0; i < mx->n_cpu; ++i) { mx->cpu[i].mipicause = 0; diff --git a/hw/xtensa/sim.c b/hw/xtensa/sim.c index 2028fe793d..946c71cb5b 100644 --- a/hw/xtensa/sim.c +++ b/hw/xtensa/sim.c @@ -96,7 +96,7 @@ XtensaCPU *xtensa_sim_common_init(MachineState *machine) void xtensa_sim_load_kernel(XtensaCPU *cpu, MachineState *machine) { const char *kernel_filename = machine->kernel_filename; -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN int big_endian = true; #else int big_endian = false; diff --git a/hw/xtensa/xtfpga.c b/hw/xtensa/xtfpga.c index 17f087b395..2a5556a35f 100644 --- a/hw/xtensa/xtfpga.c +++ b/hw/xtensa/xtfpga.c @@ -126,7 +126,7 @@ static const MemoryRegionOps xtfpga_fpga_ops = { static XtfpgaFpgaState *xtfpga_fpga_init(MemoryRegion *address_space, hwaddr base, uint32_t freq) 
{ - XtfpgaFpgaState *s = g_malloc(sizeof(XtfpgaFpgaState)); + XtfpgaFpgaState *s = g_new(XtfpgaFpgaState, 1); memory_region_init_io(&s->iomem, NULL, &xtfpga_fpga_ops, s, "xtfpga.fpga", 0x10000); @@ -219,7 +219,7 @@ static const MemoryRegionOps xtfpga_io_ops = { static void xtfpga_init(const XtfpgaBoardDesc *board, MachineState *machine) { -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN int be = 1; #else int be = 0; @@ -430,7 +430,7 @@ static void xtfpga_init(const XtfpgaBoardDesc *board, MachineState *machine) } if (entry_point != env->pc) { uint8_t boot[] = { -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN 0x60, 0x00, 0x08, /* j 1f */ 0x00, /* .literal_position */ 0x00, 0x00, 0x00, 0x00, /* .literal entry_pc */ diff --git a/include/block/accounting.h b/include/block/accounting.h index 878b4c3581..b9caad60d5 100644 --- a/include/block/accounting.h +++ b/include/block/accounting.h @@ -27,7 +27,7 @@ #include "qemu/timed-average.h" #include "qemu/thread.h" -#include "qapi/qapi-builtin-types.h" +#include "qapi/qapi-types-common.h" typedef struct BlockAcctTimedStats BlockAcctTimedStats; typedef struct BlockAcctStats BlockAcctStats; @@ -100,8 +100,8 @@ typedef struct BlockAcctCookie { } BlockAcctCookie; void block_acct_init(BlockAcctStats *stats); -void block_acct_setup(BlockAcctStats *stats, bool account_invalid, - bool account_failed); +void block_acct_setup(BlockAcctStats *stats, enum OnOffAuto account_invalid, + enum OnOffAuto account_failed); void block_acct_cleanup(BlockAcctStats *stats); void block_acct_add_interval(BlockAcctStats *stats, unsigned interval_length); BlockAcctTimedStats *block_acct_interval_next(BlockAcctStats *stats, diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h index b39eefb38d..dd9a7f6461 100644 --- a/include/block/aio-wait.h +++ b/include/block/aio-wait.h @@ -59,10 +59,13 @@ typedef struct { extern AioWait global_aio_wait; /** - * AIO_WAIT_WHILE: + * AIO_WAIT_WHILE_INTERNAL: * @ctx: the aio context, or NULL if multiple aio contexts (for which the * caller does not hold a lock) are involved in the polling condition. * @cond: wait while this conditional expression is true + * @unlock: whether to unlock and then lock again @ctx. This apples + * only when waiting for another AioContext from the main loop. + * Otherwise it's ignored. * * Wait while a condition is true. Use this to implement synchronous * operations that require event loop activity. @@ -75,12 +78,14 @@ extern AioWait global_aio_wait; * wait on conditions between two IOThreads since that could lead to deadlock, * go via the main loop instead. */ -#define AIO_WAIT_WHILE(ctx, cond) ({ \ +#define AIO_WAIT_WHILE_INTERNAL(ctx, cond, unlock) ({ \ bool waited_ = false; \ AioWait *wait_ = &global_aio_wait; \ AioContext *ctx_ = (ctx); \ /* Increment wait_->num_waiters before evaluating cond. */ \ qatomic_inc(&wait_->num_waiters); \ + /* Paired with smp_mb in aio_wait_kick(). 
*/ \ + smp_mb(); \ if (ctx_ && in_aio_context_home_thread(ctx_)) { \ while ((cond)) { \ aio_poll(ctx_, true); \ @@ -90,11 +95,11 @@ extern AioWait global_aio_wait; assert(qemu_get_current_aio_context() == \ qemu_get_aio_context()); \ while ((cond)) { \ - if (ctx_) { \ + if (unlock && ctx_) { \ aio_context_release(ctx_); \ } \ aio_poll(qemu_get_aio_context(), true); \ - if (ctx_) { \ + if (unlock && ctx_) { \ aio_context_acquire(ctx_); \ } \ waited_ = true; \ @@ -103,6 +108,12 @@ extern AioWait global_aio_wait; qatomic_dec(&wait_->num_waiters); \ waited_; }) +#define AIO_WAIT_WHILE(ctx, cond) \ + AIO_WAIT_WHILE_INTERNAL(ctx, cond, true) + +#define AIO_WAIT_WHILE_UNLOCKED(ctx, cond) \ + AIO_WAIT_WHILE_INTERNAL(ctx, cond, false) + /** * aio_wait_kick: * Wake up the main thread if it is waiting on AIO_WAIT_WHILE(). During diff --git a/include/block/aio.h b/include/block/aio.h index 47fbe9d81f..d128558f1d 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -192,6 +192,8 @@ struct AioContext { QSLIST_HEAD(, Coroutine) scheduled_coroutines; QEMUBH *co_schedule_bh; + int thread_pool_min; + int thread_pool_max; /* Thread pool for performing work and receiving completion callbacks. * Has its own locking. */ @@ -469,6 +471,7 @@ void aio_set_fd_handler(AioContext *ctx, IOHandler *io_read, IOHandler *io_write, AioPollFn *io_poll, + IOHandler *io_poll_ready, void *opaque); /* Set polling begin/end callbacks for a file descriptor that has already been @@ -490,7 +493,8 @@ void aio_set_event_notifier(AioContext *ctx, EventNotifier *notifier, bool is_external, EventNotifierHandler *io_read, - AioPollFn *io_poll); + AioPollFn *io_poll, + EventNotifierHandler *io_poll_ready); /* Set polling begin/end callbacks for an event notifier that has already been * registered with aio_set_event_notifier. Do nothing if the event notifier is @@ -767,4 +771,12 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns, void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch, Error **errp); +/** + * aio_context_set_thread_pool_params: + * @ctx: the aio context + * @min: min number of threads to have readily available in the thread pool + * @min: max number of threads the thread pool can contain + */ +void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min, + int64_t max, Error **errp); #endif diff --git a/include/block/block-common.h b/include/block/block-common.h new file mode 100644 index 0000000000..72528c0ba9 --- /dev/null +++ b/include/block/block-common.h @@ -0,0 +1,471 @@ +/* + * QEMU System Emulator block driver + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef BLOCK_COMMON_H +#define BLOCK_COMMON_H + +#include "block/aio.h" +#include "block/aio-wait.h" +#include "qemu/iov.h" +#include "qemu/coroutine.h" +#include "block/accounting.h" +#include "block/dirty-bitmap.h" +#include "block/blockjob.h" +#include "qemu/hbitmap.h" +#include "qemu/transactions.h" + +/* + * generated_co_wrapper + * + * Function specifier, which does nothing but mark functions to be + * generated by scripts/block-coroutine-wrapper.py + * + * Read more in docs/devel/block-coroutine-wrapper.rst + */ +#define generated_co_wrapper + +/* block.c */ +typedef struct BlockDriver BlockDriver; +typedef struct BdrvChild BdrvChild; +typedef struct BdrvChildClass BdrvChildClass; + +typedef struct BlockDriverInfo { + /* in bytes, 0 if irrelevant */ + int cluster_size; + /* offset at which the VM state can be saved (0 if not possible) */ + int64_t vm_state_offset; + bool is_dirty; + /* + * True if this block driver only supports compressed writes + */ + bool needs_compressed_writes; +} BlockDriverInfo; + +typedef struct BlockFragInfo { + uint64_t allocated_clusters; + uint64_t total_clusters; + uint64_t fragmented_clusters; + uint64_t compressed_clusters; +} BlockFragInfo; + +typedef enum { + BDRV_REQ_COPY_ON_READ = 0x1, + BDRV_REQ_ZERO_WRITE = 0x2, + + /* + * The BDRV_REQ_MAY_UNMAP flag is used in write_zeroes requests to indicate + * that the block driver should unmap (discard) blocks if it is guaranteed + * that the result will read back as zeroes. The flag is only passed to the + * driver if the block device is opened with BDRV_O_UNMAP. + */ + BDRV_REQ_MAY_UNMAP = 0x4, + + /* + * An optimization hint when all QEMUIOVector elements are within + * previously registered bdrv_register_buf() memory ranges. + * + * Code that replaces the user's QEMUIOVector elements with bounce buffers + * must take care to clear this flag. + */ + BDRV_REQ_REGISTERED_BUF = 0x8, + + BDRV_REQ_FUA = 0x10, + BDRV_REQ_WRITE_COMPRESSED = 0x20, + + /* + * Signifies that this write request will not change the visible disk + * content. + */ + BDRV_REQ_WRITE_UNCHANGED = 0x40, + + /* + * Forces request serialisation. Use only with write requests. + */ + BDRV_REQ_SERIALISING = 0x80, + + /* + * Execute the request only if the operation can be offloaded or otherwise + * be executed efficiently, but return an error instead of using a slow + * fallback. + */ + BDRV_REQ_NO_FALLBACK = 0x100, + + /* + * BDRV_REQ_PREFETCH makes sense only in the context of copy-on-read + * (i.e., together with the BDRV_REQ_COPY_ON_READ flag or when a COR + * filter is involved), in which case it signals that the COR operation + * need not read the data into memory (qiov) but only ensure they are + * copied to the top layer (i.e., that COR operation is done). + */ + BDRV_REQ_PREFETCH = 0x200, + + /* + * If we need to wait for other requests, just fail immediately. Used + * only together with BDRV_REQ_SERIALISING. Used only with requests aligned + * to request_alignment (corresponding assertions are in block/io.c). 
+ */ + BDRV_REQ_NO_WAIT = 0x400, + + /* Mask of valid flags */ + BDRV_REQ_MASK = 0x7ff, +} BdrvRequestFlags; + +#define BDRV_O_NO_SHARE 0x0001 /* don't share permissions */ +#define BDRV_O_RDWR 0x0002 +#define BDRV_O_RESIZE 0x0004 /* request permission for resizing the node */ +#define BDRV_O_SNAPSHOT 0x0008 /* open the file read only and save + writes in a snapshot */ +#define BDRV_O_TEMPORARY 0x0010 /* delete the file after use */ +#define BDRV_O_NOCACHE 0x0020 /* do not use the host page cache */ +#define BDRV_O_NATIVE_AIO 0x0080 /* use native AIO instead of the + thread pool */ +#define BDRV_O_NO_BACKING 0x0100 /* don't open the backing file */ +#define BDRV_O_NO_FLUSH 0x0200 /* disable flushing on this disk */ +#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */ +#define BDRV_O_INACTIVE 0x0800 /* consistency hint for migration handoff */ +#define BDRV_O_CHECK 0x1000 /* open solely for consistency check */ +#define BDRV_O_ALLOW_RDWR 0x2000 /* allow reopen to change from r/o to r/w */ +#define BDRV_O_UNMAP 0x4000 /* execute guest UNMAP/TRIM operations */ +#define BDRV_O_PROTOCOL 0x8000 /* if no block driver is explicitly given: + select an appropriate protocol driver, + ignoring the format layer */ +#define BDRV_O_NO_IO 0x10000 /* don't initialize for I/O */ +#define BDRV_O_AUTO_RDONLY 0x20000 /* degrade to read-only if opening + read-write fails */ +#define BDRV_O_IO_URING 0x40000 /* use io_uring instead of the thread pool */ + +#ifdef XBOX +#define BDRV_O_RO_WRITE_SHARE 0x80000 /* allow the file to open RO alongside an existing RW handle */ +#endif + +#define BDRV_O_CACHE_MASK (BDRV_O_NOCACHE | BDRV_O_NO_FLUSH) + + +/* Option names of options parsed by the block layer */ + +#define BDRV_OPT_CACHE_WB "cache.writeback" +#define BDRV_OPT_CACHE_DIRECT "cache.direct" +#define BDRV_OPT_CACHE_NO_FLUSH "cache.no-flush" +#define BDRV_OPT_READ_ONLY "read-only" +#define BDRV_OPT_AUTO_READ_ONLY "auto-read-only" +#define BDRV_OPT_DISCARD "discard" +#define BDRV_OPT_FORCE_SHARE "force-share" + + +#define BDRV_SECTOR_BITS 9 +#define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS) + +#define BDRV_REQUEST_MAX_SECTORS MIN_CONST(SIZE_MAX >> BDRV_SECTOR_BITS, \ + INT_MAX >> BDRV_SECTOR_BITS) +#define BDRV_REQUEST_MAX_BYTES (BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) + +/* + * We want allow aligning requests and disk length up to any 32bit alignment + * and don't afraid of overflow. + * To achieve it, and in the same time use some pretty number as maximum disk + * size, let's define maximum "length" (a limit for any offset/bytes request and + * for disk size) to be the greatest power of 2 less than INT64_MAX. + */ +#define BDRV_MAX_ALIGNMENT (1L << 30) +#define BDRV_MAX_LENGTH (QEMU_ALIGN_DOWN(INT64_MAX, BDRV_MAX_ALIGNMENT)) + +/* + * Allocation status flags for bdrv_block_status() and friends. + * + * Public flags: + * BDRV_BLOCK_DATA: allocation for data at offset is tied to this layer + * BDRV_BLOCK_ZERO: offset reads as zero + * BDRV_BLOCK_OFFSET_VALID: an associated offset exists for accessing raw data + * BDRV_BLOCK_ALLOCATED: the content of the block is determined by this + * layer rather than any backing, set by block layer + * BDRV_BLOCK_EOF: the returned pnum covers through end of file for this + * layer, set by block layer + * + * Internal flags: + * BDRV_BLOCK_RAW: for use by passthrough drivers, such as raw, to request + * that the block layer recompute the answer from the returned + * BDS; must be accompanied by just BDRV_BLOCK_OFFSET_VALID. 
+ * BDRV_BLOCK_RECURSE: request that the block layer will recursively search for + * zeroes in file child of current block node inside + * returned region. Only valid together with both + * BDRV_BLOCK_DATA and BDRV_BLOCK_OFFSET_VALID. Should not + * appear with BDRV_BLOCK_ZERO. + * + * If BDRV_BLOCK_OFFSET_VALID is set, the map parameter represents the + * host offset within the returned BDS that is allocated for the + * corresponding raw guest data. However, whether that offset + * actually contains data also depends on BDRV_BLOCK_DATA, as follows: + * + * DATA ZERO OFFSET_VALID + * t t t sectors read as zero, returned file is zero at offset + * t f t sectors read as valid from file at offset + * f t t sectors preallocated, read as zero, returned file not + * necessarily zero at offset + * f f t sectors preallocated but read from backing_hd, + * returned file contains garbage at offset + * t t f sectors preallocated, read as zero, unknown offset + * t f f sectors read from unknown file or offset + * f t f not allocated or unknown offset, read as zero + * f f f not allocated or unknown offset, read from backing_hd + */ +#define BDRV_BLOCK_DATA 0x01 +#define BDRV_BLOCK_ZERO 0x02 +#define BDRV_BLOCK_OFFSET_VALID 0x04 +#define BDRV_BLOCK_RAW 0x08 +#define BDRV_BLOCK_ALLOCATED 0x10 +#define BDRV_BLOCK_EOF 0x20 +#define BDRV_BLOCK_RECURSE 0x40 + +typedef QTAILQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue; + +typedef struct BDRVReopenState { + BlockDriverState *bs; + int flags; + BlockdevDetectZeroesOptions detect_zeroes; + bool backing_missing; + BlockDriverState *old_backing_bs; /* keep pointer for permissions update */ + BlockDriverState *old_file_bs; /* keep pointer for permissions update */ + QDict *options; + QDict *explicit_options; + void *opaque; +} BDRVReopenState; + +/* + * Block operation types + */ +typedef enum BlockOpType { + BLOCK_OP_TYPE_BACKUP_SOURCE, + BLOCK_OP_TYPE_BACKUP_TARGET, + BLOCK_OP_TYPE_CHANGE, + BLOCK_OP_TYPE_COMMIT_SOURCE, + BLOCK_OP_TYPE_COMMIT_TARGET, + BLOCK_OP_TYPE_DATAPLANE, + BLOCK_OP_TYPE_DRIVE_DEL, + BLOCK_OP_TYPE_EJECT, + BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, + BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, + BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE, + BLOCK_OP_TYPE_MIRROR_SOURCE, + BLOCK_OP_TYPE_MIRROR_TARGET, + BLOCK_OP_TYPE_RESIZE, + BLOCK_OP_TYPE_STREAM, + BLOCK_OP_TYPE_REPLACE, + BLOCK_OP_TYPE_MAX, +} BlockOpType; + +/* Block node permission constants */ +enum { + /** + * A user that has the "permission" of consistent reads is guaranteed that + * their view of the contents of the block device is complete and + * self-consistent, representing the contents of a disk at a specific + * point. + * + * For most block devices (including their backing files) this is true, but + * the property cannot be maintained in a few situations like for + * intermediate nodes of a commit block job. + */ + BLK_PERM_CONSISTENT_READ = 0x01, + + /** This permission is required to change the visible disk contents. */ + BLK_PERM_WRITE = 0x02, + + /** + * This permission (which is weaker than BLK_PERM_WRITE) is both enough and + * required for writes to the block node when the caller promises that + * the visible disk content doesn't change. + * + * As the BLK_PERM_WRITE permission is strictly stronger, either is + * sufficient to perform an unchanging write. + */ + BLK_PERM_WRITE_UNCHANGED = 0x04, + + /** This permission is required to change the size of a block node. */ + BLK_PERM_RESIZE = 0x08, + + /** + * There was a now-removed bit BLK_PERM_GRAPH_MOD, with value of 0x10. 
QEMU + * 6.1 and earlier may still lock the corresponding byte in block/file-posix + * locking. So, implementing some new permission should be very careful to + * not interfere with this old unused thing. + */ + + BLK_PERM_ALL = 0x0f, + + DEFAULT_PERM_PASSTHROUGH = BLK_PERM_CONSISTENT_READ + | BLK_PERM_WRITE + | BLK_PERM_WRITE_UNCHANGED + | BLK_PERM_RESIZE, + + DEFAULT_PERM_UNCHANGED = BLK_PERM_ALL & ~DEFAULT_PERM_PASSTHROUGH, +}; + +/* + * Flags that parent nodes assign to child nodes to specify what kind of + * role(s) they take. + * + * At least one of DATA, METADATA, FILTERED, or COW must be set for + * every child. + * + * + * = Connection with bs->children, bs->file and bs->backing fields = + * + * 1. Filters + * + * Filter drivers have drv->is_filter = true. + * + * Filter node has exactly one FILTERED|PRIMARY child, and may have other + * children which must not have these bits (one example is the + * copy-before-write filter, which also has its target DATA child). + * + * Filter nodes never have COW children. + * + * For most filters, the filtered child is linked in bs->file, bs->backing is + * NULL. For some filters (as an exception), it is the other way around; those + * drivers will have drv->filtered_child_is_backing set to true (see that + * field’s documentation for what drivers this concerns) + * + * 2. "raw" driver (block/raw-format.c) + * + * Formally it's not a filter (drv->is_filter = false) + * + * bs->backing is always NULL + * + * Only has one child, linked in bs->file. Its role is either FILTERED|PRIMARY + * (like filter) or DATA|PRIMARY depending on options. + * + * 3. Other drivers + * + * Don't have any FILTERED children. + * + * May have at most one COW child. In this case it's linked in bs->backing. + * Otherwise bs->backing is NULL. COW child is never PRIMARY. + * + * May have at most one PRIMARY child. In this case it's linked in bs->file. + * Otherwise bs->file is NULL. + * + * May also have some other children that don't have the PRIMARY or COW bit set. + */ +enum BdrvChildRoleBits { + /* + * This child stores data. + * Any node may have an arbitrary number of such children. + */ + BDRV_CHILD_DATA = (1 << 0), + + /* + * This child stores metadata. + * Any node may have an arbitrary number of metadata-storing + * children. + */ + BDRV_CHILD_METADATA = (1 << 1), + + /* + * A child that always presents exactly the same visible data as + * the parent, e.g. by virtue of the parent forwarding all reads + * and writes. + * This flag is mutually exclusive with DATA, METADATA, and COW. + * Any node may have at most one filtered child at a time. + */ + BDRV_CHILD_FILTERED = (1 << 2), + + /* + * Child from which to read all data that isn't allocated in the + * parent (i.e., the backing child); such data is copied to the + * parent through COW (and optionally COR). + * This field is mutually exclusive with DATA, METADATA, and + * FILTERED. + * Any node may have at most one such backing child at a time. + */ + BDRV_CHILD_COW = (1 << 3), + + /* + * The primary child. For most drivers, this is the child whose + * filename applies best to the parent node. + * Any node may have at most one primary child at a time. 
+ */ + BDRV_CHILD_PRIMARY = (1 << 4), + + /* Useful combination of flags */ + BDRV_CHILD_IMAGE = BDRV_CHILD_DATA + | BDRV_CHILD_METADATA + | BDRV_CHILD_PRIMARY, +}; + +/* Mask of BdrvChildRoleBits values */ +typedef unsigned int BdrvChildRole; + +typedef struct BdrvCheckResult { + int corruptions; + int leaks; + int check_errors; + int corruptions_fixed; + int leaks_fixed; + int64_t image_end_offset; + BlockFragInfo bfi; +} BdrvCheckResult; + +typedef enum { + BDRV_FIX_LEAKS = 1, + BDRV_FIX_ERRORS = 2, +} BdrvCheckMode; + +typedef struct BlockSizes { + uint32_t phys; + uint32_t log; +} BlockSizes; + +typedef struct HDGeometry { + uint32_t heads; + uint32_t sectors; + uint32_t cylinders; +} HDGeometry; + +/* + * Common functions that are neither I/O nor Global State. + * + * These functions must never call any function from other categories + * (I/O, "I/O or GS", Global State) except this one, but can be invoked by + * all of them. + */ + +char *bdrv_perm_names(uint64_t perm); +uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm); + +void bdrv_init_with_whitelist(void); +bool bdrv_uses_whitelist(void); +int bdrv_is_whitelisted(BlockDriver *drv, bool read_only); + +int bdrv_parse_aio(const char *mode, int *flags); +int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough); +int bdrv_parse_discard_flags(const char *mode, int *flags); + +int path_has_protocol(const char *path); +int path_is_absolute(const char *path); +char *path_combine(const char *base_path, const char *filename); + +char *bdrv_get_full_backing_filename_from_filename(const char *backed, + const char *backing, + Error **errp); + +#endif /* BLOCK_COMMON_H */ diff --git a/include/block/block-copy.h b/include/block/block-copy.h index 5c8278895c..ba0b425d78 100644 --- a/include/block/block-copy.h +++ b/include/block/block-copy.h @@ -25,19 +25,24 @@ typedef struct BlockCopyState BlockCopyState; typedef struct BlockCopyCallState BlockCopyCallState; BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target, - int64_t cluster_size, bool use_copy_range, - BdrvRequestFlags write_flags, + const BdrvDirtyBitmap *bitmap, Error **errp); +/* Function should be called prior any actual copy request */ +void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range, + bool compress); void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm); void block_copy_state_free(BlockCopyState *s); +void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes); int64_t block_copy_reset_unallocated(BlockCopyState *s, int64_t offset, int64_t *count); int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes, - bool ignore_ratelimit); + bool ignore_ratelimit, uint64_t timeout_ns, + BlockCopyAsyncCallbackFunc cb, + void *cb_opaque); /* * Run block-copy in a coroutine, create corresponding BlockCopyCallState @@ -89,6 +94,7 @@ void block_copy_kick(BlockCopyCallState *call_state); void block_copy_call_cancel(BlockCopyCallState *call_state); BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s); +int64_t block_copy_cluster_size(BlockCopyState *s); void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip); #endif /* BLOCK_COPY_H */ diff --git a/include/block/block-global-state.h b/include/block/block-global-state.h new file mode 100644 index 0000000000..c7bd4a2088 --- /dev/null +++ b/include/block/block-global-state.h @@ -0,0 +1,255 @@ +/* + * QEMU System Emulator block driver + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, 
free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef BLOCK_GLOBAL_STATE_H +#define BLOCK_GLOBAL_STATE_H + +#include "block-common.h" + +/* + * Global state (GS) API. These functions run under the BQL. + * + * If a function modifies the graph, it also uses drain and/or + * aio_context_acquire/release to be sure it has unique access. + * aio_context locking is needed together with BQL because of + * the thread-safe I/O API that concurrently runs and accesses + * the graph without the BQL. + * + * It is important to note that not all of these functions are + * necessarily limited to running under the BQL, but they would + * require additional auditing and many small thread-safety changes + * to move them into the I/O API. Often it's not worth doing that + * work since the APIs are only used with the BQL held at the + * moment, so they have been placed in the GS API (for now). + * + * These functions can call any function from this and other categories + * (I/O, "I/O or GS", Common), but must be invoked only by other GS APIs. + * + * All functions in this header must use the macro + * GLOBAL_STATE_CODE(); + * to catch when they are accidentally called without the BQL. 
+ */ + +void bdrv_init(void); +BlockDriver *bdrv_find_protocol(const char *filename, + bool allow_protocol_prefix, + Error **errp); +BlockDriver *bdrv_find_format(const char *format_name); +int bdrv_create(BlockDriver *drv, const char* filename, + QemuOpts *opts, Error **errp); +int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp); + +BlockDriverState *bdrv_new(void); +int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, + Error **errp); +int bdrv_replace_node(BlockDriverState *from, BlockDriverState *to, + Error **errp); +int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs, + Error **errp); +BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *node_options, + int flags, Error **errp); +int bdrv_drop_filter(BlockDriverState *bs, Error **errp); + +BdrvChild *bdrv_open_child(const char *filename, + QDict *options, const char *bdref_key, + BlockDriverState *parent, + const BdrvChildClass *child_class, + BdrvChildRole child_role, + bool allow_none, Error **errp); +int bdrv_open_file_child(const char *filename, + QDict *options, const char *bdref_key, + BlockDriverState *parent, Error **errp); +BlockDriverState *bdrv_open_blockdev_ref(BlockdevRef *ref, Error **errp); +int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd, + Error **errp); +int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options, + const char *bdref_key, Error **errp); +BlockDriverState *bdrv_open(const char *filename, const char *reference, + QDict *options, int flags, Error **errp); +BlockDriverState *bdrv_new_open_driver_opts(BlockDriver *drv, + const char *node_name, + QDict *options, int flags, + Error **errp); +BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name, + int flags, Error **errp); +BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue, + BlockDriverState *bs, QDict *options, + bool keep_old_opts); +void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue); +int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp); +int bdrv_reopen(BlockDriverState *bs, QDict *opts, bool keep_old_opts, + Error **errp); +int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only, + Error **errp); +BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs, + const char *backing_file); +void bdrv_refresh_filename(BlockDriverState *bs); +void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp); +int bdrv_commit(BlockDriverState *bs); +int bdrv_make_empty(BdrvChild *c, Error **errp); +int bdrv_change_backing_file(BlockDriverState *bs, const char *backing_file, + const char *backing_fmt, bool warn); +void bdrv_register(BlockDriver *bdrv); +int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base, + const char *backing_file_str); +BlockDriverState *bdrv_find_overlay(BlockDriverState *active, + BlockDriverState *bs); +BlockDriverState *bdrv_find_base(BlockDriverState *bs); +bool bdrv_is_backing_chain_frozen(BlockDriverState *bs, BlockDriverState *base, + Error **errp); +int bdrv_freeze_backing_chain(BlockDriverState *bs, BlockDriverState *base, + Error **errp); +void bdrv_unfreeze_backing_chain(BlockDriverState *bs, BlockDriverState *base); + +/* + * The units of offset and total_work_size may be chosen arbitrarily by the + * block driver; total_work_size may change during the course of the amendment + * operation + */ +typedef void BlockDriverAmendStatusCB(BlockDriverState *bs, int64_t offset, + int64_t total_work_size, void *opaque); +int 
bdrv_amend_options(BlockDriverState *bs_new, QemuOpts *opts, + BlockDriverAmendStatusCB *status_cb, void *cb_opaque, + bool force, + Error **errp); + +/* check if a named node can be replaced when doing drive-mirror */ +BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs, + const char *node_name, Error **errp); + +int bdrv_activate(BlockDriverState *bs, Error **errp); +void bdrv_activate_all(Error **errp); +int bdrv_inactivate_all(void); + +int bdrv_flush_all(void); +void bdrv_close_all(void); +void bdrv_drain_all_begin(void); +void bdrv_drain_all_end(void); +void bdrv_drain_all(void); + +int bdrv_has_zero_init_1(BlockDriverState *bs); +int bdrv_has_zero_init(BlockDriverState *bs); +BlockDriverState *bdrv_find_node(const char *node_name); +BlockDeviceInfoList *bdrv_named_nodes_list(bool flat, Error **errp); +XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp); +BlockDriverState *bdrv_lookup_bs(const char *device, + const char *node_name, + Error **errp); +bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base); +BlockDriverState *bdrv_next_node(BlockDriverState *bs); +BlockDriverState *bdrv_next_all_states(BlockDriverState *bs); + +typedef struct BdrvNextIterator { + enum { + BDRV_NEXT_BACKEND_ROOTS, + BDRV_NEXT_MONITOR_OWNED, + } phase; + BlockBackend *blk; + BlockDriverState *bs; +} BdrvNextIterator; + +BlockDriverState *bdrv_first(BdrvNextIterator *it); +BlockDriverState *bdrv_next(BdrvNextIterator *it); +void bdrv_next_cleanup(BdrvNextIterator *it); + +BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs); +void bdrv_iterate_format(void (*it)(void *opaque, const char *name), + void *opaque, bool read_only); +char *bdrv_get_full_backing_filename(BlockDriverState *bs, Error **errp); +char *bdrv_dirname(BlockDriverState *bs, Error **errp); + +void bdrv_img_create(const char *filename, const char *fmt, + const char *base_filename, const char *base_fmt, + char *options, uint64_t img_size, int flags, + bool quiet, Error **errp); + +void bdrv_ref(BlockDriverState *bs); +void bdrv_unref(BlockDriverState *bs); +void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child); +BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs, + BlockDriverState *child_bs, + const char *child_name, + const BdrvChildClass *child_class, + BdrvChildRole child_role, + Error **errp); + +bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp); +void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason); +void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason); +void bdrv_op_block_all(BlockDriverState *bs, Error *reason); +void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason); +bool bdrv_op_blocker_is_empty(BlockDriverState *bs); + +int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event, + const char *tag); +int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag); +int bdrv_debug_resume(BlockDriverState *bs, const char *tag); +bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag); + +/** + * Locks the AioContext of @bs if it's not the current AioContext. This avoids + * double locking which could lead to deadlocks: This is a coroutine_fn, so we + * know we already own the lock of the current AioContext. + * + * May only be called in the main thread. + */ +void coroutine_fn bdrv_co_lock(BlockDriverState *bs); + +/** + * Unlocks the AioContext of @bs if it's not the current AioContext. 
+ */ +void coroutine_fn bdrv_co_unlock(BlockDriverState *bs); + +bool bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx, + GHashTable *visited, Transaction *tran, + Error **errp); +int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, + BdrvChild *ignore_child, Error **errp); + +int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz); +int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo); + +void bdrv_add_child(BlockDriverState *parent, BlockDriverState *child, + Error **errp); +void bdrv_del_child(BlockDriverState *parent, BdrvChild *child, Error **errp); + +/** + * + * bdrv_register_buf/bdrv_unregister_buf: + * + * Register/unregister a buffer for I/O. For example, VFIO drivers are + * interested to know the memory areas that would later be used for I/O, so + * that they can prepare IOMMU mapping etc., to get better performance. + * + * Buffers must not overlap and they must be unregistered with the same values that they were registered with. + * + * Returns: true on success, false on failure + */ +bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size, + Error **errp); +void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size); + +void bdrv_cancel_in_flight(BlockDriverState *bs); + +#endif /* BLOCK_GLOBAL_STATE_H */ diff --git a/include/block/block-hmp-cmds.h b/include/block/block-hmp-cmds.h index 3412e108ca..ba0593c440 100644 --- a/include/block/block-hmp-cmds.h +++ b/include/block/block-hmp-cmds.h @@ -12,8 +12,8 @@ * the COPYING file in the top-level directory. */ -#ifndef BLOCK_HMP_COMMANDS_H -#define BLOCK_HMP_COMMANDS_H +#ifndef BLOCK_BLOCK_HMP_CMDS_H +#define BLOCK_BLOCK_HMP_CMDS_H void hmp_drive_add(Monitor *mon, const QDict *qdict); @@ -38,7 +38,7 @@ void hmp_nbd_server_add(Monitor *mon, const QDict *qdict); void hmp_nbd_server_remove(Monitor *mon, const QDict *qdict); void hmp_nbd_server_stop(Monitor *mon, const QDict *qdict); -void hmp_block_resize(Monitor *mon, const QDict *qdict); +void coroutine_fn hmp_block_resize(Monitor *mon, const QDict *qdict); void hmp_block_stream(Monitor *mon, const QDict *qdict); void hmp_block_passwd(Monitor *mon, const QDict *qdict); void hmp_block_set_io_throttle(Monitor *mon, const QDict *qdict); diff --git a/include/block/block-io.h b/include/block/block-io.h new file mode 100644 index 0000000000..b099d7db45 --- /dev/null +++ b/include/block/block-io.h @@ -0,0 +1,378 @@ +/* + * QEMU System Emulator block driver + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef BLOCK_IO_H +#define BLOCK_IO_H + +#include "block-common.h" + +/* + * I/O API functions. These functions are thread-safe, and therefore + * can run in any thread as long as the thread has called + * aio_context_acquire/release(). + * + * These functions can only call functions from I/O and Common categories, + * but can be invoked by GS, "I/O or GS" and I/O APIs. + * + * All functions in this category must use the macro + * IO_CODE(); + * to catch when they are accidentally called by the wrong API. + */ + +int generated_co_wrapper bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset, + int64_t bytes, + BdrvRequestFlags flags); +int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags); +int generated_co_wrapper bdrv_pread(BdrvChild *child, int64_t offset, + int64_t bytes, void *buf, + BdrvRequestFlags flags); +int generated_co_wrapper bdrv_pwrite(BdrvChild *child, int64_t offset, + int64_t bytes, const void *buf, + BdrvRequestFlags flags); +int generated_co_wrapper bdrv_pwrite_sync(BdrvChild *child, int64_t offset, + int64_t bytes, const void *buf, + BdrvRequestFlags flags); +int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset, + int64_t bytes, const void *buf, + BdrvRequestFlags flags); +/* + * Efficiently zero a region of the disk image. Note that this is a regular + * I/O request like read or write and should have a reasonable size. This + * function is not suitable for zeroing the entire image in a single request + * because it may allocate memory for the entire region. + */ +int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset, + int64_t bytes, BdrvRequestFlags flags); + +int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact, + PreallocMode prealloc, BdrvRequestFlags flags, + Error **errp); + +int64_t bdrv_nb_sectors(BlockDriverState *bs); +int64_t bdrv_getlength(BlockDriverState *bs); +int64_t bdrv_get_allocated_file_size(BlockDriverState *bs); +BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts, + BlockDriverState *in_bs, Error **errp); +void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr); +int coroutine_fn bdrv_co_delete_file(BlockDriverState *bs, Error **errp); +void coroutine_fn bdrv_co_delete_file_noerr(BlockDriverState *bs); + + +/* async block I/O */ +void bdrv_aio_cancel(BlockAIOCB *acb); +void bdrv_aio_cancel_async(BlockAIOCB *acb); + +/* sg packet commands */ +int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf); + +/* Ensure contents are flushed to disk. 
*/ +int coroutine_fn bdrv_co_flush(BlockDriverState *bs); + +int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, + int64_t bytes); +bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs); +int bdrv_block_status(BlockDriverState *bs, int64_t offset, + int64_t bytes, int64_t *pnum, int64_t *map, + BlockDriverState **file); +int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base, + int64_t offset, int64_t bytes, int64_t *pnum, + int64_t *map, BlockDriverState **file); +int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes, + int64_t *pnum); +int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base, + bool include_base, int64_t offset, int64_t bytes, + int64_t *pnum); +int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, + int64_t bytes); + +int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only, + bool ignore_allow_rdw, Error **errp); +int bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg, + Error **errp); +bool bdrv_is_read_only(BlockDriverState *bs); +bool bdrv_is_writable(BlockDriverState *bs); +bool bdrv_is_sg(BlockDriverState *bs); +int bdrv_get_flags(BlockDriverState *bs); +bool bdrv_is_inserted(BlockDriverState *bs); +void bdrv_lock_medium(BlockDriverState *bs, bool locked); +void bdrv_eject(BlockDriverState *bs, bool eject_flag); +const char *bdrv_get_format_name(BlockDriverState *bs); + +bool bdrv_supports_compressed_writes(BlockDriverState *bs); +const char *bdrv_get_node_name(const BlockDriverState *bs); +const char *bdrv_get_device_name(const BlockDriverState *bs); +const char *bdrv_get_device_or_node_name(const BlockDriverState *bs); +int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi); +ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs, + Error **errp); +BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs); +void bdrv_round_to_clusters(BlockDriverState *bs, + int64_t offset, int64_t bytes, + int64_t *cluster_offset, + int64_t *cluster_bytes); + +void bdrv_get_backing_filename(BlockDriverState *bs, + char *filename, int filename_size); + +int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, + int64_t pos, int size); + +int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, + int64_t pos, int size); + +/* + * Returns the alignment in bytes that is required so that no bounce buffer + * is required throughout the stack + */ +size_t bdrv_min_mem_align(BlockDriverState *bs); +/* Returns optimal alignment in bytes for bounce buffer */ +size_t bdrv_opt_mem_align(BlockDriverState *bs); +void *qemu_blockalign(BlockDriverState *bs, size_t size); +void *qemu_blockalign0(BlockDriverState *bs, size_t size); +void *qemu_try_blockalign(BlockDriverState *bs, size_t size); +void *qemu_try_blockalign0(BlockDriverState *bs, size_t size); + +void bdrv_enable_copy_on_read(BlockDriverState *bs); +void bdrv_disable_copy_on_read(BlockDriverState *bs); + +void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event); + +#define BLKDBG_EVENT(child, evt) \ + do { \ + if (child) { \ + bdrv_debug_event(child->bs, evt); \ + } \ + } while (0) + +/** + * bdrv_get_aio_context: + * + * Returns: the currently bound #AioContext + */ +AioContext *bdrv_get_aio_context(BlockDriverState *bs); + +AioContext *bdrv_child_get_parent_aio_context(BdrvChild *c); + +/** + * Move the current coroutine to the AioContext of @bs and return the old + * AioContext of the coroutine. 
Increase bs->in_flight so that draining @bs + * will wait for the operation to proceed until the corresponding + * bdrv_co_leave(). + * + * Consequently, you can't call drain inside a bdrv_co_enter/leave() section as + * this will deadlock. + */ +AioContext *coroutine_fn bdrv_co_enter(BlockDriverState *bs); + +/** + * Ends a section started by bdrv_co_enter(). Move the current coroutine back + * to old_ctx and decrease bs->in_flight again. + */ +void coroutine_fn bdrv_co_leave(BlockDriverState *bs, AioContext *old_ctx); + +/** + * Transfer control to @co in the aio context of @bs + */ +void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co); + +AioContext *child_of_bds_get_parent_aio_context(BdrvChild *c); + +void bdrv_io_plug(BlockDriverState *bs); +void bdrv_io_unplug(BlockDriverState *bs); + +bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name, + uint32_t granularity, Error **errp); + +/** + * + * bdrv_co_copy_range: + * + * Do offloaded copy between two children. If the operation is not implemented + * by the driver, or if the backend storage doesn't support it, a negative + * error code will be returned. + * + * Note: block layer doesn't emulate or fallback to a bounce buffer approach + * because usually the caller shouldn't attempt offloaded copy any more (e.g. + * calling copy_file_range(2)) after the first error, thus it should fall back + * to a read+write path in the caller level. + * + * @src: Source child to copy data from + * @src_offset: offset in @src image to read data + * @dst: Destination child to copy data to + * @dst_offset: offset in @dst image to write data + * @bytes: number of bytes to copy + * @flags: request flags. Supported flags: + * BDRV_REQ_ZERO_WRITE - treat the @src range as zero data and do zero + * write on @dst as if bdrv_co_pwrite_zeroes is + * called. Used to simplify caller code, or + * during BlockDriver.bdrv_co_copy_range_from() + * recursion. + * BDRV_REQ_NO_SERIALISING - do not serialize with other overlapping + * requests currently in flight. + * + * Returns: 0 if succeeded; negative error code if failed. + **/ +int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset, + BdrvChild *dst, int64_t dst_offset, + int64_t bytes, BdrvRequestFlags read_flags, + BdrvRequestFlags write_flags); + +/** + * bdrv_drained_end_no_poll: + * + * Same as bdrv_drained_end(), but do not poll for the subgraph to + * actually become unquiesced. Therefore, no graph changes will occur + * with this function. + * + * *drained_end_counter is incremented for every background operation + * that is scheduled, and will be decremented for every operation once + * it settles. The caller must poll until it reaches 0. The counter + * should be accessed using atomic operations only. + */ +void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter); + + +/* + * "I/O or GS" API functions. These functions can run without + * the BQL, but only in one specific iothread/main loop. + * + * More specifically, these functions use BDRV_POLL_WHILE(bs), which + * requires the caller to be either in the main thread and hold + * the BlockdriverState (bs) AioContext lock, or directly in the + * home thread that runs the bs AioContext. Calling them from + * another thread in another AioContext would cause deadlocks. + * + * Therefore, these functions are not proper I/O, because they + * can't run in *any* iothreads, but only in a specific one. 
+ * + * These functions can call any function from I/O, Common and this + * categories, but must be invoked only by other "I/O or GS" and GS APIs. + * + * All functions in this category must use the macro + * IO_OR_GS_CODE(); + * to catch when they are accidentally called by the wrong API. + */ + +#define BDRV_POLL_WHILE(bs, cond) ({ \ + BlockDriverState *bs_ = (bs); \ + IO_OR_GS_CODE(); \ + AIO_WAIT_WHILE(bdrv_get_aio_context(bs_), \ + cond); }) + +void bdrv_drain(BlockDriverState *bs); + +int generated_co_wrapper +bdrv_truncate(BdrvChild *child, int64_t offset, bool exact, + PreallocMode prealloc, BdrvRequestFlags flags, Error **errp); + +int generated_co_wrapper bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, + BdrvCheckMode fix); + +/* Invalidate any cached metadata used by image formats */ +int generated_co_wrapper bdrv_invalidate_cache(BlockDriverState *bs, + Error **errp); +int generated_co_wrapper bdrv_flush(BlockDriverState *bs); +int generated_co_wrapper bdrv_pdiscard(BdrvChild *child, int64_t offset, + int64_t bytes); +int generated_co_wrapper +bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos); +int generated_co_wrapper +bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos); + +/** + * bdrv_parent_drained_begin_single: + * + * Begin a quiesced section for the parent of @c. If @poll is true, wait for + * any pending activity to cease. + */ +void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll); + +/** + * bdrv_parent_drained_end_single: + * + * End a quiesced section for the parent of @c. + * + * This polls @bs's AioContext until all scheduled sub-drained_ends + * have settled, which may result in graph changes. + */ +void bdrv_parent_drained_end_single(BdrvChild *c); + +/** + * bdrv_drain_poll: + * + * Poll for pending requests in @bs, its parents (except for @ignore_parent), + * and if @recursive is true its children as well (used for subtree drain). + * + * If @ignore_bds_parents is true, parents that are BlockDriverStates must + * ignore the drain request because they will be drained separately (used for + * drain_all). + * + * This is part of bdrv_drained_begin. + */ +bool bdrv_drain_poll(BlockDriverState *bs, bool recursive, + BdrvChild *ignore_parent, bool ignore_bds_parents); + +/** + * bdrv_drained_begin: + * + * Begin a quiesced section for exclusive access to the BDS, by disabling + * external request sources including NBD server, block jobs, and device model. + * + * This function can be recursive. + */ +void bdrv_drained_begin(BlockDriverState *bs); + +/** + * bdrv_do_drained_begin_quiesce: + * + * Quiesces a BDS like bdrv_drained_begin(), but does not wait for already + * running requests to complete. + */ +void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, + BdrvChild *parent, bool ignore_bds_parents); + +/** + * Like bdrv_drained_begin, but recursively begins a quiesced section for + * exclusive access to all child nodes as well. + */ +void bdrv_subtree_drained_begin(BlockDriverState *bs); + +/** + * bdrv_drained_end: + * + * End a quiescent section started by bdrv_drained_begin(). + * + * This polls @bs's AioContext until all scheduled sub-drained_ends + * have settled. On one hand, that may result in graph changes. On + * the other, this requires that the caller either runs in the main + * loop; or that all involved nodes (@bs and all of its parents) are + * in the caller's AioContext. 
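/*
 * Editor's sketch (not part of the patch): the typical drained-section
 * pattern built from the declarations in this header. Per the rules above,
 * it must run in the main loop or in bs's home AioContext.
 */
static void example_with_bs_quiesced(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);     /* stop external request sources and wait
                                 * for in-flight requests to settle */

    /* ... it is now safe to inspect or reconfigure bs ... */

    bdrv_drained_end(bs);       /* resume normal request processing */
}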
+ */ +void bdrv_drained_end(BlockDriverState *bs); + +/** + * End a quiescent section started by bdrv_subtree_drained_begin(). + */ +void bdrv_subtree_drained_end(BlockDriverState *bs); + +#endif /* BLOCK_IO_H */ diff --git a/include/block/block.h b/include/block/block.h index eccd27e884..1e6b8fef1e 100644 --- a/include/block/block.h +++ b/include/block/block.h @@ -1,864 +1,32 @@ +/* + * QEMU System Emulator block driver + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ #ifndef BLOCK_H #define BLOCK_H -#include "block/aio.h" -#include "block/aio-wait.h" -#include "qemu/iov.h" -#include "qemu/coroutine.h" -#include "block/accounting.h" -#include "block/dirty-bitmap.h" -#include "block/blockjob.h" -#include "qemu/hbitmap.h" -#include "qemu/transactions.h" +#include "block-global-state.h" +#include "block-io.h" -/* - * generated_co_wrapper - * - * Function specifier, which does nothing but mark functions to be - * generated by scripts/block-coroutine-wrapper.py - * - * Read more in docs/devel/block-coroutine-wrapper.rst - */ -#define generated_co_wrapper +/* DO NOT ADD ANYTHING IN HERE. USE ONE OF THE HEADERS INCLUDED ABOVE */ -/* block.c */ -typedef struct BlockDriver BlockDriver; -typedef struct BdrvChild BdrvChild; -typedef struct BdrvChildClass BdrvChildClass; - -typedef struct BlockDriverInfo { - /* in bytes, 0 if irrelevant */ - int cluster_size; - /* offset at which the VM state can be saved (0 if not possible) */ - int64_t vm_state_offset; - bool is_dirty; - /* - * True if this block driver only supports compressed writes - */ - bool needs_compressed_writes; -} BlockDriverInfo; - -typedef struct BlockFragInfo { - uint64_t allocated_clusters; - uint64_t total_clusters; - uint64_t fragmented_clusters; - uint64_t compressed_clusters; -} BlockFragInfo; - -typedef enum { - BDRV_REQ_COPY_ON_READ = 0x1, - BDRV_REQ_ZERO_WRITE = 0x2, - - /* - * The BDRV_REQ_MAY_UNMAP flag is used in write_zeroes requests to indicate - * that the block driver should unmap (discard) blocks if it is guaranteed - * that the result will read back as zeroes. The flag is only passed to the - * driver if the block device is opened with BDRV_O_UNMAP. - */ - BDRV_REQ_MAY_UNMAP = 0x4, - - BDRV_REQ_FUA = 0x10, - BDRV_REQ_WRITE_COMPRESSED = 0x20, - - /* Signifies that this write request will not change the visible disk - * content. */ - BDRV_REQ_WRITE_UNCHANGED = 0x40, - - /* Forces request serialisation. Use only with write requests. 
*/ - BDRV_REQ_SERIALISING = 0x80, - - /* Execute the request only if the operation can be offloaded or otherwise - * be executed efficiently, but return an error instead of using a slow - * fallback. */ - BDRV_REQ_NO_FALLBACK = 0x100, - - /* - * BDRV_REQ_PREFETCH makes sense only in the context of copy-on-read - * (i.e., together with the BDRV_REQ_COPY_ON_READ flag or when a COR - * filter is involved), in which case it signals that the COR operation - * need not read the data into memory (qiov) but only ensure they are - * copied to the top layer (i.e., that COR operation is done). - */ - BDRV_REQ_PREFETCH = 0x200, - - /* - * If we need to wait for other requests, just fail immediately. Used - * only together with BDRV_REQ_SERIALISING. - */ - BDRV_REQ_NO_WAIT = 0x400, - - /* Mask of valid flags */ - BDRV_REQ_MASK = 0x7ff, -} BdrvRequestFlags; - -typedef struct BlockSizes { - uint32_t phys; - uint32_t log; -} BlockSizes; - -typedef struct HDGeometry { - uint32_t heads; - uint32_t sectors; - uint32_t cylinders; -} HDGeometry; - -#define BDRV_O_NO_SHARE 0x0001 /* don't share permissions */ -#define BDRV_O_RDWR 0x0002 -#define BDRV_O_RESIZE 0x0004 /* request permission for resizing the node */ -#define BDRV_O_SNAPSHOT 0x0008 /* open the file read only and save writes in a snapshot */ -#define BDRV_O_TEMPORARY 0x0010 /* delete the file after use */ -#define BDRV_O_NOCACHE 0x0020 /* do not use the host page cache */ -#define BDRV_O_NATIVE_AIO 0x0080 /* use native AIO instead of the thread pool */ -#define BDRV_O_NO_BACKING 0x0100 /* don't open the backing file */ -#define BDRV_O_NO_FLUSH 0x0200 /* disable flushing on this disk */ -#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */ -#define BDRV_O_INACTIVE 0x0800 /* consistency hint for migration handoff */ -#define BDRV_O_CHECK 0x1000 /* open solely for consistency check */ -#define BDRV_O_ALLOW_RDWR 0x2000 /* allow reopen to change from r/o to r/w */ -#define BDRV_O_UNMAP 0x4000 /* execute guest UNMAP/TRIM operations */ -#define BDRV_O_PROTOCOL 0x8000 /* if no block driver is explicitly given: - select an appropriate protocol driver, - ignoring the format layer */ -#define BDRV_O_NO_IO 0x10000 /* don't initialize for I/O */ -#define BDRV_O_AUTO_RDONLY 0x20000 /* degrade to read-only if opening read-write fails */ -#define BDRV_O_IO_URING 0x40000 /* use io_uring instead of the thread pool */ - -#ifdef XBOX -#define BDRV_O_RO_WRITE_SHARE 0x80000 /* allow the file to open RO alongside an existing RW handle */ -#endif - -#define BDRV_O_CACHE_MASK (BDRV_O_NOCACHE | BDRV_O_NO_FLUSH) - - -/* Option names of options parsed by the block layer */ - -#define BDRV_OPT_CACHE_WB "cache.writeback" -#define BDRV_OPT_CACHE_DIRECT "cache.direct" -#define BDRV_OPT_CACHE_NO_FLUSH "cache.no-flush" -#define BDRV_OPT_READ_ONLY "read-only" -#define BDRV_OPT_AUTO_READ_ONLY "auto-read-only" -#define BDRV_OPT_DISCARD "discard" -#define BDRV_OPT_FORCE_SHARE "force-share" - - -#define BDRV_SECTOR_BITS 9 -#define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS) - -#define BDRV_REQUEST_MAX_SECTORS MIN_CONST(SIZE_MAX >> BDRV_SECTOR_BITS, \ - INT_MAX >> BDRV_SECTOR_BITS) -#define BDRV_REQUEST_MAX_BYTES (BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) - -/* - * We want allow aligning requests and disk length up to any 32bit alignment - * and don't afraid of overflow. 
- * To achieve it, and in the same time use some pretty number as maximum disk - * size, let's define maximum "length" (a limit for any offset/bytes request and - * for disk size) to be the greatest power of 2 less than INT64_MAX. - */ -#define BDRV_MAX_ALIGNMENT (1L << 30) -#define BDRV_MAX_LENGTH (QEMU_ALIGN_DOWN(INT64_MAX, BDRV_MAX_ALIGNMENT)) - -/* - * Allocation status flags for bdrv_block_status() and friends. - * - * Public flags: - * BDRV_BLOCK_DATA: allocation for data at offset is tied to this layer - * BDRV_BLOCK_ZERO: offset reads as zero - * BDRV_BLOCK_OFFSET_VALID: an associated offset exists for accessing raw data - * BDRV_BLOCK_ALLOCATED: the content of the block is determined by this - * layer rather than any backing, set by block layer - * BDRV_BLOCK_EOF: the returned pnum covers through end of file for this - * layer, set by block layer - * - * Internal flags: - * BDRV_BLOCK_RAW: for use by passthrough drivers, such as raw, to request - * that the block layer recompute the answer from the returned - * BDS; must be accompanied by just BDRV_BLOCK_OFFSET_VALID. - * BDRV_BLOCK_RECURSE: request that the block layer will recursively search for - * zeroes in file child of current block node inside - * returned region. Only valid together with both - * BDRV_BLOCK_DATA and BDRV_BLOCK_OFFSET_VALID. Should not - * appear with BDRV_BLOCK_ZERO. - * - * If BDRV_BLOCK_OFFSET_VALID is set, the map parameter represents the - * host offset within the returned BDS that is allocated for the - * corresponding raw guest data. However, whether that offset - * actually contains data also depends on BDRV_BLOCK_DATA, as follows: - * - * DATA ZERO OFFSET_VALID - * t t t sectors read as zero, returned file is zero at offset - * t f t sectors read as valid from file at offset - * f t t sectors preallocated, read as zero, returned file not - * necessarily zero at offset - * f f t sectors preallocated but read from backing_hd, - * returned file contains garbage at offset - * t t f sectors preallocated, read as zero, unknown offset - * t f f sectors read from unknown file or offset - * f t f not allocated or unknown offset, read as zero - * f f f not allocated or unknown offset, read from backing_hd - */ -#define BDRV_BLOCK_DATA 0x01 -#define BDRV_BLOCK_ZERO 0x02 -#define BDRV_BLOCK_OFFSET_VALID 0x04 -#define BDRV_BLOCK_RAW 0x08 -#define BDRV_BLOCK_ALLOCATED 0x10 -#define BDRV_BLOCK_EOF 0x20 -#define BDRV_BLOCK_RECURSE 0x40 - -typedef QTAILQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue; - -typedef struct BDRVReopenState { - BlockDriverState *bs; - int flags; - BlockdevDetectZeroesOptions detect_zeroes; - bool backing_missing; - BlockDriverState *old_backing_bs; /* keep pointer for permissions update */ - BlockDriverState *old_file_bs; /* keep pointer for permissions update */ - QDict *options; - QDict *explicit_options; - void *opaque; -} BDRVReopenState; - -/* - * Block operation types - */ -typedef enum BlockOpType { - BLOCK_OP_TYPE_BACKUP_SOURCE, - BLOCK_OP_TYPE_BACKUP_TARGET, - BLOCK_OP_TYPE_CHANGE, - BLOCK_OP_TYPE_COMMIT_SOURCE, - BLOCK_OP_TYPE_COMMIT_TARGET, - BLOCK_OP_TYPE_DATAPLANE, - BLOCK_OP_TYPE_DRIVE_DEL, - BLOCK_OP_TYPE_EJECT, - BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, - BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, - BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE, - BLOCK_OP_TYPE_MIRROR_SOURCE, - BLOCK_OP_TYPE_MIRROR_TARGET, - BLOCK_OP_TYPE_RESIZE, - BLOCK_OP_TYPE_STREAM, - BLOCK_OP_TYPE_REPLACE, - BLOCK_OP_TYPE_MAX, -} BlockOpType; - -/* Block node permission constants */ -enum { - /** - * A user 
that has the "permission" of consistent reads is guaranteed that - * their view of the contents of the block device is complete and - * self-consistent, representing the contents of a disk at a specific - * point. - * - * For most block devices (including their backing files) this is true, but - * the property cannot be maintained in a few situations like for - * intermediate nodes of a commit block job. - */ - BLK_PERM_CONSISTENT_READ = 0x01, - - /** This permission is required to change the visible disk contents. */ - BLK_PERM_WRITE = 0x02, - - /** - * This permission (which is weaker than BLK_PERM_WRITE) is both enough and - * required for writes to the block node when the caller promises that - * the visible disk content doesn't change. - * - * As the BLK_PERM_WRITE permission is strictly stronger, either is - * sufficient to perform an unchanging write. - */ - BLK_PERM_WRITE_UNCHANGED = 0x04, - - /** This permission is required to change the size of a block node. */ - BLK_PERM_RESIZE = 0x08, - - /** - * This permission is required to change the node that this BdrvChild - * points to. - */ - BLK_PERM_GRAPH_MOD = 0x10, - - BLK_PERM_ALL = 0x1f, - - DEFAULT_PERM_PASSTHROUGH = BLK_PERM_CONSISTENT_READ - | BLK_PERM_WRITE - | BLK_PERM_WRITE_UNCHANGED - | BLK_PERM_RESIZE, - - DEFAULT_PERM_UNCHANGED = BLK_PERM_ALL & ~DEFAULT_PERM_PASSTHROUGH, -}; - -/* - * Flags that parent nodes assign to child nodes to specify what kind of - * role(s) they take. - * - * At least one of DATA, METADATA, FILTERED, or COW must be set for - * every child. - */ -enum BdrvChildRoleBits { - /* - * This child stores data. - * Any node may have an arbitrary number of such children. - */ - BDRV_CHILD_DATA = (1 << 0), - - /* - * This child stores metadata. - * Any node may have an arbitrary number of metadata-storing - * children. - */ - BDRV_CHILD_METADATA = (1 << 1), - - /* - * A child that always presents exactly the same visible data as - * the parent, e.g. by virtue of the parent forwarding all reads - * and writes. - * This flag is mutually exclusive with DATA, METADATA, and COW. - * Any node may have at most one filtered child at a time. - */ - BDRV_CHILD_FILTERED = (1 << 2), - - /* - * Child from which to read all data that isn't allocated in the - * parent (i.e., the backing child); such data is copied to the - * parent through COW (and optionally COR). - * This field is mutually exclusive with DATA, METADATA, and - * FILTERED. - * Any node may have at most one such backing child at a time. - */ - BDRV_CHILD_COW = (1 << 3), - - /* - * The primary child. For most drivers, this is the child whose - * filename applies best to the parent node. - * Any node may have at most one primary child at a time. 
- */ - BDRV_CHILD_PRIMARY = (1 << 4), - - /* Useful combination of flags */ - BDRV_CHILD_IMAGE = BDRV_CHILD_DATA - | BDRV_CHILD_METADATA - | BDRV_CHILD_PRIMARY, -}; - -/* Mask of BdrvChildRoleBits values */ -typedef unsigned int BdrvChildRole; - -char *bdrv_perm_names(uint64_t perm); -uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm); - -/* disk I/O throttling */ -void bdrv_init(void); -void bdrv_init_with_whitelist(void); -bool bdrv_uses_whitelist(void); -int bdrv_is_whitelisted(BlockDriver *drv, bool read_only); -BlockDriver *bdrv_find_protocol(const char *filename, - bool allow_protocol_prefix, - Error **errp); -BlockDriver *bdrv_find_format(const char *format_name); -int bdrv_create(BlockDriver *drv, const char* filename, - QemuOpts *opts, Error **errp); -int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp); - -BlockDriverState *bdrv_new(void); -int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, - Error **errp); -int bdrv_replace_node(BlockDriverState *from, BlockDriverState *to, - Error **errp); -BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *node_options, - int flags, Error **errp); -int bdrv_drop_filter(BlockDriverState *bs, Error **errp); - -int bdrv_parse_aio(const char *mode, int *flags); -int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough); -int bdrv_parse_discard_flags(const char *mode, int *flags); -BdrvChild *bdrv_open_child(const char *filename, - QDict *options, const char *bdref_key, - BlockDriverState* parent, - const BdrvChildClass *child_class, - BdrvChildRole child_role, - bool allow_none, Error **errp); -BlockDriverState *bdrv_open_blockdev_ref(BlockdevRef *ref, Error **errp); -int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd, - Error **errp); -int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options, - const char *bdref_key, Error **errp); -BlockDriverState *bdrv_open(const char *filename, const char *reference, - QDict *options, int flags, Error **errp); -BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name, - int flags, Error **errp); -BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue, - BlockDriverState *bs, QDict *options, - bool keep_old_opts); -void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue); -int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp); -int bdrv_reopen(BlockDriverState *bs, QDict *opts, bool keep_old_opts, - Error **errp); -int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only, - Error **errp); -int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset, - int64_t bytes, BdrvRequestFlags flags); -int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags); -int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int64_t bytes); -int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, - int64_t bytes); -int bdrv_pwrite_sync(BdrvChild *child, int64_t offset, - const void *buf, int64_t bytes); -/* - * Efficiently zero a region of the disk image. Note that this is a regular - * I/O request like read or write and should have a reasonable size. This - * function is not suitable for zeroing the entire image in a single request - * because it may allocate memory for the entire region. 
- */ -int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset, - int64_t bytes, BdrvRequestFlags flags); -BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs, - const char *backing_file); -void bdrv_refresh_filename(BlockDriverState *bs); - -int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact, - PreallocMode prealloc, BdrvRequestFlags flags, - Error **errp); -int generated_co_wrapper -bdrv_truncate(BdrvChild *child, int64_t offset, bool exact, - PreallocMode prealloc, BdrvRequestFlags flags, Error **errp); - -int64_t bdrv_nb_sectors(BlockDriverState *bs); -int64_t bdrv_getlength(BlockDriverState *bs); -int64_t bdrv_get_allocated_file_size(BlockDriverState *bs); -BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts, - BlockDriverState *in_bs, Error **errp); -void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr); -void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp); -int bdrv_commit(BlockDriverState *bs); -int bdrv_make_empty(BdrvChild *c, Error **errp); -int bdrv_change_backing_file(BlockDriverState *bs, const char *backing_file, - const char *backing_fmt, bool warn); -void bdrv_register(BlockDriver *bdrv); -int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base, - const char *backing_file_str); -BlockDriverState *bdrv_find_overlay(BlockDriverState *active, - BlockDriverState *bs); -BlockDriverState *bdrv_find_base(BlockDriverState *bs); -bool bdrv_is_backing_chain_frozen(BlockDriverState *bs, BlockDriverState *base, - Error **errp); -int bdrv_freeze_backing_chain(BlockDriverState *bs, BlockDriverState *base, - Error **errp); -void bdrv_unfreeze_backing_chain(BlockDriverState *bs, BlockDriverState *base); -int coroutine_fn bdrv_co_delete_file(BlockDriverState *bs, Error **errp); -void coroutine_fn bdrv_co_delete_file_noerr(BlockDriverState *bs); - - -typedef struct BdrvCheckResult { - int corruptions; - int leaks; - int check_errors; - int corruptions_fixed; - int leaks_fixed; - int64_t image_end_offset; - BlockFragInfo bfi; -} BdrvCheckResult; - -typedef enum { - BDRV_FIX_LEAKS = 1, - BDRV_FIX_ERRORS = 2, -} BdrvCheckMode; - -int generated_co_wrapper bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, - BdrvCheckMode fix); - -/* The units of offset and total_work_size may be chosen arbitrarily by the - * block driver; total_work_size may change during the course of the amendment - * operation */ -typedef void BlockDriverAmendStatusCB(BlockDriverState *bs, int64_t offset, - int64_t total_work_size, void *opaque); -int bdrv_amend_options(BlockDriverState *bs_new, QemuOpts *opts, - BlockDriverAmendStatusCB *status_cb, void *cb_opaque, - bool force, - Error **errp); - -/* check if a named node can be replaced when doing drive-mirror */ -BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs, - const char *node_name, Error **errp); - -/* async block I/O */ -void bdrv_aio_cancel(BlockAIOCB *acb); -void bdrv_aio_cancel_async(BlockAIOCB *acb); - -/* sg packet commands */ -int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf); - -/* Invalidate any cached metadata used by image formats */ -int generated_co_wrapper bdrv_invalidate_cache(BlockDriverState *bs, - Error **errp); -void bdrv_invalidate_cache_all(Error **errp); -int bdrv_inactivate_all(void); - -/* Ensure contents are flushed to disk. 
*/ -int generated_co_wrapper bdrv_flush(BlockDriverState *bs); -int coroutine_fn bdrv_co_flush(BlockDriverState *bs); -int bdrv_flush_all(void); -void bdrv_close_all(void); -void bdrv_drain(BlockDriverState *bs); -void coroutine_fn bdrv_co_drain(BlockDriverState *bs); -void bdrv_drain_all_begin(void); -void bdrv_drain_all_end(void); -void bdrv_drain_all(void); - -#define BDRV_POLL_WHILE(bs, cond) ({ \ - BlockDriverState *bs_ = (bs); \ - AIO_WAIT_WHILE(bdrv_get_aio_context(bs_), \ - cond); }) - -int generated_co_wrapper bdrv_pdiscard(BdrvChild *child, int64_t offset, - int64_t bytes); -int bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes); -int bdrv_has_zero_init_1(BlockDriverState *bs); -int bdrv_has_zero_init(BlockDriverState *bs); -bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs); -int bdrv_block_status(BlockDriverState *bs, int64_t offset, - int64_t bytes, int64_t *pnum, int64_t *map, - BlockDriverState **file); -int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base, - int64_t offset, int64_t bytes, int64_t *pnum, - int64_t *map, BlockDriverState **file); -int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes, - int64_t *pnum); -int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base, - bool include_base, int64_t offset, int64_t bytes, - int64_t *pnum); -int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, - int64_t bytes); - -bool bdrv_is_read_only(BlockDriverState *bs); -int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only, - bool ignore_allow_rdw, Error **errp); -int bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg, - Error **errp); -bool bdrv_is_writable(BlockDriverState *bs); -bool bdrv_is_sg(BlockDriverState *bs); -bool bdrv_is_inserted(BlockDriverState *bs); -void bdrv_lock_medium(BlockDriverState *bs, bool locked); -void bdrv_eject(BlockDriverState *bs, bool eject_flag); -const char *bdrv_get_format_name(BlockDriverState *bs); -BlockDriverState *bdrv_find_node(const char *node_name); -BlockDeviceInfoList *bdrv_named_nodes_list(bool flat, Error **errp); -XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp); -BlockDriverState *bdrv_lookup_bs(const char *device, - const char *node_name, - Error **errp); -bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base); -BlockDriverState *bdrv_next_node(BlockDriverState *bs); -BlockDriverState *bdrv_next_all_states(BlockDriverState *bs); - -typedef struct BdrvNextIterator { - enum { - BDRV_NEXT_BACKEND_ROOTS, - BDRV_NEXT_MONITOR_OWNED, - } phase; - BlockBackend *blk; - BlockDriverState *bs; -} BdrvNextIterator; - -BlockDriverState *bdrv_first(BdrvNextIterator *it); -BlockDriverState *bdrv_next(BdrvNextIterator *it); -void bdrv_next_cleanup(BdrvNextIterator *it); - -BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs); -bool bdrv_supports_compressed_writes(BlockDriverState *bs); -void bdrv_iterate_format(void (*it)(void *opaque, const char *name), - void *opaque, bool read_only); -const char *bdrv_get_node_name(const BlockDriverState *bs); -const char *bdrv_get_device_name(const BlockDriverState *bs); -const char *bdrv_get_device_or_node_name(const BlockDriverState *bs); -int bdrv_get_flags(BlockDriverState *bs); -int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi); -ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs, - Error **errp); -BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs); -void bdrv_round_to_clusters(BlockDriverState *bs, - 
int64_t offset, int64_t bytes, - int64_t *cluster_offset, - int64_t *cluster_bytes); - -void bdrv_get_backing_filename(BlockDriverState *bs, - char *filename, int filename_size); -char *bdrv_get_full_backing_filename(BlockDriverState *bs, Error **errp); -char *bdrv_get_full_backing_filename_from_filename(const char *backed, - const char *backing, - Error **errp); -char *bdrv_dirname(BlockDriverState *bs, Error **errp); - -int path_has_protocol(const char *path); -int path_is_absolute(const char *path); -char *path_combine(const char *base_path, const char *filename); - -int generated_co_wrapper -bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos); -int generated_co_wrapper -bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos); -int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, - int64_t pos, int size); - -int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, - int64_t pos, int size); - -void bdrv_img_create(const char *filename, const char *fmt, - const char *base_filename, const char *base_fmt, - char *options, uint64_t img_size, int flags, - bool quiet, Error **errp); - -/* Returns the alignment in bytes that is required so that no bounce buffer - * is required throughout the stack */ -size_t bdrv_min_mem_align(BlockDriverState *bs); -/* Returns optimal alignment in bytes for bounce buffer */ -size_t bdrv_opt_mem_align(BlockDriverState *bs); -void *qemu_blockalign(BlockDriverState *bs, size_t size); -void *qemu_blockalign0(BlockDriverState *bs, size_t size); -void *qemu_try_blockalign(BlockDriverState *bs, size_t size); -void *qemu_try_blockalign0(BlockDriverState *bs, size_t size); -bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov); - -void bdrv_enable_copy_on_read(BlockDriverState *bs); -void bdrv_disable_copy_on_read(BlockDriverState *bs); - -void bdrv_ref(BlockDriverState *bs); -void bdrv_unref(BlockDriverState *bs); -void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child); -BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs, - BlockDriverState *child_bs, - const char *child_name, - const BdrvChildClass *child_class, - BdrvChildRole child_role, - Error **errp); - -bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp); -void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason); -void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason); -void bdrv_op_block_all(BlockDriverState *bs, Error *reason); -void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason); -bool bdrv_op_blocker_is_empty(BlockDriverState *bs); - -#define BLKDBG_EVENT(child, evt) \ - do { \ - if (child) { \ - bdrv_debug_event(child->bs, evt); \ - } \ - } while (0) - -void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event); - -int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event, - const char *tag); -int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag); -int bdrv_debug_resume(BlockDriverState *bs, const char *tag); -bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag); - -/** - * bdrv_get_aio_context: - * - * Returns: the currently bound #AioContext - */ -AioContext *bdrv_get_aio_context(BlockDriverState *bs); - -/** - * Move the current coroutine to the AioContext of @bs and return the old - * AioContext of the coroutine. Increase bs->in_flight so that draining @bs - * will wait for the operation to proceed until the corresponding - * bdrv_co_leave(). 
- * - * Consequently, you can't call drain inside a bdrv_co_enter/leave() section as - * this will deadlock. - */ -AioContext *coroutine_fn bdrv_co_enter(BlockDriverState *bs); - -/** - * Ends a section started by bdrv_co_enter(). Move the current coroutine back - * to old_ctx and decrease bs->in_flight again. - */ -void coroutine_fn bdrv_co_leave(BlockDriverState *bs, AioContext *old_ctx); - -/** - * Locks the AioContext of @bs if it's not the current AioContext. This avoids - * double locking which could lead to deadlocks: This is a coroutine_fn, so we - * know we already own the lock of the current AioContext. - * - * May only be called in the main thread. - */ -void coroutine_fn bdrv_co_lock(BlockDriverState *bs); - -/** - * Unlocks the AioContext of @bs if it's not the current AioContext. - */ -void coroutine_fn bdrv_co_unlock(BlockDriverState *bs); - -/** - * Transfer control to @co in the aio context of @bs - */ -void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co); - -void bdrv_set_aio_context_ignore(BlockDriverState *bs, - AioContext *new_context, GSList **ignore); -int bdrv_try_set_aio_context(BlockDriverState *bs, AioContext *ctx, - Error **errp); -int bdrv_child_try_set_aio_context(BlockDriverState *bs, AioContext *ctx, - BdrvChild *ignore_child, Error **errp); -bool bdrv_child_can_set_aio_context(BdrvChild *c, AioContext *ctx, - GSList **ignore, Error **errp); -bool bdrv_can_set_aio_context(BlockDriverState *bs, AioContext *ctx, - GSList **ignore, Error **errp); -AioContext *bdrv_child_get_parent_aio_context(BdrvChild *c); -AioContext *child_of_bds_get_parent_aio_context(BdrvChild *c); - -int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz); -int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo); - -void bdrv_io_plug(BlockDriverState *bs); -void bdrv_io_unplug(BlockDriverState *bs); - -/** - * bdrv_parent_drained_begin_single: - * - * Begin a quiesced section for the parent of @c. If @poll is true, wait for - * any pending activity to cease. - */ -void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll); - -/** - * bdrv_parent_drained_end_single: - * - * End a quiesced section for the parent of @c. - * - * This polls @bs's AioContext until all scheduled sub-drained_ends - * have settled, which may result in graph changes. - */ -void bdrv_parent_drained_end_single(BdrvChild *c); - -/** - * bdrv_drain_poll: - * - * Poll for pending requests in @bs, its parents (except for @ignore_parent), - * and if @recursive is true its children as well (used for subtree drain). - * - * If @ignore_bds_parents is true, parents that are BlockDriverStates must - * ignore the drain request because they will be drained separately (used for - * drain_all). - * - * This is part of bdrv_drained_begin. - */ -bool bdrv_drain_poll(BlockDriverState *bs, bool recursive, - BdrvChild *ignore_parent, bool ignore_bds_parents); - -/** - * bdrv_drained_begin: - * - * Begin a quiesced section for exclusive access to the BDS, by disabling - * external request sources including NBD server and device model. Note that - * this doesn't block timers or coroutines from submitting more requests, which - * means block_job_pause is still necessary. - * - * This function can be recursive. - */ -void bdrv_drained_begin(BlockDriverState *bs); - -/** - * bdrv_do_drained_begin_quiesce: - * - * Quiesces a BDS like bdrv_drained_begin(), but does not wait for already - * running requests to complete. 
- */ -void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, - BdrvChild *parent, bool ignore_bds_parents); - -/** - * Like bdrv_drained_begin, but recursively begins a quiesced section for - * exclusive access to all child nodes as well. - */ -void bdrv_subtree_drained_begin(BlockDriverState *bs); - -/** - * bdrv_drained_end: - * - * End a quiescent section started by bdrv_drained_begin(). - * - * This polls @bs's AioContext until all scheduled sub-drained_ends - * have settled. On one hand, that may result in graph changes. On - * the other, this requires that the caller either runs in the main - * loop; or that all involved nodes (@bs and all of its parents) are - * in the caller's AioContext. - */ -void bdrv_drained_end(BlockDriverState *bs); - -/** - * bdrv_drained_end_no_poll: - * - * Same as bdrv_drained_end(), but do not poll for the subgraph to - * actually become unquiesced. Therefore, no graph changes will occur - * with this function. - * - * *drained_end_counter is incremented for every background operation - * that is scheduled, and will be decremented for every operation once - * it settles. The caller must poll until it reaches 0. The counter - * should be accessed using atomic operations only. - */ -void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter); - -/** - * End a quiescent section started by bdrv_subtree_drained_begin(). - */ -void bdrv_subtree_drained_end(BlockDriverState *bs); - -void bdrv_add_child(BlockDriverState *parent, BlockDriverState *child, - Error **errp); -void bdrv_del_child(BlockDriverState *parent, BdrvChild *child, Error **errp); - -bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name, - uint32_t granularity, Error **errp); -/** - * - * bdrv_register_buf/bdrv_unregister_buf: - * - * Register/unregister a buffer for I/O. For example, VFIO drivers are - * interested to know the memory areas that would later be used for I/O, so - * that they can prepare IOMMU mapping etc., to get better performance. - */ -void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size); -void bdrv_unregister_buf(BlockDriverState *bs, void *host); - -/** - * - * bdrv_co_copy_range: - * - * Do offloaded copy between two children. If the operation is not implemented - * by the driver, or if the backend storage doesn't support it, a negative - * error code will be returned. - * - * Note: block layer doesn't emulate or fallback to a bounce buffer approach - * because usually the caller shouldn't attempt offloaded copy any more (e.g. - * calling copy_file_range(2)) after the first error, thus it should fall back - * to a read+write path in the caller level. - * - * @src: Source child to copy data from - * @src_offset: offset in @src image to read data - * @dst: Destination child to copy data to - * @dst_offset: offset in @dst image to write data - * @bytes: number of bytes to copy - * @flags: request flags. Supported flags: - * BDRV_REQ_ZERO_WRITE - treat the @src range as zero data and do zero - * write on @dst as if bdrv_co_pwrite_zeroes is - * called. Used to simplify caller code, or - * during BlockDriver.bdrv_co_copy_range_from() - * recursion. - * BDRV_REQ_NO_SERIALISING - do not serialize with other overlapping - * requests currently in flight. - * - * Returns: 0 if succeeded; negative error code if failed. 
- **/ -int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset, - BdrvChild *dst, int64_t dst_offset, - int64_t bytes, BdrvRequestFlags read_flags, - BdrvRequestFlags write_flags); - -void bdrv_cancel_in_flight(BlockDriverState *bs); - -#endif +#endif /* BLOCK_H */ diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h new file mode 100644 index 0000000000..31ae91e56e --- /dev/null +++ b/include/block/block_int-common.h @@ -0,0 +1,1270 @@ +/* + * QEMU System Emulator block driver + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef BLOCK_INT_COMMON_H +#define BLOCK_INT_COMMON_H + +#include "block/accounting.h" +#include "block/block.h" +#include "block/aio-wait.h" +#include "qemu/queue.h" +#include "qemu/coroutine.h" +#include "qemu/stats64.h" +#include "qemu/timer.h" +#include "qemu/hbitmap.h" +#include "block/snapshot.h" +#include "qemu/throttle.h" +#include "qemu/rcu.h" + +#define BLOCK_FLAG_LAZY_REFCOUNTS 8 + +#define BLOCK_OPT_SIZE "size" +#define BLOCK_OPT_ENCRYPT "encryption" +#define BLOCK_OPT_ENCRYPT_FORMAT "encrypt.format" +#define BLOCK_OPT_COMPAT6 "compat6" +#define BLOCK_OPT_HWVERSION "hwversion" +#define BLOCK_OPT_BACKING_FILE "backing_file" +#define BLOCK_OPT_BACKING_FMT "backing_fmt" +#define BLOCK_OPT_CLUSTER_SIZE "cluster_size" +#define BLOCK_OPT_TABLE_SIZE "table_size" +#define BLOCK_OPT_PREALLOC "preallocation" +#define BLOCK_OPT_SUBFMT "subformat" +#define BLOCK_OPT_COMPAT_LEVEL "compat" +#define BLOCK_OPT_LAZY_REFCOUNTS "lazy_refcounts" +#define BLOCK_OPT_ADAPTER_TYPE "adapter_type" +#define BLOCK_OPT_REDUNDANCY "redundancy" +#define BLOCK_OPT_NOCOW "nocow" +#define BLOCK_OPT_EXTENT_SIZE_HINT "extent_size_hint" +#define BLOCK_OPT_OBJECT_SIZE "object_size" +#define BLOCK_OPT_REFCOUNT_BITS "refcount_bits" +#define BLOCK_OPT_DATA_FILE "data_file" +#define BLOCK_OPT_DATA_FILE_RAW "data_file_raw" +#define BLOCK_OPT_COMPRESSION_TYPE "compression_type" +#define BLOCK_OPT_EXTL2 "extended_l2" + +#define BLOCK_PROBE_BUF_SIZE 512 + +enum BdrvTrackedRequestType { + BDRV_TRACKED_READ, + BDRV_TRACKED_WRITE, + BDRV_TRACKED_DISCARD, + BDRV_TRACKED_TRUNCATE, +}; + +/* + * That is not quite good that BdrvTrackedRequest structure is public, + * as block/io.c is very careful about incoming offset/bytes being + * correct. 
Be sure to assert that bdrv_check_request() succeeded after any + * modification of a BdrvTrackedRequest object outside of block/io.c. + */
+typedef struct BdrvTrackedRequest { + BlockDriverState *bs; + int64_t offset; + int64_t bytes; + enum BdrvTrackedRequestType type; + + bool serialising; + int64_t overlap_offset; + int64_t overlap_bytes; + + QLIST_ENTRY(BdrvTrackedRequest) list; + Coroutine *co; /* owner, used for deadlock detection */ + CoQueue wait_queue; /* coroutines blocked on this request */ + + struct BdrvTrackedRequest *waiting_for; +} BdrvTrackedRequest; + + +struct BlockDriver { + /* + * These fields are initialized when this object is created, + * and are never changed afterwards. + */ + + const char *format_name; + int instance_size; + + /* + * Set to true if the BlockDriver is a block filter. Block filters pass + * certain callbacks that refer to data (see block.c) to their bs->file + * or bs->backing (whichever one exists) if the driver doesn't implement + * them. Drivers that do not wish to forward must implement them and return + * -ENOTSUP. + * Note that filters are not allowed to modify data. + * + * Filters generally cannot have more than a single filtered child, + * because the data they present must at all times be the same as + * that on their filtered child. That would be impossible to + * achieve for multiple filtered children. + * (And this filtered child must then be bs->file or bs->backing.) + */ + bool is_filter; + /* + * Only makes sense for filter drivers; for all other drivers it must be + * false. + * If true, the filtered child is bs->backing. Otherwise it's bs->file. + * Two internal filters use bs->backing as their filtered child and have + * this field set to true: mirror_top and commit_top. There are also two + * such test filters in tests/unit/test-bdrv-graph-mod.c. + * + * Never create any more such filters! + * + * TODO: work out how to deprecate this behavior and make all filters work + * similarly, using bs->file as the filtered child. + */ + bool filtered_child_is_backing; + + /* + * Set to true if the BlockDriver is a format driver. Format nodes + * generally do not expect their children to be other format nodes + * (except for backing files), and so format probing is disabled + * on those children. + */ + bool is_format; + + /* + * Drivers that implement neither bdrv_parse_filename nor bdrv_open should + * have this field set to true, except for those that are defined only by + * their child's bs. + * An example of the latter type is the quorum block driver. + */ + bool bdrv_needs_filename; + + /* + * Set if a driver can support backing files. This also implies the + * following semantics: + * + * - A return status of 0 from .bdrv_co_block_status means that the + * corresponding blocks are not allocated in this layer of the + * backing chain + * - For such (unallocated) blocks, a read will: + * - fill the buffer with zeros if there is no backing file + * - read from the backing file otherwise, where the block layer + * takes care of reading zeros beyond EOF if the backing file is short + */ + bool supports_backing; + + bool has_variable_length; + + /* + * Drivers setting this field must be able to work with just a plain + * filename with ':' as a prefix, and no other options. + * Options may be extracted from the filename by implementing + * bdrv_parse_filename.
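/*
 * Editor's sketch (not part of the patch): roughly how the fields above are
 * used when declaring a trivial filter driver. The driver name is a
 * placeholder and callbacks are omitted.
 */
static BlockDriver bdrv_example_filter = {
    .format_name               = "example-filter",
    .instance_size             = 0,
    .is_filter                 = true,  /* presents bs->file's data unchanged */
    .filtered_child_is_backing = false, /* new filters must use bs->file */
};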
+ */ + const char *protocol_name; + + /* List of options for creating images, terminated by name == NULL */ + QemuOptsList *create_opts; + + /* List of options for image amend */ + QemuOptsList *amend_opts; + + /* + * If this driver supports reopening images this contains a + * NULL-terminated list of the runtime options that can be + * modified. If an option in this list is unspecified during + * reopen then it _must_ be reset to its default value or return + * an error. + */ + const char *const *mutable_opts; + + /* + * Pointer to a NULL-terminated array of names of strong options + * that can be specified for bdrv_open(). A strong option is one + * that changes the data of a BDS. + * If this pointer is NULL, the array is considered empty. + * "filename" and "driver" are always considered strong. + */ + const char *const *strong_runtime_opts; + + + /* + * Global state (GS) API. These functions run under the BQL. + * + * See include/block/block-global-state.h for more information about + * the GS API. + */ + + /* + * This function is invoked under BQL before .bdrv_co_amend() + * (which in contrast does not necessarily run under the BQL) + * to allow driver-specific initialization code that requires + * the BQL, like setting up specific permission flags. + */ + int (*bdrv_amend_pre_run)(BlockDriverState *bs, Error **errp); + /* + * This function is invoked under BQL after .bdrv_co_amend() + * to allow cleaning up what was done in .bdrv_amend_pre_run(). + */ + void (*bdrv_amend_clean)(BlockDriverState *bs); + + /* + * Return true if @to_replace can be replaced by a BDS with the + * same data as @bs without it affecting @bs's behavior (that is, + * without it being visible to @bs's parents). + */ + bool (*bdrv_recurse_can_replace)(BlockDriverState *bs, + BlockDriverState *to_replace); + + int (*bdrv_probe_device)(const char *filename); + + /* + * Any driver implementing this callback is expected to be able to handle + * NULL file names in its .bdrv_open() implementation. + */ + void (*bdrv_parse_filename)(const char *filename, QDict *options, + Error **errp); + + /* For handling image reopen for split or non-split files. */ + int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state, + BlockReopenQueue *queue, Error **errp); + void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state); + void (*bdrv_reopen_commit_post)(BDRVReopenState *reopen_state); + void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state); + void (*bdrv_join_options)(QDict *options, QDict *old_options); + + int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags, + Error **errp); + + /* Protocol drivers should implement this instead of bdrv_open */ + int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags, + Error **errp); + void (*bdrv_close)(BlockDriverState *bs); + + int coroutine_fn (*bdrv_co_create)(BlockdevCreateOptions *opts, + Error **errp); + int coroutine_fn (*bdrv_co_create_opts)(BlockDriver *drv, + const char *filename, + QemuOpts *opts, + Error **errp); + + int (*bdrv_amend_options)(BlockDriverState *bs, + QemuOpts *opts, + BlockDriverAmendStatusCB *status_cb, + void *cb_opaque, + bool force, + Error **errp); + + int (*bdrv_make_empty)(BlockDriverState *bs); + + /* + * Refreshes the bs->exact_filename field. If that is impossible, + * bs->exact_filename has to be left empty. + */ + void (*bdrv_refresh_filename)(BlockDriverState *bs); + + /* + * Gathers the open options for all children into @target. 
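/*
 * Editor's sketch (not part of the patch): how a driver might populate the
 * NULL-terminated option lists described above. The option names are
 * illustrative only.
 */
static const char *const example_mutable_opts[] = {
    "cache-size", NULL          /* may be changed by a reopen */
};
static const char *const example_strong_runtime_opts[] = {
    "data-file", NULL           /* changes the data the node presents */
};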
+ * A simple format driver (without backing file support) might + * implement this function like this: + * + * QINCREF(bs->file->bs->full_open_options); + * qdict_put(target, "file", bs->file->bs->full_open_options); + * + * If not specified, the generic implementation will simply put + * all children's options under their respective name. + * + * @backing_overridden is true when bs->backing seems not to be + * the child that would result from opening bs->backing_file. + * Therefore, if it is true, the backing child's options should be + * gathered; otherwise, there is no need since the backing child + * is the one implied by the image header. + * + * Note that ideally this function would not be needed. Every + * block driver which implements it is probably doing something + * shady regarding its runtime option structure. + */ + void (*bdrv_gather_child_options)(BlockDriverState *bs, QDict *target, + bool backing_overridden); + + /* + * Returns an allocated string which is the directory name of this BDS: It + * will be used to make relative filenames absolute by prepending this + * function's return value to them. + */ + char *(*bdrv_dirname)(BlockDriverState *bs, Error **errp); + + /* + * This informs the driver that we are no longer interested in the result + * of in-flight requests, so don't waste the time if possible. + * + * One example usage is to avoid waiting for an nbd target node reconnect + * timeout during job-cancel with force=true. + */ + void (*bdrv_cancel_in_flight)(BlockDriverState *bs); + + int (*bdrv_inactivate)(BlockDriverState *bs); + + int (*bdrv_snapshot_create)(BlockDriverState *bs, + QEMUSnapshotInfo *sn_info); + int (*bdrv_snapshot_goto)(BlockDriverState *bs, + const char *snapshot_id); + int (*bdrv_snapshot_delete)(BlockDriverState *bs, + const char *snapshot_id, + const char *name, + Error **errp); + int (*bdrv_snapshot_list)(BlockDriverState *bs, + QEMUSnapshotInfo **psn_info); + int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs, + const char *snapshot_id, + const char *name, + Error **errp); + + int (*bdrv_change_backing_file)(BlockDriverState *bs, + const char *backing_file, const char *backing_fmt); + + /* TODO Better pass a option string/QDict/QemuOpts to add any rule? */ + int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event, + const char *tag); + int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs, + const char *tag); + int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag); + bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag); + + void (*bdrv_refresh_limits)(BlockDriverState *bs, Error **errp); + + /* + * Returns 1 if newly created images are guaranteed to contain only + * zeros, 0 otherwise. + */ + int (*bdrv_has_zero_init)(BlockDriverState *bs); + + /* + * Remove fd handlers, timers, and other event loop callbacks so the event + * loop is no longer in use. Called with no in-flight requests and in + * depth-first traversal order with parents before child nodes. + */ + void (*bdrv_detach_aio_context)(BlockDriverState *bs); + + /* + * Add fd handlers, timers, and other event loop callbacks so I/O requests + * can be processed again. Called with no in-flight requests and in + * depth-first traversal order with child nodes before parent nodes. + */ + void (*bdrv_attach_aio_context)(BlockDriverState *bs, + AioContext *new_context); + + /** + * Try to get @bs's logical and physical block size. + * On success, store them in @bsz and return zero. + * On failure, return negative errno. 
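/*
 * Editor's sketch (not part of the patch): a .bdrv_probe_blocksizes
 * implementation following the contract above; the values are made up.
 */
static int example_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
{
    bsz->log  = 512;            /* logical block size, in bytes */
    bsz->phys = 4096;           /* physical block size, in bytes */
    return 0;                   /* negative errno on failure */
}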
+ */ + int (*bdrv_probe_blocksizes)(BlockDriverState *bs, BlockSizes *bsz); + /** + * Try to get @bs's geometry (cyls, heads, sectors) + * On success, store them in @geo and return 0. + * On failure return -errno. + * Only drivers that want to override guest geometry implement this + * callback; see hd_geometry_guess(). + */ + int (*bdrv_probe_geometry)(BlockDriverState *bs, HDGeometry *geo); + + void (*bdrv_add_child)(BlockDriverState *parent, BlockDriverState *child, + Error **errp); + void (*bdrv_del_child)(BlockDriverState *parent, BdrvChild *child, + Error **errp); + + /** + * Informs the block driver that a permission change is intended. The + * driver checks whether the change is permissible and may take other + * preparations for the change (e.g. get file system locks). This operation + * is always followed either by a call to either .bdrv_set_perm or + * .bdrv_abort_perm_update. + * + * Checks whether the requested set of cumulative permissions in @perm + * can be granted for accessing @bs and whether no other users are using + * permissions other than those given in @shared (both arguments take + * BLK_PERM_* bitmasks). + * + * If both conditions are met, 0 is returned. Otherwise, -errno is returned + * and errp is set to an error describing the conflict. + */ + int (*bdrv_check_perm)(BlockDriverState *bs, uint64_t perm, + uint64_t shared, Error **errp); + + /** + * Called to inform the driver that the set of cumulative set of used + * permissions for @bs has changed to @perm, and the set of sharable + * permission to @shared. The driver can use this to propagate changes to + * its children (i.e. request permissions only if a parent actually needs + * them). + * + * This function is only invoked after bdrv_check_perm(), so block drivers + * may rely on preparations made in their .bdrv_check_perm implementation. + */ + void (*bdrv_set_perm)(BlockDriverState *bs, uint64_t perm, uint64_t shared); + + /* + * Called to inform the driver that after a previous bdrv_check_perm() + * call, the permission update is not performed and any preparations made + * for it (e.g. taken file locks) need to be undone. + * + * This function can be called even for nodes that never saw a + * bdrv_check_perm() call. It is a no-op then. + */ + void (*bdrv_abort_perm_update)(BlockDriverState *bs); + + /** + * Returns in @nperm and @nshared the permissions that the driver for @bs + * needs on its child @c, based on the cumulative permissions requested by + * the parents in @parent_perm and @parent_shared. + * + * If @c is NULL, return the permissions for attaching a new child for the + * given @child_class and @role. + * + * If @reopen_queue is non-NULL, don't return the currently needed + * permissions, but those that will be needed after applying the + * @reopen_queue. + */ + void (*bdrv_child_perm)(BlockDriverState *bs, BdrvChild *c, + BdrvChildRole role, + BlockReopenQueue *reopen_queue, + uint64_t parent_perm, uint64_t parent_shared, + uint64_t *nperm, uint64_t *nshared); + + /** + * Register/unregister a buffer for I/O. For example, when the driver is + * interested to know the memory areas that will later be used in iovs, so + * that it can do IOMMU mapping with VFIO etc., in order to get better + * performance. In the case of VFIO drivers, this callback is used to do + * DMA mapping for hot buffers. 
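/*
 * Editor's sketch (not part of the patch): a minimal .bdrv_check_perm
 * implementation following the permission-update protocol described above,
 * for a hypothetical driver that cannot support resizing. Any preparation
 * done here would be committed in .bdrv_set_perm or undone in
 * .bdrv_abort_perm_update.
 */
static int example_check_perm(BlockDriverState *bs, uint64_t perm,
                              uint64_t shared, Error **errp)
{
    if (perm & BLK_PERM_RESIZE) {
        error_setg(errp, "Resizing is not supported by this driver");
        return -ENOTSUP;
    }
    return 0;
}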
+ * + * Returns: true on success, false on failure + */ + bool (*bdrv_register_buf)(BlockDriverState *bs, void *host, size_t size, + Error **errp); + void (*bdrv_unregister_buf)(BlockDriverState *bs, void *host, size_t size); + + /* + * This field is modified only under the BQL, and is part of + * the global state. + */ + QLIST_ENTRY(BlockDriver) list; + + /* + * I/O API functions. These functions are thread-safe. + * + * See include/block/block-io.h for more information about + * the I/O API. + */ + + int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename); + + int coroutine_fn (*bdrv_co_amend)(BlockDriverState *bs, + BlockdevAmendOptions *opts, + bool force, + Error **errp); + + /* aio */ + BlockAIOCB *(*bdrv_aio_preadv)(BlockDriverState *bs, + int64_t offset, int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque); + BlockAIOCB *(*bdrv_aio_pwritev)(BlockDriverState *bs, + int64_t offset, int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque); + BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs, + BlockCompletionFunc *cb, void *opaque); + BlockAIOCB *(*bdrv_aio_pdiscard)(BlockDriverState *bs, + int64_t offset, int bytes, + BlockCompletionFunc *cb, void *opaque); + + int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, QEMUIOVector *qiov); + + /** + * @offset: position in bytes to read at + * @bytes: number of bytes to read + * @qiov: the buffers to fill with read data + * @flags: currently unused, always 0 + * + * @offset and @bytes will be a multiple of 'request_alignment', + * but the length of individual @qiov elements does not have to + * be a multiple. + * + * @bytes will always equal the total size of @qiov, and will be + * no larger than 'max_transfer'. + * + * The buffer in @qiov may point directly to guest memory. + */ + int coroutine_fn (*bdrv_co_preadv)(BlockDriverState *bs, + int64_t offset, int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags); + + int coroutine_fn (*bdrv_co_preadv_part)(BlockDriverState *bs, + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, + BdrvRequestFlags flags); + + int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, + int flags); + /** + * @offset: position in bytes to write at + * @bytes: number of bytes to write + * @qiov: the buffers containing data to write + * @flags: zero or more bits allowed by 'supported_write_flags' + * + * @offset and @bytes will be a multiple of 'request_alignment', + * but the length of individual @qiov elements does not have to + * be a multiple. + * + * @bytes will always equal the total size of @qiov, and will be + * no larger than 'max_transfer'. + * + * The buffer in @qiov may point directly to guest memory. + */ + int coroutine_fn (*bdrv_co_pwritev)(BlockDriverState *bs, + int64_t offset, int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags); + int coroutine_fn (*bdrv_co_pwritev_part)(BlockDriverState *bs, + int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset, + BdrvRequestFlags flags); + + /* + * Efficiently zero a region of the disk image. Typically an image format + * would use a compact metadata representation to implement this. This + * function pointer may be NULL or return -ENOSUP and .bdrv_co_writev() + * will be called instead. 
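/*
 * Editor's sketch (not part of the patch): a pass-through .bdrv_co_preadv as
 * a filter driver might implement it, simply forwarding the request to
 * bs->file under the alignment contract described above.
 */
static int coroutine_fn
example_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                  QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
}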
+ */ + int coroutine_fn (*bdrv_co_pwrite_zeroes)(BlockDriverState *bs, + int64_t offset, int64_t bytes, BdrvRequestFlags flags); + int coroutine_fn (*bdrv_co_pdiscard)(BlockDriverState *bs, + int64_t offset, int64_t bytes); + + /* + * Map [offset, offset + nbytes) range onto a child of @bs to copy from, + * and invoke bdrv_co_copy_range_from(child, ...), or invoke + * bdrv_co_copy_range_to() if @bs is the leaf child to copy data from. + * + * See the comment of bdrv_co_copy_range for the parameter and return value + * semantics. + */ + int coroutine_fn (*bdrv_co_copy_range_from)(BlockDriverState *bs, + BdrvChild *src, + int64_t offset, + BdrvChild *dst, + int64_t dst_offset, + int64_t bytes, + BdrvRequestFlags read_flags, + BdrvRequestFlags write_flags); + + /* + * Map [offset, offset + nbytes) range onto a child of bs to copy data to, + * and invoke bdrv_co_copy_range_to(child, src, ...), or perform the copy + * operation if @bs is the leaf and @src has the same BlockDriver. Return + * -ENOTSUP if @bs is the leaf but @src has a different BlockDriver. + * + * See the comment of bdrv_co_copy_range for the parameter and return value + * semantics. + */ + int coroutine_fn (*bdrv_co_copy_range_to)(BlockDriverState *bs, + BdrvChild *src, + int64_t src_offset, + BdrvChild *dst, + int64_t dst_offset, + int64_t bytes, + BdrvRequestFlags read_flags, + BdrvRequestFlags write_flags); + + /* + * Building block for bdrv_block_status[_above] and + * bdrv_is_allocated[_above]. The driver should answer only + * according to the current layer, and should only need to set + * BDRV_BLOCK_DATA, BDRV_BLOCK_ZERO, BDRV_BLOCK_OFFSET_VALID, + * and/or BDRV_BLOCK_RAW; if the current layer defers to a backing + * layer, the result should be 0 (and not BDRV_BLOCK_ZERO). See + * block.h for the overall meaning of the bits. As a hint, the + * flag want_zero is true if the caller cares more about precise + * mappings (favor accurate _OFFSET_VALID/_ZERO) or false for + * overall allocation (favor larger *pnum, perhaps by reporting + * _DATA instead of _ZERO). The block layer guarantees input + * clamped to bdrv_getlength() and aligned to request_alignment, + * as well as non-NULL pnum, map, and file; in turn, the driver + * must return an error or set pnum to an aligned non-zero value. + * + * Note that @bytes is just a hint on how big of a region the + * caller wants to inspect. It is not a limit on *pnum. + * Implementations are free to return larger values of *pnum if + * doing so does not incur a performance penalty. + * + * block/io.c's bdrv_co_block_status() will utilize an unclamped + * *pnum value for the block-status cache on protocol nodes, prior + * to clamping *pnum for return to its caller. + */ + int coroutine_fn (*bdrv_co_block_status)(BlockDriverState *bs, + bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum, + int64_t *map, BlockDriverState **file); + + /* + * Snapshot-access API. + * + * Block-driver may provide snapshot-access API: special functions to access + * some internal "snapshot". The functions are similar with normal + * read/block_status/discard handler, but don't have any specific handling + * in generic block-layer: no serializing, no alignment, no tracked + * requests. So, block-driver that realizes these APIs is fully responsible + * for synchronization between snapshot-access API and normal IO requests. 
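+ *
+ * Sketch of a caller (illustration only; the child name is hypothetical):
+ * a parent that keeps a snapshot-access child reads from the snapshot
+ * with the dedicated helper instead of the regular request path:
+ *
+ *   ret = bdrv_co_preadv_snapshot(s->snapshot_access, offset, bytes,
+ *                                 qiov, 0);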
+ * + * TODO: To be able to support qcow2's internal snapshots, this API will + * need to be extended to: + * - be able to select a specific snapshot + * - receive the snapshot's actual length (which may differ from bs's + * length) + */ + int coroutine_fn (*bdrv_co_preadv_snapshot)(BlockDriverState *bs, + int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset); + int coroutine_fn (*bdrv_co_snapshot_block_status)(BlockDriverState *bs, + bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum, + int64_t *map, BlockDriverState **file); + int coroutine_fn (*bdrv_co_pdiscard_snapshot)(BlockDriverState *bs, + int64_t offset, int64_t bytes); + + /* + * Invalidate any cached meta-data. + */ + void coroutine_fn (*bdrv_co_invalidate_cache)(BlockDriverState *bs, + Error **errp); + + /* + * Flushes all data for all layers by calling bdrv_co_flush for underlying + * layers, if needed. This function is needed for deterministic + * synchronization of the flush finishing callback. + */ + int coroutine_fn (*bdrv_co_flush)(BlockDriverState *bs); + + /* Delete a created file. */ + int coroutine_fn (*bdrv_co_delete_file)(BlockDriverState *bs, + Error **errp); + + /* + * Flushes all data that was already written to the OS all the way down to + * the disk (for example file-posix.c calls fsync()). + */ + int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs); + + /* + * Flushes all internal caches to the OS. The data may still sit in a + * writeback cache of the host OS, but it will survive a crash of the qemu + * process. + */ + int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs); + + /* + * Truncate @bs to @offset bytes using the given @prealloc mode + * when growing. Modes other than PREALLOC_MODE_OFF should be + * rejected when shrinking @bs. + * + * If @exact is true, @bs must be resized to exactly @offset. + * Otherwise, it is sufficient for @bs (if it is a host block + * device and thus there is no way to resize it) to be at least + * @offset bytes in length. + * + * If @exact is true and this function fails but would succeed + * with @exact = false, it should return -ENOTSUP. 
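+ *
+ * For illustration only (hypothetical host-device driver; device_size is
+ * assumed to have been queried beforehand):
+ *
+ *   if (offset > device_size) {
+ *       error_setg(errp, "Cannot grow a host device");
+ *       return -EINVAL;
+ *   }
+ *   if (exact && offset != device_size) {
+ *       return -ENOTSUP;   /* non-exact truncation would have succeeded */
+ *   }
+ *   return 0;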
+ */ + int coroutine_fn (*bdrv_co_truncate)(BlockDriverState *bs, int64_t offset, + bool exact, PreallocMode prealloc, + BdrvRequestFlags flags, Error **errp); + int64_t (*bdrv_getlength)(BlockDriverState *bs); + int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs); + BlockMeasureInfo *(*bdrv_measure)(QemuOpts *opts, BlockDriverState *in_bs, + Error **errp); + + int coroutine_fn (*bdrv_co_pwritev_compressed)(BlockDriverState *bs, + int64_t offset, int64_t bytes, QEMUIOVector *qiov); + int coroutine_fn (*bdrv_co_pwritev_compressed_part)(BlockDriverState *bs, + int64_t offset, int64_t bytes, QEMUIOVector *qiov, + size_t qiov_offset); + + int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi); + + ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs, + Error **errp); + BlockStatsSpecific *(*bdrv_get_specific_stats)(BlockDriverState *bs); + + int coroutine_fn (*bdrv_save_vmstate)(BlockDriverState *bs, + QEMUIOVector *qiov, + int64_t pos); + int coroutine_fn (*bdrv_load_vmstate)(BlockDriverState *bs, + QEMUIOVector *qiov, + int64_t pos); + + /* removable device specific */ + bool (*bdrv_is_inserted)(BlockDriverState *bs); + void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag); + void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked); + + /* to control generic scsi devices */ + BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs, + unsigned long int req, void *buf, + BlockCompletionFunc *cb, void *opaque); + int coroutine_fn (*bdrv_co_ioctl)(BlockDriverState *bs, + unsigned long int req, void *buf); + + /* + * Returns 0 for completed check, -errno for internal errors. + * The check results are stored in result. + */ + int coroutine_fn (*bdrv_co_check)(BlockDriverState *bs, + BdrvCheckResult *result, + BdrvCheckMode fix); + + void (*bdrv_debug_event)(BlockDriverState *bs, BlkdebugEvent event); + + /* io queue for linux-aio */ + void (*bdrv_io_plug)(BlockDriverState *bs); + void (*bdrv_io_unplug)(BlockDriverState *bs); + + /** + * bdrv_co_drain_begin is called if implemented in the beginning of a + * drain operation to drain and stop any internal sources of requests in + * the driver. + * bdrv_co_drain_end is called if implemented at the end of the drain. + * + * They should be used by the driver to e.g. manage scheduled I/O + * requests, or toggle an internal state. After the end of the drain new + * requests will continue normally. + */ + void coroutine_fn (*bdrv_co_drain_begin)(BlockDriverState *bs); + void coroutine_fn (*bdrv_co_drain_end)(BlockDriverState *bs); + + bool (*bdrv_supports_persistent_dirty_bitmap)(BlockDriverState *bs); + bool coroutine_fn (*bdrv_co_can_store_new_dirty_bitmap)( + BlockDriverState *bs, const char *name, uint32_t granularity, + Error **errp); + int coroutine_fn (*bdrv_co_remove_persistent_dirty_bitmap)( + BlockDriverState *bs, const char *name, Error **errp); +}; + +static inline bool block_driver_can_compress(BlockDriver *drv) +{ + return drv->bdrv_co_pwritev_compressed || + drv->bdrv_co_pwritev_compressed_part; +} + +typedef struct BlockLimits { + /* + * Alignment requirement, in bytes, for offset/length of I/O + * requests. Must be a power of 2 less than INT_MAX; defaults to + * 1 for drivers with modern byte interfaces, and to 512 + * otherwise. + */ + uint32_t request_alignment; + + /* + * Maximum number of bytes that can be discarded at once. Must be multiple + * of pdiscard_alignment, but need not be power of 2. May be 0 if no + * inherent 64-bit limit. 
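+ *
+ * As a made-up illustration of how a driver might fill in these limits
+ * (typically from its .bdrv_refresh_limits callback; the values are
+ * invented):
+ *
+ *   bs->bl.request_alignment = 4096;
+ *   bs->bl.pdiscard_alignment = 64 * 1024;
+ *   bs->bl.max_pdiscard = 64 * 1024 * 1024;  /* multiple of the alignment */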
+ */
+ int64_t max_pdiscard;
+
+ /*
+ * Optimal alignment for discard requests in bytes. A power of 2
+ * is best but not mandatory. Must be a multiple of
+ * bl.request_alignment, and must be less than max_pdiscard if
+ * that is set. May be 0 if bl.request_alignment is good enough
+ */
+ uint32_t pdiscard_alignment;
+
+ /*
+ * Maximum number of bytes that can be zeroized at once. Must be multiple of
+ * pwrite_zeroes_alignment. 0 means no limit.
+ */
+ int64_t max_pwrite_zeroes;
+
+ /*
+ * Optimal alignment for write zeroes requests in bytes. A power
+ * of 2 is best but not mandatory. Must be a multiple of
+ * bl.request_alignment, and must be less than max_pwrite_zeroes
+ * if that is set. May be 0 if bl.request_alignment is good
+ * enough
+ */
+ uint32_t pwrite_zeroes_alignment;
+
+ /*
+ * Optimal transfer length in bytes. A power of 2 is best but not
+ * mandatory. Must be a multiple of bl.request_alignment, or 0 if
+ * no preferred size
+ */
+ uint32_t opt_transfer;
+
+ /*
+ * Maximal transfer length in bytes. Need not be power of 2, but
+ * must be multiple of opt_transfer and bl.request_alignment, or 0
+ * for no 32-bit limit. For now, anything larger than INT_MAX is
+ * clamped down.
+ */
+ uint32_t max_transfer;
+
+ /*
+ * Maximal hardware transfer length in bytes. Applies whenever
+ * transfers to the device bypass the kernel I/O scheduler, for
+ * example with SG_IO. If larger than max_transfer or if zero,
+ * blk_get_max_hw_transfer will fall back to max_transfer.
+ */
+ uint64_t max_hw_transfer;
+
+ /*
+ * Maximal number of scatter/gather elements allowed by the hardware.
+ * Applies whenever transfers to the device bypass the kernel I/O
+ * scheduler, for example with SG_IO. If larger than max_iov
+ * or if zero, blk_get_max_hw_iov will fall back to max_iov.
+ */
+ int max_hw_iov;
+
+
+ /* memory alignment, in bytes so that no bounce buffer is needed */
+ size_t min_mem_alignment;
+
+ /* memory alignment, in bytes, for bounce buffer */
+ size_t opt_mem_alignment;
+
+ /* maximum number of iovec elements */
+ int max_iov;
+} BlockLimits;
+
+typedef struct BdrvOpBlocker BdrvOpBlocker;
+
+typedef struct BdrvAioNotifier {
+ void (*attached_aio_context)(AioContext *new_context, void *opaque);
+ void (*detach_aio_context)(void *opaque);
+
+ void *opaque;
+ bool deleted;
+
+ QLIST_ENTRY(BdrvAioNotifier) list;
+} BdrvAioNotifier;
+
+struct BdrvChildClass {
+ /*
+ * If true, bdrv_replace_node() doesn't change the node this BdrvChild
+ * points to.
+ */
+ bool stay_at_node;
+
+ /*
+ * If true, the parent is a BlockDriverState and bdrv_next_all_states()
+ * will return it. This information is used for drain_all, where every node
+ * will be drained separately, so the drain only needs to be propagated to
+ * non-BDS parents.
+ */
+ bool parent_is_bds;
+
+ /*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+ void (*inherit_options)(BdrvChildRole role, bool parent_is_format,
+ int *child_flags, QDict *child_options,
+ int parent_flags, QDict *parent_options);
+ void (*change_media)(BdrvChild *child, bool load);
+
+ /*
+ * Returns a malloced string that describes the parent of the child for a
+ * human reader. This could be a node-name, BlockBackend name, qdev ID or
+ * QOM path of the device owning the BlockBackend, job type and ID etc. The
+ * caller is responsible for freeing the memory.
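+ *
+ * Minimal hypothetical implementation (illustration only; it assumes the
+ * parent node was stored in child->opaque when the child was attached):
+ *
+ *   static char *my_get_parent_desc(BdrvChild *child)
+ *   {
+ *       BlockDriverState *parent = child->opaque;
+ *       return g_strdup_printf("node '%s'", parent->node_name);
+ *   }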
+ */ + char *(*get_parent_desc)(BdrvChild *child); + + /* + * Notifies the parent that the child has been activated/inactivated (e.g. + * when migration is completing) and it can start/stop requesting + * permissions and doing I/O on it. + */ + void (*activate)(BdrvChild *child, Error **errp); + int (*inactivate)(BdrvChild *child); + + void (*attach)(BdrvChild *child); + void (*detach)(BdrvChild *child); + + /* + * Notifies the parent that the filename of its child has changed (e.g. + * because the direct child was removed from the backing chain), so that it + * can update its reference. + */ + int (*update_filename)(BdrvChild *child, BlockDriverState *new_base, + const char *filename, Error **errp); + + bool (*change_aio_ctx)(BdrvChild *child, AioContext *ctx, + GHashTable *visited, Transaction *tran, + Error **errp); + + /* + * I/O API functions. These functions are thread-safe. + * + * See include/block/block-io.h for more information about + * the I/O API. + */ + + void (*resize)(BdrvChild *child); + + /* + * Returns a name that is supposedly more useful for human users than the + * node name for identifying the node in question (in particular, a BB + * name), or NULL if the parent can't provide a better name. + */ + const char *(*get_name)(BdrvChild *child); + + AioContext *(*get_parent_aio_context)(BdrvChild *child); + + /* + * If this pair of functions is implemented, the parent doesn't issue new + * requests after returning from .drained_begin() until .drained_end() is + * called. + * + * These functions must not change the graph (and therefore also must not + * call aio_poll(), which could change the graph indirectly). + * + * If drained_end() schedules background operations, it must atomically + * increment *drained_end_counter for each such operation and atomically + * decrement it once the operation has settled. + * + * Note that this can be nested. If drained_begin() was called twice, new + * I/O is allowed only after drained_end() was called twice, too. + */ + void (*drained_begin)(BdrvChild *child); + void (*drained_end)(BdrvChild *child, int *drained_end_counter); + + /* + * Returns whether the parent has pending requests for the child. This + * callback is polled after .drained_begin() has been called until all + * activity on the child has stopped. + */ + bool (*drained_poll)(BdrvChild *child); +}; + +extern const BdrvChildClass child_of_bds; + +struct BdrvChild { + BlockDriverState *bs; + char *name; + const BdrvChildClass *klass; + BdrvChildRole role; + void *opaque; + + /** + * Granted permissions for operating on this BdrvChild (BLK_PERM_* bitmask) + */ + uint64_t perm; + + /** + * Permissions that can still be granted to other users of @bs while this + * BdrvChild is still attached to it. (BLK_PERM_* bitmask) + */ + uint64_t shared_perm; + + /* + * This link is frozen: the child can neither be replaced nor + * detached from the parent. + */ + bool frozen; + + /* + * How many times the parent of this child has been drained + * (through klass->drained_*). + * Usually, this is equal to bs->quiesce_counter (potentially + * reduced by bdrv_drain_all_count). It may differ while the + * child is entering or leaving a drained section. + */ + int parent_quiesce_counter; + + QLIST_ENTRY(BdrvChild) next; + QLIST_ENTRY(BdrvChild) next_parent; +}; + +/* + * Allows bdrv_co_block_status() to cache one data region for a + * protocol node. 
+ * + * @valid: Whether the cache is valid (should be accessed with atomic + * functions so this can be reset by RCU readers) + * @data_start: Offset where we know (or strongly assume) is data + * @data_end: Offset where the data region ends (which is not necessarily + * the start of a zeroed region) + */ +typedef struct BdrvBlockStatusCache { + struct rcu_head rcu; + + bool valid; + int64_t data_start; + int64_t data_end; +} BdrvBlockStatusCache; + +struct BlockDriverState { + /* + * Protected by big QEMU lock or read-only after opening. No special + * locking needed during I/O... + */ + int open_flags; /* flags used to open the file, re-used for re-open */ + bool encrypted; /* if true, the media is encrypted */ + bool sg; /* if true, the device is a /dev/sg* */ + bool probed; /* if true, format was probed rather than specified */ + bool force_share; /* if true, always allow all shared permissions */ + bool implicit; /* if true, this filter node was automatically inserted */ + + BlockDriver *drv; /* NULL means no media */ + void *opaque; + + AioContext *aio_context; /* event loop used for fd handlers, timers, etc */ + /* + * long-running tasks intended to always use the same AioContext as this + * BDS may register themselves in this list to be notified of changes + * regarding this BDS's context + */ + QLIST_HEAD(, BdrvAioNotifier) aio_notifiers; + bool walking_aio_notifiers; /* to make removal during iteration safe */ + + char filename[PATH_MAX]; + /* + * If not empty, this image is a diff in relation to backing_file. + * Note that this is the name given in the image header and + * therefore may or may not be equal to .backing->bs->filename. + * If this field contains a relative path, it is to be resolved + * relatively to the overlay's location. + */ + char backing_file[PATH_MAX]; + /* + * The backing filename indicated by the image header. Contrary + * to backing_file, if we ever open this file, auto_backing_file + * is replaced by the resulting BDS's filename (i.e. after a + * bdrv_refresh_filename() run). + */ + char auto_backing_file[PATH_MAX]; + char backing_format[16]; /* if non-zero and backing_file exists */ + + QDict *full_open_options; + char exact_filename[PATH_MAX]; + + /* I/O Limits */ + BlockLimits bl; + + /* + * Flags honored during pread + */ + BdrvRequestFlags supported_read_flags; + /* + * Flags honored during pwrite (so far: BDRV_REQ_FUA, + * BDRV_REQ_WRITE_UNCHANGED). + * If a driver does not support BDRV_REQ_WRITE_UNCHANGED, those + * writes will be issued as normal writes without the flag set. + * This is important to note for drivers that do not explicitly + * request a WRITE permission for their children and instead take + * the same permissions as their parent did (this is commonly what + * block filters do). Such drivers have to be aware that the + * parent may have taken a WRITE_UNCHANGED permission only and is + * issuing such requests. Drivers either must make sure that + * these requests do not result in plain WRITE accesses (usually + * by supporting BDRV_REQ_WRITE_UNCHANGED, and then forwarding + * every incoming write request as-is, including potentially that + * flag), or they have to explicitly take the WRITE permission for + * their children. + */ + BdrvRequestFlags supported_write_flags; + /* + * Flags honored during pwrite_zeroes (so far: BDRV_REQ_FUA, + * BDRV_REQ_MAY_UNMAP, BDRV_REQ_WRITE_UNCHANGED) + */ + BdrvRequestFlags supported_zero_flags; + /* + * Flags honoured during truncate (so far: BDRV_REQ_ZERO_WRITE). 
+ *
+ * If BDRV_REQ_ZERO_WRITE is given, the truncate operation must make sure
+ * that any added space reads as all zeros. If this can't be guaranteed,
+ * the operation must fail.
+ */
+ BdrvRequestFlags supported_truncate_flags;
+
+ /* the following member gives a name to every node on the bs graph. */
+ char node_name[32];
+ /* element of the list of named nodes building the graph */
+ QTAILQ_ENTRY(BlockDriverState) node_list;
+ /* element of the list of all BlockDriverStates (all_bdrv_states) */
+ QTAILQ_ENTRY(BlockDriverState) bs_list;
+ /* element of the list of monitor-owned BDS */
+ QTAILQ_ENTRY(BlockDriverState) monitor_list;
+ int refcnt;
+
+ /* operation blockers. Protected by BQL. */
+ QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];
+
+ /*
+ * The node that this node inherited default options from (and a reopen on
+ * which can affect this node by changing these defaults). This is always a
+ * parent node of this node.
+ */
+ BlockDriverState *inherits_from;
+
+ /*
+ * @backing and @file are some of @children or NULL. All these three fields
+ * (@file, @backing and @children) are modified only in
+ * bdrv_child_cb_attach() and bdrv_child_cb_detach().
+ *
+ * See also comment in include/block/block.h, to learn how backing and file
+ * are connected with BdrvChildRole.
+ */
+ QLIST_HEAD(, BdrvChild) children;
+ BdrvChild *backing;
+ BdrvChild *file;
+
+ QLIST_HEAD(, BdrvChild) parents;
+
+ QDict *options;
+ QDict *explicit_options;
+ BlockdevDetectZeroesOptions detect_zeroes;
+
+ /* The error object in use for blocking operations on backing_hd */
+ Error *backing_blocker;
+
+ /* Protected by AioContext lock */
+
+ /*
+ * If we are reading a disk image, give its size in sectors.
+ * Generally read-only; it is written to by load_snapshot and
+ * save_snapshot, but the block layer is quiescent during those.
+ */
+ int64_t total_sectors;
+
+ /* threshold limit for writes, in bytes. "High water mark". */
+ uint64_t write_threshold_offset;
+
+ /*
+ * Writing to the list requires the BQL _and_ the dirty_bitmap_mutex.
+ * Reading from the list can be done with either the BQL or the
+ * dirty_bitmap_mutex. Modifying a bitmap only requires
+ * dirty_bitmap_mutex.
+ */
+ QemuMutex dirty_bitmap_mutex;
+ QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;
+
+ /* Offset after the highest byte written to */
+ Stat64 wr_highest_offset;
+
+ /*
+ * If true, copy read backing sectors into image. Can be >1 if more
+ * than one client has requested copy-on-read. Accessed with atomic
+ * ops.
+ */
+ int copy_on_read;
+
+ /*
+ * number of in-flight requests; overall and serialising.
+ * Accessed with atomic ops.
+ */
+ unsigned int in_flight;
+ unsigned int serialising_in_flight;
+
+ /*
+ * counter for nested bdrv_io_plug.
+ * Accessed with atomic ops.
+ */
+ unsigned io_plugged;
+
+ /* do we need to tell the guest if we have a volatile write cache? */
+ int enable_write_cache;
+
+ /* Accessed with atomic ops. */
+ int quiesce_counter;
+ int recursive_quiesce_counter;
+
+ unsigned int write_gen; /* Current data generation */
+
+ /* Protected by reqs_lock. */
+ CoMutex reqs_lock;
+ QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
+ CoQueue flush_queue; /* Serializing flush queue */
+ bool active_flush_req; /* Flush request in flight? */
+
+ /* Only read/written by whoever has set active_flush_req to true. */
+ unsigned int flushed_gen; /* Flushed write generation */
+
+ /* BdrvChild links to this node may never be frozen */
+ bool never_freeze;
+
+ /* Lock for block-status cache RCU writers */
+ CoMutex bsc_modify_lock;
+ /* Always non-NULL, but must only be dereferenced under an RCU read guard */
+ BdrvBlockStatusCache *block_status_cache;
+};
+
+struct BlockBackendRootState {
+ int open_flags;
+ BlockdevDetectZeroesOptions detect_zeroes;
+};
+
+typedef enum BlockMirrorBackingMode {
+ /*
+ * Reuse the existing backing chain from the source for the target.
+ * - sync=full: Set backing BDS to NULL.
+ * - sync=top: Use source's backing BDS.
+ * - sync=none: Use source as the backing BDS.
+ */
+ MIRROR_SOURCE_BACKING_CHAIN,
+
+ /* Open the target's backing chain completely anew */
+ MIRROR_OPEN_BACKING_CHAIN,
+
+ /* Do not change the target's backing BDS after job completion */
+ MIRROR_LEAVE_BACKING_CHAIN,
+} BlockMirrorBackingMode;
+
+
+/*
+ * Essential block drivers which must always be statically linked into qemu, and
+ * which therefore can be accessed without using bdrv_find_format()
+ */
+extern BlockDriver bdrv_file;
+extern BlockDriver bdrv_raw;
+extern BlockDriver bdrv_qcow2;
+
+extern unsigned int bdrv_drain_all_count;
+extern QemuOptsList bdrv_create_opts_simple;
+
+/*
+ * Common functions that are neither I/O nor Global State.
+ *
+ * See include/block/block-common.h for more information about
+ * the Common API.
+ */
+
+static inline BlockDriverState *child_bs(BdrvChild *child)
+{
+ return child ? child->bs : NULL;
+}
+
+int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp);
+char *create_tmp_file(Error **errp);
+void bdrv_parse_filename_strip_prefix(const char *filename, const char *prefix,
+ QDict *options);
+
+
+int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset,
+ Error **errp);
+
+#ifdef _WIN32
+int is_windows_drive(const char *filename);
+#endif
+
+#endif /* BLOCK_INT_COMMON_H */
diff --git a/include/block/block_int-global-state.h b/include/block/block_int-global-state.h
new file mode 100644
index 0000000000..b49f4eb35b
--- /dev/null
+++ b/include/block/block_int-global-state.h
@@ -0,0 +1,330 @@
+/*
+ * QEMU System Emulator block driver
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef BLOCK_INT_GLOBAL_STATE_H
+#define BLOCK_INT_GLOBAL_STATE_H
+
+#include "block_int-common.h"
+
+/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
+/**
+ * stream_start:
+ * @job_id: The id of the newly-created job, or %NULL to use the
+ * device name of @bs.
+ * @bs: Block device to operate on.
+ * @base: Block device that will become the new base, or %NULL to
+ * flatten the whole backing file chain onto @bs.
+ * @backing_file_str: The file name that will be written to @bs as the
+ * new backing file if the job completes. Ignored if @base is %NULL.
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
+ * See @BlockJobCreateFlags
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @on_error: The action to take upon error.
+ * @filter_node_name: The node name that should be assigned to the filter
+ * driver that the stream job inserts into the graph above
+ * @bs. NULL means that a node name should be autogenerated.
+ * @errp: Error object.
+ *
+ * Start a streaming operation on @bs. Clusters that are unallocated
+ * in @bs, but allocated in any image between @base and @bs (both
+ * exclusive) will be written to @bs. At the end of a successful
+ * streaming job, the backing file of @bs will be changed to
+ * @backing_file_str in the written image and to @base in the live
+ * BlockDriverState.
+ */
+void stream_start(const char *job_id, BlockDriverState *bs,
+ BlockDriverState *base, const char *backing_file_str,
+ BlockDriverState *bottom,
+ int creation_flags, int64_t speed,
+ BlockdevOnError on_error,
+ const char *filter_node_name,
+ Error **errp);
+
+/**
+ * commit_start:
+ * @job_id: The id of the newly-created job, or %NULL to use the
+ * device name of @bs.
+ * @bs: Active block device.
+ * @top: Top block device to be committed.
+ * @base: Block device that will be written into, and become the new top.
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
+ * See @BlockJobCreateFlags
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @on_error: The action to take upon error.
+ * @backing_file_str: String to use as the backing file in @top's overlay
+ * @filter_node_name: The node name that should be assigned to the filter
+ * driver that the commit job inserts into the graph above @top. NULL means
+ * that a node name should be autogenerated.
+ * @errp: Error object.
+ *
+ */
+void commit_start(const char *job_id, BlockDriverState *bs,
+ BlockDriverState *base, BlockDriverState *top,
+ int creation_flags, int64_t speed,
+ BlockdevOnError on_error, const char *backing_file_str,
+ const char *filter_node_name, Error **errp);
+/**
+ * commit_active_start:
+ * @job_id: The id of the newly-created job, or %NULL to use the
+ * device name of @bs.
+ * @bs: Active block device to be committed.
+ * @base: Block device that will be written into, and become the new top.
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
+ * See @BlockJobCreateFlags
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @on_error: The action to take upon error.
+ * @filter_node_name: The node name that should be assigned to the filter
+ * driver that the commit job inserts into the graph above @bs. NULL means that
+ * a node name should be autogenerated.
+ * @cb: Completion function for the job.
+ * @opaque: Opaque pointer value passed to @cb.
+ * @auto_complete: Auto complete the job.
+ * @errp: Error object.
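+ *
+ * Hypothetical call for illustration (identifiers made up, error handling
+ * omitted):
+ *
+ *   job = commit_active_start("commit0", bs, base_bs, JOB_DEFAULT, 0,
+ *                             BLOCKDEV_ON_ERROR_REPORT, NULL,
+ *                             NULL, NULL, false, errp);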
+ * + */ +BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs, + BlockDriverState *base, int creation_flags, + int64_t speed, BlockdevOnError on_error, + const char *filter_node_name, + BlockCompletionFunc *cb, void *opaque, + bool auto_complete, Error **errp); +/* + * mirror_start: + * @job_id: The id of the newly-created job, or %NULL to use the + * device name of @bs. + * @bs: Block device to operate on. + * @target: Block device to write to. + * @replaces: Block graph node name to replace once the mirror is done. Can + * only be used when full mirroring is selected. + * @creation_flags: Flags that control the behavior of the Job lifetime. + * See @BlockJobCreateFlags + * @speed: The maximum speed, in bytes per second, or 0 for unlimited. + * @granularity: The chosen granularity for the dirty bitmap. + * @buf_size: The amount of data that can be in flight at one time. + * @mode: Whether to collapse all images in the chain to the target. + * @backing_mode: How to establish the target's backing chain after completion. + * @zero_target: Whether the target should be explicitly zero-initialized + * @on_source_error: The action to take upon error reading from the source. + * @on_target_error: The action to take upon error writing to the target. + * @unmap: Whether to unmap target where source sectors only contain zeroes. + * @filter_node_name: The node name that should be assigned to the filter + * driver that the mirror job inserts into the graph above @bs. NULL means that + * a node name should be autogenerated. + * @copy_mode: When to trigger writes to the target. + * @errp: Error object. + * + * Start a mirroring operation on @bs. Clusters that are allocated + * in @bs will be written to @target until the job is cancelled or + * manually completed. At the end of a successful mirroring job, + * @bs will be switched to read from @target. + */ +void mirror_start(const char *job_id, BlockDriverState *bs, + BlockDriverState *target, const char *replaces, + int creation_flags, int64_t speed, + uint32_t granularity, int64_t buf_size, + MirrorSyncMode mode, BlockMirrorBackingMode backing_mode, + bool zero_target, + BlockdevOnError on_source_error, + BlockdevOnError on_target_error, + bool unmap, const char *filter_node_name, + MirrorCopyMode copy_mode, Error **errp); + +/* + * backup_job_create: + * @job_id: The id of the newly-created job, or %NULL to use the + * device name of @bs. + * @bs: Block device to operate on. + * @target: Block device to write to. + * @speed: The maximum speed, in bytes per second, or 0 for unlimited. + * @sync_mode: What parts of the disk image should be copied to the destination. + * @sync_bitmap: The dirty bitmap if sync_mode is 'bitmap' or 'incremental' + * @bitmap_mode: The bitmap synchronization policy to use. + * @perf: Performance options. All actual fields assumed to be present, + * all ".has_*" fields are ignored. + * @on_source_error: The action to take upon error reading from the source. + * @on_target_error: The action to take upon error writing to the target. + * @creation_flags: Flags that control the behavior of the Job lifetime. + * See @BlockJobCreateFlags + * @cb: Completion function for the job. + * @opaque: Opaque pointer value passed to @cb. + * @txn: Transaction that this job is part of (may be NULL). + * + * Create a backup operation on @bs. Clusters in @bs are written to @target + * until the job is cancelled or manually completed. 
+ */ +BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, + BlockDriverState *target, int64_t speed, + MirrorSyncMode sync_mode, + BdrvDirtyBitmap *sync_bitmap, + BitmapSyncMode bitmap_mode, + bool compress, + const char *filter_node_name, + BackupPerf *perf, + BlockdevOnError on_source_error, + BlockdevOnError on_target_error, + int creation_flags, + BlockCompletionFunc *cb, void *opaque, + JobTxn *txn, Error **errp); + +BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, + const char *child_name, + const BdrvChildClass *child_class, + BdrvChildRole child_role, + uint64_t perm, uint64_t shared_perm, + void *opaque, Error **errp); +void bdrv_root_unref_child(BdrvChild *child); + +void bdrv_get_cumulative_perm(BlockDriverState *bs, uint64_t *perm, + uint64_t *shared_perm); + +/** + * Sets a BdrvChild's permissions. Avoid if the parent is a BDS; use + * bdrv_child_refresh_perms() instead and make the parent's + * .bdrv_child_perm() implementation return the correct values. + */ +int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared, + Error **errp); + +/** + * Calls bs->drv->bdrv_child_perm() and updates the child's permission + * masks with the result. + * Drivers should invoke this function whenever an event occurs that + * makes their .bdrv_child_perm() implementation return different + * values than before, but which will not result in the block layer + * automatically refreshing the permissions. + */ +int bdrv_child_refresh_perms(BlockDriverState *bs, BdrvChild *c, Error **errp); + +bool bdrv_recurse_can_replace(BlockDriverState *bs, + BlockDriverState *to_replace); + +/* + * Default implementation for BlockDriver.bdrv_child_perm() that can + * be used by block filters and image formats, as long as they use the + * child_of_bds child class and set an appropriate BdrvChildRole. 
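+ *
+ * For example, a (hypothetical) filter driver can simply point its
+ * BlockDriver definition at this helper:
+ *
+ *   .is_filter       = true,
+ *   .bdrv_child_perm = bdrv_default_perms,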
+ */
+void bdrv_default_perms(BlockDriverState *bs, BdrvChild *c,
+ BdrvChildRole role, BlockReopenQueue *reopen_queue,
+ uint64_t perm, uint64_t shared,
+ uint64_t *nperm, uint64_t *nshared);
+
+void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp);
+bool blk_dev_has_removable_media(BlockBackend *blk);
+void blk_dev_eject_request(BlockBackend *blk, bool force);
+bool blk_dev_is_medium_locked(BlockBackend *blk);
+
+void bdrv_restore_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *backup);
+
+void bdrv_set_monitor_owned(BlockDriverState *bs);
+
+void blockdev_close_all_bdrv_states(void);
+
+BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp);
+
+/**
+ * Simple implementation of bdrv_co_create_opts for protocol drivers
+ * which only support creation via opening a file
+ * (usually an existing raw storage device)
+ */
+int coroutine_fn bdrv_co_create_opts_simple(BlockDriver *drv,
+ const char *filename,
+ QemuOpts *opts,
+ Error **errp);
+
+BdrvDirtyBitmap *block_dirty_bitmap_lookup(const char *node,
+ const char *name,
+ BlockDriverState **pbs,
+ Error **errp);
+BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *node, const char *target,
+ BlockDirtyBitmapOrStrList *bms,
+ HBitmap **backup, Error **errp);
+BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name,
+ bool release,
+ BlockDriverState **bitmap_bs,
+ Error **errp);
+
+
+BlockDriverState *bdrv_skip_implicit_filters(BlockDriverState *bs);
+
+/**
+ * bdrv_add_aio_context_notifier:
+ *
+ * If a long-running job intends to always run in the same AioContext as a
+ * certain BDS, it may use this function to be notified of changes regarding the
+ * association of the BDS to an AioContext.
+ *
+ * attached_aio_context() is called after the target BDS has been attached to a
+ * new AioContext; detach_aio_context() is called before the target BDS is being
+ * detached from its old AioContext.
+ */
+void bdrv_add_aio_context_notifier(BlockDriverState *bs,
+ void (*attached_aio_context)(AioContext *new_context, void *opaque),
+ void (*detach_aio_context)(void *opaque), void *opaque);
+
+/**
+ * bdrv_remove_aio_context_notifier:
+ *
+ * Unsubscribe from change notifications regarding the BDS's AioContext. The
+ * parameters given here have to be the same as those given to
+ * bdrv_add_aio_context_notifier().
+ */
+void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
+ void (*aio_context_attached)(AioContext *,
+ void *),
+ void (*aio_context_detached)(void *),
+ void *opaque);
+
+/**
+ * End all quiescent sections started by bdrv_drain_all_begin(). This is
+ * needed when deleting a BDS before bdrv_drain_all_end() is called.
+ *
+ * NOTE: this is an internal helper for bdrv_close() *only*. No one else
+ * should call it.
+ */
+void bdrv_drain_all_end_quiesce(BlockDriverState *bs);
+
+/**
+ * Make sure that the function is running under both drain and BQL.
+ * The latter protects from concurrent writes
+ * from the GS API, while the former prevents concurrent reads
+ * from I/O.
+ */
+static inline void assert_bdrv_graph_writable(BlockDriverState *bs)
+{
+ /*
+ * TODO: this function is incomplete. Because the users of this
+ * assert lack the necessary drains, check only for BQL.
+ * Once the necessary drains are added, + * assert also for qatomic_read(&bs->quiesce_counter) > 0 + */ + assert(qemu_in_main_thread()); +} + +#endif /* BLOCK_INT_GLOBAL_STATE_H */ diff --git a/include/block/block_int-io.h b/include/block/block_int-io.h new file mode 100644 index 0000000000..4b0b3e17ef --- /dev/null +++ b/include/block/block_int-io.h @@ -0,0 +1,194 @@ +/* + * QEMU System Emulator block driver + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef BLOCK_INT_IO_H +#define BLOCK_INT_IO_H + +#include "block_int-common.h" + +/* + * I/O API functions. These functions are thread-safe. + * + * See include/block/block-io.h for more information about + * the I/O API. 
+ */ + +int coroutine_fn bdrv_co_preadv_snapshot(BdrvChild *child, + int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset); +int coroutine_fn bdrv_co_snapshot_block_status(BlockDriverState *bs, + bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum, + int64_t *map, BlockDriverState **file); +int coroutine_fn bdrv_co_pdiscard_snapshot(BlockDriverState *bs, + int64_t offset, int64_t bytes); + + +int coroutine_fn bdrv_co_preadv(BdrvChild *child, + int64_t offset, int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags); +int coroutine_fn bdrv_co_preadv_part(BdrvChild *child, + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags); +int coroutine_fn bdrv_co_pwritev(BdrvChild *child, + int64_t offset, int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags); +int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags); + +static inline int coroutine_fn bdrv_co_pread(BdrvChild *child, + int64_t offset, int64_t bytes, void *buf, BdrvRequestFlags flags) +{ + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); + IO_CODE(); + + return bdrv_co_preadv(child, offset, bytes, &qiov, flags); +} + +static inline int coroutine_fn bdrv_co_pwrite(BdrvChild *child, + int64_t offset, int64_t bytes, const void *buf, BdrvRequestFlags flags) +{ + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); + IO_CODE(); + + return bdrv_co_pwritev(child, offset, bytes, &qiov, flags); +} + +void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req, + uint64_t align); +BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs); + +BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size, + const char *filename); + +/** + * bdrv_wakeup: + * @bs: The BlockDriverState for which an I/O operation has been completed. + * + * Wake up the main thread if it is waiting on BDRV_POLL_WHILE. During + * synchronous I/O on a BlockDriverState that is attached to another + * I/O thread, the main thread lets the I/O thread's event loop run, + * waiting for the I/O operation to complete. A bdrv_wakeup will wake + * up the main thread if necessary. + * + * Manual calls to bdrv_wakeup are rarely necessary, because + * bdrv_dec_in_flight already calls it. 
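+ *
+ * Sketch of the usual pattern (illustration only): a driver brackets a
+ * background operation with the in-flight counter so that drain and
+ * BDRV_POLL_WHILE wait for it:
+ *
+ *   bdrv_inc_in_flight(bs);
+ *   ... perform the operation, possibly from another AioContext ...
+ *   bdrv_dec_in_flight(bs);   /* also wakes up BDRV_POLL_WHILE waiters */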
+ */ +void bdrv_wakeup(BlockDriverState *bs); + +const char *bdrv_get_parent_name(const BlockDriverState *bs); +bool blk_dev_has_tray(BlockBackend *blk); +bool blk_dev_is_tray_open(BlockBackend *blk); + +void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes); + +void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out); +void bdrv_dirty_bitmap_merge_internal(BdrvDirtyBitmap *dest, + const BdrvDirtyBitmap *src, + HBitmap **backup, bool lock); + +void bdrv_inc_in_flight(BlockDriverState *bs); +void bdrv_dec_in_flight(BlockDriverState *bs); + +int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset, + BdrvChild *dst, int64_t dst_offset, + int64_t bytes, + BdrvRequestFlags read_flags, + BdrvRequestFlags write_flags); +int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset, + BdrvChild *dst, int64_t dst_offset, + int64_t bytes, + BdrvRequestFlags read_flags, + BdrvRequestFlags write_flags); + +int refresh_total_sectors(BlockDriverState *bs, int64_t hint); + +BdrvChild *bdrv_cow_child(BlockDriverState *bs); +BdrvChild *bdrv_filter_child(BlockDriverState *bs); +BdrvChild *bdrv_filter_or_cow_child(BlockDriverState *bs); +BdrvChild *bdrv_primary_child(BlockDriverState *bs); +BlockDriverState *bdrv_skip_filters(BlockDriverState *bs); +BlockDriverState *bdrv_backing_chain_next(BlockDriverState *bs); + +static inline BlockDriverState *bdrv_cow_bs(BlockDriverState *bs) +{ + IO_CODE(); + return child_bs(bdrv_cow_child(bs)); +} + +static inline BlockDriverState *bdrv_filter_bs(BlockDriverState *bs) +{ + IO_CODE(); + return child_bs(bdrv_filter_child(bs)); +} + +static inline BlockDriverState *bdrv_filter_or_cow_bs(BlockDriverState *bs) +{ + IO_CODE(); + return child_bs(bdrv_filter_or_cow_child(bs)); +} + +static inline BlockDriverState *bdrv_primary_bs(BlockDriverState *bs) +{ + IO_CODE(); + return child_bs(bdrv_primary_child(bs)); +} + +/** + * Check whether the given offset is in the cached block-status data + * region. + * + * If it is, and @pnum is not NULL, *pnum is set to + * `bsc.data_end - offset`, i.e. how many bytes, starting from + * @offset, are data (according to the cache). + * Otherwise, *pnum is not touched. + */ +bool bdrv_bsc_is_data(BlockDriverState *bs, int64_t offset, int64_t *pnum); + +/** + * If [offset, offset + bytes) overlaps with the currently cached + * block-status region, invalidate the cache. + * + * (To be used by I/O paths that cause data regions to be zero or + * holes.) + */ +void bdrv_bsc_invalidate_range(BlockDriverState *bs, + int64_t offset, int64_t bytes); + +/** + * Mark the range [offset, offset + bytes) as a data region. + */ +void bdrv_bsc_fill(BlockDriverState *bs, int64_t offset, int64_t bytes); + + +/* + * "I/O or GS" API functions. These functions can run without + * the BQL, but only in one specific iothread/main loop. + * + * See include/block/block-io.h for more information about + * the "I/O or GS" API. 
+ */ + +void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent); +void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent); + +#endif /* BLOCK_INT_IO_H */ diff --git a/include/block/block_int.h b/include/block/block_int.h index f1a54db0f8..7d50b6bbd1 100644 --- a/include/block/block_int.h +++ b/include/block/block_int.h @@ -24,1415 +24,9 @@ #ifndef BLOCK_INT_H #define BLOCK_INT_H -#include "block/accounting.h" -#include "block/block.h" -#include "block/aio-wait.h" -#include "qemu/queue.h" -#include "qemu/coroutine.h" -#include "qemu/stats64.h" -#include "qemu/timer.h" -#include "qemu/hbitmap.h" -#include "block/snapshot.h" -#include "qemu/throttle.h" +#include "block_int-global-state.h" +#include "block_int-io.h" -#define BLOCK_FLAG_LAZY_REFCOUNTS 8 - -#define BLOCK_OPT_SIZE "size" -#define BLOCK_OPT_ENCRYPT "encryption" -#define BLOCK_OPT_ENCRYPT_FORMAT "encrypt.format" -#define BLOCK_OPT_COMPAT6 "compat6" -#define BLOCK_OPT_HWVERSION "hwversion" -#define BLOCK_OPT_BACKING_FILE "backing_file" -#define BLOCK_OPT_BACKING_FMT "backing_fmt" -#define BLOCK_OPT_CLUSTER_SIZE "cluster_size" -#define BLOCK_OPT_TABLE_SIZE "table_size" -#define BLOCK_OPT_PREALLOC "preallocation" -#define BLOCK_OPT_SUBFMT "subformat" -#define BLOCK_OPT_COMPAT_LEVEL "compat" -#define BLOCK_OPT_LAZY_REFCOUNTS "lazy_refcounts" -#define BLOCK_OPT_ADAPTER_TYPE "adapter_type" -#define BLOCK_OPT_REDUNDANCY "redundancy" -#define BLOCK_OPT_NOCOW "nocow" -#define BLOCK_OPT_EXTENT_SIZE_HINT "extent_size_hint" -#define BLOCK_OPT_OBJECT_SIZE "object_size" -#define BLOCK_OPT_REFCOUNT_BITS "refcount_bits" -#define BLOCK_OPT_DATA_FILE "data_file" -#define BLOCK_OPT_DATA_FILE_RAW "data_file_raw" -#define BLOCK_OPT_COMPRESSION_TYPE "compression_type" -#define BLOCK_OPT_EXTL2 "extended_l2" - -#define BLOCK_PROBE_BUF_SIZE 512 - -enum BdrvTrackedRequestType { - BDRV_TRACKED_READ, - BDRV_TRACKED_WRITE, - BDRV_TRACKED_DISCARD, - BDRV_TRACKED_TRUNCATE, -}; - -/* - * That is not quite good that BdrvTrackedRequest structure is public, - * as block/io.c is very careful about incoming offset/bytes being - * correct. Be sure to assert bdrv_check_request() succeeded after any - * modification of BdrvTrackedRequest object out of block/io.c - */ -typedef struct BdrvTrackedRequest { - BlockDriverState *bs; - int64_t offset; - int64_t bytes; - enum BdrvTrackedRequestType type; - - bool serialising; - int64_t overlap_offset; - int64_t overlap_bytes; - - QLIST_ENTRY(BdrvTrackedRequest) list; - Coroutine *co; /* owner, used for deadlock detection */ - CoQueue wait_queue; /* coroutines blocked on this request */ - - struct BdrvTrackedRequest *waiting_for; -} BdrvTrackedRequest; - -int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp); - -struct BlockDriver { - const char *format_name; - int instance_size; - - /* set to true if the BlockDriver is a block filter. Block filters pass - * certain callbacks that refer to data (see block.c) to their bs->file - * or bs->backing (whichever one exists) if the driver doesn't implement - * them. Drivers that do not wish to forward must implement them and return - * -ENOTSUP. - * Note that filters are not allowed to modify data. - * - * Filters generally cannot have more than a single filtered child, - * because the data they present must at all times be the same as - * that on their filtered child. That would be impossible to - * achieve for multiple filtered children. - * (And this filtered child must then be bs->file or bs->backing.) 
- */ - bool is_filter; - /* - * Set to true if the BlockDriver is a format driver. Format nodes - * generally do not expect their children to be other format nodes - * (except for backing files), and so format probing is disabled - * on those children. - */ - bool is_format; - /* - * Return true if @to_replace can be replaced by a BDS with the - * same data as @bs without it affecting @bs's behavior (that is, - * without it being visible to @bs's parents). - */ - bool (*bdrv_recurse_can_replace)(BlockDriverState *bs, - BlockDriverState *to_replace); - - int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename); - int (*bdrv_probe_device)(const char *filename); - - /* Any driver implementing this callback is expected to be able to handle - * NULL file names in its .bdrv_open() implementation */ - void (*bdrv_parse_filename)(const char *filename, QDict *options, Error **errp); - /* Drivers not implementing bdrv_parse_filename nor bdrv_open should have - * this field set to true, except ones that are defined only by their - * child's bs. - * An example of the last type will be the quorum block driver. - */ - bool bdrv_needs_filename; - - /* - * Set if a driver can support backing files. This also implies the - * following semantics: - * - * - Return status 0 of .bdrv_co_block_status means that corresponding - * blocks are not allocated in this layer of backing-chain - * - For such (unallocated) blocks, read will: - * - fill buffer with zeros if there is no backing file - * - read from the backing file otherwise, where the block layer - * takes care of reading zeros beyond EOF if backing file is short - */ - bool supports_backing; - - /* For handling image reopen for split or non-split files */ - int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state, - BlockReopenQueue *queue, Error **errp); - void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state); - void (*bdrv_reopen_commit_post)(BDRVReopenState *reopen_state); - void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state); - void (*bdrv_join_options)(QDict *options, QDict *old_options); - - int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags, - Error **errp); - - /* Protocol drivers should implement this instead of bdrv_open */ - int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags, - Error **errp); - void (*bdrv_close)(BlockDriverState *bs); - - - int coroutine_fn (*bdrv_co_create)(BlockdevCreateOptions *opts, - Error **errp); - int coroutine_fn (*bdrv_co_create_opts)(BlockDriver *drv, - const char *filename, - QemuOpts *opts, - Error **errp); - - int coroutine_fn (*bdrv_co_amend)(BlockDriverState *bs, - BlockdevAmendOptions *opts, - bool force, - Error **errp); - - int (*bdrv_amend_options)(BlockDriverState *bs, - QemuOpts *opts, - BlockDriverAmendStatusCB *status_cb, - void *cb_opaque, - bool force, - Error **errp); - - int (*bdrv_make_empty)(BlockDriverState *bs); - - /* - * Refreshes the bs->exact_filename field. If that is impossible, - * bs->exact_filename has to be left empty. - */ - void (*bdrv_refresh_filename)(BlockDriverState *bs); - - /* - * Gathers the open options for all children into @target. - * A simple format driver (without backing file support) might - * implement this function like this: - * - * QINCREF(bs->file->bs->full_open_options); - * qdict_put(target, "file", bs->file->bs->full_open_options); - * - * If not specified, the generic implementation will simply put - * all children's options under their respective name. 
- * - * @backing_overridden is true when bs->backing seems not to be - * the child that would result from opening bs->backing_file. - * Therefore, if it is true, the backing child's options should be - * gathered; otherwise, there is no need since the backing child - * is the one implied by the image header. - * - * Note that ideally this function would not be needed. Every - * block driver which implements it is probably doing something - * shady regarding its runtime option structure. - */ - void (*bdrv_gather_child_options)(BlockDriverState *bs, QDict *target, - bool backing_overridden); - - /* - * Returns an allocated string which is the directory name of this BDS: It - * will be used to make relative filenames absolute by prepending this - * function's return value to them. - */ - char *(*bdrv_dirname)(BlockDriverState *bs, Error **errp); - - /* aio */ - BlockAIOCB *(*bdrv_aio_preadv)(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags, - BlockCompletionFunc *cb, void *opaque); - BlockAIOCB *(*bdrv_aio_pwritev)(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags, - BlockCompletionFunc *cb, void *opaque); - BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs, - BlockCompletionFunc *cb, void *opaque); - BlockAIOCB *(*bdrv_aio_pdiscard)(BlockDriverState *bs, - int64_t offset, int bytes, - BlockCompletionFunc *cb, void *opaque); - - int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, QEMUIOVector *qiov); - - /** - * @offset: position in bytes to read at - * @bytes: number of bytes to read - * @qiov: the buffers to fill with read data - * @flags: currently unused, always 0 - * - * @offset and @bytes will be a multiple of 'request_alignment', - * but the length of individual @qiov elements does not have to - * be a multiple. - * - * @bytes will always equal the total size of @qiov, and will be - * no larger than 'max_transfer'. - * - * The buffer in @qiov may point directly to guest memory. - */ - int coroutine_fn (*bdrv_co_preadv)(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags); - int coroutine_fn (*bdrv_co_preadv_part)(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, size_t qiov_offset, int flags); - int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int flags); - /** - * @offset: position in bytes to write at - * @bytes: number of bytes to write - * @qiov: the buffers containing data to write - * @flags: zero or more bits allowed by 'supported_write_flags' - * - * @offset and @bytes will be a multiple of 'request_alignment', - * but the length of individual @qiov elements does not have to - * be a multiple. - * - * @bytes will always equal the total size of @qiov, and will be - * no larger than 'max_transfer'. - * - * The buffer in @qiov may point directly to guest memory. - */ - int coroutine_fn (*bdrv_co_pwritev)(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags); - int coroutine_fn (*bdrv_co_pwritev_part)(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, size_t qiov_offset, int flags); - - /* - * Efficiently zero a region of the disk image. Typically an image format - * would use a compact metadata representation to implement this. This - * function pointer may be NULL or return -ENOSUP and .bdrv_co_writev() - * will be called instead. 
- */ - int coroutine_fn (*bdrv_co_pwrite_zeroes)(BlockDriverState *bs, - int64_t offset, int bytes, BdrvRequestFlags flags); - int coroutine_fn (*bdrv_co_pdiscard)(BlockDriverState *bs, - int64_t offset, int bytes); - - /* Map [offset, offset + nbytes) range onto a child of @bs to copy from, - * and invoke bdrv_co_copy_range_from(child, ...), or invoke - * bdrv_co_copy_range_to() if @bs is the leaf child to copy data from. - * - * See the comment of bdrv_co_copy_range for the parameter and return value - * semantics. - */ - int coroutine_fn (*bdrv_co_copy_range_from)(BlockDriverState *bs, - BdrvChild *src, - uint64_t offset, - BdrvChild *dst, - uint64_t dst_offset, - uint64_t bytes, - BdrvRequestFlags read_flags, - BdrvRequestFlags write_flags); - - /* Map [offset, offset + nbytes) range onto a child of bs to copy data to, - * and invoke bdrv_co_copy_range_to(child, src, ...), or perform the copy - * operation if @bs is the leaf and @src has the same BlockDriver. Return - * -ENOTSUP if @bs is the leaf but @src has a different BlockDriver. - * - * See the comment of bdrv_co_copy_range for the parameter and return value - * semantics. - */ - int coroutine_fn (*bdrv_co_copy_range_to)(BlockDriverState *bs, - BdrvChild *src, - uint64_t src_offset, - BdrvChild *dst, - uint64_t dst_offset, - uint64_t bytes, - BdrvRequestFlags read_flags, - BdrvRequestFlags write_flags); - - /* - * Building block for bdrv_block_status[_above] and - * bdrv_is_allocated[_above]. The driver should answer only - * according to the current layer, and should only need to set - * BDRV_BLOCK_DATA, BDRV_BLOCK_ZERO, BDRV_BLOCK_OFFSET_VALID, - * and/or BDRV_BLOCK_RAW; if the current layer defers to a backing - * layer, the result should be 0 (and not BDRV_BLOCK_ZERO). See - * block.h for the overall meaning of the bits. As a hint, the - * flag want_zero is true if the caller cares more about precise - * mappings (favor accurate _OFFSET_VALID/_ZERO) or false for - * overall allocation (favor larger *pnum, perhaps by reporting - * _DATA instead of _ZERO). The block layer guarantees input - * clamped to bdrv_getlength() and aligned to request_alignment, - * as well as non-NULL pnum, map, and file; in turn, the driver - * must return an error or set pnum to an aligned non-zero value. - */ - int coroutine_fn (*bdrv_co_block_status)(BlockDriverState *bs, - bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum, - int64_t *map, BlockDriverState **file); - - /* - * This informs the driver that we are no longer interested in the result - * of in-flight requests, so don't waste the time if possible. - * - * One example usage is to avoid waiting for an nbd target node reconnect - * timeout during job-cancel with force=true. - */ - void (*bdrv_cancel_in_flight)(BlockDriverState *bs); - - /* - * Invalidate any cached meta-data. - */ - void coroutine_fn (*bdrv_co_invalidate_cache)(BlockDriverState *bs, - Error **errp); - int (*bdrv_inactivate)(BlockDriverState *bs); - - /* - * Flushes all data for all layers by calling bdrv_co_flush for underlying - * layers, if needed. This function is needed for deterministic - * synchronization of the flush finishing callback. - */ - int coroutine_fn (*bdrv_co_flush)(BlockDriverState *bs); - - /* Delete a created file. */ - int coroutine_fn (*bdrv_co_delete_file)(BlockDriverState *bs, - Error **errp); - - /* - * Flushes all data that was already written to the OS all the way down to - * the disk (for example file-posix.c calls fsync()). 
- */ - int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs); - - /* - * Flushes all internal caches to the OS. The data may still sit in a - * writeback cache of the host OS, but it will survive a crash of the qemu - * process. - */ - int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs); - - /* - * Drivers setting this field must be able to work with just a plain - * filename with ':' as a prefix, and no other options. - * Options may be extracted from the filename by implementing - * bdrv_parse_filename. - */ - const char *protocol_name; - - /* - * Truncate @bs to @offset bytes using the given @prealloc mode - * when growing. Modes other than PREALLOC_MODE_OFF should be - * rejected when shrinking @bs. - * - * If @exact is true, @bs must be resized to exactly @offset. - * Otherwise, it is sufficient for @bs (if it is a host block - * device and thus there is no way to resize it) to be at least - * @offset bytes in length. - * - * If @exact is true and this function fails but would succeed - * with @exact = false, it should return -ENOTSUP. - */ - int coroutine_fn (*bdrv_co_truncate)(BlockDriverState *bs, int64_t offset, - bool exact, PreallocMode prealloc, - BdrvRequestFlags flags, Error **errp); - - int64_t (*bdrv_getlength)(BlockDriverState *bs); - bool has_variable_length; - int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs); - BlockMeasureInfo *(*bdrv_measure)(QemuOpts *opts, BlockDriverState *in_bs, - Error **errp); - - int coroutine_fn (*bdrv_co_pwritev_compressed)(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov); - int coroutine_fn (*bdrv_co_pwritev_compressed_part)(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, - size_t qiov_offset); - - int (*bdrv_snapshot_create)(BlockDriverState *bs, - QEMUSnapshotInfo *sn_info); - int (*bdrv_snapshot_goto)(BlockDriverState *bs, - const char *snapshot_id); - int (*bdrv_snapshot_delete)(BlockDriverState *bs, - const char *snapshot_id, - const char *name, - Error **errp); - int (*bdrv_snapshot_list)(BlockDriverState *bs, - QEMUSnapshotInfo **psn_info); - int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs, - const char *snapshot_id, - const char *name, - Error **errp); - int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi); - ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs, - Error **errp); - BlockStatsSpecific *(*bdrv_get_specific_stats)(BlockDriverState *bs); - - int coroutine_fn (*bdrv_save_vmstate)(BlockDriverState *bs, - QEMUIOVector *qiov, - int64_t pos); - int coroutine_fn (*bdrv_load_vmstate)(BlockDriverState *bs, - QEMUIOVector *qiov, - int64_t pos); - - int (*bdrv_change_backing_file)(BlockDriverState *bs, - const char *backing_file, const char *backing_fmt); - - /* removable device specific */ - bool (*bdrv_is_inserted)(BlockDriverState *bs); - void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag); - void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked); - - /* to control generic scsi devices */ - BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs, - unsigned long int req, void *buf, - BlockCompletionFunc *cb, void *opaque); - int coroutine_fn (*bdrv_co_ioctl)(BlockDriverState *bs, - unsigned long int req, void *buf); - - /* List of options for creating images, terminated by name == NULL */ - QemuOptsList *create_opts; - - /* List of options for image amend */ - QemuOptsList *amend_opts; - - /* - * If this driver supports reopening images this contains a - * NULL-terminated list of the runtime options that can 
be - * modified. If an option in this list is unspecified during - * reopen then it _must_ be reset to its default value or return - * an error. - */ - const char *const *mutable_opts; - - /* - * Returns 0 for completed check, -errno for internal errors. - * The check results are stored in result. - */ - int coroutine_fn (*bdrv_co_check)(BlockDriverState *bs, - BdrvCheckResult *result, - BdrvCheckMode fix); - - void (*bdrv_debug_event)(BlockDriverState *bs, BlkdebugEvent event); - - /* TODO Better pass a option string/QDict/QemuOpts to add any rule? */ - int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event, - const char *tag); - int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs, - const char *tag); - int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag); - bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag); - - void (*bdrv_refresh_limits)(BlockDriverState *bs, Error **errp); - - /* - * Returns 1 if newly created images are guaranteed to contain only - * zeros, 0 otherwise. - */ - int (*bdrv_has_zero_init)(BlockDriverState *bs); - - /* Remove fd handlers, timers, and other event loop callbacks so the event - * loop is no longer in use. Called with no in-flight requests and in - * depth-first traversal order with parents before child nodes. - */ - void (*bdrv_detach_aio_context)(BlockDriverState *bs); - - /* Add fd handlers, timers, and other event loop callbacks so I/O requests - * can be processed again. Called with no in-flight requests and in - * depth-first traversal order with child nodes before parent nodes. - */ - void (*bdrv_attach_aio_context)(BlockDriverState *bs, - AioContext *new_context); - - /* io queue for linux-aio */ - void (*bdrv_io_plug)(BlockDriverState *bs); - void (*bdrv_io_unplug)(BlockDriverState *bs); - - /** - * Try to get @bs's logical and physical block size. - * On success, store them in @bsz and return zero. - * On failure, return negative errno. - */ - int (*bdrv_probe_blocksizes)(BlockDriverState *bs, BlockSizes *bsz); - /** - * Try to get @bs's geometry (cyls, heads, sectors) - * On success, store them in @geo and return 0. - * On failure return -errno. - * Only drivers that want to override guest geometry implement this - * callback; see hd_geometry_guess(). - */ - int (*bdrv_probe_geometry)(BlockDriverState *bs, HDGeometry *geo); - - /** - * bdrv_co_drain_begin is called if implemented in the beginning of a - * drain operation to drain and stop any internal sources of requests in - * the driver. - * bdrv_co_drain_end is called if implemented at the end of the drain. - * - * They should be used by the driver to e.g. manage scheduled I/O - * requests, or toggle an internal state. After the end of the drain new - * requests will continue normally. - */ - void coroutine_fn (*bdrv_co_drain_begin)(BlockDriverState *bs); - void coroutine_fn (*bdrv_co_drain_end)(BlockDriverState *bs); - - void (*bdrv_add_child)(BlockDriverState *parent, BlockDriverState *child, - Error **errp); - void (*bdrv_del_child)(BlockDriverState *parent, BdrvChild *child, - Error **errp); - - /** - * Informs the block driver that a permission change is intended. The - * driver checks whether the change is permissible and may take other - * preparations for the change (e.g. get file system locks). This operation - * is always followed either by a call to either .bdrv_set_perm or - * .bdrv_abort_perm_update. 
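The check/set/abort permission handshake described above is easier to follow with a toy callback. The sketch below is not from this patch; the driver is hypothetical, and error_setg()/bdrv_get_node_name() are assumed from the wider tree:

static int example_check_perm(BlockDriverState *bs, uint64_t perm,
                              uint64_t shared, Error **errp)
{
    /* Refuse any attempt to take write permission on this node. */
    if (perm & BLK_PERM_WRITE) {
        error_setg(errp, "node '%s' does not allow writers",
                   bdrv_get_node_name(bs));
        return -EPERM;
    }
    /* Success: the block layer follows up with .bdrv_set_perm(). */
    return 0;
}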
- * - * Checks whether the requested set of cumulative permissions in @perm - * can be granted for accessing @bs and whether no other users are using - * permissions other than those given in @shared (both arguments take - * BLK_PERM_* bitmasks). - * - * If both conditions are met, 0 is returned. Otherwise, -errno is returned - * and errp is set to an error describing the conflict. - */ - int (*bdrv_check_perm)(BlockDriverState *bs, uint64_t perm, - uint64_t shared, Error **errp); - - /** - * Called to inform the driver that the set of cumulative set of used - * permissions for @bs has changed to @perm, and the set of sharable - * permission to @shared. The driver can use this to propagate changes to - * its children (i.e. request permissions only if a parent actually needs - * them). - * - * This function is only invoked after bdrv_check_perm(), so block drivers - * may rely on preparations made in their .bdrv_check_perm implementation. - */ - void (*bdrv_set_perm)(BlockDriverState *bs, uint64_t perm, uint64_t shared); - - /* - * Called to inform the driver that after a previous bdrv_check_perm() - * call, the permission update is not performed and any preparations made - * for it (e.g. taken file locks) need to be undone. - * - * This function can be called even for nodes that never saw a - * bdrv_check_perm() call. It is a no-op then. - */ - void (*bdrv_abort_perm_update)(BlockDriverState *bs); - - /** - * Returns in @nperm and @nshared the permissions that the driver for @bs - * needs on its child @c, based on the cumulative permissions requested by - * the parents in @parent_perm and @parent_shared. - * - * If @c is NULL, return the permissions for attaching a new child for the - * given @child_class and @role. - * - * If @reopen_queue is non-NULL, don't return the currently needed - * permissions, but those that will be needed after applying the - * @reopen_queue. - */ - void (*bdrv_child_perm)(BlockDriverState *bs, BdrvChild *c, - BdrvChildRole role, - BlockReopenQueue *reopen_queue, - uint64_t parent_perm, uint64_t parent_shared, - uint64_t *nperm, uint64_t *nshared); - - bool (*bdrv_supports_persistent_dirty_bitmap)(BlockDriverState *bs); - bool (*bdrv_co_can_store_new_dirty_bitmap)(BlockDriverState *bs, - const char *name, - uint32_t granularity, - Error **errp); - int (*bdrv_co_remove_persistent_dirty_bitmap)(BlockDriverState *bs, - const char *name, - Error **errp); - - /** - * Register/unregister a buffer for I/O. For example, when the driver is - * interested to know the memory areas that will later be used in iovs, so - * that it can do IOMMU mapping with VFIO etc., in order to get better - * performance. In the case of VFIO drivers, this callback is used to do - * DMA mapping for hot buffers. - */ - void (*bdrv_register_buf)(BlockDriverState *bs, void *host, size_t size); - void (*bdrv_unregister_buf)(BlockDriverState *bs, void *host); - QLIST_ENTRY(BlockDriver) list; - - /* Pointer to a NULL-terminated array of names of strong options - * that can be specified for bdrv_open(). A strong option is one - * that changes the data of a BDS. - * If this pointer is NULL, the array is considered empty. - * "filename" and "driver" are always considered strong. */ - const char *const *strong_runtime_opts; -}; - -static inline bool block_driver_can_compress(BlockDriver *drv) -{ - return drv->bdrv_co_pwritev_compressed || - drv->bdrv_co_pwritev_compressed_part; -} - -typedef struct BlockLimits { - /* Alignment requirement, in bytes, for offset/length of I/O - * requests. 
Must be a power of 2 less than INT_MAX; defaults to - * 1 for drivers with modern byte interfaces, and to 512 - * otherwise. */ - uint32_t request_alignment; - - /* Maximum number of bytes that can be discarded at once (since it - * is signed, it must be < 2G, if set). Must be multiple of - * pdiscard_alignment, but need not be power of 2. May be 0 if no - * inherent 32-bit limit */ - int32_t max_pdiscard; - - /* Optimal alignment for discard requests in bytes. A power of 2 - * is best but not mandatory. Must be a multiple of - * bl.request_alignment, and must be less than max_pdiscard if - * that is set. May be 0 if bl.request_alignment is good enough */ - uint32_t pdiscard_alignment; - - /* Maximum number of bytes that can zeroized at once (since it is - * signed, it must be < 2G, if set). Must be multiple of - * pwrite_zeroes_alignment. May be 0 if no inherent 32-bit limit */ - int32_t max_pwrite_zeroes; - - /* Optimal alignment for write zeroes requests in bytes. A power - * of 2 is best but not mandatory. Must be a multiple of - * bl.request_alignment, and must be less than max_pwrite_zeroes - * if that is set. May be 0 if bl.request_alignment is good - * enough */ - uint32_t pwrite_zeroes_alignment; - - /* Optimal transfer length in bytes. A power of 2 is best but not - * mandatory. Must be a multiple of bl.request_alignment, or 0 if - * no preferred size */ - uint32_t opt_transfer; - - /* Maximal transfer length in bytes. Need not be power of 2, but - * must be multiple of opt_transfer and bl.request_alignment, or 0 - * for no 32-bit limit. For now, anything larger than INT_MAX is - * clamped down. */ - uint32_t max_transfer; - - /* Maximal hardware transfer length in bytes. Applies whenever - * transfers to the device bypass the kernel I/O scheduler, for - * example with SG_IO. If larger than max_transfer or if zero, - * blk_get_max_hw_transfer will fall back to max_transfer. - */ - uint64_t max_hw_transfer; - - /* memory alignment, in bytes so that no bounce buffer is needed */ - size_t min_mem_alignment; - - /* memory alignment, in bytes, for bounce buffer */ - size_t opt_mem_alignment; - - /* maximum number of iovec elements */ - int max_iov; -} BlockLimits; - -typedef struct BdrvOpBlocker BdrvOpBlocker; - -typedef struct BdrvAioNotifier { - void (*attached_aio_context)(AioContext *new_context, void *opaque); - void (*detach_aio_context)(void *opaque); - - void *opaque; - bool deleted; - - QLIST_ENTRY(BdrvAioNotifier) list; -} BdrvAioNotifier; - -struct BdrvChildClass { - /* If true, bdrv_replace_node() doesn't change the node this BdrvChild - * points to. */ - bool stay_at_node; - - /* If true, the parent is a BlockDriverState and bdrv_next_all_states() - * will return it. This information is used for drain_all, where every node - * will be drained separately, so the drain only needs to be propagated to - * non-BDS parents. */ - bool parent_is_bds; - - void (*inherit_options)(BdrvChildRole role, bool parent_is_format, - int *child_flags, QDict *child_options, - int parent_flags, QDict *parent_options); - - void (*change_media)(BdrvChild *child, bool load); - void (*resize)(BdrvChild *child); - - /* Returns a name that is supposedly more useful for human users than the - * node name for identifying the node in question (in particular, a BB - * name), or NULL if the parent can't provide a better name. */ - const char *(*get_name)(BdrvChild *child); - - /* Returns a malloced string that describes the parent of the child for a - * human reader. 
This could be a node-name, BlockBackend name, qdev ID or - * QOM path of the device owning the BlockBackend, job type and ID etc. The - * caller is responsible for freeing the memory. */ - char *(*get_parent_desc)(BdrvChild *child); - - /* - * If this pair of functions is implemented, the parent doesn't issue new - * requests after returning from .drained_begin() until .drained_end() is - * called. - * - * These functions must not change the graph (and therefore also must not - * call aio_poll(), which could change the graph indirectly). - * - * If drained_end() schedules background operations, it must atomically - * increment *drained_end_counter for each such operation and atomically - * decrement it once the operation has settled. - * - * Note that this can be nested. If drained_begin() was called twice, new - * I/O is allowed only after drained_end() was called twice, too. - */ - void (*drained_begin)(BdrvChild *child); - void (*drained_end)(BdrvChild *child, int *drained_end_counter); - - /* - * Returns whether the parent has pending requests for the child. This - * callback is polled after .drained_begin() has been called until all - * activity on the child has stopped. - */ - bool (*drained_poll)(BdrvChild *child); - - /* Notifies the parent that the child has been activated/inactivated (e.g. - * when migration is completing) and it can start/stop requesting - * permissions and doing I/O on it. */ - void (*activate)(BdrvChild *child, Error **errp); - int (*inactivate)(BdrvChild *child); - - void (*attach)(BdrvChild *child); - void (*detach)(BdrvChild *child); - - /* Notifies the parent that the filename of its child has changed (e.g. - * because the direct child was removed from the backing chain), so that it - * can update its reference. */ - int (*update_filename)(BdrvChild *child, BlockDriverState *new_base, - const char *filename, Error **errp); - - bool (*can_set_aio_ctx)(BdrvChild *child, AioContext *ctx, - GSList **ignore, Error **errp); - void (*set_aio_ctx)(BdrvChild *child, AioContext *ctx, GSList **ignore); - - AioContext *(*get_parent_aio_context)(BdrvChild *child); -}; - -extern const BdrvChildClass child_of_bds; - -struct BdrvChild { - BlockDriverState *bs; - char *name; - const BdrvChildClass *klass; - BdrvChildRole role; - void *opaque; - - /** - * Granted permissions for operating on this BdrvChild (BLK_PERM_* bitmask) - */ - uint64_t perm; - - /** - * Permissions that can still be granted to other users of @bs while this - * BdrvChild is still attached to it. (BLK_PERM_* bitmask) - */ - uint64_t shared_perm; - - /* - * This link is frozen: the child can neither be replaced nor - * detached from the parent. - */ - bool frozen; - - /* - * How many times the parent of this child has been drained - * (through klass->drained_*). - * Usually, this is equal to bs->quiesce_counter (potentially - * reduced by bdrv_drain_all_count). It may differ while the - * child is entering or leaving a drained section. - */ - int parent_quiesce_counter; - - QLIST_ENTRY(BdrvChild) next; - QLIST_ENTRY(BdrvChild) next_parent; -}; - -/* - * Note: the function bdrv_append() copies and swaps contents of - * BlockDriverStates, so if you add new fields to this struct, please - * inspect bdrv_append() to determine if the new fields need to be - * copied as well. - */ -struct BlockDriverState { - /* Protected by big QEMU lock or read-only after opening. No special - * locking needed during I/O... 
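For the BlockLimits fields documented a little earlier, a driver advertises its limits from the .bdrv_refresh_limits() callback declared above. The values below are purely illustrative and not taken from this patch; KiB/MiB/GiB are assumed from qemu/units.h and IOV_MAX from <limits.h>:

static void example_refresh_limits(BlockDriverState *bs, Error **errp)
{
    bs->bl.request_alignment       = 4 * KiB;  /* power of 2, < INT_MAX */
    bs->bl.opt_transfer            = 64 * KiB; /* multiple of request_alignment */
    bs->bl.max_transfer            = 2 * MiB;  /* multiple of opt_transfer */
    bs->bl.pwrite_zeroes_alignment = 64 * KiB;
    bs->bl.max_pwrite_zeroes       = 1 * GiB;  /* signed field, so keep it < 2G */
    bs->bl.max_iov                 = IOV_MAX;
}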
- */ - int open_flags; /* flags used to open the file, re-used for re-open */ - bool encrypted; /* if true, the media is encrypted */ - bool sg; /* if true, the device is a /dev/sg* */ - bool probed; /* if true, format was probed rather than specified */ - bool force_share; /* if true, always allow all shared permissions */ - bool implicit; /* if true, this filter node was automatically inserted */ - - BlockDriver *drv; /* NULL means no media */ - void *opaque; - - AioContext *aio_context; /* event loop used for fd handlers, timers, etc */ - /* long-running tasks intended to always use the same AioContext as this - * BDS may register themselves in this list to be notified of changes - * regarding this BDS's context */ - QLIST_HEAD(, BdrvAioNotifier) aio_notifiers; - bool walking_aio_notifiers; /* to make removal during iteration safe */ - - char filename[PATH_MAX]; - /* - * If not empty, this image is a diff in relation to backing_file. - * Note that this is the name given in the image header and - * therefore may or may not be equal to .backing->bs->filename. - * If this field contains a relative path, it is to be resolved - * relatively to the overlay's location. - */ - char backing_file[PATH_MAX]; - /* - * The backing filename indicated by the image header. Contrary - * to backing_file, if we ever open this file, auto_backing_file - * is replaced by the resulting BDS's filename (i.e. after a - * bdrv_refresh_filename() run). - */ - char auto_backing_file[PATH_MAX]; - char backing_format[16]; /* if non-zero and backing_file exists */ - - QDict *full_open_options; - char exact_filename[PATH_MAX]; - - BdrvChild *backing; - BdrvChild *file; - - /* I/O Limits */ - BlockLimits bl; - - /* - * Flags honored during pread - */ - unsigned int supported_read_flags; - /* Flags honored during pwrite (so far: BDRV_REQ_FUA, - * BDRV_REQ_WRITE_UNCHANGED). - * If a driver does not support BDRV_REQ_WRITE_UNCHANGED, those - * writes will be issued as normal writes without the flag set. - * This is important to note for drivers that do not explicitly - * request a WRITE permission for their children and instead take - * the same permissions as their parent did (this is commonly what - * block filters do). Such drivers have to be aware that the - * parent may have taken a WRITE_UNCHANGED permission only and is - * issuing such requests. Drivers either must make sure that - * these requests do not result in plain WRITE accesses (usually - * by supporting BDRV_REQ_WRITE_UNCHANGED, and then forwarding - * every incoming write request as-is, including potentially that - * flag), or they have to explicitly take the WRITE permission for - * their children. */ - unsigned int supported_write_flags; - /* Flags honored during pwrite_zeroes (so far: BDRV_REQ_FUA, - * BDRV_REQ_MAY_UNMAP, BDRV_REQ_WRITE_UNCHANGED) */ - unsigned int supported_zero_flags; - /* - * Flags honoured during truncate (so far: BDRV_REQ_ZERO_WRITE). - * - * If BDRV_REQ_ZERO_WRITE is given, the truncate operation must make sure - * that any added space reads as all zeros. If this can't be guaranteed, - * the operation must fail. - */ - unsigned int supported_truncate_flags; - - /* the following member gives a name to every node on the bs graph. 
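The supported_write_flags/supported_zero_flags description just above boils down to a couple of assignments in a filter driver's open function. The sketch below mirrors the pattern used by in-tree filters but is not code from this patch; the function is hypothetical and bs->file is assumed to be open already:

static int example_filter_open(BlockDriverState *bs, QDict *options,
                               int flags, Error **errp)
{
    /* bs->file is assumed to have been opened earlier in this function. */
    bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED |
        (BDRV_REQ_FUA & bs->file->bs->supported_write_flags);
    bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
        ((BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP) &
         bs->file->bs->supported_zero_flags);
    return 0;
}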
*/ - char node_name[32]; - /* element of the list of named nodes building the graph */ - QTAILQ_ENTRY(BlockDriverState) node_list; - /* element of the list of all BlockDriverStates (all_bdrv_states) */ - QTAILQ_ENTRY(BlockDriverState) bs_list; - /* element of the list of monitor-owned BDS */ - QTAILQ_ENTRY(BlockDriverState) monitor_list; - int refcnt; - - /* operation blockers */ - QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX]; - - /* The node that this node inherited default options from (and a reopen on - * which can affect this node by changing these defaults). This is always a - * parent node of this node. */ - BlockDriverState *inherits_from; - QLIST_HEAD(, BdrvChild) children; - QLIST_HEAD(, BdrvChild) parents; - - QDict *options; - QDict *explicit_options; - BlockdevDetectZeroesOptions detect_zeroes; - - /* The error object in use for blocking operations on backing_hd */ - Error *backing_blocker; - - /* Protected by AioContext lock */ - - /* If we are reading a disk image, give its size in sectors. - * Generally read-only; it is written to by load_snapshot and - * save_snaphost, but the block layer is quiescent during those. - */ - int64_t total_sectors; - - /* threshold limit for writes, in bytes. "High water mark". */ - uint64_t write_threshold_offset; - - /* Writing to the list requires the BQL _and_ the dirty_bitmap_mutex. - * Reading from the list can be done with either the BQL or the - * dirty_bitmap_mutex. Modifying a bitmap only requires - * dirty_bitmap_mutex. */ - QemuMutex dirty_bitmap_mutex; - QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps; - - /* Offset after the highest byte written to */ - Stat64 wr_highest_offset; - - /* If true, copy read backing sectors into image. Can be >1 if more - * than one client has requested copy-on-read. Accessed with atomic - * ops. - */ - int copy_on_read; - - /* number of in-flight requests; overall and serialising. - * Accessed with atomic ops. - */ - unsigned int in_flight; - unsigned int serialising_in_flight; - - /* counter for nested bdrv_io_plug. - * Accessed with atomic ops. - */ - unsigned io_plugged; - - /* do we need to tell the quest if we have a volatile write cache? */ - int enable_write_cache; - - /* Accessed with atomic ops. */ - int quiesce_counter; - int recursive_quiesce_counter; - - unsigned int write_gen; /* Current data generation */ - - /* Protected by reqs_lock. */ - CoMutex reqs_lock; - QLIST_HEAD(, BdrvTrackedRequest) tracked_requests; - CoQueue flush_queue; /* Serializing flush queue */ - bool active_flush_req; /* Flush request in flight? */ - - /* Only read/written by whoever has set active_flush_req to true. */ - unsigned int flushed_gen; /* Flushed write generation */ - - /* BdrvChild links to this node may never be frozen */ - bool never_freeze; -}; - -struct BlockBackendRootState { - int open_flags; - BlockdevDetectZeroesOptions detect_zeroes; -}; - -typedef enum BlockMirrorBackingMode { - /* Reuse the existing backing chain from the source for the target. - * - sync=full: Set backing BDS to NULL. - * - sync=top: Use source's backing BDS. - * - sync=none: Use source as the backing BDS. 
*/ - MIRROR_SOURCE_BACKING_CHAIN, - - /* Open the target's backing chain completely anew */ - MIRROR_OPEN_BACKING_CHAIN, - - /* Do not change the target's backing BDS after job completion */ - MIRROR_LEAVE_BACKING_CHAIN, -} BlockMirrorBackingMode; - - -/* Essential block drivers which must always be statically linked into qemu, and - * which therefore can be accessed without using bdrv_find_format() */ -extern BlockDriver bdrv_file; -extern BlockDriver bdrv_raw; -extern BlockDriver bdrv_qcow2; - -int coroutine_fn bdrv_co_preadv(BdrvChild *child, - int64_t offset, int64_t bytes, QEMUIOVector *qiov, - BdrvRequestFlags flags); -int coroutine_fn bdrv_co_preadv_part(BdrvChild *child, - int64_t offset, int64_t bytes, - QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags); -int coroutine_fn bdrv_co_pwritev(BdrvChild *child, - int64_t offset, int64_t bytes, QEMUIOVector *qiov, - BdrvRequestFlags flags); -int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, - int64_t offset, int64_t bytes, - QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags); - -static inline int coroutine_fn bdrv_co_pread(BdrvChild *child, - int64_t offset, unsigned int bytes, void *buf, BdrvRequestFlags flags) -{ - QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); - - return bdrv_co_preadv(child, offset, bytes, &qiov, flags); -} - -static inline int coroutine_fn bdrv_co_pwrite(BdrvChild *child, - int64_t offset, unsigned int bytes, void *buf, BdrvRequestFlags flags) -{ - QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); - - return bdrv_co_pwritev(child, offset, bytes, &qiov, flags); -} - -extern unsigned int bdrv_drain_all_count; -void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent); -void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent); - -bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req, - uint64_t align); -BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs); - -int get_tmp_filename(char *filename, int size); -BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size, - const char *filename); - -void bdrv_parse_filename_strip_prefix(const char *filename, const char *prefix, - QDict *options); - -bool bdrv_backing_overridden(BlockDriverState *bs); - - -/** - * bdrv_add_aio_context_notifier: - * - * If a long-running job intends to be always run in the same AioContext as a - * certain BDS, it may use this function to be notified of changes regarding the - * association of the BDS to an AioContext. - * - * attached_aio_context() is called after the target BDS has been attached to a - * new AioContext; detach_aio_context() is called before the target BDS is being - * detached from its old AioContext. - */ -void bdrv_add_aio_context_notifier(BlockDriverState *bs, - void (*attached_aio_context)(AioContext *new_context, void *opaque), - void (*detach_aio_context)(void *opaque), void *opaque); - -/** - * bdrv_remove_aio_context_notifier: - * - * Unsubscribe of change notifications regarding the BDS's AioContext. The - * parameters given here have to be the same as those given to - * bdrv_add_aio_context_notifier(). - */ -void bdrv_remove_aio_context_notifier(BlockDriverState *bs, - void (*aio_context_attached)(AioContext *, - void *), - void (*aio_context_detached)(void *), - void *opaque); - -/** - * bdrv_wakeup: - * @bs: The BlockDriverState for which an I/O operation has been completed. - * - * Wake up the main thread if it is waiting on BDRV_POLL_WHILE. 
During - * synchronous I/O on a BlockDriverState that is attached to another - * I/O thread, the main thread lets the I/O thread's event loop run, - * waiting for the I/O operation to complete. A bdrv_wakeup will wake - * up the main thread if necessary. - * - * Manual calls to bdrv_wakeup are rarely necessary, because - * bdrv_dec_in_flight already calls it. - */ -void bdrv_wakeup(BlockDriverState *bs); - -#ifdef _WIN32 -int is_windows_drive(const char *filename); -#endif - -/** - * stream_start: - * @job_id: The id of the newly-created job, or %NULL to use the - * device name of @bs. - * @bs: Block device to operate on. - * @base: Block device that will become the new base, or %NULL to - * flatten the whole backing file chain onto @bs. - * @backing_file_str: The file name that will be written to @bs as the - * the new backing file if the job completes. Ignored if @base is %NULL. - * @creation_flags: Flags that control the behavior of the Job lifetime. - * See @BlockJobCreateFlags - * @speed: The maximum speed, in bytes per second, or 0 for unlimited. - * @on_error: The action to take upon error. - * @filter_node_name: The node name that should be assigned to the filter - * driver that the stream job inserts into the graph above - * @bs. NULL means that a node name should be autogenerated. - * @errp: Error object. - * - * Start a streaming operation on @bs. Clusters that are unallocated - * in @bs, but allocated in any image between @base and @bs (both - * exclusive) will be written to @bs. At the end of a successful - * streaming job, the backing file of @bs will be changed to - * @backing_file_str in the written image and to @base in the live - * BlockDriverState. - */ -void stream_start(const char *job_id, BlockDriverState *bs, - BlockDriverState *base, const char *backing_file_str, - BlockDriverState *bottom, - int creation_flags, int64_t speed, - BlockdevOnError on_error, - const char *filter_node_name, - Error **errp); - -/** - * commit_start: - * @job_id: The id of the newly-created job, or %NULL to use the - * device name of @bs. - * @bs: Active block device. - * @top: Top block device to be committed. - * @base: Block device that will be written into, and become the new top. - * @creation_flags: Flags that control the behavior of the Job lifetime. - * See @BlockJobCreateFlags - * @speed: The maximum speed, in bytes per second, or 0 for unlimited. - * @on_error: The action to take upon error. - * @backing_file_str: String to use as the backing file in @top's overlay - * @filter_node_name: The node name that should be assigned to the filter - * driver that the commit job inserts into the graph above @top. NULL means - * that a node name should be autogenerated. - * @errp: Error object. - * - */ -void commit_start(const char *job_id, BlockDriverState *bs, - BlockDriverState *base, BlockDriverState *top, - int creation_flags, int64_t speed, - BlockdevOnError on_error, const char *backing_file_str, - const char *filter_node_name, Error **errp); -/** - * commit_active_start: - * @job_id: The id of the newly-created job, or %NULL to use the - * device name of @bs. - * @bs: Active block device to be committed. - * @base: Block device that will be written into, and become the new top. - * @creation_flags: Flags that control the behavior of the Job lifetime. - * See @BlockJobCreateFlags - * @speed: The maximum speed, in bytes per second, or 0 for unlimited. - * @on_error: The action to take upon error. 
- * @filter_node_name: The node name that should be assigned to the filter - * driver that the commit job inserts into the graph above @bs. NULL means that - * a node name should be autogenerated. - * @cb: Completion function for the job. - * @opaque: Opaque pointer value passed to @cb. - * @auto_complete: Auto complete the job. - * @errp: Error object. - * - */ -BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs, - BlockDriverState *base, int creation_flags, - int64_t speed, BlockdevOnError on_error, - const char *filter_node_name, - BlockCompletionFunc *cb, void *opaque, - bool auto_complete, Error **errp); -/* - * mirror_start: - * @job_id: The id of the newly-created job, or %NULL to use the - * device name of @bs. - * @bs: Block device to operate on. - * @target: Block device to write to. - * @replaces: Block graph node name to replace once the mirror is done. Can - * only be used when full mirroring is selected. - * @creation_flags: Flags that control the behavior of the Job lifetime. - * See @BlockJobCreateFlags - * @speed: The maximum speed, in bytes per second, or 0 for unlimited. - * @granularity: The chosen granularity for the dirty bitmap. - * @buf_size: The amount of data that can be in flight at one time. - * @mode: Whether to collapse all images in the chain to the target. - * @backing_mode: How to establish the target's backing chain after completion. - * @zero_target: Whether the target should be explicitly zero-initialized - * @on_source_error: The action to take upon error reading from the source. - * @on_target_error: The action to take upon error writing to the target. - * @unmap: Whether to unmap target where source sectors only contain zeroes. - * @filter_node_name: The node name that should be assigned to the filter - * driver that the mirror job inserts into the graph above @bs. NULL means that - * a node name should be autogenerated. - * @copy_mode: When to trigger writes to the target. - * @errp: Error object. - * - * Start a mirroring operation on @bs. Clusters that are allocated - * in @bs will be written to @target until the job is cancelled or - * manually completed. At the end of a successful mirroring job, - * @bs will be switched to read from @target. - */ -void mirror_start(const char *job_id, BlockDriverState *bs, - BlockDriverState *target, const char *replaces, - int creation_flags, int64_t speed, - uint32_t granularity, int64_t buf_size, - MirrorSyncMode mode, BlockMirrorBackingMode backing_mode, - bool zero_target, - BlockdevOnError on_source_error, - BlockdevOnError on_target_error, - bool unmap, const char *filter_node_name, - MirrorCopyMode copy_mode, Error **errp); - -/* - * backup_job_create: - * @job_id: The id of the newly-created job, or %NULL to use the - * device name of @bs. - * @bs: Block device to operate on. - * @target: Block device to write to. - * @speed: The maximum speed, in bytes per second, or 0 for unlimited. - * @sync_mode: What parts of the disk image should be copied to the destination. - * @sync_bitmap: The dirty bitmap if sync_mode is 'bitmap' or 'incremental' - * @bitmap_mode: The bitmap synchronization policy to use. - * @perf: Performance options. All actual fields assumed to be present, - * all ".has_*" fields are ignored. - * @on_source_error: The action to take upon error reading from the source. - * @on_target_error: The action to take upon error writing to the target. - * @creation_flags: Flags that control the behavior of the Job lifetime. 
- * See @BlockJobCreateFlags - * @cb: Completion function for the job. - * @opaque: Opaque pointer value passed to @cb. - * @txn: Transaction that this job is part of (may be NULL). - * - * Create a backup operation on @bs. Clusters in @bs are written to @target - * until the job is cancelled or manually completed. - */ -BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, - BlockDriverState *target, int64_t speed, - MirrorSyncMode sync_mode, - BdrvDirtyBitmap *sync_bitmap, - BitmapSyncMode bitmap_mode, - bool compress, - const char *filter_node_name, - BackupPerf *perf, - BlockdevOnError on_source_error, - BlockdevOnError on_target_error, - int creation_flags, - BlockCompletionFunc *cb, void *opaque, - JobTxn *txn, Error **errp); - -BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, - const char *child_name, - const BdrvChildClass *child_class, - BdrvChildRole child_role, - uint64_t perm, uint64_t shared_perm, - void *opaque, Error **errp); -void bdrv_root_unref_child(BdrvChild *child); - -void bdrv_get_cumulative_perm(BlockDriverState *bs, uint64_t *perm, - uint64_t *shared_perm); - -/** - * Sets a BdrvChild's permissions. Avoid if the parent is a BDS; use - * bdrv_child_refresh_perms() instead and make the parent's - * .bdrv_child_perm() implementation return the correct values. - */ -int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared, - Error **errp); - -/** - * Calls bs->drv->bdrv_child_perm() and updates the child's permission - * masks with the result. - * Drivers should invoke this function whenever an event occurs that - * makes their .bdrv_child_perm() implementation return different - * values than before, but which will not result in the block layer - * automatically refreshing the permissions. - */ -int bdrv_child_refresh_perms(BlockDriverState *bs, BdrvChild *c, Error **errp); - -bool bdrv_recurse_can_replace(BlockDriverState *bs, - BlockDriverState *to_replace); - -/* - * Default implementation for BlockDriver.bdrv_child_perm() that can - * be used by block filters and image formats, as long as they use the - * child_of_bds child class and set an appropriate BdrvChildRole. 
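In practice the default implementation documented above means a filter or format driver rarely writes its own .bdrv_child_perm; wiring in bdrv_default_perms is enough as long as its children use child_of_bds. The driver below is a made-up illustration, not part of this patch:

static BlockDriver bdrv_example_filter = {
    .format_name     = "example-filter",   /* hypothetical driver */
    .is_filter       = true,
    .bdrv_child_perm = bdrv_default_perms, /* the default documented above */
    /* ... I/O callbacks omitted ... */
};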
- */ -void bdrv_default_perms(BlockDriverState *bs, BdrvChild *c, - BdrvChildRole role, BlockReopenQueue *reopen_queue, - uint64_t perm, uint64_t shared, - uint64_t *nperm, uint64_t *nshared); - -const char *bdrv_get_parent_name(const BlockDriverState *bs); -void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp); -bool blk_dev_has_removable_media(BlockBackend *blk); -bool blk_dev_has_tray(BlockBackend *blk); -void blk_dev_eject_request(BlockBackend *blk, bool force); -bool blk_dev_is_tray_open(BlockBackend *blk); -bool blk_dev_is_medium_locked(BlockBackend *blk); - -void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes); - -void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out); -void bdrv_restore_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *backup); -bool bdrv_dirty_bitmap_merge_internal(BdrvDirtyBitmap *dest, - const BdrvDirtyBitmap *src, - HBitmap **backup, bool lock); - -void bdrv_inc_in_flight(BlockDriverState *bs); -void bdrv_dec_in_flight(BlockDriverState *bs); - -void blockdev_close_all_bdrv_states(void); - -int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset, - BdrvChild *dst, int64_t dst_offset, - int64_t bytes, - BdrvRequestFlags read_flags, - BdrvRequestFlags write_flags); -int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset, - BdrvChild *dst, int64_t dst_offset, - int64_t bytes, - BdrvRequestFlags read_flags, - BdrvRequestFlags write_flags); - -int refresh_total_sectors(BlockDriverState *bs, int64_t hint); - -void bdrv_set_monitor_owned(BlockDriverState *bs); -BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp); - -/** - * Simple implementation of bdrv_co_create_opts for protocol drivers - * which only support creation via opening a file - * (usually existing raw storage device) - */ -int coroutine_fn bdrv_co_create_opts_simple(BlockDriver *drv, - const char *filename, - QemuOpts *opts, - Error **errp); -extern QemuOptsList bdrv_create_opts_simple; - -BdrvDirtyBitmap *block_dirty_bitmap_lookup(const char *node, - const char *name, - BlockDriverState **pbs, - Error **errp); -BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *node, const char *target, - BlockDirtyBitmapMergeSourceList *bms, - HBitmap **backup, Error **errp); -BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name, - bool release, - BlockDriverState **bitmap_bs, - Error **errp); - -BdrvChild *bdrv_cow_child(BlockDriverState *bs); -BdrvChild *bdrv_filter_child(BlockDriverState *bs); -BdrvChild *bdrv_filter_or_cow_child(BlockDriverState *bs); -BdrvChild *bdrv_primary_child(BlockDriverState *bs); -BlockDriverState *bdrv_skip_implicit_filters(BlockDriverState *bs); -BlockDriverState *bdrv_skip_filters(BlockDriverState *bs); -BlockDriverState *bdrv_backing_chain_next(BlockDriverState *bs); - -static inline BlockDriverState *child_bs(BdrvChild *child) -{ - return child ? child->bs : NULL; -} - -static inline BlockDriverState *bdrv_cow_bs(BlockDriverState *bs) -{ - return child_bs(bdrv_cow_child(bs)); -} - -static inline BlockDriverState *bdrv_filter_bs(BlockDriverState *bs) -{ - return child_bs(bdrv_filter_child(bs)); -} - -static inline BlockDriverState *bdrv_filter_or_cow_bs(BlockDriverState *bs) -{ - return child_bs(bdrv_filter_or_cow_child(bs)); -} - -static inline BlockDriverState *bdrv_primary_bs(BlockDriverState *bs) -{ - return child_bs(bdrv_primary_child(bs)); -} - -/** - * End all quiescent sections started by bdrv_drain_all_begin(). 
This is - * needed when deleting a BDS before bdrv_drain_all_end() is called. - * - * NOTE: this is an internal helper for bdrv_close() *only*. No one else - * should call it. - */ -void bdrv_drain_all_end_quiesce(BlockDriverState *bs); +/* DO NOT ADD ANYTHING IN HERE. USE ONE OF THE HEADERS INCLUDED ABOVE */ #endif /* BLOCK_INT_H */ diff --git a/include/block/blockjob.h b/include/block/blockjob.h index d200f33c10..03032b2eca 100644 --- a/include/block/blockjob.h +++ b/include/block/blockjob.h @@ -40,24 +40,38 @@ typedef struct BlockJobDriver BlockJobDriver; * Long-running operation on a BlockDriverState. */ typedef struct BlockJob { - /** Data belonging to the generic Job infrastructure */ + /** + * Data belonging to the generic Job infrastructure. + * Protected by job mutex. + */ Job job; - /** The block device on which the job is operating. */ - BlockBackend *blk; - - /** Status that is published by the query-block-jobs QMP API */ + /** + * Status that is published by the query-block-jobs QMP API. + * Protected by job mutex. + */ BlockDeviceIoStatus iostatus; - /** Speed that was set with @block_job_set_speed. */ + /** + * Speed that was set with @block_job_set_speed. + * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE). + */ int64_t speed; - /** Rate limiting data structure for implementing @speed. */ + /** + * Rate limiting data structure for implementing @speed. + * RateLimit API is thread-safe. + */ RateLimit limit; - /** Block other operations when block job is running */ + /** + * Block other operations when block job is running. + * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE). + */ Error *blocker; + /** All notifiers are set once in block_job_create() and never modified. */ + /** Called when a cancelled job is finalised. */ Notifier finalize_cancelled_notifier; @@ -73,20 +87,31 @@ typedef struct BlockJob { /** Called when the job coroutine yields or terminates */ Notifier idle_notifier; - /** BlockDriverStates that are involved in this block job */ + /** + * BlockDriverStates that are involved in this block job. + * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE). + */ GSList *nodes; } BlockJob; +/* + * Global state (GS) API. These functions run under the BQL. + * + * See include/block/block-global-state.h for more information about + * the GS API. + */ + /** - * block_job_next: + * block_job_next_locked: * @job: A block job, or %NULL. * * Get the next element from the list of block jobs after @job, or the * first one if @job is %NULL. * * Returns the requested job, or %NULL if there are no more jobs left. + * Called with job lock held. */ -BlockJob *block_job_next(BlockJob *job); +BlockJob *block_job_next_locked(BlockJob *job); /** * block_job_get: @@ -95,9 +120,13 @@ BlockJob *block_job_next(BlockJob *job); * Get the block job identified by @id (which must not be %NULL). * * Returns the requested job, or %NULL if it doesn't exist. + * Called with job lock *not* held. */ BlockJob *block_job_get(const char *id); +/* Same as block_job_get(), but called with job lock held. */ +BlockJob *block_job_get_locked(const char *id); + /** * block_job_add_bdrv: * @job: A block job @@ -131,32 +160,53 @@ void block_job_remove_all_bdrv(BlockJob *job); bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs); /** - * block_job_set_speed: + * block_job_set_speed_locked: * @job: The job to set the speed for. * @speed: The new value * @errp: Error object. 
* * Set a rate-limiting parameter for the job; the actual meaning may * vary depending on the job type. + * + * Called with job lock held, but might release it temporarily. */ -bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp); +bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp); /** - * block_job_query: + * block_job_query_locked: * @job: The job to get information about. * * Return information about a job. + * + * Called with job lock held. */ -BlockJobInfo *block_job_query(BlockJob *job, Error **errp); +BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp); /** - * block_job_iostatus_reset: + * block_job_iostatus_reset_locked: * @job: The job whose I/O status should be reset. * * Reset I/O status on @job and on BlockDriverState objects it uses, * other than job->blk. + * + * Called with job lock held. + */ +void block_job_iostatus_reset_locked(BlockJob *job); + +/* + * block_job_get_aio_context: + * + * Returns aio context associated with a block job. + */ +AioContext *block_job_get_aio_context(BlockJob *job); + + +/* + * Common functions that are neither I/O nor Global State. + * + * See include/block/block-common.h for more information about + * the Common API. */ -void block_job_iostatus_reset(BlockJob *job); /** * block_job_is_internal: diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h index 6633d83da2..6bd9ae2b20 100644 --- a/include/block/blockjob_int.h +++ b/include/block/blockjob_int.h @@ -38,6 +38,13 @@ struct BlockJobDriver { /** Generic JobDriver callbacks and settings */ JobDriver job_driver; + /* + * I/O API functions. These functions are thread-safe. + * + * See include/block/block-io.h for more information about + * the I/O API. + */ + /* * Returns whether the job has pending requests for the child or will * submit new requests before the next pause point. This callback is polled @@ -46,6 +53,13 @@ struct BlockJobDriver { */ bool (*drained_poll)(BlockJob *job); + /* + * Global state (GS) API. These functions run under the BQL. + * + * See include/block/block-global-state.h for more information about + * the GS API. + */ + /* * If the callback is not NULL, it will be invoked before the job is * resumed in a new AioContext. This is the place to move any resources @@ -56,6 +70,13 @@ struct BlockJobDriver { void (*set_speed)(BlockJob *job, int64_t speed); }; +/* + * Global state (GS) API. These functions run under the BQL. + * + * See include/block/block-global-state.h for more information about + * the GS API. + */ + /** * block_job_create: * @job_id: The id of the newly-created job, or %NULL to have one @@ -98,6 +119,13 @@ void block_job_free(Job *job); */ void block_job_user_resume(Job *job); +/* + * I/O API functions. These functions are thread-safe. + * + * See include/block/block-io.h for more information about + * the I/O API. 
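To make the new *_locked naming concrete: callers take the job lock once and then use these helpers while holding it. The sketch below assumes the JOB_LOCK_GUARD() helper from the job-mutex work this header refers to; it is not code from this patch:

static void example_walk_block_jobs(void)
{
    BlockJob *job;

    JOB_LOCK_GUARD();   /* hold the job mutex across the whole walk */
    for (job = block_job_next_locked(NULL); job;
         job = block_job_next_locked(job)) {
        /* e.g. call block_job_query_locked(job, ...) while the lock is held */
    }
}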
+ */ + /** * block_job_ratelimit_get_delay: * diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h index 40950ae3d5..6528336c4c 100644 --- a/include/block/dirty-bitmap.h +++ b/include/block/dirty-bitmap.h @@ -77,7 +77,7 @@ void bdrv_dirty_bitmap_set_persistence(BdrvDirtyBitmap *bitmap, bool persistent); void bdrv_dirty_bitmap_set_inconsistent(BdrvDirtyBitmap *bitmap); void bdrv_dirty_bitmap_set_busy(BdrvDirtyBitmap *bitmap, bool busy); -void bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src, +bool bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src, HBitmap **backup, Error **errp); void bdrv_dirty_bitmap_skip_store(BdrvDirtyBitmap *bitmap, bool skip); bool bdrv_dirty_bitmap_get(BdrvDirtyBitmap *bitmap, int64_t offset); @@ -115,6 +115,8 @@ int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, int64_t offset, bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap, int64_t start, int64_t end, int64_t max_dirty_count, int64_t *dirty_start, int64_t *dirty_count); +bool bdrv_dirty_bitmap_status(BdrvDirtyBitmap *bitmap, int64_t offset, + int64_t bytes, int64_t *count); BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap, Error **errp); diff --git a/include/block/nbd.h b/include/block/nbd.h index 78d101b774..4ede3b2bd0 100644 --- a/include/block/nbd.h +++ b/include/block/nbd.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016-2020 Red Hat, Inc. + * Copyright (C) 2016-2022 Red Hat, Inc. * Copyright (C) 2005 Anthony Liguori * * Network Block Device @@ -344,8 +344,9 @@ void nbd_client_new(QIOChannelSocket *sioc, void nbd_client_get(NBDClient *client); void nbd_client_put(NBDClient *client); -void nbd_server_is_qemu_nbd(bool value); +void nbd_server_is_qemu_nbd(int max_connections); bool nbd_server_is_running(void); +int nbd_server_max_connections(void); void nbd_server_start(SocketAddress *addr, const char *tls_creds, const char *tls_authz, uint32_t max_connections, Error **errp); @@ -415,13 +416,14 @@ NBDClientConnection *nbd_client_connection_new(const SocketAddress *saddr, bool do_negotiation, const char *export_name, const char *x_dirty_bitmap, - QCryptoTLSCreds *tlscreds); + QCryptoTLSCreds *tlscreds, + const char *tlshostname); void nbd_client_connection_release(NBDClientConnection *conn); QIOChannel *coroutine_fn nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info, bool blocking, Error **errp); -void coroutine_fn nbd_co_establish_connection_cancel(NBDClientConnection *conn); +void nbd_co_establish_connection_cancel(NBDClientConnection *conn); #endif diff --git a/include/block/nvme.h b/include/block/nvme.h index 77aae01174..8027b7126b 100644 --- a/include/block/nvme.h +++ b/include/block/nvme.h @@ -98,28 +98,28 @@ enum NvmeCapMask { #define NVME_CAP_PMRS(cap) (((cap) >> CAP_PMRS_SHIFT) & CAP_PMRS_MASK) #define NVME_CAP_CMBS(cap) (((cap) >> CAP_CMBS_SHIFT) & CAP_CMBS_MASK) -#define NVME_CAP_SET_MQES(cap, val) (cap |= (uint64_t)(val & CAP_MQES_MASK) \ - << CAP_MQES_SHIFT) -#define NVME_CAP_SET_CQR(cap, val) (cap |= (uint64_t)(val & CAP_CQR_MASK) \ - << CAP_CQR_SHIFT) -#define NVME_CAP_SET_AMS(cap, val) (cap |= (uint64_t)(val & CAP_AMS_MASK) \ - << CAP_AMS_SHIFT) -#define NVME_CAP_SET_TO(cap, val) (cap |= (uint64_t)(val & CAP_TO_MASK) \ - << CAP_TO_SHIFT) -#define NVME_CAP_SET_DSTRD(cap, val) (cap |= (uint64_t)(val & CAP_DSTRD_MASK) \ - << CAP_DSTRD_SHIFT) -#define NVME_CAP_SET_NSSRS(cap, val) (cap |= (uint64_t)(val & CAP_NSSRS_MASK) \ - << CAP_NSSRS_SHIFT) -#define 
NVME_CAP_SET_CSS(cap, val) (cap |= (uint64_t)(val & CAP_CSS_MASK) \ - << CAP_CSS_SHIFT) -#define NVME_CAP_SET_MPSMIN(cap, val) (cap |= (uint64_t)(val & CAP_MPSMIN_MASK)\ - << CAP_MPSMIN_SHIFT) -#define NVME_CAP_SET_MPSMAX(cap, val) (cap |= (uint64_t)(val & CAP_MPSMAX_MASK)\ - << CAP_MPSMAX_SHIFT) -#define NVME_CAP_SET_PMRS(cap, val) (cap |= (uint64_t)(val & CAP_PMRS_MASK) \ - << CAP_PMRS_SHIFT) -#define NVME_CAP_SET_CMBS(cap, val) (cap |= (uint64_t)(val & CAP_CMBS_MASK) \ - << CAP_CMBS_SHIFT) +#define NVME_CAP_SET_MQES(cap, val) \ + ((cap) |= (uint64_t)((val) & CAP_MQES_MASK) << CAP_MQES_SHIFT) +#define NVME_CAP_SET_CQR(cap, val) \ + ((cap) |= (uint64_t)((val) & CAP_CQR_MASK) << CAP_CQR_SHIFT) +#define NVME_CAP_SET_AMS(cap, val) \ + ((cap) |= (uint64_t)((val) & CAP_AMS_MASK) << CAP_AMS_SHIFT) +#define NVME_CAP_SET_TO(cap, val) \ + ((cap) |= (uint64_t)((val) & CAP_TO_MASK) << CAP_TO_SHIFT) +#define NVME_CAP_SET_DSTRD(cap, val) \ + ((cap) |= (uint64_t)((val) & CAP_DSTRD_MASK) << CAP_DSTRD_SHIFT) +#define NVME_CAP_SET_NSSRS(cap, val) \ + ((cap) |= (uint64_t)((val) & CAP_NSSRS_MASK) << CAP_NSSRS_SHIFT) +#define NVME_CAP_SET_CSS(cap, val) \ + ((cap) |= (uint64_t)((val) & CAP_CSS_MASK) << CAP_CSS_SHIFT) +#define NVME_CAP_SET_MPSMIN(cap, val) \ + ((cap) |= (uint64_t)((val) & CAP_MPSMIN_MASK) << CAP_MPSMIN_SHIFT) +#define NVME_CAP_SET_MPSMAX(cap, val) \ + ((cap) |= (uint64_t)((val) & CAP_MPSMAX_MASK) << CAP_MPSMAX_SHIFT) +#define NVME_CAP_SET_PMRS(cap, val) \ + ((cap) |= (uint64_t)((val) & CAP_PMRS_MASK) << CAP_PMRS_SHIFT) +#define NVME_CAP_SET_CMBS(cap, val) \ + ((cap) |= (uint64_t)((val) & CAP_CMBS_MASK) << CAP_CMBS_SHIFT) enum NvmeCapCss { NVME_CAP_CSS_NVM = 1 << 0, @@ -595,6 +595,8 @@ enum NvmeAdminCommands { NVME_ADM_CMD_ACTIVATE_FW = 0x10, NVME_ADM_CMD_DOWNLOAD_FW = 0x11, NVME_ADM_CMD_NS_ATTACHMENT = 0x15, + NVME_ADM_CMD_VIRT_MNGMT = 0x1c, + NVME_ADM_CMD_DBBUF_CONFIG = 0x7c, NVME_ADM_CMD_FORMAT_NVM = 0x80, NVME_ADM_CMD_SECURITY_SEND = 0x81, NVME_ADM_CMD_SECURITY_RECV = 0x82, @@ -695,7 +697,8 @@ typedef struct QEMU_PACKED NvmeRwCmd { uint8_t flags; uint16_t cid; uint32_t nsid; - uint64_t rsvd2; + uint32_t cdw2; + uint32_t cdw3; uint64_t mptr; NvmeCmdDptr dptr; uint64_t slba; @@ -731,7 +734,6 @@ enum { NVME_RW_PRINFO_PRCHK_APP = 1 << 11, NVME_RW_PRINFO_PRCHK_REF = 1 << 10, NVME_RW_PRINFO_PRCHK_MASK = 7 << 10, - }; #define NVME_RW_PRINFO(control) ((control >> 10) & 0xf) @@ -770,6 +772,7 @@ typedef struct QEMU_PACKED NvmeDsmRange { enum { NVME_COPY_FORMAT_0 = 0x0, + NVME_COPY_FORMAT_1 = 0x1, }; typedef struct QEMU_PACKED NvmeCopyCmd { @@ -777,7 +780,9 @@ typedef struct QEMU_PACKED NvmeCopyCmd { uint8_t flags; uint16_t cid; uint32_t nsid; - uint32_t rsvd2[4]; + uint32_t cdw2; + uint32_t cdw3; + uint32_t rsvd2[2]; NvmeCmdDptr dptr; uint64_t sdlba; uint8_t nr; @@ -789,7 +794,7 @@ typedef struct QEMU_PACKED NvmeCopyCmd { uint16_t appmask; } NvmeCopyCmd; -typedef struct QEMU_PACKED NvmeCopySourceRange { +typedef struct QEMU_PACKED NvmeCopySourceRangeFormat0 { uint8_t rsvd0[8]; uint64_t slba; uint16_t nlb; @@ -797,7 +802,17 @@ typedef struct QEMU_PACKED NvmeCopySourceRange { uint32_t reftag; uint16_t apptag; uint16_t appmask; -} NvmeCopySourceRange; +} NvmeCopySourceRangeFormat0; + +typedef struct QEMU_PACKED NvmeCopySourceRangeFormat1 { + uint8_t rsvd0[8]; + uint64_t slba; + uint16_t nlb; + uint8_t rsvd18[8]; + uint8_t sr[10]; + uint16_t apptag; + uint16_t appmask; +} NvmeCopySourceRangeFormat1; enum NvmeAsyncEventRequest { NVME_AER_TYPE_ERROR = 0, @@ -886,10 +901,16 @@ enum NvmeStatusCodes { 
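The reworked NVME_CAP_SET_* macros above still build the 64-bit CAP register by OR-ing shifted fields into place; only the parenthesisation changed. A usage sketch with purely illustrative values, not taken from this patch:

static uint64_t example_build_cap(void)
{
    uint64_t cap = 0;

    NVME_CAP_SET_MQES(cap, 0x7ff);           /* 2048 queue entries, 0's based */
    NVME_CAP_SET_CQR(cap, 1);                /* contiguous queues required */
    NVME_CAP_SET_TO(cap, 0xf);               /* ready timeout, 500 ms units */
    NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_NVM); /* NVM command set supported */
    return cap;
}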
NVME_NS_PRIVATE = 0x0119, NVME_NS_NOT_ATTACHED = 0x011a, NVME_NS_CTRL_LIST_INVALID = 0x011c, + NVME_INVALID_CTRL_ID = 0x011f, + NVME_INVALID_SEC_CTRL_STATE = 0x0120, + NVME_INVALID_NUM_RESOURCES = 0x0121, + NVME_INVALID_RESOURCE_ID = 0x0122, NVME_CONFLICTING_ATTRS = 0x0180, NVME_INVALID_PROT_INFO = 0x0181, NVME_WRITE_TO_RO = 0x0182, NVME_CMD_SIZE_LIMIT = 0x0183, + NVME_INVALID_ZONE_OP = 0x01b6, + NVME_NOZRWA = 0x01b7, NVME_ZONE_BOUNDARY_ERROR = 0x01b8, NVME_ZONE_FULL = 0x01b9, NVME_ZONE_READ_ONLY = 0x01ba, @@ -906,6 +927,7 @@ enum NvmeStatusCodes { NVME_CMP_FAILURE = 0x0285, NVME_ACCESS_DENIED = 0x0286, NVME_DULB = 0x0287, + NVME_E2E_STORAGE_TAG_ERROR = 0x0288, NVME_MORE = 0x2000, NVME_DNR = 0x4000, NVME_NO_COMPLETE = 0xffff, @@ -1017,6 +1039,8 @@ enum NvmeIdCns { NVME_ID_CNS_NS_PRESENT = 0x11, NVME_ID_CNS_NS_ATTACHED_CTRL_LIST = 0x12, NVME_ID_CNS_CTRL_LIST = 0x13, + NVME_ID_CNS_PRIMARY_CTRL_CAP = 0x14, + NVME_ID_CNS_SECONDARY_CTRL_LIST = 0x15, NVME_ID_CNS_CS_NS_PRESENT_LIST = 0x1a, NVME_ID_CNS_CS_NS_PRESENT = 0x1b, NVME_ID_CNS_IO_COMMAND_SET = 0x1c, @@ -1109,11 +1133,16 @@ enum NvmeIdCtrlOaes { NVME_OAES_NS_ATTR = 1 << 8, }; +enum NvmeIdCtrlCtratt { + NVME_CTRATT_ELBAS = 1 << 15, +}; + enum NvmeIdCtrlOacs { NVME_OACS_SECURITY = 1 << 0, NVME_OACS_FORMAT = 1 << 1, NVME_OACS_FW = 1 << 2, NVME_OACS_NS_MGMT = 1 << 3, + NVME_OACS_DBBUF = 1 << 8, }; enum NvmeIdCtrlOncs { @@ -1129,7 +1158,8 @@ enum NvmeIdCtrlOncs { }; enum NvmeIdCtrlOcfs { - NVME_OCFS_COPY_FORMAT_0 = 1 << 0, + NVME_OCFS_COPY_FORMAT_0 = 1 << NVME_COPY_FORMAT_0, + NVME_OCFS_COPY_FORMAT_1 = 1 << NVME_COPY_FORMAT_1, }; enum NvmeIdctrlVwc { @@ -1154,6 +1184,11 @@ enum NvmeIdCtrlCmic { NVME_CMIC_MULTI_CTRL = 1 << 1, }; +enum NvmeNsAttachmentOperation { + NVME_NS_ATTACHMENT_ATTACH = 0x0, + NVME_NS_ATTACHMENT_DETACH = 0x1, +}; + #define NVME_CTRL_SQES_MIN(sqes) ((sqes) & 0xf) #define NVME_CTRL_SQES_MAX(sqes) (((sqes) >> 4) & 0xf) #define NVME_CTRL_CQES_MIN(cqes) ((cqes) & 0xf) @@ -1209,6 +1244,7 @@ enum NvmeFeatureIds { NVME_WRITE_ATOMICITY = 0xa, NVME_ASYNCHRONOUS_EVENT_CONF = 0xb, NVME_TIMESTAMP = 0xe, + NVME_HOST_BEHAVIOR_SUPPORT = 0x16, NVME_COMMAND_SET_PROFILE = 0x19, NVME_SOFTWARE_PROGRESS_MARKER = 0x80, NVME_FID_MAX = 0x100, @@ -1250,6 +1286,13 @@ typedef struct QEMU_PACKED NvmeRangeType { uint8_t rsvd48[16]; } NvmeRangeType; +typedef struct NvmeHostBehaviorSupport { + uint8_t acre; + uint8_t etdas; + uint8_t lbafee; + uint8_t rsvd3[509]; +} NvmeHostBehaviorSupport; + typedef struct QEMU_PACKED NvmeLBAF { uint16_t ms; uint8_t ds; @@ -1263,6 +1306,7 @@ typedef struct QEMU_PACKED NvmeLBAFE { } NvmeLBAFE; #define NVME_NSID_BROADCAST 0xffffffff +#define NVME_MAX_NLBAF 64 typedef struct QEMU_PACKED NvmeIdNs { uint64_t nsze; @@ -1297,11 +1341,20 @@ typedef struct QEMU_PACKED NvmeIdNs { uint8_t rsvd81[23]; uint8_t nguid[16]; uint64_t eui64; - NvmeLBAF lbaf[16]; - uint8_t rsvd192[192]; + NvmeLBAF lbaf[NVME_MAX_NLBAF]; uint8_t vs[3712]; } NvmeIdNs; +#define NVME_ID_NS_NVM_ELBAF_PIF(elbaf) (((elbaf) >> 7) & 0x3) + +typedef struct QEMU_PACKED NvmeIdNsNvm { + uint64_t lbstm; + uint8_t pic; + uint8_t rsvd9[3]; + uint32_t elbaf[NVME_MAX_NLBAF]; + uint8_t rsvd268[3828]; +} NvmeIdNsNvm; + typedef struct QEMU_PACKED NvmeIdNsDescr { uint8_t nidt; uint8_t nidl; @@ -1340,12 +1393,26 @@ typedef struct QEMU_PACKED NvmeIdNsZoned { uint32_t mor; uint32_t rrl; uint32_t frl; - uint8_t rsvd20[2796]; + uint8_t rsvd12[24]; + uint32_t numzrwa; + uint16_t zrwafg; + uint16_t zrwas; + uint8_t zrwacap; + uint8_t rsvd53[2763]; NvmeLBAFE lbafe[16]; uint8_t 
rsvd3072[768]; uint8_t vs[256]; } NvmeIdNsZoned; +enum NvmeIdNsZonedOzcs { + NVME_ID_NS_ZONED_OZCS_RAZB = 1 << 0, + NVME_ID_NS_ZONED_OZCS_ZRWASUP = 1 << 1, +}; + +enum NvmeIdNsZonedZrwacap { + NVME_ID_NS_ZONED_ZRWACAP_EXPFLUSHSUP = 1 << 0, +}; + /*Deallocate Logical Block Features*/ #define NVME_ID_NS_DLFEAT_GUARD_CRC(dlfeat) ((dlfeat) & 0x10) #define NVME_ID_NS_DLFEAT_WRITE_ZEROES(dlfeat) ((dlfeat) & 0x08) @@ -1389,16 +1456,30 @@ enum NvmeIdNsMc { #define NVME_ID_NS_DPS_TYPE(dps) (dps & NVME_ID_NS_DPS_TYPE_MASK) -typedef struct NvmeDifTuple { - uint16_t guard; - uint16_t apptag; - uint32_t reftag; +enum NvmePIFormat { + NVME_PI_GUARD_16 = 0, + NVME_PI_GUARD_64 = 2, +}; + +typedef union NvmeDifTuple { + struct { + uint16_t guard; + uint16_t apptag; + uint32_t reftag; + } g16; + + struct { + uint64_t guard; + uint16_t apptag; + uint8_t sr[6]; + } g64; } NvmeDifTuple; enum NvmeZoneAttr { NVME_ZA_FINISHED_BY_CTLR = 1 << 0, NVME_ZA_FINISH_RECOMMENDED = 1 << 1, NVME_ZA_RESET_RECOMMENDED = 1 << 2, + NVME_ZA_ZRWA_VALID = 1 << 3, NVME_ZA_ZD_EXT_VALID = 1 << 7, }; @@ -1428,6 +1509,21 @@ enum NvmeZoneType { NVME_ZONE_TYPE_SEQ_WRITE = 0x02, }; +typedef struct QEMU_PACKED NvmeZoneSendCmd { + uint8_t opcode; + uint8_t flags; + uint16_t cid; + uint32_t nsid; + uint32_t rsvd8[4]; + NvmeCmdDptr dptr; + uint64_t slba; + uint32_t rsvd48; + uint8_t zsa; + uint8_t zsflags; + uint8_t rsvd54[2]; + uint32_t rsvd56[2]; +} NvmeZoneSendCmd; + enum NvmeZoneSendAction { NVME_ZONE_ACTION_RSD = 0x00, NVME_ZONE_ACTION_CLOSE = 0x01, @@ -1436,6 +1532,12 @@ enum NvmeZoneSendAction { NVME_ZONE_ACTION_RESET = 0x04, NVME_ZONE_ACTION_OFFLINE = 0x05, NVME_ZONE_ACTION_SET_ZD_EXT = 0x10, + NVME_ZONE_ACTION_ZRWA_FLUSH = 0x11, +}; + +enum { + NVME_ZSFLAG_SELECT_ALL = 1 << 0, + NVME_ZSFLAG_ZRWA_ALLOC = 1 << 1, }; typedef struct QEMU_PACKED NvmeZoneDescr { @@ -1460,6 +1562,61 @@ typedef enum NvmeZoneState { NVME_ZONE_STATE_OFFLINE = 0x0f, } NvmeZoneState; +typedef struct QEMU_PACKED NvmePriCtrlCap { + uint16_t cntlid; + uint16_t portid; + uint8_t crt; + uint8_t rsvd5[27]; + uint32_t vqfrt; + uint32_t vqrfa; + uint16_t vqrfap; + uint16_t vqprt; + uint16_t vqfrsm; + uint16_t vqgran; + uint8_t rsvd48[16]; + uint32_t vifrt; + uint32_t virfa; + uint16_t virfap; + uint16_t viprt; + uint16_t vifrsm; + uint16_t vigran; + uint8_t rsvd80[4016]; +} NvmePriCtrlCap; + +typedef enum NvmePriCtrlCapCrt { + NVME_CRT_VQ = 1 << 0, + NVME_CRT_VI = 1 << 1, +} NvmePriCtrlCapCrt; + +typedef struct QEMU_PACKED NvmeSecCtrlEntry { + uint16_t scid; + uint16_t pcid; + uint8_t scs; + uint8_t rsvd5[3]; + uint16_t vfn; + uint16_t nvq; + uint16_t nvi; + uint8_t rsvd14[18]; +} NvmeSecCtrlEntry; + +typedef struct QEMU_PACKED NvmeSecCtrlList { + uint8_t numcntl; + uint8_t rsvd1[31]; + NvmeSecCtrlEntry sec[127]; +} NvmeSecCtrlList; + +typedef enum NvmeVirtMngmtAction { + NVME_VIRT_MNGMT_ACTION_PRM_ALLOC = 0x01, + NVME_VIRT_MNGMT_ACTION_SEC_OFFLINE = 0x07, + NVME_VIRT_MNGMT_ACTION_SEC_ASSIGN = 0x08, + NVME_VIRT_MNGMT_ACTION_SEC_ONLINE = 0x09, +} NvmeVirtMngmtAction; + +typedef enum NvmeVirtualResourceType { + NVME_VIRT_RES_QUEUE = 0x00, + NVME_VIRT_RES_INTERRUPT = 0x01, +} NvmeVirtualResourceType; + static inline void _nvme_check_size(void) { QEMU_BUILD_BUG_ON(sizeof(NvmeBar) != 4096); @@ -1467,7 +1624,8 @@ static inline void _nvme_check_size(void) QEMU_BUILD_BUG_ON(sizeof(NvmeZonedResult) != 8); QEMU_BUILD_BUG_ON(sizeof(NvmeCqe) != 16); QEMU_BUILD_BUG_ON(sizeof(NvmeDsmRange) != 16); - QEMU_BUILD_BUG_ON(sizeof(NvmeCopySourceRange) != 32); + 
QEMU_BUILD_BUG_ON(sizeof(NvmeCopySourceRangeFormat0) != 32); + QEMU_BUILD_BUG_ON(sizeof(NvmeCopySourceRangeFormat1) != 40); QEMU_BUILD_BUG_ON(sizeof(NvmeCmd) != 64); QEMU_BUILD_BUG_ON(sizeof(NvmeDeleteQ) != 64); QEMU_BUILD_BUG_ON(sizeof(NvmeCreateCq) != 64); @@ -1477,6 +1635,7 @@ static inline void _nvme_check_size(void) QEMU_BUILD_BUG_ON(sizeof(NvmeDsmCmd) != 64); QEMU_BUILD_BUG_ON(sizeof(NvmeCopyCmd) != 64); QEMU_BUILD_BUG_ON(sizeof(NvmeRangeType) != 64); + QEMU_BUILD_BUG_ON(sizeof(NvmeHostBehaviorSupport) != 512); QEMU_BUILD_BUG_ON(sizeof(NvmeErrorLog) != 64); QEMU_BUILD_BUG_ON(sizeof(NvmeFwSlotInfoLog) != 512); QEMU_BUILD_BUG_ON(sizeof(NvmeSmartLog) != 512); @@ -1487,10 +1646,14 @@ static inline void _nvme_check_size(void) QEMU_BUILD_BUG_ON(sizeof(NvmeLBAF) != 4); QEMU_BUILD_BUG_ON(sizeof(NvmeLBAFE) != 16); QEMU_BUILD_BUG_ON(sizeof(NvmeIdNs) != 4096); + QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsNvm) != 4096); QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsZoned) != 4096); QEMU_BUILD_BUG_ON(sizeof(NvmeSglDescriptor) != 16); QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsDescr) != 4); QEMU_BUILD_BUG_ON(sizeof(NvmeZoneDescr) != 64); - QEMU_BUILD_BUG_ON(sizeof(NvmeDifTuple) != 8); + QEMU_BUILD_BUG_ON(sizeof(NvmeDifTuple) != 16); + QEMU_BUILD_BUG_ON(sizeof(NvmePriCtrlCap) != 4096); + QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlEntry) != 32); + QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlList) != 4096); } #endif diff --git a/include/block/qdict.h b/include/block/qdict.h index ced2acfb92..b4c28d96a9 100644 --- a/include/block/qdict.h +++ b/include/block/qdict.h @@ -12,6 +12,9 @@ #include "qapi/qmp/qdict.h" +QObject *qdict_crumple(const QDict *src, Error **errp); +void qdict_flatten(QDict *qdict); + void qdict_copy_default(QDict *dst, QDict *src, const char *key); void qdict_set_default_str(QDict *dst, const char *key, const char *val); diff --git a/include/block/raw-aio.h b/include/block/raw-aio.h index 251b10d273..21fc10c4c9 100644 --- a/include/block/raw-aio.h +++ b/include/block/raw-aio.h @@ -51,11 +51,13 @@ typedef struct LinuxAioState LinuxAioState; LinuxAioState *laio_init(Error **errp); void laio_cleanup(LinuxAioState *s); int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd, - uint64_t offset, QEMUIOVector *qiov, int type); + uint64_t offset, QEMUIOVector *qiov, int type, + uint64_t dev_max_batch); void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context); void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context); void laio_io_plug(BlockDriverState *bs, LinuxAioState *s); -void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s); +void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s, + uint64_t dev_max_batch); #endif /* io_uring.c - Linux io_uring implementation */ #ifdef CONFIG_LINUX_IO_URING diff --git a/include/block/reqlist.h b/include/block/reqlist.h new file mode 100644 index 0000000000..5253497bae --- /dev/null +++ b/include/block/reqlist.h @@ -0,0 +1,75 @@ +/* + * reqlist API + * + * Copyright (C) 2013 Proxmox Server Solutions + * Copyright (c) 2021 Virtuozzo International GmbH. + * + * Authors: + * Dietmar Maurer (dietmar@proxmox.com) + * Vladimir Sementsov-Ogievskiy + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef REQLIST_H +#define REQLIST_H + +#include "qemu/coroutine.h" + +/* + * The API is not thread-safe and shouldn't be. The struct is public to be part + * of other structures and protected by third-party locks, see + * block/block-copy.c for example. 
+ */ + +typedef struct BlockReq { + int64_t offset; + int64_t bytes; + + CoQueue wait_queue; /* coroutines blocked on this req */ + QLIST_ENTRY(BlockReq) list; +} BlockReq; + +typedef QLIST_HEAD(, BlockReq) BlockReqList; + +/* + * Initialize new request and add it to the list. Caller must be sure that + * there are no conflicting requests in the list. + */ +void reqlist_init_req(BlockReqList *reqs, BlockReq *req, int64_t offset, + int64_t bytes); +/* Search for request in the list intersecting with @offset/@bytes area. */ +BlockReq *reqlist_find_conflict(BlockReqList *reqs, int64_t offset, + int64_t bytes); + +/* + * If there are no intersecting requests return false. Otherwise, wait for the + * first found intersecting request to finish and return true. + * + * @lock is passed to qemu_co_queue_wait() + * False return value proves that lock was released at no point. + */ +bool coroutine_fn reqlist_wait_one(BlockReqList *reqs, int64_t offset, + int64_t bytes, CoMutex *lock); + +/* + * Wait for all intersecting requests. It just calls reqlist_wait_one() in a + * loop, caller is responsible to stop producing new requests in this region + * in parallel, otherwise reqlist_wait_all() may never return. + */ +void coroutine_fn reqlist_wait_all(BlockReqList *reqs, int64_t offset, + int64_t bytes, CoMutex *lock); + +/* + * Shrink request and wake all waiting coroutines (maybe some of them are not + * intersecting with shrunk request). + */ +void coroutine_fn reqlist_shrink_req(BlockReq *req, int64_t new_bytes); + +/* + * Remove request and wake all waiting coroutines. Do not release any memory. + */ +void coroutine_fn reqlist_remove_req(BlockReq *req); + +#endif /* REQLIST_H */ diff --git a/include/block/snapshot.h b/include/block/snapshot.h index 940345692f..50ff924710 100644 --- a/include/block/snapshot.h +++ b/include/block/snapshot.h @@ -45,6 +45,13 @@ typedef struct QEMUSnapshotInfo { uint64_t icount; /* record/replay step */ } QEMUSnapshotInfo; +/* + * Global state (GS) API. These functions run under the BQL. + * + * See include/block/block-global-state.h for more information about + * the GS API. + */ + int bdrv_snapshot_find(BlockDriverState *bs, QEMUSnapshotInfo *sn_info, const char *name); bool bdrv_snapshot_find_by_id_and_name(BlockDriverState *bs, @@ -73,9 +80,11 @@ int bdrv_snapshot_load_tmp_by_id_or_name(BlockDriverState *bs, Error **errp); -/* Group operations. All block drivers are involved. +/* + * Group operations. All block drivers are involved. 
* These functions will properly handle dataplane (take aio_context_acquire - * when appropriate for appropriate block drivers */ + * when appropriate for appropriate block drivers + */ bool bdrv_all_can_snapshot(bool has_devices, strList *devices, Error **errp); diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h index 7dd7d730a0..2020bcc92d 100644 --- a/include/block/thread-pool.h +++ b/include/block/thread-pool.h @@ -20,6 +20,8 @@ #include "block/block.h" +#define THREAD_POOL_MAX_THREADS_DEFAULT 64 + typedef int ThreadPoolFunc(void *opaque); typedef struct ThreadPool ThreadPool; @@ -33,5 +35,6 @@ BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool, int coroutine_fn thread_pool_submit_co(ThreadPool *pool, ThreadPoolFunc *func, void *arg); void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg); +void thread_pool_update_params(ThreadPool *pool, struct AioContext *ctx); #endif diff --git a/include/chardev/char-fe.h b/include/chardev/char-fe.h index 867ef1b3b2..8c420fa36e 100644 --- a/include/chardev/char-fe.h +++ b/include/chardev/char-fe.h @@ -172,7 +172,7 @@ void qemu_chr_fe_set_open(CharBackend *be, int fe_open); * Chardev. */ void qemu_chr_fe_printf(CharBackend *be, const char *fmt, ...) - GCC_FMT_ATTR(2, 3); + G_GNUC_PRINTF(2, 3); typedef gboolean (*FEWatchFunc)(void *do_not_use, GIOCondition condition, void *data); diff --git a/include/chardev/char-socket.h b/include/chardev/char-socket.h new file mode 100644 index 0000000000..0708ca6fa9 --- /dev/null +++ b/include/chardev/char-socket.h @@ -0,0 +1,87 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#ifndef CHAR_SOCKET_H +#define CHAR_SOCKET_H + +#include "io/channel-socket.h" +#include "io/channel-tls.h" +#include "io/net-listener.h" +#include "chardev/char.h" +#include "qom/object.h" + +#define TCP_MAX_FDS 16 + +typedef struct { + char buf[21]; + size_t buflen; +} TCPChardevTelnetInit; + +typedef enum { + TCP_CHARDEV_STATE_DISCONNECTED, + TCP_CHARDEV_STATE_CONNECTING, + TCP_CHARDEV_STATE_CONNECTED, +} TCPChardevState; + +typedef ChardevClass SocketChardevClass; + +struct SocketChardev { + Chardev parent; + QIOChannel *ioc; /* Client I/O channel */ + QIOChannelSocket *sioc; /* Client master channel */ + QIONetListener *listener; + GSource *hup_source; + QCryptoTLSCreds *tls_creds; + char *tls_authz; + TCPChardevState state; + int max_size; + int do_telnetopt; + int do_nodelay; + int *read_msgfds; + size_t read_msgfds_num; + int *write_msgfds; + size_t write_msgfds_num; + bool registered_yank; + + SocketAddress *addr; + bool is_listen; + bool is_telnet; + bool is_tn3270; + GSource *telnet_source; + TCPChardevTelnetInit *telnet_init; + + bool is_websock; + + GSource *reconnect_timer; + int64_t reconnect_time; + bool connect_err_reported; + + QIOTask *connect_task; +}; +typedef struct SocketChardev SocketChardev; + +DECLARE_INSTANCE_CHECKER(SocketChardev, SOCKET_CHARDEV, + TYPE_CHARDEV_SOCKET) + +#endif /* CHAR_SOCKET_H */ diff --git a/include/chardev/char.h b/include/chardev/char.h index 7c0444f90d..44cd82e405 100644 --- a/include/chardev/char.h +++ b/include/chardev/char.h @@ -186,7 +186,7 @@ int qemu_chr_be_can_write(Chardev *s); * the caller should call @qemu_chr_be_can_write to determine how much data * the front end can currently accept. */ -void qemu_chr_be_write(Chardev *s, uint8_t *buf, int len); +void qemu_chr_be_write(Chardev *s, const uint8_t *buf, int len); /** * qemu_chr_be_write_impl: @@ -195,7 +195,7 @@ void qemu_chr_be_write(Chardev *s, uint8_t *buf, int len); * * Implementation of back end writing. Used by replay module. */ -void qemu_chr_be_write_impl(Chardev *s, uint8_t *buf, int len); +void qemu_chr_be_write_impl(Chardev *s, const uint8_t *buf, int len); /** * qemu_chr_be_update_read_handlers: @@ -254,26 +254,58 @@ struct ChardevClass { bool internal; /* TODO: eventually use TYPE_USER_CREATABLE */ bool supports_yank; + + /* parse command line options and populate QAPI @backend */ void (*parse)(QemuOpts *opts, ChardevBackend *backend, Error **errp); + /* called after construction, open/starts the backend */ void (*open)(Chardev *chr, ChardevBackend *backend, bool *be_opened, Error **errp); + /* write buf to the backend */ int (*chr_write)(Chardev *s, const uint8_t *buf, int len); + + /* + * Read from the backend (blocking). A typical front-end will instead rely + * on chr_can_read/chr_read being called when polling/looping. 
+ */ int (*chr_sync_read)(Chardev *s, const uint8_t *buf, int len); + + /* create a watch on the backend */ GSource *(*chr_add_watch)(Chardev *s, GIOCondition cond); + + /* update the backend internal sources */ void (*chr_update_read_handler)(Chardev *s); + + /* send an ioctl to the backend */ int (*chr_ioctl)(Chardev *s, int cmd, void *arg); + + /* get ancillary-received fds during last read */ int (*get_msgfds)(Chardev *s, int* fds, int num); + + /* set ancillary fds to be sent with next write */ int (*set_msgfds)(Chardev *s, int *fds, int num); + + /* accept the given fd */ int (*chr_add_client)(Chardev *chr, int fd); + + /* wait for a connection */ int (*chr_wait_connected)(Chardev *chr, Error **errp); + + /* disconnect a connection */ void (*chr_disconnect)(Chardev *chr); + + /* called by frontend when it can read */ void (*chr_accept_input)(Chardev *chr); + + /* set terminal echo */ void (*chr_set_echo)(Chardev *chr, bool echo); + + /* notify the backend of frontend open state */ void (*chr_set_fe_open)(Chardev *chr, int fe_open); + + /* handle various events */ void (*chr_be_event)(Chardev *s, QEMUChrEvent event); - void (*chr_options_parsed)(Chardev *chr); }; Chardev *qemu_chardev_new(const char *id, const char *typename, diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h new file mode 100644 index 0000000000..214e58ca47 --- /dev/null +++ b/include/crypto/akcipher.h @@ -0,0 +1,179 @@ +/* + * QEMU Crypto asymmetric algorithms + * + * Copyright (c) 2022 Bytedance + * Author: zhenwei pi + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +#ifndef QCRYPTO_AKCIPHER_H +#define QCRYPTO_AKCIPHER_H + +#include "qapi/qapi-types-crypto.h" + +typedef struct QCryptoAkCipher QCryptoAkCipher; + +/** + * qcrypto_akcipher_supports: + * @opts: the asymmetric key algorithm and related options + * + * Determine if the asymmetric key cipher described by @opts is + * supported by the currently configured build + * + * Returns: true if it is supported, false otherwise. + */ +bool qcrypto_akcipher_supports(QCryptoAkCipherOptions *opts); + +/** + * qcrypto_akcipher_new: + * @opts: specify the algorithm and the related arguments + * @type: private or public key type + * @key: buffer to store the key + * @key_len: the length of key buffer + * @errp: error pointer + * + * Create akcipher context + * + * Returns: On success, a new QCryptoAkCipher initialized with @opts + * is created and returned, otherwise NULL is returned.
+ */ + +QCryptoAkCipher *qcrypto_akcipher_new(const QCryptoAkCipherOptions *opts, + QCryptoAkCipherKeyType type, + const uint8_t *key, size_t key_len, + Error **errp); + +/** + * qcrypto_akcipher_encrypt: + * @akcipher: akcipher context + * @in: plaintext pending to be encrypted + * @in_len: length of plaintext, less or equal to the size reported + * by a call to qcrypto_akcipher_max_plaintext_len() + * @out: buffer to store the ciphertext + * @out_len: length of ciphertext, less or equal to the size reported + * by a call to qcrypto_akcipher_max_ciphertext_len() + * @errp: error pointer + * + * Encrypt @in and write ciphertext into @out + * + * Returns: length of ciphertext if encryption succeeds, + * otherwise -1 is returned + */ +int qcrypto_akcipher_encrypt(QCryptoAkCipher *akcipher, + const void *in, size_t in_len, + void *out, size_t out_len, Error **errp); + +/** + * qcrypto_akcipher_decrypt: + * @akcipher: akcipher context + * @in: ciphertext to be decrypted + * @in_len: the length of ciphertext, less or equal to the size reported + * by a call to qcrypto_akcipher_max_ciphertext_len() + * @out: buffer to store the plaintext + * @out_len: length of the plaintext buffer, less or equal to the size + * reported by a call to qcrypto_akcipher_max_plaintext_len() + * @errp: error pointer + * + * Decrypt @in and write plaintext into @out + * + * Returns: length of plaintext if decryption succeeds, + * otherwise -1 is returned + */ +int qcrypto_akcipher_decrypt(QCryptoAkCipher *akcipher, + const void *in, size_t in_len, + void *out, size_t out_len, Error **errp); + +/** + * qcrypto_akcipher_sign: + * @akcipher: akcipher context + * @in: data to be signed + * @in_len: the length of data, less or equal to the size reported + * by a call to qcrypto_akcipher_max_dgst_len() + * @out: buffer to store the signature + * @out_len: length of the signature buffer, less or equal to the size + * reported by a call to qcrypto_akcipher_max_signature_len() + * @errp: error pointer + * + * Generate signature for @in, write into @out + * + * Returns: length of signature on success, + * otherwise -1 is returned + */ +int qcrypto_akcipher_sign(QCryptoAkCipher *akcipher, + const void *in, size_t in_len, + void *out, size_t out_len, Error **errp); + +/** + * qcrypto_akcipher_verify: + * @akcipher: akcipher context + * @in: pointer to the signature + * @in_len: length of signature, less or equal to the size reported + * by a call to qcrypto_akcipher_max_signature_len() + * @in2: pointer to original data + * @in2_len: the length of original data, less or equal to the size + * reported by a call to qcrypto_akcipher_max_dgst_len() + * @errp: error pointer + * + * Verify whether @in and @in2 match + * + * Returns: 0 on success, + * otherwise -1 is returned + */ +int qcrypto_akcipher_verify(QCryptoAkCipher *akcipher, + const void *in, size_t in_len, + const void *in2, size_t in2_len, Error **errp); + +int qcrypto_akcipher_max_plaintext_len(QCryptoAkCipher *akcipher); + +int qcrypto_akcipher_max_ciphertext_len(QCryptoAkCipher *akcipher); + +int qcrypto_akcipher_max_signature_len(QCryptoAkCipher *akcipher); + +int qcrypto_akcipher_max_dgst_len(QCryptoAkCipher *akcipher); + +/** + * qcrypto_akcipher_free: + * @akcipher: akcipher context + * + * Free the akcipher context + * + */ +void qcrypto_akcipher_free(QCryptoAkCipher *akcipher); + +/** + * qcrypto_akcipher_export_p8info: + * @opts: the options of the akcipher to be exported. + * @key: the original key of the akcipher to be exported.
+ * @keylen: length of the 'key' + * @dst: output parameter, if export succeed, *dst is set to the + * PKCS#8 encoded private key, caller MUST free this key with + * g_free after use. + * @dst_len: output parameter, indicates the length of PKCS#8 encoded + * key. + * + * Export the akcipher into DER encoded pkcs#8 private key info, expects + * |key| stores a valid asymmetric PRIVATE key. + * + * Returns: 0 for succeed, otherwise -1 is returned. + */ +int qcrypto_akcipher_export_p8info(const QCryptoAkCipherOptions *opts, + uint8_t *key, size_t keylen, + uint8_t **dst, size_t *dst_len, + Error **errp); + +G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoAkCipher, qcrypto_akcipher_free) + +#endif /* QCRYPTO_AKCIPHER_H */ diff --git a/include/crypto/block.h b/include/crypto/block.h index 7a65e8e402..4f63a37872 100644 --- a/include/crypto/block.h +++ b/include/crypto/block.h @@ -29,24 +29,24 @@ typedef struct QCryptoBlock QCryptoBlock; /* See also QCryptoBlockFormat, QCryptoBlockCreateOptions * and QCryptoBlockOpenOptions in qapi/crypto.json */ -typedef ssize_t (*QCryptoBlockReadFunc)(QCryptoBlock *block, - size_t offset, - uint8_t *buf, - size_t buflen, - void *opaque, - Error **errp); +typedef int (*QCryptoBlockReadFunc)(QCryptoBlock *block, + size_t offset, + uint8_t *buf, + size_t buflen, + void *opaque, + Error **errp); -typedef ssize_t (*QCryptoBlockInitFunc)(QCryptoBlock *block, - size_t headerlen, - void *opaque, - Error **errp); +typedef int (*QCryptoBlockInitFunc)(QCryptoBlock *block, + size_t headerlen, + void *opaque, + Error **errp); -typedef ssize_t (*QCryptoBlockWriteFunc)(QCryptoBlock *block, - size_t offset, - const uint8_t *buf, - size_t buflen, - void *opaque, - Error **errp); +typedef int (*QCryptoBlockWriteFunc)(QCryptoBlock *block, + size_t offset, + const uint8_t *buf, + size_t buflen, + void *opaque, + Error **errp); /** * qcrypto_block_has_format: diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h new file mode 100644 index 0000000000..9bd3ebc62e --- /dev/null +++ b/include/crypto/sm4.h @@ -0,0 +1,6 @@ +#ifndef QEMU_SM4_H +#define QEMU_SM4_H + +extern const uint8_t sm4_sbox[256]; + +#endif diff --git a/include/crypto/tls-cipher-suites.h b/include/crypto/tls-cipher-suites.h index 7eb1b76122..3bd2003f32 100644 --- a/include/crypto/tls-cipher-suites.h +++ b/include/crypto/tls-cipher-suites.h @@ -8,8 +8,8 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef QCRYPTO_TLSCIPHERSUITES_H -#define QCRYPTO_TLSCIPHERSUITES_H +#ifndef QCRYPTO_TLS_CIPHER_SUITES_H +#define QCRYPTO_TLS_CIPHER_SUITES_H #include "qom/object.h" #include "crypto/tlscreds.h" @@ -31,4 +31,4 @@ DECLARE_INSTANCE_CHECKER(QCryptoTLSCipherSuites, QCRYPTO_TLS_CIPHER_SUITES, GByteArray *qcrypto_tls_cipher_suites_get_data(QCryptoTLSCipherSuites *obj, Error **errp); -#endif /* QCRYPTO_TLSCIPHERSUITES_H */ +#endif /* QCRYPTO_TLS_CIPHER_SUITES_H */ diff --git a/include/disas/dis-asm.h b/include/disas/dis-asm.h index 524f29196d..64247ecb11 100644 --- a/include/disas/dis-asm.h +++ b/include/disas/dis-asm.h @@ -253,6 +253,7 @@ enum bfd_architecture #define bfd_mach_rx 0x75 #define bfd_mach_rx_v2 0x76 #define bfd_mach_rx_v3 0x77 + bfd_arch_loongarch, bfd_arch_last }; #define bfd_mach_s390_31 31 @@ -269,7 +270,7 @@ typedef struct symbol_cache_entry } asymbol; typedef int (*fprintf_function)(FILE *f, const char *fmt, ...) 
- GCC_FMT_ATTR(2, 3); + G_GNUC_PRINTF(2, 3); enum dis_insn_type { dis_noninsn, /* Not a valid instruction */ @@ -418,7 +419,6 @@ int print_insn_tci(bfd_vma, disassemble_info*); int print_insn_big_mips (bfd_vma, disassemble_info*); int print_insn_little_mips (bfd_vma, disassemble_info*); int print_insn_nanomips (bfd_vma, disassemble_info*); -int print_insn_i386 (bfd_vma, disassemble_info*); int print_insn_m68k (bfd_vma, disassemble_info*); int print_insn_z8001 (bfd_vma, disassemble_info*); int print_insn_z8002 (bfd_vma, disassemble_info*); @@ -429,7 +429,6 @@ int print_insn_h8500 (bfd_vma, disassemble_info*); int print_insn_arm_a64 (bfd_vma, disassemble_info*); int print_insn_alpha (bfd_vma, disassemble_info*); disassembler_ftype arc_get_disassembler (int, int); -int print_insn_arm (bfd_vma, disassemble_info*); int print_insn_sparc (bfd_vma, disassemble_info*); int print_insn_big_a29k (bfd_vma, disassemble_info*); int print_insn_little_a29k (bfd_vma, disassemble_info*); @@ -449,19 +448,18 @@ int print_insn_w65 (bfd_vma, disassemble_info*); int print_insn_d10v (bfd_vma, disassemble_info*); int print_insn_v850 (bfd_vma, disassemble_info*); int print_insn_tic30 (bfd_vma, disassemble_info*); -int print_insn_ppc (bfd_vma, disassemble_info*); -int print_insn_s390 (bfd_vma, disassemble_info*); int print_insn_crisv32 (bfd_vma, disassemble_info*); int print_insn_crisv10 (bfd_vma, disassemble_info*); int print_insn_microblaze (bfd_vma, disassemble_info*); int print_insn_ia64 (bfd_vma, disassemble_info*); -int print_insn_big_nios2 (bfd_vma, disassemble_info*); -int print_insn_little_nios2 (bfd_vma, disassemble_info*); +int print_insn_nios2(bfd_vma, disassemble_info*); int print_insn_xtensa (bfd_vma, disassemble_info*); int print_insn_riscv32 (bfd_vma, disassemble_info*); int print_insn_riscv64 (bfd_vma, disassemble_info*); +int print_insn_riscv128 (bfd_vma, disassemble_info*); int print_insn_rx(bfd_vma, disassemble_info *); int print_insn_hexagon(bfd_vma, disassemble_info *); +int print_insn_loongarch(bfd_vma, disassemble_info *); #ifdef CONFIG_CAPSTONE bool cap_disas_target(disassemble_info *info, uint64_t pc, size_t size); diff --git a/include/elf.h b/include/elf.h index 811bf4a1cb..8bf1e72720 100644 --- a/include/elf.h +++ b/include/elf.h @@ -31,6 +31,7 @@ typedef int64_t Elf64_Sxword; #define PT_LOPROC 0x70000000 #define PT_HIPROC 0x7fffffff +#define PT_GNU_STACK (PT_LOOS + 0x474e551) #define PT_GNU_PROPERTY (PT_LOOS + 0x474e553) #define PT_MIPS_REGINFO 0x70000000 @@ -182,6 +183,8 @@ typedef struct mips_elf_abiflags_v0 { #define EM_NANOMIPS 249 /* Wave Computing nanoMIPS */ +#define EM_LOONGARCH 258 /* LoongArch */ + /* * This is an interim value that we will use until the committee comes * up with a final number. 
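For reference, here is a minimal standalone sketch of how the new elf.h constants above (EM_LOONGARCH, PT_GNU_STACK) are typically consumed by an ELF loader. This is an editorial illustration, not part of the patch; it assumes the standard Elf64_Ehdr/Elf64_Phdr layouts from the system elf.h.

#include <elf.h>
#include <stdio.h>

#ifndef EM_LOONGARCH
#define EM_LOONGARCH 258                      /* value added in the hunk above */
#endif
#ifndef PT_GNU_STACK
#define PT_GNU_STACK (0x60000000 + 0x474e551) /* PT_LOOS + 0x474e551 */
#endif

/* Report the machine type and any GNU_STACK request of a parsed ELF image. */
static void describe_elf(const Elf64_Ehdr *eh, const Elf64_Phdr *ph, size_t phnum)
{
    printf("machine: %s\n",
           eh->e_machine == EM_LOONGARCH ? "LoongArch" : "other");
    for (size_t i = 0; i < phnum; i++) {
        if (ph[i].p_type == PT_GNU_STACK) {
            printf("GNU_STACK: %sexecutable\n",
                   (ph[i].p_flags & PF_X) ? "" : "non-");
        }
    }
}

int main(void)
{
    /* Hypothetical headers, just to exercise the checks above. */
    Elf64_Ehdr eh = { .e_machine = EM_LOONGARCH };
    Elf64_Phdr ph = { .p_type = PT_GNU_STACK, .p_flags = PF_R | PF_W };
    describe_elf(&eh, &ph, 1);
    return 0;
}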
@@ -1647,6 +1650,8 @@ typedef struct elf64_shdr { #define NT_TASKSTRUCT 4 #define NT_AUXV 6 #define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */ +#define NT_S390_PV_CPU_DATA 0x30e /* s390 protvirt cpu dump data */ +#define NT_S390_RI_CB 0x30d /* s390 runtime instrumentation */ #define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */ #define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */ #define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 (lower half) */ diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h index 32cfb634c6..2eb1176538 100644 --- a/include/exec/cpu-all.h +++ b/include/exec/cpu-all.h @@ -34,13 +34,13 @@ /* some important defines: * - * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and + * HOST_BIG_ENDIAN : whether the host cpu is big endian and * otherwise little endian. * - * TARGET_WORDS_BIGENDIAN : same for target cpu + * TARGET_BIG_ENDIAN : same for the target cpu */ -#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN #define BSWAP_NEEDED #endif @@ -120,7 +120,7 @@ static inline void tswap64s(uint64_t *s) /* Target-endianness CPU memory access functions. These fit into the * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h. */ -#if defined(TARGET_WORDS_BIGENDIAN) +#if TARGET_BIG_ENDIAN #define lduw_p(p) lduw_be_p(p) #define ldsw_p(p) ldsw_be_p(p) #define ldl_p(p) ldl_be_p(p) @@ -234,15 +234,6 @@ extern const TargetPageBits target_page; #define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE) -/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even - * when intptr_t is 32-bit and we are aligning a long long. - */ -extern uintptr_t qemu_host_page_size; -extern intptr_t qemu_host_page_mask; - -#define HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_host_page_size) -#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size) - /* same as PROT_xxx */ #define PAGE_READ 0x0001 #define PAGE_WRITE 0x0002 @@ -271,6 +262,12 @@ extern intptr_t qemu_host_page_mask; #define PAGE_TARGET_1 0x0200 #define PAGE_TARGET_2 0x0400 +/* + * For linux-user, indicates that the page is mapped with the same semantics + * in both guest and host. + */ +#define PAGE_PASSTHROUGH 0x0800 + #if defined(CONFIG_USER_ONLY) void page_dump(FILE *f); @@ -280,31 +277,22 @@ int walk_memory_regions(void *, walk_memory_regions_fn); int page_get_flags(target_ulong address); void page_set_flags(target_ulong start, target_ulong end, int flags); +void page_reset_target_data(target_ulong start, target_ulong end); int page_check_range(target_ulong start, target_ulong len, int flags); -/** - * page_alloc_target_data(address, size) - * @address: guest virtual address - * @size: size of data to allocate - * - * Allocate @size bytes of out-of-band data to associate with the - * guest page at @address. If the page is not mapped, NULL will - * be returned. If there is existing data associated with @address, - * no new memory will be allocated. - * - * The memory will be freed when the guest page is deallocated, - * e.g. with the munmap system call. - */ -void *page_alloc_target_data(target_ulong address, size_t size); - /** * page_get_target_data(address) * @address: guest virtual address * - * Return any out-of-bound memory assocated with the guest page - * at @address, as per page_alloc_target_data. + * Return TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate + * with the guest page at @address, allocating it if necessary. 
The + * caller should already have verified that the address is valid. + * + * The memory will be freed when the guest page is deallocated, + * e.g. with the munmap system call. */ -void *page_get_target_data(target_ulong address); +void *page_get_target_data(target_ulong address) + __attribute__((returns_nonnull)); #endif CPUArchState *cpu_copy(CPUArchState *env); @@ -428,25 +416,16 @@ static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr) } #ifdef CONFIG_TCG -/* accel/tcg/cpu-exec.c */ -void dump_drift_info(void); /* accel/tcg/translate-all.c */ -void dump_exec_info(void); -void dump_opcount_info(void); +void dump_exec_info(GString *buf); #endif /* CONFIG_TCG */ #endif /* !CONFIG_USER_ONLY */ -#ifdef CONFIG_TCG /* accel/tcg/cpu-exec.c */ int cpu_exec(CPUState *cpu); void tcg_exec_realizefn(CPUState *cpu, Error **errp); void tcg_exec_unrealizefn(CPUState *cpu); -#endif /* CONFIG_TCG */ - -/* Returns: 0 on success, -1 on error */ -int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, - void *ptr, target_ulong len, bool is_write); /** * cpu_set_cpustate_pointers(cpu) diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h index 039d422bf4..6feaa40ca7 100644 --- a/include/exec/cpu-common.h +++ b/include/exec/cpu-common.h @@ -7,12 +7,38 @@ #include "exec/hwaddr.h" #endif +/** + * vaddr: + * Type wide enough to contain any #target_ulong virtual address. + */ +typedef uint64_t vaddr; +#define VADDR_PRId PRId64 +#define VADDR_PRIu PRIu64 +#define VADDR_PRIo PRIo64 +#define VADDR_PRIx PRIx64 +#define VADDR_PRIX PRIX64 +#define VADDR_MAX UINT64_MAX + +void cpu_exec_init_all(void); +void cpu_exec_step_atomic(CPUState *cpu); + +/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even + * when intptr_t is 32-bit and we are aligning a long long. + */ +extern uintptr_t qemu_host_page_size; +extern intptr_t qemu_host_page_mask; + +#define HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_host_page_size) +#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size()) + /* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */ void qemu_init_cpu_list(void); void cpu_list_lock(void); void cpu_list_unlock(void); +unsigned int cpu_list_generation_id_get(void); void tcg_flush_softmmu_tlb(CPUState *cs); +void tcg_flush_jmp_cache(CPUState *cs); void tcg_iommu_init_notifier_list(CPUState *cpu); void tcg_iommu_free_notifier_list(CPUState *cpu); @@ -25,7 +51,7 @@ enum device_endian { DEVICE_LITTLE_ENDIAN, }; -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN #define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN #else #define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN @@ -47,6 +73,7 @@ typedef uintptr_t ram_addr_t; void qemu_ram_remap(ram_addr_t addr, ram_addr_t length); /* This should not be used by devices. 
*/ ram_addr_t qemu_ram_addr_from_host(void *ptr); +ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr); RAMBlock *qemu_ram_block_by_name(const char *name); RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset, ram_addr_t *offset); @@ -65,10 +92,33 @@ void qemu_ram_set_uf_zeroable(RAMBlock *rb); bool qemu_ram_is_migratable(RAMBlock *rb); void qemu_ram_set_migratable(RAMBlock *rb); void qemu_ram_unset_migratable(RAMBlock *rb); +int qemu_ram_get_fd(RAMBlock *rb); size_t qemu_ram_pagesize(RAMBlock *block); size_t qemu_ram_pagesize_largest(void); +/** + * cpu_address_space_init: + * @cpu: CPU to add this address space to + * @asidx: integer index of this address space + * @prefix: prefix to be used as name of address space + * @mr: the root memory region of address space + * + * Add the specified address space to the CPU's cpu_ases list. + * The address space added with @asidx 0 is the one used for the + * convenience pointer cpu->as. + * The target-specific code which registers ASes is responsible + * for defining what semantics address space 0, 1, 2, etc have. + * + * Before the first call to this function, the caller must set + * cpu->num_ases to the total number of address spaces it needs + * to support. + * + * Note that with KVM only one address space is supported. + */ +void cpu_address_space_init(CPUState *cpu, int asidx, + const char *prefix, MemoryRegion *mr); + void cpu_physical_memory_rw(hwaddr addr, void *buf, hwaddr len, bool is_write); static inline void cpu_physical_memory_read(hwaddr addr, @@ -81,6 +131,7 @@ static inline void cpu_physical_memory_write(hwaddr addr, { cpu_physical_memory_rw(addr, (void *)buf, len, true); } +void cpu_reloading_memory_map(void); void *cpu_physical_memory_map(hwaddr addr, hwaddr *plen, bool is_write); @@ -107,7 +158,13 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length); #endif +/* Returns: 0 on success, -1 on error */ +int cpu_memory_rw_debug(CPUState *cpu, vaddr addr, + void *ptr, size_t len, bool is_write); + /* vl.c */ extern int singlestep; +void list_cpus(const char *optarg); + #endif /* CPU_COMMON_H */ diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h index ba3cd32a1e..21309cf567 100644 --- a/include/exec/cpu-defs.h +++ b/include/exec/cpu-defs.h @@ -54,6 +54,9 @@ # error TARGET_PAGE_BITS must be defined in cpu-param.h # endif #endif +#ifndef TARGET_TB_PCREL +# define TARGET_TB_PCREL 0 +#endif #define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8) @@ -108,6 +111,7 @@ typedef uint64_t target_ulong; # endif # endif +/* Minimalized TLB entry for use by TCG fast path. */ typedef struct CPUTLBEntry { /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not @@ -131,14 +135,14 @@ typedef struct CPUTLBEntry { QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS)); -/* The IOTLB is not accessed directly inline by generated TCG code, - * so the CPUIOTLBEntry layout is not as critical as that of the - * CPUTLBEntry. (This is also why we don't want to combine the two - * structs into one.) +/* + * The full TLB entry, which is not accessed by generated TCG code, + * so the layout is not as critical as that of CPUTLBEntry. This is + * also why we don't want to combine the two structs. 
*/ -typedef struct CPUIOTLBEntry { +typedef struct CPUTLBEntryFull { /* - * @addr contains: + * @xlat_section contains: * - in the lower TARGET_PAGE_BITS, a physical section number * - with the lower TARGET_PAGE_BITS masked off, an offset which * must be added to the virtual address to obtain: @@ -146,9 +150,32 @@ typedef struct CPUIOTLBEntry { * number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM) * + the offset within the target MemoryRegion (otherwise) */ - hwaddr addr; + hwaddr xlat_section; + + /* + * @phys_addr contains the physical address in the address space + * given by cpu_asidx_from_attrs(cpu, @attrs). + */ + hwaddr phys_addr; + + /* @attrs contains the memory transaction attributes for the page. */ MemTxAttrs attrs; -} CPUIOTLBEntry; + + /* @prot contains the complete protections for the page. */ + uint8_t prot; + + /* @lg_page_size contains the log2 of the page size. */ + uint8_t lg_page_size; + + /* + * Allow target-specific additions to this structure. + * This may be used to cache items from the guest cpu + * page tables for later use by the implementation. + */ +#ifdef TARGET_PAGE_ENTRY_EXTRA + TARGET_PAGE_ENTRY_EXTRA +#endif +} CPUTLBEntryFull; /* * Data elements that are per MMU mode, minus the bits accessed by @@ -172,9 +199,8 @@ typedef struct CPUTLBDesc { size_t vindex; /* The tlb victim table, in two parts. */ CPUTLBEntry vtable[CPU_VTLB_SIZE]; - CPUIOTLBEntry viotlb[CPU_VTLB_SIZE]; - /* The iotlb. */ - CPUIOTLBEntry *iotlb; + CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE]; + CPUTLBEntryFull *fulltlb; } CPUTLBDesc; /* diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h index 44a4e070ec..a32129cea8 100644 --- a/include/exec/cpu_ldst.h +++ b/include/exec/cpu_ldst.h @@ -28,10 +28,12 @@ * load: cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr) * cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr) * cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr) + * cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr) * * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val) * cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr) * cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr) + * cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr) * * sign is: * (empty): for 32 and 64 bit sizes @@ -53,10 +55,17 @@ * The "mmuidx" suffix carries an extra mmu_idx argument that specifies * the index to use; the "data" and "code" suffixes take the index from * cpu_mmu_index(). + * + * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the + * MemOp including alignment requirements. The alignment will be enforced. 
*/ #ifndef CPU_LDST_H #define CPU_LDST_H +#include "exec/memopidx.h" +#include "qemu/int128.h" +#include "cpu.h" + #if defined(CONFIG_USER_ONLY) /* sparc32plus has 64bit long but 32bit space address * this can make bad result with g2h() and h2g() @@ -113,17 +122,15 @@ static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len) }) #else typedef target_ulong abi_ptr; -#define TARGET_ABI_FMT_ptr TARGET_ABI_FMT_lx +#define TARGET_ABI_FMT_ptr TARGET_FMT_lx #endif uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr); int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr); - uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr); int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr); uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr); uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr); - uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr); int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr); uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr); @@ -131,37 +138,31 @@ uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr); uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); - uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); - uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val); - void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val); void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val); void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val); - void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val); void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val); void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val); void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, uintptr_t ra); - void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, uintptr_t ra); void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, uintptr_t ra); void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr, uint64_t val, uintptr_t ra); - void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, uintptr_t ra); void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr, @@ -169,6 +170,157 @@ void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr, void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr, uint64_t val, uintptr_t ra); +uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); +int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); +uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); +int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); +uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); +uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); +uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int 
mmu_idx, uintptr_t ra); +int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); +uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); +uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); + +void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, + int mmu_idx, uintptr_t ra); +void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, + int mmu_idx, uintptr_t ra); +void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, + int mmu_idx, uintptr_t ra); +void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val, + int mmu_idx, uintptr_t ra); +void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, + int mmu_idx, uintptr_t ra); +void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, + int mmu_idx, uintptr_t ra); +void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val, + int mmu_idx, uintptr_t ra); + +uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra); +uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr ptr, + MemOpIdx oi, uintptr_t ra); +uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr ptr, + MemOpIdx oi, uintptr_t ra); +uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr ptr, + MemOpIdx oi, uintptr_t ra); +uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr ptr, + MemOpIdx oi, uintptr_t ra); +uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr ptr, + MemOpIdx oi, uintptr_t ra); +uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr ptr, + MemOpIdx oi, uintptr_t ra); + +void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_stw_be_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_stl_be_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_stq_be_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_stw_le_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_stl_le_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_stq_le_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val, + MemOpIdx oi, uintptr_t ra); + +uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr, + uint32_t cmpv, uint32_t newv, + MemOpIdx oi, uintptr_t retaddr); +uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr, + uint32_t cmpv, uint32_t newv, + MemOpIdx oi, uintptr_t retaddr); +uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr, + uint32_t cmpv, uint32_t newv, + MemOpIdx oi, uintptr_t retaddr); +uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr, + uint64_t cmpv, uint64_t newv, + MemOpIdx oi, uintptr_t retaddr); +uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr, + uint32_t cmpv, uint32_t newv, + MemOpIdx oi, uintptr_t retaddr); +uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr, + uint32_t cmpv, uint32_t newv, + MemOpIdx oi, uintptr_t retaddr); +uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr, + uint64_t cmpv, uint64_t newv, + MemOpIdx oi, uintptr_t retaddr); + +#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \ +TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \ + (CPUArchState *env, target_ulong addr, TYPE val, \ + MemOpIdx oi, uintptr_t retaddr); + +#ifdef CONFIG_ATOMIC64 +#define GEN_ATOMIC_HELPER_ALL(NAME) \ + 
GEN_ATOMIC_HELPER(NAME, uint32_t, b) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \ + GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \ + GEN_ATOMIC_HELPER(NAME, uint64_t, q_be) +#else +#define GEN_ATOMIC_HELPER_ALL(NAME) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, b) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) +#endif + +GEN_ATOMIC_HELPER_ALL(fetch_add) +GEN_ATOMIC_HELPER_ALL(fetch_sub) +GEN_ATOMIC_HELPER_ALL(fetch_and) +GEN_ATOMIC_HELPER_ALL(fetch_or) +GEN_ATOMIC_HELPER_ALL(fetch_xor) +GEN_ATOMIC_HELPER_ALL(fetch_smin) +GEN_ATOMIC_HELPER_ALL(fetch_umin) +GEN_ATOMIC_HELPER_ALL(fetch_smax) +GEN_ATOMIC_HELPER_ALL(fetch_umax) + +GEN_ATOMIC_HELPER_ALL(add_fetch) +GEN_ATOMIC_HELPER_ALL(sub_fetch) +GEN_ATOMIC_HELPER_ALL(and_fetch) +GEN_ATOMIC_HELPER_ALL(or_fetch) +GEN_ATOMIC_HELPER_ALL(xor_fetch) +GEN_ATOMIC_HELPER_ALL(smin_fetch) +GEN_ATOMIC_HELPER_ALL(umin_fetch) +GEN_ATOMIC_HELPER_ALL(smax_fetch) +GEN_ATOMIC_HELPER_ALL(umax_fetch) + +GEN_ATOMIC_HELPER_ALL(xchg) + +#undef GEN_ATOMIC_HELPER_ALL +#undef GEN_ATOMIC_HELPER + +Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr, + Int128 cmpv, Int128 newv, + MemOpIdx oi, uintptr_t retaddr); +Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr, + Int128 cmpv, Int128 newv, + MemOpIdx oi, uintptr_t retaddr); + +Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val, + MemOpIdx oi, uintptr_t retaddr); +void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val, + MemOpIdx oi, uintptr_t retaddr); + #if defined(CONFIG_USER_ONLY) extern __thread uintptr_t helper_retaddr; @@ -193,119 +345,6 @@ static inline void clear_helper_retaddr(void) helper_retaddr = 0; } -/* - * Provide the same *_mmuidx_ra interface as for softmmu. - * The mmu_idx argument is ignored. 
- */ - -static inline uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_ldub_data_ra(env, addr, ra); -} - -static inline int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_ldsb_data_ra(env, addr, ra); -} - -static inline uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_lduw_be_data_ra(env, addr, ra); -} - -static inline int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_ldsw_be_data_ra(env, addr, ra); -} - -static inline uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_ldl_be_data_ra(env, addr, ra); -} - -static inline uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_ldq_be_data_ra(env, addr, ra); -} - -static inline uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_lduw_le_data_ra(env, addr, ra); -} - -static inline int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_ldsw_le_data_ra(env, addr, ra); -} - -static inline uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_ldl_le_data_ra(env, addr, ra); -} - -static inline uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_ldq_le_data_ra(env, addr, ra); -} - -static inline void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, - uint32_t val, int mmu_idx, uintptr_t ra) -{ - cpu_stb_data_ra(env, addr, val, ra); -} - -static inline void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - uint32_t val, int mmu_idx, - uintptr_t ra) -{ - cpu_stw_be_data_ra(env, addr, val, ra); -} - -static inline void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - uint32_t val, int mmu_idx, - uintptr_t ra) -{ - cpu_stl_be_data_ra(env, addr, val, ra); -} - -static inline void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - uint64_t val, int mmu_idx, - uintptr_t ra) -{ - cpu_stq_be_data_ra(env, addr, val, ra); -} - -static inline void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - uint32_t val, int mmu_idx, - uintptr_t ra) -{ - cpu_stw_le_data_ra(env, addr, val, ra); -} - -static inline void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - uint32_t val, int mmu_idx, - uintptr_t ra) -{ - cpu_stl_le_data_ra(env, addr, val, ra); -} - -static inline void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - uint64_t val, int mmu_idx, - uintptr_t ra) -{ - cpu_stq_le_data_ra(env, addr, val, ra); -} - #else /* Needed for TCG_OVERSIZED_GUEST */ @@ -336,49 +375,9 @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx, return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)]; } -uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); -int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); - -uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); -int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); -uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); -uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); - -uint32_t 
cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); -int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); -uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); -uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); - -void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, - int mmu_idx, uintptr_t retaddr); - -void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, - int mmu_idx, uintptr_t retaddr); -void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, - int mmu_idx, uintptr_t retaddr); -void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, - int mmu_idx, uintptr_t retaddr); - -void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, - int mmu_idx, uintptr_t retaddr); -void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, - int mmu_idx, uintptr_t retaddr); -void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, - int mmu_idx, uintptr_t retaddr); - #endif /* defined(CONFIG_USER_ONLY) */ -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN # define cpu_lduw_data cpu_lduw_be_data # define cpu_ldsw_data cpu_ldsw_be_data # define cpu_ldl_data cpu_ldl_be_data @@ -391,6 +390,9 @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, # define cpu_ldsw_mmuidx_ra cpu_ldsw_be_mmuidx_ra # define cpu_ldl_mmuidx_ra cpu_ldl_be_mmuidx_ra # define cpu_ldq_mmuidx_ra cpu_ldq_be_mmuidx_ra +# define cpu_ldw_mmu cpu_ldw_be_mmu +# define cpu_ldl_mmu cpu_ldl_be_mmu +# define cpu_ldq_mmu cpu_ldq_be_mmu # define cpu_stw_data cpu_stw_be_data # define cpu_stl_data cpu_stl_be_data # define cpu_stq_data cpu_stq_be_data @@ -400,6 +402,9 @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, # define cpu_stw_mmuidx_ra cpu_stw_be_mmuidx_ra # define cpu_stl_mmuidx_ra cpu_stl_be_mmuidx_ra # define cpu_stq_mmuidx_ra cpu_stq_be_mmuidx_ra +# define cpu_stw_mmu cpu_stw_be_mmu +# define cpu_stl_mmu cpu_stl_be_mmu +# define cpu_stq_mmu cpu_stq_be_mmu #else # define cpu_lduw_data cpu_lduw_le_data # define cpu_ldsw_data cpu_ldsw_le_data @@ -413,6 +418,9 @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, # define cpu_ldsw_mmuidx_ra cpu_ldsw_le_mmuidx_ra # define cpu_ldl_mmuidx_ra cpu_ldl_le_mmuidx_ra # define cpu_ldq_mmuidx_ra cpu_ldq_le_mmuidx_ra +# define cpu_ldw_mmu cpu_ldw_le_mmu +# define cpu_ldl_mmu cpu_ldl_le_mmu +# define cpu_ldq_mmu cpu_ldq_le_mmu # define cpu_stw_data cpu_stw_le_data # define cpu_stl_data cpu_stl_le_data # define cpu_stq_data cpu_stq_le_data @@ -422,6 +430,9 @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, # define cpu_stw_mmuidx_ra cpu_stw_le_mmuidx_ra # define cpu_stl_mmuidx_ra cpu_stl_le_mmuidx_ra # define cpu_stq_mmuidx_ra cpu_stq_le_mmuidx_ra +# define cpu_stw_mmu cpu_stw_le_mmu +# define cpu_stl_mmu cpu_stl_le_mmu +# define cpu_stq_mmu cpu_stq_le_mmu #endif uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr); diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 1c2aff028a..1071c25458 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -24,7 +24,6 @@ #ifdef CONFIG_TCG #include "exec/cpu_ldst.h" #endif -#include "sysemu/cpu-timers.h" /* allow to see translation results - the slowdown should be negligible, so we leave it */ #define DEBUG_DISAS @@ -40,31 +39,35 @@ typedef ram_addr_t tb_page_addr_t; #define 
TB_PAGE_ADDR_FMT RAM_ADDR_FMT #endif -#include "qemu/log.h" - -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns); -void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb, - target_ulong *data); +/** + * cpu_unwind_state_data: + * @cpu: the cpu context + * @host_pc: the host pc within the translation + * @data: output data + * + * Attempt to load the unwind state for a host pc occurring in + * translated code. If @host_pc is not in translated code, the + * function returns false; otherwise @data is loaded. + * This is the same unwind info as given to restore_state_to_opc. + */ +bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data); /** * cpu_restore_state: - * @cpu: the vCPU state is to be restore to - * @searched_pc: the host PC the fault occurred at - * @will_exit: true if the TB executed will be interrupted after some - cpu adjustments. Required for maintaining the correct - icount valus + * @cpu: the cpu context + * @host_pc: the host pc within the translation * @return: true if state was restored, false otherwise * * Attempt to restore the state for a fault occurring in translated - * code. If the searched_pc is not in translated code no state is + * code. If @host_pc is not in translated code no state is * restored and the function returns false. */ -bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit); +bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc); -void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu); -void QEMU_NORETURN cpu_loop_exit(CPUState *cpu); -void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc); -void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc); +G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu); +G_NORETURN void cpu_loop_exit(CPUState *cpu); +G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc); +G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc); /** * cpu_loop_exit_requested: @@ -83,31 +86,6 @@ static inline bool cpu_loop_exit_requested(CPUState *cpu) return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0; } -#if !defined(CONFIG_USER_ONLY) -void cpu_reloading_memory_map(void); -/** - * cpu_address_space_init: - * @cpu: CPU to add this address space to - * @asidx: integer index of this address space - * @prefix: prefix to be used as name of address space - * @mr: the root memory region of address space - * - * Add the specified address space to the CPU's cpu_ases list. - * The address space added with @asidx 0 is the one used for the - * convenience pointer cpu->as. - * The target-specific code which registers ASes is responsible - * for defining what semantics address space 0, 1, 2, etc have. - * - * Before the first call to this function, the caller must set - * cpu->num_ases to the total number of address spaces it needs - * to support. - * - * Note that with KVM only one address space is supported. - */ -void cpu_address_space_init(CPUState *cpu, int asidx, - const char *prefix, MemoryRegion *mr); -#endif - #if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG) /* cputlb.c */ /** @@ -286,6 +264,28 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap, unsigned bits); +/** + * tlb_set_page_full: + * @cpu: CPU context + * @mmu_idx: mmu index of the tlb to modify + * @vaddr: virtual address of the entry to add + * @full: the details of the tlb entry + * + * Add an entry to @cpu tlb index @mmu_idx.
All of the fields of + * @full must be filled, except for xlat_section, and constitute + * the complete description of the translated page. + * + * This is generally called by the target tlb_fill function after + * having performed a successful page table walk to find the physical + * address and attributes for the translation. + * + * At most one entry for a given virtual address is permitted. Only a + * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only + * used by tlb_flush_page. + */ +void tlb_set_page_full(CPUState *cpu, int mmu_idx, target_ulong vaddr, + CPUTLBEntryFull *full); + /** * tlb_set_page_with_attrs: * @cpu: CPU to add this TLB entry for @@ -463,6 +463,21 @@ int probe_access_flags(CPUArchState *env, target_ulong addr, MMUAccessType access_type, int mmu_idx, bool nonfault, void **phost, uintptr_t retaddr); +#ifndef CONFIG_USER_ONLY +/** + * probe_access_full: + * Like probe_access_flags, except also return into @pfull. + * + * The CPUTLBEntryFull structure returned via @pfull is transient + * and must be consumed or copied immediately, before any further + * access or changes to TLB @mmu_idx. + */ +int probe_access_full(CPUArchState *env, target_ulong addr, + MMUAccessType access_type, int mmu_idx, + bool nonfault, void **phost, + CPUTLBEntryFull **pfull, uintptr_t retaddr); +#endif + #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ /* Estimated block size for TB allocation. */ @@ -488,8 +503,32 @@ struct tb_tc { }; struct TranslationBlock { - target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */ - target_ulong cs_base; /* CS base for this block */ +#if !TARGET_TB_PCREL + /* + * Guest PC corresponding to this block. This must be the true + * virtual address. Therefore e.g. x86 stores EIP + CS_BASE, and + * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or + * privilege, must store those bits elsewhere. + * + * If TARGET_TB_PCREL, the opcodes for the TranslationBlock are + * written such that the TB is associated only with the physical + * page and may be run in any virtual address context. In this case, + * PC must always be taken from ENV in a target-specific manner. + * Unwind information is taken as offsets from the page, to be + * deposited into the "current" PC. + */ + target_ulong pc; +#endif + + /* + * Target-specific data associated with the TranslationBlock, e.g.: + * x86: the original user, the Code Segment virtual base, + * arm: an extension of tb->flags, + * s390x: instruction data for EXECUTE, + * sparc: the next pc of the instruction queue (for delay slots). + */ + target_ulong cs_base; + uint32_t flags; /* flags defining in which context the code was generated */ uint32_t cflags; /* compile flags */ @@ -503,6 +542,7 @@ struct TranslationBlock { #define CF_USE_ICOUNT 0x00020000 #define CF_INVALID 0x00040000 /* TB is stale. Set with @jmp_lock held */ #define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */ +#define CF_NOIRQ 0x00100000 /* Generate an uninterruptible TB */ #define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */ #define CF_CLUSTER_SHIFT 24 @@ -562,31 +602,64 @@ struct TranslationBlock { uintptr_t jmp_dest[2]; }; +/* Hide the read to avoid ifdefs for TARGET_TB_PCREL. 
*/ +static inline target_ulong tb_pc(const TranslationBlock *tb) +{ +#if TARGET_TB_PCREL + qemu_build_not_reached(); +#else + return tb->pc; +#endif +} + /* Hide the qatomic_read to make code a little easier on the eyes */ static inline uint32_t tb_cflags(const TranslationBlock *tb) { return qatomic_read(&tb->cflags); } +static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb) +{ + return tb->page_addr[0]; +} + +static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb) +{ + return tb->page_addr[1]; +} + +static inline void tb_set_page_addr0(TranslationBlock *tb, + tb_page_addr_t addr) +{ + tb->page_addr[0] = addr; +} + +static inline void tb_set_page_addr1(TranslationBlock *tb, + tb_page_addr_t addr) +{ + tb->page_addr[1] = addr; +} + /* current cflags for hashing/comparison */ uint32_t curr_cflags(CPUState *cpu); /* TranslationBlock invalidate API */ #if defined(CONFIG_USER_ONLY) void tb_invalidate_phys_addr(target_ulong addr); -void tb_invalidate_phys_range(target_ulong start, target_ulong end); #else void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs); #endif void tb_flush(CPUState *cpu); void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr); +void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end); +void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr); + TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, target_ulong cs_base, uint32_t flags, uint32_t cflags); TranslationBlock *inv_tb_htable_lookup(CPUState *cpu, target_ulong pc, target_ulong cs_base, uint32_t flags, uint32_t cflags); -void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr); /* GETPC is the true target of the return instruction that we'll execute. */ #if defined(CONFIG_TCG_INTERPRETER) @@ -606,14 +679,6 @@ extern __thread uintptr_t tci_tb_ptr; smaller than 4 bytes, so we don't worry about special-casing this. */ #define GETPC_ADJ 2 -#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG) -void assert_no_pages_locked(void); -#else -static inline void assert_no_pages_locked(void) -{ -} -#endif - #if !defined(CONFIG_USER_ONLY) /** @@ -629,62 +694,8 @@ struct MemoryRegionSection *iotlb_to_section(CPUState *cpu, hwaddr index, MemTxAttrs attrs); #endif -#if defined(CONFIG_USER_ONLY) -void mmap_lock(void); -void mmap_unlock(void); -bool have_mmap_lock(void); - /** - * get_page_addr_code() - user-mode version - * @env: CPUArchState - * @addr: guest virtual address of guest code - * - * Returns @addr. - */ -static inline tb_page_addr_t get_page_addr_code(CPUArchState *env, - target_ulong addr) -{ - return addr; -} - -/** - * get_page_addr_code_hostp() - user-mode version - * @env: CPUArchState - * @addr: guest virtual address of guest code - * - * Returns @addr. - * - * If @hostp is non-NULL, sets *@hostp to the host address where @addr's content - * is kept. - */ -static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, - target_ulong addr, - void **hostp) -{ - if (hostp) { - *hostp = g2h_untagged(addr); - } - return addr; -} -#else -static inline void mmap_lock(void) {} -static inline void mmap_unlock(void) {} - -/** - * get_page_addr_code() - full-system version - * @env: CPUArchState - * @addr: guest virtual address of guest code - * - * If we cannot translate and execute from the entire RAM page, or if - * the region is not backed by RAM, returns -1. Otherwise, returns the - * ram_addr_t corresponding to the guest code at @addr. 
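A minimal sketch of the accessor style introduced above for TranslationBlock page addresses (the helper is hypothetical; the -1 sentinel for "no second page" is existing QEMU convention assumed here, not something shown in this hunk):

/* Query whether a TB spans two guest pages without touching
 * tb->page_addr[] directly. */
static bool tb_crosses_page(const TranslationBlock *tb)
{
    return tb_page_addr1(tb) != (tb_page_addr_t)-1;
}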
- * - * Note: this function can trigger an exception. - */ -tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr); - -/** - * get_page_addr_code_hostp() - full-system version + * get_page_addr_code_hostp() * @env: CPUArchState * @addr: guest virtual address of guest code * * @@ -700,6 +711,83 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr); tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, void **hostp); +/** + * get_page_addr_code() + * @env: CPUArchState + * @addr: guest virtual address of guest code + * + * If we cannot translate and execute from the entire RAM page, or if + * the region is not backed by RAM, returns -1. Otherwise, returns the + * ram_addr_t corresponding to the guest code at @addr. + * + * Note: this function can trigger an exception. + */ +static inline tb_page_addr_t get_page_addr_code(CPUArchState *env, + target_ulong addr) +{ + return get_page_addr_code_hostp(env, addr, NULL); +} + +#if defined(CONFIG_USER_ONLY) +void mmap_lock(void); +void mmap_unlock(void); +bool have_mmap_lock(void); + +/** + * adjust_signal_pc: + * @pc: raw pc from the host signal ucontext_t. + * @is_write: host memory operation was write, or read-modify-write. + * + * Alter @pc as required for unwinding. Return the type of the + * guest memory access -- host reads may be for guest execution. + */ +MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write); + +/** + * handle_sigsegv_accerr_write: + * @cpu: the cpu context + * @old_set: the sigset_t from the signal ucontext_t + * @host_pc: the host pc, adjusted for the signal + * @guest_addr: the guest address of the fault + * + * Return true if the write fault has been handled, and should be re-tried. + */ +bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set, + uintptr_t host_pc, abi_ptr guest_addr); + +/** + * cpu_loop_exit_sigsegv: + * @cpu: the cpu context + * @addr: the guest address of the fault + * @access_type: access was read/write/execute + * @maperr: true for invalid page, false for permission fault + * @ra: host pc for unwinding + * + * Use the TCGCPUOps hook to record cpu state, do guest operating system + * specific things to raise SIGSEGV, and jump to the main cpu loop. + */ +G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr, + MMUAccessType access_type, + bool maperr, uintptr_t ra); + +/** + * cpu_loop_exit_sigbus: + * @cpu: the cpu context + * @addr: the guest address of the alignment fault + * @access_type: access was read/write/execute + * @ra: host pc for unwinding + * + * Use the TCGCPUOps hook to record cpu state, do guest operating system + * specific things to raise SIGBUS, and jump to the main cpu loop. + */ +G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr, + MMUAccessType access_type, + uintptr_t ra); + +#else +static inline void mmap_lock(void) {} +static inline void mmap_unlock(void) {} + void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length); void tlb_set_dirty(CPUState *cpu, target_ulong vaddr); diff --git a/include/exec/gdbstub.h b/include/exec/gdbstub.h index a024a0350d..f667014888 100644 --- a/include/exec/gdbstub.h +++ b/include/exec/gdbstub.h @@ -10,11 +10,71 @@ #define GDB_WATCHPOINT_READ 3 #define GDB_WATCHPOINT_ACCESS 4 +/* For gdb file i/o remote protocol open flags. 
*/ +#define GDB_O_RDONLY 0 +#define GDB_O_WRONLY 1 +#define GDB_O_RDWR 2 +#define GDB_O_APPEND 8 +#define GDB_O_CREAT 0x200 +#define GDB_O_TRUNC 0x400 +#define GDB_O_EXCL 0x800 + +/* For gdb file i/o remote protocol errno values */ +#define GDB_EPERM 1 +#define GDB_ENOENT 2 +#define GDB_EINTR 4 +#define GDB_EBADF 9 +#define GDB_EACCES 13 +#define GDB_EFAULT 14 +#define GDB_EBUSY 16 +#define GDB_EEXIST 17 +#define GDB_ENODEV 19 +#define GDB_ENOTDIR 20 +#define GDB_EISDIR 21 +#define GDB_EINVAL 22 +#define GDB_ENFILE 23 +#define GDB_EMFILE 24 +#define GDB_EFBIG 27 +#define GDB_ENOSPC 28 +#define GDB_ESPIPE 29 +#define GDB_EROFS 30 +#define GDB_ENAMETOOLONG 91 +#define GDB_EUNKNOWN 9999 + +/* For gdb file i/o remote protocol lseek whence. */ +#define GDB_SEEK_SET 0 +#define GDB_SEEK_CUR 1 +#define GDB_SEEK_END 2 + +/* For gdb file i/o stat/fstat. */ +typedef uint32_t gdb_mode_t; +typedef uint32_t gdb_time_t; + +struct gdb_stat { + uint32_t gdb_st_dev; /* device */ + uint32_t gdb_st_ino; /* inode */ + gdb_mode_t gdb_st_mode; /* protection */ + uint32_t gdb_st_nlink; /* number of hard links */ + uint32_t gdb_st_uid; /* user ID of owner */ + uint32_t gdb_st_gid; /* group ID of owner */ + uint32_t gdb_st_rdev; /* device type (if inode device) */ + uint64_t gdb_st_size; /* total size, in bytes */ + uint64_t gdb_st_blksize; /* blocksize for filesystem I/O */ + uint64_t gdb_st_blocks; /* number of blocks allocated */ + gdb_time_t gdb_st_atime; /* time of last access */ + gdb_time_t gdb_st_mtime; /* time of last modification */ + gdb_time_t gdb_st_ctime; /* time of last change */ +} QEMU_PACKED; + +struct gdb_timeval { + gdb_time_t tv_sec; /* second */ + uint64_t tv_usec; /* microsecond */ +} QEMU_PACKED; + #ifdef NEED_CPU_H #include "cpu.h" -typedef void (*gdb_syscall_complete_cb)(CPUState *cpu, - target_ulong ret, target_ulong err); +typedef void (*gdb_syscall_complete_cb)(CPUState *cpu, uint64_t ret, int err); /** * gdb_do_syscall: @@ -45,17 +105,6 @@ void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...); */ void gdb_do_syscallv(gdb_syscall_complete_cb cb, const char *fmt, va_list va); int use_gdb_syscalls(void); -void gdb_set_stop_cpu(CPUState *cpu); - -/** - * gdb_exit: exit gdb session, reporting inferior status - * @code: exit code reported - * - * This closes the session and sends a final packet to GDB reporting - * the exit status of the program. It also cleans up any connections - * detritus before returning. - */ -void gdb_exit(int code); #ifdef CONFIG_USER_ONLY /** @@ -121,7 +170,7 @@ static inline int gdb_get_reg128(GByteArray *buf, uint64_t val_hi, uint64_t val_lo) { uint64_t to_quad; -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN to_quad = tswap64(val_hi); g_byte_array_append(buf, (uint8_t *) &to_quad, 8); to_quad = tswap64(val_lo); @@ -165,7 +214,7 @@ static inline uint8_t * gdb_get_reg_ptr(GByteArray *buf, int len) #define ldtul_p(addr) ldl_p(addr) #endif -#endif +#endif /* NEED_CPU_H */ /** * gdbserver_start: start the gdb server @@ -177,6 +226,18 @@ static inline uint8_t * gdb_get_reg_ptr(GByteArray *buf, int len) */ int gdbserver_start(const char *port_or_device); +/** + * gdb_exit: exit gdb session, reporting inferior status + * @code: exit code reported + * + * This closes the session and sends a final packet to GDB reporting + * the exit status of the program. It also cleans up any connections + * detritus before returning. 
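The GDB_E* values above follow the gdb file-i/o remote protocol numbering rather than the host's <errno.h>, so a caller reporting a failure has to translate; a minimal sketch with an invented helper name:

#include <errno.h>

/* Map a host errno to the gdb file-i/o protocol value defined above. */
static int errno_to_gdb(int err)
{
    switch (err) {
    case EPERM:   return GDB_EPERM;
    case ENOENT:  return GDB_ENOENT;
    case EINTR:   return GDB_EINTR;
    case EBADF:   return GDB_EBADF;
    case EACCES:  return GDB_EACCES;
    case ENOSPC:  return GDB_ENOSPC;
    default:      return GDB_EUNKNOWN;
    }
}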
+ */ +void gdb_exit(int code); + +void gdb_set_stop_cpu(CPUState *cpu); + /** * gdb_has_xml: * This is an ugly hack to cope with both new and old gdb. diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h index 467529d84c..c57204ddad 100644 --- a/include/exec/gen-icount.h +++ b/include/exec/gen-icount.h @@ -17,27 +17,10 @@ static inline void gen_io_start(void) tcg_temp_free_i32(tmp); } -/* - * cpu->can_do_io is cleared automatically at the beginning of - * each translation block. The cost is minimal and only paid - * for -icount, plus it would be very easy to forget doing it - * in the translator. Therefore, backends only need to call - * gen_io_start. - */ -static inline void gen_io_end(void) -{ - TCGv_i32 tmp = tcg_const_i32(0); - tcg_gen_st_i32(tmp, cpu_env, - offsetof(ArchCPU, parent_obj.can_do_io) - - offsetof(ArchCPU, env)); - tcg_temp_free_i32(tmp); -} - static inline void gen_tb_start(const TranslationBlock *tb) { TCGv_i32 count; - tcg_ctx->exitreq_label = gen_new_label(); if (tb_cflags(tb) & CF_USE_ICOUNT) { count = tcg_temp_local_new_i32(); } else { @@ -58,13 +41,34 @@ static inline void gen_tb_start(const TranslationBlock *tb) icount_start_insn = tcg_last_op(); } - tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label); + /* + * Emit the check against icount_decr.u32 to see if we should exit + * unless we suppress the check with CF_NOIRQ. If we are using + * icount and have suppressed interruption the higher level code + * should have ensured we don't run more instructions than the + * budget. + */ + if (tb_cflags(tb) & CF_NOIRQ) { + tcg_ctx->exitreq_label = NULL; + } else { + tcg_ctx->exitreq_label = gen_new_label(); + tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label); + } if (tb_cflags(tb) & CF_USE_ICOUNT) { tcg_gen_st16_i32(count, cpu_env, offsetof(ArchCPU, neg.icount_decr.u16.low) - offsetof(ArchCPU, env)); - gen_io_end(); + /* + * cpu->can_do_io is cleared automatically here at the beginning of + * each translation block. The cost is minimal and only paid for + * -icount, plus it would be very easy to forget doing it in the + * translator. Doing it here means we don't need a gen_io_end() to + * go with gen_io_start(). 
+ */ + tcg_gen_st_i32(tcg_constant_i32(0), cpu_env, + offsetof(ArchCPU, parent_obj.can_do_io) - + offsetof(ArchCPU, env)); } tcg_temp_free_i32(count); @@ -81,8 +85,10 @@ static inline void gen_tb_end(const TranslationBlock *tb, int num_insns) tcgv_i32_arg(tcg_constant_i32(num_insns))); } - gen_set_label(tcg_ctx->exitreq_label); - tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED); + if (tcg_ctx->exitreq_label) { + gen_set_label(tcg_ctx->exitreq_label); + tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED); + } } #endif diff --git a/include/exec/helper-gen.h b/include/exec/helper-gen.h index 1c2e7a8ed3..7b6ca975ef 100644 --- a/include/exec/helper-gen.h +++ b/include/exec/helper-gen.h @@ -79,8 +79,6 @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \ } #include "helper.h" -#include "trace/generated-helpers.h" -#include "trace/generated-helpers-wrappers.h" #include "accel/tcg/tcg-runtime.h" #include "accel/tcg/plugin-helpers.h" diff --git a/include/exec/helper-head.h b/include/exec/helper-head.h index b974eb394a..e242fed46e 100644 --- a/include/exec/helper-head.h +++ b/include/exec/helper-head.h @@ -46,20 +46,23 @@ #define dh_ctype_ptr void * #define dh_ctype_cptr const void * #define dh_ctype_void void -#define dh_ctype_noreturn void QEMU_NORETURN +#define dh_ctype_noreturn G_NORETURN void #define dh_ctype(t) dh_ctype_##t #ifdef NEED_CPU_H # ifdef TARGET_LONG_BITS # if TARGET_LONG_BITS == 32 # define dh_alias_tl i32 +# define dh_typecode_tl dh_typecode_i32 # else # define dh_alias_tl i64 +# define dh_typecode_tl dh_typecode_i64 # endif # endif -# define dh_alias_env ptr # define dh_ctype_tl target_ulong +# define dh_alias_env ptr # define dh_ctype_env CPUArchState * +# define dh_typecode_env dh_typecode_ptr #endif /* We can't use glue() here because it falls foul of C preprocessor @@ -92,18 +95,16 @@ #define dh_typecode_i64 4 #define dh_typecode_s64 5 #define dh_typecode_ptr 6 -#define dh_typecode(t) glue(dh_typecode_, dh_alias(t)) +#define dh_typecode_int dh_typecode_s32 +#define dh_typecode_f16 dh_typecode_i32 +#define dh_typecode_f32 dh_typecode_i32 +#define dh_typecode_f64 dh_typecode_i64 +#define dh_typecode_cptr dh_typecode_ptr +#define dh_typecode(t) dh_typecode_##t #define dh_callflag_i32 0 -#define dh_callflag_s32 0 -#define dh_callflag_int 0 #define dh_callflag_i64 0 -#define dh_callflag_s64 0 -#define dh_callflag_f16 0 -#define dh_callflag_f32 0 -#define dh_callflag_f64 0 #define dh_callflag_ptr 0 -#define dh_callflag_cptr dh_callflag_ptr #define dh_callflag_void 0 #define dh_callflag_noreturn TCG_CALL_NO_RETURN #define dh_callflag(t) glue(dh_callflag_, dh_alias(t)) diff --git a/include/exec/helper-proto.h b/include/exec/helper-proto.h index ba100793a7..c4b1bda632 100644 --- a/include/exec/helper-proto.h +++ b/include/exec/helper-proto.h @@ -38,7 +38,6 @@ dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \ #define IN_HELPER_PROTO #include "helper.h" -#include "trace/generated-helpers.h" #include "accel/tcg/tcg-runtime.h" #include "accel/tcg/plugin-helpers.h" diff --git a/include/exec/helper-tcg.h b/include/exec/helper-tcg.h index 16cd318b83..3933258f1a 100644 --- a/include/exec/helper-tcg.h +++ b/include/exec/helper-tcg.h @@ -59,7 +59,6 @@ | dh_typemask(t5, 5) | dh_typemask(t6, 6) | dh_typemask(t7, 7) }, #include "helper.h" -#include "trace/generated-helpers.h" #include "accel/tcg/tcg-runtime.h" #include "accel/tcg/plugin-helpers.h" diff --git a/include/exec/log.h b/include/exec/log.h index 3c7fa65ead..4a7375a45f 100644 --- a/include/exec/log.h +++ b/include/exec/log.h 
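To connect CF_NOIRQ with the gen_tb_start() change above: a caller that needs the next translation block to execute without the interrupt-exit check can fold the flag into the cflags used for the TB lookup. A sketch assuming the existing CPUState::cflags_next_tb mechanism, which is not introduced by this patch:

/* Illustration only: request an uninterruptible TB for the next lookup.
 * cpu-exec consumes cflags_next_tb and then resets it to -1. */
static void request_noirq_tb(CPUState *cpu)
{
    cpu->cflags_next_tb = curr_cflags(cpu) | CF_NOIRQ;
}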
@@ -15,15 +15,10 @@ */ static inline void log_cpu_state(CPUState *cpu, int flags) { - QemuLogFile *logfile; - - if (qemu_log_enabled()) { - rcu_read_lock(); - logfile = qatomic_rcu_read(&qemu_logfile); - if (logfile) { - cpu_dump_state(cpu, logfile->fd, flags); - } - rcu_read_unlock(); + FILE *f = qemu_log_trylock(); + if (f) { + cpu_dump_state(cpu, f, flags); + qemu_log_unlock(f); } } @@ -42,43 +37,4 @@ static inline void log_cpu_state_mask(int mask, CPUState *cpu, int flags) } } -#ifdef NEED_CPU_H -/* disas() and target_disas() to qemu_logfile: */ -static inline void log_target_disas(CPUState *cpu, target_ulong start, - target_ulong len) -{ - QemuLogFile *logfile; - rcu_read_lock(); - logfile = qatomic_rcu_read(&qemu_logfile); - if (logfile) { - target_disas(logfile->fd, cpu, start, len); - } - rcu_read_unlock(); -} - -static inline void log_disas(const void *code, unsigned long size) -{ - QemuLogFile *logfile; - rcu_read_lock(); - logfile = qatomic_rcu_read(&qemu_logfile); - if (logfile) { - disas(logfile->fd, code, size); - } - rcu_read_unlock(); -} - -#if defined(CONFIG_USER_ONLY) -/* page_dump() output to the log file: */ -static inline void log_page_dump(const char *operation) -{ - FILE *logfile = qemu_log_lock(); - if (logfile) { - qemu_log("page layout changed following %s\n", operation); - page_dump(logfile); - } - qemu_log_unlock(logfile); -} -#endif -#endif - #endif diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h index 95f2d20d55..9fb98bc1ef 100644 --- a/include/exec/memattrs.h +++ b/include/exec/memattrs.h @@ -35,6 +35,14 @@ typedef struct MemTxAttrs { unsigned int secure:1; /* Memory access is usermode (unprivileged) */ unsigned int user:1; + /* + * Bus interconnect and peripherals can access anything (memories, + * devices) by default. By setting the 'memory' bit, bus transactions + * are restricted to "normal" memories (per the AMBA documentation) + * versus devices. Access to devices will be logged and rejected + * (see MEMTX_ACCESS_ERROR). + */ + unsigned int memory:1; /* Requester ID (for MSI for example) */ unsigned int requester_id:16; /* Invert endianness for this page */ @@ -66,6 +74,7 @@ typedef struct MemTxAttrs { #define MEMTX_OK 0 #define MEMTX_ERROR (1U << 0) /* device returned an error */ #define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */ +#define MEMTX_ACCESS_ERROR (1U << 2) /* access denied */ typedef uint32_t MemTxResult; #endif diff --git a/include/exec/memop.h b/include/exec/memop.h index 529d07b02d..25d027434a 100644 --- a/include/exec/memop.h +++ b/include/exec/memop.h @@ -19,12 +19,16 @@ typedef enum MemOp { MO_16 = 1, MO_32 = 2, MO_64 = 3, - MO_SIZE = 3, /* Mask for the above. */ + MO_128 = 4, + MO_256 = 5, + MO_512 = 6, + MO_1024 = 7, + MO_SIZE = 0x07, /* Mask for the above. */ - MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */ + MO_SIGN = 0x08, /* Sign-extended, otherwise zero-extended. */ - MO_BSWAP = 8, /* Host reverse endian. 
*/ +#if HOST_BIG_ENDIAN MO_LE = MO_BSWAP, MO_BE = 0, #else @@ -32,7 +36,7 @@ typedef enum MemOp { MO_BE = MO_BSWAP, #endif #ifdef NEED_CPU_H -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN MO_TE = MO_BE, #else MO_TE = MO_LE, @@ -59,8 +63,8 @@ typedef enum MemOp { * - an alignment to a specified size, which may be more or less than * the access size (MO_ALIGN_x where 'x' is a size in bytes); */ - MO_ASHIFT = 4, - MO_AMASK = 7 << MO_ASHIFT, + MO_ASHIFT = 5, + MO_AMASK = 0x7 << MO_ASHIFT, #ifdef NEED_CPU_H #ifdef TARGET_ALIGNED_ONLY MO_ALIGN = 0, @@ -81,29 +85,36 @@ typedef enum MemOp { MO_UB = MO_8, MO_UW = MO_16, MO_UL = MO_32, + MO_UQ = MO_64, + MO_UO = MO_128, MO_SB = MO_SIGN | MO_8, MO_SW = MO_SIGN | MO_16, MO_SL = MO_SIGN | MO_32, - MO_Q = MO_64, + MO_SQ = MO_SIGN | MO_64, + MO_SO = MO_SIGN | MO_128, MO_LEUW = MO_LE | MO_UW, MO_LEUL = MO_LE | MO_UL, + MO_LEUQ = MO_LE | MO_UQ, MO_LESW = MO_LE | MO_SW, MO_LESL = MO_LE | MO_SL, - MO_LEQ = MO_LE | MO_Q, + MO_LESQ = MO_LE | MO_SQ, MO_BEUW = MO_BE | MO_UW, MO_BEUL = MO_BE | MO_UL, + MO_BEUQ = MO_BE | MO_UQ, MO_BESW = MO_BE | MO_SW, MO_BESL = MO_BE | MO_SL, - MO_BEQ = MO_BE | MO_Q, + MO_BESQ = MO_BE | MO_SQ, #ifdef NEED_CPU_H MO_TEUW = MO_TE | MO_UW, MO_TEUL = MO_TE | MO_UL, + MO_TEUQ = MO_TE | MO_UQ, + MO_TEUO = MO_TE | MO_UO, MO_TESW = MO_TE | MO_SW, MO_TESL = MO_TE | MO_SL, - MO_TEQ = MO_TE | MO_Q, + MO_TESQ = MO_TE | MO_SQ, #endif MO_SSIZE = MO_SIZE | MO_SIGN, diff --git a/include/exec/memopidx.h b/include/exec/memopidx.h new file mode 100644 index 0000000000..eb7f1591a3 --- /dev/null +++ b/include/exec/memopidx.h @@ -0,0 +1,55 @@ +/* + * Combine the MemOp and mmu_idx parameters into a single value. + * + * Authors: + * Richard Henderson + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef EXEC_MEMOPIDX_H +#define EXEC_MEMOPIDX_H + +#include "exec/memop.h" + +typedef uint32_t MemOpIdx; + +/** + * make_memop_idx + * @op: memory operation + * @idx: mmu index + * + * Encode these values into a single parameter. + */ +static inline MemOpIdx make_memop_idx(MemOp op, unsigned idx) +{ +#ifdef CONFIG_DEBUG_TCG + assert(idx <= 15); +#endif + return (op << 4) | idx; +} + +/** + * get_memop + * @oi: combined op/idx parameter + * + * Extract the memory operation from the combined value. + */ +static inline MemOp get_memop(MemOpIdx oi) +{ + return oi >> 4; +} + +/** + * get_mmuidx + * @oi: combined op/idx parameter + * + * Extract the mmu index from the combined value. + */ +static inline unsigned get_mmuidx(MemOpIdx oi) +{ + return oi & 15; +} + +#endif diff --git a/include/exec/memory.h b/include/exec/memory.h index ad2509087d..ddb9b9e10b 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -61,7 +61,20 @@ static inline void fuzz_dma_read_cb(size_t addr, } #endif -extern bool global_dirty_log; +/* Possible bits for global_dirty_log_{start|stop} */ + +/* Dirty tracking enabled because migration is running */ +#define GLOBAL_DIRTY_MIGRATION (1U << 0) + +/* Dirty tracking enabled because measuring dirty rate */ +#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1) + +/* Dirty tracking enabled because dirty limit */ +#define GLOBAL_DIRTY_LIMIT (1U << 2) + +#define GLOBAL_DIRTY_MASK (0x7) + +extern unsigned int global_dirty_tracking; typedef struct MemoryRegionOps MemoryRegionOps; @@ -190,6 +203,9 @@ typedef struct IOMMUTLBEvent { */ #define RAM_NORESERVE (1 << 7) +/* RAM that isn't accessible through normal means. 
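A quick round trip through the MemOpIdx encoding added in memopidx.h above; the mmu index value here is arbitrary, and MO_TEUL/MO_ALIGN are memop.h flags available under NEED_CPU_H:

#include <assert.h>

/* MemOp lives in the high bits, the mmu index in the low four bits. */
static void memopidx_roundtrip(void)
{
    MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, 2);

    assert(get_memop(oi) == (MO_TEUL | MO_ALIGN));
    assert(get_mmuidx(oi) == 2);
}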
*/ +#define RAM_PROTECTED (1 << 8) + static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn, IOMMUNotifierFlag flags, hwaddr start, hwaddr end, @@ -537,6 +553,7 @@ static inline void ram_discard_listener_init(RamDiscardListener *rdl, } typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque); +typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque); /* * RamDiscardManagerClass: @@ -544,7 +561,7 @@ typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque); * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion * regions are currently populated to be used/accessed by the VM, notifying * after parts were discarded (freeing up memory) and before parts will be - * populated (consuming memory), to be used/acessed by the VM. + * populated (consuming memory), to be used/accessed by the VM. * * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the * #MemoryRegion isn't mapped yet; it cannot change while the #MemoryRegion is @@ -568,7 +585,7 @@ typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque); * Listeners are called in multiples of the minimum granularity (unless it * would exceed the registered range) and changes are aligned to the minimum * granularity within the #MemoryRegion. Listeners have to prepare for memory - * becomming discarded in a different granularity than it was populated and the + * becoming discarded in a different granularity than it was populated and the * other way around. */ struct RamDiscardManagerClass { @@ -625,6 +642,21 @@ struct RamDiscardManagerClass { MemoryRegionSection *section, ReplayRamPopulate replay_fn, void *opaque); + /** + * @replay_discarded: + * + * Call the #ReplayRamDiscard callback for all discarded parts within the + * #MemoryRegionSection via the #RamDiscardManager. + * + * @rdm: the #RamDiscardManager + * @section: the #MemoryRegionSection + * @replay_fn: the #ReplayRamDiscard callback + * @opaque: pointer to forward to the callback + */ + void (*replay_discarded)(const RamDiscardManager *rdm, + MemoryRegionSection *section, + ReplayRamDiscard replay_fn, void *opaque); + /** * @register_listener: * @@ -669,6 +701,11 @@ int ram_discard_manager_replay_populated(const RamDiscardManager *rdm, ReplayRamPopulate replay_fn, void *opaque); +void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm, + MemoryRegionSection *section, + ReplayRamDiscard replay_fn, + void *opaque); + void ram_discard_manager_register_listener(RamDiscardManager *rdm, RamDiscardListener *rdl, MemoryRegionSection *section); @@ -676,6 +713,10 @@ void ram_discard_manager_register_listener(RamDiscardManager *rdm, void ram_discard_manager_unregister_listener(RamDiscardManager *rdm, RamDiscardListener *rdl); +bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr, + ram_addr_t *ram_addr, bool *read_only, + bool *mr_has_discard_manager); + typedef struct CoalescedMemoryRange CoalescedMemoryRange; typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd; @@ -704,6 +745,7 @@ struct MemoryRegion { const MemoryRegionOps *ops; void *opaque; MemoryRegion *container; + int mapped_via_alias; /* Mapped via an alias, container might be NULL */ Int128 size; hwaddr addr; void (*destructor)(MemoryRegion *mr); @@ -979,6 +1021,14 @@ struct MemoryListener { */ unsigned priority; + /** + * @name: + * + * Name of the listener. It can be used in contexts where we'd like to + * identify one memory listener with the rest. 
+ */ + const char *name; + /* private: */ AddressSpace *address_space; QTAILQ_ENTRY(MemoryListener) link; @@ -1203,7 +1253,7 @@ void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, Error **errp); /** - * memory_region_init_resizeable_ram: Initialize memory region with resizeable + * memory_region_init_resizeable_ram: Initialize memory region with resizable * RAM. Accesses into the region will * modify memory directly. Only an initial * portion of this RAM is actually used. @@ -1273,7 +1323,7 @@ void memory_region_init_ram_from_file(MemoryRegion *mr, * @name: the name of the region. * @size: size of the region. * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM, - * RAM_NORESERVE. + * RAM_NORESERVE, RAM_PROTECTED. * @fd: the fd to mmap. * @offset: offset within the file referenced by fd * @errp: pointer to Error*, to store an error if it happens. @@ -1574,6 +1624,16 @@ static inline bool memory_region_is_romd(MemoryRegion *mr) return mr->rom_device && mr->romd_mode; } +/** + * memory_region_is_protected: check whether a memory region is protected + * + * Returns %true if a memory region is protected RAM and cannot be accessed + * via standard mechanisms, e.g. DMA. + * + * @mr: the memory region being queried + */ +bool memory_region_is_protected(MemoryRegion *mr); + /** * memory_region_get_iommu: check whether a memory region is an iommu * @@ -1939,7 +1999,7 @@ void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start, * querying the same page multiple times, which is especially useful for * display updates where the scanlines often are not page aligned. * - * The dirty bitmap region which gets copyed into the snapshot (and + * The dirty bitmap region which gets copied into the snapshot (and * cleared afterwards) can be larger than requested. The boundaries * are rounded up/down so complete bitmap longs (covering 64 pages on * 64bit hosts) can be copied over into the bitmap snapshot. Which @@ -2269,7 +2329,8 @@ bool memory_region_present(MemoryRegion *container, hwaddr addr); /** * memory_region_is_mapped: returns true if #MemoryRegion is mapped - * into any address space. + * into another memory region, which does not necessarily imply that it is + * mapped into an address space. * * @mr: a #MemoryRegion which should be checked if it's mapped */ @@ -2392,13 +2453,17 @@ void memory_listener_unregister(MemoryListener *listener); /** * memory_global_dirty_log_start: begin dirty logging for all regions + * + * @flags: purpose of starting dirty log, migration or dirty rate */ -void memory_global_dirty_log_start(void); +void memory_global_dirty_log_start(unsigned int flags); /** * memory_global_dirty_log_stop: end dirty logging for all regions + * + * @flags: purpose of stopping dirty log, migration or dirty rate */ -void memory_global_dirty_log_stop(void); +void memory_global_dirty_log_stop(unsigned int flags); void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled); @@ -2777,6 +2842,9 @@ MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr, const void *buf, hwaddr len); +int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr); +bool prepare_mmio_access(MemoryRegion *mr); + static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write) { if (is_write) { @@ -2875,6 +2943,22 @@ address_space_write_cached(MemoryRegionCache *cache, hwaddr addr, } } +/** + * address_space_set: Fill address space with a constant byte. 
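With the flag argument added to memory_global_dirty_log_start/stop above, each user of dirty tracking declares its purpose and independent users can coexist; a sketch of migration-style usage (error handling elided, function name invented):

/* Start dirty tracking for migration, do the work, then stop it.
 * Another user, e.g. GLOBAL_DIRTY_DIRTY_RATE, may be active in parallel. */
static void migration_dirty_log_demo(void)
{
    memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
    /* ... sync bitmaps and send dirty pages ... */
    memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
}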
+ * + * Return a MemTxResult indicating whether the operation succeeded + * or failed (eg unassigned memory, device rejected the transaction, + * IOMMU fault). + * + * @as: #AddressSpace to be accessed + * @addr: address within that address space + * @c: constant byte to fill the memory + * @len: the number of bytes to fill with the constant byte + * @attrs: memory transaction attributes + */ +MemTxResult address_space_set(AddressSpace *as, hwaddr addr, + uint8_t c, hwaddr len, MemTxAttrs attrs); + #ifdef NEED_CPU_H /* enum device_endian to MemOp. */ static inline MemOp devend_memop(enum device_endian end) @@ -2882,7 +2966,7 @@ static inline MemOp devend_memop(enum device_endian end) QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN && DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN); -#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN /* Swap if non-host endianness or native (target) endianness */ return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP; #else diff --git a/include/exec/page-vary.h b/include/exec/page-vary.h index c22a7a742e..ebbe9b169b 100644 --- a/include/exec/page-vary.h +++ b/include/exec/page-vary.h @@ -31,4 +31,22 @@ extern bool set_preferred_target_page_bits_common(int bits); extern void finalize_target_page_bits_common(int min); #endif +/** + * set_preferred_target_page_bits: + * @bits: number of bits needed to represent an address within the page + * + * Set the preferred target page size (the actual target page + * size may be smaller than any given CPU's preference). + * Returns true on success, false on failure (which can only happen + * if this is called after the system has already finalized its + * choice of page size and the requested page size is smaller than that). + */ +bool set_preferred_target_page_bits(int bits); + +/** + * finalize_target_page_bits: + * Commit the final value set by set_preferred_target_page_bits. 
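A usage sketch for the address_space_set() prototype above, zero-filling one guest-physical page; address_space_memory and MEMTXATTRS_UNSPECIFIED are existing QEMU symbols assumed here, and the helper name is invented:

/* Zero 4 KiB of guest memory and check the transaction result rather
 * than assuming success. */
static bool zero_guest_page(hwaddr gpa)
{
    MemTxResult r = address_space_set(&address_space_memory, gpa,
                                      0, 4096, MEMTXATTRS_UNSPECIFIED);
    return r == MEMTX_OK;
}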
+ */ +void finalize_target_page_bits(void); + #endif /* EXEC_PAGE_VARY_H */ diff --git a/include/exec/plugin-gen.h b/include/exec/plugin-gen.h index b1b72b5d90..5004728c61 100644 --- a/include/exec/plugin-gen.h +++ b/include/exec/plugin-gen.h @@ -19,7 +19,8 @@ struct DisasContextBase; #ifdef CONFIG_PLUGIN -bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool supress); +bool plugin_gen_tb_start(CPUState *cpu, const struct DisasContextBase *db, + bool supress); void plugin_gen_tb_end(CPUState *cpu); void plugin_gen_insn_start(CPUState *cpu, const struct DisasContextBase *db); void plugin_gen_insn_end(void); @@ -27,21 +28,29 @@ void plugin_gen_insn_end(void); void plugin_gen_disable_mem_helpers(void); void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info); -static inline void plugin_insn_append(const void *from, size_t size) +static inline void plugin_insn_append(abi_ptr pc, const void *from, size_t size) { struct qemu_plugin_insn *insn = tcg_ctx->plugin_insn; + abi_ptr off; if (insn == NULL) { return; } + off = pc - insn->vaddr; + if (off < insn->data->len) { + g_byte_array_set_size(insn->data, off); + } else if (off > insn->data->len) { + /* we have an unexpected gap */ + g_assert_not_reached(); + } insn->data = g_byte_array_append(insn->data, from, size); } #else /* !CONFIG_PLUGIN */ -static inline -bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool supress) +static inline bool +plugin_gen_tb_start(CPUState *cpu, const struct DisasContextBase *db, bool sup) { return false; } @@ -62,7 +71,7 @@ static inline void plugin_gen_disable_mem_helpers(void) static inline void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info) { } -static inline void plugin_insn_append(const void *from, size_t size) +static inline void plugin_insn_append(abi_ptr pc, const void *from, size_t size) { } #endif /* CONFIG_PLUGIN */ diff --git a/include/exec/poison.h b/include/exec/poison.h index 7ad4ad18e8..f0959bc84e 100644 --- a/include/exec/poison.h +++ b/include/exec/poison.h @@ -14,6 +14,7 @@ #pragma GCC poison TARGET_CRIS #pragma GCC poison TARGET_HEXAGON #pragma GCC poison TARGET_HPPA +#pragma GCC poison TARGET_LOONGARCH64 #pragma GCC poison TARGET_M68K #pragma GCC poison TARGET_MICROBLAZE #pragma GCC poison TARGET_MIPS @@ -38,7 +39,7 @@ #pragma GCC poison TARGET_HAS_BFLT #pragma GCC poison TARGET_NAME #pragma GCC poison TARGET_SUPPORTS_MTTCG -#pragma GCC poison TARGET_WORDS_BIGENDIAN +#pragma GCC poison TARGET_BIG_ENDIAN #pragma GCC poison BSWAP_NEEDED #pragma GCC poison TARGET_LONG_BITS @@ -51,8 +52,6 @@ #pragma GCC poison TARGET_PAGE_BITS #pragma GCC poison TARGET_PAGE_ALIGN -#pragma GCC poison CPUArchState - #pragma GCC poison CPU_INTERRUPT_HARD #pragma GCC poison CPU_INTERRUPT_EXITTB #pragma GCC poison CPU_INTERRUPT_HALT @@ -67,12 +66,11 @@ #pragma GCC poison CPU_INTERRUPT_TGT_INT_2 #pragma GCC poison CONFIG_ALPHA_DIS -#pragma GCC poison CONFIG_ARM_A64_DIS -#pragma GCC poison CONFIG_ARM_DIS #pragma GCC poison CONFIG_CRIS_DIS #pragma GCC poison CONFIG_HPPA_DIS #pragma GCC poison CONFIG_I386_DIS #pragma GCC poison CONFIG_HEXAGON_DIS +#pragma GCC poison CONFIG_LOONGARCH_DIS #pragma GCC poison CONFIG_M68K_DIS #pragma GCC poison CONFIG_MICROBLAZE_DIS #pragma GCC poison CONFIG_MIPS_DIS diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h index 5cb9789d56..ae0499ec89 100644 --- a/include/exec/ram_addr.h +++ b/include/exec/ram_addr.h @@ -26,6 +26,8 @@ #include "exec/ramlist.h" #include "exec/ramblock.h" +extern uint64_t total_dirty_pages; + /** * 
clear_bmap_size: calculate clear bitmap size * @@ -40,7 +42,8 @@ static inline long clear_bmap_size(uint64_t pages, uint8_t shift) } /** - * clear_bmap_set: set clear bitmap for the page range + * clear_bmap_set: set clear bitmap for the page range. Must be with + * bitmap_mutex held. * * @rb: the ramblock to operate on * @start: the start page number @@ -53,12 +56,12 @@ static inline void clear_bmap_set(RAMBlock *rb, uint64_t start, { uint8_t shift = rb->clear_bmap_shift; - bitmap_set_atomic(rb->clear_bmap, start >> shift, - clear_bmap_size(npages, shift)); + bitmap_set(rb->clear_bmap, start >> shift, clear_bmap_size(npages, shift)); } /** - * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set + * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set. + * Must be with bitmap_mutex held. * * @rb: the ramblock to operate on * @page: the page number to check @@ -69,7 +72,7 @@ static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page) { uint8_t shift = rb->clear_bmap_shift; - return bitmap_test_and_clear_atomic(rb->clear_bmap, page >> shift, 1); + return bitmap_test_and_clear(rb->clear_bmap, page >> shift, 1); } static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset) @@ -145,8 +148,6 @@ static inline void qemu_ram_block_writeback(RAMBlock *block) #define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1) #define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE)) -void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end); - static inline bool cpu_physical_memory_get_dirty(ram_addr_t start, ram_addr_t length, unsigned client) @@ -359,7 +360,7 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, hwaddr addr; ram_addr_t ram_addr; unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS; - unsigned long hpratio = qemu_real_host_page_size / TARGET_PAGE_SIZE; + unsigned long hpratio = qemu_real_host_page_size() / TARGET_PAGE_SIZE; unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS); /* start address is aligned at the start of a word? */ @@ -389,10 +390,14 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, qatomic_or(&blocks[DIRTY_MEMORY_NV2A][idx][offset], temp); qatomic_or(&blocks[DIRTY_MEMORY_NV2A_TEX][idx][offset], temp); - if (global_dirty_log) { + if (global_dirty_tracking) { qatomic_or( &blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp); + if (unlikely( + global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) { + total_dirty_pages += ctpopl(temp); + } } if (tcg_enabled()) { @@ -412,7 +417,7 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, } else { uint8_t clients = tcg_enabled() ? 
DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE; - if (!global_dirty_log) { + if (!global_dirty_tracking) { clients &= ~(1 << DIRTY_MEMORY_MIGRATION); } @@ -423,6 +428,9 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, for (i = 0; i < len; i++) { if (bitmap[i] != 0) { c = leul_to_cpu(bitmap[i]); + if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) { + total_dirty_pages += ctpopl(c); + } do { j = ctzl(c); c &= ~(1ul << j); diff --git a/include/exec/ramblock.h b/include/exec/ramblock.h index 664701b759..adc03df59c 100644 --- a/include/exec/ramblock.h +++ b/include/exec/ramblock.h @@ -21,6 +21,8 @@ #ifndef CONFIG_USER_ONLY #include "cpu-common.h" +#include "qemu/rcu.h" +#include "exec/ramlist.h" struct RAMBlock { struct rcu_head rcu; @@ -51,6 +53,9 @@ struct RAMBlock { * and split clearing of dirty bitmap on the remote node (e.g., * KVM). The bitmap will be set only when doing global sync. * + * It is only used during src side of ram migration, and it is + * protected by the global ram_state.bitmap_mutex. + * * NOTE: this bitmap is different comparing to the other bitmaps * in that one bit can represent multiple guest pages (which is * decided by the `clear_bmap_shift' variable below). On diff --git a/include/exec/ramlist.h b/include/exec/ramlist.h index e135e80fa2..5c6e5c5b4c 100644 --- a/include/exec/ramlist.h +++ b/include/exec/ramlist.h @@ -82,6 +82,6 @@ void ram_block_notify_add(void *host, size_t size, size_t max_size); void ram_block_notify_remove(void *host, size_t size, size_t max_size); void ram_block_notify_resize(void *host, size_t old_size, size_t new_size); -void ram_block_dump(Monitor *mon); +GString *ram_block_format(void); #endif /* RAMLIST_H */ diff --git a/include/exec/softmmu-semi.h b/include/exec/softmmu-semi.h deleted file mode 100644 index fbcae88f4b..0000000000 --- a/include/exec/softmmu-semi.h +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Helper routines to provide target memory access for semihosting - * syscalls in system emulation mode. - * - * Copyright (c) 2007 CodeSourcery. 
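Since ram_block_dump(Monitor *) is replaced above by ram_block_format() returning a GString, the caller now owns both the output target and the string's lifetime; a sketch of how a monitor command might consume it (monitor_printf is an existing QEMU function assumed here, and the wrapper is hypothetical):

/* Format the RAM block list and print it to the human monitor. */
static void dump_ram_blocks(Monitor *mon)
{
    g_autoptr(GString) buf = ram_block_format();

    monitor_printf(mon, "%s", buf->str);
}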
- * - * This code is licensed under the GPL - */ - -#ifndef SOFTMMU_SEMI_H -#define SOFTMMU_SEMI_H - -#include "cpu.h" - -static inline uint64_t softmmu_tget64(CPUArchState *env, target_ulong addr) -{ - uint64_t val; - - cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 8, 0); - return tswap64(val); -} - -static inline uint32_t softmmu_tget32(CPUArchState *env, target_ulong addr) -{ - uint32_t val; - - cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 4, 0); - return tswap32(val); -} - -static inline uint32_t softmmu_tget8(CPUArchState *env, target_ulong addr) -{ - uint8_t val; - - cpu_memory_rw_debug(env_cpu(env), addr, &val, 1, 0); - return val; -} - -#define get_user_u64(arg, p) ({ arg = softmmu_tget64(env, p); 0; }) -#define get_user_u32(arg, p) ({ arg = softmmu_tget32(env, p) ; 0; }) -#define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p) ; 0; }) -#define get_user_ual(arg, p) get_user_u32(arg, p) - -static inline void softmmu_tput64(CPUArchState *env, - target_ulong addr, uint64_t val) -{ - val = tswap64(val); - cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 8, 1); -} - -static inline void softmmu_tput32(CPUArchState *env, - target_ulong addr, uint32_t val) -{ - val = tswap32(val); - cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 4, 1); -} -#define put_user_u64(arg, p) ({ softmmu_tput64(env, p, arg) ; 0; }) -#define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg) ; 0; }) -#define put_user_ual(arg, p) put_user_u32(arg, p) - -static void *softmmu_lock_user(CPUArchState *env, - target_ulong addr, target_ulong len, int copy) -{ - uint8_t *p; - /* TODO: Make this something that isn't fixed size. */ - p = malloc(len); - if (p && copy) { - cpu_memory_rw_debug(env_cpu(env), addr, p, len, 0); - } - return p; -} -#define lock_user(type, p, len, copy) softmmu_lock_user(env, p, len, copy) -static char *softmmu_lock_user_string(CPUArchState *env, target_ulong addr) -{ - char *p; - char *s; - uint8_t c; - /* TODO: Make this something that isn't fixed size. 
*/ - s = p = malloc(1024); - if (!s) { - return NULL; - } - do { - cpu_memory_rw_debug(env_cpu(env), addr, &c, 1, 0); - addr++; - *(p++) = c; - } while (c); - return s; -} -#define lock_user_string(p) softmmu_lock_user_string(env, p) -static void softmmu_unlock_user(CPUArchState *env, void *p, target_ulong addr, - target_ulong len) -{ - if (len) { - cpu_memory_rw_debug(env_cpu(env), addr, p, len, 1); - } - free(p); -} -#define unlock_user(s, args, len) softmmu_unlock_user(env, s, args, len) - -#endif diff --git a/include/exec/translate-all.h b/include/exec/translate-all.h index a557b4e2bb..3e9cb91565 100644 --- a/include/exec/translate-all.h +++ b/include/exec/translate-all.h @@ -29,10 +29,11 @@ void page_collection_unlock(struct page_collection *set); void tb_invalidate_phys_page_fast(struct page_collection *pages, tb_page_addr_t start, int len, uintptr_t retaddr); -void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end); +void tb_invalidate_phys_page(tb_page_addr_t addr); void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr); #ifdef CONFIG_USER_ONLY +void page_protect(tb_page_addr_t page_addr); int page_unprotect(target_ulong address, uintptr_t pc); #endif diff --git a/include/exec/translator.h b/include/exec/translator.h index d318803267..af2ff95cd5 100644 --- a/include/exec/translator.h +++ b/include/exec/translator.h @@ -23,8 +23,22 @@ #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "exec/plugin-gen.h" +#include "exec/translate-all.h" #include "tcg/tcg.h" +/** + * gen_intermediate_code + * @cpu: cpu context + * @tb: translation block + * @max_insns: max number of instructions to translate + * @pc: guest virtual program counter address + * @host_pc: host physical program counter address + * + * This function must be provided by the target, which should create + * the target-specific DisasContext, and then invoke translator_loop. + */ +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc); /** * DisasJumpType: @@ -67,13 +81,14 @@ typedef enum DisasJumpType { * Architecture-agnostic disassembly context. */ typedef struct DisasContextBase { - const TranslationBlock *tb; + TranslationBlock *tb; target_ulong pc_first; target_ulong pc_next; DisasJumpType is_jmp; int num_insns; int max_insns; bool singlestep_enabled; + void *host_addr[2]; } DisasContextBase; /** @@ -106,16 +121,18 @@ typedef struct TranslatorOps { void (*insn_start)(DisasContextBase *db, CPUState *cpu); void (*translate_insn)(DisasContextBase *db, CPUState *cpu); void (*tb_stop)(DisasContextBase *db, CPUState *cpu); - void (*disas_log)(const DisasContextBase *db, CPUState *cpu); + void (*disas_log)(const DisasContextBase *db, CPUState *cpu, FILE *f); } TranslatorOps; /** * translator_loop: - * @ops: Target-specific operations. - * @db: Disassembly context. * @cpu: Target vCPU. * @tb: Translation block. * @max_insns: Maximum number of insns to translate. + * @pc: guest virtual program counter address + * @host_pc: host physical program counter address + * @ops: Target-specific operations. + * @db: Disassembly context. * * Generic translator loop. * @@ -129,8 +146,9 @@ typedef struct TranslatorOps { * - When single-stepping is enabled (system-wide or on the current vCPU). * - When too many instructions have been translated. 
*/ -void translator_loop(const TranslatorOps *ops, DisasContextBase *db, - CPUState *cpu, TranslationBlock *tb, int max_insns); +void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc, + const TranslatorOps *ops, DisasContextBase *db); void translator_loop_temp_check(DisasContextBase *db); @@ -155,28 +173,69 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest); * the relevant information at translation time. */ -#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \ - static inline type \ - fullname ## _swap(CPUArchState *env, abi_ptr pc, bool do_swap) \ - { \ - type ret = load_fn(env, pc); \ - if (do_swap) { \ - ret = swap_fn(ret); \ - } \ - plugin_insn_append(&ret, sizeof(ret)); \ - return ret; \ - } \ - \ - static inline type fullname(CPUArchState *env, abi_ptr pc) \ - { \ - return fullname ## _swap(env, pc, false); \ +uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc); +uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc); +uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc); +uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc); + +static inline uint16_t +translator_lduw_swap(CPUArchState *env, DisasContextBase *db, + abi_ptr pc, bool do_swap) +{ + uint16_t ret = translator_lduw(env, db, pc); + if (do_swap) { + ret = bswap16(ret); } + return ret; +} -GEN_TRANSLATOR_LD(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */) -GEN_TRANSLATOR_LD(translator_ldsw, int16_t, cpu_ldsw_code, bswap16) -GEN_TRANSLATOR_LD(translator_lduw, uint16_t, cpu_lduw_code, bswap16) -GEN_TRANSLATOR_LD(translator_ldl, uint32_t, cpu_ldl_code, bswap32) -GEN_TRANSLATOR_LD(translator_ldq, uint64_t, cpu_ldq_code, bswap64) -#undef GEN_TRANSLATOR_LD +static inline uint32_t +translator_ldl_swap(CPUArchState *env, DisasContextBase *db, + abi_ptr pc, bool do_swap) +{ + uint32_t ret = translator_ldl(env, db, pc); + if (do_swap) { + ret = bswap32(ret); + } + return ret; +} -#endif /* EXEC__TRANSLATOR_H */ +static inline uint64_t +translator_ldq_swap(CPUArchState *env, DisasContextBase *db, + abi_ptr pc, bool do_swap) +{ + uint64_t ret = translator_ldq(env, db, pc); + if (do_swap) { + ret = bswap64(ret); + } + return ret; +} + +/** + * translator_fake_ldb - fake instruction load + * @insn8: byte of instruction + * @pc: program counter of instruction + * + * This is a special case helper used where the instruction we are + * about to translate comes from somewhere else (e.g. being + * re-synthesised for s390x "ex"). It ensures we update other areas of + * the translator with details of the executed instruction. + */ + +static inline void translator_fake_ldb(uint8_t insn8, abi_ptr pc) +{ + plugin_insn_append(pc, &insn8, sizeof(insn8)); +} + + +/* + * Return whether addr is on the same page as where disassembly started. + * Translators can use this to enforce the rule that only single-insn + * translation blocks are allowed to cross page boundaries. 
+ */ +static inline bool is_same_page(const DisasContextBase *db, target_ulong addr) +{ + return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0; +} + +#endif /* EXEC__TRANSLATOR_H */ diff --git a/include/fpu/softfloat-helpers.h b/include/fpu/softfloat-helpers.h index a98d759cd3..94cbe073ec 100644 --- a/include/fpu/softfloat-helpers.h +++ b/include/fpu/softfloat-helpers.h @@ -141,4 +141,4 @@ static inline bool get_default_nan_mode(float_status *status) return status->default_nan_mode; } -#endif /* _SOFTFLOAT_HELPERS_H_ */ +#endif /* SOFTFLOAT_HELPERS_H */ diff --git a/include/fpu/softfloat-macros.h b/include/fpu/softfloat-macros.h index 81c3fe8256..f35cdbfa63 100644 --- a/include/fpu/softfloat-macros.h +++ b/include/fpu/softfloat-macros.h @@ -8,7 +8,6 @@ * so some portions are provided under: * the SoftFloat-2a license * the BSD license - * GPL-v2-or-later * * Any future contributions to this file after December 1st 2014 will be * taken to be licensed under the Softfloat-2a license unless specifically @@ -75,10 +74,6 @@ this code that are retained. * THE POSSIBILITY OF SUCH DAMAGE. */ -/* Portions of this work are licensed under the terms of the GNU GPL, - * version 2 or later. See the COPYING file in the top-level directory. - */ - #ifndef FPU_SOFTFLOAT_MACROS_H #define FPU_SOFTFLOAT_MACROS_H @@ -585,83 +580,6 @@ static inline uint64_t estimateDiv128To64(uint64_t a0, uint64_t a1, uint64_t b) } -/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd - * (https://gmplib.org/repo/gmp/file/tip/longlong.h) - * - * Licensed under the GPLv2/LGPLv3 - */ -static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1, - uint64_t n0, uint64_t d) -{ -#if defined(__x86_64__) - uint64_t q; - asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d)); - return q; -#elif defined(__s390x__) && !defined(__clang__) - /* Need to use a TImode type to get an even register pair for DLGR. */ - unsigned __int128 n = (unsigned __int128)n1 << 64 | n0; - asm("dlgr %0, %1" : "+r"(n) : "r"(d)); - *r = n >> 64; - return n; -#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7) - /* From Power ISA 2.06, programming note for divdeu. */ - uint64_t q1, q2, Q, r1, r2, R; - asm("divdeu %0,%2,%4; divdu %1,%3,%4" - : "=&r"(q1), "=r"(q2) - : "r"(n1), "r"(n0), "r"(d)); - r1 = -(q1 * d); /* low part of (n1<<64) - (q1 * d) */ - r2 = n0 - (q2 * d); - Q = q1 + q2; - R = r1 + r2; - if (R >= d || R < r2) { /* overflow implies R > d */ - Q += 1; - R -= d; - } - *r = R; - return Q; -#else - uint64_t d0, d1, q0, q1, r1, r0, m; - - d0 = (uint32_t)d; - d1 = d >> 32; - - r1 = n1 % d1; - q1 = n1 / d1; - m = q1 * d0; - r1 = (r1 << 32) | (n0 >> 32); - if (r1 < m) { - q1 -= 1; - r1 += d; - if (r1 >= d) { - if (r1 < m) { - q1 -= 1; - r1 += d; - } - } - } - r1 -= m; - - r0 = r1 % d1; - q0 = r1 / d1; - m = q0 * d0; - r0 = (r0 << 32) | (uint32_t)n0; - if (r0 < m) { - q0 -= 1; - r0 += d; - if (r0 >= d) { - if (r0 < m) { - q0 -= 1; - r0 += d; - } - } - } - r0 -= m; - - *r = r0; - return (q1 << 32) | q0; -#endif -} - /*---------------------------------------------------------------------------- | Returns an approximation to the square root of the 32-bit significand given | by `a'. Considered as an integer, `a' must be at least 2^31. 
If bit 0 of diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h index 3c051b0a58..1e37d04b39 100644 --- a/include/fpu/softfloat-types.h +++ b/include/fpu/softfloat-types.h @@ -116,7 +116,7 @@ typedef struct { #define make_floatx80(exp, mant) ((floatx80) { .low = mant, .high = exp }) #define make_floatx80_init(exp, mant) { .low = mant, .high = exp } typedef struct { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN uint64_t high, low; #else uint64_t low, high; @@ -158,13 +158,20 @@ typedef enum __attribute__((__packed__)) { */ enum { - float_flag_invalid = 1, - float_flag_divbyzero = 4, - float_flag_overflow = 8, - float_flag_underflow = 16, - float_flag_inexact = 32, - float_flag_input_denormal = 64, - float_flag_output_denormal = 128 + float_flag_invalid = 0x0001, + float_flag_divbyzero = 0x0002, + float_flag_overflow = 0x0004, + float_flag_underflow = 0x0008, + float_flag_inexact = 0x0010, + float_flag_input_denormal = 0x0020, + float_flag_output_denormal = 0x0040, + float_flag_invalid_isi = 0x0080, /* inf - inf */ + float_flag_invalid_imz = 0x0100, /* inf * 0 */ + float_flag_invalid_idi = 0x0200, /* inf / inf */ + float_flag_invalid_zdz = 0x0400, /* 0 / 0 */ + float_flag_invalid_sqrt = 0x0800, /* sqrt(-x) */ + float_flag_invalid_cvti = 0x1000, /* non-nan to integer */ + float_flag_invalid_snan = 0x2000, /* any operand was snan */ }; /* @@ -184,8 +191,8 @@ typedef enum __attribute__((__packed__)) { */ typedef struct float_status { + uint16_t float_exception_flags; FloatRoundMode float_rounding_mode; - uint8_t float_exception_flags; FloatX80RoundPrec floatx80_rounding_precision; bool tininess_before_rounding; /* should denormalised results go to zero and set the inexact flag? */ @@ -201,6 +208,10 @@ typedef struct float_status { bool snan_bit_is_one; bool use_first_nan; bool no_signaling_nans; + /* should overflowed results subtract re_bias to its exponent? */ + bool rebias_overflow; + /* should underflowed results add re_bias to its exponent? */ + bool rebias_underflow; } float_status; #endif /* SOFTFLOAT_TYPES_H */ diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h index ec7dca0960..3dcf20e3a2 100644 --- a/include/fpu/softfloat.h +++ b/include/fpu/softfloat.h @@ -95,12 +95,13 @@ typedef enum { #include "fpu/softfloat-types.h" #include "fpu/softfloat-helpers.h" +#include "qemu/int128.h" /*---------------------------------------------------------------------------- | Routine to raise any or all of the software IEC/IEEE floating-point | exception flags. *----------------------------------------------------------------------------*/ -static inline void float_raise(uint8_t flags, float_status *status) +static inline void float_raise(uint16_t flags, float_status *status) { status->float_exception_flags |= flags; } @@ -182,7 +183,9 @@ floatx80 int64_to_floatx80(int64_t, float_status *status); float128 int32_to_float128(int32_t, float_status *status); float128 int64_to_float128(int64_t, float_status *status); +float128 int128_to_float128(Int128, float_status *status); float128 uint64_to_float128(uint64_t, float_status *status); +float128 uint128_to_float128(Int128, float_status *status); /*---------------------------------------------------------------------------- | Software half-precision conversion routines. 
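The widened float_exception_flags field and the per-cause invalid flags above let targets report exactly why an operation was invalid; a small sketch with an invented helper:

/* Distinguish 0/0 from the other invalid-operation causes. */
static bool invalid_was_zero_div_zero(const float_status *st)
{
    uint16_t flags = st->float_exception_flags;

    return (flags & float_flag_invalid) && (flags & float_flag_invalid_zdz);
}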
@@ -243,6 +246,8 @@ float16 float16_minnum(float16, float16, float_status *status); float16 float16_maxnum(float16, float16, float_status *status); float16 float16_minnummag(float16, float16, float_status *status); float16 float16_maxnummag(float16, float16, float_status *status); +float16 float16_minimum_number(float16, float16, float_status *status); +float16 float16_maximum_number(float16, float16, float_status *status); float16 float16_sqrt(float16, float_status *status); FloatRelation float16_compare(float16, float16, float_status *status); FloatRelation float16_compare_quiet(float16, float16, float_status *status); @@ -422,6 +427,8 @@ bfloat16 bfloat16_minnum(bfloat16, bfloat16, float_status *status); bfloat16 bfloat16_maxnum(bfloat16, bfloat16, float_status *status); bfloat16 bfloat16_minnummag(bfloat16, bfloat16, float_status *status); bfloat16 bfloat16_maxnummag(bfloat16, bfloat16, float_status *status); +bfloat16 bfloat16_minimum_number(bfloat16, bfloat16, float_status *status); +bfloat16 bfloat16_maximum_number(bfloat16, bfloat16, float_status *status); bfloat16 bfloat16_sqrt(bfloat16, float_status *status); FloatRelation bfloat16_compare(bfloat16, bfloat16, float_status *status); FloatRelation bfloat16_compare_quiet(bfloat16, bfloat16, float_status *status); @@ -589,6 +596,8 @@ float32 float32_minnum(float32, float32, float_status *status); float32 float32_maxnum(float32, float32, float_status *status); float32 float32_minnummag(float32, float32, float_status *status); float32 float32_maxnummag(float32, float32, float_status *status); +float32 float32_minimum_number(float32, float32, float_status *status); +float32 float32_maximum_number(float32, float32, float_status *status); bool float32_is_quiet_nan(float32, float_status *status); bool float32_is_signaling_nan(float32, float_status *status); float32 float32_silence_nan(float32, float_status *status); @@ -778,6 +787,8 @@ float64 float64_minnum(float64, float64, float_status *status); float64 float64_maxnum(float64, float64, float_status *status); float64 float64_minnummag(float64, float64, float_status *status); float64 float64_maxnummag(float64, float64, float_status *status); +float64 float64_minimum_number(float64, float64, float_status *status); +float64 float64_maximum_number(float64, float64, float_status *status); bool float64_is_quiet_nan(float64 a, float_status *status); bool float64_is_signaling_nan(float64, float_status *status); float64 float64_silence_nan(float64, float_status *status); @@ -900,6 +911,18 @@ static inline bool float64_unordered_quiet(float64 a, float64 b, *----------------------------------------------------------------------------*/ float64 float64_default_nan(float_status *status); +/*---------------------------------------------------------------------------- +| Software IEC/IEEE double-precision operations, rounding to single precision, +| returning a result in double precision, with only one rounding step. 
+*----------------------------------------------------------------------------*/ + +float64 float64r32_add(float64, float64, float_status *status); +float64 float64r32_sub(float64, float64, float_status *status); +float64 float64r32_mul(float64, float64, float_status *status); +float64 float64r32_div(float64, float64, float_status *status); +float64 float64r32_muladd(float64, float64, float64, int, float_status *status); +float64 float64r32_sqrt(float64, float_status *status); + /*---------------------------------------------------------------------------- | Software IEC/IEEE extended double-precision conversion routines. *----------------------------------------------------------------------------*/ @@ -1181,9 +1204,13 @@ floatx80 floatx80_default_nan(float_status *status); int32_t float128_to_int32(float128, float_status *status); int32_t float128_to_int32_round_to_zero(float128, float_status *status); int64_t float128_to_int64(float128, float_status *status); +Int128 float128_to_int128(float128, float_status *status); int64_t float128_to_int64_round_to_zero(float128, float_status *status); +Int128 float128_to_int128_round_to_zero(float128, float_status *status); uint64_t float128_to_uint64(float128, float_status *status); +Int128 float128_to_uint128(float128, float_status *status); uint64_t float128_to_uint64_round_to_zero(float128, float_status *status); +Int128 float128_to_uint128_round_to_zero(float128, float_status *status); uint32_t float128_to_uint32(float128, float_status *status); uint32_t float128_to_uint32_round_to_zero(float128, float_status *status); float32 float128_to_float32(float128, float_status *status); @@ -1210,6 +1237,8 @@ float128 float128_minnum(float128, float128, float_status *status); float128 float128_maxnum(float128, float128, float_status *status); float128 float128_minnummag(float128, float128, float_status *status); float128 float128_maxnummag(float128, float128, float_status *status); +float128 float128_minimum_number(float128, float128, float_status *status); +float128 float128_maximum_number(float128, float128, float_status *status); bool float128_is_quiet_nan(float128, float_status *status); bool float128_is_signaling_nan(float128, float_status *status); float128 float128_silence_nan(float128, float_status *status); diff --git a/include/glib-compat.h b/include/glib-compat.h index 4542e920d5..43a562974d 100644 --- a/include/glib-compat.h +++ b/include/glib-compat.h @@ -19,12 +19,12 @@ /* Ask for warnings for anything that was marked deprecated in * the defined version, or before. It is a candidate for rewrite. */ -#define GLIB_VERSION_MIN_REQUIRED GLIB_VERSION_2_48 +#define GLIB_VERSION_MIN_REQUIRED GLIB_VERSION_2_56 /* Ask for warnings if code tries to use function that did not * exist in the defined version. These risk breaking builds */ -#define GLIB_VERSION_MAX_ALLOWED GLIB_VERSION_2_48 +#define GLIB_VERSION_MAX_ALLOWED GLIB_VERSION_2_56 #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wdeprecated-declarations" @@ -46,9 +46,9 @@ * int g_foo(const char *wibble) * * We must define a static inline function with the same signature that does - * what we need, but with a "_qemu" suffix e.g. + * what we need, but with a "_compat" suffix e.g. * - * static inline void g_foo_qemu(const char *wibble) + * static inline void g_foo_compat(const char *wibble) * { * #if GLIB_CHECK_VERSION(X, Y, 0) * g_foo(wibble) @@ -61,21 +61,49 @@ * ensuring this wrapper function impl doesn't trigger the compiler warning * about using too new glib APIs. 
Finally we can do * - * #define g_foo(a) g_foo_qemu(a) + * #define g_foo(a) g_foo_compat(a) * * So now the code elsewhere in QEMU, which *does* have the * -Wdeprecated-declarations warning active, can call g_foo(...) as normal, * without generating warnings. */ -#if defined(_WIN32) && !GLIB_CHECK_VERSION(2, 50, 0) /* - * g_poll has a problem on Windows when using - * timeouts < 10ms, so use wrapper. + * g_memdup2_qemu: + * @mem: (nullable): the memory to copy. + * @byte_size: the number of bytes to copy. + * + * Allocates @byte_size bytes of memory, and copies @byte_size bytes into it + * from @mem. If @mem is %NULL it returns %NULL. + * + * This replaces g_memdup(), which was prone to integer overflows when + * converting the argument from a #gsize to a #guint. + * + * This static inline version is a backport of the new public API from + * GLib 2.68, kept internal to GLib for backport to older stable releases. + * See https://gitlab.gnome.org/GNOME/glib/-/issues/2319. + * + * Returns: (nullable): a pointer to the newly-allocated copy of the memory, + * or %NULL if @mem is %NULL. */ -#define g_poll(fds, nfds, timeout) g_poll_fixed(fds, nfds, timeout) -gint g_poll_fixed(GPollFD *fds, guint nfds, gint timeout); +static inline gpointer g_memdup2_qemu(gconstpointer mem, gsize byte_size) +{ +#if GLIB_CHECK_VERSION(2, 68, 0) + return g_memdup2(mem, byte_size); +#else + gpointer new_mem; + + if (mem && byte_size != 0) { + new_mem = g_malloc(byte_size); + memcpy(new_mem, mem, byte_size); + } else { + new_mem = NULL; + } + + return new_mem; #endif +} +#define g_memdup2(m, s) g_memdup2_qemu(m, s) #if defined(G_OS_UNIX) /* @@ -119,4 +147,8 @@ qemu_g_test_slow(void) #pragma GCC diagnostic pop +#ifndef G_NORETURN +#define G_NORETURN G_GNUC_NORETURN +#endif + #endif diff --git a/include/hw/acpi/acpi-defs.h b/include/hw/acpi/acpi-defs.h index cf9f44299c..2b42e4192b 100644 --- a/include/hw/acpi/acpi-defs.h +++ b/include/hw/acpi/acpi-defs.h @@ -48,31 +48,6 @@ typedef struct AcpiRsdpData { unsigned *xsdt_tbl_offset; } AcpiRsdpData; -/* Table structure from Linux kernel (the ACPI tables are under the - BSD license) */ - - -#define ACPI_TABLE_HEADER_DEF /* ACPI common table header */ \ - uint32_t signature; /* ACPI signature (4 ASCII characters) */ \ - uint32_t length; /* Length of table, in bytes, including header */ \ - uint8_t revision; /* ACPI Specification minor version # */ \ - uint8_t checksum; /* To make sum of entire table == 0 */ \ - uint8_t oem_id[6] \ - QEMU_NONSTRING; /* OEM identification */ \ - uint8_t oem_table_id[8] \ - QEMU_NONSTRING; /* OEM table identification */ \ - uint32_t oem_revision; /* OEM revision number */ \ - uint8_t asl_compiler_id[4] \ - QEMU_NONSTRING; /* ASL compiler vendor ID */ \ - uint32_t asl_compiler_revision; /* ASL compiler revision number */ - - -/* ACPI common table header */ -struct AcpiTableHeader { - ACPI_TABLE_HEADER_DEF -} QEMU_PACKED; -typedef struct AcpiTableHeader AcpiTableHeader; - struct AcpiGenericAddress { uint8_t space_id; /* Address space where struct or register exists */ uint8_t bit_width; /* Size in bits of given register */ @@ -80,7 +55,7 @@ struct AcpiGenericAddress { uint8_t access_width; /* ACPI 3.0: Minimum Access size (ACPI 3.0), ACPI 2.0: Reserved, Table 5-1 */ uint64_t address; /* 64-bit address of struct or register */ -} QEMU_PACKED; +}; typedef struct AcpiFadtData { struct AcpiGenericAddress pm1a_cnt; /* PM1a_CNT_BLK */ @@ -102,6 +77,7 @@ typedef struct AcpiFadtData { uint16_t plvl2_lat; /* P_LVL2_LAT */ uint16_t plvl3_lat; /* P_LVL3_LAT 
*/ uint16_t arm_boot_arch; /* ARM_BOOT_ARCH */ + uint16_t iapc_boot_arch; /* IAPC_BOOT_ARCH */ uint8_t minor_ver; /* FADT Minor Version */ /* @@ -117,505 +93,4 @@ typedef struct AcpiFadtData { #define ACPI_FADT_ARM_PSCI_COMPLIANT (1 << 0) #define ACPI_FADT_ARM_PSCI_USE_HVC (1 << 1) -/* - * Serial Port Console Redirection Table (SPCR), Rev. 1.02 - * - * For .interface_type see Debug Port Table 2 (DBG2) serial port - * subtypes in Table 3, Rev. May 22, 2012 - */ -struct AcpiSerialPortConsoleRedirection { - ACPI_TABLE_HEADER_DEF - uint8_t interface_type; - uint8_t reserved1[3]; - struct AcpiGenericAddress base_address; - uint8_t interrupt_types; - uint8_t irq; - uint32_t gsi; - uint8_t baud; - uint8_t parity; - uint8_t stopbits; - uint8_t flowctrl; - uint8_t term_type; - uint8_t reserved2; - uint16_t pci_device_id; - uint16_t pci_vendor_id; - uint8_t pci_bus; - uint8_t pci_slot; - uint8_t pci_func; - uint32_t pci_flags; - uint8_t pci_seg; - uint32_t reserved3; -} QEMU_PACKED; -typedef struct AcpiSerialPortConsoleRedirection - AcpiSerialPortConsoleRedirection; - -/* - * ACPI 1.0 Root System Description Table (RSDT) - */ -struct AcpiRsdtDescriptorRev1 { - ACPI_TABLE_HEADER_DEF /* ACPI common table header */ - uint32_t table_offset_entry[]; /* Array of pointers to other */ - /* ACPI tables */ -} QEMU_PACKED; -typedef struct AcpiRsdtDescriptorRev1 AcpiRsdtDescriptorRev1; - -/* - * ACPI 2.0 eXtended System Description Table (XSDT) - */ -struct AcpiXsdtDescriptorRev2 { - ACPI_TABLE_HEADER_DEF /* ACPI common table header */ - uint64_t table_offset_entry[]; /* Array of pointers to other */ - /* ACPI tables */ -} QEMU_PACKED; -typedef struct AcpiXsdtDescriptorRev2 AcpiXsdtDescriptorRev2; - -/* - * ACPI 1.0 Firmware ACPI Control Structure (FACS) - */ -struct AcpiFacsDescriptorRev1 { - uint32_t signature; /* ACPI Signature */ - uint32_t length; /* Length of structure, in bytes */ - uint32_t hardware_signature; /* Hardware configuration signature */ - uint32_t firmware_waking_vector; /* ACPI OS waking vector */ - uint32_t global_lock; /* Global Lock */ - uint32_t flags; - uint8_t resverved3 [40]; /* Reserved - must be zero */ -} QEMU_PACKED; -typedef struct AcpiFacsDescriptorRev1 AcpiFacsDescriptorRev1; - -/* - * Differentiated System Description Table (DSDT) - */ - -/* - * MADT values and structures - */ - -/* Values for MADT PCATCompat */ - -#define ACPI_DUAL_PIC 0 -#define ACPI_MULTIPLE_APIC 1 - -/* Master MADT */ - -struct AcpiMultipleApicTable { - ACPI_TABLE_HEADER_DEF /* ACPI common table header */ - uint32_t local_apic_address; /* Physical address of local APIC */ - uint32_t flags; -} QEMU_PACKED; -typedef struct AcpiMultipleApicTable AcpiMultipleApicTable; - -/* Values for Type in APIC sub-headers */ - -#define ACPI_APIC_PROCESSOR 0 -#define ACPI_APIC_IO 1 -#define ACPI_APIC_XRUPT_OVERRIDE 2 -#define ACPI_APIC_NMI 3 -#define ACPI_APIC_LOCAL_NMI 4 -#define ACPI_APIC_ADDRESS_OVERRIDE 5 -#define ACPI_APIC_IO_SAPIC 6 -#define ACPI_APIC_LOCAL_SAPIC 7 -#define ACPI_APIC_XRUPT_SOURCE 8 -#define ACPI_APIC_LOCAL_X2APIC 9 -#define ACPI_APIC_LOCAL_X2APIC_NMI 10 -#define ACPI_APIC_GENERIC_CPU_INTERFACE 11 -#define ACPI_APIC_GENERIC_DISTRIBUTOR 12 -#define ACPI_APIC_GENERIC_MSI_FRAME 13 -#define ACPI_APIC_GENERIC_REDISTRIBUTOR 14 -#define ACPI_APIC_GENERIC_TRANSLATOR 15 -#define ACPI_APIC_RESERVED 16 /* 16 and greater are reserved */ - -/* - * MADT sub-structures (Follow MULTIPLE_APIC_DESCRIPTION_TABLE) - */ -#define ACPI_SUB_HEADER_DEF /* Common ACPI sub-structure header */\ - uint8_t type; \ - uint8_t 
length; - -/* Sub-structures for MADT */ - -struct AcpiMadtProcessorApic { - ACPI_SUB_HEADER_DEF - uint8_t processor_id; /* ACPI processor id */ - uint8_t local_apic_id; /* Processor's local APIC id */ - uint32_t flags; -} QEMU_PACKED; -typedef struct AcpiMadtProcessorApic AcpiMadtProcessorApic; - -struct AcpiMadtIoApic { - ACPI_SUB_HEADER_DEF - uint8_t io_apic_id; /* I/O APIC ID */ - uint8_t reserved; /* Reserved - must be zero */ - uint32_t address; /* APIC physical address */ - uint32_t interrupt; /* Global system interrupt where INTI - * lines start */ -} QEMU_PACKED; -typedef struct AcpiMadtIoApic AcpiMadtIoApic; - -struct AcpiMadtIntsrcovr { - ACPI_SUB_HEADER_DEF - uint8_t bus; - uint8_t source; - uint32_t gsi; - uint16_t flags; -} QEMU_PACKED; -typedef struct AcpiMadtIntsrcovr AcpiMadtIntsrcovr; - -struct AcpiMadtLocalNmi { - ACPI_SUB_HEADER_DEF - uint8_t processor_id; /* ACPI processor id */ - uint16_t flags; /* MPS INTI flags */ - uint8_t lint; /* Local APIC LINT# */ -} QEMU_PACKED; -typedef struct AcpiMadtLocalNmi AcpiMadtLocalNmi; - -struct AcpiMadtProcessorX2Apic { - ACPI_SUB_HEADER_DEF - uint16_t reserved; - uint32_t x2apic_id; /* Processor's local x2APIC ID */ - uint32_t flags; - uint32_t uid; /* Processor object _UID */ -} QEMU_PACKED; -typedef struct AcpiMadtProcessorX2Apic AcpiMadtProcessorX2Apic; - -struct AcpiMadtLocalX2ApicNmi { - ACPI_SUB_HEADER_DEF - uint16_t flags; /* MPS INTI flags */ - uint32_t uid; /* Processor object _UID */ - uint8_t lint; /* Local APIC LINT# */ - uint8_t reserved[3]; /* Local APIC LINT# */ -} QEMU_PACKED; -typedef struct AcpiMadtLocalX2ApicNmi AcpiMadtLocalX2ApicNmi; - -struct AcpiMadtGenericCpuInterface { - ACPI_SUB_HEADER_DEF - uint16_t reserved; - uint32_t cpu_interface_number; - uint32_t uid; - uint32_t flags; - uint32_t parking_version; - uint32_t performance_interrupt; - uint64_t parked_address; - uint64_t base_address; - uint64_t gicv_base_address; - uint64_t gich_base_address; - uint32_t vgic_interrupt; - uint64_t gicr_base_address; - uint64_t arm_mpidr; -} QEMU_PACKED; - -typedef struct AcpiMadtGenericCpuInterface AcpiMadtGenericCpuInterface; - -/* GICC CPU Interface Flags */ -#define ACPI_MADT_GICC_ENABLED 1 - -struct AcpiMadtGenericDistributor { - ACPI_SUB_HEADER_DEF - uint16_t reserved; - uint32_t gic_id; - uint64_t base_address; - uint32_t global_irq_base; - /* ACPI 5.1 Errata 1228 Present GIC version in MADT table */ - uint8_t version; - uint8_t reserved2[3]; -} QEMU_PACKED; - -typedef struct AcpiMadtGenericDistributor AcpiMadtGenericDistributor; - -struct AcpiMadtGenericMsiFrame { - ACPI_SUB_HEADER_DEF - uint16_t reserved; - uint32_t gic_msi_frame_id; - uint64_t base_address; - uint32_t flags; - uint16_t spi_count; - uint16_t spi_base; -} QEMU_PACKED; - -typedef struct AcpiMadtGenericMsiFrame AcpiMadtGenericMsiFrame; - -struct AcpiMadtGenericRedistributor { - ACPI_SUB_HEADER_DEF - uint16_t reserved; - uint64_t base_address; - uint32_t range_length; -} QEMU_PACKED; - -typedef struct AcpiMadtGenericRedistributor AcpiMadtGenericRedistributor; - -struct AcpiMadtGenericTranslator { - ACPI_SUB_HEADER_DEF - uint16_t reserved; - uint32_t translation_id; - uint64_t base_address; - uint32_t reserved2; -} QEMU_PACKED; - -typedef struct AcpiMadtGenericTranslator AcpiMadtGenericTranslator; - -/* - * Generic Timer Description Table (GTDT) - */ -#define ACPI_GTDT_INTERRUPT_MODE_LEVEL (0 << 0) -#define ACPI_GTDT_INTERRUPT_MODE_EDGE (1 << 0) -#define ACPI_GTDT_CAP_ALWAYS_ON (1 << 2) - -struct AcpiGenericTimerTable { - ACPI_TABLE_HEADER_DEF - 
uint64_t counter_block_addresss; - uint32_t reserved; - uint32_t secure_el1_interrupt; - uint32_t secure_el1_flags; - uint32_t non_secure_el1_interrupt; - uint32_t non_secure_el1_flags; - uint32_t virtual_timer_interrupt; - uint32_t virtual_timer_flags; - uint32_t non_secure_el2_interrupt; - uint32_t non_secure_el2_flags; - uint64_t counter_read_block_address; - uint32_t platform_timer_count; - uint32_t platform_timer_offset; -} QEMU_PACKED; -typedef struct AcpiGenericTimerTable AcpiGenericTimerTable; - -/* - * HPET Description Table - */ -struct Acpi20Hpet { - ACPI_TABLE_HEADER_DEF /* ACPI common table header */ - uint32_t timer_block_id; - struct AcpiGenericAddress addr; - uint8_t hpet_number; - uint16_t min_tick; - uint8_t page_protect; -} QEMU_PACKED; -typedef struct Acpi20Hpet Acpi20Hpet; - -/* - * SRAT (NUMA topology description) table - */ - -struct AcpiSystemResourceAffinityTable { - ACPI_TABLE_HEADER_DEF - uint32_t reserved1; - uint32_t reserved2[2]; -} QEMU_PACKED; -typedef struct AcpiSystemResourceAffinityTable AcpiSystemResourceAffinityTable; - -#define ACPI_SRAT_PROCESSOR_APIC 0 -#define ACPI_SRAT_MEMORY 1 -#define ACPI_SRAT_PROCESSOR_x2APIC 2 -#define ACPI_SRAT_PROCESSOR_GICC 3 - -struct AcpiSratProcessorAffinity { - ACPI_SUB_HEADER_DEF - uint8_t proximity_lo; - uint8_t local_apic_id; - uint32_t flags; - uint8_t local_sapic_eid; - uint8_t proximity_hi[3]; - uint32_t reserved; -} QEMU_PACKED; -typedef struct AcpiSratProcessorAffinity AcpiSratProcessorAffinity; - -struct AcpiSratProcessorX2ApicAffinity { - ACPI_SUB_HEADER_DEF - uint16_t reserved; - uint32_t proximity_domain; - uint32_t x2apic_id; - uint32_t flags; - uint32_t clk_domain; - uint32_t reserved2; -} QEMU_PACKED; -typedef struct AcpiSratProcessorX2ApicAffinity AcpiSratProcessorX2ApicAffinity; - -struct AcpiSratMemoryAffinity { - ACPI_SUB_HEADER_DEF - uint32_t proximity; - uint16_t reserved1; - uint64_t base_addr; - uint64_t range_length; - uint32_t reserved2; - uint32_t flags; - uint32_t reserved3[2]; -} QEMU_PACKED; -typedef struct AcpiSratMemoryAffinity AcpiSratMemoryAffinity; - -struct AcpiSratProcessorGiccAffinity { - ACPI_SUB_HEADER_DEF - uint32_t proximity; - uint32_t acpi_processor_uid; - uint32_t flags; - uint32_t clock_domain; -} QEMU_PACKED; - -typedef struct AcpiSratProcessorGiccAffinity AcpiSratProcessorGiccAffinity; - -/* - * TCPA Description Table - * - * Following Level 00, Rev 00.37 of specs: - * http://www.trustedcomputinggroup.org/resources/tcg_acpi_specification - */ -struct Acpi20Tcpa { - ACPI_TABLE_HEADER_DEF /* ACPI common table header */ - uint16_t platform_class; - uint32_t log_area_minimum_length; - uint64_t log_area_start_address; -} QEMU_PACKED; -typedef struct Acpi20Tcpa Acpi20Tcpa; - -/* DMAR - DMA Remapping table r2.2 */ -struct AcpiTableDmar { - ACPI_TABLE_HEADER_DEF - uint8_t host_address_width; /* Maximum DMA physical addressability */ - uint8_t flags; - uint8_t reserved[10]; -} QEMU_PACKED; -typedef struct AcpiTableDmar AcpiTableDmar; - -/* Masks for Flags field above */ -#define ACPI_DMAR_INTR_REMAP 1 -#define ACPI_DMAR_X2APIC_OPT_OUT (1 << 1) - -/* Values for sub-structure type for DMAR */ -enum { - ACPI_DMAR_TYPE_HARDWARE_UNIT = 0, /* DRHD */ - ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, /* RMRR */ - ACPI_DMAR_TYPE_ATSR = 2, /* ATSR */ - ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3, /* RHSR */ - ACPI_DMAR_TYPE_ANDD = 4, /* ANDD */ - ACPI_DMAR_TYPE_RESERVED = 5 /* Reserved for furture use */ -}; - -/* - * Sub-structures for DMAR - */ - -/* Device scope structure for DRHD. 
*/ -struct AcpiDmarDeviceScope { - uint8_t entry_type; - uint8_t length; - uint16_t reserved; - uint8_t enumeration_id; - uint8_t bus; - struct { - uint8_t device; - uint8_t function; - } path[]; -} QEMU_PACKED; -typedef struct AcpiDmarDeviceScope AcpiDmarDeviceScope; - -/* Type 0: Hardware Unit Definition */ -struct AcpiDmarHardwareUnit { - uint16_t type; - uint16_t length; - uint8_t flags; - uint8_t reserved; - uint16_t pci_segment; /* The PCI Segment associated with this unit */ - uint64_t address; /* Base address of remapping hardware register-set */ - AcpiDmarDeviceScope scope[]; -} QEMU_PACKED; -typedef struct AcpiDmarHardwareUnit AcpiDmarHardwareUnit; - -/* Type 2: Root Port ATS Capability Reporting Structure */ -struct AcpiDmarRootPortATS { - uint16_t type; - uint16_t length; - uint8_t flags; - uint8_t reserved; - uint16_t pci_segment; - AcpiDmarDeviceScope scope[]; -} QEMU_PACKED; -typedef struct AcpiDmarRootPortATS AcpiDmarRootPortATS; - -/* Masks for Flags field above */ -#define ACPI_DMAR_INCLUDE_PCI_ALL 1 -#define ACPI_DMAR_ATSR_ALL_PORTS 1 - -/* - * Input Output Remapping Table (IORT) - * Conforms to "IO Remapping Table System Software on ARM Platforms", - * Document number: ARM DEN 0049B, October 2015 - */ - -struct AcpiIortTable { - ACPI_TABLE_HEADER_DEF /* ACPI common table header */ - uint32_t node_count; - uint32_t node_offset; - uint32_t reserved; -} QEMU_PACKED; -typedef struct AcpiIortTable AcpiIortTable; - -/* - * IORT node types - */ - -#define ACPI_IORT_NODE_HEADER_DEF /* Node format common fields */ \ - uint8_t type; \ - uint16_t length; \ - uint8_t revision; \ - uint32_t reserved; \ - uint32_t mapping_count; \ - uint32_t mapping_offset; - -/* Values for node Type above */ -enum { - ACPI_IORT_NODE_ITS_GROUP = 0x00, - ACPI_IORT_NODE_NAMED_COMPONENT = 0x01, - ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02, - ACPI_IORT_NODE_SMMU = 0x03, - ACPI_IORT_NODE_SMMU_V3 = 0x04 -}; - -struct AcpiIortIdMapping { - uint32_t input_base; - uint32_t id_count; - uint32_t output_base; - uint32_t output_reference; - uint32_t flags; -} QEMU_PACKED; -typedef struct AcpiIortIdMapping AcpiIortIdMapping; - -struct AcpiIortMemoryAccess { - uint32_t cache_coherency; - uint8_t hints; - uint16_t reserved; - uint8_t memory_flags; -} QEMU_PACKED; -typedef struct AcpiIortMemoryAccess AcpiIortMemoryAccess; - -struct AcpiIortItsGroup { - ACPI_IORT_NODE_HEADER_DEF - uint32_t its_count; - uint32_t identifiers[]; -} QEMU_PACKED; -typedef struct AcpiIortItsGroup AcpiIortItsGroup; - -#define ACPI_IORT_SMMU_V3_COHACC_OVERRIDE 1 - -struct AcpiIortSmmu3 { - ACPI_IORT_NODE_HEADER_DEF - uint64_t base_address; - uint32_t flags; - uint32_t reserved2; - uint64_t vatos_address; - uint32_t model; - uint32_t event_gsiv; - uint32_t pri_gsiv; - uint32_t gerr_gsiv; - uint32_t sync_gsiv; - AcpiIortIdMapping id_mapping_array[]; -} QEMU_PACKED; -typedef struct AcpiIortSmmu3 AcpiIortSmmu3; - -struct AcpiIortRC { - ACPI_IORT_NODE_HEADER_DEF - AcpiIortMemoryAccess memory_properties; - uint32_t ats_attribute; - uint32_t pci_segment_number; - AcpiIortIdMapping id_mapping_array[]; -} QEMU_PACKED; -typedef struct AcpiIortRC AcpiIortRC; - #endif diff --git a/include/hw/acpi/acpi.h b/include/hw/acpi/acpi.h index da08ddfff3..edbf35bf98 100644 --- a/include/hw/acpi/acpi.h +++ b/include/hw/acpi/acpi.h @@ -47,6 +47,8 @@ #define ACPI_PM_PROP_PM_IO_BASE "pm_io_base" #define ACPI_PM_PROP_GPE0_BLK "gpe0_blk" #define ACPI_PM_PROP_GPE0_BLK_LEN "gpe0_blk_len" +#define ACPI_PM_PROP_ACPI_PCIHP_BRIDGE "acpi-pci-hotplug-with-bridge-support" +#define 
ACPI_PM_PROP_ACPI_PCI_ROOTHP "acpi-root-pci-hotplug" /* PM Timer ticks per second (HZ) */ #ifdef XBOX diff --git a/include/hw/acpi/acpi_aml_interface.h b/include/hw/acpi/acpi_aml_interface.h new file mode 100644 index 0000000000..436da069d6 --- /dev/null +++ b/include/hw/acpi/acpi_aml_interface.h @@ -0,0 +1,49 @@ +#ifndef ACPI_AML_INTERFACE_H +#define ACPI_AML_INTERFACE_H + +#include "qom/object.h" +#include "hw/acpi/aml-build.h" + +#define TYPE_ACPI_DEV_AML_IF "acpi-dev-aml-interface" +typedef struct AcpiDevAmlIfClass AcpiDevAmlIfClass; +DECLARE_CLASS_CHECKERS(AcpiDevAmlIfClass, ACPI_DEV_AML_IF, TYPE_ACPI_DEV_AML_IF) +#define ACPI_DEV_AML_IF(obj) \ + INTERFACE_CHECK(AcpiDevAmlIf, (obj), TYPE_ACPI_DEV_AML_IF) + +typedef struct AcpiDevAmlIf AcpiDevAmlIf; +typedef void (*dev_aml_fn)(AcpiDevAmlIf *adev, Aml *scope); + +/** + * AcpiDevAmlIfClass: + * + * build_dev_aml: adds device specific AML blob to provided scope + * + * Interface is designed for providing generic callback that builds device + * specific AML blob. + */ +struct AcpiDevAmlIfClass { + /* */ + InterfaceClass parent_class; + + /* */ + dev_aml_fn build_dev_aml; +}; + +static inline dev_aml_fn get_dev_aml_func(DeviceState *dev) +{ + if (object_dynamic_cast(OBJECT(dev), TYPE_ACPI_DEV_AML_IF)) { + AcpiDevAmlIfClass *klass = ACPI_DEV_AML_IF_GET_CLASS(dev); + return klass->build_dev_aml; + } + return NULL; +} + +static inline void call_dev_aml_func(DeviceState *dev, Aml *scope) +{ + dev_aml_fn fn = get_dev_aml_func(dev); + if (fn) { + fn(ACPI_DEV_AML_IF(dev), scope); + } +} + +#endif diff --git a/include/hw/acpi/acpi_dev_interface.h b/include/hw/acpi/acpi_dev_interface.h index 0956e448fb..2301f86513 100644 --- a/include/hw/acpi/acpi_dev_interface.h +++ b/include/hw/acpi/acpi_dev_interface.h @@ -56,6 +56,7 @@ struct AcpiDeviceIfClass { void (*ospm_status)(AcpiDeviceIf *adev, ACPIOSTInfoList ***list); void (*send_event)(AcpiDeviceIf *adev, AcpiEventStatusBits ev); void (*madt_cpu)(AcpiDeviceIf *adev, int uid, - const CPUArchIdList *apic_ids, GArray *entry); + const CPUArchIdList *apic_ids, GArray *entry, + bool force_enabled); }; #endif diff --git a/include/hw/acpi/aml-build.h b/include/hw/acpi/aml-build.h index 471266d739..d1fb08514b 100644 --- a/include/hw/acpi/aml-build.h +++ b/include/hw/acpi/aml-build.h @@ -289,7 +289,7 @@ void free_aml_allocator(void); void aml_append(Aml *parent_ctx, Aml *child); /* non block AML object primitives */ -Aml *aml_name(const char *name_format, ...) GCC_FMT_ATTR(1, 2); +Aml *aml_name(const char *name_format, ...) G_GNUC_PRINTF(1, 2); Aml *aml_name_decl(const char *name, Aml *val); Aml *aml_debug(void); Aml *aml_return(Aml *val); @@ -344,13 +344,13 @@ Aml *aml_irq_no_flags(uint8_t irq); Aml *aml_named_field(const char *name, unsigned length); Aml *aml_reserved_field(unsigned length); Aml *aml_local(int num); -Aml *aml_string(const char *name_format, ...) GCC_FMT_ATTR(1, 2); +Aml *aml_string(const char *name_format, ...) G_GNUC_PRINTF(1, 2); Aml *aml_lnot(Aml *arg); Aml *aml_equal(Aml *arg1, Aml *arg2); Aml *aml_lgreater(Aml *arg1, Aml *arg2); Aml *aml_lgreater_equal(Aml *arg1, Aml *arg2); Aml *aml_processor(uint8_t proc_id, uint32_t pblk_addr, uint8_t pblk_len, - const char *name_format, ...) GCC_FMT_ATTR(4, 5); + const char *name_format, ...) 
G_GNUC_PRINTF(4, 5); Aml *aml_eisaid(const char *str); Aml *aml_word_bus_number(AmlMinFixed min_fixed, AmlMaxFixed max_fixed, AmlDecode dec, uint16_t addr_gran, @@ -384,8 +384,8 @@ Aml *aml_sleep(uint64_t msec); Aml *aml_i2c_serial_bus_device(uint16_t address, const char *resource_source); /* Block AML object primitives */ -Aml *aml_scope(const char *name_format, ...) GCC_FMT_ATTR(1, 2); -Aml *aml_device(const char *name_format, ...) GCC_FMT_ATTR(1, 2); +Aml *aml_scope(const char *name_format, ...) G_GNUC_PRINTF(1, 2); +Aml *aml_device(const char *name_format, ...) G_GNUC_PRINTF(1, 2); Aml *aml_method(const char *name, int arg_count, AmlSerializeFlag sflag); Aml *aml_if(Aml *predicate); Aml *aml_else(void); @@ -413,10 +413,37 @@ Aml *aml_concatenate(Aml *source1, Aml *source2, Aml *target); Aml *aml_object_type(Aml *object); void build_append_int_noprefix(GArray *table, uint64_t value, int size); -void -build_header(BIOSLinker *linker, GArray *table_data, - AcpiTableHeader *h, const char *sig, int len, uint8_t rev, - const char *oem_id, const char *oem_table_id); + +typedef struct AcpiTable { + const char *sig; + const uint8_t rev; + const char *oem_id; + const char *oem_table_id; + /* private vars tracking table state */ + GArray *array; + unsigned table_offset; +} AcpiTable; + +/** + * acpi_table_begin: + * initializes table header and keeps track of + * table data/offsets + * @desc: ACPI table descriptor + * @array: blob where the ACPI table will be composed/stored. + */ +void acpi_table_begin(AcpiTable *desc, GArray *array); + +/** + * acpi_table_end: + * sets actual table length and tells bios loader + * where table is for the later initialization on + * guest side. + * @linker: reference to BIOSLinker object to use for the table + * @table: ACPI table descriptor that was used with @acpi_table_begin + * counterpart + */ +void acpi_table_end(BIOSLinker *linker, AcpiTable *table); + void *acpi_data_push(GArray *table_data, unsigned size); unsigned acpi_data_len(GArray *table); void acpi_add_table(GArray *table_offsets, GArray *table_data); @@ -433,7 +460,7 @@ build_xsdt(GArray *table_data, BIOSLinker *linker, GArray *table_offsets, int build_append_named_dword(GArray *array, const char *name_format, ...) 
-GCC_FMT_ATTR(2, 3); +G_GNUC_PRINTF(2, 3); void build_append_gas(GArray *table, AmlAddressSpace as, uint8_t bit_width, uint8_t bit_offset, @@ -456,12 +483,15 @@ Aml *build_crs(PCIHostState *host, CrsRangeSet *range_set, uint32_t io_offset, uint32_t mmio32_offset, uint64_t mmio64_offset, uint16_t bus_nr_offset); -void build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base, +void build_srat_memory(GArray *table_data, uint64_t base, uint64_t len, int node, MemoryAffinityFlags flags); void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms, const char *oem_id, const char *oem_table_id); +void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms, + const char *oem_id, const char *oem_table_id); + void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f, const char *oem_id, const char *oem_table_id); diff --git a/include/hw/acpi/cxl.h b/include/hw/acpi/cxl.h new file mode 100644 index 0000000000..acf4418886 --- /dev/null +++ b/include/hw/acpi/cxl.h @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2020 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef HW_ACPI_CXL_H +#define HW_ACPI_CXL_H + +#include "hw/acpi/bios-linker-loader.h" +#include "hw/cxl/cxl.h" + +void cxl_build_cedt(GArray *table_offsets, GArray *table_data, + BIOSLinker *linker, const char *oem_id, + const char *oem_table_id, CXLState *cxl_state); +void build_cxl_osc_method(Aml *dev); + +#endif diff --git a/include/hw/acpi/erst.h b/include/hw/acpi/erst.h new file mode 100644 index 0000000000..b747fe7739 --- /dev/null +++ b/include/hw/acpi/erst.h @@ -0,0 +1,24 @@ +/* + * ACPI Error Record Serialization Table, ERST, Implementation + * + * ACPI ERST introduced in ACPI 4.0, June 16, 2009. + * ACPI Platform Error Interfaces : Error Serialization + * + * Copyright (c) 2021 Oracle and/or its affiliates.
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef HW_ACPI_ERST_H +#define HW_ACPI_ERST_H + +void build_erst(GArray *table_data, BIOSLinker *linker, Object *erst_dev, + const char *oem_id, const char *oem_table_id); + +#define TYPE_ACPI_ERST "acpi-erst" + +/* returns NULL unless there is exactly one device */ +static inline Object *find_erst_dev(void) +{ + return object_resolve_path_type("", TYPE_ACPI_ERST, NULL); +} +#endif diff --git a/include/hw/acpi/generic_event_device.h b/include/hw/acpi/generic_event_device.h index 6bed92e8fc..d831bbd889 100644 --- a/include/hw/acpi/generic_event_device.h +++ b/include/hw/acpi/generic_event_device.h @@ -56,8 +56,8 @@ * */ -#ifndef HW_ACPI_GED_H -#define HW_ACPI_GED_H +#ifndef HW_ACPI_GENERIC_EVENT_DEVICE_H +#define HW_ACPI_GENERIC_EVENT_DEVICE_H #include "hw/sysbus.h" #include "hw/acpi/memory_hotplug.h" @@ -70,8 +70,6 @@ OBJECT_DECLARE_SIMPLE_TYPE(AcpiGedState, ACPI_GED) #define TYPE_ACPI_GED_X86 "acpi-ged-x86" -#define ACPI_GED_X86(obj) \ - OBJECT_CHECK(AcpiGedX86State, (obj), TYPE_ACPI_GED_X86) #define ACPI_GED_EVT_SEL_OFFSET 0x0 #define ACPI_GED_EVT_SEL_LEN 0x4 diff --git a/include/hw/acpi/ich9.h b/include/hw/acpi/ich9.h index a329ce43ab..7ca92843c6 100644 --- a/include/hw/acpi/ich9.h +++ b/include/hw/acpi/ich9.h @@ -29,7 +29,7 @@ #include "hw/acpi/acpi_dev_interface.h" #include "hw/acpi/tco.h" -#define ACPI_PCIHP_ADDR_ICH9 0x0cc4 +#define ACPI_PCIHP_ADDR_ICH9 0x0cc0 typedef struct ICH9LPCPMRegs { /* @@ -56,6 +56,7 @@ typedef struct ICH9LPCPMRegs { AcpiCpuHotplug gpe_cpu; CPUHotplugState cpuhp_state; + bool keep_pci_slot_hpc; bool use_acpi_hotplug_bridge; AcpiPciHpState acpi_pci_hotplug; MemHotplugState acpi_memory_hotplug; diff --git a/include/hw/acpi/ipmi.h b/include/hw/acpi/ipmi.h index c14ad682ac..6c8079c97a 100644 --- a/include/hw/acpi/ipmi.h +++ b/include/hw/acpi/ipmi.h @@ -9,13 +9,8 @@ #ifndef HW_ACPI_IPMI_H #define HW_ACPI_IPMI_H -#include "hw/acpi/aml-build.h" +#include "hw/acpi/acpi_aml_interface.h" -/* - * Add ACPI IPMI entries for all registered IPMI devices whose parent - * bus matches the given bus. The resource is the ACPI resource that - * contains the IPMI device, this is required for the I2C CRS. - */ -void build_acpi_ipmi_devices(Aml *table, BusState *bus, const char *resource); +void build_ipmi_dev_aml(AcpiDevAmlIf *adev, Aml *scope); #endif /* HW_ACPI_IPMI_H */ diff --git a/include/hw/acpi/pcihp.h b/include/hw/acpi/pcihp.h index af1a169fc3..7e268c2c9c 100644 --- a/include/hw/acpi/pcihp.h +++ b/include/hw/acpi/pcihp.h @@ -73,8 +73,6 @@ void acpi_pcihp_reset(AcpiPciHpState *s, bool acpihp_root_off); extern const VMStateDescription vmstate_acpi_pcihp_pci_status; -bool vmstate_acpi_pcihp_use_acpi_index(void *opaque, int version_id); - #define VMSTATE_PCI_HOTPLUG(pcihp, state, test_pcihp, test_acpi_index) \ VMSTATE_UINT32_TEST(pcihp.hotplug_select, state, \ test_pcihp), \ diff --git a/include/hw/acpi/piix4.h b/include/hw/acpi/piix4.h new file mode 100644 index 0000000000..32686a75c5 --- /dev/null +++ b/include/hw/acpi/piix4.h @@ -0,0 +1,75 @@ +/* + * ACPI implementation + * + * Copyright (c) 2006 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1 as published by the Free Software Foundation. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#ifndef HW_ACPI_PIIX4_H +#define HW_ACPI_PIIX4_H + +#include "hw/pci/pci.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/cpu_hotplug.h" +#include "hw/acpi/memory_hotplug.h" +#include "hw/acpi/pcihp.h" +#include "hw/i2c/pm_smbus.h" +#include "hw/isa/apm.h" + +#define TYPE_PIIX4_PM "PIIX4_PM" +OBJECT_DECLARE_SIMPLE_TYPE(PIIX4PMState, PIIX4_PM) + +struct PIIX4PMState { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + MemoryRegion io; + uint32_t io_base; + + MemoryRegion io_gpe; + ACPIREGS ar; + + APMState apm; + + PMSMBus smb; + uint32_t smb_io_base; + + qemu_irq irq; + qemu_irq smi_irq; + bool smm_enabled; + bool smm_compat; + Notifier machine_ready; + Notifier powerdown_notifier; + + AcpiPciHpState acpi_pci_hotplug; + bool use_acpi_hotplug_bridge; + bool use_acpi_root_pci_hotplug; + bool not_migrate_acpi_index; + + uint8_t disable_s3; + uint8_t disable_s4; + uint8_t s4_val; + + bool cpu_hotplug_legacy; + AcpiCpuHotplug gpe_cpu; + CPUHotplugState cpuhp_state; + + MemHotplugState acpi_memory_hotplug; +}; + +#endif diff --git a/include/hw/adc/aspeed_adc.h b/include/hw/adc/aspeed_adc.h new file mode 100644 index 0000000000..ff1d06ea91 --- /dev/null +++ b/include/hw/adc/aspeed_adc.h @@ -0,0 +1,56 @@ +/* + * Aspeed ADC + * + * Copyright 2017-2021 IBM Corp. + * + * Andrew Jeffery + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_ADC_ASPEED_ADC_H +#define HW_ADC_ASPEED_ADC_H + +#include "hw/sysbus.h" + +#define TYPE_ASPEED_ADC "aspeed.adc" +#define TYPE_ASPEED_2400_ADC TYPE_ASPEED_ADC "-ast2400" +#define TYPE_ASPEED_2500_ADC TYPE_ASPEED_ADC "-ast2500" +#define TYPE_ASPEED_2600_ADC TYPE_ASPEED_ADC "-ast2600" +#define TYPE_ASPEED_1030_ADC TYPE_ASPEED_ADC "-ast1030" +OBJECT_DECLARE_TYPE(AspeedADCState, AspeedADCClass, ASPEED_ADC) + +#define TYPE_ASPEED_ADC_ENGINE "aspeed.adc.engine" +OBJECT_DECLARE_SIMPLE_TYPE(AspeedADCEngineState, ASPEED_ADC_ENGINE) + +#define ASPEED_ADC_NR_CHANNELS 16 +#define ASPEED_ADC_NR_REGS (0xD0 >> 2) + +struct AspeedADCEngineState { + /* */ + SysBusDevice parent; + + MemoryRegion mmio; + qemu_irq irq; + uint32_t engine_id; + uint32_t nr_channels; + uint32_t regs[ASPEED_ADC_NR_REGS]; +}; + +struct AspeedADCState { + /* */ + SysBusDevice parent; + + MemoryRegion mmio; + qemu_irq irq; + + AspeedADCEngineState engines[2]; +}; + +struct AspeedADCClass { + SysBusDeviceClass parent_class; + + uint32_t nr_engines; +}; + +#endif /* HW_ADC_ASPEED_ADC_H */ diff --git a/include/hw/adc/zynq-xadc.h b/include/hw/adc/zynq-xadc.h index 2017b7a803..c10cc4c379 100644 --- a/include/hw/adc/zynq-xadc.h +++ b/include/hw/adc/zynq-xadc.h @@ -39,8 +39,7 @@ struct ZynqXADCState { uint16_t xadc_dfifo[ZYNQ_XADC_FIFO_DEPTH]; uint16_t xadc_dfifo_entries; - struct IRQState *qemu_irq; - + qemu_irq irq; }; #endif /* ZYNQ_XADC_H */ diff --git a/include/hw/arm/armv7m.h b/include/hw/arm/armv7m.h index bc6733c518..b7ba0ff409 100644 --- a/include/hw/arm/armv7m.h +++ b/include/hw/arm/armv7m.h @@ -12,8 +12,10 @@ #include "hw/sysbus.h" #include "hw/intc/armv7m_nvic.h" +#include "hw/misc/armv7m_ras.h" #include "target/arm/idau.h" #include "qom/object.h" +#include "hw/clock.h" #define TYPE_BITBAND "ARM-bitband-memory" OBJECT_DECLARE_SIMPLE_TYPE(BitBandState, 
BITBAND) @@ -50,6 +52,8 @@ OBJECT_DECLARE_SIMPLE_TYPE(ARMv7MState, ARMV7M) * + Property "vfp": enable VFP (forwarded to CPU object) * + Property "dsp": enable DSP (forwarded to CPU object) * + Property "enable-bitband": expose bitbanded IO + * + Clock input "refclk" is the external reference clock for the systick timers + * + Clock input "cpuclk" is the main CPU clock */ struct ARMv7MState { /*< private >*/ @@ -58,11 +62,31 @@ struct ARMv7MState { NVICState nvic; BitBandState bitband[ARMV7M_NUM_BITBANDS]; ARMCPU *cpu; + ARMv7MRAS ras; + SysTickState systick[M_REG_NUM_BANKS]; /* MemoryRegion we pass to the CPU, with our devices layered on * top of the ones the board provides in board_memory. */ MemoryRegion container; + /* + * MemoryRegion which passes the transaction to either the S or the + * NS systick device depending on the transaction attributes + */ + MemoryRegion systickmem; + /* + * MemoryRegion which enforces the S/NS handling of the systick + * device NS alias region and passes the transaction to the + * NS systick device if appropriate. + */ + MemoryRegion systick_ns_mem; + /* Ditto, for the sysregs region provided by the NVIC */ + MemoryRegion sysreg_ns_mem; + /* MR providing default PPB behaviour */ + MemoryRegion defaultmem; + + Clock *refclk; + Clock *cpuclk; /* Properties */ char *cpu_type; diff --git a/include/hw/arm/aspeed.h b/include/hw/arm/aspeed.h index c9747b15fc..cbeacb214c 100644 --- a/include/hw/arm/aspeed.h +++ b/include/hw/arm/aspeed.h @@ -38,6 +38,7 @@ struct AspeedMachineClass { uint32_t num_cs; uint32_t macs_mask; void (*i2c_init)(AspeedMachineState *bmc); + uint32_t uart_default; }; diff --git a/include/hw/arm/aspeed_soc.h b/include/hw/arm/aspeed_soc.h index d9161d26d6..8389200b2d 100644 --- a/include/hw/arm/aspeed_soc.h +++ b/include/hw/arm/aspeed_soc.h @@ -13,15 +13,19 @@ #define ASPEED_SOC_H #include "hw/cpu/a15mpcore.h" +#include "hw/arm/armv7m.h" #include "hw/intc/aspeed_vic.h" #include "hw/misc/aspeed_scu.h" +#include "hw/adc/aspeed_adc.h" #include "hw/misc/aspeed_sdmc.h" #include "hw/misc/aspeed_xdma.h" #include "hw/timer/aspeed_timer.h" #include "hw/rtc/aspeed_rtc.h" #include "hw/i2c/aspeed_i2c.h" +#include "hw/misc/aspeed_i3c.h" #include "hw/ssi/aspeed_smc.h" #include "hw/misc/aspeed_hace.h" +#include "hw/misc/aspeed_sbc.h" #include "hw/watchdog/wdt_aspeed.h" #include "hw/net/ftgmac100.h" #include "target/arm/cpu.h" @@ -30,12 +34,16 @@ #include "hw/usb/hcd-ehci.h" #include "qom/object.h" #include "hw/misc/aspeed_lpc.h" +#include "hw/misc/unimp.h" +#include "hw/misc/aspeed_peci.h" +#include "hw/char/serial.h" #define ASPEED_SPIS_NUM 2 #define ASPEED_EHCIS_NUM 2 #define ASPEED_WDTS_NUM 4 #define ASPEED_CPUS_NUM 2 #define ASPEED_MACS_NUM 4 +#define ASPEED_UARTS_NUM 13 struct AspeedSoCState { /*< private >*/ @@ -44,18 +52,25 @@ struct AspeedSoCState { /*< public >*/ ARMCPU cpu[ASPEED_CPUS_NUM]; A15MPPrivState a7mpcore; + ARMv7MState armv7m; + MemoryRegion *memory; MemoryRegion *dram_mr; + MemoryRegion dram_container; MemoryRegion sram; AspeedVICState vic; AspeedRtcState rtc; AspeedTimerCtrlState timerctrl; AspeedI2CState i2c; + AspeedI3CState i3c; AspeedSCUState scu; AspeedHACEState hace; AspeedXDMAState xdma; + AspeedADCState adc; AspeedSMCState fmc; AspeedSMCState spi[ASPEED_SPIS_NUM]; EHCISysBusState ehci[ASPEED_EHCIS_NUM]; + AspeedSBCState sbc; + UnimplementedDeviceState sbc_unimplemented; AspeedSDMCState sdmc; AspeedWDTState wdt[ASPEED_WDTS_NUM]; FTGMAC100State ftgmac100[ASPEED_MACS_NUM]; @@ -65,6 +80,13 @@ struct AspeedSoCState { AspeedSDHCIState 
sdhci; AspeedSDHCIState emmc; AspeedLPCState lpc; + AspeedPECIState peci; + SerialMM uart[ASPEED_UARTS_NUM]; + Clock *sysclk; + UnimplementedDeviceState iomem; + UnimplementedDeviceState video; + UnimplementedDeviceState emmc_boot_controller; + UnimplementedDeviceState dpmcu; }; #define TYPE_ASPEED_SOC "aspeed-soc" @@ -81,9 +103,11 @@ struct AspeedSoCClass { int ehcis_num; int wdts_num; int macs_num; + int uarts_num; const int *irqmap; const hwaddr *memmap; uint32_t num_cpus; + qemu_irq (*get_irq)(AspeedSoCState *s, int dev); }; @@ -94,6 +118,14 @@ enum { ASPEED_DEV_UART3, ASPEED_DEV_UART4, ASPEED_DEV_UART5, + ASPEED_DEV_UART6, + ASPEED_DEV_UART7, + ASPEED_DEV_UART8, + ASPEED_DEV_UART9, + ASPEED_DEV_UART10, + ASPEED_DEV_UART11, + ASPEED_DEV_UART12, + ASPEED_DEV_UART13, ASPEED_DEV_VUART, ASPEED_DEV_FMC, ASPEED_DEV_SPI1, @@ -104,6 +136,8 @@ enum { ASPEED_DEV_SDMC, ASPEED_DEV_SCU, ASPEED_DEV_ADC, + ASPEED_DEV_SBC, + ASPEED_DEV_EMMC_BC, ASPEED_DEV_VIDEO, ASPEED_DEV_SRAM, ASPEED_DEV_SDHCI, @@ -123,6 +157,7 @@ enum { ASPEED_DEV_LPC, ASPEED_DEV_IBT, ASPEED_DEV_I2C, + ASPEED_DEV_PECI, ASPEED_DEV_ETH1, ASPEED_DEV_ETH2, ASPEED_DEV_ETH3, @@ -136,6 +171,20 @@ enum { ASPEED_DEV_EMMC, ASPEED_DEV_KCS, ASPEED_DEV_HACE, + ASPEED_DEV_DPMCU, + ASPEED_DEV_DP, + ASPEED_DEV_I3C, }; +qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int dev); +bool aspeed_soc_uart_realize(AspeedSoCState *s, Error **errp); +void aspeed_soc_uart_set_chr(AspeedSoCState *s, int dev, Chardev *chr); +bool aspeed_soc_dram_init(AspeedSoCState *s, Error **errp); +void aspeed_mmio_map(AspeedSoCState *s, SysBusDevice *dev, int n, hwaddr addr); +void aspeed_mmio_map_unimplemented(AspeedSoCState *s, SysBusDevice *dev, + const char *name, hwaddr addr, + uint64_t size); +void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype, + unsigned int count, int unit0); + #endif /* ASPEED_SOC_H */ diff --git a/include/hw/arm/bcm2835_peripherals.h b/include/hw/arm/bcm2835_peripherals.h index d864879421..c9d25d493e 100644 --- a/include/hw/arm/bcm2835_peripherals.h +++ b/include/hw/arm/bcm2835_peripherals.h @@ -17,6 +17,7 @@ #include "hw/char/bcm2835_aux.h" #include "hw/display/bcm2835_fb.h" #include "hw/dma/bcm2835_dma.h" +#include "hw/or-irq.h" #include "hw/intc/bcm2835_ic.h" #include "hw/misc/bcm2835_property.h" #include "hw/misc/bcm2835_rng.h" @@ -55,6 +56,7 @@ struct BCM2835PeripheralState { BCM2835AuxState aux; BCM2835FBState fb; BCM2835DMAState dma; + qemu_or_irq orgated_dma_irq; BCM2835ICState ic; BCM2835PropertyState property; BCM2835RngState rng; diff --git a/include/hw/arm/boot.h b/include/hw/arm/boot.h index ce2b48b88b..80c492d742 100644 --- a/include/hw/arm/boot.h +++ b/include/hw/arm/boot.h @@ -25,13 +25,16 @@ typedef enum { * armv7m_load_kernel: * @cpu: CPU * @kernel_filename: file to load + * @mem_base: base address to load image at (should be where the + * CPU expects to find its vector table on reset) * @mem_size: mem_size: maximum image size to load * * Load the guest image for an ARMv7M system. This must be called by * any ARMv7M board. (This is necessary to ensure that the CPU resets * correctly on system reset, as well as for kernel loading.) 
*/ -void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size); +void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, + hwaddr mem_base, int mem_size); /* arm_boot.c */ struct arm_boot_info { @@ -56,7 +59,6 @@ struct arm_boot_info { hwaddr smp_loader_start; hwaddr smp_bootreg_addr; hwaddr gic_cpu_if_addr; - int nb_cpus; int board_id; /* ARM machines that support the ARM Security Extensions use this field to * control whether Linux is booted as secure(true) or non-secure(false). @@ -70,6 +72,9 @@ struct arm_boot_info { * boot loader/boot ROM code, and secondary_cpu_reset_hook() should * perform any necessary CPU reset handling and set the PC for the * secondary CPUs to point at this boot blob. + * + * These hooks won't be called if secondary CPUs are booting via + * emulated PSCI (see psci_conduit below). */ void (*write_secondary_boot)(ARMCPU *cpu, const struct arm_boot_info *info); @@ -86,6 +91,16 @@ struct arm_boot_info { * the user it should implement this hook. */ void (*modify_dtb)(const struct arm_boot_info *info, void *fdt); + /* + * If a board wants to use the QEMU emulated-firmware PSCI support, + * it should set this to QEMU_PSCI_CONDUIT_HVC or QEMU_PSCI_CONDUIT_SMC + * as appropriate. arm_load_kernel() will set the psci-conduit and + * start-powered-off properties on the CPUs accordingly. + * Note that if the guest image is started at the same exception level + * as the conduit specifies calls should go to (eg guest firmware booted + * to EL3) then PSCI will not be enabled. + */ + int psci_conduit; /* Used internally by arm_boot.c */ int is_linux; hwaddr initrd_start; @@ -168,4 +183,53 @@ void arm_write_secure_board_setup_dummy_smc(ARMCPU *cpu, const struct arm_boot_info *info, hwaddr mvbar_addr); +typedef enum { + FIXUP_NONE = 0, /* do nothing */ + FIXUP_TERMINATOR, /* end of insns */ + FIXUP_BOARDID, /* overwrite with board ID number */ + FIXUP_BOARD_SETUP, /* overwrite with board specific setup code address */ + FIXUP_ARGPTR_LO, /* overwrite with pointer to kernel args */ + FIXUP_ARGPTR_HI, /* overwrite with pointer to kernel args (high half) */ + FIXUP_ENTRYPOINT_LO, /* overwrite with kernel entry point */ + FIXUP_ENTRYPOINT_HI, /* overwrite with kernel entry point (high half) */ + FIXUP_GIC_CPU_IF, /* overwrite with GIC CPU interface address */ + FIXUP_BOOTREG, /* overwrite with boot register address */ + FIXUP_DSB, /* overwrite with correct DSB insn for cpu */ + FIXUP_MAX, +} FixupType; + +typedef struct ARMInsnFixup { + uint32_t insn; + FixupType fixup; +} ARMInsnFixup; + +/** + * arm_write_bootloader - write a bootloader to guest memory + * @name: name of the bootloader blob + * @as: AddressSpace to write the bootloader + * @addr: guest address to write it + * @insns: the blob to be loaded + * @fixupcontext: context to be used for any fixups in @insns + * + * Write a bootloader to guest memory at address @addr in the address + * space @as. @name is the name to use for the resulting ROM blob, so + * it should be unique in the system and reasonably identifiable for debugging. + * + * @insns must be an array of ARMInsnFixup structs, each of which has + * one 32-bit value to be written to the guest memory, and a fixup to be + * applied to the value. FIXUP_NONE (do nothing) is value 0, so effectively + * the fixup is optional when writing a struct initializer. + * The final entry in the array must be { 0, FIXUP_TERMINATOR }. 
+ * + * All other supported fixup types have the semantics "ignore insn + * and instead use the value from the array element @fixupcontext[fixup]". + * The caller should therefore provide @fixupcontext as an array of + * size FIXUP_MAX whose elements have been initialized for at least + * the entries that @insns refers to. + */ +void arm_write_bootloader(const char *name, + AddressSpace *as, hwaddr addr, + const ARMInsnFixup *insns, + const uint32_t *fixupcontext); + #endif /* HW_ARM_BOOT_H */ diff --git a/include/hw/arm/exynos4210.h b/include/hw/arm/exynos4210.h index 60b9e126f5..97353f1c02 100644 --- a/include/hw/arm/exynos4210.h +++ b/include/hw/arm/exynos4210.h @@ -26,6 +26,10 @@ #include "hw/or-irq.h" #include "hw/sysbus.h" +#include "hw/cpu/a9mpcore.h" +#include "hw/intc/exynos4210_gic.h" +#include "hw/intc/exynos4210_combiner.h" +#include "hw/core/split-irq.h" #include "target/arm/cpu-qom.h" #include "qom/object.h" @@ -65,34 +69,25 @@ #define EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ \ (EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ * 8) -#define EXYNOS4210_COMBINER_GET_IRQ_NUM(grp, bit) ((grp)*8 + (bit)) -#define EXYNOS4210_COMBINER_GET_GRP_NUM(irq) ((irq) / 8) -#define EXYNOS4210_COMBINER_GET_BIT_NUM(irq) \ - ((irq) - 8 * EXYNOS4210_COMBINER_GET_GRP_NUM(irq)) - -/* IRQs number for external and internal GIC */ -#define EXYNOS4210_EXT_GIC_NIRQ (160-32) -#define EXYNOS4210_INT_GIC_NIRQ 64 - #define EXYNOS4210_I2C_NUMBER 9 #define EXYNOS4210_NUM_DMA 3 -typedef struct Exynos4210Irq { - qemu_irq int_combiner_irq[EXYNOS4210_MAX_INT_COMBINER_IN_IRQ]; - qemu_irq ext_combiner_irq[EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ]; - qemu_irq int_gic_irq[EXYNOS4210_INT_GIC_NIRQ]; - qemu_irq ext_gic_irq[EXYNOS4210_EXT_GIC_NIRQ]; - qemu_irq board_irqs[EXYNOS4210_MAX_INT_COMBINER_IN_IRQ]; -} Exynos4210Irq; +/* + * We need one splitter for every external combiner input, plus + * one for every non-zero entry in combiner_grp_to_gic_id[], + * minus one for every external combiner ID in second or later + * places in a combinermap[] line. + * We'll assert in exynos4210_init_board_irqs() if this is wrong. + */ +#define EXYNOS4210_NUM_SPLITTERS (EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ + 38) struct Exynos4210State { /*< private >*/ SysBusDevice parent_obj; /*< public >*/ ARMCPU *cpu[EXYNOS4210_NCPUS]; - Exynos4210Irq irqs; - qemu_irq *irq_table; + qemu_irq irq_table[EXYNOS4210_MAX_INT_COMBINER_IN_IRQ]; MemoryRegion chipid_mem; MemoryRegion iram_mem; @@ -102,6 +97,12 @@ struct Exynos4210State { MemoryRegion bootreg_mem; I2CBus *i2c_if[EXYNOS4210_I2C_NUMBER]; qemu_or_irq pl330_irq_orgate[EXYNOS4210_NUM_DMA]; + qemu_or_irq cpu_irq_orgate[EXYNOS4210_NCPUS]; + A9MPPrivState a9mpcore; + Exynos4210GicState ext_gic; + Exynos4210CombinerState int_combiner; + Exynos4210CombinerState ext_combiner; + SplitIRQ splitter[EXYNOS4210_NUM_SPLITTERS]; }; #define TYPE_EXYNOS4210_SOC "exynos4210" @@ -110,25 +111,12 @@ OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210State, EXYNOS4210_SOC) void exynos4210_write_secondary(ARMCPU *cpu, const struct arm_boot_info *info); -/* Initialize exynos4210 IRQ subsystem stub */ -qemu_irq *exynos4210_init_irq(Exynos4210Irq *env); - -/* Initialize board IRQs. - * These IRQs contain splitted Int/External Combiner and External Gic IRQs */ -void exynos4210_init_board_irqs(Exynos4210Irq *s); - /* Get IRQ number from exynos4210 IRQ subsystem stub. 
* To identify IRQ source use internal combiner group and bit number * grp - group number * bit - bit number inside group */ uint32_t exynos4210_get_irq(uint32_t grp, uint32_t bit); -/* - * Get Combiner input GPIO into irqs structure - */ -void exynos4210_combiner_get_gpioin(Exynos4210Irq *irqs, DeviceState *dev, - int ext); - /* * exynos4210 UART */ diff --git a/include/hw/arm/fsl-imx7.h b/include/hw/arm/fsl-imx7.h index f5d527a490..1c5fa6fd67 100644 --- a/include/hw/arm/fsl-imx7.h +++ b/include/hw/arm/fsl-imx7.h @@ -174,6 +174,11 @@ enum FslIMX7MemoryMap { FSL_IMX7_UART6_ADDR = 0x30A80000, FSL_IMX7_UART7_ADDR = 0x30A90000, + FSL_IMX7_SAI1_ADDR = 0x308A0000, + FSL_IMX7_SAI2_ADDR = 0x308B0000, + FSL_IMX7_SAI3_ADDR = 0x308C0000, + FSL_IMX7_SAIn_SIZE = 0x10000, + FSL_IMX7_ENET1_ADDR = 0x30BE0000, FSL_IMX7_ENET2_ADDR = 0x30BF0000, diff --git a/include/hw/arm/msf2-soc.h b/include/hw/arm/msf2-soc.h index d406184685..ce417a6266 100644 --- a/include/hw/arm/msf2-soc.h +++ b/include/hw/arm/msf2-soc.h @@ -30,6 +30,7 @@ #include "hw/misc/msf2-sysreg.h" #include "hw/ssi/mss-spi.h" #include "hw/net/msf2-emac.h" +#include "hw/clock.h" #include "qom/object.h" #define TYPE_MSF2_SOC "msf2-soc" @@ -57,7 +58,8 @@ struct MSF2State { uint64_t envm_size; uint64_t esram_size; - uint32_t m3clk; + Clock *m3clk; + Clock *refclk; uint8_t apb0div; uint8_t apb1div; @@ -65,6 +67,10 @@ struct MSF2State { MSSTimerState timer; MSSSpiState spi[MSF2_NUM_SPIS]; MSF2EmacState emac; + + MemoryRegion nvm; + MemoryRegion nvm_alias; + MemoryRegion sram; }; #endif diff --git a/include/hw/arm/npcm7xx.h b/include/hw/arm/npcm7xx.h index 61ecc57ab9..ce593235d9 100644 --- a/include/hw/arm/npcm7xx.h +++ b/include/hw/arm/npcm7xx.h @@ -35,6 +35,7 @@ #include "hw/usb/hcd-ehci.h" #include "hw/usb/hcd-ohci.h" #include "target/arm/cpu.h" +#include "hw/sd/npcm7xx_sdhci.h" #define NPCM7XX_MAX_NUM_CPUS (2) @@ -103,6 +104,7 @@ typedef struct NPCM7xxState { OHCISysBusState ohci; NPCM7xxFIUState fiu[2]; NPCM7xxEMCState emc[2]; + NPCM7xxSDHCIState mmc; } NPCM7xxState; #define TYPE_NPCM7XX "npcm7xx" diff --git a/include/hw/arm/nrf51_soc.h b/include/hw/arm/nrf51_soc.h index f8a6725b77..e52a56e75e 100644 --- a/include/hw/arm/nrf51_soc.h +++ b/include/hw/arm/nrf51_soc.h @@ -17,6 +17,7 @@ #include "hw/gpio/nrf51_gpio.h" #include "hw/nvram/nrf51_nvm.h" #include "hw/timer/nrf51_timer.h" +#include "hw/clock.h" #include "qom/object.h" #define TYPE_NRF51_SOC "nrf51-soc" @@ -50,6 +51,7 @@ struct NRF51State { MemoryRegion container; + Clock *sysclk; }; #endif diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h index 706be3c6d0..21e62342e9 100644 --- a/include/hw/arm/smmu-common.h +++ b/include/hw/arm/smmu-common.h @@ -71,6 +71,7 @@ typedef struct SMMUTransCfg { bool disabled; /* smmu is disabled */ bool bypassed; /* translation is bypassed */ bool aborted; /* translation is aborted */ + bool record_faults; /* record fault events */ uint64_t ttb; /* TT base address */ uint8_t oas; /* output address width */ uint8_t tbi; /* Top Byte Ignore */ diff --git a/include/hw/arm/stm32f100_soc.h b/include/hw/arm/stm32f100_soc.h index 71bffcf4fd..40cd415b28 100644 --- a/include/hw/arm/stm32f100_soc.h +++ b/include/hw/arm/stm32f100_soc.h @@ -29,6 +29,7 @@ #include "hw/ssi/stm32f2xx_spi.h" #include "hw/arm/armv7m.h" #include "qom/object.h" +#include "hw/clock.h" #define TYPE_STM32F100_SOC "stm32f100-soc" OBJECT_DECLARE_SIMPLE_TYPE(STM32F100State, STM32F100_SOC) @@ -52,6 +53,13 @@ struct STM32F100State { STM32F2XXUsartState usart[STM_NUM_USARTS]; 
STM32F2XXSPIState spi[STM_NUM_SPIS]; + + MemoryRegion sram; + MemoryRegion flash; + MemoryRegion flash_alias; + + Clock *sysclk; + Clock *refclk; }; #endif diff --git a/include/hw/arm/stm32f205_soc.h b/include/hw/arm/stm32f205_soc.h index 985ff63aa9..849d3ed889 100644 --- a/include/hw/arm/stm32f205_soc.h +++ b/include/hw/arm/stm32f205_soc.h @@ -32,6 +32,7 @@ #include "hw/or-irq.h" #include "hw/ssi/stm32f2xx_spi.h" #include "hw/arm/armv7m.h" +#include "hw/clock.h" #include "qom/object.h" #define TYPE_STM32F205_SOC "stm32f205-soc" @@ -63,6 +64,13 @@ struct STM32F205State { STM32F2XXSPIState spi[STM_NUM_SPIS]; qemu_or_irq *adc_irqs; + + MemoryRegion sram; + MemoryRegion flash; + MemoryRegion flash_alias; + + Clock *sysclk; + Clock *refclk; }; #endif diff --git a/include/hw/arm/stm32f405_soc.h b/include/hw/arm/stm32f405_soc.h index 347105e709..5bb0c8d569 100644 --- a/include/hw/arm/stm32f405_soc.h +++ b/include/hw/arm/stm32f405_soc.h @@ -68,6 +68,9 @@ struct STM32F405State { MemoryRegion sram; MemoryRegion flash; MemoryRegion flash_alias; + + Clock *sysclk; + Clock *refclk; }; #endif diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h index 9661c46699..6ec479ca2b 100644 --- a/include/hw/arm/virt.h +++ b/include/hw/arm/virt.h @@ -113,6 +113,7 @@ typedef enum VirtGICType { VIRT_GIC_VERSION_HOST, VIRT_GIC_VERSION_2, VIRT_GIC_VERSION_3, + VIRT_GIC_VERSION_4, VIRT_GIC_VERSION_NOSEL, } VirtGICType; @@ -120,15 +121,19 @@ struct VirtMachineClass { MachineClass parent; bool disallow_affinity_adjustment; bool no_its; + bool no_tcg_its; bool no_pmu; bool claim_edge_triggered_timers; bool smbios_old_sys_ver; bool no_highmem_ecam; - bool no_ged; /* Machines < 4.2 has no support for ACPI GED device */ + bool no_ged; /* Machines < 4.2 have no support for ACPI GED device */ bool kvm_no_adjvtime; bool no_kvm_steal_time; bool acpi_expose_flash; bool no_secure_gpio; + /* Machines < 6.2 have no support for describing cpu topology to guest */ + bool no_cpu_topology; + bool no_tcg_lpa2; }; struct VirtMachineState { @@ -140,10 +145,14 @@ struct VirtMachineState { bool secure; bool highmem; bool highmem_ecam; + bool highmem_mmio; + bool highmem_redists; bool its; + bool tcg_its; bool virt; bool ras; bool mte; + bool dtb_randomness; OnOffAuto acpi; VirtGICType gic_version; VirtIOMMUType iommu; @@ -177,15 +186,28 @@ OBJECT_DECLARE_TYPE(VirtMachineState, VirtMachineClass, VIRT_MACHINE) void virt_acpi_setup(VirtMachineState *vms); bool virt_is_acpi_enabled(VirtMachineState *vms); +/* Return number of redistributors that fit in the specified region */ +static uint32_t virt_redist_capacity(VirtMachineState *vms, int region) +{ + uint32_t redist_size; + + if (vms->gic_version == VIRT_GIC_VERSION_3) { + redist_size = GICV3_REDIST_SIZE; + } else { + redist_size = GICV4_REDIST_SIZE; + } + return vms->memmap[region].size / redist_size; +} + /* Return the number of used redistributor regions */ static inline int virt_gicv3_redist_region_count(VirtMachineState *vms) { - uint32_t redist0_capacity = - vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE; + uint32_t redist0_capacity = virt_redist_capacity(vms, VIRT_GIC_REDIST); - assert(vms->gic_version == VIRT_GIC_VERSION_3); + assert(vms->gic_version != VIRT_GIC_VERSION_2); - return MACHINE(vms)->smp.cpus > redist0_capacity ? 2 : 1; + return (MACHINE(vms)->smp.cpus > redist0_capacity && + vms->highmem_redists) ? 
2 : 1; } #endif /* QEMU_ARM_VIRT_H */ diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h index 22a8fa5d11..cbe8a19c10 100644 --- a/include/hw/arm/xlnx-versal.h +++ b/include/hw/arm/xlnx-versal.h @@ -14,6 +14,7 @@ #include "hw/sysbus.h" #include "hw/arm/boot.h" +#include "hw/cpu/cluster.h" #include "hw/or-irq.h" #include "hw/sd/sdhci.h" #include "hw/intc/arm_gicv3.h" @@ -24,11 +25,18 @@ #include "qom/object.h" #include "hw/usb/xlnx-usb-subsystem.h" #include "hw/misc/xlnx-versal-xramc.h" +#include "hw/nvram/xlnx-bbram.h" +#include "hw/nvram/xlnx-versal-efuse.h" +#include "hw/ssi/xlnx-versal-ospi.h" +#include "hw/dma/xlnx_csu_dma.h" +#include "hw/misc/xlnx-versal-crl.h" +#include "hw/misc/xlnx-versal-pmc-iou-slcr.h" #define TYPE_XLNX_VERSAL "xlnx-versal" OBJECT_DECLARE_SIMPLE_TYPE(Versal, XLNX_VERSAL) #define XLNX_VERSAL_NR_ACPUS 2 +#define XLNX_VERSAL_NR_RCPUS 2 #define XLNX_VERSAL_NR_UARTS 2 #define XLNX_VERSAL_NR_GEMS 2 #define XLNX_VERSAL_NR_ADMAS 8 @@ -44,6 +52,7 @@ struct Versal { struct { struct { MemoryRegion mr; + CPUClusterState cluster; ARMCPU cpu[XLNX_VERSAL_NR_ACPUS]; GICv3State gic; } apu; @@ -66,24 +75,49 @@ struct Versal { VersalUsb2 usb; } iou; + /* Real-time Processing Unit. */ + struct { + MemoryRegion mr; + MemoryRegion mr_ps_alias; + + CPUClusterState cluster; + ARMCPU cpu[XLNX_VERSAL_NR_RCPUS]; + } rpu; + struct { qemu_or_irq irq_orgate; XlnxXramCtrl ctrl[XLNX_VERSAL_NR_XRAM]; } xram; + + XlnxVersalCRL crl; } lpd; /* The Platform Management Controller subsystem. */ struct { struct { SDHCIState sd[XLNX_VERSAL_NR_SDS]; + XlnxVersalPmcIouSlcr slcr; + + struct { + XlnxVersalOspi ospi; + XlnxCSUDMA dma_src; + XlnxCSUDMA dma_dst; + MemoryRegion linear_mr; + qemu_or_irq irq_orgate; + } ospi; } iou; XlnxZynqMPRTC rtc; + XlnxBBRam bbram; + XlnxEFuse efuse; + XlnxVersalEFuseCtrl efuse_ctrl; + XlnxVersalEFuseCache efuse_cache; + + qemu_or_irq apb_irq_orgate; } pmc; struct { MemoryRegion *mr_ddr; - uint32_t psci_conduit; } cfg; }; @@ -96,6 +130,7 @@ struct Versal { #define VERSAL_TIMER_NS_EL1_IRQ 14 #define VERSAL_TIMER_NS_EL2_IRQ 10 +#define VERSAL_CRL_IRQ 10 #define VERSAL_UART0_IRQ_0 18 #define VERSAL_UART1_IRQ_0 19 #define VERSAL_USB0_IRQ_0 22 @@ -105,8 +140,10 @@ struct Versal { #define VERSAL_GEM1_WAKE_IRQ_0 59 #define VERSAL_ADMA_IRQ_0 60 #define VERSAL_XRAM_IRQ_0 79 -#define VERSAL_RTC_APB_ERR_IRQ 121 +#define VERSAL_PMC_APB_IRQ 121 +#define VERSAL_OSPI_IRQ 124 #define VERSAL_SD0_IRQ_0 126 +#define VERSAL_EFUSE_IRQ 139 #define VERSAL_RTC_ALARM_IRQ 142 #define VERSAL_RTC_SECONDS_IRQ 143 @@ -167,9 +204,30 @@ struct Versal { #define MM_IOU_SCNTRS_SIZE 0x10000 #define MM_FPD_CRF 0xfd1a0000U #define MM_FPD_CRF_SIZE 0x140000 +#define MM_FPD_FPD_APU 0xfd5c0000 +#define MM_FPD_FPD_APU_SIZE 0x100 + +#define MM_PMC_PMC_IOU_SLCR 0xf1060000 +#define MM_PMC_PMC_IOU_SLCR_SIZE 0x10000 + +#define MM_PMC_OSPI 0xf1010000 +#define MM_PMC_OSPI_SIZE 0x10000 + +#define MM_PMC_OSPI_DAC 0xc0000000 +#define MM_PMC_OSPI_DAC_SIZE 0x20000000 + +#define MM_PMC_OSPI_DMA_DST 0xf1011800 +#define MM_PMC_OSPI_DMA_SRC 0xf1011000 #define MM_PMC_SD0 0xf1040000U #define MM_PMC_SD0_SIZE 0x10000 +#define MM_PMC_BBRAM_CTRL 0xf11f0000 +#define MM_PMC_BBRAM_CTRL_SIZE 0x00050 +#define MM_PMC_EFUSE_CTRL 0xf1240000 +#define MM_PMC_EFUSE_CTRL_SIZE 0x00104 +#define MM_PMC_EFUSE_CACHE 0xf1250000 +#define MM_PMC_EFUSE_CACHE_SIZE 0x00C00 + #define MM_PMC_CRP 0xf1260000U #define MM_PMC_CRP_SIZE 0x10000 #define MM_PMC_RTC 0xf12a0000 diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h 
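A quick numeric sketch of how the two virt.h redistributor helpers just above interact. The region size and per-vCPU redistributor frame size below are hypothetical example values (the real constants come from the virt memmap and the GIC headers elsewhere in the tree); this is illustrative C, not board code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical example values, not the real memmap/GIC constants. */
#define EXAMPLE_REDIST_REGION_SIZE 0x100000ULL /* 1 MiB redistributor region */
#define EXAMPLE_GICV3_REDIST_SIZE  0x20000ULL  /* assumed per-vCPU frame size */

static void example_redist_regions(unsigned int smp_cpus, bool highmem_redists)
{
    /* Mirrors virt_redist_capacity() for the GICv3 case. */
    uint32_t redist0_capacity =
        EXAMPLE_REDIST_REGION_SIZE / EXAMPLE_GICV3_REDIST_SIZE; /* == 8 */

    /* Mirrors virt_gicv3_redist_region_count(): the second (high) region is
     * only used when the first region is too small for all vCPUs *and* the
     * board enables highmem_redists.
     */
    int regions = (smp_cpus > redist0_capacity && highmem_redists) ? 2 : 1;

    printf("%u vCPUs -> %d redistributor region(s)\n", smp_cpus, regions);
}

With these example numbers, 4 vCPUs fit in one region, while 16 vCPUs with highmem_redists enabled need two.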
index d3e2ef97f6..20bdf894aa 100644 --- a/include/hw/arm/xlnx-zynqmp.h +++ b/include/hw/arm/xlnx-zynqmp.h @@ -36,6 +36,13 @@ #include "qom/object.h" #include "net/can_emu.h" #include "hw/dma/xlnx_csu_dma.h" +#include "hw/nvram/xlnx-bbram.h" +#include "hw/nvram/xlnx-zynqmp-efuse.h" +#include "hw/or-irq.h" +#include "hw/misc/xlnx-zynqmp-apu-ctrl.h" +#include "hw/misc/xlnx-zynqmp-crf.h" +#include "hw/timer/cadence_ttc.h" +#include "hw/usb/hcd-dwc3.h" #define TYPE_XLNX_ZYNQMP "xlnx-zynqmp" OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPState, XLNX_ZYNQMP) @@ -50,6 +57,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPState, XLNX_ZYNQMP) #define XLNX_ZYNQMP_NUM_SPIS 2 #define XLNX_ZYNQMP_NUM_GDMA_CH 8 #define XLNX_ZYNQMP_NUM_ADMA_CH 8 +#define XLNX_ZYNQMP_NUM_USB 2 #define XLNX_ZYNQMP_NUM_QSPI_BUS 2 #define XLNX_ZYNQMP_NUM_QSPI_BUS_CS 2 @@ -79,6 +87,13 @@ OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPState, XLNX_ZYNQMP) #define XLNX_ZYNQMP_MAX_RAM_SIZE (XLNX_ZYNQMP_MAX_LOW_RAM_SIZE + \ XLNX_ZYNQMP_MAX_HIGH_RAM_SIZE) +#define XLNX_ZYNQMP_NUM_TTC 4 + +/* + * Unimplemented mmio regions needed to boot some images. + */ +#define XLNX_ZYNQMP_NUM_UNIMP_AREAS 1 + struct XlnxZynqMPState { /*< private >*/ DeviceState parent_obj; @@ -95,6 +110,11 @@ struct XlnxZynqMPState { MemoryRegion *ddr_ram; MemoryRegion ddr_ram_low, ddr_ram_high; + XlnxBBRam bbram; + XlnxEFuse efuse; + XlnxZynqMPEFuse efuse_ctrl; + + MemoryRegion mr_unimp[XLNX_ZYNQMP_NUM_UNIMP_AREAS]; CadenceGEMState gem[XLNX_ZYNQMP_NUM_GEMS]; CadenceUARTState uart[XLNX_ZYNQMP_NUM_UARTS]; @@ -110,6 +130,11 @@ struct XlnxZynqMPState { XlnxZDMA gdma[XLNX_ZYNQMP_NUM_GDMA_CH]; XlnxZDMA adma[XLNX_ZYNQMP_NUM_ADMA_CH]; XlnxCSUDMA qspi_dma; + qemu_or_irq qspi_irq_orgate; + XlnxZynqMPAPUCtrl apu_ctrl; + XlnxZynqMPCRF crf; + CadenceTTCState ttc[XLNX_ZYNQMP_NUM_TTC]; + USBDWC3 usb[XLNX_ZYNQMP_NUM_USB]; char *boot_cpu; ARMCPU *boot_cpu_ptr; diff --git a/include/hw/audio/soundhw.h b/include/hw/audio/soundhw.h index f09a297854..270717a06a 100644 --- a/include/hw/audio/soundhw.h +++ b/include/hw/audio/soundhw.h @@ -1,15 +1,13 @@ #ifndef HW_SOUNDHW_H #define HW_SOUNDHW_H -void isa_register_soundhw(const char *name, const char *descr, - int (*init_isa)(ISABus *bus)); - void pci_register_soundhw(const char *name, const char *descr, - int (*init_pci)(PCIBus *bus)); + int (*init_pci)(PCIBus *bus, const char *audiodev)); void deprecated_register_soundhw(const char *name, const char *descr, int isa, const char *typename); void soundhw_init(void); -void select_soundhw(const char *optarg); +void show_valid_soundhw(void); +void select_soundhw(const char *optarg, const char *audiodev); #endif diff --git a/include/hw/block/block.h b/include/hw/block/block.h index 20e5019781..f22c64dfab 100644 --- a/include/hw/block/block.h +++ b/include/hw/block/block.h @@ -31,6 +31,7 @@ typedef struct BlockConf { uint32_t lcyls, lheads, lsecs; OnOffAuto wce; bool share_rw; + OnOffAuto account_invalid, account_failed; BlockdevOnError rerror; BlockdevOnError werror; } BlockConf; @@ -61,7 +62,11 @@ static inline unsigned int get_physical_block_exp(BlockConf *conf) _conf.discard_granularity, -1), \ DEFINE_PROP_ON_OFF_AUTO("write-cache", _state, _conf.wce, \ ON_OFF_AUTO_AUTO), \ - DEFINE_PROP_BOOL("share-rw", _state, _conf.share_rw, false) + DEFINE_PROP_BOOL("share-rw", _state, _conf.share_rw, false), \ + DEFINE_PROP_ON_OFF_AUTO("account-invalid", _state, \ + _conf.account_invalid, ON_OFF_AUTO_AUTO), \ + DEFINE_PROP_ON_OFF_AUTO("account-failed", _state, \ + _conf.account_failed, ON_OFF_AUTO_AUTO) #define 
DEFINE_BLOCK_PROPERTIES(_state, _conf) \ DEFINE_PROP_DRIVE("drive", _state, _conf.blk), \ diff --git a/include/hw/block/fdc.h b/include/hw/block/fdc.h index 1ecca7cac7..35248c0837 100644 --- a/include/hw/block/fdc.h +++ b/include/hw/block/fdc.h @@ -10,8 +10,7 @@ #define TYPE_ISA_FDC "isa-fdc" void isa_fdc_init_drives(ISADevice *fdc, DriveInfo **fds); -void fdctrl_init_sysbus(qemu_irq irq, int dma_chann, - hwaddr mmio_base, DriveInfo **fds); +void fdctrl_init_sysbus(qemu_irq irq, hwaddr mmio_base, DriveInfo **fds); void sun4m_fdctrl_init(qemu_irq irq, hwaddr io_base, DriveInfo **fds, qemu_irq *fdc_tc); diff --git a/include/hw/boards.h b/include/hw/boards.h index accd6eff35..90f1dd3aeb 100644 --- a/include/hw/boards.h +++ b/include/hw/boards.h @@ -25,7 +25,7 @@ OBJECT_DECLARE_TYPE(MachineState, MachineClass, MACHINE) extern MachineState *current_machine; -void machine_run_board_init(MachineState *machine); +void machine_run_board_init(MachineState *machine, const char *mem_path, Error **errp); bool machine_usb(MachineState *machine); int machine_phandle_start(MachineState *machine); bool machine_dump_guest_core(MachineState *machine); @@ -34,6 +34,8 @@ HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine); void machine_set_cpu_numa_node(MachineState *machine, const CpuInstanceProperties *props, Error **errp); +void machine_parse_smp_config(MachineState *ms, + const SMPConfiguration *config, Error **errp); /** * machine_class_allow_dynamic_sysbus_dev: Add type to list of valid devices @@ -51,6 +53,21 @@ void machine_set_cpu_numa_node(MachineState *machine, */ void machine_class_allow_dynamic_sysbus_dev(MachineClass *mc, const char *type); +/** + * device_type_is_dynamic_sysbus: Check if type is an allowed sysbus device + * type for the machine class. + * @mc: Machine class + * @type: type to check (should be a subtype of TYPE_SYS_BUS_DEVICE) + * + * Returns: true if @type is a type in the machine's list of + * dynamically pluggable sysbus devices; otherwise false. + * + * Check if the QOM type @type is in the list of allowed sysbus device + * types (see machine_class_allowed_dynamic_sysbus_dev()). + * Note that if @type has a parent type in the list, it is allowed too. + */ +bool device_type_is_dynamic_sysbus(MachineClass *mc, const char *type); + /** * device_is_dynamic_sysbus: test whether device is a dynamic sysbus device * @mc: Machine class @@ -108,6 +125,18 @@ typedef struct { CPUArchId cpus[]; } CPUArchIdList; +/** + * SMPCompatProps: + * @prefer_sockets - whether sockets are preferred over cores in smp parsing + * @dies_supported - whether dies are supported by the machine + * @clusters_supported - whether clusters are supported by the machine + */ +typedef struct { + bool prefer_sockets; + bool dies_supported; + bool clusters_supported; +} SMPCompatProps; + /** * MachineClass: * @deprecation_reason: If set, the machine is marked as deprecated. The @@ -169,10 +198,6 @@ typedef struct { * kvm-type may be NULL if it is not needed. * @numa_mem_supported: * true if '--numa node.mem' option is supported and false otherwise - * @smp_parse: - * The function pointer to hook different machine specific functions for - * parsing "smp-opts" from QemuOpts to MachineState::CpuTopology and more - * machine specific topology fields, such as smp_dies for PCMachine. * @hotplug_allowed: * If the hook is provided, then it'll be called for each device * hotplug to check whether the device hotplug is allowed. 
Return @@ -206,10 +231,9 @@ struct MachineClass { const char *deprecation_reason; void (*init)(MachineState *state); - void (*reset)(MachineState *state); + void (*reset)(MachineState *state, ShutdownCause reason); void (*wakeup)(MachineState *state); int (*kvm_type)(MachineState *machine, const char *arg); - void (*smp_parse)(MachineState *ms, SMPConfiguration *config, Error **errp); BlockInterfaceType block_default_type; int units_per_default_bus; @@ -247,6 +271,7 @@ struct MachineClass { bool nvdimm_supported; bool numa_mem_supported; bool auto_enable_numa; + SMPCompatProps smp_props; const char *default_ram_id; HotplugHandler *(*get_hotplug_handler)(MachineState *machine, @@ -274,17 +299,20 @@ typedef struct DeviceMemoryState { /** * CpuTopology: * @cpus: the number of present logical processors on the machine - * @cores: the number of cores in one package - * @threads: the number of threads in one core * @sockets: the number of sockets on the machine + * @dies: the number of dies in one socket + * @clusters: the number of clusters in one die + * @cores: the number of cores in one cluster + * @threads: the number of threads in one core * @max_cpus: the maximum number of logical processors on the machine */ typedef struct CpuTopology { unsigned int cpus; + unsigned int sockets; unsigned int dies; + unsigned int clusters; unsigned int cores; unsigned int threads; - unsigned int sockets; unsigned int max_cpus; } CpuTopology; @@ -294,7 +322,6 @@ typedef struct CpuTopology { struct MachineState { /*< private >*/ Object parent_obj; - Notifier sysbus_notifier; /*< public >*/ @@ -312,7 +339,7 @@ struct MachineState { bool suppress_vmdesc; bool enable_graphics; ConfidentialGuestSupport *cgs; - char *ram_memdev_id; + HostMemoryBackend *memdev; /* * convenience alias to ram_memdev_id backend memory region * or to numa container memory region @@ -323,8 +350,7 @@ struct MachineState { ram_addr_t ram_size; ram_addr_t maxram_size; uint64_t ram_slots; - const char *boot_order; - const char *boot_once; + BootConfiguration boot_config; char *kernel_filename; char *kernel_cmdline; char *initrd_filename; @@ -353,6 +379,18 @@ struct MachineState { } \ type_init(machine_initfn##_register_types) +extern GlobalProperty hw_compat_7_1[]; +extern const size_t hw_compat_7_1_len; + +extern GlobalProperty hw_compat_7_0[]; +extern const size_t hw_compat_7_0_len; + +extern GlobalProperty hw_compat_6_2[]; +extern const size_t hw_compat_6_2_len; + +extern GlobalProperty hw_compat_6_1[]; +extern const size_t hw_compat_6_1_len; + extern GlobalProperty hw_compat_6_0[]; extern const size_t hw_compat_6_0_len; diff --git a/include/hw/char/goldfish_tty.h b/include/hw/char/goldfish_tty.h index b9dd67362a..7503d2fa1e 100644 --- a/include/hw/char/goldfish_tty.h +++ b/include/hw/char/goldfish_tty.h @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * Goldfish TTY * diff --git a/include/hw/char/mchp_pfsoc_mmuart.h b/include/hw/char/mchp_pfsoc_mmuart.h index f61990215f..b0e14ca355 100644 --- a/include/hw/char/mchp_pfsoc_mmuart.h +++ b/include/hw/char/mchp_pfsoc_mmuart.h @@ -28,18 +28,25 @@ #ifndef HW_MCHP_PFSOC_MMUART_H #define HW_MCHP_PFSOC_MMUART_H +#include "hw/sysbus.h" #include "hw/char/serial.h" -#define MCHP_PFSOC_MMUART_REG_SIZE 52 +#define MCHP_PFSOC_MMUART_REG_COUNT 13 + +#define TYPE_MCHP_PFSOC_UART "mchp.pfsoc.uart" +OBJECT_DECLARE_SIMPLE_TYPE(MchpPfSoCMMUartState, MCHP_PFSOC_UART) typedef struct MchpPfSoCMMUartState { + /*< private >*/ + SysBusDevice 
parent_obj; + + /*< public >*/ + MemoryRegion container; MemoryRegion iomem; - hwaddr base; - qemu_irq irq; - SerialMM *serial; + SerialMM serial_mm; - uint32_t reg[MCHP_PFSOC_MMUART_REG_SIZE / sizeof(uint32_t)]; + uint32_t reg[MCHP_PFSOC_MMUART_REG_COUNT]; } MchpPfSoCMMUartState; /** diff --git a/include/hw/char/riscv_htif.h b/include/hw/char/riscv_htif.h index fb9452cf51..f888ac1b30 100644 --- a/include/hw/char/riscv_htif.h +++ b/include/hw/char/riscv_htif.h @@ -52,8 +52,11 @@ extern const MemoryRegionOps htif_io_ops; void htif_symbol_callback(const char *st_name, int st_info, uint64_t st_value, uint64_t st_size); +/* Check if HTIF uses ELF symbols */ +bool htif_uses_elf_symbols(void); + /* legacy pre qom */ HTIFState *htif_mm_init(MemoryRegion *address_space, MemoryRegion *main_mem, - CPURISCVState *env, Chardev *chr); + CPURISCVState *env, Chardev *chr, uint64_t nonelf_base); #endif diff --git a/include/hw/clock.h b/include/hw/clock.h index a7187eab95..5c927cee7f 100644 --- a/include/hw/clock.h +++ b/include/hw/clock.h @@ -81,6 +81,10 @@ struct Clock { void *callback_opaque; unsigned int callback_events; + /* Ratio of the parent clock to run the child clocks at */ + uint32_t multiplier; + uint32_t divider; + /* Clocks are organized in a clock tree */ Clock *source; QLIST_HEAD(, Clock) children; @@ -319,10 +323,7 @@ static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns) if (clk->period == 0) { return 0; } - /* - * Ignore divu128() return value as we've caught div-by-zero and don't - * need different behaviour for overflow. - */ + divu128(&lo, &hi, clk->period); return lo; } @@ -350,4 +351,29 @@ static inline bool clock_is_enabled(const Clock *clk) */ char *clock_display_freq(Clock *clk); +/** + * clock_set_mul_div: set multiplier/divider for child clocks + * @clk: clock + * @multiplier: multiplier value + * @divider: divider value + * + * By default, a Clock's children will all run with the same period + * as their parent. This function allows you to adjust the multiplier + * and divider used to derive the child clock frequency. + * For example, setting a multiplier of 2 and a divider of 3 + * will run child clocks with a period 2/3 of the parent clock, + * so if the parent clock is an 8MHz clock the children will + * be 12MHz. + * + * Setting the multiplier to 0 will stop the child clocks. + * Setting the divider to 0 is a programming error (diagnosed with + * an assertion failure). + * Setting a multiplier value that results in the child period + * overflowing is not diagnosed. + * + * Note that this function does not call clock_propagate(); the + * caller should do that if necessary. + */ +void clock_set_mul_div(Clock *clk, uint32_t multiplier, uint32_t divider); + #endif /* QEMU_HW_CLOCK_H */ diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index b6664cc1c9..4e6bd279fa 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -22,6 +22,7 @@ #include "hw/qdev-core.h" #include "disas/dis-asm.h" +#include "exec/cpu-common.h" #include "exec/hwaddr.h" #include "exec/memattrs.h" #include "qapi/qapi-types-run-state.h" @@ -35,18 +36,6 @@ typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size, void *opaque); -/** - * vaddr: - * Type wide enough to contain any #target_ulong virtual address. 
- */ -typedef uint64_t vaddr; -#define VADDR_PRId PRId64 -#define VADDR_PRIu PRIu64 -#define VADDR_PRIo PRIo64 -#define VADDR_PRIx PRIx64 -#define VADDR_PRIX PRIX64 -#define VADDR_MAX UINT64_MAX - /** * SECTION:cpu * @section_id: QEMU-cpu @@ -62,10 +51,35 @@ typedef uint64_t vaddr; */ #define CPU(obj) ((CPUState *)(obj)) +/* + * The class checkers bring in CPU_GET_CLASS() which is potentially + * expensive given the eventual call to + * object_class_dynamic_cast_assert(). Because of this the CPUState + * has a cached value for the class in cs->cc which is set up in + * cpu_exec_realizefn() for use in hot code paths. + */ typedef struct CPUClass CPUClass; DECLARE_CLASS_CHECKERS(CPUClass, CPU, TYPE_CPU) +/** + * OBJECT_DECLARE_CPU_TYPE: + * @CpuInstanceType: instance struct name + * @CpuClassType: class struct name + * @CPU_MODULE_OBJ_NAME: the CPU name in uppercase with underscore separators + * + * This macro is typically used in "cpu-qom.h" header file, and will: + * + * - create the typedefs for the CPU object and class structs + * - register the type for use with g_autoptr + * - provide three standard type cast functions + * + * The object struct and class struct need to be declared manually. + */ +#define OBJECT_DECLARE_CPU_TYPE(CpuInstanceType, CpuClassType, CPU_MODULE_OBJ_NAME) \ + typedef struct ArchCPU CpuInstanceType; \ + OBJECT_DECLARE_TYPE(ArchCPU, CpuClassType, CPU_MODULE_OBJ_NAME); + typedef enum MMUAccessType { MMU_DATA_LOAD = 0, MMU_DATA_STORE = 1, @@ -101,6 +115,8 @@ struct SysemuCPUOps; * If the target behaviour here is anything other than "set * the PC register to the value passed in" then the target must * also implement the synchronize_from_tb hook. + * @get_pc: Callback for getting the Program Counter register. + * As above, with the semantics of the target architecture. * @gdb_read_register: Callback for letting GDB read a register. * @gdb_write_register: Callback for letting GDB write a register. * @gdb_adjust_breakpoint: Callback for adjusting the address of a @@ -131,13 +147,13 @@ struct CPUClass { ObjectClass *(*class_by_name)(const char *cpu_model); void (*parse_features)(const char *typename, char *str, Error **errp); - int reset_dump_flags; bool (*has_work)(CPUState *cpu); int (*memory_rw_debug)(CPUState *cpu, vaddr addr, uint8_t *buf, int len, bool is_write); void (*dump_state)(CPUState *cpu, FILE *, int flags); int64_t (*get_arch_id)(CPUState *cpu); void (*set_pc)(CPUState *cpu, vaddr value); + vaddr (*get_pc)(CPUState *cpu); int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg); int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg); vaddr (*gdb_adjust_breakpoint)(CPUState *cpu, vaddr addr); @@ -149,9 +165,6 @@ struct CPUClass { void (*disas_set_info)(CPUState *cpu, disassemble_info *info); const char *deprecation_note; - /* Keep non-pointer data at the end to minimize holes. */ - int gdb_num_core_regs; - bool gdb_stop_before_watchpoint; struct AccelCPUClass *accel_cpu; /* when system emulation is not available, this pointer is NULL */ @@ -165,6 +178,13 @@ struct CPUClass { * class data that depends on the accelerator, see accel/accel-common.c. */ void (*init_accel_cpu)(struct AccelCPUClass *accel_cpu, CPUClass *cc); + + /* + * Keep non-pointer data at the end to minimize holes. 
+ */ + int reset_dump_flags; + int gdb_num_core_regs; + bool gdb_stop_before_watchpoint; }; /* @@ -177,7 +197,7 @@ struct CPUClass { typedef union IcountDecr { uint32_t u32; struct { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN uint16_t high; uint16_t low; #else @@ -221,7 +241,6 @@ struct CPUWatchpoint { * the memory regions get moved around by io_writex. */ typedef struct SavedIOTLB { - hwaddr addr; MemoryRegionSection *section; hwaddr mr_offset; } SavedIOTLB; @@ -233,9 +252,6 @@ struct kvm_run; struct hax_vcpu_state; struct hvf_vcpu_state; -#define TB_JMP_CACHE_BITS 12 -#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS) - /* work queue */ /* The union type allows passing of 64 bit target pointers on 32 bit @@ -320,6 +336,8 @@ struct qemu_work_item; struct CPUState { /*< private >*/ DeviceState parent_obj; + /* cache to avoid expensive CPU_GET_CLASS */ + CPUClass *cc; /*< public >*/ int nr_cores; @@ -328,6 +346,7 @@ struct CPUState { struct QemuThread *thread; #ifdef _WIN32 HANDLE hThread; + QemuSemaphore sem; #endif int thread_id; bool running, has_waiter; @@ -361,11 +380,10 @@ struct CPUState { AddressSpace *as; MemoryRegion *memory; - void *env_ptr; /* CPUArchState */ + CPUArchState *env_ptr; IcountDecr *icount_decr_ptr; - /* Accessed in parallel; all accesses must be atomic */ - TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; + CPUJumpCache *tb_jmp_cache; struct GDBRegisterState *gdb_regs; int gdb_num_regs; @@ -393,6 +411,7 @@ struct CPUState { struct kvm_run *kvm_run; struct kvm_dirty_gfn *kvm_dirty_gfns; uint32_t kvm_fetch_index; + uint64_t dirty_pages; /* Used for events with 'vcpu' and *without* the 'disabled' properties */ DECLARE_BITMAP(trace_dstate_delayed, CPU_TRACE_DSTATE_MAX_EVENTS); @@ -422,8 +441,17 @@ struct CPUState { */ bool throttle_thread_scheduled; + /* + * Sleep throttle_us_per_full microseconds once dirty ring is full + * if dirty page rate limit is enabled. + */ + int64_t throttle_us_per_full; + bool ignore_memory_transaction_failures; + /* Used for user-only emulation of prctl(PR_SET_UNALIGN). */ + bool prctl_unalign_sigbus; + struct hax_vcpu_state *hax_vcpu; struct hvf_vcpu_state *hvf; @@ -443,15 +471,6 @@ extern CPUTailQ cpus; extern __thread CPUState *current_cpu; -static inline void cpu_tb_jmp_cache_clear(CPUState *cpu) -{ - unsigned int i; - - for (i = 0; i < TB_JMP_CACHE_SIZE; i++) { - qatomic_set(&cpu->tb_jmp_cache[i], NULL); - } -} - /** * qemu_tcg_mttcg_enabled: * Check whether we are running MultiThread TCG or not. @@ -1038,10 +1057,11 @@ void mem_check_access_callback_vaddr(CPUState *cpu, vaddr addr, vaddr len, */ AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx); -void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...) - GCC_FMT_ATTR(2, 3); +G_NORETURN void cpu_abort(CPUState *cpu, const char *fmt, ...) + G_GNUC_PRINTF(2, 3); /* $(top_srcdir)/cpu.c */ +void cpu_class_init_props(DeviceClass *dc); void cpu_exec_initfn(CPUState *cpu); void cpu_exec_realizefn(CPUState *cpu, Error **errp); void cpu_exec_unrealizefn(CPUState *cpu); @@ -1050,13 +1070,15 @@ void cpu_exec_unrealizefn(CPUState *cpu); * target_words_bigendian: * Returns true if the (default) endianness of the target is big endian, * false otherwise. Note that in target-specific code, you can use - * TARGET_WORDS_BIGENDIAN directly instead. On the other hand, common + * TARGET_BIG_ENDIAN directly instead. 
On the other hand, common * code should normally never need to know about the endianness of the * target, so please do *not* use this function unless you know very well * what you are doing! */ bool target_words_bigendian(void); +void page_size_init(void); + #ifdef NEED_CPU_H #ifdef CONFIG_SOFTMMU diff --git a/include/hw/arm/sysbus-fdt.h b/include/hw/core/sysbus-fdt.h similarity index 100% rename from include/hw/arm/sysbus-fdt.h rename to include/hw/core/sysbus-fdt.h diff --git a/include/hw/core/sysemu-cpu-ops.h b/include/hw/core/sysemu-cpu-ops.h index a9ba39e5f2..ee169b872c 100644 --- a/include/hw/core/sysemu-cpu-ops.h +++ b/include/hw/core/sysemu-cpu-ops.h @@ -53,25 +53,25 @@ typedef struct SysemuCPUOps { * 32-bit VM coredump. */ int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu, - int cpuid, void *opaque); + int cpuid, DumpState *s); /** * @write_elf64_note: Callback for writing a CPU-specific ELF note to a * 64-bit VM coredump. */ int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu, - int cpuid, void *opaque); + int cpuid, DumpState *s); /** * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF * note to a 32-bit VM coredump. */ int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu, - void *opaque); + DumpState *s); /** * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF * note to a 64-bit VM coredump. */ int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu, - void *opaque); + DumpState *s); /** * @virtio_is_big_endian: Callback to return %true if a CPU which supports * runtime configurable endianness is currently big-endian. diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h index eab27d0c03..20e3c0ffbb 100644 --- a/include/hw/core/tcg-cpu-ops.h +++ b/include/hw/core/tcg-cpu-ops.h @@ -31,37 +31,52 @@ struct TCGCPUOps { * function to restore all the state, and register it here. */ void (*synchronize_from_tb)(CPUState *cpu, const TranslationBlock *tb); + /** + * @restore_state_to_opc: Synchronize state from INDEX_op_start_insn + * + * This is called when we unwind state in the middle of a TB, + * usually before raising an exception. Set all part of the CPU + * state which are tracked insn-by-insn in the target-specific + * arguments to start_insn, passed as @data. + */ + void (*restore_state_to_opc)(CPUState *cpu, const TranslationBlock *tb, + const uint64_t *data); + /** @cpu_exec_enter: Callback for cpu_exec preparation */ void (*cpu_exec_enter)(CPUState *cpu); /** @cpu_exec_exit: Callback for cpu_exec cleanup */ void (*cpu_exec_exit)(CPUState *cpu); - /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */ - bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request); - /** - * @do_interrupt: Callback for interrupt handling. - * - * note that this is in general SOFTMMU only, but it actually isn't - * because of an x86 hack (accel/tcg/cpu-exec.c), so we cannot put it - * in the SOFTMMU section in general. - */ - void (*do_interrupt)(CPUState *cpu); - /** - * @tlb_fill: Handle a softmmu tlb miss or user-only address fault - * - * For system mode, if the access is valid, call tlb_set_page - * and return true; if the access is invalid, and probe is - * true, return false; otherwise raise an exception and do - * not return. For user-only mode, always raise an exception - * and do not return. 
- */ - bool (*tlb_fill)(CPUState *cpu, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); /** @debug_excp_handler: Callback for handling debug exceptions */ void (*debug_excp_handler)(CPUState *cpu); #ifdef NEED_CPU_H +#if defined(CONFIG_USER_ONLY) && defined(TARGET_I386) + /** + * @fake_user_interrupt: Callback for 'fake exception' handling. + * + * Simulate 'fake exception' which will be handled outside the + * cpu execution loop (hack for x86 user mode). + */ + void (*fake_user_interrupt)(CPUState *cpu); +#else + /** + * @do_interrupt: Callback for interrupt handling. + */ + void (*do_interrupt)(CPUState *cpu); +#endif /* !CONFIG_USER_ONLY || !TARGET_I386 */ #ifdef CONFIG_SOFTMMU + /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */ + bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request); + /** + * @tlb_fill: Handle a softmmu tlb miss + * + * If the access is valid, call tlb_set_page and return true; + * if the access is invalid and probe is true, return false; + * otherwise raise an exception and do not return. + */ + bool (*tlb_fill)(CPUState *cpu, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); /** * @do_transaction_failed: Callback for handling failed memory transactions * (ie bus faults or external aborts; not MMU faults) @@ -72,10 +87,11 @@ struct TCGCPUOps { MemTxResult response, uintptr_t retaddr); /** * @do_unaligned_access: Callback for unaligned access handling + * The callback must exit via raising an exception. */ - void (*do_unaligned_access)(CPUState *cpu, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); + G_NORETURN void (*do_unaligned_access)(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr); /** * @adjust_watchpoint_address: hack for cpu_check_watchpoint used by ARM @@ -85,6 +101,7 @@ struct TCGCPUOps { /** * @debug_check_watchpoint: return true if the architectural * watchpoint whose address has matched should really fire, used by ARM + * and RISC-V */ bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp); @@ -104,6 +121,55 @@ struct TCGCPUOps { */ bool (*io_recompile_replay_branch)(CPUState *cpu, const TranslationBlock *tb); +#else + /** + * record_sigsegv: + * @cpu: cpu context + * @addr: faulting guest address + * @access_type: access was read/write/execute + * @maperr: true for invalid page, false for permission fault + * @ra: host pc for unwinding + * + * We are about to raise SIGSEGV with si_code set for @maperr, + * and si_addr set for @addr. Record anything further needed + * for the signal ucontext_t. + * + * If the emulated kernel does not provide anything to the signal + * handler with anything besides the user context registers, and + * the siginfo_t, then this hook need do nothing and may be omitted. + * Otherwise, record the data and return; the caller will raise + * the signal, unwind the cpu state, and return to the main loop. + * + * If it is simpler to re-use the sysemu tlb_fill code, @ra is provided + * so that a "normal" cpu exception can be raised. In this case, + * the signal must be raised by the architecture cpu_loop. 
+ */ + void (*record_sigsegv)(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + bool maperr, uintptr_t ra); + /** + * record_sigbus: + * @cpu: cpu context + * @addr: misaligned guest address + * @access_type: access was read/write/execute + * @ra: host pc for unwinding + * + * We are about to raise SIGBUS with si_code BUS_ADRALN, + * and si_addr set for @addr. Record anything further needed + * for the signal ucontext_t. + * + * If the emulated kernel does not provide the signal handler with + * anything besides the user context registers, and the siginfo_t, + * then this hook need do nothing and may be omitted. + * Otherwise, record the data and return; the caller will raise + * the signal, unwind the cpu state, and return to the main loop. + * + * If it is simpler to re-use the sysemu do_unaligned_access code, + * @ra is provided so that a "normal" cpu exception can be raised. + * In this case, the signal must be raised by the architecture cpu_loop. + */ + void (*record_sigbus)(CPUState *cpu, vaddr addr, + MMUAccessType access_type, uintptr_t ra); #endif /* CONFIG_SOFTMMU */ #endif /* NEED_CPU_H */ diff --git a/include/hw/cxl/cxl.h b/include/hw/cxl/cxl.h new file mode 100644 index 0000000000..38e0e271d5 --- /dev/null +++ b/include/hw/cxl/cxl.h @@ -0,0 +1,61 @@ +/* + * QEMU CXL Support + * + * Copyright (c) 2020 Intel + * + * This work is licensed under the terms of the GNU GPL, version 2. See the + * COPYING file in the top-level directory. + */ + +#ifndef CXL_H +#define CXL_H + + +#include "qapi/qapi-types-machine.h" +#include "qapi/qapi-visit-machine.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_host.h" +#include "cxl_pci.h" +#include "cxl_component.h" +#include "cxl_device.h" + +#define CXL_COMPONENT_REG_BAR_IDX 0 +#define CXL_DEVICE_REG_BAR_IDX 2 + +#define CXL_WINDOW_MAX 10 + +typedef struct CXLFixedWindow { + uint64_t size; + char **targets; + struct PXBDev *target_hbs[8]; + uint8_t num_targets; + uint8_t enc_int_ways; + uint8_t enc_int_gran; + /* Todo: XOR based interleaving */ + MemoryRegion mr; + hwaddr base; +} CXLFixedWindow; + +typedef struct CXLState { + bool is_enabled; + MemoryRegion host_mr; + unsigned int next_mr_idx; + GList *fixed_windows; + CXLFixedMemoryWindowOptionsList *cfmw_list; +} CXLState; + +struct CXLHost { + PCIHostState parent_obj; + + CXLComponentState cxl_cstate; +}; + +#define TYPE_PXB_CXL_HOST "pxb-cxl-host" +OBJECT_DECLARE_SIMPLE_TYPE(CXLHost, PXB_CXL_HOST) + +#define TYPE_CXL_USP "cxl-upstream" + +typedef struct CXLUpstreamPort CXLUpstreamPort; +DECLARE_INSTANCE_CHECKER(CXLUpstreamPort, CXL_USP, TYPE_CXL_USP) +CXLComponentState *cxl_usp_to_cstate(CXLUpstreamPort *usp); +#endif diff --git a/include/hw/cxl/cxl_cdat.h b/include/hw/cxl/cxl_cdat.h new file mode 100644 index 0000000000..e9eda00142 --- /dev/null +++ b/include/hw/cxl/cxl_cdat.h @@ -0,0 +1,166 @@ +/* + * CXL CDAT Structure + * + * Copyright (C) 2021 Avery Design Systems, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef CXL_CDAT_H +#define CXL_CDAT_H + +#include "hw/cxl/cxl_pci.h" + +/* + * Reference: + * Coherent Device Attribute Table (CDAT) Specification, Rev. 1.03, July. 2022 + * Compute Express Link (CXL) Specification, Rev. 3.0, Aug. 
2022 + */ +/* Table Access DOE - CXL r3.0 8.1.11 */ +#define CXL_DOE_TABLE_ACCESS 2 +#define CXL_DOE_PROTOCOL_CDAT ((CXL_DOE_TABLE_ACCESS << 16) | CXL_VENDOR_ID) + +/* Read Entry - CXL r3.0 8.1.11.1 */ +#define CXL_DOE_TAB_TYPE_CDAT 0 +#define CXL_DOE_TAB_ENT_MAX 0xFFFF + +/* Read Entry Request - CXL r3.0 8.1.11.1 Table 8-13 */ +#define CXL_DOE_TAB_REQ 0 +typedef struct CDATReq { + DOEHeader header; + uint8_t req_code; + uint8_t table_type; + uint16_t entry_handle; +} QEMU_PACKED CDATReq; + +/* Read Entry Response - CXL r3.0 8.1.11.1 Table 8-14 */ +#define CXL_DOE_TAB_RSP 0 +typedef struct CDATRsp { + DOEHeader header; + uint8_t rsp_code; + uint8_t table_type; + uint16_t entry_handle; +} QEMU_PACKED CDATRsp; + +/* CDAT Table Format - CDAT Table 1 */ +#define CXL_CDAT_REV 2 +typedef struct CDATTableHeader { + uint32_t length; + uint8_t revision; + uint8_t checksum; + uint8_t reserved[6]; + uint32_t sequence; +} QEMU_PACKED CDATTableHeader; + +/* CDAT Structure Types - CDAT Table 2 */ +typedef enum { + CDAT_TYPE_DSMAS = 0, + CDAT_TYPE_DSLBIS = 1, + CDAT_TYPE_DSMSCIS = 2, + CDAT_TYPE_DSIS = 3, + CDAT_TYPE_DSEMTS = 4, + CDAT_TYPE_SSLBIS = 5, +} CDATType; + +typedef struct CDATSubHeader { + uint8_t type; + uint8_t reserved; + uint16_t length; +} CDATSubHeader; + +/* Device Scoped Memory Affinity Structure - CDAT Table 3 */ +typedef struct CDATDsmas { + CDATSubHeader header; + uint8_t DSMADhandle; + uint8_t flags; +#define CDAT_DSMAS_FLAG_NV (1 << 2) +#define CDAT_DSMAS_FLAG_SHAREABLE (1 << 3) +#define CDAT_DSMAS_FLAG_HW_COHERENT (1 << 4) +#define CDAT_DSMAS_FLAG_DYNAMIC_CAP (1 << 5) + uint16_t reserved; + uint64_t DPA_base; + uint64_t DPA_length; +} QEMU_PACKED CDATDsmas; + +/* Device Scoped Latency and Bandwidth Information Structure - CDAT Table 5 */ +typedef struct CDATDslbis { + CDATSubHeader header; + uint8_t handle; + /* Definitions of these fields refer directly to HMAT fields */ + uint8_t flags; + uint8_t data_type; + uint8_t reserved; + uint64_t entry_base_unit; + uint16_t entry[3]; + uint16_t reserved2; +} QEMU_PACKED CDATDslbis; + +/* Device Scoped Memory Side Cache Information Structure - CDAT Table 6 */ +typedef struct CDATDsmscis { + CDATSubHeader header; + uint8_t DSMAS_handle; + uint8_t reserved[3]; + uint64_t memory_side_cache_size; + uint32_t cache_attributes; +} QEMU_PACKED CDATDsmscis; + +/* Device Scoped Initiator Structure - CDAT Table 7 */ +typedef struct CDATDsis { + CDATSubHeader header; + uint8_t flags; + uint8_t handle; + uint16_t reserved; +} QEMU_PACKED CDATDsis; + +/* Device Scoped EFI Memory Type Structure - CDAT Table 8 */ +typedef struct CDATDsemts { + CDATSubHeader header; + uint8_t DSMAS_handle; + uint8_t EFI_memory_type_attr; + uint16_t reserved; + uint64_t DPA_offset; + uint64_t DPA_length; +} QEMU_PACKED CDATDsemts; + +/* Switch Scoped Latency and Bandwidth Information Structure - CDAT Table 9 */ +typedef struct CDATSslbisHeader { + CDATSubHeader header; + uint8_t data_type; + uint8_t reserved[3]; + uint64_t entry_base_unit; +} QEMU_PACKED CDATSslbisHeader; + +#define CDAT_PORT_ID_USP 0x100 +/* Switch Scoped Latency and Bandwidth Entry - CDAT Table 10 */ +typedef struct CDATSslbe { + uint16_t port_x_id; + uint16_t port_y_id; + uint16_t latency_bandwidth; + uint16_t reserved; +} QEMU_PACKED CDATSslbe; + +typedef struct CDATSslbis { + CDATSslbisHeader sslbis_header; + CDATSslbe sslbe[]; +} QEMU_PACKED CDATSslbis; + +typedef struct CDATEntry { + void *base; + uint32_t length; +} CDATEntry; + +typedef struct CDATObject { + CDATEntry *entry; + int entry_len; + + 
int (*build_cdat_table)(CDATSubHeader ***cdat_table, void *priv); + void (*free_cdat_table)(CDATSubHeader **cdat_table, int num, void *priv); + bool to_update; + void *private; + char *filename; + uint8_t *buf; + struct CDATSubHeader **built_buf; + int built_buf_len; +} CDATObject; +#endif /* CXL_CDAT_H */ diff --git a/include/hw/cxl/cxl_component.h b/include/hw/cxl/cxl_component.h new file mode 100644 index 0000000000..34075cfb72 --- /dev/null +++ b/include/hw/cxl/cxl_component.h @@ -0,0 +1,230 @@ +/* + * QEMU CXL Component + * + * Copyright (c) 2020 Intel + * + * This work is licensed under the terms of the GNU GPL, version 2. See the + * COPYING file in the top-level directory. + */ + +#ifndef CXL_COMPONENT_H +#define CXL_COMPONENT_H + +/* CXL 2.0 - 8.2.4 */ +#define CXL2_COMPONENT_IO_REGION_SIZE 0x1000 +#define CXL2_COMPONENT_CM_REGION_SIZE 0x1000 +#define CXL2_COMPONENT_BLOCK_SIZE 0x10000 + +#include "qemu/compiler.h" +#include "qemu/range.h" +#include "qemu/typedefs.h" +#include "hw/register.h" +#include "qapi/error.h" + +enum reg_type { + CXL2_DEVICE, + CXL2_TYPE3_DEVICE, + CXL2_LOGICAL_DEVICE, + CXL2_ROOT_PORT, + CXL2_UPSTREAM_PORT, + CXL2_DOWNSTREAM_PORT +}; + +/* + * Capability registers are defined at the top of the CXL.cache/mem region and + * are packed. For our purposes we will always define the caps in the same + * order. + * CXL 2.0 - 8.2.5 Table 142 for details. + */ + +/* CXL 2.0 - 8.2.5.1 */ +REG32(CXL_CAPABILITY_HEADER, 0) + FIELD(CXL_CAPABILITY_HEADER, ID, 0, 16) + FIELD(CXL_CAPABILITY_HEADER, VERSION, 16, 4) + FIELD(CXL_CAPABILITY_HEADER, CACHE_MEM_VERSION, 20, 4) + FIELD(CXL_CAPABILITY_HEADER, ARRAY_SIZE, 24, 8) + +#define CXLx_CAPABILITY_HEADER(type, offset) \ + REG32(CXL_##type##_CAPABILITY_HEADER, offset) \ + FIELD(CXL_##type##_CAPABILITY_HEADER, ID, 0, 16) \ + FIELD(CXL_##type##_CAPABILITY_HEADER, VERSION, 16, 4) \ + FIELD(CXL_##type##_CAPABILITY_HEADER, PTR, 20, 12) +CXLx_CAPABILITY_HEADER(RAS, 0x4) +CXLx_CAPABILITY_HEADER(LINK, 0x8) +CXLx_CAPABILITY_HEADER(HDM, 0xc) +CXLx_CAPABILITY_HEADER(EXTSEC, 0x10) +CXLx_CAPABILITY_HEADER(SNOOP, 0x14) + +/* + * Capability structures contain the actual registers that the CXL component + * implements. Some of these are specific to certain types of components, but + * this implementation leaves enough space regardless. 
+ */ +/* 8.2.5.9 - CXL RAS Capability Structure */ + +/* Give ample space for caps before this */ +#define CXL_RAS_REGISTERS_OFFSET 0x80 +#define CXL_RAS_REGISTERS_SIZE 0x58 +REG32(CXL_RAS_UNC_ERR_STATUS, CXL_RAS_REGISTERS_OFFSET) +REG32(CXL_RAS_UNC_ERR_MASK, CXL_RAS_REGISTERS_OFFSET + 0x4) +REG32(CXL_RAS_UNC_ERR_SEVERITY, CXL_RAS_REGISTERS_OFFSET + 0x8) +REG32(CXL_RAS_COR_ERR_STATUS, CXL_RAS_REGISTERS_OFFSET + 0xc) +REG32(CXL_RAS_COR_ERR_MASK, CXL_RAS_REGISTERS_OFFSET + 0x10) +REG32(CXL_RAS_ERR_CAP_CTRL, CXL_RAS_REGISTERS_OFFSET + 0x14) +/* Offset 0x18 - 0x58 reserved for RAS logs */ + +/* 8.2.5.10 - CXL Security Capability Structure */ +#define CXL_SEC_REGISTERS_OFFSET \ + (CXL_RAS_REGISTERS_OFFSET + CXL_RAS_REGISTERS_SIZE) +#define CXL_SEC_REGISTERS_SIZE 0 /* We don't implement 1.1 downstream ports */ + +/* 8.2.5.11 - CXL Link Capability Structure */ +#define CXL_LINK_REGISTERS_OFFSET \ + (CXL_SEC_REGISTERS_OFFSET + CXL_SEC_REGISTERS_SIZE) +#define CXL_LINK_REGISTERS_SIZE 0x38 + +/* 8.2.5.12 - CXL HDM Decoder Capability Structure */ +#define HDM_DECODE_MAX 10 /* 8.2.5.12.1 */ +#define CXL_HDM_REGISTERS_OFFSET \ + (CXL_LINK_REGISTERS_OFFSET + CXL_LINK_REGISTERS_SIZE) +#define CXL_HDM_REGISTERS_SIZE (0x10 + 0x20 * HDM_DECODE_MAX) +#define HDM_DECODER_INIT(n) \ + REG32(CXL_HDM_DECODER##n##_BASE_LO, \ + CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x10) \ + FIELD(CXL_HDM_DECODER##n##_BASE_LO, L, 28, 4) \ + REG32(CXL_HDM_DECODER##n##_BASE_HI, \ + CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x14) \ + REG32(CXL_HDM_DECODER##n##_SIZE_LO, \ + CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x18) \ + REG32(CXL_HDM_DECODER##n##_SIZE_HI, \ + CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x1C) \ + REG32(CXL_HDM_DECODER##n##_CTRL, \ + CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x20) \ + FIELD(CXL_HDM_DECODER##n##_CTRL, IG, 0, 4) \ + FIELD(CXL_HDM_DECODER##n##_CTRL, IW, 4, 4) \ + FIELD(CXL_HDM_DECODER##n##_CTRL, LOCK_ON_COMMIT, 8, 1) \ + FIELD(CXL_HDM_DECODER##n##_CTRL, COMMIT, 9, 1) \ + FIELD(CXL_HDM_DECODER##n##_CTRL, COMMITTED, 10, 1) \ + FIELD(CXL_HDM_DECODER##n##_CTRL, ERR, 11, 1) \ + FIELD(CXL_HDM_DECODER##n##_CTRL, TYPE, 12, 1) \ + REG32(CXL_HDM_DECODER##n##_TARGET_LIST_LO, \ + CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x24) \ + REG32(CXL_HDM_DECODER##n##_TARGET_LIST_HI, \ + CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x28) + +REG32(CXL_HDM_DECODER_CAPABILITY, CXL_HDM_REGISTERS_OFFSET) + FIELD(CXL_HDM_DECODER_CAPABILITY, DECODER_COUNT, 0, 4) + FIELD(CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 4, 4) + FIELD(CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_256B, 8, 1) + FIELD(CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 9, 1) + FIELD(CXL_HDM_DECODER_CAPABILITY, POISON_ON_ERR_CAP, 10, 1) +REG32(CXL_HDM_DECODER_GLOBAL_CONTROL, CXL_HDM_REGISTERS_OFFSET + 4) + FIELD(CXL_HDM_DECODER_GLOBAL_CONTROL, POISON_ON_ERR_EN, 0, 1) + FIELD(CXL_HDM_DECODER_GLOBAL_CONTROL, HDM_DECODER_ENABLE, 1, 1) + +HDM_DECODER_INIT(0); + +/* 8.2.5.13 - CXL Extended Security Capability Structure (Root complex only) */ +#define EXTSEC_ENTRY_MAX 256 +#define CXL_EXTSEC_REGISTERS_OFFSET \ + (CXL_HDM_REGISTERS_OFFSET + CXL_HDM_REGISTERS_SIZE) +#define CXL_EXTSEC_REGISTERS_SIZE (8 * EXTSEC_ENTRY_MAX + 4) + +/* 8.2.5.14 - CXL IDE Capability Structure */ +#define CXL_IDE_REGISTERS_OFFSET \ + (CXL_EXTSEC_REGISTERS_OFFSET + CXL_EXTSEC_REGISTERS_SIZE) +#define CXL_IDE_REGISTERS_SIZE 0x20 + +/* 8.2.5.15 - CXL Snoop Filter Capability Structure */ +#define CXL_SNOOP_REGISTERS_OFFSET \ + (CXL_IDE_REGISTERS_OFFSET + CXL_IDE_REGISTERS_SIZE) +#define CXL_SNOOP_REGISTERS_SIZE 0x8 + 
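To make the packed cache/mem capability layout above more concrete, here is a small sketch. The helper name is hypothetical; it only restates the arithmetic already encoded in HDM_DECODER_INIT() and the offset/size macros, and it assumes this header has been included:

#include <assert.h>
#include <stdint.h>
/* Assumes "hw/cxl/cxl_component.h" (this header) is included. */

/* Offset of decoder n's CTRL register within the cache/mem region,
 * mirroring HDM_DECODER_INIT(n): each decoder uses a 0x20-byte stride and
 * its registers start 0x10 bytes into the HDM capability structure.
 */
static inline uint32_t example_hdm_decoder_ctrl_offset(int n)
{
    return CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x20;
}

static void example_check_hdm_layout(void)
{
    /* The last register of the last decoder (TARGET_LIST_HI, at stride
     * offset 0x28, 4 bytes wide) must still fit in CXL_HDM_REGISTERS_SIZE.
     */
    assert(0x20 * (HDM_DECODE_MAX - 1) + 0x28 + 4 <= CXL_HDM_REGISTERS_SIZE);
    assert(example_hdm_decoder_ctrl_offset(0) ==
           CXL_HDM_REGISTERS_OFFSET + 0x20);
}

Chaining the offsets above (RAS at 0x80 plus 0x58, a zero-size security structure, then 0x38 of link registers) puts CXL_HDM_REGISTERS_OFFSET at 0x110, so decoder 0's CTRL register sits at offset 0x130 in the cache/mem region.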
+QEMU_BUILD_BUG_MSG((CXL_SNOOP_REGISTERS_OFFSET + CXL_SNOOP_REGISTERS_SIZE) >= 0x1000, + "No space for registers"); + +typedef struct component_registers { + /* + * Main memory region to be registered with QEMU core. + */ + MemoryRegion component_registers; + + /* + * 8.2.4 Table 141: + * 0x0000 - 0x0fff CXL.io registers + * 0x1000 - 0x1fff CXL.cache and CXL.mem + * 0x2000 - 0xdfff Implementation specific + * 0xe000 - 0xe3ff CXL ARB/MUX registers + * 0xe400 - 0xffff RSVD + */ + uint32_t io_registers[CXL2_COMPONENT_IO_REGION_SIZE >> 2]; + MemoryRegion io; + + uint32_t cache_mem_registers[CXL2_COMPONENT_CM_REGION_SIZE >> 2]; + uint32_t cache_mem_regs_write_mask[CXL2_COMPONENT_CM_REGION_SIZE >> 2]; + MemoryRegion cache_mem; + + MemoryRegion impl_specific; + MemoryRegion arb_mux; + MemoryRegion rsvd; + + /* special_ops is used for any component that needs any specific handling */ + MemoryRegionOps *special_ops; +} ComponentRegisters; + +/* + * A CXL component represents all entities in a CXL hierarchy. This includes, + * host bridges, root ports, upstream/downstream switch ports, and devices + */ +typedef struct cxl_component { + ComponentRegisters crb; + union { + struct { + Range dvsecs[CXL20_MAX_DVSEC]; + uint16_t dvsec_offset; + struct PCIDevice *pdev; + }; + }; + + CDATObject cdat; +} CXLComponentState; + +void cxl_component_register_block_init(Object *obj, + CXLComponentState *cxl_cstate, + const char *type); +void cxl_component_register_init_common(uint32_t *reg_state, + uint32_t *write_msk, + enum reg_type type); + +void cxl_component_create_dvsec(CXLComponentState *cxl_cstate, + enum reg_type cxl_dev_type, uint16_t length, + uint16_t type, uint8_t rev, uint8_t *body); + +static inline int cxl_decoder_count_enc(int count) +{ + switch (count) { + case 1: return 0; + case 2: return 1; + case 4: return 2; + case 6: return 3; + case 8: return 4; + case 10: return 5; + } + return 0; +} + +uint8_t cxl_interleave_ways_enc(int iw, Error **errp); +uint8_t cxl_interleave_granularity_enc(uint64_t gran, Error **errp); + +static inline hwaddr cxl_decode_ig(int ig) +{ + return 1ULL << (ig + 8); +} + +CXLComponentState *cxl_get_hb_cstate(PCIHostState *hb); + +void cxl_doe_cdat_init(CXLComponentState *cxl_cstate, Error **errp); +void cxl_doe_cdat_release(CXLComponentState *cxl_cstate); +void cxl_doe_cdat_update(CXLComponentState *cxl_cstate, Error **errp); + +#endif diff --git a/include/hw/cxl/cxl_device.h b/include/hw/cxl/cxl_device.h new file mode 100644 index 0000000000..449b0edfe9 --- /dev/null +++ b/include/hw/cxl/cxl_device.h @@ -0,0 +1,272 @@ +/* + * QEMU CXL Devices + * + * Copyright (c) 2020 Intel + * + * This work is licensed under the terms of the GNU GPL, version 2. See the + * COPYING file in the top-level directory. + */ + +#ifndef CXL_DEVICE_H +#define CXL_DEVICE_H + +#include "hw/register.h" + +/* + * The following is how a CXL device's Memory Device registers are laid out. + * The only requirement from the spec is that the capabilities array and the + * capability headers start at offset 0 and are contiguously packed. The headers + * themselves provide offsets to the register fields. For this emulation, the + * actual registers * will start at offset 0x80 (m == 0x80). No secondary + * mailbox is implemented which means that the offset of the start of the + * mailbox payload (n) is given by + * n = m + sizeof(mailbox registers) + sizeof(device registers). 
+ * + * +---------------------------------+ + * | | + * | Memory Device Registers | + * | | + * n + PAYLOAD_SIZE_MAX ----------------------------------- + * ^ | | + * | | | + * | | | + * | | | + * | | | + * | | Mailbox Payload | + * | | | + * | | | + * | | | + * n ----------------------------------- + * ^ | Mailbox Registers | + * | | | + * | ----------------------------------- + * | | | + * | | Device Registers | + * | | | + * m ----------------------------------> + * ^ | Memory Device Capability Header| + * | ----------------------------------- + * | | Mailbox Capability Header | + * | ----------------------------------- + * | | Device Capability Header | + * | ----------------------------------- + * | | Device Cap Array Register | + * 0 +---------------------------------+ + * + */ + +#define CXL_DEVICE_CAP_HDR1_OFFSET 0x10 /* Figure 138 */ +#define CXL_DEVICE_CAP_REG_SIZE 0x10 /* 8.2.8.2 */ +#define CXL_DEVICE_CAPS_MAX 4 /* 8.2.8.2.1 + 8.2.8.5 */ +#define CXL_CAPS_SIZE \ + (CXL_DEVICE_CAP_REG_SIZE * (CXL_DEVICE_CAPS_MAX + 1)) /* +1 for header */ + +#define CXL_DEVICE_STATUS_REGISTERS_OFFSET 0x80 /* Read comment above */ +#define CXL_DEVICE_STATUS_REGISTERS_LENGTH 0x8 /* 8.2.8.3.1 */ + +#define CXL_MAILBOX_REGISTERS_OFFSET \ + (CXL_DEVICE_STATUS_REGISTERS_OFFSET + CXL_DEVICE_STATUS_REGISTERS_LENGTH) +#define CXL_MAILBOX_REGISTERS_SIZE 0x20 /* 8.2.8.4, Figure 139 */ +#define CXL_MAILBOX_PAYLOAD_SHIFT 11 +#define CXL_MAILBOX_MAX_PAYLOAD_SIZE (1 << CXL_MAILBOX_PAYLOAD_SHIFT) +#define CXL_MAILBOX_REGISTERS_LENGTH \ + (CXL_MAILBOX_REGISTERS_SIZE + CXL_MAILBOX_MAX_PAYLOAD_SIZE) + +#define CXL_MEMORY_DEVICE_REGISTERS_OFFSET \ + (CXL_MAILBOX_REGISTERS_OFFSET + CXL_MAILBOX_REGISTERS_LENGTH) +#define CXL_MEMORY_DEVICE_REGISTERS_LENGTH 0x8 + +#define CXL_MMIO_SIZE \ + (CXL_DEVICE_CAP_REG_SIZE + CXL_DEVICE_STATUS_REGISTERS_LENGTH + \ + CXL_MAILBOX_REGISTERS_LENGTH + CXL_MEMORY_DEVICE_REGISTERS_LENGTH) + +typedef struct cxl_device_state { + MemoryRegion device_registers; + + /* mmio for device capabilities array - 8.2.8.2 */ + MemoryRegion device; + MemoryRegion memory_device; + struct { + MemoryRegion caps; + union { + uint32_t caps_reg_state32[CXL_CAPS_SIZE / 4]; + uint64_t caps_reg_state64[CXL_CAPS_SIZE / 8]; + }; + }; + + /* mmio for the mailbox registers 8.2.8.4 */ + struct { + MemoryRegion mailbox; + uint16_t payload_size; + union { + uint8_t mbox_reg_state[CXL_MAILBOX_REGISTERS_LENGTH]; + uint16_t mbox_reg_state16[CXL_MAILBOX_REGISTERS_LENGTH / 2]; + uint32_t mbox_reg_state32[CXL_MAILBOX_REGISTERS_LENGTH / 4]; + uint64_t mbox_reg_state64[CXL_MAILBOX_REGISTERS_LENGTH / 8]; + }; + struct cel_log { + uint16_t opcode; + uint16_t effect; + } cel_log[1 << 16]; + size_t cel_size; + }; + + struct { + bool set; + uint64_t last_set; + uint64_t host_set; + } timestamp; + + /* memory region for persistent memory, HDM */ + uint64_t pmem_size; +} CXLDeviceState; + +/* Initialize the register block for a device */ +void cxl_device_register_block_init(Object *obj, CXLDeviceState *dev); + +/* Set up default values for the register block */ +void cxl_device_register_init_common(CXLDeviceState *dev); + +/* + * CXL 2.0 - 8.2.8.1 including errata F4 + * Documented as a 128 bit register, but 64 bit accesses and the second + * 64 bits are currently reserved. 
+ */ +REG64(CXL_DEV_CAP_ARRAY, 0) /* Documented as 128 bit register but 64 byte accesses */ + FIELD(CXL_DEV_CAP_ARRAY, CAP_ID, 0, 16) + FIELD(CXL_DEV_CAP_ARRAY, CAP_VERSION, 16, 8) + FIELD(CXL_DEV_CAP_ARRAY, CAP_COUNT, 32, 16) + +/* + * Helper macro to initialize capability headers for CXL devices. + * + * In the 8.2.8.2, this is listed as a 128b register, but in 8.2.8, it says: + * > No registers defined in Section 8.2.8 are larger than 64-bits wide so that + * > is the maximum access size allowed for these registers. If this rule is not + * > followed, the behavior is undefined + * + * CXL 2.0 Errata F4 states futher that the layouts in the specification are + * shown as greater than 128 bits, but implementations are expected to + * use any size of access up to 64 bits. + * + * Here we've chosen to make it 4 dwords. The spec allows any pow2 multiple + * access to be used for a register up to 64 bits. + */ +#define CXL_DEVICE_CAPABILITY_HEADER_REGISTER(n, offset) \ + REG32(CXL_DEV_##n##_CAP_HDR0, offset) \ + FIELD(CXL_DEV_##n##_CAP_HDR0, CAP_ID, 0, 16) \ + FIELD(CXL_DEV_##n##_CAP_HDR0, CAP_VERSION, 16, 8) \ + REG32(CXL_DEV_##n##_CAP_HDR1, offset + 4) \ + FIELD(CXL_DEV_##n##_CAP_HDR1, CAP_OFFSET, 0, 32) \ + REG32(CXL_DEV_##n##_CAP_HDR2, offset + 8) \ + FIELD(CXL_DEV_##n##_CAP_HDR2, CAP_LENGTH, 0, 32) + +CXL_DEVICE_CAPABILITY_HEADER_REGISTER(DEVICE_STATUS, CXL_DEVICE_CAP_HDR1_OFFSET) +CXL_DEVICE_CAPABILITY_HEADER_REGISTER(MAILBOX, CXL_DEVICE_CAP_HDR1_OFFSET + \ + CXL_DEVICE_CAP_REG_SIZE) +CXL_DEVICE_CAPABILITY_HEADER_REGISTER(MEMORY_DEVICE, + CXL_DEVICE_CAP_HDR1_OFFSET + + CXL_DEVICE_CAP_REG_SIZE * 2) + +int cxl_initialize_mailbox(CXLDeviceState *cxl_dstate); +void cxl_process_mailbox(CXLDeviceState *cxl_dstate); + +#define cxl_device_cap_init(dstate, reg, cap_id) \ + do { \ + uint32_t *cap_hdrs = dstate->caps_reg_state32; \ + int which = R_CXL_DEV_##reg##_CAP_HDR0; \ + cap_hdrs[which] = \ + FIELD_DP32(cap_hdrs[which], CXL_DEV_##reg##_CAP_HDR0, \ + CAP_ID, cap_id); \ + cap_hdrs[which] = FIELD_DP32( \ + cap_hdrs[which], CXL_DEV_##reg##_CAP_HDR0, CAP_VERSION, 1); \ + cap_hdrs[which + 1] = \ + FIELD_DP32(cap_hdrs[which + 1], CXL_DEV_##reg##_CAP_HDR1, \ + CAP_OFFSET, CXL_##reg##_REGISTERS_OFFSET); \ + cap_hdrs[which + 2] = \ + FIELD_DP32(cap_hdrs[which + 2], CXL_DEV_##reg##_CAP_HDR2, \ + CAP_LENGTH, CXL_##reg##_REGISTERS_LENGTH); \ + } while (0) + +/* CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register */ +REG32(CXL_DEV_MAILBOX_CAP, 0) + FIELD(CXL_DEV_MAILBOX_CAP, PAYLOAD_SIZE, 0, 5) + FIELD(CXL_DEV_MAILBOX_CAP, INT_CAP, 5, 1) + FIELD(CXL_DEV_MAILBOX_CAP, BG_INT_CAP, 6, 1) + FIELD(CXL_DEV_MAILBOX_CAP, MSI_N, 7, 4) + +/* CXL 2.0 8.2.8.4.4 Mailbox Control Register */ +REG32(CXL_DEV_MAILBOX_CTRL, 4) + FIELD(CXL_DEV_MAILBOX_CTRL, DOORBELL, 0, 1) + FIELD(CXL_DEV_MAILBOX_CTRL, INT_EN, 1, 1) + FIELD(CXL_DEV_MAILBOX_CTRL, BG_INT_EN, 2, 1) + +/* CXL 2.0 8.2.8.4.5 Command Register */ +REG64(CXL_DEV_MAILBOX_CMD, 8) + FIELD(CXL_DEV_MAILBOX_CMD, COMMAND, 0, 8) + FIELD(CXL_DEV_MAILBOX_CMD, COMMAND_SET, 8, 8) + FIELD(CXL_DEV_MAILBOX_CMD, LENGTH, 16, 20) + +/* CXL 2.0 8.2.8.4.6 Mailbox Status Register */ +REG64(CXL_DEV_MAILBOX_STS, 0x10) + FIELD(CXL_DEV_MAILBOX_STS, BG_OP, 0, 1) + FIELD(CXL_DEV_MAILBOX_STS, ERRNO, 32, 16) + FIELD(CXL_DEV_MAILBOX_STS, VENDOR_ERRNO, 48, 16) + +/* CXL 2.0 8.2.8.4.7 Background Command Status Register */ +REG64(CXL_DEV_BG_CMD_STS, 0x18) + FIELD(CXL_DEV_BG_CMD_STS, OP, 0, 16) + FIELD(CXL_DEV_BG_CMD_STS, PERCENTAGE_COMP, 16, 7) + FIELD(CXL_DEV_BG_CMD_STS, RET_CODE, 32, 16) + 
FIELD(CXL_DEV_BG_CMD_STS, VENDOR_RET_CODE, 48, 16) + +/* CXL 2.0 8.2.8.4.8 Command Payload Registers */ +REG32(CXL_DEV_CMD_PAYLOAD, 0x20) + +REG64(CXL_MEM_DEV_STS, 0) + FIELD(CXL_MEM_DEV_STS, FATAL, 0, 1) + FIELD(CXL_MEM_DEV_STS, FW_HALT, 1, 1) + FIELD(CXL_MEM_DEV_STS, MEDIA_STATUS, 2, 2) + FIELD(CXL_MEM_DEV_STS, MBOX_READY, 4, 1) + FIELD(CXL_MEM_DEV_STS, RESET_NEEDED, 5, 3) + +struct CXLType3Dev { + /* Private */ + PCIDevice parent_obj; + + /* Properties */ + HostMemoryBackend *hostmem; + HostMemoryBackend *lsa; + uint64_t sn; + + /* State */ + AddressSpace hostmem_as; + CXLComponentState cxl_cstate; + CXLDeviceState cxl_dstate; + + /* DOE */ + DOECap doe_cdat; +}; + +#define TYPE_CXL_TYPE3 "cxl-type3" +OBJECT_DECLARE_TYPE(CXLType3Dev, CXLType3Class, CXL_TYPE3) + +struct CXLType3Class { + /* Private */ + PCIDeviceClass parent_class; + + /* public */ + uint64_t (*get_lsa_size)(CXLType3Dev *ct3d); + + uint64_t (*get_lsa)(CXLType3Dev *ct3d, void *buf, uint64_t size, + uint64_t offset); + void (*set_lsa)(CXLType3Dev *ct3d, const void *buf, uint64_t size, + uint64_t offset); +}; + +MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data, + unsigned size, MemTxAttrs attrs); +MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data, + unsigned size, MemTxAttrs attrs); + +#endif diff --git a/include/hw/cxl/cxl_host.h b/include/hw/cxl/cxl_host.h new file mode 100644 index 0000000000..a1b662ce40 --- /dev/null +++ b/include/hw/cxl/cxl_host.h @@ -0,0 +1,23 @@ +/* + * QEMU CXL Host Setup + * + * Copyright (c) 2022 Huawei + * + * This work is licensed under the terms of the GNU GPL, version 2. See the + * COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/cxl/cxl.h" +#include "hw/boards.h" + +#ifndef CXL_HOST_H +#define CXL_HOST_H + +void cxl_machine_init(Object *obj, CXLState *state); +void cxl_fmws_link_targets(CXLState *stat, Error **errp); +void cxl_hook_up_pxb_registers(PCIBus *bus, CXLState *state, Error **errp); + +extern const MemoryRegionOps cfmws_ops; + +#endif diff --git a/include/hw/cxl/cxl_pci.h b/include/hw/cxl/cxl_pci.h new file mode 100644 index 0000000000..3cb79eca1e --- /dev/null +++ b/include/hw/cxl/cxl_pci.h @@ -0,0 +1,168 @@ +/* + * QEMU CXL PCI interfaces + * + * Copyright (c) 2020 Intel + * + * This work is licensed under the terms of the GNU GPL, version 2. See the + * COPYING file in the top-level directory. 
+ */ + +#ifndef CXL_PCI_H +#define CXL_PCI_H + +#include "qemu/compiler.h" +#include "hw/pci/pci.h" +#include "hw/pci/pcie.h" +#include "hw/cxl/cxl_cdat.h" + +#define CXL_VENDOR_ID 0x1e98 + +#define PCIE_DVSEC_HEADER1_OFFSET 0x4 /* Offset from start of extend cap */ +#define PCIE_DVSEC_ID_OFFSET 0x8 + +#define PCIE_CXL_DEVICE_DVSEC_LENGTH 0x38 +#define PCIE_CXL1_DEVICE_DVSEC_REVID 0 +#define PCIE_CXL2_DEVICE_DVSEC_REVID 1 + +#define EXTENSIONS_PORT_DVSEC_LENGTH 0x28 +#define EXTENSIONS_PORT_DVSEC_REVID 0 + +#define GPF_PORT_DVSEC_LENGTH 0x10 +#define GPF_PORT_DVSEC_REVID 0 + +#define GPF_DEVICE_DVSEC_LENGTH 0x10 +#define GPF_DEVICE_DVSEC_REVID 0 + +#define PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0 0x14 +#define PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0 1 + +#define REG_LOC_DVSEC_LENGTH 0x24 +#define REG_LOC_DVSEC_REVID 0 + +enum { + PCIE_CXL_DEVICE_DVSEC = 0, + NON_CXL_FUNCTION_MAP_DVSEC = 2, + EXTENSIONS_PORT_DVSEC = 3, + GPF_PORT_DVSEC = 4, + GPF_DEVICE_DVSEC = 5, + PCIE_FLEXBUS_PORT_DVSEC = 7, + REG_LOC_DVSEC = 8, + MLD_DVSEC = 9, + CXL20_MAX_DVSEC +}; + +typedef struct DVSECHeader { + uint32_t cap_hdr; + uint32_t dv_hdr1; + uint16_t dv_hdr2; +} QEMU_PACKED DVSECHeader; +QEMU_BUILD_BUG_ON(sizeof(DVSECHeader) != 10); + +/* + * CXL 2.0 devices must implement certain DVSEC IDs, and can [optionally] + * implement others. + * + * CXL 2.0 Device: 0, [2], 5, 8 + * CXL 2.0 RP: 3, 4, 7, 8 + * CXL 2.0 Upstream Port: [2], 7, 8 + * CXL 2.0 Downstream Port: 3, 4, 7, 8 + */ + +/* CXL 2.0 - 8.1.3 (ID 0001) */ +typedef struct CXLDVSECDevice { + DVSECHeader hdr; + uint16_t cap; + uint16_t ctrl; + uint16_t status; + uint16_t ctrl2; + uint16_t status2; + uint16_t lock; + uint16_t cap2; + uint32_t range1_size_hi; + uint32_t range1_size_lo; + uint32_t range1_base_hi; + uint32_t range1_base_lo; + uint32_t range2_size_hi; + uint32_t range2_size_lo; + uint32_t range2_base_hi; + uint32_t range2_base_lo; +} CXLDVSECDevice; +QEMU_BUILD_BUG_ON(sizeof(CXLDVSECDevice) != 0x38); + +/* CXL 2.0 - 8.1.5 (ID 0003) */ +typedef struct CXLDVSECPortExtensions { + DVSECHeader hdr; + uint16_t status; + uint16_t control; + uint8_t alt_bus_base; + uint8_t alt_bus_limit; + uint16_t alt_memory_base; + uint16_t alt_memory_limit; + uint16_t alt_prefetch_base; + uint16_t alt_prefetch_limit; + uint32_t alt_prefetch_base_high; + uint32_t alt_prefetch_limit_high; + uint32_t rcrb_base; + uint32_t rcrb_base_high; +} CXLDVSECPortExtensions; +QEMU_BUILD_BUG_ON(sizeof(CXLDVSECPortExtensions) != 0x28); + +#define PORT_CONTROL_OFFSET 0xc +#define PORT_CONTROL_UNMASK_SBR 1 +#define PORT_CONTROL_ALT_MEMID_EN 4 + +/* CXL 2.0 - 8.1.6 GPF DVSEC (ID 0004) */ +typedef struct CXLDVSECPortGPF { + DVSECHeader hdr; + uint16_t rsvd; + uint16_t phase1_ctrl; + uint16_t phase2_ctrl; +} CXLDVSECPortGPF; +QEMU_BUILD_BUG_ON(sizeof(CXLDVSECPortGPF) != 0x10); + +/* CXL 2.0 - 8.1.7 GPF DVSEC for CXL Device */ +typedef struct CXLDVSECDeviceGPF { + DVSECHeader hdr; + uint16_t phase2_duration; + uint32_t phase2_power; +} CXLDVSECDeviceGPF; +QEMU_BUILD_BUG_ON(sizeof(CXLDVSECDeviceGPF) != 0x10); + +/* CXL 2.0 - 8.1.8/8.2.1.3 Flex Bus DVSEC (ID 0007) */ +typedef struct CXLDVSECPortFlexBus { + DVSECHeader hdr; + uint16_t cap; + uint16_t ctrl; + uint16_t status; + uint32_t rcvd_mod_ts_data_phase1; +} CXLDVSECPortFlexBus; +QEMU_BUILD_BUG_ON(sizeof(CXLDVSECPortFlexBus) != 0x14); + +/* CXL 2.0 - 8.1.9 Register Locator DVSEC (ID 0008) */ +typedef struct CXLDVSECRegisterLocator { + DVSECHeader hdr; + uint16_t rsvd; + uint32_t reg0_base_lo; + uint32_t reg0_base_hi; + uint32_t reg1_base_lo; + 
uint32_t reg1_base_hi; + uint32_t reg2_base_lo; + uint32_t reg2_base_hi; +} CXLDVSECRegisterLocator; +QEMU_BUILD_BUG_ON(sizeof(CXLDVSECRegisterLocator) != 0x24); + +/* BAR Equivalence Indicator */ +#define BEI_BAR_10H 0 +#define BEI_BAR_14H 1 +#define BEI_BAR_18H 2 +#define BEI_BAR_1cH 3 +#define BEI_BAR_20H 4 +#define BEI_BAR_24H 5 + +/* Register Block Identifier */ +#define RBI_EMPTY 0 +#define RBI_COMPONENT_REG (1 << 8) +#define RBI_BAR_VIRT_ACL (2 << 8) +#define RBI_CXL_DEVICE_REG (3 << 8) + +#endif diff --git a/include/hw/display/macfb.h b/include/hw/display/macfb.h index 80806b0306..55a50d3fb0 100644 --- a/include/hw/display/macfb.h +++ b/include/hw/display/macfb.h @@ -14,9 +14,43 @@ #define MACFB_H #include "exec/memory.h" +#include "hw/irq.h" #include "ui/console.h" +#include "qemu/timer.h" #include "qom/object.h" +typedef enum { + MACFB_DISPLAY_APPLE_21_COLOR = 0, + MACFB_DISPLAY_APPLE_PORTRAIT = 1, + MACFB_DISPLAY_APPLE_12_RGB = 2, + MACFB_DISPLAY_APPLE_2PAGE_MONO = 3, + MACFB_DISPLAY_NTSC_UNDERSCAN = 4, + MACFB_DISPLAY_NTSC_OVERSCAN = 5, + MACFB_DISPLAY_APPLE_12_MONO = 6, + MACFB_DISPLAY_APPLE_13_RGB = 7, + MACFB_DISPLAY_16_COLOR = 8, + MACFB_DISPLAY_PAL1_UNDERSCAN = 9, + MACFB_DISPLAY_PAL1_OVERSCAN = 10, + MACFB_DISPLAY_PAL2_UNDERSCAN = 11, + MACFB_DISPLAY_PAL2_OVERSCAN = 12, + MACFB_DISPLAY_VGA = 13, + MACFB_DISPLAY_SVGA = 14, +} MacfbDisplayType; + +typedef struct MacFbMode { + uint8_t type; + uint8_t depth; + uint32_t mode_ctrl1; + uint32_t mode_ctrl2; + uint32_t width; + uint32_t height; + uint32_t stride; + uint32_t offset; +} MacFbMode; + +#define MACFB_CTRL_TOPADDR 0x200 +#define MACFB_NUM_REGS (MACFB_CTRL_TOPADDR / sizeof(uint32_t)) + typedef struct MacfbState { MemoryRegion mem_vram; MemoryRegion mem_ctrl; @@ -28,6 +62,13 @@ typedef struct MacfbState { uint8_t color_palette[256 * 3]; uint32_t width, height; /* in pixels */ uint8_t depth; + uint8_t type; + + uint32_t regs[MACFB_NUM_REGS]; + MacFbMode *mode; + + QEMUTimer *vbl_timer; + qemu_irq irq; } MacfbState; #define TYPE_MACFB "sysbus-macfb" @@ -46,6 +87,7 @@ struct MacfbNubusDeviceClass { DeviceClass parent_class; DeviceRealize parent_realize; + DeviceUnrealize parent_unrealize; }; diff --git a/include/hw/display/vga.h b/include/hw/display/vga.h index 5f7825e0e3..a79aa2909b 100644 --- a/include/hw/display/vga.h +++ b/include/hw/display/vga.h @@ -9,8 +9,6 @@ #ifndef QEMU_HW_DISPLAY_VGA_H #define QEMU_HW_DISPLAY_VGA_H -#include "exec/hwaddr.h" - /* * modules can reference this symbol to avoid being loaded * into system emulators without vga support @@ -24,8 +22,6 @@ enum vga_retrace_method { extern enum vga_retrace_method vga_retrace_method; -int isa_vga_mm_init(hwaddr vram_base, - hwaddr ctrl_base, int it_shift, - MemoryRegion *address_space); +#define TYPE_VGA_MMIO "vga-mmio" #endif diff --git a/include/hw/display/xlnx_dp.h b/include/hw/display/xlnx_dp.h index 8ab4733bb8..e86a87f235 100644 --- a/include/hw/display/xlnx_dp.h +++ b/include/hw/display/xlnx_dp.h @@ -35,14 +35,20 @@ #include "hw/dma/xlnx_dpdma.h" #include "audio/audio.h" #include "qom/object.h" +#include "hw/ptimer.h" #define AUD_CHBUF_MAX_DEPTH (32 * KiB) #define MAX_QEMU_BUFFER_SIZE (4 * KiB) -#define DP_CORE_REG_ARRAY_SIZE (0x3AF >> 2) +#define DP_CORE_REG_OFFSET (0x0000) +#define DP_CORE_REG_ARRAY_SIZE (0x3B0 >> 2) +#define DP_AVBUF_REG_OFFSET (0xB000) #define DP_AVBUF_REG_ARRAY_SIZE (0x238 >> 2) -#define DP_VBLEND_REG_ARRAY_SIZE (0x1DF >> 2) +#define DP_VBLEND_REG_OFFSET (0xA000) +#define DP_VBLEND_REG_ARRAY_SIZE (0x1E0 >> 2) +#define 
DP_AUDIO_REG_OFFSET (0xC000) #define DP_AUDIO_REG_ARRAY_SIZE (0x50 >> 2) +#define DP_CONTAINER_SIZE (0xC050) struct PixmanPlane { pixman_format_code_t format; @@ -102,6 +108,8 @@ struct XlnxDPState { */ DPCDState *dpcd; I2CDDCState *edid; + + ptimer_state *vblank; }; #define TYPE_XLNX_DP "xlnx.v-dp" diff --git a/include/hw/dma/xlnx-zdma.h b/include/hw/dma/xlnx-zdma.h index 6602e7ffa7..efc75217d5 100644 --- a/include/hw/dma/xlnx-zdma.h +++ b/include/hw/dma/xlnx-zdma.h @@ -56,7 +56,7 @@ struct XlnxZDMA { MemoryRegion iomem; MemTxAttrs attr; MemoryRegion *dma_mr; - AddressSpace *dma_as; + AddressSpace dma_as; qemu_irq irq_zdma_ch_imr; struct { diff --git a/include/hw/dma/xlnx_csu_dma.h b/include/hw/dma/xlnx_csu_dma.h index 204d94c673..922ab80eb6 100644 --- a/include/hw/dma/xlnx_csu_dma.h +++ b/include/hw/dma/xlnx_csu_dma.h @@ -21,6 +21,11 @@ #ifndef XLNX_CSU_DMA_H #define XLNX_CSU_DMA_H +#include "hw/sysbus.h" +#include "hw/register.h" +#include "hw/ptimer.h" +#include "hw/stream.h" + #define TYPE_XLNX_CSU_DMA "xlnx.csu_dma" #define XLNX_CSU_DMA_R_MAX (0x2c / 4) @@ -30,7 +35,7 @@ typedef struct XlnxCSUDMA { MemoryRegion iomem; MemTxAttrs attr; MemoryRegion *dma_mr; - AddressSpace *dma_as; + AddressSpace dma_as; qemu_irq irq; StreamSink *tx_dev; /* Used as generic StreamSink */ ptimer_state *src_timer; @@ -46,7 +51,22 @@ typedef struct XlnxCSUDMA { RegisterInfo regs_info[XLNX_CSU_DMA_R_MAX]; } XlnxCSUDMA; -#define XLNX_CSU_DMA(obj) \ - OBJECT_CHECK(XlnxCSUDMA, (obj), TYPE_XLNX_CSU_DMA) +OBJECT_DECLARE_TYPE(XlnxCSUDMA, XlnxCSUDMAClass, XLNX_CSU_DMA) + +struct XlnxCSUDMAClass { + SysBusDeviceClass parent_class; + + /* + * read: Start a read transfer on a Xilinx CSU DMA engine + * + * @s: the Xilinx CSU DMA engine to start the transfer on + * @addr: the address to read + * @len: the number of bytes to read at 'addr' + * + * @return a MemTxResult indicating whether the operation succeeded ('len' + * bytes were read) or failed. 
+ */ + MemTxResult (*read)(XlnxCSUDMA *s, hwaddr addr, uint32_t len); +}; #endif diff --git a/include/hw/elf_ops.h b/include/hw/elf_ops.h index 1c37cec4ae..fbe0b1e956 100644 --- a/include/hw/elf_ops.h +++ b/include/hw/elf_ops.h @@ -117,7 +117,7 @@ static void glue(load_symbols, SZ)(struct elfhdr *ehdr, int fd, int must_swab, shdr_table = load_at(fd, ehdr->e_shoff, sizeof(struct elf_shdr) * ehdr->e_shnum); if (!shdr_table) { - return ; + return; } if (must_swab) { @@ -312,25 +312,26 @@ static struct elf_note *glue(get_elf_note_type, SZ)(struct elf_note *nhdr, return nhdr; } -static int glue(load_elf, SZ)(const char *name, int fd, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, - int must_swab, uint64_t *pentry, - uint64_t *lowaddr, uint64_t *highaddr, - uint32_t *pflags, int elf_machine, - int clear_lsb, int data_swab, - AddressSpace *as, bool load_rom, - symbol_fn_t sym_cb) +static ssize_t glue(load_elf, SZ)(const char *name, int fd, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, + int must_swab, uint64_t *pentry, + uint64_t *lowaddr, uint64_t *highaddr, + uint32_t *pflags, int elf_machine, + int clear_lsb, int data_swab, + AddressSpace *as, bool load_rom, + symbol_fn_t sym_cb) { struct elfhdr ehdr; struct elf_phdr *phdr = NULL, *ph; - int size, i, total_size; + int size, i; + ssize_t total_size; elf_word mem_size, file_size, data_offset; uint64_t addr, low = (uint64_t)-1, high = 0; GMappedFile *mapped_file = NULL; uint8_t *data = NULL; - int ret = ELF_LOAD_FAILED; + ssize_t ret = ELF_LOAD_FAILED; if (read(fd, &ehdr, sizeof(ehdr)) != sizeof(ehdr)) goto fail; @@ -482,7 +483,7 @@ static int glue(load_elf, SZ)(const char *name, int fd, } } - if (mem_size > INT_MAX - total_size) { + if (mem_size > SSIZE_MAX - total_size) { ret = ELF_LOAD_TOO_BIG; goto fail; } @@ -554,6 +555,19 @@ static int glue(load_elf, SZ)(const char *name, int fd, if (res != MEMTX_OK) { goto fail; } + /* + * We need to zero'ify the space that is not copied + * from file + */ + if (file_size < mem_size) { + res = address_space_set(as ? as : &address_space_memory, + addr + file_size, 0, + mem_size - file_size, + MEMTXATTRS_UNSPECIFIED); + if (res != MEMTX_OK) { + goto fail; + } + } } } diff --git a/include/hw/firmware/smbios.h b/include/hw/firmware/smbios.h index 5a0dd0c8cf..7f3259a630 100644 --- a/include/hw/firmware/smbios.h +++ b/include/hw/firmware/smbios.h @@ -1,6 +1,8 @@ #ifndef QEMU_SMBIOS_H #define QEMU_SMBIOS_H +#include "qapi/qapi-types-machine.h" + /* * SMBIOS Support * @@ -16,6 +18,8 @@ #define SMBIOS_MAX_TYPE 127 +#define offsetofend(TYPE, MEMBER) \ + (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER)) /* memory area description, used by type 19 table */ struct smbios_phys_mem_area { @@ -23,14 +27,6 @@ struct smbios_phys_mem_area { uint64_t length; }; -/* - * SMBIOS spec defined tables - */ -typedef enum SmbiosEntryPointType { - SMBIOS_ENTRY_POINT_21, - SMBIOS_ENTRY_POINT_30, -} SmbiosEntryPointType; - /* SMBIOS Entry Point * There are two types of entry points defined in the SMBIOS specification * (see below). 
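The read hook added to XlnxCSUDMAClass in the xlnx_csu_dma.h hunk above is a QOM class-level callback, so a user of the DMA engine dispatches through the generated class macro rather than calling a device function directly. A minimal caller sketch, illustrative only and not part of the patch:

static MemTxResult example_csu_dma_read(XlnxCSUDMA *dma, hwaddr addr,
                                        uint32_t len)
{
    /* XLNX_CSU_DMA_GET_CLASS() is generated by OBJECT_DECLARE_TYPE() above */
    XlnxCSUDMAClass *xcdc = XLNX_CSU_DMA_GET_CLASS(dma);

    /* MEMTX_OK means all 'len' bytes at 'addr' were transferred */
    return xcdc->read(dma, addr, len);
}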
BIOS must place the entry point(s) at a 16-byte-aligned @@ -193,6 +189,26 @@ struct smbios_type_4 { uint8_t thread_count; uint16_t processor_characteristics; uint16_t processor_family2; + /* SMBIOS spec 3.0.0, Table 21 */ + uint16_t core_count2; + uint16_t core_enabled2; + uint16_t thread_count2; +} QEMU_PACKED; + +typedef enum smbios_type_4_len_ver { + SMBIOS_TYPE_4_LEN_V28 = offsetofend(struct smbios_type_4, + processor_family2), + SMBIOS_TYPE_4_LEN_V30 = offsetofend(struct smbios_type_4, thread_count2), +} smbios_type_4_len_ver; + +/* SMBIOS type 8 - Port Connector Information */ +struct smbios_type_8 { + struct smbios_structure_header header; + uint8_t internal_reference_str; + uint8_t internal_connector_type; + uint8_t external_reference_str; + uint8_t external_connector_type; + uint8_t port_type; } QEMU_PACKED; /* SMBIOS type 11 - OEM strings */ diff --git a/include/hw/gpio/aspeed_gpio.h b/include/hw/gpio/aspeed_gpio.h index e1636ce7fe..904eecf62c 100644 --- a/include/hw/gpio/aspeed_gpio.h +++ b/include/hw/gpio/aspeed_gpio.h @@ -17,9 +17,9 @@ OBJECT_DECLARE_TYPE(AspeedGPIOState, AspeedGPIOClass, ASPEED_GPIO) #define ASPEED_GPIO_MAX_NR_SETS 8 +#define ASPEED_GPIOS_PER_SET 32 #define ASPEED_REGS_PER_BANK 14 #define ASPEED_GPIO_MAX_NR_REGS (ASPEED_REGS_PER_BANK * ASPEED_GPIO_MAX_NR_SETS) -#define ASPEED_GPIO_NR_PINS 228 #define ASPEED_GROUPS_PER_SET 4 #define ASPEED_GPIO_NR_DEBOUNCE_REGS 3 #define ASPEED_CHARS_PER_GROUP_LABEL 4 @@ -50,17 +50,30 @@ enum GPIORegType { gpio_reg_input_mask, }; +/* GPIO index mode */ +enum GPIORegIndexType { + gpio_reg_idx_data = 0, + gpio_reg_idx_direction, + gpio_reg_idx_interrupt, + gpio_reg_idx_debounce, + gpio_reg_idx_tolerance, + gpio_reg_idx_cmd_src, + gpio_reg_idx_input_mask, + gpio_reg_idx_reserved, + gpio_reg_idx_new_w_cmd_src, + gpio_reg_idx_new_r_cmd_src, +}; + typedef struct AspeedGPIOReg { uint16_t set_idx; enum GPIORegType type; - } AspeedGPIOReg; +} AspeedGPIOReg; struct AspeedGPIOClass { SysBusDevice parent_obj; const GPIOSetProperties *props; uint32_t nr_gpio_pins; uint32_t nr_gpio_sets; - uint32_t gap; const AspeedGPIOReg *reg_table; }; @@ -72,7 +85,7 @@ struct AspeedGPIOState { MemoryRegion iomem; int pending; qemu_irq irq; - qemu_irq gpios[ASPEED_GPIO_NR_PINS]; + qemu_irq gpios[ASPEED_GPIO_MAX_NR_SETS][ASPEED_GPIOS_PER_SET]; /* Parallel GPIO Registers */ uint32_t debounce_regs[ASPEED_GPIO_NR_DEBOUNCE_REGS]; @@ -94,4 +107,4 @@ struct AspeedGPIOState { } sets[ASPEED_GPIO_MAX_NR_SETS]; }; -#endif /* _ASPEED_GPIO_H_ */ +#endif /* ASPEED_GPIO_H */ diff --git a/include/hw/hw.h b/include/hw/hw.h index fc5301f293..045c1c8b09 100644 --- a/include/hw/hw.h +++ b/include/hw/hw.h @@ -5,6 +5,6 @@ #error Cannot include hw/hw.h from user emulation #endif -void QEMU_NORETURN hw_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2); +G_NORETURN void hw_error(const char *fmt, ...) 
G_GNUC_PRINTF(1, 2); #endif diff --git a/include/hw/hyperv/hyperv-proto.h b/include/hw/hyperv/hyperv-proto.h index 21dc28aee9..4a2297307b 100644 --- a/include/hw/hyperv/hyperv-proto.h +++ b/include/hw/hyperv/hyperv-proto.h @@ -24,12 +24,17 @@ #define HV_STATUS_INVALID_PORT_ID 17 #define HV_STATUS_INVALID_CONNECTION_ID 18 #define HV_STATUS_INSUFFICIENT_BUFFERS 19 +#define HV_STATUS_NOT_ACKNOWLEDGED 20 +#define HV_STATUS_NO_DATA 27 /* * Hypercall numbers */ #define HV_POST_MESSAGE 0x005c #define HV_SIGNAL_EVENT 0x005d +#define HV_POST_DEBUG_DATA 0x0069 +#define HV_RETRIEVE_DEBUG_DATA 0x006a +#define HV_RESET_DEBUG_SESSION 0x006b #define HV_HYPERCALL_FAST (1u << 16) /* @@ -127,4 +132,51 @@ struct hyperv_event_flags_page { struct hyperv_event_flags slot[HV_SINT_COUNT]; }; +/* + * Kernel debugger structures + */ + +/* Options flags for hyperv_reset_debug_session */ +#define HV_DEBUG_PURGE_INCOMING_DATA 0x00000001 +#define HV_DEBUG_PURGE_OUTGOING_DATA 0x00000002 +struct hyperv_reset_debug_session_input { + uint32_t options; +} __attribute__ ((__packed__)); + +struct hyperv_reset_debug_session_output { + uint32_t host_ip; + uint32_t target_ip; + uint16_t host_port; + uint16_t target_port; + uint8_t host_mac[6]; + uint8_t target_mac[6]; +} __attribute__ ((__packed__)); + +/* Options for hyperv_post_debug_data */ +#define HV_DEBUG_POST_LOOP 0x00000001 + +struct hyperv_post_debug_data_input { + uint32_t count; + uint32_t options; + /*uint8_t data[HV_HYP_PAGE_SIZE - 2 * sizeof(uint32_t)];*/ +} __attribute__ ((__packed__)); + +struct hyperv_post_debug_data_output { + uint32_t pending_count; +} __attribute__ ((__packed__)); + +/* Options for hyperv_retrieve_debug_data */ +#define HV_DEBUG_RETRIEVE_LOOP 0x00000001 +#define HV_DEBUG_RETRIEVE_TEST_ACTIVITY 0x00000002 + +struct hyperv_retrieve_debug_data_input { + uint32_t count; + uint32_t options; + uint64_t timeout; +} __attribute__ ((__packed__)); + +struct hyperv_retrieve_debug_data_output { + uint32_t retrieved_count; + uint32_t remaining_count; +} __attribute__ ((__packed__)); #endif diff --git a/include/hw/hyperv/hyperv.h b/include/hw/hyperv/hyperv.h index a63ee0003c..015c3524b1 100644 --- a/include/hw/hyperv/hyperv.h +++ b/include/hw/hyperv/hyperv.h @@ -81,4 +81,62 @@ void hyperv_synic_update(CPUState *cs, bool enable, hwaddr msg_page_addr, hwaddr event_page_addr); bool hyperv_is_synic_enabled(void); +/* + * Process HVCALL_RESET_DEBUG_SESSION hypercall. + */ +uint16_t hyperv_hcall_reset_dbg_session(uint64_t outgpa); +/* + * Process HVCALL_RETREIVE_DEBUG_DATA hypercall. + */ +uint16_t hyperv_hcall_retreive_dbg_data(uint64_t ingpa, uint64_t outgpa, + bool fast); +/* + * Process HVCALL_POST_DEBUG_DATA hypercall. 
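The kernel-debugger structures added to hyperv-proto.h above describe guest-visible hypercall layouts, which is why every one of them is marked __packed__. Two small illustrations, neither taken from the patch: composing a reset request that purges both directions, and a compile-time check that packing keeps the reset output at its 24-byte wire size.

static void example_fill_reset_request(struct hyperv_reset_debug_session_input *in)
{
    /* drop any queued debug data in both directions */
    in->options = HV_DEBUG_PURGE_INCOMING_DATA | HV_DEBUG_PURGE_OUTGOING_DATA;
}

/* 2 x uint32_t + 2 x uint16_t + 2 x 6-byte MAC = 24 bytes when packed */
QEMU_BUILD_BUG_ON(sizeof(struct hyperv_reset_debug_session_output) != 24);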
+ */ +uint16_t hyperv_hcall_post_dbg_data(uint64_t ingpa, uint64_t outgpa, bool fast); + +uint32_t hyperv_syndbg_send(uint64_t ingpa, uint32_t count); +uint32_t hyperv_syndbg_recv(uint64_t ingpa, uint32_t count); +void hyperv_syndbg_set_pending_page(uint64_t ingpa); +uint64_t hyperv_syndbg_query_options(void); + +typedef enum HvSynthDbgMsgType { + HV_SYNDBG_MSG_CONNECTION_INFO, + HV_SYNDBG_MSG_SEND, + HV_SYNDBG_MSG_RECV, + HV_SYNDBG_MSG_SET_PENDING_PAGE, + HV_SYNDBG_MSG_QUERY_OPTIONS +} HvDbgSynthMsgType; + +typedef struct HvSynDbgMsg { + HvDbgSynthMsgType type; + union { + struct { + uint32_t host_ip; + uint16_t host_port; + } connection_info; + struct { + uint64_t buf_gpa; + uint32_t count; + uint32_t pending_count; + bool is_raw; + } send; + struct { + uint64_t buf_gpa; + uint32_t count; + uint32_t options; + uint64_t timeout; + uint32_t retrieved_count; + bool is_raw; + } recv; + struct { + uint64_t buf_gpa; + } pending_page; + struct { + uint64_t options; + } query_options; + } u; +} HvSynDbgMsg; +typedef uint16_t (*HvSynDbgHandler)(void *context, HvSynDbgMsg *msg); +void hyperv_set_syndbg_handler(HvSynDbgHandler handler, void *context); #endif diff --git a/include/hw/hyperv/vmbus.h b/include/hw/hyperv/vmbus.h index f98bea3888..8ea660dd8e 100644 --- a/include/hw/hyperv/vmbus.h +++ b/include/hw/hyperv/vmbus.h @@ -223,7 +223,4 @@ int vmbus_map_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov, void vmbus_unmap_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov, unsigned iov_cnt, size_t accessed); -void vmbus_save_req(QEMUFile *f, VMBusChanReq *req); -void *vmbus_load_req(QEMUFile *f, VMBusDevice *dev, uint32_t size); - #endif diff --git a/include/hw/i2c/arm_sbcon_i2c.h b/include/hw/i2c/arm_sbcon_i2c.h index ad96781e7a..f54d1e5413 100644 --- a/include/hw/i2c/arm_sbcon_i2c.h +++ b/include/hw/i2c/arm_sbcon_i2c.h @@ -9,8 +9,9 @@ * * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef HW_I2C_ARM_SBCON_H -#define HW_I2C_ARM_SBCON_H + +#ifndef HW_I2C_ARM_SBCON_I2C_H +#define HW_I2C_ARM_SBCON_I2C_H #include "hw/sysbus.h" #include "hw/i2c/bitbang_i2c.h" @@ -34,4 +35,4 @@ struct ArmSbconI2CState { int in; }; -#endif /* HW_I2C_ARM_SBCON_H */ +#endif /* HW_I2C_ARM_SBCON_I2C_H */ diff --git a/include/hw/i2c/aspeed_i2c.h b/include/hw/i2c/aspeed_i2c.h index 565f833066..adc904d6c1 100644 --- a/include/hw/i2c/aspeed_i2c.h +++ b/include/hw/i2c/aspeed_i2c.h @@ -23,38 +23,221 @@ #include "hw/i2c/i2c.h" #include "hw/sysbus.h" +#include "hw/registerfields.h" #include "qom/object.h" #define TYPE_ASPEED_I2C "aspeed.i2c" #define TYPE_ASPEED_2400_I2C TYPE_ASPEED_I2C "-ast2400" #define TYPE_ASPEED_2500_I2C TYPE_ASPEED_I2C "-ast2500" #define TYPE_ASPEED_2600_I2C TYPE_ASPEED_I2C "-ast2600" +#define TYPE_ASPEED_1030_I2C TYPE_ASPEED_I2C "-ast1030" OBJECT_DECLARE_TYPE(AspeedI2CState, AspeedI2CClass, ASPEED_I2C) #define ASPEED_I2C_NR_BUSSES 16 #define ASPEED_I2C_MAX_POOL_SIZE 0x800 +#define ASPEED_I2C_OLD_NUM_REG 11 +#define ASPEED_I2C_NEW_NUM_REG 22 + +/* Tx State Machine */ +#define I2CD_TX_STATE_MASK 0xf +#define I2CD_IDLE 0x0 +#define I2CD_MACTIVE 0x8 +#define I2CD_MSTART 0x9 +#define I2CD_MSTARTR 0xa +#define I2CD_MSTOP 0xb +#define I2CD_MTXD 0xc +#define I2CD_MRXACK 0xd +#define I2CD_MRXD 0xe +#define I2CD_MTXACK 0xf +#define I2CD_SWAIT 0x1 +#define I2CD_SRXD 0x4 +#define I2CD_STXACK 0x5 +#define I2CD_STXD 0x6 +#define I2CD_SRXACK 0x7 +#define I2CD_RECOVER 0x3 + +/* I2C Global Register */ +REG32(I2C_CTRL_STATUS, 0x0) /* Device Interrupt Status */ +REG32(I2C_CTRL_ASSIGN, 0x8) /* Device Interrupt 
Target Assignment */ +REG32(I2C_CTRL_GLOBAL, 0xC) /* Global Control Register */ + FIELD(I2C_CTRL_GLOBAL, REG_MODE, 2, 1) + FIELD(I2C_CTRL_GLOBAL, SRAM_EN, 0, 1) +REG32(I2C_CTRL_NEW_CLK_DIVIDER, 0x10) /* New mode clock divider */ + +/* I2C Old Mode Device (Bus) Register */ +REG32(I2CD_FUN_CTRL, 0x0) /* I2CD Function Control */ + FIELD(I2CD_FUN_CTRL, POOL_PAGE_SEL, 20, 3) /* AST2400 */ + SHARED_FIELD(M_SDA_LOCK_EN, 16, 1) + SHARED_FIELD(MULTI_MASTER_DIS, 15, 1) + SHARED_FIELD(M_SCL_DRIVE_EN, 14, 1) + SHARED_FIELD(MSB_STS, 9, 1) + SHARED_FIELD(SDA_DRIVE_IT_EN, 8, 1) + SHARED_FIELD(M_SDA_DRIVE_IT_EN, 7, 1) + SHARED_FIELD(M_HIGH_SPEED_EN, 6, 1) + SHARED_FIELD(DEF_ADDR_EN, 5, 1) + SHARED_FIELD(DEF_ALERT_EN, 4, 1) + SHARED_FIELD(DEF_ARP_EN, 3, 1) + SHARED_FIELD(DEF_GCALL_EN, 2, 1) + SHARED_FIELD(SLAVE_EN, 1, 1) + SHARED_FIELD(MASTER_EN, 0, 1) +REG32(I2CD_AC_TIMING1, 0x04) /* Clock and AC Timing Control #1 */ +REG32(I2CD_AC_TIMING2, 0x08) /* Clock and AC Timing Control #2 */ +REG32(I2CD_INTR_CTRL, 0x0C) /* I2CD Interrupt Control */ +REG32(I2CD_INTR_STS, 0x10) /* I2CD Interrupt Status */ + SHARED_FIELD(SLAVE_ADDR_MATCH, 31, 1) /* 0: addr1 1: addr2 */ + SHARED_FIELD(SLAVE_ADDR_RX_PENDING, 29, 1) + SHARED_FIELD(SLAVE_INACTIVE_TIMEOUT, 15, 1) + SHARED_FIELD(SDA_DL_TIMEOUT, 14, 1) + SHARED_FIELD(BUS_RECOVER_DONE, 13, 1) + SHARED_FIELD(SMBUS_ALERT, 12, 1) /* Bus [0-3] only */ + FIELD(I2CD_INTR_STS, SMBUS_ARP_ADDR, 11, 1) /* Removed */ + FIELD(I2CD_INTR_STS, SMBUS_DEV_ALERT_ADDR, 10, 1) /* Removed */ + FIELD(I2CD_INTR_STS, SMBUS_DEF_ADDR, 9, 1) /* Removed */ + FIELD(I2CD_INTR_STS, GCALL_ADDR, 8, 1) /* Removed */ + FIELD(I2CD_INTR_STS, SLAVE_ADDR_RX_MATCH, 7, 1) /* use RX_DONE */ + SHARED_FIELD(SCL_TIMEOUT, 6, 1) + SHARED_FIELD(ABNORMAL, 5, 1) + SHARED_FIELD(NORMAL_STOP, 4, 1) + SHARED_FIELD(ARBIT_LOSS, 3, 1) + SHARED_FIELD(RX_DONE, 2, 1) + SHARED_FIELD(TX_NAK, 1, 1) + SHARED_FIELD(TX_ACK, 0, 1) +REG32(I2CD_CMD, 0x14) /* I2CD Command/Status */ + SHARED_FIELD(SDA_OE, 28, 1) + SHARED_FIELD(SDA_O, 27, 1) + SHARED_FIELD(SCL_OE, 26, 1) + SHARED_FIELD(SCL_O, 25, 1) + SHARED_FIELD(TX_TIMING, 23, 2) + SHARED_FIELD(TX_STATE, 19, 4) + SHARED_FIELD(SCL_LINE_STS, 18, 1) + SHARED_FIELD(SDA_LINE_STS, 17, 1) + SHARED_FIELD(BUS_BUSY_STS, 16, 1) + SHARED_FIELD(SDA_OE_OUT_DIR, 15, 1) + SHARED_FIELD(SDA_O_OUT_DIR, 14, 1) + SHARED_FIELD(SCL_OE_OUT_DIR, 13, 1) + SHARED_FIELD(SCL_O_OUT_DIR, 12, 1) + SHARED_FIELD(BUS_RECOVER_CMD_EN, 11, 1) + SHARED_FIELD(S_ALT_EN, 10, 1) + /* Command Bits */ + SHARED_FIELD(RX_DMA_EN, 9, 1) + SHARED_FIELD(TX_DMA_EN, 8, 1) + SHARED_FIELD(RX_BUFF_EN, 7, 1) + SHARED_FIELD(TX_BUFF_EN, 6, 1) + SHARED_FIELD(M_STOP_CMD, 5, 1) + SHARED_FIELD(M_S_RX_CMD_LAST, 4, 1) + SHARED_FIELD(M_RX_CMD, 3, 1) + SHARED_FIELD(S_TX_CMD, 2, 1) + SHARED_FIELD(M_TX_CMD, 1, 1) + SHARED_FIELD(M_START_CMD, 0, 1) +REG32(I2CD_DEV_ADDR, 0x18) /* Slave Device Address */ + SHARED_FIELD(SLAVE_DEV_ADDR1, 0, 7) +REG32(I2CD_POOL_CTRL, 0x1C) /* Pool Buffer Control */ + SHARED_FIELD(RX_COUNT, 24, 5) + SHARED_FIELD(RX_SIZE, 16, 5) + SHARED_FIELD(TX_COUNT, 9, 5) + FIELD(I2CD_POOL_CTRL, OFFSET, 2, 6) /* AST2400 */ +REG32(I2CD_BYTE_BUF, 0x20) /* Transmit/Receive Byte Buffer */ + SHARED_FIELD(RX_BUF, 8, 8) + SHARED_FIELD(TX_BUF, 0, 8) +REG32(I2CD_DMA_ADDR, 0x24) /* DMA Buffer Address */ +REG32(I2CD_DMA_LEN, 0x28) /* DMA Transfer Length < 4KB */ + +/* I2C New Mode Device (Bus) Register */ +REG32(I2CC_FUN_CTRL, 0x0) + FIELD(I2CC_FUN_CTRL, RB_EARLY_DONE_EN, 22, 1) + FIELD(I2CC_FUN_CTRL, DMA_DIS_AUTO_RECOVER, 21, 1) + FIELD(I2CC_FUN_CTRL, 
S_SAVE_ADDR, 20, 1) + FIELD(I2CC_FUN_CTRL, M_PKT_RETRY_CNT, 18, 2) + /* 17:0 shared with I2CD_FUN_CTRL[17:0] */ +REG32(I2CC_AC_TIMING, 0x04) +REG32(I2CC_MS_TXRX_BYTE_BUF, 0x08) + /* 31:16 shared with I2CD_CMD[31:16] */ + /* 15:0 shared with I2CD_BYTE_BUF[15:0] */ +REG32(I2CC_POOL_CTRL, 0x0c) + /* 31:0 shared with I2CD_POOL_CTRL[31:0] */ +REG32(I2CM_INTR_CTRL, 0x10) +REG32(I2CM_INTR_STS, 0x14) + FIELD(I2CM_INTR_STS, PKT_STATE, 28, 4) + FIELD(I2CM_INTR_STS, PKT_CMD_TIMEOUT, 18, 1) + FIELD(I2CM_INTR_STS, PKT_CMD_FAIL, 17, 1) + FIELD(I2CM_INTR_STS, PKT_CMD_DONE, 16, 1) + FIELD(I2CM_INTR_STS, BUS_RECOVER_FAIL, 15, 1) + /* 14:0 shared with I2CD_INTR_STS[14:0] */ +REG32(I2CM_CMD, 0x18) + FIELD(I2CM_CMD, W1_CTRL, 31, 1) + FIELD(I2CM_CMD, PKT_DEV_ADDR, 24, 7) + FIELD(I2CM_CMD, HS_MASTER_MODE_LSB, 17, 3) + FIELD(I2CM_CMD, PKT_OP_EN, 16, 1) + /* 15:0 shared with I2CD_CMD[15:0] */ +REG32(I2CM_DMA_LEN, 0x1c) + FIELD(I2CM_DMA_LEN, RX_BUF_LEN_W1T, 31, 1) + FIELD(I2CM_DMA_LEN, RX_BUF_LEN, 16, 11) + FIELD(I2CM_DMA_LEN, TX_BUF_LEN_W1T, 15, 1) + FIELD(I2CM_DMA_LEN, TX_BUF_LEN, 0, 11) +REG32(I2CS_INTR_CTRL, 0x20) + FIELD(I2CS_INTR_CTRL, PKT_CMD_FAIL, 17, 1) + FIELD(I2CS_INTR_CTRL, PKT_CMD_DONE, 16, 1) +REG32(I2CS_INTR_STS, 0x24) + /* 31:29 shared with I2CD_INTR_STS[31:29] */ + FIELD(I2CS_INTR_STS, SLAVE_PARKING_STS, 24, 2) + FIELD(I2CS_INTR_STS, SLAVE_ADDR3_NAK, 22, 1) + FIELD(I2CS_INTR_STS, SLAVE_ADDR2_NAK, 21, 1) + FIELD(I2CS_INTR_STS, SLAVE_ADDR1_NAK, 20, 1) + FIELD(I2CS_INTR_STS, SLAVE_ADDR_INDICATOR, 18, 2) + FIELD(I2CS_INTR_STS, PKT_CMD_FAIL, 17, 1) + FIELD(I2CS_INTR_STS, PKT_CMD_DONE, 16, 1) + /* 14:0 shared with I2CD_INTR_STS[14:0] */ + FIELD(I2CS_INTR_STS, SLAVE_ADDR_RX_MATCH, 7, 1) +REG32(I2CS_CMD, 0x28) + FIELD(I2CS_CMD, W1_CTRL, 31, 1) + FIELD(I2CS_CMD, PKT_MODE_ACTIVE_ADDR, 17, 2) + FIELD(I2CS_CMD, PKT_MODE_EN, 16, 1) + FIELD(I2CS_CMD, AUTO_NAK_INACTIVE_ADDR, 15, 1) + FIELD(I2CS_CMD, AUTO_NAK_ACTIVE_ADDR, 14, 1) + /* 13:0 shared with I2CD_CMD[13:0] */ +REG32(I2CS_DMA_LEN, 0x2c) + FIELD(I2CS_DMA_LEN, RX_BUF_LEN_W1T, 31, 1) + FIELD(I2CS_DMA_LEN, RX_BUF_LEN, 16, 11) + FIELD(I2CS_DMA_LEN, TX_BUF_LEN_W1T, 15, 1) + FIELD(I2CS_DMA_LEN, TX_BUF_LEN, 0, 11) +REG32(I2CM_DMA_TX_ADDR, 0x30) + FIELD(I2CM_DMA_TX_ADDR, ADDR, 0, 31) +REG32(I2CM_DMA_RX_ADDR, 0x34) + FIELD(I2CM_DMA_RX_ADDR, ADDR, 0, 31) +REG32(I2CS_DMA_TX_ADDR, 0x38) + FIELD(I2CS_DMA_TX_ADDR, ADDR, 0, 31) +REG32(I2CS_DMA_RX_ADDR, 0x3c) + FIELD(I2CS_DMA_RX_ADDR, ADDR, 0, 31) +REG32(I2CS_DEV_ADDR, 0x40) +REG32(I2CM_DMA_LEN_STS, 0x48) + FIELD(I2CM_DMA_LEN_STS, RX_LEN, 16, 13) + FIELD(I2CM_DMA_LEN_STS, TX_LEN, 0, 13) +REG32(I2CS_DMA_LEN_STS, 0x4c) + FIELD(I2CS_DMA_LEN_STS, RX_LEN, 16, 13) + FIELD(I2CS_DMA_LEN_STS, TX_LEN, 0, 13) +REG32(I2CC_DMA_ADDR, 0x50) +REG32(I2CC_DMA_LEN, 0x54) struct AspeedI2CState; -typedef struct AspeedI2CBus { +#define TYPE_ASPEED_I2C_BUS "aspeed.i2c.bus" +OBJECT_DECLARE_SIMPLE_TYPE(AspeedI2CBus, ASPEED_I2C_BUS) +struct AspeedI2CBus { + SysBusDevice parent_obj; + struct AspeedI2CState *controller; + /* slave mode */ + I2CSlave *slave; + MemoryRegion mr; I2CBus *bus; uint8_t id; qemu_irq irq; - uint32_t ctrl; - uint32_t timing[2]; - uint32_t intr_ctrl; - uint32_t intr_status; - uint32_t cmd; - uint32_t buf; - uint32_t pool_ctrl; - uint32_t dma_addr; - uint32_t dma_len; -} AspeedI2CBus; + uint32_t regs[ASPEED_I2C_NEW_NUM_REG]; +}; struct AspeedI2CState { SysBusDevice parent_obj; @@ -64,6 +247,7 @@ struct AspeedI2CState { uint32_t intr_status; uint32_t ctrl_global; + uint32_t new_clk_divider; MemoryRegion pool_iomem; uint8_t 
pool[ASPEED_I2C_MAX_POOL_SIZE]; @@ -72,6 +256,11 @@ struct AspeedI2CState { AddressSpace dram_as; }; +#define TYPE_ASPEED_I2C_BUS_SLAVE "aspeed.i2c.slave" +OBJECT_DECLARE_SIMPLE_TYPE(AspeedI2CBusSlave, ASPEED_I2C_BUS_SLAVE) +struct AspeedI2CBusSlave { + I2CSlave i2c; +}; struct AspeedI2CClass { SysBusDeviceClass parent_class; @@ -89,6 +278,104 @@ struct AspeedI2CClass { }; +static inline bool aspeed_i2c_is_new_mode(AspeedI2CState *s) +{ + return FIELD_EX32(s->ctrl_global, I2C_CTRL_GLOBAL, REG_MODE); +} + +static inline bool aspeed_i2c_bus_pkt_mode_en(AspeedI2CBus *bus) +{ + if (aspeed_i2c_is_new_mode(bus->controller)) { + return ARRAY_FIELD_EX32(bus->regs, I2CM_CMD, PKT_OP_EN); + } + return false; +} + +static inline uint32_t aspeed_i2c_bus_ctrl_offset(AspeedI2CBus *bus) +{ + if (aspeed_i2c_is_new_mode(bus->controller)) { + return R_I2CC_FUN_CTRL; + } + return R_I2CD_FUN_CTRL; +} + +static inline uint32_t aspeed_i2c_bus_cmd_offset(AspeedI2CBus *bus) +{ + if (aspeed_i2c_is_new_mode(bus->controller)) { + return R_I2CM_CMD; + } + return R_I2CD_CMD; +} + +static inline uint32_t aspeed_i2c_bus_dev_addr_offset(AspeedI2CBus *bus) +{ + if (aspeed_i2c_is_new_mode(bus->controller)) { + return R_I2CS_DEV_ADDR; + } + return R_I2CD_DEV_ADDR; +} + +static inline uint32_t aspeed_i2c_bus_intr_ctrl_offset(AspeedI2CBus *bus) +{ + if (aspeed_i2c_is_new_mode(bus->controller)) { + return R_I2CM_INTR_CTRL; + } + return R_I2CD_INTR_CTRL; +} + +static inline uint32_t aspeed_i2c_bus_intr_sts_offset(AspeedI2CBus *bus) +{ + if (aspeed_i2c_is_new_mode(bus->controller)) { + return R_I2CM_INTR_STS; + } + return R_I2CD_INTR_STS; +} + +static inline uint32_t aspeed_i2c_bus_pool_ctrl_offset(AspeedI2CBus *bus) +{ + if (aspeed_i2c_is_new_mode(bus->controller)) { + return R_I2CC_POOL_CTRL; + } + return R_I2CD_POOL_CTRL; +} + +static inline uint32_t aspeed_i2c_bus_byte_buf_offset(AspeedI2CBus *bus) +{ + if (aspeed_i2c_is_new_mode(bus->controller)) { + return R_I2CC_MS_TXRX_BYTE_BUF; + } + return R_I2CD_BYTE_BUF; +} + +static inline uint32_t aspeed_i2c_bus_dma_len_offset(AspeedI2CBus *bus) +{ + if (aspeed_i2c_is_new_mode(bus->controller)) { + return R_I2CC_DMA_LEN; + } + return R_I2CD_DMA_LEN; +} + +static inline uint32_t aspeed_i2c_bus_dma_addr_offset(AspeedI2CBus *bus) +{ + if (aspeed_i2c_is_new_mode(bus->controller)) { + return R_I2CC_DMA_ADDR; + } + return R_I2CD_DMA_ADDR; +} + +static inline bool aspeed_i2c_bus_is_master(AspeedI2CBus *bus) +{ + return SHARED_ARRAY_FIELD_EX32(bus->regs, aspeed_i2c_bus_ctrl_offset(bus), + MASTER_EN); +} + +static inline bool aspeed_i2c_bus_is_enabled(AspeedI2CBus *bus) +{ + uint32_t ctrl_reg = aspeed_i2c_bus_ctrl_offset(bus); + return SHARED_ARRAY_FIELD_EX32(bus->regs, ctrl_reg, MASTER_EN) || + SHARED_ARRAY_FIELD_EX32(bus->regs, ctrl_reg, SLAVE_EN); +} + I2CBus *aspeed_i2c_get_bus(AspeedI2CState *s, int busnr); #endif /* ASPEED_I2C_H */ diff --git a/include/hw/i2c/i2c.h b/include/hw/i2c/i2c.h index 5ca3b708c0..9b9581d230 100644 --- a/include/hw/i2c/i2c.h +++ b/include/hw/i2c/i2c.h @@ -12,6 +12,7 @@ enum i2c_event { I2C_START_RECV, I2C_START_SEND, + I2C_START_SEND_ASYNC, I2C_FINISH, I2C_NACK /* Masker NACKed a receive byte. */ }; @@ -28,6 +29,9 @@ struct I2CSlaveClass { /* Master to slave. Returns non-zero for a NAK, 0 for success. */ int (*send)(I2CSlave *s, uint8_t data); + /* Master to slave (asynchronous). Receiving slave must call i2c_ack(). */ + void (*send_async)(I2CSlave *s, uint8_t data); + /* * Slave to master. This cannot fail, the device should always * return something here. 
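The I2C_START_SEND_ASYNC event and the send_async() class hook added to i2c.h above let a slave consume a byte from a deferred context instead of acking inline; the comment requires the receiving slave to call i2c_ack(), declared alongside the other bus helpers in the same header. A slave-side sketch, with an invented device type and cached bus pointer that are not part of the patch:

typedef struct ExampleSlaveState {
    I2CSlave parent_obj;
    I2CBus *bus;        /* cached by the device at realize time */
    uint8_t latched;
} ExampleSlaveState;

static void example_send_async(I2CSlave *s, uint8_t data)
{
    ExampleSlaveState *es = (ExampleSlaveState *)s;

    es->latched = data;
    /* acknowledge the byte; a real device may defer this to a bottom half */
    i2c_ack(es->bus);
}

static void example_slave_class_init(ObjectClass *klass, void *data)
{
    I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass);

    sc->send_async = example_send_async;
}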
@@ -69,13 +73,25 @@ struct I2CNode { QLIST_ENTRY(I2CNode) next; }; +typedef struct I2CPendingMaster I2CPendingMaster; + +struct I2CPendingMaster { + QEMUBH *bh; + QSIMPLEQ_ENTRY(I2CPendingMaster) entry; +}; + typedef QLIST_HEAD(I2CNodeList, I2CNode) I2CNodeList; +typedef QSIMPLEQ_HEAD(I2CPendingMasters, I2CPendingMaster) I2CPendingMasters; struct I2CBus { BusState qbus; I2CNodeList current_devs; + I2CPendingMasters pending_masters; uint8_t saved_address; bool broadcast; + + /* Set from slave currently mastering the bus. */ + QEMUBH *bh; }; I2CBus *i2c_init_bus(DeviceState *parent, const char *name); @@ -115,9 +131,23 @@ int i2c_start_recv(I2CBus *bus, uint8_t address); */ int i2c_start_send(I2CBus *bus, uint8_t address); +/** + * i2c_start_send_async: start an asynchronous 'send' transfer on an I2C bus. + * + * @bus: #I2CBus to be used + * @address: address of the slave + * + * Return: 0 on success, -1 on error + */ +int i2c_start_send_async(I2CBus *bus, uint8_t address); + void i2c_end_transfer(I2CBus *bus); void i2c_nack(I2CBus *bus); +void i2c_ack(I2CBus *bus); +void i2c_bus_master(I2CBus *bus, QEMUBH *bh); +void i2c_bus_release(I2CBus *bus); int i2c_send(I2CBus *bus, uint8_t data); +int i2c_send_async(I2CBus *bus, uint8_t data); uint8_t i2c_recv(I2CBus *bus); bool i2c_scan_bus(I2CBus *bus, uint8_t address, bool broadcast, I2CNodeList *current_devs); diff --git a/include/hw/i2c/i2c_mux_pca954x.h b/include/hw/i2c/i2c_mux_pca954x.h index 8aaf9bbc39..3dd25ec983 100644 --- a/include/hw/i2c/i2c_mux_pca954x.h +++ b/include/hw/i2c/i2c_mux_pca954x.h @@ -1,5 +1,5 @@ -#ifndef QEMU_I2C_MUX_PCA954X -#define QEMU_I2C_MUX_PCA954X +#ifndef QEMU_I2C_MUX_PCA954X_H +#define QEMU_I2C_MUX_PCA954X_H #include "hw/i2c/i2c.h" diff --git a/include/hw/i2c/pmbus_device.h b/include/hw/i2c/pmbus_device.h index 62bd38c83f..93f5d57c9d 100644 --- a/include/hw/i2c/pmbus_device.h +++ b/include/hw/i2c/pmbus_device.h @@ -43,6 +43,7 @@ enum pmbus_registers { PMBUS_VOUT_DROOP = 0x28, /* R/W word */ PMBUS_VOUT_SCALE_LOOP = 0x29, /* R/W word */ PMBUS_VOUT_SCALE_MONITOR = 0x2A, /* R/W word */ + PMBUS_VOUT_MIN = 0x2B, /* R/W word */ PMBUS_COEFFICIENTS = 0x30, /* Read-only block 5 bytes */ PMBUS_POUT_MAX = 0x31, /* R/W word */ PMBUS_MAX_DUTY = 0x32, /* R/W word */ @@ -154,6 +155,7 @@ enum pmbus_registers { PMBUS_MFR_MAX_TEMP_1 = 0xC0, /* R/W word */ PMBUS_MFR_MAX_TEMP_2 = 0xC1, /* R/W word */ PMBUS_MFR_MAX_TEMP_3 = 0xC2, /* R/W word */ + PMBUS_IDLE_STATE = 0xFF, }; /* STATUS_WORD */ @@ -227,6 +229,8 @@ enum pmbus_registers { #define PB_MAX_PAGES 0x1F #define PB_ALL_PAGES 0xFF +#define PMBUS_ERR_BYTE 0xFF + #define TYPE_PMBUS_DEVICE "pmbus-device" OBJECT_DECLARE_TYPE(PMBusDevice, PMBusDeviceClass, PMBUS_DEVICE) @@ -255,6 +259,7 @@ OBJECT_DECLARE_TYPE(PMBusDevice, PMBusDeviceClass, #define PB_HAS_TEMP3 BIT_ULL(42) #define PB_HAS_TEMP_RATING BIT_ULL(43) #define PB_HAS_MFR_INFO BIT_ULL(50) +#define PB_HAS_STATUS_MFR_SPECIFIC BIT_ULL(51) struct PMBusDeviceClass { SMBusDeviceClass parent_class; @@ -295,6 +300,7 @@ typedef struct PMBusPage { uint16_t vout_droop; /* R/W word */ uint16_t vout_scale_loop; /* R/W word */ uint16_t vout_scale_monitor; /* R/W word */ + uint16_t vout_min; /* R/W word */ uint8_t coefficients[5]; /* Read-only block 5 bytes */ uint16_t pout_max; /* R/W word */ uint16_t max_duty; /* R/W word */ @@ -443,7 +449,7 @@ typedef struct PMBusCoefficients { * * Y = (m * x - b) * 10^R * - * @return uint32_t + * @return uint16_t */ uint16_t pmbus_data2direct_mode(PMBusCoefficients c, uint32_t value); @@ -456,6 +462,24 @@ 
uint16_t pmbus_data2direct_mode(PMBusCoefficients c, uint32_t value); */ uint32_t pmbus_direct_mode2data(PMBusCoefficients c, uint16_t value); +/** + * Convert sensor values to linear mode format + * + * L = D * 2^(-e) + * + * @return uint16 + */ +uint16_t pmbus_data2linear_mode(uint16_t value, int exp); + +/** + * Convert linear mode formatted data into sensor reading + * + * D = L * 2^e + * + * @return uint16 + */ +uint16_t pmbus_linear_mode2data(uint16_t value, int exp); + /** * @brief Send a block of data over PMBus * Assumes that the bytes in the block are already ordered correctly, @@ -504,6 +528,12 @@ int pmbus_page_config(PMBusDevice *pmdev, uint8_t page_index, uint64_t flags); */ void pmbus_check_limits(PMBusDevice *pmdev); +/** + * Enter an idle state where only the PMBUS_ERR_BYTE will be returned + * indefinitely until a new command is issued. + */ +void pmbus_idle(PMBusDevice *pmdev); + extern const VMStateDescription vmstate_pmbus_device; #define VMSTATE_PMBUS_DEVICE(_field, _state) { \ diff --git a/include/hw/i386/hostmem-epc.h b/include/hw/i386/hostmem-epc.h new file mode 100644 index 0000000000..846c726085 --- /dev/null +++ b/include/hw/i386/hostmem-epc.h @@ -0,0 +1,28 @@ +/* + * SGX EPC backend + * + * Copyright (C) 2019 Intel Corporation + * + * Authors: + * Sean Christopherson + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#ifndef QEMU_HOSTMEM_EPC_H +#define QEMU_HOSTMEM_EPC_H + +#include "sysemu/hostmem.h" + +#define TYPE_MEMORY_BACKEND_EPC "memory-backend-epc" + +#define MEMORY_BACKEND_EPC(obj) \ + OBJECT_CHECK(HostMemoryBackendEpc, (obj), TYPE_MEMORY_BACKEND_EPC) + +typedef struct HostMemoryBackendEpc HostMemoryBackendEpc; + +struct HostMemoryBackendEpc { + HostMemoryBackend parent_obj; +}; + +#endif diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h index 41783ee46d..46d973e629 100644 --- a/include/hw/i386/intel_iommu.h +++ b/include/hw/i386/intel_iommu.h @@ -58,7 +58,6 @@ typedef struct VTDContextEntry VTDContextEntry; typedef struct VTDContextCacheEntry VTDContextCacheEntry; typedef struct VTDAddressSpace VTDAddressSpace; typedef struct VTDIOTLBEntry VTDIOTLBEntry; -typedef struct VTDBus VTDBus; typedef union VTD_IR_TableEntry VTD_IR_TableEntry; typedef union VTD_IR_MSIAddress VTD_IR_MSIAddress; typedef struct VTDPASIDDirEntry VTDPASIDDirEntry; @@ -98,11 +97,13 @@ struct VTDPASIDEntry { struct VTDAddressSpace { PCIBus *bus; uint8_t devfn; + uint32_t pasid; AddressSpace as; IOMMUMemoryRegion iommu; MemoryRegion root; /* The root container of the device */ MemoryRegion nodmar; /* The alias of shared nodmar MR */ MemoryRegion iommu_ir; /* Interrupt region: 0xfeeXXXXX */ + MemoryRegion iommu_ir_fault; /* Interrupt region for catching fault */ IntelIOMMUState *iommu_state; VTDContextCacheEntry context_cache_entry; QLIST_ENTRY(VTDAddressSpace) next; @@ -111,15 +112,10 @@ struct VTDAddressSpace { IOVATree *iova_tree; /* Traces mapped IOVA ranges */ }; -struct VTDBus { - PCIBus* bus; /* A reference to the bus to provide translation for */ - /* A table of VTDAddressSpace objects indexed by devfn */ - VTDAddressSpace *dev_as[]; -}; - struct VTDIOTLBEntry { uint64_t gfn; uint16_t domain_id; + uint32_t pasid; uint64_t slpte; uint64_t mask; uint8_t access_flags; @@ -145,7 +141,7 @@ enum { /* Interrupt Remapping Table Entry Definition */ union VTD_IR_TableEntry { struct { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN uint32_t __reserved_1:8; /* Reserved 1 */ 
uint32_t vector:8; /* Interrupt Vector */ uint32_t irte_mode:1; /* IRTE Mode */ @@ -172,7 +168,7 @@ union VTD_IR_TableEntry { #endif uint32_t dest_id; /* Destination ID */ uint16_t source_id; /* Source-ID */ -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN uint64_t __reserved_2:44; /* Reserved 2 */ uint64_t sid_vtype:2; /* Source-ID Validation Type */ uint64_t sid_q:2; /* Source-ID Qualifier */ @@ -191,7 +187,7 @@ union VTD_IR_TableEntry { /* Programming format for MSI/MSI-X addresses */ union VTD_IR_MSIAddress { struct { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN uint32_t __head:12; /* Should always be: 0x0fee */ uint32_t index_l:15; /* Interrupt index bit 14-0 */ uint32_t int_mode:1; /* Interrupt format */ @@ -228,6 +224,7 @@ struct IntelIOMMUState { bool caching_mode; /* RO - is cap CM enabled? */ bool scalable_mode; /* RO - is Scalable Mode supported? */ + bool snoop_control; /* RO - is SNP filed supported? */ dma_addr_t root; /* Current root table pointer */ bool root_scalable; /* Type of root table (scalable or not) */ @@ -252,8 +249,8 @@ struct IntelIOMMUState { uint32_t context_cache_gen; /* Should be in [1,MAX] */ GHashTable *iotlb; /* IOTLB */ - GHashTable *vtd_as_by_busptr; /* VTDBus objects indexed by PCIBus* reference */ - VTDBus *vtd_as_by_bus_num[VTD_PCI_BUS_MAX]; /* VTDBus objects indexed by bus number */ + GHashTable *vtd_address_spaces; /* VTD address spaces */ + VTDAddressSpace *vtd_as_cache[VTD_PCI_BUS_MAX]; /* VTD address space cache */ /* list of registered notifiers */ QLIST_HEAD(, VTDAddressSpace) vtd_as_with_notifiers; @@ -266,6 +263,8 @@ struct IntelIOMMUState { bool buggy_eim; /* Force buggy EIM unless eim=off */ uint8_t aw_bits; /* Host/IOVA address width (in bits) */ bool dma_drain; /* Whether DMA r/w draining enabled */ + bool dma_translation; /* Whether DMA translation supported */ + bool pasid; /* Whether to support PASID */ /* * Protects IOMMU states in general. 
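Looking back at the pmbus_device.h hunk above: the new linear-format helpers encode a sensor value D as L = D * 2^(-e) and decode it with D = L * 2^e, so a value survives a round trip as long as no bits are shifted out. A worked sketch; the value 49 and exponent -2 are arbitrary choices for illustration:

static void example_pmbus_linear_round_trip(void)
{
    /* encode: L = 49 * 2^(-(-2)) = 49 * 4 = 196 */
    uint16_t linear = pmbus_data2linear_mode(49, -2);
    /* decode: D = 196 * 2^(-2) = 196 / 4 = 49 */
    uint16_t data = pmbus_linear_mode2data(linear, -2);

    g_assert(data == 49);    /* glib assertion, as used throughout QEMU */
}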
Currently it protects the @@ -277,6 +276,7 @@ struct IntelIOMMUState { /* Find the VTD Address space associated with the given bus pointer, * create a new one if none exists */ -VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn); +VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, + int devfn, unsigned int pasid); #endif diff --git a/include/hw/i386/ioapic_internal.h b/include/hw/i386/ioapic_internal.h index 021e715f11..9880443cc7 100644 --- a/include/hw/i386/ioapic_internal.h +++ b/include/hw/i386/ioapic_internal.h @@ -112,7 +112,6 @@ struct IOAPICCommonState { void ioapic_reset_common(DeviceState *dev); -void ioapic_print_redtbl(Monitor *mon, IOAPICCommonState *s); void ioapic_stat_update_irq(IOAPICCommonState *s, int irq, int level); #endif /* QEMU_IOAPIC_INTERNAL_H */ diff --git a/include/hw/i386/microvm.h b/include/hw/i386/microvm.h index f25f837441..fad97a891d 100644 --- a/include/hw/i386/microvm.h +++ b/include/hw/i386/microvm.h @@ -18,7 +18,6 @@ #ifndef HW_I386_MICROVM_H #define HW_I386_MICROVM_H -#include "qemu-common.h" #include "exec/hwaddr.h" #include "qemu/notify.h" @@ -68,8 +67,6 @@ #define PCIE_ECAM_SIZE 0x10000000 /* Machine type options */ -#define MICROVM_MACHINE_PIT "pit" -#define MICROVM_MACHINE_PIC "pic" #define MICROVM_MACHINE_RTC "rtc" #define MICROVM_MACHINE_PCIE "pcie" #define MICROVM_MACHINE_IOAPIC2 "ioapic2" @@ -87,8 +84,6 @@ struct MicrovmMachineState { X86MachineState parent; /* Machine type options */ - OnOffAuto pic; - OnOffAuto pit; OnOffAuto rtc; OnOffAuto pcie; OnOffAuto ioapic2; @@ -104,6 +99,10 @@ struct MicrovmMachineState { Notifier machine_done; Notifier powerdown_req; struct GPEXConfig gpex; + + /* device tree */ + void *fdt; + uint32_t ioapic_phandle[2]; }; #define TYPE_MICROVM_MACHINE MACHINE_TYPE_NAME("microvm") diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h index 88dffe7517..0c76e82626 100644 --- a/include/hw/i386/pc.h +++ b/include/hw/i386/pc.h @@ -12,6 +12,9 @@ #include "hw/acpi/acpi_dev_interface.h" #include "hw/hotplug.h" #include "qom/object.h" +#include "hw/i386/sgx-epc.h" +#include "hw/firmware/smbios.h" +#include "hw/cxl/cxl.h" #define HPET_INTCAP "hpet-intcap" @@ -34,25 +37,26 @@ typedef struct PCMachineState { I2CBus *smbus; PFlashCFI01 *flash[2]; ISADevice *pcspk; + DeviceState *iommu; /* Configuration options: */ uint64_t max_ram_below_4g; OnOffAuto vmport; + SmbiosEntryPointType smbios_entry_point_type; bool acpi_build_enabled; bool smbus_enabled; bool sata_enabled; - bool pit_enabled; bool hpet_enabled; + bool i8042_enabled; bool default_bus_bypass_iommu; uint64_t max_fw_size; - /* NUMA information: */ - uint64_t numa_nodes; - uint64_t *node_mem; - /* ACPI Memory hotplug IO base address */ hwaddr memhp_io_base; + + SGXEPCState sgx_epc; + CXLState cxl_devices_state; } PCMachineState; #define PC_MACHINE_ACPI_DEVICE_PROP "acpi-device" @@ -61,8 +65,10 @@ typedef struct PCMachineState { #define PC_MACHINE_VMPORT "vmport" #define PC_MACHINE_SMBUS "smbus" #define PC_MACHINE_SATA "sata" -#define PC_MACHINE_PIT "pit" +#define PC_MACHINE_I8042 "i8042" #define PC_MACHINE_MAX_FW_SIZE "max-fw-size" +#define PC_MACHINE_SMBIOS_EP "smbios-entry-point-type" + /** * PCMachineClass: * @@ -100,7 +106,6 @@ struct PCMachineClass { bool rsdp_in_ram; int legacy_acpi_table_size; unsigned acpi_data_size; - bool do_not_add_smb_acpi; int pci_root_uid; /* SMBIOS compat: */ @@ -113,13 +118,11 @@ struct PCMachineClass { bool has_reserved_memory; bool enforce_aligned_dimm; bool broken_reserved_end; + bool 
enforce_amd_1tb_hole; /* generate legacy CPU hotplug AML */ bool legacy_cpu_hotplug; - /* use DMA capable linuxboot option rom */ - bool linuxboot_dma_enabled; - /* use PVH to load kernels that support this feature */ bool pvh_enabled; @@ -157,7 +160,8 @@ void xen_load_linux(PCMachineState *pcms); void pc_memory_init(PCMachineState *pcms, MemoryRegion *system_memory, MemoryRegion *rom_memory, - MemoryRegion **ram_memory); + MemoryRegion **ram_memory, + uint64_t pci_hole64_size); uint64_t pc_pci_hole64_start(void); DeviceState *pc_vga_init(ISABus *isa_bus, PCIBus *pci_bus); void pc_basic_device_init(struct PCMachineState *pcms, @@ -165,19 +169,13 @@ void pc_basic_device_init(struct PCMachineState *pcms, ISADevice **rtc_state, bool create_fdctrl, uint32_t hpet_irqs); -void pc_init_ne2k_isa(ISABus *bus, NICInfo *nd); void pc_cmos_init(PCMachineState *pcms, BusState *ide0, BusState *ide1, ISADevice *s); void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus); -void pc_pci_device_init(PCIBus *pci_bus); - -typedef void (*cpu_set_smm_t)(int smm, void *arg); void pc_i8259_create(ISABus *isa_bus, qemu_irq *i8259_irqs); -ISADevice *pc_find_fdc0(void); - /* port92.c */ #define PORT92_A20_LINE "a20" @@ -191,10 +189,25 @@ bool pc_system_ovmf_table_find(const char *entry, uint8_t **data, int *data_len); void pc_system_parse_ovmf_flash(uint8_t *flash_ptr, size_t flash_size); - -/* acpi-build.c */ +/* hw/i386/acpi-common.c */ void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid, - const CPUArchIdList *apic_ids, GArray *entry); + const CPUArchIdList *apic_ids, GArray *entry, + bool force_enabled); + +/* sgx.c */ +void pc_machine_init_sgx_epc(PCMachineState *pcms); + +extern GlobalProperty pc_compat_7_1[]; +extern const size_t pc_compat_7_1_len; + +extern GlobalProperty pc_compat_7_0[]; +extern const size_t pc_compat_7_0_len; + +extern GlobalProperty pc_compat_6_2[]; +extern const size_t pc_compat_6_2_len; + +extern GlobalProperty pc_compat_6_1[]; +extern const size_t pc_compat_6_1_len; extern GlobalProperty pc_compat_6_0[]; extern const size_t pc_compat_6_0_len; @@ -274,14 +287,6 @@ extern const size_t pc_compat_1_5_len; extern GlobalProperty pc_compat_1_4[]; extern const size_t pc_compat_1_4_len; -/* Helper for setting model-id for CPU models that changed model-id - * depending on QEMU versions up to QEMU 2.4. - */ -#define PC_CPU_MODEL_IDS(v) \ - { "qemu32-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },\ - { "qemu64-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },\ - { "athlon-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, }, - #define DEFINE_PC_MACHINE(suffix, namestr, initfn, optsfn) \ static void pc_machine_##suffix##_class_init(ObjectClass *oc, void *data) \ { \ @@ -300,5 +305,4 @@ extern const size_t pc_compat_1_4_len; } \ type_init(pc_machine_init_##suffix) -extern void igd_passthrough_isa_bridge_create(PCIBus *bus, uint16_t gpu_dev_id); #endif diff --git a/include/hw/i386/sgx-epc.h b/include/hw/i386/sgx-epc.h new file mode 100644 index 0000000000..581fac389a --- /dev/null +++ b/include/hw/i386/sgx-epc.h @@ -0,0 +1,70 @@ +/* + * SGX EPC device + * + * Copyright (C) 2019 Intel Corporation + * + * Authors: + * Sean Christopherson + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
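The DEFINE_PC_MACHINE() macro kept in the pc.h hunk above stamps out the class-init and type-registration boilerplate for a PC machine type; a board file only supplies an options callback (MachineClass tweaks) and an init callback (board wiring). A hypothetical use, with all names invented for illustration:

static void example_pc_machine_options(MachineClass *m)
{
    m->desc = "Example PC machine";
}

static void example_pc_machine_init(MachineState *machine)
{
    /* memory, firmware and device wiring would go here */
}

DEFINE_PC_MACHINE(example, "pc-example", example_pc_machine_init,
                  example_pc_machine_options);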
+ */ +#ifndef QEMU_SGX_EPC_H +#define QEMU_SGX_EPC_H + +#include "hw/i386/hostmem-epc.h" + +#define TYPE_SGX_EPC "sgx-epc" +#define SGX_EPC(obj) \ + OBJECT_CHECK(SGXEPCDevice, (obj), TYPE_SGX_EPC) +#define SGX_EPC_CLASS(oc) \ + OBJECT_CLASS_CHECK(SGXEPCDeviceClass, (oc), TYPE_SGX_EPC) +#define SGX_EPC_GET_CLASS(obj) \ + OBJECT_GET_CLASS(SGXEPCDeviceClass, (obj), TYPE_SGX_EPC) + +#define SGX_EPC_ADDR_PROP "addr" +#define SGX_EPC_SIZE_PROP "size" +#define SGX_EPC_MEMDEV_PROP "memdev" +#define SGX_EPC_NUMA_NODE_PROP "node" + +/** + * SGXEPCDevice: + * @addr: starting guest physical address, where @SGXEPCDevice is mapped. + * Default value: 0, means that address is auto-allocated. + * @hostmem: host memory backend providing memory for @SGXEPCDevice + */ +typedef struct SGXEPCDevice { + /* private */ + DeviceState parent_obj; + + /* public */ + uint64_t addr; + uint32_t node; + HostMemoryBackendEpc *hostmem; +} SGXEPCDevice; + +/* + * @base: address in guest physical address space where EPC regions start + * @mr: address space container for memory devices + */ +typedef struct SGXEPCState { + uint64_t base; + uint64_t size; + + MemoryRegion mr; + + struct SGXEPCDevice **sections; + int nr_sections; +} SGXEPCState; + +bool sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size); +void sgx_epc_build_srat(GArray *table_data); + +static inline uint64_t sgx_epc_above_4g_end(SGXEPCState *sgx_epc) +{ + assert(sgx_epc != NULL && sgx_epc->base >= 0x100000000ULL); + + return sgx_epc->base + sgx_epc->size; +} + +#endif diff --git a/include/hw/i386/x86-iommu.h b/include/hw/i386/x86-iommu.h index 9de92d33a1..7637edb430 100644 --- a/include/hw/i386/x86-iommu.h +++ b/include/hw/i386/x86-iommu.h @@ -33,12 +33,6 @@ OBJECT_DECLARE_TYPE(X86IOMMUState, X86IOMMUClass, X86_IOMMU_DEVICE) typedef struct X86IOMMUIrq X86IOMMUIrq; typedef struct X86IOMMU_MSIMessage X86IOMMU_MSIMessage; -typedef enum IommuType { - TYPE_INTEL, - TYPE_AMD, - TYPE_NONE -} IommuType; - struct X86IOMMUClass { SysBusDeviceClass parent; /* Intel/AMD specific realize() hook */ @@ -71,7 +65,6 @@ struct X86IOMMUState { OnOffAuto intr_supported; /* Whether vIOMMU supports IR */ bool dt_supported; /* Whether vIOMMU supports DT */ bool pt_supported; /* Whether vIOMMU supports pass-through */ - IommuType type; /* IOMMU type - AMD/Intel */ QLIST_HEAD(, IEC_Notifier) iec_notifiers; /* IEC notify list */ }; @@ -94,7 +87,7 @@ struct X86IOMMUIrq { struct X86IOMMU_MSIMessage { union { struct { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN uint32_t __addr_head:12; /* 0xfee */ uint32_t dest:8; uint32_t __reserved:8; @@ -115,7 +108,7 @@ struct X86IOMMU_MSIMessage { }; union { struct { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN uint16_t trigger_mode:1; uint16_t level:1; uint16_t __resved:3; @@ -140,11 +133,6 @@ struct X86IOMMU_MSIMessage { */ X86IOMMUState *x86_iommu_get_default(void); -/* - * x86_iommu_get_type - get IOMMU type - */ -IommuType x86_iommu_get_type(void); - /** * x86_iommu_iec_register_notifier - register IEC (Interrupt Entry * Cache) notifiers diff --git a/include/hw/i386/x86.h b/include/hw/i386/x86.h index 6e9244a82c..df82c5fd42 100644 --- a/include/hw/i386/x86.h +++ b/include/hw/i386/x86.h @@ -17,7 +17,6 @@ #ifndef HW_I386_X86_H #define HW_I386_X86_H -#include "qemu-common.h" #include "exec/hwaddr.h" #include "qemu/notify.h" @@ -36,8 +35,8 @@ struct X86MachineClass { /* TSC rate migration: */ bool save_tsc_khz; - /* Enables contiguous-apic-ID mode */ - bool compat_apic_id_mode; + /* use DMA capable linuxboot option rom */ 
+ bool fwcfg_dma_enabled; }; struct X86MachineState { @@ -57,14 +56,20 @@ struct X86MachineState { /* RAM information (sizes, addresses, configuration): */ ram_addr_t below_4g_mem_size, above_4g_mem_size; + /* Start address of the initial RAM above 4G */ + uint64_t above_4g_mem_start; + /* CPU and apic information: */ bool apic_xrupt_override; unsigned pci_irq_mask; unsigned apic_id_limit; uint16_t boot_cpus; + SgxEPCList *sgx_epc_list; OnOffAuto smm; OnOffAuto acpi; + OnOffAuto pit; + OnOffAuto pic; char *oem_id; char *oem_table_id; @@ -84,6 +89,8 @@ struct X86MachineState { #define X86_MACHINE_SMM "smm" #define X86_MACHINE_ACPI "acpi" +#define X86_MACHINE_PIT "pit" +#define X86_MACHINE_PIC "pic" #define X86_MACHINE_OEM_ID "x-oem-id" #define X86_MACHINE_OEM_TABLE_ID "x-oem-table-id" #define X86_MACHINE_BUS_LOCK_RATELIMIT "bus-lock-ratelimit" @@ -119,8 +126,7 @@ void x86_bios_rom_init(MachineState *ms, const char *default_firmware, void x86_load_linux(X86MachineState *x86ms, FWCfgState *fw_cfg, int acpi_data_size, - bool pvh_enabled, - bool linuxboot_dma_enabled); + bool pvh_enabled); bool x86_machine_is_smm_enabled(const X86MachineState *x86ms); bool x86_machine_is_acpi_enabled(const X86MachineState *x86ms); @@ -141,4 +147,7 @@ void gsi_handler(void *opaque, int n, int level); void ioapic_init_gsi(GSIState *gsi_state, const char *parent_name); DeviceState *ioapic_init_secondary(GSIState *gsi_state); +/* pc_sysfw.c */ +void x86_firmware_configure(void *ptr, int size); + #endif diff --git a/include/hw/ide.h b/include/hw/ide.h index c5ce5da4f4..60f1f4f714 100644 --- a/include/hw/ide.h +++ b/include/hw/ide.h @@ -8,9 +8,6 @@ ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int isairq, DriveInfo *hd0, DriveInfo *hd1); -/* ide-pci.c */ -int pci_piix3_xen_ide_unplug(DeviceState *dev, bool aux); - /* ide-mmio.c */ void mmio_ide_init_drives(DeviceState *dev, DriveInfo *hd0, DriveInfo *hd1); diff --git a/include/hw/ide/internal.h b/include/hw/ide/internal.h index d00b02c920..9e9336a96c 100644 --- a/include/hw/ide/internal.h +++ b/include/hw/ide/internal.h @@ -375,6 +375,7 @@ struct IDEState { uint8_t unit; /* ide config */ IDEDriveKind drive_kind; + int drive_heads, drive_sectors; int cylinders, heads, sectors, chs_trans; int64_t nb_sectors; int mult_sectors; @@ -401,6 +402,8 @@ struct IDEState { uint8_t select; uint8_t status; + bool reset_reverts; + /* set for lba48 access */ uint8_t lba48; BlockBackend *blk; @@ -649,8 +652,8 @@ void ide_atapi_cmd(IDEState *s); void ide_atapi_cmd_reply_end(IDEState *s); /* hw/ide/qdev.c */ -void ide_bus_new(IDEBus *idebus, size_t idebus_size, DeviceState *dev, - int bus_id, int max_units); +void ide_bus_init(IDEBus *idebus, size_t idebus_size, DeviceState *dev, + int bus_id, int max_units); IDEDevice *ide_create_drive(IDEBus *bus, int unit, DriveInfo *drive); int ide_handle_rw_error(IDEState *s, int error, int op); diff --git a/include/hw/ide/piix.h b/include/hw/ide/piix.h new file mode 100644 index 0000000000..ef3ef3d62d --- /dev/null +++ b/include/hw/ide/piix.h @@ -0,0 +1,7 @@ +#ifndef HW_IDE_PIIX_H +#define HW_IDE_PIIX_H + +#define TYPE_PIIX3_IDE "piix3-ide" +#define TYPE_PIIX4_IDE "piix4-ide" + +#endif /* HW_IDE_PIIX_H */ diff --git a/include/hw/input/i8042.h b/include/hw/input/i8042.h index 1d90432dae..9fb3f8d787 100644 --- a/include/hw/input/i8042.h +++ b/include/hw/input/i8042.h @@ -9,18 +9,101 @@ #define HW_INPUT_I8042_H #include "hw/isa/isa.h" +#include "hw/sysbus.h" +#include "hw/input/ps2.h" #include "qom/object.h" +#define I8042_KBD_IRQ 0 
+#define I8042_MOUSE_IRQ 1 + +typedef struct KBDState { + uint8_t write_cmd; /* if non zero, write data to port 60 is expected */ + uint8_t status; + uint8_t mode; + uint8_t outport; + uint32_t migration_flags; + uint32_t obsrc; + bool outport_present; + bool extended_state; + bool extended_state_loaded; + /* Bitmask of devices with data available. */ + uint8_t pending; + uint8_t obdata; + uint8_t cbdata; + uint8_t pending_tmp; + PS2KbdState ps2kbd; + PS2MouseState ps2mouse; + QEMUTimer *throttle_timer; + + qemu_irq irqs[2]; + qemu_irq a20_out; + hwaddr mask; +} KBDState; + +/* + * QEMU interface: + * + Named GPIO input "ps2-kbd-input-irq": set to 1 if the downstream PS2 + * keyboard device has asserted its irq + * + Named GPIO input "ps2-mouse-input-irq": set to 1 if the downstream PS2 + * mouse device has asserted its irq + * + Named GPIO output "a20": A20 line for x86 PCs + * + Unnamed GPIO output 0-1: i8042 output irqs for keyboard (0) or mouse (1) + */ + #define TYPE_I8042 "i8042" OBJECT_DECLARE_SIMPLE_TYPE(ISAKBDState, I8042) +struct ISAKBDState { + ISADevice parent_obj; + + KBDState kbd; + bool kbd_throttle; + MemoryRegion io[2]; + uint8_t kbd_irq; + uint8_t mouse_irq; +}; + +/* + * QEMU interface: + * + sysbus MMIO region 0: MemoryRegion defining the command/status/data + * registers (access determined by mask property and access type) + * + Named GPIO input "ps2-kbd-input-irq": set to 1 if the downstream PS2 + * keyboard device has asserted its irq + * + Named GPIO input "ps2-mouse-input-irq": set to 1 if the downstream PS2 + * mouse device has asserted its irq + * + Unnamed GPIO output 0-1: i8042 output irqs for keyboard (0) or mouse (1) + */ + +#define TYPE_I8042_MMIO "i8042-mmio" +OBJECT_DECLARE_SIMPLE_TYPE(MMIOKBDState, I8042_MMIO) + +struct MMIOKBDState { + SysBusDevice parent_obj; + + KBDState kbd; + uint32_t size; + MemoryRegion region; +}; + #define I8042_A20_LINE "a20" -void i8042_mm_init(qemu_irq kbd_irq, qemu_irq mouse_irq, - MemoryRegion *region, ram_addr_t size, - hwaddr mask); void i8042_isa_mouse_fake_event(ISAKBDState *isa); void i8042_setup_a20_line(ISADevice *dev, qemu_irq a20_out); +static inline bool i8042_present(void) +{ + bool amb = false; + return object_resolve_path_type("", TYPE_I8042, &amb) || amb; +} + +/* + * ACPI v2, Table 5-10 - Fixed ACPI Description Table Boot Architecture + * Flags, bit offset 1 - 8042. + */ +static inline uint16_t iapc_boot_arch_8042(void) +{ + return i8042_present() ? 
0x1 << 1 : 0x0 ; +} + #endif /* HW_INPUT_I8042_H */ diff --git a/include/hw/input/lasips2.h b/include/hw/input/lasips2.h index 0cd7b59064..01911c50f9 100644 --- a/include/hw/input/lasips2.h +++ b/include/hw/input/lasips2.h @@ -4,13 +4,77 @@ * Copyright (c) 2019 Sven Schnelle * */ + +/* + * QEMU interface: + * + sysbus MMIO region 0: MemoryRegion defining the LASI PS2 keyboard + * registers + * + sysbus MMIO region 1: MemoryRegion defining the LASI PS2 mouse + * registers + * + sysbus IRQ 0: LASI PS2 output irq + * + Named GPIO input "lasips2-port-input-irq[0..1]": set to 1 if the downstream + * LASIPS2Port has asserted its irq + */ + #ifndef HW_INPUT_LASIPS2_H #define HW_INPUT_LASIPS2_H #include "exec/hwaddr.h" +#include "hw/sysbus.h" +#include "hw/input/ps2.h" + +#define TYPE_LASIPS2_PORT "lasips2-port" +OBJECT_DECLARE_TYPE(LASIPS2Port, LASIPS2PortDeviceClass, LASIPS2_PORT) + +struct LASIPS2PortDeviceClass { + DeviceClass parent; + + DeviceRealize parent_realize; +}; + +typedef struct LASIPS2State LASIPS2State; + +struct LASIPS2Port { + DeviceState parent_obj; + + LASIPS2State *lasips2; + MemoryRegion reg; + PS2State *ps2dev; + uint8_t id; + uint8_t control; + uint8_t buf; + bool loopback_rbne; + qemu_irq irq; +}; + +#define TYPE_LASIPS2_KBD_PORT "lasips2-kbd-port" +OBJECT_DECLARE_SIMPLE_TYPE(LASIPS2KbdPort, LASIPS2_KBD_PORT) + +struct LASIPS2KbdPort { + LASIPS2Port parent_obj; + + PS2KbdState kbd; +}; + +#define TYPE_LASIPS2_MOUSE_PORT "lasips2-mouse-port" +OBJECT_DECLARE_SIMPLE_TYPE(LASIPS2MousePort, LASIPS2_MOUSE_PORT) + +struct LASIPS2MousePort { + LASIPS2Port parent_obj; + + PS2MouseState mouse; +}; + +struct LASIPS2State { + SysBusDevice parent_obj; + + LASIPS2KbdPort kbd_port; + LASIPS2MousePort mouse_port; + uint8_t int_status; + qemu_irq irq; +}; #define TYPE_LASIPS2 "lasips2" - -void lasips2_init(MemoryRegion *address_space, hwaddr base, qemu_irq irq); +OBJECT_DECLARE_SIMPLE_TYPE(LASIPS2State, LASIPS2) #endif /* HW_INPUT_LASIPS2_H */ diff --git a/include/hw/input/lm832x.h b/include/hw/input/lm832x.h index 2a58ccf891..e0e5d5ef20 100644 --- a/include/hw/input/lm832x.h +++ b/include/hw/input/lm832x.h @@ -18,8 +18,8 @@ * with this program; if not, see . */ -#ifndef HW_INPUT_LM832X -#define HW_INPUT_LM832X +#ifndef HW_INPUT_LM832X_H +#define HW_INPUT_LM832X_H #define TYPE_LM8323 "lm8323" diff --git a/include/hw/input/pl050.h b/include/hw/input/pl050.h new file mode 100644 index 0000000000..89ec4fafc9 --- /dev/null +++ b/include/hw/input/pl050.h @@ -0,0 +1,59 @@ +/* + * Arm PrimeCell PL050 Keyboard / Mouse Interface + * + * Copyright (c) 2006-2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. 
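The inline helpers added to i8042.h above give ACPI code a way to probe for a keyboard controller without linking against the device model: i8042_present() resolves the QOM path, and iapc_boot_arch_8042() converts the answer into bit 1 of the FADT IA-PC Boot Architecture flags. One plausible consumer sketch; the surrounding flag handling is an assumption, not from the patch:

static uint16_t example_fadt_boot_arch_flags(void)
{
    uint16_t flags = 0;

    /* bit 1: an 8042-compatible keyboard controller is present */
    flags |= iapc_boot_arch_8042();

    /* a real FADT builder would OR in the other boot-architecture
     * bits (legacy devices, VGA not present, ...) here */
    return flags;
}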
+ */ + +#ifndef HW_PL050_H +#define HW_PL050_H + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/input/ps2.h" +#include "hw/irq.h" + +struct PL050DeviceClass { + SysBusDeviceClass parent_class; + + DeviceRealize parent_realize; +}; + +#define TYPE_PL050 "pl050" +OBJECT_DECLARE_TYPE(PL050State, PL050DeviceClass, PL050) + +struct PL050State { + SysBusDevice parent_obj; + + MemoryRegion iomem; + PS2State *ps2dev; + uint32_t cr; + uint32_t clk; + uint32_t last; + int pending; + qemu_irq irq; + bool is_mouse; +}; + +#define TYPE_PL050_KBD_DEVICE "pl050_keyboard" +OBJECT_DECLARE_SIMPLE_TYPE(PL050KbdState, PL050_KBD_DEVICE) + +struct PL050KbdState { + PL050State parent_obj; + + PS2KbdState kbd; +}; + +#define TYPE_PL050_MOUSE_DEVICE "pl050_mouse" +OBJECT_DECLARE_SIMPLE_TYPE(PL050MouseState, PL050_MOUSE_DEVICE) + +struct PL050MouseState { + PL050State parent_obj; + + PS2MouseState mouse; +}; + +#endif diff --git a/include/hw/input/ps2.h b/include/hw/input/ps2.h index 35d983897a..ff777582cd 100644 --- a/include/hw/input/ps2.h +++ b/include/hw/input/ps2.h @@ -25,28 +25,89 @@ #ifndef HW_PS2_H #define HW_PS2_H +#include "hw/sysbus.h" + #define PS2_MOUSE_BUTTON_LEFT 0x01 #define PS2_MOUSE_BUTTON_RIGHT 0x02 #define PS2_MOUSE_BUTTON_MIDDLE 0x04 #define PS2_MOUSE_BUTTON_SIDE 0x08 #define PS2_MOUSE_BUTTON_EXTRA 0x10 -typedef struct PS2State PS2State; +struct PS2DeviceClass { + SysBusDeviceClass parent_class; + + DeviceReset parent_reset; +}; + +/* + * PS/2 buffer size. Keep 256 bytes for compatibility with + * older QEMU versions. + */ +#define PS2_BUFFER_SIZE 256 + +typedef struct { + uint8_t data[PS2_BUFFER_SIZE]; + int rptr, wptr, cwptr, count; +} PS2Queue; + +/* Output IRQ */ +#define PS2_DEVICE_IRQ 0 + +struct PS2State { + SysBusDevice parent_obj; + + PS2Queue queue; + int32_t write_cmd; + qemu_irq irq; +}; + +#define TYPE_PS2_DEVICE "ps2-device" +OBJECT_DECLARE_TYPE(PS2State, PS2DeviceClass, PS2_DEVICE) + +struct PS2KbdState { + PS2State parent_obj; + + int scan_enabled; + int translate; + int scancode_set; /* 1=XT, 2=AT, 3=PS/2 */ + int ledstate; + bool need_high_bit; + unsigned int modifiers; /* bitmask of MOD_* constants above */ +}; + +#define TYPE_PS2_KBD_DEVICE "ps2-kbd" +OBJECT_DECLARE_SIMPLE_TYPE(PS2KbdState, PS2_KBD_DEVICE) + +struct PS2MouseState { + PS2State parent_obj; + + uint8_t mouse_status; + uint8_t mouse_resolution; + uint8_t mouse_sample_rate; + uint8_t mouse_wrap; + uint8_t mouse_type; /* 0 = PS2, 3 = IMPS/2, 4 = IMEX */ + uint8_t mouse_detect_state; + int mouse_dx; /* current values, needed for 'poll' mode */ + int mouse_dy; + int mouse_dz; + int mouse_dw; + uint8_t mouse_buttons; +}; + +#define TYPE_PS2_MOUSE_DEVICE "ps2-mouse" +OBJECT_DECLARE_SIMPLE_TYPE(PS2MouseState, PS2_MOUSE_DEVICE) /* ps2.c */ -void *ps2_kbd_init(void (*update_irq)(void *, int), void *update_arg); -void *ps2_mouse_init(void (*update_irq)(void *, int), void *update_arg); -void ps2_write_mouse(void *, int val); -void ps2_write_keyboard(void *, int val); +void ps2_write_mouse(PS2MouseState *s, int val); +void ps2_write_keyboard(PS2KbdState *s, int val); uint32_t ps2_read_data(PS2State *s); void ps2_queue_noirq(PS2State *s, int b); -void ps2_raise_irq(PS2State *s); void ps2_queue(PS2State *s, int b); void ps2_queue_2(PS2State *s, int b1, int b2); void ps2_queue_3(PS2State *s, int b1, int b2, int b3); void ps2_queue_4(PS2State *s, int b1, int b2, int b3, int b4); -void ps2_keyboard_set_translation(void *opaque, int mode); -void ps2_mouse_fake_event(void *opaque); 
+void ps2_keyboard_set_translation(PS2KbdState *s, int mode); +void ps2_mouse_fake_event(PS2MouseState *s); int ps2_queue_empty(PS2State *s); #endif /* HW_PS2_H */ diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h index 91491a2f66..ab5182a28a 100644 --- a/include/hw/intc/arm_gicv3_common.h +++ b/include/hw/intc/arm_gicv3_common.h @@ -36,7 +36,14 @@ #define GICV3_MAXIRQ 1020 #define GICV3_MAXSPI (GICV3_MAXIRQ - GIC_INTERNAL) +#define GICV3_LPI_INTID_START 8192 + +/* + * The redistributor in GICv3 has two 64KB frames per CPU; in + * GICv4 it has four 64KB frames per CPU. + */ #define GICV3_REDIST_SIZE 0x20000 +#define GICV4_REDIST_SIZE 0x40000 /* Number of SGI target-list bits */ #define GICV3_TARGETLIST_BITS 16 @@ -44,11 +51,6 @@ /* Maximum number of list registers (architectural limit) */ #define GICV3_LR_MAX 16 -/* Minimum BPR for Secure, or when security not enabled */ -#define GIC_MIN_BPR 0 -/* Minimum BPR for Nonsecure when security is enabled */ -#define GIC_MIN_BPR_NS (GIC_MIN_BPR + 1) - /* For some distributor fields we want to model the array of 32-bit * register values which hold various bitmaps corresponding to enabled, * pending, etc bits. These macros and functions facilitate that; the @@ -172,6 +174,9 @@ struct GICv3CPUState { uint32_t gicr_igrpmodr0; uint32_t gicr_nsacr; uint8_t gicr_ipriorityr[GIC_INTERNAL]; + /* VLPI_base page registers */ + uint64_t gicr_vpropbaser; + uint64_t gicr_vpendbaser; /* CPU interface */ uint64_t icc_sre_el1; @@ -196,36 +201,63 @@ struct GICv3CPUState { int num_list_regs; int vpribits; /* number of virtual priority bits */ int vprebits; /* number of virtual preemption bits */ + int pribits; /* number of physical priority bits */ + int prebits; /* number of physical preemption bits */ /* Current highest priority pending interrupt for this CPU. * This is cached information that can be recalculated from the * real state above; it doesn't need to be migrated. */ PendingIrq hppi; + + /* + * Cached information recalculated from LPI tables + * in guest memory + */ + PendingIrq hpplpi; + + /* Cached information recalculated from vLPI tables in guest memory */ + PendingIrq hppvlpi; + /* This is temporary working state, to avoid a malloc in gicv3_update() */ bool seenbetter; }; +/* + * The redistributor pages might be split into more than one region + * on some machine types if there are many CPUs. 
+ */ +typedef struct GICv3RedistRegion { + GICv3State *gic; + MemoryRegion iomem; + uint32_t cpuidx; /* index of first CPU this region covers */ +} GICv3RedistRegion; + struct GICv3State { /*< private >*/ SysBusDevice parent_obj; /*< public >*/ MemoryRegion iomem_dist; /* Distributor */ - MemoryRegion *iomem_redist; /* Redistributor Regions */ + GICv3RedistRegion *redist_regions; /* Redistributor Regions */ uint32_t *redist_region_count; /* redistributor count within each region */ uint32_t nb_redist_regions; /* number of redist regions */ uint32_t num_cpu; uint32_t num_irq; uint32_t revision; + bool lpi_enable; bool security_extn; + bool force_8bit_prio; bool irq_reset_nonsecure; bool gicd_no_migration_shift_bug; int dev_fd; /* kvm device fd if backed by kvm vgic support */ Error *migration_blocker; + MemoryRegion *dma; + AddressSpace dma_as; + /* Distributor */ /* for a GIC with the security extensions the NS banked version of this @@ -249,6 +281,8 @@ struct GICv3State { uint32_t gicd_nsacr[DIV_ROUND_UP(GICV3_MAXIRQ, 16)]; GICv3CPUState *cpu; + /* List of all ITSes connected to this GIC */ + GPtrArray *itslist; }; #define GICV3_BITMAP_ACCESSORS(BMP) \ @@ -293,6 +327,6 @@ struct ARMGICv3CommonClass { }; void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler, - const MemoryRegionOps *ops, Error **errp); + const MemoryRegionOps *ops); #endif diff --git a/include/hw/intc/arm_gicv3_its_common.h b/include/hw/intc/arm_gicv3_its_common.h index 5a0952b404..a11a0f6654 100644 --- a/include/hw/intc/arm_gicv3_its_common.h +++ b/include/hw/intc/arm_gicv3_its_common.h @@ -25,17 +25,35 @@ #include "hw/intc/arm_gicv3_common.h" #include "qom/object.h" +#define TYPE_ARM_GICV3_ITS "arm-gicv3-its" + #define ITS_CONTROL_SIZE 0x10000 #define ITS_TRANS_SIZE 0x10000 #define ITS_SIZE (ITS_CONTROL_SIZE + ITS_TRANS_SIZE) #define GITS_CTLR 0x0 #define GITS_IIDR 0x4 +#define GITS_TYPER 0x8 #define GITS_CBASER 0x80 #define GITS_CWRITER 0x88 #define GITS_CREADR 0x90 #define GITS_BASER 0x100 +#define GITS_TRANSLATER 0x0040 + +typedef struct { + bool indirect; + uint16_t entry_sz; + uint32_t page_sz; + uint32_t num_entries; + uint64_t base_addr; +} TableDesc; + +typedef struct { + uint32_t num_entries; + uint64_t base_addr; +} CmdQDesc; + struct GICv3ITSState { SysBusDevice parent_obj; @@ -52,17 +70,42 @@ struct GICv3ITSState { /* Registers */ uint32_t ctlr; uint32_t iidr; + uint64_t typer; uint64_t cbaser; uint64_t cwriter; uint64_t creadr; uint64_t baser[8]; + TableDesc dt; + TableDesc ct; + TableDesc vpet; + CmdQDesc cq; + Error *migration_blocker; }; typedef struct GICv3ITSState GICv3ITSState; -void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops); +void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops, + const MemoryRegionOps *tops); + +/* + * The ITS should call this when it is realized to add itself + * to its GIC's list of connected ITSes. 
+ */ +static inline void gicv3_add_its(GICv3State *s, DeviceState *its) +{ + g_ptr_array_add(s->itslist, its); +} + +/* + * The ITS can use this for operations that must be performed on + * every ITS connected to the same GIC that it is + */ +static inline void gicv3_foreach_its(GICv3State *s, GFunc func, void *opaque) +{ + g_ptr_array_foreach(s->itslist, func, opaque); +} #define TYPE_ARM_GICV3_ITS_COMMON "arm-gicv3-its-common" typedef struct GICv3ITSCommonClass GICv3ITSCommonClass; diff --git a/include/hw/intc/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h index 39c71e1593..0180c7b0ca 100644 --- a/include/hw/intc/armv7m_nvic.h +++ b/include/hw/intc/armv7m_nvic.h @@ -80,18 +80,10 @@ struct NVICState { int vectpending_prio; /* group prio of the exeception in vectpending */ MemoryRegion sysregmem; - MemoryRegion sysreg_ns_mem; - MemoryRegion systickmem; - MemoryRegion systick_ns_mem; - MemoryRegion ras_mem; - MemoryRegion container; - MemoryRegion defaultmem; uint32_t num_irq; qemu_irq excpout; qemu_irq sysresetreq; - - SysTickState systick[M_REG_NUM_BANKS]; }; #endif diff --git a/include/hw/intc/exynos4210_combiner.h b/include/hw/intc/exynos4210_combiner.h new file mode 100644 index 0000000000..bd207a7e6e --- /dev/null +++ b/include/hw/intc/exynos4210_combiner.h @@ -0,0 +1,57 @@ +/* + * Samsung exynos4210 Interrupt Combiner + * + * Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd. + * All rights reserved. + * + * Evgeny Voevodin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#ifndef HW_INTC_EXYNOS4210_COMBINER_H +#define HW_INTC_EXYNOS4210_COMBINER_H + +#include "hw/sysbus.h" + +/* + * State for each output signal of internal combiner + */ +typedef struct CombinerGroupState { + uint8_t src_mask; /* 1 - source enabled, 0 - disabled */ + uint8_t src_pending; /* Pending source interrupts before masking */ +} CombinerGroupState; + +#define TYPE_EXYNOS4210_COMBINER "exynos4210.combiner" +OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210CombinerState, EXYNOS4210_COMBINER) + +/* Number of groups and total number of interrupts for the internal combiner */ +#define IIC_NGRP 64 +#define IIC_NIRQ (IIC_NGRP * 8) +#define IIC_REGSET_SIZE 0x41 + +struct Exynos4210CombinerState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + + struct CombinerGroupState group[IIC_NGRP]; + uint32_t reg_set[IIC_REGSET_SIZE]; + uint32_t icipsr[2]; + uint32_t external; /* 1 means that this combiner is external */ + + qemu_irq output_irq[IIC_NGRP]; +}; + +#endif diff --git a/include/hw/intc/exynos4210_gic.h b/include/hw/intc/exynos4210_gic.h new file mode 100644 index 0000000000..f64c4069c6 --- /dev/null +++ b/include/hw/intc/exynos4210_gic.h @@ -0,0 +1,43 @@ +/* + * Samsung exynos4210 GIC implementation. Based on hw/arm_gic.c + * + * Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd. + * All rights reserved. 
+ * + * Evgeny Voevodin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ +#ifndef HW_INTC_EXYNOS4210_GIC_H +#define HW_INTC_EXYNOS4210_GIC_H + +#include "hw/sysbus.h" + +#define TYPE_EXYNOS4210_GIC "exynos4210.gic" +OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210GicState, EXYNOS4210_GIC) + +#define EXYNOS4210_GIC_NCPUS 2 + +struct Exynos4210GicState { + SysBusDevice parent_obj; + + MemoryRegion cpu_container; + MemoryRegion dist_container; + MemoryRegion cpu_alias[EXYNOS4210_GIC_NCPUS]; + MemoryRegion dist_alias[EXYNOS4210_GIC_NCPUS]; + uint32_t num_cpu; + DeviceState *gic; +}; + +#endif diff --git a/include/hw/intc/goldfish_pic.h b/include/hw/intc/goldfish_pic.h index ad13ab37fc..e9d552f796 100644 --- a/include/hw/intc/goldfish_pic.h +++ b/include/hw/intc/goldfish_pic.h @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * Goldfish PIC * diff --git a/include/hw/intc/ibex_plic.h b/include/hw/intc/ibex_plic.h deleted file mode 100644 index 7fc495db99..0000000000 --- a/include/hw/intc/ibex_plic.h +++ /dev/null @@ -1,65 +0,0 @@ -/* - * QEMU RISC-V lowRISC Ibex PLIC - * - * Copyright (c) 2020 Western Digital - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2 or later, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . 
- */ - -#ifndef HW_IBEX_PLIC_H -#define HW_IBEX_PLIC_H - -#include "hw/sysbus.h" -#include "qom/object.h" - -#define TYPE_IBEX_PLIC "ibex-plic" -OBJECT_DECLARE_SIMPLE_TYPE(IbexPlicState, IBEX_PLIC) - -struct IbexPlicState { - /*< private >*/ - SysBusDevice parent_obj; - - /*< public >*/ - MemoryRegion mmio; - - uint32_t *pending; - uint32_t *hidden_pending; - uint32_t *claimed; - uint32_t *source; - uint32_t *priority; - uint32_t *enable; - uint32_t threshold; - uint32_t claim; - - /* config */ - uint32_t num_cpus; - uint32_t num_sources; - - uint32_t pending_base; - uint32_t pending_num; - - uint32_t source_base; - uint32_t source_num; - - uint32_t priority_base; - uint32_t priority_num; - - uint32_t enable_base; - uint32_t enable_num; - - uint32_t threshold_base; - - uint32_t claim_base; -}; - -#endif /* HW_IBEX_PLIC_H */ diff --git a/include/hw/intc/loongarch_extioi.h b/include/hw/intc/loongarch_extioi.h new file mode 100644 index 0000000000..15b8c999f6 --- /dev/null +++ b/include/hw/intc/loongarch_extioi.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch 3A5000 ext interrupt controller definitions + * + * Copyright (C) 2021 Loongson Technology Corporation Limited + */ + +#include "hw/sysbus.h" +#include "hw/loongarch/virt.h" + +#ifndef LOONGARCH_EXTIOI_H +#define LOONGARCH_EXTIOI_H + +#define LS3A_INTC_IP 8 +#define EXTIOI_IRQS (256) +#define EXTIOI_IRQS_BITMAP_SIZE (256 / 8) +/* map to ipnum per 32 irqs */ +#define EXTIOI_IRQS_IPMAP_SIZE (256 / 32) +#define EXTIOI_IRQS_COREMAP_SIZE 256 +#define EXTIOI_IRQS_NODETYPE_COUNT 16 +#define EXTIOI_IRQS_GROUP_COUNT 8 + +#define APIC_OFFSET 0x400 +#define APIC_BASE (0x1000ULL + APIC_OFFSET) + +#define EXTIOI_NODETYPE_START (0x4a0 - APIC_OFFSET) +#define EXTIOI_NODETYPE_END (0x4c0 - APIC_OFFSET) +#define EXTIOI_IPMAP_START (0x4c0 - APIC_OFFSET) +#define EXTIOI_IPMAP_END (0x4c8 - APIC_OFFSET) +#define EXTIOI_ENABLE_START (0x600 - APIC_OFFSET) +#define EXTIOI_ENABLE_END (0x620 - APIC_OFFSET) +#define EXTIOI_BOUNCE_START (0x680 - APIC_OFFSET) +#define EXTIOI_BOUNCE_END (0x6a0 - APIC_OFFSET) +#define EXTIOI_ISR_START (0x700 - APIC_OFFSET) +#define EXTIOI_ISR_END (0x720 - APIC_OFFSET) +#define EXTIOI_COREISR_START (0x800 - APIC_OFFSET) +#define EXTIOI_COREISR_END (0xB20 - APIC_OFFSET) +#define EXTIOI_COREMAP_START (0xC00 - APIC_OFFSET) +#define EXTIOI_COREMAP_END (0xD00 - APIC_OFFSET) + +#define TYPE_LOONGARCH_EXTIOI "loongarch.extioi" +OBJECT_DECLARE_SIMPLE_TYPE(LoongArchExtIOI, LOONGARCH_EXTIOI) +struct LoongArchExtIOI { + SysBusDevice parent_obj; + /* hardware state */ + uint32_t nodetype[EXTIOI_IRQS_NODETYPE_COUNT / 2]; + uint32_t bounce[EXTIOI_IRQS_GROUP_COUNT]; + uint32_t isr[EXTIOI_IRQS / 32]; + uint32_t coreisr[LOONGARCH_MAX_VCPUS][EXTIOI_IRQS_GROUP_COUNT]; + uint32_t enable[EXTIOI_IRQS / 32]; + uint32_t ipmap[EXTIOI_IRQS_IPMAP_SIZE / 4]; + uint32_t coremap[EXTIOI_IRQS / 4]; + uint32_t sw_pending[EXTIOI_IRQS / 32]; + DECLARE_BITMAP(sw_isr[LOONGARCH_MAX_VCPUS][LS3A_INTC_IP], EXTIOI_IRQS); + uint8_t sw_ipmap[EXTIOI_IRQS_IPMAP_SIZE]; + uint8_t sw_coremap[EXTIOI_IRQS]; + qemu_irq parent_irq[LOONGARCH_MAX_VCPUS][LS3A_INTC_IP]; + qemu_irq irq[EXTIOI_IRQS]; + MemoryRegion extioi_iocsr_mem[LOONGARCH_MAX_VCPUS]; + MemoryRegion extioi_system_mem; +}; +#endif /* LOONGARCH_EXTIOI_H */ diff --git a/include/hw/intc/loongarch_ipi.h b/include/hw/intc/loongarch_ipi.h new file mode 100644 index 0000000000..0ee48fca55 --- /dev/null +++ b/include/hw/intc/loongarch_ipi.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: 
GPL-2.0-or-later */ +/* + * LoongArch ipi interrupt header files + * + * Copyright (C) 2021 Loongson Technology Corporation Limited + */ + +#ifndef HW_LOONGARCH_IPI_H +#define HW_LOONGARCH_IPI_H + +#include "hw/sysbus.h" + +/* Mainy used by iocsr read and write */ +#define SMP_IPI_MAILBOX 0x1000ULL +#define CORE_STATUS_OFF 0x0 +#define CORE_EN_OFF 0x4 +#define CORE_SET_OFF 0x8 +#define CORE_CLEAR_OFF 0xc +#define CORE_BUF_20 0x20 +#define CORE_BUF_28 0x28 +#define CORE_BUF_30 0x30 +#define CORE_BUF_38 0x38 +#define IOCSR_IPI_SEND 0x40 +#define IOCSR_MAIL_SEND 0x48 +#define IOCSR_ANY_SEND 0x158 + +#define MAIL_SEND_ADDR (SMP_IPI_MAILBOX + IOCSR_MAIL_SEND) +#define MAIL_SEND_OFFSET 0 +#define ANY_SEND_OFFSET (IOCSR_ANY_SEND - IOCSR_MAIL_SEND) + +#define MAX_IPI_CORE_NUM 4 +#define MAX_IPI_MBX_NUM 4 + +#define TYPE_LOONGARCH_IPI "loongarch_ipi" +OBJECT_DECLARE_SIMPLE_TYPE(LoongArchIPI, LOONGARCH_IPI) + +typedef struct IPICore { + uint32_t status; + uint32_t en; + uint32_t set; + uint32_t clear; + /* 64bit buf divide into 2 32bit buf */ + uint32_t buf[MAX_IPI_MBX_NUM * 2]; + qemu_irq irq; +} IPICore; + +struct LoongArchIPI { + SysBusDevice parent_obj; + MemoryRegion ipi_iocsr_mem[MAX_IPI_CORE_NUM]; + MemoryRegion ipi64_iocsr_mem[MAX_IPI_CORE_NUM]; +}; + +#endif diff --git a/include/hw/intc/loongarch_pch_msi.h b/include/hw/intc/loongarch_pch_msi.h new file mode 100644 index 0000000000..6d67560dea --- /dev/null +++ b/include/hw/intc/loongarch_pch_msi.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch 7A1000 I/O interrupt controller definitions + * + * Copyright (C) 2021 Loongson Technology Corporation Limited + */ + +#define TYPE_LOONGARCH_PCH_MSI "loongarch_pch_msi" +OBJECT_DECLARE_SIMPLE_TYPE(LoongArchPCHMSI, LOONGARCH_PCH_MSI) + +/* Msi irq start start from 64 to 255 */ +#define PCH_MSI_IRQ_START 64 +#define PCH_MSI_IRQ_END 255 +#define PCH_MSI_IRQ_NUM 192 + +struct LoongArchPCHMSI { + SysBusDevice parent_obj; + qemu_irq pch_msi_irq[PCH_MSI_IRQ_NUM]; + MemoryRegion msi_mmio; + /* irq base passed to upper extioi intc */ + unsigned int irq_base; +}; diff --git a/include/hw/intc/loongarch_pch_pic.h b/include/hw/intc/loongarch_pch_pic.h new file mode 100644 index 0000000000..2d4aa9ed6f --- /dev/null +++ b/include/hw/intc/loongarch_pch_pic.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch 7A1000 I/O interrupt controller definitions + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#define TYPE_LOONGARCH_PCH_PIC "loongarch_pch_pic" +#define PCH_PIC_NAME(name) TYPE_LOONGARCH_PCH_PIC#name +OBJECT_DECLARE_SIMPLE_TYPE(LoongArchPCHPIC, LOONGARCH_PCH_PIC) + +#define PCH_PIC_IRQ_START 0 +#define PCH_PIC_IRQ_END 63 +#define PCH_PIC_IRQ_NUM 64 +#define PCH_PIC_INT_ID_VAL 0x7000000UL +#define PCH_PIC_INT_ID_NUM 0x3f0001UL + +#define PCH_PIC_INT_ID_LO 0x00 +#define PCH_PIC_INT_ID_HI 0x04 +#define PCH_PIC_INT_MASK_LO 0x20 +#define PCH_PIC_INT_MASK_HI 0x24 +#define PCH_PIC_HTMSI_EN_LO 0x40 +#define PCH_PIC_HTMSI_EN_HI 0x44 +#define PCH_PIC_INT_EDGE_LO 0x60 +#define PCH_PIC_INT_EDGE_HI 0x64 +#define PCH_PIC_INT_CLEAR_LO 0x80 +#define PCH_PIC_INT_CLEAR_HI 0x84 +#define PCH_PIC_AUTO_CTRL0_LO 0xc0 +#define PCH_PIC_AUTO_CTRL0_HI 0xc4 +#define PCH_PIC_AUTO_CTRL1_LO 0xe0 +#define PCH_PIC_AUTO_CTRL1_HI 0xe4 +#define PCH_PIC_ROUTE_ENTRY_OFFSET 0x100 +#define PCH_PIC_ROUTE_ENTRY_END 0x13f +#define PCH_PIC_HTMSI_VEC_OFFSET 0x200 +#define PCH_PIC_HTMSI_VEC_END 0x23f +#define PCH_PIC_INT_STATUS_LO 0x3a0 +#define 
PCH_PIC_INT_STATUS_HI 0x3a4 +#define PCH_PIC_INT_POL_LO 0x3e0 +#define PCH_PIC_INT_POL_HI 0x3e4 + +#define STATUS_LO_START 0 +#define STATUS_HI_START 0x4 +#define POL_LO_START 0x40 +#define POL_HI_START 0x44 +struct LoongArchPCHPIC { + SysBusDevice parent_obj; + qemu_irq parent_irq[64]; + uint64_t int_mask; /*0x020 interrupt mask register*/ + uint64_t htmsi_en; /*0x040 1=msi*/ + uint64_t intedge; /*0x060 edge=1 level =0*/ + uint64_t intclr; /*0x080 for clean edge int,set 1 clean,set 0 is noused*/ + uint64_t auto_crtl0; /*0x0c0*/ + uint64_t auto_crtl1; /*0x0e0*/ + uint64_t last_intirr; /* edge detection */ + uint64_t intirr; /* 0x380 interrupt request register */ + uint64_t intisr; /* 0x3a0 interrupt service register */ + /* + * 0x3e0 interrupt level polarity selection + * register 0 for high level trigger + */ + uint64_t int_polarity; + + uint8_t route_entry[64]; /*0x100 - 0x138*/ + uint8_t htmsi_vector[64]; /*0x200 - 0x238*/ + + MemoryRegion iomem32_low; + MemoryRegion iomem32_high; + MemoryRegion iomem8; +}; diff --git a/include/hw/intc/m68k_irqc.h b/include/hw/intc/m68k_irqc.h index dbcfcfc2e0..ef91f21812 100644 --- a/include/hw/intc/m68k_irqc.h +++ b/include/hw/intc/m68k_irqc.h @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * QEMU Motorola 680x0 IRQ Controller * diff --git a/include/hw/intc/nios2_vic.h b/include/hw/intc/nios2_vic.h new file mode 100644 index 0000000000..ac507b9d74 --- /dev/null +++ b/include/hw/intc/nios2_vic.h @@ -0,0 +1,64 @@ +/* + * Vectored Interrupt Controller for nios2 processor + * + * Copyright (c) 2022 Neuroblade + * + * Interface: + * QOM property "cpu": link to the Nios2 CPU (must be set) + * Unnamed GPIO inputs 0..NIOS2_VIC_MAX_IRQ-1: input IRQ lines + * IRQ should be connected to nios2 IRQ0. + * + * Reference: "Embedded Peripherals IP User Guide + * for Intel® Quartus® Prime Design Suite: 21.4" + * Chapter 38 "Vectored Interrupt Controller Core" + * See: https://www.intel.com/content/www/us/en/docs/programmable/683130/21-4/vectored-interrupt-controller-core.html + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#ifndef HW_INTC_NIOS2_VIC_H +#define HW_INTC_NIOS2_VIC_H + +#define TYPE_NIOS2_VIC "nios2-vic" +OBJECT_DECLARE_SIMPLE_TYPE(Nios2VIC, NIOS2_VIC) + +#define NIOS2_VIC_MAX_IRQ 32 + +struct Nios2VIC { + /*< private >*/ + SysBusDevice parent_obj; + + /*< public >*/ + qemu_irq output_int; + + /* properties */ + CPUState *cpu; + MemoryRegion csr; + + uint32_t int_config[NIOS2_VIC_MAX_IRQ]; + uint32_t vic_config; + uint32_t int_raw_status; + uint32_t int_enable; + uint32_t sw_int; + uint32_t vic_status; + uint32_t vec_tbl_base; + uint32_t vec_tbl_addr; +}; + +#endif /* HW_INTC_NIOS2_VIC_H */ diff --git a/include/hw/intc/ppc-uic.h b/include/hw/intc/ppc-uic.h index 22dd5e5ac2..4d82e9a3c6 100644 --- a/include/hw/intc/ppc-uic.h +++ b/include/hw/intc/ppc-uic.h @@ -25,8 +25,7 @@ #ifndef HW_INTC_PPC_UIC_H #define HW_INTC_PPC_UIC_H -#include "hw/sysbus.h" -#include "qom/object.h" +#include "hw/ppc/ppc4xx.h" #define TYPE_PPC_UIC "ppc-uic" OBJECT_DECLARE_SIMPLE_TYPE(PPCUIC, PPC_UIC) @@ -56,14 +55,13 @@ enum { struct PPCUIC { /*< private >*/ - SysBusDevice parent_obj; + Ppc4xxDcrDeviceState parent_obj; /*< public >*/ qemu_irq output_int; qemu_irq output_cint; /* properties */ - CPUState *cpu; uint32_t dcr_base; bool use_vectors; diff --git a/include/hw/intc/riscv_aclint.h b/include/hw/intc/riscv_aclint.h new file mode 100644 index 0000000000..693415eb6d --- /dev/null +++ b/include/hw/intc/riscv_aclint.h @@ -0,0 +1,83 @@ +/* + * RISC-V ACLINT (Advanced Core Local Interruptor) interface + * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * Copyright (c) 2017 SiFive, Inc. + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#ifndef HW_RISCV_ACLINT_H +#define HW_RISCV_ACLINT_H + +#include "hw/sysbus.h" + +#define TYPE_RISCV_ACLINT_MTIMER "riscv.aclint.mtimer" + +#define RISCV_ACLINT_MTIMER(obj) \ + OBJECT_CHECK(RISCVAclintMTimerState, (obj), TYPE_RISCV_ACLINT_MTIMER) + +typedef struct RISCVAclintMTimerState { + /*< private >*/ + SysBusDevice parent_obj; + uint64_t time_delta; + uint64_t *timecmp; + QEMUTimer **timers; + + /*< public >*/ + MemoryRegion mmio; + uint32_t hartid_base; + uint32_t num_harts; + uint32_t timecmp_base; + uint32_t time_base; + uint32_t aperture_size; + uint32_t timebase_freq; + qemu_irq *timer_irqs; +} RISCVAclintMTimerState; + +DeviceState *riscv_aclint_mtimer_create(hwaddr addr, hwaddr size, + uint32_t hartid_base, uint32_t num_harts, + uint32_t timecmp_base, uint32_t time_base, uint32_t timebase_freq, + bool provide_rdtime); + +#define TYPE_RISCV_ACLINT_SWI "riscv.aclint.swi" + +#define RISCV_ACLINT_SWI(obj) \ + OBJECT_CHECK(RISCVAclintSwiState, (obj), TYPE_RISCV_ACLINT_SWI) + +typedef struct RISCVAclintSwiState { + /*< private >*/ + SysBusDevice parent_obj; + + /*< public >*/ + MemoryRegion mmio; + uint32_t hartid_base; + uint32_t num_harts; + uint32_t sswi; + qemu_irq *soft_irqs; +} RISCVAclintSwiState; + +DeviceState *riscv_aclint_swi_create(hwaddr addr, uint32_t hartid_base, + uint32_t num_harts, bool sswi); + +enum { + RISCV_ACLINT_DEFAULT_MTIMECMP = 0x0, + RISCV_ACLINT_DEFAULT_MTIME = 0x7ff8, + RISCV_ACLINT_DEFAULT_MTIMER_SIZE = 0x8000, + RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ = 10000000, + RISCV_ACLINT_MAX_HARTS = 4095, + RISCV_ACLINT_SWI_SIZE = 0x4000 +}; + +#endif diff --git a/include/hw/intc/riscv_aplic.h b/include/hw/intc/riscv_aplic.h new file mode 100644 index 0000000000..de8532fbc3 --- /dev/null +++ b/include/hw/intc/riscv_aplic.h @@ -0,0 +1,79 @@ +/* + * RISC-V APLIC (Advanced Platform Level Interrupt Controller) interface + * + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
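/*
 * Illustrative sketch, not part of the patch: how a board model might
 * instantiate the ACLINT devices declared above. The base addresses and
 * the function name are made-up example values, not taken from any real
 * machine.
 */
static void example_aclint_setup(uint32_t num_harts)
{
    hwaddr swi_base    = 0x02000000;                     /* hypothetical */
    hwaddr mtimer_base = swi_base + RISCV_ACLINT_SWI_SIZE;

    riscv_aclint_swi_create(swi_base, 0, num_harts, false);
    riscv_aclint_mtimer_create(mtimer_base, RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
                               0, num_harts,
                               RISCV_ACLINT_DEFAULT_MTIMECMP,
                               RISCV_ACLINT_DEFAULT_MTIME,
                               RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ,
                               true /* provide_rdtime */);
}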
+ */ + +#ifndef HW_RISCV_APLIC_H +#define HW_RISCV_APLIC_H + +#include "hw/sysbus.h" +#include "qom/object.h" + +#define TYPE_RISCV_APLIC "riscv.aplic" + +typedef struct RISCVAPLICState RISCVAPLICState; +DECLARE_INSTANCE_CHECKER(RISCVAPLICState, RISCV_APLIC, TYPE_RISCV_APLIC) + +#define APLIC_MIN_SIZE 0x4000 +#define APLIC_SIZE_ALIGN(__x) (((__x) + (APLIC_MIN_SIZE - 1)) & \ + ~(APLIC_MIN_SIZE - 1)) +#define APLIC_SIZE(__num_harts) (APLIC_MIN_SIZE + \ + APLIC_SIZE_ALIGN(32 * (__num_harts))) + +struct RISCVAPLICState { + /*< private >*/ + SysBusDevice parent_obj; + qemu_irq *external_irqs; + + /*< public >*/ + MemoryRegion mmio; + uint32_t bitfield_words; + uint32_t domaincfg; + uint32_t mmsicfgaddr; + uint32_t mmsicfgaddrH; + uint32_t smsicfgaddr; + uint32_t smsicfgaddrH; + uint32_t genmsi; + uint32_t *sourcecfg; + uint32_t *state; + uint32_t *target; + uint32_t *idelivery; + uint32_t *iforce; + uint32_t *ithreshold; + + /* topology */ +#define QEMU_APLIC_MAX_CHILDREN 16 + struct RISCVAPLICState *parent; + struct RISCVAPLICState *children[QEMU_APLIC_MAX_CHILDREN]; + uint16_t num_children; + + /* config */ + uint32_t aperture_size; + uint32_t hartid_base; + uint32_t num_harts; + uint32_t iprio_mask; + uint32_t num_irqs; + bool msimode; + bool mmode; +}; + +void riscv_aplic_add_child(DeviceState *parent, DeviceState *child); + +DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size, + uint32_t hartid_base, uint32_t num_harts, uint32_t num_sources, + uint32_t iprio_bits, bool msimode, bool mmode, DeviceState *parent); + +#endif diff --git a/include/hw/intc/riscv_imsic.h b/include/hw/intc/riscv_imsic.h new file mode 100644 index 0000000000..58c2aaa8dc --- /dev/null +++ b/include/hw/intc/riscv_imsic.h @@ -0,0 +1,68 @@ +/* + * RISC-V IMSIC (Incoming Message Signal Interrupt Controller) interface + * + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#ifndef HW_RISCV_IMSIC_H +#define HW_RISCV_IMSIC_H + +#include "hw/sysbus.h" +#include "qom/object.h" + +#define TYPE_RISCV_IMSIC "riscv.imsic" + +typedef struct RISCVIMSICState RISCVIMSICState; +DECLARE_INSTANCE_CHECKER(RISCVIMSICState, RISCV_IMSIC, TYPE_RISCV_IMSIC) + +#define IMSIC_MMIO_PAGE_SHIFT 12 +#define IMSIC_MMIO_PAGE_SZ (1UL << IMSIC_MMIO_PAGE_SHIFT) +#define IMSIC_MMIO_SIZE(__num_pages) ((__num_pages) * IMSIC_MMIO_PAGE_SZ) + +#define IMSIC_MMIO_HART_GUEST_MAX_BTIS 6 +#define IMSIC_MMIO_GROUP_MIN_SHIFT 24 + +#define IMSIC_HART_NUM_GUESTS(__guest_bits) \ + (1U << (__guest_bits)) +#define IMSIC_HART_SIZE(__guest_bits) \ + (IMSIC_HART_NUM_GUESTS(__guest_bits) * IMSIC_MMIO_PAGE_SZ) +#define IMSIC_GROUP_NUM_HARTS(__hart_bits) \ + (1U << (__hart_bits)) +#define IMSIC_GROUP_SIZE(__hart_bits, __guest_bits) \ + (IMSIC_GROUP_NUM_HARTS(__hart_bits) * IMSIC_HART_SIZE(__guest_bits)) + +struct RISCVIMSICState { + /*< private >*/ + SysBusDevice parent_obj; + qemu_irq *external_irqs; + + /*< public >*/ + MemoryRegion mmio; + uint32_t num_eistate; + uint32_t *eidelivery; + uint32_t *eithreshold; + uint32_t *eistate; + + /* config */ + bool mmode; + uint32_t hartid; + uint32_t num_pages; + uint32_t num_irqs; +}; + +DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode, + uint32_t num_pages, uint32_t num_ids); + +#endif diff --git a/include/hw/intc/rx_icu.h b/include/hw/intc/rx_icu.h index 7f5889b36f..b23504f3dd 100644 --- a/include/hw/intc/rx_icu.h +++ b/include/hw/intc/rx_icu.h @@ -73,4 +73,4 @@ struct RXICUState { #define TYPE_RX_ICU "rx-icu" OBJECT_DECLARE_SIMPLE_TYPE(RXICUState, RX_ICU) -#endif /* RX_ICU_H */ +#endif /* HW_INTC_RX_ICU_H */ diff --git a/include/hw/intc/sifive_clint.h b/include/hw/intc/sifive_clint.h deleted file mode 100644 index a30be0f3d6..0000000000 --- a/include/hw/intc/sifive_clint.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * SiFive CLINT (Core Local Interruptor) interface - * - * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu - * Copyright (c) 2017 SiFive, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2 or later, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . 
- */ - -#ifndef HW_SIFIVE_CLINT_H -#define HW_SIFIVE_CLINT_H - -#include "hw/sysbus.h" - -#define TYPE_SIFIVE_CLINT "riscv.sifive.clint" - -#define SIFIVE_CLINT(obj) \ - OBJECT_CHECK(SiFiveCLINTState, (obj), TYPE_SIFIVE_CLINT) - -typedef struct SiFiveCLINTState { - /*< private >*/ - SysBusDevice parent_obj; - - /*< public >*/ - MemoryRegion mmio; - uint32_t hartid_base; - uint32_t num_harts; - uint32_t sip_base; - uint32_t timecmp_base; - uint32_t time_base; - uint32_t aperture_size; - uint32_t timebase_freq; -} SiFiveCLINTState; - -DeviceState *sifive_clint_create(hwaddr addr, hwaddr size, - uint32_t hartid_base, uint32_t num_harts, uint32_t sip_base, - uint32_t timecmp_base, uint32_t time_base, uint32_t timebase_freq, - bool provide_rdtime); - -enum { - SIFIVE_SIP_BASE = 0x0, - SIFIVE_TIMECMP_BASE = 0x4000, - SIFIVE_TIME_BASE = 0xBFF8 -}; - -enum { - SIFIVE_CLINT_TIMEBASE_FREQ = 10000000 -}; - -#endif diff --git a/include/hw/intc/sifive_plic.h b/include/hw/intc/sifive_plic.h index 1e451a270c..134cf39a96 100644 --- a/include/hw/intc/sifive_plic.h +++ b/include/hw/intc/sifive_plic.h @@ -72,9 +72,13 @@ struct SiFivePLICState { uint32_t context_base; uint32_t context_stride; uint32_t aperture_size; + + qemu_irq *m_external_irqs; + qemu_irq *s_external_irqs; }; DeviceState *sifive_plic_create(hwaddr addr, char *hart_config, + uint32_t num_harts, uint32_t hartid_base, uint32_t num_sources, uint32_t num_priorities, uint32_t priority_base, uint32_t pending_base, uint32_t enable_base, diff --git a/include/hw/ipack/ipack.h b/include/hw/ipack/ipack.h index 75014e74ae..cbcdda509d 100644 --- a/include/hw/ipack/ipack.h +++ b/include/hw/ipack/ipack.h @@ -73,9 +73,9 @@ extern const VMStateDescription vmstate_ipack_device; VMSTATE_STRUCT(_field, _state, 1, vmstate_ipack_device, IPackDevice) IPackDevice *ipack_device_find(IPackBus *bus, int32_t slot); -void ipack_bus_new_inplace(IPackBus *bus, size_t bus_size, - DeviceState *parent, - const char *name, uint8_t n_slots, - qemu_irq_handler handler); +void ipack_bus_init(IPackBus *bus, size_t bus_size, + DeviceState *parent, + uint8_t n_slots, + qemu_irq_handler handler); #endif diff --git a/include/hw/irq.h b/include/hw/irq.h index dc7abf199e..645b73d251 100644 --- a/include/hw/irq.h +++ b/include/hw/irq.h @@ -46,11 +46,6 @@ void qemu_free_irq(qemu_irq irq); /* Returns a new IRQ with opposite polarity. */ qemu_irq qemu_irq_invert(qemu_irq irq); -/* Returns a new IRQ which feeds into both the passed IRQs. - * It's probably better to use the TYPE_SPLIT_IRQ device instead. - */ -qemu_irq qemu_irq_split(qemu_irq irq1, qemu_irq irq2); - /* For internal use in qtest. Similar to qemu_irq_split, but operating on an existing vector of qemu_irq. 
*/ void qemu_irq_intercept_in(qemu_irq *gpio_in, qemu_irq_handler handler, int n); diff --git a/include/hw/isa/i8259_internal.h b/include/hw/isa/i8259_internal.h index a6ae8a583f..d272d879fb 100644 --- a/include/hw/isa/i8259_internal.h +++ b/include/hw/isa/i8259_internal.h @@ -72,8 +72,5 @@ struct PICCommonState { void pic_reset_common(PICCommonState *s); ISADevice *i8259_init_chip(const char *name, ISABus *bus, bool master); void pic_stat_update_irq(int irq, int level); -bool pic_get_statistics(InterruptStatsProvider *obj, - uint64_t **irq_counts, unsigned int *nb_irqs); -void pic_print_info(InterruptStatsProvider *obj, Monitor *mon); #endif /* QEMU_I8259_INTERNAL_H */ diff --git a/include/hw/isa/isa.h b/include/hw/isa/isa.h index 6fd8bef29a..1e21dc9694 100644 --- a/include/hw/isa/isa.h +++ b/include/hw/isa/isa.h @@ -16,20 +16,6 @@ OBJECT_DECLARE_TYPE(ISADevice, ISADeviceClass, ISA_DEVICE) #define TYPE_ISA_BUS "ISA" OBJECT_DECLARE_SIMPLE_TYPE(ISABus, ISA_BUS) -#define TYPE_APPLE_SMC "isa-applesmc" -#define APPLESMC_MAX_DATA_LENGTH 32 -#define APPLESMC_PROP_IO_BASE "iobase" - -static inline uint16_t applesmc_port(void) -{ - Object *obj = object_resolve_path_type("", TYPE_APPLE_SMC, NULL); - - if (obj) { - return object_property_get_uint(obj, APPLESMC_PROP_IO_BASE, NULL); - } - return 0; -} - #define TYPE_ISADMA "isa-dma" typedef struct IsaDmaClass IsaDmaClass; @@ -64,7 +50,6 @@ struct IsaDmaClass { struct ISADeviceClass { DeviceClass parent_class; - void (*build_aml)(ISADevice *dev, Aml *scope); }; struct ISABus { @@ -83,8 +68,6 @@ struct ISADevice { DeviceState parent_obj; /*< public >*/ - int8_t isairq[2]; /* -1 = unassigned */ - int nirqs; int ioport_id; }; @@ -92,7 +75,6 @@ ISABus *isa_bus_new(DeviceState *dev, MemoryRegion *address_space, MemoryRegion *address_space_io, Error **errp); void isa_bus_irqs(ISABus *bus, qemu_irq *irqs); qemu_irq isa_get_irq(ISADevice *dev, unsigned isairq); -void isa_init_irq(ISADevice *dev, qemu_irq *p, unsigned isairq); void isa_connect_gpio_out(ISADevice *isadev, int gpioirq, unsigned isairq); void isa_bus_dma(ISABus *bus, IsaDma *dma8, IsaDma *dma16); IsaDma *isa_get_dma(ISABus *bus, int nchan); @@ -153,6 +135,4 @@ static inline ISABus *isa_bus_from_device(ISADevice *d) return ISA_BUS(qdev_get_parent_bus(DEVICE(d))); } -#define TYPE_PIIX4_PCI_DEVICE "piix4-isa" - #endif diff --git a/include/hw/isa/vt82c686.h b/include/hw/isa/vt82c686.h index 0f01aaa471..eaa07881c5 100644 --- a/include/hw/isa/vt82c686.h +++ b/include/hw/isa/vt82c686.h @@ -1,11 +1,15 @@ #ifndef HW_VT82C686_H #define HW_VT82C686_H +#include "hw/pci/pci.h" + #define TYPE_VT82C686B_ISA "vt82c686b-isa" -#define TYPE_VT82C686B_PM "vt82c686b-pm" +#define TYPE_VT82C686B_USB_UHCI "vt82c686b-usb-uhci" #define TYPE_VT8231_ISA "vt8231-isa" -#define TYPE_VT8231_PM "vt8231-pm" #define TYPE_VIA_AC97 "via-ac97" +#define TYPE_VIA_IDE "via-ide" #define TYPE_VIA_MC97 "via-mc97" +void via_isa_set_irq(PCIDevice *d, int n, int level); + #endif diff --git a/include/hw/loader.h b/include/hw/loader.h index cbfc184873..70248e0da7 100644 --- a/include/hw/loader.h +++ b/include/hw/loader.h @@ -40,8 +40,8 @@ ssize_t load_image_size(const char *filename, void *addr, size_t size); * * Returns the size of the loaded image on success, -1 otherwise. 
*/ -int load_image_targphys_as(const char *filename, - hwaddr addr, uint64_t max_sz, AddressSpace *as); +ssize_t load_image_targphys_as(const char *filename, + hwaddr addr, uint64_t max_sz, AddressSpace *as); /**load_targphys_hex_as: * @filename: Path to the .hex file @@ -53,14 +53,15 @@ int load_image_targphys_as(const char *filename, * * Returns the size of the loaded .hex file on success, -1 otherwise. */ -int load_targphys_hex_as(const char *filename, hwaddr *entry, AddressSpace *as); +ssize_t load_targphys_hex_as(const char *filename, hwaddr *entry, + AddressSpace *as); /** load_image_targphys: * Same as load_image_targphys_as(), but doesn't allow the caller to specify * an AddressSpace. */ -int load_image_targphys(const char *filename, hwaddr, - uint64_t max_sz); +ssize_t load_image_targphys(const char *filename, hwaddr, + uint64_t max_sz); /** * load_image_mr: load an image into a memory region @@ -73,7 +74,7 @@ int load_image_targphys(const char *filename, hwaddr, * If the file is larger than the memory region's size the call will fail. * Returns -1 on failure, or the size of the file. */ -int load_image_mr(const char *filename, MemoryRegion *mr); +ssize_t load_image_mr(const char *filename, MemoryRegion *mr); /* This is the limit on the maximum uncompressed image size that * load_image_gzipped_buffer() and load_image_gzipped() will read. It prevents @@ -81,16 +82,16 @@ int load_image_mr(const char *filename, MemoryRegion *mr); */ #define LOAD_IMAGE_MAX_GUNZIP_BYTES (256 << 20) -int load_image_gzipped_buffer(const char *filename, uint64_t max_sz, - uint8_t **buffer); -int load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz); +ssize_t load_image_gzipped_buffer(const char *filename, uint64_t max_sz, + uint8_t **buffer); +ssize_t load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz); #define ELF_LOAD_FAILED -1 #define ELF_LOAD_NOT_ELF -2 #define ELF_LOAD_WRONG_ARCH -3 #define ELF_LOAD_WRONG_ENDIAN -4 #define ELF_LOAD_TOO_BIG -5 -const char *load_elf_strerror(int error); +const char *load_elf_strerror(ssize_t error); /** load_elf_ram_sym: * @filename: Path of ELF file @@ -128,48 +129,48 @@ const char *load_elf_strerror(int error); typedef void (*symbol_fn_t)(const char *st_name, int st_info, uint64_t st_value, uint64_t st_size); -int load_elf_ram_sym(const char *filename, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, uint64_t *pentry, - uint64_t *lowaddr, uint64_t *highaddr, uint32_t *pflags, - int big_endian, int elf_machine, - int clear_lsb, int data_swab, - AddressSpace *as, bool load_rom, symbol_fn_t sym_cb); +ssize_t load_elf_ram_sym(const char *filename, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, + uint64_t *lowaddr, uint64_t *highaddr, + uint32_t *pflags, int big_endian, int elf_machine, + int clear_lsb, int data_swab, + AddressSpace *as, bool load_rom, symbol_fn_t sym_cb); /** load_elf_ram: * Same as load_elf_ram_sym(), but doesn't allow the caller to specify a * symbol callback function */ -int load_elf_ram(const char *filename, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, - uint64_t *highaddr, uint32_t *pflags, int big_endian, - int elf_machine, int clear_lsb, int data_swab, - AddressSpace *as, bool load_rom); +ssize_t load_elf_ram(const char *filename, + 
uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, + uint64_t *lowaddr, uint64_t *highaddr, uint32_t *pflags, + int big_endian, int elf_machine, int clear_lsb, + int data_swab, AddressSpace *as, bool load_rom); /** load_elf_as: * Same as load_elf_ram(), but always loads the elf as ROM */ -int load_elf_as(const char *filename, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, - uint64_t *highaddr, uint32_t *pflags, int big_endian, - int elf_machine, int clear_lsb, int data_swab, - AddressSpace *as); +ssize_t load_elf_as(const char *filename, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, + uint64_t *highaddr, uint32_t *pflags, int big_endian, + int elf_machine, int clear_lsb, int data_swab, + AddressSpace *as); /** load_elf: * Same as load_elf_as(), but doesn't allow the caller to specify an * AddressSpace. */ -int load_elf(const char *filename, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, - uint64_t *highaddr, uint32_t *pflags, int big_endian, - int elf_machine, int clear_lsb, int data_swab); +ssize_t load_elf(const char *filename, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, + uint64_t *highaddr, uint32_t *pflags, int big_endian, + int elf_machine, int clear_lsb, int data_swab); /** load_elf_hdr: * @filename: Path of ELF file @@ -183,8 +184,8 @@ int load_elf(const char *filename, */ void load_elf_hdr(const char *filename, void *hdr, bool *is64, Error **errp); -int load_aout(const char *filename, hwaddr addr, int max_sz, - int bswap_needed, hwaddr target_page_size); +ssize_t load_aout(const char *filename, hwaddr addr, int max_sz, + int bswap_needed, hwaddr target_page_size); #define LOAD_UIMAGE_LOADADDR_INVALID (-1) @@ -205,19 +206,19 @@ int load_aout(const char *filename, hwaddr addr, int max_sz, * * Returns the size of the loaded image on success, -1 otherwise. */ -int load_uimage_as(const char *filename, hwaddr *ep, - hwaddr *loadaddr, int *is_linux, - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, AddressSpace *as); +ssize_t load_uimage_as(const char *filename, hwaddr *ep, + hwaddr *loadaddr, int *is_linux, + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, AddressSpace *as); /** load_uimage: * Same as load_uimage_as(), but doesn't allow the caller to specify an * AddressSpace. */ -int load_uimage(const char *filename, hwaddr *ep, - hwaddr *loadaddr, int *is_linux, - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque); +ssize_t load_uimage(const char *filename, hwaddr *ep, + hwaddr *loadaddr, int *is_linux, + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque); /** * load_ramdisk_as: @@ -232,15 +233,15 @@ int load_uimage(const char *filename, hwaddr *ep, * * Returns the size of the loaded image on success, -1 otherwise. 
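/*
 * Illustrative sketch, not part of the patch: with the int -> ssize_t
 * conversion above, callers keep the -1-on-failure convention but can
 * represent images larger than INT_MAX. The file name and load address
 * are placeholders.
 */
static void example_load_kernel(void)
{
    ssize_t kernel_size = load_image_targphys("kernel.bin", 0x80000000,
                                              64 * 1024 * 1024);
    if (kernel_size < 0) {
        error_report("could not load kernel.bin");
        exit(1);
    }
}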
*/ -int load_ramdisk_as(const char *filename, hwaddr addr, uint64_t max_sz, - AddressSpace *as); +ssize_t load_ramdisk_as(const char *filename, hwaddr addr, uint64_t max_sz, + AddressSpace *as); /** * load_ramdisk: * Same as load_ramdisk_as(), but doesn't allow the caller to specify * an AddressSpace. */ -int load_ramdisk(const char *filename, hwaddr addr, uint64_t max_sz); +ssize_t load_ramdisk(const char *filename, hwaddr addr, uint64_t max_sz); ssize_t gunzip(void *dst, size_t dstlen, uint8_t *src, size_t srclen); @@ -253,9 +254,9 @@ void pstrcpy_targphys(const char *name, extern bool option_rom_has_mr; extern bool rom_file_has_mr; -int rom_add_file(const char *file, const char *fw_dir, - hwaddr addr, int32_t bootindex, - bool option_rom, MemoryRegion *mr, AddressSpace *as); +ssize_t rom_add_file(const char *file, const char *fw_dir, + hwaddr addr, int32_t bootindex, + bool option_rom, MemoryRegion *mr, AddressSpace *as); MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len, size_t max_len, hwaddr addr, const char *fw_file_name, @@ -336,17 +337,25 @@ void hmp_info_roms(Monitor *mon, const QDict *qdict); #define rom_add_blob_fixed_as(_f, _b, _l, _a, _as) \ rom_add_blob(_f, _b, _l, _l, _a, NULL, NULL, NULL, _as, true) -#define PC_ROM_MIN_VGA 0xc0000 -#define PC_ROM_MIN_OPTION 0xc8000 -#define PC_ROM_MAX 0xe0000 -#define PC_ROM_ALIGN 0x800 -#define PC_ROM_SIZE (PC_ROM_MAX - PC_ROM_MIN_VGA) - -int rom_add_vga(const char *file); -int rom_add_option(const char *file, int32_t bootindex); +ssize_t rom_add_vga(const char *file); +ssize_t rom_add_option(const char *file, int32_t bootindex); /* This is the usual maximum in uboot, so if a uImage overflows this, it would * overflow on real hardware too. */ #define UBOOT_MAX_GUNZIP_BYTES (64 << 20) +typedef struct RomGap { + hwaddr base; + size_t size; +} RomGap; + +/** + * rom_find_largest_gap_between: return largest gap between ROMs in given range + * + * Given a range of addresses, this function finds the largest + * contiguous subrange which has no ROMs loaded to it. That is, + * it finds the biggest gap which is free for use for other things. + */ +RomGap rom_find_largest_gap_between(hwaddr base, size_t size); + #endif diff --git a/include/hw/loongarch/virt.h b/include/hw/loongarch/virt.h new file mode 100644 index 0000000000..45c383f5a7 --- /dev/null +++ b/include/hw/loongarch/virt.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Definitions for loongarch board emulation. 
+ * + * Copyright (C) 2021 Loongson Technology Corporation Limited + */ + +#ifndef HW_LOONGARCH_H +#define HW_LOONGARCH_H + +#include "target/loongarch/cpu.h" +#include "hw/boards.h" +#include "qemu/queue.h" +#include "hw/intc/loongarch_ipi.h" + +#define LOONGARCH_MAX_VCPUS 4 + +#define VIRT_ISA_IO_BASE 0x18000000UL +#define VIRT_ISA_IO_SIZE 0x0004000 +#define VIRT_FWCFG_BASE 0x1e020000UL +#define VIRT_BIOS_BASE 0x1c000000UL +#define VIRT_BIOS_SIZE (4 * MiB) + +#define VIRT_LOWMEM_BASE 0 +#define VIRT_LOWMEM_SIZE 0x10000000 +#define VIRT_HIGHMEM_BASE 0x90000000 +#define VIRT_GED_EVT_ADDR 0x100e0000 +#define VIRT_GED_MEM_ADDR (VIRT_GED_EVT_ADDR + ACPI_GED_EVT_SEL_LEN) +#define VIRT_GED_REG_ADDR (VIRT_GED_MEM_ADDR + MEMORY_HOTPLUG_IO_LEN) + +struct LoongArchMachineState { + /*< private >*/ + MachineState parent_obj; + + IPICore ipi_core[MAX_IPI_CORE_NUM]; + MemoryRegion lowmem; + MemoryRegion highmem; + MemoryRegion isa_io; + MemoryRegion bios; + bool bios_loaded; + /* State for other subsystems/APIs: */ + FWCfgState *fw_cfg; + Notifier machine_done; + OnOffAuto acpi; + char *oem_id; + char *oem_table_id; + DeviceState *acpi_ged; + int fdt_size; + DeviceState *platform_bus_dev; + PCIBus *pci_bus; +}; + +#define TYPE_LOONGARCH_MACHINE MACHINE_TYPE_NAME("virt") +OBJECT_DECLARE_SIMPLE_TYPE(LoongArchMachineState, LOONGARCH_MACHINE) +bool loongarch_is_acpi_enabled(LoongArchMachineState *lams); +void loongarch_acpi_setup(LoongArchMachineState *lams); +#endif diff --git a/include/hw/m68k/mcf.h b/include/hw/m68k/mcf.h index decf17ce42..8cbd587bbf 100644 --- a/include/hw/m68k/mcf.h +++ b/include/hw/m68k/mcf.h @@ -2,6 +2,7 @@ #define HW_MCF_H /* Motorola ColdFire device prototypes. */ +#include "exec/hwaddr.h" #include "target/m68k/cpu-qom.h" /* mcf_uart.c */ diff --git a/include/hw/mem/nvdimm.h b/include/hw/mem/nvdimm.h index bcf62f825c..acf887c83d 100644 --- a/include/hw/mem/nvdimm.h +++ b/include/hw/mem/nvdimm.h @@ -29,14 +29,6 @@ #include "hw/acpi/aml-build.h" #include "qom/object.h" -#define NVDIMM_DEBUG 0 -#define nvdimm_debug(fmt, ...) \ - do { \ - if (NVDIMM_DEBUG) { \ - fprintf(stderr, "nvdimm: " fmt, ## __VA_ARGS__); \ - } \ - } while (0) - /* * The minimum label data size is required by NVDIMM Namespace * specification, see the chapter 2 Namespaces: @@ -103,6 +95,8 @@ struct NVDIMMClass { /* write @size bytes from @buf to NVDIMM label data at @offset. 
*/ void (*write_label_data)(NVDIMMDevice *nvdimm, const void *buf, uint64_t size, uint64_t offset); + void (*realize)(NVDIMMDevice *nvdimm, Error **errp); + void (*unrealize)(NVDIMMDevice *nvdimm); }; #define NVDIMM_DSM_MEM_FILE "etc/acpi/nvdimm-mem" diff --git a/include/hw/mem/pc-dimm.h b/include/hw/mem/pc-dimm.h index 1473e6db62..322bebe555 100644 --- a/include/hw/mem/pc-dimm.h +++ b/include/hw/mem/pc-dimm.h @@ -63,6 +63,7 @@ struct PCDIMMDeviceClass { /* public */ void (*realize)(PCDIMMDevice *dimm, Error **errp); + void (*unrealize)(PCDIMMDevice *dimm); }; void pc_dimm_pre_plug(PCDIMMDevice *dimm, MachineState *machine, diff --git a/include/hw/mips/bios.h b/include/hw/mips/bios.h index c03007999a..44acb6815b 100644 --- a/include/hw/mips/bios.h +++ b/include/hw/mips/bios.h @@ -5,7 +5,7 @@ #include "cpu.h" #define BIOS_SIZE (4 * MiB) -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN #define BIOS_FILENAME "mips_bios.bin" #else #define BIOS_FILENAME "mipsel_bios.bin" diff --git a/include/hw/mips/bootloader.h b/include/hw/mips/bootloader.h index b5f48d71bb..fffb0b7da8 100644 --- a/include/hw/mips/bootloader.h +++ b/include/hw/mips/bootloader.h @@ -12,8 +12,12 @@ #include "exec/cpu-defs.h" void bl_gen_jump_to(uint32_t **p, target_ulong jump_addr); -void bl_gen_jump_kernel(uint32_t **p, target_ulong sp, target_ulong a0, - target_ulong a1, target_ulong a2, target_ulong a3, +void bl_gen_jump_kernel(uint32_t **p, + bool set_sp, target_ulong sp, + bool set_a0, target_ulong a0, + bool set_a1, target_ulong a1, + bool set_a2, target_ulong a2, + bool set_a3, target_ulong a3, target_ulong kernel_addr); void bl_gen_write_ulong(uint32_t **p, target_ulong addr, target_ulong val); void bl_gen_write_u32(uint32_t **p, target_ulong addr, uint32_t val); diff --git a/include/hw/mips/mips.h b/include/hw/mips/mips.h index 6c9c8805f3..101799f7d3 100644 --- a/include/hw/mips/mips.h +++ b/include/hw/mips/mips.h @@ -9,9 +9,6 @@ #include "exec/memory.h" -/* gt64xxx.c */ -PCIBus *gt64120_register(qemu_irq *pic); - /* bonito.c */ PCIBus *bonito_init(qemu_irq *pic); diff --git a/include/hw/misc/armv7m_ras.h b/include/hw/misc/armv7m_ras.h new file mode 100644 index 0000000000..ba6daccf3f --- /dev/null +++ b/include/hw/misc/armv7m_ras.h @@ -0,0 +1,37 @@ +/* + * Arm M-profile RAS (Reliability, Availability and Serviceability) block + * + * Copyright (c) 2021 Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. + */ + +/* + * This is a model of the RAS register block of an M-profile CPU + * (the registers starting at 0xE0005000 with ERRFRn). + * + * QEMU interface: + * + sysbus MMIO region 0: the register bank + * + * The QEMU implementation currently provides "minimal RAS" only. 
+ */ + +#ifndef HW_MISC_ARMV7M_RAS_H +#define HW_MISC_ARMV7M_RAS_H + +#include "hw/sysbus.h" + +#define TYPE_ARMV7M_RAS "armv7m-ras" +OBJECT_DECLARE_SIMPLE_TYPE(ARMv7MRAS, ARMV7M_RAS) + +struct ARMv7MRAS { + /*< private >*/ + SysBusDevice parent_obj; + + /*< public >*/ + MemoryRegion iomem; +}; + +#endif diff --git a/include/hw/misc/aspeed_hace.h b/include/hw/misc/aspeed_hace.h index 94d5ada95f..ecb1b67de8 100644 --- a/include/hw/misc/aspeed_hace.h +++ b/include/hw/misc/aspeed_hace.h @@ -15,9 +15,12 @@ #define TYPE_ASPEED_AST2400_HACE TYPE_ASPEED_HACE "-ast2400" #define TYPE_ASPEED_AST2500_HACE TYPE_ASPEED_HACE "-ast2500" #define TYPE_ASPEED_AST2600_HACE TYPE_ASPEED_HACE "-ast2600" +#define TYPE_ASPEED_AST1030_HACE TYPE_ASPEED_HACE "-ast1030" + OBJECT_DECLARE_TYPE(AspeedHACEState, AspeedHACEClass, ASPEED_HACE) #define ASPEED_HACE_NR_REGS (0x64 >> 2) +#define ASPEED_HACE_MAX_SG 256 /* max number of entries */ struct AspeedHACEState { SysBusDevice parent; @@ -25,7 +28,10 @@ struct AspeedHACEState { MemoryRegion iomem; qemu_irq irq; + struct iovec iov_cache[ASPEED_HACE_MAX_SG]; uint32_t regs[ASPEED_HACE_NR_REGS]; + uint32_t total_req_len; + uint32_t iov_count; MemoryRegion *dram_mr; AddressSpace dram_as; @@ -37,7 +43,8 @@ struct AspeedHACEClass { uint32_t src_mask; uint32_t dest_mask; + uint32_t key_mask; uint32_t hash_mask; }; -#endif /* _ASPEED_HACE_H_ */ +#endif /* ASPEED_HACE_H */ diff --git a/include/hw/misc/aspeed_i3c.h b/include/hw/misc/aspeed_i3c.h new file mode 100644 index 0000000000..39679dfa1a --- /dev/null +++ b/include/hw/misc/aspeed_i3c.h @@ -0,0 +1,48 @@ +/* + * ASPEED I3C Controller + * + * Copyright (C) 2021 ASPEED Technology Inc. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. + */ + +#ifndef ASPEED_I3C_H +#define ASPEED_I3C_H + +#include "hw/sysbus.h" + +#define TYPE_ASPEED_I3C "aspeed.i3c" +#define TYPE_ASPEED_I3C_DEVICE "aspeed.i3c.device" +OBJECT_DECLARE_TYPE(AspeedI3CState, AspeedI3CClass, ASPEED_I3C) + +#define ASPEED_I3C_NR_REGS (0x70 >> 2) +#define ASPEED_I3C_DEVICE_NR_REGS (0x300 >> 2) +#define ASPEED_I3C_NR_DEVICES 6 + +OBJECT_DECLARE_SIMPLE_TYPE(AspeedI3CDevice, ASPEED_I3C_DEVICE) +typedef struct AspeedI3CDevice { + /* */ + SysBusDevice parent; + + /* */ + MemoryRegion mr; + qemu_irq irq; + + uint8_t id; + uint32_t regs[ASPEED_I3C_DEVICE_NR_REGS]; +} AspeedI3CDevice; + +typedef struct AspeedI3CState { + /* */ + SysBusDevice parent; + + /* */ + MemoryRegion iomem; + MemoryRegion iomem_container; + qemu_irq irq; + + uint32_t regs[ASPEED_I3C_NR_REGS]; + AspeedI3CDevice devices[ASPEED_I3C_NR_DEVICES]; +} AspeedI3CState; +#endif /* ASPEED_I3C_H */ diff --git a/include/hw/misc/aspeed_lpc.h b/include/hw/misc/aspeed_lpc.h index df418cfcd3..fd228731d2 100644 --- a/include/hw/misc/aspeed_lpc.h +++ b/include/hw/misc/aspeed_lpc.h @@ -44,4 +44,4 @@ typedef struct AspeedLPCState { uint32_t hicr7; } AspeedLPCState; -#endif /* _ASPEED_LPC_H_ */ +#endif /* ASPEED_LPC_H */ diff --git a/include/hw/misc/aspeed_peci.h b/include/hw/misc/aspeed_peci.h new file mode 100644 index 0000000000..8382707d9f --- /dev/null +++ b/include/hw/misc/aspeed_peci.h @@ -0,0 +1,29 @@ +/* + * Aspeed PECI Controller + * + * Copyright (c) Meta Platforms, Inc. and affiliates. (http://www.meta.com) + * + * This code is licensed under the GPL version 2 or later. See the COPYING + * file in the top-level directory. 
+ */ + +#ifndef ASPEED_PECI_H +#define ASPEED_PECI_H + +#include "hw/sysbus.h" + +#define ASPEED_PECI_NR_REGS ((0xFC + 4) >> 2) +#define TYPE_ASPEED_PECI "aspeed.peci" +OBJECT_DECLARE_SIMPLE_TYPE(AspeedPECIState, ASPEED_PECI); + +struct AspeedPECIState { + /* */ + SysBusDevice parent; + + MemoryRegion mmio; + qemu_irq irq; + + uint32_t regs[ASPEED_PECI_NR_REGS]; +}; + +#endif diff --git a/include/hw/misc/aspeed_sbc.h b/include/hw/misc/aspeed_sbc.h new file mode 100644 index 0000000000..405e6782b9 --- /dev/null +++ b/include/hw/misc/aspeed_sbc.h @@ -0,0 +1,45 @@ +/* + * ASPEED Secure Boot Controller + * + * Copyright (C) 2021-2022 IBM Corp. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef ASPEED_SBC_H +#define ASPEED_SBC_H + +#include "hw/sysbus.h" + +#define TYPE_ASPEED_SBC "aspeed.sbc" +#define TYPE_ASPEED_AST2600_SBC TYPE_ASPEED_SBC "-ast2600" +OBJECT_DECLARE_TYPE(AspeedSBCState, AspeedSBCClass, ASPEED_SBC) + +#define ASPEED_SBC_NR_REGS (0x93c >> 2) + +#define QSR_AES BIT(27) +#define QSR_RSA1024 (0x0 << 12) +#define QSR_RSA2048 (0x1 << 12) +#define QSR_RSA3072 (0x2 << 12) +#define QSR_RSA4096 (0x3 << 12) +#define QSR_SHA224 (0x0 << 10) +#define QSR_SHA256 (0x1 << 10) +#define QSR_SHA384 (0x2 << 10) +#define QSR_SHA512 (0x3 << 10) + +struct AspeedSBCState { + SysBusDevice parent; + + bool emmc_abr; + uint32_t signing_settings; + + MemoryRegion iomem; + + uint32_t regs[ASPEED_SBC_NR_REGS]; +}; + +struct AspeedSBCClass { + SysBusDeviceClass parent_class; +}; + +#endif /* ASPEED_SBC_H */ diff --git a/include/hw/misc/aspeed_scu.h b/include/hw/misc/aspeed_scu.h index d49bfb02fb..5c7c04eedf 100644 --- a/include/hw/misc/aspeed_scu.h +++ b/include/hw/misc/aspeed_scu.h @@ -19,6 +19,7 @@ OBJECT_DECLARE_TYPE(AspeedSCUState, AspeedSCUClass, ASPEED_SCU) #define TYPE_ASPEED_2400_SCU TYPE_ASPEED_SCU "-ast2400" #define TYPE_ASPEED_2500_SCU TYPE_ASPEED_SCU "-ast2500" #define TYPE_ASPEED_2600_SCU TYPE_ASPEED_SCU "-ast2600" +#define TYPE_ASPEED_1030_SCU TYPE_ASPEED_SCU "-ast1030" #define ASPEED_SCU_NR_REGS (0x1A8 >> 2) #define ASPEED_AST2600_SCU_NR_REGS (0xE20 >> 2) @@ -43,6 +44,10 @@ struct AspeedSCUState { #define AST2500_A1_SILICON_REV 0x04010303U #define AST2600_A0_SILICON_REV 0x05000303U #define AST2600_A1_SILICON_REV 0x05010303U +#define AST2600_A2_SILICON_REV 0x05020303U +#define AST2600_A3_SILICON_REV 0x05030303U +#define AST1030_A0_SILICON_REV 0x80000000U +#define AST1030_A1_SILICON_REV 0x80010000U #define ASPEED_IS_AST2500(si_rev) ((((si_rev) >> 24) & 0xff) == 0x04) @@ -54,8 +59,10 @@ struct AspeedSCUClass { const uint32_t *resets; uint32_t (*calc_hpll)(AspeedSCUState *s, uint32_t hpll_reg); + uint32_t (*get_apb)(AspeedSCUState *s); uint32_t apb_divider; uint32_t nr_regs; + bool clkin_25Mhz; const MemoryRegionOps *ops; }; @@ -314,4 +321,44 @@ uint32_t aspeed_scu_get_apb_freq(AspeedSCUState *s); SCU_HW_STRAP_VGA_SIZE_SET(VGA_16M_DRAM) | \ SCU_AST2500_HW_STRAP_RESERVED1) +/* + * SCU200 H-PLL Parameter Register (for Aspeed AST2600 SOC) + * + * 28:26 H-PLL Parameters + * 25 Enable H-PLL reset + * 24 Enable H-PLL bypass mode + * 23 Turn off H-PLL + * 22:19 H-PLL Post Divider (P) + * 18:13 H-PLL Numerator (M) + * 12:0 H-PLL Denumerator (N) + * + * (Output frequency) = CLKIN(25MHz) * [(M+1) / (N+1)] / (P+1) + * + * The default frequency is 1200Mhz when CLKIN = 25MHz + */ +#define SCU_AST2600_H_PLL_BYPASS_EN (0x1 << 24) +#define SCU_AST2600_H_PLL_OFF (0x1 << 23) + +/* + * SCU310 Clock Selection Register Set 4 (for Aspeed AST1030 SOC) + * + * 31 I3C Clock Source selection + * 30:28 I3C 
clock divider selection + * 26:24 MAC AHB clock divider selection + * 22:20 RGMII 125MHz clock divider ration + * 19:16 RGMII 50MHz clock divider ration + * 15 LHCLK clock generation/output enable control + * 14:12 LHCLK divider selection + * 11:8 APB Bus PCLK divider selection + * 7 Select PECI clock source + * 6 Select UART debug port clock source + * 5 Select UART6 clock source + * 4 Select UART5 clock source + * 3 Select UART4 clock source + * 2 Select UART3 clock source + * 1 Select UART2 clock source + * 0 Select UART1 clock source + */ +#define SCU_AST1030_CLK_GET_PCLK_DIV(x) (((x) >> 8) & 0xf) + #endif /* ASPEED_SCU_H */ diff --git a/include/hw/misc/bcm2835_cprman.h b/include/hw/misc/bcm2835_cprman.h index 3df4ceedd2..0d38036728 100644 --- a/include/hw/misc/bcm2835_cprman.h +++ b/include/hw/misc/bcm2835_cprman.h @@ -6,8 +6,8 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef HW_MISC_CPRMAN_H -#define HW_MISC_CPRMAN_H +#ifndef HW_MISC_BCM2835_CPRMAN_H +#define HW_MISC_BCM2835_CPRMAN_H #include "hw/sysbus.h" #include "hw/qdev-clock.h" diff --git a/include/hw/misc/bcm2835_cprman_internals.h b/include/hw/misc/bcm2835_cprman_internals.h index 339759b307..7617aff96f 100644 --- a/include/hw/misc/bcm2835_cprman_internals.h +++ b/include/hw/misc/bcm2835_cprman_internals.h @@ -6,8 +6,8 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef HW_MISC_CPRMAN_INTERNALS_H -#define HW_MISC_CPRMAN_INTERNALS_H +#ifndef HW_MISC_BCM2835_CPRMAN_INTERNALS_H +#define HW_MISC_BCM2835_CPRMAN_INTERNALS_H #include "hw/registerfields.h" #include "hw/misc/bcm2835_cprman.h" diff --git a/include/hw/misc/lasi.h b/include/hw/misc/lasi.h new file mode 100644 index 0000000000..ecc7065ce8 --- /dev/null +++ b/include/hw/misc/lasi.h @@ -0,0 +1,78 @@ +/* + * HP-PARISC Lasi chipset emulation. + * + * (C) 2019 by Helge Deller + * + * This work is licensed under the GNU GPL license version 2 or later. 
+ * + * Documentation available at: + * https://parisc.wiki.kernel.org/images-parisc/7/79/Lasi_ers.pdf + */ + +#ifndef LASI_H +#define LASI_H + +#include "exec/address-spaces.h" +#include "hw/pci/pci_host.h" +#include "hw/boards.h" + +#define TYPE_LASI_CHIP "lasi-chip" +OBJECT_DECLARE_SIMPLE_TYPE(LasiState, LASI_CHIP) + +#define LASI_IRR 0x00 /* RO */ +#define LASI_IMR 0x04 +#define LASI_IPR 0x08 +#define LASI_ICR 0x0c +#define LASI_IAR 0x10 + +#define LASI_LPT 0x02000 +#define LASI_UART 0x05000 +#define LASI_LAN 0x07000 +#define LASI_RTC 0x09000 + +#define LASI_PCR 0x0C000 /* LASI Power Control register */ +#define LASI_ERRLOG 0x0C004 /* LASI Error Logging register */ +#define LASI_VER 0x0C008 /* LASI Version Control register */ +#define LASI_IORESET 0x0C00C /* LASI I/O Reset register */ +#define LASI_AMR 0x0C010 /* LASI Arbitration Mask register */ +#define LASI_IO_CONF 0x7FFFE /* LASI primary configuration register */ +#define LASI_IO_CONF2 0x7FFFF /* LASI secondary configuration register */ + +#define LASI_BIT(x) (1ul << (x)) +#define LASI_IRQ_BITS (LASI_BIT(5) | LASI_BIT(7) | LASI_BIT(8) | LASI_BIT(9) \ + | LASI_BIT(13) | LASI_BIT(14) | LASI_BIT(16) | LASI_BIT(17) \ + | LASI_BIT(18) | LASI_BIT(19) | LASI_BIT(20) | LASI_BIT(21) \ + | LASI_BIT(26)) + +#define ICR_BUS_ERROR_BIT LASI_BIT(8) /* bit 8 in ICR */ +#define ICR_TOC_BIT LASI_BIT(1) /* bit 1 in ICR */ + +#define LASI_IRQS 27 + +#define LASI_IRQ_HPA 14 +#define LASI_IRQ_UART_HPA 5 +#define LASI_IRQ_LPT_HPA 7 +#define LASI_IRQ_LAN_HPA 8 +#define LASI_IRQ_SCSI_HPA 9 +#define LASI_IRQ_AUDIO_HPA 13 +#define LASI_IRQ_PS2KBD_HPA 26 +#define LASI_IRQ_PS2MOU_HPA 26 + +struct LasiState { + PCIHostState parent_obj; + + uint32_t irr; + uint32_t imr; + uint32_t ipr; + uint32_t icr; + uint32_t iar; + + uint32_t errlog; + uint32_t amr; + uint32_t rtc; + time_t rtc_ref; + + MemoryRegion this_mem; +}; + +#endif diff --git a/include/hw/misc/mac_via.h b/include/hw/misc/mac_via.h index 3058b30685..5fe7a7f592 100644 --- a/include/hw/misc/mac_via.h +++ b/include/hw/misc/mac_via.h @@ -15,20 +15,20 @@ #include "qom/object.h" +#define VIA_SIZE 0x2000 + /* VIA 1 */ -#define VIA1_IRQ_ONE_SECOND_BIT 0 -#define VIA1_IRQ_60HZ_BIT 1 -#define VIA1_IRQ_ADB_READY_BIT 2 -#define VIA1_IRQ_ADB_DATA_BIT 3 -#define VIA1_IRQ_ADB_CLOCK_BIT 4 +#define VIA1_IRQ_ONE_SECOND_BIT CA2_INT_BIT +#define VIA1_IRQ_60HZ_BIT CA1_INT_BIT +#define VIA1_IRQ_ADB_READY_BIT SR_INT_BIT +#define VIA1_IRQ_ADB_DATA_BIT CB2_INT_BIT +#define VIA1_IRQ_ADB_CLOCK_BIT CB1_INT_BIT -#define VIA1_IRQ_NB 8 - -#define VIA1_IRQ_ONE_SECOND (1 << VIA1_IRQ_ONE_SECOND_BIT) -#define VIA1_IRQ_60HZ (1 << VIA1_IRQ_60HZ_BIT) -#define VIA1_IRQ_ADB_READY (1 << VIA1_IRQ_ADB_READY_BIT) -#define VIA1_IRQ_ADB_DATA (1 << VIA1_IRQ_ADB_DATA_BIT) -#define VIA1_IRQ_ADB_CLOCK (1 << VIA1_IRQ_ADB_CLOCK_BIT) +#define VIA1_IRQ_ONE_SECOND BIT(VIA1_IRQ_ONE_SECOND_BIT) +#define VIA1_IRQ_60HZ BIT(VIA1_IRQ_60HZ_BIT) +#define VIA1_IRQ_ADB_READY BIT(VIA1_IRQ_ADB_READY_BIT) +#define VIA1_IRQ_ADB_DATA BIT(VIA1_IRQ_ADB_DATA_BIT) +#define VIA1_IRQ_ADB_CLOCK BIT(VIA1_IRQ_ADB_CLOCK_BIT) #define TYPE_MOS6522_Q800_VIA1 "mos6522-q800-via1" @@ -38,60 +38,16 @@ struct MOS6522Q800VIA1State { /*< private >*/ MOS6522State parent_obj; - qemu_irq irqs[VIA1_IRQ_NB]; + MemoryRegion via_mem; + + qemu_irq auxmode_irq; uint8_t last_b; - uint8_t PRAM[256]; - - /* external timers */ - QEMUTimer *one_second_timer; - int64_t next_second; - QEMUTimer *sixty_hz_timer; - int64_t next_sixty_hz; -}; - - -/* VIA 2 */ -#define VIA2_IRQ_SCSI_DATA_BIT 0 -#define 
VIA2_IRQ_SLOT_BIT 1 -#define VIA2_IRQ_UNUSED_BIT 2 -#define VIA2_IRQ_SCSI_BIT 3 -#define VIA2_IRQ_ASC_BIT 4 - -#define VIA2_IRQ_NB 8 - -#define VIA2_IRQ_SCSI_DATA (1 << VIA2_IRQ_SCSI_DATA_BIT) -#define VIA2_IRQ_SLOT (1 << VIA2_IRQ_SLOT_BIT) -#define VIA2_IRQ_UNUSED (1 << VIA2_IRQ_SCSI_BIT) -#define VIA2_IRQ_SCSI (1 << VIA2_IRQ_UNUSED_BIT) -#define VIA2_IRQ_ASC (1 << VIA2_IRQ_ASC_BIT) - -#define TYPE_MOS6522_Q800_VIA2 "mos6522-q800-via2" -OBJECT_DECLARE_SIMPLE_TYPE(MOS6522Q800VIA2State, MOS6522_Q800_VIA2) - -struct MOS6522Q800VIA2State { - /*< private >*/ - MOS6522State parent_obj; -}; - - -#define TYPE_MAC_VIA "mac_via" -OBJECT_DECLARE_SIMPLE_TYPE(MacVIAState, MAC_VIA) - -struct MacVIAState { - SysBusDevice busdev; - - VMChangeStateEntry *vmstate; - - /* MMIO */ - MemoryRegion mmio; - MemoryRegion via1mem; - MemoryRegion via2mem; - - /* VIAs */ - MOS6522Q800VIA1State mos6522_via1; - MOS6522Q800VIA2State mos6522_via2; /* RTC */ + uint8_t PRAM[256]; + BlockBackend *blk; + VMChangeStateEntry *vmstate; + uint32_t tick_offset; uint8_t data_out; @@ -101,7 +57,6 @@ struct MacVIAState { uint8_t cmd; int wprotect; int alt; - BlockBackend *blk; /* ADB */ ADBBusState adb_bus; @@ -112,6 +67,45 @@ struct MacVIAState { uint8_t adb_data_in[128]; uint8_t adb_data_out[16]; uint8_t adb_autopoll_cmd; + + /* external timers */ + QEMUTimer *one_second_timer; + int64_t next_second; + QEMUTimer *sixty_hz_timer; + int64_t next_sixty_hz; +}; + + +/* VIA 2 */ +#define VIA2_IRQ_SCSI_DATA_BIT CA2_INT_BIT +#define VIA2_IRQ_NUBUS_BIT CA1_INT_BIT +#define VIA2_IRQ_SCSI_BIT CB2_INT_BIT +#define VIA2_IRQ_ASC_BIT CB1_INT_BIT + +#define VIA2_IRQ_SCSI_DATA BIT(VIA2_IRQ_SCSI_DATA_BIT) +#define VIA2_IRQ_NUBUS BIT(VIA2_IRQ_NUBUS_BIT) +#define VIA2_IRQ_UNUSED BIT(VIA2_IRQ_SCSI_BIT) +#define VIA2_IRQ_SCSI BIT(VIA2_IRQ_UNUSED_BIT) +#define VIA2_IRQ_ASC BIT(VIA2_IRQ_ASC_BIT) + +#define VIA2_NUBUS_IRQ_NB 7 + +#define VIA2_NUBUS_IRQ_9 0 +#define VIA2_NUBUS_IRQ_A 1 +#define VIA2_NUBUS_IRQ_B 2 +#define VIA2_NUBUS_IRQ_C 3 +#define VIA2_NUBUS_IRQ_D 4 +#define VIA2_NUBUS_IRQ_E 5 +#define VIA2_NUBUS_IRQ_INTVIDEO 6 + +#define TYPE_MOS6522_Q800_VIA2 "mos6522-q800-via2" +OBJECT_DECLARE_SIMPLE_TYPE(MOS6522Q800VIA2State, MOS6522_Q800_VIA2) + +struct MOS6522Q800VIA2State { + /*< private >*/ + MOS6522State parent_obj; + + MemoryRegion via_mem; }; #endif diff --git a/include/hw/misc/macio/macio.h b/include/hw/misc/macio/macio.h index 6c05f3bfd2..95d30a1745 100644 --- a/include/hw/misc/macio/macio.h +++ b/include/hw/misc/macio/macio.h @@ -33,11 +33,32 @@ #include "hw/misc/macio/cuda.h" #include "hw/misc/macio/gpio.h" #include "hw/misc/macio/pmu.h" -#include "hw/ppc/mac.h" +#include "hw/nvram/mac_nvram.h" #include "hw/ppc/mac_dbdma.h" #include "hw/ppc/openpic.h" #include "qom/object.h" +/* Old World IRQs */ +#define OLDWORLD_CUDA_IRQ 0x12 +#define OLDWORLD_ESCCB_IRQ 0x10 +#define OLDWORLD_ESCCA_IRQ 0xf +#define OLDWORLD_IDE0_IRQ 0xd +#define OLDWORLD_IDE0_DMA_IRQ 0x2 +#define OLDWORLD_IDE1_IRQ 0xe +#define OLDWORLD_IDE1_DMA_IRQ 0x3 + +/* New World IRQs */ +#define NEWWORLD_CUDA_IRQ 0x19 +#define NEWWORLD_PMU_IRQ 0x19 +#define NEWWORLD_ESCCB_IRQ 0x24 +#define NEWWORLD_ESCCA_IRQ 0x25 +#define NEWWORLD_IDE0_IRQ 0xd +#define NEWWORLD_IDE0_DMA_IRQ 0x2 +#define NEWWORLD_IDE1_IRQ 0xe +#define NEWWORLD_IDE1_DMA_IRQ 0x3 +#define NEWWORLD_EXTING_GPIO1 0x2f +#define NEWWORLD_EXTING_GPIO9 0x37 + /* MacIO virtual bus */ #define TYPE_MACIO_BUS "macio-bus" OBJECT_DECLARE_SIMPLE_TYPE(MacIOBusState, MACIO_BUS) diff --git a/include/hw/misc/macio/pmu.h 
b/include/hw/misc/macio/pmu.h index 78237d99a2..00fcdd23f5 100644 --- a/include/hw/misc/macio/pmu.h +++ b/include/hw/misc/macio/pmu.h @@ -193,8 +193,6 @@ struct PMUState { MemoryRegion mem; uint64_t frequency; - qemu_irq via_irq; - bool via_irq_state; /* PMU state */ MOS6522PMUState mos6522_pmu; diff --git a/include/hw/misc/mos6522.h b/include/hw/misc/mos6522.h index fc95d22b0f..0bc22a8395 100644 --- a/include/hw/misc/mos6522.h +++ b/include/hw/misc/mos6522.h @@ -32,6 +32,8 @@ #include "hw/input/adb.h" #include "qom/object.h" +#define MOS6522_NUM_REGS 16 + /* Bits in ACR */ #define SR_CTRL 0x1c /* Shift register control bits */ #define SR_EXT 0x0c /* Shift on external clock */ @@ -41,18 +43,43 @@ #define IER_SET 0x80 /* set bits in IER */ #define IER_CLR 0 /* clear bits in IER */ -#define CA2_INT 0x01 -#define CA1_INT 0x02 -#define SR_INT 0x04 /* Shift register full/empty */ -#define CB2_INT 0x08 -#define CB1_INT 0x10 -#define T2_INT 0x20 /* Timer 2 interrupt */ -#define T1_INT 0x40 /* Timer 1 interrupt */ +#define CA2_INT_BIT 0 +#define CA1_INT_BIT 1 +#define SR_INT_BIT 2 /* Shift register full/empty */ +#define CB2_INT_BIT 3 +#define CB1_INT_BIT 4 +#define T2_INT_BIT 5 /* Timer 2 interrupt */ +#define T1_INT_BIT 6 /* Timer 1 interrupt */ + +#define CA2_INT BIT(CA2_INT_BIT) +#define CA1_INT BIT(CA1_INT_BIT) +#define SR_INT BIT(SR_INT_BIT) +#define CB2_INT BIT(CB2_INT_BIT) +#define CB1_INT BIT(CB1_INT_BIT) +#define T2_INT BIT(T2_INT_BIT) +#define T1_INT BIT(T1_INT_BIT) + +#define VIA_NUM_INTS 5 /* Bits in ACR */ #define T1MODE 0xc0 /* Timer 1 mode */ #define T1MODE_CONT 0x40 /* continuous interrupts */ +/* Bits in PCR */ +#define CB2_CTRL_MASK 0xe0 +#define CB2_CTRL_SHIFT 5 +#define CB1_CTRL_MASK 0x10 +#define CB1_CTRL_SHIFT 4 +#define CA2_CTRL_MASK 0x0e +#define CA2_CTRL_SHIFT 1 +#define CA1_CTRL_MASK 0x1 +#define CA1_CTRL_SHIFT 0 + +#define C2_POS 0x2 +#define C2_IND 0x1 + +#define C1_POS 0x1 + /* VIA registers */ #define VIA_REG_B 0x00 #define VIA_REG_A 0x01 @@ -121,6 +148,7 @@ struct MOS6522State { uint64_t frequency; qemu_irq irq; + uint8_t last_irq_levels; }; #define TYPE_MOS6522 "mos6522" @@ -130,10 +158,8 @@ struct MOS6522DeviceClass { DeviceClass parent_class; DeviceReset parent_reset; - void (*set_sr_int)(MOS6522State *dev); void (*portB_write)(MOS6522State *dev); void (*portA_write)(MOS6522State *dev); - void (*update_irq)(MOS6522State *dev); /* These are used to influence the CUDA MacOS timebase calibration */ uint64_t (*get_timer1_counter_value)(MOS6522State *dev, MOS6522Timer *ti); uint64_t (*get_timer2_counter_value)(MOS6522State *dev, MOS6522Timer *ti); @@ -147,4 +173,6 @@ extern const VMStateDescription vmstate_mos6522; uint64_t mos6522_read(void *opaque, hwaddr addr, unsigned size); void mos6522_write(void *opaque, hwaddr addr, uint64_t val, unsigned size); +void hmp_info_via(Monitor *mon, const QDict *qdict); + #endif /* MOS6522_H */ diff --git a/include/hw/misc/npcm7xx_gcr.h b/include/hw/misc/npcm7xx_gcr.h index 13109d9d32..9419e0a7d2 100644 --- a/include/hw/misc/npcm7xx_gcr.h +++ b/include/hw/misc/npcm7xx_gcr.h @@ -19,6 +19,36 @@ #include "exec/memory.h" #include "hw/sysbus.h" +/* + * NPCM7XX PWRON STRAP bit fields + * 12: SPI0 powered by VSBV3 at 1.8V + * 11: System flash attached to BMC + * 10: BSP alternative pins. + * 9:8: Flash UART command route enabled. + * 7: Security enabled. + * 6: HI-Z state control. + * 5: ECC disabled. + * 4: Reserved + * 3: JTAG2 enabled. + * 2:0: CPU and DRAM clock frequency. 
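The PCR macros introduced in the mos6522.h hunk above (CB2_CTRL_MASK/CB2_CTRL_SHIFT, CA2_CTRL_MASK/CA2_CTRL_SHIFT, plus the C2_POS/C2_IND bits) are plain mask-and-shift accessors for the 6522 Peripheral Control Register. A small sketch, not part of the patch and with hypothetical helper names, of decoding a PCR value with them:

static inline unsigned mos6522_pcr_cb2_mode(uint8_t pcr)
{
    return (pcr & CB2_CTRL_MASK) >> CB2_CTRL_SHIFT;   /* 3-bit CB2 control field */
}

static inline unsigned mos6522_pcr_ca2_mode(uint8_t pcr)
{
    return (pcr & CA2_CTRL_MASK) >> CA2_CTRL_SHIFT;   /* 3-bit CA2 control field */
}

C2_POS and C2_IND would then, presumably, be tested against the extracted mode to distinguish positive-edge and independent-interrupt configurations of the CA2/CB2 lines.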
+ */ +#define NPCM7XX_PWRON_STRAP_SPI0F18 BIT(12) +#define NPCM7XX_PWRON_STRAP_SFAB BIT(11) +#define NPCM7XX_PWRON_STRAP_BSPA BIT(10) +#define NPCM7XX_PWRON_STRAP_FUP(x) ((x) << 8) +#define FUP_NORM_UART2 3 +#define FUP_PROG_UART3 2 +#define FUP_PROG_UART2 1 +#define FUP_NORM_UART3 0 +#define NPCM7XX_PWRON_STRAP_SECEN BIT(7) +#define NPCM7XX_PWRON_STRAP_HIZ BIT(6) +#define NPCM7XX_PWRON_STRAP_ECC BIT(5) +#define NPCM7XX_PWRON_STRAP_RESERVE1 BIT(4) +#define NPCM7XX_PWRON_STRAP_J2EN BIT(3) +#define NPCM7XX_PWRON_STRAP_CKFRQ(x) (x) +#define CKFRQ_SKIPINIT 0x000 +#define CKFRQ_DEFAULT 0x111 + /* * Number of registers in our device state structure. Don't change this without * incrementing the version_id in the vmstate. diff --git a/include/hw/misc/pvpanic.h b/include/hw/misc/pvpanic.h index ca3c5bb533..e520566ab0 100644 --- a/include/hw/misc/pvpanic.h +++ b/include/hw/misc/pvpanic.h @@ -22,14 +22,6 @@ #define PVPANIC_IOPORT_PROP "ioport" -/* The bit of supported pv event, TODO: include uapi header and remove this */ -#define PVPANIC_F_PANICKED 0 -#define PVPANIC_F_CRASHLOADED 1 - -/* The pv event value */ -#define PVPANIC_PANICKED (1 << PVPANIC_F_PANICKED) -#define PVPANIC_CRASHLOADED (1 << PVPANIC_F_CRASHLOADED) - /* * PVPanicState for any device type */ @@ -41,13 +33,4 @@ struct PVPanicState { void pvpanic_setup_io(PVPanicState *s, DeviceState *dev, unsigned size); -static inline uint16_t pvpanic_port(void) -{ - Object *o = object_resolve_path_type("", TYPE_PVPANIC_ISA_DEVICE, NULL); - if (!o) { - return 0; - } - return object_property_get_uint(o, PVPANIC_IOPORT_PROP, NULL); -} - #endif diff --git a/include/hw/misc/stm32f4xx_exti.h b/include/hw/misc/stm32f4xx_exti.h index ea6b0097b0..fc11c595fa 100644 --- a/include/hw/misc/stm32f4xx_exti.h +++ b/include/hw/misc/stm32f4xx_exti.h @@ -22,8 +22,8 @@ * THE SOFTWARE. */ -#ifndef HW_STM_EXTI_H -#define HW_STM_EXTI_H +#ifndef HW_STM32F4XX_EXTI_H +#define HW_STM32F4XX_EXTI_H #include "hw/sysbus.h" #include "qom/object.h" diff --git a/include/hw/misc/stm32f4xx_syscfg.h b/include/hw/misc/stm32f4xx_syscfg.h index 6f8ca49228..9fce67f4b4 100644 --- a/include/hw/misc/stm32f4xx_syscfg.h +++ b/include/hw/misc/stm32f4xx_syscfg.h @@ -22,8 +22,8 @@ * THE SOFTWARE. */ -#ifndef HW_STM_SYSCFG_H -#define HW_STM_SYSCFG_H +#ifndef HW_STM32F4XX_SYSCFG_H +#define HW_STM32F4XX_SYSCFG_H #include "hw/sysbus.h" #include "qom/object.h" diff --git a/include/hw/misc/virt_ctrl.h b/include/hw/misc/virt_ctrl.h index edfadc4695..25a237e518 100644 --- a/include/hw/misc/virt_ctrl.h +++ b/include/hw/misc/virt_ctrl.h @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * Virt system Controller */ diff --git a/include/hw/misc/xlnx-versal-crl.h b/include/hw/misc/xlnx-versal-crl.h new file mode 100644 index 0000000000..2857f4169a --- /dev/null +++ b/include/hw/misc/xlnx-versal-crl.h @@ -0,0 +1,235 @@ +/* + * QEMU model of the Clock-Reset-LPD (CRL). + * + * Copyright (c) 2022 Xilinx Inc. + * SPDX-License-Identifier: GPL-2.0-or-later + * + * Written by Edgar E. 
Iglesias + */ +#ifndef HW_MISC_XLNX_VERSAL_CRL_H +#define HW_MISC_XLNX_VERSAL_CRL_H + +#include "hw/sysbus.h" +#include "hw/register.h" +#include "target/arm/cpu.h" + +#define TYPE_XLNX_VERSAL_CRL "xlnx,versal-crl" +OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCRL, XLNX_VERSAL_CRL) + +REG32(ERR_CTRL, 0x0) + FIELD(ERR_CTRL, SLVERR_ENABLE, 0, 1) +REG32(IR_STATUS, 0x4) + FIELD(IR_STATUS, ADDR_DECODE_ERR, 0, 1) +REG32(IR_MASK, 0x8) + FIELD(IR_MASK, ADDR_DECODE_ERR, 0, 1) +REG32(IR_ENABLE, 0xc) + FIELD(IR_ENABLE, ADDR_DECODE_ERR, 0, 1) +REG32(IR_DISABLE, 0x10) + FIELD(IR_DISABLE, ADDR_DECODE_ERR, 0, 1) +REG32(WPROT, 0x1c) + FIELD(WPROT, ACTIVE, 0, 1) +REG32(PLL_CLK_OTHER_DMN, 0x20) + FIELD(PLL_CLK_OTHER_DMN, APLL_BYPASS, 0, 1) +REG32(RPLL_CTRL, 0x40) + FIELD(RPLL_CTRL, POST_SRC, 24, 3) + FIELD(RPLL_CTRL, PRE_SRC, 20, 3) + FIELD(RPLL_CTRL, CLKOUTDIV, 16, 2) + FIELD(RPLL_CTRL, FBDIV, 8, 8) + FIELD(RPLL_CTRL, BYPASS, 3, 1) + FIELD(RPLL_CTRL, RESET, 0, 1) +REG32(RPLL_CFG, 0x44) + FIELD(RPLL_CFG, LOCK_DLY, 25, 7) + FIELD(RPLL_CFG, LOCK_CNT, 13, 10) + FIELD(RPLL_CFG, LFHF, 10, 2) + FIELD(RPLL_CFG, CP, 5, 4) + FIELD(RPLL_CFG, RES, 0, 4) +REG32(RPLL_FRAC_CFG, 0x48) + FIELD(RPLL_FRAC_CFG, ENABLED, 31, 1) + FIELD(RPLL_FRAC_CFG, SEED, 22, 3) + FIELD(RPLL_FRAC_CFG, ALGRTHM, 19, 1) + FIELD(RPLL_FRAC_CFG, ORDER, 18, 1) + FIELD(RPLL_FRAC_CFG, DATA, 0, 16) +REG32(PLL_STATUS, 0x50) + FIELD(PLL_STATUS, RPLL_STABLE, 2, 1) + FIELD(PLL_STATUS, RPLL_LOCK, 0, 1) +REG32(RPLL_TO_XPD_CTRL, 0x100) + FIELD(RPLL_TO_XPD_CTRL, CLKACT, 25, 1) + FIELD(RPLL_TO_XPD_CTRL, DIVISOR0, 8, 10) +REG32(LPD_TOP_SWITCH_CTRL, 0x104) + FIELD(LPD_TOP_SWITCH_CTRL, CLKACT_ADMA, 26, 1) + FIELD(LPD_TOP_SWITCH_CTRL, CLKACT, 25, 1) + FIELD(LPD_TOP_SWITCH_CTRL, DIVISOR0, 8, 10) + FIELD(LPD_TOP_SWITCH_CTRL, SRCSEL, 0, 3) +REG32(LPD_LSBUS_CTRL, 0x108) + FIELD(LPD_LSBUS_CTRL, CLKACT, 25, 1) + FIELD(LPD_LSBUS_CTRL, DIVISOR0, 8, 10) + FIELD(LPD_LSBUS_CTRL, SRCSEL, 0, 3) +REG32(CPU_R5_CTRL, 0x10c) + FIELD(CPU_R5_CTRL, CLKACT_OCM2, 28, 1) + FIELD(CPU_R5_CTRL, CLKACT_OCM, 27, 1) + FIELD(CPU_R5_CTRL, CLKACT_CORE, 26, 1) + FIELD(CPU_R5_CTRL, CLKACT, 25, 1) + FIELD(CPU_R5_CTRL, DIVISOR0, 8, 10) + FIELD(CPU_R5_CTRL, SRCSEL, 0, 3) +REG32(IOU_SWITCH_CTRL, 0x114) + FIELD(IOU_SWITCH_CTRL, CLKACT, 25, 1) + FIELD(IOU_SWITCH_CTRL, DIVISOR0, 8, 10) + FIELD(IOU_SWITCH_CTRL, SRCSEL, 0, 3) +REG32(GEM0_REF_CTRL, 0x118) + FIELD(GEM0_REF_CTRL, CLKACT_RX, 27, 1) + FIELD(GEM0_REF_CTRL, CLKACT_TX, 26, 1) + FIELD(GEM0_REF_CTRL, CLKACT, 25, 1) + FIELD(GEM0_REF_CTRL, DIVISOR0, 8, 10) + FIELD(GEM0_REF_CTRL, SRCSEL, 0, 3) +REG32(GEM1_REF_CTRL, 0x11c) + FIELD(GEM1_REF_CTRL, CLKACT_RX, 27, 1) + FIELD(GEM1_REF_CTRL, CLKACT_TX, 26, 1) + FIELD(GEM1_REF_CTRL, CLKACT, 25, 1) + FIELD(GEM1_REF_CTRL, DIVISOR0, 8, 10) + FIELD(GEM1_REF_CTRL, SRCSEL, 0, 3) +REG32(GEM_TSU_REF_CTRL, 0x120) + FIELD(GEM_TSU_REF_CTRL, CLKACT, 25, 1) + FIELD(GEM_TSU_REF_CTRL, DIVISOR0, 8, 10) + FIELD(GEM_TSU_REF_CTRL, SRCSEL, 0, 3) +REG32(USB0_BUS_REF_CTRL, 0x124) + FIELD(USB0_BUS_REF_CTRL, CLKACT, 25, 1) + FIELD(USB0_BUS_REF_CTRL, DIVISOR0, 8, 10) + FIELD(USB0_BUS_REF_CTRL, SRCSEL, 0, 3) +REG32(UART0_REF_CTRL, 0x128) + FIELD(UART0_REF_CTRL, CLKACT, 25, 1) + FIELD(UART0_REF_CTRL, DIVISOR0, 8, 10) + FIELD(UART0_REF_CTRL, SRCSEL, 0, 3) +REG32(UART1_REF_CTRL, 0x12c) + FIELD(UART1_REF_CTRL, CLKACT, 25, 1) + FIELD(UART1_REF_CTRL, DIVISOR0, 8, 10) + FIELD(UART1_REF_CTRL, SRCSEL, 0, 3) +REG32(SPI0_REF_CTRL, 0x130) + FIELD(SPI0_REF_CTRL, CLKACT, 25, 1) + FIELD(SPI0_REF_CTRL, DIVISOR0, 8, 10) + FIELD(SPI0_REF_CTRL, SRCSEL, 0, 3) 
+REG32(SPI1_REF_CTRL, 0x134) + FIELD(SPI1_REF_CTRL, CLKACT, 25, 1) + FIELD(SPI1_REF_CTRL, DIVISOR0, 8, 10) + FIELD(SPI1_REF_CTRL, SRCSEL, 0, 3) +REG32(CAN0_REF_CTRL, 0x138) + FIELD(CAN0_REF_CTRL, CLKACT, 25, 1) + FIELD(CAN0_REF_CTRL, DIVISOR0, 8, 10) + FIELD(CAN0_REF_CTRL, SRCSEL, 0, 3) +REG32(CAN1_REF_CTRL, 0x13c) + FIELD(CAN1_REF_CTRL, CLKACT, 25, 1) + FIELD(CAN1_REF_CTRL, DIVISOR0, 8, 10) + FIELD(CAN1_REF_CTRL, SRCSEL, 0, 3) +REG32(I2C0_REF_CTRL, 0x140) + FIELD(I2C0_REF_CTRL, CLKACT, 25, 1) + FIELD(I2C0_REF_CTRL, DIVISOR0, 8, 10) + FIELD(I2C0_REF_CTRL, SRCSEL, 0, 3) +REG32(I2C1_REF_CTRL, 0x144) + FIELD(I2C1_REF_CTRL, CLKACT, 25, 1) + FIELD(I2C1_REF_CTRL, DIVISOR0, 8, 10) + FIELD(I2C1_REF_CTRL, SRCSEL, 0, 3) +REG32(DBG_LPD_CTRL, 0x148) + FIELD(DBG_LPD_CTRL, CLKACT, 25, 1) + FIELD(DBG_LPD_CTRL, DIVISOR0, 8, 10) + FIELD(DBG_LPD_CTRL, SRCSEL, 0, 3) +REG32(TIMESTAMP_REF_CTRL, 0x14c) + FIELD(TIMESTAMP_REF_CTRL, CLKACT, 25, 1) + FIELD(TIMESTAMP_REF_CTRL, DIVISOR0, 8, 10) + FIELD(TIMESTAMP_REF_CTRL, SRCSEL, 0, 3) +REG32(CRL_SAFETY_CHK, 0x150) +REG32(PSM_REF_CTRL, 0x154) + FIELD(PSM_REF_CTRL, DIVISOR0, 8, 10) + FIELD(PSM_REF_CTRL, SRCSEL, 0, 3) +REG32(DBG_TSTMP_CTRL, 0x158) + FIELD(DBG_TSTMP_CTRL, CLKACT, 25, 1) + FIELD(DBG_TSTMP_CTRL, DIVISOR0, 8, 10) + FIELD(DBG_TSTMP_CTRL, SRCSEL, 0, 3) +REG32(CPM_TOPSW_REF_CTRL, 0x15c) + FIELD(CPM_TOPSW_REF_CTRL, CLKACT, 25, 1) + FIELD(CPM_TOPSW_REF_CTRL, DIVISOR0, 8, 10) + FIELD(CPM_TOPSW_REF_CTRL, SRCSEL, 0, 3) +REG32(USB3_DUAL_REF_CTRL, 0x160) + FIELD(USB3_DUAL_REF_CTRL, CLKACT, 25, 1) + FIELD(USB3_DUAL_REF_CTRL, DIVISOR0, 8, 10) + FIELD(USB3_DUAL_REF_CTRL, SRCSEL, 0, 3) +REG32(RST_CPU_R5, 0x300) + FIELD(RST_CPU_R5, RESET_PGE, 4, 1) + FIELD(RST_CPU_R5, RESET_AMBA, 2, 1) + FIELD(RST_CPU_R5, RESET_CPU1, 1, 1) + FIELD(RST_CPU_R5, RESET_CPU0, 0, 1) +REG32(RST_ADMA, 0x304) + FIELD(RST_ADMA, RESET, 0, 1) +REG32(RST_GEM0, 0x308) + FIELD(RST_GEM0, RESET, 0, 1) +REG32(RST_GEM1, 0x30c) + FIELD(RST_GEM1, RESET, 0, 1) +REG32(RST_SPARE, 0x310) + FIELD(RST_SPARE, RESET, 0, 1) +REG32(RST_USB0, 0x314) + FIELD(RST_USB0, RESET, 0, 1) +REG32(RST_UART0, 0x318) + FIELD(RST_UART0, RESET, 0, 1) +REG32(RST_UART1, 0x31c) + FIELD(RST_UART1, RESET, 0, 1) +REG32(RST_SPI0, 0x320) + FIELD(RST_SPI0, RESET, 0, 1) +REG32(RST_SPI1, 0x324) + FIELD(RST_SPI1, RESET, 0, 1) +REG32(RST_CAN0, 0x328) + FIELD(RST_CAN0, RESET, 0, 1) +REG32(RST_CAN1, 0x32c) + FIELD(RST_CAN1, RESET, 0, 1) +REG32(RST_I2C0, 0x330) + FIELD(RST_I2C0, RESET, 0, 1) +REG32(RST_I2C1, 0x334) + FIELD(RST_I2C1, RESET, 0, 1) +REG32(RST_DBG_LPD, 0x338) + FIELD(RST_DBG_LPD, RPU_DBG1_RESET, 5, 1) + FIELD(RST_DBG_LPD, RPU_DBG0_RESET, 4, 1) + FIELD(RST_DBG_LPD, RESET_HSDP, 1, 1) + FIELD(RST_DBG_LPD, RESET, 0, 1) +REG32(RST_GPIO, 0x33c) + FIELD(RST_GPIO, RESET, 0, 1) +REG32(RST_TTC, 0x344) + FIELD(RST_TTC, TTC3_RESET, 3, 1) + FIELD(RST_TTC, TTC2_RESET, 2, 1) + FIELD(RST_TTC, TTC1_RESET, 1, 1) + FIELD(RST_TTC, TTC0_RESET, 0, 1) +REG32(RST_TIMESTAMP, 0x348) + FIELD(RST_TIMESTAMP, RESET, 0, 1) +REG32(RST_SWDT, 0x34c) + FIELD(RST_SWDT, RESET, 0, 1) +REG32(RST_OCM, 0x350) + FIELD(RST_OCM, RESET, 0, 1) +REG32(RST_IPI, 0x354) + FIELD(RST_IPI, RESET, 0, 1) +REG32(RST_SYSMON, 0x358) + FIELD(RST_SYSMON, SEQ_RST, 1, 1) + FIELD(RST_SYSMON, CFG_RST, 0, 1) +REG32(RST_FPD, 0x360) + FIELD(RST_FPD, SRST, 1, 1) + FIELD(RST_FPD, POR, 0, 1) +REG32(PSM_RST_MODE, 0x370) + FIELD(PSM_RST_MODE, WAKEUP, 2, 1) + FIELD(PSM_RST_MODE, RST_MODE, 0, 2) + +#define CRL_R_MAX (R_PSM_RST_MODE + 1) + +#define RPU_MAX_CPU 2 + +struct XlnxVersalCRL { + SysBusDevice 
parent_obj; + qemu_irq irq; + + struct { + ARMCPU *cpu_r5[RPU_MAX_CPU]; + DeviceState *adma[8]; + DeviceState *uart[2]; + DeviceState *gem[2]; + DeviceState *usb; + } cfg; + + RegisterInfoArray *reg_array; + uint32_t regs[CRL_R_MAX]; + RegisterInfo regs_info[CRL_R_MAX]; +}; +#endif diff --git a/include/hw/misc/xlnx-versal-pmc-iou-slcr.h b/include/hw/misc/xlnx-versal-pmc-iou-slcr.h new file mode 100644 index 0000000000..2170420f01 --- /dev/null +++ b/include/hw/misc/xlnx-versal-pmc-iou-slcr.h @@ -0,0 +1,78 @@ +/* + * Header file for the Xilinx Versal's PMC IOU SLCR + * + * Copyright (C) 2021 Xilinx Inc + * Written by Edgar E. Iglesias + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* + * This is a model of Xilinx Versal's PMC I/O Peripheral Control and Status + * module documented in Versal's Technical Reference manual [1] and the Versal + * ACAP Register reference [2]. + * + * References: + * + * [1] Versal ACAP Technical Reference Manual, + * https://www.xilinx.com/support/documentation/architecture-manuals/am011-versal-acap-trm.pdf + * + * [2] Versal ACAP Register Reference, + * https://www.xilinx.com/html_docs/registers/am012/am012-versal-register-reference.html#mod___pmc_iop_slcr.html + * + * QEMU interface: + * + sysbus MMIO region 0: MemoryRegion for the device's registers + * + sysbus IRQ 0: PMC (AXI and APB) parity error interrupt detected by the PMC + * I/O peripherals. + * + sysbus IRQ 1: Device interrupt. + * + Named GPIO output "sd-emmc-sel[0]": Enables 0: SD mode or 1: eMMC mode on + * SD/eMMC controller 0. + * + Named GPIO output "sd-emmc-sel[1]": Enables 0: SD mode or 1: eMMC mode on + * SD/eMMC controller 1. + * + Named GPIO output "qspi-ospi-mux-sel": Selects 0: QSPI linear region or 1: + * OSPI linear region. + * + Named GPIO output "ospi-mux-sel": Selects 0: OSPI Indirect access mode or + * 1: OSPI direct access mode. 
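The REG32()/FIELD() declarations used throughout these Xilinx headers (RPLL_CTRL above, and the register arrays below) come from hw/registerfields.h, which also supplies extract/deposit helpers. A brief sketch, not taken from the patch and with hypothetical function names, of how such fields are usually read and written against the regs[] array of XlnxVersalCRL:

#include "hw/registerfields.h"

static unsigned crl_rpll_fbdiv_sketch(XlnxVersalCRL *s)
{
    /* ARRAY_FIELD_EX32 indexes regs[] with R_RPLL_CTRL and extracts FBDIV */
    return ARRAY_FIELD_EX32(s->regs, RPLL_CTRL, FBDIV);
}

static void crl_rpll_assert_reset_sketch(XlnxVersalCRL *s)
{
    /* ARRAY_FIELD_DP32 deposits the value into the named bit field in place */
    ARRAY_FIELD_DP32(s->regs, RPLL_CTRL, RESET, 1);
}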
+ */ + +#ifndef XLNX_VERSAL_PMC_IOU_SLCR_H +#define XLNX_VERSAL_PMC_IOU_SLCR_H + +#include "hw/register.h" + +#define TYPE_XILINX_VERSAL_PMC_IOU_SLCR "xlnx.versal-pmc-iou-slcr" + +OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalPmcIouSlcr, XILINX_VERSAL_PMC_IOU_SLCR) + +#define XILINX_VERSAL_PMC_IOU_SLCR_R_MAX (0x828 / 4 + 1) + +struct XlnxVersalPmcIouSlcr { + SysBusDevice parent_obj; + MemoryRegion iomem; + qemu_irq irq_parity_imr; + qemu_irq irq_imr; + qemu_irq sd_emmc_sel[2]; + qemu_irq qspi_ospi_mux_sel; + qemu_irq ospi_mux_sel; + + uint32_t regs[XILINX_VERSAL_PMC_IOU_SLCR_R_MAX]; + RegisterInfo regs_info[XILINX_VERSAL_PMC_IOU_SLCR_R_MAX]; +}; + +#endif /* XLNX_VERSAL_PMC_IOU_SLCR_H */ diff --git a/include/hw/misc/xlnx-zynqmp-apu-ctrl.h b/include/hw/misc/xlnx-zynqmp-apu-ctrl.h new file mode 100644 index 0000000000..b8ca9434af --- /dev/null +++ b/include/hw/misc/xlnx-zynqmp-apu-ctrl.h @@ -0,0 +1,93 @@ +/* + * QEMU model of ZynqMP APU Control. + * + * Copyright (c) 2013-2022 Xilinx Inc + * SPDX-License-Identifier: GPL-2.0-or-later + * + * Written by Peter Crosthwaite and + * Edgar E. Iglesias + * + */ +#ifndef HW_MISC_XLNX_ZYNQMP_APU_CTRL_H +#define HW_MISC_XLNX_ZYNQMP_APU_CTRL_H + +#include "hw/sysbus.h" +#include "hw/register.h" +#include "target/arm/cpu.h" + +#define TYPE_XLNX_ZYNQMP_APU_CTRL "xlnx.apu-ctrl" +OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPAPUCtrl, XLNX_ZYNQMP_APU_CTRL) + +REG32(APU_ERR_CTRL, 0x0) + FIELD(APU_ERR_CTRL, PSLVERR, 0, 1) +REG32(ISR, 0x10) + FIELD(ISR, INV_APB, 0, 1) +REG32(IMR, 0x14) + FIELD(IMR, INV_APB, 0, 1) +REG32(IEN, 0x18) + FIELD(IEN, INV_APB, 0, 1) +REG32(IDS, 0x1c) + FIELD(IDS, INV_APB, 0, 1) +REG32(CONFIG_0, 0x20) + FIELD(CONFIG_0, CFGTE, 24, 4) + FIELD(CONFIG_0, CFGEND, 16, 4) + FIELD(CONFIG_0, VINITHI, 8, 4) + FIELD(CONFIG_0, AA64NAA32, 0, 4) +REG32(CONFIG_1, 0x24) + FIELD(CONFIG_1, L2RSTDISABLE, 29, 1) + FIELD(CONFIG_1, L1RSTDISABLE, 28, 1) + FIELD(CONFIG_1, CP15DISABLE, 0, 4) +REG32(RVBARADDR0L, 0x40) + FIELD(RVBARADDR0L, ADDR, 2, 30) +REG32(RVBARADDR0H, 0x44) + FIELD(RVBARADDR0H, ADDR, 0, 8) +REG32(RVBARADDR1L, 0x48) + FIELD(RVBARADDR1L, ADDR, 2, 30) +REG32(RVBARADDR1H, 0x4c) + FIELD(RVBARADDR1H, ADDR, 0, 8) +REG32(RVBARADDR2L, 0x50) + FIELD(RVBARADDR2L, ADDR, 2, 30) +REG32(RVBARADDR2H, 0x54) + FIELD(RVBARADDR2H, ADDR, 0, 8) +REG32(RVBARADDR3L, 0x58) + FIELD(RVBARADDR3L, ADDR, 2, 30) +REG32(RVBARADDR3H, 0x5c) + FIELD(RVBARADDR3H, ADDR, 0, 8) +REG32(ACE_CTRL, 0x60) + FIELD(ACE_CTRL, AWQOS, 16, 4) + FIELD(ACE_CTRL, ARQOS, 0, 4) +REG32(SNOOP_CTRL, 0x80) + FIELD(SNOOP_CTRL, ACE_INACT, 4, 1) + FIELD(SNOOP_CTRL, ACP_INACT, 0, 1) +REG32(PWRCTL, 0x90) + FIELD(PWRCTL, CLREXMONREQ, 17, 1) + FIELD(PWRCTL, L2FLUSHREQ, 16, 1) + FIELD(PWRCTL, CPUPWRDWNREQ, 0, 4) +REG32(PWRSTAT, 0x94) + FIELD(PWRSTAT, CLREXMONACK, 17, 1) + FIELD(PWRSTAT, L2FLUSHDONE, 16, 1) + FIELD(PWRSTAT, DBGNOPWRDWN, 0, 4) + +#define APU_R_MAX ((R_PWRSTAT) + 1) + +#define APU_MAX_CPU 4 + +struct XlnxZynqMPAPUCtrl { + SysBusDevice busdev; + + ARMCPU *cpus[APU_MAX_CPU]; + /* WFIs towards PMU. */ + qemu_irq wfi_out[4]; + /* CPU Power status towards INTC Redirect. 
*/ + qemu_irq cpu_power_status[4]; + qemu_irq irq_imr; + + uint8_t cpu_pwrdwn_req; + uint8_t cpu_in_wfi; + + RegisterInfoArray *reg_array; + uint32_t regs[APU_R_MAX]; + RegisterInfo regs_info[APU_R_MAX]; +}; + +#endif diff --git a/include/hw/misc/xlnx-zynqmp-crf.h b/include/hw/misc/xlnx-zynqmp-crf.h new file mode 100644 index 0000000000..02ef0bdeee --- /dev/null +++ b/include/hw/misc/xlnx-zynqmp-crf.h @@ -0,0 +1,211 @@ +/* + * QEMU model of the CRF - Clock Reset FPD. + * + * Copyright (c) 2022 Xilinx Inc. + * SPDX-License-Identifier: GPL-2.0-or-later + * Written by Edgar E. Iglesias + */ +#ifndef HW_MISC_XLNX_ZYNQMP_CRF_H +#define HW_MISC_XLNX_ZYNQMP_CRF_H + +#include "hw/sysbus.h" +#include "hw/register.h" + +#define TYPE_XLNX_ZYNQMP_CRF "xlnx.zynqmp_crf" +OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPCRF, XLNX_ZYNQMP_CRF) + +REG32(ERR_CTRL, 0x0) + FIELD(ERR_CTRL, SLVERR_ENABLE, 0, 1) +REG32(IR_STATUS, 0x4) + FIELD(IR_STATUS, ADDR_DECODE_ERR, 0, 1) +REG32(IR_MASK, 0x8) + FIELD(IR_MASK, ADDR_DECODE_ERR, 0, 1) +REG32(IR_ENABLE, 0xc) + FIELD(IR_ENABLE, ADDR_DECODE_ERR, 0, 1) +REG32(IR_DISABLE, 0x10) + FIELD(IR_DISABLE, ADDR_DECODE_ERR, 0, 1) +REG32(CRF_WPROT, 0x1c) + FIELD(CRF_WPROT, ACTIVE, 0, 1) +REG32(APLL_CTRL, 0x20) + FIELD(APLL_CTRL, POST_SRC, 24, 3) + FIELD(APLL_CTRL, PRE_SRC, 20, 3) + FIELD(APLL_CTRL, CLKOUTDIV, 17, 1) + FIELD(APLL_CTRL, DIV2, 16, 1) + FIELD(APLL_CTRL, FBDIV, 8, 7) + FIELD(APLL_CTRL, BYPASS, 3, 1) + FIELD(APLL_CTRL, RESET, 0, 1) +REG32(APLL_CFG, 0x24) + FIELD(APLL_CFG, LOCK_DLY, 25, 7) + FIELD(APLL_CFG, LOCK_CNT, 13, 10) + FIELD(APLL_CFG, LFHF, 10, 2) + FIELD(APLL_CFG, CP, 5, 4) + FIELD(APLL_CFG, RES, 0, 4) +REG32(APLL_FRAC_CFG, 0x28) + FIELD(APLL_FRAC_CFG, ENABLED, 31, 1) + FIELD(APLL_FRAC_CFG, SEED, 22, 3) + FIELD(APLL_FRAC_CFG, ALGRTHM, 19, 1) + FIELD(APLL_FRAC_CFG, ORDER, 18, 1) + FIELD(APLL_FRAC_CFG, DATA, 0, 16) +REG32(DPLL_CTRL, 0x2c) + FIELD(DPLL_CTRL, POST_SRC, 24, 3) + FIELD(DPLL_CTRL, PRE_SRC, 20, 3) + FIELD(DPLL_CTRL, CLKOUTDIV, 17, 1) + FIELD(DPLL_CTRL, DIV2, 16, 1) + FIELD(DPLL_CTRL, FBDIV, 8, 7) + FIELD(DPLL_CTRL, BYPASS, 3, 1) + FIELD(DPLL_CTRL, RESET, 0, 1) +REG32(DPLL_CFG, 0x30) + FIELD(DPLL_CFG, LOCK_DLY, 25, 7) + FIELD(DPLL_CFG, LOCK_CNT, 13, 10) + FIELD(DPLL_CFG, LFHF, 10, 2) + FIELD(DPLL_CFG, CP, 5, 4) + FIELD(DPLL_CFG, RES, 0, 4) +REG32(DPLL_FRAC_CFG, 0x34) + FIELD(DPLL_FRAC_CFG, ENABLED, 31, 1) + FIELD(DPLL_FRAC_CFG, SEED, 22, 3) + FIELD(DPLL_FRAC_CFG, ALGRTHM, 19, 1) + FIELD(DPLL_FRAC_CFG, ORDER, 18, 1) + FIELD(DPLL_FRAC_CFG, DATA, 0, 16) +REG32(VPLL_CTRL, 0x38) + FIELD(VPLL_CTRL, POST_SRC, 24, 3) + FIELD(VPLL_CTRL, PRE_SRC, 20, 3) + FIELD(VPLL_CTRL, CLKOUTDIV, 17, 1) + FIELD(VPLL_CTRL, DIV2, 16, 1) + FIELD(VPLL_CTRL, FBDIV, 8, 7) + FIELD(VPLL_CTRL, BYPASS, 3, 1) + FIELD(VPLL_CTRL, RESET, 0, 1) +REG32(VPLL_CFG, 0x3c) + FIELD(VPLL_CFG, LOCK_DLY, 25, 7) + FIELD(VPLL_CFG, LOCK_CNT, 13, 10) + FIELD(VPLL_CFG, LFHF, 10, 2) + FIELD(VPLL_CFG, CP, 5, 4) + FIELD(VPLL_CFG, RES, 0, 4) +REG32(VPLL_FRAC_CFG, 0x40) + FIELD(VPLL_FRAC_CFG, ENABLED, 31, 1) + FIELD(VPLL_FRAC_CFG, SEED, 22, 3) + FIELD(VPLL_FRAC_CFG, ALGRTHM, 19, 1) + FIELD(VPLL_FRAC_CFG, ORDER, 18, 1) + FIELD(VPLL_FRAC_CFG, DATA, 0, 16) +REG32(PLL_STATUS, 0x44) + FIELD(PLL_STATUS, VPLL_STABLE, 5, 1) + FIELD(PLL_STATUS, DPLL_STABLE, 4, 1) + FIELD(PLL_STATUS, APLL_STABLE, 3, 1) + FIELD(PLL_STATUS, VPLL_LOCK, 2, 1) + FIELD(PLL_STATUS, DPLL_LOCK, 1, 1) + FIELD(PLL_STATUS, APLL_LOCK, 0, 1) +REG32(APLL_TO_LPD_CTRL, 0x48) + FIELD(APLL_TO_LPD_CTRL, DIVISOR0, 8, 6) +REG32(DPLL_TO_LPD_CTRL, 0x4c) + 
FIELD(DPLL_TO_LPD_CTRL, DIVISOR0, 8, 6) +REG32(VPLL_TO_LPD_CTRL, 0x50) + FIELD(VPLL_TO_LPD_CTRL, DIVISOR0, 8, 6) +REG32(ACPU_CTRL, 0x60) + FIELD(ACPU_CTRL, CLKACT_HALF, 25, 1) + FIELD(ACPU_CTRL, CLKACT_FULL, 24, 1) + FIELD(ACPU_CTRL, DIVISOR0, 8, 6) + FIELD(ACPU_CTRL, SRCSEL, 0, 3) +REG32(DBG_TRACE_CTRL, 0x64) + FIELD(DBG_TRACE_CTRL, CLKACT, 24, 1) + FIELD(DBG_TRACE_CTRL, DIVISOR0, 8, 6) + FIELD(DBG_TRACE_CTRL, SRCSEL, 0, 3) +REG32(DBG_FPD_CTRL, 0x68) + FIELD(DBG_FPD_CTRL, CLKACT, 24, 1) + FIELD(DBG_FPD_CTRL, DIVISOR0, 8, 6) + FIELD(DBG_FPD_CTRL, SRCSEL, 0, 3) +REG32(DP_VIDEO_REF_CTRL, 0x70) + FIELD(DP_VIDEO_REF_CTRL, CLKACT, 24, 1) + FIELD(DP_VIDEO_REF_CTRL, DIVISOR1, 16, 6) + FIELD(DP_VIDEO_REF_CTRL, DIVISOR0, 8, 6) + FIELD(DP_VIDEO_REF_CTRL, SRCSEL, 0, 3) +REG32(DP_AUDIO_REF_CTRL, 0x74) + FIELD(DP_AUDIO_REF_CTRL, CLKACT, 24, 1) + FIELD(DP_AUDIO_REF_CTRL, DIVISOR1, 16, 6) + FIELD(DP_AUDIO_REF_CTRL, DIVISOR0, 8, 6) + FIELD(DP_AUDIO_REF_CTRL, SRCSEL, 0, 3) +REG32(DP_STC_REF_CTRL, 0x7c) + FIELD(DP_STC_REF_CTRL, CLKACT, 24, 1) + FIELD(DP_STC_REF_CTRL, DIVISOR1, 16, 6) + FIELD(DP_STC_REF_CTRL, DIVISOR0, 8, 6) + FIELD(DP_STC_REF_CTRL, SRCSEL, 0, 3) +REG32(DDR_CTRL, 0x80) + FIELD(DDR_CTRL, CLKACT, 24, 1) + FIELD(DDR_CTRL, DIVISOR0, 8, 6) + FIELD(DDR_CTRL, SRCSEL, 0, 3) +REG32(GPU_REF_CTRL, 0x84) + FIELD(GPU_REF_CTRL, PP1_CLKACT, 26, 1) + FIELD(GPU_REF_CTRL, PP0_CLKACT, 25, 1) + FIELD(GPU_REF_CTRL, CLKACT, 24, 1) + FIELD(GPU_REF_CTRL, DIVISOR0, 8, 6) + FIELD(GPU_REF_CTRL, SRCSEL, 0, 3) +REG32(SATA_REF_CTRL, 0xa0) + FIELD(SATA_REF_CTRL, CLKACT, 24, 1) + FIELD(SATA_REF_CTRL, DIVISOR0, 8, 6) + FIELD(SATA_REF_CTRL, SRCSEL, 0, 3) +REG32(PCIE_REF_CTRL, 0xb4) + FIELD(PCIE_REF_CTRL, CLKACT, 24, 1) + FIELD(PCIE_REF_CTRL, DIVISOR0, 8, 6) + FIELD(PCIE_REF_CTRL, SRCSEL, 0, 3) +REG32(GDMA_REF_CTRL, 0xb8) + FIELD(GDMA_REF_CTRL, CLKACT, 24, 1) + FIELD(GDMA_REF_CTRL, DIVISOR0, 8, 6) + FIELD(GDMA_REF_CTRL, SRCSEL, 0, 3) +REG32(DPDMA_REF_CTRL, 0xbc) + FIELD(DPDMA_REF_CTRL, CLKACT, 24, 1) + FIELD(DPDMA_REF_CTRL, DIVISOR0, 8, 6) + FIELD(DPDMA_REF_CTRL, SRCSEL, 0, 3) +REG32(TOPSW_MAIN_CTRL, 0xc0) + FIELD(TOPSW_MAIN_CTRL, CLKACT, 24, 1) + FIELD(TOPSW_MAIN_CTRL, DIVISOR0, 8, 6) + FIELD(TOPSW_MAIN_CTRL, SRCSEL, 0, 3) +REG32(TOPSW_LSBUS_CTRL, 0xc4) + FIELD(TOPSW_LSBUS_CTRL, CLKACT, 24, 1) + FIELD(TOPSW_LSBUS_CTRL, DIVISOR0, 8, 6) + FIELD(TOPSW_LSBUS_CTRL, SRCSEL, 0, 3) +REG32(DBG_TSTMP_CTRL, 0xf8) + FIELD(DBG_TSTMP_CTRL, DIVISOR0, 8, 6) + FIELD(DBG_TSTMP_CTRL, SRCSEL, 0, 3) +REG32(RST_FPD_TOP, 0x100) + FIELD(RST_FPD_TOP, PCIE_CFG_RESET, 19, 1) + FIELD(RST_FPD_TOP, PCIE_BRIDGE_RESET, 18, 1) + FIELD(RST_FPD_TOP, PCIE_CTRL_RESET, 17, 1) + FIELD(RST_FPD_TOP, DP_RESET, 16, 1) + FIELD(RST_FPD_TOP, SWDT_RESET, 15, 1) + FIELD(RST_FPD_TOP, AFI_FM5_RESET, 12, 1) + FIELD(RST_FPD_TOP, AFI_FM4_RESET, 11, 1) + FIELD(RST_FPD_TOP, AFI_FM3_RESET, 10, 1) + FIELD(RST_FPD_TOP, AFI_FM2_RESET, 9, 1) + FIELD(RST_FPD_TOP, AFI_FM1_RESET, 8, 1) + FIELD(RST_FPD_TOP, AFI_FM0_RESET, 7, 1) + FIELD(RST_FPD_TOP, GDMA_RESET, 6, 1) + FIELD(RST_FPD_TOP, GPU_PP1_RESET, 5, 1) + FIELD(RST_FPD_TOP, GPU_PP0_RESET, 4, 1) + FIELD(RST_FPD_TOP, GPU_RESET, 3, 1) + FIELD(RST_FPD_TOP, GT_RESET, 2, 1) + FIELD(RST_FPD_TOP, SATA_RESET, 1, 1) +REG32(RST_FPD_APU, 0x104) + FIELD(RST_FPD_APU, ACPU3_PWRON_RESET, 13, 1) + FIELD(RST_FPD_APU, ACPU2_PWRON_RESET, 12, 1) + FIELD(RST_FPD_APU, ACPU1_PWRON_RESET, 11, 1) + FIELD(RST_FPD_APU, ACPU0_PWRON_RESET, 10, 1) + FIELD(RST_FPD_APU, APU_L2_RESET, 8, 1) + FIELD(RST_FPD_APU, ACPU3_RESET, 3, 1) + FIELD(RST_FPD_APU, ACPU2_RESET, 2, 
1) + FIELD(RST_FPD_APU, ACPU1_RESET, 1, 1) + FIELD(RST_FPD_APU, ACPU0_RESET, 0, 1) +REG32(RST_DDR_SS, 0x108) + FIELD(RST_DDR_SS, DDR_RESET, 3, 1) + FIELD(RST_DDR_SS, APM_RESET, 2, 1) + +#define CRF_R_MAX (R_RST_DDR_SS + 1) + +struct XlnxZynqMPCRF { + SysBusDevice parent_obj; + MemoryRegion iomem; + qemu_irq irq_ir; + + RegisterInfoArray *reg_array; + uint32_t regs[CRF_R_MAX]; + RegisterInfo regs_info[CRF_R_MAX]; +}; + +#endif diff --git a/include/hw/net/allwinner-sun8i-emac.h b/include/hw/net/allwinner-sun8i-emac.h index 460a58f1ca..185895f4e1 100644 --- a/include/hw/net/allwinner-sun8i-emac.h +++ b/include/hw/net/allwinner-sun8i-emac.h @@ -101,4 +101,4 @@ struct AwSun8iEmacState { }; -#endif /* HW_NET_ALLWINNER_SUN8I_H */ +#endif /* HW_NET_ALLWINNER_SUN8I_EMAC_H */ diff --git a/include/hw/net/mv88w8618_eth.h b/include/hw/net/mv88w8618_eth.h new file mode 100644 index 0000000000..41074940ec --- /dev/null +++ b/include/hw/net/mv88w8618_eth.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Marvell MV88W8618 / Freecom MusicPal emulation. + * + * Copyright (c) 2008-2021 QEMU contributors + */ + +#ifndef HW_NET_MV88W8618_ETH_H +#define HW_NET_MV88W8618_ETH_H + +#define TYPE_MV88W8618_ETH "mv88w8618_eth" + +#endif diff --git a/include/hw/nubus/mac-nubus-bridge.h b/include/hw/nubus/mac-nubus-bridge.h index 36aa098dd4..be4dd83530 100644 --- a/include/hw/nubus/mac-nubus-bridge.h +++ b/include/hw/nubus/mac-nubus-bridge.h @@ -6,19 +6,24 @@ * */ -#ifndef HW_NUBUS_MAC_H -#define HW_NUBUS_MAC_H +#ifndef HW_NUBUS_MAC_NUBUS_BRIDGE_H +#define HW_NUBUS_MAC_NUBUS_BRIDGE_H #include "hw/nubus/nubus.h" #include "qom/object.h" +#define MAC_NUBUS_FIRST_SLOT 0x9 +#define MAC_NUBUS_LAST_SLOT 0xe +#define MAC_NUBUS_SLOT_NB (MAC_NUBUS_LAST_SLOT - MAC_NUBUS_FIRST_SLOT + 1) + #define TYPE_MAC_NUBUS_BRIDGE "mac-nubus-bridge" -OBJECT_DECLARE_SIMPLE_TYPE(MacNubusState, MAC_NUBUS_BRIDGE) +OBJECT_DECLARE_SIMPLE_TYPE(MacNubusBridge, MAC_NUBUS_BRIDGE) -struct MacNubusState { - SysBusDevice sysbus_dev; +struct MacNubusBridge { + NubusBridge parent_obj; - NubusBus *bus; + MemoryRegion super_slot_alias; + MemoryRegion slot_alias; }; #endif diff --git a/include/hw/nubus/nubus.h b/include/hw/nubus/nubus.h index e2b5cf260b..b3b4d2eadb 100644 --- a/include/hw/nubus/nubus.h +++ b/include/hw/nubus/nubus.h @@ -10,17 +10,23 @@ #define HW_NUBUS_NUBUS_H #include "hw/qdev-properties.h" +#include "hw/sysbus.h" #include "exec/address-spaces.h" #include "qom/object.h" +#include "qemu/units.h" #define NUBUS_SUPER_SLOT_SIZE 0x10000000U -#define NUBUS_SUPER_SLOT_NB 0x9 +#define NUBUS_SUPER_SLOT_NB 0xe + +#define NUBUS_SLOT_BASE (NUBUS_SUPER_SLOT_SIZE * \ + (NUBUS_SUPER_SLOT_NB + 1)) #define NUBUS_SLOT_SIZE 0x01000000 -#define NUBUS_SLOT_NB 0xF +#define NUBUS_FIRST_SLOT 0x0 +#define NUBUS_LAST_SLOT 0xf +#define NUBUS_SLOT_NB (NUBUS_LAST_SLOT - NUBUS_FIRST_SLOT + 1) -#define NUBUS_FIRST_SLOT 0x9 -#define NUBUS_LAST_SLOT 0xF +#define NUBUS_IRQS 16 #define TYPE_NUBUS_DEVICE "nubus-device" OBJECT_DECLARE_SIMPLE_TYPE(NubusDevice, NUBUS_DEVICE) @@ -29,40 +35,41 @@ OBJECT_DECLARE_SIMPLE_TYPE(NubusDevice, NUBUS_DEVICE) OBJECT_DECLARE_SIMPLE_TYPE(NubusBus, NUBUS_BUS) #define TYPE_NUBUS_BRIDGE "nubus-bridge" +OBJECT_DECLARE_SIMPLE_TYPE(NubusBridge, NUBUS_BRIDGE); struct NubusBus { BusState qbus; + AddressSpace nubus_as; + MemoryRegion nubus_mr; + MemoryRegion super_slot_io; MemoryRegion slot_io; - int current_slot; + uint16_t slot_available_mask; + + qemu_irq irqs[NUBUS_IRQS]; }; +#define NUBUS_DECL_ROM_MAX_SIZE (128 * KiB) + 
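Reading the geometry macros above, each NuBus slot gets one super-slot window of NUBUS_SUPER_SLOT_SIZE bytes at slot * NUBUS_SUPER_SLOT_SIZE and one standard-slot window of NUBUS_SLOT_SIZE bytes above NUBUS_SLOT_BASE. Two hypothetical helpers, shown only to make the arithmetic concrete:

static inline hwaddr nubus_super_slot_base_sketch(int slot)
{
    return (hwaddr)slot * NUBUS_SUPER_SLOT_SIZE;              /* slot 0x9 -> 0x90000000 */
}

static inline hwaddr nubus_standard_slot_base_sketch(int slot)
{
    return NUBUS_SLOT_BASE + (hwaddr)slot * NUBUS_SLOT_SIZE;  /* slot 0x9 -> 0xf9000000 */
}

NUBUS_SUPER_SLOT_NB stopping at 0xe fits this reading: the super-slot space that slot 0xf would occupy is exactly where the standard-slot windows live.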
struct NubusDevice { DeviceState qdev; - int slot_nb; + int32_t slot; + MemoryRegion super_slot_mem; MemoryRegion slot_mem; - /* Format Block */ - - MemoryRegion fblock_io; - - uint32_t rom_length; - uint32_t rom_crc; - uint8_t rom_rev; - uint8_t rom_format; - uint8_t byte_lanes; - int32_t directory_offset; - - /* ROM */ - - MemoryRegion rom_io; - const uint8_t *rom; + char *romfile; + MemoryRegion decl_rom; }; -void nubus_register_rom(NubusDevice *dev, const uint8_t *rom, uint32_t size, - int revision, int format, uint8_t byte_lanes); +void nubus_set_irq(NubusDevice *nd, int level); + +struct NubusBridge { + SysBusDevice parent_obj; + + NubusBus bus; +}; #endif diff --git a/include/hw/nvram/fw_cfg.h b/include/hw/nvram/fw_cfg.h index 0e7a8bc7af..c1f81a5f13 100644 --- a/include/hw/nvram/fw_cfg.h +++ b/include/hw/nvram/fw_cfg.h @@ -342,4 +342,25 @@ bool fw_cfg_dma_enabled(void *opaque); */ const char *fw_cfg_arch_key_name(uint16_t key); +/** + * load_image_to_fw_cfg() - Load an image file into an fw_cfg entry identified + * by key. + * @fw_cfg: The firmware config instance to store the data in. + * @size_key: The firmware config key to store the size of the loaded + * data under, with fw_cfg_add_i32(). + * @data_key: The firmware config key to store the loaded data under, + * with fw_cfg_add_bytes(). + * @image_name: The name of the image file to load. If it is NULL, the + * function returns without doing anything. + * @try_decompress: Whether the image should be decompressed (gunzipped) before + * adding it to fw_cfg. If decompression fails, the image is + * loaded as-is. + * + * In case of failure, the function prints an error message to stderr and the + * process exits with status 1. + */ +void load_image_to_fw_cfg(FWCfgState *fw_cfg, uint16_t size_key, + uint16_t data_key, const char *image_name, + bool try_decompress); + #endif diff --git a/include/hw/nvram/mac_nvram.h b/include/hw/nvram/mac_nvram.h new file mode 100644 index 0000000000..b780aca470 --- /dev/null +++ b/include/hw/nvram/mac_nvram.h @@ -0,0 +1,51 @@ +/* + * PowerMac NVRAM emulation + * + * Copyright (c) 2004-2007 Fabrice Bellard + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
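The documentation block added to fw_cfg.h above spells out the contract of load_image_to_fw_cfg(): a NULL image name is a no-op and a load failure terminates the process. A usage sketch, not taken from the patch, using the standard FW_CFG_KERNEL_* and FW_CFG_INITRD_* selector keys (the wrapper function itself is hypothetical):

static void sketch_load_boot_images(FWCfgState *fw_cfg,
                                    const char *kernel_filename,
                                    const char *initrd_filename)
{
    /* NULL filenames are simply ignored by load_image_to_fw_cfg() */
    load_image_to_fw_cfg(fw_cfg, FW_CFG_KERNEL_SIZE, FW_CFG_KERNEL_DATA,
                         kernel_filename, true);   /* try to gunzip the kernel */
    load_image_to_fw_cfg(fw_cfg, FW_CFG_INITRD_SIZE, FW_CFG_INITRD_DATA,
                         initrd_filename, false);  /* initrd is loaded as-is */
}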
+ */ + +#ifndef MAC_NVRAM_H +#define MAC_NVRAM_H + +#include "exec/memory.h" +#include "hw/sysbus.h" + +#define MACIO_NVRAM_SIZE 0x2000 + +#define TYPE_MACIO_NVRAM "macio-nvram" +OBJECT_DECLARE_SIMPLE_TYPE(MacIONVRAMState, MACIO_NVRAM) + +struct MacIONVRAMState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + uint32_t size; + uint32_t it_shift; + + MemoryRegion mem; + uint8_t *data; +}; + +void pmac_format_nvram_partition(MacIONVRAMState *nvr, int len); + +#endif /* MAC_NVRAM_H */ diff --git a/include/hw/nvram/xlnx-bbram.h b/include/hw/nvram/xlnx-bbram.h new file mode 100644 index 0000000000..87d59ef3c0 --- /dev/null +++ b/include/hw/nvram/xlnx-bbram.h @@ -0,0 +1,54 @@ +/* + * QEMU model of the Xilinx BBRAM Battery Backed RAM + * + * Copyright (c) 2015-2021 Xilinx Inc. + * + * Written by Edgar E. Iglesias + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef XLNX_BBRAM_H +#define XLNX_BBRAM_H + +#include "sysemu/block-backend.h" +#include "hw/qdev-core.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "hw/register.h" + +#define RMAX_XLNX_BBRAM ((0x4c / 4) + 1) + +#define TYPE_XLNX_BBRAM "xlnx,bbram-ctrl" +OBJECT_DECLARE_SIMPLE_TYPE(XlnxBBRam, XLNX_BBRAM); + +struct XlnxBBRam { + SysBusDevice parent_obj; + qemu_irq irq_bbram; + + BlockBackend *blk; + + uint32_t crc_zpads; + bool bbram8_wo; + bool blk_ro; + + uint32_t regs[RMAX_XLNX_BBRAM]; + RegisterInfo regs_info[RMAX_XLNX_BBRAM]; +}; + +#endif diff --git a/include/hw/nvram/xlnx-efuse.h b/include/hw/nvram/xlnx-efuse.h new file mode 100644 index 0000000000..58414e468b --- /dev/null +++ b/include/hw/nvram/xlnx-efuse.h @@ -0,0 +1,132 @@ +/* + * QEMU model of the Xilinx eFuse core + * + * Copyright (c) 2015 Xilinx Inc. + * + * Written by Edgar E. Iglesias + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef XLNX_EFUSE_H +#define XLNX_EFUSE_H + +#include "sysemu/block-backend.h" +#include "hw/qdev-core.h" + +#define TYPE_XLNX_EFUSE "xlnx,efuse" +OBJECT_DECLARE_SIMPLE_TYPE(XlnxEFuse, XLNX_EFUSE); + +struct XlnxEFuse { + DeviceState parent_obj; + BlockBackend *blk; + bool blk_ro; + uint32_t *fuse32; + + DeviceState *dev; + + bool init_tbits; + + uint8_t efuse_nr; + uint32_t efuse_size; + + uint32_t *ro_bits; + uint32_t ro_bits_cnt; +}; + +/** + * xlnx_efuse_calc_crc: + * @data: an array of 32-bit words for which the CRC should be computed + * @u32_cnt: the array size in number of 32-bit words + * @zpads: the number of 32-bit zeros prepended to @data before computation + * + * This function is used to compute the CRC for an array of 32-bit words, + * using a Xilinx-specific data padding. + * + * Returns: the computed 32-bit CRC + */ +uint32_t xlnx_efuse_calc_crc(const uint32_t *data, unsigned u32_cnt, + unsigned zpads); + +/** + * xlnx_efuse_get_bit: + * @s: the efuse object + * @bit: the efuse bit-address to read the data + * + * Returns: the bit, 0 or 1, at @bit of object @s + */ +bool xlnx_efuse_get_bit(XlnxEFuse *s, unsigned int bit); + +/** + * xlnx_efuse_set_bit: + * @s: the efuse object + * @bit: the efuse bit-address to be written a value of 1 + * + * Returns: true on success, false on failure + */ +bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit); + +/** + * xlnx_efuse_k256_check: + * @s: the efuse object + * @crc: the 32-bit CRC to be compared with + * @start: the efuse bit-address (which must be multiple of 32) of the + * start of a 256-bit array + * + * This function computes the CRC of a 256-bit array starting at @start + * then compares to the given @crc + * + * Returns: true of @crc == computed, false otherwise + */ +bool xlnx_efuse_k256_check(XlnxEFuse *s, uint32_t crc, unsigned start); + +/** + * xlnx_efuse_tbits_check: + * @s: the efuse object + * + * This function inspects a number of efuse bits at specific addresses + * to see if they match a validation pattern. Each pattern is a group + * of 4 bits, and there are 3 groups. + * + * Returns: a 3-bit mask, where a bit of '1' means the corresponding + * group has a valid pattern. + */ +uint32_t xlnx_efuse_tbits_check(XlnxEFuse *s); + +/** + * xlnx_efuse_get_row: + * @s: the efuse object + * @bit: the efuse bit address for which a 32-bit value is read + * + * Returns: the entire 32 bits of the efuse, starting at a bit + * address that is multiple of 32 and contains the bit at @bit + */ +static inline uint32_t xlnx_efuse_get_row(XlnxEFuse *s, unsigned int bit) +{ + if (!(s->fuse32)) { + return 0; + } else { + unsigned int row_idx = bit / 32; + + assert(row_idx < (s->efuse_size * s->efuse_nr / 32)); + return s->fuse32[row_idx]; + } +} + +#endif diff --git a/include/hw/nvram/xlnx-versal-efuse.h b/include/hw/nvram/xlnx-versal-efuse.h new file mode 100644 index 0000000000..a873dc5cb0 --- /dev/null +++ b/include/hw/nvram/xlnx-versal-efuse.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2020 Xilinx Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef XLNX_VERSAL_EFUSE_H +#define XLNX_VERSAL_EFUSE_H + +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "hw/register.h" +#include "hw/nvram/xlnx-efuse.h" + +#define XLNX_VERSAL_EFUSE_CTRL_R_MAX ((0x100 / 4) + 1) + +#define TYPE_XLNX_VERSAL_EFUSE_CTRL "xlnx,versal-efuse" +#define TYPE_XLNX_VERSAL_EFUSE_CACHE "xlnx,pmc-efuse-cache" + +OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalEFuseCtrl, XLNX_VERSAL_EFUSE_CTRL); +OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalEFuseCache, XLNX_VERSAL_EFUSE_CACHE); + +struct XlnxVersalEFuseCtrl { + SysBusDevice parent_obj; + qemu_irq irq_efuse_imr; + + XlnxEFuse *efuse; + + void *extra_pg0_lock_spec; /* Opaque property */ + uint32_t extra_pg0_lock_n16; + + uint32_t regs[XLNX_VERSAL_EFUSE_CTRL_R_MAX]; + RegisterInfo regs_info[XLNX_VERSAL_EFUSE_CTRL_R_MAX]; +}; + +struct XlnxVersalEFuseCache { + SysBusDevice parent_obj; + MemoryRegion iomem; + + XlnxEFuse *efuse; +}; + +/** + * xlnx_versal_efuse_read_row: + * @s: the efuse object + * @bit: the bit-address within the 32-bit row to be read + * @denied: if non-NULL, to receive true if the row is write-only + * + * Returns: the 32-bit word containing address @bit; 0 if @denies is true + */ +uint32_t xlnx_versal_efuse_read_row(XlnxEFuse *s, uint32_t bit, bool *denied); + +#endif diff --git a/include/hw/nvram/xlnx-zynqmp-efuse.h b/include/hw/nvram/xlnx-zynqmp-efuse.h new file mode 100644 index 0000000000..6b051ec4f1 --- /dev/null +++ b/include/hw/nvram/xlnx-zynqmp-efuse.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2021 Xilinx Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef XLNX_ZYNQMP_EFUSE_H +#define XLNX_ZYNQMP_EFUSE_H + +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "hw/register.h" +#include "hw/nvram/xlnx-efuse.h" + +#define XLNX_ZYNQMP_EFUSE_R_MAX ((0x10fc / 4) + 1) + +#define TYPE_XLNX_ZYNQMP_EFUSE "xlnx,zynqmp-efuse" +OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPEFuse, XLNX_ZYNQMP_EFUSE); + +struct XlnxZynqMPEFuse { + SysBusDevice parent_obj; + qemu_irq irq; + + XlnxEFuse *efuse; + uint32_t regs[XLNX_ZYNQMP_EFUSE_R_MAX]; + RegisterInfo regs_info[XLNX_ZYNQMP_EFUSE_R_MAX]; +}; + +#endif diff --git a/include/hw/openrisc/boot.h b/include/hw/openrisc/boot.h new file mode 100644 index 0000000000..25a313d63a --- /dev/null +++ b/include/hw/openrisc/boot.h @@ -0,0 +1,34 @@ +/* + * QEMU OpenRISC boot helpers. + * + * Copyright (c) 2022 Stafford Horne + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef OPENRISC_BOOT_H +#define OPENRISC_BOOT_H + +#include "exec/cpu-defs.h" + +hwaddr openrisc_load_kernel(ram_addr_t ram_size, + const char *kernel_filename, + uint32_t *bootstrap_pc); + +hwaddr openrisc_load_initrd(void *fdt, const char *filename, + hwaddr load_start, uint64_t mem_size); + +uint32_t openrisc_load_fdt(void *fdt, hwaddr load_start, + uint64_t mem_size); + +#endif /* OPENRISC_BOOT_H */ diff --git a/include/hw/pci-bridge/pci_expander_bridge.h b/include/hw/pci-bridge/pci_expander_bridge.h new file mode 100644 index 0000000000..0b3856d615 --- /dev/null +++ b/include/hw/pci-bridge/pci_expander_bridge.h @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef PCI_EXPANDER_BRIDGE_H +#define PCI_EXPANDER_BRIDGE_H + +#include "hw/cxl/cxl.h" + +void pxb_cxl_hook_up_registers(CXLState *state, PCIBus *bus, Error **errp); + +#endif /* PCI_EXPANDER_BRIDGE_H */ diff --git a/include/hw/pci-bridge/xio3130_downstream.h b/include/hw/pci-bridge/xio3130_downstream.h new file mode 100644 index 0000000000..1d10139aea --- /dev/null +++ b/include/hw/pci-bridge/xio3130_downstream.h @@ -0,0 +1,15 @@ +/* + * TI X3130 pci express downstream port switch + * + * Copyright (C) 2022 Igor Mammedov + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_PCI_BRIDGE_XIO3130_DOWNSTREAM_H +#define HW_PCI_BRIDGE_XIO3130_DOWNSTREAM_H + +#define TYPE_XIO3130_DOWNSTREAM "xio3130-downstream" + +#endif + diff --git a/include/hw/pci-host/dino.h b/include/hw/pci-host/dino.h new file mode 100644 index 0000000000..a1b0184940 --- /dev/null +++ b/include/hw/pci-host/dino.h @@ -0,0 +1,146 @@ +/* + * HP-PARISC Dino PCI chipset emulation, as in B160L and similiar machines + * + * (C) 2017-2019 by Helge Deller + * + * This work is licensed under the GNU GPL license version 2 or later. 
+ * + * Documentation available at: + * https://parisc.wiki.kernel.org/images-parisc/9/91/Dino_ers.pdf + * https://parisc.wiki.kernel.org/images-parisc/7/70/Dino_3_1_Errata.pdf + */ + +#ifndef DINO_H +#define DINO_H + +#include "hw/pci/pci_host.h" + +#define TYPE_DINO_PCI_HOST_BRIDGE "dino-pcihost" +OBJECT_DECLARE_SIMPLE_TYPE(DinoState, DINO_PCI_HOST_BRIDGE) + +#define DINO_IAR0 0x004 +#define DINO_IODC 0x008 +#define DINO_IRR0 0x00C /* RO */ +#define DINO_IAR1 0x010 +#define DINO_IRR1 0x014 /* RO */ +#define DINO_IMR 0x018 +#define DINO_IPR 0x01C +#define DINO_TOC_ADDR 0x020 +#define DINO_ICR 0x024 +#define DINO_ILR 0x028 /* RO */ +#define DINO_IO_COMMAND 0x030 /* WO */ +#define DINO_IO_STATUS 0x034 /* RO */ +#define DINO_IO_CONTROL 0x038 +#define DINO_IO_GSC_ERR_RESP 0x040 /* RO */ +#define DINO_IO_ERR_INFO 0x044 /* RO */ +#define DINO_IO_PCI_ERR_RESP 0x048 /* RO */ +#define DINO_IO_FBB_EN 0x05c +#define DINO_IO_ADDR_EN 0x060 +#define DINO_PCI_CONFIG_ADDR 0x064 +#define DINO_PCI_CONFIG_DATA 0x068 +#define DINO_PCI_IO_DATA 0x06c +#define DINO_PCI_MEM_DATA 0x070 /* Dino 3.x only */ +#define DINO_GSC2X_CONFIG 0x7b4 /* RO */ +#define DINO_GMASK 0x800 +#define DINO_PAMR 0x804 +#define DINO_PAPR 0x808 +#define DINO_DAMODE 0x80c +#define DINO_PCICMD 0x810 +#define DINO_PCISTS 0x814 /* R/WC */ +#define DINO_MLTIM 0x81c +#define DINO_BRDG_FEAT 0x820 +#define DINO_PCIROR 0x824 +#define DINO_PCIWOR 0x828 +#define DINO_TLTIM 0x830 + +#define DINO_IRQS 11 /* bits 0-10 are architected */ +#define DINO_IRR_MASK 0x5ff /* only 10 bits are implemented */ +#define DINO_LOCAL_IRQS (DINO_IRQS + 1) +#define DINO_MASK_IRQ(x) (1 << (x)) + +#define DINO_IRQ_PCIINTA 0 +#define DINO_IRQ_PCIINTB 1 +#define DINO_IRQ_PCIINTC 2 +#define DINO_IRQ_PCIINTD 3 +#define DINO_IRQ_PCIINTE 4 +#define DINO_IRQ_PCIINTF 5 +#define DINO_IRQ_GSCEXTINT 6 +#define DINO_IRQ_BUSERRINT 7 +#define DINO_IRQ_PS2INT 8 +#define DINO_IRQ_UNUSED 9 +#define DINO_IRQ_RS232INT 10 + +#define PCIINTA 0x001 +#define PCIINTB 0x002 +#define PCIINTC 0x004 +#define PCIINTD 0x008 +#define PCIINTE 0x010 +#define PCIINTF 0x020 +#define GSCEXTINT 0x040 +/* #define xxx 0x080 - bit 7 is "default" */ +/* #define xxx 0x100 - bit 8 not used */ +/* #define xxx 0x200 - bit 9 not used */ +#define RS232INT 0x400 + +#define DINO_MEM_CHUNK_SIZE (8 * MiB) + +#define DINO800_REGS (1 + (DINO_TLTIM - DINO_GMASK) / 4) +static const uint32_t reg800_keep_bits[DINO800_REGS] = { + MAKE_64BIT_MASK(0, 1), /* GMASK */ + MAKE_64BIT_MASK(0, 7), /* PAMR */ + MAKE_64BIT_MASK(0, 7), /* PAPR */ + MAKE_64BIT_MASK(0, 8), /* DAMODE */ + MAKE_64BIT_MASK(0, 7), /* PCICMD */ + MAKE_64BIT_MASK(0, 9), /* PCISTS */ + MAKE_64BIT_MASK(0, 32), /* Undefined */ + MAKE_64BIT_MASK(0, 8), /* MLTIM */ + MAKE_64BIT_MASK(0, 30), /* BRDG_FEAT */ + MAKE_64BIT_MASK(0, 24), /* PCIROR */ + MAKE_64BIT_MASK(0, 22), /* PCIWOR */ + MAKE_64BIT_MASK(0, 32), /* Undocumented */ + MAKE_64BIT_MASK(0, 9), /* TLTIM */ +}; + +/* offsets to DINO HPA: */ +#define DINO_PCI_ADDR 0x064 +#define DINO_CONFIG_DATA 0x068 +#define DINO_IO_DATA 0x06c + +struct DinoState { + PCIHostState parent_obj; + + /* + * PCI_CONFIG_ADDR is parent_obj.config_reg, via pci_host_conf_be_ops, + * so that we can map PCI_CONFIG_DATA to pci_host_data_be_ops. 
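The reg800_keep_bits[] table above appears to record, for each register in the 0x800..0x830 block, which bits are kept on access; the accompanying comments name each register. A sketch, not part of the patch, of how such a keep-bits table is commonly applied on the write path (the function name is hypothetical and it uses the reg800[] array declared just below):

static void dino_reg800_write_sketch(DinoState *s, hwaddr addr, uint32_t val)
{
    unsigned idx = (addr - DINO_GMASK) / 4;    /* registers sit 4 bytes apart from 0x800 */

    if (idx < DINO800_REGS) {
        s->reg800[idx] = val & reg800_keep_bits[idx];   /* drop bits that are not kept */
    }
}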
+ */ + uint32_t config_reg_dino; /* keep original copy, including 2 lowest bits */ + + uint32_t iar0; + uint32_t iar1; + uint32_t imr; + uint32_t ipr; + uint32_t icr; + uint32_t ilr; + uint32_t io_fbb_en; + uint32_t io_addr_en; + uint32_t io_control; + uint32_t toc_addr; + + uint32_t reg800[DINO800_REGS]; + + MemoryRegion this_mem; + MemoryRegion pci_mem; + MemoryRegion pci_mem_alias[32]; + + MemoryRegion *memory_as; + + AddressSpace bm_as; + MemoryRegion bm; + MemoryRegion bm_ram_alias; + MemoryRegion bm_pci_alias; + MemoryRegion bm_cpu_alias; + + qemu_irq irqs[DINO_IRQS]; +}; + +#endif diff --git a/migration/qemu-file-channel.h b/include/hw/pci-host/grackle.h similarity index 68% rename from migration/qemu-file-channel.h rename to include/hw/pci-host/grackle.h index 0028a09eb6..7ad3a779f0 100644 --- a/migration/qemu-file-channel.h +++ b/include/hw/pci-host/grackle.h @@ -1,7 +1,8 @@ /* - * QEMUFile backend for QIOChannel objects + * QEMU Grackle PCI host (heathrow OldWorld PowerMac) * - * Copyright (c) 2015-2016 Red Hat, Inc + * Copyright (c) 2006-2007 Fabrice Bellard + * Copyright (c) 2007 Jocelyn Mayer * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -22,11 +23,22 @@ * THE SOFTWARE. */ -#ifndef QEMU_FILE_CHANNEL_H -#define QEMU_FILE_CHANNEL_H +#ifndef GRACKLE_H +#define GRACKLE_H -#include "io/channel.h" +#include "hw/pci/pci_host.h" + +#define TYPE_GRACKLE_PCI_HOST_BRIDGE "grackle-pcihost" +OBJECT_DECLARE_SIMPLE_TYPE(GrackleState, GRACKLE_PCI_HOST_BRIDGE) + +struct GrackleState { + PCIHostState parent_obj; + + uint32_t ofw_addr; + qemu_irq irqs[4]; + MemoryRegion pci_mmio; + MemoryRegion pci_hole; + MemoryRegion pci_io; +}; -QEMUFile *qemu_fopen_channel_input(QIOChannel *ioc); -QEMUFile *qemu_fopen_channel_output(QIOChannel *ioc); #endif diff --git a/include/hw/pci-host/i440fx.h b/include/hw/pci-host/i440fx.h index 7fcfd9485c..d02bf1ed6b 100644 --- a/include/hw/pci-host/i440fx.h +++ b/include/hw/pci-host/i440fx.h @@ -35,8 +35,8 @@ struct PCII440FXState { #define TYPE_IGD_PASSTHROUGH_I440FX_PCI_DEVICE "igd-passthrough-i440FX" -PCIBus *i440fx_init(const char *host_type, const char *pci_type, - PCII440FXState **pi440fx_state, +PCIBus *i440fx_init(const char *pci_type, + DeviceState *dev, MemoryRegion *address_space_mem, MemoryRegion *address_space_io, ram_addr_t ram_size, @@ -45,6 +45,5 @@ PCIBus *i440fx_init(const char *host_type, const char *pci_type, MemoryRegion *pci_memory, MemoryRegion *ram_memory); -PCIBus *find_i440fx(void); #endif diff --git a/include/hw/pci-host/ls7a.h b/include/hw/pci-host/ls7a.h new file mode 100644 index 0000000000..df7fa55a30 --- /dev/null +++ b/include/hw/pci-host/ls7a.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch CPU + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#ifndef HW_LS7A_H +#define HW_LS7A_H + +#include "hw/pci/pci.h" +#include "hw/pci/pcie_host.h" +#include "hw/pci-host/pam.h" +#include "qemu/units.h" +#include "qemu/range.h" +#include "qom/object.h" + +#define VIRT_PCI_MEM_BASE 0x40000000UL +#define VIRT_PCI_MEM_SIZE 0x40000000UL +#define VIRT_PCI_IO_OFFSET 0x4000 +#define VIRT_PCI_CFG_BASE 0x20000000 +#define VIRT_PCI_CFG_SIZE 0x08000000 +#define VIRT_PCI_IO_BASE 0x18004000UL +#define VIRT_PCI_IO_SIZE 0xC000 + +#define VIRT_PCH_REG_BASE 0x10000000UL +#define VIRT_IOAPIC_REG_BASE (VIRT_PCH_REG_BASE) +#define VIRT_PCH_MSI_ADDR_LOW 0x2FF00000UL + +/* + * According 
to the kernel pch irq start from 64 offset + * 0 ~ 16 irqs used for non-pci device while 16 ~ 64 irqs + * used for pci device. + */ +#define PCH_PIC_IRQ_OFFSET 64 +#define VIRT_DEVICE_IRQS 16 +#define VIRT_PCI_IRQS 48 +#define VIRT_UART_IRQ (PCH_PIC_IRQ_OFFSET + 2) +#define VIRT_UART_BASE 0x1fe001e0 +#define VIRT_UART_SIZE 0X100 +#define VIRT_RTC_IRQ (PCH_PIC_IRQ_OFFSET + 3) +#define VIRT_MISC_REG_BASE (VIRT_PCH_REG_BASE + 0x00080000) +#define VIRT_RTC_REG_BASE (VIRT_MISC_REG_BASE + 0x00050100) +#define VIRT_RTC_LEN 0x100 +#define VIRT_SCI_IRQ (PCH_PIC_IRQ_OFFSET + 4) + +#define VIRT_PLATFORM_BUS_BASEADDRESS 0x16000000 +#define VIRT_PLATFORM_BUS_SIZE 0x2000000 +#define VIRT_PLATFORM_BUS_NUM_IRQS 2 +#define VIRT_PLATFORM_BUS_IRQ 69 +#endif diff --git a/include/hw/pci-host/pnv_phb3.h b/include/hw/pci-host/pnv_phb3.h index e2a2e36245..4854f6d2f6 100644 --- a/include/hw/pci-host/pnv_phb3.h +++ b/include/hw/pci-host/pnv_phb3.h @@ -14,8 +14,10 @@ #include "hw/pci/pcie_port.h" #include "hw/ppc/xics.h" #include "qom/object.h" +#include "hw/pci-host/pnv_phb.h" typedef struct PnvPHB3 PnvPHB3; +typedef struct PnvChip PnvChip; /* * PHB3 XICS Source for MSIs @@ -102,15 +104,16 @@ struct PnvPBCQState { }; /* - * PHB3 PCIe Root port + * PHB3 PCIe Root Bus */ -#define TYPE_PNV_PHB3_ROOT_BUS "pnv-phb3-root-bus" +#define TYPE_PNV_PHB3_ROOT_BUS "pnv-phb3-root" +struct PnvPHB3RootBus { + PCIBus parent; -#define TYPE_PNV_PHB3_ROOT_PORT "pnv-phb3-root-port" - -typedef struct PnvPHB3RootPort { - PCIESlot parent_obj; -} PnvPHB3RootPort; + uint32_t chip_id; + uint32_t phb_id; +}; +OBJECT_DECLARE_SIMPLE_TYPE(PnvPHB3RootBus, PNV_PHB3_ROOT_BUS) /* * PHB3 PCIe Host Bridge for PowerNV machines (POWER8) @@ -126,7 +129,9 @@ OBJECT_DECLARE_SIMPLE_TYPE(PnvPHB3, PNV_PHB3) #define PCI_MMIO_TOTAL_SIZE (0x1ull << 60) struct PnvPHB3 { - PCIExpressHost parent_obj; + DeviceState parent; + + PnvPHB *phb_base; uint32_t chip_id; uint32_t phb_id; @@ -154,14 +159,15 @@ struct PnvPHB3 { PnvPBCQState pbcq; - PnvPHB3RootPort root; - QLIST_HEAD(, PnvPhb3DMASpace) dma_spaces; + + PnvChip *chip; }; uint64_t pnv_phb3_reg_read(void *opaque, hwaddr off, unsigned size); void pnv_phb3_reg_write(void *opaque, hwaddr off, uint64_t val, unsigned size); void pnv_phb3_update_regions(PnvPHB3 *phb); void pnv_phb3_remap_irqs(PnvPHB3 *phb); +void pnv_phb3_bus_init(DeviceState *dev, PnvPHB3 *phb); #endif /* PCI_HOST_PNV_PHB3_H */ diff --git a/include/hw/pci-host/pnv_phb3_regs.h b/include/hw/pci-host/pnv_phb3_regs.h index a174ef1f70..38f8ce9d74 100644 --- a/include/hw/pci-host/pnv_phb3_regs.h +++ b/include/hw/pci-host/pnv_phb3_regs.h @@ -12,22 +12,6 @@ #include "qemu/host-utils.h" -/* - * QEMU version of the GETFIELD/SETFIELD macros - * - * These are common with the PnvXive model. 
- */ -static inline uint64_t GETFIELD(uint64_t mask, uint64_t word) -{ - return (word & mask) >> ctz64(mask); -} - -static inline uint64_t SETFIELD(uint64_t mask, uint64_t word, - uint64_t value) -{ - return (word & ~mask) | ((value << ctz64(mask)) & mask); -} - /* * PBCQ XSCOM registers */ diff --git a/include/hw/pci-host/pnv_phb4.h b/include/hw/pci-host/pnv_phb4.h index 27556ae534..50d4faa001 100644 --- a/include/hw/pci-host/pnv_phb4.h +++ b/include/hw/pci-host/pnv_phb4.h @@ -15,8 +15,10 @@ #include "hw/ppc/xive.h" #include "qom/object.h" +typedef struct PnvPhb4PecState PnvPhb4PecState; typedef struct PnvPhb4PecStack PnvPhb4PecStack; typedef struct PnvPHB4 PnvPHB4; +typedef struct PnvPHB PnvPHB; typedef struct PnvChip PnvChip; /* @@ -44,14 +46,16 @@ typedef struct PnvPhb4DMASpace { } PnvPhb4DMASpace; /* - * PHB4 PCIe Root port + * PHB4 PCIe Root Bus */ -#define TYPE_PNV_PHB4_ROOT_BUS "pnv-phb4-root-bus" -#define TYPE_PNV_PHB4_ROOT_PORT "pnv-phb4-root-port" +#define TYPE_PNV_PHB4_ROOT_BUS "pnv-phb4-root" +struct PnvPHB4RootBus { + PCIBus parent; -typedef struct PnvPHB4RootPort { - PCIESlot parent_obj; -} PnvPHB4RootPort; + uint32_t chip_id; + uint32_t phb_id; +}; +OBJECT_DECLARE_SIMPLE_TYPE(PnvPHB4RootBus, PNV_PHB4_ROOT_BUS) /* * PHB4 PCIe Host Bridge for PowerNV machines (POWER9) @@ -76,15 +80,15 @@ OBJECT_DECLARE_SIMPLE_TYPE(PnvPHB4, PNV_PHB4) #define PCI_MMIO_TOTAL_SIZE (0x1ull << 60) struct PnvPHB4 { - PCIExpressHost parent_obj; + DeviceState parent; - PnvPHB4RootPort root; + PnvPHB *phb_base; uint32_t chip_id; uint32_t phb_id; - uint64_t version; - uint16_t device_id; + /* The owner PEC */ + PnvPhb4PecState *pec; char bus_path[8]; @@ -109,6 +113,29 @@ struct PnvPHB4 { MemoryRegion pci_mmio; MemoryRegion pci_io; + /* PCI registers (excluding pass-through) */ +#define PHB4_PEC_PCI_STK_REGS_COUNT 0xf + uint64_t pci_regs[PHB4_PEC_PCI_STK_REGS_COUNT]; + MemoryRegion pci_regs_mr; + + /* Nest registers */ +#define PHB4_PEC_NEST_STK_REGS_COUNT 0x17 + uint64_t nest_regs[PHB4_PEC_NEST_STK_REGS_COUNT]; + MemoryRegion nest_regs_mr; + + /* PHB pass-through XSCOM */ + MemoryRegion phb_regs_mr; + + /* Memory windows from PowerBus to PHB */ + MemoryRegion phbbar; + MemoryRegion intbar; + MemoryRegion mmbar0; + MemoryRegion mmbar1; + uint64_t mmio0_base; + uint64_t mmio0_size; + uint64_t mmio1_base; + uint64_t mmio1_size; + /* On-chip IODA tables */ uint64_t ioda_LIST[PNV_PHB4_MAX_LSIs]; uint64_t ioda_MIST[PNV_PHB4_MAX_MIST]; @@ -127,13 +154,12 @@ struct PnvPHB4 { XiveSource xsrc; qemu_irq *qirqs; - PnvPhb4PecStack *stack; - QLIST_HEAD(, PnvPhb4DMASpace) dma_spaces; }; void pnv_phb4_pic_print_info(PnvPHB4 *phb, Monitor *mon); -void pnv_phb4_update_regions(PnvPhb4PecStack *stack); +int pnv_phb4_pec_get_phb_id(PnvPhb4PecState *pec, int stack_index); +void pnv_phb4_bus_init(DeviceState *dev, PnvPHB4 *phb); extern const MemoryRegionOps pnv_phb4_xscom_ops; /* @@ -142,46 +168,6 @@ extern const MemoryRegionOps pnv_phb4_xscom_ops; #define TYPE_PNV_PHB4_PEC "pnv-phb4-pec" OBJECT_DECLARE_TYPE(PnvPhb4PecState, PnvPhb4PecClass, PNV_PHB4_PEC) -#define TYPE_PNV_PHB4_PEC_STACK "pnv-phb4-pec-stack" -OBJECT_DECLARE_SIMPLE_TYPE(PnvPhb4PecStack, PNV_PHB4_PEC_STACK) - -/* Per-stack data */ -struct PnvPhb4PecStack { - DeviceState parent; - - /* My own stack number */ - uint32_t stack_no; - - /* Nest registers */ -#define PHB4_PEC_NEST_STK_REGS_COUNT 0x17 - uint64_t nest_regs[PHB4_PEC_NEST_STK_REGS_COUNT]; - MemoryRegion nest_regs_mr; - - /* PCI registers (excluding pass-through) */ -#define PHB4_PEC_PCI_STK_REGS_COUNT 
0xf - uint64_t pci_regs[PHB4_PEC_PCI_STK_REGS_COUNT]; - MemoryRegion pci_regs_mr; - - /* PHB pass-through XSCOM */ - MemoryRegion phb_regs_mr; - - /* Memory windows from PowerBus to PHB */ - MemoryRegion mmbar0; - MemoryRegion mmbar1; - MemoryRegion phbbar; - MemoryRegion intbar; - uint64_t mmio0_base; - uint64_t mmio0_size; - uint64_t mmio1_base; - uint64_t mmio1_size; - - /* The owner PEC */ - PnvPhb4PecState *pec; - - /* The actual PHB */ - PnvPHB4 phb; -}; - struct PnvPhb4PecState { DeviceState parent; @@ -197,14 +183,14 @@ struct PnvPhb4PecState { MemoryRegion nest_regs_mr; /* PCI registers, excluding per-stack */ -#define PHB4_PEC_PCI_REGS_COUNT 0x2 +#define PHB4_PEC_PCI_REGS_COUNT 0x3 uint64_t pci_regs[PHB4_PEC_PCI_REGS_COUNT]; MemoryRegion pci_regs_mr; - /* Stacks */ - #define PHB4_PEC_MAX_STACKS 3 - uint32_t num_stacks; - PnvPhb4PecStack stacks[PHB4_PEC_MAX_STACKS]; + /* PHBs */ + uint32_t num_phbs; + + PnvChip *chip; }; @@ -219,6 +205,24 @@ struct PnvPhb4PecClass { int compat_size; const char *stk_compat; int stk_compat_size; + uint64_t version; + const char *phb_type; + const uint32_t *num_phbs; }; +/* + * POWER10 definitions + */ + +#define TYPE_PNV_PHB5 "pnv-phb5" +#define PNV_PHB5(obj) \ + OBJECT_CHECK(PnvPhb4, (obj), TYPE_PNV_PHB5) + +#define PNV_PHB5_VERSION 0x000000a500000001ull +#define PNV_PHB5_DEVICE_ID 0x0652 + +#define TYPE_PNV_PHB5_PEC "pnv-phb5-pec" +#define PNV_PHB5_PEC(obj) \ + OBJECT_CHECK(PnvPhb4PecState, (obj), TYPE_PNV_PHB5_PEC) + #endif /* PCI_HOST_PNV_PHB4_H */ diff --git a/include/hw/pci-host/pnv_phb4_regs.h b/include/hw/pci-host/pnv_phb4_regs.h index 55df2c3e5e..4a0d3b28ef 100644 --- a/include/hw/pci-host/pnv_phb4_regs.h +++ b/include/hw/pci-host/pnv_phb4_regs.h @@ -220,11 +220,14 @@ #define PHB_PAPR_ERR_INJ_MASK_MMIO PPC_BITMASK(16, 63) #define PHB_ETU_ERR_SUMMARY 0x2c8 #define PHB_INT_NOTIFY_ADDR 0x300 +#define PHB_INT_NOTIFY_ADDR_64K PPC_BIT(1) /* P10 */ #define PHB_INT_NOTIFY_INDEX 0x308 /* Fundamental register set B */ #define PHB_VERSION 0x800 #define PHB_CTRLR 0x810 +#define PHB_CTRLR_IRQ_PQ_DISABLE PPC_BIT(9) /* P10 */ +#define PHB_CTRLR_IRQ_ABT_MODE PPC_BIT(10) /* P10 */ #define PHB_CTRLR_IRQ_PGSZ_64K PPC_BIT(11) #define PHB_CTRLR_IRQ_STORE_EOI PPC_BIT(12) #define PHB_CTRLR_MMIO_RD_STRICT PPC_BIT(13) diff --git a/include/hw/pci-host/remote.h b/include/hw/pci-host/remote.h index 3dcf6aa51d..690a01f0fe 100644 --- a/include/hw/pci-host/remote.h +++ b/include/hw/pci-host/remote.h @@ -8,8 +8,8 @@ * */ -#ifndef REMOTE_PCIHOST_H -#define REMOTE_PCIHOST_H +#ifndef PCI_HOST_REMOTE_H +#define PCI_HOST_REMOTE_H #include "exec/memory.h" #include "hw/pci/pcie_host.h" diff --git a/include/hw/pci/msi.h b/include/hw/pci/msi.h index 4087688486..58aa576215 100644 --- a/include/hw/pci/msi.h +++ b/include/hw/pci/msi.h @@ -43,6 +43,7 @@ void msi_notify(PCIDevice *dev, unsigned int vector); void msi_send_message(PCIDevice *dev, MSIMessage msg); void msi_write_config(PCIDevice *dev, uint32_t addr, uint32_t val, int len); unsigned int msi_nr_vectors_allocated(const PCIDevice *dev); +void msi_set_mask(PCIDevice *dev, int vector, bool mask, Error **errp); static inline bool msi_present(const PCIDevice *dev) { diff --git a/include/hw/pci/msix.h b/include/hw/pci/msix.h index 4c4a60c739..0e6f257e45 100644 --- a/include/hw/pci/msix.h +++ b/include/hw/pci/msix.h @@ -33,9 +33,10 @@ bool msix_is_masked(PCIDevice *dev, unsigned vector); void msix_set_pending(PCIDevice *dev, unsigned vector); void msix_clr_pending(PCIDevice *dev, int vector); -int msix_vector_use(PCIDevice *dev, 
unsigned vector); +void msix_vector_use(PCIDevice *dev, unsigned vector); void msix_vector_unuse(PCIDevice *dev, unsigned vector); void msix_unuse_all_vectors(PCIDevice *dev); +void msix_set_mask(PCIDevice *dev, int vector, bool mask); void msix_notify(PCIDevice *dev, unsigned vector); diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h index d0f4266e37..6ccaaf5154 100644 --- a/include/hw/pci/pci.h +++ b/include/hw/pci/pci.h @@ -7,9 +7,6 @@ /* PCI includes legacy ISA access. */ #include "hw/isa/isa.h" -#include "hw/pci/pcie.h" -#include "qom/object.h" - extern bool pci_available; /* PCI bus */ @@ -19,6 +16,7 @@ extern bool pci_available; #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) #define PCI_FUNC(devfn) ((devfn) & 0x07) #define PCI_BUILD_BDF(bus, devfn) ((bus << 8) | (devfn)) +#define PCI_BDF_TO_DEVFN(x) ((x) & 0xff) #define PCI_BUS_MAX 256 #define PCI_DEVFN_MAX 256 #define PCI_SLOT_MAX 32 @@ -78,6 +76,7 @@ extern bool pci_available; #define PCI_SUBVENDOR_ID_REDHAT_QUMRANET 0x1af4 #define PCI_SUBDEVICE_ID_QEMU 0x1100 +/* legacy virtio-pci devices */ #define PCI_DEVICE_ID_VIRTIO_NET 0x1000 #define PCI_DEVICE_ID_VIRTIO_BLOCK 0x1001 #define PCI_DEVICE_ID_VIRTIO_BALLOON 0x1002 @@ -86,9 +85,15 @@ extern bool pci_available; #define PCI_DEVICE_ID_VIRTIO_RNG 0x1005 #define PCI_DEVICE_ID_VIRTIO_9P 0x1009 #define PCI_DEVICE_ID_VIRTIO_VSOCK 0x1012 -#define PCI_DEVICE_ID_VIRTIO_PMEM 0x1013 -#define PCI_DEVICE_ID_VIRTIO_IOMMU 0x1014 -#define PCI_DEVICE_ID_VIRTIO_MEM 0x1015 + +/* + * modern virtio-pci devices get their id assigned automatically, + * there is no need to add #defines here. It gets calculated as + * + * PCI_DEVICE_ID = PCI_DEVICE_ID_VIRTIO_10_BASE + + * virtio_bus_get_vdev_id(bus) + */ +#define PCI_DEVICE_ID_VIRTIO_10_BASE 0x1040 #define PCI_VENDOR_ID_REDHAT 0x1b36 #define PCI_DEVICE_ID_REDHAT_BRIDGE 0x0001 @@ -108,6 +113,7 @@ extern bool pci_available; #define PCI_DEVICE_ID_REDHAT_MDPY 0x000f #define PCI_DEVICE_ID_REDHAT_NVME 0x0010 #define PCI_DEVICE_ID_REDHAT_PVPANIC 0x0011 +#define PCI_DEVICE_ID_REDHAT_ACPI_ERST 0x0012 #define PCI_DEVICE_ID_REDHAT_QXL 0x0100 #define FMT_PCIBUS PRIx64 @@ -129,6 +135,10 @@ typedef void PCIMapIORegionFunc(PCIDevice *pci_dev, int region_num, pcibus_t addr, pcibus_t size, int type); typedef void PCIUnregisterFunc(PCIDevice *pci_dev); +typedef void MSITriggerFunc(PCIDevice *dev, MSIMessage msg); +typedef MSIMessage MSIPrepareMessageFunc(PCIDevice *dev, unsigned vector); +typedef MSIMessage MSIxPrepareMessageFunc(PCIDevice *dev, unsigned vector); + typedef struct PCIIORegion { pcibus_t addr; /* current PCI mapping address. -1 means not mapped */ #define PCI_BAR_UNMAPPED (~(pcibus_t)0) @@ -156,6 +166,7 @@ enum { #define QEMU_PCI_VGA_IO_HI_SIZE 0x20 #include "hw/pci/pci_regs.h" +#include "hw/pci/pcie.h" /* PCI HEADER_TYPE */ #define PCI_HEADER_TYPE_MULTI_FUNCTION 0x80 @@ -195,6 +206,8 @@ enum { QEMU_PCIE_LNKSTA_DLLLA = (1 << QEMU_PCIE_LNKSTA_DLLLA_BITNR), #define QEMU_PCIE_EXTCAP_INIT_BITNR 9 QEMU_PCIE_EXTCAP_INIT = (1 << QEMU_PCIE_EXTCAP_INIT_BITNR), +#define QEMU_PCIE_CXL_BITNR 10 + QEMU_PCIE_CAP_CXL = (1 << QEMU_PCIE_CXL_BITNR), }; #define TYPE_PCI_DEVICE "pci-device" @@ -202,6 +215,12 @@ typedef struct PCIDeviceClass PCIDeviceClass; DECLARE_OBJ_CHECKERS(PCIDevice, PCIDeviceClass, PCI_DEVICE, TYPE_PCI_DEVICE) +/* + * Implemented by devices that can be plugged on CXL buses. In the spec, this is + * actually a "CXL Component, but we name it device to match the PCI naming. 
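/*
 * Worked example for the virtio 1.0 device-ID rule quoted earlier in this
 * header (hypothetical helper, not part of the patched file): with the
 * standard virtio device IDs (net = 1, block = 2), a modern device ends up
 * with PCI device ID 0x1040 + 1 = 0x1041 for virtio-net and
 * 0x1040 + 2 = 0x1042 for virtio-blk, which is why no per-device #define is
 * needed any more; virtio_bus_get_vdev_id() supplies the ID in the real code.
 */
static inline uint16_t example_virtio_modern_pci_device_id(uint16_t vdev_id)
{
    /* e.g. example_virtio_modern_pci_device_id(1) == 0x1041 (virtio-net) */
    return PCI_DEVICE_ID_VIRTIO_10_BASE + vdev_id;
}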
+ */ +#define INTERFACE_CXL_DEVICE "cxl-device" + /* Implemented by devices that can be plugged on PCI Express buses */ #define INTERFACE_PCIE_DEVICE "pci-express-device" @@ -268,6 +287,7 @@ typedef struct PCIReqIDCache PCIReqIDCache; struct PCIDevice { DeviceState qdev; bool partially_hotplugged; + bool has_power; /* PCI config space */ uint8_t *config; @@ -321,6 +341,14 @@ struct PCIDevice { /* Space to store MSIX table & pending bit array */ uint8_t *msix_table; uint8_t *msix_pba; + + /* May be used by INTx or MSI during interrupt notification */ + void *irq_opaque; + + MSITriggerFunc *msi_trigger; + MSIPrepareMessageFunc *msi_prepare_message; + MSIxPrepareMessageFunc *msix_prepare_message; + /* MemoryRegion container for msix exclusive BAR setup */ MemoryRegion msix_exclusive_bar; /* Memory Regions for MSIX table and pending bit entries. */ @@ -400,14 +428,19 @@ typedef PCIINTxRoute (*pci_route_irq_fn)(void *opaque, int pin); #define TYPE_PCI_BUS "PCI" OBJECT_DECLARE_TYPE(PCIBus, PCIBusClass, PCI_BUS) #define TYPE_PCIE_BUS "PCIE" +#define TYPE_CXL_BUS "CXL" + +typedef void (*pci_bus_dev_fn)(PCIBus *b, PCIDevice *d, void *opaque); +typedef void (*pci_bus_fn)(PCIBus *b, void *opaque); +typedef void *(*pci_bus_ret_fn)(PCIBus *b, void *opaque); bool pci_bus_is_express(PCIBus *bus); -void pci_root_bus_new_inplace(PCIBus *bus, size_t bus_size, DeviceState *parent, - const char *name, - MemoryRegion *address_space_mem, - MemoryRegion *address_space_io, - uint8_t devfn_min, const char *typename); +void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent, + const char *name, + MemoryRegion *address_space_mem, + MemoryRegion *address_space_io, + uint8_t devfn_min, const char *typename); PCIBus *pci_root_bus_new(DeviceState *parent, const char *name, MemoryRegion *address_space_mem, MemoryRegion *address_space_io, @@ -458,23 +491,23 @@ static inline int pci_dev_bus_num(const PCIDevice *dev) int pci_bus_numa_node(PCIBus *bus); void pci_for_each_device(PCIBus *bus, int bus_num, - void (*fn)(PCIBus *bus, PCIDevice *d, void *opaque), + pci_bus_dev_fn fn, void *opaque); void pci_for_each_device_reverse(PCIBus *bus, int bus_num, - void (*fn)(PCIBus *bus, PCIDevice *d, - void *opaque), + pci_bus_dev_fn fn, void *opaque); -void pci_for_each_bus_depth_first(PCIBus *bus, - void *(*begin)(PCIBus *bus, void *parent_state), - void (*end)(PCIBus *bus, void *state), - void *parent_state); +void pci_for_each_device_under_bus(PCIBus *bus, + pci_bus_dev_fn fn, void *opaque); +void pci_for_each_device_under_bus_reverse(PCIBus *bus, + pci_bus_dev_fn fn, + void *opaque); +void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin, + pci_bus_fn end, void *parent_state); PCIDevice *pci_get_function_0(PCIDevice *pci_dev); /* Use this wrapper when specific scan order is not required. 
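/*
 * Minimal usage sketch for the pci_bus_dev_fn typedef and the
 * pci_for_each_device_under_bus() helper declared above (hypothetical
 * example, not part of the header): count the devices present on a bus by
 * walking it with a callback.
 */
static void example_count_one(PCIBus *b, PCIDevice *d, void *opaque)
{
    unsigned *count = opaque;

    (*count)++;
}

static unsigned example_count_devices(PCIBus *bus)
{
    unsigned count = 0;

    pci_for_each_device_under_bus(bus, example_count_one, &count);
    return count;
}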
*/ static inline -void pci_for_each_bus(PCIBus *bus, - void (*fn)(PCIBus *bus, void *opaque), - void *opaque) +void pci_for_each_bus(PCIBus *bus, pci_bus_fn fn, void *opaque) { pci_for_each_bus_depth_first(bus, NULL, fn, opaque); } @@ -493,6 +526,9 @@ typedef AddressSpace *(*PCIIOMMUFunc)(PCIBus *, void *, int); AddressSpace *pci_device_iommu_address_space(PCIDevice *dev); void pci_setup_iommu(PCIBus *bus, PCIIOMMUFunc fn, void *opaque); +pcibus_t pci_bar_address(PCIDevice *d, + int reg, uint8_t type, pcibus_t size); + static inline void pci_set_byte(uint8_t *config, uint8_t val) { @@ -659,60 +695,44 @@ static inline void pci_set_byte_by_mask(uint8_t *config, uint8_t mask, uint8_t reg) { uint8_t val = pci_get_byte(config); - uint8_t rval = reg << ctz32(mask); - pci_set_byte(config, (~mask & val) | (mask & rval)); -} + uint8_t rval; -static inline uint8_t -pci_get_byte_by_mask(uint8_t *config, uint8_t mask) -{ - uint8_t val = pci_get_byte(config); - return (val & mask) >> ctz32(mask); + assert(mask); + rval = reg << ctz32(mask); + pci_set_byte(config, (~mask & val) | (mask & rval)); } static inline void pci_set_word_by_mask(uint8_t *config, uint16_t mask, uint16_t reg) { uint16_t val = pci_get_word(config); - uint16_t rval = reg << ctz32(mask); - pci_set_word(config, (~mask & val) | (mask & rval)); -} + uint16_t rval; -static inline uint16_t -pci_get_word_by_mask(uint8_t *config, uint16_t mask) -{ - uint16_t val = pci_get_word(config); - return (val & mask) >> ctz32(mask); + assert(mask); + rval = reg << ctz32(mask); + pci_set_word(config, (~mask & val) | (mask & rval)); } static inline void pci_set_long_by_mask(uint8_t *config, uint32_t mask, uint32_t reg) { uint32_t val = pci_get_long(config); - uint32_t rval = reg << ctz32(mask); - pci_set_long(config, (~mask & val) | (mask & rval)); -} + uint32_t rval; -static inline uint32_t -pci_get_long_by_mask(uint8_t *config, uint32_t mask) -{ - uint32_t val = pci_get_long(config); - return (val & mask) >> ctz32(mask); + assert(mask); + rval = reg << ctz32(mask); + pci_set_long(config, (~mask & val) | (mask & rval)); } static inline void pci_set_quad_by_mask(uint8_t *config, uint64_t mask, uint64_t reg) { uint64_t val = pci_get_quad(config); - uint64_t rval = reg << ctz32(mask); - pci_set_quad(config, (~mask & val) | (mask & rval)); -} + uint64_t rval; -static inline uint64_t -pci_get_quad_by_mask(uint8_t *config, uint64_t mask) -{ - uint64_t val = pci_get_quad(config); - return (val & mask) >> ctz32(mask); + assert(mask); + rval = reg << ctz32(mask); + pci_set_quad(config, (~mask & val) | (mask & rval)); } PCIDevice *pci_new_multifunction(int devfn, bool multifunction, @@ -730,6 +750,11 @@ void lsi53c8xx_handle_legacy_cmdline(DeviceState *lsi_dev); qemu_irq pci_allocate_irq(PCIDevice *pci_dev); void pci_set_irq(PCIDevice *pci_dev, int level); +static inline int pci_intx(PCIDevice *pci_dev) +{ + return pci_get_byte(pci_dev->config + PCI_INTERRUPT_PIN) - 1; +} + static inline void pci_irq_assert(PCIDevice *pci_dev) { pci_set_irq(pci_dev, 1); @@ -750,6 +775,11 @@ static inline void pci_irq_pulse(PCIDevice *pci_dev) pci_irq_deassert(pci_dev); } +static inline int pci_is_cxl(const PCIDevice *d) +{ + return d->cap_present & QEMU_PCIE_CAP_CXL; +} + static inline int pci_is_express(const PCIDevice *d) { return d->cap_present & QEMU_PCI_CAP_EXPRESS; @@ -768,6 +798,11 @@ static inline int pci_is_express_downstream_port(const PCIDevice *d) return type == PCI_EXP_TYPE_DOWNSTREAM || type == PCI_EXP_TYPE_ROOT_PORT; } +static inline int pci_is_vf(const PCIDevice 
*d) +{ + return d->exp.sriov_vf.pf != NULL; +} + static inline uint32_t pci_config_size(const PCIDevice *d) { return pci_is_express(d) ? PCIE_CONFIG_SPACE_SIZE : PCI_CONFIG_SPACE_SIZE; @@ -801,9 +836,10 @@ static inline AddressSpace *pci_get_address_space(PCIDevice *dev) */ static inline MemTxResult pci_dma_rw(PCIDevice *dev, dma_addr_t addr, void *buf, dma_addr_t len, - DMADirection dir) + DMADirection dir, MemTxAttrs attrs) { - return dma_memory_rw(pci_get_address_space(dev), addr, buf, len, dir); + return dma_memory_rw(pci_get_address_space(dev), addr, buf, len, + dir, attrs); } /** @@ -821,7 +857,8 @@ static inline MemTxResult pci_dma_rw(PCIDevice *dev, dma_addr_t addr, static inline MemTxResult pci_dma_read(PCIDevice *dev, dma_addr_t addr, void *buf, dma_addr_t len) { - return pci_dma_rw(dev, addr, buf, len, DMA_DIRECTION_TO_DEVICE); + return pci_dma_rw(dev, addr, buf, len, + DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED); } /** @@ -839,19 +876,24 @@ static inline MemTxResult pci_dma_read(PCIDevice *dev, dma_addr_t addr, static inline MemTxResult pci_dma_write(PCIDevice *dev, dma_addr_t addr, const void *buf, dma_addr_t len) { - return pci_dma_rw(dev, addr, (void *) buf, len, DMA_DIRECTION_FROM_DEVICE); + return pci_dma_rw(dev, addr, (void *) buf, len, + DMA_DIRECTION_FROM_DEVICE, MEMTXATTRS_UNSPECIFIED); } -#define PCI_DMA_DEFINE_LDST(_l, _s, _bits) \ - static inline uint##_bits##_t ld##_l##_pci_dma(PCIDevice *dev, \ - dma_addr_t addr) \ - { \ - return ld##_l##_dma(pci_get_address_space(dev), addr); \ - } \ - static inline void st##_s##_pci_dma(PCIDevice *dev, \ - dma_addr_t addr, uint##_bits##_t val) \ - { \ - st##_s##_dma(pci_get_address_space(dev), addr, val); \ +#define PCI_DMA_DEFINE_LDST(_l, _s, _bits) \ + static inline MemTxResult ld##_l##_pci_dma(PCIDevice *dev, \ + dma_addr_t addr, \ + uint##_bits##_t *val, \ + MemTxAttrs attrs) \ + { \ + return ld##_l##_dma(pci_get_address_space(dev), addr, val, attrs); \ + } \ + static inline MemTxResult st##_s##_pci_dma(PCIDevice *dev, \ + dma_addr_t addr, \ + uint##_bits##_t val, \ + MemTxAttrs attrs) \ + { \ + return st##_s##_dma(pci_get_address_space(dev), addr, val, attrs); \ } PCI_DMA_DEFINE_LDST(ub, b, 8); @@ -864,12 +906,25 @@ PCI_DMA_DEFINE_LDST(q_be, q_be, 64); #undef PCI_DMA_DEFINE_LDST +/** + * pci_dma_map: Map device PCI address space range into host virtual address + * @dev: #PCIDevice to be accessed + * @addr: address within that device's address space + * @plen: pointer to length of buffer; updated on return to indicate + * if only a subset of the requested range has been mapped + * @dir: indicates the transfer direction + * + * Return: A host pointer, or %NULL if the resources needed to + * perform the mapping are exhausted (in that case *@plen + * is set to zero). 
+ */ static inline void *pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t *plen, DMADirection dir) { void *buf; - buf = dma_memory_map(pci_get_address_space(dev), addr, plen, dir); + buf = dma_memory_map(pci_get_address_space(dev), addr, plen, dir, + MEMTXATTRS_UNSPECIFIED); return buf; } @@ -904,5 +959,6 @@ extern const VMStateDescription vmstate_pci_device; } MSIMessage pci_get_msi_message(PCIDevice *dev, int vector); +void pci_set_power(PCIDevice *pci_dev, bool state); #endif diff --git a/include/hw/pci/pci_bridge.h b/include/hw/pci/pci_bridge.h index a94d350034..ba4bafac7c 100644 --- a/include/hw/pci/pci_bridge.h +++ b/include/hw/pci/pci_bridge.h @@ -28,6 +28,7 @@ #include "hw/pci/pci.h" #include "hw/pci/pci_bus.h" +#include "hw/cxl/cxl.h" #include "qom/object.h" typedef struct PCIBridgeWindows PCIBridgeWindows; @@ -80,6 +81,25 @@ struct PCIBridge { #define PCI_BRIDGE_DEV_PROP_CHASSIS_NR "chassis_nr" #define PCI_BRIDGE_DEV_PROP_MSI "msi" #define PCI_BRIDGE_DEV_PROP_SHPC "shpc" +typedef struct CXLHost CXLHost; + +struct PXBDev { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + uint8_t bus_nr; + uint16_t numa_node; + bool bypass_iommu; + struct cxl_dev { + CXLHost *cxl_host_bridge; /* Pointer to a CXLHost */ + } cxl; +}; + +typedef struct PXBDev PXBDev; +#define TYPE_PXB_CXL_DEVICE "pxb-cxl" +DECLARE_INSTANCE_CHECKER(PXBDev, PXB_CXL_DEV, + TYPE_PXB_CXL_DEVICE) int pci_bridge_ssvid_init(PCIDevice *dev, uint8_t offset, uint16_t svid, uint16_t ssid, @@ -138,6 +158,7 @@ typedef struct PCIBridgeQemuCap { uint64_t mem_pref_64; /* Prefetchable memory to reserve (64-bit MMIO) */ } PCIBridgeQemuCap; +#define REDHAT_PCI_CAP_TYPE_OFFSET 3 #define REDHAT_PCI_CAP_RESOURCE_RESERVE 1 /* @@ -152,6 +173,13 @@ typedef struct PCIResReserve { uint64_t mem_pref_64; } PCIResReserve; +#define REDHAT_PCI_CAP_RES_RESERVE_BUS_RES 4 +#define REDHAT_PCI_CAP_RES_RESERVE_IO 8 +#define REDHAT_PCI_CAP_RES_RESERVE_MEM 16 +#define REDHAT_PCI_CAP_RES_RESERVE_PREF_MEM_32 20 +#define REDHAT_PCI_CAP_RES_RESERVE_PREF_MEM_64 24 +#define REDHAT_PCI_CAP_RES_RESERVE_CAP_SIZE 32 + int pci_bridge_qemu_reserve_cap_init(PCIDevice *dev, int cap_offset, PCIResReserve res_reserve, Error **errp); diff --git a/include/hw/pci/pci_bus.h b/include/hw/pci/pci_bus.h index 347440d42c..5653175957 100644 --- a/include/hw/pci/pci_bus.h +++ b/include/hw/pci/pci_bus.h @@ -24,8 +24,12 @@ enum PCIBusFlags { PCI_BUS_IS_ROOT = 0x0001, /* PCIe extended configuration space is accessible on this bus */ PCI_BUS_EXTENDED_CONFIG_SPACE = 0x0002, + /* This is a CXL Type BUS */ + PCI_BUS_CXL = 0x0004, }; +#define PCI_NO_PASID UINT32_MAX + struct PCIBus { BusState qbus; enum PCIBusFlags flags; @@ -53,6 +57,11 @@ struct PCIBus { Notifier machine_done; }; +static inline bool pci_bus_is_cxl(PCIBus *bus) +{ + return !!(bus->flags & PCI_BUS_CXL); +} + static inline bool pci_bus_is_root(PCIBus *bus) { return !!(bus->flags & PCI_BUS_IS_ROOT); diff --git a/include/hw/pci/pci_ids.h b/include/hw/pci/pci_ids.h index 0655d8caa3..e78f2ef6e1 100644 --- a/include/hw/pci/pci_ids.h +++ b/include/hw/pci/pci_ids.h @@ -53,6 +53,7 @@ #define PCI_BASE_CLASS_MEMORY 0x05 #define PCI_CLASS_MEMORY_RAM 0x0500 #define PCI_CLASS_MEMORY_FLASH 0x0501 +#define PCI_CLASS_MEMORY_CXL 0x0502 #define PCI_CLASS_MEMORY_OTHER 0x0580 #define PCI_BASE_CLASS_BRIDGE 0x06 @@ -156,6 +157,9 @@ /* Vendors and devices. Sort key: vendor first, device next. 
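/*
 * Usage sketch for the reworked PCI DMA load/store helpers in the
 * hw/pci/pci.h hunk above (hypothetical caller, not part of the header):
 * the generated wrappers now return a MemTxResult and take the value by
 * pointer plus explicit MemTxAttrs, so a device model can detect a failed
 * bus transaction instead of silently consuming garbage.
 */
static int example_read_descriptor_byte(PCIDevice *dev, dma_addr_t addr,
                                        uint8_t *out)
{
    if (ldub_pci_dma(dev, addr, out, MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
        return -1;  /* report the DMA failure to the caller */
    }
    return 0;
}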
*/ +/* Ref: PCIe r6.0 Table 6-32 */ +#define PCI_VENDOR_ID_PCI_SIG 0x0001 + #define PCI_VENDOR_ID_LSI_LOGIC 0x1000 #define PCI_DEVICE_ID_LSI_53C810 0x0001 #define PCI_DEVICE_ID_LSI_53C895A 0x0012 @@ -237,6 +241,7 @@ #define PCI_DEVICE_ID_INTEL_82801BA_11 0x244e #define PCI_DEVICE_ID_INTEL_82801D 0x24CD #define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab +#define PCI_DEVICE_ID_INTEL_NVME 0x5845 #define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000 #define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010 #define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020 diff --git a/include/hw/pci/pci_regs.h b/include/hw/pci/pci_regs.h index 77ba64b931..a590140962 100644 --- a/include/hw/pci/pci_regs.h +++ b/include/hw/pci/pci_regs.h @@ -4,5 +4,6 @@ #include "standard-headers/linux/pci_regs.h" #define PCI_PM_CAP_VER_1_1 0x0002 /* PCI PM spec ver. 1.1 */ +#define PCI_PM_CAP_VER_1_2 0x0003 /* PCI PM spec ver. 1.2 */ #endif diff --git a/include/hw/pci/pcie.h b/include/hw/pci/pcie.h index 6063bee0ec..698d3de851 100644 --- a/include/hw/pci/pcie.h +++ b/include/hw/pci/pcie.h @@ -24,7 +24,9 @@ #include "hw/pci/pci_regs.h" #include "hw/pci/pcie_regs.h" #include "hw/pci/pcie_aer.h" +#include "hw/pci/pcie_sriov.h" #include "hw/hotplug.h" +#include "hw/pci/pcie_doe.h" typedef enum { /* for attention and power indicator */ @@ -81,6 +83,11 @@ struct PCIExpressDevice { /* ACS */ uint16_t acs_cap; + + /* SR/IOV */ + uint16_t sriov_cap; + PCIESriovPF sriov_pf; + PCIESriovVF sriov_vf; }; #define COMPAT_PROP_PCP "power_controller_present" @@ -112,6 +119,7 @@ void pcie_cap_slot_write_config(PCIDevice *dev, uint32_t addr, uint32_t val, int len); int pcie_cap_slot_post_load(void *opaque, int version_id); void pcie_cap_slot_push_attention_button(PCIDevice *dev); +void pcie_cap_slot_enable_power(PCIDevice *dev); void pcie_cap_root_init(PCIDevice *dev); void pcie_cap_root_reset(PCIDevice *dev); diff --git a/include/hw/pci/pcie_doe.h b/include/hw/pci/pcie_doe.h new file mode 100644 index 0000000000..ba4d8b03bd --- /dev/null +++ b/include/hw/pci/pcie_doe.h @@ -0,0 +1,123 @@ +/* + * PCIe Data Object Exchange + * + * Copyright (C) 2021 Avery Design Systems, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef PCIE_DOE_H +#define PCIE_DOE_H + +#include "qemu/range.h" +#include "qemu/typedefs.h" +#include "hw/register.h" + +/* + * Reference: + * PCIe r6.0 - 7.9.24 Data Object Exchange Extended Capability + */ +/* Capabilities Register - r6.0 7.9.24.2 */ +#define PCI_EXP_DOE_CAP 0x04 +REG32(PCI_DOE_CAP_REG, 0) + FIELD(PCI_DOE_CAP_REG, INTR_SUPP, 0, 1) + FIELD(PCI_DOE_CAP_REG, DOE_INTR_MSG_NUM, 1, 11) + +/* Control Register - r6.0 7.9.24.3 */ +#define PCI_EXP_DOE_CTRL 0x08 +REG32(PCI_DOE_CAP_CONTROL, 0) + FIELD(PCI_DOE_CAP_CONTROL, DOE_ABORT, 0, 1) + FIELD(PCI_DOE_CAP_CONTROL, DOE_INTR_EN, 1, 1) + FIELD(PCI_DOE_CAP_CONTROL, DOE_GO, 31, 1) + +/* Status Register - r6.0 7.9.24.4 */ +#define PCI_EXP_DOE_STATUS 0x0c +REG32(PCI_DOE_CAP_STATUS, 0) + FIELD(PCI_DOE_CAP_STATUS, DOE_BUSY, 0, 1) + FIELD(PCI_DOE_CAP_STATUS, DOE_INTR_STATUS, 1, 1) + FIELD(PCI_DOE_CAP_STATUS, DOE_ERROR, 2, 1) + FIELD(PCI_DOE_CAP_STATUS, DATA_OBJ_RDY, 31, 1) + +/* Write Data Mailbox Register - r6.0 7.9.24.5 */ +#define PCI_EXP_DOE_WR_DATA_MBOX 0x10 + +/* Read Data Mailbox Register - 7.9.xx.6 */ +#define PCI_EXP_DOE_RD_DATA_MBOX 0x14 + +/* PCI-SIG defined Data Object Types - r6.0 Table 6-32 */ +#define PCI_SIG_DOE_DISCOVERY 0x00 + +#define PCI_DOE_DW_SIZE_MAX (1 << 18) +#define PCI_DOE_PROTOCOL_NUM_MAX 256 + +#define DATA_OBJ_BUILD_HEADER1(v, p) (((p) << 16) | (v)) +#define DATA_OBJ_LEN_MASK(len) ((len) & (PCI_DOE_DW_SIZE_MAX - 1)) + +typedef struct DOEHeader DOEHeader; +typedef struct DOEProtocol DOEProtocol; +typedef struct DOECap DOECap; + +struct DOEHeader { + uint16_t vendor_id; + uint8_t data_obj_type; + uint8_t reserved; + uint32_t length; +} QEMU_PACKED; + +/* Protocol infos and rsp function callback */ +struct DOEProtocol { + uint16_t vendor_id; + uint8_t data_obj_type; + bool (*handle_request)(DOECap *); +}; + +struct DOECap { + /* Owner */ + PCIDevice *pdev; + + uint16_t offset; + + struct { + bool intr; + uint16_t vec; + } cap; + + struct { + bool abort; + bool intr; + bool go; + } ctrl; + + struct { + bool busy; + bool intr; + bool error; + bool ready; + } status; + + uint32_t *write_mbox; + uint32_t *read_mbox; + + /* Mailbox position indicator */ + uint32_t read_mbox_idx; + uint32_t read_mbox_len; + uint32_t write_mbox_len; + + /* Protocols and its callback response */ + DOEProtocol *protocols; + uint16_t protocol_num; +}; + +void pcie_doe_init(PCIDevice *pdev, DOECap *doe_cap, uint16_t offset, + DOEProtocol *protocols, bool intr, uint16_t vec); +void pcie_doe_fini(DOECap *doe_cap); +bool pcie_doe_read_config(DOECap *doe_cap, uint32_t addr, int size, + uint32_t *buf); +void pcie_doe_write_config(DOECap *doe_cap, uint32_t addr, + uint32_t val, int size); +uint32_t pcie_doe_build_protocol(DOEProtocol *p); +void *pcie_doe_get_write_mbox_ptr(DOECap *doe_cap); +void pcie_doe_set_rsp(DOECap *doe_cap, void *rsp); +uint32_t pcie_doe_get_obj_len(void *obj); +#endif /* PCIE_DOE_H */ diff --git a/include/hw/pci/pcie_host.h b/include/hw/pci/pcie_host.h index 076457b270..82d92177da 100644 --- a/include/hw/pci/pcie_host.h +++ b/include/hw/pci/pcie_host.h @@ -60,15 +60,15 @@ void pcie_host_mmcfg_update(PCIExpressHost *e, /* * PCI express ECAM (Enhanced Configuration Address Mapping) format. 
* AKA mmcfg address - * bit 20 - 28: bus number + * bit 20 - 27: bus number * bit 15 - 19: device number * bit 12 - 14: function number * bit 0 - 11: offset in configuration space of a given device */ -#define PCIE_MMCFG_SIZE_MAX (1ULL << 29) +#define PCIE_MMCFG_SIZE_MAX (1ULL << 28) #define PCIE_MMCFG_SIZE_MIN (1ULL << 20) #define PCIE_MMCFG_BUS_BIT 20 -#define PCIE_MMCFG_BUS_MASK 0x1ff +#define PCIE_MMCFG_BUS_MASK 0xff #define PCIE_MMCFG_DEVFN_BIT 12 #define PCIE_MMCFG_DEVFN_MASK 0xff #define PCIE_MMCFG_CONFOFFSET_MASK 0xfff diff --git a/include/hw/pci/pcie_port.h b/include/hw/pci/pcie_port.h index e25b289ce8..7b8193061a 100644 --- a/include/hw/pci/pcie_port.h +++ b/include/hw/pci/pcie_port.h @@ -39,6 +39,8 @@ struct PCIEPort { void pcie_port_init_reg(PCIDevice *d); +PCIDevice *pcie_find_port_by_pn(PCIBus *bus, uint8_t pn); + #define TYPE_PCIE_SLOT "pcie-slot" OBJECT_DECLARE_SIMPLE_TYPE(PCIESlot, PCIE_SLOT) diff --git a/include/hw/pci/pcie_regs.h b/include/hw/pci/pcie_regs.h index 1db86b0ec4..963dc2e170 100644 --- a/include/hw/pci/pcie_regs.h +++ b/include/hw/pci/pcie_regs.h @@ -179,4 +179,8 @@ typedef enum PCIExpLinkWidth { #define PCI_ACS_VER 0x1 #define PCI_ACS_SIZEOF 8 +/* DOE Capability Register Fields */ +#define PCI_DOE_VER 0x1 +#define PCI_DOE_SIZEOF 24 + #endif /* QEMU_PCIE_REGS_H */ diff --git a/include/hw/pci/pcie_sriov.h b/include/hw/pci/pcie_sriov.h new file mode 100644 index 0000000000..80f5c84e75 --- /dev/null +++ b/include/hw/pci/pcie_sriov.h @@ -0,0 +1,77 @@ +/* + * pcie_sriov.h: + * + * Implementation of SR/IOV emulation support. + * + * Copyright (c) 2015 Knut Omang + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_PCIE_SRIOV_H +#define QEMU_PCIE_SRIOV_H + +struct PCIESriovPF { + uint16_t num_vfs; /* Number of virtual functions created */ + uint8_t vf_bar_type[PCI_NUM_REGIONS]; /* Store type for each VF bar */ + const char *vfname; /* Reference to the device type used for the VFs */ + PCIDevice **vf; /* Pointer to an array of num_vfs VF devices */ +}; + +struct PCIESriovVF { + PCIDevice *pf; /* Pointer back to owner physical function */ + uint16_t vf_number; /* Logical VF number of this function */ +}; + +void pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset, + const char *vfname, uint16_t vf_dev_id, + uint16_t init_vfs, uint16_t total_vfs, + uint16_t vf_offset, uint16_t vf_stride); +void pcie_sriov_pf_exit(PCIDevice *dev); + +/* Set up a VF bar in the SR/IOV bar area */ +void pcie_sriov_pf_init_vf_bar(PCIDevice *dev, int region_num, + uint8_t type, dma_addr_t size); + +/* Instantiate a bar for a VF */ +void pcie_sriov_vf_register_bar(PCIDevice *dev, int region_num, + MemoryRegion *memory); + +/* + * Default (minimal) page size support values + * as required by the SR/IOV standard: + * 0x553 << 12 = 0x553000 = 4K + 8K + 64K + 256K + 1M + 4M + */ +#define SRIOV_SUP_PGSIZE_MINREQ 0x553 + +/* + * Optionally add supported page sizes to the mask of supported page sizes + * Page size values are interpreted as opt_sup_pgsize << 12. 
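/*
 * Worked example for the page-size encoding above (illustrative only): bit n
 * of the mask advertises support for a page size of 4K << n, so the minimum
 * required value decomposes as
 *
 *   0x553 = BIT(0) | BIT(1) | BIT(4) | BIT(6) | BIT(8) | BIT(10)
 *         ->  4K      8K      64K      256K     1M       4M
 *
 * A hypothetical PF model that additionally wants to advertise 64M pages
 * (4K << 14) would therefore pass bit 14 to the helper declared just below:
 */
static void example_add_64m_page_size(PCIDevice *dev)
{
    pcie_sriov_pf_add_sup_pgsize(dev, 1 << 14);   /* 4K << 14 = 64M */
}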
+ */ +void pcie_sriov_pf_add_sup_pgsize(PCIDevice *dev, uint16_t opt_sup_pgsize); + +/* SR/IOV capability config write handler */ +void pcie_sriov_config_write(PCIDevice *dev, uint32_t address, + uint32_t val, int len); + +/* Reset SR/IOV VF Enable bit to unregister all VFs */ +void pcie_sriov_pf_disable_vfs(PCIDevice *dev); + +/* Get logical VF number of a VF - only valid for VFs */ +uint16_t pcie_sriov_vf_number(PCIDevice *dev); + +/* + * Get the physical function that owns this VF. + * Returns NULL if dev is not a virtual function + */ +PCIDevice *pcie_sriov_get_pf(PCIDevice *dev); + +/* + * Get the n-th VF of this physical function - only valid for PF. + * Returns NULL if index is invalid + */ +PCIDevice *pcie_sriov_get_vf_at_index(PCIDevice *dev, int n); + +#endif /* QEMU_PCIE_SRIOV_H */ diff --git a/include/hw/ppc/openpic.h b/include/hw/ppc/openpic.h index 74ff44bff0..ebdaf8a493 100644 --- a/include/hw/ppc/openpic.h +++ b/include/hw/ppc/openpic.h @@ -21,7 +21,6 @@ enum { typedef struct IrqLines { qemu_irq irq[OPENPIC_OUTPUT_NB]; } IrqLines; -#define OPENPIC_MODEL_RAVEN 0 #define OPENPIC_MODEL_FSL_MPIC_20 1 #define OPENPIC_MODEL_FSL_MPIC_42 2 #define OPENPIC_MODEL_KEYLARGO 3 @@ -32,13 +31,6 @@ typedef struct IrqLines { qemu_irq irq[OPENPIC_OUTPUT_NB]; } IrqLines; #define OPENPIC_MAX_IRQ (OPENPIC_MAX_SRC + OPENPIC_MAX_IPI + \ OPENPIC_MAX_TMR) -/* Raven */ -#define RAVEN_MAX_CPU 2 -#define RAVEN_MAX_EXT 48 -#define RAVEN_MAX_IRQ 64 -#define RAVEN_MAX_TMR OPENPIC_MAX_TMR -#define RAVEN_MAX_IPI OPENPIC_MAX_IPI - /* KeyLargo */ #define KEYLARGO_MAX_CPU 4 #define KEYLARGO_MAX_EXT 64 @@ -49,14 +41,6 @@ typedef struct IrqLines { qemu_irq irq[OPENPIC_OUTPUT_NB]; } IrqLines; /* Timers don't exist but this makes the code happy... */ #define KEYLARGO_TMR_IRQ (KEYLARGO_IPI_IRQ + KEYLARGO_MAX_IPI) -/* Interrupt definitions */ -#define RAVEN_FE_IRQ (RAVEN_MAX_EXT) /* Internal functional IRQ */ -#define RAVEN_ERR_IRQ (RAVEN_MAX_EXT + 1) /* Error IRQ */ -#define RAVEN_TMR_IRQ (RAVEN_MAX_EXT + 2) /* First timer IRQ */ -#define RAVEN_IPI_IRQ (RAVEN_TMR_IRQ + RAVEN_MAX_TMR) /* First IPI IRQ */ -/* First doorbell IRQ */ -#define RAVEN_DBL_IRQ (RAVEN_IPI_IRQ + (RAVEN_MAX_CPU * RAVEN_MAX_IPI)) - typedef struct FslMpicInfo { int max_ext; } FslMpicInfo; @@ -67,7 +51,8 @@ typedef enum IRQType { IRQ_TYPE_FSLSPECIAL, /* FSL timer/IPI interrupt, edge, no polarity */ } IRQType; -/* Round up to the nearest 64 IRQs so that the queue length +/* + * Round up to the nearest 64 IRQs so that the queue length * won't change when moving between 32 and 64 bit hosts. */ #define IRQQUEUE_SIZE_BITS ((OPENPIC_MAX_IRQ + 63) & ~63) @@ -117,8 +102,10 @@ typedef struct OpenPICTimer { bool qemu_timer_active; /* Is the qemu_timer is running? */ struct QEMUTimer *qemu_timer; struct OpenPICState *opp; /* Device timer is part of. */ - /* The QEMU_CLOCK_VIRTUAL time (in ns) corresponding to the last - current_count written or read, only defined if qemu_timer_active. */ + /* + * The QEMU_CLOCK_VIRTUAL time (in ns) corresponding to the last + * current_count written or read, only defined if qemu_timer_active. 
+ */ uint64_t origin_time; } OpenPICTimer; diff --git a/include/hw/ppc/pnv.h b/include/hw/ppc/pnv.h index d69cee17b2..9ef7e2d0dc 100644 --- a/include/hw/ppc/pnv.h +++ b/include/hw/ppc/pnv.h @@ -27,11 +27,13 @@ #include "hw/ppc/pnv_pnor.h" #include "hw/ppc/pnv_psi.h" #include "hw/ppc/pnv_occ.h" +#include "hw/ppc/pnv_sbe.h" #include "hw/ppc/pnv_homer.h" #include "hw/ppc/pnv_xive.h" #include "hw/ppc/pnv_core.h" #include "hw/pci-host/pnv_phb3.h" #include "hw/pci-host/pnv_phb4.h" +#include "hw/pci-host/pnv_phb.h" #include "qom/object.h" #define TYPE_PNV_CHIP "pnv-chip" @@ -52,7 +54,7 @@ struct PnvChip { uint64_t cores_mask; PnvCore **cores; - uint32_t num_phbs; + uint32_t num_pecs; MemoryRegion xscom_mmio; MemoryRegion xscom; @@ -80,7 +82,12 @@ struct Pnv8Chip { PnvHomer homer; #define PNV8_CHIP_PHB3_MAX 4 - PnvPHB3 phbs[PNV8_CHIP_PHB3_MAX]; + /* + * The array is used to allow quick access to the phbs by + * pnv_ics_get_child() and pnv_ics_resend_child(). + */ + PnvPHB *phbs[PNV8_CHIP_PHB3_MAX]; + uint32_t num_phbs; XICSFabric *xics; }; @@ -99,6 +106,7 @@ struct Pnv9Chip { Pnv9Psi psi; PnvLpcController lpc; PnvOCC occ; + PnvSBE sbe; PnvHomer homer; uint32_t nr_quads; @@ -124,10 +132,23 @@ struct Pnv10Chip { PnvChip parent_obj; /*< public >*/ + PnvXive2 xive; Pnv9Psi psi; PnvLpcController lpc; + PnvOCC occ; + PnvSBE sbe; + PnvHomer homer; + + uint32_t nr_quads; + PnvQuad *quads; + +#define PNV10_CHIP_MAX_PEC 2 + PnvPhb4PecState pecs[PNV10_CHIP_MAX_PEC]; }; +#define PNV10_PIR2FUSEDCORE(pir) (((pir) >> 3) & 0xf) +#define PNV10_PIR2CHIP(pir) (((pir) >> 8) & 0x7f) + struct PnvChipClass { /*< private >*/ SysBusDeviceClass parent_class; @@ -135,6 +156,7 @@ struct PnvChipClass { /*< public >*/ uint64_t chip_cfam_id; uint64_t cores_mask; + uint32_t num_pecs; uint32_t num_phbs; DeviceRealize parent_realize; @@ -170,29 +192,10 @@ DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER8NVL, DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER9, TYPE_PNV_CHIP_POWER9) -#define TYPE_PNV_CHIP_POWER10 PNV_CHIP_TYPE_NAME("power10_v1.0") +#define TYPE_PNV_CHIP_POWER10 PNV_CHIP_TYPE_NAME("power10_v2.0") DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER10, TYPE_PNV_CHIP_POWER10) -/* - * This generates a HW chip id depending on an index, as found on a - * two socket system with dual chip modules : - * - * 0x0, 0x1, 0x10, 0x11 - * - * 4 chips should be the maximum - * - * TODO: use a machine property to define the chip ids - */ -#define PNV_CHIP_HWID(i) ((((i) & 0x3e) << 3) | ((i) & 0x1)) - -/* - * Converts back a HW chip id to an index. This is useful to calculate - * the MMIO addresses of some controllers which depend on the chip id. 
- */ -#define PNV_CHIP_INDEX(chip) \ - (((chip)->chip_id >> 2) * 2 + ((chip)->chip_id & 0x3)) - PowerPCCPU *pnv_chip_find_cpu(PnvChip *chip, uint32_t pir); #define TYPE_PNV_MACHINE MACHINE_TYPE_NAME("powernv") @@ -234,6 +237,9 @@ struct PnvMachineState { hwaddr fw_load_addr; }; +PnvChip *pnv_get_chip(PnvMachineState *pnv, uint32_t chip_id); +Object *pnv_chip_add_phb(PnvChip *chip, PnvPHB *phb, Error **errp); + #define PNV_FDT_ADDR 0x01000000 #define PNV_TIMEBASE_FREQ 512000000ULL @@ -256,11 +262,11 @@ void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor); #define PNV_OCC_COMMON_AREA_SIZE 0x0000000000800000ull #define PNV_OCC_COMMON_AREA_BASE 0x7fff800000ull #define PNV_OCC_SENSOR_BASE(chip) (PNV_OCC_COMMON_AREA_BASE + \ - PNV_OCC_SENSOR_DATA_BLOCK_BASE(PNV_CHIP_INDEX(chip))) + PNV_OCC_SENSOR_DATA_BLOCK_BASE((chip)->chip_id)) #define PNV_HOMER_SIZE 0x0000000000400000ull #define PNV_HOMER_BASE(chip) \ - (0x7ffd800000ull + ((uint64_t)PNV_CHIP_INDEX(chip)) * PNV_HOMER_SIZE) + (0x7ffd800000ull + ((uint64_t)(chip)->chip_id) * PNV_HOMER_SIZE) /* @@ -279,16 +285,16 @@ void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor); */ #define PNV_ICP_SIZE 0x0000000000100000ull #define PNV_ICP_BASE(chip) \ - (0x0003ffff80000000ull + (uint64_t) PNV_CHIP_INDEX(chip) * PNV_ICP_SIZE) + (0x0003ffff80000000ull + (uint64_t) (chip)->chip_id * PNV_ICP_SIZE) #define PNV_PSIHB_SIZE 0x0000000000100000ull #define PNV_PSIHB_BASE(chip) \ - (0x0003fffe80000000ull + (uint64_t)PNV_CHIP_INDEX(chip) * PNV_PSIHB_SIZE) + (0x0003fffe80000000ull + (uint64_t)(chip)->chip_id * PNV_PSIHB_SIZE) #define PNV_PSIHB_FSP_SIZE 0x0000000100000000ull #define PNV_PSIHB_FSP_BASE(chip) \ - (0x0003ffe000000000ull + (uint64_t)PNV_CHIP_INDEX(chip) * \ + (0x0003ffe000000000ull + (uint64_t)(chip)->chip_id * \ PNV_PSIHB_FSP_SIZE) /* @@ -324,11 +330,11 @@ void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor); #define PNV9_OCC_COMMON_AREA_SIZE 0x0000000000800000ull #define PNV9_OCC_COMMON_AREA_BASE 0x203fff800000ull #define PNV9_OCC_SENSOR_BASE(chip) (PNV9_OCC_COMMON_AREA_BASE + \ - PNV_OCC_SENSOR_DATA_BLOCK_BASE(PNV_CHIP_INDEX(chip))) + PNV_OCC_SENSOR_DATA_BLOCK_BASE((chip)->chip_id)) #define PNV9_HOMER_SIZE 0x0000000000400000ull #define PNV9_HOMER_BASE(chip) \ - (0x203ffd800000ull + ((uint64_t)PNV_CHIP_INDEX(chip)) * PNV9_HOMER_SIZE) + (0x203ffd800000ull + ((uint64_t)(chip)->chip_id) * PNV9_HOMER_SIZE) /* * POWER10 MMIO base addresses - 16TB stride per chip @@ -342,10 +348,37 @@ void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor); #define PNV10_LPCM_SIZE 0x0000000100000000ull #define PNV10_LPCM_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030000000000ull) +#define PNV10_XIVE2_IC_SIZE 0x0000000002000000ull +#define PNV10_XIVE2_IC_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030200000000ull) + #define PNV10_PSIHB_ESB_SIZE 0x0000000000100000ull #define PNV10_PSIHB_ESB_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030202000000ull) #define PNV10_PSIHB_SIZE 0x0000000000100000ull #define PNV10_PSIHB_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030203000000ull) +#define PNV10_XIVE2_TM_SIZE 0x0000000000040000ull +#define PNV10_XIVE2_TM_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030203180000ull) + +#define PNV10_XIVE2_NVC_SIZE 0x0000000008000000ull +#define PNV10_XIVE2_NVC_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030208000000ull) + +#define PNV10_XIVE2_NVPG_SIZE 0x0000010000000000ull +#define PNV10_XIVE2_NVPG_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006040000000000ull) + +#define PNV10_XIVE2_ESB_SIZE 0x0000010000000000ull +#define PNV10_XIVE2_ESB_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006050000000000ull) + 
+#define PNV10_XIVE2_END_SIZE 0x0000020000000000ull +#define PNV10_XIVE2_END_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006060000000000ull) + +#define PNV10_OCC_COMMON_AREA_SIZE 0x0000000000800000ull +#define PNV10_OCC_COMMON_AREA_BASE 0x300fff800000ull +#define PNV10_OCC_SENSOR_BASE(chip) (PNV10_OCC_COMMON_AREA_BASE + \ + PNV_OCC_SENSOR_DATA_BLOCK_BASE((chip)->chip_id)) + +#define PNV10_HOMER_SIZE 0x0000000000400000ull +#define PNV10_HOMER_BASE(chip) \ + (0x300ffd800000ll + ((uint64_t)(chip)->chip_id) * PNV10_HOMER_SIZE) + #endif /* PPC_PNV_H */ diff --git a/include/hw/ppc/pnv_core.h b/include/hw/ppc/pnv_core.h index 6ecee98a76..c22eab2e1f 100644 --- a/include/hw/ppc/pnv_core.h +++ b/include/hw/ppc/pnv_core.h @@ -67,7 +67,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(PnvQuad, PNV_QUAD) struct PnvQuad { DeviceState parent_obj; - uint32_t id; + uint32_t quad_id; MemoryRegion xscom_regs; }; #endif /* PPC_PNV_CORE_H */ diff --git a/include/hw/ppc/pnv_homer.h b/include/hw/ppc/pnv_homer.h index 1889e3083c..07e8b19311 100644 --- a/include/hw/ppc/pnv_homer.h +++ b/include/hw/ppc/pnv_homer.h @@ -32,6 +32,9 @@ DECLARE_INSTANCE_CHECKER(PnvHomer, PNV8_HOMER, #define TYPE_PNV9_HOMER TYPE_PNV_HOMER "-POWER9" DECLARE_INSTANCE_CHECKER(PnvHomer, PNV9_HOMER, TYPE_PNV9_HOMER) +#define TYPE_PNV10_HOMER TYPE_PNV_HOMER "-POWER10" +DECLARE_INSTANCE_CHECKER(PnvHomer, PNV10_HOMER, + TYPE_PNV10_HOMER) struct PnvHomer { DeviceState parent; diff --git a/include/hw/ppc/pnv_lpc.h b/include/hw/ppc/pnv_lpc.h index e893e763dd..8a8d1a3d42 100644 --- a/include/hw/ppc/pnv_lpc.h +++ b/include/hw/ppc/pnv_lpc.h @@ -1,7 +1,7 @@ /* * QEMU PowerPC PowerNV LPC controller * - * Copyright (c) 2016, IBM Corporation. + * Copyright (c) 2016-2022, IBM Corporation. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -20,7 +20,6 @@ #ifndef PPC_PNV_LPC_H #define PPC_PNV_LPC_H -#include "hw/ppc/pnv_psi.h" #include "qom/object.h" #define TYPE_PNV_LPC "pnv-lpc" @@ -84,15 +83,12 @@ struct PnvLpcController { MemoryRegion xscom_regs; /* PSI to generate interrupts */ - PnvPsi *psi; + qemu_irq psi_irq; }; - struct PnvLpcClass { DeviceClass parent_class; - int psi_irq; - DeviceRealize parent_realize; }; diff --git a/include/hw/ppc/pnv_occ.h b/include/hw/ppc/pnv_occ.h index b78185aeca..90a81dae2b 100644 --- a/include/hw/ppc/pnv_occ.h +++ b/include/hw/ppc/pnv_occ.h @@ -1,7 +1,7 @@ /* * QEMU PowerPC PowerNV Emulation of a few OCC related registers * - * Copyright (c) 2015-2017, IBM Corporation. + * Copyright (c) 2015-2022, IBM Corporation. 
* * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -20,7 +20,6 @@ #ifndef PPC_PNV_OCC_H #define PPC_PNV_OCC_H -#include "hw/ppc/pnv_psi.h" #include "qom/object.h" #define TYPE_PNV_OCC "pnv-occ" @@ -32,6 +31,8 @@ DECLARE_INSTANCE_CHECKER(PnvOCC, PNV8_OCC, #define TYPE_PNV9_OCC TYPE_PNV_OCC "-POWER9" DECLARE_INSTANCE_CHECKER(PnvOCC, PNV9_OCC, TYPE_PNV9_OCC) +#define TYPE_PNV10_OCC TYPE_PNV_OCC "-POWER10" +DECLARE_INSTANCE_CHECKER(PnvOCC, PNV10_OCC, TYPE_PNV10_OCC) #define PNV_OCC_SENSOR_DATA_BLOCK_OFFSET 0x00580000 #define PNV_OCC_SENSOR_DATA_BLOCK_SIZE 0x00025800 @@ -42,19 +43,17 @@ struct PnvOCC { /* OCC Misc interrupt */ uint64_t occmisc; - PnvPsi *psi; + qemu_irq psi_irq; MemoryRegion xscom_regs; MemoryRegion sram_regs; }; - struct PnvOCCClass { DeviceClass parent_class; int xscom_size; const MemoryRegionOps *xscom_ops; - int psi_irq; }; #define PNV_OCC_SENSOR_DATA_BLOCK_BASE(i) \ diff --git a/include/hw/ppc/pnv_pnor.h b/include/hw/ppc/pnv_pnor.h index 99f9a3adfb..bab2f79844 100644 --- a/include/hw/ppc/pnv_pnor.h +++ b/include/hw/ppc/pnv_pnor.h @@ -6,8 +6,10 @@ * This code is licensed under the GPL version 2 or later. See the * COPYING file in the top-level directory. */ -#ifndef _PPC_PNV_PNOR_H -#define _PPC_PNV_PNOR_H + +#ifndef PPC_PNV_PNOR_H +#define PPC_PNV_PNOR_H + #include "qom/object.h" /* @@ -28,4 +30,4 @@ struct PnvPnor { MemoryRegion mmio; }; -#endif /* _PPC_PNV_PNOR_H */ +#endif /* PPC_PNV_PNOR_H */ diff --git a/include/hw/ppc/pnv_psi.h b/include/hw/ppc/pnv_psi.h index eb841b34a1..8253469b8f 100644 --- a/include/hw/ppc/pnv_psi.h +++ b/include/hw/ppc/pnv_psi.h @@ -1,7 +1,7 @@ /* * QEMU PowerPC PowerNV Processor Service Interface (PSI) model * - * Copyright (c) 2015-2017, IBM Corporation. + * Copyright (c) 2015-2022, IBM Corporation. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -79,13 +79,10 @@ struct PnvPsiClass { uint64_t bar_mask; const char *compat; int compat_size; - - void (*irq_set)(PnvPsi *psi, int, bool state); }; /* The PSI and FSP interrupts are muxed on the same IRQ number */ typedef enum PnvPsiIrq { - PSIHB_IRQ_PSI, /* internal use only */ PSIHB_IRQ_FSP, /* internal use only */ PSIHB_IRQ_OCC, PSIHB_IRQ_FSI, @@ -96,8 +93,6 @@ typedef enum PnvPsiIrq { #define PSI_NUM_INTERRUPTS 6 -void pnv_psi_irq_set(PnvPsi *psi, int irq, bool state); - /* P9 PSI Interrupts */ #define PSIHB9_IRQ_PSI 0 #define PSIHB9_IRQ_OCC 1 diff --git a/include/hw/ppc/pnv_sbe.h b/include/hw/ppc/pnv_sbe.h new file mode 100644 index 0000000000..f54a3ae9ba --- /dev/null +++ b/include/hw/ppc/pnv_sbe.h @@ -0,0 +1,55 @@ +/* + * QEMU PowerPC PowerNV Emulation of some SBE behaviour + * + * Copyright (c) 2022, IBM Corporation. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#ifndef PPC_PNV_SBE_H +#define PPC_PNV_SBE_H + +#include "qom/object.h" + +#define TYPE_PNV_SBE "pnv-sbe" +OBJECT_DECLARE_TYPE(PnvSBE, PnvSBEClass, PNV_SBE) +#define TYPE_PNV9_SBE TYPE_PNV_SBE "-POWER9" +DECLARE_INSTANCE_CHECKER(PnvSBE, PNV9_SBE, TYPE_PNV9_SBE) +#define TYPE_PNV10_SBE TYPE_PNV_SBE "-POWER10" +DECLARE_INSTANCE_CHECKER(PnvSBE, PNV10_SBE, TYPE_PNV10_SBE) + +struct PnvSBE { + DeviceState xd; + + uint64_t mbox[8]; + uint64_t sbe_doorbell; + uint64_t host_doorbell; + + qemu_irq psi_irq; + QEMUTimer *timer; + + MemoryRegion xscom_mbox_regs; + MemoryRegion xscom_ctrl_regs; +}; + +struct PnvSBEClass { + DeviceClass parent_class; + + int xscom_ctrl_size; + int xscom_mbox_size; + const MemoryRegionOps *xscom_ctrl_ops; + const MemoryRegionOps *xscom_mbox_ops; +}; + +#endif /* PPC_PNV_SBE_H */ diff --git a/include/hw/ppc/pnv_xive.h b/include/hw/ppc/pnv_xive.h index 7928e27963..b5d91505e5 100644 --- a/include/hw/ppc/pnv_xive.h +++ b/include/hw/ppc/pnv_xive.h @@ -12,6 +12,7 @@ #include "hw/ppc/xive.h" #include "qom/object.h" +#include "hw/ppc/xive2.h" struct PnvChip; @@ -95,4 +96,74 @@ struct PnvXiveClass { void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon); +/* + * XIVE2 interrupt controller (POWER10) + */ +#define TYPE_PNV_XIVE2 "pnv-xive2" +OBJECT_DECLARE_TYPE(PnvXive2, PnvXive2Class, PNV_XIVE2); + +typedef struct PnvXive2 { + Xive2Router parent_obj; + + /* Owning chip */ + struct PnvChip *chip; + + /* XSCOM addresses giving access to the controller registers */ + MemoryRegion xscom_regs; + + MemoryRegion ic_mmio; + MemoryRegion ic_mmios[8]; + MemoryRegion esb_mmio; + MemoryRegion end_mmio; + MemoryRegion nvc_mmio; + MemoryRegion nvpg_mmio; + MemoryRegion tm_mmio; + + /* Shortcut values for the Main MMIO regions */ + hwaddr ic_base; + uint32_t ic_shift; + hwaddr esb_base; + uint32_t esb_shift; + hwaddr end_base; + uint32_t end_shift; + hwaddr nvc_base; + uint32_t nvc_shift; + hwaddr nvpg_base; + uint32_t nvpg_shift; + hwaddr tm_base; + uint32_t tm_shift; + + /* Interrupt controller registers */ + uint64_t cq_regs[0x40]; + uint64_t vc_regs[0x100]; + uint64_t pc_regs[0x100]; + uint64_t tctxt_regs[0x30]; + + /* To change default behavior */ + uint64_t capabilities; + uint64_t config; + + /* Our XIVE source objects for IPIs and ENDs */ + XiveSource ipi_source; + Xive2EndSource end_source; + + /* + * Virtual Structure Descriptor tables + * These are in a SRAM protected by ECC. 
+ */ + uint64_t vsds[9][XIVE_BLOCK_MAX]; + + /* Translation tables */ + uint64_t tables[8][XIVE_BLOCK_MAX]; + +} PnvXive2; + +typedef struct PnvXive2Class { + Xive2RouterClass parent_class; + + DeviceRealize parent_realize; +} PnvXive2Class; + +void pnv_xive2_pic_print_info(PnvXive2 *xive, Monitor *mon); + #endif /* PPC_PNV_XIVE_H */ diff --git a/include/hw/ppc/pnv_xscom.h b/include/hw/ppc/pnv_xscom.h index 2ff9f7a8d6..c6e9ef8dd2 100644 --- a/include/hw/ppc/pnv_xscom.h +++ b/include/hw/ppc/pnv_xscom.h @@ -92,6 +92,12 @@ struct PnvXScomInterfaceClass { #define PNV9_XSCOM_OCC_BASE PNV_XSCOM_OCC_BASE #define PNV9_XSCOM_OCC_SIZE 0x8000 +#define PNV9_XSCOM_SBE_CTRL_BASE 0x00050008 +#define PNV9_XSCOM_SBE_CTRL_SIZE 0x1 + +#define PNV9_XSCOM_SBE_MBOX_BASE 0x000D0050 +#define PNV9_XSCOM_SBE_MBOX_SIZE 0x16 + #define PNV9_XSCOM_PBA_BASE 0x5012b00 #define PNV9_XSCOM_PBA_SIZE 0x40 @@ -131,6 +137,27 @@ struct PnvXScomInterfaceClass { #define PNV10_XSCOM_PSIHB_BASE 0x3011D00 #define PNV10_XSCOM_PSIHB_SIZE 0x100 +#define PNV10_XSCOM_OCC_BASE PNV9_XSCOM_OCC_BASE +#define PNV10_XSCOM_OCC_SIZE PNV9_XSCOM_OCC_SIZE + +#define PNV10_XSCOM_SBE_CTRL_BASE PNV9_XSCOM_SBE_CTRL_BASE +#define PNV10_XSCOM_SBE_CTRL_SIZE PNV9_XSCOM_SBE_CTRL_SIZE + +#define PNV10_XSCOM_SBE_MBOX_BASE PNV9_XSCOM_SBE_MBOX_BASE +#define PNV10_XSCOM_SBE_MBOX_SIZE PNV9_XSCOM_SBE_MBOX_SIZE + +#define PNV10_XSCOM_PBA_BASE 0x01010CDA +#define PNV10_XSCOM_PBA_SIZE 0x40 + +#define PNV10_XSCOM_XIVE2_BASE 0x2010800 +#define PNV10_XSCOM_XIVE2_SIZE 0x400 + +#define PNV10_XSCOM_PEC_NEST_BASE 0x3011800 /* index goes downwards ... */ +#define PNV10_XSCOM_PEC_NEST_SIZE 0x100 + +#define PNV10_XSCOM_PEC_PCI_BASE 0x8010800 /* index goes upwards ... */ +#define PNV10_XSCOM_PEC_PCI_SIZE 0x200 + void pnv_xscom_realize(PnvChip *chip, uint64_t size, Error **errp); int pnv_dt_xscom(PnvChip *chip, void *fdt, int root_offset, uint64_t xscom_base, uint64_t xscom_size, diff --git a/include/hw/ppc/ppc.h b/include/hw/ppc/ppc.h index 93e614cffd..02af03ada2 100644 --- a/include/hw/ppc/ppc.h +++ b/include/hw/ppc/ppc.h @@ -54,6 +54,10 @@ struct ppc_tb_t { uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset); clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq); +void cpu_ppc_tb_free(CPUPPCState *env); +void cpu_ppc_hdecr_init(CPUPPCState *env); +void cpu_ppc_hdecr_exit(CPUPPCState *env); + /* Embedded PowerPC DCR management */ typedef uint32_t (*dcr_read_cb)(void *opaque, int dcrn); typedef void (*dcr_write_cb)(void *opaque, int dcrn, uint32_t val); @@ -95,11 +99,11 @@ enum { ARCH_MAC99_U3, }; -#define FW_CFG_PPC_WIDTH (FW_CFG_ARCH_LOCAL + 0x00) -#define FW_CFG_PPC_HEIGHT (FW_CFG_ARCH_LOCAL + 0x01) -#define FW_CFG_PPC_DEPTH (FW_CFG_ARCH_LOCAL + 0x02) -#define FW_CFG_PPC_TBFREQ (FW_CFG_ARCH_LOCAL + 0x03) -#define FW_CFG_PPC_CLOCKFREQ (FW_CFG_ARCH_LOCAL + 0x04) +#define FW_CFG_PPC_WIDTH (FW_CFG_ARCH_LOCAL + 0x00) +#define FW_CFG_PPC_HEIGHT (FW_CFG_ARCH_LOCAL + 0x01) +#define FW_CFG_PPC_DEPTH (FW_CFG_ARCH_LOCAL + 0x02) +#define FW_CFG_PPC_TBFREQ (FW_CFG_ARCH_LOCAL + 0x03) +#define FW_CFG_PPC_CLOCKFREQ (FW_CFG_ARCH_LOCAL + 0x04) #define FW_CFG_PPC_IS_KVM (FW_CFG_ARCH_LOCAL + 0x05) #define FW_CFG_PPC_KVM_HC (FW_CFG_ARCH_LOCAL + 0x06) #define FW_CFG_PPC_KVM_PID (FW_CFG_ARCH_LOCAL + 0x07) diff --git a/include/hw/ppc/ppc4xx.h b/include/hw/ppc/ppc4xx.h index 980f964b5a..f8c86e09ec 100644 --- a/include/hw/ppc/ppc4xx.h +++ b/include/hw/ppc/ppc4xx.h @@ -27,26 +27,129 @@ #include "hw/ppc/ppc.h" #include "exec/memory.h" - -/* PowerPC 4xx core 
initialization */ -PowerPCCPU *ppc4xx_init(const char *cpu_model, - clk_setup_t *cpu_clk, clk_setup_t *tb_clk, - uint32_t sysclk); - -void ppc4xx_sdram_banks(MemoryRegion *ram, int nr_banks, - MemoryRegion ram_memories[], - hwaddr ram_bases[], hwaddr ram_sizes[], - const ram_addr_t sdram_bank_sizes[]); - -void ppc4xx_sdram_init (CPUPPCState *env, qemu_irq irq, int nbanks, - MemoryRegion ram_memories[], - hwaddr *ram_bases, - hwaddr *ram_sizes, - int do_init); - -void ppc4xx_mal_init(CPUPPCState *env, uint8_t txcnum, uint8_t rxcnum, - qemu_irq irqs[4]); +#include "hw/sysbus.h" #define TYPE_PPC4xx_PCI_HOST_BRIDGE "ppc4xx-pcihost" +/* + * Generic DCR device + */ +#define TYPE_PPC4xx_DCR_DEVICE "ppc4xx-dcr-device" +OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxDcrDeviceState, PPC4xx_DCR_DEVICE); +struct Ppc4xxDcrDeviceState { + SysBusDevice parent_obj; + + PowerPCCPU *cpu; +}; + +void ppc4xx_dcr_register(Ppc4xxDcrDeviceState *dev, int dcrn, void *opaque, + dcr_read_cb dcr_read, dcr_write_cb dcr_write); +bool ppc4xx_dcr_realize(Ppc4xxDcrDeviceState *dev, PowerPCCPU *cpu, + Error **errp); + +/* Memory Access Layer (MAL) */ +#define TYPE_PPC4xx_MAL "ppc4xx-mal" +OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxMalState, PPC4xx_MAL); +struct Ppc4xxMalState { + Ppc4xxDcrDeviceState parent_obj; + + qemu_irq irqs[4]; + uint32_t cfg; + uint32_t esr; + uint32_t ier; + uint32_t txcasr; + uint32_t txcarr; + uint32_t txeobisr; + uint32_t txdeir; + uint32_t rxcasr; + uint32_t rxcarr; + uint32_t rxeobisr; + uint32_t rxdeir; + uint32_t *txctpr; + uint32_t *rxctpr; + uint32_t *rcbs; + uint8_t txcnum; + uint8_t rxcnum; +}; + +/* Peripheral local bus arbitrer */ +#define TYPE_PPC4xx_PLB "ppc4xx-plb" +OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxPlbState, PPC4xx_PLB); +struct Ppc4xxPlbState { + Ppc4xxDcrDeviceState parent_obj; + + uint32_t acr; + uint32_t bear; + uint32_t besr; +}; + +/* Peripheral controller */ +#define TYPE_PPC4xx_EBC "ppc4xx-ebc" +OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxEbcState, PPC4xx_EBC); +struct Ppc4xxEbcState { + Ppc4xxDcrDeviceState parent_obj; + + uint32_t addr; + uint32_t bcr[8]; + uint32_t bap[8]; + uint32_t bear; + uint32_t besr0; + uint32_t besr1; + uint32_t cfg; +}; + +/* SDRAM DDR controller */ +typedef struct { + MemoryRegion ram; + MemoryRegion container; /* used for clipping */ + hwaddr base; + hwaddr size; + uint32_t bcr; +} Ppc4xxSdramBank; + +#define SDR0_DDR0_DDRM_ENCODE(n) ((((unsigned long)(n)) & 0x03) << 29) +#define SDR0_DDR0_DDRM_DDR1 0x20000000 +#define SDR0_DDR0_DDRM_DDR2 0x40000000 + +#define TYPE_PPC4xx_SDRAM_DDR "ppc4xx-sdram-ddr" +OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxSdramDdrState, PPC4xx_SDRAM_DDR); +struct Ppc4xxSdramDdrState { + Ppc4xxDcrDeviceState parent_obj; + + MemoryRegion *dram_mr; + uint32_t nbanks; /* Banks to use from 4, e.g. when board has less slots */ + Ppc4xxSdramBank bank[4]; + qemu_irq irq; + + uint32_t addr; + uint32_t besr0; + uint32_t besr1; + uint32_t bear; + uint32_t cfg; + uint32_t status; + uint32_t rtr; + uint32_t pmit; + uint32_t tr; + uint32_t ecccfg; + uint32_t eccesr; +}; + +void ppc4xx_sdram_ddr_enable(Ppc4xxSdramDdrState *s); + +/* SDRAM DDR2 controller */ +#define TYPE_PPC4xx_SDRAM_DDR2 "ppc4xx-sdram-ddr2" +OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxSdramDdr2State, PPC4xx_SDRAM_DDR2); +struct Ppc4xxSdramDdr2State { + Ppc4xxDcrDeviceState parent_obj; + + MemoryRegion *dram_mr; + uint32_t nbanks; /* Banks to use from 4, e.g. 
when board has less slots */ + Ppc4xxSdramBank bank[4]; + + uint32_t addr; + uint32_t mcopt2; +}; + +void ppc4xx_sdram_ddr2_enable(Ppc4xxSdramDdr2State *s); + #endif /* PPC4XX_H */ diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index 637652ad16..04a95669ab 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -100,23 +100,30 @@ typedef enum { #define FDT_MAX_SIZE 0x200000 -/* - * NUMA related macros. MAX_DISTANCE_REF_POINTS was taken - * from Linux kernel arch/powerpc/mm/numa.h. It represents the - * amount of associativity domains for non-CPU resources. - * - * NUMA_ASSOC_SIZE is the base array size of an ibm,associativity - * array for any non-CPU resource. - * - * VCPU_ASSOC_SIZE represents the size of ibm,associativity array - * for CPUs, which has an extra element (vcpu_id) in the end. - */ -#define MAX_DISTANCE_REF_POINTS 4 -#define NUMA_ASSOC_SIZE (MAX_DISTANCE_REF_POINTS + 1) -#define VCPU_ASSOC_SIZE (NUMA_ASSOC_SIZE + 1) +/* Max number of GPUs per system */ +#define NVGPU_MAX_NUM 6 -/* Max number of these GPUsper a physical box */ -#define NVGPU_MAX_NUM 6 +/* Max number of NUMA nodes */ +#define NUMA_NODES_MAX_NUM (MAX_NODES + NVGPU_MAX_NUM) + +/* + * NUMA FORM1 macros. FORM1_DIST_REF_POINTS was taken from + * MAX_DISTANCE_REF_POINTS in arch/powerpc/mm/numa.h from Linux + * kernel source. It represents the amount of associativity domains + * for non-CPU resources. + * + * FORM1_NUMA_ASSOC_SIZE is the base array size of an ibm,associativity + * array for any non-CPU resource. + */ +#define FORM1_DIST_REF_POINTS 4 +#define FORM1_NUMA_ASSOC_SIZE (FORM1_DIST_REF_POINTS + 1) + +/* + * FORM2 NUMA affinity has a single associativity domain, giving + * us a assoc size of 2. + */ +#define FORM2_DIST_REF_POINTS 1 +#define FORM2_NUMA_ASSOC_SIZE (FORM2_DIST_REF_POINTS + 1) typedef struct SpaprCapabilities SpaprCapabilities; struct SpaprCapabilities { @@ -145,6 +152,7 @@ struct SpaprMachineClass { hwaddr rma_limit; /* clamp the RMA to this size */ bool pre_5_1_assoc_refpoints; bool pre_5_2_numa_associativity; + bool pre_6_2_numa_affinity; bool (*phb_placement)(SpaprMachineState *spapr, uint32_t index, uint64_t *buid, hwaddr *pio, @@ -156,6 +164,21 @@ struct SpaprMachineClass { SpaprIrq *irq; }; +#define WDT_MAX_WATCHDOGS 4 /* Maximum number of watchdog devices */ + +#define TYPE_SPAPR_WDT "spapr-wdt" +OBJECT_DECLARE_SIMPLE_TYPE(SpaprWatchdog, SPAPR_WDT) + +typedef struct SpaprWatchdog { + /*< private >*/ + DeviceState parent_obj; + /*< public >*/ + + QEMUTimer timer; + uint8_t action; /* One of PSERIES_WDTF_ACTION_xxx */ + uint8_t leave_others; /* leaveOtherWatchdogsRunningOnTimeout */ +} SpaprWatchdog; + /** * SpaprMachineState: */ @@ -186,9 +209,12 @@ struct SpaprMachineState { Vof *vof; uint64_t rtc_offset; /* Now used only during incoming migration */ struct PPCTimebase tb; - bool has_graphics; + bool want_stdout_path; uint32_t vsmt; /* Virtual SMT mode (KVM's "core stride") */ + /* Nested HV support (TCG only) */ + uint64_t nested_ptcr; + Notifier epow_notifier; QTAILQ_HEAD(, SpaprEventLogEntry) pending_events; bool use_hotplug_event_source; @@ -249,9 +275,12 @@ struct SpaprMachineState { unsigned gpu_numa_id; SpaprTpmProxy *tpm_proxy; - uint32_t numa_assoc_array[MAX_NODES + NVGPU_MAX_NUM][NUMA_ASSOC_SIZE]; + uint32_t FORM1_assoc_array[NUMA_NODES_MAX_NUM][FORM1_NUMA_ASSOC_SIZE]; + uint32_t FORM2_assoc_array[NUMA_NODES_MAX_NUM][FORM2_NUMA_ASSOC_SIZE]; Error *fwnmi_migration_blocker; + + SpaprWatchdog wds[WDT_MAX_WATCHDOGS]; }; #define H_SUCCESS 0 @@ -332,6 
+361,8 @@ struct SpaprMachineState { #define H_P7 -60 #define H_P8 -61 #define H_P9 -62 +#define H_NOOP -63 +#define H_UNSUPPORTED -67 #define H_OVERLAP -68 #define H_UNSUPPORTED_FLAG -256 #define H_MULTI_THREADS_ACTIVE -9005 @@ -550,8 +581,10 @@ struct SpaprMachineState { #define H_SCM_UNBIND_ALL 0x3FC #define H_SCM_HEALTH 0x400 #define H_RPT_INVALIDATE 0x448 +#define H_SCM_FLUSH 0x44C +#define H_WATCHDOG 0x45C -#define MAX_HCALL_OPCODE H_RPT_INVALIDATE +#define MAX_HCALL_OPCODE H_WATCHDOG /* The hcalls above are standardized in PAPR and implemented by pHyp * as well. @@ -568,7 +601,14 @@ struct SpaprMachineState { #define KVMPPC_H_UPDATE_DT (KVMPPC_HCALL_BASE + 0x3) /* 0x4 was used for KVMPPC_H_UPDATE_PHANDLE in SLOF */ #define KVMPPC_H_VOF_CLIENT (KVMPPC_HCALL_BASE + 0x5) -#define KVMPPC_HCALL_MAX KVMPPC_H_VOF_CLIENT + +/* Platform-specific hcalls used for nested HV KVM */ +#define KVMPPC_H_SET_PARTITION_TABLE (KVMPPC_HCALL_BASE + 0x800) +#define KVMPPC_H_ENTER_NESTED (KVMPPC_HCALL_BASE + 0x804) +#define KVMPPC_H_TLB_INVALIDATE (KVMPPC_HCALL_BASE + 0x808) +#define KVMPPC_H_COPY_TOFROM_GUEST (KVMPPC_HCALL_BASE + 0x80C) + +#define KVMPPC_HCALL_MAX KVMPPC_H_COPY_TOFROM_GUEST /* * The hcall range 0xEF00 to 0xEF80 is reserved for use in facilitating @@ -578,6 +618,65 @@ struct SpaprMachineState { #define SVM_H_TPM_COMM 0xEF10 #define SVM_HCALL_MAX SVM_H_TPM_COMM +/* + * Register state for entering a nested guest with H_ENTER_NESTED. + * New member must be added at the end. + */ +struct kvmppc_hv_guest_state { + uint64_t version; /* version of this structure layout, must be first */ + uint32_t lpid; + uint32_t vcpu_token; + /* These registers are hypervisor privileged (at least for writing) */ + uint64_t lpcr; + uint64_t pcr; + uint64_t amor; + uint64_t dpdes; + uint64_t hfscr; + int64_t tb_offset; + uint64_t dawr0; + uint64_t dawrx0; + uint64_t ciabr; + uint64_t hdec_expiry; + uint64_t purr; + uint64_t spurr; + uint64_t ic; + uint64_t vtb; + uint64_t hdar; + uint64_t hdsisr; + uint64_t heir; + uint64_t asdr; + /* These are OS privileged but need to be set late in guest entry */ + uint64_t srr0; + uint64_t srr1; + uint64_t sprg[4]; + uint64_t pidr; + uint64_t cfar; + uint64_t ppr; + /* Version 1 ends here */ + uint64_t dawr1; + uint64_t dawrx1; + /* Version 2 ends here */ +}; + +/* Latest version of hv_guest_state structure */ +#define HV_GUEST_STATE_VERSION 2 + +/* Linux 64-bit powerpc pt_regs struct, used by nested HV */ +struct kvmppc_pt_regs { + uint64_t gpr[32]; + uint64_t nip; + uint64_t msr; + uint64_t orig_gpr3; /* Used for restarting system calls */ + uint64_t ctr; + uint64_t link; + uint64_t xer; + uint64_t ccr; + uint64_t softe; /* Soft enabled/disabled */ + uint64_t trap; /* Reason for being here */ + uint64_t dar; /* Fault registers */ + uint64_t dsisr; /* on 4xx/Book-E used for ESR */ + uint64_t result; /* Result of a system call */ +}; typedef struct SpaprDeviceTreeUpdateHeader { uint32_t version_id; @@ -595,6 +694,9 @@ typedef target_ulong (*spapr_hcall_fn)(PowerPCCPU *cpu, SpaprMachineState *sm, void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn); target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode, target_ulong *args); + +void spapr_exit_nested(PowerPCCPU *cpu, int excp); + target_ulong softmmu_resize_hpt_prepare(PowerPCCPU *cpu, SpaprMachineState *spapr, target_ulong shift); target_ulong softmmu_resize_hpt_commit(PowerPCCPU *cpu, SpaprMachineState *spapr, @@ -662,6 +764,7 @@ void push_sregs_to_kvm_pr(SpaprMachineState *spapr); #define 
RTAS_DDW_PGSIZE_128M 0x20 #define RTAS_DDW_PGSIZE_256M 0x40 #define RTAS_DDW_PGSIZE_16G 0x80 +#define RTAS_DDW_PGSIZE_2M 0x100 /* RTAS tokens */ #define RTAS_TOKEN_BASE 0x2000 @@ -745,7 +848,8 @@ static inline uint64_t ppc64_phys_to_real(uint64_t addr) static inline uint32_t rtas_ld(target_ulong phys, int n) { - return ldl_be_phys(&address_space_memory, ppc64_phys_to_real(phys + 4*n)); + return ldl_be_phys(&address_space_memory, + ppc64_phys_to_real(phys + 4 * n)); } static inline uint64_t rtas_ldq(target_ulong phys, int n) @@ -755,7 +859,7 @@ static inline uint64_t rtas_ldq(target_ulong phys, int n) static inline void rtas_st(target_ulong phys, int n, uint32_t val) { - stl_be_phys(&address_space_memory, ppc64_phys_to_real(phys + 4*n), val); + stl_be_phys(&address_space_memory, ppc64_phys_to_real(phys + 4 * n), val); } typedef void (*spapr_rtas_fn)(PowerPCCPU *cpu, SpaprMachineState *sm, @@ -818,6 +922,7 @@ struct SpaprTceTable { bool bypass; bool need_vfio; bool skipping_replay; + bool def_win; int fd; MemoryRegion root; IOMMUMemoryRegion iommu; @@ -943,6 +1048,7 @@ extern const VMStateDescription vmstate_spapr_cap_large_decr; extern const VMStateDescription vmstate_spapr_cap_ccf_assist; extern const VMStateDescription vmstate_spapr_cap_fwnmi; extern const VMStateDescription vmstate_spapr_cap_rpt_invalidate; +extern const VMStateDescription vmstate_spapr_wdt; static inline uint8_t spapr_get_cap(SpaprMachineState *spapr, int cap) { @@ -979,4 +1085,7 @@ target_ulong spapr_vof_client_architecture_support(MachineState *ms, target_ulong ovec_addr); void spapr_vof_client_dt_finalize(SpaprMachineState *spapr, void *fdt); +/* H_WATCHDOG */ +void spapr_watchdog_init(SpaprMachineState *spapr); + #endif /* HW_SPAPR_H */ diff --git a/include/hw/ppc/spapr_cpu_core.h b/include/hw/ppc/spapr_cpu_core.h index dab3dfc76c..b560514560 100644 --- a/include/hw/ppc/spapr_cpu_core.h +++ b/include/hw/ppc/spapr_cpu_core.h @@ -48,6 +48,11 @@ typedef struct SpaprCpuState { bool prod; /* not migrated, only used to improve dispatch latencies */ struct ICPState *icp; struct XiveTCTX *tctx; + + /* Fields for nested-HV support */ + bool in_nested; /* true while the L2 is executing */ + CPUPPCState *nested_host_state; /* holds the L1 state while L2 executes */ + int64_t nested_tb_offset; /* L1->L2 TB offset */ } SpaprCpuState; static inline SpaprCpuState *spapr_cpu_state(PowerPCCPU *cpu) diff --git a/include/hw/ppc/spapr_numa.h b/include/hw/ppc/spapr_numa.h index 6f9f02d3de..7cb3367400 100644 --- a/include/hw/ppc/spapr_numa.h +++ b/include/hw/ppc/spapr_numa.h @@ -24,6 +24,7 @@ */ void spapr_numa_associativity_init(SpaprMachineState *spapr, MachineState *machine); +void spapr_numa_associativity_check(SpaprMachineState *spapr); void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas); void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt, int offset, int nodeid); diff --git a/include/hw/ppc/spapr_nvdimm.h b/include/hw/ppc/spapr_nvdimm.h index 764f999f54..e9436cb6ef 100644 --- a/include/hw/ppc/spapr_nvdimm.h +++ b/include/hw/ppc/spapr_nvdimm.h @@ -21,5 +21,6 @@ void spapr_dt_persistent_memory(SpaprMachineState *spapr, void *fdt); bool spapr_nvdimm_validate(HotplugHandler *hotplug_dev, NVDIMMDevice *nvdimm, uint64_t size, Error **errp); void spapr_add_nvdimm(DeviceState *dev, uint64_t slot); +void spapr_nvdimm_finish_flushes(void); #endif diff --git a/include/hw/ppc/spapr_ovec.h b/include/hw/ppc/spapr_ovec.h index 48b716a060..c3e8b98e7e 100644 --- a/include/hw/ppc/spapr_ovec.h +++ 
b/include/hw/ppc/spapr_ovec.h @@ -49,6 +49,7 @@ typedef struct SpaprOptionVector SpaprOptionVector; /* option vector 5 */ #define OV5_DRCONF_MEMORY OV_BIT(2, 2) #define OV5_FORM1_AFFINITY OV_BIT(5, 0) +#define OV5_FORM2_AFFINITY OV_BIT(5, 2) #define OV5_HP_EVT OV_BIT(6, 5) #define OV5_HPT_RESIZE OV_BIT(6, 7) #define OV5_DRMEM_V2 OV_BIT(22, 0) diff --git a/include/hw/ppc/spapr_vio.h b/include/hw/ppc/spapr_vio.h index 4bea87f39c..7eae1a4847 100644 --- a/include/hw/ppc/spapr_vio.h +++ b/include/hw/ppc/spapr_vio.h @@ -91,35 +91,47 @@ static inline void spapr_vio_irq_pulse(SpaprVioDevice *dev) static inline bool spapr_vio_dma_valid(SpaprVioDevice *dev, uint64_t taddr, uint32_t size, DMADirection dir) { - return dma_memory_valid(&dev->as, taddr, size, dir); + return dma_memory_valid(&dev->as, taddr, size, dir, MEMTXATTRS_UNSPECIFIED); } static inline int spapr_vio_dma_read(SpaprVioDevice *dev, uint64_t taddr, void *buf, uint32_t size) { - return (dma_memory_read(&dev->as, taddr, buf, size) != 0) ? + return (dma_memory_read(&dev->as, taddr, + buf, size, MEMTXATTRS_UNSPECIFIED) != 0) ? H_DEST_PARM : H_SUCCESS; } static inline int spapr_vio_dma_write(SpaprVioDevice *dev, uint64_t taddr, const void *buf, uint32_t size) { - return (dma_memory_write(&dev->as, taddr, buf, size) != 0) ? + return (dma_memory_write(&dev->as, taddr, + buf, size, MEMTXATTRS_UNSPECIFIED) != 0) ? H_DEST_PARM : H_SUCCESS; } static inline int spapr_vio_dma_set(SpaprVioDevice *dev, uint64_t taddr, uint8_t c, uint32_t size) { - return (dma_memory_set(&dev->as, taddr, c, size) != 0) ? + return (dma_memory_set(&dev->as, taddr, + c, size, MEMTXATTRS_UNSPECIFIED) != 0) ? H_DEST_PARM : H_SUCCESS; } -#define vio_stb(_dev, _addr, _val) (stb_dma(&(_dev)->as, (_addr), (_val))) -#define vio_sth(_dev, _addr, _val) (stw_be_dma(&(_dev)->as, (_addr), (_val))) -#define vio_stl(_dev, _addr, _val) (stl_be_dma(&(_dev)->as, (_addr), (_val))) -#define vio_stq(_dev, _addr, _val) (stq_be_dma(&(_dev)->as, (_addr), (_val))) -#define vio_ldq(_dev, _addr) (ldq_be_dma(&(_dev)->as, (_addr))) +#define vio_stb(_dev, _addr, _val) \ + (stb_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED)) +#define vio_sth(_dev, _addr, _val) \ + (stw_be_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED)) +#define vio_stl(_dev, _addr, _val) \ + (stl_be_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED)) +#define vio_stq(_dev, _addr, _val) \ + (stq_be_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED)) +#define vio_ldq(_dev, _addr) \ + ({ \ + uint64_t _val; \ + ldq_be_dma(&(_dev)->as, (_addr), &_val, MEMTXATTRS_UNSPECIFIED); \ + _val; \ + }) int spapr_vio_send_crq(SpaprVioDevice *dev, uint8_t *crq); diff --git a/include/hw/ppc/vof.h b/include/hw/ppc/vof.h index 97fdef758b..f8c0effcaf 100644 --- a/include/hw/ppc/vof.h +++ b/include/hw/ppc/vof.h @@ -6,6 +6,11 @@ #ifndef HW_VOF_H #define HW_VOF_H +#include "qom/object.h" +#include "exec/address-spaces.h" +#include "exec/memory.h" +#include "cpu.h" + typedef struct Vof { uint64_t top_addr; /* copied from rma_size */ GArray *claimed; /* array of SpaprOfClaimed */ diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h index 445eccfe6b..f7eea4ca81 100644 --- a/include/hw/ppc/xive.h +++ b/include/hw/ppc/xive.h @@ -160,7 +160,7 @@ DECLARE_CLASS_CHECKERS(XiveNotifierClass, XIVE_NOTIFIER, struct XiveNotifierClass { InterfaceClass parent; - void (*notify)(XiveNotifier *xn, uint32_t lisn); + void (*notify)(XiveNotifier *xn, uint32_t lisn, bool pq_checked); }; /* @@ -176,6 +176,7 @@ 
OBJECT_DECLARE_SIMPLE_TYPE(XiveSource, XIVE_SOURCE) */ #define XIVE_SRC_H_INT_ESB 0x1 /* ESB managed with hcall H_INT_ESB */ #define XIVE_SRC_STORE_EOI 0x2 /* Store EOI supported */ +#define XIVE_SRC_PQ_DISABLE 0x4 /* Disable check on the PQ state bits */ struct XiveSource { DeviceState parent; @@ -261,6 +262,10 @@ static inline hwaddr xive_source_esb_mgmt(XiveSource *xsrc, int srcno) #define XIVE_ESB_QUEUED (XIVE_ESB_VAL_P | XIVE_ESB_VAL_Q) #define XIVE_ESB_OFF XIVE_ESB_VAL_Q +bool xive_esb_trigger(uint8_t *pq); +bool xive_esb_eoi(uint8_t *pq); +uint8_t xive_esb_set(uint8_t *pq, uint8_t value); + /* * "magic" Event State Buffer (ESB) MMIO offsets. * @@ -274,6 +279,7 @@ static inline hwaddr xive_source_esb_mgmt(XiveSource *xsrc, int srcno) #define XIVE_ESB_STORE_EOI 0x400 /* Store */ #define XIVE_ESB_LOAD_EOI 0x000 /* Load */ #define XIVE_ESB_GET 0x800 /* Load */ +#define XIVE_ESB_INJECT 0x800 /* Store */ #define XIVE_ESB_SET_PQ_00 0xc00 /* Load */ #define XIVE_ESB_SET_PQ_01 0xd00 /* Load */ #define XIVE_ESB_SET_PQ_10 0xe00 /* Load */ @@ -282,6 +288,30 @@ static inline hwaddr xive_source_esb_mgmt(XiveSource *xsrc, int srcno) uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno); uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq); +/* + * Source status helpers + */ +static inline void xive_source_set_status(XiveSource *xsrc, uint32_t srcno, + uint8_t status, bool enable) +{ + if (enable) { + xsrc->status[srcno] |= status; + } else { + xsrc->status[srcno] &= ~status; + } +} + +static inline void xive_source_set_asserted(XiveSource *xsrc, uint32_t srcno, + bool enable) +{ + xive_source_set_status(xsrc, srcno, XIVE_STATUS_ASSERTED, enable); +} + +static inline bool xive_source_is_asserted(XiveSource *xsrc, uint32_t srcno) +{ + return xsrc->status[srcno] & XIVE_STATUS_ASSERTED; +} + void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon); @@ -331,6 +361,11 @@ struct XiveTCTX { XivePresenter *xptr; }; +static inline uint32_t xive_tctx_word2(uint8_t *ring) +{ + return *((uint32_t *) &ring[TM_WORD2]); +} + /* * XIVE Router */ @@ -352,6 +387,10 @@ struct XiveRouterClass { /* XIVE table accessors */ int (*get_eas)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, XiveEAS *eas); + int (*get_pq)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, + uint8_t *pq); + int (*set_pq)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, + uint8_t *pq); int (*get_end)(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, XiveEND *end); int (*write_end)(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, @@ -373,7 +412,7 @@ int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt); int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt, uint8_t word_number); -void xive_router_notify(XiveNotifier *xn, uint32_t lisn); +void xive_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked); /* * XIVE Presenter @@ -404,6 +443,10 @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, uint8_t format, uint8_t nvt_blk, uint32_t nvt_idx, bool cam_ignore, uint32_t logic_serv); +bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool cam_ignore, uint8_t priority, + uint32_t logic_serv); /* * XIVE Fabric (Interface between Interrupt Controller and Machine) @@ -450,6 +493,17 @@ struct XiveENDSource { */ #define XIVE_PRIORITY_MAX 7 +/* + * Convert a priority number to an Interrupt Pending Buffer (IPB) + * register, which indicates 
a pending interrupt at the priority + * corresponding to the bit number + */ +static inline uint8_t xive_priority_to_ipb(uint8_t priority) +{ + return priority > XIVE_PRIORITY_MAX ? + 0 : 1 << (XIVE_PRIORITY_MAX - priority); +} + /* * XIVE Thread Interrupt Management Aera (TIMA) * @@ -473,6 +527,7 @@ Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp); void xive_tctx_reset(XiveTCTX *tctx); void xive_tctx_destroy(XiveTCTX *tctx); void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb); +void xive_tctx_reset_os_signal(XiveTCTX *tctx); /* * KVM XIVE device helpers diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h new file mode 100644 index 0000000000..e9e3ea135e --- /dev/null +++ b/include/hw/ppc/xive2.h @@ -0,0 +1,109 @@ +/* + * QEMU PowerPC XIVE2 interrupt controller model (POWER10) + * + * Copyright (c) 2019-2022, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + * + */ + +#ifndef PPC_XIVE2_H +#define PPC_XIVE2_H + +#include "hw/ppc/xive2_regs.h" + +/* + * XIVE2 Router (POWER10) + */ +typedef struct Xive2Router { + SysBusDevice parent; + + XiveFabric *xfb; +} Xive2Router; + +#define TYPE_XIVE2_ROUTER "xive2-router" +OBJECT_DECLARE_TYPE(Xive2Router, Xive2RouterClass, XIVE2_ROUTER); + +/* + * Configuration flags + */ + +#define XIVE2_GEN1_TIMA_OS 0x00000001 +#define XIVE2_VP_SAVE_RESTORE 0x00000002 +#define XIVE2_THREADID_8BITS 0x00000004 + +typedef struct Xive2RouterClass { + SysBusDeviceClass parent; + + /* XIVE table accessors */ + int (*get_eas)(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx, + Xive2Eas *eas); + int (*get_pq)(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx, + uint8_t *pq); + int (*set_pq)(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx, + uint8_t *pq); + int (*get_end)(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx, + Xive2End *end); + int (*write_end)(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx, + Xive2End *end, uint8_t word_number); + int (*get_nvp)(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx, + Xive2Nvp *nvp); + int (*write_nvp)(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx, + Xive2Nvp *nvp, uint8_t word_number); + uint8_t (*get_block_id)(Xive2Router *xrtr); + uint32_t (*get_config)(Xive2Router *xrtr); +} Xive2RouterClass; + +int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx, + Xive2Eas *eas); +int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx, + Xive2End *end); +int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx, + Xive2End *end, uint8_t word_number); +int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx, + Xive2Nvp *nvp); +int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx, + Xive2Nvp *nvp, uint8_t word_number); +uint32_t xive2_router_get_config(Xive2Router *xrtr); + +void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked); + +/* + * XIVE2 Presenter (POWER10) + */ + +int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, + uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool cam_ignore, uint32_t logic_serv); + +/* + * XIVE2 END ESBs (POWER10) + */ + +#define TYPE_XIVE2_END_SOURCE "xive2-end-source" +OBJECT_DECLARE_SIMPLE_TYPE(Xive2EndSource, XIVE2_END_SOURCE) + +typedef struct Xive2EndSource { + DeviceState parent; + + uint32_t nr_ends; + + /* ESB memory region */ + uint32_t esb_shift; + MemoryRegion esb_mmio; + + 
Xive2Router *xrtr; +} Xive2EndSource; + +/* + * XIVE2 Thread Interrupt Management Area (POWER10) + */ + +void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, + uint64_t value, unsigned size); +uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size); + +#endif /* PPC_XIVE2_H */ diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h new file mode 100644 index 0000000000..14605bd458 --- /dev/null +++ b/include/hw/ppc/xive2_regs.h @@ -0,0 +1,210 @@ +/* + * QEMU PowerPC XIVE2 internal structure definitions (POWER10) + * + * Copyright (c) 2019-2022, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ + +#ifndef PPC_XIVE2_REGS_H +#define PPC_XIVE2_REGS_H + +/* + * Thread Interrupt Management Area (TIMA) + * + * In Gen1 mode (P9 compat mode) word 2 is the same. However in Gen2 + * mode (P10), the CAM line is slightly different as the VP space was + * increased. + */ +#define TM2_QW0W2_VU PPC_BIT32(0) +#define TM2_QW0W2_LOGIC_SERV PPC_BITMASK32(4, 31) +#define TM2_QW1W2_VO PPC_BIT32(0) +#define TM2_QW1W2_HO PPC_BIT32(1) +#define TM2_QW1W2_OS_CAM PPC_BITMASK32(4, 31) +#define TM2_QW2W2_VP PPC_BIT32(0) +#define TM2_QW2W2_HP PPC_BIT32(1) +#define TM2_QW2W2_POOL_CAM PPC_BITMASK32(4, 31) +#define TM2_QW3W2_VT PPC_BIT32(0) +#define TM2_QW3W2_HT PPC_BIT32(1) +#define TM2_QW3W2_LP PPC_BIT32(6) +#define TM2_QW3W2_LE PPC_BIT32(7) + +/* + * Event Assignment Structure (EAS) + */ + +typedef struct Xive2Eas { + uint64_t w; +#define EAS2_VALID PPC_BIT(0) +#define EAS2_END_BLOCK PPC_BITMASK(4, 7) /* Destination EQ block# */ +#define EAS2_END_INDEX PPC_BITMASK(8, 31) /* Destination EQ index */ +#define EAS2_MASKED PPC_BIT(32) /* Masked */ +#define EAS2_END_DATA PPC_BITMASK(33, 63) /* written to the EQ */ +} Xive2Eas; + +#define xive2_eas_is_valid(eas) (be64_to_cpu((eas)->w) & EAS2_VALID) +#define xive2_eas_is_masked(eas) (be64_to_cpu((eas)->w) & EAS2_MASKED) + +void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, Monitor *mon); + +/* + * Event Notifification Descriptor (END) + */ + +typedef struct Xive2End { + uint32_t w0; +#define END2_W0_VALID PPC_BIT32(0) /* "v" bit */ +#define END2_W0_ENQUEUE PPC_BIT32(5) /* "q" bit */ +#define END2_W0_UCOND_NOTIFY PPC_BIT32(6) /* "n" bit */ +#define END2_W0_SILENT_ESCALATE PPC_BIT32(7) /* "s" bit */ +#define END2_W0_BACKLOG PPC_BIT32(8) /* "b" bit */ +#define END2_W0_PRECL_ESC_CTL PPC_BIT32(9) /* "p" bit */ +#define END2_W0_UNCOND_ESCALATE PPC_BIT32(10) /* "u" bit */ +#define END2_W0_ESCALATE_CTL PPC_BIT32(11) /* "e" bit */ +#define END2_W0_ADAPTIVE_ESC PPC_BIT32(12) /* "a" bit */ +#define END2_W0_ESCALATE_END PPC_BIT32(13) /* "N" bit */ +#define END2_W0_FIRMWARE1 PPC_BIT32(16) /* Owned by FW */ +#define END2_W0_FIRMWARE2 PPC_BIT32(17) /* Owned by FW */ +#define END2_W0_AEC_SIZE PPC_BITMASK32(18, 19) +#define END2_W0_AEG_SIZE PPC_BITMASK32(20, 23) +#define END2_W0_EQ_VG_PREDICT PPC_BITMASK32(24, 31) /* Owned by HW */ + uint32_t w1; +#define END2_W1_ESn PPC_BITMASK32(0, 1) +#define END2_W1_ESn_P PPC_BIT32(0) +#define END2_W1_ESn_Q PPC_BIT32(1) +#define END2_W1_ESe PPC_BITMASK32(2, 3) +#define END2_W1_ESe_P PPC_BIT32(2) +#define END2_W1_ESe_Q PPC_BIT32(3) +#define END2_W1_GEN_FLIPPED PPC_BIT32(8) +#define END2_W1_GENERATION PPC_BIT32(9) +#define END2_W1_PAGE_OFF PPC_BITMASK32(10, 31) + uint32_t w2; +#define END2_W2_RESERVED PPC_BITMASK32(4, 7) +#define END2_W2_EQ_ADDR_HI PPC_BITMASK32(8, 31) + uint32_t 
w3; +#define END2_W3_EQ_ADDR_LO PPC_BITMASK32(0, 24) +#define END2_W3_QSIZE PPC_BITMASK32(28, 31) + uint32_t w4; +#define END2_W4_END_BLOCK PPC_BITMASK32(4, 7) +#define END2_W4_ESC_END_INDEX PPC_BITMASK32(8, 31) +#define END2_W4_ESB_BLOCK PPC_BITMASK32(0, 3) +#define END2_W4_ESC_ESB_INDEX PPC_BITMASK32(4, 31) + uint32_t w5; +#define END2_W5_ESC_END_DATA PPC_BITMASK32(1, 31) + uint32_t w6; +#define END2_W6_FORMAT_BIT PPC_BIT32(0) +#define END2_W6_IGNORE PPC_BIT32(1) +#define END2_W6_VP_BLOCK PPC_BITMASK32(4, 7) +#define END2_W6_VP_OFFSET PPC_BITMASK32(8, 31) +#define END2_W6_VP_OFFSET_GEN1 PPC_BITMASK32(13, 31) + uint32_t w7; +#define END2_W7_TOPO PPC_BITMASK32(0, 3) /* Owned by HW */ +#define END2_W7_F0_PRIORITY PPC_BITMASK32(8, 15) +#define END2_W7_F1_LOG_SERVER_ID PPC_BITMASK32(4, 31) +} Xive2End; + +#define xive2_end_is_valid(end) (be32_to_cpu((end)->w0) & END2_W0_VALID) +#define xive2_end_is_enqueue(end) (be32_to_cpu((end)->w0) & END2_W0_ENQUEUE) +#define xive2_end_is_notify(end) \ + (be32_to_cpu((end)->w0) & END2_W0_UCOND_NOTIFY) +#define xive2_end_is_backlog(end) (be32_to_cpu((end)->w0) & END2_W0_BACKLOG) +#define xive2_end_is_escalate(end) \ + (be32_to_cpu((end)->w0) & END2_W0_ESCALATE_CTL) +#define xive2_end_is_uncond_escalation(end) \ + (be32_to_cpu((end)->w0) & END2_W0_UNCOND_ESCALATE) +#define xive2_end_is_silent_escalation(end) \ + (be32_to_cpu((end)->w0) & END2_W0_SILENT_ESCALATE) +#define xive2_end_is_escalate_end(end) \ + (be32_to_cpu((end)->w0) & END2_W0_ESCALATE_END) +#define xive2_end_is_firmware1(end) \ + (be32_to_cpu((end)->w0) & END2_W0_FIRMWARE1) +#define xive2_end_is_firmware2(end) \ + (be32_to_cpu((end)->w0) & END2_W0_FIRMWARE2) + +static inline uint64_t xive2_end_qaddr(Xive2End *end) +{ + return ((uint64_t) be32_to_cpu(end->w2) & END2_W2_EQ_ADDR_HI) << 32 | + (be32_to_cpu(end->w3) & END2_W3_EQ_ADDR_LO); +} + +void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, Monitor *mon); +void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width, + Monitor *mon); +void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx, + Monitor *mon); + +/* + * Notification Virtual Processor (NVP) + */ +typedef struct Xive2Nvp { + uint32_t w0; +#define NVP2_W0_VALID PPC_BIT32(0) +#define NVP2_W0_HW PPC_BIT32(7) +#define NVP2_W0_ESC_END PPC_BIT32(25) /* 'N' bit 0:ESB 1:END */ + uint32_t w1; +#define NVP2_W1_CO PPC_BIT32(13) +#define NVP2_W1_CO_PRIV PPC_BITMASK32(14, 15) +#define NVP2_W1_CO_THRID_VALID PPC_BIT32(16) +#define NVP2_W1_CO_THRID PPC_BITMASK32(17, 31) + uint32_t w2; +#define NVP2_W2_CPPR PPC_BITMASK32(0, 7) +#define NVP2_W2_IPB PPC_BITMASK32(8, 15) +#define NVP2_W2_LSMFB PPC_BITMASK32(16, 23) + uint32_t w3; + uint32_t w4; +#define NVP2_W4_ESC_ESB_BLOCK PPC_BITMASK32(0, 3) /* N:0 */ +#define NVP2_W4_ESC_ESB_INDEX PPC_BITMASK32(4, 31) /* N:0 */ +#define NVP2_W4_ESC_END_BLOCK PPC_BITMASK32(4, 7) /* N:1 */ +#define NVP2_W4_ESC_END_INDEX PPC_BITMASK32(8, 31) /* N:1 */ + uint32_t w5; +#define NVP2_W5_PSIZE PPC_BITMASK32(0, 1) +#define NVP2_W5_VP_END_BLOCK PPC_BITMASK32(4, 7) +#define NVP2_W5_VP_END_INDEX PPC_BITMASK32(8, 31) + uint32_t w6; + uint32_t w7; +} Xive2Nvp; + +#define xive2_nvp_is_valid(nvp) (be32_to_cpu((nvp)->w0) & NVP2_W0_VALID) +#define xive2_nvp_is_hw(nvp) (be32_to_cpu((nvp)->w0) & NVP2_W0_HW) +#define xive2_nvp_is_co(nvp) (be32_to_cpu((nvp)->w1) & NVP2_W1_CO) + +/* + * The VP number space in a block is defined by the END2_W6_VP_OFFSET + * field of the XIVE END. 
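The Xive2End accessors above make the END state self-describing: the valid and enqueue bits live in w0 and the event queue address is split across w2/w3. A minimal sketch of how router code might check an END before touching its queue, assuming the END has already been fetched; the helper is illustrative, not part of the patch.

#include "qemu/osdep.h"
#include "hw/ppc/xive2.h"

/* Sketch: validate an END and locate its event queue (helper name assumed). */
static bool xive2_end_queue_base(Xive2End *end, uint64_t *qaddr)
{
    if (!xive2_end_is_valid(end)) {
        return false;                  /* END not configured */
    }
    if (!xive2_end_is_enqueue(end)) {
        return false;                  /* no event queue backs this END */
    }
    *qaddr = xive2_end_qaddr(end);     /* combines the END2_W2/W3 address halves */
    return true;
}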
When running in Gen1 mode (P9 compat mode), + * the VP space is reduced to (1 << 19) VPs per block + */ +#define XIVE2_NVP_SHIFT 24 +#define XIVE2_NVP_COUNT (1 << XIVE2_NVP_SHIFT) + +static inline uint32_t xive2_nvp_cam_line(uint8_t nvp_blk, uint32_t nvp_idx) +{ + return (nvp_blk << XIVE2_NVP_SHIFT) | nvp_idx; +} + +static inline uint32_t xive2_nvp_idx(uint32_t cam_line) +{ + return cam_line & ((1 << XIVE2_NVP_SHIFT) - 1); +} + +static inline uint32_t xive2_nvp_blk(uint32_t cam_line) +{ + return (cam_line >> XIVE2_NVP_SHIFT) & 0xf; +} + +/* + * Notification Virtual Group or Crowd (NVG/NVC) + */ +typedef struct Xive2Nvgc { + uint32_t w0; +#define NVGC2_W0_VALID PPC_BIT32(0) + uint32_t w1; + uint32_t w2; + uint32_t w3; + uint32_t w4; + uint32_t w5; + uint32_t w6; + uint32_t w7; +} Xive2Nvgc; + +#endif /* PPC_XIVE2_REGS_H */ diff --git a/include/hw/ptimer.h b/include/hw/ptimer.h index c443218475..4dc02b0de4 100644 --- a/include/hw/ptimer.h +++ b/include/hw/ptimer.h @@ -33,9 +33,17 @@ * to stderr when the guest attempts to enable the timer. */ -/* The default ptimer policy retains backward compatibility with the legacy - * timers. Custom policies are adjusting the default one. Consider providing - * a correct policy for your timer. +/* + * The 'legacy' ptimer policy retains backward compatibility with the + * traditional ptimer behaviour from before policy flags were introduced. + * It has several weird behaviours which don't match typical hardware + * timer behaviour. For a new device using ptimers, you should not + * use PTIMER_POLICY_LEGACY, but instead check the actual behaviour + * that you need and specify the right set of policy flags to get that. + * + * If you are overhauling an existing device that uses PTIMER_POLICY_LEGACY + * and are in a position to check or test the real hardware behaviour, + * consider updating it to specify the right policy flags. * * The rough edges of the default policy: * - Starting to run with a period = 0 emits error message and stops the @@ -54,7 +62,7 @@ * since the last period, effectively restarting the timer with a * counter = counter value at the moment of change (.i.e. one less). */ -#define PTIMER_POLICY_DEFAULT 0 +#define PTIMER_POLICY_LEGACY 0 /* Periodic timer counter stays with "0" for a one period before wrapping * around. */ diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h index bafc311bfa..785dd5a56e 100644 --- a/include/hw/qdev-core.h +++ b/include/hw/qdev-core.h @@ -26,6 +26,7 @@ typedef enum DeviceCategory { DEVICE_CATEGORY_SOUND, DEVICE_CATEGORY_MISC, DEVICE_CATEGORY_CPU, + DEVICE_CATEGORY_WATCHDOG, DEVICE_CATEGORY_MAX } DeviceCategory; @@ -176,11 +177,12 @@ struct DeviceState { Object parent_obj; /*< public >*/ - const char *id; + char *id; char *canonical_path; bool realized; bool pending_deleted_event; - QemuOpts *opts; + int64_t pending_deleted_expires_ms; + QDict *opts; int hotplugged; bool allow_unplug_during_migration; BusState *parent_bus; @@ -191,6 +193,7 @@ struct DeviceState { int instance_id_alias; int alias_required_for_version; ResettableState reset; + GSList *unplug_blockers; }; struct DeviceListener { @@ -201,8 +204,12 @@ struct DeviceListener { * informs qdev if a device should be visible or hidden. We can * hide a failover device depending for example on the device * opts. + * + * On errors, it returns false and errp is set. Device creation + * should fail in this case. 
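The reworked hw/ptimer.h comment above tells new devices to pick explicit policy flags rather than PTIMER_POLICY_LEGACY. A minimal sketch of what that looks like for a hypothetical down-counter device; the device state, callback, tick rate and the particular flag combination (PTIMER_POLICY_NO_IMMEDIATE_TRIGGER | PTIMER_POLICY_NO_COUNTER_ROUND_DOWN, taken from the existing ptimer.h flag set) are assumptions, not a recommendation made by the patch.

#include "qemu/osdep.h"
#include "hw/ptimer.h"

static void mydev_timer_hit(void *opaque)
{
    /* Raise the device interrupt here (device-specific, assumed). */
}

/* Sketch: a new device choosing explicit policy flags instead of LEGACY. */
static ptimer_state *mydev_timer_setup(void *dev_state)
{
    ptimer_state *pt = ptimer_init(mydev_timer_hit, dev_state,
                                   PTIMER_POLICY_NO_IMMEDIATE_TRIGGER |
                                   PTIMER_POLICY_NO_COUNTER_ROUND_DOWN);

    ptimer_transaction_begin(pt);
    ptimer_set_freq(pt, 1000000);      /* 1 MHz tick, illustrative */
    ptimer_set_limit(pt, 0xffff, 1);   /* reload value, reset the counter */
    ptimer_transaction_commit(pt);
    return pt;
}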
*/ - bool (*hide_device)(DeviceListener *listener, QemuOpts *device_opts); + bool (*hide_device)(DeviceListener *listener, const QDict *device_opts, + bool from_json, Error **errp); QTAILQ_ENTRY(DeviceListener) link; }; @@ -264,6 +271,7 @@ struct BusState { HotplugHandler *hotplug_handler; int max_index; bool realized; + bool full; int num_children; /* @@ -314,6 +322,7 @@ compat_props_add(GPtrArray *arr, * The returned object has a reference count of 1. */ DeviceState *qdev_new(const char *name); + /** * qdev_try_new: Try to create a device on the heap * @name: device type to create @@ -322,6 +331,7 @@ DeviceState *qdev_new(const char *name); * does not exist, rather than asserting. */ DeviceState *qdev_try_new(const char *name); + /** * qdev_realize: Realize @dev. * @dev: device to realize @@ -340,6 +350,7 @@ DeviceState *qdev_try_new(const char *name); * qdev_realize_and_unref() instead. */ bool qdev_realize(DeviceState *dev, BusState *bus, Error **errp); + /** * qdev_realize_and_unref: Realize @dev and drop a reference * @dev: device to realize @@ -365,6 +376,7 @@ bool qdev_realize(DeviceState *dev, BusState *bus, Error **errp); * would be incorrect. For that use case you want qdev_realize(). */ bool qdev_realize_and_unref(DeviceState *dev, BusState *bus, Error **errp); + /** * qdev_unrealize: Unrealize a device * @dev: device to unrealize @@ -374,7 +386,7 @@ bool qdev_realize_and_unref(DeviceState *dev, BusState *bus, Error **errp); * * - unrealize any child buses by calling qbus_unrealize() * (this will recursively unrealize any devices on those buses) - * - call the the unrealize method of @dev + * - call the unrealize method of @dev * * The device can then be freed by causing its reference count to go * to zero. @@ -408,6 +420,34 @@ void qdev_simple_device_unplug_cb(HotplugHandler *hotplug_dev, void qdev_machine_creation_done(void); bool qdev_machine_modified(void); +/** + * qdev_add_unplug_blocker: Add an unplug blocker to a device + * + * @dev: Device to be blocked from unplug + * @reason: Reason for blocking + */ +void qdev_add_unplug_blocker(DeviceState *dev, Error *reason); + +/** + * qdev_del_unplug_blocker: Remove an unplug blocker from a device + * + * @dev: Device to be unblocked + * @reason: Pointer to the Error used with qdev_add_unplug_blocker. + * Used as a handle to lookup the blocker for deletion. + */ +void qdev_del_unplug_blocker(DeviceState *dev, Error *reason); + +/** + * qdev_unplug_blocked: Confirm if a device is blocked from unplug + * + * @dev: Device to be tested + * @reason: Returns one of the reasons why the device is blocked, + * if any + * + * Returns: true if device is blocked from unplug, false otherwise + */ +bool qdev_unplug_blocked(DeviceState *dev, Error **errp); + /** * GpioPolarity: Polarity of a GPIO line * @@ -443,6 +483,7 @@ typedef enum { * For named input GPIO lines, use qdev_get_gpio_in_named(). 
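As the qdev_add_unplug_blocker()/qdev_del_unplug_blocker() comments above note, the Error object itself is the handle for the blocker. A minimal usage sketch for a hypothetical device that must not be unplugged while it is servicing requests; the device state and field names are assumptions, error_setg()/error_free() are the standard QAPI error helpers.

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/qdev-core.h"

typedef struct {
    Error *unplug_blocker;   /* handle kept for the matching del call */
} MyDevIoState;

/* Sketch: block hot-unplug while I/O is outstanding (names assumed). */
static void mydev_start_io(DeviceState *dev, MyDevIoState *s)
{
    error_setg(&s->unplug_blocker, "%s is busy servicing requests",
               object_get_typename(OBJECT(dev)));
    qdev_add_unplug_blocker(dev, s->unplug_blocker);
}

static void mydev_stop_io(DeviceState *dev, MyDevIoState *s)
{
    qdev_del_unplug_blocker(dev, s->unplug_blocker);
    error_free(s->unplug_blocker);
    s->unplug_blocker = NULL;
}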
*/ qemu_irq qdev_get_gpio_in(DeviceState *dev, int n); + /** * qdev_get_gpio_in_named: Get one of a device's named input GPIO lines * @dev: Device whose GPIO we want @@ -464,7 +505,7 @@ qemu_irq qdev_get_gpio_in_named(DeviceState *dev, const char *name, int n); * qdev_connect_gpio_out: Connect one of a device's anonymous output GPIO lines * @dev: Device whose GPIO to connect * @n: Number of the anonymous output GPIO line (which must be in range) - * @pin: qemu_irq to connect the output line to + * @input_pin: qemu_irq to connect the output line to * * This function connects an anonymous output GPIO line on a device * up to an arbitrary qemu_irq, so that when the device asserts that @@ -481,7 +522,7 @@ qemu_irq qdev_get_gpio_in_named(DeviceState *dev, const char *name, int n); * qemu_irqs at once, or to connect multiple outbound GPIOs to the * same qemu_irq. (Warning: there is no assertion or other guard to * catch this error: the model will just not do the right thing.) - * Instead, for fan-out you can use the TYPE_IRQ_SPLIT device: connect + * Instead, for fan-out you can use the TYPE_SPLIT_IRQ device: connect * a device's outbound GPIO to the splitter's input, and connect each * of the splitter's outputs to a different device. For fan-in you * can use the TYPE_OR_IRQ device, which is a model of a logical OR @@ -490,12 +531,14 @@ qemu_irq qdev_get_gpio_in_named(DeviceState *dev, const char *name, int n); * For named output GPIO lines, use qdev_connect_gpio_out_named(). */ void qdev_connect_gpio_out(DeviceState *dev, int n, qemu_irq pin); + /** - * qdev_connect_gpio_out: Connect one of a device's anonymous output GPIO lines + * qdev_connect_gpio_out_named: Connect one of a device's named output + * GPIO lines * @dev: Device whose GPIO to connect * @name: Name of the output GPIO array * @n: Number of the anonymous output GPIO line (which must be in range) - * @pin: qemu_irq to connect the output line to + * @input_pin: qemu_irq to connect the output line to * * This function connects an anonymous output GPIO line on a device * up to an arbitrary qemu_irq, so that when the device asserts that @@ -513,10 +556,11 @@ void qdev_connect_gpio_out(DeviceState *dev, int n, qemu_irq pin); * qemu_irqs at once, or to connect multiple outbound GPIOs to the * same qemu_irq; see qdev_connect_gpio_out() for details. * - * For named output GPIO lines, use qdev_connect_gpio_out_named(). + * For anonymous output GPIO lines, use qdev_connect_gpio_out(). */ void qdev_connect_gpio_out_named(DeviceState *dev, const char *name, int n, - qemu_irq pin); + qemu_irq input_pin); + /** * qdev_get_gpio_out_connector: Get the qemu_irq connected to an output GPIO * @dev: Device whose output GPIO we are interested in @@ -534,6 +578,7 @@ void qdev_connect_gpio_out_named(DeviceState *dev, const char *name, int n, * by the platform-bus subsystem. */ qemu_irq qdev_get_gpio_out_connector(DeviceState *dev, const char *name, int n); + /** * qdev_intercept_gpio_out: Intercept an existing GPIO connection * @dev: Device to intercept the outbound GPIO line from @@ -575,6 +620,7 @@ BusState *qdev_get_child_bus(DeviceState *dev, const char *name); * hold of an input GPIO line to manipulate it. 
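The GPIO helpers documented above are symmetrical: qdev_get_gpio_in() hands back a qemu_irq for an input line and qdev_connect_gpio_out() wires an output line to any qemu_irq, with TYPE_SPLIT_IRQ recommended for fan-out. A minimal board-wiring sketch; the devices and line numbers are assumptions.

#include "qemu/osdep.h"
#include "hw/qdev-core.h"

/* Sketch: board code wiring anonymous GPIO lines (devices assumed). */
static void board_wire_gpios(DeviceState *timer, DeviceState *intc,
                             DeviceState *splitter)
{
    /* Point-to-point: timer output 0 drives interrupt-controller input 5. */
    qdev_connect_gpio_out(timer, 0, qdev_get_gpio_in(intc, 5));

    /* Fan-out: route timer output 1 through a TYPE_SPLIT_IRQ device, whose
     * outputs are then connected to the destinations.
     */
    qdev_connect_gpio_out(timer, 1, qdev_get_gpio_in(splitter, 0));
}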
*/ void qdev_init_gpio_in(DeviceState *dev, qemu_irq_handler handler, int n); + /** * qdev_init_gpio_out: create an array of anonymous output GPIO lines * @dev: Device to create output GPIOs for @@ -597,10 +643,15 @@ void qdev_init_gpio_in(DeviceState *dev, qemu_irq_handler handler, int n); * * See qdev_connect_gpio_out() for how code that uses such a device * can connect to one of its output GPIO lines. + * + * There is no need to release the @pins allocated array because it + * will be automatically released when @dev calls its instance_finalize() + * handler. */ void qdev_init_gpio_out(DeviceState *dev, qemu_irq *pins, int n); + /** - * qdev_init_gpio_out: create an array of named output GPIO lines + * qdev_init_gpio_out_named: create an array of named output GPIO lines * @dev: Device to create output GPIOs for * @pins: Pointer to qemu_irq or qemu_irq array for the GPIO lines * @name: Name to give this array of GPIO lines @@ -612,6 +663,7 @@ void qdev_init_gpio_out(DeviceState *dev, qemu_irq *pins, int n); */ void qdev_init_gpio_out_named(DeviceState *dev, qemu_irq *pins, const char *name, int n); + /** * qdev_init_gpio_in_named_with_opaque: create an array of input GPIO lines * for the specified device @@ -673,9 +725,9 @@ DeviceState *qdev_find_recursive(BusState *bus, const char *id); typedef int (qbus_walkerfn)(BusState *bus, void *opaque); typedef int (qdev_walkerfn)(DeviceState *dev, void *opaque); -void qbus_create_inplace(void *bus, size_t size, const char *typename, - DeviceState *parent, const char *name); -BusState *qbus_create(const char *typename, DeviceState *parent, const char *name); +void qbus_init(void *bus, size_t size, const char *typename, + DeviceState *parent, const char *name); +BusState *qbus_new(const char *typename, DeviceState *parent, const char *name); bool qbus_realize(BusState *bus, Error **errp); void qbus_unrealize(BusState *bus); @@ -798,18 +850,43 @@ static inline bool qbus_is_hotpluggable(BusState *bus) return bus->hotplug_handler; } +/** + * qbus_mark_full: Mark this bus as full, so no more devices can be attached + * @bus: Bus to mark as full + * + * By default, QEMU will allow devices to be plugged into a bus up + * to the bus class's device count limit. Calling this function + * marks a particular bus as full, so that no more devices can be + * plugged into it. In particular this means that the bus will not + * be considered as a candidate for plugging in devices created by + * the user on the commandline or via the monitor. + * If a machine has multiple buses of a given type, such as I2C, + * where some of those buses in the real hardware are used only for + * internal devices and some are exposed via expansion ports, you + * can use this function to mark the internal-only buses as full + * after you have created all their internal devices. Then user + * created devices will appear on the expansion-port bus where + * guest software expects them. + */ +static inline void qbus_mark_full(BusState *bus) +{ + bus->full = true; +} + void device_listener_register(DeviceListener *listener); void device_listener_unregister(DeviceListener *listener); /** * @qdev_should_hide_device: - * @opts: QemuOpts as passed on cmdline. + * @opts: options QDict + * @from_json: true if @opts entries are typed, false for all strings + * @errp: pointer to error object * * Check if a device should be added. * When a device is added via qdev_device_add() this will be called, * and return if the device should be added now or not. 
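qbus_mark_full(), documented above, lets a machine reserve a bus for its internal devices so that user-created devices land on the expansion-facing bus instead. A minimal sketch of the pattern for a hypothetical SoC with an internal I2C bus; i2c_init_bus(), i2c_slave_create_simple() and the "tmp105" sensor are existing QEMU pieces used only as an illustration.

#include "qemu/osdep.h"
#include "hw/i2c/i2c.h"

/* Sketch: hiding an internal-only bus from -device (bus and devices assumed). */
static void soc_create_internal_i2c(DeviceState *soc)
{
    I2CBus *internal = i2c_init_bus(soc, "i2c-internal");

    /* Create the devices that really sit on this bus in hardware. */
    i2c_slave_create_simple(internal, "tmp105", 0x48);

    /* No user-created devices should ever be plugged here. */
    qbus_mark_full(BUS(internal));
}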
*/ -bool qdev_should_hide_device(QemuOpts *opts); +bool qdev_should_hide_device(const QDict *opts, bool from_json, Error **errp); typedef enum MachineInitPhase { /* current_machine is NULL. */ diff --git a/include/hw/qdev-properties.h b/include/hw/qdev-properties.h index 0ef97d60ce..e1df08876c 100644 --- a/include/hw/qdev-properties.h +++ b/include/hw/qdev-properties.h @@ -17,6 +17,7 @@ struct Property { const PropertyInfo *info; ptrdiff_t offset; uint8_t bitnr; + uint64_t bitmask; bool set_default; union { int64_t i; @@ -32,6 +33,7 @@ struct PropertyInfo { const char *name; const char *description; const QEnumLookup *enum_table; + bool realized_set_allowed; /* allow setting property on realized device */ int (*print)(Object *obj, Property *prop, char *dest, size_t len); void (*set_default_value)(ObjectProperty *op, const Property *prop); ObjectProperty *(*create)(ObjectClass *oc, const char *name, @@ -53,6 +55,7 @@ extern const PropertyInfo qdev_prop_uint16; extern const PropertyInfo qdev_prop_uint32; extern const PropertyInfo qdev_prop_int32; extern const PropertyInfo qdev_prop_uint64; +extern const PropertyInfo qdev_prop_uint64_checkmask; extern const PropertyInfo qdev_prop_int64; extern const PropertyInfo qdev_prop_size; extern const PropertyInfo qdev_prop_string; @@ -102,6 +105,16 @@ extern const PropertyInfo qdev_prop_link; .set_default = true, \ .defval.u = (bool)_defval) +/** + * The DEFINE_PROP_UINT64_CHECKMASK macro checks a user-supplied value + * against corresponding bitmask, rejects the value if it violates. + * The default value is set in instance_init(). + */ +#define DEFINE_PROP_UINT64_CHECKMASK(_name, _state, _field, _bitmask) \ + DEFINE_PROP(_name, _state, _field, qdev_prop_uint64_checkmask, uint64_t, \ + .bitmask = (_bitmask), \ + .set_default = false) + #define PROP_ARRAY_LEN_PREFIX "len-" /** diff --git a/include/hw/rdma/rdma.h b/include/hw/rdma/rdma.h index e77e43a170..80b2e531c4 100644 --- a/include/hw/rdma/rdma.h +++ b/include/hw/rdma/rdma.h @@ -31,7 +31,7 @@ typedef struct RdmaProvider RdmaProvider; struct RdmaProviderClass { InterfaceClass parent; - void (*print_statistics)(Monitor *mon, RdmaProvider *obj); + void (*format_statistics)(RdmaProvider *obj, GString *buf); }; #endif diff --git a/include/hw/register.h b/include/hw/register.h index b480e3882c..6a076cfcdf 100644 --- a/include/hw/register.h +++ b/include/hw/register.h @@ -204,6 +204,14 @@ RegisterInfoArray *register_init_block32(DeviceState *owner, bool debug_enabled, uint64_t memory_size); +RegisterInfoArray *register_init_block64(DeviceState *owner, + const RegisterAccessInfo *rae, + int num, RegisterInfo *ri, + uint64_t *data, + const MemoryRegionOps *ops, + bool debug_enabled, + uint64_t memory_size); + /** * This function should be called to cleanup the registers that were initialized * when calling register_init_block32(). 
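Because DEFINE_PROP_UINT64_CHECKMASK leaves .set_default unset, the comment's note about seeding the default in instance_init() matters in practice. A minimal sketch of a device using the new property type; the device state, cast macro, field and mask value are assumptions, DEFINE_PROP_END_OF_LIST() is the usual array terminator.

#include "qemu/osdep.h"
#include "hw/qdev-properties.h"

/* Sketch: a masked 64-bit property (MyDevState, MYDEV() and mask assumed). */
static Property mydev_properties[] = {
    /* User-supplied bits outside the mask cause the set to be rejected. */
    DEFINE_PROP_UINT64_CHECKMASK("config", MyDevState, cfg,
                                 0x0000ffff0000ffffULL),
    DEFINE_PROP_END_OF_LIST(),
};

static void mydev_instance_init(Object *obj)
{
    /* set_default is false for this property type, so seed the value here. */
    MYDEV(obj)->cfg = 0x0000000100000001ULL;
}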
This function should only be called diff --git a/include/hw/registerfields.h b/include/hw/registerfields.h index 93fa4a84c2..1330ca77de 100644 --- a/include/hw/registerfields.h +++ b/include/hw/registerfields.h @@ -30,6 +30,10 @@ enum { A_ ## reg = (addr) }; \ enum { R_ ## reg = (addr) / 2 }; +#define REG64(reg, addr) \ + enum { A_ ## reg = (addr) }; \ + enum { R_ ## reg = (addr) / 8 }; + /* Define SHIFT, LENGTH and MASK constants for a field within a register */ /* This macro will define R_FOO_BAR_MASK, R_FOO_BAR_SHIFT and R_FOO_BAR_LENGTH @@ -55,9 +59,24 @@ extract64((storage), R_ ## reg ## _ ## field ## _SHIFT, \ R_ ## reg ## _ ## field ## _LENGTH) +#define FIELD_SEX8(storage, reg, field) \ + sextract8((storage), R_ ## reg ## _ ## field ## _SHIFT, \ + R_ ## reg ## _ ## field ## _LENGTH) +#define FIELD_SEX16(storage, reg, field) \ + sextract16((storage), R_ ## reg ## _ ## field ## _SHIFT, \ + R_ ## reg ## _ ## field ## _LENGTH) +#define FIELD_SEX32(storage, reg, field) \ + sextract32((storage), R_ ## reg ## _ ## field ## _SHIFT, \ + R_ ## reg ## _ ## field ## _LENGTH) +#define FIELD_SEX64(storage, reg, field) \ + sextract64((storage), R_ ## reg ## _ ## field ## _SHIFT, \ + R_ ## reg ## _ ## field ## _LENGTH) + /* Extract a field from an array of registers */ #define ARRAY_FIELD_EX32(regs, reg, field) \ FIELD_EX32((regs)[R_ ## reg], reg, field) +#define ARRAY_FIELD_EX64(regs, reg, field) \ + FIELD_EX64((regs)[R_ ## reg], reg, field) /* Deposit a register field. * Assigning values larger then the target field will result in @@ -89,7 +108,40 @@ _d; }) #define FIELD_DP64(storage, reg, field, val) ({ \ struct { \ - unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \ + uint64_t v:R_ ## reg ## _ ## field ## _LENGTH; \ + } _v = { .v = val }; \ + uint64_t _d; \ + _d = deposit64((storage), R_ ## reg ## _ ## field ## _SHIFT, \ + R_ ## reg ## _ ## field ## _LENGTH, _v.v); \ + _d; }) + +#define FIELD_SDP8(storage, reg, field, val) ({ \ + struct { \ + signed int v:R_ ## reg ## _ ## field ## _LENGTH; \ + } _v = { .v = val }; \ + uint8_t _d; \ + _d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \ + R_ ## reg ## _ ## field ## _LENGTH, _v.v); \ + _d; }) +#define FIELD_SDP16(storage, reg, field, val) ({ \ + struct { \ + signed int v:R_ ## reg ## _ ## field ## _LENGTH; \ + } _v = { .v = val }; \ + uint16_t _d; \ + _d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \ + R_ ## reg ## _ ## field ## _LENGTH, _v.v); \ + _d; }) +#define FIELD_SDP32(storage, reg, field, val) ({ \ + struct { \ + signed int v:R_ ## reg ## _ ## field ## _LENGTH; \ + } _v = { .v = val }; \ + uint32_t _d; \ + _d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \ + R_ ## reg ## _ ## field ## _LENGTH, _v.v); \ + _d; }) +#define FIELD_SDP64(storage, reg, field, val) ({ \ + struct { \ + int64_t v:R_ ## reg ## _ ## field ## _LENGTH; \ } _v = { .v = val }; \ uint64_t _d; \ _d = deposit64((storage), R_ ## reg ## _ ## field ## _SHIFT, \ @@ -99,5 +151,77 @@ /* Deposit a field to array of registers. */ #define ARRAY_FIELD_DP32(regs, reg, field, val) \ (regs)[R_ ## reg] = FIELD_DP32((regs)[R_ ## reg], reg, field, val); +#define ARRAY_FIELD_DP64(regs, reg, field, val) \ + (regs)[R_ ## reg] = FIELD_DP64((regs)[R_ ## reg], reg, field, val); + + +/* + * These macros can be used for defining and extracting fields that have the + * same bit position across multiple registers. 
+ */ + +/* Define shared SHIFT, LENGTH, and MASK constants */ +#define SHARED_FIELD(name, shift, length) \ + enum { name ## _ ## SHIFT = (shift)}; \ + enum { name ## _ ## LENGTH = (length)}; \ + enum { name ## _ ## MASK = MAKE_64BIT_MASK(shift, length)}; + +/* Extract a shared field */ +#define SHARED_FIELD_EX8(storage, field) \ + extract8((storage), field ## _SHIFT, field ## _LENGTH) + +#define SHARED_FIELD_EX16(storage, field) \ + extract16((storage), field ## _SHIFT, field ## _LENGTH) + +#define SHARED_FIELD_EX32(storage, field) \ + extract32((storage), field ## _SHIFT, field ## _LENGTH) + +#define SHARED_FIELD_EX64(storage, field) \ + extract64((storage), field ## _SHIFT, field ## _LENGTH) + +/* Extract a shared field from a register array */ +#define SHARED_ARRAY_FIELD_EX32(regs, offset, field) \ + SHARED_FIELD_EX32((regs)[(offset)], field) +#define SHARED_ARRAY_FIELD_EX64(regs, offset, field) \ + SHARED_FIELD_EX64((regs)[(offset)], field) + +/* Deposit a shared field */ +#define SHARED_FIELD_DP8(storage, field, val) ({ \ + struct { \ + unsigned int v:field ## _LENGTH; \ + } _v = { .v = val }; \ + uint8_t _d; \ + _d = deposit32((storage), field ## _SHIFT, field ## _LENGTH, _v.v); \ + _d; }) + +#define SHARED_FIELD_DP16(storage, field, val) ({ \ + struct { \ + unsigned int v:field ## _LENGTH; \ + } _v = { .v = val }; \ + uint16_t _d; \ + _d = deposit32((storage), field ## _SHIFT, field ## _LENGTH, _v.v); \ + _d; }) + +#define SHARED_FIELD_DP32(storage, field, val) ({ \ + struct { \ + unsigned int v:field ## _LENGTH; \ + } _v = { .v = val }; \ + uint32_t _d; \ + _d = deposit32((storage), field ## _SHIFT, field ## _LENGTH, _v.v); \ + _d; }) + +#define SHARED_FIELD_DP64(storage, field, val) ({ \ + struct { \ + uint64_t v:field ## _LENGTH; \ + } _v = { .v = val }; \ + uint64_t _d; \ + _d = deposit64((storage), field ## _SHIFT, field ## _LENGTH, _v.v); \ + _d; }) + +/* Deposit a shared field to a register array */ +#define SHARED_ARRAY_FIELD_DP32(regs, offset, field, val) \ + (regs)[(offset)] = SHARED_FIELD_DP32((regs)[(offset)], field, val); +#define SHARED_ARRAY_FIELD_DP64(regs, offset, field, val) \ + (regs)[(offset)] = SHARED_FIELD_DP64((regs)[(offset)], field, val); #endif diff --git a/include/hw/remote/iommu.h b/include/hw/remote/iommu.h new file mode 100644 index 0000000000..33b68a8f4b --- /dev/null +++ b/include/hw/remote/iommu.h @@ -0,0 +1,40 @@ +/** + * Copyright © 2022 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef REMOTE_IOMMU_H +#define REMOTE_IOMMU_H + +#include "hw/pci/pci_bus.h" +#include "hw/pci/pci.h" + +#ifndef INT2VOIDP +#define INT2VOIDP(i) (void *)(uintptr_t)(i) +#endif + +typedef struct RemoteIommuElem { + MemoryRegion *mr; + + AddressSpace as; +} RemoteIommuElem; + +#define TYPE_REMOTE_IOMMU "x-remote-iommu" +OBJECT_DECLARE_SIMPLE_TYPE(RemoteIommu, REMOTE_IOMMU) + +struct RemoteIommu { + Object parent; + + GHashTable *elem_by_devfn; + + QemuMutex lock; +}; + +void remote_iommu_setup(PCIBus *pci_bus); + +void remote_iommu_unplug_dev(PCIDevice *pci_dev); + +#endif diff --git a/include/hw/remote/machine.h b/include/hw/remote/machine.h index 2a2a33c4b2..ac32fda387 100644 --- a/include/hw/remote/machine.h +++ b/include/hw/remote/machine.h @@ -22,6 +22,10 @@ struct RemoteMachineState { RemotePCIHost *host; RemoteIOHubState iohub; + + bool vfio_user; + + bool auto_shutdown; }; /* Used to pass to co-routine device and ioc. 
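The registerfields.h additions above cover two cases: signed fields (FIELD_SEX*/FIELD_SDP*) and fields that sit at the same bit position in several registers (SHARED_FIELD and its EX/DP helpers). A minimal sketch of both, with hypothetical register and field names; REG32 and FIELD are the pre-existing macros from the same header.

#include "qemu/osdep.h"
#include "hw/registerfields.h"

REG32(CHAN0_CTRL, 0x00)
REG32(CHAN1_CTRL, 0x04)
FIELD(CHAN0_CTRL, GAIN, 4, 8)        /* 8-bit field treated as signed below */
SHARED_FIELD(CHAN_ENABLE, 0, 1)      /* bit 0 of every CHANx_CTRL register */

/* Sketch: signed and shared field accesses on a small register file. */
static void regfield_example(uint32_t *regs)
{
    /* Sign-extended read and write of the GAIN field. */
    int32_t gain = FIELD_SEX32(regs[R_CHAN0_CTRL], CHAN0_CTRL, GAIN);
    regs[R_CHAN0_CTRL] = FIELD_SDP32(regs[R_CHAN0_CTRL], CHAN0_CTRL, GAIN, -3);

    /* The same ENABLE bit, read from one channel and set on the other. */
    if (SHARED_ARRAY_FIELD_EX32(regs, R_CHAN0_CTRL, CHAN_ENABLE)) {
        SHARED_ARRAY_FIELD_DP32(regs, R_CHAN1_CTRL, CHAN_ENABLE, 1);
    }
    (void)gain;
}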
*/ diff --git a/include/hw/remote/vfio-user-obj.h b/include/hw/remote/vfio-user-obj.h new file mode 100644 index 0000000000..87ab78b875 --- /dev/null +++ b/include/hw/remote/vfio-user-obj.h @@ -0,0 +1,6 @@ +#ifndef VFIO_USER_OBJ_H +#define VFIO_USER_OBJ_H + +void vfu_object_set_bus_irq(PCIBus *pci_bus); + +#endif diff --git a/include/hw/riscv/boot.h b/include/hw/riscv/boot.h index 0e89400b09..93e5f8760d 100644 --- a/include/hw/riscv/boot.h +++ b/include/hw/riscv/boot.h @@ -25,12 +25,12 @@ #include "hw/riscv/riscv_hart.h" #define RISCV32_BIOS_BIN "opensbi-riscv32-generic-fw_dynamic.bin" -#define RISCV32_BIOS_ELF "opensbi-riscv32-generic-fw_dynamic.elf" #define RISCV64_BIOS_BIN "opensbi-riscv64-generic-fw_dynamic.bin" -#define RISCV64_BIOS_ELF "opensbi-riscv64-generic-fw_dynamic.elf" bool riscv_is_32bit(RISCVHartArrayState *harts); +char *riscv_plic_hart_config_string(int hart_count); + target_ulong riscv_calc_kernel_start_addr(RISCVHartArrayState *harts, target_ulong firmware_end_addr); target_ulong riscv_find_and_load_firmware(MachineState *machine, @@ -46,15 +46,17 @@ target_ulong riscv_load_kernel(const char *kernel_filename, symbol_fn_t sym_cb); hwaddr riscv_load_initrd(const char *filename, uint64_t mem_size, uint64_t kernel_entry, hwaddr *start); -uint32_t riscv_load_fdt(hwaddr dram_start, uint64_t dram_size, void *fdt); +uint64_t riscv_load_fdt(hwaddr dram_start, uint64_t dram_size, void *fdt); void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts, hwaddr saddr, hwaddr rom_base, hwaddr rom_size, uint64_t kernel_entry, - uint32_t fdt_load_addr, void *fdt); + uint64_t fdt_load_addr); void riscv_rom_copy_firmware_info(MachineState *machine, hwaddr rom_base, hwaddr rom_size, uint32_t reset_vec_size, uint64_t kernel_entry); +void riscv_setup_direct_kernel(hwaddr kernel_addr, hwaddr fdt_addr); +void riscv_setup_firmware_boot(MachineState *machine); #endif /* RISCV_BOOT_H */ diff --git a/include/hw/riscv/boot_opensbi.h b/include/hw/riscv/boot_opensbi.h index 0d5ddd6c3d..c19cad4818 100644 --- a/include/hw/riscv/boot_opensbi.h +++ b/include/hw/riscv/boot_opensbi.h @@ -4,8 +4,9 @@ * * Based on include/sbi/{fw_dynamic.h,sbi_scratch.h} from the OpenSBI project. 
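With the hw/riscv/boot.h changes above, riscv_load_fdt() returns a uint64_t load address and riscv_setup_rom_reset_vec() now takes that address instead of the FDT pointer. A minimal sketch of how a board might chain the two calls; the helper and the choice of saddr are assumptions, only the two prototypes come from the patch.

#include "qemu/osdep.h"
#include "hw/riscv/boot.h"

/* Sketch: passing the FDT load address into the reset vector (names assumed). */
static void board_setup_boot(MachineState *machine, RISCVHartArrayState *harts,
                             hwaddr dram_base, hwaddr rom_base, hwaddr rom_size,
                             uint64_t kernel_entry)
{
    uint64_t fdt_load_addr;

    fdt_load_addr = riscv_load_fdt(dram_base, machine->ram_size, machine->fdt);
    riscv_setup_rom_reset_vec(machine, harts, dram_base /* saddr */,
                              rom_base, rom_size, kernel_entry, fdt_load_addr);
}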
*/ -#ifndef OPENSBI_H -#define OPENSBI_H + +#ifndef RISCV_BOOT_OPENSBI_H +#define RISCV_BOOT_OPENSBI_H /** Expected value of info magic ('OSBI' ascii string in hex) */ #define FW_DYNAMIC_INFO_MAGIC_VALUE 0x4942534f diff --git a/include/hw/riscv/microchip_pfsoc.h b/include/hw/riscv/microchip_pfsoc.h index d30916f45d..a757b240e0 100644 --- a/include/hw/riscv/microchip_pfsoc.h +++ b/include/hw/riscv/microchip_pfsoc.h @@ -88,8 +88,11 @@ enum { MICROCHIP_PFSOC_L2LIM, MICROCHIP_PFSOC_PLIC, MICROCHIP_PFSOC_MMUART0, + MICROCHIP_PFSOC_WDOG0, MICROCHIP_PFSOC_SYSREG, + MICROCHIP_PFSOC_AXISW, MICROCHIP_PFSOC_MPUCFG, + MICROCHIP_PFSOC_FMETER, MICROCHIP_PFSOC_DDR_SGMII_PHY, MICROCHIP_PFSOC_EMMC_SD, MICROCHIP_PFSOC_DDR_CFG, @@ -97,19 +100,28 @@ enum { MICROCHIP_PFSOC_MMUART2, MICROCHIP_PFSOC_MMUART3, MICROCHIP_PFSOC_MMUART4, + MICROCHIP_PFSOC_WDOG1, + MICROCHIP_PFSOC_WDOG2, + MICROCHIP_PFSOC_WDOG3, + MICROCHIP_PFSOC_WDOG4, MICROCHIP_PFSOC_SPI0, MICROCHIP_PFSOC_SPI1, + MICROCHIP_PFSOC_I2C0, MICROCHIP_PFSOC_I2C1, + MICROCHIP_PFSOC_CAN0, + MICROCHIP_PFSOC_CAN1, MICROCHIP_PFSOC_GEM0, MICROCHIP_PFSOC_GEM1, MICROCHIP_PFSOC_GPIO0, MICROCHIP_PFSOC_GPIO1, MICROCHIP_PFSOC_GPIO2, + MICROCHIP_PFSOC_RTC, MICROCHIP_PFSOC_ENVM_CFG, MICROCHIP_PFSOC_ENVM_DATA, + MICROCHIP_PFSOC_USB, MICROCHIP_PFSOC_QSPI_XIP, MICROCHIP_PFSOC_IOSCB, - MICROCHIP_PFSOC_EMMC_SD_MUX, + MICROCHIP_PFSOC_FABRIC_FIC3, MICROCHIP_PFSOC_DRAM_LO, MICROCHIP_PFSOC_DRAM_LO_ALIAS, MICROCHIP_PFSOC_DRAM_HI, @@ -138,7 +150,6 @@ enum { #define MICROCHIP_PFSOC_MANAGEMENT_CPU_COUNT 1 #define MICROCHIP_PFSOC_COMPUTE_CPU_COUNT 4 -#define MICROCHIP_PFSOC_PLIC_HART_CONFIG "MS" #define MICROCHIP_PFSOC_PLIC_NUM_SOURCES 185 #define MICROCHIP_PFSOC_PLIC_NUM_PRIORITIES 7 #define MICROCHIP_PFSOC_PLIC_PRIORITY_BASE 0x04 diff --git a/include/hw/riscv/opentitan.h b/include/hw/riscv/opentitan.h index 9f93bebdac..6665cd5794 100644 --- a/include/hw/riscv/opentitan.h +++ b/include/hw/riscv/opentitan.h @@ -20,23 +20,33 @@ #define HW_OPENTITAN_H #include "hw/riscv/riscv_hart.h" -#include "hw/intc/ibex_plic.h" +#include "hw/intc/sifive_plic.h" #include "hw/char/ibex_uart.h" #include "hw/timer/ibex_timer.h" +#include "hw/ssi/ibex_spi_host.h" #include "qom/object.h" #define TYPE_RISCV_IBEX_SOC "riscv.lowrisc.ibex.soc" OBJECT_DECLARE_SIMPLE_TYPE(LowRISCIbexSoCState, RISCV_IBEX_SOC) +enum { + OPENTITAN_SPI_HOST0, + OPENTITAN_SPI_HOST1, + OPENTITAN_NUM_SPI_HOSTS, +}; + struct LowRISCIbexSoCState { /*< private >*/ SysBusDevice parent_obj; /*< public >*/ RISCVHartArrayState cpus; - IbexPlicState plic; + SiFivePLICState plic; IbexUartState uart; IbexTimerState timer; + IbexSPIHostState spi_host[OPENTITAN_NUM_SPI_HOSTS]; + + uint32_t resetvec; MemoryRegion flash_mem; MemoryRegion rom; @@ -57,13 +67,16 @@ enum { IBEX_DEV_FLASH, IBEX_DEV_FLASH_VIRTUAL, IBEX_DEV_UART, + IBEX_DEV_SPI_DEVICE, + IBEX_DEV_SPI_HOST0, + IBEX_DEV_SPI_HOST1, IBEX_DEV_GPIO, - IBEX_DEV_SPI, IBEX_DEV_I2C, IBEX_DEV_PATTGEN, IBEX_DEV_TIMER, IBEX_DEV_SENSOR_CTRL, IBEX_DEV_OTP_CTRL, + IBEX_DEV_LC_CTRL, IBEX_DEV_PWRMGR, IBEX_DEV_RSTMGR, IBEX_DEV_CLKMGR, @@ -87,15 +100,19 @@ enum { }; enum { - IBEX_TIMER_TIMEREXPIRED0_0 = 125, - IBEX_UART0_RX_PARITY_ERR_IRQ = 8, - IBEX_UART0_RX_TIMEOUT_IRQ = 7, - IBEX_UART0_RX_BREAK_ERR_IRQ = 6, - IBEX_UART0_RX_FRAME_ERR_IRQ = 5, - IBEX_UART0_RX_OVERFLOW_IRQ = 4, - IBEX_UART0_TX_EMPTY_IRQ = 3, - IBEX_UART0_RX_WATERMARK_IRQ = 2, - IBEX_UART0_TX_WATERMARK_IRQ = 1, + IBEX_UART0_TX_WATERMARK_IRQ = 1, + IBEX_UART0_RX_WATERMARK_IRQ = 2, + IBEX_UART0_TX_EMPTY_IRQ = 3, + IBEX_UART0_RX_OVERFLOW_IRQ = 
4, + IBEX_UART0_RX_FRAME_ERR_IRQ = 5, + IBEX_UART0_RX_BREAK_ERR_IRQ = 6, + IBEX_UART0_RX_TIMEOUT_IRQ = 7, + IBEX_UART0_RX_PARITY_ERR_IRQ = 8, + IBEX_TIMER_TIMEREXPIRED0_0 = 127, + IBEX_SPI_HOST0_ERR_IRQ = 151, + IBEX_SPI_HOST0_SPI_EVENT_IRQ = 152, + IBEX_SPI_HOST1_ERR_IRQ = 153, + IBEX_SPI_HOST1_SPI_EVENT_IRQ = 154, }; #endif diff --git a/include/hw/riscv/shakti_c.h b/include/hw/riscv/shakti_c.h index 50a2b79086..daf0aae13f 100644 --- a/include/hw/riscv/shakti_c.h +++ b/include/hw/riscv/shakti_c.h @@ -16,8 +16,8 @@ * this program. If not, see . */ -#ifndef HW_SHAKTI_H -#define HW_SHAKTI_H +#ifndef HW_SHAKTI_C_H +#define HW_SHAKTI_C_H #include "hw/riscv/riscv_hart.h" #include "hw/boards.h" diff --git a/include/hw/riscv/sifive_e.h b/include/hw/riscv/sifive_e.h index 83604da805..d738745925 100644 --- a/include/hw/riscv/sifive_e.h +++ b/include/hw/riscv/sifive_e.h @@ -22,6 +22,7 @@ #include "hw/riscv/riscv_hart.h" #include "hw/riscv/sifive_cpu.h" #include "hw/gpio/sifive_gpio.h" +#include "hw/boards.h" #define TYPE_RISCV_E_SOC "riscv.sifive.e.soc" #define RISCV_E_SOC(obj) \ @@ -41,7 +42,7 @@ typedef struct SiFiveESoCState { typedef struct SiFiveEState { /*< private >*/ - SysBusDevice parent_obj; + MachineState parent_obj; /*< public >*/ SiFiveESoCState soc; diff --git a/include/hw/riscv/sifive_u.h b/include/hw/riscv/sifive_u.h index 2656b39808..8f63a183c4 100644 --- a/include/hw/riscv/sifive_u.h +++ b/include/hw/riscv/sifive_u.h @@ -27,6 +27,7 @@ #include "hw/misc/sifive_u_otp.h" #include "hw/misc/sifive_u_prci.h" #include "hw/ssi/sifive_spi.h" +#include "hw/timer/sifive_pwm.h" #define TYPE_RISCV_U_SOC "riscv.sifive.u.soc" #define RISCV_U_SOC(obj) \ @@ -49,6 +50,7 @@ typedef struct SiFiveUSoCState { SiFiveSPIState spi0; SiFiveSPIState spi2; CadenceGEMState gem; + SiFivePwmState pwm[2]; uint32_t serial; char *cpu_type; @@ -92,7 +94,9 @@ enum { SIFIVE_U_DEV_FLASH0, SIFIVE_U_DEV_DRAM, SIFIVE_U_DEV_GEM, - SIFIVE_U_DEV_GEM_MGMT + SIFIVE_U_DEV_GEM_MGMT, + SIFIVE_U_DEV_PWM0, + SIFIVE_U_DEV_PWM1 }; enum { @@ -126,6 +130,14 @@ enum { SIFIVE_U_PDMA_IRQ5 = 28, SIFIVE_U_PDMA_IRQ6 = 29, SIFIVE_U_PDMA_IRQ7 = 30, + SIFIVE_U_PWM0_IRQ0 = 42, + SIFIVE_U_PWM0_IRQ1 = 43, + SIFIVE_U_PWM0_IRQ2 = 44, + SIFIVE_U_PWM0_IRQ3 = 45, + SIFIVE_U_PWM1_IRQ0 = 46, + SIFIVE_U_PWM1_IRQ1 = 47, + SIFIVE_U_PWM1_IRQ2 = 48, + SIFIVE_U_PWM1_IRQ3 = 49, SIFIVE_U_QSPI0_IRQ = 51, SIFIVE_U_GEM_IRQ = 53 }; @@ -144,7 +156,6 @@ enum { #define SIFIVE_U_MANAGEMENT_CPU_COUNT 1 #define SIFIVE_U_COMPUTE_CPU_COUNT 4 -#define SIFIVE_U_PLIC_HART_CONFIG "MS" #define SIFIVE_U_PLIC_NUM_SOURCES 54 #define SIFIVE_U_PLIC_NUM_PRIORITIES 7 #define SIFIVE_U_PLIC_PRIORITY_BASE 0x04 diff --git a/include/hw/riscv/spike.h b/include/hw/riscv/spike.h index cdd1a13011..73d69234de 100644 --- a/include/hw/riscv/spike.h +++ b/include/hw/riscv/spike.h @@ -43,6 +43,7 @@ struct SpikeState { enum { SPIKE_MROM, + SPIKE_HTIF, SPIKE_CLINT, SPIKE_DRAM }; diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h index 349fee1f89..be4ab8fe7f 100644 --- a/include/hw/riscv/virt.h +++ b/include/hw/riscv/virt.h @@ -24,25 +24,38 @@ #include "hw/block/flash.h" #include "qom/object.h" -#define VIRT_CPUS_MAX 8 -#define VIRT_SOCKETS_MAX 8 +#define VIRT_CPUS_MAX_BITS 9 +#define VIRT_CPUS_MAX (1 << VIRT_CPUS_MAX_BITS) +#define VIRT_SOCKETS_MAX_BITS 2 +#define VIRT_SOCKETS_MAX (1 << VIRT_SOCKETS_MAX_BITS) #define TYPE_RISCV_VIRT_MACHINE MACHINE_TYPE_NAME("virt") typedef struct RISCVVirtState RISCVVirtState; DECLARE_INSTANCE_CHECKER(RISCVVirtState, RISCV_VIRT_MACHINE, 
TYPE_RISCV_VIRT_MACHINE) +typedef enum RISCVVirtAIAType { + VIRT_AIA_TYPE_NONE = 0, + VIRT_AIA_TYPE_APLIC, + VIRT_AIA_TYPE_APLIC_IMSIC, +} RISCVVirtAIAType; + struct RISCVVirtState { /*< private >*/ MachineState parent; /*< public >*/ + Notifier machine_done; + DeviceState *platform_bus_dev; RISCVHartArrayState soc[VIRT_SOCKETS_MAX]; - DeviceState *plic[VIRT_SOCKETS_MAX]; + DeviceState *irqchip[VIRT_SOCKETS_MAX]; PFlashCFI01 *flash[2]; FWCfgState *fw_cfg; int fdt_size; + bool have_aclint; + RISCVVirtAIAType aia_type; + int aia_guests; }; enum { @@ -51,14 +64,20 @@ enum { VIRT_TEST, VIRT_RTC, VIRT_CLINT, + VIRT_ACLINT_SSWI, VIRT_PLIC, + VIRT_APLIC_M, + VIRT_APLIC_S, VIRT_UART0, VIRT_VIRTIO, VIRT_FW_CFG, + VIRT_IMSIC_M, + VIRT_IMSIC_S, VIRT_FLASH, VIRT_DRAM, VIRT_PCIE_MMIO, VIRT_PCIE_PIO, + VIRT_PLATFORM_BUS, VIRT_PCIE_ECAM }; @@ -68,12 +87,19 @@ enum { VIRTIO_IRQ = 1, /* 1 to 8 */ VIRTIO_COUNT = 8, PCIE_IRQ = 0x20, /* 32 to 35 */ - VIRTIO_NDEV = 0x35 /* Arbitrary maximum number of interrupts */ + VIRT_PLATFORM_BUS_IRQ = 64, /* 64 to 96 */ + VIRTIO_NDEV = 96 /* Arbitrary maximum number of interrupts */ }; -#define VIRT_PLIC_HART_CONFIG "MS" -#define VIRT_PLIC_NUM_SOURCES 127 -#define VIRT_PLIC_NUM_PRIORITIES 7 +#define VIRT_PLATFORM_BUS_NUM_IRQS 32 + +#define VIRT_IRQCHIP_IPI_MSI 1 +#define VIRT_IRQCHIP_NUM_MSIS 255 +#define VIRT_IRQCHIP_NUM_SOURCES VIRTIO_NDEV +#define VIRT_IRQCHIP_NUM_PRIO_BITS 3 +#define VIRT_IRQCHIP_MAX_GUESTS_BITS 3 +#define VIRT_IRQCHIP_MAX_GUESTS ((1U << VIRT_IRQCHIP_MAX_GUESTS_BITS) - 1U) + #define VIRT_PLIC_PRIORITY_BASE 0x04 #define VIRT_PLIC_PENDING_BASE 0x1000 #define VIRT_PLIC_ENABLE_BASE 0x2000 @@ -87,7 +113,14 @@ enum { #define FDT_PCI_INT_CELLS 1 #define FDT_PLIC_ADDR_CELLS 0 #define FDT_PLIC_INT_CELLS 1 -#define FDT_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + 1 + \ - FDT_PLIC_ADDR_CELLS + FDT_PLIC_INT_CELLS) +#define FDT_APLIC_INT_CELLS 2 +#define FDT_IMSIC_INT_CELLS 0 +#define FDT_MAX_INT_CELLS 2 +#define FDT_MAX_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \ + 1 + FDT_MAX_INT_CELLS) +#define FDT_PLIC_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \ + 1 + FDT_PLIC_INT_CELLS) +#define FDT_APLIC_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \ + 1 + FDT_APLIC_INT_CELLS) #endif diff --git a/include/hw/rtc/goldfish_rtc.h b/include/hw/rtc/goldfish_rtc.h index 79ca7daf5d..162be33863 100644 --- a/include/hw/rtc/goldfish_rtc.h +++ b/include/hw/rtc/goldfish_rtc.h @@ -42,6 +42,8 @@ struct GoldfishRTCState { uint32_t irq_pending; uint32_t irq_enabled; uint32_t time_high; + + bool big_endian; }; #endif diff --git a/include/hw/rtc/m48t59.h b/include/hw/rtc/m48t59.h index d9b45eb161..c14937476c 100644 --- a/include/hw/rtc/m48t59.h +++ b/include/hw/rtc/m48t59.h @@ -47,4 +47,4 @@ struct NvramClass { void (*toggle_lock)(Nvram *obj, int lock); }; -#endif /* HW_M48T59_H */ +#endif /* HW_RTC_M48T59_H */ diff --git a/include/hw/rtc/mc146818rtc.h b/include/hw/rtc/mc146818rtc.h index d92615e6bc..6342ae6920 100644 --- a/include/hw/rtc/mc146818rtc.h +++ b/include/hw/rtc/mc146818rtc.h @@ -25,6 +25,8 @@ struct RTCState { MemoryRegion coalesced_io; uint8_t cmos_data[256]; uint8_t cmos_index; + uint8_t isairq; + uint16_t io_base; int32_t base_year; uint64_t base_rtc; uint64_t last_update; @@ -48,11 +50,10 @@ struct RTCState { }; #define RTC_ISA_IRQ 8 -#define RTC_ISA_BASE 0x70 ISADevice *mc146818_rtc_init(ISABus *bus, int base_year, qemu_irq intercept_irq); void rtc_set_memory(ISADevice *dev, int addr, int val); int rtc_get_memory(ISADevice 
*dev, int addr); -#endif /* MC146818RTC_H */ +#endif /* HW_RTC_MC146818RTC_H */ diff --git a/include/hw/rtc/sun4v-rtc.h b/include/hw/rtc/sun4v-rtc.h index fd868f6ed2..fc54dfcba4 100644 --- a/include/hw/rtc/sun4v-rtc.h +++ b/include/hw/rtc/sun4v-rtc.h @@ -9,8 +9,8 @@ * version. */ -#ifndef HW_RTC_SUN4V -#define HW_RTC_SUN4V +#ifndef HW_RTC_SUN4V_RTC_H +#define HW_RTC_SUN4V_RTC_H #include "exec/hwaddr.h" diff --git a/include/hw/rtc/xlnx-zynqmp-rtc.h b/include/hw/rtc/xlnx-zynqmp-rtc.h index 5f1ad0a946..f0c6a2d78a 100644 --- a/include/hw/rtc/xlnx-zynqmp-rtc.h +++ b/include/hw/rtc/xlnx-zynqmp-rtc.h @@ -24,8 +24,8 @@ * THE SOFTWARE. */ -#ifndef HW_RTC_XLNX_ZYNQMP_H -#define HW_RTC_XLNX_ZYNQMP_H +#ifndef HW_RTC_XLNX_ZYNQMP_RTC_H +#define HW_RTC_XLNX_ZYNQMP_RTC_H #include "hw/register.h" #include "hw/sysbus.h" diff --git a/include/hw/rx/rx62n.h b/include/hw/rx/rx62n.h index 3ed80dba0d..73ceeb58e5 100644 --- a/include/hw/rx/rx62n.h +++ b/include/hw/rx/rx62n.h @@ -21,8 +21,8 @@ * this program. If not, see . */ -#ifndef HW_RX_RX62N_MCU_H -#define HW_RX_RX62N_MCU_H +#ifndef HW_RX_RX62N_H +#define HW_RX_RX62N_H #include "target/rx/cpu.h" #include "hw/intc/rx_icu.h" diff --git a/include/hw/s390x/css.h b/include/hw/s390x/css.h index 10ed1df1bb..75e5381613 100644 --- a/include/hw/s390x/css.h +++ b/include/hw/s390x/css.h @@ -146,7 +146,8 @@ struct SubchDev { static inline void sch_gen_unit_exception(SubchDev *sch) { - sch->curr_status.scsw.ctrl &= ~SCSW_ACTL_START_PEND; + sch->curr_status.scsw.ctrl &= ~(SCSW_ACTL_DEVICE_ACTIVE | + SCSW_ACTL_SUBCH_ACTIVE); sch->curr_status.scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | SCSW_STCTL_ALERT | diff --git a/include/hw/s390x/ioinst.h b/include/hw/s390x/ioinst.h index 3771fff9d4..ea8d0f2444 100644 --- a/include/hw/s390x/ioinst.h +++ b/include/hw/s390x/ioinst.h @@ -107,7 +107,7 @@ QEMU_BUILD_BUG_MSG(sizeof(PMCW) != 28, "size of PMCW is wrong"); #define PMCW_FLAGS_MASK_MP 0x0004 #define PMCW_FLAGS_MASK_TF 0x0002 #define PMCW_FLAGS_MASK_DNV 0x0001 -#define PMCW_FLAGS_MASK_INVALID 0x0700 +#define PMCW_FLAGS_MASK_INVALID 0xc300 #define PMCW_CHARS_MASK_ST 0x00e00000 #define PMCW_CHARS_MASK_MBFC 0x00000004 diff --git a/include/hw/s390x/pv.h b/include/hw/s390x/pv.h index 1f1f545bfc..9360aa1091 100644 --- a/include/hw/s390x/pv.h +++ b/include/hw/s390x/pv.h @@ -38,6 +38,7 @@ static inline bool s390_is_pv(void) return ccw->pv; } +int s390_pv_query_info(void); int s390_pv_vm_enable(void); void s390_pv_vm_disable(void); int s390_pv_set_sec_parms(uint64_t origin, uint64_t length); @@ -46,8 +47,17 @@ void s390_pv_prep_reset(void); int s390_pv_verify(void); void s390_pv_unshare(void); void s390_pv_inject_reset_error(CPUState *cs); +uint64_t kvm_s390_pv_dmp_get_size_cpu(void); +uint64_t kvm_s390_pv_dmp_get_size_mem_state(void); +uint64_t kvm_s390_pv_dmp_get_size_completion_data(void); +bool kvm_s390_pv_info_basic_valid(void); +int kvm_s390_dump_init(void); +int kvm_s390_dump_cpu(S390CPU *cpu, void *buff); +int kvm_s390_dump_mem_state(uint64_t addr, size_t len, void *dest); +int kvm_s390_dump_completion_data(void *buff); #else /* CONFIG_KVM */ static inline bool s390_is_pv(void) { return false; } +static inline int s390_pv_query_info(void) { return 0; } static inline int s390_pv_vm_enable(void) { return 0; } static inline void s390_pv_vm_disable(void) {} static inline int s390_pv_set_sec_parms(uint64_t origin, uint64_t length) { return 0; } @@ -56,6 +66,15 @@ static inline void s390_pv_prep_reset(void) {} static inline int s390_pv_verify(void) { return 0; } static inline 
void s390_pv_unshare(void) {} static inline void s390_pv_inject_reset_error(CPUState *cs) {}; +static inline uint64_t kvm_s390_pv_dmp_get_size_cpu(void) { return 0; } +static inline uint64_t kvm_s390_pv_dmp_get_size_mem_state(void) { return 0; } +static inline uint64_t kvm_s390_pv_dmp_get_size_completion_data(void) { return 0; } +static inline bool kvm_s390_pv_info_basic_valid(void) { return false; } +static inline int kvm_s390_dump_init(void) { return 0; } +static inline int kvm_s390_dump_cpu(S390CPU *cpu, void *buff) { return 0; } +static inline int kvm_s390_dump_mem_state(uint64_t addr, size_t len, + void *dest) { return 0; } +static inline int kvm_s390_dump_completion_data(void *buff) { return 0; } #endif /* CONFIG_KVM */ int s390_pv_kvm_init(ConfidentialGuestSupport *cgs, Error **errp); diff --git a/include/hw/s390x/s390-pci-bus.h b/include/hw/s390x/s390-pci-bus.h index 49ae9f03d3..0605fcea24 100644 --- a/include/hw/s390x/s390-pci-bus.h +++ b/include/hw/s390x/s390-pci-bus.h @@ -37,6 +37,7 @@ #define ZPCI_MAX_UID 0xffff #define UID_UNDEFINED 0 #define UID_CHECKING_ENABLED 0x01 +#define ZPCI_DTSM 0x40 OBJECT_DECLARE_SIMPLE_TYPE(S390pciState, S390_PCI_HOST_BRIDGE) OBJECT_DECLARE_SIMPLE_TYPE(S390PCIBus, S390_PCI_BUS) @@ -81,9 +82,6 @@ OBJECT_DECLARE_SIMPLE_TYPE(S390PCIIOMMU, S390_PCI_IOMMU) #define ZPCI_SDMA_ADDR 0x100000000ULL #define ZPCI_EDMA_ADDR 0x1ffffffffffffffULL -#define PAGE_SHIFT 12 -#define PAGE_SIZE (1 << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) #define PAGE_DEFAULT_ACC 0 #define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4) @@ -137,7 +135,7 @@ enum ZpciIoatDtype { #define ZPCI_TABLE_BITS 11 #define ZPCI_PT_BITS 8 -#define ZPCI_ST_SHIFT (ZPCI_PT_BITS + PAGE_SHIFT) +#define ZPCI_ST_SHIFT (ZPCI_PT_BITS + TARGET_PAGE_BITS) #define ZPCI_RT_SHIFT (ZPCI_ST_SHIFT + ZPCI_TABLE_BITS) #define ZPCI_RTE_FLAG_MASK 0x3fffULL @@ -316,14 +314,17 @@ typedef struct ZpciFmb { } ZpciFmb; QEMU_BUILD_BUG_MSG(offsetof(ZpciFmb, fmt0) != 48, "padding in ZpciFmb"); -#define ZPCI_DEFAULT_FN_GRP 0x20 +#define ZPCI_DEFAULT_FN_GRP 0xFF +#define ZPCI_SIM_GRP_START 0xF0 typedef struct S390PCIGroup { ClpRspQueryPciGrp zpci_group; int id; + int host_id; QTAILQ_ENTRY(S390PCIGroup) link; } S390PCIGroup; -S390PCIGroup *s390_group_create(int id); +S390PCIGroup *s390_group_create(int id, int host_id); S390PCIGroup *s390_group_find(int id); +S390PCIGroup *s390_group_find_host_sim(int host_id); struct S390PCIBusDevice { DeviceState qdev; @@ -352,6 +353,8 @@ struct S390PCIBusDevice { IndAddr *indicator; bool pci_unplug_request_processed; bool unplug_requested; + bool interp; + bool forwarding_assist; QTAILQ_ENTRY(S390PCIBusDevice) link; }; @@ -370,6 +373,7 @@ struct S390pciState { QTAILQ_HEAD(, S390PCIBusDevice) zpci_devs; QTAILQ_HEAD(, S390PCIDMACount) zpci_dma_limit; QTAILQ_HEAD(, S390PCIGroup) zpci_groups; + uint8_t next_sim_grp; }; S390pciState *s390_get_phb(void); diff --git a/include/hw/s390x/s390-pci-clp.h b/include/hw/s390x/s390-pci-clp.h index 96b8e3f133..03b7f9ba5f 100644 --- a/include/hw/s390x/s390-pci-clp.h +++ b/include/hw/s390x/s390-pci-clp.h @@ -9,8 +9,8 @@ * directory. 
*/ -#ifndef HW_S390_PCI_CLP -#define HW_S390_PCI_CLP +#ifndef HW_S390_PCI_CLP_H +#define HW_S390_PCI_CLP_H /* CLP common request & response block size */ #define CLP_BLK_SIZE 4096 @@ -163,7 +163,8 @@ typedef struct ClpRspQueryPciGrp { uint8_t fr; uint16_t maxstbl; uint16_t mui; - uint64_t reserved3; + uint8_t dtsm; + uint8_t reserved3[7]; uint64_t dasm; /* dma address space mask */ uint64_t msia; /* MSI address */ uint64_t reserved4; diff --git a/include/hw/s390x/s390-pci-kvm.h b/include/hw/s390x/s390-pci-kvm.h new file mode 100644 index 0000000000..933814a402 --- /dev/null +++ b/include/hw/s390x/s390-pci-kvm.h @@ -0,0 +1,38 @@ +/* + * s390 PCI KVM interfaces + * + * Copyright 2022 IBM Corp. + * Author(s): Matthew Rosato + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#ifndef HW_S390_PCI_KVM_H +#define HW_S390_PCI_KVM_H + +#include "hw/s390x/s390-pci-bus.h" +#include "hw/s390x/s390-pci-inst.h" + +#ifdef CONFIG_KVM +bool s390_pci_kvm_interp_allowed(void); +int s390_pci_kvm_aif_enable(S390PCIBusDevice *pbdev, ZpciFib *fib, bool assist); +int s390_pci_kvm_aif_disable(S390PCIBusDevice *pbdev); +#else +static inline bool s390_pci_kvm_interp_allowed(void) +{ + return false; +} +static inline int s390_pci_kvm_aif_enable(S390PCIBusDevice *pbdev, ZpciFib *fib, + bool assist) +{ + return -EINVAL; +} +static inline int s390_pci_kvm_aif_disable(S390PCIBusDevice *pbdev) +{ + return -EINVAL; +} +#endif + +#endif diff --git a/include/hw/s390x/s390-pci-vfio.h b/include/hw/s390x/s390-pci-vfio.h index ff708aef50..ae1b126ff7 100644 --- a/include/hw/s390x/s390-pci-vfio.h +++ b/include/hw/s390x/s390-pci-vfio.h @@ -20,6 +20,7 @@ bool s390_pci_update_dma_avail(int fd, unsigned int *avail); S390PCIDMACount *s390_pci_start_dma_count(S390pciState *s, S390PCIBusDevice *pbdev); void s390_pci_end_dma_count(S390pciState *s, S390PCIDMACount *cnt); +bool s390_pci_get_host_fh(S390PCIBusDevice *pbdev, uint32_t *fh); void s390_pci_get_clp_info(S390PCIBusDevice *pbdev); #else static inline bool s390_pci_update_dma_avail(int fd, unsigned int *avail) @@ -33,6 +34,10 @@ static inline S390PCIDMACount *s390_pci_start_dma_count(S390pciState *s, } static inline void s390_pci_end_dma_count(S390pciState *s, S390PCIDMACount *cnt) { } +static inline bool s390_pci_get_host_fh(S390PCIBusDevice *pbdev, uint32_t *fh) +{ + return false; +} static inline void s390_pci_get_clp_info(S390PCIBusDevice *pbdev) { } #endif diff --git a/include/hw/s390x/s390-virtio-ccw.h b/include/hw/s390x/s390-virtio-ccw.h index 3331990e02..9bba21a916 100644 --- a/include/hw/s390x/s390-virtio-ccw.h +++ b/include/hw/s390x/s390-virtio-ccw.h @@ -39,6 +39,7 @@ struct S390CcwMachineClass { bool cpu_model_allowed; bool css_migration_enabled; bool hpage_1m_allowed; + int max_threads; }; /* runtime-instrumentation allowed by the machine */ diff --git a/include/hw/s390x/storage-keys.h b/include/hw/s390x/storage-keys.h index 2888d42d0b..aa2ec2aae5 100644 --- a/include/hw/s390x/storage-keys.h +++ b/include/hw/s390x/storage-keys.h @@ -28,9 +28,72 @@ struct S390SKeysState { struct S390SKeysClass { DeviceClass parent_class; - int (*skeys_enabled)(S390SKeysState *ks); + + /** + * @skeys_are_enabled: + * + * Check whether storage keys are enabled. If not enabled, they were not + * enabled lazily either by the guest via a storage key instruction or + * by the host during migration. 
+ * + * If disabled, everything not explicitly triggered by the guest, + * such as outgoing migration or dirty/change tracking, should not touch + * storage keys and should not lazily enable it. + * + * @ks: the #S390SKeysState + * + * Returns false if not enabled and true if enabled. + */ + bool (*skeys_are_enabled)(S390SKeysState *ks); + + /** + * @enable_skeys: + * + * Lazily enable storage keys. If this function is not implemented, + * setting a storage key will lazily enable storage keys implicitly + * instead. TCG guests have to make sure to flush the TLB of all CPUs + * if storage keys were not enabled before this call. + * + * @ks: the #S390SKeysState + * + * Returns false if not enabled before this call, and true if already + * enabled. + */ + bool (*enable_skeys)(S390SKeysState *ks); + + /** + * @get_skeys: + * + * Get storage keys for the given PFN range. This call will fail if + * storage keys have not been lazily enabled yet. + * + * Callers have to validate that a GFN is valid before this call. + * + * @ks: the #S390SKeysState + * @start_gfn: the start GFN to get storage keys for + * @count: the number of storage keys to get + * @keys: the byte array where storage keys will be stored to + * + * Returns 0 on success, returns an error if getting a storage key failed. + */ int (*get_skeys)(S390SKeysState *ks, uint64_t start_gfn, uint64_t count, uint8_t *keys); + /** + * @set_skeys: + * + * Set storage keys for the given PFN range. This call will fail if + * storage keys have not been lazily enabled yet and implicit + * enablement is not supported. + * + * Callers have to validate that a GFN is valid before this call. + * + * @ks: the #S390SKeysState + * @start_gfn: the start GFN to set storage keys for + * @count: the number of storage keys to set + * @keys: the byte array where storage keys will be read from + * + * Returns 0 on success, returns an error if setting a storage key failed. 
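+ *
+ * For illustration, a sketch of how a caller might combine these
+ * callbacks (the S390_SKEYS_GET_CLASS() accessor, @ss and the key-bit
+ * value below are assumptions for the example, not defined here):
+ *
+ *   S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss);
+ *   uint8_t key;
+ *
+ *   if (!skeyclass->skeys_are_enabled(ss)) {
+ *       return;                // nothing to do, per the contract above
+ *   }
+ *   if (skeyclass->get_skeys(ss, start_gfn, 1, &key) == 0) {
+ *       key |= 0x04;           // illustrative modification only
+ *       if (skeyclass->set_skeys(ss, start_gfn, 1, &key)) {
+ *           // report/handle the error
+ *       }
+ *   }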
+ */ int (*set_skeys)(S390SKeysState *ks, uint64_t start_gfn, uint64_t count, uint8_t *keys); }; diff --git a/include/hw/scsi/esp.h b/include/hw/scsi/esp.h index b1ec27612f..13b17496f8 100644 --- a/include/hw/scsi/esp.h +++ b/include/hw/scsi/esp.h @@ -51,7 +51,7 @@ struct ESPState { ESPDMAMemoryReadWriteFunc dma_memory_write; void *dma_opaque; void (*dma_cb)(ESPState *s); - void (*pdma_cb)(ESPState *s); + uint8_t pdma_cb; uint8_t mig_version_id; @@ -150,6 +150,15 @@ struct SysBusESPState { #define TCHI_FAS100A 0x4 #define TCHI_AM53C974 0x12 +/* PDMA callbacks */ +enum pdma_cb { + SATN_PDMA_CB = 0, + S_WITHOUT_SATN_PDMA_CB = 1, + SATN_STOP_PDMA_CB = 2, + WRITE_RESPONSE_PDMA_CB = 3, + DO_DMA_PDMA_CB = 4 +}; + void esp_dma_enable(ESPState *s, int irq, int level); void esp_request_cancelled(SCSIRequest *req); void esp_command_complete(SCSIRequest *req, size_t resid); diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h index 0b726bc78c..6ea4b64fe7 100644 --- a/include/hw/scsi/scsi.h +++ b/include/hw/scsi/scsi.h @@ -30,7 +30,7 @@ struct SCSIRequest { int16_t status; int16_t host_status; void *hba_private; - size_t resid; + uint64_t residual; SCSICommand cmd; NotifierList cancel_notifiers; @@ -59,7 +59,7 @@ struct SCSIDeviceClass { void (*realize)(SCSIDevice *dev, Error **errp); void (*unrealize)(SCSIDevice *dev); int (*parse_cdb)(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf, - void *hba_private); + size_t buf_len, void *hba_private); SCSIRequest *(*alloc_req)(SCSIDevice *s, uint32_t tag, uint32_t lun, uint8_t *buf, void *hba_private); void (*unit_attention_reported)(SCSIDevice *s); @@ -122,10 +122,10 @@ struct SCSIBusInfo { int tcq; int max_channel, max_target, max_lun; int (*parse_cdb)(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf, - void *hba_private); + size_t buf_len, void *hba_private); void (*transfer_data)(SCSIRequest *req, uint32_t arg); void (*fail)(SCSIRequest *req); - void (*complete)(SCSIRequest *req, size_t resid); + void (*complete)(SCSIRequest *req, size_t residual); void (*cancel)(SCSIRequest *req); void (*change)(SCSIBus *bus, SCSIDevice *dev, SCSISense sense); QEMUSGList *(*get_sg_list)(SCSIRequest *req); @@ -146,8 +146,34 @@ struct SCSIBus { const SCSIBusInfo *info; }; -void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host, - const SCSIBusInfo *info, const char *bus_name); +/** + * scsi_bus_init_named: Initialize a SCSI bus with the specified name + * @bus: SCSIBus object to initialize + * @bus_size: size of @bus object + * @host: Device which owns the bus (generally the SCSI controller) + * @info: structure defining callbacks etc for the controller + * @bus_name: Name to use for this bus + * + * This in-place initializes @bus as a new SCSI bus with a name + * provided by the caller. It is the caller's responsibility to make + * sure that name does not clash with the name of any other bus in the + * system. Unless you need the new bus to have a specific name, you + * should use scsi_bus_init() instead. + */ +void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host, + const SCSIBusInfo *info, const char *bus_name); + +/** + * scsi_bus_init: Initialize a SCSI bus + * + * This in-place-initializes @bus as a new SCSI bus and gives it + * an automatically generated unique name. 
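+ *
+ * A minimal usage sketch from a controller's realize hook (MyHBAState,
+ * MY_HBA() and my_hba_scsi_info are assumed names for the example, not
+ * part of this header):
+ *
+ *   static void my_hba_realize(DeviceState *dev, Error **errp)
+ *   {
+ *       MyHBAState *s = MY_HBA(dev);
+ *
+ *       scsi_bus_init(&s->bus, sizeof(s->bus), dev, &my_hba_scsi_info);
+ *   }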
+ */ +static inline void scsi_bus_init(SCSIBus *bus, size_t bus_size, + DeviceState *host, const SCSIBusInfo *info) +{ + scsi_bus_init_named(bus, bus_size, host, info, NULL); +} static inline SCSIBus *scsi_bus_from_device(SCSIDevice *d) { @@ -160,20 +186,21 @@ SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk, BlockdevOnError rerror, BlockdevOnError werror, const char *serial, Error **errp); +void scsi_bus_set_ua(SCSIBus *bus, SCSISense sense); void scsi_bus_legacy_handle_cmdline(SCSIBus *bus); -void scsi_legacy_handle_cmdline(void); SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d, uint32_t tag, uint32_t lun, void *hba_private); SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun, - uint8_t *buf, void *hba_private); + uint8_t *buf, size_t buf_len, void *hba_private); int32_t scsi_req_enqueue(SCSIRequest *req); SCSIRequest *scsi_req_ref(SCSIRequest *req); void scsi_req_unref(SCSIRequest *req); int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf, - void *hba_private); -int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf); + size_t buf_len, void *hba_private); +int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf, + size_t buf_len); void scsi_req_build_sense(SCSIRequest *req, SCSISense sense); void scsi_req_print(SCSIRequest *req); void scsi_req_continue(SCSIRequest *req); @@ -200,4 +227,10 @@ SCSIDevice *scsi_device_get(SCSIBus *bus, int channel, int target, int lun); /* scsi-generic.c. */ extern const SCSIReqOps scsi_generic_req_ops; +/* scsi-disk.c */ +#define SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR 0 +#define SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD 1 +#define SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE 2 +#define SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED 3 + #endif diff --git a/include/hw/sd/allwinner-sdhost.h b/include/hw/sd/allwinner-sdhost.h index bfe08ff4ef..30c1e60404 100644 --- a/include/hw/sd/allwinner-sdhost.h +++ b/include/hw/sd/allwinner-sdhost.h @@ -130,6 +130,7 @@ struct AwSdHostClass { /** Maximum buffer size in bytes per DMA descriptor */ size_t max_desc_size; + bool is_sun4i; }; diff --git a/include/hw/sd/npcm7xx_sdhci.h b/include/hw/sd/npcm7xx_sdhci.h new file mode 100644 index 0000000000..d728f0a40d --- /dev/null +++ b/include/hw/sd/npcm7xx_sdhci.h @@ -0,0 +1,65 @@ +/* + * NPCM7xx SD-3.0 / eMMC-4.51 Host Controller + * + * Copyright (c) 2021 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#ifndef NPCM7XX_SDHCI_H +#define NPCM7XX_SDHCI_H + +#include "hw/sd/sdhci.h" +#include "qom/object.h" + +#define TYPE_NPCM7XX_SDHCI "npcm7xx.sdhci" +#define NPCM7XX_PRSTVALS_SIZE 6 +#define NPCM7XX_PRSTVALS 0x60 +#define NPCM7XX_PRSTVALS_0 0x0 +#define NPCM7XX_PRSTVALS_1 0x2 +#define NPCM7XX_PRSTVALS_2 0x4 +#define NPCM7XX_PRSTVALS_3 0x6 +#define NPCM7XX_PRSTVALS_4 0x8 +#define NPCM7XX_PRSTVALS_5 0xA +#define NPCM7XX_BOOTTOCTRL 0x10 +#define NPCM7XX_SDHCI_REGSIZE 0x20 + +#define NPCM7XX_PRSNTS_RESET 0x04A00000 +#define NPCM7XX_BLKGAP_RESET 0x80 +#define NPCM7XX_CAPAB_RESET 0x0100200161EE0399 +#define NPCM7XX_MAXCURR_RESET 0x0000000000000005 +#define NPCM7XX_HCVER_RESET 0x1002 + +#define NPCM7XX_PRSTVALS_0_RESET 0x0040 +#define NPCM7XX_PRSTVALS_1_RESET 0x0001 +#define NPCM7XX_PRSTVALS_3_RESET 0x0001 + +OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxSDHCIState, NPCM7XX_SDHCI) + +typedef struct NPCM7xxRegs { + /* Preset Values Register Field, read-only */ + uint16_t prstvals[NPCM7XX_PRSTVALS_SIZE]; + /* Boot Timeout Control Register, read-write */ + uint32_t boottoctrl; +} NPCM7xxRegisters; + +typedef struct NPCM7xxSDHCIState { + SysBusDevice parent; + + MemoryRegion container; + MemoryRegion iomem; + BusState *bus; + NPCM7xxRegisters regs; + + SDHCIState sdhci; +} NPCM7xxSDHCIState; + +#endif /* NPCM7XX_SDHCI_H */ diff --git a/include/hw/sensor/emc141x_regs.h b/include/hw/sensor/emc141x_regs.h index 0560fb7c5c..e509a43d55 100644 --- a/include/hw/sensor/emc141x_regs.h +++ b/include/hw/sensor/emc141x_regs.h @@ -9,8 +9,8 @@ * later. See the COPYING file in the top-level directory. */ -#ifndef TMP105_REGS_H -#define TMP105_REGS_H +#ifndef EMC141X_REGS_H +#define EMC141X_REGS_H #define EMC1413_DEVICE_ID 0x21 #define EMC1414_DEVICE_ID 0x25 diff --git a/include/hw/sensor/isl_pmbus_vr.h b/include/hw/sensor/isl_pmbus_vr.h new file mode 100644 index 0000000000..aa2c2767df --- /dev/null +++ b/include/hw/sensor/isl_pmbus_vr.h @@ -0,0 +1,57 @@ +/* + * PMBus device for Renesas Digital Multiphase Voltage Regulators + * + * Copyright 2022 Google LLC + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_MISC_ISL_PMBUS_VR_H +#define HW_MISC_ISL_PMBUS_VR_H + +#include "hw/i2c/pmbus_device.h" +#include "qom/object.h" + +#define TYPE_ISL69259 "isl69259" +#define TYPE_ISL69260 "isl69260" +#define TYPE_RAA228000 "raa228000" +#define TYPE_RAA229004 "raa229004" +#define ISL_MAX_IC_DEVICE_ID_LEN 16 + +struct ISLState { + PMBusDevice parent; + + uint8_t ic_device_id[ISL_MAX_IC_DEVICE_ID_LEN]; + uint8_t ic_device_id_len; +}; + +OBJECT_DECLARE_SIMPLE_TYPE(ISLState, ISL69260) + +#define ISL_CAPABILITY_DEFAULT 0x40 +#define ISL_OPERATION_DEFAULT 0x80 +#define ISL_ON_OFF_CONFIG_DEFAULT 0x16 +#define ISL_VOUT_MODE_DEFAULT 0x40 +#define ISL_VOUT_COMMAND_DEFAULT 0x0384 +#define ISL_VOUT_MAX_DEFAULT 0x08FC +#define ISL_VOUT_MARGIN_HIGH_DEFAULT 0x0640 +#define ISL_VOUT_MARGIN_LOW_DEFAULT 0xFA +#define ISL_VOUT_TRANSITION_RATE_DEFAULT 0x64 +#define ISL_VOUT_OV_FAULT_LIMIT_DEFAULT 0x076C +#define ISL_OT_FAULT_LIMIT_DEFAULT 0x7D +#define ISL_OT_WARN_LIMIT_DEFAULT 0x07D0 +#define ISL_VIN_OV_WARN_LIMIT_DEFAULT 0x36B0 +#define ISL_VIN_UV_WARN_LIMIT_DEFAULT 0x1F40 +#define ISL_IIN_OC_FAULT_LIMIT_DEFAULT 0x32 +#define ISL_TON_DELAY_DEFAULT 0x14 +#define ISL_TON_RISE_DEFAULT 0x01F4 +#define ISL_TOFF_FALL_DEFAULT 0x01F4 +#define ISL_REVISION_DEFAULT 0x33 +#define ISL_READ_VOUT_DEFAULT 1000 +#define ISL_READ_IOUT_DEFAULT 40 +#define ISL_READ_POUT_DEFAULT 4 +#define ISL_READ_TEMP_DEFAULT 25 +#define ISL_READ_VIN_DEFAULT 1100 
+#define ISL_READ_IIN_DEFAULT 40 +#define ISL_READ_PIN_DEFAULT 4 + +#endif diff --git a/include/hw/sh4/sh.h b/include/hw/sh4/sh.h index becb596979..ec716cdd45 100644 --- a/include/hw/sh4/sh.h +++ b/include/hw/sh4/sh.h @@ -44,25 +44,18 @@ typedef struct { uint16_t portbmask_trigger; /* Return 0 if no action was taken */ int (*port_change_cb) (uint16_t porta, uint16_t portb, - uint16_t * periph_pdtra, - uint16_t * periph_portdira, - uint16_t * periph_pdtrb, - uint16_t * periph_portdirb); + uint16_t *periph_pdtra, + uint16_t *periph_portdira, + uint16_t *periph_pdtrb, + uint16_t *periph_portdirb); } sh7750_io_device; int sh7750_register_io_device(struct SH7750State *s, - sh7750_io_device * device); + sh7750_io_device *device); /* sh_serial.c */ +#define TYPE_SH_SERIAL "sh-serial" #define SH_SERIAL_FEAT_SCIF (1 << 0) -void sh_serial_init(MemoryRegion *sysmem, - hwaddr base, int feat, - uint32_t freq, Chardev *chr, - qemu_irq eri_source, - qemu_irq rxi_source, - qemu_irq txi_source, - qemu_irq tei_source, - qemu_irq bri_source); /* sh7750.c */ qemu_irq sh7750_irl(struct SH7750State *s); diff --git a/include/hw/sh4/sh_intc.h b/include/hw/sh4/sh_intc.h index 65f3425057..f62d5c5e13 100644 --- a/include/hw/sh4/sh_intc.h +++ b/include/hw/sh4/sh_intc.h @@ -58,7 +58,7 @@ struct intc_desc { }; int sh_intc_get_pending_vector(struct intc_desc *desc, int imask); -struct intc_source *sh_intc_source(struct intc_desc *desc, intc_enum id); + void sh_intc_toggle_source(struct intc_source *source, int enable_adj, int assert_adj); diff --git a/include/hw/southbridge/piix.h b/include/hw/southbridge/piix.h index 6387f2b612..2693778b23 100644 --- a/include/hw/southbridge/piix.h +++ b/include/hw/southbridge/piix.h @@ -15,12 +15,6 @@ #include "hw/pci/pci.h" #include "qom/object.h" -#define TYPE_PIIX4_PM "PIIX4_PM" - -I2CBus *piix4_pm_init(PCIBus *bus, int devfn, uint32_t smb_io_base, - qemu_irq sci_irq, qemu_irq smi_irq, - int smm_enabled, DeviceState **piix4_pm); - /* PIRQRC[A:D]: PIRQx Route Control Registers */ #define PIIX_PIRQCA 0x60 #define PIIX_PIRQCB 0x61 @@ -70,10 +64,8 @@ typedef struct PIIXState PIIX3State; DECLARE_INSTANCE_CHECKER(PIIX3State, PIIX3_PCI_DEVICE, TYPE_PIIX3_PCI_DEVICE) -extern PCIDevice *piix4_dev; - -PIIX3State *piix3_create(PCIBus *pci_bus, ISABus **isa_bus); - -DeviceState *piix4_create(PCIBus *pci_bus, ISABus **isa_bus, I2CBus **smbus); +#define TYPE_PIIX3_DEVICE "PIIX3" +#define TYPE_PIIX3_XEN_DEVICE "PIIX3-xen" +#define TYPE_PIIX4_PCI_DEVICE "piix4-isa" #endif diff --git a/include/hw/ssi/aspeed_smc.h b/include/hw/ssi/aspeed_smc.h index cdaf165300..8e1dda556b 100644 --- a/include/hw/ssi/aspeed_smc.h +++ b/include/hw/ssi/aspeed_smc.h @@ -29,68 +29,36 @@ #include "hw/sysbus.h" #include "qom/object.h" -typedef struct AspeedSegments { - hwaddr addr; - uint32_t size; -} AspeedSegments; - struct AspeedSMCState; -typedef struct AspeedSMCController { - const char *name; - uint8_t r_conf; - uint8_t r_ce_ctrl; - uint8_t r_ctrl0; - uint8_t r_timings; - uint8_t nregs_timings; - uint8_t conf_enable_w0; - uint8_t max_peripherals; - const AspeedSegments *segments; - hwaddr flash_window_base; - uint32_t flash_window_size; - uint32_t features; - hwaddr dma_flash_mask; - hwaddr dma_dram_mask; - uint32_t nregs; - uint32_t (*segment_to_reg)(const struct AspeedSMCState *s, - const AspeedSegments *seg); - void (*reg_to_segment)(const struct AspeedSMCState *s, uint32_t reg, - AspeedSegments *seg); - void (*dma_ctrl)(struct AspeedSMCState *s, uint32_t value); -} AspeedSMCController; +struct AspeedSMCClass; + 
+#define TYPE_ASPEED_SMC_FLASH "aspeed.smc.flash" +OBJECT_DECLARE_SIMPLE_TYPE(AspeedSMCFlash, ASPEED_SMC_FLASH) +struct AspeedSMCFlash { + SysBusDevice parent_obj; -typedef struct AspeedSMCFlash { struct AspeedSMCState *controller; - - uint8_t id; - uint32_t size; + struct AspeedSMCClass *asc; + uint8_t cs; MemoryRegion mmio; - DeviceState *flash; -} AspeedSMCFlash; +}; #define TYPE_ASPEED_SMC "aspeed.smc" OBJECT_DECLARE_TYPE(AspeedSMCState, AspeedSMCClass, ASPEED_SMC) -struct AspeedSMCClass { - SysBusDevice parent_obj; - const AspeedSMCController *ctrl; -}; - #define ASPEED_SMC_R_MAX (0x100 / 4) +#define ASPEED_SMC_CS_MAX 5 struct AspeedSMCState { SysBusDevice parent_obj; - const AspeedSMCController *ctrl; - MemoryRegion mmio; + MemoryRegion mmio_flash_container; MemoryRegion mmio_flash; - MemoryRegion mmio_flash_alias; qemu_irq irq; - int irqline; - uint32_t num_cs; qemu_irq *cs_lines; bool inject_failure; @@ -109,10 +77,42 @@ struct AspeedSMCState { MemoryRegion *dram_mr; AddressSpace dram_as; - AspeedSMCFlash *flashes; + AspeedSMCFlash flashes[ASPEED_SMC_CS_MAX]; uint8_t snoop_index; uint8_t snoop_dummies; }; +typedef struct AspeedSegments { + hwaddr addr; + uint32_t size; +} AspeedSegments; + +struct AspeedSMCClass { + SysBusDeviceClass parent_obj; + + uint8_t r_conf; + uint8_t r_ce_ctrl; + uint8_t r_ctrl0; + uint8_t r_timings; + uint8_t nregs_timings; + uint8_t conf_enable_w0; + uint8_t cs_num_max; + const uint32_t *resets; + const AspeedSegments *segments; + uint32_t segment_addr_mask; + hwaddr flash_window_base; + uint32_t flash_window_size; + uint32_t features; + hwaddr dma_flash_mask; + hwaddr dma_dram_mask; + uint32_t nregs; + uint32_t (*segment_to_reg)(const AspeedSMCState *s, + const AspeedSegments *seg); + void (*reg_to_segment)(const AspeedSMCState *s, uint32_t reg, + AspeedSegments *seg); + void (*dma_ctrl)(AspeedSMCState *s, uint32_t value); + int (*addr_width)(const AspeedSMCState *s); +}; + #endif /* ASPEED_SMC_H */ diff --git a/include/hw/ssi/ibex_spi_host.h b/include/hw/ssi/ibex_spi_host.h new file mode 100644 index 0000000000..1f6d077766 --- /dev/null +++ b/include/hw/ssi/ibex_spi_host.h @@ -0,0 +1,94 @@ + +/* + * QEMU model of the Ibex SPI Controller + * SPEC Reference: https://docs.opentitan.org/hw/ip/spi_host/doc/ + * + * Copyright (C) 2022 Western Digital + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#ifndef IBEX_SPI_HOST_H +#define IBEX_SPI_HOST_H + +#include "hw/sysbus.h" +#include "hw/hw.h" +#include "hw/ssi/ssi.h" +#include "qemu/fifo8.h" +#include "qom/object.h" +#include "hw/registerfields.h" +#include "qemu/timer.h" + +#define TYPE_IBEX_SPI_HOST "ibex-spi" +#define IBEX_SPI_HOST(obj) \ + OBJECT_CHECK(IbexSPIHostState, (obj), TYPE_IBEX_SPI_HOST) + +/* SPI Registers */ +#define IBEX_SPI_HOST_INTR_STATE (0x00 / 4) /* rw1c */ +#define IBEX_SPI_HOST_INTR_ENABLE (0x04 / 4) /* rw */ +#define IBEX_SPI_HOST_INTR_TEST (0x08 / 4) /* wo */ +#define IBEX_SPI_HOST_ALERT_TEST (0x0c / 4) /* wo */ +#define IBEX_SPI_HOST_CONTROL (0x10 / 4) /* rw */ +#define IBEX_SPI_HOST_STATUS (0x14 / 4) /* ro */ +#define IBEX_SPI_HOST_CONFIGOPTS (0x18 / 4) /* rw */ +#define IBEX_SPI_HOST_CSID (0x1c / 4) /* rw */ +#define IBEX_SPI_HOST_COMMAND (0x20 / 4) /* wo */ +/* RX/TX Modelled by FIFO */ +#define IBEX_SPI_HOST_RXDATA (0x24 / 4) +#define IBEX_SPI_HOST_TXDATA (0x28 / 4) + +#define IBEX_SPI_HOST_ERROR_ENABLE (0x2c / 4) /* rw */ +#define IBEX_SPI_HOST_ERROR_STATUS (0x30 / 4) /* rw1c */ +#define IBEX_SPI_HOST_EVENT_ENABLE (0x34 / 4) /* rw */ + +/* FIFO Len in Bytes */ +#define IBEX_SPI_HOST_TXFIFO_LEN 288 +#define IBEX_SPI_HOST_RXFIFO_LEN 256 + +/* Max Register (Based on addr) */ +#define IBEX_SPI_HOST_MAX_REGS (IBEX_SPI_HOST_EVENT_ENABLE + 1) + +/* MISC */ +#define TX_INTERRUPT_TRIGGER_DELAY_NS 100 +#define BIDIRECTIONAL_TRANSFER 3 + +typedef struct { + /* */ + SysBusDevice parent_obj; + + /* */ + MemoryRegion mmio; + uint32_t regs[IBEX_SPI_HOST_MAX_REGS]; + /* Multi-reg that sets config opts per CS */ + uint32_t *config_opts; + Fifo8 rx_fifo; + Fifo8 tx_fifo; + QEMUTimer *fifo_trigger_handle; + + qemu_irq event; + qemu_irq host_err; + uint32_t num_cs; + qemu_irq *cs_lines; + SSIBus *ssi; + + /* Used to track the init status, for replicating TXDATA ghost writes */ + bool init_status; +} IbexSPIHostState; + +#endif diff --git a/include/hw/ssi/ssi.h b/include/hw/ssi/ssi.h index f411858ab0..6950f86810 100644 --- a/include/hw/ssi/ssi.h +++ b/include/hw/ssi/ssi.h @@ -59,6 +59,9 @@ struct SSIPeripheralClass { struct SSIPeripheral { DeviceState parent_obj; + /* cache the class */ + SSIPeripheralClass *spc; + /* Chip select state */ bool cs; }; diff --git a/include/hw/ssi/xlnx-versal-ospi.h b/include/hw/ssi/xlnx-versal-ospi.h new file mode 100644 index 0000000000..5d131d351d --- /dev/null +++ b/include/hw/ssi/xlnx-versal-ospi.h @@ -0,0 +1,111 @@ +/* + * Header file for the Xilinx Versal's OSPI controller + * + * Copyright (C) 2021 Xilinx Inc + * Written by Francisco Iglesias + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* + * This is a model of Xilinx Versal's Octal SPI flash memory controller + * documented in Versal's Technical Reference manual [1] and the Versal ACAP + * Register reference [2]. + * + * References: + * + * [1] Versal ACAP Technical Reference Manual, + * https://www.xilinx.com/support/documentation/architecture-manuals/am011-versal-acap-trm.pdf + * + * [2] Versal ACAP Register Reference, + * https://www.xilinx.com/html_docs/registers/am012/am012-versal-register-reference.html#mod___ospi.html + * + * + * QEMU interface: + * + sysbus MMIO region 0: MemoryRegion for the device's registers + * + sysbus MMIO region 1: MemoryRegion for flash memory linear address space + * (data transfer). + * + sysbus IRQ 0: Device interrupt. + * + Named GPIO input "ospi-mux-sel": 0: enables indirect access mode + * and 1: enables direct access mode. + * + Property "dac-with-indac": Allow both direct accesses and indirect + * accesses simultaneously. + * + Property "indac-write-disabled": Disable indirect access writes. + */ + +#ifndef XLNX_VERSAL_OSPI_H +#define XLNX_VERSAL_OSPI_H + +#include "hw/register.h" +#include "hw/ssi/ssi.h" +#include "qemu/fifo8.h" +#include "hw/dma/xlnx_csu_dma.h" + +#define TYPE_XILINX_VERSAL_OSPI "xlnx.versal-ospi" + +OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalOspi, XILINX_VERSAL_OSPI) + +#define XILINX_VERSAL_OSPI_R_MAX (0xfc / 4 + 1) + +/* + * Indirect operations + */ +typedef struct IndOp { + uint32_t flash_addr; + uint32_t num_bytes; + uint32_t done_bytes; + bool completed; +} IndOp; + +struct XlnxVersalOspi { + SysBusDevice parent_obj; + + MemoryRegion iomem; + MemoryRegion iomem_dac; + + uint8_t num_cs; + qemu_irq *cs_lines; + + SSIBus *spi; + + Fifo8 rx_fifo; + Fifo8 tx_fifo; + + Fifo8 rx_sram; + Fifo8 tx_sram; + + qemu_irq irq; + + XlnxCSUDMA *dma_src; + bool ind_write_disabled; + bool dac_with_indac; + bool dac_enable; + bool src_dma_inprog; + + IndOp rd_ind_op[2]; + IndOp wr_ind_op[2]; + + uint32_t regs[XILINX_VERSAL_OSPI_R_MAX]; + RegisterInfo regs_info[XILINX_VERSAL_OSPI_R_MAX]; + + /* Maximum inferred membank size is 512 bytes */ + uint8_t stig_membank[512]; +}; + +#endif /* XLNX_VERSAL_OSPI_H */ diff --git a/include/hw/timer/armv7m_systick.h b/include/hw/timer/armv7m_systick.h index 84496faaf9..ee09b13881 100644 --- a/include/hw/timer/armv7m_systick.h +++ b/include/hw/timer/armv7m_systick.h @@ -15,11 +15,23 @@ #include "hw/sysbus.h" #include "qom/object.h" #include "hw/ptimer.h" +#include "hw/clock.h" #define TYPE_SYSTICK "armv7m_systick" OBJECT_DECLARE_SIMPLE_TYPE(SysTickState, SYSTICK) +/* + * QEMU interface: + * + sysbus MMIO region 0 is the register interface (covering + * the registers which are mapped at address 0xE000E010) + * + sysbus IRQ 0 is the interrupt line to the NVIC + * + Clock input "refclk" is the external reference clock + * (used when SYST_CSR.CLKSOURCE == 0) + * + Clock input "cpuclk" is the main CPU clock + * (used when SYST_CSR.CLKSOURCE == 1) + */ + struct SysTickState { /*< private >*/ SysBusDevice parent_obj; @@ -31,28 +43,8 @@ struct SysTickState { ptimer_state *ptimer; MemoryRegion iomem; qemu_irq irq; + Clock *refclk; + Clock *cpuclk; }; -/* - * Multiplication factor to convert from system clock ticks to qemu timer - * ticks. 
This should be set (by board code, usually) to a value - * equal to NANOSECONDS_PER_SECOND / frq, where frq is the clock frequency - * in Hz of the CPU. - * - * This value is used by the systick device when it is running in - * its "use the CPU clock" mode (ie when SYST_CSR.CLKSOURCE == 1) to - * set how fast the timer should tick. - * - * TODO: we should refactor this so that rather than using a global - * we use a device property or something similar. This is complicated - * because (a) the property would need to be plumbed through from the - * board code down through various layers to the systick device - * and (b) the property needs to be modifiable after realize, because - * the stellaris board uses this to implement the behaviour where the - * guest can reprogram the PLL registers to downclock the CPU, and the - * systick device needs to react accordingly. Possibly this should - * be deferred until we have a good API for modelling clock trees. - */ -extern int system_clock_scale; - #endif diff --git a/include/hw/timer/aspeed_timer.h b/include/hw/timer/aspeed_timer.h index d36034a10c..07dc6b6f2c 100644 --- a/include/hw/timer/aspeed_timer.h +++ b/include/hw/timer/aspeed_timer.h @@ -31,6 +31,7 @@ OBJECT_DECLARE_TYPE(AspeedTimerCtrlState, AspeedTimerClass, ASPEED_TIMER) #define TYPE_ASPEED_2400_TIMER TYPE_ASPEED_TIMER "-ast2400" #define TYPE_ASPEED_2500_TIMER TYPE_ASPEED_TIMER "-ast2500" #define TYPE_ASPEED_2600_TIMER TYPE_ASPEED_TIMER "-ast2600" +#define TYPE_ASPEED_1030_TIMER TYPE_ASPEED_TIMER "-ast1030" #define ASPEED_TIMER_NR_TIMERS 8 diff --git a/include/hw/timer/bcm2835_systmr.h b/include/hw/timer/bcm2835_systmr.h index bd3097d746..a8f605beeb 100644 --- a/include/hw/timer/bcm2835_systmr.h +++ b/include/hw/timer/bcm2835_systmr.h @@ -6,8 +6,8 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef BCM2835_SYSTIMER_H -#define BCM2835_SYSTIMER_H +#ifndef BCM2835_SYSTMR_H +#define BCM2835_SYSTMR_H #include "hw/sysbus.h" #include "hw/irq.h" diff --git a/include/hw/timer/cadence_ttc.h b/include/hw/timer/cadence_ttc.h new file mode 100644 index 0000000000..e1251383f2 --- /dev/null +++ b/include/hw/timer/cadence_ttc.h @@ -0,0 +1,54 @@ +/* + * Xilinx Zynq cadence TTC model + * + * Copyright (c) 2011 Xilinx Inc. + * Copyright (c) 2012 Peter A.G. Crosthwaite (peter.crosthwaite@petalogix.com) + * Copyright (c) 2012 PetaLogix Pty Ltd. + * Written By Haibing Ma + * M. Habib + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */ +#ifndef HW_TIMER_CADENCE_TTC_H +#define HW_TIMER_CADENCE_TTC_H + +#include "hw/sysbus.h" +#include "qemu/timer.h" + +typedef struct { + QEMUTimer *timer; + int freq; + + uint32_t reg_clock; + uint32_t reg_count; + uint32_t reg_value; + uint16_t reg_interval; + uint16_t reg_match[3]; + uint32_t reg_intr; + uint32_t reg_intr_en; + uint32_t reg_event_ctrl; + uint32_t reg_event; + + uint64_t cpu_time; + unsigned int cpu_time_valid; + + qemu_irq irq; +} CadenceTimerState; + +#define TYPE_CADENCE_TTC "cadence_ttc" +OBJECT_DECLARE_SIMPLE_TYPE(CadenceTTCState, CADENCE_TTC) + +struct CadenceTTCState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + CadenceTimerState timer[3]; +}; + +#endif diff --git a/include/hw/timer/ibex_timer.h b/include/hw/timer/ibex_timer.h index 6a43537003..41f5c82a92 100644 --- a/include/hw/timer/ibex_timer.h +++ b/include/hw/timer/ibex_timer.h @@ -33,6 +33,8 @@ OBJECT_DECLARE_SIMPLE_TYPE(IbexTimerState, IBEX_TIMER) struct IbexTimerState { /* */ SysBusDevice parent_obj; + uint64_t mtimecmp; + QEMUTimer *mtimer; /* Internal timer for M-mode interrupt */ /* */ MemoryRegion mmio; @@ -43,10 +45,11 @@ struct IbexTimerState { uint32_t timer_compare_upper0; uint32_t timer_intr_enable; uint32_t timer_intr_state; - uint32_t timer_intr_test; uint32_t timebase_freq; qemu_irq irq; + + qemu_irq m_timer_irq; }; #endif /* HW_IBEX_TIMER_H */ diff --git a/include/hw/timer/sifive_pwm.h b/include/hw/timer/sifive_pwm.h new file mode 100644 index 0000000000..6a8cf7b29e --- /dev/null +++ b/include/hw/timer/sifive_pwm.h @@ -0,0 +1,62 @@ +/* + * SiFive PWM + * + * Copyright (c) 2020 Western Digital + * + * Author: Alistair Francis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#ifndef HW_SIFIVE_PWM_H +#define HW_SIFIVE_PWM_H + +#include "hw/sysbus.h" +#include "qemu/timer.h" +#include "qom/object.h" + +#define TYPE_SIFIVE_PWM "sifive-pwm" + +#define SIFIVE_PWM(obj) \ + OBJECT_CHECK(SiFivePwmState, (obj), TYPE_SIFIVE_PWM) + +#define SIFIVE_PWM_CHANS 4 +#define SIFIVE_PWM_IRQS SIFIVE_PWM_CHANS + +typedef struct SiFivePwmState { + /* */ + SysBusDevice parent_obj; + + /* */ + MemoryRegion mmio; + QEMUTimer timer[SIFIVE_PWM_CHANS]; + /* + * if en bit(s) set, is the number of ticks when pwmcount was 0 + * if en bit(s) not set, is the number of ticks in pwmcount + */ + uint64_t tick_offset; + uint64_t freq_hz; + + uint32_t pwmcfg; + uint32_t pwmcmp[SIFIVE_PWM_CHANS]; + + qemu_irq irqs[SIFIVE_PWM_IRQS]; +} SiFivePwmState; + +#endif /* HW_SIFIVE_PWM_H */ diff --git a/include/hw/timer/stellaris-gptm.h b/include/hw/timer/stellaris-gptm.h new file mode 100644 index 0000000000..fde1fc6f0c --- /dev/null +++ b/include/hw/timer/stellaris-gptm.h @@ -0,0 +1,51 @@ +/* + * Luminary Micro Stellaris General Purpose Timer Module + * + * Copyright (c) 2006 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + */ + +#ifndef HW_TIMER_STELLARIS_GPTM_H +#define HW_TIMER_STELLARIS_GPTM_H + +#include "qom/object.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/clock.h" + +#define TYPE_STELLARIS_GPTM "stellaris-gptm" +OBJECT_DECLARE_SIMPLE_TYPE(gptm_state, STELLARIS_GPTM) + +/* + * QEMU interface: + * + sysbus MMIO region 0: register bank + * + sysbus IRQ 0: timer interrupt + * + unnamed GPIO output 0: trigger output for the ADC + * + Clock input "clk": the 32-bit countdown timer runs at this speed + */ +struct gptm_state { + SysBusDevice parent_obj; + + MemoryRegion iomem; + uint32_t config; + uint32_t mode[2]; + uint32_t control; + uint32_t state; + uint32_t mask; + uint32_t load[2]; + uint32_t match[2]; + uint32_t prescale[2]; + uint32_t match_prescale[2]; + uint32_t rtc; + int64_t tick[2]; + struct gptm_state *opaque[2]; + QEMUTimer *timer[2]; + /* The timers have an alternate output used to trigger the ADC. */ + qemu_irq trigger; + qemu_irq irq; + Clock *clk; +}; + +#endif diff --git a/include/hw/tricore/tc27x_soc.h b/include/hw/tricore/tc27x_soc.h index 6a7e5b54f5..dd3a7485c8 100644 --- a/include/hw/tricore/tc27x_soc.h +++ b/include/hw/tricore/tc27x_soc.h @@ -18,8 +18,8 @@ * License along with this library; if not, see . */ -#ifndef TC27X_SoC_H -#define TC27X_SoC_H +#ifndef TC27X_SOC_H +#define TC27X_SOC_H #include "hw/sysbus.h" #include "target/tricore/cpu.h" diff --git a/include/hw/tricore/triboard.h b/include/hw/tricore/triboard.h index f3844be447..094c8bd563 100644 --- a/include/hw/tricore/triboard.h +++ b/include/hw/tricore/triboard.h @@ -21,7 +21,6 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "hw/boards.h" -#include "hw/arm/boot.h" #include "sysemu/sysemu.h" #include "exec/address-spaces.h" #include "qom/object.h" diff --git a/include/hw/tricore/tricore_testdevice.h b/include/hw/tricore/tricore_testdevice.h index 2c56c51bcb..1e2b8942ac 100644 --- a/include/hw/tricore/tricore_testdevice.h +++ b/include/hw/tricore/tricore_testdevice.h @@ -15,9 +15,8 @@ * License along with this library; if not, see . 
*/ - -#ifndef HW_TRICORE_TESTDEV_H -#define HW_TRICORE_TESTDEV_H +#ifndef HW_TRICORE_TESTDEVICE_H +#define HW_TRICORE_TESTDEVICE_H #include "hw/sysbus.h" #include "hw/hw.h" diff --git a/include/hw/usb.h b/include/hw/usb.h index 33668dd0a9..32c23a5ca2 100644 --- a/include/hw/usb.h +++ b/include/hw/usb.h @@ -66,42 +66,42 @@ //#define USB_STATE_POWERED 2 #define USB_STATE_DEFAULT 3 //#define USB_STATE_ADDRESS 4 -//#define USB_STATE_CONFIGURED 5 +//#define USB_STATE_CONFIGURED 5 #define USB_STATE_SUSPENDED 6 -#define USB_CLASS_AUDIO 1 -#define USB_CLASS_COMM 2 -#define USB_CLASS_HID 3 -#define USB_CLASS_PHYSICAL 5 -#define USB_CLASS_STILL_IMAGE 6 -#define USB_CLASS_PRINTER 7 -#define USB_CLASS_MASS_STORAGE 8 -#define USB_CLASS_HUB 9 -#define USB_CLASS_CDC_DATA 0x0a -#define USB_CLASS_CSCID 0x0b -#define USB_CLASS_CONTENT_SEC 0x0d -#define USB_CLASS_APP_SPEC 0xfe -#define USB_CLASS_VENDOR_SPEC 0xff +#define USB_CLASS_AUDIO 1 +#define USB_CLASS_COMM 2 +#define USB_CLASS_HID 3 +#define USB_CLASS_PHYSICAL 5 +#define USB_CLASS_STILL_IMAGE 6 +#define USB_CLASS_PRINTER 7 +#define USB_CLASS_MASS_STORAGE 8 +#define USB_CLASS_HUB 9 +#define USB_CLASS_CDC_DATA 0x0a +#define USB_CLASS_CSCID 0x0b +#define USB_CLASS_CONTENT_SEC 0x0d +#define USB_CLASS_APP_SPEC 0xfe +#define USB_CLASS_VENDOR_SPEC 0xff #define USB_SUBCLASS_UNDEFINED 0 #define USB_SUBCLASS_AUDIO_CONTROL 1 #define USB_SUBCLASS_AUDIO_STREAMING 2 #define USB_SUBCLASS_AUDIO_MIDISTREAMING 3 -#define USB_DIR_OUT 0 -#define USB_DIR_IN 0x80 +#define USB_DIR_OUT 0 +#define USB_DIR_IN 0x80 -#define USB_TYPE_MASK (0x03 << 5) -#define USB_TYPE_STANDARD (0x00 << 5) -#define USB_TYPE_CLASS (0x01 << 5) -#define USB_TYPE_VENDOR (0x02 << 5) -#define USB_TYPE_RESERVED (0x03 << 5) +#define USB_TYPE_MASK (0x03 << 5) +#define USB_TYPE_STANDARD (0x00 << 5) +#define USB_TYPE_CLASS (0x01 << 5) +#define USB_TYPE_VENDOR (0x02 << 5) +#define USB_TYPE_RESERVED (0x03 << 5) -#define USB_RECIP_MASK 0x1f -#define USB_RECIP_DEVICE 0x00 -#define USB_RECIP_INTERFACE 0x01 -#define USB_RECIP_ENDPOINT 0x02 -#define USB_RECIP_OTHER 0x03 +#define USB_RECIP_MASK 0x1f +#define USB_RECIP_DEVICE 0x00 +#define USB_RECIP_INTERFACE 0x01 +#define USB_RECIP_ENDPOINT 0x02 +#define USB_RECIP_OTHER 0x03 #define DeviceRequest ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8) #define DeviceOutRequest ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8) @@ -126,28 +126,28 @@ #define EndpointOutRequest \ ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8) -#define USB_REQ_GET_STATUS 0x00 -#define USB_REQ_CLEAR_FEATURE 0x01 -#define USB_REQ_SET_FEATURE 0x03 -#define USB_REQ_SET_ADDRESS 0x05 -#define USB_REQ_GET_DESCRIPTOR 0x06 -#define USB_REQ_SET_DESCRIPTOR 0x07 -#define USB_REQ_GET_CONFIGURATION 0x08 -#define USB_REQ_SET_CONFIGURATION 0x09 -#define USB_REQ_GET_INTERFACE 0x0A -#define USB_REQ_SET_INTERFACE 0x0B -#define USB_REQ_SYNCH_FRAME 0x0C +#define USB_REQ_GET_STATUS 0x00 +#define USB_REQ_CLEAR_FEATURE 0x01 +#define USB_REQ_SET_FEATURE 0x03 +#define USB_REQ_SET_ADDRESS 0x05 +#define USB_REQ_GET_DESCRIPTOR 0x06 +#define USB_REQ_SET_DESCRIPTOR 0x07 +#define USB_REQ_GET_CONFIGURATION 0x08 +#define USB_REQ_SET_CONFIGURATION 0x09 +#define USB_REQ_GET_INTERFACE 0x0A +#define USB_REQ_SET_INTERFACE 0x0B +#define USB_REQ_SYNCH_FRAME 0x0C #define USB_REQ_SET_SEL 0x30 #define USB_REQ_SET_ISOCH_DELAY 0x31 -#define USB_DEVICE_SELF_POWERED 0 -#define USB_DEVICE_REMOTE_WAKEUP 1 +#define USB_DEVICE_SELF_POWERED 0 +#define USB_DEVICE_REMOTE_WAKEUP 1 -#define USB_DT_DEVICE 0x01 -#define USB_DT_CONFIG 
0x02 -#define USB_DT_STRING 0x03 -#define USB_DT_INTERFACE 0x04 -#define USB_DT_ENDPOINT 0x05 +#define USB_DT_DEVICE 0x01 +#define USB_DT_CONFIG 0x02 +#define USB_DT_STRING 0x03 +#define USB_DT_INTERFACE 0x04 +#define USB_DT_ENDPOINT 0x05 #define USB_DT_DEVICE_QUALIFIER 0x06 #define USB_DT_OTHER_SPEED_CONFIG 0x07 #define USB_DT_DEBUG 0x0A @@ -167,10 +167,10 @@ #define USB_CFG_ATT_WAKEUP (1 << 5) #define USB_CFG_ATT_BATTERY (1 << 4) -#define USB_ENDPOINT_XFER_CONTROL 0 -#define USB_ENDPOINT_XFER_ISOC 1 -#define USB_ENDPOINT_XFER_BULK 2 -#define USB_ENDPOINT_XFER_INT 3 +#define USB_ENDPOINT_XFER_CONTROL 0 +#define USB_ENDPOINT_XFER_ISOC 1 +#define USB_ENDPOINT_XFER_BULK 2 +#define USB_ENDPOINT_XFER_INT 3 #define USB_ENDPOINT_XFER_INVALID 255 #define USB_INTERFACE_INVALID 255 @@ -569,9 +569,9 @@ static inline bool usb_device_is_scsi_storage(USBDevice *dev) /* quirks.c */ /* In bulk endpoints are streaming data sources (iow behave like isoc eps) */ -#define USB_QUIRK_BUFFER_BULK_IN 0x01 +#define USB_QUIRK_BUFFER_BULK_IN 0x01 /* Bulk pkts in FTDI format, need special handling when combining packets */ -#define USB_QUIRK_IS_FTDI 0x02 +#define USB_QUIRK_IS_FTDI 0x02 int usb_get_quirks(uint16_t vendor_id, uint16_t product_id, uint8_t interface_class, uint8_t interface_subclass, diff --git a/include/hw/usb/dwc2-regs.h b/include/hw/usb/dwc2-regs.h index a7eb531485..0bf3f2aa17 100644 --- a/include/hw/usb/dwc2-regs.h +++ b/include/hw/usb/dwc2-regs.h @@ -39,791 +39,791 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef DWC2_HW_H -#define DWC2_HW_H +#ifndef DWC2_REGS_H +#define DWC2_REGS_H -#define HSOTG_REG(x) (x) +#define HSOTG_REG(x) (x) -#define GOTGCTL HSOTG_REG(0x000) -#define GOTGCTL_CHIRPEN BIT(27) -#define GOTGCTL_MULT_VALID_BC_MASK (0x1f << 22) -#define GOTGCTL_MULT_VALID_BC_SHIFT 22 -#define GOTGCTL_OTGVER BIT(20) -#define GOTGCTL_BSESVLD BIT(19) -#define GOTGCTL_ASESVLD BIT(18) -#define GOTGCTL_DBNC_SHORT BIT(17) -#define GOTGCTL_CONID_B BIT(16) -#define GOTGCTL_DBNCE_FLTR_BYPASS BIT(15) -#define GOTGCTL_DEVHNPEN BIT(11) -#define GOTGCTL_HSTSETHNPEN BIT(10) -#define GOTGCTL_HNPREQ BIT(9) -#define GOTGCTL_HSTNEGSCS BIT(8) -#define GOTGCTL_SESREQ BIT(1) -#define GOTGCTL_SESREQSCS BIT(0) +#define GOTGCTL HSOTG_REG(0x000) +#define GOTGCTL_CHIRPEN BIT(27) +#define GOTGCTL_MULT_VALID_BC_MASK (0x1f << 22) +#define GOTGCTL_MULT_VALID_BC_SHIFT 22 +#define GOTGCTL_OTGVER BIT(20) +#define GOTGCTL_BSESVLD BIT(19) +#define GOTGCTL_ASESVLD BIT(18) +#define GOTGCTL_DBNC_SHORT BIT(17) +#define GOTGCTL_CONID_B BIT(16) +#define GOTGCTL_DBNCE_FLTR_BYPASS BIT(15) +#define GOTGCTL_DEVHNPEN BIT(11) +#define GOTGCTL_HSTSETHNPEN BIT(10) +#define GOTGCTL_HNPREQ BIT(9) +#define GOTGCTL_HSTNEGSCS BIT(8) +#define GOTGCTL_SESREQ BIT(1) +#define GOTGCTL_SESREQSCS BIT(0) -#define GOTGINT HSOTG_REG(0x004) -#define GOTGINT_DBNCE_DONE BIT(19) -#define GOTGINT_A_DEV_TOUT_CHG BIT(18) -#define GOTGINT_HST_NEG_DET BIT(17) -#define GOTGINT_HST_NEG_SUC_STS_CHNG BIT(9) -#define GOTGINT_SES_REQ_SUC_STS_CHNG BIT(8) -#define GOTGINT_SES_END_DET BIT(2) +#define GOTGINT HSOTG_REG(0x004) +#define GOTGINT_DBNCE_DONE BIT(19) +#define GOTGINT_A_DEV_TOUT_CHG BIT(18) +#define GOTGINT_HST_NEG_DET BIT(17) +#define GOTGINT_HST_NEG_SUC_STS_CHNG BIT(9) +#define GOTGINT_SES_REQ_SUC_STS_CHNG BIT(8) +#define GOTGINT_SES_END_DET BIT(2) -#define GAHBCFG HSOTG_REG(0x008) -#define GAHBCFG_AHB_SINGLE BIT(23) -#define GAHBCFG_NOTI_ALL_DMA_WRIT BIT(22) -#define GAHBCFG_REM_MEM_SUPP BIT(21) -#define GAHBCFG_P_TXF_EMP_LVL BIT(8) 
-#define GAHBCFG_NP_TXF_EMP_LVL BIT(7) -#define GAHBCFG_DMA_EN BIT(5) -#define GAHBCFG_HBSTLEN_MASK (0xf << 1) -#define GAHBCFG_HBSTLEN_SHIFT 1 -#define GAHBCFG_HBSTLEN_SINGLE 0 -#define GAHBCFG_HBSTLEN_INCR 1 -#define GAHBCFG_HBSTLEN_INCR4 3 -#define GAHBCFG_HBSTLEN_INCR8 5 -#define GAHBCFG_HBSTLEN_INCR16 7 -#define GAHBCFG_GLBL_INTR_EN BIT(0) -#define GAHBCFG_CTRL_MASK (GAHBCFG_P_TXF_EMP_LVL | \ - GAHBCFG_NP_TXF_EMP_LVL | \ - GAHBCFG_DMA_EN | \ - GAHBCFG_GLBL_INTR_EN) +#define GAHBCFG HSOTG_REG(0x008) +#define GAHBCFG_AHB_SINGLE BIT(23) +#define GAHBCFG_NOTI_ALL_DMA_WRIT BIT(22) +#define GAHBCFG_REM_MEM_SUPP BIT(21) +#define GAHBCFG_P_TXF_EMP_LVL BIT(8) +#define GAHBCFG_NP_TXF_EMP_LVL BIT(7) +#define GAHBCFG_DMA_EN BIT(5) +#define GAHBCFG_HBSTLEN_MASK (0xf << 1) +#define GAHBCFG_HBSTLEN_SHIFT 1 +#define GAHBCFG_HBSTLEN_SINGLE 0 +#define GAHBCFG_HBSTLEN_INCR 1 +#define GAHBCFG_HBSTLEN_INCR4 3 +#define GAHBCFG_HBSTLEN_INCR8 5 +#define GAHBCFG_HBSTLEN_INCR16 7 +#define GAHBCFG_GLBL_INTR_EN BIT(0) +#define GAHBCFG_CTRL_MASK (GAHBCFG_P_TXF_EMP_LVL | \ + GAHBCFG_NP_TXF_EMP_LVL | \ + GAHBCFG_DMA_EN | \ + GAHBCFG_GLBL_INTR_EN) -#define GUSBCFG HSOTG_REG(0x00C) -#define GUSBCFG_FORCEDEVMODE BIT(30) -#define GUSBCFG_FORCEHOSTMODE BIT(29) -#define GUSBCFG_TXENDDELAY BIT(28) -#define GUSBCFG_ICTRAFFICPULLREMOVE BIT(27) -#define GUSBCFG_ICUSBCAP BIT(26) -#define GUSBCFG_ULPI_INT_PROT_DIS BIT(25) -#define GUSBCFG_INDICATORPASSTHROUGH BIT(24) -#define GUSBCFG_INDICATORCOMPLEMENT BIT(23) -#define GUSBCFG_TERMSELDLPULSE BIT(22) -#define GUSBCFG_ULPI_INT_VBUS_IND BIT(21) -#define GUSBCFG_ULPI_EXT_VBUS_DRV BIT(20) -#define GUSBCFG_ULPI_CLK_SUSP_M BIT(19) -#define GUSBCFG_ULPI_AUTO_RES BIT(18) -#define GUSBCFG_ULPI_FS_LS BIT(17) -#define GUSBCFG_OTG_UTMI_FS_SEL BIT(16) -#define GUSBCFG_PHY_LP_CLK_SEL BIT(15) -#define GUSBCFG_USBTRDTIM_MASK (0xf << 10) -#define GUSBCFG_USBTRDTIM_SHIFT 10 -#define GUSBCFG_HNPCAP BIT(9) -#define GUSBCFG_SRPCAP BIT(8) -#define GUSBCFG_DDRSEL BIT(7) -#define GUSBCFG_PHYSEL BIT(6) -#define GUSBCFG_FSINTF BIT(5) -#define GUSBCFG_ULPI_UTMI_SEL BIT(4) -#define GUSBCFG_PHYIF16 BIT(3) -#define GUSBCFG_PHYIF8 (0 << 3) -#define GUSBCFG_TOUTCAL_MASK (0x7 << 0) -#define GUSBCFG_TOUTCAL_SHIFT 0 -#define GUSBCFG_TOUTCAL_LIMIT 0x7 -#define GUSBCFG_TOUTCAL(_x) ((_x) << 0) +#define GUSBCFG HSOTG_REG(0x00C) +#define GUSBCFG_FORCEDEVMODE BIT(30) +#define GUSBCFG_FORCEHOSTMODE BIT(29) +#define GUSBCFG_TXENDDELAY BIT(28) +#define GUSBCFG_ICTRAFFICPULLREMOVE BIT(27) +#define GUSBCFG_ICUSBCAP BIT(26) +#define GUSBCFG_ULPI_INT_PROT_DIS BIT(25) +#define GUSBCFG_INDICATORPASSTHROUGH BIT(24) +#define GUSBCFG_INDICATORCOMPLEMENT BIT(23) +#define GUSBCFG_TERMSELDLPULSE BIT(22) +#define GUSBCFG_ULPI_INT_VBUS_IND BIT(21) +#define GUSBCFG_ULPI_EXT_VBUS_DRV BIT(20) +#define GUSBCFG_ULPI_CLK_SUSP_M BIT(19) +#define GUSBCFG_ULPI_AUTO_RES BIT(18) +#define GUSBCFG_ULPI_FS_LS BIT(17) +#define GUSBCFG_OTG_UTMI_FS_SEL BIT(16) +#define GUSBCFG_PHY_LP_CLK_SEL BIT(15) +#define GUSBCFG_USBTRDTIM_MASK (0xf << 10) +#define GUSBCFG_USBTRDTIM_SHIFT 10 +#define GUSBCFG_HNPCAP BIT(9) +#define GUSBCFG_SRPCAP BIT(8) +#define GUSBCFG_DDRSEL BIT(7) +#define GUSBCFG_PHYSEL BIT(6) +#define GUSBCFG_FSINTF BIT(5) +#define GUSBCFG_ULPI_UTMI_SEL BIT(4) +#define GUSBCFG_PHYIF16 BIT(3) +#define GUSBCFG_PHYIF8 (0 << 3) +#define GUSBCFG_TOUTCAL_MASK (0x7 << 0) +#define GUSBCFG_TOUTCAL_SHIFT 0 +#define GUSBCFG_TOUTCAL_LIMIT 0x7 +#define GUSBCFG_TOUTCAL(_x) ((_x) << 0) -#define GRSTCTL HSOTG_REG(0x010) -#define GRSTCTL_AHBIDLE BIT(31) 
-#define GRSTCTL_DMAREQ BIT(30) -#define GRSTCTL_TXFNUM_MASK (0x1f << 6) -#define GRSTCTL_TXFNUM_SHIFT 6 -#define GRSTCTL_TXFNUM_LIMIT 0x1f -#define GRSTCTL_TXFNUM(_x) ((_x) << 6) -#define GRSTCTL_TXFFLSH BIT(5) -#define GRSTCTL_RXFFLSH BIT(4) -#define GRSTCTL_IN_TKNQ_FLSH BIT(3) -#define GRSTCTL_FRMCNTRRST BIT(2) -#define GRSTCTL_HSFTRST BIT(1) -#define GRSTCTL_CSFTRST BIT(0) +#define GRSTCTL HSOTG_REG(0x010) +#define GRSTCTL_AHBIDLE BIT(31) +#define GRSTCTL_DMAREQ BIT(30) +#define GRSTCTL_TXFNUM_MASK (0x1f << 6) +#define GRSTCTL_TXFNUM_SHIFT 6 +#define GRSTCTL_TXFNUM_LIMIT 0x1f +#define GRSTCTL_TXFNUM(_x) ((_x) << 6) +#define GRSTCTL_TXFFLSH BIT(5) +#define GRSTCTL_RXFFLSH BIT(4) +#define GRSTCTL_IN_TKNQ_FLSH BIT(3) +#define GRSTCTL_FRMCNTRRST BIT(2) +#define GRSTCTL_HSFTRST BIT(1) +#define GRSTCTL_CSFTRST BIT(0) -#define GINTSTS HSOTG_REG(0x014) -#define GINTMSK HSOTG_REG(0x018) -#define GINTSTS_WKUPINT BIT(31) -#define GINTSTS_SESSREQINT BIT(30) -#define GINTSTS_DISCONNINT BIT(29) -#define GINTSTS_CONIDSTSCHNG BIT(28) -#define GINTSTS_LPMTRANRCVD BIT(27) -#define GINTSTS_PTXFEMP BIT(26) -#define GINTSTS_HCHINT BIT(25) -#define GINTSTS_PRTINT BIT(24) -#define GINTSTS_RESETDET BIT(23) -#define GINTSTS_FET_SUSP BIT(22) -#define GINTSTS_INCOMPL_IP BIT(21) -#define GINTSTS_INCOMPL_SOOUT BIT(21) -#define GINTSTS_INCOMPL_SOIN BIT(20) -#define GINTSTS_OEPINT BIT(19) -#define GINTSTS_IEPINT BIT(18) -#define GINTSTS_EPMIS BIT(17) -#define GINTSTS_RESTOREDONE BIT(16) -#define GINTSTS_EOPF BIT(15) -#define GINTSTS_ISOUTDROP BIT(14) -#define GINTSTS_ENUMDONE BIT(13) -#define GINTSTS_USBRST BIT(12) -#define GINTSTS_USBSUSP BIT(11) -#define GINTSTS_ERLYSUSP BIT(10) -#define GINTSTS_I2CINT BIT(9) -#define GINTSTS_ULPI_CK_INT BIT(8) -#define GINTSTS_GOUTNAKEFF BIT(7) -#define GINTSTS_GINNAKEFF BIT(6) -#define GINTSTS_NPTXFEMP BIT(5) -#define GINTSTS_RXFLVL BIT(4) -#define GINTSTS_SOF BIT(3) -#define GINTSTS_OTGINT BIT(2) -#define GINTSTS_MODEMIS BIT(1) -#define GINTSTS_CURMODE_HOST BIT(0) +#define GINTSTS HSOTG_REG(0x014) +#define GINTMSK HSOTG_REG(0x018) +#define GINTSTS_WKUPINT BIT(31) +#define GINTSTS_SESSREQINT BIT(30) +#define GINTSTS_DISCONNINT BIT(29) +#define GINTSTS_CONIDSTSCHNG BIT(28) +#define GINTSTS_LPMTRANRCVD BIT(27) +#define GINTSTS_PTXFEMP BIT(26) +#define GINTSTS_HCHINT BIT(25) +#define GINTSTS_PRTINT BIT(24) +#define GINTSTS_RESETDET BIT(23) +#define GINTSTS_FET_SUSP BIT(22) +#define GINTSTS_INCOMPL_IP BIT(21) +#define GINTSTS_INCOMPL_SOOUT BIT(21) +#define GINTSTS_INCOMPL_SOIN BIT(20) +#define GINTSTS_OEPINT BIT(19) +#define GINTSTS_IEPINT BIT(18) +#define GINTSTS_EPMIS BIT(17) +#define GINTSTS_RESTOREDONE BIT(16) +#define GINTSTS_EOPF BIT(15) +#define GINTSTS_ISOUTDROP BIT(14) +#define GINTSTS_ENUMDONE BIT(13) +#define GINTSTS_USBRST BIT(12) +#define GINTSTS_USBSUSP BIT(11) +#define GINTSTS_ERLYSUSP BIT(10) +#define GINTSTS_I2CINT BIT(9) +#define GINTSTS_ULPI_CK_INT BIT(8) +#define GINTSTS_GOUTNAKEFF BIT(7) +#define GINTSTS_GINNAKEFF BIT(6) +#define GINTSTS_NPTXFEMP BIT(5) +#define GINTSTS_RXFLVL BIT(4) +#define GINTSTS_SOF BIT(3) +#define GINTSTS_OTGINT BIT(2) +#define GINTSTS_MODEMIS BIT(1) +#define GINTSTS_CURMODE_HOST BIT(0) -#define GRXSTSR HSOTG_REG(0x01C) -#define GRXSTSP HSOTG_REG(0x020) -#define GRXSTS_FN_MASK (0x7f << 25) -#define GRXSTS_FN_SHIFT 25 -#define GRXSTS_PKTSTS_MASK (0xf << 17) -#define GRXSTS_PKTSTS_SHIFT 17 -#define GRXSTS_PKTSTS_GLOBALOUTNAK 1 -#define GRXSTS_PKTSTS_OUTRX 2 -#define GRXSTS_PKTSTS_HCHIN 2 -#define GRXSTS_PKTSTS_OUTDONE 3 -#define 
GRXSTS_PKTSTS_HCHIN_XFER_COMP 3 -#define GRXSTS_PKTSTS_SETUPDONE 4 -#define GRXSTS_PKTSTS_DATATOGGLEERR 5 -#define GRXSTS_PKTSTS_SETUPRX 6 -#define GRXSTS_PKTSTS_HCHHALTED 7 -#define GRXSTS_HCHNUM_MASK (0xf << 0) -#define GRXSTS_HCHNUM_SHIFT 0 -#define GRXSTS_DPID_MASK (0x3 << 15) -#define GRXSTS_DPID_SHIFT 15 -#define GRXSTS_BYTECNT_MASK (0x7ff << 4) -#define GRXSTS_BYTECNT_SHIFT 4 -#define GRXSTS_EPNUM_MASK (0xf << 0) -#define GRXSTS_EPNUM_SHIFT 0 +#define GRXSTSR HSOTG_REG(0x01C) +#define GRXSTSP HSOTG_REG(0x020) +#define GRXSTS_FN_MASK (0x7f << 25) +#define GRXSTS_FN_SHIFT 25 +#define GRXSTS_PKTSTS_MASK (0xf << 17) +#define GRXSTS_PKTSTS_SHIFT 17 +#define GRXSTS_PKTSTS_GLOBALOUTNAK 1 +#define GRXSTS_PKTSTS_OUTRX 2 +#define GRXSTS_PKTSTS_HCHIN 2 +#define GRXSTS_PKTSTS_OUTDONE 3 +#define GRXSTS_PKTSTS_HCHIN_XFER_COMP 3 +#define GRXSTS_PKTSTS_SETUPDONE 4 +#define GRXSTS_PKTSTS_DATATOGGLEERR 5 +#define GRXSTS_PKTSTS_SETUPRX 6 +#define GRXSTS_PKTSTS_HCHHALTED 7 +#define GRXSTS_HCHNUM_MASK (0xf << 0) +#define GRXSTS_HCHNUM_SHIFT 0 +#define GRXSTS_DPID_MASK (0x3 << 15) +#define GRXSTS_DPID_SHIFT 15 +#define GRXSTS_BYTECNT_MASK (0x7ff << 4) +#define GRXSTS_BYTECNT_SHIFT 4 +#define GRXSTS_EPNUM_MASK (0xf << 0) +#define GRXSTS_EPNUM_SHIFT 0 -#define GRXFSIZ HSOTG_REG(0x024) -#define GRXFSIZ_DEPTH_MASK (0xffff << 0) -#define GRXFSIZ_DEPTH_SHIFT 0 +#define GRXFSIZ HSOTG_REG(0x024) +#define GRXFSIZ_DEPTH_MASK (0xffff << 0) +#define GRXFSIZ_DEPTH_SHIFT 0 -#define GNPTXFSIZ HSOTG_REG(0x028) +#define GNPTXFSIZ HSOTG_REG(0x028) /* Use FIFOSIZE_* constants to access this register */ -#define GNPTXSTS HSOTG_REG(0x02C) -#define GNPTXSTS_NP_TXQ_TOP_MASK (0x7f << 24) -#define GNPTXSTS_NP_TXQ_TOP_SHIFT 24 -#define GNPTXSTS_NP_TXQ_SPC_AVAIL_MASK (0xff << 16) -#define GNPTXSTS_NP_TXQ_SPC_AVAIL_SHIFT 16 -#define GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(_v) (((_v) >> 16) & 0xff) -#define GNPTXSTS_NP_TXF_SPC_AVAIL_MASK (0xffff << 0) -#define GNPTXSTS_NP_TXF_SPC_AVAIL_SHIFT 0 -#define GNPTXSTS_NP_TXF_SPC_AVAIL_GET(_v) (((_v) >> 0) & 0xffff) +#define GNPTXSTS HSOTG_REG(0x02C) +#define GNPTXSTS_NP_TXQ_TOP_MASK (0x7f << 24) +#define GNPTXSTS_NP_TXQ_TOP_SHIFT 24 +#define GNPTXSTS_NP_TXQ_SPC_AVAIL_MASK (0xff << 16) +#define GNPTXSTS_NP_TXQ_SPC_AVAIL_SHIFT 16 +#define GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(_v) (((_v) >> 16) & 0xff) +#define GNPTXSTS_NP_TXF_SPC_AVAIL_MASK (0xffff << 0) +#define GNPTXSTS_NP_TXF_SPC_AVAIL_SHIFT 0 +#define GNPTXSTS_NP_TXF_SPC_AVAIL_GET(_v) (((_v) >> 0) & 0xffff) -#define GI2CCTL HSOTG_REG(0x0030) -#define GI2CCTL_BSYDNE BIT(31) -#define GI2CCTL_RW BIT(30) -#define GI2CCTL_I2CDATSE0 BIT(28) -#define GI2CCTL_I2CDEVADDR_MASK (0x3 << 26) -#define GI2CCTL_I2CDEVADDR_SHIFT 26 -#define GI2CCTL_I2CSUSPCTL BIT(25) -#define GI2CCTL_ACK BIT(24) -#define GI2CCTL_I2CEN BIT(23) -#define GI2CCTL_ADDR_MASK (0x7f << 16) -#define GI2CCTL_ADDR_SHIFT 16 -#define GI2CCTL_REGADDR_MASK (0xff << 8) -#define GI2CCTL_REGADDR_SHIFT 8 -#define GI2CCTL_RWDATA_MASK (0xff << 0) -#define GI2CCTL_RWDATA_SHIFT 0 +#define GI2CCTL HSOTG_REG(0x0030) +#define GI2CCTL_BSYDNE BIT(31) +#define GI2CCTL_RW BIT(30) +#define GI2CCTL_I2CDATSE0 BIT(28) +#define GI2CCTL_I2CDEVADDR_MASK (0x3 << 26) +#define GI2CCTL_I2CDEVADDR_SHIFT 26 +#define GI2CCTL_I2CSUSPCTL BIT(25) +#define GI2CCTL_ACK BIT(24) +#define GI2CCTL_I2CEN BIT(23) +#define GI2CCTL_ADDR_MASK (0x7f << 16) +#define GI2CCTL_ADDR_SHIFT 16 +#define GI2CCTL_REGADDR_MASK (0xff << 8) +#define GI2CCTL_REGADDR_SHIFT 8 +#define GI2CCTL_RWDATA_MASK (0xff << 0) +#define GI2CCTL_RWDATA_SHIFT 0 -#define 
GPVNDCTL HSOTG_REG(0x0034) -#define GGPIO HSOTG_REG(0x0038) -#define GGPIO_STM32_OTG_GCCFG_PWRDWN BIT(16) +#define GPVNDCTL HSOTG_REG(0x0034) +#define GGPIO HSOTG_REG(0x0038) +#define GGPIO_STM32_OTG_GCCFG_PWRDWN BIT(16) -#define GUID HSOTG_REG(0x003c) -#define GSNPSID HSOTG_REG(0x0040) -#define GHWCFG1 HSOTG_REG(0x0044) -#define GSNPSID_ID_MASK GENMASK(31, 16) +#define GUID HSOTG_REG(0x003c) +#define GSNPSID HSOTG_REG(0x0040) +#define GHWCFG1 HSOTG_REG(0x0044) +#define GSNPSID_ID_MASK GENMASK(31, 16) -#define GHWCFG2 HSOTG_REG(0x0048) -#define GHWCFG2_OTG_ENABLE_IC_USB BIT(31) -#define GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK (0x1f << 26) -#define GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT 26 -#define GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK (0x3 << 24) -#define GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT 24 -#define GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK (0x3 << 22) -#define GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT 22 -#define GHWCFG2_MULTI_PROC_INT BIT(20) -#define GHWCFG2_DYNAMIC_FIFO BIT(19) -#define GHWCFG2_PERIO_EP_SUPPORTED BIT(18) -#define GHWCFG2_NUM_HOST_CHAN_MASK (0xf << 14) -#define GHWCFG2_NUM_HOST_CHAN_SHIFT 14 -#define GHWCFG2_NUM_DEV_EP_MASK (0xf << 10) -#define GHWCFG2_NUM_DEV_EP_SHIFT 10 -#define GHWCFG2_FS_PHY_TYPE_MASK (0x3 << 8) -#define GHWCFG2_FS_PHY_TYPE_SHIFT 8 -#define GHWCFG2_FS_PHY_TYPE_NOT_SUPPORTED 0 -#define GHWCFG2_FS_PHY_TYPE_DEDICATED 1 -#define GHWCFG2_FS_PHY_TYPE_SHARED_UTMI 2 -#define GHWCFG2_FS_PHY_TYPE_SHARED_ULPI 3 -#define GHWCFG2_HS_PHY_TYPE_MASK (0x3 << 6) -#define GHWCFG2_HS_PHY_TYPE_SHIFT 6 -#define GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0 -#define GHWCFG2_HS_PHY_TYPE_UTMI 1 -#define GHWCFG2_HS_PHY_TYPE_ULPI 2 -#define GHWCFG2_HS_PHY_TYPE_UTMI_ULPI 3 -#define GHWCFG2_POINT2POINT BIT(5) -#define GHWCFG2_ARCHITECTURE_MASK (0x3 << 3) -#define GHWCFG2_ARCHITECTURE_SHIFT 3 -#define GHWCFG2_SLAVE_ONLY_ARCH 0 -#define GHWCFG2_EXT_DMA_ARCH 1 -#define GHWCFG2_INT_DMA_ARCH 2 -#define GHWCFG2_OP_MODE_MASK (0x7 << 0) -#define GHWCFG2_OP_MODE_SHIFT 0 -#define GHWCFG2_OP_MODE_HNP_SRP_CAPABLE 0 -#define GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE 1 -#define GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE 2 -#define GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3 -#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4 -#define GHWCFG2_OP_MODE_SRP_CAPABLE_HOST 5 -#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6 -#define GHWCFG2_OP_MODE_UNDEFINED 7 +#define GHWCFG2 HSOTG_REG(0x0048) +#define GHWCFG2_OTG_ENABLE_IC_USB BIT(31) +#define GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK (0x1f << 26) +#define GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT 26 +#define GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK (0x3 << 24) +#define GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT 24 +#define GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK (0x3 << 22) +#define GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT 22 +#define GHWCFG2_MULTI_PROC_INT BIT(20) +#define GHWCFG2_DYNAMIC_FIFO BIT(19) +#define GHWCFG2_PERIO_EP_SUPPORTED BIT(18) +#define GHWCFG2_NUM_HOST_CHAN_MASK (0xf << 14) +#define GHWCFG2_NUM_HOST_CHAN_SHIFT 14 +#define GHWCFG2_NUM_DEV_EP_MASK (0xf << 10) +#define GHWCFG2_NUM_DEV_EP_SHIFT 10 +#define GHWCFG2_FS_PHY_TYPE_MASK (0x3 << 8) +#define GHWCFG2_FS_PHY_TYPE_SHIFT 8 +#define GHWCFG2_FS_PHY_TYPE_NOT_SUPPORTED 0 +#define GHWCFG2_FS_PHY_TYPE_DEDICATED 1 +#define GHWCFG2_FS_PHY_TYPE_SHARED_UTMI 2 +#define GHWCFG2_FS_PHY_TYPE_SHARED_ULPI 3 +#define GHWCFG2_HS_PHY_TYPE_MASK (0x3 << 6) +#define GHWCFG2_HS_PHY_TYPE_SHIFT 6 +#define GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0 +#define GHWCFG2_HS_PHY_TYPE_UTMI 1 +#define GHWCFG2_HS_PHY_TYPE_ULPI 2 +#define GHWCFG2_HS_PHY_TYPE_UTMI_ULPI 3 +#define GHWCFG2_POINT2POINT BIT(5) +#define 
GHWCFG2_ARCHITECTURE_MASK (0x3 << 3) +#define GHWCFG2_ARCHITECTURE_SHIFT 3 +#define GHWCFG2_SLAVE_ONLY_ARCH 0 +#define GHWCFG2_EXT_DMA_ARCH 1 +#define GHWCFG2_INT_DMA_ARCH 2 +#define GHWCFG2_OP_MODE_MASK (0x7 << 0) +#define GHWCFG2_OP_MODE_SHIFT 0 +#define GHWCFG2_OP_MODE_HNP_SRP_CAPABLE 0 +#define GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE 1 +#define GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE 2 +#define GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3 +#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4 +#define GHWCFG2_OP_MODE_SRP_CAPABLE_HOST 5 +#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6 +#define GHWCFG2_OP_MODE_UNDEFINED 7 -#define GHWCFG3 HSOTG_REG(0x004c) -#define GHWCFG3_DFIFO_DEPTH_MASK (0xffff << 16) -#define GHWCFG3_DFIFO_DEPTH_SHIFT 16 -#define GHWCFG3_OTG_LPM_EN BIT(15) -#define GHWCFG3_BC_SUPPORT BIT(14) -#define GHWCFG3_OTG_ENABLE_HSIC BIT(13) -#define GHWCFG3_ADP_SUPP BIT(12) -#define GHWCFG3_SYNCH_RESET_TYPE BIT(11) -#define GHWCFG3_OPTIONAL_FEATURES BIT(10) -#define GHWCFG3_VENDOR_CTRL_IF BIT(9) -#define GHWCFG3_I2C BIT(8) -#define GHWCFG3_OTG_FUNC BIT(7) -#define GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK (0x7 << 4) -#define GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT 4 -#define GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK (0xf << 0) -#define GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT 0 +#define GHWCFG3 HSOTG_REG(0x004c) +#define GHWCFG3_DFIFO_DEPTH_MASK (0xffff << 16) +#define GHWCFG3_DFIFO_DEPTH_SHIFT 16 +#define GHWCFG3_OTG_LPM_EN BIT(15) +#define GHWCFG3_BC_SUPPORT BIT(14) +#define GHWCFG3_OTG_ENABLE_HSIC BIT(13) +#define GHWCFG3_ADP_SUPP BIT(12) +#define GHWCFG3_SYNCH_RESET_TYPE BIT(11) +#define GHWCFG3_OPTIONAL_FEATURES BIT(10) +#define GHWCFG3_VENDOR_CTRL_IF BIT(9) +#define GHWCFG3_I2C BIT(8) +#define GHWCFG3_OTG_FUNC BIT(7) +#define GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK (0x7 << 4) +#define GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT 4 +#define GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK (0xf << 0) +#define GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT 0 -#define GHWCFG4 HSOTG_REG(0x0050) -#define GHWCFG4_DESC_DMA_DYN BIT(31) -#define GHWCFG4_DESC_DMA BIT(30) -#define GHWCFG4_NUM_IN_EPS_MASK (0xf << 26) -#define GHWCFG4_NUM_IN_EPS_SHIFT 26 -#define GHWCFG4_DED_FIFO_EN BIT(25) -#define GHWCFG4_DED_FIFO_SHIFT 25 -#define GHWCFG4_SESSION_END_FILT_EN BIT(24) -#define GHWCFG4_B_VALID_FILT_EN BIT(23) -#define GHWCFG4_A_VALID_FILT_EN BIT(22) -#define GHWCFG4_VBUS_VALID_FILT_EN BIT(21) -#define GHWCFG4_IDDIG_FILT_EN BIT(20) -#define GHWCFG4_NUM_DEV_MODE_CTRL_EP_MASK (0xf << 16) -#define GHWCFG4_NUM_DEV_MODE_CTRL_EP_SHIFT 16 -#define GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK (0x3 << 14) -#define GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT 14 -#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8 0 -#define GHWCFG4_UTMI_PHY_DATA_WIDTH_16 1 -#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16 2 -#define GHWCFG4_ACG_SUPPORTED BIT(12) -#define GHWCFG4_IPG_ISOC_SUPPORTED BIT(11) +#define GHWCFG4 HSOTG_REG(0x0050) +#define GHWCFG4_DESC_DMA_DYN BIT(31) +#define GHWCFG4_DESC_DMA BIT(30) +#define GHWCFG4_NUM_IN_EPS_MASK (0xf << 26) +#define GHWCFG4_NUM_IN_EPS_SHIFT 26 +#define GHWCFG4_DED_FIFO_EN BIT(25) +#define GHWCFG4_DED_FIFO_SHIFT 25 +#define GHWCFG4_SESSION_END_FILT_EN BIT(24) +#define GHWCFG4_B_VALID_FILT_EN BIT(23) +#define GHWCFG4_A_VALID_FILT_EN BIT(22) +#define GHWCFG4_VBUS_VALID_FILT_EN BIT(21) +#define GHWCFG4_IDDIG_FILT_EN BIT(20) +#define GHWCFG4_NUM_DEV_MODE_CTRL_EP_MASK (0xf << 16) +#define GHWCFG4_NUM_DEV_MODE_CTRL_EP_SHIFT 16 +#define GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK (0x3 << 14) +#define GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT 14 +#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8 0 +#define 
GHWCFG4_UTMI_PHY_DATA_WIDTH_16 1 +#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16 2 +#define GHWCFG4_ACG_SUPPORTED BIT(12) +#define GHWCFG4_IPG_ISOC_SUPPORTED BIT(11) #define GHWCFG4_SERVICE_INTERVAL_SUPPORTED BIT(10) -#define GHWCFG4_XHIBER BIT(7) -#define GHWCFG4_HIBER BIT(6) -#define GHWCFG4_MIN_AHB_FREQ BIT(5) -#define GHWCFG4_POWER_OPTIMIZ BIT(4) -#define GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK (0xf << 0) -#define GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT 0 +#define GHWCFG4_XHIBER BIT(7) +#define GHWCFG4_HIBER BIT(6) +#define GHWCFG4_MIN_AHB_FREQ BIT(5) +#define GHWCFG4_POWER_OPTIMIZ BIT(4) +#define GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK (0xf << 0) +#define GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT 0 -#define GLPMCFG HSOTG_REG(0x0054) -#define GLPMCFG_INVSELHSIC BIT(31) -#define GLPMCFG_HSICCON BIT(30) -#define GLPMCFG_RSTRSLPSTS BIT(29) -#define GLPMCFG_ENBESL BIT(28) -#define GLPMCFG_LPM_RETRYCNT_STS_MASK (0x7 << 25) -#define GLPMCFG_LPM_RETRYCNT_STS_SHIFT 25 -#define GLPMCFG_SNDLPM BIT(24) -#define GLPMCFG_RETRY_CNT_MASK (0x7 << 21) -#define GLPMCFG_RETRY_CNT_SHIFT 21 -#define GLPMCFG_LPM_REJECT_CTRL_CONTROL BIT(21) -#define GLPMCFG_LPM_ACCEPT_CTRL_ISOC BIT(22) -#define GLPMCFG_LPM_CHNL_INDX_MASK (0xf << 17) -#define GLPMCFG_LPM_CHNL_INDX_SHIFT 17 -#define GLPMCFG_L1RESUMEOK BIT(16) -#define GLPMCFG_SLPSTS BIT(15) -#define GLPMCFG_COREL1RES_MASK (0x3 << 13) -#define GLPMCFG_COREL1RES_SHIFT 13 -#define GLPMCFG_HIRD_THRES_MASK (0x1f << 8) -#define GLPMCFG_HIRD_THRES_SHIFT 8 -#define GLPMCFG_HIRD_THRES_EN (0x10 << 8) -#define GLPMCFG_ENBLSLPM BIT(7) -#define GLPMCFG_BREMOTEWAKE BIT(6) -#define GLPMCFG_HIRD_MASK (0xf << 2) -#define GLPMCFG_HIRD_SHIFT 2 -#define GLPMCFG_APPL1RES BIT(1) -#define GLPMCFG_LPMCAP BIT(0) +#define GLPMCFG HSOTG_REG(0x0054) +#define GLPMCFG_INVSELHSIC BIT(31) +#define GLPMCFG_HSICCON BIT(30) +#define GLPMCFG_RSTRSLPSTS BIT(29) +#define GLPMCFG_ENBESL BIT(28) +#define GLPMCFG_LPM_RETRYCNT_STS_MASK (0x7 << 25) +#define GLPMCFG_LPM_RETRYCNT_STS_SHIFT 25 +#define GLPMCFG_SNDLPM BIT(24) +#define GLPMCFG_RETRY_CNT_MASK (0x7 << 21) +#define GLPMCFG_RETRY_CNT_SHIFT 21 +#define GLPMCFG_LPM_REJECT_CTRL_CONTROL BIT(21) +#define GLPMCFG_LPM_ACCEPT_CTRL_ISOC BIT(22) +#define GLPMCFG_LPM_CHNL_INDX_MASK (0xf << 17) +#define GLPMCFG_LPM_CHNL_INDX_SHIFT 17 +#define GLPMCFG_L1RESUMEOK BIT(16) +#define GLPMCFG_SLPSTS BIT(15) +#define GLPMCFG_COREL1RES_MASK (0x3 << 13) +#define GLPMCFG_COREL1RES_SHIFT 13 +#define GLPMCFG_HIRD_THRES_MASK (0x1f << 8) +#define GLPMCFG_HIRD_THRES_SHIFT 8 +#define GLPMCFG_HIRD_THRES_EN (0x10 << 8) +#define GLPMCFG_ENBLSLPM BIT(7) +#define GLPMCFG_BREMOTEWAKE BIT(6) +#define GLPMCFG_HIRD_MASK (0xf << 2) +#define GLPMCFG_HIRD_SHIFT 2 +#define GLPMCFG_APPL1RES BIT(1) +#define GLPMCFG_LPMCAP BIT(0) -#define GPWRDN HSOTG_REG(0x0058) -#define GPWRDN_MULT_VAL_ID_BC_MASK (0x1f << 24) -#define GPWRDN_MULT_VAL_ID_BC_SHIFT 24 -#define GPWRDN_ADP_INT BIT(23) -#define GPWRDN_BSESSVLD BIT(22) -#define GPWRDN_IDSTS BIT(21) -#define GPWRDN_LINESTATE_MASK (0x3 << 19) -#define GPWRDN_LINESTATE_SHIFT 19 -#define GPWRDN_STS_CHGINT_MSK BIT(18) -#define GPWRDN_STS_CHGINT BIT(17) -#define GPWRDN_SRP_DET_MSK BIT(16) -#define GPWRDN_SRP_DET BIT(15) -#define GPWRDN_CONNECT_DET_MSK BIT(14) -#define GPWRDN_CONNECT_DET BIT(13) -#define GPWRDN_DISCONN_DET_MSK BIT(12) -#define GPWRDN_DISCONN_DET BIT(11) -#define GPWRDN_RST_DET_MSK BIT(10) -#define GPWRDN_RST_DET BIT(9) -#define GPWRDN_LNSTSCHG_MSK BIT(8) -#define GPWRDN_LNSTSCHG BIT(7) -#define GPWRDN_DIS_VBUS BIT(6) -#define GPWRDN_PWRDNSWTCH BIT(5) -#define 
GPWRDN_PWRDNRSTN BIT(4) -#define GPWRDN_PWRDNCLMP BIT(3) -#define GPWRDN_RESTORE BIT(2) -#define GPWRDN_PMUACTV BIT(1) -#define GPWRDN_PMUINTSEL BIT(0) +#define GPWRDN HSOTG_REG(0x0058) +#define GPWRDN_MULT_VAL_ID_BC_MASK (0x1f << 24) +#define GPWRDN_MULT_VAL_ID_BC_SHIFT 24 +#define GPWRDN_ADP_INT BIT(23) +#define GPWRDN_BSESSVLD BIT(22) +#define GPWRDN_IDSTS BIT(21) +#define GPWRDN_LINESTATE_MASK (0x3 << 19) +#define GPWRDN_LINESTATE_SHIFT 19 +#define GPWRDN_STS_CHGINT_MSK BIT(18) +#define GPWRDN_STS_CHGINT BIT(17) +#define GPWRDN_SRP_DET_MSK BIT(16) +#define GPWRDN_SRP_DET BIT(15) +#define GPWRDN_CONNECT_DET_MSK BIT(14) +#define GPWRDN_CONNECT_DET BIT(13) +#define GPWRDN_DISCONN_DET_MSK BIT(12) +#define GPWRDN_DISCONN_DET BIT(11) +#define GPWRDN_RST_DET_MSK BIT(10) +#define GPWRDN_RST_DET BIT(9) +#define GPWRDN_LNSTSCHG_MSK BIT(8) +#define GPWRDN_LNSTSCHG BIT(7) +#define GPWRDN_DIS_VBUS BIT(6) +#define GPWRDN_PWRDNSWTCH BIT(5) +#define GPWRDN_PWRDNRSTN BIT(4) +#define GPWRDN_PWRDNCLMP BIT(3) +#define GPWRDN_RESTORE BIT(2) +#define GPWRDN_PMUACTV BIT(1) +#define GPWRDN_PMUINTSEL BIT(0) -#define GDFIFOCFG HSOTG_REG(0x005c) -#define GDFIFOCFG_EPINFOBASE_MASK (0xffff << 16) -#define GDFIFOCFG_EPINFOBASE_SHIFT 16 -#define GDFIFOCFG_GDFIFOCFG_MASK (0xffff << 0) -#define GDFIFOCFG_GDFIFOCFG_SHIFT 0 +#define GDFIFOCFG HSOTG_REG(0x005c) +#define GDFIFOCFG_EPINFOBASE_MASK (0xffff << 16) +#define GDFIFOCFG_EPINFOBASE_SHIFT 16 +#define GDFIFOCFG_GDFIFOCFG_MASK (0xffff << 0) +#define GDFIFOCFG_GDFIFOCFG_SHIFT 0 -#define ADPCTL HSOTG_REG(0x0060) -#define ADPCTL_AR_MASK (0x3 << 27) -#define ADPCTL_AR_SHIFT 27 -#define ADPCTL_ADP_TMOUT_INT_MSK BIT(26) -#define ADPCTL_ADP_SNS_INT_MSK BIT(25) -#define ADPCTL_ADP_PRB_INT_MSK BIT(24) -#define ADPCTL_ADP_TMOUT_INT BIT(23) -#define ADPCTL_ADP_SNS_INT BIT(22) -#define ADPCTL_ADP_PRB_INT BIT(21) -#define ADPCTL_ADPENA BIT(20) -#define ADPCTL_ADPRES BIT(19) -#define ADPCTL_ENASNS BIT(18) -#define ADPCTL_ENAPRB BIT(17) -#define ADPCTL_RTIM_MASK (0x7ff << 6) -#define ADPCTL_RTIM_SHIFT 6 -#define ADPCTL_PRB_PER_MASK (0x3 << 4) -#define ADPCTL_PRB_PER_SHIFT 4 -#define ADPCTL_PRB_DELTA_MASK (0x3 << 2) -#define ADPCTL_PRB_DELTA_SHIFT 2 -#define ADPCTL_PRB_DSCHRG_MASK (0x3 << 0) -#define ADPCTL_PRB_DSCHRG_SHIFT 0 +#define ADPCTL HSOTG_REG(0x0060) +#define ADPCTL_AR_MASK (0x3 << 27) +#define ADPCTL_AR_SHIFT 27 +#define ADPCTL_ADP_TMOUT_INT_MSK BIT(26) +#define ADPCTL_ADP_SNS_INT_MSK BIT(25) +#define ADPCTL_ADP_PRB_INT_MSK BIT(24) +#define ADPCTL_ADP_TMOUT_INT BIT(23) +#define ADPCTL_ADP_SNS_INT BIT(22) +#define ADPCTL_ADP_PRB_INT BIT(21) +#define ADPCTL_ADPENA BIT(20) +#define ADPCTL_ADPRES BIT(19) +#define ADPCTL_ENASNS BIT(18) +#define ADPCTL_ENAPRB BIT(17) +#define ADPCTL_RTIM_MASK (0x7ff << 6) +#define ADPCTL_RTIM_SHIFT 6 +#define ADPCTL_PRB_PER_MASK (0x3 << 4) +#define ADPCTL_PRB_PER_SHIFT 4 +#define ADPCTL_PRB_DELTA_MASK (0x3 << 2) +#define ADPCTL_PRB_DELTA_SHIFT 2 +#define ADPCTL_PRB_DSCHRG_MASK (0x3 << 0) +#define ADPCTL_PRB_DSCHRG_SHIFT 0 -#define GREFCLK HSOTG_REG(0x0064) -#define GREFCLK_REFCLKPER_MASK (0x1ffff << 15) -#define GREFCLK_REFCLKPER_SHIFT 15 -#define GREFCLK_REF_CLK_MODE BIT(14) -#define GREFCLK_SOF_CNT_WKUP_ALERT_MASK (0x3ff) +#define GREFCLK HSOTG_REG(0x0064) +#define GREFCLK_REFCLKPER_MASK (0x1ffff << 15) +#define GREFCLK_REFCLKPER_SHIFT 15 +#define GREFCLK_REF_CLK_MODE BIT(14) +#define GREFCLK_SOF_CNT_WKUP_ALERT_MASK (0x3ff) #define GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT 0 -#define GINTMSK2 HSOTG_REG(0x0068) -#define GINTMSK2_WKUP_ALERT_INT_MSK 
BIT(0) +#define GINTMSK2 HSOTG_REG(0x0068) +#define GINTMSK2_WKUP_ALERT_INT_MSK BIT(0) -#define GINTSTS2 HSOTG_REG(0x006c) -#define GINTSTS2_WKUP_ALERT_INT BIT(0) +#define GINTSTS2 HSOTG_REG(0x006c) +#define GINTSTS2_WKUP_ALERT_INT BIT(0) -#define HPTXFSIZ HSOTG_REG(0x100) +#define HPTXFSIZ HSOTG_REG(0x100) /* Use FIFOSIZE_* constants to access this register */ -#define DPTXFSIZN(_a) HSOTG_REG(0x104 + (((_a) - 1) * 4)) +#define DPTXFSIZN(_a) HSOTG_REG(0x104 + (((_a) - 1) * 4)) /* Use FIFOSIZE_* constants to access this register */ /* These apply to the GNPTXFSIZ, HPTXFSIZ and DPTXFSIZN registers */ -#define FIFOSIZE_DEPTH_MASK (0xffff << 16) -#define FIFOSIZE_DEPTH_SHIFT 16 -#define FIFOSIZE_STARTADDR_MASK (0xffff << 0) -#define FIFOSIZE_STARTADDR_SHIFT 0 -#define FIFOSIZE_DEPTH_GET(_x) (((_x) >> 16) & 0xffff) +#define FIFOSIZE_DEPTH_MASK (0xffff << 16) +#define FIFOSIZE_DEPTH_SHIFT 16 +#define FIFOSIZE_STARTADDR_MASK (0xffff << 0) +#define FIFOSIZE_STARTADDR_SHIFT 0 +#define FIFOSIZE_DEPTH_GET(_x) (((_x) >> 16) & 0xffff) /* Device mode registers */ -#define DCFG HSOTG_REG(0x800) -#define DCFG_DESCDMA_EN BIT(23) -#define DCFG_EPMISCNT_MASK (0x1f << 18) -#define DCFG_EPMISCNT_SHIFT 18 -#define DCFG_EPMISCNT_LIMIT 0x1f -#define DCFG_EPMISCNT(_x) ((_x) << 18) -#define DCFG_IPG_ISOC_SUPPORDED BIT(17) -#define DCFG_PERFRINT_MASK (0x3 << 11) -#define DCFG_PERFRINT_SHIFT 11 -#define DCFG_PERFRINT_LIMIT 0x3 -#define DCFG_PERFRINT(_x) ((_x) << 11) -#define DCFG_DEVADDR_MASK (0x7f << 4) -#define DCFG_DEVADDR_SHIFT 4 -#define DCFG_DEVADDR_LIMIT 0x7f -#define DCFG_DEVADDR(_x) ((_x) << 4) -#define DCFG_NZ_STS_OUT_HSHK BIT(2) -#define DCFG_DEVSPD_MASK (0x3 << 0) -#define DCFG_DEVSPD_SHIFT 0 -#define DCFG_DEVSPD_HS 0 -#define DCFG_DEVSPD_FS 1 -#define DCFG_DEVSPD_LS 2 -#define DCFG_DEVSPD_FS48 3 +#define DCFG HSOTG_REG(0x800) +#define DCFG_DESCDMA_EN BIT(23) +#define DCFG_EPMISCNT_MASK (0x1f << 18) +#define DCFG_EPMISCNT_SHIFT 18 +#define DCFG_EPMISCNT_LIMIT 0x1f +#define DCFG_EPMISCNT(_x) ((_x) << 18) +#define DCFG_IPG_ISOC_SUPPORDED BIT(17) +#define DCFG_PERFRINT_MASK (0x3 << 11) +#define DCFG_PERFRINT_SHIFT 11 +#define DCFG_PERFRINT_LIMIT 0x3 +#define DCFG_PERFRINT(_x) ((_x) << 11) +#define DCFG_DEVADDR_MASK (0x7f << 4) +#define DCFG_DEVADDR_SHIFT 4 +#define DCFG_DEVADDR_LIMIT 0x7f +#define DCFG_DEVADDR(_x) ((_x) << 4) +#define DCFG_NZ_STS_OUT_HSHK BIT(2) +#define DCFG_DEVSPD_MASK (0x3 << 0) +#define DCFG_DEVSPD_SHIFT 0 +#define DCFG_DEVSPD_HS 0 +#define DCFG_DEVSPD_FS 1 +#define DCFG_DEVSPD_LS 2 +#define DCFG_DEVSPD_FS48 3 -#define DCTL HSOTG_REG(0x804) +#define DCTL HSOTG_REG(0x804) #define DCTL_SERVICE_INTERVAL_SUPPORTED BIT(19) -#define DCTL_PWRONPRGDONE BIT(11) -#define DCTL_CGOUTNAK BIT(10) -#define DCTL_SGOUTNAK BIT(9) -#define DCTL_CGNPINNAK BIT(8) -#define DCTL_SGNPINNAK BIT(7) -#define DCTL_TSTCTL_MASK (0x7 << 4) -#define DCTL_TSTCTL_SHIFT 4 -#define DCTL_GOUTNAKSTS BIT(3) -#define DCTL_GNPINNAKSTS BIT(2) -#define DCTL_SFTDISCON BIT(1) -#define DCTL_RMTWKUPSIG BIT(0) +#define DCTL_PWRONPRGDONE BIT(11) +#define DCTL_CGOUTNAK BIT(10) +#define DCTL_SGOUTNAK BIT(9) +#define DCTL_CGNPINNAK BIT(8) +#define DCTL_SGNPINNAK BIT(7) +#define DCTL_TSTCTL_MASK (0x7 << 4) +#define DCTL_TSTCTL_SHIFT 4 +#define DCTL_GOUTNAKSTS BIT(3) +#define DCTL_GNPINNAKSTS BIT(2) +#define DCTL_SFTDISCON BIT(1) +#define DCTL_RMTWKUPSIG BIT(0) -#define DSTS HSOTG_REG(0x808) -#define DSTS_SOFFN_MASK (0x3fff << 8) -#define DSTS_SOFFN_SHIFT 8 -#define DSTS_SOFFN_LIMIT 0x3fff -#define DSTS_SOFFN(_x) ((_x) << 8) -#define 
DSTS_ERRATICERR BIT(3) -#define DSTS_ENUMSPD_MASK (0x3 << 1) -#define DSTS_ENUMSPD_SHIFT 1 -#define DSTS_ENUMSPD_HS 0 -#define DSTS_ENUMSPD_FS 1 -#define DSTS_ENUMSPD_LS 2 -#define DSTS_ENUMSPD_FS48 3 -#define DSTS_SUSPSTS BIT(0) +#define DSTS HSOTG_REG(0x808) +#define DSTS_SOFFN_MASK (0x3fff << 8) +#define DSTS_SOFFN_SHIFT 8 +#define DSTS_SOFFN_LIMIT 0x3fff +#define DSTS_SOFFN(_x) ((_x) << 8) +#define DSTS_ERRATICERR BIT(3) +#define DSTS_ENUMSPD_MASK (0x3 << 1) +#define DSTS_ENUMSPD_SHIFT 1 +#define DSTS_ENUMSPD_HS 0 +#define DSTS_ENUMSPD_FS 1 +#define DSTS_ENUMSPD_LS 2 +#define DSTS_ENUMSPD_FS48 3 +#define DSTS_SUSPSTS BIT(0) -#define DIEPMSK HSOTG_REG(0x810) -#define DIEPMSK_NAKMSK BIT(13) -#define DIEPMSK_BNAININTRMSK BIT(9) -#define DIEPMSK_TXFIFOUNDRNMSK BIT(8) -#define DIEPMSK_TXFIFOEMPTY BIT(7) -#define DIEPMSK_INEPNAKEFFMSK BIT(6) -#define DIEPMSK_INTKNEPMISMSK BIT(5) -#define DIEPMSK_INTKNTXFEMPMSK BIT(4) -#define DIEPMSK_TIMEOUTMSK BIT(3) -#define DIEPMSK_AHBERRMSK BIT(2) -#define DIEPMSK_EPDISBLDMSK BIT(1) -#define DIEPMSK_XFERCOMPLMSK BIT(0) +#define DIEPMSK HSOTG_REG(0x810) +#define DIEPMSK_NAKMSK BIT(13) +#define DIEPMSK_BNAININTRMSK BIT(9) +#define DIEPMSK_TXFIFOUNDRNMSK BIT(8) +#define DIEPMSK_TXFIFOEMPTY BIT(7) +#define DIEPMSK_INEPNAKEFFMSK BIT(6) +#define DIEPMSK_INTKNEPMISMSK BIT(5) +#define DIEPMSK_INTKNTXFEMPMSK BIT(4) +#define DIEPMSK_TIMEOUTMSK BIT(3) +#define DIEPMSK_AHBERRMSK BIT(2) +#define DIEPMSK_EPDISBLDMSK BIT(1) +#define DIEPMSK_XFERCOMPLMSK BIT(0) -#define DOEPMSK HSOTG_REG(0x814) -#define DOEPMSK_BNAMSK BIT(9) -#define DOEPMSK_BACK2BACKSETUP BIT(6) -#define DOEPMSK_STSPHSERCVDMSK BIT(5) -#define DOEPMSK_OUTTKNEPDISMSK BIT(4) -#define DOEPMSK_SETUPMSK BIT(3) -#define DOEPMSK_AHBERRMSK BIT(2) -#define DOEPMSK_EPDISBLDMSK BIT(1) -#define DOEPMSK_XFERCOMPLMSK BIT(0) +#define DOEPMSK HSOTG_REG(0x814) +#define DOEPMSK_BNAMSK BIT(9) +#define DOEPMSK_BACK2BACKSETUP BIT(6) +#define DOEPMSK_STSPHSERCVDMSK BIT(5) +#define DOEPMSK_OUTTKNEPDISMSK BIT(4) +#define DOEPMSK_SETUPMSK BIT(3) +#define DOEPMSK_AHBERRMSK BIT(2) +#define DOEPMSK_EPDISBLDMSK BIT(1) +#define DOEPMSK_XFERCOMPLMSK BIT(0) -#define DAINT HSOTG_REG(0x818) -#define DAINTMSK HSOTG_REG(0x81C) -#define DAINT_OUTEP_SHIFT 16 -#define DAINT_OUTEP(_x) (1 << ((_x) + 16)) -#define DAINT_INEP(_x) (1 << (_x)) +#define DAINT HSOTG_REG(0x818) +#define DAINTMSK HSOTG_REG(0x81C) +#define DAINT_OUTEP_SHIFT 16 +#define DAINT_OUTEP(_x) (1 << ((_x) + 16)) +#define DAINT_INEP(_x) (1 << (_x)) -#define DTKNQR1 HSOTG_REG(0x820) -#define DTKNQR2 HSOTG_REG(0x824) -#define DTKNQR3 HSOTG_REG(0x830) -#define DTKNQR4 HSOTG_REG(0x834) -#define DIEPEMPMSK HSOTG_REG(0x834) +#define DTKNQR1 HSOTG_REG(0x820) +#define DTKNQR2 HSOTG_REG(0x824) +#define DTKNQR3 HSOTG_REG(0x830) +#define DTKNQR4 HSOTG_REG(0x834) +#define DIEPEMPMSK HSOTG_REG(0x834) -#define DVBUSDIS HSOTG_REG(0x828) -#define DVBUSPULSE HSOTG_REG(0x82C) +#define DVBUSDIS HSOTG_REG(0x828) +#define DVBUSPULSE HSOTG_REG(0x82C) -#define DIEPCTL0 HSOTG_REG(0x900) -#define DIEPCTL(_a) HSOTG_REG(0x900 + ((_a) * 0x20)) +#define DIEPCTL0 HSOTG_REG(0x900) +#define DIEPCTL(_a) HSOTG_REG(0x900 + ((_a) * 0x20)) -#define DOEPCTL0 HSOTG_REG(0xB00) -#define DOEPCTL(_a) HSOTG_REG(0xB00 + ((_a) * 0x20)) +#define DOEPCTL0 HSOTG_REG(0xB00) +#define DOEPCTL(_a) HSOTG_REG(0xB00 + ((_a) * 0x20)) /* EP0 specialness: * bits[29..28] - reserved (no SetD0PID, SetD1PID) * bits[25..22] - should always be zero, this isn't a periodic endpoint * bits[10..0] - MPS setting different for EP0 */ -#define 
D0EPCTL_MPS_MASK (0x3 << 0) -#define D0EPCTL_MPS_SHIFT 0 -#define D0EPCTL_MPS_64 0 -#define D0EPCTL_MPS_32 1 -#define D0EPCTL_MPS_16 2 -#define D0EPCTL_MPS_8 3 +#define D0EPCTL_MPS_MASK (0x3 << 0) +#define D0EPCTL_MPS_SHIFT 0 +#define D0EPCTL_MPS_64 0 +#define D0EPCTL_MPS_32 1 +#define D0EPCTL_MPS_16 2 +#define D0EPCTL_MPS_8 3 -#define DXEPCTL_EPENA BIT(31) -#define DXEPCTL_EPDIS BIT(30) -#define DXEPCTL_SETD1PID BIT(29) -#define DXEPCTL_SETODDFR BIT(29) -#define DXEPCTL_SETD0PID BIT(28) -#define DXEPCTL_SETEVENFR BIT(28) -#define DXEPCTL_SNAK BIT(27) -#define DXEPCTL_CNAK BIT(26) -#define DXEPCTL_TXFNUM_MASK (0xf << 22) -#define DXEPCTL_TXFNUM_SHIFT 22 -#define DXEPCTL_TXFNUM_LIMIT 0xf -#define DXEPCTL_TXFNUM(_x) ((_x) << 22) -#define DXEPCTL_STALL BIT(21) -#define DXEPCTL_SNP BIT(20) -#define DXEPCTL_EPTYPE_MASK (0x3 << 18) -#define DXEPCTL_EPTYPE_CONTROL (0x0 << 18) -#define DXEPCTL_EPTYPE_ISO (0x1 << 18) -#define DXEPCTL_EPTYPE_BULK (0x2 << 18) -#define DXEPCTL_EPTYPE_INTERRUPT (0x3 << 18) +#define DXEPCTL_EPENA BIT(31) +#define DXEPCTL_EPDIS BIT(30) +#define DXEPCTL_SETD1PID BIT(29) +#define DXEPCTL_SETODDFR BIT(29) +#define DXEPCTL_SETD0PID BIT(28) +#define DXEPCTL_SETEVENFR BIT(28) +#define DXEPCTL_SNAK BIT(27) +#define DXEPCTL_CNAK BIT(26) +#define DXEPCTL_TXFNUM_MASK (0xf << 22) +#define DXEPCTL_TXFNUM_SHIFT 22 +#define DXEPCTL_TXFNUM_LIMIT 0xf +#define DXEPCTL_TXFNUM(_x) ((_x) << 22) +#define DXEPCTL_STALL BIT(21) +#define DXEPCTL_SNP BIT(20) +#define DXEPCTL_EPTYPE_MASK (0x3 << 18) +#define DXEPCTL_EPTYPE_CONTROL (0x0 << 18) +#define DXEPCTL_EPTYPE_ISO (0x1 << 18) +#define DXEPCTL_EPTYPE_BULK (0x2 << 18) +#define DXEPCTL_EPTYPE_INTERRUPT (0x3 << 18) -#define DXEPCTL_NAKSTS BIT(17) -#define DXEPCTL_DPID BIT(16) -#define DXEPCTL_EOFRNUM BIT(16) -#define DXEPCTL_USBACTEP BIT(15) -#define DXEPCTL_NEXTEP_MASK (0xf << 11) -#define DXEPCTL_NEXTEP_SHIFT 11 -#define DXEPCTL_NEXTEP_LIMIT 0xf -#define DXEPCTL_NEXTEP(_x) ((_x) << 11) -#define DXEPCTL_MPS_MASK (0x7ff << 0) -#define DXEPCTL_MPS_SHIFT 0 -#define DXEPCTL_MPS_LIMIT 0x7ff -#define DXEPCTL_MPS(_x) ((_x) << 0) +#define DXEPCTL_NAKSTS BIT(17) +#define DXEPCTL_DPID BIT(16) +#define DXEPCTL_EOFRNUM BIT(16) +#define DXEPCTL_USBACTEP BIT(15) +#define DXEPCTL_NEXTEP_MASK (0xf << 11) +#define DXEPCTL_NEXTEP_SHIFT 11 +#define DXEPCTL_NEXTEP_LIMIT 0xf +#define DXEPCTL_NEXTEP(_x) ((_x) << 11) +#define DXEPCTL_MPS_MASK (0x7ff << 0) +#define DXEPCTL_MPS_SHIFT 0 +#define DXEPCTL_MPS_LIMIT 0x7ff +#define DXEPCTL_MPS(_x) ((_x) << 0) -#define DIEPINT(_a) HSOTG_REG(0x908 + ((_a) * 0x20)) -#define DOEPINT(_a) HSOTG_REG(0xB08 + ((_a) * 0x20)) -#define DXEPINT_SETUP_RCVD BIT(15) -#define DXEPINT_NYETINTRPT BIT(14) -#define DXEPINT_NAKINTRPT BIT(13) -#define DXEPINT_BBLEERRINTRPT BIT(12) -#define DXEPINT_PKTDRPSTS BIT(11) -#define DXEPINT_BNAINTR BIT(9) -#define DXEPINT_TXFIFOUNDRN BIT(8) -#define DXEPINT_OUTPKTERR BIT(8) -#define DXEPINT_TXFEMP BIT(7) -#define DXEPINT_INEPNAKEFF BIT(6) -#define DXEPINT_BACK2BACKSETUP BIT(6) -#define DXEPINT_INTKNEPMIS BIT(5) -#define DXEPINT_STSPHSERCVD BIT(5) -#define DXEPINT_INTKNTXFEMP BIT(4) -#define DXEPINT_OUTTKNEPDIS BIT(4) -#define DXEPINT_TIMEOUT BIT(3) -#define DXEPINT_SETUP BIT(3) -#define DXEPINT_AHBERR BIT(2) -#define DXEPINT_EPDISBLD BIT(1) -#define DXEPINT_XFERCOMPL BIT(0) +#define DIEPINT(_a) HSOTG_REG(0x908 + ((_a) * 0x20)) +#define DOEPINT(_a) HSOTG_REG(0xB08 + ((_a) * 0x20)) +#define DXEPINT_SETUP_RCVD BIT(15) +#define DXEPINT_NYETINTRPT BIT(14) +#define DXEPINT_NAKINTRPT BIT(13) +#define 
DXEPINT_BBLEERRINTRPT BIT(12) +#define DXEPINT_PKTDRPSTS BIT(11) +#define DXEPINT_BNAINTR BIT(9) +#define DXEPINT_TXFIFOUNDRN BIT(8) +#define DXEPINT_OUTPKTERR BIT(8) +#define DXEPINT_TXFEMP BIT(7) +#define DXEPINT_INEPNAKEFF BIT(6) +#define DXEPINT_BACK2BACKSETUP BIT(6) +#define DXEPINT_INTKNEPMIS BIT(5) +#define DXEPINT_STSPHSERCVD BIT(5) +#define DXEPINT_INTKNTXFEMP BIT(4) +#define DXEPINT_OUTTKNEPDIS BIT(4) +#define DXEPINT_TIMEOUT BIT(3) +#define DXEPINT_SETUP BIT(3) +#define DXEPINT_AHBERR BIT(2) +#define DXEPINT_EPDISBLD BIT(1) +#define DXEPINT_XFERCOMPL BIT(0) -#define DIEPTSIZ0 HSOTG_REG(0x910) -#define DIEPTSIZ0_PKTCNT_MASK (0x3 << 19) -#define DIEPTSIZ0_PKTCNT_SHIFT 19 -#define DIEPTSIZ0_PKTCNT_LIMIT 0x3 -#define DIEPTSIZ0_PKTCNT(_x) ((_x) << 19) -#define DIEPTSIZ0_XFERSIZE_MASK (0x7f << 0) -#define DIEPTSIZ0_XFERSIZE_SHIFT 0 -#define DIEPTSIZ0_XFERSIZE_LIMIT 0x7f -#define DIEPTSIZ0_XFERSIZE(_x) ((_x) << 0) +#define DIEPTSIZ0 HSOTG_REG(0x910) +#define DIEPTSIZ0_PKTCNT_MASK (0x3 << 19) +#define DIEPTSIZ0_PKTCNT_SHIFT 19 +#define DIEPTSIZ0_PKTCNT_LIMIT 0x3 +#define DIEPTSIZ0_PKTCNT(_x) ((_x) << 19) +#define DIEPTSIZ0_XFERSIZE_MASK (0x7f << 0) +#define DIEPTSIZ0_XFERSIZE_SHIFT 0 +#define DIEPTSIZ0_XFERSIZE_LIMIT 0x7f +#define DIEPTSIZ0_XFERSIZE(_x) ((_x) << 0) -#define DOEPTSIZ0 HSOTG_REG(0xB10) -#define DOEPTSIZ0_SUPCNT_MASK (0x3 << 29) -#define DOEPTSIZ0_SUPCNT_SHIFT 29 -#define DOEPTSIZ0_SUPCNT_LIMIT 0x3 -#define DOEPTSIZ0_SUPCNT(_x) ((_x) << 29) -#define DOEPTSIZ0_PKTCNT BIT(19) -#define DOEPTSIZ0_XFERSIZE_MASK (0x7f << 0) -#define DOEPTSIZ0_XFERSIZE_SHIFT 0 +#define DOEPTSIZ0 HSOTG_REG(0xB10) +#define DOEPTSIZ0_SUPCNT_MASK (0x3 << 29) +#define DOEPTSIZ0_SUPCNT_SHIFT 29 +#define DOEPTSIZ0_SUPCNT_LIMIT 0x3 +#define DOEPTSIZ0_SUPCNT(_x) ((_x) << 29) +#define DOEPTSIZ0_PKTCNT BIT(19) +#define DOEPTSIZ0_XFERSIZE_MASK (0x7f << 0) +#define DOEPTSIZ0_XFERSIZE_SHIFT 0 -#define DIEPTSIZ(_a) HSOTG_REG(0x910 + ((_a) * 0x20)) -#define DOEPTSIZ(_a) HSOTG_REG(0xB10 + ((_a) * 0x20)) -#define DXEPTSIZ_MC_MASK (0x3 << 29) -#define DXEPTSIZ_MC_SHIFT 29 -#define DXEPTSIZ_MC_LIMIT 0x3 -#define DXEPTSIZ_MC(_x) ((_x) << 29) -#define DXEPTSIZ_PKTCNT_MASK (0x3ff << 19) -#define DXEPTSIZ_PKTCNT_SHIFT 19 -#define DXEPTSIZ_PKTCNT_LIMIT 0x3ff -#define DXEPTSIZ_PKTCNT_GET(_v) (((_v) >> 19) & 0x3ff) -#define DXEPTSIZ_PKTCNT(_x) ((_x) << 19) -#define DXEPTSIZ_XFERSIZE_MASK (0x7ffff << 0) -#define DXEPTSIZ_XFERSIZE_SHIFT 0 -#define DXEPTSIZ_XFERSIZE_LIMIT 0x7ffff -#define DXEPTSIZ_XFERSIZE_GET(_v) (((_v) >> 0) & 0x7ffff) -#define DXEPTSIZ_XFERSIZE(_x) ((_x) << 0) +#define DIEPTSIZ(_a) HSOTG_REG(0x910 + ((_a) * 0x20)) +#define DOEPTSIZ(_a) HSOTG_REG(0xB10 + ((_a) * 0x20)) +#define DXEPTSIZ_MC_MASK (0x3 << 29) +#define DXEPTSIZ_MC_SHIFT 29 +#define DXEPTSIZ_MC_LIMIT 0x3 +#define DXEPTSIZ_MC(_x) ((_x) << 29) +#define DXEPTSIZ_PKTCNT_MASK (0x3ff << 19) +#define DXEPTSIZ_PKTCNT_SHIFT 19 +#define DXEPTSIZ_PKTCNT_LIMIT 0x3ff +#define DXEPTSIZ_PKTCNT_GET(_v) (((_v) >> 19) & 0x3ff) +#define DXEPTSIZ_PKTCNT(_x) ((_x) << 19) +#define DXEPTSIZ_XFERSIZE_MASK (0x7ffff << 0) +#define DXEPTSIZ_XFERSIZE_SHIFT 0 +#define DXEPTSIZ_XFERSIZE_LIMIT 0x7ffff +#define DXEPTSIZ_XFERSIZE_GET(_v) (((_v) >> 0) & 0x7ffff) +#define DXEPTSIZ_XFERSIZE(_x) ((_x) << 0) -#define DIEPDMA(_a) HSOTG_REG(0x914 + ((_a) * 0x20)) -#define DOEPDMA(_a) HSOTG_REG(0xB14 + ((_a) * 0x20)) +#define DIEPDMA(_a) HSOTG_REG(0x914 + ((_a) * 0x20)) +#define DOEPDMA(_a) HSOTG_REG(0xB14 + ((_a) * 0x20)) -#define DTXFSTS(_a) HSOTG_REG(0x918 + ((_a) * 0x20)) +#define 
DTXFSTS(_a) HSOTG_REG(0x918 + ((_a) * 0x20)) -#define PCGCTL HSOTG_REG(0x0e00) -#define PCGCTL_IF_DEV_MODE BIT(31) -#define PCGCTL_P2HD_PRT_SPD_MASK (0x3 << 29) -#define PCGCTL_P2HD_PRT_SPD_SHIFT 29 -#define PCGCTL_P2HD_DEV_ENUM_SPD_MASK (0x3 << 27) -#define PCGCTL_P2HD_DEV_ENUM_SPD_SHIFT 27 -#define PCGCTL_MAC_DEV_ADDR_MASK (0x7f << 20) -#define PCGCTL_MAC_DEV_ADDR_SHIFT 20 -#define PCGCTL_MAX_TERMSEL BIT(19) -#define PCGCTL_MAX_XCVRSELECT_MASK (0x3 << 17) -#define PCGCTL_MAX_XCVRSELECT_SHIFT 17 -#define PCGCTL_PORT_POWER BIT(16) -#define PCGCTL_PRT_CLK_SEL_MASK (0x3 << 14) -#define PCGCTL_PRT_CLK_SEL_SHIFT 14 -#define PCGCTL_ESS_REG_RESTORED BIT(13) -#define PCGCTL_EXTND_HIBER_SWITCH BIT(12) -#define PCGCTL_EXTND_HIBER_PWRCLMP BIT(11) -#define PCGCTL_ENBL_EXTND_HIBER BIT(10) -#define PCGCTL_RESTOREMODE BIT(9) -#define PCGCTL_RESETAFTSUSP BIT(8) -#define PCGCTL_DEEP_SLEEP BIT(7) -#define PCGCTL_PHY_IN_SLEEP BIT(6) -#define PCGCTL_ENBL_SLEEP_GATING BIT(5) -#define PCGCTL_RSTPDWNMODULE BIT(3) -#define PCGCTL_PWRCLMP BIT(2) -#define PCGCTL_GATEHCLK BIT(1) -#define PCGCTL_STOPPCLK BIT(0) +#define PCGCTL HSOTG_REG(0x0e00) +#define PCGCTL_IF_DEV_MODE BIT(31) +#define PCGCTL_P2HD_PRT_SPD_MASK (0x3 << 29) +#define PCGCTL_P2HD_PRT_SPD_SHIFT 29 +#define PCGCTL_P2HD_DEV_ENUM_SPD_MASK (0x3 << 27) +#define PCGCTL_P2HD_DEV_ENUM_SPD_SHIFT 27 +#define PCGCTL_MAC_DEV_ADDR_MASK (0x7f << 20) +#define PCGCTL_MAC_DEV_ADDR_SHIFT 20 +#define PCGCTL_MAX_TERMSEL BIT(19) +#define PCGCTL_MAX_XCVRSELECT_MASK (0x3 << 17) +#define PCGCTL_MAX_XCVRSELECT_SHIFT 17 +#define PCGCTL_PORT_POWER BIT(16) +#define PCGCTL_PRT_CLK_SEL_MASK (0x3 << 14) +#define PCGCTL_PRT_CLK_SEL_SHIFT 14 +#define PCGCTL_ESS_REG_RESTORED BIT(13) +#define PCGCTL_EXTND_HIBER_SWITCH BIT(12) +#define PCGCTL_EXTND_HIBER_PWRCLMP BIT(11) +#define PCGCTL_ENBL_EXTND_HIBER BIT(10) +#define PCGCTL_RESTOREMODE BIT(9) +#define PCGCTL_RESETAFTSUSP BIT(8) +#define PCGCTL_DEEP_SLEEP BIT(7) +#define PCGCTL_PHY_IN_SLEEP BIT(6) +#define PCGCTL_ENBL_SLEEP_GATING BIT(5) +#define PCGCTL_RSTPDWNMODULE BIT(3) +#define PCGCTL_PWRCLMP BIT(2) +#define PCGCTL_GATEHCLK BIT(1) +#define PCGCTL_STOPPCLK BIT(0) #define PCGCCTL1 HSOTG_REG(0xe04) #define PCGCCTL1_TIMER (0x3 << 1) #define PCGCCTL1_GATEEN BIT(0) -#define EPFIFO(_a) HSOTG_REG(0x1000 + ((_a) * 0x1000)) +#define EPFIFO(_a) HSOTG_REG(0x1000 + ((_a) * 0x1000)) /* Host Mode Registers */ -#define HCFG HSOTG_REG(0x0400) -#define HCFG_MODECHTIMEN BIT(31) -#define HCFG_PERSCHEDENA BIT(26) -#define HCFG_FRLISTEN_MASK (0x3 << 24) -#define HCFG_FRLISTEN_SHIFT 24 -#define HCFG_FRLISTEN_8 (0 << 24) -#define FRLISTEN_8_SIZE 8 -#define HCFG_FRLISTEN_16 BIT(24) -#define FRLISTEN_16_SIZE 16 -#define HCFG_FRLISTEN_32 (2 << 24) -#define FRLISTEN_32_SIZE 32 -#define HCFG_FRLISTEN_64 (3 << 24) -#define FRLISTEN_64_SIZE 64 -#define HCFG_DESCDMA BIT(23) -#define HCFG_RESVALID_MASK (0xff << 8) -#define HCFG_RESVALID_SHIFT 8 -#define HCFG_ENA32KHZ BIT(7) -#define HCFG_FSLSSUPP BIT(2) -#define HCFG_FSLSPCLKSEL_MASK (0x3 << 0) -#define HCFG_FSLSPCLKSEL_SHIFT 0 -#define HCFG_FSLSPCLKSEL_30_60_MHZ 0 -#define HCFG_FSLSPCLKSEL_48_MHZ 1 -#define HCFG_FSLSPCLKSEL_6_MHZ 2 +#define HCFG HSOTG_REG(0x0400) +#define HCFG_MODECHTIMEN BIT(31) +#define HCFG_PERSCHEDENA BIT(26) +#define HCFG_FRLISTEN_MASK (0x3 << 24) +#define HCFG_FRLISTEN_SHIFT 24 +#define HCFG_FRLISTEN_8 (0 << 24) +#define FRLISTEN_8_SIZE 8 +#define HCFG_FRLISTEN_16 BIT(24) +#define FRLISTEN_16_SIZE 16 +#define HCFG_FRLISTEN_32 (2 << 24) +#define FRLISTEN_32_SIZE 32 +#define HCFG_FRLISTEN_64 
(3 << 24) +#define FRLISTEN_64_SIZE 64 +#define HCFG_DESCDMA BIT(23) +#define HCFG_RESVALID_MASK (0xff << 8) +#define HCFG_RESVALID_SHIFT 8 +#define HCFG_ENA32KHZ BIT(7) +#define HCFG_FSLSSUPP BIT(2) +#define HCFG_FSLSPCLKSEL_MASK (0x3 << 0) +#define HCFG_FSLSPCLKSEL_SHIFT 0 +#define HCFG_FSLSPCLKSEL_30_60_MHZ 0 +#define HCFG_FSLSPCLKSEL_48_MHZ 1 +#define HCFG_FSLSPCLKSEL_6_MHZ 2 -#define HFIR HSOTG_REG(0x0404) -#define HFIR_FRINT_MASK (0xffff << 0) -#define HFIR_FRINT_SHIFT 0 -#define HFIR_RLDCTRL BIT(16) +#define HFIR HSOTG_REG(0x0404) +#define HFIR_FRINT_MASK (0xffff << 0) +#define HFIR_FRINT_SHIFT 0 +#define HFIR_RLDCTRL BIT(16) -#define HFNUM HSOTG_REG(0x0408) -#define HFNUM_FRREM_MASK (0xffff << 16) -#define HFNUM_FRREM_SHIFT 16 -#define HFNUM_FRNUM_MASK (0xffff << 0) -#define HFNUM_FRNUM_SHIFT 0 -#define HFNUM_MAX_FRNUM 0x3fff +#define HFNUM HSOTG_REG(0x0408) +#define HFNUM_FRREM_MASK (0xffff << 16) +#define HFNUM_FRREM_SHIFT 16 +#define HFNUM_FRNUM_MASK (0xffff << 0) +#define HFNUM_FRNUM_SHIFT 0 +#define HFNUM_MAX_FRNUM 0x3fff -#define HPTXSTS HSOTG_REG(0x0410) -#define TXSTS_QTOP_ODD BIT(31) -#define TXSTS_QTOP_CHNEP_MASK (0xf << 27) -#define TXSTS_QTOP_CHNEP_SHIFT 27 -#define TXSTS_QTOP_TOKEN_MASK (0x3 << 25) -#define TXSTS_QTOP_TOKEN_SHIFT 25 -#define TXSTS_QTOP_TERMINATE BIT(24) -#define TXSTS_QSPCAVAIL_MASK (0xff << 16) -#define TXSTS_QSPCAVAIL_SHIFT 16 -#define TXSTS_FSPCAVAIL_MASK (0xffff << 0) -#define TXSTS_FSPCAVAIL_SHIFT 0 +#define HPTXSTS HSOTG_REG(0x0410) +#define TXSTS_QTOP_ODD BIT(31) +#define TXSTS_QTOP_CHNEP_MASK (0xf << 27) +#define TXSTS_QTOP_CHNEP_SHIFT 27 +#define TXSTS_QTOP_TOKEN_MASK (0x3 << 25) +#define TXSTS_QTOP_TOKEN_SHIFT 25 +#define TXSTS_QTOP_TERMINATE BIT(24) +#define TXSTS_QSPCAVAIL_MASK (0xff << 16) +#define TXSTS_QSPCAVAIL_SHIFT 16 +#define TXSTS_FSPCAVAIL_MASK (0xffff << 0) +#define TXSTS_FSPCAVAIL_SHIFT 0 -#define HAINT HSOTG_REG(0x0414) -#define HAINTMSK HSOTG_REG(0x0418) -#define HFLBADDR HSOTG_REG(0x041c) +#define HAINT HSOTG_REG(0x0414) +#define HAINTMSK HSOTG_REG(0x0418) +#define HFLBADDR HSOTG_REG(0x041c) -#define HPRT0 HSOTG_REG(0x0440) -#define HPRT0_SPD_MASK (0x3 << 17) -#define HPRT0_SPD_SHIFT 17 -#define HPRT0_SPD_HIGH_SPEED 0 -#define HPRT0_SPD_FULL_SPEED 1 -#define HPRT0_SPD_LOW_SPEED 2 -#define HPRT0_TSTCTL_MASK (0xf << 13) -#define HPRT0_TSTCTL_SHIFT 13 -#define HPRT0_PWR BIT(12) -#define HPRT0_LNSTS_MASK (0x3 << 10) -#define HPRT0_LNSTS_SHIFT 10 -#define HPRT0_RST BIT(8) -#define HPRT0_SUSP BIT(7) -#define HPRT0_RES BIT(6) -#define HPRT0_OVRCURRCHG BIT(5) -#define HPRT0_OVRCURRACT BIT(4) -#define HPRT0_ENACHG BIT(3) -#define HPRT0_ENA BIT(2) -#define HPRT0_CONNDET BIT(1) -#define HPRT0_CONNSTS BIT(0) +#define HPRT0 HSOTG_REG(0x0440) +#define HPRT0_SPD_MASK (0x3 << 17) +#define HPRT0_SPD_SHIFT 17 +#define HPRT0_SPD_HIGH_SPEED 0 +#define HPRT0_SPD_FULL_SPEED 1 +#define HPRT0_SPD_LOW_SPEED 2 +#define HPRT0_TSTCTL_MASK (0xf << 13) +#define HPRT0_TSTCTL_SHIFT 13 +#define HPRT0_PWR BIT(12) +#define HPRT0_LNSTS_MASK (0x3 << 10) +#define HPRT0_LNSTS_SHIFT 10 +#define HPRT0_RST BIT(8) +#define HPRT0_SUSP BIT(7) +#define HPRT0_RES BIT(6) +#define HPRT0_OVRCURRCHG BIT(5) +#define HPRT0_OVRCURRACT BIT(4) +#define HPRT0_ENACHG BIT(3) +#define HPRT0_ENA BIT(2) +#define HPRT0_CONNDET BIT(1) +#define HPRT0_CONNSTS BIT(0) -#define HCCHAR(_ch) HSOTG_REG(0x0500 + 0x20 * (_ch)) -#define HCCHAR_CHENA BIT(31) -#define HCCHAR_CHDIS BIT(30) -#define HCCHAR_ODDFRM BIT(29) -#define HCCHAR_DEVADDR_MASK (0x7f << 22) -#define HCCHAR_DEVADDR_SHIFT 22 -#define 
HCCHAR_MULTICNT_MASK (0x3 << 20) -#define HCCHAR_MULTICNT_SHIFT 20 -#define HCCHAR_EPTYPE_MASK (0x3 << 18) -#define HCCHAR_EPTYPE_SHIFT 18 -#define HCCHAR_LSPDDEV BIT(17) -#define HCCHAR_EPDIR BIT(15) -#define HCCHAR_EPNUM_MASK (0xf << 11) -#define HCCHAR_EPNUM_SHIFT 11 -#define HCCHAR_MPS_MASK (0x7ff << 0) -#define HCCHAR_MPS_SHIFT 0 +#define HCCHAR(_ch) HSOTG_REG(0x0500 + 0x20 * (_ch)) +#define HCCHAR_CHENA BIT(31) +#define HCCHAR_CHDIS BIT(30) +#define HCCHAR_ODDFRM BIT(29) +#define HCCHAR_DEVADDR_MASK (0x7f << 22) +#define HCCHAR_DEVADDR_SHIFT 22 +#define HCCHAR_MULTICNT_MASK (0x3 << 20) +#define HCCHAR_MULTICNT_SHIFT 20 +#define HCCHAR_EPTYPE_MASK (0x3 << 18) +#define HCCHAR_EPTYPE_SHIFT 18 +#define HCCHAR_LSPDDEV BIT(17) +#define HCCHAR_EPDIR BIT(15) +#define HCCHAR_EPNUM_MASK (0xf << 11) +#define HCCHAR_EPNUM_SHIFT 11 +#define HCCHAR_MPS_MASK (0x7ff << 0) +#define HCCHAR_MPS_SHIFT 0 -#define HCSPLT(_ch) HSOTG_REG(0x0504 + 0x20 * (_ch)) -#define HCSPLT_SPLTENA BIT(31) -#define HCSPLT_COMPSPLT BIT(16) -#define HCSPLT_XACTPOS_MASK (0x3 << 14) -#define HCSPLT_XACTPOS_SHIFT 14 -#define HCSPLT_XACTPOS_MID 0 -#define HCSPLT_XACTPOS_END 1 -#define HCSPLT_XACTPOS_BEGIN 2 -#define HCSPLT_XACTPOS_ALL 3 -#define HCSPLT_HUBADDR_MASK (0x7f << 7) -#define HCSPLT_HUBADDR_SHIFT 7 -#define HCSPLT_PRTADDR_MASK (0x7f << 0) -#define HCSPLT_PRTADDR_SHIFT 0 +#define HCSPLT(_ch) HSOTG_REG(0x0504 + 0x20 * (_ch)) +#define HCSPLT_SPLTENA BIT(31) +#define HCSPLT_COMPSPLT BIT(16) +#define HCSPLT_XACTPOS_MASK (0x3 << 14) +#define HCSPLT_XACTPOS_SHIFT 14 +#define HCSPLT_XACTPOS_MID 0 +#define HCSPLT_XACTPOS_END 1 +#define HCSPLT_XACTPOS_BEGIN 2 +#define HCSPLT_XACTPOS_ALL 3 +#define HCSPLT_HUBADDR_MASK (0x7f << 7) +#define HCSPLT_HUBADDR_SHIFT 7 +#define HCSPLT_PRTADDR_MASK (0x7f << 0) +#define HCSPLT_PRTADDR_SHIFT 0 -#define HCINT(_ch) HSOTG_REG(0x0508 + 0x20 * (_ch)) -#define HCINTMSK(_ch) HSOTG_REG(0x050c + 0x20 * (_ch)) -#define HCINTMSK_RESERVED14_31 (0x3ffff << 14) -#define HCINTMSK_FRM_LIST_ROLL BIT(13) -#define HCINTMSK_XCS_XACT BIT(12) -#define HCINTMSK_BNA BIT(11) -#define HCINTMSK_DATATGLERR BIT(10) -#define HCINTMSK_FRMOVRUN BIT(9) -#define HCINTMSK_BBLERR BIT(8) -#define HCINTMSK_XACTERR BIT(7) -#define HCINTMSK_NYET BIT(6) -#define HCINTMSK_ACK BIT(5) -#define HCINTMSK_NAK BIT(4) -#define HCINTMSK_STALL BIT(3) -#define HCINTMSK_AHBERR BIT(2) -#define HCINTMSK_CHHLTD BIT(1) -#define HCINTMSK_XFERCOMPL BIT(0) +#define HCINT(_ch) HSOTG_REG(0x0508 + 0x20 * (_ch)) +#define HCINTMSK(_ch) HSOTG_REG(0x050c + 0x20 * (_ch)) +#define HCINTMSK_RESERVED14_31 (0x3ffff << 14) +#define HCINTMSK_FRM_LIST_ROLL BIT(13) +#define HCINTMSK_XCS_XACT BIT(12) +#define HCINTMSK_BNA BIT(11) +#define HCINTMSK_DATATGLERR BIT(10) +#define HCINTMSK_FRMOVRUN BIT(9) +#define HCINTMSK_BBLERR BIT(8) +#define HCINTMSK_XACTERR BIT(7) +#define HCINTMSK_NYET BIT(6) +#define HCINTMSK_ACK BIT(5) +#define HCINTMSK_NAK BIT(4) +#define HCINTMSK_STALL BIT(3) +#define HCINTMSK_AHBERR BIT(2) +#define HCINTMSK_CHHLTD BIT(1) +#define HCINTMSK_XFERCOMPL BIT(0) -#define HCTSIZ(_ch) HSOTG_REG(0x0510 + 0x20 * (_ch)) -#define TSIZ_DOPNG BIT(31) -#define TSIZ_SC_MC_PID_MASK (0x3 << 29) -#define TSIZ_SC_MC_PID_SHIFT 29 -#define TSIZ_SC_MC_PID_DATA0 0 -#define TSIZ_SC_MC_PID_DATA2 1 -#define TSIZ_SC_MC_PID_DATA1 2 -#define TSIZ_SC_MC_PID_MDATA 3 -#define TSIZ_SC_MC_PID_SETUP 3 -#define TSIZ_PKTCNT_MASK (0x3ff << 19) -#define TSIZ_PKTCNT_SHIFT 19 -#define TSIZ_NTD_MASK (0xff << 8) -#define TSIZ_NTD_SHIFT 8 -#define TSIZ_SCHINFO_MASK (0xff << 0) -#define 
TSIZ_SCHINFO_SHIFT 0 -#define TSIZ_XFERSIZE_MASK (0x7ffff << 0) -#define TSIZ_XFERSIZE_SHIFT 0 +#define HCTSIZ(_ch) HSOTG_REG(0x0510 + 0x20 * (_ch)) +#define TSIZ_DOPNG BIT(31) +#define TSIZ_SC_MC_PID_MASK (0x3 << 29) +#define TSIZ_SC_MC_PID_SHIFT 29 +#define TSIZ_SC_MC_PID_DATA0 0 +#define TSIZ_SC_MC_PID_DATA2 1 +#define TSIZ_SC_MC_PID_DATA1 2 +#define TSIZ_SC_MC_PID_MDATA 3 +#define TSIZ_SC_MC_PID_SETUP 3 +#define TSIZ_PKTCNT_MASK (0x3ff << 19) +#define TSIZ_PKTCNT_SHIFT 19 +#define TSIZ_NTD_MASK (0xff << 8) +#define TSIZ_NTD_SHIFT 8 +#define TSIZ_SCHINFO_MASK (0xff << 0) +#define TSIZ_SCHINFO_SHIFT 0 +#define TSIZ_XFERSIZE_MASK (0x7ffff << 0) +#define TSIZ_XFERSIZE_SHIFT 0 -#define HCDMA(_ch) HSOTG_REG(0x0514 + 0x20 * (_ch)) +#define HCDMA(_ch) HSOTG_REG(0x0514 + 0x20 * (_ch)) -#define HCDMAB(_ch) HSOTG_REG(0x051c + 0x20 * (_ch)) +#define HCDMAB(_ch) HSOTG_REG(0x051c + 0x20 * (_ch)) -#define HCFIFO(_ch) HSOTG_REG(0x1000 + 0x1000 * (_ch)) +#define HCFIFO(_ch) HSOTG_REG(0x1000 + 0x1000 * (_ch)) /** * struct dwc2_dma_desc - DMA descriptor structure, @@ -836,64 +836,64 @@ * Status quadlet and Data buffer pointer. */ struct dwc2_dma_desc { - uint32_t status; - uint32_t buf; + uint32_t status; + uint32_t buf; } __packed; /* Host Mode DMA descriptor status quadlet */ -#define HOST_DMA_A BIT(31) -#define HOST_DMA_STS_MASK (0x3 << 28) -#define HOST_DMA_STS_SHIFT 28 -#define HOST_DMA_STS_PKTERR BIT(28) -#define HOST_DMA_EOL BIT(26) -#define HOST_DMA_IOC BIT(25) -#define HOST_DMA_SUP BIT(24) -#define HOST_DMA_ALT_QTD BIT(23) -#define HOST_DMA_QTD_OFFSET_MASK (0x3f << 17) -#define HOST_DMA_QTD_OFFSET_SHIFT 17 -#define HOST_DMA_ISOC_NBYTES_MASK (0xfff << 0) -#define HOST_DMA_ISOC_NBYTES_SHIFT 0 -#define HOST_DMA_NBYTES_MASK (0x1ffff << 0) -#define HOST_DMA_NBYTES_SHIFT 0 -#define HOST_DMA_NBYTES_LIMIT 131071 +#define HOST_DMA_A BIT(31) +#define HOST_DMA_STS_MASK (0x3 << 28) +#define HOST_DMA_STS_SHIFT 28 +#define HOST_DMA_STS_PKTERR BIT(28) +#define HOST_DMA_EOL BIT(26) +#define HOST_DMA_IOC BIT(25) +#define HOST_DMA_SUP BIT(24) +#define HOST_DMA_ALT_QTD BIT(23) +#define HOST_DMA_QTD_OFFSET_MASK (0x3f << 17) +#define HOST_DMA_QTD_OFFSET_SHIFT 17 +#define HOST_DMA_ISOC_NBYTES_MASK (0xfff << 0) +#define HOST_DMA_ISOC_NBYTES_SHIFT 0 +#define HOST_DMA_NBYTES_MASK (0x1ffff << 0) +#define HOST_DMA_NBYTES_SHIFT 0 +#define HOST_DMA_NBYTES_LIMIT 131071 /* Device Mode DMA descriptor status quadlet */ -#define DEV_DMA_BUFF_STS_MASK (0x3 << 30) -#define DEV_DMA_BUFF_STS_SHIFT 30 -#define DEV_DMA_BUFF_STS_HREADY 0 -#define DEV_DMA_BUFF_STS_DMABUSY 1 -#define DEV_DMA_BUFF_STS_DMADONE 2 -#define DEV_DMA_BUFF_STS_HBUSY 3 -#define DEV_DMA_STS_MASK (0x3 << 28) -#define DEV_DMA_STS_SHIFT 28 -#define DEV_DMA_STS_SUCC 0 -#define DEV_DMA_STS_BUFF_FLUSH 1 -#define DEV_DMA_STS_BUFF_ERR 3 -#define DEV_DMA_L BIT(27) -#define DEV_DMA_SHORT BIT(26) -#define DEV_DMA_IOC BIT(25) -#define DEV_DMA_SR BIT(24) -#define DEV_DMA_MTRF BIT(23) -#define DEV_DMA_ISOC_PID_MASK (0x3 << 23) -#define DEV_DMA_ISOC_PID_SHIFT 23 -#define DEV_DMA_ISOC_PID_DATA0 0 -#define DEV_DMA_ISOC_PID_DATA2 1 -#define DEV_DMA_ISOC_PID_DATA1 2 -#define DEV_DMA_ISOC_PID_MDATA 3 -#define DEV_DMA_ISOC_FRNUM_MASK (0x7ff << 12) -#define DEV_DMA_ISOC_FRNUM_SHIFT 12 -#define DEV_DMA_ISOC_TX_NBYTES_MASK (0xfff << 0) -#define DEV_DMA_ISOC_TX_NBYTES_LIMIT 0xfff -#define DEV_DMA_ISOC_RX_NBYTES_MASK (0x7ff << 0) -#define DEV_DMA_ISOC_RX_NBYTES_LIMIT 0x7ff -#define DEV_DMA_ISOC_NBYTES_SHIFT 0 -#define DEV_DMA_NBYTES_MASK (0xffff << 0) -#define DEV_DMA_NBYTES_SHIFT 0 
-#define DEV_DMA_NBYTES_LIMIT 0xffff +#define DEV_DMA_BUFF_STS_MASK (0x3 << 30) +#define DEV_DMA_BUFF_STS_SHIFT 30 +#define DEV_DMA_BUFF_STS_HREADY 0 +#define DEV_DMA_BUFF_STS_DMABUSY 1 +#define DEV_DMA_BUFF_STS_DMADONE 2 +#define DEV_DMA_BUFF_STS_HBUSY 3 +#define DEV_DMA_STS_MASK (0x3 << 28) +#define DEV_DMA_STS_SHIFT 28 +#define DEV_DMA_STS_SUCC 0 +#define DEV_DMA_STS_BUFF_FLUSH 1 +#define DEV_DMA_STS_BUFF_ERR 3 +#define DEV_DMA_L BIT(27) +#define DEV_DMA_SHORT BIT(26) +#define DEV_DMA_IOC BIT(25) +#define DEV_DMA_SR BIT(24) +#define DEV_DMA_MTRF BIT(23) +#define DEV_DMA_ISOC_PID_MASK (0x3 << 23) +#define DEV_DMA_ISOC_PID_SHIFT 23 +#define DEV_DMA_ISOC_PID_DATA0 0 +#define DEV_DMA_ISOC_PID_DATA2 1 +#define DEV_DMA_ISOC_PID_DATA1 2 +#define DEV_DMA_ISOC_PID_MDATA 3 +#define DEV_DMA_ISOC_FRNUM_MASK (0x7ff << 12) +#define DEV_DMA_ISOC_FRNUM_SHIFT 12 +#define DEV_DMA_ISOC_TX_NBYTES_MASK (0xfff << 0) +#define DEV_DMA_ISOC_TX_NBYTES_LIMIT 0xfff +#define DEV_DMA_ISOC_RX_NBYTES_MASK (0x7ff << 0) +#define DEV_DMA_ISOC_RX_NBYTES_LIMIT 0x7ff +#define DEV_DMA_ISOC_NBYTES_SHIFT 0 +#define DEV_DMA_NBYTES_MASK (0xffff << 0) +#define DEV_DMA_NBYTES_SHIFT 0 +#define DEV_DMA_NBYTES_LIMIT 0xffff -#define MAX_DMA_DESC_NUM_GENERIC 64 -#define MAX_DMA_DESC_NUM_HS_ISOC 256 +#define MAX_DMA_DESC_NUM_GENERIC 64 +#define MAX_DMA_DESC_NUM_HS_ISOC 256 -#endif /* __DWC2_HW_H__ */ +#endif /* DWC2_REGS_H */ diff --git a/include/hw/usb/hcd-musb.h b/include/hw/usb/hcd-musb.h index c874b9f292..f30a26f7f4 100644 --- a/include/hw/usb/hcd-musb.h +++ b/include/hw/usb/hcd-musb.h @@ -10,8 +10,8 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef HW_USB_MUSB_H -#define HW_USB_MUSB_H +#ifndef HW_USB_HCD_MUSB_H +#define HW_USB_HCD_MUSB_H enum musb_irq_source_e { musb_irq_suspend = 0, diff --git a/include/hw/usb/msd.h b/include/hw/usb/msd.h index 7538c54569..f9fd862b52 100644 --- a/include/hw/usb/msd.h +++ b/include/hw/usb/msd.h @@ -17,7 +17,7 @@ enum USBMSDMode { USB_MSDM_CSW /* Command Status. */ }; -struct usb_msd_csw { +struct QEMU_PACKED usb_msd_csw { uint32_t sig; uint32_t tag; uint32_t residue; @@ -40,6 +40,7 @@ struct MSDState { bool removable; bool commandlog; SCSIDevice *scsi_dev; + bool needs_reset; }; typedef struct MSDState MSDState; diff --git a/include/hw/usb/xlnx-usb-subsystem.h b/include/hw/usb/xlnx-usb-subsystem.h index 999e423951..5b730abd84 100644 --- a/include/hw/usb/xlnx-usb-subsystem.h +++ b/include/hw/usb/xlnx-usb-subsystem.h @@ -22,8 +22,8 @@ * THE SOFTWARE. */ -#ifndef XLNX_VERSAL_USB_SUBSYSTEM_H -#define XLNX_VERSAL_USB_SUBSYSTEM_H +#ifndef XLNX_USB_SUBSYSTEM_H +#define XLNX_USB_SUBSYSTEM_H #include "hw/usb/xlnx-versal-usb2-ctrl-regs.h" #include "hw/usb/hcd-dwc3.h" diff --git a/include/hw/usb/xlnx-versal-usb2-ctrl-regs.h b/include/hw/usb/xlnx-versal-usb2-ctrl-regs.h index b76dce0419..633bf3013a 100644 --- a/include/hw/usb/xlnx-versal-usb2-ctrl-regs.h +++ b/include/hw/usb/xlnx-versal-usb2-ctrl-regs.h @@ -23,8 +23,8 @@ * THE SOFTWARE. 
*/ -#ifndef XLNX_USB2_REGS_H -#define XLNX_USB2_REGS_H +#ifndef XLNX_VERSAL_USB2_CTRL_REGS_H +#define XLNX_VERSAL_USB2_CTRL_REGS_H #define TYPE_XILINX_VERSAL_USB2_CTRL_REGS "xlnx.versal-usb2-ctrl-regs" diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h index 8af11b0a76..e573f5a9f1 100644 --- a/include/hw/vfio/vfio-common.h +++ b/include/hw/vfio/vfio-common.h @@ -98,7 +98,7 @@ typedef struct VFIOContainer { typedef struct VFIOGuestIOMMU { VFIOContainer *container; - IOMMUMemoryRegion *iommu; + IOMMUMemoryRegion *iommu_mr; hwaddr iommu_offset; IOMMUNotifier n; QLIST_ENTRY(VFIOGuestIOMMU) giommu_next; diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h index 8475c5a29d..eab46d7f0b 100644 --- a/include/hw/virtio/vhost-backend.h +++ b/include/hw/virtio/vhost-backend.h @@ -69,6 +69,8 @@ typedef int (*vhost_set_vring_kick_op)(struct vhost_dev *dev, struct vhost_vring_file *file); typedef int (*vhost_set_vring_call_op)(struct vhost_dev *dev, struct vhost_vring_file *file); +typedef int (*vhost_set_vring_err_op)(struct vhost_dev *dev, + struct vhost_vring_file *file); typedef int (*vhost_set_vring_busyloop_timeout_op)(struct vhost_dev *dev, struct vhost_vring_state *r); typedef int (*vhost_set_features_op)(struct vhost_dev *dev, @@ -145,6 +147,7 @@ typedef struct VhostOps { vhost_get_vring_base_op vhost_get_vring_base; vhost_set_vring_kick_op vhost_set_vring_kick; vhost_set_vring_call_op vhost_set_vring_call; + vhost_set_vring_err_op vhost_set_vring_err; vhost_set_vring_busyloop_timeout_op vhost_set_vring_busyloop_timeout; vhost_set_features_op vhost_set_features; vhost_get_features_op vhost_get_features; @@ -173,12 +176,6 @@ typedef struct VhostOps { vhost_force_iommu_op vhost_force_iommu; } VhostOps; -extern const VhostOps user_ops; -extern const VhostOps vdpa_ops; - -int vhost_set_backend_type(struct vhost_dev *dev, - VhostBackendType backend_type); - int vhost_backend_update_device_iotlb(struct vhost_dev *dev, uint64_t iova, uint64_t uaddr, uint64_t len, diff --git a/include/hw/virtio/vhost-user-blk.h b/include/hw/virtio/vhost-user-blk.h index 7c91f15040..ea085ee1ed 100644 --- a/include/hw/virtio/vhost-user-blk.h +++ b/include/hw/virtio/vhost-user-blk.h @@ -34,7 +34,6 @@ struct VHostUserBlk { struct virtio_blk_config blkcfg; uint16_t num_queues; uint32_t queue_size; - uint32_t config_wce; struct vhost_dev dev; struct vhost_inflight *inflight; VhostUserState vhost_user; diff --git a/include/hw/virtio/vhost-user-fs.h b/include/hw/virtio/vhost-user-fs.h index 0d62834c25..94c3aaa84e 100644 --- a/include/hw/virtio/vhost-user-fs.h +++ b/include/hw/virtio/vhost-user-fs.h @@ -11,8 +11,8 @@ * top-level directory. 
*/ -#ifndef _QEMU_VHOST_USER_FS_H -#define _QEMU_VHOST_USER_FS_H +#ifndef QEMU_VHOST_USER_FS_H +#define QEMU_VHOST_USER_FS_H #include "hw/virtio/virtio.h" #include "hw/virtio/vhost.h" @@ -44,4 +44,4 @@ struct VHostUserFS { /*< public >*/ }; -#endif /* _QEMU_VHOST_USER_FS_H */ +#endif /* QEMU_VHOST_USER_FS_H */ diff --git a/include/hw/virtio/vhost-user-gpio.h b/include/hw/virtio/vhost-user-gpio.h new file mode 100644 index 0000000000..a9d3f9b049 --- /dev/null +++ b/include/hw/virtio/vhost-user-gpio.h @@ -0,0 +1,45 @@ +/* + * Vhost-user GPIO virtio device + * + * Copyright (c) 2021 Viresh Kumar + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef _QEMU_VHOST_USER_GPIO_H +#define _QEMU_VHOST_USER_GPIO_H + +#include "hw/virtio/virtio.h" +#include "hw/virtio/vhost.h" +#include "hw/virtio/vhost-user.h" +#include "standard-headers/linux/virtio_gpio.h" +#include "chardev/char-fe.h" + +#define TYPE_VHOST_USER_GPIO "vhost-user-gpio-device" +OBJECT_DECLARE_SIMPLE_TYPE(VHostUserGPIO, VHOST_USER_GPIO); + +struct VHostUserGPIO { + /*< private >*/ + VirtIODevice parent_obj; + CharBackend chardev; + struct virtio_gpio_config config; + struct vhost_virtqueue *vhost_vqs; + struct vhost_dev vhost_dev; + VhostUserState vhost_user; + VirtQueue *command_vq; + VirtQueue *interrupt_vq; + /** + * There are at least two steps of initialization of the + * vhost-user device. The first is a "connect" step and + * second is a "start" step. Make a separation between + * those initialization phases by using two fields. + * + * @connected: see vu_gpio_connect()/vu_gpio_disconnect() + * @started_vu: see vu_gpio_start()/vu_gpio_stop() + */ + bool connected; + bool started_vu; + /*< public >*/ +}; + +#endif /* _QEMU_VHOST_USER_GPIO_H */ diff --git a/include/hw/virtio/vhost-user-i2c.h b/include/hw/virtio/vhost-user-i2c.h index deae47a76d..0f7acd40e3 100644 --- a/include/hw/virtio/vhost-user-i2c.h +++ b/include/hw/virtio/vhost-user-i2c.h @@ -6,8 +6,8 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef _QEMU_VHOST_USER_I2C_H -#define _QEMU_VHOST_USER_I2C_H +#ifndef QEMU_VHOST_USER_I2C_H +#define QEMU_VHOST_USER_I2C_H #include "hw/virtio/vhost.h" #include "hw/virtio/vhost-user.h" @@ -25,4 +25,7 @@ struct VHostUserI2C { bool connected; }; -#endif /* _QEMU_VHOST_USER_I2C_H */ +/* Virtio Feature bits */ +#define VIRTIO_I2C_F_ZERO_LENGTH_REQUEST 0 + +#endif /* QEMU_VHOST_USER_I2C_H */ diff --git a/include/hw/virtio/vhost-user-rng.h b/include/hw/virtio/vhost-user-rng.h new file mode 100644 index 0000000000..ddd9f01eea --- /dev/null +++ b/include/hw/virtio/vhost-user-rng.h @@ -0,0 +1,33 @@ +/* + * Vhost-user RNG virtio device + * + * Copyright (c) 2021 Mathieu Poirier + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef QEMU_VHOST_USER_RNG_H +#define QEMU_VHOST_USER_RNG_H + +#include "hw/virtio/virtio.h" +#include "hw/virtio/vhost.h" +#include "hw/virtio/vhost-user.h" +#include "chardev/char-fe.h" + +#define TYPE_VHOST_USER_RNG "vhost-user-rng" +OBJECT_DECLARE_SIMPLE_TYPE(VHostUserRNG, VHOST_USER_RNG) + +struct VHostUserRNG { + /*< private >*/ + VirtIODevice parent; + CharBackend chardev; + struct vhost_virtqueue *vhost_vq; + struct vhost_dev vhost_dev; + VhostUserState vhost_user; + VirtQueue *req_vq; + bool connected; + + /*< public >*/ +}; + +#endif /* QEMU_VHOST_USER_RNG_H */ diff --git a/include/hw/virtio/vhost-user-vsock.h b/include/hw/virtio/vhost-user-vsock.h index 4cfd558245..67aa46c952 100644 --- a/include/hw/virtio/vhost-user-vsock.h +++ b/include/hw/virtio/vhost-user-vsock.h @@ -8,8 +8,8 @@ 
* top-level directory. */ -#ifndef _QEMU_VHOST_USER_VSOCK_H -#define _QEMU_VHOST_USER_VSOCK_H +#ifndef QEMU_VHOST_USER_VSOCK_H +#define QEMU_VHOST_USER_VSOCK_H #include "hw/virtio/vhost-vsock-common.h" #include "hw/virtio/vhost-user.h" @@ -33,4 +33,4 @@ struct VHostUserVSock { /*< public >*/ }; -#endif /* _QEMU_VHOST_USER_VSOCK_H */ +#endif /* QEMU_VHOST_USER_VSOCK_H */ diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h index a9abca3288..191216a74f 100644 --- a/include/hw/virtio/vhost-user.h +++ b/include/hw/virtio/vhost-user.h @@ -11,19 +11,79 @@ #include "chardev/char-fe.h" #include "hw/virtio/virtio.h" +/** + * VhostUserHostNotifier - notifier information for one queue + * @rcu: rcu_head for cleanup + * @mr: memory region of notifier + * @addr: current mapped address + * @unmap_addr: address to be un-mapped + * @idx: virtioqueue index + * + * The VhostUserHostNotifier entries are re-used. When an old mapping + * is to be released it is moved to @unmap_addr and @addr is replaced. + * Once the RCU process has completed the unmap @unmap_addr is + * cleared. + */ typedef struct VhostUserHostNotifier { + struct rcu_head rcu; MemoryRegion mr; void *addr; - bool set; + void *unmap_addr; + int idx; } VhostUserHostNotifier; +/** + * VhostUserState - shared state for all vhost-user devices + * @chr: the character backend for the socket + * @notifiers: GPtrArray of @VhostUserHostnotifier + * @memory_slots: + */ typedef struct VhostUserState { CharBackend *chr; - VhostUserHostNotifier notifier[VIRTIO_QUEUE_MAX]; + GPtrArray *notifiers; int memory_slots; + bool supports_config; } VhostUserState; +/** + * vhost_user_init() - initialise shared vhost_user state + * @user: allocated area for storing shared state + * @chr: the chardev for the vhost socket + * @errp: error handle + * + * User can either directly g_new() space for the state or embed + * VhostUserState in their larger device structure and just point to + * it. + * + * Return: true on success, false on error while setting errp. + */ bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp); + +/** + * vhost_user_cleanup() - cleanup state + * @user: ptr to use state + * + * Cleans up shared state and notifiers, callee is responsible for + * freeing the @VhostUserState memory itself. + */ void vhost_user_cleanup(VhostUserState *user); +/** + * vhost_user_async_close() - cleanup vhost-user post connection drop + * @d: DeviceState for the associated device (passed to callback) + * @chardev: the CharBackend associated with the connection + * @vhost: the common vhost device + * @cb: the user callback function to complete the clean-up + * + * This function is used to handle the shutdown of a vhost-user + * connection to a backend. We handle this centrally to make sure we + * do all the steps and handle potential races due to VM shutdowns. 
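For context, a minimal sketch of how a front-end is expected to use this shared state; the MyVhostUserDev type and its fields are hypothetical and only illustrate the documented init/cleanup contract:

    /* Sketch: embed VhostUserState in the device and pair
     * vhost_user_init() with vhost_user_cleanup(). */
    typedef struct MyVhostUserDev {
        VirtIODevice parent_obj;
        CharBackend chardev;
        VhostUserState vhost_user;
    } MyVhostUserDev;

    static bool my_dev_connect(MyVhostUserDev *dev, Error **errp)
    {
        if (!vhost_user_init(&dev->vhost_user, &dev->chardev, errp)) {
            return false;            /* errp already set */
        }
        /* ... vhost_dev_init() / virtqueue setup would follow ... */
        return true;
    }

    static void my_dev_teardown(MyVhostUserDev *dev)
    {
        /* Frees the notifiers; the embedded state itself is ours. */
        vhost_user_cleanup(&dev->vhost_user);
    }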
+ * Once the connection is disabled we schedule a bottom half to finish the clean-up. + */ +typedef void (*vu_async_close_fn)(DeviceState *cb); + +void vhost_user_async_close(DeviceState *d, + CharBackend *chardev, struct vhost_dev *vhost, + vu_async_close_fn cb); + #endif diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h index 9188226d8b..1111d85643 100644 --- a/include/hw/virtio/vhost-vdpa.h +++ b/include/hw/virtio/vhost-vdpa.h @@ -12,7 +12,12 @@ #ifndef HW_VIRTIO_VHOST_VDPA_H #define HW_VIRTIO_VHOST_VDPA_H +#include <gmodule.h> + +#include "hw/virtio/vhost-iova-tree.h" +#include "hw/virtio/vhost-shadow-virtqueue.h" #include "hw/virtio/virtio.h" +#include "standard-headers/linux/vhost_types.h" typedef struct VhostVDPAHostNotifier { MemoryRegion mr; @@ -21,10 +26,24 @@ typedef struct vhost_vdpa { int device_fd; + int index; uint32_t msg_type; + bool iotlb_batch_begin_sent; MemoryListener listener; + struct vhost_vdpa_iova_range iova_range; + uint64_t acked_features; + bool shadow_vqs_enabled; + /* IOVA mapping used by the Shadow Virtqueue */ + VhostIOVATree *iova_tree; + GPtrArray *shadow_vqs; + const VhostShadowVirtqueueOps *shadow_vq_ops; + void *shadow_vq_ops_opaque; struct vhost_dev *dev; VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX]; } VhostVDPA; +int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size, + void *vaddr, bool readonly); +int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size); + #endif diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h index e412b5ee98..93c782101d 100644 --- a/include/hw/virtio/vhost-vsock-common.h +++ b/include/hw/virtio/vhost-vsock-common.h @@ -8,8 +8,8 @@ * top-level directory. */ -#ifndef _QEMU_VHOST_VSOCK_COMMON_H -#define _QEMU_VHOST_VSOCK_COMMON_H +#ifndef QEMU_VHOST_VSOCK_COMMON_H +#define QEMU_VHOST_VSOCK_COMMON_H #include "hw/virtio/virtio.h" #include "hw/virtio/vhost.h" @@ -35,13 +35,18 @@ struct VHostVSockCommon { VirtQueue *trans_vq; QEMUTimer *post_load_timer; + + /* features */ + OnOffAuto seqpacket; }; int vhost_vsock_common_start(VirtIODevice *vdev); void vhost_vsock_common_stop(VirtIODevice *vdev); int vhost_vsock_common_pre_save(void *opaque); int vhost_vsock_common_post_load(void *opaque, int version_id); -void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name); +void vhost_vsock_common_realize(VirtIODevice *vdev); void vhost_vsock_common_unrealize(VirtIODevice *vdev); +uint64_t vhost_vsock_common_get_features(VirtIODevice *vdev, uint64_t features, + Error **errp); -#endif /* _QEMU_VHOST_VSOCK_COMMON_H */ +#endif /* QEMU_VHOST_VSOCK_COMMON_H */ diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h index 045d0fd9f2..67a6807fac 100644 --- a/include/hw/virtio/vhost.h +++ b/include/hw/virtio/vhost.h @@ -5,6 +5,9 @@ #include "hw/virtio/virtio.h" #include "exec/memory.h" +#define VHOST_F_DEVICE_IOTLB 63 +#define VHOST_USER_F_PROTOCOL_FEATURES 30 + /* Generic structures common for any vhost based device.
*/ struct vhost_inflight { @@ -29,6 +32,7 @@ struct vhost_virtqueue { unsigned long long used_phys; unsigned used_size; EventNotifier masked_notifier; + EventNotifier error_notifier; struct vhost_dev *dev; }; @@ -61,6 +65,12 @@ typedef struct VhostDevConfigOps { } VhostDevConfigOps; struct vhost_memory; + +/** + * struct vhost_dev - common vhost_dev structure + * @vhost_ops: backend specific ops + * @config_ops: ops for config changes (see @vhost_dev_set_config_notifier) + */ struct vhost_dev { VirtIODevice *vdev; MemoryListener memory_listener; @@ -71,17 +81,23 @@ struct vhost_dev { int n_tmp_sections; MemoryRegionSection *tmp_sections; struct vhost_virtqueue *vqs; - int nvqs; + unsigned int nvqs; /* the first virtqueue which would be used by this vhost dev */ int vq_index; + /* one past the last vq index for the virtio device (not vhost) */ + int vq_index_end; /* if non-zero, minimum required value for max_queues */ int num_queues; uint64_t features; + /** @acked_features: final set of negotiated features */ uint64_t acked_features; + /** @backend_features: backend specific feature bits */ uint64_t backend_features; + /** @protocol_features: final negotiated protocol features */ uint64_t protocol_features; uint64_t max_queues; uint64_t backend_cap; + /* @started: is the vhost device started? */ bool started; bool log_enabled; uint64_t log_size; @@ -95,6 +111,10 @@ struct vhost_dev { const VhostDevConfigOps *config_ops; }; +extern const VhostOps kernel_ops; +extern const VhostOps user_ops; +extern const VhostOps vdpa_ops; + struct vhost_net { struct vhost_dev dev; struct vhost_virtqueue vqs[2]; @@ -102,15 +122,142 @@ struct vhost_net { NetClientState *nc; }; +/** + * vhost_dev_init() - initialise the vhost interface + * @hdev: the common vhost_dev structure + * @opaque: opaque ptr passed to backend (vhost/vhost-user/vdpa) + * @backend_type: type of backend + * @busyloop_timeout: timeout for polling virtqueue + * @errp: error handle + * + * The initialisation of the vhost device will trigger the + * initialisation of the backend and potentially capability + * negotiation of backend interface. Configuration of the VirtIO + * itself won't happen until the interface is started. + * + * Return: 0 on success, non-zero on error while setting errp. + */ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, VhostBackendType backend_type, uint32_t busyloop_timeout, Error **errp); + +/** + * vhost_dev_cleanup() - tear down and cleanup vhost interface + * @hdev: the common vhost_dev structure + */ void vhost_dev_cleanup(struct vhost_dev *hdev); -int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev); -void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev); + +/** + * vhost_dev_enable_notifiers() - enable event notifiers + * @hdev: common vhost_dev structure + * @vdev: the VirtIODevice structure + * + * Enable notifications directly to the vhost device rather than being + * triggered by QEMU itself. Notifications should be enabled before + * the vhost device is started via @vhost_dev_start. + * + * Return: 0 on success, < 0 on error. + */ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev); + +/** + * vhost_dev_disable_notifiers - disable event notifications + * @hdev: common vhost_dev structure + * @vdev: the VirtIODevice structure + * + * Disable direct notifications to vhost device. 
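Taken together, the init/cleanup and notifier helpers documented here (plus vhost_dev_start()/vhost_dev_stop() below) form the usual bring-up order; a rough sketch with error handling trimmed, where the helpers are hypothetical and 'opaque' stands for the backend-specific handle:

    /* Sketch: typical vhost bring-up and tear-down sequence. */
    static int my_vhost_bringup(struct vhost_dev *hdev, VirtIODevice *vdev,
                                void *opaque, Error **errp)
    {
        int r = vhost_dev_init(hdev, opaque, VHOST_BACKEND_TYPE_USER, 0, errp);
        if (r < 0) {
            return r;
        }
        r = vhost_dev_enable_notifiers(hdev, vdev);
        if (r < 0) {
            vhost_dev_cleanup(hdev);
            return r;
        }
        return vhost_dev_start(hdev, vdev, true);    /* vrings enabled */
    }

    static void my_vhost_teardown(struct vhost_dev *hdev, VirtIODevice *vdev)
    {
        vhost_dev_stop(hdev, vdev, true);
        vhost_dev_disable_notifiers(hdev, vdev);
        vhost_dev_cleanup(hdev);
    }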
+ */ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev); +/** + * vhost_dev_is_started() - report status of vhost device + * @hdev: common vhost_dev structure + * + * Return the started status of the vhost device + */ +static inline bool vhost_dev_is_started(struct vhost_dev *hdev) +{ + return hdev->started; +} + +/** + * vhost_dev_start() - start the vhost device + * @hdev: common vhost_dev structure + * @vdev: the VirtIODevice structure + * @vrings: true to have vrings enabled in this call + * + * Starts the vhost device. From this point VirtIO feature negotiation + * can start and the device can start processing VirtIO transactions. + * + * Return: 0 on success, < 0 on error. + */ +int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings); + +/** + * vhost_dev_stop() - stop the vhost device + * @hdev: common vhost_dev structure + * @vdev: the VirtIODevice structure + * @vrings: true to have vrings disabled in this call + * + * Stop the vhost device. After the device is stopped the notifiers + * can be disabled (@vhost_dev_disable_notifiers) and the device can + * be torn down (@vhost_dev_cleanup). + */ +void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings); + +/** + * DOC: vhost device configuration handling + * + * The VirtIO device configuration space is used for rarely changing + * or initialisation time parameters. The configuration can be updated + * by either the guest driver or the device itself. If the device can + * change the configuration over time the vhost handler should + * register a @VhostDevConfigOps structure with + * @vhost_dev_set_config_notifier so the guest can be notified. Some + * devices register a handler anyway and will signal an error if an + * unexpected config change happens. + */ + +/** + * vhost_dev_get_config() - fetch device configuration + * @hdev: common vhost_dev_structure + * @config: pointer to device appropriate config structure + * @config_len: size of device appropriate config structure + * + * Return: 0 on success, < 0 on error while setting errp + */ +int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config, + uint32_t config_len, Error **errp); + +/** + * vhost_dev_set_config() - set device configuration + * @hdev: common vhost_dev_structure + * @data: pointer to data to set + * @offset: offset into configuration space + * @size: length of set + * @flags: @VhostSetConfigType flags + * + * By use of @offset/@size a subset of the configuration space can be + * written to. The @flags are used to indicate if it is a normal + * transaction or related to migration. + * + * Return: 0 on success, non-zero on error + */ +int vhost_dev_set_config(struct vhost_dev *dev, const uint8_t *data, + uint32_t offset, uint32_t size, uint32_t flags); + +/** + * vhost_dev_set_config_notifier() - register VhostDevConfigOps + * @hdev: common vhost_dev_structure + * @ops: notifier ops + * + * If the device is expected to change configuration a notifier can be + * setup to handle the case. + */ +void vhost_dev_set_config_notifier(struct vhost_dev *dev, + const VhostDevConfigOps *ops); + + /* Test and clear masked event pending status. * Should be called after unmask to avoid losing events. 
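To make the configuration-space flow concrete, a small sketch of a handler that re-reads the backend config on change and registers itself; the device-side callback member is assumed here to be named vhost_dev_config_notifier (it is not shown in this hunk), and the buffer size is a placeholder:

    /* Sketch: react to backend config-space changes. */
    static int my_config_changed(struct vhost_dev *hdev)
    {
        uint8_t cfg[64];                 /* device-appropriate size */
        Error *local_err = NULL;

        if (vhost_dev_get_config(hdev, cfg, sizeof(cfg), &local_err) < 0) {
            error_report_err(local_err);
            return -1;
        }
        /* ... copy into the VirtIO config space, notify the guest ... */
        return 0;
    }

    static const VhostDevConfigOps my_config_ops = {
        .vhost_dev_config_notifier = my_config_changed,   /* assumed member name */
    };

    /* at realize time: vhost_dev_set_config_notifier(hdev, &my_config_ops); */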
*/ @@ -120,8 +267,29 @@ bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n); */ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n, bool mask); + +/** + * vhost_get_features() - return a sanitised set of feature bits + * @hdev: common vhost_dev structure + * @feature_bits: pointer to terminated table of feature bits + * @features: original feature set + * + * This returns a set of features bits that is an intersection of what + * is supported by the vhost backend (hdev->features), the supported + * feature_bits and the requested feature set. + */ uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits, uint64_t features); + +/** + * vhost_ack_features() - set vhost acked_features + * @hdev: common vhost_dev structure + * @feature_bits: pointer to terminated table of feature bits + * @features: requested feature set + * + * This sets the internal hdev->acked_features to the intersection of + * the backends advertised features and the supported feature_bits. + */ void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits, uint64_t features); bool vhost_has_free_slot(void); @@ -130,14 +298,11 @@ int vhost_net_set_backend(struct vhost_dev *hdev, struct vhost_vring_file *file); int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write); -int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config, - uint32_t config_len, Error **errp); -int vhost_dev_set_config(struct vhost_dev *dev, const uint8_t *data, - uint32_t offset, uint32_t size, uint32_t flags); -/* notifier callback in case vhost device config space changed - */ -void vhost_dev_set_config_notifier(struct vhost_dev *dev, - const VhostDevConfigOps *ops); + +int vhost_virtqueue_start(struct vhost_dev *dev, struct VirtIODevice *vdev, + struct vhost_virtqueue *vq, unsigned idx); +void vhost_virtqueue_stop(struct vhost_dev *dev, struct VirtIODevice *vdev, + struct vhost_virtqueue *vq, unsigned idx); void vhost_dev_reset_inflight(struct vhost_inflight *inflight); void vhost_dev_free_inflight(struct vhost_inflight *inflight); diff --git a/include/hw/virtio/virtio-access.h b/include/hw/virtio/virtio-access.h index 6818a23a2d..07aae69042 100644 --- a/include/hw/virtio/virtio-access.h +++ b/include/hw/virtio/virtio-access.h @@ -28,7 +28,7 @@ static inline bool virtio_access_is_big_endian(VirtIODevice *vdev) { #if defined(LEGACY_VIRTIO_IS_BIENDIAN) return virtio_is_big_endian(vdev); -#elif defined(TARGET_WORDS_BIGENDIAN) +#elif TARGET_BIG_ENDIAN if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { /* Devices conforming to VIRTIO 1.0 or later are always LE. */ return false; @@ -149,7 +149,7 @@ static inline uint64_t virtio_ldq_p(VirtIODevice *vdev, const void *ptr) static inline uint16_t virtio_tswap16(VirtIODevice *vdev, uint16_t s) { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN return virtio_access_is_big_endian(vdev) ? s : bswap16(s); #else return virtio_access_is_big_endian(vdev) ? bswap16(s) : s; @@ -215,7 +215,7 @@ static inline void virtio_tswap16s(VirtIODevice *vdev, uint16_t *s) static inline uint32_t virtio_tswap32(VirtIODevice *vdev, uint32_t s) { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN return virtio_access_is_big_endian(vdev) ? s : bswap32(s); #else return virtio_access_is_big_endian(vdev) ? 
bswap32(s) : s; @@ -229,7 +229,7 @@ static inline void virtio_tswap32s(VirtIODevice *vdev, uint32_t *s) static inline uint64_t virtio_tswap64(VirtIODevice *vdev, uint64_t s) { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN return virtio_access_is_big_endian(vdev) ? s : bswap64(s); #else return virtio_access_is_big_endian(vdev) ? bswap64(s) : s; diff --git a/include/hw/virtio/virtio-blk-common.h b/include/hw/virtio/virtio-blk-common.h new file mode 100644 index 0000000000..31daada3e3 --- /dev/null +++ b/include/hw/virtio/virtio-blk-common.h @@ -0,0 +1,20 @@ +/* + * Virtio Block Device common helpers + * + * Copyright IBM, Corp. 2007 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#ifndef VIRTIO_BLK_COMMON_H +#define VIRTIO_BLK_COMMON_H + +#include "hw/virtio/virtio.h" + +extern const VirtIOConfigSizeParams virtio_blk_cfg_size_params; + +#endif diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h index 29655a406d..7f589b4146 100644 --- a/include/hw/virtio/virtio-blk.h +++ b/include/hw/virtio/virtio-blk.h @@ -19,6 +19,7 @@ #include "hw/block/block.h" #include "sysemu/iothread.h" #include "sysemu/block-backend.h" +#include "sysemu/block-ram-registrar.h" #include "qom/object.h" #define TYPE_VIRTIO_BLK "virtio-blk-device" @@ -64,6 +65,7 @@ struct VirtIOBlock { struct VirtIOBlockDataPlane *dataplane; uint64_t host_features; size_t config_size; + BlockRAMRegistrar blk_ram_registrar; }; typedef struct VirtIOBlockReq { @@ -90,7 +92,7 @@ typedef struct MultiReqBuffer { bool is_write; } MultiReqBuffer; -bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq); +void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq); void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh); #endif diff --git a/include/hw/virtio/virtio-bus.h b/include/hw/virtio/virtio-bus.h index ef8abe49c5..7ab8c9dab0 100644 --- a/include/hw/virtio/virtio-bus.h +++ b/include/hw/virtio/virtio-bus.h @@ -93,6 +93,7 @@ struct VirtioBusClass { */ bool has_variable_vring_alignment; AddressSpace *(*get_dma_as)(DeviceState *d); + bool (*iommu_enabled)(DeviceState *d); }; struct VirtioBusState { @@ -154,5 +155,6 @@ void virtio_bus_release_ioeventfd(VirtioBusState *bus); int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign); /* Tell the bus that the ioeventfd handler is no longer required. 
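The recurring #ifdef HOST_WORDS_BIGENDIAN to #if HOST_BIG_ENDIAN conversions in these hunks rely on the new macro always being defined as 0 or 1; a small illustration of what that buys (both helpers are hypothetical):

    /* With a 0/1 macro the test works both in the preprocessor and as
     * an ordinary C expression. */
    static inline uint16_t my_cpu_to_le16(uint16_t v)
    {
    #if HOST_BIG_ENDIAN
        return bswap16(v);       /* big-endian host: byte-swap */
    #else
        return v;                /* little-endian host: as-is */
    #endif
    }

    static inline uint16_t my_cpu_to_le16_rt(uint16_t v)
    {
        return HOST_BIG_ENDIAN ? bswap16(v) : v;
    }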
*/ void virtio_bus_cleanup_host_notifier(VirtioBusState *bus, int n); - +/* Whether the IOMMU is enabled for this device */ +bool virtio_bus_device_iommu_enabled(VirtIODevice *vdev); #endif /* VIRTIO_BUS_H */ diff --git a/include/hw/virtio/virtio-crypto.h b/include/hw/virtio/virtio-crypto.h index a2228d7b2e..348749f5d5 100644 --- a/include/hw/virtio/virtio-crypto.h +++ b/include/hw/virtio/virtio-crypto.h @@ -50,6 +50,7 @@ typedef struct VirtIOCryptoConf { uint32_t mac_algo_l; uint32_t mac_algo_h; uint32_t aead_algo; + uint32_t akcipher_algo; /* Maximum length of cipher key */ uint32_t max_cipher_key_len; @@ -71,9 +72,7 @@ typedef struct VirtIOCryptoReq { size_t in_len; VirtQueue *vq; struct VirtIOCrypto *vcrypto; - union { - CryptoDevBackendSymOpInfo *sym_op_info; - } u; + CryptoDevBackendOpInfo op_info; } VirtIOCryptoReq; typedef struct VirtIOCryptoQueue { diff --git a/include/hw/virtio/virtio-gpu-bswap.h b/include/hw/virtio/virtio-gpu-bswap.h index e2bee8f595..9124108485 100644 --- a/include/hw/virtio/virtio-gpu-bswap.h +++ b/include/hw/virtio/virtio-gpu-bswap.h @@ -24,13 +24,12 @@ virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr) le32_to_cpus(&hdr->flags); le64_to_cpus(&hdr->fence_id); le32_to_cpus(&hdr->ctx_id); - le32_to_cpus(&hdr->padding); } static inline void virtio_gpu_bswap_32(void *ptr, size_t size) { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN size_t i; struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr; diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h index 24c6628944..2e28507efe 100644 --- a/include/hw/virtio/virtio-gpu.h +++ b/include/hw/virtio/virtio-gpu.h @@ -22,6 +22,7 @@ #include "sysemu/vhost-user-backend.h" #include "standard-headers/linux/virtio_gpu.h" +#include "standard-headers/linux/virtio_ids.h" #include "qom/object.h" #define TYPE_VIRTIO_GPU_BASE "virtio-gpu-base" @@ -37,8 +38,6 @@ OBJECT_DECLARE_SIMPLE_TYPE(VirtIOGPUGL, VIRTIO_GPU_GL) #define TYPE_VHOST_USER_GPU "vhost-user-gpu" OBJECT_DECLARE_SIMPLE_TYPE(VhostUserGPU, VHOST_USER_GPU) -#define VIRTIO_ID_GPU 16 - struct virtio_gpu_simple_resource { uint32_t resource_id; uint32_t width; @@ -81,6 +80,7 @@ struct virtio_gpu_scanout { struct virtio_gpu_requested_state { uint16_t width_mm, height_mm; uint32_t width, height; + uint32_t refresh_rate; int x, y; }; @@ -147,8 +147,8 @@ struct VirtIOGPUBaseClass { DEFINE_PROP_UINT32("max_outputs", _state, _conf.max_outputs, 1), \ DEFINE_PROP_BIT("edid", _state, _conf.flags, \ VIRTIO_GPU_FLAG_EDID_ENABLED, true), \ - DEFINE_PROP_UINT32("xres", _state, _conf.xres, 1024), \ - DEFINE_PROP_UINT32("yres", _state, _conf.yres, 768) + DEFINE_PROP_UINT32("xres", _state, _conf.xres, 1280), \ + DEFINE_PROP_UINT32("yres", _state, _conf.yres, 800) typedef struct VGPUDMABuf { QemuDmaBuf buf; @@ -187,7 +187,7 @@ struct VirtIOGPU { struct { QTAILQ_HEAD(, VGPUDMABuf) bufs; - VGPUDMABuf *primary; + VGPUDMABuf *primary[VIRTIO_GPU_MAX_SCANOUTS]; } dmabuf; }; @@ -273,7 +273,8 @@ void virtio_gpu_fini_udmabuf(struct virtio_gpu_simple_resource *res); int virtio_gpu_update_dmabuf(VirtIOGPU *g, uint32_t scanout_id, struct virtio_gpu_simple_resource *res, - struct virtio_gpu_framebuffer *fb); + struct virtio_gpu_framebuffer *fb, + struct virtio_gpu_rect *r); /* virtio-gpu-3d.c */ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g, diff --git a/include/hw/virtio/virtio-iommu.h b/include/hw/virtio/virtio-iommu.h index 273e35c04b..2ad5ee320b 100644 --- a/include/hw/virtio/virtio-iommu.h +++ b/include/hw/virtio/virtio-iommu.h @@ -26,7 +26,7 @@ 
#include "qom/object.h" #define TYPE_VIRTIO_IOMMU "virtio-iommu-device" -#define TYPE_VIRTIO_IOMMU_PCI "virtio-iommu-device-base" +#define TYPE_VIRTIO_IOMMU_PCI "virtio-iommu-pci" OBJECT_DECLARE_SIMPLE_TYPE(VirtIOIOMMU, VIRTIO_IOMMU) #define TYPE_VIRTIO_IOMMU_MEMORY_REGION "virtio-iommu-memory-region" @@ -37,6 +37,8 @@ typedef struct IOMMUDevice { int devfn; IOMMUMemoryRegion iommu_mr; AddressSpace as; + MemoryRegion root; /* The root container of the device */ + MemoryRegion bypass_mr; /* The alias of shared memory MR */ } IOMMUDevice; typedef struct IOMMUPciBus { @@ -56,8 +58,9 @@ struct VirtIOIOMMU { ReservedRegion *reserved_regions; uint32_t nb_reserved_regions; GTree *domains; - QemuMutex mutex; + QemuRecMutex mutex; GTree *endpoints; + bool boot_bypass; }; #endif diff --git a/include/hw/virtio/virtio-mem.h b/include/hw/virtio/virtio-mem.h index 9a6e348fa2..7745cfc1a3 100644 --- a/include/hw/virtio/virtio-mem.h +++ b/include/hw/virtio/virtio-mem.h @@ -30,6 +30,8 @@ OBJECT_DECLARE_TYPE(VirtIOMEM, VirtIOMEMClass, #define VIRTIO_MEM_REQUESTED_SIZE_PROP "requested-size" #define VIRTIO_MEM_BLOCK_SIZE_PROP "block-size" #define VIRTIO_MEM_ADDR_PROP "memaddr" +#define VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP "unplugged-inaccessible" +#define VIRTIO_MEM_PREALLOC_PROP "prealloc" struct VirtIOMEM { VirtIODevice parent_obj; @@ -62,12 +64,19 @@ struct VirtIOMEM { /* block size and alignment */ uint64_t block_size; + /* + * Whether we indicate VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE to the guest. + * For !x86 targets this will always be "on" and consequently indicate + * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE. + */ + OnOffAuto unplugged_inaccessible; + + /* whether to prealloc memory when plugging new blocks */ + bool prealloc; + /* notifiers to notify when "size" changes */ NotifierList size_change_notifiers; - /* don't migrate unplugged memory */ - NotifierWithReturn precopy_notifier; - /* listeners to notify on plug/unplug activity. */ QLIST_HEAD(, RamDiscardListener) rdl_list; }; diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h index 824a69c23f..ef234ffe7e 100644 --- a/include/hw/virtio/virtio-net.h +++ b/include/hw/virtio/virtio-net.h @@ -35,6 +35,9 @@ OBJECT_DECLARE_SIMPLE_TYPE(VirtIONet, VIRTIO_NET) * and latency. */ #define TX_BURST 256 +/* Maximum VIRTIO_NET_CTRL_MAC_TABLE_SET unicast + multicast entries. 
*/ +#define MAC_TABLE_ENTRIES 64 + typedef struct virtio_net_conf { uint32_t txtimer; @@ -194,8 +197,9 @@ struct VirtIONet { NICConf nic_conf; DeviceState *qdev; int multiqueue; - uint16_t max_queues; - uint16_t curr_queues; + uint16_t max_queue_pairs; + uint16_t curr_queue_pairs; + uint16_t max_ncs; size_t config_size; char *netclient_name; char *netclient_type; @@ -209,12 +213,18 @@ struct VirtIONet { bool failover_primary_hidden; bool failover; DeviceListener primary_listener; + QDict *primary_opts; + bool primary_opts_from_json; Notifier migration_state; VirtioNetRssData rss_data; struct NetRxPkt *rx_pkt; struct EBPFRSSContext ebpf_rss; }; +size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev, + const struct iovec *in_sg, unsigned in_num, + const struct iovec *out_sg, + unsigned out_num); void virtio_net_set_netclient_name(VirtIONet *n, const char *name, const char *type); diff --git a/hw/virtio/virtio-pci.h b/include/hw/virtio/virtio-pci.h similarity index 98% rename from hw/virtio/virtio-pci.h rename to include/hw/virtio/virtio-pci.h index 2446dcd9ae..938799e8f6 100644 --- a/hw/virtio/virtio-pci.h +++ b/include/hw/virtio/virtio-pci.h @@ -117,6 +117,11 @@ typedef struct VirtIOPCIRegion { typedef struct VirtIOPCIQueue { uint16_t num; bool enabled; + /* + * No need to migrate the reset status, because it is always 0 + * when the migration starts. + */ + bool reset; uint32_t desc[2]; uint32_t avail[2]; uint32_t used[2]; diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h index 543681bc18..a36aad9c86 100644 --- a/include/hw/virtio/virtio-scsi.h +++ b/include/hw/virtio/virtio-scsi.h @@ -55,10 +55,8 @@ struct VirtIOSCSIConf { bool seg_max_adjust; uint32_t max_sectors; uint32_t cmd_per_lun; -#ifdef CONFIG_VHOST_SCSI char *vhostfd; char *wwpn; -#endif CharBackend chardev; uint32_t boot_tpgt; IOThread *iothread; @@ -94,42 +92,6 @@ struct VirtIOSCSI { uint32_t host_features; }; -typedef struct VirtIOSCSIReq { - /* Note: - * - fields up to resp_iov are initialized by virtio_scsi_init_req; - * - fields starting at vring are zeroed by virtio_scsi_init_req. 
- * */ - VirtQueueElement elem; - - VirtIOSCSI *dev; - VirtQueue *vq; - QEMUSGList qsgl; - QEMUIOVector resp_iov; - - union { - /* Used for two-stage request submission */ - QTAILQ_ENTRY(VirtIOSCSIReq) next; - - /* Used for cancellation of request during TMFs */ - int remaining; - }; - - SCSIRequest *sreq; - size_t resp_size; - enum SCSIXferMode mode; - union { - VirtIOSCSICmdResp cmd; - VirtIOSCSICtrlTMFResp tmf; - VirtIOSCSICtrlANResp an; - VirtIOSCSIEvent event; - } resp; - union { - VirtIOSCSICmdReq cmd; - VirtIOSCSICtrlTMFReq tmf; - VirtIOSCSICtrlANReq an; - } req; -} VirtIOSCSIReq; - static inline void virtio_scsi_acquire(VirtIOSCSI *s) { if (s->ctx) { @@ -151,13 +113,6 @@ void virtio_scsi_common_realize(DeviceState *dev, Error **errp); void virtio_scsi_common_unrealize(DeviceState *dev); -bool virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq); -bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq); -bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq); -void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req); -void virtio_scsi_free_req(VirtIOSCSIReq *req); -void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev, - uint32_t event, uint32_t reason); void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp); int virtio_scsi_dataplane_start(VirtIODevice *s); diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h index 8bab9cfb75..acfd4df125 100644 --- a/include/hw/virtio/virtio.h +++ b/include/hw/virtio/virtio.h @@ -22,8 +22,14 @@ #include "standard-headers/linux/virtio_config.h" #include "standard-headers/linux/virtio_ring.h" #include "qom/object.h" +#include "hw/virtio/vhost.h" -/* A guest should never accept this. It implies negotiation is broken. */ +/* + * A guest should never accept this. It implies negotiation is broken + * between the driver frontend and the device. This bit is re-used for + * vhost-user to advertise VHOST_USER_F_PROTOCOL_FEATURES between QEMU + * and a vhost-user backend. + */ #define VIRTIO_F_BAD_FEATURE 30 #define VIRTIO_LEGACY_FEATURES ((0x1ULL << VIRTIO_F_BAD_FEATURE) | \ @@ -43,8 +49,14 @@ typedef struct VirtIOFeature { size_t end; } VirtIOFeature; -size_t virtio_feature_get_config_size(const VirtIOFeature *features, - uint64_t host_features); +typedef struct VirtIOConfigSizeParams { + size_t min_size; + size_t max_size; + const VirtIOFeature *feature_sizes; +} VirtIOConfigSizeParams; + +size_t virtio_get_config_size(const VirtIOConfigSizeParams *params, + uint64_t host_features); typedef struct VirtQueue VirtQueue; @@ -70,6 +82,11 @@ typedef struct VirtQueueElement #define TYPE_VIRTIO_DEVICE "virtio-device" OBJECT_DECLARE_TYPE(VirtIODevice, VirtioDeviceClass, VIRTIO_DEVICE) +typedef struct { + int virtio_bit; + const char *feature_desc; +} qmp_virtio_feature_map_t; + enum virtio_device_endian { VIRTIO_DEVICE_ENDIAN_UNKNOWN, VIRTIO_DEVICE_ENDIAN_LITTLE, @@ -94,20 +111,30 @@ struct VirtIODevice VirtQueue *vq; MemoryListener listener; uint16_t device_id; + /* @vm_running: current VM running state via virtio_vmstate_change() */ bool vm_running; bool broken; /* device in invalid state, needs reset */ bool use_disabled_flag; /* allow use of 'disable' flag when needed */ bool disabled; /* device in temporarily disabled state */ + /** + * @use_started: true if the @started flag should be used to check the + * current state of the VirtIO device. Otherwise status bits + * should be checked for a current status of the device. 
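A brief sketch of how the new VirtIOConfigSizeParams is meant to be filled in and handed to virtio_get_config_size(); the config struct, the MY_FEATURE_BIT define and the sizes are hypothetical, and the VirtIOFeature entries are assumed to pair a feature-bit mask with an end offset as in the existing type:

    /* Sketch: derive the visible config size from negotiated features. */
    #define MY_FEATURE_BIT 5                  /* hypothetical feature bit */

    struct my_config {
        uint32_t base_field;
        uint32_t optional_field;              /* only with MY_FEATURE_BIT */
    };

    static const VirtIOFeature my_feature_sizes[] = {
        { .flags = 1ULL << MY_FEATURE_BIT,
          .end = offsetof(struct my_config, optional_field) + sizeof(uint32_t) },
        {}
    };

    static const VirtIOConfigSizeParams my_cfg_size_params = {
        .min_size = offsetof(struct my_config, optional_field),
        .max_size = sizeof(struct my_config),
        .feature_sizes = my_feature_sizes,
    };

    /* at realize: config_size = virtio_get_config_size(&my_cfg_size_params,
     *                                                  host_features); */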
+ * @use_started is only set via QMP and defaults to true for all + * modern machines (since 4.1). + */ bool use_started; bool started; bool start_on_kick; /* when virtio 1.0 feature has not been negotiated */ bool disable_legacy_check; + bool vhost_started; VMChangeStateEntry *vmstate; char *bus_name; uint8_t device_endian; bool use_guest_notifier_mask; AddressSpace *dma_as; QLIST_HEAD(, VirtQueue) *vector_queues; + QTAILQ_ENTRY(VirtIODevice) next; }; struct VirtioDeviceClass { @@ -128,6 +155,10 @@ struct VirtioDeviceClass { void (*set_config)(VirtIODevice *vdev, const uint8_t *config); void (*reset)(VirtIODevice *vdev); void (*set_status)(VirtIODevice *vdev, uint8_t val); + /* Device must validate queue_index. */ + void (*queue_reset)(VirtIODevice *vdev, uint32_t queue_index); + /* Device must validate queue_index. */ + void (*queue_enable)(VirtIODevice *vdev, uint32_t queue_index); /* For transitional devices, this is a bitmap of features * that are only exposed on the legacy interface but not * the modern one. @@ -160,22 +191,22 @@ struct VirtioDeviceClass { int (*post_load)(VirtIODevice *vdev); const VMStateDescription *vmsd; bool (*primary_unplug_pending)(void *opaque); + struct vhost_dev *(*get_vhost)(VirtIODevice *vdev); }; void virtio_instance_init_common(Object *proxy_obj, void *data, size_t vdev_size, const char *vdev_name); -void virtio_init(VirtIODevice *vdev, const char *name, - uint16_t device_id, size_t config_size); +void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size); + void virtio_cleanup(VirtIODevice *vdev); -void virtio_error(VirtIODevice *vdev, const char *fmt, ...) GCC_FMT_ATTR(2, 3); +void virtio_error(VirtIODevice *vdev, const char *fmt, ...) G_GNUC_PRINTF(2, 3); /* Set the child bus name. */ void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name); typedef void (*VirtIOHandleOutput)(VirtIODevice *, VirtQueue *); -typedef bool (*VirtIOHandleAIOOutput)(VirtIODevice *, VirtQueue *); VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size, VirtIOHandleOutput handle_output); @@ -266,6 +297,8 @@ int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n, MemoryRegion *mr, bool assign); int virtio_set_status(VirtIODevice *vdev, uint8_t val); void virtio_reset(void *opaque); +void virtio_queue_reset(VirtIODevice *vdev, uint32_t queue_index); +void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index); void virtio_update_irq(VirtIODevice *vdev); int virtio_set_features(VirtIODevice *vdev, uint64_t val); @@ -289,7 +322,9 @@ typedef struct VirtIORNGConf VirtIORNGConf; DEFINE_PROP_BIT64("iommu_platform", _state, _field, \ VIRTIO_F_IOMMU_PLATFORM, false), \ DEFINE_PROP_BIT64("packed", _state, _field, \ - VIRTIO_F_RING_PACKED, false) + VIRTIO_F_RING_PACKED, false), \ + DEFINE_PROP_BIT64("queue_reset", _state, _field, \ + VIRTIO_F_RING_RESET, true) hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n); bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n); @@ -317,8 +352,9 @@ bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev); EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq); void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled); void virtio_queue_host_notifier_read(EventNotifier *n); -void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx, - VirtIOHandleAIOOutput handle_output); +void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx); +void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx); +void 
virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx); VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector); VirtQueue *virtio_vector_next_queue(VirtQueue *vq); @@ -362,6 +398,16 @@ static inline bool virtio_is_big_endian(VirtIODevice *vdev) return false; } +/** + * virtio_device_started() - check if device started + * @vdev - the VirtIO device + * @status - the devices status bits + * + * Check if the device is started. For most modern machines this is + * tracked via the @vdev->started field (to support migration), + * otherwise we check for the final negotiated status bit that + * indicates everything is ready. + */ static inline bool virtio_device_started(VirtIODevice *vdev, uint8_t status) { if (vdev->use_started) { @@ -371,6 +417,24 @@ static inline bool virtio_device_started(VirtIODevice *vdev, uint8_t status) return status & VIRTIO_CONFIG_S_DRIVER_OK; } +/** + * virtio_device_should_start() - check if device startable + * @vdev - the VirtIO device + * @status - the devices status bits + * + * This is similar to virtio_device_started() but also encapsulates a + * check on the VM status which would prevent a device starting + * anyway. + */ +static inline bool virtio_device_should_start(VirtIODevice *vdev, uint8_t status) +{ + if (!vdev->vm_running) { + return false; + } + + return virtio_device_started(vdev, status); +} + static inline void virtio_set_started(VirtIODevice *vdev, bool started) { if (started) { diff --git a/include/hw/watchdog/wdt_aspeed.h b/include/hw/watchdog/wdt_aspeed.h index 80b03661e3..dfa5dfa424 100644 --- a/include/hw/watchdog/wdt_aspeed.h +++ b/include/hw/watchdog/wdt_aspeed.h @@ -19,6 +19,7 @@ OBJECT_DECLARE_TYPE(AspeedWDTState, AspeedWDTClass, ASPEED_WDT) #define TYPE_ASPEED_2400_WDT TYPE_ASPEED_WDT "-ast2400" #define TYPE_ASPEED_2500_WDT TYPE_ASPEED_WDT "-ast2500" #define TYPE_ASPEED_2600_WDT TYPE_ASPEED_WDT "-ast2600" +#define TYPE_ASPEED_1030_WDT TYPE_ASPEED_WDT "-ast1030" #define ASPEED_WDT_REGS_MAX (0x20 / 4) @@ -44,6 +45,9 @@ struct AspeedWDTClass { uint32_t reset_ctrl_reg; void (*reset_pulse)(AspeedWDTState *s, uint32_t property); void (*wdt_reload)(AspeedWDTState *s); + uint64_t (*sanitize_ctrl)(uint64_t data); + uint32_t default_status; + uint32_t default_reload_value; }; #endif /* WDT_ASPEED_H */ diff --git a/include/hw/watchdog/wdt_imx2.h b/include/hw/watchdog/wdt_imx2.h index 023d83f48f..600a552d2e 100644 --- a/include/hw/watchdog/wdt_imx2.h +++ b/include/hw/watchdog/wdt_imx2.h @@ -9,8 +9,8 @@ * See the COPYING file in the top-level directory. */ -#ifndef IMX2_WDT_H -#define IMX2_WDT_H +#ifndef WDT_IMX2_H +#define WDT_IMX2_H #include "qemu/bitops.h" #include "hw/sysbus.h" @@ -88,4 +88,4 @@ struct IMX2WdtState { bool wcr_wdt_locked; /* affects WDT (never cleared) */ }; -#endif /* IMX2_WDT_H */ +#endif /* WDT_IMX2_H */ diff --git a/include/hw/xen/xen-bus-helper.h b/include/hw/xen/xen-bus-helper.h index 4c0f747445..629a904d1a 100644 --- a/include/hw/xen/xen-bus-helper.h +++ b/include/hw/xen/xen-bus-helper.h @@ -22,11 +22,11 @@ void xs_node_destroy(struct xs_handle *xsh, xs_transaction_t tid, void xs_node_vprintf(struct xs_handle *xsh, xs_transaction_t tid, const char *node, const char *key, Error **errp, const char *fmt, va_list ap) - GCC_FMT_ATTR(6, 0); + G_GNUC_PRINTF(6, 0); void xs_node_printf(struct xs_handle *xsh, xs_transaction_t tid, const char *node, const char *key, Error **errp, const char *fmt, ...) 
- GCC_FMT_ATTR(6, 7); + G_GNUC_PRINTF(6, 7); /* Read from node/key unless node is empty, in which case read from key */ int xs_node_vscanf(struct xs_handle *xsh, xs_transaction_t tid, diff --git a/include/hw/xen/xen-bus.h b/include/hw/xen/xen-bus.h index 6bdbf3ff82..713e763348 100644 --- a/include/hw/xen/xen-bus.h +++ b/include/hw/xen/xen-bus.h @@ -88,10 +88,10 @@ enum xenbus_state xen_device_backend_get_state(XenDevice *xendev); void xen_device_backend_printf(XenDevice *xendev, const char *key, const char *fmt, ...) - GCC_FMT_ATTR(3, 4); + G_GNUC_PRINTF(3, 4); void xen_device_frontend_printf(XenDevice *xendev, const char *key, const char *fmt, ...) - GCC_FMT_ATTR(3, 4); + G_GNUC_PRINTF(3, 4); int xen_device_frontend_scanf(XenDevice *xendev, const char *key, const char *fmt, ...); diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h index 0f9962b1c1..afdf9c436a 100644 --- a/include/hw/xen/xen.h +++ b/include/hw/xen/xen.h @@ -21,8 +21,8 @@ extern enum xen_mode xen_mode; extern bool xen_domid_restrict; int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num); +int xen_set_pci_link_route(uint8_t link, uint8_t irq); void xen_piix3_set_irq(void *opaque, int irq_num, int level); -void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len); void xen_hvm_inject_msi(uint64_t addr, uint32_t data); int xen_is_pirq_msi(uint32_t msi_data); diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_common.h index a8118b41ac..77ce17d8a4 100644 --- a/include/hw/xen/xen_common.h +++ b/include/hw/xen/xen_common.h @@ -316,12 +316,6 @@ static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment, device, intx, level); } -static inline int xen_set_pci_link_route(domid_t domid, uint8_t link, - uint8_t irq) -{ - return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq); -} - static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr, uint32_t msi_data) { @@ -358,7 +352,7 @@ static inline int xen_restrict(domid_t domid) void destroy_hvm_domain(bool reboot); /* shutdown/destroy current domain because of an error */ -void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2); +void xen_shutdown_fatal_error(const char *fmt, ...) G_GNUC_PRINTF(1, 2); #ifdef HVM_PARAM_VMPORT_REGS_PFN static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom, diff --git a/include/hw/xen/xen_pvdev.h b/include/hw/xen/xen_pvdev.h index 83e5174d90..7cd4bc2b82 100644 --- a/include/hw/xen/xen_pvdev.h +++ b/include/hw/xen/xen_pvdev.h @@ -76,6 +76,6 @@ void xen_pv_unbind_evtchn(struct XenLegacyDevice *xendev); int xen_pv_send_notify(struct XenLegacyDevice *xendev); void xen_pv_printf(struct XenLegacyDevice *xendev, int msg_level, - const char *fmt, ...) GCC_FMT_ATTR(3, 4); + const char *fmt, ...) G_GNUC_PRINTF(3, 4); #endif /* QEMU_HW_XEN_PVDEV_H */ diff --git a/include/io/channel-command.h b/include/io/channel-command.h index 27e42bdadc..98934e6d9e 100644 --- a/include/io/channel-command.h +++ b/include/io/channel-command.h @@ -41,35 +41,13 @@ struct QIOChannelCommand { QIOChannel parent; int writefd; int readfd; - pid_t pid; + GPid pid; +#ifdef WIN32 + bool blocking; +#endif }; -/** - * qio_channel_command_new_pid: - * @writefd: the FD connected to the command's stdin - * @readfd: the FD connected to the command's stdout - * @pid: the PID of the running child command - * @errp: pointer to a NULL-initialized error object - * - * Create a channel for performing I/O with the - * previously spawned command identified by @pid. 
- * The two file descriptors provide the connection - * to command's stdio streams, either one or which - * may be -1 to indicate that stream is not open. - * - * The channel will take ownership of the process - * @pid and will kill it when closing the channel. - * Similarly it will take responsibility for - * closing the file descriptors @writefd and @readfd. - * - * Returns: the command channel object, or NULL on error - */ -QIOChannelCommand * -qio_channel_command_new_pid(int writefd, - int readfd, - pid_t pid); - /** * qio_channel_command_new_spawn: * @argv: the NULL terminated list of command arguments diff --git a/include/io/channel-null.h b/include/io/channel-null.h new file mode 100644 index 0000000000..f6d54e63cf --- /dev/null +++ b/include/io/channel-null.h @@ -0,0 +1,55 @@ +/* + * QEMU I/O channels null driver + * + * Copyright (c) 2022 Red Hat, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +#ifndef QIO_CHANNEL_FILE_H +#define QIO_CHANNEL_FILE_H + +#include "io/channel.h" +#include "qom/object.h" + +#define TYPE_QIO_CHANNEL_NULL "qio-channel-null" +OBJECT_DECLARE_SIMPLE_TYPE(QIOChannelNull, QIO_CHANNEL_NULL) + + +/** + * QIOChannelNull: + * + * The QIOChannelNull object provides a channel implementation + * that discards all writes and returns EOF for all reads. + */ + +struct QIOChannelNull { + QIOChannel parent; + bool closed; +}; + + +/** + * qio_channel_null_new: + * + * Create a new IO channel object that discards all writes + * and returns EOF for all reads. 
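A tiny usage sketch for the null channel created by qio_channel_null_new() (declared just below); the demo function is hypothetical and uses &error_abort for brevity:

    /* Sketch: writes are accepted and discarded, reads report EOF. */
    static void null_channel_demo(void)
    {
        QIOChannelNull *null = qio_channel_null_new();
        QIOChannel *ioc = QIO_CHANNEL(null);
        char buf[16];
        ssize_t got;

        qio_channel_write(ioc, "discarded", 9, &error_abort);
        got = qio_channel_read(ioc, buf, sizeof(buf), &error_abort);
        g_assert(got == 0);              /* end-of-file */
        object_unref(OBJECT(null));
    }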
+ * + * Returns: the new channel object + */ +QIOChannelNull * +qio_channel_null_new(void); + +#endif /* QIO_CHANNEL_NULL_H */ diff --git a/include/io/channel-socket.h b/include/io/channel-socket.h index e747e63514..513c428fe4 100644 --- a/include/io/channel-socket.h +++ b/include/io/channel-socket.h @@ -47,6 +47,8 @@ struct QIOChannelSocket { socklen_t localAddrLen; struct sockaddr_storage remoteAddr; socklen_t remoteAddrLen; + ssize_t zero_copy_queued; + ssize_t zero_copy_sent; }; diff --git a/include/io/channel.h b/include/io/channel.h index 88988979f8..c680ee7480 100644 --- a/include/io/channel.h +++ b/include/io/channel.h @@ -32,12 +32,15 @@ OBJECT_DECLARE_TYPE(QIOChannel, QIOChannelClass, #define QIO_CHANNEL_ERR_BLOCK -2 +#define QIO_CHANNEL_WRITE_FLAG_ZERO_COPY 0x1 + typedef enum QIOChannelFeature QIOChannelFeature; enum QIOChannelFeature { QIO_CHANNEL_FEATURE_FD_PASS, QIO_CHANNEL_FEATURE_SHUTDOWN, QIO_CHANNEL_FEATURE_LISTEN, + QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY, }; @@ -104,6 +107,7 @@ struct QIOChannelClass { size_t niov, int *fds, size_t nfds, + int flags, Error **errp); ssize_t (*io_readv)(QIOChannel *ioc, const struct iovec *iov, @@ -136,6 +140,8 @@ struct QIOChannelClass { IOHandler *io_read, IOHandler *io_write, void *opaque); + int (*io_flush)(QIOChannel *ioc, + Error **errp); }; /* General I/O handling functions */ @@ -228,6 +234,7 @@ ssize_t qio_channel_readv_full(QIOChannel *ioc, * @niov: the length of the @iov array * @fds: an array of file handles to send * @nfds: number of file handles in @fds + * @flags: write flags (QIO_CHANNEL_WRITE_FLAG_*) * @errp: pointer to a NULL-initialized error object * * Write data to the IO channel, reading it from the @@ -260,6 +267,7 @@ ssize_t qio_channel_writev_full(QIOChannel *ioc, size_t niov, int *fds, size_t nfds, + int flags, Error **errp); /** @@ -837,6 +845,7 @@ int qio_channel_readv_full_all(QIOChannel *ioc, * @niov: the length of the @iov array * @fds: an array of file handles to send * @nfds: number of file handles in @fds + * @flags: write flags (QIO_CHANNEL_WRITE_FLAG_*) * @errp: pointer to a NULL-initialized error object * * @@ -846,6 +855,14 @@ int qio_channel_readv_full_all(QIOChannel *ioc, * to be written, yielding from the current coroutine * if required. * + * If QIO_CHANNEL_WRITE_FLAG_ZERO_COPY is passed in flags, + * instead of waiting for all requested data to be written, + * this function will wait until it's all queued for writing. + * In this case, if the buffer gets changed between queueing and + * sending, the updated buffer will be sent. If this is not a + * desired behavior, it's suggested to call qio_channel_flush() + * before reusing the buffer. + * * Returns: 0 if all bytes were written, or -1 on error */ @@ -853,6 +870,25 @@ int qio_channel_writev_full_all(QIOChannel *ioc, const struct iovec *iov, size_t niov, int *fds, size_t nfds, - Error **errp); + int flags, Error **errp); + +/** + * qio_channel_flush: + * @ioc: the channel object + * @errp: pointer to a NULL-initialized error object + * + * Will block until every packet queued with + * qio_channel_writev_full() + QIO_CHANNEL_WRITE_FLAG_ZERO_COPY + * is sent, or return in case of any error. + * + * If not implemented, acts as a no-op, and returns 0. + * + * Returns -1 if any error is found, + * 1 if every send failed to use zero copy. + * 0 otherwise. 
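Putting QIO_CHANNEL_WRITE_FLAG_ZERO_COPY and qio_channel_flush() (declared right after this comment) together, a hedged sketch of a send path that only opts in when the channel advertises the feature:

    /* Sketch: queue a (possibly zero-copy) send, then flush before the
     * caller is allowed to modify or free the buffers. */
    static int send_maybe_zero_copy(QIOChannel *ioc, const struct iovec *iov,
                                    size_t niov, Error **errp)
    {
        int flags = 0;

        if (qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY)) {
            flags |= QIO_CHANNEL_WRITE_FLAG_ZERO_COPY;
        }
        if (qio_channel_writev_full_all(ioc, iov, niov, NULL, 0, flags, errp) < 0) {
            return -1;
        }
        if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
            return qio_channel_flush(ioc, errp);   /* wait for the kernel */
        }
        return 0;
    }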
+ */ + +int qio_channel_flush(QIOChannel *ioc, + Error **errp); #endif /* QIO_CHANNEL_H */ diff --git a/include/libdecnumber/dconfig.h b/include/libdecnumber/dconfig.h index 0f7dccef1f..2bc0ba7f14 100644 --- a/include/libdecnumber/dconfig.h +++ b/include/libdecnumber/dconfig.h @@ -28,7 +28,7 @@ 02110-1301, USA. */ -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN #define WORDS_BIGENDIAN 1 #else #define WORDS_BIGENDIAN 0 diff --git a/include/libdecnumber/decNumber.h b/include/libdecnumber/decNumber.h index aa115fed07..41bc2a0d36 100644 --- a/include/libdecnumber/decNumber.h +++ b/include/libdecnumber/decNumber.h @@ -116,12 +116,16 @@ decNumber * decNumberFromUInt32(decNumber *, uint32_t); decNumber *decNumberFromInt64(decNumber *, int64_t); decNumber *decNumberFromUInt64(decNumber *, uint64_t); + decNumber *decNumberFromInt128(decNumber *, uint64_t, int64_t); + decNumber *decNumberFromUInt128(decNumber *, uint64_t, uint64_t); decNumber * decNumberFromString(decNumber *, const char *, decContext *); char * decNumberToString(const decNumber *, char *); char * decNumberToEngString(const decNumber *, char *); uint32_t decNumberToUInt32(const decNumber *, decContext *); int32_t decNumberToInt32(const decNumber *, decContext *); int64_t decNumberIntegralToInt64(const decNumber *dn, decContext *set); + void decNumberIntegralToInt128(const decNumber *dn, decContext *set, + uint64_t *plow, uint64_t *phigh); uint8_t * decNumberGetBCD(const decNumber *, uint8_t *); decNumber * decNumberSetBCD(decNumber *, const uint8_t *, uint32_t); diff --git a/include/libdecnumber/decNumberLocal.h b/include/libdecnumber/decNumberLocal.h index 4d53c077f2..6198ca8593 100644 --- a/include/libdecnumber/decNumberLocal.h +++ b/include/libdecnumber/decNumberLocal.h @@ -98,7 +98,7 @@ /* Shared lookup tables */ extern const uByte DECSTICKYTAB[10]; /* re-round digits if sticky */ - extern const uLong DECPOWERS[19]; /* powers of ten table */ + extern const uLong DECPOWERS[20]; /* powers of ten table */ /* The following are included from decDPD.h */ extern const uShort DPD2BIN[1024]; /* DPD -> 0-999 */ extern const uShort BIN2DPD[1000]; /* 0-999 -> DPD */ diff --git a/include/migration/blocker.h b/include/migration/blocker.h index acd27018e9..9cebe2ba06 100644 --- a/include/migration/blocker.h +++ b/include/migration/blocker.h @@ -25,6 +25,22 @@ */ int migrate_add_blocker(Error *reason, Error **errp); +/** + * @migrate_add_blocker_internal - prevent migration from proceeding without + * only-migrate implications + * + * @reason - an error to be returned whenever migration is attempted + * + * @errp - [out] The reason (if any) we cannot block migration right now. + * + * @returns - 0 on success, -EBUSY on failure, with errp set. + * + * Some of the migration blockers can be temporary (e.g., for a few seconds), + * so it shouldn't need to conflict with "-only-migratable". For those cases, + * we can call this function rather than @migrate_add_blocker(). 
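For illustration, the usual pairing of migrate_add_blocker_internal() with migrate_del_blocker() around a short-lived operation; the reason text, the static variable and the helper names are placeholders:

    /* Sketch: install a temporary migration blocker and drop it later. */
    static Error *my_blocker;

    static int my_block_migration(Error **errp)
    {
        error_setg(&my_blocker, "mydevice: operation in progress");
        if (migrate_add_blocker_internal(my_blocker, errp) < 0) {
            error_free(my_blocker);
            my_blocker = NULL;
            return -EBUSY;
        }
        return 0;
    }

    static void my_unblock_migration(void)
    {
        if (my_blocker) {
            migrate_del_blocker(my_blocker);
            error_free(my_blocker);
            my_blocker = NULL;
        }
    }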
+ */ +int migrate_add_blocker_internal(Error *reason, Error **errp); + /** * @migrate_del_blocker - remove a blocking error from migration * diff --git a/include/migration/colo.h b/include/migration/colo.h index 768e1f04c3..5fbe1a6d5d 100644 --- a/include/migration/colo.h +++ b/include/migration/colo.h @@ -37,4 +37,5 @@ COLOMode get_colo_mode(void); void colo_do_failover(void); void colo_checkpoint_notify(void *opaque); +void colo_shutdown(void); #endif diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h index 017c03675c..ad24aa1934 100644 --- a/include/migration/vmstate.h +++ b/include/migration/vmstate.h @@ -181,9 +181,7 @@ struct VMStateDescription { int unmigratable; int version_id; int minimum_version_id; - int minimum_version_id_old; MigrationPriority priority; - LoadStateHandler *load_state_old; int (*pre_load)(void *opaque); int (*post_load)(void *opaque, int version_id); int (*pre_save)(void *opaque); diff --git a/include/monitor/hmp-target.h b/include/monitor/hmp-target.h index 60fc92722a..1891a19b21 100644 --- a/include/monitor/hmp-target.h +++ b/include/monitor/hmp-target.h @@ -48,6 +48,8 @@ void hmp_info_mem(Monitor *mon, const QDict *qdict); void hmp_info_tlb(Monitor *mon, const QDict *qdict); void hmp_mce(Monitor *mon, const QDict *qdict); void hmp_info_local_apic(Monitor *mon, const QDict *qdict); -void hmp_info_io_apic(Monitor *mon, const QDict *qdict); +void hmp_info_sev(Monitor *mon, const QDict *qdict); +void hmp_info_sgx(Monitor *mon, const QDict *qdict); +void hmp_info_via(Monitor *mon, const QDict *qdict); #endif /* MONITOR_HMP_TARGET_H */ diff --git a/include/monitor/hmp.h b/include/monitor/hmp.h index 3baa1058e2..dfbc0c9a2f 100644 --- a/include/monitor/hmp.h +++ b/include/monitor/hmp.h @@ -15,8 +15,10 @@ #define HMP_H #include "qemu/readline.h" +#include "qemu/coroutine.h" +#include "qapi/qapi-types-common.h" -void hmp_handle_error(Monitor *mon, Error *err); +bool hmp_handle_error(Monitor *mon, Error *err); void hmp_info_name(Monitor *mon, const QDict *qdict); void hmp_info_version(Monitor *mon, const QDict *qdict); @@ -80,7 +82,7 @@ void hmp_netdev_del(Monitor *mon, const QDict *qdict); void hmp_getfd(Monitor *mon, const QDict *qdict); void hmp_closefd(Monitor *mon, const QDict *qdict); void hmp_sendkey(Monitor *mon, const QDict *qdict); -void hmp_screendump(Monitor *mon, const QDict *qdict); +void coroutine_fn hmp_screendump(Monitor *mon, const QDict *qdict); void hmp_chardev_add(Monitor *mon, const QDict *qdict); void hmp_chardev_change(Monitor *mon, const QDict *qdict); void hmp_chardev_remove(Monitor *mon, const QDict *qdict); @@ -94,6 +96,11 @@ void hmp_qom_list(Monitor *mon, const QDict *qdict); void hmp_qom_get(Monitor *mon, const QDict *qdict); void hmp_qom_set(Monitor *mon, const QDict *qdict); void hmp_info_qom_tree(Monitor *mon, const QDict *dict); +void hmp_virtio_query(Monitor *mon, const QDict *qdict); +void hmp_virtio_status(Monitor *mon, const QDict *qdict); +void hmp_virtio_queue_status(Monitor *mon, const QDict *qdict); +void hmp_vhost_queue_status(Monitor *mon, const QDict *qdict); +void hmp_virtio_queue_element(Monitor *mon, const QDict *qdict); void object_add_completion(ReadLineState *rs, int nb_args, const char *str); void object_del_completion(ReadLineState *rs, int nb_args, const char *str); void device_add_completion(ReadLineState *rs, int nb_args, const char *str); @@ -124,12 +131,17 @@ void hmp_info_ramblock(Monitor *mon, const QDict *qdict); void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict); void 
hmp_info_vm_generation_id(Monitor *mon, const QDict *qdict); void hmp_info_memory_size_summary(Monitor *mon, const QDict *qdict); -void hmp_info_sev(Monitor *mon, const QDict *qdict); void hmp_info_replay(Monitor *mon, const QDict *qdict); void hmp_replay_break(Monitor *mon, const QDict *qdict); void hmp_replay_delete_break(Monitor *mon, const QDict *qdict); void hmp_replay_seek(Monitor *mon, const QDict *qdict); void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict); void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict); +void hmp_set_vcpu_dirty_limit(Monitor *mon, const QDict *qdict); +void hmp_cancel_vcpu_dirty_limit(Monitor *mon, const QDict *qdict); +void hmp_info_vcpu_dirty_limit(Monitor *mon, const QDict *qdict); +void hmp_human_readable_text_helper(Monitor *mon, + HumanReadableText *(*qmp_handler)(Error **)); +void hmp_info_stats(Monitor *mon, const QDict *qdict); #endif diff --git a/include/monitor/monitor.h b/include/monitor/monitor.h index 1a8a369b50..737e750670 100644 --- a/include/monitor/monitor.h +++ b/include/monitor/monitor.h @@ -31,9 +31,10 @@ void monitor_resume(Monitor *mon); int monitor_get_fd(Monitor *mon, const char *fdname, Error **errp); int monitor_fd_param(Monitor *mon, const char *fdname, Error **errp); +int monitor_puts(Monitor *mon, const char *str); int monitor_vprintf(Monitor *mon, const char *fmt, va_list ap) - GCC_FMT_ATTR(2, 0); -int monitor_printf(Monitor *mon, const char *fmt, ...) GCC_FMT_ATTR(2, 3); + G_GNUC_PRINTF(2, 0); +int monitor_printf(Monitor *mon, const char *fmt, ...) G_GNUC_PRINTF(2, 3); void monitor_flush(Monitor *mon); int monitor_set_cpu(Monitor *mon, int cpu_index); int monitor_get_cpu_index(Monitor *mon); @@ -53,5 +54,10 @@ int64_t monitor_fdset_dup_fd_find(int dup_fd); void monitor_register_hmp(const char *name, bool info, void (*cmd)(Monitor *mon, const QDict *qdict)); +void monitor_register_hmp_info_hrt(const char *name, + HumanReadableText *(*handler)(Error **errp)); + +int error_vprintf_unless_qmp(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0); +int error_printf_unless_qmp(const char *fmt, ...) G_GNUC_PRINTF(1, 2); #endif /* MONITOR_H */ diff --git a/include/monitor/qdev.h b/include/monitor/qdev.h index eaa947d73a..1d57bf6577 100644 --- a/include/monitor/qdev.h +++ b/include/monitor/qdev.h @@ -9,6 +9,31 @@ void qmp_device_add(QDict *qdict, QObject **ret_data, Error **errp); int qdev_device_help(QemuOpts *opts); DeviceState *qdev_device_add(QemuOpts *opts, Error **errp); -void qdev_set_id(DeviceState *dev, const char *id); +DeviceState *qdev_device_add_from_qdict(const QDict *opts, + bool from_json, Error **errp); + +/** + * qdev_set_id: parent the device and set its id if provided. + * @dev: device to handle + * @id: id to be given to the device, or NULL. + * + * Returns: the id of the device in case of success; otherwise NULL. + * + * @dev must be unrealized, unparented and must not have an id. + * + * If @id is non-NULL, this function tries to setup @dev qom path as + * "/peripheral/id". If @id is already taken, it fails. If it succeeds, + * the id field of @dev is set to @id (@dev now owns the given @id + * parameter). + * + * If @id is NULL, this function generates a unique name and setups @dev + * qom path as "/peripheral-anon/name". This name is not set as the id + * of @dev. + * + * Upon success, it returns the id/name (generated or provided). The + * returned string is owned by the corresponding child property and must + * not be freed by the caller. 
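As a concrete sketch of the registration helpers above, a trivial "info" handler wired up through monitor_register_hmp(); the command name and the handler are hypothetical:

    /* Sketch: a minimal HMP "info" command and its registration. */
    static void hmp_info_mydevice(Monitor *mon, const QDict *qdict)
    {
        monitor_printf(mon, "mydevice: %d widgets active\n", 42);
    }

    static void my_register_hmp_commands(void)
    {
        /* 'true' makes this an "info mydevice" sub-command rather than
         * a top-level command. */
        monitor_register_hmp("mydevice", true, hmp_info_mydevice);
    }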
+ */ +const char *qdev_set_id(DeviceState *dev, char *id, Error **errp); #endif diff --git a/include/monitor/stats.h b/include/monitor/stats.h new file mode 100644 index 0000000000..fcf0983154 --- /dev/null +++ b/include/monitor/stats.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2. + * See the COPYING file in the top-level directory. + */ + +#ifndef STATS_H +#define STATS_H + +#include "qapi/qapi-types-stats.h" + +typedef void StatRetrieveFunc(StatsResultList **result, StatsTarget target, + strList *names, strList *targets, Error **errp); +typedef void SchemaRetrieveFunc(StatsSchemaList **result, Error **errp); + +/* + * Register callbacks for the QMP query-stats command. + * + * @provider: stats provider checked against QMP command arguments + * @stats_fn: routine to query stats + * @schemas_fn: routine to query stat schemas + */ +void add_stats_callbacks(StatsProvider provider, + StatRetrieveFunc *stats_fn, + SchemaRetrieveFunc *schemas_fn); + +/* + * Helper routines for adding stats entries to the results lists. + */ +void add_stats_entry(StatsResultList **, StatsProvider, const char *id, + StatsList *stats_list); +void add_stats_schema(StatsSchemaList **, StatsProvider, StatsTarget, + StatsSchemaValueList *); + +/* + * True if a string matches the filter passed to the stats_fn callback, + * false otherwise. + * + * Note that an empty list means no filtering, i.e. all strings will + * return true. + */ +bool apply_str_list_filter(const char *string, strList *list); + +#endif /* STATS_H */ diff --git a/include/net/eth.h b/include/net/eth.h index 7767ae880e..6e699b0d7a 100644 --- a/include/net/eth.h +++ b/include/net/eth.h @@ -159,7 +159,7 @@ struct tcp_hdr { u_short th_dport; /* destination port */ uint32_t th_seq; /* sequence number */ uint32_t th_ack; /* acknowledgment number */ -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN u_char th_off : 4, /* data offset */ th_x2:4; /* (unused) */ #else diff --git a/include/net/net.h b/include/net/net.h index 5d1508081f..dc20b31e9f 100644 --- a/include/net/net.h +++ b/include/net/net.h @@ -44,6 +44,9 @@ typedef struct NICConf { typedef void (NetPoll)(NetClientState *, bool enable); typedef bool (NetCanReceive)(NetClientState *); +typedef int (NetStart)(NetClientState *); +typedef int (NetLoad)(NetClientState *); +typedef void (NetStop)(NetClientState *); typedef ssize_t (NetReceive)(NetClientState *, const uint8_t *, size_t); typedef ssize_t (NetReceiveIOV)(NetClientState *, const struct iovec *, int); typedef void (NetCleanup) (NetClientState *); @@ -62,6 +65,7 @@ typedef struct SocketReadState SocketReadState; typedef void (SocketReadStateFinalize)(SocketReadState *rs); typedef void (NetAnnounce)(NetClientState *); typedef bool (SetSteeringEBPF)(NetClientState *, int); +typedef bool (NetCheckPeerType)(NetClientState *, ObjectClass *, Error **); typedef struct NetClientInfo { NetClientDriver type; @@ -70,6 +74,9 @@ typedef struct NetClientInfo { NetReceive *receive_raw; NetReceiveIOV *receive_iov; NetCanReceive *can_receive; + NetStart *start; + NetLoad *load; + NetStop *stop; NetCleanup *cleanup; LinkStatusChanged *link_status_changed; QueryRxFilter *query_rx_filter; @@ -84,6 +91,7 @@ typedef struct NetClientInfo { SetVnetBE *set_vnet_be; NetAnnounce *announce; SetSteeringEBPF *set_steering_ebpf; + NetCheckPeerType *check_peer_type; } NetClientInfo; struct NetClientState { @@ -103,6 +111,7 @@ struct NetClientState { int vnet_hdr_len; bool 
is_netdev; bool do_not_pad; /* do not pad to the minimum ethernet frame length */ + bool is_datapath; QTAILQ_HEAD(, NetFilterState) filters; }; @@ -134,6 +143,10 @@ NetClientState *qemu_new_net_client(NetClientInfo *info, NetClientState *peer, const char *model, const char *name); +NetClientState *qemu_new_net_control_client(NetClientInfo *info, + NetClientState *peer, + const char *model, + const char *name); NICState *qemu_new_nic(NetClientInfo *info, NICConf *conf, const char *model, @@ -164,6 +177,8 @@ ssize_t qemu_send_packet_async(NetClientState *nc, const uint8_t *buf, void qemu_purge_queued_packets(NetClientState *nc); void qemu_flush_queued_packets(NetClientState *nc); void qemu_flush_or_purge_queued_packets(NetClientState *nc, bool purge); +void qemu_set_info_str(NetClientState *nc, + const char *fmt, ...) G_GNUC_PRINTF(2, 3); void qemu_format_nic_info_str(NetClientState *nc, uint8_t macaddr[6]); bool qemu_has_ufo(NetClientState *nc); bool qemu_has_vnet_hdr(NetClientState *nc); @@ -207,9 +222,11 @@ extern NICInfo nd_table[MAX_NICS]; extern const char *host_net_devices[]; /* from net.c */ -int net_client_parse(QemuOptsList *opts_list, const char *str); +bool netdev_is_modern(const char *optarg); +void netdev_parse_modern(const char *optarg); +void net_client_parse(QemuOptsList *opts_list, const char *str); void show_netdevs(void); -int net_init_clients(Error **errp); +void net_init_clients(void); void net_check_clients(void); void net_cleanup(void); void hmp_host_net_add(Monitor *mon, const QDict *qdict); diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h index 172b0051d8..40b9a40074 100644 --- a/include/net/vhost_net.h +++ b/include/net/vhost_net.h @@ -14,14 +14,17 @@ typedef struct VhostNetOptions { VhostBackendType backend_type; NetClientState *net_backend; uint32_t busyloop_timeout; + unsigned int nvqs; void *opaque; } VhostNetOptions; uint64_t vhost_net_get_max_queues(VHostNetState *net); struct vhost_net *vhost_net_init(VhostNetOptions *options); -int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int total_queues); -void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues); +int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, + int data_queue_pairs, int cvq); +void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, + int data_queue_pairs, int cvq); void vhost_net_cleanup(VHostNetState *net); @@ -45,4 +48,8 @@ uint64_t vhost_net_get_acked_features(VHostNetState *net); int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu); +void vhost_net_virtqueue_reset(VirtIODevice *vdev, NetClientState *nc, + int vq_index); +int vhost_net_virtqueue_restart(VirtIODevice *vdev, NetClientState *nc, + int vq_index); #endif diff --git a/include/qapi/compat-policy.h b/include/qapi/compat-policy.h index 1083f95122..8b7b25c0b5 100644 --- a/include/qapi/compat-policy.h +++ b/include/qapi/compat-policy.h @@ -13,10 +13,17 @@ #ifndef QAPI_COMPAT_POLICY_H #define QAPI_COMPAT_POLICY_H +#include "qapi/error.h" #include "qapi/qapi-types-compat.h" extern CompatPolicy compat_policy; +bool compat_policy_input_ok(unsigned special_features, + const CompatPolicy *policy, + ErrorClass error_class, + const char *kind, const char *name, + Error **errp); + /* * Create a QObject input visitor for @obj for use with QMP * diff --git a/include/qapi/error.h b/include/qapi/error.h index 4a9260b0cc..d798faeec3 100644 --- a/include/qapi/error.h +++ b/include/qapi/error.h @@ -320,7 +320,7 @@ ErrorClass error_get_class(const Error *err); void error_setg_internal(Error 
**errp, const char *src, int line, const char *func, const char *fmt, ...) - GCC_FMT_ATTR(5, 6); + G_GNUC_PRINTF(5, 6); /* * Just like error_setg(), with @os_error info added to the message. @@ -336,7 +336,7 @@ void error_setg_internal(Error **errp, void error_setg_errno_internal(Error **errp, const char *fname, int line, const char *func, int os_error, const char *fmt, ...) - GCC_FMT_ATTR(6, 7); + G_GNUC_PRINTF(6, 7); #ifdef _WIN32 /* @@ -350,7 +350,7 @@ void error_setg_errno_internal(Error **errp, void error_setg_win32_internal(Error **errp, const char *src, int line, const char *func, int win32_err, const char *fmt, ...) - GCC_FMT_ATTR(6, 7); + G_GNUC_PRINTF(6, 7); #endif /* @@ -383,21 +383,21 @@ void error_propagate(Error **dst_errp, Error *local_err); */ void error_propagate_prepend(Error **dst_errp, Error *local_err, const char *fmt, ...) - GCC_FMT_ATTR(3, 4); + G_GNUC_PRINTF(3, 4); /* * Prepend some text to @errp's human-readable error message. * The text is made by formatting @fmt, @ap like vprintf(). */ void error_vprepend(Error *const *errp, const char *fmt, va_list ap) - GCC_FMT_ATTR(2, 0); + G_GNUC_PRINTF(2, 0); /* * Prepend some text to @errp's human-readable error message. * The text is made by formatting @fmt, ... like printf(). */ void error_prepend(Error *const *errp, const char *fmt, ...) - GCC_FMT_ATTR(2, 3); + G_GNUC_PRINTF(2, 3); /* * Append a printf-style human-readable explanation to an existing error. @@ -414,7 +414,7 @@ void error_prepend(Error *const *errp, const char *fmt, ...) * newline. */ void error_append_hint(Error *const *errp, const char *fmt, ...) - GCC_FMT_ATTR(2, 3); + G_GNUC_PRINTF(2, 3); /* * Convenience function to report open() failure. @@ -458,13 +458,13 @@ void error_report_err(Error *err); * Convenience function to error_prepend(), warn_report() and free @err. */ void warn_reportf_err(Error *err, const char *fmt, ...) - GCC_FMT_ATTR(2, 3); + G_GNUC_PRINTF(2, 3); /* * Convenience function to error_prepend(), error_report() and free @err. */ void error_reportf_err(Error *err, const char *fmt, ...) - GCC_FMT_ATTR(2, 3); + G_GNUC_PRINTF(2, 3); /* * Just like error_setg(), except you get to specify the error class. @@ -477,7 +477,7 @@ void error_reportf_err(Error *err, const char *fmt, ...) void error_set_internal(Error **errp, const char *src, int line, const char *func, ErrorClass err_class, const char *fmt, ...) 
- GCC_FMT_ATTR(6, 7); + G_GNUC_PRINTF(6, 7); /* * Make @errp parameter easier to use regardless of argument value diff --git a/include/qapi/qmp/dispatch.h b/include/qapi/qmp/dispatch.h index 075203dc67..1e4240fd0d 100644 --- a/include/qapi/qmp/dispatch.h +++ b/include/qapi/qmp/dispatch.h @@ -21,12 +21,10 @@ typedef void (QmpCommandFunc)(QDict *, QObject **, Error **); typedef enum QmpCommandOptions { - QCO_NO_OPTIONS = 0x0, QCO_NO_SUCCESS_RESP = (1U << 0), QCO_ALLOW_OOB = (1U << 1), QCO_ALLOW_PRECONFIG = (1U << 2), QCO_COROUTINE = (1U << 3), - QCO_DEPRECATED = (1U << 4), } QmpCommandOptions; typedef struct QmpCommand @@ -35,6 +33,7 @@ typedef struct QmpCommand /* Runs in coroutine context if QCO_COROUTINE is set */ QmpCommandFunc *fn; QmpCommandOptions options; + unsigned special_features; QTAILQ_ENTRY(QmpCommand) node; bool enabled; const char *disable_reason; @@ -43,7 +42,8 @@ typedef struct QmpCommand typedef QTAILQ_HEAD(QmpCommandList, QmpCommand) QmpCommandList; void qmp_register_command(QmpCommandList *cmds, const char *name, - QmpCommandFunc *fn, QmpCommandOptions options); + QmpCommandFunc *fn, QmpCommandOptions options, + unsigned special_features); const QmpCommand *qmp_find_command(const QmpCommandList *cmds, const char *name); void qmp_disable_command(QmpCommandList *cmds, const char *name, diff --git a/include/qapi/qmp/qbool.h b/include/qapi/qmp/qbool.h index 2f888d1057..0d09726939 100644 --- a/include/qapi/qmp/qbool.h +++ b/include/qapi/qmp/qbool.h @@ -21,6 +21,10 @@ struct QBool { bool value; }; +void qbool_unref(QBool *q); + +G_DEFINE_AUTOPTR_CLEANUP_FUNC(QBool, qbool_unref) + QBool *qbool_from_bool(bool value); bool qbool_get_bool(const QBool *qb); diff --git a/include/qapi/qmp/qdict.h b/include/qapi/qmp/qdict.h index d5b5430e21..82e90fc072 100644 --- a/include/qapi/qmp/qdict.h +++ b/include/qapi/qmp/qdict.h @@ -30,6 +30,10 @@ struct QDict { QLIST_HEAD(,QDictEntry) table[QDICT_BUCKET_MAX]; }; +void qdict_unref(QDict *q); + +G_DEFINE_AUTOPTR_CLEANUP_FUNC(QDict, qdict_unref) + /* Object API */ QDict *qdict_new(void); const char *qdict_entry_key(const QDictEntry *entry); @@ -64,7 +68,4 @@ const char *qdict_get_try_str(const QDict *qdict, const char *key); QDict *qdict_clone_shallow(const QDict *src); -QObject *qdict_crumple(const QDict *src, Error **errp); -void qdict_flatten(QDict *qdict); - #endif /* QDICT_H */ diff --git a/include/qapi/qmp/qerror.h b/include/qapi/qmp/qerror.h index 596fce0c54..87ca83b155 100644 --- a/include/qapi/qmp/qerror.h +++ b/include/qapi/qmp/qerror.h @@ -50,9 +50,6 @@ #define QERR_MISSING_PARAMETER \ "Parameter '%s' is missing" -#define QERR_PERMISSION_DENIED \ - "Insufficient permission to perform this operation" - #define QERR_PROPERTY_VALUE_BAD \ "Property '%s.%s' doesn't take value '%s'" diff --git a/include/qapi/qmp/qjson.h b/include/qapi/qmp/qjson.h index 593b40b4e0..7bd8d2de1b 100644 --- a/include/qapi/qmp/qjson.h +++ b/include/qapi/qmp/qjson.h @@ -17,13 +17,13 @@ QObject *qobject_from_json(const char *string, Error **errp); QObject *qobject_from_vjsonf_nofail(const char *string, va_list ap) - GCC_FMT_ATTR(1, 0); + G_GNUC_PRINTF(1, 0); QObject *qobject_from_jsonf_nofail(const char *string, ...) - GCC_FMT_ATTR(1, 2); + G_GNUC_PRINTF(1, 2); QDict *qdict_from_vjsonf_nofail(const char *string, va_list ap) - GCC_FMT_ATTR(1, 0); + G_GNUC_PRINTF(1, 0); QDict *qdict_from_jsonf_nofail(const char *string, ...) 
- GCC_FMT_ATTR(1, 2); + G_GNUC_PRINTF(1, 2); GString *qobject_to_json(const QObject *obj); GString *qobject_to_json_pretty(const QObject *obj, bool pretty); diff --git a/include/qapi/qmp/qlist.h b/include/qapi/qmp/qlist.h index 06e98ad5f4..e4e985d435 100644 --- a/include/qapi/qmp/qlist.h +++ b/include/qapi/qmp/qlist.h @@ -26,6 +26,10 @@ struct QList { QTAILQ_HEAD(,QListEntry) head; }; +void qlist_unref(QList *q); + +G_DEFINE_AUTOPTR_CLEANUP_FUNC(QList, qlist_unref) + #define qlist_append(qlist, obj) \ qlist_append_obj(qlist, QOBJECT(obj)) diff --git a/include/qapi/qmp/qnull.h b/include/qapi/qmp/qnull.h index e84ecceedb..7feb7c7d83 100644 --- a/include/qapi/qmp/qnull.h +++ b/include/qapi/qmp/qnull.h @@ -26,4 +26,8 @@ static inline QNull *qnull(void) return qobject_ref(&qnull_); } +void qnull_unref(QNull *q); + +G_DEFINE_AUTOPTR_CLEANUP_FUNC(QNull, qnull_unref) + #endif /* QNULL_H */ diff --git a/include/qapi/qmp/qnum.h b/include/qapi/qmp/qnum.h index 7f84e20bfb..e86788dd2e 100644 --- a/include/qapi/qmp/qnum.h +++ b/include/qapi/qmp/qnum.h @@ -54,6 +54,10 @@ struct QNum { } u; }; +void qnum_unref(QNum *q); + +G_DEFINE_AUTOPTR_CLEANUP_FUNC(QNum, qnum_unref) + QNum *qnum_from_int(int64_t value); QNum *qnum_from_uint(uint64_t value); QNum *qnum_from_double(double value); diff --git a/include/qapi/qmp/qstring.h b/include/qapi/qmp/qstring.h index 1d8ba46936..318d815d6a 100644 --- a/include/qapi/qmp/qstring.h +++ b/include/qapi/qmp/qstring.h @@ -20,6 +20,10 @@ struct QString { const char *string; }; +void qstring_unref(QString *q); + +G_DEFINE_AUTOPTR_CLEANUP_FUNC(QString, qstring_unref) + QString *qstring_new(void); QString *qstring_from_str(const char *str); QString *qstring_from_substr(const char *str, size_t start, size_t end); diff --git a/include/qapi/qobject-input-visitor.h b/include/qapi/qobject-input-visitor.h index 8d69388810..95985e25e5 100644 --- a/include/qapi/qobject-input-visitor.h +++ b/include/qapi/qobject-input-visitor.h @@ -15,7 +15,6 @@ #ifndef QOBJECT_INPUT_VISITOR_H #define QOBJECT_INPUT_VISITOR_H -#include "qapi/qapi-types-compat.h" #include "qapi/visitor.h" typedef struct QObjectInputVisitor QObjectInputVisitor; @@ -59,9 +58,6 @@ typedef struct QObjectInputVisitor QObjectInputVisitor; */ Visitor *qobject_input_visitor_new(QObject *obj); -void qobject_input_visitor_set_policy(Visitor *v, - CompatPolicyInput deprecated); - /* * Create a QObject input visitor for @obj for use with keyval_parse() * diff --git a/include/qapi/qobject-output-visitor.h b/include/qapi/qobject-output-visitor.h index f2a2f92a00..2b1726baf5 100644 --- a/include/qapi/qobject-output-visitor.h +++ b/include/qapi/qobject-output-visitor.h @@ -15,7 +15,6 @@ #define QOBJECT_OUTPUT_VISITOR_H #include "qapi/visitor.h" -#include "qapi/qapi-types-compat.h" typedef struct QObjectOutputVisitor QObjectOutputVisitor; @@ -54,7 +53,4 @@ typedef struct QObjectOutputVisitor QObjectOutputVisitor; */ Visitor *qobject_output_visitor_new(QObject **result); -void qobject_output_visitor_set_policy(Visitor *v, - CompatPolicyOutput deprecated); - #endif diff --git a/include/qapi/type-helpers.h b/include/qapi/type-helpers.h new file mode 100644 index 0000000000..be1f181526 --- /dev/null +++ b/include/qapi/type-helpers.h @@ -0,0 +1,14 @@ +/* + * QAPI common helper functions + * + * This file provides helper functions related to types defined + * in the QAPI schema. + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
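The G_DEFINE_AUTOPTR_CLEANUP_FUNC() declarations added above for the QObject subtypes (QBool, QDict, QList, QNull, QNum, QString) let callers manage references with GLib's g_autoptr(). A minimal sketch of the intended usage, not part of the patch; qdict_new() and qdict_put_str() are the existing QDict helpers, and the function name is illustrative:

    #include "qapi/qmp/qdict.h"

    /* Illustrative only: build a QDict and let the cleanup attribute drop it. */
    static void autoptr_qdict_example(void)
    {
        g_autoptr(QDict) opts = qdict_new();

        qdict_put_str(opts, "driver", "null-co");
        /* ... use opts ... */
    }   /* qdict_unref(opts) runs automatically here */
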
+ * + */ + +#include "qapi/qapi-types-common.h" + +HumanReadableText *human_readable_text_from_str(GString *str); diff --git a/include/qapi/util.h b/include/qapi/util.h index d7bfb30e25..81a2b13a33 100644 --- a/include/qapi/util.h +++ b/include/qapi/util.h @@ -11,9 +11,15 @@ #ifndef QAPI_UTIL_H #define QAPI_UTIL_H +typedef enum { + QAPI_DEPRECATED, + QAPI_UNSTABLE, +} QapiSpecialFeature; + typedef struct QEnumLookup { const char *const *array; - int size; + const unsigned char *const special_features; + const int size; } QEnumLookup; const char *qapi_enum_lookup(const QEnumLookup *lookup, int val); diff --git a/include/qapi/visitor-impl.h b/include/qapi/visitor-impl.h index 3b950f6e3d..2badec5ba4 100644 --- a/include/qapi/visitor-impl.h +++ b/include/qapi/visitor-impl.h @@ -114,14 +114,19 @@ struct Visitor void (*optional)(Visitor *v, const char *name, bool *present); /* Optional */ - bool (*deprecated_accept)(Visitor *v, const char *name, Error **errp); + bool (*policy_reject)(Visitor *v, const char *name, + unsigned special_features, Error **errp); /* Optional */ - bool (*deprecated)(Visitor *v, const char *name); + bool (*policy_skip)(Visitor *v, const char *name, + unsigned special_features); /* Must be set */ VisitorType type; + /* Optional */ + struct CompatPolicy compat_policy; + /* Must be set for output visitors, optional otherwise. */ void (*complete)(Visitor *v, void *opaque); diff --git a/include/qapi/visitor.h b/include/qapi/visitor.h index b3c9ef7a81..d53a84c9ba 100644 --- a/include/qapi/visitor.h +++ b/include/qapi/visitor.h @@ -16,6 +16,7 @@ #define QAPI_VISITOR_H #include "qapi/qapi-builtin-types.h" +#include "qapi/qapi-types-compat.h" /* * The QAPI schema defines both a set of C data types, and a QMP wire @@ -460,22 +461,39 @@ void visit_end_alternate(Visitor *v, void **obj); bool visit_optional(Visitor *v, const char *name, bool *present); /* - * Should we reject deprecated member @name? + * Should we reject member @name due to policy? + * + * @special_features is the member's special features encoded as a + * bitset of QapiSpecialFeature. * * @name must not be NULL. This function is only useful between * visit_start_struct() and visit_end_struct(), since only objects * have deprecated members. */ -bool visit_deprecated_accept(Visitor *v, const char *name, Error **errp); +bool visit_policy_reject(Visitor *v, const char *name, + unsigned special_features, Error **errp); /* - * Should we visit deprecated member @name? + * + * Should we skip member @name due to policy? + * + * @special_features is the member's special features encoded as a + * bitset of QapiSpecialFeature. * * @name must not be NULL. This function is only useful between * visit_start_struct() and visit_end_struct(), since only objects * have deprecated members. */ -bool visit_deprecated(Visitor *v, const char *name); +bool visit_policy_skip(Visitor *v, const char *name, + unsigned special_features); + +/* + * Set policy for handling deprecated management interfaces. + * + * Intended use: call visit_set_policy(v, &compat_policy) when + * visiting management interface input or output. + */ +void visit_set_policy(Visitor *v, CompatPolicy *policy); /* * Visit an enum value. diff --git a/include/qemu-common.h b/include/qemu-common.h deleted file mode 100644 index 73bcf763ed..0000000000 --- a/include/qemu-common.h +++ /dev/null @@ -1,142 +0,0 @@ -/* - * This file is supposed to be included only by .c files. 
No header file should - * depend on qemu-common.h, as this would easily lead to circular header - * dependencies. - * - * If a header file uses a definition from qemu-common.h, that definition - * must be moved to a separate header file, and the header that uses it - * must include that header. - */ -#ifndef QEMU_COMMON_H -#define QEMU_COMMON_H - -#define TFR(expr) do { if ((expr) != -1) break; } while (errno == EINTR) - -/* Copyright string for -version arguments, About dialogs, etc */ -#define QEMU_COPYRIGHT "Copyright (c) 2003-2021 " \ - "Fabrice Bellard and the QEMU Project developers" - -/* Bug reporting information for --help arguments, About dialogs, etc */ -#define QEMU_HELP_BOTTOM \ - "See for how to report bugs.\n" \ - "More information on the QEMU project at ." - -/* main function, renamed */ -#if defined(CONFIG_COCOA) -int qemu_main(int argc, char **argv, char **envp); -#endif - -void qemu_get_timedate(struct tm *tm, int offset); -int qemu_timedate_diff(struct tm *tm); - -void *qemu_oom_check(void *ptr); - -ssize_t qemu_write_full(int fd, const void *buf, size_t count) - QEMU_WARN_UNUSED_RESULT; - -#ifndef _WIN32 -int qemu_pipe(int pipefd[2]); -/* like openpty() but also makes it raw; return master fd */ -int qemu_openpty_raw(int *aslave, char *pty_name); -#endif - -#ifdef _WIN32 -/* MinGW needs type casts for the 'buf' and 'optval' arguments. */ -#define qemu_getsockopt(sockfd, level, optname, optval, optlen) \ - getsockopt(sockfd, level, optname, (void *)optval, optlen) -#define qemu_setsockopt(sockfd, level, optname, optval, optlen) \ - setsockopt(sockfd, level, optname, (const void *)optval, optlen) -#define qemu_recv(sockfd, buf, len, flags) recv(sockfd, (void *)buf, len, flags) -#define qemu_sendto(sockfd, buf, len, flags, destaddr, addrlen) \ - sendto(sockfd, (const void *)buf, len, flags, destaddr, addrlen) -#else -#define qemu_getsockopt(sockfd, level, optname, optval, optlen) \ - getsockopt(sockfd, level, optname, optval, optlen) -#define qemu_setsockopt(sockfd, level, optname, optval, optlen) \ - setsockopt(sockfd, level, optname, optval, optlen) -#define qemu_recv(sockfd, buf, len, flags) recv(sockfd, buf, len, flags) -#define qemu_sendto(sockfd, buf, len, flags, destaddr, addrlen) \ - sendto(sockfd, buf, len, flags, destaddr, addrlen) -#endif - -void cpu_exec_init_all(void); -void cpu_exec_step_atomic(CPUState *cpu); - -/** - * set_preferred_target_page_bits: - * @bits: number of bits needed to represent an address within the page - * - * Set the preferred target page size (the actual target page - * size may be smaller than any given CPU's preference). - * Returns true on success, false on failure (which can only happen - * if this is called after the system has already finalized its - * choice of page size and the requested page size is smaller than that). - */ -bool set_preferred_target_page_bits(int bits); - -/** - * finalize_target_page_bits: - * Commit the final value set by set_preferred_target_page_bits. - */ -void finalize_target_page_bits(void); - -/** - * Sends a (part of) iovec down a socket, yielding when the socket is full, or - * Receives data into a (part of) iovec from a socket, - * yielding when there is no data in the socket. - * The same interface as qemu_sendv_recvv(), with added yielding. 
- * XXX should mark these as coroutine_fn - */ -ssize_t qemu_co_sendv_recvv(int sockfd, struct iovec *iov, unsigned iov_cnt, - size_t offset, size_t bytes, bool do_send); -#define qemu_co_recvv(sockfd, iov, iov_cnt, offset, bytes) \ - qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, false) -#define qemu_co_sendv(sockfd, iov, iov_cnt, offset, bytes) \ - qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, true) - -/** - * The same as above, but with just a single buffer - */ -ssize_t qemu_co_send_recv(int sockfd, void *buf, size_t bytes, bool do_send); -#define qemu_co_recv(sockfd, buf, bytes) \ - qemu_co_send_recv(sockfd, buf, bytes, false) -#define qemu_co_send(sockfd, buf, bytes) \ - qemu_co_send_recv(sockfd, buf, bytes, true) - -void qemu_progress_init(int enabled, float min_skip); -void qemu_progress_end(void); -void qemu_progress_print(float delta, int max); -const char *qemu_get_vm_name(void); - -/* OS specific functions */ -void os_setup_early_signal_handling(void); -int os_parse_cmd_args(int index, const char *optarg); - -/* - * Hexdump a line of a byte buffer into a hexadecimal/ASCII buffer - */ -#define QEMU_HEXDUMP_LINE_BYTES 16 /* Number of bytes to dump */ -#define QEMU_HEXDUMP_LINE_LEN 75 /* Number of characters in line */ -void qemu_hexdump_line(char *line, unsigned int b, const void *bufptr, - unsigned int len, bool ascii); - -/* - * Hexdump a buffer to a file. An optional string prefix is added to every line - */ - -void qemu_hexdump(FILE *fp, const char *prefix, - const void *bufptr, size_t size); - -/* - * helper to parse debug environment variables - */ -int parse_debug_env(const char *name, int max, int initial); - -const char *qemu_ether_ntoa(const MACAddr *mac); -void page_size_init(void); - -/* returns non-zero if dump is in progress, otherwise zero is - * returned. */ -bool dump_in_progress(void); - -#endif diff --git a/linux-user/host/riscv32/hostdep.h b/include/qemu-main.h similarity index 51% rename from linux-user/host/riscv32/hostdep.h rename to include/qemu-main.h index adf9edbf2d..940960a7db 100644 --- a/linux-user/host/riscv32/hostdep.h +++ b/include/qemu-main.h @@ -1,11 +1,11 @@ /* - * hostdep.h : things which are dependent on the host architecture - * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ +#ifndef QEMU_MAIN_H +#define QEMU_MAIN_H -#ifndef RISCV32_HOSTDEP_H -#define RISCV32_HOSTDEP_H +int qemu_default_main(void); +extern int (*qemu_main)(void); -#endif +#endif /* QEMU_MAIN_H */ diff --git a/include/qemu/accel.h b/include/qemu/accel.h index 4f4c283f6f..ce4747634a 100644 --- a/include/qemu/accel.h +++ b/include/qemu/accel.h @@ -43,6 +43,10 @@ typedef struct AccelClass { bool (*has_memory)(MachineState *ms, AddressSpace *as, hwaddr start_addr, hwaddr size); #endif + + /* gdbstub related hooks */ + int (*gdbstub_supported_sstep_flags)(void); + bool *allowed; /* * Array of global properties that would be applied when specific @@ -68,6 +72,7 @@ typedef struct AccelClass { AccelClass *accel_find(const char *opt_name); AccelState *current_accel(void); +const char *current_accel_name(void); void accel_init_interfaces(AccelClass *ac); @@ -91,4 +96,12 @@ void accel_cpu_instance_init(CPUState *cpu); */ bool accel_cpu_realizefn(CPUState *cpu, Error **errp); +/** + * accel_supported_gdbstub_sstep_flags: + * + * Returns the supported single step modes for the configured + * accelerator. 
+ */ +int accel_supported_gdbstub_sstep_flags(void); + #endif /* QEMU_ACCEL_H */ diff --git a/include/qemu/async-teardown.h b/include/qemu/async-teardown.h new file mode 100644 index 0000000000..092e7a37e7 --- /dev/null +++ b/include/qemu/async-teardown.h @@ -0,0 +1,22 @@ +/* + * Asynchronous teardown + * + * Copyright IBM, Corp. 2022 + * + * Authors: + * Claudio Imbrenda + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. + * + */ +#ifndef QEMU_ASYNC_TEARDOWN_H +#define QEMU_ASYNC_TEARDOWN_H + +#include "config-host.h" + +#ifdef CONFIG_LINUX +void init_async_teardown(void); +#endif + +#endif diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h index 112a29910b..874134fd19 100644 --- a/include/qemu/atomic.h +++ b/include/qemu/atomic.h @@ -15,6 +15,8 @@ #ifndef QEMU_ATOMIC_H #define QEMU_ATOMIC_H +#include "compiler.h" + /* Compiler barrier */ #define barrier() ({ asm volatile("" ::: "memory"); (void)0; }) @@ -81,7 +83,7 @@ * no processors except Alpha need a barrier here. Leave it in if * using Thread Sanitizer to avoid warnings, otherwise optimize it away. */ -#if defined(__SANITIZE_THREAD__) +#ifdef QEMU_SANITIZE_THREAD #define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); }) #elif defined(__alpha__) #define smp_read_barrier_depends() asm volatile("mb":::"memory") @@ -131,7 +133,7 @@ #define qatomic_read(ptr) \ ({ \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \ qatomic_read__nocheck(ptr); \ }) @@ -139,14 +141,14 @@ __atomic_store_n(ptr, i, __ATOMIC_RELAXED) #define qatomic_set(ptr, i) do { \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \ qatomic_set__nocheck(ptr, i); \ } while(0) /* See above: most compilers currently treat consume and acquire the * same, but this slows down qatomic_rcu_read unnecessarily. 
*/ -#ifdef __SANITIZE_THREAD__ +#ifdef QEMU_SANITIZE_THREAD #define qatomic_rcu_read__nocheck(ptr, valptr) \ __atomic_load(ptr, valptr, __ATOMIC_CONSUME); #else @@ -157,27 +159,27 @@ #define qatomic_rcu_read(ptr) \ ({ \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \ typeof_strip_qual(*ptr) _val; \ qatomic_rcu_read__nocheck(ptr, &_val); \ _val; \ }) #define qatomic_rcu_set(ptr, i) do { \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \ __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \ } while(0) #define qatomic_load_acquire(ptr) \ ({ \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \ typeof_strip_qual(*ptr) _val; \ __atomic_load(ptr, &_val, __ATOMIC_ACQUIRE); \ _val; \ }) #define qatomic_store_release(ptr, i) do { \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \ __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \ } while(0) @@ -189,7 +191,7 @@ }) #define qatomic_xchg(ptr, i) ({ \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \ qatomic_xchg__nocheck(ptr, i); \ }) @@ -202,7 +204,7 @@ }) #define qatomic_cmpxchg(ptr, old, new) ({ \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \ qatomic_cmpxchg__nocheck(ptr, old, new); \ }) @@ -254,7 +256,7 @@ #define qatomic_mb_read(ptr) \ qatomic_load_acquire(ptr) -#if !defined(__SANITIZE_THREAD__) && \ +#if !defined(QEMU_SANITIZE_THREAD) && \ (defined(__i386__) || defined(__x86_64__) || defined(__s390x__)) /* This is more efficient than a store plus a fence. */ # define qatomic_mb_set(ptr, i) ((void)qatomic_xchg(ptr, i)) diff --git a/include/qemu/bitmap.h b/include/qemu/bitmap.h index 670d3577cc..a915fdc0a1 100644 --- a/include/qemu/bitmap.h +++ b/include/qemu/bitmap.h @@ -253,6 +253,7 @@ void bitmap_set(unsigned long *map, long i, long len); void bitmap_set_atomic(unsigned long *map, long i, long len); void bitmap_clear(unsigned long *map, long start, long nr); bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr); +bool bitmap_test_and_clear(unsigned long *map, long start, long nr); void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src, long nr); unsigned long bitmap_find_next_zero_area(unsigned long *map, diff --git a/include/qemu/bswap.h b/include/qemu/bswap.h index 2d3bb8bbed..346d05f2aa 100644 --- a/include/qemu/bswap.h +++ b/include/qemu/bswap.h @@ -19,8 +19,6 @@ extern "C" { #endif -#include "fpu/softfloat-types.h" - #ifdef BSWAP_FROM_BYTESWAP static inline uint16_t bswap16(uint16_t x) { @@ -84,7 +82,7 @@ static inline void bswap64s(uint64_t *s) *s = bswap64(*s); } -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN #define be_bswap(v, size) (v) #define le_bswap(v, size) glue(bswap, size)(v) #define be_bswaps(v, size) @@ -188,7 +186,7 @@ CPU_CONVERT(le, 64, uint64_t) * a compile-time constant if you pass in a constant. So this can be * used to initialize static variables. */ -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN # define const_le32(_x) \ ((((_x) & 0x000000ffU) << 24) | \ (((_x) & 0x0000ff00U) << 8) | \ @@ -202,64 +200,6 @@ CPU_CONVERT(le, 64, uint64_t) # define const_le16(_x) (_x) #endif -/* Unions for reinterpreting between floats and integers. 
*/ - -typedef union { - float32 f; - uint32_t l; -} CPU_FloatU; - -typedef union { - float64 d; -#if defined(HOST_WORDS_BIGENDIAN) - struct { - uint32_t upper; - uint32_t lower; - } l; -#else - struct { - uint32_t lower; - uint32_t upper; - } l; -#endif - uint64_t ll; -} CPU_DoubleU; - -typedef union { - floatx80 d; - struct { - uint64_t lower; - uint16_t upper; - } l; -} CPU_LDoubleU; - -typedef union { - float128 q; -#if defined(HOST_WORDS_BIGENDIAN) - struct { - uint32_t upmost; - uint32_t upper; - uint32_t lower; - uint32_t lowest; - } l; - struct { - uint64_t upper; - uint64_t lower; - } ll; -#else - struct { - uint32_t lowest; - uint32_t lower; - uint32_t upper; - uint32_t upmost; - } l; - struct { - uint64_t lower; - uint64_t upper; - } ll; -#endif -} CPU_QuadU; - /* unaligned/endian-independent pointer access */ /* diff --git a/include/qemu/buffer.h b/include/qemu/buffer.h index d34d2c857c..e95dfd696c 100644 --- a/include/qemu/buffer.h +++ b/include/qemu/buffer.h @@ -49,7 +49,7 @@ struct Buffer { * to identify in debug traces. */ void buffer_init(Buffer *buffer, const char *name, ...) - GCC_FMT_ATTR(2, 3); + G_GNUC_PRINTF(2, 3); /** * buffer_shrink: diff --git a/include/qemu/cacheinfo.h b/include/qemu/cacheinfo.h new file mode 100644 index 0000000000..019a157ea0 --- /dev/null +++ b/include/qemu/cacheinfo.h @@ -0,0 +1,21 @@ +/* + * QEMU host cacheinfo information + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#ifndef QEMU_CACHEINFO_H +#define QEMU_CACHEINFO_H + +/* + * These variables represent our best guess at the host icache and + * dcache sizes, expressed both as the size in bytes and as the + * base-2 log of the size in bytes. They are initialized at startup + * (via an attribute 'constructor' function). + */ +extern int qemu_icache_linesize; +extern int qemu_icache_linesize_log; +extern int qemu_dcache_linesize; +extern int qemu_dcache_linesize_log; + +#endif diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h index 72fbe7068c..756ec181b6 100644 --- a/include/qemu/compiler.h +++ b/include/qemu/compiler.h @@ -7,6 +7,11 @@ #ifndef COMPILER_H #define COMPILER_H +#define HOST_BIG_ENDIAN (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) + +/* HOST_LONG_BITS is the size of a native pointer in bits. */ +#define HOST_LONG_BITS (__SIZEOF_POINTER__ * 8) + #if defined __clang_analyzer__ || defined __COVERITY__ #define QEMU_STATIC_ANALYSIS 1 #endif @@ -17,12 +22,6 @@ #define QEMU_EXTERN_C extern #endif -#define QEMU_NORETURN __attribute__ ((__noreturn__)) - -#define QEMU_WARN_UNUSED_RESULT __attribute__((warn_unused_result)) - -#define QEMU_SENTINEL __attribute__((sentinel)) - #if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__)) # define QEMU_PACKED __attribute__((gcc_struct, packed)) #else @@ -83,19 +82,12 @@ #define QEMU_BUILD_BUG_ON_ZERO(x) (sizeof(QEMU_BUILD_BUG_ON_STRUCT(x)) - \ sizeof(QEMU_BUILD_BUG_ON_STRUCT(x))) -#if defined(__clang__) -/* clang doesn't support gnu_printf, so use printf. */ -# define GCC_FMT_ATTR(n, m) __attribute__((format(printf, n, m))) -#else -/* Use gnu_printf (qemu uses standard format strings). */ -# define GCC_FMT_ATTR(n, m) __attribute__((format(gnu_printf, n, m))) -# if defined(_WIN32) +#if !defined(__clang__) && defined(_WIN32) /* * Map __printf__ to __gnu_printf__ because we want standard format strings even * when MinGW or GLib include files use __printf__. 
*/ -# define __printf__ __gnu_printf__ -# endif +# define __printf__ __gnu_printf__ #endif #ifndef __has_warning @@ -118,6 +110,14 @@ #define __has_attribute(x) 0 /* compatibility with older GCC */ #endif +#if defined(__SANITIZE_ADDRESS__) || __has_feature(address_sanitizer) +# define QEMU_SANITIZE_ADDRESS 1 +#endif + +#if defined(__SANITIZE_THREAD__) || __has_feature(thread_sanitizer) +# define QEMU_SANITIZE_THREAD 1 +#endif + /* * GCC doesn't provide __has_attribute() until GCC 5, but we know all the GCC * versions we support have the "flatten" attribute. Clang may not have the @@ -166,22 +166,6 @@ #define QEMU_ALWAYS_INLINE #endif -/** - * qemu_build_not_reached() - * - * The compiler, during optimization, is expected to prove that a call - * to this function cannot be reached and remove it. If the compiler - * supports QEMU_ERROR, this will be reported at compile time; otherwise - * this will be reported at link time due to the missing symbol. - */ -extern void QEMU_NORETURN QEMU_ERROR("code path is reachable") - qemu_build_not_reached_always(void); -#if defined(__OPTIMIZE__) && !defined(__NO_INLINE__) && !defined(XEMU_DEBUG_BUILD) -#define qemu_build_not_reached() qemu_build_not_reached_always() -#else -#define qemu_build_not_reached() g_assert_not_reached() -#endif - /** * In most cases, normal "fallthrough" comments are good enough for * switch-case statements, but sometimes the compiler has problems @@ -204,4 +188,17 @@ extern void QEMU_NORETURN QEMU_ERROR("code path is reachable") #define QEMU_DISABLE_CFI #endif +/* + * Apple clang version 14 has a bug in its __builtin_subcll(); define + * BUILTIN_SUBCLL_BROKEN for the offending versions so we can avoid it. + * When a version of Apple clang which has this bug fixed is released + * we can add an upper bound to this check. + * See https://gitlab.com/qemu-project/qemu/-/issues/1631 + * and https://gitlab.com/qemu-project/qemu/-/issues/1659 for details. + * The bug never made it into any upstream LLVM releases, only Apple ones. + */ +#if defined(__apple_build_version__) && __clang_major__ >= 14 +#define BUILTIN_SUBCLL_BROKEN +#endif + #endif /* COMPILER_H */ diff --git a/include/qemu/config-file.h b/include/qemu/config-file.h index f605423321..321e7c7c03 100644 --- a/include/qemu/config-file.h +++ b/include/qemu/config-file.h @@ -12,7 +12,6 @@ void qemu_add_opts(QemuOptsList *list); void qemu_add_drive_opts(QemuOptsList *list); int qemu_global_option(const char *str); -void qemu_config_write(FILE *fp); int qemu_config_parse(FILE *fp, QemuOptsList **lists, const char *fname, Error **errp); diff --git a/include/qemu/coroutine-tls.h b/include/qemu/coroutine-tls.h new file mode 100644 index 0000000000..1558a826aa --- /dev/null +++ b/include/qemu/coroutine-tls.h @@ -0,0 +1,165 @@ +/* + * QEMU Thread Local Storage for coroutines + * + * Copyright Red Hat + * + * SPDX-License-Identifier: LGPL-2.1-or-later + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + * It is forbidden to access Thread Local Storage in coroutines because + * compiler optimizations may cause values to be cached across coroutine + * re-entry. Coroutines can run in more than one thread through the course of + * their life, leading bugs when stale TLS values from the wrong thread are + * used as a result of compiler optimization. 
+ * + * An example is: + * + * ..code-block:: c + * :caption: A coroutine that may see the wrong TLS value + * + * static __thread AioContext *current_aio_context; + * ... + * static void coroutine_fn foo(void) + * { + * aio_notify(current_aio_context); + * qemu_coroutine_yield(); + * aio_notify(current_aio_context); // <-- may be stale after yielding! + * } + * + * This header provides macros for safely defining variables in Thread Local + * Storage: + * + * ..code-block:: c + * :caption: A coroutine that safely uses TLS + * + * QEMU_DEFINE_STATIC_CO_TLS(AioContext *, current_aio_context) + * ... + * static void coroutine_fn foo(void) + * { + * aio_notify(get_current_aio_context()); + * qemu_coroutine_yield(); + * aio_notify(get_current_aio_context()); // <-- safe + * } + */ + +#ifndef QEMU_COROUTINE_TLS_H +#define QEMU_COROUTINE_TLS_H + +/* + * To stop the compiler from caching TLS values we define accessor functions + * with __attribute__((noinline)) plus asm volatile("") to prevent + * optimizations that override noinline. + * + * The compiler can still analyze noinline code and make optimizations based on + * that knowledge, so an inline asm output operand is used to prevent + * optimizations that make assumptions about the address of the TLS variable. + * + * This is fragile and ultimately needs to be solved by a mechanism that is + * guaranteed to work by the compiler (e.g. stackless coroutines), but for now + * we use this approach to prevent issues. + */ + +/** + * QEMU_DECLARE_CO_TLS: + * @type: the variable's C type + * @var: the variable name + * + * Declare an extern variable in Thread Local Storage from a header file: + * + * .. code-block:: c + * :caption: Declaring an extern variable in Thread Local Storage + * + * QEMU_DECLARE_CO_TLS(int, my_count) + * ... + * int c = get_my_count(); + * set_my_count(c + 1); + * *get_ptr_my_count() = 0; + * + * This is a coroutine-safe replacement for the __thread keyword and is + * equivalent to the following code: + * + * .. code-block:: c + * :caption: Declaring a TLS variable using __thread + * + * extern __thread int my_count; + * ... + * int c = my_count; + * my_count = c + 1; + * *(&my_count) = 0; + */ +#define QEMU_DECLARE_CO_TLS(type, var) \ + __attribute__((noinline)) type get_##var(void); \ + __attribute__((noinline)) void set_##var(type v); \ + __attribute__((noinline)) type *get_ptr_##var(void); + +/** + * QEMU_DEFINE_CO_TLS: + * @type: the variable's C type + * @var: the variable name + * + * Define a variable in Thread Local Storage that was previously declared from + * a header file with QEMU_DECLARE_CO_TLS(): + * + * .. code-block:: c + * :caption: Defining a variable in Thread Local Storage + * + * QEMU_DEFINE_CO_TLS(int, my_count) + * + * This is a coroutine-safe replacement for the __thread keyword and is + * equivalent to the following code: + * + * .. code-block:: c + * :caption: Defining a TLS variable using __thread + * + * __thread int my_count; + */ +#define QEMU_DEFINE_CO_TLS(type, var) \ + static __thread type co_tls_##var; \ + type get_##var(void) { asm volatile(""); return co_tls_##var; } \ + void set_##var(type v) { asm volatile(""); co_tls_##var = v; } \ + type *get_ptr_##var(void) \ + { type *ptr = &co_tls_##var; asm volatile("" : "+rm" (ptr)); return ptr; } + +/** + * QEMU_DEFINE_STATIC_CO_TLS: + * @type: the variable's C type + * @var: the variable name + * + * Define a static variable in Thread Local Storage: + * + * .. 
code-block:: c + * :caption: Defining a static variable in Thread Local Storage + * + * QEMU_DEFINE_STATIC_CO_TLS(int, my_count) + * ... + * int c = get_my_count(); + * set_my_count(c + 1); + * *get_ptr_my_count() = 0; + * + * This is a coroutine-safe replacement for the __thread keyword and is + * equivalent to the following code: + * + * .. code-block:: c + * :caption: Defining a static TLS variable using __thread + * + * static __thread int my_count; + * ... + * int c = my_count; + * my_count = c + 1; + * *(&my_count) = 0; + */ +#define QEMU_DEFINE_STATIC_CO_TLS(type, var) \ + static __thread type co_tls_##var; \ + static __attribute__((noinline, unused)) \ + type get_##var(void) \ + { asm volatile(""); return co_tls_##var; } \ + static __attribute__((noinline, unused)) \ + void set_##var(type v) \ + { asm volatile(""); co_tls_##var = v; } \ + static __attribute__((noinline, unused)) \ + type *get_ptr_##var(void) \ + { type *ptr = &co_tls_##var; asm volatile("" : "+rm" (ptr)); return ptr; } + +#endif /* QEMU_COROUTINE_TLS_H */ diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h index 4829ff373d..89650a2d7f 100644 --- a/include/qemu/coroutine.h +++ b/include/qemu/coroutine.h @@ -92,12 +92,12 @@ void coroutine_fn qemu_coroutine_yield(void); /** * Get the AioContext of the given coroutine */ -AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co); +AioContext *qemu_coroutine_get_aio_context(Coroutine *co); /** * Get the currently executing coroutine */ -Coroutine *coroutine_fn qemu_coroutine_self(void); +Coroutine *qemu_coroutine_self(void); /** * Return whether or not currently inside a coroutine @@ -198,27 +198,40 @@ typedef struct CoQueue { */ void qemu_co_queue_init(CoQueue *queue); +typedef enum { + /* + * Enqueue at front instead of back. Use this to re-queue a request when + * its wait condition is not satisfied after being woken up. + */ + CO_QUEUE_WAIT_FRONT = 0x1, +} CoQueueWaitFlags; + /** * Adds the current coroutine to the CoQueue and transfers control to the * caller of the coroutine. The mutex is unlocked during the wait and * locked again afterwards. */ #define qemu_co_queue_wait(queue, lock) \ - qemu_co_queue_wait_impl(queue, QEMU_MAKE_LOCKABLE(lock)) -void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock); + qemu_co_queue_wait_impl(queue, QEMU_MAKE_LOCKABLE(lock), 0) +#define qemu_co_queue_wait_flags(queue, lock, flags) \ + qemu_co_queue_wait_impl(queue, QEMU_MAKE_LOCKABLE(lock), (flags)) +void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock, + CoQueueWaitFlags flags); /** - * Removes the next coroutine from the CoQueue, and wake it up. + * Removes the next coroutine from the CoQueue, and queue it to run after + * the currently-running coroutine yields. * Returns true if a coroutine was removed, false if the queue is empty. - * OK to run from coroutine and non-coroutine context. + * Used from coroutine context, use qemu_co_enter_next outside. */ -bool qemu_co_queue_next(CoQueue *queue); +bool coroutine_fn qemu_co_queue_next(CoQueue *queue); /** - * Empties the CoQueue; all coroutines are woken up. - * OK to run from coroutine and non-coroutine context. + * Empties the CoQueue and queues the coroutine to run after + * the currently-running coroutine yields. + * Used from coroutine context, use qemu_co_enter_all outside. */ -void qemu_co_queue_restart_all(CoQueue *queue); +void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue); /** * Removes the next coroutine from the CoQueue, and wake it up. 
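The CO_QUEUE_WAIT_FRONT flag introduced above supports the re-queue-on-spurious-wakeup pattern its comment describes. A hedged sketch, not part of the patch; the queue, mutex and condition names are illustrative, and the locking discipline is assumed to follow the CoQueue rules stated in the comments:

    /* Illustrative: wait on a CoQueue until *cond becomes true. */
    static void coroutine_fn wait_for_cond(CoQueue *queue, CoMutex *mutex,
                                           bool *cond)
    {
        qemu_co_mutex_lock(mutex);
        if (!*cond) {
            /* First time: queue at the back as usual. */
            qemu_co_queue_wait(queue, mutex);
            while (!*cond) {
                /* Woken but the condition still does not hold: re-queue at
                 * the front so we keep our place, per CO_QUEUE_WAIT_FRONT. */
                qemu_co_queue_wait_flags(queue, mutex, CO_QUEUE_WAIT_FRONT);
            }
        }
        qemu_co_mutex_unlock(mutex);
    }
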
Unlike @@ -233,6 +246,19 @@ void qemu_co_queue_restart_all(CoQueue *queue); qemu_co_enter_next_impl(queue, QEMU_MAKE_LOCKABLE(lock)) bool qemu_co_enter_next_impl(CoQueue *queue, QemuLockable *lock); +/** + * Empties the CoQueue, waking the waiting coroutine one at a time. Unlike + * qemu_co_queue_all, this function releases the lock during aio_co_wake + * because it is meant to be used outside coroutine context; in that case, the + * coroutine is entered immediately, before qemu_co_enter_all returns. + * + * If used in coroutine context, qemu_co_enter_all is equivalent to + * qemu_co_queue_all. + */ +#define qemu_co_enter_all(queue, lock) \ + qemu_co_enter_all_impl(queue, QEMU_MAKE_LOCKABLE(lock)) +void qemu_co_enter_all_impl(CoQueue *queue, QemuLockable *lock); + /** * Checks if the CoQueue is empty. */ @@ -261,7 +287,7 @@ void qemu_co_rwlock_init(CoRwlock *lock); * of a parallel writer, control is transferred to the caller of the current * coroutine. */ -void qemu_co_rwlock_rdlock(CoRwlock *lock); +void coroutine_fn qemu_co_rwlock_rdlock(CoRwlock *lock); /** * Write Locks the CoRwlock from a reader. This is a bit more efficient than @@ -270,7 +296,7 @@ void qemu_co_rwlock_rdlock(CoRwlock *lock); * to the caller of the current coroutine; another writer might run while * @qemu_co_rwlock_upgrade blocks. */ -void qemu_co_rwlock_upgrade(CoRwlock *lock); +void coroutine_fn qemu_co_rwlock_upgrade(CoRwlock *lock); /** * Downgrades a write-side critical section to a reader. Downgrading with @@ -278,20 +304,20 @@ void qemu_co_rwlock_upgrade(CoRwlock *lock); * followed by @qemu_co_rwlock_rdlock. This makes it more efficient, but * may also sometimes be necessary for correctness. */ -void qemu_co_rwlock_downgrade(CoRwlock *lock); +void coroutine_fn qemu_co_rwlock_downgrade(CoRwlock *lock); /** * Write Locks the mutex. If the lock cannot be taken immediately because * of a parallel reader, control is transferred to the caller of the current * coroutine. */ -void qemu_co_rwlock_wrlock(CoRwlock *lock); +void coroutine_fn qemu_co_rwlock_wrlock(CoRwlock *lock); /** * Unlocks the read/write lock and schedules the next coroutine that was * waiting for this lock to be run. */ -void qemu_co_rwlock_unlock(CoRwlock *lock); +void coroutine_fn qemu_co_rwlock_unlock(CoRwlock *lock); typedef struct QemuCoSleep { Coroutine *to_wake; @@ -316,6 +342,19 @@ static inline void coroutine_fn qemu_co_sleep_ns(QEMUClockType type, int64_t ns) qemu_co_sleep_ns_wakeable(&w, type, ns); } +typedef void CleanupFunc(void *opaque); +/** + * Run @entry in a coroutine and start a timer. Wait for @entry to finish or + * for the timer to elapse, whichever happens first. If @entry finishes, + * return 0; if the timer elapses first, return -ETIMEDOUT. + * + * Be careful: execution of @entry is not cancelled; the caller must handle + * this somehow. If @clean is provided, it is called after the coroutine + * finishes if the timeout happened. + */ +int coroutine_fn qemu_co_timeout(CoroutineEntry *entry, void *opaque, + uint64_t timeout_ns, CleanupFunc clean); + /** * Wake a coroutine if it is sleeping in qemu_co_sleep_ns. The timer will be * deleted. 
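The new qemu_co_timeout() above runs a coroutine entry point under a deadline. A sketch of how a caller might use it, not part of the patch; the task structure, entry and cleanup functions are hypothetical, and NANOSECONDS_PER_SECOND is the usual constant from qemu/timer.h:

    typedef struct ExampleTask {
        int result;
    } ExampleTask;

    /* Hypothetical long-running work driven by qemu_co_timeout(). */
    static void coroutine_fn example_task_entry(void *opaque)
    {
        ExampleTask *task = opaque;
        /* ... do the work, fill in task->result ... */
    }

    /* Called only after a timeout, once the coroutine finally finishes. */
    static void example_task_clean(void *opaque)
    {
        g_free(opaque);
    }

    static int coroutine_fn run_task_with_deadline(void)
    {
        ExampleTask *task = g_new0(ExampleTask, 1);
        int ret = qemu_co_timeout(example_task_entry, task,
                                  NANOSECONDS_PER_SECOND, example_task_clean);
        if (ret == 0) {
            g_free(task);   /* entry finished in time; the data is ours again */
        }
        return ret;         /* 0 or -ETIMEDOUT */
    }
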
@sleep_state must be the variable whose address was given to @@ -331,6 +370,41 @@ void qemu_co_sleep_wake(QemuCoSleep *w); */ void coroutine_fn yield_until_fd_readable(int fd); +/** + * Increase coroutine pool size + */ +void qemu_coroutine_inc_pool_size(unsigned int additional_pool_size); + +/** + * Decrease coroutine pool size + */ +void qemu_coroutine_dec_pool_size(unsigned int additional_pool_size); + #include "qemu/lockable.h" +/** + * Sends a (part of) iovec down a socket, yielding when the socket is full, or + * Receives data into a (part of) iovec from a socket, + * yielding when there is no data in the socket. + * The same interface as qemu_sendv_recvv(), with added yielding. + * XXX should mark these as coroutine_fn + */ +ssize_t coroutine_fn qemu_co_sendv_recvv(int sockfd, struct iovec *iov, + unsigned iov_cnt, size_t offset, + size_t bytes, bool do_send); +#define qemu_co_recvv(sockfd, iov, iov_cnt, offset, bytes) \ + qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, false) +#define qemu_co_sendv(sockfd, iov, iov_cnt, offset, bytes) \ + qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, true) + +/** + * The same as above, but with just a single buffer + */ +ssize_t coroutine_fn qemu_co_send_recv(int sockfd, void *buf, size_t bytes, + bool do_send); +#define qemu_co_recv(sockfd, buf, bytes) \ + qemu_co_send_recv(sockfd, buf, bytes, false) +#define qemu_co_send(sockfd, buf, bytes) \ + qemu_co_send_recv(sockfd, buf, bytes, true) + #endif /* QEMU_COROUTINE_H */ diff --git a/include/qemu/cpu-float.h b/include/qemu/cpu-float.h new file mode 100644 index 0000000000..a26b9faf68 --- /dev/null +++ b/include/qemu/cpu-float.h @@ -0,0 +1,64 @@ +#ifndef QEMU_CPU_FLOAT_H +#define QEMU_CPU_FLOAT_H + +#include "fpu/softfloat-types.h" + +/* Unions for reinterpreting between floats and integers. 
*/ + +typedef union { + float32 f; + uint32_t l; +} CPU_FloatU; + +typedef union { + float64 d; +#if HOST_BIG_ENDIAN + struct { + uint32_t upper; + uint32_t lower; + } l; +#else + struct { + uint32_t lower; + uint32_t upper; + } l; +#endif + uint64_t ll; +} CPU_DoubleU; + +typedef union { + floatx80 d; + struct { + uint64_t lower; + uint16_t upper; + } l; +} CPU_LDoubleU; + +typedef union { + float128 q; +#if HOST_BIG_ENDIAN + struct { + uint32_t upmost; + uint32_t upper; + uint32_t lower; + uint32_t lowest; + } l; + struct { + uint64_t upper; + uint64_t lower; + } ll; +#else + struct { + uint32_t lowest; + uint32_t lower; + uint32_t upper; + uint32_t upmost; + } l; + struct { + uint64_t lower; + uint64_t upper; + } ll; +#endif +} CPU_QuadU; + +#endif /* QEMU_CPU_FLOAT_H */ diff --git a/include/qemu/cpuid.h b/include/qemu/cpuid.h index 09fc245b91..7adb12d320 100644 --- a/include/qemu/cpuid.h +++ b/include/qemu/cpuid.h @@ -45,12 +45,26 @@ #ifndef bit_AVX2 #define bit_AVX2 (1 << 5) #endif -#ifndef bit_AVX512F -#define bit_AVX512F (1 << 16) -#endif #ifndef bit_BMI2 #define bit_BMI2 (1 << 8) #endif +#ifndef bit_AVX512F +#define bit_AVX512F (1 << 16) +#endif +#ifndef bit_AVX512DQ +#define bit_AVX512DQ (1 << 17) +#endif +#ifndef bit_AVX512BW +#define bit_AVX512BW (1 << 30) +#endif +#ifndef bit_AVX512VL +#define bit_AVX512VL (1u << 31) +#endif + +/* Leaf 7, %ecx */ +#ifndef bit_AVX512VBMI2 +#define bit_AVX512VBMI2 (1 << 6) +#endif /* Leaf 0x80000001, %ecx */ #ifndef bit_LZCNT diff --git a/include/qemu/crc-ccitt.h b/include/qemu/crc-ccitt.h index 06ee55b159..d6eb49146d 100644 --- a/include/qemu/crc-ccitt.h +++ b/include/qemu/crc-ccitt.h @@ -11,8 +11,8 @@ * SPDX-License-Identifier: GPL-2.0 */ -#ifndef _CRC_CCITT_H -#define _CRC_CCITT_H +#ifndef CRC_CCITT_H +#define CRC_CCITT_H extern uint16_t const crc_ccitt_table[256]; extern uint16_t const crc_ccitt_false_table[256]; @@ -30,4 +30,4 @@ static inline uint16_t crc_ccitt_false_byte(uint16_t crc, const uint8_t c) return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c]; } -#endif /* _CRC_CCITT_H */ +#endif /* CRC_CCITT_H */ diff --git a/include/qemu/cutils.h b/include/qemu/cutils.h index 986ed8e15f..92c436d8c7 100644 --- a/include/qemu/cutils.h +++ b/include/qemu/cutils.h @@ -1,6 +1,24 @@ #ifndef QEMU_CUTILS_H #define QEMU_CUTILS_H +/* + * si_prefix: + * @exp10: exponent of 10, a multiple of 3 between -18 and 18 inclusive. + * + * Return a SI prefix (n, u, m, K, M, etc.) corresponding + * to the given exponent of 10. + */ +const char *si_prefix(unsigned int exp10); + +/* + * iec_binary_prefix: + * @exp2: exponent of 2, a multiple of 10 between 0 and 60 inclusive. + * + * Return an IEC binary prefix (Ki, Mi, etc.) corresponding + * to the given exponent of 2. + */ +const char *iec_binary_prefix(unsigned int exp2); + /** * pstrcpy: * @buf: buffer to copy string into @@ -129,9 +147,6 @@ static inline const char *qemu_strchrnul(const char *s, int c) const char *qemu_strchrnul(const char *s, int c); #endif time_t mktimegm(struct tm *tm); -int qemu_fdatasync(int fd); -int qemu_msync(void *addr, size_t length, int fd); -int fcntl_setfl(int fd, int flag); int qemu_parse_fd(const char *param); int qemu_strtoi(const char *nptr, const char **endptr, int base, int *result); @@ -196,17 +211,61 @@ int uleb128_decode_small(const uint8_t *in, uint32_t *n); */ int qemu_pstrcmp0(const char **str1, const char **str2); +/* Find program directory, and save it for later usage with + * qemu_get_exec_dir(). + * Try OS specific API first, if not working, parse from argv0. 
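si_prefix() and iec_binary_prefix(), declared above, map an exponent to a printable unit prefix. A small sketch of the kind of formatting they enable, not part of the patch; the helper function and its output format are illustrative:

    /* Illustrative: render a frequency such as 2500000.0 as "2.5 MHz". */
    static char *format_hz(double hz)
    {
        unsigned int exp10 = 0;

        while (hz >= 1000.0 && exp10 < 18) {
            hz /= 1000.0;
            exp10 += 3;     /* si_prefix() expects a multiple of 3 */
        }
        return g_strdup_printf("%0.4g %sHz", hz, si_prefix(exp10));
    }
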
*/ +void qemu_init_exec_dir(const char *argv0); + +/* Get the saved exec dir. */ +const char *qemu_get_exec_dir(void); /** * get_relocated_path: * @dir: the directory (typically a `CONFIG_*DIR` variable) to be relocated. * * Returns a path for @dir that uses the directory of the running executable - * as the prefix. For example, if `bindir` is `/usr/bin` and @dir is - * `/usr/share/qemu`, the function will append `../share/qemu` to the - * directory that contains the running executable and return the result. + * as the prefix. + * + * When a directory named `qemu-bundle` exists in the directory of the running + * executable, the path to the directory will be prepended to @dir. For + * example, if the directory of the running executable is `/qemu/build` @dir + * is `/usr/share/qemu`, the result will be + * `/qemu/build/qemu-bundle/usr/share/qemu`. The directory is expected to exist + * in the build tree. + * + * Otherwise, the directory of the running executable will be used as the + * prefix and it appends the relative path from `bindir` to @dir. For example, + * if the directory of the running executable is `/opt/qemu/bin`, `bindir` is + * `/usr/bin` and @dir is `/usr/share/qemu`, the result will be + * `/opt/qemu/bin/../share/qemu`. + * * The returned string should be freed by the caller. */ char *get_relocated_path(const char *dir); +static inline const char *yes_no(bool b) +{ + return b ? "yes" : "no"; +} + +/* + * helper to parse debug environment variables + */ +int parse_debug_env(const char *name, int max, int initial); + +/* + * Hexdump a line of a byte buffer into a hexadecimal/ASCII buffer + */ +#define QEMU_HEXDUMP_LINE_BYTES 16 /* Number of bytes to dump */ +#define QEMU_HEXDUMP_LINE_LEN 75 /* Number of characters in line */ +void qemu_hexdump_line(char *line, unsigned int b, const void *bufptr, + unsigned int len, bool ascii); + +/* + * Hexdump a buffer to a file. An optional string prefix is added to every line + */ + +void qemu_hexdump(FILE *fp, const char *prefix, + const void *bufptr, size_t size); + #endif diff --git a/include/qemu/dbus.h b/include/qemu/dbus.h index 9d591f9ee4..08f00dfd53 100644 --- a/include/qemu/dbus.h +++ b/include/qemu/dbus.h @@ -12,6 +12,30 @@ #include +#include "qom/object.h" +#include "chardev/char.h" +#include "qemu/notify.h" +#include "qemu/typedefs.h" + +/* glib/gio 2.68 */ +#define DBUS_METHOD_INVOCATION_HANDLED TRUE +#define DBUS_METHOD_INVOCATION_UNHANDLED FALSE + +/* in msec */ +#define DBUS_DEFAULT_TIMEOUT 1000 + +#define DBUS_DISPLAY1_ROOT "/org/qemu/Display1" + +#define DBUS_DISPLAY_ERROR (dbus_display_error_quark()) +GQuark dbus_display_error_quark(void); + +typedef enum { + DBUS_DISPLAY_ERROR_FAILED, + DBUS_DISPLAY_ERROR_INVALID, + DBUS_DISPLAY_ERROR_UNSUPPORTED, + DBUS_DISPLAY_N_ERRORS, +} DBusDisplayError; + GStrv qemu_dbus_get_queued_owners(GDBusConnection *connection, const char *name, Error **errp); diff --git a/include/qemu/error-report.h b/include/qemu/error-report.h index 9d197daca3..3ae2357fda 100644 --- a/include/qemu/error-report.h +++ b/include/qemu/error-report.h @@ -30,23 +30,21 @@ void loc_set_none(void); void loc_set_cmdline(char **argv, int idx, int cnt); void loc_set_file(const char *fname, int lno); -int error_vprintf(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0); -int error_printf(const char *fmt, ...) GCC_FMT_ATTR(1, 2); -int error_vprintf_unless_qmp(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0); -int error_printf_unless_qmp(const char *fmt, ...) 
GCC_FMT_ATTR(1, 2); +int error_vprintf(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0); +int error_printf(const char *fmt, ...) G_GNUC_PRINTF(1, 2); -void error_vreport(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0); -void warn_vreport(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0); -void info_vreport(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0); +void error_vreport(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0); +void warn_vreport(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0); +void info_vreport(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0); -void error_report(const char *fmt, ...) GCC_FMT_ATTR(1, 2); -void warn_report(const char *fmt, ...) GCC_FMT_ATTR(1, 2); -void info_report(const char *fmt, ...) GCC_FMT_ATTR(1, 2); +void error_report(const char *fmt, ...) G_GNUC_PRINTF(1, 2); +void warn_report(const char *fmt, ...) G_GNUC_PRINTF(1, 2); +void info_report(const char *fmt, ...) G_GNUC_PRINTF(1, 2); bool error_report_once_cond(bool *printed, const char *fmt, ...) - GCC_FMT_ATTR(2, 3); + G_GNUC_PRINTF(2, 3); bool warn_report_once_cond(bool *printed, const char *fmt, ...) - GCC_FMT_ATTR(2, 3); + G_GNUC_PRINTF(2, 3); void error_init(const char *argv0); @@ -72,8 +70,6 @@ void error_init(const char *argv0); fmt, ##__VA_ARGS__); \ }) -const char *error_get_progname(void); - extern bool message_with_timestamp; extern bool error_with_guestname; extern const char *error_guest_name; diff --git a/include/qemu/event_notifier.h b/include/qemu/event_notifier.h index b79add035d..8a4ff308e1 100644 --- a/include/qemu/event_notifier.h +++ b/include/qemu/event_notifier.h @@ -38,6 +38,7 @@ int event_notifier_test_and_clear(EventNotifier *); #ifdef CONFIG_POSIX void event_notifier_init_fd(EventNotifier *, int fd); int event_notifier_get_fd(const EventNotifier *); +int event_notifier_get_wfd(const EventNotifier *); #else HANDLE event_notifier_get_handle(EventNotifier *); #endif diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h index 5e71b6d6f7..af4e4ab746 100644 --- a/include/qemu/hbitmap.h +++ b/include/qemu/hbitmap.h @@ -76,20 +76,9 @@ void hbitmap_truncate(HBitmap *hb, uint64_t size); * * Store result of merging @a and @b into @result. * @result is allowed to be equal to @a or @b. - * - * Return true if the merge was successful, - * false if it was not attempted. + * All bitmaps must have same size. */ -bool hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result); - -/** - * hbitmap_can_merge: - * - * hbitmap_can_merge(a, b) && hbitmap_can_merge(a, result) is sufficient and - * necessary for hbitmap_merge will not fail. - * - */ -bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b); +void hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result); /** * hbitmap_empty: @@ -340,6 +329,18 @@ bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t start, int64_t end, int64_t max_dirty_count, int64_t *dirty_start, int64_t *dirty_count); +/* + * bdrv_dirty_bitmap_status: + * @hb: The HBitmap to operate on + * @start: The bit to start from + * @count: Number of bits to proceed + * @pnum: Out-parameter. How many bits has same value starting from @start + * + * Returns true if bitmap is dirty at @start, false otherwise. + */ +bool hbitmap_status(const HBitmap *hb, int64_t start, int64_t count, + int64_t *pnum); + /** * hbitmap_iter_next: * @hbi: HBitmapIter to operate on. 
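Editor's note: the hbitmap.h hunk above tightens the hbitmap_merge() contract. The boolean return value and the separate hbitmap_can_merge() probe are gone, so equal bitmap sizes are now a caller-side precondition. Below is a minimal C sketch of the new calling convention; the wrapper name and the caller's setup are illustrative assumptions, not part of the patch.

#include "qemu/osdep.h"
#include "qemu/hbitmap.h"

/*
 * Illustrative sketch only: fold @src into @dst in place under the new
 * contract. Precondition (now the caller's responsibility, since
 * hbitmap_can_merge() was removed): both bitmaps were created with the
 * same size.
 */
static void merge_dirty_bits(HBitmap *dst, const HBitmap *src)
{
    hbitmap_merge(dst, src, dst);   /* @result may alias either input */
}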
diff --git a/include/qemu/help-texts.h b/include/qemu/help-texts.h new file mode 100644 index 0000000000..4f265fed8d --- /dev/null +++ b/include/qemu/help-texts.h @@ -0,0 +1,13 @@ +#ifndef QEMU_HELP_TEXTS_H +#define QEMU_HELP_TEXTS_H + +/* Copyright string for -version arguments, About dialogs, etc */ +#define QEMU_COPYRIGHT "Copyright (c) 2003-2022 " \ + "Fabrice Bellard and the QEMU Project developers" + +/* Bug reporting information for --help arguments, About dialogs, etc */ +#define QEMU_HELP_BOTTOM \ + "See for how to report bugs.\n" \ + "More information on the QEMU project at ." + +#endif diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h index 711b221704..b3434ec0bc 100644 --- a/include/qemu/host-utils.h +++ b/include/qemu/host-utils.h @@ -23,11 +23,16 @@ * THE SOFTWARE. */ +/* Portions of this work are licensed under the terms of the GNU GPL, + * version 2 or later. See the COPYING file in the top-level directory. + */ + #ifndef HOST_UTILS_H #define HOST_UTILS_H #include "qemu/compiler.h" #include "qemu/bswap.h" +#include "qemu/int128.h" #ifdef CONFIG_INT128 static inline void mulu64(uint64_t *plow, uint64_t *phigh, @@ -52,43 +57,39 @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) return (__int128_t)a * b / c; } -static inline int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) +static inline uint64_t divu128(uint64_t *plow, uint64_t *phigh, + uint64_t divisor) { - if (divisor == 0) { - return 1; - } else { - __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow; - __uint128_t result = dividend / divisor; - *plow = result; - *phigh = dividend % divisor; - return result > UINT64_MAX; - } + __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow; + __uint128_t result = dividend / divisor; + + *plow = result; + *phigh = result >> 64; + return dividend % divisor; } -static inline int divs128(int64_t *plow, int64_t *phigh, int64_t divisor) +static inline int64_t divs128(uint64_t *plow, int64_t *phigh, + int64_t divisor) { - if (divisor == 0) { - return 1; - } else { - __int128_t dividend = ((__int128_t)*phigh << 64) | *plow; - __int128_t result = dividend / divisor; - *plow = result; - *phigh = dividend % divisor; - return result != *plow; - } + __int128_t dividend = ((__int128_t)*phigh << 64) | *plow; + __int128_t result = dividend / divisor; + + *plow = result; + *phigh = result >> 64; + return dividend % divisor; } #else void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b); void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b); -int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor); -int divs128(int64_t *plow, int64_t *phigh, int64_t divisor); +uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor); +int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor); static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) { union { uint64_t ll; struct { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN uint32_t high, low; #else uint32_t low, high; @@ -357,6 +358,14 @@ static inline uint64_t revbit64(uint64_t x) #endif } +/** + * Return the absolute value of a 64-bit integer as an unsigned 64-bit value + */ +static inline uint64_t uabs64(int64_t v) +{ + return v < 0 ? 
-v : v; +} + /** * sadd32_overflow - addition with overflow indication * @x, @y: addends @@ -367,12 +376,7 @@ static inline uint64_t revbit64(uint64_t x) */ static inline bool sadd32_overflow(int32_t x, int32_t y, int32_t *ret) { -#if __has_builtin(__builtin_add_overflow) || __GNUC__ >= 5 return __builtin_add_overflow(x, y, ret); -#else - *ret = x + y; - return ((*ret ^ x) & ~(x ^ y)) < 0; -#endif } /** @@ -385,12 +389,7 @@ static inline bool sadd32_overflow(int32_t x, int32_t y, int32_t *ret) */ static inline bool sadd64_overflow(int64_t x, int64_t y, int64_t *ret) { -#if __has_builtin(__builtin_add_overflow) || __GNUC__ >= 5 return __builtin_add_overflow(x, y, ret); -#else - *ret = x + y; - return ((*ret ^ x) & ~(x ^ y)) < 0; -#endif } /** @@ -403,12 +402,7 @@ static inline bool sadd64_overflow(int64_t x, int64_t y, int64_t *ret) */ static inline bool uadd32_overflow(uint32_t x, uint32_t y, uint32_t *ret) { -#if __has_builtin(__builtin_add_overflow) || __GNUC__ >= 5 return __builtin_add_overflow(x, y, ret); -#else - *ret = x + y; - return *ret < x; -#endif } /** @@ -421,12 +415,7 @@ static inline bool uadd32_overflow(uint32_t x, uint32_t y, uint32_t *ret) */ static inline bool uadd64_overflow(uint64_t x, uint64_t y, uint64_t *ret) { -#if __has_builtin(__builtin_add_overflow) || __GNUC__ >= 5 return __builtin_add_overflow(x, y, ret); -#else - *ret = x + y; - return *ret < x; -#endif } /** @@ -440,12 +429,7 @@ static inline bool uadd64_overflow(uint64_t x, uint64_t y, uint64_t *ret) */ static inline bool ssub32_overflow(int32_t x, int32_t y, int32_t *ret) { -#if __has_builtin(__builtin_sub_overflow) || __GNUC__ >= 5 return __builtin_sub_overflow(x, y, ret); -#else - *ret = x - y; - return ((*ret ^ x) & (x ^ y)) < 0; -#endif } /** @@ -459,12 +443,7 @@ static inline bool ssub32_overflow(int32_t x, int32_t y, int32_t *ret) */ static inline bool ssub64_overflow(int64_t x, int64_t y, int64_t *ret) { -#if __has_builtin(__builtin_sub_overflow) || __GNUC__ >= 5 return __builtin_sub_overflow(x, y, ret); -#else - *ret = x - y; - return ((*ret ^ x) & (x ^ y)) < 0; -#endif } /** @@ -478,12 +457,7 @@ static inline bool ssub64_overflow(int64_t x, int64_t y, int64_t *ret) */ static inline bool usub32_overflow(uint32_t x, uint32_t y, uint32_t *ret) { -#if __has_builtin(__builtin_sub_overflow) || __GNUC__ >= 5 return __builtin_sub_overflow(x, y, ret); -#else - *ret = x - y; - return x < y; -#endif } /** @@ -497,12 +471,7 @@ static inline bool usub32_overflow(uint32_t x, uint32_t y, uint32_t *ret) */ static inline bool usub64_overflow(uint64_t x, uint64_t y, uint64_t *ret) { -#if __has_builtin(__builtin_sub_overflow) || __GNUC__ >= 5 return __builtin_sub_overflow(x, y, ret); -#else - *ret = x - y; - return x < y; -#endif } /** @@ -515,13 +484,7 @@ static inline bool usub64_overflow(uint64_t x, uint64_t y, uint64_t *ret) */ static inline bool smul32_overflow(int32_t x, int32_t y, int32_t *ret) { -#if __has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5 return __builtin_mul_overflow(x, y, ret); -#else - int64_t z = (int64_t)x * y; - *ret = z; - return *ret != z; -#endif } /** @@ -534,14 +497,7 @@ static inline bool smul32_overflow(int32_t x, int32_t y, int32_t *ret) */ static inline bool smul64_overflow(int64_t x, int64_t y, int64_t *ret) { -#if __has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5 return __builtin_mul_overflow(x, y, ret); -#else - uint64_t hi, lo; - muls64(&lo, &hi, x, y); - *ret = lo; - return hi != ((int64_t)lo >> 63); -#endif } /** @@ -554,13 +510,7 @@ static inline bool 
smul64_overflow(int64_t x, int64_t y, int64_t *ret) */ static inline bool umul32_overflow(uint32_t x, uint32_t y, uint32_t *ret) { -#if __has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5 return __builtin_mul_overflow(x, y, ret); -#else - uint64_t z = (uint64_t)x * y; - *ret = z; - return z > UINT32_MAX; -#endif } /** @@ -573,12 +523,41 @@ static inline bool umul32_overflow(uint32_t x, uint32_t y, uint32_t *ret) */ static inline bool umul64_overflow(uint64_t x, uint64_t y, uint64_t *ret) { -#if __has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5 return __builtin_mul_overflow(x, y, ret); +} + +/* + * Unsigned 128x64 multiplication. + * Returns true if the result got truncated to 128 bits. + * Otherwise, returns false and the multiplication result via plow and phigh. + */ +static inline bool mulu128(uint64_t *plow, uint64_t *phigh, uint64_t factor) +{ +#if defined(CONFIG_INT128) + bool res; + __uint128_t r; + __uint128_t f = ((__uint128_t)*phigh << 64) | *plow; + res = __builtin_mul_overflow(f, factor, &r); + + *plow = r; + *phigh = r >> 64; + + return res; #else - uint64_t hi; - mulu64(ret, &hi, x, y); - return hi != 0; + uint64_t dhi = *phigh; + uint64_t dlo = *plow; + uint64_t ahi; + uint64_t blo, bhi; + + if (dhi == 0) { + mulu64(plow, phigh, dlo, factor); + return false; + } + + mulu64(plow, &ahi, dlo, factor); + mulu64(&blo, &bhi, dhi, factor); + + return uadd64_overflow(ahi, blo, phigh) || bhi != 0; #endif } @@ -617,7 +596,7 @@ static inline uint64_t uadd64_carry(uint64_t x, uint64_t y, bool *pcarry) */ static inline uint64_t usub64_borrow(uint64_t x, uint64_t y, bool *pborrow) { -#if __has_builtin(__builtin_subcll) +#if __has_builtin(__builtin_subcll) && !defined(BUILTIN_SUBCLL_BROKEN) unsigned long long b = *pborrow; x = __builtin_subcll(x, y, b, &b); *pborrow = b & 1; @@ -728,4 +707,83 @@ void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift); */ void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow); +/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd + * (https://gmplib.org/repo/gmp/file/tip/longlong.h) + * + * Licensed under the GPLv2/LGPLv3 + */ +static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1, + uint64_t n0, uint64_t d) +{ +#if defined(__x86_64__) + uint64_t q; + asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d)); + return q; +#elif defined(__s390x__) && !defined(__clang__) + /* Need to use a TImode type to get an even register pair for DLGR. */ + unsigned __int128 n = (unsigned __int128)n1 << 64 | n0; + asm("dlgr %0, %1" : "+r"(n) : "r"(d)); + *r = n >> 64; + return n; +#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7) + /* From Power ISA 2.06, programming note for divdeu. 
*/ + uint64_t q1, q2, Q, r1, r2, R; + asm("divdeu %0,%2,%4; divdu %1,%3,%4" + : "=&r"(q1), "=r"(q2) + : "r"(n1), "r"(n0), "r"(d)); + r1 = -(q1 * d); /* low part of (n1<<64) - (q1 * d) */ + r2 = n0 - (q2 * d); + Q = q1 + q2; + R = r1 + r2; + if (R >= d || R < r2) { /* overflow implies R > d */ + Q += 1; + R -= d; + } + *r = R; + return Q; +#else + uint64_t d0, d1, q0, q1, r1, r0, m; + + d0 = (uint32_t)d; + d1 = d >> 32; + + r1 = n1 % d1; + q1 = n1 / d1; + m = q1 * d0; + r1 = (r1 << 32) | (n0 >> 32); + if (r1 < m) { + q1 -= 1; + r1 += d; + if (r1 >= d) { + if (r1 < m) { + q1 -= 1; + r1 += d; + } + } + } + r1 -= m; + + r0 = r1 % d1; + q0 = r1 / d1; + m = q0 * d0; + r0 = (r0 << 32) | (uint32_t)n0; + if (r0 < m) { + q0 -= 1; + r0 += d; + if (r0 >= d) { + if (r0 < m) { + q0 -= 1; + r0 += d; + } + } + } + r0 -= m; + + *r = r0; + return (q1 << 32) | q0; +#endif +} + +Int128 divu256(Int128 *plow, Int128 *phigh, Int128 divisor); +Int128 divs256(Int128 *plow, Int128 *phigh, Int128 divisor); #endif diff --git a/include/qemu/hw-version.h b/include/qemu/hw-version.h new file mode 100644 index 0000000000..730a8c904d --- /dev/null +++ b/include/qemu/hw-version.h @@ -0,0 +1,27 @@ +/* + * QEMU "hardware version" machinery + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#ifndef QEMU_HW_VERSION_H +#define QEMU_HW_VERSION_H + +/* + * Starting on QEMU 2.5, qemu_hw_version() returns "2.5+" by default + * instead of QEMU_VERSION, so setting hw_version on MachineClass + * is no longer mandatory. + * + * Do NOT change this string, or it will break compatibility on all + * machine classes that don't set hw_version. + */ +#define QEMU_HW_VERSION "2.5+" + +/* QEMU "hardware version" setting. Used to replace code that exposed + * QEMU_VERSION to guests in the past and need to keep compatibility. + * Do not use qemu_hw_version() in new code. 
+ */ +void qemu_set_hw_version(const char *); +const char *qemu_hw_version(void); + +#endif diff --git a/include/qemu/int128.h b/include/qemu/int128.h index 64500385e3..d2b76ca6ac 100644 --- a/include/qemu/int128.h +++ b/include/qemu/int128.h @@ -1,9 +1,9 @@ #ifndef INT128_H #define INT128_H -#ifdef CONFIG_INT128 #include "qemu/bswap.h" +#ifdef CONFIG_INT128 typedef __int128_t Int128; static inline Int128 int128_make64(uint64_t a) @@ -58,6 +58,11 @@ static inline Int128 int128_exts64(int64_t a) return a; } +static inline Int128 int128_not(Int128 a) +{ + return ~a; +} + static inline Int128 int128_and(Int128 a, Int128 b) { return a & b; @@ -68,11 +73,21 @@ static inline Int128 int128_or(Int128 a, Int128 b) return a | b; } +static inline Int128 int128_xor(Int128 a, Int128 b) +{ + return a ^ b; +} + static inline Int128 int128_rshift(Int128 a, int n) { return a >> n; } +static inline Int128 int128_urshift(Int128 a, int n) +{ + return (__uint128_t)a >> n; +} + static inline Int128 int128_lshift(Int128 a, int n) { return a << n; @@ -113,11 +128,21 @@ static inline bool int128_ge(Int128 a, Int128 b) return a >= b; } +static inline bool int128_uge(Int128 a, Int128 b) +{ + return ((__uint128_t)a) >= ((__uint128_t)b); +} + static inline bool int128_lt(Int128 a, Int128 b) { return a < b; } +static inline bool int128_ult(Int128 a, Int128 b) +{ + return (__uint128_t)a < (__uint128_t)b; +} + static inline bool int128_le(Int128 a, Int128 b) { return a <= b; @@ -155,31 +180,77 @@ static inline void int128_subfrom(Int128 *a, Int128 b) static inline Int128 bswap128(Int128 a) { +#if __has_builtin(__builtin_bswap128) + return __builtin_bswap128(a); +#else return int128_make128(bswap64(int128_gethi(a)), bswap64(int128_getlo(a))); +#endif +} + +static inline int clz128(Int128 a) +{ + if (a >> 64) { + return __builtin_clzll(a >> 64); + } else { + return (a) ? __builtin_clzll((uint64_t)a) + 64 : 128; + } +} + +static inline Int128 int128_divu(Int128 a, Int128 b) +{ + return (__uint128_t)a / (__uint128_t)b; +} + +static inline Int128 int128_remu(Int128 a, Int128 b) +{ + return (__uint128_t)a % (__uint128_t)b; +} + +static inline Int128 int128_divs(Int128 a, Int128 b) +{ + return a / b; +} + +static inline Int128 int128_rems(Int128 a, Int128 b) +{ + return a % b; } #else /* !CONFIG_INT128 */ typedef struct Int128 Int128; +/* + * We guarantee that the in-memory byte representation of an + * Int128 is that of a host-endian-order 128-bit integer + * (whether using this struct or the __int128_t version of the type). + * Some code using this type relies on this (eg when copying it into + * guest memory or a gdb protocol buffer, or by using Int128 in + * a union with other integer types). + */ struct Int128 { +#if HOST_BIG_ENDIAN + int64_t hi; + uint64_t lo; +#else uint64_t lo; int64_t hi; +#endif }; static inline Int128 int128_make64(uint64_t a) { - return (Int128) { a, 0 }; + return (Int128) { .lo = a, .hi = 0 }; } static inline Int128 int128_makes64(int64_t a) { - return (Int128) { a, a >> 63 }; + return (Int128) { .lo = a, .hi = a >> 63 }; } static inline Int128 int128_make128(uint64_t lo, uint64_t hi) { - return (Int128) { lo, hi }; + return (Int128) { .lo = lo, .hi = hi }; } static inline uint64_t int128_get64(Int128 a) @@ -210,22 +281,32 @@ static inline Int128 int128_one(void) static inline Int128 int128_2_64(void) { - return (Int128) { 0, 1 }; + return int128_make128(0, 1); } static inline Int128 int128_exts64(int64_t a) { - return (Int128) { .lo = a, .hi = (a < 0) ? 
-1 : 0 }; + return int128_make128(a, (a < 0) ? -1 : 0); +} + +static inline Int128 int128_not(Int128 a) +{ + return int128_make128(~a.lo, ~a.hi); } static inline Int128 int128_and(Int128 a, Int128 b) { - return (Int128) { a.lo & b.lo, a.hi & b.hi }; + return int128_make128(a.lo & b.lo, a.hi & b.hi); } static inline Int128 int128_or(Int128 a, Int128 b) { - return (Int128) { a.lo | b.lo, a.hi | b.hi }; + return int128_make128(a.lo | b.lo, a.hi | b.hi); +} + +static inline Int128 int128_xor(Int128 a, Int128 b) +{ + return int128_make128(a.lo ^ b.lo, a.hi ^ b.hi); } static inline Int128 int128_rshift(Int128 a, int n) @@ -242,6 +323,20 @@ static inline Int128 int128_rshift(Int128 a, int n) } } +static inline Int128 int128_urshift(Int128 a, int n) +{ + uint64_t h = a.hi; + if (!n) { + return a; + } + h = h >> (n & 63); + if (n >= 64) { + return int128_make64(h); + } else { + return int128_make128((a.lo >> n) | ((uint64_t)a.hi << (64 - n)), h); + } +} + static inline Int128 int128_lshift(Int128 a, int n) { uint64_t l = a.lo << (n & 63); @@ -297,11 +392,21 @@ static inline bool int128_ge(Int128 a, Int128 b) return a.hi > b.hi || (a.hi == b.hi && a.lo >= b.lo); } +static inline bool int128_uge(Int128 a, Int128 b) +{ + return (uint64_t)a.hi > (uint64_t)b.hi || (a.hi == b.hi && a.lo >= b.lo); +} + static inline bool int128_lt(Int128 a, Int128 b) { return !int128_ge(a, b); } +static inline bool int128_ult(Int128 a, Int128 b) +{ + return !int128_uge(a, b); +} + static inline bool int128_le(Int128 a, Int128 b) { return int128_ge(b, a); @@ -337,5 +442,34 @@ static inline void int128_subfrom(Int128 *a, Int128 b) *a = int128_sub(*a, b); } +static inline Int128 bswap128(Int128 a) +{ + return int128_make128(bswap64(a.hi), bswap64(a.lo)); +} + +static inline int clz128(Int128 a) +{ + if (a.hi) { + return __builtin_clzll(a.hi); + } else { + return (a.lo) ? __builtin_clzll(a.lo) + 64 : 128; + } +} + +Int128 int128_divu(Int128, Int128); +Int128 int128_remu(Int128, Int128); +Int128 int128_divs(Int128, Int128); +Int128 int128_rems(Int128, Int128); + #endif /* CONFIG_INT128 */ + +static inline void bswap128s(Int128 *s) +{ + *s = bswap128(*s); +} + +#define UINT128_MAX int128_make128(~0LL, ~0LL) +#define INT128_MAX int128_make128(UINT64_MAX, INT64_MAX) +#define INT128_MIN int128_make128(0, INT64_MIN) + #endif /* INT128_H */ diff --git a/include/qemu/iova-tree.h b/include/qemu/iova-tree.h index b66cf93c4b..8528e5c98f 100644 --- a/include/qemu/iova-tree.h +++ b/include/qemu/iova-tree.h @@ -29,6 +29,7 @@ #define IOVA_OK (0) #define IOVA_ERR_INVALID (-1) /* Invalid parameters */ #define IOVA_ERR_OVERLAP (-2) /* IOVA range overlapped */ +#define IOVA_ERR_NOMEM (-3) /* Cannot allocate */ typedef struct IOVATree IOVATree; typedef struct DMAMap { @@ -59,7 +60,7 @@ IOVATree *iova_tree_new(void); * * Return: 0 if succeeded, or <0 if error. */ -int iova_tree_insert(IOVATree *tree, DMAMap *map); +int iova_tree_insert(IOVATree *tree, const DMAMap *map); /** * iova_tree_remove: @@ -71,10 +72,8 @@ int iova_tree_insert(IOVATree *tree, DMAMap *map); * provided. The range does not need to be exactly what has inserted, * all the mappings that are included in the provided range will be * removed from the tree. Here map->translated_addr is meaningless. - * - * Return: 0 if succeeded, or <0 if error. 
*/ -int iova_tree_remove(IOVATree *tree, DMAMap *map); +void iova_tree_remove(IOVATree *tree, DMAMap map); /** * iova_tree_find: @@ -82,7 +81,7 @@ int iova_tree_remove(IOVATree *tree, DMAMap *map); * @tree: the iova tree to search from * @map: the mapping to search * - * Search for a mapping in the iova tree that overlaps with the + * Search for a mapping in the iova tree that iova overlaps with the * mapping range specified. Only the first found mapping will be * returned. * @@ -92,7 +91,25 @@ int iova_tree_remove(IOVATree *tree, DMAMap *map); * user is responsible to make sure the pointer is valid (say, no * concurrent deletion in progress). */ -DMAMap *iova_tree_find(IOVATree *tree, DMAMap *map); +const DMAMap *iova_tree_find(const IOVATree *tree, const DMAMap *map); + +/** + * iova_tree_find_iova: + * + * @tree: the iova tree to search from + * @map: the mapping to search + * + * Search for a mapping in the iova tree that translated_addr overlaps with the + * mapping range specified. Only the first found mapping will be + * returned. + * + * Return: DMAMap pointer if found, or NULL if not found. Note that + * the returned DMAMap pointer is maintained internally. User should + * only read the content but never modify or free the content. Also, + * user is responsible to make sure the pointer is valid (say, no + * concurrent deletion in progress). + */ +const DMAMap *iova_tree_find_iova(const IOVATree *tree, const DMAMap *map); /** * iova_tree_find_address: @@ -105,7 +122,7 @@ DMAMap *iova_tree_find(IOVATree *tree, DMAMap *map); * * Return: same as iova_tree_find(). */ -DMAMap *iova_tree_find_address(IOVATree *tree, hwaddr iova); +const DMAMap *iova_tree_find_address(const IOVATree *tree, hwaddr iova); /** * iova_tree_foreach: @@ -119,6 +136,23 @@ DMAMap *iova_tree_find_address(IOVATree *tree, hwaddr iova); */ void iova_tree_foreach(IOVATree *tree, iova_tree_iterator iterator); +/** + * iova_tree_alloc_map: + * + * @tree: the iova tree to allocate from + * @map: the new map (as translated addr & size) to allocate in the iova region + * @iova_begin: the minimum address of the allocation + * @iova_end: the maximum addressable direction of the allocation + * + * Allocates a new region of a given size, between iova_min and iova_max. + * + * Return: Same as iova_tree_insert, but cannot overlap and can return error if + * iova tree is out of free contiguous range. The caller gets the assigned iova + * in map->iova. + */ +int iova_tree_alloc_map(IOVATree *tree, DMAMap *map, hwaddr iova_begin, + hwaddr iova_end); + /** * iova_tree_destroy: * diff --git a/include/qemu/job.h b/include/qemu/job.h index 41162ed494..e502787dd8 100644 --- a/include/qemu/job.h +++ b/include/qemu/job.h @@ -40,27 +40,62 @@ typedef struct JobTxn JobTxn; * Long-running operation. */ typedef struct Job { + + /* Fields set at initialization (job_create), and never modified */ + /** The ID of the job. May be NULL for internal jobs. */ char *id; - /** The type of this job. */ + /** + * The type of this job. + * All callbacks are called with job_mutex *not* held. + */ const JobDriver *driver; + /** + * The coroutine that executes the job. If not NULL, it is reentered when + * busy is false and the job is cancelled. + * Initialized in job_start() + */ + Coroutine *co; + + /** True if this job should automatically finalize itself */ + bool auto_finalize; + + /** True if this job should automatically dismiss itself */ + bool auto_dismiss; + + /** + * The completion function that will be called when the job completes. 
+ * Called with AioContext lock held, since many callback implementations + * use bdrv_* functions that require to hold the lock. + */ + BlockCompletionFunc *cb; + + /** The opaque value that is passed to the completion function. */ + void *opaque; + + /* ProgressMeter API is thread-safe */ + ProgressMeter progress; + + /** + * AioContext to run the job coroutine in. + * The job Aiocontext can be read when holding *either* + * the BQL (so we are in the main loop) or the job_mutex. + * It can only be written when we hold *both* BQL + * and the job_mutex. + */ + AioContext *aio_context; + + + /** Protected by job_mutex */ + /** Reference count of the block job */ int refcnt; /** Current state; See @JobStatus for details. */ JobStatus status; - /** AioContext to run the job coroutine in */ - AioContext *aio_context; - - /** - * The coroutine that executes the job. If not NULL, it is reentered when - * busy is false and the job is cancelled. - */ - Coroutine *co; - /** * Timer that is used by @job_sleep_ns. Accessed under job_mutex (in * job.c). @@ -76,7 +111,7 @@ typedef struct Job { /** * Set to false by the job while the coroutine has yielded and may be * re-entered by job_enter(). There may still be I/O or event loop activity - * pending. Accessed under block_job_mutex (in blockjob.c). + * pending. Accessed under job_mutex. * * When the job is deferred to the main loop, busy is true as long as the * bottom half is still pending. @@ -112,14 +147,6 @@ typedef struct Job { /** Set to true when the job has deferred work to the main loop. */ bool deferred_to_main_loop; - /** True if this job should automatically finalize itself */ - bool auto_finalize; - - /** True if this job should automatically dismiss itself */ - bool auto_dismiss; - - ProgressMeter progress; - /** * Return code from @run and/or @prepare callback(s). * Not final until the job has reached the CONCLUDED status. @@ -134,12 +161,6 @@ typedef struct Job { */ Error *err; - /** The completion function that will be called when the job completes. */ - BlockCompletionFunc *cb; - - /** The opaque value that is passed to the completion function. */ - void *opaque; - /** Notifiers called when a cancelled job is finalised */ NotifierList on_finalize_cancelled; @@ -167,8 +188,15 @@ typedef struct Job { /** * Callbacks and other information about a Job driver. + * All callbacks are invoked with job_mutex *not* held. */ struct JobDriver { + + /* + * These fields are initialized when this object is created, + * and are never changed afterwards + */ + /** Derived Job struct size */ size_t instance_size; @@ -184,9 +212,18 @@ struct JobDriver { * aborted. If it returns zero, the job moves into the WAITING state. If it * is the last job to complete in its transaction, all jobs in the * transaction move from WAITING to PENDING. + * + * This callback must be run in the job's context. */ int coroutine_fn (*run)(Job *job, Error **errp); + /* + * Functions run without regard to the BQL that may run in any + * arbitrary thread. These functions do not need to be thread-safe + * because the caller ensures that they are invoked from one + * thread at time. + */ + /** * If the callback is not NULL, it will be invoked when the job transitions * into the paused state. Paused jobs must not perform any asynchronous @@ -201,6 +238,13 @@ struct JobDriver { */ void coroutine_fn (*resume)(Job *job); + /* + * Global state (GS) API. These functions run under the BQL. + * + * See include/block/block-global-state.h for more information about + * the GS API. 
+ */ + + /** * Called when the job is resumed by the user (i.e. user_paused becomes * false). .user_resume is called before .resume. @@ -220,6 +264,9 @@ struct JobDriver { * * This callback will not be invoked if the job has already failed. * If it fails, abort and then clean will be called. + * + * Called with AioContext lock held, since many callback implementations + * use bdrv_* functions that require to hold the lock. */ int (*prepare)(Job *job); @@ -230,6 +277,9 @@ * * All jobs will complete with a call to either .commit() or .abort() but * never both. + * + * Called with AioContext lock held, since many callback implementations + * use bdrv_* functions that require to hold the lock. */ void (*commit)(Job *job); @@ -240,6 +290,9 @@ * * All jobs will complete with a call to either .commit() or .abort() but * never both. + * + * Called with AioContext lock held, since many callback implementations + * use bdrv_* functions that require to hold the lock. */ void (*abort)(Job *job); @@ -248,16 +301,35 @@ * .commit() or .abort(). Regardless of which callback is invoked after * completion, .clean() will always be called, even if the job does not * belong to a transaction group. + * + * Called with AioContext lock held, since many callback implementations + * use bdrv_* functions that require to hold the lock. */ void (*clean)(Job *job); /** * If the callback is not NULL, it will be invoked in job_cancel_async + * + * This function must return true if the job will be cancelled + * immediately without any further I/O (mandatory if @force is + * true), and false otherwise. This lets the generic job layer + * know whether a job has been truly (force-)cancelled, or whether + * it is just in a special completion mode (like mirror after + * READY). + * (If the callback is NULL, the job is assumed to terminate + * without I/O.) + * + * Called with AioContext lock held, since many callback implementations + * use bdrv_* functions that require to hold the lock. */ - void (*cancel)(Job *job, bool force); + bool (*cancel)(Job *job, bool force); - /** Called when the job is freed */ + /** + * Called when the job is freed. + * Called with AioContext lock held, since many callback implementations + * use bdrv_* functions that require to hold the lock. + */ void (*free)(Job *job); }; @@ -272,6 +344,30 @@ typedef enum JobCreateFlags { JOB_MANUAL_DISMISS = 0x04, } JobCreateFlags; +extern QemuMutex job_mutex; + +#define JOB_LOCK_GUARD() QEMU_LOCK_GUARD(&job_mutex) + +#define WITH_JOB_LOCK_GUARD() WITH_QEMU_LOCK_GUARD(&job_mutex) + +/** + * job_lock: + * + * Take the mutex protecting the list of jobs and their status. + * Most functions called by the monitor need to call job_lock + * and job_unlock manually. On the other hand, functions called + * by the block jobs themselves and by the block layer will take the + * lock for you. + */ +void job_lock(void); + +/** + * job_unlock: + * + * Release the mutex protecting the list of jobs and their status. + */ +void job_unlock(void); + /** * Allocate and return a new job transaction. Jobs can be added to the * transaction using job_txn_add_job(). @@ -288,23 +384,20 @@ JobTxn *job_txn_new(void); /** * Release a reference that was previously acquired with job_txn_add_job or * job_txn_new. If it's the last reference to the object, it will be freed. + * + * Called with job lock *not* held.
*/ void job_txn_unref(JobTxn *txn); -/** - * @txn: The transaction (may be NULL) - * @job: Job to add to the transaction - * - * Add @job to the transaction. The @job must not already be in a transaction. - * The caller must call either job_txn_unref() or job_completed() to release - * the reference that is automatically grabbed here. - * - * If @txn is NULL, the function does nothing. +/* + * Same as job_txn_unref(), but called with job lock held. + * Might release the lock temporarily. */ -void job_txn_add_job(JobTxn *txn, Job *job); +void job_txn_unref_locked(JobTxn *txn); /** * Create a new long-running job and return it. + * Called with job_mutex *not* held. * * @job_id: The id of the newly-created job, or %NULL for internal jobs * @driver: The class object for the newly-created job. @@ -322,20 +415,27 @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, /** * Add a reference to Job refcnt, it will be decreased with job_unref, and then * be freed if it comes to be the last reference. + * + * Called with job lock held. */ -void job_ref(Job *job); +void job_ref_locked(Job *job); /** - * Release a reference that was previously acquired with job_ref() or + * Release a reference that was previously acquired with job_ref_locked() or * job_create(). If it's the last reference to the object, it will be freed. + * + * Takes AioContext lock internally to invoke a job->driver callback. + * Called with job lock held. */ -void job_unref(Job *job); +void job_unref_locked(Job *job); /** * @job: The job that has made progress * @done: How much progress the job made since the last call * * Updates the progress counter of the job. + * + * May be called with mutex held or not held. */ void job_progress_update(Job *job, uint64_t done); @@ -346,6 +446,8 @@ void job_progress_update(Job *job, uint64_t done); * * Sets the expected end value of the progress counter of a job so that a * completion percentage can be calculated when the progress is updated. + * + * May be called with mutex held or not held. */ void job_progress_set_remaining(Job *job, uint64_t remaining); @@ -361,27 +463,27 @@ void job_progress_set_remaining(Job *job, uint64_t remaining); * length before, and job_progress_update() afterwards. * (So the operation acts as a parenthesis in regards to the main job * operation running in background.) + * + * May be called with mutex held or not held. */ void job_progress_increase_remaining(Job *job, uint64_t delta); -/** To be called when a cancelled job is finalised. */ -void job_event_cancelled(Job *job); - -/** To be called when a successfully completed job is finalised. */ -void job_event_completed(Job *job); - /** * Conditionally enter the job coroutine if the job is ready to run, not * already busy and fn() returns true. fn() is called while under the job_lock * critical section. + * + * Called with job lock held, but might release it temporarily. */ -void job_enter_cond(Job *job, bool(*fn)(Job *job)); +void job_enter_cond_locked(Job *job, bool(*fn)(Job *job)); /** * @job: A job that has not yet been started. * * Begins execution of a job. * Takes ownership of one reference to the job object. + * + * Called with job_mutex *not* held. */ void job_start(Job *job); @@ -389,6 +491,7 @@ void job_start(Job *job); * @job: The job to enter. * * Continue the specified job by entering the coroutine. + * Called with job_mutex *not* held. */ void job_enter(Job *job); @@ -397,6 +500,8 @@ void job_enter(Job *job); * * Pause now if job_pause() has been called. 
Jobs that perform lots of I/O * must call this between requests so that the job can be paused. + * + * Called with job_mutex *not* held. */ void coroutine_fn job_pause_point(Job *job); @@ -404,8 +509,9 @@ void coroutine_fn job_pause_point(Job *job); * @job: The job that calls the function. * * Yield the job coroutine. + * Called with job_mutex *not* held. */ -void job_yield(Job *job); +void coroutine_fn job_yield(Job *job); /** * @job: The job that calls the function. @@ -414,10 +520,11 @@ void job_yield(Job *job); * Put the job to sleep (assuming that it wasn't canceled) for @ns * %QEMU_CLOCK_REALTIME nanoseconds. Canceling the job will immediately * interrupt the wait. + * + * Called with job_mutex *not* held. */ void coroutine_fn job_sleep_ns(Job *job, int64_t ns); - /** Returns the JobType of a given Job. */ JobType job_type(const Job *job); @@ -427,102 +534,165 @@ const char *job_type_str(const Job *job); /** Returns true if the job should not be visible to the management layer. */ bool job_is_internal(Job *job); -/** Returns whether the job is scheduled for cancellation. */ +/** + * Returns whether the job is being cancelled. + * Called with job_mutex *not* held. + */ bool job_is_cancelled(Job *job); -/** Returns whether the job is in a completed state. */ -bool job_is_completed(Job *job); +/* Same as job_is_cancelled(), but called with job lock held. */ +bool job_is_cancelled_locked(Job *job); -/** Returns whether the job is ready to be completed. */ +/** + * Returns whether the job is scheduled for cancellation (at an + * indefinite point). + * Called with job_mutex *not* held. + */ +bool job_cancel_requested(Job *job); + +/** + * Returns whether the job is in a completed state. + * Called with job lock held. + */ +bool job_is_completed_locked(Job *job); + +/** + * Returns whether the job is ready to be completed. + * Called with job_mutex *not* held. + */ bool job_is_ready(Job *job); +/* Same as job_is_ready(), but called with job lock held. */ +bool job_is_ready_locked(Job *job); + /** * Request @job to pause at the next pause point. Must be paired with * job_resume(). If the job is supposed to be resumed by user action, call - * job_user_pause() instead. + * job_user_pause_locked() instead. + * + * Called with job lock *not* held. */ void job_pause(Job *job); -/** Resumes a @job paused with job_pause. */ +/* Same as job_pause(), but called with job lock held. */ +void job_pause_locked(Job *job); + +/** Resumes a @job paused with job_pause. Called with job lock *not* held. */ void job_resume(Job *job); +/* + * Same as job_resume(), but called with job lock held. + * Might release the lock temporarily. + */ +void job_resume_locked(Job *job); + /** * Asynchronously pause the specified @job. * Do not allow a resume until a matching call to job_user_resume. + * Called with job lock held. */ -void job_user_pause(Job *job, Error **errp); +void job_user_pause_locked(Job *job, Error **errp); -/** Returns true if the job is user-paused. */ -bool job_user_paused(Job *job); +/** + * Returns true if the job is user-paused. + * Called with job lock held. + */ +bool job_user_paused_locked(Job *job); /** * Resume the specified @job. - * Must be paired with a preceding job_user_pause. + * Must be paired with a preceding job_user_pause_locked. + * Called with job lock held, but might release it temporarily. 
*/ -void job_user_resume(Job *job, Error **errp); +void job_user_resume_locked(Job *job, Error **errp); /** * Get the next element from the list of block jobs after @job, or the * first one if @job is %NULL. * * Returns the requested job, or %NULL if there are no more jobs left. + * Called with job lock *not* held. */ Job *job_next(Job *job); +/* Same as job_next(), but called with job lock held. */ +Job *job_next_locked(Job *job); + /** * Get the job identified by @id (which must not be %NULL). * * Returns the requested job, or %NULL if it doesn't exist. + * Called with job lock held. */ -Job *job_get(const char *id); +Job *job_get_locked(const char *id); /** * Check whether the verb @verb can be applied to @job in its current state. * Returns 0 if the verb can be applied; otherwise errp is set and -EPERM * returned. + * + * Called with job lock held. */ -int job_apply_verb(Job *job, JobVerb verb, Error **errp); +int job_apply_verb_locked(Job *job, JobVerb verb, Error **errp); -/** The @job could not be started, free it. */ +/** + * The @job could not be started, free it. + * Called with job_mutex *not* held. + */ void job_early_fail(Job *job); -/** Moves the @job from RUNNING to READY */ +/** + * Moves the @job from RUNNING to READY. + * Called with job_mutex *not* held. + */ void job_transition_to_ready(Job *job); -/** Asynchronously complete the specified @job. */ -void job_complete(Job *job, Error **errp); +/** + * Asynchronously complete the specified @job. + * Called with job lock held, but might release it temporarily. + */ +void job_complete_locked(Job *job, Error **errp); /** * Asynchronously cancel the specified @job. If @force is true, the job should * be cancelled immediately without waiting for a consistent state. + * Called with job lock held. */ -void job_cancel(Job *job, bool force); +void job_cancel_locked(Job *job, bool force); /** - * Cancels the specified job like job_cancel(), but may refuse to do so if the - * operation isn't meaningful in the current state of the job. + * Cancels the specified job like job_cancel_locked(), but may refuse + * to do so if the operation isn't meaningful in the current state of the job. + * Called with job lock held. */ -void job_user_cancel(Job *job, bool force, Error **errp); +void job_user_cancel_locked(Job *job, bool force, Error **errp); /** * Synchronously cancel the @job. The completion callback is called - * before the function returns. The job may actually complete - * instead of canceling itself; the circumstances under which this - * happens depend on the kind of job that is active. + * before the function returns. If @force is false, the job may + * actually complete instead of canceling itself; the circumstances + * under which this happens depend on the kind of job that is active. * * Returns the return value from the job if the job actually completed * during the call, or -ECANCELED if it was canceled. * - * Callers must hold the AioContext lock of job->aio_context. + * Called with job_lock *not* held. */ -int job_cancel_sync(Job *job); +int job_cancel_sync(Job *job, bool force); -/** Synchronously cancels all jobs using job_cancel_sync(). */ +/* Same as job_cancel_sync, but called with job lock held. */ +int job_cancel_sync_locked(Job *job, bool force); + +/** + * Synchronously force-cancels all jobs using job_cancel_sync_locked(). + * + * Called with job_lock *not* held. + */ void job_cancel_sync_all(void); /** * @job: The job to be completed. 
- * @errp: Error object which may be set by job_complete(); this is not + * @errp: Error object which may be set by job_complete_locked(); this is not * necessarily set on every error, the job return value has to be * checked as well. * @@ -531,10 +701,9 @@ void job_cancel_sync_all(void); * function). * * Returns the return value from the job. - * - * Callers must hold the AioContext lock of job->aio_context. + * Called with job_lock held. */ -int job_complete_sync(Job *job, Error **errp); +int job_complete_sync_locked(Job *job, Error **errp); /** * For a @job that has finished its work and is pending awaiting explicit @@ -543,14 +712,18 @@ int job_complete_sync(Job *job, Error **errp); * FIXME: Make the below statement universally true: * For jobs that support the manual workflow mode, all graph changes that occur * as a result will occur after this command and before a successful reply. + * + * Called with job lock held. */ -void job_finalize(Job *job, Error **errp); +void job_finalize_locked(Job *job, Error **errp); /** * Remove the concluded @job from the query list and resets the passed pointer * to %NULL. Returns an error if the job is not actually concluded. + * + * Called with job lock held. */ -void job_dismiss(Job **job, Error **errp); +void job_dismiss_locked(Job **job, Error **errp); /** * Synchronously finishes the given @job. If @finish is given, it is called to @@ -559,8 +732,20 @@ void job_dismiss(Job **job, Error **errp); * Returns 0 if the job is successfully completed, -ECANCELED if the job was * cancelled before completing, and -errno in other error cases. * - * Callers must hold the AioContext lock of job->aio_context. + * Called with job_lock held, but might release it temporarily. */ -int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp); +int job_finish_sync_locked(Job *job, void (*finish)(Job *, Error **errp), + Error **errp); + +/** + * Sets the @job->aio_context. + * Called with job_mutex *not* held. + * + * This function must run in the main thread to protect against + * concurrent read in job_finish_sync_locked(), takes the job_mutex + * lock to protect against the read in job_do_yield_locked(), and must + * be called when the job is quiescent. + */ +void job_set_aio_context(Job *job, AioContext *ctx); #endif diff --git a/include/qemu/keyval.h b/include/qemu/keyval.h new file mode 100644 index 0000000000..1187b68303 --- /dev/null +++ b/include/qemu/keyval.h @@ -0,0 +1,15 @@ +/* + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef KEYVAL_H +#define KEYVAL_H + +QDict *keyval_parse_into(QDict *qdict, const char *params, const char *implied_key, + bool *p_help, Error **errp); +QDict *keyval_parse(const char *params, const char *implied_key, + bool *help, Error **errp); +void keyval_merge(QDict *old, const QDict *new, Error **errp); + +#endif /* KEYVAL_H */ diff --git a/include/qemu/log-for-trace.h b/include/qemu/log-for-trace.h index 2f0a5b080e..d47c9cd446 100644 --- a/include/qemu/log-for-trace.h +++ b/include/qemu/log-for-trace.h @@ -30,6 +30,6 @@ static inline bool qemu_loglevel_mask(int mask) } /* main logging function */ -int GCC_FMT_ATTR(1, 2) qemu_log(const char *fmt, ...); +void G_GNUC_PRINTF(1, 2) qemu_log(const char *fmt, ...); #endif diff --git a/include/qemu/log.h b/include/qemu/log.h index 9b80660207..c5643d8dd5 100644 --- a/include/qemu/log.h +++ b/include/qemu/log.h @@ -3,46 +3,16 @@ /* A small part of this API is split into its own header */ #include "qemu/log-for-trace.h" -#include "qemu/rcu.h" - -typedef struct QemuLogFile { - struct rcu_head rcu; - FILE *fd; -} QemuLogFile; - -/* Private global variable, don't use */ -extern QemuLogFile *qemu_logfile; - /* * The new API: - * */ -/* Log settings checking macros: */ +/* Returns true if qemu_log() will really write somewhere. */ +bool qemu_log_enabled(void); -/* Returns true if qemu_log() will really write somewhere - */ -static inline bool qemu_log_enabled(void) -{ - return qemu_logfile != NULL; -} - -/* Returns true if qemu_log() will write somewhere else than stderr - */ -static inline bool qemu_log_separate(void) -{ - QemuLogFile *logfile; - bool res = false; - - rcu_read_lock(); - logfile = qatomic_rcu_read(&qemu_logfile); - if (logfile && logfile->fd != stderr) { - res = true; - } - rcu_read_unlock(); - return res; -} +/* Returns true if qemu_log() will write somewhere other than stderr. */ +bool qemu_log_separate(void); #define CPU_LOG_TB_OUT_ASM (1 << 0) #define CPU_LOG_TB_IN_ASM (1 << 1) @@ -64,51 +34,15 @@ static inline bool qemu_log_separate(void) #define CPU_LOG_PLUGIN (1 << 18) /* LOG_STRACE is used for user-mode strace logging. */ #define LOG_STRACE (1 << 19) +#define LOG_PER_THREAD (1 << 20) -/* Lock output for a series of related logs. Since this is not needed - * for a single qemu_log / qemu_log_mask / qemu_log_mask_and_addr, we - * assume that qemu_loglevel_mask has already been tested, and that - * qemu_loglevel is never set when qemu_logfile is unset. - */ +/* Lock/unlock output. 
*/ -static inline FILE *qemu_log_lock(void) -{ - QemuLogFile *logfile; - rcu_read_lock(); - logfile = qatomic_rcu_read(&qemu_logfile); - if (logfile) { - qemu_flockfile(logfile->fd); - return logfile->fd; - } else { - return NULL; - } -} - -static inline void qemu_log_unlock(FILE *fd) -{ - if (fd) { - qemu_funlockfile(fd); - } - rcu_read_unlock(); -} +FILE *qemu_log_trylock(void) G_GNUC_WARN_UNUSED_RESULT; +void qemu_log_unlock(FILE *fd); /* Logging functions: */ -/* vfprintf-like logging function - */ -static inline void GCC_FMT_ATTR(1, 0) -qemu_log_vprintf(const char *fmt, va_list va) -{ - QemuLogFile *logfile; - - rcu_read_lock(); - logfile = qatomic_rcu_read(&qemu_logfile); - if (logfile) { - vfprintf(logfile->fd, fmt, va); - } - rcu_read_unlock(); -} - /* log only if a bit is set on the current loglevel mask: * @mask: bit to check in the mask * @fmt: printf-style format string @@ -147,9 +81,9 @@ typedef struct QEMULogItem { extern const QEMULogItem qemu_log_items[]; -void qemu_set_log(int log_flags); -void qemu_log_needs_buffers(void); -void qemu_set_log_filename(const char *filename, Error **errp); +bool qemu_set_log(int log_flags, Error **errp); +bool qemu_set_log_filename(const char *filename, Error **errp); +bool qemu_set_log_filename_flags(const char *name, int flags, Error **errp); void qemu_set_dfilter_ranges(const char *ranges, Error **errp); bool qemu_log_in_addr_range(uint64_t addr); int qemu_str_to_log_mask(const char *str); @@ -159,9 +93,4 @@ int qemu_str_to_log_mask(const char *str); */ void qemu_print_log_usage(FILE *f); -/* fflush() the log file */ -void qemu_log_flush(void); -/* Close the log file */ -void qemu_log_close(void); - #endif diff --git a/include/qemu/madvise.h b/include/qemu/madvise.h new file mode 100644 index 0000000000..e155f59a0d --- /dev/null +++ b/include/qemu/madvise.h @@ -0,0 +1,95 @@ +/* + * QEMU madvise wrapper functions + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef QEMU_MADVISE_H +#define QEMU_MADVISE_H + +#define QEMU_MADV_INVALID -1 + +#if defined(CONFIG_MADVISE) + +#define QEMU_MADV_WILLNEED MADV_WILLNEED +#define QEMU_MADV_DONTNEED MADV_DONTNEED +#ifdef MADV_DONTFORK +#define QEMU_MADV_DONTFORK MADV_DONTFORK +#else +#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID +#endif +#ifdef MADV_MERGEABLE +#define QEMU_MADV_MERGEABLE MADV_MERGEABLE +#else +#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID +#endif +#ifdef MADV_UNMERGEABLE +#define QEMU_MADV_UNMERGEABLE MADV_UNMERGEABLE +#else +#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID +#endif +#ifdef MADV_DODUMP +#define QEMU_MADV_DODUMP MADV_DODUMP +#else +#define QEMU_MADV_DODUMP QEMU_MADV_INVALID +#endif +#ifdef MADV_DONTDUMP +#define QEMU_MADV_DONTDUMP MADV_DONTDUMP +#else +#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID +#endif +#ifdef MADV_HUGEPAGE +#define QEMU_MADV_HUGEPAGE MADV_HUGEPAGE +#else +#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID +#endif +#ifdef MADV_NOHUGEPAGE +#define QEMU_MADV_NOHUGEPAGE MADV_NOHUGEPAGE +#else +#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID +#endif +#ifdef MADV_REMOVE +#define QEMU_MADV_REMOVE MADV_REMOVE +#else +#define QEMU_MADV_REMOVE QEMU_MADV_DONTNEED +#endif +#ifdef MADV_POPULATE_WRITE +#define QEMU_MADV_POPULATE_WRITE MADV_POPULATE_WRITE +#else +#define QEMU_MADV_POPULATE_WRITE QEMU_MADV_INVALID +#endif + +#elif defined(CONFIG_POSIX_MADVISE) + +#define QEMU_MADV_WILLNEED POSIX_MADV_WILLNEED +#define QEMU_MADV_DONTNEED POSIX_MADV_DONTNEED +#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID +#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID +#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID +#define QEMU_MADV_DODUMP QEMU_MADV_INVALID +#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID +#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID +#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID +#define QEMU_MADV_REMOVE QEMU_MADV_DONTNEED +#define QEMU_MADV_POPULATE_WRITE QEMU_MADV_INVALID + +#else /* no-op */ + +#define QEMU_MADV_WILLNEED QEMU_MADV_INVALID +#define QEMU_MADV_DONTNEED QEMU_MADV_INVALID +#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID +#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID +#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID +#define QEMU_MADV_DODUMP QEMU_MADV_INVALID +#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID +#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID +#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID +#define QEMU_MADV_REMOVE QEMU_MADV_INVALID +#define QEMU_MADV_POPULATE_WRITE QEMU_MADV_INVALID + +#endif + +int qemu_madvise(void *addr, size_t len, int advice); + +#endif diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h index 6846f80787..19f40a7222 100644 --- a/include/qemu/main-loop.h +++ b/include/qemu/main-loop.h @@ -26,9 +26,19 @@ #define QEMU_MAIN_LOOP_H #include "block/aio.h" +#include "qom/object.h" +#include "sysemu/event-loop-base.h" #define SIG_IPI SIGUSR1 +#define TYPE_MAIN_LOOP "main-loop" +OBJECT_DECLARE_TYPE(MainLoop, MainLoopClass, MAIN_LOOP) + +struct MainLoop { + EventLoopBase parent_obj; +}; +typedef struct MainLoop MainLoop; + /** * qemu_init_main_loop: Set up the process so that it can run the main loop. * @@ -147,6 +157,8 @@ typedef void WaitObjectFunc(void *opaque); * in the main loop's calls to WaitForMultipleObjects. When the handle * is in a signaled state, QEMU will call @func. * + * If the same HANDLE is added twice, this function returns -1. + * * @handle: The Windows handle to be observed. * @func: A function to be called when @handle is in a signaled state. * @opaque: A pointer-size value that is passed to @func. 
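Editor's note: the new qemu/madvise.h shown above resolves each QEMU_MADV_* constant to the host's madvise()/posix_madvise() flag, or to QEMU_MADV_INVALID (-1) when the host has no equivalent, so callers can test for support before issuing a hint. A short sketch follows; the function name is an illustrative assumption, not taken from the patch.

#include "qemu/osdep.h"
#include "qemu/madvise.h"

/* Illustrative sketch only: ask the host to drop a range, if the hint exists. */
static void discard_range(void *addr, size_t len)
{
    if (QEMU_MADV_DONTNEED != QEMU_MADV_INVALID) {
        qemu_madvise(addr, len, QEMU_MADV_DONTNEED);
    }
}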
@@ -242,9 +254,63 @@ AioContext *iohandler_get_aio_context(void); * must always be taken outside other locks. This function helps * functions take different paths depending on whether the current * thread is running within the main loop mutex. + * + * This function should never be used in the block layer, because + * unit tests, block layer tools and qemu-storage-daemon do not + * have a BQL. + * Please instead refer to qemu_in_main_thread(). */ bool qemu_mutex_iothread_locked(void); +/** + * qemu_in_main_thread: return whether it's possible to safely access + * the global state of the block layer. + * + * Global state of the block layer is not accessible from I/O threads + * or worker threads; only from threads that "own" the default + * AioContext that qemu_get_aio_context() returns. For tests, block + * layer tools and qemu-storage-daemon there is a designated thread that + * runs the event loop for qemu_get_aio_context(), and that is the + * main thread. + * + * For emulators, however, any thread that holds the BQL can act + * as the block layer main thread; this will be any of the actual + * main thread, the vCPU threads or the RCU thread. + * + * For clarity, do not use this function outside the block layer. + */ +bool qemu_in_main_thread(void); + +/* + * Mark and check that the function is part of the Global State API. + * Please refer to include/block/block-global-state.h for more + * information about GS API. + */ +#define GLOBAL_STATE_CODE() \ + do { \ + assert(qemu_in_main_thread()); \ + } while (0) + +/* + * Mark and check that the function is part of the I/O API. + * Please refer to include/block/block-io.h for more + * information about IO API. + */ +#define IO_CODE() \ + do { \ + /* nop */ \ + } while (0) + +/* + * Mark and check that the function is part of the "I/O OR GS" API. + * Please refer to include/block/block-io.h for more + * information about "IO or GS" API. + */ +#define IO_OR_GS_CODE() \ + do { \ + /* nop */ \ + } while (0) + /** * qemu_mutex_lock_iothread: Lock the main loop mutex. * diff --git a/include/qemu/memalign.h b/include/qemu/memalign.h new file mode 100644 index 0000000000..fa299f3bf6 --- /dev/null +++ b/include/qemu/memalign.h @@ -0,0 +1,61 @@ +/* + * Allocation and free functions for aligned memory + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_MEMALIGN_H +#define QEMU_MEMALIGN_H + +/** + * qemu_try_memalign: Allocate aligned memory + * @alignment: required alignment, in bytes + * @size: size of allocation, in bytes + * + * Allocate memory on an aligned boundary (i.e. the returned + * address will be an exact multiple of @alignment). + * @alignment must be a power of 2, or the function will assert(). + * On success, returns allocated memory; on failure, returns NULL. + * + * The memory allocated through this function must be freed via + * qemu_vfree() (and not via free()). + */ +void *qemu_try_memalign(size_t alignment, size_t size); +/** + * qemu_memalign: Allocate aligned memory, without failing + * @alignment: required alignment, in bytes + * @size: size of allocation, in bytes + * + * Allocate memory in the same way as qemu_try_memalign(), but + * abort() with an error message if the memory allocation fails. + * + * The memory allocated through this function must be freed via + * qemu_vfree() (and not via free()). 
+ */ +void *qemu_memalign(size_t alignment, size_t size); +/** + * qemu_vfree: Free memory allocated through qemu_memalign + * @ptr: memory to free + * + * This function must be used to free memory allocated via qemu_memalign() + * or qemu_try_memalign(). (Using the wrong free function will cause + * subtle bugs on Windows hosts.) + */ +void qemu_vfree(void *ptr); +/* + * It's an analog of GLIB's g_autoptr_cleanup_generic_gfree(), used to define + * g_autofree macro. + */ +static inline void qemu_cleanup_generic_vfree(void *p) +{ + void **pp = (void **)p; + qemu_vfree(*pp); +} + +/* + * Analog of g_autofree, but qemu_vfree is called on cleanup instead of g_free. + */ +#define QEMU_AUTO_VFREE __attribute__((cleanup(qemu_cleanup_generic_vfree))) + +#endif diff --git a/include/qemu/mmap-alloc.h b/include/qemu/mmap-alloc.h index 90d0eee705..2825e231a7 100644 --- a/include/qemu/mmap-alloc.h +++ b/include/qemu/mmap-alloc.h @@ -4,8 +4,6 @@ size_t qemu_fd_getpagesize(int fd); -size_t qemu_mempath_getpagesize(const char *mem_path); - /** * qemu_ram_mmap: mmap anonymous memory, the specified file or device. * @@ -35,4 +33,27 @@ void *qemu_ram_mmap(int fd, void qemu_ram_munmap(int fd, void *ptr, size_t size); +/* + * Abstraction of PROT_ and MAP_ flags as passed to mmap(), for example, + * consumed by qemu_ram_mmap(). + */ + +/* Map PROT_READ instead of PROT_READ | PROT_WRITE. */ +#define QEMU_MAP_READONLY (1 << 0) + +/* Use MAP_SHARED instead of MAP_PRIVATE. */ +#define QEMU_MAP_SHARED (1 << 1) + +/* + * Use MAP_SYNC | MAP_SHARED_VALIDATE if supported. Ignored without + * QEMU_MAP_SHARED. If mapping fails, warn and fallback to !QEMU_MAP_SYNC. + */ +#define QEMU_MAP_SYNC (1 << 2) + +/* + * Use MAP_NORESERVE to skip reservation of swap space (or huge pages if + * applicable). Bail out if not supported/effective. + */ +#define QEMU_MAP_NORESERVE (1 << 3) + #endif diff --git a/include/qemu/module.h b/include/qemu/module.h index 3deac0078b..c37ce74b16 100644 --- a/include/qemu/module.h +++ b/include/qemu/module.h @@ -61,30 +61,57 @@ typedef enum { #define fuzz_target_init(function) module_init(function, \ MODULE_INIT_FUZZ_TARGET) #define migration_init(function) module_init(function, MODULE_INIT_MIGRATION) -#define block_module_load_one(lib) module_load_one("block-", lib, false) -#define ui_module_load_one(lib) module_load_one("ui-", lib, false) -#define audio_module_load_one(lib) module_load_one("audio-", lib, false) +#define block_module_load(lib, errp) module_load("block-", lib, errp) +#define ui_module_load(lib, errp) module_load("ui-", lib, errp) +#define audio_module_load(lib, errp) module_load("audio-", lib, errp) void register_module_init(void (*fn)(void), module_init_type type); void register_dso_module_init(void (*fn)(void), module_init_type type); void module_call_init(module_init_type type); -bool module_load_one(const char *prefix, const char *lib_name, bool mayfail); -void module_load_qom_one(const char *type); + +/* + * module_load: attempt to load a module from a set of directories + * + * directories searched are: + * - getenv("QEMU_MODULE_DIR") + * - get_relocated_path(CONFIG_QEMU_MODDIR); + * - /var/run/qemu/${version_dir} + * + * prefix: a subsystem prefix, or the empty string ("audio-", ..., "") + * name: name of the module + * errp: error to set in case the module is found, but load failed. + * + * Return value: -1 on error (errp set if not NULL). 
+ * 0 if module or one of its dependencies are not installed, + * 1 if the module is found and loaded, + * 2 if the module is already loaded, or module is built-in. + */ +int module_load(const char *prefix, const char *name, Error **errp); + +/* + * module_load_qom: attempt to load a module to provide a QOM type + * + * type: the type to be provided + * errp: error to set. + * + * Return value: as per module_load. + */ +int module_load_qom(const char *type, Error **errp); void module_load_qom_all(void); void module_allow_arch(const char *arch); /** * DOC: module info annotation macros * - * `scripts/modinfo-collect.py` will collect module info, + * ``scripts/modinfo-collect.py`` will collect module info, * using the preprocessor and -DQEMU_MODINFO. * - * `scripts/modinfo-generate.py` will create a module meta-data database + * ``scripts/modinfo-generate.py`` will create a module meta-data database * from the collected information so qemu knows about module * dependencies and QOM objects implemented by modules. * - * See `*.modinfo` and `modinfo.c` in the build directory to check the + * See ``*.modinfo`` and ``modinfo.c`` in the build directory to check the * script results. */ #ifdef QEMU_MODINFO @@ -135,6 +162,16 @@ void module_allow_arch(const char *arch); */ #define module_opts(name) modinfo(opts, name) +/** + * module_kconfig + * + * @name: Kconfig requirement necessary to load the module + * + * This module requires a core module that should be implemented and + * enabled in Kconfig. + */ +#define module_kconfig(name) modinfo(kconfig, name) + /* * module info database * diff --git a/include/qemu/mprotect.h b/include/qemu/mprotect.h new file mode 100644 index 0000000000..1e83d1433e --- /dev/null +++ b/include/qemu/mprotect.h @@ -0,0 +1,14 @@ +/* + * QEMU mprotect functions + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#ifndef QEMU_MPROTECT_H +#define QEMU_MPROTECT_H + +int qemu_mprotect_rw(void *addr, size_t size); +int qemu_mprotect_rwx(void *addr, size_t size); +int qemu_mprotect_none(void *addr, size_t size); + +#endif diff --git a/include/qemu/option.h b/include/qemu/option.h index 306bf07575..b349828782 100644 --- a/include/qemu/option.h +++ b/include/qemu/option.h @@ -144,10 +144,6 @@ void qemu_opts_print_help(QemuOptsList *list, bool print_caption); void qemu_opts_free(QemuOptsList *list); QemuOptsList *qemu_opts_append(QemuOptsList *dst, QemuOptsList *list); -QDict *keyval_parse_into(QDict *qdict, const char *params, const char *implied_key, - bool *p_help, Error **errp); -QDict *keyval_parse(const char *params, const char *implied_key, - bool *help, Error **errp); -void keyval_merge(QDict *old, const QDict *new, Error **errp); +G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuOpts, qemu_opts_del) #endif diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h index 54762dd6ef..fd712ee883 100644 --- a/include/qemu/osdep.h +++ b/include/qemu/osdep.h @@ -34,6 +34,18 @@ #include "exec/poison.h" #endif +/* + * HOST_WORDS_BIGENDIAN was replaced with HOST_BIG_ENDIAN. Prevent it from + * creeping back in. + */ +#pragma GCC poison HOST_WORDS_BIGENDIAN + +/* + * TARGET_WORDS_BIGENDIAN was replaced with TARGET_BIG_ENDIAN. Prevent it from + * creeping back in. 
+ */ +#pragma GCC poison TARGET_WORDS_BIGENDIAN + #include "qemu/compiler.h" /* Older versions of C++ don't get definitions of various macros from @@ -63,7 +75,7 @@ QEMU_EXTERN_C int daemon(int, int); #ifdef _WIN32 /* as defined in sdkddkver.h */ #ifndef _WIN32_WINNT -#define _WIN32_WINNT 0x0600 /* Vista */ +#define _WIN32_WINNT 0x0601 /* Windows 7 API (should be in sync with glib) */ #endif /* reduces the number of implicitly included headers */ #ifndef WIN32_LEAN_AND_MEAN @@ -157,6 +169,31 @@ extern "C" { #define assert(x) g_assert(x) #endif +/** + * qemu_build_not_reached() + * + * The compiler, during optimization, is expected to prove that a call + * to this function cannot be reached and remove it. If the compiler + * supports QEMU_ERROR, this will be reported at compile time; otherwise + * this will be reported at link time due to the missing symbol. + */ +G_NORETURN extern +void QEMU_ERROR("code path is reachable") + qemu_build_not_reached_always(void); +#if defined(__OPTIMIZE__) && !defined(__NO_INLINE__) && !defined(XEMU_DEBUG_BUILD) +#define qemu_build_not_reached() qemu_build_not_reached_always() +#else +#define qemu_build_not_reached() g_assert_not_reached() +#endif + +/** + * qemu_build_assert() + * + * The compiler, during optimization, is expected to prove that the + * assertion is true. + */ +#define qemu_build_assert(test) while (!(test)) qemu_build_not_reached() + /* * According to waitpid man page: * WCOREDUMP @@ -214,6 +251,8 @@ extern "C" { #define ESHUTDOWN 4099 #endif +#define TFR(expr) do { if ((expr) != -1) break; } while (errno == EINTR) + /* time_t may be either 32 or 64 bits depending on the host OS, and * can be either signed or unsigned, so we can't just hardcode a * specific maximum value. This is not a C preprocessor constant, @@ -244,15 +283,6 @@ extern "C" { #define TIME_MAX TYPE_MAXIMUM(time_t) #endif -/* HOST_LONG_BITS is the size of a native pointer in bits. */ -#if UINTPTR_MAX == UINT32_MAX -# define HOST_LONG_BITS 32 -#elif UINTPTR_MAX == UINT64_MAX -# define HOST_LONG_BITS 64 -#else -# error Unknown pointer size -#endif - /* Mac OSX has a bug that incorrectly defines SIZE_MAX with * the wrong type. Our replacement isn't usable in preprocessor * expressions, but it is sufficient for our needs. */ @@ -379,127 +409,10 @@ extern "C" { #endif int qemu_daemon(int nochdir, int noclose); -void *qemu_try_memalign(size_t alignment, size_t size); -void *qemu_memalign(size_t alignment, size_t size); void *qemu_anon_ram_alloc(size_t size, uint64_t *align, bool shared, bool noreserve); -void qemu_vfree(void *ptr); void qemu_anon_ram_free(void *ptr, size_t size); -/* - * It's an analog of GLIB's g_autoptr_cleanup_generic_gfree(), used to define - * g_autofree macro. - */ -static inline void qemu_cleanup_generic_vfree(void *p) -{ - void **pp = (void **)p; - qemu_vfree(*pp); -} - -/* - * Analog of g_autofree, but qemu_vfree is called on cleanup instead of g_free. - */ -#define QEMU_AUTO_VFREE __attribute__((cleanup(qemu_cleanup_generic_vfree))) - -/* - * Abstraction of PROT_ and MAP_ flags as passed to mmap(), for example, - * consumed by qemu_ram_mmap(). - */ - -/* Map PROT_READ instead of PROT_READ | PROT_WRITE. */ -#define QEMU_MAP_READONLY (1 << 0) - -/* Use MAP_SHARED instead of MAP_PRIVATE. */ -#define QEMU_MAP_SHARED (1 << 1) - -/* - * Use MAP_SYNC | MAP_SHARED_VALIDATE if supported. Ignored without - * QEMU_MAP_SHARED. If mapping fails, warn and fallback to !QEMU_MAP_SYNC. 
- */ -#define QEMU_MAP_SYNC (1 << 2) - -/* - * Use MAP_NORESERVE to skip reservation of swap space (or huge pages if - * applicable). Bail out if not supported/effective. - */ -#define QEMU_MAP_NORESERVE (1 << 3) - - -#define QEMU_MADV_INVALID -1 - -#if defined(CONFIG_MADVISE) - -#define QEMU_MADV_WILLNEED MADV_WILLNEED -#define QEMU_MADV_DONTNEED MADV_DONTNEED -#ifdef MADV_DONTFORK -#define QEMU_MADV_DONTFORK MADV_DONTFORK -#else -#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID -#endif -#ifdef MADV_MERGEABLE -#define QEMU_MADV_MERGEABLE MADV_MERGEABLE -#else -#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID -#endif -#ifdef MADV_UNMERGEABLE -#define QEMU_MADV_UNMERGEABLE MADV_UNMERGEABLE -#else -#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID -#endif -#ifdef MADV_DODUMP -#define QEMU_MADV_DODUMP MADV_DODUMP -#else -#define QEMU_MADV_DODUMP QEMU_MADV_INVALID -#endif -#ifdef MADV_DONTDUMP -#define QEMU_MADV_DONTDUMP MADV_DONTDUMP -#else -#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID -#endif -#ifdef MADV_HUGEPAGE -#define QEMU_MADV_HUGEPAGE MADV_HUGEPAGE -#else -#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID -#endif -#ifdef MADV_NOHUGEPAGE -#define QEMU_MADV_NOHUGEPAGE MADV_NOHUGEPAGE -#else -#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID -#endif -#ifdef MADV_REMOVE -#define QEMU_MADV_REMOVE MADV_REMOVE -#else -#define QEMU_MADV_REMOVE QEMU_MADV_DONTNEED -#endif - -#elif defined(CONFIG_POSIX_MADVISE) - -#define QEMU_MADV_WILLNEED POSIX_MADV_WILLNEED -#define QEMU_MADV_DONTNEED POSIX_MADV_DONTNEED -#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID -#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID -#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID -#define QEMU_MADV_DODUMP QEMU_MADV_INVALID -#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID -#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID -#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID -#define QEMU_MADV_REMOVE QEMU_MADV_DONTNEED - -#else /* no-op */ - -#define QEMU_MADV_WILLNEED QEMU_MADV_INVALID -#define QEMU_MADV_DONTNEED QEMU_MADV_INVALID -#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID -#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID -#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID -#define QEMU_MADV_DODUMP QEMU_MADV_INVALID -#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID -#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID -#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID -#define QEMU_MADV_REMOVE QEMU_MADV_INVALID - -#endif - #ifdef _WIN32 #define HAVE_CHARDEV_SERIAL 1 #elif defined(__linux__) || defined(__sun__) || defined(__FreeBSD__) \ @@ -517,6 +430,14 @@ static inline void qemu_cleanup_generic_vfree(void *p) #define SIGIO SIGPOLL #endif +#ifdef HAVE_MADVISE_WITHOUT_PROTOTYPE +/* + * See MySQL bug #7156 (http://bugs.mysql.com/bug.php?id=7156) for discussion + * about Solaris missing the madvise() prototype. + */ +extern int madvise(char *, size_t, int); +#endif + #if defined(CONFIG_LINUX) #ifndef BUS_MCEERR_AR #define BUS_MCEERR_AR 4 @@ -537,9 +458,9 @@ static inline void qemu_cleanup_generic_vfree(void *p) /* Use 1 MiB (segment size) alignment so gmap can be used by KVM. 
*/
# define QEMU_VMALLOC_ALIGN (256 * 4096)
#elif defined(__linux__) && defined(__sparc__)
-# define QEMU_VMALLOC_ALIGN MAX(qemu_real_host_page_size, SHMLBA)
+# define QEMU_VMALLOC_ALIGN MAX(qemu_real_host_page_size(), SHMLBA)
#else
-# define QEMU_VMALLOC_ALIGN qemu_real_host_page_size
+# define QEMU_VMALLOC_ALIGN qemu_real_host_page_size()
#endif
#ifdef CONFIG_POSIX
@@ -570,11 +491,6 @@ void sigaction_invoke(struct sigaction *action,
struct qemu_signalfd_siginfo *info);
#endif
-int qemu_madvise(void *addr, size_t len, int advice);
-int qemu_mprotect_rw(void *addr, size_t size);
-int qemu_mprotect_rwx(void *addr, size_t size);
-int qemu_mprotect_none(void *addr, size_t size);
-
/*
* Don't introduce new usage of this function, prefer the following
* qemu_open/qemu_create that take an "Error **errp"
@@ -654,45 +570,18 @@ static inline void qemu_timersub(const struct timeval *val1,
#define qemu_timersub timersub
#endif
+ssize_t qemu_write_full(int fd, const void *buf, size_t count)
+ G_GNUC_WARN_UNUSED_RESULT;
+
void qemu_set_cloexec(int fd);
-/* Starting on QEMU 2.5, qemu_hw_version() returns "2.5+" by default
- * instead of QEMU_VERSION, so setting hw_version on MachineClass
- * is no longer mandatory.
- *
- * Do NOT change this string, or it will break compatibility on all
- * machine classes that don't set hw_version.
- */
-#define QEMU_HW_VERSION "2.5+"
-
-/* QEMU "hardware version" setting. Used to replace code that exposed
- * QEMU_VERSION to guests in the past and need to keep compatibility.
- * Do not use qemu_hw_version() in new code.
- */
-void qemu_set_hw_version(const char *);
-const char *qemu_hw_version(void);
-
-void fips_set_state(bool requested);
-bool fips_get_state(void);
-
-/* Return a dynamically allocated pathname denoting a file or directory that is
- * appropriate for storing local state.
- *
- * @relative_pathname need not start with a directory separator; one will be
- * added automatically.
+/* Return a dynamically allocated directory path that is appropriate for storing
+ * local state.
*
* The caller is responsible for releasing the value returned with g_free()
* after use.
*/
-char *qemu_get_local_state_pathname(const char *relative_pathname);
-
-/* Find program directory, and save it for later usage with
- * qemu_get_exec_dir().
- * Try OS specific API first, if not working, parse from argv0. */
-void qemu_init_exec_dir(const char *argv0);
-
-/* Get the saved exec dir. */
-const char *qemu_get_exec_dir(void);
+char *qemu_get_local_state_dir(void);
/**
* qemu_getauxval:
@@ -705,8 +594,23 @@ unsigned long qemu_getauxval(unsigned long type);
void qemu_set_tty_echo(int fd, bool echo);
-void os_mem_prealloc(int fd, char *area, size_t sz, int smp_cpus,
- Error **errp);
+typedef struct ThreadContext ThreadContext;
+
+/**
+ * qemu_prealloc_mem:
+ * @fd: the fd mapped into the area, -1 for anonymous memory
+ * @area: start address of the area to preallocate
+ * @sz: the size of the area to preallocate
+ * @max_threads: maximum number of threads to use
+ * @errp: returns an error if this function fails
+ *
+ * Preallocate memory (populate/prefault page tables writable) for the virtual
+ * memory area starting at @area with the size of @sz. After a successful call,
+ * each page in the area was faulted in writable at least once, for example,
+ * after allocating file blocks for mapped files.
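+ *
+ * Illustrative call (a sketch only; the argument values are assumptions,
+ * not taken from a real caller):
+ *
+ *     qemu_prealloc_mem(fd, area, size, 4, NULL, &error_fatal);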
+ */
+void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads,
+ ThreadContext *tc, Error **errp);
/**
* qemu_get_pid_name:
@@ -735,13 +639,15 @@ pid_t qemu_fork(Error **errp);
/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
* when intptr_t is 32-bit and we are aligning a long long.
*/
-extern uintptr_t qemu_real_host_page_size;
-extern intptr_t qemu_real_host_page_mask;
+static inline uintptr_t qemu_real_host_page_size(void)
+{
+ return getpagesize();
+}
-extern int qemu_icache_linesize;
-extern int qemu_icache_linesize_log;
-extern int qemu_dcache_linesize;
-extern int qemu_dcache_linesize_log;
+static inline intptr_t qemu_real_host_page_mask(void)
+{
+ return -(intptr_t)qemu_real_host_page_size();
+}
/*
* After using getopt or getopt_long, if you need to parse another set
@@ -758,15 +664,20 @@ static inline void qemu_reset_optind(void)
#endif
}
+int qemu_fdatasync(int fd);
+
/**
- * qemu_get_host_name:
- * @errp: Error object
+ * Sync changes made to the memory mapped file back to the backing
+ * storage. For POSIX compliant systems this will fall back
+ * to a regular msync call. Otherwise it will trigger a whole file sync
+ * (including the metadata, as there is no support to skip that otherwise)
*
- * Operating system agnostic way of querying host name.
- *
- * Returns allocated hostname (caller should free), NULL on failure.
+ * @addr - start of the memory area to be synced
+ * @length - length of the area to be synced
+ * @fd - file descriptor for the file to be synced
+ * (mandatory only for POSIX non-compliant systems)
*/
-char *qemu_get_host_name(Error **errp);
+int qemu_msync(void *addr, size_t length, int fd);
/**
* qemu_get_host_physmem:
@@ -785,19 +696,15 @@ size_t qemu_get_host_physmem(void);
* for the current thread.
*/
#if defined(MAC_OS_VERSION_11_0) && \
- MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_VERSION_11_0
+ MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0
static inline void qemu_thread_jit_execute(void)
{
- if (__builtin_available(macOS 11.0, *)) {
- pthread_jit_write_protect_np(true);
- }
+ pthread_jit_write_protect_np(true);
}
static inline void qemu_thread_jit_write(void)
{
- if (__builtin_available(macOS 11.0, *)) {
- pthread_jit_write_protect_np(false);
- }
+ pthread_jit_write_protect_np(false);
}
#else
static inline void qemu_thread_jit_write(void) {}
diff --git a/include/qemu/plugin-memory.h b/include/qemu/plugin-memory.h
index 0f59226727..8ad13c110c 100644
--- a/include/qemu/plugin-memory.h
+++ b/include/qemu/plugin-memory.h
@@ -37,4 +37,4 @@ struct qemu_plugin_hwaddr {
bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
bool is_store, struct qemu_plugin_hwaddr *data);
-#endif /* _PLUGIN_MEMORY_H_ */
+#endif /* PLUGIN_MEMORY_H */
diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h
index 9a8438f683..a772e14193 100644
--- a/include/qemu/plugin.h
+++ b/include/qemu/plugin.h
@@ -12,6 +12,7 @@
#include "qemu/error-report.h"
#include "qemu/queue.h"
#include "qemu/option.h"
+#include "exec/memopidx.h"
/*
* Events that plugins can subscribe to.
@@ -36,6 +37,25 @@ enum qemu_plugin_event {
struct qemu_plugin_desc;
typedef QTAILQ_HEAD(, qemu_plugin_desc) QemuPluginList;
+/*
+ * Construct a qemu_plugin_meminfo_t.
+ */
+static inline qemu_plugin_meminfo_t
+make_plugin_meminfo(MemOpIdx oi, enum qemu_plugin_mem_rw rw)
+{
+ return oi | (rw << 16);
+}
+
+/*
+ * Extract the memory operation direction from a qemu_plugin_meminfo_t.
+ * Other portions may be extracted via get_memop and get_mmuidx.
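+ *
+ * For example (sketch): given info = make_plugin_meminfo(oi, QEMU_PLUGIN_MEM_W),
+ * get_plugin_meminfo_rw(info) returns QEMU_PLUGIN_MEM_W again.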
+ */
+static inline enum qemu_plugin_mem_rw
+get_plugin_meminfo_rw(qemu_plugin_meminfo_t i)
+{
+ return i >> 16;
+}
+
#ifdef CONFIG_PLUGIN
extern QemuOptsList qemu_plugin_opts;
@@ -143,10 +163,12 @@ struct qemu_plugin_tb {
/**
* qemu_plugin_tb_insn_get(): get next plugin record for translation.
-
+ * @tb: the internal tb context
+ * @pc: address of instruction
*/
static inline
-struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb)
+struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb,
+ uint64_t pc)
{
struct qemu_plugin_insn *insn;
int i, j;
@@ -159,6 +181,7 @@ struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb)
g_byte_array_set_size(insn->data, 0);
insn->calls_helpers = false;
insn->mem_helper = false;
+ insn->vaddr = pc;
for (i = 0; i < PLUGIN_N_CB_TYPES; i++) {
for (j = 0; j < PLUGIN_N_CB_SUBTYPES; j++) {
@@ -180,7 +203,8 @@ qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1,
uint64_t a6, uint64_t a7, uint64_t a8);
void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret);
-void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t meminfo);
+void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
+ MemOpIdx oi, enum qemu_plugin_mem_rw rw);
void qemu_plugin_flush_cb(void);
@@ -200,6 +224,23 @@ void qemu_plugin_disable_mem_helpers(CPUState *cpu);
*/
void qemu_plugin_user_exit(void);
+/**
+ * qemu_plugin_user_prefork_lock(): take plugin lock before forking
+ *
+ * This is a user-mode only helper to take the internal plugin lock
+ * before a fork event. This ensures a consistent lock state.
+ */
+void qemu_plugin_user_prefork_lock(void);
+
+/**
+ * qemu_plugin_user_postfork(): reset the plugin lock
+ * @is_child: is this thread the child
+ *
+ * This user-mode only helper resets the lock state after a fork so we
+ * can continue using the plugin interface.
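+ *
+ * Typical pairing (a sketch of the expected call order, not a prescription):
+ * qemu_plugin_user_prefork_lock() before fork(), then
+ * qemu_plugin_user_postfork(true) in the child and
+ * qemu_plugin_user_postfork(false) in the parent.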
+ */ +void qemu_plugin_user_postfork(bool is_child); + #else /* !CONFIG_PLUGIN */ static inline void qemu_plugin_add_opts(void) @@ -244,7 +285,8 @@ void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret) { } static inline void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, - uint32_t meminfo) + MemOpIdx oi, + enum qemu_plugin_mem_rw rw) { } static inline void qemu_plugin_flush_cb(void) @@ -262,6 +304,13 @@ static inline void qemu_plugin_disable_mem_helpers(CPUState *cpu) static inline void qemu_plugin_user_exit(void) { } + +static inline void qemu_plugin_user_prefork_lock(void) +{ } + +static inline void qemu_plugin_user_postfork(bool is_child) +{ } + #endif /* !CONFIG_PLUGIN */ #endif /* QEMU_PLUGIN_H */ diff --git a/include/qemu/qemu-plugin.h b/include/qemu/qemu-plugin.h index e6e815abc5..d0e9d03adf 100644 --- a/include/qemu/qemu-plugin.h +++ b/include/qemu/qemu-plugin.h @@ -7,8 +7,9 @@ * * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef QEMU_PLUGIN_API_H -#define QEMU_PLUGIN_API_H + +#ifndef QEMU_QEMU_PLUGIN_H +#define QEMU_QEMU_PLUGIN_H #include #include @@ -577,4 +578,51 @@ int qemu_plugin_n_max_vcpus(void); */ void qemu_plugin_outs(const char *string); -#endif /* QEMU_PLUGIN_API_H */ +/** + * qemu_plugin_bool_parse() - parses a boolean argument in the form of + * "=[on|yes|true|off|no|false]" + * + * @name: argument name, the part before the equals sign + * @val: argument value, what's after the equals sign + * @ret: output return value + * + * returns true if the combination @name=@val parses correctly to a boolean + * argument, and false otherwise + */ +bool qemu_plugin_bool_parse(const char *name, const char *val, bool *ret); + +/** + * qemu_plugin_path_to_binary() - path to binary file being executed + * + * Return a string representing the path to the binary. For user-mode + * this is the main executable. For system emulation we currently + * return NULL. The user should g_free() the string once no longer + * needed. + */ +const char *qemu_plugin_path_to_binary(void); + +/** + * qemu_plugin_start_code() - returns start of text segment + * + * Returns the nominal start address of the main text segment in + * user-mode. Currently returns 0 for system emulation. + */ +uint64_t qemu_plugin_start_code(void); + +/** + * qemu_plugin_end_code() - returns end of text segment + * + * Returns the nominal end address of the main text segment in + * user-mode. Currently returns 0 for system emulation. + */ +uint64_t qemu_plugin_end_code(void); + +/** + * qemu_plugin_entry_code() - returns start address for module + * + * Returns the nominal entry address of the main text segment in + * user-mode. Currently returns 0 for system emulation. + */ +uint64_t qemu_plugin_entry_code(void); + +#endif /* QEMU_QEMU_PLUGIN_H */ diff --git a/include/qemu/qemu-print.h b/include/qemu/qemu-print.h index 40b596262f..1b70920648 100644 --- a/include/qemu/qemu-print.h +++ b/include/qemu/qemu-print.h @@ -13,11 +13,11 @@ #ifndef QEMU_PRINT_H #define QEMU_PRINT_H -int qemu_vprintf(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0); -int qemu_printf(const char *fmt, ...) GCC_FMT_ATTR(1, 2); +int qemu_vprintf(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0); +int qemu_printf(const char *fmt, ...) G_GNUC_PRINTF(1, 2); int qemu_vfprintf(FILE *stream, const char *fmt, va_list ap) - GCC_FMT_ATTR(2, 0); -int qemu_fprintf(FILE *stream, const char *fmt, ...) GCC_FMT_ATTR(2, 3); + G_GNUC_PRINTF(2, 0); +int qemu_fprintf(FILE *stream, const char *fmt, ...) 
G_GNUC_PRINTF(2, 3); #endif diff --git a/include/qemu/qemu-progress.h b/include/qemu/qemu-progress.h new file mode 100644 index 0000000000..137e1c316f --- /dev/null +++ b/include/qemu/qemu-progress.h @@ -0,0 +1,8 @@ +#ifndef QEMU_PROGRESS_H +#define QEMU_PROGRESS_H + +void qemu_progress_init(int enabled, float min_skip); +void qemu_progress_end(void); +void qemu_progress_print(float delta, int max); + +#endif /* QEMU_PROGRESS_H */ diff --git a/include/qemu/range.h b/include/qemu/range.h index f62b363e0d..7e2b1cc447 100644 --- a/include/qemu/range.h +++ b/include/qemu/range.h @@ -114,8 +114,8 @@ static inline uint64_t range_upb(Range *range) * @size may be 0. If the range would overflow, returns -ERANGE, otherwise * 0. */ -static inline int QEMU_WARN_UNUSED_RESULT range_init(Range *range, uint64_t lob, - uint64_t size) +G_GNUC_WARN_UNUSED_RESULT +static inline int range_init(Range *range, uint64_t lob, uint64_t size) { if (lob + size < lob) { return -ERANGE; diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h index 515d327cf1..b063c6fde8 100644 --- a/include/qemu/rcu.h +++ b/include/qemu/rcu.h @@ -27,7 +27,9 @@ #include "qemu/thread.h" #include "qemu/queue.h" #include "qemu/atomic.h" +#include "qemu/notify.h" #include "qemu/sys_membarrier.h" +#include "qemu/coroutine-tls.h" #ifdef __cplusplus extern "C" { @@ -66,13 +68,20 @@ struct rcu_reader_data { /* Data used for registry, protected by rcu_registry_lock */ QLIST_ENTRY(rcu_reader_data) node; + + /* + * NotifierList used to force an RCU grace period. Accessed under + * rcu_registry_lock. Note that the notifier is called _outside_ + * the thread! + */ + NotifierList force_rcu; }; -extern __thread struct rcu_reader_data rcu_reader; +QEMU_DECLARE_CO_TLS(struct rcu_reader_data, rcu_reader) static inline void rcu_read_lock(void) { - struct rcu_reader_data *p_rcu_reader = &rcu_reader; + struct rcu_reader_data *p_rcu_reader = get_ptr_rcu_reader(); unsigned ctr; if (p_rcu_reader->depth++ > 0) { @@ -88,7 +97,7 @@ static inline void rcu_read_lock(void) static inline void rcu_read_unlock(void) { - struct rcu_reader_data *p_rcu_reader = &rcu_reader; + struct rcu_reader_data *p_rcu_reader = get_ptr_rcu_reader(); assert(p_rcu_reader->depth != 0); if (--p_rcu_reader->depth > 0) { @@ -180,6 +189,13 @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(RCUReadAuto, rcu_read_auto_unlock) #define RCU_READ_LOCK_GUARD() \ g_autoptr(RCUReadAuto) _rcu_read_auto __attribute__((unused)) = rcu_read_auto_lock() +/* + * Force-RCU notifiers tell readers that they should exit their + * read-side critical section. 
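+ *
+ * For example (sketch), a long-running reader thread can register a
+ * notifier whose callback kicks it out of its critical section when a
+ * grace period is taking too long.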
+ */
+void rcu_add_force_rcu_notifier(Notifier *n);
+void rcu_remove_force_rcu_notifier(Notifier *n);
+
#ifdef __cplusplus
}
#endif
diff --git a/include/qemu/readline.h b/include/qemu/readline.h
index e81258322b..622aa4564f 100644
--- a/include/qemu/readline.h
+++ b/include/qemu/readline.h
@@ -5,7 +5,7 @@
#define READLINE_MAX_CMDS 64
#define READLINE_MAX_COMPLETIONS 256
-typedef void GCC_FMT_ATTR(2, 3) ReadLinePrintfFunc(void *opaque,
+typedef void G_GNUC_PRINTF(2, 3) ReadLinePrintfFunc(void *opaque,
const char *fmt, ...);
typedef void ReadLineFlushFunc(void *opaque);
typedef void ReadLineFunc(void *opaque, const char *str,
diff --git a/include/qemu/selfmap.h b/include/qemu/selfmap.h
index 80cf920fba..3479a2a618 100644
--- a/include/qemu/selfmap.h
+++ b/include/qemu/selfmap.h
@@ -41,4 +41,4 @@ GSList *read_self_maps(void);
*/
void free_self_maps(GSList *info);
-#endif /* _SELFMAP_H_ */
+#endif /* SELFMAP_H */
diff --git a/include/qemu/sockets.h b/include/qemu/sockets.h
index 0c34bf2398..2b0698a7c9 100644
--- a/include/qemu/sockets.h
+++ b/include/qemu/sockets.h
@@ -14,12 +14,43 @@ int inet_aton(const char *cp, struct in_addr *ia);
/* misc helpers */
bool fd_is_socket(int fd);
int qemu_socket(int domain, int type, int protocol);
+
+#ifndef WIN32
+/**
+ * qemu_socketpair:
+ * @domain: specifies a communication domain, such as PF_UNIX
+ * @type: specifies the socket type.
+ * @protocol: specifies a particular protocol to be used with the socket
+ * @sv: an array to store the pair of sockets created
+ *
+ * Creates an unnamed pair of connected sockets in the specified domain,
+ * of the specified type, and using the optionally specified protocol,
+ * and automatically sets the close-on-exec flag on the returned sockets.
+ *
+ * Return 0 on success.
+ */
+int qemu_socketpair(int domain, int type, int protocol, int sv[2]);
+#endif
+
int qemu_accept(int s, struct sockaddr *addr, socklen_t *addrlen);
+/*
+ * A variant of send(2) which handles partial sends.
+ *
+ * Return the number of bytes transferred over the socket.
+ * Set errno if fewer than `count' bytes are sent.
+ *
+ * This function does not work with non-blocking sockets.
+ * Any of the possibilities with non-blocking sockets is bad:
+ * - return a short write (then the name would be wrong)
+ * - busy wait, adding (errno == EAGAIN) to the loop
+ */
+ssize_t qemu_send_full(int s, const void *buf, size_t count)
+ G_GNUC_WARN_UNUSED_RESULT;
int socket_set_cork(int fd, int v);
int socket_set_nodelay(int fd);
-void qemu_set_block(int fd);
-int qemu_try_set_nonblock(int fd);
-void qemu_set_nonblock(int fd);
+void qemu_socket_set_block(int fd);
+int qemu_socket_try_set_nonblock(int fd);
+void qemu_socket_set_nonblock(int fd);
int socket_set_fast_reuse(int fd);
#ifdef WIN32
@@ -40,6 +71,7 @@ NetworkAddressFamily inet_netfamily(int family);
int unix_listen(const char *path, Error **errp);
int unix_connect(const char *path, Error **errp);
+char *socket_uri(SocketAddress *addr);
SocketAddress *socket_parse(const char *str, Error **errp);
int socket_connect(SocketAddress *addr, Error **errp);
int socket_listen(SocketAddress *addr, int num, Error **errp);
@@ -47,6 +79,8 @@ void socket_listen_cleanup(int fd, Error **errp);
int socket_dgram(SocketAddress *remote, SocketAddress *local, Error **errp);
/* Old, ipv4 only bits. Don't use for new code.
*/ +int convert_host_port(struct sockaddr_in *saddr, const char *host, + const char *port, Error **errp); int parse_host_port(struct sockaddr_in *saddr, const char *str, Error **errp); int socket_init(void); @@ -121,5 +155,4 @@ SocketAddress *socket_address_flatten(SocketAddressLegacy *addr); * Return 0 on success. */ int socket_address_parse_named_fd(SocketAddress *addr, Error **errp); - #endif /* QEMU_SOCKETS_H */ diff --git a/include/qemu/thread-context.h b/include/qemu/thread-context.h new file mode 100644 index 0000000000..2ebd6b7fe1 --- /dev/null +++ b/include/qemu/thread-context.h @@ -0,0 +1,57 @@ +/* + * QEMU Thread Context + * + * Copyright Red Hat Inc., 2022 + * + * Authors: + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef SYSEMU_THREAD_CONTEXT_H +#define SYSEMU_THREAD_CONTEXT_H + +#include "qapi/qapi-types-machine.h" +#include "qemu/thread.h" +#include "qom/object.h" + +#define TYPE_THREAD_CONTEXT "thread-context" +OBJECT_DECLARE_TYPE(ThreadContext, ThreadContextClass, + THREAD_CONTEXT) + +struct ThreadContextClass { + ObjectClass parent_class; +}; + +struct ThreadContext { + /* private */ + Object parent; + + /* private */ + unsigned int thread_id; + QemuThread thread; + + /* Semaphore to wait for context thread action. */ + QemuSemaphore sem; + /* Semaphore to wait for action in context thread. */ + QemuSemaphore sem_thread; + /* Mutex to synchronize requests. */ + QemuMutex mutex; + + /* Commands for the thread to execute. */ + int thread_cmd; + void *thread_cmd_data; + + /* CPU affinity bitmap used for initialization. */ + unsigned long *init_cpu_bitmap; + int init_cpu_nbits; +}; + +void thread_context_create_thread(ThreadContext *tc, QemuThread *thread, + const char *name, + void *(*start_routine)(void *), void *arg, + int mode); + +#endif /* SYSEMU_THREAD_CONTEXT_H */ diff --git a/include/qemu/thread-posix.h b/include/qemu/thread-posix.h index b792e6ef37..5f2f3d1386 100644 --- a/include/qemu/thread-posix.h +++ b/include/qemu/thread-posix.h @@ -27,14 +27,9 @@ struct QemuCond { }; struct QemuSemaphore { -#ifndef CONFIG_SEM_TIMEDWAIT - pthread_mutex_t lock; - pthread_cond_t cond; + QemuMutex mutex; + QemuCond cond; unsigned int count; -#else - sem_t sem; -#endif - bool initialized; }; struct QemuEvent { diff --git a/include/qemu/thread.h b/include/qemu/thread.h index 460568d67d..7c6703bce3 100644 --- a/include/qemu/thread.h +++ b/include/qemu/thread.h @@ -185,10 +185,14 @@ void qemu_event_destroy(QemuEvent *ev); void qemu_thread_create(QemuThread *thread, const char *name, void *(*start_routine)(void *), void *arg, int mode); +int qemu_thread_set_affinity(QemuThread *thread, unsigned long *host_cpus, + unsigned long nbits); +int qemu_thread_get_affinity(QemuThread *thread, unsigned long **host_cpus, + unsigned long *nbits); void *qemu_thread_join(QemuThread *thread); void qemu_thread_get_self(QemuThread *thread); bool qemu_thread_is_self(QemuThread *thread); -void qemu_thread_exit(void *retval) QEMU_NORETURN; +G_NORETURN void qemu_thread_exit(void *retval); void qemu_thread_naming(bool enable); struct Notifier; @@ -227,7 +231,7 @@ struct QemuSpin { static inline void qemu_spin_init(QemuSpin *spin) { - __sync_lock_release(&spin->value); + qatomic_set(&spin->value, 0); #ifdef CONFIG_TSAN __tsan_mutex_create(spin, __tsan_mutex_not_static); #endif @@ -246,7 +250,7 @@ static inline void qemu_spin_lock(QemuSpin *spin) #ifdef CONFIG_TSAN 
__tsan_mutex_pre_lock(spin, 0); #endif - while (unlikely(__sync_lock_test_and_set(&spin->value, true))) { + while (unlikely(qatomic_xchg(&spin->value, 1))) { while (qatomic_read(&spin->value)) { cpu_relax(); } @@ -261,7 +265,7 @@ static inline bool qemu_spin_trylock(QemuSpin *spin) #ifdef CONFIG_TSAN __tsan_mutex_pre_lock(spin, __tsan_mutex_try_lock); #endif - bool busy = __sync_lock_test_and_set(&spin->value, true); + bool busy = qatomic_xchg(&spin->value, true); #ifdef CONFIG_TSAN unsigned flags = __tsan_mutex_try_lock; flags |= busy ? __tsan_mutex_try_lock_failed : 0; @@ -280,7 +284,7 @@ static inline void qemu_spin_unlock(QemuSpin *spin) #ifdef CONFIG_TSAN __tsan_mutex_pre_unlock(spin, 0); #endif - __sync_lock_release(&spin->value); + qatomic_store_release(&spin->value, 0); #ifdef CONFIG_TSAN __tsan_mutex_post_unlock(spin, 0); #endif diff --git a/include/qemu/timer.h b/include/qemu/timer.h index 063ffefe19..046a9701a2 100644 --- a/include/qemu/timer.h +++ b/include/qemu/timer.h @@ -520,7 +520,7 @@ static inline QEMUTimer *timer_new_full(QEMUTimerListGroup *timer_list_group, int scale, int attributes, QEMUTimerCB *cb, void *opaque) { - QEMUTimer *ts = (QEMUTimer *)g_malloc0(sizeof(QEMUTimer)); + QEMUTimer *ts = (QEMUTimer *)g_new0(QEMUTimer, 1); timer_init_full(ts, timer_list_group, type, scale, attributes, cb, opaque); return ts; } diff --git a/include/qemu/transactions.h b/include/qemu/transactions.h index 92c5965235..2f2060acd9 100644 --- a/include/qemu/transactions.h +++ b/include/qemu/transactions.h @@ -31,6 +31,9 @@ * tran_create(), call your "prepare" functions on it, and finally call * tran_abort() or tran_commit() to finalize the transaction by corresponding * finalization actions in reverse order. + * + * The clean() functions registered by the drivers in a transaction are called + * last, after all abort() or commit() functions have been called. 
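+ *
+ * For example (a sketch, not a real driver): an action that creates a
+ * temporary file could use .commit to keep it, .abort to unlink it, and
+ * .clean to free the path string in either case.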
*/ #ifndef QEMU_TRANSACTIONS_H diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h index ee60eb3de4..688408e048 100644 --- a/include/qemu/typedefs.h +++ b/include/qemu/typedefs.h @@ -26,6 +26,7 @@ typedef struct AddressSpace AddressSpace; typedef struct AioContext AioContext; typedef struct Aml Aml; typedef struct AnnounceTimer AnnounceTimer; +typedef struct ArchCPU ArchCPU; typedef struct BdrvDirtyBitmap BdrvDirtyBitmap; typedef struct BdrvDirtyBitmapIter BdrvDirtyBitmapIter; typedef struct BlockBackend BlockBackend; @@ -39,7 +40,10 @@ typedef struct CompatProperty CompatProperty; typedef struct CoMutex CoMutex; typedef struct ConfidentialGuestSupport ConfidentialGuestSupport; typedef struct CPUAddressSpace CPUAddressSpace; +typedef struct CPUArchState CPUArchState; +typedef struct CPUJumpCache CPUJumpCache; typedef struct CPUState CPUState; +typedef struct CPUTLBEntryFull CPUTLBEntryFull; typedef struct DeviceListener DeviceListener; typedef struct DeviceState DeviceState; typedef struct DirtyBitmapSnapshot DirtyBitmapSnapshot; @@ -86,6 +90,8 @@ typedef struct PCIDevice PCIDevice; typedef struct PCIEAERErr PCIEAERErr; typedef struct PCIEAERLog PCIEAERLog; typedef struct PCIEAERMsg PCIEAERMsg; +typedef struct PCIESriovPF PCIESriovPF; +typedef struct PCIESriovVF PCIESriovVF; typedef struct PCIEPort PCIEPort; typedef struct PCIESlot PCIESlot; typedef struct PCIExpressDevice PCIExpressDevice; @@ -125,6 +131,9 @@ typedef struct VirtIODevice VirtIODevice; typedef struct Visitor Visitor; typedef struct VMChangeStateEntry VMChangeStateEntry; typedef struct VMStateDescription VMStateDescription; +typedef struct DumpState DumpState; +typedef struct GraphicHwOps GraphicHwOps; +typedef struct QEMUCursor QEMUCursor; /* * Pointer types diff --git a/include/qemu/vfio-helpers.h b/include/qemu/vfio-helpers.h index 4491c8e1a6..bde9495b25 100644 --- a/include/qemu/vfio-helpers.h +++ b/include/qemu/vfio-helpers.h @@ -18,7 +18,7 @@ typedef struct QEMUVFIOState QEMUVFIOState; QEMUVFIOState *qemu_vfio_open_pci(const char *device, Error **errp); void qemu_vfio_close(QEMUVFIOState *s); int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size, - bool temporary, uint64_t *iova_list); + bool temporary, uint64_t *iova_list, Error **errp); int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s); void qemu_vfio_dma_unmap(QEMUVFIOState *s, void *host); void *qemu_vfio_pci_map_bar(QEMUVFIOState *s, int index, diff --git a/include/qemu/vhost-user-server.h b/include/qemu/vhost-user-server.h index 121ea1dedf..cd43193b80 100644 --- a/include/qemu/vhost-user-server.h +++ b/include/qemu/vhost-user-server.h @@ -42,6 +42,8 @@ typedef struct { const VuDevIface *vu_iface; /* Protected by ctx lock */ + unsigned int refcount; + bool wait_idle; VuDev vu_dev; QIOChannel *ioc; /* The I/O channel with the client */ QIOChannelSocket *sioc; /* The underlying data channel with the client */ @@ -59,6 +61,9 @@ bool vhost_user_server_start(VuServer *server, void vhost_user_server_stop(VuServer *server); +void vhost_user_server_ref(VuServer *server); +void vhost_user_server_unref(VuServer *server); + void vhost_user_server_attach_aio_context(VuServer *server, AioContext *ctx); void vhost_user_server_detach_aio_context(VuServer *server); diff --git a/include/qemu/win_dump_defs.h b/include/qemu/win_dump_defs.h index 145096e8ee..73a44e2408 100644 --- a/include/qemu/win_dump_defs.h +++ b/include/qemu/win_dump_defs.h @@ -11,11 +11,22 @@ #ifndef QEMU_WIN_DUMP_DEFS_H #define QEMU_WIN_DUMP_DEFS_H +typedef struct WinDumpPhyMemRun32 { + 
uint32_t BasePage; + uint32_t PageCount; +} QEMU_PACKED WinDumpPhyMemRun32; + typedef struct WinDumpPhyMemRun64 { uint64_t BasePage; uint64_t PageCount; } QEMU_PACKED WinDumpPhyMemRun64; +typedef struct WinDumpPhyMemDesc32 { + uint32_t NumberOfRuns; + uint32_t NumberOfPages; + WinDumpPhyMemRun32 Run[86]; +} QEMU_PACKED WinDumpPhyMemDesc32; + typedef struct WinDumpPhyMemDesc64 { uint32_t NumberOfRuns; uint32_t unused; @@ -33,6 +44,39 @@ typedef struct WinDumpExceptionRecord { uint64_t ExceptionInformation[15]; } QEMU_PACKED WinDumpExceptionRecord; +typedef struct WinDumpHeader32 { + char Signature[4]; + char ValidDump[4]; + uint32_t MajorVersion; + uint32_t MinorVersion; + uint32_t DirectoryTableBase; + uint32_t PfnDatabase; + uint32_t PsLoadedModuleList; + uint32_t PsActiveProcessHead; + uint32_t MachineImageType; + uint32_t NumberProcessors; + union { + struct { + uint32_t BugcheckCode; + uint32_t BugcheckParameter1; + uint32_t BugcheckParameter2; + uint32_t BugcheckParameter3; + uint32_t BugcheckParameter4; + }; + uint8_t BugcheckData[20]; + }; + uint8_t VersionUser[32]; + uint32_t reserved0; + uint32_t KdDebuggerDataBlock; + union { + WinDumpPhyMemDesc32 PhysicalMemoryBlock; + uint8_t PhysicalMemoryBlockBuffer[700]; + }; + uint8_t reserved1[3200]; + uint32_t RequiredDumpSpace; + uint8_t reserved2[92]; +} QEMU_PACKED WinDumpHeader32; + typedef struct WinDumpHeader64 { char Signature[4]; char ValidDump[4]; @@ -81,24 +125,48 @@ typedef struct WinDumpHeader64 { uint8_t reserved[4018]; } QEMU_PACKED WinDumpHeader64; +typedef union WinDumpHeader { + struct { + char Signature[4]; + char ValidDump[4]; + }; + WinDumpHeader32 x32; + WinDumpHeader64 x64; +} WinDumpHeader; + #define KDBG_OWNER_TAG_OFFSET64 0x10 #define KDBG_MM_PFN_DATABASE_OFFSET64 0xC0 #define KDBG_KI_BUGCHECK_DATA_OFFSET64 0x88 #define KDBG_KI_PROCESSOR_BLOCK_OFFSET64 0x218 #define KDBG_OFFSET_PRCB_CONTEXT_OFFSET64 0x338 +#define KDBG_OWNER_TAG_OFFSET KDBG_OWNER_TAG_OFFSET64 +#define KDBG_MM_PFN_DATABASE_OFFSET KDBG_MM_PFN_DATABASE_OFFSET64 +#define KDBG_KI_BUGCHECK_DATA_OFFSET KDBG_KI_BUGCHECK_DATA_OFFSET64 +#define KDBG_KI_PROCESSOR_BLOCK_OFFSET KDBG_KI_PROCESSOR_BLOCK_OFFSET64 +#define KDBG_OFFSET_PRCB_CONTEXT_OFFSET KDBG_OFFSET_PRCB_CONTEXT_OFFSET64 + #define VMCOREINFO_ELF_NOTE_HDR_SIZE 24 +#define VMCOREINFO_WIN_DUMP_NOTE_SIZE64 (sizeof(WinDumpHeader64) + \ + VMCOREINFO_ELF_NOTE_HDR_SIZE) +#define VMCOREINFO_WIN_DUMP_NOTE_SIZE32 (sizeof(WinDumpHeader32) + \ + VMCOREINFO_ELF_NOTE_HDR_SIZE) #define WIN_CTX_X64 0x00100000L +#define WIN_CTX_X86 0x00010000L #define WIN_CTX_CTL 0x00000001L #define WIN_CTX_INT 0x00000002L #define WIN_CTX_SEG 0x00000004L #define WIN_CTX_FP 0x00000008L #define WIN_CTX_DBG 0x00000010L +#define WIN_CTX_EXT 0x00000020L -#define WIN_CTX_FULL (WIN_CTX_X64 | WIN_CTX_CTL | WIN_CTX_INT | WIN_CTX_FP) -#define WIN_CTX_ALL (WIN_CTX_FULL | WIN_CTX_SEG | WIN_CTX_DBG) +#define WIN_CTX64_FULL (WIN_CTX_X64 | WIN_CTX_CTL | WIN_CTX_INT | WIN_CTX_FP) +#define WIN_CTX64_ALL (WIN_CTX64_FULL | WIN_CTX_SEG | WIN_CTX_DBG) + +#define WIN_CTX32_FULL (WIN_CTX_X86 | WIN_CTX_CTL | WIN_CTX_INT | WIN_CTX_SEG) +#define WIN_CTX32_ALL (WIN_CTX32_FULL | WIN_CTX_FP | WIN_CTX_DBG | WIN_CTX_EXT) #define LIVE_SYSTEM_DUMP 0x00000161 @@ -107,7 +175,41 @@ typedef struct WinM128A { int64_t high; } QEMU_ALIGNED(16) WinM128A; -typedef struct WinContext { +typedef struct WinContext32 { + uint32_t ContextFlags; + + uint32_t Dr0; + uint32_t Dr1; + uint32_t Dr2; + uint32_t Dr3; + uint32_t Dr6; + uint32_t Dr7; + + uint8_t FloatSave[112]; + + 
uint32_t SegGs; + uint32_t SegFs; + uint32_t SegEs; + uint32_t SegDs; + + uint32_t Edi; + uint32_t Esi; + uint32_t Ebx; + uint32_t Edx; + uint32_t Ecx; + uint32_t Eax; + + uint32_t Ebp; + uint32_t Eip; + uint32_t SegCs; + uint32_t EFlags; + uint32_t Esp; + uint32_t SegSs; + + uint8_t ExtendedRegisters[512]; +} QEMU_ALIGNED(16) WinContext32; + +typedef struct WinContext64 { uint64_t PHome[6]; uint32_t ContextFlags; @@ -174,6 +276,11 @@ typedef struct WinContext { uint64_t LastBranchFromRip; uint64_t LastExceptionToRip; uint64_t LastExceptionFromRip; -} QEMU_ALIGNED(16) WinContext; +} QEMU_ALIGNED(16) WinContext64; + +typedef union WinContext { + WinContext32 x32; + WinContext64 x64; +} WinContext; #endif /* QEMU_WIN_DUMP_DEFS_H */ diff --git a/include/qemu/xattr.h b/include/qemu/xattr.h index a83fe8e749..f1d0f7be74 100644 --- a/include/qemu/xattr.h +++ b/include/qemu/xattr.h @@ -22,7 +22,9 @@ #ifdef CONFIG_LIBATTR # include #else -# define ENOATTR ENODATA +# if !defined(ENOATTR) +# define ENOATTR ENODATA +# endif # include #endif diff --git a/include/qom/object.h b/include/qom/object.h index 14072b91cf..92aed2ac3e 100644 --- a/include/qom/object.h +++ b/include/qom/object.h @@ -21,7 +21,6 @@ #include "qapi/qapi-builtin-types.h" #include "qemu/module.h" -#include "qom/object.h" struct TypeImpl; typedef struct TypeImpl *Type; @@ -621,7 +620,7 @@ Object *object_new_with_props(const char *typename, Object *parent, const char *id, Error **errp, - ...) QEMU_SENTINEL; + ...) G_GNUC_NULL_TERMINATED; /** * object_new_with_propv: @@ -681,7 +680,7 @@ void object_apply_compat_props(Object *obj); * * Returns: %true on success, %false on error. */ -bool object_set_props(Object *obj, Error **errp, ...) QEMU_SENTINEL; +bool object_set_props(Object *obj, Error **errp, ...) G_GNUC_NULL_TERMINATED; /** * object_set_propv: @@ -733,7 +732,7 @@ void object_initialize(void *obj, size_t size, const char *typename); bool object_initialize_child_with_props(Object *parentobj, const char *propname, void *childobj, size_t size, const char *type, - Error **errp, ...) QEMU_SENTINEL; + Error **errp, ...) G_GNUC_NULL_TERMINATED; /** * object_initialize_child_with_propsv: @@ -1548,6 +1547,18 @@ Object *object_resolve_path(const char *path, bool *ambiguous); Object *object_resolve_path_type(const char *path, const char *typename, bool *ambiguous); +/** + * object_resolve_path_at: + * @parent: the object in which to resolve the path + * @path: the path to resolve + * + * This is like object_resolve_path(), except paths not starting with + * a slash are relative to @parent. + * + * Returns: The resolved object or NULL on path lookup failure. + */ +Object *object_resolve_path_at(Object *parent, const char *path); + /** * object_resolve_path_component: * @parent: the object in which to resolve the path diff --git a/include/scsi/constants.h b/include/scsi/constants.h index 2a32c08b5e..6a8bad556a 100644 --- a/include/scsi/constants.h +++ b/include/scsi/constants.h @@ -225,6 +225,7 @@ #define TYPE_NO_LUN 0x7f /* Mode page codes for mode sense/set */ +#define MODE_PAGE_VENDOR_SPECIFIC 0x00 #define MODE_PAGE_R_W_ERROR 0x01 #define MODE_PAGE_HD_GEOMETRY 0x04 #define MODE_PAGE_FLEXIBLE_DISK_GEOMETRY 0x05 @@ -234,6 +235,7 @@ #define MODE_PAGE_FAULT_FAIL 0x1c #define MODE_PAGE_TO_PROTECT 0x1d #define MODE_PAGE_CAPABILITIES 0x2a +#define MODE_PAGE_APPLE_VENDOR 0x30 #define MODE_PAGE_ALLS 0x3f /* Not in Mt. 
Fuji, but in ATAPI 2.6 -- deprecated now in favor * of MODE_PAGE_SENSE_POWER */ diff --git a/semihosting/common-semi.h b/include/semihosting/common-semi.h similarity index 96% rename from semihosting/common-semi.h rename to include/semihosting/common-semi.h index 0bfab1c669..0a91db7c41 100644 --- a/semihosting/common-semi.h +++ b/include/semihosting/common-semi.h @@ -34,6 +34,6 @@ #ifndef COMMON_SEMI_H #define COMMON_SEMI_H -target_ulong do_common_semihosting(CPUState *cs); +void do_common_semihosting(CPUState *cs); #endif /* COMMON_SEMI_H */ diff --git a/include/semihosting/console.h b/include/semihosting/console.h index 0238f540f4..bd78e5f03f 100644 --- a/include/semihosting/console.h +++ b/include/semihosting/console.h @@ -12,58 +12,48 @@ #include "cpu.h" /** - * qemu_semihosting_console_outs: - * @env: CPUArchState - * @s: host address of null terminated guest string + * qemu_semihosting_console_read: + * @cs: CPUState + * @buf: host buffer + * @len: buffer size * - * Send a null terminated guest string to the debug console. This may - * be the remote gdb session if a softmmu guest is currently being - * debugged. + * Receive at least one character from debug console. As this call may + * block if no data is available we suspend the CPU and will re-execute the + * instruction when data is there. Therefore two conditions must be met: * - * Returns: number of bytes written. - */ -int qemu_semihosting_console_outs(CPUArchState *env, target_ulong s); - -/** - * qemu_semihosting_console_outc: - * @env: CPUArchState - * @s: host address of null terminated guest string - * - * Send single character from guest memory to the debug console. This - * may be the remote gdb session if a softmmu guest is currently being - * debugged. - * - * Returns: nothing - */ -void qemu_semihosting_console_outc(CPUArchState *env, target_ulong c); - -/** - * qemu_semihosting_console_inc: - * @env: CPUArchState - * - * Receive single character from debug console. This may be the remote - * gdb session if a softmmu guest is currently being debugged. As this - * call may block if no data is available we suspend the CPU and will - * re-execute the instruction when data is there. Therefore two - * conditions must be met: * - CPUState is synchronized before calling this function * - pc is only updated once the character is successfully returned * - * Returns: character read OR cpu_loop_exit! + * Returns: number of characters read, OR cpu_loop_exit! */ -target_ulong qemu_semihosting_console_inc(CPUArchState *env); +int qemu_semihosting_console_read(CPUState *cs, void *buf, int len); /** - * qemu_semihosting_log_out: - * @s: pointer to string - * @len: length of string + * qemu_semihosting_console_write: + * @buf: host buffer + * @len: buffer size * - * Send a string to the debug output. Unlike console_out these strings - * can't be sent to a remote gdb instance as they don't exist in guest - * memory. + * Write len bytes from buf to the debug console. * - * Returns: number of bytes written + * Returns: number of bytes written -- this should only ever be short + * on some sort of i/o error. */ -int qemu_semihosting_log_out(const char *s, int len); +int qemu_semihosting_console_write(void *buf, int len); + +/* + * qemu_semihosting_console_block_until_ready: + * @cs: CPUState + * + * If no data is available we suspend the CPU and will re-execute the + * instruction when data is available. 
+ */ +void qemu_semihosting_console_block_until_ready(CPUState *cs); + +/** + * qemu_semihosting_console_ready: + * + * Return true if characters are available for read; does not block. + */ +bool qemu_semihosting_console_ready(void); #endif /* SEMIHOST_CONSOLE_H */ diff --git a/include/semihosting/guestfd.h b/include/semihosting/guestfd.h new file mode 100644 index 0000000000..3d426fedab --- /dev/null +++ b/include/semihosting/guestfd.h @@ -0,0 +1,91 @@ +/* + * Hosted file support for semihosting syscalls. + * + * Copyright (c) 2005, 2007 CodeSourcery. + * Copyright (c) 2019 Linaro + * Copyright © 2020 by Keith Packard + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef SEMIHOSTING_GUESTFD_H +#define SEMIHOSTING_GUESTFD_H + +typedef enum GuestFDType { + GuestFDUnused = 0, + GuestFDHost, + GuestFDGDB, + GuestFDStatic, + GuestFDConsole, +} GuestFDType; + +/* + * Guest file descriptors are integer indexes into an array of + * these structures (we will dynamically resize as necessary). + */ +typedef struct GuestFD { + GuestFDType type; + union { + int hostfd; + struct { + const uint8_t *data; + size_t len; + size_t off; + } staticfile; + }; +} GuestFD; + +/* + * For ARM semihosting, we have a separate structure for routing + * data for the console which is outside the guest fd address space. + */ +extern GuestFD console_in_gf; +extern GuestFD console_out_gf; + +/** + * alloc_guestfd: + * + * Allocate an unused GuestFD index. The associated guestfd index + * will still be GuestFDUnused until it is initialized. + */ +int alloc_guestfd(void); + +/** + * dealloc_guestfd: + * @guestfd: GuestFD index + * + * Deallocate a GuestFD index. The associated GuestFD structure + * will be recycled for a subsequent allocation. + */ +void dealloc_guestfd(int guestfd); + +/** + * get_guestfd: + * @guestfd: GuestFD index + * + * Return the GuestFD structure associated with an initialized @guestfd, + * or NULL if it has not been allocated, or hasn't been initialized. + */ +GuestFD *get_guestfd(int guestfd); + +/** + * associate_guestfd: + * @guestfd: GuestFD index + * @hostfd: host file descriptor + * + * Initialize the GuestFD for @guestfd to GuestFDHost using @hostfd. + */ +void associate_guestfd(int guestfd, int hostfd); + +/** + * staticfile_guestfd: + * @guestfd: GuestFD index + * @data: data to be read + * @len: length of @data + * + * Initialize the GuestFD for @guestfd to GuestFDStatic. + * The @len bytes at @data will be returned to the guest on reads. + */ +void staticfile_guestfd(int guestfd, const uint8_t *data, size_t len); + +#endif /* SEMIHOSTING_GUESTFD_H */ diff --git a/include/semihosting/semihost.h b/include/semihosting/semihost.h index 0c55ade3ac..efd2efa25a 100644 --- a/include/semihosting/semihost.h +++ b/include/semihosting/semihost.h @@ -27,7 +27,7 @@ typedef enum SemihostingTarget { } SemihostingTarget; #ifdef CONFIG_USER_ONLY -static inline bool semihosting_enabled(void) +static inline bool semihosting_enabled(bool is_user) { return true; } @@ -51,27 +51,25 @@ static inline const char *semihosting_get_cmdline(void) { return NULL; } - -static inline Chardev *semihosting_get_chardev(void) -{ - return NULL; -} -static inline void qemu_semihosting_console_init(void) -{ -} #else /* !CONFIG_USER_ONLY */ -bool semihosting_enabled(void); +/** + * semihosting_enabled: + * @is_user: true if guest code is in usermode (i.e. not privileged) + * + * Return true if guest code is allowed to make semihosting calls. 
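+ *
+ * For example (sketch), a target that restricts semihosting to privileged
+ * code would pass something like semihosting_enabled(current_el == 0), so
+ * that calls from usermode are only allowed when explicitly enabled.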
+ */ +bool semihosting_enabled(bool is_user); SemihostingTarget semihosting_get_target(void); const char *semihosting_get_arg(int i); int semihosting_get_argc(void); const char *semihosting_get_cmdline(void); void semihosting_arg_fallback(const char *file, const char *cmd); -Chardev *semihosting_get_chardev(void); /* for vl.c hooks */ void qemu_semihosting_enable(void); int qemu_semihosting_config_options(const char *opt); -void qemu_semihosting_connect_chardevs(void); -void qemu_semihosting_console_init(void); +void qemu_semihosting_chardev_init(void); +void qemu_semihosting_console_init(Chardev *); #endif /* CONFIG_USER_ONLY */ +void qemu_semihosting_guestfd_init(void); #endif /* SEMIHOST_H */ diff --git a/include/semihosting/softmmu-uaccess.h b/include/semihosting/softmmu-uaccess.h new file mode 100644 index 0000000000..4f08dfc098 --- /dev/null +++ b/include/semihosting/softmmu-uaccess.h @@ -0,0 +1,59 @@ +/* + * Helper routines to provide target memory access for semihosting + * syscalls in system emulation mode. + * + * Copyright (c) 2007 CodeSourcery. + * + * This code is licensed under the GPL + */ + +#ifndef SEMIHOSTING_SOFTMMU_UACCESS_H +#define SEMIHOSTING_SOFTMMU_UACCESS_H + +#include "cpu.h" + +#define get_user_u64(val, addr) \ + ({ uint64_t val_ = 0; \ + int ret_ = cpu_memory_rw_debug(env_cpu(env), (addr), \ + &val_, sizeof(val_), 0); \ + (val) = tswap64(val_); ret_; }) + +#define get_user_u32(val, addr) \ + ({ uint32_t val_ = 0; \ + int ret_ = cpu_memory_rw_debug(env_cpu(env), (addr), \ + &val_, sizeof(val_), 0); \ + (val) = tswap32(val_); ret_; }) + +#define get_user_u8(val, addr) \ + ({ uint8_t val_ = 0; \ + int ret_ = cpu_memory_rw_debug(env_cpu(env), (addr), \ + &val_, sizeof(val_), 0); \ + (val) = val_; ret_; }) + +#define get_user_ual(arg, p) get_user_u32(arg, p) + +#define put_user_u64(val, addr) \ + ({ uint64_t val_ = tswap64(val); \ + cpu_memory_rw_debug(env_cpu(env), (addr), &val_, sizeof(val_), 1); }) + +#define put_user_u32(val, addr) \ + ({ uint32_t val_ = tswap32(val); \ + cpu_memory_rw_debug(env_cpu(env), (addr), &val_, sizeof(val_), 1); }) + +#define put_user_ual(arg, p) put_user_u32(arg, p) + +void *softmmu_lock_user(CPUArchState *env, target_ulong addr, + target_ulong len, bool copy); +#define lock_user(type, p, len, copy) softmmu_lock_user(env, p, len, copy) + +char *softmmu_lock_user_string(CPUArchState *env, target_ulong addr); +#define lock_user_string(p) softmmu_lock_user_string(env, p) + +void softmmu_unlock_user(CPUArchState *env, void *p, + target_ulong addr, target_ulong len); +#define unlock_user(s, args, len) softmmu_unlock_user(env, s, args, len) + +ssize_t softmmu_strlen_user(CPUArchState *env, target_ulong addr); +#define target_strlen(p) softmmu_strlen_user(env, p) + +#endif /* SEMIHOSTING_SOFTMMU_UACCESS_H */ diff --git a/include/semihosting/syscalls.h b/include/semihosting/syscalls.h new file mode 100644 index 0000000000..3a5ec229eb --- /dev/null +++ b/include/semihosting/syscalls.h @@ -0,0 +1,75 @@ +/* + * Syscall implementations for semihosting. + * + * Copyright (c) 2022 Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef SEMIHOSTING_SYSCALLS_H +#define SEMIHOSTING_SYSCALLS_H + +/* + * Argument loading from the guest is performed by the caller; + * results are returned via the 'complete' callback. + * + * String operands are in address/len pairs. The len argument may be 0 + * (when the semihosting abi does not already provide the length), + * or non-zero (where it should include the terminating zero). 
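+ *
+ * For example (a hypothetical caller): an ABI that already knows the length
+ * of the filename passes fname_len including the terminating NUL, while a
+ * caller with only a guest pointer passes fname_len == 0 and lets the
+ * implementation measure the string itself.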
+ */ + +typedef struct GuestFD GuestFD; + +void semihost_sys_open(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong fname, target_ulong fname_len, + int gdb_flags, int mode); + +void semihost_sys_close(CPUState *cs, gdb_syscall_complete_cb complete, + int fd); + +void semihost_sys_read(CPUState *cs, gdb_syscall_complete_cb complete, + int fd, target_ulong buf, target_ulong len); + +void semihost_sys_read_gf(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, target_ulong buf, target_ulong len); + +void semihost_sys_write(CPUState *cs, gdb_syscall_complete_cb complete, + int fd, target_ulong buf, target_ulong len); + +void semihost_sys_write_gf(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, target_ulong buf, target_ulong len); + +void semihost_sys_lseek(CPUState *cs, gdb_syscall_complete_cb complete, + int fd, int64_t off, int gdb_whence); + +void semihost_sys_isatty(CPUState *cs, gdb_syscall_complete_cb complete, + int fd); + +void semihost_sys_flen(CPUState *cs, gdb_syscall_complete_cb fstat_cb, + gdb_syscall_complete_cb flen_cb, + int fd, target_ulong fstat_addr); + +void semihost_sys_fstat(CPUState *cs, gdb_syscall_complete_cb complete, + int fd, target_ulong addr); + +void semihost_sys_stat(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong fname, target_ulong fname_len, + target_ulong addr); + +void semihost_sys_remove(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong fname, target_ulong fname_len); + +void semihost_sys_rename(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong oname, target_ulong oname_len, + target_ulong nname, target_ulong nname_len); + +void semihost_sys_system(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong cmd, target_ulong cmd_len); + +void semihost_sys_gettimeofday(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong tv_addr, target_ulong tz_addr); + +void semihost_sys_poll_one(CPUState *cs, gdb_syscall_complete_cb complete, + int fd, GIOCondition cond, int timeout); + +#endif /* SEMIHOSTING_SYSCALLS_H */ diff --git a/include/standard-headers/asm-m68k/bootinfo-virt.h b/include/standard-headers/asm-m68k/bootinfo-virt.h index 81be1e0924..75ac6bbd7d 100644 --- a/include/standard-headers/asm-m68k/bootinfo-virt.h +++ b/include/standard-headers/asm-m68k/bootinfo-virt.h @@ -13,6 +13,9 @@ #define BI_VIRT_VIRTIO_BASE 0x8004 #define BI_VIRT_CTRL_BASE 0x8005 +/* No longer used -- replaced with BI_RNG_SEED -- but don't reuse this index: + * #define BI_VIRT_RNG_SEED 0x8006 */ + #define VIRT_BOOTI_VERSION MK_BI_VERSION(2, 0) #endif /* _UAPI_ASM_M68K_BOOTINFO_MAC_H */ diff --git a/include/standard-headers/asm-m68k/bootinfo.h b/include/standard-headers/asm-m68k/bootinfo.h index 7b790e8ec8..b7a8dd2514 100644 --- a/include/standard-headers/asm-m68k/bootinfo.h +++ b/include/standard-headers/asm-m68k/bootinfo.h @@ -57,7 +57,13 @@ struct mem_info { /* (struct mem_info) */ #define BI_COMMAND_LINE 0x0007 /* kernel command line parameters */ /* (string) */ - +/* + * A random seed used to initialize the RNG. 
Record format: + * + * - length [ 2 bytes, 16-bit big endian ] + * - seed data [ `length` bytes, padded to preserve 4-byte struct alignment ] + */ +#define BI_RNG_SEED 0x0008 /* * Linux/m68k Architectures (BI_MACHTYPE) diff --git a/include/standard-headers/asm-x86/bootparam.h b/include/standard-headers/asm-x86/bootparam.h index 072e2ed546..0b06d2bff1 100644 --- a/include/standard-headers/asm-x86/bootparam.h +++ b/include/standard-headers/asm-x86/bootparam.h @@ -10,11 +10,13 @@ #define SETUP_EFI 4 #define SETUP_APPLE_PROPERTIES 5 #define SETUP_JAILHOUSE 6 +#define SETUP_CC_BLOB 7 +#define SETUP_IMA 8 +#define SETUP_RNG_SEED 9 +#define SETUP_ENUM_MAX SETUP_RNG_SEED #define SETUP_INDIRECT (1<<31) - -/* SETUP_INDIRECT | max(SETUP_*) */ -#define SETUP_TYPE_MAX (SETUP_INDIRECT | SETUP_JAILHOUSE) +#define SETUP_TYPE_MAX (SETUP_ENUM_MAX | SETUP_INDIRECT) /* ram_size flags */ #define RAMDISK_IMAGE_START_MASK 0x07FF diff --git a/include/standard-headers/asm-x86/kvm_para.h b/include/standard-headers/asm-x86/kvm_para.h index 204cfb8640..f0235e58a1 100644 --- a/include/standard-headers/asm-x86/kvm_para.h +++ b/include/standard-headers/asm-x86/kvm_para.h @@ -8,6 +8,7 @@ * should be used to determine that a VM is running under KVM. */ #define KVM_CPUID_SIGNATURE 0x40000000 +#define KVM_SIGNATURE "KVMKVMKVM\0\0\0" /* This CPUID returns two feature bitmaps in eax, edx. Before enabling * a particular paravirtualization, the appropriate feature bit should diff --git a/include/standard-headers/drm/drm_fourcc.h b/include/standard-headers/drm/drm_fourcc.h index 352b51fd0a..48b620cbef 100644 --- a/include/standard-headers/drm/drm_fourcc.h +++ b/include/standard-headers/drm/drm_fourcc.h @@ -103,6 +103,12 @@ extern "C" { /* 8 bpp Red */ #define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */ +/* 10 bpp Red */ +#define DRM_FORMAT_R10 fourcc_code('R', '1', '0', ' ') /* [15:0] x:R 6:10 little endian */ + +/* 12 bpp Red */ +#define DRM_FORMAT_R12 fourcc_code('R', '1', '2', ' ') /* [15:0] x:R 4:12 little endian */ + /* 16 bpp Red */ #define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */ @@ -307,6 +313,13 @@ extern "C" { */ #define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */ +/* 2 plane YCbCr420. + * 3 10 bit components and 2 padding bits packed into 4 bytes. + * index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian + * index 1 = Cr:Cb plane, [63:0] x:Cr2:Cb2:Cr1:x:Cb1:Cr0:Cb0 [2:10:10:10:2:10:10:10] little endian + */ +#define DRM_FORMAT_P030 fourcc_code('P', '0', '3', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel packed */ + /* 3 plane non-subsampled (444) YCbCr * 16 bits per component, but only 10 bits are used and 6 bits are padded * index 0: Y plane, [15:0] Y:x [10:6] little endian @@ -372,6 +385,12 @@ extern "C" { #define DRM_FORMAT_RESERVED ((1ULL << 56) - 1) +#define fourcc_mod_get_vendor(modifier) \ + (((modifier) >> 56) & 0xff) + +#define fourcc_mod_is_vendor(modifier, vendor) \ + (fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_## vendor) + #define fourcc_mod_code(vendor, val) \ ((((uint64_t)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL)) @@ -539,7 +558,7 @@ extern "C" { * * The main surface is Y-tiled and is at plane index 0 whereas CCS is linear * and at index 1. The clear color is stored at index 2, and the pitch should - * be ignored. The clear color structure is 256 bits. The first 128 bits + * be 64 bytes aligned. The clear color structure is 256 bits. 
The first 128 bits * represents Raw Clear Color Red, Green, Blue and Alpha color each represented * by 32 bits. The raw clear color is consumed by the 3d engine and generates * the converted clear color of size 64 bits. The first 32 bits store the Lower @@ -552,6 +571,53 @@ extern "C" { */ #define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC fourcc_mod_code(INTEL, 8) +/* + * Intel Tile 4 layout + * + * This is a tiled layout using 4KB tiles in a row-major layout. It has the same + * shape as Tile Y at two granularities: 4KB (128B x 32) and 64B (16B x 4). It + * only differs from Tile Y at the 256B granularity in between. At this + * granularity, Tile Y has a shape of 16B x 32 rows, but this tiling has a shape + * of 64B x 8 rows. + */ +#define I915_FORMAT_MOD_4_TILED fourcc_mod_code(INTEL, 9) + +/* + * Intel color control surfaces (CCS) for DG2 render compression. + * + * The main surface is Tile 4 and at plane index 0. The CCS data is stored + * outside of the GEM object in a reserved memory area dedicated for the + * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The + * main surface pitch is required to be a multiple of four Tile 4 widths. + */ +#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS fourcc_mod_code(INTEL, 10) + +/* + * Intel color control surfaces (CCS) for DG2 media compression. + * + * The main surface is Tile 4 and at plane index 0. For semi-planar formats + * like NV12, the Y and UV planes are Tile 4 and are located at plane indices + * 0 and 1, respectively. The CCS for all planes are stored outside of the + * GEM object in a reserved memory area dedicated for the storage of the + * CCS data for all RC/RC_CC/MC compressible GEM objects. The main surface + * pitch is required to be a multiple of four Tile 4 widths. + */ +#define I915_FORMAT_MOD_4_TILED_DG2_MC_CCS fourcc_mod_code(INTEL, 11) + +/* + * Intel Color Control Surface with Clear Color (CCS) for DG2 render compression. + * + * The main surface is Tile 4 and at plane index 0. The CCS data is stored + * outside of the GEM object in a reserved memory area dedicated for the + * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The + * main surface pitch is required to be a multiple of four Tile 4 widths. The + * clear color is stored at plane index 1 and the pitch should be 64 bytes + * aligned. The format of the 256 bits of clear color data matches the one used + * for the I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC modifier, see its description + * for details. + */ +#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12) + /* * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks * @@ -589,6 +655,28 @@ extern "C" { */ #define DRM_FORMAT_MOD_QCOM_COMPRESSED fourcc_mod_code(QCOM, 1) +/* + * Qualcomm Tiled Format + * + * Similar to DRM_FORMAT_MOD_QCOM_COMPRESSED but not compressed. + * Implementation may be platform and base-format specific. + * + * Each macrotile consists of m x n (mostly 4 x 4) tiles. + * Pixel data pitch/stride is aligned with macrotile width. + * Pixel data height is aligned with macrotile height. + * Entire pixel data buffer is aligned with 4k(bytes). + */ +#define DRM_FORMAT_MOD_QCOM_TILED3 fourcc_mod_code(QCOM, 3) + +/* + * Qualcomm Alternate Tiled Format + * + * Alternate tiled format typically only used within GMEM. + * Implementation may be platform and base-format specific. 
+ */ +#define DRM_FORMAT_MOD_QCOM_TILED2 fourcc_mod_code(QCOM, 2) + + /* Vivante framebuffer modifiers */ /* @@ -841,6 +929,10 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier) * and UV. Some SAND-using hardware stores UV in a separate tiled * image from Y to reduce the column height, which is not supported * with these modifiers. + * + * The DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT modifier is also + * supported for DRM_FORMAT_P030 where the columns remain as 128 bytes + * wide, but as this is a 10 bpp format that translates to 96 pixels. */ #define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \ @@ -899,9 +991,9 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier) /* * The top 4 bits (out of the 56 bits alloted for specifying vendor specific - * modifiers) denote the category for modifiers. Currently we have only two - * categories of modifiers ie AFBC and MISC. We can have a maximum of sixteen - * different categories. + * modifiers) denote the category for modifiers. Currently we have three + * categories of modifiers ie AFBC, MISC and AFRC. We can have a maximum of + * sixteen different categories. */ #define DRM_FORMAT_MOD_ARM_CODE(__type, __val) \ fourcc_mod_code(ARM, ((uint64_t)(__type) << 52) | ((__val) & 0x000fffffffffffffULL)) @@ -1016,6 +1108,109 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier) */ #define AFBC_FORMAT_MOD_USM (1ULL << 12) +/* + * Arm Fixed-Rate Compression (AFRC) modifiers + * + * AFRC is a proprietary fixed rate image compression protocol and format, + * designed to provide guaranteed bandwidth and memory footprint + * reductions in graphics and media use-cases. + * + * AFRC buffers consist of one or more planes, with the same components + * and meaning as an uncompressed buffer using the same pixel format. + * + * Within each plane, the pixel/luma/chroma values are grouped into + * "coding unit" blocks which are individually compressed to a + * fixed size (in bytes). All coding units within a given plane of a buffer + * store the same number of values, and have the same compressed size. + * + * The coding unit size is configurable, allowing different rates of compression. + * + * The start of each AFRC buffer plane must be aligned to an alignment granule which + * depends on the coding unit size. + * + * Coding Unit Size Plane Alignment + * ---------------- --------------- + * 16 bytes 1024 bytes + * 24 bytes 512 bytes + * 32 bytes 2048 bytes + * + * Coding units are grouped into paging tiles. AFRC buffer dimensions must be aligned + * to a multiple of the paging tile dimensions. + * The dimensions of each paging tile depend on whether the buffer is optimised for + * scanline (SCAN layout) or rotated (ROT layout) access. + * + * Layout Paging Tile Width Paging Tile Height + * ------ ----------------- ------------------ + * SCAN 16 coding units 4 coding units + * ROT 8 coding units 8 coding units + * + * The dimensions of each coding unit depend on the number of components + * in the compressed plane and whether the buffer is optimised for + * scanline (SCAN layout) or rotated (ROT layout) access. 
+ * + * Number of Components in Plane Layout Coding Unit Width Coding Unit Height + * ----------------------------- --------- ----------------- ------------------ + * 1 SCAN 16 samples 4 samples + * Example: 16x4 luma samples in a 'Y' plane + * 16x4 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer + * ----------------------------- --------- ----------------- ------------------ + * 1 ROT 8 samples 8 samples + * Example: 8x8 luma samples in a 'Y' plane + * 8x8 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer + * ----------------------------- --------- ----------------- ------------------ + * 2 DONT CARE 8 samples 4 samples + * Example: 8x4 chroma pairs in the 'UV' plane of a semi-planar YUV buffer + * ----------------------------- --------- ----------------- ------------------ + * 3 DONT CARE 4 samples 4 samples + * Example: 4x4 pixels in an RGB buffer without alpha + * ----------------------------- --------- ----------------- ------------------ + * 4 DONT CARE 4 samples 4 samples + * Example: 4x4 pixels in an RGB buffer with alpha + */ + +#define DRM_FORMAT_MOD_ARM_TYPE_AFRC 0x02 + +#define DRM_FORMAT_MOD_ARM_AFRC(__afrc_mode) \ + DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_AFRC, __afrc_mode) + +/* + * AFRC coding unit size modifier. + * + * Indicates the number of bytes used to store each compressed coding unit for + * one or more planes in an AFRC encoded buffer. The coding unit size for chrominance + * is the same for both Cb and Cr, which may be stored in separate planes. + * + * AFRC_FORMAT_MOD_CU_SIZE_P0 indicates the number of bytes used to store + * each compressed coding unit in the first plane of the buffer. For RGBA buffers + * this is the only plane, while for semi-planar and fully-planar YUV buffers, + * this corresponds to the luma plane. + * + * AFRC_FORMAT_MOD_CU_SIZE_P12 indicates the number of bytes used to store + * each compressed coding unit in the second and third planes in the buffer. + * For semi-planar and fully-planar YUV buffers, this corresponds to the chroma plane(s). + * + * For single-plane buffers, AFRC_FORMAT_MOD_CU_SIZE_P0 must be specified + * and AFRC_FORMAT_MOD_CU_SIZE_P12 must be zero. + * For semi-planar and fully-planar buffers, both AFRC_FORMAT_MOD_CU_SIZE_P0 and + * AFRC_FORMAT_MOD_CU_SIZE_P12 must be specified. + */ +#define AFRC_FORMAT_MOD_CU_SIZE_MASK 0xf +#define AFRC_FORMAT_MOD_CU_SIZE_16 (1ULL) +#define AFRC_FORMAT_MOD_CU_SIZE_24 (2ULL) +#define AFRC_FORMAT_MOD_CU_SIZE_32 (3ULL) + +#define AFRC_FORMAT_MOD_CU_SIZE_P0(__afrc_cu_size) (__afrc_cu_size) +#define AFRC_FORMAT_MOD_CU_SIZE_P12(__afrc_cu_size) ((__afrc_cu_size) << 4) + +/* + * AFRC scanline memory layout. + * + * Indicates if the buffer uses the scanline-optimised layout + * for an AFRC encoded buffer, otherwise, it uses the rotation-optimised layout. + * The memory layout is the same for all planes. 
+ */ +#define AFRC_FORMAT_MOD_LAYOUT_SCAN (1ULL << 8) + /* * Arm 16x16 Block U-Interleaved modifier * @@ -1167,6 +1362,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier) #define AMD_FMT_MOD_TILE_VER_GFX9 1 #define AMD_FMT_MOD_TILE_VER_GFX10 2 #define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3 +#define AMD_FMT_MOD_TILE_VER_GFX11 4 /* * 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical @@ -1182,6 +1378,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier) #define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25 #define AMD_FMT_MOD_TILE_GFX9_64K_D_X 26 #define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27 +#define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31 #define AMD_FMT_MOD_DCC_BLOCK_64B 0 #define AMD_FMT_MOD_DCC_BLOCK_128B 1 diff --git a/include/standard-headers/linux/ethtool.h b/include/standard-headers/linux/ethtool.h index 053d3fafdf..4537da20cc 100644 --- a/include/standard-headers/linux/ethtool.h +++ b/include/standard-headers/linux/ethtool.h @@ -231,6 +231,7 @@ enum tunable_id { ETHTOOL_RX_COPYBREAK, ETHTOOL_TX_COPYBREAK, ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */ + ETHTOOL_TX_COPYBREAK_BUF_SIZE, /* * Add your fresh new tunable attribute above and remember to update * tunable_strings[] in net/ethtool/common.c @@ -256,7 +257,7 @@ struct ethtool_tunable { uint32_t id; uint32_t type_id; uint32_t len; - void *data[0]; + void *data[]; }; #define DOWNSHIFT_DEV_DEFAULT_COUNT 0xff @@ -321,7 +322,7 @@ struct ethtool_regs { uint32_t cmd; uint32_t version; uint32_t len; - uint8_t data[0]; + uint8_t data[]; }; /** @@ -347,7 +348,7 @@ struct ethtool_eeprom { uint32_t magic; uint32_t offset; uint32_t len; - uint8_t data[0]; + uint8_t data[]; }; /** @@ -603,6 +604,7 @@ enum ethtool_link_ext_state { ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, ETHTOOL_LINK_EXT_STATE_OVERHEAT, + ETHTOOL_LINK_EXT_STATE_MODULE, }; /* More information in addition to ETHTOOL_LINK_EXT_STATE_AUTONEG. */ @@ -639,6 +641,8 @@ enum ethtool_link_ext_substate_link_logical_mismatch { enum ethtool_link_ext_substate_bad_signal_integrity { ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1, ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS, }; /* More information in addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE. */ @@ -647,6 +651,11 @@ enum ethtool_link_ext_substate_cable_issue { ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE, }; +/* More information in addition to ETHTOOL_LINK_EXT_STATE_MODULE. */ +enum ethtool_link_ext_substate_module { + ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY = 1, +}; + #define ETH_GSTRING_LEN 32 /** @@ -704,6 +713,29 @@ enum ethtool_stringset { ETH_SS_COUNT }; +/** + * enum ethtool_module_power_mode_policy - plug-in module power mode policy + * @ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH: Module is always in high power mode. + * @ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO: Module is transitioned by the host + * to high power mode when the first port using it is put administratively + * up and to low power mode when the last port using it is put + * administratively down. + */ +enum ethtool_module_power_mode_policy { + ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH = 1, + ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO, +}; + +/** + * enum ethtool_module_power_mode - plug-in module power mode + * @ETHTOOL_MODULE_POWER_MODE_LOW: Module is in low power mode. + * @ETHTOOL_MODULE_POWER_MODE_HIGH: Module is in high power mode. 
+ */ +enum ethtool_module_power_mode { + ETHTOOL_MODULE_POWER_MODE_LOW = 1, + ETHTOOL_MODULE_POWER_MODE_HIGH, +}; + /** * struct ethtool_gstrings - string set for data tagging * @cmd: Command number = %ETHTOOL_GSTRINGS @@ -720,7 +752,7 @@ struct ethtool_gstrings { uint32_t cmd; uint32_t string_set; uint32_t len; - uint8_t data[0]; + uint8_t data[]; }; /** @@ -745,7 +777,7 @@ struct ethtool_sset_info { uint32_t cmd; uint32_t reserved; uint64_t sset_mask; - uint32_t data[0]; + uint32_t data[]; }; /** @@ -785,7 +817,7 @@ struct ethtool_test { uint32_t flags; uint32_t reserved; uint32_t len; - uint64_t data[0]; + uint64_t data[]; }; /** @@ -802,7 +834,7 @@ struct ethtool_test { struct ethtool_stats { uint32_t cmd; uint32_t n_stats; - uint64_t data[0]; + uint64_t data[]; }; /** @@ -819,7 +851,7 @@ struct ethtool_stats { struct ethtool_perm_addr { uint32_t cmd; uint32_t size; - uint8_t data[0]; + uint8_t data[]; }; /* boolean flags controlling per-interface behavior characteristics. @@ -1128,7 +1160,7 @@ struct ethtool_rxnfc { struct ethtool_rxfh_indir { uint32_t cmd; uint32_t size; - uint32_t ring_index[0]; + uint32_t ring_index[]; }; /** @@ -1169,7 +1201,7 @@ struct ethtool_rxfh { uint8_t hfunc; uint8_t rsvd8[3]; uint32_t rsvd32; - uint32_t rss_config[0]; + uint32_t rss_config[]; }; #define ETH_RXFH_CONTEXT_ALLOC 0xffffffff #define ETH_RXFH_INDIR_NO_CHANGE 0xffffffff @@ -1254,7 +1286,7 @@ struct ethtool_dump { uint32_t version; uint32_t flag; uint32_t len; - uint8_t data[0]; + uint8_t data[]; }; #define ETH_FW_DUMP_DISABLE 0 @@ -1286,7 +1318,7 @@ struct ethtool_get_features_block { struct ethtool_gfeatures { uint32_t cmd; uint32_t size; - struct ethtool_get_features_block features[0]; + struct ethtool_get_features_block features[]; }; /** @@ -1308,7 +1340,7 @@ struct ethtool_set_features_block { struct ethtool_sfeatures { uint32_t cmd; uint32_t size; - struct ethtool_set_features_block features[0]; + struct ethtool_set_features_block features[]; }; /** @@ -1659,6 +1691,7 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89, ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90, ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91, + ETHTOOL_LINK_MODE_10baseT1L_Full_BIT = 92, /* must be last entry */ __ETHTOOL_LINK_MODE_MASK_NBITS }; @@ -2054,7 +2087,7 @@ struct ethtool_link_settings { uint8_t master_slave_state; uint8_t reserved1[1]; uint32_t reserved[7]; - uint32_t link_mode_masks[0]; + uint32_t link_mode_masks[]; /* layout of link_mode_masks fields: * uint32_t map_supported[link_mode_masks_nwords]; * uint32_t map_advertising[link_mode_masks_nwords]; diff --git a/include/standard-headers/linux/fuse.h b/include/standard-headers/linux/fuse.h index cce105bfba..bda06258be 100644 --- a/include/standard-headers/linux/fuse.h +++ b/include/standard-headers/linux/fuse.h @@ -181,6 +181,19 @@ * - add FUSE_OPEN_KILL_SUIDGID * - extend fuse_setxattr_in, add FUSE_SETXATTR_EXT * - add FUSE_SETXATTR_ACL_KILL_SGID + * + * 7.34 + * - add FUSE_SYNCFS + * + * 7.35 + * - add FOPEN_NOFLUSH + * + * 7.36 + * - extend fuse_init_in with reserved fields, add FUSE_INIT_EXT init flag + * - add flags2 to fuse_init_in and fuse_init_out + * - add FUSE_SECURITY_CTX init flag + * - add security context to create, mkdir, symlink, and mknod requests + * - add FUSE_HAS_INODE_DAX, FUSE_ATTR_DAX */ #ifndef _LINUX_FUSE_H @@ -212,7 +225,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 33 +#define FUSE_KERNEL_MINOR_VERSION 36 /** The node ID of the root 
inode */ #define FUSE_ROOT_ID 1 @@ -283,12 +296,14 @@ struct fuse_file_lock { * FOPEN_NONSEEKABLE: the file is not seekable * FOPEN_CACHE_DIR: allow caching this directory * FOPEN_STREAM: the file is stream-like (no file position at all) + * FOPEN_NOFLUSH: don't flush data cache on close (unless FUSE_WRITEBACK_CACHE) */ #define FOPEN_DIRECT_IO (1 << 0) #define FOPEN_KEEP_CACHE (1 << 1) #define FOPEN_NONSEEKABLE (1 << 2) #define FOPEN_CACHE_DIR (1 << 3) #define FOPEN_STREAM (1 << 4) +#define FOPEN_NOFLUSH (1 << 5) /** * INIT request/reply flags @@ -329,6 +344,11 @@ struct fuse_file_lock { * write/truncate sgid is killed only if file has group * execute permission. (Same as Linux VFS behavior). * FUSE_SETXATTR_EXT: Server supports extended struct fuse_setxattr_in + * FUSE_INIT_EXT: extended fuse_init_in request + * FUSE_INIT_RESERVED: reserved, do not use + * FUSE_SECURITY_CTX: add security context to create, mkdir, symlink, and + * mknod + * FUSE_HAS_INODE_DAX: use per inode DAX */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -360,6 +380,11 @@ struct fuse_file_lock { #define FUSE_SUBMOUNTS (1 << 27) #define FUSE_HANDLE_KILLPRIV_V2 (1 << 28) #define FUSE_SETXATTR_EXT (1 << 29) +#define FUSE_INIT_EXT (1 << 30) +#define FUSE_INIT_RESERVED (1 << 31) +/* bits 32..63 get shifted down 32 bits into the flags2 field */ +#define FUSE_SECURITY_CTX (1ULL << 32) +#define FUSE_HAS_INODE_DAX (1ULL << 33) /** * CUSE INIT request/reply flags @@ -442,8 +467,10 @@ struct fuse_file_lock { * fuse_attr flags * * FUSE_ATTR_SUBMOUNT: Object is a submount root + * FUSE_ATTR_DAX: Enable DAX for this file in per inode DAX mode */ #define FUSE_ATTR_SUBMOUNT (1 << 0) +#define FUSE_ATTR_DAX (1 << 1) /** * Open flags @@ -505,6 +532,7 @@ enum fuse_opcode { FUSE_COPY_FILE_RANGE = 47, FUSE_SETUPMAPPING = 48, FUSE_REMOVEMAPPING = 49, + FUSE_SYNCFS = 50, /* CUSE specific operations */ CUSE_INIT = 4096, @@ -728,6 +756,8 @@ struct fuse_init_in { uint32_t minor; uint32_t max_readahead; uint32_t flags; + uint32_t flags2; + uint32_t unused[11]; }; #define FUSE_COMPAT_INIT_OUT_SIZE 8 @@ -744,7 +774,8 @@ struct fuse_init_out { uint32_t time_gran; uint16_t max_pages; uint16_t map_alignment; - uint32_t unused[8]; + uint32_t flags2; + uint32_t unused[7]; }; #define CUSE_INIT_INFO_MAX 4096 @@ -852,9 +883,12 @@ struct fuse_dirent { char name[]; }; -#define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name) -#define FUSE_DIRENT_ALIGN(x) \ +/* Align variable length records to 64bit boundary */ +#define FUSE_REC_ALIGN(x) \ (((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1)) + +#define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name) +#define FUSE_DIRENT_ALIGN(x) FUSE_REC_ALIGN(x) #define FUSE_DIRENT_SIZE(d) \ FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen) @@ -967,4 +1001,30 @@ struct fuse_removemapping_one { #define FUSE_REMOVEMAPPING_MAX_ENTRY \ (PAGE_SIZE / sizeof(struct fuse_removemapping_one)) +struct fuse_syncfs_in { + uint64_t padding; +}; + +/* + * For each security context, send fuse_secctx with size of security context + * fuse_secctx will be followed by security context name and this in turn + * will be followed by actual context label. + * fuse_secctx, name, context + */ +struct fuse_secctx { + uint32_t size; + uint32_t padding; +}; + +/* + * Contains the information about how many fuse_secctx structures are being + * sent and what's the total size of all security contexts (including + * size of fuse_secctx_header). 
+ * + */ +struct fuse_secctx_header { + uint32_t size; + uint32_t nr_secctx; +}; + #endif /* _LINUX_FUSE_H */ diff --git a/include/standard-headers/linux/input-event-codes.h b/include/standard-headers/linux/input-event-codes.h index b5e86b40ab..50790aee5a 100644 --- a/include/standard-headers/linux/input-event-codes.h +++ b/include/standard-headers/linux/input-event-codes.h @@ -278,7 +278,8 @@ #define KEY_PAUSECD 201 #define KEY_PROG3 202 #define KEY_PROG4 203 -#define KEY_DASHBOARD 204 /* AL Dashboard */ +#define KEY_ALL_APPLICATIONS 204 /* AC Desktop Show All Applications */ +#define KEY_DASHBOARD KEY_ALL_APPLICATIONS #define KEY_SUSPEND 205 #define KEY_CLOSE 206 /* AC Close */ #define KEY_PLAY 207 @@ -612,6 +613,7 @@ #define KEY_ASSISTANT 0x247 /* AL Context-aware desktop assistant */ #define KEY_KBD_LAYOUT_NEXT 0x248 /* AC Next Keyboard Layout Select */ #define KEY_EMOJI_PICKER 0x249 /* Show/hide emoji picker (HUTRR101) */ +#define KEY_DICTATE 0x24a /* Start or Stop Voice Dictation Session (HUTRR99) */ #define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */ #define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */ @@ -660,6 +662,27 @@ /* Select an area of screen to be copied */ #define KEY_SELECTIVE_SCREENSHOT 0x27a +/* Move the focus to the next or previous user controllable element within a UI container */ +#define KEY_NEXT_ELEMENT 0x27b +#define KEY_PREVIOUS_ELEMENT 0x27c + +/* Toggle Autopilot engagement */ +#define KEY_AUTOPILOT_ENGAGE_TOGGLE 0x27d + +/* Shortcut Keys */ +#define KEY_MARK_WAYPOINT 0x27e +#define KEY_SOS 0x27f +#define KEY_NAV_CHART 0x280 +#define KEY_FISHING_CHART 0x281 +#define KEY_SINGLE_RANGE_RADAR 0x282 +#define KEY_DUAL_RANGE_RADAR 0x283 +#define KEY_RADAR_OVERLAY 0x284 +#define KEY_TRADITIONAL_SONAR 0x285 +#define KEY_CLEARVU_SONAR 0x286 +#define KEY_SIDEVU_SONAR 0x287 +#define KEY_NAV_INFO 0x288 +#define KEY_BRIGHTNESS_MENU 0x289 + /* * Some keyboards have keys which do not have a defined meaning, these keys * are intended to be programmed / bound to macros by the user. For most diff --git a/include/standard-headers/linux/input.h b/include/standard-headers/linux/input.h index 7822c24178..942ea6aaa9 100644 --- a/include/standard-headers/linux/input.h +++ b/include/standard-headers/linux/input.h @@ -75,10 +75,13 @@ struct input_id { * Note that input core does not clamp reported values to the * [minimum, maximum] limits, such task is left to userspace. * - * The default resolution for main axes (ABS_X, ABS_Y, ABS_Z) - * is reported in units per millimeter (units/mm), resolution - * for rotational axes (ABS_RX, ABS_RY, ABS_RZ) is reported - * in units per radian. + * The default resolution for main axes (ABS_X, ABS_Y, ABS_Z, + * ABS_MT_POSITION_X, ABS_MT_POSITION_Y) is reported in units + * per millimeter (units/mm), resolution for rotational axes + * (ABS_RX, ABS_RY, ABS_RZ) is reported in units per radian. + * The resolution for the size axes (ABS_MT_TOUCH_MAJOR, + * ABS_MT_TOUCH_MINOR, ABS_MT_WIDTH_MAJOR, ABS_MT_WIDTH_MINOR) + * is reported in units per millimeter (units/mm). * When INPUT_PROP_ACCELEROMETER is set the resolution changes. 
* The main axes (ABS_X, ABS_Y, ABS_Z) are then reported in * units per g (units/g) and in units per degree per second @@ -268,6 +271,7 @@ struct input_mask { #define BUS_RMI 0x1D #define BUS_CEC 0x1E #define BUS_INTEL_ISHTP 0x1F +#define BUS_AMD_SFH 0x20 /* * MT_TOOL types diff --git a/include/standard-headers/linux/pci_regs.h b/include/standard-headers/linux/pci_regs.h index e709ae8235..57b8e2ffb1 100644 --- a/include/standard-headers/linux/pci_regs.h +++ b/include/standard-headers/linux/pci_regs.h @@ -301,23 +301,23 @@ #define PCI_SID_ESR_FIC 0x20 /* First In Chassis Flag */ #define PCI_SID_CHASSIS_NR 3 /* Chassis Number */ -/* Message Signalled Interrupt registers */ +/* Message Signaled Interrupt registers */ -#define PCI_MSI_FLAGS 2 /* Message Control */ +#define PCI_MSI_FLAGS 0x02 /* Message Control */ #define PCI_MSI_FLAGS_ENABLE 0x0001 /* MSI feature enabled */ #define PCI_MSI_FLAGS_QMASK 0x000e /* Maximum queue size available */ #define PCI_MSI_FLAGS_QSIZE 0x0070 /* Message queue size configured */ #define PCI_MSI_FLAGS_64BIT 0x0080 /* 64-bit addresses allowed */ #define PCI_MSI_FLAGS_MASKBIT 0x0100 /* Per-vector masking capable */ #define PCI_MSI_RFU 3 /* Rest of capability flags */ -#define PCI_MSI_ADDRESS_LO 4 /* Lower 32 bits */ -#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */ -#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */ -#define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */ -#define PCI_MSI_PENDING_32 16 /* Pending intrs for 32-bit devices */ -#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */ -#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */ -#define PCI_MSI_PENDING_64 20 /* Pending intrs for 64-bit devices */ +#define PCI_MSI_ADDRESS_LO 0x04 /* Lower 32 bits */ +#define PCI_MSI_ADDRESS_HI 0x08 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */ +#define PCI_MSI_DATA_32 0x08 /* 16 bits of data for 32-bit devices */ +#define PCI_MSI_MASK_32 0x0c /* Mask bits register for 32-bit devices */ +#define PCI_MSI_PENDING_32 0x10 /* Pending intrs for 32-bit devices */ +#define PCI_MSI_DATA_64 0x0c /* 16 bits of data for 64-bit devices */ +#define PCI_MSI_MASK_64 0x10 /* Mask bits register for 64-bit devices */ +#define PCI_MSI_PENDING_64 0x14 /* Pending intrs for 64-bit devices */ /* MSI-X registers (in MSI-X capability) */ #define PCI_MSIX_FLAGS 2 /* Message Control */ @@ -335,10 +335,10 @@ /* MSI-X Table entry format (in memory mapped by a BAR) */ #define PCI_MSIX_ENTRY_SIZE 16 -#define PCI_MSIX_ENTRY_LOWER_ADDR 0 /* Message Address */ -#define PCI_MSIX_ENTRY_UPPER_ADDR 4 /* Message Upper Address */ -#define PCI_MSIX_ENTRY_DATA 8 /* Message Data */ -#define PCI_MSIX_ENTRY_VECTOR_CTRL 12 /* Vector Control */ +#define PCI_MSIX_ENTRY_LOWER_ADDR 0x0 /* Message Address */ +#define PCI_MSIX_ENTRY_UPPER_ADDR 0x4 /* Message Upper Address */ +#define PCI_MSIX_ENTRY_DATA 0x8 /* Message Data */ +#define PCI_MSIX_ENTRY_VECTOR_CTRL 0xc /* Vector Control */ #define PCI_MSIX_ENTRY_CTRL_MASKBIT 0x00000001 /* CompactPCI Hotswap Register */ @@ -470,7 +470,7 @@ /* PCI Express capability registers */ -#define PCI_EXP_FLAGS 2 /* Capabilities register */ +#define PCI_EXP_FLAGS 0x02 /* Capabilities register */ #define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ #define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ #define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ @@ -484,7 +484,7 @@ #define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ #define PCI_EXP_FLAGS_SLOT 0x0100 /* 
Slot implemented */ #define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */ -#define PCI_EXP_DEVCAP 4 /* Device capabilities */ +#define PCI_EXP_DEVCAP 0x04 /* Device capabilities */ #define PCI_EXP_DEVCAP_PAYLOAD 0x00000007 /* Max_Payload_Size */ #define PCI_EXP_DEVCAP_PHANTOM 0x00000018 /* Phantom functions */ #define PCI_EXP_DEVCAP_EXT_TAG 0x00000020 /* Extended tags */ @@ -497,13 +497,19 @@ #define PCI_EXP_DEVCAP_PWR_VAL 0x03fc0000 /* Slot Power Limit Value */ #define PCI_EXP_DEVCAP_PWR_SCL 0x0c000000 /* Slot Power Limit Scale */ #define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */ -#define PCI_EXP_DEVCTL 8 /* Device Control */ +#define PCI_EXP_DEVCTL 0x08 /* Device Control */ #define PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */ #define PCI_EXP_DEVCTL_NFERE 0x0002 /* Non-Fatal Error Reporting Enable */ #define PCI_EXP_DEVCTL_FERE 0x0004 /* Fatal Error Reporting Enable */ #define PCI_EXP_DEVCTL_URRE 0x0008 /* Unsupported Request Reporting En. */ #define PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */ #define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */ +#define PCI_EXP_DEVCTL_PAYLOAD_128B 0x0000 /* 128 Bytes */ +#define PCI_EXP_DEVCTL_PAYLOAD_256B 0x0020 /* 256 Bytes */ +#define PCI_EXP_DEVCTL_PAYLOAD_512B 0x0040 /* 512 Bytes */ +#define PCI_EXP_DEVCTL_PAYLOAD_1024B 0x0060 /* 1024 Bytes */ +#define PCI_EXP_DEVCTL_PAYLOAD_2048B 0x0080 /* 2048 Bytes */ +#define PCI_EXP_DEVCTL_PAYLOAD_4096B 0x00a0 /* 4096 Bytes */ #define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */ #define PCI_EXP_DEVCTL_PHANTOM 0x0200 /* Phantom Functions Enable */ #define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */ @@ -516,7 +522,7 @@ #define PCI_EXP_DEVCTL_READRQ_2048B 0x4000 /* 2048 Bytes */ #define PCI_EXP_DEVCTL_READRQ_4096B 0x5000 /* 4096 Bytes */ #define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ -#define PCI_EXP_DEVSTA 10 /* Device Status */ +#define PCI_EXP_DEVSTA 0x0a /* Device Status */ #define PCI_EXP_DEVSTA_CED 0x0001 /* Correctable Error Detected */ #define PCI_EXP_DEVSTA_NFED 0x0002 /* Non-Fatal Error Detected */ #define PCI_EXP_DEVSTA_FED 0x0004 /* Fatal Error Detected */ @@ -524,7 +530,7 @@ #define PCI_EXP_DEVSTA_AUXPD 0x0010 /* AUX Power Detected */ #define PCI_EXP_DEVSTA_TRPND 0x0020 /* Transactions Pending */ #define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V1 12 /* v1 endpoints without link end here */ -#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ +#define PCI_EXP_LNKCAP 0x0c /* Link Capabilities */ #define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */ #define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */ #define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */ @@ -543,7 +549,7 @@ #define PCI_EXP_LNKCAP_DLLLARC 0x00100000 /* Data Link Layer Link Active Reporting Capable */ #define PCI_EXP_LNKCAP_LBNC 0x00200000 /* Link Bandwidth Notification Capability */ #define PCI_EXP_LNKCAP_PN 0xff000000 /* Port Number */ -#define PCI_EXP_LNKCTL 16 /* Link Control */ +#define PCI_EXP_LNKCTL 0x10 /* Link Control */ #define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ #define PCI_EXP_LNKCTL_ASPM_L0S 0x0001 /* L0s Enable */ #define PCI_EXP_LNKCTL_ASPM_L1 0x0002 /* L1 Enable */ @@ -556,7 +562,7 @@ #define PCI_EXP_LNKCTL_HAWD 0x0200 /* Hardware Autonomous Width Disable */ #define PCI_EXP_LNKCTL_LBMIE 0x0400 /* Link Bandwidth Management Interrupt Enable */ #define PCI_EXP_LNKCTL_LABIE 0x0800 /* Link Autonomous Bandwidth Interrupt Enable */ -#define 
PCI_EXP_LNKSTA 18 /* Link Status */ +#define PCI_EXP_LNKSTA 0x12 /* Link Status */ #define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ #define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 /* Current Link Speed 2.5GT/s */ #define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 /* Current Link Speed 5.0GT/s */ @@ -576,7 +582,7 @@ #define PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */ #define PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */ #define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1 20 /* v1 endpoints with link end here */ -#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ +#define PCI_EXP_SLTCAP 0x14 /* Slot Capabilities */ #define PCI_EXP_SLTCAP_ABP 0x00000001 /* Attention Button Present */ #define PCI_EXP_SLTCAP_PCP 0x00000002 /* Power Controller Present */ #define PCI_EXP_SLTCAP_MRLSP 0x00000004 /* MRL Sensor Present */ @@ -589,7 +595,7 @@ #define PCI_EXP_SLTCAP_EIP 0x00020000 /* Electromechanical Interlock Present */ #define PCI_EXP_SLTCAP_NCCS 0x00040000 /* No Command Completed Support */ #define PCI_EXP_SLTCAP_PSN 0xfff80000 /* Physical Slot Number */ -#define PCI_EXP_SLTCTL 24 /* Slot Control */ +#define PCI_EXP_SLTCTL 0x18 /* Slot Control */ #define PCI_EXP_SLTCTL_ABPE 0x0001 /* Attention Button Pressed Enable */ #define PCI_EXP_SLTCTL_PFDE 0x0002 /* Power Fault Detected Enable */ #define PCI_EXP_SLTCTL_MRLSCE 0x0004 /* MRL Sensor Changed Enable */ @@ -610,8 +616,9 @@ #define PCI_EXP_SLTCTL_PWR_OFF 0x0400 /* Power Off */ #define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */ #define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */ +#define PCI_EXP_SLTCTL_ASPL_DISABLE 0x2000 /* Auto Slot Power Limit Disable */ #define PCI_EXP_SLTCTL_IBPD_DISABLE 0x4000 /* In-band PD disable */ -#define PCI_EXP_SLTSTA 26 /* Slot Status */ +#define PCI_EXP_SLTSTA 0x1a /* Slot Status */ #define PCI_EXP_SLTSTA_ABP 0x0001 /* Attention Button Pressed */ #define PCI_EXP_SLTSTA_PFD 0x0002 /* Power Fault Detected */ #define PCI_EXP_SLTSTA_MRLSC 0x0004 /* MRL Sensor Changed */ @@ -621,15 +628,15 @@ #define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ #define PCI_EXP_SLTSTA_EIS 0x0080 /* Electromechanical Interlock Status */ #define PCI_EXP_SLTSTA_DLLSC 0x0100 /* Data Link Layer State Changed */ -#define PCI_EXP_RTCTL 28 /* Root Control */ +#define PCI_EXP_RTCTL 0x1c /* Root Control */ #define PCI_EXP_RTCTL_SECEE 0x0001 /* System Error on Correctable Error */ #define PCI_EXP_RTCTL_SENFEE 0x0002 /* System Error on Non-Fatal Error */ #define PCI_EXP_RTCTL_SEFEE 0x0004 /* System Error on Fatal Error */ #define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */ #define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */ -#define PCI_EXP_RTCAP 30 /* Root Capabilities */ +#define PCI_EXP_RTCAP 0x1e /* Root Capabilities */ #define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */ -#define PCI_EXP_RTSTA 32 /* Root Status */ +#define PCI_EXP_RTSTA 0x20 /* Root Status */ #define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */ #define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */ /* @@ -640,7 +647,7 @@ * Use pcie_capability_read_word() and similar interfaces to use them * safely. 
*/ -#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ +#define PCI_EXP_DEVCAP2 0x24 /* Device Capabilities 2 */ #define PCI_EXP_DEVCAP2_COMP_TMOUT_DIS 0x00000010 /* Completion Timeout Disable supported */ #define PCI_EXP_DEVCAP2_ARI 0x00000020 /* Alternative Routing-ID */ #define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */ @@ -652,7 +659,7 @@ #define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */ #define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */ #define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */ -#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ +#define PCI_EXP_DEVCTL2 0x28 /* Device Control 2 */ #define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */ #define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */ #define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */ @@ -664,9 +671,9 @@ #define PCI_EXP_DEVCTL2_OBFF_MSGA_EN 0x2000 /* Enable OBFF Message type A */ #define PCI_EXP_DEVCTL2_OBFF_MSGB_EN 0x4000 /* Enable OBFF Message type B */ #define PCI_EXP_DEVCTL2_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */ -#define PCI_EXP_DEVSTA2 42 /* Device Status 2 */ -#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints without link end here */ -#define PCI_EXP_LNKCAP2 44 /* Link Capabilities 2 */ +#define PCI_EXP_DEVSTA2 0x2a /* Device Status 2 */ +#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 0x2c /* end of v2 EPs w/o link */ +#define PCI_EXP_LNKCAP2 0x2c /* Link Capabilities 2 */ #define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */ #define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */ #define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */ @@ -674,7 +681,7 @@ #define PCI_EXP_LNKCAP2_SLS_32_0GB 0x00000020 /* Supported Speed 32GT/s */ #define PCI_EXP_LNKCAP2_SLS_64_0GB 0x00000040 /* Supported Speed 64GT/s */ #define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */ -#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ +#define PCI_EXP_LNKCTL2 0x30 /* Link Control 2 */ #define PCI_EXP_LNKCTL2_TLS 0x000f #define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */ #define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */ @@ -685,12 +692,12 @@ #define PCI_EXP_LNKCTL2_ENTER_COMP 0x0010 /* Enter Compliance */ #define PCI_EXP_LNKCTL2_TX_MARGIN 0x0380 /* Transmit Margin */ #define PCI_EXP_LNKCTL2_HASD 0x0020 /* HW Autonomous Speed Disable */ -#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ -#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */ -#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */ +#define PCI_EXP_LNKSTA2 0x32 /* Link Status 2 */ +#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */ +#define PCI_EXP_SLTCAP2 0x34 /* Slot Capabilities 2 */ #define PCI_EXP_SLTCAP2_IBPD 0x00000001 /* In-band PD Disable Supported */ -#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */ -#define PCI_EXP_SLTSTA2 58 /* Slot Status 2 */ +#define PCI_EXP_SLTCTL2 0x38 /* Slot Control 2 */ +#define PCI_EXP_SLTSTA2 0x3a /* Slot Status 2 */ /* Extended Capabilities (PCI-X 2.0 and Express) */ #define PCI_EXT_CAP_ID(header) (header & 0x0000ffff) @@ -730,13 +737,14 @@ #define PCI_EXT_CAP_ID_DVSEC 0x23 /* Designated Vendor-Specific */ #define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */ #define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */ -#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PL_16GT +#define PCI_EXT_CAP_ID_DOE 0x2E /* Data Object Exchange */ 
+#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DOE #define PCI_EXT_CAP_DSN_SIZEOF 12 #define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40 /* Advanced Error Reporting */ -#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */ +#define PCI_ERR_UNCOR_STATUS 0x04 /* Uncorrectable Error Status */ #define PCI_ERR_UNC_UND 0x00000001 /* Undefined */ #define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */ #define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */ @@ -754,11 +762,11 @@ #define PCI_ERR_UNC_MCBTLP 0x00800000 /* MC blocked TLP */ #define PCI_ERR_UNC_ATOMEG 0x01000000 /* Atomic egress blocked */ #define PCI_ERR_UNC_TLPPRE 0x02000000 /* TLP prefix blocked */ -#define PCI_ERR_UNCOR_MASK 8 /* Uncorrectable Error Mask */ +#define PCI_ERR_UNCOR_MASK 0x08 /* Uncorrectable Error Mask */ /* Same bits as above */ -#define PCI_ERR_UNCOR_SEVER 12 /* Uncorrectable Error Severity */ +#define PCI_ERR_UNCOR_SEVER 0x0c /* Uncorrectable Error Severity */ /* Same bits as above */ -#define PCI_ERR_COR_STATUS 16 /* Correctable Error Status */ +#define PCI_ERR_COR_STATUS 0x10 /* Correctable Error Status */ #define PCI_ERR_COR_RCVR 0x00000001 /* Receiver Error Status */ #define PCI_ERR_COR_BAD_TLP 0x00000040 /* Bad TLP Status */ #define PCI_ERR_COR_BAD_DLLP 0x00000080 /* Bad DLLP Status */ @@ -767,20 +775,20 @@ #define PCI_ERR_COR_ADV_NFAT 0x00002000 /* Advisory Non-Fatal */ #define PCI_ERR_COR_INTERNAL 0x00004000 /* Corrected Internal */ #define PCI_ERR_COR_LOG_OVER 0x00008000 /* Header Log Overflow */ -#define PCI_ERR_COR_MASK 20 /* Correctable Error Mask */ +#define PCI_ERR_COR_MASK 0x14 /* Correctable Error Mask */ /* Same bits as above */ -#define PCI_ERR_CAP 24 /* Advanced Error Capabilities */ -#define PCI_ERR_CAP_FEP(x) ((x) & 31) /* First Error Pointer */ +#define PCI_ERR_CAP 0x18 /* Advanced Error Capabilities & Ctrl*/ +#define PCI_ERR_CAP_FEP(x) ((x) & 0x1f) /* First Error Pointer */ #define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */ #define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */ #define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */ #define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */ -#define PCI_ERR_HEADER_LOG 28 /* Header Log Register (16 bytes) */ -#define PCI_ERR_ROOT_COMMAND 44 /* Root Error Command */ +#define PCI_ERR_HEADER_LOG 0x1c /* Header Log Register (16 bytes) */ +#define PCI_ERR_ROOT_COMMAND 0x2c /* Root Error Command */ #define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 /* Correctable Err Reporting Enable */ #define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002 /* Non-Fatal Err Reporting Enable */ #define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004 /* Fatal Err Reporting Enable */ -#define PCI_ERR_ROOT_STATUS 48 +#define PCI_ERR_ROOT_STATUS 0x30 #define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */ #define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002 /* Multiple ERR_COR */ #define PCI_ERR_ROOT_UNCOR_RCV 0x00000004 /* ERR_FATAL/NONFATAL */ @@ -789,52 +797,52 @@ #define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */ #define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */ #define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */ -#define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */ +#define PCI_ERR_ROOT_ERR_SRC 0x34 /* Error Source Identification */ /* Virtual Channel */ -#define PCI_VC_PORT_CAP1 4 +#define PCI_VC_PORT_CAP1 0x04 #define PCI_VC_CAP1_EVCC 0x00000007 /* extended VC count */ #define PCI_VC_CAP1_LPEVCC 0x00000070 /* low prio extended VC count */ #define 
PCI_VC_CAP1_ARB_SIZE 0x00000c00 -#define PCI_VC_PORT_CAP2 8 +#define PCI_VC_PORT_CAP2 0x08 #define PCI_VC_CAP2_32_PHASE 0x00000002 #define PCI_VC_CAP2_64_PHASE 0x00000004 #define PCI_VC_CAP2_128_PHASE 0x00000008 #define PCI_VC_CAP2_ARB_OFF 0xff000000 -#define PCI_VC_PORT_CTRL 12 +#define PCI_VC_PORT_CTRL 0x0c #define PCI_VC_PORT_CTRL_LOAD_TABLE 0x00000001 -#define PCI_VC_PORT_STATUS 14 +#define PCI_VC_PORT_STATUS 0x0e #define PCI_VC_PORT_STATUS_TABLE 0x00000001 -#define PCI_VC_RES_CAP 16 +#define PCI_VC_RES_CAP 0x10 #define PCI_VC_RES_CAP_32_PHASE 0x00000002 #define PCI_VC_RES_CAP_64_PHASE 0x00000004 #define PCI_VC_RES_CAP_128_PHASE 0x00000008 #define PCI_VC_RES_CAP_128_PHASE_TB 0x00000010 #define PCI_VC_RES_CAP_256_PHASE 0x00000020 #define PCI_VC_RES_CAP_ARB_OFF 0xff000000 -#define PCI_VC_RES_CTRL 20 +#define PCI_VC_RES_CTRL 0x14 #define PCI_VC_RES_CTRL_LOAD_TABLE 0x00010000 #define PCI_VC_RES_CTRL_ARB_SELECT 0x000e0000 #define PCI_VC_RES_CTRL_ID 0x07000000 #define PCI_VC_RES_CTRL_ENABLE 0x80000000 -#define PCI_VC_RES_STATUS 26 +#define PCI_VC_RES_STATUS 0x1a #define PCI_VC_RES_STATUS_TABLE 0x00000001 #define PCI_VC_RES_STATUS_NEGO 0x00000002 #define PCI_CAP_VC_BASE_SIZEOF 0x10 -#define PCI_CAP_VC_PER_VC_SIZEOF 0x0C +#define PCI_CAP_VC_PER_VC_SIZEOF 0x0c /* Power Budgeting */ -#define PCI_PWR_DSR 4 /* Data Select Register */ -#define PCI_PWR_DATA 8 /* Data Register */ +#define PCI_PWR_DSR 0x04 /* Data Select Register */ +#define PCI_PWR_DATA 0x08 /* Data Register */ #define PCI_PWR_DATA_BASE(x) ((x) & 0xff) /* Base Power */ #define PCI_PWR_DATA_SCALE(x) (((x) >> 8) & 3) /* Data Scale */ #define PCI_PWR_DATA_PM_SUB(x) (((x) >> 10) & 7) /* PM Sub State */ #define PCI_PWR_DATA_PM_STATE(x) (((x) >> 13) & 3) /* PM State */ #define PCI_PWR_DATA_TYPE(x) (((x) >> 15) & 7) /* Type */ #define PCI_PWR_DATA_RAIL(x) (((x) >> 18) & 7) /* Power Rail */ -#define PCI_PWR_CAP 12 /* Capability */ +#define PCI_PWR_CAP 0x0c /* Capability */ #define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */ -#define PCI_EXT_CAP_PWR_SIZEOF 16 +#define PCI_EXT_CAP_PWR_SIZEOF 0x10 /* Root Complex Event Collector Endpoint Association */ #define PCI_RCEC_RCIEP_BITMAP 4 /* Associated Bitmap for RCiEPs */ @@ -958,7 +966,7 @@ #define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */ #define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */ #define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */ -#define PCI_EXT_CAP_SRIOV_SIZEOF 64 +#define PCI_EXT_CAP_SRIOV_SIZEOF 0x40 #define PCI_LTR_MAX_SNOOP_LAT 0x4 #define PCI_LTR_MAX_NOSNOOP_LAT 0x6 @@ -1011,12 +1019,12 @@ #define PCI_TPH_LOC_NONE 0x000 /* no location */ #define PCI_TPH_LOC_CAP 0x200 /* in capability */ #define PCI_TPH_LOC_MSIX 0x400 /* in MSI-X */ -#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* st table mask */ -#define PCI_TPH_CAP_ST_SHIFT 16 /* st table shift */ -#define PCI_TPH_BASE_SIZEOF 12 /* size with no st table */ +#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* ST table mask */ +#define PCI_TPH_CAP_ST_SHIFT 16 /* ST table shift */ +#define PCI_TPH_BASE_SIZEOF 0xc /* size with no ST table */ /* Downstream Port Containment */ -#define PCI_EXP_DPC_CAP 4 /* DPC Capability */ +#define PCI_EXP_DPC_CAP 0x04 /* DPC Capability */ #define PCI_EXP_DPC_IRQ 0x001F /* Interrupt Message Number */ #define PCI_EXP_DPC_CAP_RP_EXT 0x0020 /* Root Port Extensions */ #define PCI_EXP_DPC_CAP_POISONED_TLP 0x0040 /* Poisoned TLP Egress Blocking Supported */ @@ -1024,19 +1032,19 @@ #define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0x0F00 /* RP PIO Log Size */ #define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR 
signal on DL_Active supported */ -#define PCI_EXP_DPC_CTL 6 /* DPC control */ +#define PCI_EXP_DPC_CTL 0x06 /* DPC control */ #define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */ #define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */ #define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */ -#define PCI_EXP_DPC_STATUS 8 /* DPC Status */ +#define PCI_EXP_DPC_STATUS 0x08 /* DPC Status */ #define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */ #define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */ #define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */ #define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */ #define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */ -#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */ +#define PCI_EXP_DPC_SOURCE_ID 0x0A /* DPC Source Identifier */ #define PCI_EXP_DPC_RP_PIO_STATUS 0x0C /* RP PIO Status */ #define PCI_EXP_DPC_RP_PIO_MASK 0x10 /* RP PIO Mask */ @@ -1080,7 +1088,11 @@ /* Designated Vendor-Specific (DVSEC, PCI_EXT_CAP_ID_DVSEC) */ #define PCI_DVSEC_HEADER1 0x4 /* Designated Vendor-Specific Header1 */ +#define PCI_DVSEC_HEADER1_VID(x) ((x) & 0xffff) +#define PCI_DVSEC_HEADER1_REV(x) (((x) >> 16) & 0xf) +#define PCI_DVSEC_HEADER1_LEN(x) (((x) >> 20) & 0xfff) #define PCI_DVSEC_HEADER2 0x8 /* Designated Vendor-Specific Header2 */ +#define PCI_DVSEC_HEADER2_ID(x) ((x) & 0xffff) /* Data Link Feature */ #define PCI_DLF_CAP 0x04 /* Capabilities Register */ @@ -1092,4 +1104,30 @@ #define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK 0x000000F0 #define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT 4 +/* Data Object Exchange */ +#define PCI_DOE_CAP 0x04 /* DOE Capabilities Register */ +#define PCI_DOE_CAP_INT_SUP 0x00000001 /* Interrupt Support */ +#define PCI_DOE_CAP_INT_MSG_NUM 0x00000ffe /* Interrupt Message Number */ +#define PCI_DOE_CTRL 0x08 /* DOE Control Register */ +#define PCI_DOE_CTRL_ABORT 0x00000001 /* DOE Abort */ +#define PCI_DOE_CTRL_INT_EN 0x00000002 /* DOE Interrupt Enable */ +#define PCI_DOE_CTRL_GO 0x80000000 /* DOE Go */ +#define PCI_DOE_STATUS 0x0c /* DOE Status Register */ +#define PCI_DOE_STATUS_BUSY 0x00000001 /* DOE Busy */ +#define PCI_DOE_STATUS_INT_STATUS 0x00000002 /* DOE Interrupt Status */ +#define PCI_DOE_STATUS_ERROR 0x00000004 /* DOE Error */ +#define PCI_DOE_STATUS_DATA_OBJECT_READY 0x80000000 /* Data Object Ready */ +#define PCI_DOE_WRITE 0x10 /* DOE Write Data Mailbox Register */ +#define PCI_DOE_READ 0x14 /* DOE Read Data Mailbox Register */ + +/* DOE Data Object - note not actually registers */ +#define PCI_DOE_DATA_OBJECT_HEADER_1_VID 0x0000ffff +#define PCI_DOE_DATA_OBJECT_HEADER_1_TYPE 0x00ff0000 +#define PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH 0x0003ffff + +#define PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX 0x000000ff +#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID 0x0000ffff +#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL 0x00ff0000 +#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX 0xff000000 + #endif /* LINUX_PCI_REGS_H */ diff --git a/include/standard-headers/linux/pvpanic.h b/include/standard-headers/linux/pvpanic.h new file mode 100644 index 0000000000..54b7485390 --- /dev/null +++ b/include/standard-headers/linux/pvpanic.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef __PVPANIC_H__ +#define __PVPANIC_H__ + +#define PVPANIC_PANICKED (1 << 0) +#define PVPANIC_CRASH_LOADED (1 << 1) + +#endif /* __PVPANIC_H__ */ diff --git a/include/standard-headers/linux/vhost_types.h 
b/include/standard-headers/linux/vhost_types.h index 0bd2684a2a..c41a73fe36 100644 --- a/include/standard-headers/linux/vhost_types.h +++ b/include/standard-headers/linux/vhost_types.h @@ -87,7 +87,7 @@ struct vhost_msg { struct vhost_msg_v2 { uint32_t type; - uint32_t reserved; + uint32_t asid; union { struct vhost_iotlb_msg iotlb; uint8_t padding[64]; @@ -107,7 +107,7 @@ struct vhost_memory_region { struct vhost_memory { uint32_t nregions; uint32_t padding; - struct vhost_memory_region regions[0]; + struct vhost_memory_region regions[]; }; /* VHOST_SCSI specific definitions */ @@ -135,7 +135,7 @@ struct vhost_scsi_target { struct vhost_vdpa_config { uint32_t off; uint32_t len; - uint8_t buf[0]; + uint8_t buf[]; }; /* vhost vdpa IOVA range @@ -153,4 +153,15 @@ struct vhost_vdpa_iova_range { /* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */ #define VHOST_NET_F_VIRTIO_NET_HDR 27 +/* Use message type V2 */ +#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1 +/* IOTLB can accept batching hints */ +#define VHOST_BACKEND_F_IOTLB_BATCH 0x2 +/* IOTLB can accept address space identifier through V2 type of IOTLB + * message + */ +#define VHOST_BACKEND_F_IOTLB_ASID 0x3 +/* Device can be suspended */ +#define VHOST_BACKEND_F_SUSPEND 0x4 + #endif diff --git a/include/standard-headers/linux/virtio_9p.h b/include/standard-headers/linux/virtio_9p.h index f5604fc5fb..da61dee98c 100644 --- a/include/standard-headers/linux/virtio_9p.h +++ b/include/standard-headers/linux/virtio_9p.h @@ -38,7 +38,7 @@ struct virtio_9p_config { /* length of the tag name */ __virtio16 tag_len; /* non-NULL terminated tag name */ - uint8_t tag[0]; + uint8_t tag[]; } QEMU_PACKED; #endif /* _LINUX_VIRTIO_9P_H */ diff --git a/include/standard-headers/linux/virtio_config.h b/include/standard-headers/linux/virtio_config.h index 22e3a85f67..965ee6ae23 100644 --- a/include/standard-headers/linux/virtio_config.h +++ b/include/standard-headers/linux/virtio_config.h @@ -52,7 +52,7 @@ * rest are per-device feature bits. */ #define VIRTIO_TRANSPORT_F_START 28 -#define VIRTIO_TRANSPORT_F_END 38 +#define VIRTIO_TRANSPORT_F_END 41 #ifndef VIRTIO_CONFIG_NO_LEGACY /* Do we get callbacks when the ring is completely used, even if we've @@ -80,6 +80,12 @@ /* This feature indicates support for the packed virtqueue layout. */ #define VIRTIO_F_RING_PACKED 34 +/* + * Inorder feature indicates that all buffers are used by the device + * in the same order in which they have been made available. + */ +#define VIRTIO_F_IN_ORDER 35 + /* * This feature indicates that memory accesses by the driver and the * device are ordered in a way described by the platform. @@ -90,4 +96,9 @@ * Does the device support Single Root I/O Virtualization? */ #define VIRTIO_F_SR_IOV 37 + +/* + * This feature indicates that the driver can reset a queue individually. 
+ */ +#define VIRTIO_F_RING_RESET 40 #endif /* _LINUX_VIRTIO_CONFIG_H */ diff --git a/include/standard-headers/linux/virtio_crypto.h b/include/standard-headers/linux/virtio_crypto.h index 5ff0b4ee59..68066dafb6 100644 --- a/include/standard-headers/linux/virtio_crypto.h +++ b/include/standard-headers/linux/virtio_crypto.h @@ -37,6 +37,7 @@ #define VIRTIO_CRYPTO_SERVICE_HASH 1 #define VIRTIO_CRYPTO_SERVICE_MAC 2 #define VIRTIO_CRYPTO_SERVICE_AEAD 3 +#define VIRTIO_CRYPTO_SERVICE_AKCIPHER 4 #define VIRTIO_CRYPTO_OPCODE(service, op) (((service) << 8) | (op)) @@ -57,6 +58,10 @@ struct virtio_crypto_ctrl_header { VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02) #define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03) +#define VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION \ + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x04) +#define VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION \ + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x05) uint32_t opcode; uint32_t algo; uint32_t flag; @@ -180,6 +185,58 @@ struct virtio_crypto_aead_create_session_req { uint8_t padding[32]; }; +struct virtio_crypto_rsa_session_para { +#define VIRTIO_CRYPTO_RSA_RAW_PADDING 0 +#define VIRTIO_CRYPTO_RSA_PKCS1_PADDING 1 + uint32_t padding_algo; + +#define VIRTIO_CRYPTO_RSA_NO_HASH 0 +#define VIRTIO_CRYPTO_RSA_MD2 1 +#define VIRTIO_CRYPTO_RSA_MD3 2 +#define VIRTIO_CRYPTO_RSA_MD4 3 +#define VIRTIO_CRYPTO_RSA_MD5 4 +#define VIRTIO_CRYPTO_RSA_SHA1 5 +#define VIRTIO_CRYPTO_RSA_SHA256 6 +#define VIRTIO_CRYPTO_RSA_SHA384 7 +#define VIRTIO_CRYPTO_RSA_SHA512 8 +#define VIRTIO_CRYPTO_RSA_SHA224 9 + uint32_t hash_algo; +}; + +struct virtio_crypto_ecdsa_session_para { +#define VIRTIO_CRYPTO_CURVE_UNKNOWN 0 +#define VIRTIO_CRYPTO_CURVE_NIST_P192 1 +#define VIRTIO_CRYPTO_CURVE_NIST_P224 2 +#define VIRTIO_CRYPTO_CURVE_NIST_P256 3 +#define VIRTIO_CRYPTO_CURVE_NIST_P384 4 +#define VIRTIO_CRYPTO_CURVE_NIST_P521 5 + uint32_t curve_id; + uint32_t padding; +}; + +struct virtio_crypto_akcipher_session_para { +#define VIRTIO_CRYPTO_NO_AKCIPHER 0 +#define VIRTIO_CRYPTO_AKCIPHER_RSA 1 +#define VIRTIO_CRYPTO_AKCIPHER_DSA 2 +#define VIRTIO_CRYPTO_AKCIPHER_ECDSA 3 + uint32_t algo; + +#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC 1 +#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE 2 + uint32_t keytype; + uint32_t keylen; + + union { + struct virtio_crypto_rsa_session_para rsa; + struct virtio_crypto_ecdsa_session_para ecdsa; + } u; +}; + +struct virtio_crypto_akcipher_create_session_req { + struct virtio_crypto_akcipher_session_para para; + uint8_t padding[36]; +}; + struct virtio_crypto_alg_chain_session_para { #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER 1 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH 2 @@ -247,6 +304,8 @@ struct virtio_crypto_op_ctrl_req { mac_create_session; struct virtio_crypto_aead_create_session_req aead_create_session; + struct virtio_crypto_akcipher_create_session_req + akcipher_create_session; struct virtio_crypto_destroy_session_req destroy_session; uint8_t padding[56]; @@ -266,6 +325,14 @@ struct virtio_crypto_op_header { VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00) #define VIRTIO_CRYPTO_AEAD_DECRYPT \ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01) +#define VIRTIO_CRYPTO_AKCIPHER_ENCRYPT \ + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x00) +#define VIRTIO_CRYPTO_AKCIPHER_DECRYPT \ + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x01) +#define VIRTIO_CRYPTO_AKCIPHER_SIGN \ + 
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x02) +#define VIRTIO_CRYPTO_AKCIPHER_VERIFY \ + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x03) uint32_t opcode; /* algo should be service-specific algorithms */ uint32_t algo; @@ -390,6 +457,16 @@ struct virtio_crypto_aead_data_req { uint8_t padding[32]; }; +struct virtio_crypto_akcipher_para { + uint32_t src_data_len; + uint32_t dst_data_len; +}; + +struct virtio_crypto_akcipher_data_req { + struct virtio_crypto_akcipher_para para; + uint8_t padding[40]; +}; + /* The request of the data virtqueue's packet */ struct virtio_crypto_op_data_req { struct virtio_crypto_op_header header; @@ -399,6 +476,7 @@ struct virtio_crypto_op_data_req { struct virtio_crypto_hash_data_req hash_req; struct virtio_crypto_mac_data_req mac_req; struct virtio_crypto_aead_data_req aead_req; + struct virtio_crypto_akcipher_data_req akcipher_req; uint8_t padding[48]; } u; }; @@ -408,6 +486,8 @@ struct virtio_crypto_op_data_req { #define VIRTIO_CRYPTO_BADMSG 2 #define VIRTIO_CRYPTO_NOTSUPP 3 #define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */ +#define VIRTIO_CRYPTO_NOSPC 5 /* no free session ID */ +#define VIRTIO_CRYPTO_KEY_REJECTED 6 /* Signature verification failed */ /* The accelerator hardware is ready */ #define VIRTIO_CRYPTO_S_HW_READY (1 << 0) @@ -438,7 +518,7 @@ struct virtio_crypto_config { uint32_t max_cipher_key_len; /* Maximum length of authenticated key */ uint32_t max_auth_key_len; - uint32_t reserve; + uint32_t akcipher_algo; /* Maximum size of each crypto request's content */ uint64_t max_size; }; diff --git a/include/standard-headers/linux/virtio_gpio.h b/include/standard-headers/linux/virtio_gpio.h new file mode 100644 index 0000000000..2b5cf06349 --- /dev/null +++ b/include/standard-headers/linux/virtio_gpio.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef _LINUX_VIRTIO_GPIO_H +#define _LINUX_VIRTIO_GPIO_H + +#include "standard-headers/linux/types.h" + +/* Virtio GPIO Feature bits */ +#define VIRTIO_GPIO_F_IRQ 0 + +/* Virtio GPIO request types */ +#define VIRTIO_GPIO_MSG_GET_NAMES 0x0001 +#define VIRTIO_GPIO_MSG_GET_DIRECTION 0x0002 +#define VIRTIO_GPIO_MSG_SET_DIRECTION 0x0003 +#define VIRTIO_GPIO_MSG_GET_VALUE 0x0004 +#define VIRTIO_GPIO_MSG_SET_VALUE 0x0005 +#define VIRTIO_GPIO_MSG_IRQ_TYPE 0x0006 + +/* Possible values of the status field */ +#define VIRTIO_GPIO_STATUS_OK 0x0 +#define VIRTIO_GPIO_STATUS_ERR 0x1 + +/* Direction types */ +#define VIRTIO_GPIO_DIRECTION_NONE 0x00 +#define VIRTIO_GPIO_DIRECTION_OUT 0x01 +#define VIRTIO_GPIO_DIRECTION_IN 0x02 + +/* Virtio GPIO IRQ types */ +#define VIRTIO_GPIO_IRQ_TYPE_NONE 0x00 +#define VIRTIO_GPIO_IRQ_TYPE_EDGE_RISING 0x01 +#define VIRTIO_GPIO_IRQ_TYPE_EDGE_FALLING 0x02 +#define VIRTIO_GPIO_IRQ_TYPE_EDGE_BOTH 0x03 +#define VIRTIO_GPIO_IRQ_TYPE_LEVEL_HIGH 0x04 +#define VIRTIO_GPIO_IRQ_TYPE_LEVEL_LOW 0x08 + +struct virtio_gpio_config { + uint16_t ngpio; + uint8_t padding[2]; + uint32_t gpio_names_size; +}; + +/* Virtio GPIO Request / Response */ +struct virtio_gpio_request { + uint16_t type; + uint16_t gpio; + uint32_t value; +}; + +struct virtio_gpio_response { + uint8_t status; + uint8_t value; +}; + +struct virtio_gpio_response_get_names { + uint8_t status; + uint8_t value[]; +}; + +/* Virtio GPIO IRQ Request / Response */ +struct virtio_gpio_irq_request { + uint16_t gpio; +}; + +struct virtio_gpio_irq_response { + uint8_t status; +}; + +/* Possible values of the interrupt status field */ +#define VIRTIO_GPIO_IRQ_STATUS_INVALID 0x0 
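[Editor's sketch, not part of the patch: the virtio-gpio request struct above maps directly onto the request virtqueue buffer. A minimal example of filling a SET_VALUE request, assuming the field layout of the header just added; endianness conversion and the virtqueue plumbing are deliberately left out, and the local struct is a mirror so the snippet stands alone:]

#include <stdint.h>
#include <string.h>

/* Local mirror of struct virtio_gpio_request from the header above. */
struct gpio_request {
    uint16_t type;
    uint16_t gpio;
    uint32_t value;
};

/* Prepare a VIRTIO_GPIO_MSG_SET_VALUE (0x0005) request driving one line. */
static void gpio_fill_set_value(struct gpio_request *req,
                                uint16_t line, uint32_t level)
{
    memset(req, 0, sizeof(*req));
    req->type  = 0x0005;   /* VIRTIO_GPIO_MSG_SET_VALUE */
    req->gpio  = line;
    req->value = level;    /* 0 = inactive, 1 = active */
}
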
+#define VIRTIO_GPIO_IRQ_STATUS_VALID 0x1 + +#endif /* _LINUX_VIRTIO_GPIO_H */ diff --git a/include/standard-headers/linux/virtio_gpu.h b/include/standard-headers/linux/virtio_gpu.h index 1357e4774e..2da48d3d4c 100644 --- a/include/standard-headers/linux/virtio_gpu.h +++ b/include/standard-headers/linux/virtio_gpu.h @@ -59,6 +59,11 @@ * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB */ #define VIRTIO_GPU_F_RESOURCE_BLOB 3 +/* + * VIRTIO_GPU_CMD_CREATE_CONTEXT with + * context_init and multiple timelines + */ +#define VIRTIO_GPU_F_CONTEXT_INIT 4 enum virtio_gpu_ctrl_type { VIRTIO_GPU_UNDEFINED = 0, @@ -122,14 +127,20 @@ enum virtio_gpu_shm_id { VIRTIO_GPU_SHM_ID_HOST_VISIBLE = 1 }; -#define VIRTIO_GPU_FLAG_FENCE (1 << 0) +#define VIRTIO_GPU_FLAG_FENCE (1 << 0) +/* + * If the following flag is set, then ring_idx contains the index + * of the command ring that needs to used when creating the fence + */ +#define VIRTIO_GPU_FLAG_INFO_RING_IDX (1 << 1) struct virtio_gpu_ctrl_hdr { uint32_t type; uint32_t flags; uint64_t fence_id; uint32_t ctx_id; - uint32_t padding; + uint8_t ring_idx; + uint8_t padding[3]; }; /* data passed in the cursor vq */ @@ -269,10 +280,11 @@ struct virtio_gpu_resource_create_3d { }; /* VIRTIO_GPU_CMD_CTX_CREATE */ +#define VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK 0x000000ff struct virtio_gpu_ctx_create { struct virtio_gpu_ctrl_hdr hdr; uint32_t nlen; - uint32_t padding; + uint32_t context_init; char debug_name[64]; }; diff --git a/include/standard-headers/linux/virtio_i2c.h b/include/standard-headers/linux/virtio_i2c.h new file mode 100644 index 0000000000..09fa907793 --- /dev/null +++ b/include/standard-headers/linux/virtio_i2c.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */ +/* + * Definitions for virtio I2C Adpter + * + * Copyright (c) 2021 Intel Corporation. All rights reserved. 
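[Editor's sketch, not part of the patch: the virtio-gpu hunk above extends the control header with a ring_idx byte gated by VIRTIO_GPU_FLAG_INFO_RING_IDX. A small illustration of requesting a fence on one command ring, using a local mirror of the revised layout; the command type and the rest of the submission path are omitted:]

#include <stdint.h>
#include <string.h>

/* Local mirror of the revised struct virtio_gpu_ctrl_hdr. */
struct gpu_ctrl_hdr {
    uint32_t type;
    uint32_t flags;
    uint64_t fence_id;
    uint32_t ctx_id;
    uint8_t  ring_idx;
    uint8_t  padding[3];
};

#define GPU_FLAG_FENCE         (1 << 0)
#define GPU_FLAG_INFO_RING_IDX (1 << 1)

/* Ask for a fence on a specific command ring of a context; the caller still
 * fills hdr->type with the actual command. */
static void gpu_hdr_fence_on_ring(struct gpu_ctrl_hdr *hdr, uint32_t ctx,
                                  uint64_t fence, uint8_t ring)
{
    memset(hdr, 0, sizeof(*hdr));
    hdr->flags    = GPU_FLAG_FENCE | GPU_FLAG_INFO_RING_IDX;
    hdr->fence_id = fence;
    hdr->ctx_id   = ctx;
    hdr->ring_idx = ring;
}
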
+ */ + +#ifndef _LINUX_VIRTIO_I2C_H +#define _LINUX_VIRTIO_I2C_H + +#include "standard-headers/linux/const.h" +#include "standard-headers/linux/types.h" + +/* Virtio I2C Feature bits */ +#define VIRTIO_I2C_F_ZERO_LENGTH_REQUEST 0 + +/* The bit 0 of the @virtio_i2c_out_hdr.@flags, used to group the requests */ +#define VIRTIO_I2C_FLAGS_FAIL_NEXT _BITUL(0) + +/* The bit 1 of the @virtio_i2c_out_hdr.@flags, used to mark a buffer as read */ +#define VIRTIO_I2C_FLAGS_M_RD _BITUL(1) + +/** + * struct virtio_i2c_out_hdr - the virtio I2C message OUT header + * @addr: the controlled device address + * @padding: used to pad to full dword + * @flags: used for feature extensibility + */ +struct virtio_i2c_out_hdr { + uint16_t addr; + uint16_t padding; + uint32_t flags; +}; + +/** + * struct virtio_i2c_in_hdr - the virtio I2C message IN header + * @status: the processing result from the backend + */ +struct virtio_i2c_in_hdr { + uint8_t status; +}; + +/* The final status written by the device */ +#define VIRTIO_I2C_MSG_OK 0 +#define VIRTIO_I2C_MSG_ERR 1 + +#endif /* _LINUX_VIRTIO_I2C_H */ diff --git a/include/standard-headers/linux/virtio_ids.h b/include/standard-headers/linux/virtio_ids.h index 4fe842c3a3..7aa2eb7662 100644 --- a/include/standard-headers/linux/virtio_ids.h +++ b/include/standard-headers/linux/virtio_ids.h @@ -54,7 +54,31 @@ #define VIRTIO_ID_SOUND 25 /* virtio sound */ #define VIRTIO_ID_FS 26 /* virtio filesystem */ #define VIRTIO_ID_PMEM 27 /* virtio pmem */ +#define VIRTIO_ID_RPMB 28 /* virtio rpmb */ #define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */ +#define VIRTIO_ID_VIDEO_ENCODER 30 /* virtio video encoder */ +#define VIRTIO_ID_VIDEO_DECODER 31 /* virtio video decoder */ +#define VIRTIO_ID_SCMI 32 /* virtio SCMI */ +#define VIRTIO_ID_NITRO_SEC_MOD 33 /* virtio nitro secure module*/ +#define VIRTIO_ID_I2C_ADAPTER 34 /* virtio i2c adapter */ +#define VIRTIO_ID_WATCHDOG 35 /* virtio watchdog */ +#define VIRTIO_ID_CAN 36 /* virtio can */ +#define VIRTIO_ID_DMABUF 37 /* virtio dmabuf */ +#define VIRTIO_ID_PARAM_SERV 38 /* virtio parameter server */ +#define VIRTIO_ID_AUDIO_POLICY 39 /* virtio audio policy */ #define VIRTIO_ID_BT 40 /* virtio bluetooth */ +#define VIRTIO_ID_GPIO 41 /* virtio gpio */ + +/* + * Virtio Transitional IDs + */ + +#define VIRTIO_TRANS_ID_NET 0x1000 /* transitional virtio net */ +#define VIRTIO_TRANS_ID_BLOCK 0x1001 /* transitional virtio block */ +#define VIRTIO_TRANS_ID_BALLOON 0x1002 /* transitional virtio balloon */ +#define VIRTIO_TRANS_ID_CONSOLE 0x1003 /* transitional virtio console */ +#define VIRTIO_TRANS_ID_SCSI 0x1004 /* transitional virtio SCSI */ +#define VIRTIO_TRANS_ID_RNG 0x1005 /* transitional virtio rng */ +#define VIRTIO_TRANS_ID_9P 0x1009 /* transitional virtio 9p console */ #endif /* _LINUX_VIRTIO_IDS_H */ diff --git a/include/standard-headers/linux/virtio_iommu.h b/include/standard-headers/linux/virtio_iommu.h index b9443b83a1..366379c2f0 100644 --- a/include/standard-headers/linux/virtio_iommu.h +++ b/include/standard-headers/linux/virtio_iommu.h @@ -16,6 +16,7 @@ #define VIRTIO_IOMMU_F_BYPASS 3 #define VIRTIO_IOMMU_F_PROBE 4 #define VIRTIO_IOMMU_F_MMIO 5 +#define VIRTIO_IOMMU_F_BYPASS_CONFIG 6 struct virtio_iommu_range_64 { uint64_t start; @@ -36,6 +37,8 @@ struct virtio_iommu_config { struct virtio_iommu_range_32 domain_range; /* Probe buffer size */ uint32_t probe_size; + uint8_t bypass; + uint8_t reserved[3]; }; /* Request types */ @@ -66,11 +69,14 @@ struct virtio_iommu_req_tail { uint8_t reserved[3]; }; +#define 
VIRTIO_IOMMU_ATTACH_F_BYPASS (1 << 0) + struct virtio_iommu_req_attach { struct virtio_iommu_req_head head; uint32_t domain; uint32_t endpoint; - uint8_t reserved[8]; + uint32_t flags; + uint8_t reserved[4]; struct virtio_iommu_req_tail tail; }; diff --git a/include/standard-headers/linux/virtio_mem.h b/include/standard-headers/linux/virtio_mem.h index 05e5ade75d..18c74c527c 100644 --- a/include/standard-headers/linux/virtio_mem.h +++ b/include/standard-headers/linux/virtio_mem.h @@ -68,9 +68,10 @@ * explicitly triggered (VIRTIO_MEM_REQ_UNPLUG). * * There are no guarantees what will happen if unplugged memory is - * read/written. Such memory should, in general, not be touched. E.g., - * even writing might succeed, but the values will simply be discarded at - * random points in time. + * read/written. In general, unplugged memory should not be touched, because + * the resulting action is undefined. There is one exception: without + * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, unplugged memory inside the usable + * region can be read, to simplify creation of memory dumps. * * It can happen that the device cannot process a request, because it is * busy. The device driver has to retry later. @@ -87,6 +88,8 @@ /* node_id is an ACPI PXM and is valid */ #define VIRTIO_MEM_F_ACPI_PXM 0 +/* unplugged memory must not be accessed */ +#define VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE 1 /* --- virtio-mem: guest -> host requests --- */ diff --git a/include/standard-headers/linux/virtio_net.h b/include/standard-headers/linux/virtio_net.h index e0a070518f..42c68caf71 100644 --- a/include/standard-headers/linux/virtio_net.h +++ b/include/standard-headers/linux/virtio_net.h @@ -56,7 +56,7 @@ #define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow * Steering */ #define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */ - +#define VIRTIO_NET_F_NOTF_COAL 53 /* Device supports notifications coalescing */ #define VIRTIO_NET_F_HASH_REPORT 57 /* Supports hash report */ #define VIRTIO_NET_F_RSS 60 /* Supports RSS RX steering */ #define VIRTIO_NET_F_RSC_EXT 61 /* extended coalescing info */ @@ -355,4 +355,36 @@ struct virtio_net_hash_config { #define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5 #define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0 +/* + * Control notifications coalescing. + * + * Request the device to change the notifications coalescing parameters. + * + * Available with the VIRTIO_NET_F_NOTF_COAL feature bit. + */ +#define VIRTIO_NET_CTRL_NOTF_COAL 6 +/* + * Set the tx-usecs/tx-max-packets parameters. + */ +struct virtio_net_ctrl_coal_tx { + /* Maximum number of packets to send before a TX notification */ + uint32_t tx_max_packets; + /* Maximum number of usecs to delay a TX notification */ + uint32_t tx_usecs; +}; + +#define VIRTIO_NET_CTRL_NOTF_COAL_TX_SET 0 + +/* + * Set the rx-usecs/rx-max-packets parameters. 
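[Editor's sketch, not part of the patch: the virtio-net hunk above adds VIRTIO_NET_F_NOTF_COAL and the TX coalescing payload. A minimal example of the payload a driver would place on the control virtqueue for class VIRTIO_NET_CTRL_NOTF_COAL (6), command VIRTIO_NET_CTRL_NOTF_COAL_TX_SET (0); the struct is mirrored locally and the control-queue framing is omitted:]

#include <stdint.h>

/* Local mirror of struct virtio_net_ctrl_coal_tx from the hunk above. */
struct net_coal_tx {
    uint32_t tx_max_packets;
    uint32_t tx_usecs;
};

/* Notify after at most 32 sent packets or 64 microseconds, whichever first. */
static struct net_coal_tx example_tx_coalescing(void)
{
    struct net_coal_tx cfg = {
        .tx_max_packets = 32,
        .tx_usecs       = 64,
    };
    return cfg;
}
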
+ */ +struct virtio_net_ctrl_coal_rx { + /* Maximum number of packets to receive before a RX notification */ + uint32_t rx_max_packets; + /* Maximum number of usecs to delay a RX notification */ + uint32_t rx_usecs; +}; + +#define VIRTIO_NET_CTRL_NOTF_COAL_RX_SET 1 + #endif /* _LINUX_VIRTIO_NET_H */ diff --git a/include/standard-headers/linux/virtio_pci.h b/include/standard-headers/linux/virtio_pci.h index db7a8e2fcb..be912cfc95 100644 --- a/include/standard-headers/linux/virtio_pci.h +++ b/include/standard-headers/linux/virtio_pci.h @@ -202,6 +202,8 @@ struct virtio_pci_cfg_cap { #define VIRTIO_PCI_COMMON_Q_AVAILHI 44 #define VIRTIO_PCI_COMMON_Q_USEDLO 48 #define VIRTIO_PCI_COMMON_Q_USEDHI 52 +#define VIRTIO_PCI_COMMON_Q_NDATA 56 +#define VIRTIO_PCI_COMMON_Q_RESET 58 #endif /* VIRTIO_PCI_NO_MODERN */ diff --git a/include/standard-headers/linux/virtio_pcidev.h b/include/standard-headers/linux/virtio_pcidev.h new file mode 100644 index 0000000000..bdf1d062da --- /dev/null +++ b/include/standard-headers/linux/virtio_pcidev.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* + * Copyright (C) 2021 Intel Corporation + * Author: Johannes Berg + */ +#ifndef _LINUX_VIRTIO_PCIDEV_H +#define _LINUX_VIRTIO_PCIDEV_H +#include "standard-headers/linux/types.h" + +/** + * enum virtio_pcidev_ops - virtual PCI device operations + * @VIRTIO_PCIDEV_OP_RESERVED: reserved to catch errors + * @VIRTIO_PCIDEV_OP_CFG_READ: read config space, size is 1, 2, 4 or 8; + * the @data field should be filled in by the device (in little endian). + * @VIRTIO_PCIDEV_OP_CFG_WRITE: write config space, size is 1, 2, 4 or 8; + * the @data field contains the data to write (in little endian). + * @VIRTIO_PCIDEV_OP_MMIO_READ: read BAR mem/pio, size can be variable; + * the @data field should be filled in by the device (in little endian). + * @VIRTIO_PCIDEV_OP_MMIO_WRITE: write BAR mem/pio, size can be variable; + * the @data field contains the data to write (in little endian). + * @VIRTIO_PCIDEV_OP_MMIO_MEMSET: memset MMIO, size is variable but + * the @data field only has one byte (unlike @VIRTIO_PCIDEV_OP_MMIO_WRITE) + * @VIRTIO_PCIDEV_OP_INT: legacy INTx# pin interrupt, the addr field is 1-4 for + * the number + * @VIRTIO_PCIDEV_OP_MSI: MSI(-X) interrupt, this message basically transports + * the 16- or 32-bit write that would otherwise be done into memory, + * analogous to the write messages (@VIRTIO_PCIDEV_OP_MMIO_WRITE) above + * @VIRTIO_PCIDEV_OP_PME: Dummy message whose content is ignored (and should be + * all zeroes) to signal the PME# pin. + */ +enum virtio_pcidev_ops { + VIRTIO_PCIDEV_OP_RESERVED = 0, + VIRTIO_PCIDEV_OP_CFG_READ, + VIRTIO_PCIDEV_OP_CFG_WRITE, + VIRTIO_PCIDEV_OP_MMIO_READ, + VIRTIO_PCIDEV_OP_MMIO_WRITE, + VIRTIO_PCIDEV_OP_MMIO_MEMSET, + VIRTIO_PCIDEV_OP_INT, + VIRTIO_PCIDEV_OP_MSI, + VIRTIO_PCIDEV_OP_PME, +}; + +/** + * struct virtio_pcidev_msg - virtio PCI device operation + * @op: the operation to do + * @bar: the bar (only with BAR read/write messages) + * @reserved: reserved + * @size: the size of the read/write (in bytes) + * @addr: the address to read/write + * @data: the data, normally @size long, but just one byte for + * %VIRTIO_PCIDEV_OP_MMIO_MEMSET + * + * Note: the fields are all in native (CPU) endian, however, the + * @data values will often be in little endian (see the ops above.) 
+ */ +struct virtio_pcidev_msg { + uint8_t op; + uint8_t bar; + uint16_t reserved; + uint32_t size; + uint64_t addr; + uint8_t data[]; +}; + +#endif /* _LINUX_VIRTIO_PCIDEV_H */ diff --git a/include/standard-headers/linux/virtio_ring.h b/include/standard-headers/linux/virtio_ring.h index 0fa0e1067f..22f6eb8ca7 100644 --- a/include/standard-headers/linux/virtio_ring.h +++ b/include/standard-headers/linux/virtio_ring.h @@ -91,15 +91,21 @@ #define VRING_USED_ALIGN_SIZE 4 #define VRING_DESC_ALIGN_SIZE 16 -/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ +/** + * struct vring_desc - Virtio ring descriptors, + * 16 bytes long. These can chain together via @next. + * + * @addr: buffer address (guest-physical) + * @len: buffer length + * @flags: descriptor flags + * @next: index of the next descriptor in the chain, + * if the VRING_DESC_F_NEXT flag is set. We chain unused + * descriptors via this, too. + */ struct vring_desc { - /* Address (guest-physical). */ __virtio64 addr; - /* Length. */ __virtio32 len; - /* The flags as indicated above. */ __virtio16 flags; - /* We chain unused descriptors via this, too */ __virtio16 next; }; diff --git a/include/standard-headers/linux/virtio_scmi.h b/include/standard-headers/linux/virtio_scmi.h new file mode 100644 index 0000000000..8f2c305aea --- /dev/null +++ b/include/standard-headers/linux/virtio_scmi.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* + * Copyright (C) 2020-2021 OpenSynergy GmbH + * Copyright (C) 2021 ARM Ltd. + */ + +#ifndef _LINUX_VIRTIO_SCMI_H +#define _LINUX_VIRTIO_SCMI_H + +#include "standard-headers/linux/virtio_types.h" + +/* Device implements some SCMI notifications, or delayed responses. */ +#define VIRTIO_SCMI_F_P2A_CHANNELS 0 + +/* Device implements any SCMI statistics shared memory region */ +#define VIRTIO_SCMI_F_SHARED_MEMORY 1 + +/* Virtqueues */ + +#define VIRTIO_SCMI_VQ_TX 0 /* cmdq */ +#define VIRTIO_SCMI_VQ_RX 1 /* eventq */ +#define VIRTIO_SCMI_VQ_MAX_CNT 2 + +#endif /* _LINUX_VIRTIO_SCMI_H */ diff --git a/include/standard-headers/linux/virtio_vsock.h b/include/standard-headers/linux/virtio_vsock.h index 3a23488e42..467e751b17 100644 --- a/include/standard-headers/linux/virtio_vsock.h +++ b/include/standard-headers/linux/virtio_vsock.h @@ -97,7 +97,8 @@ enum virtio_vsock_shutdown { /* VIRTIO_VSOCK_OP_RW flags values */ enum virtio_vsock_rw { - VIRTIO_VSOCK_SEQ_EOR = 1, + VIRTIO_VSOCK_SEQ_EOM = 1, + VIRTIO_VSOCK_SEQ_EOR = 2, }; #endif /* _LINUX_VIRTIO_VSOCK_H */ diff --git a/include/sysemu/accel-ops.h b/include/sysemu/accel-ops.h index 032f6979d7..8cc7996def 100644 --- a/include/sysemu/accel-ops.h +++ b/include/sysemu/accel-ops.h @@ -10,6 +10,7 @@ #ifndef ACCEL_OPS_H #define ACCEL_OPS_H +#include "exec/hwaddr.h" #include "qom/object.h" #define ACCEL_OPS_SUFFIX "-ops" @@ -28,18 +29,28 @@ struct AccelOpsClass { /* initialization function called when accel is chosen */ void (*ops_init)(AccelOpsClass *ops); + bool (*cpus_are_resettable)(void); + void (*create_vcpu_thread)(CPUState *cpu); /* MANDATORY NON-NULL */ void (*kick_vcpu_thread)(CPUState *cpu); + bool (*cpu_thread_is_idle)(CPUState *cpu); void (*synchronize_post_reset)(CPUState *cpu); void (*synchronize_post_init)(CPUState *cpu); void (*synchronize_state)(CPUState *cpu); void (*synchronize_pre_loadvm)(CPUState *cpu); + void (*synchronize_pre_resume)(bool step_pending); void (*handle_interrupt)(CPUState *cpu, int mask); int64_t (*get_virtual_clock)(void); int64_t 
(*get_elapsed_ticks)(void); + + /* gdbstub hooks */ + bool (*supports_guest_debug)(void); + int (*insert_breakpoint)(CPUState *cpu, int type, hwaddr addr, hwaddr len); + int (*remove_breakpoint)(CPUState *cpu, int type, hwaddr addr, hwaddr len); + void (*remove_all_breakpoints)(CPUState *cpu); }; #endif /* ACCEL_OPS_H */ diff --git a/include/sysemu/arch_init.h b/include/sysemu/arch_init.h index e723c467eb..8850cb1a14 100644 --- a/include/sysemu/arch_init.h +++ b/include/sysemu/arch_init.h @@ -23,22 +23,12 @@ enum { QEMU_ARCH_RISCV = (1 << 19), QEMU_ARCH_RX = (1 << 20), QEMU_ARCH_AVR = (1 << 21), - - QEMU_ARCH_NONE = (1 << 31), + QEMU_ARCH_HEXAGON = (1 << 22), + QEMU_ARCH_LOONGARCH = (1 << 23), }; extern const uint32_t arch_type; -int kvm_available(void); -int xen_available(void); - -/* default virtio transport per architecture */ -#define QEMU_ARCH_VIRTIO_PCI (QEMU_ARCH_ALPHA | QEMU_ARCH_ARM | \ - QEMU_ARCH_HPPA | QEMU_ARCH_I386 | \ - QEMU_ARCH_MIPS | QEMU_ARCH_PPC | \ - QEMU_ARCH_RISCV | QEMU_ARCH_SH4 | \ - QEMU_ARCH_SPARC | QEMU_ARCH_XTENSA) -#define QEMU_ARCH_VIRTIO_CCW (QEMU_ARCH_S390X) -#define QEMU_ARCH_VIRTIO_MMIO (QEMU_ARCH_M68K) +void qemu_init_arch_modules(void); #endif diff --git a/include/sysemu/block-backend-common.h b/include/sysemu/block-backend-common.h new file mode 100644 index 0000000000..2391679c56 --- /dev/null +++ b/include/sysemu/block-backend-common.h @@ -0,0 +1,102 @@ +/* + * QEMU Block backends + * + * Copyright (C) 2014-2016 Red Hat, Inc. + * + * Authors: + * Markus Armbruster , + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 + * or later. See the COPYING.LIB file in the top-level directory. + */ + +#ifndef BLOCK_BACKEND_COMMON_H +#define BLOCK_BACKEND_COMMON_H + +#include "qemu/iov.h" +#include "block/throttle-groups.h" + +/* + * TODO Have to include block/block.h for a bunch of block layer + * types. Unfortunately, this pulls in the whole BlockDriverState + * API, which we don't want used by many BlockBackend users. Some of + * the types belong here, and the rest should be split into a common + * header and one for the BlockDriverState API. + */ +#include "block/block.h" + +/* Callbacks for block device models */ +typedef struct BlockDevOps { + + /* + * Global state (GS) API. These functions run under the BQL. + * + * See include/block/block-global-state.h for more information about + * the GS API. + */ + + /* + * Runs when virtual media changed (monitor commands eject, change) + * Argument load is true on load and false on eject. + * Beware: doesn't run when a host device's physical media + * changes. Sure would be useful if it did. + * Device models with removable media must implement this callback. + */ + void (*change_media_cb)(void *opaque, bool load, Error **errp); + /* + * Runs when an eject request is issued from the monitor, the tray + * is closed, and the medium is locked. + * Device models that do not implement is_medium_locked will not need + * this callback. Device models that can lock the medium or tray might + * want to implement the callback and unlock the tray when "force" is + * true, even if they do not support eject requests. + */ + void (*eject_request_cb)(void *opaque, bool force); + + /* + * Is the virtual medium locked into the device? + * Device models implement this only when device has such a lock. + */ + bool (*is_medium_locked)(void *opaque); + + /* + * I/O API functions. These functions are thread-safe. + * + * See include/block/block-io.h for more information about + * the I/O API. 
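[Editor's sketch, not part of the patch: AccelOpsClass now carries gdbstub hooks (supports_guest_debug, insert/remove_breakpoint, remove_all_breakpoints). An illustrative fragment of how an accelerator's ops setup might wire one of them; the function names are placeholders and a real accelerator fills many more fields:]

#include "qemu/osdep.h"
#include "sysemu/accel-ops.h"

static bool my_accel_supports_guest_debug(void)
{
    return true;
}

static void my_accel_ops_init(AccelOpsClass *ops)
{
    ops->supports_guest_debug = my_accel_supports_guest_debug;
    /* insert_breakpoint / remove_breakpoint / remove_all_breakpoints would be
     * assigned here as well when the accelerator can implement them. */
}
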
+ */ + + /* + * Is the virtual tray open? + * Device models implement this only when the device has a tray. + */ + bool (*is_tray_open)(void *opaque); + + /* + * Runs when the size changed (e.g. monitor command block_resize) + */ + void (*resize_cb)(void *opaque); + /* + * Runs when the backend receives a drain request. + */ + void (*drained_begin)(void *opaque); + /* + * Runs when the backend's last drain request ends. + */ + void (*drained_end)(void *opaque); + /* + * Is the device still busy? + */ + bool (*drained_poll)(void *opaque); +} BlockDevOps; + +/* + * This struct is embedded in (the private) BlockBackend struct and contains + * fields that must be public. This is in particular for QLIST_ENTRY() and + * friends so that BlockBackends can be kept in lists outside block-backend.c + */ +typedef struct BlockBackendPublic { + ThrottleGroupMember throttle_group_member; +} BlockBackendPublic; + +#endif /* BLOCK_BACKEND_COMMON_H */ diff --git a/include/sysemu/block-backend-global-state.h b/include/sysemu/block-backend-global-state.h new file mode 100644 index 0000000000..6858e39cb6 --- /dev/null +++ b/include/sysemu/block-backend-global-state.h @@ -0,0 +1,116 @@ +/* + * QEMU Block backends + * + * Copyright (C) 2014-2016 Red Hat, Inc. + * + * Authors: + * Markus Armbruster , + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 + * or later. See the COPYING.LIB file in the top-level directory. + */ + +#ifndef BLOCK_BACKEND_GLOBAL_STATE_H +#define BLOCK_BACKEND_GLOBAL_STATE_H + +#include "block-backend-common.h" + +/* + * Global state (GS) API. These functions run under the BQL. + * + * See include/block/block-global-state.h for more information about + * the GS API. + */ + +BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm); +BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm, + uint64_t shared_perm, Error **errp); +BlockBackend *blk_new_open(const char *filename, const char *reference, + QDict *options, int flags, Error **errp); +int blk_get_refcnt(BlockBackend *blk); +void blk_ref(BlockBackend *blk); +void blk_unref(BlockBackend *blk); +void blk_remove_all_bs(void); +BlockBackend *blk_by_name(const char *name); +BlockBackend *blk_next(BlockBackend *blk); +BlockBackend *blk_all_next(BlockBackend *blk); +bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp); +void monitor_remove_blk(BlockBackend *blk); + +BlockBackendPublic *blk_get_public(BlockBackend *blk); +BlockBackend *blk_by_public(BlockBackendPublic *public); + +void blk_remove_bs(BlockBackend *blk); +int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp); +int blk_replace_bs(BlockBackend *blk, BlockDriverState *new_bs, Error **errp); +bool bdrv_has_blk(BlockDriverState *bs); +bool bdrv_is_root_node(BlockDriverState *bs); +int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm, + Error **errp); +void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm); + +void blk_iostatus_enable(BlockBackend *blk); +BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk); +void blk_iostatus_disable(BlockBackend *blk); +void blk_iostatus_reset(BlockBackend *blk); +int blk_attach_dev(BlockBackend *blk, DeviceState *dev); +void blk_detach_dev(BlockBackend *blk, DeviceState *dev); +DeviceState *blk_get_attached_dev(BlockBackend *blk); +BlockBackend *blk_by_dev(void *dev); +BlockBackend *blk_by_qdev_id(const char *id, Error **errp); +void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, void *opaque); + +void 
blk_activate(BlockBackend *blk, Error **errp); + +int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags); +void blk_aio_cancel(BlockAIOCB *acb); +int blk_commit_all(void); +void blk_drain(BlockBackend *blk); +void blk_drain_all(void); +void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error, + BlockdevOnError on_write_error); +bool blk_supports_write_perm(BlockBackend *blk); +bool blk_is_sg(BlockBackend *blk); +void blk_set_enable_write_cache(BlockBackend *blk, bool wce); +int blk_get_flags(BlockBackend *blk); +bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp); +void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason); +void blk_op_block_all(BlockBackend *blk, Error *reason); +void blk_op_unblock_all(BlockBackend *blk, Error *reason); +int blk_set_aio_context(BlockBackend *blk, AioContext *new_context, + Error **errp); +void blk_add_aio_context_notifier(BlockBackend *blk, + void (*attached_aio_context)(AioContext *new_context, void *opaque), + void (*detach_aio_context)(void *opaque), void *opaque); +void blk_remove_aio_context_notifier(BlockBackend *blk, + void (*attached_aio_context)(AioContext *, + void *), + void (*detach_aio_context)(void *), + void *opaque); +void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify); +void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify); +BlockBackendRootState *blk_get_root_state(BlockBackend *blk); +void blk_update_root_state(BlockBackend *blk); +bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk); +int blk_get_open_flags_from_root_state(BlockBackend *blk); + +int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf, + int64_t pos, int size); +int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size); +int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz); +int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo); + +void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg); +void blk_io_limits_disable(BlockBackend *blk); +void blk_io_limits_enable(BlockBackend *blk, const char *group); +void blk_io_limits_update_group(BlockBackend *blk, const char *group); +void blk_set_force_allow_inactivate(BlockBackend *blk); + +bool blk_register_buf(BlockBackend *blk, void *host, size_t size, Error **errp); +void blk_unregister_buf(BlockBackend *blk, void *host, size_t size); + +const BdrvChild *blk_root(BlockBackend *blk); + +int blk_make_empty(BlockBackend *blk, Error **errp); + +#endif /* BLOCK_BACKEND_GLOBAL_STATE_H */ diff --git a/include/sysemu/block-backend-io.h b/include/sysemu/block-backend-io.h new file mode 100644 index 0000000000..50f5aa2e07 --- /dev/null +++ b/include/sysemu/block-backend-io.h @@ -0,0 +1,179 @@ +/* + * QEMU Block backends + * + * Copyright (C) 2014-2016 Red Hat, Inc. + * + * Authors: + * Markus Armbruster , + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 + * or later. See the COPYING.LIB file in the top-level directory. + */ + +#ifndef BLOCK_BACKEND_IO_H +#define BLOCK_BACKEND_IO_H + +#include "block-backend-common.h" + +/* + * I/O API functions. These functions are thread-safe. + * + * See include/block/block-io.h for more information about + * the I/O API. 
+ */ + +const char *blk_name(const BlockBackend *blk); + +BlockDriverState *blk_bs(BlockBackend *blk); + +void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow); +void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow); +void blk_set_disable_request_queuing(BlockBackend *blk, bool disable); +bool blk_iostatus_is_enabled(const BlockBackend *blk); + +char *blk_get_attached_dev_id(BlockBackend *blk); + +BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset, + int64_t bytes, BdrvRequestFlags flags, + BlockCompletionFunc *cb, void *opaque); + +BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset, + QEMUIOVector *qiov, BdrvRequestFlags flags, + BlockCompletionFunc *cb, void *opaque); +BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset, + QEMUIOVector *qiov, BdrvRequestFlags flags, + BlockCompletionFunc *cb, void *opaque); +BlockAIOCB *blk_aio_flush(BlockBackend *blk, + BlockCompletionFunc *cb, void *opaque); +BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes, + BlockCompletionFunc *cb, void *opaque); +void blk_aio_cancel_async(BlockAIOCB *acb); +BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf, + BlockCompletionFunc *cb, void *opaque); + +void blk_inc_in_flight(BlockBackend *blk); +void blk_dec_in_flight(BlockBackend *blk); +bool blk_is_inserted(BlockBackend *blk); +bool blk_is_available(BlockBackend *blk); +void blk_lock_medium(BlockBackend *blk, bool locked); +void blk_eject(BlockBackend *blk, bool eject_flag); +int64_t blk_getlength(BlockBackend *blk); +void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr); +int64_t blk_nb_sectors(BlockBackend *blk); +void *blk_try_blockalign(BlockBackend *blk, size_t size); +void *blk_blockalign(BlockBackend *blk, size_t size); +bool blk_is_writable(BlockBackend *blk); +bool blk_enable_write_cache(BlockBackend *blk); +BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read); +BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read, + int error); +void blk_error_action(BlockBackend *blk, BlockErrorAction action, + bool is_read, int error); +void blk_iostatus_set_err(BlockBackend *blk, int error); +int blk_get_max_iov(BlockBackend *blk); +int blk_get_max_hw_iov(BlockBackend *blk); + +void blk_io_plug(BlockBackend *blk); +void blk_io_unplug(BlockBackend *blk); +AioContext *blk_get_aio_context(BlockBackend *blk); +BlockAcctStats *blk_get_stats(BlockBackend *blk); +void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk, + BlockCompletionFunc *cb, void *opaque); +BlockAIOCB *blk_abort_aio_request(BlockBackend *blk, + BlockCompletionFunc *cb, + void *opaque, int ret); + +uint32_t blk_get_request_alignment(BlockBackend *blk); +uint32_t blk_get_max_transfer(BlockBackend *blk); +uint64_t blk_get_max_hw_transfer(BlockBackend *blk); + +int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in, + BlockBackend *blk_out, int64_t off_out, + int64_t bytes, BdrvRequestFlags read_flags, + BdrvRequestFlags write_flags); + + +/* + * "I/O or GS" API functions. These functions can run without + * the BQL, but only in one specific iothread/main loop. + * + * See include/block/block-io.h for more information about + * the "I/O or GS" API. 
+ */ + +int generated_co_wrapper blk_pread(BlockBackend *blk, int64_t offset, + int64_t bytes, void *buf, + BdrvRequestFlags flags); +int coroutine_fn blk_co_pread(BlockBackend *blk, int64_t offset, int64_t bytes, + void *buf, BdrvRequestFlags flags); + +int generated_co_wrapper blk_preadv(BlockBackend *blk, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags); +int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags); + +int generated_co_wrapper blk_preadv_part(BlockBackend *blk, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + size_t qiov_offset, + BdrvRequestFlags flags); +int coroutine_fn blk_co_preadv_part(BlockBackend *blk, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + size_t qiov_offset, BdrvRequestFlags flags); + +int generated_co_wrapper blk_pwrite(BlockBackend *blk, int64_t offset, + int64_t bytes, const void *buf, + BdrvRequestFlags flags); +int coroutine_fn blk_co_pwrite(BlockBackend *blk, int64_t offset, int64_t bytes, + const void *buf, BdrvRequestFlags flags); + +int generated_co_wrapper blk_pwritev(BlockBackend *blk, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags); +int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags); + +int generated_co_wrapper blk_pwritev_part(BlockBackend *blk, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + size_t qiov_offset, + BdrvRequestFlags flags); +int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset, + int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, + BdrvRequestFlags flags); + +int generated_co_wrapper blk_pwrite_compressed(BlockBackend *blk, + int64_t offset, int64_t bytes, + const void *buf); +int coroutine_fn blk_co_pwrite_compressed(BlockBackend *blk, int64_t offset, + int64_t bytes, const void *buf); + +int generated_co_wrapper blk_pwrite_zeroes(BlockBackend *blk, int64_t offset, + int64_t bytes, + BdrvRequestFlags flags); +int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset, + int64_t bytes, BdrvRequestFlags flags); + +int generated_co_wrapper blk_pdiscard(BlockBackend *blk, int64_t offset, + int64_t bytes); +int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, + int64_t bytes); + +int generated_co_wrapper blk_flush(BlockBackend *blk); +int coroutine_fn blk_co_flush(BlockBackend *blk); + +int generated_co_wrapper blk_ioctl(BlockBackend *blk, unsigned long int req, + void *buf); +int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req, + void *buf); + +int generated_co_wrapper blk_truncate(BlockBackend *blk, int64_t offset, + bool exact, PreallocMode prealloc, + BdrvRequestFlags flags, Error **errp); +int coroutine_fn blk_co_truncate(BlockBackend *blk, int64_t offset, bool exact, + PreallocMode prealloc, BdrvRequestFlags flags, + Error **errp); + +#endif /* BLOCK_BACKEND_IO_H */ diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h index 9ac5f7bbd3..038be9fc40 100644 --- a/include/sysemu/block-backend.h +++ b/include/sysemu/block-backend.h @@ -13,265 +13,9 @@ #ifndef BLOCK_BACKEND_H #define BLOCK_BACKEND_H -#include "qemu/iov.h" -#include "block/throttle-groups.h" +#include "block-backend-global-state.h" +#include "block-backend-io.h" -/* - * TODO Have to include block/block.h for a bunch of block layer - * types. 
Unfortunately, this pulls in the whole BlockDriverState - * API, which we don't want used by many BlockBackend users. Some of - * the types belong here, and the rest should be split into a common - * header and one for the BlockDriverState API. - */ -#include "block/block.h" - -/* Callbacks for block device models */ -typedef struct BlockDevOps { - /* - * Runs when virtual media changed (monitor commands eject, change) - * Argument load is true on load and false on eject. - * Beware: doesn't run when a host device's physical media - * changes. Sure would be useful if it did. - * Device models with removable media must implement this callback. - */ - void (*change_media_cb)(void *opaque, bool load, Error **errp); - /* - * Runs when an eject request is issued from the monitor, the tray - * is closed, and the medium is locked. - * Device models that do not implement is_medium_locked will not need - * this callback. Device models that can lock the medium or tray might - * want to implement the callback and unlock the tray when "force" is - * true, even if they do not support eject requests. - */ - void (*eject_request_cb)(void *opaque, bool force); - /* - * Is the virtual tray open? - * Device models implement this only when the device has a tray. - */ - bool (*is_tray_open)(void *opaque); - /* - * Is the virtual medium locked into the device? - * Device models implement this only when device has such a lock. - */ - bool (*is_medium_locked)(void *opaque); - /* - * Runs when the size changed (e.g. monitor command block_resize) - */ - void (*resize_cb)(void *opaque); - /* - * Runs when the backend receives a drain request. - */ - void (*drained_begin)(void *opaque); - /* - * Runs when the backend's last drain request ends. - */ - void (*drained_end)(void *opaque); - /* - * Is the device still busy? - */ - bool (*drained_poll)(void *opaque); -} BlockDevOps; - -/* This struct is embedded in (the private) BlockBackend struct and contains - * fields that must be public. 
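[Editor's sketch, not part of the patch: block-backend-io.h above declares blk_pread()/blk_pwrite() with an explicit byte count and a BdrvRequestFlags argument. A minimal caller under the new signatures, assuming a QEMU build environment; the wrapper name is made up and error handling is trimmed:]

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"

/* Read the first 512 bytes of a backend with the new argument order
 * (offset, bytes, buf, flags); a negative errno indicates failure. */
static int my_read_header(BlockBackend *blk, void *buf)
{
    return blk_pread(blk, 0, 512, buf, 0);
}
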
This is in particular for QLIST_ENTRY() and - * friends so that BlockBackends can be kept in lists outside block-backend.c - * */ -typedef struct BlockBackendPublic { - ThrottleGroupMember throttle_group_member; -} BlockBackendPublic; - -BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm); -BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm, - uint64_t shared_perm, Error **errp); -BlockBackend *blk_new_open(const char *filename, const char *reference, - QDict *options, int flags, Error **errp); -int blk_get_refcnt(BlockBackend *blk); -void blk_ref(BlockBackend *blk); -void blk_unref(BlockBackend *blk); -void blk_remove_all_bs(void); -const char *blk_name(const BlockBackend *blk); -BlockBackend *blk_by_name(const char *name); -BlockBackend *blk_next(BlockBackend *blk); -BlockBackend *blk_all_next(BlockBackend *blk); -bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp); -void monitor_remove_blk(BlockBackend *blk); - -BlockBackendPublic *blk_get_public(BlockBackend *blk); -BlockBackend *blk_by_public(BlockBackendPublic *public); - -BlockDriverState *blk_bs(BlockBackend *blk); -void blk_remove_bs(BlockBackend *blk); -int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp); -bool bdrv_has_blk(BlockDriverState *bs); -bool bdrv_is_root_node(BlockDriverState *bs); -int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm, - Error **errp); -void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm); - -void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow); -void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow); -void blk_set_disable_request_queuing(BlockBackend *blk, bool disable); -void blk_iostatus_enable(BlockBackend *blk); -bool blk_iostatus_is_enabled(const BlockBackend *blk); -BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk); -void blk_iostatus_disable(BlockBackend *blk); -void blk_iostatus_reset(BlockBackend *blk); -void blk_iostatus_set_err(BlockBackend *blk, int error); -int blk_attach_dev(BlockBackend *blk, DeviceState *dev); -void blk_detach_dev(BlockBackend *blk, DeviceState *dev); -DeviceState *blk_get_attached_dev(BlockBackend *blk); -char *blk_get_attached_dev_id(BlockBackend *blk); -BlockBackend *blk_by_dev(void *dev); -BlockBackend *blk_by_qdev_id(const char *id, Error **errp); -void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, void *opaque); -int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset, - unsigned int bytes, QEMUIOVector *qiov, - BdrvRequestFlags flags); -int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset, - unsigned int bytes, - QEMUIOVector *qiov, size_t qiov_offset, - BdrvRequestFlags flags); -int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset, - unsigned int bytes, QEMUIOVector *qiov, - BdrvRequestFlags flags); - -static inline int coroutine_fn blk_co_pread(BlockBackend *blk, int64_t offset, - unsigned int bytes, void *buf, - BdrvRequestFlags flags) -{ - QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); - - return blk_co_preadv(blk, offset, bytes, &qiov, flags); -} - -static inline int coroutine_fn blk_co_pwrite(BlockBackend *blk, int64_t offset, - unsigned int bytes, void *buf, - BdrvRequestFlags flags) -{ - QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); - - return blk_co_pwritev(blk, offset, bytes, &qiov, flags); -} - -int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset, - int bytes, BdrvRequestFlags flags); -BlockAIOCB 
*blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset, - int bytes, BdrvRequestFlags flags, - BlockCompletionFunc *cb, void *opaque); -int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags); -int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int bytes); -int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int bytes, - BdrvRequestFlags flags); -int64_t blk_getlength(BlockBackend *blk); -void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr); -int64_t blk_nb_sectors(BlockBackend *blk); -BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset, - QEMUIOVector *qiov, BdrvRequestFlags flags, - BlockCompletionFunc *cb, void *opaque); -BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset, - QEMUIOVector *qiov, BdrvRequestFlags flags, - BlockCompletionFunc *cb, void *opaque); -BlockAIOCB *blk_aio_flush(BlockBackend *blk, - BlockCompletionFunc *cb, void *opaque); -BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int bytes, - BlockCompletionFunc *cb, void *opaque); -void blk_aio_cancel(BlockAIOCB *acb); -void blk_aio_cancel_async(BlockAIOCB *acb); -int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf); -BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf, - BlockCompletionFunc *cb, void *opaque); -int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes); -int blk_co_flush(BlockBackend *blk); -int blk_flush(BlockBackend *blk); -int blk_commit_all(void); -void blk_inc_in_flight(BlockBackend *blk); -void blk_dec_in_flight(BlockBackend *blk); -void blk_drain(BlockBackend *blk); -void blk_drain_all(void); -void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error, - BlockdevOnError on_write_error); -BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read); -BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read, - int error); -void blk_error_action(BlockBackend *blk, BlockErrorAction action, - bool is_read, int error); -bool blk_supports_write_perm(BlockBackend *blk); -bool blk_is_writable(BlockBackend *blk); -bool blk_is_sg(BlockBackend *blk); -bool blk_enable_write_cache(BlockBackend *blk); -void blk_set_enable_write_cache(BlockBackend *blk, bool wce); -void blk_invalidate_cache(BlockBackend *blk, Error **errp); -bool blk_is_inserted(BlockBackend *blk); -bool blk_is_available(BlockBackend *blk); -void blk_lock_medium(BlockBackend *blk, bool locked); -void blk_eject(BlockBackend *blk, bool eject_flag); -int blk_get_flags(BlockBackend *blk); -uint32_t blk_get_request_alignment(BlockBackend *blk); -uint32_t blk_get_max_transfer(BlockBackend *blk); -uint64_t blk_get_max_hw_transfer(BlockBackend *blk); -int blk_get_max_iov(BlockBackend *blk); -void blk_set_guest_block_size(BlockBackend *blk, int align); -void *blk_try_blockalign(BlockBackend *blk, size_t size); -void *blk_blockalign(BlockBackend *blk, size_t size); -bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp); -void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason); -void blk_op_block_all(BlockBackend *blk, Error *reason); -void blk_op_unblock_all(BlockBackend *blk, Error *reason); -AioContext *blk_get_aio_context(BlockBackend *blk); -int blk_set_aio_context(BlockBackend *blk, AioContext *new_context, - Error **errp); -void blk_add_aio_context_notifier(BlockBackend *blk, - void (*attached_aio_context)(AioContext *new_context, void *opaque), - void (*detach_aio_context)(void *opaque), void *opaque); -void blk_remove_aio_context_notifier(BlockBackend *blk, - 
void (*attached_aio_context)(AioContext *, - void *), - void (*detach_aio_context)(void *), - void *opaque); -void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify); -void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify); -void blk_io_plug(BlockBackend *blk); -void blk_io_unplug(BlockBackend *blk); -BlockAcctStats *blk_get_stats(BlockBackend *blk); -BlockBackendRootState *blk_get_root_state(BlockBackend *blk); -void blk_update_root_state(BlockBackend *blk); -bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk); -int blk_get_open_flags_from_root_state(BlockBackend *blk); - -void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk, - BlockCompletionFunc *cb, void *opaque); -int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset, - int bytes, BdrvRequestFlags flags); -int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf, - int bytes); -int blk_truncate(BlockBackend *blk, int64_t offset, bool exact, - PreallocMode prealloc, BdrvRequestFlags flags, Error **errp); -int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes); -int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf, - int64_t pos, int size); -int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size); -int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz); -int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo); -BlockAIOCB *blk_abort_aio_request(BlockBackend *blk, - BlockCompletionFunc *cb, - void *opaque, int ret); - -void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg); -void blk_io_limits_disable(BlockBackend *blk); -void blk_io_limits_enable(BlockBackend *blk, const char *group); -void blk_io_limits_update_group(BlockBackend *blk, const char *group); -void blk_set_force_allow_inactivate(BlockBackend *blk); - -void blk_register_buf(BlockBackend *blk, void *host, size_t size); -void blk_unregister_buf(BlockBackend *blk, void *host); - -int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in, - BlockBackend *blk_out, int64_t off_out, - int bytes, BdrvRequestFlags read_flags, - BdrvRequestFlags write_flags); - -const BdrvChild *blk_root(BlockBackend *blk); - -int blk_make_empty(BlockBackend *blk, Error **errp); +/* DO NOT ADD ANYTHING IN HERE. USE ONE OF THE HEADERS INCLUDED ABOVE */ #endif diff --git a/include/sysemu/block-ram-registrar.h b/include/sysemu/block-ram-registrar.h new file mode 100644 index 0000000000..d8b2f7942b --- /dev/null +++ b/include/sysemu/block-ram-registrar.h @@ -0,0 +1,37 @@ +/* + * BlockBackend RAM Registrar + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef BLOCK_RAM_REGISTRAR_H +#define BLOCK_RAM_REGISTRAR_H + +#include "exec/ramlist.h" + +/** + * struct BlockRAMRegistrar: + * + * Keeps RAMBlock memory registered with a BlockBackend using + * blk_register_buf() including hotplugged memory. + * + * Emulated devices or other BlockBackend users initialize a BlockRAMRegistrar + * with blk_ram_registrar_init() before submitting I/O requests with the + * BDRV_REQ_REGISTERED_BUF flag set. + */ +typedef struct { + BlockBackend *blk; + RAMBlockNotifier notifier; + bool ok; +} BlockRAMRegistrar; + +void blk_ram_registrar_init(BlockRAMRegistrar *r, BlockBackend *blk); +void blk_ram_registrar_destroy(BlockRAMRegistrar *r); + +/* Have all RAMBlocks been registered successfully? 
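[Editor's sketch, not part of the patch: block-ram-registrar.h above keeps RAMBlocks registered with a BlockBackend. An illustrative device-model fragment showing the init/ok pairing; the device struct and function names are invented for the example:]

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "sysemu/block-ram-registrar.h"

typedef struct {
    BlockBackend *blk;
    BlockRAMRegistrar ram_registrar;
} MyDeviceState;

static void my_device_attach_blk(MyDeviceState *s, BlockBackend *blk)
{
    s->blk = blk;
    blk_ram_registrar_init(&s->ram_registrar, blk);
    if (!blk_ram_registrar_ok(&s->ram_registrar)) {
        /* Some registration failed: fall back to plain buffers instead of
         * submitting requests with BDRV_REQ_REGISTERED_BUF. */
    }
}
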
*/ +static inline bool blk_ram_registrar_ok(BlockRAMRegistrar *r) +{ + return r->ok; +} + +#endif /* BLOCK_RAM_REGISTRAR_H */ diff --git a/include/sysemu/blockdev.h b/include/sysemu/blockdev.h index 41d6302b4d..b3be0ae7e9 100644 --- a/include/sysemu/blockdev.h +++ b/include/sysemu/blockdev.h @@ -13,9 +13,6 @@ #include "block/block.h" #include "qemu/queue.h" -void blockdev_mark_auto_del(BlockBackend *blk); -void blockdev_auto_del(BlockBackend *blk); - typedef enum { IF_DEFAULT = -1, /* for use with drive_add() only */ /* @@ -39,6 +36,16 @@ struct DriveInfo { QTAILQ_ENTRY(DriveInfo) next; }; +/* + * Global state (GS) API. These functions run under the BQL. + * + * See include/block/block-global-state.h for more information about + * the GS API. + */ + +void blockdev_mark_auto_del(BlockBackend *blk); +void blockdev_auto_del(BlockBackend *blk); + DriveInfo *blk_legacy_dinfo(BlockBackend *blk); DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo); BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo); @@ -46,14 +53,10 @@ BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo); void override_max_devs(BlockInterfaceType type, int max_devs); DriveInfo *drive_get(BlockInterfaceType type, int bus, int unit); -void drive_mark_claimed_by_board(void); void drive_check_orphaned(void); DriveInfo *drive_get_by_index(BlockInterfaceType type, int index); int drive_get_max_bus(BlockInterfaceType type); -int drive_get_max_devs(BlockInterfaceType type); -DriveInfo *drive_get_next(BlockInterfaceType type); -QemuOpts *drive_def(const char *optstr); QemuOpts *drive_add(BlockInterfaceType type, int index, const char *file, const char *optstr); DriveInfo *drive_new(QemuOpts *arg, BlockInterfaceType block_default_type, diff --git a/include/sysemu/cpu-timers.h b/include/sysemu/cpu-timers.h index ed6ee5c46c..2e786fe7fb 100644 --- a/include/sysemu/cpu-timers.h +++ b/include/sysemu/cpu-timers.h @@ -59,6 +59,7 @@ int64_t icount_round(int64_t count); /* if the CPUs are idle, start accounting real time to virtual clock. 
*/ void icount_start_warp_timer(void); void icount_account_warp_timer(void); +void icount_notify_exit(void); /* * CPU Ticks and Clock diff --git a/include/sysemu/cpus.h b/include/sysemu/cpus.h index 868f1192de..1bace3379b 100644 --- a/include/sysemu/cpus.h +++ b/include/sysemu/cpus.h @@ -7,6 +7,9 @@ /* register accel-specific operations */ void cpus_register_accel(const AccelOpsClass *i); +/* return registers ops */ +const AccelOpsClass *cpus_get_accel(void); + /* accel/dummy-cpus.c */ /* Create a dummy vcpu for AccelOpsClass->create_vcpu_thread */ @@ -55,6 +58,4 @@ extern int smp_cores; extern int smp_threads; #endif -void list_cpus(const char *optarg); - #endif diff --git a/include/sysemu/cryptodev.h b/include/sysemu/cryptodev.h index f4d4057d4d..cf9b3f07fe 100644 --- a/include/sysemu/cryptodev.h +++ b/include/sysemu/cryptodev.h @@ -50,13 +50,13 @@ typedef struct CryptoDevBackendClient enum CryptoDevBackendAlgType { CRYPTODEV_BACKEND_ALG_SYM, + CRYPTODEV_BACKEND_ALG_ASYM, CRYPTODEV_BACKEND_ALG__MAX, }; /** * CryptoDevBackendSymSessionInfo: * - * @op_code: operation code (refer to virtio_crypto.h) * @cipher_alg: algorithm type of CIPHER * @key_len: byte length of cipher key * @hash_alg: algorithm type of HASH/MAC @@ -74,7 +74,6 @@ enum CryptoDevBackendAlgType { */ typedef struct CryptoDevBackendSymSessionInfo { /* corresponding with virtio crypto spec */ - uint32_t op_code; uint32_t cipher_alg; uint32_t key_len; uint32_t hash_alg; @@ -89,11 +88,37 @@ typedef struct CryptoDevBackendSymSessionInfo { uint8_t *auth_key; } CryptoDevBackendSymSessionInfo; +/** + * CryptoDevBackendAsymSessionInfo: + */ +typedef struct CryptoDevBackendRsaPara { + uint32_t padding_algo; + uint32_t hash_algo; +} CryptoDevBackendRsaPara; + +typedef struct CryptoDevBackendAsymSessionInfo { + /* corresponding with virtio crypto spec */ + uint32_t algo; + uint32_t keytype; + uint32_t keylen; + uint8_t *key; + union { + CryptoDevBackendRsaPara rsa; + } u; +} CryptoDevBackendAsymSessionInfo; + +typedef struct CryptoDevBackendSessionInfo { + uint32_t op_code; + union { + CryptoDevBackendSymSessionInfo sym_sess_info; + CryptoDevBackendAsymSessionInfo asym_sess_info; + } u; + uint64_t session_id; +} CryptoDevBackendSessionInfo; + /** * CryptoDevBackendSymOpInfo: * - * @session_id: session index which was previously - * created by cryptodev_backend_sym_create_session() * @aad_len: byte length of additional authenticated data * @iv_len: byte length of initialization vector or counter * @src_len: byte length of source data @@ -119,7 +144,6 @@ typedef struct CryptoDevBackendSymSessionInfo { * */ typedef struct CryptoDevBackendSymOpInfo { - uint64_t session_id; uint32_t aad_len; uint32_t iv_len; uint32_t src_len; @@ -138,27 +162,64 @@ typedef struct CryptoDevBackendSymOpInfo { uint8_t data[]; } CryptoDevBackendSymOpInfo; + +/** + * CryptoDevBackendAsymOpInfo: + * + * @src_len: byte length of source data + * @dst_len: byte length of destination data + * @src: point to the source data + * @dst: point to the destination data + * + */ +typedef struct CryptoDevBackendAsymOpInfo { + uint32_t src_len; + uint32_t dst_len; + uint8_t *src; + uint8_t *dst; +} CryptoDevBackendAsymOpInfo; + +typedef struct CryptoDevBackendOpInfo { + enum CryptoDevBackendAlgType algtype; + uint32_t op_code; + uint64_t session_id; + union { + CryptoDevBackendSymOpInfo *sym_op_info; + CryptoDevBackendAsymOpInfo *asym_op_info; + } u; +} CryptoDevBackendOpInfo; + +typedef void (*CryptoDevCompletionFunc) (void *opaque, int ret); struct CryptoDevBackendClass { 
ObjectClass parent_class; void (*init)(CryptoDevBackend *backend, Error **errp); void (*cleanup)(CryptoDevBackend *backend, Error **errp); - int64_t (*create_session)(CryptoDevBackend *backend, - CryptoDevBackendSymSessionInfo *sess_info, - uint32_t queue_index, Error **errp); + int (*create_session)(CryptoDevBackend *backend, + CryptoDevBackendSessionInfo *sess_info, + uint32_t queue_index, + CryptoDevCompletionFunc cb, + void *opaque); + int (*close_session)(CryptoDevBackend *backend, - uint64_t session_id, - uint32_t queue_index, Error **errp); - int (*do_sym_op)(CryptoDevBackend *backend, - CryptoDevBackendSymOpInfo *op_info, - uint32_t queue_index, Error **errp); + uint64_t session_id, + uint32_t queue_index, + CryptoDevCompletionFunc cb, + void *opaque); + + int (*do_op)(CryptoDevBackend *backend, + CryptoDevBackendOpInfo *op_info, + uint32_t queue_index, + CryptoDevCompletionFunc cb, + void *opaque); }; typedef enum CryptoDevBackendOptionsType { CRYPTODEV_BACKEND_TYPE_NONE = 0, CRYPTODEV_BACKEND_TYPE_BUILTIN = 1, CRYPTODEV_BACKEND_TYPE_VHOST_USER = 2, + CRYPTODEV_BACKEND_TYPE_LKCF = 3, CRYPTODEV_BACKEND_TYPE__MAX, } CryptoDevBackendOptionsType; @@ -190,6 +251,7 @@ struct CryptoDevBackendConf { uint32_t mac_algo_l; uint32_t mac_algo_h; uint32_t aead_algo; + uint32_t akcipher_algo; /* Maximum length of cipher key */ uint32_t max_cipher_key_len; /* Maximum length of authenticated key */ @@ -247,55 +309,69 @@ void cryptodev_backend_cleanup( Error **errp); /** - * cryptodev_backend_sym_create_session: + * cryptodev_backend_create_session: * @backend: the cryptodev backend object * @sess_info: parameters needed by session creating * @queue_index: queue index of cryptodev backend client * @errp: pointer to a NULL-initialized error object + * @cb: callback when session create is compeleted + * @opaque: parameter passed to callback * - * Create a session for symmetric algorithms + * Create a session for symmetric/asymmetric algorithms * - * Returns: session id on success, or -1 on error + * Returns: 0 for success and cb will be called when creation is completed, + * negative value for error, and cb will not be called. */ -int64_t cryptodev_backend_sym_create_session( +int cryptodev_backend_create_session( CryptoDevBackend *backend, - CryptoDevBackendSymSessionInfo *sess_info, - uint32_t queue_index, Error **errp); + CryptoDevBackendSessionInfo *sess_info, + uint32_t queue_index, + CryptoDevCompletionFunc cb, + void *opaque); /** - * cryptodev_backend_sym_close_session: + * cryptodev_backend_close_session: * @backend: the cryptodev backend object * @session_id: the session id * @queue_index: queue index of cryptodev backend client * @errp: pointer to a NULL-initialized error object + * @cb: callback when session create is compeleted + * @opaque: parameter passed to callback * - * Close a session for symmetric algorithms which was previously - * created by cryptodev_backend_sym_create_session() + * Close a session for which was previously + * created by cryptodev_backend_create_session() * - * Returns: 0 on success, or Negative on error + * Returns: 0 for success and cb will be called when creation is completed, + * negative value for error, and cb will not be called. 
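[Editor's sketch, not part of the patch: cryptodev_backend_create_session() is now asynchronous and reports completion through a CryptoDevCompletionFunc. A minimal caller under the prototype shown above; the callback and wrapper names are illustrative, and 'req' stands in for whatever per-request state the caller tracks:]

#include "qemu/osdep.h"
#include "sysemu/cryptodev.h"

static void my_create_session_done(void *opaque, int ret)
{
    if (ret < 0) {
        /* session creation failed asynchronously; 'opaque' is the request */
    }
}

static int my_create_session(CryptoDevBackend *backend,
                             CryptoDevBackendSessionInfo *info,
                             void *req)
{
    /* Returns 0 when queued (callback fires later) or a negative value when
     * rejected (callback never runs), matching the documented contract. */
    return cryptodev_backend_create_session(backend, info, 0,
                                            my_create_session_done, req);
}
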
*/ -int cryptodev_backend_sym_close_session( +int cryptodev_backend_close_session( CryptoDevBackend *backend, uint64_t session_id, - uint32_t queue_index, Error **errp); + uint32_t queue_index, + CryptoDevCompletionFunc cb, + void *opaque); /** * cryptodev_backend_crypto_operation: * @backend: the cryptodev backend object - * @opaque: pointer to a VirtIOCryptoReq object + * @opaque1: pointer to a VirtIOCryptoReq object * @queue_index: queue index of cryptodev backend client * @errp: pointer to a NULL-initialized error object + * @cb: callbacks when operation is completed + * @opaque2: parameter passed to cb * * Do crypto operation, such as encryption and * decryption * - * Returns: VIRTIO_CRYPTO_OK on success, - * or -VIRTIO_CRYPTO_* on error + * Returns: 0 for success and cb will be called when creation is completed, + * negative value for error, and cb will not be called. */ int cryptodev_backend_crypto_operation( CryptoDevBackend *backend, - void *opaque, - uint32_t queue_index, Error **errp); + void *opaque1, + uint32_t queue_index, + CryptoDevCompletionFunc cb, + void *opaque2); /** * cryptodev_backend_set_used: diff --git a/include/sysemu/device_tree.h b/include/sysemu/device_tree.h index 8a2fe55622..ca5339beae 100644 --- a/include/sysemu/device_tree.h +++ b/include/sysemu/device_tree.h @@ -121,6 +121,7 @@ uint32_t qemu_fdt_get_phandle(void *fdt, const char *path); uint32_t qemu_fdt_alloc_phandle(void *fdt); int qemu_fdt_nop_node(void *fdt, const char *node_path); int qemu_fdt_add_subnode(void *fdt, const char *name); +int qemu_fdt_add_path(void *fdt, const char *path); #define qemu_fdt_setprop_cells(fdt, node_path, property, ...) \ do { \ @@ -135,6 +136,7 @@ int qemu_fdt_add_subnode(void *fdt, const char *name); } while (0) void qemu_fdt_dumpdtb(void *fdt, int size); +void hmp_dumpdtb(Monitor *mon, const QDict *qdict); /** * qemu_fdt_setprop_sized_cells_from_array: @@ -195,6 +197,15 @@ int qemu_fdt_setprop_sized_cells_from_array(void *fdt, qdt_tmp); \ }) + +/** + * qemu_fdt_randomize_seeds: + * @fdt: device tree blob + * + * Re-randomize all "rng-seed" properties with new seeds. + */ +void qemu_fdt_randomize_seeds(void *fdt); + #define FDT_PCI_RANGE_RELOCATABLE 0x80000000 #define FDT_PCI_RANGE_PREFETCHABLE 0x40000000 #define FDT_PCI_RANGE_ALIASED 0x20000000 diff --git a/include/sysemu/dirtylimit.h b/include/sysemu/dirtylimit.h new file mode 100644 index 0000000000..8d2c1f3a6b --- /dev/null +++ b/include/sysemu/dirtylimit.h @@ -0,0 +1,37 @@ +/* + * Dirty page rate limit common functions + * + * Copyright (c) 2022 CHINA TELECOM CO.,LTD. + * + * Authors: + * Hyman Huang(黄勇) + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
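Referring back to the device_tree.h additions above: qemu_fdt_add_path() lets board code create a deeply nested node in one call, and qemu_fdt_randomize_seeds() refreshes every "rng-seed" property (for example before a reboot). A hypothetical sketch, assuming the path helper creates any missing parent nodes along the way:

static void fdt_add_cpu_map_core(void *fdt, int socket, int core,
                                 uint32_t cpu_phandle)
{
    g_autofree char *path =
        g_strdup_printf("/cpus/cpu-map/socket%d/core%d", socket, core);

    /* one call for the whole path, instead of one qemu_fdt_add_subnode()
     * per level */
    qemu_fdt_add_path(fdt, path);
    qemu_fdt_setprop_cells(fdt, path, "cpu", cpu_phandle);
}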
+ */ +#ifndef QEMU_DIRTYRLIMIT_H +#define QEMU_DIRTYRLIMIT_H + +#define DIRTYLIMIT_CALC_TIME_MS 1000 /* 1000ms */ + +int64_t vcpu_dirty_rate_get(int cpu_index); +void vcpu_dirty_rate_stat_start(void); +void vcpu_dirty_rate_stat_stop(void); +void vcpu_dirty_rate_stat_initialize(void); +void vcpu_dirty_rate_stat_finalize(void); + +void dirtylimit_state_lock(void); +void dirtylimit_state_unlock(void); +void dirtylimit_state_initialize(void); +void dirtylimit_state_finalize(void); +bool dirtylimit_in_service(void); +bool dirtylimit_vcpu_index_valid(int cpu_index); +void dirtylimit_process(void); +void dirtylimit_change(bool start); +void dirtylimit_set_vcpu(int cpu_index, + uint64_t quota, + bool enable); +void dirtylimit_set_all(uint64_t quota, + bool enable); +void dirtylimit_vcpu_execute(CPUState *cpu); +#endif diff --git a/include/sysemu/dirtyrate.h b/include/sysemu/dirtyrate.h new file mode 100644 index 0000000000..4d3b9a4902 --- /dev/null +++ b/include/sysemu/dirtyrate.h @@ -0,0 +1,28 @@ +/* + * dirty page rate helper functions + * + * Copyright (c) 2022 CHINA TELECOM CO.,LTD. + * + * Authors: + * Hyman Huang(黄勇) + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_DIRTYRATE_H +#define QEMU_DIRTYRATE_H + +typedef struct VcpuStat { + int nvcpu; /* number of vcpu */ + DirtyRateVcpu *rates; /* array of dirty rate for each vcpu */ +} VcpuStat; + +int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms, + VcpuStat *stat, + unsigned int flag, + bool one_shot); + +void global_dirty_log_change(unsigned int flag, + bool start); +#endif diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h index 3201e7901d..a1ac5bc1b5 100644 --- a/include/sysemu/dma.h +++ b/include/sysemu/dma.h @@ -15,24 +15,11 @@ #include "block/block.h" #include "block/accounting.h" -typedef struct ScatterGatherEntry ScatterGatherEntry; - typedef enum { DMA_DIRECTION_TO_DEVICE = 0, DMA_DIRECTION_FROM_DEVICE = 1, } DMADirection; -struct QEMUSGList { - ScatterGatherEntry *sg; - int nsg; - int nalloc; - size_t size; - DeviceState *dev; - AddressSpace *as; -}; - -#ifndef CONFIG_USER_ONLY - /* * When an IOMMU is present, bus addresses become distinct from * CPU/memory physical addresses and may be a different size. 
Because @@ -45,6 +32,17 @@ typedef uint64_t dma_addr_t; #define DMA_ADDR_BITS 64 #define DMA_ADDR_FMT "%" PRIx64 +typedef struct ScatterGatherEntry ScatterGatherEntry; + +struct QEMUSGList { + ScatterGatherEntry *sg; + int nsg; + int nalloc; + dma_addr_t size; + DeviceState *dev; + AddressSpace *as; +}; + static inline void dma_barrier(AddressSpace *as, DMADirection dir) { /* @@ -73,19 +71,20 @@ static inline void dma_barrier(AddressSpace *as, DMADirection dir) * dma_memory_{read,write}() and check for errors */ static inline bool dma_memory_valid(AddressSpace *as, dma_addr_t addr, dma_addr_t len, - DMADirection dir) + DMADirection dir, MemTxAttrs attrs) { return address_space_access_valid(as, addr, len, dir == DMA_DIRECTION_FROM_DEVICE, - MEMTXATTRS_UNSPECIFIED); + attrs); } static inline MemTxResult dma_memory_rw_relaxed(AddressSpace *as, dma_addr_t addr, void *buf, dma_addr_t len, - DMADirection dir) + DMADirection dir, + MemTxAttrs attrs) { - return address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED, + return address_space_rw(as, addr, attrs, buf, len, dir == DMA_DIRECTION_FROM_DEVICE); } @@ -93,7 +92,9 @@ static inline MemTxResult dma_memory_read_relaxed(AddressSpace *as, dma_addr_t addr, void *buf, dma_addr_t len) { - return dma_memory_rw_relaxed(as, addr, buf, len, DMA_DIRECTION_TO_DEVICE); + return dma_memory_rw_relaxed(as, addr, buf, len, + DMA_DIRECTION_TO_DEVICE, + MEMTXATTRS_UNSPECIFIED); } static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as, @@ -102,7 +103,8 @@ static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as, dma_addr_t len) { return dma_memory_rw_relaxed(as, addr, (void *)buf, len, - DMA_DIRECTION_FROM_DEVICE); + DMA_DIRECTION_FROM_DEVICE, + MEMTXATTRS_UNSPECIFIED); } /** @@ -117,14 +119,15 @@ static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as, * @buf: buffer with the data transferred * @len: the number of bytes to read or write * @dir: indicates the transfer direction + * @attrs: memory transaction attributes */ static inline MemTxResult dma_memory_rw(AddressSpace *as, dma_addr_t addr, void *buf, dma_addr_t len, - DMADirection dir) + DMADirection dir, MemTxAttrs attrs) { dma_barrier(as, dir); - return dma_memory_rw_relaxed(as, addr, buf, len, dir); + return dma_memory_rw_relaxed(as, addr, buf, len, dir, attrs); } /** @@ -138,11 +141,14 @@ static inline MemTxResult dma_memory_rw(AddressSpace *as, dma_addr_t addr, * @addr: address within that address space * @buf: buffer with the data transferred * @len: length of the data transferred + * @attrs: memory transaction attributes */ static inline MemTxResult dma_memory_read(AddressSpace *as, dma_addr_t addr, - void *buf, dma_addr_t len) + void *buf, dma_addr_t len, + MemTxAttrs attrs) { - return dma_memory_rw(as, addr, buf, len, DMA_DIRECTION_TO_DEVICE); + return dma_memory_rw(as, addr, buf, len, + DMA_DIRECTION_TO_DEVICE, attrs); } /** @@ -156,12 +162,14 @@ static inline MemTxResult dma_memory_read(AddressSpace *as, dma_addr_t addr, * @addr: address within that address space * @buf: buffer with the data transferred * @len: the number of bytes to write + * @attrs: memory transaction attributes */ static inline MemTxResult dma_memory_write(AddressSpace *as, dma_addr_t addr, - const void *buf, dma_addr_t len) + const void *buf, dma_addr_t len, + MemTxAttrs attrs) { return dma_memory_rw(as, addr, (void *)buf, len, - DMA_DIRECTION_FROM_DEVICE); + DMA_DIRECTION_FROM_DEVICE, attrs); } /** @@ -175,9 +183,10 @@ static inline MemTxResult dma_memory_write(AddressSpace *as, dma_addr_t addr, 
* @addr: address within that address space * @c: constant byte to fill the memory * @len: the number of bytes to fill with the constant byte + * @attrs: memory transaction attributes */ MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr, - uint8_t c, dma_addr_t len); + uint8_t c, dma_addr_t len, MemTxAttrs attrs); /** * address_space_map: Map a physical memory region into a host virtual address. @@ -191,16 +200,17 @@ MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr, * @addr: address within that address space * @len: pointer to length of buffer; updated on return * @dir: indicates the transfer direction + * @attrs: memory attributes */ static inline void *dma_memory_map(AddressSpace *as, dma_addr_t addr, dma_addr_t *len, - DMADirection dir) + DMADirection dir, MemTxAttrs attrs) { hwaddr xlen = *len; void *p; p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE, - MEMTXATTRS_UNSPECIFIED); + attrs); *len = xlen; return p; } @@ -228,32 +238,34 @@ static inline void dma_memory_unmap(AddressSpace *as, } #define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \ - static inline uint##_bits##_t ld##_lname##_##_end##_dma(AddressSpace *as, \ - dma_addr_t addr) \ - { \ - uint##_bits##_t val; \ - dma_memory_read(as, addr, &val, (_bits) / 8); \ - return _end##_bits##_to_cpu(val); \ - } \ - static inline void st##_sname##_##_end##_dma(AddressSpace *as, \ - dma_addr_t addr, \ - uint##_bits##_t val) \ - { \ - val = cpu_to_##_end##_bits(val); \ - dma_memory_write(as, addr, &val, (_bits) / 8); \ + static inline MemTxResult ld##_lname##_##_end##_dma(AddressSpace *as, \ + dma_addr_t addr, \ + uint##_bits##_t *pval, \ + MemTxAttrs attrs) \ + { \ + MemTxResult res = dma_memory_read(as, addr, pval, (_bits) / 8, attrs); \ + _end##_bits##_to_cpus(pval); \ + return res; \ + } \ + static inline MemTxResult st##_sname##_##_end##_dma(AddressSpace *as, \ + dma_addr_t addr, \ + uint##_bits##_t val, \ + MemTxAttrs attrs) \ + { \ + val = cpu_to_##_end##_bits(val); \ + return dma_memory_write(as, addr, &val, (_bits) / 8, attrs); \ } -static inline uint8_t ldub_dma(AddressSpace *as, dma_addr_t addr) +static inline MemTxResult ldub_dma(AddressSpace *as, dma_addr_t addr, + uint8_t *val, MemTxAttrs attrs) { - uint8_t val; - - dma_memory_read(as, addr, &val, 1); - return val; + return dma_memory_read(as, addr, val, 1, attrs); } -static inline void stb_dma(AddressSpace *as, dma_addr_t addr, uint8_t val) +static inline MemTxResult stb_dma(AddressSpace *as, dma_addr_t addr, + uint8_t val, MemTxAttrs attrs) { - dma_memory_write(as, addr, &val, 1); + return dma_memory_write(as, addr, &val, 1, attrs); } DEFINE_LDST_DMA(uw, w, 16, le); @@ -274,7 +286,6 @@ void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint, AddressSpace *as); void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len); void qemu_sglist_destroy(QEMUSGList *qsg); -#endif typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov, BlockCompletionFunc *cb, void *cb_opaque, @@ -290,8 +301,10 @@ BlockAIOCB *dma_blk_read(BlockBackend *blk, BlockAIOCB *dma_blk_write(BlockBackend *blk, QEMUSGList *sg, uint64_t offset, uint32_t align, BlockCompletionFunc *cb, void *opaque); -uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg); -uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg); +MemTxResult dma_buf_read(void *ptr, dma_addr_t len, dma_addr_t *residual, + QEMUSGList *sg, MemTxAttrs attrs); +MemTxResult dma_buf_write(void *ptr, dma_addr_t len, dma_addr_t *residual, + QEMUSGList 
*sg, MemTxAttrs attrs); void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie, QEMUSGList *sg, enum BlockAcctType type); diff --git a/include/sysemu/dump-arch.h b/include/sysemu/dump-arch.h index e25b02e990..59bbc9be38 100644 --- a/include/sysemu/dump-arch.h +++ b/include/sysemu/dump-arch.h @@ -21,6 +21,9 @@ typedef struct ArchDumpInfo { uint32_t page_size; /* The target's page size. If it's variable and * unknown, then this should be the maximum. */ uint64_t phys_base; /* The target's physmem base. */ + void (*arch_sections_add_fn)(DumpState *s); + uint64_t (*arch_sections_write_hdr_fn)(DumpState *s, uint8_t *buff); + int (*arch_sections_write_fn)(DumpState *s, uint8_t *buff); } ArchDumpInfo; struct GuestPhysBlockList; /* memory_mapping.h */ diff --git a/include/sysemu/dump.h b/include/sysemu/dump.h index 250143cb5a..4ffed0b659 100644 --- a/include/sysemu/dump.h +++ b/include/sysemu/dump.h @@ -154,20 +154,35 @@ typedef struct DumpState { GuestPhysBlockList guest_phys_blocks; ArchDumpInfo dump_info; MemoryMappingList list; - uint16_t phdr_num; - uint32_t sh_info; - bool have_section; bool resume; bool detached; - ssize_t note_size; hwaddr memory_offset; int fd; - GuestPhysBlock *next_block; - ram_addr_t start; - bool has_filter; - int64_t begin; - int64_t length; + /* + * Dump filter area variables + * + * A filtered dump only contains the guest memory designated by + * the start address and length variables defined below. + * + * If length is 0, no filtering is applied. + */ + int64_t filter_area_begin; /* Start address of partial guest memory area */ + int64_t filter_area_length; /* Length of partial guest memory area */ + + /* Elf dump related data */ + uint32_t phdr_num; + uint32_t shdr_num; + ssize_t note_size; + hwaddr shdr_offset; + hwaddr phdr_offset; + hwaddr section_offset; + hwaddr note_offset; + + void *elf_section_hdrs; /* Pointer to section header buffer */ + void *elf_section_data; /* Pointer to section data buffer */ + uint64_t elf_section_data_size; /* Size of section data */ + GArray *string_table_buf; /* String table data buffer */ uint8_t *note_buf; /* buffer for notes */ size_t note_buf_offset; /* the writing place in note_buf */ @@ -200,4 +215,9 @@ typedef struct DumpState { uint16_t cpu_to_dump16(DumpState *s, uint16_t val); uint32_t cpu_to_dump32(DumpState *s, uint32_t val); uint64_t cpu_to_dump64(DumpState *s, uint64_t val); + +int64_t dump_filtered_memblock_size(GuestPhysBlock *block, int64_t filter_area_start, + int64_t filter_area_length); +int64_t dump_filtered_memblock_start(GuestPhysBlock *block, int64_t filter_area_start, + int64_t filter_area_length); #endif diff --git a/include/sysemu/event-loop-base.h b/include/sysemu/event-loop-base.h new file mode 100644 index 0000000000..2748bf6ae1 --- /dev/null +++ b/include/sysemu/event-loop-base.h @@ -0,0 +1,41 @@ +/* + * QEMU event-loop backend + * + * Copyright (C) 2022 Red Hat Inc + * + * Authors: + * Nicolas Saenz Julienne + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
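Going back to the dma.h hunks above: every DMA accessor now takes explicit MemTxAttrs and returns a MemTxResult, so device models can both pass attributes through and check for failed transactions. A minimal sketch for a device with no special attributes (the function name is illustrative):

static bool dev_read_descriptor(AddressSpace *as, dma_addr_t addr,
                                void *desc, dma_addr_t len)
{
    MemTxResult res = dma_memory_read(as, addr, desc, len,
                                      MEMTXATTRS_UNSPECIFIED);

    return res == MEMTX_OK;
}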
+ */ +#ifndef QEMU_EVENT_LOOP_BASE_H +#define QEMU_EVENT_LOOP_BASE_H + +#include "qom/object.h" +#include "block/aio.h" +#include "qemu/typedefs.h" + +#define TYPE_EVENT_LOOP_BASE "event-loop-base" +OBJECT_DECLARE_TYPE(EventLoopBase, EventLoopBaseClass, + EVENT_LOOP_BASE) + +struct EventLoopBaseClass { + ObjectClass parent_class; + + void (*init)(EventLoopBase *base, Error **errp); + void (*update_params)(EventLoopBase *base, Error **errp); + bool (*can_be_deleted)(EventLoopBase *base); +}; + +struct EventLoopBase { + Object parent; + + /* AioContext AIO engine parameters */ + int64_t aio_max_batch; + + /* AioContext thread pool parameters */ + int64_t thread_pool_min; + int64_t thread_pool_max; +}; +#endif diff --git a/include/sysemu/hax.h b/include/sysemu/hax.h index 247f0661d1..bf8f99a824 100644 --- a/include/sysemu/hax.h +++ b/include/sysemu/hax.h @@ -25,17 +25,23 @@ int hax_sync_vcpus(void); #ifdef NEED_CPU_H +# ifdef CONFIG_HAX +# define CONFIG_HAX_IS_POSSIBLE +# endif +#else /* !NEED_CPU_H */ +# define CONFIG_HAX_IS_POSSIBLE +#endif -#ifdef CONFIG_HAX +#ifdef CONFIG_HAX_IS_POSSIBLE -int hax_enabled(void); +extern bool hax_allowed; -#else /* CONFIG_HAX */ +#define hax_enabled() (hax_allowed) -#define hax_enabled() (0) +#else /* !CONFIG_HAX_IS_POSSIBLE */ -#endif /* CONFIG_HAX */ +#define hax_enabled() (0) -#endif /* NEED_CPU_H */ +#endif /* CONFIG_HAX_IS_POSSIBLE */ #endif /* QEMU_HAX_H */ diff --git a/include/sysemu/hostmem.h b/include/sysemu/hostmem.h index 9ff5c16963..39326f1d4f 100644 --- a/include/sysemu/hostmem.h +++ b/include/sysemu/hostmem.h @@ -18,6 +18,7 @@ #include "qom/object.h" #include "exec/memory.h" #include "qemu/bitmap.h" +#include "qemu/thread-context.h" #define TYPE_MEMORY_BACKEND "memory-backend" OBJECT_DECLARE_TYPE(HostMemoryBackend, HostMemoryBackendClass, @@ -66,6 +67,7 @@ struct HostMemoryBackend { bool merge, dump, use_canonical_path; bool prealloc, is_mapped, share, reserve; uint32_t prealloc_threads; + ThreadContext *prealloc_context; DECLARE_BITMAP(host_nodes, MAX_NODES + 1); HostMemPolicy policy; diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h index 8b66a4e7d0..6545f7cd61 100644 --- a/include/sysemu/hvf_int.h +++ b/include/sysemu/hvf_int.h @@ -11,7 +11,11 @@ #ifndef HVF_INT_H #define HVF_INT_H +#ifdef __aarch64__ +#include +#else #include +#endif /* hvf_slot flags */ #define HVF_SLOT_LOG (1 << 0) @@ -40,19 +44,25 @@ struct HVFState { int num_slots; hvf_vcpu_caps *hvf_caps; + uint64_t vtimer_offset; }; extern HVFState *hvf_state; struct hvf_vcpu_state { - int fd; + uint64_t fd; + void *exit; + bool vtimer_masked; + sigset_t unblock_ipi_mask; }; void assert_hvf_ok(hv_return_t ret); +int hvf_arch_init(void); int hvf_arch_init_vcpu(CPUState *cpu); void hvf_arch_vcpu_destroy(CPUState *cpu); int hvf_vcpu_exec(CPUState *); hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t); int hvf_put_registers(CPUState *); int hvf_get_registers(CPUState *); +void hvf_kick_vcpu_thread(CPUState *cpu); #endif diff --git a/include/sysemu/hw_accel.h b/include/sysemu/hw_accel.h index 01b5ebf442..22903a55f7 100644 --- a/include/sysemu/hw_accel.h +++ b/include/sysemu/hw_accel.h @@ -23,9 +23,4 @@ void cpu_synchronize_post_reset(CPUState *cpu); void cpu_synchronize_post_init(CPUState *cpu); void cpu_synchronize_pre_loadvm(CPUState *cpu); -static inline bool cpu_check_are_resettable(void) -{ - return kvm_enabled() ? 
kvm_cpu_check_are_resettable() : true; -} - #endif /* QEMU_HW_ACCEL_H */ diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h index 7f714bd136..8f8601d6ab 100644 --- a/include/sysemu/iothread.h +++ b/include/sysemu/iothread.h @@ -17,11 +17,12 @@ #include "block/aio.h" #include "qemu/thread.h" #include "qom/object.h" +#include "sysemu/event-loop-base.h" #define TYPE_IOTHREAD "iothread" struct IOThread { - Object parent_obj; + EventLoopBase parent_obj; QemuThread thread; AioContext *ctx; @@ -37,9 +38,6 @@ struct IOThread { int64_t poll_max_ns; int64_t poll_grow; int64_t poll_shrink; - - /* AioContext AIO engine parameters */ - int64_t aio_max_batch; }; typedef struct IOThread IOThread; diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h index a1ab1ee12d..e9a97eda8c 100644 --- a/include/sysemu/kvm.h +++ b/include/sysemu/kvm.h @@ -209,6 +209,11 @@ DECLARE_INSTANCE_CHECKER(KVMState, KVM_STATE, extern KVMState *kvm_state; typedef struct Notifier Notifier; +typedef struct KVMRouteChange { + KVMState *s; + int changes; +} KVMRouteChange; + /* external API */ bool kvm_has_free_slot(MachineState *ms); @@ -234,20 +239,31 @@ int kvm_has_intx_set_mask(void); bool kvm_arm_supports_user_irq(void); +int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr); +int kvm_on_sigbus(int code, void *addr); + #ifdef NEED_CPU_H #include "cpu.h" void kvm_flush_coalesced_mmio_buffer(void); -int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr, - target_ulong len, int type); -int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr, - target_ulong len, int type); -void kvm_remove_all_breakpoints(CPUState *cpu); +/** + * kvm_update_guest_debug(): ensure KVM debug structures updated + * @cs: the CPUState for this cpu + * @reinject_trap: KVM trap injection control + * + * There are usually per-arch specifics which will be handled by + * calling down to kvm_arch_update_guest_debug after the generic + * fields have been set. + */ +#ifdef KVM_CAP_SET_GUEST_DEBUG int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap); - -int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr); -int kvm_on_sigbus(int code, void *addr); +#else +static inline int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap) +{ + return -EINVAL; +} +#endif /* internal API */ @@ -333,6 +349,8 @@ bool kvm_device_supported(int vmfd, uint64_t type); extern const KVMCapabilityInfo kvm_arch_required_capabilities[]; +void kvm_arch_accel_class_init(ObjectClass *oc); + void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run); MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run); @@ -466,7 +484,7 @@ void kvm_init_cpu_signals(CPUState *cpu); /** * kvm_irqchip_add_msi_route - Add MSI route for specific vector - * @s: KVM state + * @c: KVMRouteChange instance. * @vector: which vector to add. This can be either MSI/MSIX * vector. The function will automatically detect whether * MSI/MSIX is enabled, and fetch corresponding MSI @@ -475,10 +493,24 @@ void kvm_init_cpu_signals(CPUState *cpu); * as @NULL, an empty MSI message will be inited. * @return: virq (>=0) when success, errno (<0) when failed. 
*/ -int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev); +int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev); int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg, PCIDevice *dev); void kvm_irqchip_commit_routes(KVMState *s); + +static inline KVMRouteChange kvm_irqchip_begin_route_changes(KVMState *s) +{ + return (KVMRouteChange) { .s = s, .changes = 0 }; +} + +static inline void kvm_irqchip_commit_route_changes(KVMRouteChange *c) +{ + if (c->changes) { + kvm_irqchip_commit_routes(c->s); + c->changes = 0; + } +} + void kvm_irqchip_release_virq(KVMState *s, int virq); int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter); @@ -547,4 +579,7 @@ bool kvm_cpu_check_are_resettable(void); bool kvm_arch_cpu_check_are_resettable(void); +bool kvm_dirty_ring_enabled(void); + +uint32_t kvm_dirty_ring_size(void); #endif diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h index c788452cd9..3b4adcdc10 100644 --- a/include/sysemu/kvm_int.h +++ b/include/sysemu/kvm_int.h @@ -10,6 +10,7 @@ #define QEMU_KVM_INT_H #include "exec/memory.h" +#include "qapi/qapi-types-common.h" #include "qemu/accel.h" #include "sysemu/kvm.h" @@ -36,8 +37,83 @@ typedef struct KVMMemoryListener { int as_id; } KVMMemoryListener; +#define KVM_MSI_HASHTAB_SIZE 256 + +enum KVMDirtyRingReaperState { + KVM_DIRTY_RING_REAPER_NONE = 0, + /* The reaper is sleeping */ + KVM_DIRTY_RING_REAPER_WAIT, + /* The reaper is reaping for dirty pages */ + KVM_DIRTY_RING_REAPER_REAPING, +}; + +/* + * KVM reaper instance, responsible for collecting the KVM dirty bits + * via the dirty ring. + */ +struct KVMDirtyRingReaper { + /* The reaper thread */ + QemuThread reaper_thr; + volatile uint64_t reaper_iteration; /* iteration number of reaper thr */ + volatile enum KVMDirtyRingReaperState reaper_state; /* reap thr state */ +}; +struct KVMState +{ + AccelState parent_obj; + + int nr_slots; + int fd; + int vmfd; + int coalesced_mmio; + int coalesced_pio; + struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; + bool coalesced_flush_in_progress; + int vcpu_events; + int robust_singlestep; + int debugregs; +#ifdef KVM_CAP_SET_GUEST_DEBUG + QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints; +#endif + int max_nested_state_len; + int many_ioeventfds; + int intx_set_mask; + int kvm_shadow_mem; + bool kernel_irqchip_allowed; + bool kernel_irqchip_required; + OnOffAuto kernel_irqchip_split; + bool sync_mmu; + uint64_t manual_dirty_log_protect; + /* The man page (and posix) say ioctl numbers are signed int, but + * they're not. 
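The new KVMRouteChange helpers let callers batch several MSI route additions and flush them with a single commit, which is skipped when nothing changed. A sketch of the intended pattern, assuming kvm_irqchip_add_msi_route() records its additions in the KVMRouteChange it is given:

static void setup_msi_routes(PCIDevice *pci_dev, int nr_vectors, int *virq)
{
    KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state);
    int vector;

    for (vector = 0; vector < nr_vectors; vector++) {
        virq[vector] = kvm_irqchip_add_msi_route(&c, vector, pci_dev);
    }
    /* issues a single routing-table update, and only if something was added */
    kvm_irqchip_commit_route_changes(&c);
}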
Linux, glibc and *BSD all treat ioctl numbers as + * unsigned, and treating them as signed here can break things */ + unsigned irq_set_ioctl; + unsigned int sigmask_len; + GHashTable *gsimap; +#ifdef KVM_CAP_IRQ_ROUTING + struct kvm_irq_routing *irq_routes; + int nr_allocated_irq_routes; + unsigned long *used_gsi_bitmap; + unsigned int gsi_count; + QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE]; +#endif + KVMMemoryListener memory_listener; + QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus; + + /* For "info mtree -f" to tell if an MR is registered in KVM */ + int nr_as; + struct KVMAs { + KVMMemoryListener *ml; + AddressSpace *as; + } *as; + uint64_t kvm_dirty_ring_bytes; /* Size of the per-vcpu dirty ring */ + uint32_t kvm_dirty_ring_size; /* Number of dirty GFNs per ring */ + struct KVMDirtyRingReaper reaper; + NotifyVmexitOption notify_vmexit; + uint32_t notify_window; +}; + void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, - AddressSpace *as, int as_id); + AddressSpace *as, int as_id, const char *name); void kvm_set_max_memslot_size(hwaddr max_slot_size); diff --git a/include/sysemu/memory_mapping.h b/include/sysemu/memory_mapping.h index 4b20f1a639..3bbeb1bcb4 100644 --- a/include/sysemu/memory_mapping.h +++ b/include/sysemu/memory_mapping.h @@ -15,8 +15,7 @@ #define MEMORY_MAPPING_H #include "qemu/queue.h" -#include "exec/cpu-defs.h" -#include "exec/memory.h" +#include "exec/cpu-common.h" typedef struct GuestPhysBlock { /* visible to guest, reflects PCI hole, etc */ @@ -43,7 +42,7 @@ typedef struct GuestPhysBlockList { /* The physical and virtual address in the memory mapping are contiguous. */ typedef struct MemoryMapping { hwaddr phys_addr; - target_ulong virt_addr; + vaddr virt_addr; ram_addr_t length; QTAILQ_ENTRY(MemoryMapping) next; } MemoryMapping; diff --git a/include/sysemu/nvmm.h b/include/sysemu/nvmm.h index 6d216599b0..833670fccb 100644 --- a/include/sysemu/nvmm.h +++ b/include/sysemu/nvmm.h @@ -10,8 +10,7 @@ #ifndef QEMU_NVMM_H #define QEMU_NVMM_H -#include "config-host.h" -#include "qemu-common.h" +#ifdef NEED_CPU_H #ifdef CONFIG_NVMM @@ -23,4 +22,6 @@ int nvmm_enabled(void); #endif /* CONFIG_NVMM */ -#endif /* CONFIG_NVMM */ +#endif /* NEED_CPU_H */ + +#endif /* QEMU_NVMM_H */ diff --git a/include/sysemu/os-posix.h b/include/sysemu/os-posix.h index 2edf33658a..58de7c994d 100644 --- a/include/sysemu/os-posix.h +++ b/include/sysemu/os-posix.h @@ -42,7 +42,9 @@ extern "C" { #endif +int os_parse_cmd_args(int index, const char *optarg); void os_set_line_buffering(void); +void os_setup_early_signal_handling(void); void os_set_proc_name(const char *s); void os_setup_signal_handling(void); void os_daemonize(void); @@ -52,9 +54,7 @@ int os_mlock(void); #define closesocket(s) close(s) #define ioctlsocket(s, r, v) ioctl(s, r, v) -typedef struct timeval qemu_timeval; -#define qemu_gettimeofday(tp) gettimeofday(tp, NULL) - +int os_set_daemonize(bool d); bool is_daemonized(void); /** diff --git a/include/sysemu/os-win32.h b/include/sysemu/os-win32.h index 43f569b5c2..5b38c7bd04 100644 --- a/include/sysemu/os-win32.h +++ b/include/sysemu/os-win32.h @@ -30,6 +30,23 @@ #include #include +#ifdef HAVE_AFUNIX_H +#include +#else +/* + * Fallback definitions of things we need in afunix.h, if not available from + * the used Windows SDK or MinGW headers. 
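With the afunix.h fallback above, AF_UNIX sockets can be used from Windows builds even when the SDK headers lack sockaddr_un. A hedged sketch (it assumes a Windows version that actually supports AF_UNIX sockets; the function name is illustrative):

static SOCKET win_unix_connect(const char *path)
{
    struct sockaddr_un un = { .sun_family = AF_UNIX };
    SOCKET s;

    g_strlcpy(un.sun_path, path, sizeof(un.sun_path));
    s = socket(AF_UNIX, SOCK_STREAM, 0);
    if (s != INVALID_SOCKET &&
        connect(s, (struct sockaddr *)&un, sizeof(un)) != 0) {
        closesocket(s);
        s = INVALID_SOCKET;
    }
    return s;
}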
+ */ +#define UNIX_PATH_MAX 108 + +typedef struct sockaddr_un { + ADDRESS_FAMILY sun_family; + char sun_path[UNIX_PATH_MAX]; +} SOCKADDR_UN, *PSOCKADDR_UN; + +#define SIO_AF_UNIX_GETPEERPID _WSAIOR(IOC_VENDOR, 256) +#endif + #ifdef __cplusplus extern "C" { #endif @@ -62,8 +79,10 @@ struct tm *localtime_r(const time_t *timep, struct tm *result); static inline void os_setup_signal_handling(void) {} static inline void os_daemonize(void) {} static inline void os_setup_post(void) {} -void os_set_line_buffering(void); static inline void os_set_proc_name(const char *dummy) {} +static inline int os_parse_cmd_args(int index, const char *optarg) { return -1; } +void os_set_line_buffering(void); +void os_setup_early_signal_handling(void); int getpagesize(void); @@ -71,11 +90,13 @@ int getpagesize(void); # define EPROTONOSUPPORT EINVAL #endif -typedef struct { - long tv_sec; - long tv_usec; -} qemu_timeval; -int qemu_gettimeofday(qemu_timeval *tp); +static inline int os_set_daemonize(bool d) +{ + if (d) { + return -ENOTSUP; + } + return 0; +} static inline bool is_daemonized(void) { @@ -105,20 +126,22 @@ static inline char *realpath(const char *path, char *resolved_path) return resolved_path; } -/* ??? Mingw appears to export _lock_file and _unlock_file as the functions - * with which to lock a stdio handle. But something is wrong in the markup, - * either in the header or the library, such that we get undefined references - * to "_imp___lock_file" etc when linking. Since we seem to have no other - * alternative, and the usage within the logging functions isn't critical, - * ignore FILE locking. +/* + * Older versions of MinGW do not import _lock_file and _unlock_file properly. + * This was fixed for v6.0.0 with commit b48e3ac8969d. */ - static inline void qemu_flockfile(FILE *f) { +#ifdef HAVE__LOCK_FILE + _lock_file(f); +#endif } static inline void qemu_funlockfile(FILE *f) { +#ifdef HAVE__LOCK_FILE + _unlock_file(f); +#endif } /* We wrap all the sockets functions so that we can diff --git a/include/sysemu/replay.h b/include/sysemu/replay.h index 0f3b0f7eac..7ec0882b50 100644 --- a/include/sysemu/replay.h +++ b/include/sysemu/replay.h @@ -160,9 +160,14 @@ void replay_shutdown_request(ShutdownCause cause); Returns 0 in PLAY mode if checkpoint was not found. Returns 1 in all other cases. */ bool replay_checkpoint(ReplayCheckpoint checkpoint); -/*! Used to determine that checkpoint is pending. +/*! Used to determine that checkpoint or async event is pending. Does not proceed to the next event in the log. */ -bool replay_has_checkpoint(void); +bool replay_has_event(void); +/* + * Processes the async events added to the queue (while recording) + * or reads the events from the file (while replaying). + */ +void replay_async_events(void); /* Asynchronous events queue */ @@ -193,7 +198,7 @@ uint64_t blkreplay_next_id(void); /*! Registers char driver to save it's events */ void replay_register_char_driver(struct Chardev *chr); /*! Saves write to char device event to the log */ -void replay_chr_be_write(struct Chardev *s, uint8_t *buf, int len); +void replay_chr_be_write(struct Chardev *s, const uint8_t *buf, int len); /*! Writes char write return value to the replay log. */ void replay_char_write_event_save(int res, int offset); /*! Reads char write return value from the replay log. 
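replay_has_event() and replay_async_events() pair up as documented in the replay.h hunk above; a plausible call pattern (placement in the real main loop may differ):

static void process_pending_replay_events(void)
{
    if (replay_has_event()) {
        replay_async_events();
    }
}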
*/ diff --git a/include/sysemu/reset.h b/include/sysemu/reset.h index 0b0d6d7598..609e4d50c2 100644 --- a/include/sysemu/reset.h +++ b/include/sysemu/reset.h @@ -1,10 +1,13 @@ #ifndef QEMU_SYSEMU_RESET_H #define QEMU_SYSEMU_RESET_H +#include "qapi/qapi-events-run-state.h" + typedef void QEMUResetHandler(void *opaque); void qemu_register_reset(QEMUResetHandler *func, void *opaque); +void qemu_register_reset_nosnapshotload(QEMUResetHandler *func, void *opaque); void qemu_unregister_reset(QEMUResetHandler *func, void *opaque); -void qemu_devices_reset(void); +void qemu_devices_reset(ShutdownCause reason); #endif diff --git a/include/sysemu/rtc.h b/include/sysemu/rtc.h new file mode 100644 index 0000000000..159702b45b --- /dev/null +++ b/include/sysemu/rtc.h @@ -0,0 +1,58 @@ +/* + * RTC configuration and clock read + * + * Copyright (c) 2003-2021 QEMU contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef SYSEMU_RTC_H +#define SYSEMU_RTC_H + +/** + * qemu_get_timedate: Get the current RTC time + * @tm: struct tm to fill in with RTC time + * @offset: offset in seconds to adjust the RTC time by before + * converting to struct tm format. + * + * This function fills in @tm with the current RTC time, as adjusted + * by @offset (for example, if @offset is 3600 then the returned time/date + * will be one hour further ahead than the current RTC time). + * + * The usual use is by RTC device models, which should call this function + * to find the time/date value that they should return to the guest + * when it reads the RTC registers. + * + * The behaviour of the clock whose value this function returns will + * depend on the -rtc command line option passed by the user. + */ +void qemu_get_timedate(struct tm *tm, int offset); + +/** + * qemu_timedate_diff: Return difference between a struct tm and the RTC + * @tm: struct tm containing the date/time to compare against + * + * Returns the difference in seconds between the RTC clock time + * and the date/time specified in @tm. For example, if @tm specifies + * a timestamp one hour further ahead than the current RTC time + * then this function will return 3600. 
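A typical RTC device model uses the two calls exactly as documented: qemu_get_timedate() when the guest reads the clock, qemu_timedate_diff() to derive an offset when the guest writes it. An illustrative sketch, with device-specific register handling omitted:

static void rtc_read_time(struct tm *now)
{
    qemu_get_timedate(now, 0);          /* current guest RTC time */
}

static int rtc_guest_set_time(struct tm *requested)
{
    /* store only the guest-visible offset, in seconds, from the RTC clock */
    return qemu_timedate_diff(requested);
}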
+ */ +int qemu_timedate_diff(struct tm *tm); + +#endif diff --git a/include/sysemu/runstate.h b/include/sysemu/runstate.h index a535691573..f3ed52548e 100644 --- a/include/sysemu/runstate.h +++ b/include/sysemu/runstate.h @@ -34,7 +34,13 @@ static inline bool shutdown_caused_by_guest(ShutdownCause cause) } void vm_start(void); -int vm_prepare_start(void); + +/** + * vm_prepare_start: Prepare for starting/resuming the VM + * + * @step_pending: whether any of the CPUs is about to be single-stepped by gdb + */ +int vm_prepare_start(bool step_pending); int vm_stop(RunState state); int vm_stop_force_state(RunState state); int vm_shutdown(void); @@ -69,6 +75,7 @@ void qemu_system_killed(int signal, pid_t pid); void qemu_system_reset(ShutdownCause reason); void qemu_system_guest_panicked(GuestPanicInformation *info); void qemu_system_guest_crashloaded(GuestPanicInformation *info); +bool qemu_system_dump_in_progress(void); #endif diff --git a/include/sysemu/sev.h b/include/sysemu/sev.h deleted file mode 100644 index 94d821d737..0000000000 --- a/include/sysemu/sev.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * QEMU Secure Encrypted Virutualization (SEV) support - * - * Copyright: Advanced Micro Devices, 2016-2018 - * - * Authors: - * Brijesh Singh - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -#ifndef QEMU_SEV_H -#define QEMU_SEV_H - -#include "sysemu/kvm.h" - -bool sev_enabled(void); -int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp); -int sev_encrypt_flash(uint8_t *ptr, uint64_t len, Error **errp); -int sev_inject_launch_secret(const char *hdr, const char *secret, - uint64_t gpa, Error **errp); - -int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size); -void sev_es_set_reset_vector(CPUState *cpu); - -#endif diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h index 8fae667172..6a7a31e64d 100644 --- a/include/sysemu/sysemu.h +++ b/include/sysemu/sysemu.h @@ -13,10 +13,11 @@ extern const char *qemu_name; extern QemuUUID qemu_uuid; extern bool qemu_uuid_set; +const char *qemu_get_vm_name(void); + void qemu_add_exit_notifier(Notifier *notify); void qemu_remove_exit_notifier(Notifier *notify); -void qemu_run_machine_init_done_notifiers(void); void qemu_add_machine_init_done_notifier(Notifier *notify); void qemu_remove_machine_init_done_notifier(Notifier *notify); @@ -33,6 +34,7 @@ typedef enum { } VGAInterfaceType; extern int vga_interface_type; +extern bool vga_interface_created; extern int graphic_width; extern int graphic_height; @@ -40,12 +42,8 @@ extern int graphic_depth; extern int display_opengl; extern const char *keyboard_layout; extern int win2k_install_hack; -extern int alt_grab; -extern int ctrl_grab; extern int graphic_rotate; extern int old_param; -extern int boot_menu; -extern bool boot_strict; extern uint8_t *boot_splash_filedata; extern bool enable_mlock; extern bool enable_cpu_pm; @@ -104,8 +102,8 @@ void qemu_boot_set(const char *boot_order, Error **errp); bool defaults_enabled(void); -void qemu_init(int argc, char **argv, char **envp); -void qemu_main_loop(void); +void qemu_init(int argc, char **argv); +int qemu_main_loop(void); void qemu_cleanup(void); extern QemuOptsList qemu_legacy_drive_opts; diff --git a/include/sysemu/tpm.h b/include/sysemu/tpm.h index 68b2206463..fb40e30ff6 100644 --- a/include/sysemu/tpm.h +++ b/include/sysemu/tpm.h @@ -80,6 +80,12 @@ static inline TPMVersion tpm_get_version(TPMIf *ti) #define tpm_init() (0) #define tpm_cleanup() 
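Per the sysemu.h hunk above, qemu_init() loses its envp parameter and qemu_main_loop() now reports an exit status, so a system-emulation main() would look roughly like this (a sketch, not the actual softmmu main):

int main(int argc, char **argv)
{
    qemu_init(argc, argv);
    int status = qemu_main_loop();
    qemu_cleanup();
    return status;
}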
+/* needed for an alignment check in non-tpm code */ +static inline Object *TPM_IS_CRB(Object *obj) +{ + return NULL; +} + #endif /* CONFIG_TPM */ #endif /* QEMU_TPM_H */ diff --git a/include/sysemu/watchdog.h b/include/sysemu/watchdog.h index a08d16380d..745c89b02b 100644 --- a/include/sysemu/watchdog.h +++ b/include/sysemu/watchdog.h @@ -25,21 +25,8 @@ #include "qemu/queue.h" #include "qapi/qapi-types-run-state.h" -struct WatchdogTimerModel { - QLIST_ENTRY(WatchdogTimerModel) entry; - - /* Short name of the device - used to select it on the command line. */ - const char *wdt_name; - /* Longer description (eg. manufacturer and full model number). */ - const char *wdt_description; -}; -typedef struct WatchdogTimerModel WatchdogTimerModel; - /* in hw/watchdog.c */ -int select_watchdog(const char *p); -int select_watchdog_action(const char *action); WatchdogAction get_watchdog_action(void); -void watchdog_add_model(WatchdogTimerModel *model); void watchdog_perform_action(void); #endif /* QEMU_WATCHDOG_H */ diff --git a/include/tcg/tcg-ldst.h b/include/tcg/tcg-ldst.h new file mode 100644 index 0000000000..2ba22bd5fe --- /dev/null +++ b/include/tcg/tcg-ldst.h @@ -0,0 +1,79 @@ +/* + * Memory helpers that will be used by TCG generated code. + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef TCG_LDST_H +#define TCG_LDST_H + +#ifdef CONFIG_SOFTMMU + +/* Value zero-extended to tcg register size. */ +tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); + +/* Value sign-extended to tcg register size. 
*/ +tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); + +void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr); + +#else + +G_NORETURN void helper_unaligned_ld(CPUArchState *env, target_ulong addr); +G_NORETURN void helper_unaligned_st(CPUArchState *env, target_ulong addr); + +#endif /* CONFIG_SOFTMMU */ +#endif /* TCG_LDST_H */ diff --git a/include/tcg/tcg-op-gvec.h b/include/tcg/tcg-op-gvec.h index da55fed870..28cafbcc5c 100644 --- a/include/tcg/tcg-op-gvec.h +++ b/include/tcg/tcg-op-gvec.h @@ -218,6 +218,25 @@ typedef struct { bool write_aofs; } GVecGen4; +typedef struct { + /* + * Expand inline as a 64-bit or 32-bit integer. Only one of these will be + * non-NULL. + */ + void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64, int64_t); + void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32, int32_t); + /* Expand inline with a host vector type. */ + void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, TCGv_vec, int64_t); + /* Expand out-of-line helper w/descriptor, data in descriptor. */ + gen_helper_gvec_4 *fno; + /* The optional opcodes, if any, utilized by .fniv. */ + const TCGOpcode *opt_opc; + /* The vector element size, if applicable. */ + uint8_t vece; + /* Prefer i64 to v64. */ + bool prefer_i64; +} GVecGen4i; + void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz, const GVecGen2 *); void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz, @@ -231,6 +250,9 @@ void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs, const GVecGen3i *); void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, uint32_t oprsz, uint32_t maxsz, const GVecGen4 *); +void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, + uint32_t oprsz, uint32_t maxsz, int64_t c, + const GVecGen4i *); /* Expand a specific vector operation. 
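GVecGen4i mirrors the existing gvec descriptors but carries an immediate operand. A hypothetical frontend expansion of d = a + b + c + imm over 64-bit lanes (gen_helper_gvec_acc stands in for an out-of-line helper declared elsewhere, not part of this patch):

static void gen_acc_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b,
                        TCGv_i64 c, int64_t imm)
{
    tcg_gen_add_i64(d, a, b);
    tcg_gen_add_i64(d, d, c);
    tcg_gen_addi_i64(d, d, imm);
}

static const GVecGen4i op_acc = {
    .fni8 = gen_acc_i64,
    .fno  = gen_helper_gvec_acc,   /* hypothetical out-of-line helper */
    .vece = MO_64,
};

static void gen_acc_vec(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t cofs, uint32_t oprsz, uint32_t maxsz,
                        int64_t imm)
{
    tcg_gen_gvec_4i(dofs, aofs, bofs, cofs, oprsz, maxsz, imm, &op_acc);
}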
*/ diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h index 3cc670ae29..4c1d7beb35 100644 --- a/include/tcg/tcg-op.h +++ b/include/tcg/tcg-op.h @@ -332,6 +332,7 @@ void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg); void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg); void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags); void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg); +void tcg_gen_hswap_i32(TCGv_i32 ret, TCGv_i32 arg); void tcg_gen_smin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_smax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_umin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2); @@ -746,6 +747,8 @@ void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg); void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags); void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags); void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg); +void tcg_gen_hswap_i64(TCGv_i64 ret, TCGv_i64 arg); +void tcg_gen_wswap_i64(TCGv_i64 ret, TCGv_i64 arg); void tcg_gen_smin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_smax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_umin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2); @@ -1058,7 +1061,6 @@ static inline void tcg_gen_plugin_cb_end(void) #if TARGET_LONG_BITS == 32 #define tcg_temp_new() tcg_temp_new_i32() -#define tcg_global_reg_new tcg_global_reg_new_i32 #define tcg_global_mem_new tcg_global_mem_new_i32 #define tcg_temp_local_new() tcg_temp_local_new_i32() #define tcg_temp_free tcg_temp_free_i32 @@ -1066,7 +1068,6 @@ static inline void tcg_gen_plugin_cb_end(void) #define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32 #else #define tcg_temp_new() tcg_temp_new_i64() -#define tcg_global_reg_new tcg_global_reg_new_i64 #define tcg_global_mem_new tcg_global_mem_new_i64 #define tcg_temp_local_new() tcg_temp_local_new_i64() #define tcg_temp_free tcg_temp_free_i64 @@ -1111,7 +1112,7 @@ static inline void tcg_gen_qemu_ld32s(TCGv ret, TCGv addr, int mem_index) static inline void tcg_gen_qemu_ld64(TCGv_i64 ret, TCGv addr, int mem_index) { - tcg_gen_qemu_ld_i64(ret, addr, mem_index, MO_TEQ); + tcg_gen_qemu_ld_i64(ret, addr, mem_index, MO_TEUQ); } static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index) @@ -1131,7 +1132,7 @@ static inline void tcg_gen_qemu_st32(TCGv arg, TCGv addr, int mem_index) static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index) { - tcg_gen_qemu_st_i64(arg, addr, mem_index, MO_TEQ); + tcg_gen_qemu_st_i64(arg, addr, mem_index, MO_TEUQ); } void tcg_gen_atomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32, @@ -1294,6 +1295,8 @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t); #define tcg_gen_bswap32_tl tcg_gen_bswap32_i64 #define tcg_gen_bswap64_tl tcg_gen_bswap64_i64 #define tcg_gen_bswap_tl tcg_gen_bswap64_i64 +#define tcg_gen_hswap_tl tcg_gen_hswap_i64 +#define tcg_gen_wswap_tl tcg_gen_wswap_i64 #define tcg_gen_concat_tl_i64 tcg_gen_concat32_i64 #define tcg_gen_extr_i64_tl tcg_gen_extr32_i64 #define tcg_gen_andc_tl tcg_gen_andc_i64 @@ -1409,6 +1412,7 @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t); #define tcg_gen_bswap16_tl tcg_gen_bswap16_i32 #define tcg_gen_bswap32_tl(D, S, F) tcg_gen_bswap32_i32(D, S) #define tcg_gen_bswap_tl tcg_gen_bswap32_i32 +#define tcg_gen_hswap_tl tcg_gen_hswap_i32 #define tcg_gen_concat_tl_i64 tcg_gen_concat_i32_i64 #define tcg_gen_extr_i64_tl tcg_gen_extr_i64_i32 #define tcg_gen_andc_tl tcg_gen_andc_i32 @@ -1499,6 +1503,11 @@ static inline void 
tcg_gen_addi_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t b) glue(tcg_gen_addi_,PTR)((NAT)r, (NAT)a, b); } +static inline void tcg_gen_mov_ptr(TCGv_ptr d, TCGv_ptr s) +{ + glue(tcg_gen_mov_,PTR)((NAT)d, (NAT)s); +} + static inline void tcg_gen_brcondi_ptr(TCGCond cond, TCGv_ptr a, intptr_t b, TCGLabel *label) { diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h index ab773a6531..dad9f84e26 100644 --- a/include/tcg/tcg-opc.h +++ b/include/tcg/tcg-opc.h @@ -288,6 +288,9 @@ DEF(or_vec, 1, 2, 0, IMPLVEC) DEF(xor_vec, 1, 2, 0, IMPLVEC) DEF(andc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_andc_vec)) DEF(orc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_orc_vec)) +DEF(nand_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nand_vec)) +DEF(nor_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nor_vec)) +DEF(eqv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_eqv_vec)) DEF(not_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_not_vec)) DEF(shli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec)) diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h index 595dfdbc55..79aa496059 100644 --- a/include/tcg/tcg.h +++ b/include/tcg/tcg.h @@ -27,12 +27,12 @@ #include "cpu.h" #include "exec/memop.h" +#include "exec/memopidx.h" #include "qemu/bitops.h" #include "qemu/plugin.h" #include "qemu/queue.h" #include "tcg/tcg-mo.h" #include "tcg-target.h" -#include "qemu/int128.h" #include "tcg/tcg-cond.h" /* XXX: make safe guess about sizes */ @@ -43,7 +43,7 @@ #else #define MAX_OPC_PARAM_PER_ARG 1 #endif -#define MAX_OPC_PARAM_IARGS 6 +#define MAX_OPC_PARAM_IARGS 7 #define MAX_OPC_PARAM_OARGS 1 #define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS) @@ -183,6 +183,9 @@ typedef uint64_t TCGRegSet; #define TCG_TARGET_HAS_not_vec 0 #define TCG_TARGET_HAS_andc_vec 0 #define TCG_TARGET_HAS_orc_vec 0 +#define TCG_TARGET_HAS_nand_vec 0 +#define TCG_TARGET_HAS_nor_vec 0 +#define TCG_TARGET_HAS_eqv_vec 0 #define TCG_TARGET_HAS_roti_vec 0 #define TCG_TARGET_HAS_rots_vec 0 #define TCG_TARGET_HAS_rotv_vec 0 @@ -406,7 +409,7 @@ typedef TCGv_ptr TCGv_env; #define TCG_CALL_NO_WRITE_GLOBALS 0x0002 /* Helper can be safely suppressed if the return value is not used. */ #define TCG_CALL_NO_SIDE_EFFECTS 0x0004 -/* Helper is QEMU_NORETURN. */ +/* Helper is G_NORETURN. */ #define TCG_CALL_NO_RETURN 0x0008 /* convenience version of most used call flags */ @@ -441,6 +444,8 @@ typedef enum TCGTempVal { typedef enum TCGTempKind { /* Temp is dead at the end of all basic blocks. */ TEMP_NORMAL, + /* Temp is live across conditional branch, but dead otherwise. */ + TEMP_EBB, /* Temp is saved across basic blocks but dead at the end of TBs. */ TEMP_LOCAL, /* Temp is saved across both basic blocks and translation blocks. */ @@ -503,9 +508,6 @@ typedef struct TCGOp { /* Next and previous opcodes. */ QTAILQ_ENTRY(TCGOp) link; -#ifdef CONFIG_PLUGIN - QSIMPLEQ_ENTRY(TCGOp) plugin_link; -#endif /* Arguments for the opcode. 
*/ TCGArg args[MAX_OPC_PARAM]; @@ -615,9 +617,6 @@ struct TCGContext { /* descriptor of the instruction being translated */ struct qemu_plugin_insn *plugin_insn; - - /* list to quickly access the injected ops */ - QSIMPLEQ_HEAD(, TCGOp) plugin_ops; #endif GHashTable *const_table[TCG_TYPE_COUNT]; @@ -888,7 +887,7 @@ void tcg_register_thread(void); void tcg_prologue_init(TCGContext *s); void tcg_func_start(TCGContext *s); -int tcg_gen_code(TCGContext *s, TranslationBlock *tb); +int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start); void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size); @@ -1038,8 +1037,8 @@ int tcg_check_temp_count(void); #endif int64_t tcg_cpu_exec_time(void); -void tcg_dump_info(void); -void tcg_dump_op_count(void); +void tcg_dump_info(GString *buf); +void tcg_dump_op_count(GString *buf); #define TCG_CT_CONST 1 /* any constant of register size */ @@ -1152,9 +1151,11 @@ TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val); #if UINTPTR_MAX == UINT32_MAX # define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i32((intptr_t)(x))) # define tcg_const_local_ptr(x) ((TCGv_ptr)tcg_const_local_i32((intptr_t)(x))) +# define tcg_constant_ptr(x) ((TCGv_ptr)tcg_constant_i32((intptr_t)(x))) #else # define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i64((intptr_t)(x))) # define tcg_const_local_ptr(x) ((TCGv_ptr)tcg_const_local_i64((intptr_t)(x))) +# define tcg_constant_ptr(x) ((TCGv_ptr)tcg_constant_i64((intptr_t)(x))) #endif TCGLabel *gen_new_label(void); @@ -1242,44 +1243,6 @@ static inline size_t tcg_current_code_size(TCGContext *s) return tcg_ptr_byte_diff(s->code_ptr, s->code_buf); } -/* Combine the MemOp and mmu_idx parameters into a single value. */ -typedef uint32_t TCGMemOpIdx; - -/** - * make_memop_idx - * @op: memory operation - * @idx: mmu index - * - * Encode these values into a single parameter. - */ -static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx) -{ - tcg_debug_assert(idx <= 15); - return (op << 4) | idx; -} - -/** - * get_memop - * @oi: combined op/idx parameter - * - * Extract the memory operation from the combined value. - */ -static inline MemOp get_memop(TCGMemOpIdx oi) -{ - return oi >> 4; -} - -/** - * get_mmuidx - * @oi: combined op/idx parameter - * - * Extract the mmu index from the combined value. - */ -static inline unsigned get_mmuidx(TCGMemOpIdx oi) -{ - return oi & 15; -} - /** * tcg_qemu_tb_exec: * @env: pointer to CPUArchState for the CPU @@ -1367,162 +1330,17 @@ uint64_t dup_const(unsigned vece, uint64_t c); : (qemu_build_not_reached_always(), 0)) \ : dup_const(VECE, C)) -/* - * Memory helpers that will be used by TCG generated code. - */ -#ifdef CONFIG_SOFTMMU -/* Value zero-extended to tcg register size. 
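The TCGMemOpIdx helpers deleted further below are not gone: the type is now MemOpIdx, and make_memop_idx()/get_memop()/get_mmuidx() come from the "exec/memopidx.h" include added at the top of this header, with the same semantics. For example:

    MemOpIdx oi = make_memop_idx(MO_TEUQ, mmu_idx);
    MemOp mop   = get_memop(oi);   /* recovers MO_TEUQ */
    unsigned ix = get_mmuidx(oi);  /* recovers mmu_idx */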
*/ -tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); - -/* Value sign-extended to tcg register size. */ -tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); - -void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, - TCGMemOpIdx oi, uintptr_t retaddr); -void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, - TCGMemOpIdx oi, uintptr_t retaddr); -void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - TCGMemOpIdx oi, uintptr_t retaddr); -void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - TCGMemOpIdx oi, uintptr_t retaddr); -void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, - TCGMemOpIdx oi, uintptr_t retaddr); -void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - TCGMemOpIdx oi, uintptr_t retaddr); -void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - TCGMemOpIdx oi, uintptr_t retaddr); - -/* Temporary aliases until backends are converted. */ -#ifdef TARGET_WORDS_BIGENDIAN -# define helper_ret_ldsw_mmu helper_be_ldsw_mmu -# define helper_ret_lduw_mmu helper_be_lduw_mmu -# define helper_ret_ldsl_mmu helper_be_ldsl_mmu -# define helper_ret_ldul_mmu helper_be_ldul_mmu -# define helper_ret_ldl_mmu helper_be_ldul_mmu -# define helper_ret_ldq_mmu helper_be_ldq_mmu -# define helper_ret_stw_mmu helper_be_stw_mmu -# define helper_ret_stl_mmu helper_be_stl_mmu -# define helper_ret_stq_mmu helper_be_stq_mmu +#if TARGET_LONG_BITS == 64 +# define dup_const_tl dup_const #else -# define helper_ret_ldsw_mmu helper_le_ldsw_mmu -# define helper_ret_lduw_mmu helper_le_lduw_mmu -# define helper_ret_ldsl_mmu helper_le_ldsl_mmu -# define helper_ret_ldul_mmu helper_le_ldul_mmu -# define helper_ret_ldl_mmu helper_le_ldul_mmu -# define helper_ret_ldq_mmu helper_le_ldq_mmu -# define helper_ret_stw_mmu helper_le_stw_mmu -# define helper_ret_stl_mmu helper_le_stl_mmu -# define helper_ret_stq_mmu helper_le_stq_mmu +# define dup_const_tl(VECE, C) \ + (__builtin_constant_p(VECE) \ + ? ( (VECE) == MO_8 ? 0x01010101ul * (uint8_t)(C) \ + : (VECE) == MO_16 ? 0x00010001ul * (uint16_t)(C) \ + : (VECE) == MO_32 ? 
0x00000001ul * (uint32_t)(C) \ + : (qemu_build_not_reached_always(), 0)) \ + : (target_long)dup_const(VECE, C)) #endif -#endif /* CONFIG_SOFTMMU */ - -uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr, - uint32_t cmpv, uint32_t newv, - TCGMemOpIdx oi, uintptr_t retaddr); -uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr, - uint32_t cmpv, uint32_t newv, - TCGMemOpIdx oi, uintptr_t retaddr); -uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr, - uint32_t cmpv, uint32_t newv, - TCGMemOpIdx oi, uintptr_t retaddr); -uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr, - uint64_t cmpv, uint64_t newv, - TCGMemOpIdx oi, uintptr_t retaddr); -uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr, - uint32_t cmpv, uint32_t newv, - TCGMemOpIdx oi, uintptr_t retaddr); -uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr, - uint32_t cmpv, uint32_t newv, - TCGMemOpIdx oi, uintptr_t retaddr); -uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr, - uint64_t cmpv, uint64_t newv, - TCGMemOpIdx oi, uintptr_t retaddr); - -#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \ -TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \ - (CPUArchState *env, target_ulong addr, TYPE val, \ - TCGMemOpIdx oi, uintptr_t retaddr); - -#ifdef CONFIG_ATOMIC64 -#define GEN_ATOMIC_HELPER_ALL(NAME) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, b) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \ - GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \ - GEN_ATOMIC_HELPER(NAME, uint64_t, q_be) -#else -#define GEN_ATOMIC_HELPER_ALL(NAME) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, b) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) -#endif - -GEN_ATOMIC_HELPER_ALL(fetch_add) -GEN_ATOMIC_HELPER_ALL(fetch_sub) -GEN_ATOMIC_HELPER_ALL(fetch_and) -GEN_ATOMIC_HELPER_ALL(fetch_or) -GEN_ATOMIC_HELPER_ALL(fetch_xor) -GEN_ATOMIC_HELPER_ALL(fetch_smin) -GEN_ATOMIC_HELPER_ALL(fetch_umin) -GEN_ATOMIC_HELPER_ALL(fetch_smax) -GEN_ATOMIC_HELPER_ALL(fetch_umax) - -GEN_ATOMIC_HELPER_ALL(add_fetch) -GEN_ATOMIC_HELPER_ALL(sub_fetch) -GEN_ATOMIC_HELPER_ALL(and_fetch) -GEN_ATOMIC_HELPER_ALL(or_fetch) -GEN_ATOMIC_HELPER_ALL(xor_fetch) -GEN_ATOMIC_HELPER_ALL(smin_fetch) -GEN_ATOMIC_HELPER_ALL(umin_fetch) -GEN_ATOMIC_HELPER_ALL(smax_fetch) -GEN_ATOMIC_HELPER_ALL(umax_fetch) - -GEN_ATOMIC_HELPER_ALL(xchg) - -#undef GEN_ATOMIC_HELPER_ALL -#undef GEN_ATOMIC_HELPER - -Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr, - Int128 cmpv, Int128 newv, - TCGMemOpIdx oi, uintptr_t retaddr); -Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr, - Int128 cmpv, Int128 newv, - TCGMemOpIdx oi, uintptr_t retaddr); - -Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val, - TCGMemOpIdx oi, uintptr_t retaddr); -void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val, - TCGMemOpIdx oi, uintptr_t retaddr); #ifdef CONFIG_DEBUG_TCG void tcg_assert_listed_vecop(TCGOpcode); diff --git a/include/trace-tcg.h b/include/trace-tcg.h deleted 
file mode 100644 index da68608c85..0000000000 --- a/include/trace-tcg.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef TRACE_TCG_H -#define TRACE_TCG_H - -#include "trace/generated-tcg-tracers.h" - -#endif /* TRACE_TCG_H */ diff --git a/include/ui/clipboard.h b/include/ui/clipboard.h index b45b984c9f..ce76aa451f 100644 --- a/include/ui/clipboard.h +++ b/include/ui/clipboard.h @@ -20,8 +20,10 @@ */ typedef enum QemuClipboardType QemuClipboardType; +typedef enum QemuClipboardNotifyType QemuClipboardNotifyType; typedef enum QemuClipboardSelection QemuClipboardSelection; typedef struct QemuClipboardPeer QemuClipboardPeer; +typedef struct QemuClipboardNotify QemuClipboardNotify; typedef struct QemuClipboardInfo QemuClipboardInfo; /** @@ -55,18 +57,46 @@ enum QemuClipboardSelection { * struct QemuClipboardPeer * * @name: peer name. - * @update: notifier for clipboard updates. + * @notifier: notifier for clipboard updates. * @request: callback for clipboard data requests. * * Clipboard peer description. */ struct QemuClipboardPeer { const char *name; - Notifier update; + Notifier notifier; void (*request)(QemuClipboardInfo *info, QemuClipboardType type); }; +/** + * enum QemuClipboardNotifyType + * + * @QEMU_CLIPBOARD_UPDATE_INFO: clipboard info update + * @QEMU_CLIPBOARD_RESET_SERIAL: reset clipboard serial + * + * Clipboard notify type. + */ +enum QemuClipboardNotifyType { + QEMU_CLIPBOARD_UPDATE_INFO, + QEMU_CLIPBOARD_RESET_SERIAL, +}; + +/** + * struct QemuClipboardNotify + * + * @type: the type of event. + * @info: a QemuClipboardInfo event. + * + * Clipboard notify data. + */ +struct QemuClipboardNotify { + QemuClipboardNotifyType type; + union { + QemuClipboardInfo *info; + }; +}; + /** * struct QemuClipboardInfo * @@ -74,6 +104,8 @@ struct QemuClipboardPeer { * @owner: clipboard owner. * @selection: clipboard selection. * @types: clipboard data array (one entry per type). + * @has_serial: whether @serial is available. + * @serial: the grab serial counter. * * Clipboard content data and metadata. */ @@ -81,6 +113,8 @@ struct QemuClipboardInfo { uint32_t refcount; QemuClipboardPeer *owner; QemuClipboardSelection selection; + bool has_serial; + uint32_t serial; struct { bool available; bool requested; @@ -109,6 +143,47 @@ void qemu_clipboard_peer_register(QemuClipboardPeer *peer); */ void qemu_clipboard_peer_unregister(QemuClipboardPeer *peer); +/** + * qemu_clipboard_peer_owns + * + * @peer: peer information. + * @selection: clipboard selection. + * + * Return TRUE if the peer owns the clipboard. + */ +bool qemu_clipboard_peer_owns(QemuClipboardPeer *peer, + QemuClipboardSelection selection); + +/** + * qemu_clipboard_peer_release + * + * @peer: peer information. + * @selection: clipboard selection. + * + * If the peer owns the clipboard, release it. + */ +void qemu_clipboard_peer_release(QemuClipboardPeer *peer, + QemuClipboardSelection selection); + +/** + * qemu_clipboard_info + * + * @selection: clipboard selection. + * + * Return the current clipboard data & owner informations. + */ +QemuClipboardInfo *qemu_clipboard_info(QemuClipboardSelection selection); + +/** + * qemu_clipboard_check_serial + * + * @info: clipboard info. + * @client: whether to check from the client context and priority. + * + * Return TRUE if the @info has a higher serial than the current clipboard. 
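
As a sketch of how a UI backend might consume the serial mechanism documented above: the peer's Notifier is assumed to be invoked with a QemuClipboardNotify pointer (as the struct above suggests), the handler name is hypothetical, and only types and helpers declared in this header are used:

/* Hypothetical peer notifier, illustration only. */
static void my_clipboard_notify(Notifier *notifier, void *data)
{
    QemuClipboardNotify *notify = data;

    switch (notify->type) {
    case QEMU_CLIPBOARD_UPDATE_INFO:
        /* Drop grabs that lost the race: only newer serials are applied. */
        if (!qemu_clipboard_check_serial(notify->info, false)) {
            return;
        }
        /* ... push notify->info->types[] to the host clipboard ... */
        break;
    case QEMU_CLIPBOARD_RESET_SERIAL:
        /* ... forget any cached serial state ... */
        break;
    }
}
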
+ */ +bool qemu_clipboard_check_serial(QemuClipboardInfo *info, bool client); + /** * qemu_clipboard_info_new * @@ -157,6 +232,13 @@ void qemu_clipboard_info_unref(QemuClipboardInfo *info); */ void qemu_clipboard_update(QemuClipboardInfo *info); +/** + * qemu_clipboard_reset_serial + * + * Reset the clipboard serial. + */ +void qemu_clipboard_reset_serial(void); + /** * qemu_clipboard_request * @@ -190,4 +272,6 @@ void qemu_clipboard_set_data(QemuClipboardPeer *peer, const void *data, bool update); +G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuClipboardInfo, qemu_clipboard_info_unref) + #endif /* QEMU_CLIPBOARD_H */ diff --git a/include/ui/console.h b/include/ui/console.h index 61baa085a3..5a88700c7f 100644 --- a/include/ui/console.h +++ b/include/ui/console.h @@ -70,6 +70,7 @@ void hmp_mouse_set(Monitor *mon, const QDict *qdict); /* keysym is a unicode code except for special keys (see QEMU_KEY_xxx constants) */ #define QEMU_KEY_ESC1(c) ((c) | 0xe100) +#define QEMU_KEY_TAB 0x0009 #define QEMU_KEY_BACKSPACE 0x007f #define QEMU_KEY_UP QEMU_KEY_ESC1('A') #define QEMU_KEY_DOWN QEMU_KEY_ESC1('B') @@ -108,6 +109,17 @@ struct QemuConsoleClass { #define QEMU_ALLOCATED_FLAG 0x01 #define QEMU_PLACEHOLDER_FLAG 0x02 +typedef struct ScanoutTexture { + uint32_t backing_id; + bool backing_y_0_top; + uint32_t backing_width; + uint32_t backing_height; + uint32_t x; + uint32_t y; + uint32_t width; + uint32_t height; +} ScanoutTexture; + typedef struct DisplaySurface { pixman_format_code_t format; pixman_image_t *image; @@ -128,6 +140,7 @@ typedef struct QemuUIInfo { int yoff; uint32_t width; uint32_t height; + uint32_t refresh_rate; } QemuUIInfo; /* cursor data format is 32bit RGBA */ @@ -167,10 +180,35 @@ typedef struct QemuDmaBuf { uint32_t fourcc; uint64_t modifier; uint32_t texture; + uint32_t x; + uint32_t y; + uint32_t scanout_width; + uint32_t scanout_height; bool y0_top; + void *sync; + int fence_fd; + bool allow_fences; + bool draw_submitted; } QemuDmaBuf; +enum display_scanout { + SCANOUT_NONE, + SCANOUT_SURFACE, + SCANOUT_TEXTURE, + SCANOUT_DMABUF, +}; + +typedef struct DisplayScanout { + enum display_scanout kind; + union { + /* DisplaySurface *surface; is kept in QemuConsole */ + ScanoutTexture texture; + QemuDmaBuf *dmabuf; + }; +} DisplayScanout; + typedef struct DisplayState DisplayState; +typedef struct DisplayGLCtx DisplayGLCtx; typedef struct DisplayChangeListenerOps { const char *dpy_name; @@ -205,16 +243,6 @@ typedef struct DisplayChangeListenerOps { void (*dpy_cursor_define)(DisplayChangeListener *dcl, QEMUCursor *cursor); - /* required if GL */ - QEMUGLContext (*dpy_gl_ctx_create)(DisplayChangeListener *dcl, - QEMUGLParams *params); - /* required if GL */ - void (*dpy_gl_ctx_destroy)(DisplayChangeListener *dcl, - QEMUGLContext ctx); - /* required if GL */ - int (*dpy_gl_ctx_make_current)(DisplayChangeListener *dcl, - QEMUGLContext ctx); - /* required if GL */ void (*dpy_gl_scanout_disable)(DisplayChangeListener *dcl); /* required if GL */ @@ -255,6 +283,31 @@ struct DisplayChangeListener { QLIST_ENTRY(DisplayChangeListener) next; }; +typedef struct DisplayGLCtxOps { + bool (*dpy_gl_ctx_is_compatible_dcl)(DisplayGLCtx *dgc, + DisplayChangeListener *dcl); + QEMUGLContext (*dpy_gl_ctx_create)(DisplayGLCtx *dgc, + QEMUGLParams *params); + void (*dpy_gl_ctx_destroy)(DisplayGLCtx *dgc, + QEMUGLContext ctx); + int (*dpy_gl_ctx_make_current)(DisplayGLCtx *dgc, + QEMUGLContext ctx); + void (*dpy_gl_ctx_create_texture)(DisplayGLCtx *dgc, + DisplaySurface *surface); + void 
(*dpy_gl_ctx_destroy_texture)(DisplayGLCtx *dgc, + DisplaySurface *surface); + void (*dpy_gl_ctx_update_texture)(DisplayGLCtx *dgc, + DisplaySurface *surface, + int x, int y, int w, int h); +} DisplayGLCtxOps; + +struct DisplayGLCtx { + const DisplayGLCtxOps *ops; +#ifdef CONFIG_OPENGL + QemuGLShader *gls; /* optional shared shader */ +#endif +}; + DisplayState *init_displaystate(void); DisplaySurface *qemu_create_displaysurface_from(int width, int height, pixman_format_code_t format, @@ -284,7 +337,7 @@ void unregister_displaychangelistener(DisplayChangeListener *dcl); bool dpy_ui_info_supported(QemuConsole *con); const QemuUIInfo *dpy_get_ui_info(const QemuConsole *con); -int dpy_set_ui_info(QemuConsole *con, QemuUIInfo *info); +int dpy_set_ui_info(QemuConsole *con, QemuUIInfo *info, bool delay); void dpy_gfx_update(QemuConsole *con, int x, int y, int w, int h); void dpy_gfx_update_full(QemuConsole *con); @@ -380,10 +433,8 @@ typedef struct GraphicHwOps { void (*gfx_update)(void *opaque); bool gfx_update_async; /* if true, calls graphic_hw_update_done() */ void (*text_update)(void *opaque, console_ch_t *text); - void (*update_interval)(void *opaque, uint64_t interval); - int (*ui_info)(void *opaque, uint32_t head, QemuUIInfo *info); + void (*ui_info)(void *opaque, uint32_t head, QemuUIInfo *info); void (*gl_block)(void *opaque, bool block); - void (*gl_flushed)(void *opaque); } GraphicHwOps; QemuConsole *graphic_console_init(DeviceState *dev, uint32_t head, @@ -399,10 +450,11 @@ void graphic_hw_update_done(QemuConsole *con); void graphic_hw_invalidate(QemuConsole *con); void graphic_hw_text_update(QemuConsole *con, console_ch_t *chardata); void graphic_hw_gl_block(QemuConsole *con, bool block); -void graphic_hw_gl_flushed(QemuConsole *con); void qemu_console_early_init(void); +void qemu_console_set_display_gl_ctx(QemuConsole *con, DisplayGLCtx *ctx); + QemuConsole *qemu_console_lookup_by_index(unsigned int index); QemuConsole *qemu_console_lookup_by_device(DeviceState *dev, uint32_t head); QemuConsole *qemu_console_lookup_by_device_name(const char *device_id, @@ -412,6 +464,7 @@ bool qemu_console_is_visible(QemuConsole *con); bool qemu_console_is_graphic(QemuConsole *con); bool qemu_console_is_fixedsize(QemuConsole *con); bool qemu_console_is_gl_blocked(QemuConsole *con); +bool qemu_console_is_multihead(DeviceState *dev); char *qemu_console_get_label(QemuConsole *con); int qemu_console_get_index(QemuConsole *con); uint32_t qemu_console_get_head(QemuConsole *con); @@ -467,11 +520,20 @@ int vnc_display_pw_expire(const char *id, time_t expires); void vnc_parse(const char *str); int vnc_init_func(void *opaque, QemuOpts *opts, Error **errp); bool vnc_display_reload_certs(const char *id, Error **errp); +bool vnc_display_update(DisplayUpdateOptionsVNC *arg, Error **errp); /* input.c */ int index_from_key(const char *key, size_t key_length); +#ifdef CONFIG_LINUX /* udmabuf.c */ int udmabuf_fd(void); +#endif + +/* util.c */ +bool qemu_console_fill_device_address(QemuConsole *con, + char *device_address, + size_t size, + Error **errp); #endif diff --git a/include/ui/dbus-display.h b/include/ui/dbus-display.h new file mode 100644 index 0000000000..7c9ec1a069 --- /dev/null +++ b/include/ui/dbus-display.h @@ -0,0 +1,17 @@ +#ifndef DBUS_DISPLAY_H +#define DBUS_DISPLAY_H + +#include "qapi/error.h" +#include "ui/dbus-module.h" + +static inline bool qemu_using_dbus_display(Error **errp) +{ + if (!using_dbus_display) { + error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE, + "D-Bus display is not in use"); + 
return false; + } + return true; +} + +#endif /* DBUS_DISPLAY_H */ diff --git a/include/ui/dbus-module.h b/include/ui/dbus-module.h new file mode 100644 index 0000000000..5442ee0bb6 --- /dev/null +++ b/include/ui/dbus-module.h @@ -0,0 +1,11 @@ +#ifndef DBUS_MODULE_H +#define DBUS_MODULE_H + +struct QemuDBusDisplayOps { + bool (*add_client)(int csock, Error **errp); +}; + +extern int using_dbus_display; +extern struct QemuDBusDisplayOps qemu_dbus_display; + +#endif /* DBUS_MODULE_H */ diff --git a/include/ui/egl-context.h b/include/ui/egl-context.h index 9374fe41e3..c2761d747a 100644 --- a/include/ui/egl-context.h +++ b/include/ui/egl-context.h @@ -4,10 +4,10 @@ #include "ui/console.h" #include "ui/egl-helpers.h" -QEMUGLContext qemu_egl_create_context(DisplayChangeListener *dcl, +QEMUGLContext qemu_egl_create_context(DisplayGLCtx *dgc, QEMUGLParams *params); -void qemu_egl_destroy_context(DisplayChangeListener *dcl, QEMUGLContext ctx); -int qemu_egl_make_context_current(DisplayChangeListener *dcl, +void qemu_egl_destroy_context(DisplayGLCtx *dgc, QEMUGLContext ctx); +int qemu_egl_make_context_current(DisplayGLCtx *dgc, QEMUGLContext ctx); #endif /* EGL_CONTEXT_H */ diff --git a/include/ui/egl-helpers.h b/include/ui/egl-helpers.h index f1bf8f97fc..2fb6e0dd6b 100644 --- a/include/ui/egl-helpers.h +++ b/include/ui/egl-helpers.h @@ -19,6 +19,7 @@ typedef struct egl_fb { GLuint texture; GLuint framebuffer; bool delete_texture; + QemuDmaBuf *dmabuf; } egl_fb; void egl_fb_destroy(egl_fb *fb); @@ -45,6 +46,8 @@ int egl_get_fd_for_texture(uint32_t tex_id, EGLint *stride, EGLint *fourcc, void egl_dmabuf_import_texture(QemuDmaBuf *dmabuf); void egl_dmabuf_release_texture(QemuDmaBuf *dmabuf); +void egl_dmabuf_create_sync(QemuDmaBuf *dmabuf); +void egl_dmabuf_create_fence(QemuDmaBuf *dmabuf); #endif diff --git a/include/ui/gtk.h b/include/ui/gtk.h index 80d6bbd9b5..ae0f53740d 100644 --- a/include/ui/gtk.h +++ b/include/ui/gtk.h @@ -35,6 +35,7 @@ typedef struct GtkDisplayState GtkDisplayState; typedef struct VirtualGfxConsole { GtkWidget *drawing_area; + DisplayGLCtx dgc; DisplayChangeListener dcl; QKbdState *kbd; DisplaySurface *ds; @@ -143,7 +144,6 @@ struct GtkDisplayState { bool external_pause_update; QemuClipboardPeer cbpeer; - QemuClipboardInfo *cbinfo[QEMU_CLIPBOARD_SELECTION__COUNT]; uint32_t cbpending[QEMU_CLIPBOARD_SELECTION__COUNT]; GtkClipboard *gtkcb[QEMU_CLIPBOARD_SELECTION__COUNT]; bool cbowner[QEMU_CLIPBOARD_SELECTION__COUNT]; @@ -155,7 +155,8 @@ extern bool gtk_use_gl_area; /* ui/gtk.c */ void gd_update_windowsize(VirtualConsole *vc); -int gd_monitor_update_interval(GtkWidget *widget); +void gd_update_monitor_refresh_rate(VirtualConsole *vc, GtkWidget *widget); +void gd_hw_gl_flushed(void *vc); /* ui/gtk-egl.c */ void gd_egl_init(VirtualConsole *vc); @@ -165,7 +166,7 @@ void gd_egl_update(DisplayChangeListener *dcl, void gd_egl_refresh(DisplayChangeListener *dcl); void gd_egl_switch(DisplayChangeListener *dcl, DisplaySurface *surface); -QEMUGLContext gd_egl_create_context(DisplayChangeListener *dcl, +QEMUGLContext gd_egl_create_context(DisplayGLCtx *dgc, QEMUGLParams *params); void gd_egl_scanout_disable(DisplayChangeListener *dcl); void gd_egl_scanout_texture(DisplayChangeListener *dcl, @@ -182,12 +183,12 @@ void gd_egl_cursor_dmabuf(DisplayChangeListener *dcl, uint32_t hot_x, uint32_t hot_y); void gd_egl_cursor_position(DisplayChangeListener *dcl, uint32_t pos_x, uint32_t pos_y); -void gd_egl_release_dmabuf(DisplayChangeListener *dcl, - QemuDmaBuf *dmabuf); +void 
gd_egl_flush(DisplayChangeListener *dcl, + uint32_t x, uint32_t y, uint32_t w, uint32_t h); void gd_egl_scanout_flush(DisplayChangeListener *dcl, uint32_t x, uint32_t y, uint32_t w, uint32_t h); void gtk_egl_init(DisplayGLMode mode); -int gd_egl_make_current(DisplayChangeListener *dcl, +int gd_egl_make_current(DisplayGLCtx *dgc, QEMUGLContext ctx); /* ui/gtk-gl-area.c */ @@ -198,9 +199,9 @@ void gd_gl_area_update(DisplayChangeListener *dcl, void gd_gl_area_refresh(DisplayChangeListener *dcl); void gd_gl_area_switch(DisplayChangeListener *dcl, DisplaySurface *surface); -QEMUGLContext gd_gl_area_create_context(DisplayChangeListener *dcl, +QEMUGLContext gd_gl_area_create_context(DisplayGLCtx *dgc, QEMUGLParams *params); -void gd_gl_area_destroy_context(DisplayChangeListener *dcl, +void gd_gl_area_destroy_context(DisplayGLCtx *dgc, QEMUGLContext ctx); void gd_gl_area_scanout_dmabuf(DisplayChangeListener *dcl, QemuDmaBuf *dmabuf); @@ -215,7 +216,7 @@ void gd_gl_area_scanout_disable(DisplayChangeListener *dcl); void gd_gl_area_scanout_flush(DisplayChangeListener *dcl, uint32_t x, uint32_t y, uint32_t w, uint32_t h); void gtk_gl_area_init(void); -int gd_gl_area_make_current(DisplayChangeListener *dcl, +int gd_gl_area_make_current(DisplayGLCtx *dgc, QEMUGLContext ctx); /* gtk-clipboard.c */ diff --git a/include/ui/qemu-pixman.h b/include/ui/qemu-pixman.h index 806ddcd7cd..0c775604d1 100644 --- a/include/ui/qemu-pixman.h +++ b/include/ui/qemu-pixman.h @@ -19,7 +19,7 @@ * feeding libjpeg / libpng and writing screenshots. */ -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN # define PIXMAN_BE_r8g8b8 PIXMAN_r8g8b8 # define PIXMAN_BE_x8r8g8b8 PIXMAN_x8r8g8b8 # define PIXMAN_BE_a8r8g8b8 PIXMAN_a8r8g8b8 diff --git a/include/ui/qemu-spice.h b/include/ui/qemu-spice.h index 71ecd6cfd1..21fe195e18 100644 --- a/include/ui/qemu-spice.h +++ b/include/ui/qemu-spice.h @@ -40,6 +40,12 @@ int qemu_spice_migrate_info(const char *hostname, int port, int tls_port, #define SPICE_NEEDS_SET_MM_TIME 0 #endif +#if defined(SPICE_SERVER_VERSION) && (SPICE_SERVER_VERSION >= 0x000f00) +#define SPICE_HAS_ATTACHED_WORKER 1 +#else +#define SPICE_HAS_ATTACHED_WORKER 0 +#endif + #else /* CONFIG_SPICE */ #include "qemu/error-report.h" diff --git a/include/ui/sdl2.h b/include/ui/sdl2.h index f85c117a78..8fb7e08262 100644 --- a/include/ui/sdl2.h +++ b/include/ui/sdl2.h @@ -5,7 +5,18 @@ #undef WIN32_LEAN_AND_MEAN #include + +/* with Alpine / muslc SDL headers pull in directfb headers + * which in turn trigger warning about redundant decls for + * direct_waitqueue_deinit. 
+ */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wredundant-decls" + #include + +#pragma GCC diagnostic pop + #ifdef CONFIG_SDL_IMAGE # include #endif @@ -16,6 +27,7 @@ #endif struct sdl2_console { + DisplayGLCtx dgc; DisplayChangeListener dcl; DisplaySurface *surface; DisplayOptions *opts; @@ -65,10 +77,10 @@ void sdl2_gl_switch(DisplayChangeListener *dcl, void sdl2_gl_refresh(DisplayChangeListener *dcl); void sdl2_gl_redraw(struct sdl2_console *scon); -QEMUGLContext sdl2_gl_create_context(DisplayChangeListener *dcl, +QEMUGLContext sdl2_gl_create_context(DisplayGLCtx *dgc, QEMUGLParams *params); -void sdl2_gl_destroy_context(DisplayChangeListener *dcl, QEMUGLContext ctx); -int sdl2_gl_make_context_current(DisplayChangeListener *dcl, +void sdl2_gl_destroy_context(DisplayGLCtx *dgc, QEMUGLContext ctx); +int sdl2_gl_make_context_current(DisplayGLCtx *dgc, QEMUGLContext ctx); void sdl2_gl_scanout_disable(DisplayChangeListener *dcl); diff --git a/include/ui/spice-display.h b/include/ui/spice-display.h index ed298d58f0..e271e011da 100644 --- a/include/ui/spice-display.h +++ b/include/ui/spice-display.h @@ -86,6 +86,7 @@ typedef struct SimpleSpiceCursor SimpleSpiceCursor; struct SimpleSpiceDisplay { DisplaySurface *ds; + DisplayGLCtx dgc; DisplayChangeListener dcl; void *buf; int bufsize; @@ -183,8 +184,4 @@ void qemu_spice_display_start(void); void qemu_spice_display_stop(void); int qemu_spice_display_is_running(SimpleSpiceDisplay *ssd); -bool qemu_spice_fill_device_address(QemuConsole *con, - char *device_address, - size_t size); - #endif diff --git a/include/user/safe-syscall.h b/include/user/safe-syscall.h new file mode 100644 index 0000000000..ddceef12e2 --- /dev/null +++ b/include/user/safe-syscall.h @@ -0,0 +1,140 @@ +/* + * safe-syscall.h: prototypes for linux-user signal-race-safe syscalls + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef LINUX_USER_SAFE_SYSCALL_H +#define LINUX_USER_SAFE_SYSCALL_H + +/** + * safe_syscall: + * @int number: number of system call to make + * ...: arguments to the system call + * + * Call a system call if guest signal not pending. + * This has the same API as the libc syscall() function, except that it + * may return -1 with errno == QEMU_ERESTARTSYS if a signal was pending. + * + * Returns: the system call result, or -1 with an error code in errno + * (Errnos are host errnos; we rely on QEMU_ERESTARTSYS not clashing + * with any of the host errno values.) + */ + +/* + * A guide to using safe_syscall() to handle interactions between guest + * syscalls and guest signals: + * + * Guest syscalls come in two flavours: + * + * (1) Non-interruptible syscalls + * + * These are guest syscalls that never get interrupted by signals and + * so never return EINTR. They can be implemented straightforwardly in + * QEMU: just make sure that if the implementation code has to make any + * blocking calls that those calls are retried if they return EINTR. 
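
The rule stated above for non-interruptible syscalls, namely retrying any blocking host call that fails with EINTR, boils down to the classic retry loop; a generic sketch, not part of the patch:

#include <errno.h>
#include <unistd.h>

/* Sketch: keep retrying a blocking host call while it fails with EINTR. */
static ssize_t write_retry_eintr(int fd, const void *buf, size_t len)
{
    ssize_t ret;

    do {
        ret = write(fd, buf, len);
    } while (ret < 0 && errno == EINTR);

    return ret;
}
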
+ * It's also OK to implement these with safe_syscall, though it will be + * a little less efficient if a signal is delivered at the 'wrong' moment. + * + * Some non-interruptible syscalls need to be handled using block_signals() + * to block signals for the duration of the syscall. This mainly applies + * to code which needs to modify the data structures used by the + * host_signal_handler() function and the functions it calls, including + * all syscalls which change the thread's signal mask. + * + * (2) Interruptible syscalls + * + * These are guest syscalls that can be interrupted by signals and + * for which we need to either return EINTR or arrange for the guest + * syscall to be restarted. This category includes both syscalls which + * always restart (and in the kernel return -ERESTARTNOINTR), ones + * which only restart if there is no handler (kernel returns -ERESTARTNOHAND + * or -ERESTART_RESTARTBLOCK), and the most common kind which restart + * if the handler was registered with SA_RESTART (kernel returns + * -ERESTARTSYS). System calls which are only interruptible in some + * situations (like 'open') also need to be handled this way. + * + * Here it is important that the host syscall is made + * via this safe_syscall() function, and *not* via the host libc. + * If the host libc is used then the implementation will appear to work + * most of the time, but there will be a race condition where a + * signal could arrive just before we make the host syscall inside libc, + * and then the guest syscall will not correctly be interrupted. + * Instead the implementation of the guest syscall can use the safe_syscall + * function but otherwise just return the result or errno in the usual + * way; the main loop code will take care of restarting the syscall + * if appropriate. + * + * (If the implementation needs to make multiple host syscalls this is + * OK; any which might really block must be via safe_syscall(); for those + * which are only technically blocking (ie which we know in practice won't + * stay in the host kernel indefinitely) it's OK to use libc if necessary. + * You must be able to cope with backing out correctly if some safe_syscall + * you make in the implementation returns either -QEMU_ERESTARTSYS or + * EINTR though.) + * + * block_signals() cannot be used for interruptible syscalls. + * + * + * How and why the safe_syscall implementation works: + * + * The basic setup is that we make the host syscall via a known + * section of host native assembly. If a signal occurs, our signal + * handler checks the interrupted host PC against the addresse of that + * known section. If the PC is before or at the address of the syscall + * instruction then we change the PC to point at a "return + * -QEMU_ERESTARTSYS" code path instead, and then exit the signal handler + * (causing the safe_syscall() call to immediately return that value). + * Then in the main.c loop if we see this magic return value we adjust + * the guest PC to wind it back to before the system call, and invoke + * the guest signal handler as usual. 
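
Sketch of the usage pattern for interruptible syscalls described above: route the host call through safe_syscall() rather than libc and return the result or errno as usual, leaving -QEMU_ERESTARTSYS to the main loop. The wrapper name and the direct use of __NR_read are illustrative only:

/* Illustrative only: host read() made signal-race-safe via safe_syscall(). */
static ssize_t safe_read_example(int fd, void *buf, size_t count)
{
    return safe_syscall(__NR_read, fd, buf, count);
}

/* A guest read emulation can then simply return the result or host errno;
 * when the caller sees -1/QEMU_ERESTARTSYS, the main loop rewinds the guest
 * PC and re-issues the syscall after delivering the signal, as described. */
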
+ * + * This winding-back will happen in two cases: + * (1) signal came in just before we took the host syscall (a race); + * in this case we'll take the guest signal and have another go + * at the syscall afterwards, and this is indistinguishable for the + * guest from the timing having been different such that the guest + * signal really did win the race + * (2) signal came in while the host syscall was blocking, and the + * host kernel decided the syscall should be restarted; + * in this case we want to restart the guest syscall also, and so + * rewinding is the right thing. (Note that "restart" semantics mean + * "first call the signal handler, then reattempt the syscall".) + * The other situation to consider is when a signal came in while the + * host syscall was blocking, and the host kernel decided that the syscall + * should not be restarted; in this case QEMU's host signal handler will + * be invoked with the PC pointing just after the syscall instruction, + * with registers indicating an EINTR return; the special code in the + * handler will not kick in, and we will return EINTR to the guest as + * we should. + * + * Notice that we can leave the host kernel to make the decision for + * us about whether to do a restart of the syscall or not; we do not + * need to check SA_RESTART flags in QEMU or distinguish the various + * kinds of restartability. + */ + +/* The core part of this function is implemented in assembly */ +extern long safe_syscall_base(int *pending, long number, ...); +extern long safe_syscall_set_errno_tail(int value); + +/* These are defined by the safe-syscall.inc.S file */ +extern char safe_syscall_start[]; +extern char safe_syscall_end[]; + +#define safe_syscall(...) \ + safe_syscall_base(&((TaskState *)thread_cpu->opaque)->signal_pending, \ + __VA_ARGS__) + +#endif diff --git a/include/user/syscall-trace.h b/include/user/syscall-trace.h index 614cfacfa5..b4e53d3870 100644 --- a/include/user/syscall-trace.h +++ b/include/user/syscall-trace.h @@ -39,4 +39,4 @@ static inline void record_syscall_return(void *cpu, int num, abi_long ret) } -#endif /* _SYSCALL_TRACE_H_ */ +#endif /* SYSCALL_TRACE_H */ diff --git a/io/channel-buffer.c b/io/channel-buffer.c index baa4e2b089..bf52011be2 100644 --- a/io/channel-buffer.c +++ b/io/channel-buffer.c @@ -81,6 +81,7 @@ static ssize_t qio_channel_buffer_writev(QIOChannel *ioc, size_t niov, int *fds, size_t nfds, + int flags, Error **errp) { QIOChannelBuffer *bioc = QIO_CHANNEL_BUFFER(ioc); diff --git a/io/channel-command.c b/io/channel-command.c index b2a9e27138..74516252ba 100644 --- a/io/channel-command.c +++ b/io/channel-command.c @@ -26,11 +26,30 @@ #include "qemu/sockets.h" #include "trace.h" - -QIOChannelCommand * +/** + * qio_channel_command_new_pid: + * @writefd: the FD connected to the command's stdin + * @readfd: the FD connected to the command's stdout + * @pid: the PID/HANDLE of the running child command + * @errp: pointer to a NULL-initialized error object + * + * Create a channel for performing I/O with the + * previously spawned command identified by @pid. + * The two file descriptors provide the connection + * to command's stdio streams, either one or which + * may be -1 to indicate that stream is not open. + * + * The channel will take ownership of the process + * @pid and will kill it when closing the channel. + * Similarly it will take responsibility for + * closing the file descriptors @writefd and @readfd. 
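
The PC test described in the comment above can be pictured with the safe_syscall_start/safe_syscall_end markers this header declares; a simplified sketch, not the actual per-host handler code:

/* Simplified sketch of the range check done by the host signal handler. */
static bool pc_in_safe_syscall(uintptr_t host_pc)
{
    return host_pc >= (uintptr_t)safe_syscall_start &&
           host_pc <  (uintptr_t)safe_syscall_end;
}

/* When this is true and the PC has not yet passed the syscall instruction,
 * the handler rewrites the PC to the "return -QEMU_ERESTARTSYS" path in
 * safe-syscall.inc.S, as described above. */
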
+ * + * Returns: the command channel object, or NULL on error + */ +static QIOChannelCommand * qio_channel_command_new_pid(int writefd, int readfd, - pid_t pid) + GPid pid) { QIOChannelCommand *ioc; @@ -40,119 +59,41 @@ qio_channel_command_new_pid(int writefd, ioc->writefd = writefd; ioc->pid = pid; - trace_qio_channel_command_new_pid(ioc, writefd, readfd, pid); + trace_qio_channel_command_new_pid(ioc, writefd, readfd, +#ifdef WIN32 + GetProcessId(pid) +#else + pid +#endif + ); return ioc; } - -#ifndef WIN32 QIOChannelCommand * qio_channel_command_new_spawn(const char *const argv[], int flags, Error **errp) { - pid_t pid = -1; - int stdinfd[2] = { -1, -1 }; - int stdoutfd[2] = { -1, -1 }; - int devnull = -1; - bool stdinnull = false, stdoutnull = false; - QIOChannelCommand *ioc; + g_autoptr(GError) err = NULL; + GPid pid = 0; + GSpawnFlags gflags = G_SPAWN_CLOEXEC_PIPES | G_SPAWN_DO_NOT_REAP_CHILD; + int stdinfd = -1, stdoutfd = -1; flags = flags & O_ACCMODE; + gflags |= flags == O_WRONLY ? G_SPAWN_STDOUT_TO_DEV_NULL : 0; - if (flags == O_RDONLY) { - stdinnull = true; - } - if (flags == O_WRONLY) { - stdoutnull = true; + if (!g_spawn_async_with_pipes(NULL, (char **)argv, NULL, gflags, NULL, NULL, + &pid, + flags == O_RDONLY ? NULL : &stdinfd, + flags == O_WRONLY ? NULL : &stdoutfd, + NULL, &err)) { + error_setg(errp, "%s", err->message); + return NULL; } - if (stdinnull || stdoutnull) { - devnull = open("/dev/null", O_RDWR); - if (devnull < 0) { - error_setg_errno(errp, errno, - "Unable to open /dev/null"); - goto error; - } - } - - if ((!stdinnull && pipe(stdinfd) < 0) || - (!stdoutnull && pipe(stdoutfd) < 0)) { - error_setg_errno(errp, errno, - "Unable to open pipe"); - goto error; - } - - pid = qemu_fork(errp); - if (pid < 0) { - goto error; - } - - if (pid == 0) { /* child */ - dup2(stdinnull ? devnull : stdinfd[0], STDIN_FILENO); - dup2(stdoutnull ? devnull : stdoutfd[1], STDOUT_FILENO); - /* Leave stderr connected to qemu's stderr */ - - if (!stdinnull) { - close(stdinfd[0]); - close(stdinfd[1]); - } - if (!stdoutnull) { - close(stdoutfd[0]); - close(stdoutfd[1]); - } - if (devnull != -1) { - close(devnull); - } - - execv(argv[0], (char * const *)argv); - _exit(1); - } - - if (!stdinnull) { - close(stdinfd[0]); - } - if (!stdoutnull) { - close(stdoutfd[1]); - } - - ioc = qio_channel_command_new_pid(stdinnull ? devnull : stdinfd[1], - stdoutnull ? 
devnull : stdoutfd[0], - pid); - trace_qio_channel_command_new_spawn(ioc, argv[0], flags); - return ioc; - - error: - if (devnull != -1) { - close(devnull); - } - if (stdinfd[0] != -1) { - close(stdinfd[0]); - } - if (stdinfd[1] != -1) { - close(stdinfd[1]); - } - if (stdoutfd[0] != -1) { - close(stdoutfd[0]); - } - if (stdoutfd[1] != -1) { - close(stdoutfd[1]); - } - return NULL; + return qio_channel_command_new_pid(stdinfd, stdoutfd, pid); } -#else /* WIN32 */ -QIOChannelCommand * -qio_channel_command_new_spawn(const char *const argv[], - int flags, - Error **errp) -{ - error_setg_errno(errp, ENOSYS, - "Command spawn not supported on this platform"); - return NULL; -} -#endif /* WIN32 */ - #ifndef WIN32 static int qio_channel_command_abort(QIOChannelCommand *ioc, Error **errp) @@ -195,6 +136,23 @@ static int qio_channel_command_abort(QIOChannelCommand *ioc, return 0; } +#else +static int qio_channel_command_abort(QIOChannelCommand *ioc, + Error **errp) +{ + DWORD ret; + + TerminateProcess(ioc->pid, 0); + ret = WaitForSingleObject(ioc->pid, 1000); + if (ret != WAIT_OBJECT_0) { + error_setg(errp, + "Process %llu refused to die", + (unsigned long long)GetProcessId(ioc->pid)); + return -1; + } + + return 0; +} #endif /* ! WIN32 */ @@ -203,7 +161,7 @@ static void qio_channel_command_init(Object *obj) QIOChannelCommand *ioc = QIO_CHANNEL_COMMAND(obj); ioc->readfd = -1; ioc->writefd = -1; - ioc->pid = -1; + ioc->pid = 0; } static void qio_channel_command_finalize(Object *obj) @@ -218,12 +176,27 @@ static void qio_channel_command_finalize(Object *obj) } ioc->writefd = ioc->readfd = -1; if (ioc->pid > 0) { -#ifndef WIN32 qio_channel_command_abort(ioc, NULL); -#endif + g_spawn_close_pid(ioc->pid); } } +#ifdef WIN32 +static bool win32_fd_poll(int fd, gushort events) +{ + GPollFD pfd = { .fd = _get_osfhandle(fd), .events = events }; + int res; + + do { + res = g_poll(&pfd, 1, 0); + } while (res < 0 && errno == EINTR); + if (res == 0) { + return false; + } + + return true; +} +#endif static ssize_t qio_channel_command_readv(QIOChannel *ioc, const struct iovec *iov, @@ -235,6 +208,12 @@ static ssize_t qio_channel_command_readv(QIOChannel *ioc, QIOChannelCommand *cioc = QIO_CHANNEL_COMMAND(ioc); ssize_t ret; +#ifdef WIN32 + if (!cioc->blocking && !win32_fd_poll(cioc->readfd, G_IO_IN)) { + return QIO_CHANNEL_ERR_BLOCK; + } +#endif + retry: ret = readv(cioc->readfd, iov, niov); if (ret < 0) { @@ -258,11 +237,18 @@ static ssize_t qio_channel_command_writev(QIOChannel *ioc, size_t niov, int *fds, size_t nfds, + int flags, Error **errp) { QIOChannelCommand *cioc = QIO_CHANNEL_COMMAND(ioc); ssize_t ret; +#ifdef WIN32 + if (!cioc->blocking && !win32_fd_poll(cioc->writefd, G_IO_OUT)) { + return QIO_CHANNEL_ERR_BLOCK; + } +#endif + retry: ret = writev(cioc->writefd, iov, niov); if (ret <= 0) { @@ -285,14 +271,16 @@ static int qio_channel_command_set_blocking(QIOChannel *ioc, { QIOChannelCommand *cioc = QIO_CHANNEL_COMMAND(ioc); - if (enabled) { - qemu_set_block(cioc->writefd); - qemu_set_block(cioc->readfd); - } else { - qemu_set_nonblock(cioc->writefd); - qemu_set_nonblock(cioc->readfd); - } +#ifdef WIN32 + cioc->blocking = enabled; +#else + if ((cioc->writefd >= 0 && !g_unix_set_fd_nonblocking(cioc->writefd, !enabled, NULL)) || + (cioc->readfd >= 0 && !g_unix_set_fd_nonblocking(cioc->readfd, !enabled, NULL))) { + error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + return -1; + } +#endif return 0; } @@ -329,6 +317,8 @@ static int qio_channel_command_close(QIOChannel *ioc, (unsigned long 
long)cioc->pid); return -1; } +#else + WaitForSingleObject(cioc->pid, INFINITE); #endif if (rv < 0) { @@ -346,8 +336,10 @@ static void qio_channel_command_set_aio_fd_handler(QIOChannel *ioc, void *opaque) { QIOChannelCommand *cioc = QIO_CHANNEL_COMMAND(ioc); - aio_set_fd_handler(ctx, cioc->readfd, false, io_read, NULL, NULL, opaque); - aio_set_fd_handler(ctx, cioc->writefd, false, NULL, io_write, NULL, opaque); + aio_set_fd_handler(ctx, cioc->readfd, false, + io_read, NULL, NULL, NULL, opaque); + aio_set_fd_handler(ctx, cioc->writefd, false, + NULL, io_write, NULL, NULL, opaque); } diff --git a/io/channel-file.c b/io/channel-file.c index c4bf799a80..b67687c2aa 100644 --- a/io/channel-file.c +++ b/io/channel-file.c @@ -114,6 +114,7 @@ static ssize_t qio_channel_file_writev(QIOChannel *ioc, size_t niov, int *fds, size_t nfds, + int flags, Error **errp) { QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc); @@ -139,14 +140,19 @@ static int qio_channel_file_set_blocking(QIOChannel *ioc, bool enabled, Error **errp) { +#ifdef WIN32 + /* not implemented */ + error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + return -1; +#else QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc); - if (enabled) { - qemu_set_block(fioc->fd); - } else { - qemu_set_nonblock(fioc->fd); + if (!g_unix_set_fd_nonblocking(fioc->fd, !enabled, NULL)) { + error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + return -1; } return 0; +#endif } @@ -191,7 +197,8 @@ static void qio_channel_file_set_aio_fd_handler(QIOChannel *ioc, void *opaque) { QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc); - aio_set_fd_handler(ctx, fioc->fd, false, io_read, io_write, NULL, opaque); + aio_set_fd_handler(ctx, fioc->fd, false, io_read, io_write, + NULL, NULL, opaque); } static GSource *qio_channel_file_create_watch(QIOChannel *ioc, diff --git a/io/channel-null.c b/io/channel-null.c new file mode 100644 index 0000000000..75e3781507 --- /dev/null +++ b/io/channel-null.c @@ -0,0 +1,237 @@ +/* + * QEMU I/O channels null driver + * + * Copyright (c) 2022 Red Hat, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ * + */ + +#include "qemu/osdep.h" +#include "io/channel-null.h" +#include "io/channel-watch.h" +#include "qapi/error.h" +#include "trace.h" +#include "qemu/iov.h" + +typedef struct QIOChannelNullSource QIOChannelNullSource; +struct QIOChannelNullSource { + GSource parent; + QIOChannel *ioc; + GIOCondition condition; +}; + + +QIOChannelNull * +qio_channel_null_new(void) +{ + QIOChannelNull *ioc; + + ioc = QIO_CHANNEL_NULL(object_new(TYPE_QIO_CHANNEL_NULL)); + + trace_qio_channel_null_new(ioc); + + return ioc; +} + + +static void +qio_channel_null_init(Object *obj) +{ + QIOChannelNull *ioc = QIO_CHANNEL_NULL(obj); + ioc->closed = false; +} + + +static ssize_t +qio_channel_null_readv(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + int **fds G_GNUC_UNUSED, + size_t *nfds G_GNUC_UNUSED, + Error **errp) +{ + QIOChannelNull *nioc = QIO_CHANNEL_NULL(ioc); + + if (nioc->closed) { + error_setg_errno(errp, EINVAL, + "Channel is closed"); + return -1; + } + + return 0; +} + + +static ssize_t +qio_channel_null_writev(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + int *fds G_GNUC_UNUSED, + size_t nfds G_GNUC_UNUSED, + int flags G_GNUC_UNUSED, + Error **errp) +{ + QIOChannelNull *nioc = QIO_CHANNEL_NULL(ioc); + + if (nioc->closed) { + error_setg_errno(errp, EINVAL, + "Channel is closed"); + return -1; + } + + return iov_size(iov, niov); +} + + +static int +qio_channel_null_set_blocking(QIOChannel *ioc G_GNUC_UNUSED, + bool enabled G_GNUC_UNUSED, + Error **errp G_GNUC_UNUSED) +{ + return 0; +} + + +static off_t +qio_channel_null_seek(QIOChannel *ioc G_GNUC_UNUSED, + off_t offset G_GNUC_UNUSED, + int whence G_GNUC_UNUSED, + Error **errp G_GNUC_UNUSED) +{ + return 0; +} + + +static int +qio_channel_null_close(QIOChannel *ioc, + Error **errp G_GNUC_UNUSED) +{ + QIOChannelNull *nioc = QIO_CHANNEL_NULL(ioc); + + nioc->closed = true; + return 0; +} + + +static void +qio_channel_null_set_aio_fd_handler(QIOChannel *ioc G_GNUC_UNUSED, + AioContext *ctx G_GNUC_UNUSED, + IOHandler *io_read G_GNUC_UNUSED, + IOHandler *io_write G_GNUC_UNUSED, + void *opaque G_GNUC_UNUSED) +{ +} + + +static gboolean +qio_channel_null_source_prepare(GSource *source G_GNUC_UNUSED, + gint *timeout) +{ + *timeout = -1; + + return TRUE; +} + + +static gboolean +qio_channel_null_source_check(GSource *source G_GNUC_UNUSED) +{ + return TRUE; +} + + +static gboolean +qio_channel_null_source_dispatch(GSource *source, + GSourceFunc callback, + gpointer user_data) +{ + QIOChannelFunc func = (QIOChannelFunc)callback; + QIOChannelNullSource *ssource = (QIOChannelNullSource *)source; + + return (*func)(ssource->ioc, + ssource->condition, + user_data); +} + + +static void +qio_channel_null_source_finalize(GSource *source) +{ + QIOChannelNullSource *ssource = (QIOChannelNullSource *)source; + + object_unref(OBJECT(ssource->ioc)); +} + + +GSourceFuncs qio_channel_null_source_funcs = { + qio_channel_null_source_prepare, + qio_channel_null_source_check, + qio_channel_null_source_dispatch, + qio_channel_null_source_finalize +}; + + +static GSource * +qio_channel_null_create_watch(QIOChannel *ioc, + GIOCondition condition) +{ + GSource *source; + QIOChannelNullSource *ssource; + + source = g_source_new(&qio_channel_null_source_funcs, + sizeof(QIOChannelNullSource)); + ssource = (QIOChannelNullSource *)source; + + ssource->ioc = ioc; + object_ref(OBJECT(ioc)); + + ssource->condition = condition; + + return source; +} + + +static void +qio_channel_null_class_init(ObjectClass *klass, + void *class_data G_GNUC_UNUSED) +{ + 
QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass); + + ioc_klass->io_writev = qio_channel_null_writev; + ioc_klass->io_readv = qio_channel_null_readv; + ioc_klass->io_set_blocking = qio_channel_null_set_blocking; + ioc_klass->io_seek = qio_channel_null_seek; + ioc_klass->io_close = qio_channel_null_close; + ioc_klass->io_create_watch = qio_channel_null_create_watch; + ioc_klass->io_set_aio_fd_handler = qio_channel_null_set_aio_fd_handler; +} + + +static const TypeInfo qio_channel_null_info = { + .parent = TYPE_QIO_CHANNEL, + .name = TYPE_QIO_CHANNEL_NULL, + .instance_size = sizeof(QIOChannelNull), + .instance_init = qio_channel_null_init, + .class_init = qio_channel_null_class_init, +}; + + +static void +qio_channel_null_register_types(void) +{ + type_register_static(&qio_channel_null_info); +} + +type_init(qio_channel_null_register_types); diff --git a/io/channel-socket.c b/io/channel-socket.c index 606ec97cf7..b76dca9cc1 100644 --- a/io/channel-socket.c +++ b/io/channel-socket.c @@ -18,7 +18,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/error.h" #include "qapi/qapi-visit-sockets.h" #include "qemu/module.h" @@ -26,6 +25,14 @@ #include "io/channel-watch.h" #include "trace.h" #include "qapi/clone-visitor.h" +#ifdef CONFIG_LINUX +#include +#include + +#if (defined(MSG_ZEROCOPY) && defined(SO_ZEROCOPY)) +#define QEMU_MSG_ZEROCOPY +#endif +#endif #define SOCKET_MAX_FDS 16 @@ -55,6 +62,8 @@ qio_channel_socket_new(void) sioc = QIO_CHANNEL_SOCKET(object_new(TYPE_QIO_CHANNEL_SOCKET)); sioc->fd = -1; + sioc->zero_copy_queued = 0; + sioc->zero_copy_sent = 0; ioc = QIO_CHANNEL(sioc); qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN); @@ -154,6 +163,16 @@ int qio_channel_socket_connect_sync(QIOChannelSocket *ioc, return -1; } +#ifdef QEMU_MSG_ZEROCOPY + int ret, v = 1; + ret = setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &v, sizeof(v)); + if (ret == 0) { + /* Zero copy available on host */ + qio_channel_set_feature(QIO_CHANNEL(ioc), + QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY); + } +#endif + return 0; } @@ -461,7 +480,7 @@ static void qio_channel_socket_copy_fds(struct msghdr *msg, } /* O_NONBLOCK is preserved across SCM_RIGHTS so reset it */ - qemu_set_block(fd); + qemu_socket_set_block(fd); #ifndef MSG_CMSG_CLOEXEC qemu_set_cloexec(fd); @@ -525,6 +544,7 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc, size_t niov, int *fds, size_t nfds, + int flags, Error **errp) { QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc); @@ -533,6 +553,7 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc, char control[CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS)]; size_t fdsize = sizeof(int) * nfds; struct cmsghdr *cmsg; + int sflags = 0; memset(control, 0, CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS)); @@ -557,19 +578,44 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc, memcpy(CMSG_DATA(cmsg), fds, fdsize); } + if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) { +#ifdef QEMU_MSG_ZEROCOPY + sflags = MSG_ZEROCOPY; +#else + /* + * We expect QIOChannel class entry point to have + * blocked this code path already + */ + g_assert_not_reached(); +#endif + } + retry: - ret = sendmsg(sioc->fd, &msg, 0); + ret = sendmsg(sioc->fd, &msg, sflags); if (ret <= 0) { - if (errno == EAGAIN) { + switch (errno) { + case EAGAIN: return QIO_CHANNEL_ERR_BLOCK; - } - if (errno == EINTR) { + case EINTR: goto retry; + case ENOBUFS: + if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) { + error_setg_errno(errp, errno, + "Process can't lock enough memory for using MSG_ZEROCOPY"); + return -1; + } + break; 
} + error_setg_errno(errp, errno, "Unable to write to socket"); return -1; } + + if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) { + sioc->zero_copy_queued++; + } + return ret; } #else /* WIN32 */ @@ -620,6 +666,7 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc, size_t niov, int *fds, size_t nfds, + int flags, Error **errp) { QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc); @@ -658,6 +705,80 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc, } #endif /* WIN32 */ + +#ifdef QEMU_MSG_ZEROCOPY +static int qio_channel_socket_flush(QIOChannel *ioc, + Error **errp) +{ + QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc); + struct msghdr msg = {}; + struct sock_extended_err *serr; + struct cmsghdr *cm; + char control[CMSG_SPACE(sizeof(*serr))]; + int received; + int ret; + + if (sioc->zero_copy_queued == sioc->zero_copy_sent) { + return 0; + } + + msg.msg_control = control; + msg.msg_controllen = sizeof(control); + memset(control, 0, sizeof(control)); + + ret = 1; + + while (sioc->zero_copy_sent < sioc->zero_copy_queued) { + received = recvmsg(sioc->fd, &msg, MSG_ERRQUEUE); + if (received < 0) { + switch (errno) { + case EAGAIN: + /* Nothing on errqueue, wait until something is available */ + qio_channel_wait(ioc, G_IO_ERR); + continue; + case EINTR: + continue; + default: + error_setg_errno(errp, errno, + "Unable to read errqueue"); + return -1; + } + } + + cm = CMSG_FIRSTHDR(&msg); + if (cm->cmsg_level != SOL_IP && cm->cmsg_type != IP_RECVERR && + cm->cmsg_level != SOL_IPV6 && cm->cmsg_type != IPV6_RECVERR) { + error_setg_errno(errp, EPROTOTYPE, + "Wrong cmsg in errqueue"); + return -1; + } + + serr = (void *) CMSG_DATA(cm); + if (serr->ee_errno != SO_EE_ORIGIN_NONE) { + error_setg_errno(errp, serr->ee_errno, + "Error on socket"); + return -1; + } + if (serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY) { + error_setg_errno(errp, serr->ee_origin, + "Error not from zero copy"); + return -1; + } + + /* No errors, count successfully finished sendmsg()*/ + sioc->zero_copy_sent += serr->ee_data - serr->ee_info + 1; + + /* If any sendmsg() succeeded using zero copy, return 0 at the end */ + if (serr->ee_code != SO_EE_CODE_ZEROCOPY_COPIED) { + ret = 0; + } + } + + return ret; +} + +#endif /* QEMU_MSG_ZEROCOPY */ + static int qio_channel_socket_set_blocking(QIOChannel *ioc, bool enabled, @@ -666,9 +787,9 @@ qio_channel_socket_set_blocking(QIOChannel *ioc, QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc); if (enabled) { - qemu_set_block(sioc->fd); + qemu_socket_set_block(sioc->fd); } else { - qemu_set_nonblock(sioc->fd); + qemu_socket_set_nonblock(sioc->fd); } return 0; } @@ -681,9 +802,9 @@ qio_channel_socket_set_delay(QIOChannel *ioc, QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc); int v = enabled ? 
0 : 1; - qemu_setsockopt(sioc->fd, - IPPROTO_TCP, TCP_NODELAY, - &v, sizeof(v)); + setsockopt(sioc->fd, + IPPROTO_TCP, TCP_NODELAY, + &v, sizeof(v)); } @@ -761,7 +882,8 @@ static void qio_channel_socket_set_aio_fd_handler(QIOChannel *ioc, void *opaque) { QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc); - aio_set_fd_handler(ctx, sioc->fd, false, io_read, io_write, NULL, opaque); + aio_set_fd_handler(ctx, sioc->fd, false, + io_read, io_write, NULL, NULL, opaque); } static GSource *qio_channel_socket_create_watch(QIOChannel *ioc, @@ -787,6 +909,9 @@ static void qio_channel_socket_class_init(ObjectClass *klass, ioc_klass->io_set_delay = qio_channel_socket_set_delay; ioc_klass->io_create_watch = qio_channel_socket_create_watch; ioc_klass->io_set_aio_fd_handler = qio_channel_socket_set_aio_fd_handler; +#ifdef QEMU_MSG_ZEROCOPY + ioc_klass->io_flush = qio_channel_socket_flush; +#endif } static const TypeInfo qio_channel_socket_info = { diff --git a/io/channel-tls.c b/io/channel-tls.c index 2ae1b92fc0..4ce08ccc28 100644 --- a/io/channel-tls.c +++ b/io/channel-tls.c @@ -74,6 +74,9 @@ qio_channel_tls_new_server(QIOChannel *master, ioc = QIO_CHANNEL_TLS(object_new(TYPE_QIO_CHANNEL_TLS)); ioc->master = master; + if (qio_channel_has_feature(master, QIO_CHANNEL_FEATURE_SHUTDOWN)) { + qio_channel_set_feature(QIO_CHANNEL(ioc), QIO_CHANNEL_FEATURE_SHUTDOWN); + } object_ref(OBJECT(master)); ioc->session = qcrypto_tls_session_new( @@ -301,6 +304,7 @@ static ssize_t qio_channel_tls_writev(QIOChannel *ioc, size_t niov, int *fds, size_t nfds, + int flags, Error **errp) { QIOChannelTLS *tioc = QIO_CHANNEL_TLS(ioc); diff --git a/io/channel-watch.c b/io/channel-watch.c index 0289b3647c..ad7c568a84 100644 --- a/io/channel-watch.c +++ b/io/channel-watch.c @@ -115,28 +115,24 @@ static gboolean qio_channel_socket_source_check(GSource *source) { static struct timeval tv0; - QIOChannelSocketSource *ssource = (QIOChannelSocketSource *)source; - WSANETWORKEVENTS ev; fd_set rfds, wfds, xfds; if (!ssource->condition) { return 0; } - WSAEnumNetworkEvents(ssource->socket, ssource->ioc->event, &ev); - FD_ZERO(&rfds); FD_ZERO(&wfds); FD_ZERO(&xfds); if (ssource->condition & G_IO_IN) { - FD_SET((SOCKET)ssource->socket, &rfds); + FD_SET(ssource->socket, &rfds); } if (ssource->condition & G_IO_OUT) { - FD_SET((SOCKET)ssource->socket, &wfds); + FD_SET(ssource->socket, &wfds); } if (ssource->condition & G_IO_PRI) { - FD_SET((SOCKET)ssource->socket, &xfds); + FD_SET(ssource->socket, &xfds); } ssource->revents = 0; if (select(0, &rfds, &wfds, &xfds, &tv0) == 0) { @@ -285,11 +281,9 @@ GSource *qio_channel_create_socket_watch(QIOChannel *ioc, GSource *source; QIOChannelSocketSource *ssource; -#ifdef WIN32 WSAEventSelect(socket, ioc->event, FD_READ | FD_ACCEPT | FD_CLOSE | FD_CONNECT | FD_WRITE | FD_OOB); -#endif source = g_source_new(&qio_channel_socket_source_funcs, sizeof(QIOChannelSocketSource)); diff --git a/io/channel-websock.c b/io/channel-websock.c index 70889bb54d..fb4932ade7 100644 --- a/io/channel-websock.c +++ b/io/channel-websock.c @@ -32,7 +32,7 @@ #define QIO_CHANNEL_WEBSOCK_CLIENT_KEY_LEN 24 #define QIO_CHANNEL_WEBSOCK_GUID "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" -#define QIO_CHANNEL_WEBSOCK_GUID_LEN strlen(QIO_CHANNEL_WEBSOCK_GUID) +#define QIO_CHANNEL_WEBSOCK_GUID_LEN (sizeof(QIO_CHANNEL_WEBSOCK_GUID) - 1) #define QIO_CHANNEL_WEBSOCK_HEADER_PROTOCOL "sec-websocket-protocol" #define QIO_CHANNEL_WEBSOCK_HEADER_VERSION "sec-websocket-version" @@ -157,7 +157,7 @@ enum { QIO_CHANNEL_WEBSOCK_OPCODE_PONG = 0xA }; -static void 
GCC_FMT_ATTR(2, 3) +static void G_GNUC_PRINTF(2, 3) qio_channel_websock_handshake_send_res(QIOChannelWebsock *ioc, const char *resmsg, ...) @@ -1127,6 +1127,7 @@ static ssize_t qio_channel_websock_writev(QIOChannel *ioc, size_t niov, int *fds, size_t nfds, + int flags, Error **errp) { QIOChannelWebsock *wioc = QIO_CHANNEL_WEBSOCK(ioc); diff --git a/io/channel.c b/io/channel.c index e8b019dc36..0640941ac5 100644 --- a/io/channel.c +++ b/io/channel.c @@ -72,18 +72,32 @@ ssize_t qio_channel_writev_full(QIOChannel *ioc, size_t niov, int *fds, size_t nfds, + int flags, Error **errp) { QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc); - if ((fds || nfds) && - !qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_FD_PASS)) { + if (fds || nfds) { + if (!qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_FD_PASS)) { + error_setg_errno(errp, EINVAL, + "Channel does not support file descriptor passing"); + return -1; + } + if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) { + error_setg_errno(errp, EINVAL, + "Zero Copy does not support file descriptor passing"); + return -1; + } + } + + if ((flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) && + !qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY)) { error_setg_errno(errp, EINVAL, - "Channel does not support file descriptor passing"); + "Requested Zero Copy feature is not available"); return -1; } - return klass->io_writev(ioc, iov, niov, fds, nfds, errp); + return klass->io_writev(ioc, iov, niov, fds, nfds, flags, errp); } @@ -217,14 +231,14 @@ int qio_channel_writev_all(QIOChannel *ioc, size_t niov, Error **errp) { - return qio_channel_writev_full_all(ioc, iov, niov, NULL, 0, errp); + return qio_channel_writev_full_all(ioc, iov, niov, NULL, 0, 0, errp); } int qio_channel_writev_full_all(QIOChannel *ioc, const struct iovec *iov, size_t niov, int *fds, size_t nfds, - Error **errp) + int flags, Error **errp) { int ret = -1; struct iovec *local_iov = g_new(struct iovec, niov); @@ -237,8 +251,10 @@ int qio_channel_writev_full_all(QIOChannel *ioc, while (nlocal_iov > 0) { ssize_t len; - len = qio_channel_writev_full(ioc, local_iov, nlocal_iov, fds, nfds, - errp); + + len = qio_channel_writev_full(ioc, local_iov, nlocal_iov, fds, + nfds, flags, errp); + if (len == QIO_CHANNEL_ERR_BLOCK) { if (qemu_in_coroutine()) { qio_channel_yield(ioc, G_IO_OUT); @@ -277,7 +293,7 @@ ssize_t qio_channel_writev(QIOChannel *ioc, size_t niov, Error **errp) { - return qio_channel_writev_full(ioc, iov, niov, NULL, 0, errp); + return qio_channel_writev_full(ioc, iov, niov, NULL, 0, 0, errp); } @@ -297,7 +313,7 @@ ssize_t qio_channel_write(QIOChannel *ioc, Error **errp) { struct iovec iov = { .iov_base = (char *)buf, .iov_len = buflen }; - return qio_channel_writev_full(ioc, &iov, 1, NULL, 0, errp); + return qio_channel_writev_full(ioc, &iov, 1, NULL, 0, 0, errp); } @@ -473,6 +489,19 @@ off_t qio_channel_io_seek(QIOChannel *ioc, return klass->io_seek(ioc, offset, whence, errp); } +int qio_channel_flush(QIOChannel *ioc, + Error **errp) +{ + QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc); + + if (!klass->io_flush || + !qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY)) { + return 0; + } + + return klass->io_flush(ioc, errp); +} + static void qio_channel_restart_read(void *opaque) { diff --git a/io/dns-resolver.c b/io/dns-resolver.c index a5946a93bf..53b0e8407a 100644 --- a/io/dns-resolver.c +++ b/io/dns-resolver.c @@ -122,7 +122,7 @@ static int qio_dns_resolver_lookup_sync_inet(QIODNSResolver *resolver, .ipv4 = iaddr->ipv4, .has_ipv6 = iaddr->has_ipv6, .ipv6 = 
iaddr->ipv6, -#ifdef IPPROTO_MPTCP +#ifdef HAVE_IPPROTO_MPTCP .has_mptcp = iaddr->has_mptcp, .mptcp = iaddr->mptcp, #endif diff --git a/io/meson.build b/io/meson.build index bbcd3c53a4..283b9b2bdb 100644 --- a/io/meson.build +++ b/io/meson.build @@ -3,6 +3,7 @@ io_ss.add(files( 'channel-buffer.c', 'channel-command.c', 'channel-file.c', + 'channel-null.c', 'channel-socket.c', 'channel-tls.c', 'channel-util.c', diff --git a/io/trace-events b/io/trace-events index c5e814eb44..3cc5cf1efd 100644 --- a/io/trace-events +++ b/io/trace-events @@ -10,6 +10,9 @@ qio_task_thread_result(void *task) "Task thread result task=%p" qio_task_thread_source_attach(void *task, void *source) "Task thread source attach task=%p source=%p" qio_task_thread_source_cancel(void *task, void *source) "Task thread source cancel task=%p source=%p" +# channel-null.c +qio_channel_null_new(void *ioc) "Null new ioc=%p" + # channel-socket.c qio_channel_socket_new(void *ioc) "Socket new ioc=%p" qio_channel_socket_new_fd(void *ioc, int fd) "Socket new ioc=%p fd=%d" diff --git a/iothread.c b/iothread.c index ddbbde61f7..529194a566 100644 --- a/iothread.c +++ b/iothread.c @@ -17,6 +17,7 @@ #include "qemu/module.h" #include "block/aio.h" #include "block/block.h" +#include "sysemu/event-loop-base.h" #include "sysemu/iothread.h" #include "qapi/error.h" #include "qapi/qapi-commands-misc.h" @@ -152,10 +153,15 @@ static void iothread_init_gcontext(IOThread *iothread) iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE); } -static void iothread_set_aio_context_params(IOThread *iothread, Error **errp) +static void iothread_set_aio_context_params(EventLoopBase *base, Error **errp) { + IOThread *iothread = IOTHREAD(base); ERRP_GUARD(); + if (!iothread->ctx) { + return; + } + aio_context_set_poll_params(iothread->ctx, iothread->poll_max_ns, iothread->poll_grow, @@ -166,14 +172,18 @@ static void iothread_set_aio_context_params(IOThread *iothread, Error **errp) } aio_context_set_aio_params(iothread->ctx, - iothread->aio_max_batch, + iothread->parent_obj.aio_max_batch, errp); + + aio_context_set_thread_pool_params(iothread->ctx, base->thread_pool_min, + base->thread_pool_max, errp); } -static void iothread_complete(UserCreatable *obj, Error **errp) + +static void iothread_init(EventLoopBase *base, Error **errp) { Error *local_error = NULL; - IOThread *iothread = IOTHREAD(obj); + IOThread *iothread = IOTHREAD(base); char *thread_name; iothread->stopping = false; @@ -189,7 +199,7 @@ static void iothread_complete(UserCreatable *obj, Error **errp) */ iothread_init_gcontext(iothread); - iothread_set_aio_context_params(iothread, &local_error); + iothread_set_aio_context_params(base, &local_error); if (local_error) { error_propagate(errp, local_error); aio_context_unref(iothread->ctx); @@ -201,7 +211,7 @@ static void iothread_complete(UserCreatable *obj, Error **errp) * to inherit. 
*/ thread_name = g_strdup_printf("IO %s", - object_get_canonical_path_component(OBJECT(obj))); + object_get_canonical_path_component(OBJECT(base))); qemu_thread_create(&iothread->thread, thread_name, iothread_run, iothread, QEMU_THREAD_JOINABLE); g_free(thread_name); @@ -215,36 +225,31 @@ static void iothread_complete(UserCreatable *obj, Error **errp) typedef struct { const char *name; ptrdiff_t offset; /* field's byte offset in IOThread struct */ -} PollParamInfo; +} IOThreadParamInfo; -static PollParamInfo poll_max_ns_info = { +static IOThreadParamInfo poll_max_ns_info = { "poll-max-ns", offsetof(IOThread, poll_max_ns), }; -static PollParamInfo poll_grow_info = { +static IOThreadParamInfo poll_grow_info = { "poll-grow", offsetof(IOThread, poll_grow), }; -static PollParamInfo poll_shrink_info = { +static IOThreadParamInfo poll_shrink_info = { "poll-shrink", offsetof(IOThread, poll_shrink), }; -static PollParamInfo aio_max_batch_info = { - "aio-max-batch", offsetof(IOThread, aio_max_batch), -}; static void iothread_get_param(Object *obj, Visitor *v, - const char *name, void *opaque, Error **errp) + const char *name, IOThreadParamInfo *info, Error **errp) { IOThread *iothread = IOTHREAD(obj); - PollParamInfo *info = opaque; int64_t *field = (void *)iothread + info->offset; visit_type_int64(v, name, field, errp); } static bool iothread_set_param(Object *obj, Visitor *v, - const char *name, void *opaque, Error **errp) + const char *name, IOThreadParamInfo *info, Error **errp) { IOThread *iothread = IOTHREAD(obj); - PollParamInfo *info = opaque; int64_t *field = (void *)iothread + info->offset; int64_t value; @@ -266,16 +271,18 @@ static bool iothread_set_param(Object *obj, Visitor *v, static void iothread_get_poll_param(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { + IOThreadParamInfo *info = opaque; - iothread_get_param(obj, v, name, opaque, errp); + iothread_get_param(obj, v, name, info, errp); } static void iothread_set_poll_param(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { IOThread *iothread = IOTHREAD(obj); + IOThreadParamInfo *info = opaque; - if (!iothread_set_param(obj, v, name, opaque, errp)) { + if (!iothread_set_param(obj, v, name, info, errp)) { return; } @@ -288,33 +295,12 @@ static void iothread_set_poll_param(Object *obj, Visitor *v, } } -static void iothread_get_aio_param(Object *obj, Visitor *v, - const char *name, void *opaque, Error **errp) -{ - - iothread_get_param(obj, v, name, opaque, errp); -} - -static void iothread_set_aio_param(Object *obj, Visitor *v, - const char *name, void *opaque, Error **errp) -{ - IOThread *iothread = IOTHREAD(obj); - - if (!iothread_set_param(obj, v, name, opaque, errp)) { - return; - } - - if (iothread->ctx) { - aio_context_set_aio_params(iothread->ctx, - iothread->aio_max_batch, - errp); - } -} - static void iothread_class_init(ObjectClass *klass, void *class_data) { - UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass); - ucc->complete = iothread_complete; + EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(klass); + + bc->init = iothread_init; + bc->update_params = iothread_set_aio_context_params; object_class_property_add(klass, "poll-max-ns", "int", iothread_get_poll_param, @@ -328,23 +314,15 @@ static void iothread_class_init(ObjectClass *klass, void *class_data) iothread_get_poll_param, iothread_set_poll_param, NULL, &poll_shrink_info); - object_class_property_add(klass, "aio-max-batch", "int", - iothread_get_aio_param, - iothread_set_aio_param, - NULL, &aio_max_batch_info); } 
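
With this hunk, IOThread becomes a subclass of the new event-loop-base abstraction: object creation goes through EventLoopBaseClass::init, shared properties such as aio-max-batch and the thread pool limits live in the parent object, and update_params reapplies them. A hypothetical second subclass, sketched only from the hooks visible in this patch, would be wired the same way:

/* Hypothetical EventLoopBase subclass, illustration only. */
typedef struct MyLoop {
    EventLoopBase parent_obj;  /* carries aio-max-batch, thread-pool-min/max */
    AioContext *ctx;
} MyLoop;

static void my_loop_update_params(EventLoopBase *base, Error **errp)
{
    MyLoop *ml = (MyLoop *)base;   /* a QOM cast macro would be used in real code */

    if (!ml->ctx) {
        return;
    }
    aio_context_set_aio_params(ml->ctx, base->aio_max_batch, errp);
    aio_context_set_thread_pool_params(ml->ctx, base->thread_pool_min,
                                       base->thread_pool_max, errp);
}

static void my_loop_init(EventLoopBase *base, Error **errp)
{
    /* ... create ((MyLoop *)base)->ctx ..., then apply the shared parameters */
    my_loop_update_params(base, errp);
}

static void my_loop_class_init(ObjectClass *klass, void *data)
{
    EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(klass);

    bc->init = my_loop_init;
    bc->update_params = my_loop_update_params;
}
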
static const TypeInfo iothread_info = { .name = TYPE_IOTHREAD, - .parent = TYPE_OBJECT, + .parent = TYPE_EVENT_LOOP_BASE, .class_init = iothread_class_init, .instance_size = sizeof(IOThread), .instance_init = iothread_instance_init, .instance_finalize = iothread_instance_finalize, - .interfaces = (InterfaceInfo[]) { - {TYPE_USER_CREATABLE}, - {} - }, }; static void iothread_register_types(void) @@ -381,7 +359,7 @@ static int query_one_iothread(Object *object, void *opaque) info->poll_max_ns = iothread->poll_max_ns; info->poll_grow = iothread->poll_grow; info->poll_shrink = iothread->poll_shrink; - info->aio_max_batch = iothread->aio_max_batch; + info->aio_max_batch = iothread->parent_obj.aio_max_batch; QAPI_LIST_APPEND(*tail, info); return 0; diff --git a/job-qmp.c b/job-qmp.c index 829a28aa70..d498fc89c0 100644 --- a/job-qmp.c +++ b/job-qmp.c @@ -29,119 +29,117 @@ #include "qapi/error.h" #include "trace/trace-root.h" -/* Get a job using its ID and acquire its AioContext */ -static Job *find_job(const char *id, AioContext **aio_context, Error **errp) +/* + * Get a job using its ID. Called with job_mutex held. + */ +static Job *find_job_locked(const char *id, Error **errp) { Job *job; - *aio_context = NULL; - - job = job_get(id); + job = job_get_locked(id); if (!job) { error_setg(errp, "Job not found"); return NULL; } - *aio_context = job->aio_context; - aio_context_acquire(*aio_context); - return job; } void qmp_job_cancel(const char *id, Error **errp) { - AioContext *aio_context; - Job *job = find_job(id, &aio_context, errp); + Job *job; + + JOB_LOCK_GUARD(); + job = find_job_locked(id, errp); if (!job) { return; } trace_qmp_job_cancel(job); - job_user_cancel(job, true, errp); - aio_context_release(aio_context); + job_user_cancel_locked(job, true, errp); } void qmp_job_pause(const char *id, Error **errp) { - AioContext *aio_context; - Job *job = find_job(id, &aio_context, errp); + Job *job; + + JOB_LOCK_GUARD(); + job = find_job_locked(id, errp); if (!job) { return; } trace_qmp_job_pause(job); - job_user_pause(job, errp); - aio_context_release(aio_context); + job_user_pause_locked(job, errp); } void qmp_job_resume(const char *id, Error **errp) { - AioContext *aio_context; - Job *job = find_job(id, &aio_context, errp); + Job *job; + + JOB_LOCK_GUARD(); + job = find_job_locked(id, errp); if (!job) { return; } trace_qmp_job_resume(job); - job_user_resume(job, errp); - aio_context_release(aio_context); + job_user_resume_locked(job, errp); } void qmp_job_complete(const char *id, Error **errp) { - AioContext *aio_context; - Job *job = find_job(id, &aio_context, errp); + Job *job; + + JOB_LOCK_GUARD(); + job = find_job_locked(id, errp); if (!job) { return; } trace_qmp_job_complete(job); - job_complete(job, errp); - aio_context_release(aio_context); + job_complete_locked(job, errp); } void qmp_job_finalize(const char *id, Error **errp) { - AioContext *aio_context; - Job *job = find_job(id, &aio_context, errp); + Job *job; + + JOB_LOCK_GUARD(); + job = find_job_locked(id, errp); if (!job) { return; } trace_qmp_job_finalize(job); - job_ref(job); - job_finalize(job, errp); + job_ref_locked(job); + job_finalize_locked(job, errp); - /* - * Job's context might have changed via job_finalize (and job_txn_apply - * automatically acquires the new one), so make sure we release the correct - * one. 
- */ - aio_context = job->aio_context; - job_unref(job); - aio_context_release(aio_context); + job_unref_locked(job); } void qmp_job_dismiss(const char *id, Error **errp) { - AioContext *aio_context; - Job *job = find_job(id, &aio_context, errp); + Job *job; + + JOB_LOCK_GUARD(); + job = find_job_locked(id, errp); if (!job) { return; } trace_qmp_job_dismiss(job); - job_dismiss(&job, errp); - aio_context_release(aio_context); + job_dismiss_locked(&job, errp); } -static JobInfo *job_query_single(Job *job, Error **errp) +/* Called with job_mutex held. */ +static JobInfo *job_query_single_locked(Job *job, Error **errp) { JobInfo *info; uint64_t progress_current; @@ -171,17 +169,15 @@ JobInfoList *qmp_query_jobs(Error **errp) JobInfoList *head = NULL, **tail = &head; Job *job; - for (job = job_next(NULL); job; job = job_next(job)) { + JOB_LOCK_GUARD(); + + for (job = job_next_locked(NULL); job; job = job_next_locked(job)) { JobInfo *value; - AioContext *aio_context; if (job_is_internal(job)) { continue; } - aio_context = job->aio_context; - aio_context_acquire(aio_context); - value = job_query_single(job, errp); - aio_context_release(aio_context); + value = job_query_single_locked(job, errp); if (!value) { qapi_free_JobInfoList(head); return NULL; diff --git a/job.c b/job.c index e7a5d28854..72d57f0934 100644 --- a/job.c +++ b/job.c @@ -32,6 +32,27 @@ #include "trace/trace-root.h" #include "qapi/qapi-events-job.h" +/* + * The job API is composed of two categories of functions. + * + * The first includes functions used by the monitor. The monitor is + * peculiar in that it accesses the job list with job_get, and + * therefore needs consistency across job_get and the actual operation + * (e.g. job_user_cancel). To achieve this consistency, the caller + * calls job_lock/job_unlock itself around the whole operation. + * + * + * The second includes functions used by the job drivers and sometimes + * by the core block layer. These delegate the locking to the callee instead. + */ + +/* + * job_mutex protects the jobs list, but also makes the + * struct job fields thread-safe. + */ +QemuMutex job_mutex; + +/* Protected by job_mutex */ static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs); /* Job State Transition Table */ @@ -74,17 +95,12 @@ struct JobTxn { int refcnt; }; -/* Right now, this mutex is only needed to synchronize accesses to job->busy - * and job->sleep_timer, such as concurrent calls to job_do_yield and - * job_enter. */ -static QemuMutex job_mutex; - -static void job_lock(void) +void job_lock(void) { qemu_mutex_lock(&job_mutex); } -static void job_unlock(void) +void job_unlock(void) { qemu_mutex_unlock(&job_mutex); } @@ -102,19 +118,38 @@ JobTxn *job_txn_new(void) return txn; } -static void job_txn_ref(JobTxn *txn) +/* Called with job_mutex held. */ +static void job_txn_ref_locked(JobTxn *txn) { txn->refcnt++; } -void job_txn_unref(JobTxn *txn) +void job_txn_unref_locked(JobTxn *txn) { if (txn && --txn->refcnt == 0) { g_free(txn); } } -void job_txn_add_job(JobTxn *txn, Job *job) +void job_txn_unref(JobTxn *txn) +{ + JOB_LOCK_GUARD(); + job_txn_unref_locked(txn); +} + +/** + * @txn: The transaction (may be NULL) + * @job: Job to add to the transaction + * + * Add @job to the transaction. The @job must not already be in a transaction. + * The caller must call either job_txn_unref() or job_completed() to release + * the reference that is automatically grabbed here. + * + * If @txn is NULL, the function does nothing. + * + * Called with job_mutex held. 
+ */ +static void job_txn_add_job_locked(JobTxn *txn, Job *job) { if (!txn) { return; @@ -124,21 +159,22 @@ void job_txn_add_job(JobTxn *txn, Job *job) job->txn = txn; QLIST_INSERT_HEAD(&txn->jobs, job, txn_list); - job_txn_ref(txn); + job_txn_ref_locked(txn); } -static void job_txn_del_job(Job *job) +/* Called with job_mutex held. */ +static void job_txn_del_job_locked(Job *job) { if (job->txn) { QLIST_REMOVE(job, txn_list); - job_txn_unref(job->txn); + job_txn_unref_locked(job->txn); job->txn = NULL; } } -static int job_txn_apply(Job *job, int fn(Job *)) +/* Called with job_mutex held, but releases it temporarily. */ +static int job_txn_apply_locked(Job *job, int fn(Job *)) { - AioContext *inner_ctx; Job *other_job, *next; JobTxn *txn = job->txn; int rc = 0; @@ -149,25 +185,16 @@ static int job_txn_apply(Job *job, int fn(Job *)) * we need to release it here to avoid holding the lock twice - which would * break AIO_WAIT_WHILE from within fn. */ - job_ref(job); - aio_context_release(job->aio_context); + job_ref_locked(job); QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) { - inner_ctx = other_job->aio_context; - aio_context_acquire(inner_ctx); rc = fn(other_job); - aio_context_release(inner_ctx); if (rc) { break; } } - /* - * Note that job->aio_context might have been changed by calling fn, so we - * can't use a local variable to cache it. - */ - aio_context_acquire(job->aio_context); - job_unref(job); + job_unref_locked(job); return rc; } @@ -176,7 +203,8 @@ bool job_is_internal(Job *job) return (job->id == NULL); } -static void job_state_transition(Job *job, JobStatus s1) +/* Called with job_mutex held. */ +static void job_state_transition_locked(Job *job, JobStatus s1) { JobStatus s0 = job->status; assert(s1 >= 0 && s1 < JOB_STATUS__MAX); @@ -191,7 +219,7 @@ static void job_state_transition(Job *job, JobStatus s1) } } -int job_apply_verb(Job *job, JobVerb verb, Error **errp) +int job_apply_verb_locked(Job *job, JobVerb verb, Error **errp) { JobStatus s0 = job->status; assert(verb >= 0 && verb < JOB_VERB__MAX); @@ -215,34 +243,60 @@ const char *job_type_str(const Job *job) return JobType_str(job_type(job)); } +bool job_is_cancelled_locked(Job *job) +{ + /* force_cancel may be true only if cancelled is true, too */ + assert(job->cancelled || !job->force_cancel); + return job->force_cancel; +} + bool job_is_cancelled(Job *job) +{ + JOB_LOCK_GUARD(); + return job_is_cancelled_locked(job); +} + +/* Called with job_mutex held. 
*/ +static bool job_cancel_requested_locked(Job *job) { return job->cancelled; } +bool job_cancel_requested(Job *job) +{ + JOB_LOCK_GUARD(); + return job_cancel_requested_locked(job); +} + +bool job_is_ready_locked(Job *job) +{ + switch (job->status) { + case JOB_STATUS_UNDEFINED: + case JOB_STATUS_CREATED: + case JOB_STATUS_RUNNING: + case JOB_STATUS_PAUSED: + case JOB_STATUS_WAITING: + case JOB_STATUS_PENDING: + case JOB_STATUS_ABORTING: + case JOB_STATUS_CONCLUDED: + case JOB_STATUS_NULL: + return false; + case JOB_STATUS_READY: + case JOB_STATUS_STANDBY: + return true; + default: + g_assert_not_reached(); + } + return false; +} + bool job_is_ready(Job *job) { - switch (job->status) { - case JOB_STATUS_UNDEFINED: - case JOB_STATUS_CREATED: - case JOB_STATUS_RUNNING: - case JOB_STATUS_PAUSED: - case JOB_STATUS_WAITING: - case JOB_STATUS_PENDING: - case JOB_STATUS_ABORTING: - case JOB_STATUS_CONCLUDED: - case JOB_STATUS_NULL: - return false; - case JOB_STATUS_READY: - case JOB_STATUS_STANDBY: - return true; - default: - g_assert_not_reached(); - } - return false; + JOB_LOCK_GUARD(); + return job_is_ready_locked(job); } -bool job_is_completed(Job *job) +bool job_is_completed_locked(Job *job) { switch (job->status) { case JOB_STATUS_UNDEFINED: @@ -264,17 +318,24 @@ bool job_is_completed(Job *job) return false; } -static bool job_started(Job *job) +static bool job_is_completed(Job *job) +{ + JOB_LOCK_GUARD(); + return job_is_completed_locked(job); +} + +static bool job_started_locked(Job *job) { return job->co; } -static bool job_should_pause(Job *job) +/* Called with job_mutex held. */ +static bool job_should_pause_locked(Job *job) { return job->pause_count > 0; } -Job *job_next(Job *job) +Job *job_next_locked(Job *job) { if (!job) { return QLIST_FIRST(&jobs); @@ -282,7 +343,13 @@ Job *job_next(Job *job) return QLIST_NEXT(job, job_list); } -Job *job_get(const char *id) +Job *job_next(Job *job) +{ + JOB_LOCK_GUARD(); + return job_next_locked(job); +} + +Job *job_get_locked(const char *id) { Job *job; @@ -295,6 +362,18 @@ Job *job_get(const char *id) return NULL; } +void job_set_aio_context(Job *job, AioContext *ctx) +{ + /* protect against read in job_finish_sync_locked and job_start */ + GLOBAL_STATE_CODE(); + /* protect against read in job_do_yield_locked */ + JOB_LOCK_GUARD(); + /* ensure the job is quiescent while the AioContext is changed */ + assert(job->paused || job_is_completed_locked(job)); + job->aio_context = ctx; +} + +/* Called with job_mutex *not* held. 
*/ static void job_sleep_timer_cb(void *opaque) { Job *job = opaque; @@ -308,6 +387,8 @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, { Job *job; + JOB_LOCK_GUARD(); + if (job_id) { if (flags & JOB_INTERNAL) { error_setg(errp, "Cannot specify job ID for internal job"); @@ -317,7 +398,7 @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, error_setg(errp, "Invalid job ID '%s'", job_id); return NULL; } - if (job_get(job_id)) { + if (job_get_locked(job_id)) { error_setg(errp, "Job ID '%s' already in use", job_id); return NULL; } @@ -345,8 +426,9 @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, notifier_list_init(&job->on_finalize_completed); notifier_list_init(&job->on_pending); notifier_list_init(&job->on_ready); + notifier_list_init(&job->on_idle); - job_state_transition(job, JOB_STATUS_CREATED); + job_state_transition_locked(job, JOB_STATUS_CREATED); aio_timer_init(qemu_get_aio_context(), &job->sleep_timer, QEMU_CLOCK_REALTIME, SCALE_NS, job_sleep_timer_cb, job); @@ -357,29 +439,37 @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, * consolidating the job management logic */ if (!txn) { txn = job_txn_new(); - job_txn_add_job(txn, job); - job_txn_unref(txn); + job_txn_add_job_locked(txn, job); + job_txn_unref_locked(txn); } else { - job_txn_add_job(txn, job); + job_txn_add_job_locked(txn, job); } return job; } -void job_ref(Job *job) +void job_ref_locked(Job *job) { ++job->refcnt; } -void job_unref(Job *job) +void job_unref_locked(Job *job) { + GLOBAL_STATE_CODE(); + if (--job->refcnt == 0) { assert(job->status == JOB_STATUS_NULL); assert(!timer_pending(&job->sleep_timer)); assert(!job->txn); if (job->driver->free) { + AioContext *aio_context = job->aio_context; + job_unlock(); + /* FIXME: aiocontext lock is required because cb calls blk_unref */ + aio_context_acquire(aio_context); job->driver->free(job); + aio_context_release(aio_context); + job_lock(); } QLIST_REMOVE(job, job_list); @@ -406,48 +496,56 @@ void job_progress_increase_remaining(Job *job, uint64_t delta) progress_increase_remaining(&job->progress, delta); } -void job_event_cancelled(Job *job) +/** + * To be called when a cancelled job is finalised. + * Called with job_mutex held. + */ +static void job_event_cancelled_locked(Job *job) { notifier_list_notify(&job->on_finalize_cancelled, job); } -void job_event_completed(Job *job) +/** + * To be called when a successfully completed job is finalised. + * Called with job_mutex held. + */ +static void job_event_completed_locked(Job *job) { notifier_list_notify(&job->on_finalize_completed, job); } -static void job_event_pending(Job *job) +/* Called with job_mutex held. */ +static void job_event_pending_locked(Job *job) { notifier_list_notify(&job->on_pending, job); } -static void job_event_ready(Job *job) +/* Called with job_mutex held. */ +static void job_event_ready_locked(Job *job) { notifier_list_notify(&job->on_ready, job); } -static void job_event_idle(Job *job) +/* Called with job_mutex held. 
*/ +static void job_event_idle_locked(Job *job) { notifier_list_notify(&job->on_idle, job); } -void job_enter_cond(Job *job, bool(*fn)(Job *job)) +void job_enter_cond_locked(Job *job, bool(*fn)(Job *job)) { - if (!job_started(job)) { + if (!job_started_locked(job)) { return; } if (job->deferred_to_main_loop) { return; } - job_lock(); if (job->busy) { - job_unlock(); return; } if (fn && !fn(job)) { - job_unlock(); return; } @@ -455,12 +553,14 @@ void job_enter_cond(Job *job, bool(*fn)(Job *job)) timer_del(&job->sleep_timer); job->busy = true; job_unlock(); - aio_co_enter(job->aio_context, job->co); + aio_co_wake(job->co); + job_lock(); } void job_enter(Job *job) { - job_enter_cond(job, NULL); + JOB_LOCK_GUARD(); + job_enter_cond_locked(job, NULL); } /* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds. @@ -468,100 +568,137 @@ void job_enter(Job *job) * is allowed and cancels the timer. * * If @ns is (uint64_t) -1, no timer is scheduled and job_enter() must be - * called explicitly. */ -static void coroutine_fn job_do_yield(Job *job, uint64_t ns) + * called explicitly. + * + * Called with job_mutex held, but releases it temporarily. + */ +static void coroutine_fn job_do_yield_locked(Job *job, uint64_t ns) { - job_lock(); + AioContext *next_aio_context; + if (ns != -1) { timer_mod(&job->sleep_timer, ns); } job->busy = false; - job_event_idle(job); + job_event_idle_locked(job); job_unlock(); qemu_coroutine_yield(); + job_lock(); - /* Set by job_enter_cond() before re-entering the coroutine. */ + next_aio_context = job->aio_context; + /* + * Coroutine has resumed, but in the meanwhile the job AioContext + * might have changed via bdrv_try_change_aio_context(), so we need to move + * the coroutine too in the new aiocontext. + */ + while (qemu_get_current_aio_context() != next_aio_context) { + job_unlock(); + aio_co_reschedule_self(next_aio_context); + job_lock(); + next_aio_context = job->aio_context; + } + + /* Set by job_enter_cond_locked() before re-entering the coroutine. */ assert(job->busy); } -void coroutine_fn job_pause_point(Job *job) +/* Called with job_mutex held, but releases it temporarily. */ +static void coroutine_fn job_pause_point_locked(Job *job) { - assert(job && job_started(job)); + assert(job && job_started_locked(job)); - if (!job_should_pause(job)) { + if (!job_should_pause_locked(job)) { return; } - if (job_is_cancelled(job)) { + if (job_is_cancelled_locked(job)) { return; } if (job->driver->pause) { + job_unlock(); job->driver->pause(job); + job_lock(); } - if (job_should_pause(job) && !job_is_cancelled(job)) { + if (job_should_pause_locked(job) && !job_is_cancelled_locked(job)) { JobStatus status = job->status; - job_state_transition(job, status == JOB_STATUS_READY - ? JOB_STATUS_STANDBY - : JOB_STATUS_PAUSED); + job_state_transition_locked(job, status == JOB_STATUS_READY + ? JOB_STATUS_STANDBY + : JOB_STATUS_PAUSED); job->paused = true; - job_do_yield(job, -1); + job_do_yield_locked(job, -1); job->paused = false; - job_state_transition(job, status); + job_state_transition_locked(job, status); } if (job->driver->resume) { + job_unlock(); job->driver->resume(job); + job_lock(); } } -void job_yield(Job *job) +void coroutine_fn job_pause_point(Job *job) { + JOB_LOCK_GUARD(); + job_pause_point_locked(job); +} + +void coroutine_fn job_yield(Job *job) +{ + JOB_LOCK_GUARD(); assert(job->busy); /* Check cancellation *before* setting busy = false, too! 
*/ - if (job_is_cancelled(job)) { + if (job_is_cancelled_locked(job)) { return; } - if (!job_should_pause(job)) { - job_do_yield(job, -1); + if (!job_should_pause_locked(job)) { + job_do_yield_locked(job, -1); } - job_pause_point(job); + job_pause_point_locked(job); } void coroutine_fn job_sleep_ns(Job *job, int64_t ns) { + JOB_LOCK_GUARD(); assert(job->busy); /* Check cancellation *before* setting busy = false, too! */ - if (job_is_cancelled(job)) { + if (job_is_cancelled_locked(job)) { return; } - if (!job_should_pause(job)) { - job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns); + if (!job_should_pause_locked(job)) { + job_do_yield_locked(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns); } - job_pause_point(job); + job_pause_point_locked(job); } -/* Assumes the block_job_mutex is held */ -static bool job_timer_not_pending(Job *job) +/* Assumes the job_mutex is held */ +static bool job_timer_not_pending_locked(Job *job) { return !timer_pending(&job->sleep_timer); } -void job_pause(Job *job) +void job_pause_locked(Job *job) { job->pause_count++; if (!job->paused) { - job_enter(job); + job_enter_cond_locked(job, NULL); } } -void job_resume(Job *job) +void job_pause(Job *job) +{ + JOB_LOCK_GUARD(); + job_pause_locked(job); +} + +void job_resume_locked(Job *job) { assert(job->pause_count > 0); job->pause_count--; @@ -570,12 +707,18 @@ void job_resume(Job *job) } /* kick only if no timer is pending */ - job_enter_cond(job, job_timer_not_pending); + job_enter_cond_locked(job, job_timer_not_pending_locked); } -void job_user_pause(Job *job, Error **errp) +void job_resume(Job *job) { - if (job_apply_verb(job, JOB_VERB_PAUSE, errp)) { + JOB_LOCK_GUARD(); + job_resume_locked(job); +} + +void job_user_pause_locked(Job *job, Error **errp) +{ + if (job_apply_verb_locked(job, JOB_VERB_PAUSE, errp)) { return; } if (job->user_paused) { @@ -583,87 +726,95 @@ void job_user_pause(Job *job, Error **errp) return; } job->user_paused = true; - job_pause(job); + job_pause_locked(job); } -bool job_user_paused(Job *job) +bool job_user_paused_locked(Job *job) { return job->user_paused; } -void job_user_resume(Job *job, Error **errp) +void job_user_resume_locked(Job *job, Error **errp) { assert(job); + GLOBAL_STATE_CODE(); if (!job->user_paused || job->pause_count <= 0) { error_setg(errp, "Can't resume a job that was not paused"); return; } - if (job_apply_verb(job, JOB_VERB_RESUME, errp)) { + if (job_apply_verb_locked(job, JOB_VERB_RESUME, errp)) { return; } if (job->driver->user_resume) { + job_unlock(); job->driver->user_resume(job); + job_lock(); } job->user_paused = false; - job_resume(job); + job_resume_locked(job); } -static void job_do_dismiss(Job *job) +/* Called with job_mutex held, but releases it temporarily. */ +static void job_do_dismiss_locked(Job *job) { assert(job); job->busy = false; job->paused = false; job->deferred_to_main_loop = true; - job_txn_del_job(job); + job_txn_del_job_locked(job); - job_state_transition(job, JOB_STATUS_NULL); - job_unref(job); + job_state_transition_locked(job, JOB_STATUS_NULL); + job_unref_locked(job); } -void job_dismiss(Job **jobptr, Error **errp) +void job_dismiss_locked(Job **jobptr, Error **errp) { Job *job = *jobptr; /* similarly to _complete, this is QMP-interface only. 
*/ assert(job->id); - if (job_apply_verb(job, JOB_VERB_DISMISS, errp)) { + if (job_apply_verb_locked(job, JOB_VERB_DISMISS, errp)) { return; } - job_do_dismiss(job); + job_do_dismiss_locked(job); *jobptr = NULL; } void job_early_fail(Job *job) { + JOB_LOCK_GUARD(); assert(job->status == JOB_STATUS_CREATED); - job_do_dismiss(job); + job_do_dismiss_locked(job); } -static void job_conclude(Job *job) +/* Called with job_mutex held. */ +static void job_conclude_locked(Job *job) { - job_state_transition(job, JOB_STATUS_CONCLUDED); - if (job->auto_dismiss || !job_started(job)) { - job_do_dismiss(job); + job_state_transition_locked(job, JOB_STATUS_CONCLUDED); + if (job->auto_dismiss || !job_started_locked(job)) { + job_do_dismiss_locked(job); } } -static void job_update_rc(Job *job) +/* Called with job_mutex held. */ +static void job_update_rc_locked(Job *job) { - if (!job->ret && job_is_cancelled(job)) { + if (!job->ret && job_is_cancelled_locked(job)) { job->ret = -ECANCELED; } if (job->ret) { if (!job->err) { error_setg(&job->err, "%s", strerror(-job->ret)); } - job_state_transition(job, JOB_STATUS_ABORTING); + job_state_transition_locked(job, JOB_STATUS_ABORTING); } } static void job_commit(Job *job) { assert(!job->ret); + GLOBAL_STATE_CODE(); if (job->driver->commit) { job->driver->commit(job); } @@ -672,6 +823,7 @@ static void job_commit(Job *job) static void job_abort(Job *job) { assert(job->ret); + GLOBAL_STATE_CODE(); if (job->driver->abort) { job->driver->abort(job); } @@ -679,19 +831,31 @@ static void job_abort(Job *job) static void job_clean(Job *job) { + GLOBAL_STATE_CODE(); if (job->driver->clean) { job->driver->clean(job); } } -static int job_finalize_single(Job *job) +/* + * Called with job_mutex held, but releases it temporarily. + * Takes AioContext lock internally to invoke a job->driver callback. + */ +static int job_finalize_single_locked(Job *job) { - assert(job_is_completed(job)); + int job_ret; + AioContext *ctx = job->aio_context; + + assert(job_is_completed_locked(job)); /* Ensure abort is called for late-transactional failures */ - job_update_rc(job); + job_update_rc_locked(job); - if (!job->ret) { + job_ret = job->ret; + job_unlock(); + aio_context_acquire(ctx); + + if (!job_ret) { job_commit(job); } else { job_abort(job); @@ -699,46 +863,77 @@ static int job_finalize_single(Job *job) job_clean(job); if (job->cb) { - job->cb(job->opaque, job->ret); + job->cb(job->opaque, job_ret); } + aio_context_release(ctx); + job_lock(); + /* Emit events only if we actually started */ - if (job_started(job)) { - if (job_is_cancelled(job)) { - job_event_cancelled(job); + if (job_started_locked(job)) { + if (job_is_cancelled_locked(job)) { + job_event_cancelled_locked(job); } else { - job_event_completed(job); + job_event_completed_locked(job); } } - job_txn_del_job(job); - job_conclude(job); + job_txn_del_job_locked(job); + job_conclude_locked(job); return 0; } -static void job_cancel_async(Job *job, bool force) +/* + * Called with job_mutex held, but releases it temporarily. + * Takes AioContext lock internally to invoke a job->driver callback. 
+ */ +static void job_cancel_async_locked(Job *job, bool force) { + AioContext *ctx = job->aio_context; + GLOBAL_STATE_CODE(); if (job->driver->cancel) { - job->driver->cancel(job, force); + job_unlock(); + aio_context_acquire(ctx); + force = job->driver->cancel(job, force); + aio_context_release(ctx); + job_lock(); + } else { + /* No .cancel() means the job will behave as if force-cancelled */ + force = true; } + if (job->user_paused) { /* Do not call job_enter here, the caller will handle it. */ if (job->driver->user_resume) { + job_unlock(); job->driver->user_resume(job); + job_lock(); } job->user_paused = false; assert(job->pause_count > 0); job->pause_count--; } - job->cancelled = true; - /* To prevent 'force == false' overriding a previous 'force == true' */ - job->force_cancel |= force; + + /* + * Ignore soft cancel requests after the job is already done + * (We will still invoke job->driver->cancel() above, but if the + * job driver supports soft cancelling and the job is done, that + * should be a no-op, too. We still call it so it can override + * @force.) + */ + if (force || !job->deferred_to_main_loop) { + job->cancelled = true; + /* To prevent 'force == false' overriding a previous 'force == true' */ + job->force_cancel |= force; + } } -static void job_completed_txn_abort(Job *job) +/* + * Called with job_mutex held, but releases it temporarily. + * Takes AioContext lock internally to invoke a job->driver callback. + */ +static void job_completed_txn_abort_locked(Job *job) { - AioContext *outer_ctx = job->aio_context; - AioContext *ctx; JobTxn *txn = job->txn; Job *other_job; @@ -749,159 +944,164 @@ static void job_completed_txn_abort(Job *job) return; } txn->aborting = true; - job_txn_ref(txn); + job_txn_ref_locked(txn); - /* We can only hold the single job's AioContext lock while calling - * job_finalize_single() because the finalization callbacks can involve - * calls of AIO_WAIT_WHILE(), which could deadlock otherwise. */ - aio_context_release(outer_ctx); + job_ref_locked(job); /* Other jobs are effectively cancelled by us, set the status for * them; this job, however, may or may not be cancelled, depending * on the caller, so leave it. */ QLIST_FOREACH(other_job, &txn->jobs, txn_list) { if (other_job != job) { - ctx = other_job->aio_context; - aio_context_acquire(ctx); - job_cancel_async(other_job, false); - aio_context_release(ctx); + /* + * This is a transaction: If one job failed, no result will matter. + * Therefore, pass force=true to terminate all other jobs as quickly + * as possible. 
+ */ + job_cancel_async_locked(other_job, true); } } while (!QLIST_EMPTY(&txn->jobs)) { other_job = QLIST_FIRST(&txn->jobs); - ctx = other_job->aio_context; - aio_context_acquire(ctx); - if (!job_is_completed(other_job)) { - assert(job_is_cancelled(other_job)); - job_finish_sync(other_job, NULL, NULL); + if (!job_is_completed_locked(other_job)) { + assert(job_cancel_requested_locked(other_job)); + job_finish_sync_locked(other_job, NULL, NULL); } - job_finalize_single(other_job); - aio_context_release(ctx); + job_finalize_single_locked(other_job); } - aio_context_acquire(outer_ctx); - - job_txn_unref(txn); + job_unref_locked(job); + job_txn_unref_locked(txn); } -static int job_prepare(Job *job) +/* Called with job_mutex held, but releases it temporarily */ +static int job_prepare_locked(Job *job) { + int ret; + AioContext *ctx = job->aio_context; + + GLOBAL_STATE_CODE(); + if (job->ret == 0 && job->driver->prepare) { - job->ret = job->driver->prepare(job); - job_update_rc(job); + job_unlock(); + aio_context_acquire(ctx); + ret = job->driver->prepare(job); + aio_context_release(ctx); + job_lock(); + job->ret = ret; + job_update_rc_locked(job); } + return job->ret; } -static int job_needs_finalize(Job *job) +/* Called with job_mutex held */ +static int job_needs_finalize_locked(Job *job) { return !job->auto_finalize; } -static void job_do_finalize(Job *job) +/* Called with job_mutex held */ +static void job_do_finalize_locked(Job *job) { int rc; assert(job && job->txn); /* prepare the transaction to complete */ - rc = job_txn_apply(job, job_prepare); + rc = job_txn_apply_locked(job, job_prepare_locked); if (rc) { - job_completed_txn_abort(job); + job_completed_txn_abort_locked(job); } else { - job_txn_apply(job, job_finalize_single); + job_txn_apply_locked(job, job_finalize_single_locked); } } -void job_finalize(Job *job, Error **errp) +void job_finalize_locked(Job *job, Error **errp) { assert(job && job->id); - if (job_apply_verb(job, JOB_VERB_FINALIZE, errp)) { + if (job_apply_verb_locked(job, JOB_VERB_FINALIZE, errp)) { return; } - job_do_finalize(job); + job_do_finalize_locked(job); } -static int job_transition_to_pending(Job *job) +/* Called with job_mutex held. */ +static int job_transition_to_pending_locked(Job *job) { - job_state_transition(job, JOB_STATUS_PENDING); + job_state_transition_locked(job, JOB_STATUS_PENDING); if (!job->auto_finalize) { - job_event_pending(job); + job_event_pending_locked(job); } return 0; } void job_transition_to_ready(Job *job) { - job_state_transition(job, JOB_STATUS_READY); - job_event_ready(job); + JOB_LOCK_GUARD(); + job_state_transition_locked(job, JOB_STATUS_READY); + job_event_ready_locked(job); } -static void job_completed_txn_success(Job *job) +/* Called with job_mutex held. */ +static void job_completed_txn_success_locked(Job *job) { JobTxn *txn = job->txn; Job *other_job; - job_state_transition(job, JOB_STATUS_WAITING); + job_state_transition_locked(job, JOB_STATUS_WAITING); /* * Successful completion, see if there are other running jobs in this * txn. 
*/ QLIST_FOREACH(other_job, &txn->jobs, txn_list) { - if (!job_is_completed(other_job)) { + if (!job_is_completed_locked(other_job)) { return; } assert(other_job->ret == 0); } - job_txn_apply(job, job_transition_to_pending); + job_txn_apply_locked(job, job_transition_to_pending_locked); /* If no jobs need manual finalization, automatically do so */ - if (job_txn_apply(job, job_needs_finalize) == 0) { - job_do_finalize(job); + if (job_txn_apply_locked(job, job_needs_finalize_locked) == 0) { + job_do_finalize_locked(job); } } -static void job_completed(Job *job) +/* Called with job_mutex held. */ +static void job_completed_locked(Job *job) { - assert(job && job->txn && !job_is_completed(job)); + assert(job && job->txn && !job_is_completed_locked(job)); - job_update_rc(job); + job_update_rc_locked(job); trace_job_completed(job, job->ret); if (job->ret) { - job_completed_txn_abort(job); + job_completed_txn_abort_locked(job); } else { - job_completed_txn_success(job); + job_completed_txn_success_locked(job); } } -/** Useful only as a type shim for aio_bh_schedule_oneshot. */ +/** + * Useful only as a type shim for aio_bh_schedule_oneshot. + * Called with job_mutex *not* held. + */ static void job_exit(void *opaque) { Job *job = (Job *)opaque; - AioContext *ctx; - - job_ref(job); - aio_context_acquire(job->aio_context); + JOB_LOCK_GUARD(); + job_ref_locked(job); /* This is a lie, we're not quiescent, but still doing the completion * callbacks. However, completion callbacks tend to involve operations that * drain block nodes, and if .drained_poll still returned true, we would * deadlock. */ job->busy = false; - job_event_idle(job); + job_event_idle_locked(job); - job_completed(job); - - /* - * Note that calling job_completed can move the job to a different - * aio_context, so we cannot cache from above. job_txn_apply takes care of - * acquiring the new lock, and we ref/unref to avoid job_completed freeing - * the job underneath us. 
- */ - ctx = job->aio_context; - job_unref(job); - aio_context_release(ctx); + job_completed_locked(job); + job_unref_locked(job); } /** @@ -911,118 +1111,169 @@ static void job_exit(void *opaque) static void coroutine_fn job_co_entry(void *opaque) { Job *job = opaque; + int ret; assert(job && job->driver && job->driver->run); - job_pause_point(job); - job->ret = job->driver->run(job, &job->err); - job->deferred_to_main_loop = true; - job->busy = true; + WITH_JOB_LOCK_GUARD() { + assert(job->aio_context == qemu_get_current_aio_context()); + job_pause_point_locked(job); + } + ret = job->driver->run(job, &job->err); + WITH_JOB_LOCK_GUARD() { + job->ret = ret; + job->deferred_to_main_loop = true; + job->busy = true; + } aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job); } void job_start(Job *job) { - assert(job && !job_started(job) && job->paused && - job->driver && job->driver->run); - job->co = qemu_coroutine_create(job_co_entry, job); - job->pause_count--; - job->busy = true; - job->paused = false; - job_state_transition(job, JOB_STATUS_RUNNING); + assert(qemu_in_main_thread()); + + WITH_JOB_LOCK_GUARD() { + assert(job && !job_started_locked(job) && job->paused && + job->driver && job->driver->run); + job->co = qemu_coroutine_create(job_co_entry, job); + job->pause_count--; + job->busy = true; + job->paused = false; + job_state_transition_locked(job, JOB_STATUS_RUNNING); + } aio_co_enter(job->aio_context, job->co); } -void job_cancel(Job *job, bool force) +void job_cancel_locked(Job *job, bool force) { if (job->status == JOB_STATUS_CONCLUDED) { - job_do_dismiss(job); + job_do_dismiss_locked(job); return; } - job_cancel_async(job, force); - if (!job_started(job)) { - job_completed(job); + job_cancel_async_locked(job, force); + if (!job_started_locked(job)) { + job_completed_locked(job); } else if (job->deferred_to_main_loop) { - job_completed_txn_abort(job); + /* + * job_cancel_async() ignores soft-cancel requests for jobs + * that are already done (i.e. deferred to the main loop). We + * have to check again whether the job is really cancelled. + * (job_cancel_requested() and job_is_cancelled() are equivalent + * here, because job_cancel_async() will make soft-cancel + * requests no-ops when deferred_to_main_loop is true. We + * choose to call job_is_cancelled() to show that we invoke + * job_completed_txn_abort() only for force-cancelled jobs.) + */ + if (job_is_cancelled_locked(job)) { + job_completed_txn_abort_locked(job); + } } else { - job_enter(job); + job_enter_cond_locked(job, NULL); } } -void job_user_cancel(Job *job, bool force, Error **errp) +void job_user_cancel_locked(Job *job, bool force, Error **errp) { - if (job_apply_verb(job, JOB_VERB_CANCEL, errp)) { + if (job_apply_verb_locked(job, JOB_VERB_CANCEL, errp)) { return; } - job_cancel(job, force); + job_cancel_locked(job, force); } -/* A wrapper around job_cancel() taking an Error ** parameter so it may be - * used with job_finish_sync() without the need for (rather nasty) function - * pointer casts there. */ -static void job_cancel_err(Job *job, Error **errp) +/* A wrapper around job_cancel_locked() taking an Error ** parameter so it may + * be used with job_finish_sync_locked() without the need for (rather nasty) + * function pointer casts there. + * + * Called with job_mutex held. + */ +static void job_cancel_err_locked(Job *job, Error **errp) { - job_cancel(job, false); + job_cancel_locked(job, false); } -int job_cancel_sync(Job *job) +/** + * Same as job_cancel_err(), but force-cancel. 
+ * Called with job_mutex held. + */ +static void job_force_cancel_err_locked(Job *job, Error **errp) { - return job_finish_sync(job, &job_cancel_err, NULL); + job_cancel_locked(job, true); +} + +int job_cancel_sync_locked(Job *job, bool force) +{ + if (force) { + return job_finish_sync_locked(job, &job_force_cancel_err_locked, NULL); + } else { + return job_finish_sync_locked(job, &job_cancel_err_locked, NULL); + } +} + +int job_cancel_sync(Job *job, bool force) +{ + JOB_LOCK_GUARD(); + return job_cancel_sync_locked(job, force); } void job_cancel_sync_all(void) { Job *job; - AioContext *aio_context; + JOB_LOCK_GUARD(); - while ((job = job_next(NULL))) { - aio_context = job->aio_context; - aio_context_acquire(aio_context); - job_cancel_sync(job); - aio_context_release(aio_context); + while ((job = job_next_locked(NULL))) { + job_cancel_sync_locked(job, true); } } -int job_complete_sync(Job *job, Error **errp) +int job_complete_sync_locked(Job *job, Error **errp) { - return job_finish_sync(job, job_complete, errp); + return job_finish_sync_locked(job, job_complete_locked, errp); } -void job_complete(Job *job, Error **errp) +void job_complete_locked(Job *job, Error **errp) { /* Should not be reachable via external interface for internal jobs */ assert(job->id); - if (job_apply_verb(job, JOB_VERB_COMPLETE, errp)) { + GLOBAL_STATE_CODE(); + if (job_apply_verb_locked(job, JOB_VERB_COMPLETE, errp)) { return; } - if (job_is_cancelled(job) || !job->driver->complete) { + if (job_cancel_requested_locked(job) || !job->driver->complete) { error_setg(errp, "The active block job '%s' cannot be completed", job->id); return; } + job_unlock(); job->driver->complete(job, errp); + job_lock(); } -int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp) +int job_finish_sync_locked(Job *job, + void (*finish)(Job *, Error **errp), + Error **errp) { Error *local_err = NULL; int ret; + GLOBAL_STATE_CODE(); - job_ref(job); + job_ref_locked(job); if (finish) { finish(job, &local_err); } if (local_err) { error_propagate(errp, local_err); - job_unref(job); + job_unref_locked(job); return -EBUSY; } - AIO_WAIT_WHILE(job->aio_context, - (job_enter(job), !job_is_completed(job))); + job_unlock(); + AIO_WAIT_WHILE_UNLOCKED(job->aio_context, + (job_enter(job), !job_is_completed(job))); + job_lock(); - ret = (job_is_cancelled(job) && job->ret == 0) ? -ECANCELED : job->ret; - job_unref(job); + ret = (job_is_cancelled_locked(job) && job->ret == 0) + ? 
-ECANCELED : job->ret; + job_unref_locked(job); return ret; } diff --git a/libdecnumber/decContext.c b/libdecnumber/decContext.c index 7d97a65ac5..1956edf0a7 100644 --- a/libdecnumber/decContext.c +++ b/libdecnumber/decContext.c @@ -53,12 +53,13 @@ static const Flag *mfctop=(Flag *)&mfcone; /* -> top byte */ const uByte DECSTICKYTAB[10]={1,1,2,3,4,6,6,7,8,9}; /* used if sticky */ /* ------------------------------------------------------------------ */ -/* Powers of ten (powers[n]==10**n, 0<=n<=9) */ +/* Powers of ten (powers[n]==10**n, 0<=n<=19) */ /* ------------------------------------------------------------------ */ -const uLong DECPOWERS[19] = {1, 10, 100, 1000, 10000, 100000, 1000000, +const uLong DECPOWERS[20] = {1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000, 10000000000ULL, 100000000000ULL, 1000000000000ULL, 10000000000000ULL, 100000000000000ULL, 1000000000000000ULL, - 10000000000000000ULL, 100000000000000000ULL, 1000000000000000000ULL, }; + 10000000000000000ULL, 100000000000000000ULL, 1000000000000000000ULL, + 10000000000000000000ULL,}; /* ------------------------------------------------------------------ */ /* decContextClearStatus -- clear bits in current status */ diff --git a/libdecnumber/decNumber.c b/libdecnumber/decNumber.c index 1ffe458ad8..31282adafd 100644 --- a/libdecnumber/decNumber.c +++ b/libdecnumber/decNumber.c @@ -167,6 +167,7 @@ /* ------------------------------------------------------------------ */ #include "qemu/osdep.h" +#include "qemu/host-utils.h" #include "libdecnumber/dconfig.h" #include "libdecnumber/decNumber.h" #include "libdecnumber/decNumberLocal.h" @@ -263,6 +264,7 @@ static decNumber * decTrim(decNumber *, decContext *, Flag, Int *); static Int decUnitAddSub(const Unit *, Int, const Unit *, Int, Int, Unit *, Int); static Int decUnitCompare(const Unit *, Int, const Unit *, Int, Int); +static bool mulUInt128ByPowOf10(uLong *, uLong *, uInt); #if !DECSUBSET /* decFinish == decFinalize when no subset arithmetic needed */ @@ -462,6 +464,41 @@ decNumber *decNumberFromUInt64(decNumber *dn, uint64_t uin) return dn; } /* decNumberFromUInt64 */ +decNumber *decNumberFromInt128(decNumber *dn, uint64_t lo, int64_t hi) +{ + uint64_t unsig_hi = hi; + if (hi < 0) { + if (lo == 0) { + unsig_hi = -unsig_hi; + } else { + unsig_hi = ~unsig_hi; + lo = -lo; + } + } + + decNumberFromUInt128(dn, lo, unsig_hi); + if (hi < 0) { + dn->bits = DECNEG; /* sign needed */ + } + return dn; +} /* decNumberFromInt128 */ + +decNumber *decNumberFromUInt128(decNumber *dn, uint64_t lo, uint64_t hi) +{ + uint64_t rem; + Unit *up; /* work pointer */ + decNumberZero(dn); /* clean */ + if (lo == 0 && hi == 0) { + return dn; /* [or decGetDigits bad call] */ + } + for (up = dn->lsu; hi > 0 || lo > 0; up++) { + rem = divu128(&lo, &hi, DECDPUNMAX + 1); + *up = (Unit)rem; + } + dn->digits = decGetDigits(dn->lsu, up - dn->lsu); + return dn; +} /* decNumberFromUInt128 */ + /* ------------------------------------------------------------------ */ /* to-int64 -- conversion to int64 */ /* */ @@ -506,6 +543,68 @@ Invalid: return 0; } /* decNumberIntegralToInt64 */ +/* ------------------------------------------------------------------ */ +/* decNumberIntegralToInt128 -- conversion to int128 */ +/* */ +/* dn is the decNumber to convert. dn is assumed to have been */ +/* rounded to a floating point integer value. 
*/ +/* set is the context for reporting errors */ +/* returns the converted decNumber via plow and phigh */ +/* */ +/* Invalid is set if the decNumber is a NaN, Infinite or is out of */ +/* range for a signed 128 bit integer. */ +/* ------------------------------------------------------------------ */ + +void decNumberIntegralToInt128(const decNumber *dn, decContext *set, + uint64_t *plow, uint64_t *phigh) +{ + int d; /* work */ + const Unit *up; /* .. */ + uint64_t lo = 0, hi = 0; + + if (decNumberIsSpecial(dn) || (dn->exponent < 0) || + (dn->digits + dn->exponent > 39)) { + goto Invalid; + } + + up = dn->lsu; /* -> lsu */ + + for (d = (dn->digits - 1) / DECDPUN; d >= 0; d--) { + if (mulu128(&lo, &hi, DECDPUNMAX + 1)) { + /* overflow */ + goto Invalid; + } + if (uadd64_overflow(lo, up[d], &lo)) { + if (uadd64_overflow(hi, 1, &hi)) { + /* overflow */ + goto Invalid; + } + } + } + + if (mulUInt128ByPowOf10(&lo, &hi, dn->exponent)) { + /* overflow */ + goto Invalid; + } + + if (decNumberIsNegative(dn)) { + if (lo == 0) { + *phigh = -hi; + *plow = 0; + } else { + *phigh = ~hi; + *plow = -lo; + } + } else { + *plow = lo; + *phigh = hi; + } + + return; + +Invalid: + decContextSetStatus(set, DEC_Invalid_operation); +} /* decNumberIntegralToInt128 */ /* ------------------------------------------------------------------ */ /* to-scientific-string -- conversion to numeric string */ @@ -7849,6 +7948,38 @@ static Int decGetDigits(Unit *uar, Int len) { return digits; } /* decGetDigits */ +/* ------------------------------------------------------------------ */ +/* mulUInt128ByPowOf10 -- multiply a 128-bit unsigned integer by a */ +/* power of 10. */ +/* */ +/* The 128-bit factor composed of plow and phigh is multiplied */ +/* by 10^exp. */ +/* */ +/* plow pointer to the low 64 bits of the first factor */ +/* phigh pointer to the high 64 bits of the first factor */ +/* exp the exponent of the power of 10 of the second factor */ +/* */ +/* If the result fits in 128 bits, returns false and the */ +/* multiplication result through plow and phigh. */ +/* Otherwise, returns true. 
*/ +/* ------------------------------------------------------------------ */ +static bool mulUInt128ByPowOf10(uLong *plow, uLong *phigh, uInt pow10) +{ + while (pow10 >= ARRAY_SIZE(powers)) { + if (mulu128(plow, phigh, powers[ARRAY_SIZE(powers) - 1])) { + /* Overflow */ + return true; + } + pow10 -= ARRAY_SIZE(powers) - 1; + } + + if (pow10 > 0) { + return mulu128(plow, phigh, powers[pow10]); + } else { + return false; + } +} + #if DECTRACE | DECCHECK /* ------------------------------------------------------------------ */ /* decNumberShow -- display a number [debug aid] */ diff --git a/libdecnumber/dpd/decimal64.c b/libdecnumber/dpd/decimal64.c index 4816176410..290dbe8177 100644 --- a/libdecnumber/dpd/decimal64.c +++ b/libdecnumber/dpd/decimal64.c @@ -617,7 +617,6 @@ static const uInt multies[]={131073, 26215, 5243, 1049, 210}; #endif void decDigitsToDPD(const decNumber *dn, uInt *targ, Int shift) { Int cut; /* work */ - Int n; /* output bunch counter */ Int digits=dn->digits; /* digit countdown */ uInt dpd; /* densely packed decimal value */ uInt bin; /* binary value 0-999 */ @@ -676,7 +675,7 @@ void decDigitsToDPD(const decNumber *dn, uInt *targ, Int shift) { bin=0; /* [keep compiler quiet] */ #endif - for(n=0; digits>0; n++) { /* each output bunch */ + while (digits > 0) { /* each output bunch */ #if DECDPUN==3 /* fast path, 3-at-a-time */ bin=*inu; /* 3 digits ready for convert */ digits-=3; /* [may go negative] */ diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h index 3d2ce9912d..4bf2d7246e 100644 --- a/linux-headers/asm-arm64/kvm.h +++ b/linux-headers/asm-arm64/kvm.h @@ -75,9 +75,11 @@ struct kvm_regs { /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */ #define KVM_ARM_DEVICE_TYPE_SHIFT 0 -#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT) +#define KVM_ARM_DEVICE_TYPE_MASK GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \ + KVM_ARM_DEVICE_TYPE_SHIFT) #define KVM_ARM_DEVICE_ID_SHIFT 16 -#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT) +#define KVM_ARM_DEVICE_ID_MASK GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \ + KVM_ARM_DEVICE_ID_SHIFT) /* Supported device IDs */ #define KVM_ARM_DEVICE_VGIC_V2 0 @@ -139,8 +141,10 @@ struct kvm_guest_debug_arch { __u64 dbg_wvr[KVM_ARM_MAX_DBG_REGS]; }; +#define KVM_DEBUG_ARCH_HSR_HIGH_VALID (1 << 0) struct kvm_debug_exit_arch { __u32 hsr; + __u32 hsr_high; /* ESR_EL2[61:32] */ __u64 far; /* used for watchpoints */ }; @@ -281,6 +285,11 @@ struct kvm_arm_copy_mte_tags { #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4) +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 KVM_REG_ARM_FW_REG(3) +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL 0 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL 1 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED 2 + /* SVE registers */ #define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT) @@ -327,6 +336,31 @@ struct kvm_arm_copy_mte_tags { #define KVM_ARM64_SVE_VLS_WORDS \ ((KVM_ARM64_SVE_VQ_MAX - KVM_ARM64_SVE_VQ_MIN) / 64 + 1) +/* Bitmap feature firmware registers */ +#define KVM_REG_ARM_FW_FEAT_BMAP (0x0016 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_FW_FEAT_BMAP_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ + KVM_REG_ARM_FW_FEAT_BMAP | \ + ((r) & 0xffff)) + +#define KVM_REG_ARM_STD_BMAP KVM_REG_ARM_FW_FEAT_BMAP_REG(0) + +enum { + KVM_REG_ARM_STD_BIT_TRNG_V1_0 = 0, +}; + +#define KVM_REG_ARM_STD_HYP_BMAP KVM_REG_ARM_FW_FEAT_BMAP_REG(1) + +enum { + KVM_REG_ARM_STD_HYP_BIT_PV_TIME = 0, 
+}; + +#define KVM_REG_ARM_VENDOR_HYP_BMAP KVM_REG_ARM_FW_FEAT_BMAP_REG(2) + +enum { + KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT = 0, + KVM_REG_ARM_VENDOR_HYP_BIT_PTP = 1, +}; + /* Device Control API: ARM VGIC */ #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 @@ -362,6 +396,7 @@ struct kvm_arm_copy_mte_tags { #define KVM_ARM_VCPU_PMU_V3_IRQ 0 #define KVM_ARM_VCPU_PMU_V3_INIT 1 #define KVM_ARM_VCPU_PMU_V3_FILTER 2 +#define KVM_ARM_VCPU_PMU_V3_SET_PMU 3 #define KVM_ARM_VCPU_TIMER_CTRL 1 #define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0 #define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 @@ -411,6 +446,16 @@ struct kvm_arm_copy_mte_tags { #define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS #define KVM_PSCI_RET_DENIED PSCI_RET_DENIED +/* arm64-specific kvm_run::system_event flags */ +/* + * Reset caused by a PSCI v1.1 SYSTEM_RESET2 call. + * Valid only when the system event has a type of KVM_SYSTEM_EVENT_RESET. + */ +#define KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2 (1ULL << 0) + +/* run->fail_entry.hardware_entry_failure_reason codes. */ +#define KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED (1ULL << 0) + #endif #endif /* __ARM_KVM_H__ */ diff --git a/linux-headers/asm-arm64/unistd.h b/linux-headers/asm-arm64/unistd.h index f83a70e07d..ce2ee8f1e3 100644 --- a/linux-headers/asm-arm64/unistd.h +++ b/linux-headers/asm-arm64/unistd.h @@ -20,5 +20,6 @@ #define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_TIME32_SYSCALLS #define __ARCH_WANT_SYS_CLONE3 +#define __ARCH_WANT_MEMFD_SECRET #include diff --git a/linux-headers/asm-generic/mman-common.h b/linux-headers/asm-generic/mman-common.h index 1567a3294c..6c1aa92a92 100644 --- a/linux-headers/asm-generic/mman-common.h +++ b/linux-headers/asm-generic/mman-common.h @@ -75,6 +75,8 @@ #define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */ #define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */ +#define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/linux-headers/asm-generic/unistd.h b/linux-headers/asm-generic/unistd.h index f211961ce1..45fa180cc5 100644 --- a/linux-headers/asm-generic/unistd.h +++ b/linux-headers/asm-generic/unistd.h @@ -383,7 +383,7 @@ __SYSCALL(__NR_syslog, sys_syslog) /* kernel/ptrace.c */ #define __NR_ptrace 117 -__SYSCALL(__NR_ptrace, sys_ptrace) +__SC_COMP(__NR_ptrace, sys_ptrace, compat_sys_ptrace) /* kernel/sched/core.c */ #define __NR_sched_setparam 118 @@ -673,15 +673,15 @@ __SYSCALL(__NR_madvise, sys_madvise) #define __NR_remap_file_pages 234 __SYSCALL(__NR_remap_file_pages, sys_remap_file_pages) #define __NR_mbind 235 -__SC_COMP(__NR_mbind, sys_mbind, compat_sys_mbind) +__SYSCALL(__NR_mbind, sys_mbind) #define __NR_get_mempolicy 236 -__SC_COMP(__NR_get_mempolicy, sys_get_mempolicy, compat_sys_get_mempolicy) +__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy) #define __NR_set_mempolicy 237 -__SC_COMP(__NR_set_mempolicy, sys_set_mempolicy, compat_sys_set_mempolicy) +__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy) #define __NR_migrate_pages 238 -__SC_COMP(__NR_migrate_pages, sys_migrate_pages, compat_sys_migrate_pages) +__SYSCALL(__NR_migrate_pages, sys_migrate_pages) #define __NR_move_pages 239 -__SC_COMP(__NR_move_pages, sys_move_pages, compat_sys_move_pages) +__SYSCALL(__NR_move_pages, sys_move_pages) #endif #define __NR_rt_tgsigqueueinfo 240 @@ -779,7 +779,7 @@ __SYSCALL(__NR_rseq, sys_rseq) #define __NR_kexec_file_load 294 __SYSCALL(__NR_kexec_file_load, sys_kexec_file_load) /* 295 through 402 are 
unassigned to sync up with generic numbers, don't use */ -#if __BITS_PER_LONG == 32 +#if defined(__SYSCALL_COMPAT) || __BITS_PER_LONG == 32 #define __NR_clock_gettime64 403 __SYSCALL(__NR_clock_gettime64, sys_clock_gettime) #define __NR_clock_settime64 404 @@ -873,8 +873,21 @@ __SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule) #define __NR_landlock_restrict_self 446 __SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self) +#ifdef __ARCH_WANT_MEMFD_SECRET +#define __NR_memfd_secret 447 +__SYSCALL(__NR_memfd_secret, sys_memfd_secret) +#endif +#define __NR_process_mrelease 448 +__SYSCALL(__NR_process_mrelease, sys_process_mrelease) + +#define __NR_futex_waitv 449 +__SYSCALL(__NR_futex_waitv, sys_futex_waitv) + +#define __NR_set_mempolicy_home_node 450 +__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node) + #undef __NR_syscalls -#define __NR_syscalls 447 +#define __NR_syscalls 451 /* * 32 bit systems traditionally used different diff --git a/linux-headers/asm-mips/mman.h b/linux-headers/asm-mips/mman.h index 40b210c65a..1be428663c 100644 --- a/linux-headers/asm-mips/mman.h +++ b/linux-headers/asm-mips/mman.h @@ -101,6 +101,8 @@ #define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */ #define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */ +#define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/linux-headers/asm-mips/unistd_n32.h b/linux-headers/asm-mips/unistd_n32.h index 09cd297698..1f14a6fad3 100644 --- a/linux-headers/asm-mips/unistd_n32.h +++ b/linux-headers/asm-mips/unistd_n32.h @@ -376,5 +376,8 @@ #define __NR_landlock_create_ruleset (__NR_Linux + 444) #define __NR_landlock_add_rule (__NR_Linux + 445) #define __NR_landlock_restrict_self (__NR_Linux + 446) +#define __NR_process_mrelease (__NR_Linux + 448) +#define __NR_futex_waitv (__NR_Linux + 449) +#define __NR_set_mempolicy_home_node (__NR_Linux + 450) #endif /* _ASM_UNISTD_N32_H */ diff --git a/linux-headers/asm-mips/unistd_n64.h b/linux-headers/asm-mips/unistd_n64.h index 780e0cead6..e5a8ebec78 100644 --- a/linux-headers/asm-mips/unistd_n64.h +++ b/linux-headers/asm-mips/unistd_n64.h @@ -352,5 +352,8 @@ #define __NR_landlock_create_ruleset (__NR_Linux + 444) #define __NR_landlock_add_rule (__NR_Linux + 445) #define __NR_landlock_restrict_self (__NR_Linux + 446) +#define __NR_process_mrelease (__NR_Linux + 448) +#define __NR_futex_waitv (__NR_Linux + 449) +#define __NR_set_mempolicy_home_node (__NR_Linux + 450) #endif /* _ASM_UNISTD_N64_H */ diff --git a/linux-headers/asm-mips/unistd_o32.h b/linux-headers/asm-mips/unistd_o32.h index 06a2b3b55e..871d57168f 100644 --- a/linux-headers/asm-mips/unistd_o32.h +++ b/linux-headers/asm-mips/unistd_o32.h @@ -422,5 +422,8 @@ #define __NR_landlock_create_ruleset (__NR_Linux + 444) #define __NR_landlock_add_rule (__NR_Linux + 445) #define __NR_landlock_restrict_self (__NR_Linux + 446) +#define __NR_process_mrelease (__NR_Linux + 448) +#define __NR_futex_waitv (__NR_Linux + 449) +#define __NR_set_mempolicy_home_node (__NR_Linux + 450) #endif /* _ASM_UNISTD_O32_H */ diff --git a/linux-headers/asm-powerpc/unistd_32.h b/linux-headers/asm-powerpc/unistd_32.h index cd5a8a41b2..585c7fefbc 100644 --- a/linux-headers/asm-powerpc/unistd_32.h +++ b/linux-headers/asm-powerpc/unistd_32.h @@ -429,6 +429,9 @@ #define __NR_landlock_create_ruleset 444 #define __NR_landlock_add_rule 445 #define __NR_landlock_restrict_self 446 +#define __NR_process_mrelease 448 
+#define __NR_futex_waitv 449 +#define __NR_set_mempolicy_home_node 450 #endif /* _ASM_UNISTD_32_H */ diff --git a/linux-headers/asm-powerpc/unistd_64.h b/linux-headers/asm-powerpc/unistd_64.h index 8458effa8d..350f7ec0ac 100644 --- a/linux-headers/asm-powerpc/unistd_64.h +++ b/linux-headers/asm-powerpc/unistd_64.h @@ -401,6 +401,9 @@ #define __NR_landlock_create_ruleset 444 #define __NR_landlock_add_rule 445 #define __NR_landlock_restrict_self 446 +#define __NR_process_mrelease 448 +#define __NR_futex_waitv 449 +#define __NR_set_mempolicy_home_node 450 #endif /* _ASM_UNISTD_64_H */ diff --git a/linux-headers/asm-riscv/bitsperlong.h b/linux-headers/asm-riscv/bitsperlong.h new file mode 100644 index 0000000000..cc5c45a9ce --- /dev/null +++ b/linux-headers/asm-riscv/bitsperlong.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (C) 2012 ARM Ltd. + * Copyright (C) 2015 Regents of the University of California + */ + +#ifndef _ASM_RISCV_BITSPERLONG_H +#define _ASM_RISCV_BITSPERLONG_H + +#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8) + +#include + +#endif /* _ASM_RISCV_BITSPERLONG_H */ diff --git a/linux-headers/asm-riscv/kvm.h b/linux-headers/asm-riscv/kvm.h new file mode 100644 index 0000000000..7351417afd --- /dev/null +++ b/linux-headers/asm-riscv/kvm.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. + * + * Authors: + * Anup Patel + */ + +#ifndef __LINUX_KVM_RISCV_H +#define __LINUX_KVM_RISCV_H + +#ifndef __ASSEMBLY__ + +#include +#include + +#define __KVM_HAVE_READONLY_MEM + +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 + +#define KVM_INTERRUPT_SET -1U +#define KVM_INTERRUPT_UNSET -2U + +/* for KVM_GET_REGS and KVM_SET_REGS */ +struct kvm_regs { +}; + +/* for KVM_GET_FPU and KVM_SET_FPU */ +struct kvm_fpu { +}; + +/* KVM Debug exit structure */ +struct kvm_debug_exit_arch { +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* for KVM_GET_SREGS and KVM_SET_SREGS */ +struct kvm_sregs { +}; + +/* CONFIG registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_config { + unsigned long isa; +}; + +/* CORE registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_core { + struct user_regs_struct regs; + unsigned long mode; +}; + +/* Possible privilege modes for kvm_riscv_core */ +#define KVM_RISCV_MODE_S 1 +#define KVM_RISCV_MODE_U 0 + +/* CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_csr { + unsigned long sstatus; + unsigned long sie; + unsigned long stvec; + unsigned long sscratch; + unsigned long sepc; + unsigned long scause; + unsigned long stval; + unsigned long sip; + unsigned long satp; + unsigned long scounteren; +}; + +/* TIMER registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_timer { + __u64 frequency; + __u64 time; + __u64 compare; + __u64 state; +}; + +/* + * ISA extension IDs specific to KVM. This is not the same as the host ISA + * extension IDs as that is internal to the host and should not be exposed + * to the guest. This should always be contiguous to keep the mapping simple + * in KVM implementation. 
+ */ +enum KVM_RISCV_ISA_EXT_ID { + KVM_RISCV_ISA_EXT_A = 0, + KVM_RISCV_ISA_EXT_C, + KVM_RISCV_ISA_EXT_D, + KVM_RISCV_ISA_EXT_F, + KVM_RISCV_ISA_EXT_H, + KVM_RISCV_ISA_EXT_I, + KVM_RISCV_ISA_EXT_M, + KVM_RISCV_ISA_EXT_SVPBMT, + KVM_RISCV_ISA_EXT_SSTC, + KVM_RISCV_ISA_EXT_MAX, +}; + +/* Possible states for kvm_riscv_timer */ +#define KVM_RISCV_TIMER_STATE_OFF 0 +#define KVM_RISCV_TIMER_STATE_ON 1 + +#define KVM_REG_SIZE(id) \ + (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) + +/* If you need to interpret the index values, here is the key: */ +#define KVM_REG_RISCV_TYPE_MASK 0x00000000FF000000 +#define KVM_REG_RISCV_TYPE_SHIFT 24 + +/* Config registers are mapped as type 1 */ +#define KVM_REG_RISCV_CONFIG (0x01 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_CONFIG_REG(name) \ + (offsetof(struct kvm_riscv_config, name) / sizeof(unsigned long)) + +/* Core registers are mapped as type 2 */ +#define KVM_REG_RISCV_CORE (0x02 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_CORE_REG(name) \ + (offsetof(struct kvm_riscv_core, name) / sizeof(unsigned long)) + +/* Control and status registers are mapped as type 3 */ +#define KVM_REG_RISCV_CSR (0x03 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_CSR_REG(name) \ + (offsetof(struct kvm_riscv_csr, name) / sizeof(unsigned long)) + +/* Timer registers are mapped as type 4 */ +#define KVM_REG_RISCV_TIMER (0x04 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_TIMER_REG(name) \ + (offsetof(struct kvm_riscv_timer, name) / sizeof(__u64)) + +/* F extension registers are mapped as type 5 */ +#define KVM_REG_RISCV_FP_F (0x05 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_FP_F_REG(name) \ + (offsetof(struct __riscv_f_ext_state, name) / sizeof(__u32)) + +/* D extension registers are mapped as type 6 */ +#define KVM_REG_RISCV_FP_D (0x06 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_FP_D_REG(name) \ + (offsetof(struct __riscv_d_ext_state, name) / sizeof(__u64)) + +/* ISA Extension registers are mapped as type 7 */ +#define KVM_REG_RISCV_ISA_EXT (0x07 << KVM_REG_RISCV_TYPE_SHIFT) + +#endif + +#endif /* __LINUX_KVM_RISCV_H */ diff --git a/linux-headers/asm-riscv/mman.h b/linux-headers/asm-riscv/mman.h new file mode 100644 index 0000000000..8eebf89f5a --- /dev/null +++ b/linux-headers/asm-riscv/mman.h @@ -0,0 +1 @@ +#include <asm-generic/mman.h> diff --git a/linux-headers/asm-riscv/unistd.h b/linux-headers/asm-riscv/unistd.h new file mode 100644 index 0000000000..73d7cdd2ec --- /dev/null +++ b/linux-headers/asm-riscv/unistd.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2018 David Abdurachmanov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#if defined(__LP64__) && !defined(__SYSCALL_COMPAT) +#define __ARCH_WANT_NEW_STAT +#define __ARCH_WANT_SET_GET_RLIMIT +#endif /* __LP64__ */ + +#define __ARCH_WANT_SYS_CLONE3 +#define __ARCH_WANT_MEMFD_SECRET + +#include <asm-generic/unistd.h> + +/* + * Allows the instruction cache to be flushed from userspace.
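As an illustrative aside (not part of the synced header): the type/offset encodings above combine with the generic KVM_REG_* bits from <linux/kvm.h> into a ONE_REG id. A minimal sketch for reading the guest sstatus CSR, assuming a riscv64 host and an already-created vCPU file descriptor named vcpu_fd:

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the guest sstatus CSR via KVM_GET_ONE_REG; returns the ioctl result. */
static int riscv_get_sstatus(int vcpu_fd, uint64_t *val)
{
    struct kvm_one_reg reg = {
        /* type 3 (CSR) | offset of sstatus within struct kvm_riscv_csr */
        .id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_REG(sstatus),
        .addr = (uint64_t)(uintptr_t)val,
    };

    return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}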
Despite RISC-V + * having a direct 'fence.i' instruction available to userspace (which we + * can't trap!), that's not actually viable when running on Linux because the + * kernel might schedule a process on another hart. There is no way for + * userspace to handle this without invoking the kernel (as it doesn't know the + * thread->hart mappings), so we've defined a RISC-V specific system call to + * flush the instruction cache. + * + * __NR_riscv_flush_icache is defined to flush the instruction cache over an + * address range, with the flush applying to either all threads or just the + * caller. We don't currently do anything with the address range, that's just + * in there for forwards compatibility. + */ +#ifndef __NR_riscv_flush_icache +#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15) +#endif +__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache) diff --git a/linux-headers/asm-s390/kvm.h b/linux-headers/asm-s390/kvm.h index f053b8304a..e2afd95420 100644 --- a/linux-headers/asm-s390/kvm.h +++ b/linux-headers/asm-s390/kvm.h @@ -74,6 +74,7 @@ struct kvm_s390_io_adapter_req { #define KVM_S390_VM_CRYPTO 2 #define KVM_S390_VM_CPU_MODEL 3 #define KVM_S390_VM_MIGRATION 4 +#define KVM_S390_VM_CPU_TOPOLOGY 5 /* kvm attributes for mem_ctrl */ #define KVM_S390_VM_MEM_ENABLE_CMMA 0 diff --git a/linux-headers/asm-s390/unistd_32.h b/linux-headers/asm-s390/unistd_32.h index 0c3cd299e4..8e644d65f5 100644 --- a/linux-headers/asm-s390/unistd_32.h +++ b/linux-headers/asm-s390/unistd_32.h @@ -419,5 +419,8 @@ #define __NR_landlock_create_ruleset 444 #define __NR_landlock_add_rule 445 #define __NR_landlock_restrict_self 446 +#define __NR_process_mrelease 448 +#define __NR_futex_waitv 449 +#define __NR_set_mempolicy_home_node 450 #endif /* _ASM_S390_UNISTD_32_H */ diff --git a/linux-headers/asm-s390/unistd_64.h b/linux-headers/asm-s390/unistd_64.h index 8dfc08b5e6..51da542fec 100644 --- a/linux-headers/asm-s390/unistd_64.h +++ b/linux-headers/asm-s390/unistd_64.h @@ -367,5 +367,8 @@ #define __NR_landlock_create_ruleset 444 #define __NR_landlock_add_rule 445 #define __NR_landlock_restrict_self 446 +#define __NR_process_mrelease 448 +#define __NR_futex_waitv 449 +#define __NR_set_mempolicy_home_node 450 #endif /* _ASM_S390_UNISTD_64_H */ diff --git a/linux-headers/asm-x86/kvm.h b/linux-headers/asm-x86/kvm.h index a6c327f8ad..46de10a809 100644 --- a/linux-headers/asm-x86/kvm.h +++ b/linux-headers/asm-x86/kvm.h @@ -198,13 +198,13 @@ struct kvm_msrs { __u32 nmsrs; /* number of msrs in entries */ __u32 pad; - struct kvm_msr_entry entries[0]; + struct kvm_msr_entry entries[]; }; /* for KVM_GET_MSR_INDEX_LIST */ struct kvm_msr_list { __u32 nmsrs; /* number of msrs in entries */ - __u32 indices[0]; + __u32 indices[]; }; /* Maximum size of any access bitmap in bytes */ @@ -241,7 +241,7 @@ struct kvm_cpuid_entry { struct kvm_cpuid { __u32 nent; __u32 padding; - struct kvm_cpuid_entry entries[0]; + struct kvm_cpuid_entry entries[]; }; struct kvm_cpuid_entry2 { @@ -263,7 +263,7 @@ struct kvm_cpuid_entry2 { struct kvm_cpuid2 { __u32 nent; __u32 padding; - struct kvm_cpuid_entry2 entries[0]; + struct kvm_cpuid_entry2 entries[]; }; /* for KVM_GET_PIT and KVM_SET_PIT */ @@ -295,6 +295,7 @@ struct kvm_debug_exit_arch { #define KVM_GUESTDBG_USE_HW_BP 0x00020000 #define KVM_GUESTDBG_INJECT_DB 0x00040000 #define KVM_GUESTDBG_INJECT_BP 0x00080000 +#define KVM_GUESTDBG_BLOCKIRQ 0x00100000 /* for KVM_SET_GUEST_DEBUG */ struct kvm_guest_debug_arch { @@ -305,7 +306,8 @@ struct kvm_pit_state { struct 
kvm_pit_channel_state channels[3]; }; -#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001 +#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001 +#define KVM_PIT_FLAGS_SPEAKER_DATA_ON 0x00000002 struct kvm_pit_state2 { struct kvm_pit_channel_state channels[3]; @@ -324,6 +326,7 @@ struct kvm_reinject_control { #define KVM_VCPUEVENT_VALID_SHADOW 0x00000004 #define KVM_VCPUEVENT_VALID_SMM 0x00000008 #define KVM_VCPUEVENT_VALID_PAYLOAD 0x00000010 +#define KVM_VCPUEVENT_VALID_TRIPLE_FAULT 0x00000020 /* Interrupt shadow states */ #define KVM_X86_SHADOW_INT_MOV_SS 0x01 @@ -358,7 +361,10 @@ struct kvm_vcpu_events { __u8 smm_inside_nmi; __u8 latched_init; } smi; - __u8 reserved[27]; + struct { + __u8 pending; + } triple_fault; + __u8 reserved[26]; __u8 exception_has_payload; __u64 exception_payload; }; @@ -372,9 +378,23 @@ struct kvm_debugregs { __u64 reserved[9]; }; -/* for KVM_CAP_XSAVE */ +/* for KVM_CAP_XSAVE and KVM_CAP_XSAVE2 */ struct kvm_xsave { + /* + * KVM_GET_XSAVE2 and KVM_SET_XSAVE write and read as many bytes + * as are returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) + * respectively, when invoked on the vm file descriptor. + * + * The size value returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) + * will always be at least 4096. Currently, it is only greater + * than 4096 if a dynamic feature has been enabled with + * ``arch_prctl()``, but this may change in the future. + * + * The offsets of the state save areas in struct kvm_xsave follow + * the contents of CPUID leaf 0xD on the host. + */ __u32 region[1024]; + __u32 extra[]; }; #define KVM_MAX_XCRS 16 @@ -413,11 +433,13 @@ struct kvm_sync_regs { struct kvm_vcpu_events events; }; -#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) -#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) -#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2) -#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3) -#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4) +#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) +#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) +#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2) +#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3) +#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4) +#define KVM_X86_QUIRK_FIX_HYPERCALL_INSN (1 << 5) +#define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS (1 << 6) #define KVM_STATE_NESTED_FORMAT_VMX 0 #define KVM_STATE_NESTED_FORMAT_SVM 1 @@ -437,6 +459,9 @@ struct kvm_sync_regs { #define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001 +/* attributes for system fd (group 0) */ +#define KVM_X86_XCOMP_GUEST_SUPP 0 + struct kvm_vmx_nested_state_data { __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; __u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; @@ -497,10 +522,14 @@ struct kvm_pmu_event_filter { __u32 fixed_counter_bitmap; __u32 flags; __u32 pad[4]; - __u64 events[0]; + __u64 events[]; }; #define KVM_PMU_EVENT_ALLOW 0 #define KVM_PMU_EVENT_DENY 1 +/* for KVM_{GET,SET,HAS}_DEVICE_ATTR */ +#define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */ +#define KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */ + #endif /* _ASM_X86_KVM_H */ diff --git a/linux-headers/asm-x86/mman.h b/linux-headers/asm-x86/mman.h index d4a8d0424b..775dbd3aff 100644 --- a/linux-headers/asm-x86/mman.h +++ b/linux-headers/asm-x86/mman.h @@ -5,20 +5,6 @@ #define MAP_32BIT 0x40 /* only give out 32bit addresses */ #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS -/* - * Take the 4 protection key bits out of the vma->vm_flags - * value and turn them in to the bits that we can put in - * to a pte. 
- * - * Only override these if Protection Keys are available - * (which is only on 64-bit). - */ -#define arch_vm_get_page_prot(vm_flags) __pgprot( \ - ((vm_flags) & VM_PKEY_BIT0 ? _PAGE_PKEY_BIT0 : 0) | \ - ((vm_flags) & VM_PKEY_BIT1 ? _PAGE_PKEY_BIT1 : 0) | \ - ((vm_flags) & VM_PKEY_BIT2 ? _PAGE_PKEY_BIT2 : 0) | \ - ((vm_flags) & VM_PKEY_BIT3 ? _PAGE_PKEY_BIT3 : 0)) - #define arch_calc_vm_prot_bits(prot, key) ( \ ((key) & 0x1 ? VM_PKEY_BIT0 : 0) | \ ((key) & 0x2 ? VM_PKEY_BIT1 : 0) | \ diff --git a/linux-headers/asm-x86/unistd_32.h b/linux-headers/asm-x86/unistd_32.h index 66e96c0c68..87e1e977af 100644 --- a/linux-headers/asm-x86/unistd_32.h +++ b/linux-headers/asm-x86/unistd_32.h @@ -437,6 +437,10 @@ #define __NR_landlock_create_ruleset 444 #define __NR_landlock_add_rule 445 #define __NR_landlock_restrict_self 446 +#define __NR_memfd_secret 447 +#define __NR_process_mrelease 448 +#define __NR_futex_waitv 449 +#define __NR_set_mempolicy_home_node 450 #endif /* _ASM_UNISTD_32_H */ diff --git a/linux-headers/asm-x86/unistd_64.h b/linux-headers/asm-x86/unistd_64.h index b8ff6f14ee..147a78d623 100644 --- a/linux-headers/asm-x86/unistd_64.h +++ b/linux-headers/asm-x86/unistd_64.h @@ -359,6 +359,10 @@ #define __NR_landlock_create_ruleset 444 #define __NR_landlock_add_rule 445 #define __NR_landlock_restrict_self 446 +#define __NR_memfd_secret 447 +#define __NR_process_mrelease 448 +#define __NR_futex_waitv 449 +#define __NR_set_mempolicy_home_node 450 #endif /* _ASM_UNISTD_64_H */ diff --git a/linux-headers/asm-x86/unistd_x32.h b/linux-headers/asm-x86/unistd_x32.h index 06a1097c15..27098db7fb 100644 --- a/linux-headers/asm-x86/unistd_x32.h +++ b/linux-headers/asm-x86/unistd_x32.h @@ -312,6 +312,10 @@ #define __NR_landlock_create_ruleset (__X32_SYSCALL_BIT + 444) #define __NR_landlock_add_rule (__X32_SYSCALL_BIT + 445) #define __NR_landlock_restrict_self (__X32_SYSCALL_BIT + 446) +#define __NR_memfd_secret (__X32_SYSCALL_BIT + 447) +#define __NR_process_mrelease (__X32_SYSCALL_BIT + 448) +#define __NR_futex_waitv (__X32_SYSCALL_BIT + 449) +#define __NR_set_mempolicy_home_node (__X32_SYSCALL_BIT + 450) #define __NR_rt_sigaction (__X32_SYSCALL_BIT + 512) #define __NR_rt_sigreturn (__X32_SYSCALL_BIT + 513) #define __NR_ioctl (__X32_SYSCALL_BIT + 514) diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h index bcaf66cc4d..ebdafa576d 100644 --- a/linux-headers/linux/kvm.h +++ b/linux-headers/linux/kvm.h @@ -269,6 +269,9 @@ struct kvm_xen_exit { #define KVM_EXIT_AP_RESET_HOLD 32 #define KVM_EXIT_X86_BUS_LOCK 33 #define KVM_EXIT_XEN 34 +#define KVM_EXIT_RISCV_SBI 35 +#define KVM_EXIT_RISCV_CSR 36 +#define KVM_EXIT_NOTIFY 37 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -397,13 +400,23 @@ struct kvm_run { * "ndata" is correct, that new fields are enumerated in "flags", * and that each flag enumerates fields that are 64-bit aligned * and sized (so that ndata+internal.data[] is valid/accurate). + * + * Space beyond the defined fields may be used to store arbitrary + * debug information relating to the emulation failure. It is + * accounted for in "ndata" but the format is unspecified and is + * not represented in "flags". Any such information is *not* ABI! */ struct { __u32 suberror; __u32 ndata; __u64 flags; - __u8 insn_size; - __u8 insn_bytes[15]; + union { + struct { + __u8 insn_size; + __u8 insn_bytes[15]; + }; + }; + /* Arbitrary debug data may follow. 
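A hedged illustration of consuming the exit documented above (not from the original patch): a VMM might dump the captured instruction bytes when emulation fails. The run pointer is assumed to be the mmap'ed struct kvm_run of the vCPU, and KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES is assumed to be the flag name advertised in <linux/kvm.h>:

#include <stdio.h>
#include <linux/kvm.h>

static void report_emulation_failure(const struct kvm_run *run)
{
    if (run->exit_reason != KVM_EXIT_INTERNAL_ERROR ||
        run->emulation_failure.suberror != KVM_INTERNAL_ERROR_EMULATION) {
        return;
    }

    /* Only trust insn_bytes when the corresponding flag is set. */
    if (run->emulation_failure.ndata &&
        (run->emulation_failure.flags &
         KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES)) {
        fprintf(stderr, "emulation failed, insn:");
        for (int i = 0; i < run->emulation_failure.insn_size; i++) {
            fprintf(stderr, " %02x", run->emulation_failure.insn_bytes[i]);
        }
        fprintf(stderr, "\n");
    }
}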
*/ } emulation_failure; /* KVM_EXIT_OSI */ struct { @@ -433,8 +446,15 @@ struct kvm_run { #define KVM_SYSTEM_EVENT_SHUTDOWN 1 #define KVM_SYSTEM_EVENT_RESET 2 #define KVM_SYSTEM_EVENT_CRASH 3 +#define KVM_SYSTEM_EVENT_WAKEUP 4 +#define KVM_SYSTEM_EVENT_SUSPEND 5 +#define KVM_SYSTEM_EVENT_SEV_TERM 6 __u32 type; - __u64 flags; + __u32 ndata; + union { + __u64 flags; + __u64 data[16]; + }; } system_event; /* KVM_EXIT_S390_STSI */ struct { @@ -469,6 +489,25 @@ struct kvm_run { } msr; /* KVM_EXIT_XEN */ struct kvm_xen_exit xen; + /* KVM_EXIT_RISCV_SBI */ + struct { + unsigned long extension_id; + unsigned long function_id; + unsigned long args[6]; + unsigned long ret[2]; + } riscv_sbi; + /* KVM_EXIT_RISCV_CSR */ + struct { + unsigned long csr_num; + unsigned long new_value; + unsigned long write_mask; + unsigned long ret_value; + } riscv_csr; + /* KVM_EXIT_NOTIFY */ + struct { +#define KVM_NOTIFY_CONTEXT_INVALID (1 << 0) + __u32 flags; + } notify; /* Fix the size of the union. */ char padding[256]; }; @@ -515,7 +554,7 @@ struct kvm_coalesced_mmio { struct kvm_coalesced_mmio_ring { __u32 first, last; - struct kvm_coalesced_mmio coalesced_mmio[0]; + struct kvm_coalesced_mmio coalesced_mmio[]; }; #define KVM_COALESCED_MMIO_MAX \ @@ -544,9 +583,12 @@ struct kvm_s390_mem_op { __u32 op; /* type of operation */ __u64 buf; /* buffer in userspace */ union { - __u8 ar; /* the access register number */ + struct { + __u8 ar; /* the access register number */ + __u8 key; /* access key, ignored if flag unset */ + }; __u32 sida_offset; /* offset into the sida */ - __u8 reserved[32]; /* should be set to 0 */ + __u8 reserved[32]; /* ignored */ }; }; /* types for kvm_s390_mem_op->op */ @@ -554,9 +596,12 @@ struct kvm_s390_mem_op { #define KVM_S390_MEMOP_LOGICAL_WRITE 1 #define KVM_S390_MEMOP_SIDA_READ 2 #define KVM_S390_MEMOP_SIDA_WRITE 3 +#define KVM_S390_MEMOP_ABSOLUTE_READ 4 +#define KVM_S390_MEMOP_ABSOLUTE_WRITE 5 /* flags for kvm_s390_mem_op->flags */ #define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0) #define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1) +#define KVM_S390_MEMOP_F_SKEY_PROTECTION (1ULL << 2) /* for KVM_INTERRUPT */ struct kvm_interrupt { @@ -588,7 +633,7 @@ struct kvm_clear_dirty_log { /* for KVM_SET_SIGNAL_MASK */ struct kvm_signal_mask { __u32 len; - __u8 sigset[0]; + __u8 sigset[]; }; /* for KVM_TPR_ACCESS_REPORTING */ @@ -616,6 +661,7 @@ struct kvm_vapic_addr { #define KVM_MP_STATE_OPERATING 7 #define KVM_MP_STATE_LOAD 8 #define KVM_MP_STATE_AP_RESET_HOLD 9 +#define KVM_MP_STATE_SUSPENDED 10 struct kvm_mp_state { __u32 mp_state; @@ -1112,6 +1158,23 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_BINARY_STATS_FD 203 #define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204 #define KVM_CAP_ARM_MTE 205 +#define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206 +#define KVM_CAP_VM_GPA_BITS 207 +#define KVM_CAP_XSAVE2 208 +#define KVM_CAP_SYS_ATTRIBUTES 209 +#define KVM_CAP_PPC_AIL_MODE_3 210 +#define KVM_CAP_S390_MEM_OP_EXTENSION 211 +#define KVM_CAP_PMU_CAPABILITY 212 +#define KVM_CAP_DISABLE_QUIRKS2 213 +#define KVM_CAP_VM_TSC_CONTROL 214 +#define KVM_CAP_SYSTEM_EVENT_DATA 215 +#define KVM_CAP_ARM_SYSTEM_SUSPEND 216 +#define KVM_CAP_S390_PROTECTED_DUMP 217 +#define KVM_CAP_X86_TRIPLE_FAULT_EVENT 218 +#define KVM_CAP_X86_NOTIFY_VMEXIT 219 +#define KVM_CAP_VM_DISABLE_NX_HUGE_PAGES 220 +#define KVM_CAP_S390_ZPCI_OP 221 +#define KVM_CAP_S390_CPU_TOPOLOGY 222 #ifdef KVM_CAP_IRQ_ROUTING @@ -1143,11 +1206,20 @@ struct kvm_irq_routing_hv_sint { __u32 sint; }; +struct kvm_irq_routing_xen_evtchn { + __u32 port; + __u32 vcpu; + 
__u32 priority; +}; + +#define KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL ((__u32)(-1)) + /* gsi routing entry types */ #define KVM_IRQ_ROUTING_IRQCHIP 1 #define KVM_IRQ_ROUTING_MSI 2 #define KVM_IRQ_ROUTING_S390_ADAPTER 3 #define KVM_IRQ_ROUTING_HV_SINT 4 +#define KVM_IRQ_ROUTING_XEN_EVTCHN 5 struct kvm_irq_routing_entry { __u32 gsi; @@ -1159,6 +1231,7 @@ struct kvm_irq_routing_entry { struct kvm_irq_routing_msi msi; struct kvm_irq_routing_s390_adapter adapter; struct kvm_irq_routing_hv_sint hv_sint; + struct kvm_irq_routing_xen_evtchn xen_evtchn; __u32 pad[8]; } u; }; @@ -1166,7 +1239,7 @@ struct kvm_irq_routing_entry { struct kvm_irq_routing { __u32 nr; __u32 flags; - struct kvm_irq_routing_entry entries[0]; + struct kvm_irq_routing_entry entries[]; }; #endif @@ -1189,6 +1262,8 @@ struct kvm_x86_mce { #define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1) #define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2) #define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3) +#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4) +#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5) struct kvm_xen_hvm_config { __u32 flags; @@ -1223,11 +1298,16 @@ struct kvm_irqfd { /* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags. */ #define KVM_CLOCK_TSC_STABLE 2 +#define KVM_CLOCK_REALTIME (1 << 2) +#define KVM_CLOCK_HOST_TSC (1 << 3) struct kvm_clock_data { __u64 clock; __u32 flags; - __u32 pad[9]; + __u32 pad0; + __u64 realtime; + __u64 host_tsc; + __u32 pad[4]; }; /* For KVM_CAP_SW_TLB */ @@ -1279,7 +1359,7 @@ struct kvm_dirty_tlb { struct kvm_reg_list { __u64 n; /* number of regs */ - __u64 reg[0]; + __u64 reg[]; }; struct kvm_one_reg { @@ -1422,7 +1502,8 @@ struct kvm_s390_ucas_mapping { #define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2) /* Available with KVM_CAP_PPC_GET_PVINFO */ #define KVM_PPC_GET_PVINFO _IOW(KVMIO, 0xa1, struct kvm_ppc_pvinfo) -/* Available with KVM_CAP_TSC_CONTROL */ +/* Available with KVM_CAP_TSC_CONTROL for a vCPU, or with +* KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */ #define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2) #define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3) /* Available with KVM_CAP_PCI_2_3 */ @@ -1597,6 +1678,55 @@ struct kvm_s390_pv_unp { __u64 tweak; }; +enum pv_cmd_dmp_id { + KVM_PV_DUMP_INIT, + KVM_PV_DUMP_CONFIG_STOR_STATE, + KVM_PV_DUMP_COMPLETE, + KVM_PV_DUMP_CPU, +}; + +struct kvm_s390_pv_dmp { + __u64 subcmd; + __u64 buff_addr; + __u64 buff_len; + __u64 gaddr; /* For dump storage state */ + __u64 reserved[4]; +}; + +enum pv_cmd_info_id { + KVM_PV_INFO_VM, + KVM_PV_INFO_DUMP, +}; + +struct kvm_s390_pv_info_dump { + __u64 dump_cpu_buffer_len; + __u64 dump_config_mem_buffer_per_1m; + __u64 dump_config_finalize_len; +}; + +struct kvm_s390_pv_info_vm { + __u64 inst_calls_list[4]; + __u64 max_cpus; + __u64 max_guests; + __u64 max_guest_addr; + __u64 feature_indication; +}; + +struct kvm_s390_pv_info_header { + __u32 id; + __u32 len_max; + __u32 len_written; + __u32 reserved; +}; + +struct kvm_s390_pv_info { + struct kvm_s390_pv_info_header header; + union { + struct kvm_s390_pv_info_dump dump; + struct kvm_s390_pv_info_vm vm; + }; +}; + enum pv_cmd_id { KVM_PV_ENABLE, KVM_PV_DISABLE, @@ -1605,6 +1735,8 @@ enum pv_cmd_id { KVM_PV_VERIFY, KVM_PV_PREP_RESET, KVM_PV_UNSHARE_ALL, + KVM_PV_INFO, + KVM_PV_DUMP, }; struct kvm_pv_cmd { @@ -1638,6 +1770,32 @@ struct kvm_xen_hvm_attr { struct { __u64 gfn; } shared_info; + struct { + __u32 send_port; + __u32 type; /* EVTCHNSTAT_ipi / EVTCHNSTAT_interdomain */ + __u32 flags; +#define KVM_XEN_EVTCHN_DEASSIGN (1 << 0) +#define KVM_XEN_EVTCHN_UPDATE 
(1 << 1) +#define KVM_XEN_EVTCHN_RESET (1 << 2) + /* + * Events sent by the guest are either looped back to + * the guest itself (potentially on a different port#) + * or signalled via an eventfd. + */ + union { + struct { + __u32 port; + __u32 vcpu; + __u32 priority; + } port; + struct { + __u32 port; /* Zero for eventfd */ + __s32 fd; + } eventfd; + __u32 padding[4]; + } deliver; + } evtchn; + __u32 xen_version; __u64 pad[8]; } u; }; @@ -1646,11 +1804,17 @@ struct kvm_xen_hvm_attr { #define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 #define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1 #define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2 +/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */ +#define KVM_XEN_ATTR_TYPE_EVTCHN 0x3 +#define KVM_XEN_ATTR_TYPE_XEN_VERSION 0x4 /* Per-vCPU Xen attributes */ #define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr) #define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr) +/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */ +#define KVM_XEN_HVM_EVTCHN_SEND _IOW(KVMIO, 0xd0, struct kvm_irq_routing_xen_evtchn) + #define KVM_GET_SREGS2 _IOR(KVMIO, 0xcc, struct kvm_sregs2) #define KVM_SET_SREGS2 _IOW(KVMIO, 0xcd, struct kvm_sregs2) @@ -1668,6 +1832,13 @@ struct kvm_xen_vcpu_attr { __u64 time_blocked; __u64 time_offline; } runstate; + __u32 vcpu_id; + struct { + __u32 port; + __u32 priority; + __u64 expires_ns; + } timer; + __u8 vector; } u; }; @@ -1678,6 +1849,10 @@ struct kvm_xen_vcpu_attr { #define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3 #define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4 #define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5 +/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */ +#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID 0x6 +#define KVM_XEN_VCPU_ATTR_TYPE_TIMER 0x7 +#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR 0x8 /* Secure Encrypted Virtualization command */ enum sev_cmd_id { @@ -1932,6 +2107,8 @@ struct kvm_dirty_gfn { #define KVM_BUS_LOCK_DETECTION_OFF (1 << 0) #define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1) +#define KVM_PMU_CAP_DISABLE (1 << 0) + /** * struct kvm_stats_header - Header of per vm/vcpu binary statistics data. * @flags: Some extra information for header, always 0 for now. @@ -1965,7 +2142,9 @@ struct kvm_stats_header { #define KVM_STATS_TYPE_CUMULATIVE (0x0 << KVM_STATS_TYPE_SHIFT) #define KVM_STATS_TYPE_INSTANT (0x1 << KVM_STATS_TYPE_SHIFT) #define KVM_STATS_TYPE_PEAK (0x2 << KVM_STATS_TYPE_SHIFT) -#define KVM_STATS_TYPE_MAX KVM_STATS_TYPE_PEAK +#define KVM_STATS_TYPE_LINEAR_HIST (0x3 << KVM_STATS_TYPE_SHIFT) +#define KVM_STATS_TYPE_LOG_HIST (0x4 << KVM_STATS_TYPE_SHIFT) +#define KVM_STATS_TYPE_MAX KVM_STATS_TYPE_LOG_HIST #define KVM_STATS_UNIT_SHIFT 4 #define KVM_STATS_UNIT_MASK (0xF << KVM_STATS_UNIT_SHIFT) @@ -1973,7 +2152,8 @@ struct kvm_stats_header { #define KVM_STATS_UNIT_BYTES (0x1 << KVM_STATS_UNIT_SHIFT) #define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT) #define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT) -#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_CYCLES +#define KVM_STATS_UNIT_BOOLEAN (0x4 << KVM_STATS_UNIT_SHIFT) +#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_BOOLEAN #define KVM_STATS_BASE_SHIFT 8 #define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT) @@ -1988,8 +2168,9 @@ struct kvm_stats_header { * @size: The number of data items for this stats. * Every data item is of type __u64. * @offset: The offset of the stats to the start of stat structure in - * struture kvm or kvm_vcpu. - * @unused: Unused field for future usage. Always 0 for now. 
+ * structure kvm or kvm_vcpu. + * @bucket_size: A parameter value used for histogram stats. It is only used + * for linear histogram stats, specifying the size of the bucket; * @name: The name string for the stats. Its size is indicated by the * &kvm_stats_header->name_size. */ @@ -1998,10 +2179,50 @@ struct kvm_stats_desc { __s16 exponent; __u16 size; __u32 offset; - __u32 unused; + __u32 bucket_size; char name[]; }; #define KVM_GET_STATS_FD _IO(KVMIO, 0xce) +/* Available with KVM_CAP_XSAVE2 */ +#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave) + +/* Available with KVM_CAP_S390_PROTECTED_DUMP */ +#define KVM_S390_PV_CPU_COMMAND _IOWR(KVMIO, 0xd0, struct kvm_pv_cmd) + +/* Available with KVM_CAP_X86_NOTIFY_VMEXIT */ +#define KVM_X86_NOTIFY_VMEXIT_ENABLED (1ULL << 0) +#define KVM_X86_NOTIFY_VMEXIT_USER (1ULL << 1) + +/* Available with KVM_CAP_S390_ZPCI_OP */ +#define KVM_S390_ZPCI_OP _IOW(KVMIO, 0xd1, struct kvm_s390_zpci_op) + +struct kvm_s390_zpci_op { + /* in */ + __u32 fh; /* target device */ + __u8 op; /* operation to perform */ + __u8 pad[3]; + union { + /* for KVM_S390_ZPCIOP_REG_AEN */ + struct { + __u64 ibv; /* Guest addr of interrupt bit vector */ + __u64 sb; /* Guest addr of summary bit */ + __u32 flags; + __u32 noi; /* Number of interrupts */ + __u8 isc; /* Guest interrupt subclass */ + __u8 sbo; /* Offset of guest summary bit vector */ + __u16 pad; + } reg_aen; + __u64 reserved[8]; + } u; +}; + +/* types for kvm_s390_zpci_op->op */ +#define KVM_S390_ZPCIOP_REG_AEN 0 +#define KVM_S390_ZPCIOP_DEREG_AEN 1 + +/* flags for kvm_s390_zpci_op->u.reg_aen.flags */ +#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0) + #endif /* __LINUX_KVM_H */ diff --git a/linux-headers/linux/psci.h b/linux-headers/linux/psci.h index a6772d508b..213b2a0f70 100644 --- a/linux-headers/linux/psci.h +++ b/linux-headers/linux/psci.h @@ -82,6 +82,10 @@ #define PSCI_0_2_TOS_UP_NO_MIGRATE 1 #define PSCI_0_2_TOS_MP 2 +/* PSCI v1.1 reset type encoding for SYSTEM_RESET2 */ +#define PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET 0 +#define PSCI_1_1_RESET_TYPE_VENDOR_START 0x80000000U + /* PSCI version decoding (independent of PSCI version) */ #define PSCI_VERSION_MAJOR_SHIFT 16 #define PSCI_VERSION_MINOR_MASK \ diff --git a/linux-headers/linux/userfaultfd.h b/linux-headers/linux/userfaultfd.h index 8479af5f4c..a3a377cd44 100644 --- a/linux-headers/linux/userfaultfd.h +++ b/linux-headers/linux/userfaultfd.h @@ -32,7 +32,9 @@ UFFD_FEATURE_SIGBUS | \ UFFD_FEATURE_THREAD_ID | \ UFFD_FEATURE_MINOR_HUGETLBFS | \ - UFFD_FEATURE_MINOR_SHMEM) + UFFD_FEATURE_MINOR_SHMEM | \ + UFFD_FEATURE_EXACT_ADDRESS | \ + UFFD_FEATURE_WP_HUGETLBFS_SHMEM) #define UFFD_API_IOCTLS \ ((__u64)1 << _UFFDIO_REGISTER | \ (__u64)1 << _UFFDIO_UNREGISTER | \ @@ -46,7 +48,8 @@ #define UFFD_API_RANGE_IOCTLS_BASIC \ ((__u64)1 << _UFFDIO_WAKE | \ (__u64)1 << _UFFDIO_COPY | \ - (__u64)1 << _UFFDIO_CONTINUE) + (__u64)1 << _UFFDIO_CONTINUE | \ + (__u64)1 << _UFFDIO_WRITEPROTECT) /* * Valid ioctl command number range with this API is from 0x00 to @@ -189,6 +192,13 @@ struct uffdio_api { * * UFFD_FEATURE_MINOR_SHMEM indicates the same support as * UFFD_FEATURE_MINOR_HUGETLBFS, but for shmem-backed pages instead. + * + * UFFD_FEATURE_EXACT_ADDRESS indicates that the exact address of page + * faults would be provided and the offset within the page would not be + * masked. + * + * UFFD_FEATURE_WP_HUGETLBFS_SHMEM indicates that userfaultfd + * write-protection mode is supported on both shmem and hugetlbfs. 
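A brief usage sketch for the two new feature bits (illustrative only, not part of the header): the handshake requests them through UFFDIO_API and fails if the running kernel does not offer them.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

/* Open a userfaultfd and request exact-address reporting plus
 * write-protect support on shmem/hugetlbfs; returns the fd or -1. */
static int uffd_open_wp(void)
{
    int fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    struct uffdio_api api = {
        .api = UFFD_API,
        .features = UFFD_FEATURE_EXACT_ADDRESS |
                    UFFD_FEATURE_WP_HUGETLBFS_SHMEM,
    };

    if (fd < 0) {
        return -1;
    }
    if (ioctl(fd, UFFDIO_API, &api) < 0) {
        close(fd);
        return -1;
    }
    return fd;
}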
*/ #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) #define UFFD_FEATURE_EVENT_FORK (1<<1) @@ -201,6 +211,8 @@ #define UFFD_FEATURE_THREAD_ID (1<<8) #define UFFD_FEATURE_MINOR_HUGETLBFS (1<<9) #define UFFD_FEATURE_MINOR_SHMEM (1<<10) +#define UFFD_FEATURE_EXACT_ADDRESS (1<<11) +#define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12) __u64 features; __u64 ioctls; diff --git a/linux-headers/linux/vduse.h b/linux-headers/linux/vduse.h new file mode 100644 index 0000000000..6d2ca064b5 --- /dev/null +++ b/linux-headers/linux/vduse.h @@ -0,0 +1,353 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _VDUSE_H_ +#define _VDUSE_H_ + +#include <linux/types.h> + +#define VDUSE_BASE 0x81 + +/* The ioctls for control device (/dev/vduse/control) */ + +#define VDUSE_API_VERSION 0 + +/* + * Get the version of VDUSE API that kernel supported (VDUSE_API_VERSION). + * This is used for future extension. + */ +#define VDUSE_GET_API_VERSION _IOR(VDUSE_BASE, 0x00, __u64) + +/* Set the version of VDUSE API that userspace supported. */ +#define VDUSE_SET_API_VERSION _IOW(VDUSE_BASE, 0x01, __u64) + +/** + * struct vduse_dev_config - basic configuration of a VDUSE device + * @name: VDUSE device name, needs to be NUL terminated + * @vendor_id: virtio vendor id + * @device_id: virtio device id + * @features: virtio features + * @vq_num: the number of virtqueues + * @vq_align: the allocation alignment of virtqueue's metadata + * @reserved: for future use, needs to be initialized to zero + * @config_size: the size of the configuration space + * @config: the buffer of the configuration space + * + * Structure used by VDUSE_CREATE_DEV ioctl to create VDUSE device. + */ +struct vduse_dev_config { +#define VDUSE_NAME_MAX 256 + char name[VDUSE_NAME_MAX]; + __u32 vendor_id; + __u32 device_id; + __u64 features; + __u32 vq_num; + __u32 vq_align; + __u32 reserved[13]; + __u32 config_size; + __u8 config[]; +}; + +/* Create a VDUSE device which is represented by a char device (/dev/vduse/$NAME) */ +#define VDUSE_CREATE_DEV _IOW(VDUSE_BASE, 0x02, struct vduse_dev_config) + +/* + * Destroy a VDUSE device. Make sure there are no more references + * to the char device (/dev/vduse/$NAME). + */ +#define VDUSE_DESTROY_DEV _IOW(VDUSE_BASE, 0x03, char[VDUSE_NAME_MAX]) + +/* The ioctls for VDUSE device (/dev/vduse/$NAME) */ + +/** + * struct vduse_iotlb_entry - entry of IOTLB to describe one IOVA region [start, last] + * @offset: the mmap offset on returned file descriptor + * @start: start of the IOVA region + * @last: last of the IOVA region + * @perm: access permission of the IOVA region + * + * Structure used by VDUSE_IOTLB_GET_FD ioctl to find an overlapped IOVA region. + */ +struct vduse_iotlb_entry { + __u64 offset; + __u64 start; + __u64 last; +#define VDUSE_ACCESS_RO 0x1 +#define VDUSE_ACCESS_WO 0x2 +#define VDUSE_ACCESS_RW 0x3 + __u8 perm; +}; + +/* + * Find the first IOVA region that overlaps with the range [start, last] + * and return the corresponding file descriptor. Return -EINVAL means the + * IOVA region doesn't exist. Caller should set start and last fields. + */ +#define VDUSE_IOTLB_GET_FD _IOWR(VDUSE_BASE, 0x10, struct vduse_iotlb_entry) + +/* + * Get the negotiated virtio features. It's a subset of the features in + * struct vduse_dev_config which can be accepted by virtio driver. It's + * only valid after FEATURES_OK status bit is set.
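For illustration only (not part of the header): a userspace device implementation would negotiate the API version on /dev/vduse/control and then issue VDUSE_CREATE_DEV with a vduse_dev_config as described above. The id/feature values and the vq_num/vq_align choices below are placeholders, not recommendations.

#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vduse.h>

static int vduse_create(const char *name, uint32_t device_id, uint32_t vendor_id,
                        uint64_t features, const void *config, uint32_t config_size)
{
    struct vduse_dev_config *cfg;
    uint64_t api = VDUSE_API_VERSION;
    int ctrl, ret = -1;

    ctrl = open("/dev/vduse/control", O_RDWR);
    if (ctrl < 0) {
        return -1;
    }
    if (ioctl(ctrl, VDUSE_SET_API_VERSION, &api)) {
        goto out;
    }

    cfg = calloc(1, sizeof(*cfg) + config_size);
    if (!cfg) {
        goto out;
    }
    strncpy(cfg->name, name, VDUSE_NAME_MAX - 1);
    cfg->device_id = device_id;      /* virtio device id, e.g. block or net */
    cfg->vendor_id = vendor_id;
    cfg->features = features;        /* offered virtio feature bits */
    cfg->vq_num = 2;                 /* placeholder queue count */
    cfg->vq_align = 4096;            /* placeholder vring alignment */
    cfg->config_size = config_size;
    memcpy(cfg->config, config, config_size);

    ret = ioctl(ctrl, VDUSE_CREATE_DEV, cfg);
    free(cfg);
out:
    close(ctrl);
    return ret;
}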
+ */ +#define VDUSE_DEV_GET_FEATURES _IOR(VDUSE_BASE, 0x11, __u64) + +/** + * struct vduse_config_data - data used to update configuration space + * @offset: the offset from the beginning of configuration space + * @length: the length to write to configuration space + * @buffer: the buffer used to write from + * + * Structure used by VDUSE_DEV_SET_CONFIG ioctl to update device + * configuration space. + */ +struct vduse_config_data { + __u32 offset; + __u32 length; + __u8 buffer[]; +}; + +/* Set device configuration space */ +#define VDUSE_DEV_SET_CONFIG _IOW(VDUSE_BASE, 0x12, struct vduse_config_data) + +/* + * Inject a config interrupt. It's usually used to notify virtio driver + * that device configuration space has changed. + */ +#define VDUSE_DEV_INJECT_CONFIG_IRQ _IO(VDUSE_BASE, 0x13) + +/** + * struct vduse_vq_config - basic configuration of a virtqueue + * @index: virtqueue index + * @max_size: the max size of virtqueue + * @reserved: for future use, needs to be initialized to zero + * + * Structure used by VDUSE_VQ_SETUP ioctl to setup a virtqueue. + */ +struct vduse_vq_config { + __u32 index; + __u16 max_size; + __u16 reserved[13]; +}; + +/* + * Setup the specified virtqueue. Make sure all virtqueues have been + * configured before the device is attached to vDPA bus. + */ +#define VDUSE_VQ_SETUP _IOW(VDUSE_BASE, 0x14, struct vduse_vq_config) + +/** + * struct vduse_vq_state_split - split virtqueue state + * @avail_index: available index + */ +struct vduse_vq_state_split { + __u16 avail_index; +}; + +/** + * struct vduse_vq_state_packed - packed virtqueue state + * @last_avail_counter: last driver ring wrap counter observed by device + * @last_avail_idx: device available index + * @last_used_counter: device ring wrap counter + * @last_used_idx: used index + */ +struct vduse_vq_state_packed { + __u16 last_avail_counter; + __u16 last_avail_idx; + __u16 last_used_counter; + __u16 last_used_idx; +}; + +/** + * struct vduse_vq_info - information of a virtqueue + * @index: virtqueue index + * @num: the size of virtqueue + * @desc_addr: address of desc area + * @driver_addr: address of driver area + * @device_addr: address of device area + * @split: split virtqueue state + * @packed: packed virtqueue state + * @ready: ready status of virtqueue + * + * Structure used by VDUSE_VQ_GET_INFO ioctl to get virtqueue's information. + */ +struct vduse_vq_info { + __u32 index; + __u32 num; + __u64 desc_addr; + __u64 driver_addr; + __u64 device_addr; + union { + struct vduse_vq_state_split split; + struct vduse_vq_state_packed packed; + }; + __u8 ready; +}; + +/* Get the specified virtqueue's information. Caller should set index field. */ +#define VDUSE_VQ_GET_INFO _IOWR(VDUSE_BASE, 0x15, struct vduse_vq_info) + +/** + * struct vduse_vq_eventfd - eventfd configuration for a virtqueue + * @index: virtqueue index + * @fd: eventfd, -1 means de-assigning the eventfd + * + * Structure used by VDUSE_VQ_SETUP_KICKFD ioctl to setup kick eventfd. + */ +struct vduse_vq_eventfd { + __u32 index; +#define VDUSE_EVENTFD_DEASSIGN -1 + int fd; +}; + +/* + * Setup kick eventfd for specified virtqueue. The kick eventfd is used + * by VDUSE kernel module to notify userspace to consume the avail vring. + */ +#define VDUSE_VQ_SETUP_KICKFD _IOW(VDUSE_BASE, 0x16, struct vduse_vq_eventfd) + +/* + * Inject an interrupt for specific virtqueue. It's used to notify virtio driver + * to consume the used vring. 
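A minimal sketch of wiring up one virtqueue with the ioctls above (assumptions: dev_fd is an open /dev/vduse/$NAME descriptor and a queue depth of 256 suits the device):

#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vduse.h>

static int vduse_setup_vq0(int dev_fd)
{
    struct vduse_vq_config vq_cfg = { .index = 0, .max_size = 256 };
    struct vduse_vq_eventfd kick = { .index = 0, .fd = eventfd(0, EFD_CLOEXEC) };

    if (kick.fd < 0 || ioctl(dev_fd, VDUSE_VQ_SETUP, &vq_cfg)) {
        return -1;
    }
    if (ioctl(dev_fd, VDUSE_VQ_SETUP_KICKFD, &kick)) {
        return -1;
    }
    /* Later, after the device has filled the used ring: */
    return ioctl(dev_fd, VDUSE_VQ_INJECT_IRQ, &vq_cfg.index);
}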
+ */ +#define VDUSE_VQ_INJECT_IRQ _IOW(VDUSE_BASE, 0x17, __u32) + +/** + * struct vduse_iova_umem - userspace memory configuration for one IOVA region + * @uaddr: start address of userspace memory, it must be aligned to page size + * @iova: start of the IOVA region + * @size: size of the IOVA region + * @reserved: for future use, needs to be initialized to zero + * + * Structure used by VDUSE_IOTLB_REG_UMEM and VDUSE_IOTLB_DEREG_UMEM + * ioctls to register/de-register userspace memory for IOVA regions + */ +struct vduse_iova_umem { + __u64 uaddr; + __u64 iova; + __u64 size; + __u64 reserved[3]; +}; + +/* Register userspace memory for IOVA regions */ +#define VDUSE_IOTLB_REG_UMEM _IOW(VDUSE_BASE, 0x18, struct vduse_iova_umem) + +/* De-register the userspace memory. Caller should set iova and size field. */ +#define VDUSE_IOTLB_DEREG_UMEM _IOW(VDUSE_BASE, 0x19, struct vduse_iova_umem) + +/** + * struct vduse_iova_info - information of one IOVA region + * @start: start of the IOVA region + * @last: last of the IOVA region + * @capability: capability of the IOVA regsion + * @reserved: for future use, needs to be initialized to zero + * + * Structure used by VDUSE_IOTLB_GET_INFO ioctl to get information of + * one IOVA region. + */ +struct vduse_iova_info { + __u64 start; + __u64 last; +#define VDUSE_IOVA_CAP_UMEM (1 << 0) + __u64 capability; + __u64 reserved[3]; +}; + +/* + * Find the first IOVA region that overlaps with the range [start, last] + * and return some information on it. Caller should set start and last fields. + */ +#define VDUSE_IOTLB_GET_INFO _IOWR(VDUSE_BASE, 0x1a, struct vduse_iova_info) + +/* The control messages definition for read(2)/write(2) on /dev/vduse/$NAME */ + +/** + * enum vduse_req_type - request type + * @VDUSE_GET_VQ_STATE: get the state for specified virtqueue from userspace + * @VDUSE_SET_STATUS: set the device status + * @VDUSE_UPDATE_IOTLB: Notify userspace to update the memory mapping for + * specified IOVA range via VDUSE_IOTLB_GET_FD ioctl + */ +enum vduse_req_type { + VDUSE_GET_VQ_STATE, + VDUSE_SET_STATUS, + VDUSE_UPDATE_IOTLB, +}; + +/** + * struct vduse_vq_state - virtqueue state + * @index: virtqueue index + * @split: split virtqueue state + * @packed: packed virtqueue state + */ +struct vduse_vq_state { + __u32 index; + union { + struct vduse_vq_state_split split; + struct vduse_vq_state_packed packed; + }; +}; + +/** + * struct vduse_dev_status - device status + * @status: device status + */ +struct vduse_dev_status { + __u8 status; +}; + +/** + * struct vduse_iova_range - IOVA range [start, last] + * @start: start of the IOVA range + * @last: last of the IOVA range + */ +struct vduse_iova_range { + __u64 start; + __u64 last; +}; + +/** + * struct vduse_dev_request - control request + * @type: request type + * @request_id: request id + * @reserved: for future use + * @vq_state: virtqueue state, only index field is available + * @s: device status + * @iova: IOVA range for updating + * @padding: padding + * + * Structure used by read(2) on /dev/vduse/$NAME. 
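Illustrative only: one round of the control-message protocol, reading a vduse_dev_request and acknowledging it with the vduse_dev_response structure defined just below. A real implementation would act on VDUSE_SET_STATUS and VDUSE_UPDATE_IOTLB rather than blindly replying OK.

#include <string.h>
#include <unistd.h>
#include <linux/vduse.h>

static int vduse_handle_one_request(int dev_fd)
{
    struct vduse_dev_request req;
    struct vduse_dev_response resp;

    if (read(dev_fd, &req, sizeof(req)) != sizeof(req)) {
        return -1;
    }

    memset(&resp, 0, sizeof(resp));
    resp.request_id = req.request_id;
    resp.result = VDUSE_REQ_RESULT_OK;
    if (req.type == VDUSE_GET_VQ_STATE) {
        /* Echo the queue index; a real device reports its saved avail index. */
        resp.vq_state.index = req.vq_state.index;
    }

    return write(dev_fd, &resp, sizeof(resp)) == sizeof(resp) ? 0 : -1;
}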
+ */ +struct vduse_dev_request { + __u32 type; + __u32 request_id; + __u32 reserved[4]; + union { + struct vduse_vq_state vq_state; + struct vduse_dev_status s; + struct vduse_iova_range iova; + __u32 padding[32]; + }; +}; + +/** + * struct vduse_dev_response - response to control request + * @request_id: corresponding request id + * @result: the result of request + * @reserved: for future use, needs to be initialized to zero + * @vq_state: virtqueue state + * @padding: padding + * + * Structure used by write(2) on /dev/vduse/$NAME. + */ +struct vduse_dev_response { + __u32 request_id; +#define VDUSE_REQ_RESULT_OK 0x00 +#define VDUSE_REQ_RESULT_FAILED 0x01 + __u32 result; + __u32 reserved[4]; + union { + struct vduse_vq_state vq_state; + __u32 padding[32]; + }; +}; + +#endif /* _VDUSE_H_ */ diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h index e680594f27..ede44b5572 100644 --- a/linux-headers/linux/vfio.h +++ b/linux-headers/linux/vfio.h @@ -323,7 +323,7 @@ struct vfio_region_info_cap_type { #define VFIO_REGION_TYPE_PCI_VENDOR_MASK (0xffff) #define VFIO_REGION_TYPE_GFX (1) #define VFIO_REGION_TYPE_CCW (2) -#define VFIO_REGION_TYPE_MIGRATION (3) +#define VFIO_REGION_TYPE_MIGRATION_DEPRECATED (3) /* sub-types for VFIO_REGION_TYPE_PCI_* */ @@ -405,225 +405,29 @@ struct vfio_region_gfx_edid { #define VFIO_REGION_SUBTYPE_CCW_CRW (3) /* sub-types for VFIO_REGION_TYPE_MIGRATION */ -#define VFIO_REGION_SUBTYPE_MIGRATION (1) - -/* - * The structure vfio_device_migration_info is placed at the 0th offset of - * the VFIO_REGION_SUBTYPE_MIGRATION region to get and set VFIO device related - * migration information. Field accesses from this structure are only supported - * at their native width and alignment. Otherwise, the result is undefined and - * vendor drivers should return an error. - * - * device_state: (read/write) - * - The user application writes to this field to inform the vendor driver - * about the device state to be transitioned to. - * - The vendor driver should take the necessary actions to change the - * device state. After successful transition to a given state, the - * vendor driver should return success on write(device_state, state) - * system call. If the device state transition fails, the vendor driver - * should return an appropriate -errno for the fault condition. - * - On the user application side, if the device state transition fails, - * that is, if write(device_state, state) returns an error, read - * device_state again to determine the current state of the device from - * the vendor driver. - * - The vendor driver should return previous state of the device unless - * the vendor driver has encountered an internal error, in which case - * the vendor driver may report the device_state VFIO_DEVICE_STATE_ERROR. - * - The user application must use the device reset ioctl to recover the - * device from VFIO_DEVICE_STATE_ERROR state. If the device is - * indicated to be in a valid device state by reading device_state, the - * user application may attempt to transition the device to any valid - * state reachable from the current state or terminate itself. - * - * device_state consists of 3 bits: - * - If bit 0 is set, it indicates the _RUNNING state. If bit 0 is clear, - * it indicates the _STOP state. When the device state is changed to - * _STOP, driver should stop the device before write() returns. 
- * - If bit 1 is set, it indicates the _SAVING state, which means that the - * driver should start gathering device state information that will be - * provided to the VFIO user application to save the device's state. - * - If bit 2 is set, it indicates the _RESUMING state, which means that - * the driver should prepare to resume the device. Data provided through - * the migration region should be used to resume the device. - * Bits 3 - 31 are reserved for future use. To preserve them, the user - * application should perform a read-modify-write operation on this - * field when modifying the specified bits. - * - * +------- _RESUMING - * |+------ _SAVING - * ||+----- _RUNNING - * ||| - * 000b => Device Stopped, not saving or resuming - * 001b => Device running, which is the default state - * 010b => Stop the device & save the device state, stop-and-copy state - * 011b => Device running and save the device state, pre-copy state - * 100b => Device stopped and the device state is resuming - * 101b => Invalid state - * 110b => Error state - * 111b => Invalid state - * - * State transitions: - * - * _RESUMING _RUNNING Pre-copy Stop-and-copy _STOP - * (100b) (001b) (011b) (010b) (000b) - * 0. Running or default state - * | - * - * 1. Normal Shutdown (optional) - * |------------------------------------->| - * - * 2. Save the state or suspend - * |------------------------->|---------->| - * - * 3. Save the state during live migration - * |----------->|------------>|---------->| - * - * 4. Resuming - * |<---------| - * - * 5. Resumed - * |--------->| - * - * 0. Default state of VFIO device is _RUNNING when the user application starts. - * 1. During normal shutdown of the user application, the user application may - * optionally change the VFIO device state from _RUNNING to _STOP. This - * transition is optional. The vendor driver must support this transition but - * must not require it. - * 2. When the user application saves state or suspends the application, the - * device state transitions from _RUNNING to stop-and-copy and then to _STOP. - * On state transition from _RUNNING to stop-and-copy, driver must stop the - * device, save the device state and send it to the application through the - * migration region. The sequence to be followed for such transition is given - * below. - * 3. In live migration of user application, the state transitions from _RUNNING - * to pre-copy, to stop-and-copy, and to _STOP. - * On state transition from _RUNNING to pre-copy, the driver should start - * gathering the device state while the application is still running and send - * the device state data to application through the migration region. - * On state transition from pre-copy to stop-and-copy, the driver must stop - * the device, save the device state and send it to the user application - * through the migration region. - * Vendor drivers must support the pre-copy state even for implementations - * where no data is provided to the user before the stop-and-copy state. The - * user must not be required to consume all migration data before the device - * transitions to a new state, including the stop-and-copy state. - * The sequence to be followed for above two transitions is given below. - * 4. To start the resuming phase, the device state should be transitioned from - * the _RUNNING to the _RESUMING state. - * In the _RESUMING state, the driver should use the device state data - * received through the migration region to resume the device. - * 5. 
After providing saved device data to the driver, the application should - * change the state from _RESUMING to _RUNNING. - * - * reserved: - * Reads on this field return zero and writes are ignored. - * - * pending_bytes: (read only) - * The number of pending bytes still to be migrated from the vendor driver. - * - * data_offset: (read only) - * The user application should read data_offset field from the migration - * region. The user application should read the device data from this - * offset within the migration region during the _SAVING state or write - * the device data during the _RESUMING state. See below for details of - * sequence to be followed. - * - * data_size: (read/write) - * The user application should read data_size to get the size in bytes of - * the data copied in the migration region during the _SAVING state and - * write the size in bytes of the data copied in the migration region - * during the _RESUMING state. - * - * The format of the migration region is as follows: - * ------------------------------------------------------------------ - * |vfio_device_migration_info| data section | - * | | /////////////////////////////// | - * ------------------------------------------------------------------ - * ^ ^ - * offset 0-trapped part data_offset - * - * The structure vfio_device_migration_info is always followed by the data - * section in the region, so data_offset will always be nonzero. The offset - * from where the data is copied is decided by the kernel driver. The data - * section can be trapped, mmapped, or partitioned, depending on how the kernel - * driver defines the data section. The data section partition can be defined - * as mapped by the sparse mmap capability. If mmapped, data_offset must be - * page aligned, whereas initial section which contains the - * vfio_device_migration_info structure, might not end at the offset, which is - * page aligned. The user is not required to access through mmap regardless - * of the capabilities of the region mmap. - * The vendor driver should determine whether and how to partition the data - * section. The vendor driver should return data_offset accordingly. - * - * The sequence to be followed while in pre-copy state and stop-and-copy state - * is as follows: - * a. Read pending_bytes, indicating the start of a new iteration to get device - * data. Repeated read on pending_bytes at this stage should have no side - * effects. - * If pending_bytes == 0, the user application should not iterate to get data - * for that device. - * If pending_bytes > 0, perform the following steps. - * b. Read data_offset, indicating that the vendor driver should make data - * available through the data section. The vendor driver should return this - * read operation only after data is available from (region + data_offset) - * to (region + data_offset + data_size). - * c. Read data_size, which is the amount of data in bytes available through - * the migration region. - * Read on data_offset and data_size should return the offset and size of - * the current buffer if the user application reads data_offset and - * data_size more than once here. - * d. Read data_size bytes of data from (region + data_offset) from the - * migration region. - * e. Process the data. - * f. Read pending_bytes, which indicates that the data from the previous - * iteration has been read. If pending_bytes > 0, go to step b. 
- * - * The user application can transition from the _SAVING|_RUNNING - * (pre-copy state) to the _SAVING (stop-and-copy) state regardless of the - * number of pending bytes. The user application should iterate in _SAVING - * (stop-and-copy) until pending_bytes is 0. - * - * The sequence to be followed while _RESUMING device state is as follows: - * While data for this device is available, repeat the following steps: - * a. Read data_offset from where the user application should write data. - * b. Write migration data starting at the migration region + data_offset for - * the length determined by data_size from the migration source. - * c. Write data_size, which indicates to the vendor driver that data is - * written in the migration region. Vendor driver must return this write - * operations on consuming data. Vendor driver should apply the - * user-provided migration region data to the device resume state. - * - * If an error occurs during the above sequences, the vendor driver can return - * an error code for next read() or write() operation, which will terminate the - * loop. The user application should then take the next necessary action, for - * example, failing migration or terminating the user application. - * - * For the user application, data is opaque. The user application should write - * data in the same order as the data is received and the data should be of - * same transaction size at the source. - */ +#define VFIO_REGION_SUBTYPE_MIGRATION_DEPRECATED (1) struct vfio_device_migration_info { __u32 device_state; /* VFIO device state */ -#define VFIO_DEVICE_STATE_STOP (0) -#define VFIO_DEVICE_STATE_RUNNING (1 << 0) -#define VFIO_DEVICE_STATE_SAVING (1 << 1) -#define VFIO_DEVICE_STATE_RESUMING (1 << 2) -#define VFIO_DEVICE_STATE_MASK (VFIO_DEVICE_STATE_RUNNING | \ - VFIO_DEVICE_STATE_SAVING | \ - VFIO_DEVICE_STATE_RESUMING) +#define VFIO_DEVICE_STATE_V1_STOP (0) +#define VFIO_DEVICE_STATE_V1_RUNNING (1 << 0) +#define VFIO_DEVICE_STATE_V1_SAVING (1 << 1) +#define VFIO_DEVICE_STATE_V1_RESUMING (1 << 2) +#define VFIO_DEVICE_STATE_MASK (VFIO_DEVICE_STATE_V1_RUNNING | \ + VFIO_DEVICE_STATE_V1_SAVING | \ + VFIO_DEVICE_STATE_V1_RESUMING) #define VFIO_DEVICE_STATE_VALID(state) \ - (state & VFIO_DEVICE_STATE_RESUMING ? \ - (state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_RESUMING : 1) + (state & VFIO_DEVICE_STATE_V1_RESUMING ? 
\ + (state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_V1_RESUMING : 1) #define VFIO_DEVICE_STATE_IS_ERROR(state) \ - ((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_SAVING | \ - VFIO_DEVICE_STATE_RESUMING)) + ((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_V1_SAVING | \ + VFIO_DEVICE_STATE_V1_RESUMING)) #define VFIO_DEVICE_STATE_SET_ERROR(state) \ - ((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_SATE_SAVING | \ - VFIO_DEVICE_STATE_RESUMING) + ((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_V1_SAVING | \ + VFIO_DEVICE_STATE_V1_RESUMING) __u32 reserved; __u64 pending_bytes; @@ -839,7 +643,7 @@ enum { }; /** - * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IORW(VFIO_TYPE, VFIO_BASE + 12, + * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12, * struct vfio_pci_hot_reset_info) * * Return: 0 on success, -errno on failure: @@ -966,7 +770,7 @@ struct vfio_device_ioeventfd { #define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16) /** - * VFIO_DEVICE_FEATURE - _IORW(VFIO_TYPE, VFIO_BASE + 17, + * VFIO_DEVICE_FEATURE - _IOWR(VFIO_TYPE, VFIO_BASE + 17, * struct vfio_device_feature) * * Get, set, or probe feature data of the device. The feature is selected @@ -1002,6 +806,186 @@ struct vfio_device_feature { */ #define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN (0) +/* + * Indicates the device can support the migration API through + * VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE. If this GET succeeds, the RUNNING and + * ERROR states are always supported. Support for additional states is + * indicated via the flags field; at least VFIO_MIGRATION_STOP_COPY must be + * set. + * + * VFIO_MIGRATION_STOP_COPY means that STOP, STOP_COPY and + * RESUMING are supported. + * + * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P means that RUNNING_P2P + * is supported in addition to the STOP_COPY states. + * + * Other combinations of flags have behavior to be defined in the future. + */ +struct vfio_device_feature_migration { + __aligned_u64 flags; +#define VFIO_MIGRATION_STOP_COPY (1 << 0) +#define VFIO_MIGRATION_P2P (1 << 1) +}; +#define VFIO_DEVICE_FEATURE_MIGRATION 1 + +/* + * Upon VFIO_DEVICE_FEATURE_SET, execute a migration state change on the VFIO + * device. The new state is supplied in device_state, see enum + * vfio_device_mig_state for details + * + * The kernel migration driver must fully transition the device to the new state + * value before the operation returns to the user. + * + * The kernel migration driver must not generate asynchronous device state + * transitions outside of manipulation by the user or the VFIO_DEVICE_RESET + * ioctl as described above. + * + * If this function fails then current device_state may be the original + * operating state or some other state along the combination transition path. + * The user can then decide if it should execute a VFIO_DEVICE_RESET, attempt + * to return to the original state, or attempt to return to some other state + * such as RUNNING or STOP. + * + * If the new_state starts a new data transfer session then the FD associated + * with that session is returned in data_fd. The user is responsible to close + * this FD when it is finished. The user must consider the migration data stream + * carried over the FD to be opaque and must preserve the byte order of the + * stream. The user is not required to preserve buffer segmentation when writing + * the data stream during the RESUMING operation. + * + * Upon VFIO_DEVICE_FEATURE_GET, get the current migration state of the VFIO + * device, data_fd will be -1. 
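As a hedged sketch (not part of the header) of driving this interface: a userspace driver would embed the vfio_device_feature_mig_state defined just below inside a vfio_device_feature buffer and issue VFIO_DEVICE_FEATURE with the SET flag; device_fd is assumed to be an open VFIO device descriptor.

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Request the STOP_COPY state and return the migration data_fd, or -1. */
static int vfio_enter_stop_copy(int device_fd)
{
    size_t sz = sizeof(struct vfio_device_feature) +
                sizeof(struct vfio_device_feature_mig_state);
    struct vfio_device_feature *feature = calloc(1, sz);
    struct vfio_device_feature_mig_state *mig;
    int data_fd = -1;

    if (!feature) {
        return -1;
    }
    mig = (void *)feature->data;
    feature->argsz = sz;
    feature->flags = VFIO_DEVICE_FEATURE_SET |
                     VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
    mig->device_state = VFIO_DEVICE_STATE_STOP_COPY;

    if (!ioctl(device_fd, VFIO_DEVICE_FEATURE, feature)) {
        data_fd = mig->data_fd;
    }
    free(feature);
    return data_fd;
}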
+ */ +struct vfio_device_feature_mig_state { + __u32 device_state; /* From enum vfio_device_mig_state */ + __s32 data_fd; +}; +#define VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE 2 + +/* + * The device migration Finite State Machine is described by the enum + * vfio_device_mig_state. Some of the FSM arcs will create a migration data + * transfer session by returning a FD, in this case the migration data will + * flow over the FD using read() and write() as discussed below. + * + * There are 5 states to support VFIO_MIGRATION_STOP_COPY: + * RUNNING - The device is running normally + * STOP - The device does not change the internal or external state + * STOP_COPY - The device internal state can be read out + * RESUMING - The device is stopped and is loading a new internal state + * ERROR - The device has failed and must be reset + * + * And 1 optional state to support VFIO_MIGRATION_P2P: + * RUNNING_P2P - RUNNING, except the device cannot do peer to peer DMA + * + * The FSM takes actions on the arcs between FSM states. The driver implements + * the following behavior for the FSM arcs: + * + * RUNNING_P2P -> STOP + * STOP_COPY -> STOP + * While in STOP the device must stop the operation of the device. The device + * must not generate interrupts, DMA, or any other change to external state. + * It must not change its internal state. When stopped the device and kernel + * migration driver must accept and respond to interaction to support external + * subsystems in the STOP state, for example PCI MSI-X and PCI config space. + * Failure by the user to restrict device access while in STOP must not result + * in error conditions outside the user context (ex. host system faults). + * + * The STOP_COPY arc will terminate a data transfer session. + * + * RESUMING -> STOP + * Leaving RESUMING terminates a data transfer session and indicates the + * device should complete processing of the data delivered by write(). The + * kernel migration driver should complete the incorporation of data written + * to the data transfer FD into the device internal state and perform + * final validity and consistency checking of the new device state. If the + * user provided data is found to be incomplete, inconsistent, or otherwise + * invalid, the migration driver must fail the SET_STATE ioctl and + * optionally go to the ERROR state as described below. + * + * While in STOP the device has the same behavior as other STOP states + * described above. + * + * To abort a RESUMING session the device must be reset. + * + * RUNNING_P2P -> RUNNING + * While in RUNNING the device is fully operational, the device may generate + * interrupts, DMA, respond to MMIO, all vfio device regions are functional, + * and the device may advance its internal state. + * + * RUNNING -> RUNNING_P2P + * STOP -> RUNNING_P2P + * While in RUNNING_P2P the device is partially running in the P2P quiescent + * state defined below. + * + * STOP -> STOP_COPY + * This arc begin the process of saving the device state and will return a + * new data_fd. + * + * While in the STOP_COPY state the device has the same behavior as STOP + * with the addition that the data transfers session continues to stream the + * migration state. End of stream on the FD indicates the entire device + * state has been transferred. + * + * The user should take steps to restrict access to vfio device regions while + * the device is in STOP_COPY or risk corruption of the device migration data + * stream. 
+ * + * STOP -> RESUMING + * Entering the RESUMING state starts a process of restoring the device state + * and will return a new data_fd. The data stream fed into the data_fd should + * be taken from the data transfer output of a single FD during saving from + * a compatible device. The migration driver may alter/reset the internal + * device state for this arc if required to prepare the device to receive the + * migration data. + * + * any -> ERROR + * ERROR cannot be specified as a device state, however any transition request + * can be failed with an errno return and may then move the device_state into + * ERROR. In this case the device was unable to execute the requested arc and + * was also unable to restore the device to any valid device_state. + * To recover from ERROR VFIO_DEVICE_RESET must be used to return the + * device_state back to RUNNING. + * + * The optional peer to peer (P2P) quiescent state is intended to be a quiescent + * state for the device for the purposes of managing multiple devices within a + * user context where peer-to-peer DMA between devices may be active. The + * RUNNING_P2P states must prevent the device from initiating + * any new P2P DMA transactions. If the device can identify P2P transactions + * then it can stop only P2P DMA, otherwise it must stop all DMA. The migration + * driver must complete any such outstanding operations prior to completing the + * FSM arc into a P2P state. For the purpose of specification the states + * behave as though the device was fully running if not supported. Like while in + * STOP or STOP_COPY the user must not touch the device, otherwise the state + * can be exited. + * + * The remaining possible transitions are interpreted as combinations of the + * above FSM arcs. As there are multiple paths through the FSM arcs the path + * should be selected based on the following rules: + * - Select the shortest path. + * Refer to vfio_mig_get_next_state() for the result of the algorithm. + * + * The automatic transit through the FSM arcs that make up the combination + * transition is invisible to the user. When working with combination arcs the + * user may see any step along the path in the device_state if SET_STATE + * fails. When handling these types of errors users should anticipate future + * revisions of this protocol using new states and those states becoming + * visible in this case. + * + * The optional states cannot be used with SET_STATE if the device does not + * support them. The user can discover if these states are supported by using + * VFIO_DEVICE_FEATURE_MIGRATION. By using combination transitions the user can + * avoid knowing about these optional states if the kernel driver supports them. 
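All of the arcs above are requested from userspace with VFIO_DEVICE_FEATURE_SET. A minimal sketch of such a request (illustrative only, assuming the usual vfio_device_feature header layout of argsz, flags and trailing data, and an already-open VFIO device descriptor):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Ask the driver to move the device to new_state.  Returns the data_fd
 * for arcs that open a transfer session (e.g. STOP -> STOP_COPY or
 * STOP -> RESUMING) and -1 otherwise or on failure. */
static int vfio_mig_set_state(int device_fd, uint32_t new_state)
{
    uint64_t buf[(sizeof(struct vfio_device_feature) +
                  sizeof(struct vfio_device_feature_mig_state) + 7) / 8];
    struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
    struct vfio_device_feature_mig_state *mig = (void *)feature->data;

    memset(buf, 0, sizeof(buf));
    feature->argsz = sizeof(*feature) + sizeof(*mig);
    feature->flags = VFIO_DEVICE_FEATURE_SET |
                     VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
    mig->device_state = new_state;

    if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feature)) {
        /* The device may be at any step along a combination arc. */
        return -1;
    }
    return mig->data_fd;
}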
+ */ +enum vfio_device_mig_state { + VFIO_DEVICE_STATE_ERROR = 0, + VFIO_DEVICE_STATE_STOP = 1, + VFIO_DEVICE_STATE_RUNNING = 2, + VFIO_DEVICE_STATE_STOP_COPY = 3, + VFIO_DEVICE_STATE_RESUMING = 4, + VFIO_DEVICE_STATE_RUNNING_P2P = 5, +}; + /* -------- API for Type1 VFIO IOMMU -------- */ /** diff --git a/linux-headers/linux/vfio_zdev.h b/linux-headers/linux/vfio_zdev.h index b4309397b6..77f2aff1f2 100644 --- a/linux-headers/linux/vfio_zdev.h +++ b/linux-headers/linux/vfio_zdev.h @@ -29,6 +29,9 @@ struct vfio_device_info_cap_zpci_base { __u16 fmb_length; /* Measurement Block Length (in bytes) */ __u8 pft; /* PCI Function Type */ __u8 gid; /* PCI function group ID */ + /* End of version 1 */ + __u32 fh; /* PCI function handle */ + /* End of version 2 */ }; /** @@ -47,6 +50,10 @@ struct vfio_device_info_cap_zpci_group { __u16 noi; /* Maximum number of MSIs */ __u16 maxstbl; /* Maximum Store Block Length */ __u8 version; /* Supported PCI Version */ + /* End of version 1 */ + __u8 reserved; + __u16 imaxstbl; /* Maximum Interpreted Store Block Length */ + /* End of version 2 */ }; /** diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h index c998860d7b..f9f115a7c7 100644 --- a/linux-headers/linux/vhost.h +++ b/linux-headers/linux/vhost.h @@ -89,11 +89,6 @@ /* Set or get vhost backend capability */ -/* Use message type V2 */ -#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1 -/* IOTLB can accept batching hints */ -#define VHOST_BACKEND_F_IOTLB_BATCH 0x2 - #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64) #define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64) @@ -150,4 +145,39 @@ /* Get the valid iova range */ #define VHOST_VDPA_GET_IOVA_RANGE _IOR(VHOST_VIRTIO, 0x78, \ struct vhost_vdpa_iova_range) +/* Get the config size */ +#define VHOST_VDPA_GET_CONFIG_SIZE _IOR(VHOST_VIRTIO, 0x79, __u32) + +/* Get the count of all virtqueues */ +#define VHOST_VDPA_GET_VQS_COUNT _IOR(VHOST_VIRTIO, 0x80, __u32) + +/* Get the number of virtqueue groups. */ +#define VHOST_VDPA_GET_GROUP_NUM _IOR(VHOST_VIRTIO, 0x81, __u32) + +/* Get the number of address spaces. */ +#define VHOST_VDPA_GET_AS_NUM _IOR(VHOST_VIRTIO, 0x7A, unsigned int) + +/* Get the group for a virtqueue: read index, write group in num, + * The virtqueue index is stored in the index field of + * vhost_vring_state. The group for this specific virtqueue is + * returned via num field of vhost_vring_state. + */ +#define VHOST_VDPA_GET_VRING_GROUP _IOWR(VHOST_VIRTIO, 0x7B, \ + struct vhost_vring_state) +/* Set the ASID for a virtqueue group. The group index is stored in + * the index field of vhost_vring_state, the ASID associated with this + * group is stored at num field of vhost_vring_state. + */ +#define VHOST_VDPA_SET_GROUP_ASID _IOW(VHOST_VIRTIO, 0x7C, \ + struct vhost_vring_state) + +/* Suspend a device so it does not process virtqueue requests anymore + * + * After the return of ioctl the device must preserve all the necessary state + * (the virtqueue vring base plus the possible device specific states) that is + * required for restoring in the future. The device must not change its + * configuration after that point. 
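On the vhost-vdpa side, the new ioctls compose in the obvious way: look up the group a virtqueue belongs to, bind that group to an address space, and suspend when the device must stop processing. A rough sketch (illustrative only, assuming struct vhost_vring_state is the usual index/num pair and vdpa_fd is an open vhost-vdpa device):

#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Map the virtqueue group containing vq_index to address space 'asid'. */
static int vdpa_bind_vq_group_to_asid(int vdpa_fd, unsigned int vq_index,
                                      unsigned int asid)
{
    struct vhost_vring_state state = { .index = vq_index };

    if (ioctl(vdpa_fd, VHOST_VDPA_GET_VRING_GROUP, &state)) {
        return -1;
    }
    /* state.num now holds the group number; reuse it for the ASID call. */
    state.index = state.num;
    state.num = asid;
    return ioctl(vdpa_fd, VHOST_VDPA_SET_GROUP_ASID, &state);
}

/* Stop virtqueue processing while preserving state for a later restore. */
static int vdpa_suspend(int vdpa_fd)
{
    return ioctl(vdpa_fd, VHOST_VDPA_SUSPEND);
}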
+ */ +#define VHOST_VDPA_SUSPEND _IO(VHOST_VIRTIO, 0x7D) + #endif diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c index ee72a1c20f..9875d609a9 100644 --- a/linux-user/aarch64/cpu_loop.c +++ b/linux-user/aarch64/cpu_loop.c @@ -18,9 +18,10 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu.h" +#include "user-internals.h" #include "cpu_loop-common.h" +#include "signal-common.h" #include "qemu/guest-random.h" #include "semihosting/common-semi.h" #include "target/arm/syndrome.h" @@ -77,9 +78,8 @@ void cpu_loop(CPUARMState *env) { CPUState *cs = env_cpu(env); - int trapnr, ec, fsc; + int trapnr, ec, fsc, si_code, si_signo; abi_long ret; - target_siginfo_t info; for (;;) { cpu_exec_start(cs); @@ -89,6 +89,15 @@ void cpu_loop(CPUARMState *env) switch (trapnr) { case EXCP_SWI: + /* + * On syscall, PSTATE.ZA is preserved, along with the ZA matrix. + * PSTATE.SM is cleared, per SMSTOP, which does ResetSVEState. + */ + if (FIELD_EX64(env->svcr, SVCR, SM)) { + env->svcr = FIELD_DP64(env->svcr, SVCR, SM, 0); + arm_rebuild_hflags(env); + arm_reset_sve_state(env); + } ret = do_syscall(env, env->xregs[8], env->xregs[0], @@ -98,9 +107,9 @@ void cpu_loop(CPUARMState *env) env->xregs[4], env->xregs[5], 0, 0); - if (ret == -TARGET_ERESTARTSYS) { + if (ret == -QEMU_ERESTARTSYS) { env->pc -= 4; - } else if (ret != -TARGET_QEMU_ESIGRETURN) { + } else if (ret != -QEMU_ESIGRETURN) { env->xregs[0] = ret; } break; @@ -108,50 +117,53 @@ void cpu_loop(CPUARMState *env) /* just indicate that signals should be handled asap */ break; case EXCP_UDEF: - info.si_signo = TARGET_SIGILL; - info.si_errno = 0; - info.si_code = TARGET_ILL_ILLOPN; - info._sifields._sigfault._addr = env->pc; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPN, env->pc); break; case EXCP_PREFETCH_ABORT: case EXCP_DATA_ABORT: - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - info._sifields._sigfault._addr = env->exception.vaddress; - - /* We should only arrive here with EC in {DATAABORT, INSNABORT}. */ ec = syn_get_ec(env->exception.syndrome); - assert(ec == EC_DATAABORT || ec == EC_INSNABORT); - - /* Both EC have the same format for FSC, or close enough. */ - fsc = extract32(env->exception.syndrome, 0, 6); - switch (fsc) { - case 0x04 ... 0x07: /* Translation fault, level {0-3} */ - info.si_code = TARGET_SEGV_MAPERR; + switch (ec) { + case EC_DATAABORT: + case EC_INSNABORT: + /* Both EC have the same format for FSC, or close enough. */ + fsc = extract32(env->exception.syndrome, 0, 6); + switch (fsc) { + case 0x04 ... 0x07: /* Translation fault, level {0-3} */ + si_signo = TARGET_SIGSEGV; + si_code = TARGET_SEGV_MAPERR; + break; + case 0x09 ... 0x0b: /* Access flag fault, level {1-3} */ + case 0x0d ... 0x0f: /* Permission fault, level {1-3} */ + si_signo = TARGET_SIGSEGV; + si_code = TARGET_SEGV_ACCERR; + break; + case 0x11: /* Synchronous Tag Check Fault */ + si_signo = TARGET_SIGSEGV; + si_code = TARGET_SEGV_MTESERR; + break; + case 0x21: /* Alignment fault */ + si_signo = TARGET_SIGBUS; + si_code = TARGET_BUS_ADRALN; + break; + default: + g_assert_not_reached(); + } break; - case 0x09 ... 0x0b: /* Access flag fault, level {1-3} */ - case 0x0d ... 
0x0f: /* Permission fault, level {1-3} */ - info.si_code = TARGET_SEGV_ACCERR; - break; - case 0x11: /* Synchronous Tag Check Fault */ - info.si_code = TARGET_SEGV_MTESERR; + case EC_PCALIGNMENT: + si_signo = TARGET_SIGBUS; + si_code = TARGET_BUS_ADRALN; break; default: g_assert_not_reached(); } - - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(si_signo, si_code, env->exception.vaddress); break; case EXCP_DEBUG: case EXCP_BKPT: - info.si_signo = TARGET_SIGTRAP; - info.si_errno = 0; - info.si_code = TARGET_TRAP_BRKPT; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc); break; case EXCP_SEMIHOST: - env->xregs[0] = do_common_semihosting(cs); + do_common_semihosting(cs); env->pc += 4; break; case EXCP_YIELD: @@ -168,11 +180,7 @@ void cpu_loop(CPUARMState *env) /* Check for MTE asynchronous faults */ if (unlikely(env->cp15.tfsr_el[0])) { env->cp15.tfsr_el[0] = 0; - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - info._sifields._sigfault._addr = 0; - info.si_code = TARGET_SEGV_MTEAERR; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MTEAERR, 0); } process_pending_signals(env); @@ -202,7 +210,7 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) } env->pc = regs->pc; env->xregs[31] = regs->sp; -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN env->cp15.sctlr_el[1] |= SCTLR_E0E; for (i = 1; i < 4; ++i) { env->cp15.sctlr_el[i] |= SCTLR_EE; diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c index 662bcd1c4e..6a2c6e06d2 100644 --- a/linux-user/aarch64/signal.c +++ b/linux-user/aarch64/signal.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" @@ -77,7 +78,8 @@ struct target_extra_context { struct target_sve_context { struct target_aarch64_ctx head; uint16_t vl; - uint16_t reserved[3]; + uint16_t flags; + uint16_t reserved[2]; /* The actual SVE data immediately follows. It is laid out * according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of * the original struct pointer. @@ -100,6 +102,24 @@ struct target_sve_context { #define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \ (TARGET_SVE_SIG_PREG_OFFSET(VQ, 17)) +#define TARGET_SVE_SIG_FLAG_SM 1 + +#define TARGET_ZA_MAGIC 0x54366345 + +struct target_za_context { + struct target_aarch64_ctx head; + uint16_t vl; + uint16_t reserved[3]; + /* The actual ZA data immediately follows. 
*/ +}; + +#define TARGET_ZA_SIG_REGS_OFFSET \ + QEMU_ALIGN_UP(sizeof(struct target_za_context), TARGET_SVE_VQ_BYTES) +#define TARGET_ZA_SIG_ZAV_OFFSET(VQ, N) \ + (TARGET_ZA_SIG_REGS_OFFSET + (VQ) * TARGET_SVE_VQ_BYTES * (N)) +#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \ + TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES) + struct target_rt_sigframe { struct target_siginfo info; struct target_ucontext uc; @@ -108,7 +128,6 @@ struct target_rt_sigframe { struct target_rt_frame_record { uint64_t fp; uint64_t lr; - uint32_t tramp[2]; }; static void target_setup_general_frame(struct target_rt_sigframe *sf, @@ -147,7 +166,7 @@ static void target_setup_fpsimd_record(struct target_fpsimd_context *fpsimd, for (i = 0; i < 32; i++) { uint64_t *q = aa64_vfp_qreg(env, i); -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN __put_user(q[0], &fpsimd->vregs[i * 2 + 1]); __put_user(q[1], &fpsimd->vregs[i * 2]); #else @@ -173,13 +192,17 @@ static void target_setup_end_record(struct target_aarch64_ctx *end) } static void target_setup_sve_record(struct target_sve_context *sve, - CPUARMState *env, int vq, int size) + CPUARMState *env, int size) { - int i, j; + int i, j, vq = sve_vq(env); + memset(sve, 0, sizeof(*sve)); __put_user(TARGET_SVE_MAGIC, &sve->head.magic); __put_user(size, &sve->head.size); __put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl); + if (FIELD_EX64(env->svcr, SVCR, SM)) { + __put_user(TARGET_SVE_SIG_FLAG_SM, &sve->flags); + } /* Note that SVE regs are stored as a byte stream, with each byte element * at a subsequent address. This corresponds to a little-endian store @@ -200,6 +223,35 @@ static void target_setup_sve_record(struct target_sve_context *sve, } } +static void target_setup_za_record(struct target_za_context *za, + CPUARMState *env, int size) +{ + int vq = sme_vq(env); + int vl = vq * TARGET_SVE_VQ_BYTES; + int i, j; + + memset(za, 0, sizeof(*za)); + __put_user(TARGET_ZA_MAGIC, &za->head.magic); + __put_user(size, &za->head.size); + __put_user(vl, &za->vl); + + if (size == TARGET_ZA_SIG_CONTEXT_SIZE(0)) { + return; + } + assert(size == TARGET_ZA_SIG_CONTEXT_SIZE(vq)); + + /* + * Note that ZA vectors are stored as a byte stream, + * with each byte element at a subsequent address. + */ + for (i = 0; i < vl; ++i) { + uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i); + for (j = 0; j < vq * 2; ++j) { + __put_user_e(env->zarray[i].d[j], z + j, le); + } + } +} + static void target_restore_general_frame(CPUARMState *env, struct target_rt_sigframe *sf) { @@ -233,7 +285,7 @@ static void target_restore_fpsimd_record(CPUARMState *env, for (i = 0; i < 32; i++) { uint64_t *q = aa64_vfp_qreg(env, i); -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN __get_user(q[0], &fpsimd->vregs[i * 2 + 1]); __get_user(q[1], &fpsimd->vregs[i * 2]); #else @@ -243,12 +295,50 @@ static void target_restore_fpsimd_record(CPUARMState *env, } } -static void target_restore_sve_record(CPUARMState *env, - struct target_sve_context *sve, int vq) +static bool target_restore_sve_record(CPUARMState *env, + struct target_sve_context *sve, + int size, int *svcr) { - int i, j; + int i, j, vl, vq, flags; + bool sm; - /* Note that SVE regs are stored as a byte stream, with each byte element + __get_user(vl, &sve->vl); + __get_user(flags, &sve->flags); + + sm = flags & TARGET_SVE_SIG_FLAG_SM; + + /* The cpu must support Streaming or Non-streaming SVE. */ + if (sm + ? 
!cpu_isar_feature(aa64_sme, env_archcpu(env)) + : !cpu_isar_feature(aa64_sve, env_archcpu(env))) { + return false; + } + + /* + * Note that we cannot use sve_vq() because that depends on the + * current setting of PSTATE.SM, not the state to be restored. + */ + vq = sve_vqm1_for_el_sm(env, 0, sm) + 1; + + /* Reject mismatched VL. */ + if (vl != vq * TARGET_SVE_VQ_BYTES) { + return false; + } + + /* Accept empty record -- used to clear PSTATE.SM. */ + if (size <= sizeof(*sve)) { + return true; + } + + /* Reject non-empty but incomplete record. */ + if (size < TARGET_SVE_SIG_CONTEXT_SIZE(vq)) { + return false; + } + + *svcr = FIELD_DP64(*svcr, SVCR, SM, sm); + + /* + * Note that SVE regs are stored as a byte stream, with each byte element * at a subsequent address. This corresponds to a little-endian load * of our 64-bit hunks. */ @@ -270,6 +360,46 @@ static void target_restore_sve_record(CPUARMState *env, } } } + return true; +} + +static bool target_restore_za_record(CPUARMState *env, + struct target_za_context *za, + int size, int *svcr) +{ + int i, j, vl, vq; + + if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) { + return false; + } + + __get_user(vl, &za->vl); + vq = sme_vq(env); + + /* Reject mismatched VL. */ + if (vl != vq * TARGET_SVE_VQ_BYTES) { + return false; + } + + /* Accept empty record -- used to clear PSTATE.ZA. */ + if (size <= TARGET_ZA_SIG_CONTEXT_SIZE(0)) { + return true; + } + + /* Reject non-empty but incomplete record. */ + if (size < TARGET_ZA_SIG_CONTEXT_SIZE(vq)) { + return false; + } + + *svcr = FIELD_DP64(*svcr, SVCR, ZA, 1); + + for (i = 0; i < vl; ++i) { + uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i); + for (j = 0; j < vq * 2; ++j) { + __get_user_e(env->zarray[i].d[j], z + j, le); + } + } + return true; } static int target_restore_sigframe(CPUARMState *env, @@ -278,10 +408,12 @@ static int target_restore_sigframe(CPUARMState *env, struct target_aarch64_ctx *ctx, *extra = NULL; struct target_fpsimd_context *fpsimd = NULL; struct target_sve_context *sve = NULL; + struct target_za_context *za = NULL; uint64_t extra_datap = 0; bool used_extra = false; - bool err = false; - int vq = 0, sve_size = 0; + int sve_size = 0; + int za_size = 0; + int svcr = 0; target_restore_general_frame(env, sf); @@ -294,8 +426,7 @@ static int target_restore_sigframe(CPUARMState *env, switch (magic) { case 0: if (size != 0) { - err = true; - goto exit; + goto err; } if (used_extra) { ctx = NULL; @@ -307,42 +438,46 @@ static int target_restore_sigframe(CPUARMState *env, case TARGET_FPSIMD_MAGIC: if (fpsimd || size != sizeof(struct target_fpsimd_context)) { - err = true; - goto exit; + goto err; } fpsimd = (struct target_fpsimd_context *)ctx; break; case TARGET_SVE_MAGIC: - if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { - vq = (env->vfp.zcr_el[1] & 0xf) + 1; - sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); - if (!sve && size == sve_size) { - sve = (struct target_sve_context *)ctx; - break; - } + if (sve || size < sizeof(struct target_sve_context)) { + goto err; } - err = true; - goto exit; + sve = (struct target_sve_context *)ctx; + sve_size = size; + break; + + case TARGET_ZA_MAGIC: + if (za || size < sizeof(struct target_za_context)) { + goto err; + } + za = (struct target_za_context *)ctx; + za_size = size; + break; case TARGET_EXTRA_MAGIC: if (extra || size != sizeof(struct target_extra_context)) { - err = true; - goto exit; + goto err; } __get_user(extra_datap, &((struct target_extra_context *)ctx)->datap); __get_user(extra_size, &((struct 
target_extra_context *)ctx)->size); extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0); + if (!extra) { + return 1; + } break; default: /* Unknown record -- we certainly didn't generate it. * Did we in fact get out of sync? */ - err = true; - goto exit; + goto err; } ctx = (void *)ctx + size; } @@ -351,17 +486,26 @@ static int target_restore_sigframe(CPUARMState *env, if (fpsimd) { target_restore_fpsimd_record(env, fpsimd); } else { - err = true; + goto err; } /* SVE data, if present, overwrites FPSIMD data. */ - if (sve) { - target_restore_sve_record(env, sve, vq); + if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) { + goto err; + } + if (za && !target_restore_za_record(env, za, za_size, &svcr)) { + goto err; + } + if (env->svcr != svcr) { + env->svcr = svcr; + arm_rebuild_hflags(env); } - - exit: unlock_user(extra, extra_datap, 0); - return err; + return 0; + + err: + unlock_user(extra, extra_datap, 0); + return 1; } static abi_ulong get_sigframe(struct target_sigaction *ka, @@ -423,7 +567,8 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, .total_size = offsetof(struct target_rt_sigframe, uc.tuc_mcontext.__reserved), }; - int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0; + int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0; + int sve_size = 0, za_size = 0; struct target_rt_sigframe *frame; struct target_rt_frame_record *fr; abi_ulong frame_addr, return_addr; @@ -433,11 +578,20 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, &layout); /* SVE state needs saving only if it exists. */ - if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { - vq = (env->vfp.zcr_el[1] & 0xf) + 1; - sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); + if (cpu_isar_feature(aa64_sve, env_archcpu(env)) || + cpu_isar_feature(aa64_sme, env_archcpu(env))) { + sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(sve_vq(env)), 16); sve_ofs = alloc_sigframe_space(sve_size, &layout); } + if (cpu_isar_feature(aa64_sme, env_archcpu(env))) { + /* ZA state needs saving only if it is enabled. */ + if (FIELD_EX64(env->svcr, SVCR, ZA)) { + za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env)); + } else { + za_size = TARGET_ZA_SIG_CONTEXT_SIZE(0); + } + za_ofs = alloc_sigframe_space(za_size, &layout); + } if (layout.extra_ofs) { /* Reserve space for the extra end marker. The standard end marker @@ -460,9 +614,9 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, layout.total_size = MAX(layout.total_size, sizeof(struct target_rt_sigframe)); - /* Reserve space for the return code. On a real system this would - * be within the VDSO. So, despite the name this is not a "real" - * record within the frame. + /* + * Reserve space for the standard frame unwind pair: fp, lr. + * Despite the name this is not a "real" record within the frame. */ fr_ofs = layout.total_size; layout.total_size += sizeof(struct target_rt_frame_record); @@ -484,7 +638,10 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, target_setup_end_record((void *)frame + layout.extra_end_ofs); } if (sve_ofs) { - target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size); + target_setup_sve_record((void *)frame + sve_ofs, env, sve_size); + } + if (za_ofs) { + target_setup_za_record((void *)frame + za_ofs, env, za_size); } /* Set up the stack frame for unwinding. 
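For a concrete sense of the sizes behind the za_size selection above, a worked example (assuming TARGET_SVE_VQ_BYTES has its usual value of 16):

/*
 * With VQ = 4 the streaming vector length is 64 bytes, and the record
 * carries the full 64 x 64 byte ZA matrix:
 *
 *   TARGET_ZA_SIG_ZAV_OFFSET(4, N) = TARGET_ZA_SIG_REGS_OFFSET + 64 * N
 *   TARGET_ZA_SIG_CONTEXT_SIZE(4)  = TARGET_ZA_SIG_REGS_OFFSET + 64 * 64
 *
 * whereas TARGET_ZA_SIG_CONTEXT_SIZE(0) is just the aligned header, which
 * is what gets reserved when PSTATE.ZA is off.
 */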
*/ @@ -495,15 +652,7 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, if (ka->sa_flags & TARGET_SA_RESTORER) { return_addr = ka->sa_restorer; } else { - /* - * mov x8,#__NR_rt_sigreturn; svc #0 - * Since these are instructions they need to be put as little-endian - * regardless of target default or current CPU endianness. - */ - __put_user_e(0xd2801168, &fr->tramp[0], le); - __put_user_e(0xd4000001, &fr->tramp[1], le); - return_addr = frame_addr + fr_ofs - + offsetof(struct target_rt_frame_record, tramp); + return_addr = default_rt_sigreturn; } env->xregs[0] = usig; env->xregs[29] = frame_addr + fr_ofs; @@ -516,6 +665,18 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, env->btype = 2; } + /* + * Invoke the signal handler with both SM and ZA disabled. + * When clearing SM, ResetSVEState, per SMSTOP. + */ + if (FIELD_EX64(env->svcr, SVCR, SM)) { + arm_reset_sve_state(env); + } + if (env->svcr) { + env->svcr = 0; + arm_rebuild_hflags(env); + } + if (info) { tswap_siginfo(&frame->info, info); env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info); @@ -564,15 +725,32 @@ long do_rt_sigreturn(CPUARMState *env) target_restore_altstack(&frame->uc.tuc_stack, env); unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; } long do_sigreturn(CPUARMState *env) { return do_rt_sigreturn(env); } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0); + assert(tramp != NULL); + + /* + * mov x8,#__NR_rt_sigreturn; svc #0 + * Since these are instructions they need to be put as little-endian + * regardless of target default or current CPU endianness. + */ + __put_user_e(0xd2801168, &tramp[0], le); + __put_user_e(0xd4000001, &tramp[1], le); + + default_rt_sigreturn = sigtramp_page; + unlock_user(tramp, sigtramp_page, 8); +} diff --git a/linux-user/aarch64/target_cpu.h b/linux-user/aarch64/target_cpu.h index 97a477bd3e..f90359faf2 100644 --- a/linux-user/aarch64/target_cpu.h +++ b/linux-user/aarch64/target_cpu.h @@ -34,10 +34,13 @@ static inline void cpu_clone_regs_parent(CPUARMState *env, unsigned flags) static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls) { - /* Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is + /* + * Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is * different from AArch32 Linux, which uses TPIDRRO. */ env->cp15.tpidr_el[0] = newtls; + /* TPIDR2_EL0 is cleared with CLONE_SETTLS. 
*/ + env->cp15.tpidr2_el0 = 0; } static inline abi_ulong get_sp_from_cpustate(CPUARMState *state) diff --git a/linux-user/aarch64/target_mman.h b/linux-user/aarch64/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/aarch64/target_mman.h @@ -0,0 +1 @@ +#include "../generic/target_mman.h" diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h new file mode 100644 index 0000000000..907c314146 --- /dev/null +++ b/linux-user/aarch64/target_prctl.h @@ -0,0 +1,220 @@ +/* + * AArch64 specific prctl functions for linux-user + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef AARCH64_TARGET_PRCTL_H +#define AARCH64_TARGET_PRCTL_H + +static abi_long do_prctl_sve_get_vl(CPUArchState *env) +{ + ARMCPU *cpu = env_archcpu(env); + if (cpu_isar_feature(aa64_sve, cpu)) { + /* PSTATE.SM is always unset on syscall entry. */ + return sve_vq(env) * 16; + } + return -TARGET_EINVAL; +} +#define do_prctl_sve_get_vl do_prctl_sve_get_vl + +static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2) +{ + /* + * We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT. + * Note the kernel definition of sve_vl_valid allows for VQ=512, + * i.e. VL=8192, even though the current architectural maximum is VQ=16. + */ + if (cpu_isar_feature(aa64_sve, env_archcpu(env)) + && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) { + uint32_t vq, old_vq; + + /* PSTATE.SM is always unset on syscall entry. */ + old_vq = sve_vq(env); + + /* + * Bound the value of arg2, so that we know that it fits into + * the 4-bit field in ZCR_EL1. Rely on the hflags rebuild to + * sort out the length supported by the cpu. + */ + vq = MAX(arg2 / 16, 1); + vq = MIN(vq, ARM_MAX_VQ); + env->vfp.zcr_el[1] = vq - 1; + arm_rebuild_hflags(env); + + vq = sve_vq(env); + if (vq < old_vq) { + aarch64_sve_narrow_vq(env, vq); + } + return vq * 16; + } + return -TARGET_EINVAL; +} +#define do_prctl_sve_set_vl do_prctl_sve_set_vl + +static abi_long do_prctl_sme_get_vl(CPUArchState *env) +{ + ARMCPU *cpu = env_archcpu(env); + if (cpu_isar_feature(aa64_sme, cpu)) { + return sme_vq(env) * 16; + } + return -TARGET_EINVAL; +} +#define do_prctl_sme_get_vl do_prctl_sme_get_vl + +static abi_long do_prctl_sme_set_vl(CPUArchState *env, abi_long arg2) +{ + /* + * We cannot support either PR_SME_SET_VL_ONEXEC or PR_SME_VL_INHERIT. + * Note the kernel definition of sve_vl_valid allows for VQ=512, + * i.e. VL=8192, even though the architectural maximum is VQ=16. + */ + if (cpu_isar_feature(aa64_sme, env_archcpu(env)) + && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) { + int vq, old_vq; + + old_vq = sme_vq(env); + + /* + * Bound the value of vq, so that we know that it fits into + * the 4-bit field in SMCR_EL1. Because PSTATE.SM is cleared + * on syscall entry, we are not modifying the current SVE + * vector length. + */ + vq = MAX(arg2 / 16, 1); + vq = MIN(vq, 16); + env->vfp.smcr_el[1] = + FIELD_DP64(env->vfp.smcr_el[1], SMCR, LEN, vq - 1); + + /* Delay rebuilding hflags until we know if ZA must change. */ + vq = sve_vqm1_for_el_sm(env, 0, true) + 1; + + if (vq != old_vq) { + /* + * PSTATE.ZA state is cleared on any change to SVL. + * We need not call arm_rebuild_hflags because PSTATE.SM was + * cleared on syscall entry, so this hasn't changed VL. 
+ */ + env->svcr = FIELD_DP64(env->svcr, SVCR, ZA, 0); + arm_rebuild_hflags(env); + } + return vq * 16; + } + return -TARGET_EINVAL; +} +#define do_prctl_sme_set_vl do_prctl_sme_set_vl + +static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2) +{ + ARMCPU *cpu = env_archcpu(env); + + if (cpu_isar_feature(aa64_pauth, cpu)) { + int all = (PR_PAC_APIAKEY | PR_PAC_APIBKEY | + PR_PAC_APDAKEY | PR_PAC_APDBKEY | PR_PAC_APGAKEY); + int ret = 0; + Error *err = NULL; + + if (arg2 == 0) { + arg2 = all; + } else if (arg2 & ~all) { + return -TARGET_EINVAL; + } + if (arg2 & PR_PAC_APIAKEY) { + ret |= qemu_guest_getrandom(&env->keys.apia, + sizeof(ARMPACKey), &err); + } + if (arg2 & PR_PAC_APIBKEY) { + ret |= qemu_guest_getrandom(&env->keys.apib, + sizeof(ARMPACKey), &err); + } + if (arg2 & PR_PAC_APDAKEY) { + ret |= qemu_guest_getrandom(&env->keys.apda, + sizeof(ARMPACKey), &err); + } + if (arg2 & PR_PAC_APDBKEY) { + ret |= qemu_guest_getrandom(&env->keys.apdb, + sizeof(ARMPACKey), &err); + } + if (arg2 & PR_PAC_APGAKEY) { + ret |= qemu_guest_getrandom(&env->keys.apga, + sizeof(ARMPACKey), &err); + } + if (ret != 0) { + /* + * Some unknown failure in the crypto. The best + * we can do is log it and fail the syscall. + * The real syscall cannot fail this way. + */ + qemu_log_mask(LOG_UNIMP, "PR_PAC_RESET_KEYS: Crypto failure: %s", + error_get_pretty(err)); + error_free(err); + return -TARGET_EIO; + } + return 0; + } + return -TARGET_EINVAL; +} +#define do_prctl_reset_keys do_prctl_reset_keys + +static abi_long do_prctl_set_tagged_addr_ctrl(CPUArchState *env, abi_long arg2) +{ + abi_ulong valid_mask = PR_TAGGED_ADDR_ENABLE; + ARMCPU *cpu = env_archcpu(env); + + if (cpu_isar_feature(aa64_mte, cpu)) { + valid_mask |= PR_MTE_TCF_MASK; + valid_mask |= PR_MTE_TAG_MASK; + } + + if (arg2 & ~valid_mask) { + return -TARGET_EINVAL; + } + env->tagged_addr_enable = arg2 & PR_TAGGED_ADDR_ENABLE; + + if (cpu_isar_feature(aa64_mte, cpu)) { + switch (arg2 & PR_MTE_TCF_MASK) { + case PR_MTE_TCF_NONE: + case PR_MTE_TCF_SYNC: + case PR_MTE_TCF_ASYNC: + break; + default: + return -EINVAL; + } + + /* + * Write PR_MTE_TCF to SCTLR_EL1[TCF0]. + * Note that the syscall values are consistent with hw. + */ + env->cp15.sctlr_el[1] = + deposit64(env->cp15.sctlr_el[1], 38, 2, arg2 >> PR_MTE_TCF_SHIFT); + + /* + * Write PR_MTE_TAG to GCR_EL1[Exclude]. + * Note that the syscall uses an include mask, + * and hardware uses an exclude mask -- invert. + */ + env->cp15.gcr_el1 = + deposit64(env->cp15.gcr_el1, 0, 16, ~arg2 >> PR_MTE_TAG_SHIFT); + arm_rebuild_hflags(env); + } + return 0; +} +#define do_prctl_set_tagged_addr_ctrl do_prctl_set_tagged_addr_ctrl + +static abi_long do_prctl_get_tagged_addr_ctrl(CPUArchState *env) +{ + ARMCPU *cpu = env_archcpu(env); + abi_long ret = 0; + + if (env->tagged_addr_enable) { + ret |= PR_TAGGED_ADDR_ENABLE; + } + if (cpu_isar_feature(aa64_mte, cpu)) { + /* See do_prctl_set_tagged_addr_ctrl. 
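Seen from the guest, the two tagged-address handlers implement the standard PR_SET/PR_GET_TAGGED_ADDR_CTRL contract. A hedged guest-side sketch using the PR_* constants from <linux/prctl.h> (not part of the patch itself):

#include <sys/prctl.h>

/* Enable tagged addresses with synchronous MTE tag-check faults and
 * permit tags 0-3 for IRG.  Fails with EINVAL if the CPU model has
 * no MTE, matching do_prctl_set_tagged_addr_ctrl() above. */
static int enable_mte_sync(void)
{
    unsigned long ctrl = PR_TAGGED_ADDR_ENABLE
                       | PR_MTE_TCF_SYNC
                       | (0xfUL << PR_MTE_TAG_SHIFT);

    return prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0);
}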
*/ + ret |= extract64(env->cp15.sctlr_el[1], 38, 2) << PR_MTE_TCF_SHIFT; + ret = deposit64(ret, PR_MTE_TAG_SHIFT, 16, ~env->cp15.gcr_el1); + } + return ret; +} +#define do_prctl_get_tagged_addr_ctrl do_prctl_get_tagged_addr_ctrl + +#endif /* AARCH64_TARGET_PRCTL_H */ diff --git a/linux-user/aarch64/target_resource.h b/linux-user/aarch64/target_resource.h new file mode 100644 index 0000000000..227259594c --- /dev/null +++ b/linux-user/aarch64/target_resource.h @@ -0,0 +1 @@ +#include "../generic/target_resource.h" diff --git a/linux-user/aarch64/target_signal.h b/linux-user/aarch64/target_signal.h index 18013e1b23..40e399d990 100644 --- a/linux-user/aarch64/target_signal.h +++ b/linux-user/aarch64/target_signal.h @@ -1,28 +1,12 @@ #ifndef AARCH64_TARGET_SIGNAL_H #define AARCH64_TARGET_SIGNAL_H -/* this struct defines a stack used during syscall handling */ - -typedef struct target_sigaltstack { - abi_ulong ss_sp; - abi_int ss_flags; - abi_ulong ss_size; -} target_stack_t; - - -/* - * sigaltstack controls - */ -#define TARGET_SS_ONSTACK 1 -#define TARGET_SS_DISABLE 2 - -#define TARGET_MINSIGSTKSZ 2048 -#define TARGET_SIGSTKSZ 8192 - #include "../generic/signal.h" #define TARGET_SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */ #define TARGET_SEGV_MTESERR 9 /* Synchronous ARM MTE exception */ #define TARGET_ARCH_HAS_SETUP_FRAME +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* AARCH64_TARGET_SIGNAL_H */ diff --git a/linux-user/aarch64/target_structs.h b/linux-user/aarch64/target_structs.h index 7c748344ca..3a06f373c3 100644 --- a/linux-user/aarch64/target_structs.h +++ b/linux-user/aarch64/target_structs.h @@ -1,58 +1 @@ -/* - * ARM AArch64 specific structures for linux-user - * - * Copyright (c) 2013 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#ifndef AARCH64_TARGET_STRUCTS_H -#define AARCH64_TARGET_STRUCTS_H - -struct target_ipc_perm { - abi_int __key; /* Key. */ - abi_uint uid; /* Owner's user ID. */ - abi_uint gid; /* Owner's group ID. */ - abi_uint cuid; /* Creator's user ID. */ - abi_uint cgid; /* Creator's group ID. */ - abi_ushort mode; /* Read/write permission. */ - abi_ushort __pad1; - abi_ushort __seq; /* Sequence number. 
*/ - abi_ushort __pad2; - abi_ulong __unused1; - abi_ulong __unused2; -}; - -struct target_shmid_ds { - struct target_ipc_perm shm_perm; /* operation permission struct */ - abi_long shm_segsz; /* size of segment in bytes */ - abi_ulong shm_atime; /* time of last shmat() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused1; -#endif - abi_ulong shm_dtime; /* time of last shmdt() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused2; -#endif - abi_ulong shm_ctime; /* time of last change by shmctl() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused3; -#endif - abi_int shm_cpid; /* pid of creator */ - abi_int shm_lpid; /* pid of last shmop */ - abi_ulong shm_nattch; /* number of current attaches */ - abi_ulong __unused4; - abi_ulong __unused5; -}; - -#endif +#include "../generic/target_structs.h" diff --git a/linux-user/aarch64/target_syscall.h b/linux-user/aarch64/target_syscall.h index 76f6c3391d..c055133725 100644 --- a/linux-user/aarch64/target_syscall.h +++ b/linux-user/aarch64/target_syscall.h @@ -8,39 +8,15 @@ struct target_pt_regs { uint64_t pstate; }; -#if defined(TARGET_WORDS_BIGENDIAN) +#if TARGET_BIG_ENDIAN #define UNAME_MACHINE "aarch64_be" #else #define UNAME_MACHINE "aarch64" #endif #define UNAME_MINIMUM_RELEASE "3.8.0" #define TARGET_CLONE_BACKWARDS -#define TARGET_MINSIGSTKSZ 2048 #define TARGET_MCL_CURRENT 1 #define TARGET_MCL_FUTURE 2 #define TARGET_MCL_ONFAULT 4 -#define TARGET_PR_SVE_SET_VL 50 -#define TARGET_PR_SVE_GET_VL 51 - -#define TARGET_PR_PAC_RESET_KEYS 54 -# define TARGET_PR_PAC_APIAKEY (1 << 0) -# define TARGET_PR_PAC_APIBKEY (1 << 1) -# define TARGET_PR_PAC_APDAKEY (1 << 2) -# define TARGET_PR_PAC_APDBKEY (1 << 3) -# define TARGET_PR_PAC_APGAKEY (1 << 4) - -#define TARGET_PR_SET_TAGGED_ADDR_CTRL 55 -#define TARGET_PR_GET_TAGGED_ADDR_CTRL 56 -# define TARGET_PR_TAGGED_ADDR_ENABLE (1UL << 0) -/* MTE tag check fault modes */ -# define TARGET_PR_MTE_TCF_SHIFT 1 -# define TARGET_PR_MTE_TCF_NONE (0UL << TARGET_PR_MTE_TCF_SHIFT) -# define TARGET_PR_MTE_TCF_SYNC (1UL << TARGET_PR_MTE_TCF_SHIFT) -# define TARGET_PR_MTE_TCF_ASYNC (2UL << TARGET_PR_MTE_TCF_SHIFT) -# define TARGET_PR_MTE_TCF_MASK (3UL << TARGET_PR_MTE_TCF_SHIFT) -/* MTE tag inclusion mask */ -# define TARGET_PR_MTE_TAG_SHIFT 3 -# define TARGET_PR_MTE_TAG_MASK (0xffffUL << TARGET_PR_MTE_TAG_SHIFT) - #endif /* AARCH64_TARGET_SYSCALL_H */ diff --git a/linux-user/alpha/cpu_loop.c b/linux-user/alpha/cpu_loop.c index 7ce2461a02..2ea039aa71 100644 --- a/linux-user/alpha/cpu_loop.c +++ b/linux-user/alpha/cpu_loop.c @@ -18,15 +18,15 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu.h" +#include "user-internals.h" #include "cpu_loop-common.h" +#include "signal-common.h" void cpu_loop(CPUAlphaState *env) { CPUState *cs = env_cpu(env); - int trapnr; - target_siginfo_t info; + int trapnr, si_code; abi_long sysret; while (1) { @@ -52,35 +52,12 @@ void cpu_loop(CPUAlphaState *env) fprintf(stderr, "External interrupt. Exit\n"); exit(EXIT_FAILURE); break; - case EXCP_MMFAULT: - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID - ? 
TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR); - info._sifields._sigfault._addr = env->trap_arg0; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; - case EXCP_UNALIGN: - info.si_signo = TARGET_SIGBUS; - info.si_errno = 0; - info.si_code = TARGET_BUS_ADRALN; - info._sifields._sigfault._addr = env->trap_arg0; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; case EXCP_OPCDEC: do_sigill: - info.si_signo = TARGET_SIGILL; - info.si_errno = 0; - info.si_code = TARGET_ILL_ILLOPC; - info._sifields._sigfault._addr = env->pc; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->pc); break; case EXCP_ARITH: - info.si_signo = TARGET_SIGFPE; - info.si_errno = 0; - info.si_code = TARGET_FPE_FLTINV; - info._sifields._sigfault._addr = env->pc; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGFPE, TARGET_FPE_FLTINV, env->pc); break; case EXCP_FEN: /* No-op. Linux simply re-enables the FPU. */ @@ -89,20 +66,10 @@ void cpu_loop(CPUAlphaState *env) switch (env->error_code) { case 0x80: /* BPT */ - info.si_signo = TARGET_SIGTRAP; - info.si_errno = 0; - info.si_code = TARGET_TRAP_BRKPT; - info._sifields._sigfault._addr = env->pc; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; + goto do_sigtrap_brkpt; case 0x81: /* BUGCHK */ - info.si_signo = TARGET_SIGTRAP; - info.si_errno = 0; - info.si_code = 0; - info._sifields._sigfault._addr = env->pc; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; + goto do_sigtrap_unk; case 0x83: /* CALLSYS */ trapnr = env->ir[IR_V0]; @@ -111,11 +78,11 @@ void cpu_loop(CPUAlphaState *env) env->ir[IR_A2], env->ir[IR_A3], env->ir[IR_A4], env->ir[IR_A5], 0, 0); - if (sysret == -TARGET_ERESTARTSYS) { + if (sysret == -QEMU_ERESTARTSYS) { env->pc -= 4; break; } - if (sysret == -TARGET_QEMU_ESIGRETURN) { + if (sysret == -QEMU_ESIGRETURN) { break; } /* Syscall writes 0 to V0 to bypass error check, similar @@ -143,47 +110,43 @@ void cpu_loop(CPUAlphaState *env) abort(); case 0xAA: /* GENTRAP */ - info.si_signo = TARGET_SIGFPE; switch (env->ir[IR_A0]) { case TARGET_GEN_INTOVF: - info.si_code = TARGET_FPE_INTOVF; + si_code = TARGET_FPE_INTOVF; break; case TARGET_GEN_INTDIV: - info.si_code = TARGET_FPE_INTDIV; + si_code = TARGET_FPE_INTDIV; break; case TARGET_GEN_FLTOVF: - info.si_code = TARGET_FPE_FLTOVF; + si_code = TARGET_FPE_FLTOVF; break; case TARGET_GEN_FLTUND: - info.si_code = TARGET_FPE_FLTUND; + si_code = TARGET_FPE_FLTUND; break; case TARGET_GEN_FLTINV: - info.si_code = TARGET_FPE_FLTINV; + si_code = TARGET_FPE_FLTINV; break; case TARGET_GEN_FLTINE: - info.si_code = TARGET_FPE_FLTRES; + si_code = TARGET_FPE_FLTRES; break; case TARGET_GEN_ROPRAND: - info.si_code = 0; + si_code = TARGET_FPE_FLTUNK; break; default: - info.si_signo = TARGET_SIGTRAP; - info.si_code = 0; - break; + goto do_sigtrap_unk; } - info.si_errno = 0; - info._sifields._sigfault._addr = env->pc; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGFPE, si_code, env->pc); break; default: goto do_sigill; } break; case EXCP_DEBUG: - info.si_signo = TARGET_SIGTRAP; - info.si_errno = 0; - info.si_code = TARGET_TRAP_BRKPT; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + do_sigtrap_brkpt: + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc); + break; + do_sigtrap_unk: + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_UNK, env->pc); break; case EXCP_INTERRUPT: /* Just indicate that signals should be handled asap. 
*/ diff --git a/linux-user/alpha/signal.c b/linux-user/alpha/signal.c index 1129ffeea1..4ec42994d4 100644 --- a/linux-user/alpha/signal.c +++ b/linux-user/alpha/signal.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" @@ -54,13 +55,11 @@ struct target_ucontext { struct target_sigframe { struct target_sigcontext sc; - unsigned int retcode[3]; }; struct target_rt_sigframe { target_siginfo_t info; struct target_ucontext uc; - unsigned int retcode[3]; }; #define INSN_MOV_R30_R16 0x47fe0410 @@ -141,12 +140,7 @@ void setup_frame(int sig, struct target_sigaction *ka, if (ka->ka_restorer) { r26 = ka->ka_restorer; } else { - __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); - __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, - &frame->retcode[1]); - __put_user(INSN_CALLSYS, &frame->retcode[2]); - /* imb() */ - r26 = frame_addr + offsetof(struct target_sigframe, retcode); + r26 = default_sigreturn; } unlock_user_struct(frame, frame_addr, 1); @@ -195,12 +189,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, if (ka->ka_restorer) { r26 = ka->ka_restorer; } else { - __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); - __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, - &frame->retcode[1]); - __put_user(INSN_CALLSYS, &frame->retcode[2]); - /* imb(); */ - r26 = frame_addr + offsetof(struct target_rt_sigframe, retcode); + r26 = default_rt_sigreturn; } if (err) { @@ -236,11 +225,11 @@ long do_sigreturn(CPUAlphaState *env) restore_sigcontext(env, sc); unlock_user_struct(sc, sc_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; badframe: force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; } long do_rt_sigreturn(CPUAlphaState *env) @@ -260,11 +249,29 @@ long do_rt_sigreturn(CPUAlphaState *env) target_restore_altstack(&frame->uc.tuc_stack, env); unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; +} + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 6 * 4, 0); + assert(tramp != NULL); + + default_sigreturn = sigtramp_page; + __put_user(INSN_MOV_R30_R16, &tramp[0]); + __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, &tramp[1]); + __put_user(INSN_CALLSYS, &tramp[2]); + + default_rt_sigreturn = sigtramp_page + 3 * 4; + __put_user(INSN_MOV_R30_R16, &tramp[3]); + __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, &tramp[4]); + __put_user(INSN_CALLSYS, &tramp[5]); + + unlock_user(tramp, sigtramp_page, 6 * 4); } diff --git a/linux-user/alpha/target_mman.h b/linux-user/alpha/target_mman.h new file mode 100644 index 0000000000..cd6e3d70a6 --- /dev/null +++ b/linux-user/alpha/target_mman.h @@ -0,0 +1,8 @@ +#ifndef ALPHA_TARGET_MMAN_H +#define ALPHA_TARGET_MMAN_H + +#define TARGET_MADV_DONTNEED 6 + +#include "../generic/target_mman.h" + +#endif diff --git a/linux-user/alpha/target_prctl.h b/linux-user/alpha/target_prctl.h new file mode 100644 index 0000000000..5629ddbf39 --- /dev/null +++ b/linux-user/alpha/target_prctl.h @@ -0,0 +1 @@ +#include "../generic/target_prctl_unalign.h" diff --git a/linux-user/alpha/target_resource.h b/linux-user/alpha/target_resource.h new file mode 100644 index 0000000000..c9b082faee --- /dev/null +++ b/linux-user/alpha/target_resource.h @@ -0,0 +1,21 @@ +#ifndef ALPHA_TARGET_RESOURCE_H 
+#define ALPHA_TARGET_RESOURCE_H + +#include "../generic/target_resource.h" + +#undef TARGET_RLIM_INFINITY +#define TARGET_RLIM_INFINITY 0x7fffffffffffffffull + +#undef TARGET_RLIMIT_NOFILE +#define TARGET_RLIMIT_NOFILE 6 + +#undef TARGET_RLIMIT_AS +#define TARGET_RLIMIT_AS 7 + +#undef TARGET_RLIMIT_NPROC +#define TARGET_RLIMIT_NPROC 8 + +#undef TARGET_RLIMIT_MEMLOCK +#define TARGET_RLIMIT_MEMLOCK 9 + +#endif diff --git a/linux-user/alpha/target_signal.h b/linux-user/alpha/target_signal.h index 250642913e..bbb06e5463 100644 --- a/linux-user/alpha/target_signal.h +++ b/linux-user/alpha/target_signal.h @@ -62,7 +62,6 @@ typedef struct target_sigaltstack { #define TARGET_SA_SIGINFO 0x00000040 #define TARGET_MINSIGSTKSZ 4096 -#define TARGET_SIGSTKSZ 16384 /* From . */ #define TARGET_GEN_INTOVF -1 /* integer overflow */ @@ -93,6 +92,7 @@ typedef struct target_sigaltstack { #define TARGET_ARCH_HAS_SETUP_FRAME #define TARGET_ARCH_HAS_KA_RESTORER +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 /* bit-flags */ #define TARGET_SS_AUTODISARM (1U << 31) /* disable sas during sighandling */ diff --git a/linux-user/alpha/target_syscall.h b/linux-user/alpha/target_syscall.h index 03091bf0a8..fda3a49f29 100644 --- a/linux-user/alpha/target_syscall.h +++ b/linux-user/alpha/target_syscall.h @@ -63,7 +63,6 @@ struct target_pt_regs { #define TARGET_UAC_NOPRINT 1 #define TARGET_UAC_NOFIX 2 #define TARGET_UAC_SIGBUS 4 -#define TARGET_MINSIGSTKSZ 4096 #define TARGET_MCL_CURRENT 0x2000 #define TARGET_MCL_FUTURE 0x4000 #define TARGET_MCL_ONFAULT 0x8000 diff --git a/linux-user/arm/cpu_loop.c b/linux-user/arm/cpu_loop.c index 69632d15be..c0790f3246 100644 --- a/linux-user/arm/cpu_loop.c +++ b/linux-user/arm/cpu_loop.c @@ -18,11 +18,13 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu.h" +#include "user-internals.h" #include "elf.h" #include "cpu_loop-common.h" +#include "signal-common.h" #include "semihosting/common-semi.h" +#include "target/arm/syndrome.h" #define get_user_code_u32(x, gaddr, env) \ ({ abi_long __r = get_user_u32((x), (gaddr)); \ @@ -72,10 +74,70 @@ put_user_u16(__x, (gaddr)); \ }) -/* Commpage handling -- there is no commpage for AArch64 */ +/* + * Similar to code in accel/tcg/user-exec.c, but outside the execution loop. + * Must be called with mmap_lock. + * We get the PC of the entry address - which is as good as anything, + * on a real kernel what you get depends on which mode it uses. + */ +static void *atomic_mmu_lookup(CPUArchState *env, uint32_t addr, int size) +{ + int need_flags = PAGE_READ | PAGE_WRITE_ORG | PAGE_VALID; + int page_flags; + + /* Enforce guest required alignment. */ + if (unlikely(addr & (size - 1))) { + force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr); + return NULL; + } + + page_flags = page_get_flags(addr); + if (unlikely((page_flags & need_flags) != need_flags)) { + force_sig_fault(TARGET_SIGSEGV, + page_flags & PAGE_VALID ? 
+ TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR, addr); + return NULL; + } + + return g2h(env_cpu(env), addr); +} /* - * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt + * See the Linux kernel's Documentation/arm/kernel_user_helpers.rst + * Input: + * r0 = oldval + * r1 = newval + * r2 = pointer to target value + * + * Output: + * r0 = 0 if *ptr was changed, non-0 if no exchange happened + * C set if *ptr was changed, clear if no exchange happened + */ +static void arm_kernel_cmpxchg32_helper(CPUARMState *env) +{ + uint32_t oldval, newval, val, addr, cpsr, *host_addr; + + oldval = env->regs[0]; + newval = env->regs[1]; + addr = env->regs[2]; + + mmap_lock(); + host_addr = atomic_mmu_lookup(env, addr, 4); + if (!host_addr) { + mmap_unlock(); + return; + } + + val = qatomic_cmpxchg__nocheck(host_addr, oldval, newval); + mmap_unlock(); + + cpsr = (val == oldval) * CPSR_C; + cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr); + env->regs[0] = cpsr ? 0 : -1; +} + +/* + * See the Linux kernel's Documentation/arm/kernel_user_helpers.rst * Input: * r0 = pointer to oldval * r1 = pointer to newval @@ -92,61 +154,54 @@ static void arm_kernel_cmpxchg64_helper(CPUARMState *env) { uint64_t oldval, newval, val; uint32_t addr, cpsr; - target_siginfo_t info; + uint64_t *host_addr; - /* Based on the 32 bit code in do_kernel_trap */ + addr = env->regs[0]; + if (get_user_u64(oldval, addr)) { + goto segv; + } - /* XXX: This only works between threads, not between processes. - It's probably possible to implement this with native host - operations. However things like ldrex/strex are much harder so - there's not much point trying. */ - start_exclusive(); - cpsr = cpsr_read(env); + addr = env->regs[1]; + if (get_user_u64(newval, addr)) { + goto segv; + } + + mmap_lock(); addr = env->regs[2]; - - if (get_user_u64(oldval, env->regs[0])) { - env->exception.vaddress = env->regs[0]; - goto segv; - }; - - if (get_user_u64(newval, env->regs[1])) { - env->exception.vaddress = env->regs[1]; - goto segv; - }; - - if (get_user_u64(val, addr)) { - env->exception.vaddress = addr; - goto segv; + host_addr = atomic_mmu_lookup(env, addr, 8); + if (!host_addr) { + mmap_unlock(); + return; } +#ifdef CONFIG_ATOMIC64 + val = qatomic_cmpxchg__nocheck(host_addr, oldval, newval); + cpsr = (val == oldval) * CPSR_C; +#else + /* + * This only works between threads, not between processes, but since + * the host has no 64-bit cmpxchg, it is the best that we can do. + */ + start_exclusive(); + val = *host_addr; if (val == oldval) { - val = newval; - - if (put_user_u64(val, addr)) { - env->exception.vaddress = addr; - goto segv; - }; - - env->regs[0] = 0; - cpsr |= CPSR_C; + *host_addr = newval; + cpsr = CPSR_C; } else { - env->regs[0] = -1; - cpsr &= ~CPSR_C; + cpsr = 0; } - cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr); end_exclusive(); +#endif + mmap_unlock(); + + cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr); + env->regs[0] = cpsr ? 0 : -1; return; -segv: - end_exclusive(); - /* We get the PC of the entry address - which is as good as anything, - on a real kernel what you get depends on which mode it uses. */ - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - /* XXX: check env->error_code */ - info.si_code = TARGET_SEGV_MAPERR; - info._sifields._sigfault._addr = env->exception.vaddress; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + segv: + force_sig_fault(TARGET_SIGSEGV, + page_get_flags(addr) & PAGE_VALID ? + TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR, addr); } /* Handle a jump to the kernel code page. 
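These helpers service the commpage entry points dispatched by do_kernel_trap() just below; guest code reaches them by calling fixed addresses, per Documentation/arm/kernel_user_helpers.rst. A minimal guest-side sketch of the 32-bit cmpxchg ABI documented above (illustrative only):

/* r0 = oldval, r1 = newval, r2 = ptr; returns 0 in r0 when *ptr changed. */
typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)

static int guest_atomic_add_one(volatile int *counter)
{
    int old_val, new_val;

    do {
        old_val = *counter;
        new_val = old_val + 1;
    } while (kuser_cmpxchg(old_val, new_val, counter) != 0);

    return new_val;
}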
*/ @@ -154,36 +209,13 @@ static int do_kernel_trap(CPUARMState *env) { uint32_t addr; - uint32_t cpsr; - uint32_t val; switch (env->regs[15]) { case 0xffff0fa0: /* __kernel_memory_barrier */ - /* ??? No-op. Will need to do better for SMP. */ + smp_mb(); break; case 0xffff0fc0: /* __kernel_cmpxchg */ - /* XXX: This only works between threads, not between processes. - It's probably possible to implement this with native host - operations. However things like ldrex/strex are much harder so - there's not much point trying. */ - start_exclusive(); - cpsr = cpsr_read(env); - addr = env->regs[2]; - /* FIXME: This should SEGV if the access fails. */ - if (get_user_u32(val, addr)) - val = ~env->regs[0]; - if (val == env->regs[0]) { - val = env->regs[1]; - /* FIXME: Check for segfaults. */ - put_user_u32(val, addr); - env->regs[0] = 0; - cpsr |= CPSR_C; - } else { - env->regs[0] = -1; - cpsr &= ~CPSR_C; - } - cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr); - end_exclusive(); + arm_kernel_cmpxchg32_helper(env); break; case 0xffff0fe0: /* __kernel_get_tls */ env->regs[0] = cpu_get_tls(env); @@ -198,7 +230,7 @@ do_kernel_trap(CPUARMState *env) /* Jump back to the caller. */ addr = env->regs[14]; if (addr & 1) { - env->thumb = 1; + env->thumb = true; addr &= ~1; } env->regs[15] = addr; @@ -266,16 +298,13 @@ static bool emulate_arm_fpa11(CPUARMState *env, uint32_t opcode) ts->fpa.fpsr |= raise & ~enabled; if (raise & enabled) { - target_siginfo_t info = { }; - /* * The kernel's nwfpe emulator does not pass a real si_code. - * It merely uses send_sig(SIGFPE, current, 1). + * It merely uses send_sig(SIGFPE, current, 1), which results in + * __send_signal() filling out SI_KERNEL with pid and uid 0 (under + * the "SEND_SIG_PRIV" case). That's what our force_sig() does. */ - info.si_signo = TARGET_SIGFPE; - info.si_code = TARGET_SI_KERNEL; - - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig(TARGET_SIGFPE); } else { env->regs[15] += 4; } @@ -285,10 +314,8 @@ static bool emulate_arm_fpa11(CPUARMState *env, uint32_t opcode) void cpu_loop(CPUARMState *env) { CPUState *cs = env_cpu(env); - int trapnr; + int trapnr, si_signo, si_code; unsigned int n, insn; - target_siginfo_t info; - uint32_t addr; abi_ulong ret; for(;;) { @@ -323,11 +350,8 @@ void cpu_loop(CPUARMState *env) break; } - info.si_signo = TARGET_SIGILL; - info.si_errno = 0; - info.si_code = TARGET_ILL_ILLOPN; - info._sifields._sigfault._addr = env->regs[15]; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPN, + env->regs[15]); } break; case EXCP_SWI: @@ -395,18 +419,14 @@ void cpu_loop(CPUARMState *env) * Otherwise SIGILL. This includes any SWI with * immediate not originally 0x9fxxxx, because * of the earlier XOR. + * Like the real kernel, we report the addr of the + * SWI in the siginfo si_addr but leave the PC + * pointing at the insn after the SWI. */ - info.si_signo = TARGET_SIGILL; - info.si_errno = 0; - info.si_code = TARGET_ILL_ILLTRP; - info._sifields._sigfault._addr = env->regs[15]; - if (env->thumb) { - info._sifields._sigfault._addr -= 2; - } else { - info._sifields._sigfault._addr -= 4; - } - queue_signal(env, info.si_signo, - QEMU_SI_FAULT, &info); + abi_ulong faultaddr = env->regs[15]; + faultaddr -= env->thumb ? 
2 : 4; + force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLTRP, + faultaddr); } break; } @@ -420,16 +440,16 @@ void cpu_loop(CPUARMState *env) env->regs[4], env->regs[5], 0, 0); - if (ret == -TARGET_ERESTARTSYS) { + if (ret == -QEMU_ERESTARTSYS) { env->regs[15] -= env->thumb ? 2 : 4; - } else if (ret != -TARGET_QEMU_ESIGRETURN) { + } else if (ret != -QEMU_ESIGRETURN) { env->regs[0] = ret; } } } break; case EXCP_SEMIHOST: - env->regs[0] = do_common_semihosting(cs); + do_common_semihosting(cs); env->regs[15] += env->thumb ? 2 : 4; break; case EXCP_INTERRUPT: @@ -437,23 +457,35 @@ void cpu_loop(CPUARMState *env) break; case EXCP_PREFETCH_ABORT: case EXCP_DATA_ABORT: - addr = env->exception.vaddress; - { - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - /* XXX: check env->error_code */ - info.si_code = TARGET_SEGV_MAPERR; - info._sifields._sigfault._addr = addr; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + /* For user-only we don't set TTBCR_EAE, so look at the FSR. */ + switch (env->exception.fsr & 0x1f) { + case 0x1: /* Alignment */ + si_signo = TARGET_SIGBUS; + si_code = TARGET_BUS_ADRALN; + break; + case 0x3: /* Access flag fault, level 1 */ + case 0x6: /* Access flag fault, level 2 */ + case 0x9: /* Domain fault, level 1 */ + case 0xb: /* Domain fault, level 2 */ + case 0xd: /* Permission fault, level 1 */ + case 0xf: /* Permission fault, level 2 */ + si_signo = TARGET_SIGSEGV; + si_code = TARGET_SEGV_ACCERR; + break; + case 0x5: /* Translation fault, level 1 */ + case 0x7: /* Translation fault, level 2 */ + si_signo = TARGET_SIGSEGV; + si_code = TARGET_SEGV_MAPERR; + break; + default: + g_assert_not_reached(); } + force_sig_fault(si_signo, si_code, env->exception.vaddress); break; case EXCP_DEBUG: case EXCP_BKPT: excp_debug: - info.si_signo = TARGET_SIGTRAP; - info.si_errno = 0; - info.si_code = TARGET_TRAP_BRKPT; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->regs[15]); break; case EXCP_KERNEL_TRAP: if (do_kernel_trap(env)) @@ -486,7 +518,7 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) for(i = 0; i < 16; i++) { env->regs[i] = regs->uregs[i]; } -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN /* Enable BE8. 
*/ if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4 && (info->elf_flags & EF_ARM_BE8)) { diff --git a/linux-user/arm/nwfpe/double_cpdo.c b/linux-user/arm/nwfpe/double_cpdo.c index 1cef380852..d45ece2e2f 100644 --- a/linux-user/arm/nwfpe/double_cpdo.c +++ b/linux-user/arm/nwfpe/double_cpdo.c @@ -150,7 +150,7 @@ unsigned int DoubleCPDO(const unsigned int opcode) case MNF_CODE: { unsigned int *p = (unsigned int*)&rFm; -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN p[0] ^= 0x80000000; #else p[1] ^= 0x80000000; @@ -162,7 +162,7 @@ unsigned int DoubleCPDO(const unsigned int opcode) case ABS_CODE: { unsigned int *p = (unsigned int*)&rFm; -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN p[0] &= 0x7fffffff; #else p[1] &= 0x7fffffff; diff --git a/linux-user/arm/nwfpe/fpa11_cpdt.c b/linux-user/arm/nwfpe/fpa11_cpdt.c index c32b0c2faa..fee525937c 100644 --- a/linux-user/arm/nwfpe/fpa11_cpdt.c +++ b/linux-user/arm/nwfpe/fpa11_cpdt.c @@ -44,7 +44,7 @@ void loadDouble(const unsigned int Fn, target_ulong addr) unsigned int *p; p = (unsigned int*)&fpa11->fpreg[Fn].fDouble; fpa11->fType[Fn] = typeDouble; -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN /* FIXME - handle failure of get_user() */ get_user_u32(p[0], addr); /* sign & exponent */ get_user_u32(p[1], addr + 4); @@ -147,7 +147,7 @@ void storeDouble(const unsigned int Fn, target_ulong addr) default: val = fpa11->fpreg[Fn].fDouble; } /* FIXME - handle put_user() failures */ -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN put_user_u32(p[0], addr); /* msw */ put_user_u32(p[1], addr + 4); /* lsw */ #else diff --git a/linux-user/arm/signal.c b/linux-user/arm/signal.c index 32b68ee302..cf99fd7b8a 100644 --- a/linux-user/arm/signal.c +++ b/linux-user/arm/signal.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" @@ -45,15 +46,7 @@ struct target_sigcontext { abi_ulong fault_address; }; -struct target_ucontext_v1 { - abi_ulong tuc_flags; - abi_ulong tuc_link; - target_stack_t tuc_stack; - struct target_sigcontext tuc_mcontext; - target_sigset_t tuc_sigmask; /* mask last for extensibility */ -}; - -struct target_ucontext_v2 { +struct target_ucontext { abi_ulong tuc_flags; abi_ulong tuc_link; target_stack_t tuc_stack; @@ -97,68 +90,30 @@ struct target_iwmmxt_sigframe { #define TARGET_VFP_MAGIC 0x56465001 #define TARGET_IWMMXT_MAGIC 0x12ef842a -struct sigframe_v1 +struct sigframe { - struct target_sigcontext sc; - abi_ulong extramask[TARGET_NSIG_WORDS-1]; + struct target_ucontext uc; abi_ulong retcode[4]; }; -struct sigframe_v2 -{ - struct target_ucontext_v2 uc; - abi_ulong retcode[4]; -}; - -struct rt_sigframe_v1 -{ - abi_ulong pinfo; - abi_ulong puc; - struct target_siginfo info; - struct target_ucontext_v1 uc; - abi_ulong retcode[4]; -}; - -struct rt_sigframe_v2 +struct rt_sigframe { struct target_siginfo info; - struct target_ucontext_v2 uc; - abi_ulong retcode[4]; + struct sigframe sig; }; +static abi_ptr sigreturn_fdpic_tramp; + /* - * For ARM syscalls, we encode the syscall number into the instruction. + * Up to 3 words of 'retcode' in the sigframe are code, + * with retcode[3] being used by fdpic for the function descriptor. + * This code is not actually executed, but is retained for ABI compat. + * + * We will create a table of 8 retcode variants in the sigtramp page. + * Let each table entry use 3 words. 
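For reference, the indexing that setup_return() applies to these tables works out as follows (assuming the entry order implied by retcode_idx):

/*
 * retcode_idx = thumb + (SA_SIGINFO ? 2 : 0), RETCODE_BYTES = 12, so:
 *
 *   idx 0: ARM   sigreturn      idx 2: ARM   rt_sigreturn
 *   idx 1: Thumb sigreturn      idx 3: Thumb rt_sigreturn
 *
 * entry address = table base + retcode_idx * RETCODE_BYTES, with + thumb
 * setting the interworking bit.  The same four indices exist in both the
 * plain table at default_sigreturn and the FDPIC table at
 * sigreturn_fdpic_tramp, giving the 8 variants mentioned above.
 */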
*/ -#define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE)) -#define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE)) - -/* - * For Thumb syscalls, we pass the syscall number via r7. We therefore - * need two 16-bit instructions. - */ -#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn)) -#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn)) - -static const abi_ulong retcodes[4] = { - SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, - SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN -}; - -/* - * Stub needed to make sure the FD register (r9) contains the right - * value. - */ -static const unsigned long sigreturn_fdpic_codes[3] = { - 0xe59fc004, /* ldr r12, [pc, #4] to read function descriptor */ - 0xe59c9004, /* ldr r9, [r12, #4] to setup GOT */ - 0xe59cf000 /* ldr pc, [r12] to jump into restorer */ -}; - -static const unsigned long sigreturn_fdpic_thumb_codes[3] = { - 0xc008f8df, /* ldr r12, [pc, #8] to read function descriptor */ - 0x9004f8dc, /* ldr r9, [r12, #4] to setup GOT */ - 0xf000f8dc /* ldr pc, [r12] to jump into restorer */ -}; +#define RETCODE_WORDS 3 +#define RETCODE_BYTES (RETCODE_WORDS * 4) static inline int valid_user_regs(CPUARMState *regs) { @@ -206,15 +161,15 @@ get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize) } static int -setup_return(CPUARMState *env, struct target_sigaction *ka, - abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr) +setup_return(CPUARMState *env, struct target_sigaction *ka, int usig, + struct sigframe *frame, abi_ulong sp_addr) { abi_ulong handler = 0; abi_ulong handler_fdpic_GOT = 0; abi_ulong retcode; - - int thumb; + int thumb, retcode_idx; int is_fdpic = info_is_fdpic(((TaskState *)thread_cpu->opaque)->info); + bool copy_retcode; if (is_fdpic) { /* In FDPIC mode, ka->_sa_handler points to a function @@ -231,6 +186,7 @@ setup_return(CPUARMState *env, struct target_sigaction *ka, } thumb = handler & 1; + retcode_idx = thumb + (ka->sa_flags & TARGET_SA_SIGINFO ? 2 : 0); uint32_t cpsr = cpsr_read(env); @@ -248,53 +204,37 @@ setup_return(CPUARMState *env, struct target_sigaction *ka, if (ka->sa_flags & TARGET_SA_RESTORER) { if (is_fdpic) { - /* For FDPIC we ensure that the restorer is called with a - * correct r9 value. For that we need to write code on - * the stack that sets r9 and jumps back to restorer - * value. - */ - if (thumb) { - __put_user(sigreturn_fdpic_thumb_codes[0], rc); - __put_user(sigreturn_fdpic_thumb_codes[1], rc + 1); - __put_user(sigreturn_fdpic_thumb_codes[2], rc + 2); - __put_user((abi_ulong)ka->sa_restorer, rc + 3); - } else { - __put_user(sigreturn_fdpic_codes[0], rc); - __put_user(sigreturn_fdpic_codes[1], rc + 1); - __put_user(sigreturn_fdpic_codes[2], rc + 2); - __put_user((abi_ulong)ka->sa_restorer, rc + 3); - } - - retcode = rc_addr + thumb; + __put_user((abi_ulong)ka->sa_restorer, &frame->retcode[3]); + retcode = (sigreturn_fdpic_tramp + + retcode_idx * RETCODE_BYTES + thumb); + copy_retcode = true; } else { retcode = ka->sa_restorer; + copy_retcode = false; } } else { - unsigned int idx = thumb; + retcode = default_sigreturn + retcode_idx * RETCODE_BYTES + thumb; + copy_retcode = true; + } - if (ka->sa_flags & TARGET_SA_SIGINFO) { - idx += 2; - } - - __put_user(retcodes[idx], rc); - - retcode = rc_addr + thumb; + /* Copy the code to the stack slot for ABI compatibility. 
*/ + if (copy_retcode) { + memcpy(frame->retcode, g2h_untagged(retcode & ~1), RETCODE_BYTES); } env->regs[0] = usig; if (is_fdpic) { env->regs[9] = handler_fdpic_GOT; } - env->regs[13] = frame_addr; + env->regs[13] = sp_addr; env->regs[14] = retcode; env->regs[15] = handler & (thumb ? ~1 : ~3); cpsr_write(env, cpsr, CPSR_IT | CPSR_T | CPSR_E, CPSRWriteByInstr); - arm_rebuild_hflags(env); return 0; } -static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env) +static abi_ulong *setup_sigframe_vfp(abi_ulong *regspace, CPUARMState *env) { int i; struct target_vfp_sigframe *vfpframe; @@ -311,8 +251,7 @@ static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env) return (abi_ulong*)(vfpframe+1); } -static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace, - CPUARMState *env) +static abi_ulong *setup_sigframe_iwmmxt(abi_ulong *regspace, CPUARMState *env) { int i; struct target_iwmmxt_sigframe *iwmmxtframe; @@ -331,15 +270,15 @@ static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace, return (abi_ulong*)(iwmmxtframe+1); } -static void setup_sigframe_v2(struct target_ucontext_v2 *uc, - target_sigset_t *set, CPUARMState *env) +static void setup_sigframe(struct target_ucontext *uc, + target_sigset_t *set, CPUARMState *env) { struct target_sigaltstack stack; int i; abi_ulong *regspace; /* Clear all the bits of the ucontext we don't use. */ - memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext)); + memset(uc, 0, offsetof(struct target_ucontext, tuc_mcontext)); memset(&stack, 0, sizeof(stack)); target_save_altstack(&stack, env); @@ -349,10 +288,10 @@ static void setup_sigframe_v2(struct target_ucontext_v2 *uc, /* Save coprocessor signal frame. */ regspace = uc->tuc_regspace; if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) { - regspace = setup_sigframe_v2_vfp(regspace, env); + regspace = setup_sigframe_vfp(regspace, env); } if (arm_feature(env, ARM_FEATURE_IWMMXT)) { - regspace = setup_sigframe_v2_iwmmxt(regspace, env); + regspace = setup_sigframe_iwmmxt(regspace, env); } /* Write terminating magic word */ @@ -363,148 +302,23 @@ static void setup_sigframe_v2(struct target_ucontext_v2 *uc, } } -/* compare linux/arch/arm/kernel/signal.c:setup_frame() */ -static void setup_frame_v1(int usig, struct target_sigaction *ka, - target_sigset_t *set, CPUARMState *regs) -{ - struct sigframe_v1 *frame; - abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); - int i; - - trace_user_setup_frame(regs, frame_addr); - if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { - goto sigsegv; - } - - setup_sigcontext(&frame->sc, regs, set->sig[0]); - - for(i = 1; i < TARGET_NSIG_WORDS; i++) { - __put_user(set->sig[i], &frame->extramask[i - 1]); - } - - if (setup_return(regs, ka, frame->retcode, frame_addr, usig, - frame_addr + offsetof(struct sigframe_v1, retcode))) { - goto sigsegv; - } - - unlock_user_struct(frame, frame_addr, 1); - return; -sigsegv: - unlock_user_struct(frame, frame_addr, 1); - force_sigsegv(usig); -} - -static void setup_frame_v2(int usig, struct target_sigaction *ka, - target_sigset_t *set, CPUARMState *regs) -{ - struct sigframe_v2 *frame; - abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); - - trace_user_setup_frame(regs, frame_addr); - if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { - goto sigsegv; - } - - setup_sigframe_v2(&frame->uc, set, regs); - - if (setup_return(regs, ka, frame->retcode, frame_addr, usig, - frame_addr + offsetof(struct sigframe_v2, retcode))) { - goto sigsegv; - } - - 
unlock_user_struct(frame, frame_addr, 1); - return; -sigsegv: - unlock_user_struct(frame, frame_addr, 1); - force_sigsegv(usig); -} - void setup_frame(int usig, struct target_sigaction *ka, target_sigset_t *set, CPUARMState *regs) { - if (get_osversion() >= 0x020612) { - setup_frame_v2(usig, ka, set, regs); - } else { - setup_frame_v1(usig, ka, set, regs); - } -} + struct sigframe *frame; + abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); -/* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */ -static void setup_rt_frame_v1(int usig, struct target_sigaction *ka, - target_siginfo_t *info, - target_sigset_t *set, CPUARMState *env) -{ - struct rt_sigframe_v1 *frame; - abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); - struct target_sigaltstack stack; - int i; - abi_ulong info_addr, uc_addr; - - trace_user_setup_rt_frame(env, frame_addr); + trace_user_setup_frame(regs, frame_addr); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { goto sigsegv; } - info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info); - __put_user(info_addr, &frame->pinfo); - uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc); - __put_user(uc_addr, &frame->puc); - tswap_siginfo(&frame->info, info); + setup_sigframe(&frame->uc, set, regs); - /* Clear all the bits of the ucontext we don't use. */ - memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext)); - - memset(&stack, 0, sizeof(stack)); - target_save_altstack(&stack, env); - memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack)); - - setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]); - for(i = 0; i < TARGET_NSIG_WORDS; i++) { - __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); - } - - if (setup_return(env, ka, frame->retcode, frame_addr, usig, - frame_addr + offsetof(struct rt_sigframe_v1, retcode))) { + if (setup_return(regs, ka, usig, frame, frame_addr)) { goto sigsegv; } - env->regs[1] = info_addr; - env->regs[2] = uc_addr; - - unlock_user_struct(frame, frame_addr, 1); - return; -sigsegv: - unlock_user_struct(frame, frame_addr, 1); - force_sigsegv(usig); -} - -static void setup_rt_frame_v2(int usig, struct target_sigaction *ka, - target_siginfo_t *info, - target_sigset_t *set, CPUARMState *env) -{ - struct rt_sigframe_v2 *frame; - abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); - abi_ulong info_addr, uc_addr; - - trace_user_setup_rt_frame(env, frame_addr); - if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { - goto sigsegv; - } - - info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info); - uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc); - tswap_siginfo(&frame->info, info); - - setup_sigframe_v2(&frame->uc, set, env); - - if (setup_return(env, ka, frame->retcode, frame_addr, usig, - frame_addr + offsetof(struct rt_sigframe_v2, retcode))) { - goto sigsegv; - } - - env->regs[1] = info_addr; - env->regs[2] = uc_addr; - unlock_user_struct(frame, frame_addr, 1); return; sigsegv: @@ -516,11 +330,33 @@ void setup_rt_frame(int usig, struct target_sigaction *ka, target_siginfo_t *info, target_sigset_t *set, CPUARMState *env) { - if (get_osversion() >= 0x020612) { - setup_rt_frame_v2(usig, ka, info, set, env); - } else { - setup_rt_frame_v1(usig, ka, info, set, env); + struct rt_sigframe *frame; + abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); + abi_ulong info_addr, uc_addr; + + trace_user_setup_rt_frame(env, frame_addr); + if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { + goto sigsegv; } + + info_addr = frame_addr + 
offsetof(struct rt_sigframe, info); + uc_addr = frame_addr + offsetof(struct rt_sigframe, sig.uc); + tswap_siginfo(&frame->info, info); + + setup_sigframe(&frame->sig.uc, set, env); + + if (setup_return(env, ka, usig, &frame->sig, frame_addr)) { + goto sigsegv; + } + + env->regs[1] = info_addr; + env->regs[2] = uc_addr; + + unlock_user_struct(frame, frame_addr, 1); + return; +sigsegv: + unlock_user_struct(frame, frame_addr, 1); + force_sigsegv(usig); } static int @@ -547,62 +383,13 @@ restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc) __get_user(env->regs[15], &sc->arm_pc); __get_user(cpsr, &sc->arm_cpsr); cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr); - arm_rebuild_hflags(env); err |= !valid_user_regs(env); return err; } -static long do_sigreturn_v1(CPUARMState *env) -{ - abi_ulong frame_addr; - struct sigframe_v1 *frame = NULL; - target_sigset_t set; - sigset_t host_set; - int i; - - /* - * Since we stacked the signal on a 64-bit boundary, - * then 'sp' should be word aligned here. If it's - * not, then the user is trying to mess with us. - */ - frame_addr = env->regs[13]; - trace_user_do_sigreturn(env, frame_addr); - if (frame_addr & 7) { - goto badframe; - } - - if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { - goto badframe; - } - - __get_user(set.sig[0], &frame->sc.oldmask); - for(i = 1; i < TARGET_NSIG_WORDS; i++) { - __get_user(set.sig[i], &frame->extramask[i - 1]); - } - - target_to_host_sigset_internal(&host_set, &set); - set_sigmask(&host_set); - - if (restore_sigcontext(env, &frame->sc)) { - goto badframe; - } - -#if 0 - /* Send SIGTRAP if we're single-stepping */ - if (ptrace_cancel_bpt(current)) - send_sig(SIGTRAP, current, 1); -#endif - unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; - -badframe: - force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; -} - -static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace) +static abi_ulong *restore_sigframe_vfp(CPUARMState *env, abi_ulong *regspace) { int i; abi_ulong magic, sz; @@ -632,8 +419,8 @@ static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace) return (abi_ulong*)(vfpframe + 1); } -static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env, - abi_ulong *regspace) +static abi_ulong *restore_sigframe_iwmmxt(CPUARMState *env, + abi_ulong *regspace) { int i; abi_ulong magic, sz; @@ -657,9 +444,9 @@ static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env, return (abi_ulong*)(iwmmxtframe + 1); } -static int do_sigframe_return_v2(CPUARMState *env, - target_ulong context_addr, - struct target_ucontext_v2 *uc) +static int do_sigframe_return(CPUARMState *env, + target_ulong context_addr, + struct target_ucontext *uc) { sigset_t host_set; abi_ulong *regspace; @@ -667,19 +454,20 @@ static int do_sigframe_return_v2(CPUARMState *env, target_to_host_sigset(&host_set, &uc->tuc_sigmask); set_sigmask(&host_set); - if (restore_sigcontext(env, &uc->tuc_mcontext)) + if (restore_sigcontext(env, &uc->tuc_mcontext)) { return 1; + } /* Restore coprocessor signal frame */ regspace = uc->tuc_regspace; if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) { - regspace = restore_sigframe_v2_vfp(env, regspace); + regspace = restore_sigframe_vfp(env, regspace); if (!regspace) { return 1; } } if (arm_feature(env, ARM_FEATURE_IWMMXT)) { - regspace = restore_sigframe_v2_iwmmxt(env, regspace); + regspace = restore_sigframe_iwmmxt(env, regspace); if (!regspace) { return 1; } @@ -696,10 +484,10 @@ static int 
do_sigframe_return_v2(CPUARMState *env, return 0; } -static long do_sigreturn_v2(CPUARMState *env) +long do_sigreturn(CPUARMState *env) { abi_ulong frame_addr; - struct sigframe_v2 *frame = NULL; + struct sigframe *frame = NULL; /* * Since we stacked the signal on a 64-bit boundary, @@ -716,116 +504,127 @@ static long do_sigreturn_v2(CPUARMState *env) goto badframe; } - if (do_sigframe_return_v2(env, - frame_addr - + offsetof(struct sigframe_v2, uc), - &frame->uc)) { + if (do_sigframe_return(env, + frame_addr + offsetof(struct sigframe, uc), + &frame->uc)) { goto badframe; } unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; -} - -long do_sigreturn(CPUARMState *env) -{ - if (get_osversion() >= 0x020612) { - return do_sigreturn_v2(env); - } else { - return do_sigreturn_v1(env); - } -} - -static long do_rt_sigreturn_v1(CPUARMState *env) -{ - abi_ulong frame_addr; - struct rt_sigframe_v1 *frame = NULL; - sigset_t host_set; - - /* - * Since we stacked the signal on a 64-bit boundary, - * then 'sp' should be word aligned here. If it's - * not, then the user is trying to mess with us. - */ - frame_addr = env->regs[13]; - trace_user_do_rt_sigreturn(env, frame_addr); - if (frame_addr & 7) { - goto badframe; - } - - if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { - goto badframe; - } - - target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask); - set_sigmask(&host_set); - - if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { - goto badframe; - } - - target_restore_altstack(&frame->uc.tuc_stack, env); - -#if 0 - /* Send SIGTRAP if we're single-stepping */ - if (ptrace_cancel_bpt(current)) - send_sig(SIGTRAP, current, 1); -#endif - unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; - -badframe: - unlock_user_struct(frame, frame_addr, 0); - force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; -} - -static long do_rt_sigreturn_v2(CPUARMState *env) -{ - abi_ulong frame_addr; - struct rt_sigframe_v2 *frame = NULL; - - /* - * Since we stacked the signal on a 64-bit boundary, - * then 'sp' should be word aligned here. If it's - * not, then the user is trying to mess with us. - */ - frame_addr = env->regs[13]; - trace_user_do_rt_sigreturn(env, frame_addr); - if (frame_addr & 7) { - goto badframe; - } - - if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { - goto badframe; - } - - if (do_sigframe_return_v2(env, - frame_addr - + offsetof(struct rt_sigframe_v2, uc), - &frame->uc)) { - goto badframe; - } - - unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; - -badframe: - unlock_user_struct(frame, frame_addr, 0); - force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; } long do_rt_sigreturn(CPUARMState *env) { - if (get_osversion() >= 0x020612) { - return do_rt_sigreturn_v2(env); - } else { - return do_rt_sigreturn_v1(env); + abi_ulong frame_addr; + struct rt_sigframe *frame = NULL; + + /* + * Since we stacked the signal on a 64-bit boundary, + * then 'sp' should be word aligned here. If it's + * not, then the user is trying to mess with us. 
+ */ + frame_addr = env->regs[13]; + trace_user_do_rt_sigreturn(env, frame_addr); + if (frame_addr & 7) { + goto badframe; } + + if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { + goto badframe; + } + + if (do_sigframe_return(env, + frame_addr + offsetof(struct rt_sigframe, sig.uc), + &frame->sig.uc)) { + goto badframe; + } + + unlock_user_struct(frame, frame_addr, 0); + return -QEMU_ESIGRETURN; + +badframe: + unlock_user_struct(frame, frame_addr, 0); + force_sig(TARGET_SIGSEGV); + return -QEMU_ESIGRETURN; +} + +/* + * EABI syscalls pass the number via r7. + * Note that the kernel still adds the OABI syscall number to the trap, + * presumably for backward ABI compatibility with unwinders. + */ +#define ARM_MOV_R7_IMM(X) (0xe3a07000 | (X)) +#define ARM_SWI_SYS(X) (0xef000000 | (X) | ARM_SYSCALL_BASE) + +#define THUMB_MOVS_R7_IMM(X) (0x2700 | (X)) +#define THUMB_SWI_SYS 0xdf00 + +static void write_arm_sigreturn(uint32_t *rc, int syscall) +{ + __put_user(ARM_MOV_R7_IMM(syscall), rc); + __put_user(ARM_SWI_SYS(syscall), rc + 1); + /* Wrote 8 of 12 bytes */ +} + +static void write_thm_sigreturn(uint32_t *rc, int syscall) +{ + __put_user(THUMB_SWI_SYS << 16 | THUMB_MOVS_R7_IMM(syscall), rc); + /* Wrote 4 of 12 bytes */ +} + +/* + * Stub needed to make sure the FD register (r9) contains the right value. + * Use the same instruction sequence as the kernel. + */ +static void write_arm_fdpic_sigreturn(uint32_t *rc, int ofs) +{ + assert(ofs <= 0xfff); + __put_user(0xe59d3000 | ofs, rc + 0); /* ldr r3, [sp, #ofs] */ + __put_user(0xe8930908, rc + 1); /* ldm r3, { r3, r9 } */ + __put_user(0xe12fff13, rc + 2); /* bx r3 */ + /* Wrote 12 of 12 bytes */ +} + +static void write_thm_fdpic_sigreturn(void *vrc, int ofs) +{ + uint16_t *rc = vrc; + + assert((ofs & ~0x3fc) == 0); + __put_user(0x9b00 | (ofs >> 2), rc + 0); /* ldr r3, [sp, #ofs] */ + __put_user(0xcb0c, rc + 1); /* ldm r3, { r2, r3 } */ + __put_user(0x4699, rc + 2); /* mov r9, r3 */ + __put_user(0x4710, rc + 3); /* bx r2 */ + /* Wrote 8 of 12 bytes */ +} + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t total_size = 8 * RETCODE_BYTES; + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, total_size, 0); + + assert(tramp != NULL); + + default_sigreturn = sigtramp_page; + write_arm_sigreturn(&tramp[0 * RETCODE_WORDS], TARGET_NR_sigreturn); + write_thm_sigreturn(&tramp[1 * RETCODE_WORDS], TARGET_NR_sigreturn); + write_arm_sigreturn(&tramp[2 * RETCODE_WORDS], TARGET_NR_rt_sigreturn); + write_thm_sigreturn(&tramp[3 * RETCODE_WORDS], TARGET_NR_rt_sigreturn); + + sigreturn_fdpic_tramp = sigtramp_page + 4 * RETCODE_BYTES; + write_arm_fdpic_sigreturn(tramp + 4 * RETCODE_WORDS, + offsetof(struct sigframe, retcode[3])); + write_thm_fdpic_sigreturn(tramp + 5 * RETCODE_WORDS, + offsetof(struct sigframe, retcode[3])); + write_arm_fdpic_sigreturn(tramp + 6 * RETCODE_WORDS, + offsetof(struct rt_sigframe, sig.retcode[3])); + write_thm_fdpic_sigreturn(tramp + 7 * RETCODE_WORDS, + offsetof(struct rt_sigframe, sig.retcode[3])); + + unlock_user(tramp, sigtramp_page, total_size); } diff --git a/linux-user/arm/target_cpu.h b/linux-user/arm/target_cpu.h index 709d19bc9e..89ba274cfc 100644 --- a/linux-user/arm/target_cpu.h +++ b/linux-user/arm/target_cpu.h @@ -34,9 +34,9 @@ static inline unsigned long arm_max_reserved_va(CPUState *cs) } else { /* * We need to be able to map the commpage. - * See validate_guest_space in linux-user/elfload.c. + * See init_guest_commpage in linux-user/elfload.c. 
*/ - return 0xffff0000ul; + return 0xfffffffful; } } #define MAX_RESERVED_VA arm_max_reserved_va diff --git a/linux-user/arm/target_mman.h b/linux-user/arm/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/arm/target_mman.h @@ -0,0 +1 @@ +#include "../generic/target_mman.h" diff --git a/linux-user/arm/target_prctl.h b/linux-user/arm/target_prctl.h new file mode 100644 index 0000000000..eb53b31ad5 --- /dev/null +++ b/linux-user/arm/target_prctl.h @@ -0,0 +1 @@ +/* No special prctl support required. */ diff --git a/linux-user/arm/target_resource.h b/linux-user/arm/target_resource.h new file mode 100644 index 0000000000..227259594c --- /dev/null +++ b/linux-user/arm/target_resource.h @@ -0,0 +1 @@ +#include "../generic/target_resource.h" diff --git a/linux-user/arm/target_signal.h b/linux-user/arm/target_signal.h index 0998dd6dfa..0e6351d9f7 100644 --- a/linux-user/arm/target_signal.h +++ b/linux-user/arm/target_signal.h @@ -1,25 +1,9 @@ #ifndef ARM_TARGET_SIGNAL_H #define ARM_TARGET_SIGNAL_H -/* this struct defines a stack used during syscall handling */ - -typedef struct target_sigaltstack { - abi_ulong ss_sp; - abi_int ss_flags; - abi_ulong ss_size; -} target_stack_t; - - -/* - * sigaltstack controls - */ -#define TARGET_SS_ONSTACK 1 -#define TARGET_SS_DISABLE 2 - -#define TARGET_MINSIGSTKSZ 2048 -#define TARGET_SIGSTKSZ 8192 - #include "../generic/signal.h" #define TARGET_ARCH_HAS_SETUP_FRAME +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* ARM_TARGET_SIGNAL_H */ diff --git a/linux-user/arm/target_structs.h b/linux-user/arm/target_structs.h index 339b070bf1..3a06f373c3 100644 --- a/linux-user/arm/target_structs.h +++ b/linux-user/arm/target_structs.h @@ -1,59 +1 @@ -/* - * ARM specific structures for linux-user - * - * Copyright (c) 2013 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#ifndef ARM_TARGET_STRUCTS_H -#define ARM_TARGET_STRUCTS_H - -struct target_ipc_perm { - abi_int __key; /* Key. */ - abi_uint uid; /* Owner's user ID. */ - abi_uint gid; /* Owner's group ID. */ - abi_uint cuid; /* Creator's user ID. */ - abi_uint cgid; /* Creator's group ID. */ - abi_ushort mode; /* Read/write permission. */ - abi_ushort __pad1; - abi_ushort __seq; /* Sequence number. 
*/ - abi_ushort __pad2; - abi_ulong __unused1; - abi_ulong __unused2; -}; - -struct target_shmid_ds { - struct target_ipc_perm shm_perm; /* operation permission struct */ - abi_long shm_segsz; /* size of segment in bytes */ - abi_ulong shm_atime; /* time of last shmat() */ - abi_ulong __unused1; - abi_ulong shm_dtime; /* time of last shmdt() */ - abi_ulong __unused2; - abi_ulong shm_ctime; /* time of last change by shmctl() */ - abi_ulong __unused3; - abi_int shm_cpid; /* pid of creator */ - abi_int shm_lpid; /* pid of last shmop */ - abi_ulong shm_nattch; /* number of current attaches */ - abi_ulong __unused4; - abi_ulong __unused5; -}; - -struct target_oabi_flock64 { - abi_short l_type; - abi_short l_whence; - abi_llong l_start; - abi_llong l_len; - abi_int l_pid; -} QEMU_PACKED; -#endif +#include "../generic/target_structs.h" diff --git a/linux-user/arm/target_syscall.h b/linux-user/arm/target_syscall.h index e870ed7a54..412ad434cf 100644 --- a/linux-user/arm/target_syscall.h +++ b/linux-user/arm/target_syscall.h @@ -18,7 +18,7 @@ struct target_pt_regs { #define ARM_NR_set_tls (ARM_NR_BASE + 5) #define ARM_NR_get_tls (ARM_NR_BASE + 6) -#if defined(TARGET_WORDS_BIGENDIAN) +#if TARGET_BIG_ENDIAN #define UNAME_MACHINE "armv5teb" #else #define UNAME_MACHINE "armv5tel" @@ -27,7 +27,6 @@ struct target_pt_regs { #define TARGET_CLONE_BACKWARDS -#define TARGET_MINSIGSTKSZ 2048 #define TARGET_MCL_CURRENT 1 #define TARGET_MCL_FUTURE 2 #define TARGET_MCL_ONFAULT 4 diff --git a/linux-user/cpu_loop-common.h b/linux-user/cpu_loop-common.h index 8828af28a4..e644d2ef90 100644 --- a/linux-user/cpu_loop-common.h +++ b/linux-user/cpu_loop-common.h @@ -21,17 +21,11 @@ #define CPU_LOOP_COMMON_H #include "exec/log.h" +#include "special-errno.h" -#define EXCP_DUMP(env, fmt, ...) 
\ -do { \ - CPUState *cs = env_cpu(env); \ - fprintf(stderr, fmt , ## __VA_ARGS__); \ - cpu_dump_state(cs, stderr, 0); \ - if (qemu_log_separate()) { \ - qemu_log(fmt, ## __VA_ARGS__); \ - log_cpu_state(cs, 0); \ - } \ -} while (0) +void target_exception_dump(CPUArchState *env, const char *fmt, int code); +#define EXCP_DUMP(env, fmt, code) \ + target_exception_dump(env, fmt, code) void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs); #endif diff --git a/linux-user/cris/cpu_loop.c b/linux-user/cris/cpu_loop.c index 334edddd1e..01e6ff16fc 100644 --- a/linux-user/cris/cpu_loop.c +++ b/linux-user/cris/cpu_loop.c @@ -18,16 +18,16 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu.h" +#include "user-internals.h" #include "cpu_loop-common.h" +#include "signal-common.h" void cpu_loop(CPUCRISState *env) { CPUState *cs = env_cpu(env); int trapnr, ret; - target_siginfo_t info; - + while (1) { cpu_exec_start(cs); trapnr = cpu_exec(cs); @@ -35,19 +35,9 @@ void cpu_loop(CPUCRISState *env) process_queued_cpu_work(cs); switch (trapnr) { - case 0xaa: - { - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - /* XXX: check env->error_code */ - info.si_code = TARGET_SEGV_MAPERR; - info._sifields._sigfault._addr = env->pregs[PR_EDA]; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - } - break; case EXCP_INTERRUPT: - /* just indicate that signals should be handled asap */ - break; + /* just indicate that signals should be handled asap */ + break; case EXCP_BREAK: ret = do_syscall(env, env->regs[9], @@ -58,17 +48,14 @@ void cpu_loop(CPUCRISState *env) env->pregs[7], env->pregs[11], 0, 0); - if (ret == -TARGET_ERESTARTSYS) { + if (ret == -QEMU_ERESTARTSYS) { env->pc -= 2; - } else if (ret != -TARGET_QEMU_ESIGRETURN) { + } else if (ret != -QEMU_ESIGRETURN) { env->regs[10] = ret; } break; case EXCP_DEBUG: - info.si_signo = TARGET_SIGTRAP; - info.si_errno = 0; - info.si_code = TARGET_TRAP_BRKPT; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc); break; case EXCP_ATOMIC: cpu_exec_step_atomic(cs); diff --git a/linux-user/cris/signal.c b/linux-user/cris/signal.c index 1e02194377..4f532b2903 100644 --- a/linux-user/cris/signal.c +++ b/linux-user/cris/signal.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" @@ -96,6 +97,14 @@ static abi_ulong get_sigframe(CPUCRISState *env, int framesize) return sp - framesize; } +static void setup_sigreturn(uint16_t *retcode) +{ + /* This is movu.w __NR_sigreturn, r9; break 13; */ + __put_user(0x9c5f, retcode + 0); + __put_user(TARGET_NR_sigreturn, retcode + 1); + __put_user(0xe93d, retcode + 2); +} + void setup_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUCRISState *env) { @@ -111,14 +120,8 @@ void setup_frame(int sig, struct target_sigaction *ka, /* * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't * use this trampoline anymore but it sets it up for GDB. - * In QEMU, using the trampoline simplifies things a bit so we use it. - * - * This is movu.w __NR_sigreturn, r9; break 13; */ - __put_user(0x9c5f, frame->retcode+0); - __put_user(TARGET_NR_sigreturn, - frame->retcode + 1); - __put_user(0xe93d, frame->retcode + 2); + setup_sigreturn(frame->retcode); /* Save the mask. 
*/ __put_user(set->sig[0], &frame->sc.oldmask); @@ -134,7 +137,7 @@ void setup_frame(int sig, struct target_sigaction *ka, env->regs[10] = sig; env->pc = (unsigned long) ka->_sa_handler; /* Link SRP so the guest returns through the trampoline. */ - env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode); + env->pregs[PR_SRP] = default_sigreturn; unlock_user_struct(frame, frame_addr, 1); return; @@ -174,10 +177,10 @@ long do_sigreturn(CPUCRISState *env) restore_sigcontext(&frame->sc, env); unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; badframe: force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; } long do_rt_sigreturn(CPUCRISState *env) @@ -186,3 +189,14 @@ long do_rt_sigreturn(CPUCRISState *env) qemu_log_mask(LOG_UNIMP, "do_rt_sigreturn: not implemented\n"); return -TARGET_ENOSYS; } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint16_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 6, 0); + assert(tramp != NULL); + + default_sigreturn = sigtramp_page; + setup_sigreturn(tramp); + + unlock_user(tramp, sigtramp_page, 6); +} diff --git a/linux-user/cris/target_mman.h b/linux-user/cris/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/cris/target_mman.h @@ -0,0 +1 @@ +#include "../generic/target_mman.h" diff --git a/linux-user/cris/target_prctl.h b/linux-user/cris/target_prctl.h new file mode 100644 index 0000000000..eb53b31ad5 --- /dev/null +++ b/linux-user/cris/target_prctl.h @@ -0,0 +1 @@ +/* No special prctl support required. */ diff --git a/linux-user/cris/target_resource.h b/linux-user/cris/target_resource.h new file mode 100644 index 0000000000..227259594c --- /dev/null +++ b/linux-user/cris/target_resource.h @@ -0,0 +1 @@ +#include "../generic/target_resource.h" diff --git a/linux-user/cris/target_signal.h b/linux-user/cris/target_signal.h index 495a142896..ab0653fcdc 100644 --- a/linux-user/cris/target_signal.h +++ b/linux-user/cris/target_signal.h @@ -1,25 +1,9 @@ #ifndef CRIS_TARGET_SIGNAL_H #define CRIS_TARGET_SIGNAL_H -/* this struct defines a stack used during syscall handling */ - -typedef struct target_sigaltstack { - abi_ulong ss_sp; - abi_int ss_flags; - abi_ulong ss_size; -} target_stack_t; - - -/* - * sigaltstack controls - */ -#define TARGET_SS_ONSTACK 1 -#define TARGET_SS_DISABLE 2 - -#define TARGET_MINSIGSTKSZ 2048 -#define TARGET_SIGSTKSZ 8192 - #include "../generic/signal.h" #define TARGET_ARCH_HAS_SETUP_FRAME +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* CRIS_TARGET_SIGNAL_H */ diff --git a/linux-user/cris/target_structs.h b/linux-user/cris/target_structs.h index f949d2331e..3a06f373c3 100644 --- a/linux-user/cris/target_structs.h +++ b/linux-user/cris/target_structs.h @@ -1,58 +1 @@ -/* - * CRIS specific structures for linux-user - * - * Copyright (c) 2013 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#ifndef CRIS_TARGET_STRUCTS_H -#define CRIS_TARGET_STRUCTS_H - -struct target_ipc_perm { - abi_int __key; /* Key. */ - abi_uint uid; /* Owner's user ID. */ - abi_uint gid; /* Owner's group ID. */ - abi_uint cuid; /* Creator's user ID. */ - abi_uint cgid; /* Creator's group ID. */ - abi_ushort mode; /* Read/write permission. */ - abi_ushort __pad1; - abi_ushort __seq; /* Sequence number. */ - abi_ushort __pad2; - abi_ulong __unused1; - abi_ulong __unused2; -}; - -struct target_shmid_ds { - struct target_ipc_perm shm_perm; /* operation permission struct */ - abi_long shm_segsz; /* size of segment in bytes */ - abi_ulong shm_atime; /* time of last shmat() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused1; -#endif - abi_ulong shm_dtime; /* time of last shmdt() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused2; -#endif - abi_ulong shm_ctime; /* time of last change by shmctl() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused3; -#endif - abi_int shm_cpid; /* pid of creator */ - abi_int shm_lpid; /* pid of last shmop */ - abi_ulong shm_nattch; /* number of current attaches */ - abi_ulong __unused4; - abi_ulong __unused5; -}; - -#endif +#include "../generic/target_structs.h" diff --git a/linux-user/cris/target_syscall.h b/linux-user/cris/target_syscall.h index 19e1281403..0b5ebf1f02 100644 --- a/linux-user/cris/target_syscall.h +++ b/linux-user/cris/target_syscall.h @@ -39,7 +39,6 @@ struct target_pt_regs { }; #define TARGET_CLONE_BACKWARDS2 -#define TARGET_MINSIGSTKSZ 2048 #define TARGET_MCL_CURRENT 1 #define TARGET_MCL_FUTURE 2 #define TARGET_MCL_ONFAULT 4 diff --git a/linux-user/elfload.c b/linux-user/elfload.c index 01e9a833fb..20894b633f 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -6,6 +6,10 @@ #include #include "qemu.h" +#include "user-internals.h" +#include "signal-common.h" +#include "loader.h" +#include "user-mmap.h" #include "disas/disas.h" #include "qemu/bitops.h" #include "qemu/path.h" @@ -14,6 +18,7 @@ #include "qemu/units.h" #include "qemu/selfmap.h" #include "qapi/error.h" +#include "target_signal.h" #ifdef _ARCH_PPC64 #undef ARCH_DLINFO @@ -100,7 +105,7 @@ int info_is_fdpic(struct image_info *info) #define ELIBBAD 80 #endif -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN #define ELF_DATA ELFDATA2MSB #else #define ELF_DATA ELFDATA2LSB @@ -125,19 +130,6 @@ typedef abi_int target_pid_t; #ifdef TARGET_I386 -#define ELF_PLATFORM get_elf_platform() - -static const char *get_elf_platform(void) -{ - static char elf_platform[] = "i386"; - int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL); - if (family > 6) - family = 6; - if (family >= 3) - elf_platform[1] = '0' + family; - return elf_platform; -} - #define ELF_HWCAP get_elf_hwcap() static uint32_t get_elf_hwcap(void) @@ -153,6 +145,8 @@ static uint32_t get_elf_hwcap(void) #define ELF_CLASS ELFCLASS64 #define ELF_ARCH EM_X86_64 +#define ELF_PLATFORM "x86_64" + static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) { regs->rax = 0; @@ -201,6 +195,27 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en (*regs)[26] = tswapreg(env->segs[R_GS].selector & 0xffff); } +#if ULONG_MAX > UINT32_MAX +#define INIT_GUEST_COMMPAGE +static bool init_guest_commpage(void) +{ + /* + * The vsyscall page is at a high negative address aka kernel space, + * which means that we cannot actually allocate it with target_mmap. 
+ * We still should be able to use page_set_flags, unless the user + * has specified -R reserved_va, which would trigger an assert(). + */ + if (reserved_va != 0 && + TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE >= reserved_va) { + error_report("Cannot allocate vsyscall page"); + exit(EXIT_FAILURE); + } + page_set_flags(TARGET_VSYSCALL_PAGE, + TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE, + PAGE_EXEC | PAGE_VALID); + return true; +} +#endif #else #define ELF_START_MMAP 0x80000000 @@ -216,6 +231,22 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en #define ELF_CLASS ELFCLASS32 #define ELF_ARCH EM_386 +#define ELF_PLATFORM get_elf_platform() +#define EXSTACK_DEFAULT true + +static const char *get_elf_platform(void) +{ + static char elf_platform[] = "i386"; + int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL); + if (family > 6) { + family = 6; + } + if (family >= 3) { + elf_platform[1] = '0' + family; + } + return elf_platform; +} + static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) { @@ -278,6 +309,7 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en #define ELF_ARCH EM_ARM #define ELF_CLASS ELFCLASS32 +#define EXSTACK_DEFAULT true static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) @@ -385,11 +417,12 @@ enum { /* The commpage only exists for 32 bit kernels */ -#define ARM_COMMPAGE (intptr_t)0xffff0f00u +#define HI_COMMPAGE (intptr_t)0xffff0f00u static bool init_guest_commpage(void) { - void *want = g2h_untagged(ARM_COMMPAGE & -qemu_host_page_size); + abi_ptr commpage = HI_COMMPAGE & -qemu_host_page_size; + void *want = g2h_untagged(commpage); void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); @@ -408,6 +441,9 @@ static bool init_guest_commpage(void) perror("Protecting guest commpage"); exit(EXIT_FAILURE); } + + page_set_flags(commpage, commpage + qemu_host_page_size, + PAGE_READ | PAGE_EXEC | PAGE_VALID); return true; } @@ -478,7 +514,7 @@ static const char *get_elf_platform(void) { CPUARMState *env = thread_cpu->env_ptr; -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN # define END "b" #else # define END "l" @@ -509,7 +545,7 @@ static const char *get_elf_platform(void) #define ELF_ARCH EM_AARCH64 #define ELF_CLASS ELFCLASS64 -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN # define ELF_PLATFORM "aarch64_be" #else # define ELF_PLATFORM "aarch64" @@ -596,6 +632,18 @@ enum { ARM_HWCAP2_A64_RNG = 1 << 16, ARM_HWCAP2_A64_BTI = 1 << 17, ARM_HWCAP2_A64_MTE = 1 << 18, + ARM_HWCAP2_A64_ECV = 1 << 19, + ARM_HWCAP2_A64_AFP = 1 << 20, + ARM_HWCAP2_A64_RPRES = 1 << 21, + ARM_HWCAP2_A64_MTE3 = 1 << 22, + ARM_HWCAP2_A64_SME = 1 << 23, + ARM_HWCAP2_A64_SME_I16I64 = 1 << 24, + ARM_HWCAP2_A64_SME_F64F64 = 1 << 25, + ARM_HWCAP2_A64_SME_I8I32 = 1 << 26, + ARM_HWCAP2_A64_SME_F16F32 = 1 << 27, + ARM_HWCAP2_A64_SME_B16F32 = 1 << 28, + ARM_HWCAP2_A64_SME_F32F32 = 1 << 29, + ARM_HWCAP2_A64_SME_FA64 = 1 << 30, }; #define ELF_HWCAP get_elf_hwcap() @@ -665,6 +713,14 @@ static uint32_t get_elf_hwcap2(void) GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG); GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI); GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE); + GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME | + ARM_HWCAP2_A64_SME_F32F32 | + ARM_HWCAP2_A64_SME_B16F32 | + ARM_HWCAP2_A64_SME_F16F32 | + ARM_HWCAP2_A64_SME_I8I32)); + GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64); + 
GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64); + GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64); return hwcaps; } @@ -713,7 +769,7 @@ static inline void init_thread(struct target_pt_regs *regs, #define ELF_MACHINE PPC_ELF_MACHINE #define ELF_START_MMAP 0x80000000 -#if defined(TARGET_PPC64) && !defined(TARGET_ABI32) +#if defined(TARGET_PPC64) #define elf_check_arch(x) ( (x) == EM_PPC64 ) @@ -722,6 +778,7 @@ static inline void init_thread(struct target_pt_regs *regs, #else #define ELF_CLASS ELFCLASS32 +#define EXSTACK_DEFAULT true #endif @@ -774,6 +831,8 @@ enum { QEMU_PPC_FEATURE2_DARN = 0x00200000, /* darn random number insn */ QEMU_PPC_FEATURE2_SCV = 0x00100000, /* scv syscall */ QEMU_PPC_FEATURE2_HTM_NO_SUSPEND = 0x00080000, /* TM w/o suspended state */ + QEMU_PPC_FEATURE2_ARCH_3_1 = 0x00040000, /* ISA 3.1 */ + QEMU_PPC_FEATURE2_MMA = 0x00020000, /* Matrix-Multiply Assist */ }; #define ELF_HWCAP get_elf_hwcap() @@ -831,6 +890,8 @@ static uint32_t get_elf_hwcap2(void) QEMU_PPC_FEATURE2_VEC_CRYPTO); GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00 | QEMU_PPC_FEATURE2_DARN | QEMU_PPC_FEATURE2_HAS_IEEE128); + GET_FEATURE2(PPC2_ISA310, QEMU_PPC_FEATURE2_ARCH_3_1 | + QEMU_PPC_FEATURE2_MMA); #undef GET_FEATURE #undef GET_FEATURE2 @@ -865,7 +926,7 @@ static uint32_t get_elf_hwcap2(void) static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop) { _regs->gpr[1] = infop->start_stack; -#if defined(TARGET_PPC64) && !defined(TARGET_ABI32) +#if defined(TARGET_PPC64) if (get_ppc64_abi(infop) < 2) { uint64_t val; get_user_u64(val, infop->entry + 8); @@ -896,7 +957,7 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *en (*regs)[33] = tswapreg(env->msr); (*regs)[35] = tswapreg(env->ctr); (*regs)[36] = tswapreg(env->lr); - (*regs)[37] = tswapreg(env->xer); + (*regs)[37] = tswapreg(cpu_read_xer(env)); for (i = 0; i < ARRAY_SIZE(env->crf); i++) { ccr |= env->crf[i] << (32 - ((i + 1) * 4)); @@ -909,6 +970,98 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *en #endif +#ifdef TARGET_LOONGARCH64 + +#define ELF_START_MMAP 0x80000000 + +#define ELF_CLASS ELFCLASS64 +#define ELF_ARCH EM_LOONGARCH +#define EXSTACK_DEFAULT true + +#define elf_check_arch(x) ((x) == EM_LOONGARCH) + +static inline void init_thread(struct target_pt_regs *regs, + struct image_info *infop) +{ + /*Set crmd PG,DA = 1,0 */ + regs->csr.crmd = 2 << 3; + regs->csr.era = infop->entry; + regs->regs[3] = infop->start_stack; +} + +/* See linux kernel: arch/loongarch/include/asm/elf.h */ +#define ELF_NREG 45 +typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; + +enum { + TARGET_EF_R0 = 0, + TARGET_EF_CSR_ERA = TARGET_EF_R0 + 33, + TARGET_EF_CSR_BADV = TARGET_EF_R0 + 34, +}; + +static void elf_core_copy_regs(target_elf_gregset_t *regs, + const CPULoongArchState *env) +{ + int i; + + (*regs)[TARGET_EF_R0] = 0; + + for (i = 1; i < ARRAY_SIZE(env->gpr); i++) { + (*regs)[TARGET_EF_R0 + i] = tswapreg(env->gpr[i]); + } + + (*regs)[TARGET_EF_CSR_ERA] = tswapreg(env->pc); + (*regs)[TARGET_EF_CSR_BADV] = tswapreg(env->CSR_BADV); +} + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE 4096 + +#define ELF_HWCAP get_elf_hwcap() + +/* See arch/loongarch/include/uapi/asm/hwcap.h */ +enum { + HWCAP_LOONGARCH_CPUCFG = (1 << 0), + HWCAP_LOONGARCH_LAM = (1 << 1), + HWCAP_LOONGARCH_UAL = (1 << 2), + HWCAP_LOONGARCH_FPU = (1 << 3), + HWCAP_LOONGARCH_LSX = (1 << 4), + HWCAP_LOONGARCH_LASX = (1 << 5), + HWCAP_LOONGARCH_CRC32 = (1 << 6), + 
HWCAP_LOONGARCH_COMPLEX = (1 << 7), + HWCAP_LOONGARCH_CRYPTO = (1 << 8), + HWCAP_LOONGARCH_LVZ = (1 << 9), + HWCAP_LOONGARCH_LBT_X86 = (1 << 10), + HWCAP_LOONGARCH_LBT_ARM = (1 << 11), + HWCAP_LOONGARCH_LBT_MIPS = (1 << 12), +}; + +static uint32_t get_elf_hwcap(void) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(thread_cpu); + uint32_t hwcaps = 0; + + hwcaps |= HWCAP_LOONGARCH_CRC32; + + if (FIELD_EX32(cpu->env.cpucfg[1], CPUCFG1, UAL)) { + hwcaps |= HWCAP_LOONGARCH_UAL; + } + + if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, FP)) { + hwcaps |= HWCAP_LOONGARCH_FPU; + } + + if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LAM)) { + hwcaps |= HWCAP_LOONGARCH_LAM; + } + + return hwcaps; +} + +#define ELF_PLATFORM "loongarch" + +#endif /* TARGET_LOONGARCH64 */ + #ifdef TARGET_MIPS #define ELF_START_MMAP 0x80000000 @@ -919,8 +1072,7 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *en #define ELF_CLASS ELFCLASS32 #endif #define ELF_ARCH EM_MIPS - -#define elf_check_arch(x) ((x) == EM_MIPS || (x) == EM_NANOMIPS) +#define EXSTACK_DEFAULT true #ifdef TARGET_ABI_MIPSN32 #define elf_check_abi(x) ((x) & EF_MIPS_ABI2) @@ -928,6 +1080,37 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *en #define elf_check_abi(x) (!((x) & EF_MIPS_ABI2)) #endif +#define ELF_BASE_PLATFORM get_elf_base_platform() + +#define MATCH_PLATFORM_INSN(_flags, _base_platform) \ + do { if ((cpu->env.insn_flags & (_flags)) == _flags) \ + { return _base_platform; } } while (0) + +static const char *get_elf_base_platform(void) +{ + MIPSCPU *cpu = MIPS_CPU(thread_cpu); + + /* 64 bit ISAs goes first */ + MATCH_PLATFORM_INSN(CPU_MIPS64R6, "mips64r6"); + MATCH_PLATFORM_INSN(CPU_MIPS64R5, "mips64r5"); + MATCH_PLATFORM_INSN(CPU_MIPS64R2, "mips64r2"); + MATCH_PLATFORM_INSN(CPU_MIPS64R1, "mips64"); + MATCH_PLATFORM_INSN(CPU_MIPS5, "mips5"); + MATCH_PLATFORM_INSN(CPU_MIPS4, "mips4"); + MATCH_PLATFORM_INSN(CPU_MIPS3, "mips3"); + + /* 32 bit ISAs */ + MATCH_PLATFORM_INSN(CPU_MIPS32R6, "mips32r6"); + MATCH_PLATFORM_INSN(CPU_MIPS32R5, "mips32r5"); + MATCH_PLATFORM_INSN(CPU_MIPS32R2, "mips32r2"); + MATCH_PLATFORM_INSN(CPU_MIPS32R1, "mips32"); + MATCH_PLATFORM_INSN(CPU_MIPS2, "mips2"); + + /* Fallback */ + return "mips"; +} +#undef MATCH_PLATFORM_INSN + static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) { @@ -1093,7 +1276,47 @@ static void init_thread(struct target_pt_regs *regs, struct image_info *infop) { regs->ea = infop->entry; regs->sp = infop->start_stack; - regs->estatus = 0x3; +} + +#define LO_COMMPAGE TARGET_PAGE_SIZE + +static bool init_guest_commpage(void) +{ + static const uint8_t kuser_page[4 + 2 * 64] = { + /* __kuser_helper_version */ + [0x00] = 0x02, 0x00, 0x00, 0x00, + + /* __kuser_cmpxchg */ + [0x04] = 0x3a, 0x6c, 0x3b, 0x00, /* trap 16 */ + 0x3a, 0x28, 0x00, 0xf8, /* ret */ + + /* __kuser_sigtramp */ + [0x44] = 0xc4, 0x22, 0x80, 0x00, /* movi r2, __NR_rt_sigreturn */ + 0x3a, 0x68, 0x3b, 0x00, /* trap 0 */ + }; + + void *want = g2h_untagged(LO_COMMPAGE & -qemu_host_page_size); + void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); + + if (addr == MAP_FAILED) { + perror("Allocating guest commpage"); + exit(EXIT_FAILURE); + } + if (addr != want) { + return false; + } + + memcpy(addr, kuser_page, sizeof(kuser_page)); + + if (mprotect(addr, qemu_host_page_size, PROT_READ)) { + perror("Protecting guest commpage"); + exit(EXIT_FAILURE); + } + + page_set_flags(LO_COMMPAGE, LO_COMMPAGE + 
TARGET_PAGE_SIZE, + PAGE_READ | PAGE_EXEC | PAGE_VALID); + return true; } #define ELF_EXEC_PAGESIZE 4096 @@ -1126,7 +1349,7 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, (*regs)[30] = -1; /* R_SSTATUS */ (*regs)[31] = tswapreg(env->regs[R_RA]); - (*regs)[32] = tswapreg(env->regs[R_PC]); + (*regs)[32] = tswapreg(env->pc); (*regs)[33] = -1; /* R_STATUS */ (*regs)[34] = tswapreg(env->regs[CR_ESTATUS]); @@ -1443,7 +1666,7 @@ static uint32_t get_elf_hwcap(void) uint32_t mask = MISA_BIT('I') | MISA_BIT('M') | MISA_BIT('A') | MISA_BIT('F') | MISA_BIT('D') | MISA_BIT('C'); - return cpu->env.misa & mask; + return cpu->env.misa_ext & mask; #undef MISA_BIT } @@ -1473,13 +1696,41 @@ static inline void init_thread(struct target_pt_regs *regs, regs->iaoq[0] = infop->entry; regs->iaoq[1] = infop->entry + 4; regs->gr[23] = 0; - regs->gr[24] = infop->arg_start; - regs->gr[25] = (infop->arg_end - infop->arg_start) / sizeof(abi_ulong); + regs->gr[24] = infop->argv; + regs->gr[25] = infop->argc; /* The top-of-stack contains a linkage buffer. */ regs->gr[30] = infop->start_stack + 64; regs->gr[31] = infop->entry; } +#define LO_COMMPAGE 0 + +static bool init_guest_commpage(void) +{ + void *want = g2h_untagged(LO_COMMPAGE); + void *addr = mmap(want, qemu_host_page_size, PROT_NONE, + MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); + + if (addr == MAP_FAILED) { + perror("Allocating guest commpage"); + exit(EXIT_FAILURE); + } + if (addr != want) { + return false; + } + + /* + * On Linux, page zero is normally marked execute only + gateway. + * Normal read or write is supposed to fail (thus PROT_NONE above), + * but specific offsets have kernel code mapped to raise permissions + * and implement syscalls. Here, simply mark the page executable. + * Special case the entry points during translation (see do_page_zero). + */ + page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE, + PAGE_EXEC | PAGE_VALID); + return true; +} + #endif /* TARGET_HPPA */ #ifdef TARGET_XTENSA @@ -1556,6 +1807,10 @@ static inline void init_thread(struct target_pt_regs *regs, #endif /* TARGET_HEXAGON */ +#ifndef ELF_BASE_PLATFORM +#define ELF_BASE_PLATFORM (NULL) +#endif + #ifndef ELF_PLATFORM #define ELF_PLATFORM (NULL) #endif @@ -1591,6 +1846,10 @@ static inline void init_thread(struct target_pt_regs *regs, #define bswaptls(ptr) bswap32s(ptr) #endif +#ifndef EXSTACK_DEFAULT +#define EXSTACK_DEFAULT false +#endif + #include "elf.h" /* We must delay the following stanzas until after "elf.h". */ @@ -1866,17 +2125,28 @@ static abi_ulong setup_arg_pages(struct linux_binprm *bprm, struct image_info *info) { abi_ulong size, error, guard; + int prot; size = guest_stack_size; if (size < STACK_LOWER_LIMIT) { size = STACK_LOWER_LIMIT; } - guard = TARGET_PAGE_SIZE; - if (guard < qemu_real_host_page_size) { - guard = qemu_real_host_page_size; + + if (STACK_GROWS_DOWN) { + guard = TARGET_PAGE_SIZE; + if (guard < qemu_real_host_page_size()) { + guard = qemu_real_host_page_size(); + } + } else { + /* no guard page for hppa target where stack grows upwards. 
*/ + guard = 0; } - error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE, + prot = PROT_READ | PROT_WRITE; + if (info->exec_stack) { + prot |= PROT_EXEC; + } + error = target_mmap(0, size + guard, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (error == -1) { perror("mmap stack"); @@ -1889,7 +2159,6 @@ static abi_ulong setup_arg_pages(struct linux_binprm *bprm, info->stack_limit = error + guard; return info->stack_limit + size - sizeof(void *); } else { - target_mprotect(error + size, guard, PROT_NONE); info->stack_limit = error + size; return error; } @@ -1986,8 +2255,8 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, int i; abi_ulong u_rand_bytes; uint8_t k_rand_bytes[16]; - abi_ulong u_platform; - const char *k_platform; + abi_ulong u_platform, u_base_platform; + const char *k_platform, *k_base_platform; const int n = sizeof(elf_addr_t); sp = p; @@ -2009,6 +2278,22 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, } } + u_base_platform = 0; + k_base_platform = ELF_BASE_PLATFORM; + if (k_base_platform) { + size_t len = strlen(k_base_platform) + 1; + if (STACK_GROWS_DOWN) { + sp -= (len + n - 1) & ~(n - 1); + u_base_platform = sp; + /* FIXME - check return value of memcpy_to_target() for failure */ + memcpy_to_target(sp, k_base_platform, len); + } else { + memcpy_to_target(sp, k_base_platform, len); + u_base_platform = sp; + sp += len + 1; + } + } + u_platform = 0; k_platform = ELF_PLATFORM; if (k_platform) { @@ -2050,6 +2335,8 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, } size = (DLINFO_ITEMS + 1) * 2; + if (k_base_platform) + size += 2; if (k_platform) size += 2; #ifdef DLINFO_ARCH_ITEMS @@ -2077,8 +2364,10 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, u_envp = u_argv + (argc + 1) * n; u_auxv = u_envp + (envc + 1) * n; info->saved_auxv = u_auxv; - info->arg_start = u_argv; - info->arg_end = u_argv + argc * n; + info->argc = argc; + info->envc = envc; + info->argv = u_argv; + info->envp = u_envp; /* This is correct because Linux defines * elf_addr_t as Elf32_Off / Elf64_Off @@ -2125,6 +2414,9 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, NEW_AUX_ENT(AT_HWCAP2, (abi_ulong) ELF_HWCAP2); #endif + if (u_base_platform) { + NEW_AUX_ENT(AT_BASE_PLATFORM, u_base_platform); + } if (u_platform) { NEW_AUX_ENT(AT_PLATFORM, u_platform); } @@ -2157,10 +2449,17 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, return sp; } -#ifndef ARM_COMMPAGE -#define ARM_COMMPAGE 0 +#if defined(HI_COMMPAGE) +#define LO_COMMPAGE -1 +#elif defined(LO_COMMPAGE) +#define HI_COMMPAGE 0 +#else +#define HI_COMMPAGE 0 +#define LO_COMMPAGE -1 +#ifndef INIT_GUEST_COMMPAGE #define init_guest_commpage() true #endif +#endif static void pgb_fail_in_use(const char *image_name) { @@ -2218,6 +2517,9 @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr, if (test != addr) { pgb_fail_in_use(image_name); } + qemu_log_mask(CPU_LOG_PAGE, + "%s: base @ %p for " TARGET_ABI_FMT_ld " bytes\n", + __func__, addr, guest_hiaddr - guest_loaddr); } /** @@ -2260,6 +2562,9 @@ static uintptr_t pgd_find_hole_fallback(uintptr_t guest_size, uintptr_t brk, if (mmap_start != MAP_FAILED) { munmap(mmap_start, guest_size); if (mmap_start == (void *) align_start) { + qemu_log_mask(CPU_LOG_PAGE, + "%s: base @ %p for %" PRIdPTR" bytes\n", + __func__, mmap_start + offset, guest_size); return (uintptr_t) mmap_start + offset; } } @@ -2284,8 +2589,7 @@ static uintptr_t pgb_find_hole(uintptr_t 
guest_loaddr, uintptr_t guest_size, brk = (uintptr_t)sbrk(0); if (!maps) { - ret = pgd_find_hole_fallback(guest_size, brk, align, offset); - return ret == -1 ? -1 : ret - guest_loaddr; + return pgd_find_hole_fallback(guest_size, brk, align, offset); } /* The first hole is before the first map entry. */ @@ -2325,7 +2629,7 @@ static uintptr_t pgb_find_hole(uintptr_t guest_loaddr, uintptr_t guest_size, /* Record the lowest successful match. */ if (ret < 0) { - ret = align_start - guest_loaddr; + ret = align_start; } /* If this hole contains the identity map, select it. */ if (align_start <= guest_loaddr && @@ -2339,6 +2643,12 @@ static uintptr_t pgb_find_hole(uintptr_t guest_loaddr, uintptr_t guest_size, } free_self_maps(maps); + if (ret != -1) { + qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %" PRIxPTR + " for %" PRIuPTR " bytes\n", + __func__, ret, guest_size); + } + return ret; } @@ -2358,7 +2668,7 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr, } loaddr &= -align; - if (ARM_COMMPAGE) { + if (HI_COMMPAGE) { /* * Extend the allocation to include the commpage. * For a 64-bit host, this is just 4GiB; for a 32-bit host we @@ -2369,14 +2679,16 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr, if (sizeof(uintptr_t) == 8 || loaddr >= 0x80000000u) { hiaddr = (uintptr_t) 4 << 30; } else { - offset = -(ARM_COMMPAGE & -align); + offset = -(HI_COMMPAGE & -align); } + } else if (LO_COMMPAGE != -1) { + loaddr = MIN(loaddr, LO_COMMPAGE & -align); } addr = pgb_find_hole(loaddr, hiaddr - loaddr, align, offset); if (addr == -1) { /* - * If ARM_COMMPAGE, there *might* be a non-consecutive allocation + * If HI_COMMPAGE, there *might* be a non-consecutive allocation * that can satisfy both. But as the normal arm32 link base address * is ~32k, and we extend down to include the commpage, making the * overhead only ~96k, this is unlikely. @@ -2388,6 +2700,9 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr, } guest_base = addr; + + qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %"PRIxPTR" for %" PRIuPTR" bytes\n", + __func__, addr, hiaddr - loaddr); } static void pgb_dynamic(const char *image_name, long align) @@ -2397,7 +2712,7 @@ static void pgb_dynamic(const char *image_name, long align) * All we need is a commpage that satisfies align. * If we do not need a commpage, leave guest_base == 0. */ - if (ARM_COMMPAGE) { + if (HI_COMMPAGE) { uintptr_t addr, commpage; /* 64-bit hosts should have used reserved_va. */ @@ -2407,7 +2722,7 @@ static void pgb_dynamic(const char *image_name, long align) * By putting the commpage at the first hole, that puts guest_base * just above that, and maximises the positive guest addresses. 
*/ - commpage = ARM_COMMPAGE & -align; + commpage = HI_COMMPAGE & -align; addr = pgb_find_hole(commpage, -commpage, align, 0); assert(addr != -1); guest_base = addr; @@ -2439,11 +2754,14 @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr, addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0); if (addr == MAP_FAILED || addr != test) { error_report("Unable to reserve 0x%lx bytes of virtual address " - "space at %p (%s) for use as guest address space (check your" + "space at %p (%s) for use as guest address space (check your " "virtual memory ulimit setting, min_mmap_addr or reserve less " "using -R option)", reserved_va, test, strerror(errno)); exit(EXIT_FAILURE); } + + qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %p for %lu bytes\n", + __func__, addr, reserved_va); } void probe_guest_base(const char *image_name, abi_ulong guest_loaddr, @@ -2678,6 +2996,7 @@ static void load_elf_image(const char *image_name, int image_fd, */ loaddr = -1, hiaddr = 0; info->alignment = 0; + info->exec_stack = EXSTACK_DEFAULT; for (i = 0; i < ehdr->e_phnum; ++i) { struct elf_phdr *eppnt = phdr + i; if (eppnt->p_type == PT_LOAD) { @@ -2720,6 +3039,8 @@ static void load_elf_image(const char *image_name, int image_fd, if (!parse_elf_properties(image_fd, info, eppnt, bprm_buf, &err)) { goto exit_errmsg; } + } else if (eppnt->p_type == PT_GNU_STACK) { + info->exec_stack = eppnt->p_flags & PF_X; } } @@ -2732,11 +3053,17 @@ static void load_elf_image(const char *image_name, int image_fd, * and the stack, lest they be placed immediately after * the data segment and block allocation from the brk. * - * 16MB is chosen as "large enough" without being so large - * as to allow the result to not fit with a 32-bit guest on - * a 32-bit host. + * 16MB is chosen as "large enough" without being so large as + * to allow the result to not fit with a 32-bit guest on a + * 32-bit host. However some 64 bit guests (e.g. s390x) + * attempt to place their heap further ahead and currently + * nothing stops them smashing into QEMUs address space. */ +#if TARGET_LONG_BITS == 64 + info->reserve_brk = 32 * MiB; +#else info->reserve_brk = 16 * MiB; +#endif hiaddr += info->reserve_brk; if (ehdr->e_type == ET_EXEC) { @@ -3246,6 +3573,22 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) #endif } + /* + * TODO: load a vdso, which would also contain the signal trampolines. + * Otherwise, allocate a private page to hold them. + */ + if (TARGET_ARCH_HAS_SIGTRAMP_PAGE) { + abi_long tramp_page = target_mmap(0, TARGET_PAGE_SIZE, + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANON, -1, 0); + if (tramp_page == -1) { + return -errno; + } + + setup_sigtramp(tramp_page); + target_mprotect(tramp_page, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC); + } + bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex, info, (elf_interpreter ? &interp_info : NULL)); info->start_stack = bprm->p; @@ -3882,7 +4225,7 @@ static int fill_note_info(struct elf_note_info *info, if (cpu == thread_cpu) { continue; } - fill_thread_info(info, (CPUArchState *)cpu->env_ptr); + fill_thread_info(info, cpu->env_ptr); } cpu_list_unlock(); diff --git a/linux-user/exit.c b/linux-user/exit.c index 527e29cbc1..fa6ef0b9b4 100644 --- a/linux-user/exit.c +++ b/linux-user/exit.c @@ -17,7 +17,9 @@ * along with this program; if not, see . 
*/ #include "qemu/osdep.h" +#include "exec/gdbstub.h" #include "qemu.h" +#include "user-internals.h" #ifdef CONFIG_GPROF #include #endif diff --git a/linux-user/fd-trans.c b/linux-user/fd-trans.c index 86b6f484d3..146aaaafaa 100644 --- a/linux-user/fd-trans.c +++ b/linux-user/fd-trans.c @@ -27,7 +27,9 @@ #include #endif #include "qemu.h" +#include "user-internals.h" #include "fd-trans.h" +#include "signal-common.h" enum { QEMU_IFLA_BR_UNSPEC, @@ -136,6 +138,9 @@ enum { QEMU_IFLA_PROP_LIST, QEMU_IFLA_ALT_IFNAME, QEMU_IFLA_PERM_ADDRESS, + QEMU_IFLA_PROTO_DOWN_REASON, + QEMU_IFLA_PARENT_DEV_NAME, + QEMU_IFLA_PARENT_DEV_BUS_NAME, QEMU___IFLA_MAX }; @@ -177,6 +182,8 @@ enum { QEMU_IFLA_BRPORT_BACKUP_PORT, QEMU_IFLA_BRPORT_MRP_RING_OPEN, QEMU_IFLA_BRPORT_MRP_IN_OPEN, + QEMU_IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT, + QEMU_IFLA_BRPORT_MCAST_EHT_HOSTS_CNT, QEMU___IFLA_BRPORT_MAX }; @@ -266,6 +273,37 @@ enum { QEMU___RTA_MAX }; +enum { + QEMU_IFLA_VF_STATS_RX_PACKETS, + QEMU_IFLA_VF_STATS_TX_PACKETS, + QEMU_IFLA_VF_STATS_RX_BYTES, + QEMU_IFLA_VF_STATS_TX_BYTES, + QEMU_IFLA_VF_STATS_BROADCAST, + QEMU_IFLA_VF_STATS_MULTICAST, + QEMU_IFLA_VF_STATS_PAD, + QEMU_IFLA_VF_STATS_RX_DROPPED, + QEMU_IFLA_VF_STATS_TX_DROPPED, + QEMU__IFLA_VF_STATS_MAX, +}; + +enum { + QEMU_IFLA_VF_UNSPEC, + QEMU_IFLA_VF_MAC, + QEMU_IFLA_VF_VLAN, + QEMU_IFLA_VF_TX_RATE, + QEMU_IFLA_VF_SPOOFCHK, + QEMU_IFLA_VF_LINK_STATE, + QEMU_IFLA_VF_RATE, + QEMU_IFLA_VF_RSS_QUERY_EN, + QEMU_IFLA_VF_STATS, + QEMU_IFLA_VF_TRUST, + QEMU_IFLA_VF_IB_NODE_GUID, + QEMU_IFLA_VF_IB_PORT_GUID, + QEMU_IFLA_VF_VLAN_LIST, + QEMU_IFLA_VF_BROADCAST, + QEMU__IFLA_VF_MAX, +}; + TargetFdTrans **target_fd_trans; QemuMutex target_fd_trans_lock; unsigned int target_fd_max; @@ -571,6 +609,8 @@ static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr, /* uin32_t */ case QEMU_IFLA_BRPORT_COST: case QEMU_IFLA_BRPORT_BACKUP_PORT: + case QEMU_IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT: + case QEMU_IFLA_BRPORT_MCAST_EHT_HOSTS_CNT: u32 = NLA_DATA(nlattr); *u32 = tswap32(*u32); break; @@ -803,6 +843,145 @@ static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr, return 0; } +static abi_long host_to_target_data_vlan_list_nlattr(struct nlattr *nlattr, + void *context) +{ + struct ifla_vf_vlan_info *vlan_info; + + switch (nlattr->nla_type) { + /* struct ifla_vf_vlan_info */ + case IFLA_VF_VLAN_INFO: + vlan_info = NLA_DATA(nlattr); + vlan_info->vf = tswap32(vlan_info->vf); + vlan_info->vlan = tswap32(vlan_info->vlan); + vlan_info->qos = tswap32(vlan_info->qos); + break; + default: + qemu_log_mask(LOG_UNIMP, "Unknown host VLAN LIST type: %d\n", + nlattr->nla_type); + break; + } + return 0; +} + +static abi_long host_to_target_data_vf_stats_nlattr(struct nlattr *nlattr, + void *context) +{ + uint64_t *u64; + + switch (nlattr->nla_type) { + /* uint64_t */ + case QEMU_IFLA_VF_STATS_RX_PACKETS: + case QEMU_IFLA_VF_STATS_TX_PACKETS: + case QEMU_IFLA_VF_STATS_RX_BYTES: + case QEMU_IFLA_VF_STATS_TX_BYTES: + case QEMU_IFLA_VF_STATS_BROADCAST: + case QEMU_IFLA_VF_STATS_MULTICAST: + case QEMU_IFLA_VF_STATS_PAD: + case QEMU_IFLA_VF_STATS_RX_DROPPED: + case QEMU_IFLA_VF_STATS_TX_DROPPED: + u64 = NLA_DATA(nlattr); + *u64 = tswap64(*u64); + break; + default: + qemu_log_mask(LOG_UNIMP, "Unknown host VF STATS type: %d\n", + nlattr->nla_type); + break; + } + return 0; +} + +static abi_long host_to_target_data_vfinfo_nlattr(struct nlattr *nlattr, + void *context) +{ + struct ifla_vf_mac *mac; + struct ifla_vf_vlan *vlan; + struct ifla_vf_vlan_info *vlan_info; + struct 
ifla_vf_spoofchk *spoofchk; + struct ifla_vf_rate *rate; + struct ifla_vf_link_state *link_state; + struct ifla_vf_rss_query_en *rss_query_en; + struct ifla_vf_trust *trust; + struct ifla_vf_guid *guid; + + switch (nlattr->nla_type) { + /* struct ifla_vf_mac */ + case QEMU_IFLA_VF_MAC: + mac = NLA_DATA(nlattr); + mac->vf = tswap32(mac->vf); + break; + /* struct ifla_vf_broadcast */ + case QEMU_IFLA_VF_BROADCAST: + break; + /* struct struct ifla_vf_vlan */ + case QEMU_IFLA_VF_VLAN: + vlan = NLA_DATA(nlattr); + vlan->vf = tswap32(vlan->vf); + vlan->vlan = tswap32(vlan->vlan); + vlan->qos = tswap32(vlan->qos); + break; + /* struct ifla_vf_vlan_info */ + case QEMU_IFLA_VF_TX_RATE: + vlan_info = NLA_DATA(nlattr); + vlan_info->vf = tswap32(vlan_info->vf); + vlan_info->vlan = tswap32(vlan_info->vlan); + vlan_info->qos = tswap32(vlan_info->qos); + break; + /* struct ifla_vf_spoofchk */ + case QEMU_IFLA_VF_SPOOFCHK: + spoofchk = NLA_DATA(nlattr); + spoofchk->vf = tswap32(spoofchk->vf); + spoofchk->setting = tswap32(spoofchk->setting); + break; + /* struct ifla_vf_rate */ + case QEMU_IFLA_VF_RATE: + rate = NLA_DATA(nlattr); + rate->vf = tswap32(rate->vf); + rate->min_tx_rate = tswap32(rate->min_tx_rate); + rate->max_tx_rate = tswap32(rate->max_tx_rate); + break; + /* struct ifla_vf_link_state */ + case QEMU_IFLA_VF_LINK_STATE: + link_state = NLA_DATA(nlattr); + link_state->vf = tswap32(link_state->vf); + link_state->link_state = tswap32(link_state->link_state); + break; + /* struct ifla_vf_rss_query_en */ + case QEMU_IFLA_VF_RSS_QUERY_EN: + rss_query_en = NLA_DATA(nlattr); + rss_query_en->vf = tswap32(rss_query_en->vf); + rss_query_en->setting = tswap32(rss_query_en->setting); + break; + /* struct ifla_vf_trust */ + case QEMU_IFLA_VF_TRUST: + trust = NLA_DATA(nlattr); + trust->vf = tswap32(trust->vf); + trust->setting = tswap32(trust->setting); + break; + /* struct ifla_vf_guid */ + case QEMU_IFLA_VF_IB_NODE_GUID: + case QEMU_IFLA_VF_IB_PORT_GUID: + guid = NLA_DATA(nlattr); + guid->vf = tswap32(guid->vf); + guid->guid = tswap32(guid->guid); + break; + /* nested */ + case QEMU_IFLA_VF_VLAN_LIST: + return host_to_target_for_each_nlattr(RTA_DATA(nlattr), nlattr->nla_len, + NULL, + host_to_target_data_vlan_list_nlattr); + case QEMU_IFLA_VF_STATS: + return host_to_target_for_each_nlattr(RTA_DATA(nlattr), nlattr->nla_len, + NULL, + host_to_target_data_vf_stats_nlattr); + default: + qemu_log_mask(LOG_UNIMP, "Unknown host VFINFO type: %d\n", + nlattr->nla_type); + break; + } + return 0; +} + static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr) { uint32_t *u32; @@ -816,9 +995,12 @@ static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr) case QEMU_IFLA_ADDRESS: case QEMU_IFLA_BROADCAST: case QEMU_IFLA_PERM_ADDRESS: + case QEMU_IFLA_PHYS_PORT_ID: /* string */ case QEMU_IFLA_IFNAME: case QEMU_IFLA_QDISC: + case QEMU_IFLA_PARENT_DEV_NAME: + case QEMU_IFLA_PARENT_DEV_BUS_NAME: break; /* uin8_t */ case QEMU_IFLA_OPERSTATE: @@ -937,6 +1119,10 @@ static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr) return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len, NULL, host_to_target_data_xdp_nlattr); + case QEMU_IFLA_VFINFO_LIST: + return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len, + NULL, + host_to_target_data_vfinfo_nlattr); default: qemu_log_mask(LOG_UNIMP, "Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type); @@ -1436,7 +1622,7 @@ TargetFdTrans target_signalfd_trans = { .host_to_target_data = host_to_target_data_signalfd, }; 
-static abi_long swap_data_eventfd(void *buf, size_t len) +static abi_long swap_data_u64(void *buf, size_t len) { uint64_t *counter = buf; int i; @@ -1454,13 +1640,16 @@ static abi_long swap_data_eventfd(void *buf, size_t len) } TargetFdTrans target_eventfd_trans = { - .host_to_target_data = swap_data_eventfd, - .target_to_host_data = swap_data_eventfd, + .host_to_target_data = swap_data_u64, + .target_to_host_data = swap_data_u64, }; -#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \ - (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \ - defined(__NR_inotify_init1)) +TargetFdTrans target_timerfd_trans = { + .host_to_target_data = swap_data_u64, +}; + +#if defined(CONFIG_INOTIFY) && (defined(TARGET_NR_inotify_init) || \ + defined(TARGET_NR_inotify_init1)) static abi_long host_to_target_data_inotify(void *buf, size_t len) { struct inotify_event *ev; diff --git a/linux-user/fd-trans.h b/linux-user/fd-trans.h index 1b9fa2041c..910faaf237 100644 --- a/linux-user/fd-trans.h +++ b/linux-user/fd-trans.h @@ -130,6 +130,7 @@ extern TargetFdTrans target_netlink_route_trans; extern TargetFdTrans target_netlink_audit_trans; extern TargetFdTrans target_signalfd_trans; extern TargetFdTrans target_eventfd_trans; +extern TargetFdTrans target_timerfd_trans; #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \ (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \ defined(__NR_inotify_init1)) diff --git a/linux-user/flatload.c b/linux-user/flatload.c index 3e5594cf89..e99570ca18 100644 --- a/linux-user/flatload.c +++ b/linux-user/flatload.c @@ -36,6 +36,9 @@ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" +#include "loader.h" +#include "user-mmap.h" #include "flat.h" #include "target_flat.h" @@ -805,7 +808,7 @@ int load_flt_binary(struct linux_binprm *bprm, struct image_info *info) /* Stash our initial stack pointer into the mm structure */ info->start_code = libinfo[0].start_code; - info->end_code = libinfo[0].start_code = libinfo[0].text_len; + info->end_code = libinfo[0].start_code + libinfo[0].text_len; info->start_data = libinfo[0].start_data; info->end_data = libinfo[0].end_data; info->start_brk = libinfo[0].start_brk; diff --git a/linux-user/generic/signal.h b/linux-user/generic/signal.h index 943bc1a1e2..6fd05b77bb 100644 --- a/linux-user/generic/signal.h +++ b/linux-user/generic/signal.h @@ -55,6 +55,21 @@ #define TARGET_SIG_UNBLOCK 1 /* for unblocking signals */ #define TARGET_SIG_SETMASK 2 /* for setting the signal mask */ +/* this struct defines a stack used during syscall handling */ +typedef struct target_sigaltstack { + abi_ulong ss_sp; + abi_int ss_flags; + abi_ulong ss_size; +} target_stack_t; + +/* + * sigaltstack controls + */ +#define TARGET_SS_ONSTACK 1 +#define TARGET_SS_DISABLE 2 + +#define TARGET_MINSIGSTKSZ 2048 + /* bit-flags */ #define TARGET_SS_AUTODISARM (1U << 31) /* disable sas during sighandling */ /* mask for all SS_xxx flags */ diff --git a/linux-user/generic/target_errno_defs.h b/linux-user/generic/target_errno_defs.h index 17d85e0b61..c2f9d403e7 100644 --- a/linux-user/generic/target_errno_defs.h +++ b/linux-user/generic/target_errno_defs.h @@ -147,21 +147,4 @@ #define TARGET_ERFKILL 132 /* Operation not possible due to RF-kill */ #define TARGET_EHWPOISON 133 /* Memory page has hardware error */ -/* QEMU internal, not visible to the guest. 
This is returned when a - * system call should be restarted, to tell the main loop that it - * should wind the guest PC backwards so it will re-execute the syscall - * after handling any pending signals. They match with the ones the guest - * kernel uses for the same purpose. - */ -#define TARGET_ERESTARTSYS 512 /* Restart system call (if SA_RESTART) */ - -/* QEMU internal, not visible to the guest. This is returned by the - * do_sigreturn() code after a successful sigreturn syscall, to indicate - * that it has correctly set the guest registers and so the main loop - * should not touch them. We use the value the guest would use for - * ERESTART_NOINTR (which is kernel internal) to guarantee that we won't - * clash with a valid guest errno now or in the future. - */ -#define TARGET_QEMU_ESIGRETURN 513 /* Return from signal */ - #endif diff --git a/linux-user/generic/target_mman.h b/linux-user/generic/target_mman.h new file mode 100644 index 0000000000..1436a3c543 --- /dev/null +++ b/linux-user/generic/target_mman.h @@ -0,0 +1,92 @@ +#ifndef LINUX_USER_TARGET_MMAN_H +#define LINUX_USER_TARGET_MMAN_H + +#ifndef TARGET_MADV_NORMAL +#define TARGET_MADV_NORMAL 0 +#endif + +#ifndef TARGET_MADV_RANDOM +#define TARGET_MADV_RANDOM 1 +#endif + +#ifndef TARGET_MADV_SEQUENTIAL +#define TARGET_MADV_SEQUENTIAL 2 +#endif + +#ifndef TARGET_MADV_WILLNEED +#define TARGET_MADV_WILLNEED 3 +#endif + +#ifndef TARGET_MADV_DONTNEED +#define TARGET_MADV_DONTNEED 4 +#endif + +#ifndef TARGET_MADV_FREE +#define TARGET_MADV_FREE 8 +#endif + +#ifndef TARGET_MADV_REMOVE +#define TARGET_MADV_REMOVE 9 +#endif + +#ifndef TARGET_MADV_DONTFORK +#define TARGET_MADV_DONTFORK 10 +#endif + +#ifndef TARGET_MADV_DOFORK +#define TARGET_MADV_DOFORK 11 +#endif + +#ifndef TARGET_MADV_MERGEABLE +#define TARGET_MADV_MERGEABLE 12 +#endif + +#ifndef TARGET_MADV_UNMERGEABLE +#define TARGET_MADV_UNMERGEABLE 13 +#endif + +#ifndef TARGET_MADV_HUGEPAGE +#define TARGET_MADV_HUGEPAGE 14 +#endif + +#ifndef TARGET_MADV_NOHUGEPAGE +#define TARGET_MADV_NOHUGEPAGE 15 +#endif + +#ifndef TARGET_MADV_DONTDUMP +#define TARGET_MADV_DONTDUMP 16 +#endif + +#ifndef TARGET_MADV_DODUMP +#define TARGET_MADV_DODUMP 17 +#endif + +#ifndef TARGET_MADV_WIPEONFORK +#define TARGET_MADV_WIPEONFORK 18 +#endif + +#ifndef TARGET_MADV_KEEPONFORK +#define TARGET_MADV_KEEPONFORK 19 +#endif + +#ifndef TARGET_MADV_COLD +#define TARGET_MADV_COLD 20 +#endif + +#ifndef TARGET_MADV_PAGEOUT +#define TARGET_MADV_PAGEOUT 21 +#endif + +#ifndef TARGET_MADV_POPULATE_READ +#define TARGET_MADV_POPULATE_READ 22 +#endif + +#ifndef TARGET_MADV_POPULATE_WRITE +#define TARGET_MADV_POPULATE_WRITE 23 +#endif + +#ifndef TARGET_MADV_DONTNEED_LOCKED +#define TARGET_MADV_DONTNEED_LOCKED 24 +#endif + +#endif diff --git a/linux-user/generic/target_prctl_unalign.h b/linux-user/generic/target_prctl_unalign.h new file mode 100644 index 0000000000..bc3b83af2a --- /dev/null +++ b/linux-user/generic/target_prctl_unalign.h @@ -0,0 +1,27 @@ +/* + * Generic prctl unalign functions for linux-user + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef GENERIC_TARGET_PRCTL_UNALIGN_H +#define GENERIC_TARGET_PRCTL_UNALIGN_H + +static abi_long do_prctl_get_unalign(CPUArchState *env, target_long arg2) +{ + CPUState *cs = env_cpu(env); + uint32_t res = PR_UNALIGN_NOPRINT; + if (cs->prctl_unalign_sigbus) { + res |= PR_UNALIGN_SIGBUS; + } + return put_user_u32(res, arg2); +} +#define do_prctl_get_unalign do_prctl_get_unalign + +static abi_long do_prctl_set_unalign(CPUArchState *env, target_long arg2) +{ + 
env_cpu(env)->prctl_unalign_sigbus = arg2 & PR_UNALIGN_SIGBUS; + return 0; +} +#define do_prctl_set_unalign do_prctl_set_unalign + +#endif /* GENERIC_TARGET_PRCTL_UNALIGN_H */ diff --git a/linux-user/generic/target_resource.h b/linux-user/generic/target_resource.h new file mode 100644 index 0000000000..37d3eb09b3 --- /dev/null +++ b/linux-user/generic/target_resource.h @@ -0,0 +1,38 @@ +/* + * Target definitions of RLIMIT_* constants. These may be overridden by an + * architecture specific header if needed. + */ + +#ifndef GENERIC_TARGET_RESOURCE_H +#define GENERIC_TARGET_RESOURCE_H + +struct target_rlimit { + abi_ulong rlim_cur; + abi_ulong rlim_max; +}; + +struct target_rlimit64 { + abi_ullong rlim_cur; + abi_ullong rlim_max; +}; + +#define TARGET_RLIM_INFINITY ((abi_ulong)-1) + +#define TARGET_RLIMIT_CPU 0 +#define TARGET_RLIMIT_FSIZE 1 +#define TARGET_RLIMIT_DATA 2 +#define TARGET_RLIMIT_STACK 3 +#define TARGET_RLIMIT_CORE 4 +#define TARGET_RLIMIT_RSS 5 +#define TARGET_RLIMIT_NPROC 6 +#define TARGET_RLIMIT_NOFILE 7 +#define TARGET_RLIMIT_MEMLOCK 8 +#define TARGET_RLIMIT_AS 9 +#define TARGET_RLIMIT_LOCKS 10 +#define TARGET_RLIMIT_SIGPENDING 11 +#define TARGET_RLIMIT_MSGQUEUE 12 +#define TARGET_RLIMIT_NICE 13 +#define TARGET_RLIMIT_RTPRIO 14 +#define TARGET_RLIMIT_RTTIME 15 + +#endif diff --git a/linux-user/generic/target_structs.h b/linux-user/generic/target_structs.h new file mode 100644 index 0000000000..09ff858b6e --- /dev/null +++ b/linux-user/generic/target_structs.h @@ -0,0 +1,58 @@ +/* + * Generic structures for linux-user + * + * Copyright (c) 2013 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ +#ifndef GENERIC_TARGET_STRUCTS_H +#define GENERIC_TARGET_STRUCTS_H + +struct target_ipc_perm { + abi_int __key; /* Key. */ + abi_uint uid; /* Owner's user ID. */ + abi_uint gid; /* Owner's group ID. */ + abi_uint cuid; /* Creator's user ID. */ + abi_uint cgid; /* Creator's group ID. */ + abi_ushort mode; /* Read/write permission. */ + abi_ushort __pad1; + abi_ushort __seq; /* Sequence number. 
*/ + abi_ushort __pad2; + abi_ulong __unused1; + abi_ulong __unused2; +}; + +struct target_shmid_ds { + struct target_ipc_perm shm_perm; /* operation permission struct */ + abi_long shm_segsz; /* size of segment in bytes */ + abi_ulong shm_atime; /* time of last shmat() */ +#if TARGET_ABI_BITS == 32 + abi_ulong __unused1; +#endif + abi_ulong shm_dtime; /* time of last shmdt() */ +#if TARGET_ABI_BITS == 32 + abi_ulong __unused2; +#endif + abi_ulong shm_ctime; /* time of last change by shmctl() */ +#if TARGET_ABI_BITS == 32 + abi_ulong __unused3; +#endif + abi_int shm_cpid; /* pid of creator */ + abi_int shm_lpid; /* pid of last shmop */ + abi_ulong shm_nattch; /* number of current attaches */ + abi_ulong __unused4; + abi_ulong __unused5; +}; + +#endif diff --git a/linux-user/hexagon/cpu_loop.c b/linux-user/hexagon/cpu_loop.c index bc34f5d7c3..b84e25bf71 100644 --- a/linux-user/hexagon/cpu_loop.c +++ b/linux-user/hexagon/cpu_loop.c @@ -20,14 +20,15 @@ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "cpu_loop-common.h" +#include "signal-common.h" #include "internal.h" void cpu_loop(CPUHexagonState *env) { CPUState *cs = env_cpu(env); - int trapnr, signum, sigcode; - target_ulong sigaddr; + int trapnr; target_ulong syscallnum; target_ulong ret; @@ -37,10 +38,6 @@ void cpu_loop(CPUHexagonState *env) cpu_exec_end(cs); process_queued_cpu_work(cs); - signum = 0; - sigcode = 0; - sigaddr = 0; - switch (trapnr) { case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ @@ -57,18 +54,12 @@ void cpu_loop(CPUHexagonState *env) env->gpr[4], env->gpr[5], 0, 0); - if (ret == -TARGET_ERESTARTSYS) { + if (ret == -QEMU_ERESTARTSYS) { env->gpr[HEX_REG_PC] -= 4; - } else if (ret != -TARGET_QEMU_ESIGRETURN) { + } else if (ret != -QEMU_ESIGRETURN) { env->gpr[0] = ret; } break; - case HEX_EXCP_FETCH_NO_UPAGE: - case HEX_EXCP_PRIV_NO_UREAD: - case HEX_EXCP_PRIV_NO_UWRITE: - signum = TARGET_SIGSEGV; - sigcode = TARGET_SEGV_MAPERR; - break; case EXCP_ATOMIC: cpu_exec_step_atomic(cs); break; @@ -77,17 +68,6 @@ void cpu_loop(CPUHexagonState *env) trapnr); exit(EXIT_FAILURE); } - - if (signum) { - target_siginfo_t info = { - .si_signo = signum, - .si_errno = 0, - .si_code = sigcode, - ._sifields._sigfault._addr = sigaddr - }; - queue_signal(env, info.si_signo, QEMU_SI_KILL, &info); - } - process_pending_signals(env); } } diff --git a/linux-user/hexagon/signal.c b/linux-user/hexagon/signal.c index 85eab5e943..ad4e3822d5 100644 --- a/linux-user/hexagon/signal.c +++ b/linux-user/hexagon/signal.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" @@ -161,6 +162,11 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, setup_ucontext(&frame->uc, env, set); tswap_siginfo(&frame->info, info); + /* + * The on-stack signal trampoline is no longer executed; + * however, the libgcc signal frame unwinding code checks + * for the presence of these two numeric magic values. 
+ */ install_sigtramp(frame->tramp); env->gpr[HEX_REG_PC] = ka->_sa_handler; @@ -170,8 +176,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, frame_addr + offsetof(struct target_rt_sigframe, info); env->gpr[HEX_REG_R02] = frame_addr + offsetof(struct target_rt_sigframe, uc); - env->gpr[HEX_REG_LR] = - frame_addr + offsetof(struct target_rt_sigframe, tramp); + env->gpr[HEX_REG_LR] = default_rt_sigreturn; return; @@ -263,10 +268,21 @@ long do_rt_sigreturn(CPUHexagonState *env) target_restore_altstack(&frame->uc.uc_stack, env); unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); return 0; } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 4 * 2, 0); + assert(tramp != NULL); + + default_rt_sigreturn = sigtramp_page; + install_sigtramp(tramp); + + unlock_user(tramp, sigtramp_page, 4 * 2); +} diff --git a/linux-user/hexagon/target_mman.h b/linux-user/hexagon/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/hexagon/target_mman.h @@ -0,0 +1 @@ +#include "../generic/target_mman.h" diff --git a/linux-user/hexagon/target_prctl.h b/linux-user/hexagon/target_prctl.h new file mode 100644 index 0000000000..eb53b31ad5 --- /dev/null +++ b/linux-user/hexagon/target_prctl.h @@ -0,0 +1 @@ +/* No special prctl support required. */ diff --git a/linux-user/hexagon/target_resource.h b/linux-user/hexagon/target_resource.h new file mode 100644 index 0000000000..227259594c --- /dev/null +++ b/linux-user/hexagon/target_resource.h @@ -0,0 +1 @@ +#include "../generic/target_resource.h" diff --git a/linux-user/hexagon/target_signal.h b/linux-user/hexagon/target_signal.h index 345cf1cbb8..68fb71312e 100644 --- a/linux-user/hexagon/target_signal.h +++ b/linux-user/hexagon/target_signal.h @@ -18,17 +18,8 @@ #ifndef HEXAGON_TARGET_SIGNAL_H #define HEXAGON_TARGET_SIGNAL_H -typedef struct target_sigaltstack { - abi_ulong ss_sp; - abi_int ss_flags; - abi_ulong ss_size; -} target_stack_t; - -#define TARGET_SS_ONSTACK 1 -#define TARGET_SS_DISABLE 2 - -#define TARGET_MINSIGSTKSZ 2048 - #include "../generic/signal.h" -#endif /* TARGET_SIGNAL_H */ +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + +#endif /* HEXAGON_TARGET_SIGNAL_H */ diff --git a/linux-user/hexagon/target_structs.h b/linux-user/hexagon/target_structs.h index c217d9442a..3a06f373c3 100644 --- a/linux-user/hexagon/target_structs.h +++ b/linux-user/hexagon/target_structs.h @@ -1,54 +1 @@ -/* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - */ - -/* - * Hexagon specific structures for linux-user - */ -#ifndef HEXAGON_TARGET_STRUCTS_H -#define HEXAGON_TARGET_STRUCTS_H - -struct target_ipc_perm { - abi_int __key; /* Key. */ - abi_uint uid; /* Owner's user ID. 
*/ - abi_uint gid; /* Owner's group ID. */ - abi_uint cuid; /* Creator's user ID. */ - abi_uint cgid; /* Creator's group ID. */ - abi_ushort mode; /* Read/write permission. */ - abi_ushort __pad1; - abi_ushort __seq; /* Sequence number. */ - abi_ushort __pad2; - abi_ulong __unused1; - abi_ulong __unused2; -}; - -struct target_shmid_ds { - struct target_ipc_perm shm_perm; /* operation permission struct */ - abi_long shm_segsz; /* size of segment in bytes */ - abi_ulong shm_atime; /* time of last shmat() */ - abi_ulong __unused1; - abi_ulong shm_dtime; /* time of last shmdt() */ - abi_ulong __unused2; - abi_ulong shm_ctime; /* time of last change by shmctl() */ - abi_ulong __unused3; - abi_int shm_cpid; /* pid of creator */ - abi_int shm_lpid; /* pid of last shmop */ - abi_ulong shm_nattch; /* number of current attaches */ - abi_ulong __unused4; - abi_ulong __unused5; -}; - -#endif +#include "../generic/target_structs.h" diff --git a/linux-user/host/aarch64/hostdep.h b/linux-user/host/aarch64/hostdep.h deleted file mode 100644 index a8d41a21ad..0000000000 --- a/linux-user/host/aarch64/hostdep.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * hostdep.h : things which are dependent on the host architecture - * - * * Written by Peter Maydell - * - * Copyright (C) 2016 Linaro Limited - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef AARCH64_HOSTDEP_H -#define AARCH64_HOSTDEP_H - -/* We have a safe-syscall.inc.S */ -#define HAVE_SAFE_SYSCALL - -#ifndef __ASSEMBLER__ - -/* These are defined by the safe-syscall.inc.S file */ -extern char safe_syscall_start[]; -extern char safe_syscall_end[]; - -/* Adjust the signal context to rewind out of safe-syscall if we're in it */ -static inline void rewind_if_in_safe_syscall(void *puc) -{ - ucontext_t *uc = puc; - __u64 *pcreg = &uc->uc_mcontext.pc; - - if (*pcreg > (uintptr_t)safe_syscall_start - && *pcreg < (uintptr_t)safe_syscall_end) { - *pcreg = (uintptr_t)safe_syscall_start; - } -} - -#endif /* __ASSEMBLER__ */ - -#endif diff --git a/linux-user/host/aarch64/safe-syscall.inc.S b/linux-user/host/aarch64/safe-syscall.inc.S deleted file mode 100644 index bc1f5a9792..0000000000 --- a/linux-user/host/aarch64/safe-syscall.inc.S +++ /dev/null @@ -1,75 +0,0 @@ -/* - * safe-syscall.inc.S : host-specific assembly fragment - * to handle signals occurring at the same time as system calls. - * This is intended to be included by linux-user/safe-syscall.S - * - * Written by Richard Henderson - * Copyright (C) 2016 Red Hat, Inc. - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - - .global safe_syscall_base - .global safe_syscall_start - .global safe_syscall_end - .type safe_syscall_base, #function - .type safe_syscall_start, #function - .type safe_syscall_end, #function - - /* This is the entry point for making a system call. The calling - * convention here is that of a C varargs function with the - * first argument an 'int *' to the signal_pending flag, the - * second one the system call number (as a 'long'), and all further - * arguments being syscall arguments (also 'long'). - * We return a long which is the syscall's return value, which - * may be negative-errno on failure. Conversion to the - * -1-and-errno-set convention is done by the calling wrapper. 
- */ -safe_syscall_base: - .cfi_startproc - /* The syscall calling convention isn't the same as the - * C one: - * we enter with x0 == *signal_pending - * x1 == syscall number - * x2 ... x7, (stack) == syscall arguments - * and return the result in x0 - * and the syscall instruction needs - * x8 == syscall number - * x0 ... x6 == syscall arguments - * and returns the result in x0 - * Shuffle everything around appropriately. - */ - mov x9, x0 /* signal_pending pointer */ - mov x8, x1 /* syscall number */ - mov x0, x2 /* syscall arguments */ - mov x1, x3 - mov x2, x4 - mov x3, x5 - mov x4, x6 - mov x5, x7 - ldr x6, [sp] - - /* This next sequence of code works in conjunction with the - * rewind_if_safe_syscall_function(). If a signal is taken - * and the interrupted PC is anywhere between 'safe_syscall_start' - * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. - * The code sequence must therefore be able to cope with this, and - * the syscall instruction must be the final one in the sequence. - */ -safe_syscall_start: - /* if signal_pending is non-zero, don't do the call */ - ldr w10, [x9] - cbnz w10, 0f - svc 0x0 -safe_syscall_end: - /* code path for having successfully executed the syscall */ - ret - -0: - /* code path when we didn't execute the syscall */ - mov x0, #-TARGET_ERESTARTSYS - ret - .cfi_endproc - - .size safe_syscall_base, .-safe_syscall_base diff --git a/linux-user/host/arm/hostdep.h b/linux-user/host/arm/hostdep.h deleted file mode 100644 index 9276fe6ceb..0000000000 --- a/linux-user/host/arm/hostdep.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * hostdep.h : things which are dependent on the host architecture - * - * * Written by Peter Maydell - * - * Copyright (C) 2016 Linaro Limited - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef ARM_HOSTDEP_H -#define ARM_HOSTDEP_H - -/* We have a safe-syscall.inc.S */ -#define HAVE_SAFE_SYSCALL - -#ifndef __ASSEMBLER__ - -/* These are defined by the safe-syscall.inc.S file */ -extern char safe_syscall_start[]; -extern char safe_syscall_end[]; - -/* Adjust the signal context to rewind out of safe-syscall if we're in it */ -static inline void rewind_if_in_safe_syscall(void *puc) -{ - ucontext_t *uc = puc; - unsigned long *pcreg = &uc->uc_mcontext.arm_pc; - - if (*pcreg > (uintptr_t)safe_syscall_start - && *pcreg < (uintptr_t)safe_syscall_end) { - *pcreg = (uintptr_t)safe_syscall_start; - } -} - -#endif /* __ASSEMBLER__ */ - -#endif diff --git a/linux-user/host/arm/safe-syscall.inc.S b/linux-user/host/arm/safe-syscall.inc.S deleted file mode 100644 index 88c4958504..0000000000 --- a/linux-user/host/arm/safe-syscall.inc.S +++ /dev/null @@ -1,90 +0,0 @@ -/* - * safe-syscall.inc.S : host-specific assembly fragment - * to handle signals occurring at the same time as system calls. - * This is intended to be included by linux-user/safe-syscall.S - * - * Written by Richard Henderson - * Copyright (C) 2016 Red Hat, Inc. - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - - .global safe_syscall_base - .global safe_syscall_start - .global safe_syscall_end - .type safe_syscall_base, %function - - .cfi_sections .debug_frame - - .text - .syntax unified - .arm - .align 2 - - /* This is the entry point for making a system call. 
The calling - * convention here is that of a C varargs function with the - * first argument an 'int *' to the signal_pending flag, the - * second one the system call number (as a 'long'), and all further - * arguments being syscall arguments (also 'long'). - * We return a long which is the syscall's return value, which - * may be negative-errno on failure. Conversion to the - * -1-and-errno-set convention is done by the calling wrapper. - */ -safe_syscall_base: - .fnstart - .cfi_startproc - mov r12, sp /* save entry stack */ - push { r4, r5, r6, r7, r8, lr } - .save { r4, r5, r6, r7, r8, lr } - .cfi_adjust_cfa_offset 24 - .cfi_rel_offset r4, 0 - .cfi_rel_offset r5, 4 - .cfi_rel_offset r6, 8 - .cfi_rel_offset r7, 12 - .cfi_rel_offset r8, 16 - .cfi_rel_offset lr, 20 - - /* The syscall calling convention isn't the same as the C one: - * we enter with r0 == *signal_pending - * r1 == syscall number - * r2, r3, [sp+0] ... [sp+12] == syscall arguments - * and return the result in r0 - * and the syscall instruction needs - * r7 == syscall number - * r0 ... r6 == syscall arguments - * and returns the result in r0 - * Shuffle everything around appropriately. - * Note the 16 bytes that we pushed to save registers. - */ - mov r8, r0 /* copy signal_pending */ - mov r7, r1 /* syscall number */ - mov r0, r2 /* syscall args */ - mov r1, r3 - ldm r12, { r2, r3, r4, r5, r6 } - - /* This next sequence of code works in conjunction with the - * rewind_if_safe_syscall_function(). If a signal is taken - * and the interrupted PC is anywhere between 'safe_syscall_start' - * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. - * The code sequence must therefore be able to cope with this, and - * the syscall instruction must be the final one in the sequence. - */ -safe_syscall_start: - /* if signal_pending is non-zero, don't do the call */ - ldr r12, [r8] /* signal_pending */ - tst r12, r12 - bne 1f - swi 0 -safe_syscall_end: - /* code path for having successfully executed the syscall */ - pop { r4, r5, r6, r7, r8, pc } - -1: - /* code path when we didn't execute the syscall */ - ldr r0, =-TARGET_ERESTARTSYS - pop { r4, r5, r6, r7, r8, pc } - .fnend - .cfi_endproc - - .size safe_syscall_base, .-safe_syscall_base diff --git a/linux-user/host/i386/hostdep.h b/linux-user/host/i386/hostdep.h deleted file mode 100644 index 073be74d87..0000000000 --- a/linux-user/host/i386/hostdep.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * hostdep.h : things which are dependent on the host architecture - * - * * Written by Peter Maydell - * - * Copyright (C) 2016 Linaro Limited - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. 
- */ - -#ifndef I386_HOSTDEP_H -#define I386_HOSTDEP_H - -/* We have a safe-syscall.inc.S */ -#define HAVE_SAFE_SYSCALL - -#ifndef __ASSEMBLER__ - -/* These are defined by the safe-syscall.inc.S file */ -extern char safe_syscall_start[]; -extern char safe_syscall_end[]; - -/* Adjust the signal context to rewind out of safe-syscall if we're in it */ -static inline void rewind_if_in_safe_syscall(void *puc) -{ - ucontext_t *uc = puc; - greg_t *pcreg = &uc->uc_mcontext.gregs[REG_EIP]; - - if (*pcreg > (uintptr_t)safe_syscall_start - && *pcreg < (uintptr_t)safe_syscall_end) { - *pcreg = (uintptr_t)safe_syscall_start; - } -} - -#endif /* __ASSEMBLER__ */ - -#endif diff --git a/linux-user/host/i386/safe-syscall.inc.S b/linux-user/host/i386/safe-syscall.inc.S deleted file mode 100644 index 9e58fc6504..0000000000 --- a/linux-user/host/i386/safe-syscall.inc.S +++ /dev/null @@ -1,100 +0,0 @@ -/* - * safe-syscall.inc.S : host-specific assembly fragment - * to handle signals occurring at the same time as system calls. - * This is intended to be included by linux-user/safe-syscall.S - * - * Written by Richard Henderson - * Copyright (C) 2016 Red Hat, Inc. - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - - .global safe_syscall_base - .global safe_syscall_start - .global safe_syscall_end - .type safe_syscall_base, @function - - /* This is the entry point for making a system call. The calling - * convention here is that of a C varargs function with the - * first argument an 'int *' to the signal_pending flag, the - * second one the system call number (as a 'long'), and all further - * arguments being syscall arguments (also 'long'). - * We return a long which is the syscall's return value, which - * may be negative-errno on failure. Conversion to the - * -1-and-errno-set convention is done by the calling wrapper. - */ -safe_syscall_base: - .cfi_startproc - push %ebp - .cfi_adjust_cfa_offset 4 - .cfi_rel_offset ebp, 0 - push %esi - .cfi_adjust_cfa_offset 4 - .cfi_rel_offset esi, 0 - push %edi - .cfi_adjust_cfa_offset 4 - .cfi_rel_offset edi, 0 - push %ebx - .cfi_adjust_cfa_offset 4 - .cfi_rel_offset ebx, 0 - - /* The syscall calling convention isn't the same as the C one: - * we enter with 0(%esp) == return address - * 4(%esp) == *signal_pending - * 8(%esp) == syscall number - * 12(%esp) ... 32(%esp) == syscall arguments - * and return the result in eax - * and the syscall instruction needs - * eax == syscall number - * ebx, ecx, edx, esi, edi, ebp == syscall arguments - * and returns the result in eax - * Shuffle everything around appropriately. - * Note the 16 bytes that we pushed to save registers. - */ - mov 12+16(%esp), %ebx /* the syscall arguments */ - mov 16+16(%esp), %ecx - mov 20+16(%esp), %edx - mov 24+16(%esp), %esi - mov 28+16(%esp), %edi - mov 32+16(%esp), %ebp - - /* This next sequence of code works in conjunction with the - * rewind_if_safe_syscall_function(). If a signal is taken - * and the interrupted PC is anywhere between 'safe_syscall_start' - * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. - * The code sequence must therefore be able to cope with this, and - * the syscall instruction must be the final one in the sequence. 
- */ -safe_syscall_start: - /* if signal_pending is non-zero, don't do the call */ - mov 4+16(%esp), %eax /* signal_pending */ - cmpl $0, (%eax) - jnz 1f - mov 8+16(%esp), %eax /* syscall number */ - int $0x80 -safe_syscall_end: - /* code path for having successfully executed the syscall */ - pop %ebx - .cfi_remember_state - .cfi_adjust_cfa_offset -4 - .cfi_restore ebx - pop %edi - .cfi_adjust_cfa_offset -4 - .cfi_restore edi - pop %esi - .cfi_adjust_cfa_offset -4 - .cfi_restore esi - pop %ebp - .cfi_adjust_cfa_offset -4 - .cfi_restore ebp - ret - -1: - /* code path when we didn't execute the syscall */ - .cfi_restore_state - mov $-TARGET_ERESTARTSYS, %eax - jmp safe_syscall_end - .cfi_endproc - - .size safe_syscall_base, .-safe_syscall_base diff --git a/linux-user/host/ia64/hostdep.h b/linux-user/host/ia64/hostdep.h deleted file mode 100644 index 263bf7658e..0000000000 --- a/linux-user/host/ia64/hostdep.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * hostdep.h : things which are dependent on the host architecture - * - * * Written by Peter Maydell - * - * Copyright (C) 2016 Linaro Limited - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef IA64_HOSTDEP_H -#define IA64_HOSTDEP_H - -#endif diff --git a/linux-user/host/mips/hostdep.h b/linux-user/host/mips/hostdep.h deleted file mode 100644 index ba111d75c3..0000000000 --- a/linux-user/host/mips/hostdep.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * hostdep.h : things which are dependent on the host architecture - * - * * Written by Peter Maydell - * - * Copyright (C) 2016 Linaro Limited - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef MIPS_HOSTDEP_H -#define MIPS_HOSTDEP_H - -#endif diff --git a/linux-user/host/ppc/hostdep.h b/linux-user/host/ppc/hostdep.h deleted file mode 100644 index 23d8bd9d47..0000000000 --- a/linux-user/host/ppc/hostdep.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * hostdep.h : things which are dependent on the host architecture - * - * * Written by Peter Maydell - * - * Copyright (C) 2016 Linaro Limited - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef PPC_HOSTDEP_H -#define PPC_HOSTDEP_H - -#endif diff --git a/linux-user/host/ppc64/hostdep.h b/linux-user/host/ppc64/hostdep.h deleted file mode 100644 index 98979ad917..0000000000 --- a/linux-user/host/ppc64/hostdep.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * hostdep.h : things which are dependent on the host architecture - * - * * Written by Peter Maydell - * - * Copyright (C) 2016 Linaro Limited - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. 
- */ - -#ifndef PPC64_HOSTDEP_H -#define PPC64_HOSTDEP_H - -/* We have a safe-syscall.inc.S */ -#define HAVE_SAFE_SYSCALL - -#ifndef __ASSEMBLER__ - -/* These are defined by the safe-syscall.inc.S file */ -extern char safe_syscall_start[]; -extern char safe_syscall_end[]; - -/* Adjust the signal context to rewind out of safe-syscall if we're in it */ -static inline void rewind_if_in_safe_syscall(void *puc) -{ - ucontext_t *uc = puc; - unsigned long *pcreg = &uc->uc_mcontext.gp_regs[PT_NIP]; - - if (*pcreg > (uintptr_t)safe_syscall_start - && *pcreg < (uintptr_t)safe_syscall_end) { - *pcreg = (uintptr_t)safe_syscall_start; - } -} - -#endif /* __ASSEMBLER__ */ - -#endif diff --git a/linux-user/host/ppc64/safe-syscall.inc.S b/linux-user/host/ppc64/safe-syscall.inc.S deleted file mode 100644 index 875133173b..0000000000 --- a/linux-user/host/ppc64/safe-syscall.inc.S +++ /dev/null @@ -1,96 +0,0 @@ -/* - * safe-syscall.inc.S : host-specific assembly fragment - * to handle signals occurring at the same time as system calls. - * This is intended to be included by linux-user/safe-syscall.S - * - * Written by Richard Henderson - * Copyright (C) 2016 Red Hat, Inc. - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - - .global safe_syscall_base - .global safe_syscall_start - .global safe_syscall_end - .type safe_syscall_base, @function - - .text - - /* This is the entry point for making a system call. The calling - * convention here is that of a C varargs function with the - * first argument an 'int *' to the signal_pending flag, the - * second one the system call number (as a 'long'), and all further - * arguments being syscall arguments (also 'long'). - * We return a long which is the syscall's return value, which - * may be negative-errno on failure. Conversion to the - * -1-and-errno-set convention is done by the calling wrapper. - */ -#if _CALL_ELF == 2 -safe_syscall_base: - .cfi_startproc - .localentry safe_syscall_base,0 -#else - .section ".opd","aw" - .align 3 -safe_syscall_base: - .quad .L.safe_syscall_base,.TOC.@tocbase,0 - .previous -.L.safe_syscall_base: - .cfi_startproc -#endif - /* We enter with r3 == *signal_pending - * r4 == syscall number - * r5 ... r10 == syscall arguments - * and return the result in r3 - * and the syscall instruction needs - * r0 == syscall number - * r3 ... r8 == syscall arguments - * and returns the result in r3 - * Shuffle everything around appropriately. - */ - std 14, 16(1) /* Preserve r14 in SP+16 */ - .cfi_offset 14, 16 - mr 14, 3 /* signal_pending */ - mr 0, 4 /* syscall number */ - mr 3, 5 /* syscall arguments */ - mr 4, 6 - mr 5, 7 - mr 6, 8 - mr 7, 9 - mr 8, 10 - - /* This next sequence of code works in conjunction with the - * rewind_if_safe_syscall_function(). If a signal is taken - * and the interrupted PC is anywhere between 'safe_syscall_start' - * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. - * The code sequence must therefore be able to cope with this, and - * the syscall instruction must be the final one in the sequence. 
- */ -safe_syscall_start: - /* if signal_pending is non-zero, don't do the call */ - lwz 12, 0(14) - cmpwi 0, 12, 0 - bne- 0f - sc -safe_syscall_end: - /* code path when we did execute the syscall */ - ld 14, 16(1) /* restore r14 to its original value */ - bnslr+ - - /* syscall failed; return negative errno */ - neg 3, 3 - blr - - /* code path when we didn't execute the syscall */ -0: addi 3, 0, -TARGET_ERESTARTSYS - ld 14, 16(1) /* restore r14 to its original value */ - blr - .cfi_endproc - -#if _CALL_ELF == 2 - .size safe_syscall_base, .-safe_syscall_base -#else - .size safe_syscall_base, .-.L.safe_syscall_base - .size .L.safe_syscall_base, .-.L.safe_syscall_base -#endif diff --git a/linux-user/host/riscv64/hostdep.h b/linux-user/host/riscv64/hostdep.h deleted file mode 100644 index 865f0fb9ff..0000000000 --- a/linux-user/host/riscv64/hostdep.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * hostdep.h : things which are dependent on the host architecture - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef RISCV64_HOSTDEP_H -#define RISCV64_HOSTDEP_H - -/* We have a safe-syscall.inc.S */ -#define HAVE_SAFE_SYSCALL - -#ifndef __ASSEMBLER__ - -/* These are defined by the safe-syscall.inc.S file */ -extern char safe_syscall_start[]; -extern char safe_syscall_end[]; - -/* Adjust the signal context to rewind out of safe-syscall if we're in it */ -static inline void rewind_if_in_safe_syscall(void *puc) -{ - ucontext_t *uc = puc; - unsigned long *pcreg = &uc->uc_mcontext.__gregs[REG_PC]; - - if (*pcreg > (uintptr_t)safe_syscall_start - && *pcreg < (uintptr_t)safe_syscall_end) { - *pcreg = (uintptr_t)safe_syscall_start; - } -} - -#endif /* __ASSEMBLER__ */ - -#endif diff --git a/linux-user/host/riscv64/safe-syscall.inc.S b/linux-user/host/riscv64/safe-syscall.inc.S deleted file mode 100644 index 9ca3fbfd1e..0000000000 --- a/linux-user/host/riscv64/safe-syscall.inc.S +++ /dev/null @@ -1,77 +0,0 @@ -/* - * safe-syscall.inc.S : host-specific assembly fragment - * to handle signals occurring at the same time as system calls. - * This is intended to be included by linux-user/safe-syscall.S - * - * Written by Richard Henderson - * Copyright (C) 2018 Linaro, Inc. - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - - .global safe_syscall_base - .global safe_syscall_start - .global safe_syscall_end - .type safe_syscall_base, @function - .type safe_syscall_start, @function - .type safe_syscall_end, @function - - /* - * This is the entry point for making a system call. The calling - * convention here is that of a C varargs function with the - * first argument an 'int *' to the signal_pending flag, the - * second one the system call number (as a 'long'), and all further - * arguments being syscall arguments (also 'long'). - * We return a long which is the syscall's return value, which - * may be negative-errno on failure. Conversion to the - * -1-and-errno-set convention is done by the calling wrapper. - */ -safe_syscall_base: - .cfi_startproc - /* - * The syscall calling convention is nearly the same as C: - * we enter with a0 == *signal_pending - * a1 == syscall number - * a2 ... a7 == syscall arguments - * and return the result in a0 - * and the syscall instruction needs - * a7 == syscall number - * a0 ... a5 == syscall arguments - * and returns the result in a0 - * Shuffle everything around appropriately. 
- */ - mv t0, a0 /* signal_pending pointer */ - mv t1, a1 /* syscall number */ - mv a0, a2 /* syscall arguments */ - mv a1, a3 - mv a2, a4 - mv a3, a5 - mv a4, a6 - mv a5, a7 - mv a7, t1 - - /* - * This next sequence of code works in conjunction with the - * rewind_if_safe_syscall_function(). If a signal is taken - * and the interrupted PC is anywhere between 'safe_syscall_start' - * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. - * The code sequence must therefore be able to cope with this, and - * the syscall instruction must be the final one in the sequence. - */ -safe_syscall_start: - /* If signal_pending is non-zero, don't do the call */ - lw t1, 0(t0) - bnez t1, 0f - scall -safe_syscall_end: - /* code path for having successfully executed the syscall */ - ret - -0: - /* code path when we didn't execute the syscall */ - li a0, -TARGET_ERESTARTSYS - ret - .cfi_endproc - - .size safe_syscall_base, .-safe_syscall_base diff --git a/linux-user/host/s390/hostdep.h b/linux-user/host/s390/hostdep.h deleted file mode 100644 index afcba5a16a..0000000000 --- a/linux-user/host/s390/hostdep.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * hostdep.h : things which are dependent on the host architecture - * - * * Written by Peter Maydell - * - * Copyright (C) 2016 Linaro Limited - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef S390_HOSTDEP_H -#define S390_HOSTDEP_H - -#endif diff --git a/linux-user/host/s390x/hostdep.h b/linux-user/host/s390x/hostdep.h deleted file mode 100644 index 4f0171f36f..0000000000 --- a/linux-user/host/s390x/hostdep.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * hostdep.h : things which are dependent on the host architecture - * - * * Written by Peter Maydell - * - * Copyright (C) 2016 Linaro Limited - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef S390X_HOSTDEP_H -#define S390X_HOSTDEP_H - -/* We have a safe-syscall.inc.S */ -#define HAVE_SAFE_SYSCALL - -#ifndef __ASSEMBLER__ - -/* These are defined by the safe-syscall.inc.S file */ -extern char safe_syscall_start[]; -extern char safe_syscall_end[]; - -/* Adjust the signal context to rewind out of safe-syscall if we're in it */ -static inline void rewind_if_in_safe_syscall(void *puc) -{ - ucontext_t *uc = puc; - unsigned long *pcreg = &uc->uc_mcontext.psw.addr; - - if (*pcreg > (uintptr_t)safe_syscall_start - && *pcreg < (uintptr_t)safe_syscall_end) { - *pcreg = (uintptr_t)safe_syscall_start; - } -} - -#endif /* __ASSEMBLER__ */ - -#endif diff --git a/linux-user/host/s390x/safe-syscall.inc.S b/linux-user/host/s390x/safe-syscall.inc.S deleted file mode 100644 index 414b44ad38..0000000000 --- a/linux-user/host/s390x/safe-syscall.inc.S +++ /dev/null @@ -1,90 +0,0 @@ -/* - * safe-syscall.inc.S : host-specific assembly fragment - * to handle signals occurring at the same time as system calls. - * This is intended to be included by linux-user/safe-syscall.S - * - * Written by Richard Henderson - * Copyright (C) 2016 Red Hat, Inc. - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - - .global safe_syscall_base - .global safe_syscall_start - .global safe_syscall_end - .type safe_syscall_base, @function - - /* This is the entry point for making a system call. 
The calling - * convention here is that of a C varargs function with the - * first argument an 'int *' to the signal_pending flag, the - * second one the system call number (as a 'long'), and all further - * arguments being syscall arguments (also 'long'). - * We return a long which is the syscall's return value, which - * may be negative-errno on failure. Conversion to the - * -1-and-errno-set convention is done by the calling wrapper. - */ -safe_syscall_base: - .cfi_startproc - stmg %r6,%r15,48(%r15) /* save all call-saved registers */ - .cfi_offset %r15,-40 - .cfi_offset %r14,-48 - .cfi_offset %r13,-56 - .cfi_offset %r12,-64 - .cfi_offset %r11,-72 - .cfi_offset %r10,-80 - .cfi_offset %r9,-88 - .cfi_offset %r8,-96 - .cfi_offset %r7,-104 - .cfi_offset %r6,-112 - lgr %r1,%r15 - lg %r0,8(%r15) /* load eos */ - aghi %r15,-160 - .cfi_adjust_cfa_offset 160 - stg %r1,0(%r15) /* store back chain */ - stg %r0,8(%r15) /* store eos */ - - /* The syscall calling convention isn't the same as the - * C one: - * we enter with r2 == *signal_pending - * r3 == syscall number - * r4, r5, r6, (stack) == syscall arguments - * and return the result in r2 - * and the syscall instruction needs - * r1 == syscall number - * r2 ... r7 == syscall arguments - * and returns the result in r2 - * Shuffle everything around appropriately. - */ - lgr %r8,%r2 /* signal_pending pointer */ - lgr %r1,%r3 /* syscall number */ - lgr %r2,%r4 /* syscall args */ - lgr %r3,%r5 - lgr %r4,%r6 - lmg %r5,%r7,320(%r15) - - /* This next sequence of code works in conjunction with the - * rewind_if_safe_syscall_function(). If a signal is taken - * and the interrupted PC is anywhere between 'safe_syscall_start' - * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. - * The code sequence must therefore be able to cope with this, and - * the syscall instruction must be the final one in the sequence. - */ -safe_syscall_start: - /* if signal_pending is non-zero, don't do the call */ - icm %r0,15,0(%r8) - jne 2f - svc 0 -safe_syscall_end: - -1: lg %r15,0(%r15) /* load back chain */ - .cfi_remember_state - .cfi_adjust_cfa_offset -160 - lmg %r6,%r15,48(%r15) /* load saved registers */ - br %r14 - .cfi_restore_state -2: lghi %r2, -TARGET_ERESTARTSYS - j 1b - .cfi_endproc - - .size safe_syscall_base, .-safe_syscall_base diff --git a/linux-user/host/sparc/hostdep.h b/linux-user/host/sparc/hostdep.h deleted file mode 100644 index 391ad923cf..0000000000 --- a/linux-user/host/sparc/hostdep.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * hostdep.h : things which are dependent on the host architecture - * - * * Written by Peter Maydell - * - * Copyright (C) 2016 Linaro Limited - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef SPARC_HOSTDEP_H -#define SPARC_HOSTDEP_H - -#endif diff --git a/linux-user/host/sparc64/hostdep.h b/linux-user/host/sparc64/hostdep.h deleted file mode 100644 index ce3968fca0..0000000000 --- a/linux-user/host/sparc64/hostdep.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * hostdep.h : things which are dependent on the host architecture - * - * * Written by Peter Maydell - * - * Copyright (C) 2016 Linaro Limited - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. 
- */ - -#ifndef SPARC64_HOSTDEP_H -#define SPARC64_HOSTDEP_H - -#endif diff --git a/linux-user/host/x32/hostdep.h b/linux-user/host/x32/hostdep.h deleted file mode 100644 index 2c2d6d37da..0000000000 --- a/linux-user/host/x32/hostdep.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * hostdep.h : things which are dependent on the host architecture - * - * * Written by Peter Maydell - * - * Copyright (C) 2016 Linaro Limited - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef X32_HOSTDEP_H -#define X32_HOSTDEP_H - -#endif diff --git a/linux-user/host/x86_64/hostdep.h b/linux-user/host/x86_64/hostdep.h deleted file mode 100644 index a4fefb5114..0000000000 --- a/linux-user/host/x86_64/hostdep.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * hostdep.h : things which are dependent on the host architecture - * - * * Written by Peter Maydell - * - * Copyright (C) 2016 Linaro Limited - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef X86_64_HOSTDEP_H -#define X86_64_HOSTDEP_H - -/* We have a safe-syscall.inc.S */ -#define HAVE_SAFE_SYSCALL - -#ifndef __ASSEMBLER__ - -/* These are defined by the safe-syscall.inc.S file */ -extern char safe_syscall_start[]; -extern char safe_syscall_end[]; - -/* Adjust the signal context to rewind out of safe-syscall if we're in it */ -static inline void rewind_if_in_safe_syscall(void *puc) -{ - ucontext_t *uc = puc; - greg_t *pcreg = &uc->uc_mcontext.gregs[REG_RIP]; - - if (*pcreg > (uintptr_t)safe_syscall_start - && *pcreg < (uintptr_t)safe_syscall_end) { - *pcreg = (uintptr_t)safe_syscall_start; - } -} - -#endif /* __ASSEMBLER__ */ - -#endif diff --git a/linux-user/hppa/cpu_loop.c b/linux-user/hppa/cpu_loop.c index 82d8183821..8ab1335106 100644 --- a/linux-user/hppa/cpu_loop.c +++ b/linux-user/hppa/cpu_loop.c @@ -19,7 +19,9 @@ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "cpu_loop-common.h" +#include "signal-common.h" static abi_ulong hppa_lws(CPUHPPAState *env) { @@ -108,7 +110,6 @@ static abi_ulong hppa_lws(CPUHPPAState *env) void cpu_loop(CPUHPPAState *env) { CPUState *cs = env_cpu(env); - target_siginfo_t info; abi_ulong ret; int trapnr; @@ -131,8 +132,8 @@ void cpu_loop(CPUHPPAState *env) env->iaoq_f = env->gr[31]; env->iaoq_b = env->gr[31] + 4; break; - case -TARGET_ERESTARTSYS: - case -TARGET_QEMU_ESIGRETURN: + case -QEMU_ERESTARTSYS: + case -QEMU_ESIGRETURN: break; } break; @@ -142,58 +143,48 @@ void cpu_loop(CPUHPPAState *env) env->iaoq_f = env->gr[31]; env->iaoq_b = env->gr[31] + 4; break; - case EXCP_ITLB_MISS: - case EXCP_DTLB_MISS: - case EXCP_NA_ITLB_MISS: - case EXCP_NA_DTLB_MISS: case EXCP_IMP: - case EXCP_DMP: - case EXCP_DMB: - case EXCP_PAGE_REF: - case EXCP_DMAR: - case EXCP_DMPI: - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - info.si_code = TARGET_SEGV_ACCERR; - info._sifields._sigfault._addr = env->cr[CR_IOR]; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; - case EXCP_UNALIGN: - info.si_signo = TARGET_SIGBUS; - info.si_errno = 0; - info.si_code = 0; - info._sifields._sigfault._addr = env->cr[CR_IOR]; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR, env->iaoq_f); break; case EXCP_ILL: + EXCP_DUMP(env, "qemu: EXCP_ILL exception %#x\n", trapnr); + force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->iaoq_f); + break; case EXCP_PRIV_OPR: + /* 
check for glibc ABORT_INSTRUCTION "iitlbp %r0,(%sr0, %r0)" */ + EXCP_DUMP(env, "qemu: EXCP_PRIV_OPR exception %#x\n", trapnr); + if (env->cr[CR_IIR] == 0x04000000) { + force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->iaoq_f); + } else { + force_sig_fault(TARGET_SIGILL, TARGET_ILL_PRVOPC, env->iaoq_f); + } + break; case EXCP_PRIV_REG: - info.si_signo = TARGET_SIGILL; - info.si_errno = 0; - info.si_code = TARGET_ILL_ILLOPN; - info._sifields._sigfault._addr = env->iaoq_f; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + EXCP_DUMP(env, "qemu: EXCP_PRIV_REG exception %#x\n", trapnr); + force_sig_fault(TARGET_SIGILL, TARGET_ILL_PRVREG, env->iaoq_f); break; case EXCP_OVERFLOW: + force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTOVF, env->iaoq_f); + break; case EXCP_COND: + force_sig_fault(TARGET_SIGFPE, TARGET_FPE_CONDTRAP, env->iaoq_f); + break; case EXCP_ASSIST: - info.si_signo = TARGET_SIGFPE; - info.si_errno = 0; - info.si_code = 0; - info._sifields._sigfault._addr = env->iaoq_f; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGFPE, 0, env->iaoq_f); + break; + case EXCP_BREAK: + EXCP_DUMP(env, "qemu: EXCP_BREAK exception %#x\n", trapnr); + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->iaoq_f & ~3); break; case EXCP_DEBUG: - info.si_signo = TARGET_SIGTRAP; - info.si_errno = 0; - info.si_code = TARGET_TRAP_BRKPT; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->iaoq_f); break; case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; default: - g_assert_not_reached(); + EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr); + abort(); } process_pending_signals(env); } diff --git a/linux-user/hppa/signal.c b/linux-user/hppa/signal.c index 0e266f472d..f253a15864 100644 --- a/linux-user/hppa/signal.c +++ b/linux-user/hppa/signal.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" @@ -40,7 +41,7 @@ struct target_ucontext { }; struct target_rt_sigframe { - abi_uint tramp[9]; + abi_uint tramp[2]; /* syscall restart return address */ target_siginfo_t info; struct target_ucontext uc; /* hidden location of upper halves of pa2.0 64-bit gregs */ @@ -48,23 +49,13 @@ struct target_rt_sigframe { static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env) { - int flags = 0; int i; - /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK). */ - - if (env->iaoq_f < TARGET_PAGE_SIZE) { - /* In the gateway page, executing a syscall. */ - flags |= 2; /* PARISC_SC_FLAG_IN_SYSCALL */ - __put_user(env->gr[31], &sc->sc_iaoq[0]); - __put_user(env->gr[31] + 4, &sc->sc_iaoq[1]); - } else { - __put_user(env->iaoq_f, &sc->sc_iaoq[0]); - __put_user(env->iaoq_b, &sc->sc_iaoq[1]); - } + __put_user(env->iaoq_f, &sc->sc_iaoq[0]); + __put_user(env->iaoq_b, &sc->sc_iaoq[1]); __put_user(0, &sc->sc_iasq[0]); __put_user(0, &sc->sc_iasq[1]); - __put_user(flags, &sc->sc_flags); + __put_user(0, &sc->sc_flags); __put_user(cpu_hppa_get_psw(env), &sc->sc_gr[0]); for (i = 1; i < 32; ++i) { @@ -100,9 +91,15 @@ static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc) __get_user(env->cr[CR_SAR], &sc->sc_sar); } -/* No, this doesn't look right, but it's copied straight from the kernel. 
*/ +#if TARGET_ABI_BITS == 32 +#define SIGFRAME 64 +#define FUNCTIONCALLFRAME 48 +#else +#define SIGFRAME 128 +#define FUNCTIONCALLFRAME 96 +#endif #define PARISC_RT_SIGFRAME_SIZE32 \ - ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64) + ((sizeof(struct target_rt_sigframe) + FUNCTIONCALLFRAME + SIGFRAME) & -SIGFRAME) void setup_rt_frame(int sig, struct target_sigaction *ka, target_siginfo_t *info, @@ -117,7 +114,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) { sp = (ts->sigaltstack_used.ss_sp + 0x7f) & ~0x3f; } - frame_addr = QEMU_ALIGN_UP(sp, 64); + frame_addr = QEMU_ALIGN_UP(sp, SIGFRAME); sp = frame_addr + PARISC_RT_SIGFRAME_SIZE32; trace_user_setup_rt_frame(env, frame_addr); @@ -138,14 +135,9 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, setup_sigcontext(&frame->uc.tuc_mcontext, env); - __put_user(0x34190000, frame->tramp + 0); /* ldi 0,%r25 */ - __put_user(0x3414015a, frame->tramp + 1); /* ldi __NR_rt_sigreturn,%r20 */ - __put_user(0xe4008200, frame->tramp + 2); /* be,l 0x100(%sr2,%r0) */ - __put_user(0x08000240, frame->tramp + 3); /* nop */ - unlock_user_struct(frame, frame_addr, 1); - env->gr[2] = h2g(frame->tramp); + env->gr[2] = default_rt_sigreturn; env->gr[30] = sp; env->gr[26] = sig; env->gr[25] = h2g(&frame->info); @@ -190,9 +182,29 @@ long do_rt_sigreturn(CPUArchState *env) target_restore_altstack(&frame->uc.tuc_stack, env); unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; badframe: force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; +} + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 6*4, 0); + abi_ulong SIGFRAME_CONTEXT_REGS32; + assert(tramp != NULL); + + SIGFRAME_CONTEXT_REGS32 = offsetof(struct target_rt_sigframe, uc.tuc_mcontext); + SIGFRAME_CONTEXT_REGS32 -= PARISC_RT_SIGFRAME_SIZE32; + + __put_user(SIGFRAME_CONTEXT_REGS32, tramp + 0); + __put_user(0x08000240, tramp + 1); /* nop - b/c dwarf2 unwind routines */ + __put_user(0x34190000, tramp + 2); /* ldi 0, %r25 (in_syscall=0) */ + __put_user(0x3414015a, tramp + 3); /* ldi __NR_rt_sigreturn, %r20 */ + __put_user(0xe4008200, tramp + 4); /* ble 0x100(%sr2, %r0) */ + __put_user(0x08000240, tramp + 5); /* nop */ + + default_rt_sigreturn = (sigtramp_page + 8) | 3; + unlock_user(tramp, sigtramp_page, 6*4); } diff --git a/linux-user/hppa/target_mman.h b/linux-user/hppa/target_mman.h new file mode 100644 index 0000000000..66dd9f7941 --- /dev/null +++ b/linux-user/hppa/target_mman.h @@ -0,0 +1,15 @@ +#ifndef HPPA_TARGET_MMAN_H +#define HPPA_TARGET_MMAN_H + +#define TARGET_MADV_MERGEABLE 65 +#define TARGET_MADV_UNMERGEABLE 66 +#define TARGET_MADV_HUGEPAGE 67 +#define TARGET_MADV_NOHUGEPAGE 68 +#define TARGET_MADV_DONTDUMP 69 +#define TARGET_MADV_DODUMP 70 +#define TARGET_MADV_WIPEONFORK 71 +#define TARGET_MADV_KEEPONFORK 72 + +#include "../generic/target_mman.h" + +#endif diff --git a/linux-user/hppa/target_prctl.h b/linux-user/hppa/target_prctl.h new file mode 100644 index 0000000000..5629ddbf39 --- /dev/null +++ b/linux-user/hppa/target_prctl.h @@ -0,0 +1 @@ +#include "../generic/target_prctl_unalign.h" diff --git a/linux-user/hppa/target_resource.h b/linux-user/hppa/target_resource.h new file mode 100644 index 0000000000..227259594c --- /dev/null +++ b/linux-user/hppa/target_resource.h @@ -0,0 +1 @@ +#include "../generic/target_resource.h" diff --git a/linux-user/hppa/target_signal.h 
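
One detail worth spelling out from the PARISC_RT_SIGFRAME_SIZE32 change above: adding FUNCTIONCALLFRAME plus a full SIGFRAME and then masking with & -SIGFRAME rounds the total up to the next SIGFRAME multiple while keeping room for the call-frame slack. A tiny self-check of that idiom (the 1000-byte frame size is a made-up example):

    #include <assert.h>
    #include <stddef.h>

    /* Sketch of the rounding used for PARISC_RT_SIGFRAME_SIZE32. */
    static size_t rt_sigframe_size(size_t frame, size_t funcall, size_t sigframe)
    {
        /* Add the call frame, bump by a full SIGFRAME, then clear the low
         * bits: the result is SIGFRAME-aligned and never smaller than
         * frame + funcall. */
        return (frame + funcall + sigframe) & -sigframe;
    }

    int main(void)
    {
        size_t sz = rt_sigframe_size(1000, 48, 64);   /* 32-bit ABI values */
        assert(sz % 64 == 0 && sz >= 1000 + 48);
        return 0;
    }
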
b/linux-user/hppa/target_signal.h index 7f525362e9..190bb3d653 100644 --- a/linux-user/hppa/target_signal.h +++ b/linux-user/hppa/target_signal.h @@ -64,11 +64,12 @@ typedef struct target_sigaltstack { #define TARGET_SA_NOCLDWAIT 0x00000080 #define TARGET_MINSIGSTKSZ 2048 -#define TARGET_SIGSTKSZ 8192 /* bit-flags */ #define TARGET_SS_AUTODISARM (1U << 31) /* disable sas during sighandling */ /* mask for all SS_xxx flags */ #define TARGET_SS_FLAG_BITS TARGET_SS_AUTODISARM +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* HPPA_TARGET_SIGNAL_H */ diff --git a/linux-user/hppa/target_syscall.h b/linux-user/hppa/target_syscall.h index 0018bcb5c4..9a8f8ca628 100644 --- a/linux-user/hppa/target_syscall.h +++ b/linux-user/hppa/target_syscall.h @@ -22,9 +22,10 @@ struct target_pt_regs { #define UNAME_MACHINE "parisc" #define UNAME_MINIMUM_RELEASE "2.6.32" #define TARGET_CLONE_BACKWARDS -#define TARGET_MINSIGSTKSZ 2048 #define TARGET_MCL_CURRENT 1 #define TARGET_MCL_FUTURE 2 #define TARGET_MCL_ONFAULT 4 +#define TARGET_DEFAULT_STACK_SIZE 80 * 1024 * 1024UL + #endif /* HPPA_TARGET_SYSCALL_H */ diff --git a/linux-user/i386/cpu_loop.c b/linux-user/i386/cpu_loop.c index f813e87294..865413c08f 100644 --- a/linux-user/i386/cpu_loop.c +++ b/linux-user/i386/cpu_loop.c @@ -18,9 +18,12 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu.h" +#include "qemu/timer.h" +#include "user-internals.h" #include "cpu_loop-common.h" +#include "signal-common.h" +#include "user-mmap.h" /***********************************************************/ /* CPUX86 core interface */ @@ -81,17 +84,6 @@ static void set_idt(int n, unsigned int dpl) } #endif -static void gen_signal(CPUX86State *env, int sig, int code, abi_ptr addr) -{ - target_siginfo_t info = { - .si_signo = sig, - .si_code = code, - ._sifields._sigfault._addr = addr - }; - - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); -} - #ifdef TARGET_X86_64 static bool write_ok_or_segv(CPUX86State *env, abi_ptr addr, size_t len) { @@ -104,7 +96,7 @@ static bool write_ok_or_segv(CPUX86State *env, abi_ptr addr, size_t len) } env->error_code = PG_ERROR_W_MASK | PG_ERROR_U_MASK; - gen_signal(env, TARGET_SIGSEGV, TARGET_SEGV_MAPERR, addr); + force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR, addr); return false; } @@ -145,7 +137,7 @@ static void emulate_vsyscall(CPUX86State *env) } /* - * Validate the the pointer arguments. + * Validate the pointer arguments. */ switch (syscall) { case TARGET_NR_gettimeofday: @@ -177,8 +169,8 @@ static void emulate_vsyscall(CPUX86State *env) ret = do_syscall(env, syscall, env->regs[R_EDI], env->regs[R_ESI], env->regs[R_EDX], env->regs[10], env->regs[8], env->regs[9], 0, 0); - g_assert(ret != -TARGET_ERESTARTSYS); - g_assert(ret != -TARGET_QEMU_ESIGRETURN); + g_assert(ret != -QEMU_ERESTARTSYS); + g_assert(ret != -QEMU_ESIGRETURN); if (ret == -TARGET_EFAULT) { goto sigsegv; } @@ -190,16 +182,25 @@ static void emulate_vsyscall(CPUX86State *env) return; sigsegv: - /* Like force_sig(SIGSEGV). 
*/ - gen_signal(env, TARGET_SIGSEGV, TARGET_SI_KERNEL, 0); + force_sig(TARGET_SIGSEGV); } #endif +static bool maybe_handle_vm86_trap(CPUX86State *env, int trapnr) +{ +#ifndef TARGET_X86_64 + if (env->eflags & VM_MASK) { + handle_vm86_trap(env, trapnr); + return true; + } +#endif + return false; +} + void cpu_loop(CPUX86State *env) { CPUState *cs = env_cpu(env); int trapnr; - abi_ulong pc; abi_ulong ret; for(;;) { @@ -220,9 +221,9 @@ void cpu_loop(CPUX86State *env) env->regs[R_EDI], env->regs[R_EBP], 0, 0); - if (ret == -TARGET_ERESTARTSYS) { + if (ret == -QEMU_ERESTARTSYS) { env->eip -= 2; - } else if (ret != -TARGET_QEMU_ESIGRETURN) { + } else if (ret != -QEMU_ESIGRETURN) { env->regs[R_EAX] = ret; } break; @@ -238,9 +239,9 @@ void cpu_loop(CPUX86State *env) env->regs[8], env->regs[9], 0, 0); - if (ret == -TARGET_ERESTARTSYS) { + if (ret == -QEMU_ERESTARTSYS) { env->eip -= 2; - } else if (ret != -TARGET_QEMU_ESIGRETURN) { + } else if (ret != -QEMU_ESIGRETURN) { env->regs[R_EAX] = ret; } break; @@ -252,73 +253,61 @@ void cpu_loop(CPUX86State *env) #endif case EXCP0B_NOSEG: case EXCP0C_STACK: - gen_signal(env, TARGET_SIGBUS, TARGET_SI_KERNEL, 0); + force_sig(TARGET_SIGBUS); break; case EXCP0D_GPF: /* XXX: potential problem if ABI32 */ -#ifndef TARGET_X86_64 - if (env->eflags & VM_MASK) { - handle_vm86_fault(env); + if (maybe_handle_vm86_trap(env, trapnr)) { break; } -#endif - gen_signal(env, TARGET_SIGSEGV, TARGET_SI_KERNEL, 0); + force_sig(TARGET_SIGSEGV); break; case EXCP0E_PAGE: - gen_signal(env, TARGET_SIGSEGV, - (env->error_code & 1 ? - TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR), - env->cr[2]); + force_sig_fault(TARGET_SIGSEGV, + (env->error_code & PG_ERROR_P_MASK ? + TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR), + env->cr[2]); break; case EXCP00_DIVZ: -#ifndef TARGET_X86_64 - if (env->eflags & VM_MASK) { - handle_vm86_trap(env, trapnr); + if (maybe_handle_vm86_trap(env, trapnr)) { break; } -#endif - gen_signal(env, TARGET_SIGFPE, TARGET_FPE_INTDIV, env->eip); + force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTDIV, env->eip); break; case EXCP01_DB: - case EXCP03_INT3: -#ifndef TARGET_X86_64 - if (env->eflags & VM_MASK) { - handle_vm86_trap(env, trapnr); + if (maybe_handle_vm86_trap(env, trapnr)) { break; } -#endif - if (trapnr == EXCP01_DB) { - gen_signal(env, TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->eip); - } else { - gen_signal(env, TARGET_SIGTRAP, TARGET_SI_KERNEL, 0); + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->eip); + break; + case EXCP03_INT3: + if (maybe_handle_vm86_trap(env, trapnr)) { + break; } + force_sig(TARGET_SIGTRAP); break; case EXCP04_INTO: case EXCP05_BOUND: -#ifndef TARGET_X86_64 - if (env->eflags & VM_MASK) { - handle_vm86_trap(env, trapnr); + if (maybe_handle_vm86_trap(env, trapnr)) { break; } -#endif - gen_signal(env, TARGET_SIGSEGV, TARGET_SI_KERNEL, 0); + force_sig(TARGET_SIGSEGV); break; case EXCP06_ILLOP: - gen_signal(env, TARGET_SIGILL, TARGET_ILL_ILLOPN, env->eip); + force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPN, env->eip); break; case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; case EXCP_DEBUG: - gen_signal(env, TARGET_SIGTRAP, TARGET_TRAP_BRKPT, 0); + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->eip); break; case EXCP_ATOMIC: cpu_exec_step_atomic(cs); break; default: - pc = env->segs[R_CS].base + env->eip; - EXCP_DUMP(env, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n", - (long)pc, trapnr); + EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", + trapnr); abort(); } 
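
In the EXCP0E_PAGE case above, the raw `& 1` test on the page-fault error code becomes a named PG_ERROR_P_MASK check: a set present bit means the page was mapped but the access was not permitted (SEGV_ACCERR), a clear bit means there was no mapping at all (SEGV_MAPERR). A standalone sketch of that mapping, using the standard Linux si_code values:

    #include <assert.h>

    #define PG_ERROR_P_BIT  0x1   /* "present" bit of the x86 fault code */
    #define SEGV_MAPERR_VAL 1
    #define SEGV_ACCERR_VAL 2

    static int page_fault_si_code(unsigned int error_code)
    {
        /* Present page with bad permissions vs. unmapped address. */
        return (error_code & PG_ERROR_P_BIT) ? SEGV_ACCERR_VAL
                                             : SEGV_MAPERR_VAL;
    }

    int main(void)
    {
        assert(page_fault_si_code(0x2) == SEGV_MAPERR_VAL); /* write, unmapped */
        assert(page_fault_si_code(0x7) == SEGV_ACCERR_VAL); /* user write, protection */
        return 0;
    }
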
process_pending_signals(env); diff --git a/linux-user/i386/signal.c b/linux-user/i386/signal.c index 841cd19651..60fa07d6f9 100644 --- a/linux-user/i386/signal.c +++ b/linux-user/i386/signal.c @@ -18,11 +18,16 @@ */ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" /* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */ +#define TARGET_FP_XSTATE_MAGIC1 0x46505853U /* FPXS */ +#define TARGET_FP_XSTATE_MAGIC2 0x46505845U /* FPXE */ +#define TARGET_FP_XSTATE_MAGIC2_SIZE 4 + struct target_fpreg { uint16_t significand[4]; uint16_t exponent; @@ -38,6 +43,35 @@ struct target_xmmreg { uint32_t element[4]; }; +struct target_fpx_sw_bytes { + uint32_t magic1; + uint32_t extended_size; + uint64_t xfeatures; + uint32_t xstate_size; + uint32_t reserved[7]; +}; +QEMU_BUILD_BUG_ON(sizeof(struct target_fpx_sw_bytes) != 12*4); + +struct target_fpstate_fxsave { + /* FXSAVE format */ + uint16_t cw; + uint16_t sw; + uint16_t twd; + uint16_t fop; + uint64_t rip; + uint64_t rdp; + uint32_t mxcsr; + uint32_t mxcsr_mask; + uint32_t st_space[32]; + uint32_t xmm_space[64]; + uint32_t hw_reserved[12]; + struct target_fpx_sw_bytes sw_reserved; + uint8_t xfeatures[]; +}; +#define TARGET_FXSAVE_SIZE sizeof(struct target_fpstate_fxsave) +QEMU_BUILD_BUG_ON(TARGET_FXSAVE_SIZE != 512); +QEMU_BUILD_BUG_ON(offsetof(struct target_fpstate_fxsave, sw_reserved) != 464); + struct target_fpstate_32 { /* Regular FPU environment */ uint32_t cw; @@ -50,35 +84,21 @@ struct target_fpstate_32 { struct target_fpreg st[8]; uint16_t status; uint16_t magic; /* 0xffff = regular FPU data only */ - - /* FXSR FPU environment */ - uint32_t _fxsr_env[6]; /* FXSR FPU env is ignored */ - uint32_t mxcsr; - uint32_t reserved; - struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */ - struct target_xmmreg xmm[8]; - uint32_t padding[56]; + struct target_fpstate_fxsave fxsave; }; -struct target_fpstate_64 { - /* FXSAVE format */ - uint16_t cw; - uint16_t sw; - uint16_t twd; - uint16_t fop; - uint64_t rip; - uint64_t rdp; - uint32_t mxcsr; - uint32_t mxcsr_mask; - uint32_t st_space[32]; - uint32_t xmm_space[64]; - uint32_t reserved[24]; -}; +/* + * For simplicity, setup_frame aligns struct target_fpstate_32 to + * 16 bytes, so ensure that the FXSAVE area is also aligned. + */ +QEMU_BUILD_BUG_ON(offsetof(struct target_fpstate_32, fxsave) & 15); #ifndef TARGET_X86_64 # define target_fpstate target_fpstate_32 +# define TARGET_FPSTATE_FXSAVE_OFFSET offsetof(struct target_fpstate_32, fxsave) #else -# define target_fpstate target_fpstate_64 +# define target_fpstate target_fpstate_fxsave +# define TARGET_FPSTATE_FXSAVE_OFFSET 0 #endif struct target_sigcontext_32 { @@ -162,10 +182,25 @@ struct sigframe { abi_ulong pretcode; int sig; struct target_sigcontext sc; - struct target_fpstate fpstate; + /* + * The actual fpstate is placed after retcode[] below, to make + * room for the variable-sized xsave data. The older unused fpstate + * has to be kept to avoid changing the offset of extramask[], which + * is part of the ABI. + */ + struct target_fpstate fpstate_unused; abi_ulong extramask[TARGET_NSIG_WORDS-1]; char retcode[8]; + + /* + * This field will be 16-byte aligned in memory. Applying QEMU_ALIGNED + * to it ensures that the base of the frame has an appropriate alignment + * too. 
+ */ + struct target_fpstate fpstate QEMU_ALIGNED(8); }; +#define TARGET_SIGFRAME_FXSAVE_OFFSET ( \ + offsetof(struct sigframe, fpstate) + TARGET_FPSTATE_FXSAVE_OFFSET) struct rt_sigframe { abi_ulong pretcode; @@ -174,26 +209,62 @@ struct rt_sigframe { abi_ulong puc; struct target_siginfo info; struct target_ucontext uc; - struct target_fpstate fpstate; char retcode[8]; + struct target_fpstate fpstate QEMU_ALIGNED(8); }; - +#define TARGET_RT_SIGFRAME_FXSAVE_OFFSET ( \ + offsetof(struct rt_sigframe, fpstate) + TARGET_FPSTATE_FXSAVE_OFFSET) #else struct rt_sigframe { abi_ulong pretcode; struct target_ucontext uc; struct target_siginfo info; - struct target_fpstate fpstate; + struct target_fpstate fpstate QEMU_ALIGNED(16); }; - +#define TARGET_RT_SIGFRAME_FXSAVE_OFFSET ( \ + offsetof(struct rt_sigframe, fpstate) + TARGET_FPSTATE_FXSAVE_OFFSET) #endif /* * Set up a signal frame. */ -/* XXX: save x87 state */ +static void xsave_sigcontext(CPUX86State *env, struct target_fpstate_fxsave *fxsave, + abi_ulong fxsave_addr) +{ + if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { + /* fxsave_addr must be 16 byte aligned for fxsave */ + assert(!(fxsave_addr & 0xf)); + + cpu_x86_fxsave(env, fxsave_addr); + __put_user(0, &fxsave->sw_reserved.magic1); + } else { + uint32_t xstate_size = xsave_area_size(env->xcr0, false); + uint32_t xfeatures_size = xstate_size - TARGET_FXSAVE_SIZE; + + /* + * extended_size is the offset from fpstate_addr to right after the end + * of the extended save states. On 32-bit that includes the legacy + * FSAVE area. + */ + uint32_t extended_size = TARGET_FPSTATE_FXSAVE_OFFSET + + xstate_size + TARGET_FP_XSTATE_MAGIC2_SIZE; + + /* fxsave_addr must be 64 byte aligned for xsave */ + assert(!(fxsave_addr & 0x3f)); + + /* Zero the header, XSAVE *adds* features to an existing save state. */ + memset(fxsave->xfeatures, 0, 64); + cpu_x86_xsave(env, fxsave_addr); + __put_user(TARGET_FP_XSTATE_MAGIC1, &fxsave->sw_reserved.magic1); + __put_user(extended_size, &fxsave->sw_reserved.extended_size); + __put_user(env->xcr0, &fxsave->sw_reserved.xfeatures); + __put_user(xstate_size, &fxsave->sw_reserved.xstate_size); + __put_user(TARGET_FP_XSTATE_MAGIC2, (uint32_t *) &fxsave->xfeatures[xfeatures_size]); + } +} + static void setup_sigcontext(struct target_sigcontext *sc, struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask, abi_ulong fpstate_addr) @@ -225,13 +296,14 @@ static void setup_sigcontext(struct target_sigcontext *sc, cpu_x86_fsave(env, fpstate_addr, 1); fpstate->status = fpstate->sw; - magic = 0xffff; + if (!(env->features[FEAT_1_EDX] & CPUID_FXSR)) { + magic = 0xffff; + } else { + xsave_sigcontext(env, &fpstate->fxsave, + fpstate_addr + TARGET_FPSTATE_FXSAVE_OFFSET); + magic = 0; + } __put_user(magic, &fpstate->magic); - __put_user(fpstate_addr, &sc->fpstate); - - /* non-iBCS2 extensions.. */ - __put_user(mask, &sc->oldmask); - __put_user(env->cr[2], &sc->cr2); #else __put_user(env->regs[R_EDI], &sc->rdi); __put_user(env->regs[R_ESI], &sc->rsi); @@ -261,15 +333,14 @@ static void setup_sigcontext(struct target_sigcontext *sc, __put_user((uint16_t)0, &sc->fs); __put_user(env->segs[R_SS].selector, &sc->ss); + xsave_sigcontext(env, fpstate, fpstate_addr); +#endif + + __put_user(fpstate_addr, &sc->fpstate); + + /* non-iBCS2 extensions.. 
*/ __put_user(mask, &sc->oldmask); __put_user(env->cr[2], &sc->cr2); - - /* fpstate_addr must be 16 byte aligned for fxsave */ - assert(!(fpstate_addr & 0xf)); - - cpu_x86_fxsave(env, fpstate_addr); - __put_user(fpstate_addr, &sc->fpstate); -#endif } /* @@ -277,7 +348,7 @@ static void setup_sigcontext(struct target_sigcontext *sc, */ static inline abi_ulong -get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size) +get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t fxsave_offset) { unsigned long esp; @@ -301,14 +372,34 @@ get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size) #endif } -#ifndef TARGET_X86_64 - return (esp - frame_size) & -8ul; -#else - return ((esp - frame_size) & (~15ul)) - 8; -#endif + if (!(env->features[FEAT_1_EDX] & CPUID_FXSR)) { + return (esp - (fxsave_offset + TARGET_FXSAVE_SIZE)) & -8ul; + } else if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { + return ((esp - TARGET_FXSAVE_SIZE) & -16ul) - fxsave_offset; + } else { + size_t xstate_size = + xsave_area_size(env->xcr0, false) + TARGET_FP_XSTATE_MAGIC2_SIZE; + return ((esp - xstate_size) & -64ul) - fxsave_offset; + } } #ifndef TARGET_X86_64 +static void install_sigtramp(void *tramp) +{ + /* This is popl %eax ; movl $syscall,%eax ; int $0x80 */ + __put_user(0xb858, (uint16_t *)(tramp + 0)); + __put_user(TARGET_NR_sigreturn, (int32_t *)(tramp + 2)); + __put_user(0x80cd, (uint16_t *)(tramp + 6)); +} + +static void install_rt_sigtramp(void *tramp) +{ + /* This is movl $syscall,%eax ; int $0x80 */ + __put_user(0xb8, (uint8_t *)(tramp + 0)); + __put_user(TARGET_NR_rt_sigreturn, (int32_t *)(tramp + 1)); + __put_user(0x80cd, (uint16_t *)(tramp + 5)); +} + /* compare linux/arch/i386/kernel/signal.c:setup_frame() */ void setup_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUX86State *env) @@ -317,7 +408,7 @@ void setup_frame(int sig, struct target_sigaction *ka, struct sigframe *frame; int i; - frame_addr = get_sigframe(ka, env, sizeof(*frame)); + frame_addr = get_sigframe(ka, env, TARGET_SIGFRAME_FXSAVE_OFFSET); trace_user_setup_frame(env, frame_addr); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) @@ -337,16 +428,9 @@ void setup_frame(int sig, struct target_sigaction *ka, if (ka->sa_flags & TARGET_SA_RESTORER) { __put_user(ka->sa_restorer, &frame->pretcode); } else { - uint16_t val16; - abi_ulong retcode_addr; - retcode_addr = frame_addr + offsetof(struct sigframe, retcode); - __put_user(retcode_addr, &frame->pretcode); - /* This is popl %eax ; movl $,%eax ; int $0x80 */ - val16 = 0xb858; - __put_user(val16, (uint16_t *)(frame->retcode+0)); - __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2)); - val16 = 0x80cd; - __put_user(val16, (uint16_t *)(frame->retcode+6)); + /* This is no longer used, but is retained for ABI compatibility. */ + install_sigtramp(frame->retcode); + __put_user(default_sigreturn, &frame->pretcode); } /* Set up registers for signal handler */ @@ -380,7 +464,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, struct rt_sigframe *frame; int i; - frame_addr = get_sigframe(ka, env, sizeof(*frame)); + frame_addr = get_sigframe(ka, env, TARGET_RT_SIGFRAME_FXSAVE_OFFSET); trace_user_setup_rt_frame(env, frame_addr); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) @@ -399,7 +483,11 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, } /* Create the ucontext. 
*/ - __put_user(0, &frame->uc.tuc_flags); + if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) { + __put_user(1, &frame->uc.tuc_flags); + } else { + __put_user(0, &frame->uc.tuc_flags); + } __put_user(0, &frame->uc.tuc_link); target_save_altstack(&frame->uc.tuc_stack, env); setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env, @@ -411,24 +499,18 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, /* Set up to return from userspace. If provided, use a stub already in userspace. */ -#ifndef TARGET_X86_64 if (ka->sa_flags & TARGET_SA_RESTORER) { __put_user(ka->sa_restorer, &frame->pretcode); } else { - uint16_t val16; - addr = frame_addr + offsetof(struct rt_sigframe, retcode); - __put_user(addr, &frame->pretcode); - /* This is movl $,%eax ; int $0x80 */ - __put_user(0xb8, (char *)(frame->retcode+0)); - __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1)); - val16 = 0x80cd; - __put_user(val16, (uint16_t *)(frame->retcode+5)); - } +#ifdef TARGET_X86_64 + /* For x86_64, SA_RESTORER is required ABI. */ + goto give_sigsegv; #else - /* XXX: Would be slightly better to return -EFAULT here if test fails - assert(ka->sa_flags & TARGET_SA_RESTORER); */ - __put_user(ka->sa_restorer, &frame->pretcode); + /* This is no longer used, but is retained for ABI compatibility. */ + install_rt_sigtramp(frame->retcode); + __put_user(default_rt_sigreturn, &frame->pretcode); #endif + } /* Set up registers for signal handler */ env->regs[R_ESP] = frame_addr; @@ -459,10 +541,37 @@ give_sigsegv: force_sigsegv(sig); } +static int xrstor_sigcontext(CPUX86State *env, struct target_fpstate_fxsave *fxsave, + abi_ulong fxsave_addr) +{ + if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) { + uint32_t extended_size = tswapl(fxsave->sw_reserved.extended_size); + uint32_t xstate_size = tswapl(fxsave->sw_reserved.xstate_size); + uint32_t xfeatures_size = xstate_size - TARGET_FXSAVE_SIZE; + + /* Linux checks MAGIC2 using xstate_size, not extended_size. 
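
The XSAVE additions hang together around a small piece of layout arithmetic: the legacy FXSAVE block is 512 bytes, sw_reserved inside it records xstate_size and extended_size, and FP_XSTATE_MAGIC2 occupies the four bytes immediately after the xsave data. The sketch below just walks that arithmetic; the 832-byte xstate_size is a hypothetical value (512-byte legacy area + 64-byte header + 256-byte AVX state), whereas the real code derives it from xsave_area_size(env->xcr0, false).

    #include <stdint.h>
    #include <stdio.h>

    #define FXSAVE_SIZE           512u
    #define FP_XSTATE_MAGIC2_SIZE 4u

    int main(void)
    {
        uint32_t fxsave_offset = 0;    /* 0 on x86_64; on i386 the legacy
                                          FSAVE header precedes the FXSAVE
                                          area inside the fpstate */
        uint32_t xstate_size   = 832u; /* hypothetical XSAVE area size */

        uint32_t magic2_off    = fxsave_offset + xstate_size;
        uint32_t extended_size = fxsave_offset + xstate_size
                               + FP_XSTATE_MAGIC2_SIZE;

        printf("MAGIC2 at fpstate+%u, extended_size=%u, xfeatures bytes=%u\n",
               magic2_off, extended_size, xstate_size - FXSAVE_SIZE);
        return 0;
    }

On restore, both magic values have to check out before the extended area is trusted, which is exactly what the new xrstor_sigcontext() does before falling back to plain FXRSTOR.
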
*/ + if (tswapl(fxsave->sw_reserved.magic1) == TARGET_FP_XSTATE_MAGIC1 && + extended_size >= TARGET_FPSTATE_FXSAVE_OFFSET + xstate_size + TARGET_FP_XSTATE_MAGIC2_SIZE) { + if (!access_ok(env_cpu(env), VERIFY_READ, fxsave_addr, + extended_size - TARGET_FPSTATE_FXSAVE_OFFSET)) { + return 1; + } + if (tswapl(*(uint32_t *) &fxsave->xfeatures[xfeatures_size]) == TARGET_FP_XSTATE_MAGIC2) { + cpu_x86_xrstor(env, fxsave_addr); + return 0; + } + } + /* fall through to fxrstor */ + } + + cpu_x86_fxrstor(env, fxsave_addr); + return 0; +} + static int restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc) { - unsigned int err = 0; + int err = 1; abi_ulong fpstate_addr; unsigned int tmpflags; @@ -513,20 +622,28 @@ restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc) fpstate_addr = tswapl(sc->fpstate); if (fpstate_addr != 0) { - if (!access_ok(env_cpu(env), VERIFY_READ, fpstate_addr, - sizeof(struct target_fpstate))) { - goto badframe; + struct target_fpstate *fpstate; + if (!lock_user_struct(VERIFY_READ, fpstate, fpstate_addr, + sizeof(struct target_fpstate))) { + return err; } #ifndef TARGET_X86_64 - cpu_x86_frstor(env, fpstate_addr, 1); + if (!(env->features[FEAT_1_EDX] & CPUID_FXSR)) { + cpu_x86_frstor(env, fpstate_addr, 1); + err = 0; + } else { + err = xrstor_sigcontext(env, &fpstate->fxsave, + fpstate_addr + TARGET_FPSTATE_FXSAVE_OFFSET); + } #else - cpu_x86_fxrstor(env, fpstate_addr); + err = xrstor_sigcontext(env, fpstate, fpstate_addr); #endif + unlock_user_struct(fpstate, fpstate_addr, 0); + } else { + err = 0; } return err; -badframe: - return 1; } /* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */ @@ -555,12 +672,12 @@ long do_sigreturn(CPUX86State *env) if (restore_sigcontext(env, &frame->sc)) goto badframe; unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; } #endif @@ -584,10 +701,26 @@ long do_rt_sigreturn(CPUX86State *env) target_restore_altstack(&frame->uc.tuc_stack, env); unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; } + +#ifndef TARGET_X86_64 +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint16_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0); + assert(tramp != NULL); + + default_sigreturn = sigtramp_page; + install_sigtramp(tramp); + + default_rt_sigreturn = sigtramp_page + 8; + install_rt_sigtramp(tramp + 8); + + unlock_user(tramp, sigtramp_page, 2 * 8); +} +#endif diff --git a/linux-user/i386/target_elf.h b/linux-user/i386/target_elf.h index 1c6142e7da..238a9aba73 100644 --- a/linux-user/i386/target_elf.h +++ b/linux-user/i386/target_elf.h @@ -9,6 +9,6 @@ #define I386_TARGET_ELF_H static inline const char *cpu_get_model(uint32_t eflags) { - return "qemu32"; + return "max"; } #endif diff --git a/linux-user/i386/target_mman.h b/linux-user/i386/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/i386/target_mman.h @@ -0,0 +1 @@ +#include "../generic/target_mman.h" diff --git a/linux-user/i386/target_prctl.h b/linux-user/i386/target_prctl.h new file mode 100644 index 0000000000..eb53b31ad5 --- /dev/null +++ b/linux-user/i386/target_prctl.h @@ -0,0 +1 @@ +/* No special prctl support required. 
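
For reference, here is the byte picture that the i386 setup_sigtramp() writes into the sigtramp page: two 8-byte stubs, `popl %eax; movl $__NR_sigreturn,%eax; int $0x80` at offset 0 and `movl $__NR_rt_sigreturn,%eax; int $0x80` at offset 8. The standalone sketch below assembles the same bytes by hand; the syscall numbers 119 and 173 are the usual i386 values, and the memcpy of the immediates assumes a little-endian host, which is all this illustration needs.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint8_t page[16] = { 0 };
        uint32_t nr_sigreturn = 119, nr_rt_sigreturn = 173;

        /* offset 0: popl %eax ; movl $NR,%eax ; int $0x80  (8 bytes) */
        page[0] = 0x58;
        page[1] = 0xb8;
        memcpy(&page[2], &nr_sigreturn, 4);
        page[6] = 0xcd;
        page[7] = 0x80;

        /* offset 8: movl $NR,%eax ; int $0x80  (7 bytes + 1 pad) */
        page[8] = 0xb8;
        memcpy(&page[9], &nr_rt_sigreturn, 4);
        page[13] = 0xcd;
        page[14] = 0x80;

        for (int i = 0; i < 16; i++) {
            printf("%02x%s", page[i], (i == 7 || i == 15) ? "\n" : " ");
        }
        return 0;
    }
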
*/ diff --git a/linux-user/i386/target_resource.h b/linux-user/i386/target_resource.h new file mode 100644 index 0000000000..227259594c --- /dev/null +++ b/linux-user/i386/target_resource.h @@ -0,0 +1 @@ +#include "../generic/target_resource.h" diff --git a/linux-user/i386/target_signal.h b/linux-user/i386/target_signal.h index 50361af874..9315cba241 100644 --- a/linux-user/i386/target_signal.h +++ b/linux-user/i386/target_signal.h @@ -1,25 +1,9 @@ #ifndef I386_TARGET_SIGNAL_H #define I386_TARGET_SIGNAL_H -/* this struct defines a stack used during syscall handling */ - -typedef struct target_sigaltstack { - abi_ulong ss_sp; - abi_int ss_flags; - abi_ulong ss_size; -} target_stack_t; - - -/* - * sigaltstack controls - */ -#define TARGET_SS_ONSTACK 1 -#define TARGET_SS_DISABLE 2 - -#define TARGET_MINSIGSTKSZ 2048 -#define TARGET_SIGSTKSZ 8192 - #include "../generic/signal.h" #define TARGET_ARCH_HAS_SETUP_FRAME +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* I386_TARGET_SIGNAL_H */ diff --git a/linux-user/i386/target_structs.h b/linux-user/i386/target_structs.h index e22847fd20..3a06f373c3 100644 --- a/linux-user/i386/target_structs.h +++ b/linux-user/i386/target_structs.h @@ -1,58 +1 @@ -/* - * i386 specific structures for linux-user - * - * Copyright (c) 2013 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#ifndef I386_TARGET_STRUCTS_H -#define I386_TARGET_STRUCTS_H - -struct target_ipc_perm { - abi_int __key; /* Key. */ - abi_uint uid; /* Owner's user ID. */ - abi_uint gid; /* Owner's group ID. */ - abi_uint cuid; /* Creator's user ID. */ - abi_uint cgid; /* Creator's group ID. */ - abi_ushort mode; /* Read/write permission. */ - abi_ushort __pad1; - abi_ushort __seq; /* Sequence number. 
*/ - abi_ushort __pad2; - abi_ulong __unused1; - abi_ulong __unused2; -}; - -struct target_shmid_ds { - struct target_ipc_perm shm_perm; /* operation permission struct */ - abi_long shm_segsz; /* size of segment in bytes */ - abi_ulong shm_atime; /* time of last shmat() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused1; -#endif - abi_ulong shm_dtime; /* time of last shmdt() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused2; -#endif - abi_ulong shm_ctime; /* time of last change by shmctl() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused3; -#endif - abi_int shm_cpid; /* pid of creator */ - abi_int shm_lpid; /* pid of last shmop */ - abi_ulong shm_nattch; /* number of current attaches */ - abi_ulong __unused4; - abi_ulong __unused5; -}; - -#endif +#include "../generic/target_structs.h" diff --git a/linux-user/i386/target_syscall.h b/linux-user/i386/target_syscall.h index ed356b3908..aaade06b13 100644 --- a/linux-user/i386/target_syscall.h +++ b/linux-user/i386/target_syscall.h @@ -150,7 +150,6 @@ struct target_vm86plus_struct { #define UNAME_MINIMUM_RELEASE "2.6.32" #define TARGET_CLONE_BACKWARDS -#define TARGET_MINSIGSTKSZ 2048 #define TARGET_MCL_CURRENT 1 #define TARGET_MCL_FUTURE 2 #define TARGET_MCL_ONFAULT 4 diff --git a/linux-user/include/host/aarch64/host-signal.h b/linux-user/include/host/aarch64/host-signal.h new file mode 100644 index 0000000000..be079684a2 --- /dev/null +++ b/linux-user/include/host/aarch64/host-signal.h @@ -0,0 +1,87 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef AARCH64_HOST_SIGNAL_H +#define AARCH64_HOST_SIGNAL_H + +/* The third argument to a SA_SIGINFO handler is ucontext_t. */ +typedef ucontext_t host_sigcontext; + +/* Pre-3.16 kernel headers don't have these, so provide fallback definitions */ +#ifndef ESR_MAGIC +#define ESR_MAGIC 0x45535201 +struct esr_context { + struct _aarch64_ctx head; + uint64_t esr; +}; +#endif + +static inline struct _aarch64_ctx *first_ctx(host_sigcontext *uc) +{ + return (struct _aarch64_ctx *)&uc->uc_mcontext.__reserved; +} + +static inline struct _aarch64_ctx *next_ctx(struct _aarch64_ctx *hdr) +{ + return (struct _aarch64_ctx *)((char *)hdr + hdr->size); +} + +static inline uintptr_t host_signal_pc(host_sigcontext *uc) +{ + return uc->uc_mcontext.pc; +} + +static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc) +{ + uc->uc_mcontext.pc = pc; +} + +static inline void *host_signal_mask(host_sigcontext *uc) +{ + return &uc->uc_sigmask; +} + +static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc) +{ + struct _aarch64_ctx *hdr; + uint32_t insn; + + /* Find the esr_context, which has the WnR bit in it */ + for (hdr = first_ctx(uc); hdr->magic; hdr = next_ctx(hdr)) { + if (hdr->magic == ESR_MAGIC) { + struct esr_context const *ec = (struct esr_context const *)hdr; + uint64_t esr = ec->esr; + + /* For data aborts ESR.EC is 0b10010x: then bit 6 is the WnR bit */ + return extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1; + } + } + + /* + * Fall back to parsing instructions; will only be needed + * for really ancient (pre-3.16) kernels. 
+ */ + insn = *(uint32_t *)host_signal_pc(uc); + + return (insn & 0xbfff0000) == 0x0c000000 /* C3.3.1 */ + || (insn & 0xbfe00000) == 0x0c800000 /* C3.3.2 */ + || (insn & 0xbfdf0000) == 0x0d000000 /* C3.3.3 */ + || (insn & 0xbfc00000) == 0x0d800000 /* C3.3.4 */ + || (insn & 0x3f400000) == 0x08000000 /* C3.3.6 */ + || (insn & 0x3bc00000) == 0x39000000 /* C3.3.13 */ + || (insn & 0x3fc00000) == 0x3d800000 /* ... 128bit */ + /* Ignore bits 10, 11 & 21, controlling indexing. */ + || (insn & 0x3bc00000) == 0x38000000 /* C3.3.8-12 */ + || (insn & 0x3fe00000) == 0x3c800000 /* ... 128bit */ + /* Ignore bits 23 & 24, controlling indexing. */ + || (insn & 0x3a400000) == 0x28000000; /* C3.3.7,14-16 */ +} + +#endif diff --git a/linux-user/include/host/alpha/host-signal.h b/linux-user/include/host/alpha/host-signal.h new file mode 100644 index 0000000000..4f9e2abc4b --- /dev/null +++ b/linux-user/include/host/alpha/host-signal.h @@ -0,0 +1,55 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef ALPHA_HOST_SIGNAL_H +#define ALPHA_HOST_SIGNAL_H + +/* The third argument to a SA_SIGINFO handler is ucontext_t. */ +typedef ucontext_t host_sigcontext; + +static inline uintptr_t host_signal_pc(host_sigcontext *uc) +{ + return uc->uc_mcontext.sc_pc; +} + +static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc) +{ + uc->uc_mcontext.sc_pc = pc; +} + +static inline void *host_signal_mask(host_sigcontext *uc) +{ + return &uc->uc_sigmask; +} + +static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc) +{ + uint32_t *pc = (uint32_t *)host_signal_pc(uc); + uint32_t insn = *pc; + + /* XXX: need kernel patch to get write flag faster */ + switch (insn >> 26) { + case 0x0d: /* stw */ + case 0x0e: /* stb */ + case 0x0f: /* stq_u */ + case 0x24: /* stf */ + case 0x25: /* stg */ + case 0x26: /* sts */ + case 0x27: /* stt */ + case 0x2c: /* stl */ + case 0x2d: /* stq */ + case 0x2e: /* stl_c */ + case 0x2f: /* stq_c */ + return true; + } + return false; +} + +#endif diff --git a/linux-user/include/host/arm/host-signal.h b/linux-user/include/host/arm/host-signal.h new file mode 100644 index 0000000000..faba496d24 --- /dev/null +++ b/linux-user/include/host/arm/host-signal.h @@ -0,0 +1,43 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef ARM_HOST_SIGNAL_H +#define ARM_HOST_SIGNAL_H + +/* The third argument to a SA_SIGINFO handler is ucontext_t. */ +typedef ucontext_t host_sigcontext; + +static inline uintptr_t host_signal_pc(host_sigcontext *uc) +{ + return uc->uc_mcontext.arm_pc; +} + +static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc) +{ + uc->uc_mcontext.arm_pc = pc; +} + +static inline void *host_signal_mask(host_sigcontext *uc) +{ + return &uc->uc_sigmask; +} + +static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc) +{ + /* + * In the FSR, bit 11 is WnR, assuming a v6 or + * later processor. On v5 we will always report + * this as a read, which will fail later. 
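
All of these new host-signal.h headers follow the same recipe: when the siginfo/ucontext does not say whether the fault was a write, read the instruction at the faulting PC and classify it. The Alpha variant above is the simplest case, since the major opcode in bits 31..26 is enough. The standalone sketch below reproduces just that classifier; the test encodings are synthetic, with only the opcode field populated, which is all the classifier inspects.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Store classification by Alpha major opcode (values as in the table above). */
    static bool alpha_insn_is_store(uint32_t insn)
    {
        switch (insn >> 26) {
        case 0x0d: /* stw */   case 0x0e: /* stb */   case 0x0f: /* stq_u */
        case 0x24: /* stf */   case 0x25: /* stg */   case 0x26: /* sts */
        case 0x27: /* stt */   case 0x2c: /* stl */   case 0x2d: /* stq */
        case 0x2e: /* stl_c */ case 0x2f: /* stq_c */
            return true;
        }
        return false;
    }

    int main(void)
    {
        assert(alpha_insn_is_store(0x2cu << 26));   /* stl */
        assert(!alpha_insn_is_store(0x28u << 26));  /* ldl */
        return 0;
    }
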
+ */ + uint32_t fsr = uc->uc_mcontext.error_code; + return extract32(fsr, 11, 1); +} + +#endif diff --git a/linux-user/include/host/i386/host-signal.h b/linux-user/include/host/i386/host-signal.h new file mode 100644 index 0000000000..e2b64f077f --- /dev/null +++ b/linux-user/include/host/i386/host-signal.h @@ -0,0 +1,38 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef I386_HOST_SIGNAL_H +#define I386_HOST_SIGNAL_H + +/* The third argument to a SA_SIGINFO handler is ucontext_t. */ +typedef ucontext_t host_sigcontext; + +static inline uintptr_t host_signal_pc(host_sigcontext *uc) +{ + return uc->uc_mcontext.gregs[REG_EIP]; +} + +static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc) +{ + uc->uc_mcontext.gregs[REG_EIP] = pc; +} + +static inline void *host_signal_mask(host_sigcontext *uc) +{ + return &uc->uc_sigmask; +} + +static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc) +{ + return uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe + && (uc->uc_mcontext.gregs[REG_ERR] & 0x2); +} + +#endif diff --git a/linux-user/include/host/loongarch64/host-signal.h b/linux-user/include/host/loongarch64/host-signal.h new file mode 100644 index 0000000000..d33c3fc03e --- /dev/null +++ b/linux-user/include/host/loongarch64/host-signal.h @@ -0,0 +1,93 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 WANG Xuerui + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef LOONGARCH64_HOST_SIGNAL_H +#define LOONGARCH64_HOST_SIGNAL_H + +/* The third argument to a SA_SIGINFO handler is ucontext_t. */ +typedef ucontext_t host_sigcontext; + +static inline uintptr_t host_signal_pc(host_sigcontext *uc) +{ + return uc->uc_mcontext.__pc; +} + +static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc) +{ + uc->uc_mcontext.__pc = pc; +} + +static inline void *host_signal_mask(host_sigcontext *uc) +{ + return &uc->uc_sigmask; +} + +static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc) +{ + const uint32_t *pinsn = (const uint32_t *)host_signal_pc(uc); + uint32_t insn = pinsn[0]; + + /* Detect store by reading the instruction at the program counter. 
*/ + switch ((insn >> 26) & 0b111111) { + case 0b001000: /* {ll,sc}.[wd] */ + switch ((insn >> 24) & 0b11) { + case 0b01: /* sc.w */ + case 0b11: /* sc.d */ + return true; + } + break; + case 0b001001: /* {ld,st}ox4.[wd] ({ld,st}ptr.[wd]) */ + switch ((insn >> 24) & 0b11) { + case 0b01: /* stox4.w (stptr.w) */ + case 0b11: /* stox4.d (stptr.d) */ + return true; + } + break; + case 0b001010: /* {ld,st}.* family */ + switch ((insn >> 22) & 0b1111) { + case 0b0100: /* st.b */ + case 0b0101: /* st.h */ + case 0b0110: /* st.w */ + case 0b0111: /* st.d */ + case 0b1101: /* fst.s */ + case 0b1111: /* fst.d */ + return true; + } + break; + case 0b001110: /* indexed, atomic, bounds-checking memory operations */ + switch ((insn >> 15) & 0b11111111111) { + case 0b00000100000: /* stx.b */ + case 0b00000101000: /* stx.h */ + case 0b00000110000: /* stx.w */ + case 0b00000111000: /* stx.d */ + case 0b00001110000: /* fstx.s */ + case 0b00001111000: /* fstx.d */ + case 0b00011101100: /* fstgt.s */ + case 0b00011101101: /* fstgt.d */ + case 0b00011101110: /* fstle.s */ + case 0b00011101111: /* fstle.d */ + case 0b00011111000: /* stgt.b */ + case 0b00011111001: /* stgt.h */ + case 0b00011111010: /* stgt.w */ + case 0b00011111011: /* stgt.d */ + case 0b00011111100: /* stle.b */ + case 0b00011111101: /* stle.h */ + case 0b00011111110: /* stle.w */ + case 0b00011111111: /* stle.d */ + case 0b00011000000 ... 0b00011100011: /* am* insns */ + return true; + } + break; + } + + return false; +} + +#endif diff --git a/linux-user/include/host/mips/host-signal.h b/linux-user/include/host/mips/host-signal.h new file mode 100644 index 0000000000..0dbc5cecfd --- /dev/null +++ b/linux-user/include/host/mips/host-signal.h @@ -0,0 +1,75 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef MIPS_HOST_SIGNAL_H +#define MIPS_HOST_SIGNAL_H + +/* The third argument to a SA_SIGINFO handler is ucontext_t. */ +typedef ucontext_t host_sigcontext; + +static inline uintptr_t host_signal_pc(host_sigcontext *uc) +{ + return uc->uc_mcontext.pc; +} + +static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc) +{ + uc->uc_mcontext.pc = pc; +} + +static inline void *host_signal_mask(host_sigcontext *uc) +{ + return &uc->uc_sigmask; +} + +#if defined(__misp16) || defined(__mips_micromips) +#error "Unsupported encoding" +#endif + +static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc) +{ + uint32_t insn = *(uint32_t *)host_signal_pc(uc); + + /* Detect all store instructions at program counter. */ + switch ((insn >> 26) & 077) { + case 050: /* SB */ + case 051: /* SH */ + case 052: /* SWL */ + case 053: /* SW */ + case 054: /* SDL */ + case 055: /* SDR */ + case 056: /* SWR */ + case 070: /* SC */ + case 071: /* SWC1 */ + case 074: /* SCD */ + case 075: /* SDC1 */ + case 077: /* SD */ +#if !defined(__mips_isa_rev) || __mips_isa_rev < 6 + case 072: /* SWC2 */ + case 076: /* SDC2 */ +#endif + return true; + case 023: /* COP1X */ + /* + * Required in all versions of MIPS64 since + * MIPS64r1 and subsequent versions of MIPS32r2. 
+ */ + switch (insn & 077) { + case 010: /* SWXC1 */ + case 011: /* SDXC1 */ + case 015: /* SUXC1 */ + return true; + } + break; + } + return false; +} + +#endif diff --git a/linux-user/include/host/ppc64/host-signal.h b/linux-user/include/host/ppc64/host-signal.h new file mode 100644 index 0000000000..c4ea866472 --- /dev/null +++ b/linux-user/include/host/ppc64/host-signal.h @@ -0,0 +1,41 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef PPC_HOST_SIGNAL_H +#define PPC_HOST_SIGNAL_H + +/* Needed for PT_* constants */ +#include + +/* The third argument to a SA_SIGINFO handler is ucontext_t. */ +typedef ucontext_t host_sigcontext; + +static inline uintptr_t host_signal_pc(host_sigcontext *uc) +{ + return uc->uc_mcontext.gp_regs[PT_NIP]; +} + +static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc) +{ + uc->uc_mcontext.gp_regs[PT_NIP] = pc; +} + +static inline void *host_signal_mask(host_sigcontext *uc) +{ + return &uc->uc_sigmask; +} + +static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc) +{ + return uc->uc_mcontext.gp_regs[PT_TRAP] != 0x400 + && (uc->uc_mcontext.gp_regs[PT_DSISR] & 0x02000000); +} + +#endif diff --git a/linux-user/include/host/riscv/host-signal.h b/linux-user/include/host/riscv/host-signal.h new file mode 100644 index 0000000000..decacb2325 --- /dev/null +++ b/linux-user/include/host/riscv/host-signal.h @@ -0,0 +1,71 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef RISCV_HOST_SIGNAL_H +#define RISCV_HOST_SIGNAL_H + +/* The third argument to a SA_SIGINFO handler is ucontext_t. */ +typedef ucontext_t host_sigcontext; + +static inline uintptr_t host_signal_pc(host_sigcontext *uc) +{ + return uc->uc_mcontext.__gregs[REG_PC]; +} + +static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc) +{ + uc->uc_mcontext.__gregs[REG_PC] = pc; +} + +static inline void *host_signal_mask(host_sigcontext *uc) +{ + return &uc->uc_sigmask; +} + +static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc) +{ + /* + * Detect store by reading the instruction at the program counter. + * Do not read more than 16 bits, because we have not yet determined + * the size of the instruction. + */ + const uint16_t *pinsn = (const uint16_t *)host_signal_pc(uc); + uint16_t insn = pinsn[0]; + + /* 16-bit instructions */ + switch (insn & 0xe003) { + case 0xa000: /* c.fsd */ + case 0xc000: /* c.sw */ + case 0xe000: /* c.sd (rv64) / c.fsw (rv32) */ + case 0xa002: /* c.fsdsp */ + case 0xc002: /* c.swsp */ + case 0xe002: /* c.sdsp (rv64) / c.fswsp (rv32) */ + return true; + } + + /* 32-bit instructions, major opcodes */ + switch (insn & 0x7f) { + case 0x23: /* store */ + case 0x27: /* store-fp */ + return true; + case 0x2f: /* amo */ + /* + * The AMO function code is in bits 25-31, unread as yet. + * The AMO functions are LR (read), SC (write), and the + * rest are all read-modify-write. 
+ */ + insn = pinsn[1]; + return (insn >> 11) != 2; /* LR */ + } + + return false; +} + +#endif diff --git a/linux-user/include/host/s390/host-signal.h b/linux-user/include/host/s390/host-signal.h new file mode 100644 index 0000000000..e6d3ec26dc --- /dev/null +++ b/linux-user/include/host/s390/host-signal.h @@ -0,0 +1,138 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef S390_HOST_SIGNAL_H +#define S390_HOST_SIGNAL_H + +/* The third argument to a SA_SIGINFO handler is ucontext_t. */ +typedef ucontext_t host_sigcontext; + +static inline uintptr_t host_signal_pc(host_sigcontext *uc) +{ + return uc->uc_mcontext.psw.addr; +} + +static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc) +{ + uc->uc_mcontext.psw.addr = pc; +} + +static inline void *host_signal_mask(host_sigcontext *uc) +{ + return &uc->uc_sigmask; +} + +static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc) +{ + uint16_t *pinsn = (uint16_t *)host_signal_pc(uc); + + /* + * ??? On linux, the non-rt signal handler has 4 (!) arguments instead + * of the normal 2 arguments. The 4th argument contains the "Translation- + * Exception Identification for DAT Exceptions" from the hardware (aka + * "int_parm_long"), which does in fact contain the is_write value. + * The rt signal handler, as far as I can tell, does not give this value + * at all. Not that we could get to it from here even if it were. + * So fall back to parsing instructions. Treat read-modify-write ones as + * writes, which is not fully correct, but for tracking self-modifying code + * this is better than treating them as reads. Checking si_addr page flags + * might be a viable improvement, albeit a racy one. + */ + /* ??? This is not even close to complete. 
*/ + switch (pinsn[0] >> 8) { + case 0x50: /* ST */ + case 0x42: /* STC */ + case 0x40: /* STH */ + case 0x44: /* EX */ + case 0xba: /* CS */ + case 0xbb: /* CDS */ + return true; + case 0xc4: /* RIL format insns */ + switch (pinsn[0] & 0xf) { + case 0xf: /* STRL */ + case 0xb: /* STGRL */ + case 0x7: /* STHRL */ + return true; + } + break; + case 0xc6: /* RIL-b format insns */ + switch (pinsn[0] & 0xf) { + case 0x0: /* EXRL */ + return true; + } + break; + case 0xc8: /* SSF format insns */ + switch (pinsn[0] & 0xf) { + case 0x2: /* CSST */ + return true; + } + break; + case 0xe3: /* RXY format insns */ + switch (pinsn[2] & 0xff) { + case 0x50: /* STY */ + case 0x24: /* STG */ + case 0x72: /* STCY */ + case 0x70: /* STHY */ + case 0x8e: /* STPQ */ + case 0x3f: /* STRVH */ + case 0x3e: /* STRV */ + case 0x2f: /* STRVG */ + return true; + } + break; + case 0xe6: + switch (pinsn[2] & 0xff) { + case 0x09: /* VSTEBRH */ + case 0x0a: /* VSTEBRG */ + case 0x0b: /* VSTEBRF */ + case 0x0e: /* VSTBR */ + case 0x0f: /* VSTER */ + case 0x3f: /* VSTRLR */ + return true; + } + break; + case 0xe7: + switch (pinsn[2] & 0xff) { + case 0x08: /* VSTEB */ + case 0x09: /* VSTEH */ + case 0x0a: /* VSTEG */ + case 0x0b: /* VSTEF */ + case 0x0e: /* VST */ + case 0x1a: /* VSCEG */ + case 0x1b: /* VSCEF */ + case 0x3e: /* VSTM */ + case 0x3f: /* VSTL */ + return true; + } + break; + case 0xeb: /* RSY format insns */ + switch (pinsn[2] & 0xff) { + case 0x14: /* CSY */ + case 0x30: /* CSG */ + case 0x31: /* CDSY */ + case 0x3e: /* CDSG */ + case 0xe4: /* LANG */ + case 0xe6: /* LAOG */ + case 0xe7: /* LAXG */ + case 0xe8: /* LAAG */ + case 0xea: /* LAALG */ + case 0xf4: /* LAN */ + case 0xf6: /* LAO */ + case 0xf7: /* LAX */ + case 0xfa: /* LAAL */ + case 0xf8: /* LAA */ + return true; + } + break; + } + return false; +} + +#endif diff --git a/linux-user/include/host/s390x/host-signal.h b/linux-user/include/host/s390x/host-signal.h new file mode 100644 index 0000000000..0e83f9358d --- /dev/null +++ b/linux-user/include/host/s390x/host-signal.h @@ -0,0 +1 @@ +#include "../s390/host-signal.h" diff --git a/linux-user/include/host/sparc64/host-signal.h b/linux-user/include/host/sparc64/host-signal.h new file mode 100644 index 0000000000..64957c2bca --- /dev/null +++ b/linux-user/include/host/sparc64/host-signal.h @@ -0,0 +1,64 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef SPARC64_HOST_SIGNAL_H +#define SPARC64_HOST_SIGNAL_H + +/* The third argument to a SA_SIGINFO handler is struct sigcontext. 
*/ +typedef struct sigcontext host_sigcontext; + +static inline uintptr_t host_signal_pc(host_sigcontext *sc) +{ + return sc->sigc_regs.tpc; +} + +static inline void host_signal_set_pc(host_sigcontext *sc, uintptr_t pc) +{ + sc->sigc_regs.tpc = pc; + sc->sigc_regs.tnpc = pc + 4; +} + +static inline void *host_signal_mask(host_sigcontext *sc) +{ + return &sc->sigc_mask; +} + +static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc) +{ + uint32_t insn = *(uint32_t *)host_signal_pc(uc); + + if ((insn >> 30) == 3) { + switch ((insn >> 19) & 0x3f) { + case 0x05: /* stb */ + case 0x15: /* stba */ + case 0x06: /* sth */ + case 0x16: /* stha */ + case 0x04: /* st */ + case 0x14: /* sta */ + case 0x07: /* std */ + case 0x17: /* stda */ + case 0x0e: /* stx */ + case 0x1e: /* stxa */ + case 0x24: /* stf */ + case 0x34: /* stfa */ + case 0x27: /* stdf */ + case 0x37: /* stdfa */ + case 0x26: /* stqf */ + case 0x36: /* stqfa */ + case 0x25: /* stfsr */ + case 0x3c: /* casa */ + case 0x3e: /* casxa */ + return true; + } + } + return false; +} + +#endif diff --git a/linux-user/include/host/x32/host-signal.h b/linux-user/include/host/x32/host-signal.h new file mode 100644 index 0000000000..26800591d3 --- /dev/null +++ b/linux-user/include/host/x32/host-signal.h @@ -0,0 +1 @@ +#include "../x86_64/host-signal.h" diff --git a/linux-user/include/host/x86_64/host-signal.h b/linux-user/include/host/x86_64/host-signal.h new file mode 100644 index 0000000000..5a7627fedc --- /dev/null +++ b/linux-user/include/host/x86_64/host-signal.h @@ -0,0 +1,37 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (C) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef X86_64_HOST_SIGNAL_H +#define X86_64_HOST_SIGNAL_H + +/* The third argument to a SA_SIGINFO handler is ucontext_t. */ +typedef ucontext_t host_sigcontext; + +static inline uintptr_t host_signal_pc(host_sigcontext *uc) +{ + return uc->uc_mcontext.gregs[REG_RIP]; +} + +static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc) +{ + uc->uc_mcontext.gregs[REG_RIP] = pc; +} + +static inline void *host_signal_mask(host_sigcontext *uc) +{ + return &uc->uc_sigmask; +} + +static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc) +{ + return uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe + && (uc->uc_mcontext.gregs[REG_ERR] & 0x2); +} + +#endif diff --git a/linux-user/include/special-errno.h b/linux-user/include/special-errno.h new file mode 100644 index 0000000000..4120455baa --- /dev/null +++ b/linux-user/include/special-errno.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU internal errno values for implementing user-only POSIX. + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2021 Linaro, Ltd. + */ + +#ifndef SPECIAL_ERRNO_H +#define SPECIAL_ERRNO_H + +/* + * All of these are QEMU internal, not visible to the guest. + * They should be chosen so as to not overlap with any host + * or guest errno. + */ + +/* + * This is returned when a system call should be restarted, to tell the + * main loop that it should wind the guest PC backwards so it will + * re-execute the syscall after handling any pending signals. + */ +#define QEMU_ERESTARTSYS 512 + +/* + * This is returned after a successful sigreturn syscall, to indicate + * that it has correctly set the guest registers and so the main loop + * should not touch them. 
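
These two special errno values are the glue between do_syscall() and every cpu_loop in this series: -QEMU_ERESTARTSYS means "back the PC up so the syscall instruction re-executes after signal handling", and -QEMU_ESIGRETURN means "sigreturn already rewrote the register file, do not store a return value". A minimal standalone mock of that dispatch (the register names and the 4-byte instruction step are invented for the illustration; each target uses its own):

    #include <stdint.h>
    #include <stdio.h>

    #define QEMU_ERESTARTSYS 512
    #define QEMU_ESIGRETURN  513

    struct mock_cpu { uint64_t pc; int64_t ret_reg; };

    static void handle_syscall_return(struct mock_cpu *cpu, int64_t ret)
    {
        if (ret == -QEMU_ERESTARTSYS) {
            cpu->pc -= 4;          /* back up so the syscall insn re-executes */
        } else if (ret != -QEMU_ESIGRETURN) {
            cpu->ret_reg = ret;    /* ESIGRETURN: registers already restored */
        }
    }

    int main(void)
    {
        struct mock_cpu cpu = { .pc = 0x1004, .ret_reg = 0 };
        handle_syscall_return(&cpu, -QEMU_ERESTARTSYS);
        printf("pc after restart request: 0x%llx\n",
               (unsigned long long)cpu.pc);
        return 0;
    }
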
+ */ +#define QEMU_ESIGRETURN 513 + +#endif /* SPECIAL_ERRNO_H */ diff --git a/linux-user/ioctls.h b/linux-user/ioctls.h index 7193c3b226..071f7ca253 100644 --- a/linux-user/ioctls.h +++ b/linux-user/ioctls.h @@ -96,9 +96,7 @@ IOCTL(BLKROGET, IOC_R, MK_PTR(TYPE_INT)) IOCTL(BLKRRPART, 0, TYPE_NULL) IOCTL(BLKGETSIZE, IOC_R, MK_PTR(TYPE_ULONG)) -#ifdef BLKGETSIZE64 IOCTL(BLKGETSIZE64, IOC_R, MK_PTR(TYPE_ULONGLONG)) -#endif IOCTL(BLKFLSBUF, 0, TYPE_NULL) IOCTL(BLKRASET, 0, TYPE_INT) IOCTL(BLKRAGET, IOC_R, MK_PTR(TYPE_LONG)) @@ -107,33 +105,15 @@ IOCTL_SPECIAL(BLKPG, IOC_W, do_ioctl_blkpg, MK_PTR(MK_STRUCT(STRUCT_blkpg_ioctl_arg))) -#ifdef BLKDISCARD IOCTL(BLKDISCARD, IOC_W, MK_PTR(MK_ARRAY(TYPE_ULONGLONG, 2))) -#endif -#ifdef BLKIOMIN IOCTL(BLKIOMIN, IOC_R, MK_PTR(TYPE_INT)) -#endif -#ifdef BLKIOOPT IOCTL(BLKIOOPT, IOC_R, MK_PTR(TYPE_INT)) -#endif -#ifdef BLKALIGNOFF IOCTL(BLKALIGNOFF, IOC_R, MK_PTR(TYPE_INT)) -#endif -#ifdef BLKPBSZGET IOCTL(BLKPBSZGET, IOC_R, MK_PTR(TYPE_INT)) -#endif -#ifdef BLKDISCARDZEROES IOCTL(BLKDISCARDZEROES, IOC_R, MK_PTR(TYPE_INT)) -#endif -#ifdef BLKSECDISCARD IOCTL(BLKSECDISCARD, IOC_W, MK_PTR(MK_ARRAY(TYPE_ULONGLONG, 2))) -#endif -#ifdef BLKROTATIONAL IOCTL(BLKROTATIONAL, IOC_R, MK_PTR(TYPE_SHORT)) -#endif -#ifdef BLKZEROOUT IOCTL(BLKZEROOUT, IOC_W, MK_PTR(MK_ARRAY(TYPE_ULONGLONG, 2))) -#endif IOCTL(FDMSGON, 0, TYPE_NULL) IOCTL(FDMSGOFF, 0, TYPE_NULL) @@ -149,17 +129,13 @@ IOCTL(FDTWADDLE, 0, TYPE_NULL) IOCTL(FDEJECT, 0, TYPE_NULL) -#ifdef FIBMAP IOCTL(FIBMAP, IOC_W | IOC_R, MK_PTR(TYPE_LONG)) -#endif #ifdef FICLONE IOCTL(FICLONE, IOC_W, TYPE_INT) IOCTL(FICLONERANGE, IOC_W, MK_PTR(MK_STRUCT(STRUCT_file_clone_range))) #endif -#ifdef FIGETBSZ IOCTL(FIGETBSZ, IOC_R, MK_PTR(TYPE_LONG)) -#endif #ifdef CONFIG_FIEMAP IOCTL_SPECIAL(FS_IOC_FIEMAP, IOC_W | IOC_R, do_ioctl_fs_ioc_fiemap, MK_PTR(MK_STRUCT(STRUCT_fiemap))) @@ -637,6 +613,10 @@ IOCTL(LOOP_SET_STATUS64, IOC_W, MK_PTR(MK_STRUCT(STRUCT_loop_info64))) IOCTL(LOOP_GET_STATUS64, IOC_R, MK_PTR(MK_STRUCT(STRUCT_loop_info64))) IOCTL(LOOP_CHANGE_FD, 0, TYPE_INT) + IOCTL(LOOP_SET_CAPACITY, 0, TYPE_INT) + IOCTL(LOOP_SET_DIRECT_IO, 0, TYPE_INT) + IOCTL(LOOP_SET_BLOCK_SIZE, 0, TYPE_INT) + IOCTL(LOOP_CONFIGURE, IOC_W, MK_PTR(MK_STRUCT(STRUCT_loop_config))) IOCTL(LOOP_CTL_ADD, 0, TYPE_INT) IOCTL(LOOP_CTL_REMOVE, 0, TYPE_INT) diff --git a/linux-user/linux_loop.h b/linux-user/linux_loop.h index c69fea11e4..f80b96f1ff 100644 --- a/linux-user/linux_loop.h +++ b/linux-user/linux_loop.h @@ -96,6 +96,8 @@ struct loop_info64 { #define LOOP_CHANGE_FD 0x4C06 #define LOOP_SET_CAPACITY 0x4C07 #define LOOP_SET_DIRECT_IO 0x4C08 +#define LOOP_SET_BLOCK_SIZE 0x4C09 +#define LOOP_CONFIGURE 0x4C0A /* /dev/loop-control interface */ #define LOOP_CTL_ADD 0x4C80 diff --git a/linux-user/linuxload.c b/linux-user/linuxload.c index 9d4eb5e94b..745cce70ab 100644 --- a/linux-user/linuxload.c +++ b/linux-user/linuxload.c @@ -2,6 +2,8 @@ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" +#include "loader.h" #define NGROUPS 32 @@ -90,6 +92,11 @@ abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp, envp = sp; sp -= (argc + 1) * n; argv = sp; + ts->info->envp = envp; + ts->info->envc = envc; + ts->info->argv = argv; + ts->info->argc = argc; + if (push_ptr) { /* FIXME - handle put_user() failures */ sp -= n; @@ -97,19 +104,22 @@ abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp, sp -= n; put_user_ual(argv, sp); } + sp -= n; /* FIXME - handle put_user() failures */ put_user_ual(argc, sp); - ts->info->arg_start = 
stringp; + + ts->info->arg_strings = stringp; while (argc-- > 0) { /* FIXME - handle put_user() failures */ put_user_ual(stringp, argv); argv += n; stringp += target_strlen(stringp) + 1; } - ts->info->arg_end = stringp; /* FIXME - handle put_user() failures */ put_user_ual(0, argv); + + ts->info->env_strings = stringp; while (envc-- > 0) { /* FIXME - handle put_user() failures */ put_user_ual(stringp, envp); diff --git a/linux-user/loader.h b/linux-user/loader.h new file mode 100644 index 0000000000..f375ee0679 --- /dev/null +++ b/linux-user/loader.h @@ -0,0 +1,59 @@ +/* + * loader.h: prototypes for linux-user guest binary loader + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef LINUX_USER_LOADER_H +#define LINUX_USER_LOADER_H + +/* + * Read a good amount of data initially, to hopefully get all the + * program headers loaded. + */ +#define BPRM_BUF_SIZE 1024 + +/* + * This structure is used to hold the arguments that are + * used when loading binaries. + */ +struct linux_binprm { + char buf[BPRM_BUF_SIZE] __attribute__((aligned)); + abi_ulong p; + int fd; + int e_uid, e_gid; + int argc, envc; + char **argv; + char **envp; + char *filename; /* Name of binary */ + int (*core_dump)(int, const CPUArchState *); /* coredump routine */ +}; + +void do_init_thread(struct target_pt_regs *regs, struct image_info *infop); +abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp, + abi_ulong stringp, int push_ptr); +int loader_exec(int fdexec, const char *filename, char **argv, char **envp, + struct target_pt_regs *regs, struct image_info *infop, + struct linux_binprm *); + +uint32_t get_elf_eflags(int fd); +int load_elf_binary(struct linux_binprm *bprm, struct image_info *info); +int load_flt_binary(struct linux_binprm *bprm, struct image_info *info); + +abi_long memcpy_to_target(abi_ulong dest, const void *src, + unsigned long len); + +extern unsigned long guest_stack_size; + +#endif /* LINUX_USER_LOADER_H */ diff --git a/linux-user/loongarch64/cpu_loop.c b/linux-user/loongarch64/cpu_loop.c new file mode 100644 index 0000000000..894fdd111a --- /dev/null +++ b/linux-user/loongarch64/cpu_loop.c @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch user cpu_loop. 
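
loader_build_argptr() now also records argc/argv/envp and the string areas in ts->info, but the word-by-word stack picture it builds is unchanged (ignoring the optional push_ptr words): argc at the final sp, then the argv pointer array with a NULL terminator, then the envp array with its own NULL terminator, and the strings higher up. A host-side model of just that ordering, using a plain array in place of guest memory and put_user_ual:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const char *argv[] = { "prog", "arg1" };
        const char *envp[] = { "PATH=/bin" };
        int argc = 2, envc = 1;

        /* From sp upwards: [argc][argv...][NULL][envp...][NULL] */
        intptr_t stack[1 + (2 + 1) + (1 + 1)];
        int i = 0;

        stack[i++] = argc;
        for (int a = 0; a < argc; a++) {
            stack[i++] = (intptr_t)argv[a];
        }
        stack[i++] = 0;                    /* argv terminator */
        for (int e = 0; e < envc; e++) {
            stack[i++] = (intptr_t)envp[e];
        }
        stack[i++] = 0;                    /* envp terminator */

        printf("%d words pushed, argc at sp[0]\n", i);
        return 0;
    }
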
+ * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "user-internals.h" +#include "cpu_loop-common.h" +#include "signal-common.h" + +void cpu_loop(CPULoongArchState *env) +{ + CPUState *cs = env_cpu(env); + int trapnr, si_code; + abi_long ret; + + for (;;) { + cpu_exec_start(cs); + trapnr = cpu_exec(cs); + cpu_exec_end(cs); + process_queued_cpu_work(cs); + + switch (trapnr) { + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + case EXCCODE_SYS: + env->pc += 4; + ret = do_syscall(env, env->gpr[11], + env->gpr[4], env->gpr[5], + env->gpr[6], env->gpr[7], + env->gpr[8], env->gpr[9], + -1, -1); + if (ret == -QEMU_ERESTARTSYS) { + env->pc -= 4; + break; + } + if (ret == -QEMU_ESIGRETURN) { + /* + * Returning from a successful sigreturn syscall. + * Avoid clobbering register state. + */ + break; + } + env->gpr[4] = ret; + break; + case EXCCODE_INE: + force_sig_fault(TARGET_SIGILL, 0, env->pc); + break; + case EXCCODE_FPE: + si_code = TARGET_FPE_FLTUNK; + if (GET_FP_CAUSE(env->fcsr0) & FP_INVALID) { + si_code = TARGET_FPE_FLTINV; + } else if (GET_FP_CAUSE(env->fcsr0) & FP_DIV0) { + si_code = TARGET_FPE_FLTDIV; + } else if (GET_FP_CAUSE(env->fcsr0) & FP_OVERFLOW) { + si_code = TARGET_FPE_FLTOVF; + } else if (GET_FP_CAUSE(env->fcsr0) & FP_UNDERFLOW) { + si_code = TARGET_FPE_FLTUND; + } else if (GET_FP_CAUSE(env->fcsr0) & FP_INEXACT) { + si_code = TARGET_FPE_FLTRES; + } + force_sig_fault(TARGET_SIGFPE, si_code, env->pc); + break; + case EXCP_DEBUG: + case EXCCODE_BRK: + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc); + break; + case EXCCODE_BCE: + force_sig_fault(TARGET_SIGSYS, TARGET_SI_KERNEL, env->pc); + break; + case EXCP_ATOMIC: + cpu_exec_step_atomic(cs); + break; + default: + EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", + trapnr); + exit(EXIT_FAILURE); + } + process_pending_signals(env); + } +} + +void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +{ + int i; + + for (i = 0; i < 32; i++) { + env->gpr[i] = regs->regs[i]; + } + env->pc = regs->csr.era; + +} diff --git a/linux-user/loongarch64/signal.c b/linux-user/loongarch64/signal.c new file mode 100644 index 0000000000..7c7afb652e --- /dev/null +++ b/linux-user/loongarch64/signal.c @@ -0,0 +1,315 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch emulation of Linux signals + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "user-internals.h" +#include "signal-common.h" +#include "linux-user/trace.h" + +#include "target/loongarch/internals.h" + +/* FP context was used */ +#define SC_USED_FP (1 << 0) + +struct target_sigcontext { + uint64_t sc_pc; + uint64_t sc_regs[32]; + uint32_t sc_flags; + uint64_t sc_extcontext[0] QEMU_ALIGNED(16); +}; + + +#define FPU_CTX_MAGIC 0x46505501 +#define FPU_CTX_ALIGN 8 +struct target_fpu_context { + uint64_t regs[32]; + uint64_t fcc; + uint32_t fcsr; +} QEMU_ALIGNED(FPU_CTX_ALIGN); + +#define CONTEXT_INFO_ALIGN 16 +struct target_sctx_info { + uint32_t magic; + uint32_t size; + uint64_t padding; +} QEMU_ALIGNED(CONTEXT_INFO_ALIGN); + +struct target_ucontext { + abi_ulong tuc_flags; + abi_ptr tuc_link; + target_stack_t tuc_stack; + target_sigset_t tuc_sigmask; + uint8_t __unused[1024 / 8 - sizeof(target_sigset_t)]; + struct target_sigcontext tuc_mcontext; +}; + +struct target_rt_sigframe { + struct target_siginfo rs_info; + struct target_ucontext 
rs_uc; +}; + +/* + * These two structures are not present in guest memory, are private + * to the signal implementation, but are largely copied from the + * kernel's signal implementation. + */ +struct ctx_layout { + void *haddr; + abi_ptr gaddr; + unsigned int size; +}; + +struct extctx_layout { + unsigned int size; + unsigned int flags; + struct ctx_layout fpu; + struct ctx_layout end; +}; + +static abi_ptr extframe_alloc(struct extctx_layout *extctx, + struct ctx_layout *sctx, unsigned size, + unsigned align, abi_ptr orig_sp) +{ + abi_ptr sp = orig_sp; + + sp -= sizeof(struct target_sctx_info) + size; + align = MAX(align, CONTEXT_INFO_ALIGN); + sp = ROUND_DOWN(sp, align); + sctx->gaddr = sp; + + size = orig_sp - sp; + sctx->size = size; + extctx->size += size; + + return sp; +} + +static abi_ptr setup_extcontext(struct extctx_layout *extctx, abi_ptr sp) +{ + memset(extctx, 0, sizeof(struct extctx_layout)); + + /* Grow down, alloc "end" context info first. */ + sp = extframe_alloc(extctx, &extctx->end, 0, CONTEXT_INFO_ALIGN, sp); + + /* For qemu, there is no lazy fp context switch, so fp always present. */ + extctx->flags = SC_USED_FP; + sp = extframe_alloc(extctx, &extctx->fpu, + sizeof(struct target_rt_sigframe), FPU_CTX_ALIGN, sp); + + return sp; +} + +static void setup_sigframe(CPULoongArchState *env, + struct target_sigcontext *sc, + struct extctx_layout *extctx) +{ + struct target_sctx_info *info; + struct target_fpu_context *fpu_ctx; + int i; + + __put_user(extctx->flags, &sc->sc_flags); + __put_user(env->pc, &sc->sc_pc); + __put_user(0, &sc->sc_regs[0]); + for (i = 1; i < 32; ++i) { + __put_user(env->gpr[i], &sc->sc_regs[i]); + } + + /* + * Set fpu context + */ + info = extctx->fpu.haddr; + __put_user(FPU_CTX_MAGIC, &info->magic); + __put_user(extctx->fpu.size, &info->size); + + fpu_ctx = (struct target_fpu_context *)(info + 1); + for (i = 0; i < 32; ++i) { + __put_user(env->fpr[i], &fpu_ctx->regs[i]); + } + __put_user(read_fcc(env), &fpu_ctx->fcc); + __put_user(env->fcsr0, &fpu_ctx->fcsr); + + /* + * Set end context + */ + info = extctx->end.haddr; + __put_user(0, &info->magic); + __put_user(extctx->end.size, &info->size); +} + +static bool parse_extcontext(struct extctx_layout *extctx, abi_ptr frame) +{ + memset(extctx, 0, sizeof(*extctx)); + + while (1) { + uint32_t magic, size; + + if (get_user_u32(magic, frame) || get_user_u32(size, frame + 4)) { + return false; + } + + switch (magic) { + case 0: /* END */ + extctx->end.gaddr = frame; + extctx->end.size = size; + extctx->size += size; + return true; + + case FPU_CTX_MAGIC: + if (size < (sizeof(struct target_sctx_info) + + sizeof(struct target_fpu_context))) { + return false; + } + extctx->fpu.gaddr = frame; + extctx->fpu.size = size; + extctx->size += size; + break; + default: + return false; + } + + frame += size; + } +} + +static void restore_sigframe(CPULoongArchState *env, + struct target_sigcontext *sc, + struct extctx_layout *extctx) +{ + int i; + + __get_user(env->pc, &sc->sc_pc); + for (i = 1; i < 32; ++i) { + __get_user(env->gpr[i], &sc->sc_regs[i]); + } + + if (extctx->fpu.haddr) { + struct target_fpu_context *fpu_ctx = + extctx->fpu.haddr + sizeof(struct target_sctx_info); + uint64_t fcc; + + for (i = 0; i < 32; ++i) { + __get_user(env->fpr[i], &fpu_ctx->regs[i]); + } + __get_user(fcc, &fpu_ctx->fcc); + write_fcc(env, fcc); + __get_user(env->fcsr0, &fpu_ctx->fcsr); + restore_fp_status(env); + } +} + +/* + * Determine which stack to use. 
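 *
 * Together with setup_extcontext(), the "sp -= sizeof(struct
 * target_rt_sigframe)" below lays the frame out roughly as follows,
 * in ascending address order:
 *
 *   frame_addr:                   struct target_rt_sigframe
 *   frame_addr + sizeof(*frame):  target_sctx_info { FPU_CTX_MAGIC, size }
 *                                 followed by struct target_fpu_context
 *   above that:                   target_sctx_info { 0, size }  (end record)
 *
 * do_rt_sigreturn() later rediscovers this layout by walking the records
 * with parse_extcontext() until it hits the magic == 0 terminator.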
+ */ +static abi_ptr get_sigframe(struct target_sigaction *ka, + CPULoongArchState *env, + struct extctx_layout *extctx) +{ + abi_ulong sp; + + sp = target_sigsp(get_sp_from_cpustate(env), ka); + sp = ROUND_DOWN(sp, 16); + sp = setup_extcontext(extctx, sp); + sp -= sizeof(struct target_rt_sigframe); + + assert(QEMU_IS_ALIGNED(sp, 16)); + + return sp; +} + +void setup_rt_frame(int sig, struct target_sigaction *ka, + target_siginfo_t *info, + target_sigset_t *set, CPULoongArchState *env) +{ + struct target_rt_sigframe *frame; + struct extctx_layout extctx; + abi_ptr frame_addr; + int i; + + frame_addr = get_sigframe(ka, env, &extctx); + trace_user_setup_rt_frame(env, frame_addr); + + frame = lock_user(VERIFY_WRITE, frame_addr, + sizeof(*frame) + extctx.size, 0); + if (!frame) { + force_sigsegv(sig); + return; + } + extctx.fpu.haddr = (void *)frame + (extctx.fpu.gaddr - frame_addr); + extctx.end.haddr = (void *)frame + (extctx.end.gaddr - frame_addr); + + tswap_siginfo(&frame->rs_info, info); + + __put_user(0, &frame->rs_uc.tuc_flags); + __put_user(0, &frame->rs_uc.tuc_link); + target_save_altstack(&frame->rs_uc.tuc_stack, env); + + setup_sigframe(env, &frame->rs_uc.tuc_mcontext, &extctx); + + for (i = 0; i < TARGET_NSIG_WORDS; i++) { + __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]); + } + + env->gpr[4] = sig; + env->gpr[5] = frame_addr + offsetof(struct target_rt_sigframe, rs_info); + env->gpr[6] = frame_addr + offsetof(struct target_rt_sigframe, rs_uc); + env->gpr[3] = frame_addr; + env->gpr[1] = default_rt_sigreturn; + + env->pc = ka->_sa_handler; + unlock_user(frame, frame_addr, sizeof(*frame) + extctx.size); +} + +long do_rt_sigreturn(CPULoongArchState *env) +{ + struct target_rt_sigframe *frame; + struct extctx_layout extctx; + abi_ulong frame_addr; + sigset_t blocked; + + frame_addr = env->gpr[3]; + trace_user_do_rt_sigreturn(env, frame_addr); + + if (!parse_extcontext(&extctx, frame_addr + sizeof(*frame))) { + goto badframe; + } + + frame = lock_user(VERIFY_READ, frame_addr, + sizeof(*frame) + extctx.size, 1); + if (!frame) { + goto badframe; + } + if (extctx.fpu.gaddr) { + extctx.fpu.haddr = (void *)frame + (extctx.fpu.gaddr - frame_addr); + } + + target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask); + set_sigmask(&blocked); + + restore_sigframe(env, &frame->rs_uc.tuc_mcontext, &extctx); + + target_restore_altstack(&frame->rs_uc.tuc_stack, env); + + unlock_user(frame, frame_addr, 0); + return -QEMU_ESIGRETURN; + + badframe: + force_sig(TARGET_SIGSEGV); + return -QEMU_ESIGRETURN; +} + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0); + assert(tramp != NULL); + + __put_user(0x03822c0b, tramp + 0); /* ori a7, zero, 0x8b */ + __put_user(0x002b0000, tramp + 1); /* syscall 0 */ + + default_rt_sigreturn = sigtramp_page; + unlock_user(tramp, sigtramp_page, 8); +} diff --git a/linux-user/loongarch64/sockbits.h b/linux-user/loongarch64/sockbits.h new file mode 100644 index 0000000000..1cffcae120 --- /dev/null +++ b/linux-user/loongarch64/sockbits.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_TARGET_SOCKBITS_H +#define LOONGARCH_TARGET_SOCKBITS_H + +#include "../generic/sockbits.h" + +#endif diff --git a/linux-user/loongarch64/syscall_nr.h b/linux-user/loongarch64/syscall_nr.h new file mode 100644 index 0000000000..be00915adf --- /dev/null +++ b/linux-user/loongarch64/syscall_nr.h @@ -0,0 +1,312 @@ 
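The trampoline written by setup_sigtramp() above and the table that follows are two halves of one contract: the "ori a7, zero, 0x8b" store loads 139, which is TARGET_NR_rt_sigreturn below. A small stand-alone check of that encoding, illustrative only and not part of the patch (the field positions follow the LoongArch 2RI12 "ori rd, rj, ui12" format):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_NR_rt_sigreturn 139          /* value from the table below */

int main(void)
{
    uint32_t insn = 0x03822c0b;             /* "ori a7, zero, 0x8b" from setup_sigtramp() */
    uint32_t rd   = insn & 0x1f;            /* bits  4:0  - destination register */
    uint32_t rj   = (insn >> 5) & 0x1f;     /* bits  9:5  - source register */
    uint32_t ui12 = (insn >> 10) & 0xfff;   /* bits 21:10 - zero-extended immediate */

    assert(rd == 11);                       /* r11 is $a7, the syscall number register */
    assert(rj == 0);                        /* r0 is $zero */
    assert(ui12 == TARGET_NR_rt_sigreturn); /* trampoline requests rt_sigreturn */
    printf("trampoline requests syscall %u\n", (unsigned)ui12);
    return 0;
}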
+/* + * This file contains the system call numbers. + * Do not modify. + * This file is generated by scripts/gensyscalls.sh + */ +#ifndef LINUX_USER_LOONGARCH_SYSCALL_NR_H +#define LINUX_USER_LOONGARCH_SYSCALL_NR_H + +#define TARGET_NR_io_setup 0 +#define TARGET_NR_io_destroy 1 +#define TARGET_NR_io_submit 2 +#define TARGET_NR_io_cancel 3 +#define TARGET_NR_io_getevents 4 +#define TARGET_NR_setxattr 5 +#define TARGET_NR_lsetxattr 6 +#define TARGET_NR_fsetxattr 7 +#define TARGET_NR_getxattr 8 +#define TARGET_NR_lgetxattr 9 +#define TARGET_NR_fgetxattr 10 +#define TARGET_NR_listxattr 11 +#define TARGET_NR_llistxattr 12 +#define TARGET_NR_flistxattr 13 +#define TARGET_NR_removexattr 14 +#define TARGET_NR_lremovexattr 15 +#define TARGET_NR_fremovexattr 16 +#define TARGET_NR_getcwd 17 +#define TARGET_NR_lookup_dcookie 18 +#define TARGET_NR_eventfd2 19 +#define TARGET_NR_epoll_create1 20 +#define TARGET_NR_epoll_ctl 21 +#define TARGET_NR_epoll_pwait 22 +#define TARGET_NR_dup 23 +#define TARGET_NR_dup3 24 +#define TARGET_NR_fcntl 25 +#define TARGET_NR_inotify_init1 26 +#define TARGET_NR_inotify_add_watch 27 +#define TARGET_NR_inotify_rm_watch 28 +#define TARGET_NR_ioctl 29 +#define TARGET_NR_ioprio_set 30 +#define TARGET_NR_ioprio_get 31 +#define TARGET_NR_flock 32 +#define TARGET_NR_mknodat 33 +#define TARGET_NR_mkdirat 34 +#define TARGET_NR_unlinkat 35 +#define TARGET_NR_symlinkat 36 +#define TARGET_NR_linkat 37 +#define TARGET_NR_umount2 39 +#define TARGET_NR_mount 40 +#define TARGET_NR_pivot_root 41 +#define TARGET_NR_nfsservctl 42 +#define TARGET_NR_statfs 43 +#define TARGET_NR_fstatfs 44 +#define TARGET_NR_truncate 45 +#define TARGET_NR_ftruncate 46 +#define TARGET_NR_fallocate 47 +#define TARGET_NR_faccessat 48 +#define TARGET_NR_chdir 49 +#define TARGET_NR_fchdir 50 +#define TARGET_NR_chroot 51 +#define TARGET_NR_fchmod 52 +#define TARGET_NR_fchmodat 53 +#define TARGET_NR_fchownat 54 +#define TARGET_NR_fchown 55 +#define TARGET_NR_openat 56 +#define TARGET_NR_close 57 +#define TARGET_NR_vhangup 58 +#define TARGET_NR_pipe2 59 +#define TARGET_NR_quotactl 60 +#define TARGET_NR_getdents64 61 +#define TARGET_NR_lseek 62 +#define TARGET_NR_read 63 +#define TARGET_NR_write 64 +#define TARGET_NR_readv 65 +#define TARGET_NR_writev 66 +#define TARGET_NR_pread64 67 +#define TARGET_NR_pwrite64 68 +#define TARGET_NR_preadv 69 +#define TARGET_NR_pwritev 70 +#define TARGET_NR_sendfile 71 +#define TARGET_NR_pselect6 72 +#define TARGET_NR_ppoll 73 +#define TARGET_NR_signalfd4 74 +#define TARGET_NR_vmsplice 75 +#define TARGET_NR_splice 76 +#define TARGET_NR_tee 77 +#define TARGET_NR_readlinkat 78 +#define TARGET_NR_sync 81 +#define TARGET_NR_fsync 82 +#define TARGET_NR_fdatasync 83 +#define TARGET_NR_sync_file_range 84 +#define TARGET_NR_timerfd_create 85 +#define TARGET_NR_timerfd_settime 86 +#define TARGET_NR_timerfd_gettime 87 +#define TARGET_NR_utimensat 88 +#define TARGET_NR_acct 89 +#define TARGET_NR_capget 90 +#define TARGET_NR_capset 91 +#define TARGET_NR_personality 92 +#define TARGET_NR_exit 93 +#define TARGET_NR_exit_group 94 +#define TARGET_NR_waitid 95 +#define TARGET_NR_set_tid_address 96 +#define TARGET_NR_unshare 97 +#define TARGET_NR_futex 98 +#define TARGET_NR_set_robust_list 99 +#define TARGET_NR_get_robust_list 100 +#define TARGET_NR_nanosleep 101 +#define TARGET_NR_getitimer 102 +#define TARGET_NR_setitimer 103 +#define TARGET_NR_kexec_load 104 +#define TARGET_NR_init_module 105 +#define TARGET_NR_delete_module 106 +#define TARGET_NR_timer_create 107 +#define TARGET_NR_timer_gettime 
108 +#define TARGET_NR_timer_getoverrun 109 +#define TARGET_NR_timer_settime 110 +#define TARGET_NR_timer_delete 111 +#define TARGET_NR_clock_settime 112 +#define TARGET_NR_clock_gettime 113 +#define TARGET_NR_clock_getres 114 +#define TARGET_NR_clock_nanosleep 115 +#define TARGET_NR_syslog 116 +#define TARGET_NR_ptrace 117 +#define TARGET_NR_sched_setparam 118 +#define TARGET_NR_sched_setscheduler 119 +#define TARGET_NR_sched_getscheduler 120 +#define TARGET_NR_sched_getparam 121 +#define TARGET_NR_sched_setaffinity 122 +#define TARGET_NR_sched_getaffinity 123 +#define TARGET_NR_sched_yield 124 +#define TARGET_NR_sched_get_priority_max 125 +#define TARGET_NR_sched_get_priority_min 126 +#define TARGET_NR_sched_rr_get_interval 127 +#define TARGET_NR_restart_syscall 128 +#define TARGET_NR_kill 129 +#define TARGET_NR_tkill 130 +#define TARGET_NR_tgkill 131 +#define TARGET_NR_sigaltstack 132 +#define TARGET_NR_rt_sigsuspend 133 +#define TARGET_NR_rt_sigaction 134 +#define TARGET_NR_rt_sigprocmask 135 +#define TARGET_NR_rt_sigpending 136 +#define TARGET_NR_rt_sigtimedwait 137 +#define TARGET_NR_rt_sigqueueinfo 138 +#define TARGET_NR_rt_sigreturn 139 +#define TARGET_NR_setpriority 140 +#define TARGET_NR_getpriority 141 +#define TARGET_NR_reboot 142 +#define TARGET_NR_setregid 143 +#define TARGET_NR_setgid 144 +#define TARGET_NR_setreuid 145 +#define TARGET_NR_setuid 146 +#define TARGET_NR_setresuid 147 +#define TARGET_NR_getresuid 148 +#define TARGET_NR_setresgid 149 +#define TARGET_NR_getresgid 150 +#define TARGET_NR_setfsuid 151 +#define TARGET_NR_setfsgid 152 +#define TARGET_NR_times 153 +#define TARGET_NR_setpgid 154 +#define TARGET_NR_getpgid 155 +#define TARGET_NR_getsid 156 +#define TARGET_NR_setsid 157 +#define TARGET_NR_getgroups 158 +#define TARGET_NR_setgroups 159 +#define TARGET_NR_uname 160 +#define TARGET_NR_sethostname 161 +#define TARGET_NR_setdomainname 162 +#define TARGET_NR_getrusage 165 +#define TARGET_NR_umask 166 +#define TARGET_NR_prctl 167 +#define TARGET_NR_getcpu 168 +#define TARGET_NR_gettimeofday 169 +#define TARGET_NR_settimeofday 170 +#define TARGET_NR_adjtimex 171 +#define TARGET_NR_getpid 172 +#define TARGET_NR_getppid 173 +#define TARGET_NR_getuid 174 +#define TARGET_NR_geteuid 175 +#define TARGET_NR_getgid 176 +#define TARGET_NR_getegid 177 +#define TARGET_NR_gettid 178 +#define TARGET_NR_sysinfo 179 +#define TARGET_NR_mq_open 180 +#define TARGET_NR_mq_unlink 181 +#define TARGET_NR_mq_timedsend 182 +#define TARGET_NR_mq_timedreceive 183 +#define TARGET_NR_mq_notify 184 +#define TARGET_NR_mq_getsetattr 185 +#define TARGET_NR_msgget 186 +#define TARGET_NR_msgctl 187 +#define TARGET_NR_msgrcv 188 +#define TARGET_NR_msgsnd 189 +#define TARGET_NR_semget 190 +#define TARGET_NR_semctl 191 +#define TARGET_NR_semtimedop 192 +#define TARGET_NR_semop 193 +#define TARGET_NR_shmget 194 +#define TARGET_NR_shmctl 195 +#define TARGET_NR_shmat 196 +#define TARGET_NR_shmdt 197 +#define TARGET_NR_socket 198 +#define TARGET_NR_socketpair 199 +#define TARGET_NR_bind 200 +#define TARGET_NR_listen 201 +#define TARGET_NR_accept 202 +#define TARGET_NR_connect 203 +#define TARGET_NR_getsockname 204 +#define TARGET_NR_getpeername 205 +#define TARGET_NR_sendto 206 +#define TARGET_NR_recvfrom 207 +#define TARGET_NR_setsockopt 208 +#define TARGET_NR_getsockopt 209 +#define TARGET_NR_shutdown 210 +#define TARGET_NR_sendmsg 211 +#define TARGET_NR_recvmsg 212 +#define TARGET_NR_readahead 213 +#define TARGET_NR_brk 214 +#define TARGET_NR_munmap 215 +#define TARGET_NR_mremap 216 +#define 
TARGET_NR_add_key 217 +#define TARGET_NR_request_key 218 +#define TARGET_NR_keyctl 219 +#define TARGET_NR_clone 220 +#define TARGET_NR_execve 221 +#define TARGET_NR_mmap 222 +#define TARGET_NR_fadvise64 223 +#define TARGET_NR_swapon 224 +#define TARGET_NR_swapoff 225 +#define TARGET_NR_mprotect 226 +#define TARGET_NR_msync 227 +#define TARGET_NR_mlock 228 +#define TARGET_NR_munlock 229 +#define TARGET_NR_mlockall 230 +#define TARGET_NR_munlockall 231 +#define TARGET_NR_mincore 232 +#define TARGET_NR_madvise 233 +#define TARGET_NR_remap_file_pages 234 +#define TARGET_NR_mbind 235 +#define TARGET_NR_get_mempolicy 236 +#define TARGET_NR_set_mempolicy 237 +#define TARGET_NR_migrate_pages 238 +#define TARGET_NR_move_pages 239 +#define TARGET_NR_rt_tgsigqueueinfo 240 +#define TARGET_NR_perf_event_open 241 +#define TARGET_NR_accept4 242 +#define TARGET_NR_recvmmsg 243 +#define TARGET_NR_arch_specific_syscall 244 +#define TARGET_NR_wait4 260 +#define TARGET_NR_prlimit64 261 +#define TARGET_NR_fanotify_init 262 +#define TARGET_NR_fanotify_mark 263 +#define TARGET_NR_name_to_handle_at 264 +#define TARGET_NR_open_by_handle_at 265 +#define TARGET_NR_clock_adjtime 266 +#define TARGET_NR_syncfs 267 +#define TARGET_NR_setns 268 +#define TARGET_NR_sendmmsg 269 +#define TARGET_NR_process_vm_readv 270 +#define TARGET_NR_process_vm_writev 271 +#define TARGET_NR_kcmp 272 +#define TARGET_NR_finit_module 273 +#define TARGET_NR_sched_setattr 274 +#define TARGET_NR_sched_getattr 275 +#define TARGET_NR_renameat2 276 +#define TARGET_NR_seccomp 277 +#define TARGET_NR_getrandom 278 +#define TARGET_NR_memfd_create 279 +#define TARGET_NR_bpf 280 +#define TARGET_NR_execveat 281 +#define TARGET_NR_userfaultfd 282 +#define TARGET_NR_membarrier 283 +#define TARGET_NR_mlock2 284 +#define TARGET_NR_copy_file_range 285 +#define TARGET_NR_preadv2 286 +#define TARGET_NR_pwritev2 287 +#define TARGET_NR_pkey_mprotect 288 +#define TARGET_NR_pkey_alloc 289 +#define TARGET_NR_pkey_free 290 +#define TARGET_NR_statx 291 +#define TARGET_NR_io_pgetevents 292 +#define TARGET_NR_rseq 293 +#define TARGET_NR_kexec_file_load 294 +#define TARGET_NR_pidfd_send_signal 424 +#define TARGET_NR_io_uring_setup 425 +#define TARGET_NR_io_uring_enter 426 +#define TARGET_NR_io_uring_register 427 +#define TARGET_NR_open_tree 428 +#define TARGET_NR_move_mount 429 +#define TARGET_NR_fsopen 430 +#define TARGET_NR_fsconfig 431 +#define TARGET_NR_fsmount 432 +#define TARGET_NR_fspick 433 +#define TARGET_NR_pidfd_open 434 +#define TARGET_NR_clone3 435 +#define TARGET_NR_close_range 436 +#define TARGET_NR_openat2 437 +#define TARGET_NR_pidfd_getfd 438 +#define TARGET_NR_faccessat2 439 +#define TARGET_NR_process_madvise 440 +#define TARGET_NR_epoll_pwait2 441 +#define TARGET_NR_mount_setattr 442 +#define TARGET_NR_quotactl_fd 443 +#define TARGET_NR_landlock_create_ruleset 444 +#define TARGET_NR_landlock_add_rule 445 +#define TARGET_NR_landlock_restrict_self 446 +#define TARGET_NR_process_mrelease 448 +#define TARGET_NR_futex_waitv 449 +#define TARGET_NR_set_mempolicy_home_node 450 +#define TARGET_NR_syscalls 451 + +#endif /* LINUX_USER_LOONGARCH_SYSCALL_NR_H */ diff --git a/linux-user/loongarch64/target_cpu.h b/linux-user/loongarch64/target_cpu.h new file mode 100644 index 0000000000..a29af66156 --- /dev/null +++ b/linux-user/loongarch64/target_cpu.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch specific CPU ABI and functions for linux-user + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#ifndef 
LOONGARCH_TARGET_CPU_H +#define LOONGARCH_TARGET_CPU_H + +static inline void cpu_clone_regs_child(CPULoongArchState *env, + target_ulong newsp, unsigned flags) +{ + if (newsp) { + env->gpr[3] = newsp; + } + env->gpr[4] = 0; +} + +static inline void cpu_clone_regs_parent(CPULoongArchState *env, + unsigned flags) +{ +} + +static inline void cpu_set_tls(CPULoongArchState *env, target_ulong newtls) +{ + env->gpr[2] = newtls; +} + +static inline abi_ulong get_sp_from_cpustate(CPULoongArchState *state) +{ + return state->gpr[3]; +} +#endif diff --git a/linux-user/loongarch64/target_elf.h b/linux-user/loongarch64/target_elf.h new file mode 100644 index 0000000000..95c3f05a46 --- /dev/null +++ b/linux-user/loongarch64/target_elf.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_TARGET_ELF_H +#define LOONGARCH_TARGET_ELF_H +static inline const char *cpu_get_model(uint32_t eflags) +{ + return "la464"; +} +#endif diff --git a/linux-user/loongarch64/target_errno_defs.h b/linux-user/loongarch64/target_errno_defs.h new file mode 100644 index 0000000000..c198b8aca9 --- /dev/null +++ b/linux-user/loongarch64/target_errno_defs.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_TARGET_ERRNO_DEFS_H +#define LOONGARCH_TARGET_ERRNO_DEFS_H + +/* Target uses generic errno */ +#include "../generic/target_errno_defs.h" + +#endif diff --git a/linux-user/loongarch64/target_fcntl.h b/linux-user/loongarch64/target_fcntl.h new file mode 100644 index 0000000000..99bf586854 --- /dev/null +++ b/linux-user/loongarch64/target_fcntl.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_TARGET_FCNTL_H +#define LOONGARCH_TARGET_FCNTL_H + +#include "../generic/fcntl.h" + +#endif diff --git a/linux-user/loongarch64/target_mman.h b/linux-user/loongarch64/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/loongarch64/target_mman.h @@ -0,0 +1 @@ +#include "../generic/target_mman.h" diff --git a/linux-user/loongarch64/target_prctl.h b/linux-user/loongarch64/target_prctl.h new file mode 100644 index 0000000000..eb53b31ad5 --- /dev/null +++ b/linux-user/loongarch64/target_prctl.h @@ -0,0 +1 @@ +/* No special prctl support required. 
*/ diff --git a/linux-user/loongarch64/target_resource.h b/linux-user/loongarch64/target_resource.h new file mode 100644 index 0000000000..0f86bf24ee --- /dev/null +++ b/linux-user/loongarch64/target_resource.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_TARGET_RESOURCE_H +#define LOONGARCH_TARGET_RESOURCE_H + +#include "../generic/target_resource.h" + +#endif diff --git a/linux-user/loongarch64/target_signal.h b/linux-user/loongarch64/target_signal.h new file mode 100644 index 0000000000..ad3aaffcb4 --- /dev/null +++ b/linux-user/loongarch64/target_signal.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_TARGET_SIGNAL_H +#define LOONGARCH_TARGET_SIGNAL_H + +#include "../generic/signal.h" + +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + +#endif /* LOONGARCH_TARGET_SIGNAL_H */ diff --git a/linux-user/loongarch64/target_structs.h b/linux-user/loongarch64/target_structs.h new file mode 100644 index 0000000000..6041441e15 --- /dev/null +++ b/linux-user/loongarch64/target_structs.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_TARGET_STRUCTS_H +#define LOONGARCH_TARGET_STRUCTS_H + +#include "../generic/target_structs.h" + +#endif diff --git a/linux-user/loongarch64/target_syscall.h b/linux-user/loongarch64/target_syscall.h new file mode 100644 index 0000000000..8b5de52124 --- /dev/null +++ b/linux-user/loongarch64/target_syscall.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_TARGET_SYSCALL_H +#define LOONGARCH_TARGET_SYSCALL_H + +#include "qemu/units.h" + +/* + * this struct defines the way the registers are stored on the + * stack during a system call. + */ + +struct target_pt_regs { + /* Saved main processor registers. */ + target_ulong regs[32]; + + /* Saved special registers. 
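 * (csr.era is the exception return address and csr.badv the faulting
 * virtual address; these fields correspond to the CSR values the kernel
 * saves in its struct pt_regs.)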
*/ + struct { + target_ulong era; + target_ulong badv; + target_ulong crmd; + target_ulong prmd; + target_ulong euen; + target_ulong ecfg; + target_ulong estat; + } csr; + target_ulong orig_a0; + target_ulong __last[0]; +}; + +#define UNAME_MACHINE "loongarch64" +#define UNAME_MINIMUM_RELEASE "5.19.0" + +#define TARGET_MCL_CURRENT 1 +#define TARGET_MCL_FUTURE 2 +#define TARGET_MCL_ONFAULT 4 + +#define TARGET_FORCE_SHMLBA + +static inline abi_ulong target_shmlba(CPULoongArchState *env) +{ + return 64 * KiB; +} + +#endif diff --git a/linux-user/loongarch64/termbits.h b/linux-user/loongarch64/termbits.h new file mode 100644 index 0000000000..d425db8748 --- /dev/null +++ b/linux-user/loongarch64/termbits.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_TARGET_TERMBITS_H +#define LOONGARCH_TARGET_TERMBITS_H + +#include "../generic/termbits.h" + +#endif diff --git a/linux-user/m68k/cpu_loop.c b/linux-user/m68k/cpu_loop.c index c7a500b58c..caead1cb74 100644 --- a/linux-user/m68k/cpu_loop.c +++ b/linux-user/m68k/cpu_loop.c @@ -18,16 +18,16 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu.h" +#include "user-internals.h" #include "cpu_loop-common.h" +#include "signal-common.h" void cpu_loop(CPUM68KState *env) { CPUState *cs = env_cpu(env); int trapnr; unsigned int n; - target_siginfo_t info; for(;;) { cpu_exec_start(cs); @@ -36,39 +36,25 @@ void cpu_loop(CPUM68KState *env) process_queued_cpu_work(cs); switch(trapnr) { - case EXCP_HALT_INSN: - /* Semihosing syscall. */ - env->pc += 4; - do_m68k_semihosting(env, env->dregs[0]); - break; case EXCP_ILLEGAL: case EXCP_LINEA: case EXCP_LINEF: - info.si_signo = TARGET_SIGILL; - info.si_errno = 0; - info.si_code = TARGET_ILL_ILLOPN; - info._sifields._sigfault._addr = env->pc; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPN, env->pc); break; case EXCP_CHK: - info.si_signo = TARGET_SIGFPE; - info.si_errno = 0; - info.si_code = TARGET_FPE_INTOVF; - info._sifields._sigfault._addr = env->pc; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + case EXCP_TRAPCC: + force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTOVF, env->mmu.ar); break; case EXCP_DIV0: - info.si_signo = TARGET_SIGFPE; - info.si_errno = 0; - info.si_code = TARGET_FPE_INTDIV; - info._sifields._sigfault._addr = env->pc; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTDIV, env->mmu.ar); + break; + case EXCP_TRACE: + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_TRACE, env->mmu.ar); break; case EXCP_TRAP0: { abi_long ret; n = env->dregs[0]; - env->pc += 2; ret = do_syscall(env, n, env->dregs[1], @@ -78,9 +64,9 @@ void cpu_loop(CPUM68KState *env) env->dregs[5], env->aregs[0], 0, 0); - if (ret == -TARGET_ERESTARTSYS) { + if (ret == -QEMU_ERESTARTSYS) { env->pc -= 2; - } else if (ret != -TARGET_QEMU_ESIGRETURN) { + } else if (ret != -QEMU_ESIGRETURN) { env->dregs[0] = ret; } } @@ -88,21 +74,12 @@ void cpu_loop(CPUM68KState *env) case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; - case EXCP_ACCESS: - { - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - /* XXX: check env->error_code */ - info.si_code = TARGET_SEGV_MAPERR; - info._sifields._sigfault._addr = env->mmu.ar; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - } + case EXCP_TRAP0 + 1 ... 
EXCP_TRAP0 + 14: + force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLTRP, env->pc); break; case EXCP_DEBUG: - info.si_signo = TARGET_SIGTRAP; - info.si_errno = 0; - info.si_code = TARGET_TRAP_BRKPT; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + case EXCP_TRAP15: + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc); break; case EXCP_ATOMIC: cpu_exec_step_atomic(cs); diff --git a/linux-user/m68k/signal.c b/linux-user/m68k/signal.c index d06230655e..5f35354487 100644 --- a/linux-user/m68k/signal.c +++ b/linux-user/m68k/signal.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" @@ -38,7 +39,6 @@ struct target_sigframe int sig; int code; abi_ulong psc; - char retcode[8]; abi_ulong extramask[TARGET_NSIG_WORDS-1]; struct target_sigcontext sc; }; @@ -75,7 +75,6 @@ struct target_rt_sigframe int sig; abi_ulong pinfo; abi_ulong puc; - char retcode[8]; struct target_siginfo info; struct target_ucontext uc; }; @@ -129,7 +128,6 @@ void setup_frame(int sig, struct target_sigaction *ka, { struct target_sigframe *frame; abi_ulong frame_addr; - abi_ulong retcode_addr; abi_ulong sc_addr; int i; @@ -151,16 +149,7 @@ void setup_frame(int sig, struct target_sigaction *ka, } /* Set up to return from userspace. */ - - retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); - __put_user(retcode_addr, &frame->pretcode); - - /* moveq #,d0; trap #0 */ - - __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16), - (uint32_t *)(frame->retcode)); - - /* Set up to return from userspace */ + __put_user(default_sigreturn, &frame->pretcode); env->aregs[7] = frame_addr; env->pc = ka->_sa_handler; @@ -287,7 +276,6 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, { struct target_rt_sigframe *frame; abi_ulong frame_addr; - abi_ulong retcode_addr; abi_ulong info_addr; abi_ulong uc_addr; int err = 0; @@ -324,17 +312,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, } /* Set up to return from userspace. 
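 * The return address is no longer a retcode sequence copied into each
 * signal frame: frame->pretcode now points into the shared trampoline
 * page that setup_sigtramp() at the end of this file fills in once.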
*/ - - retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); - __put_user(retcode_addr, &frame->pretcode); - - /* moveq #,d0; notb d0; trap #0 */ - - __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16), - (uint32_t *)(frame->retcode + 0)); - __put_user(0x4e40, (uint16_t *)(frame->retcode + 4)); - - /* Set up to return from userspace */ + __put_user(default_rt_sigreturn, &frame->pretcode); env->aregs[7] = frame_addr; env->pc = ka->_sa_handler; @@ -375,11 +353,11 @@ long do_sigreturn(CPUM68KState *env) restore_sigcontext(env, &frame->sc); unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; badframe: force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; } long do_rt_sigreturn(CPUM68KState *env) @@ -403,10 +381,30 @@ long do_rt_sigreturn(CPUM68KState *env) target_restore_altstack(&frame->uc.tuc_stack, env); unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; +} + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + void *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 4 + 6, 0); + assert(tramp != NULL); + + default_sigreturn = sigtramp_page; + + /* moveq #,d0; trap #0 */ + __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16), (uint32_t *)tramp); + + default_rt_sigreturn = sigtramp_page + 4; + + /* moveq #,d0; notb d0; trap #0 */ + __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16), + (uint32_t *)(tramp + 4)); + __put_user(0x4e40, (uint16_t *)(tramp + 8)); + + unlock_user(tramp, sigtramp_page, 4 + 6); } diff --git a/linux-user/m68k/target_mman.h b/linux-user/m68k/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/m68k/target_mman.h @@ -0,0 +1 @@ +#include "../generic/target_mman.h" diff --git a/linux-user/m68k/target_prctl.h b/linux-user/m68k/target_prctl.h new file mode 100644 index 0000000000..eb53b31ad5 --- /dev/null +++ b/linux-user/m68k/target_prctl.h @@ -0,0 +1 @@ +/* No special prctl support required. 
*/ diff --git a/linux-user/m68k/target_resource.h b/linux-user/m68k/target_resource.h new file mode 100644 index 0000000000..227259594c --- /dev/null +++ b/linux-user/m68k/target_resource.h @@ -0,0 +1 @@ +#include "../generic/target_resource.h" diff --git a/linux-user/m68k/target_signal.h b/linux-user/m68k/target_signal.h index d096544ef8..6e0f4b74e3 100644 --- a/linux-user/m68k/target_signal.h +++ b/linux-user/m68k/target_signal.h @@ -1,25 +1,9 @@ #ifndef M68K_TARGET_SIGNAL_H #define M68K_TARGET_SIGNAL_H -/* this struct defines a stack used during syscall handling */ - -typedef struct target_sigaltstack { - abi_ulong ss_sp; - abi_int ss_flags; - abi_ulong ss_size; -} target_stack_t; - - -/* - * sigaltstack controls - */ -#define TARGET_SS_ONSTACK 1 -#define TARGET_SS_DISABLE 2 - -#define TARGET_MINSIGSTKSZ 2048 -#define TARGET_SIGSTKSZ 8192 - #include "../generic/signal.h" #define TARGET_ARCH_HAS_SETUP_FRAME +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* M68K_TARGET_SIGNAL_H */ diff --git a/linux-user/m68k/target_structs.h b/linux-user/m68k/target_structs.h index e373d481e1..3a06f373c3 100644 --- a/linux-user/m68k/target_structs.h +++ b/linux-user/m68k/target_structs.h @@ -1,58 +1 @@ -/* - * m68k specific structures for linux-user - * - * Copyright (c) 2013 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#ifndef M68K_TARGET_STRUCTS_H -#define M68K_TARGET_STRUCTS_H - -struct target_ipc_perm { - abi_int __key; /* Key. */ - abi_uint uid; /* Owner's user ID. */ - abi_uint gid; /* Owner's group ID. */ - abi_uint cuid; /* Creator's user ID. */ - abi_uint cgid; /* Creator's group ID. */ - abi_ushort mode; /* Read/write permission. */ - abi_ushort __pad1; - abi_ushort __seq; /* Sequence number. 
*/ - abi_ushort __pad2; - abi_ulong __unused1; - abi_ulong __unused2; -}; - -struct target_shmid_ds { - struct target_ipc_perm shm_perm; /* operation permission struct */ - abi_long shm_segsz; /* size of segment in bytes */ - abi_ulong shm_atime; /* time of last shmat() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused1; -#endif - abi_ulong shm_dtime; /* time of last shmdt() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused2; -#endif - abi_ulong shm_ctime; /* time of last change by shmctl() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused3; -#endif - abi_int shm_cpid; /* pid of creator */ - abi_int shm_lpid; /* pid of last shmop */ - abi_ulong shm_nattch; /* number of current attaches */ - abi_ulong __unused4; - abi_ulong __unused5; -}; - -#endif +#include "../generic/target_structs.h" diff --git a/linux-user/m68k/target_syscall.h b/linux-user/m68k/target_syscall.h index 23359a6299..8d4ddbd76c 100644 --- a/linux-user/m68k/target_syscall.h +++ b/linux-user/m68k/target_syscall.h @@ -20,7 +20,6 @@ struct target_pt_regs { #define UNAME_MACHINE "m68k" #define UNAME_MINIMUM_RELEASE "2.6.32" -#define TARGET_MINSIGSTKSZ 2048 #define TARGET_MCL_CURRENT 1 #define TARGET_MCL_FUTURE 2 #define TARGET_MCL_ONFAULT 4 diff --git a/linux-user/main.c b/linux-user/main.c index 37ed50d98e..a17fed045b 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -18,10 +18,9 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" +#include "qemu/help-texts.h" #include "qemu/units.h" #include "qemu/accel.h" -#include "sysemu/tcg.h" #include "qemu-version.h" #include #include @@ -30,6 +29,7 @@ #include "qapi/error.h" #include "qemu.h" +#include "user-internals.h" #include "qemu/path.h" #include "qemu/queue.h" #include "qemu/config-file.h" @@ -39,6 +39,7 @@ #include "qemu/module.h" #include "qemu/plugin.h" #include "exec/exec-all.h" +#include "exec/gdbstub.h" #include "tcg/tcg.h" #include "qemu/timer.h" #include "qemu/envlist.h" @@ -49,6 +50,13 @@ #include "cpu_loop-common.h" #include "crypto/init.h" #include "fd-trans.h" +#include "signal-common.h" +#include "loader.h" +#include "user-mmap.h" + +#ifdef CONFIG_SEMIHOSTING +#include "semihosting/semihost.h" +#endif #ifndef AT_FLAGS_PRESERVE_ARGV0 #define AT_FLAGS_PRESERVE_ARGV0_BIT 0 @@ -80,6 +88,7 @@ static bool enable_strace; * Used to support command line arguments overriding environment variables. */ static int last_log_mask; +static const char *last_log_filename; /* * When running 32-on-64 we should make sure we can fit all of the possible @@ -115,18 +124,15 @@ static void usage(int exitcode); static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX; const char *qemu_uname_release; +#if !defined(TARGET_DEFAULT_STACK_SIZE) /* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so we allocate a bigger stack. Need a better solution, for example by remapping the process stack directly at the right place */ -unsigned long guest_stack_size = 8 * 1024 * 1024UL; - -#if defined(TARGET_I386) -int cpu_get_pic_interrupt(CPUX86State *env) -{ - return -1; -} +#define TARGET_DEFAULT_STACK_SIZE 8 * 1024 * 1024UL #endif +unsigned long guest_stack_size = TARGET_DEFAULT_STACK_SIZE; + /***********************************************************/ /* Helper routines for implementing atomic operations. 
*/ @@ -136,10 +142,12 @@ void fork_start(void) start_exclusive(); mmap_fork_start(); cpu_list_lock(); + qemu_plugin_user_prefork_lock(); } void fork_end(int child) { + qemu_plugin_user_postfork(child); mmap_fork_end(child); if (child) { CPUState *cpu, *next_cpu; @@ -192,12 +200,26 @@ void stop_all_tasks(void) /* Assumes contents are already zeroed. */ void init_task_state(TaskState *ts) { + long ticks_per_sec; + struct timespec bt; + ts->used = 1; ts->sigaltstack_used = (struct target_sigaltstack) { .ss_sp = 0, .ss_size = 0, .ss_flags = TARGET_SS_DISABLE, }; + + /* Capture task start time relative to system boot */ + + ticks_per_sec = sysconf(_SC_CLK_TCK); + + if ((ticks_per_sec > 0) && !clock_gettime(CLOCK_BOOTTIME, &bt)) { + /* start_boottime is expressed in clock ticks */ + ts->start_boottime = bt.tv_sec * (uint64_t) ticks_per_sec; + ts->start_boottime += bt.tv_nsec * (uint64_t) ticks_per_sec / + NANOSECONDS_PER_SECOND; + } } CPUArchState *cpu_copy(CPUArchState *env) @@ -245,7 +267,7 @@ static void handle_arg_dfilter(const char *arg) static void handle_arg_log_filename(const char *arg) { - qemu_set_log_filename(arg, &error_fatal); + last_log_filename = arg; } static void handle_arg_set_env(const char *arg) @@ -463,7 +485,7 @@ static const struct qemu_argument arg_table[] = { "", "[[enable=]][,events=][,file=]"}, #ifdef CONFIG_PLUGIN {"plugin", "QEMU_PLUGIN", true, handle_arg_plugin, - "", "[file=][,arg=]"}, + "", "[file=][,=]"}, #endif {"version", "QEMU_VERSION", false, handle_arg_version, "", "display version information and exit"}, @@ -631,7 +653,6 @@ int main(int argc, char **argv, char **envp) int i; int ret; int execfd; - int log_mask; unsigned long max_reserved_va; bool preserve_argv0; @@ -653,7 +674,8 @@ int main(int argc, char **argv, char **envp) struct rlimit lim; if (getrlimit(RLIMIT_STACK, &lim) == 0 && lim.rlim_cur != RLIM_INFINITY - && lim.rlim_cur == (target_long)lim.rlim_cur) { + && lim.rlim_cur == (target_long)lim.rlim_cur + && lim.rlim_cur > guest_stack_size) { guest_stack_size = lim.rlim_cur; } } @@ -665,11 +687,9 @@ int main(int argc, char **argv, char **envp) optind = parse_args(argc, argv); - log_mask = last_log_mask | (enable_strace ? 
LOG_STRACE : 0); - if (log_mask) { - qemu_log_needs_buffers(); - qemu_set_log(log_mask); - } + qemu_set_log_filename_flags(last_log_filename, + last_log_mask | (enable_strace * LOG_STRACE), + &error_fatal); if (!trace_init_backends()) { exit(1); @@ -846,21 +866,36 @@ int main(int argc, char **argv, char **envp) g_free(target_environ); if (qemu_loglevel_mask(CPU_LOG_PAGE)) { - qemu_log("guest_base %p\n", (void *)guest_base); - log_page_dump("binary load"); + FILE *f = qemu_log_trylock(); + if (f) { + fprintf(f, "guest_base %p\n", (void *)guest_base); + fprintf(f, "page layout changed following binary load\n"); + page_dump(f); - qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk); - qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code); - qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n", info->start_code); - qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n", info->start_data); - qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data); - qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n", info->start_stack); - qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk); - qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry); - qemu_log("argv_start 0x" TARGET_ABI_FMT_lx "\n", info->arg_start); - qemu_log("env_start 0x" TARGET_ABI_FMT_lx "\n", - info->arg_end + (abi_ulong)sizeof(abi_ulong)); - qemu_log("auxv_start 0x" TARGET_ABI_FMT_lx "\n", info->saved_auxv); + fprintf(f, "start_brk 0x" TARGET_ABI_FMT_lx "\n", + info->start_brk); + fprintf(f, "end_code 0x" TARGET_ABI_FMT_lx "\n", + info->end_code); + fprintf(f, "start_code 0x" TARGET_ABI_FMT_lx "\n", + info->start_code); + fprintf(f, "start_data 0x" TARGET_ABI_FMT_lx "\n", + info->start_data); + fprintf(f, "end_data 0x" TARGET_ABI_FMT_lx "\n", + info->end_data); + fprintf(f, "start_stack 0x" TARGET_ABI_FMT_lx "\n", + info->start_stack); + fprintf(f, "brk 0x" TARGET_ABI_FMT_lx "\n", + info->brk); + fprintf(f, "entry 0x" TARGET_ABI_FMT_lx "\n", + info->entry); + fprintf(f, "argv_start 0x" TARGET_ABI_FMT_lx "\n", + info->argv); + fprintf(f, "env_start 0x" TARGET_ABI_FMT_lx "\n", + info->envp); + fprintf(f, "auxv_start 0x" TARGET_ABI_FMT_lx "\n", + info->saved_auxv); + qemu_log_unlock(f); + } } target_set_brk(info->brk); @@ -882,6 +917,11 @@ int main(int argc, char **argv, char **envp) } gdb_handlesig(cpu, 0); } + +#ifdef CONFIG_SEMIHOSTING + qemu_semihosting_guestfd_init(); +#endif + cpu_loop(env); /* never exits */ return 0; diff --git a/linux-user/meson.build b/linux-user/meson.build index 9549f81682..de4320af05 100644 --- a/linux-user/meson.build +++ b/linux-user/meson.build @@ -1,3 +1,12 @@ +if not have_linux_user + subdir_done() +endif + +linux_user_ss = ss.source_set() + +common_user_inc += include_directories('include/host/' / host_arch) +common_user_inc += include_directories('include') + linux_user_ss.add(files( 'elfload.c', 'exit.c', @@ -5,10 +14,10 @@ linux_user_ss.add(files( 'linuxload.c', 'main.c', 'mmap.c', - 'safe-syscall.S', 'signal.c', 'strace.c', 'syscall.c', + 'thunk.c', 'uaccess.c', 'uname.c', )) @@ -35,3 +44,5 @@ subdir('sh4') subdir('sparc') subdir('x86_64') subdir('xtensa') + +specific_ss.add_all(when: 'CONFIG_LINUX_USER', if_true: linux_user_ss) diff --git a/linux-user/microblaze/cpu_loop.c b/linux-user/microblaze/cpu_loop.c index c3396a6e09..5ccf9e942e 100644 --- a/linux-user/microblaze/cpu_loop.c +++ b/linux-user/microblaze/cpu_loop.c @@ -18,16 +18,16 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu.h" +#include "user-internals.h" #include "cpu_loop-common.h" +#include 
"signal-common.h" void cpu_loop(CPUMBState *env) { CPUState *cs = env_cpu(env); - int trapnr, ret; - target_siginfo_t info; - + int trapnr, ret, si_code; + while (1) { cpu_exec_start(cs); trapnr = cpu_exec(cs); @@ -35,19 +35,9 @@ void cpu_loop(CPUMBState *env) process_queued_cpu_work(cs); switch (trapnr) { - case 0xaa: - { - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - /* XXX: check env->error_code */ - info.si_code = TARGET_SEGV_MAPERR; - info._sifields._sigfault._addr = 0; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - } - break; case EXCP_INTERRUPT: - /* just indicate that signals should be handled asap */ - break; + /* just indicate that signals should be handled asap */ + break; case EXCP_SYSCALL: /* Return address is 4 bytes after the call. */ env->regs[14] += 4; @@ -61,10 +51,10 @@ void cpu_loop(CPUMBState *env) env->regs[9], env->regs[10], 0, 0); - if (ret == -TARGET_ERESTARTSYS) { + if (ret == -QEMU_ERESTARTSYS) { /* Wind back to before the syscall. */ env->pc -= 4; - } else if (ret != -TARGET_QEMU_ESIGRETURN) { + } else if (ret != -QEMU_ESIGRETURN) { env->regs[3] = ret; } /* All syscall exits result in guest r14 being equal to the @@ -75,6 +65,7 @@ void cpu_loop(CPUMBState *env) */ env->regs[14] = env->pc; break; + case EXCP_HW_EXCP: env->regs[17] = env->pc + 4; if (env->iflags & D_FLAG) { @@ -82,42 +73,41 @@ void cpu_loop(CPUMBState *env) env->pc -= 4; /* FIXME: if branch was immed, replay the imm as well. */ } - env->iflags &= ~(IMM_FLAG | D_FLAG); - switch (env->esr & 31) { - case ESR_EC_DIVZERO: - info.si_signo = TARGET_SIGFPE; - info.si_errno = 0; - info.si_code = TARGET_FPE_FLTDIV; - info._sifields._sigfault._addr = 0; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; - case ESR_EC_FPU: - info.si_signo = TARGET_SIGFPE; - info.si_errno = 0; - if (env->fsr & FSR_IO) { - info.si_code = TARGET_FPE_FLTINV; - } - if (env->fsr & FSR_DZ) { - info.si_code = TARGET_FPE_FLTDIV; - } - info._sifields._sigfault._addr = 0; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; - default: - fprintf(stderr, "Unhandled hw-exception: 0x%x\n", - env->esr & ESR_EC_MASK); - cpu_dump_state(cs, stderr, 0); - exit(EXIT_FAILURE); - break; + case ESR_EC_DIVZERO: + si_code = TARGET_FPE_INTDIV; + break; + case ESR_EC_FPU: + /* + * Note that the kernel passes along fsr as si_code + * if there's no recognized bit set. Possibly this + * implies that si_code is 0, but follow the structure. 
+ */ + si_code = env->fsr; + if (si_code & FSR_IO) { + si_code = TARGET_FPE_FLTINV; + } else if (si_code & FSR_OF) { + si_code = TARGET_FPE_FLTOVF; + } else if (si_code & FSR_UF) { + si_code = TARGET_FPE_FLTUND; + } else if (si_code & FSR_DZ) { + si_code = TARGET_FPE_FLTDIV; + } else if (si_code & FSR_DO) { + si_code = TARGET_FPE_FLTRES; + } + break; + default: + fprintf(stderr, "Unhandled hw-exception: 0x%x\n", + env->esr & ESR_EC_MASK); + cpu_dump_state(cs, stderr, 0); + exit(EXIT_FAILURE); } + force_sig_fault(TARGET_SIGFPE, si_code, env->pc); break; + case EXCP_DEBUG: - info.si_signo = TARGET_SIGTRAP; - info.si_errno = 0; - info.si_code = TARGET_TRAP_BRKPT; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc); break; case EXCP_ATOMIC: cpu_exec_step_atomic(cs); diff --git a/linux-user/microblaze/signal.c b/linux-user/microblaze/signal.c index 4c483bd8c6..5188d74025 100644 --- a/linux-user/microblaze/signal.c +++ b/linux-user/microblaze/signal.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" @@ -160,17 +161,11 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, /* Kernel does not use SA_RESTORER. */ - /* addi r12, r0, __NR_sigreturn */ - __put_user(0x31800000U | TARGET_NR_rt_sigreturn, frame->tramp + 0); - /* brki r14, 0x8 */ - __put_user(0xb9cc0008U, frame->tramp + 1); - /* * Return from sighandler will jump to the tramp. * Negative 8 offset because return is rtsd r15, 8 */ - env->regs[15] = - frame_addr + offsetof(struct target_rt_sigframe, tramp) - 8; + env->regs[15] = default_rt_sigreturn - 8; /* Set up registers for signal handler */ env->regs[1] = frame_addr; @@ -212,10 +207,26 @@ long do_rt_sigreturn(CPUMBState *env) target_restore_altstack(&frame->uc.tuc_stack, env); unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; +} + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0); + assert(tramp != NULL); + + /* + * addi r12, r0, __NR_rt_sigreturn + * brki r14, 0x8 + */ + __put_user(0x31800000U | TARGET_NR_rt_sigreturn, tramp); + __put_user(0xb9cc0008U, tramp + 1); + + default_rt_sigreturn = sigtramp_page; + unlock_user(tramp, sigtramp_page, 8); } diff --git a/linux-user/microblaze/target_mman.h b/linux-user/microblaze/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/microblaze/target_mman.h @@ -0,0 +1 @@ +#include "../generic/target_mman.h" diff --git a/linux-user/microblaze/target_prctl.h b/linux-user/microblaze/target_prctl.h new file mode 100644 index 0000000000..eb53b31ad5 --- /dev/null +++ b/linux-user/microblaze/target_prctl.h @@ -0,0 +1 @@ +/* No special prctl support required. 
*/ diff --git a/linux-user/microblaze/target_resource.h b/linux-user/microblaze/target_resource.h new file mode 100644 index 0000000000..227259594c --- /dev/null +++ b/linux-user/microblaze/target_resource.h @@ -0,0 +1 @@ +#include "../generic/target_resource.h" diff --git a/linux-user/microblaze/target_signal.h b/linux-user/microblaze/target_signal.h index 1c326296de..7dc5c45f00 100644 --- a/linux-user/microblaze/target_signal.h +++ b/linux-user/microblaze/target_signal.h @@ -1,24 +1,8 @@ #ifndef MICROBLAZE_TARGET_SIGNAL_H #define MICROBLAZE_TARGET_SIGNAL_H -/* this struct defines a stack used during syscall handling */ - -typedef struct target_sigaltstack { - abi_ulong ss_sp; - abi_int ss_flags; - abi_ulong ss_size; -} target_stack_t; - - -/* - * sigaltstack controls - */ -#define TARGET_SS_ONSTACK 1 -#define TARGET_SS_DISABLE 2 - -#define TARGET_MINSIGSTKSZ 2048 -#define TARGET_SIGSTKSZ 8192 - #include "../generic/signal.h" +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* MICROBLAZE_TARGET_SIGNAL_H */ diff --git a/linux-user/microblaze/target_structs.h b/linux-user/microblaze/target_structs.h index d08f6a53a8..3a06f373c3 100644 --- a/linux-user/microblaze/target_structs.h +++ b/linux-user/microblaze/target_structs.h @@ -1,58 +1 @@ -/* - * MicroBlaze specific structures for linux-user - * - * Copyright (c) 2013 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#ifndef MICROBLAZE_TARGET_STRUCTS_H -#define MICROBLAZE_TARGET_STRUCTS_H - -struct target_ipc_perm { - abi_int __key; /* Key. */ - abi_uint uid; /* Owner's user ID. */ - abi_uint gid; /* Owner's group ID. */ - abi_uint cuid; /* Creator's user ID. */ - abi_uint cgid; /* Creator's group ID. */ - abi_ushort mode; /* Read/write permission. */ - abi_ushort __pad1; - abi_ushort __seq; /* Sequence number. 
*/ - abi_ushort __pad2; - abi_ulong __unused1; - abi_ulong __unused2; -}; - -struct target_shmid_ds { - struct target_ipc_perm shm_perm; /* operation permission struct */ - abi_long shm_segsz; /* size of segment in bytes */ - abi_ulong shm_atime; /* time of last shmat() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused1; -#endif - abi_ulong shm_dtime; /* time of last shmdt() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused2; -#endif - abi_ulong shm_ctime; /* time of last change by shmctl() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused3; -#endif - abi_int shm_cpid; /* pid of creator */ - abi_int shm_lpid; /* pid of last shmop */ - abi_ulong shm_nattch; /* number of current attaches */ - abi_ulong __unused4; - abi_ulong __unused5; -}; - -#endif +#include "../generic/target_structs.h" diff --git a/linux-user/microblaze/target_syscall.h b/linux-user/microblaze/target_syscall.h index 7f653db34f..43362a1664 100644 --- a/linux-user/microblaze/target_syscall.h +++ b/linux-user/microblaze/target_syscall.h @@ -49,7 +49,6 @@ struct target_pt_regs { }; #define TARGET_CLONE_BACKWARDS -#define TARGET_MINSIGSTKSZ 2048 #define TARGET_MCL_CURRENT 1 #define TARGET_MCL_FUTURE 2 #define TARGET_MCL_ONFAULT 4 diff --git a/linux-user/mips/cpu_loop.c b/linux-user/mips/cpu_loop.c index 9d813ece4e..8735e58bad 100644 --- a/linux-user/mips/cpu_loop.c +++ b/linux-user/mips/cpu_loop.c @@ -18,9 +18,10 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu.h" +#include "user-internals.h" #include "cpu_loop-common.h" +#include "signal-common.h" #include "elf.h" #include "internal.h" #include "fpu_helper.h" @@ -38,36 +39,32 @@ enum { BRK_DIVZERO = 7 }; -static int do_break(CPUMIPSState *env, target_siginfo_t *info, - unsigned int code) +static void do_tr_or_bp(CPUMIPSState *env, unsigned int code, bool trap) { - int ret = -1; + target_ulong pc = env->active_tc.PC; switch (code) { case BRK_OVERFLOW: + force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTOVF, pc); + break; case BRK_DIVZERO: - info->si_signo = TARGET_SIGFPE; - info->si_errno = 0; - info->si_code = (code == BRK_OVERFLOW) ? FPE_INTOVF : FPE_INTDIV; - queue_signal(env, info->si_signo, QEMU_SI_FAULT, &*info); - ret = 0; + force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTDIV, pc); break; default: - info->si_signo = TARGET_SIGTRAP; - info->si_errno = 0; - queue_signal(env, info->si_signo, QEMU_SI_FAULT, &*info); - ret = 0; + if (trap) { + force_sig(TARGET_SIGTRAP); + } else { + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, pc); + } break; } - - return ret; } void cpu_loop(CPUMIPSState *env) { CPUState *cs = env_cpu(env); - target_siginfo_t info; - int trapnr; + int trapnr, si_code; + unsigned int code; abi_long ret; # ifdef TARGET_ABI_MIPSO32 unsigned int syscall_num; @@ -139,11 +136,11 @@ done_syscall: env->active_tc.gpr[8], env->active_tc.gpr[9], env->active_tc.gpr[10], env->active_tc.gpr[11]); # endif /* O32 */ - if (ret == -TARGET_ERESTARTSYS) { + if (ret == -QEMU_ERESTARTSYS) { env->active_tc.PC -= 4; break; } - if (ret == -TARGET_QEMU_ESIGRETURN) { + if (ret == -QEMU_ESIGRETURN) { /* Returning from a successful sigreturn syscall. Avoid clobbering register state. 
*/ break; @@ -156,162 +153,55 @@ done_syscall: } env->active_tc.gpr[2] = ret; break; - case EXCP_TLBL: - case EXCP_TLBS: - case EXCP_AdEL: - case EXCP_AdES: - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - /* XXX: check env->error_code */ - info.si_code = TARGET_SEGV_MAPERR; - info._sifields._sigfault._addr = env->CP0_BadVAddr; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; case EXCP_CpU: case EXCP_RI: - info.si_signo = TARGET_SIGILL; - info.si_errno = 0; - info.si_code = 0; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + case EXCP_DSPDIS: + force_sig(TARGET_SIGILL); break; case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; case EXCP_DEBUG: - info.si_signo = TARGET_SIGTRAP; - info.si_errno = 0; - info.si_code = TARGET_TRAP_BRKPT; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; - case EXCP_DSPDIS: - info.si_signo = TARGET_SIGILL; - info.si_errno = 0; - info.si_code = TARGET_ILL_ILLOPC; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, + env->active_tc.PC); break; case EXCP_FPE: - info.si_signo = TARGET_SIGFPE; - info.si_errno = 0; - info.si_code = TARGET_FPE_FLTUNK; + si_code = TARGET_FPE_FLTUNK; if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) { - info.si_code = TARGET_FPE_FLTINV; + si_code = TARGET_FPE_FLTINV; } else if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_DIV0) { - info.si_code = TARGET_FPE_FLTDIV; + si_code = TARGET_FPE_FLTDIV; } else if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_OVERFLOW) { - info.si_code = TARGET_FPE_FLTOVF; + si_code = TARGET_FPE_FLTOVF; } else if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_UNDERFLOW) { - info.si_code = TARGET_FPE_FLTUND; + si_code = TARGET_FPE_FLTUND; } else if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INEXACT) { - info.si_code = TARGET_FPE_FLTRES; + si_code = TARGET_FPE_FLTRES; } - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGFPE, si_code, env->active_tc.PC); break; + /* The code below was inspired by the MIPS Linux kernel trap * handling code in arch/mips/kernel/traps.c. */ case EXCP_BREAK: - { - abi_ulong trap_instr; - unsigned int code; - - if (env->hflags & MIPS_HFLAG_M16) { - if (env->insn_flags & ASE_MICROMIPS) { - /* microMIPS mode */ - ret = get_user_u16(trap_instr, env->active_tc.PC); - if (ret != 0) { - goto error; - } - - if ((trap_instr >> 10) == 0x11) { - /* 16-bit instruction */ - code = trap_instr & 0xf; - } else { - /* 32-bit instruction */ - abi_ulong instr_lo; - - ret = get_user_u16(instr_lo, - env->active_tc.PC + 2); - if (ret != 0) { - goto error; - } - trap_instr = (trap_instr << 16) | instr_lo; - code = ((trap_instr >> 6) & ((1 << 20) - 1)); - /* Unfortunately, microMIPS also suffers from - the old assembler bug... */ - if (code >= (1 << 10)) { - code >>= 10; - } - } - } else { - /* MIPS16e mode */ - ret = get_user_u16(trap_instr, env->active_tc.PC); - if (ret != 0) { - goto error; - } - code = (trap_instr >> 6) & 0x3f; - } - } else { - ret = get_user_u32(trap_instr, env->active_tc.PC); - if (ret != 0) { - goto error; - } - - /* As described in the original Linux kernel code, the - * below checks on 'code' are to work around an old - * assembly bug. 
- */ - code = ((trap_instr >> 6) & ((1 << 20) - 1)); - if (code >= (1 << 10)) { - code >>= 10; - } - } - - if (do_break(env, &info, code) != 0) { - goto error; - } + /* + * As described in the original Linux kernel code, the below + * checks on 'code' are to work around an old assembly bug. + */ + code = env->error_code; + if (code >= (1 << 10)) { + code >>= 10; } + do_tr_or_bp(env, code, false); break; case EXCP_TRAP: - { - abi_ulong trap_instr; - unsigned int code = 0; - - if (env->hflags & MIPS_HFLAG_M16) { - /* microMIPS mode */ - abi_ulong instr[2]; - - ret = get_user_u16(instr[0], env->active_tc.PC) || - get_user_u16(instr[1], env->active_tc.PC + 2); - - trap_instr = (instr[0] << 16) | instr[1]; - } else { - ret = get_user_u32(trap_instr, env->active_tc.PC); - } - - if (ret != 0) { - goto error; - } - - /* The immediate versions don't provide a code. */ - if (!(trap_instr & 0xFC000000)) { - if (env->hflags & MIPS_HFLAG_M16) { - /* microMIPS mode */ - code = ((trap_instr >> 12) & ((1 << 4) - 1)); - } else { - code = ((trap_instr >> 6) & ((1 << 10) - 1)); - } - } - - if (do_break(env, &info, code) != 0) { - goto error; - } - } + do_tr_or_bp(env, env->error_code, true); break; case EXCP_ATOMIC: cpu_exec_step_atomic(cs); break; default: -error: EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr); abort(); } @@ -400,7 +290,10 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) env->CP0_Status |= (1 << CP0St_FR); env->hflags |= MIPS_HFLAG_F64; } - } else if (!prog_req.fre && !prog_req.frdefault && + } else if (prog_req.fr1) { + env->CP0_Status |= (1 << CP0St_FR); + env->hflags |= MIPS_HFLAG_F64; + } else if (!prog_req.fre && !prog_req.frdefault && !prog_req.fr1 && !prog_req.single && !prog_req.soft) { fprintf(stderr, "qemu: Can't find a matching FPU mode\n"); exit(1); diff --git a/linux-user/mips/signal.c b/linux-user/mips/signal.c index e6be807a81..58a9d7a8a3 100644 --- a/linux-user/mips/signal.c +++ b/linux-user/mips/signal.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" @@ -86,10 +87,8 @@ struct target_rt_sigframe { }; /* Install trampoline to jump back from signal handler */ -static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall) +static void install_sigtramp(uint32_t *tramp, unsigned int syscall) { - int err = 0; - /* * Set up the return code ... 
* @@ -99,7 +98,6 @@ static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall) __put_user(0x24020000 + syscall, tramp + 0); __put_user(0x0000000c , tramp + 1); - return err; } static inline void setup_sigcontext(CPUMIPSState *regs, @@ -211,8 +209,6 @@ void setup_frame(int sig, struct target_sigaction * ka, goto give_sigsegv; } - install_sigtramp(frame->sf_code, TARGET_NR_sigreturn); - setup_sigcontext(regs, &frame->sf_sc); for(i = 0; i < TARGET_NSIG_WORDS; i++) { @@ -233,7 +229,7 @@ void setup_frame(int sig, struct target_sigaction * ka, regs->active_tc.gpr[ 5] = 0; regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc); regs->active_tc.gpr[29] = frame_addr; - regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code); + regs->active_tc.gpr[31] = default_sigreturn; /* The original kernel code sets CP0_EPC to the handler * since it returns to userland using eret * we cannot do this here, and we must set PC directly */ @@ -285,11 +281,11 @@ long do_sigreturn(CPUMIPSState *regs) /* I am not sure this is right, but it seems to work * maybe a problem with nested signals ? */ regs->CP0_EPC = 0; - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; badframe: force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; } # endif /* O32 */ @@ -307,8 +303,6 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, goto give_sigsegv; } - install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn); - tswap_siginfo(&frame->rs_info, info); __put_user(0, &frame->rs_uc.tuc_flags); @@ -337,11 +331,13 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, env->active_tc.gpr[ 6] = frame_addr + offsetof(struct target_rt_sigframe, rs_uc); env->active_tc.gpr[29] = frame_addr; - env->active_tc.gpr[31] = frame_addr - + offsetof(struct target_rt_sigframe, rs_code); - /* The original kernel code sets CP0_EPC to the handler - * since it returns to userland using eret - * we cannot do this here, and we must set PC directly */ + env->active_tc.gpr[31] = default_rt_sigreturn; + + /* + * The original kernel code sets CP0_EPC to the handler + * since it returns to userland using eret + * we cannot do this here, and we must set PC directly + */ env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler; mips_set_hflags_isa_mode_from_pc(env); unlock_user_struct(frame, frame_addr, 1); @@ -375,9 +371,25 @@ long do_rt_sigreturn(CPUMIPSState *env) /* I am not sure this is right, but it seems to work * maybe a problem with nested signals ? 
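install_sigtramp() above now simply stores two instruction words. A standalone sketch, not from the patch, spelling out what those words are: 0x24020000 + nr is the MIPS "addiu $v0, $zero, nr" (i.e. li v0, nr) and 0x0000000c is "syscall"; the syscall number used in main() is illustrative only.

#include <stdint.h>
#include <stdio.h>

static void build_mips_sigtramp(uint32_t tramp[2], unsigned int nr)
{
    tramp[0] = 0x24020000 + nr;   /* li      v0, nr  (syscall number) */
    tramp[1] = 0x0000000c;        /* syscall                          */
}

int main(void)
{
    uint32_t t[2];

    build_mips_sigtramp(t, 4193);             /* illustrative number only */
    printf("tramp: %08x %08x\n", t[0], t[1]);
    return 0;
}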
*/ env->CP0_EPC = 0; - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; badframe: force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; +} + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0); + assert(tramp != NULL); + +#ifdef TARGET_ARCH_HAS_SETUP_FRAME + default_sigreturn = sigtramp_page; + install_sigtramp(tramp, TARGET_NR_sigreturn); +#endif + + default_rt_sigreturn = sigtramp_page + 8; + install_sigtramp(tramp + 2, TARGET_NR_rt_sigreturn); + + unlock_user(tramp, sigtramp_page, 2 * 8); } diff --git a/linux-user/mips/target_mman.h b/linux-user/mips/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/mips/target_mman.h @@ -0,0 +1 @@ +#include "../generic/target_mman.h" diff --git a/linux-user/mips/target_prctl.h b/linux-user/mips/target_prctl.h new file mode 100644 index 0000000000..e028333db9 --- /dev/null +++ b/linux-user/mips/target_prctl.h @@ -0,0 +1,88 @@ +/* + * MIPS specific prctl functions for linux-user + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef MIPS_TARGET_PRCTL_H +#define MIPS_TARGET_PRCTL_H + +static abi_long do_prctl_get_fp_mode(CPUArchState *env) +{ + abi_long ret = 0; + + if (env->CP0_Status & (1 << CP0St_FR)) { + ret |= PR_FP_MODE_FR; + } + if (env->CP0_Config5 & (1 << CP0C5_FRE)) { + ret |= PR_FP_MODE_FRE; + } + return ret; +} +#define do_prctl_get_fp_mode do_prctl_get_fp_mode + +static abi_long do_prctl_set_fp_mode(CPUArchState *env, abi_long arg2) +{ + bool old_fr = env->CP0_Status & (1 << CP0St_FR); + bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE); + bool new_fr = arg2 & PR_FP_MODE_FR; + bool new_fre = arg2 & PR_FP_MODE_FRE; + const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE; + + /* If nothing to change, return right away, successfully. */ + if (old_fr == new_fr && old_fre == new_fre) { + return 0; + } + /* Check the value is valid */ + if (arg2 & ~known_bits) { + return -TARGET_EOPNOTSUPP; + } + /* Setting FRE without FR is not supported. 
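do_prctl_get_fp_mode()/do_prctl_set_fp_mode() above implement the MIPS PR_GET_FP_MODE/PR_SET_FP_MODE prctls, using the same option numbers and mode bits that a later hunk removes from target_syscall.h (45/46, FR in bit 0, FRE in bit 1). A guest-side usage sketch, not part of the patch; the fallback defines are only needed where the installed <sys/prctl.h> predates these options:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_FP_MODE
#define PR_SET_FP_MODE   45
#define PR_GET_FP_MODE   46
#define PR_FP_MODE_FR    (1 << 0)
#define PR_FP_MODE_FRE   (1 << 1)
#endif

int main(void)
{
    int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

    printf("FP mode: FR=%d FRE=%d\n",
           !!(mode & PR_FP_MODE_FR), !!(mode & PR_FP_MODE_FRE));

    /* Requesting FR=1 fails with EOPNOTSUPP when the FPU cannot provide
     * it, mirroring the checks in do_prctl_set_fp_mode() above. */
    if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR, 0, 0, 0) != 0) {
        perror("PR_SET_FP_MODE");
    }
    return 0;
}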
*/ + if (new_fre && !new_fr) { + return -TARGET_EOPNOTSUPP; + } + if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) { + /* FR1 is not supported */ + return -TARGET_EOPNOTSUPP; + } + if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64)) + && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) { + /* cannot set FR=0 */ + return -TARGET_EOPNOTSUPP; + } + if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) { + /* Cannot set FRE=1 */ + return -TARGET_EOPNOTSUPP; + } + + int i; + fpr_t *fpr = env->active_fpu.fpr; + for (i = 0; i < 32 ; i += 2) { + if (!old_fr && new_fr) { + fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX]; + } else if (old_fr && !new_fr) { + fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX]; + } + } + + if (new_fr) { + env->CP0_Status |= (1 << CP0St_FR); + env->hflags |= MIPS_HFLAG_F64; + } else { + env->CP0_Status &= ~(1 << CP0St_FR); + env->hflags &= ~MIPS_HFLAG_F64; + } + if (new_fre) { + env->CP0_Config5 |= (1 << CP0C5_FRE); + if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) { + env->hflags |= MIPS_HFLAG_FRE; + } + } else { + env->CP0_Config5 &= ~(1 << CP0C5_FRE); + env->hflags &= ~MIPS_HFLAG_FRE; + } + + return 0; +} +#define do_prctl_set_fp_mode do_prctl_set_fp_mode + +#endif /* MIPS_TARGET_PRCTL_H */ diff --git a/linux-user/mips/target_resource.h b/linux-user/mips/target_resource.h new file mode 100644 index 0000000000..6d131b041d --- /dev/null +++ b/linux-user/mips/target_resource.h @@ -0,0 +1,24 @@ +#ifndef MIPS_TARGET_RESOURCE_H +#define MIPS_TARGET_RESOURCE_H + +#include "../generic/target_resource.h" + +#undef TARGET_RLIM_INFINITY +#define TARGET_RLIM_INFINITY 0x7fffffffUL + +#undef TARGET_RLIMIT_NOFILE +#define TARGET_RLIMIT_NOFILE 5 + +#undef TARGET_RLIMIT_AS +#define TARGET_RLIMIT_AS 6 + +#undef TARGET_RLIMIT_RSS +#define TARGET_RLIMIT_RSS 7 + +#undef TARGET_RLIMIT_NPROC +#define TARGET_RLIMIT_NPROC 8 + +#undef TARGET_RLIMIT_MEMLOCK +#define TARGET_RLIMIT_MEMLOCK 9 + +#endif diff --git a/linux-user/mips/target_signal.h b/linux-user/mips/target_signal.h index d521765f6b..fa542c1f4e 100644 --- a/linux-user/mips/target_signal.h +++ b/linux-user/mips/target_signal.h @@ -67,12 +67,12 @@ typedef struct target_sigaltstack { #define TARGET_SA_RESTORER 0x04000000 /* Only for O32 */ #define TARGET_MINSIGSTKSZ 2048 -#define TARGET_SIGSTKSZ 8192 #if defined(TARGET_ABI_MIPSO32) /* compare linux/arch/mips/kernel/signal.c:setup_frame() */ #define TARGET_ARCH_HAS_SETUP_FRAME #endif +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 /* bit-flags */ #define TARGET_SS_AUTODISARM (1U << 31) /* disable sas during sighandling */ diff --git a/linux-user/mips/target_syscall.h b/linux-user/mips/target_syscall.h index f59057493a..08ead67810 100644 --- a/linux-user/mips/target_syscall.h +++ b/linux-user/mips/target_syscall.h @@ -24,7 +24,6 @@ struct target_pt_regs { #define UNAME_MINIMUM_RELEASE "2.6.32" #define TARGET_CLONE_BACKWARDS -#define TARGET_MINSIGSTKSZ 2048 #define TARGET_MCL_CURRENT 1 #define TARGET_MCL_FUTURE 2 #define TARGET_MCL_ONFAULT 4 @@ -36,10 +35,4 @@ static inline abi_ulong target_shmlba(CPUMIPSState *env) return 0x40000; } -/* MIPS-specific prctl() options */ -#define TARGET_PR_SET_FP_MODE 45 -#define TARGET_PR_GET_FP_MODE 46 -#define TARGET_PR_FP_MODE_FR (1 << 0) -#define TARGET_PR_FP_MODE_FRE (1 << 1) - #endif /* MIPS_TARGET_SYSCALL_H */ diff --git a/linux-user/mips64/target_mman.h b/linux-user/mips64/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/mips64/target_mman.h @@ -0,0 +1 @@ +#include 
"../generic/target_mman.h" diff --git a/linux-user/mips64/target_prctl.h b/linux-user/mips64/target_prctl.h new file mode 100644 index 0000000000..18da9ae619 --- /dev/null +++ b/linux-user/mips64/target_prctl.h @@ -0,0 +1 @@ +#include "../mips/target_prctl.h" diff --git a/linux-user/mips64/target_resource.h b/linux-user/mips64/target_resource.h new file mode 100644 index 0000000000..fe29002a12 --- /dev/null +++ b/linux-user/mips64/target_resource.h @@ -0,0 +1 @@ +#include "../mips/target_resource.h" diff --git a/linux-user/mips64/target_signal.h b/linux-user/mips64/target_signal.h index d857c55e4c..b05098f7f6 100644 --- a/linux-user/mips64/target_signal.h +++ b/linux-user/mips64/target_signal.h @@ -65,7 +65,6 @@ typedef struct target_sigaltstack { #define TARGET_SA_RESETHAND 0x80000000 #define TARGET_MINSIGSTKSZ 2048 -#define TARGET_SIGSTKSZ 8192 /* bit-flags */ #define TARGET_SS_AUTODISARM (1U << 31) /* disable sas during sighandling */ @@ -76,4 +75,6 @@ typedef struct target_sigaltstack { /* compare linux/arch/mips/kernel/signal.c:setup_frame() */ #define TARGET_ARCH_HAS_SETUP_FRAME #endif +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* MIPS64_TARGET_SIGNAL_H */ diff --git a/linux-user/mips64/target_syscall.h b/linux-user/mips64/target_syscall.h index cd1e1b4969..358dc2d64c 100644 --- a/linux-user/mips64/target_syscall.h +++ b/linux-user/mips64/target_syscall.h @@ -21,7 +21,6 @@ struct target_pt_regs { #define UNAME_MINIMUM_RELEASE "2.6.32" #define TARGET_CLONE_BACKWARDS -#define TARGET_MINSIGSTKSZ 2048 #define TARGET_MCL_CURRENT 1 #define TARGET_MCL_FUTURE 2 #define TARGET_MCL_ONFAULT 4 @@ -33,10 +32,4 @@ static inline abi_ulong target_shmlba(CPUMIPSState *env) return 0x40000; } -/* MIPS-specific prctl() options */ -#define TARGET_PR_SET_FP_MODE 45 -#define TARGET_PR_GET_FP_MODE 46 -#define TARGET_PR_FP_MODE_FR (1 << 0) -#define TARGET_PR_FP_MODE_FRE (1 << 1) - #endif /* MIPS64_TARGET_SYSCALL_H */ diff --git a/linux-user/mmap.c b/linux-user/mmap.c index 0e103859fe..10f5079331 100644 --- a/linux-user/mmap.c +++ b/linux-user/mmap.c @@ -20,6 +20,9 @@ #include "trace.h" #include "exec/log.h" #include "qemu.h" +#include "user-internals.h" +#include "user-mmap.h" +#include "target_mman.h" static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER; static __thread int mmap_lock_count; @@ -103,6 +106,8 @@ static int validate_prot_to_pageflags(int *host_prot, int prot) page_flags |= PAGE_MTE; } } +#elif defined(TARGET_HPPA) + valid |= PROT_GROWSDOWN | PROT_GROWSUP; #endif return prot & ~valid ? 
0 : page_flags; @@ -175,9 +180,10 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot) goto error; } } + page_set_flags(start, start + len, page_flags); - mmap_unlock(); - return 0; + ret = 0; + error: mmap_unlock(); return ret; @@ -249,8 +255,12 @@ static int mmap_frag(abi_ulong real_start, # define TASK_UNMAPPED_BASE (1ul << 38) #endif #else +#ifdef TARGET_HPPA +# define TASK_UNMAPPED_BASE 0xfa000000 +#else # define TASK_UNMAPPED_BASE 0x40000000 #endif +#endif abi_ulong mmap_next_start = TASK_UNMAPPED_BASE; unsigned long last_brk; @@ -422,7 +432,8 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align) abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot, int flags, int fd, abi_ulong offset) { - abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len; + abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len, + passthrough_start = -1, passthrough_end = -1; int page_flags, host_prot; mmap_lock(); @@ -492,7 +503,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot, may need to truncate file maps at EOF and add extra anonymous pages up to the targets page boundary. */ - if ((qemu_real_host_page_size < qemu_host_page_size) && + if ((qemu_real_host_page_size() < qemu_host_page_size) && !(flags & MAP_ANONYMOUS)) { struct stat sb; @@ -535,6 +546,8 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot, host_start += offset - host_offset; } start = h2g(host_start); + passthrough_start = start; + passthrough_end = start + len; } else { if (start & ~TARGET_PAGE_MASK) { errno = EINVAL; @@ -617,6 +630,8 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot, host_prot, flags, fd, offset1); if (p == MAP_FAILED) goto fail; + passthrough_start = real_start; + passthrough_end = real_end; } } the_end1: @@ -624,13 +639,28 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot, page_flags |= PAGE_ANON; } page_flags |= PAGE_RESET; - page_set_flags(start, start + len, page_flags); + if (passthrough_start == passthrough_end) { + page_set_flags(start, start + len, page_flags); + } else { + if (start < passthrough_start) { + page_set_flags(start, passthrough_start, page_flags); + } + page_set_flags(passthrough_start, passthrough_end, + page_flags | PAGE_PASSTHROUGH); + if (passthrough_end < start + len) { + page_set_flags(passthrough_end, start + len, page_flags); + } + } the_end: trace_target_mmap_complete(start); if (qemu_loglevel_mask(CPU_LOG_PAGE)) { - log_page_dump(__func__); + FILE *f = qemu_log_trylock(); + if (f) { + fprintf(f, "page layout changed following mmap\n"); + page_dump(f); + qemu_log_unlock(f); + } } - tb_invalidate_phys_range(start, start + len); mmap_unlock(); return start; fail: @@ -734,7 +764,6 @@ int target_munmap(abi_ulong start, abi_ulong len) if (ret == 0) { page_set_flags(start, start + len, 0); - tb_invalidate_phys_range(start, start + len); } mmap_unlock(); return ret; @@ -824,7 +853,74 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size, page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID | PAGE_RESET); } - tb_invalidate_phys_range(new_addr, new_addr + new_size); mmap_unlock(); return new_addr; } + +static bool can_passthrough_madv_dontneed(abi_ulong start, abi_ulong end) +{ + ulong addr; + + if ((start | end) & ~qemu_host_page_mask) { + return false; + } + + for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) { + if (!(page_get_flags(addr) & PAGE_PASSTHROUGH)) { + return false; + } + } + + 
return true; +} + +abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice) +{ + abi_ulong len, end; + int ret = 0; + + if (start & ~TARGET_PAGE_MASK) { + return -TARGET_EINVAL; + } + len = TARGET_PAGE_ALIGN(len_in); + + if (len_in && !len) { + return -TARGET_EINVAL; + } + + end = start + len; + if (end < start) { + return -TARGET_EINVAL; + } + + if (end == start) { + return 0; + } + + if (!guest_range_valid_untagged(start, len)) { + return -TARGET_EINVAL; + } + + /* + * A straight passthrough may not be safe because qemu sometimes turns + * private file-backed mappings into anonymous mappings. + * + * This is a hint, so ignoring and returning success is ok. + * + * This breaks MADV_DONTNEED, completely implementing which is quite + * complicated. However, there is one low-hanging fruit: mappings that are + * known to have the same semantics in the host and the guest. In this case + * passthrough is safe, so do it. + */ + mmap_lock(); + if (advice == TARGET_MADV_DONTNEED && + can_passthrough_madv_dontneed(start, end)) { + ret = get_errno(madvise(g2h_untagged(start), len, MADV_DONTNEED)); + if (ret == 0) { + page_reset_target_data(start, start + len); + } + } + mmap_unlock(); + + return ret; +} diff --git a/linux-user/nios2/cpu_loop.c b/linux-user/nios2/cpu_loop.c index 9869083fa1..da77ede76b 100644 --- a/linux-user/nios2/cpu_loop.c +++ b/linux-user/nios2/cpu_loop.c @@ -19,13 +19,13 @@ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "cpu_loop-common.h" +#include "signal-common.h" void cpu_loop(CPUNios2State *env) { CPUState *cs = env_cpu(env); - Nios2CPU *cpu = NIOS2_CPU(cs); - target_siginfo_t info; int trapnr, ret; for (;;) { @@ -37,9 +37,34 @@ void cpu_loop(CPUNios2State *env) case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; + + case EXCP_DIV: + /* Match kernel's handle_diverror_c(). */ + env->pc -= 4; + force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTDIV, env->pc); + break; + + case EXCP_UNALIGN: + case EXCP_UNALIGND: + force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, + env->ctrl[CR_BADADDR]); + break; + + case EXCP_ILLEGAL: + case EXCP_UNIMPL: + /* Match kernel's handle_illegal_c(). */ + env->pc -= 4; + force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->pc); + break; + case EXCP_SUPERI: + /* Match kernel's handle_supervisor_instr(). */ + env->pc -= 4; + force_sig_fault(TARGET_SIGILL, TARGET_ILL_PRVOPC, env->pc); + break; + case EXCP_TRAP: - if (env->regs[R_AT] == 0) { - abi_long ret; + switch (env->error_code) { + case 0: qemu_log_mask(CPU_LOG_INT, "\nSyscall\n"); ret = do_syscall(env, env->regs[2], @@ -47,70 +72,73 @@ void cpu_loop(CPUNios2State *env) env->regs[7], env->regs[8], env->regs[9], 0, 0); - if (env->regs[2] == 0) { /* FIXME: syscall 0 workaround */ - ret = 0; + if (ret == -QEMU_ESIGRETURN) { + /* rt_sigreturn has set all state. */ + break; } - + if (ret == -QEMU_ERESTARTSYS) { + env->pc -= 4; + break; + } + /* + * See the code after translate_rc_and_ret: all negative + * values are errors (aided by userspace restricted to 2G), + * errno is returned positive in r2, and error indication + * is a boolean in r7. 
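The comment just above documents how the rewritten nios2 syscall path reports results: r2 carries abs(ret) and r7 is a boolean error flag. A sketch of the consumer side, not from the patch and with an illustrative function name, showing how a guest libc stub would fold that pair back into the usual C convention:

#include <errno.h>
#include <stdio.h>

static long nios2_syscall_ret(unsigned long r2, unsigned long r7)
{
    if (r7) {           /* error: r2 holds the positive errno value */
        errno = (int)r2;
        return -1;
    }
    return (long)r2;    /* success: r2 is the plain result */
}

int main(void)
{
    long r = nios2_syscall_ret(9, 1);   /* a failed call carrying errno 9 */

    printf("ret=%ld errno=%d\n", r, errno);
    return 0;
}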
+ */ env->regs[2] = abs(ret); - /* Return value is 0..4096 */ - env->regs[7] = (ret > 0xfffffffffffff000ULL); - env->regs[CR_ESTATUS] = env->regs[CR_STATUS]; - env->regs[CR_STATUS] &= ~0x3; - env->regs[R_EA] = env->regs[R_PC] + 4; - env->regs[R_PC] += 4; + env->regs[7] = ret < 0; break; - } else { - qemu_log_mask(CPU_LOG_INT, "\nTrap\n"); - env->regs[CR_ESTATUS] = env->regs[CR_STATUS]; - env->regs[CR_STATUS] &= ~0x3; - env->regs[R_EA] = env->regs[R_PC] + 4; - env->regs[R_PC] = cpu->exception_addr; - - info.si_signo = TARGET_SIGTRAP; - info.si_errno = 0; - info.si_code = TARGET_TRAP_BRKPT; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + case 1: + qemu_log_mask(CPU_LOG_INT, "\nTrap 1\n"); + force_sig_fault(TARGET_SIGUSR1, 0, env->pc); break; - } - case EXCP_DEBUG: - info.si_signo = TARGET_SIGTRAP; - info.si_errno = 0; - info.si_code = TARGET_TRAP_BRKPT; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; - case 0xaa: - switch (env->regs[R_PC]) { - /*case 0x1000:*/ /* TODO:__kuser_helper_version */ - case 0x1004: /* __kuser_cmpxchg */ - start_exclusive(); - if (env->regs[4] & 0x3) { - goto kuser_fail; - } - ret = get_user_u32(env->regs[2], env->regs[4]); - if (ret) { - end_exclusive(); - goto kuser_fail; - } - env->regs[2] -= env->regs[5]; - if (env->regs[2] == 0) { - put_user_u32(env->regs[6], env->regs[4]); - } - end_exclusive(); - env->regs[R_PC] = env->regs[R_RA]; + case 2: + qemu_log_mask(CPU_LOG_INT, "\nTrap 2\n"); + force_sig_fault(TARGET_SIGUSR2, 0, env->pc); + break; + case 31: + qemu_log_mask(CPU_LOG_INT, "\nTrap 31\n"); + /* Match kernel's breakpoint_c(). */ + env->pc -= 4; + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc); break; - /*case 0x1040:*/ /* TODO:__kuser_sigtramp */ default: - ; -kuser_fail: - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - /* TODO: check env->error_code */ - info.si_code = TARGET_SEGV_MAPERR; - info._sifields._sigfault._addr = env->regs[R_PC]; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + qemu_log_mask(CPU_LOG_INT, "\nTrap %d\n", env->error_code); + force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLTRP, env->pc); + break; + + case 16: /* QEMU specific, for __kuser_cmpxchg */ + { + abi_ptr g = env->regs[4]; + uint32_t *h, n, o; + + if (g & 0x3) { + force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, g); + break; + } + ret = page_get_flags(g); + if (!(ret & PAGE_VALID)) { + force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR, g); + break; + } + if (!(ret & PAGE_READ) || !(ret & PAGE_WRITE)) { + force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_ACCERR, g); + break; + } + h = g2h(cs, g); + o = env->regs[5]; + n = env->regs[6]; + env->regs[2] = qatomic_cmpxchg(h, o, n) - o; + } + break; } break; + + case EXCP_DEBUG: + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc); + break; default: EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n", trapnr); @@ -123,31 +151,6 @@ kuser_fail: void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) { - env->regs[0] = 0; - env->regs[1] = regs->r1; - env->regs[2] = regs->r2; - env->regs[3] = regs->r3; - env->regs[4] = regs->r4; - env->regs[5] = regs->r5; - env->regs[6] = regs->r6; - env->regs[7] = regs->r7; - env->regs[8] = regs->r8; - env->regs[9] = regs->r9; - env->regs[10] = regs->r10; - env->regs[11] = regs->r11; - env->regs[12] = regs->r12; - env->regs[13] = regs->r13; - env->regs[14] = regs->r14; - env->regs[15] = regs->r15; - /* TODO: unsigned long orig_r2; */ - env->regs[R_RA] = regs->ra; - env->regs[R_FP] = regs->fp; 
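The trap-16 case above emulates the nios2 __kuser_cmpxchg helper: after validating alignment and page permissions it stores qatomic_cmpxchg(h, o, n) - o in r2, so zero means the swap happened. A standalone sketch of those semantics, not from the patch; the GCC/Clang __sync builtin stands in for QEMU's qatomic_cmpxchg():

#include <stdint.h>
#include <stdio.h>

static uint32_t kuser_cmpxchg(uint32_t *mem, uint32_t expected, uint32_t desired)
{
    /* Returns previous - expected: 0 on success, non-zero on failure. */
    uint32_t prev = __sync_val_compare_and_swap(mem, expected, desired);
    return prev - expected;
}

int main(void)
{
    uint32_t word = 5;
    uint32_t r1 = kuser_cmpxchg(&word, 5, 7);   /* succeeds, word becomes 7 */
    uint32_t r2 = kuser_cmpxchg(&word, 5, 9);   /* fails, word stays 7      */

    printf("r1=%u r2=%u word=%u\n", r1, r2, word);
    return 0;
}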
env->regs[R_SP] = regs->sp; - env->regs[R_GP] = regs->gp; - env->regs[CR_ESTATUS] = regs->estatus; - env->regs[R_EA] = regs->ea; - /* TODO: unsigned long orig_r7; */ - - /* Emulate eret when starting thread. */ - env->regs[R_PC] = regs->ea; + env->pc = regs->ea; } diff --git a/linux-user/nios2/signal.c b/linux-user/nios2/signal.c index cc3872f11d..32b3dc99c6 100644 --- a/linux-user/nios2/signal.c +++ b/linux-user/nios2/signal.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" @@ -41,7 +42,7 @@ struct target_rt_sigframe { struct target_ucontext uc; }; -static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env) +static void rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env) { unsigned long *gregs = uc->tuc_mcontext.gregs; @@ -72,14 +73,11 @@ static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env) __put_user(env->regs[R_RA], &gregs[23]); __put_user(env->regs[R_FP], &gregs[24]); __put_user(env->regs[R_GP], &gregs[25]); - __put_user(env->regs[R_EA], &gregs[27]); + __put_user(env->pc, &gregs[27]); __put_user(env->regs[R_SP], &gregs[28]); - - return 0; } -static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc, - int *pr2) +static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc) { int temp; unsigned long *gregs = uc->tuc_mcontext.gregs; @@ -123,19 +121,17 @@ static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc, __get_user(env->regs[R_GP], &gregs[25]); /* Not really necessary no user settable bits */ __get_user(temp, &gregs[26]); - __get_user(env->regs[R_EA], &gregs[27]); + __get_user(env->pc, &gregs[27]); __get_user(env->regs[R_RA], &gregs[23]); __get_user(env->regs[R_SP], &gregs[28]); target_restore_altstack(&uc->tuc_stack, env); - - *pr2 = env->regs[2]; return 0; } -static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env, - size_t frame_size) +static abi_ptr get_sigframe(struct target_sigaction *ka, CPUNios2State *env, + size_t frame_size) { unsigned long usp; @@ -143,7 +139,7 @@ static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env, usp = target_sigsp(get_sp_from_cpustate(env), ka); /* Verify, is it 32 or 64 bit aligned */ - return (void *)((usp - frame_size) & -8UL); + return (usp - frame_size) & -8; } void setup_rt_frame(int sig, struct target_sigaction *ka, @@ -152,26 +148,24 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, CPUNios2State *env) { struct target_rt_sigframe *frame; - int i, err = 0; + abi_ptr frame_addr; + int i; - frame = get_sigframe(ka, env, sizeof(*frame)); - - if (ka->sa_flags & SA_SIGINFO) { - tswap_siginfo(&frame->info, info); + frame_addr = get_sigframe(ka, env, sizeof(*frame)); + if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { + force_sigsegv(sig); + return; } + tswap_siginfo(&frame->info, info); + /* Create the ucontext. 
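get_sigframe() above now returns a guest address and keeps the kernel's alignment trick: subtract the frame size, then mask with -8 to round down to an 8-byte boundary. A tiny standalone illustration, not from the patch, with made-up numbers:

#include <stdio.h>

int main(void)
{
    unsigned long usp = 0x7ffff013ul, frame_size = 0x2c0ul;
    unsigned long frame = (usp - frame_size) & -8ul;   /* align down to 8 */

    printf("%#lx -> %#lx\n", usp, frame);   /* 0x7ffff013 -> 0x7fffed50 */
    return 0;
}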
*/ __put_user(0, &frame->uc.tuc_flags); __put_user(0, &frame->uc.tuc_link); target_save_altstack(&frame->uc.tuc_stack, env); - err |= rt_setup_ucontext(&frame->uc, env); + rt_setup_ucontext(&frame->uc, env); for (i = 0; i < TARGET_NSIG_WORDS; i++) { - __put_user((abi_ulong)set->sig[i], - (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]); - } - - if (err) { - goto give_sigsegv; + __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); } /* Set up to return from userspace; jump to fixed address sigreturn @@ -179,26 +173,13 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, env->regs[R_RA] = (unsigned long) (0x1044); /* Set up registers for signal handler */ - env->regs[R_SP] = (unsigned long) frame; - env->regs[4] = (unsigned long) sig; - env->regs[5] = (unsigned long) &frame->info; - env->regs[6] = (unsigned long) &frame->uc; - env->regs[R_EA] = (unsigned long) ka->_sa_handler; - return; + env->regs[R_SP] = frame_addr; + env->regs[4] = sig; + env->regs[5] = frame_addr + offsetof(struct target_rt_sigframe, info); + env->regs[6] = frame_addr + offsetof(struct target_rt_sigframe, uc); + env->pc = ka->_sa_handler; -give_sigsegv: - if (sig == TARGET_SIGSEGV) { - ka->_sa_handler = TARGET_SIG_DFL; - } - force_sigsegv(sig); - return; -} - -long do_sigreturn(CPUNios2State *env) -{ - trace_user_do_sigreturn(env, 0); - qemu_log_mask(LOG_UNIMP, "do_sigreturn: not implemented\n"); - return -TARGET_ENOSYS; + unlock_user_struct(frame, frame_addr, 1); } long do_rt_sigreturn(CPUNios2State *env) @@ -207,24 +188,23 @@ long do_rt_sigreturn(CPUNios2State *env) abi_ulong frame_addr = env->regs[R_SP]; struct target_rt_sigframe *frame; sigset_t set; - int rval; if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { goto badframe; } target_to_host_sigset(&set, &frame->uc.tuc_sigmask); - do_sigprocmask(SIG_SETMASK, &set, NULL); + set_sigmask(&set); - if (rt_restore_ucontext(env, &frame->uc, &rval)) { + if (rt_restore_ucontext(env, &frame->uc)) { goto badframe; } unlock_user_struct(frame, frame_addr, 0); - return rval; + return -QEMU_ESIGRETURN; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); - return 0; + return -QEMU_ESIGRETURN; } diff --git a/linux-user/nios2/target_cpu.h b/linux-user/nios2/target_cpu.h index 2d2008f002..830b4c0741 100644 --- a/linux-user/nios2/target_cpu.h +++ b/linux-user/nios2/target_cpu.h @@ -27,6 +27,7 @@ static inline void cpu_clone_regs_child(CPUNios2State *env, target_ulong newsp, env->regs[R_SP] = newsp; } env->regs[R_RET0] = 0; + env->regs[7] = 0; } static inline void cpu_clone_regs_parent(CPUNios2State *env, unsigned flags) diff --git a/linux-user/nios2/target_mman.h b/linux-user/nios2/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/nios2/target_mman.h @@ -0,0 +1 @@ +#include "../generic/target_mman.h" diff --git a/linux-user/nios2/target_prctl.h b/linux-user/nios2/target_prctl.h new file mode 100644 index 0000000000..eb53b31ad5 --- /dev/null +++ b/linux-user/nios2/target_prctl.h @@ -0,0 +1 @@ +/* No special prctl support required. 
*/ diff --git a/linux-user/nios2/target_resource.h b/linux-user/nios2/target_resource.h new file mode 100644 index 0000000000..227259594c --- /dev/null +++ b/linux-user/nios2/target_resource.h @@ -0,0 +1 @@ +#include "../generic/target_resource.h" diff --git a/linux-user/nios2/target_signal.h b/linux-user/nios2/target_signal.h index aebf749f12..46ca5948ce 100644 --- a/linux-user/nios2/target_signal.h +++ b/linux-user/nios2/target_signal.h @@ -1,22 +1,9 @@ #ifndef NIOS2_TARGET_SIGNAL_H #define NIOS2_TARGET_SIGNAL_H -/* this struct defines a stack used during syscall handling */ - -typedef struct target_sigaltstack { - abi_ulong ss_sp; - abi_int ss_flags; - abi_ulong ss_size; -} target_stack_t; - - -/* sigaltstack controls */ -#define TARGET_SS_ONSTACK 1 -#define TARGET_SS_DISABLE 2 - -#define TARGET_MINSIGSTKSZ 2048 -#define TARGET_SIGSTKSZ 8192 - #include "../generic/signal.h" +/* Nios2 uses a fixed address on the kuser page for sigreturn. */ +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0 + #endif /* NIOS2_TARGET_SIGNAL_H */ diff --git a/linux-user/nios2/target_structs.h b/linux-user/nios2/target_structs.h index daa2886f98..3a06f373c3 100644 --- a/linux-user/nios2/target_structs.h +++ b/linux-user/nios2/target_structs.h @@ -1,58 +1 @@ -/* - * Nios2 specific structures for linux-user - * - * Copyright (c) 2016 Marek Vasut - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#ifndef NIOS2_TARGET_STRUCTS_H -#define NIOS2_TARGET_STRUCTS_H - -struct target_ipc_perm { - abi_int __key; /* Key. */ - abi_uint uid; /* Owner's user ID. */ - abi_uint gid; /* Owner's group ID. */ - abi_uint cuid; /* Creator's user ID. */ - abi_uint cgid; /* Creator's group ID. */ - abi_ushort mode; /* Read/write permission. */ - abi_ushort __pad1; - abi_ushort __seq; /* Sequence number. 
*/ - abi_ushort __pad2; - abi_ulong __unused1; - abi_ulong __unused2; -}; - -struct target_shmid_ds { - struct target_ipc_perm shm_perm; /* operation permission struct */ - abi_long shm_segsz; /* size of segment in bytes */ - abi_ulong shm_atime; /* time of last shmat() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused1; -#endif - abi_ulong shm_dtime; /* time of last shmdt() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused2; -#endif - abi_ulong shm_ctime; /* time of last change by shmctl() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused3; -#endif - abi_int shm_cpid; /* pid of creator */ - abi_int shm_lpid; /* pid of last shmop */ - abi_ulong shm_nattch; /* number of current attaches */ - abi_ulong __unused4; - abi_ulong __unused5; -}; - -#endif +#include "../generic/target_structs.h" diff --git a/linux-user/nios2/target_syscall.h b/linux-user/nios2/target_syscall.h index 78006c24d4..561b28d281 100644 --- a/linux-user/nios2/target_syscall.h +++ b/linux-user/nios2/target_syscall.h @@ -30,7 +30,6 @@ struct target_pt_regs { unsigned long orig_r7; }; -#define TARGET_MINSIGSTKSZ 2048 #define TARGET_MCL_CURRENT 1 #define TARGET_MCL_FUTURE 2 #define TARGET_MCL_ONFAULT 4 diff --git a/linux-user/openrisc/cpu_loop.c b/linux-user/openrisc/cpu_loop.c index b33fa77718..a7aa586c8f 100644 --- a/linux-user/openrisc/cpu_loop.c +++ b/linux-user/openrisc/cpu_loop.c @@ -18,16 +18,16 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu.h" +#include "user-internals.h" #include "cpu_loop-common.h" +#include "signal-common.h" void cpu_loop(CPUOpenRISCState *env) { CPUState *cs = env_cpu(env); int trapnr; abi_long ret; - target_siginfo_t info; for (;;) { cpu_exec_start(cs); @@ -46,54 +46,36 @@ void cpu_loop(CPUOpenRISCState *env) cpu_get_gpr(env, 6), cpu_get_gpr(env, 7), cpu_get_gpr(env, 8), 0, 0); - if (ret == -TARGET_ERESTARTSYS) { + if (ret == -QEMU_ERESTARTSYS) { env->pc -= 4; - } else if (ret != -TARGET_QEMU_ESIGRETURN) { + } else if (ret != -QEMU_ESIGRETURN) { cpu_set_gpr(env, 11, ret); } break; - case EXCP_DPF: - case EXCP_IPF: - case EXCP_RANGE: - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - info.si_code = TARGET_SEGV_MAPERR; - info._sifields._sigfault._addr = env->pc; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; case EXCP_ALIGN: - info.si_signo = TARGET_SIGBUS; - info.si_errno = 0; - info.si_code = TARGET_BUS_ADRALN; - info._sifields._sigfault._addr = env->pc; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, env->eear); break; case EXCP_ILLEGAL: - info.si_signo = TARGET_SIGILL; - info.si_errno = 0; - info.si_code = TARGET_ILL_ILLOPC; - info._sifields._sigfault._addr = env->pc; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; - case EXCP_FPE: - info.si_signo = TARGET_SIGFPE; - info.si_errno = 0; - info.si_code = 0; - info._sifields._sigfault._addr = env->pc; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->pc); break; case EXCP_INTERRUPT: /* We processed the pending cpu work above. */ break; case EXCP_DEBUG: - info.si_signo = TARGET_SIGTRAP; - info.si_errno = 0; - info.si_code = TARGET_TRAP_BRKPT; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc); break; case EXCP_ATOMIC: cpu_exec_step_atomic(cs); break; + case EXCP_RANGE: + /* Requires SR.OVE set, which linux-user won't do. 
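The openrisc EXCP_ALIGN case above now reports env->eear, so the SIGBUS delivered to the guest carries the misaligned data address rather than the program counter. A guest-side sketch of what that changes, not part of the patch; whether the load really traps depends on the CPU's alignment behaviour:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void bus_handler(int sig, siginfo_t *si, void *ctx)
{
    /* With the change above, si_addr is the odd address, not the PC. */
    printf("SIGBUS at %p\n", si->si_addr);
    exit(0);
}

int main(void)
{
    struct sigaction sa = { .sa_sigaction = bus_handler,
                            .sa_flags = SA_SIGINFO };
    char buf[16] __attribute__((aligned(8))) = { 0 };

    sigaction(SIGBUS, &sa, NULL);
    volatile int v = *(volatile int *)(buf + 1);   /* misaligned 4-byte load */
    (void)v;
    puts("no alignment trap on this target");
    return 0;
}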
*/ + cpu_abort(cs, "Unexpected RANGE exception"); + case EXCP_FPE: + /* + * Requires FPSCR.FPEE set. Writes to FPSCR from usermode not + * yet enabled in kernel ABI, so linux-user does not either. + */ + cpu_abort(cs, "Unexpected FPE exception"); default: g_assert_not_reached(); } diff --git a/linux-user/openrisc/signal.c b/linux-user/openrisc/signal.c index 5c5640a284..be8b68784a 100644 --- a/linux-user/openrisc/signal.c +++ b/linux-user/openrisc/signal.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" @@ -37,7 +38,6 @@ typedef struct target_ucontext { typedef struct target_rt_sigframe { struct target_siginfo info; target_ucontext uc; - uint32_t retcode[4]; /* trampoline code */ } target_rt_sigframe; static void restore_sigcontext(CPUOpenRISCState *env, target_sigcontext *sc) @@ -115,14 +115,8 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); } - /* This is l.ori r11,r0,__NR_sigreturn; l.sys 1; l.nop; l.nop */ - __put_user(0xa9600000 | TARGET_NR_rt_sigreturn, frame->retcode + 0); - __put_user(0x20000001, frame->retcode + 1); - __put_user(0x15000000, frame->retcode + 2); - __put_user(0x15000000, frame->retcode + 3); - /* Set up registers for signal handler */ - cpu_set_gpr(env, 9, frame_addr + offsetof(target_rt_sigframe, retcode)); + cpu_set_gpr(env, 9, default_rt_sigreturn); cpu_set_gpr(env, 3, sig); cpu_set_gpr(env, 4, frame_addr + offsetof(target_rt_sigframe, info)); cpu_set_gpr(env, 5, frame_addr + offsetof(target_rt_sigframe, uc)); @@ -168,3 +162,16 @@ long do_rt_sigreturn(CPUOpenRISCState *env) force_sig(TARGET_SIGSEGV); return 0; } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0); + assert(tramp != NULL); + + /* This is l.ori r11,r0,__NR_sigreturn; l.sys 1 */ + __put_user(0xa9600000 | TARGET_NR_rt_sigreturn, tramp + 0); + __put_user(0x20000001, tramp + 1); + + default_rt_sigreturn = sigtramp_page; + unlock_user(tramp, sigtramp_page, 8); +} diff --git a/linux-user/openrisc/target_mman.h b/linux-user/openrisc/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/openrisc/target_mman.h @@ -0,0 +1 @@ +#include "../generic/target_mman.h" diff --git a/linux-user/openrisc/target_prctl.h b/linux-user/openrisc/target_prctl.h new file mode 100644 index 0000000000..eb53b31ad5 --- /dev/null +++ b/linux-user/openrisc/target_prctl.h @@ -0,0 +1 @@ +/* No special prctl support required. 
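setup_sigtramp() above writes the same two-instruction stub that used to live in the signal frame's retcode[]; per the patch's own comment, 0xa9600000 | nr is "l.ori r11, r0, nr" and 0x20000001 is "l.sys 1". A standalone sketch, not from the patch, with an illustrative syscall number:

#include <stdint.h>
#include <stdio.h>

static void build_or1k_sigtramp(uint32_t tramp[2], unsigned int nr)
{
    tramp[0] = 0xa9600000u | nr;   /* l.ori r11, r0, nr  (syscall number) */
    tramp[1] = 0x20000001u;        /* l.sys 1                             */
}

int main(void)
{
    uint32_t t[2];

    build_or1k_sigtramp(t, 139);              /* illustrative number only */
    printf("tramp: %08x %08x\n", t[0], t[1]);
    return 0;
}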
*/ diff --git a/linux-user/openrisc/target_resource.h b/linux-user/openrisc/target_resource.h new file mode 100644 index 0000000000..227259594c --- /dev/null +++ b/linux-user/openrisc/target_resource.h @@ -0,0 +1 @@ +#include "../generic/target_resource.h" diff --git a/linux-user/openrisc/target_signal.h b/linux-user/openrisc/target_signal.h index 8283eaf544..5b9d40974a 100644 --- a/linux-user/openrisc/target_signal.h +++ b/linux-user/openrisc/target_signal.h @@ -1,29 +1,8 @@ #ifndef OPENRISC_TARGET_SIGNAL_H #define OPENRISC_TARGET_SIGNAL_H -/* this struct defines a stack used during syscall handling */ - -typedef struct target_sigaltstack { - abi_long ss_sp; - abi_int ss_flags; - abi_ulong ss_size; -} target_stack_t; - -/* sigaltstack controls */ -#define TARGET_SS_ONSTACK 1 -#define TARGET_SS_DISABLE 2 - -#define TARGET_SA_NOCLDSTOP 0x00000001 -#define TARGET_SA_NOCLDWAIT 0x00000002 -#define TARGET_SA_SIGINFO 0x00000004 -#define TARGET_SA_ONSTACK 0x08000000 -#define TARGET_SA_RESTART 0x10000000 -#define TARGET_SA_NODEFER 0x40000000 -#define TARGET_SA_RESETHAND 0x80000000 - -#define TARGET_MINSIGSTKSZ 2048 -#define TARGET_SIGSTKSZ 8192 - #include "../generic/signal.h" +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* OPENRISC_TARGET_SIGNAL_H */ diff --git a/linux-user/openrisc/target_structs.h b/linux-user/openrisc/target_structs.h index e98e2bc799..3a06f373c3 100644 --- a/linux-user/openrisc/target_structs.h +++ b/linux-user/openrisc/target_structs.h @@ -1,58 +1 @@ -/* - * OpenRISC specific structures for linux-user - * - * Copyright (c) 2013 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#ifndef OPENRISC_TARGET_STRUCTS_H -#define OPENRISC_TARGET_STRUCTS_H - -struct target_ipc_perm { - abi_int __key; /* Key. */ - abi_uint uid; /* Owner's user ID. */ - abi_uint gid; /* Owner's group ID. */ - abi_uint cuid; /* Creator's user ID. */ - abi_uint cgid; /* Creator's group ID. */ - abi_ushort mode; /* Read/write permission. */ - abi_ushort __pad1; - abi_ushort __seq; /* Sequence number. 
*/ - abi_ushort __pad2; - abi_ulong __unused1; - abi_ulong __unused2; -}; - -struct target_shmid_ds { - struct target_ipc_perm shm_perm; /* operation permission struct */ - abi_long shm_segsz; /* size of segment in bytes */ - abi_ulong shm_atime; /* time of last shmat() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused1; -#endif - abi_ulong shm_dtime; /* time of last shmdt() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused2; -#endif - abi_ulong shm_ctime; /* time of last change by shmctl() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused3; -#endif - abi_int shm_cpid; /* pid of creator */ - abi_int shm_lpid; /* pid of last shmop */ - abi_ulong shm_nattch; /* number of current attaches */ - abi_ulong __unused4; - abi_ulong __unused5; -}; - -#endif +#include "../generic/target_structs.h" diff --git a/linux-user/openrisc/target_syscall.h b/linux-user/openrisc/target_syscall.h index ef0d89a551..7fe5b73d3b 100644 --- a/linux-user/openrisc/target_syscall.h +++ b/linux-user/openrisc/target_syscall.h @@ -15,7 +15,6 @@ struct target_pt_regs { #define UNAME_MACHINE "openrisc" #define UNAME_MINIMUM_RELEASE "2.6.32" -#define TARGET_MINSIGSTKSZ 2048 #define TARGET_MCL_CURRENT 1 #define TARGET_MCL_FUTURE 2 #define TARGET_MCL_ONFAULT 4 diff --git a/linux-user/ppc/cpu_loop.c b/linux-user/ppc/cpu_loop.c index fa91ea0eed..02204ad8be 100644 --- a/linux-user/ppc/cpu_loop.c +++ b/linux-user/ppc/cpu_loop.c @@ -18,9 +18,11 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu.h" +#include "qemu/timer.h" +#include "user-internals.h" #include "cpu_loop-common.h" +#include "signal-common.h" static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env) { @@ -52,14 +54,6 @@ uint64_t cpu_ppc_load_vtb(CPUPPCState *env) return cpu_ppc_get_tb(env); } -uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env) -__attribute__ (( alias ("cpu_ppc_load_tbu") )); - -uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env) -{ - return cpu_ppc_load_tbl(env) & 0x3FFFFF80; -} - /* XXX: to be fixed */ int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp) { @@ -74,8 +68,7 @@ int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val) void cpu_loop(CPUPPCState *env) { CPUState *cs = env_cpu(env); - target_siginfo_t info; - int trapnr; + int trapnr, si_signo, si_code; target_ulong ret; for(;;) { @@ -100,97 +93,37 @@ void cpu_loop(CPUPPCState *env) "Aborting\n"); break; case POWERPC_EXCP_DSI: /* Data storage exception */ - /* XXX: check this. Seems bugged */ - switch (env->error_code & 0xFF000000) { - case 0x40000000: - case 0x42000000: - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - info.si_code = TARGET_SEGV_MAPERR; - break; - case 0x04000000: - info.si_signo = TARGET_SIGILL; - info.si_errno = 0; - info.si_code = TARGET_ILL_ILLADR; - break; - case 0x08000000: - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - info.si_code = TARGET_SEGV_ACCERR; - break; - default: - /* Let's send a regular segfault... 
*/ - EXCP_DUMP(env, "Invalid segfault errno (%02x)\n", - env->error_code); - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - info.si_code = TARGET_SEGV_MAPERR; - break; - } - info._sifields._sigfault._addr = env->spr[SPR_DAR]; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; case POWERPC_EXCP_ISI: /* Instruction storage exception */ - /* XXX: check this */ - switch (env->error_code & 0xFF000000) { - case 0x40000000: - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - info.si_code = TARGET_SEGV_MAPERR; - break; - case 0x10000000: - case 0x08000000: - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - info.si_code = TARGET_SEGV_ACCERR; - break; - default: - /* Let's send a regular segfault... */ - EXCP_DUMP(env, "Invalid segfault errno (%02x)\n", - env->error_code); - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - info.si_code = TARGET_SEGV_MAPERR; - break; - } - info._sifields._sigfault._addr = env->nip - 4; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + /* FIXME: handle maperr in ppc_cpu_record_sigsegv. */ + force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR, + env->spr[SPR_DAR]); break; case POWERPC_EXCP_EXTERNAL: /* External input */ cpu_abort(cs, "External interrupt while in user mode. " "Aborting\n"); break; - case POWERPC_EXCP_ALIGN: /* Alignment exception */ - /* XXX: check this */ - info.si_signo = TARGET_SIGBUS; - info.si_errno = 0; - info.si_code = TARGET_BUS_ADRALN; - info._sifields._sigfault._addr = env->nip; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; case POWERPC_EXCP_PROGRAM: /* Program exception */ case POWERPC_EXCP_HV_EMU: /* HV emulation */ /* XXX: check this */ switch (env->error_code & ~0xF) { case POWERPC_EXCP_FP: - info.si_signo = TARGET_SIGFPE; - info.si_errno = 0; + si_signo = TARGET_SIGFPE; switch (env->error_code & 0xF) { case POWERPC_EXCP_FP_OX: - info.si_code = TARGET_FPE_FLTOVF; + si_code = TARGET_FPE_FLTOVF; break; case POWERPC_EXCP_FP_UX: - info.si_code = TARGET_FPE_FLTUND; + si_code = TARGET_FPE_FLTUND; break; case POWERPC_EXCP_FP_ZX: case POWERPC_EXCP_FP_VXZDZ: - info.si_code = TARGET_FPE_FLTDIV; + si_code = TARGET_FPE_FLTDIV; break; case POWERPC_EXCP_FP_XX: - info.si_code = TARGET_FPE_FLTRES; + si_code = TARGET_FPE_FLTRES; break; case POWERPC_EXCP_FP_VXSOFT: - info.si_code = TARGET_FPE_FLTINV; + si_code = TARGET_FPE_FLTINV; break; case POWERPC_EXCP_FP_VXSNAN: case POWERPC_EXCP_FP_VXISI: @@ -199,56 +132,56 @@ void cpu_loop(CPUPPCState *env) case POWERPC_EXCP_FP_VXVC: case POWERPC_EXCP_FP_VXSQRT: case POWERPC_EXCP_FP_VXCVI: - info.si_code = TARGET_FPE_FLTSUB; + si_code = TARGET_FPE_FLTSUB; break; default: EXCP_DUMP(env, "Unknown floating point exception (%02x)\n", env->error_code); + si_code = 0; break; } break; case POWERPC_EXCP_INVAL: - info.si_signo = TARGET_SIGILL; - info.si_errno = 0; + si_signo = TARGET_SIGILL; switch (env->error_code & 0xF) { case POWERPC_EXCP_INVAL_INVAL: - info.si_code = TARGET_ILL_ILLOPC; + si_code = TARGET_ILL_ILLOPC; break; case POWERPC_EXCP_INVAL_LSWX: - info.si_code = TARGET_ILL_ILLOPN; + si_code = TARGET_ILL_ILLOPN; break; case POWERPC_EXCP_INVAL_SPR: - info.si_code = TARGET_ILL_PRVREG; + si_code = TARGET_ILL_PRVREG; break; case POWERPC_EXCP_INVAL_FP: - info.si_code = TARGET_ILL_COPROC; + si_code = TARGET_ILL_COPROC; break; default: EXCP_DUMP(env, "Unknown invalid operation (%02x)\n", env->error_code & 0xF); - info.si_code = TARGET_ILL_ILLADR; + si_code = TARGET_ILL_ILLADR; break; } break; case POWERPC_EXCP_PRIV: - info.si_signo = TARGET_SIGILL; - 
info.si_errno = 0; + si_signo = TARGET_SIGILL; switch (env->error_code & 0xF) { case POWERPC_EXCP_PRIV_OPC: - info.si_code = TARGET_ILL_PRVOPC; + si_code = TARGET_ILL_PRVOPC; break; case POWERPC_EXCP_PRIV_REG: - info.si_code = TARGET_ILL_PRVREG; + si_code = TARGET_ILL_PRVREG; break; default: EXCP_DUMP(env, "Unknown privilege violation (%02x)\n", env->error_code & 0xF); - info.si_code = TARGET_ILL_PRVOPC; + si_code = TARGET_ILL_PRVOPC; break; } break; case POWERPC_EXCP_TRAP: - cpu_abort(cs, "Tried to call a TRAP\n"); + si_signo = TARGET_SIGTRAP; + si_code = TARGET_TRAP_BRKPT; break; default: /* Should not happen ! */ @@ -256,28 +189,19 @@ void cpu_loop(CPUPPCState *env) env->error_code); break; } - info._sifields._sigfault._addr = env->nip; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(si_signo, si_code, env->nip); break; case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ - info.si_signo = TARGET_SIGILL; - info.si_errno = 0; - info.si_code = TARGET_ILL_COPROC; - info._sifields._sigfault._addr = env->nip; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ + case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */ + case POWERPC_EXCP_VPU: /* Vector unavailable exception */ + force_sig_fault(TARGET_SIGILL, TARGET_ILL_COPROC, env->nip); break; case POWERPC_EXCP_SYSCALL: /* System call exception */ case POWERPC_EXCP_SYSCALL_VECTORED: cpu_abort(cs, "Syscall exception while in user mode. " "Aborting\n"); break; - case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ - info.si_signo = TARGET_SIGILL; - info.si_errno = 0; - info.si_code = TARGET_ILL_COPROC; - info._sifields._sigfault._addr = env->nip; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; case POWERPC_EXCP_DECR: /* Decrementer exception */ cpu_abort(cs, "Decrementer interrupt while in user mode. " "Aborting\n"); @@ -298,13 +222,6 @@ void cpu_loop(CPUPPCState *env) cpu_abort(cs, "Instruction TLB exception while in user mode. " "Aborting\n"); break; - case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */ - info.si_signo = TARGET_SIGILL; - info.si_errno = 0; - info.si_code = TARGET_ILL_COPROC; - info._sifields._sigfault._addr = env->nip; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */ cpu_abort(cs, "Embedded floating-point data IRQ not handled\n"); break; @@ -361,25 +278,10 @@ void cpu_loop(CPUPPCState *env) cpu_abort(cs, "Hypervisor instruction segment exception " "while in user mode. Aborting\n"); break; - case POWERPC_EXCP_VPU: /* Vector unavailable exception */ - info.si_signo = TARGET_SIGILL; - info.si_errno = 0; - info.si_code = TARGET_ILL_COPROC; - info._sifields._sigfault._addr = env->nip; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */ cpu_abort(cs, "Programmable interval timer interrupt " "while in user mode. Aborting\n"); break; - case POWERPC_EXCP_IO: /* IO error exception */ - cpu_abort(cs, "IO error exception while in user mode. " - "Aborting\n"); - break; - case POWERPC_EXCP_RUNM: /* Run mode exception */ - cpu_abort(cs, "Run mode exception while in user mode. 
" - "Aborting\n"); - break; case POWERPC_EXCP_EMUL: /* Emulation trap exception */ cpu_abort(cs, "Emulation trap exception not handled\n"); break; @@ -434,11 +336,11 @@ void cpu_loop(CPUPPCState *env) ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4], env->gpr[5], env->gpr[6], env->gpr[7], env->gpr[8], 0, 0); - if (ret == -TARGET_ERESTARTSYS) { + if (ret == -QEMU_ERESTARTSYS) { env->nip -= 4; break; } - if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) { + if (ret == (target_ulong)(-QEMU_ESIGRETURN)) { /* Returning from a successful sigreturn syscall. Avoid corrupting register state. */ break; @@ -450,10 +352,7 @@ void cpu_loop(CPUPPCState *env) env->gpr[3] = ret; break; case EXCP_DEBUG: - info.si_signo = TARGET_SIGTRAP; - info.si_errno = 0; - info.si_code = TARGET_TRAP_BRKPT; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->nip); break; case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ diff --git a/linux-user/ppc/signal.c b/linux-user/ppc/signal.c index edfad28a37..07729c1653 100644 --- a/linux-user/ppc/signal.c +++ b/linux-user/ppc/signal.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" @@ -202,9 +203,6 @@ struct target_func_ptr { #endif -/* We use the mc_pad field for the signal return trampoline. */ -#define tramp mc_pad - /* See arch/powerpc/kernel/signal.c. */ static target_ulong get_sigframe(struct target_sigaction *ka, CPUPPCState *env, @@ -217,8 +215,7 @@ static target_ulong get_sigframe(struct target_sigaction *ka, return (oldsp - frame_size) & ~0xFUL; } -#if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \ - (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN))) +#if TARGET_BIG_ENDIAN == HOST_BIG_ENDIAN #define PPC_VEC_HI 0 #define PPC_VEC_LO 1 #else @@ -231,7 +228,7 @@ static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame) { target_ulong msr = env->msr; int i; - target_ulong ccr = 0; + uint32_t ccr = 0; /* In general, the kernel attempts to be intelligent about what it needs to save for Altivec/FP/SPE registers. We don't care that @@ -244,7 +241,7 @@ static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame) __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); - __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); + __put_user(cpu_read_xer(env), &frame->mc_gregs[TARGET_PT_XER]); for (i = 0; i < ARRAY_SIZE(env->crf); i++) { ccr |= env->crf[i] << (32 - ((i + 1) * 4)); @@ -308,10 +305,8 @@ static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame) static void encode_trampoline(int sigret, uint32_t *tramp) { /* Set up the sigreturn trampoline: li r0,sigret; sc. 
*/ - if (sigret) { - __put_user(0x38000000 | sigret, &tramp[0]); - __put_user(0x44000002, &tramp[1]); - } + __put_user(0x38000000 | sigret, &tramp[0]); + __put_user(0x44000002, &tramp[1]); } static void restore_user_regs(CPUPPCState *env, @@ -319,6 +314,7 @@ static void restore_user_regs(CPUPPCState *env, { target_ulong save_r2 = 0; target_ulong msr; + target_ulong xer; target_ulong ccr; int i; @@ -334,9 +330,11 @@ static void restore_user_regs(CPUPPCState *env, __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); - __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); - __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); + __get_user(xer, &frame->mc_gregs[TARGET_PT_XER]); + cpu_write_xer(env, xer); + + __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); for (i = 0; i < ARRAY_SIZE(env->crf); i++) { env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf; } @@ -437,12 +435,7 @@ void setup_frame(int sig, struct target_sigaction *ka, /* Save user regs. */ save_user_regs(env, &frame->mctx); - /* Construct the trampoline code on the stack. */ - encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp); - - /* The kernel checks for the presence of a VDSO here. We don't - emulate a vdso, so use a sigreturn system call. */ - env->lr = (target_ulong) h2g(frame->mctx.tramp); + env->lr = default_sigreturn; /* Turn off all fp exceptions. */ env->fpscr = 0; @@ -478,15 +471,12 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUPPCState *env) { struct target_rt_sigframe *rt_sf; - uint32_t *trampptr = 0; struct target_mcontext *mctx = 0; target_ulong rt_sf_addr, newsp = 0; int i, err = 0; #if defined(TARGET_PPC64) struct target_sigcontext *sc = 0; -#if !defined(TARGET_ABI32) struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; -#endif #endif rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf)); @@ -508,22 +498,17 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, #if defined(TARGET_PPC64) mctx = &rt_sf->uc.tuc_sigcontext.mcontext; - trampptr = &rt_sf->trampoline[0]; sc = &rt_sf->uc.tuc_sigcontext; __put_user(h2g(mctx), &sc->regs); __put_user(sig, &sc->signal); #else mctx = &rt_sf->uc.tuc_mcontext; - trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp; #endif save_user_regs(env, mctx); - encode_trampoline(TARGET_NR_rt_sigreturn, trampptr); - /* The kernel checks for the presence of a VDSO here. We don't - emulate a vdso, so use a sigreturn system call. */ - env->lr = (target_ulong) h2g(trampptr); + env->lr = default_rt_sigreturn; /* Turn off all fp exceptions. */ env->fpscr = 0; @@ -542,7 +527,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, env->gpr[5] = (target_ulong) h2g(&rt_sf->uc); env->gpr[6] = (target_ulong) h2g(rt_sf); -#if defined(TARGET_PPC64) && !defined(TARGET_ABI32) +#if defined(TARGET_PPC64) if (get_ppc64_abi(image) < 2) { /* ELFv1 PPC64 function pointers are pointers to OPD entries. */ struct target_func_ptr *handler = @@ -557,7 +542,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, env->nip = (target_ulong) ka->_sa_handler; #endif -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN /* Signal handlers are entered in big-endian mode. 
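save_user_regs() and restore_user_regs() above pack the eight 4-bit condition register fields into a single 32-bit ccr word (crf[0] in the most significant nibble) and unpack it again on sigreturn. A standalone round-trip sketch of that packing, not from the patch:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_cr(const uint8_t crf[8])
{
    uint32_t ccr = 0;

    for (int i = 0; i < 8; i++) {
        ccr |= (uint32_t)(crf[i] & 0xf) << (32 - (i + 1) * 4);
    }
    return ccr;
}

static void unpack_cr(uint32_t ccr, uint8_t crf[8])
{
    for (int i = 0; i < 8; i++) {
        crf[i] = (ccr >> (32 - (i + 1) * 4)) & 0xf;
    }
}

int main(void)
{
    uint8_t in[8] = { 0x8, 0x2, 0x0, 0x4, 0x1, 0x0, 0x0, 0xf }, out[8];
    uint32_t ccr = pack_cr(in);

    unpack_cr(ccr, out);
    printf("ccr=%08x crf0=%x crf7=%x\n", ccr, out[0], out[7]);
    return 0;
}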
*/ ppc_store_msr(env, env->msr & ~(1ull << MSR_LE)); #else @@ -574,7 +559,7 @@ sigsegv: } -#if !defined(TARGET_PPC64) || defined(TARGET_ABI32) +#if !defined(TARGET_PPC64) long do_sigreturn(CPUPPCState *env) { struct target_sigcontext *sc = NULL; @@ -587,12 +572,9 @@ long do_sigreturn(CPUPPCState *env) if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) goto sigsegv; -#if defined(TARGET_PPC64) - set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32); -#else __get_user(set.sig[0], &sc->oldmask); __get_user(set.sig[1], &sc->_unused[3]); -#endif + target_to_host_sigset_internal(&blocked, &set); set_sigmask(&blocked); @@ -603,13 +585,13 @@ long do_sigreturn(CPUPPCState *env) unlock_user_struct(sr, sr_addr, 1); unlock_user_struct(sc, sc_addr, 1); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; sigsegv: unlock_user_struct(sr, sr_addr, 1); unlock_user_struct(sc, sc_addr, 1); force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; } #endif /* !defined(TARGET_PPC64) */ @@ -658,12 +640,12 @@ long do_rt_sigreturn(CPUPPCState *env) target_restore_altstack(&rt_sf->uc.tuc_stack, env); unlock_user_struct(rt_sf, rt_sf_addr, 1); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; sigsegv: unlock_user_struct(rt_sf, rt_sf_addr, 1); force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; } /* This syscall implements {get,set,swap}context for userland. */ @@ -716,8 +698,24 @@ abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx, /* We cannot return to a partially updated context. */ force_sig(TARGET_SIGSEGV); } - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; } return 0; } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0); + assert(tramp != NULL); + +#ifdef TARGET_ARCH_HAS_SETUP_FRAME + default_sigreturn = sigtramp_page; + encode_trampoline(TARGET_NR_sigreturn, tramp + 0); +#endif + + default_rt_sigreturn = sigtramp_page + 8; + encode_trampoline(TARGET_NR_rt_sigreturn, tramp + 2); + + unlock_user(tramp, sigtramp_page, 2 * 8); +} diff --git a/linux-user/ppc/target_mman.h b/linux-user/ppc/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/ppc/target_mman.h @@ -0,0 +1 @@ +#include "../generic/target_mman.h" diff --git a/linux-user/ppc/target_prctl.h b/linux-user/ppc/target_prctl.h new file mode 100644 index 0000000000..eb53b31ad5 --- /dev/null +++ b/linux-user/ppc/target_prctl.h @@ -0,0 +1 @@ +/* No special prctl support required. 
*/ diff --git a/linux-user/ppc/target_resource.h b/linux-user/ppc/target_resource.h new file mode 100644 index 0000000000..227259594c --- /dev/null +++ b/linux-user/ppc/target_resource.h @@ -0,0 +1 @@ +#include "../generic/target_resource.h" diff --git a/linux-user/ppc/target_signal.h b/linux-user/ppc/target_signal.h index 72fcdd9bfa..5be24e152b 100644 --- a/linux-user/ppc/target_signal.h +++ b/linux-user/ppc/target_signal.h @@ -1,27 +1,11 @@ #ifndef PPC_TARGET_SIGNAL_H #define PPC_TARGET_SIGNAL_H -/* this struct defines a stack used during syscall handling */ - -typedef struct target_sigaltstack { - abi_ulong ss_sp; - abi_int ss_flags; - abi_ulong ss_size; -} target_stack_t; - - -/* - * sigaltstack controls - */ -#define TARGET_SS_ONSTACK 1 -#define TARGET_SS_DISABLE 2 - -#define TARGET_MINSIGSTKSZ 2048 -#define TARGET_SIGSTKSZ 8192 - #include "../generic/signal.h" #if !defined(TARGET_PPC64) #define TARGET_ARCH_HAS_SETUP_FRAME #endif +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* PPC_TARGET_SIGNAL_H */ diff --git a/linux-user/ppc/target_syscall.h b/linux-user/ppc/target_syscall.h index b9c4b813d3..77b36d0b46 100644 --- a/linux-user/ppc/target_syscall.h +++ b/linux-user/ppc/target_syscall.h @@ -36,7 +36,7 @@ struct target_pt_regs { abi_ulong link; abi_ulong xer; abi_ulong ccr; -#if defined(TARGET_PPC64) && !defined(TARGET_ABI32) +#if defined(TARGET_PPC64) abi_ulong softe; #else abi_ulong mq; /* 601 only (not used at present) */ @@ -58,8 +58,8 @@ struct target_revectored_struct { * flags masks */ -#if defined(TARGET_PPC64) && !defined(TARGET_ABI32) -#ifdef TARGET_WORDS_BIGENDIAN +#if defined(TARGET_PPC64) +#if TARGET_BIG_ENDIAN #define UNAME_MACHINE "ppc64" #else #define UNAME_MACHINE "ppc64le" @@ -71,7 +71,6 @@ struct target_revectored_struct { #define TARGET_CLONE_BACKWARDS -#define TARGET_MINSIGSTKSZ 2048 #define TARGET_MCL_CURRENT 0x2000 #define TARGET_MCL_FUTURE 0x4000 #define TARGET_MCL_ONFAULT 0x8000 diff --git a/linux-user/qemu.h b/linux-user/qemu.h index 3b0b6b75fe..e2e93fbd1d 100644 --- a/linux-user/qemu.h +++ b/linux-user/qemu.h @@ -1,26 +1,24 @@ #ifndef QEMU_H #define QEMU_H -#include "hostdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/cpu_ldst.h" #undef DEBUG_REMAP #include "exec/user/abitypes.h" -#include "exec/user/thunk.h" #include "syscall_defs.h" #include "target_syscall.h" -#include "exec/gdbstub.h" -/* This is the size of the host kernel's sigset_t, needed where we make +/* + * This is the size of the host kernel's sigset_t, needed where we make * direct system calls that take a sigset_t pointer and a size. */ #define SIGSET_T_SIZE (_NSIG / 8) -/* This struct is used to hold certain information about the image. +/* + * This struct is used to hold certain information about the image. * Basically, it replicates in user space what would be certain * task_struct fields in the kernel */ @@ -42,19 +40,24 @@ struct image_info { abi_ulong data_offset; abi_ulong saved_auxv; abi_ulong auxv_len; - abi_ulong arg_start; - abi_ulong arg_end; - abi_ulong arg_strings; - abi_ulong env_strings; + abi_ulong argc; + abi_ulong argv; + abi_ulong envc; + abi_ulong envp; abi_ulong file_string; uint32_t elf_flags; - int personality; + int personality; abi_ulong alignment; + bool exec_stack; + + /* Generic semihosting knows about these pointers. */ + abi_ulong arg_strings; /* strings for argv */ + abi_ulong env_strings; /* strings for envp; ends arg_strings */ /* The fields below are used in FDPIC mode. 
*/ abi_ulong loadmap_addr; uint16_t nsegs; - void *loadsegs; + void *loadsegs; abi_ulong pt_dynamic_addr; abi_ulong interpreter_loadmap_addr; abi_ulong interpreter_pt_dynamic_addr; @@ -91,15 +94,11 @@ struct vm86_saved_state { #include "nwfpe/fpa11.h" #endif -#define MAX_SIGQUEUE_SIZE 1024 - struct emulated_sigtable { int pending; /* true if signal is pending */ target_siginfo_t info; }; -/* NOTE: we force a big alignment so that the stack stored after is - aligned too */ typedef struct TaskState { pid_t ts_tid; /* tid (or pid) of this task */ #ifdef TARGET_ARM @@ -134,20 +133,23 @@ typedef struct TaskState { struct emulated_sigtable sync_signal; struct emulated_sigtable sigtab[TARGET_NSIG]; - /* This thread's signal mask, as requested by the guest program. + /* + * This thread's signal mask, as requested by the guest program. * The actual signal mask of this thread may differ: * + we don't let SIGSEGV and SIGBUS be blocked while running guest code * + sometimes we block all signals to avoid races */ sigset_t signal_mask; - /* The signal mask imposed by a guest sigsuspend syscall, if we are + /* + * The signal mask imposed by a guest sigsuspend syscall, if we are * currently in the middle of such a syscall */ sigset_t sigsuspend_mask; /* Nonzero if we're leaving a sigsuspend and sigsuspend_mask is valid. */ int in_sigsuspend; - /* Nonzero if process_pending_signals() needs to do something (either + /* + * Nonzero if process_pending_signals() needs to do something (either * handle a pending signal or unblock signals). * This flag is written from a signal handler so should be accessed via * the qatomic_read() and qatomic_set() functions. (It is not accessed @@ -157,333 +159,12 @@ typedef struct TaskState { /* This thread's sigaltstack, if it has one */ struct target_sigaltstack sigaltstack_used; -} __attribute__((aligned(16))) TaskState; -extern char *exec_path; -void init_task_state(TaskState *ts); -void task_settid(TaskState *); -void stop_all_tasks(void); -extern const char *qemu_uname_release; -extern unsigned long mmap_min_addr; + /* Start time of task after system boot in clock ticks */ + uint64_t start_boottime; +} TaskState; -/* ??? See if we can avoid exposing so much of the loader internals. */ - -/* Read a good amount of data initially, to hopefully get all the - program headers loaded. */ -#define BPRM_BUF_SIZE 1024 - -/* - * This structure is used to hold the arguments that are - * used when loading binaries. 
- */ -struct linux_binprm { - char buf[BPRM_BUF_SIZE] __attribute__((aligned)); - abi_ulong p; - int fd; - int e_uid, e_gid; - int argc, envc; - char **argv; - char **envp; - char * filename; /* Name of binary */ - int (*core_dump)(int, const CPUArchState *); /* coredump routine */ -}; - -typedef struct IOCTLEntry IOCTLEntry; - -typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp, - int fd, int cmd, abi_long arg); - -struct IOCTLEntry { - int target_cmd; - unsigned int host_cmd; - const char *name; - int access; - do_ioctl_fn *do_ioctl; - const argtype arg_type[5]; -}; - -extern IOCTLEntry ioctl_entries[]; - -#define IOC_R 0x0001 -#define IOC_W 0x0002 -#define IOC_RW (IOC_R | IOC_W) - -void do_init_thread(struct target_pt_regs *regs, struct image_info *infop); -abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp, - abi_ulong stringp, int push_ptr); -int loader_exec(int fdexec, const char *filename, char **argv, char **envp, - struct target_pt_regs * regs, struct image_info *infop, - struct linux_binprm *); - -/* Returns true if the image uses the FDPIC ABI. If this is the case, - * we have to provide some information (loadmap, pt_dynamic_info) such - * that the program can be relocated adequately. This is also useful - * when handling signals. - */ -int info_is_fdpic(struct image_info *info); - -uint32_t get_elf_eflags(int fd); -int load_elf_binary(struct linux_binprm *bprm, struct image_info *info); -int load_flt_binary(struct linux_binprm *bprm, struct image_info *info); - -abi_long memcpy_to_target(abi_ulong dest, const void *src, - unsigned long len); -void target_set_brk(abi_ulong new_brk); abi_long do_brk(abi_ulong new_brk); -void syscall_init(void); -abi_long do_syscall(void *cpu_env, int num, abi_long arg1, - abi_long arg2, abi_long arg3, abi_long arg4, - abi_long arg5, abi_long arg6, abi_long arg7, - abi_long arg8); -extern __thread CPUState *thread_cpu; -void cpu_loop(CPUArchState *env); -const char *target_strerror(int err); -int get_osversion(void); -void init_qemu_uname_release(void); -void fork_start(void); -void fork_end(int child); - -/** - * probe_guest_base: - * @image_name: the executable being loaded - * @loaddr: the lowest fixed address in the executable - * @hiaddr: the highest fixed address in the executable - * - * Creates the initial guest address space in the host memory space. - * - * If @loaddr == 0, then no address in the executable is fixed, - * i.e. it is fully relocatable. In that case @hiaddr is the size - * of the executable. - * - * This function will not return if a valid value for guest_base - * cannot be chosen. On return, the executable loader can expect - * - * target_mmap(loaddr, hiaddr - loaddr, ...) - * - * to succeed. - */ -void probe_guest_base(const char *image_name, - abi_ulong loaddr, abi_ulong hiaddr); - -#include "qemu/log.h" - -/* safe_syscall.S */ - -/** - * safe_syscall: - * @int number: number of system call to make - * ...: arguments to the system call - * - * Call a system call if guest signal not pending. - * This has the same API as the libc syscall() function, except that it - * may return -1 with errno == TARGET_ERESTARTSYS if a signal was pending. - * - * Returns: the system call result, or -1 with an error code in errno - * (Errnos are host errnos; we rely on TARGET_ERESTARTSYS not clashing - * with any of the host errno values.) 
- */ - -/* A guide to using safe_syscall() to handle interactions between guest - * syscalls and guest signals: - * - * Guest syscalls come in two flavours: - * - * (1) Non-interruptible syscalls - * - * These are guest syscalls that never get interrupted by signals and - * so never return EINTR. They can be implemented straightforwardly in - * QEMU: just make sure that if the implementation code has to make any - * blocking calls that those calls are retried if they return EINTR. - * It's also OK to implement these with safe_syscall, though it will be - * a little less efficient if a signal is delivered at the 'wrong' moment. - * - * Some non-interruptible syscalls need to be handled using block_signals() - * to block signals for the duration of the syscall. This mainly applies - * to code which needs to modify the data structures used by the - * host_signal_handler() function and the functions it calls, including - * all syscalls which change the thread's signal mask. - * - * (2) Interruptible syscalls - * - * These are guest syscalls that can be interrupted by signals and - * for which we need to either return EINTR or arrange for the guest - * syscall to be restarted. This category includes both syscalls which - * always restart (and in the kernel return -ERESTARTNOINTR), ones - * which only restart if there is no handler (kernel returns -ERESTARTNOHAND - * or -ERESTART_RESTARTBLOCK), and the most common kind which restart - * if the handler was registered with SA_RESTART (kernel returns - * -ERESTARTSYS). System calls which are only interruptible in some - * situations (like 'open') also need to be handled this way. - * - * Here it is important that the host syscall is made - * via this safe_syscall() function, and *not* via the host libc. - * If the host libc is used then the implementation will appear to work - * most of the time, but there will be a race condition where a - * signal could arrive just before we make the host syscall inside libc, - * and then then guest syscall will not correctly be interrupted. - * Instead the implementation of the guest syscall can use the safe_syscall - * function but otherwise just return the result or errno in the usual - * way; the main loop code will take care of restarting the syscall - * if appropriate. - * - * (If the implementation needs to make multiple host syscalls this is - * OK; any which might really block must be via safe_syscall(); for those - * which are only technically blocking (ie which we know in practice won't - * stay in the host kernel indefinitely) it's OK to use libc if necessary. - * You must be able to cope with backing out correctly if some safe_syscall - * you make in the implementation returns either -TARGET_ERESTARTSYS or - * EINTR though.) - * - * block_signals() cannot be used for interruptible syscalls. - * - * - * How and why the safe_syscall implementation works: - * - * The basic setup is that we make the host syscall via a known - * section of host native assembly. If a signal occurs, our signal - * handler checks the interrupted host PC against the addresse of that - * known section. If the PC is before or at the address of the syscall - * instruction then we change the PC to point at a "return - * -TARGET_ERESTARTSYS" code path instead, and then exit the signal handler - * (causing the safe_syscall() call to immediately return that value). 
- * Then in the main.c loop if we see this magic return value we adjust - * the guest PC to wind it back to before the system call, and invoke - * the guest signal handler as usual. - * - * This winding-back will happen in two cases: - * (1) signal came in just before we took the host syscall (a race); - * in this case we'll take the guest signal and have another go - * at the syscall afterwards, and this is indistinguishable for the - * guest from the timing having been different such that the guest - * signal really did win the race - * (2) signal came in while the host syscall was blocking, and the - * host kernel decided the syscall should be restarted; - * in this case we want to restart the guest syscall also, and so - * rewinding is the right thing. (Note that "restart" semantics mean - * "first call the signal handler, then reattempt the syscall".) - * The other situation to consider is when a signal came in while the - * host syscall was blocking, and the host kernel decided that the syscall - * should not be restarted; in this case QEMU's host signal handler will - * be invoked with the PC pointing just after the syscall instruction, - * with registers indicating an EINTR return; the special code in the - * handler will not kick in, and we will return EINTR to the guest as - * we should. - * - * Notice that we can leave the host kernel to make the decision for - * us about whether to do a restart of the syscall or not; we do not - * need to check SA_RESTART flags in QEMU or distinguish the various - * kinds of restartability. - */ -#ifdef HAVE_SAFE_SYSCALL -/* The core part of this function is implemented in assembly */ -extern long safe_syscall_base(int *pending, long number, ...); - -#define safe_syscall(...) \ - ({ \ - long ret_; \ - int *psp_ = &((TaskState *)thread_cpu->opaque)->signal_pending; \ - ret_ = safe_syscall_base(psp_, __VA_ARGS__); \ - if (is_error(ret_)) { \ - errno = -ret_; \ - ret_ = -1; \ - } \ - ret_; \ - }) - -#else - -/* Fallback for architectures which don't yet provide a safe-syscall assembly - * fragment; note that this is racy! - * This should go away when all host architectures have been updated. 
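The guide above describes the expected calling pattern only in prose. Purely as an illustration (none of this is in the patch), an interruptible guest syscall built on that contract would look roughly like the sketch below; the wrapper name is hypothetical, and the helpers it leans on (lock_user()/unlock_user(), get_errno(), TARGET_EFAULT) are assumed from the rest of linux-user.

/*
 * Sketch only -- not part of this patch: an interruptible guest
 * syscall implemented against the safe_syscall() contract above.
 */
static abi_long do_example_read(int fd, abi_ulong buf_addr, abi_ulong count)
{
    abi_long ret;
    void *p = lock_user(VERIFY_WRITE, buf_addr, count, 0);

    if (!p) {
        return -TARGET_EFAULT;
    }
    /*
     * The blocking host syscall goes through safe_syscall(); if a guest
     * signal races with it, get_errno() hands back -TARGET_ERESTARTSYS
     * (spelled -QEMU_ERESTARTSYS in the code this patch adds) and the
     * main loop rewinds the guest PC, delivers the signal and restarts
     * the syscall.
     */
    ret = get_errno(safe_syscall(__NR_read, fd, p, count));
    unlock_user(p, buf_addr, ret > 0 ? ret : 0);
    return ret;
}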
- */ -#define safe_syscall syscall - -#endif - -/* syscall.c */ -int host_to_target_waitstatus(int status); - -/* strace.c */ -void print_syscall(void *cpu_env, int num, - abi_long arg1, abi_long arg2, abi_long arg3, - abi_long arg4, abi_long arg5, abi_long arg6); -void print_syscall_ret(void *cpu_env, int num, abi_long ret, - abi_long arg1, abi_long arg2, abi_long arg3, - abi_long arg4, abi_long arg5, abi_long arg6); -/** - * print_taken_signal: - * @target_signum: target signal being taken - * @tinfo: target_siginfo_t which will be passed to the guest for the signal - * - * Print strace output indicating that this signal is being taken by the guest, - * in a format similar to: - * --- SIGSEGV {si_signo=SIGSEGV, si_code=SI_KERNEL, si_addr=0} --- - */ -void print_taken_signal(int target_signum, const target_siginfo_t *tinfo); - -/* signal.c */ -void process_pending_signals(CPUArchState *cpu_env); -void signal_init(void); -int queue_signal(CPUArchState *env, int sig, int si_type, - target_siginfo_t *info); -void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info); -void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo); -int target_to_host_signal(int sig); -int host_to_target_signal(int sig); -long do_sigreturn(CPUArchState *env); -long do_rt_sigreturn(CPUArchState *env); -abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, - CPUArchState *env); -int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset); -abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx, - abi_ulong unew_ctx, abi_long ctx_size); -/** - * block_signals: block all signals while handling this guest syscall - * - * Block all signals, and arrange that the signal mask is returned to - * its correct value for the guest before we resume execution of guest code. - * If this function returns non-zero, then the caller should immediately - * return -TARGET_ERESTARTSYS to the main loop, which will take the pending - * signal and restart execution of the syscall. - * If block_signals() returns zero, then the caller can continue with - * emulation of the system call knowing that no signals can be taken - * (and therefore that no race conditions will result). - * This should only be called once, because if it is called a second time - * it will always return non-zero. (Think of it like a mutex that can't - * be recursively locked.) - * Signals will be unblocked again by process_pending_signals(). - * - * Return value: non-zero if there was a pending signal, zero if not. 
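block_signals() is likewise documented only by its contract here; the minimal calling pattern it implies -- the same shape do_sigprocmask() takes further down in this patch -- is sketched below. The wrapper is hypothetical, and note that the error code is renamed -QEMU_ERESTARTSYS in the code this patch adds.

static abi_long do_example_update_signal_state(void)
{
    /*
     * A guest signal is already pending: return to the main loop so it
     * can be delivered, after which the syscall is restarted.
     */
    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }
    /*
     * From here until process_pending_signals() runs, no signal can be
     * taken, so structures shared with host_signal_handler() (such as
     * sigact_table[]) can be updated without racing it.
     */
    return 0;
}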
- */ -int block_signals(void); /* Returns non zero if signal pending */ - -#ifdef TARGET_I386 -/* vm86.c */ -void save_v86_state(CPUX86State *env); -void handle_vm86_trap(CPUX86State *env, int trapno); -void handle_vm86_fault(CPUX86State *env); -int do_vm86(CPUX86State *env, long subfunction, abi_ulong v86_addr); -#elif defined(TARGET_SPARC64) -void sparc64_set_context(CPUSPARCState *env); -void sparc64_get_context(CPUSPARCState *env); -#endif - -/* mmap.c */ -int target_mprotect(abi_ulong start, abi_ulong len, int prot); -abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, - int flags, int fd, abi_ulong offset); -int target_munmap(abi_ulong start, abi_ulong len); -abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size, - abi_ulong new_size, unsigned long flags, - abi_ulong new_addr); -extern unsigned long last_brk; -extern abi_ulong mmap_next_start; -abi_ulong mmap_find_vma(abi_ulong, abi_ulong, abi_ulong); -void mmap_fork_start(void); -void mmap_fork_end(int child); - -/* main.c */ -extern unsigned long guest_stack_size; /* user access */ @@ -560,7 +241,7 @@ static inline bool access_ok(CPUState *cpu, int type, } while (0) -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN # define __put_user(x, hptr) __put_user_e(x, hptr, be) # define __get_user(x, hptr) __get_user_e(x, hptr, be) #else @@ -667,80 +348,4 @@ void *lock_user_string(abi_ulong guest_addr); #define unlock_user_struct(host_ptr, guest_addr, copy) \ unlock_user(host_ptr, guest_addr, (copy) ? sizeof(*host_ptr) : 0) -#include - -static inline int is_error(abi_long ret) -{ - return (abi_ulong)ret >= (abi_ulong)(-4096); -} - -#if TARGET_ABI_BITS == 32 -static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) -{ -#ifdef TARGET_WORDS_BIGENDIAN - return ((uint64_t)word0 << 32) | word1; -#else - return ((uint64_t)word1 << 32) | word0; -#endif -} -#else /* TARGET_ABI_BITS == 32 */ -static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) -{ - return word0; -} -#endif /* TARGET_ABI_BITS != 32 */ - -void print_termios(void *arg); - -/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */ -#ifdef TARGET_ARM -static inline int regpairs_aligned(void *cpu_env, int num) -{ - return ((((CPUARMState *)cpu_env)->eabi) == 1) ; -} -#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32) -static inline int regpairs_aligned(void *cpu_env, int num) { return 1; } -#elif defined(TARGET_PPC) && !defined(TARGET_PPC64) -/* - * SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs - * of registers which translates to the same as ARM/MIPS, because we start with - * r3 as arg1 - */ -static inline int regpairs_aligned(void *cpu_env, int num) { return 1; } -#elif defined(TARGET_SH4) -/* SH4 doesn't align register pairs, except for p{read,write}64 */ -static inline int regpairs_aligned(void *cpu_env, int num) -{ - switch (num) { - case TARGET_NR_pread64: - case TARGET_NR_pwrite64: - return 1; - - default: - return 0; - } -} -#elif defined(TARGET_XTENSA) -static inline int regpairs_aligned(void *cpu_env, int num) { return 1; } -#elif defined(TARGET_HEXAGON) -static inline int regpairs_aligned(void *cpu_env, int num) { return 1; } -#else -static inline int regpairs_aligned(void *cpu_env, int num) { return 0; } -#endif - -/** - * preexit_cleanup: housekeeping before the guest exits - * - * env: the CPU state - * code: the exit code - */ -void preexit_cleanup(CPUArchState *env, int code); - -/* Include target-specific struct and function definitions; - * they may need access to 
the target-independent structures - * above, so include them last. - */ -#include "target_cpu.h" -#include "target_structs.h" - #endif /* QEMU_H */ diff --git a/linux-user/riscv/cpu_loop.c b/linux-user/riscv/cpu_loop.c index 74a9628dc9..bffca7db12 100644 --- a/linux-user/riscv/cpu_loop.c +++ b/linux-user/riscv/cpu_loop.c @@ -18,18 +18,18 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/error-report.h" #include "qemu.h" +#include "user-internals.h" #include "cpu_loop-common.h" +#include "signal-common.h" #include "elf.h" #include "semihosting/common-semi.h" void cpu_loop(CPURISCVState *env) { CPUState *cs = env_cpu(env); - int trapnr, signum, sigcode; - target_ulong sigaddr; + int trapnr; target_ulong ret; for (;;) { @@ -38,10 +38,6 @@ void cpu_loop(CPURISCVState *env) cpu_exec_end(cs); process_queued_cpu_work(cs); - signum = 0; - sigcode = 0; - sigaddr = 0; - switch (trapnr) { case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ @@ -67,9 +63,9 @@ void cpu_loop(CPURISCVState *env) env->gpr[xA5], 0, 0); } - if (ret == -TARGET_ERESTARTSYS) { + if (ret == -QEMU_ERESTARTSYS) { env->pc -= 4; - } else if (ret != -TARGET_QEMU_ESIGRETURN) { + } else if (ret != -QEMU_ESIGRETURN) { env->gpr[xA0] = ret; } if (cs->singlestep_enabled) { @@ -77,29 +73,16 @@ void cpu_loop(CPURISCVState *env) } break; case RISCV_EXCP_ILLEGAL_INST: - signum = TARGET_SIGILL; - sigcode = TARGET_ILL_ILLOPC; + force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->pc); break; case RISCV_EXCP_BREAKPOINT: - signum = TARGET_SIGTRAP; - sigcode = TARGET_TRAP_BRKPT; - sigaddr = env->pc; - break; - case RISCV_EXCP_INST_PAGE_FAULT: - case RISCV_EXCP_LOAD_PAGE_FAULT: - case RISCV_EXCP_STORE_PAGE_FAULT: - signum = TARGET_SIGSEGV; - sigcode = TARGET_SEGV_MAPERR; - sigaddr = env->badaddr; - break; - case RISCV_EXCP_SEMIHOST: - env->gpr[xA0] = do_common_semihosting(cs); - env->pc += 4; - break; case EXCP_DEBUG: gdbstep: - signum = TARGET_SIGTRAP; - sigcode = TARGET_TRAP_BRKPT; + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc); + break; + case RISCV_EXCP_SEMIHOST: + do_common_semihosting(cs); + env->pc += 4; break; default: EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n", @@ -107,16 +90,6 @@ void cpu_loop(CPURISCVState *env) exit(EXIT_FAILURE); } - if (signum) { - target_siginfo_t info = { - .si_signo = signum, - .si_errno = 0, - .si_code = sigcode, - ._sifields._sigfault._addr = sigaddr - }; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - } - process_pending_signals(env); } } @@ -131,7 +104,7 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) env->gpr[xSP] = regs->sp; env->elf_flags = info->elf_flags; - if ((env->misa & RVE) && !(env->elf_flags & EF_RISCV_RVE)) { + if ((env->misa_ext & RVE) && !(env->elf_flags & EF_RISCV_RVE)) { error_report("Incompatible ELF: RVE cpu requires RVE ABI binary"); exit(EXIT_FAILURE); } diff --git a/linux-user/riscv/signal.c b/linux-user/riscv/signal.c index 9405c7fd9a..eaa168199a 100644 --- a/linux-user/riscv/signal.c +++ b/linux-user/riscv/signal.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" @@ -46,7 +47,6 @@ struct target_ucontext { }; struct target_rt_sigframe { - uint32_t tramp[2]; /* not in kernel, which uses VDSO instead */ struct target_siginfo info; struct target_ucontext uc; }; @@ -64,9 +64,7 @@ static abi_ulong get_sigframe(struct target_sigaction *ka, /* This is the X/Open sanctioned 
signal stack switching. */ sp = target_sigsp(sp, ka) - framesize; - - /* XXX: kernel aligns with 0xf ? */ - sp &= ~3UL; /* align sp on 4-byte boundary */ + sp &= ~0xf; return sp; } @@ -104,12 +102,6 @@ static void setup_ucontext(struct target_ucontext *uc, setup_sigcontext(&uc->uc_mcontext, env); } -static inline void install_sigtramp(uint32_t *tramp) -{ - __put_user(0x08b00893, tramp + 0); /* li a7, 139 = __NR_rt_sigreturn */ - __put_user(0x00000073, tramp + 1); /* ecall */ -} - void setup_rt_frame(int sig, struct target_sigaction *ka, target_siginfo_t *info, target_sigset_t *set, CPURISCVState *env) @@ -126,14 +118,13 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, setup_ucontext(&frame->uc, env, set); tswap_siginfo(&frame->info, info); - install_sigtramp(frame->tramp); env->pc = ka->_sa_handler; env->gpr[xSP] = frame_addr; env->gpr[xA0] = sig; env->gpr[xA1] = frame_addr + offsetof(struct target_rt_sigframe, info); env->gpr[xA2] = frame_addr + offsetof(struct target_rt_sigframe, uc); - env->gpr[xRA] = frame_addr + offsetof(struct target_rt_sigframe, tramp); + env->gpr[xRA] = default_rt_sigreturn; return; @@ -195,10 +186,22 @@ long do_rt_sigreturn(CPURISCVState *env) target_restore_altstack(&frame->uc.uc_stack, env); unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); return 0; } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0); + assert(tramp != NULL); + + __put_user(0x08b00893, tramp + 0); /* li a7, 139 = __NR_rt_sigreturn */ + __put_user(0x00000073, tramp + 1); /* ecall */ + + default_rt_sigreturn = sigtramp_page; + unlock_user(tramp, sigtramp_page, 8); +} diff --git a/linux-user/riscv/target_mman.h b/linux-user/riscv/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/riscv/target_mman.h @@ -0,0 +1 @@ +#include "../generic/target_mman.h" diff --git a/linux-user/riscv/target_prctl.h b/linux-user/riscv/target_prctl.h new file mode 100644 index 0000000000..eb53b31ad5 --- /dev/null +++ b/linux-user/riscv/target_prctl.h @@ -0,0 +1 @@ +/* No special prctl support required. */ diff --git a/linux-user/riscv/target_resource.h b/linux-user/riscv/target_resource.h new file mode 100644 index 0000000000..227259594c --- /dev/null +++ b/linux-user/riscv/target_resource.h @@ -0,0 +1 @@ +#include "../generic/target_resource.h" diff --git a/linux-user/riscv/target_signal.h b/linux-user/riscv/target_signal.h index f113ba9a55..6c0470f0bc 100644 --- a/linux-user/riscv/target_signal.h +++ b/linux-user/riscv/target_signal.h @@ -1,18 +1,8 @@ #ifndef RISCV_TARGET_SIGNAL_H #define RISCV_TARGET_SIGNAL_H -typedef struct target_sigaltstack { - abi_ulong ss_sp; - abi_int ss_flags; - abi_ulong ss_size; -} target_stack_t; - -#define TARGET_SS_ONSTACK 1 -#define TARGET_SS_DISABLE 2 - -#define TARGET_MINSIGSTKSZ 2048 -#define TARGET_SIGSTKSZ 8192 - #include "../generic/signal.h" +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* RISCV_TARGET_SIGNAL_H */ diff --git a/linux-user/riscv/target_structs.h b/linux-user/riscv/target_structs.h index ea3e5ed17e..3a06f373c3 100644 --- a/linux-user/riscv/target_structs.h +++ b/linux-user/riscv/target_structs.h @@ -1,46 +1 @@ -/* - * RISC-V specific structures for linux-user - * - * This is a copy of ../aarch64/target_structs.h atm. 
- * - */ -#ifndef RISCV_TARGET_STRUCTS_H -#define RISCV_TARGET_STRUCTS_H - -struct target_ipc_perm { - abi_int __key; /* Key. */ - abi_uint uid; /* Owner's user ID. */ - abi_uint gid; /* Owner's group ID. */ - abi_uint cuid; /* Creator's user ID. */ - abi_uint cgid; /* Creator's group ID. */ - abi_ushort mode; /* Read/write permission. */ - abi_ushort __pad1; - abi_ushort __seq; /* Sequence number. */ - abi_ushort __pad2; - abi_ulong __unused1; - abi_ulong __unused2; -}; - -struct target_shmid_ds { - struct target_ipc_perm shm_perm; /* operation permission struct */ - abi_long shm_segsz; /* size of segment in bytes */ - abi_ulong shm_atime; /* time of last shmat() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused1; -#endif - abi_ulong shm_dtime; /* time of last shmdt() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused2; -#endif - abi_ulong shm_ctime; /* time of last change by shmctl() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused3; -#endif - abi_int shm_cpid; /* pid of creator */ - abi_int shm_lpid; /* pid of last shmop */ - abi_ulong shm_nattch; /* number of current attaches */ - abi_ulong __unused4; - abi_ulong __unused5; -}; - -#endif +#include "../generic/target_structs.h" diff --git a/linux-user/riscv/target_syscall.h b/linux-user/riscv/target_syscall.h index dc597c8972..7601f10c28 100644 --- a/linux-user/riscv/target_syscall.h +++ b/linux-user/riscv/target_syscall.h @@ -45,12 +45,12 @@ struct target_pt_regs { #ifdef TARGET_RISCV32 #define UNAME_MACHINE "riscv32" +#define UNAME_MINIMUM_RELEASE "5.4.0" #else #define UNAME_MACHINE "riscv64" -#endif #define UNAME_MINIMUM_RELEASE "4.15.0" +#endif -#define TARGET_MINSIGSTKSZ 2048 #define TARGET_MCL_CURRENT 1 #define TARGET_MCL_FUTURE 2 #define TARGET_MCL_ONFAULT 4 diff --git a/linux-user/s390x/cpu_loop.c b/linux-user/s390x/cpu_loop.c index 6a69a6dd26..8b7ac2879e 100644 --- a/linux-user/s390x/cpu_loop.c +++ b/linux-user/s390x/cpu_loop.c @@ -18,12 +18,11 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu.h" +#include "user-internals.h" #include "cpu_loop-common.h" +#include "signal-common.h" -/* s390x masks the fault address it reports in si_addr for SIGSEGV and SIGBUS */ -#define S390X_FAIL_ADDR_MASK -4096LL static int get_pgm_data_si_code(int dxc_code) { @@ -58,7 +57,6 @@ void cpu_loop(CPUS390XState *env) { CPUState *cs = env_cpu(env); int trapnr, n, sig; - target_siginfo_t info; target_ulong addr; abi_long ret; @@ -83,11 +81,20 @@ void cpu_loop(CPUS390XState *env) ret = do_syscall(env, n, env->regs[2], env->regs[3], env->regs[4], env->regs[5], env->regs[6], env->regs[7], 0, 0); - if (ret == -TARGET_ERESTARTSYS) { + if (ret == -QEMU_ERESTARTSYS) { env->psw.addr -= env->int_svc_ilen; - } else if (ret != -TARGET_QEMU_ESIGRETURN) { + } else if (ret != -QEMU_ESIGRETURN) { env->regs[2] = ret; } + + if (unlikely(cs->singlestep_enabled)) { + /* + * cpu_tb_exec() did not raise EXCP_DEBUG, because it has seen + * that EXCP_SVC was already pending. 
+ */ + cs->exception_index = EXCP_DEBUG; + } + break; case EXCP_DEBUG: @@ -109,12 +116,13 @@ void cpu_loop(CPUS390XState *env) n = TARGET_ILL_ILLOPC; goto do_signal_pc; case PGM_PROTECTION: + force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_ACCERR, + env->__excp_addr); + break; case PGM_ADDRESSING: - sig = TARGET_SIGSEGV; - /* XXX: check env->error_code */ - n = TARGET_SEGV_MAPERR; - addr = env->__excp_addr & S390X_FAIL_ADDR_MASK; - goto do_signal; + force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR, + env->__excp_addr); + break; case PGM_EXECUTE: case PGM_SPECIFICATION: case PGM_SPECIAL_OP: @@ -157,11 +165,7 @@ void cpu_loop(CPUS390XState *env) */ env->psw.addr += env->int_pgm_ilen; do_signal: - info.si_signo = sig; - info.si_errno = 0; - info.si_code = n; - info._sifields._sigfault._addr = addr; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(sig, n, addr); break; case EXCP_ATOMIC: diff --git a/linux-user/s390x/signal.c b/linux-user/s390x/signal.c index bf8a8fbfe9..f72165576f 100644 --- a/linux-user/s390x/signal.c +++ b/linux-user/s390x/signal.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" @@ -67,7 +68,6 @@ typedef struct { target_sigregs sregs; int signo; target_sigregs_ext sregs_ext; - uint16_t retcode; } sigframe; #define TARGET_UC_VXRS 2 @@ -84,7 +84,11 @@ struct target_ucontext { typedef struct { uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; - uint16_t retcode; + /* + * This field is no longer initialized by the kernel, but it's still a part + * of the ABI. + */ + uint16_t svc_insn; struct target_siginfo info; struct target_ucontext uc; } rt_sigframe; @@ -142,6 +146,7 @@ static void save_sigregs(CPUS390XState *env, target_sigregs *sregs) * We have to store the fp registers to current->thread.fp_regs * to merge them with the emulated registers. */ + __put_user(env->fpc, &sregs->fpregs.fpc); for (i = 0; i < 16; i++) { __put_user(*get_freg(env, i), &sregs->fpregs.fprs[i]); } @@ -208,9 +213,7 @@ void setup_frame(int sig, struct target_sigaction *ka, if (ka->sa_flags & TARGET_SA_RESTORER) { restorer = ka->sa_restorer; } else { - restorer = frame_addr + offsetof(sigframe, retcode); - __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, - &frame->retcode); + restorer = default_sigreturn; } /* Set up registers for signal handler */ @@ -261,9 +264,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, if (ka->sa_flags & TARGET_SA_RESTORER) { restorer = ka->sa_restorer; } else { - restorer = frame_addr + offsetof(typeof(*frame), retcode); - __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, - &frame->retcode); + restorer = default_rt_sigreturn; } /* Create siginfo on the signal stack. */ @@ -331,6 +332,7 @@ static void restore_sigregs(CPUS390XState *env, target_sigregs *sc) for (i = 0; i < 16; i++) { __get_user(env->aregs[i], &sc->regs.acrs[i]); } + __get_user(env->fpc, &sc->fpregs.fpc); for (i = 0; i < 16; i++) { __get_user(*get_freg(env, i), &sc->fpregs.fprs[i]); } @@ -364,7 +366,7 @@ long do_sigreturn(CPUS390XState *env) trace_user_do_sigreturn(env, frame_addr); if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; } /* Make sure that we're initializing all of target_set. 
*/ @@ -378,7 +380,7 @@ long do_sigreturn(CPUS390XState *env) restore_sigregs_ext(env, &frame->sregs_ext); unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; } long do_rt_sigreturn(CPUS390XState *env) @@ -390,7 +392,7 @@ long do_rt_sigreturn(CPUS390XState *env) trace_user_do_rt_sigreturn(env, frame_addr); if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; } target_to_host_sigset(&set, &frame->uc.tuc_sigmask); @@ -402,5 +404,19 @@ long do_rt_sigreturn(CPUS390XState *env) target_restore_altstack(&frame->uc.tuc_stack, env); unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; +} + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint16_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 + 2, 0); + assert(tramp != NULL); + + default_sigreturn = sigtramp_page; + __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, &tramp[0]); + + default_rt_sigreturn = sigtramp_page + 2; + __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, &tramp[1]); + + unlock_user(tramp, sigtramp_page, 2 + 2); } diff --git a/linux-user/s390x/target_mman.h b/linux-user/s390x/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/s390x/target_mman.h @@ -0,0 +1 @@ +#include "../generic/target_mman.h" diff --git a/linux-user/s390x/target_prctl.h b/linux-user/s390x/target_prctl.h new file mode 100644 index 0000000000..eb53b31ad5 --- /dev/null +++ b/linux-user/s390x/target_prctl.h @@ -0,0 +1 @@ +/* No special prctl support required. */ diff --git a/linux-user/s390x/target_resource.h b/linux-user/s390x/target_resource.h new file mode 100644 index 0000000000..227259594c --- /dev/null +++ b/linux-user/s390x/target_resource.h @@ -0,0 +1 @@ +#include "../generic/target_resource.h" diff --git a/linux-user/s390x/target_signal.h b/linux-user/s390x/target_signal.h index bbfc464d44..41e0e34a55 100644 --- a/linux-user/s390x/target_signal.h +++ b/linux-user/s390x/target_signal.h @@ -1,22 +1,9 @@ #ifndef S390X_TARGET_SIGNAL_H #define S390X_TARGET_SIGNAL_H -typedef struct target_sigaltstack { - abi_ulong ss_sp; - abi_int ss_flags; - abi_ulong ss_size; -} target_stack_t; - -/* - * sigaltstack controls - */ -#define TARGET_SS_ONSTACK 1 -#define TARGET_SS_DISABLE 2 - -#define TARGET_MINSIGSTKSZ 2048 -#define TARGET_SIGSTKSZ 8192 - #include "../generic/signal.h" #define TARGET_ARCH_HAS_SETUP_FRAME +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* S390X_TARGET_SIGNAL_H */ diff --git a/linux-user/s390x/target_syscall.h b/linux-user/s390x/target_syscall.h index 94f84178db..4018988a25 100644 --- a/linux-user/s390x/target_syscall.h +++ b/linux-user/s390x/target_syscall.h @@ -27,7 +27,6 @@ struct target_pt_regs { #define UNAME_MINIMUM_RELEASE "2.6.32" #define TARGET_CLONE_BACKWARDS2 -#define TARGET_MINSIGSTKSZ 2048 #define TARGET_MCL_CURRENT 1 #define TARGET_MCL_FUTURE 2 #define TARGET_MCL_ONFAULT 4 diff --git a/linux-user/semihost.c b/linux-user/semihost.c index f53ab526fb..cee62a365c 100644 --- a/linux-user/semihost.c +++ b/linux-user/semihost.c @@ -13,41 +13,9 @@ #include "qemu/osdep.h" #include "semihosting/console.h" #include "qemu.h" +#include "user-internals.h" #include -int qemu_semihosting_console_outs(CPUArchState *env, target_ulong addr) -{ - int len = target_strlen(addr); - void *s; - if (len < 0){ - qemu_log_mask(LOG_GUEST_ERROR, - "%s: passed inaccessible address " TARGET_FMT_lx, - __func__, 
addr); - return 0; - } - s = lock_user(VERIFY_READ, addr, (long)(len + 1), 1); - g_assert(s); /* target_strlen has already verified this will work */ - len = write(STDERR_FILENO, s, len); - unlock_user(s, addr, 0); - return len; -} - -void qemu_semihosting_console_outc(CPUArchState *env, target_ulong addr) -{ - char c; - - if (get_user_u8(c, addr)) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: passed inaccessible address " TARGET_FMT_lx, - __func__, addr); - } else { - if (write(STDERR_FILENO, &c, 1) != 1) { - qemu_log_mask(LOG_UNIMP, "%s: unexpected write to stdout failure", - __func__); - } - } -} - /* * For linux-user we can safely block. However as we want to return as * soon as a character is read we need to tweak the termio to disable @@ -55,21 +23,28 @@ void qemu_semihosting_console_outc(CPUArchState *env, target_ulong addr) * program is expecting more normal behaviour. This is slow but * nothing using semihosting console reading is expecting to be fast. */ -target_ulong qemu_semihosting_console_inc(CPUArchState *env) +int qemu_semihosting_console_read(CPUState *cs, void *buf, int len) { - uint8_t c; + int ret; struct termios old_tio, new_tio; /* Disable line-buffering and echo */ tcgetattr(STDIN_FILENO, &old_tio); new_tio = old_tio; new_tio.c_lflag &= (~ICANON & ~ECHO); + new_tio.c_cc[VMIN] = 1; + new_tio.c_cc[VTIME] = 0; tcsetattr(STDIN_FILENO, TCSANOW, &new_tio); - c = getchar(); + ret = fread(buf, 1, len, stdin); /* restore config */ tcsetattr(STDIN_FILENO, TCSANOW, &old_tio); - return (target_ulong) c; + return ret; +} + +int qemu_semihosting_console_write(void *buf, int len) +{ + return fwrite(buf, 1, len, stderr); } diff --git a/linux-user/sh4/cpu_loop.c b/linux-user/sh4/cpu_loop.c index 222ed1c670..c805f9db11 100644 --- a/linux-user/sh4/cpu_loop.c +++ b/linux-user/sh4/cpu_loop.c @@ -18,15 +18,15 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu.h" +#include "user-internals.h" #include "cpu_loop-common.h" +#include "signal-common.h" void cpu_loop(CPUSH4State *env) { CPUState *cs = env_cpu(env); int trapnr, ret; - target_siginfo_t info; while (1) { bool arch_interrupt = true; @@ -48,9 +48,9 @@ void cpu_loop(CPUSH4State *env) env->gregs[0], env->gregs[1], 0, 0); - if (ret == -TARGET_ERESTARTSYS) { + if (ret == -QEMU_ERESTARTSYS) { env->pc -= 2; - } else if (ret != -TARGET_QEMU_ESIGRETURN) { + } else if (ret != -QEMU_ESIGRETURN) { env->gregs[0] = ret; } break; @@ -58,18 +58,7 @@ void cpu_loop(CPUSH4State *env) /* just indicate that signals should be handled asap */ break; case EXCP_DEBUG: - info.si_signo = TARGET_SIGTRAP; - info.si_errno = 0; - info.si_code = TARGET_TRAP_BRKPT; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; - case 0xa0: - case 0xc0: - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - info.si_code = TARGET_SEGV_MAPERR; - info._sifields._sigfault._addr = env->tea; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc); break; case EXCP_ATOMIC: cpu_exec_step_atomic(cs); diff --git a/linux-user/sh4/signal.c b/linux-user/sh4/signal.c index 0451e65806..c4ba962708 100644 --- a/linux-user/sh4/signal.c +++ b/linux-user/sh4/signal.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" @@ -51,7 +52,6 @@ struct target_sigframe { struct target_sigcontext sc; target_ulong extramask[TARGET_NSIG_WORDS-1]; - uint16_t retcode[3]; }; @@ -67,7 +67,6 @@ struct target_rt_sigframe { 
struct target_siginfo info; struct target_ucontext uc; - uint16_t retcode[3]; }; @@ -162,7 +161,7 @@ static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc) __get_user(regs->fpul, &sc->sc_fpul); regs->tra = -1; /* disable syscall checks */ - regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK); + regs->flags = 0; } void setup_frame(int sig, struct target_sigaction *ka, @@ -189,15 +188,9 @@ void setup_frame(int sig, struct target_sigaction *ka, /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa_flags & TARGET_SA_RESTORER) { - regs->pr = (unsigned long) ka->sa_restorer; + regs->pr = ka->sa_restorer; } else { - /* Generate return code (system call to sigreturn) */ - abi_ulong retcode_addr = frame_addr + - offsetof(struct target_sigframe, retcode); - __put_user(MOVW(2), &frame->retcode[0]); - __put_user(TRAP_NOARG, &frame->retcode[1]); - __put_user((TARGET_NR_sigreturn), &frame->retcode[2]); - regs->pr = (unsigned long) retcode_addr; + regs->pr = default_sigreturn; } /* Set up registers for signal handler */ @@ -206,7 +199,7 @@ void setup_frame(int sig, struct target_sigaction *ka, regs->gregs[5] = 0; regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc); regs->pc = (unsigned long) ka->_sa_handler; - regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK); + regs->flags &= ~(TB_FLAG_DELAY_SLOT_MASK | TB_FLAG_GUSA_MASK); unlock_user_struct(frame, frame_addr, 1); return; @@ -247,15 +240,9 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa_flags & TARGET_SA_RESTORER) { - regs->pr = (unsigned long) ka->sa_restorer; + regs->pr = ka->sa_restorer; } else { - /* Generate return code (system call to sigreturn) */ - abi_ulong retcode_addr = frame_addr + - offsetof(struct target_rt_sigframe, retcode); - __put_user(MOVW(2), &frame->retcode[0]); - __put_user(TRAP_NOARG, &frame->retcode[1]); - __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]); - regs->pr = (unsigned long) retcode_addr; + regs->pr = default_rt_sigreturn; } /* Set up registers for signal handler */ @@ -264,7 +251,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info); regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc); regs->pc = (unsigned long) ka->_sa_handler; - regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK); + regs->flags &= ~(TB_FLAG_DELAY_SLOT_MASK | TB_FLAG_GUSA_MASK); unlock_user_struct(frame, frame_addr, 1); return; @@ -299,12 +286,12 @@ long do_sigreturn(CPUSH4State *regs) restore_sigcontext(regs, &frame->sc); unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; } long do_rt_sigreturn(CPUSH4State *regs) @@ -326,10 +313,28 @@ long do_rt_sigreturn(CPUSH4State *regs) target_restore_altstack(&frame->uc.tuc_stack, regs); unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; +} + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint16_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 6, 0); + assert(tramp != NULL); + + default_sigreturn = sigtramp_page; + __put_user(MOVW(2), &tramp[0]); + __put_user(TRAP_NOARG, 
&tramp[1]); + __put_user(TARGET_NR_sigreturn, &tramp[2]); + + default_rt_sigreturn = sigtramp_page + 6; + __put_user(MOVW(2), &tramp[3]); + __put_user(TRAP_NOARG, &tramp[4]); + __put_user(TARGET_NR_rt_sigreturn, &tramp[5]); + + unlock_user(tramp, sigtramp_page, 2 * 6); } diff --git a/linux-user/sh4/target_mman.h b/linux-user/sh4/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/sh4/target_mman.h @@ -0,0 +1 @@ +#include "../generic/target_mman.h" diff --git a/linux-user/sh4/target_prctl.h b/linux-user/sh4/target_prctl.h new file mode 100644 index 0000000000..5629ddbf39 --- /dev/null +++ b/linux-user/sh4/target_prctl.h @@ -0,0 +1 @@ +#include "../generic/target_prctl_unalign.h" diff --git a/linux-user/sh4/target_resource.h b/linux-user/sh4/target_resource.h new file mode 100644 index 0000000000..227259594c --- /dev/null +++ b/linux-user/sh4/target_resource.h @@ -0,0 +1 @@ +#include "../generic/target_resource.h" diff --git a/linux-user/sh4/target_signal.h b/linux-user/sh4/target_signal.h index d7309b7136..eee6a1a7cd 100644 --- a/linux-user/sh4/target_signal.h +++ b/linux-user/sh4/target_signal.h @@ -1,25 +1,9 @@ #ifndef SH4_TARGET_SIGNAL_H #define SH4_TARGET_SIGNAL_H -/* this struct defines a stack used during syscall handling */ - -typedef struct target_sigaltstack { - abi_ulong ss_sp; - abi_int ss_flags; - abi_ulong ss_size; -} target_stack_t; - - -/* - * sigaltstack controls - */ -#define TARGET_SS_ONSTACK 1 -#define TARGET_SS_DISABLE 2 - -#define TARGET_MINSIGSTKSZ 2048 -#define TARGET_SIGSTKSZ 8192 - #include "../generic/signal.h" #define TARGET_ARCH_HAS_SETUP_FRAME +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* SH4_TARGET_SIGNAL_H */ diff --git a/linux-user/sh4/target_structs.h b/linux-user/sh4/target_structs.h index 00ac39478b..3a06f373c3 100644 --- a/linux-user/sh4/target_structs.h +++ b/linux-user/sh4/target_structs.h @@ -1,58 +1 @@ -/* - * SH4 specific structures for linux-user - * - * Copyright (c) 2013 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#ifndef SH4_TARGET_STRUCTS_H -#define SH4_TARGET_STRUCTS_H - -struct target_ipc_perm { - abi_int __key; /* Key. */ - abi_uint uid; /* Owner's user ID. */ - abi_uint gid; /* Owner's group ID. */ - abi_uint cuid; /* Creator's user ID. */ - abi_uint cgid; /* Creator's group ID. */ - abi_ushort mode; /* Read/write permission. */ - abi_ushort __pad1; - abi_ushort __seq; /* Sequence number. 
*/ - abi_ushort __pad2; - abi_ulong __unused1; - abi_ulong __unused2; -}; - -struct target_shmid_ds { - struct target_ipc_perm shm_perm; /* operation permission struct */ - abi_long shm_segsz; /* size of segment in bytes */ - abi_ulong shm_atime; /* time of last shmat() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused1; -#endif - abi_ulong shm_dtime; /* time of last shmdt() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused2; -#endif - abi_ulong shm_ctime; /* time of last change by shmctl() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused3; -#endif - abi_int shm_cpid; /* pid of creator */ - abi_int shm_lpid; /* pid of last shmop */ - abi_ulong shm_nattch; /* number of current attaches */ - abi_ulong __unused4; - abi_ulong __unused5; -}; - -#endif +#include "../generic/target_structs.h" diff --git a/linux-user/sh4/target_syscall.h b/linux-user/sh4/target_syscall.h index c1437adafe..148398855d 100644 --- a/linux-user/sh4/target_syscall.h +++ b/linux-user/sh4/target_syscall.h @@ -15,7 +15,6 @@ struct target_pt_regs { #define UNAME_MACHINE "sh4" #define UNAME_MINIMUM_RELEASE "2.6.32" -#define TARGET_MINSIGSTKSZ 2048 #define TARGET_MCL_CURRENT 1 #define TARGET_MCL_FUTURE 2 #define TARGET_MCL_ONFAULT 4 diff --git a/linux-user/sh4/termbits.h b/linux-user/sh4/termbits.h index f91b5c51cf..eeabd2d7a9 100644 --- a/linux-user/sh4/termbits.h +++ b/linux-user/sh4/termbits.h @@ -273,7 +273,7 @@ ebugging only */ #define TARGET_TIOCSERGETLSR TARGET_IOR('T', 89, unsigned int) /* 0x5459 */ /* Get line sta tus register */ /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ -# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ +# define TARGET_TIOCSER_TEMT 0x01 /* Transmitter physically empty */ #define TARGET_TIOCSERGETMULTI TARGET_IOR('T', 90, int) /* 0x545A */ /* Get multiport config */ #define TARGET_TIOCSERSETMULTI TARGET_IOW('T', 91, int) /* 0x545B diff --git a/linux-user/signal-common.h b/linux-user/signal-common.h index ea86328b28..3e2dc604c2 100644 --- a/linux-user/signal-common.h +++ b/linux-user/signal-common.h @@ -20,6 +20,14 @@ #ifndef SIGNAL_COMMON_H #define SIGNAL_COMMON_H +#include "special-errno.h" + +/* Fallback addresses into sigtramp page. 
*/ +extern abi_ulong default_sigreturn; +extern abi_ulong default_rt_sigreturn; + +void setup_sigtramp(abi_ulong tramp_page); + int on_sig_stack(unsigned long sp); int sas_ss_flags(unsigned long sp); abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka); @@ -40,6 +48,7 @@ void tswap_siginfo(target_siginfo_t *tinfo, void set_sigmask(const sigset_t *set); void force_sig(int sig); void force_sigsegv(int oldsig); +void force_sig_fault(int sig, int code, abi_ulong addr); #if defined(TARGET_ARCH_HAS_SETUP_FRAME) void setup_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUArchState *env); @@ -47,4 +56,112 @@ void setup_frame(int sig, struct target_sigaction *ka, void setup_rt_frame(int sig, struct target_sigaction *ka, target_siginfo_t *info, target_sigset_t *set, CPUArchState *env); + +void process_pending_signals(CPUArchState *cpu_env); +void signal_init(void); +void queue_signal(CPUArchState *env, int sig, int si_type, + target_siginfo_t *info); +void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info); +void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo); +int target_to_host_signal(int sig); +int host_to_target_signal(int sig); +long do_sigreturn(CPUArchState *env); +long do_rt_sigreturn(CPUArchState *env); +abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, + CPUArchState *env); +int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset); +abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx, + abi_ulong unew_ctx, abi_long ctx_size); +/** + * block_signals: block all signals while handling this guest syscall + * + * Block all signals, and arrange that the signal mask is returned to + * its correct value for the guest before we resume execution of guest code. + * If this function returns non-zero, then the caller should immediately + * return -QEMU_ERESTARTSYS to the main loop, which will take the pending + * signal and restart execution of the syscall. + * If block_signals() returns zero, then the caller can continue with + * emulation of the system call knowing that no signals can be taken + * (and therefore that no race conditions will result). + * This should only be called once, because if it is called a second time + * it will always return non-zero. (Think of it like a mutex that can't + * be recursively locked.) + * Signals will be unblocked again by process_pending_signals(). + * + * Return value: non-zero if there was a pending signal, zero if not. + */ +int block_signals(void); /* Returns non zero if signal pending */ + +/** + * process_sigsuspend_mask: read and apply syscall-local signal mask + * + * Read the guest signal mask from @sigset, length @sigsize. + * Convert that to a host signal mask and save it to sigpending_mask. + * + * Return value: negative target errno, or zero; + * store &sigpending_mask into *pset on success. + */ +int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset, + target_ulong sigsize); + +/** + * finish_sigsuspend_mask: finish a sigsuspend-like syscall + * + * Set in_sigsuspend if we need to use the modified sigset + * during process_pending_signals. 
+ */ +static inline void finish_sigsuspend_mask(int ret) +{ + if (ret != -QEMU_ERESTARTSYS) { + TaskState *ts = (TaskState *)thread_cpu->opaque; + ts->in_sigsuspend = 1; + } +} + +#if defined(SIGSTKFLT) && defined(TARGET_SIGSTKFLT) +#define MAKE_SIG_ENTRY_SIGSTKFLT MAKE_SIG_ENTRY(SIGSTKFLT) +#else +#define MAKE_SIG_ENTRY_SIGSTKFLT +#endif + +#if defined(SIGIOT) && defined(TARGET_SIGIOT) +#define MAKE_SIG_ENTRY_SIGIOT MAKE_SIG_ENTRY(SIGIOT) +#else +#define MAKE_SIG_ENTRY_SIGIOT +#endif + +#define MAKE_SIGNAL_LIST \ + MAKE_SIG_ENTRY(SIGHUP) \ + MAKE_SIG_ENTRY(SIGINT) \ + MAKE_SIG_ENTRY(SIGQUIT) \ + MAKE_SIG_ENTRY(SIGILL) \ + MAKE_SIG_ENTRY(SIGTRAP) \ + MAKE_SIG_ENTRY(SIGABRT) \ + MAKE_SIG_ENTRY(SIGBUS) \ + MAKE_SIG_ENTRY(SIGFPE) \ + MAKE_SIG_ENTRY(SIGKILL) \ + MAKE_SIG_ENTRY(SIGUSR1) \ + MAKE_SIG_ENTRY(SIGSEGV) \ + MAKE_SIG_ENTRY(SIGUSR2) \ + MAKE_SIG_ENTRY(SIGPIPE) \ + MAKE_SIG_ENTRY(SIGALRM) \ + MAKE_SIG_ENTRY(SIGTERM) \ + MAKE_SIG_ENTRY(SIGCHLD) \ + MAKE_SIG_ENTRY(SIGCONT) \ + MAKE_SIG_ENTRY(SIGSTOP) \ + MAKE_SIG_ENTRY(SIGTSTP) \ + MAKE_SIG_ENTRY(SIGTTIN) \ + MAKE_SIG_ENTRY(SIGTTOU) \ + MAKE_SIG_ENTRY(SIGURG) \ + MAKE_SIG_ENTRY(SIGXCPU) \ + MAKE_SIG_ENTRY(SIGXFSZ) \ + MAKE_SIG_ENTRY(SIGVTALRM) \ + MAKE_SIG_ENTRY(SIGPROF) \ + MAKE_SIG_ENTRY(SIGWINCH) \ + MAKE_SIG_ENTRY(SIGIO) \ + MAKE_SIG_ENTRY(SIGPWR) \ + MAKE_SIG_ENTRY(SIGSYS) \ + MAKE_SIG_ENTRY_SIGSTKFLT \ + MAKE_SIG_ENTRY_SIGIOT + #endif diff --git a/linux-user/signal.c b/linux-user/signal.c index a8faea6f09..61c6fa3fcf 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -18,18 +18,29 @@ */ #include "qemu/osdep.h" #include "qemu/bitops.h" +#include "exec/gdbstub.h" +#include "hw/core/tcg-cpu-ops.h" + #include #include #include "qemu.h" +#include "user-internals.h" +#include "strace.h" +#include "loader.h" #include "trace.h" #include "signal-common.h" +#include "host-signal.h" +#include "user/safe-syscall.h" static struct target_sigaction sigact_table[TARGET_NSIG]; static void host_signal_handler(int host_signum, siginfo_t *info, void *puc); +/* Fallback addresses into sigtramp page. 
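process_sigsuspend_mask() and finish_sigsuspend_mask() above are declared with their contracts, but no caller appears in this hunk. A sketch of the intended calling sequence for a sigsuspend-style syscall follows; it is illustrative only, and the surrounding plumbing (get_errno(), safe_syscall(), __NR_rt_sigsuspend, SIGSET_T_SIZE) is assumed from the rest of linux-user rather than taken from this patch.

/* Hypothetical handler body for a rt_sigsuspend-like guest syscall. */
static abi_long do_example_rt_sigsuspend(abi_ulong guest_set, abi_ulong sigsetsize)
{
    sigset_t *set;
    abi_long ret;

    /* Read, convert and stash the guest mask; a bad pointer or size fails here. */
    ret = process_sigsuspend_mask(&set, guest_set, sigsetsize);
    if (ret != 0) {
        return ret;
    }
    /* Sleep in the host kernel with that mask installed. */
    ret = get_errno(safe_syscall(__NR_rt_sigsuspend, set, SIGSET_T_SIZE));
    /*
     * Unless we are being rewound for a restart, record that the
     * suspended mask should be used while delivering the pending signal.
     */
    finish_sigsuspend_mask(ret);
    return ret;
}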
*/ +abi_ulong default_sigreturn; +abi_ulong default_rt_sigreturn; /* * System includes define _NSIG as SIGRTMAX + 1, @@ -42,40 +53,9 @@ static void host_signal_handler(int host_signum, siginfo_t *info, QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG); #endif static uint8_t host_to_target_signal_table[_NSIG] = { - [SIGHUP] = TARGET_SIGHUP, - [SIGINT] = TARGET_SIGINT, - [SIGQUIT] = TARGET_SIGQUIT, - [SIGILL] = TARGET_SIGILL, - [SIGTRAP] = TARGET_SIGTRAP, - [SIGABRT] = TARGET_SIGABRT, -/* [SIGIOT] = TARGET_SIGIOT,*/ - [SIGBUS] = TARGET_SIGBUS, - [SIGFPE] = TARGET_SIGFPE, - [SIGKILL] = TARGET_SIGKILL, - [SIGUSR1] = TARGET_SIGUSR1, - [SIGSEGV] = TARGET_SIGSEGV, - [SIGUSR2] = TARGET_SIGUSR2, - [SIGPIPE] = TARGET_SIGPIPE, - [SIGALRM] = TARGET_SIGALRM, - [SIGTERM] = TARGET_SIGTERM, -#ifdef SIGSTKFLT - [SIGSTKFLT] = TARGET_SIGSTKFLT, -#endif - [SIGCHLD] = TARGET_SIGCHLD, - [SIGCONT] = TARGET_SIGCONT, - [SIGSTOP] = TARGET_SIGSTOP, - [SIGTSTP] = TARGET_SIGTSTP, - [SIGTTIN] = TARGET_SIGTTIN, - [SIGTTOU] = TARGET_SIGTTOU, - [SIGURG] = TARGET_SIGURG, - [SIGXCPU] = TARGET_SIGXCPU, - [SIGXFSZ] = TARGET_SIGXFSZ, - [SIGVTALRM] = TARGET_SIGVTALRM, - [SIGPROF] = TARGET_SIGPROF, - [SIGWINCH] = TARGET_SIGWINCH, - [SIGIO] = TARGET_SIGIO, - [SIGPWR] = TARGET_SIGPWR, - [SIGSYS] = TARGET_SIGSYS, +#define MAKE_SIG_ENTRY(sig) [sig] = TARGET_##sig, + MAKE_SIGNAL_LIST +#undef MAKE_SIG_ENTRY /* next signals stay the same */ }; @@ -202,7 +182,7 @@ int block_signals(void) /* Wrapper for sigprocmask function * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset - * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if + * are host signal set, not guest ones. Returns -QEMU_ERESTARTSYS if * a signal was already pending and the syscall must be restarted, or * 0 on success. * If set is NULL, this is guaranteed not to fail. @@ -219,7 +199,7 @@ int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset) int i; if (block_signals()) { - return -TARGET_ERESTARTSYS; + return -QEMU_ERESTARTSYS; } switch (how) { @@ -247,7 +227,6 @@ int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset) return 0; } -#if !defined(TARGET_NIOS2) /* Just set the guest's signal mask to the specified value; the * caller is assumed to have called block_signals() already. */ @@ -257,7 +236,6 @@ void set_sigmask(const sigset_t *set) ts->signal_mask = *set; } -#endif /* sigaltstack management */ @@ -395,7 +373,12 @@ static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo, case TARGET_SIGCHLD: tinfo->_sifields._sigchld._pid = info->si_pid; tinfo->_sifields._sigchld._uid = info->si_uid; - tinfo->_sifields._sigchld._status = info->si_status; + if (si_code == CLD_EXITED) + tinfo->_sifields._sigchld._status = info->si_status; + else + tinfo->_sifields._sigchld._status + = host_to_target_signal(info->si_status & 0x7f) + | (info->si_status & ~0x7f); tinfo->_sifields._sigchld._utime = info->si_utime; tinfo->_sifields._sigchld._stime = info->si_stime; si_type = QEMU_SI_CHLD; @@ -636,7 +619,7 @@ void force_sig(int sig) { CPUState *cpu = thread_cpu; CPUArchState *env = cpu->env_ptr; - target_siginfo_t info; + target_siginfo_t info = {}; info.si_signo = sig; info.si_errno = 0; @@ -646,6 +629,23 @@ void force_sig(int sig) queue_signal(env, info.si_signo, QEMU_SI_KILL, &info); } +/* + * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the + * 'force' part is handled in process_pending_signals(). 
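An aside on the MAKE_SIG_ENTRY / MAKE_SIGNAL_LIST X-macro used just above: the host-to-target conversion table is its only expansion in this patch, but the same list can be re-expanded with a different MAKE_SIG_ENTRY to build other per-signal tables. The example below is purely illustrative and not part of the patch.

static const char * const example_host_signal_names[_NSIG] = {
#define MAKE_SIG_ENTRY(sig)  [sig] = #sig,   /* e.g. [SIGHUP] = "SIGHUP", */
    MAKE_SIGNAL_LIST
#undef MAKE_SIG_ENTRY
};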
+ */ +void force_sig_fault(int sig, int code, abi_ulong addr) +{ + CPUState *cpu = thread_cpu; + CPUArchState *env = cpu->env_ptr; + target_siginfo_t info = {}; + + info.si_signo = sig; + info.si_errno = 0; + info.si_code = code; + info._sifields._sigfault._addr = addr; + queue_signal(env, sig, QEMU_SI_FAULT, &info); +} + /* Force a SIGSEGV if we couldn't write to memory trying to set * up the signal frame. oldsig is the signal we were trying to handle * at the point of failure. @@ -661,11 +661,41 @@ void force_sigsegv(int oldsig) } force_sig(TARGET_SIGSEGV); } - #endif +void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr, + MMUAccessType access_type, bool maperr, uintptr_t ra) +{ + const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; + + if (tcg_ops->record_sigsegv) { + tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra); + } + + force_sig_fault(TARGET_SIGSEGV, + maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR, + addr); + cpu->exception_index = EXCP_INTERRUPT; + cpu_loop_exit_restore(cpu, ra); +} + +void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr, + MMUAccessType access_type, uintptr_t ra) +{ + const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; + + if (tcg_ops->record_sigbus) { + tcg_ops->record_sigbus(cpu, addr, access_type, ra); + } + + force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr); + cpu->exception_index = EXCP_INTERRUPT; + cpu_loop_exit_restore(cpu, ra); +} + /* abort execution with signal */ -static void QEMU_NORETURN dump_core_and_abort(int target_sig) +static G_NORETURN +void dump_core_and_abort(int target_sig) { CPUState *cpu = thread_cpu; CPUArchState *env = cpu->env_ptr; @@ -674,7 +704,7 @@ static void QEMU_NORETURN dump_core_and_abort(int target_sig) struct sigaction act; host_sig = target_to_host_signal(target_sig); - trace_user_force_sig(env, target_sig, host_sig); + trace_user_dump_core_and_abort(env, target_sig, host_sig); gdb_signalled(env, target_sig); /* dump core if supported by target binary format */ @@ -720,8 +750,8 @@ static void QEMU_NORETURN dump_core_and_abort(int target_sig) /* queue a signal so that it will be send to the virtual CPU as soon as possible */ -int queue_signal(CPUArchState *env, int sig, int si_type, - target_siginfo_t *info) +void queue_signal(CPUArchState *env, int sig, int si_type, + target_siginfo_t *info) { CPUState *cpu = env_cpu(env); TaskState *ts = cpu->opaque; @@ -734,67 +764,130 @@ int queue_signal(CPUArchState *env, int sig, int si_type, ts->sync_signal.pending = sig; /* signal that a new signal is pending */ qatomic_set(&ts->signal_pending, 1); - return 1; /* indicates that the signal was queued */ } -#ifndef HAVE_SAFE_SYSCALL + +/* Adjust the signal context to rewind out of safe-syscall if we're in it */ static inline void rewind_if_in_safe_syscall(void *puc) { - /* Default version: never rewind */ -} -#endif + host_sigcontext *uc = (host_sigcontext *)puc; + uintptr_t pcreg = host_signal_pc(uc); -static void host_signal_handler(int host_signum, siginfo_t *info, - void *puc) + if (pcreg > (uintptr_t)safe_syscall_start + && pcreg < (uintptr_t)safe_syscall_end) { + host_signal_set_pc(uc, (uintptr_t)safe_syscall_start); + } +} + +static void host_signal_handler(int host_sig, siginfo_t *info, void *puc) { CPUArchState *env = thread_cpu->env_ptr; CPUState *cpu = env_cpu(env); TaskState *ts = cpu->opaque; - - int sig; target_siginfo_t tinfo; - ucontext_t *uc = puc; + host_sigcontext *uc = puc; struct emulated_sigtable *k; + int guest_sig; + uintptr_t pc = 0; + bool sync_sig = 
false; + void *sigmask = host_signal_mask(uc); - /* the CPU emulator uses some host signals to detect exceptions, - we forward to it some signals */ - if ((host_signum == SIGSEGV || host_signum == SIGBUS) - && info->si_code > 0) { - if (cpu_signal_handler(host_signum, info, puc)) - return; + /* + * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special + * handling wrt signal blocking and unwinding. + */ + if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) { + MMUAccessType access_type; + uintptr_t host_addr; + abi_ptr guest_addr; + bool is_write; + + host_addr = (uintptr_t)info->si_addr; + + /* + * Convert forcefully to guest address space: addresses outside + * reserved_va are still valid to report via SEGV_MAPERR. + */ + guest_addr = h2g_nocheck(host_addr); + + pc = host_signal_pc(uc); + is_write = host_signal_write(info, uc); + access_type = adjust_signal_pc(&pc, is_write); + + if (host_sig == SIGSEGV) { + bool maperr = true; + + if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) { + /* If this was a write to a TB protected page, restart. */ + if (is_write && + handle_sigsegv_accerr_write(cpu, sigmask, pc, guest_addr)) { + return; + } + + /* + * With reserved_va, the whole address space is PROT_NONE, + * which means that we may get ACCERR when we want MAPERR. + */ + if (page_get_flags(guest_addr) & PAGE_VALID) { + maperr = false; + } else { + info->si_code = SEGV_MAPERR; + } + } + + sigprocmask(SIG_SETMASK, sigmask, NULL); + cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc); + } else { + sigprocmask(SIG_SETMASK, sigmask, NULL); + if (info->si_code == BUS_ADRALN) { + cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc); + } + } + + sync_sig = true; } /* get target signal number */ - sig = host_to_target_signal(host_signum); - if (sig < 1 || sig > TARGET_NSIG) + guest_sig = host_to_target_signal(host_sig); + if (guest_sig < 1 || guest_sig > TARGET_NSIG) { return; - trace_user_host_signal(env, host_signum, sig); + } + trace_user_host_signal(env, host_sig, guest_sig); + + host_to_target_siginfo_noswap(&tinfo, info); + k = &ts->sigtab[guest_sig - 1]; + k->info = tinfo; + k->pending = guest_sig; + ts->signal_pending = 1; + + /* + * For synchronous signals, unwind the cpu state to the faulting + * insn and then exit back to the main loop so that the signal + * is delivered immediately. + */ + if (sync_sig) { + cpu->exception_index = EXCP_INTERRUPT; + cpu_loop_exit_restore(cpu, pc); + } rewind_if_in_safe_syscall(puc); - host_to_target_siginfo_noswap(&tinfo, info); - k = &ts->sigtab[sig - 1]; - k->info = tinfo; - k->pending = sig; - ts->signal_pending = 1; - - /* Block host signals until target signal handler entered. We + /* + * Block host signals until target signal handler entered. We * can't block SIGSEGV or SIGBUS while we're executing guest * code in case the guest code provokes one in the window between * now and it getting out to the main loop. Signals will be * unblocked again in process_pending_signals(). * - * WARNING: we cannot use sigfillset() here because the uc_sigmask + * WARNING: we cannot use sigfillset() here because the sigmask * field is a kernel sigset_t, which is much smaller than the * libc sigset_t which sigfillset() operates on. Using sigfillset() * would write 0xff bytes off the end of the structure and trash * data on the struct. - * We can't use sizeof(uc->uc_sigmask) either, because the libc - * headers define the struct field with the wrong (too large) type. 
*/ - memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE); - sigdelset(&uc->uc_sigmask, SIGSEGV); - sigdelset(&uc->uc_sigmask, SIGBUS); + memset(sigmask, 0xff, SIGSET_T_SIZE); + sigdelset(sigmask, SIGSEGV); + sigdelset(sigmask, SIGBUS); /* interrupt the virtual CPU as soon as possible */ cpu_exit(thread_cpu); @@ -862,7 +955,7 @@ int do_sigaction(int sig, const struct target_sigaction *act, } if (block_signals()) { - return -TARGET_ERESTARTSYS; + return -QEMU_ERESTARTSYS; } k = &sigact_table[sig - 1]; @@ -876,7 +969,6 @@ int do_sigaction(int sig, const struct target_sigaction *act, oact->sa_mask = k->sa_mask; } if (act) { - /* FIXME: This is not threadsafe. */ __get_user(k->_sa_handler, &act->_sa_handler); __get_user(k->sa_flags, &act->sa_flags); #ifdef TARGET_ARCH_HAS_SA_RESTORER @@ -1026,7 +1118,6 @@ void process_pending_signals(CPUArchState *cpu_env) sigset_t *blocked_set; while (qatomic_read(&ts->signal_pending)) { - /* FIXME: This is not threadsafe. */ sigfillset(&set); sigprocmask(SIG_SETMASK, &set, 0); @@ -1078,3 +1169,26 @@ void process_pending_signals(CPUArchState *cpu_env) } ts->in_sigsuspend = 0; } + +int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset, + target_ulong sigsize) +{ + TaskState *ts = (TaskState *)thread_cpu->opaque; + sigset_t *host_set = &ts->sigsuspend_mask; + target_sigset_t *target_sigset; + + if (sigsize != sizeof(*target_sigset)) { + /* Like the kernel, we enforce correct size sigsets */ + return -TARGET_EINVAL; + } + + target_sigset = lock_user(VERIFY_READ, sigset, sigsize, 1); + if (!target_sigset) { + return -TARGET_EFAULT; + } + target_to_host_sigset(host_set, target_sigset); + unlock_user(target_sigset, sigset, 0); + + *pset = host_set; + return 0; +} diff --git a/linux-user/sparc/cpu_loop.c b/linux-user/sparc/cpu_loop.c index 02532f198d..434c90a55f 100644 --- a/linux-user/sparc/cpu_loop.c +++ b/linux-user/sparc/cpu_loop.c @@ -18,9 +18,10 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu.h" +#include "user-internals.h" #include "cpu_loop-common.h" +#include "signal-common.h" #define SPARC64_STACK_BIAS 2047 @@ -153,7 +154,6 @@ void cpu_loop (CPUSPARCState *env) CPUState *cs = env_cpu(env); int trapnr; abi_long ret; - target_siginfo_t info; while (1) { cpu_exec_start(cs); @@ -179,7 +179,7 @@ void cpu_loop (CPUSPARCState *env) env->regwptr[2], env->regwptr[3], env->regwptr[4], env->regwptr[5], 0, 0); - if (ret == -TARGET_ERESTARTSYS || ret == -TARGET_QEMU_ESIGRETURN) { + if (ret == -QEMU_ERESTARTSYS || ret == -QEMU_ESIGRETURN) { break; } if ((abi_ulong)ret >= (abi_ulong)(-515)) { @@ -217,17 +217,6 @@ void cpu_loop (CPUSPARCState *env) case TT_WIN_UNF: /* window underflow */ restore_window(env); break; - case TT_TFAULT: - case TT_DFAULT: - { - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - /* XXX: check env->error_code */ - info.si_code = TARGET_SEGV_MAPERR; - info._sifields._sigfault._addr = env->mmuregs[4]; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - } - break; #else case TT_SPILL: /* window overflow */ save_window(env); @@ -235,20 +224,6 @@ void cpu_loop (CPUSPARCState *env) case TT_FILL: /* window underflow */ restore_window(env); break; - case TT_TFAULT: - case TT_DFAULT: - { - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - /* XXX: check env->error_code */ - info.si_code = TARGET_SEGV_MAPERR; - if (trapnr == TT_DFAULT) - info._sifields._sigfault._addr = env->dmmu.mmuregs[4]; - else - info._sifields._sigfault._addr = cpu_tsptr(env)->tpc; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - } 
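As an aside on the sigmask handling above: the tail of host_signal_handler() blocks every host signal except SIGSEGV and SIGBUS before returning to guest code. On an ordinary libc sigset_t the portable way to build such a mask is sigfillset() plus sigdelset(); the handler cannot do that on the context's mask because that field is the smaller kernel sigset_t, which is why it memsets SIGSET_T_SIZE bytes instead. A small sketch of the portable variant, for comparison only:

#include <signal.h>
#include <stdio.h>

int main(void)
{
    sigset_t blocked;

    /* "Everything except SIGSEGV and SIGBUS", expressed with the libc
     * helpers that are safe on a full-sized sigset_t. */
    sigfillset(&blocked);
    sigdelset(&blocked, SIGSEGV);
    sigdelset(&blocked, SIGBUS);

    printf("SIGSEGV blocked? %d\n", sigismember(&blocked, SIGSEGV)); /* 0 */
    printf("SIGTERM blocked? %d\n", sigismember(&blocked, SIGTERM)); /* 1 */
    return 0;
}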
- break; #ifndef TARGET_ABI32 case 0x16e: flush_windows(env); @@ -264,19 +239,10 @@ void cpu_loop (CPUSPARCState *env) /* just indicate that signals should be handled asap */ break; case TT_ILL_INSN: - { - info.si_signo = TARGET_SIGILL; - info.si_errno = 0; - info.si_code = TARGET_ILL_ILLOPC; - info._sifields._sigfault._addr = env->pc; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - } + force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->pc); break; case EXCP_DEBUG: - info.si_signo = TARGET_SIGTRAP; - info.si_errno = 0; - info.si_code = TARGET_TRAP_BRKPT; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc); break; case EXCP_ATOMIC: cpu_exec_step_atomic(cs); diff --git a/linux-user/sparc/signal.c b/linux-user/sparc/signal.c index 0cc3db5570..b501750fe0 100644 --- a/linux-user/sparc/signal.c +++ b/linux-user/sparc/signal.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" @@ -241,6 +242,12 @@ static void restore_fpu(struct target_siginfo_fpu *fpu, CPUSPARCState *env) } #ifdef TARGET_ARCH_HAS_SETUP_FRAME +static void install_sigtramp(uint32_t *tramp, int syscall) +{ + __put_user(0x82102000u + syscall, &tramp[0]); /* mov syscall, %g1 */ + __put_user(0x91d02010u, &tramp[1]); /* t 0x10 */ +} + void setup_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUSPARCState *env) { @@ -290,13 +297,9 @@ void setup_frame(int sig, struct target_sigaction *ka, if (ka->ka_restorer) { env->regwptr[WREG_O7] = ka->ka_restorer; } else { - env->regwptr[WREG_O7] = sf_addr + - offsetof(struct target_signal_frame, insns) - 2 * 4; - - /* mov __NR_sigreturn, %g1 */ - __put_user(0x821020d8u, &sf->insns[0]); - /* t 0x10 */ - __put_user(0x91d02010u, &sf->insns[1]); + /* Not used, but retain for ABI compatibility. */ + install_sigtramp(sf->insns, TARGET_NR_sigreturn); + env->regwptr[WREG_O7] = default_sigreturn; } unlock_user(sf, sf_addr, sf_size); } @@ -357,13 +360,9 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, if (ka->ka_restorer) { env->regwptr[WREG_O7] = ka->ka_restorer; } else { - env->regwptr[WREG_O7] = - sf_addr + offsetof(struct target_rt_signal_frame, insns) - 2 * 4; - - /* mov __NR_rt_sigreturn, %g1 */ - __put_user(0x82102065u, &sf->insns[0]); - /* t 0x10 */ - __put_user(0x91d02010u, &sf->insns[1]); + /* Not used, but retain for ABI compatibility. 
*/ + install_sigtramp(sf->insns, TARGET_NR_rt_sigreturn); + env->regwptr[WREG_O7] = default_rt_sigreturn; } #else env->regwptr[WREG_O7] = ka->ka_restorer; @@ -432,12 +431,12 @@ long do_sigreturn(CPUSPARCState *env) set_sigmask(&host_set); unlock_user_struct(sf, sf_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; segv_and_exit: unlock_user_struct(sf, sf_addr, 0); force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; #else return -TARGET_ENOSYS; #endif @@ -496,12 +495,12 @@ long do_rt_sigreturn(CPUSPARCState *env) env->npc = tnpc; unlock_user_struct(sf, sf_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; segv_and_exit: unlock_user_struct(sf, sf_addr, 0); force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; } #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) @@ -774,4 +773,18 @@ do_sigsegv: unlock_user_struct(ucp, ucp_addr, 1); force_sig(TARGET_SIGSEGV); } +#else +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0); + assert(tramp != NULL); + + default_sigreturn = sigtramp_page; + install_sigtramp(tramp, TARGET_NR_sigreturn); + + default_rt_sigreturn = sigtramp_page + 8; + install_sigtramp(tramp + 2, TARGET_NR_rt_sigreturn); + + unlock_user(tramp, sigtramp_page, 2 * 8); +} #endif diff --git a/linux-user/sparc/target_mman.h b/linux-user/sparc/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/sparc/target_mman.h @@ -0,0 +1 @@ +#include "../generic/target_mman.h" diff --git a/linux-user/sparc/target_prctl.h b/linux-user/sparc/target_prctl.h new file mode 100644 index 0000000000..eb53b31ad5 --- /dev/null +++ b/linux-user/sparc/target_prctl.h @@ -0,0 +1 @@ +/* No special prctl support required. */ diff --git a/linux-user/sparc/target_resource.h b/linux-user/sparc/target_resource.h new file mode 100644 index 0000000000..d9a2fb814a --- /dev/null +++ b/linux-user/sparc/target_resource.h @@ -0,0 +1,17 @@ +#ifndef SPARC_TARGET_RESOURCE_H +#define SPARC_TARGET_RESOURCE_H + +#include "../generic/target_resource.h" + +#if TARGET_ABI_BITS == 32 +#undef TARGET_RLIM_INFINITY +#define TARGET_RLIM_INFINITY 0x7fffffffUL +#endif + +#undef TARGET_RLIMIT_NOFILE +#define TARGET_RLIMIT_NOFILE 6 + +#undef TARGET_RLIMIT_NPROC +#define TARGET_RLIMIT_NPROC 7 + +#endif diff --git a/linux-user/sparc/target_signal.h b/linux-user/sparc/target_signal.h index 34f9a12519..87757f0c4e 100644 --- a/linux-user/sparc/target_signal.h +++ b/linux-user/sparc/target_signal.h @@ -65,10 +65,13 @@ typedef struct target_sigaltstack { #define TARGET_ARCH_HAS_KA_RESTORER 1 #define TARGET_MINSIGSTKSZ 4096 -#define TARGET_SIGSTKSZ 16384 #ifdef TARGET_ABI32 #define TARGET_ARCH_HAS_SETUP_FRAME +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 +#else +/* For sparc64, use of KA_RESTORER is mandatory. */ +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0 #endif /* bit-flags */ diff --git a/linux-user/sparc/target_syscall.h b/linux-user/sparc/target_syscall.h index 087b39d39c..be77e44eb8 100644 --- a/linux-user/sparc/target_syscall.h +++ b/linux-user/sparc/target_syscall.h @@ -34,7 +34,6 @@ struct target_pt_regs { * and copy_thread(). 
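For orientation, the setup_sigtramp() added above lays out two 8-byte trampolines in the sigtramp page, each holding the two-instruction sequence "mov <nr>, %g1; t 0x10", and records their guest addresses in default_sigreturn / default_rt_sigreturn. A rough host-side sketch of that layout; the numeric syscall values mirror the inline encodings removed above, and the real code stores the words with __put_user() so target byte order is handled, which this sketch does not do.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the guest syscall numbers (values as seen in the
 * removed 0x821020d8 / 0x82102065 encodings above). */
enum { DEMO_NR_sigreturn = 0xd8, DEMO_NR_rt_sigreturn = 0x65 };

/* Encode the two-instruction sparc trampoline:
 *   mov <nr>, %g1   (0x82102000 | nr)
 *   t 0x10          (0x91d02010)                                  */
static void demo_install_sigtramp(uint32_t *tramp, int nr)
{
    tramp[0] = 0x82102000u + nr;
    tramp[1] = 0x91d02010u;
}

int main(void)
{
    uint32_t page[4];                 /* mock 16-byte sigtramp page */
    uintptr_t base = (uintptr_t)page;

    demo_install_sigtramp(&page[0], DEMO_NR_sigreturn);     /* at base     */
    demo_install_sigtramp(&page[2], DEMO_NR_rt_sigreturn);  /* at base + 8 */

    printf("default_sigreturn    = %#lx\n", (unsigned long)base);
    printf("default_rt_sigreturn = %#lx\n", (unsigned long)(base + 8));
    return 0;
}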
*/ #define TARGET_CLONE_BACKWARDS -#define TARGET_MINSIGSTKSZ 4096 #define TARGET_MCL_CURRENT 0x2000 #define TARGET_MCL_FUTURE 0x4000 #define TARGET_MCL_ONFAULT 0x8000 diff --git a/linux-user/strace.c b/linux-user/strace.c index cce0a5d1e3..9ae5a812cd 100644 --- a/linux-user/strace.c +++ b/linux-user/strace.c @@ -1,4 +1,5 @@ #include "qemu/osdep.h" + #include #include #include @@ -14,15 +15,19 @@ #include #include #include "qemu.h" +#include "user-internals.h" +#include "strace.h" +#include "signal-common.h" +#include "target_mman.h" struct syscallname { int nr; const char *name; const char *format; - void (*call)(void *, const struct syscallname *, + void (*call)(CPUArchState *, const struct syscallname *, abi_long, abi_long, abi_long, abi_long, abi_long, abi_long); - void (*result)(void *, const struct syscallname *, abi_long, + void (*result)(CPUArchState *, const struct syscallname *, abi_long, abi_long, abi_long, abi_long, abi_long, abi_long, abi_long); }; @@ -78,6 +83,7 @@ UNUSED static void print_buf(abi_long addr, abi_long len, int last); UNUSED static void print_raw_param(const char *, abi_long, int); UNUSED static void print_timeval(abi_ulong, int); UNUSED static void print_timespec(abi_ulong, int); +UNUSED static void print_timespec64(abi_ulong, int); UNUSED static void print_timezone(abi_ulong, int); UNUSED static void print_itimerval(abi_ulong, int); UNUSED static void print_number(abi_long, int); @@ -138,30 +144,21 @@ if( cmd == val ) { \ qemu_log("%d", cmd); } +static const char * const target_signal_name[] = { +#define MAKE_SIG_ENTRY(sig) [TARGET_##sig] = #sig, + MAKE_SIGNAL_LIST +#undef MAKE_SIG_ENTRY +}; + static void print_signal(abi_ulong arg, int last) { const char *signal_name = NULL; - switch(arg) { - case TARGET_SIGHUP: signal_name = "SIGHUP"; break; - case TARGET_SIGINT: signal_name = "SIGINT"; break; - case TARGET_SIGQUIT: signal_name = "SIGQUIT"; break; - case TARGET_SIGILL: signal_name = "SIGILL"; break; - case TARGET_SIGABRT: signal_name = "SIGABRT"; break; - case TARGET_SIGFPE: signal_name = "SIGFPE"; break; - case TARGET_SIGKILL: signal_name = "SIGKILL"; break; - case TARGET_SIGSEGV: signal_name = "SIGSEGV"; break; - case TARGET_SIGPIPE: signal_name = "SIGPIPE"; break; - case TARGET_SIGALRM: signal_name = "SIGALRM"; break; - case TARGET_SIGTERM: signal_name = "SIGTERM"; break; - case TARGET_SIGUSR1: signal_name = "SIGUSR1"; break; - case TARGET_SIGUSR2: signal_name = "SIGUSR2"; break; - case TARGET_SIGCHLD: signal_name = "SIGCHLD"; break; - case TARGET_SIGCONT: signal_name = "SIGCONT"; break; - case TARGET_SIGSTOP: signal_name = "SIGSTOP"; break; - case TARGET_SIGTTIN: signal_name = "SIGTTIN"; break; - case TARGET_SIGTTOU: signal_name = "SIGTTOU"; break; + + if (arg < ARRAY_SIZE(target_signal_name)) { + signal_name = target_signal_name[arg]; } + if (signal_name == NULL) { print_raw_param("%ld", arg, last); return; @@ -590,7 +587,7 @@ print_fdset(int n, abi_ulong target_fds_addr) /* select */ #ifdef TARGET_NR__newselect static void -print_newselect(void *cpu_env, const struct syscallname *name, +print_newselect(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6) { @@ -608,7 +605,7 @@ print_newselect(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_semctl static void -print_semctl(void *cpu_env, const struct syscallname *name, +print_semctl(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, 
abi_long arg5, abi_long arg6) { @@ -620,7 +617,7 @@ print_semctl(void *cpu_env, const struct syscallname *name, #endif static void -print_execve(void *cpu_env, const struct syscallname *name, +print_execve(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6) { @@ -653,7 +650,7 @@ print_execve(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_ipc static void -print_ipc(void *cpu_env, const struct syscallname *name, +print_ipc(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6) { @@ -686,7 +683,7 @@ print_syscall_err(abi_long ret) const char *errstr; qemu_log(" = "); - if (ret < 0) { + if (is_error(ret)) { errstr = target_strerror(-ret); if (errstr) { qemu_log("-1 errno=%d (%s)", (int)-ret, errstr); @@ -697,7 +694,7 @@ print_syscall_err(abi_long ret) } static void -print_syscall_ret_addr(void *cpu_env, const struct syscallname *name, +print_syscall_ret_addr(CPUArchState *cpu_env, const struct syscallname *name, abi_long ret, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) @@ -718,7 +715,7 @@ print_syscall_ret_raw(struct syscallname *name, abi_long ret) #ifdef TARGET_NR__newselect static void -print_syscall_ret_newselect(void *cpu_env, const struct syscallname *name, +print_syscall_ret_newselect(CPUArchState *cpu_env, const struct syscallname *name, abi_long ret, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) @@ -748,7 +745,7 @@ print_syscall_ret_newselect(void *cpu_env, const struct syscallname *name, #define TARGET_TIME_ERROR 5 /* clock not synchronized */ #ifdef TARGET_NR_adjtimex static void -print_syscall_ret_adjtimex(void *cpu_env, const struct syscallname *name, +print_syscall_ret_adjtimex(CPUArchState *cpu_env, const struct syscallname *name, abi_long ret, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) @@ -783,7 +780,7 @@ print_syscall_ret_adjtimex(void *cpu_env, const struct syscallname *name, #if defined(TARGET_NR_clock_gettime) || defined(TARGET_NR_clock_getres) static void -print_syscall_ret_clock_gettime(void *cpu_env, const struct syscallname *name, +print_syscall_ret_clock_gettime(CPUArchState *cpu_env, const struct syscallname *name, abi_long ret, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) @@ -800,9 +797,27 @@ print_syscall_ret_clock_gettime(void *cpu_env, const struct syscallname *name, #define print_syscall_ret_clock_getres print_syscall_ret_clock_gettime #endif +#if defined(TARGET_NR_clock_gettime64) +static void +print_syscall_ret_clock_gettime64(CPUArchState *cpu_env, const struct syscallname *name, + abi_long ret, abi_long arg0, abi_long arg1, + abi_long arg2, abi_long arg3, abi_long arg4, + abi_long arg5) +{ + if (!print_syscall_err(ret)) { + qemu_log(TARGET_ABI_FMT_ld, ret); + qemu_log(" ("); + print_timespec64(arg1, 1); + qemu_log(")"); + } + + qemu_log("\n"); +} +#endif + #ifdef TARGET_NR_gettimeofday static void -print_syscall_ret_gettimeofday(void *cpu_env, const struct syscallname *name, +print_syscall_ret_gettimeofday(CPUArchState *cpu_env, const struct syscallname *name, abi_long ret, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) @@ -821,7 +836,7 @@ print_syscall_ret_gettimeofday(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_getitimer 
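As an aside, print_syscall_err() above now tests is_error(ret) rather than ret < 0, so a large mmap()-style return value is not misreported as a failure. The check leans on the usual Linux convention that only the top few thousand values of the address space encode -errno. A hedged standalone sketch of that idea; demo_is_error() is not QEMU's exact macro.

#include <stdint.h>
#include <stdio.h>

static int demo_is_error(intptr_t ret)
{
    /* errno values 1..4095 come back as -1..-4095, i.e. the very top
     * of the unsigned address range; anything below that window is a
     * real result, such as a pointer returned by mmap(). */
    return (uintptr_t)ret >= (uintptr_t)-4095;
}

int main(void)
{
    printf("%d\n", demo_is_error(-2));     /* 1: looks like -ENOENT */
    printf("%d\n", demo_is_error(-8192));  /* 0: high address, not an errno */
    return 0;
}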
static void -print_syscall_ret_getitimer(void *cpu_env, const struct syscallname *name, +print_syscall_ret_getitimer(CPUArchState *cpu_env, const struct syscallname *name, abi_long ret, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) @@ -840,7 +855,7 @@ print_syscall_ret_getitimer(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_getitimer static void -print_syscall_ret_setitimer(void *cpu_env, const struct syscallname *name, +print_syscall_ret_setitimer(CPUArchState *cpu_env, const struct syscallname *name, abi_long ret, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) @@ -859,7 +874,7 @@ print_syscall_ret_setitimer(void *cpu_env, const struct syscallname *name, #if defined(TARGET_NR_listxattr) || defined(TARGET_NR_llistxattr) \ || defined(TARGGET_NR_flistxattr) static void -print_syscall_ret_listxattr(void *cpu_env, const struct syscallname *name, +print_syscall_ret_listxattr(CPUArchState *cpu_env, const struct syscallname *name, abi_long ret, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) @@ -891,7 +906,7 @@ print_syscall_ret_listxattr(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_ioctl static void -print_syscall_ret_ioctl(void *cpu_env, const struct syscallname *name, +print_syscall_ret_ioctl(CPUArchState *cpu_env, const struct syscallname *name, abi_long ret, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) @@ -1491,6 +1506,11 @@ print_file_mode(abi_long mode, int last) const char *sep = ""; const struct flags *m; + if (mode == 0) { + qemu_log("000%s", get_comma(last)); + return; + } + for (m = &mode_flags[0]; m->f_string != NULL; m++) { if ((m->f_value & mode) == m->f_value) { qemu_log("%s%s", m->f_string, sep); @@ -1657,6 +1677,27 @@ print_timespec(abi_ulong ts_addr, int last) } } +static void +print_timespec64(abi_ulong ts_addr, int last) +{ + if (ts_addr) { + struct target__kernel_timespec *ts; + + ts = lock_user(VERIFY_READ, ts_addr, sizeof(*ts), 1); + if (!ts) { + print_pointer(ts_addr, last); + return; + } + qemu_log("{tv_sec = %lld" + ",tv_nsec = %lld}%s", + (long long)tswap64(ts->tv_sec), (long long)tswap64(ts->tv_nsec), + get_comma(last)); + unlock_user(ts, ts_addr, 0); + } else { + qemu_log("NULL%s", get_comma(last)); + } +} + static void print_timezone(abi_ulong tz_addr, int last) { @@ -1757,7 +1798,7 @@ print_termios(void *arg) #ifdef TARGET_NR_accept static void -print_accept(void *cpu_env, const struct syscallname *name, +print_accept(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -1771,7 +1812,7 @@ print_accept(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_access static void -print_access(void *cpu_env, const struct syscallname *name, +print_access(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -1784,7 +1825,7 @@ print_access(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_acct static void -print_acct(void *cpu_env, const struct syscallname *name, +print_acct(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -1796,7 +1837,7 @@ print_acct(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_brk static void -print_brk(void 
*cpu_env, const struct syscallname *name, +print_brk(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -1808,7 +1849,7 @@ print_brk(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_chdir static void -print_chdir(void *cpu_env, const struct syscallname *name, +print_chdir(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -1820,7 +1861,7 @@ print_chdir(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_chroot static void -print_chroot(void *cpu_env, const struct syscallname *name, +print_chroot(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -1832,7 +1873,7 @@ print_chroot(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_chmod static void -print_chmod(void *cpu_env, const struct syscallname *name, +print_chmod(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -1845,7 +1886,7 @@ print_chmod(void *cpu_env, const struct syscallname *name, #if defined(TARGET_NR_chown) || defined(TARGET_NR_lchown) static void -print_chown(void *cpu_env, const struct syscallname *name, +print_chown(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -1860,7 +1901,7 @@ print_chown(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_clock_adjtime static void -print_clock_adjtime(void *cpu_env, const struct syscallname *name, +print_clock_adjtime(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -1884,7 +1925,7 @@ static void do_print_clone(unsigned int flags, abi_ulong newsp, } static void -print_clone(void *cpu_env, const struct syscallname *name, +print_clone(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6) { @@ -1904,7 +1945,7 @@ print_clone(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_creat static void -print_creat(void *cpu_env, const struct syscallname *name, +print_creat(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -1917,7 +1958,7 @@ print_creat(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_execv static void -print_execv(void *cpu_env, const struct syscallname *name, +print_execv(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -1928,9 +1969,9 @@ print_execv(void *cpu_env, const struct syscallname *name, } #endif -#ifdef TARGET_NR_faccessat +#if defined(TARGET_NR_faccessat) || defined(TARGET_NR_faccessat2) static void -print_faccessat(void *cpu_env, const struct syscallname *name, +print_faccessat(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -1945,7 +1986,7 @@ print_faccessat(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_fallocate static void -print_fallocate(void *cpu_env, const struct syscallname *name, 
+print_fallocate(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -1965,7 +2006,7 @@ print_fallocate(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_fchmodat static void -print_fchmodat(void *cpu_env, const struct syscallname *name, +print_fchmodat(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -1980,7 +2021,7 @@ print_fchmodat(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_fchownat static void -print_fchownat(void *cpu_env, const struct syscallname *name, +print_fchownat(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -1996,7 +2037,7 @@ print_fchownat(void *cpu_env, const struct syscallname *name, #if defined(TARGET_NR_fcntl) || defined(TARGET_NR_fcntl64) static void -print_fcntl(void *cpu_env, const struct syscallname *name, +print_fcntl(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2132,7 +2173,7 @@ print_fcntl(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_fgetxattr static void -print_fgetxattr(void *cpu_env, const struct syscallname *name, +print_fgetxattr(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2147,7 +2188,7 @@ print_fgetxattr(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_flistxattr static void -print_flistxattr(void *cpu_env, const struct syscallname *name, +print_flistxattr(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2161,7 +2202,7 @@ print_flistxattr(void *cpu_env, const struct syscallname *name, #if defined(TARGET_NR_getxattr) || defined(TARGET_NR_lgetxattr) static void -print_getxattr(void *cpu_env, const struct syscallname *name, +print_getxattr(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2177,7 +2218,7 @@ print_getxattr(void *cpu_env, const struct syscallname *name, #if defined(TARGET_NR_listxattr) || defined(TARGET_NR_llistxattr) static void -print_listxattr(void *cpu_env, const struct syscallname *name, +print_listxattr(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2192,7 +2233,7 @@ print_listxattr(void *cpu_env, const struct syscallname *name, #if defined(TARGET_NR_fremovexattr) static void -print_fremovexattr(void *cpu_env, const struct syscallname *name, +print_fremovexattr(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2205,7 +2246,7 @@ print_fremovexattr(void *cpu_env, const struct syscallname *name, #if defined(TARGET_NR_removexattr) || defined(TARGET_NR_lremovexattr) static void -print_removexattr(void *cpu_env, const struct syscallname *name, +print_removexattr(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2219,7 +2260,7 @@ print_removexattr(void *cpu_env, const struct 
syscallname *name, #ifdef TARGET_NR_futimesat static void -print_futimesat(void *cpu_env, const struct syscallname *name, +print_futimesat(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2234,7 +2275,7 @@ print_futimesat(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_gettimeofday static void -print_gettimeofday(void *cpu_env, const struct syscallname *name, +print_gettimeofday(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2247,7 +2288,7 @@ print_gettimeofday(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_settimeofday static void -print_settimeofday(void *cpu_env, const struct syscallname *name, +print_settimeofday(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2260,7 +2301,7 @@ print_settimeofday(void *cpu_env, const struct syscallname *name, #if defined(TARGET_NR_clock_gettime) || defined(TARGET_NR_clock_getres) static void -print_clock_gettime(void *cpu_env, const struct syscallname *name, +print_clock_gettime(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2272,9 +2313,22 @@ print_clock_gettime(void *cpu_env, const struct syscallname *name, #define print_clock_getres print_clock_gettime #endif +#if defined(TARGET_NR_clock_gettime64) +static void +print_clock_gettime64(CPUArchState *cpu_env, const struct syscallname *name, + abi_long arg0, abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4, abi_long arg5) +{ + print_syscall_prologue(name); + print_enums(clockids, arg0, 0); + print_pointer(arg1, 1); + print_syscall_epilogue(name); +} +#endif + #ifdef TARGET_NR_clock_settime static void -print_clock_settime(void *cpu_env, const struct syscallname *name, +print_clock_settime(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2287,7 +2341,7 @@ print_clock_settime(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_getitimer static void -print_getitimer(void *cpu_env, const struct syscallname *name, +print_getitimer(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2300,7 +2354,7 @@ print_getitimer(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_setitimer static void -print_setitimer(void *cpu_env, const struct syscallname *name, +print_setitimer(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2314,7 +2368,7 @@ print_setitimer(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_link static void -print_link(void *cpu_env, const struct syscallname *name, +print_link(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2327,7 +2381,7 @@ print_link(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_linkat static void -print_linkat(void *cpu_env, const struct syscallname *name, +print_linkat(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long 
arg3, abi_long arg4, abi_long arg5) { @@ -2343,7 +2397,7 @@ print_linkat(void *cpu_env, const struct syscallname *name, #if defined(TARGET_NR__llseek) || defined(TARGET_NR_llseek) static void -print__llseek(void *cpu_env, const struct syscallname *name, +print__llseek(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2366,7 +2420,7 @@ print__llseek(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_lseek static void -print_lseek(void *cpu_env, const struct syscallname *name, +print_lseek(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2397,7 +2451,7 @@ print_lseek(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_truncate static void -print_truncate(void *cpu_env, const struct syscallname *name, +print_truncate(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2410,7 +2464,7 @@ print_truncate(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_truncate64 static void -print_truncate64(void *cpu_env, const struct syscallname *name, +print_truncate64(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2427,7 +2481,7 @@ print_truncate64(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_ftruncate64 static void -print_ftruncate64(void *cpu_env, const struct syscallname *name, +print_ftruncate64(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2444,7 +2498,7 @@ print_ftruncate64(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_mlockall static void -print_mlockall(void *cpu_env, const struct syscallname *name, +print_mlockall(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2456,7 +2510,7 @@ print_mlockall(void *cpu_env, const struct syscallname *name, #if defined(TARGET_NR_socket) static void -print_socket(void *cpu_env, const struct syscallname *name, +print_socket(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2917,7 +2971,7 @@ static struct { }; static void -print_socketcall(void *cpu_env, const struct syscallname *name, +print_socketcall(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2938,7 +2992,7 @@ print_socketcall(void *cpu_env, const struct syscallname *name, #if defined(TARGET_NR_bind) static void -print_bind(void *cpu_env, const struct syscallname *name, +print_bind(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2952,7 +3006,7 @@ print_bind(void *cpu_env, const struct syscallname *name, #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) || \ defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) static void -print_stat(void *cpu_env, const struct syscallname *name, +print_stat(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, 
abi_long arg5) { @@ -2966,9 +3020,49 @@ print_stat(void *cpu_env, const struct syscallname *name, #define print_lstat64 print_stat #endif +#if defined(TARGET_NR_madvise) +static struct enums madvise_advice[] = { + ENUM_TARGET(MADV_NORMAL), + ENUM_TARGET(MADV_RANDOM), + ENUM_TARGET(MADV_SEQUENTIAL), + ENUM_TARGET(MADV_WILLNEED), + ENUM_TARGET(MADV_DONTNEED), + ENUM_TARGET(MADV_FREE), + ENUM_TARGET(MADV_REMOVE), + ENUM_TARGET(MADV_DONTFORK), + ENUM_TARGET(MADV_DOFORK), + ENUM_TARGET(MADV_MERGEABLE), + ENUM_TARGET(MADV_UNMERGEABLE), + ENUM_TARGET(MADV_HUGEPAGE), + ENUM_TARGET(MADV_NOHUGEPAGE), + ENUM_TARGET(MADV_DONTDUMP), + ENUM_TARGET(MADV_DODUMP), + ENUM_TARGET(MADV_WIPEONFORK), + ENUM_TARGET(MADV_KEEPONFORK), + ENUM_TARGET(MADV_COLD), + ENUM_TARGET(MADV_PAGEOUT), + ENUM_TARGET(MADV_POPULATE_READ), + ENUM_TARGET(MADV_POPULATE_WRITE), + ENUM_TARGET(MADV_DONTNEED_LOCKED), + ENUM_END, +}; + +static void +print_madvise(CPUArchState *cpu_env, const struct syscallname *name, + abi_long arg0, abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4, abi_long arg5) +{ + print_syscall_prologue(name); + print_pointer(arg0, 0); + print_raw_param("%d", arg1, 0); + print_enums(madvise_advice, arg2, 1); + print_syscall_epilogue(name); +} +#endif + #if defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) static void -print_fstat(void *cpu_env, const struct syscallname *name, +print_fstat(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2982,7 +3076,7 @@ print_fstat(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_mkdir static void -print_mkdir(void *cpu_env, const struct syscallname *name, +print_mkdir(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -2995,7 +3089,7 @@ print_mkdir(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_mkdirat static void -print_mkdirat(void *cpu_env, const struct syscallname *name, +print_mkdirat(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3009,7 +3103,7 @@ print_mkdirat(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_rmdir static void -print_rmdir(void *cpu_env, const struct syscallname *name, +print_rmdir(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3021,7 +3115,7 @@ print_rmdir(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_rt_sigaction static void -print_rt_sigaction(void *cpu_env, const struct syscallname *name, +print_rt_sigaction(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3035,7 +3129,7 @@ print_rt_sigaction(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_rt_sigprocmask static void -print_rt_sigprocmask(void *cpu_env, const struct syscallname *name, +print_rt_sigprocmask(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3055,7 +3149,7 @@ print_rt_sigprocmask(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_rt_sigqueueinfo static void -print_rt_sigqueueinfo(void *cpu_env, const struct syscallname *name, +print_rt_sigqueueinfo(CPUArchState *cpu_env, const 
struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3080,7 +3174,7 @@ print_rt_sigqueueinfo(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_rt_tgsigqueueinfo static void -print_rt_tgsigqueueinfo(void *cpu_env, const struct syscallname *name, +print_rt_tgsigqueueinfo(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3164,7 +3258,7 @@ print_syslog_action(abi_ulong arg, int last) } static void -print_syslog(void *cpu_env, const struct syscallname *name, +print_syslog(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3178,7 +3272,7 @@ print_syslog(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_mknod static void -print_mknod(void *cpu_env, const struct syscallname *name, +print_mknod(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3197,7 +3291,7 @@ print_mknod(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_mknodat static void -print_mknodat(void *cpu_env, const struct syscallname *name, +print_mknodat(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3217,7 +3311,7 @@ print_mknodat(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_mq_open static void -print_mq_open(void *cpu_env, const struct syscallname *name, +print_mq_open(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3236,7 +3330,7 @@ print_mq_open(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_open static void -print_open(void *cpu_env, const struct syscallname *name, +print_open(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3253,7 +3347,7 @@ print_open(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_openat static void -print_openat(void *cpu_env, const struct syscallname *name, +print_openat(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3269,9 +3363,37 @@ print_openat(void *cpu_env, const struct syscallname *name, } #endif +#ifdef TARGET_NR_pidfd_send_signal +static void +print_pidfd_send_signal(CPUArchState *cpu_env, const struct syscallname *name, + abi_long arg0, abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4, abi_long arg5) +{ + void *p; + target_siginfo_t uinfo; + + print_syscall_prologue(name); + print_raw_param("%d", arg0, 0); + print_signal(arg1, 0); + + p = lock_user(VERIFY_READ, arg2, sizeof(target_siginfo_t), 1); + if (p) { + get_target_siginfo(&uinfo, p); + print_siginfo(&uinfo); + + unlock_user(p, arg2, 0); + } else { + print_pointer(arg2, 0); + } + + print_raw_param("%u", arg3, 1); + print_syscall_epilogue(name); +} +#endif + #ifdef TARGET_NR_mq_unlink static void -print_mq_unlink(void *cpu_env, const struct syscallname *name, +print_mq_unlink(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3283,7 +3405,7 @@ print_mq_unlink(void *cpu_env, 
const struct syscallname *name, #if defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat) static void -print_fstatat64(void *cpu_env, const struct syscallname *name, +print_fstatat64(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3299,7 +3421,7 @@ print_fstatat64(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_readlink static void -print_readlink(void *cpu_env, const struct syscallname *name, +print_readlink(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3313,7 +3435,7 @@ print_readlink(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_readlinkat static void -print_readlinkat(void *cpu_env, const struct syscallname *name, +print_readlinkat(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3328,7 +3450,7 @@ print_readlinkat(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_rename static void -print_rename(void *cpu_env, const struct syscallname *name, +print_rename(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3341,7 +3463,7 @@ print_rename(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_renameat static void -print_renameat(void *cpu_env, const struct syscallname *name, +print_renameat(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3356,7 +3478,7 @@ print_renameat(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_statfs static void -print_statfs(void *cpu_env, const struct syscallname *name, +print_statfs(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3369,7 +3491,7 @@ print_statfs(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_statfs64 static void -print_statfs64(void *cpu_env, const struct syscallname *name, +print_statfs64(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3382,7 +3504,7 @@ print_statfs64(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_symlink static void -print_symlink(void *cpu_env, const struct syscallname *name, +print_symlink(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3395,7 +3517,7 @@ print_symlink(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_symlinkat static void -print_symlinkat(void *cpu_env, const struct syscallname *name, +print_symlinkat(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3409,7 +3531,7 @@ print_symlinkat(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_mount static void -print_mount(void *cpu_env, const struct syscallname *name, +print_mount(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3425,7 +3547,7 @@ print_mount(void *cpu_env, const struct syscallname *name, #ifdef 
TARGET_NR_umount static void -print_umount(void *cpu_env, const struct syscallname *name, +print_umount(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3437,7 +3559,7 @@ print_umount(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_umount2 static void -print_umount2(void *cpu_env, const struct syscallname *name, +print_umount2(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3450,7 +3572,7 @@ print_umount2(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_unlink static void -print_unlink(void *cpu_env, const struct syscallname *name, +print_unlink(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3462,7 +3584,7 @@ print_unlink(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_unlinkat static void -print_unlinkat(void *cpu_env, const struct syscallname *name, +print_unlinkat(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3476,7 +3598,7 @@ print_unlinkat(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_unshare static void -print_unshare(void *cpu_env, const struct syscallname *name, +print_unshare(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3486,9 +3608,24 @@ print_unshare(void *cpu_env, const struct syscallname *name, } #endif +#ifdef TARGET_NR_clock_nanosleep +static void +print_clock_nanosleep(CPUArchState *cpu_env, const struct syscallname *name, + abi_long arg0, abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4, abi_long arg5) +{ + print_syscall_prologue(name); + print_enums(clockids, arg0, 0); + print_raw_param("%d", arg1, 0); + print_timespec(arg2, 0); + print_timespec(arg3, 1); + print_syscall_epilogue(name); +} +#endif + #ifdef TARGET_NR_utime static void -print_utime(void *cpu_env, const struct syscallname *name, +print_utime(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3501,7 +3638,7 @@ print_utime(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_utimes static void -print_utimes(void *cpu_env, const struct syscallname *name, +print_utimes(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3514,7 +3651,7 @@ print_utimes(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_utimensat static void -print_utimensat(void *cpu_env, const struct syscallname *name, +print_utimensat(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3529,7 +3666,7 @@ print_utimensat(void *cpu_env, const struct syscallname *name, #if defined(TARGET_NR_mmap) || defined(TARGET_NR_mmap2) static void -print_mmap(void *cpu_env, const struct syscallname *name, +print_mmap(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3547,7 +3684,7 @@ print_mmap(void *cpu_env, const struct syscallname *name, #ifdef 
TARGET_NR_mprotect static void -print_mprotect(void *cpu_env, const struct syscallname *name, +print_mprotect(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3561,7 +3698,7 @@ print_mprotect(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_munmap static void -print_munmap(void *cpu_env, const struct syscallname *name, +print_munmap(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3573,56 +3710,61 @@ print_munmap(void *cpu_env, const struct syscallname *name, #endif #ifdef TARGET_NR_futex -static void print_futex_op(abi_long tflag, int last) +static void print_futex_op(int cmd, int last) { -#define print_op(val) \ -if( cmd == val ) { \ - qemu_log(#val); \ - return; \ -} + static const char * const futex_names[] = { +#define NAME(X) [X] = #X + NAME(FUTEX_WAIT), + NAME(FUTEX_WAKE), + NAME(FUTEX_FD), + NAME(FUTEX_REQUEUE), + NAME(FUTEX_CMP_REQUEUE), + NAME(FUTEX_WAKE_OP), + NAME(FUTEX_LOCK_PI), + NAME(FUTEX_UNLOCK_PI), + NAME(FUTEX_TRYLOCK_PI), + NAME(FUTEX_WAIT_BITSET), + NAME(FUTEX_WAKE_BITSET), + NAME(FUTEX_WAIT_REQUEUE_PI), + NAME(FUTEX_CMP_REQUEUE_PI), + NAME(FUTEX_LOCK_PI2), +#undef NAME + }; - int cmd = (int)tflag; -#ifdef FUTEX_PRIVATE_FLAG - if (cmd & FUTEX_PRIVATE_FLAG) { - qemu_log("FUTEX_PRIVATE_FLAG|"); - cmd &= ~FUTEX_PRIVATE_FLAG; + unsigned base_cmd = cmd & FUTEX_CMD_MASK; + + if (base_cmd < ARRAY_SIZE(futex_names)) { + qemu_log("%s%s%s", + (cmd & FUTEX_PRIVATE_FLAG ? "FUTEX_PRIVATE_FLAG|" : ""), + (cmd & FUTEX_CLOCK_REALTIME ? "FUTEX_CLOCK_REALTIME|" : ""), + futex_names[base_cmd]); + } else { + qemu_log("0x%x", cmd); } -#endif -#ifdef FUTEX_CLOCK_REALTIME - if (cmd & FUTEX_CLOCK_REALTIME) { - qemu_log("FUTEX_CLOCK_REALTIME|"); - cmd &= ~FUTEX_CLOCK_REALTIME; - } -#endif - print_op(FUTEX_WAIT) - print_op(FUTEX_WAKE) - print_op(FUTEX_FD) - print_op(FUTEX_REQUEUE) - print_op(FUTEX_CMP_REQUEUE) - print_op(FUTEX_WAKE_OP) - print_op(FUTEX_LOCK_PI) - print_op(FUTEX_UNLOCK_PI) - print_op(FUTEX_TRYLOCK_PI) -#ifdef FUTEX_WAIT_BITSET - print_op(FUTEX_WAIT_BITSET) -#endif -#ifdef FUTEX_WAKE_BITSET - print_op(FUTEX_WAKE_BITSET) -#endif - /* unknown values */ - qemu_log("%d", cmd); } static void -print_futex(void *cpu_env, const struct syscallname *name, +print_futex(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { + abi_long op = arg1 & FUTEX_CMD_MASK; print_syscall_prologue(name); print_pointer(arg0, 0); print_futex_op(arg1, 0); print_raw_param(",%d", arg2, 0); - print_pointer(arg3, 0); /* struct timespec */ + switch (op) { + case FUTEX_WAIT: + case FUTEX_WAIT_BITSET: + case FUTEX_LOCK_PI: + case FUTEX_LOCK_PI2: + case FUTEX_WAIT_REQUEUE_PI: + print_timespec(arg3, 0); + break; + default: + print_pointer(arg3, 0); + break; + } print_pointer(arg4, 0); print_raw_param("%d", arg4, 1); print_syscall_epilogue(name); @@ -3631,7 +3773,7 @@ print_futex(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_kill static void -print_kill(void *cpu_env, const struct syscallname *name, +print_kill(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3644,7 +3786,7 @@ print_kill(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_tkill static void -print_tkill(void 
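For orientation, the rewritten print_futex_op() above splits a futex op word into a base command, looked up in a name table via FUTEX_CMD_MASK, plus the FUTEX_PRIVATE_FLAG and FUTEX_CLOCK_REALTIME bits, which are printed as prefixes. A cut-down sketch of the same decoding, assuming <linux/futex.h> is available and filling in only two names; the demo_* identifiers are made up.

#include <linux/futex.h>
#include <stdio.h>

static const char *const demo_names[] = {
    [FUTEX_WAIT] = "FUTEX_WAIT",
    [FUTEX_WAKE] = "FUTEX_WAKE",
};

static void demo_print_futex_op(int op)
{
    /* Mask off the flag bits to index the name table; print the flags
     * separately, falling back to hex for commands we do not know. */
    unsigned base = op & FUTEX_CMD_MASK;

    if (base < sizeof(demo_names) / sizeof(demo_names[0]) && demo_names[base]) {
        printf("%s%s%s\n",
               op & FUTEX_PRIVATE_FLAG ? "FUTEX_PRIVATE_FLAG|" : "",
               op & FUTEX_CLOCK_REALTIME ? "FUTEX_CLOCK_REALTIME|" : "",
               demo_names[base]);
    } else {
        printf("%#x\n", op);
    }
}

int main(void)
{
    demo_print_futex_op(FUTEX_WAIT | FUTEX_PRIVATE_FLAG);
    return 0;
}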
*cpu_env, const struct syscallname *name, +print_tkill(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3657,7 +3799,7 @@ print_tkill(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_tgkill static void -print_tgkill(void *cpu_env, const struct syscallname *name, +print_tgkill(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3671,7 +3813,7 @@ print_tgkill(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_statx static void -print_statx(void *cpu_env, const struct syscallname *name, +print_statx(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3687,7 +3829,7 @@ print_statx(void *cpu_env, const struct syscallname *name, #ifdef TARGET_NR_ioctl static void -print_ioctl(void *cpu_env, const struct syscallname *name, +print_ioctl(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { @@ -3772,55 +3914,74 @@ static int nsyscalls = ARRAY_SIZE(scnames); * The public interface to this module. */ void -print_syscall(void *cpu_env, int num, +print_syscall(CPUArchState *cpu_env, int num, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6) { int i; - const char *format="%s(" TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld ")"; + FILE *f; + const char *format = "%s(" TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld "," + TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld "," + TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld ")"; - qemu_log("%d ", getpid()); + f = qemu_log_trylock(); + if (!f) { + return; + } + fprintf(f, "%d ", getpid()); - for(i=0;i. 
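The print_syscall() change above switches to the lock-then-fprintf pattern so a multi-part trace record is emitted atomically with respect to other threads. A plain-POSIX analogue of that pattern, assuming nothing about the QEMU logging API beyond what the diff shows:

#include <stdio.h>
#include <unistd.h>

/* Take the stream lock once so the pid prefix, the call and the newline
 * cannot be interleaved with output from another thread. */
static void trace_record(const char *name, long ret)
{
    flockfile(stderr);
    fprintf(stderr, "%d ", getpid());
    fprintf(stderr, "%s(...) = %ld\n", name, ret);
    funlockfile(stderr);
}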
+ */ + +#ifndef LINUX_USER_STRACE_H +#define LINUX_USER_STRACE_H + +void print_syscall(CPUArchState *cpu_env, int num, + abi_long arg1, abi_long arg2, abi_long arg3, + abi_long arg4, abi_long arg5, abi_long arg6); +void print_syscall_ret(CPUArchState *cpu_env, int num, abi_long ret, + abi_long arg1, abi_long arg2, abi_long arg3, + abi_long arg4, abi_long arg5, abi_long arg6); +/** + * print_taken_signal: + * @target_signum: target signal being taken + * @tinfo: target_siginfo_t which will be passed to the guest for the signal + * + * Print strace output indicating that this signal is being taken by the guest, + * in a format similar to: + * --- SIGSEGV {si_signo=SIGSEGV, si_code=SI_KERNEL, si_addr=0} --- + */ +void print_taken_signal(int target_signum, const target_siginfo_t *tinfo); + +#endif /* LINUX_USER_STRACE_H */ diff --git a/linux-user/strace.list b/linux-user/strace.list index 278596acd1..3a898e2532 100644 --- a/linux-user/strace.list +++ b/linux-user/strace.list @@ -91,7 +91,8 @@ print_syscall_ret_clock_gettime }, #endif #ifdef TARGET_NR_clock_nanosleep -{ TARGET_NR_clock_nanosleep, "clock_nanosleep" , NULL, NULL, NULL }, +{ TARGET_NR_clock_nanosleep, "clock_nanosleep" , NULL, print_clock_nanosleep, + NULL }, #endif #ifdef TARGET_NR_clock_settime { TARGET_NR_clock_settime, "clock_settime" , NULL, print_clock_settime, NULL }, @@ -102,6 +103,9 @@ #ifdef TARGET_NR_close { TARGET_NR_close, "close" , "%s(%d)", NULL, NULL }, #endif +#ifdef TARGET_NR_close_range +{ TARGET_NR_close_range, "close_range" , "%s(%u,%u,%u)", NULL, NULL }, +#endif #ifdef TARGET_NR_connect { TARGET_NR_connect, "connect" , "%s(%d,%#x,%d)", NULL, NULL }, #endif @@ -177,6 +181,9 @@ #ifdef TARGET_NR_faccessat { TARGET_NR_faccessat, "faccessat" , NULL, print_faccessat, NULL }, #endif +#ifdef TARGET_NR_faccessat2 +{ TARGET_NR_faccessat2, "faccessat2" , NULL, print_faccessat, NULL }, +#endif #ifdef TARGET_NR_fadvise64 { TARGET_NR_fadvise64, "fadvise64" , NULL, NULL, NULL }, #endif @@ -278,10 +285,10 @@ { TARGET_NR_getcwd, "getcwd" , "%s(%p,%d)", NULL, NULL }, #endif #ifdef TARGET_NR_getdents -{ TARGET_NR_getdents, "getdents" , NULL, NULL, NULL }, +{ TARGET_NR_getdents, "getdents" , "%s(%d,%p,%u)", NULL, NULL }, #endif #ifdef TARGET_NR_getdents64 -{ TARGET_NR_getdents64, "getdents64" , NULL, NULL, NULL }, +{ TARGET_NR_getdents64, "getdents64" , "%s(%d,%p,%u)", NULL, NULL }, #endif #ifdef TARGET_NR_getdomainname { TARGET_NR_getdomainname, "getdomainname" , NULL, NULL, NULL }, @@ -351,7 +358,7 @@ { TARGET_NR_getpriority, "getpriority", "%s(%#x,%#x)", NULL, NULL }, #endif #ifdef TARGET_NR_getrandom -{ TARGET_NR_getrandom, "getrandom", NULL, NULL, NULL }, +{ TARGET_NR_getrandom, "getrandom", "%s(%p,%u,%u)", NULL, NULL }, #endif #ifdef TARGET_NR_getresgid { TARGET_NR_getresgid, "getresgid" , NULL, NULL, NULL }, @@ -384,8 +391,13 @@ { TARGET_NR_getsockopt, "getsockopt" , NULL, NULL, NULL }, #endif #ifdef TARGET_NR_get_thread_area +#if defined(TARGET_I386) && defined(TARGET_ABI32) { TARGET_NR_get_thread_area, "get_thread_area", "%s(0x"TARGET_ABI_FMT_lx")", NULL, NULL }, +#elif defined(TARGET_M68K) +{ TARGET_NR_get_thread_area, "get_thread_area" , "%s()", + NULL, print_syscall_ret_addr }, +#endif #endif #ifdef TARGET_NR_gettid { TARGET_NR_gettid, "gettid" , "%s()", NULL, NULL }, @@ -536,7 +548,7 @@ { TARGET_NR_lstat64, "lstat64" , NULL, print_lstat64, NULL }, #endif #ifdef TARGET_NR_madvise -{ TARGET_NR_madvise, "madvise" , NULL, NULL, NULL }, +{ TARGET_NR_madvise, "madvise" , NULL, print_madvise, NULL }, #endif #ifdef 
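The strace.list entries above either carry a printf-style format for simple argument lists (as getdents, getdents64 and getrandom now do) or name a custom printer when the arguments need decoding. A sketch of that table shape with stand-in types and names, not the QEMU definitions:

#include <stdio.h>

struct demo_sysent {
    const char *name;
    const char *format;   /* printf-style, or NULL if a printer is used */
    void (*print)(const char *name, void *buf, size_t len, unsigned flags);
};

static void trace_getrandom(void *buf, size_t len, unsigned flags)
{
    static const struct demo_sysent ent = {
        "getrandom", "%s(%p,%zu,%u)", NULL
    };

    if (ent.print) {
        ent.print(ent.name, buf, len, flags);
    } else {
        printf(ent.format, ent.name, buf, len, flags);
        putchar('\n');
    }
}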
TARGET_NR_madvise1 { TARGET_NR_madvise1, "madvise1" , NULL, NULL, NULL }, @@ -1522,7 +1534,10 @@ { TARGET_NR_timer_gettime, "timer_gettime" , NULL, NULL, NULL }, #endif #ifdef TARGET_NR_timer_settime -{ TARGET_NR_timer_settime, "timer_settime" , NULL, NULL, NULL }, +{ TARGET_NR_timer_settime, "timer_settime" , "%s(%d,%d,%p,%p)", NULL, NULL }, +#endif +#ifdef TARGET_NR_timer_settime64 +{ TARGET_NR_timer_settime64, "timer_settime64" , "%s(%d,%d,%p,%p)", NULL, NULL }, #endif #ifdef TARGET_NR_timerfd { TARGET_NR_timerfd, "timerfd" , NULL, NULL, NULL }, @@ -1659,6 +1674,15 @@ #ifdef TARGET_NR_pipe2 { TARGET_NR_pipe2, "pipe2", NULL, NULL, NULL }, #endif +#ifdef TARGET_NR_pidfd_open +{ TARGET_NR_pidfd_open, "pidfd_open", "%s(%d,%u)", NULL, NULL }, +#endif +#ifdef TARGET_NR_pidfd_send_signal +{ TARGET_NR_pidfd_send_signal, "pidfd_send_signal", NULL, print_pidfd_send_signal, NULL }, +#endif +#ifdef TARGET_NR_pidfd_getfd +{ TARGET_NR_pidfd_getfd, "pidfd_getfd", "%s(%d,%d,%u)", NULL, NULL }, +#endif #ifdef TARGET_NR_atomic_cmpxchg_32 { TARGET_NR_atomic_cmpxchg_32, "atomic_cmpxchg_32", NULL, NULL, NULL }, #endif @@ -1671,3 +1695,7 @@ #ifdef TARGET_NR_copy_file_range { TARGET_NR_copy_file_range, "copy_file_range", "%s(%d,%p,%d,%p,"TARGET_ABI_FMT_lu",%u)", NULL, NULL }, #endif +#ifdef TARGET_NR_clock_gettime64 +{ TARGET_NR_clock_gettime64, "clock_gettime64" , NULL, print_clock_gettime64, + print_syscall_ret_clock_gettime64 }, +#endif diff --git a/linux-user/syscall.c b/linux-user/syscall.c index ccd3892b2d..cedf22c5b5 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -95,7 +95,50 @@ #include #include #include + +#ifdef HAVE_SYS_MOUNT_FSCONFIG +/* + * glibc >= 2.36 linux/mount.h conflicts with sys/mount.h, + * which in turn prevents use of linux/fs.h. So we have to + * define the constants ourselves for now. 
+ */ +#define FS_IOC_GETFLAGS _IOR('f', 1, long) +#define FS_IOC_SETFLAGS _IOW('f', 2, long) +#define FS_IOC_GETVERSION _IOR('v', 1, long) +#define FS_IOC_SETVERSION _IOW('v', 2, long) +#define FS_IOC_FIEMAP _IOWR('f', 11, struct fiemap) +#define FS_IOC32_GETFLAGS _IOR('f', 1, int) +#define FS_IOC32_SETFLAGS _IOW('f', 2, int) +#define FS_IOC32_GETVERSION _IOR('v', 1, int) +#define FS_IOC32_SETVERSION _IOW('v', 2, int) + +#define BLKGETSIZE64 _IOR(0x12,114,size_t) +#define BLKDISCARD _IO(0x12,119) +#define BLKIOMIN _IO(0x12,120) +#define BLKIOOPT _IO(0x12,121) +#define BLKALIGNOFF _IO(0x12,122) +#define BLKPBSZGET _IO(0x12,123) +#define BLKDISCARDZEROES _IO(0x12,124) +#define BLKSECDISCARD _IO(0x12,125) +#define BLKROTATIONAL _IO(0x12,126) +#define BLKZEROOUT _IO(0x12,127) + +#define FIBMAP _IO(0x00,1) +#define FIGETBSZ _IO(0x00,2) + +struct file_clone_range { + __s64 src_fd; + __u64 src_offset; + __u64 src_length; + __u64 dest_offset; +}; + +#define FICLONE _IOW(0x94, 9, int) +#define FICLONERANGE _IOW(0x94, 13, struct file_clone_range) + +#else #include +#endif #include #if defined(CONFIG_FIEMAP) #include @@ -127,12 +170,20 @@ #include "uname.h" #include "qemu.h" +#include "user-internals.h" +#include "strace.h" +#include "signal-common.h" +#include "loader.h" +#include "user-mmap.h" +#include "user/safe-syscall.h" #include "qemu/guest-random.h" #include "qemu/selfmap.h" #include "user/syscall-trace.h" +#include "special-errno.h" #include "qapi/error.h" #include "fd-trans.h" #include "tcg/tcg.h" +#include "cpu_loop-common.h" #ifndef CLONE_IO #define CLONE_IO 0x80000000 /* Clone io context */ @@ -191,8 +242,10 @@ //#define DEBUG_ERESTARTSYS //#include -#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2]) -#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2]) +#define VFAT_IOCTL_READDIR_BOTH \ + _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2) +#define VFAT_IOCTL_READDIR_SHORT \ + _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2) #undef _syscall0 #undef _syscall1 @@ -263,9 +316,6 @@ static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ #if defined(__NR_futex_time64) # define __NR_sys_futex_time64 __NR_futex_time64 #endif -#define __NR_sys_inotify_init __NR_inotify_init -#define __NR_sys_inotify_add_watch __NR_inotify_add_watch -#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch #define __NR_sys_statx __NR_statx #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__) @@ -314,8 +364,12 @@ _syscall3(int,sys_syslog,int,type,char*,bufp,int,len) #ifdef __NR_exit_group _syscall1(int,exit_group,int,error_code) #endif -#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) -_syscall1(int,set_tid_address,int *,tidptr) +#if defined(__NR_close_range) && defined(TARGET_NR_close_range) +#define __NR_sys_close_range __NR_close_range +_syscall3(int,sys_close_range,int,first,int,last,int,flags) +#ifndef CLOSE_RANGE_CLOEXEC +#define CLOSE_RANGE_CLOEXEC (1U << 2) +#endif #endif #if defined(__NR_futex) _syscall6(int,sys_futex,int *,uaddr,int,op,int,val, @@ -325,12 +379,52 @@ _syscall6(int,sys_futex,int *,uaddr,int,op,int,val, _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val, const struct timespec *,timeout,int *,uaddr2,int,val3) #endif +#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open) +_syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags); +#endif +#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal) +_syscall4(int, pidfd_send_signal, int, 
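The FS_IOC_*/BLK* fallbacks and the reworked VFAT_IOCTL_READDIR numbers above all rely on the asm-generic ioctl encoding: the request number packs direction, type, command and argument size into one word, which is why the VFAT entries now spell the buffer size out instead of taking sizeof of QEMU's own struct linux_dirent. A small decoder, assuming the x86-style bit layout (bits 0-7 command, 8-15 type, 16-29 size, 30-31 direction); the 280 below is only an illustrative size:

#include <linux/ioctl.h>
#include <stdio.h>

static void dump_ioctl_nr(unsigned int nr)
{
    printf("dir=%u type='%c' nr=%u size=%u\n",
           _IOC_DIR(nr), (char)_IOC_TYPE(nr), _IOC_NR(nr), _IOC_SIZE(nr));
}

int main(void)
{
    dump_ioctl_nr(_IOR('v', 1, long));                /* FS_IOC_GETVERSION */
    dump_ioctl_nr(_IOC(_IOC_READ, 'r', 1, 280 * 2));  /* explicit size     */
    return 0;
}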
pidfd, int, sig, siginfo_t *, info, + unsigned int, flags); +#endif +#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd) +_syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags); +#endif #define __NR_sys_sched_getaffinity __NR_sched_getaffinity _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, unsigned long *, user_mask_ptr); #define __NR_sys_sched_setaffinity __NR_sched_setaffinity _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, unsigned long *, user_mask_ptr); +/* sched_attr is not defined in glibc */ +struct sched_attr { + uint32_t size; + uint32_t sched_policy; + uint64_t sched_flags; + int32_t sched_nice; + uint32_t sched_priority; + uint64_t sched_runtime; + uint64_t sched_deadline; + uint64_t sched_period; + uint32_t sched_util_min; + uint32_t sched_util_max; +}; +#define __NR_sys_sched_getattr __NR_sched_getattr +_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr, + unsigned int, size, unsigned int, flags); +#define __NR_sys_sched_setattr __NR_sched_setattr +_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr, + unsigned int, flags); +#define __NR_sys_sched_getscheduler __NR_sched_getscheduler +_syscall1(int, sys_sched_getscheduler, pid_t, pid); +#define __NR_sys_sched_setscheduler __NR_sched_setscheduler +_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy, + const struct sched_param *, param); +#define __NR_sys_sched_getparam __NR_sched_getparam +_syscall2(int, sys_sched_getparam, pid_t, pid, + struct sched_param *, param); +#define __NR_sys_sched_setparam __NR_sched_setparam +_syscall2(int, sys_sched_setparam, pid_t, pid, + const struct sched_param *, param); #define __NR_sys_getcpu __NR_getcpu _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache); _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd, @@ -438,33 +532,6 @@ static int sys_renameat2(int oldfd, const char *old, #ifdef CONFIG_INOTIFY #include - -#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) -static int sys_inotify_init(void) -{ - return (inotify_init()); -} -#endif -#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) -static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask) -{ - return (inotify_add_watch(fd, pathname, mask)); -} -#endif -#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) -static int sys_inotify_rm_watch(int fd, int32_t wd) -{ - return (inotify_rm_watch(fd, wd)); -} -#endif -#ifdef CONFIG_INOTIFY1 -#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) -static int sys_inotify_init1(int flags) -{ - return (inotify_init1(flags)); -} -#endif -#endif #else /* Userspace can usually survive runtime without inotify */ #undef TARGET_NR_inotify_init @@ -491,20 +558,25 @@ _syscall4(int, sys_prlimit64, pid_t, pid, int, resource, #if defined(TARGET_NR_timer_create) /* Maximum of 32 active POSIX timers allowed at any one time. */ -static timer_t g_posix_timers[32] = { 0, } ; +#define GUEST_TIMER_MAX 32 +static timer_t g_posix_timers[GUEST_TIMER_MAX]; +static int g_posix_timer_allocated[GUEST_TIMER_MAX]; static inline int next_free_host_timer(void) { - int k ; - /* FIXME: Does finding the next free slot require a lock? 
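The pidfd wrappers declared above exist because glibc only gained pidfd_open()/pidfd_send_signal() wrappers recently. A hypothetical host-side usage sketch via raw syscall(2), guarded in case the headers predate the __NR_ values (the helper name is made up):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#if defined(__NR_pidfd_open) && defined(__NR_pidfd_send_signal)
/* Signal a process through a pidfd, which cannot be confused by pid reuse. */
static int kill_via_pidfd(pid_t pid)
{
    int pidfd = syscall(__NR_pidfd_open, pid, 0);

    if (pidfd < 0) {
        perror("pidfd_open");
        return -1;
    }
    if (syscall(__NR_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) < 0) {
        perror("pidfd_send_signal");
        close(pidfd);
        return -1;
    }
    close(pidfd);
    return 0;
}
#endif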
*/ - for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) { - if (g_posix_timers[k] == 0) { - g_posix_timers[k] = (timer_t) 1; + int k; + for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) { + if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) { return k; } } return -1; } + +static inline void free_host_timer_slot(int id) +{ + qatomic_store_release(g_posix_timer_allocated + id, 0); +} #endif static inline int host_to_target_errno(int host_errno) @@ -529,7 +601,7 @@ static inline int target_to_host_errno(int target_errno) } } -static inline abi_long get_errno(abi_long ret) +abi_long get_errno(abi_long ret) { if (ret == -1) return -host_to_target_errno(errno); @@ -539,16 +611,34 @@ static inline abi_long get_errno(abi_long ret) const char *target_strerror(int err) { - if (err == TARGET_ERESTARTSYS) { + if (err == QEMU_ERESTARTSYS) { return "To be restarted"; } - if (err == TARGET_QEMU_ESIGRETURN) { + if (err == QEMU_ESIGRETURN) { return "Successful exit from sigreturn"; } return strerror(target_to_host_errno(err)); } +static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize) +{ + int i; + uint8_t b; + if (usize <= ksize) { + return 1; + } + for (i = ksize; i < usize; i++) { + if (get_user_u8(b, addr + i)) { + return -TARGET_EFAULT; + } + if (b != 0) { + return 0; + } + } + return 1; +} + #define safe_syscall0(type, name) \ static type safe_##name(void) \ { \ @@ -1026,6 +1116,10 @@ static inline int target_to_host_resource(int code) return RLIMIT_RSS; case TARGET_RLIMIT_RTPRIO: return RLIMIT_RTPRIO; +#ifdef RLIMIT_RTTIME + case TARGET_RLIMIT_RTTIME: + return RLIMIT_RTTIME; +#endif case TARGET_RLIMIT_SIGPENDING: return RLIMIT_SIGPENDING; case TARGET_RLIMIT_STACK: @@ -1362,14 +1456,12 @@ static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3, * The 6th arg is actually two args smashed together, * so we cannot use the C library. 
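The new next_free_host_timer()/free_host_timer_slot() pair above answers the old FIXME by claiming a slot with an atomic exchange, so no lock is needed and two threads can never win the same slot. The same idea in plain C11 atomics, as a self-contained sketch rather than the QEMU primitives:

#include <stdatomic.h>

#define SLOT_MAX 32
static atomic_int slot_used[SLOT_MAX];

static int claim_slot(void)
{
    for (int k = 0; k < SLOT_MAX; k++) {
        /* exchange returns the previous value: 0 means we just took it */
        if (atomic_exchange(&slot_used[k], 1) == 0) {
            return k;
        }
    }
    return -1;
}

static void release_slot(int k)
{
    /* release ordering so writes to the slot happen-before reuse */
    atomic_store_explicit(&slot_used[k], 0, memory_order_release);
}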
*/ - sigset_t set; struct { sigset_t *set; size_t size; } sig, *sig_ptr; abi_ulong arg_sigset, arg_sigsize, *arg7; - target_sigset_t *target_sigset; n = arg1; rfd_addr = arg2; @@ -1410,10 +1502,8 @@ static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3, } /* Extract the two packed args for the sigset */ + sig_ptr = NULL; if (arg6) { - sig_ptr = &sig; - sig.size = SIGSET_T_SIZE; - arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); if (!arg7) { return -TARGET_EFAULT; @@ -1423,28 +1513,22 @@ static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3, unlock_user(arg7, arg6, 0); if (arg_sigset) { - sig.set = &set; - if (arg_sigsize != sizeof(*target_sigset)) { - /* Like the kernel, we enforce correct size sigsets */ - return -TARGET_EINVAL; + ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize); + if (ret != 0) { + return ret; } - target_sigset = lock_user(VERIFY_READ, arg_sigset, - sizeof(*target_sigset), 1); - if (!target_sigset) { - return -TARGET_EFAULT; - } - target_to_host_sigset(&set, target_sigset); - unlock_user(target_sigset, arg_sigset, 0); - } else { - sig.set = NULL; + sig_ptr = &sig; + sig.size = SIGSET_T_SIZE; } - } else { - sig_ptr = NULL; } ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, ts_ptr, sig_ptr)); + if (sig_ptr) { + finish_sigsuspend_mask(ret); + } + if (!is_error(ret)) { if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) { return -TARGET_EFAULT; @@ -1500,8 +1584,7 @@ static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3, } if (ppoll) { struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; - target_sigset_t *target_set; - sigset_t _set, *set = &_set; + sigset_t *set = NULL; if (arg3) { if (time64) { @@ -1520,25 +1603,19 @@ static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3, } if (arg4) { - if (arg5 != sizeof(target_sigset_t)) { + ret = process_sigsuspend_mask(&set, arg4, arg5); + if (ret != 0) { unlock_user(target_pfd, arg1, 0); - return -TARGET_EINVAL; + return ret; } - - target_set = lock_user(VERIFY_READ, arg4, - sizeof(target_sigset_t), 1); - if (!target_set) { - unlock_user(target_pfd, arg1, 0); - return -TARGET_EFAULT; - } - target_to_host_sigset(set, target_set); - } else { - set = NULL; } ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts, set, SIGSET_T_SIZE)); + if (set) { + finish_sigsuspend_mask(ret); + } if (!is_error(ret) && arg3) { if (time64) { if (host_to_target_timespec64(arg3, timeout_ts)) { @@ -1550,9 +1627,6 @@ static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3, } } } - if (arg4) { - unlock_user(target_set, arg4, 0); - } } else { struct timespec ts, *pts; @@ -1578,21 +1652,12 @@ static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3, } #endif -static abi_long do_pipe2(int host_pipe[], int flags) -{ -#ifdef CONFIG_PIPE2 - return pipe2(host_pipe, flags); -#else - return -ENOSYS; -#endif -} - -static abi_long do_pipe(void *cpu_env, abi_ulong pipedes, +static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes, int flags, int is_pipe2) { int host_pipe[2]; abi_long ret; - ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe); + ret = pipe2(host_pipe, flags); if (is_error(ret)) return get_errno(ret); @@ -1601,22 +1666,22 @@ static abi_long do_pipe(void *cpu_env, abi_ulong pipedes, pipe syscall, but didn't replicate this into the pipe2 syscall. 
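The "two args smashed together" comment above refers to the raw pselect6 ABI: the sixth argument is a pointer to a two-word aggregate holding the sigset pointer and its size (see pselect(2), NOTES). A host-side sketch of that shape; the 8 is the kernel sigset size on most Linux ports, and the struct and function names here are illustrative:

#define _GNU_SOURCE
#include <signal.h>
#include <stddef.h>
#include <sys/select.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

struct pselect6_sig {
    const sigset_t *ss;
    size_t ss_len;
};

static long raw_pselect6(int nfds, fd_set *r, fd_set *w, fd_set *e,
                         struct timespec *ts, const sigset_t *mask)
{
    struct pselect6_sig sig = { mask, 8 };

    return syscall(SYS_pselect6, nfds, r, w, e, ts, mask ? &sig : NULL);
}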
*/ if (!is_pipe2) { #if defined(TARGET_ALPHA) - ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1]; + cpu_env->ir[IR_A4] = host_pipe[1]; return host_pipe[0]; #elif defined(TARGET_MIPS) - ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1]; + cpu_env->active_tc.gpr[3] = host_pipe[1]; return host_pipe[0]; #elif defined(TARGET_SH4) - ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1]; + cpu_env->gregs[1] = host_pipe[1]; return host_pipe[0]; #elif defined(TARGET_SPARC) - ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1]; + cpu_env->regwptr[1] = host_pipe[1]; return host_pipe[0]; #endif } if (put_user_s32(host_pipe[0], pipedes) - || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0]))) + || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int))) return -TARGET_EFAULT; return get_errno(ret); } @@ -1690,6 +1755,11 @@ static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr, lladdr = (struct target_sockaddr_ll *)addr; lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex); lladdr->sll_hatype = tswap16(lladdr->sll_hatype); + } else if (sa_family == AF_INET6) { + struct sockaddr_in6 *in6addr; + + in6addr = (struct sockaddr_in6 *)addr; + in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id); } unlock_user(target_saddr, target_addr, 0); @@ -2121,6 +2191,9 @@ static abi_long do_setsockopt(int sockfd, int level, int optname, return -TARGET_EINVAL; ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1); + if (!ip_mreq_source) { + return -TARGET_EFAULT; + } ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen)); unlock_user (ip_mreq_source, optval_addr, 0); break; @@ -3285,7 +3358,8 @@ static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp, if (fd_trans_host_to_target_data(fd)) { ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base, MIN(msg.msg_iov->iov_len, len)); - } else { + } + if (!is_error(ret)) { ret = host_to_target_cmsg(msgp, &msg); } if (!is_error(ret)) { @@ -4855,7 +4929,7 @@ static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, * We can't fit all the extents into the fixed size buffer. * Allocate one that is large enough and use it instead. */ - host_ifconf = malloc(outbufsz); + host_ifconf = g_try_malloc(outbufsz); if (!host_ifconf) { return -TARGET_ENOMEM; } @@ -4903,7 +4977,7 @@ static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, } if (free_buf) { - free(host_ifconf); + g_free(host_ifconf); } return ret; @@ -5044,7 +5118,7 @@ do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp, target_size = thunk_type_size(arg_type, THUNK_TARGET); /* construct host copy of urb and metadata */ - lurb = g_try_malloc0(sizeof(struct live_urb)); + lurb = g_try_new0(struct live_urb, 1); if (!lurb) { return -TARGET_ENOMEM; } @@ -6282,9 +6356,241 @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) return ret; } #endif /* defined(TARGET_ABI32 */ - #endif /* defined(TARGET_I386) */ +/* + * These constants are generic. Supply any that are missing from the host. 
+ */ +#ifndef PR_SET_NAME +# define PR_SET_NAME 15 +# define PR_GET_NAME 16 +#endif +#ifndef PR_SET_FP_MODE +# define PR_SET_FP_MODE 45 +# define PR_GET_FP_MODE 46 +# define PR_FP_MODE_FR (1 << 0) +# define PR_FP_MODE_FRE (1 << 1) +#endif +#ifndef PR_SVE_SET_VL +# define PR_SVE_SET_VL 50 +# define PR_SVE_GET_VL 51 +# define PR_SVE_VL_LEN_MASK 0xffff +# define PR_SVE_VL_INHERIT (1 << 17) +#endif +#ifndef PR_PAC_RESET_KEYS +# define PR_PAC_RESET_KEYS 54 +# define PR_PAC_APIAKEY (1 << 0) +# define PR_PAC_APIBKEY (1 << 1) +# define PR_PAC_APDAKEY (1 << 2) +# define PR_PAC_APDBKEY (1 << 3) +# define PR_PAC_APGAKEY (1 << 4) +#endif +#ifndef PR_SET_TAGGED_ADDR_CTRL +# define PR_SET_TAGGED_ADDR_CTRL 55 +# define PR_GET_TAGGED_ADDR_CTRL 56 +# define PR_TAGGED_ADDR_ENABLE (1UL << 0) +#endif +#ifndef PR_MTE_TCF_SHIFT +# define PR_MTE_TCF_SHIFT 1 +# define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT) +# define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT) +# define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT) +# define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT) +# define PR_MTE_TAG_SHIFT 3 +# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT) +#endif +#ifndef PR_SET_IO_FLUSHER +# define PR_SET_IO_FLUSHER 57 +# define PR_GET_IO_FLUSHER 58 +#endif +#ifndef PR_SET_SYSCALL_USER_DISPATCH +# define PR_SET_SYSCALL_USER_DISPATCH 59 +#endif +#ifndef PR_SME_SET_VL +# define PR_SME_SET_VL 63 +# define PR_SME_GET_VL 64 +# define PR_SME_VL_LEN_MASK 0xffff +# define PR_SME_VL_INHERIT (1 << 17) +#endif + +#include "target_prctl.h" + +static abi_long do_prctl_inval0(CPUArchState *env) +{ + return -TARGET_EINVAL; +} + +static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2) +{ + return -TARGET_EINVAL; +} + +#ifndef do_prctl_get_fp_mode +#define do_prctl_get_fp_mode do_prctl_inval0 +#endif +#ifndef do_prctl_set_fp_mode +#define do_prctl_set_fp_mode do_prctl_inval1 +#endif +#ifndef do_prctl_sve_get_vl +#define do_prctl_sve_get_vl do_prctl_inval0 +#endif +#ifndef do_prctl_sve_set_vl +#define do_prctl_sve_set_vl do_prctl_inval1 +#endif +#ifndef do_prctl_reset_keys +#define do_prctl_reset_keys do_prctl_inval1 +#endif +#ifndef do_prctl_set_tagged_addr_ctrl +#define do_prctl_set_tagged_addr_ctrl do_prctl_inval1 +#endif +#ifndef do_prctl_get_tagged_addr_ctrl +#define do_prctl_get_tagged_addr_ctrl do_prctl_inval0 +#endif +#ifndef do_prctl_get_unalign +#define do_prctl_get_unalign do_prctl_inval1 +#endif +#ifndef do_prctl_set_unalign +#define do_prctl_set_unalign do_prctl_inval1 +#endif +#ifndef do_prctl_sme_get_vl +#define do_prctl_sme_get_vl do_prctl_inval0 +#endif +#ifndef do_prctl_sme_set_vl +#define do_prctl_sme_set_vl do_prctl_inval1 +#endif + +static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2, + abi_long arg3, abi_long arg4, abi_long arg5) +{ + abi_long ret; + + switch (option) { + case PR_GET_PDEATHSIG: + { + int deathsig; + ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig, + arg3, arg4, arg5)); + if (!is_error(ret) && + put_user_s32(host_to_target_signal(deathsig), arg2)) { + return -TARGET_EFAULT; + } + return ret; + } + case PR_SET_PDEATHSIG: + return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2), + arg3, arg4, arg5)); + case PR_GET_NAME: + { + void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); + if (!name) { + return -TARGET_EFAULT; + } + ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name, + arg3, arg4, arg5)); + unlock_user(name, arg2, 16); + return ret; + } + case PR_SET_NAME: + { + void *name = lock_user(VERIFY_READ, arg2, 16, 1); + if (!name) { + return 
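The fixed 16-byte buffers in the PR_GET_NAME/PR_SET_NAME cases above match the kernel contract: a thread name is at most 15 characters plus a terminating NUL. Host-side usage looks like this (a minimal sketch, names illustrative):

#include <stdio.h>
#include <sys/prctl.h>

static void rename_thread_demo(void)
{
    char name[16];   /* PR_GET_NAME requires at least 16 bytes */

    prctl(PR_SET_NAME, (unsigned long)"worker-0", 0, 0, 0);
    if (prctl(PR_GET_NAME, (unsigned long)name, 0, 0, 0) == 0) {
        printf("thread name: %s\n", name);
    }
}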
-TARGET_EFAULT; + } + ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name, + arg3, arg4, arg5)); + unlock_user(name, arg2, 0); + return ret; + } + case PR_GET_FP_MODE: + return do_prctl_get_fp_mode(env); + case PR_SET_FP_MODE: + return do_prctl_set_fp_mode(env, arg2); + case PR_SVE_GET_VL: + return do_prctl_sve_get_vl(env); + case PR_SVE_SET_VL: + return do_prctl_sve_set_vl(env, arg2); + case PR_SME_GET_VL: + return do_prctl_sme_get_vl(env); + case PR_SME_SET_VL: + return do_prctl_sme_set_vl(env, arg2); + case PR_PAC_RESET_KEYS: + if (arg3 || arg4 || arg5) { + return -TARGET_EINVAL; + } + return do_prctl_reset_keys(env, arg2); + case PR_SET_TAGGED_ADDR_CTRL: + if (arg3 || arg4 || arg5) { + return -TARGET_EINVAL; + } + return do_prctl_set_tagged_addr_ctrl(env, arg2); + case PR_GET_TAGGED_ADDR_CTRL: + if (arg2 || arg3 || arg4 || arg5) { + return -TARGET_EINVAL; + } + return do_prctl_get_tagged_addr_ctrl(env); + + case PR_GET_UNALIGN: + return do_prctl_get_unalign(env, arg2); + case PR_SET_UNALIGN: + return do_prctl_set_unalign(env, arg2); + + case PR_CAP_AMBIENT: + case PR_CAPBSET_READ: + case PR_CAPBSET_DROP: + case PR_GET_DUMPABLE: + case PR_SET_DUMPABLE: + case PR_GET_KEEPCAPS: + case PR_SET_KEEPCAPS: + case PR_GET_SECUREBITS: + case PR_SET_SECUREBITS: + case PR_GET_TIMING: + case PR_SET_TIMING: + case PR_GET_TIMERSLACK: + case PR_SET_TIMERSLACK: + case PR_MCE_KILL: + case PR_MCE_KILL_GET: + case PR_GET_NO_NEW_PRIVS: + case PR_SET_NO_NEW_PRIVS: + case PR_GET_IO_FLUSHER: + case PR_SET_IO_FLUSHER: + /* Some prctl options have no pointer arguments and we can pass on. */ + return get_errno(prctl(option, arg2, arg3, arg4, arg5)); + + case PR_GET_CHILD_SUBREAPER: + case PR_SET_CHILD_SUBREAPER: + case PR_GET_SPECULATION_CTRL: + case PR_SET_SPECULATION_CTRL: + case PR_GET_TID_ADDRESS: + /* TODO */ + return -TARGET_EINVAL; + + case PR_GET_FPEXC: + case PR_SET_FPEXC: + /* Was used for SPE on PowerPC. */ + return -TARGET_EINVAL; + + case PR_GET_ENDIAN: + case PR_SET_ENDIAN: + case PR_GET_FPEMU: + case PR_SET_FPEMU: + case PR_SET_MM: + case PR_GET_SECCOMP: + case PR_SET_SECCOMP: + case PR_SET_SYSCALL_USER_DISPATCH: + case PR_GET_THP_DISABLE: + case PR_SET_THP_DISABLE: + case PR_GET_TSC: + case PR_SET_TSC: + /* Disable to prevent the target disabling stuff we need. 
*/ + return -TARGET_EINVAL; + + default: + qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n", + option); + return -TARGET_EINVAL; + } +} + #define NEW_STACK_SIZE 0x40000 @@ -6447,7 +6753,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, } if (block_signals()) { - return -TARGET_ERESTARTSYS; + return -QEMU_ERESTARTSYS; } fork_start(); @@ -6660,6 +6966,14 @@ typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr); typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl); #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32 +struct target_oabi_flock64 { + abi_short l_type; + abi_short l_whence; + abi_llong l_start; + abi_llong l_len; + abi_int l_pid; +} QEMU_PACKED; + static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl, abi_ulong target_flock_addr) { @@ -7014,7 +7328,7 @@ void syscall_init(void) } #ifdef TARGET_NR_truncate64 -static inline abi_long target_truncate64(void *cpu_env, const char *arg1, +static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1, abi_long arg2, abi_long arg3, abi_long arg4) @@ -7028,7 +7342,7 @@ static inline abi_long target_truncate64(void *cpu_env, const char *arg1, #endif #ifdef TARGET_NR_ftruncate64 -static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, +static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4) @@ -7323,12 +7637,12 @@ static inline int target_to_host_mlockall_arg(int arg) #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \ defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \ defined(TARGET_NR_newfstatat)) -static inline abi_long host_to_target_stat64(void *cpu_env, +static inline abi_long host_to_target_stat64(CPUArchState *cpu_env, abi_ulong target_addr, struct stat *host_st) { #if defined(TARGET_ARM) && defined(TARGET_ABI32) - if (((CPUARMState *)cpu_env)->eabi) { + if (cpu_env->eabi) { struct target_eabi_stat64 *target_st; if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) @@ -7493,15 +7807,16 @@ static int do_safe_futex(int *uaddr, int op, int val, futexes locally would make futexes shared between multiple processes tricky. However they're probably useless because guest atomic operations won't work either. */ -#if defined(TARGET_NR_futex) -static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val, - target_ulong timeout, target_ulong uaddr2, int val3) +#if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64) +static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr, + int op, int val, target_ulong timeout, + target_ulong uaddr2, int val3) { - struct timespec ts, *pts; + struct timespec ts, *pts = NULL; + void *haddr2 = NULL; int base_op; - /* ??? We assume FUTEX_* constants are the same on both host - and target. */ + /* We assume FUTEX_* constants are the same on both host and target. 
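The unified do_futex() above only converts the fourth argument as a timespec for the WAIT/LOCK_PI family because, for the REQUEUE-style commands, the same slot carries an integer count (val2) rather than a pointer. A host-side illustration on platforms where SYS_futex exists (there is no glibc wrapper; the helper names are made up):

#define _GNU_SOURCE
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static long demo_futex_wait(uint32_t *uaddr, uint32_t expected,
                            const struct timespec *timeout)
{
    /* 4th argument really is a timespec pointer here */
    return syscall(SYS_futex, uaddr, FUTEX_WAIT, expected, timeout, NULL, 0);
}

static long demo_futex_requeue(uint32_t *uaddr, uint32_t *uaddr2,
                               unsigned wake, unsigned requeue)
{
    /* val2 (a plain count) is passed in the timeout slot */
    return syscall(SYS_futex, uaddr, FUTEX_REQUEUE, wake,
                   (unsigned long)requeue, uaddr2, 0);
}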
*/ #ifdef FUTEX_CMD_MASK base_op = op & FUTEX_CMD_MASK; #else @@ -7510,85 +7825,53 @@ static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val, switch (base_op) { case FUTEX_WAIT: case FUTEX_WAIT_BITSET: - if (timeout) { - pts = &ts; - target_to_host_timespec(pts, timeout); - } else { - pts = NULL; - } - return do_safe_futex(g2h(cpu, uaddr), - op, tswap32(val), pts, NULL, val3); + val = tswap32(val); + break; + case FUTEX_WAIT_REQUEUE_PI: + val = tswap32(val); + haddr2 = g2h(cpu, uaddr2); + break; + case FUTEX_LOCK_PI: + case FUTEX_LOCK_PI2: + break; case FUTEX_WAKE: - return do_safe_futex(g2h(cpu, uaddr), - op, val, NULL, NULL, 0); + case FUTEX_WAKE_BITSET: + case FUTEX_TRYLOCK_PI: + case FUTEX_UNLOCK_PI: + timeout = 0; + break; case FUTEX_FD: - return do_safe_futex(g2h(cpu, uaddr), - op, val, NULL, NULL, 0); - case FUTEX_REQUEUE: + val = target_to_host_signal(val); + timeout = 0; + break; case FUTEX_CMP_REQUEUE: + case FUTEX_CMP_REQUEUE_PI: + val3 = tswap32(val3); + /* fall through */ + case FUTEX_REQUEUE: case FUTEX_WAKE_OP: - /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the - TIMEOUT parameter is interpreted as a uint32_t by the kernel. - But the prototype takes a `struct timespec *'; insert casts - to satisfy the compiler. We do not need to tswap TIMEOUT - since it's not compared to guest memory. */ - pts = (struct timespec *)(uintptr_t) timeout; - return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2), - (base_op == FUTEX_CMP_REQUEUE - ? tswap32(val3) : val3)); + /* + * For these, the 4th argument is not TIMEOUT, but VAL2. + * But the prototype of do_safe_futex takes a pointer, so + * insert casts to satisfy the compiler. We do not need + * to tswap VAL2 since it's not compared to guest memory. + */ + pts = (struct timespec *)(uintptr_t)timeout; + timeout = 0; + haddr2 = g2h(cpu, uaddr2); + break; default: return -TARGET_ENOSYS; } -} -#endif - -#if defined(TARGET_NR_futex_time64) -static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op, - int val, target_ulong timeout, - target_ulong uaddr2, int val3) -{ - struct timespec ts, *pts; - int base_op; - - /* ??? We assume FUTEX_* constants are the same on both host - and target. */ -#ifdef FUTEX_CMD_MASK - base_op = op & FUTEX_CMD_MASK; -#else - base_op = op; -#endif - switch (base_op) { - case FUTEX_WAIT: - case FUTEX_WAIT_BITSET: - if (timeout) { - pts = &ts; - if (target_to_host_timespec64(pts, timeout)) { - return -TARGET_EFAULT; - } - } else { - pts = NULL; + if (timeout) { + pts = &ts; + if (time64 + ? target_to_host_timespec64(pts, timeout) + : target_to_host_timespec(pts, timeout)) { + return -TARGET_EFAULT; } - return do_safe_futex(g2h(cpu, uaddr), op, - tswap32(val), pts, NULL, val3); - case FUTEX_WAKE: - return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0); - case FUTEX_FD: - return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0); - case FUTEX_REQUEUE: - case FUTEX_CMP_REQUEUE: - case FUTEX_WAKE_OP: - /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the - TIMEOUT parameter is interpreted as a uint32_t by the kernel. - But the prototype takes a `struct timespec *'; insert casts - to satisfy the compiler. We do not need to tswap TIMEOUT - since it's not compared to guest memory. */ - pts = (struct timespec *)(uintptr_t) timeout; - return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2), - (base_op == FUTEX_CMP_REQUEUE - ? 
tswap32(val3) : val3)); - default: - return -TARGET_ENOSYS; } + return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3); } #endif @@ -7725,9 +8008,9 @@ int host_to_target_waitstatus(int status) return status; } -static int open_self_cmdline(void *cpu_env, int fd) +static int open_self_cmdline(CPUArchState *cpu_env, int fd) { - CPUState *cpu = env_cpu((CPUArchState *)cpu_env); + CPUState *cpu = env_cpu(cpu_env); struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm; int i; @@ -7742,9 +8025,9 @@ static int open_self_cmdline(void *cpu_env, int fd) return 0; } -static int open_self_maps(void *cpu_env, int fd) +static int open_self_maps(CPUArchState *cpu_env, int fd) { - CPUState *cpu = env_cpu((CPUArchState *)cpu_env); + CPUState *cpu = env_cpu(cpu_env); TaskState *ts = cpu->opaque; GSList *map_info = read_self_maps(); GSList *s; @@ -7766,7 +8049,11 @@ static int open_self_maps(void *cpu_env, int fd) continue; } +#ifdef TARGET_HPPA + if (h2g(max) == ts->info->stack_limit) { +#else if (h2g(min) == ts->info->stack_limit) { +#endif path = "[stack]"; } else { path = e->path; @@ -7778,7 +8065,7 @@ static int open_self_maps(void *cpu_env, int fd) (flags & PAGE_READ) ? 'r' : '-', (flags & PAGE_WRITE_ORG) ? 'w' : '-', (flags & PAGE_EXEC) ? 'x' : '-', - e->is_priv ? 'p' : '-', + e->is_priv ? 'p' : 's', (uint64_t) e->offset, e->dev, e->inode); if (path) { dprintf(fd, "%*s%s\n", 73 - count, "", path); @@ -7804,9 +8091,9 @@ static int open_self_maps(void *cpu_env, int fd) return 0; } -static int open_self_stat(void *cpu_env, int fd) +static int open_self_stat(CPUArchState *cpu_env, int fd) { - CPUState *cpu = env_cpu((CPUArchState *)cpu_env); + CPUState *cpu = env_cpu(cpu_env); TaskState *ts = cpu->opaque; g_autoptr(GString) buf = g_string_new(NULL); int i; @@ -7823,6 +8110,9 @@ static int open_self_stat(void *cpu_env, int fd) } else if (i == 3) { /* ppid */ g_string_printf(buf, FMT_pid " ", getppid()); + } else if (i == 21) { + /* starttime */ + g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime); } else if (i == 27) { /* stack bottom */ g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack); @@ -7839,9 +8129,9 @@ static int open_self_stat(void *cpu_env, int fd) return 0; } -static int open_self_auxv(void *cpu_env, int fd) +static int open_self_auxv(CPUArchState *cpu_env, int fd) { - CPUState *cpu = env_cpu((CPUArchState *)cpu_env); + CPUState *cpu = env_cpu(cpu_env); TaskState *ts = cpu->opaque; abi_ulong auxv = ts->info->saved_auxv; abi_ulong len = ts->info->auxv_len; @@ -7893,7 +8183,34 @@ static int is_proc_myself(const char *filename, const char *entry) return 0; } -#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \ +static void excp_dump_file(FILE *logfile, CPUArchState *env, + const char *fmt, int code) +{ + if (logfile) { + CPUState *cs = env_cpu(env); + + fprintf(logfile, fmt, code); + fprintf(logfile, "Failing executable: %s\n", exec_path); + cpu_dump_state(cs, logfile, 0); + open_self_maps(env, fileno(logfile)); + } +} + +void target_exception_dump(CPUArchState *env, const char *fmt, int code) +{ + /* dump to console */ + excp_dump_file(stderr, env, fmt, code); + + /* dump to log file */ + if (qemu_log_separate()) { + FILE *logfile = qemu_log_trylock(); + + excp_dump_file(logfile, env, fmt, code); + qemu_log_unlock(logfile); + } +} + +#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \ defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) static int is_proc(const char *filename, const char *entry) { @@ -7901,8 +8218,8 @@ static 
int is_proc(const char *filename, const char *entry) } #endif -#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) -static int open_net_route(void *cpu_env, int fd) +#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN +static int open_net_route(CPUArchState *cpu_env, int fd) { FILE *fp; char *line = NULL; @@ -7947,7 +8264,7 @@ static int open_net_route(void *cpu_env, int fd) #endif #if defined(TARGET_SPARC) -static int open_cpuinfo(void *cpu_env, int fd) +static int open_cpuinfo(CPUArchState *cpu_env, int fd) { dprintf(fd, "type\t\t: sun4u\n"); return 0; @@ -7955,7 +8272,7 @@ static int open_cpuinfo(void *cpu_env, int fd) #endif #if defined(TARGET_HPPA) -static int open_cpuinfo(void *cpu_env, int fd) +static int open_cpuinfo(CPUArchState *cpu_env, int fd) { dprintf(fd, "cpu family\t: PA-RISC 1.1e\n"); dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n"); @@ -7967,18 +8284,18 @@ static int open_cpuinfo(void *cpu_env, int fd) #endif #if defined(TARGET_M68K) -static int open_hardware(void *cpu_env, int fd) +static int open_hardware(CPUArchState *cpu_env, int fd) { dprintf(fd, "Model:\t\tqemu-m68k\n"); return 0; } #endif -static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode) +static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode) { struct fake_open { const char *filename; - int (*fill)(void *cpu_env, int fd); + int (*fill)(CPUArchState *cpu_env, int fd); int (*cmp)(const char *s1, const char *s2); }; const struct fake_open *fake_open; @@ -7987,7 +8304,7 @@ static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, { "stat", open_self_stat, is_proc_myself }, { "auxv", open_self_auxv, is_proc_myself }, { "cmdline", open_self_cmdline, is_proc_myself }, -#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN { "/proc/net/route", open_net_route, is_proc }, #endif #if defined(TARGET_SPARC) || defined(TARGET_HPPA) @@ -8000,8 +8317,7 @@ static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, }; if (is_proc_myself(pathname, "exe")) { - int execfd = qemu_getauxval(AT_EXECFD); - return execfd ? 
execfd : safe_openat(dirfd, exec_path, flags, mode); + return safe_openat(dirfd, exec_path, flags, mode); } for (fake_open = fakes; fake_open->filename; fake_open++) { @@ -8015,16 +8331,22 @@ static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, char filename[PATH_MAX]; int fd, r; - /* create temporary file to map stat to */ - tmpdir = getenv("TMPDIR"); - if (!tmpdir) - tmpdir = "/tmp"; - snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); - fd = mkstemp(filename); + fd = memfd_create("qemu-open", 0); if (fd < 0) { - return fd; + if (errno != ENOSYS) { + return fd; + } + /* create temporary file to map stat to */ + tmpdir = getenv("TMPDIR"); + if (!tmpdir) + tmpdir = "/tmp"; + snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); + fd = mkstemp(filename); + if (fd < 0) { + return fd; + } + unlink(filename); } - unlink(filename); if ((r = fake_open->fill(cpu_env, fd))) { int e = errno; @@ -8128,6 +8450,159 @@ static int host_to_target_cpu_mask(const unsigned long *host_mask, return 0; } +#ifdef TARGET_NR_getdents +static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count) +{ + g_autofree void *hdirp = NULL; + void *tdirp; + int hlen, hoff, toff; + int hreclen, treclen; + off64_t prev_diroff = 0; + + hdirp = g_try_malloc(count); + if (!hdirp) { + return -TARGET_ENOMEM; + } + +#ifdef EMULATE_GETDENTS_WITH_GETDENTS + hlen = sys_getdents(dirfd, hdirp, count); +#else + hlen = sys_getdents64(dirfd, hdirp, count); +#endif + + hlen = get_errno(hlen); + if (is_error(hlen)) { + return hlen; + } + + tdirp = lock_user(VERIFY_WRITE, arg2, count, 0); + if (!tdirp) { + return -TARGET_EFAULT; + } + + for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) { +#ifdef EMULATE_GETDENTS_WITH_GETDENTS + struct linux_dirent *hde = hdirp + hoff; +#else + struct linux_dirent64 *hde = hdirp + hoff; +#endif + struct target_dirent *tde = tdirp + toff; + int namelen; + uint8_t type; + + namelen = strlen(hde->d_name); + hreclen = hde->d_reclen; + treclen = offsetof(struct target_dirent, d_name) + namelen + 2; + treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent)); + + if (toff + treclen > count) { + /* + * If the host struct is smaller than the target struct, or + * requires less alignment and thus packs into less space, + * then the host can return more entries than we can pass + * on to the guest. + */ + if (toff == 0) { + toff = -TARGET_EINVAL; /* result buffer is too small */ + break; + } + /* + * Return what we have, resetting the file pointer to the + * location of the first record not returned. + */ + lseek64(dirfd, prev_diroff, SEEK_SET); + break; + } + + prev_diroff = hde->d_off; + tde->d_ino = tswapal(hde->d_ino); + tde->d_off = tswapal(hde->d_off); + tde->d_reclen = tswap16(treclen); + memcpy(tde->d_name, hde->d_name, namelen + 1); + + /* + * The getdents type is in what was formerly a padding byte at the + * end of the structure. 
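The open_self_* helpers above need an anonymous, seekable fd to write a synthesized /proc file into; the patch prefers memfd_create() and keeps the unlinked temporary file only as an ENOSYS fallback. A condensed sketch of the same dance, assuming glibc >= 2.27 for the memfd_create() wrapper (the helper name is made up):

#define _GNU_SOURCE
#include <errno.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static int anon_fd(void)
{
    int fd = memfd_create("qemu-open", 0);

    if (fd < 0 && errno == ENOSYS) {
        char path[] = "/tmp/qemu-open.XXXXXX";

        fd = mkstemp(path);
        if (fd >= 0) {
            unlink(path);   /* keep the descriptor, drop the name */
        }
    }
    return fd;
}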
+ */ +#ifdef EMULATE_GETDENTS_WITH_GETDENTS + type = *((uint8_t *)hde + hreclen - 1); +#else + type = hde->d_type; +#endif + *((uint8_t *)tde + treclen - 1) = type; + } + + unlock_user(tdirp, arg2, toff); + return toff; +} +#endif /* TARGET_NR_getdents */ + +#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) +static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count) +{ + g_autofree void *hdirp = NULL; + void *tdirp; + int hlen, hoff, toff; + int hreclen, treclen; + off64_t prev_diroff = 0; + + hdirp = g_try_malloc(count); + if (!hdirp) { + return -TARGET_ENOMEM; + } + + hlen = get_errno(sys_getdents64(dirfd, hdirp, count)); + if (is_error(hlen)) { + return hlen; + } + + tdirp = lock_user(VERIFY_WRITE, arg2, count, 0); + if (!tdirp) { + return -TARGET_EFAULT; + } + + for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) { + struct linux_dirent64 *hde = hdirp + hoff; + struct target_dirent64 *tde = tdirp + toff; + int namelen; + + namelen = strlen(hde->d_name) + 1; + hreclen = hde->d_reclen; + treclen = offsetof(struct target_dirent64, d_name) + namelen; + treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64)); + + if (toff + treclen > count) { + /* + * If the host struct is smaller than the target struct, or + * requires less alignment and thus packs into less space, + * then the host can return more entries than we can pass + * on to the guest. + */ + if (toff == 0) { + toff = -TARGET_EINVAL; /* result buffer is too small */ + break; + } + /* + * Return what we have, resetting the file pointer to the + * location of the first record not returned. + */ + lseek64(dirfd, prev_diroff, SEEK_SET); + break; + } + + prev_diroff = hde->d_off; + tde->d_ino = tswap64(hde->d_ino); + tde->d_off = tswap64(hde->d_off); + tde->d_reclen = tswap16(treclen); + tde->d_type = hde->d_type; + memcpy(tde->d_name, hde->d_name, namelen); + } + + unlock_user(tdirp, arg2, toff); + return toff; +} +#endif /* TARGET_NR_getdents64 */ + #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root) _syscall2(int, pivot_root, const char *, new_root, const char *, put_old) #endif @@ -8137,7 +8612,7 @@ _syscall2(int, pivot_root, const char *, new_root, const char *, put_old) * of syscall results, can be performed. * All errnos that do_syscall() returns must be -TARGET_. */ -static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, +static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6, abi_long arg7, abi_long arg8) @@ -8164,7 +8639,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, Do thread termination if we have more then one thread. 
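The record-size arithmetic in do_getdents()/do_getdents64() above is: fixed header, name with its NUL, plus (for the legacy getdents layout) one trailing byte where d_type hides, rounded up to the structure alignment. A sketch of that calculation with a stand-in layout, not the QEMU target_dirent definition:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct demo_dirent {
    uint64_t d_ino;
    int64_t  d_off;
    uint16_t d_reclen;
    char     d_name[];   /* name, NUL, then the hidden d_type byte */
};

static size_t demo_reclen(const char *name)
{
    size_t n = offsetof(struct demo_dirent, d_name) + strlen(name) + 2;
    size_t a = _Alignof(struct demo_dirent);

    return (n + a - 1) & ~(a - 1);   /* QEMU_ALIGN_UP equivalent */
}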
*/ if (block_signals()) { - return -TARGET_ERESTARTSYS; + return -QEMU_ERESTARTSYS; } pthread_mutex_lock(&clone_lock); @@ -8261,10 +8736,51 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, ret = do_open_by_handle_at(arg1, arg2, arg3); fd_trans_unregister(ret); return ret; +#endif +#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open) + case TARGET_NR_pidfd_open: + return get_errno(pidfd_open(arg1, arg2)); +#endif +#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal) + case TARGET_NR_pidfd_send_signal: + { + siginfo_t uinfo, *puinfo; + + if (arg3) { + p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); + if (!p) { + return -TARGET_EFAULT; + } + target_to_host_siginfo(&uinfo, p); + unlock_user(p, arg3, 0); + puinfo = &uinfo; + } else { + puinfo = NULL; + } + ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2), + puinfo, arg4)); + } + return ret; +#endif +#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd) + case TARGET_NR_pidfd_getfd: + return get_errno(pidfd_getfd(arg1, arg2, arg3)); #endif case TARGET_NR_close: fd_trans_unregister(arg1); return get_errno(close(arg1)); +#if defined(__NR_close_range) && defined(TARGET_NR_close_range) + case TARGET_NR_close_range: + ret = get_errno(sys_close_range(arg1, arg2, arg3)); + if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) { + abi_long fd, maxfd; + maxfd = MIN(arg2, target_fd_max); + for (fd = arg1; fd < maxfd; fd++) { + fd_trans_unregister(fd); + } + } + return ret; +#endif case TARGET_NR_brk: return do_brk(arg1); @@ -8421,7 +8937,11 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, * before the execve completes and makes it the other * program's problem. */ - ret = get_errno(safe_execve(p, argp, envp)); + if (is_proc_myself(p, "exe")) { + ret = get_errno(safe_execve(exec_path, argp, envp)); + } else { + ret = get_errno(safe_execve(p, argp, envp)); + } unlock_user(p, arg1, 0); goto execve_end; @@ -8498,7 +9018,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) /* Alpha specific */ case TARGET_NR_getxpid: - ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); + cpu_env->ir[IR_A4] = getppid(); return get_errno(getpid()); #endif #ifdef TARGET_NR_getpid @@ -8676,6 +9196,15 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, unlock_user(p, arg2, 0); return ret; #endif +#if defined(TARGET_NR_faccessat2) + case TARGET_NR_faccessat2: + if (!(p = lock_user_string(arg2))) { + return -TARGET_EFAULT; + } + ret = get_errno(faccessat(arg1, p, arg3, arg4)); + unlock_user(p, arg2, 0); + return ret; +#endif #ifdef TARGET_NR_nice /* not on alpha */ case TARGET_NR_nice: return get_errno(nice(arg1)); @@ -9021,13 +9550,20 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, if (!is_error(ret)) { host_to_target_old_sigset(&mask, &oldset); ret = mask; - ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ + cpu_env->ir[IR_V0] = 0; /* force no error */ } #else sigset_t set, oldset, *set_ptr; int how; if (arg2) { + p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1); + if (!p) { + return -TARGET_EFAULT; + } + target_to_host_old_sigset(&set, p); + unlock_user(p, arg2, 0); + set_ptr = &set; switch (arg1) { case TARGET_SIG_BLOCK: how = SIG_BLOCK; @@ -9041,11 +9577,6 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, default: return -TARGET_EINVAL; } - if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) - 
return -TARGET_EFAULT; - target_to_host_old_sigset(&set, p); - unlock_user(p, arg2, 0); - set_ptr = &set; } else { how = 0; set_ptr = NULL; @@ -9071,6 +9602,13 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, } if (arg2) { + p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1); + if (!p) { + return -TARGET_EFAULT; + } + target_to_host_sigset(&set, p); + unlock_user(p, arg2, 0); + set_ptr = &set; switch(how) { case TARGET_SIG_BLOCK: how = SIG_BLOCK; @@ -9084,11 +9622,6 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, default: return -TARGET_EINVAL; } - if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) - return -TARGET_EFAULT; - target_to_host_sigset(&set, p); - unlock_user(p, arg2, 0); - set_ptr = &set; } else { how = 0; set_ptr = NULL; @@ -9141,40 +9674,35 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, #ifdef TARGET_NR_sigsuspend case TARGET_NR_sigsuspend: { - TaskState *ts = cpu->opaque; + sigset_t *set; + #if defined(TARGET_ALPHA) - abi_ulong mask = arg1; - target_to_host_old_sigset(&ts->sigsuspend_mask, &mask); + TaskState *ts = cpu->opaque; + /* target_to_host_old_sigset will bswap back */ + abi_ulong mask = tswapal(arg1); + set = &ts->sigsuspend_mask; + target_to_host_old_sigset(set, &mask); #else - if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) - return -TARGET_EFAULT; - target_to_host_old_sigset(&ts->sigsuspend_mask, p); - unlock_user(p, arg1, 0); -#endif - ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, - SIGSET_T_SIZE)); - if (ret != -TARGET_ERESTARTSYS) { - ts->in_sigsuspend = 1; + ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t)); + if (ret != 0) { + return ret; } +#endif + ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE)); + finish_sigsuspend_mask(ret); } return ret; #endif case TARGET_NR_rt_sigsuspend: { - TaskState *ts = cpu->opaque; + sigset_t *set; - if (arg2 != sizeof(target_sigset_t)) { - return -TARGET_EINVAL; - } - if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) - return -TARGET_EFAULT; - target_to_host_sigset(&ts->sigsuspend_mask, p); - unlock_user(p, arg1, 0); - ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, - SIGSET_T_SIZE)); - if (ret != -TARGET_ERESTARTSYS) { - ts->in_sigsuspend = 1; + ret = process_sigsuspend_mask(&set, arg1, arg2); + if (ret != 0) { + return ret; } + ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE)); + finish_sigsuspend_mask(ret); } return ret; #ifdef TARGET_NR_rt_sigtimedwait @@ -9269,7 +9797,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, } target_to_host_siginfo(&uinfo, p); unlock_user(p, arg3, 0); - ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); + ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo)); } return ret; case TARGET_NR_rt_tgsigqueueinfo: @@ -9282,19 +9810,19 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, } target_to_host_siginfo(&uinfo, p); unlock_user(p, arg4, 0); - ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo)); + ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo)); } return ret; #ifdef TARGET_NR_sigreturn case TARGET_NR_sigreturn: if (block_signals()) { - return -TARGET_ERESTARTSYS; + return -QEMU_ERESTARTSYS; } return do_sigreturn(cpu_env); #endif case TARGET_NR_rt_sigreturn: if (block_signals()) { - return -TARGET_ERESTARTSYS; + return -QEMU_ERESTARTSYS; } return do_rt_sigreturn(cpu_env); case 
TARGET_NR_sethostname: @@ -9491,11 +10019,22 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); if (!p || !p2) { ret = -TARGET_EFAULT; + } else if (!arg4) { + /* Short circuit this for the magic exe check. */ + ret = -TARGET_EINVAL; } else if (is_proc_myself((const char *)p, "exe")) { char real[PATH_MAX], *temp; temp = realpath(exec_path, real); - ret = temp == NULL ? get_errno(-1) : strlen(real) ; - snprintf((char *)p2, arg4, "%s", real); + /* Return value is # of bytes that we wrote to the buffer. */ + if (temp == NULL) { + ret = get_errno(-1); + } else { + /* Don't worry about sign mismatch as earlier mapping + * logic would have thrown a bad address error. */ + ret = MIN(strlen(real), arg4); + /* We cannot NUL terminate the string. */ + memcpy(p2, real, ret); + } } else { ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); } @@ -9642,7 +10181,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, } #ifdef TARGET_ALPHA /* Return value is the unbiased priority. Signal no error. */ - ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; + cpu_env->ir[IR_V0] = 0; #else /* Return value is a biased priority to avoid negative numbers. */ ret = 20 - ret; @@ -10218,162 +10757,11 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, #endif #ifdef TARGET_NR_getdents case TARGET_NR_getdents: -#ifdef EMULATE_GETDENTS_WITH_GETDENTS -#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 - { - struct target_dirent *target_dirp; - struct linux_dirent *dirp; - abi_long count = arg3; - - dirp = g_try_malloc(count); - if (!dirp) { - return -TARGET_ENOMEM; - } - - ret = get_errno(sys_getdents(arg1, dirp, count)); - if (!is_error(ret)) { - struct linux_dirent *de; - struct target_dirent *tde; - int len = ret; - int reclen, treclen; - int count1, tnamelen; - - count1 = 0; - de = dirp; - if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) - return -TARGET_EFAULT; - tde = target_dirp; - while (len > 0) { - reclen = de->d_reclen; - tnamelen = reclen - offsetof(struct linux_dirent, d_name); - assert(tnamelen >= 0); - treclen = tnamelen + offsetof(struct target_dirent, d_name); - assert(count1 + treclen <= count); - tde->d_reclen = tswap16(treclen); - tde->d_ino = tswapal(de->d_ino); - tde->d_off = tswapal(de->d_off); - memcpy(tde->d_name, de->d_name, tnamelen); - de = (struct linux_dirent *)((char *)de + reclen); - len -= reclen; - tde = (struct target_dirent *)((char *)tde + treclen); - count1 += treclen; - } - ret = count1; - unlock_user(target_dirp, arg2, ret); - } - g_free(dirp); - } -#else - { - struct linux_dirent *dirp; - abi_long count = arg3; - - if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) - return -TARGET_EFAULT; - ret = get_errno(sys_getdents(arg1, dirp, count)); - if (!is_error(ret)) { - struct linux_dirent *de; - int len = ret; - int reclen; - de = dirp; - while (len > 0) { - reclen = de->d_reclen; - if (reclen > len) - break; - de->d_reclen = tswap16(reclen); - tswapls(&de->d_ino); - tswapls(&de->d_off); - de = (struct linux_dirent *)((char *)de + reclen); - len -= reclen; - } - } - unlock_user(dirp, arg2, ret); - } -#endif -#else - /* Implement getdents in terms of getdents64 */ - { - struct linux_dirent64 *dirp; - abi_long count = arg3; - - dirp = lock_user(VERIFY_WRITE, arg2, count, 0); - if (!dirp) { - return -TARGET_EFAULT; - } - ret = get_errno(sys_getdents64(arg1, dirp, count)); - if (!is_error(ret)) { - /* Convert the dirent64 structs to target dirent. 
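The reworked /proc/self/exe readlink case above copies MIN(strlen, bufsiz) bytes and deliberately does not NUL-terminate, which mirrors readlink(2): the result is silently truncated to the buffer and the byte count is the return value. Host-side equivalent for reference (illustrative helper name):

#include <limits.h>
#include <stdio.h>
#include <unistd.h>

static void show_self_exe(void)
{
    char buf[PATH_MAX];
    ssize_t n = readlink("/proc/self/exe", buf, sizeof(buf));

    if (n >= 0) {
        /* not NUL-terminated: bound the print with the returned length */
        printf("%.*s\n", (int)n, buf);
    }
}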
We do this - * in-place, since we can guarantee that a target_dirent is no - * larger than a dirent64; however this means we have to be - * careful to read everything before writing in the new format. - */ - struct linux_dirent64 *de; - struct target_dirent *tde; - int len = ret; - int tlen = 0; - - de = dirp; - tde = (struct target_dirent *)dirp; - while (len > 0) { - int namelen, treclen; - int reclen = de->d_reclen; - uint64_t ino = de->d_ino; - int64_t off = de->d_off; - uint8_t type = de->d_type; - - namelen = strlen(de->d_name); - treclen = offsetof(struct target_dirent, d_name) - + namelen + 2; - treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); - - memmove(tde->d_name, de->d_name, namelen + 1); - tde->d_ino = tswapal(ino); - tde->d_off = tswapal(off); - tde->d_reclen = tswap16(treclen); - /* The target_dirent type is in what was formerly a padding - * byte at the end of the structure: - */ - *(((char *)tde) + treclen - 1) = type; - - de = (struct linux_dirent64 *)((char *)de + reclen); - tde = (struct target_dirent *)((char *)tde + treclen); - len -= reclen; - tlen += treclen; - } - ret = tlen; - } - unlock_user(dirp, arg2, ret); - } -#endif - return ret; + return do_getdents(arg1, arg2, arg3); #endif /* TARGET_NR_getdents */ #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) case TARGET_NR_getdents64: - { - struct linux_dirent64 *dirp; - abi_long count = arg3; - if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) - return -TARGET_EFAULT; - ret = get_errno(sys_getdents64(arg1, dirp, count)); - if (!is_error(ret)) { - struct linux_dirent64 *de; - int len = ret; - int reclen; - de = dirp; - while (len > 0) { - reclen = de->d_reclen; - if (reclen > len) - break; - de->d_reclen = tswap16(reclen); - tswap64s((uint64_t *)&de->d_ino); - tswap64s((uint64_t *)&de->d_off); - de = (struct linux_dirent64 *)((char *)de + reclen); - len -= reclen; - } - } - unlock_user(dirp, arg2, ret); - } - return ret; + return do_getdents64(arg1, arg2, arg3); #endif /* TARGET_NR_getdents64 */ #if defined(TARGET_NR__newselect) case TARGET_NR__newselect: @@ -10536,30 +10924,32 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, return ret; case TARGET_NR_sched_setparam: { - struct sched_param *target_schp; + struct target_sched_param *target_schp; struct sched_param schp; if (arg2 == 0) { return -TARGET_EINVAL; } - if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) + if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) { return -TARGET_EFAULT; + } schp.sched_priority = tswap32(target_schp->sched_priority); unlock_user_struct(target_schp, arg2, 0); - return get_errno(sched_setparam(arg1, &schp)); + return get_errno(sys_sched_setparam(arg1, &schp)); } case TARGET_NR_sched_getparam: { - struct sched_param *target_schp; + struct target_sched_param *target_schp; struct sched_param schp; if (arg2 == 0) { return -TARGET_EINVAL; } - ret = get_errno(sched_getparam(arg1, &schp)); + ret = get_errno(sys_sched_getparam(arg1, &schp)); if (!is_error(ret)) { - if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) + if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) { return -TARGET_EFAULT; + } target_schp->sched_priority = tswap32(schp.sched_priority); unlock_user_struct(target_schp, arg2, 1); } @@ -10567,19 +10957,106 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, return ret; case TARGET_NR_sched_setscheduler: { - struct sched_param *target_schp; + struct target_sched_param *target_schp; struct sched_param schp; if (arg3 == 0) { return 
-TARGET_EINVAL; } - if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) + if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) { return -TARGET_EFAULT; + } schp.sched_priority = tswap32(target_schp->sched_priority); unlock_user_struct(target_schp, arg3, 0); - return get_errno(sched_setscheduler(arg1, arg2, &schp)); + return get_errno(sys_sched_setscheduler(arg1, arg2, &schp)); } case TARGET_NR_sched_getscheduler: - return get_errno(sched_getscheduler(arg1)); + return get_errno(sys_sched_getscheduler(arg1)); + case TARGET_NR_sched_getattr: + { + struct target_sched_attr *target_scha; + struct sched_attr scha; + if (arg2 == 0) { + return -TARGET_EINVAL; + } + if (arg3 > sizeof(scha)) { + arg3 = sizeof(scha); + } + ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4)); + if (!is_error(ret)) { + target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0); + if (!target_scha) { + return -TARGET_EFAULT; + } + target_scha->size = tswap32(scha.size); + target_scha->sched_policy = tswap32(scha.sched_policy); + target_scha->sched_flags = tswap64(scha.sched_flags); + target_scha->sched_nice = tswap32(scha.sched_nice); + target_scha->sched_priority = tswap32(scha.sched_priority); + target_scha->sched_runtime = tswap64(scha.sched_runtime); + target_scha->sched_deadline = tswap64(scha.sched_deadline); + target_scha->sched_period = tswap64(scha.sched_period); + if (scha.size > offsetof(struct sched_attr, sched_util_min)) { + target_scha->sched_util_min = tswap32(scha.sched_util_min); + target_scha->sched_util_max = tswap32(scha.sched_util_max); + } + unlock_user(target_scha, arg2, arg3); + } + return ret; + } + case TARGET_NR_sched_setattr: + { + struct target_sched_attr *target_scha; + struct sched_attr scha; + uint32_t size; + int zeroed; + if (arg2 == 0) { + return -TARGET_EINVAL; + } + if (get_user_u32(size, arg2)) { + return -TARGET_EFAULT; + } + if (!size) { + size = offsetof(struct target_sched_attr, sched_util_min); + } + if (size < offsetof(struct target_sched_attr, sched_util_min)) { + if (put_user_u32(sizeof(struct target_sched_attr), arg2)) { + return -TARGET_EFAULT; + } + return -TARGET_E2BIG; + } + + zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size); + if (zeroed < 0) { + return zeroed; + } else if (zeroed == 0) { + if (put_user_u32(sizeof(struct target_sched_attr), arg2)) { + return -TARGET_EFAULT; + } + return -TARGET_E2BIG; + } + if (size > sizeof(struct target_sched_attr)) { + size = sizeof(struct target_sched_attr); + } + + target_scha = lock_user(VERIFY_READ, arg2, size, 1); + if (!target_scha) { + return -TARGET_EFAULT; + } + scha.size = size; + scha.sched_policy = tswap32(target_scha->sched_policy); + scha.sched_flags = tswap64(target_scha->sched_flags); + scha.sched_nice = tswap32(target_scha->sched_nice); + scha.sched_priority = tswap32(target_scha->sched_priority); + scha.sched_runtime = tswap64(target_scha->sched_runtime); + scha.sched_deadline = tswap64(target_scha->sched_deadline); + scha.sched_period = tswap64(target_scha->sched_period); + if (size > offsetof(struct target_sched_attr, sched_util_min)) { + scha.sched_util_min = tswap32(target_scha->sched_util_min); + scha.sched_util_max = tswap32(target_scha->sched_util_max); + } + unlock_user(target_scha, arg2, 0); + return get_errno(sys_sched_setattr(arg1, &scha, arg3)); + } case TARGET_NR_sched_yield: return get_errno(sched_yield()); case TARGET_NR_sched_get_priority_max: @@ -10621,290 +11098,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, return ret; #endif case 
TARGET_NR_prctl: - switch (arg1) { - case PR_GET_PDEATHSIG: - { - int deathsig; - ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); - if (!is_error(ret) && arg2 - && put_user_s32(deathsig, arg2)) { - return -TARGET_EFAULT; - } - return ret; - } -#ifdef PR_GET_NAME - case PR_GET_NAME: - { - void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); - if (!name) { - return -TARGET_EFAULT; - } - ret = get_errno(prctl(arg1, (unsigned long)name, - arg3, arg4, arg5)); - unlock_user(name, arg2, 16); - return ret; - } - case PR_SET_NAME: - { - void *name = lock_user(VERIFY_READ, arg2, 16, 1); - if (!name) { - return -TARGET_EFAULT; - } - ret = get_errno(prctl(arg1, (unsigned long)name, - arg3, arg4, arg5)); - unlock_user(name, arg2, 0); - return ret; - } -#endif -#ifdef TARGET_MIPS - case TARGET_PR_GET_FP_MODE: - { - CPUMIPSState *env = ((CPUMIPSState *)cpu_env); - ret = 0; - if (env->CP0_Status & (1 << CP0St_FR)) { - ret |= TARGET_PR_FP_MODE_FR; - } - if (env->CP0_Config5 & (1 << CP0C5_FRE)) { - ret |= TARGET_PR_FP_MODE_FRE; - } - return ret; - } - case TARGET_PR_SET_FP_MODE: - { - CPUMIPSState *env = ((CPUMIPSState *)cpu_env); - bool old_fr = env->CP0_Status & (1 << CP0St_FR); - bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE); - bool new_fr = arg2 & TARGET_PR_FP_MODE_FR; - bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE; - - const unsigned int known_bits = TARGET_PR_FP_MODE_FR | - TARGET_PR_FP_MODE_FRE; - - /* If nothing to change, return right away, successfully. */ - if (old_fr == new_fr && old_fre == new_fre) { - return 0; - } - /* Check the value is valid */ - if (arg2 & ~known_bits) { - return -TARGET_EOPNOTSUPP; - } - /* Setting FRE without FR is not supported. */ - if (new_fre && !new_fr) { - return -TARGET_EOPNOTSUPP; - } - if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) { - /* FR1 is not supported */ - return -TARGET_EOPNOTSUPP; - } - if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64)) - && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) { - /* cannot set FR=0 */ - return -TARGET_EOPNOTSUPP; - } - if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) { - /* Cannot set FRE=1 */ - return -TARGET_EOPNOTSUPP; - } - - int i; - fpr_t *fpr = env->active_fpu.fpr; - for (i = 0; i < 32 ; i += 2) { - if (!old_fr && new_fr) { - fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX]; - } else if (old_fr && !new_fr) { - fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX]; - } - } - - if (new_fr) { - env->CP0_Status |= (1 << CP0St_FR); - env->hflags |= MIPS_HFLAG_F64; - } else { - env->CP0_Status &= ~(1 << CP0St_FR); - env->hflags &= ~MIPS_HFLAG_F64; - } - if (new_fre) { - env->CP0_Config5 |= (1 << CP0C5_FRE); - if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) { - env->hflags |= MIPS_HFLAG_FRE; - } - } else { - env->CP0_Config5 &= ~(1 << CP0C5_FRE); - env->hflags &= ~MIPS_HFLAG_FRE; - } - - return 0; - } -#endif /* MIPS */ -#ifdef TARGET_AARCH64 - case TARGET_PR_SVE_SET_VL: - /* - * We cannot support either PR_SVE_SET_VL_ONEXEC or - * PR_SVE_VL_INHERIT. Note the kernel definition - * of sve_vl_valid allows for VQ=512, i.e. VL=8192, - * even though the current architectural maximum is VQ=16. 
- */ - ret = -TARGET_EINVAL; - if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env)) - && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) { - CPUARMState *env = cpu_env; - ARMCPU *cpu = env_archcpu(env); - uint32_t vq, old_vq; - - old_vq = (env->vfp.zcr_el[1] & 0xf) + 1; - vq = MAX(arg2 / 16, 1); - vq = MIN(vq, cpu->sve_max_vq); - - if (vq < old_vq) { - aarch64_sve_narrow_vq(env, vq); - } - env->vfp.zcr_el[1] = vq - 1; - arm_rebuild_hflags(env); - ret = vq * 16; - } - return ret; - case TARGET_PR_SVE_GET_VL: - ret = -TARGET_EINVAL; - { - ARMCPU *cpu = env_archcpu(cpu_env); - if (cpu_isar_feature(aa64_sve, cpu)) { - ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16; - } - } - return ret; - case TARGET_PR_PAC_RESET_KEYS: - { - CPUARMState *env = cpu_env; - ARMCPU *cpu = env_archcpu(env); - - if (arg3 || arg4 || arg5) { - return -TARGET_EINVAL; - } - if (cpu_isar_feature(aa64_pauth, cpu)) { - int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY | - TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY | - TARGET_PR_PAC_APGAKEY); - int ret = 0; - Error *err = NULL; - - if (arg2 == 0) { - arg2 = all; - } else if (arg2 & ~all) { - return -TARGET_EINVAL; - } - if (arg2 & TARGET_PR_PAC_APIAKEY) { - ret |= qemu_guest_getrandom(&env->keys.apia, - sizeof(ARMPACKey), &err); - } - if (arg2 & TARGET_PR_PAC_APIBKEY) { - ret |= qemu_guest_getrandom(&env->keys.apib, - sizeof(ARMPACKey), &err); - } - if (arg2 & TARGET_PR_PAC_APDAKEY) { - ret |= qemu_guest_getrandom(&env->keys.apda, - sizeof(ARMPACKey), &err); - } - if (arg2 & TARGET_PR_PAC_APDBKEY) { - ret |= qemu_guest_getrandom(&env->keys.apdb, - sizeof(ARMPACKey), &err); - } - if (arg2 & TARGET_PR_PAC_APGAKEY) { - ret |= qemu_guest_getrandom(&env->keys.apga, - sizeof(ARMPACKey), &err); - } - if (ret != 0) { - /* - * Some unknown failure in the crypto. The best - * we can do is log it and fail the syscall. - * The real syscall cannot fail this way. - */ - qemu_log_mask(LOG_UNIMP, - "PR_PAC_RESET_KEYS: Crypto failure: %s", - error_get_pretty(err)); - error_free(err); - return -TARGET_EIO; - } - return 0; - } - } - return -TARGET_EINVAL; - case TARGET_PR_SET_TAGGED_ADDR_CTRL: - { - abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE; - CPUARMState *env = cpu_env; - ARMCPU *cpu = env_archcpu(env); - - if (cpu_isar_feature(aa64_mte, cpu)) { - valid_mask |= TARGET_PR_MTE_TCF_MASK; - valid_mask |= TARGET_PR_MTE_TAG_MASK; - } - - if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) { - return -TARGET_EINVAL; - } - env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE; - - if (cpu_isar_feature(aa64_mte, cpu)) { - switch (arg2 & TARGET_PR_MTE_TCF_MASK) { - case TARGET_PR_MTE_TCF_NONE: - case TARGET_PR_MTE_TCF_SYNC: - case TARGET_PR_MTE_TCF_ASYNC: - break; - default: - return -EINVAL; - } - - /* - * Write PR_MTE_TCF to SCTLR_EL1[TCF0]. - * Note that the syscall values are consistent with hw. - */ - env->cp15.sctlr_el[1] = - deposit64(env->cp15.sctlr_el[1], 38, 2, - arg2 >> TARGET_PR_MTE_TCF_SHIFT); - - /* - * Write PR_MTE_TAG to GCR_EL1[Exclude]. - * Note that the syscall uses an include mask, - * and hardware uses an exclude mask -- invert. 
- */ - env->cp15.gcr_el1 = - deposit64(env->cp15.gcr_el1, 0, 16, - ~arg2 >> TARGET_PR_MTE_TAG_SHIFT); - arm_rebuild_hflags(env); - } - return 0; - } - case TARGET_PR_GET_TAGGED_ADDR_CTRL: - { - abi_long ret = 0; - CPUARMState *env = cpu_env; - ARMCPU *cpu = env_archcpu(env); - - if (arg2 || arg3 || arg4 || arg5) { - return -TARGET_EINVAL; - } - if (env->tagged_addr_enable) { - ret |= TARGET_PR_TAGGED_ADDR_ENABLE; - } - if (cpu_isar_feature(aa64_mte, cpu)) { - /* See above. */ - ret |= (extract64(env->cp15.sctlr_el[1], 38, 2) - << TARGET_PR_MTE_TCF_SHIFT); - ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16, - ~env->cp15.gcr_el1); - } - return ret; - } -#endif /* AARCH64 */ - case PR_GET_SECCOMP: - case PR_SET_SECCOMP: - /* Disable seccomp to prevent the target disabling syscalls we - * need. */ - return -TARGET_EINVAL; - default: - /* Most prctl options have no pointer arguments */ - return get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); - } + return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5); break; #ifdef TARGET_NR_arch_prctl case TARGET_NR_arch_prctl: @@ -11244,39 +11438,58 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, { int gidsetsize = arg1; target_id *target_grouplist; - gid_t *grouplist; + g_autofree gid_t *grouplist = NULL; int i; - grouplist = alloca(gidsetsize * sizeof(gid_t)); - ret = get_errno(getgroups(gidsetsize, grouplist)); - if (gidsetsize == 0) - return ret; - if (!is_error(ret)) { - target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); - if (!target_grouplist) - return -TARGET_EFAULT; - for(i = 0;i < ret; i++) - target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); - unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); + if (gidsetsize > NGROUPS_MAX) { + return -TARGET_EINVAL; } + if (gidsetsize > 0) { + grouplist = g_try_new(gid_t, gidsetsize); + if (!grouplist) { + return -TARGET_ENOMEM; + } + } + ret = get_errno(getgroups(gidsetsize, grouplist)); + if (!is_error(ret) && gidsetsize > 0) { + target_grouplist = lock_user(VERIFY_WRITE, arg2, + gidsetsize * sizeof(target_id), 0); + if (!target_grouplist) { + return -TARGET_EFAULT; + } + for (i = 0; i < ret; i++) { + target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); + } + unlock_user(target_grouplist, arg2, + gidsetsize * sizeof(target_id)); + } + return ret; } - return ret; case TARGET_NR_setgroups: { int gidsetsize = arg1; target_id *target_grouplist; - gid_t *grouplist = NULL; + g_autofree gid_t *grouplist = NULL; int i; - if (gidsetsize) { - grouplist = alloca(gidsetsize * sizeof(gid_t)); - target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); + + if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) { + return -TARGET_EINVAL; + } + if (gidsetsize > 0) { + grouplist = g_try_new(gid_t, gidsetsize); + if (!grouplist) { + return -TARGET_ENOMEM; + } + target_grouplist = lock_user(VERIFY_READ, arg2, + gidsetsize * sizeof(target_id), 1); if (!target_grouplist) { return -TARGET_EFAULT; } for (i = 0; i < gidsetsize; i++) { grouplist[i] = low2highgid(tswapid(target_grouplist[i])); } - unlock_user(target_grouplist, arg2, 0); + unlock_user(target_grouplist, arg2, + gidsetsize * sizeof(target_id)); } return get_errno(setgroups(gidsetsize, grouplist)); } @@ -11367,7 +11580,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, { uid_t euid; euid=geteuid(); - ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; + cpu_env->ir[IR_A4]=euid; } return get_errno(getuid()); #endif @@ -11377,7 +11590,7 @@ static abi_long 
do_syscall1(void *cpu_env, int num, abi_long arg1, { uid_t egid; egid=getegid(); - ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; + cpu_env->ir[IR_A4]=egid; } return get_errno(getgid()); #endif @@ -11389,7 +11602,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, case TARGET_GSI_IEEE_FP_CONTROL: { uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env); - uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr; + uint64_t swcr = cpu_env->swcr; swcr &= ~SWCR_STATUS_MASK; swcr |= (fpcr >> 35) & SWCR_STATUS_MASK; @@ -11431,8 +11644,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, * could be queried. Therefore, we store the status * bits only in FPCR. */ - ((CPUAlphaState *)cpu_env)->swcr - = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK); + cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK); fpcr = cpu_alpha_load_fpcr(cpu_env); fpcr &= ((uint64_t)FPCR_DYN_MASK << 32); @@ -11456,7 +11668,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, fex = alpha_ieee_fpcr_to_swcr(fpcr); fex = exc & ~fex; fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT; - fex &= ((CPUArchState *)cpu_env)->swcr; + fex &= (cpu_env)->swcr; /* Update the hardware fpcr. */ fpcr |= alpha_ieee_swcr_to_fpcr(exc); @@ -11488,9 +11700,8 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, info.si_signo = SIGFPE; info.si_errno = 0; info.si_code = si_code; - info._sifields._sigfault._addr - = ((CPUArchState *)cpu_env)->pc; - queue_signal((CPUArchState *)cpu_env, info.si_signo, + info._sifields._sigfault._addr = (cpu_env)->pc; + queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info); } ret = 0; @@ -11563,41 +11774,59 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, { int gidsetsize = arg1; uint32_t *target_grouplist; - gid_t *grouplist; + g_autofree gid_t *grouplist = NULL; int i; - grouplist = alloca(gidsetsize * sizeof(gid_t)); + if (gidsetsize > NGROUPS_MAX) { + return -TARGET_EINVAL; + } + if (gidsetsize > 0) { + grouplist = g_try_new(gid_t, gidsetsize); + if (!grouplist) { + return -TARGET_ENOMEM; + } + } ret = get_errno(getgroups(gidsetsize, grouplist)); - if (gidsetsize == 0) - return ret; - if (!is_error(ret)) { - target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); + if (!is_error(ret) && gidsetsize > 0) { + target_grouplist = lock_user(VERIFY_WRITE, arg2, + gidsetsize * 4, 0); if (!target_grouplist) { return -TARGET_EFAULT; } - for(i = 0;i < ret; i++) + for (i = 0; i < ret; i++) { target_grouplist[i] = tswap32(grouplist[i]); + } unlock_user(target_grouplist, arg2, gidsetsize * 4); } + return ret; } - return ret; #endif #ifdef TARGET_NR_setgroups32 case TARGET_NR_setgroups32: { int gidsetsize = arg1; uint32_t *target_grouplist; - gid_t *grouplist; + g_autofree gid_t *grouplist = NULL; int i; - grouplist = alloca(gidsetsize * sizeof(gid_t)); - target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); - if (!target_grouplist) { - return -TARGET_EFAULT; + if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) { + return -TARGET_EINVAL; + } + if (gidsetsize > 0) { + grouplist = g_try_new(gid_t, gidsetsize); + if (!grouplist) { + return -TARGET_ENOMEM; + } + target_grouplist = lock_user(VERIFY_READ, arg2, + gidsetsize * 4, 1); + if (!target_grouplist) { + return -TARGET_EFAULT; + } + for (i = 0; i < gidsetsize; i++) { + grouplist[i] = tswap32(target_grouplist[i]); + } + unlock_user(target_grouplist, arg2, 0); } - for(i = 0;i < gidsetsize; i++) - grouplist[i] = tswap32(target_grouplist[i]); - unlock_user(target_grouplist, arg2, 0); 
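The getgroups32/setgroups32 hunks above replace unbounded alloca() buffers with an explicit NGROUPS_MAX bound, a fallible heap allocation, and a per-entry byte swap of the guest gid list. A minimal standalone sketch of that bounded copy-and-swap pattern follows; it is illustrative only — guest_gid_t, guest_is_bigendian and copy_guest_groups() stand in for the patch's abi types, TARGET_BIG_ENDIAN/tswap32() and g_try_new() handling, and are not code from the patch.

#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

typedef uint32_t guest_gid_t;            /* stand-in for the 32-bit target gid */
static const int guest_is_bigendian = 0; /* stand-in for TARGET_BIG_ENDIAN */

/* Swap a guest gid into host byte order when guest and host endianness differ. */
static gid_t guest_to_host_gid(guest_gid_t g)
{
    return guest_is_bigendian ? (gid_t)__builtin_bswap32(g) : (gid_t)g;
}

/*
 * Copy a guest-supplied group list into a freshly allocated host array,
 * rejecting oversized or negative counts up front rather than trusting
 * the caller.  Returns 0 on success or a negative errno value.
 */
static int copy_guest_groups(const guest_gid_t *guest, int count, gid_t **out)
{
    gid_t *host;
    int i;

    if (count < 0 || count > NGROUPS_MAX) {
        return -EINVAL;
    }
    host = calloc(count ? count : 1, sizeof(*host));
    if (!host) {
        return -ENOMEM;
    }
    for (i = 0; i < count; i++) {
        host[i] = guest_to_host_gid(guest[i]);
    }
    *out = host;
    return 0;
}

int main(void)
{
    guest_gid_t guest[] = { 1000, 1001, 27 };
    gid_t *host = NULL;

    if (copy_guest_groups(guest, 3, &host) == 0) {
        printf("converted 3 gids, first=%u\n", (unsigned)host[0]);
        free(host);
    }
    return 0;
}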
return get_errno(setgroups(gidsetsize, grouplist)); } #endif @@ -11696,7 +11925,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, return -host_to_target_errno(ret); #endif -#if TARGET_ABI_BITS == 32 +#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) #ifdef TARGET_NR_fadvise64_64 case TARGET_NR_fadvise64_64: @@ -11761,11 +11990,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, #ifdef TARGET_NR_madvise case TARGET_NR_madvise: - /* A straight passthrough may not be safe because qemu sometimes - turns private file-backed mappings into anonymous mappings. - This will break MADV_DONTNEED. - This is a hint, so ignoring and returning success is ok. */ - return 0; + return target_madvise(arg1, arg2, arg3); #endif #ifdef TARGET_NR_fcntl64 case TARGET_NR_fcntl64: @@ -11776,7 +12001,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, to_flock64_fn *copyto = copy_to_user_flock64; #ifdef TARGET_ARM - if (!((CPUARMState *)cpu_env)->eabi) { + if (!cpu_env->eabi) { copyfrom = copy_from_user_oabi_flock64; copyto = copy_to_user_oabi_flock64; } @@ -11827,7 +12052,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, return get_errno(sys_gettid()); #ifdef TARGET_NR_readahead case TARGET_NR_readahead: -#if TARGET_ABI_BITS == 32 +#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) if (regpairs_aligned(cpu_env, num)) { arg2 = arg3; arg3 = arg4; @@ -12004,13 +12229,13 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, #ifdef TARGET_NR_set_thread_area case TARGET_NR_set_thread_area: #if defined(TARGET_MIPS) - ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1; + cpu_env->active_tc.CP0_UserLocal = arg1; return 0; #elif defined(TARGET_CRIS) if (arg1 & 0xff) ret = -TARGET_EINVAL; else { - ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; + cpu_env->pregs[PR_PID] = arg1; ret = 0; } return ret; @@ -12154,9 +12379,14 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, } #endif -#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) +#if defined(TARGET_NR_set_tid_address) case TARGET_NR_set_tid_address: - return get_errno(set_tid_address((int *)g2h(cpu, arg1))); + { + TaskState *ts = cpu->opaque; + ts->child_tidptr = arg1; + /* do not call host set_tid_address() syscall, instead return tid() */ + return get_errno(sys_gettid()); + } #endif case TARGET_NR_tkill: @@ -12243,41 +12473,41 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, #endif #ifdef TARGET_NR_futex case TARGET_NR_futex: - return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6); + return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6); #endif #ifdef TARGET_NR_futex_time64 case TARGET_NR_futex_time64: - return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6); + return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6); #endif -#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) +#ifdef CONFIG_INOTIFY +#if defined(TARGET_NR_inotify_init) case TARGET_NR_inotify_init: - ret = get_errno(sys_inotify_init()); + ret = get_errno(inotify_init()); if (ret >= 0) { fd_trans_register(ret, &target_inotify_trans); } return ret; #endif -#ifdef CONFIG_INOTIFY1 -#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) +#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1) case TARGET_NR_inotify_init1: - ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1, + ret = get_errno(inotify_init1(target_to_host_bitmask(arg1, fcntl_flags_tbl))); 
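The futex changes earlier in this hunk collapse TARGET_NR_futex and TARGET_NR_futex_time64 into a single do_futex() call that takes a time64 flag and converts whichever guest timespec layout the caller used. A simplified, self-contained sketch of that dispatch pattern is below; the guest_timespec32/guest_timespec64 structs and guest_to_host_timespec() are illustrative stand-ins (endianness conversion omitted for brevity), not QEMU's actual helpers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Illustrative guest-side layouts: a legacy 32-bit timespec and a time64 one. */
struct guest_timespec32 { int32_t tv_sec; int32_t tv_nsec; };
struct guest_timespec64 { int64_t tv_sec; int64_t tv_nsec; };

/*
 * Single conversion entry point: the caller states which layout the guest
 * used, mirroring how one syscall handler can serve both futex and
 * futex_time64 by threading a bool through to the timespec conversion.
 */
static void guest_to_host_timespec(struct timespec *host,
                                   const void *guest, bool time64)
{
    if (time64) {
        const struct guest_timespec64 *g = guest;
        host->tv_sec = (time_t)g->tv_sec;
        host->tv_nsec = (long)g->tv_nsec;
    } else {
        const struct guest_timespec32 *g = guest;
        host->tv_sec = g->tv_sec;
        host->tv_nsec = g->tv_nsec;
    }
}

int main(void)
{
    struct guest_timespec32 t32 = { 1, 500 };
    struct guest_timespec64 t64 = { 2, 750 };
    struct timespec host;

    guest_to_host_timespec(&host, &t32, false);
    printf("32-bit path: %lld.%09ld\n", (long long)host.tv_sec, host.tv_nsec);
    guest_to_host_timespec(&host, &t64, true);
    printf("64-bit path: %lld.%09ld\n", (long long)host.tv_sec, host.tv_nsec);
    return 0;
}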
if (ret >= 0) { fd_trans_register(ret, &target_inotify_trans); } return ret; #endif -#endif -#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) +#if defined(TARGET_NR_inotify_add_watch) case TARGET_NR_inotify_add_watch: p = lock_user_string(arg2); - ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); + ret = get_errno(inotify_add_watch(arg1, path(p), arg3)); unlock_user(p, arg2, 0); return ret; #endif -#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) +#if defined(TARGET_NR_inotify_rm_watch) case TARGET_NR_inotify_rm_watch: - return get_errno(sys_inotify_rm_watch(arg1, arg2)); + return get_errno(inotify_rm_watch(arg1, arg2)); +#endif #endif #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) @@ -12514,7 +12744,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, #endif /* CONFIG_EVENTFD */ #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) case TARGET_NR_fallocate: -#if TARGET_ABI_BITS == 32 +#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4), target_offset64(arg5, arg6))); #else @@ -12525,7 +12755,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, #if defined(CONFIG_SYNC_FILE_RANGE) #if defined(TARGET_NR_sync_file_range) case TARGET_NR_sync_file_range: -#if TARGET_ABI_BITS == 32 +#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) #if defined(TARGET_MIPS) ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), target_offset64(arg5, arg6), arg7)); @@ -12547,7 +12777,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, case TARGET_NR_arm_sync_file_range: #endif /* This is like sync_file_range but the arguments are reordered */ -#if TARGET_ABI_BITS == 32 +#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), target_offset64(arg5, arg6), arg2)); #else @@ -12638,29 +12868,21 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, #if defined(TARGET_NR_epoll_pwait) case TARGET_NR_epoll_pwait: { - target_sigset_t *target_set; - sigset_t _set, *set = &_set; + sigset_t *set = NULL; if (arg5) { - if (arg6 != sizeof(target_sigset_t)) { - ret = -TARGET_EINVAL; + ret = process_sigsuspend_mask(&set, arg5, arg6); + if (ret != 0) { break; } - - target_set = lock_user(VERIFY_READ, arg5, - sizeof(target_sigset_t), 1); - if (!target_set) { - ret = -TARGET_EFAULT; - break; - } - target_to_host_sigset(set, target_set); - unlock_user(target_set, arg5, 0); - } else { - set = NULL; } ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, set, SIGSET_T_SIZE)); + + if (set) { + finish_sigsuspend_mask(ret); + } break; } #endif @@ -12703,8 +12925,8 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { return -TARGET_EFAULT; } - rnew.rlim_cur = tswap64(target_rnew->rlim_cur); - rnew.rlim_max = tswap64(target_rnew->rlim_max); + __get_user(rnew.rlim_cur, &target_rnew->rlim_cur); + __get_user(rnew.rlim_max, &target_rnew->rlim_max); unlock_user_struct(target_rnew, arg3, 0); rnewp = &rnew; } @@ -12714,8 +12936,8 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { return -TARGET_EFAULT; } - target_rold->rlim_cur = tswap64(rold.rlim_cur); - target_rold->rlim_max = tswap64(rold.rlim_max); + __put_user(rold.rlim_cur, &target_rold->rlim_cur); + 
__put_user(rold.rlim_max, &target_rold->rlim_max); unlock_user_struct(target_rold, arg4, 1); } return ret; @@ -12745,8 +12967,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, info.si_errno = 0; info.si_code = TARGET_SEGV_MAPERR; info._sifields._sigfault._addr = arg6; - queue_signal((CPUArchState *)cpu_env, info.si_signo, - QEMU_SI_FAULT, &info); + queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info); ret = 0xdeadbeef; } @@ -12781,15 +13002,18 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, phost_sevp = &host_sevp; ret = target_to_host_sigevent(phost_sevp, arg2); if (ret != 0) { + free_host_timer_slot(timer_index); return ret; } } ret = get_errno(timer_create(clkid, phost_sevp, phtimer)); if (ret) { - phtimer = NULL; + free_host_timer_slot(timer_index); } else { if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) { + timer_delete(*phtimer); + free_host_timer_slot(timer_index); return -TARGET_EFAULT; } } @@ -12925,7 +13149,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, } else { timer_t htimer = g_posix_timers[timerid]; ret = get_errno(timer_delete(htimer)); - g_posix_timers[timerid] = 0; + free_host_timer_slot(timerid); } return ret; } @@ -12933,8 +13157,12 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD) case TARGET_NR_timerfd_create: - return get_errno(timerfd_create(arg1, - target_to_host_bitmask(arg2, fcntl_flags_tbl))); + ret = get_errno(timerfd_create(arg1, + target_to_host_bitmask(arg2, fcntl_flags_tbl))); + if (ret >= 0) { + fd_trans_register(ret, &target_timerfd_trans); + } + return ret; #endif #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD) @@ -13115,7 +13343,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, return ret; } -abi_long do_syscall(void *cpu_env, int num, abi_long arg1, +abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6, abi_long arg7, abi_long arg8) @@ -13132,7 +13360,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, static bool flag; flag = !flag; if (flag) { - return -TARGET_ERESTARTSYS; + return -QEMU_ERESTARTSYS; } } #endif diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h index a5ce487dcc..77864de57f 100644 --- a/linux-user/syscall_defs.h +++ b/linux-user/syscall_defs.h @@ -74,7 +74,7 @@ || defined(TARGET_M68K) || defined(TARGET_CRIS) \ || defined(TARGET_S390X) || defined(TARGET_OPENRISC) \ || defined(TARGET_NIOS2) || defined(TARGET_RISCV) \ - || defined(TARGET_XTENSA) + || defined(TARGET_XTENSA) || defined(TARGET_LOONGARCH64) #define TARGET_IOC_SIZEBITS 14 #define TARGET_IOC_DIRBITS 2 @@ -437,11 +437,11 @@ struct target_dirent { }; struct target_dirent64 { - uint64_t d_ino; - int64_t d_off; - unsigned short d_reclen; + abi_ullong d_ino; + abi_llong d_off; + abi_ushort d_reclen; unsigned char d_type; - char d_name[256]; + char d_name[]; }; @@ -688,7 +688,7 @@ typedef struct target_siginfo { #define TARGET_FPE_FLTINV (7) /* floating point invalid operation */ #define TARGET_FPE_FLTSUB (8) /* subscript out of range */ #define TARGET_FPE_FLTUNK (14) /* undiagnosed fp exception */ -#define TARGET_NSIGFPE 15 +#define TARGET_FPE_CONDTRAP (15) /* trap on condition */ /* * SIGSEGV si_codes @@ -715,58 +715,9 @@ typedef struct target_siginfo { #define TARGET_TRAP_TRACE (2) /* process trace trap */ #define TARGET_TRAP_BRANCH (3) /* process taken 
branch trap */ #define TARGET_TRAP_HWBKPT (4) /* hardware breakpoint/watchpoint */ +#define TARGET_TRAP_UNK (5) /* undiagnosed trap */ -struct target_rlimit { - abi_ulong rlim_cur; - abi_ulong rlim_max; -}; - -#if defined(TARGET_ALPHA) -#define TARGET_RLIM_INFINITY 0x7fffffffffffffffull -#elif defined(TARGET_MIPS) || (defined(TARGET_SPARC) && TARGET_ABI_BITS == 32) -#define TARGET_RLIM_INFINITY 0x7fffffffUL -#else -#define TARGET_RLIM_INFINITY ((abi_ulong)-1) -#endif - -#if defined(TARGET_MIPS) -#define TARGET_RLIMIT_CPU 0 -#define TARGET_RLIMIT_FSIZE 1 -#define TARGET_RLIMIT_DATA 2 -#define TARGET_RLIMIT_STACK 3 -#define TARGET_RLIMIT_CORE 4 -#define TARGET_RLIMIT_RSS 7 -#define TARGET_RLIMIT_NPROC 8 -#define TARGET_RLIMIT_NOFILE 5 -#define TARGET_RLIMIT_MEMLOCK 9 -#define TARGET_RLIMIT_AS 6 -#define TARGET_RLIMIT_LOCKS 10 -#define TARGET_RLIMIT_SIGPENDING 11 -#define TARGET_RLIMIT_MSGQUEUE 12 -#define TARGET_RLIMIT_NICE 13 -#define TARGET_RLIMIT_RTPRIO 14 -#else -#define TARGET_RLIMIT_CPU 0 -#define TARGET_RLIMIT_FSIZE 1 -#define TARGET_RLIMIT_DATA 2 -#define TARGET_RLIMIT_STACK 3 -#define TARGET_RLIMIT_CORE 4 -#define TARGET_RLIMIT_RSS 5 -#if defined(TARGET_SPARC) -#define TARGET_RLIMIT_NOFILE 6 -#define TARGET_RLIMIT_NPROC 7 -#else -#define TARGET_RLIMIT_NPROC 6 -#define TARGET_RLIMIT_NOFILE 7 -#endif -#define TARGET_RLIMIT_MEMLOCK 8 -#define TARGET_RLIMIT_AS 9 -#define TARGET_RLIMIT_LOCKS 10 -#define TARGET_RLIMIT_SIGPENDING 11 -#define TARGET_RLIMIT_MSGQUEUE 12 -#define TARGET_RLIMIT_NICE 13 -#define TARGET_RLIMIT_RTPRIO 14 -#endif +#include "target_resource.h" struct target_pollfd { int fd; /* file descriptor */ @@ -1219,6 +1170,10 @@ struct target_rtc_pll_info { #define TARGET_LOOP_SET_STATUS64 0x4C04 #define TARGET_LOOP_GET_STATUS64 0x4C05 #define TARGET_LOOP_CHANGE_FD 0x4C06 +#define TARGET_LOOP_SET_CAPACITY 0x4C07 +#define TARGET_LOOP_SET_DIRECT_IO 0x4C08 +#define TARGET_LOOP_SET_BLOCK_SIZE 0x4C09 +#define TARGET_LOOP_CONFIGURE 0x4C0A #define TARGET_LOOP_CTL_ADD 0x4C80 #define TARGET_LOOP_CTL_REMOVE 0x4C81 @@ -1291,7 +1246,7 @@ struct target_winsize { #include "termbits.h" -#if defined(TARGET_MIPS) +#if defined(TARGET_MIPS) || defined(TARGET_XTENSA) #define TARGET_PROT_SEM 0x10 #else #define TARGET_PROT_SEM 0x08 @@ -1603,7 +1558,7 @@ struct target_stat64 { struct target_stat { abi_ulong st_dev; abi_ulong st_ino; -#if defined(TARGET_PPC64) && !defined(TARGET_ABI32) +#if defined(TARGET_PPC64) abi_ulong st_nlink; unsigned int st_mode; #else @@ -1624,12 +1579,12 @@ struct target_stat { abi_ulong target_st_ctime_nsec; abi_ulong __unused4; abi_ulong __unused5; -#if defined(TARGET_PPC64) && !defined(TARGET_ABI32) +#if defined(TARGET_PPC64) abi_ulong __unused6; #endif }; -#if !defined(TARGET_PPC64) || defined(TARGET_ABI32) +#if !defined(TARGET_PPC64) #define TARGET_HAS_STRUCT_STAT64 struct QEMU_PACKED target_stat64 { unsigned long long st_dev; @@ -2129,7 +2084,8 @@ struct target_stat64 { abi_ulong __unused5; }; -#elif defined(TARGET_OPENRISC) || defined(TARGET_NIOS2) || defined(TARGET_RISCV) +#elif defined(TARGET_OPENRISC) || defined(TARGET_NIOS2) \ + || defined(TARGET_RISCV) || defined(TARGET_HEXAGON) /* These are the asm-generic versions of the stat and stat64 structures */ @@ -2240,30 +2196,9 @@ struct target_stat64 { uint64_t st_ino; }; -#elif defined(TARGET_HEXAGON) +#elif defined(TARGET_LOONGARCH64) -struct target_stat { - unsigned long long st_dev; - unsigned long long st_ino; - unsigned int st_mode; - unsigned int st_nlink; - unsigned int st_uid; - unsigned int st_gid; - 
unsigned long long st_rdev; - target_ulong __pad1; - long long st_size; - target_long st_blksize; - int __pad2; - long long st_blocks; - - target_long target_st_atime; - target_long target_st_atime_nsec; - target_long target_st_mtime; - target_long target_st_mtime_nsec; - target_long target_st_ctime; - target_long target_st_ctime_nsec; - int __unused[2]; -}; +/* LoongArch no newfstatat/fstat syscall. */ #else #error unsupported CPU @@ -2327,7 +2262,8 @@ struct target_statfs64 { }; #elif (defined(TARGET_PPC64) || defined(TARGET_X86_64) || \ defined(TARGET_SPARC64) || defined(TARGET_AARCH64) || \ - defined(TARGET_RISCV)) && !defined(TARGET_ABI32) + defined(TARGET_RISCV) || defined(TARGET_LOONGARCH64)) && \ + !defined(TARGET_ABI32) struct target_statfs { abi_long f_type; abi_long f_bsize; @@ -2714,7 +2650,7 @@ struct linux_dirent { long d_ino; unsigned long d_off; unsigned short d_reclen; - char d_name[256]; /* We must not include limits.h! */ + char d_name[]; }; struct linux_dirent64 { @@ -2722,7 +2658,7 @@ struct linux_dirent64 { int64_t d_off; unsigned short d_reclen; unsigned char d_type; - char d_name[256]; + char d_name[]; }; struct target_mq_attr { @@ -2764,6 +2700,9 @@ struct target_drm_i915_getparam { #define FUTEX_TRYLOCK_PI 8 #define FUTEX_WAIT_BITSET 9 #define FUTEX_WAKE_BITSET 10 +#define FUTEX_WAIT_REQUEUE_PI 11 +#define FUTEX_CMP_REQUEUE_PI 12 +#define FUTEX_LOCK_PI2 13 #define FUTEX_PRIVATE_FLAG 128 #define FUTEX_CLOCK_REALTIME 256 @@ -2791,10 +2730,6 @@ struct target_epoll_event { #define TARGET_EP_MAX_EVENTS (INT_MAX / sizeof(struct target_epoll_event)) #endif -struct target_rlimit64 { - uint64_t rlim_cur; - uint64_t rlim_max; -}; struct target_ucred { uint32_t pid; @@ -2910,4 +2845,22 @@ struct target_statx { /* 0x100 */ }; +/* from kernel's include/linux/sched/types.h */ +struct target_sched_attr { + abi_uint size; + abi_uint sched_policy; + abi_ullong sched_flags; + abi_int sched_nice; + abi_uint sched_priority; + abi_ullong sched_runtime; + abi_ullong sched_deadline; + abi_ullong sched_period; + abi_uint sched_util_min; + abi_uint sched_util_max; +}; + +struct target_sched_param { + abi_int sched_priority; +}; + #endif diff --git a/linux-user/syscall_types.h b/linux-user/syscall_types.h index ba2c1518eb..c3b43f8022 100644 --- a/linux-user/syscall_types.h +++ b/linux-user/syscall_types.h @@ -201,6 +201,12 @@ STRUCT(loop_info64, MK_ARRAY(TYPE_CHAR, 32), /* lo_encrypt_key */ MK_ARRAY(TYPE_ULONGLONG, 2)) /* lo_init */ +STRUCT(loop_config, + TYPE_INT, /* fd */ + TYPE_INT, /* block_size */ + MK_STRUCT(STRUCT_loop_info64), /* info */ + MK_ARRAY(TYPE_ULONGLONG, 8)) /* __reserved */ + /* mag tape ioctls */ STRUCT(mtop, TYPE_SHORT, TYPE_INT) STRUCT(mtget, TYPE_LONG, TYPE_LONG, TYPE_LONG, TYPE_LONG, TYPE_LONG, diff --git a/thunk.c b/linux-user/thunk.c similarity index 99% rename from thunk.c rename to linux-user/thunk.c index fc5be1a502..dac4bf11c6 100644 --- a/thunk.c +++ b/linux-user/thunk.c @@ -17,6 +17,7 @@ * License along with this library; if not, see . 
*/ #include "qemu/osdep.h" +#include "qemu/log.h" #include "qemu.h" #include "exec/user/thunk.h" diff --git a/linux-user/trace-events b/linux-user/trace-events index e7d2f54e94..f33717f248 100644 --- a/linux-user/trace-events +++ b/linux-user/trace-events @@ -9,7 +9,7 @@ user_setup_frame(void *env, uint64_t frame_addr) "env=%p frame_addr=0x%"PRIx64 user_setup_rt_frame(void *env, uint64_t frame_addr) "env=%p frame_addr=0x%"PRIx64 user_do_rt_sigreturn(void *env, uint64_t frame_addr) "env=%p frame_addr=0x%"PRIx64 user_do_sigreturn(void *env, uint64_t frame_addr) "env=%p frame_addr=0x%"PRIx64 -user_force_sig(void *env, int target_sig, int host_sig) "env=%p signal %d (host %d)" +user_dump_core_and_abort(void *env, int target_sig, int host_sig) "env=%p signal %d (host %d)" user_handle_signal(void *env, int target_sig) "env=%p signal %d" user_host_signal(void *env, int host_sig, int target_sig) "env=%p signal %d (target %d)" user_queue_signal(void *env, int target_sig) "env=%p signal %d" diff --git a/linux-user/uaccess.c b/linux-user/uaccess.c index 6a5b029607..425cbf677f 100644 --- a/linux-user/uaccess.c +++ b/linux-user/uaccess.c @@ -3,6 +3,7 @@ #include "qemu/cutils.h" #include "qemu.h" +#include "user-internals.h" void *lock_user(int type, abi_ulong guest_addr, ssize_t len, bool copy) { diff --git a/linux-user/uname.c b/linux-user/uname.c index a09ffe1ea7..32f71f2492 100644 --- a/linux-user/uname.c +++ b/linux-user/uname.c @@ -20,7 +20,7 @@ #include "qemu/osdep.h" #include "qemu.h" -//#include "qemu-common.h" +#include "user-internals.h" #include "uname.h" /* return highest utsname machine name for emulated instruction set @@ -28,7 +28,7 @@ * NB: the default emulated CPU ("any") might not match any existing CPU, e.g. * on ARM it has all features turned on, so there is no perfect arch string to * return here */ -const char *cpu_to_uname_machine(void *cpu_env) +const char *cpu_to_uname_machine(CPUArchState *cpu_env) { #if defined(TARGET_ARM) && !defined(TARGET_AARCH64) @@ -40,7 +40,7 @@ const char *cpu_to_uname_machine(void *cpu_env) /* in theory, endianness is configurable on some ARM CPUs, but this isn't * used in user mode emulation */ -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN #define utsname_suffix "b" #else #define utsname_suffix "l" @@ -54,7 +54,7 @@ const char *cpu_to_uname_machine(void *cpu_env) return "armv5te" utsname_suffix; #elif defined(TARGET_I386) && !defined(TARGET_X86_64) /* see arch/x86/kernel/cpu/bugs.c: check_bugs(), 386, 486, 586, 686 */ - CPUState *cpu = env_cpu((CPUX86State *)cpu_env); + CPUState *cpu = env_cpu(cpu_env); int family = object_property_get_int(OBJECT(cpu), "family", NULL); if (family == 4) { return "i486"; diff --git a/linux-user/uname.h b/linux-user/uname.h index 4503094211..4ae563f46c 100644 --- a/linux-user/uname.h +++ b/linux-user/uname.h @@ -4,7 +4,7 @@ #include #include -const char *cpu_to_uname_machine(void *cpu_env); +const char *cpu_to_uname_machine(CPUArchState *cpu_env); int sys_uname(struct new_utsname *buf); #endif /* UNAME_H */ diff --git a/linux-user/user-internals.h b/linux-user/user-internals.h new file mode 100644 index 0000000000..0280e76add --- /dev/null +++ b/linux-user/user-internals.h @@ -0,0 +1,186 @@ +/* + * user-internals.h: prototypes etc internal to the linux-user implementation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any 
later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef LINUX_USER_USER_INTERNALS_H +#define LINUX_USER_USER_INTERNALS_H + +#include "exec/user/thunk.h" +#include "exec/exec-all.h" +#include "qemu/log.h" + +extern char *exec_path; +void init_task_state(TaskState *ts); +void task_settid(TaskState *); +void stop_all_tasks(void); +extern const char *qemu_uname_release; +extern unsigned long mmap_min_addr; + +typedef struct IOCTLEntry IOCTLEntry; + +typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp, + int fd, int cmd, abi_long arg); + +struct IOCTLEntry { + int target_cmd; + unsigned int host_cmd; + const char *name; + int access; + do_ioctl_fn *do_ioctl; + const argtype arg_type[5]; +}; + +extern IOCTLEntry ioctl_entries[]; + +#define IOC_R 0x0001 +#define IOC_W 0x0002 +#define IOC_RW (IOC_R | IOC_W) + +/* + * Returns true if the image uses the FDPIC ABI. If this is the case, + * we have to provide some information (loadmap, pt_dynamic_info) such + * that the program can be relocated adequately. This is also useful + * when handling signals. + */ +int info_is_fdpic(struct image_info *info); + +void target_set_brk(abi_ulong new_brk); +void syscall_init(void); +abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1, + abi_long arg2, abi_long arg3, abi_long arg4, + abi_long arg5, abi_long arg6, abi_long arg7, + abi_long arg8); +extern __thread CPUState *thread_cpu; +G_NORETURN void cpu_loop(CPUArchState *env); +abi_long get_errno(abi_long ret); +const char *target_strerror(int err); +int get_osversion(void); +void init_qemu_uname_release(void); +void fork_start(void); +void fork_end(int child); + +/** + * probe_guest_base: + * @image_name: the executable being loaded + * @loaddr: the lowest fixed address in the executable + * @hiaddr: the highest fixed address in the executable + * + * Creates the initial guest address space in the host memory space. + * + * If @loaddr == 0, then no address in the executable is fixed, + * i.e. it is fully relocatable. In that case @hiaddr is the size + * of the executable. + * + * This function will not return if a valid value for guest_base + * cannot be chosen. On return, the executable loader can expect + * + * target_mmap(loaddr, hiaddr - loaddr, ...) + * + * to succeed. 
+ */ +void probe_guest_base(const char *image_name, + abi_ulong loaddr, abi_ulong hiaddr); + +/* syscall.c */ +int host_to_target_waitstatus(int status); + +#ifdef TARGET_I386 +/* vm86.c */ +void save_v86_state(CPUX86State *env); +void handle_vm86_trap(CPUX86State *env, int trapno); +void handle_vm86_fault(CPUX86State *env); +int do_vm86(CPUX86State *env, long subfunction, abi_ulong v86_addr); +#elif defined(TARGET_SPARC64) +void sparc64_set_context(CPUSPARCState *env); +void sparc64_get_context(CPUSPARCState *env); +#endif + +static inline int is_error(abi_long ret) +{ + return (abi_ulong)ret >= (abi_ulong)(-4096); +} + +#if (TARGET_ABI_BITS == 32) && !defined(TARGET_ABI_MIPSN32) +static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) +{ +#if TARGET_BIG_ENDIAN + return ((uint64_t)word0 << 32) | word1; +#else + return ((uint64_t)word1 << 32) | word0; +#endif +} +#else /* TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) */ +static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) +{ + return word0; +} +#endif /* TARGET_ABI_BITS != 32 */ + +void print_termios(void *arg); + +/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */ +#ifdef TARGET_ARM +static inline int regpairs_aligned(CPUArchState *cpu_env, int num) +{ + return cpu_env->eabi == 1; +} +#elif defined(TARGET_MIPS) && defined(TARGET_ABI_MIPSO32) +static inline int regpairs_aligned(CPUArchState *cpu_env, int num) { return 1; } +#elif defined(TARGET_PPC) && !defined(TARGET_PPC64) +/* + * SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs + * of registers which translates to the same as ARM/MIPS, because we start with + * r3 as arg1 + */ +static inline int regpairs_aligned(CPUArchState *cpu_env, int num) { return 1; } +#elif defined(TARGET_SH4) +/* SH4 doesn't align register pairs, except for p{read,write}64 */ +static inline int regpairs_aligned(CPUArchState *cpu_env, int num) +{ + switch (num) { + case TARGET_NR_pread64: + case TARGET_NR_pwrite64: + return 1; + + default: + return 0; + } +} +#elif defined(TARGET_XTENSA) +static inline int regpairs_aligned(CPUArchState *cpu_env, int num) { return 1; } +#elif defined(TARGET_HEXAGON) +static inline int regpairs_aligned(CPUArchState *cpu_env, int num) { return 1; } +#else +static inline int regpairs_aligned(CPUArchState *cpu_env, int num) { return 0; } +#endif + +/** + * preexit_cleanup: housekeeping before the guest exits + * + * env: the CPU state + * code: the exit code + */ +void preexit_cleanup(CPUArchState *env, int code); + +/* + * Include target-specific struct and function definitions; + * they may need access to the target-independent structures + * above, so include them last. + */ +#include "target_cpu.h" +#include "target_structs.h" + +#endif diff --git a/linux-user/user-mmap.h b/linux-user/user-mmap.h new file mode 100644 index 0000000000..480ce1c114 --- /dev/null +++ b/linux-user/user-mmap.h @@ -0,0 +1,35 @@ +/* + * user-mmap.h: prototypes for linux-user guest binary loader + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef LINUX_USER_USER_MMAP_H +#define LINUX_USER_USER_MMAP_H + +int target_mprotect(abi_ulong start, abi_ulong len, int prot); +abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, + int flags, int fd, abi_ulong offset); +int target_munmap(abi_ulong start, abi_ulong len); +abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size, + abi_ulong new_size, unsigned long flags, + abi_ulong new_addr); +abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice); +extern unsigned long last_brk; +extern abi_ulong mmap_next_start; +abi_ulong mmap_find_vma(abi_ulong, abi_ulong, abi_ulong); +void mmap_fork_start(void); +void mmap_fork_end(int child); + +#endif /* LINUX_USER_USER_MMAP_H */ diff --git a/linux-user/vm86.c b/linux-user/vm86.c index 4412522c4c..c2facf3fc2 100644 --- a/linux-user/vm86.c +++ b/linux-user/vm86.c @@ -19,6 +19,7 @@ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" //#define DEBUG_VM86 diff --git a/linux-user/x86_64/target_elf.h b/linux-user/x86_64/target_elf.h index 7b76a90de8..3f628f8d66 100644 --- a/linux-user/x86_64/target_elf.h +++ b/linux-user/x86_64/target_elf.h @@ -9,6 +9,6 @@ #define X86_64_TARGET_ELF_H static inline const char *cpu_get_model(uint32_t eflags) { - return "qemu64"; + return "max"; } #endif diff --git a/linux-user/x86_64/target_mman.h b/linux-user/x86_64/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/x86_64/target_mman.h @@ -0,0 +1 @@ +#include "../generic/target_mman.h" diff --git a/linux-user/x86_64/target_prctl.h b/linux-user/x86_64/target_prctl.h new file mode 100644 index 0000000000..eb53b31ad5 --- /dev/null +++ b/linux-user/x86_64/target_prctl.h @@ -0,0 +1 @@ +/* No special prctl support required. */ diff --git a/linux-user/x86_64/target_resource.h b/linux-user/x86_64/target_resource.h new file mode 100644 index 0000000000..227259594c --- /dev/null +++ b/linux-user/x86_64/target_resource.h @@ -0,0 +1 @@ +#include "../generic/target_resource.h" diff --git a/linux-user/x86_64/target_signal.h b/linux-user/x86_64/target_signal.h index 4ea74f20dd..9d9717406f 100644 --- a/linux-user/x86_64/target_signal.h +++ b/linux-user/x86_64/target_signal.h @@ -1,24 +1,9 @@ #ifndef X86_64_TARGET_SIGNAL_H #define X86_64_TARGET_SIGNAL_H -/* this struct defines a stack used during syscall handling */ - -typedef struct target_sigaltstack { - abi_ulong ss_sp; - abi_int ss_flags; - abi_ulong ss_size; -} target_stack_t; - - -/* - * sigaltstack controls - */ -#define TARGET_SS_ONSTACK 1 -#define TARGET_SS_DISABLE 2 - -#define TARGET_MINSIGSTKSZ 2048 -#define TARGET_SIGSTKSZ 8192 - #include "../generic/signal.h" +/* For x86_64, use of SA_RESTORER is mandatory. */ +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0 + #endif /* X86_64_TARGET_SIGNAL_H */ diff --git a/linux-user/x86_64/target_structs.h b/linux-user/x86_64/target_structs.h index ce367b253b..f1181383c4 100644 --- a/linux-user/x86_64/target_structs.h +++ b/linux-user/x86_64/target_structs.h @@ -19,41 +19,7 @@ #ifndef X86_64_TARGET_STRUCTS_H #define X86_64_TARGET_STRUCTS_H -struct target_ipc_perm { - abi_int __key; /* Key. */ - abi_uint uid; /* Owner's user ID. */ - abi_uint gid; /* Owner's group ID. */ - abi_uint cuid; /* Creator's user ID. */ - abi_uint cgid; /* Creator's group ID. */ - abi_ushort mode; /* Read/write permission. */ - abi_ushort __pad1; - abi_ushort __seq; /* Sequence number. 
*/ - abi_ushort __pad2; - abi_ulong __unused1; - abi_ulong __unused2; -}; - -struct target_shmid_ds { - struct target_ipc_perm shm_perm; /* operation permission struct */ - abi_long shm_segsz; /* size of segment in bytes */ - abi_ulong shm_atime; /* time of last shmat() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused1; -#endif - abi_ulong shm_dtime; /* time of last shmdt() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused2; -#endif - abi_ulong shm_ctime; /* time of last change by shmctl() */ -#if TARGET_ABI_BITS == 32 - abi_ulong __unused3; -#endif - abi_int shm_cpid; /* pid of creator */ - abi_int shm_lpid; /* pid of last shmop */ - abi_ulong shm_nattch; /* number of current attaches */ - abi_ulong __unused4; - abi_ulong __unused5; -}; +#include "../generic/target_structs.h" /* The x86 definition differs from the generic one in that the * two padding fields exist whether the ABI is 32 bits or 64 bits. diff --git a/linux-user/x86_64/target_syscall.h b/linux-user/x86_64/target_syscall.h index 3ecccb72be..fb558345d3 100644 --- a/linux-user/x86_64/target_syscall.h +++ b/linux-user/x86_64/target_syscall.h @@ -100,7 +100,6 @@ struct target_msqid64_ds { #define TARGET_ARCH_SET_FS 0x1002 #define TARGET_ARCH_GET_FS 0x1003 #define TARGET_ARCH_GET_GS 0x1004 -#define TARGET_MINSIGSTKSZ 2048 #define TARGET_MCL_CURRENT 1 #define TARGET_MCL_FUTURE 2 #define TARGET_MCL_ONFAULT 4 diff --git a/linux-user/xtensa/cpu_loop.c b/linux-user/xtensa/cpu_loop.c index 64831c9199..d51ce05392 100644 --- a/linux-user/xtensa/cpu_loop.c +++ b/linux-user/xtensa/cpu_loop.c @@ -19,7 +19,9 @@ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "cpu_loop-common.h" +#include "signal-common.h" static void xtensa_rfw(CPUXtensaState *env) { @@ -124,7 +126,6 @@ static void xtensa_underflow12(CPUXtensaState *env) void cpu_loop(CPUXtensaState *env) { CPUState *cs = env_cpu(env); - target_siginfo_t info; abi_ulong ret; int trapnr; @@ -161,14 +162,12 @@ void cpu_loop(CPUXtensaState *env) case EXC_USER: switch (env->sregs[EXCCAUSE]) { case ILLEGAL_INSTRUCTION_CAUSE: + force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, + env->sregs[EPC1]); + break; case PRIVILEGED_CAUSE: - info.si_signo = TARGET_SIGILL; - info.si_errno = 0; - info.si_code = - env->sregs[EXCCAUSE] == ILLEGAL_INSTRUCTION_CAUSE ? 
- TARGET_ILL_ILLOPC : TARGET_ILL_PRVOPC; - info._sifields._sigfault._addr = env->sregs[EPC1]; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGILL, TARGET_ILL_PRVOPC, + env->sregs[EPC1]); break; case SYSCALL_CAUSE: @@ -182,11 +181,11 @@ void cpu_loop(CPUXtensaState *env) env->regs[2] = ret; break; - case -TARGET_ERESTARTSYS: + case -QEMU_ERESTARTSYS: env->pc -= 3; break; - case -TARGET_QEMU_ESIGRETURN: + case -QEMU_ESIGRETURN: break; } break; @@ -217,20 +216,8 @@ void cpu_loop(CPUXtensaState *env) break; case INTEGER_DIVIDE_BY_ZERO_CAUSE: - info.si_signo = TARGET_SIGFPE; - info.si_errno = 0; - info.si_code = TARGET_FPE_INTDIV; - info._sifields._sigfault._addr = env->sregs[EPC1]; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; - - case LOAD_PROHIBITED_CAUSE: - case STORE_PROHIBITED_CAUSE: - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - info.si_code = TARGET_SEGV_ACCERR; - info._sifields._sigfault._addr = env->sregs[EXCVADDR]; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTDIV, + env->sregs[EPC1]); break; default: @@ -239,10 +226,8 @@ void cpu_loop(CPUXtensaState *env) } break; case EXCP_DEBUG: - info.si_signo = TARGET_SIGTRAP; - info.si_errno = 0; - info.si_code = TARGET_TRAP_BRKPT; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, + env->sregs[EPC1]); break; case EXC_DEBUG: default: diff --git a/linux-user/xtensa/signal.c b/linux-user/xtensa/signal.c index 72771e1294..f5fb8b5cbe 100644 --- a/linux-user/xtensa/signal.c +++ b/linux-user/xtensa/signal.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu.h" +#include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" @@ -127,6 +128,29 @@ static int setup_sigcontext(struct target_rt_sigframe *frame, return 1; } +static void install_sigtramp(uint8_t *tramp) +{ +#if TARGET_BIG_ENDIAN + /* Generate instruction: MOVI a2, __NR_rt_sigreturn */ + __put_user(0x22, &tramp[0]); + __put_user(0x0a, &tramp[1]); + __put_user(TARGET_NR_rt_sigreturn, &tramp[2]); + /* Generate instruction: SYSCALL */ + __put_user(0x00, &tramp[3]); + __put_user(0x05, &tramp[4]); + __put_user(0x00, &tramp[5]); +#else + /* Generate instruction: MOVI a2, __NR_rt_sigreturn */ + __put_user(0x22, &tramp[0]); + __put_user(0xa0, &tramp[1]); + __put_user(TARGET_NR_rt_sigreturn, &tramp[2]); + /* Generate instruction: SYSCALL */ + __put_user(0x00, &tramp[3]); + __put_user(0x50, &tramp[4]); + __put_user(0x00, &tramp[5]); +#endif +} + void setup_rt_frame(int sig, struct target_sigaction *ka, target_siginfo_t *info, target_sigset_t *set, CPUXtensaState *env) @@ -163,26 +187,9 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, if (ka->sa_flags & TARGET_SA_RESTORER) { ra = ka->sa_restorer; } else { - ra = frame_addr + offsetof(struct target_rt_sigframe, retcode); -#ifdef TARGET_WORDS_BIGENDIAN - /* Generate instruction: MOVI a2, __NR_rt_sigreturn */ - __put_user(0x22, &frame->retcode[0]); - __put_user(0x0a, &frame->retcode[1]); - __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]); - /* Generate instruction: SYSCALL */ - __put_user(0x00, &frame->retcode[3]); - __put_user(0x05, &frame->retcode[4]); - __put_user(0x00, &frame->retcode[5]); -#else - /* Generate instruction: MOVI a2, __NR_rt_sigreturn */ - __put_user(0x22, &frame->retcode[0]); - __put_user(0xa0, &frame->retcode[1]); - __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]); - /* Generate instruction: 
SYSCALL */ - __put_user(0x00, &frame->retcode[3]); - __put_user(0x50, &frame->retcode[4]); - __put_user(0x00, &frame->retcode[5]); -#endif + /* Not used, but retain for ABI compatibility. */ + install_sigtramp(frame->retcode); + ra = default_rt_sigreturn; } memset(env->regs, 0, sizeof(env->regs)); env->pc = ka->_sa_handler; @@ -256,10 +263,20 @@ long do_rt_sigreturn(CPUXtensaState *env) target_restore_altstack(&frame->uc.tuc_stack, env); unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; + return -QEMU_ESIGRETURN; +} + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint8_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 6, 0); + assert(tramp != NULL); + + default_rt_sigreturn = sigtramp_page; + install_sigtramp(tramp); + unlock_user(tramp, sigtramp_page, 6); } diff --git a/linux-user/xtensa/target_mman.h b/linux-user/xtensa/target_mman.h new file mode 100644 index 0000000000..e7ba6070fe --- /dev/null +++ b/linux-user/xtensa/target_mman.h @@ -0,0 +1 @@ +#include "../generic/target_mman.h" diff --git a/linux-user/xtensa/target_prctl.h b/linux-user/xtensa/target_prctl.h new file mode 100644 index 0000000000..eb53b31ad5 --- /dev/null +++ b/linux-user/xtensa/target_prctl.h @@ -0,0 +1 @@ +/* No special prctl support required. */ diff --git a/linux-user/xtensa/target_resource.h b/linux-user/xtensa/target_resource.h new file mode 100644 index 0000000000..227259594c --- /dev/null +++ b/linux-user/xtensa/target_resource.h @@ -0,0 +1 @@ +#include "../generic/target_resource.h" diff --git a/linux-user/xtensa/target_signal.h b/linux-user/xtensa/target_signal.h index c60bf656f6..e4b1bea5cb 100644 --- a/linux-user/xtensa/target_signal.h +++ b/linux-user/xtensa/target_signal.h @@ -1,23 +1,8 @@ #ifndef XTENSA_TARGET_SIGNAL_H #define XTENSA_TARGET_SIGNAL_H -/* this struct defines a stack used during syscall handling */ - -typedef struct target_sigaltstack { - abi_ulong ss_sp; - abi_int ss_flags; - abi_ulong ss_size; -} target_stack_t; - -/* - * sigaltstack controls - */ -#define TARGET_SS_ONSTACK 1 -#define TARGET_SS_DISABLE 2 - -#define TARGET_MINSIGSTKSZ 2048 -#define TARGET_SIGSTKSZ 8192 - #include "../generic/signal.h" +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif diff --git a/linux-user/xtensa/target_structs.h b/linux-user/xtensa/target_structs.h index 9cde6844b8..cb1b3411cf 100644 --- a/linux-user/xtensa/target_structs.h +++ b/linux-user/xtensa/target_structs.h @@ -15,7 +15,7 @@ struct target_ipc_perm { struct target_semid64_ds { struct target_ipc_perm sem_perm; -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN abi_ulong __unused1; abi_ulong sem_otime; abi_ulong __unused2; diff --git a/meson.build b/meson.build index 84cf016077..0ae9c6f8a1 100644 --- a/meson.build +++ b/meson.build @@ -1,14 +1,16 @@ -project('qemu', ['c'], meson_version: '>=0.55.0', - default_options: ['warning_level=1', 'c_std=gnu11', 'cpp_std=c++17', 'b_colorout=auto'] + - (meson.version().version_compare('>=0.56.0') ? 
[ 'b_staticpic=false' ] : []), - version: run_command('head', meson.source_root() / 'QEMU_VERSION').stdout().strip()) +project('qemu', ['c'], meson_version: '>=0.61.3', + default_options: ['warning_level=1', 'c_std=gnu11', 'cpp_std=c++17', 'b_colorout=auto', + 'b_staticpic=false', 'stdsplit=false', 'optimization=3', 'b_pie=true'], + version: files('QEMU_VERSION')) + +add_test_setup('quick', exclude_suites: ['slow', 'thorough'], is_default: true) +add_test_setup('slow', exclude_suites: ['thorough'], env: ['G_TEST_SLOW=1', 'SPEED=slow']) +add_test_setup('thorough', env: ['G_TEST_SLOW=1', 'SPEED=thorough']) + +meson.add_postconf_script(find_program('scripts/symlink-install-tree.py')) not_found = dependency('', required: false) -if meson.version().version_compare('>=0.56.0') - keyval = import('keyval') -else - keyval = import('unstable-keyval') -endif +keyval = import('keyval') ss = import('sourceset') fs = import('fs') @@ -42,25 +44,53 @@ qemu_icondir = get_option('datadir') / 'icons' config_host_data = configuration_data() genh = [] +qapi_trace_events = [] + +bsd_oses = ['gnu/kfreebsd', 'freebsd', 'netbsd', 'openbsd', 'dragonfly', 'darwin'] +supported_oses = ['windows', 'freebsd', 'netbsd', 'openbsd', 'darwin', 'sunos', 'linux'] +supported_cpus = ['ppc', 'ppc64', 's390x', 'riscv', 'x86', 'x86_64', + 'arm', 'aarch64', 'loongarch64', 'mips', 'mips64', 'sparc64'] + +cpu = host_machine.cpu_family() + +# Unify riscv* to a single family. +if cpu in ['riscv32', 'riscv64'] + cpu = 'riscv' +endif + +targetos = host_machine.system() target_dirs = config_host['TARGET_DIRS'].split() -have_user = false +have_linux_user = false +have_bsd_user = false have_system = false foreach target : target_dirs - have_user = have_user or target.endswith('-user') + have_linux_user = have_linux_user or target.endswith('linux-user') + have_bsd_user = have_bsd_user or target.endswith('bsd-user') have_system = have_system or target.endswith('-softmmu') endforeach -have_tools = 'CONFIG_TOOLS' in config_host +have_user = have_linux_user or have_bsd_user +have_tools = get_option('tools') \ + .disable_auto_if(not have_system) \ + .allowed() +have_ga = get_option('guest_agent') \ + .disable_auto_if(not have_system and not have_tools) \ + .require(targetos in ['sunos', 'linux', 'windows', 'freebsd'], + error_message: 'unsupported OS for QEMU guest agent') \ + .allowed() have_block = have_system or have_tools python = import('python').find_installation() -supported_oses = ['windows', 'freebsd', 'netbsd', 'openbsd', 'darwin', 'sunos', 'linux'] -supported_cpus = ['ppc', 'ppc64', 's390x', 'riscv32', 'riscv64', 'x86', 'x86_64', - 'arm', 'aarch64', 'mips', 'mips64', 'sparc', 'sparc64'] - -cpu = host_machine.cpu_family() -targetos = host_machine.system() +if cpu not in supported_cpus + host_arch = 'unknown' +elif cpu == 'x86' + host_arch = 'i386' +elif cpu == 'mips64' + host_arch = 'mips' +else + host_arch = cpu +endif if cpu in ['x86', 'x86_64'] kvm_targets = ['i386-softmmu', 'x86_64-softmmu'] @@ -72,13 +102,28 @@ elif cpu in ['ppc', 'ppc64'] kvm_targets = ['ppc-softmmu', 'ppc64-softmmu'] elif cpu in ['mips', 'mips64'] kvm_targets = ['mips-softmmu', 'mipsel-softmmu', 'mips64-softmmu', 'mips64el-softmmu'] +elif cpu in ['riscv'] + kvm_targets = ['riscv32-softmmu', 'riscv64-softmmu'] else kvm_targets = [] endif +kvm_targets_c = '""' +if get_option('kvm').allowed() and targetos == 'linux' + kvm_targets_c = '"' + '" ,"'.join(kvm_targets) + '"' +endif +config_host_data.set('CONFIG_KVM_TARGETS', kvm_targets_c) + accelerator_targets = { 
'CONFIG_KVM': kvm_targets } + +if cpu in ['aarch64'] + accelerator_targets += { + 'CONFIG_HVF': ['aarch64-softmmu'] + } +endif + if cpu in ['x86', 'x86_64', 'arm', 'aarch64'] - # i368 emulator provides xenpv machine type for multiple architectures + # i386 emulator provides xenpv machine type for multiple architectures accelerator_targets += { 'CONFIG_XEN': ['i386-softmmu', 'x86_64-softmmu'], } @@ -99,33 +144,141 @@ if targetos != 'darwin' endif edk2_targets = [ 'arm-softmmu', 'aarch64-softmmu', 'i386-softmmu', 'x86_64-softmmu' ] -install_edk2_blobs = false -if get_option('install_blobs') - foreach target : target_dirs - install_edk2_blobs = install_edk2_blobs or target in edk2_targets - endforeach +unpack_edk2_blobs = false +foreach target : edk2_targets + if target in target_dirs + bzip2 = find_program('bzip2', required: get_option('install_blobs')) + unpack_edk2_blobs = bzip2.found() + break + endif +endforeach + +dtrace = not_found +stap = not_found +if 'dtrace' in get_option('trace_backends') + dtrace = find_program('dtrace', required: true) + stap = find_program('stap', required: false) + if stap.found() + # Workaround to avoid dtrace(1) producing a file with 'hidden' symbol + # visibility. Define STAP_SDT_V2 to produce 'default' symbol visibility + # instead. QEMU --enable-modules depends on this because the SystemTap + # semaphores are linked into the main binary and not the module's shared + # object. + add_global_arguments('-DSTAP_SDT_V2', + native: false, language: ['c', 'cpp', 'objc']) + endif endif -bzip2 = find_program('bzip2', required: install_edk2_blobs) +if get_option('iasl') == '' + iasl = find_program('iasl', required: false) +else + iasl = find_program(get_option('iasl'), required: true) +endif ################## # Compiler flags # ################## -# Specify linker-script with add_project_link_arguments so that it is not placed -# within a linker --start-group/--end-group pair -if 'CONFIG_FUZZ' in config_host - add_project_link_arguments(['-Wl,-T,', - (meson.current_source_dir() / 'tests/qtest/fuzz/fork_fuzz.ld')], - native: false, language: ['c', 'cpp', 'objc']) +qemu_cflags = config_host['QEMU_CFLAGS'].split() +qemu_objcflags = config_host['QEMU_OBJCFLAGS'].split() +qemu_ldflags = config_host['QEMU_LDFLAGS'].split() + +if enable_static + qemu_ldflags += get_option('b_pie') ? '-static-pie' : '-static' endif -add_global_arguments(config_host['QEMU_CFLAGS'].split(), - native: false, language: ['c', 'objc']) -add_global_arguments(config_host['QEMU_CXXFLAGS'].split(), - native: false, language: 'cpp') -add_global_link_arguments(config_host['QEMU_LDFLAGS'].split(), - native: false, language: ['c', 'cpp', 'objc']) +# Detect support for PT_GNU_RELRO + DT_BIND_NOW. +# The combination is known as "full relro", because .got.plt is read-only too. 
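For context on the relro/bind-now detection above and the qemu_ldflags line that follows: cc.get_supported_link_arguments() effectively links a trivial program with each candidate flag and keeps only the flags the toolchain accepts. A minimal standalone sketch of that probe; the compiler invocation in the comment is an assumption, not taken from the build:

    /*
     * Rough standalone equivalent of the link-flag probe: if this trivial
     * program links with the candidate flags, they are considered supported.
     *
     *     cc -Wl,-z,relro -Wl,-z,now probe.c -o probe   (hypothetical command)
     *
     * With both flags accepted, the resulting binary gets "full RELRO":
     * the GOT, including .got.plt, is made read-only after relocation.
     */
    int main(void)
    {
        return 0;
    }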
+qemu_ldflags += cc.get_supported_link_arguments('-Wl,-z,relro', '-Wl,-z,now') + +if targetos == 'windows' + qemu_ldflags += cc.get_supported_link_arguments('-Wl,--no-seh', '-Wl,--nxcompat') + # Disable ASLR for debug builds to allow debugging with gdb + if get_option('optimization') == '0' + qemu_ldflags += cc.get_supported_link_arguments('-Wl,--dynamicbase') + endif +endif + +if get_option('gprof') + qemu_cflags += ['-p'] + qemu_objcflags += ['-p'] + qemu_ldflags += ['-p'] +endif + +# Specify linker-script with add_project_link_arguments so that it is not placed +# within a linker --start-group/--end-group pair +if get_option('fuzzing') + add_project_link_arguments(['-Wl,-T,', + (meson.current_source_dir() / 'tests/qtest/fuzz/fork_fuzz.ld')], + native: false, language: ['c', 'cpp', 'objc']) + + # Specify a filter to only instrument code that is directly related to + # virtual-devices. + configure_file(output: 'instrumentation-filter', + input: 'scripts/oss-fuzz/instrumentation-filter-template', + copy: true) + + if cc.compiles('int main () { return 0; }', + name: '-fsanitize-coverage-allowlist=/dev/null', + args: ['-fsanitize-coverage-allowlist=/dev/null', + '-fsanitize-coverage=trace-pc'] ) + add_global_arguments('-fsanitize-coverage-allowlist=instrumentation-filter', + native: false, language: ['c', 'cpp', 'objc']) + endif + + if get_option('fuzzing_engine') == '' + # Add CFLAGS to tell clang to add fuzzer-related instrumentation to all the + # compiled code. To build non-fuzzer binaries with --enable-fuzzing, link + # everything with fsanitize=fuzzer-no-link. Otherwise, the linker will be + # unable to bind the fuzzer-related callbacks added by instrumentation. + add_global_arguments('-fsanitize=fuzzer-no-link', + native: false, language: ['c', 'cpp', 'objc']) + add_global_link_arguments('-fsanitize=fuzzer-no-link', + native: false, language: ['c', 'cpp', 'objc']) + # For the actual fuzzer binaries, we need to link against the libfuzzer + # library. They need to be configurable, to support OSS-Fuzz + fuzz_exe_ldflags = ['-fsanitize=fuzzer'] + else + # LIB_FUZZING_ENGINE was set; assume we are running on OSS-Fuzz, and + # the needed CFLAGS have already been provided + fuzz_exe_ldflags = get_option('fuzzing_engine').split() + endif +endif + +add_global_arguments(qemu_cflags, native: false, language: ['c']) +add_global_arguments(qemu_objcflags, native: false, language: ['objc']) + +# Check that the C++ compiler exists and works with the C compiler. +link_language = 'c' +linker = cc +qemu_cxxflags = [] +if add_languages('cpp', required: false, native: false) + cxx = meson.get_compiler('cpp') + add_global_arguments(['-D__STDC_LIMIT_MACROS', '-D__STDC_CONSTANT_MACROS', '-D__STDC_FORMAT_MACROS'], + native: false, language: 'cpp') + foreach k: qemu_cflags + if k not in ['-Wstrict-prototypes', '-Wmissing-prototypes', '-Wnested-externs', + '-Wold-style-declaration', '-Wold-style-definition', '-Wredundant-decls'] + qemu_cxxflags += [k] + endif + endforeach + add_global_arguments(qemu_cxxflags, native: false, language: 'cpp') + + if cxx.links(files('scripts/main.c'), args: qemu_cflags) + link_language = 'cpp' + linker = cxx + else + message('C++ compiler does not work with C compiler') + message('Disabling C++-specific optional code') + endif +endif + +# Exclude --warn-common with TSan to suppress warnings from the TSan libraries. 
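For context on the fuzzing flags above (-fsanitize=fuzzer-no-link and the coverage allowlist): they instrument ordinary objects so that a libFuzzer driver can exercise them. A minimal sketch of the kind of libFuzzer entry point being instrumented; parse_packet() is a hypothetical stand-in, and QEMU's real fuzz targets live under tests/qtest/fuzz:

    #include <stddef.h>
    #include <stdint.h>

    /* Generic libFuzzer entry point; the fuzzer repeatedly calls this with
     * mutated inputs and relies on the coverage instrumentation added by the
     * flags above to guide mutation. */
    int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
    {
        if (size < 4) {
            return 0;           /* ignore trivially short inputs */
        }
        /* parse_packet(data, size);   hypothetical code under test */
        return 0;
    }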
+if targetos != 'sunos' and not config_host.has_key('CONFIG_TSAN') + qemu_ldflags += linker.get_supported_link_arguments('-Wl,--warn-common') +endif + +add_global_link_arguments(qemu_ldflags, native: false, language: ['c', 'cpp', 'objc']) if targetos == 'linux' add_project_arguments('-isystem', meson.current_source_dir() / 'linux-headers', @@ -136,13 +289,8 @@ endif add_project_arguments('-iquote', '.', '-iquote', meson.current_source_dir(), '-iquote', meson.current_source_dir() / 'include', - '-iquote', meson.current_source_dir() / 'disas/libvixl', language: ['c', 'cpp', 'objc']) -link_language = meson.get_external_property('link_language', 'cpp') -if link_language == 'cpp' - add_languages('cpp', required: true, native: false) -endif if host_machine.system() == 'darwin' add_languages('objc', required: false, native: false) endif @@ -174,15 +322,71 @@ xemu_version = custom_target('xemu-version-macro.h', # Target-specific checks and dependencies # ########################################### -if targetos != 'linux' and get_option('mpath').enabled() - error('Multipath is supported only on Linux') +# Fuzzing +if get_option('fuzzing') and get_option('fuzzing_engine') == '' and \ + not cc.links(''' + #include + #include + int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); + int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { return 0; } + ''', + args: ['-Werror', '-fsanitize=fuzzer']) + error('Your compiler does not support -fsanitize=fuzzer') endif -if targetos != 'linux' and get_option('multiprocess').enabled() - error('Multiprocess QEMU is supported only on Linux') +# Tracing backends +if 'ftrace' in get_option('trace_backends') and targetos != 'linux' + error('ftrace is supported only on Linux') +endif +if 'syslog' in get_option('trace_backends') and not cc.compiles(''' + #include + int main(void) { + openlog("qemu", LOG_PID, LOG_DAEMON); + syslog(LOG_INFO, "configure"); + return 0; + }''') + error('syslog is not supported on this system') endif -multiprocess_allowed = targetos == 'linux' and not get_option('multiprocess').disabled() +# Miscellaneous Linux-only features +get_option('mpath') \ + .require(targetos == 'linux', error_message: 'Multipath is supported only on Linux') + +multiprocess_allowed = get_option('multiprocess') \ + .require(targetos == 'linux', error_message: 'Multiprocess QEMU is supported only on Linux') \ + .allowed() + +vfio_user_server_allowed = get_option('vfio_user_server') \ + .require(targetos == 'linux', error_message: 'vfio-user server is supported only on Linux') \ + .allowed() + +have_tpm = get_option('tpm') \ + .require(targetos != 'windows', error_message: 'TPM emulation only available on POSIX systems') \ + .allowed() + +# vhost +have_vhost_user = get_option('vhost_user') \ + .disable_auto_if(targetos != 'linux') \ + .require(targetos != 'windows', + error_message: 'vhost-user is not available on Windows').allowed() +have_vhost_vdpa = get_option('vhost_vdpa') \ + .require(targetos == 'linux', + error_message: 'vhost-vdpa is only available on Linux').allowed() +have_vhost_kernel = get_option('vhost_kernel') \ + .require(targetos == 'linux', + error_message: 'vhost-kernel is only available on Linux').allowed() +have_vhost_user_crypto = get_option('vhost_crypto') \ + .require(have_vhost_user, + error_message: 'vhost-crypto requires vhost-user to be enabled').allowed() + +have_vhost = have_vhost_user or have_vhost_vdpa or have_vhost_kernel + +have_vhost_net_user = have_vhost_user and get_option('vhost_net').allowed() +have_vhost_net_vdpa = 
have_vhost_vdpa and get_option('vhost_net').allowed() +have_vhost_net_kernel = have_vhost_kernel and get_option('vhost_net').allowed() +have_vhost_net = have_vhost_net_kernel or have_vhost_net_user or have_vhost_net_vdpa + +# Target-specific libraries and flags libm = cc.find_library('m', required: false) threads = dependency('threads') util = cc.find_library('util', required: false) @@ -194,7 +398,14 @@ iokit = [] emulator_link_args = [] nvmm =not_found hvf = not_found +midl = not_found +widl = not_found +pathcch = not_found +host_dsosuf = '.so' if targetos == 'windows' + midl = find_program('midl', required: false) + widl = find_program('widl', required: false) + pathcch = cc.find_library('pathcch') socket = cc.find_library('ws2_32') winmm = cc.find_library('winmm') @@ -203,9 +414,11 @@ if targetos == 'windows' depend_files: files('pc-bios/qemu-nsis.ico'), include_directories: include_directories('.'), depends: xemu_version) + host_dsosuf = '.dll' elif targetos == 'darwin' coref = dependency('appleframeworks', modules: 'CoreFoundation') iokit = dependency('appleframeworks', modules: 'IOKit', required: false) + host_dsosuf = '.dylib' elif targetos == 'sunos' socket = [cc.find_library('socket'), cc.find_library('nsl'), @@ -215,23 +428,18 @@ elif targetos == 'haiku' cc.find_library('network'), cc.find_library('bsd')] elif targetos == 'openbsd' - if not get_option('tcg').disabled() and target_dirs.length() > 0 + if get_option('tcg').allowed() and target_dirs.length() > 0 # Disable OpenBSD W^X if available emulator_link_args = cc.get_supported_link_arguments('-Wl,-z,wxneeded') endif endif +# Target-specific configuration of accelerators accelerators = [] -if not get_option('kvm').disabled() and targetos == 'linux' +if get_option('kvm').allowed() and targetos == 'linux' accelerators += 'CONFIG_KVM' endif -if not get_option('xen').disabled() and 'CONFIG_XEN_BACKEND' in config_host - accelerators += 'CONFIG_XEN' - have_xen_pci_passthrough = not get_option('xen_pci_passthrough').disabled() and targetos == 'linux' -else - have_xen_pci_passthrough = false -endif -if not get_option('whpx').disabled() and targetos == 'windows' +if get_option('whpx').allowed() and targetos == 'windows' if get_option('whpx').enabled() and host_machine.cpu() != 'x86_64' error('WHPX requires 64-bit host') elif cc.has_header('WinHvPlatform.h', required: get_option('whpx')) and \ @@ -239,37 +447,35 @@ if not get_option('whpx').disabled() and targetos == 'windows' accelerators += 'CONFIG_WHPX' endif endif -if not get_option('hvf').disabled() +if get_option('hvf').allowed() hvf = dependency('appleframeworks', modules: 'Hypervisor', required: get_option('hvf')) if hvf.found() accelerators += 'CONFIG_HVF' endif endif -if not get_option('hax').disabled() +if get_option('hax').allowed() if get_option('hax').enabled() or targetos in ['windows', 'darwin', 'netbsd'] accelerators += 'CONFIG_HAX' endif endif if targetos == 'netbsd' - if cc.has_header_symbol('nvmm.h', 'nvmm_cpu_stop', required: get_option('nvmm')) - nvmm = cc.find_library('nvmm', required: get_option('nvmm')) - endif + nvmm = cc.find_library('nvmm', required: get_option('nvmm')) if nvmm.found() accelerators += 'CONFIG_NVMM' endif endif -tcg_arch = config_host['ARCH'] -if not get_option('tcg').disabled() - if cpu not in supported_cpus +tcg_arch = host_arch +if get_option('tcg').allowed() + if host_arch == 'unknown' if get_option('tcg_interpreter') - warning('Unsupported CPU @0@, will use TCG with TCI (experimental and slow)'.format(cpu)) + warning('Unsupported CPU @0@, 
will use TCG with TCI (slow)'.format(cpu)) else error('Unsupported CPU @0@, try --enable-tcg-interpreter'.format(cpu)) endif elif get_option('tcg_interpreter') - warning('Use of the TCG interpretor is not recommended on this host') + warning('Use of the TCG interpreter is not recommended on this host') warning('architecture. There is a native TCG execution backend available') warning('which provides substantially better performance and reliability.') warning('It is strongly recommended to remove the --enable-tcg-interpreter') @@ -278,16 +484,10 @@ if not get_option('tcg').disabled() endif if get_option('tcg_interpreter') tcg_arch = 'tci' - elif config_host['ARCH'] == 'sparc64' - tcg_arch = 'sparc' - elif config_host['ARCH'] == 's390x' - tcg_arch = 's390' - elif config_host['ARCH'] in ['x86_64', 'x32'] + elif host_arch == 'x86_64' tcg_arch = 'i386' - elif config_host['ARCH'] == 'ppc64' + elif host_arch == 'ppc64' tcg_arch = 'ppc' - elif config_host['ARCH'] in ['riscv32', 'riscv64'] - tcg_arch = 'riscv' endif add_project_arguments('-iquote', meson.current_source_dir() / 'tcg' / tcg_arch, language: ['c', 'cpp', 'objc']) @@ -308,13 +508,6 @@ endif if 'CONFIG_WHPX' not in accelerators and get_option('whpx').enabled() error('WHPX not available on this platform') endif -if not have_xen_pci_passthrough and get_option('xen_pci_passthrough').enabled() - if 'CONFIG_XEN' in accelerators - error('Xen PCI passthrough not available on this platform') - else - error('Xen PCI passthrough requested but Xen not enabled') - endif -endif ################ # Dependencies # @@ -325,37 +518,81 @@ endif add_project_arguments(config_host['GLIB_CFLAGS'].split(), native: false, language: ['c', 'cpp', 'objc']) glib = declare_dependency(compile_args: config_host['GLIB_CFLAGS'].split(), - link_args: config_host['GLIB_LIBS'].split()) + link_args: config_host['GLIB_LIBS'].split(), + version: config_host['GLIB_VERSION'], + variables: { + 'bindir': config_host['GLIB_BINDIR'], + }) # override glib dep with the configure results (for subprojects) meson.override_dependency('glib-2.0', glib) gio = not_found -if 'CONFIG_GIO' in config_host - gio = declare_dependency(compile_args: config_host['GIO_CFLAGS'].split(), - link_args: config_host['GIO_LIBS'].split()) +gdbus_codegen = not_found +gdbus_codegen_error = '@0@ requires gdbus-codegen, please install libgio' +if not get_option('gio').auto() or have_system + gio = dependency('gio-2.0', required: get_option('gio'), + method: 'pkg-config', kwargs: static_kwargs) + if gio.found() and not cc.links(''' + #include + int main(void) + { + g_dbus_proxy_new_sync(0, 0, 0, 0, 0, 0, 0, 0); + return 0; + }''', dependencies: [glib, gio]) + if get_option('gio').enabled() + error('The installed libgio is broken for static linking') + endif + gio = not_found + endif + if gio.found() + gdbus_codegen = find_program(gio.get_variable('gdbus_codegen'), + required: get_option('gio')) + gio_unix = dependency('gio-unix-2.0', required: get_option('gio'), + method: 'pkg-config', kwargs: static_kwargs) + gio = declare_dependency(dependencies: [gio, gio_unix], + version: gio.version()) + endif endif +if gdbus_codegen.found() and get_option('cfi') + gdbus_codegen = not_found + gdbus_codegen_error = '@0@ uses gdbus-codegen, which does not support control flow integrity' +endif + lttng = not_found -if 'CONFIG_TRACE_UST' in config_host - lttng = declare_dependency(link_args: config_host['LTTNG_UST_LIBS'].split()) +if 'ust' in get_option('trace_backends') + lttng = dependency('lttng-ust', required: true, version: 
'>= 2.1', + method: 'pkg-config', kwargs: static_kwargs) endif pixman = not_found if have_system or have_tools pixman = dependency('pixman-1', required: have_system, version:'>=0.21.8', method: 'pkg-config', kwargs: static_kwargs) endif -libaio = cc.find_library('aio', required: false) zlib = dependency('zlib', required: true, kwargs: static_kwargs) +libaio = not_found +if not get_option('linux_aio').auto() or have_block + libaio = cc.find_library('aio', has_headers: ['libaio.h'], + required: get_option('linux_aio'), + kwargs: static_kwargs) +endif + +linux_io_uring_test = ''' + #include + #include + + int main(void) { return 0; }''' + linux_io_uring = not_found if not get_option('linux_io_uring').auto() or have_block - linux_io_uring = dependency('liburing', required: get_option('linux_io_uring'), + linux_io_uring = dependency('liburing', version: '>=0.3', + required: get_option('linux_io_uring'), method: 'pkg-config', kwargs: static_kwargs) + if not cc.links(linux_io_uring_test) + linux_io_uring = not_found + endif endif -libxml2 = not_found -if not get_option('libxml2').auto() or have_block - libxml2 = dependency('libxml-2.0', required: get_option('libxml2'), - method: 'pkg-config', kwargs: static_kwargs) -endif + libnfs = not_found if not get_option('libnfs').auto() or have_block libnfs = dependency('libnfs', version: '>=1.9.3', @@ -375,7 +612,7 @@ libattr_test = ''' libattr = not_found have_old_libattr = false -if not get_option('attr').disabled() +if get_option('attr').allowed() if cc.links(libattr_test) libattr = declare_dependency() else @@ -396,19 +633,32 @@ if not get_option('attr').disabled() endif endif -cocoa = dependency('appleframeworks', modules: 'Cocoa', required: get_option('cocoa')) -if cocoa.found() and get_option('sdl').enabled() - error('Cocoa and SDL cannot be enabled at the same time') -endif -if cocoa.found() and get_option('gtk').enabled() - error('Cocoa and GTK+ cannot be enabled at the same time') +cocoa = dependency('appleframeworks', modules: ['Cocoa', 'CoreVideo'], + required: get_option('cocoa')) + +vmnet = dependency('appleframeworks', modules: 'vmnet', required: get_option('vmnet')) +if vmnet.found() and not cc.has_header_symbol('vmnet/vmnet.h', + 'VMNET_BRIDGED_MODE', + dependencies: vmnet) + vmnet = not_found + if get_option('vmnet').enabled() + error('vmnet.framework API is outdated') + else + warning('vmnet.framework API is outdated, disabling') + endif endif seccomp = not_found +seccomp_has_sysrawrc = false if not get_option('seccomp').auto() or have_system or have_tools seccomp = dependency('libseccomp', version: '>=2.3.0', required: get_option('seccomp'), method: 'pkg-config', kwargs: static_kwargs) + if seccomp.found() + seccomp_has_sysrawrc = cc.has_header_symbol('seccomp.h', + 'SCMP_FLTATR_API_SYSRAWRC', + dependencies: seccomp) + endif endif libcap_ng = not_found @@ -438,43 +688,87 @@ else xkbcommon = dependency('xkbcommon', required: get_option('xkbcommon'), method: 'pkg-config', kwargs: static_kwargs) endif -vde = not_found -if config_host.has_key('CONFIG_VDE') - vde = declare_dependency(link_args: config_host['VDE_LIBS'].split()) -endif -pulse = not_found -if 'CONFIG_LIBPULSE' in config_host - pulse = declare_dependency(compile_args: config_host['PULSE_CFLAGS'].split(), - link_args: config_host['PULSE_LIBS'].split()) -endif -alsa = not_found -if 'CONFIG_ALSA' in config_host - alsa = declare_dependency(compile_args: config_host['ALSA_CFLAGS'].split(), - link_args: config_host['ALSA_LIBS'].split()) -endif -jack = not_found -if 'CONFIG_LIBJACK' 
in config_host - jack = declare_dependency(link_args: config_host['JACK_LIBS'].split()) -endif -spice = not_found -spice_headers = not_found -spice_protocol = not_found -if 'CONFIG_SPICE' in config_host - spice = declare_dependency(compile_args: config_host['SPICE_CFLAGS'].split(), - link_args: config_host['SPICE_LIBS'].split()) - spice_headers = declare_dependency(compile_args: config_host['SPICE_CFLAGS'].split()) -endif -if 'CONFIG_SPICE_PROTOCOL' in config_host - spice_protocol = declare_dependency(compile_args: config_host['SPICE_PROTOCOL_CFLAGS'].split()) -endif -rt = cc.find_library('rt', required: false) -libdl = not_found -if 'CONFIG_PLUGIN' in config_host or get_option('renderdoc').enabled() - libdl = cc.find_library('dl', required: false) - if targetos != 'windows' and not cc.has_function('dlopen', dependencies: libdl) - error('dlopen not found') + +slirp = not_found +if not get_option('slirp').auto() or have_system + slirp = dependency('slirp', required: get_option('slirp'), + method: 'pkg-config', kwargs: static_kwargs) + # slirp < 4.7 is incompatible with CFI support in QEMU. This is because + # it passes function pointers within libslirp as callbacks for timers. + # When using a system-wide shared libslirp, the type information for the + # callback is missing and the timer call produces a false positive with CFI. + # Do not use the "version" keyword argument to produce a better error. + # with control-flow integrity. + if get_option('cfi') and slirp.found() and slirp.version().version_compare('<4.7') + if get_option('slirp').enabled() + error('Control-Flow Integrity requires libslirp 4.7.') + else + warning('Cannot use libslirp since Control-Flow Integrity requires libslirp >= 4.7.') + slirp = not_found + endif endif endif + +vde = not_found +if not get_option('vde').auto() or have_system or have_tools + vde = cc.find_library('vdeplug', has_headers: ['libvdeplug.h'], + required: get_option('vde'), + kwargs: static_kwargs) +endif +if vde.found() and not cc.links(''' + #include + int main(void) + { + struct vde_open_args a = {0, 0, 0}; + char s[] = ""; + vde_open(s, s, &a); + return 0; + }''', dependencies: vde) + vde = not_found + if get_option('cap_ng').enabled() + error('could not link libvdeplug') + else + warning('could not link libvdeplug, disabling') + endif +endif + +pulse = not_found +if not get_option('pa').auto() or (targetos == 'linux' and have_system) + pulse = dependency('libpulse', required: get_option('pa'), + method: 'pkg-config', kwargs: static_kwargs) +endif +alsa = not_found +if not get_option('alsa').auto() or (targetos == 'linux' and have_system) + alsa = dependency('alsa', required: get_option('alsa'), + method: 'pkg-config', kwargs: static_kwargs) +endif +jack = not_found +if not get_option('jack').auto() or have_system + jack = dependency('jack', required: get_option('jack'), + method: 'pkg-config', kwargs: static_kwargs) +endif +sndio = not_found +if not get_option('sndio').auto() or have_system + sndio = dependency('sndio', required: get_option('sndio'), + method: 'pkg-config', kwargs: static_kwargs) +endif + +spice_protocol = not_found +if not get_option('spice_protocol').auto() or have_system + spice_protocol = dependency('spice-protocol', version: '>=0.12.3', + required: get_option('spice_protocol'), + method: 'pkg-config', kwargs: static_kwargs) +endif +spice = not_found +if not get_option('spice').auto() or have_system + spice = dependency('spice-server', version: '>=0.12.5', + required: get_option('spice'), + method: 'pkg-config', kwargs: 
static_kwargs) +endif +spice_headers = spice.partial_dependency(compile_args: true, includes: true) + +rt = cc.find_library('rt', required: false) + libiscsi = not_found if not get_option('libiscsi').auto() or have_block libiscsi = dependency('libiscsi', version: '>=1.9.0', @@ -487,18 +781,22 @@ if not get_option('zstd').auto() or have_block required: get_option('zstd'), method: 'pkg-config', kwargs: static_kwargs) endif -gbm = not_found -if 'CONFIG_GBM' in config_host - gbm = declare_dependency(compile_args: config_host['GBM_CFLAGS'].split(), - link_args: config_host['GBM_LIBS'].split()) -endif virgl = not_found -if not get_option('virglrenderer').auto() or have_system + +have_vhost_user_gpu = have_tools and targetos == 'linux' and pixman.found() +if not get_option('virglrenderer').auto() or have_system or have_vhost_user_gpu virgl = dependency('virglrenderer', method: 'pkg-config', required: get_option('virglrenderer'), kwargs: static_kwargs) endif +blkio = not_found +if not get_option('blkio').auto() or have_block + blkio = dependency('blkio', + method: 'pkg-config', + required: get_option('blkio'), + kwargs: static_kwargs) +endif curl = not_found if not get_option('curl').auto() or have_block curl = dependency('libcurl', version: '>=7.29.0', @@ -517,7 +815,7 @@ endif mpathlibs = [libudev] mpathpersist = not_found mpathpersist_new_api = false -if targetos == 'linux' and have_tools and not get_option('mpath').disabled() +if targetos == 'linux' and have_tools and get_option('mpath').allowed() mpath_test_source_new = ''' #include #include @@ -586,8 +884,11 @@ endif iconv = not_found curses = not_found -if have_system and not get_option('curses').disabled() +if have_system and get_option('curses').allowed() curses_test = ''' + #if defined(__APPLE__) || defined(__OpenBSD__) + #define _XOPEN_SOURCE_EXTENDED 1 + #endif #include #include #include @@ -602,16 +903,12 @@ if have_system and not get_option('curses').disabled() }''' curses_dep_list = targetos == 'windows' ? ['ncurses', 'ncursesw'] : ['ncursesw'] - foreach curses_dep : curses_dep_list - if not curses.found() - curses = dependency(curses_dep, - required: false, - method: 'pkg-config', - kwargs: static_kwargs) - endif - endforeach + curses = dependency(curses_dep_list, + required: false, + method: 'pkg-config', + kwargs: static_kwargs) msg = get_option('curses').enabled() ? 'curses library not found' : '' - curses_compile_args = ['-DNCURSES_WIDECHAR'] + curses_compile_args = ['-DNCURSES_WIDECHAR=1'] if curses.found() if cc.links(curses_test, args: curses_compile_args, dependencies: [curses]) curses = declare_dependency(compile_args: curses_compile_args, dependencies: [curses]) @@ -645,7 +942,7 @@ if have_system and not get_option('curses').disabled() endforeach endif endif - if not get_option('iconv').disabled() + if get_option('iconv').allowed() foreach link_args : [ ['-liconv'], [] ] # Programs will be linked with glib and this will bring in libiconv on FreeBSD. 
# We need to use libiconv if available because mixing libiconv's headers with @@ -698,7 +995,7 @@ if not get_option('brlapi').auto() or have_system endif sdl = not_found -if not get_option('sdl').auto() or (have_system and not cocoa.found()) +if not get_option('sdl').auto() or have_system sdl = dependency('sdl2', required: get_option('sdl'), kwargs: static_kwargs) sdl_image = not_found endif @@ -780,11 +1077,15 @@ if not get_option('glusterfs').auto() or have_block ''', dependencies: glusterfs) endif endif + libssh = not_found -if 'CONFIG_LIBSSH' in config_host - libssh = declare_dependency(compile_args: config_host['LIBSSH_CFLAGS'].split(), - link_args: config_host['LIBSSH_LIBS'].split()) +if not get_option('libssh').auto() or have_block + libssh = dependency('libssh', version: '>=0.8.7', + method: 'pkg-config', + required: get_option('libssh'), + kwargs: static_kwargs) endif + libbzip2 = not_found if not get_option('bzip2').auto() or have_block libbzip2 = cc.find_library('bz2', has_headers: ['bzlib.h'], @@ -820,23 +1121,65 @@ if liblzfse.found() and not cc.links(''' endif oss = not_found -if 'CONFIG_AUDIO_OSS' in config_host - oss = declare_dependency(link_args: config_host['OSS_LIBS'].split()) +if get_option('oss').allowed() and have_system + if not cc.has_header('sys/soundcard.h') + # not found + elif targetos == 'netbsd' + oss = cc.find_library('ossaudio', required: get_option('oss'), + kwargs: static_kwargs) + else + oss = declare_dependency() + endif + + if not oss.found() + if get_option('oss').enabled() + error('OSS not found') + endif + endif endif dsound = not_found -if 'CONFIG_AUDIO_DSOUND' in config_host - dsound = declare_dependency(link_args: config_host['DSOUND_LIBS'].split()) +if not get_option('dsound').auto() or (targetos == 'windows' and have_system) + if cc.has_header('dsound.h') + dsound = declare_dependency(link_args: ['-lole32', '-ldxguid']) + endif + + if not dsound.found() + if get_option('dsound').enabled() + error('DirectSound not found') + endif + endif endif + coreaudio = not_found -if 'CONFIG_AUDIO_COREAUDIO' in config_host - coreaudio = declare_dependency(link_args: config_host['COREAUDIO_LIBS'].split()) +if not get_option('coreaudio').auto() or (targetos == 'darwin' and have_system) + coreaudio = dependency('appleframeworks', modules: 'CoreAudio', + required: get_option('coreaudio')) endif + opengl = not_found -if 'CONFIG_OPENGL' in config_host - opengl = declare_dependency(compile_args: config_host['OPENGL_CFLAGS'].split(), - link_args: config_host['OPENGL_LIBS'].split()) +if not get_option('opengl').auto() or have_system or have_vhost_user_gpu + # FIXME: Use meson's 'gl' dep + if targetos == 'darwin' + opengl_libs=['-framework', 'OpenGL'] + elif targetos == 'windows' + opengl_libs=['-lopengl32', '-lgdi32'] + elif targetos == 'linux' + opengl_libs=['-lGLU', '-lGL'] + else + error('Unknown GL platform') + endif + + opengl = declare_dependency(compile_args: config_host['EPOXY_CFLAGS'].split(), + link_args: config_host['EPOXY_LIBS'].split() + opengl_libs) endif +gbm = not_found +if (have_system or have_tools) and (virgl.found() or opengl.found()) + gbm = dependency('gbm', method: 'pkg-config', required: false, + kwargs: static_kwargs) +endif +have_vhost_user_gpu = have_vhost_user_gpu and virgl.found() and opengl.found() and gbm.found() + tomllib = static_library('tomlpp', sources: files('toml.cpp'), include_directories: 'tomlplusplus/include') toml = declare_dependency(compile_args: ['-DTOML_HEADER_ONLY=0'], @@ -845,17 +1188,19 @@ toml = 
declare_dependency(compile_args: ['-DTOML_HEADER_ONLY=0'], genconfig = declare_dependency(include_directories: 'genconfig') -xemu_gtk = declare_dependency(compile_args: config_host['XEMU_GTK_CFLAGS'].split(), - link_args: config_host['XEMU_GTK_LIBS'].split()) +openssl = dependency('openssl', method: 'pkg-config', required: true, + kwargs: static_kwargs) -openssl = declare_dependency(compile_args: config_host['OPENSSL_CFLAGS'].split(), - link_args: config_host['OPENSSL_LIBS'].split()) +if targetos == 'windows' + libpcap = declare_dependency(include_directories: 'winpcap-loader/include', + link_args: ['-lws2_32']) +else + libpcap = dependency('libpcap', method: 'pkg-config', required: true, + kwargs: static_kwargs) +endif -libpcap = declare_dependency(compile_args: config_host['LIBPCAP_CFLAGS'].split(), - link_args: config_host['LIBPCAP_LIBS'].split()) - -libsamplerate = declare_dependency(compile_args: config_host['LIBSAMPLERATE_CFLAGS'].split(), - link_args: config_host['LIBSAMPLERATE_LIBS'].split()) +libsamplerate = dependency('samplerate', method: 'pkg-config', required: true, + kwargs: static_kwargs) gnutls = not_found gnutls_crypto = not_found @@ -895,6 +1240,7 @@ endif # gcrypt over nettle for performance reasons. gcrypt = not_found nettle = not_found +hogweed = not_found xts = 'none' if get_option('nettle').enabled() and get_option('gcrypt').enabled() @@ -932,13 +1278,28 @@ if not gnutls_crypto.found() endif endif +gmp = dependency('gmp', required: false, method: 'pkg-config', kwargs: static_kwargs) +if nettle.found() and gmp.found() + hogweed = dependency('hogweed', version: '>=3.4', + method: 'pkg-config', + required: get_option('nettle'), + kwargs: static_kwargs) +endif + + gtk = not_found gtkx11 = not_found vte = not_found -if not get_option('gtk').auto() or (have_system and not cocoa.found()) +have_gtk_clipboard = get_option('gtk_clipboard').enabled() + +# xemu: GTK is required in Linux builds for file selection interfaces. In the +# future, this may be relaxed in the future with alternative options. 
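For context on the GTK requirement introduced below (require_gtk on Linux): the file-selection interfaces mentioned in the comment above are native GTK dialogs. A minimal sketch, assuming plain GTK 3 and not taken from xemu's actual UI code:

    #include <gtk/gtk.h>

    /* Minimal native file-selection dialog of the kind that motivates the
     * hard gtk+-3.0 dependency on Linux. Returned path must be g_free()d. */
    static char *pick_file(void)
    {
        GtkWidget *dlg = gtk_file_chooser_dialog_new("Open Image", NULL,
                                                     GTK_FILE_CHOOSER_ACTION_OPEN,
                                                     "_Cancel", GTK_RESPONSE_CANCEL,
                                                     "_Open", GTK_RESPONSE_ACCEPT,
                                                     NULL);
        char *path = NULL;

        if (gtk_dialog_run(GTK_DIALOG(dlg)) == GTK_RESPONSE_ACCEPT) {
            path = gtk_file_chooser_get_filename(GTK_FILE_CHOOSER(dlg));
        }
        gtk_widget_destroy(dlg);
        return path;
    }

    int main(int argc, char **argv)
    {
        gtk_init(&argc, &argv);
        char *p = pick_file();
        g_print("%s\n", p ? p : "(cancelled)");
        g_free(p);
        return 0;
    }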
+require_gtk = targetos == 'linux' + +if require_gtk gtk = dependency('gtk+-3.0', version: '>=3.22.0', method: 'pkg-config', - required: get_option('gtk'), + required: require_gtk, kwargs: static_kwargs) if gtk.found() gtkx11 = dependency('gtk+-x11-3.0', version: '>=3.22.0', @@ -953,6 +1314,8 @@ if not get_option('gtk').auto() or (have_system and not cocoa.found()) required: get_option('vte'), kwargs: static_kwargs) endif + elif have_gtk_clipboard + error('GTK clipboard requested, but GTK not found') endif endif @@ -961,14 +1324,16 @@ if gtkx11.found() x11 = dependency('x11', method: 'pkg-config', required: gtkx11.found(), kwargs: static_kwargs) endif -vnc = not_found png = not_found +if get_option('png').allowed() and have_system + png = dependency('libpng', version: '>=1.6.34', required: get_option('png'), + method: 'pkg-config', kwargs: static_kwargs) +endif +vnc = not_found jpeg = not_found sasl = not_found -if not get_option('vnc').disabled() +if get_option('vnc').allowed() and have_system vnc = declare_dependency() # dummy dependency - png = dependency('libpng', required: get_option('vnc_png'), - method: 'pkg-config', kwargs: static_kwargs) jpeg = dependency('libjpeg', required: get_option('vnc_jpeg'), method: 'pkg-config', kwargs: static_kwargs) sasl = cc.find_library('sasl2', has_headers: ['sasl/sasl.h'], @@ -1011,7 +1376,7 @@ if not get_option('snappy').auto() or have_system required: get_option('snappy'), kwargs: static_kwargs) endif -if snappy.found() and not cc.links(''' +if snappy.found() and not linker.links(''' #include int main(void) { snappy_max_compressed_length(4096); return 0; }''', dependencies: snappy) snappy = not_found @@ -1039,19 +1404,122 @@ if lzo.found() and not cc.links(''' endif endif -rdma = not_found -if 'CONFIG_RDMA' in config_host - rdma = declare_dependency(link_args: config_host['RDMA_LIBS'].split()) -endif numa = not_found -if 'CONFIG_NUMA' in config_host - numa = declare_dependency(link_args: config_host['NUMA_LIBS'].split()) +if not get_option('numa').auto() or have_system or have_tools + numa = cc.find_library('numa', has_headers: ['numa.h'], + required: get_option('numa'), + kwargs: static_kwargs) endif +if numa.found() and not cc.links(''' + #include + int main(void) { return numa_available(); } + ''', dependencies: numa) + numa = not_found + if get_option('numa').enabled() + error('could not link numa') + else + warning('could not link numa, disabling') + endif +endif + +rdma = not_found +if not get_option('rdma').auto() or have_system + libumad = cc.find_library('ibumad', required: get_option('rdma')) + rdma_libs = [cc.find_library('rdmacm', has_headers: ['rdma/rdma_cma.h'], + required: get_option('rdma'), + kwargs: static_kwargs), + cc.find_library('ibverbs', required: get_option('rdma'), + kwargs: static_kwargs), + libumad] + rdma = declare_dependency(dependencies: rdma_libs) + foreach lib: rdma_libs + if not lib.found() + rdma = not_found + endif + endforeach +endif + xen = not_found -if 'CONFIG_XEN_BACKEND' in config_host - xen = declare_dependency(compile_args: config_host['XEN_CFLAGS'].split(), - link_args: config_host['XEN_LIBS'].split()) +if get_option('xen').enabled() or (get_option('xen').auto() and have_system) + xencontrol = dependency('xencontrol', required: false, + method: 'pkg-config', kwargs: static_kwargs) + if xencontrol.found() + xen_pc = declare_dependency(version: xencontrol.version(), + dependencies: [ + xencontrol, + # disabler: true makes xen_pc.found() return false if any is not found + dependency('xenstore', required: 
false, + method: 'pkg-config', kwargs: static_kwargs, + disabler: true), + dependency('xenforeignmemory', required: false, + method: 'pkg-config', kwargs: static_kwargs, + disabler: true), + dependency('xengnttab', required: false, + method: 'pkg-config', kwargs: static_kwargs, + disabler: true), + dependency('xenevtchn', required: false, + method: 'pkg-config', kwargs: static_kwargs, + disabler: true), + dependency('xendevicemodel', required: false, + method: 'pkg-config', kwargs: static_kwargs, + disabler: true), + # optional, no "disabler: true" + dependency('xentoolcore', required: false, + method: 'pkg-config', kwargs: static_kwargs)]) + if xen_pc.found() + xen = xen_pc + endif + endif + if not xen.found() + xen_tests = [ '4.11.0', '4.10.0', '4.9.0', '4.8.0', '4.7.1', '4.6.0', '4.5.0', '4.2.0' ] + xen_libs = { + '4.11.0': [ 'xenstore', 'xenctrl', 'xendevicemodel', 'xenforeignmemory', 'xengnttab', 'xenevtchn', 'xentoolcore' ], + '4.10.0': [ 'xenstore', 'xenctrl', 'xendevicemodel', 'xenforeignmemory', 'xengnttab', 'xenevtchn', 'xentoolcore' ], + '4.9.0': [ 'xenstore', 'xenctrl', 'xendevicemodel', 'xenforeignmemory', 'xengnttab', 'xenevtchn' ], + '4.8.0': [ 'xenstore', 'xenctrl', 'xenforeignmemory', 'xengnttab', 'xenevtchn' ], + '4.7.1': [ 'xenstore', 'xenctrl', 'xenforeignmemory', 'xengnttab', 'xenevtchn' ], + '4.6.0': [ 'xenstore', 'xenctrl' ], + '4.5.0': [ 'xenstore', 'xenctrl' ], + '4.2.0': [ 'xenstore', 'xenctrl' ], + } + xen_deps = {} + foreach ver: xen_tests + # cache the various library tests to avoid polluting the logs + xen_test_deps = [] + foreach l: xen_libs[ver] + if l not in xen_deps + xen_deps += { l: cc.find_library(l, required: false) } + endif + xen_test_deps += xen_deps[l] + endforeach + + # Use -D to pick just one of the test programs in scripts/xen-detect.c + xen_version = ver.split('.') + xen_ctrl_version = xen_version[0] + \ + ('0' + xen_version[1]).substring(-2) + \ + ('0' + xen_version[2]).substring(-2) + if cc.links(files('scripts/xen-detect.c'), + args: '-DCONFIG_XEN_CTRL_INTERFACE_VERSION=' + xen_ctrl_version, + dependencies: xen_test_deps) + xen = declare_dependency(version: ver, dependencies: xen_test_deps) + break + endif + endforeach + endif + if xen.found() + accelerators += 'CONFIG_XEN' + elif get_option('xen').enabled() + error('could not compile and link Xen test program') + endif endif +have_xen_pci_passthrough = get_option('xen_pci_passthrough') \ + .require(xen.found(), + error_message: 'Xen PCI passthrough requested but Xen not enabled') \ + .require(targetos == 'linux', + error_message: 'Xen PCI passthrough not available on this platform') \ + .allowed() + + cacard = not_found if not get_option('smartcard').auto() or have_system cacard = dependency('libcacard', required: get_option('smartcard'), @@ -1064,6 +1532,12 @@ if have_system method: 'pkg-config', kwargs: static_kwargs) endif +canokey = not_found +if have_system + canokey = dependency('canokey-qemu', required: get_option('canokey'), + method: 'pkg-config', + kwargs: static_kwargs) +endif usbredir = not_found if not get_option('usb_redir').auto() or have_system usbredir = dependency('libusbredirparser-0.5', required: get_option('usb_redir'), @@ -1099,12 +1573,17 @@ keyutils = dependency('libkeyutils', required: false, has_gettid = cc.has_function('gettid') +# libselinux +selinux = dependency('libselinux', + required: get_option('selinux'), + method: 'pkg-config', kwargs: static_kwargs) + # Malloc tests malloc = [] if get_option('malloc') == 'system' has_malloc_trim = \ - not 
get_option('malloc_trim').disabled() and \ + get_option('malloc_trim').allowed() and \ cc.links('''#include int main(void) { malloc_trim(0); return 0; }''') else @@ -1136,19 +1615,25 @@ statx_test = gnu_source_prefix + ''' has_statx = cc.links(statx_test) -have_vhost_user_blk_server = (targetos == 'linux' and - 'CONFIG_VHOST_USER' in config_host) +# Check whether statx() provides mount ID information -if get_option('vhost_user_blk_server').enabled() - if targetos != 'linux' - error('vhost_user_blk_server requires linux') - elif 'CONFIG_VHOST_USER' not in config_host - error('vhost_user_blk_server requires vhost-user support') - endif -elif get_option('vhost_user_blk_server').disabled() or not have_system - have_vhost_user_blk_server = false -endif +statx_mnt_id_test = gnu_source_prefix + ''' + #include + int main(void) { + struct statx statxbuf; + statx(0, "", 0, STATX_BASIC_STATS | STATX_MNT_ID, &statxbuf); + return statxbuf.stx_mnt_id; + }''' +has_statx_mnt_id = cc.links(statx_mnt_id_test) + +have_vhost_user_blk_server = get_option('vhost_user_blk_server') \ + .require(targetos == 'linux', + error_message: 'vhost_user_blk_server requires linux') \ + .require(have_vhost_user, + error_message: 'vhost_user_blk_server requires vhost-user support') \ + .disable_auto_if(not have_tools and not have_system) \ + .allowed() if get_option('fuse').disabled() and get_option('fuse_lseek').enabled() error('Cannot enable fuse-lseek while fuse is disabled') @@ -1159,7 +1644,7 @@ fuse = dependency('fuse3', required: get_option('fuse'), kwargs: static_kwargs) fuse_lseek = not_found -if not get_option('fuse_lseek').disabled() +if get_option('fuse_lseek').allowed() if fuse.version().version_compare('>=3.8') # Dummy dependency fuse_lseek = declare_dependency() @@ -1172,6 +1657,26 @@ if not get_option('fuse_lseek').disabled() endif endif +have_libvduse = (targetos == 'linux') +if get_option('libvduse').enabled() + if targetos != 'linux' + error('libvduse requires linux') + endif +elif get_option('libvduse').disabled() + have_libvduse = false +endif + +have_vduse_blk_export = (have_libvduse and targetos == 'linux') +if get_option('vduse_blk_export').enabled() + if targetos != 'linux' + error('vduse_blk_export requires linux') + elif not have_libvduse + error('vduse_blk_export requires libvduse support') + endif +elif get_option('vduse_blk_export').disabled() + have_vduse_blk_export = false +endif + # libbpf libbpf = dependency('libbpf', required: get_option('bpf'), method: 'pkg-config') if libbpf.found() and not cc.links(''' @@ -1189,6 +1694,50 @@ if libbpf.found() and not cc.links(''' endif endif +################# +# config-host.h # +################# + +audio_drivers_selected = [] +if have_system + audio_drivers_available = { + 'alsa': alsa.found(), + 'coreaudio': coreaudio.found(), + 'dsound': dsound.found(), + 'jack': jack.found(), + 'oss': oss.found(), + 'pa': pulse.found(), + 'sdl': sdl.found(), + 'sndio': sndio.found(), + } + foreach k, v: audio_drivers_available + config_host_data.set('CONFIG_AUDIO_' + k.to_upper(), v) + endforeach + + # Default to native drivers first, OSS second, SDL third + audio_drivers_priority = \ + [ 'pa', 'coreaudio', 'dsound', 'sndio', 'oss' ] + \ + (targetos == 'linux' ? 
[] : [ 'sdl' ]) + audio_drivers_default = [] + foreach k: audio_drivers_priority + if audio_drivers_available[k] + audio_drivers_default += k + endif + endforeach + + foreach k: get_option('audio_drv_list') + if k == 'default' + audio_drivers_selected += audio_drivers_default + elif not audio_drivers_available[k] + error('Audio driver "@0@" not available.'.format(k)) + else + audio_drivers_selected += k + endif + endforeach +endif +config_host_data.set('CONFIG_AUDIO_DRIVERS', + '"' + '", "'.join(audio_drivers_selected) + '", ') + if get_option('cfi') cfi_flags=[] # Check for dependency on LTO @@ -1234,44 +1783,63 @@ have_host_block_device = (targetos != 'darwin' or have_renderdoc = false if have_system and get_option('renderdoc').enabled() - if targetos == 'linux' and not libdl.found() - error('renderdoc required, but libdl was not found') - else - have_renderdoc = true - endif + have_renderdoc = true endif -################# -# config-host.h # -################# +dbus_display = get_option('dbus_display') \ + .require(gio.version().version_compare('>=2.64'), + error_message: '-display dbus requires glib>=2.64') \ + .require(gdbus_codegen.found(), + error_message: gdbus_codegen_error.format('-display dbus')) \ + .require(opengl.found() and gbm.found(), + error_message: '-display dbus requires epoxy/egl and gbm') \ + .allowed() -have_virtfs = (targetos == 'linux' and - have_system and - libattr.found() and - libcap_ng.found()) +have_virtfs = get_option('virtfs') \ + .require(targetos == 'linux' or targetos == 'darwin', + error_message: 'virtio-9p (virtfs) requires Linux or macOS') \ + .require(targetos == 'linux' or cc.has_function('pthread_fchdir_np'), + error_message: 'virtio-9p (virtfs) on macOS requires the presence of pthread_fchdir_np') \ + .require(targetos == 'darwin' or (libattr.found() and libcap_ng.found()), + error_message: 'virtio-9p (virtfs) on Linux requires libcap-ng-devel and libattr-devel') \ + .disable_auto_if(not have_tools and not have_system) \ + .allowed() -have_virtfs_proxy_helper = have_virtfs and have_tools +have_virtfs_proxy_helper = targetos != 'darwin' and have_virtfs and have_tools -if get_option('virtfs').enabled() - if not have_virtfs - if targetos != 'linux' - error('virtio-9p (virtfs) requires Linux') - elif not libcap_ng.found() or not libattr.found() - error('virtio-9p (virtfs) requires libcap-ng-devel and libattr-devel') - elif not have_system - error('virtio-9p (virtfs) needs system emulation support') - endif - endif -elif get_option('virtfs').disabled() - have_virtfs = false +if get_option('block_drv_ro_whitelist') == '' + config_host_data.set('CONFIG_BDRV_RO_WHITELIST', '') +else + config_host_data.set('CONFIG_BDRV_RO_WHITELIST', + '"' + get_option('block_drv_ro_whitelist').replace(',', '", "') + '", ') +endif +if get_option('block_drv_rw_whitelist') == '' + config_host_data.set('CONFIG_BDRV_RW_WHITELIST', '') +else + config_host_data.set('CONFIG_BDRV_RW_WHITELIST', + '"' + get_option('block_drv_rw_whitelist').replace(',', '", "') + '", ') endif +foreach k : get_option('trace_backends') + config_host_data.set('CONFIG_TRACE_' + k.to_upper(), true) +endforeach +config_host_data.set_quoted('CONFIG_TRACE_FILE', get_option('trace_file')) +config_host_data.set_quoted('CONFIG_TLS_PRIORITY', get_option('tls_priority')) +if iasl.found() + config_host_data.set_quoted('CONFIG_IASL', iasl.full_path()) +endif config_host_data.set_quoted('CONFIG_BINDIR', get_option('prefix') / get_option('bindir')) config_host_data.set_quoted('CONFIG_PREFIX', get_option('prefix')) 
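For context on the quoted, comma-joined strings built above (CONFIG_AUDIO_DRIVERS and the CONFIG_BDRV_*_WHITELIST values): the trailing '", ' suggests the macro is meant to expand inside a NULL-terminated array initializer. A sketch under that assumption, with a stand-in value for the generated macro:

    #include <stdio.h>

    /* Stand-in for the value meson writes into the config header,
     * e.g. "pa", "sdl",  -- note the trailing comma. */
    #define CONFIG_AUDIO_DRIVERS "pa", "sdl",

    static const char *const audio_priority[] = {
        CONFIG_AUDIO_DRIVERS   /* expands to the quoted list */
        NULL                   /* terminator */
    };

    int main(void)
    {
        for (const char *const *p = audio_priority; *p; p++) {
            printf("%s\n", *p);
        }
        return 0;
    }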
config_host_data.set_quoted('CONFIG_QEMU_CONFDIR', get_option('prefix') / qemu_confdir) config_host_data.set_quoted('CONFIG_QEMU_DATADIR', get_option('prefix') / qemu_datadir) config_host_data.set_quoted('CONFIG_QEMU_DESKTOPDIR', get_option('prefix') / qemu_desktopdir) -config_host_data.set_quoted('CONFIG_QEMU_FIRMWAREPATH', get_option('qemu_firmwarepath')) + +qemu_firmwarepath = '' +foreach k : get_option('qemu_firmwarepath') + qemu_firmwarepath += '"' + get_option('prefix') / k + '", ' +endforeach +config_host_data.set('CONFIG_QEMU_FIRMWAREPATH', qemu_firmwarepath) + config_host_data.set_quoted('CONFIG_QEMU_HELPERDIR', get_option('prefix') / get_option('libexecdir')) config_host_data.set_quoted('CONFIG_QEMU_ICONDIR', get_option('prefix') / qemu_icondir) config_host_data.set_quoted('CONFIG_QEMU_LOCALEDIR', get_option('prefix') / get_option('localedir')) @@ -1279,15 +1847,47 @@ config_host_data.set_quoted('CONFIG_QEMU_LOCALSTATEDIR', get_option('prefix') / config_host_data.set_quoted('CONFIG_QEMU_MODDIR', get_option('prefix') / qemu_moddir) config_host_data.set_quoted('CONFIG_SYSCONFDIR', get_option('prefix') / get_option('sysconfdir')) +if config_host.has_key('CONFIG_MODULES') + config_host_data.set('CONFIG_STAMP', run_command( + meson.current_source_dir() / 'scripts/qemu-stamp.py', + meson.project_version(), get_option('pkgversion'), '--', + meson.current_source_dir() / 'configure', + capture: true, check: true).stdout().strip()) +endif + +have_slirp_smbd = get_option('slirp_smbd') \ + .require(targetos != 'windows', error_message: 'Host smbd not supported on this platform.') \ + .allowed() +if have_slirp_smbd + smbd_path = get_option('smbd') + if smbd_path == '' + smbd_path = (targetos == 'solaris' ? '/usr/sfw/sbin/smbd' : '/usr/sbin/smbd') + endif + config_host_data.set_quoted('CONFIG_SMBD_COMMAND', smbd_path) +endif + +config_host_data.set('HOST_' + host_arch.to_upper(), 1) + +if get_option('module_upgrades') and not enable_modules + error('Cannot enable module-upgrades as modules are not enabled') +endif +config_host_data.set('CONFIG_MODULE_UPGRADES', get_option('module_upgrades')) + config_host_data.set('CONFIG_ATTR', libattr.found()) +config_host_data.set('CONFIG_BDRV_WHITELIST_TOOLS', get_option('block_drv_whitelist_in_tools')) config_host_data.set('CONFIG_BRLAPI', brlapi.found()) config_host_data.set('CONFIG_COCOA', cocoa.found()) +config_host_data.set('CONFIG_FUZZ', get_option('fuzzing')) +config_host_data.set('CONFIG_GCOV', get_option('b_coverage')) config_host_data.set('CONFIG_LIBUDEV', libudev.found()) config_host_data.set('CONFIG_LZO', lzo.found()) config_host_data.set('CONFIG_MPATH', mpathpersist.found()) config_host_data.set('CONFIG_MPATH_NEW_API', mpathpersist_new_api) +config_host_data.set('CONFIG_BLKIO', blkio.found()) config_host_data.set('CONFIG_CURL', curl.found()) config_host_data.set('CONFIG_CURSES', curses.found()) +config_host_data.set('CONFIG_GBM', gbm.found()) +config_host_data.set('CONFIG_GIO', gio.found()) config_host_data.set('CONFIG_GLUSTERFS', glusterfs.found()) if glusterfs.found() config_host_data.set('CONFIG_GLUSTERFS_XLATOR_OPT', glusterfs.version().version_compare('>=4')) @@ -1299,24 +1899,45 @@ if glusterfs.found() endif config_host_data.set('CONFIG_GTK', gtk.found()) config_host_data.set('CONFIG_VTE', vte.found()) +config_host_data.set('CONFIG_GTK_CLIPBOARD', have_gtk_clipboard) config_host_data.set('CONFIG_LIBATTR', have_old_libattr) config_host_data.set('CONFIG_LIBCAP_NG', libcap_ng.found()) config_host_data.set('CONFIG_EBPF', libbpf.found()) 
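For context on config_host_data.set('HOST_' + host_arch.to_upper(), 1) above: combined with the host_arch normalisation earlier in this patch (x86 -> i386, mips64 -> mips, riscv32/riscv64 -> riscv), exactly one HOST_<ARCH> macro ends up in the generated config header, so host-specific code can be selected with the preprocessor. A sketch with an assumed stand-in definition:

    #include <stdio.h>

    #define HOST_X86_64 1   /* stand-in for the single macro meson defines */

    int main(void)
    {
    #if defined(HOST_X86_64)
        puts("host: x86_64");
    #elif defined(HOST_AARCH64)
        puts("host: aarch64");
    #elif defined(HOST_RISCV)
        puts("host: riscv");
    #else
        puts("host: other/unknown");
    #endif
        return 0;
    }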
config_host_data.set('CONFIG_LIBDAXCTL', libdaxctl.found()) config_host_data.set('CONFIG_LIBISCSI', libiscsi.found()) config_host_data.set('CONFIG_LIBNFS', libnfs.found()) +config_host_data.set('CONFIG_LIBSSH', libssh.found()) +config_host_data.set('CONFIG_LINUX_AIO', libaio.found()) config_host_data.set('CONFIG_LINUX_IO_URING', linux_io_uring.found()) config_host_data.set('CONFIG_LIBPMEM', libpmem.found()) +config_host_data.set('CONFIG_NUMA', numa.found()) +config_host_data.set('CONFIG_OPENGL', opengl.found()) +config_host_data.set('CONFIG_PROFILER', get_option('profiler')) config_host_data.set('CONFIG_RBD', rbd.found()) +config_host_data.set('CONFIG_RDMA', rdma.found()) config_host_data.set('CONFIG_SDL', sdl.found()) config_host_data.set('CONFIG_SDL_IMAGE', sdl_image.found()) config_host_data.set('CONFIG_SECCOMP', seccomp.found()) +if seccomp.found() + config_host_data.set('CONFIG_SECCOMP_SYSRAWRC', seccomp_has_sysrawrc) +endif config_host_data.set('CONFIG_SNAPPY', snappy.found()) +config_host_data.set('CONFIG_TPM', have_tpm) config_host_data.set('CONFIG_USB_LIBUSB', libusb.found()) +config_host_data.set('CONFIG_VDE', vde.found()) +config_host_data.set('CONFIG_VHOST_NET', have_vhost_net) +config_host_data.set('CONFIG_VHOST_NET_USER', have_vhost_net_user) +config_host_data.set('CONFIG_VHOST_NET_VDPA', have_vhost_net_vdpa) +config_host_data.set('CONFIG_VHOST_KERNEL', have_vhost_kernel) +config_host_data.set('CONFIG_VHOST_USER', have_vhost_user) +config_host_data.set('CONFIG_VHOST_CRYPTO', have_vhost_user_crypto) +config_host_data.set('CONFIG_VHOST_VDPA', have_vhost_vdpa) +config_host_data.set('CONFIG_VMNET', vmnet.found()) config_host_data.set('CONFIG_VHOST_USER_BLK_SERVER', have_vhost_user_blk_server) +config_host_data.set('CONFIG_VDUSE_BLK_EXPORT', have_vduse_blk_export) +config_host_data.set('CONFIG_PNG', png.found()) config_host_data.set('CONFIG_VNC', vnc.found()) config_host_data.set('CONFIG_VNC_JPEG', jpeg.found()) -config_host_data.set('CONFIG_VNC_PNG', png.found()) config_host_data.set('CONFIG_VNC_SASL', sasl.found()) config_host_data.set('CONFIG_VIRTFS', have_virtfs) config_host_data.set('CONFIG_VTE', vte.found()) @@ -1325,24 +1946,59 @@ config_host_data.set('CONFIG_KEYUTILS', keyutils.found()) config_host_data.set('CONFIG_GETTID', has_gettid) config_host_data.set('CONFIG_GNUTLS', gnutls.found()) config_host_data.set('CONFIG_GNUTLS_CRYPTO', gnutls_crypto.found()) +config_host_data.set('CONFIG_TASN1', tasn1.found()) config_host_data.set('CONFIG_GCRYPT', gcrypt.found()) config_host_data.set('CONFIG_NETTLE', nettle.found()) +config_host_data.set('CONFIG_HOGWEED', hogweed.found()) config_host_data.set('CONFIG_QEMU_PRIVATE_XTS', xts == 'private') config_host_data.set('CONFIG_MALLOC_TRIM', has_malloc_trim) config_host_data.set('CONFIG_STATX', has_statx) +config_host_data.set('CONFIG_STATX_MNT_ID', has_statx_mnt_id) config_host_data.set('CONFIG_ZSTD', zstd.found()) config_host_data.set('CONFIG_FUSE', fuse.found()) config_host_data.set('CONFIG_FUSE_LSEEK', fuse_lseek.found()) +config_host_data.set('CONFIG_SPICE_PROTOCOL', spice_protocol.found()) +if spice_protocol.found() +config_host_data.set('CONFIG_SPICE_PROTOCOL_MAJOR', spice_protocol.version().split('.')[0]) +config_host_data.set('CONFIG_SPICE_PROTOCOL_MINOR', spice_protocol.version().split('.')[1]) +config_host_data.set('CONFIG_SPICE_PROTOCOL_MICRO', spice_protocol.version().split('.')[2]) +endif +config_host_data.set('CONFIG_SPICE', spice.found()) config_host_data.set('CONFIG_X11', x11.found()) 
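For context on the CONFIG_SPICE_PROTOCOL_MAJOR/MINOR/MICRO values split out above: exporting the version as three numeric macros rather than one string allows plain compile-time comparisons. A sketch under that assumption; the stand-in values and the helper macro are hypothetical:

    /* Stand-ins for the generated numeric version macros. */
    #define CONFIG_SPICE_PROTOCOL_MAJOR 0
    #define CONFIG_SPICE_PROTOCOL_MINOR 14
    #define CONFIG_SPICE_PROTOCOL_MICRO 3

    /* Hypothetical helper: pack a version triple for comparison. */
    #define VERSION_NUM(maj, min, mic) ((maj) * 10000 + (min) * 100 + (mic))

    int main(void)
    {
    #if VERSION_NUM(CONFIG_SPICE_PROTOCOL_MAJOR, \
                    CONFIG_SPICE_PROTOCOL_MINOR, \
                    CONFIG_SPICE_PROTOCOL_MICRO) >= VERSION_NUM(0, 14, 0)
        return 0;   /* new enough protocol headers */
    #else
        return 1;
    #endif
    }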
+config_host_data.set('CONFIG_DBUS_DISPLAY', dbus_display) config_host_data.set('CONFIG_CFI', get_option('cfi')) +config_host_data.set('CONFIG_SELINUX', selinux.found()) +config_host_data.set('CONFIG_XEN_BACKEND', xen.found()) +if xen.found() + # protect from xen.version() having less than three components + xen_version = xen.version().split('.') + ['0', '0'] + xen_ctrl_version = xen_version[0] + \ + ('0' + xen_version[1]).substring(-2) + \ + ('0' + xen_version[2]).substring(-2) + config_host_data.set('CONFIG_XEN_CTRL_INTERFACE_VERSION', xen_ctrl_version) +endif config_host_data.set('QEMU_VERSION', '"@0@"'.format(meson.project_version())) config_host_data.set('QEMU_VERSION_MAJOR', meson.project_version().split('.')[0]) config_host_data.set('QEMU_VERSION_MINOR', meson.project_version().split('.')[1]) config_host_data.set('QEMU_VERSION_MICRO', meson.project_version().split('.')[2]) +config_host_data.set_quoted('CONFIG_HOST_DSOSUF', host_dsosuf) config_host_data.set('HAVE_HOST_BLOCK_DEVICE', have_host_block_device) config_host_data.set('CONFIG_RENDERDOC', have_renderdoc) +have_coroutine_pool = get_option('coroutine_pool') +if get_option('debug_stack_usage') and have_coroutine_pool + message('Disabling coroutine pool to measure stack usage') + have_coroutine_pool = false +endif +config_host_data.set10('CONFIG_COROUTINE_POOL', have_coroutine_pool) +config_host_data.set('CONFIG_DEBUG_MUTEX', get_option('debug_mutex')) +config_host_data.set('CONFIG_DEBUG_STACK_USAGE', get_option('debug_stack_usage')) +config_host_data.set('CONFIG_GPROF', get_option('gprof')) +config_host_data.set('CONFIG_LIVE_BLOCK_MIGRATION', get_option('live_block_migration').allowed()) +config_host_data.set('CONFIG_QOM_CAST_DEBUG', get_option('qom_cast_debug')) +config_host_data.set('CONFIG_REPLICATION', get_option('replication').allowed()) + # has_header config_host_data.set('CONFIG_EPOLL', cc.has_header('sys/epoll.h')) config_host_data.set('CONFIG_LINUX_MAGIC_H', cc.has_header('linux/magic.h')) @@ -1353,34 +2009,54 @@ config_host_data.set('HAVE_PTY_H', cc.has_header('pty.h')) config_host_data.set('HAVE_SYS_DISK_H', cc.has_header('sys/disk.h')) config_host_data.set('HAVE_SYS_IOCCOM_H', cc.has_header('sys/ioccom.h')) config_host_data.set('HAVE_SYS_KCOV_H', cc.has_header('sys/kcov.h')) +if targetos == 'windows' + config_host_data.set('HAVE_AFUNIX_H', cc.has_header('afunix.h')) +endif # has_function +config_host_data.set('CONFIG_CLOSE_RANGE', cc.has_function('close_range')) config_host_data.set('CONFIG_ACCEPT4', cc.has_function('accept4')) config_host_data.set('CONFIG_CLOCK_ADJTIME', cc.has_function('clock_adjtime')) config_host_data.set('CONFIG_DUP3', cc.has_function('dup3')) config_host_data.set('CONFIG_FALLOCATE', cc.has_function('fallocate')) config_host_data.set('CONFIG_POSIX_FALLOCATE', cc.has_function('posix_fallocate')) -config_host_data.set('CONFIG_POSIX_MEMALIGN', cc.has_function('posix_memalign')) +# Note that we need to specify prefix: here to avoid incorrectly +# thinking that Windows has posix_memalign() +config_host_data.set('CONFIG_POSIX_MEMALIGN', cc.has_function('posix_memalign', prefix: '#include ')) +config_host_data.set('CONFIG_ALIGNED_MALLOC', cc.has_function('_aligned_malloc')) +config_host_data.set('CONFIG_VALLOC', cc.has_function('valloc')) +config_host_data.set('CONFIG_MEMALIGN', cc.has_function('memalign')) config_host_data.set('CONFIG_PPOLL', cc.has_function('ppoll')) config_host_data.set('CONFIG_PREADV', cc.has_function('preadv', prefix: '#include ')) -config_host_data.set('CONFIG_SEM_TIMEDWAIT', 
cc.has_function('sem_timedwait', dependencies: threads)) +config_host_data.set('CONFIG_PTHREAD_FCHDIR_NP', cc.has_function('pthread_fchdir_np')) config_host_data.set('CONFIG_SENDFILE', cc.has_function('sendfile')) config_host_data.set('CONFIG_SETNS', cc.has_function('setns') and cc.has_function('unshare')) config_host_data.set('CONFIG_SYNCFS', cc.has_function('syncfs')) config_host_data.set('CONFIG_SYNC_FILE_RANGE', cc.has_function('sync_file_range')) config_host_data.set('CONFIG_TIMERFD', cc.has_function('timerfd_create')) config_host_data.set('HAVE_COPY_FILE_RANGE', cc.has_function('copy_file_range')) +config_host_data.set('HAVE_GETIFADDRS', cc.has_function('getifaddrs')) config_host_data.set('HAVE_OPENPTY', cc.has_function('openpty', dependencies: util)) config_host_data.set('HAVE_STRCHRNUL', cc.has_function('strchrnul')) config_host_data.set('HAVE_SYSTEM_FUNCTION', cc.has_function('system', prefix: '#include ')) +if rbd.found() + config_host_data.set('HAVE_RBD_NAMESPACE_EXISTS', + cc.has_function('rbd_namespace_exists', + dependencies: rbd, + prefix: '#include ')) +endif +if rdma.found() + config_host_data.set('HAVE_IBV_ADVISE_MR', + cc.has_function('ibv_advise_mr', + dependencies: rdma, + prefix: '#include ')) +endif # has_header_symbol config_host_data.set('CONFIG_BYTESWAP_H', cc.has_header_symbol('byteswap.h', 'bswap_32')) config_host_data.set('CONFIG_EPOLL_CREATE1', cc.has_header_symbol('sys/epoll.h', 'epoll_create1')) -config_host_data.set('CONFIG_HAS_ENVIRON', - cc.has_header_symbol('unistd.h', 'environ', prefix: gnu_source_prefix)) config_host_data.set('CONFIG_FALLOCATE_PUNCH_HOLE', cc.has_header_symbol('linux/falloc.h', 'FALLOC_FL_PUNCH_HOLE') and cc.has_header_symbol('linux/falloc.h', 'FALLOC_FL_KEEP_SIZE')) @@ -1408,8 +2084,10 @@ config_host_data.set('CONFIG_SYSMACROS', cc.has_header_symbol('sys/sysmacros.h', 'makedev')) config_host_data.set('HAVE_OPTRESET', cc.has_header_symbol('getopt.h', 'optreset')) -config_host_data.set('HAVE_UTMPX', - cc.has_header_symbol('utmpx.h', 'struct utmpx')) +config_host_data.set('HAVE_IPPROTO_MPTCP', + cc.has_header_symbol('netinet/in.h', 'IPPROTO_MPTCP')) +config_host_data.set('HAVE_SYS_MOUNT_FSCONFIG', + cc.has_header_symbol('sys/mount.h', 'FSCONFIG_SET_FLAG')) # has_member config_host_data.set('HAVE_SIGEV_NOTIFY_THREAD_ID', @@ -1419,6 +2097,14 @@ config_host_data.set('HAVE_STRUCT_STAT_ST_ATIM', cc.has_member('struct stat', 'st_atim', prefix: '#include ')) +# has_type +config_host_data.set('CONFIG_IOVEC', + cc.has_type('struct iovec', + prefix: '#include ')) +config_host_data.set('HAVE_UTMPX', + cc.has_type('struct utmpx', + prefix: '#include ')) + config_host_data.set('CONFIG_EVENTFD', cc.links(''' #include int main(void) { return eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); }''')) @@ -1431,11 +2117,30 @@ config_host_data.set('CONFIG_FDATASYNC', cc.links(gnu_source_prefix + ''' #error Not supported #endif }''')) -config_host_data.set('CONFIG_MADVISE', cc.links(gnu_source_prefix + ''' + +has_madvise = cc.links(gnu_source_prefix + ''' #include #include #include - int main(void) { return madvise(NULL, 0, MADV_DONTNEED); }''')) + int main(void) { return madvise(NULL, 0, MADV_DONTNEED); }''') +missing_madvise_proto = false +if has_madvise + # Some platforms (illumos and Solaris before Solaris 11) provide madvise() + # but forget to prototype it. In this case, has_madvise will be true (the + # test program links despite a compile warning). To detect the + # missing-prototype case, we try again with a definitely-bogus prototype. 
+ # This will only compile if the system headers don't provide the prototype; + # otherwise the conflicting prototypes will cause a compiler error. + missing_madvise_proto = cc.links(gnu_source_prefix + ''' + #include + #include + #include + extern int madvise(int); + int main(void) { return madvise(0); }''') +endif +config_host_data.set('CONFIG_MADVISE', has_madvise) +config_host_data.set('HAVE_MADVISE_WITHOUT_PROTOTYPE', missing_madvise_proto) + config_host_data.set('CONFIG_MEMFD', cc.links(gnu_source_prefix + ''' #include int main(void) { return memfd_create("foo", MFD_ALLOW_SEALING); }''')) @@ -1446,24 +2151,64 @@ config_host_data.set('CONFIG_OPEN_BY_HANDLE', cc.links(gnu_source_prefix + ''' #else int main(void) { struct file_handle fh; return open_by_handle_at(0, &fh, 0); } #endif''')) -config_host_data.set('CONFIG_PIPE2', cc.links(gnu_source_prefix + ''' - #include - #include - - int main(void) - { - int pipefd[2]; - return pipe2(pipefd, O_CLOEXEC); - }''')) config_host_data.set('CONFIG_POSIX_MADVISE', cc.links(gnu_source_prefix + ''' #include #include int main(void) { return posix_madvise(NULL, 0, POSIX_MADV_DONTNEED); }''')) + +config_host_data.set('CONFIG_PTHREAD_SETNAME_NP_W_TID', cc.links(gnu_source_prefix + ''' + #include + + static void *f(void *p) { return NULL; } + int main(void) + { + pthread_t thread; + pthread_create(&thread, 0, f, 0); + pthread_setname_np(thread, "QEMU"); + return 0; + }''', dependencies: threads)) +config_host_data.set('CONFIG_PTHREAD_SETNAME_NP_WO_TID', cc.links(gnu_source_prefix + ''' + #include + + static void *f(void *p) { pthread_setname_np("QEMU"); return NULL; } + int main(void) + { + pthread_t thread; + pthread_create(&thread, 0, f, 0); + return 0; + }''', dependencies: threads)) +config_host_data.set('CONFIG_PTHREAD_CONDATTR_SETCLOCK', cc.links(gnu_source_prefix + ''' + #include + #include + + int main(void) + { + pthread_condattr_t attr + pthread_condattr_init(&attr); + pthread_condattr_setclock(&attr, CLOCK_MONOTONIC); + return 0; + }''', dependencies: threads)) +config_host_data.set('CONFIG_PTHREAD_AFFINITY_NP', cc.links(gnu_source_prefix + ''' + #include + + static void *f(void *p) { return NULL; } + int main(void) + { + int setsize = CPU_ALLOC_SIZE(64); + pthread_t thread; + cpu_set_t *cpuset; + pthread_create(&thread, 0, f, 0); + cpuset = CPU_ALLOC(64); + CPU_ZERO_S(setsize, cpuset); + pthread_setaffinity_np(thread, setsize, cpuset); + pthread_getaffinity_np(thread, setsize, cpuset); + CPU_FREE(cpuset); + return 0; + }''', dependencies: threads)) config_host_data.set('CONFIG_SIGNALFD', cc.links(gnu_source_prefix + ''' - #include - #include - #include - int main(void) { return syscall(SYS_signalfd, -1, NULL, _NSIG / 8); }''')) + #include + #include + int main(void) { return signalfd(-1, NULL, SFD_CLOEXEC); }''')) config_host_data.set('CONFIG_SPLICE', cc.links(gnu_source_prefix + ''' #include #include @@ -1477,38 +2222,286 @@ config_host_data.set('CONFIG_SPLICE', cc.links(gnu_source_prefix + ''' return 0; }''')) +config_host_data.set('HAVE_MLOCKALL', cc.links(gnu_source_prefix + ''' + #include + int main(void) { + return mlockall(MCL_FUTURE); + }''')) + +have_l2tpv3 = false +if get_option('l2tpv3').allowed() and have_system + have_l2tpv3 = cc.has_type('struct mmsghdr', + prefix: gnu_source_prefix + ''' + #include + #include ''') +endif +config_host_data.set('CONFIG_L2TPV3', have_l2tpv3) + +have_netmap = false +if get_option('netmap').allowed() and have_system + have_netmap = cc.compiles(''' + #include + #include + #include + #include + #if 
(NETMAP_API < 11) || (NETMAP_API > 15) + #error + #endif + int main(void) { return 0; }''') + if not have_netmap and get_option('netmap').enabled() + error('Netmap headers not available') + endif +endif +config_host_data.set('CONFIG_NETMAP', have_netmap) + +# Work around a system header bug with some kernel/XFS header +# versions where they both try to define 'struct fsxattr': +# xfs headers will not try to redefine structs from linux headers +# if this macro is set. +config_host_data.set('HAVE_FSXATTR', cc.links(''' + #include + struct fsxattr foo; + int main(void) { + return 0; + }''')) + # Some versions of Mac OS X incorrectly define SIZE_MAX config_host_data.set('HAVE_BROKEN_SIZE_MAX', not cc.compiles(''' #include #include - int main(int argc, char *argv[]) { + int main(void) { return printf("%zu", SIZE_MAX); }''', args: ['-Werror'])) +atomic_test = ''' + #include + int main(void) + { + @0@ x = 0, y = 0; + y = __atomic_load_n(&x, __ATOMIC_RELAXED); + __atomic_store_n(&x, y, __ATOMIC_RELAXED); + __atomic_compare_exchange_n(&x, &y, x, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); + __atomic_exchange_n(&x, y, __ATOMIC_RELAXED); + __atomic_fetch_add(&x, y, __ATOMIC_RELAXED); + return 0; + }''' + +# See if 64-bit atomic operations are supported. +# Note that without __atomic builtins, we can only +# assume atomic loads/stores max at pointer size. +config_host_data.set('CONFIG_ATOMIC64', cc.links(atomic_test.format('uint64_t'))) + +has_int128 = cc.links(''' + __int128_t a; + __uint128_t b; + int main (void) { + a = a + b; + b = a * b; + a = a * a; + return 0; + }''') + +config_host_data.set('CONFIG_INT128', has_int128) + +if has_int128 + # "do we have 128-bit atomics which are handled inline and specifically not + # via libatomic". The reason we can't use libatomic is documented in the + # comment starting "GCC is a house divided" in include/qemu/atomic128.h. 
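The atomic_test template above is instantiated twice: with 'uint64_t' for the CONFIG_ATOMIC64 probe, and just below with 'unsigned __int128' for CONFIG_ATOMIC128. Spelled out, the 64-bit instantiation is roughly the program below; the header name is an assumption, since the include target is missing in this copy of the diff (stdint.h is what declares uint64_t):

/* roughly what cc.links(atomic_test.format('uint64_t')) tries to build */
#include <stdint.h>   /* assumed header: provides uint64_t */

int main(void)
{
    uint64_t x = 0, y = 0;
    y = __atomic_load_n(&x, __ATOMIC_RELAXED);
    __atomic_store_n(&x, y, __ATOMIC_RELAXED);
    __atomic_compare_exchange_n(&x, &y, x, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    __atomic_exchange_n(&x, y, __ATOMIC_RELAXED);
    __atomic_fetch_add(&x, y, __ATOMIC_RELAXED);
    return 0;
}

Because cc.links() is used without adding -latomic, the probe effectively checks that these builtins resolve without falling back to libatomic, in line with the "not via libatomic" requirement described for the 128-bit case in the comment above.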
+ has_atomic128 = cc.links(atomic_test.format('unsigned __int128')) + + config_host_data.set('CONFIG_ATOMIC128', has_atomic128) + + if not has_atomic128 + has_cmpxchg128 = cc.links(''' + int main(void) + { + unsigned __int128 x = 0, y = 0; + __sync_val_compare_and_swap_16(&x, y, x); + return 0; + } + ''') + + config_host_data.set('CONFIG_CMPXCHG128', has_cmpxchg128) + endif +endif + +config_host_data.set('CONFIG_GETAUXVAL', cc.links(gnu_source_prefix + ''' + #include + int main(void) { + return getauxval(AT_HWCAP) == 0; + }''')) + +config_host_data.set('CONFIG_USBFS', have_linux_user and cc.compiles(''' + #include + + #ifndef USBDEVFS_GET_CAPABILITIES + #error "USBDEVFS_GET_CAPABILITIES undefined" + #endif + + #ifndef USBDEVFS_DISCONNECT_CLAIM + #error "USBDEVFS_DISCONNECT_CLAIM undefined" + #endif + + int main(void) { return 0; }''')) + +have_keyring = get_option('keyring') \ + .require(targetos == 'linux', error_message: 'keyring is only available on Linux') \ + .require(cc.compiles(''' + #include + #include + #include + #include + #include + int main(void) { + return syscall(__NR_keyctl, KEYCTL_READ, 0, NULL, NULL, 0); + }'''), error_message: 'keyctl syscall not available on this system').allowed() +config_host_data.set('CONFIG_SECRET_KEYRING', have_keyring) + +have_cpuid_h = cc.links(''' + #include + int main(void) { + unsigned a, b, c, d; + unsigned max = __get_cpuid_max(0, 0); + + if (max >= 1) { + __cpuid(1, a, b, c, d); + } + + if (max >= 7) { + __cpuid_count(7, 0, a, b, c, d); + } + + return 0; + }''') +config_host_data.set('CONFIG_CPUID_H', have_cpuid_h) + +config_host_data.set('CONFIG_AVX2_OPT', get_option('avx2') \ + .require(have_cpuid_h, error_message: 'cpuid.h not available, cannot enable AVX2') \ + .require(cc.links(''' + #pragma GCC push_options + #pragma GCC target("avx2") + #include + #include + static int bar(void *a) { + __m256i x = *(__m256i *)a; + return _mm256_testz_si256(x, x); + } + int main(int argc, char *argv[]) { return bar(argv[argc - 1]); } + '''), error_message: 'AVX2 not available').allowed()) + +config_host_data.set('CONFIG_AVX512F_OPT', get_option('avx512f') \ + .require(have_cpuid_h, error_message: 'cpuid.h not available, cannot enable AVX512F') \ + .require(cc.links(''' + #pragma GCC push_options + #pragma GCC target("avx512f") + #include + #include + static int bar(void *a) { + __m512i x = *(__m512i *)a; + return _mm512_test_epi64_mask(x, x); + } + int main(int argc, char *argv[]) { return bar(argv[argc - 1]); } + '''), error_message: 'AVX512F not available').allowed()) + +have_pvrdma = get_option('pvrdma') \ + .require(rdma.found(), error_message: 'PVRDMA requires OpenFabrics libraries') \ + .require(cc.compiles(gnu_source_prefix + ''' + #include + int main(void) + { + char buf = 0; + void *addr = &buf; + addr = mremap(addr, 0, 1, MREMAP_MAYMOVE | MREMAP_FIXED); + + return 0; + }'''), error_message: 'PVRDMA requires mremap').allowed() + +if have_pvrdma + config_host_data.set('LEGACY_RDMA_REG_MR', not cc.links(''' + #include + int main(void) + { + struct ibv_mr *mr; + struct ibv_pd *pd = NULL; + size_t length = 10; + uint64_t iova = 0; + int access = 0; + void *addr = NULL; + + mr = ibv_reg_mr_iova(pd, addr, length, iova, access); + ibv_dereg_mr(mr); + return 0; + }''')) +endif + +if get_option('membarrier').disabled() + have_membarrier = false +elif targetos == 'windows' + have_membarrier = true +elif targetos == 'linux' + have_membarrier = cc.compiles(''' + #include + #include + #include + #include + int main(void) { + syscall(__NR_membarrier, 
MEMBARRIER_CMD_QUERY, 0); + syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0); + exit(0); + }''') +endif +config_host_data.set('CONFIG_MEMBARRIER', get_option('membarrier') \ + .require(have_membarrier, error_message: 'membarrier system call not available') \ + .allowed()) + +have_afalg = get_option('crypto_afalg') \ + .require(cc.compiles(gnu_source_prefix + ''' + #include + #include + #include + #include + int main(void) { + int sock; + sock = socket(AF_ALG, SOCK_SEQPACKET, 0); + return sock; + } + '''), error_message: 'AF_ALG requested but could not be detected').allowed() +config_host_data.set('CONFIG_AF_ALG', have_afalg) + +config_host_data.set('CONFIG_AF_VSOCK', cc.has_header_symbol( + 'linux/vm_sockets.h', 'AF_VSOCK', + prefix: '#include ', +)) + +have_vss = false +have_vss_sdk = false # old xp/2003 SDK +if targetos == 'windows' and link_language == 'cpp' + have_vss = cxx.compiles(''' + #define __MIDL_user_allocate_free_DEFINED__ + #include + int main(void) { return VSS_CTX_BACKUP; }''') + have_vss_sdk = cxx.has_header('vscoordint.h') +endif +config_host_data.set('HAVE_VSS_SDK', have_vss_sdk) -ignored = ['CONFIG_QEMU_INTERP_PREFIX'] # actually per-target -arrays = ['CONFIG_AUDIO_DRIVERS', 'CONFIG_BDRV_RW_WHITELIST', 'CONFIG_BDRV_RO_WHITELIST'] -strings = ['HOST_DSOSUF', 'CONFIG_IASL'] foreach k, v: config_host - if ignored.contains(k) - # do nothing - elif arrays.contains(k) - if v != '' - v = '"' + '", "'.join(v.split()) + '", ' - endif - config_host_data.set(k, v) - elif k == 'ARCH' - config_host_data.set('HOST_' + v.to_upper(), 1) - elif strings.contains(k) - if not k.startswith('CONFIG_') - k = 'CONFIG_' + k.to_upper() - endif - config_host_data.set_quoted(k, v) - elif k.startswith('CONFIG_') or k.startswith('HAVE_') or k.startswith('HOST_') + if k.startswith('CONFIG_') config_host_data.set(k, v == 'y' ? 1 : v) endif endforeach +# Older versions of MinGW do not import _lock_file and _unlock_file properly. +# This was fixed for v6.0.0 with commit b48e3ac8969d. +if targetos == 'windows' + config_host_data.set('HAVE__LOCK_FILE', cc.links(''' + #include + int main(void) { + _lock_file(NULL); + _unlock_file(NULL); + return 0; + }''', name: '_lock_file and _unlock_file')) +endif + ######################## # Target configuration # ######################## @@ -1524,14 +2517,12 @@ config_target_mak = {} disassemblers = { 'alpha' : ['CONFIG_ALPHA_DIS'], - 'arm' : ['CONFIG_ARM_DIS'], 'avr' : ['CONFIG_AVR_DIS'], 'cris' : ['CONFIG_CRIS_DIS'], 'hexagon' : ['CONFIG_HEXAGON_DIS'], 'hppa' : ['CONFIG_HPPA_DIS'], 'i386' : ['CONFIG_I386_DIS'], 'x86_64' : ['CONFIG_I386_DIS'], - 'x32' : ['CONFIG_I386_DIS'], 'm68k' : ['CONFIG_M68K_DIS'], 'microblaze' : ['CONFIG_MICROBLAZE_DIS'], 'mips' : ['CONFIG_MIPS_DIS'], @@ -1544,29 +2535,30 @@ disassemblers = { 'sh4' : ['CONFIG_SH4_DIS'], 'sparc' : ['CONFIG_SPARC_DIS'], 'xtensa' : ['CONFIG_XTENSA_DIS'], + 'loongarch' : ['CONFIG_LOONGARCH_DIS'], } if link_language == 'cpp' disassemblers += { - 'aarch64' : [ 'CONFIG_ARM_A64_DIS'], - 'arm' : [ 'CONFIG_ARM_DIS', 'CONFIG_ARM_A64_DIS'], 'mips' : [ 'CONFIG_MIPS_DIS', 'CONFIG_NANOMIPS_DIS'], } endif have_ivshmem = config_host_data.get('CONFIG_EVENTFD') host_kconfig = \ - ('CONFIG_TPM' in config_host ? ['CONFIG_TPM=y'] : []) + \ - ('CONFIG_SPICE' in config_host ? ['CONFIG_SPICE=y'] : []) + \ + (get_option('fuzzing') ? ['CONFIG_FUZZ=y'] : []) + \ + (have_tpm ? ['CONFIG_TPM=y'] : []) + \ + (spice.found() ? ['CONFIG_SPICE=y'] : []) + \ (have_ivshmem ? ['CONFIG_IVSHMEM=y'] : []) + \ - ('CONFIG_OPENGL' in config_host ? 
['CONFIG_OPENGL=y'] : []) + \ + (opengl.found() ? ['CONFIG_OPENGL=y'] : []) + \ (x11.found() ? ['CONFIG_X11=y'] : []) + \ - ('CONFIG_VHOST_USER' in config_host ? ['CONFIG_VHOST_USER=y'] : []) + \ - ('CONFIG_VHOST_VDPA' in config_host ? ['CONFIG_VHOST_VDPA=y'] : []) + \ - ('CONFIG_VHOST_KERNEL' in config_host ? ['CONFIG_VHOST_KERNEL=y'] : []) + \ + (have_vhost_user ? ['CONFIG_VHOST_USER=y'] : []) + \ + (have_vhost_vdpa ? ['CONFIG_VHOST_VDPA=y'] : []) + \ + (have_vhost_kernel ? ['CONFIG_VHOST_KERNEL=y'] : []) + \ (have_virtfs ? ['CONFIG_VIRTFS=y'] : []) + \ ('CONFIG_LINUX' in config_host ? ['CONFIG_LINUX=y'] : []) + \ - ('CONFIG_PVRDMA' in config_host ? ['CONFIG_PVRDMA=y'] : []) + \ - (multiprocess_allowed ? ['CONFIG_MULTIPROCESS_ALLOWED=y'] : []) + (have_pvrdma ? ['CONFIG_PVRDMA=y'] : []) + \ + (multiprocess_allowed ? ['CONFIG_MULTIPROCESS_ALLOWED=y'] : []) + \ + (vfio_user_server_allowed ? ['CONFIG_VFIO_USER_SERVER_ALLOWED=y'] : []) ignored = [ 'TARGET_XML_FILES', 'TARGET_ABI_DIR', 'TARGET_ARCH' ] @@ -1598,7 +2590,7 @@ foreach target : target_dirs config_target += { 'CONFIG_USER_ONLY': 'y', 'CONFIG_QEMU_INTERP_PREFIX': - config_host['CONFIG_QEMU_INTERP_PREFIX'].format(config_target['TARGET_NAME']) + get_option('interp_prefix').replace('%M', config_target['TARGET_NAME']) } endif @@ -1609,8 +2601,6 @@ foreach target : target_dirs config_all += { sym: 'y' } if sym == 'CONFIG_TCG' and tcg_arch == 'tci' config_target += { 'CONFIG_TCG_INTERPRETER': 'y' } - elif sym == 'CONFIG_XEN' and have_xen_pci_passthrough - config_target += { 'CONFIG_XEN_PCI_PASSTHROUGH': 'y' } endif if target in modular_tcg config_target += { 'CONFIG_TCG_MODULAR': 'y' } @@ -1642,9 +2632,12 @@ foreach target : target_dirs if 'TARGET_ABI_DIR' not in config_target config_target += {'TARGET_ABI_DIR': config_target['TARGET_ARCH']} endif + if 'TARGET_BIG_ENDIAN' not in config_target + config_target += {'TARGET_BIG_ENDIAN': 'n'} + endif foreach k, v: disassemblers - if config_host['ARCH'].startswith(k) or config_target['TARGET_BASE_ARCH'].startswith(k) + if host_arch.startswith(k) or config_target['TARGET_BASE_ARCH'].startswith(k) foreach sym: v config_target += { sym: 'y' } config_all_disas += { sym: 'y' } @@ -1666,10 +2659,14 @@ foreach target : target_dirs config_target_data.set_quoted(k, v) elif v == 'y' config_target_data.set(k, 1) + elif v == 'n' + config_target_data.set(k, 0) else config_target_data.set(k, v) endif endforeach + config_target_data.set('QEMU_ARCH', + 'QEMU_ARCH_' + config_target['TARGET_BASE_ARCH'].to_upper()) config_target_h += {target: configure_file(output: target + '-config-target.h', configuration: config_target_data)} @@ -1715,24 +2712,33 @@ config_all += config_all_devices config_all += config_host config_all += config_all_disas config_all += { - 'CONFIG_XEN': config_host.has_key('CONFIG_XEN_BACKEND'), + 'CONFIG_XEN': xen.found(), 'CONFIG_SOFTMMU': have_system, 'CONFIG_USER_ONLY': have_user, 'CONFIG_ALL': true, } +target_configs_h = [] +foreach target: target_dirs + target_configs_h += config_target_h[target] + target_configs_h += config_devices_h.get(target, []) +endforeach +genh += custom_target('config-poison.h', + input: [target_configs_h], + output: 'config-poison.h', + capture: true, + command: [find_program('scripts/make-config-poison.sh'), + target_configs_h]) + ############## # Submodules # ############## capstone = not_found -capstone_opt = get_option('capstone') -if capstone_opt in ['enabled', 'auto', 'system'] - have_internal = fs.exists(meson.current_source_dir() / 'capstone/Makefile') - 
capstone = dependency('capstone', version: '>=4.0', +if not get_option('capstone').auto() or have_system or have_user + capstone = dependency('capstone', version: '>=3.0.5', kwargs: static_kwargs, method: 'pkg-config', - required: capstone_opt == 'system' or - capstone_opt == 'enabled' and not have_internal) + required: get_option('capstone')) # Some versions of capstone have broken pkg-config file # that reports a wrong -I path, causing the #include to @@ -1741,209 +2747,30 @@ if capstone_opt in ['enabled', 'auto', 'system'] if capstone.found() and not cc.compiles('#include ', dependencies: [capstone]) capstone = not_found - if capstone_opt == 'system' - error('system capstone requested, it does not appear to work') + if get_option('capstone').enabled() + error('capstone requested, but it does not appear to work') endif endif - - if capstone.found() - capstone_opt = 'system' - elif have_internal - capstone_opt = 'internal' - else - capstone_opt = 'disabled' - endif -endif -if capstone_opt == 'internal' - capstone_data = configuration_data() - capstone_data.set('CAPSTONE_USE_SYS_DYN_MEM', '1') - - capstone_files = files( - 'capstone/cs.c', - 'capstone/MCInst.c', - 'capstone/MCInstrDesc.c', - 'capstone/MCRegisterInfo.c', - 'capstone/SStream.c', - 'capstone/utils.c' - ) - - if 'CONFIG_ARM_DIS' in config_all_disas - capstone_data.set('CAPSTONE_HAS_ARM', '1') - capstone_files += files( - 'capstone/arch/ARM/ARMDisassembler.c', - 'capstone/arch/ARM/ARMInstPrinter.c', - 'capstone/arch/ARM/ARMMapping.c', - 'capstone/arch/ARM/ARMModule.c' - ) - endif - - # FIXME: This config entry currently depends on a c++ compiler. - # Which is needed for building libvixl, but not for capstone. - if 'CONFIG_ARM_A64_DIS' in config_all_disas - capstone_data.set('CAPSTONE_HAS_ARM64', '1') - capstone_files += files( - 'capstone/arch/AArch64/AArch64BaseInfo.c', - 'capstone/arch/AArch64/AArch64Disassembler.c', - 'capstone/arch/AArch64/AArch64InstPrinter.c', - 'capstone/arch/AArch64/AArch64Mapping.c', - 'capstone/arch/AArch64/AArch64Module.c' - ) - endif - - if 'CONFIG_PPC_DIS' in config_all_disas - capstone_data.set('CAPSTONE_HAS_POWERPC', '1') - capstone_files += files( - 'capstone/arch/PowerPC/PPCDisassembler.c', - 'capstone/arch/PowerPC/PPCInstPrinter.c', - 'capstone/arch/PowerPC/PPCMapping.c', - 'capstone/arch/PowerPC/PPCModule.c' - ) - endif - - if 'CONFIG_S390_DIS' in config_all_disas - capstone_data.set('CAPSTONE_HAS_SYSZ', '1') - capstone_files += files( - 'capstone/arch/SystemZ/SystemZDisassembler.c', - 'capstone/arch/SystemZ/SystemZInstPrinter.c', - 'capstone/arch/SystemZ/SystemZMapping.c', - 'capstone/arch/SystemZ/SystemZModule.c', - 'capstone/arch/SystemZ/SystemZMCTargetDesc.c' - ) - endif - - if 'CONFIG_I386_DIS' in config_all_disas - capstone_data.set('CAPSTONE_HAS_X86', 1) - capstone_files += files( - 'capstone/arch/X86/X86Disassembler.c', - 'capstone/arch/X86/X86DisassemblerDecoder.c', - 'capstone/arch/X86/X86ATTInstPrinter.c', - 'capstone/arch/X86/X86IntelInstPrinter.c', - 'capstone/arch/X86/X86InstPrinterCommon.c', - 'capstone/arch/X86/X86Mapping.c', - 'capstone/arch/X86/X86Module.c' - ) - endif - - configure_file(output: 'capstone-defs.h', configuration: capstone_data) - - capstone_cargs = [ - # FIXME: There does not seem to be a way to completely replace the c_args - # that come from add_project_arguments() -- we can only add to them. - # So: disable all warnings with a big hammer. 
- '-Wno-error', '-w', - - # Include all configuration defines via a header file, which will wind up - # as a dependency on the object file, and thus changes here will result - # in a rebuild. - '-include', 'capstone-defs.h' - ] - - libcapstone = static_library('capstone', - build_by_default: false, - sources: capstone_files, - c_args: capstone_cargs, - include_directories: 'capstone/include') - capstone = declare_dependency(link_with: libcapstone, - include_directories: 'capstone/include/capstone') -endif - -slirp = not_found -slirp_opt = 'disabled' -if have_system - slirp_opt = get_option('slirp') - if slirp_opt in ['enabled', 'auto', 'system'] - have_internal = fs.exists(meson.current_source_dir() / 'slirp/meson.build') - slirp = dependency('slirp', kwargs: static_kwargs, - method: 'pkg-config', - required: slirp_opt == 'system' or - slirp_opt == 'enabled' and not have_internal) - if slirp.found() - slirp_opt = 'system' - elif have_internal - slirp_opt = 'internal' - else - slirp_opt = 'disabled' - endif - endif - if slirp_opt == 'internal' - slirp_deps = [] - if targetos == 'windows' - slirp_deps = cc.find_library('iphlpapi') - elif targetos == 'darwin' - slirp_deps = cc.find_library('resolv') - endif - slirp_conf = configuration_data() - slirp_conf.set('SLIRP_MAJOR_VERSION', meson.project_version().split('.')[0]) - slirp_conf.set('SLIRP_MINOR_VERSION', meson.project_version().split('.')[1]) - slirp_conf.set('SLIRP_MICRO_VERSION', meson.project_version().split('.')[2]) - slirp_conf.set_quoted('SLIRP_VERSION_STRING', meson.project_version()) - slirp_cargs = ['-DG_LOG_DOMAIN="Slirp"'] - slirp_files = [ - 'slirp/src/arp_table.c', - 'slirp/src/bootp.c', - 'slirp/src/cksum.c', - 'slirp/src/dhcpv6.c', - 'slirp/src/dnssearch.c', - 'slirp/src/if.c', - 'slirp/src/ip6_icmp.c', - 'slirp/src/ip6_input.c', - 'slirp/src/ip6_output.c', - 'slirp/src/ip_icmp.c', - 'slirp/src/ip_input.c', - 'slirp/src/ip_output.c', - 'slirp/src/mbuf.c', - 'slirp/src/misc.c', - 'slirp/src/ncsi.c', - 'slirp/src/ndp_table.c', - 'slirp/src/sbuf.c', - 'slirp/src/slirp.c', - 'slirp/src/socket.c', - 'slirp/src/state.c', - 'slirp/src/stream.c', - 'slirp/src/tcp_input.c', - 'slirp/src/tcp_output.c', - 'slirp/src/tcp_subr.c', - 'slirp/src/tcp_timer.c', - 'slirp/src/tftp.c', - 'slirp/src/udp.c', - 'slirp/src/udp6.c', - 'slirp/src/util.c', - 'slirp/src/version.c', - 'slirp/src/vmstate.c', - ] - - configure_file( - input : 'slirp/src/libslirp-version.h.in', - output : 'libslirp-version.h', - configuration: slirp_conf) - - slirp_inc = include_directories('slirp', 'slirp/src') - libslirp = static_library('slirp', - build_by_default: false, - sources: slirp_files, - c_args: slirp_cargs, - include_directories: slirp_inc) - slirp = declare_dependency(link_with: libslirp, - dependencies: slirp_deps, - include_directories: slirp_inc) - endif endif -# For CFI, we need to compile slirp as a static library together with qemu. -# This is because we register slirp functions as callbacks for QEMU Timers. -# When using a system-wide shared libslirp, the type information for the -# callback is missing and the timer call produces a false positive with CFI. -# -# Now that slirp_opt has been defined, check if the selected slirp is compatible -# with control-flow integrity. -if get_option('cfi') and slirp_opt == 'system' - error('Control-Flow Integrity is not compatible with system-wide slirp.' 
\ - + ' Please configure with --enable-slirp=git') +libvfio_user_dep = not_found +if have_system and vfio_user_server_allowed + have_internal = fs.exists(meson.current_source_dir() / 'subprojects/libvfio-user/meson.build') + + if not have_internal + error('libvfio-user source not found - please pull git submodule') + endif + + libvfio_user_proj = subproject('libvfio-user') + + libvfio_user_lib = libvfio_user_proj.get_variable('libvfio_user_dep') + + libvfio_user_dep = declare_dependency(dependencies: [libvfio_user_lib]) endif fdt = not_found -fdt_opt = get_option('fdt') if have_system + fdt_opt = get_option('fdt') if fdt_opt in ['enabled', 'auto', 'system'] have_internal = fs.exists(meson.current_source_dir() / 'dtc/libfdt/Makefile.libfdt') fdt = cc.find_library('fdt', kwargs: static_kwargs, @@ -1952,13 +2779,16 @@ if have_system if fdt.found() and cc.links(''' #include #include - int main(void) { fdt_check_full(NULL, 0); return 0; }''', + int main(void) { fdt_find_max_phandle(NULL, NULL); return 0; }''', dependencies: fdt) fdt_opt = 'system' + elif fdt_opt == 'system' + error('system libfdt requested, but it is too old (1.5.1 or newer required)') elif have_internal fdt_opt = 'internal' else fdt_opt = 'disabled' + fdt = not_found endif endif if fdt_opt == 'internal' @@ -1983,6 +2813,8 @@ if have_system fdt = declare_dependency(link_with: libfdt, include_directories: fdt_inc) endif +else + fdt_opt = 'disabled' endif if not fdt.found() and fdt_required.length() > 0 error('fdt not available but required by targets ' + ', '.join(fdt_required)) @@ -1999,30 +2831,30 @@ config_host_data.set('CONFIG_SLIRP', slirp.found()) genh += configure_file(output: 'config-host.h', configuration: config_host_data) hxtool = find_program('scripts/hxtool') -shaderinclude = find_program('scripts/shaderinclude.pl') +shaderinclude = find_program('scripts/shaderinclude.py') qapi_gen = find_program('scripts/qapi-gen.py') -qapi_gen_depends = [ meson.source_root() / 'scripts/qapi/__init__.py', - meson.source_root() / 'scripts/qapi/commands.py', - meson.source_root() / 'scripts/qapi/common.py', - meson.source_root() / 'scripts/qapi/error.py', - meson.source_root() / 'scripts/qapi/events.py', - meson.source_root() / 'scripts/qapi/expr.py', - meson.source_root() / 'scripts/qapi/gen.py', - meson.source_root() / 'scripts/qapi/introspect.py', - meson.source_root() / 'scripts/qapi/parser.py', - meson.source_root() / 'scripts/qapi/schema.py', - meson.source_root() / 'scripts/qapi/source.py', - meson.source_root() / 'scripts/qapi/types.py', - meson.source_root() / 'scripts/qapi/visit.py', - meson.source_root() / 'scripts/qapi/common.py', - meson.source_root() / 'scripts/qapi-gen.py' +qapi_gen_depends = [ meson.current_source_dir() / 'scripts/qapi/__init__.py', + meson.current_source_dir() / 'scripts/qapi/commands.py', + meson.current_source_dir() / 'scripts/qapi/common.py', + meson.current_source_dir() / 'scripts/qapi/error.py', + meson.current_source_dir() / 'scripts/qapi/events.py', + meson.current_source_dir() / 'scripts/qapi/expr.py', + meson.current_source_dir() / 'scripts/qapi/gen.py', + meson.current_source_dir() / 'scripts/qapi/introspect.py', + meson.current_source_dir() / 'scripts/qapi/parser.py', + meson.current_source_dir() / 'scripts/qapi/schema.py', + meson.current_source_dir() / 'scripts/qapi/source.py', + meson.current_source_dir() / 'scripts/qapi/types.py', + meson.current_source_dir() / 'scripts/qapi/visit.py', + meson.current_source_dir() / 'scripts/qapi/common.py', + meson.current_source_dir() / 
'scripts/qapi-gen.py' ] packfile = find_program('scripts/pack-file.py') tracetool = [ python, files('scripts/tracetool.py'), - '--backend=' + config_host['TRACE_BACKENDS'] + '--backend=' + ','.join(get_option('trace_backends')) ] tracetool_depends = files( 'scripts/tracetool/backend/log.py', @@ -2032,19 +2864,15 @@ tracetool_depends = files( 'scripts/tracetool/backend/simple.py', 'scripts/tracetool/backend/syslog.py', 'scripts/tracetool/backend/ust.py', - 'scripts/tracetool/format/tcg_h.py', 'scripts/tracetool/format/ust_events_c.py', 'scripts/tracetool/format/ust_events_h.py', 'scripts/tracetool/format/__init__.py', 'scripts/tracetool/format/d.py', - 'scripts/tracetool/format/tcg_helper_c.py', 'scripts/tracetool/format/simpletrace_stap.py', 'scripts/tracetool/format/c.py', 'scripts/tracetool/format/h.py', - 'scripts/tracetool/format/tcg_helper_h.py', 'scripts/tracetool/format/log_stap.py', 'scripts/tracetool/format/stap.py', - 'scripts/tracetool/format/tcg_helper_wrapper_h.py', 'scripts/tracetool/__init__.py', 'scripts/tracetool/transform.py', 'scripts/tracetool/vcpu.py' @@ -2052,7 +2880,7 @@ tracetool_depends = files( qemu_version_cmd = [find_program('scripts/qemu-version.sh'), meson.current_source_dir(), - config_host['PKGVERSION'], meson.project_version()] + get_option('pkgversion'), meson.project_version()] qemu_version = custom_target('qemu-version.h', output: 'qemu-version.h', command: qemu_version_cmd, @@ -2062,7 +2890,7 @@ qemu_version = custom_target('qemu-version.h', genconfig_cmd = [ python, files('genconfig/gen_config.py'), - meson.source_root() / 'config_spec.yml', 'xemu-config.h' + meson.current_source_dir() / 'config_spec.yml', 'xemu-config.h' ] xemu_config = custom_target('xemu-config.h', @@ -2103,12 +2931,11 @@ genh += hxdep authz_ss = ss.source_set() blockdev_ss = ss.source_set() block_ss = ss.source_set() -bsd_user_ss = ss.source_set() chardev_ss = ss.source_set() common_ss = ss.source_set() crypto_ss = ss.source_set() +hwcore_ss = ss.source_set() io_ss = ss.source_set() -linux_user_ss = ss.source_set() qmp_ss = ss.source_set() qom_ss = ss.source_set() softmmu_ss = ss.source_set() @@ -2142,10 +2969,14 @@ trace_events_subdirs = [ 'qom', 'monitor', 'util', + 'gdbstub', ] -if have_user +if have_linux_user trace_events_subdirs += [ 'linux-user' ] endif +if have_bsd_user + trace_events_subdirs += [ 'bsd-user' ] +endif if have_block trace_events_subdirs += [ 'authz', @@ -2174,7 +3005,6 @@ if have_system 'hw/char', 'hw/display', 'hw/dma', - 'hw/hppa', 'hw/hyperv', 'hw/i2c', 'hw/i386', @@ -2189,6 +3019,7 @@ if have_system 'hw/misc/macio', 'hw/net', 'hw/net/can', + 'hw/nubus', 'hw/nvme', 'hw/nvram', 'hw/pci', @@ -2200,6 +3031,7 @@ if have_system 'hw/s390x', 'hw/scsi', 'hw/sd', + 'hw/sh4', 'hw/sparc', 'hw/sparc64', 'hw/ssi', @@ -2226,10 +3058,12 @@ if have_system or have_user 'accel/tcg', 'hw/core', 'target/arm', + 'target/arm/hvf', 'target/hppa', 'target/i386', 'target/i386/kvm', 'target/mips/tcg', + 'target/nios2', 'target/ppc', 'target/riscv', 'target/s390x', @@ -2239,11 +3073,19 @@ if have_system or have_user endif vhost_user = not_found -if 'CONFIG_VHOST_USER' in config_host +if targetos == 'linux' and have_vhost_user libvhost_user = subproject('libvhost-user') vhost_user = libvhost_user.get_variable('vhost_user_dep') endif +libvduse = not_found +if have_libvduse + libvduse_proj = subproject('libvduse') + libvduse = libvduse_proj.get_variable('libvduse_dep') +endif + +# NOTE: the trace/ subdirectory needs the qapi_trace_events variable +# that is filled in by qapi/. 
subdir('qapi') subdir('qobject') subdir('stubs') @@ -2253,6 +3095,8 @@ subdir('qom') subdir('authz') subdir('crypto') subdir('ui') +subdir('hw') +subdir('gdbstub') subdir('data') subdir('winpcap-loader') @@ -2262,6 +3106,18 @@ if enable_modules modulecommon = declare_dependency(link_whole: libmodulecommon, compile_args: '-DBUILD_DSO') endif +qom_ss = qom_ss.apply(config_host, strict: false) +libqom = static_library('qom', qom_ss.sources() + genh, + dependencies: [qom_ss.dependencies()], + name_suffix: 'fa') +qom = declare_dependency(link_whole: libqom) + +event_loop_base = files('event-loop-base.c') +event_loop_base = static_library('event-loop-base', sources: event_loop_base + genh, + build_by_default: true) +event_loop_base = declare_dependency(link_whole: event_loop_base, + dependencies: [qom]) + stub_ss = stub_ss.apply(config_all, strict: false) util_ss.add_all(trace_ss) @@ -2271,7 +3127,8 @@ libqemuutil = static_library('qemuutil', include_directories: 'util/xxHash', dependencies: [util_ss.dependencies(), libm, threads, glib, socket, malloc, pixman]) qemuutil = declare_dependency(link_with: libqemuutil, - sources: genh + version_res) + sources: genh + version_res, + dependencies: [event_loop_base]) if have_system or have_user decodetree = generator(find_program('scripts/decodetree.py'), @@ -2294,7 +3151,9 @@ if have_block 'job.c', 'qemu-io-cmds.c', )) - block_ss.add(when: 'CONFIG_REPLICATION', if_true: files('replication.c')) + if config_host_data.get('CONFIG_REPLICATION') + block_ss.add(files('replication.c')) + endif subdir('nbd') subdir('scsi') @@ -2323,7 +3182,7 @@ common_ss.add(genconfig) subdir('softmmu') common_ss.add(capstone) -specific_ss.add(files('cpu.c', 'disas.c', 'gdbstub.c'), capstone) +specific_ss.add(files('cpu.c', 'disas.c'), capstone) # Work around a gcc bug/misfeature wherein constant propagation looks # through an alias: @@ -2338,7 +3197,7 @@ if get_option('b_lto') if get_option('cfi') pagevary_flags += '-fno-sanitize=cfi-icall' endif - pagevary = static_library('page-vary-common', sources: pagevary, + pagevary = static_library('page-vary-common', sources: pagevary + genh, c_args: pagevary_flags) pagevary = declare_dependency(link_with: pagevary) endif @@ -2352,22 +3211,17 @@ subdir('monitor') subdir('net') subdir('replay') subdir('semihosting') -subdir('hw') subdir('tcg') subdir('fpu') subdir('accel') subdir('plugins') -subdir('bsd-user') -subdir('linux-user') subdir('ebpf') -common_ss.add(libbpf) +common_user_inc = [] -bsd_user_ss.add(files('gdbstub.c')) -specific_ss.add_all(when: 'CONFIG_BSD_USER', if_true: bsd_user_ss) - -linux_user_ss.add(files('gdbstub.c', 'thunk.c')) -specific_ss.add_all(when: 'CONFIG_LINUX_USER', if_true: linux_user_ss) +subdir('common-user') +subdir('bsd-user') +subdir('linux-user') # needed for fuzzing binaries subdir('tests/qtest/libqos') @@ -2391,6 +3245,10 @@ modinfo_files = [] block_mods = [] softmmu_mods = [] foreach d, list : modules + if not (d == 'block' ? 
have_block : have_system) + continue + endif + foreach m, module_ss : list if enable_modules and targetos != 'windows' module_ss = module_ss.apply(config_all, strict: false) @@ -2460,14 +3318,23 @@ foreach d, list : target_modules endforeach if enable_modules - modinfo_src = custom_target('modinfo.c', - output: 'modinfo.c', - input: modinfo_files, - command: [modinfo_generate, '@INPUT@'], - capture: true) - modinfo_lib = static_library('modinfo', modinfo_src) - modinfo_dep = declare_dependency(link_whole: modinfo_lib) - softmmu_ss.add(modinfo_dep) + foreach target : target_dirs + if target.endswith('-softmmu') + config_target = config_target_mak[target] + config_devices_mak = target + '-config-devices.mak' + modinfo_src = custom_target('modinfo-' + target + '.c', + output: 'modinfo-' + target + '.c', + input: modinfo_files, + command: [modinfo_generate, '--devices', config_devices_mak, '@INPUT@'], + capture: true) + + modinfo_lib = static_library('modinfo-' + target + '.c', modinfo_src) + modinfo_dep = declare_dependency(link_with: modinfo_lib) + + arch = config_target['TARGET_NAME'] == 'sparc64' ? 'sparc64' : config_target['TARGET_BASE_ARCH'] + hw_arch[arch].add(modinfo_dep) + endif + endforeach endif nm = find_program('nm') @@ -2481,13 +3348,6 @@ qemu_syms = custom_target('qemu.syms', output: 'qemu.syms', capture: true, command: [undefsym, nm, '@INPUT@']) -qom_ss = qom_ss.apply(config_host, strict: false) -libqom = static_library('qom', qom_ss.sources() + genh, - dependencies: [qom_ss.dependencies()], - name_suffix: 'fa') - -qom = declare_dependency(link_whole: libqom) - authz_ss = authz_ss.apply(config_host, strict: false) libauthz = static_library('authz', authz_ss.sources() + genh, dependencies: [authz_ss.dependencies()], @@ -2540,7 +3400,7 @@ libblockdev = static_library('blockdev', blockdev_ss.sources() + genh, build_by_default: false) blockdev = declare_dependency(link_whole: [libblockdev], - dependencies: [block]) + dependencies: [block, event_loop_base]) qmp_ss = qmp_ss.apply(config_host, strict: false) libqmp = static_library('qmp', qmp_ss.sources() + genh, @@ -2552,12 +3412,13 @@ qmp = declare_dependency(link_whole: [libqmp]) libchardev = static_library('chardev', chardev_ss.sources() + genh, name_suffix: 'fa', - dependencies: [gnutls], + dependencies: chardev_ss.dependencies(), build_by_default: false) chardev = declare_dependency(link_whole: libchardev) -libhwcore = static_library('hwcore', sources: hwcore_files + genh, +hwcore_ss = hwcore_ss.apply(config_host, strict: false) +libhwcore = static_library('hwcore', sources: hwcore_ss.sources() + genh, name_suffix: 'fa', build_by_default: false) hwcore = declare_dependency(link_whole: libhwcore) @@ -2567,13 +3428,18 @@ common_ss.add(hwcore) # Targets # ########### +emulator_modules = [] foreach m : block_mods + softmmu_mods - shared_module(m.name(), + emulator_modules += shared_module(m.name(), + build_by_default: true, name_prefix: '', link_whole: m, install: true, install_dir: qemu_moddir) endforeach +if emulator_modules.length() > 0 + alias_target('modules', emulator_modules) +endif softmmu_ss.add(authz, blockdev, chardev, crypto, io, qmp) common_ss.add(qom, qemuutil) @@ -2585,17 +3451,22 @@ common_all = common_ss.apply(config_all, strict: false) common_all = static_library('common', build_by_default: false, sources: common_all.sources() + genh, + include_directories: common_user_inc, implicit_include_directories: false, dependencies: common_all.dependencies(), name_suffix: 'fa') feature_to_c = 
find_program('scripts/feature_to_c.sh') +if targetos == 'darwin' + entitlement = find_program('scripts/entitlement.sh') +endif + emulators = {} foreach target : target_dirs config_target = config_target_mak[target] target_name = config_target['TARGET_NAME'] - arch = config_target['TARGET_BASE_ARCH'] + target_base_arch = config_target['TARGET_BASE_ARCH'] arch_srcs = [config_target_h[target]] arch_deps = [] c_args = ['-DNEED_CPU_H', @@ -2609,13 +3480,12 @@ foreach target : target_dirs target_inc += include_directories('linux-headers', is_system: true) endif if target.endswith('-softmmu') - qemu_target_name = 'qemu-system-' + target_name target_type='system' - t = target_softmmu_arch[arch].apply(config_target, strict: false) + t = target_softmmu_arch[target_base_arch].apply(config_target, strict: false) arch_srcs += t.sources() arch_deps += t.dependencies() - hw_dir = target_name == 'sparc64' ? 'sparc64' : arch + hw_dir = target_name == 'sparc64' ? 'sparc64' : target_base_arch hw = hw_arch[hw_dir].apply(config_target, strict: false) arch_srcs += hw.sources() arch_deps += hw.dependencies() @@ -2625,18 +3495,21 @@ foreach target : target_dirs else abi = config_target['TARGET_ABI_DIR'] target_type='user' - qemu_target_name = 'qemu-' + target_name - if arch in target_user_arch - t = target_user_arch[arch].apply(config_target, strict: false) + target_inc += common_user_inc + if target_base_arch in target_user_arch + t = target_user_arch[target_base_arch].apply(config_target, strict: false) arch_srcs += t.sources() arch_deps += t.dependencies() endif if 'CONFIG_LINUX_USER' in config_target base_dir = 'linux-user' - target_inc += include_directories('linux-user/host/' / config_host['ARCH']) - else + endif + if 'CONFIG_BSD_USER' in config_target base_dir = 'bsd-user' - target_inc += include_directories('bsd-user/freebsd') + target_inc += include_directories('bsd-user/' / targetos) + target_inc += include_directories('bsd-user/host/' / host_arch) + dir = base_dir / abi + arch_srcs += files(dir / 'signal.c', dir / 'target_arch_cpu.c') endif target_inc += include_directories( base_dir, @@ -2662,7 +3535,7 @@ foreach target : target_dirs arch_srcs += gdbstub_xml endif - t = target_arch[arch].apply(config_target, strict: false) + t = target_arch[target_base_arch].apply(config_target, strict: false) arch_srcs += t.sources() arch_deps += t.dependencies() @@ -2686,23 +3559,23 @@ foreach target : target_dirs if target.endswith('-softmmu') execs = [{ 'name': 'qemu-system-' + target_name, - 'gui': false, + 'win_subsystem': 'console', 'sources': files('softmmu/main.c'), 'dependencies': [] }] if targetos == 'windows' and (sdl.found() or gtk.found()) execs += [{ 'name': 'qemu-system-' + target_name + 'w', - 'gui': true, + 'win_subsystem': 'windows', 'sources': files('softmmu/main.c'), 'dependencies': [] }] endif - if config_host.has_key('CONFIG_FUZZ') + if get_option('fuzzing') specific_fuzz = specific_fuzz_ss.apply(config_target, strict: false) execs += [{ 'name': 'qemu-fuzz-' + target_name, - 'gui': false, + 'win_subsystem': 'console', 'sources': specific_fuzz.sources(), 'dependencies': specific_fuzz.dependencies(), }] @@ -2710,7 +3583,7 @@ foreach target : target_dirs else execs = [{ 'name': 'qemu-' + target_name, - 'gui': false, + 'win_subsystem': 'console', 'sources': [], 'dependencies': [] }] @@ -2729,7 +3602,7 @@ foreach target : target_dirs link_language: link_language, link_depends: [block_syms, qemu_syms] + exe.get('link_depends', []), link_args: link_args, - gui_app: exe['gui']) + win_subsystem: 
exe['win_subsystem']) if targetos == 'darwin' icon = 'pc-bios/qemu.rsrc' @@ -2747,21 +3620,17 @@ foreach target : target_dirs emulators += {exe['name'] : custom_target(exe['name'], input: build_input, output: exe['name'], - command: [ - files('scripts/entitlement.sh'), - '@OUTPUT@', - '@INPUT@' - ]) + command: [entitlement, '@OUTPUT@', '@INPUT@']) } - meson.add_install_script('scripts/entitlement.sh', '--install', + meson.add_install_script(entitlement, '--install', get_option('bindir') / exe['name'], install_input) else emulators += {exe['name']: emulator} endif - if 'CONFIG_TRACE_SYSTEMTAP' in config_host + if stap.found() foreach stp: [ {'ext': '.stp-build', 'fmt': 'stap', 'bin': meson.current_build_dir() / exe['name'], 'install': false}, {'ext': '.stp', 'fmt': 'stap', 'bin': get_option('prefix') / get_option('bindir') / exe['name'], 'install': true}, @@ -2793,11 +3662,7 @@ if 'CONFIG_PLUGIN' in config_host install_headers('include/qemu/qemu-plugin.h') endif -if 'CONFIG_GUEST_AGENT' in config_host - subdir('qga') -elif get_option('guest_agent_msi').enabled() - error('Guest agent MSI requested, but the guest agent is not being built') -endif +subdir('qga') # Don't build qemu-keymap if xkbcommon is not explicitly enabled # when we don't build tools or system @@ -2813,7 +3678,8 @@ if have_tools qemu_io = executable('qemu-io', files('qemu-io.c'), dependencies: [block, qemuutil], install: true) qemu_nbd = executable('qemu-nbd', files('qemu-nbd.c'), - dependencies: [blockdev, qemuutil, gnutls], install: true) + dependencies: [blockdev, qemuutil, gnutls, selinux], + install: true) subdir('storage-daemon') subdir('contrib/rdmacm-mux') @@ -2823,7 +3689,7 @@ if have_tools dependencies: qemuutil, install: true) - if 'CONFIG_VHOST_USER' in config_host + if have_vhost_user subdir('contrib/vhost-user-blk') subdir('contrib/vhost-user-gpu') subdir('contrib/vhost-user-input') @@ -2863,6 +3729,7 @@ if host_machine.system() == 'windows' '@OUTPUT@', get_option('prefix'), meson.current_source_dir(), + config_host['GLIB_BINDIR'], host_machine.cpu(), '--', '-DDISPLAYVERSION=' + meson.project_version(), @@ -2890,20 +3757,21 @@ endif summary_info = {} summary_info += {'Install prefix': get_option('prefix')} summary_info += {'BIOS directory': qemu_datadir} -summary_info += {'firmware path': get_option('qemu_firmwarepath')} -summary_info += {'binary directory': get_option('bindir')} -summary_info += {'library directory': get_option('libdir')} +pathsep = targetos == 'windows' ? 
';' : ':' +summary_info += {'firmware path': pathsep.join(get_option('qemu_firmwarepath'))} +summary_info += {'binary directory': get_option('prefix') / get_option('bindir')} +summary_info += {'library directory': get_option('prefix') / get_option('libdir')} summary_info += {'module directory': qemu_moddir} -summary_info += {'libexec directory': get_option('libexecdir')} -summary_info += {'include directory': get_option('includedir')} -summary_info += {'config directory': get_option('sysconfdir')} +summary_info += {'libexec directory': get_option('prefix') / get_option('libexecdir')} +summary_info += {'include directory': get_option('prefix') / get_option('includedir')} +summary_info += {'config directory': get_option('prefix') / get_option('sysconfdir')} if targetos != 'windows' - summary_info += {'local state directory': get_option('localstatedir')} - summary_info += {'Manual directory': get_option('mandir')} + summary_info += {'local state directory': get_option('prefix') / get_option('localstatedir')} + summary_info += {'Manual directory': get_option('prefix') / get_option('mandir')} else summary_info += {'local state directory': 'queried at runtime'} endif -summary_info += {'Doc directory': get_option('docdir')} +summary_info += {'Doc directory': get_option('prefix') / get_option('docdir')} summary_info += {'Build directory': meson.current_build_dir()} summary_info += {'Source path': meson.current_source_dir()} summary_info += {'GIT submodules': config_host['GIT_SUBMODULES']} @@ -2914,16 +3782,17 @@ summary_info = {} summary_info += {'git': config_host['GIT']} summary_info += {'make': config_host['MAKE']} summary_info += {'python': '@0@ (version: @1@)'.format(python.full_path(), python.language_version())} -summary_info += {'sphinx-build': sphinx_build.found()} +summary_info += {'sphinx-build': sphinx_build} if config_host.has_key('HAVE_GDB_BIN') summary_info += {'gdb': config_host['HAVE_GDB_BIN']} endif +summary_info += {'iasl': iasl} summary_info += {'genisoimage': config_host['GENISOIMAGE']} -if targetos == 'windows' and config_host.has_key('CONFIG_GUEST_AGENT') - summary_info += {'wixl': wixl.found() ? wixl.full_path() : false} +if targetos == 'windows' and have_ga + summary_info += {'wixl': wixl} endif -if slirp_opt != 'disabled' and 'CONFIG_SLIRP_SMBD' in config_host - summary_info += {'smbd': config_host['CONFIG_SMBD_COMMAND']} +if slirp.found() and have_system + summary_info += {'smbd': have_slirp_smbd ? 
smbd_path : false} endif summary(summary_info, bool_yn: true, section: 'Host binaries') @@ -2936,27 +3805,25 @@ summary_info += {'block layer': have_block} summary_info += {'Install blobs': get_option('install_blobs')} summary_info += {'module support': config_host.has_key('CONFIG_MODULES')} if config_host.has_key('CONFIG_MODULES') - summary_info += {'alternative module path': config_host.has_key('CONFIG_MODULE_UPGRADES')} + summary_info += {'alternative module path': get_option('module_upgrades')} endif -summary_info += {'fuzzing support': config_host.has_key('CONFIG_FUZZ')} +summary_info += {'fuzzing support': get_option('fuzzing')} if have_system - summary_info += {'Audio drivers': config_host['CONFIG_AUDIO_DRIVERS']} + summary_info += {'Audio drivers': ' '.join(audio_drivers_selected)} endif -summary_info += {'Trace backends': config_host['TRACE_BACKENDS']} -if config_host['TRACE_BACKENDS'].split().contains('simple') - summary_info += {'Trace output file': config_host['CONFIG_TRACE_FILE'] + '-'} +summary_info += {'Trace backends': ','.join(get_option('trace_backends'))} +if 'simple' in get_option('trace_backends') + summary_info += {'Trace output file': get_option('trace_file') + '-'} endif -summary_info += {'QOM debugging': config_host.has_key('CONFIG_QOM_CAST_DEBUG')} -summary_info += {'vhost-kernel support': config_host.has_key('CONFIG_VHOST_KERNEL')} -summary_info += {'vhost-net support': config_host.has_key('CONFIG_VHOST_NET')} -summary_info += {'vhost-crypto support': config_host.has_key('CONFIG_VHOST_CRYPTO')} -summary_info += {'vhost-scsi support': config_host.has_key('CONFIG_VHOST_SCSI')} -summary_info += {'vhost-vsock support': config_host.has_key('CONFIG_VHOST_VSOCK')} -summary_info += {'vhost-user support': config_host.has_key('CONFIG_VHOST_USER')} +summary_info += {'D-Bus display': dbus_display} +summary_info += {'QOM debugging': get_option('qom_cast_debug')} +summary_info += {'vhost-kernel support': have_vhost_kernel} +summary_info += {'vhost-net support': have_vhost_net} +summary_info += {'vhost-user support': have_vhost_user} +summary_info += {'vhost-user-crypto support': have_vhost_user_crypto} summary_info += {'vhost-user-blk server support': have_vhost_user_blk_server} -summary_info += {'vhost-user-fs support': config_host.has_key('CONFIG_VHOST_USER_FS')} -summary_info += {'vhost-vdpa support': config_host.has_key('CONFIG_VHOST_VDPA')} -summary_info += {'build guest agent': config_host.has_key('CONFIG_GUEST_AGENT')} +summary_info += {'vhost-vdpa support': have_vhost_vdpa} +summary_info += {'build guest agent': have_ga} summary(summary_info, bool_yn: true, section: 'Configurable features') # Compilation information @@ -2973,11 +3840,6 @@ endif if targetos == 'darwin' summary_info += {'Objective-C compiler': ' '.join(meson.get_compiler('objc').cmd_array())} endif -if targetos == 'windows' - if 'WIN_SDK' in config_host - summary_info += {'Windows SDK': config_host['WIN_SDK']} - endif -endif summary_info += {'CFLAGS': ' '.join(get_option('c_args') + ['-O' + get_option('optimization')] + (get_option('debug') ? ['-g'] : []))} @@ -2986,24 +3848,31 @@ if link_language == 'cpp' + ['-O' + get_option('optimization')] + (get_option('debug') ? ['-g'] : []))} endif +if targetos == 'darwin' + summary_info += {'OBJCFLAGS': ' '.join(get_option('objc_args') + + ['-O' + get_option('optimization')] + + (get_option('debug') ? 
['-g'] : []))} +endif link_args = get_option(link_language + '_link_args') if link_args.length() > 0 summary_info += {'LDFLAGS': ' '.join(link_args)} endif -summary_info += {'QEMU_CFLAGS': config_host['QEMU_CFLAGS']} -summary_info += {'QEMU_LDFLAGS': config_host['QEMU_LDFLAGS']} -summary_info += {'profiler': config_host.has_key('CONFIG_PROFILER')} +summary_info += {'QEMU_CFLAGS': ' '.join(qemu_cflags)} +summary_info += {'QEMU_CXXFLAGS': ' '.join(qemu_cxxflags)} +summary_info += {'QEMU_OBJCFLAGS': ' '.join(qemu_objcflags)} +summary_info += {'QEMU_LDFLAGS': ' '.join(qemu_ldflags)} +summary_info += {'profiler': get_option('profiler')} summary_info += {'link-time optimization (LTO)': get_option('b_lto')} summary_info += {'PIE': get_option('b_pie')} summary_info += {'static build': config_host.has_key('CONFIG_STATIC')} summary_info += {'malloc trim support': has_malloc_trim} -summary_info += {'membarrier': config_host.has_key('CONFIG_MEMBARRIER')} -summary_info += {'debug stack usage': config_host.has_key('CONFIG_DEBUG_STACK_USAGE')} -summary_info += {'mutex debugging': config_host.has_key('CONFIG_DEBUG_MUTEX')} +summary_info += {'membarrier': have_membarrier} +summary_info += {'debug stack usage': get_option('debug_stack_usage')} +summary_info += {'mutex debugging': get_option('debug_mutex')} summary_info += {'memory allocator': get_option('malloc')} -summary_info += {'avx2 optimization': config_host.has_key('CONFIG_AVX2_OPT')} -summary_info += {'avx512f optimization': config_host.has_key('CONFIG_AVX512F_OPT')} -summary_info += {'gprof enabled': config_host.has_key('CONFIG_GPROF')} +summary_info += {'avx2 optimization': config_host_data.get('CONFIG_AVX2_OPT')} +summary_info += {'avx512f optimization': config_host_data.get('CONFIG_AVX512F_OPT')} +summary_info += {'gprof enabled': get_option('gprof')} summary_info += {'gcov': get_option('b_coverage')} summary_info += {'thread sanitizer': config_host.has_key('CONFIG_TSAN')} summary_info += {'CFI support': get_option('cfi')} @@ -3011,27 +3880,26 @@ if get_option('cfi') summary_info += {'CFI debug support': get_option('cfi_debug')} endif summary_info += {'strip binaries': get_option('strip')} -summary_info += {'sparse': sparse.found() ? 
sparse.full_path() : false} +summary_info += {'sparse': sparse} summary_info += {'mingw32 support': targetos == 'windows'} +summary(summary_info, bool_yn: true, section: 'Compilation') # snarf the cross-compilation information for tests +summary_info = {} +have_cross = false foreach target: target_dirs - tcg_mak = meson.current_build_dir() / 'tests/tcg' / 'config-' + target + '.mak' + tcg_mak = meson.current_build_dir() / 'tests/tcg' / target / 'config-target.mak' if fs.exists(tcg_mak) config_cross_tcg = keyval.load(tcg_mak) - target = config_cross_tcg['TARGET_NAME'] - compiler = '' - if 'DOCKER_CROSS_CC_GUEST' in config_cross_tcg - summary_info += {target + ' tests': config_cross_tcg['DOCKER_CROSS_CC_GUEST'] + - ' via ' + config_cross_tcg['DOCKER_IMAGE']} - elif 'CROSS_CC_GUEST' in config_cross_tcg - summary_info += {target + ' tests' - : config_cross_tcg['CROSS_CC_GUEST'] } + if 'CC' in config_cross_tcg + summary_info += {config_cross_tcg['TARGET_NAME']: config_cross_tcg['CC']} + have_cross = true endif - endif + endif endforeach - -summary(summary_info, bool_yn: true, section: 'Compilation') +if have_cross + summary(summary_info, bool_yn: true, section: 'Cross compilers') +endif # Targets and accelerators summary_info = {} @@ -3041,15 +3909,15 @@ if have_system summary_info += {'HVF support': config_all.has_key('CONFIG_HVF')} summary_info += {'WHPX support': config_all.has_key('CONFIG_WHPX')} summary_info += {'NVMM support': config_all.has_key('CONFIG_NVMM')} - summary_info += {'Xen support': config_host.has_key('CONFIG_XEN_BACKEND')} - if config_host.has_key('CONFIG_XEN_BACKEND') - summary_info += {'xen ctrl version': config_host['CONFIG_XEN_CTRL_INTERFACE_VERSION']} + summary_info += {'Xen support': xen.found()} + if xen.found() + summary_info += {'xen ctrl version': xen.version()} endif endif summary_info += {'TCG support': config_all.has_key('CONFIG_TCG')} if config_all.has_key('CONFIG_TCG') if get_option('tcg_interpreter') - summary_info += {'TCG backend': 'TCI (TCG with bytecode interpreter, experimental and slow)'} + summary_info += {'TCG backend': 'TCI (TCG with bytecode interpreter, slow)'} else summary_info += {'TCG backend': 'native (@0@)'.format(cpu)} endif @@ -3060,124 +3928,138 @@ summary_info += {'target list': ' '.join(target_dirs)} if have_system summary_info += {'default devices': get_option('default_devices')} summary_info += {'out of process emulation': multiprocess_allowed} + summary_info += {'vfio-user server': vfio_user_server_allowed} endif summary(summary_info, bool_yn: true, section: 'Targets and accelerators') # Block layer summary_info = {} summary_info += {'coroutine backend': config_host['CONFIG_COROUTINE_BACKEND']} -summary_info += {'coroutine pool': config_host['CONFIG_COROUTINE_POOL'] == '1'} +summary_info += {'coroutine pool': have_coroutine_pool} if have_block - summary_info += {'Block whitelist (rw)': config_host['CONFIG_BDRV_RW_WHITELIST']} - summary_info += {'Block whitelist (ro)': config_host['CONFIG_BDRV_RO_WHITELIST']} - summary_info += {'Use block whitelist in tools': config_host.has_key('CONFIG_BDRV_WHITELIST_TOOLS')} + summary_info += {'Block whitelist (rw)': get_option('block_drv_rw_whitelist')} + summary_info += {'Block whitelist (ro)': get_option('block_drv_ro_whitelist')} + summary_info += {'Use block whitelist in tools': get_option('block_drv_whitelist_in_tools')} summary_info += {'VirtFS support': have_virtfs} summary_info += {'build virtiofs daemon': have_virtiofsd} - summary_info += {'Live block migration': 
config_host.has_key('CONFIG_LIVE_BLOCK_MIGRATION')} - summary_info += {'replication support': config_host.has_key('CONFIG_REPLICATION')} - summary_info += {'bochs support': config_host.has_key('CONFIG_BOCHS')} - summary_info += {'cloop support': config_host.has_key('CONFIG_CLOOP')} - summary_info += {'dmg support': config_host.has_key('CONFIG_DMG')} - summary_info += {'qcow v1 support': config_host.has_key('CONFIG_QCOW1')} - summary_info += {'vdi support': config_host.has_key('CONFIG_VDI')} - summary_info += {'vvfat support': config_host.has_key('CONFIG_VVFAT')} - summary_info += {'qed support': config_host.has_key('CONFIG_QED')} - summary_info += {'parallels support': config_host.has_key('CONFIG_PARALLELS')} - summary_info += {'FUSE exports': fuse.found()} + summary_info += {'Live block migration': config_host_data.get('CONFIG_LIVE_BLOCK_MIGRATION')} + summary_info += {'replication support': config_host_data.get('CONFIG_REPLICATION')} + summary_info += {'bochs support': get_option('bochs').allowed()} + summary_info += {'cloop support': get_option('cloop').allowed()} + summary_info += {'dmg support': get_option('dmg').allowed()} + summary_info += {'qcow v1 support': get_option('qcow1').allowed()} + summary_info += {'vdi support': get_option('vdi').allowed()} + summary_info += {'vvfat support': get_option('vvfat').allowed()} + summary_info += {'qed support': get_option('qed').allowed()} + summary_info += {'parallels support': get_option('parallels').allowed()} + summary_info += {'FUSE exports': fuse} + summary_info += {'VDUSE block exports': have_vduse_blk_export} endif summary(summary_info, bool_yn: true, section: 'Block layer support') # Crypto summary_info = {} -summary_info += {'TLS priority': config_host['CONFIG_TLS_PRIORITY']} -summary_info += {'GNUTLS support': gnutls.found()} -summary_info += {'GNUTLS crypto': gnutls_crypto.found()} -# TODO: add back version -summary_info += {'libgcrypt': gcrypt.found()} -# TODO: add back version -summary_info += {'nettle': nettle.found()} +summary_info += {'TLS priority': get_option('tls_priority')} +summary_info += {'GNUTLS support': gnutls} +if gnutls.found() + summary_info += {' GNUTLS crypto': gnutls_crypto.found()} +endif +summary_info += {'libgcrypt': gcrypt} +summary_info += {'nettle': nettle} if nettle.found() summary_info += {' XTS': xts != 'private'} endif -summary_info += {'crypto afalg': config_host.has_key('CONFIG_AF_ALG')} -summary_info += {'rng-none': config_host.has_key('CONFIG_RNG_NONE')} -summary_info += {'Linux keyring': config_host.has_key('CONFIG_SECRET_KEYRING')} +summary_info += {'AF_ALG support': have_afalg} +summary_info += {'rng-none': get_option('rng_none')} +summary_info += {'Linux keyring': have_keyring} summary(summary_info, bool_yn: true, section: 'Crypto') # Libraries summary_info = {} if targetos == 'darwin' - summary_info += {'Cocoa support': cocoa.found()} + summary_info += {'Cocoa support': cocoa} + summary_info += {'vmnet.framework support': vmnet} endif -# TODO: add back version -summary_info += {'SDL support': sdl.found()} -summary_info += {'SDL image support': sdl_image.found()} -# TODO: add back version -summary_info += {'GTK support': gtk.found()} -summary_info += {'pixman': pixman.found()} -# TODO: add back version -summary_info += {'VTE support': vte.found()} -# TODO: add back version -summary_info += {'slirp support': slirp_opt == 'disabled' ? 
false : slirp_opt} -summary_info += {'libtasn1': tasn1.found()} -summary_info += {'PAM': pam.found()} -summary_info += {'iconv support': iconv.found()} -summary_info += {'curses support': curses.found()} -# TODO: add back version -summary_info += {'virgl support': virgl.found()} -summary_info += {'curl support': curl.found()} -summary_info += {'Multipath support': mpathpersist.found()} -summary_info += {'VNC support': vnc.found()} +summary_info += {'SDL support': sdl} +summary_info += {'SDL image support': sdl_image} +summary_info += {'GTK support': gtk} +summary_info += {'pixman': pixman} +summary_info += {'VTE support': vte} +summary_info += {'slirp support': slirp} +summary_info += {'libtasn1': tasn1} +summary_info += {'PAM': pam} +summary_info += {'iconv support': iconv} +summary_info += {'curses support': curses} +summary_info += {'virgl support': virgl} +summary_info += {'blkio support': blkio} +summary_info += {'curl support': curl} +summary_info += {'Multipath support': mpathpersist} +summary_info += {'PNG support': png} +summary_info += {'VNC support': vnc} if vnc.found() - summary_info += {'VNC SASL support': sasl.found()} - summary_info += {'VNC JPEG support': jpeg.found()} - summary_info += {'VNC PNG support': png.found()} + summary_info += {'VNC SASL support': sasl} + summary_info += {'VNC JPEG support': jpeg} endif -summary_info += {'brlapi support': brlapi.found()} -summary_info += {'vde support': config_host.has_key('CONFIG_VDE')} -summary_info += {'netmap support': config_host.has_key('CONFIG_NETMAP')} -summary_info += {'Linux AIO support': config_host.has_key('CONFIG_LINUX_AIO')} -summary_info += {'Linux io_uring support': linux_io_uring.found()} -summary_info += {'ATTR/XATTR support': libattr.found()} -summary_info += {'RDMA support': config_host.has_key('CONFIG_RDMA')} -summary_info += {'PVRDMA support': config_host.has_key('CONFIG_PVRDMA')} +if targetos not in ['darwin', 'haiku', 'windows'] + summary_info += {'OSS support': oss} + summary_info += {'sndio support': sndio} +elif targetos == 'darwin' + summary_info += {'CoreAudio support': coreaudio} +elif targetos == 'windows' + summary_info += {'DirectSound support': dsound} +endif +if targetos == 'linux' + summary_info += {'ALSA support': alsa} + summary_info += {'PulseAudio support': pulse} +endif +summary_info += {'JACK support': jack} +summary_info += {'brlapi support': brlapi} +summary_info += {'vde support': vde} +summary_info += {'netmap support': have_netmap} +summary_info += {'l2tpv3 support': have_l2tpv3} +summary_info += {'Linux AIO support': libaio} +summary_info += {'Linux io_uring support': linux_io_uring} +summary_info += {'ATTR/XATTR support': libattr} +summary_info += {'RDMA support': rdma} +summary_info += {'PVRDMA support': have_pvrdma} summary_info += {'fdt support': fdt_opt == 'disabled' ? 
false : fdt_opt} -summary_info += {'libcap-ng support': libcap_ng.found()} -summary_info += {'bpf support': libbpf.found()} -# TODO: add back protocol and server version -summary_info += {'spice support': config_host.has_key('CONFIG_SPICE')} -summary_info += {'rbd support': rbd.found()} -summary_info += {'xfsctl support': config_host.has_key('CONFIG_XFS')} -summary_info += {'smartcard support': cacard.found()} -summary_info += {'U2F support': u2f.found()} -summary_info += {'libusb': libusb.found()} -summary_info += {'usb net redir': usbredir.found()} -summary_info += {'OpenGL support': config_host.has_key('CONFIG_OPENGL')} -summary_info += {'GBM': config_host.has_key('CONFIG_GBM')} -summary_info += {'libiscsi support': libiscsi.found()} -summary_info += {'libnfs support': libnfs.found()} +summary_info += {'libcap-ng support': libcap_ng} +summary_info += {'bpf support': libbpf} +summary_info += {'spice protocol support': spice_protocol} +if spice_protocol.found() + summary_info += {' spice server support': spice} +endif +summary_info += {'rbd support': rbd} +summary_info += {'smartcard support': cacard} +summary_info += {'U2F support': u2f} +summary_info += {'libusb': libusb} +summary_info += {'usb net redir': usbredir} +summary_info += {'OpenGL support (epoxy)': opengl} +summary_info += {'GBM': gbm} +summary_info += {'libiscsi support': libiscsi} +summary_info += {'libnfs support': libnfs} if targetos == 'windows' - if config_host.has_key('CONFIG_GUEST_AGENT') - summary_info += {'QGA VSS support': config_host.has_key('CONFIG_QGA_VSS')} - summary_info += {'QGA w32 disk info': config_host.has_key('CONFIG_QGA_NTDDSCSI')} + if have_ga + summary_info += {'QGA VSS support': have_qga_vss} endif endif -summary_info += {'seccomp support': seccomp.found()} -summary_info += {'GlusterFS support': glusterfs.found()} -summary_info += {'TPM support': config_host.has_key('CONFIG_TPM')} -summary_info += {'libssh support': config_host.has_key('CONFIG_LIBSSH')} -summary_info += {'lzo support': lzo.found()} -summary_info += {'snappy support': snappy.found()} -summary_info += {'bzip2 support': libbzip2.found()} -summary_info += {'lzfse support': liblzfse.found()} -summary_info += {'zstd support': zstd.found()} -summary_info += {'NUMA host support': config_host.has_key('CONFIG_NUMA')} -summary_info += {'libxml2': libxml2.found()} -summary_info += {'capstone': capstone_opt == 'disabled' ? 
false : capstone_opt} -summary_info += {'libpmem support': libpmem.found()} -summary_info += {'libdaxctl support': libdaxctl.found()} -summary_info += {'libudev': libudev.found()} +summary_info += {'seccomp support': seccomp} +summary_info += {'GlusterFS support': glusterfs} +summary_info += {'TPM support': have_tpm} +summary_info += {'libssh support': libssh} +summary_info += {'lzo support': lzo} +summary_info += {'snappy support': snappy} +summary_info += {'bzip2 support': libbzip2} +summary_info += {'lzfse support': liblzfse} +summary_info += {'zstd support': zstd} +summary_info += {'NUMA host support': numa} +summary_info += {'capstone': capstone} +summary_info += {'libpmem support': libpmem} +summary_info += {'libdaxctl support': libdaxctl} +summary_info += {'libudev': libudev} +# Dummy dependency, keep .found() summary_info += {'FUSE lseek': fuse_lseek.found()} +summary_info += {'selinux': selinux} summary(summary_info, bool_yn: true, section: 'Dependencies') if not supported_cpus.contains(cpu) diff --git a/meson_options.txt b/meson_options.txt index 685baef380..9e16d9fde1 100644 --- a/meson_options.txt +++ b/meson_options.txt @@ -1,24 +1,63 @@ +# These options do not correspond to a --enable/--disable-* option +# on the configure script command line. If you add more, list them in +# scripts/meson-buildoptions.py's SKIP_OPTIONS constant too. + option('qemu_suffix', type : 'string', value: 'qemu', description: 'Suffix for QEMU data/modules/config directories (can be empty)') -option('docdir', type : 'string', value : 'doc', +option('docdir', type : 'string', value : 'share/doc', description: 'Base directory for documentation installation (can be empty)') -option('qemu_firmwarepath', type : 'string', value : '', +option('qemu_firmwarepath', type : 'array', value : ['share/qemu-firmware'], description: 'search PATH for firmware files') +option('pkgversion', type : 'string', value : '', + description: 'use specified string as sub-version of the package') +option('smbd', type : 'string', value : '', + description: 'Path to smbd for slirp networking') option('sphinx_build', type : 'string', value : '', - description: 'Use specified sphinx-build [$sphinx_build] for building document (default to be empty)') - + description: 'Use specified sphinx-build for building document') +option('iasl', type : 'string', value : '', + description: 'Path to ACPI disassembler') +option('tls_priority', type : 'string', value : 'NORMAL', + description: 'Default TLS protocol/cipher priority string') option('default_devices', type : 'boolean', value : true, description: 'Include a default selection of devices in emulators') +option('audio_drv_list', type: 'array', value: ['default'], + choices: ['alsa', 'coreaudio', 'default', 'dsound', 'jack', 'oss', 'pa', 'sdl', 'sndio'], + description: 'Set audio driver list') +option('block_drv_rw_whitelist', type : 'string', value : '', + description: 'set block driver read-write whitelist (by default affects only QEMU, not tools like qemu-img)') +option('block_drv_ro_whitelist', type : 'string', value : '', + description: 'set block driver read-only whitelist (by default affects only QEMU, not tools like qemu-img)') +option('interp_prefix', type : 'string', value : '/usr/gnemul/qemu-%M', + description: 'where to find shared libraries etc., use %M for cpu name') +option('fuzzing_engine', type : 'string', value : '', + description: 'fuzzing engine library for OSS-Fuzz') +option('trace_file', type: 'string', value: 'trace', + description: 'Trace file prefix for simple 
backend') + +# Everything else can be set via --enable/--disable-* option +# on the configure script command line. After adding an option +# here make sure to run "make update-buildoptions". + option('docs', type : 'feature', value : 'auto', description: 'Documentations build support') +option('fuzzing', type : 'boolean', value: false, + description: 'build fuzzing targets') option('gettext', type : 'feature', value : 'auto', description: 'Localization of the GTK+ user interface') +option('module_upgrades', type : 'boolean', value : false, + description: 'try to load modules from alternate paths for upgrades') option('install_blobs', type : 'boolean', value : true, description: 'install provided firmware blobs') option('sparse', type : 'feature', value : 'auto', description: 'sparse checker') +option('guest_agent', type : 'feature', value : 'auto', + description: 'Build QEMU Guest Agent') option('guest_agent_msi', type : 'feature', value : 'auto', description: 'Build MSI package for the QEMU Guest Agent') +option('tools', type : 'feature', value : 'auto', + description: 'build support utilities that come with QEMU') +option('qga_vss', type : 'feature', value: 'auto', + description: 'build QGA VSS support (broken with MinGW)') option('malloc_trim', type : 'feature', value : 'auto', description: 'enable libc malloc_trim() for memory optimization') @@ -39,18 +78,36 @@ option('xen', type: 'feature', value: 'auto', description: 'Xen backend support') option('xen_pci_passthrough', type: 'feature', value: 'auto', description: 'Xen PCI passthrough support') -option('tcg', type: 'feature', value: 'auto', +option('tcg', type: 'feature', value: 'enabled', description: 'TCG support') option('tcg_interpreter', type: 'boolean', value: false, - description: 'TCG with bytecode interpreter (experimental and slow)') -option('cfi', type: 'boolean', value: 'false', + description: 'TCG with bytecode interpreter (slow)') +option('cfi', type: 'boolean', value: false, description: 'Control-Flow Integrity (CFI)') -option('cfi_debug', type: 'boolean', value: 'false', +option('cfi_debug', type: 'boolean', value: false, description: 'Verbose errors in case of CFI violation') option('multiprocess', type: 'feature', value: 'auto', description: 'Out of process device emulation support') option('renderdoc', type: 'feature', value: 'auto', description: 'RenderDoc frame capture support') +option('vfio_user_server', type: 'feature', value: 'disabled', + description: 'vfio-user server support') +option('dbus_display', type: 'feature', value: 'auto', + description: '-display dbus support') +option('tpm', type : 'feature', value : 'auto', + description: 'TPM support') + +# Do not enable it by default even for Mingw32, because it doesn't +# work on Wine. 
+option('membarrier', type: 'feature', value: 'disabled', + description: 'membarrier system call (for Linux 4.14+ or Windows') + +option('avx2', type: 'feature', value: 'auto', + description: 'AVX2 optimizations') +option('avx512f', type: 'feature', value: 'disabled', + description: 'AVX512F optimizations') +option('keyring', type: 'feature', value: 'auto', + description: 'Linux keyring support') option('attr', type : 'feature', value : 'auto', description: 'attr/xattr support') @@ -62,12 +119,16 @@ option('bzip2', type : 'feature', value : 'auto', description: 'bzip2 support for DMG images') option('cap_ng', type : 'feature', value : 'auto', description: 'cap_ng support') +option('blkio', type : 'feature', value : 'auto', + description: 'libblkio block device driver') option('bpf', type : 'feature', value : 'auto', description: 'eBPF support') option('cocoa', type : 'feature', value : 'auto', description: 'Cocoa user interface (macOS only)') option('curl', type : 'feature', value : 'auto', description: 'CURL block device driver') +option('gio', type : 'feature', value : 'auto', + description: 'use libgio for D-Bus support') option('glusterfs', type : 'feature', value : 'auto', description: 'Glusterfs block device driver') option('libiscsi', type : 'feature', value : 'auto', @@ -76,6 +137,8 @@ option('libnfs', type : 'feature', value : 'auto', description: 'libnfs block device driver') option('mpath', type : 'feature', value : 'auto', description: 'Multipath persistent reservation passthrough') +option('numa', type : 'feature', value : 'auto', + description: 'libnuma support') option('iconv', type : 'feature', value : 'auto', description: 'Font glyph conversion support') option('curses', type : 'feature', value : 'auto', @@ -86,16 +149,20 @@ option('nettle', type : 'feature', value : 'auto', description: 'nettle cryptography support') option('gcrypt', type : 'feature', value : 'auto', description: 'libgcrypt cryptography support') +option('crypto_afalg', type : 'feature', value : 'disabled', + description: 'Linux AF_ALG crypto backend driver') option('libdaxctl', type : 'feature', value : 'auto', description: 'libdaxctl support') option('libpmem', type : 'feature', value : 'auto', description: 'libpmem support') +option('libssh', type : 'feature', value : 'auto', + description: 'ssh block device support') option('libudev', type : 'feature', value : 'auto', description: 'Use libudev to enumerate host devices') option('libusb', type : 'feature', value : 'auto', description: 'libusb support for USB passthrough') -option('libxml2', type : 'feature', value : 'auto', - description: 'libxml2 support for Parallels image format') +option('linux_aio', type : 'feature', value : 'auto', + description: 'Linux AIO support') option('linux_io_uring', type : 'feature', value : 'auto', description: 'Linux io_uring support') option('lzfse', type : 'feature', value : 'auto', @@ -104,9 +171,15 @@ option('lzo', type : 'feature', value : 'auto', description: 'lzo compression support') option('rbd', type : 'feature', value : 'auto', description: 'Ceph block device driver') -option('gtk', type : 'feature', value : 'auto', - description: 'GTK+ user interface') -option('sdl', type : 'feature', value : 'auto', +option('opengl', type : 'feature', value : 'enabled', + description: 'OpenGL support') +option('rdma', type : 'feature', value : 'auto', + description: 'Enable RDMA-based migration') +option('pvrdma', type : 'feature', value : 'auto', + description: 'Enable PVRDMA support') +# option('gtk', type : 'feature', 
value : 'auto', +# description: 'GTK+ user interface') +option('sdl', type : 'feature', value : 'enabled', description: 'SDL user interface') option('sdl_image', type : 'feature', value : 'auto', description: 'SDL Image support for icons') @@ -116,22 +189,45 @@ option('smartcard', type : 'feature', value : 'auto', description: 'CA smartcard emulation support') option('snappy', type : 'feature', value : 'auto', description: 'snappy compression support') +option('spice', type : 'feature', value : 'auto', + description: 'Spice server support') +option('spice_protocol', type : 'feature', value : 'auto', + description: 'Spice protocol support') option('u2f', type : 'feature', value : 'auto', description: 'U2F emulation support') +option('canokey', type : 'feature', value : 'auto', + description: 'CanoKey support') option('usb_redir', type : 'feature', value : 'auto', description: 'libusbredir support') +option('l2tpv3', type : 'feature', value : 'auto', + description: 'l2tpv3 network backend support') +option('netmap', type : 'feature', value : 'auto', + description: 'netmap network backend support') +option('slirp', type: 'feature', value: 'enabled', + description: 'libslirp user mode network backend support') +option('vde', type : 'feature', value : 'auto', + description: 'vde network backend support') +option('vmnet', type : 'feature', value : 'auto', + description: 'vmnet.framework network backend support') option('virglrenderer', type : 'feature', value : 'auto', description: 'virgl rendering support') -option('vnc', type : 'feature', value : 'enabled', +option('png', type : 'feature', value : 'auto', + description: 'PNG support with libpng') +option('vnc', type : 'feature', value : 'auto', description: 'VNC server') option('vnc_jpeg', type : 'feature', value : 'auto', description: 'JPEG lossy compression for VNC server') -option('vnc_png', type : 'feature', value : 'auto', - description: 'PNG compression for VNC server') option('vnc_sasl', type : 'feature', value : 'auto', description: 'SASL authentication for VNC server') option('vte', type : 'feature', value : 'auto', description: 'vte support for the gtk UI') + +# GTK Clipboard implementation is disabled by default, since it may cause hangs +# of the guest VCPUs. 
See gitlab issue 1150: +# https://gitlab.com/qemu-project/qemu/-/issues/1150 + +option('gtk_clipboard', type: 'feature', value : 'disabled', + description: 'clipboard support for the gtk UI (EXPERIMENTAL, MAY HANG)') option('xkbcommon', type : 'feature', value : 'auto', description: 'xkbcommon support') option('zstd', type : 'feature', value : 'auto', @@ -141,19 +237,92 @@ option('fuse', type: 'feature', value: 'auto', option('fuse_lseek', type : 'feature', value : 'auto', description: 'SEEK_HOLE/SEEK_DATA support for FUSE exports') +option('trace_backends', type: 'array', value: ['log'], + choices: ['dtrace', 'ftrace', 'log', 'nop', 'simple', 'syslog', 'ust'], + description: 'Set available tracing backends') + +option('alsa', type: 'feature', value: 'auto', + description: 'ALSA sound support') +option('coreaudio', type: 'feature', value: 'auto', + description: 'CoreAudio sound support') +option('dsound', type: 'feature', value: 'auto', + description: 'DirectSound sound support') +option('jack', type: 'feature', value: 'auto', + description: 'JACK sound support') +option('oss', type: 'feature', value: 'auto', + description: 'OSS sound support') +option('pa', type: 'feature', value: 'auto', + description: 'PulseAudio sound support') +option('sndio', type: 'feature', value: 'auto', + description: 'sndio sound support') + +option('vhost_kernel', type: 'feature', value: 'auto', + description: 'vhost kernel backend support') +option('vhost_net', type: 'feature', value: 'auto', + description: 'vhost-net kernel acceleration support') +option('vhost_user', type: 'feature', value: 'auto', + description: 'vhost-user backend support') +option('vhost_crypto', type: 'feature', value: 'auto', + description: 'vhost-user crypto backend support') +option('vhost_vdpa', type: 'feature', value: 'auto', + description: 'vhost-vdpa kernel backend support') option('vhost_user_blk_server', type: 'feature', value: 'auto', description: 'build vhost-user-blk server') option('virtfs', type: 'feature', value: 'auto', description: 'virtio-9p support') option('virtiofsd', type: 'feature', value: 'auto', description: 'build virtiofs daemon (virtiofsd)') +option('libvduse', type: 'feature', value: 'auto', + description: 'build VDUSE Library') +option('vduse_blk_export', type: 'feature', value: 'auto', + description: 'VDUSE block export support') -option('capstone', type: 'combo', value: 'auto', - choices: ['disabled', 'enabled', 'auto', 'system', 'internal'], +option('capstone', type: 'feature', value: 'auto', description: 'Whether and how to find the capstone library') -option('slirp', type: 'combo', value: 'auto', - choices: ['disabled', 'enabled', 'auto', 'system', 'internal'], - description: 'Whether and how to find the slirp library') option('fdt', type: 'combo', value: 'auto', choices: ['disabled', 'enabled', 'auto', 'system', 'internal'], description: 'Whether and how to find the libfdt library') + +option('selinux', type: 'feature', value: 'auto', + description: 'SELinux support in qemu-nbd') +option('live_block_migration', type: 'feature', value: 'auto', + description: 'block migration in the main migration stream') +option('replication', type: 'feature', value: 'auto', + description: 'replication support') +option('bochs', type: 'feature', value: 'auto', + description: 'bochs image format support') +option('cloop', type: 'feature', value: 'auto', + description: 'cloop image format support') +option('dmg', type: 'feature', value: 'auto', + description: 'dmg image format support') +option('qcow1', type: 'feature', 
value: 'auto', + description: 'qcow1 image format support') +option('vdi', type: 'feature', value: 'auto', + description: 'vdi image format support') +option('vvfat', type: 'feature', value: 'auto', + description: 'vvfat image format support') +option('qed', type: 'feature', value: 'auto', + description: 'qed image format support') +option('parallels', type: 'feature', value: 'auto', + description: 'parallels image format support') +option('block_drv_whitelist_in_tools', type: 'boolean', value: false, + description: 'use block whitelist also in tools instead of only QEMU') +option('rng_none', type: 'boolean', value: false, + description: 'dummy RNG, avoid using /dev/(u)random and getrandom()') +option('coroutine_pool', type: 'boolean', value: true, + description: 'coroutine freelist (better performance)') +option('debug_mutex', type: 'boolean', value: false, + description: 'mutex debugging support') +option('debug_stack_usage', type: 'boolean', value: false, + description: 'measure coroutine stack usage') +option('qom_cast_debug', type: 'boolean', value: false, + description: 'cast debugging support') +option('gprof', type: 'boolean', value: false, + description: 'QEMU profiling with gprof') +option('profiler', type: 'boolean', value: false, + description: 'profiler support') +option('slirp_smbd', type : 'feature', value : 'auto', + description: 'use smbd (at path --smbd=*) in slirp networking') + +option('renderdoc', type: 'feature', value: 'auto', + description: 'improved RenderDoc frame capture support') \ No newline at end of file diff --git a/migration/block-dirty-bitmap.c b/migration/block-dirty-bitmap.c index 35f5ef688d..9aba7d9c22 100644 --- a/migration/block-dirty-bitmap.c +++ b/migration/block-dirty-bitmap.c @@ -1215,7 +1215,10 @@ static int dirty_bitmap_save_setup(QEMUFile *f, void *opaque) { DBMSaveState *s = &((DBMState *)opaque)->save; SaveBitmapState *dbms = NULL; + + qemu_mutex_lock_iothread(); if (init_dirty_bitmap_migration(s) < 0) { + qemu_mutex_unlock_iothread(); return -1; } @@ -1223,7 +1226,7 @@ static int dirty_bitmap_save_setup(QEMUFile *f, void *opaque) send_bitmap_start(f, s, dbms); } qemu_put_bitmap_flags(f, DIRTY_BITMAP_MIG_FLAG_EOS); - + qemu_mutex_unlock_iothread(); return 0; } diff --git a/migration/block.c b/migration/block.c index a950977855..4347da1526 100644 --- a/migration/block.c +++ b/migration/block.c @@ -28,7 +28,7 @@ #include "sysemu/block-backend.h" #include "trace.h" -#define BLK_MIG_BLOCK_SIZE (1 << 20) +#define BLK_MIG_BLOCK_SIZE (1ULL << 20) #define BDRV_SECTORS_PER_DIRTY_CHUNK (BLK_MIG_BLOCK_SIZE >> BDRV_SECTOR_BITS) #define BLK_MIG_FLAG_DEVICE_BLOCK 0x01 @@ -568,8 +568,8 @@ static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds, bmds_set_aio_inflight(bmds, sector, nr_sectors, 1); blk_mig_unlock(); } else { - ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE, blk->buf, - nr_sectors * BDRV_SECTOR_SIZE); + ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE, + nr_sectors * BDRV_SECTOR_SIZE, blk->buf, 0); if (ret < 0) { goto error; } @@ -756,8 +756,8 @@ static int block_save_setup(QEMUFile *f, void *opaque) static int block_save_iterate(QEMUFile *f, void *opaque) { int ret; - int64_t last_ftell = qemu_ftell(f); - int64_t delta_ftell; + int64_t last_bytes = qemu_file_total_transferred(f); + int64_t delta_bytes; trace_migration_block_save("iterate", block_mig_state.submitted, block_mig_state.transferred); @@ -809,10 +809,10 @@ static int block_save_iterate(QEMUFile *f, void *opaque) } qemu_put_be64(f, BLK_MIG_FLAG_EOS); - 
delta_ftell = qemu_ftell(f) - last_ftell; - if (delta_ftell > 0) { + delta_bytes = qemu_file_total_transferred(f) - last_bytes; + if (delta_bytes > 0) { return 1; - } else if (delta_ftell < 0) { + } else if (delta_bytes < 0) { return -1; } else { return 0; @@ -880,8 +880,8 @@ static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size, blk_mig_unlock(); /* Report at least one block pending during bulk phase */ - if (pending <= max_size && !block_mig_state.bulk_completed) { - pending = max_size + BLK_MIG_BLOCK_SIZE; + if (!pending && !block_mig_state.bulk_completed) { + pending = BLK_MIG_BLOCK_SIZE; } trace_migration_block_save_pending(pending); @@ -932,7 +932,7 @@ static int block_load(QEMUFile *f, void *opaque, int version_id) return -EINVAL; } - blk_invalidate_cache(blk, &local_err); + blk_activate(blk, &local_err); if (local_err) { error_report_err(local_err); return -EINVAL; @@ -976,8 +976,8 @@ static int block_load(QEMUFile *f, void *opaque, int version_id) cluster_size, BDRV_REQ_MAY_UNMAP); } else { - ret = blk_pwrite(blk, cur_addr, cur_buf, - cluster_size, 0); + ret = blk_pwrite(blk, cur_addr, cluster_size, cur_buf, + 0); } if (ret < 0) { break; diff --git a/migration/channel-block.c b/migration/channel-block.c new file mode 100644 index 0000000000..f4ab53acdb --- /dev/null +++ b/migration/channel-block.c @@ -0,0 +1,197 @@ +/* + * QEMU I/O channels block driver + * + * Copyright (c) 2022 Red Hat, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ * + */ + +#include "qemu/osdep.h" +#include "migration/channel-block.h" +#include "qapi/error.h" +#include "block/block.h" +#include "trace.h" + +QIOChannelBlock * +qio_channel_block_new(BlockDriverState *bs) +{ + QIOChannelBlock *ioc; + + ioc = QIO_CHANNEL_BLOCK(object_new(TYPE_QIO_CHANNEL_BLOCK)); + + bdrv_ref(bs); + ioc->bs = bs; + + return ioc; +} + + +static void +qio_channel_block_finalize(Object *obj) +{ + QIOChannelBlock *ioc = QIO_CHANNEL_BLOCK(obj); + + g_clear_pointer(&ioc->bs, bdrv_unref); +} + + +static ssize_t +qio_channel_block_readv(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + int **fds, + size_t *nfds, + Error **errp) +{ + QIOChannelBlock *bioc = QIO_CHANNEL_BLOCK(ioc); + QEMUIOVector qiov; + int ret; + + qemu_iovec_init_external(&qiov, (struct iovec *)iov, niov); + ret = bdrv_readv_vmstate(bioc->bs, &qiov, bioc->offset); + if (ret < 0) { + error_setg_errno(errp, -ret, "bdrv_readv_vmstate failed"); + return -1; + } + + bioc->offset += qiov.size; + return qiov.size; +} + + +static ssize_t +qio_channel_block_writev(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + int *fds, + size_t nfds, + int flags, + Error **errp) +{ + QIOChannelBlock *bioc = QIO_CHANNEL_BLOCK(ioc); + QEMUIOVector qiov; + int ret; + + qemu_iovec_init_external(&qiov, (struct iovec *)iov, niov); + ret = bdrv_writev_vmstate(bioc->bs, &qiov, bioc->offset); + if (ret < 0) { + error_setg_errno(errp, -ret, "bdrv_writev_vmstate failed"); + return -1; + } + + bioc->offset += qiov.size; + return qiov.size; +} + + +static int +qio_channel_block_set_blocking(QIOChannel *ioc, + bool enabled, + Error **errp) +{ + if (!enabled) { + error_setg(errp, "Non-blocking mode not supported for block devices"); + return -1; + } + return 0; +} + + +static off_t +qio_channel_block_seek(QIOChannel *ioc, + off_t offset, + int whence, + Error **errp) +{ + QIOChannelBlock *bioc = QIO_CHANNEL_BLOCK(ioc); + + switch (whence) { + case SEEK_SET: + bioc->offset = offset; + break; + case SEEK_CUR: + bioc->offset += whence; + break; + case SEEK_END: + error_setg(errp, "Size of VMstate region is unknown"); + return (off_t)-1; + default: + g_assert_not_reached(); + } + + return bioc->offset; +} + + +static int +qio_channel_block_close(QIOChannel *ioc, + Error **errp) +{ + QIOChannelBlock *bioc = QIO_CHANNEL_BLOCK(ioc); + int rv = bdrv_flush(bioc->bs); + + if (rv < 0) { + error_setg_errno(errp, -rv, + "Unable to flush VMState"); + return -1; + } + + g_clear_pointer(&bioc->bs, bdrv_unref); + bioc->offset = 0; + + return 0; +} + + +static void +qio_channel_block_set_aio_fd_handler(QIOChannel *ioc, + AioContext *ctx, + IOHandler *io_read, + IOHandler *io_write, + void *opaque) +{ + /* XXX anything we can do here ? 
*/ +} + + +static void +qio_channel_block_class_init(ObjectClass *klass, + void *class_data G_GNUC_UNUSED) +{ + QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass); + + ioc_klass->io_writev = qio_channel_block_writev; + ioc_klass->io_readv = qio_channel_block_readv; + ioc_klass->io_set_blocking = qio_channel_block_set_blocking; + ioc_klass->io_seek = qio_channel_block_seek; + ioc_klass->io_close = qio_channel_block_close; + ioc_klass->io_set_aio_fd_handler = qio_channel_block_set_aio_fd_handler; +} + +static const TypeInfo qio_channel_block_info = { + .parent = TYPE_QIO_CHANNEL, + .name = TYPE_QIO_CHANNEL_BLOCK, + .instance_size = sizeof(QIOChannelBlock), + .instance_finalize = qio_channel_block_finalize, + .class_init = qio_channel_block_class_init, +}; + +static void +qio_channel_block_register_types(void) +{ + type_register_static(&qio_channel_block_info); +} + +type_init(qio_channel_block_register_types); diff --git a/migration/channel-block.h b/migration/channel-block.h new file mode 100644 index 0000000000..31673824e6 --- /dev/null +++ b/migration/channel-block.h @@ -0,0 +1,59 @@ +/* + * QEMU I/O channels block driver + * + * Copyright (c) 2022 Red Hat, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +#ifndef QIO_CHANNEL_BLOCK_H +#define QIO_CHANNEL_BLOCK_H + +#include "io/channel.h" +#include "qom/object.h" + +#define TYPE_QIO_CHANNEL_BLOCK "qio-channel-block" +OBJECT_DECLARE_SIMPLE_TYPE(QIOChannelBlock, QIO_CHANNEL_BLOCK) + + +/** + * QIOChannelBlock: + * + * The QIOChannelBlock object provides a channel implementation + * that is able to perform I/O on the BlockDriverState objects + * to the VMState region. 
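As a quick orientation for the new channel type: a minimal sketch (not part of the patch) of how a caller is expected to wire a BlockDriverState into a QEMUFile, using only qio_channel_block_new() and the qemu_file_new_output() constructor that appears in the hunks below. The helper name is invented, and the short include paths assume the sketch sits inside the migration/ directory.

    #include "qemu/osdep.h"
    #include "migration/channel-block.h"
    #include "qemu-file.h"

    /* Illustrative helper: wrap a BlockDriverState's vmstate area in a
     * QEMUFile opened for writing. */
    static QEMUFile *vmstate_file_new_output(BlockDriverState *bs)
    {
        QIOChannelBlock *bioc = qio_channel_block_new(bs);
        QEMUFile *f = qemu_file_new_output(QIO_CHANNEL(bioc));

        /* Drop the local reference; the QEMUFile keeps its own, mirroring
         * the object_unref() pattern used elsewhere in this series. */
        object_unref(OBJECT(bioc));
        return f;
    }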
+ */ + +struct QIOChannelBlock { + QIOChannel parent; + BlockDriverState *bs; + off_t offset; +}; + + +/** + * qio_channel_block_new: + * @bs: the block driver state + * + * Create a new IO channel object that can perform + * I/O on a BlockDriverState object to the VMState + * region + * + * Returns: the new channel object + */ +QIOChannelBlock * +qio_channel_block_new(BlockDriverState *bs); + +#endif /* QIO_CHANNEL_BLOCK_H */ diff --git a/migration/channel.c b/migration/channel.c index c4fc000a1a..1b0815039f 100644 --- a/migration/channel.c +++ b/migration/channel.c @@ -14,7 +14,7 @@ #include "channel.h" #include "tls.h" #include "migration.h" -#include "qemu-file-channel.h" +#include "qemu-file.h" #include "trace.h" #include "qapi/error.h" #include "io/channel-tls.h" @@ -38,10 +38,7 @@ void migration_channel_process_incoming(QIOChannel *ioc) trace_migration_set_incoming_channel( ioc, object_get_typename(OBJECT(ioc))); - if (s->parameters.tls_creds && - *s->parameters.tls_creds && - !object_dynamic_cast(OBJECT(ioc), - TYPE_QIO_CHANNEL_TLS)) { + if (migrate_channel_requires_tls_upgrade(ioc)) { migration_tls_channel_process_incoming(s, ioc, &local_err); } else { migration_ioc_register_yank(ioc); @@ -71,10 +68,7 @@ void migration_channel_connect(MigrationState *s, ioc, object_get_typename(OBJECT(ioc)), hostname, error); if (!error) { - if (s->parameters.tls_creds && - *s->parameters.tls_creds && - !object_dynamic_cast(OBJECT(ioc), - TYPE_QIO_CHANNEL_TLS)) { + if (migrate_channel_requires_tls_upgrade(ioc)) { migration_tls_channel_connect(s, ioc, hostname, &error); if (!error) { @@ -86,7 +80,7 @@ void migration_channel_connect(MigrationState *s, return; } } else { - QEMUFile *f = qemu_fopen_channel_output(ioc); + QEMUFile *f = qemu_file_new_output(ioc); migration_ioc_register_yank(ioc); @@ -96,6 +90,5 @@ void migration_channel_connect(MigrationState *s, } } migrate_fd_connect(s, error); - g_free(s->hostname); error_free(error); } diff --git a/migration/colo.c b/migration/colo.c index 79fa1f6619..2b71722fd6 100644 --- a/migration/colo.c +++ b/migration/colo.c @@ -14,7 +14,6 @@ #include "sysemu/sysemu.h" #include "qapi/error.h" #include "qapi/qapi-commands-migration.h" -#include "qemu-file-channel.h" #include "migration.h" #include "qemu-file.h" #include "savevm.h" @@ -152,7 +151,7 @@ static void primary_vm_do_failover(void) * kick COLO thread which might wait at * qemu_sem_wait(&s->colo_checkpoint_sem). */ - colo_checkpoint_notify(migrate_get_current()); + colo_checkpoint_notify(s); /* * Wake up COLO thread which may blocked in recv() or send(), @@ -205,7 +204,7 @@ void colo_do_failover(void) vm_stop_force_state(RUN_STATE_COLO); } - switch (get_colo_mode()) { + switch (last_colo_mode = get_colo_mode()) { case COLO_MODE_PRIMARY: primary_vm_do_failover(); break; @@ -459,6 +458,10 @@ static int colo_do_checkpoint_transaction(MigrationState *s, if (ret < 0) { goto out; } + + if (migrate_auto_converge()) { + mig_throttle_counter_reset(); + } /* * Only save VM's live state, which not including device state. 
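The two removed blocks in the channel.c hunk above open-code the same test that migrate_channel_requires_tls_upgrade() now performs. A sketch of the equivalent logic, reconstructed from those removed lines rather than from the helper's real definition (which is outside this excerpt):

    /* Illustrative reconstruction, not the actual helper body. */
    static bool requires_tls_upgrade(QIOChannel *ioc)
    {
        MigrationState *s = migrate_get_current();

        /* Upgrade only when TLS credentials are configured and the channel
         * is not already a TLS channel. */
        return s->parameters.tls_creds &&
               *s->parameters.tls_creds &&
               !object_dynamic_cast(OBJECT(ioc), TYPE_QIO_CHANNEL_TLS);
    }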
* TODO: We may need a timeout mechanism to prevent COLO process @@ -526,12 +529,10 @@ static void colo_process_checkpoint(MigrationState *s) { QIOChannelBuffer *bioc; QEMUFile *fb = NULL; - int64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_HOST); Error *local_err = NULL; int ret; - last_colo_mode = get_colo_mode(); - if (last_colo_mode != COLO_MODE_PRIMARY) { + if (get_colo_mode() != COLO_MODE_PRIMARY) { error_report("COLO mode must be COLO_MODE_PRIMARY"); return; } @@ -557,7 +558,7 @@ static void colo_process_checkpoint(MigrationState *s) goto out; } bioc = qio_channel_buffer_new(COLO_BUFFER_BASE_SIZE); - fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc)); + fb = qemu_file_new_output(QIO_CHANNEL(bioc)); object_unref(OBJECT(bioc)); qemu_mutex_lock_iothread(); @@ -575,8 +576,8 @@ static void colo_process_checkpoint(MigrationState *s) qemu_mutex_unlock_iothread(); trace_colo_vm_state_change("stop", "run"); - timer_mod(s->colo_delay_timer, - current_time + s->parameters.x_checkpoint_delay); + timer_mod(s->colo_delay_timer, qemu_clock_get_ms(QEMU_CLOCK_HOST) + + s->parameters.x_checkpoint_delay); while (s->state == MIGRATION_STATUS_COLO) { if (failover_get_state() != FAILOVER_STATUS_NONE) { @@ -640,6 +641,7 @@ out: */ if (s->rp_state.from_dst_file) { qemu_fclose(s->rp_state.from_dst_file); + s->rp_state.from_dst_file = NULL; } } @@ -663,8 +665,6 @@ void migrate_start_colo_process(MigrationState *s) colo_checkpoint_notify, s); qemu_sem_init(&s->colo_exit_sem, 0); - migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, - MIGRATION_STATUS_COLO); colo_process_checkpoint(s); qemu_mutex_lock_iothread(); } @@ -679,8 +679,8 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, qemu_mutex_lock_iothread(); vm_stop_force_state(RUN_STATE_COLO); - trace_colo_vm_state_change("run", "stop"); qemu_mutex_unlock_iothread(); + trace_colo_vm_state_change("run", "stop"); /* FIXME: This is unnecessary for periodic checkpoint mode */ colo_send_message(mis->to_src_file, COLO_MESSAGE_CHECKPOINT_REPLY, @@ -782,8 +782,8 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, vmstate_loading = false; vm_start(); - trace_colo_vm_state_change("stop", "run"); qemu_mutex_unlock_iothread(); + trace_colo_vm_state_change("stop", "run"); if (failover_get_state() == FAILOVER_STATUS_RELAUNCH) { return; @@ -816,6 +816,26 @@ static void colo_wait_handle_message(MigrationIncomingState *mis, } } +void colo_shutdown(void) +{ + MigrationIncomingState *mis = NULL; + MigrationState *s = NULL; + + switch (get_colo_mode()) { + case COLO_MODE_PRIMARY: + s = migrate_get_current(); + qemu_event_set(&s->colo_checkpoint_event); + qemu_sem_post(&s->colo_exit_sem); + break; + case COLO_MODE_SECONDARY: + mis = migration_incoming_get_current(); + qemu_sem_post(&mis->colo_incoming_sem); + break; + default: + break; + } +} + void *colo_process_incoming_thread(void *opaque) { MigrationIncomingState *mis = opaque; @@ -829,8 +849,7 @@ void *colo_process_incoming_thread(void *opaque) migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_COLO); - last_colo_mode = get_colo_mode(); - if (last_colo_mode != COLO_MODE_SECONDARY) { + if (get_colo_mode() != COLO_MODE_SECONDARY) { error_report("COLO mode must be COLO_MODE_SECONDARY"); return NULL; } @@ -853,7 +872,7 @@ void *colo_process_incoming_thread(void *opaque) colo_incoming_start_dirty_log(); bioc = qio_channel_buffer_new(COLO_BUFFER_BASE_SIZE); - fb = qemu_fopen_channel_input(QIO_CHANNEL(bioc)); + fb = qemu_file_new_input(QIO_CHANNEL(bioc)); 
object_unref(OBJECT(bioc)); qemu_mutex_lock_iothread(); @@ -867,8 +886,8 @@ void *colo_process_incoming_thread(void *opaque) abort(); #endif vm_start(); - trace_colo_vm_state_change("stop", "run"); qemu_mutex_unlock_iothread(); + trace_colo_vm_state_change("stop", "run"); colo_send_message(mis->to_src_file, COLO_MESSAGE_CHECKPOINT_READY, &local_err); @@ -918,11 +937,6 @@ out: /* Hope this not to be too long to loop here */ qemu_sem_wait(&mis->colo_incoming_sem); qemu_sem_destroy(&mis->colo_incoming_sem); - /* Must be called after failover BH is completed */ - if (mis->to_src_file) { - qemu_fclose(mis->to_src_file); - mis->to_src_file = NULL; - } rcu_unregister_thread(); return NULL; diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c index 320c56ba2c..d6f1e01a70 100644 --- a/migration/dirtyrate.c +++ b/migration/dirtyrate.c @@ -15,7 +15,9 @@ #include "qapi/error.h" #include "cpu.h" #include "exec/ramblock.h" +#include "exec/ram_addr.h" #include "qemu/rcu_queue.h" +#include "qemu/main-loop.h" #include "qapi/qapi-commands-migration.h" #include "ram.h" #include "trace.h" @@ -23,11 +25,28 @@ #include "monitor/hmp.h" #include "monitor/monitor.h" #include "qapi/qmp/qdict.h" +#include "sysemu/kvm.h" +#include "sysemu/runstate.h" +#include "exec/memory.h" + +/* + * total_dirty_pages is procted by BQL and is used + * to stat dirty pages during the period of two + * memory_global_dirty_log_sync + */ +uint64_t total_dirty_pages; + +typedef struct DirtyPageRecord { + uint64_t start_pages; + uint64_t end_pages; +} DirtyPageRecord; static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED; static struct DirtyRateStat DirtyStat; +static DirtyRateMeasureMode dirtyrate_mode = + DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING; -static int64_t set_sample_page_period(int64_t msec, int64_t initial_time) +static int64_t dirty_stat_wait(int64_t msec, int64_t initial_time) { int64_t current_time; @@ -41,6 +60,132 @@ static int64_t set_sample_page_period(int64_t msec, int64_t initial_time) return msec; } +static inline void record_dirtypages(DirtyPageRecord *dirty_pages, + CPUState *cpu, bool start) +{ + if (start) { + dirty_pages[cpu->cpu_index].start_pages = cpu->dirty_pages; + } else { + dirty_pages[cpu->cpu_index].end_pages = cpu->dirty_pages; + } +} + +static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages, + int64_t calc_time_ms) +{ + uint64_t memory_size_MB; + uint64_t increased_dirty_pages = + dirty_pages.end_pages - dirty_pages.start_pages; + + memory_size_MB = (increased_dirty_pages * TARGET_PAGE_SIZE) >> 20; + + return memory_size_MB * 1000 / calc_time_ms; +} + +void global_dirty_log_change(unsigned int flag, bool start) +{ + qemu_mutex_lock_iothread(); + if (start) { + memory_global_dirty_log_start(flag); + } else { + memory_global_dirty_log_stop(flag); + } + qemu_mutex_unlock_iothread(); +} + +/* + * global_dirty_log_sync + * 1. sync dirty log from kvm + * 2. stop dirty tracking if needed. 
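A worked example of the conversion that do_calculate_dirtyrate() above performs, assuming a 4 KiB TARGET_PAGE_SIZE (the real value is target dependent):

    /*
     * increased_dirty_pages = 131072 pages over calc_time_ms = 2000 ms
     * memory_size_MB        = (131072 * 4096) >> 20  = 512
     * dirty rate            = 512 * 1000 / 2000      = 256 MB/s
     */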
+ */ +static void global_dirty_log_sync(unsigned int flag, bool one_shot) +{ + qemu_mutex_lock_iothread(); + memory_global_dirty_log_sync(); + if (one_shot) { + memory_global_dirty_log_stop(flag); + } + qemu_mutex_unlock_iothread(); +} + +static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat) +{ + CPUState *cpu; + DirtyPageRecord *records; + int nvcpu = 0; + + CPU_FOREACH(cpu) { + nvcpu++; + } + + stat->nvcpu = nvcpu; + stat->rates = g_new0(DirtyRateVcpu, nvcpu); + + records = g_new0(DirtyPageRecord, nvcpu); + + return records; +} + +static void vcpu_dirty_stat_collect(VcpuStat *stat, + DirtyPageRecord *records, + bool start) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + record_dirtypages(records, cpu, start); + } +} + +int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms, + VcpuStat *stat, + unsigned int flag, + bool one_shot) +{ + DirtyPageRecord *records; + int64_t init_time_ms; + int64_t duration; + int64_t dirtyrate; + int i = 0; + unsigned int gen_id; + +retry: + init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + + cpu_list_lock(); + gen_id = cpu_list_generation_id_get(); + records = vcpu_dirty_stat_alloc(stat); + vcpu_dirty_stat_collect(stat, records, true); + cpu_list_unlock(); + + duration = dirty_stat_wait(calc_time_ms, init_time_ms); + + global_dirty_log_sync(flag, one_shot); + + cpu_list_lock(); + if (gen_id != cpu_list_generation_id_get()) { + g_free(records); + g_free(stat->rates); + cpu_list_unlock(); + goto retry; + } + vcpu_dirty_stat_collect(stat, records, false); + cpu_list_unlock(); + + for (i = 0; i < stat->nvcpu; i++) { + dirtyrate = do_calculate_dirtyrate(records[i], duration); + + stat->rates[i].id = i; + stat->rates[i].dirty_rate = dirtyrate; + + trace_dirtyrate_do_calculate_vcpu(i, dirtyrate); + } + + g_free(records); + + return duration; +} + static bool is_sample_period_valid(int64_t sec) { if (sec < MIN_FETCH_DIRTYRATE_TIME_SEC || @@ -70,51 +215,94 @@ static int dirtyrate_set_state(int *state, int old_state, int new_state) static struct DirtyRateInfo *query_dirty_rate_info(void) { + int i; int64_t dirty_rate = DirtyStat.dirty_rate; - struct DirtyRateInfo *info = g_malloc0(sizeof(DirtyRateInfo)); - - if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) { - info->has_dirty_rate = true; - info->dirty_rate = dirty_rate; - } + struct DirtyRateInfo *info = g_new0(DirtyRateInfo, 1); + DirtyRateVcpuList *head = NULL, **tail = &head; info->status = CalculatingState; info->start_time = DirtyStat.start_time; info->calc_time = DirtyStat.calc_time; info->sample_pages = DirtyStat.sample_pages; + info->mode = dirtyrate_mode; + + if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) { + info->has_dirty_rate = true; + info->dirty_rate = dirty_rate; + + if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) { + /* + * set sample_pages with 0 to indicate page sampling + * isn't enabled + **/ + info->sample_pages = 0; + info->has_vcpu_dirty_rate = true; + for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) { + DirtyRateVcpu *rate = g_new0(DirtyRateVcpu, 1); + rate->id = DirtyStat.dirty_ring.rates[i].id; + rate->dirty_rate = DirtyStat.dirty_ring.rates[i].dirty_rate; + QAPI_LIST_APPEND(tail, rate); + } + info->vcpu_dirty_rate = head; + } + + if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) { + info->sample_pages = 0; + } + } trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState)); return info; } -static void init_dirtyrate_stat(int64_t start_time, int64_t calc_time, - uint64_t sample_pages) +static void 
init_dirtyrate_stat(int64_t start_time, + struct DirtyRateConfig config) { - DirtyStat.total_dirty_samples = 0; - DirtyStat.total_sample_count = 0; - DirtyStat.total_block_mem_MB = 0; DirtyStat.dirty_rate = -1; DirtyStat.start_time = start_time; - DirtyStat.calc_time = calc_time; - DirtyStat.sample_pages = sample_pages; + DirtyStat.calc_time = config.sample_period_seconds; + DirtyStat.sample_pages = config.sample_pages_per_gigabytes; + + switch (config.mode) { + case DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING: + DirtyStat.page_sampling.total_dirty_samples = 0; + DirtyStat.page_sampling.total_sample_count = 0; + DirtyStat.page_sampling.total_block_mem_MB = 0; + break; + case DIRTY_RATE_MEASURE_MODE_DIRTY_RING: + DirtyStat.dirty_ring.nvcpu = -1; + DirtyStat.dirty_ring.rates = NULL; + break; + default: + break; + } +} + +static void cleanup_dirtyrate_stat(struct DirtyRateConfig config) +{ + /* last calc-dirty-rate qmp use dirty ring mode */ + if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) { + free(DirtyStat.dirty_ring.rates); + DirtyStat.dirty_ring.rates = NULL; + } } static void update_dirtyrate_stat(struct RamblockDirtyInfo *info) { - DirtyStat.total_dirty_samples += info->sample_dirty_count; - DirtyStat.total_sample_count += info->sample_pages_count; + DirtyStat.page_sampling.total_dirty_samples += info->sample_dirty_count; + DirtyStat.page_sampling.total_sample_count += info->sample_pages_count; /* size of total pages in MB */ - DirtyStat.total_block_mem_MB += (info->ramblock_pages * - TARGET_PAGE_SIZE) >> 20; + DirtyStat.page_sampling.total_block_mem_MB += (info->ramblock_pages * + TARGET_PAGE_SIZE) >> 20; } static void update_dirtyrate(uint64_t msec) { uint64_t dirtyrate; - uint64_t total_dirty_samples = DirtyStat.total_dirty_samples; - uint64_t total_sample_count = DirtyStat.total_sample_count; - uint64_t total_block_mem_MB = DirtyStat.total_block_mem_MB; + uint64_t total_dirty_samples = DirtyStat.page_sampling.total_dirty_samples; + uint64_t total_sample_count = DirtyStat.page_sampling.total_sample_count; + uint64_t total_block_mem_MB = DirtyStat.page_sampling.total_block_mem_MB; dirtyrate = total_dirty_samples * total_block_mem_MB * 1000 / (total_sample_count * msec); @@ -327,21 +515,118 @@ static bool compare_page_hash_info(struct RamblockDirtyInfo *info, update_dirtyrate_stat(block_dinfo); } - if (DirtyStat.total_sample_count == 0) { + if (DirtyStat.page_sampling.total_sample_count == 0) { return false; } return true; } -static void calculate_dirtyrate(struct DirtyRateConfig config) +static inline void record_dirtypages_bitmap(DirtyPageRecord *dirty_pages, + bool start) +{ + if (start) { + dirty_pages->start_pages = total_dirty_pages; + } else { + dirty_pages->end_pages = total_dirty_pages; + } +} + +static inline void dirtyrate_manual_reset_protect(void) +{ + RAMBlock *block = NULL; + + WITH_RCU_READ_LOCK_GUARD() { + RAMBLOCK_FOREACH_MIGRATABLE(block) { + memory_region_clear_dirty_bitmap(block->mr, 0, + block->used_length); + } + } +} + +static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config) +{ + int64_t msec = 0; + int64_t start_time; + DirtyPageRecord dirty_pages; + + qemu_mutex_lock_iothread(); + memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE); + + /* + * 1'round of log sync may return all 1 bits with + * KVM_DIRTY_LOG_INITIALLY_SET enable + * skip it unconditionally and start dirty tracking + * from 2'round of log sync + */ + memory_global_dirty_log_sync(); + + /* + * reset page protect manually and unconditionally. 
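Condensed, the dirty-bitmap path that calculate_dirtyrate_dirty_bitmap() implements looks like the fragment below (a summary of the code that continues in this hunk, using only helpers introduced in this file; it assumes dirty logging has already been started and the initial bitmap cleared as shown above):

    DirtyPageRecord dirty_pages;

    msec = config.sample_period_seconds * 1000;
    record_dirtypages_bitmap(&dirty_pages, true);          /* snapshot start count */
    msec = dirty_stat_wait(msec, start_time);              /* sleep for the period  */
    global_dirty_log_sync(GLOBAL_DIRTY_DIRTY_RATE, true);  /* fetch bitmap, stop tracking */
    record_dirtypages_bitmap(&dirty_pages, false);         /* snapshot end count   */
    DirtyStat.dirty_rate = do_calculate_dirtyrate(dirty_pages, msec);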
+ * this make sure kvm dirty log be cleared if + * KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE cap is enabled. + */ + dirtyrate_manual_reset_protect(); + qemu_mutex_unlock_iothread(); + + record_dirtypages_bitmap(&dirty_pages, true); + + start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + DirtyStat.start_time = start_time / 1000; + + msec = config.sample_period_seconds * 1000; + msec = dirty_stat_wait(msec, start_time); + DirtyStat.calc_time = msec / 1000; + + /* + * do two things. + * 1. fetch dirty bitmap from kvm + * 2. stop dirty tracking + */ + global_dirty_log_sync(GLOBAL_DIRTY_DIRTY_RATE, true); + + record_dirtypages_bitmap(&dirty_pages, false); + + DirtyStat.dirty_rate = do_calculate_dirtyrate(dirty_pages, msec); +} + +static void calculate_dirtyrate_dirty_ring(struct DirtyRateConfig config) +{ + int64_t duration; + uint64_t dirtyrate = 0; + uint64_t dirtyrate_sum = 0; + int i = 0; + + /* start log sync */ + global_dirty_log_change(GLOBAL_DIRTY_DIRTY_RATE, true); + + DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000; + + /* calculate vcpu dirtyrate */ + duration = vcpu_calculate_dirtyrate(config.sample_period_seconds * 1000, + &DirtyStat.dirty_ring, + GLOBAL_DIRTY_DIRTY_RATE, + true); + + DirtyStat.calc_time = duration / 1000; + + /* calculate vm dirtyrate */ + for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) { + dirtyrate = DirtyStat.dirty_ring.rates[i].dirty_rate; + DirtyStat.dirty_ring.rates[i].dirty_rate = dirtyrate; + dirtyrate_sum += dirtyrate; + } + + DirtyStat.dirty_rate = dirtyrate_sum; +} + +static void calculate_dirtyrate_sample_vm(struct DirtyRateConfig config) { struct RamblockDirtyInfo *block_dinfo = NULL; int block_count = 0; int64_t msec = 0; int64_t initial_time; - rcu_register_thread(); rcu_read_lock(); initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) { @@ -350,7 +635,7 @@ static void calculate_dirtyrate(struct DirtyRateConfig config) rcu_read_unlock(); msec = config.sample_period_seconds * 1000; - msec = set_sample_page_period(msec, initial_time); + msec = dirty_stat_wait(msec, initial_time); DirtyStat.start_time = initial_time / 1000; DirtyStat.calc_time = msec / 1000; @@ -364,16 +649,26 @@ static void calculate_dirtyrate(struct DirtyRateConfig config) out: rcu_read_unlock(); free_ramblock_dirty_info(block_dinfo, block_count); - rcu_unregister_thread(); +} + +static void calculate_dirtyrate(struct DirtyRateConfig config) +{ + if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) { + calculate_dirtyrate_dirty_bitmap(config); + } else if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) { + calculate_dirtyrate_dirty_ring(config); + } else { + calculate_dirtyrate_sample_vm(config); + } + + trace_dirtyrate_calculate(DirtyStat.dirty_rate); } void *get_dirtyrate_thread(void *arg) { struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg; int ret; - int64_t start_time; - int64_t calc_time; - uint64_t sample_pages; + rcu_register_thread(); ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED, DIRTY_RATE_STATUS_MEASURING); @@ -382,11 +677,6 @@ void *get_dirtyrate_thread(void *arg) return NULL; } - start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000; - calc_time = config.sample_period_seconds; - sample_pages = config.sample_pages_per_gigabytes; - init_dirtyrate_stat(start_time, calc_time, sample_pages); - calculate_dirtyrate(config); ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING, @@ -394,15 +684,22 @@ void 
*get_dirtyrate_thread(void *arg) if (ret == -1) { error_report("change dirtyrate state failed."); } + + rcu_unregister_thread(); return NULL; } -void qmp_calc_dirty_rate(int64_t calc_time, bool has_sample_pages, - int64_t sample_pages, Error **errp) +void qmp_calc_dirty_rate(int64_t calc_time, + bool has_sample_pages, + int64_t sample_pages, + bool has_mode, + DirtyRateMeasureMode mode, + Error **errp) { static struct DirtyRateConfig config; QemuThread thread; int ret; + int64_t start_time; /* * If the dirty rate is already being measured, don't attempt to start. @@ -419,6 +716,15 @@ void qmp_calc_dirty_rate(int64_t calc_time, bool has_sample_pages, return; } + if (!has_mode) { + mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING; + } + + if (has_sample_pages && mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) { + error_setg(errp, "either sample-pages or dirty-ring can be specified."); + return; + } + if (has_sample_pages) { if (!is_sample_pages_valid(sample_pages)) { error_setg(errp, "sample-pages is out of range[%d, %d].", @@ -430,6 +736,19 @@ void qmp_calc_dirty_rate(int64_t calc_time, bool has_sample_pages, sample_pages = DIRTYRATE_DEFAULT_SAMPLE_PAGES; } + /* + * dirty ring mode only works when kvm dirty ring is enabled. + * on the contrary, dirty bitmap mode is not. + */ + if (((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) && + !kvm_dirty_ring_enabled()) || + ((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) && + kvm_dirty_ring_enabled())) { + error_setg(errp, "mode %s is not enabled, use other method instead.", + DirtyRateMeasureMode_str(mode)); + return; + } + /* * Init calculation state as unstarted. */ @@ -442,6 +761,19 @@ void qmp_calc_dirty_rate(int64_t calc_time, bool has_sample_pages, config.sample_period_seconds = calc_time; config.sample_pages_per_gigabytes = sample_pages; + config.mode = mode; + + cleanup_dirtyrate_stat(config); + + /* + * update dirty rate mode so that we can figure out what mode has + * been used in last calculation + **/ + dirtyrate_mode = mode; + + start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000; + init_dirtyrate_stat(start_time, config); + qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread, (void *)&config, QEMU_THREAD_DETACHED); } @@ -463,12 +795,24 @@ void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict) info->sample_pages); monitor_printf(mon, "Period: %"PRIi64" (sec)\n", info->calc_time); + monitor_printf(mon, "Mode: %s\n", + DirtyRateMeasureMode_str(info->mode)); monitor_printf(mon, "Dirty rate: "); if (info->has_dirty_rate) { monitor_printf(mon, "%"PRIi64" (MB/s)\n", info->dirty_rate); + if (info->has_vcpu_dirty_rate) { + DirtyRateVcpuList *rate, *head = info->vcpu_dirty_rate; + for (rate = head; rate != NULL; rate = rate->next) { + monitor_printf(mon, "vcpu[%"PRIi64"], Dirty rate: %"PRIi64 + " (MB/s)\n", rate->value->id, + rate->value->dirty_rate); + } + } } else { monitor_printf(mon, "(not ready)\n"); } + + qapi_free_DirtyRateVcpuList(info->vcpu_dirty_rate); g_free(info); } @@ -477,6 +821,9 @@ void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict) int64_t sec = qdict_get_try_int(qdict, "second", 0); int64_t sample_pages = qdict_get_try_int(qdict, "sample_pages_per_GB", -1); bool has_sample_pages = (sample_pages != -1); + bool dirty_ring = qdict_get_try_bool(qdict, "dirty_ring", false); + bool dirty_bitmap = qdict_get_try_bool(qdict, "dirty_bitmap", false); + DirtyRateMeasureMode mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING; Error *err = NULL; if (!sec) { @@ -484,7 +831,20 @@ void hmp_calc_dirty_rate(Monitor *mon, const 
QDict *qdict) return; } - qmp_calc_dirty_rate(sec, has_sample_pages, sample_pages, &err); + if (dirty_ring && dirty_bitmap) { + monitor_printf(mon, "Either dirty ring or dirty bitmap " + "can be specified!\n"); + return; + } + + if (dirty_bitmap) { + mode = DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP; + } else if (dirty_ring) { + mode = DIRTY_RATE_MEASURE_MODE_DIRTY_RING; + } + + qmp_calc_dirty_rate(sec, has_sample_pages, sample_pages, true, + mode, &err); if (err) { hmp_handle_error(mon, err); return; diff --git a/migration/dirtyrate.h b/migration/dirtyrate.h index e1fd29089e..594a5c0bb6 100644 --- a/migration/dirtyrate.h +++ b/migration/dirtyrate.h @@ -13,6 +13,8 @@ #ifndef QEMU_MIGRATION_DIRTYRATE_H #define QEMU_MIGRATION_DIRTYRATE_H +#include "sysemu/dirtyrate.h" + /* * Sample 512 pages per GB as default. */ @@ -43,6 +45,7 @@ struct DirtyRateConfig { uint64_t sample_pages_per_gigabytes; /* sample pages per GB */ int64_t sample_period_seconds; /* time duration between two sampling */ + DirtyRateMeasureMode mode; /* mode of dirtyrate measurement */ }; /* @@ -58,17 +61,24 @@ struct RamblockDirtyInfo { uint32_t *hash_result; /* array of hash result for sampled pages */ }; +typedef struct SampleVMStat { + uint64_t total_dirty_samples; /* total dirty sampled page */ + uint64_t total_sample_count; /* total sampled pages */ + uint64_t total_block_mem_MB; /* size of total sampled pages in MB */ +} SampleVMStat; + /* * Store calculation statistics for each measure. */ struct DirtyRateStat { - uint64_t total_dirty_samples; /* total dirty sampled page */ - uint64_t total_sample_count; /* total sampled pages */ - uint64_t total_block_mem_MB; /* size of total sampled pages in MB */ int64_t dirty_rate; /* dirty rate in MB/s */ int64_t start_time; /* calculation start time in units of second */ int64_t calc_time; /* time duration of two sampling in units of second */ uint64_t sample_pages; /* sample pages per GB */ + union { + SampleVMStat page_sampling; + VcpuStat dirty_ring; + }; }; void *get_dirtyrate_thread(void *arg); diff --git a/migration/meson.build b/migration/meson.build index f8714dcb15..690487cf1a 100644 --- a/migration/meson.build +++ b/migration/meson.build @@ -4,7 +4,6 @@ migration_files = files( 'xbzrle.c', 'vmstate-types.c', 'vmstate.c', - 'qemu-file-channel.c', 'qemu-file.c', 'yank_functions.c', ) @@ -13,6 +12,7 @@ softmmu_ss.add(migration_files) softmmu_ss.add(files( 'block-dirty-bitmap.c', 'channel.c', + 'channel-block.c', 'colo-failover.c', 'colo.c', 'exec.c', @@ -27,8 +27,10 @@ softmmu_ss.add(files( 'tls.c', ), gnutls) -softmmu_ss.add(when: ['CONFIG_RDMA', rdma], if_true: files('rdma.c')) -softmmu_ss.add(when: 'CONFIG_LIVE_BLOCK_MIGRATION', if_true: files('block.c')) +softmmu_ss.add(when: rdma, if_true: files('rdma.c')) +if get_option('live_block_migration').allowed() + softmmu_ss.add(files('block.c')) +endif softmmu_ss.add(when: zstd, if_true: files('multifd-zstd.c')) specific_ss.add(when: 'CONFIG_SOFTMMU', diff --git a/migration/migration.c b/migration/migration.c index 041b8451a6..c19fb5cb3e 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -30,7 +30,6 @@ #include "migration/misc.h" #include "migration.h" #include "savevm.h" -#include "qemu-file-channel.h" #include "qemu-file.h" #include "migration/vmstate.h" #include "block/block.h" @@ -49,6 +48,7 @@ #include "trace.h" #include "exec/target_page.h" #include "io/channel-buffer.h" +#include "io/channel-tls.h" #include "migration/colo.h" #include "hw/boards.h" #include "hw/qdev-properties.h" @@ -60,6 +60,7 @@ #include 
"qemu/yank.h" #include "sysemu/cpus.h" #include "yank_functions.h" +#include "sysemu/qtest.h" #define MAX_THROTTLE (128 << 20) /* Migration transfer speed throttling */ @@ -162,7 +163,8 @@ INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot, MIGRATION_CAPABILITY_COMPRESS, MIGRATION_CAPABILITY_XBZRLE, MIGRATION_CAPABILITY_X_COLO, - MIGRATION_CAPABILITY_VALIDATE_UUID); + MIGRATION_CAPABILITY_VALIDATE_UUID, + MIGRATION_CAPABILITY_ZERO_COPY_SEND); /* When we add fault tolerance, we could have several migrations at once. For now we don't need to add @@ -179,6 +181,18 @@ static int migration_maybe_pause(MigrationState *s, int new_state); static void migrate_fd_cancel(MigrationState *s); +static bool migrate_allow_multi_channels = true; + +void migrate_protocol_allow_multi_channels(bool allow) +{ + migrate_allow_multi_channels = allow; +} + +bool migrate_multi_channels_is_allowed(void) +{ + return migrate_allow_multi_channels; +} + static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp) { uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp; @@ -188,8 +202,6 @@ static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp) void migration_object_init(void) { - Error *err = NULL; - /* This can only be called once. */ assert(!current_migration); current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION)); @@ -204,34 +216,42 @@ void migration_object_init(void) current_incoming->postcopy_remote_fds = g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD)); qemu_mutex_init(¤t_incoming->rp_mutex); + qemu_mutex_init(¤t_incoming->postcopy_prio_thread_mutex); qemu_event_init(¤t_incoming->main_thread_load_event, false); qemu_sem_init(¤t_incoming->postcopy_pause_sem_dst, 0); qemu_sem_init(¤t_incoming->postcopy_pause_sem_fault, 0); + qemu_sem_init(¤t_incoming->postcopy_pause_sem_fast_load, 0); qemu_mutex_init(¤t_incoming->page_request_mutex); current_incoming->page_requested = g_tree_new(page_request_addr_cmp); - if (!migration_object_check(current_migration, &err)) { - error_report_err(err); - exit(1); - } + migration_object_check(current_migration, &error_fatal); blk_mig_init(); ram_mig_init(); dirty_bitmap_mig_init(); } -void migration_cancel(void) +void migration_cancel(const Error *error) { + if (error) { + migrate_set_error(current_migration, error); + } migrate_fd_cancel(current_migration); } void migration_shutdown(void) { + /* + * When the QEMU main thread exit, the COLO thread + * may wait a semaphore. So, we should wakeup the + * COLO thread before migration shutdown. 
+ */ + colo_shutdown(); /* * Cancel the current migration - that will (eventually) * stop the migration using this structure */ - migration_cancel(); + migration_cancel(NULL); object_unref(OBJECT(current_migration)); /* @@ -262,6 +282,19 @@ MigrationIncomingState *migration_incoming_get_current(void) return current_incoming; } +void migration_incoming_transport_cleanup(MigrationIncomingState *mis) +{ + if (mis->socket_address_list) { + qapi_free_SocketAddressList(mis->socket_address_list); + mis->socket_address_list = NULL; + } + + if (mis->transport_cleanup) { + mis->transport_cleanup(mis->transport_data); + mis->transport_data = mis->transport_cleanup = NULL; + } +} + void migration_incoming_state_destroy(void) { struct MigrationIncomingState *mis = migration_incoming_get_current(); @@ -282,10 +315,8 @@ void migration_incoming_state_destroy(void) g_array_free(mis->postcopy_remote_fds, TRUE); mis->postcopy_remote_fds = NULL; } - if (mis->transport_cleanup) { - mis->transport_cleanup(mis->transport_data); - } + migration_incoming_transport_cleanup(mis); qemu_event_reset(&mis->main_thread_load_event); if (mis->page_requested) { @@ -293,9 +324,10 @@ void migration_incoming_state_destroy(void) mis->page_requested = NULL; } - if (mis->socket_address_list) { - qapi_free_SocketAddressList(mis->socket_address_list); - mis->socket_address_list = NULL; + if (mis->postcopy_qemufile_dst) { + migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst); + qemu_fclose(mis->postcopy_qemufile_dst); + mis->postcopy_qemufile_dst = NULL; } yank_unregister_instance(MIGRATION_YANK_INSTANCE); @@ -396,7 +428,7 @@ int migrate_send_rp_message_req_pages(MigrationIncomingState *mis, int migrate_send_rp_req_pages(MigrationIncomingState *mis, RAMBlock *rb, ram_addr_t start, uint64_t haddr) { - void *aligned = (void *)(uintptr_t)(haddr & (-qemu_ram_pagesize(rb))); + void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb)); bool received = false; WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) { @@ -458,10 +490,12 @@ static void qemu_start_incoming_migration(const char *uri, Error **errp) { const char *p = NULL; + migrate_protocol_allow_multi_channels(false); /* reset it anyway */ qapi_event_send_migration(MIGRATION_STATUS_SETUP); if (strstart(uri, "tcp:", &p) || strstart(uri, "unix:", NULL) || strstart(uri, "vsock:", NULL)) { + migrate_protocol_allow_multi_channels(true); socket_start_incoming_migration(p ? p : uri, errp); #ifdef CONFIG_RDMA } else if (strstart(uri, "rdma:", &p)) { @@ -490,9 +524,9 @@ static void process_incoming_migration_bh(void *opaque) if (!migrate_late_block_activate() || (autostart && (!global_state_received() || global_state_get_runstate() == RUN_STATE_RUNNING))) { - /* Make sure all file formats flush their mutable metadata. + /* Make sure all file formats throw away their mutable metadata. * If we get an error here, just don't restart the VM yet. 
*/ - bdrv_invalidate_cache_all(&local_err); + bdrv_activate_all(&local_err); if (local_err) { error_report_err(local_err); local_err = NULL; @@ -540,7 +574,8 @@ static void process_incoming_migration_bh(void *opaque) migration_incoming_state_destroy(); } -static void process_incoming_migration_co(void *opaque) +static void coroutine_fn +process_incoming_migration_co(void *opaque) { MigrationIncomingState *mis = migration_incoming_get_current(); PostcopyState ps; @@ -578,8 +613,8 @@ static void process_incoming_migration_co(void *opaque) /* we get COLO info, and know if we are in COLO mode */ if (!ret && migration_incoming_colo_enabled()) { - /* Make sure all file formats flush their mutable metadata */ - bdrv_invalidate_cache_all(&local_err); + /* Make sure all file formats throw away their mutable metadata */ + bdrv_activate_all(&local_err); if (local_err) { error_report_err(local_err); goto fail; @@ -590,8 +625,10 @@ static void process_incoming_migration_co(void *opaque) mis->have_colo_incoming_thread = true; qemu_coroutine_yield(); + qemu_mutex_unlock_iothread(); /* Wait checkpoint incoming thread exit before free resource */ qemu_thread_join(&mis->colo_incoming_thread); + qemu_mutex_lock_iothread(); /* We hold the global iothread lock, so it is safe here */ colo_release_ram_cache(); } @@ -616,30 +653,25 @@ fail: } /** - * @migration_incoming_setup: Setup incoming migration - * - * Returns 0 for no error or 1 for error - * + * migration_incoming_setup: Setup incoming migration * @f: file for main migration channel * @errp: where to put errors + * + * Returns: %true on success, %false on error. */ -static int migration_incoming_setup(QEMUFile *f, Error **errp) +static bool migration_incoming_setup(QEMUFile *f, Error **errp) { MigrationIncomingState *mis = migration_incoming_get_current(); - Error *local_err = NULL; - if (multifd_load_setup(&local_err) != 0) { - /* We haven't been able to create multifd threads - nothing better to do */ - error_report_err(local_err); - exit(EXIT_FAILURE); + if (multifd_load_setup(errp) != 0) { + return false; } if (!mis->from_src_file) { mis->from_src_file = f; } qemu_file_set_blocking(f, false); - return 0; + return true; } void migration_incoming_process(void) @@ -649,28 +681,29 @@ void migration_incoming_process(void) } /* Returns true if recovered from a paused migration, otherwise false */ -static bool postcopy_try_recover(QEMUFile *f) +static bool postcopy_try_recover(void) { MigrationIncomingState *mis = migration_incoming_get_current(); if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) { /* Resumed from a paused postcopy migration */ - mis->from_src_file = f; + /* This should be set already in migration_incoming_setup() */ + assert(mis->from_src_file); /* Postcopy has standalone thread to do vm load */ - qemu_file_set_blocking(f, true); + qemu_file_set_blocking(mis->from_src_file, true); /* Re-configure the return path */ - mis->to_src_file = qemu_file_get_return_path(f); + mis->to_src_file = qemu_file_get_return_path(mis->from_src_file); migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED, MIGRATION_STATUS_POSTCOPY_RECOVER); /* * Here, we only wake up the main loading thread (while the - * fault thread will still be waiting), so that we can receive + * rest threads will still be waiting), so that we can receive * commands from source now, and answer it if needed. 
The - * fault thread will be woken up afterwards until we are sure + * rest threads will be woken up afterwards until we are sure * that source is ready to reply to page requests. */ qemu_sem_post(&mis->postcopy_pause_sem_dst); @@ -682,48 +715,50 @@ static bool postcopy_try_recover(QEMUFile *f) void migration_fd_process_incoming(QEMUFile *f, Error **errp) { - Error *local_err = NULL; - - if (postcopy_try_recover(f)) { + if (!migration_incoming_setup(f, errp)) { return; } - - if (migration_incoming_setup(f, &local_err)) { - error_propagate(errp, local_err); + if (postcopy_try_recover()) { return; } migration_incoming_process(); } +static bool migration_needs_multiple_sockets(void) +{ + return migrate_use_multifd() || migrate_postcopy_preempt(); +} + void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) { MigrationIncomingState *mis = migration_incoming_get_current(); Error *local_err = NULL; bool start_migration; + QEMUFile *f; if (!mis->from_src_file) { /* The first connection (multifd may have multiple) */ - QEMUFile *f = qemu_fopen_channel_input(ioc); + f = qemu_file_new_input(ioc); - /* If it's a recovery, we're done */ - if (postcopy_try_recover(f)) { - return; - } - - if (migration_incoming_setup(f, &local_err)) { - error_propagate(errp, local_err); + if (!migration_incoming_setup(f, errp)) { return; } /* * Common migration only needs one channel, so we can start - * right now. Multifd needs more than one channel, we wait. + * right now. Some features need more than one channel, we wait. */ - start_migration = !migrate_use_multifd(); + start_migration = !migration_needs_multiple_sockets(); } else { /* Multiple connections */ - assert(migrate_use_multifd()); - start_migration = multifd_recv_new_channel(ioc, &local_err); + assert(migration_needs_multiple_sockets()); + if (migrate_use_multifd()) { + start_migration = multifd_recv_new_channel(ioc, &local_err); + } else { + assert(migrate_postcopy_preempt()); + f = qemu_file_new_input(ioc); + start_migration = postcopy_preempt_new_channel(mis, f); + } if (local_err) { error_propagate(errp, local_err); return; @@ -731,6 +766,10 @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) } if (start_migration) { + /* If it's a recovery, we're done */ + if (postcopy_try_recover()) { + return; + } migration_incoming_process(); } } @@ -744,11 +783,20 @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) bool migration_has_all_channels(void) { MigrationIncomingState *mis = migration_incoming_get_current(); - bool all_channels; - all_channels = multifd_recv_all_channels_created(); + if (!mis->from_src_file) { + return false; + } - return all_channels && mis->from_src_file != NULL; + if (migrate_use_multifd()) { + return multifd_recv_all_channels_created(); + } + + if (migrate_postcopy_preempt()) { + return mis->postcopy_qemufile_dst != NULL; + } + + return true; } /* @@ -997,6 +1045,8 @@ static void populate_time_info(MigrationInfo *info, MigrationState *s) static void populate_ram_info(MigrationInfo *info, MigrationState *s) { + size_t page_size = qemu_target_page_size(); + info->has_ram = true; info->ram = g_malloc0(sizeof(*info->ram)); info->ram->transferred = ram_counters.transferred; @@ -1005,14 +1055,18 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s) /* legacy value. 
It is not used anymore */ info->ram->skipped = 0; info->ram->normal = ram_counters.normal; - info->ram->normal_bytes = ram_counters.normal * - qemu_target_page_size(); + info->ram->normal_bytes = ram_counters.normal * page_size; info->ram->mbps = s->mbps; info->ram->dirty_sync_count = ram_counters.dirty_sync_count; + info->ram->dirty_sync_missed_zero_copy = + ram_counters.dirty_sync_missed_zero_copy; info->ram->postcopy_requests = ram_counters.postcopy_requests; - info->ram->page_size = qemu_target_page_size(); + info->ram->page_size = page_size; info->ram->multifd_bytes = ram_counters.multifd_bytes; info->ram->pages_per_second = s->pages_per_second; + info->ram->precopy_bytes = ram_counters.precopy_bytes; + info->ram->downtime_bytes = ram_counters.downtime_bytes; + info->ram->postcopy_bytes = ram_counters.postcopy_bytes; if (migrate_use_xbzrle()) { info->has_xbzrle_cache = true; @@ -1063,6 +1117,7 @@ static void populate_disk_info(MigrationInfo *info) static void fill_source_migration_info(MigrationInfo *info) { MigrationState *s = migrate_get_current(); + int state = qatomic_read(&s->state); GSList *cur_blocker = migration_blockers; info->blocked_reasons = NULL; @@ -1082,7 +1137,7 @@ static void fill_source_migration_info(MigrationInfo *info) } info->has_blocked_reasons = info->blocked_reasons != NULL; - switch (s->state) { + switch (state) { case MIGRATION_STATUS_NONE: /* no migration has happened ever */ /* do not overwrite destination migration status */ @@ -1127,7 +1182,7 @@ static void fill_source_migration_info(MigrationInfo *info) info->has_status = true; break; } - info->status = s->state; + info->status = state; } typedef enum WriteTrackingSupport { @@ -1249,6 +1304,59 @@ static bool migrate_caps_check(bool *cap_list, } } +#ifdef CONFIG_LINUX + if (cap_list[MIGRATION_CAPABILITY_ZERO_COPY_SEND] && + (!cap_list[MIGRATION_CAPABILITY_MULTIFD] || + cap_list[MIGRATION_CAPABILITY_COMPRESS] || + cap_list[MIGRATION_CAPABILITY_XBZRLE] || + migrate_multifd_compression() || + migrate_use_tls())) { + error_setg(errp, + "Zero copy only available for non-compressed non-TLS multifd migration"); + return false; + } +#else + if (cap_list[MIGRATION_CAPABILITY_ZERO_COPY_SEND]) { + error_setg(errp, + "Zero copy currently only available on Linux"); + return false; + } +#endif + + + /* incoming side only */ + if (runstate_check(RUN_STATE_INMIGRATE) && + !migrate_multi_channels_is_allowed() && + cap_list[MIGRATION_CAPABILITY_MULTIFD]) { + error_setg(errp, "multifd is not supported by current protocol"); + return false; + } + + if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT]) { + if (!cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) { + error_setg(errp, "Postcopy preempt requires postcopy-ram"); + return false; + } + + /* + * Preempt mode requires urgent pages to be sent in separate + * channel, OTOH compression logic will disorder all pages into + * different compression channels, which is not compatible with the + * preempt assumptions on channel assignments. 
+ */ + if (cap_list[MIGRATION_CAPABILITY_COMPRESS]) { + error_setg(errp, "Postcopy preempt not compatible with compress"); + return false; + } + } + + if (cap_list[MIGRATION_CAPABILITY_MULTIFD]) { + if (cap_list[MIGRATION_CAPABILITY_COMPRESS]) { + error_setg(errp, "Multifd is not compatible with compress"); + return false; + } + } + return true; } @@ -1464,6 +1572,16 @@ static bool migrate_params_check(MigrationParameters *params, Error **errp) return false; } +#ifdef CONFIG_LINUX + if (migrate_use_zero_copy_send() && + ((params->has_multifd_compression && params->multifd_compression) || + (params->has_tls_creds && params->tls_creds && *params->tls_creds))) { + error_setg(errp, + "Zero copy only available for non-compressed non-TLS multifd migration"); + return false; + } +#endif + return true; } @@ -1791,6 +1909,9 @@ static void migrate_fd_cleanup(MigrationState *s) qemu_bh_delete(s->cleanup_bh); s->cleanup_bh = NULL; + g_free(s->hostname); + s->hostname = NULL; + qemu_savevm_state_cleanup(); if (s->to_dst_file) { @@ -1817,6 +1938,12 @@ static void migrate_fd_cleanup(MigrationState *s) qemu_fclose(tmp); } + if (s->postcopy_qemufile_src) { + migration_ioc_unregister_yank_from_file(s->postcopy_qemufile_src); + qemu_fclose(s->postcopy_qemufile_src); + s->postcopy_qemufile_src = NULL; + } + assert(!migration_is_active(s)); if (s->state == MIGRATION_STATUS_CANCELLING) { @@ -1914,7 +2041,7 @@ static void migrate_fd_cancel(MigrationState *s) if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) { Error *local_err = NULL; - bdrv_invalidate_cache_all(&local_err); + bdrv_activate_all(&local_err); if (local_err) { error_report_err(local_err); } else { @@ -2053,6 +2180,20 @@ void migrate_init(MigrationState *s) s->threshold_size = 0; } +int migrate_add_blocker_internal(Error *reason, Error **errp) +{ + /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */ + if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) { + error_propagate_prepend(errp, error_copy(reason), + "disallowing migration blocker " + "(migration/snapshot in progress) for: "); + return -EBUSY; + } + + migration_blockers = g_slist_prepend(migration_blockers, reason); + return 0; +} + int migrate_add_blocker(Error *reason, Error **errp) { if (only_migratable) { @@ -2062,15 +2203,7 @@ int migrate_add_blocker(Error *reason, Error **errp) return -EACCES; } - if (migration_is_idle()) { - migration_blockers = g_slist_prepend(migration_blockers, reason); - return 0; - } - - error_propagate_prepend(errp, error_copy(reason), - "disallowing migration blocker " - "(migration in progress) for: "); - return -EBUSY; + return migrate_add_blocker_internal(reason, errp); } void migrate_del_blocker(Error *reason) @@ -2124,11 +2257,8 @@ void qmp_migrate_recover(const char *uri, Error **errp) return; } - if (qatomic_cmpxchg(&mis->postcopy_recover_triggered, - false, true) == true) { - error_setg(errp, "Migrate recovery is triggered already"); - return; - } + /* If there's an existing transport, release it */ + migration_incoming_transport_cleanup(mis); /* * Note that this call will never start a real migration; it will @@ -2136,12 +2266,6 @@ void qmp_migrate_recover(const char *uri, Error **errp) * to continue using that newly established channel. 
*/ qemu_start_incoming_migration(uri, errp); - - /* Safe to dereference with the assert above */ - if (*errp) { - /* Reset the flag so user could still retry */ - qatomic_set(&mis->postcopy_recover_triggered, false); - } } void qmp_migrate_pause(Error **errp) @@ -2266,10 +2390,11 @@ static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc, migrate_init(s); /* - * set ram_counters memory to zero for a + * set ram_counters compression_counters memory to zero for a * new migration */ memset(&ram_counters, 0, sizeof(ram_counters)); + memset(&compression_counters, 0, sizeof(compression_counters)); return true; } @@ -2294,9 +2419,11 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk, } } + migrate_protocol_allow_multi_channels(false); if (strstart(uri, "tcp:", &p) || strstart(uri, "unix:", NULL) || strstart(uri, "vsock:", NULL)) { + migrate_protocol_allow_multi_channels(true); socket_start_outgoing_migration(s, p ? p : uri, &local_err); #ifdef CONFIG_RDMA } else if (strstart(uri, "rdma:", &p)) { @@ -2330,7 +2457,7 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk, void qmp_migrate_cancel(Error **errp) { - migration_cancel(); + migration_cancel(NULL); } void qmp_migrate_continue(MigrationStatus state, Error **errp) @@ -2509,6 +2636,7 @@ MultiFDCompression migrate_multifd_compression(void) s = migrate_get_current(); + assert(s->parameters.multifd_compression < MULTIFD_COMPRESSION__MAX); return s->parameters.multifd_compression; } @@ -2530,6 +2658,26 @@ int migrate_multifd_zstd_level(void) return s->parameters.multifd_zstd_level; } +#ifdef CONFIG_LINUX +bool migrate_use_zero_copy_send(void) +{ + MigrationState *s; + + s = migrate_get_current(); + + return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_COPY_SEND]; +} +#endif + +int migrate_use_tls(void) +{ + MigrationState *s; + + s = migrate_get_current(); + + return s->parameters.tls_creds && *s->parameters.tls_creds; +} + int migrate_use_xbzrle(void) { MigrationState *s; @@ -2593,6 +2741,15 @@ bool migrate_background_snapshot(void) return s->enabled_capabilities[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]; } +bool migrate_postcopy_preempt(void) +{ + MigrationState *s; + + s = migrate_get_current(); + + return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT]; +} + /* migration thread support */ /* * Something bad happened to the RP stream, mark an error @@ -2625,7 +2782,7 @@ static struct rp_cmd_args { static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname, ram_addr_t start, size_t len) { - long our_host_ps = qemu_real_host_page_size; + long our_host_ps = qemu_real_host_page_size(); trace_migrate_handle_rp_req_pages(rbname, start, len); @@ -2633,8 +2790,8 @@ static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname, * Since we currently insist on matching page sizes, just sanity check * we're being asked for whole host pages. */ - if (start & (our_host_ps - 1) || - (len & (our_host_ps - 1))) { + if (!QEMU_IS_ALIGNED(start, our_host_ps) || + !QEMU_IS_ALIGNED(len, our_host_ps)) { error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT " len: %zd", __func__, start, len); mark_source_rp_bad(ms); @@ -2844,7 +3001,7 @@ retry: out: res = qemu_file_get_error(rp); if (res) { - if (res == -EIO && migration_in_postcopy()) { + if (res && migration_in_postcopy()) { /* * Maybe there is something we can do: it looks like a * network down issue, and we pause for a recovery. 
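/*
 * A minimal sketch of the caller side of the blocker API whose internals the
 * migrate_add_blocker() refactoring above reworks: register a blocker with a
 * reason, and drop it again on cleanup.  "mydev" and its helpers are
 * hypothetical names used only for illustration, not part of this series.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "migration/blocker.h"

static Error *mydev_mig_blocker;            /* hypothetical device blocker */

static int mydev_realize(Error **errp)
{
    error_setg(&mydev_mig_blocker, "mydev: live migration is not supported");
    if (migrate_add_blocker(mydev_mig_blocker, errp) < 0) {
        /* A migration or snapshot is already in progress */
        error_free(mydev_mig_blocker);
        mydev_mig_blocker = NULL;
        return -EBUSY;
    }
    return 0;
}

static void mydev_unrealize(void)
{
    if (mydev_mig_blocker) {
        migrate_del_blocker(mydev_mig_blocker);
        error_free(mydev_mig_blocker);
        mydev_mig_blocker = NULL;
    }
}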
@@ -2932,6 +3089,12 @@ static int postcopy_start(MigrationState *ms) int64_t bandwidth = migrate_max_postcopy_bandwidth(); bool restart_block = false; int cur_state = MIGRATION_STATUS_ACTIVE; + + if (postcopy_preempt_wait_channel(ms)) { + migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED); + return -1; + } + if (!migrate_pause_before_switchover()) { migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_POSTCOPY_ACTIVE); @@ -2973,10 +3136,7 @@ static int postcopy_start(MigrationState *ms) * that are dirty */ if (migrate_postcopy_ram()) { - if (ram_postcopy_send_discard_bitmap(ms)) { - error_report("postcopy send discard bitmap failed"); - goto fail; - } + ram_postcopy_send_discard_bitmap(ms); } /* @@ -3008,7 +3168,7 @@ static int postcopy_start(MigrationState *ms) */ bioc = qio_channel_buffer_new(4096); qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer"); - fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc)); + fb = qemu_file_new_output(QIO_CHANNEL(bioc)); object_unref(OBJECT(bioc)); /* @@ -3074,6 +3234,8 @@ static int postcopy_start(MigrationState *ms) MIGRATION_STATUS_FAILED); } + trace_postcopy_preempt_enabled(migrate_postcopy_preempt()); + return ret; fail_closefb: @@ -3087,7 +3249,7 @@ fail: */ Error *local_err = NULL; - bdrv_invalidate_cache_all(&local_err); + bdrv_activate_all(&local_err); if (local_err) { error_report_err(local_err); } @@ -3158,7 +3320,6 @@ static void migration_completion(MigrationState *s) ret = global_state_store(); if (!ret) { - bool inactivate = !migrate_colo_enabled(); ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); trace_migration_completion_vm_stop(ret); if (ret >= 0) { @@ -3166,12 +3327,15 @@ static void migration_completion(MigrationState *s) MIGRATION_STATUS_DEVICE); } if (ret >= 0) { + /* + * Inactivate disks except in COLO, and track that we + * have done so in order to remember to reactivate + * them if migration fails or is cancelled. 
+ */ + s->block_inactive = !migrate_colo_enabled(); qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX); ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false, - inactivate); - } - if (inactivate && ret >= 0) { - s->block_inactive = true; + s->block_inactive); } } qemu_mutex_unlock_iothread(); @@ -3182,9 +3346,17 @@ static void migration_completion(MigrationState *s) } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { trace_migration_completion_postcopy_end(); + qemu_mutex_lock_iothread(); qemu_savevm_state_complete_postcopy(s->to_dst_file); + qemu_mutex_unlock_iothread(); + + /* Shutdown the postcopy fast path thread */ + if (migrate_postcopy_preempt()) { + postcopy_preempt_shutdown_file(s); + } + trace_migration_completion_postcopy_end_after_complete(); - } else if (s->state == MIGRATION_STATUS_CANCELLING) { + } else { goto fail; } @@ -3200,32 +3372,37 @@ static void migration_completion(MigrationState *s) rp_error = await_return_path_close_on_source(s); trace_migration_return_path_end_after(rp_error); if (rp_error) { - goto fail_invalidate; + goto fail; } } if (qemu_file_get_error(s->to_dst_file)) { trace_migration_completion_file_err(); - goto fail_invalidate; + goto fail; } - if (!migrate_colo_enabled()) { + if (migrate_colo_enabled() && s->state == MIGRATION_STATUS_ACTIVE) { + /* COLO does not support postcopy */ + migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, + MIGRATION_STATUS_COLO); + } else { migrate_set_state(&s->state, current_active_state, MIGRATION_STATUS_COMPLETED); } return; -fail_invalidate: - /* If not doing postcopy, vm_start() will be called: let's regain - * control on images. - */ - if (s->state == MIGRATION_STATUS_ACTIVE || - s->state == MIGRATION_STATUS_DEVICE) { +fail: + if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE || + s->state == MIGRATION_STATUS_DEVICE)) { + /* + * If not doing postcopy, vm_start() will be called: let's + * regain control on images. + */ Error *local_err = NULL; qemu_mutex_lock_iothread(); - bdrv_invalidate_cache_all(&local_err); + bdrv_activate_all(&local_err); if (local_err) { error_report_err(local_err); } else { @@ -3234,7 +3411,6 @@ fail_invalidate: qemu_mutex_unlock_iothread(); } -fail: migrate_set_state(&s->state, current_active_state, MIGRATION_STATUS_FAILED); } @@ -3373,6 +3549,18 @@ static MigThrError postcopy_pause(MigrationState *s) qemu_file_shutdown(file); qemu_fclose(file); + /* + * Do the same to postcopy fast path socket too if there is. No + * locking needed because no racer as long as we do this before setting + * status to paused. + */ + if (s->postcopy_qemufile_src) { + migration_ioc_unregister_yank_from_file(s->postcopy_qemufile_src); + qemu_file_shutdown(s->postcopy_qemufile_src); + qemu_fclose(s->postcopy_qemufile_src); + s->postcopy_qemufile_src = NULL; + } + migrate_set_state(&s->state, s->state, MIGRATION_STATUS_POSTCOPY_PAUSED); @@ -3390,6 +3578,14 @@ static MigThrError postcopy_pause(MigrationState *s) if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { /* Woken up by a recover procedure. Give it a shot */ + if (postcopy_preempt_wait_channel(s)) { + /* + * Preempt enabled, and new channel create failed; loop + * back to wait for another recovery. + */ + continue; + } + /* * Firstly, let's wake up the return path now, with a new * return path channel. 
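/*
 * A minimal sketch, using QEMU's semaphore primitives, of the pause/resume
 * handshake that the recovery path above relies on: a worker thread parks on
 * qemu_sem_wait() when its channel breaks, and the main migration thread
 * posts the semaphore once a new channel is established, keeping post() and
 * wait() in pairs.  The FastLoadState type and function names are
 * hypothetical, for illustration only.
 */
#include "qemu/osdep.h"
#include "qemu/thread.h"

typedef struct {
    QemuSemaphore resume_sem;   /* posted once per successful recovery */
} FastLoadState;

static void fast_load_state_init(FastLoadState *s)
{
    /* Semaphore starts at 0, so the first wait blocks until a post */
    qemu_sem_init(&s->resume_sem, 0);
}

static void fast_load_wait_for_recovery(FastLoadState *s)
{
    /* Called from the worker thread when it detects a broken channel */
    qemu_sem_wait(&s->resume_sem);
}

static void main_thread_kick_fast_load(FastLoadState *s)
{
    /* Called from the main migration thread after the new channel is up */
    qemu_sem_post(&s->resume_sem);
}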
@@ -3428,8 +3624,13 @@ static MigThrError migration_detect_error(MigrationState *s) return MIG_THR_ERR_FATAL; } - /* Try to detect any file errors */ - ret = qemu_file_get_error_obj(s->to_dst_file, &local_error); + /* + * Try to detect any file errors. Note that postcopy_qemufile_src will + * be NULL when postcopy preempt is not enabled. + */ + ret = qemu_file_get_error_obj_any(s->to_dst_file, + s->postcopy_qemufile_src, + &local_error); if (!ret) { /* Everything is fine */ assert(!local_error); @@ -3441,7 +3642,7 @@ static MigThrError migration_detect_error(MigrationState *s) error_free(local_error); } - if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret == -EIO) { + if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret) { /* * For postcopy, we allow the network to be down for a * while. After that, it can be continued by a @@ -3464,7 +3665,8 @@ static MigThrError migration_detect_error(MigrationState *s) /* How many bytes have we transferred since the beginning of the migration */ static uint64_t migration_total_bytes(MigrationState *s) { - return qemu_ftell(s->to_dst_file) + ram_counters.multifd_bytes; + return qemu_file_total_transferred(s->to_dst_file) + + ram_counters.multifd_bytes; } static void migration_calculate_complete(MigrationState *s) @@ -3594,28 +3796,21 @@ static void migration_iteration_finish(MigrationState *s) migration_calculate_complete(s); runstate_set(RUN_STATE_POSTMIGRATE); break; - - case MIGRATION_STATUS_ACTIVE: - /* - * We should really assert here, but since it's during - * migration, let's try to reduce the usage of assertions. - */ + case MIGRATION_STATUS_COLO: if (!migrate_colo_enabled()) { error_report("%s: critical error: calling COLO code without " "COLO enabled", __func__); } migrate_start_colo_process(s); - /* - * Fixme: we will run VM in COLO no matter its old running state. - * After exited COLO, we will keep running. 
- */ s->vm_was_running = true; /* Fallthrough */ case MIGRATION_STATUS_FAILED: case MIGRATION_STATUS_CANCELLED: case MIGRATION_STATUS_CANCELLING: if (s->vm_was_running) { - vm_start(); + if (!runstate_check(RUN_STATE_SHUTDOWN)) { + vm_start(); + } } else { if (runstate_check(RUN_STATE_FINISH_MIGRATE)) { runstate_set(RUN_STATE_POSTMIGRATE); @@ -3743,7 +3938,8 @@ static void qemu_savevm_wait_unplug(MigrationState *s, int old_state, while (timeout-- && qemu_savevm_state_guest_unplug_pending()) { qemu_sem_timedwait(&s->wait_unplug_sem, 250); } - if (qemu_savevm_state_guest_unplug_pending()) { + if (qemu_savevm_state_guest_unplug_pending() && + !qtest_enabled()) { warn_report("migration: partially unplugged device on " "failure"); } @@ -3896,7 +4092,7 @@ static void *bg_migration_thread(void *opaque) */ s->bioc = qio_channel_buffer_new(512 * 1024); qio_channel_set_name(QIO_CHANNEL(s->bioc), "vmstate-buffer"); - fb = qemu_fopen_channel_output(QIO_CHANNEL(s->bioc)); + fb = qemu_file_new_output(QIO_CHANNEL(s->bioc)); object_unref(OBJECT(s->bioc)); update_iteration_initial_status(s); @@ -4072,6 +4268,15 @@ void migrate_fd_connect(MigrationState *s, Error *error_in) } } + /* This needs to be done before resuming a postcopy */ + if (postcopy_preempt_setup(s, &local_err)) { + error_report_err(local_err); + migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, + MIGRATION_STATUS_FAILED); + migrate_fd_cleanup(s); + return; + } + if (resume) { /* Wakeup the main migration thread to do the recovery */ migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED, @@ -4196,6 +4401,11 @@ static Property migration_properties[] = { DEFINE_PROP_SIZE("announce-step", MigrationState, parameters.announce_step, DEFAULT_MIGRATE_ANNOUNCE_STEP), + DEFINE_PROP_BOOL("x-postcopy-preempt-break-huge", MigrationState, + postcopy_preempt_break_huge, true), + DEFINE_PROP_STRING("tls-creds", MigrationState, parameters.tls_creds), + DEFINE_PROP_STRING("tls-hostname", MigrationState, parameters.tls_hostname), + DEFINE_PROP_STRING("tls-authz", MigrationState, parameters.tls_authz), /* Migration capabilities */ DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE), @@ -4205,6 +4415,8 @@ static Property migration_properties[] = { DEFINE_PROP_MIG_CAP("x-compress", MIGRATION_CAPABILITY_COMPRESS), DEFINE_PROP_MIG_CAP("x-events", MIGRATION_CAPABILITY_EVENTS), DEFINE_PROP_MIG_CAP("x-postcopy-ram", MIGRATION_CAPABILITY_POSTCOPY_RAM), + DEFINE_PROP_MIG_CAP("x-postcopy-preempt", + MIGRATION_CAPABILITY_POSTCOPY_PREEMPT), DEFINE_PROP_MIG_CAP("x-colo", MIGRATION_CAPABILITY_X_COLO), DEFINE_PROP_MIG_CAP("x-release-ram", MIGRATION_CAPABILITY_RELEASE_RAM), DEFINE_PROP_MIG_CAP("x-block", MIGRATION_CAPABILITY_BLOCK), @@ -4212,6 +4424,10 @@ static Property migration_properties[] = { DEFINE_PROP_MIG_CAP("x-multifd", MIGRATION_CAPABILITY_MULTIFD), DEFINE_PROP_MIG_CAP("x-background-snapshot", MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT), +#ifdef CONFIG_LINUX + DEFINE_PROP_MIG_CAP("x-zero-copy-send", + MIGRATION_CAPABILITY_ZERO_COPY_SEND), +#endif DEFINE_PROP_END_OF_LIST(), }; @@ -4227,18 +4443,16 @@ static void migration_class_init(ObjectClass *klass, void *data) static void migration_instance_finalize(Object *obj) { MigrationState *ms = MIGRATION_OBJ(obj); - MigrationParameters *params = &ms->parameters; qemu_mutex_destroy(&ms->error_mutex); qemu_mutex_destroy(&ms->qemu_file_lock); - g_free(params->tls_hostname); - g_free(params->tls_creds); qemu_sem_destroy(&ms->wait_unplug_sem); qemu_sem_destroy(&ms->rate_limit_sem); 
qemu_sem_destroy(&ms->pause_sem); qemu_sem_destroy(&ms->postcopy_pause_sem); qemu_sem_destroy(&ms->postcopy_pause_rp_sem); qemu_sem_destroy(&ms->rp_state.rp_sem); + qemu_sem_destroy(&ms->postcopy_qemufile_src_sem); error_free(ms->error); } @@ -4259,6 +4473,7 @@ static void migration_instance_init(Object *obj) /* Set has_* up only for parameter checks */ params->has_compress_level = true; params->has_compress_threads = true; + params->has_compress_wait_thread = true; params->has_decompress_threads = true; params->has_throttle_trigger_threshold = true; params->has_cpu_throttle_initial = true; @@ -4279,12 +4494,16 @@ static void migration_instance_init(Object *obj) params->has_announce_max = true; params->has_announce_rounds = true; params->has_announce_step = true; + params->has_tls_creds = true; + params->has_tls_hostname = true; + params->has_tls_authz = true; qemu_sem_init(&ms->postcopy_pause_sem, 0); qemu_sem_init(&ms->postcopy_pause_rp_sem, 0); qemu_sem_init(&ms->rp_state.rp_sem, 0); qemu_sem_init(&ms->rate_limit_sem, 0); qemu_sem_init(&ms->wait_unplug_sem, 0); + qemu_sem_init(&ms->postcopy_qemufile_src_sem, 0); qemu_mutex_init(&ms->qemu_file_lock); } diff --git a/migration/migration.h b/migration/migration.h index 7a5aa8c2fd..cdad8aceaa 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -23,6 +23,7 @@ #include "io/channel-buffer.h" #include "net/announce.h" #include "qom/object.h" +#include "postcopy-ram.h" struct PostcopyBlocktimeContext; @@ -45,14 +46,37 @@ struct PostcopyBlocktimeContext; */ #define CLEAR_BITMAP_SHIFT_MAX 31 +/* This is an abstraction of a "temp huge page" for postcopy's purpose */ +typedef struct { + /* + * This points to a temporary huge page as a buffer for UFFDIO_COPY. It's + * mmap()ed and needs to be freed when cleanup. + */ + void *tmp_huge_page; + /* + * This points to the host page we're going to install for this temp page. + * It tells us after we've received the whole page, where we should put it. + */ + void *host_addr; + /* Number of small pages copied (in size of TARGET_PAGE_SIZE) */ + unsigned int target_pages; + /* Whether this page contains all zeros */ + bool all_zero; +} PostcopyTmpPage; + /* State for the incoming migration */ struct MigrationIncomingState { QEMUFile *from_src_file; - + /* Previously received RAM's RAMBlock pointer */ + RAMBlock *last_recv_block[RAM_CHANNEL_MAX]; /* A hook to allow cleanup at the end of incoming migration */ void *transport_data; void (*transport_cleanup)(void *data); - + /* + * Used to sync thread creations. Note that we can't create threads in + * parallel with this sem. + */ + QemuSemaphore thread_sync_sem; /* * Free at the start of the main state load, set as the main thread finishes * loading state. @@ -65,13 +89,11 @@ struct MigrationIncomingState { size_t largest_page_size; bool have_fault_thread; QemuThread fault_thread; - QemuSemaphore fault_thread_sem; /* Set this when we want the fault thread to quit */ bool fault_thread_quit; bool have_listen_thread; QemuThread listen_thread; - QemuSemaphore listen_thread_sem; /* For the kernel to send us notifications */ int userfault_fd; @@ -81,7 +103,39 @@ struct MigrationIncomingState { QemuMutex rp_mutex; /* We send replies from multiple threads */ /* RAMBlock of last request sent to source */ RAMBlock *last_rb; - void *postcopy_tmp_page; + /* + * Number of postcopy channels including the default precopy channel, so + * vanilla postcopy will only contain one channel which contain both + * precopy and postcopy streams. 
+ * + * This is calculated when the src requests to enable postcopy but before + * it starts. Its value can depend on e.g. whether postcopy preemption is + * enabled. + */ + unsigned int postcopy_channels; + /* QEMUFile for postcopy only; it'll be handled by a separate thread */ + QEMUFile *postcopy_qemufile_dst; + /* Postcopy priority thread is used to receive postcopy requested pages */ + QemuThread postcopy_prio_thread; + bool postcopy_prio_thread_created; + /* + * Used to sync between the ram load main thread and the fast ram load + * thread. It protects postcopy_qemufile_dst, which is the postcopy + * fast channel. + * + * The ram fast load thread will take it mostly for the whole lifecycle + * because it needs to continuously read data from the channel, and + * it'll only release this mutex if postcopy is interrupted, so that + * the ram load main thread will take this mutex over and properly + * release the broken channel. + */ + QemuMutex postcopy_prio_thread_mutex; + /* + * An array of temp host huge pages to be used, one for each postcopy + * channel. + */ + PostcopyTmpPage *postcopy_tmp_pages; + /* This is shared for all postcopy channels */ void *postcopy_tmp_zero_page; /* PostCopyFD's for external userfaultfds & handlers of shared memory */ GArray *postcopy_remote_fds; @@ -103,9 +157,15 @@ struct MigrationIncomingState { struct PostcopyBlocktimeContext *blocktime_ctx; /* notify PAUSED postcopy incoming migrations to try to continue */ - bool postcopy_recover_triggered; QemuSemaphore postcopy_pause_sem_dst; QemuSemaphore postcopy_pause_sem_fault; + /* + * This semaphore is used to allow the ram fast load thread (only when + * postcopy preempt is enabled) fall into sleep when there's network + * interruption detected. When the recovery is done, the main load + * thread will kick the fast ram load thread using this semaphore. + */ + QemuSemaphore postcopy_pause_sem_fast_load; /* List of listening socket addresses */ SocketAddressList *socket_address_list; @@ -130,6 +190,7 @@ struct MigrationIncomingState { MigrationIncomingState *migration_incoming_get_current(void); void migration_incoming_state_destroy(void); +void migration_incoming_transport_cleanup(MigrationIncomingState *mis); /* * Functions to work with blocktime context */ @@ -156,6 +217,15 @@ struct MigrationState { QEMUBH *cleanup_bh; /* Protected by qemu_file_lock */ QEMUFile *to_dst_file; + /* Postcopy specific transfer channel */ + QEMUFile *postcopy_qemufile_src; + /* + * It is posted when the preempt channel is established. Note: this is + * used for both the start or recover of a postcopy migration. We'll + * post to this sem every time a new preempt channel is created in the + * main thread, and we keep post() and wait() in pair. + */ + QemuSemaphore postcopy_qemufile_src_sem; QIOChannelBuffer *bioc; /* * Protects to_dst_file/from_dst_file pointers. We need to make sure we @@ -270,6 +340,13 @@ struct MigrationState { bool send_configuration; /* Whether we send section footer during migration */ bool send_section_footer; + /* + * Whether we allow break sending huge pages when postcopy preempt is + * enabled. When disabled, we won't interrupt precopy within sending a + * host huge page, which is the old behavior of vanilla postcopy. + * NOTE: this parameter is ignored if postcopy preempt is not enabled. 
+ */ + bool postcopy_preempt_break_huge; /* Needed by postcopy-pause state */ QemuSemaphore postcopy_pause_sem; @@ -339,6 +416,12 @@ MultiFDCompression migrate_multifd_compression(void); int migrate_multifd_zlib_level(void); int migrate_multifd_zstd_level(void); +#ifdef CONFIG_LINUX +bool migrate_use_zero_copy_send(void); +#else +#define migrate_use_zero_copy_send() (false) +#endif +int migrate_use_tls(void); int migrate_use_xbzrle(void); uint64_t migrate_xbzrle_cache_size(void); bool migrate_colo_enabled(void); @@ -358,6 +441,7 @@ int migrate_decompress_threads(void); bool migrate_use_events(void); bool migrate_postcopy_blocktime(void); bool migrate_background_snapshot(void); +bool migrate_postcopy_preempt(void); /* Sending on the return path - generic and then for each message type */ void migrate_send_rp_shut(MigrationIncomingState *mis, @@ -388,8 +472,12 @@ int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque); void migration_make_urgent_request(void); void migration_consume_urgent_request(void); bool migration_rate_limit(void); -void migration_cancel(void); +void migration_cancel(const Error *error); void populate_vfio_info(MigrationInfo *info); +void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page); + +bool migrate_multi_channels_is_allowed(void); +void migrate_protocol_allow_multi_channels(bool allow); #endif diff --git a/migration/multifd-zlib.c b/migration/multifd-zlib.c index ab4ba75d75..18213a9513 100644 --- a/migration/multifd-zlib.c +++ b/migration/multifd-zlib.c @@ -13,6 +13,7 @@ #include "qemu/osdep.h" #include #include "qemu/rcu.h" +#include "exec/ramblock.h" #include "exec/target_page.h" #include "qapi/error.h" #include "migration.h" @@ -26,6 +27,8 @@ struct zlib_data { uint8_t *zbuff; /* size of compressed buffer */ uint32_t zbuff_len; + /* uncompressed buffer of size qemu_target_page_size() */ + uint8_t *buf; }; /* Multifd zlib compression */ @@ -42,30 +45,40 @@ struct zlib_data { */ static int zlib_send_setup(MultiFDSendParams *p, Error **errp) { - uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size(); - struct zlib_data *z = g_malloc0(sizeof(struct zlib_data)); + struct zlib_data *z = g_new0(struct zlib_data, 1); z_stream *zs = &z->zs; + const char *err_msg; zs->zalloc = Z_NULL; zs->zfree = Z_NULL; zs->opaque = Z_NULL; if (deflateInit(zs, migrate_multifd_zlib_level()) != Z_OK) { - g_free(z); - error_setg(errp, "multifd %d: deflate init failed", p->id); - return -1; + err_msg = "deflate init failed"; + goto err_free_z; } - /* We will never have more than page_count pages */ - z->zbuff_len = page_count * qemu_target_page_size(); - z->zbuff_len *= 2; + /* This is the maxium size of the compressed buffer */ + z->zbuff_len = compressBound(MULTIFD_PACKET_SIZE); z->zbuff = g_try_malloc(z->zbuff_len); if (!z->zbuff) { - deflateEnd(&z->zs); - g_free(z); - error_setg(errp, "multifd %d: out of memory for zbuff", p->id); - return -1; + err_msg = "out of memory for zbuff"; + goto err_deflate_end; + } + z->buf = g_try_malloc(qemu_target_page_size()); + if (!z->buf) { + err_msg = "out of memory for buf"; + goto err_free_zbuff; } p->data = z; return 0; + +err_free_zbuff: + g_free(z->zbuff); +err_deflate_end: + deflateEnd(&z->zs); +err_free_z: + g_free(z); + error_setg(errp, "multifd %u: %s", p->id, err_msg); + return -1; } /** @@ -74,6 +87,7 @@ static int zlib_send_setup(MultiFDSendParams *p, Error **errp) * Close the channel and return memory. 
* * @p: Params for the channel that we are using + * @errp: pointer to an error */ static void zlib_send_cleanup(MultiFDSendParams *p, Error **errp) { @@ -82,6 +96,8 @@ static void zlib_send_cleanup(MultiFDSendParams *p, Error **errp) deflateEnd(&z->zs); g_free(z->zbuff); z->zbuff = NULL; + g_free(z->buf); + z->buf = NULL; g_free(p->data); p->data = NULL; } @@ -95,27 +111,33 @@ static void zlib_send_cleanup(MultiFDSendParams *p, Error **errp) * Returns 0 for success or -1 for error * * @p: Params for the channel that we are using - * @used: number of pages used + * @errp: pointer to an error */ -static int zlib_send_prepare(MultiFDSendParams *p, uint32_t used, Error **errp) +static int zlib_send_prepare(MultiFDSendParams *p, Error **errp) { - struct iovec *iov = p->pages->iov; struct zlib_data *z = p->data; + size_t page_size = qemu_target_page_size(); z_stream *zs = &z->zs; uint32_t out_size = 0; int ret; uint32_t i; - for (i = 0; i < used; i++) { + for (i = 0; i < p->normal_num; i++) { uint32_t available = z->zbuff_len - out_size; int flush = Z_NO_FLUSH; - if (i == used - 1) { + if (i == p->normal_num - 1) { flush = Z_SYNC_FLUSH; } - zs->avail_in = iov[i].iov_len; - zs->next_in = iov[i].iov_base; + /* + * Since the VM might be running, the page may be changing concurrently + * with compression. zlib does not guarantee that this is safe, + * therefore copy the page before calling deflate(). + */ + memcpy(z->buf, p->pages->block->host + p->normal[i], page_size); + zs->avail_in = page_size; + zs->next_in = z->buf; zs->avail_out = available; zs->next_out = z->zbuff + out_size; @@ -132,42 +154,26 @@ static int zlib_send_prepare(MultiFDSendParams *p, uint32_t used, Error **errp) ret = deflate(zs, flush); } while (ret == Z_OK && zs->avail_in && zs->avail_out); if (ret == Z_OK && zs->avail_in) { - error_setg(errp, "multifd %d: deflate failed to compress all input", + error_setg(errp, "multifd %u: deflate failed to compress all input", p->id); return -1; } if (ret != Z_OK) { - error_setg(errp, "multifd %d: deflate returned %d instead of Z_OK", + error_setg(errp, "multifd %u: deflate returned %d instead of Z_OK", p->id, ret); return -1; } out_size += available - zs->avail_out; } + p->iov[p->iovs_num].iov_base = z->zbuff; + p->iov[p->iovs_num].iov_len = out_size; + p->iovs_num++; p->next_packet_size = out_size; p->flags |= MULTIFD_FLAG_ZLIB; return 0; } -/** - * zlib_send_write: do the actual write of the data - * - * Do the actual write of the comprresed buffer. 
- * - * Returns 0 for success or -1 for error - * - * @p: Params for the channel that we are using - * @used: number of pages used - * @errp: pointer to an error - */ -static int zlib_send_write(MultiFDSendParams *p, uint32_t used, Error **errp) -{ - struct zlib_data *z = p->data; - - return qio_channel_write_all(p->c, (void *)z->zbuff, p->next_packet_size, - errp); -} - /** * zlib_recv_setup: setup receive side * @@ -180,8 +186,7 @@ static int zlib_send_write(MultiFDSendParams *p, uint32_t used, Error **errp) */ static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp) { - uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size(); - struct zlib_data *z = g_malloc0(sizeof(struct zlib_data)); + struct zlib_data *z = g_new0(struct zlib_data, 1); z_stream *zs = &z->zs; p->data = z; @@ -191,17 +196,15 @@ static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp) zs->avail_in = 0; zs->next_in = Z_NULL; if (inflateInit(zs) != Z_OK) { - error_setg(errp, "multifd %d: inflate init failed", p->id); + error_setg(errp, "multifd %u: inflate init failed", p->id); return -1; } - /* We will never have more than page_count pages */ - z->zbuff_len = page_count * qemu_target_page_size(); - /* We know compression "could" use more space */ - z->zbuff_len *= 2; + /* To be safe, we reserve twice the size of the packet */ + z->zbuff_len = MULTIFD_PACKET_SIZE * 2; z->zbuff = g_try_malloc(z->zbuff_len); if (!z->zbuff) { inflateEnd(zs); - error_setg(errp, "multifd %d: out of memory for zbuff", p->id); + error_setg(errp, "multifd %u: out of memory for zbuff", p->id); return -1; } return 0; @@ -234,23 +237,23 @@ static void zlib_recv_cleanup(MultiFDRecvParams *p) * Returns 0 for success or -1 for error * * @p: Params for the channel that we are using - * @used: number of pages used * @errp: pointer to an error */ -static int zlib_recv_pages(MultiFDRecvParams *p, uint32_t used, Error **errp) +static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp) { struct zlib_data *z = p->data; + size_t page_size = qemu_target_page_size(); z_stream *zs = &z->zs; uint32_t in_size = p->next_packet_size; /* we measure the change of total_out */ uint32_t out_size = zs->total_out; - uint32_t expected_size = used * qemu_target_page_size(); + uint32_t expected_size = p->normal_num * page_size; uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK; int ret; int i; if (flags != MULTIFD_FLAG_ZLIB) { - error_setg(errp, "multifd %d: flags received %x flags expected %x", + error_setg(errp, "multifd %u: flags received %x flags expected %x", p->id, flags, MULTIFD_FLAG_ZLIB); return -1; } @@ -263,17 +266,16 @@ static int zlib_recv_pages(MultiFDRecvParams *p, uint32_t used, Error **errp) zs->avail_in = in_size; zs->next_in = z->zbuff; - for (i = 0; i < used; i++) { - struct iovec *iov = &p->pages->iov[i]; + for (i = 0; i < p->normal_num; i++) { int flush = Z_NO_FLUSH; unsigned long start = zs->total_out; - if (i == used - 1) { + if (i == p->normal_num - 1) { flush = Z_SYNC_FLUSH; } - zs->avail_out = iov->iov_len; - zs->next_out = iov->iov_base; + zs->avail_out = page_size; + zs->next_out = p->host + p->normal[i]; /* * Welcome to inflate semantics @@ -286,21 +288,21 @@ static int zlib_recv_pages(MultiFDRecvParams *p, uint32_t used, Error **errp) do { ret = inflate(zs, flush); } while (ret == Z_OK && zs->avail_in - && (zs->total_out - start) < iov->iov_len); - if (ret == Z_OK && (zs->total_out - start) < iov->iov_len) { - error_setg(errp, "multifd %d: inflate generated too few output", + && (zs->total_out - start) < 
page_size); + if (ret == Z_OK && (zs->total_out - start) < page_size) { + error_setg(errp, "multifd %u: inflate generated too few output", p->id); return -1; } if (ret != Z_OK) { - error_setg(errp, "multifd %d: inflate returned %d instead of Z_OK", + error_setg(errp, "multifd %u: inflate returned %d instead of Z_OK", p->id, ret); return -1; } } out_size = zs->total_out - out_size; if (out_size != expected_size) { - error_setg(errp, "multifd %d: packet size received %d size expected %d", + error_setg(errp, "multifd %u: packet size received %u size expected %u", p->id, out_size, expected_size); return -1; } @@ -311,7 +313,6 @@ static MultiFDMethods multifd_zlib_ops = { .send_setup = zlib_send_setup, .send_cleanup = zlib_send_cleanup, .send_prepare = zlib_send_prepare, - .send_write = zlib_send_write, .recv_setup = zlib_recv_setup, .recv_cleanup = zlib_recv_cleanup, .recv_pages = zlib_recv_pages diff --git a/migration/multifd-zstd.c b/migration/multifd-zstd.c index 693bddf8c9..d788d309f2 100644 --- a/migration/multifd-zstd.c +++ b/migration/multifd-zstd.c @@ -13,6 +13,7 @@ #include "qemu/osdep.h" #include #include "qemu/rcu.h" +#include "exec/ramblock.h" #include "exec/target_page.h" #include "qapi/error.h" #include "migration.h" @@ -47,7 +48,6 @@ struct zstd_data { */ static int zstd_send_setup(MultiFDSendParams *p, Error **errp) { - uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size(); struct zstd_data *z = g_new0(struct zstd_data, 1); int res; @@ -55,7 +55,7 @@ static int zstd_send_setup(MultiFDSendParams *p, Error **errp) z->zcs = ZSTD_createCStream(); if (!z->zcs) { g_free(z); - error_setg(errp, "multifd %d: zstd createCStream failed", p->id); + error_setg(errp, "multifd %u: zstd createCStream failed", p->id); return -1; } @@ -63,18 +63,17 @@ static int zstd_send_setup(MultiFDSendParams *p, Error **errp) if (ZSTD_isError(res)) { ZSTD_freeCStream(z->zcs); g_free(z); - error_setg(errp, "multifd %d: initCStream failed with error %s", + error_setg(errp, "multifd %u: initCStream failed with error %s", p->id, ZSTD_getErrorName(res)); return -1; } - /* We will never have more than page_count pages */ - z->zbuff_len = page_count * qemu_target_page_size(); - z->zbuff_len *= 2; + /* This is the maxium size of the compressed buffer */ + z->zbuff_len = ZSTD_compressBound(MULTIFD_PACKET_SIZE); z->zbuff = g_try_malloc(z->zbuff_len); if (!z->zbuff) { ZSTD_freeCStream(z->zcs); g_free(z); - error_setg(errp, "multifd %d: out of memory for zbuff", p->id); + error_setg(errp, "multifd %u: out of memory for zbuff", p->id); return -1; } return 0; @@ -86,6 +85,7 @@ static int zstd_send_setup(MultiFDSendParams *p, Error **errp) * Close the channel and return memory. 
* * @p: Params for the channel that we are using + * @errp: pointer to an error */ static void zstd_send_cleanup(MultiFDSendParams *p, Error **errp) { @@ -108,12 +108,12 @@ static void zstd_send_cleanup(MultiFDSendParams *p, Error **errp) * Returns 0 for success or -1 for error * * @p: Params for the channel that we are using - * @used: number of pages used + * @errp: pointer to an error */ -static int zstd_send_prepare(MultiFDSendParams *p, uint32_t used, Error **errp) +static int zstd_send_prepare(MultiFDSendParams *p, Error **errp) { - struct iovec *iov = p->pages->iov; struct zstd_data *z = p->data; + size_t page_size = qemu_target_page_size(); int ret; uint32_t i; @@ -121,14 +121,14 @@ static int zstd_send_prepare(MultiFDSendParams *p, uint32_t used, Error **errp) z->out.size = z->zbuff_len; z->out.pos = 0; - for (i = 0; i < used; i++) { + for (i = 0; i < p->normal_num; i++) { ZSTD_EndDirective flush = ZSTD_e_continue; - if (i == used - 1) { + if (i == p->normal_num - 1) { flush = ZSTD_e_flush; } - z->in.src = iov[i].iov_base; - z->in.size = iov[i].iov_len; + z->in.src = p->pages->block->host + p->normal[i]; + z->in.size = page_size; z->in.pos = 0; /* @@ -144,41 +144,25 @@ static int zstd_send_prepare(MultiFDSendParams *p, uint32_t used, Error **errp) } while (ret > 0 && (z->in.size - z->in.pos > 0) && (z->out.size - z->out.pos > 0)); if (ret > 0 && (z->in.size - z->in.pos > 0)) { - error_setg(errp, "multifd %d: compressStream buffer too small", + error_setg(errp, "multifd %u: compressStream buffer too small", p->id); return -1; } if (ZSTD_isError(ret)) { - error_setg(errp, "multifd %d: compressStream error %s", + error_setg(errp, "multifd %u: compressStream error %s", p->id, ZSTD_getErrorName(ret)); return -1; } } + p->iov[p->iovs_num].iov_base = z->zbuff; + p->iov[p->iovs_num].iov_len = z->out.pos; + p->iovs_num++; p->next_packet_size = z->out.pos; p->flags |= MULTIFD_FLAG_ZSTD; return 0; } -/** - * zstd_send_write: do the actual write of the data - * - * Do the actual write of the comprresed buffer. 
- * - * Returns 0 for success or -1 for error - * - * @p: Params for the channel that we are using - * @used: number of pages used - * @errp: pointer to an error - */ -static int zstd_send_write(MultiFDSendParams *p, uint32_t used, Error **errp) -{ - struct zstd_data *z = p->data; - - return qio_channel_write_all(p->c, (void *)z->zbuff, p->next_packet_size, - errp); -} - /** * zstd_recv_setup: setup receive side * @@ -191,7 +175,6 @@ static int zstd_send_write(MultiFDSendParams *p, uint32_t used, Error **errp) */ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp) { - uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size(); struct zstd_data *z = g_new0(struct zstd_data, 1); int ret; @@ -199,7 +182,7 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp) z->zds = ZSTD_createDStream(); if (!z->zds) { g_free(z); - error_setg(errp, "multifd %d: zstd createDStream failed", p->id); + error_setg(errp, "multifd %u: zstd createDStream failed", p->id); return -1; } @@ -207,20 +190,18 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp) if (ZSTD_isError(ret)) { ZSTD_freeDStream(z->zds); g_free(z); - error_setg(errp, "multifd %d: initDStream failed with error %s", + error_setg(errp, "multifd %u: initDStream failed with error %s", p->id, ZSTD_getErrorName(ret)); return -1; } - /* We will never have more than page_count pages */ - z->zbuff_len = page_count * qemu_target_page_size(); - /* We know compression "could" use more space */ - z->zbuff_len *= 2; + /* To be safe, we reserve twice the size of the packet */ + z->zbuff_len = MULTIFD_PACKET_SIZE * 2; z->zbuff = g_try_malloc(z->zbuff_len); if (!z->zbuff) { ZSTD_freeDStream(z->zds); g_free(z); - error_setg(errp, "multifd %d: out of memory for zbuff", p->id); + error_setg(errp, "multifd %u: out of memory for zbuff", p->id); return -1; } return 0; @@ -254,21 +235,21 @@ static void zstd_recv_cleanup(MultiFDRecvParams *p) * Returns 0 for success or -1 for error * * @p: Params for the channel that we are using - * @used: number of pages used * @errp: pointer to an error */ -static int zstd_recv_pages(MultiFDRecvParams *p, uint32_t used, Error **errp) +static int zstd_recv_pages(MultiFDRecvParams *p, Error **errp) { uint32_t in_size = p->next_packet_size; uint32_t out_size = 0; - uint32_t expected_size = used * qemu_target_page_size(); + size_t page_size = qemu_target_page_size(); + uint32_t expected_size = p->normal_num * page_size; uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK; struct zstd_data *z = p->data; int ret; int i; if (flags != MULTIFD_FLAG_ZSTD) { - error_setg(errp, "multifd %d: flags received %x flags expected %x", + error_setg(errp, "multifd %u: flags received %x flags expected %x", p->id, flags, MULTIFD_FLAG_ZSTD); return -1; } @@ -282,11 +263,9 @@ static int zstd_recv_pages(MultiFDRecvParams *p, uint32_t used, Error **errp) z->in.size = in_size; z->in.pos = 0; - for (i = 0; i < used; i++) { - struct iovec *iov = &p->pages->iov[i]; - - z->out.dst = iov->iov_base; - z->out.size = iov->iov_len; + for (i = 0; i < p->normal_num; i++) { + z->out.dst = p->host + p->normal[i]; + z->out.size = page_size; z->out.pos = 0; /* @@ -300,21 +279,21 @@ static int zstd_recv_pages(MultiFDRecvParams *p, uint32_t used, Error **errp) do { ret = ZSTD_decompressStream(z->zds, &z->out, &z->in); } while (ret > 0 && (z->in.size - z->in.pos > 0) - && (z->out.pos < iov->iov_len)); - if (ret > 0 && (z->out.pos < iov->iov_len)) { - error_setg(errp, "multifd %d: decompressStream buffer too small", + && (z->out.pos < 
page_size)); + if (ret > 0 && (z->out.pos < page_size)) { + error_setg(errp, "multifd %u: decompressStream buffer too small", p->id); return -1; } if (ZSTD_isError(ret)) { - error_setg(errp, "multifd %d: decompressStream returned %s", + error_setg(errp, "multifd %u: decompressStream returned %s", p->id, ZSTD_getErrorName(ret)); return ret; } out_size += z->out.pos; } if (out_size != expected_size) { - error_setg(errp, "multifd %d: packet size received %d size expected %d", + error_setg(errp, "multifd %u: packet size received %u size expected %u", p->id, out_size, expected_size); return -1; } @@ -325,7 +304,6 @@ static MultiFDMethods multifd_zstd_ops = { .send_setup = zstd_send_setup, .send_cleanup = zstd_send_cleanup, .send_prepare = zstd_send_prepare, - .send_write = zstd_send_write, .recv_setup = zstd_recv_setup, .recv_cleanup = zstd_recv_cleanup, .recv_pages = zstd_recv_pages diff --git a/migration/multifd.c b/migration/multifd.c index 377da78f5b..509bbbe3bf 100644 --- a/migration/multifd.c +++ b/migration/multifd.c @@ -66,6 +66,7 @@ static int nocomp_send_setup(MultiFDSendParams *p, Error **errp) * For no compression this function does nothing. * * @p: Params for the channel that we are using + * @errp: pointer to an error */ static void nocomp_send_cleanup(MultiFDSendParams *p, Error **errp) { @@ -81,33 +82,24 @@ static void nocomp_send_cleanup(MultiFDSendParams *p, Error **errp) * Returns 0 for success or -1 for error * * @p: Params for the channel that we are using - * @used: number of pages used * @errp: pointer to an error */ -static int nocomp_send_prepare(MultiFDSendParams *p, uint32_t used, - Error **errp) +static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp) { - p->next_packet_size = used * qemu_target_page_size(); + MultiFDPages_t *pages = p->pages; + size_t page_size = qemu_target_page_size(); + + for (int i = 0; i < p->normal_num; i++) { + p->iov[p->iovs_num].iov_base = pages->block->host + p->normal[i]; + p->iov[p->iovs_num].iov_len = page_size; + p->iovs_num++; + } + + p->next_packet_size = p->normal_num * page_size; p->flags |= MULTIFD_FLAG_NOCOMP; return 0; } -/** - * nocomp_send_write: do the actual write of the data - * - * For no compression we just have to write the data. 
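/*
 * Sketch of the receive-side pattern above: one compressed stream is
 * decompressed into several fixed page-size destinations by resetting the
 * ZSTD_outBuffer for every page while the ZSTD_inBuffer keeps advancing.
 * Assumes libzstd; page size and page count are illustrative.
 */
#include <stdio.h>
#include <string.h>
#include <zstd.h>

#define PAGE  4096
#define PAGES 4

int main(void)
{
    static char pages[PAGES][PAGE];
    static char out[PAGES][PAGE];
    char packed[PAGES * PAGE + 1024];

    memset(pages, 'a', sizeof(pages));
    size_t clen = ZSTD_compress(packed, sizeof(packed), pages, sizeof(pages), 1);
    if (ZSTD_isError(clen)) {
        return 1;
    }

    ZSTD_DStream *zds = ZSTD_createDStream();
    ZSTD_initDStream(zds);

    ZSTD_inBuffer in = { packed, clen, 0 };
    for (int i = 0; i < PAGES; i++) {
        ZSTD_outBuffer o = { out[i], PAGE, 0 };   /* fresh output per page */
        while (o.pos < PAGE) {
            size_t r = ZSTD_decompressStream(zds, &o, &in);
            if (ZSTD_isError(r)) {
                fprintf(stderr, "%s\n", ZSTD_getErrorName(r));
                return 1;
            }
        }
    }
    printf("round trip ok: %d\n", memcmp(pages, out, sizeof(out)) == 0);
    ZSTD_freeDStream(zds);
    return 0;
}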
- * - * Returns 0 for success or -1 for error - * - * @p: Params for the channel that we are using - * @used: number of pages used - * @errp: pointer to an error - */ -static int nocomp_send_write(MultiFDSendParams *p, uint32_t used, Error **errp) -{ - return qio_channel_writev_all(p->c, p->pages->iov, used, errp); -} - /** * nocomp_recv_setup: setup receive side * @@ -142,26 +134,29 @@ static void nocomp_recv_cleanup(MultiFDRecvParams *p) * Returns 0 for success or -1 for error * * @p: Params for the channel that we are using - * @used: number of pages used * @errp: pointer to an error */ -static int nocomp_recv_pages(MultiFDRecvParams *p, uint32_t used, Error **errp) +static int nocomp_recv_pages(MultiFDRecvParams *p, Error **errp) { uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK; + size_t page_size = qemu_target_page_size(); if (flags != MULTIFD_FLAG_NOCOMP) { - error_setg(errp, "multifd %d: flags received %x flags expected %x", + error_setg(errp, "multifd %u: flags received %x flags expected %x", p->id, flags, MULTIFD_FLAG_NOCOMP); return -1; } - return qio_channel_readv_all(p->c, p->pages->iov, used, errp); + for (int i = 0; i < p->normal_num; i++) { + p->iov[i].iov_base = p->host + p->normal[i]; + p->iov[i].iov_len = page_size; + } + return qio_channel_readv_all(p->c, p->iov, p->normal_num, errp); } static MultiFDMethods multifd_nocomp_ops = { .send_setup = nocomp_send_setup, .send_cleanup = nocomp_send_cleanup, .send_prepare = nocomp_send_prepare, - .send_write = nocomp_send_write, .recv_setup = nocomp_recv_setup, .recv_cleanup = nocomp_recv_cleanup, .recv_pages = nocomp_recv_pages @@ -214,8 +209,8 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp) } if (msg.version != MULTIFD_VERSION) { - error_setg(errp, "multifd: received packet version %d " - "expected %d", msg.version, MULTIFD_VERSION); + error_setg(errp, "multifd: received packet version %u " + "expected %u", msg.version, MULTIFD_VERSION); return -1; } @@ -231,8 +226,8 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp) } if (msg.id > migrate_multifd_channels()) { - error_setg(errp, "multifd: received channel version %d " - "expected %d", msg.version, MULTIFD_VERSION); + error_setg(errp, "multifd: received channel version %u " + "expected %u", msg.version, MULTIFD_VERSION); return -1; } @@ -244,7 +239,6 @@ static MultiFDPages_t *multifd_pages_init(size_t size) MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1); pages->allocated = size; - pages->iov = g_new0(struct iovec, size); pages->offset = g_new0(ram_addr_t, size); return pages; @@ -252,12 +246,10 @@ static MultiFDPages_t *multifd_pages_init(size_t size) static void multifd_pages_clear(MultiFDPages_t *pages) { - pages->used = 0; + pages->num = 0; pages->allocated = 0; pages->packet_num = 0; pages->block = NULL; - g_free(pages->iov); - pages->iov = NULL; g_free(pages->offset); pages->offset = NULL; g_free(pages); @@ -270,7 +262,7 @@ static void multifd_send_fill_packet(MultiFDSendParams *p) packet->flags = cpu_to_be32(p->flags); packet->pages_alloc = cpu_to_be32(p->pages->allocated); - packet->pages_used = cpu_to_be32(p->pages->used); + packet->normal_pages = cpu_to_be32(p->normal_num); packet->next_packet_size = cpu_to_be32(p->next_packet_size); packet->packet_num = cpu_to_be64(p->packet_num); @@ -278,9 +270,9 @@ static void multifd_send_fill_packet(MultiFDSendParams *p) strncpy(packet->ramblock, p->pages->block->idstr, 256); } - for (i = 0; i < p->pages->used; i++) { + for (i = 0; i < p->normal_num; i++) { /* there are 
architectures where ram_addr_t is 32 bit */ - uint64_t temp = p->pages->offset[i]; + uint64_t temp = p->normal[i]; packet->offset[i] = cpu_to_be64(temp); } @@ -289,7 +281,8 @@ static void multifd_send_fill_packet(MultiFDSendParams *p) static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp) { MultiFDPacket_t *packet = p->packet; - uint32_t pages_max = MULTIFD_PACKET_SIZE / qemu_target_page_size(); + size_t page_size = qemu_target_page_size(); + uint32_t page_count = MULTIFD_PACKET_SIZE / page_size; RAMBlock *block; int i; @@ -304,7 +297,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp) packet->version = be32_to_cpu(packet->version); if (packet->version != MULTIFD_VERSION) { error_setg(errp, "multifd: received packet " - "version %d and expected version %d", + "version %u and expected version %u", packet->version, MULTIFD_VERSION); return -1; } @@ -316,33 +309,25 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp) * If we received a packet that is 100 times bigger than expected * just stop migration. It is a magic number. */ - if (packet->pages_alloc > pages_max * 100) { + if (packet->pages_alloc > page_count) { error_setg(errp, "multifd: received packet " - "with size %d and expected a maximum size of %d", - packet->pages_alloc, pages_max * 100) ; + "with size %u and expected a size of %u", + packet->pages_alloc, page_count) ; return -1; } - /* - * We received a packet that is bigger than expected but inside - * reasonable limits (see previous comment). Just reallocate. - */ - if (packet->pages_alloc > p->pages->allocated) { - multifd_pages_clear(p->pages); - p->pages = multifd_pages_init(packet->pages_alloc); - } - p->pages->used = be32_to_cpu(packet->pages_used); - if (p->pages->used > packet->pages_alloc) { + p->normal_num = be32_to_cpu(packet->normal_pages); + if (p->normal_num > packet->pages_alloc) { error_setg(errp, "multifd: received packet " - "with %d pages and expected maximum pages are %d", - p->pages->used, packet->pages_alloc) ; + "with %u pages and expected maximum pages are %u", + p->normal_num, packet->pages_alloc) ; return -1; } p->next_packet_size = be32_to_cpu(packet->next_packet_size); p->packet_num = be64_to_cpu(packet->packet_num); - if (p->pages->used == 0) { + if (p->normal_num == 0) { return 0; } @@ -355,17 +340,17 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp) return -1; } - for (i = 0; i < p->pages->used; i++) { + p->host = block->host; + for (i = 0; i < p->normal_num; i++) { uint64_t offset = be64_to_cpu(packet->offset[i]); - if (offset > (block->used_length - qemu_target_page_size())) { + if (offset > (block->used_length - page_size)) { error_setg(errp, "multifd: offset too long %" PRIu64 " (max " RAM_ADDR_FMT ")", offset, block->used_length); return -1; } - p->pages->iov[i].iov_base = block->host + offset; - p->pages->iov[i].iov_len = qemu_target_page_size(); + p->normal[i] = offset; } return 0; @@ -442,15 +427,15 @@ static int multifd_send_pages(QEMUFile *f) } qemu_mutex_unlock(&p->mutex); } - assert(!p->pages->used); + assert(!p->pages->num); assert(!p->pages->block); p->packet_num = multifd_send_state->packet_num++; multifd_send_state->pages = p->pages; p->pages = pages; - transferred = ((uint64_t) pages->used) * qemu_target_page_size() + transferred = ((uint64_t) pages->num) * qemu_target_page_size() + p->packet_len; - qemu_file_update_transfer(f, transferred); + qemu_file_acct_rate_limit(f, transferred); ram_counters.multifd_bytes += transferred; 
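/*
 * Sketch of the on-the-wire offset handling in multifd_send_fill_packet()
 * and multifd_recv_unfill_packet() above: page offsets travel as big-endian
 * 64-bit values and are bounds-checked against the RAMBlock length on
 * receive. Plain shift-based conversions stand in for cpu_to_be64() /
 * be64_to_cpu(); the sizes are illustrative.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void put_be64(uint8_t *p, uint64_t v)
{
    for (int i = 0; i < 8; i++) {
        p[i] = (uint8_t)(v >> (56 - 8 * i));
    }
}

static uint64_t get_be64(const uint8_t *p)
{
    uint64_t v = 0;
    for (int i = 0; i < 8; i++) {
        v = (v << 8) | p[i];
    }
    return v;
}

int main(void)
{
    const uint64_t page_size = 4096, block_len = 1 << 20;
    uint64_t offsets[3] = { 0, 8 * page_size, 255 * page_size };
    uint8_t wire[3][8];

    for (int i = 0; i < 3; i++) {
        put_be64(wire[i], offsets[i]);           /* sender side */
    }
    for (int i = 0; i < 3; i++) {
        uint64_t off = get_be64(wire[i]);        /* receiver side */
        if (off > block_len - page_size) {       /* same bounds check idea */
            fprintf(stderr, "offset %" PRIu64 " out of range\n", off);
            return 1;
        }
        printf("offset[%d] = %" PRIu64 "\n", i, off);
    }
    return 0;
}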
ram_counters.transferred += transferred; qemu_mutex_unlock(&p->mutex); @@ -468,12 +453,10 @@ int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset) } if (pages->block == block) { - pages->offset[pages->used] = offset; - pages->iov[pages->used].iov_base = block->host + offset; - pages->iov[pages->used].iov_len = qemu_target_page_size(); - pages->used++; + pages->offset[pages->num] = offset; + pages->num++; - if (pages->used < pages->allocated) { + if (pages->num < pages->allocated) { return 1; } } @@ -523,6 +506,9 @@ static void multifd_send_terminate_threads(Error *err) qemu_mutex_lock(&p->mutex); p->quit = true; qemu_sem_post(&p->sem); + if (p->c) { + qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL); + } qemu_mutex_unlock(&p->mutex); } } @@ -531,7 +517,7 @@ void multifd_save_cleanup(void) { int i; - if (!migrate_use_multifd()) { + if (!migrate_use_multifd() || !migrate_multi_channels_is_allowed()) { return; } multifd_send_terminate_threads(NULL); @@ -546,6 +532,9 @@ void multifd_save_cleanup(void) MultiFDSendParams *p = &multifd_send_state->params[i]; Error *local_err = NULL; + if (p->registered_yank) { + migration_ioc_unregister_yank(p->c); + } socket_send_channel_destroy(p->c); p->c = NULL; qemu_mutex_destroy(&p->mutex); @@ -553,13 +542,15 @@ void multifd_save_cleanup(void) qemu_sem_destroy(&p->sem_sync); g_free(p->name); p->name = NULL; - g_free(p->tls_hostname); - p->tls_hostname = NULL; multifd_pages_clear(p->pages); p->pages = NULL; p->packet_len = 0; g_free(p->packet); p->packet = NULL; + g_free(p->iov); + p->iov = NULL; + g_free(p->normal); + p->normal = NULL; multifd_send_state->ops->send_cleanup(p, &local_err); if (local_err) { migrate_set_error(migrate_get_current(), local_err); @@ -575,19 +566,51 @@ void multifd_save_cleanup(void) multifd_send_state = NULL; } -void multifd_send_sync_main(QEMUFile *f) +static int multifd_zero_copy_flush(QIOChannel *c) +{ + int ret; + Error *err = NULL; + + ret = qio_channel_flush(c, &err); + if (ret < 0) { + error_report_err(err); + return -1; + } + if (ret == 1) { + dirty_sync_missed_zero_copy(); + } + + return ret; +} + +int multifd_send_sync_main(QEMUFile *f) { int i; + bool flush_zero_copy; if (!migrate_use_multifd()) { - return; + return 0; } - if (multifd_send_state->pages->used) { + if (multifd_send_state->pages->num) { if (multifd_send_pages(f) < 0) { error_report("%s: multifd_send_pages fail", __func__); - return; + return -1; } } + + /* + * When using zero-copy, it's necessary to flush the pages before any of + * the pages can be sent again, so we'll make sure the new version of the + * pages will always arrive _later_ than the old pages. + * + * Currently we achieve this by flushing the zero-page requested writes + * per ram iteration, but in the future we could potentially optimize it + * to be less frequent, e.g. only after we finished one whole scanning of + * all the dirty bitmaps. 
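/*
 * Sketch of the zero-copy flush idea discussed in the comment above, using
 * the Linux MSG_ZEROCOPY facility directly: after a zero-copy send the
 * kernel may still reference the buffer, so completion notifications have
 * to be taken off the socket error queue before the pages are rewritten;
 * qio_channel_flush() plays that role in the patch. Socket setup is assumed
 * to exist elsewhere; this variant only drains notifications that are
 * already queued (a real flush would poll until all sends completed).
 */
#include <errno.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY   60          /* kernel uapi value */
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY  0x4000000   /* kernel uapi value */
#endif

static void drain_zerocopy_completions(int fd)
{
    /* Pull whatever completion notifications are already on the errqueue. */
    for (;;) {
        char ctrl[256];
        struct msghdr msg = { .msg_control = ctrl,
                              .msg_controllen = sizeof(ctrl) };
        if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0) {
            break;                /* EAGAIN: nothing (more) pending */
        }
    }
}

int send_buffer_zerocopy(int fd, const char *buf, size_t len)
{
    int one = 1;
    if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)) < 0) {
        return -1;
    }
    if (send(fd, buf, len, MSG_ZEROCOPY) < 0) {
        return -1;
    }
    drain_zerocopy_completions(fd);   /* buffer reuse is safe only after
                                       * all completions have arrived */
    return 0;
}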
+ */ + + flush_zero_copy = migrate_use_zero_copy_send(); + for (i = 0; i < migrate_multifd_channels(); i++) { MultiFDSendParams *p = &multifd_send_state->params[i]; @@ -598,17 +621,21 @@ void multifd_send_sync_main(QEMUFile *f) if (p->quit) { error_report("%s: channel %d has already quit", __func__, i); qemu_mutex_unlock(&p->mutex); - return; + return -1; } p->packet_num = multifd_send_state->packet_num++; p->flags |= MULTIFD_FLAG_SYNC; p->pending_job++; - qemu_file_update_transfer(f, p->packet_len); + qemu_file_acct_rate_limit(f, p->packet_len); ram_counters.multifd_bytes += p->packet_len; ram_counters.transferred += p->packet_len; qemu_mutex_unlock(&p->mutex); qemu_sem_post(&p->sem); + + if (flush_zero_copy && p->c && (multifd_zero_copy_flush(p->c) < 0)) { + return -1; + } } for (i = 0; i < migrate_multifd_channels(); i++) { MultiFDSendParams *p = &multifd_send_state->params[i]; @@ -617,6 +644,8 @@ void multifd_send_sync_main(QEMUFile *f) qemu_sem_wait(&p->sem_sync); } trace_multifd_send_sync_main(multifd_send_state->packet_num); + + return 0; } static void *multifd_send_thread(void *opaque) @@ -624,7 +653,7 @@ static void *multifd_send_thread(void *opaque) MultiFDSendParams *p = opaque; Error *local_err = NULL; int ret = 0; - uint32_t flags = 0; + bool use_zero_copy_send = migrate_use_zero_copy_send(); trace_multifd_send_thread_start(p->id); rcu_register_thread(); @@ -645,13 +674,23 @@ static void *multifd_send_thread(void *opaque) qemu_mutex_lock(&p->mutex); if (p->pending_job) { - uint32_t used = p->pages->used; uint64_t packet_num = p->packet_num; - flags = p->flags; + uint32_t flags = p->flags; + p->normal_num = 0; - if (used) { - ret = multifd_send_state->ops->send_prepare(p, used, - &local_err); + if (use_zero_copy_send) { + p->iovs_num = 0; + } else { + p->iovs_num = 1; + } + + for (int i = 0; i < p->pages->num; i++) { + p->normal[p->normal_num] = p->pages->offset[i]; + p->normal_num++; + } + + if (p->normal_num) { + ret = multifd_send_state->ops->send_prepare(p, &local_err); if (ret != 0) { qemu_mutex_unlock(&p->mutex); break; @@ -660,25 +699,31 @@ static void *multifd_send_thread(void *opaque) multifd_send_fill_packet(p); p->flags = 0; p->num_packets++; - p->num_pages += used; - p->pages->used = 0; + p->total_normal_pages += p->normal_num; + p->pages->num = 0; p->pages->block = NULL; qemu_mutex_unlock(&p->mutex); - trace_multifd_send(p->id, packet_num, used, flags, + trace_multifd_send(p->id, packet_num, p->normal_num, flags, p->next_packet_size); - ret = qio_channel_write_all(p->c, (void *)p->packet, - p->packet_len, &local_err); - if (ret != 0) { - break; - } - - if (used) { - ret = multifd_send_state->ops->send_write(p, used, &local_err); + if (use_zero_copy_send) { + /* Send header first, without zerocopy */ + ret = qio_channel_write_all(p->c, (void *)p->packet, + p->packet_len, &local_err); if (ret != 0) { break; } + } else { + /* Send header using the same writev call */ + p->iov[0].iov_len = p->packet_len; + p->iov[0].iov_base = p->packet; + } + + ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num, NULL, + 0, p->write_flags, &local_err); + if (ret != 0) { + break; } qemu_mutex_lock(&p->mutex); @@ -719,7 +764,7 @@ out: qemu_mutex_unlock(&p->mutex); rcu_unregister_thread(); - trace_multifd_send_thread_end(p->id, p->num_packets, p->num_pages); + trace_multifd_send_thread_end(p->id, p->num_packets, p->total_normal_pages); return NULL; } @@ -770,7 +815,7 @@ static void multifd_tls_channel_connect(MultiFDSendParams *p, Error **errp) { MigrationState *s = 
migrate_get_current(); - const char *hostname = p->tls_hostname; + const char *hostname = s->hostname; QIOChannelTLS *tioc; tioc = migration_tls_client_create(s, ioc, hostname, errp); @@ -791,16 +836,12 @@ static bool multifd_channel_connect(MultiFDSendParams *p, QIOChannel *ioc, Error *error) { - MigrationState *s = migrate_get_current(); - trace_multifd_set_outgoing_channel( - ioc, object_get_typename(OBJECT(ioc)), p->tls_hostname, error); + ioc, object_get_typename(OBJECT(ioc)), + migrate_get_current()->hostname, error); if (!error) { - if (s->parameters.tls_creds && - *s->parameters.tls_creds && - !object_dynamic_cast(OBJECT(ioc), - TYPE_QIO_CHANNEL_TLS)) { + if (migrate_channel_requires_tls_upgrade(ioc)) { multifd_tls_channel_connect(p, ioc, &error); if (!error) { /* @@ -813,7 +854,8 @@ static bool multifd_channel_connect(MultiFDSendParams *p, return false; } } else { - /* update for tls qio channel */ + migration_ioc_register_yank(ioc); + p->registered_yank = true; p->c = ioc; qemu_thread_create(&p->thread, p->name, multifd_send_thread, p, QEMU_THREAD_JOINABLE); @@ -869,12 +911,15 @@ int multifd_save_setup(Error **errp) int thread_count; uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size(); uint8_t i; - MigrationState *s; if (!migrate_use_multifd()) { return 0; } - s = migrate_get_current(); + if (!migrate_multi_channels_is_allowed()) { + error_setg(errp, "multifd is not supported by current protocol"); + return -1; + } + thread_count = migrate_multifd_channels(); multifd_send_state = g_malloc0(sizeof(*multifd_send_state)); multifd_send_state->params = g_new0(MultiFDSendParams, thread_count); @@ -899,7 +944,16 @@ int multifd_save_setup(Error **errp) p->packet->magic = cpu_to_be32(MULTIFD_MAGIC); p->packet->version = cpu_to_be32(MULTIFD_VERSION); p->name = g_strdup_printf("multifdsend_%d", i); - p->tls_hostname = g_strdup(s->hostname); + /* We need one extra place for the packet header */ + p->iov = g_new0(struct iovec, page_count + 1); + p->normal = g_new0(ram_addr_t, page_count); + + if (migrate_use_zero_copy_send()) { + p->write_flags = QIO_CHANNEL_WRITE_FLAG_ZERO_COPY; + } else { + p->write_flags = 0; + } + socket_send_channel_create(multifd_new_send_channel_async, p); } @@ -967,7 +1021,7 @@ int multifd_load_cleanup(Error **errp) { int i; - if (!migrate_use_multifd()) { + if (!migrate_use_multifd() || !migrate_multi_channels_is_allowed()) { return 0; } multifd_recv_terminate_threads(NULL); @@ -987,21 +1041,20 @@ int multifd_load_cleanup(Error **errp) for (i = 0; i < migrate_multifd_channels(); i++) { MultiFDRecvParams *p = &multifd_recv_state->params[i]; - if (OBJECT(p->c)->ref == 1) { - migration_ioc_unregister_yank(p->c); - } - + migration_ioc_unregister_yank(p->c); object_unref(OBJECT(p->c)); p->c = NULL; qemu_mutex_destroy(&p->mutex); qemu_sem_destroy(&p->sem_sync); g_free(p->name); p->name = NULL; - multifd_pages_clear(p->pages); - p->pages = NULL; p->packet_len = 0; g_free(p->packet); p->packet = NULL; + g_free(p->iov); + p->iov = NULL; + g_free(p->normal); + p->normal = NULL; multifd_recv_state->ops->recv_cleanup(p); } qemu_sem_destroy(&multifd_recv_state->sem_sync); @@ -1050,7 +1103,6 @@ static void *multifd_recv_thread(void *opaque) rcu_register_thread(); while (true) { - uint32_t used; uint32_t flags; if (p->quit) { @@ -1073,18 +1125,17 @@ static void *multifd_recv_thread(void *opaque) break; } - used = p->pages->used; flags = p->flags; /* recv methods don't know how to handle the SYNC flag */ p->flags &= ~MULTIFD_FLAG_SYNC; - trace_multifd_recv(p->id, 
p->packet_num, used, flags, + trace_multifd_recv(p->id, p->packet_num, p->normal_num, flags, p->next_packet_size); p->num_packets++; - p->num_pages += used; + p->total_normal_pages += p->normal_num; qemu_mutex_unlock(&p->mutex); - if (used) { - ret = multifd_recv_state->ops->recv_pages(p, used, &local_err); + if (p->normal_num) { + ret = multifd_recv_state->ops->recv_pages(p, &local_err); if (ret != 0) { break; } @@ -1105,7 +1156,7 @@ static void *multifd_recv_thread(void *opaque) qemu_mutex_unlock(&p->mutex); rcu_unregister_thread(); - trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages); + trace_multifd_recv_thread_end(p->id, p->num_packets, p->total_normal_pages); return NULL; } @@ -1119,6 +1170,10 @@ int multifd_load_setup(Error **errp) if (!migrate_use_multifd()) { return 0; } + if (!migrate_multi_channels_is_allowed()) { + error_setg(errp, "multifd is not supported by current protocol"); + return -1; + } thread_count = migrate_multifd_channels(); multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state)); multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count); @@ -1133,11 +1188,12 @@ int multifd_load_setup(Error **errp) qemu_sem_init(&p->sem_sync, 0); p->quit = false; p->id = i; - p->pages = multifd_pages_init(page_count); p->packet_len = sizeof(MultiFDPacket_t) + sizeof(uint64_t) * page_count; p->packet = g_malloc0(p->packet_len); p->name = g_strdup_printf("multifdrecv_%d", i); + p->iov = g_new0(struct iovec, page_count); + p->normal = g_new0(ram_addr_t, page_count); } for (i = 0; i < thread_count; i++) { diff --git a/migration/multifd.h b/migration/multifd.h index 8d6751f5ed..519f498643 100644 --- a/migration/multifd.h +++ b/migration/multifd.h @@ -20,7 +20,7 @@ int multifd_load_cleanup(Error **errp); bool multifd_recv_all_channels_created(void); bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp); void multifd_recv_sync_main(void); -void multifd_send_sync_main(QEMUFile *f); +int multifd_send_sync_main(QEMUFile *f); int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset); /* Multifd Compression flags */ @@ -42,7 +42,8 @@ typedef struct { uint32_t flags; /* maximum number of allocated pages */ uint32_t pages_alloc; - uint32_t pages_used; + /* non zero pages */ + uint32_t normal_pages; /* size of the next packet that contains pages */ uint32_t next_packet_size; uint64_t packet_num; @@ -53,65 +54,85 @@ typedef struct { typedef struct { /* number of used pages */ - uint32_t used; + uint32_t num; /* number of allocated pages */ uint32_t allocated; /* global number of generated multifd packets */ uint64_t packet_num; /* offset of each page */ ram_addr_t *offset; - /* pointer to each page */ - struct iovec *iov; RAMBlock *block; } MultiFDPages_t; typedef struct { - /* this fields are not changed once the thread is created */ + /* Fields are only written at creating/deletion time */ + /* No lock required for them, they are read only */ + /* channel number */ uint8_t id; /* channel thread name */ char *name; - /* tls hostname */ - char *tls_hostname; /* channel thread id */ QemuThread thread; /* communication channel */ QIOChannel *c; + /* is the yank function registered */ + bool registered_yank; + /* packet allocated len */ + uint32_t packet_len; + /* multifd flags for sending ram */ + int write_flags; + /* sem where to wait for more work */ QemuSemaphore sem; + /* syncs main thread and channels */ + QemuSemaphore sem_sync; + /* this mutex protects the following parameters */ QemuMutex mutex; /* is this channel thread running */ bool 
running; /* should this thread finish */ bool quit; - /* thread has work to do */ - int pending_job; - /* array of pages to sent */ - MultiFDPages_t *pages; - /* packet allocated len */ - uint32_t packet_len; - /* pointer to the packet */ - MultiFDPacket_t *packet; /* multifd flags for each packet */ uint32_t flags; - /* size of the next packet that contains pages */ - uint32_t next_packet_size; /* global number of generated multifd packets */ uint64_t packet_num; - /* thread local variables */ + /* thread has work to do */ + int pending_job; + /* array of pages to sent. + * The owner of 'pages' depends of 'pending_job' value: + * pending_job == 0 -> migration_thread can use it. + * pending_job != 0 -> multifd_channel can use it. + */ + MultiFDPages_t *pages; + + /* thread local variables. No locking required */ + + /* pointer to the packet */ + MultiFDPacket_t *packet; + /* size of the next packet that contains pages */ + uint32_t next_packet_size; /* packets sent through this channel */ uint64_t num_packets; - /* pages sent through this channel */ - uint64_t num_pages; - /* syncs main thread and channels */ - QemuSemaphore sem_sync; + /* non zero pages sent through this channel */ + uint64_t total_normal_pages; + /* buffers to send */ + struct iovec *iov; + /* number of iovs used */ + uint32_t iovs_num; + /* Pages that are not zero */ + ram_addr_t *normal; + /* num of non zero pages */ + uint32_t normal_num; /* used for compression methods */ void *data; } MultiFDSendParams; typedef struct { - /* this fields are not changed once the thread is created */ + /* Fields are only written at creating/deletion time */ + /* No lock required for them, they are read only */ + /* channel number */ uint8_t id; /* channel thread name */ @@ -120,31 +141,41 @@ typedef struct { QemuThread thread; /* communication channel */ QIOChannel *c; + /* packet allocated len */ + uint32_t packet_len; + + /* syncs main thread and channels */ + QemuSemaphore sem_sync; + /* this mutex protects the following parameters */ QemuMutex mutex; /* is this channel thread running */ bool running; /* should this thread finish */ bool quit; - /* array of pages to receive */ - MultiFDPages_t *pages; - /* packet allocated len */ - uint32_t packet_len; - /* pointer to the packet */ - MultiFDPacket_t *packet; /* multifd flags for each packet */ uint32_t flags; /* global number of generated multifd packets */ uint64_t packet_num; - /* thread local variables */ + + /* thread local variables. 
No locking required */ + + /* pointer to the packet */ + MultiFDPacket_t *packet; /* size of the next packet that contains pages */ uint32_t next_packet_size; /* packets sent through this channel */ uint64_t num_packets; - /* pages sent through this channel */ - uint64_t num_pages; - /* syncs main thread and channels */ - QemuSemaphore sem_sync; + /* ramblock host address */ + uint8_t *host; + /* non zero pages recv through this channel */ + uint64_t total_normal_pages; + /* buffers to recv */ + struct iovec *iov; + /* Pages that are not zero */ + ram_addr_t *normal; + /* num of non zero pages */ + uint32_t normal_num; /* used for de-compression methods */ void *data; } MultiFDRecvParams; @@ -155,15 +186,13 @@ typedef struct { /* Cleanup for sending side */ void (*send_cleanup)(MultiFDSendParams *p, Error **errp); /* Prepare the send packet */ - int (*send_prepare)(MultiFDSendParams *p, uint32_t used, Error **errp); - /* Write the send packet */ - int (*send_write)(MultiFDSendParams *p, uint32_t used, Error **errp); + int (*send_prepare)(MultiFDSendParams *p, Error **errp); /* Setup for receiving side */ int (*recv_setup)(MultiFDRecvParams *p, Error **errp); /* Cleanup for receiving side */ void (*recv_cleanup)(MultiFDRecvParams *p); /* Read all pages */ - int (*recv_pages)(MultiFDRecvParams *p, uint32_t used, Error **errp); + int (*recv_pages)(MultiFDRecvParams *p, Error **errp); } MultiFDMethods; void multifd_register_ops(int method, MultiFDMethods *ops); diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index 2e9697bdd2..b9a37ef255 100644 --- a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -18,6 +18,7 @@ #include "qemu/osdep.h" #include "qemu/rcu.h" +#include "qemu/madvise.h" #include "exec/target_page.h" #include "migration.h" #include "qemu-file.h" @@ -32,6 +33,10 @@ #include "trace.h" #include "hw/boards.h" #include "exec/ramblock.h" +#include "socket.h" +#include "qemu-file.h" +#include "yank_functions.h" +#include "tls.h" /* Arbitrary limit on size of each discard command, * keeps them around ~200 bytes @@ -77,6 +82,20 @@ int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp) &pnd); } +/* + * NOTE: this routine is not thread safe, we can't call it concurrently. But it + * should be good enough for migration's purposes. + */ +void postcopy_thread_create(MigrationIncomingState *mis, + QemuThread *thread, const char *name, + void *(*fn)(void *), int joinable) +{ + qemu_sem_init(&mis->thread_sync_sem, 0); + qemu_thread_create(thread, name, fn, mis, joinable); + qemu_sem_wait(&mis->thread_sync_sem); + qemu_sem_destroy(&mis->thread_sync_sem); +} + /* Postcopy needs to detect accesses to pages that haven't yet been copied * across, and efficiently map new pages in, the techniques for doing this * are target OS specific. 
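/*
 * Sketch of the postcopy_thread_create() pattern introduced above: the
 * creator blocks on a semaphore until the new thread has finished its setup
 * and posted it, so the caller knows the thread is ready before it
 * continues. Plain pthreads and POSIX semaphores stand in for QemuThread /
 * QemuSemaphore; names are illustrative.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t thread_sync_sem;

static void *worker(void *opaque)
{
    /* ... per-thread setup would happen here ... */
    sem_post(&thread_sync_sem);        /* tell the creator we are ready */
    /* ... main loop of the thread ... */
    return NULL;
}

static void thread_create_synced(pthread_t *thread,
                                 void *(*fn)(void *), void *opaque)
{
    sem_init(&thread_sync_sem, 0, 0);
    pthread_create(thread, NULL, fn, opaque);
    sem_wait(&thread_sync_sem);        /* block until the thread posted */
    sem_destroy(&thread_sync_sem);
}

int main(void)
{
    pthread_t t;
    thread_create_synced(&t, worker, NULL);
    printf("worker thread is up\n");
    pthread_join(t, NULL);
    return 0;
}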
@@ -283,15 +302,13 @@ static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis) } #ifdef UFFD_FEATURE_THREAD_ID - if (migrate_postcopy_blocktime() && mis && - UFFD_FEATURE_THREAD_ID & supported_features) { - /* kernel supports that feature */ - /* don't create blocktime_context if it exists */ - if (!mis->blocktime_ctx) { - mis->blocktime_ctx = blocktime_context_new(); - } - + if (UFFD_FEATURE_THREAD_ID & supported_features) { asked_features |= UFFD_FEATURE_THREAD_ID; + if (migrate_postcopy_blocktime()) { + if (!mis->blocktime_ctx) { + mis->blocktime_ctx = blocktime_context_new(); + } + } } #endif @@ -306,7 +323,7 @@ static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis) return false; } - if (qemu_real_host_page_size != ram_pagesize_summary()) { + if (qemu_real_host_page_size() != ram_pagesize_summary()) { bool have_hp = false; /* We've got a huge page */ #ifdef UFFD_FEATURE_MISSING_HUGETLBFS @@ -344,7 +361,7 @@ static int test_ramblock_postcopiable(RAMBlock *rb, void *opaque) */ bool postcopy_ram_supported_by_host(MigrationIncomingState *mis) { - long pagesize = qemu_real_host_page_size; + long pagesize = qemu_real_host_page_size(); int ufd = -1; bool ret = false; /* Error unless we change it */ void *testarea = NULL; @@ -402,7 +419,7 @@ bool postcopy_ram_supported_by_host(MigrationIncomingState *mis) strerror(errno)); goto out; } - g_assert(((size_t)testarea & (pagesize - 1)) == 0); + g_assert(QEMU_PTR_IS_ALIGNED(testarea, pagesize)); reg_struct.range.start = (uintptr_t)testarea; reg_struct.range.len = pagesize; @@ -525,6 +542,28 @@ int postcopy_ram_incoming_init(MigrationIncomingState *mis) return 0; } +static void postcopy_temp_pages_cleanup(MigrationIncomingState *mis) +{ + int i; + + if (mis->postcopy_tmp_pages) { + for (i = 0; i < mis->postcopy_channels; i++) { + if (mis->postcopy_tmp_pages[i].tmp_huge_page) { + munmap(mis->postcopy_tmp_pages[i].tmp_huge_page, + mis->largest_page_size); + mis->postcopy_tmp_pages[i].tmp_huge_page = NULL; + } + } + g_free(mis->postcopy_tmp_pages); + mis->postcopy_tmp_pages = NULL; + } + + if (mis->postcopy_tmp_zero_page) { + munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size); + mis->postcopy_tmp_zero_page = NULL; + } +} + /* * At the end of a migration where postcopy_ram_incoming_init was called. 
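/*
 * Sketch of the userfaultfd feature handshake that ufd_check_and_apply()
 * performs above: open a userfaultfd, issue UFFDIO_API, and look at the
 * feature bits the kernel reports (for example UFFD_FEATURE_THREAD_ID,
 * which the blocktime statistics need). Linux-only, may require
 * vm.unprivileged_userfaultfd; error handling is minimal.
 */
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (ufd < 0) {
        perror("userfaultfd");
        return 1;
    }

    struct uffdio_api api = { .api = UFFD_API, .features = 0 };
    if (ioctl(ufd, UFFDIO_API, &api) < 0) {
        perror("UFFDIO_API");
        return 1;
    }

    /* api.features now holds the feature bits this kernel supports */
    printf("supported features: 0x%llx\n", (unsigned long long)api.features);
#ifdef UFFD_FEATURE_THREAD_ID
    printf("thread-id feature: %s\n",
           (api.features & UFFD_FEATURE_THREAD_ID) ? "yes" : "no");
#endif
    close(ufd);
    return 0;
}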
*/ @@ -532,6 +571,11 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis) { trace_postcopy_ram_incoming_cleanup_entry(); + if (mis->postcopy_prio_thread_created) { + qemu_thread_join(&mis->postcopy_prio_thread); + mis->postcopy_prio_thread_created = false; + } + if (mis->have_fault_thread) { Error *local_err = NULL; @@ -566,14 +610,8 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis) } } - if (mis->postcopy_tmp_page) { - munmap(mis->postcopy_tmp_page, mis->largest_page_size); - mis->postcopy_tmp_page = NULL; - } - if (mis->postcopy_tmp_zero_page) { - munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size); - mis->postcopy_tmp_zero_page = NULL; - } + postcopy_temp_pages_cleanup(mis); + trace_postcopy_ram_incoming_cleanup_blocktime( get_postcopy_total_blocktime()); @@ -660,7 +698,7 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd, struct uffdio_range range; int ret; trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb)); - range.start = client_addr & ~(pagesize - 1); + range.start = ROUND_DOWN(client_addr, pagesize); range.len = pagesize; ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range); if (ret) { @@ -671,6 +709,29 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd, return ret; } +static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb, + ram_addr_t start, uint64_t haddr) +{ + void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb)); + + /* + * Discarded pages (via RamDiscardManager) are never migrated. On unlikely + * access, place a zeropage, which will also set the relevant bits in the + * recv_bitmap accordingly, so we won't try placing a zeropage twice. + * + * Checking a single bit is sufficient to handle pagesize > TPS as either + * all relevant bits are set or not. + */ + assert(QEMU_IS_ALIGNED(start, qemu_ram_pagesize(rb))); + if (ramblock_page_is_discarded(rb, start)) { + bool received = ramblock_recv_bitmap_test_byte_offset(rb, start); + + return received ? 
0 : postcopy_place_page_zero(mis, aligned, rb); + } + + return migrate_send_rp_req_pages(mis, rb, start, haddr); +} + /* * Callback from shared fault handlers to ask for a page, * the page must be specified by a RAMBlock and an offset in that rb @@ -679,8 +740,7 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd, int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb, uint64_t client_addr, uint64_t rb_offset) { - size_t pagesize = qemu_ram_pagesize(rb); - uint64_t aligned_rbo = rb_offset & ~(pagesize - 1); + uint64_t aligned_rbo = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb)); MigrationIncomingState *mis = migration_incoming_get_current(); trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb), @@ -690,7 +750,7 @@ int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb, qemu_ram_get_idstr(rb), rb_offset); return postcopy_wake_shared(pcfd, client_addr, rb); } - migrate_send_rp_req_pages(mis, rb, aligned_rbo, client_addr); + postcopy_request_page(mis, rb, aligned_rbo, client_addr); return 0; } @@ -840,15 +900,11 @@ static void mark_postcopy_blocktime_end(uintptr_t addr) affected_cpu); } -static bool postcopy_pause_fault_thread(MigrationIncomingState *mis) +static void postcopy_pause_fault_thread(MigrationIncomingState *mis) { trace_postcopy_pause_fault_thread(); - qemu_sem_wait(&mis->postcopy_pause_sem_fault); - trace_postcopy_pause_fault_thread_continued(); - - return true; } /* @@ -865,7 +921,7 @@ static void *postcopy_ram_fault_thread(void *opaque) trace_postcopy_ram_fault_thread_entry(); rcu_register_thread(); mis->last_rb = NULL; /* last RAMBlock we sent part of */ - qemu_sem_post(&mis->fault_thread_sem); + qemu_sem_post(&mis->thread_sync_sem); struct pollfd *pfd; size_t pfd_len = 2 + mis->postcopy_remote_fds->len; @@ -908,13 +964,7 @@ static void *postcopy_ram_fault_thread(void *opaque) * broken already using the event. We should hold until * the channel is rebuilt. 
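/*
 * Sketch of the alignment change made in these hunks: the open-coded
 * "addr & ~(pagesize - 1)" masks are replaced by ROUND_DOWN(), which works
 * for any power-of-two page size (regular and huge pages alike). The macro
 * below only mirrors the intent of QEMU's helper; values are illustrative.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ROUND_DOWN(n, d) ((n) & -(0ULL + (d)))   /* d must be a power of two */

int main(void)
{
    uint64_t fault_addr = 0x7f1234567abcULL;
    uint64_t sizes[] = { 4096, 2 * 1024 * 1024 };  /* 4K page, 2M huge page */

    for (int i = 0; i < 2; i++) {
        printf("addr 0x%" PRIx64 " rounded down to 0x%" PRIx64
               " (page size %" PRIu64 ")\n",
               fault_addr, ROUND_DOWN(fault_addr, sizes[i]), sizes[i]);
    }
    return 0;
}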
*/ - if (postcopy_pause_fault_thread(mis)) { - /* Continue to read the userfaultfd */ - } else { - error_report("%s: paused but don't allow to continue", - __func__); - break; - } + postcopy_pause_fault_thread(mis); } if (pfd[1].revents) { @@ -970,7 +1020,7 @@ static void *postcopy_ram_fault_thread(void *opaque) break; } - rb_offset &= ~(qemu_ram_pagesize(rb) - 1); + rb_offset = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb)); trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address, qemu_ram_get_idstr(rb), rb_offset, @@ -984,19 +1034,12 @@ retry: * Send the request to the source - we want to request one * of our host page sizes (which is >= TPS) */ - ret = migrate_send_rp_req_pages(mis, rb, rb_offset, - msg.arg.pagefault.address); + ret = postcopy_request_page(mis, rb, rb_offset, + msg.arg.pagefault.address); if (ret) { /* May be network failure, try to wait for recovery */ - if (ret == -EIO && postcopy_pause_fault_thread(mis)) { - /* We got reconnected somehow, try to continue */ - goto retry; - } else { - /* This is a unavoidable fault */ - error_report("%s: migrate_send_rp_req_pages() get %d", - __func__, ret); - break; - } + postcopy_pause_fault_thread(mis); + goto retry; } } @@ -1062,6 +1105,58 @@ retry: return NULL; } +static int postcopy_temp_pages_setup(MigrationIncomingState *mis) +{ + PostcopyTmpPage *tmp_page; + int err, i, channels; + void *temp_page; + + if (migrate_postcopy_preempt()) { + /* If preemption enabled, need extra channel for urgent requests */ + mis->postcopy_channels = RAM_CHANNEL_MAX; + } else { + /* Both precopy/postcopy on the same channel */ + mis->postcopy_channels = 1; + } + + channels = mis->postcopy_channels; + mis->postcopy_tmp_pages = g_malloc0_n(sizeof(PostcopyTmpPage), channels); + + for (i = 0; i < channels; i++) { + tmp_page = &mis->postcopy_tmp_pages[i]; + temp_page = mmap(NULL, mis->largest_page_size, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (temp_page == MAP_FAILED) { + err = errno; + error_report("%s: Failed to map postcopy_tmp_pages[%d]: %s", + __func__, i, strerror(err)); + /* Clean up will be done later */ + return -err; + } + tmp_page->tmp_huge_page = temp_page; + /* Initialize default states for each tmp page */ + postcopy_temp_page_reset(tmp_page); + } + + /* + * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages + */ + mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size, + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (mis->postcopy_tmp_zero_page == MAP_FAILED) { + err = errno; + mis->postcopy_tmp_zero_page = NULL; + error_report("%s: Failed to map large zero page %s", + __func__, strerror(err)); + return -err; + } + + memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size); + + return 0; +} + int postcopy_ram_incoming_setup(MigrationIncomingState *mis) { /* Open the fd for the kernel to give us userfaults */ @@ -1089,11 +1184,8 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis) return -1; } - qemu_sem_init(&mis->fault_thread_sem, 0); - qemu_thread_create(&mis->fault_thread, "postcopy/fault", - postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE); - qemu_sem_wait(&mis->fault_thread_sem); - qemu_sem_destroy(&mis->fault_thread_sem); + postcopy_thread_create(mis, &mis->fault_thread, "fault-default", + postcopy_ram_fault_thread, QEMU_THREAD_JOINABLE); mis->have_fault_thread = true; /* Mark so that we get notified of accesses to unwritten areas */ @@ -1102,31 +1194,20 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis) 
return -1; } - mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size, - PROT_READ | PROT_WRITE, MAP_PRIVATE | - MAP_ANONYMOUS, -1, 0); - if (mis->postcopy_tmp_page == MAP_FAILED) { - mis->postcopy_tmp_page = NULL; - error_report("%s: Failed to map postcopy_tmp_page %s", - __func__, strerror(errno)); + if (postcopy_temp_pages_setup(mis)) { + /* Error dumped in the sub-function */ return -1; } - /* - * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages - */ - mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size, - PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS, - -1, 0); - if (mis->postcopy_tmp_zero_page == MAP_FAILED) { - int e = errno; - mis->postcopy_tmp_zero_page = NULL; - error_report("%s: Failed to map large zero page %s", - __func__, strerror(e)); - return -e; + if (migrate_postcopy_preempt()) { + /* + * This thread needs to be created after the temp pages because + * it'll fetch RAM_CHANNEL_POSTCOPY PostcopyTmpPage immediately. + */ + postcopy_thread_create(mis, &mis->postcopy_prio_thread, "fault-fast", + postcopy_preempt_thread, QEMU_THREAD_JOINABLE); + mis->postcopy_prio_thread_created = true; } - memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size); trace_postcopy_ram_enable_notify(); @@ -1311,6 +1392,16 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd, #endif /* ------------------------------------------------------------------------- */ +void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page) +{ + tmp_page->target_pages = 0; + tmp_page->host_addr = NULL; + /* + * This is set to true when reset, and cleared as long as we received any + * of the non-zero small page within this huge page. + */ + tmp_page->all_zero = true; +} void postcopy_fault_thread_notify(MigrationIncomingState *mis) { @@ -1435,6 +1526,10 @@ void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd) MigrationIncomingState *mis = migration_incoming_get_current(); GArray *pcrfds = mis->postcopy_remote_fds; + if (!pcrfds) { + /* migration has already finished and freed the array */ + return; + } for (i = 0; i < pcrfds->len; i++) { struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i); if (cur->fd == pcfd->fd) { @@ -1443,3 +1538,159 @@ void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd) } } } + +bool postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file) +{ + /* + * The new loading channel has its own threads, so it needs to be + * blocked too. It's by default true, just be explicit. + */ + qemu_file_set_blocking(file, true); + mis->postcopy_qemufile_dst = file; + trace_postcopy_preempt_new_channel(); + + /* Start the migration immediately */ + return true; +} + +/* + * Setup the postcopy preempt channel with the IOC. If ERROR is specified, + * setup the error instead. This helper will free the ERROR if specified. + */ +static void +postcopy_preempt_send_channel_done(MigrationState *s, + QIOChannel *ioc, Error *local_err) +{ + if (local_err) { + migrate_set_error(s, local_err); + error_free(local_err); + } else { + migration_ioc_register_yank(ioc); + s->postcopy_qemufile_src = qemu_file_new_output(ioc); + trace_postcopy_preempt_new_channel(); + } + + /* + * Kick the waiter in all cases. The waiter should check upon + * postcopy_qemufile_src to know whether it failed or not. 
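/*
 * Sketch of the temp-page allocation pattern used by
 * postcopy_temp_pages_setup() above: one anonymous, private mapping of the
 * largest page size per incoming channel, plus a single zero page. The
 * channel count and page size below are illustrative only.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    const size_t largest_page_size = 2 * 1024 * 1024;  /* e.g. a 2M huge page */
    const int channels = 2;                            /* precopy + postcopy */
    void *tmp_pages[2];

    for (int i = 0; i < channels; i++) {
        tmp_pages[i] = mmap(NULL, largest_page_size, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (tmp_pages[i] == MAP_FAILED) {
            perror("mmap tmp page");
            return 1;
        }
    }

    void *zero_page = mmap(NULL, largest_page_size, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (zero_page == MAP_FAILED) {
        perror("mmap zero page");
        return 1;
    }
    memset(zero_page, 0, largest_page_size);   /* anonymous memory starts zeroed */

    printf("allocated %d temp pages and a zero page\n", channels);

    for (int i = 0; i < channels; i++) {
        munmap(tmp_pages[i], largest_page_size);
    }
    munmap(zero_page, largest_page_size);
    return 0;
}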
+ */ + qemu_sem_post(&s->postcopy_qemufile_src_sem); +} + +static void +postcopy_preempt_tls_handshake(QIOTask *task, gpointer opaque) +{ + g_autoptr(QIOChannel) ioc = QIO_CHANNEL(qio_task_get_source(task)); + MigrationState *s = opaque; + Error *local_err = NULL; + + qio_task_propagate_error(task, &local_err); + postcopy_preempt_send_channel_done(s, ioc, local_err); +} + +static void +postcopy_preempt_send_channel_new(QIOTask *task, gpointer opaque) +{ + g_autoptr(QIOChannel) ioc = QIO_CHANNEL(qio_task_get_source(task)); + MigrationState *s = opaque; + QIOChannelTLS *tioc; + Error *local_err = NULL; + + if (qio_task_propagate_error(task, &local_err)) { + goto out; + } + + if (migrate_channel_requires_tls_upgrade(ioc)) { + tioc = migration_tls_client_create(s, ioc, s->hostname, &local_err); + if (!tioc) { + goto out; + } + trace_postcopy_preempt_tls_handshake(); + qio_channel_set_name(QIO_CHANNEL(tioc), "migration-tls-preempt"); + qio_channel_tls_handshake(tioc, postcopy_preempt_tls_handshake, + s, NULL, NULL); + /* Setup the channel until TLS handshake finished */ + return; + } + +out: + /* This handles both good and error cases */ + postcopy_preempt_send_channel_done(s, ioc, local_err); +} + +/* Returns 0 if channel established, -1 for error. */ +int postcopy_preempt_wait_channel(MigrationState *s) +{ + /* If preempt not enabled, no need to wait */ + if (!migrate_postcopy_preempt()) { + return 0; + } + + /* + * We need the postcopy preempt channel to be established before + * starting doing anything. + */ + qemu_sem_wait(&s->postcopy_qemufile_src_sem); + + return s->postcopy_qemufile_src ? 0 : -1; +} + +int postcopy_preempt_setup(MigrationState *s, Error **errp) +{ + if (!migrate_postcopy_preempt()) { + return 0; + } + + if (!migrate_multi_channels_is_allowed()) { + error_setg(errp, "Postcopy preempt is not supported as current " + "migration stream does not support multi-channels."); + return -1; + } + + /* Kick an async task to connect */ + socket_send_channel_create(postcopy_preempt_send_channel_new, s); + + return 0; +} + +static void postcopy_pause_ram_fast_load(MigrationIncomingState *mis) +{ + trace_postcopy_pause_fast_load(); + qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex); + qemu_sem_wait(&mis->postcopy_pause_sem_fast_load); + qemu_mutex_lock(&mis->postcopy_prio_thread_mutex); + trace_postcopy_pause_fast_load_continued(); +} + +void *postcopy_preempt_thread(void *opaque) +{ + MigrationIncomingState *mis = opaque; + int ret; + + trace_postcopy_preempt_thread_entry(); + + rcu_register_thread(); + + qemu_sem_post(&mis->thread_sync_sem); + + /* Sending RAM_SAVE_FLAG_EOS to terminate this thread */ + qemu_mutex_lock(&mis->postcopy_prio_thread_mutex); + while (1) { + ret = ram_load_postcopy(mis->postcopy_qemufile_dst, + RAM_CHANNEL_POSTCOPY); + /* If error happened, go into recovery routine */ + if (ret) { + postcopy_pause_ram_fast_load(mis); + } else { + /* We're done */ + break; + } + } + qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex); + + rcu_unregister_thread(); + + trace_postcopy_preempt_thread_exit(); + + return NULL; +} diff --git a/migration/postcopy-ram.h b/migration/postcopy-ram.h index 6d2b3cf124..6147bf7d1d 100644 --- a/migration/postcopy-ram.h +++ b/migration/postcopy-ram.h @@ -135,6 +135,10 @@ void postcopy_remove_notifier(NotifierWithReturn *n); /* Call the notifier list set by postcopy_add_start_notifier */ int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp); +void postcopy_thread_create(MigrationIncomingState *mis, + QemuThread *thread, 
const char *name, + void *(*fn)(void *), int joinable); + struct PostCopyFD; /* ufd is a pointer to the struct uffd_msg *TODO: more Portable! */ @@ -179,4 +183,15 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd, uint64_t client_addr, int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb, uint64_t client_addr, uint64_t offset); +/* Hard-code channels for now for postcopy preemption */ +enum PostcopyChannels { + RAM_CHANNEL_PRECOPY = 0, + RAM_CHANNEL_POSTCOPY = 1, + RAM_CHANNEL_MAX, +}; + +bool postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file); +int postcopy_preempt_setup(MigrationState *s, Error **errp); +int postcopy_preempt_wait_channel(MigrationState *s); + #endif diff --git a/migration/qemu-file-channel.c b/migration/qemu-file-channel.c deleted file mode 100644 index bb5a5752df..0000000000 --- a/migration/qemu-file-channel.c +++ /dev/null @@ -1,194 +0,0 @@ -/* - * QEMUFile backend for QIOChannel objects - * - * Copyright (c) 2015-2016 Red Hat, Inc - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
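/*
 * Sketch of the wait-and-retry write loop that the removed
 * qemu-file-channel.c (below) open-coded and that qio_channel_writev_all()
 * now provides: on a non-blocking channel, a short or would-block write is
 * retried after waiting for the descriptor to become writable again. POSIX
 * poll()/write() stand in for the QIO channel calls.
 */
#include <errno.h>
#include <poll.h>
#include <stddef.h>
#include <unistd.h>

int write_all(int fd, const char *buf, size_t len)
{
    size_t done = 0;

    while (done < len) {
        ssize_t n = write(fd, buf + done, len - done);
        if (n > 0) {
            done += n;
            continue;
        }
        if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
            /* would block: wait until the fd is writable, then retry */
            struct pollfd pfd = { .fd = fd, .events = POLLOUT };
            poll(&pfd, 1, -1);
            continue;
        }
        return -1;   /* real error (or unexpected zero-length write) */
    }
    return 0;
}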
- */ - -#include "qemu/osdep.h" -#include "qemu-file-channel.h" -#include "qemu-file.h" -#include "io/channel-socket.h" -#include "io/channel-tls.h" -#include "qemu/iov.h" -#include "qemu/yank.h" -#include "yank_functions.h" - - -static ssize_t channel_writev_buffer(void *opaque, - struct iovec *iov, - int iovcnt, - int64_t pos, - Error **errp) -{ - QIOChannel *ioc = QIO_CHANNEL(opaque); - ssize_t done = 0; - struct iovec *local_iov = g_new(struct iovec, iovcnt); - struct iovec *local_iov_head = local_iov; - unsigned int nlocal_iov = iovcnt; - - nlocal_iov = iov_copy(local_iov, nlocal_iov, - iov, iovcnt, - 0, iov_size(iov, iovcnt)); - - while (nlocal_iov > 0) { - ssize_t len; - len = qio_channel_writev(ioc, local_iov, nlocal_iov, errp); - if (len == QIO_CHANNEL_ERR_BLOCK) { - if (qemu_in_coroutine()) { - qio_channel_yield(ioc, G_IO_OUT); - } else { - qio_channel_wait(ioc, G_IO_OUT); - } - continue; - } - if (len < 0) { - done = -EIO; - goto cleanup; - } - - iov_discard_front(&local_iov, &nlocal_iov, len); - done += len; - } - - cleanup: - g_free(local_iov_head); - return done; -} - - -static ssize_t channel_get_buffer(void *opaque, - uint8_t *buf, - int64_t pos, - size_t size, - Error **errp) -{ - QIOChannel *ioc = QIO_CHANNEL(opaque); - ssize_t ret; - - do { - ret = qio_channel_read(ioc, (char *)buf, size, errp); - if (ret < 0) { - if (ret == QIO_CHANNEL_ERR_BLOCK) { - if (qemu_in_coroutine()) { - qio_channel_yield(ioc, G_IO_IN); - } else { - qio_channel_wait(ioc, G_IO_IN); - } - } else { - return -EIO; - } - } - } while (ret == QIO_CHANNEL_ERR_BLOCK); - - return ret; -} - - -static int channel_close(void *opaque, Error **errp) -{ - int ret; - QIOChannel *ioc = QIO_CHANNEL(opaque); - ret = qio_channel_close(ioc, errp); - object_unref(OBJECT(ioc)); - return ret; -} - - -static int channel_shutdown(void *opaque, - bool rd, - bool wr, - Error **errp) -{ - QIOChannel *ioc = QIO_CHANNEL(opaque); - - if (qio_channel_has_feature(ioc, - QIO_CHANNEL_FEATURE_SHUTDOWN)) { - QIOChannelShutdown mode; - if (rd && wr) { - mode = QIO_CHANNEL_SHUTDOWN_BOTH; - } else if (rd) { - mode = QIO_CHANNEL_SHUTDOWN_READ; - } else { - mode = QIO_CHANNEL_SHUTDOWN_WRITE; - } - if (qio_channel_shutdown(ioc, mode, errp) < 0) { - return -EIO; - } - } - return 0; -} - - -static int channel_set_blocking(void *opaque, - bool enabled, - Error **errp) -{ - QIOChannel *ioc = QIO_CHANNEL(opaque); - - if (qio_channel_set_blocking(ioc, enabled, errp) < 0) { - return -1; - } - return 0; -} - -static QEMUFile *channel_get_input_return_path(void *opaque) -{ - QIOChannel *ioc = QIO_CHANNEL(opaque); - - return qemu_fopen_channel_output(ioc); -} - -static QEMUFile *channel_get_output_return_path(void *opaque) -{ - QIOChannel *ioc = QIO_CHANNEL(opaque); - - return qemu_fopen_channel_input(ioc); -} - -static const QEMUFileOps channel_input_ops = { - .get_buffer = channel_get_buffer, - .close = channel_close, - .shut_down = channel_shutdown, - .set_blocking = channel_set_blocking, - .get_return_path = channel_get_input_return_path, -}; - - -static const QEMUFileOps channel_output_ops = { - .writev_buffer = channel_writev_buffer, - .close = channel_close, - .shut_down = channel_shutdown, - .set_blocking = channel_set_blocking, - .get_return_path = channel_get_output_return_path, -}; - - -QEMUFile *qemu_fopen_channel_input(QIOChannel *ioc) -{ - object_ref(OBJECT(ioc)); - return qemu_fopen_ops(ioc, &channel_input_ops, true); -} - -QEMUFile *qemu_fopen_channel_output(QIOChannel *ioc) -{ - object_ref(OBJECT(ioc)); - return qemu_fopen_ops(ioc, 
&channel_output_ops, true); -} diff --git a/migration/qemu-file.c b/migration/qemu-file.c index 6338d8e2ff..2d5f74ffc2 100644 --- a/migration/qemu-file.c +++ b/migration/qemu-file.c @@ -23,6 +23,7 @@ */ #include "qemu/osdep.h" #include +#include "qemu/madvise.h" #include "qemu/error-report.h" #include "qemu/iov.h" #include "migration.h" @@ -34,15 +35,24 @@ #define MAX_IOV_SIZE MIN_CONST(IOV_MAX, 64) struct QEMUFile { - const QEMUFileOps *ops; const QEMUFileHooks *hooks; - void *opaque; + QIOChannel *ioc; + bool is_writable; - int64_t bytes_xfer; - int64_t xfer_limit; + /* + * Maximum amount of data in bytes to transfer during one + * rate limiting time window + */ + int64_t rate_limit_max; + /* + * Total amount of data in bytes queued for transfer + * during this rate limiting time window + */ + int64_t rate_limit_used; + + /* The sum of bytes transferred on the wire */ + int64_t total_transferred; - int64_t pos; /* start of buffer when writing, end of buffer - when reading */ int buf_index; int buf_size; /* 0 when writing */ uint8_t buf[IO_BUF_SIZE]; @@ -55,40 +65,54 @@ struct QEMUFile { Error *last_error_obj; /* has the file has been shutdown */ bool shutdown; - /* Whether opaque points to a QIOChannel */ - bool has_ioc; }; /* * Stop a file from being read/written - not all backing files can do this * typically only sockets can. + * + * TODO: convert to propagate Error objects instead of squashing + * to a fixed errno value */ int qemu_file_shutdown(QEMUFile *f) { - int ret; + int ret = 0; f->shutdown = true; - if (!f->ops->shut_down) { - return -ENOSYS; - } - ret = f->ops->shut_down(f->opaque, true, true, NULL); + /* + * We must set qemufile error before the real shutdown(), otherwise + * there can be a race window where we thought IO all went though + * (because last_error==NULL) but actually IO has already stopped. + * + * If without correct ordering, the race can happen like this: + * + * page receiver other thread + * ------------- ------------ + * qemu_get_buffer() + * do shutdown() + * returns 0 (buffer all zero) + * (we didn't check this retcode) + * try to detect IO error + * last_error==NULL, IO okay + * install ALL-ZERO page + * set last_error + * --> guest crash! 
+ */ if (!f->last_error) { qemu_file_set_error(f, -EIO); } - return ret; -} -/* - * Result: QEMUFile* for a 'return path' for comms in the opposite direction - * NULL if not available - */ -QEMUFile *qemu_file_get_return_path(QEMUFile *f) -{ - if (!f->ops->get_return_path) { - return NULL; + if (!qio_channel_has_feature(f->ioc, + QIO_CHANNEL_FEATURE_SHUTDOWN)) { + return -ENOSYS; } - return f->ops->get_return_path(f->opaque); + + if (qio_channel_shutdown(f->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL) < 0) { + ret = -EIO; + } + + return ret; } bool qemu_file_mode_is_not_valid(const char *mode) @@ -103,18 +127,37 @@ bool qemu_file_mode_is_not_valid(const char *mode) return false; } -QEMUFile *qemu_fopen_ops(void *opaque, const QEMUFileOps *ops, bool has_ioc) +static QEMUFile *qemu_file_new_impl(QIOChannel *ioc, bool is_writable) { QEMUFile *f; f = g_new0(QEMUFile, 1); - f->opaque = opaque; - f->ops = ops; - f->has_ioc = has_ioc; + object_ref(ioc); + f->ioc = ioc; + f->is_writable = is_writable; + return f; } +/* + * Result: QEMUFile* for a 'return path' for comms in the opposite direction + * NULL if not available + */ +QEMUFile *qemu_file_get_return_path(QEMUFile *f) +{ + return qemu_file_new_impl(f->ioc, !f->is_writable); +} + +QEMUFile *qemu_file_new_output(QIOChannel *ioc) +{ + return qemu_file_new_impl(ioc, true); +} + +QEMUFile *qemu_file_new_input(QIOChannel *ioc) +{ + return qemu_file_new_impl(ioc, false); +} void qemu_file_set_hooks(QEMUFile *f, const QEMUFileHooks *hooks) { @@ -138,6 +181,33 @@ int qemu_file_get_error_obj(QEMUFile *f, Error **errp) return f->last_error; } +/* + * Get last error for either stream f1 or f2 with optional Error*. + * The error returned (non-zero) can be either from f1 or f2. + * + * If any of the qemufile* is NULL, then skip the check on that file. + * + * When there is no error on both qemufile, zero is returned. + */ +int qemu_file_get_error_obj_any(QEMUFile *f1, QEMUFile *f2, Error **errp) +{ + int ret = 0; + + if (f1) { + ret = qemu_file_get_error_obj(f1, errp); + /* If there's already error detected, return */ + if (ret) { + return ret; + } + } + + if (f2) { + ret = qemu_file_get_error_obj(f2, errp); + } + + return ret; +} + /* * Set the last error for stream f with optional Error* */ @@ -173,7 +243,7 @@ void qemu_file_set_error(QEMUFile *f, int ret) bool qemu_file_is_writable(QEMUFile *f) { - return f->ops->writev_buffer; + return f->is_writable; } static void qemu_iovec_release_ram(QEMUFile *f) @@ -211,6 +281,7 @@ static void qemu_iovec_release_ram(QEMUFile *f) memset(f->may_free, 0, sizeof(f->may_free)); } + /** * Flushes QEMUFile buffer * @@ -219,10 +290,6 @@ static void qemu_iovec_release_ram(QEMUFile *f) */ void qemu_fflush(QEMUFile *f) { - ssize_t ret = 0; - ssize_t expect = 0; - Error *local_error = NULL; - if (!qemu_file_is_writable(f)) { return; } @@ -231,22 +298,18 @@ void qemu_fflush(QEMUFile *f) return; } if (f->iovcnt > 0) { - expect = iov_size(f->iov, f->iovcnt); - ret = f->ops->writev_buffer(f->opaque, f->iov, f->iovcnt, f->pos, - &local_error); + Error *local_error = NULL; + if (qio_channel_writev_all(f->ioc, + f->iov, f->iovcnt, + &local_error) < 0) { + qemu_file_set_error_obj(f, -EIO, local_error); + } else { + f->total_transferred += iov_size(f->iov, f->iovcnt); + } qemu_iovec_release_ram(f); } - if (ret >= 0) { - f->pos += ret; - } - /* We expect the QEMUFile write impl to send the full - * data set we requested, so sanity check that. - */ - if (ret != expect) { - qemu_file_set_error_obj(f, ret < 0 ? 
ret : -EIO, local_error); - } f->buf_index = 0; f->iovcnt = 0; } @@ -256,7 +319,7 @@ void ram_control_before_iterate(QEMUFile *f, uint64_t flags) int ret = 0; if (f->hooks && f->hooks->before_ram_iterate) { - ret = f->hooks->before_ram_iterate(f, f->opaque, flags, NULL); + ret = f->hooks->before_ram_iterate(f, flags, NULL); if (ret < 0) { qemu_file_set_error(f, ret); } @@ -268,7 +331,7 @@ void ram_control_after_iterate(QEMUFile *f, uint64_t flags) int ret = 0; if (f->hooks && f->hooks->after_ram_iterate) { - ret = f->hooks->after_ram_iterate(f, f->opaque, flags, NULL); + ret = f->hooks->after_ram_iterate(f, flags, NULL); if (ret < 0) { qemu_file_set_error(f, ret); } @@ -280,7 +343,7 @@ void ram_control_load_hook(QEMUFile *f, uint64_t flags, void *data) int ret = -EINVAL; if (f->hooks && f->hooks->hook_ram_load) { - ret = f->hooks->hook_ram_load(f, f->opaque, flags, data); + ret = f->hooks->hook_ram_load(f, flags, data); if (ret < 0) { qemu_file_set_error(f, ret); } @@ -300,16 +363,16 @@ size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset, uint64_t *bytes_sent) { if (f->hooks && f->hooks->save_page) { - int ret = f->hooks->save_page(f, f->opaque, block_offset, + int ret = f->hooks->save_page(f, block_offset, offset, size, bytes_sent); if (ret != RAM_SAVE_CONTROL_NOT_SUPP) { - f->bytes_xfer += size; + f->rate_limit_used += size; } if (ret != RAM_SAVE_CONTROL_DELAYED && ret != RAM_SAVE_CONTROL_NOT_SUPP) { if (bytes_sent && *bytes_sent > 0) { - qemu_update_position(f, *bytes_sent); + qemu_file_credit_transfer(f, *bytes_sent); } else if (ret < 0) { qemu_file_set_error(f, ret); } @@ -348,25 +411,37 @@ static ssize_t qemu_fill_buffer(QEMUFile *f) return 0; } - len = f->ops->get_buffer(f->opaque, f->buf + pending, f->pos, - IO_BUF_SIZE - pending, &local_error); + do { + len = qio_channel_read(f->ioc, + (char *)f->buf + pending, + IO_BUF_SIZE - pending, + &local_error); + if (len == QIO_CHANNEL_ERR_BLOCK) { + if (qemu_in_coroutine()) { + qio_channel_yield(f->ioc, G_IO_IN); + } else { + qio_channel_wait(f->ioc, G_IO_IN); + } + } else if (len < 0) { + len = -EIO; + } + } while (len == QIO_CHANNEL_ERR_BLOCK); + if (len > 0) { f->buf_size += len; - f->pos += len; + f->total_transferred += len; } else if (len == 0) { qemu_file_set_error_obj(f, -EIO, local_error); - } else if (len != -EAGAIN) { - qemu_file_set_error_obj(f, len, local_error); } else { - error_free(local_error); + qemu_file_set_error_obj(f, len, local_error); } return len; } -void qemu_update_position(QEMUFile *f, size_t size) +void qemu_file_credit_transfer(QEMUFile *f, size_t size) { - f->pos += size; + f->total_transferred += size; } /** Closes the file @@ -379,16 +454,16 @@ void qemu_update_position(QEMUFile *f, size_t size) */ int qemu_fclose(QEMUFile *f) { - int ret; + int ret, ret2; qemu_fflush(f); ret = qemu_file_get_error(f); - if (f->ops->close) { - int ret2 = f->ops->close(f->opaque, NULL); - if (ret >= 0) { - ret = ret2; - } + ret2 = qio_channel_close(f->ioc, NULL); + if (ret >= 0) { + ret = ret2; } + g_clear_pointer(&f->ioc, object_unref); + /* If any error was spotted before closing, we should report it * instead of the close() return value. 
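Earlier in this hunk, qemu_fill_buffer() is rewritten to retry a channel read that reports "would block", yielding when running in a coroutine and blocking-waiting otherwise. The standalone sketch below shows the same retry shape against a plain nonblocking POSIX fd; poll() stands in for the channel wait and EAGAIN for QIO_CHANNEL_ERR_BLOCK, so this is not QEMU's actual I/O path.

/* Sketch: retry a nonblocking read until data, EOF, or a real error. */
#include <errno.h>
#include <poll.h>
#include <unistd.h>

static ssize_t read_retry(int fd, void *buf, size_t len)
{
    for (;;) {
        ssize_t n = read(fd, buf, len);

        if (n >= 0) {
            return n;                      /* 0 == EOF, > 0 == data */
        }
        if (errno != EAGAIN && errno != EWOULDBLOCK) {
            return -EIO;                   /* squash real errors to -EIO */
        }
        /* Would block: wait until the fd is readable, then try again. */
        struct pollfd pfd = { .fd = fd, .events = POLLIN };
        poll(&pfd, 1, -1);
    }
}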
*/ @@ -456,7 +531,7 @@ void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, size_t size, return; } - f->bytes_xfer += size; + f->rate_limit_used += size; add_to_iovec(f, buf, size, may_free); } @@ -474,7 +549,7 @@ void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, size_t size) l = size; } memcpy(f->buf + f->buf_index, buf, l); - f->bytes_xfer += l; + f->rate_limit_used += l; add_buf_to_iovec(f, l); if (qemu_file_get_error(f)) { break; @@ -491,7 +566,7 @@ void qemu_put_byte(QEMUFile *f, int v) } f->buf[f->buf_index] = v; - f->bytes_xfer++; + f->rate_limit_used++; add_buf_to_iovec(f, 1); } @@ -647,9 +722,9 @@ int qemu_get_byte(QEMUFile *f) return result; } -int64_t qemu_ftell_fast(QEMUFile *f) +int64_t qemu_file_total_transferred_fast(QEMUFile *f) { - int64_t ret = f->pos; + int64_t ret = f->total_transferred; int i; for (i = 0; i < f->iovcnt; i++) { @@ -659,10 +734,10 @@ int64_t qemu_ftell_fast(QEMUFile *f) return ret; } -int64_t qemu_ftell(QEMUFile *f) +int64_t qemu_file_total_transferred(QEMUFile *f) { qemu_fflush(f); - return f->pos; + return f->total_transferred; } int qemu_file_rate_limit(QEMUFile *f) @@ -673,7 +748,7 @@ int qemu_file_rate_limit(QEMUFile *f) if (qemu_file_get_error(f)) { return 1; } - if (f->xfer_limit > 0 && f->bytes_xfer > f->xfer_limit) { + if (f->rate_limit_max > 0 && f->rate_limit_used > f->rate_limit_max) { return 1; } return 0; @@ -681,22 +756,22 @@ int qemu_file_rate_limit(QEMUFile *f) int64_t qemu_file_get_rate_limit(QEMUFile *f) { - return f->xfer_limit; + return f->rate_limit_max; } void qemu_file_set_rate_limit(QEMUFile *f, int64_t limit) { - f->xfer_limit = limit; + f->rate_limit_max = limit; } void qemu_file_reset_rate_limit(QEMUFile *f) { - f->bytes_xfer = 0; + f->rate_limit_used = 0; } -void qemu_file_update_transfer(QEMUFile *f, int64_t len) +void qemu_file_acct_rate_limit(QEMUFile *f, int64_t len) { - f->bytes_xfer += len; + f->rate_limit_used += len; } void qemu_put_be16(QEMUFile *f, unsigned int v) @@ -850,19 +925,18 @@ void qemu_put_counted_string(QEMUFile *f, const char *str) */ void qemu_file_set_blocking(QEMUFile *f, bool block) { - if (f->ops->set_blocking) { - f->ops->set_blocking(f->opaque, block, NULL); - } + qio_channel_set_blocking(f->ioc, block, NULL); } /* - * Return the ioc object if it's a migration channel. Note: it can return NULL - * for callers passing in a non-migration qemufile. E.g. see qemu_fopen_bdrv() - * and its usage in e.g. load_snapshot(). So we need to check against NULL - * before using it. If without the check, migration_incoming_state_destroy() - * could fail for load_snapshot(). + * qemu_file_get_ioc: + * + * Get the ioc object for the file, without incrementing + * the reference count. + * + * Returns: the ioc object */ QIOChannel *qemu_file_get_ioc(QEMUFile *file) { - return file->has_ioc ? QIO_CHANNEL(file->opaque) : NULL; + return file->ioc; } diff --git a/migration/qemu-file.h b/migration/qemu-file.h index 3f36d4dc8c..fa13d04d78 100644 --- a/migration/qemu-file.h +++ b/migration/qemu-file.h @@ -29,47 +29,12 @@ #include "exec/cpu-common.h" #include "io/channel.h" -/* Read a chunk of data from a file at the given position. The pos argument - * can be ignored if the file is only be used for streaming. The number of - * bytes actually read should be returned. - */ -typedef ssize_t (QEMUFileGetBufferFunc)(void *opaque, uint8_t *buf, - int64_t pos, size_t size, - Error **errp); - -/* Close a file - * - * Return negative error number on error, 0 or positive value on success. 
- * - * The meaning of return value on success depends on the specific back-end being - * used. - */ -typedef int (QEMUFileCloseFunc)(void *opaque, Error **errp); - -/* Called to return the OS file descriptor associated to the QEMUFile. - */ -typedef int (QEMUFileGetFD)(void *opaque); - -/* Called to change the blocking mode of the file - */ -typedef int (QEMUFileSetBlocking)(void *opaque, bool enabled, Error **errp); - -/* - * This function writes an iovec to file. The handler must write all - * of the data or return a negative errno value. - */ -typedef ssize_t (QEMUFileWritevBufferFunc)(void *opaque, struct iovec *iov, - int iovcnt, int64_t pos, - Error **errp); - /* * This function provides hooks around different * stages of RAM migration. - * 'opaque' is the backend specific data in QEMUFile * 'data' is call specific data associated with the 'flags' value */ -typedef int (QEMURamHookFunc)(QEMUFile *f, void *opaque, uint64_t flags, - void *data); +typedef int (QEMURamHookFunc)(QEMUFile *f, uint64_t flags, void *data); /* * Constants used by ram_control_* hooks @@ -84,34 +49,11 @@ typedef int (QEMURamHookFunc)(QEMUFile *f, void *opaque, uint64_t flags, * This function allows override of where the RAM page * is saved (such as RDMA, for example.) */ -typedef size_t (QEMURamSaveFunc)(QEMUFile *f, void *opaque, - ram_addr_t block_offset, - ram_addr_t offset, - size_t size, - uint64_t *bytes_sent); - -/* - * Return a QEMUFile for comms in the opposite direction - */ -typedef QEMUFile *(QEMURetPathFunc)(void *opaque); - -/* - * Stop any read or write (depending on flags) on the underlying - * transport on the QEMUFile. - * Existing blocking reads/writes must be woken - * Returns 0 on success, -err on error - */ -typedef int (QEMUFileShutdownFunc)(void *opaque, bool rd, bool wr, - Error **errp); - -typedef struct QEMUFileOps { - QEMUFileGetBufferFunc *get_buffer; - QEMUFileCloseFunc *close; - QEMUFileSetBlocking *set_blocking; - QEMUFileWritevBufferFunc *writev_buffer; - QEMURetPathFunc *get_return_path; - QEMUFileShutdownFunc *shut_down; -} QEMUFileOps; +typedef size_t (QEMURamSaveFunc)(QEMUFile *f, + ram_addr_t block_offset, + ram_addr_t offset, + size_t size, + uint64_t *bytes_sent); typedef struct QEMUFileHooks { QEMURamHookFunc *before_ram_iterate; @@ -120,12 +62,41 @@ typedef struct QEMUFileHooks { QEMURamSaveFunc *save_page; } QEMUFileHooks; -QEMUFile *qemu_fopen_ops(void *opaque, const QEMUFileOps *ops, bool has_ioc); +QEMUFile *qemu_file_new_input(QIOChannel *ioc); +QEMUFile *qemu_file_new_output(QIOChannel *ioc); void qemu_file_set_hooks(QEMUFile *f, const QEMUFileHooks *hooks); -int qemu_get_fd(QEMUFile *f); int qemu_fclose(QEMUFile *f); -int64_t qemu_ftell(QEMUFile *f); -int64_t qemu_ftell_fast(QEMUFile *f); + +/* + * qemu_file_total_transferred: + * + * Report the total number of bytes transferred with + * this file. + * + * For writable files, any pending buffers will be + * flushed, so the reported value will be equal to + * the number of bytes transferred on the wire. + * + * For readable files, the reported value will be + * equal to the number of bytes transferred on the + * wire. + * + * Returns: the total bytes transferred + */ +int64_t qemu_file_total_transferred(QEMUFile *f); + +/* + * qemu_file_total_transferred_fast: + * + * As qemu_file_total_transferred except for writable + * files, where no flush is performed and the reported + * amount will include the size of any queued buffers, + * on top of the amount actually transferred. 
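The doc comment above spells out the difference between the two counters: the "fast" variant skips the flush and simply adds whatever is still queued in the iovec array. A minimal sketch of that arithmetic, with MiniFile standing in for the real QEMUFile fields:

/* Sketch: total bytes = bytes already on the wire + bytes still queued. */
#include <stdint.h>
#include <stdio.h>
#include <sys/uio.h>

typedef struct MiniFile {
    int64_t total_transferred;  /* bytes already written to the channel */
    struct iovec iov[8];
    int iovcnt;                 /* entries queued but not yet flushed */
} MiniFile;

static int64_t total_transferred_fast(const MiniFile *f)
{
    int64_t ret = f->total_transferred;

    for (int i = 0; i < f->iovcnt; i++) {
        ret += f->iov[i].iov_len;   /* include queued-but-unflushed data */
    }
    return ret;
}

int main(void)
{
    MiniFile f = { .total_transferred = 4096, .iovcnt = 1 };
    f.iov[0].iov_len = 512;
    printf("fast total = %lld\n", (long long)total_transferred_fast(&f));
    return 0;
}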
+ * + * Returns: the total bytes transferred and queued + */ +int64_t qemu_file_total_transferred_fast(QEMUFile *f); + /* * put_buffer without copying the buffer. * The buffer should be available till it is sent asynchronously. @@ -150,12 +121,27 @@ int qemu_put_qemu_file(QEMUFile *f_des, QEMUFile *f_src); */ int qemu_peek_byte(QEMUFile *f, int offset); void qemu_file_skip(QEMUFile *f, int size); -void qemu_update_position(QEMUFile *f, size_t size); +/* + * qemu_file_credit_transfer: + * + * Report on a number of bytes that have been transferred + * out of band from the main file object I/O methods. This + * accounting information tracks the total migration traffic. + */ +void qemu_file_credit_transfer(QEMUFile *f, size_t size); void qemu_file_reset_rate_limit(QEMUFile *f); -void qemu_file_update_transfer(QEMUFile *f, int64_t len); +/* + * qemu_file_acct_rate_limit: + * + * Report on a number of bytes the have been transferred + * out of band from the main file object I/O methods, and + * need to be applied to the rate limiting calcuations + */ +void qemu_file_acct_rate_limit(QEMUFile *f, int64_t len); void qemu_file_set_rate_limit(QEMUFile *f, int64_t new_rate); int64_t qemu_file_get_rate_limit(QEMUFile *f); int qemu_file_get_error_obj(QEMUFile *f, Error **errp); +int qemu_file_get_error_obj_any(QEMUFile *f1, QEMUFile *f2, Error **errp); void qemu_file_set_error_obj(QEMUFile *f, int ret, Error *err); void qemu_file_set_error(QEMUFile *f, int ret); int qemu_file_shutdown(QEMUFile *f); diff --git a/migration/ram.c b/migration/ram.c index 7a43bfd7af..f25ebd9620 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -30,7 +30,9 @@ #include "qemu/cutils.h" #include "qemu/bitops.h" #include "qemu/bitmap.h" +#include "qemu/madvise.h" #include "qemu/main-loop.h" +#include "io/channel-null.h" #include "xbzrle.h" #include "ram.h" #include "migration.h" @@ -56,6 +58,8 @@ #include "multifd.h" #include "sysemu/runstate.h" +#include "hw/boards.h" /* for machine_dump_guest_core() */ + #if defined(__linux__) #include "qemu/userfaultfd.h" #endif /* defined(__linux__) */ @@ -79,11 +83,6 @@ /* 0x80 is reserved in migration.h start with 0x100 next */ #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100 -static inline bool is_zero_range(uint8_t *p, uint64_t size) -{ - return buffer_is_zero(p, size); -} - XBZRLECacheStats xbzrle_counters; /* struct contains XBZRLE cache and a static page @@ -297,6 +296,20 @@ struct RAMSrcPageRequest { QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req; }; +typedef struct { + /* + * Cached ramblock/offset values if preempted. They're only meaningful if + * preempted==true below. + */ + RAMBlock *ram_block; + unsigned long ram_page; + /* + * Whether a postcopy preemption just happened. Will be reset after + * precopy recovered to background migration. + */ + bool preempted; +} PostcopyPreemptState; + /* State of RAM for migration */ struct RAMState { /* QEMUFile used for this migration */ @@ -328,7 +341,8 @@ struct RAMState { uint64_t xbzrle_bytes_prev; /* Start using XBZRLE (e.g., after the first round). 
*/ bool xbzrle_enabled; - + /* Are we on the last stage of migration */ + bool last_stage; /* compression statistics since the beginning of the period */ /* amount of count that no free thread to compress data */ uint64_t compress_thread_busy_prev; @@ -350,6 +364,14 @@ struct RAMState { /* Queue of outstanding page requests from the destination */ QemuMutex src_page_req_mutex; QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests; + + /* Postcopy preemption informations */ + PostcopyPreemptState postcopy_preempt_state; + /* + * Current channel we're using on src VM. Only valid if postcopy-preempt + * is enabled. + */ + unsigned int postcopy_channel; }; typedef struct RAMState RAMState; @@ -357,6 +379,17 @@ static RAMState *ram_state; static NotifierWithReturnList precopy_notifier_list; +static void postcopy_preempt_reset(RAMState *rs) +{ + memset(&rs->postcopy_preempt_state, 0, sizeof(PostcopyPreemptState)); +} + +/* Whether postcopy has queued requests? */ +static bool postcopy_has_request(RAMState *rs) +{ + return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests); +} + void precopy_infrastructure_init(void) { notifier_with_return_list_init(&precopy_notifier_list); @@ -389,6 +422,23 @@ uint64_t ram_bytes_remaining(void) MigrationStats ram_counters; +static void ram_transferred_add(uint64_t bytes) +{ + if (runstate_is_running()) { + ram_counters.precopy_bytes += bytes; + } else if (migration_in_postcopy()) { + ram_counters.postcopy_bytes += bytes; + } else { + ram_counters.downtime_bytes += bytes; + } + ram_counters.transferred += bytes; +} + +void dirty_sync_missed_zero_copy(void) +{ + ram_counters.dirty_sync_missed_zero_copy++; +} + /* used by the search for pages to send */ struct PageSearchStatus { /* Current block being searched */ @@ -397,6 +447,28 @@ struct PageSearchStatus { unsigned long page; /* Set once we wrap around */ bool complete_round; + /* + * [POSTCOPY-ONLY] Whether current page is explicitly requested by + * postcopy. When set, the request is "urgent" because the dest QEMU + * threads are waiting for us. + */ + bool postcopy_requested; + /* + * [POSTCOPY-ONLY] The target channel to use to send current page. + * + * Note: This may _not_ match with the value in postcopy_requested + * above. Let's imagine the case where the postcopy request is exactly + * the page that we're sending in progress during precopy. In this case + * we'll have postcopy_requested set to true but the target channel + * will be the precopy channel (so that we don't split brain on that + * specific page since the precopy channel already contains partial of + * that page data). + * + * Besides that specific use case, postcopy_target_channel should + * always be equal to postcopy_requested, because by default we send + * postcopy pages via postcopy preempt channel. 
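The ram_transferred_add() helper added earlier in this hunk classifies every transferred byte into the precopy, downtime or postcopy bucket on top of the global total. A simplified standalone sketch of that bookkeeping; the MiniRamCounters struct and the two boolean phase flags are stand-ins for QEMU's ram_counters and its runstate/postcopy predicates.

/* Sketch: attribute migrated bytes to the phase they were sent in. */
#include <stdbool.h>
#include <stdint.h>

typedef struct {
    uint64_t precopy_bytes;
    uint64_t downtime_bytes;
    uint64_t postcopy_bytes;
    uint64_t transferred;       /* overall total, always incremented */
} MiniRamCounters;

static void transferred_add(MiniRamCounters *c, bool guest_running,
                            bool in_postcopy, uint64_t bytes)
{
    if (guest_running) {
        c->precopy_bytes += bytes;    /* guest still running: precopy */
    } else if (in_postcopy) {
        c->postcopy_bytes += bytes;   /* after switchover: postcopy */
    } else {
        c->downtime_bytes += bytes;   /* guest paused, not yet postcopy */
    }
    c->transferred += bytes;
}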
+ */ + bool postcopy_target_channel; }; typedef struct PageSearchStatus PageSearchStatus; @@ -438,8 +510,6 @@ static QemuThread *compress_threads; */ static QemuMutex comp_done_lock; static QemuCond comp_done_cond; -/* The empty QEMUFileOps will be used by file in CompressParam */ -static const QEMUFileOps empty_ops = { }; static QEMUFile *decomp_file; static DecompressParam *decomp_param; @@ -450,6 +520,9 @@ static QemuCond decomp_done_cond; static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block, ram_addr_t offset, uint8_t *source_buf); +static void postcopy_preempt_restore(RAMState *rs, PageSearchStatus *pss, + bool postcopy_requested); + static void *do_data_compress(void *opaque) { CompressParam *param = opaque; @@ -550,7 +623,8 @@ static int compress_threads_save_setup(void) /* comp_param[i].file is just used as a dummy buffer to save data, * set its ops to empty. */ - comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops, false); + comp_param[i].file = qemu_file_new_output( + QIO_CHANNEL(qio_channel_null_new())); comp_param[i].done = true; comp_param[i].quit = false; qemu_mutex_init(&comp_param[i].mutex); @@ -639,6 +713,15 @@ static void mig_throttle_guest_down(uint64_t bytes_dirty_period, } } +void mig_throttle_counter_reset(void) +{ + RAMState *rs = ram_state; + + rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + rs->num_dirty_pages_period = 0; + rs->bytes_xfer_prev = ram_counters.transferred; +} + /** * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache * @@ -677,11 +760,10 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr) * @current_addr: addr of the page * @block: block that contains the page we want to send * @offset: offset inside the block for the page - * @last_stage: if we are at the completion stage */ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data, ram_addr_t current_addr, RAMBlock *block, - ram_addr_t offset, bool last_stage) + ram_addr_t offset) { int encoded_len = 0, bytes_xbzrle; uint8_t *prev_cached_page; @@ -689,7 +771,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data, if (!cache_is_cached(XBZRLE.cache, current_addr, ram_counters.dirty_sync_count)) { xbzrle_counters.cache_miss++; - if (!last_stage) { + if (!rs->last_stage) { if (cache_insert(XBZRLE.cache, current_addr, *current_data, ram_counters.dirty_sync_count) == -1) { return -1; @@ -728,7 +810,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data, * Update the cache contents, so that it corresponds to the data * sent, in all cases except where we skip the page. */ - if (!last_stage && encoded_len != 0) { + if (!rs->last_stage && encoded_len != 0) { memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE); /* * In the case where we couldn't compress, ensure that the caller @@ -761,7 +843,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data, * RAM_SAVE_FLAG_CONTINUE. 
*/ xbzrle_counters.bytes += bytes_xbzrle - 8; - ram_counters.transferred += bytes_xbzrle; + ram_transferred_add(bytes_xbzrle); return 1; } @@ -789,8 +871,7 @@ unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb, return find_next_bit(bitmap, size, start); } -static void migration_clear_memory_region_dirty_bitmap(RAMState *rs, - RAMBlock *rb, +static void migration_clear_memory_region_dirty_bitmap(RAMBlock *rb, unsigned long page) { uint8_t shift; @@ -812,14 +893,13 @@ static void migration_clear_memory_region_dirty_bitmap(RAMState *rs, assert(shift >= 6); size = 1ULL << (TARGET_PAGE_BITS + shift); - start = (((ram_addr_t)page) << TARGET_PAGE_BITS) & (-size); + start = QEMU_ALIGN_DOWN((ram_addr_t)page << TARGET_PAGE_BITS, size); trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page); memory_region_clear_dirty_bitmap(rb->mr, start, size); } static void -migration_clear_memory_region_dirty_bitmap_range(RAMState *rs, - RAMBlock *rb, +migration_clear_memory_region_dirty_bitmap_range(RAMBlock *rb, unsigned long start, unsigned long npages) { @@ -832,10 +912,45 @@ migration_clear_memory_region_dirty_bitmap_range(RAMState *rs, * exclusive. */ for (i = chunk_start; i < chunk_end; i += chunk_pages) { - migration_clear_memory_region_dirty_bitmap(rs, rb, i); + migration_clear_memory_region_dirty_bitmap(rb, i); } } +/* + * colo_bitmap_find_diry:find contiguous dirty pages from start + * + * Returns the page offset within memory region of the start of the contiguout + * dirty page + * + * @rs: current RAM state + * @rb: RAMBlock where to search for dirty pages + * @start: page where we start the search + * @num: the number of contiguous dirty pages + */ +static inline +unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb, + unsigned long start, unsigned long *num) +{ + unsigned long size = rb->used_length >> TARGET_PAGE_BITS; + unsigned long *bitmap = rb->bmap; + unsigned long first, next; + + *num = 0; + + if (ramblock_is_ignored(rb)) { + return size; + } + + first = find_next_bit(bitmap, size, start); + if (first >= size) { + return first; + } + next = find_next_zero_bit(bitmap, size, first + 1); + assert(next >= first); + *num = next - first; + return first; +} + static inline bool migration_bitmap_clear_dirty(RAMState *rs, RAMBlock *rb, unsigned long page) @@ -850,7 +965,7 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs, * the page in the chunk we clear the remote dirty bitmap for all. * Clearing it earlier won't be a problem, but too late will. */ - migration_clear_memory_region_dirty_bitmap(rs, rb, page); + migration_clear_memory_region_dirty_bitmap(rb, page); ret = test_and_clear_bit(page, rb->bmap); if (ret) { @@ -860,6 +975,81 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs, return ret; } +static void dirty_bitmap_clear_section(MemoryRegionSection *section, + void *opaque) +{ + const hwaddr offset = section->offset_within_region; + const hwaddr size = int128_get64(section->size); + const unsigned long start = offset >> TARGET_PAGE_BITS; + const unsigned long npages = size >> TARGET_PAGE_BITS; + RAMBlock *rb = section->mr->ram_block; + uint64_t *cleared_bits = opaque; + + /* + * We don't grab ram_state->bitmap_mutex because we expect to run + * only when starting migration or during postcopy recovery where + * we don't have concurrent access. 
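colo_bitmap_find_dirty(), added earlier in this hunk, returns the start and length of a contiguous run of dirty pages so COLO can flush them in batches. A standalone sketch of the same scan over a plain bool array; QEMU's packed bitmap and its find_next_bit()/find_next_zero_bit() helpers are replaced by simple loops here.

/* Sketch: find the first dirty run at or after 'start'. */
#include <stdbool.h>
#include <stddef.h>

/* Returns the index of the first dirty page, or 'size' if none; the run
 * length is stored in *num (0 when nothing was found). */
static size_t find_dirty_run(const bool *dirty, size_t size,
                             size_t start, size_t *num)
{
    size_t first = start, next;

    *num = 0;
    while (first < size && !dirty[first]) {
        first++;
    }
    if (first >= size) {
        return size;
    }
    next = first + 1;
    while (next < size && dirty[next]) {
        next++;
    }
    *num = next - first;
    return first;
}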
+ */ + if (!migration_in_postcopy() && !migrate_background_snapshot()) { + migration_clear_memory_region_dirty_bitmap_range(rb, start, npages); + } + *cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages); + bitmap_clear(rb->bmap, start, npages); +} + +/* + * Exclude all dirty pages from migration that fall into a discarded range as + * managed by a RamDiscardManager responsible for the mapped memory region of + * the RAMBlock. Clear the corresponding bits in the dirty bitmaps. + * + * Discarded pages ("logically unplugged") have undefined content and must + * not get migrated, because even reading these pages for migration might + * result in undesired behavior. + * + * Returns the number of cleared bits in the RAMBlock dirty bitmap. + * + * Note: The result is only stable while migrating (precopy/postcopy). + */ +static uint64_t ramblock_dirty_bitmap_clear_discarded_pages(RAMBlock *rb) +{ + uint64_t cleared_bits = 0; + + if (rb->mr && rb->bmap && memory_region_has_ram_discard_manager(rb->mr)) { + RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); + MemoryRegionSection section = { + .mr = rb->mr, + .offset_within_region = 0, + .size = int128_make64(qemu_ram_get_used_length(rb)), + }; + + ram_discard_manager_replay_discarded(rdm, §ion, + dirty_bitmap_clear_section, + &cleared_bits); + } + return cleared_bits; +} + +/* + * Check if a host-page aligned page falls into a discarded range as managed by + * a RamDiscardManager responsible for the mapped memory region of the RAMBlock. + * + * Note: The result is only stable while migrating (precopy/postcopy). + */ +bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start) +{ + if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) { + RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); + MemoryRegionSection section = { + .mr = rb->mr, + .offset_within_region = start, + .size = int128_make64(qemu_ram_pagesize(rb)), + }; + + return !ram_discard_manager_is_populated(rdm, §ion); + } + return false; +} + /* Called with RCU critical section */ static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb) { @@ -1044,6 +1234,15 @@ static void migration_bitmap_sync_precopy(RAMState *rs) } } +static void ram_release_page(const char *rbname, uint64_t offset) +{ + if (!migrate_release_ram() || !migration_in_postcopy()) { + return; + } + + ram_discard_range(rbname, offset, TARGET_PAGE_SIZE); +} + /** * save_zero_page_to_file: send the zero page to the file * @@ -1061,10 +1260,11 @@ static int save_zero_page_to_file(RAMState *rs, QEMUFile *file, uint8_t *p = block->host + offset; int len = 0; - if (is_zero_range(p, TARGET_PAGE_SIZE)) { + if (buffer_is_zero(p, TARGET_PAGE_SIZE)) { len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO); qemu_put_byte(file, 0); len += 1; + ram_release_page(block->idstr, offset); } return len; } @@ -1084,21 +1284,12 @@ static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset) if (len) { ram_counters.duplicate++; - ram_counters.transferred += len; + ram_transferred_add(len); return 1; } return -1; } -static void ram_release_pages(const char *rbname, uint64_t offset, int pages) -{ - if (!migrate_release_ram() || !migration_in_postcopy()) { - return; - } - - ram_discard_range(rbname, offset, ((ram_addr_t)pages) << TARGET_PAGE_BITS); -} - /* * @pages: the number of pages written by the control path, * < 0 - error @@ -1120,7 +1311,7 @@ static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset, } if 
(bytes_xmit) { - ram_counters.transferred += bytes_xmit; + ram_transferred_add(bytes_xmit); *pages = 1; } @@ -1151,16 +1342,16 @@ static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset, static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset, uint8_t *buf, bool async) { - ram_counters.transferred += save_page_header(rs, rs->f, block, - offset | RAM_SAVE_FLAG_PAGE); + ram_transferred_add(save_page_header(rs, rs->f, block, + offset | RAM_SAVE_FLAG_PAGE)); if (async) { qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE, - migrate_release_ram() & + migrate_release_ram() && migration_in_postcopy()); } else { qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE); } - ram_counters.transferred += TARGET_PAGE_SIZE; + ram_transferred_add(TARGET_PAGE_SIZE); ram_counters.normal++; return 1; } @@ -1176,9 +1367,8 @@ static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset, * @rs: current RAM state * @block: block that contains the page we want to send * @offset: offset inside the block for the page - * @last_stage: if we are at the completion stage */ -static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage) +static int ram_save_page(RAMState *rs, PageSearchStatus *pss) { int pages = -1; uint8_t *p; @@ -1193,8 +1383,8 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage) XBZRLE_cache_lock(); if (rs->xbzrle_enabled && !migration_in_postcopy()) { pages = save_xbzrle_page(rs, &p, current_addr, block, - offset, last_stage); - if (!last_stage) { + offset); + if (!rs->last_stage) { /* Can't send this cached data async, since the cache page * might get updated before it gets to the wire */ @@ -1227,13 +1417,11 @@ static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block, ram_addr_t offset, uint8_t *source_buf) { RAMState *rs = ram_state; - uint8_t *p = block->host + (offset & TARGET_PAGE_MASK); - bool zero_page = false; + uint8_t *p = block->host + offset; int ret; if (save_zero_page_to_file(rs, f, block, offset)) { - zero_page = true; - goto exit; + return true; } save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE); @@ -1248,18 +1436,14 @@ static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block, if (ret < 0) { qemu_file_set_error(migrate_get_current()->to_dst_file, ret); error_report("compressed data failed!"); - return false; } - -exit: - ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1); - return zero_page; + return false; } static void update_compress_thread_counts(const CompressParam *param, int bytes_xmit) { - ram_counters.transferred += bytes_xmit; + ram_transferred_add(bytes_xmit); if (param->zero_page) { ram_counters.duplicate++; @@ -1360,6 +1544,13 @@ retry: */ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again) { + /* + * This is not a postcopy requested page, mark it "not urgent", and use + * precopy channel to send it. 
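save_zero_page_to_file(), reworked earlier in this hunk, now releases the source page right where the zero page is detected instead of through a separate ram_release_pages() call afterwards. A standalone sketch of that fast path; the 8-byte "header", the discard callback and the function names are illustrative assumptions, not QEMU's wire format.

/* Sketch: emit only a marker for an all-zero page, optionally discard it. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096

static bool page_is_zero(const uint8_t *p)
{
    static const uint8_t zeroes[PAGE_SIZE];   /* naive buffer_is_zero() stand-in */
    return memcmp(p, zeroes, PAGE_SIZE) == 0;
}

/* Returns the number of bytes queued, 0 when the page is not zero. */
static size_t save_zero_page_sketch(const uint8_t *page, bool release_ram,
                                    void (*discard)(const uint8_t *, size_t))
{
    if (!page_is_zero(page)) {
        return 0;
    }
    size_t len = 8 /* pretend page header */ + 1 /* zero marker byte */;

    if (release_ram && discard) {
        discard(page, PAGE_SIZE);   /* source page is no longer needed */
    }
    return len;
}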
+ */ + pss->postcopy_requested = false; + pss->postcopy_target_channel = RAM_CHANNEL_PRECOPY; + pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page); if (pss->complete_round && pss->block == rs->last_seen_block && pss->page >= rs->last_page) { @@ -1419,28 +1610,33 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again) */ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset) { + struct RAMSrcPageRequest *entry; RAMBlock *block = NULL; - if (QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests)) { + if (!postcopy_has_request(rs)) { return NULL; } QEMU_LOCK_GUARD(&rs->src_page_req_mutex); - if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) { - struct RAMSrcPageRequest *entry = - QSIMPLEQ_FIRST(&rs->src_page_requests); - block = entry->rb; - *offset = entry->offset; - if (entry->len > TARGET_PAGE_SIZE) { - entry->len -= TARGET_PAGE_SIZE; - entry->offset += TARGET_PAGE_SIZE; - } else { - memory_region_unref(block->mr); - QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); - g_free(entry); - migration_consume_urgent_request(); - } + /* + * This should _never_ change even after we take the lock, because no one + * should be taking anything off the request list other than us. + */ + assert(postcopy_has_request(rs)); + + entry = QSIMPLEQ_FIRST(&rs->src_page_requests); + block = entry->rb; + *offset = entry->offset; + + if (entry->len > TARGET_PAGE_SIZE) { + entry->len -= TARGET_PAGE_SIZE; + entry->offset += TARGET_PAGE_SIZE; + } else { + memory_region_unref(block->mr); + QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); + g_free(entry); + migration_consume_urgent_request(); } return block; @@ -1497,7 +1693,7 @@ static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss, /* Check if page is from UFFD-managed region. */ if (pss->block->flags & RAM_UF_WRITEPROTECT) { void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS); - uint64_t run_length = (pss->page - start_page + 1) << TARGET_PAGE_BITS; + uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS; /* Flush async buffers before un-protect. */ qemu_fflush(rs->f); @@ -1566,25 +1762,70 @@ out: return ret; } +static inline void populate_read_range(RAMBlock *block, ram_addr_t offset, + ram_addr_t size) +{ + const ram_addr_t end = offset + size; + + /* + * We read one byte of each page; this will preallocate page tables if + * required and populate the shared zeropage on MAP_PRIVATE anonymous memory + * where no page was populated yet. This might require adaption when + * supporting other mappings, like shmem. + */ + for (; offset < end; offset += block->page_size) { + char tmp = *((char *)block->host + offset); + + /* Don't optimize the read out */ + asm volatile("" : "+r" (tmp)); + } +} + +static inline int populate_read_section(MemoryRegionSection *section, + void *opaque) +{ + const hwaddr size = int128_get64(section->size); + hwaddr offset = section->offset_within_region; + RAMBlock *block = section->mr->ram_block; + + populate_read_range(block, offset, size); + return 0; +} + /* - * ram_block_populate_pages: populate memory in the RAM block by reading - * an integer from the beginning of each page. + * ram_block_populate_read: preallocate page tables and populate pages in the + * RAM block by reading a byte of each page. * * Since it's solely used for userfault_fd WP feature, here we just * hardcode page size to qemu_real_host_page_size. 
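populate_read_range(), shown just above, reads one byte per host page purely for its side effect of populating page tables before userfaultfd write protection is armed. A standalone sketch of the same idea; the real code uses an inline asm barrier to keep the read alive, while a volatile access serves the same purpose here.

/* Sketch: touch one byte of every page so the kernel populates it. */
#include <stddef.h>
#include <stdint.h>

static void populate_read_range_sketch(const uint8_t *host, size_t size,
                                       size_t page_size)
{
    for (size_t offset = 0; offset < size; offset += page_size) {
        volatile uint8_t tmp = host[offset];  /* read must not be optimized out */
        (void)tmp;
    }
}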
* * @block: RAM block to populate */ -static void ram_block_populate_pages(RAMBlock *block) +static void ram_block_populate_read(RAMBlock *rb) { - char *ptr = (char *) block->host; + /* + * Skip populating all pages that fall into a discarded range as managed by + * a RamDiscardManager responsible for the mapped memory region of the + * RAMBlock. Such discarded ("logically unplugged") parts of a RAMBlock + * must not get populated automatically. We don't have to track + * modifications via userfaultfd WP reliably, because these pages will + * not be part of the migration stream either way -- see + * ramblock_dirty_bitmap_exclude_discarded_pages(). + * + * Note: The result is only stable while migrating (precopy/postcopy). + */ + if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) { + RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); + MemoryRegionSection section = { + .mr = rb->mr, + .offset_within_region = 0, + .size = rb->mr->size, + }; - for (ram_addr_t offset = 0; offset < block->used_length; - offset += qemu_real_host_page_size) { - char tmp = *(ptr + offset); - - /* Don't optimize the read out */ - asm volatile("" : "+r" (tmp)); + ram_discard_manager_replay_populated(rdm, §ion, + populate_read_section, NULL); + } else { + populate_read_range(rb, 0, rb->used_length); } } @@ -1611,7 +1852,7 @@ void ram_write_tracking_prepare(void) * UFFDIO_WRITEPROTECT_MODE_WP mode setting would silently skip * pages with pte_none() entries in page table. */ - ram_block_populate_pages(block); + ram_block_populate_read(block); } } @@ -1646,13 +1887,14 @@ int ram_write_tracking_start(void) block->max_length, UFFDIO_REGISTER_MODE_WP, NULL)) { goto fail; } + block->flags |= RAM_UF_WRITEPROTECT; + memory_region_ref(block->mr); + /* Apply UFFD write protection to the block memory range */ if (uffd_change_protection(rs->uffdio_fd, block->host, block->max_length, true, false)) { goto fail; } - block->flags |= RAM_UF_WRITEPROTECT; - memory_region_ref(block->mr); trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size, block->host, block->max_length); @@ -1760,6 +2002,55 @@ void ram_write_tracking_stop(void) } #endif /* defined(__linux__) */ +/* + * Check whether two addr/offset of the ramblock falls onto the same host huge + * page. Returns true if so, false otherwise. + */ +static bool offset_on_same_huge_page(RAMBlock *rb, uint64_t addr1, + uint64_t addr2) +{ + size_t page_size = qemu_ram_pagesize(rb); + + addr1 = ROUND_DOWN(addr1, page_size); + addr2 = ROUND_DOWN(addr2, page_size); + + return addr1 == addr2; +} + +/* + * Whether a previous preempted precopy huge page contains current requested + * page? Returns true if so, false otherwise. + * + * This should really happen very rarely, because it means when we were sending + * during background migration for postcopy we're sending exactly the page that + * some vcpu got faulted on on dest node. When it happens, we probably don't + * need to do much but drop the request, because we know right after we restore + * the precopy stream it'll be serviced. It'll slightly affect the order of + * postcopy requests to be serviced (e.g. it'll be the same as we move current + * request to the end of the queue) but it shouldn't be a big deal. The most + * imporant thing is we can _never_ try to send a partial-sent huge page on the + * POSTCOPY channel again, otherwise that huge page will got "split brain" on + * two channels (PRECOPY, POSTCOPY). 
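offset_on_same_huge_page(), added earlier in this hunk, only has to compare the two offsets after rounding them down to the huge-page boundary. A standalone sketch using a mask, which assumes page_size is a power of two (as host page sizes are); the real code uses the ROUND_DOWN() macro instead.

/* Sketch: two offsets share a host huge page iff they round down equally. */
#include <stdbool.h>
#include <stdint.h>

static bool same_huge_page(uint64_t addr1, uint64_t addr2, uint64_t page_size)
{
    uint64_t mask = ~(page_size - 1);   /* valid for power-of-two page sizes */

    return (addr1 & mask) == (addr2 & mask);
}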
+ */ +static bool postcopy_preempted_contains(RAMState *rs, RAMBlock *block, + ram_addr_t offset) +{ + PostcopyPreemptState *state = &rs->postcopy_preempt_state; + + /* No preemption at all? */ + if (!state->preempted) { + return false; + } + + /* Not even the same ramblock? */ + if (state->ram_block != block) { + return false; + } + + return offset_on_same_huge_page(block, offset, + state->ram_page << TARGET_PAGE_BITS); +} + /** * get_queued_page: unqueue a page from the postcopy requests * @@ -1799,7 +2090,20 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss) } while (block && !dirty); - if (!block) { + if (block) { + /* See comment above postcopy_preempted_contains() */ + if (postcopy_preempted_contains(rs, block, offset)) { + trace_postcopy_preempt_hit(block->idstr, offset); + /* + * If what we preempted previously was exactly what we're + * requesting right now, restore the preempted precopy + * immediately, boosting its priority as it's requested by + * postcopy. + */ + postcopy_preempt_restore(rs, pss, true); + return true; + } + } else { /* * Poll write faults too if background snapshot is enabled; that's * when we have vcpus got blocked by the write protected pages. @@ -1821,6 +2125,9 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss) * really rare. */ pss->complete_round = false; + /* Mark it an urgent request, meanwhile using POSTCOPY channel */ + pss->postcopy_requested = true; + pss->postcopy_target_channel = RAM_CHANNEL_POSTCOPY; } return !!block; @@ -1899,7 +2206,7 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len) } struct RAMSrcPageRequest *new_entry = - g_malloc0(sizeof(struct RAMSrcPageRequest)); + g_new0(struct RAMSrcPageRequest, 1); new_entry->rb = ramblock; new_entry->offset = start; new_entry->len = len; @@ -1972,10 +2279,8 @@ static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset) * * @rs: current RAM state * @pss: data about the page we want to send - * @last_stage: if we are at the completion stage */ -static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss, - bool last_stage) +static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss) { RAMBlock *block = pss->block; ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; @@ -1999,22 +2304,143 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss, xbzrle_cache_zero_page(rs, block->offset + offset); XBZRLE_cache_unlock(); } - ram_release_pages(block->idstr, offset, res); return res; } /* - * Do not use multifd for: - * 1. Compression as the first page in the new block should be posted out - * before sending the compressed page - * 2. In postcopy as one whole host page should be placed + * Do not use multifd in postcopy as one whole host page should be + * placed. Meanwhile postcopy requires atomic update of pages, so even + * if host page size == guest page size the dest guest during run may + * still see partially copied pages which is data corruption. */ - if (!save_page_use_compression(rs) && migrate_use_multifd() - && !migration_in_postcopy()) { + if (migrate_use_multifd() && !migration_in_postcopy()) { return ram_save_multifd_page(rs, block, offset); } - return ram_save_page(rs, pss, last_stage); + return ram_save_page(rs, pss); +} + +static bool postcopy_needs_preempt(RAMState *rs, PageSearchStatus *pss) +{ + MigrationState *ms = migrate_get_current(); + + /* Not enabled eager preempt? Then never do that. 
*/ + if (!migrate_postcopy_preempt()) { + return false; + } + + /* If the user explicitly disabled breaking of huge page, skip */ + if (!ms->postcopy_preempt_break_huge) { + return false; + } + + /* If the ramblock we're sending is a small page? Never bother. */ + if (qemu_ram_pagesize(pss->block) == TARGET_PAGE_SIZE) { + return false; + } + + /* Not in postcopy at all? */ + if (!migration_in_postcopy()) { + return false; + } + + /* + * If we're already handling a postcopy request, don't preempt as this page + * has got the same high priority. + */ + if (pss->postcopy_requested) { + return false; + } + + /* If there's postcopy requests, then check it up! */ + return postcopy_has_request(rs); +} + +/* Returns true if we preempted precopy, false otherwise */ +static void postcopy_do_preempt(RAMState *rs, PageSearchStatus *pss) +{ + PostcopyPreemptState *p_state = &rs->postcopy_preempt_state; + + trace_postcopy_preempt_triggered(pss->block->idstr, pss->page); + + /* + * Time to preempt precopy. Cache current PSS into preempt state, so that + * after handling the postcopy pages we can recover to it. We need to do + * so because the dest VM will have partial of the precopy huge page kept + * over in its tmp huge page caches; better move on with it when we can. + */ + p_state->ram_block = pss->block; + p_state->ram_page = pss->page; + p_state->preempted = true; +} + +/* Whether we're preempted by a postcopy request during sending a huge page */ +static bool postcopy_preempt_triggered(RAMState *rs) +{ + return rs->postcopy_preempt_state.preempted; +} + +static void postcopy_preempt_restore(RAMState *rs, PageSearchStatus *pss, + bool postcopy_requested) +{ + PostcopyPreemptState *state = &rs->postcopy_preempt_state; + + assert(state->preempted); + + pss->block = state->ram_block; + pss->page = state->ram_page; + + /* Whether this is a postcopy request? */ + pss->postcopy_requested = postcopy_requested; + /* + * When restoring a preempted page, the old data resides in PRECOPY + * slow channel, even if postcopy_requested is set. So always use + * PRECOPY channel here. + */ + pss->postcopy_target_channel = RAM_CHANNEL_PRECOPY; + + trace_postcopy_preempt_restored(pss->block->idstr, pss->page); + + /* Reset preempt state, most importantly, set preempted==false */ + postcopy_preempt_reset(rs); +} + +static void postcopy_preempt_choose_channel(RAMState *rs, PageSearchStatus *pss) +{ + MigrationState *s = migrate_get_current(); + unsigned int channel = pss->postcopy_target_channel; + QEMUFile *next; + + if (channel != rs->postcopy_channel) { + if (channel == RAM_CHANNEL_PRECOPY) { + next = s->to_dst_file; + } else { + next = s->postcopy_qemufile_src; + } + /* Update and cache the current channel */ + rs->f = next; + rs->postcopy_channel = channel; + + /* + * If channel switched, reset last_sent_block since the old sent block + * may not be on the same channel. 
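The preempt/restore pair above boils down to a small piece of cursor bookkeeping: preempting caches where the precopy walk stopped inside a huge page, and restoring puts that cursor back while forcing the precopy channel so a partially sent huge page never spans two channels. A simplified standalone sketch; PreemptState, PageCursor and the integer block_id are stand-ins for QEMU's RAMState/PageSearchStatus fields.

/* Sketch: cache the precopy cursor on preempt, restore it on resume. */
#include <stdbool.h>

typedef struct {
    int block_id;            /* stand-in for the RAMBlock pointer */
    unsigned long page;
    bool preempted;
} PreemptState;

typedef struct {
    int block_id;
    unsigned long page;
    bool urgent;             /* was this page requested by postcopy? */
    int channel;             /* 0 = precopy, 1 = postcopy */
} PageCursor;

static void do_preempt(PreemptState *st, const PageCursor *cur)
{
    st->block_id = cur->block_id;   /* remember where precopy stopped */
    st->page = cur->page;
    st->preempted = true;
}

static void preempt_restore(PreemptState *st, PageCursor *cur, bool urgent)
{
    cur->block_id = st->block_id;
    cur->page = st->page;
    cur->urgent = urgent;
    cur->channel = 0;        /* the partial data lives on the precopy channel */
    st->preempted = false;   /* reset the cached state */
}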
+ */ + rs->last_sent_block = NULL; + + trace_postcopy_preempt_switch_channel(channel); + } + + trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page); +} + +/* We need to make sure rs->f always points to the default channel elsewhere */ +static void postcopy_preempt_reset_channel(RAMState *rs) +{ + if (migrate_postcopy_preempt() && migration_in_postcopy()) { + rs->postcopy_channel = RAM_CHANNEL_PRECOPY; + rs->f = migrate_get_current()->to_dst_file; + trace_postcopy_preempt_reset_channel(); + } } /** @@ -2031,12 +2457,9 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss, * Returns the number of pages written or negative on error * * @rs: current RAM state - * @ms: current migration state * @pss: data about the page we want to send - * @last_stage: if we are at the completion stage */ -static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss, - bool last_stage) +static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss) { int tmppages, pages = 0; size_t pagesize_bits = @@ -2051,10 +2474,19 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss, return 0; } + if (migrate_postcopy_preempt() && migration_in_postcopy()) { + postcopy_preempt_choose_channel(rs, pss); + } + do { + if (postcopy_needs_preempt(rs, pss)) { + postcopy_do_preempt(rs, pss); + break; + } + /* Check the pages is dirty and if it is send it */ if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) { - tmppages = ram_save_target_page(rs, pss, last_stage); + tmppages = ram_save_target_page(rs, pss); if (tmppages < 0) { return tmppages; } @@ -2073,7 +2505,20 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss, offset_in_ramblock(pss->block, ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)); /* The offset we leave with is the min boundary of host page and block */ - pss->page = MIN(pss->page, hostpage_boundary) - 1; + pss->page = MIN(pss->page, hostpage_boundary); + + /* + * When with postcopy preempt mode, flush the data as soon as possible for + * postcopy requests, because we've already sent a whole huge page, so the + * dst node should already have enough resource to atomically filling in + * the current missing page. + * + * More importantly, when using separate postcopy channel, we must do + * explicit flush or it won't flush until the buffer is full. + */ + if (migrate_postcopy_preempt() && pss->postcopy_requested) { + qemu_fflush(rs->f); + } res = ram_save_release_protection(rs, pss, start_page); return (res < 0 ? res : pages); @@ -2088,13 +2533,11 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss, * or negative on error * * @rs: current RAM state - * @last_stage: if we are at the completion stage * * On systems where host-page-size > target-page-size it will send all the * pages in a host page that are dirty. */ - -static int ram_find_and_save_block(RAMState *rs, bool last_stage) +static int ram_find_and_save_block(RAMState *rs) { PageSearchStatus pss; int pages = 0; @@ -2105,25 +2548,42 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage) return pages; } + /* + * Always keep last_seen_block/last_page valid during this procedure, + * because find_dirty_block() relies on these values (e.g., we compare + * last_seen_block with pss.block to see whether we searched all the + * ramblocks) to detect the completion of migration. Having NULL value + * of last_seen_block can conditionally cause below loop to run forever. 
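The comment above explains why the search cursor must be seeded before entering the loop: the wrap-around termination test compares against the block the walk started from, so it needs a valid starting block. A standalone sketch of that shape over a plain array of blocks; the array and the bool dirtiness flags are stand-ins for ram_list and per-block dirty state.

/* Sketch: circular search that stops after exactly one full round. */
#include <stdbool.h>
#include <stddef.h>

/* Returns the index of the next dirty block, or -1 after a complete round. */
static int find_next_dirty_block(const bool *block_dirty, size_t nblocks,
                                 size_t *cursor)
{
    size_t start = *cursor;   /* must be a valid index, never "unset" */

    do {
        if (block_dirty[*cursor]) {
            return (int)*cursor;
        }
        *cursor = (*cursor + 1) % nblocks;   /* wrap at the end of the list */
    } while (*cursor != start);              /* back where we began: stop */

    return -1;
}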
+ */ + if (!rs->last_seen_block) { + rs->last_seen_block = QLIST_FIRST_RCU(&ram_list.blocks); + rs->last_page = 0; + } + pss.block = rs->last_seen_block; pss.page = rs->last_page; pss.complete_round = false; - if (!pss.block) { - pss.block = QLIST_FIRST_RCU(&ram_list.blocks); - } - do { again = true; found = get_queued_page(rs, &pss); if (!found) { - /* priority queue empty, so just search for something dirty */ - found = find_dirty_block(rs, &pss, &again); + /* + * Recover previous precopy ramblock/offset if postcopy has + * preempted precopy. Otherwise find the next dirty bit. + */ + if (postcopy_preempt_triggered(rs)) { + postcopy_preempt_restore(rs, &pss, false); + found = true; + } else { + /* priority queue empty, so just search for something dirty */ + found = find_dirty_block(rs, &pss, &again); + } } if (found) { - pages = ram_save_host_page(rs, &pss, last_stage); + pages = ram_save_host_page(rs, &pss); } } while (!pages && again); @@ -2141,8 +2601,8 @@ void acct_update_position(QEMUFile *f, size_t size, bool zero) ram_counters.duplicate += pages; } else { ram_counters.normal += pages; - ram_counters.transferred += size; - qemu_update_position(f, size); + ram_transferred_add(size); + qemu_file_credit_transfer(f, size); } } @@ -2218,7 +2678,14 @@ static void ram_save_cleanup(void *opaque) /* caller have hold iothread lock or is in a bh, so there is * no writing race against the migration bitmap */ - memory_global_dirty_log_stop(); + if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) { + /* + * do not stop dirty log without starting it, since + * memory_global_dirty_log_stop will assert that + * memory_global_dirty_log_start/stop used in pairs + */ + memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION); + } } RAMBLOCK_FOREACH_NOT_IGNORED(block) { @@ -2240,44 +2707,12 @@ static void ram_state_reset(RAMState *rs) rs->last_page = 0; rs->last_version = ram_list.version; rs->xbzrle_enabled = false; + postcopy_preempt_reset(rs); + rs->postcopy_channel = RAM_CHANNEL_PRECOPY; } #define MAX_WAIT 50 /* ms, half buffered_file limit */ -/* - * 'expected' is the value you expect the bitmap mostly to be full - * of; it won't bother printing lines that are all this value. - * If 'todump' is null the migration bitmap is dumped. - */ -void ram_debug_dump_bitmap(unsigned long *todump, bool expected, - unsigned long pages) -{ - int64_t cur; - int64_t linelen = 128; - char linebuf[129]; - - for (cur = 0; cur < pages; cur += linelen) { - int64_t curb; - bool found = false; - /* - * Last line; catch the case where the line length - * is longer than remaining ram - */ - if (cur + linelen > pages) { - linelen = pages - cur; - } - for (curb = 0; curb < linelen; curb++) { - bool thisbit = test_bit(cur + curb, todump); - linebuf[curb] = thisbit ? 
'1' : '.'; - found = found || (thisbit != expected); - } - if (found) { - linebuf[curb] = '\0'; - fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf); - } - } -} - /* **** functions for postcopy ***** */ void ram_postcopy_migrated_memory_release(MigrationState *ms) @@ -2303,14 +2738,12 @@ void ram_postcopy_migrated_memory_release(MigrationState *ms) /** * postcopy_send_discard_bm_ram: discard a RAMBlock * - * Returns zero on success - * * Callback from postcopy_each_ram_send_discard for each RAMBlock * * @ms: current migration state * @block: RAMBlock to discard */ -static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block) +static void postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block) { unsigned long end = block->used_length >> TARGET_PAGE_BITS; unsigned long current; @@ -2334,15 +2767,13 @@ static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block) postcopy_discard_send_range(ms, one, discard_length); current = one + discard_length; } - - return 0; } +static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block); + /** * postcopy_each_ram_send_discard: discard all RAMBlocks * - * Returns 0 for success or negative for error - * * Utility for the outgoing postcopy code. * Calls postcopy_send_discard_bm_ram for each RAMBlock * passing it bitmap indexes and name. @@ -2351,27 +2782,29 @@ static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block) * * @ms: current migration state */ -static int postcopy_each_ram_send_discard(MigrationState *ms) +static void postcopy_each_ram_send_discard(MigrationState *ms) { struct RAMBlock *block; - int ret; RAMBLOCK_FOREACH_NOT_IGNORED(block) { postcopy_discard_send_init(ms, block->idstr); + /* + * Deal with TPS != HPS and huge pages. It discard any partially sent + * host-page size chunks, mark any partially dirty host-page size + * chunks as all dirty. In this case the host-page is the host-page + * for the particular RAMBlock, i.e. it might be a huge page. + */ + postcopy_chunk_hostpages_pass(ms, block); + /* * Postcopy sends chunks of bitmap over the wire, but it * just needs indexes at this point, avoids it having * target page specific code. */ - ret = postcopy_send_discard_bm_ram(ms, block); + postcopy_send_discard_bm_ram(ms, block); postcopy_discard_send_finish(ms); - if (ret) { - return ret; - } } - - return 0; } /** @@ -2441,38 +2874,9 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block) } } -/** - * postcopy_chunk_hostpages: discard any partially sent host page - * - * Utility for the outgoing postcopy code. - * - * Discard any partially sent host-page size chunks, mark any partially - * dirty host-page size chunks as all dirty. In this case the host-page - * is the host-page for the particular RAMBlock, i.e. it might be a huge page - * - * Returns zero on success - * - * @ms: current migration state - * @block: block we want to work with - */ -static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block) -{ - postcopy_discard_send_init(ms, block->idstr); - - /* - * Ensure that all partially dirty host pages are made fully dirty. 
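postcopy_chunk_hostpages_pass(), which the refactor here now calls directly from postcopy_each_ram_send_discard(), makes sure a host page is never sent partially: if any target page inside a host page is dirty, every target page in it is marked dirty. A standalone sketch over a bool array; the array and the page-count parameters are stand-ins for the RAMBlock bitmap and its host/target page sizes.

/* Sketch: promote partially dirty host pages to fully dirty. */
#include <stdbool.h>
#include <stddef.h>

static void chunk_hostpages_pass_sketch(bool *dirty, size_t npages,
                                        size_t pages_per_hostpage)
{
    for (size_t hp = 0; hp < npages; hp += pages_per_hostpage) {
        size_t end = hp + pages_per_hostpage;
        bool any = false;

        if (end > npages) {
            end = npages;
        }
        for (size_t i = hp; i < end; i++) {
            any |= dirty[i];
        }
        if (any) {
            for (size_t i = hp; i < end; i++) {
                dirty[i] = true;   /* whole host page must be re-sent */
            }
        }
    }
}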
- */ - postcopy_chunk_hostpages_pass(ms, block); - - postcopy_discard_send_finish(ms); - return 0; -} - /** * ram_postcopy_send_discard_bitmap: transmit the discard bitmap * - * Returns zero on success - * * Transmit the set of pages to be discarded after precopy to the target * these are pages that: * a) Have been previously transmitted but are now dirty again @@ -2483,11 +2887,9 @@ static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block) * * @ms: current migration state */ -int ram_postcopy_send_discard_bitmap(MigrationState *ms) +void ram_postcopy_send_discard_bitmap(MigrationState *ms) { RAMState *rs = ram_state; - RAMBlock *block; - int ret; RCU_READ_LOCK_GUARD(); @@ -2499,21 +2901,9 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms) rs->last_sent_block = NULL; rs->last_page = 0; - RAMBLOCK_FOREACH_NOT_IGNORED(block) { - /* Deal with TPS != HPS and huge pages */ - ret = postcopy_chunk_hostpages(ms, block); - if (ret) { - return ret; - } + postcopy_each_ram_send_discard(ms); -#ifdef DEBUG_POSTCOPY - ram_debug_dump_bitmap(block->bmap, true, - block->used_length >> TARGET_PAGE_BITS); -#endif - } trace_ram_postcopy_send_discard_bitmap(); - - return postcopy_each_ram_send_discard(ms); } /** @@ -2670,6 +3060,19 @@ static void ram_list_init_bitmaps(void) } } +static void migration_bitmap_clear_discarded_pages(RAMState *rs) +{ + unsigned long pages; + RAMBlock *rb; + + RCU_READ_LOCK_GUARD(); + + RAMBLOCK_FOREACH_NOT_IGNORED(rb) { + pages = ramblock_dirty_bitmap_clear_discarded_pages(rb); + rs->migration_dirty_pages -= pages; + } +} + static void ram_init_bitmaps(RAMState *rs) { /* For memory_global_dirty_log_start below. */ @@ -2680,12 +3083,18 @@ static void ram_init_bitmaps(RAMState *rs) ram_list_init_bitmaps(); /* We don't use dirty log with background snapshots */ if (!migrate_background_snapshot()) { - memory_global_dirty_log_start(); + memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION); migration_bitmap_sync_precopy(rs); } } qemu_mutex_unlock_ramlist(); qemu_mutex_unlock_iothread(); + + /* + * After an eventual first bitmap sync, fixup the initial bitmap + * containing all 1s to exclude any discarded pages from migration. + */ + migration_bitmap_clear_discarded_pages(rs); } static int ram_init_all(RAMState **rsp) @@ -2777,8 +3186,7 @@ void qemu_guest_free_page_hint(void *addr, size_t len) * are initially set. Otherwise those skipped pages will be sent in * the next round after syncing from the memory region bitmap. 
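migration_bitmap_clear_discarded_pages(), added earlier in this hunk, keeps the pending-work estimate honest: the bits about to be dropped are counted first, subtracted from migration_dirty_pages, and only then cleared. A standalone sketch of that accounting over a bool array; the caller is assumed to pass an in-bounds range, and bitmap_count_one_with_offset()/bitmap_clear() are folded into one loop here.

/* Sketch: exclude a discarded range from migration and fix the counter. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static uint64_t clear_discarded_range(bool *dirty, uint64_t *pending_pages,
                                      size_t start, size_t npages)
{
    uint64_t cleared = 0;

    for (size_t i = start; i < start + npages; i++) {
        if (dirty[i]) {
            cleared++;
            dirty[i] = false;   /* page will never be sent */
        }
    }
    *pending_pages -= cleared;  /* keep the remaining-work estimate accurate */
    return cleared;
}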
*/ - migration_clear_memory_region_dirty_bitmap_range(ram_state, block, - start, npages); + migration_clear_memory_region_dirty_bitmap_range(block, start, npages); ram_state->migration_dirty_pages -= bitmap_count_one_with_offset(block->bmap, start, npages); bitmap_clear(block->bmap, start, npages); @@ -2805,6 +3213,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque) { RAMState **rsp = opaque; RAMBlock *block; + int ret; if (compress_threads_save_setup()) { return -1; @@ -2839,7 +3248,11 @@ static int ram_save_setup(QEMUFile *f, void *opaque) ram_control_before_iterate(f, RAM_CONTROL_SETUP); ram_control_after_iterate(f, RAM_CONTROL_SETUP); - multifd_send_sync_main(f); + ret = multifd_send_sync_main(f); + if (ret < 0) { + return ret; + } + qemu_put_be64(f, RAM_SAVE_FLAG_EOS); qemu_fflush(f); @@ -2891,14 +3304,14 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); i = 0; while ((ret = qemu_file_rate_limit(f)) == 0 || - !QSIMPLEQ_EMPTY(&rs->src_page_requests)) { + postcopy_has_request(rs)) { int pages; if (qemu_file_get_error(f)) { break; } - pages = ram_find_and_save_block(rs, false); + pages = ram_find_and_save_block(rs); /* no more pages to sent */ if (pages == 0) { done = 1; @@ -2939,6 +3352,8 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) } qemu_mutex_unlock(&rs->bitmap_mutex); + postcopy_preempt_reset_channel(rs); + /* * Must occur before EOS (or any QEMUFile operation) * because of RDMA protocol. @@ -2948,10 +3363,14 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) out: if (ret >= 0 && migration_is_setup_or_active(migrate_get_current()->state)) { - multifd_send_sync_main(rs->f); + ret = multifd_send_sync_main(rs->f); + if (ret < 0) { + return ret; + } + qemu_put_be64(f, RAM_SAVE_FLAG_EOS); qemu_fflush(f); - ram_counters.transferred += 8; + ram_transferred_add(8); ret = qemu_file_get_error(f); } @@ -2978,6 +3397,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque) RAMState *rs = *temp; int ret = 0; + rs->last_stage = !migration_in_colo_state(); + WITH_RCU_READ_LOCK_GUARD() { if (!migration_in_postcopy()) { migration_bitmap_sync_precopy(rs); @@ -2991,7 +3412,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque) while (true) { int pages; - pages = ram_find_and_save_block(rs, !migration_in_colo_state()); + pages = ram_find_and_save_block(rs); /* no more blocks to sent */ if (pages == 0) { break; @@ -3006,13 +3427,21 @@ static int ram_save_complete(QEMUFile *f, void *opaque) ram_control_after_iterate(f, RAM_CONTROL_FINISH); } - if (ret >= 0) { - multifd_send_sync_main(rs->f); - qemu_put_be64(f, RAM_SAVE_FLAG_EOS); - qemu_fflush(f); + if (ret < 0) { + return ret; } - return ret; + postcopy_preempt_reset_channel(rs); + + ret = multifd_send_sync_main(rs->f); + if (ret < 0) { + return ret; + } + + qemu_put_be64(f, RAM_SAVE_FLAG_EOS); + qemu_fflush(f); + + return 0; } static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size, @@ -3085,12 +3514,16 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host) * * Returns a pointer from within the RCU-protected ram_list. 
* + * @mis: the migration incoming state pointer * @f: QEMUFile where to read the data from * @flags: Page flags (mostly to see if it's a continuation of previous block) + * @channel: the channel we're using */ -static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags) +static inline RAMBlock *ram_block_from_stream(MigrationIncomingState *mis, + QEMUFile *f, int flags, + int channel) { - static RAMBlock *block; + RAMBlock *block = mis->last_recv_block[channel]; char id[256]; uint8_t len; @@ -3117,6 +3550,8 @@ static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags) return NULL; } + mis->last_recv_block[channel] = block; + return block; } @@ -3180,7 +3615,7 @@ static inline void *colo_cache_from_block_offset(RAMBlock *block, */ void ram_handle_compressed(void *host, uint8_t ch, uint64_t size) { - if (ch != 0 || !is_zero_range(host, size)) { + if (ch != 0 || !buffer_is_zero(host, size)) { memset(host, ch, size); } } @@ -3401,6 +3836,10 @@ int colo_init_ram_cache(void) } return -errno; } + if (!machine_dump_guest_core(current_machine)) { + qemu_madvise(block->colo_cache, block->used_length, + QEMU_MADV_DONTDUMP); + } } } @@ -3437,7 +3876,7 @@ void colo_incoming_start_dirty_log(void) /* Discard this dirty bitmap record */ bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS); } - memory_global_dirty_log_start(); + memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION); } ram_state->migration_dirty_pages = 0; qemu_mutex_unlock_ramlist(); @@ -3449,7 +3888,7 @@ void colo_release_ram_cache(void) { RAMBlock *block; - memory_global_dirty_log_stop(); + memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION); RAMBLOCK_FOREACH_NOT_IGNORED(block) { g_free(block->bmap); block->bmap = NULL; @@ -3530,18 +3969,15 @@ int ram_postcopy_incoming_init(MigrationIncomingState *mis) * rcu_read_lock is taken prior to this being called. * * @f: QEMUFile where to send the data + * @channel: the channel to use for loading */ -static int ram_load_postcopy(QEMUFile *f) +int ram_load_postcopy(QEMUFile *f, int channel) { int flags = 0, ret = 0; bool place_needed = false; bool matches_target_page_size = false; MigrationIncomingState *mis = migration_incoming_get_current(); - /* Temporary page that is later 'placed' */ - void *postcopy_host_page = mis->postcopy_tmp_page; - void *host_page = NULL; - bool all_zero = true; - int target_pages = 0; + PostcopyTmpPage *tmp_page = &mis->postcopy_tmp_pages[channel]; while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) { ram_addr_t addr; @@ -3565,10 +4001,10 @@ static int ram_load_postcopy(QEMUFile *f) flags = addr & ~TARGET_PAGE_MASK; addr &= TARGET_PAGE_MASK; - trace_ram_load_postcopy_loop((uint64_t)addr, flags); + trace_ram_load_postcopy_loop(channel, (uint64_t)addr, flags); if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE | RAM_SAVE_FLAG_COMPRESS_PAGE)) { - block = ram_block_from_stream(f, flags); + block = ram_block_from_stream(mis, f, flags, channel); if (!block) { ret = -EINVAL; break; @@ -3585,7 +4021,7 @@ static int ram_load_postcopy(QEMUFile *f) ret = -EINVAL; break; } - target_pages++; + tmp_page->target_pages++; matches_target_page_size = block->page_size == TARGET_PAGE_SIZE; /* * Postcopy requires that we place whole host pages atomically; @@ -3597,16 +4033,21 @@ static int ram_load_postcopy(QEMUFile *f) * however the source ensures it always sends all the components * of a host page in one chunk. 
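That comment is the heart of the per-channel rework: each channel now assembles one host page out of target-page-sized pieces in its own PostcopyTmpPage and only places it (atomically, via userfaultfd) once the last piece has arrived. A simplified sketch of that accumulation step, using the field names from the hunks above but with everything else schematic:

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    typedef struct {
        void *tmp_huge_page;   /* staging buffer, one host page large       */
        void *host_addr;       /* destination host page being assembled     */
        int   target_pages;    /* pieces received so far                    */
        bool  all_zero;        /* true while only zero pages have been seen */
    } TmpPage;

    /*
     * Called for every received target page; returns true when the host page
     * is complete and ready to be placed in one go.
     */
    static bool tmp_page_add(TmpPage *t, void *host_page, const void *data,
                             size_t tp_size, size_t hp_size, size_t offset)
    {
        if (t->target_pages++ == 0) {
            t->host_addr = host_page;   /* first piece fixes the target page */
        }
        memcpy((char *)t->tmp_huge_page + offset, data, tp_size);
        return t->target_pages == (int)(hp_size / tp_size);
    }

The real loop additionally verifies that every piece maps to the same host page (the error_report below) and keeps all_zero up to date so a completed page can be placed as a zero page instead of copied.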
*/ - page_buffer = postcopy_host_page + + page_buffer = tmp_page->tmp_huge_page + host_page_offset_from_ram_block_offset(block, addr); /* If all TP are zero then we can optimise the place */ - if (target_pages == 1) { - host_page = host_page_from_ram_block_offset(block, addr); - } else if (host_page != host_page_from_ram_block_offset(block, - addr)) { + if (tmp_page->target_pages == 1) { + tmp_page->host_addr = + host_page_from_ram_block_offset(block, addr); + } else if (tmp_page->host_addr != + host_page_from_ram_block_offset(block, addr)) { /* not the 1st TP within the HP */ - error_report("Non-same host page %p/%p", host_page, - host_page_from_ram_block_offset(block, addr)); + error_report("Non-same host page detected on channel %d: " + "Target host page %p, received host page %p " + "(rb %s offset 0x"RAM_ADDR_FMT" target_pages %d)", + channel, tmp_page->host_addr, + host_page_from_ram_block_offset(block, addr), + block->idstr, addr, tmp_page->target_pages); ret = -EINVAL; break; } @@ -3615,10 +4056,11 @@ static int ram_load_postcopy(QEMUFile *f) * If it's the last part of a host page then we place the host * page */ - if (target_pages == (block->page_size / TARGET_PAGE_SIZE)) { + if (tmp_page->target_pages == + (block->page_size / TARGET_PAGE_SIZE)) { place_needed = true; } - place_source = postcopy_host_page; + place_source = tmp_page->tmp_huge_page; } switch (flags & ~RAM_SAVE_FLAG_CONTINUE) { @@ -3632,12 +4074,12 @@ static int ram_load_postcopy(QEMUFile *f) memset(page_buffer, ch, TARGET_PAGE_SIZE); } if (ch) { - all_zero = false; + tmp_page->all_zero = false; } break; case RAM_SAVE_FLAG_PAGE: - all_zero = false; + tmp_page->all_zero = false; if (!matches_target_page_size) { /* For huge pages, we always use temporary buffer */ qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE); @@ -3655,7 +4097,7 @@ static int ram_load_postcopy(QEMUFile *f) } break; case RAM_SAVE_FLAG_COMPRESS_PAGE: - all_zero = false; + tmp_page->all_zero = false; len = qemu_get_be32(f); if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) { error_report("Invalid compressed data length: %d", len); @@ -3687,16 +4129,14 @@ static int ram_load_postcopy(QEMUFile *f) } if (!ret && place_needed) { - if (all_zero) { - ret = postcopy_place_page_zero(mis, host_page, block); + if (tmp_page->all_zero) { + ret = postcopy_place_page_zero(mis, tmp_page->host_addr, block); } else { - ret = postcopy_place_page(mis, host_page, place_source, - block); + ret = postcopy_place_page(mis, tmp_page->host_addr, + place_source, block); } place_needed = false; - target_pages = 0; - /* Assume we have a zero page until we detect something different */ - all_zero = true; + postcopy_temp_page_reset(tmp_page); } } @@ -3727,7 +4167,6 @@ void colo_flush_ram_cache(void) unsigned long offset = 0; memory_global_dirty_log_sync(); - qemu_mutex_lock(&ram_state->bitmap_mutex); WITH_RCU_READ_LOCK_GUARD() { RAMBLOCK_FOREACH_NOT_IGNORED(block) { ramblock_sync_dirty_bitmap(ram_state, block); @@ -3739,24 +4178,30 @@ void colo_flush_ram_cache(void) block = QLIST_FIRST_RCU(&ram_list.blocks); while (block) { - offset = migration_bitmap_find_dirty(ram_state, block, offset); + unsigned long num = 0; + offset = colo_bitmap_find_dirty(ram_state, block, offset, &num); if (!offset_in_ramblock(block, ((ram_addr_t)offset) << TARGET_PAGE_BITS)) { offset = 0; + num = 0; block = QLIST_NEXT_RCU(block, next); } else { - migration_bitmap_clear_dirty(ram_state, block, offset); + unsigned long i = 0; + + for (i = 0; i < num; i++) { + migration_bitmap_clear_dirty(ram_state, 
block, offset + i); + } dst_host = block->host + (((ram_addr_t)offset) << TARGET_PAGE_BITS); src_host = block->colo_cache + (((ram_addr_t)offset) << TARGET_PAGE_BITS); - memcpy(dst_host, src_host, TARGET_PAGE_SIZE); + memcpy(dst_host, src_host, TARGET_PAGE_SIZE * num); + offset += num; } } } trace_colo_flush_ram_cache_end(); - qemu_mutex_unlock(&ram_state->bitmap_mutex); } /** @@ -3771,6 +4216,7 @@ void colo_flush_ram_cache(void) */ static int ram_load_precopy(QEMUFile *f) { + MigrationIncomingState *mis = migration_incoming_get_current(); int flags = 0, ret = 0, invalid_flags = 0, len = 0, i = 0; /* ADVISE is earlier, it shows the source has the postcopy capability on */ bool postcopy_advised = postcopy_is_advised(); @@ -3809,7 +4255,8 @@ static int ram_load_precopy(QEMUFile *f) if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE | RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) { - RAMBlock *block = ram_block_from_stream(f, flags); + RAMBlock *block = ram_block_from_stream(mis, f, flags, + RAM_CHANNEL_PRECOPY); host = host_from_ram_block_offset(block, addr); /* @@ -3986,7 +4433,12 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id) */ WITH_RCU_READ_LOCK_GUARD() { if (postcopy_running) { - ret = ram_load_postcopy(f); + /* + * Note! Here RAM_CHANNEL_PRECOPY is the precopy channel of + * postcopy migration, we have another RAM_CHANNEL_POSTCOPY to + * service fast page faults. + */ + ret = ram_load_postcopy(f, RAM_CHANNEL_PRECOPY); } else { ret = ram_load_precopy(f); } @@ -4115,6 +4567,10 @@ int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block) */ bitmap_complement(block->bmap, block->bmap, nbits); + /* Clear dirty bits of discarded ranges that we don't want to migrate. */ + ramblock_dirty_bitmap_clear_discarded_pages(block); + + /* We'll recalculate migration_dirty_pages in ram_state_resume_prepare(). */ trace_ram_dirty_bitmap_reload_complete(block->idstr); /* @@ -4144,6 +4600,12 @@ static int ram_resume_prepare(MigrationState *s, void *opaque) return 0; } +void postcopy_preempt_shutdown_file(MigrationState *s) +{ + qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS); + qemu_fflush(s->postcopy_qemufile_src); +} + static SaveVMHandlers savevm_ram_handlers = { .save_setup = ram_save_setup, .save_live_iterate = ram_save_iterate, @@ -4178,9 +4640,8 @@ static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host, * Abort and indicate a proper reason. 
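Returning to the colo_flush_ram_cache() hunk above: colo_bitmap_find_dirty() (not shown in this patch) differs from migration_bitmap_find_dirty() by also reporting the length of the contiguous dirty run, which is what makes the single memcpy of num pages possible. A toy version of that run detection over a plain 64-bit word (illustrative only; the real code works on QEMU's long-based bitmaps with find_next_bit()):

    #include <stddef.h>
    #include <stdint.h>

    /*
     * Return the index of the first dirty page at or after 'start' (64 means
     * none found) and store the length of the contiguous dirty run in *num.
     */
    static size_t find_dirty_run(uint64_t bmap, size_t start, size_t *num)
    {
        size_t first = start, len = 0;

        while (first < 64 && !(bmap & (1ULL << first))) {
            first++;                               /* skip clean pages     */
        }
        while (first + len < 64 && (bmap & (1ULL << (first + len)))) {
            len++;                                 /* extend the dirty run */
        }
        *num = len;
        return first;
    }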
*/ error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr); - migrate_set_error(migrate_get_current(), err); + migration_cancel(err); error_free(err); - migration_cancel(); } switch (ps) { diff --git a/migration/ram.h b/migration/ram.h index 4833e9fd5b..c7af65ac74 100644 --- a/migration/ram.h +++ b/migration/ram.h @@ -50,18 +50,18 @@ bool ramblock_is_ignored(RAMBlock *block); int xbzrle_cache_resize(uint64_t new_size, Error **errp); uint64_t ram_bytes_remaining(void); uint64_t ram_bytes_total(void); +void mig_throttle_counter_reset(void); uint64_t ram_pagesize_summary(void); int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len); void acct_update_position(QEMUFile *f, size_t size, bool zero); -void ram_debug_dump_bitmap(unsigned long *todump, bool expected, - unsigned long pages); void ram_postcopy_migrated_memory_release(MigrationState *ms); /* For outgoing discard bitmap */ -int ram_postcopy_send_discard_bitmap(MigrationState *ms); +void ram_postcopy_send_discard_bitmap(MigrationState *ms); /* For incoming postcopy discard */ int ram_discard_range(const char *block_name, uint64_t start, size_t length); int ram_postcopy_incoming_init(MigrationIncomingState *mis); +int ram_load_postcopy(QEMUFile *f, int channel); void ram_handle_compressed(void *host, uint8_t ch, uint64_t size); @@ -72,6 +72,9 @@ void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr, size_t nr); int64_t ramblock_recv_bitmap_send(QEMUFile *file, const char *block_name); int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *rb); +bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start); +void postcopy_preempt_shutdown_file(MigrationState *s); +void *postcopy_preempt_thread(void *opaque); /* ram cache */ int colo_init_ram_cache(void); @@ -86,4 +89,6 @@ void ram_write_tracking_prepare(void); int ram_write_tracking_start(void); void ram_write_tracking_stop(void); +void dirty_sync_missed_zero_copy(void); + #endif diff --git a/migration/rdma.c b/migration/rdma.c index 5c2d113aa9..94a55dd95b 100644 --- a/migration/rdma.c +++ b/migration/rdma.c @@ -21,7 +21,6 @@ #include "migration.h" #include "qemu-file.h" #include "ram.h" -#include "qemu-file-channel.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "qemu/module.h" @@ -358,9 +357,11 @@ typedef struct RDMAContext { struct ibv_context *verbs; struct rdma_event_channel *channel; struct ibv_qp *qp; /* queue pair */ - struct ibv_comp_channel *comp_channel; /* completion channel */ + struct ibv_comp_channel *recv_comp_channel; /* recv completion channel */ + struct ibv_comp_channel *send_comp_channel; /* send completion channel */ struct ibv_pd *pd; /* protection domain */ - struct ibv_cq *cq; /* completion queue */ + struct ibv_cq *recv_cq; /* receive completion queue */ + struct ibv_cq *send_cq; /* send completion queue */ /* * If a previous write failed (perhaps because of a failed @@ -1059,21 +1060,34 @@ static int qemu_rdma_alloc_pd_cq(RDMAContext *rdma) return -1; } - /* create completion channel */ - rdma->comp_channel = ibv_create_comp_channel(rdma->verbs); - if (!rdma->comp_channel) { - error_report("failed to allocate completion channel"); + /* create receive completion channel */ + rdma->recv_comp_channel = ibv_create_comp_channel(rdma->verbs); + if (!rdma->recv_comp_channel) { + error_report("failed to allocate receive completion channel"); goto err_alloc_pd_cq; } /* - * Completion queue can be filled by both read and write work requests, - * so must reflect the sum of both possible queue 
sizes. + * Completion queue can be filled by read work requests. */ - rdma->cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3), - NULL, rdma->comp_channel, 0); - if (!rdma->cq) { - error_report("failed to allocate completion queue"); + rdma->recv_cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3), + NULL, rdma->recv_comp_channel, 0); + if (!rdma->recv_cq) { + error_report("failed to allocate receive completion queue"); + goto err_alloc_pd_cq; + } + + /* create send completion channel */ + rdma->send_comp_channel = ibv_create_comp_channel(rdma->verbs); + if (!rdma->send_comp_channel) { + error_report("failed to allocate send completion channel"); + goto err_alloc_pd_cq; + } + + rdma->send_cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3), + NULL, rdma->send_comp_channel, 0); + if (!rdma->send_cq) { + error_report("failed to allocate send completion queue"); goto err_alloc_pd_cq; } @@ -1083,11 +1097,19 @@ err_alloc_pd_cq: if (rdma->pd) { ibv_dealloc_pd(rdma->pd); } - if (rdma->comp_channel) { - ibv_destroy_comp_channel(rdma->comp_channel); + if (rdma->recv_comp_channel) { + ibv_destroy_comp_channel(rdma->recv_comp_channel); + } + if (rdma->send_comp_channel) { + ibv_destroy_comp_channel(rdma->send_comp_channel); + } + if (rdma->recv_cq) { + ibv_destroy_cq(rdma->recv_cq); + rdma->recv_cq = NULL; } rdma->pd = NULL; - rdma->comp_channel = NULL; + rdma->recv_comp_channel = NULL; + rdma->send_comp_channel = NULL; return -1; } @@ -1104,8 +1126,8 @@ static int qemu_rdma_alloc_qp(RDMAContext *rdma) attr.cap.max_recv_wr = 3; attr.cap.max_send_sge = 1; attr.cap.max_recv_sge = 1; - attr.send_cq = rdma->cq; - attr.recv_cq = rdma->cq; + attr.send_cq = rdma->send_cq; + attr.recv_cq = rdma->recv_cq; attr.qp_type = IBV_QPT_RC; ret = rdma_create_qp(rdma->cm_id, rdma->pd, &attr); @@ -1117,19 +1139,82 @@ static int qemu_rdma_alloc_qp(RDMAContext *rdma) return 0; } +/* Check whether On-Demand Paging is supported by the RDMA device */ +static bool rdma_support_odp(struct ibv_context *dev) +{ + struct ibv_device_attr_ex attr = {0}; + int ret = ibv_query_device_ex(dev, NULL, &attr); + if (ret) { + return false; + } + + if (attr.odp_caps.general_caps & IBV_ODP_SUPPORT) { + return true; + } + + return false; +} + +/* + * ibv_advise_mr to avoid RNR NAK error as far as possible. + * The responder mr registered with ODP will send an RNR NAK back to + * the requester in the face of a page fault. + */ +static void qemu_rdma_advise_prefetch_mr(struct ibv_pd *pd, uint64_t addr, + uint32_t len, uint32_t lkey, + const char *name, bool wr) +{ +#ifdef HAVE_IBV_ADVISE_MR + int ret; + int advice = wr ? 
IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE : + IBV_ADVISE_MR_ADVICE_PREFETCH; + struct ibv_sge sg_list = {.lkey = lkey, .addr = addr, .length = len}; + + ret = ibv_advise_mr(pd, advice, + IBV_ADVISE_MR_FLAG_FLUSH, &sg_list, 1); + /* ignore the error */ + if (ret) { + trace_qemu_rdma_advise_mr(name, len, addr, strerror(errno)); + } else { + trace_qemu_rdma_advise_mr(name, len, addr, "succeeded"); + } +#endif +} + static int qemu_rdma_reg_whole_ram_blocks(RDMAContext *rdma) { int i; RDMALocalBlocks *local = &rdma->local_ram_blocks; for (i = 0; i < local->nb_blocks; i++) { + int access = IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE; + local->block[i].mr = ibv_reg_mr(rdma->pd, local->block[i].local_host_addr, - local->block[i].length, - IBV_ACCESS_LOCAL_WRITE | - IBV_ACCESS_REMOTE_WRITE + local->block[i].length, access ); + + if (!local->block[i].mr && + errno == ENOTSUP && rdma_support_odp(rdma->verbs)) { + access |= IBV_ACCESS_ON_DEMAND; + /* register ODP mr */ + local->block[i].mr = + ibv_reg_mr(rdma->pd, + local->block[i].local_host_addr, + local->block[i].length, access); + trace_qemu_rdma_register_odp_mr(local->block[i].block_name); + + if (local->block[i].mr) { + qemu_rdma_advise_prefetch_mr(rdma->pd, + (uintptr_t)local->block[i].local_host_addr, + local->block[i].length, + local->block[i].mr->lkey, + local->block[i].block_name, + true); + } + } + if (!local->block[i].mr) { perror("Failed to register local dest ram block!"); break; @@ -1215,28 +1300,40 @@ static int qemu_rdma_register_and_get_keys(RDMAContext *rdma, */ if (!block->pmr[chunk]) { uint64_t len = chunk_end - chunk_start; + int access = rkey ? IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE : + 0; trace_qemu_rdma_register_and_get_keys(len, chunk_start); - block->pmr[chunk] = ibv_reg_mr(rdma->pd, - chunk_start, len, - (rkey ? (IBV_ACCESS_LOCAL_WRITE | - IBV_ACCESS_REMOTE_WRITE) : 0)); + block->pmr[chunk] = ibv_reg_mr(rdma->pd, chunk_start, len, access); + if (!block->pmr[chunk] && + errno == ENOTSUP && rdma_support_odp(rdma->verbs)) { + access |= IBV_ACCESS_ON_DEMAND; + /* register ODP mr */ + block->pmr[chunk] = ibv_reg_mr(rdma->pd, chunk_start, len, access); + trace_qemu_rdma_register_odp_mr(block->block_name); - if (!block->pmr[chunk]) { - perror("Failed to register chunk!"); - fprintf(stderr, "Chunk details: block: %d chunk index %d" - " start %" PRIuPTR " end %" PRIuPTR - " host %" PRIuPTR - " local %" PRIuPTR " registrations: %d\n", - block->index, chunk, (uintptr_t)chunk_start, - (uintptr_t)chunk_end, host_addr, - (uintptr_t)block->local_host_addr, - rdma->total_registrations); - return -1; + if (block->pmr[chunk]) { + qemu_rdma_advise_prefetch_mr(rdma->pd, (uintptr_t)chunk_start, + len, block->pmr[chunk]->lkey, + block->block_name, rkey); + + } } - rdma->total_registrations++; } + if (!block->pmr[chunk]) { + perror("Failed to register chunk!"); + fprintf(stderr, "Chunk details: block: %d chunk index %d" + " start %" PRIuPTR " end %" PRIuPTR + " host %" PRIuPTR + " local %" PRIuPTR " registrations: %d\n", + block->index, chunk, (uintptr_t)chunk_start, + (uintptr_t)chunk_end, host_addr, + (uintptr_t)block->local_host_addr, + rdma->total_registrations); + return -1; + } + rdma->total_registrations++; if (lkey) { *lkey = block->pmr[chunk]->lkey; @@ -1272,30 +1369,6 @@ const char *print_wrid(int wrid) return wrid_desc[wrid]; } -/* - * RDMA requires memory registration (mlock/pinning), but this is not good for - * overcommitment. 
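Both registration paths above follow the same fallback shape; stripped of the QEMU-specific error reporting it is just this (generic libibverbs usage, not code from the patch):

    #include <errno.h>
    #include <infiniband/verbs.h>

    static struct ibv_mr *reg_mr_with_odp_fallback(struct ibv_pd *pd,
                                                   void *addr, size_t len)
    {
        int access = IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE;
        struct ibv_mr *mr = ibv_reg_mr(pd, addr, len, access);

        if (!mr && errno == ENOTSUP) {
            /* Pinned registration refused; retry as an On-Demand Paging MR. */
            mr = ibv_reg_mr(pd, addr, len, access | IBV_ACCESS_ON_DEMAND);
        }
        return mr;   /* NULL if both attempts failed */
    }

The prefetch advice issued after a successful ODP registration is purely an optimization: it asks the HCA to fault the pages in ahead of time so the peer sees fewer RNR NAKs.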
- * - * In preparation for the future where LRU information or workload-specific - * writable writable working set memory access behavior is available to QEMU - * it would be nice to have in place the ability to UN-register/UN-pin - * particular memory regions from the RDMA hardware when it is determine that - * those regions of memory will likely not be accessed again in the near future. - * - * While we do not yet have such information right now, the following - * compile-time option allows us to perform a non-optimized version of this - * behavior. - * - * By uncommenting this option, you will cause *all* RDMA transfers to be - * unregistered immediately after the transfer completes on both sides of the - * connection. This has no effect in 'rdma-pin-all' mode, only regular mode. - * - * This will have a terrible impact on migration performance, so until future - * workload information or LRU information is available, do not attempt to use - * this feature except for basic testing. - */ -/* #define RDMA_UNREGISTRATION_EXAMPLE */ - /* * Perform a non-optimized memory unregistration after every transfer * for demonstration purposes, only if pin-all is not requested. @@ -1388,47 +1461,19 @@ static uint64_t qemu_rdma_make_wrid(uint64_t wr_id, uint64_t index, return result; } -/* - * Set bit for unregistration in the next iteration. - * We cannot transmit right here, but will unpin later. - */ -static void qemu_rdma_signal_unregister(RDMAContext *rdma, uint64_t index, - uint64_t chunk, uint64_t wr_id) -{ - if (rdma->unregistrations[rdma->unregister_next] != 0) { - error_report("rdma migration: queue is full"); - } else { - RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]); - - if (!test_and_set_bit(chunk, block->unregister_bitmap)) { - trace_qemu_rdma_signal_unregister_append(chunk, - rdma->unregister_next); - - rdma->unregistrations[rdma->unregister_next++] = - qemu_rdma_make_wrid(wr_id, index, chunk); - - if (rdma->unregister_next == RDMA_SIGNALED_SEND_MAX) { - rdma->unregister_next = 0; - } - } else { - trace_qemu_rdma_signal_unregister_already(chunk); - } - } -} - /* * Consult the connection manager to see a work request * (of any kind) has completed. * Return the work request ID that completed. */ -static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out, - uint32_t *byte_len) +static uint64_t qemu_rdma_poll(RDMAContext *rdma, struct ibv_cq *cq, + uint64_t *wr_id_out, uint32_t *byte_len) { int ret; struct ibv_wc wc; uint64_t wr_id; - ret = ibv_poll_cq(rdma->cq, 1, &wc); + ret = ibv_poll_cq(cq, 1, &wc); if (!ret) { *wr_id_out = RDMA_WRID_NONE; @@ -1473,18 +1518,6 @@ static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out, if (rdma->nb_sent > 0) { rdma->nb_sent--; } - - if (!rdma->pin_all) { - /* - * FYI: If one wanted to signal a specific chunk to be unregistered - * using LRU or workload-specific information, this is the function - * you would call to do so. That chunk would then get asynchronously - * unregistered later. - */ -#ifdef RDMA_UNREGISTRATION_EXAMPLE - qemu_rdma_signal_unregister(rdma, index, chunk, wc.wr_id); -#endif - } } else { trace_qemu_rdma_poll_other(print_wrid(wr_id), wr_id, rdma->nb_sent); } @@ -1500,7 +1533,8 @@ static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out, /* Wait for activity on the completion channel. * Returns 0 on success, none-0 on error. 
*/ -static int qemu_rdma_wait_comp_channel(RDMAContext *rdma) +static int qemu_rdma_wait_comp_channel(RDMAContext *rdma, + struct ibv_comp_channel *comp_channel) { struct rdma_cm_event *cm_event; int ret = -1; @@ -1511,7 +1545,7 @@ static int qemu_rdma_wait_comp_channel(RDMAContext *rdma) */ if (rdma->migration_started_on_destination && migration_incoming_get_current()->state == MIGRATION_STATUS_ACTIVE) { - yield_until_fd_readable(rdma->comp_channel->fd); + yield_until_fd_readable(comp_channel->fd); } else { /* This is the source side, we're in a separate thread * or destination prior to migration_fd_process_incoming() @@ -1522,7 +1556,7 @@ static int qemu_rdma_wait_comp_channel(RDMAContext *rdma) */ while (!rdma->error_state && !rdma->received_error) { GPollFD pfds[2]; - pfds[0].fd = rdma->comp_channel->fd; + pfds[0].fd = comp_channel->fd; pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR; pfds[0].revents = 0; @@ -1580,6 +1614,17 @@ static int qemu_rdma_wait_comp_channel(RDMAContext *rdma) return rdma->error_state; } +static struct ibv_comp_channel *to_channel(RDMAContext *rdma, int wrid) +{ + return wrid < RDMA_WRID_RECV_CONTROL ? rdma->send_comp_channel : + rdma->recv_comp_channel; +} + +static struct ibv_cq *to_cq(RDMAContext *rdma, int wrid) +{ + return wrid < RDMA_WRID_RECV_CONTROL ? rdma->send_cq : rdma->recv_cq; +} + /* * Block until the next work request has completed. * @@ -1600,13 +1645,15 @@ static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested, struct ibv_cq *cq; void *cq_ctx; uint64_t wr_id = RDMA_WRID_NONE, wr_id_in; + struct ibv_comp_channel *ch = to_channel(rdma, wrid_requested); + struct ibv_cq *poll_cq = to_cq(rdma, wrid_requested); - if (ibv_req_notify_cq(rdma->cq, 0)) { + if (ibv_req_notify_cq(poll_cq, 0)) { return -1; } /* poll cq first */ while (wr_id != wrid_requested) { - ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len); + ret = qemu_rdma_poll(rdma, poll_cq, &wr_id_in, byte_len); if (ret < 0) { return ret; } @@ -1627,12 +1674,12 @@ static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested, } while (1) { - ret = qemu_rdma_wait_comp_channel(rdma); + ret = qemu_rdma_wait_comp_channel(rdma, ch); if (ret) { goto err_block_for_wrid; } - ret = ibv_get_cq_event(rdma->comp_channel, &cq, &cq_ctx); + ret = ibv_get_cq_event(ch, &cq, &cq_ctx); if (ret) { perror("ibv_get_cq_event"); goto err_block_for_wrid; @@ -1646,7 +1693,7 @@ static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested, } while (wr_id != wrid_requested) { - ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len); + ret = qemu_rdma_poll(rdma, poll_cq, &wr_id_in, byte_len); if (ret < 0) { goto err_block_for_wrid; } @@ -2025,11 +2072,6 @@ retry: chunk_end = ram_chunk_end(block, chunk + chunks); - if (!rdma->pin_all) { -#ifdef RDMA_UNREGISTRATION_EXAMPLE - qemu_rdma_unregister_waiting(rdma); -#endif - } while (test_bit(chunk, block->transit_bitmap)) { (void)count; @@ -2362,13 +2404,21 @@ static void qemu_rdma_cleanup(RDMAContext *rdma) rdma_destroy_qp(rdma->cm_id); rdma->qp = NULL; } - if (rdma->cq) { - ibv_destroy_cq(rdma->cq); - rdma->cq = NULL; + if (rdma->recv_cq) { + ibv_destroy_cq(rdma->recv_cq); + rdma->recv_cq = NULL; } - if (rdma->comp_channel) { - ibv_destroy_comp_channel(rdma->comp_channel); - rdma->comp_channel = NULL; + if (rdma->send_cq) { + ibv_destroy_cq(rdma->send_cq); + rdma->send_cq = NULL; + } + if (rdma->recv_comp_channel) { + ibv_destroy_comp_channel(rdma->recv_comp_channel); + rdma->recv_comp_channel = NULL; + } + if (rdma->send_comp_channel) { + 
ibv_destroy_comp_channel(rdma->send_comp_channel); + rdma->send_comp_channel = NULL; } if (rdma->pd) { ibv_dealloc_pd(rdma->pd); @@ -2585,6 +2635,7 @@ static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp) char ip[40] = "unknown"; struct rdma_addrinfo *res, *e; char port_str[16]; + int reuse = 1; for (idx = 0; idx < RDMA_WRID_MAX; idx++) { rdma->wr_data[idx].control_len = 0; @@ -2620,6 +2671,12 @@ static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp) goto err_dest_init_bind_addr; } + ret = rdma_set_option(listen_id, RDMA_OPTION_ID, RDMA_OPTION_ID_REUSEADDR, + &reuse, sizeof reuse); + if (ret) { + ERROR(errp, "Error: could not set REUSEADDR option"); + goto err_dest_init_bind_addr; + } for (e = res; e != NULL; e = e->ai_next) { inet_ntop(e->ai_family, &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip); @@ -2713,6 +2770,7 @@ static ssize_t qio_channel_rdma_writev(QIOChannel *ioc, size_t niov, int *fds, size_t nfds, + int flags, Error **errp) { QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc); @@ -3040,11 +3098,15 @@ static void qio_channel_rdma_set_aio_fd_handler(QIOChannel *ioc, { QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc); if (io_read) { - aio_set_fd_handler(ctx, rioc->rdmain->comp_channel->fd, - false, io_read, io_write, NULL, opaque); + aio_set_fd_handler(ctx, rioc->rdmain->recv_comp_channel->fd, + false, io_read, io_write, NULL, NULL, opaque); + aio_set_fd_handler(ctx, rioc->rdmain->send_comp_channel->fd, + false, io_read, io_write, NULL, NULL, opaque); } else { - aio_set_fd_handler(ctx, rioc->rdmaout->comp_channel->fd, - false, io_read, io_write, NULL, opaque); + aio_set_fd_handler(ctx, rioc->rdmaout->recv_comp_channel->fd, + false, io_read, io_write, NULL, NULL, opaque); + aio_set_fd_handler(ctx, rioc->rdmaout->send_comp_channel->fd, + false, io_read, io_write, NULL, NULL, opaque); } } @@ -3146,33 +3208,17 @@ qio_channel_rdma_shutdown(QIOChannel *ioc, * Offset is an offset to be added to block_offset and used * to also lookup the corresponding RAMBlock. * - * @size > 0 : - * Initiate an transfer this size. - * - * @size == 0 : - * A 'hint' or 'advice' that means that we wish to speculatively - * and asynchronously unregister this memory. In this case, there is no - * guarantee that the unregister will actually happen, for example, - * if the memory is being actively transmitted. Additionally, the memory - * may be re-registered at any future time if a write within the same - * chunk was requested again, even if you attempted to unregister it - * here. - * - * @size < 0 : TODO, not yet supported - * Unregister the memory NOW. This means that the caller does not - * expect there to be any future RDMA transfers and we just want to clean - * things up. This is used in case the upper layer owns the memory and - * cannot wait for qemu_fclose() to occur. + * @size : Number of bytes to transfer * * @bytes_sent : User-specificed pointer to indicate how many bytes were * sent. Usually, this will not be more than a few bytes of * the protocol because most transfers are sent asynchronously. 
*/ -static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque, +static size_t qemu_rdma_save_page(QEMUFile *f, ram_addr_t block_offset, ram_addr_t offset, size_t size, uint64_t *bytes_sent) { - QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(opaque); + QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f)); RDMAContext *rdma; int ret; @@ -3191,61 +3237,27 @@ static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque, qemu_fflush(f); - if (size > 0) { - /* - * Add this page to the current 'chunk'. If the chunk - * is full, or the page doesn't belong to the current chunk, - * an actual RDMA write will occur and a new chunk will be formed. - */ - ret = qemu_rdma_write(f, rdma, block_offset, offset, size); - if (ret < 0) { - error_report("rdma migration: write error! %d", ret); - goto err; - } + /* + * Add this page to the current 'chunk'. If the chunk + * is full, or the page doesn't belong to the current chunk, + * an actual RDMA write will occur and a new chunk will be formed. + */ + ret = qemu_rdma_write(f, rdma, block_offset, offset, size); + if (ret < 0) { + error_report("rdma migration: write error! %d", ret); + goto err; + } - /* - * We always return 1 bytes because the RDMA - * protocol is completely asynchronous. We do not yet know - * whether an identified chunk is zero or not because we're - * waiting for other pages to potentially be merged with - * the current chunk. So, we have to call qemu_update_position() - * later on when the actual write occurs. - */ - if (bytes_sent) { - *bytes_sent = 1; - } - } else { - uint64_t index, chunk; - - /* TODO: Change QEMUFileOps prototype to be signed: size_t => long - if (size < 0) { - ret = qemu_rdma_drain_cq(f, rdma); - if (ret < 0) { - fprintf(stderr, "rdma: failed to synchronously drain" - " completion queue before unregistration.\n"); - goto err; - } - } - */ - - ret = qemu_rdma_search_ram_block(rdma, block_offset, - offset, size, &index, &chunk); - - if (ret) { - error_report("ram block search failed"); - goto err; - } - - qemu_rdma_signal_unregister(rdma, index, chunk, 0); - - /* - * TODO: Synchronous, guaranteed unregistration (should not occur during - * fast-path). Otherwise, unregisters will process on the next call to - * qemu_rdma_drain_cq() - if (size < 0) { - qemu_rdma_unregister_waiting(rdma); - } - */ + /* + * We always return 1 bytes because the RDMA + * protocol is completely asynchronous. We do not yet know + * whether an identified chunk is zero or not because we're + * waiting for other pages to potentially be merged with + * the current chunk. So, we have to call qemu_update_position() + * later on when the actual write occurs. + */ + if (bytes_sent) { + *bytes_sent = 1; } /* @@ -3257,7 +3269,22 @@ static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque, */ while (1) { uint64_t wr_id, wr_id_in; - int ret = qemu_rdma_poll(rdma, &wr_id_in, NULL); + int ret = qemu_rdma_poll(rdma, rdma->recv_cq, &wr_id_in, NULL); + if (ret < 0) { + error_report("rdma migration: polling error! %d", ret); + goto err; + } + + wr_id = wr_id_in & RDMA_WRID_TYPE_MASK; + + if (wr_id == RDMA_WRID_NONE) { + break; + } + } + + while (1) { + uint64_t wr_id, wr_id_in; + int ret = qemu_rdma_poll(rdma, rdma->send_cq, &wr_id_in, NULL); if (ret < 0) { error_report("rdma migration: polling error! 
%d", ret); goto err; @@ -3803,14 +3830,15 @@ rdma_block_notification_handle(QIOChannelRDMA *rioc, const char *name) return 0; } -static int rdma_load_hook(QEMUFile *f, void *opaque, uint64_t flags, void *data) +static int rdma_load_hook(QEMUFile *f, uint64_t flags, void *data) { + QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f)); switch (flags) { case RAM_CONTROL_BLOCK_REG: - return rdma_block_notification_handle(opaque, data); + return rdma_block_notification_handle(rioc, data); case RAM_CONTROL_HOOK: - return qemu_rdma_registration_handle(f, opaque); + return qemu_rdma_registration_handle(f, rioc); default: /* Shouldn't be called with any other values */ @@ -3818,10 +3846,10 @@ static int rdma_load_hook(QEMUFile *f, void *opaque, uint64_t flags, void *data) } } -static int qemu_rdma_registration_start(QEMUFile *f, void *opaque, +static int qemu_rdma_registration_start(QEMUFile *f, uint64_t flags, void *data) { - QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(opaque); + QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f)); RDMAContext *rdma; RCU_READ_LOCK_GUARD(); @@ -3847,10 +3875,10 @@ static int qemu_rdma_registration_start(QEMUFile *f, void *opaque, * Inform dest that dynamic registrations are done for now. * First, flush writes, if any. */ -static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque, +static int qemu_rdma_registration_stop(QEMUFile *f, uint64_t flags, void *data) { - QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(opaque); + QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f)); RDMAContext *rdma; RDMAControlHeader head = { .len = 0, .repeat = 1 }; int ret = 0; @@ -4023,12 +4051,12 @@ static QEMUFile *qemu_fopen_rdma(RDMAContext *rdma, const char *mode) rioc = QIO_CHANNEL_RDMA(object_new(TYPE_QIO_CHANNEL_RDMA)); if (mode[0] == 'w') { - rioc->file = qemu_fopen_channel_output(QIO_CHANNEL(rioc)); + rioc->file = qemu_file_new_output(QIO_CHANNEL(rioc)); rioc->rdmaout = rdma; rioc->rdmain = rdma->return_path; qemu_file_set_hooks(rioc->file, &rdma_write_hooks); } else { - rioc->file = qemu_fopen_channel_input(QIO_CHANNEL(rioc)); + rioc->file = qemu_file_new_input(QIO_CHANNEL(rioc)); rioc->rdmain = rdma; rioc->rdmaout = rdma->return_path; qemu_file_set_hooks(rioc->file, &rdma_read_hooks); diff --git a/migration/savevm.c b/migration/savevm.c index a2d1350239..e3a7fab2f4 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -35,8 +35,8 @@ #include "migration/misc.h" #include "migration/register.h" #include "migration/global_state.h" +#include "migration/channel-block.h" #include "ram.h" -#include "qemu-file-channel.h" #include "qemu-file.h" #include "savevm.h" #include "postcopy-ram.h" @@ -132,48 +132,13 @@ static struct mig_cmd_args { /***********************************************************/ /* savevm/loadvm support */ -static ssize_t block_writev_buffer(void *opaque, struct iovec *iov, int iovcnt, - int64_t pos, Error **errp) -{ - int ret; - QEMUIOVector qiov; - - qemu_iovec_init_external(&qiov, iov, iovcnt); - ret = bdrv_writev_vmstate(opaque, &qiov, pos); - if (ret < 0) { - return ret; - } - - return qiov.size; -} - -static ssize_t block_get_buffer(void *opaque, uint8_t *buf, int64_t pos, - size_t size, Error **errp) -{ - return bdrv_load_vmstate(opaque, buf, pos, size); -} - -static int bdrv_fclose(void *opaque, Error **errp) -{ - return bdrv_flush(opaque); -} - -static const QEMUFileOps bdrv_read_ops = { - .get_buffer = block_get_buffer, - .close = bdrv_fclose -}; - -static const QEMUFileOps bdrv_write_ops = { - .writev_buffer = 
block_writev_buffer, - .close = bdrv_fclose -}; - static QEMUFile *qemu_fopen_bdrv(BlockDriverState *bs, int is_writable) { if (is_writable) { - return qemu_fopen_ops(bs, &bdrv_write_ops, false); + return qemu_file_new_output(QIO_CHANNEL(qio_channel_block_new(bs))); + } else { + return qemu_file_new_input(QIO_CHANNEL(qio_channel_block_new(bs))); } - return qemu_fopen_ops(bs, &bdrv_read_ops, false); } @@ -918,9 +883,9 @@ static void vmstate_save_old_style(QEMUFile *f, SaveStateEntry *se, { int64_t old_offset, size; - old_offset = qemu_ftell_fast(f); + old_offset = qemu_file_total_transferred_fast(f); se->ops->save_state(f, se->opaque); - size = qemu_ftell_fast(f) - old_offset; + size = qemu_file_total_transferred_fast(f) - old_offset; if (vmdesc) { json_writer_int64(vmdesc, "size", size); @@ -1300,8 +1265,9 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy) save_section_footer(f, se); if (ret < 0) { - error_report("failed to save SaveStateEntry with id(name): %d(%s)", - se->section_id, se->idstr); + error_report("failed to save SaveStateEntry with id(name): " + "%d(%s): %d", + se->section_id, se->idstr, ret); qemu_file_set_error(f, ret); } if (ret <= 0) { @@ -1439,7 +1405,7 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f, if (inactivate_disks) { /* Inactivate before sending QEMU_VM_EOF so that the - * bdrv_invalidate_cache_all() on the other end won't fail. */ + * bdrv_activate_all() on the other end won't fail. */ ret = bdrv_inactivate_all(); if (ret) { error_report("%s: bdrv_inactivate_all() failed (%d)", @@ -1569,12 +1535,13 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp) migrate_init(ms); memset(&ram_counters, 0, sizeof(ram_counters)); + memset(&compression_counters, 0, sizeof(compression_counters)); ms->to_dst_file = f; - qemu_mutex_unlock_iothread(); #ifdef XBOX xemu_snapshots_save_extra_data(f); #endif + qemu_mutex_unlock_iothread(); qemu_savevm_state_header(f); qemu_savevm_state_setup(f); qemu_mutex_lock_iothread(); @@ -1689,6 +1656,7 @@ static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis, { PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_ADVISE); uint64_t remote_pagesize_summary, local_pagesize_summary, remote_tps; + size_t page_size = qemu_target_page_size(); Error *local_err = NULL; trace_loadvm_postcopy_handle_advise(); @@ -1745,13 +1713,13 @@ static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis, } remote_tps = qemu_get_be64(mis->from_src_file); - if (remote_tps != qemu_target_page_size()) { + if (remote_tps != page_size) { /* * Again, some differences could be dealt with, but for now keep it * simple. 
*/ error_report("Postcopy needs matching target page sizes (s=%d d=%zd)", - (int)remote_tps, qemu_target_page_size()); + (int)remote_tps, page_size); return -1; } @@ -1865,7 +1833,7 @@ static void *postcopy_ram_listen_thread(void *opaque) migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_POSTCOPY_ACTIVE); - qemu_sem_post(&mis->listen_thread_sem); + qemu_sem_post(&mis->thread_sync_sem); trace_postcopy_ram_listen_thread_start(); rcu_register_thread(); @@ -1950,9 +1918,10 @@ static void *postcopy_ram_listen_thread(void *opaque) static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis) { PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_LISTENING); - trace_loadvm_postcopy_handle_listen(); Error *local_err = NULL; + trace_loadvm_postcopy_handle_listen("enter"); + if (ps != POSTCOPY_INCOMING_ADVISE && ps != POSTCOPY_INCOMING_DISCARD) { error_report("CMD_POSTCOPY_LISTEN in wrong postcopy state (%d)", ps); return -1; @@ -1967,6 +1936,8 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis) } } + trace_loadvm_postcopy_handle_listen("after discard"); + /* * Sensitise RAM - can now generate requests for blocks that don't exist * However, at this point the CPU shouldn't be running, and the IO @@ -1979,19 +1950,17 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis) } } + trace_loadvm_postcopy_handle_listen("after uffd"); + if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_LISTEN, &local_err)) { error_report_err(local_err); return -1; } mis->have_listen_thread = true; - /* Start up the listening thread and wait for it to signal ready */ - qemu_sem_init(&mis->listen_thread_sem, 0); - qemu_thread_create(&mis->listen_thread, "postcopy/listen", - postcopy_ram_listen_thread, NULL, - QEMU_THREAD_DETACHED); - qemu_sem_wait(&mis->listen_thread_sem); - qemu_sem_destroy(&mis->listen_thread_sem); + postcopy_thread_create(mis, &mis->listen_thread, "postcopy/listen", + postcopy_ram_listen_thread, QEMU_THREAD_DETACHED); + trace_loadvm_postcopy_handle_listen("return"); return 0; } @@ -2001,25 +1970,29 @@ static void loadvm_postcopy_handle_run_bh(void *opaque) Error *local_err = NULL; MigrationIncomingState *mis = opaque; + trace_loadvm_postcopy_handle_run_bh("enter"); + /* TODO we should move all of this lot into postcopy_ram.c or a shared code * in migration.c */ cpu_synchronize_all_post_init(); + trace_loadvm_postcopy_handle_run_bh("after cpu sync"); + qemu_announce_self(&mis->announce_timer, migrate_announce_params()); - /* Make sure all file formats flush their mutable metadata. + trace_loadvm_postcopy_handle_run_bh("after announce"); + + /* Make sure all file formats throw away their mutable metadata. * If we get an error here, just don't restart the VM yet. 
*/ - bdrv_invalidate_cache_all(&local_err); + bdrv_activate_all(&local_err); if (local_err) { error_report_err(local_err); local_err = NULL; autostart = false; } - trace_loadvm_postcopy_handle_run_cpu_sync(); - - trace_loadvm_postcopy_handle_run_vmstart(); + trace_loadvm_postcopy_handle_run_bh("after invalidate cache"); dirty_bitmap_mig_before_vm_start(); @@ -2032,6 +2005,8 @@ static void loadvm_postcopy_handle_run_bh(void *opaque) } qemu_bh_delete(mis->bh); + + trace_loadvm_postcopy_handle_run_bh("return"); } /* After all discards we can start running and asking for pages */ @@ -2147,6 +2122,13 @@ static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis) */ qemu_sem_post(&mis->postcopy_pause_sem_fault); + if (migrate_postcopy_preempt()) { + /* The channel should already be setup again; make sure of it */ + assert(mis->postcopy_qemufile_dst); + /* Kick the fast ram load thread too */ + qemu_sem_post(&mis->postcopy_pause_sem_fast_load); + } + return 0; } @@ -2188,7 +2170,7 @@ static int loadvm_handle_cmd_packaged(MigrationIncomingState *mis) bioc->usage += length; trace_loadvm_handle_cmd_packaged_received(ret); - QEMUFile *packf = qemu_fopen_channel_input(QIO_CHANNEL(bioc)); + QEMUFile *packf = qemu_file_new_input(QIO_CHANNEL(bioc)); ret = qemu_loadvm_state_main(packf, mis); trace_loadvm_handle_cmd_packaged_main(ret); @@ -2275,12 +2257,13 @@ static int loadvm_process_command(QEMUFile *f) return qemu_file_get_error(f); } - trace_loadvm_process_command(cmd, len); if (cmd >= MIG_CMD_MAX || cmd == MIG_CMD_INVALID) { error_report("MIG_CMD 0x%x unknown (len 0x%x)", cmd, len); return -EINVAL; } + trace_loadvm_process_command(mig_cmd_args[cmd].name, len); + if (mig_cmd_args[cmd].len != -1 && mig_cmd_args[cmd].len != len) { error_report("%s received with bad length - expecting %zu, got %d", mig_cmd_args[cmd].name, @@ -2567,13 +2550,12 @@ void qemu_loadvm_state_cleanup(void) /* Return true if we should continue the migration, or false. */ static bool postcopy_pause_incoming(MigrationIncomingState *mis) { + int i; + trace_postcopy_pause_incoming(); assert(migrate_postcopy_ram()); - /* Clear the triggered bit to allow one recovery */ - mis->postcopy_recover_triggered = false; - /* * Unregister yank with either from/to src would work, since ioc behind it * is the same @@ -2592,12 +2574,37 @@ static bool postcopy_pause_incoming(MigrationIncomingState *mis) mis->to_src_file = NULL; qemu_mutex_unlock(&mis->rp_mutex); + /* + * NOTE: this must happen before reset the PostcopyTmpPages below, + * otherwise it's racy to reset those fields when the fast load thread + * can be accessing it in parallel. + */ + if (mis->postcopy_qemufile_dst) { + qemu_file_shutdown(mis->postcopy_qemufile_dst); + /* Take the mutex to make sure the fast ram load thread halted */ + qemu_mutex_lock(&mis->postcopy_prio_thread_mutex); + migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst); + qemu_fclose(mis->postcopy_qemufile_dst); + mis->postcopy_qemufile_dst = NULL; + qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex); + } + migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, MIGRATION_STATUS_POSTCOPY_PAUSED); /* Notify the fault thread for the invalidated file handle */ postcopy_fault_thread_notify(mis); + /* + * If network is interrupted, any temp page we received will be useless + * because we didn't mark them as "received" in receivedmap. After a + * proper recovery later (which will sync src dirty bitmap with receivedmap + * on dest) these cached small pages will be resent again. 
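postcopy_temp_page_reset(), used by the loop that follows (and after each placed page in the ram.c hunks earlier), is also not shown here. Judging by the function-local state it replaced -- target_pages, host_page and all_zero in the old ram_load_postcopy() -- it presumably just restores the per-channel defaults, roughly:

    void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page)
    {
        tmp_page->target_pages = 0;     /* no target pages collected yet         */
        tmp_page->host_addr = NULL;     /* next piece picks the host page anew   */
        tmp_page->all_zero = true;      /* assume zero until data says otherwise */
        /* tmp_huge_page is kept; its contents are simply overwritten. */
    }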
+ */ + for (i = 0; i < mis->postcopy_channels; i++) { + postcopy_temp_page_reset(&mis->postcopy_tmp_pages[i]); + } + error_report("Detected IO failure for postcopy. " "Migration paused."); @@ -2619,8 +2626,8 @@ retry: while (true) { section_type = qemu_get_byte(f); - if (qemu_file_get_error(f)) { - ret = qemu_file_get_error(f); + ret = qemu_file_get_error_obj_any(f, mis->postcopy_qemufile_dst, NULL); + if (ret) { break; } @@ -2796,6 +2803,8 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate, g_autoptr(GDateTime) now = g_date_time_new_now_local(); AioContext *aio_context; + GLOBAL_STATE_CODE(); + if (migration_is_blocked(errp)) { return false; } @@ -2876,7 +2885,7 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate, goto the_end; } ret = qemu_savevm_state(f, errp); - vm_state_size = qemu_ftell(f); + vm_state_size = qemu_file_total_transferred(f); ret2 = qemu_fclose(f); if (ret < 0) { goto the_end; @@ -2940,7 +2949,7 @@ void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live, goto the_end; } qio_channel_set_name(QIO_CHANNEL(ioc), "migration-xen-save-state"); - f = qemu_fopen_channel_output(QIO_CHANNEL(ioc)); + f = qemu_file_new_output(QIO_CHANNEL(ioc)); object_unref(OBJECT(ioc)); ret = qemu_save_device_state(f); if (ret < 0 || qemu_fclose(f) < 0) { @@ -2987,7 +2996,7 @@ void qmp_xen_load_devices_state(const char *filename, Error **errp) return; } qio_channel_set_name(QIO_CHANNEL(ioc), "migration-xen-load-state"); - f = qemu_fopen_channel_input(QIO_CHANNEL(ioc)); + f = qemu_file_new_input(QIO_CHANNEL(ioc)); object_unref(OBJECT(ioc)); ret = qemu_loadvm_state(f); @@ -3060,7 +3069,7 @@ bool load_snapshot(const char *name, const char *vmstate, goto err_drain; } - qemu_system_reset(SHUTDOWN_CAUSE_NONE); + qemu_system_reset(SHUTDOWN_CAUSE_SNAPSHOT_LOAD); mis->from_src_file = f; if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) { diff --git a/migration/socket.c b/migration/socket.c index 05705a32d8..e6fdf3c5e1 100644 --- a/migration/socket.c +++ b/migration/socket.c @@ -26,7 +26,7 @@ #include "io/channel-socket.h" #include "io/net-listener.h" #include "trace.h" - +#include "postcopy-ram.h" struct SocketOutgoingArgs { SocketAddress *saddr; @@ -39,6 +39,24 @@ void socket_send_channel_create(QIOTaskFunc f, void *data) f, data, NULL, NULL); } +QIOChannel *socket_send_channel_create_sync(Error **errp) +{ + QIOChannelSocket *sioc = qio_channel_socket_new(); + + if (!outgoing_args.saddr) { + object_unref(OBJECT(sioc)); + error_setg(errp, "Initial sock address not set!"); + return NULL; + } + + if (qio_channel_socket_connect_sync(sioc, outgoing_args.saddr, errp) < 0) { + object_unref(OBJECT(sioc)); + return NULL; + } + + return QIO_CHANNEL(sioc); +} + int socket_send_channel_destroy(QIOChannel *send) { /* Remove channel */ @@ -74,9 +92,17 @@ static void socket_outgoing_migration(QIOTask *task, if (qio_task_propagate_error(task, &err)) { trace_migration_socket_outgoing_error(error_get_pretty(err)); - } else { - trace_migration_socket_outgoing_connected(data->hostname); + goto out; } + + trace_migration_socket_outgoing_connected(data->hostname); + + if (migrate_use_zero_copy_send() && + !qio_channel_has_feature(sioc, QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY)) { + error_setg(&err, "Zero copy send feature not detected in host kernel"); + } + +out: migration_channel_connect(data->s, sioc, data->hostname, err); object_unref(OBJECT(sioc)); } @@ -158,6 +184,8 @@ socket_start_incoming_migration_internal(SocketAddress *saddr, if 
(migrate_use_multifd()) { num = migrate_multifd_channels(); + } else if (migrate_postcopy_preempt()) { + num = RAM_CHANNEL_MAX; } if (qio_net_listener_open_sync(listener, saddr, num, errp) < 0) { diff --git a/migration/socket.h b/migration/socket.h index 891dbccceb..dc54df4e6c 100644 --- a/migration/socket.h +++ b/migration/socket.h @@ -21,6 +21,7 @@ #include "io/task.h" void socket_send_channel_create(QIOTaskFunc f, void *data); +QIOChannel *socket_send_channel_create_sync(Error **errp); int socket_send_channel_destroy(QIOChannel *send); void socket_start_incoming_migration(const char *str, Error **errp); diff --git a/migration/tls.c b/migration/tls.c index ca1ea3bbdd..73e8c9d3c2 100644 --- a/migration/tls.c +++ b/migration/tls.c @@ -137,10 +137,6 @@ QIOChannelTLS *migration_tls_client_create(MigrationState *s, if (s->parameters.tls_hostname && *s->parameters.tls_hostname) { hostname = s->parameters.tls_hostname; } - if (!hostname) { - error_setg(errp, "No hostname available for TLS"); - return NULL; - } tioc = qio_channel_tls_new_client( ioc, creds, hostname, errp); @@ -170,3 +166,12 @@ void migration_tls_channel_connect(MigrationState *s, NULL, NULL); } + +bool migrate_channel_requires_tls_upgrade(QIOChannel *ioc) +{ + if (!migrate_use_tls()) { + return false; + } + + return !object_dynamic_cast(OBJECT(ioc), TYPE_QIO_CHANNEL_TLS); +} diff --git a/migration/tls.h b/migration/tls.h index de4fe2cafd..98e23c9b0e 100644 --- a/migration/tls.h +++ b/migration/tls.h @@ -37,4 +37,8 @@ void migration_tls_channel_connect(MigrationState *s, QIOChannel *ioc, const char *hostname, Error **errp); + +/* Whether the QIO channel requires further TLS handshake? */ +bool migrate_channel_requires_tls_upgrade(QIOChannel *ioc); + #endif diff --git a/migration/trace-events b/migration/trace-events index a1c0f034ab..57003edcbd 100644 --- a/migration/trace-events +++ b/migration/trace-events @@ -14,15 +14,14 @@ loadvm_handle_cmd_packaged_main(int ret) "%d" loadvm_handle_cmd_packaged_received(int ret) "%d" loadvm_handle_recv_bitmap(char *s) "%s" loadvm_postcopy_handle_advise(void) "" -loadvm_postcopy_handle_listen(void) "" +loadvm_postcopy_handle_listen(const char *str) "%s" loadvm_postcopy_handle_run(void) "" -loadvm_postcopy_handle_run_cpu_sync(void) "" -loadvm_postcopy_handle_run_vmstart(void) "" +loadvm_postcopy_handle_run_bh(const char *str) "%s" loadvm_postcopy_handle_resume(void) "" loadvm_postcopy_ram_handle_discard(void) "" loadvm_postcopy_ram_handle_discard_end(void) "" loadvm_postcopy_ram_handle_discard_header(const char *ramid, uint16_t len) "%s: %ud" -loadvm_process_command(uint16_t com, uint16_t len) "com=0x%x len=%d" +loadvm_process_command(const char *s, uint16_t len) "com=%s len=%d" loadvm_process_command_ping(uint32_t val) "0x%x" postcopy_ram_listen_thread_exit(void) "" postcopy_ram_listen_thread_start(void) "" @@ -94,7 +93,7 @@ migration_bitmap_clear_dirty(char *str, uint64_t start, uint64_t size, unsigned migration_throttle(void) "" ram_discard_range(const char *rbname, uint64_t start, size_t len) "%s: start: %" PRIx64 " %zx" ram_load_loop(const char *rbname, uint64_t addr, int flags, void *host) "%s: addr: 0x%" PRIx64 " flags: 0x%x host: %p" -ram_load_postcopy_loop(uint64_t addr, int flags) "@%" PRIx64 " %x" +ram_load_postcopy_loop(int channel, uint64_t addr, int flags) "chan=%d addr=0x%" PRIx64 " flags=0x%x" ram_postcopy_send_discard_bitmap(void) "" ram_save_page(const char *rbname, uint64_t offset, void *host) "%s: offset: 0x%" PRIx64 " host: %p" ram_save_queue_pages(const char *rbname, 
size_t start, size_t len) "%s: start: 0x%zx len: 0x%zx" @@ -113,25 +112,31 @@ ram_save_iterate_big_wait(uint64_t milliconds, int iterations) "big wait: %" PRI ram_load_complete(int ret, uint64_t seq_iter) "exit_code %d seq iteration %" PRIu64 ram_write_tracking_ramblock_start(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu" ram_write_tracking_ramblock_stop(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu" +postcopy_preempt_triggered(char *str, unsigned long page) "during sending ramblock %s offset 0x%lx" +postcopy_preempt_restored(char *str, unsigned long page) "ramblock %s offset 0x%lx" +postcopy_preempt_hit(char *str, uint64_t offset) "ramblock %s offset 0x%"PRIx64 +postcopy_preempt_send_host_page(char *str, uint64_t offset) "ramblock %s offset 0x%"PRIx64 +postcopy_preempt_switch_channel(int channel) "%d" +postcopy_preempt_reset_channel(void) "" # multifd.c -multifd_new_send_channel_async(uint8_t id) "channel %d" -multifd_recv(uint8_t id, uint64_t packet_num, uint32_t used, uint32_t flags, uint32_t next_packet_size) "channel %d packet_num %" PRIu64 " pages %d flags 0x%x next packet size %d" -multifd_recv_new_channel(uint8_t id) "channel %d" +multifd_new_send_channel_async(uint8_t id) "channel %u" +multifd_recv(uint8_t id, uint64_t packet_num, uint32_t used, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " pages %u flags 0x%x next packet size %u" +multifd_recv_new_channel(uint8_t id) "channel %u" multifd_recv_sync_main(long packet_num) "packet num %ld" -multifd_recv_sync_main_signal(uint8_t id) "channel %d" -multifd_recv_sync_main_wait(uint8_t id) "channel %d" +multifd_recv_sync_main_signal(uint8_t id) "channel %u" +multifd_recv_sync_main_wait(uint8_t id) "channel %u" multifd_recv_terminate_threads(bool error) "error %d" -multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t pages) "channel %d packets %" PRIu64 " pages %" PRIu64 -multifd_recv_thread_start(uint8_t id) "%d" -multifd_send(uint8_t id, uint64_t packet_num, uint32_t used, uint32_t flags, uint32_t next_packet_size) "channel %d packet_num %" PRIu64 " pages %d flags 0x%x next packet size %d" -multifd_send_error(uint8_t id) "channel %d" +multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t pages) "channel %u packets %" PRIu64 " pages %" PRIu64 +multifd_recv_thread_start(uint8_t id) "%u" +multifd_send(uint8_t id, uint64_t packet_num, uint32_t normal, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " normal pages %u flags 0x%x next packet size %u" +multifd_send_error(uint8_t id) "channel %u" multifd_send_sync_main(long packet_num) "packet num %ld" -multifd_send_sync_main_signal(uint8_t id) "channel %d" -multifd_send_sync_main_wait(uint8_t id) "channel %d" +multifd_send_sync_main_signal(uint8_t id) "channel %u" +multifd_send_sync_main_wait(uint8_t id) "channel %u" multifd_send_terminate_threads(bool error) "error %d" -multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t pages) "channel %d packets %" PRIu64 " pages %" PRIu64 -multifd_send_thread_start(uint8_t id) "%d" +multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages) "channel %u packets %" PRIu64 " normal pages %" PRIu64 +multifd_send_thread_start(uint8_t id) "%u" multifd_tls_outgoing_handshake_start(void *ioc, void *tioc, const char *hostname) "ioc=%p tioc=%p hostname=%s" multifd_tls_outgoing_handshake_error(void *ioc, const char *err) "ioc=%p err=%s" 
multifd_tls_outgoing_handshake_complete(void *ioc) "ioc=%p" @@ -178,6 +183,7 @@ migration_thread_low_pending(uint64_t pending) "%" PRIu64 migrate_transferred(uint64_t tranferred, uint64_t time_spent, uint64_t bandwidth, uint64_t size) "transferred %" PRIu64 " time_spent %" PRIu64 " bandwidth %" PRIu64 " max_size %" PRId64 process_incoming_migration_co_end(int ret, int ps) "ret=%d postcopy-state=%d" process_incoming_migration_co_postcopy_end_main(void) "" +postcopy_preempt_enabled(bool value) "%d" # channel.c migration_set_incoming_channel(void *ioc, const char *ioctype) "ioc=%p ioctype=%s" @@ -212,6 +218,8 @@ qemu_rdma_poll_write(const char *compstr, int64_t comp, int left, uint64_t block qemu_rdma_poll_other(const char *compstr, int64_t comp, int left) "other completion %s (%" PRId64 ") received left %d" qemu_rdma_post_send_control(const char *desc) "CONTROL: sending %s.." qemu_rdma_register_and_get_keys(uint64_t len, void *start) "Registering %" PRIu64 " bytes @ %p" +qemu_rdma_register_odp_mr(const char *name) "Try to register On-Demand Paging memory region: %s" +qemu_rdma_advise_mr(const char *name, uint32_t len, uint64_t addr, const char *res) "Try to advise block %s prefetch at %" PRIu32 "@0x%" PRIx64 ": %s" qemu_rdma_registration_handle_compress(int64_t length, int index, int64_t offset) "Zapping zero chunk: %" PRId64 " bytes, index %d, offset %" PRId64 qemu_rdma_registration_handle_finished(void) "" qemu_rdma_registration_handle_ram_blocks(void) "" @@ -263,6 +271,8 @@ mark_postcopy_blocktime_begin(uint64_t addr, void *dd, uint32_t time, int cpu, i mark_postcopy_blocktime_end(uint64_t addr, void *dd, uint32_t time, int affected_cpu) "addr: 0x%" PRIx64 ", dd: %p, time: %u, affected_cpu: %d" postcopy_pause_fault_thread(void) "" postcopy_pause_fault_thread_continued(void) "" +postcopy_pause_fast_load(void) "" +postcopy_pause_fast_load_continued(void) "" postcopy_ram_fault_thread_entry(void) "" postcopy_ram_fault_thread_exit(void) "" postcopy_ram_fault_thread_fds_core(int baseufd, int quitfd) "ufd: %d quitfd: %d" @@ -278,6 +288,10 @@ postcopy_request_shared_page(const char *sharer, const char *rb, uint64_t rb_off postcopy_request_shared_page_present(const char *sharer, const char *rb, uint64_t rb_offset) "%s already %s offset 0x%"PRIx64 postcopy_wake_shared(uint64_t client_addr, const char *rb) "at 0x%"PRIx64" in %s" postcopy_page_req_del(void *addr, int count) "resolved page req %p total %d" +postcopy_preempt_tls_handshake(void) "" +postcopy_preempt_new_channel(void) "" +postcopy_preempt_thread_entry(void) "" +postcopy_preempt_thread_exit(void) "" get_mem_fault_cpu_index(int cpu, uint32_t pid) "cpu: %d, pid: %u" @@ -331,6 +345,8 @@ get_ramblock_vfn_hash(const char *idstr, uint64_t vfn, uint32_t crc) "ramblock n calc_page_dirty_rate(const char *idstr, uint32_t new_crc, uint32_t old_crc) "ramblock name: %s, new crc: %" PRIu32 ", old crc: %" PRIu32 skip_sample_ramblock(const char *idstr, uint64_t ramblock_size) "ramblock name: %s, ramblock size: %" PRIu64 find_page_matched(const char *idstr) "ramblock %s addr or size changed" +dirtyrate_calculate(int64_t dirtyrate) "dirty rate: %" PRIi64 " MB/s" +dirtyrate_do_calculate_vcpu(int idx, uint64_t rate) "vcpu[%d]: %"PRIu64 " MB/s" # block.c migration_block_init_shared(const char *blk_device_name) "Start migration for %s with shared base image" diff --git a/migration/vmstate-types.c b/migration/vmstate-types.c index bf4d440308..e83bfccb9e 100644 --- a/migration/vmstate-types.c +++ b/migration/vmstate-types.c @@ -11,6 +11,7 @@ */ #include 
"qemu/osdep.h" +#include "qemu/cpu-float.h" #include "qemu-file.h" #include "migration.h" #include "migration/vmstate.h" diff --git a/migration/vmstate.c b/migration/vmstate.c index 05f87cdddc..924494bda3 100644 --- a/migration/vmstate.c +++ b/migration/vmstate.c @@ -90,12 +90,6 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd, return -EINVAL; } if (version_id < vmsd->minimum_version_id) { - if (vmsd->load_state_old && - version_id >= vmsd->minimum_version_id_old) { - ret = vmsd->load_state_old(f, opaque, version_id); - trace_vmstate_load_state_end(vmsd->name, "old path", ret); - return ret; - } error_report("%s: incoming version_id %d is too old " "for local minimum version_id %d", vmsd->name, version_id, vmsd->minimum_version_id); @@ -366,7 +360,7 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd, void *curr_elem = first_elem + size * i; vmsd_desc_field_start(vmsd, vmdesc_loop, field, i, n_elems); - old_offset = qemu_ftell_fast(f); + old_offset = qemu_file_total_transferred_fast(f); if (field->flags & VMS_ARRAY_OF_POINTER) { assert(curr_elem); curr_elem = *(void **)curr_elem; @@ -396,7 +390,8 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd, return ret; } - written_bytes = qemu_ftell_fast(f) - old_offset; + written_bytes = qemu_file_total_transferred_fast(f) - + old_offset; vmsd_desc_field_end(vmsd, vmdesc_loop, field, written_bytes, i); /* Compressed arrays only care about the first element */ diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c index e00255f7ee..01b789a79e 100644 --- a/monitor/hmp-cmds.c +++ b/monitor/hmp-cmds.c @@ -40,8 +40,11 @@ #include "qapi/qapi-commands-pci.h" #include "qapi/qapi-commands-rocker.h" #include "qapi/qapi-commands-run-state.h" +#include "qapi/qapi-commands-stats.h" #include "qapi/qapi-commands-tpm.h" #include "qapi/qapi-commands-ui.h" +#include "qapi/qapi-commands-virtio.h" +#include "qapi/qapi-visit-virtio.h" #include "qapi/qapi-visit-net.h" #include "qapi/qapi-visit-migration.h" #include "qapi/qmp/qdict.h" @@ -52,9 +55,8 @@ #include "ui/console.h" #include "qemu/cutils.h" #include "qemu/error-report.h" -#include "exec/ramlist.h" +#include "hw/core/cpu.h" #include "hw/intc/intc.h" -#include "hw/rdma/rdma.h" #include "migration/snapshot.h" #include "migration/misc.h" @@ -62,11 +64,13 @@ #include #endif -void hmp_handle_error(Monitor *mon, Error *err) +bool hmp_handle_error(Monitor *mon, Error *err) { if (err) { error_reportf_err(err, "Error: "); + return true; } + return false; } /* @@ -195,27 +199,6 @@ void hmp_info_mice(Monitor *mon, const QDict *qdict) qapi_free_MouseInfoList(mice_list); } -static char *SocketAddress_to_str(SocketAddress *addr) -{ - switch (addr->type) { - case SOCKET_ADDRESS_TYPE_INET: - return g_strdup_printf("tcp:%s:%s", - addr->u.inet.host, - addr->u.inet.port); - case SOCKET_ADDRESS_TYPE_UNIX: - return g_strdup_printf("unix:%s", - addr->u.q_unix.path); - case SOCKET_ADDRESS_TYPE_FD: - return g_strdup_printf("fd:%s", addr->u.fd.str); - case SOCKET_ADDRESS_TYPE_VSOCK: - return g_strdup_printf("tcp:%s:%s", - addr->u.vsock.cid, - addr->u.vsock.port); - default: - return g_strdup("unknown address type"); - } -} - void hmp_info_migrate(Monitor *mon, const QDict *qdict) { MigrationInfo *info; @@ -293,6 +276,23 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict) monitor_printf(mon, "postcopy request count: %" PRIu64 "\n", info->ram->postcopy_requests); } + if (info->ram->precopy_bytes) { + monitor_printf(mon, "precopy ram: %" PRIu64 " kbytes\n", + 
info->ram->precopy_bytes >> 10); + } + if (info->ram->downtime_bytes) { + monitor_printf(mon, "downtime ram: %" PRIu64 " kbytes\n", + info->ram->downtime_bytes >> 10); + } + if (info->ram->postcopy_bytes) { + monitor_printf(mon, "postcopy ram: %" PRIu64 " kbytes\n", + info->ram->postcopy_bytes >> 10); + } + if (info->ram->dirty_sync_missed_zero_copy) { + monitor_printf(mon, + "Zero-copy-send fallbacks happened: %" PRIu64 " times\n", + info->ram->dirty_sync_missed_zero_copy); + } } if (info->has_disk) { @@ -361,7 +361,7 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict) monitor_printf(mon, "socket address: [\n"); for (addr = info->socket_address; addr; addr = addr->next) { - char *s = SocketAddress_to_str(addr->value); + char *s = socket_uri(addr->value); monitor_printf(mon, "\t%s\n", s); g_free(s); } @@ -577,8 +577,7 @@ void hmp_info_vnc(Monitor *mon, const QDict *qdict) info2l = qmp_query_vnc_servers(&err); info2l_head = info2l; - if (err) { - hmp_handle_error(mon, err); + if (hmp_handle_error(mon, err)) { return; } if (!info2l) { @@ -693,8 +692,7 @@ void hmp_info_balloon(Monitor *mon, const QDict *qdict) Error *err = NULL; info = qmp_query_balloon(&err); - if (err) { - hmp_handle_error(mon, err); + if (hmp_handle_error(mon, err)) { return; } @@ -713,7 +711,7 @@ static void hmp_info_pci_device(Monitor *mon, const PciDeviceInfo *dev) monitor_printf(mon, " "); if (dev->class_info->has_desc) { - monitor_printf(mon, "%s", dev->class_info->desc); + monitor_puts(mon, dev->class_info->desc); } else { monitor_printf(mon, "Class %04" PRId64, dev->class_info->q_class); } @@ -786,44 +784,6 @@ static void hmp_info_pci_device(Monitor *mon, const PciDeviceInfo *dev) } } -static int hmp_info_irq_foreach(Object *obj, void *opaque) -{ - InterruptStatsProvider *intc; - InterruptStatsProviderClass *k; - Monitor *mon = opaque; - - if (object_dynamic_cast(obj, TYPE_INTERRUPT_STATS_PROVIDER)) { - intc = INTERRUPT_STATS_PROVIDER(obj); - k = INTERRUPT_STATS_PROVIDER_GET_CLASS(obj); - uint64_t *irq_counts; - unsigned int nb_irqs, i; - if (k->get_statistics && - k->get_statistics(intc, &irq_counts, &nb_irqs)) { - if (nb_irqs > 0) { - monitor_printf(mon, "IRQ statistics for %s:\n", - object_get_typename(obj)); - for (i = 0; i < nb_irqs; i++) { - if (irq_counts[i] > 0) { - monitor_printf(mon, "%2d: %" PRId64 "\n", i, - irq_counts[i]); - } - } - } - } else { - monitor_printf(mon, "IRQ statistics not available for %s.\n", - object_get_typename(obj)); - } - } - - return 0; -} - -void hmp_info_irq(Monitor *mon, const QDict *qdict) -{ - object_child_foreach_recursive(object_get_root(), - hmp_info_irq_foreach, mon); -} - static int hmp_info_pic_foreach(Object *obj, void *opaque) { InterruptStatsProvider *intc; @@ -850,32 +810,6 @@ void hmp_info_pic(Monitor *mon, const QDict *qdict) hmp_info_pic_foreach, mon); } -static int hmp_info_rdma_foreach(Object *obj, void *opaque) -{ - RdmaProvider *rdma; - RdmaProviderClass *k; - Monitor *mon = opaque; - - if (object_dynamic_cast(obj, INTERFACE_RDMA_PROVIDER)) { - rdma = RDMA_PROVIDER(obj); - k = RDMA_PROVIDER_GET_CLASS(obj); - if (k->print_statistics) { - k->print_statistics(mon, rdma); - } else { - monitor_printf(mon, "RDMA statistics not available for %s.\n", - object_get_typename(obj)); - } - } - - return 0; -} - -void hmp_info_rdma(Monitor *mon, const QDict *qdict) -{ - object_child_foreach_recursive(object_get_root(), - hmp_info_rdma_foreach, mon); -} - void hmp_info_pci(Monitor *mon, const QDict *qdict) { PciInfoList *info_list, *info; @@ -925,10 +859,10 @@ void 
hmp_info_tpm(Monitor *mon, const QDict *qdict) c, TpmModel_str(ti->model)); monitor_printf(mon, " \\ %s: type=%s", - ti->id, TpmTypeOptionsKind_str(ti->options->type)); + ti->id, TpmType_str(ti->options->type)); switch (ti->options->type) { - case TPM_TYPE_OPTIONS_KIND_PASSTHROUGH: + case TPM_TYPE_PASSTHROUGH: tpo = ti->options->u.passthrough.data; monitor_printf(mon, "%s%s%s%s", tpo->has_path ? ",path=" : "", @@ -936,11 +870,11 @@ void hmp_info_tpm(Monitor *mon, const QDict *qdict) tpo->has_cancel_path ? ",cancel-path=" : "", tpo->has_cancel_path ? tpo->cancel_path : ""); break; - case TPM_TYPE_OPTIONS_KIND_EMULATOR: + case TPM_TYPE_EMULATOR: teo = ti->options->u.emulator.data; monitor_printf(mon, ",chardev=%s", teo->chardev); break; - case TPM_TYPE_OPTIONS_KIND__MAX: + case TPM_TYPE__MAX: break; } monitor_printf(mon, "\n"); @@ -1065,8 +999,7 @@ void hmp_ringbuf_read(Monitor *mon, const QDict *qdict) int i; data = qmp_ringbuf_read(chardev, size, false, 0, &err); - if (err) { - hmp_handle_error(mon, err); + if (hmp_handle_error(mon, err)) { return; } @@ -1451,10 +1384,35 @@ void hmp_set_password(Monitor *mon, const QDict *qdict) { const char *protocol = qdict_get_str(qdict, "protocol"); const char *password = qdict_get_str(qdict, "password"); + const char *display = qdict_get_try_str(qdict, "display"); const char *connected = qdict_get_try_str(qdict, "connected"); Error *err = NULL; - qmp_set_password(protocol, password, !!connected, connected, &err); + SetPasswordOptions opts = { + .password = (char *)password, + .has_connected = !!connected, + }; + + opts.connected = qapi_enum_parse(&SetPasswordAction_lookup, connected, + SET_PASSWORD_ACTION_KEEP, &err); + if (err) { + goto out; + } + + opts.protocol = qapi_enum_parse(&DisplayProtocol_lookup, protocol, + DISPLAY_PROTOCOL_VNC, &err); + if (err) { + goto out; + } + + if (opts.protocol == DISPLAY_PROTOCOL_VNC) { + opts.u.vnc.has_display = !!display; + opts.u.vnc.display = (char *)display; + } + + qmp_set_password(&opts, &err); + +out: hmp_handle_error(mon, err); } @@ -1462,9 +1420,27 @@ void hmp_expire_password(Monitor *mon, const QDict *qdict) { const char *protocol = qdict_get_str(qdict, "protocol"); const char *whenstr = qdict_get_str(qdict, "time"); + const char *display = qdict_get_try_str(qdict, "display"); Error *err = NULL; - qmp_expire_password(protocol, whenstr, &err); + ExpirePasswordOptions opts = { + .time = (char *)whenstr, + }; + + opts.protocol = qapi_enum_parse(&DisplayProtocol_lookup, protocol, + DISPLAY_PROTOCOL_VNC, &err); + if (err) { + goto out; + } + + if (opts.protocol == DISPLAY_PROTOCOL_VNC) { + opts.u.vnc.has_display = !!display; + opts.u.vnc.display = (char *)display; + } + + qmp_expire_password(&opts, &err); + +out: hmp_handle_error(mon, err); } @@ -1484,6 +1460,7 @@ void hmp_change(Monitor *mon, const QDict *qdict) const char *target = qdict_get_str(qdict, "target"); const char *arg = qdict_get_try_str(qdict, "arg"); const char *read_only = qdict_get_try_str(qdict, "read-only-mode"); + bool force = qdict_get_try_bool(qdict, "force", false); BlockdevChangeReadOnlyMode read_only_mode = 0; Error *err = NULL; @@ -1496,7 +1473,7 @@ void hmp_change(Monitor *mon, const QDict *qdict) } if (strcmp(target, "passwd") == 0 || strcmp(target, "password") == 0) { - if (arg) { + if (!arg) { MonitorHMP *hmp_mon = container_of(mon, MonitorHMP, common); monitor_read_password(hmp_mon, hmp_change_read_arg, NULL); return; @@ -1520,7 +1497,8 @@ void hmp_change(Monitor *mon, const QDict *qdict) } qmp_blockdev_change_medium(true, 
device, false, NULL, target, - !!arg, arg, !!read_only, read_only_mode, + !!arg, arg, true, force, + !!read_only, read_only_mode, &err); } @@ -1582,8 +1560,7 @@ void hmp_migrate(Monitor *mon, const QDict *qdict) qmp_migrate(uri, !!blk, blk, !!inc, inc, false, false, true, resume, &err); - if (err) { - hmp_handle_error(mon, err); + if (hmp_handle_error(mon, err)) { return; } @@ -1733,9 +1710,19 @@ hmp_screendump(Monitor *mon, const QDict *qdict) const char *filename = qdict_get_str(qdict, "filename"); const char *id = qdict_get_try_str(qdict, "device"); int64_t head = qdict_get_try_int(qdict, "head", 0); + const char *input_format = qdict_get_try_str(qdict, "format"); Error *err = NULL; + ImageFormat format; - qmp_screendump(filename, id != NULL, id, id != NULL, head, &err); + format = qapi_enum_parse(&ImageFormat_lookup, input_format, + IMAGE_FORMAT_PPM, &err); + if (err) { + goto end; + } + + qmp_screendump(filename, id != NULL, id, id != NULL, head, + input_format != NULL, format, &err); +end: hmp_handle_error(mon, err); } @@ -1823,6 +1810,7 @@ void hmp_info_memory_devices(Monitor *mon, const QDict *qdict) VirtioMEMDeviceInfo *vmi; MemoryDeviceInfo *value; PCDIMMDeviceInfo *di; + SgxEPCDeviceInfo *se; for (info = info_list; info; info = info->next) { value = info->value; @@ -1870,6 +1858,16 @@ void hmp_info_memory_devices(Monitor *mon, const QDict *qdict) vmi->block_size); monitor_printf(mon, " memdev: %s\n", vmi->memdev); break; + case MEMORY_DEVICE_INFO_KIND_SGX_EPC: + se = value->u.sgx_epc.data; + monitor_printf(mon, "Memory device [%s]: \"%s\"\n", + MemoryDeviceInfoKind_str(value->type), + se->id ? se->id : ""); + monitor_printf(mon, " memaddr: 0x%" PRIx64 "\n", se->memaddr); + monitor_printf(mon, " size: %" PRIu64 "\n", se->size); + monitor_printf(mon, " node: %" PRId64 "\n", se->node); + monitor_printf(mon, " memdev: %s\n", se->memdev); + break; default: g_assert_not_reached(); } @@ -1907,8 +1905,7 @@ void hmp_rocker(Monitor *mon, const QDict *qdict) Error *err = NULL; rocker = qmp_query_rocker(name, &err); - if (err != NULL) { - hmp_handle_error(mon, err); + if (hmp_handle_error(mon, err)) { return; } @@ -1926,8 +1923,7 @@ void hmp_rocker_ports(Monitor *mon, const QDict *qdict) Error *err = NULL; list = qmp_query_rocker_ports(name, &err); - if (err != NULL) { - hmp_handle_error(mon, err); + if (hmp_handle_error(mon, err)) { return; } @@ -1935,7 +1931,7 @@ void hmp_rocker_ports(Monitor *mon, const QDict *qdict) monitor_printf(mon, " port link duplex neg?\n"); for (port = list; port; port = port->next) { - monitor_printf(mon, "%10s %-4s %-3s %2s %-3s\n", + monitor_printf(mon, "%10s %-4s %-3s %2s %s\n", port->value->name, port->value->enabled ? port->value->link_up ? 
"up" : "down" : "!ena", @@ -1955,8 +1951,7 @@ void hmp_rocker_of_dpa_flows(Monitor *mon, const QDict *qdict) Error *err = NULL; list = qmp_query_rocker_of_dpa_flows(name, tbl_id != -1, tbl_id, &err); - if (err != NULL) { - hmp_handle_error(mon, err); + if (hmp_handle_error(mon, err)) { return; } @@ -2105,8 +2100,7 @@ void hmp_rocker_of_dpa_groups(Monitor *mon, const QDict *qdict) Error *err = NULL; list = qmp_query_rocker_of_dpa_groups(name, type != 9, type, &err); - if (err != NULL) { - hmp_handle_error(mon, err); + if (hmp_handle_error(mon, err)) { return; } @@ -2199,11 +2193,6 @@ void hmp_rocker_of_dpa_groups(Monitor *mon, const QDict *qdict) qapi_free_RockerOfDpaGroupList(list); } -void hmp_info_ramblock(Monitor *mon, const QDict *qdict) -{ - ram_block_dump(mon); -} - void hmp_info_vm_generation_id(Monitor *mon, const QDict *qdict) { Error *err = NULL; @@ -2232,3 +2221,543 @@ void hmp_info_memory_size_summary(Monitor *mon, const QDict *qdict) } hmp_handle_error(mon, err); } + +static void print_stats_schema_value(Monitor *mon, StatsSchemaValue *value) +{ + const char *unit = NULL; + monitor_printf(mon, " %s (%s%s", value->name, StatsType_str(value->type), + value->has_unit || value->exponent ? ", " : ""); + + if (value->has_unit) { + if (value->unit == STATS_UNIT_SECONDS) { + unit = "s"; + } else if (value->unit == STATS_UNIT_BYTES) { + unit = "B"; + } + } + + if (unit && value->base == 10 && + value->exponent >= -18 && value->exponent <= 18 && + value->exponent % 3 == 0) { + monitor_puts(mon, si_prefix(value->exponent)); + } else if (unit && value->base == 2 && + value->exponent >= 0 && value->exponent <= 60 && + value->exponent % 10 == 0) { + + monitor_puts(mon, iec_binary_prefix(value->exponent)); + } else if (value->exponent) { + /* Use exponential notation and write the unit's English name */ + monitor_printf(mon, "* %d^%d%s", + value->base, value->exponent, + value->has_unit ? " " : ""); + unit = NULL; + } + + if (value->has_unit) { + monitor_puts(mon, unit ? 
unit : StatsUnit_str(value->unit)); + } + + /* Print bucket size for linear histograms */ + if (value->type == STATS_TYPE_LINEAR_HISTOGRAM && value->has_bucket_size) { + monitor_printf(mon, ", bucket size=%d", value->bucket_size); + } + monitor_printf(mon, ")"); +} + +static StatsSchemaValueList *find_schema_value_list( + StatsSchemaList *list, StatsProvider provider, + StatsTarget target) +{ + StatsSchemaList *node; + + for (node = list; node; node = node->next) { + if (node->value->provider == provider && + node->value->target == target) { + return node->value->stats; + } + } + return NULL; +} + +static void print_stats_results(Monitor *mon, StatsTarget target, + bool show_provider, + StatsResult *result, + StatsSchemaList *schema) +{ + /* Find provider schema */ + StatsSchemaValueList *schema_value_list = + find_schema_value_list(schema, result->provider, target); + StatsList *stats_list; + + if (!schema_value_list) { + monitor_printf(mon, "failed to find schema list for %s\n", + StatsProvider_str(result->provider)); + return; + } + + if (show_provider) { + monitor_printf(mon, "provider: %s\n", + StatsProvider_str(result->provider)); + } + + for (stats_list = result->stats; stats_list; + stats_list = stats_list->next, + schema_value_list = schema_value_list->next) { + + Stats *stats = stats_list->value; + StatsValue *stats_value = stats->value; + StatsSchemaValue *schema_value = schema_value_list->value; + + /* Find schema entry */ + while (!g_str_equal(stats->name, schema_value->name)) { + if (!schema_value_list->next) { + monitor_printf(mon, "failed to find schema entry for %s\n", + stats->name); + return; + } + schema_value_list = schema_value_list->next; + schema_value = schema_value_list->value; + } + + print_stats_schema_value(mon, schema_value); + + if (stats_value->type == QTYPE_QNUM) { + monitor_printf(mon, ": %" PRId64 "\n", stats_value->u.scalar); + } else if (stats_value->type == QTYPE_QBOOL) { + monitor_printf(mon, ": %s\n", stats_value->u.boolean ? "yes" : "no"); + } else if (stats_value->type == QTYPE_QLIST) { + uint64List *list; + int i; + + monitor_printf(mon, ": "); + for (list = stats_value->u.list, i = 1; + list; + list = list->next, i++) { + monitor_printf(mon, "[%d]=%" PRId64 " ", i, list->value); + } + monitor_printf(mon, "\n"); + } + } +} + +/* Create the StatsFilter that is needed for an "info stats" invocation. */ +static StatsFilter *stats_filter(StatsTarget target, const char *names, + int cpu_index, StatsProvider provider) +{ + StatsFilter *filter = g_malloc0(sizeof(*filter)); + StatsProvider provider_idx; + StatsRequestList *request_list = NULL; + + filter->target = target; + switch (target) { + case STATS_TARGET_VM: + break; + case STATS_TARGET_VCPU: + { + strList *vcpu_list = NULL; + CPUState *cpu = qemu_get_cpu(cpu_index); + char *canonical_path = object_get_canonical_path(OBJECT(cpu)); + + QAPI_LIST_PREPEND(vcpu_list, canonical_path); + filter->u.vcpu.has_vcpus = true; + filter->u.vcpu.vcpus = vcpu_list; + break; + } + default: + break; + } + + if (!names && provider == STATS_PROVIDER__MAX) { + return filter; + } + + /* + * "info stats" can only query either one or all the providers. Querying + * by name, but not by provider, requires the creation of one filter per + * provider. 
+ */ + for (provider_idx = 0; provider_idx < STATS_PROVIDER__MAX; provider_idx++) { + if (provider == STATS_PROVIDER__MAX || provider == provider_idx) { + StatsRequest *request = g_new0(StatsRequest, 1); + request->provider = provider_idx; + if (names && !g_str_equal(names, "*")) { + request->has_names = true; + request->names = strList_from_comma_list(names); + } + QAPI_LIST_PREPEND(request_list, request); + } + } + + filter->has_providers = true; + filter->providers = request_list; + return filter; +} + +void hmp_info_stats(Monitor *mon, const QDict *qdict) +{ + const char *target_str = qdict_get_str(qdict, "target"); + const char *provider_str = qdict_get_try_str(qdict, "provider"); + const char *names = qdict_get_try_str(qdict, "names"); + + StatsProvider provider = STATS_PROVIDER__MAX; + StatsTarget target; + Error *err = NULL; + g_autoptr(StatsSchemaList) schema = NULL; + g_autoptr(StatsResultList) stats = NULL; + g_autoptr(StatsFilter) filter = NULL; + StatsResultList *entry; + + target = qapi_enum_parse(&StatsTarget_lookup, target_str, -1, &err); + if (err) { + monitor_printf(mon, "invalid stats target %s\n", target_str); + goto exit_no_print; + } + if (provider_str) { + provider = qapi_enum_parse(&StatsProvider_lookup, provider_str, -1, &err); + if (err) { + monitor_printf(mon, "invalid stats provider %s\n", provider_str); + goto exit_no_print; + } + } + + schema = qmp_query_stats_schemas(provider_str ? true : false, + provider, &err); + if (err) { + goto exit; + } + + switch (target) { + case STATS_TARGET_VM: + filter = stats_filter(target, names, -1, provider); + break; + case STATS_TARGET_VCPU: {} + int cpu_index = monitor_get_cpu_index(mon); + filter = stats_filter(target, names, cpu_index, provider); + break; + default: + abort(); + } + + stats = qmp_query_stats(filter, &err); + if (err) { + goto exit; + } + for (entry = stats; entry; entry = entry->next) { + print_stats_results(mon, target, provider_str == NULL, entry->value, schema); + } + +exit: + if (err) { + monitor_printf(mon, "%s\n", error_get_pretty(err)); + } +exit_no_print: + error_free(err); +} + +static void hmp_virtio_dump_protocols(Monitor *mon, + VhostDeviceProtocols *pcol) +{ + strList *pcol_list = pcol->protocols; + while (pcol_list) { + monitor_printf(mon, "\t%s", pcol_list->value); + pcol_list = pcol_list->next; + if (pcol_list != NULL) { + monitor_printf(mon, ",\n"); + } + } + monitor_printf(mon, "\n"); + if (pcol->has_unknown_protocols) { + monitor_printf(mon, " unknown-protocols(0x%016"PRIx64")\n", + pcol->unknown_protocols); + } +} + +static void hmp_virtio_dump_status(Monitor *mon, + VirtioDeviceStatus *status) +{ + strList *status_list = status->statuses; + while (status_list) { + monitor_printf(mon, "\t%s", status_list->value); + status_list = status_list->next; + if (status_list != NULL) { + monitor_printf(mon, ",\n"); + } + } + monitor_printf(mon, "\n"); + if (status->has_unknown_statuses) { + monitor_printf(mon, " unknown-statuses(0x%016"PRIx32")\n", + status->unknown_statuses); + } +} + +static void hmp_virtio_dump_features(Monitor *mon, + VirtioDeviceFeatures *features) +{ + strList *transport_list = features->transports; + while (transport_list) { + monitor_printf(mon, "\t%s", transport_list->value); + transport_list = transport_list->next; + if (transport_list != NULL) { + monitor_printf(mon, ",\n"); + } + } + + monitor_printf(mon, "\n"); + strList *list = features->dev_features; + if (list) { + while (list) { + monitor_printf(mon, "\t%s", list->value); + list = list->next; + if (list != NULL) 
{ + monitor_printf(mon, ",\n"); + } + } + monitor_printf(mon, "\n"); + } + + if (features->has_unknown_dev_features) { + monitor_printf(mon, " unknown-features(0x%016"PRIx64")\n", + features->unknown_dev_features); + } +} + +void hmp_virtio_query(Monitor *mon, const QDict *qdict) +{ + Error *err = NULL; + VirtioInfoList *list = qmp_x_query_virtio(&err); + VirtioInfoList *node; + + if (err != NULL) { + hmp_handle_error(mon, err); + return; + } + + if (list == NULL) { + monitor_printf(mon, "No VirtIO devices\n"); + return; + } + + node = list; + while (node) { + monitor_printf(mon, "%s [%s]\n", node->value->path, + node->value->name); + node = node->next; + } + qapi_free_VirtioInfoList(list); +} + +void hmp_virtio_status(Monitor *mon, const QDict *qdict) +{ + Error *err = NULL; + const char *path = qdict_get_try_str(qdict, "path"); + VirtioStatus *s = qmp_x_query_virtio_status(path, &err); + + if (err != NULL) { + hmp_handle_error(mon, err); + return; + } + + monitor_printf(mon, "%s:\n", path); + monitor_printf(mon, " device_name: %s %s\n", + s->name, s->has_vhost_dev ? "(vhost)" : ""); + monitor_printf(mon, " device_id: %d\n", s->device_id); + monitor_printf(mon, " vhost_started: %s\n", + s->vhost_started ? "true" : "false"); + monitor_printf(mon, " bus_name: %s\n", s->bus_name); + monitor_printf(mon, " broken: %s\n", + s->broken ? "true" : "false"); + monitor_printf(mon, " disabled: %s\n", + s->disabled ? "true" : "false"); + monitor_printf(mon, " disable_legacy_check: %s\n", + s->disable_legacy_check ? "true" : "false"); + monitor_printf(mon, " started: %s\n", + s->started ? "true" : "false"); + monitor_printf(mon, " use_started: %s\n", + s->use_started ? "true" : "false"); + monitor_printf(mon, " start_on_kick: %s\n", + s->start_on_kick ? "true" : "false"); + monitor_printf(mon, " use_guest_notifier_mask: %s\n", + s->use_guest_notifier_mask ? "true" : "false"); + monitor_printf(mon, " vm_running: %s\n", + s->vm_running ? "true" : "false"); + monitor_printf(mon, " num_vqs: %"PRId64"\n", s->num_vqs); + monitor_printf(mon, " queue_sel: %d\n", + s->queue_sel); + monitor_printf(mon, " isr: %d\n", s->isr); + monitor_printf(mon, " endianness: %s\n", + s->device_endian); + monitor_printf(mon, " status:\n"); + hmp_virtio_dump_status(mon, s->status); + monitor_printf(mon, " Guest features:\n"); + hmp_virtio_dump_features(mon, s->guest_features); + monitor_printf(mon, " Host features:\n"); + hmp_virtio_dump_features(mon, s->host_features); + monitor_printf(mon, " Backend features:\n"); + hmp_virtio_dump_features(mon, s->backend_features); + + if (s->has_vhost_dev) { + monitor_printf(mon, " VHost:\n"); + monitor_printf(mon, " nvqs: %d\n", + s->vhost_dev->nvqs); + monitor_printf(mon, " vq_index: %"PRId64"\n", + s->vhost_dev->vq_index); + monitor_printf(mon, " max_queues: %"PRId64"\n", + s->vhost_dev->max_queues); + monitor_printf(mon, " n_mem_sections: %"PRId64"\n", + s->vhost_dev->n_mem_sections); + monitor_printf(mon, " n_tmp_sections: %"PRId64"\n", + s->vhost_dev->n_tmp_sections); + monitor_printf(mon, " backend_cap: %"PRId64"\n", + s->vhost_dev->backend_cap); + monitor_printf(mon, " log_enabled: %s\n", + s->vhost_dev->log_enabled ? 
"true" : "false"); + monitor_printf(mon, " log_size: %"PRId64"\n", + s->vhost_dev->log_size); + monitor_printf(mon, " Features:\n"); + hmp_virtio_dump_features(mon, s->vhost_dev->features); + monitor_printf(mon, " Acked features:\n"); + hmp_virtio_dump_features(mon, s->vhost_dev->acked_features); + monitor_printf(mon, " Backend features:\n"); + hmp_virtio_dump_features(mon, s->vhost_dev->backend_features); + monitor_printf(mon, " Protocol features:\n"); + hmp_virtio_dump_protocols(mon, s->vhost_dev->protocol_features); + } + + qapi_free_VirtioStatus(s); +} + +void hmp_vhost_queue_status(Monitor *mon, const QDict *qdict) +{ + Error *err = NULL; + const char *path = qdict_get_try_str(qdict, "path"); + int queue = qdict_get_int(qdict, "queue"); + VirtVhostQueueStatus *s = + qmp_x_query_virtio_vhost_queue_status(path, queue, &err); + + if (err != NULL) { + hmp_handle_error(mon, err); + return; + } + + monitor_printf(mon, "%s:\n", path); + monitor_printf(mon, " device_name: %s (vhost)\n", + s->name); + monitor_printf(mon, " kick: %"PRId64"\n", s->kick); + monitor_printf(mon, " call: %"PRId64"\n", s->call); + monitor_printf(mon, " VRing:\n"); + monitor_printf(mon, " num: %"PRId64"\n", s->num); + monitor_printf(mon, " desc: 0x%016"PRIx64"\n", s->desc); + monitor_printf(mon, " desc_phys: 0x%016"PRIx64"\n", + s->desc_phys); + monitor_printf(mon, " desc_size: %"PRId32"\n", s->desc_size); + monitor_printf(mon, " avail: 0x%016"PRIx64"\n", s->avail); + monitor_printf(mon, " avail_phys: 0x%016"PRIx64"\n", + s->avail_phys); + monitor_printf(mon, " avail_size: %"PRId32"\n", s->avail_size); + monitor_printf(mon, " used: 0x%016"PRIx64"\n", s->used); + monitor_printf(mon, " used_phys: 0x%016"PRIx64"\n", + s->used_phys); + monitor_printf(mon, " used_size: %"PRId32"\n", s->used_size); + + qapi_free_VirtVhostQueueStatus(s); +} + +void hmp_virtio_queue_status(Monitor *mon, const QDict *qdict) +{ + Error *err = NULL; + const char *path = qdict_get_try_str(qdict, "path"); + int queue = qdict_get_int(qdict, "queue"); + VirtQueueStatus *s = qmp_x_query_virtio_queue_status(path, queue, &err); + + if (err != NULL) { + hmp_handle_error(mon, err); + return; + } + + monitor_printf(mon, "%s:\n", path); + monitor_printf(mon, " device_name: %s\n", s->name); + monitor_printf(mon, " queue_index: %d\n", s->queue_index); + monitor_printf(mon, " inuse: %d\n", s->inuse); + monitor_printf(mon, " used_idx: %d\n", s->used_idx); + monitor_printf(mon, " signalled_used: %d\n", + s->signalled_used); + monitor_printf(mon, " signalled_used_valid: %s\n", + s->signalled_used_valid ? 
"true" : "false"); + if (s->has_last_avail_idx) { + monitor_printf(mon, " last_avail_idx: %d\n", + s->last_avail_idx); + } + if (s->has_shadow_avail_idx) { + monitor_printf(mon, " shadow_avail_idx: %d\n", + s->shadow_avail_idx); + } + monitor_printf(mon, " VRing:\n"); + monitor_printf(mon, " num: %"PRId32"\n", s->vring_num); + monitor_printf(mon, " num_default: %"PRId32"\n", + s->vring_num_default); + monitor_printf(mon, " align: %"PRId32"\n", + s->vring_align); + monitor_printf(mon, " desc: 0x%016"PRIx64"\n", + s->vring_desc); + monitor_printf(mon, " avail: 0x%016"PRIx64"\n", + s->vring_avail); + monitor_printf(mon, " used: 0x%016"PRIx64"\n", + s->vring_used); + + qapi_free_VirtQueueStatus(s); +} + +void hmp_virtio_queue_element(Monitor *mon, const QDict *qdict) +{ + Error *err = NULL; + const char *path = qdict_get_try_str(qdict, "path"); + int queue = qdict_get_int(qdict, "queue"); + int index = qdict_get_try_int(qdict, "index", -1); + VirtioQueueElement *e; + VirtioRingDescList *list; + + e = qmp_x_query_virtio_queue_element(path, queue, index != -1, + index, &err); + if (err != NULL) { + hmp_handle_error(mon, err); + return; + } + + monitor_printf(mon, "%s:\n", path); + monitor_printf(mon, " device_name: %s\n", e->name); + monitor_printf(mon, " index: %d\n", e->index); + monitor_printf(mon, " desc:\n"); + monitor_printf(mon, " descs:\n"); + + list = e->descs; + while (list) { + monitor_printf(mon, " addr 0x%"PRIx64" len %d", + list->value->addr, list->value->len); + if (list->value->flags) { + strList *flag = list->value->flags; + monitor_printf(mon, " ("); + while (flag) { + monitor_printf(mon, "%s", flag->value); + flag = flag->next; + if (flag) { + monitor_printf(mon, ", "); + } + } + monitor_printf(mon, ")"); + } + list = list->next; + if (list) { + monitor_printf(mon, ",\n"); + } + } + monitor_printf(mon, "\n"); + monitor_printf(mon, " avail:\n"); + monitor_printf(mon, " flags: %d\n", e->avail->flags); + monitor_printf(mon, " idx: %d\n", e->avail->idx); + monitor_printf(mon, " ring: %d\n", e->avail->ring); + monitor_printf(mon, " used:\n"); + monitor_printf(mon, " flags: %d\n", e->used->flags); + monitor_printf(mon, " idx: %d\n", e->used->idx); + + qapi_free_VirtioQueueElement(e); +} diff --git a/monitor/hmp.c b/monitor/hmp.c index d50c3124e1..43fd69f984 100644 --- a/monitor/hmp.c +++ b/monitor/hmp.c @@ -26,6 +26,7 @@ #include #include "hw/qdev-core.h" #include "monitor-internal.h" +#include "monitor/hmp.h" #include "qapi/error.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qnum.h" @@ -284,10 +285,15 @@ void help_cmd(Monitor *mon, const char *name) if (!strcmp(name, "log")) { const QEMULogItem *item; monitor_printf(mon, "Log items (comma separated):\n"); - monitor_printf(mon, "%-10s %s\n", "none", "remove all logs"); + monitor_printf(mon, "%-15s %s\n", "none", "remove all logs"); for (item = qemu_log_items; item->mask != 0; item++) { - monitor_printf(mon, "%-10s %s\n", item->name, item->help); + monitor_printf(mon, "%-15s %s\n", item->name, item->help); } +#ifdef CONFIG_TRACE_LOG + monitor_printf(mon, "trace:PATTERN enable trace events\n"); + monitor_printf(mon, "\nUse \"log trace:help\" to get a list of " + "trace events.\n\n"); +#endif return; } @@ -307,8 +313,8 @@ void help_cmd(Monitor *mon, const char *name) static const char *pch; static sigjmp_buf expr_env; -static void GCC_FMT_ATTR(2, 3) QEMU_NORETURN -expr_error(Monitor *mon, const char *fmt, ...) +static G_NORETURN G_GNUC_PRINTF(2, 3) +void expr_error(Monitor *mon, const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); @@ -980,6 +986,7 @@ static QDict *monitor_parse_arguments(Monitor *mon, { const char *tmp = p; int skip_key = 0; + int ret; /* option */ c = *typestr++; @@ -1002,11 +1009,27 @@ static QDict *monitor_parse_arguments(Monitor *mon, } if (skip_key) { p = tmp; + } else if (*typestr == 's') { + /* has option with string value */ + typestr++; + tmp = p++; + while (qemu_isspace(*p)) { + p++; + } + ret = get_str(buf, sizeof(buf), &p); + if (ret < 0) { + monitor_printf(mon, "%s: value expected for -%c\n", + cmd->name, *tmp); + goto fail; + } + qdict_put_str(qdict, key, buf); } else { - /* has option */ + /* has boolean option */ p++; qdict_put_bool(qdict, key, true); } + } else if (*typestr == 's') { + typestr++; } } break; @@ -1061,6 +1084,31 @@ fail: return NULL; } +static void hmp_info_human_readable_text(Monitor *mon, + HumanReadableText *(*handler)(Error **)) +{ + Error *err = NULL; + g_autoptr(HumanReadableText) info = handler(&err); + + if (hmp_handle_error(mon, err)) { + return; + } + + monitor_puts(mon, info->human_readable_text); +} + +static void handle_hmp_command_exec(Monitor *mon, + const HMPCommand *cmd, + QDict *qdict) +{ + if (cmd->cmd_info_hrt) { + hmp_info_human_readable_text(mon, + cmd->cmd_info_hrt); + } else { + cmd->cmd(mon, qdict); + } +} + typedef struct HandleHmpCommandCo { Monitor *mon; const HMPCommand *cmd; @@ -1071,7 +1119,7 @@ typedef struct HandleHmpCommandCo { static void handle_hmp_command_co(void *opaque) { HandleHmpCommandCo *data = opaque; - data->cmd->cmd(data->mon, data->qdict); + handle_hmp_command_exec(data->mon, data->cmd, data->qdict); monitor_set_cur(qemu_coroutine_self(), NULL); data->done = true; } @@ -1089,7 +1137,7 @@ void handle_hmp_command(MonitorHMP *mon, const char *cmdline) return; } - if (!cmd->cmd) { + if (!cmd->cmd && !cmd->cmd_info_hrt) { /* FIXME: is it useful to try autoload modules here ??? */ monitor_printf(&mon->common, "Command \"%.*s\" is not available.\n", (int)(cmdline - cmd_start), cmd_start); @@ -1109,7 +1157,7 @@ void handle_hmp_command(MonitorHMP *mon, const char *cmdline) if (!cmd->coroutine) { /* old_mon is non-NULL when called from qmp_human_monitor_command() */ Monitor *old_mon = monitor_set_cur(qemu_coroutine_self(), &mon->common); - cmd->cmd(&mon->common, qdict); + handle_hmp_command_exec(&mon->common, cmd, qdict); monitor_set_cur(qemu_coroutine_self(), old_mon); } else { HandleHmpCommandCo data = { @@ -1419,7 +1467,7 @@ static void monitor_event(void *opaque, QEMUChrEvent event) * These functions just adapt the readline interface in a typesafe way. We * could cast function pointers but that discards compiler checks. */ -static void GCC_FMT_ATTR(2, 3) monitor_readline_printf(void *opaque, +static void G_GNUC_PRINTF(2, 3) monitor_readline_printf(void *opaque, const char *fmt, ...) 
{ MonitorHMP *mon = opaque; diff --git a/monitor/misc.c b/monitor/misc.c index ffe7966870..205487e2b9 100644 --- a/monitor/misc.c +++ b/monitor/misc.c @@ -41,14 +41,15 @@ #include "disas/disas.h" #include "sysemu/balloon.h" #include "qemu/timer.h" +#include "qemu/log.h" #include "sysemu/hw_accel.h" #include "sysemu/runstate.h" #include "authz/list.h" #include "qapi/util.h" #include "sysemu/blockdev.h" #include "sysemu/sysemu.h" -#include "sysemu/tcg.h" #include "sysemu/tpm.h" +#include "sysemu/device_tree.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qerror.h" #include "qapi/qmp/qstring.h" @@ -70,7 +71,9 @@ #include "qapi/qapi-commands-migration.h" #include "qapi/qapi-commands-misc.h" #include "qapi/qapi-commands-qom.h" +#include "qapi/qapi-commands-run-state.h" #include "qapi/qapi-commands-trace.h" +#include "qapi/qapi-commands-machine.h" #include "qapi/qapi-init-commands.h" #include "qapi/error.h" #include "qapi/qmp-event.h" @@ -82,6 +85,9 @@ #include "hw/s390x/storage-attributes.h" #endif +/* Make devices configuration available for use in hmp-commands*.hx templates */ +#include CONFIG_DEVICES + /* file descriptors passed via SCM_RIGHTS */ typedef struct mon_fd_t mon_fd_t; struct mon_fd_t { @@ -230,12 +236,13 @@ static void monitor_init_qmp_commands(void) qmp_init_marshal(&qmp_commands); - qmp_register_command(&qmp_commands, "device_add", qmp_device_add, - QCO_NO_OPTIONS); + qmp_register_command(&qmp_commands, "device_add", + qmp_device_add, 0, 0); QTAILQ_INIT(&qmp_cap_negotiation_commands); qmp_register_command(&qmp_cap_negotiation_commands, "qmp_capabilities", - qmp_marshal_qmp_capabilities, QCO_ALLOW_PRECONFIG); + qmp_marshal_qmp_capabilities, + QCO_ALLOW_PRECONFIG, 0); } /* Set the current CPU defined by the user. Callers must hold BQL. */ @@ -301,6 +308,7 @@ int monitor_get_cpu_index(Monitor *mon) static void hmp_info_registers(Monitor *mon, const QDict *qdict) { bool all_cpus = qdict_get_try_bool(qdict, "cpustate_all", false); + int vcpu = qdict_get_try_int(qdict, "vcpu", -1); CPUState *cs; if (all_cpus) { @@ -309,13 +317,18 @@ static void hmp_info_registers(Monitor *mon, const QDict *qdict) cpu_dump_state(cs, NULL, CPU_DUMP_FPU); } } else { - cs = mon_get_cpu(mon); + cs = vcpu >= 0 ? 
qemu_get_cpu(vcpu) : mon_get_cpu(mon); if (!cs) { - monitor_printf(mon, "No CPU available\n"); + if (vcpu >= 0) { + monitor_printf(mon, "CPU#%d not available\n", vcpu); + } else { + monitor_printf(mon, "No CPU available\n"); + } return; } + monitor_printf(mon, "\nCPU#%d\n", cs->cpu_index); cpu_dump_state(cs, NULL, CPU_DUMP_FPU); } } @@ -415,8 +428,7 @@ static void hmp_logfile(Monitor *mon, const QDict *qdict) { Error *err = NULL; - qemu_set_log_filename(qdict_get_str(qdict, "filename"), &err); - if (err) { + if (!qemu_set_log_filename(qdict_get_str(qdict, "filename"), &err)) { error_report_err(err); } } @@ -425,6 +437,7 @@ static void hmp_log(Monitor *mon, const QDict *qdict) { int mask; const char *items = qdict_get_str(qdict, "items"); + Error *err = NULL; if (!strcmp(items, "none")) { mask = 0; @@ -435,7 +448,10 @@ static void hmp_log(Monitor *mon, const QDict *qdict) return; } } - qemu_set_log(mask); + + if (!qemu_set_log(mask, &err)) { + error_report_err(err); + } } static void hmp_singlestep(Monitor *mon, const QDict *qdict) @@ -470,10 +486,18 @@ static void hmp_gdbserver(Monitor *mon, const QDict *qdict) static void hmp_watchdog_action(Monitor *mon, const QDict *qdict) { - const char *action = qdict_get_str(qdict, "action"); - if (select_watchdog_action(action) == -1) { - monitor_printf(mon, "Unknown watchdog action '%s'\n", action); + Error *err = NULL; + WatchdogAction action; + char *qapi_value; + + qapi_value = g_ascii_strdown(qdict_get_str(qdict, "action"), -1); + action = qapi_enum_parse(&WatchdogAction_lookup, qapi_value, -1, &err); + g_free(qapi_value); + if (err) { + hmp_handle_error(mon, err); + return; } + qmp_watchdog_set_action(action, &error_abort); } static void monitor_printc(Monitor *mon, int c) @@ -708,7 +732,7 @@ static uint64_t vtop(void *ptr, Error **errp) uint64_t pinfo; uint64_t ret = -1; uintptr_t addr = (uintptr_t) ptr; - uintptr_t pagesize = qemu_real_host_page_size; + uintptr_t pagesize = qemu_real_host_page_size(); off_t offset = addr / pagesize * sizeof(pinfo); int fd; @@ -929,33 +953,6 @@ static void hmp_info_mtree(Monitor *mon, const QDict *qdict) mtree_info(flatview, dispatch_tree, owner, disabled); } -#ifdef CONFIG_PROFILER - -int64_t dev_time; - -static void hmp_info_profile(Monitor *mon, const QDict *qdict) -{ - static int64_t last_cpu_exec_time; - int64_t cpu_exec_time; - int64_t delta; - - cpu_exec_time = tcg_cpu_exec_time(); - delta = cpu_exec_time - last_cpu_exec_time; - - monitor_printf(mon, "async time %" PRId64 " (%0.3f)\n", - dev_time, dev_time / (double)NANOSECONDS_PER_SECOND); - monitor_printf(mon, "qemu time %" PRId64 " (%0.3f)\n", - delta, delta / (double)NANOSECONDS_PER_SECOND); - last_cpu_exec_time = cpu_exec_time; - dev_time = 0; -} -#else -static void hmp_info_profile(Monitor *mon, const QDict *qdict) -{ - monitor_printf(mon, "Internal profiler not compiled\n"); -} -#endif - /* Capture support */ static QLIST_HEAD (capture_list_head, CaptureState) capture_head; @@ -1043,7 +1040,7 @@ void qmp_getfd(const char *fdname, Error **errp) return; } - monfd = g_malloc0(sizeof(mon_fd_t)); + monfd = g_new0(mon_fd_t, 1); monfd->name = g_strdup(fdname); monfd->fd = fd; @@ -1963,7 +1960,7 @@ void monitor_register_hmp(const char *name, bool info, while (table->name != NULL) { if (strcmp(table->name, name) == 0) { - g_assert(table->cmd == NULL); + g_assert(table->cmd == NULL && table->cmd_info_hrt == NULL); table->cmd = cmd; return; } @@ -1972,6 +1969,22 @@ void monitor_register_hmp(const char *name, bool info, g_assert_not_reached(); } +void 
monitor_register_hmp_info_hrt(const char *name, + HumanReadableText *(*handler)(Error **errp)) +{ + HMPCommand *table = hmp_info_cmds; + + while (table->name != NULL) { + if (strcmp(table->name, name) == 0) { + g_assert(table->cmd == NULL && table->cmd_info_hrt == NULL); + table->cmd_info_hrt = handler; + return; + } + table++; + } + g_assert_not_reached(); +} + void monitor_init_globals(void) { monitor_init_globals_core(); diff --git a/monitor/monitor-internal.h b/monitor/monitor-internal.h index 9c3a09cb01..a2cdbbf646 100644 --- a/monitor/monitor-internal.h +++ b/monitor/monitor-internal.h @@ -63,7 +63,8 @@ * '.' other form of optional type (for 'i' and 'l') * 'b' boolean * user mode accepts "on" or "off" - * '-' optional parameter (eg. '-f') + * '-' optional parameter (eg. '-f'); if followed by a 's', it + * specifies an optional string param (e.g. '-fs' allows '-f foo') * */ @@ -74,6 +75,13 @@ typedef struct HMPCommand { const char *help; const char *flags; /* p=preconfig */ void (*cmd)(Monitor *mon, const QDict *qdict); + /* + * If implementing a command that takes no arguments and simply + * prints formatted data, then leave @cmd NULL, and then set + * @cmd_info_hrt to the corresponding QMP handler that returns + * the formatted text. + */ + HumanReadableText *(*cmd_info_hrt)(Error **errp); bool coroutine; /* * @sub_table is a list of 2nd level of commands. If it does not exist, @@ -166,7 +174,6 @@ extern int mon_refcount; extern HMPCommand hmp_cmds[]; -int monitor_puts(Monitor *mon, const char *str); void monitor_data_init(Monitor *mon, bool is_qmp, bool skip_flush, bool use_io_thread); void monitor_data_destroy(Monitor *mon); diff --git a/monitor/monitor.c b/monitor/monitor.c index 46a171bca6..86949024f6 100644 --- a/monitor/monitor.c +++ b/monitor/monitor.c @@ -286,6 +286,16 @@ int error_vprintf_unless_qmp(const char *fmt, va_list ap) return -1; } +int error_printf_unless_qmp(const char *fmt, ...) 
+{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = error_vprintf_unless_qmp(fmt, ap); + va_end(ap); + return ret; +} static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = { /* Limit guest-triggerable events to 1 per second */ @@ -474,6 +484,10 @@ static unsigned int qapi_event_throttle_hash(const void *key) hash += g_str_hash(qdict_get_str(evstate->data, "node-name")); } + if (evstate->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE) { + hash += g_str_hash(qdict_get_str(evstate->data, "qom-path")); + } + return hash; } @@ -496,6 +510,11 @@ static gboolean qapi_event_throttle_equal(const void *a, const void *b) qdict_get_str(evb->data, "node-name")); } + if (eva->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE) { + return !strcmp(qdict_get_str(eva->data, "qom-path"), + qdict_get_str(evb->data, "qom-path")); + } + return TRUE; } diff --git a/monitor/qmp-cmds.c b/monitor/qmp-cmds.c index f7d64a6457..81c8fdadf8 100644 --- a/monitor/qmp-cmds.c +++ b/monitor/qmp-cmds.c @@ -14,7 +14,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/cutils.h" #include "qemu/option.h" #include "monitor/monitor.h" @@ -24,10 +23,10 @@ #include "chardev/char.h" #include "ui/qemu-spice.h" #include "ui/console.h" +#include "ui/dbus-display.h" #include "sysemu/kvm.h" #include "sysemu/runstate.h" #include "sysemu/runstate-action.h" -#include "sysemu/arch_init.h" #include "sysemu/blockdev.h" #include "sysemu/block-backend.h" #include "qapi/error.h" @@ -36,10 +35,16 @@ #include "qapi/qapi-commands-control.h" #include "qapi/qapi-commands-machine.h" #include "qapi/qapi-commands-misc.h" +#include "qapi/qapi-commands-stats.h" #include "qapi/qapi-commands-ui.h" +#include "qapi/type-helpers.h" #include "qapi/qmp/qerror.h" +#include "exec/ramlist.h" #include "hw/mem/memory-device.h" #include "hw/acpi/acpi_dev_interface.h" +#include "hw/intc/intc.h" +#include "hw/rdma/rdma.h" +#include "monitor/stats.h" NameInfo *qmp_query_name(Error **errp) { @@ -58,7 +63,7 @@ KvmInfo *qmp_query_kvm(Error **errp) KvmInfo *info = g_malloc0(sizeof(*info)); info->enabled = kvm_enabled(); - info->present = kvm_available(); + info->present = accel_find("kvm"); return info; } @@ -81,7 +86,7 @@ void qmp_stop(Error **errp) { /* if there is a dump in background, we should wait until the dump * finished */ - if (dump_in_progress()) { + if (qemu_system_dump_in_progress()) { error_setg(errp, "There is a dump in process, please wait."); return; } @@ -111,7 +116,7 @@ void qmp_cont(Error **errp) /* if there is a dump in background, we should wait until the dump * finished */ - if (dump_in_progress()) { + if (qemu_system_dump_in_progress()) { error_setg(errp, "There is a dump in process, please wait."); return; } @@ -130,8 +135,11 @@ void qmp_cont(Error **errp) blk_iostatus_reset(blk); } - for (job = block_job_next(NULL); job; job = block_job_next(job)) { - block_job_iostatus_reset(job); + WITH_JOB_LOCK_GUARD() { + for (job = block_job_next_locked(NULL); job; + job = block_job_next_locked(job)) { + block_job_iostatus_reset_locked(job); + } } /* Continuing after completed migration. Images have been inactivated to @@ -140,7 +148,7 @@ void qmp_cont(Error **errp) * If there are no inactive block nodes (e.g. because the VM was just * paused rather than completing a migration), bdrv_inactivate_all() simply * doesn't do anything. 
*/ - bdrv_invalidate_cache_all(&local_err); + bdrv_activate_all(&local_err); if (local_err) { error_propagate(errp, local_err); return; @@ -164,45 +172,27 @@ void qmp_system_wakeup(Error **errp) qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, errp); } -void qmp_set_password(const char *protocol, const char *password, - bool has_connected, const char *connected, Error **errp) +void qmp_set_password(SetPasswordOptions *opts, Error **errp) { - int disconnect_if_connected = 0; - int fail_if_connected = 0; int rc; - if (has_connected) { - if (strcmp(connected, "fail") == 0) { - fail_if_connected = 1; - } else if (strcmp(connected, "disconnect") == 0) { - disconnect_if_connected = 1; - } else if (strcmp(connected, "keep") == 0) { - /* nothing */ - } else { - error_setg(errp, QERR_INVALID_PARAMETER, "connected"); - return; - } - } - - if (strcmp(protocol, "spice") == 0) { + if (opts->protocol == DISPLAY_PROTOCOL_SPICE) { if (!qemu_using_spice(errp)) { return; } - rc = qemu_spice.set_passwd(password, fail_if_connected, - disconnect_if_connected); - } else if (strcmp(protocol, "vnc") == 0) { - if (fail_if_connected || disconnect_if_connected) { + rc = qemu_spice.set_passwd(opts->password, + opts->connected == SET_PASSWORD_ACTION_FAIL, + opts->connected == SET_PASSWORD_ACTION_DISCONNECT); + } else { + assert(opts->protocol == DISPLAY_PROTOCOL_VNC); + if (opts->connected != SET_PASSWORD_ACTION_KEEP) { /* vnc supports "connected=keep" only */ error_setg(errp, QERR_INVALID_PARAMETER, "connected"); return; } /* Note that setting an empty password will not disable login through * this interface. */ - rc = vnc_display_password(NULL, password); - } else { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "protocol", - "'vnc' or 'spice'"); - return; + rc = vnc_display_password(opts->u.vnc.display, opts->password); } if (rc != 0) { @@ -210,11 +200,11 @@ void qmp_set_password(const char *protocol, const char *password, } } -void qmp_expire_password(const char *protocol, const char *whenstr, - Error **errp) +void qmp_expire_password(ExpirePasswordOptions *opts, Error **errp) { time_t when; int rc; + const char *whenstr = opts->time; if (strcmp(whenstr, "now") == 0) { when = 0; @@ -226,17 +216,14 @@ void qmp_expire_password(const char *protocol, const char *whenstr, when = strtoull(whenstr, NULL, 10); } - if (strcmp(protocol, "spice") == 0) { + if (opts->protocol == DISPLAY_PROTOCOL_SPICE) { if (!qemu_using_spice(errp)) { return; } rc = qemu_spice.set_pw_expire(when); - } else if (strcmp(protocol, "vnc") == 0) { - rc = vnc_display_pw_expire(NULL, when); } else { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "protocol", - "'vnc' or 'spice'"); - return; + assert(opts->protocol == DISPLAY_PROTOCOL_VNC); + rc = vnc_display_pw_expire(opts->u.vnc.display, when); } if (rc != 0) { @@ -282,6 +269,18 @@ void qmp_add_client(const char *protocol, const char *fdname, skipauth = has_skipauth ? 
skipauth : false; vnc_display_add_client(NULL, fd, skipauth); return; +#endif +#ifdef CONFIG_DBUS_DISPLAY + } else if (strcmp(protocol, "@dbus-display") == 0) { + if (!qemu_using_dbus_display(errp)) { + close(fd); + return; + } + if (!qemu_dbus_display.add_client(fd, errp)) { + close(fd); + return; + } + return; #endif } else if ((s = qemu_chr_find(protocol)) != NULL) { if (qemu_chr_add_client(s, fd) < 0) { @@ -323,7 +322,7 @@ ACPIOSTInfoList *qmp_query_acpi_ospm_status(Error **errp) MemoryInfo *qmp_query_memory_size_summary(Error **errp) { - MemoryInfo *mem_info = g_malloc0(sizeof(MemoryInfo)); + MemoryInfo *mem_info = g_new0(MemoryInfo, 1); MachineState *ms = MACHINE(qdev_get_machine()); mem_info->base_memory = ms->ram_size; @@ -351,3 +350,252 @@ void qmp_display_reload(DisplayReloadOptions *arg, Error **errp) abort(); } } + +void qmp_display_update(DisplayUpdateOptions *arg, Error **errp) +{ + switch (arg->type) { + case DISPLAY_UPDATE_TYPE_VNC: +#ifdef CONFIG_VNC + vnc_display_update(&arg->u.vnc, errp); +#else + error_setg(errp, "vnc is invalid, missing 'CONFIG_VNC'"); +#endif + break; + default: + abort(); + } +} + +static int qmp_x_query_rdma_foreach(Object *obj, void *opaque) +{ + RdmaProvider *rdma; + RdmaProviderClass *k; + GString *buf = opaque; + + if (object_dynamic_cast(obj, INTERFACE_RDMA_PROVIDER)) { + rdma = RDMA_PROVIDER(obj); + k = RDMA_PROVIDER_GET_CLASS(obj); + if (k->format_statistics) { + k->format_statistics(rdma, buf); + } else { + g_string_append_printf(buf, + "RDMA statistics not available for %s.\n", + object_get_typename(obj)); + } + } + + return 0; +} + +HumanReadableText *qmp_x_query_rdma(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + + object_child_foreach_recursive(object_get_root(), + qmp_x_query_rdma_foreach, buf); + + return human_readable_text_from_str(buf); +} + +HumanReadableText *qmp_x_query_ramblock(Error **errp) +{ + g_autoptr(GString) buf = ram_block_format(); + + return human_readable_text_from_str(buf); +} + +static int qmp_x_query_irq_foreach(Object *obj, void *opaque) +{ + InterruptStatsProvider *intc; + InterruptStatsProviderClass *k; + GString *buf = opaque; + + if (object_dynamic_cast(obj, TYPE_INTERRUPT_STATS_PROVIDER)) { + intc = INTERRUPT_STATS_PROVIDER(obj); + k = INTERRUPT_STATS_PROVIDER_GET_CLASS(obj); + uint64_t *irq_counts; + unsigned int nb_irqs, i; + if (k->get_statistics && + k->get_statistics(intc, &irq_counts, &nb_irqs)) { + if (nb_irqs > 0) { + g_string_append_printf(buf, "IRQ statistics for %s:\n", + object_get_typename(obj)); + for (i = 0; i < nb_irqs; i++) { + if (irq_counts[i] > 0) { + g_string_append_printf(buf, "%2d: %" PRId64 "\n", i, + irq_counts[i]); + } + } + } + } else { + g_string_append_printf(buf, + "IRQ statistics not available for %s.\n", + object_get_typename(obj)); + } + } + + return 0; +} + +HumanReadableText *qmp_x_query_irq(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + + object_child_foreach_recursive(object_get_root(), + qmp_x_query_irq_foreach, buf); + + return human_readable_text_from_str(buf); +} + +typedef struct StatsCallbacks { + StatsProvider provider; + StatRetrieveFunc *stats_cb; + SchemaRetrieveFunc *schemas_cb; + QTAILQ_ENTRY(StatsCallbacks) next; +} StatsCallbacks; + +static QTAILQ_HEAD(, StatsCallbacks) stats_callbacks = + QTAILQ_HEAD_INITIALIZER(stats_callbacks); + +void add_stats_callbacks(StatsProvider provider, + StatRetrieveFunc *stats_fn, + SchemaRetrieveFunc *schemas_fn) +{ + StatsCallbacks *entry = g_new(StatsCallbacks, 1); + entry->provider = 
provider; + entry->stats_cb = stats_fn; + entry->schemas_cb = schemas_fn; + + QTAILQ_INSERT_TAIL(&stats_callbacks, entry, next); +} + +static bool invoke_stats_cb(StatsCallbacks *entry, + StatsResultList **stats_results, + StatsFilter *filter, StatsRequest *request, + Error **errp) +{ + strList *targets = NULL; + strList *names = NULL; + ERRP_GUARD(); + + if (request) { + if (request->provider != entry->provider) { + return true; + } + if (request->has_names && !request->names) { + return true; + } + names = request->has_names ? request->names : NULL; + } + + switch (filter->target) { + case STATS_TARGET_VM: + break; + case STATS_TARGET_VCPU: + if (filter->u.vcpu.has_vcpus) { + if (!filter->u.vcpu.vcpus) { + /* No targets allowed? Return no statistics. */ + return true; + } + targets = filter->u.vcpu.vcpus; + } + break; + default: + abort(); + } + + entry->stats_cb(stats_results, filter->target, names, targets, errp); + if (*errp) { + qapi_free_StatsResultList(*stats_results); + *stats_results = NULL; + return false; + } + return true; +} + +StatsResultList *qmp_query_stats(StatsFilter *filter, Error **errp) +{ + StatsResultList *stats_results = NULL; + StatsCallbacks *entry; + StatsRequestList *request; + + QTAILQ_FOREACH(entry, &stats_callbacks, next) { + if (filter->has_providers) { + for (request = filter->providers; request; request = request->next) { + if (!invoke_stats_cb(entry, &stats_results, filter, + request->value, errp)) { + break; + } + } + } else { + if (!invoke_stats_cb(entry, &stats_results, filter, NULL, errp)) { + break; + } + } + } + + return stats_results; +} + +StatsSchemaList *qmp_query_stats_schemas(bool has_provider, + StatsProvider provider, + Error **errp) +{ + StatsSchemaList *stats_results = NULL; + StatsCallbacks *entry; + ERRP_GUARD(); + + QTAILQ_FOREACH(entry, &stats_callbacks, next) { + if (!has_provider || provider == entry->provider) { + entry->schemas_cb(&stats_results, errp); + if (*errp) { + qapi_free_StatsSchemaList(stats_results); + return NULL; + } + } + } + + return stats_results; +} + +void add_stats_entry(StatsResultList **stats_results, StatsProvider provider, + const char *qom_path, StatsList *stats_list) +{ + StatsResult *entry = g_new0(StatsResult, 1); + + entry->provider = provider; + if (qom_path) { + entry->has_qom_path = true; + entry->qom_path = g_strdup(qom_path); + } + entry->stats = stats_list; + + QAPI_LIST_PREPEND(*stats_results, entry); +} + +void add_stats_schema(StatsSchemaList **schema_results, + StatsProvider provider, StatsTarget target, + StatsSchemaValueList *stats_list) +{ + StatsSchema *entry = g_new0(StatsSchema, 1); + + entry->provider = provider; + entry->target = target; + entry->stats = stats_list; + QAPI_LIST_PREPEND(*schema_results, entry); +} + +bool apply_str_list_filter(const char *string, strList *list) +{ + strList *str_list = NULL; + + if (!list) { + return true; + } + for (str_list = list; str_list; str_list = str_list->next) { + if (g_str_equal(string, str_list->value)) { + return true; + } + } + return false; +} diff --git a/nbd/client-connection.c b/nbd/client-connection.c index 7123b1e189..0c5f917efa 100644 --- a/nbd/client-connection.c +++ b/nbd/client-connection.c @@ -23,6 +23,7 @@ */ #include "qemu/osdep.h" +#include "trace.h" #include "block/nbd.h" @@ -33,22 +34,25 @@ struct NBDClientConnection { /* Initialization constants, never change */ SocketAddress *saddr; /* address to connect to */ QCryptoTLSCreds *tlscreds; + char *tlshostname; NBDExportInfo initial_info; bool do_negotiation; bool do_retry; 
QemuMutex mutex; - /* - * @sioc and @err represent a connection attempt. While running - * is true, they are only used by the connection thread, and mutex - * locking is not needed. Once the thread finishes, - * nbd_co_establish_connection then steals these pointers while - * under the mutex. - */ NBDExportInfo updated_info; + /* + * @sioc represents a successful result. While thread is running, @sioc is + * used only by thread and not protected by mutex. When thread is not + * running, @sioc is stolen by nbd_co_establish_connection() under mutex. + */ QIOChannelSocket *sioc; QIOChannel *ioc; + /* + * @err represents previous attempt. It may be copied by + * nbd_co_establish_connection() when it reports failure. + */ Error *err; /* All further fields are accessed only under mutex */ @@ -75,7 +79,8 @@ NBDClientConnection *nbd_client_connection_new(const SocketAddress *saddr, bool do_negotiation, const char *export_name, const char *x_dirty_bitmap, - QCryptoTLSCreds *tlscreds) + QCryptoTLSCreds *tlscreds, + const char *tlshostname) { NBDClientConnection *conn = g_new(NBDClientConnection, 1); @@ -83,6 +88,7 @@ NBDClientConnection *nbd_client_connection_new(const SocketAddress *saddr, *conn = (NBDClientConnection) { .saddr = QAPI_CLONE(SocketAddress, saddr), .tlscreds = tlscreds, + .tlshostname = g_strdup(tlshostname), .do_negotiation = do_negotiation, .initial_info.request_sizes = true, @@ -105,6 +111,7 @@ static void nbd_client_connection_do_free(NBDClientConnection *conn) } error_free(conn->err); qapi_free_SocketAddress(conn->saddr); + g_free(conn->tlshostname); object_unref(OBJECT(conn->tlscreds)); g_free(conn->initial_info.x_dirty_bitmap); g_free(conn->initial_info.name); @@ -118,6 +125,7 @@ static void nbd_client_connection_do_free(NBDClientConnection *conn) */ static int nbd_connect(QIOChannelSocket *sioc, SocketAddress *addr, NBDExportInfo *info, QCryptoTLSCreds *tlscreds, + const char *tlshostname, QIOChannel **outioc, Error **errp) { int ret; @@ -138,7 +146,7 @@ static int nbd_connect(QIOChannelSocket *sioc, SocketAddress *addr, } ret = nbd_receive_negotiate(NULL, QIO_CHANNEL(sioc), tlscreds, - tlscreds ? addr->u.inet.host : NULL, + tlshostname, outioc, info, errp); if (ret < 0) { /* @@ -170,18 +178,19 @@ static void *connect_thread_func(void *opaque) qemu_mutex_lock(&conn->mutex); while (!conn->detached) { + Error *local_err = NULL; + assert(!conn->sioc); conn->sioc = qio_channel_socket_new(); qemu_mutex_unlock(&conn->mutex); - error_free(conn->err); - conn->err = NULL; conn->updated_info = conn->initial_info; ret = nbd_connect(conn->sioc, conn->saddr, conn->do_negotiation ? &conn->updated_info : NULL, - conn->tlscreds, &conn->ioc, &conn->err); + conn->tlscreds, conn->tlshostname, + &conn->ioc, &local_err); /* * conn->updated_info will finally be returned to the user. 
Clear the @@ -194,10 +203,15 @@ static void *connect_thread_func(void *opaque) qemu_mutex_lock(&conn->mutex); + error_free(conn->err); + conn->err = NULL; + error_propagate(&conn->err, local_err); + if (ret < 0) { object_unref(OBJECT(conn->sioc)); conn->sioc = NULL; if (conn->do_retry && !conn->detached) { + trace_nbd_connect_thread_sleep(timeout); qemu_mutex_unlock(&conn->mutex); sleep(timeout); @@ -311,13 +325,17 @@ nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info, } conn->running = true; - error_free(conn->err); - conn->err = NULL; qemu_thread_create(&thread, "nbd-connect", connect_thread_func, conn, QEMU_THREAD_DETACHED); } if (!blocking) { + if (conn->err) { + error_propagate(errp, error_copy(conn->err)); + } else { + error_setg(errp, "No connection at the moment"); + } + return NULL; } @@ -338,14 +356,30 @@ nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info, * attempt as failed, but leave the connection thread running, * to reuse it for the next connection attempt. */ - error_setg(errp, "Connection attempt cancelled by other operation"); + if (conn->err) { + error_propagate(errp, error_copy(conn->err)); + } else { + /* + * The only possible case here is cancelling by open_timer + * during nbd_open(). So, the error message is for that case. + * If we have more use cases, we can refactor + * nbd_co_establish_connection_cancel() to take an additional + * parameter cancel_reason, that would be passed than to the + * caller of cancelled nbd_co_establish_connection(). + */ + error_setg(errp, "Connection attempt cancelled by timeout"); + } + return NULL; } else { - error_propagate(errp, conn->err); - conn->err = NULL; - if (!conn->sioc) { + /* Thread finished. There must be either error or sioc */ + assert(!conn->err != !conn->sioc); + + if (conn->err) { + error_propagate(errp, error_copy(conn->err)); return NULL; } + if (conn->do_negotiation) { memcpy(info, &conn->updated_info, sizeof(*info)); if (conn->ioc) { diff --git a/nbd/client.c b/nbd/client.c index 0c2db4bcba..30d5383cb1 100644 --- a/nbd/client.c +++ b/nbd/client.c @@ -1434,9 +1434,7 @@ nbd_read_eof(BlockDriverState *bs, QIOChannel *ioc, void *buffer, size_t size, len = qio_channel_readv(ioc, &iov, 1, errp); if (len == QIO_CHANNEL_ERR_BLOCK) { - bdrv_dec_in_flight(bs); qio_channel_yield(ioc, G_IO_IN); - bdrv_inc_in_flight(bs); continue; } else if (len < 0) { return -EIO; diff --git a/nbd/server.c b/nbd/server.c index 3927f7789d..ada16089f3 100644 --- a/nbd/server.c +++ b/nbd/server.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016-2020 Red Hat, Inc. + * Copyright (C) 2016-2022 Red Hat, Inc. * Copyright (C) 2005 Anthony Liguori * * Network Block Device Server Side @@ -25,6 +25,7 @@ #include "trace.h" #include "nbd-internal.h" #include "qemu/units.h" +#include "qemu/memalign.h" #define NBD_META_ID_BASE_ALLOCATION 0 #define NBD_META_ID_ALLOCATION_DEPTH 1 @@ -77,7 +78,6 @@ static int system_errno_to_nbd_errno(int err) typedef struct NBDRequestData NBDRequestData; struct NBDRequestData { - QSIMPLEQ_ENTRY(NBDRequestData) entry; NBDClient *client; uint8_t *data; bool complete; @@ -213,7 +213,7 @@ static int nbd_negotiate_send_rep(NBDClient *client, uint32_t type, /* Send an error reply. * Return -errno on error, 0 on success. */ -static int GCC_FMT_ATTR(4, 0) +static int G_GNUC_PRINTF(4, 0) nbd_negotiate_send_rep_verr(NBDClient *client, uint32_t type, Error **errp, const char *fmt, va_list va) { @@ -253,7 +253,7 @@ nbd_sanitize_name(const char *name) /* Send an error reply. 
* Return -errno on error, 0 on success. */ -static int GCC_FMT_ATTR(4, 5) +static int G_GNUC_PRINTF(4, 5) nbd_negotiate_send_rep_err(NBDClient *client, uint32_t type, Error **errp, const char *fmt, ...) { @@ -269,7 +269,7 @@ nbd_negotiate_send_rep_err(NBDClient *client, uint32_t type, /* Drop remainder of the current option, and send a reply with the * given error type and message. Return -errno on read or write * failure; or 0 if connection is still live. */ -static int GCC_FMT_ATTR(4, 0) +static int G_GNUC_PRINTF(4, 0) nbd_opt_vdrop(NBDClient *client, uint32_t type, Error **errp, const char *fmt, va_list va) { @@ -282,7 +282,7 @@ nbd_opt_vdrop(NBDClient *client, uint32_t type, Error **errp, return ret; } -static int GCC_FMT_ATTR(4, 5) +static int G_GNUC_PRINTF(4, 5) nbd_opt_drop(NBDClient *client, uint32_t type, Error **errp, const char *fmt, ...) { @@ -296,7 +296,7 @@ nbd_opt_drop(NBDClient *client, uint32_t type, Error **errp, return ret; } -static int GCC_FMT_ATTR(3, 4) +static int G_GNUC_PRINTF(3, 4) nbd_opt_invalid(NBDClient *client, Error **errp, const char *fmt, ...) { int ret; @@ -879,7 +879,9 @@ static bool nbd_meta_qemu_query(NBDClient *client, NBDExportMetaContexts *meta, if (!*query) { if (client->opt == NBD_OPT_LIST_META_CONTEXT) { meta->allocation_depth = meta->exp->allocation_depth; - memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps); + if (meta->exp->nr_export_bitmaps) { + memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps); + } } trace_nbd_negotiate_meta_query_parse("empty"); return true; @@ -894,7 +896,8 @@ static bool nbd_meta_qemu_query(NBDClient *client, NBDExportMetaContexts *meta, if (nbd_strshift(&query, "dirty-bitmap:")) { trace_nbd_negotiate_meta_query_parse("dirty-bitmap:"); if (!*query) { - if (client->opt == NBD_OPT_LIST_META_CONTEXT) { + if (client->opt == NBD_OPT_LIST_META_CONTEXT && + meta->exp->nr_export_bitmaps) { memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps); } trace_nbd_negotiate_meta_query_parse("empty"); @@ -980,7 +983,7 @@ static int nbd_negotiate_meta_queries(NBDClient *client, size_t i; size_t count = 0; - if (!client->structured_reply) { + if (client->opt == NBD_OPT_SET_META_CONTEXT && !client->structured_reply) { return nbd_opt_invalid(client, errp, "request option '%s' when structured reply " "is not negotiated", @@ -1024,7 +1027,9 @@ static int nbd_negotiate_meta_queries(NBDClient *client, /* enable all known contexts */ meta->base_allocation = true; meta->allocation_depth = meta->exp->allocation_depth; - memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps); + if (meta->exp->nr_export_bitmaps) { + memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps); + } } else { for (i = 0; i < nb_queries; ++i) { ret = nbd_negotiate_meta_query(client, meta, errp); @@ -1413,6 +1418,9 @@ static int nbd_receive_request(NBDClient *client, NBDRequest *request, if (ret < 0) { return ret; } + if (ret == 0) { + return -EIO; + } /* Request [ 0 .. 
3] magic (NBD_REQUEST_MAGIC) @@ -1634,8 +1642,7 @@ static int nbd_export_create(BlockExport *blk_exp, BlockExportOptions *exp_args, int64_t size; uint64_t perm, shared_perm; bool readonly = !exp_args->writable; - bool shared = !exp_args->writable; - strList *bitmaps; + BlockDirtyBitmapOrStrList *bitmaps; size_t i; int ret; @@ -1685,11 +1692,12 @@ static int nbd_export_create(BlockExport *blk_exp, BlockExportOptions *exp_args, exp->description = g_strdup(arg->description); exp->nbdflags = (NBD_FLAG_HAS_FLAGS | NBD_FLAG_SEND_FLUSH | NBD_FLAG_SEND_FUA | NBD_FLAG_SEND_CACHE); + + if (nbd_server_max_connections() != 1) { + exp->nbdflags |= NBD_FLAG_CAN_MULTI_CONN; + } if (readonly) { exp->nbdflags |= NBD_FLAG_READ_ONLY; - if (shared) { - exp->nbdflags |= NBD_FLAG_CAN_MULTI_CONN; - } } else { exp->nbdflags |= (NBD_FLAG_SEND_TRIM | NBD_FLAG_SEND_WRITE_ZEROES | NBD_FLAG_SEND_FAST_ZERO); @@ -1701,40 +1709,59 @@ static int nbd_export_create(BlockExport *blk_exp, BlockExportOptions *exp_args, } exp->export_bitmaps = g_new0(BdrvDirtyBitmap *, exp->nr_export_bitmaps); for (i = 0, bitmaps = arg->bitmaps; bitmaps; - i++, bitmaps = bitmaps->next) { - const char *bitmap = bitmaps->value; + i++, bitmaps = bitmaps->next) + { + const char *bitmap; BlockDriverState *bs = blk_bs(blk); BdrvDirtyBitmap *bm = NULL; - while (bs) { - bm = bdrv_find_dirty_bitmap(bs, bitmap); - if (bm != NULL) { - break; + switch (bitmaps->value->type) { + case QTYPE_QSTRING: + bitmap = bitmaps->value->u.local; + while (bs) { + bm = bdrv_find_dirty_bitmap(bs, bitmap); + if (bm != NULL) { + break; + } + + bs = bdrv_filter_or_cow_bs(bs); } - bs = bdrv_filter_or_cow_bs(bs); + if (bm == NULL) { + ret = -ENOENT; + error_setg(errp, "Bitmap '%s' is not found", + bitmaps->value->u.local); + goto fail; + } + + if (readonly && bdrv_is_writable(bs) && + bdrv_dirty_bitmap_enabled(bm)) { + ret = -EINVAL; + error_setg(errp, "Enabled bitmap '%s' incompatible with " + "readonly export", bitmap); + goto fail; + } + break; + case QTYPE_QDICT: + bitmap = bitmaps->value->u.external.name; + bm = block_dirty_bitmap_lookup(bitmaps->value->u.external.node, + bitmap, NULL, errp); + if (!bm) { + ret = -ENOENT; + goto fail; + } + break; + default: + abort(); } - if (bm == NULL) { - ret = -ENOENT; - error_setg(errp, "Bitmap '%s' is not found", bitmap); - goto fail; - } + assert(bm); if (bdrv_dirty_bitmap_check(bm, BDRV_BITMAP_ALLOW_RO, errp)) { ret = -EINVAL; goto fail; } - if (readonly && bdrv_is_writable(bs) && - bdrv_dirty_bitmap_enabled(bm)) { - ret = -EINVAL; - error_setg(errp, - "Enabled bitmap '%s' incompatible with readonly export", - bitmap); - goto fail; - } - exp->export_bitmaps[i] = bm; assert(strlen(bitmap) <= BDRV_BITMAP_MAX_NAME_SIZE); } @@ -2012,8 +2039,8 @@ static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client, stl_be_p(&chunk.length, pnum); ret = nbd_co_send_iov(client, iov, 1, errp); } else { - ret = blk_pread(exp->common.blk, offset + progress, - data + progress, pnum); + ret = blk_pread(exp->common.blk, offset + progress, pnum, + data + progress, 0); if (ret < 0) { error_setg_errno(errp, -ret, "reading from file failed"); break; @@ -2056,7 +2083,7 @@ static void nbd_extent_array_free(NBDExtentArray *ea) g_free(ea->extents); g_free(ea); } -G_DEFINE_AUTOPTR_CLEANUP_FUNC(NBDExtentArray, nbd_extent_array_free); +G_DEFINE_AUTOPTR_CLEANUP_FUNC(NBDExtentArray, nbd_extent_array_free) /* Further modifications of the array after conversion are abandoned */ static void nbd_extent_array_convert_to_be(NBDExtentArray *ea) @@ -2077,11 +2104,10 
@@ static void nbd_extent_array_convert_to_be(NBDExtentArray *ea) * Add extent to NBDExtentArray. If extent can't be added (no available space), * return -1. * For safety, when returning -1 for the first time, .can_add is set to false, - * further call to nbd_extent_array_add() will crash. - * (to avoid the situation, when after failing to add an extent (returned -1), - * user miss this failure and add another extent, which is successfully added - * (array is full, but new extent may be squashed into the last one), then we - * have invalid array with skipped extent) + * and further calls to nbd_extent_array_add() will crash. + * (this avoids the situation where a caller ignores failure to add one extent, + * where adding another extent that would squash into the last array entry + * would result in an incorrect range reported to the client) */ static int nbd_extent_array_add(NBDExtentArray *ea, uint32_t length, uint32_t flags) @@ -2280,7 +2306,7 @@ static int nbd_co_receive_request(NBDRequestData *req, NBDRequest *request, assert(client->recv_coroutine == qemu_coroutine_self()); ret = nbd_receive_request(client, request, errp); if (ret < 0) { - return ret; + return ret; } trace_nbd_co_receive_request_decode_type(request->handle, request->type, @@ -2418,7 +2444,7 @@ static coroutine_fn int nbd_do_cmd_read(NBDClient *client, NBDRequest *request, data, request->len, errp); } - ret = blk_pread(exp->common.blk, request->from, data, request->len); + ret = blk_pread(exp->common.blk, request->from, request->len, data, 0); if (ret < 0) { return nbd_send_generic_reply(client, request->handle, ret, "reading from file failed", errp); @@ -2485,7 +2511,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client, if (request->flags & NBD_CMD_FLAG_FUA) { flags |= BDRV_REQ_FUA; } - ret = blk_pwrite(exp->common.blk, request->from, data, request->len, + ret = blk_pwrite(exp->common.blk, request->from, request->len, data, flags); return nbd_send_generic_reply(client, request->handle, ret, "writing to file failed", errp); @@ -2501,16 +2527,8 @@ static coroutine_fn int nbd_handle_request(NBDClient *client, if (request->flags & NBD_CMD_FLAG_FAST_ZERO) { flags |= BDRV_REQ_NO_FALLBACK; } - ret = 0; - /* FIXME simplify this when blk_pwrite_zeroes switches to 64-bit */ - while (ret >= 0 && request->len) { - int align = client->check_align ?: 1; - int len = MIN(request->len, QEMU_ALIGN_DOWN(BDRV_REQUEST_MAX_BYTES, - align)); - ret = blk_pwrite_zeroes(exp->common.blk, request->from, len, flags); - request->len -= len; - request->from += len; - } + ret = blk_pwrite_zeroes(exp->common.blk, request->from, request->len, + flags); return nbd_send_generic_reply(client, request->handle, ret, "writing to file failed", errp); @@ -2524,16 +2542,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client, "flush failed", errp); case NBD_CMD_TRIM: - ret = 0; - /* FIXME simplify this when blk_co_pdiscard switches to 64-bit */ - while (ret >= 0 && request->len) { - int align = client->check_align ?: 1; - int len = MIN(request->len, QEMU_ALIGN_DOWN(BDRV_REQUEST_MAX_BYTES, - align)); - ret = blk_co_pdiscard(exp->common.blk, request->from, len); - request->len -= len; - request->from += len; - } + ret = blk_co_pdiscard(exp->common.blk, request->from, request->len); if (ret >= 0 && request->flags & NBD_CMD_FLAG_FUA) { ret = blk_co_flush(exp->common.blk); } @@ -2657,7 +2666,7 @@ static coroutine_fn void nbd_trip(void *opaque) } if (ret < 0) { - /* It wans't -EIO, so, according to nbd_co_receive_request() + /* It wasn't -EIO, 
so, according to nbd_co_receive_request() * semantics, we should return the error to the client. */ Error *export_err = local_err; diff --git a/nbd/trace-events b/nbd/trace-events index c4919a2dd5..b7032ca277 100644 --- a/nbd/trace-events +++ b/nbd/trace-events @@ -73,3 +73,6 @@ nbd_co_receive_request_decode_type(uint64_t handle, uint16_t type, const char *n nbd_co_receive_request_payload_received(uint64_t handle, uint32_t len) "Payload received: handle = %" PRIu64 ", len = %" PRIu32 nbd_co_receive_align_compliance(const char *op, uint64_t from, uint32_t len, uint32_t align) "client sent non-compliant unaligned %s request: from=0x%" PRIx64 ", len=0x%" PRIx32 ", align=0x%" PRIx32 nbd_trip(void) "Reading request" + +# client-connection.c +nbd_connect_thread_sleep(uint64_t timeout) "timeout %" PRIu64 diff --git a/net/announce.c b/net/announce.c index 26f057f5ee..62c60192a3 100644 --- a/net/announce.c +++ b/net/announce.c @@ -7,7 +7,7 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" +#include "qemu/cutils.h" #include "net/announce.h" #include "net/net.h" #include "qapi/clone-visitor.h" @@ -120,6 +120,19 @@ static int announce_self_create(uint8_t *buf, return 60; /* len (FCS will be added by hardware) */ } +/* + * Helper to print ethernet mac address + */ +static const char *qemu_ether_ntoa(const MACAddr *mac) +{ + static char ret[18]; + + snprintf(ret, sizeof(ret), "%02x:%02x:%02x:%02x:%02x:%02x", + mac->a[0], mac->a[1], mac->a[2], mac->a[3], mac->a[4], mac->a[5]); + + return ret; +} + static void qemu_announce_self_iter(NICState *nic, void *opaque) { AnnounceTimer *timer = opaque; diff --git a/net/can/can_socketcan.c b/net/can/can_socketcan.c index 4b68f60c6b..c1a1ad0563 100644 --- a/net/can/can_socketcan.c +++ b/net/can/can_socketcan.c @@ -76,19 +76,21 @@ QEMU_BUILD_BUG_ON(offsetof(qemu_can_frame, data) static void can_host_socketcan_display_msg(struct qemu_can_frame *msg) { int i; - FILE *logfile = qemu_log_lock(); - qemu_log("[cansocketcan]: %03X [%01d] %s %s", - msg->can_id & QEMU_CAN_EFF_MASK, - msg->can_dlc, - msg->can_id & QEMU_CAN_EFF_FLAG ? "EFF" : "SFF", - msg->can_id & QEMU_CAN_RTR_FLAG ? "RTR" : "DAT"); + FILE *logfile = qemu_log_trylock(); - for (i = 0; i < msg->can_dlc; i++) { - qemu_log(" %02X", msg->data[i]); + if (logfile) { + fprintf(logfile, "[cansocketcan]: %03X [%01d] %s %s", + msg->can_id & QEMU_CAN_EFF_MASK, + msg->can_dlc, + msg->can_id & QEMU_CAN_EFF_FLAG ? "EFF" : "SFF", + msg->can_id & QEMU_CAN_RTR_FLAG ? 
"RTR" : "DAT"); + + for (i = 0; i < msg->can_dlc; i++) { + fprintf(logfile, " %02X", msg->data[i]); + } + fprintf(logfile, "\n"); + qemu_log_unlock(logfile); } - qemu_log("\n"); - qemu_log_flush(); - qemu_log_unlock(logfile); } static void can_host_socketcan_read(void *opaque) diff --git a/net/clients.h b/net/clients.h index 166355eded..e4095f3f0d 100644 --- a/net/clients.h +++ b/net/clients.h @@ -40,6 +40,12 @@ int net_init_hubport(const Netdev *netdev, const char *name, int net_init_socket(const Netdev *netdev, const char *name, NetClientState *peer, Error **errp); +int net_init_stream(const Netdev *netdev, const char *name, + NetClientState *peer, Error **errp); + +int net_init_dgram(const Netdev *netdev, const char *name, + NetClientState *peer, Error **errp); + int net_init_tap(const Netdev *netdev, const char *name, NetClientState *peer, Error **errp); @@ -67,4 +73,15 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name, int net_init_pcap(const Netdev *netdev, const char *name, NetClientState *peer, Error **errp); +#ifdef CONFIG_VMNET +int net_init_vmnet_host(const Netdev *netdev, const char *name, + NetClientState *peer, Error **errp); + +int net_init_vmnet_shared(const Netdev *netdev, const char *name, + NetClientState *peer, Error **errp); + +int net_init_vmnet_bridged(const Netdev *netdev, const char *name, + NetClientState *peer, Error **errp); +#endif /* CONFIG_VMNET */ + #endif /* QEMU_NET_CLIENTS_H */ diff --git a/net/colo-compare.c b/net/colo-compare.c index b100e7b51f..787c740f14 100644 --- a/net/colo-compare.c +++ b/net/colo-compare.c @@ -13,7 +13,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/error-report.h" #include "trace.h" #include "qapi/error.h" @@ -170,7 +169,7 @@ static bool packet_matches_str(const char *str, return false; } - return !memcmp(str, buf, strlen(str)); + return !memcmp(str, buf, packet_len); } static void notify_remote_frame(CompareState *s) @@ -197,7 +196,7 @@ static void colo_compare_inconsistency_notify(CompareState *s) /* Use restricted to colo_insert_packet() */ static gint seq_sorter(Packet *a, Packet *b, gpointer data) { - return a->tcp_seq - b->tcp_seq; + return b->tcp_seq - a->tcp_seq; } static void fill_pkt_tcp_info(void *data, uint32_t *max_ack) @@ -209,7 +208,8 @@ static void fill_pkt_tcp_info(void *data, uint32_t *max_ack) pkt->tcp_seq = ntohl(tcphd->th_seq); pkt->tcp_ack = ntohl(tcphd->th_ack); - *max_ack = *max_ack > pkt->tcp_ack ? *max_ack : pkt->tcp_ack; + /* Need to consider ACK will bigger than uint32_t MAX */ + *max_ack = pkt->tcp_ack - *max_ack > 0 ? pkt->tcp_ack : *max_ack; pkt->header_size = pkt->transport_header - (uint8_t *)pkt->data + (tcphd->th_off << 2); pkt->payload_size = pkt->size - pkt->header_size; @@ -264,7 +264,7 @@ static int packet_enqueue(CompareState *s, int mode, Connection **con) pkt = NULL; return -1; } - fill_connection_key(pkt, &key); + fill_connection_key(pkt, &key, false); conn = connection_get(s->connection_track_table, &key, @@ -413,19 +413,20 @@ static void colo_compare_tcp(CompareState *s, Connection *conn) * can ensure that the packet's payload is acknowledged by * primary and secondary. */ - uint32_t min_ack = conn->pack > conn->sack ? conn->sack : conn->pack; + uint32_t min_ack = conn->pack - conn->sack > 0 ? 
+ conn->sack : conn->pack; pri: if (g_queue_is_empty(&conn->primary_list)) { return; } - ppkt = g_queue_pop_head(&conn->primary_list); + ppkt = g_queue_pop_tail(&conn->primary_list); sec: if (g_queue_is_empty(&conn->secondary_list)) { - g_queue_push_head(&conn->primary_list, ppkt); + g_queue_push_tail(&conn->primary_list, ppkt); return; } - spkt = g_queue_pop_head(&conn->secondary_list); + spkt = g_queue_pop_tail(&conn->secondary_list); if (ppkt->tcp_seq == ppkt->seq_end) { colo_release_primary_pkt(s, ppkt); @@ -456,7 +457,7 @@ sec: } } if (!ppkt) { - g_queue_push_head(&conn->secondary_list, spkt); + g_queue_push_tail(&conn->secondary_list, spkt); goto pri; } } @@ -475,7 +476,7 @@ sec: if (mark == COLO_COMPARE_FREE_PRIMARY) { conn->compare_seq = ppkt->seq_end; colo_release_primary_pkt(s, ppkt); - g_queue_push_head(&conn->secondary_list, spkt); + g_queue_push_tail(&conn->secondary_list, spkt); goto pri; } else if (mark == COLO_COMPARE_FREE_SECONDARY) { conn->compare_seq = spkt->seq_end; @@ -488,8 +489,8 @@ sec: goto pri; } } else { - g_queue_push_head(&conn->primary_list, ppkt); - g_queue_push_head(&conn->secondary_list, spkt); + g_queue_push_tail(&conn->primary_list, ppkt); + g_queue_push_tail(&conn->secondary_list, spkt); #ifdef DEBUG_COLO_PACKETS qemu_hexdump(stderr, "colo-compare ppkt", ppkt->data, ppkt->size); @@ -671,7 +672,7 @@ static void colo_compare_packet(CompareState *s, Connection *conn, while (!g_queue_is_empty(&conn->primary_list) && !g_queue_is_empty(&conn->secondary_list)) { - pkt = g_queue_pop_head(&conn->primary_list); + pkt = g_queue_pop_tail(&conn->primary_list); result = g_queue_find_custom(&conn->secondary_list, pkt, (GCompareFunc)HandlePacket); @@ -687,7 +688,7 @@ static void colo_compare_packet(CompareState *s, Connection *conn, * timeout, it will trigger a checkpoint request. 
*/ trace_colo_compare_main("packet different"); - g_queue_push_head(&conn->primary_list, pkt); + g_queue_push_tail(&conn->primary_list, pkt); colo_compare_inconsistency_notify(s); break; @@ -805,7 +806,7 @@ static int compare_chr_send(CompareState *s, } if (!size) { - return 0; + return -1; } entry = g_slice_new(SendEntry); @@ -817,7 +818,7 @@ static int compare_chr_send(CompareState *s, entry->buf = g_malloc(size); memcpy(entry->buf, buf, size); } - g_queue_push_head(&sendco->send_list, entry); + g_queue_push_tail(&sendco->send_list, entry); if (sendco->done) { sendco->co = qemu_coroutine_create(_compare_chr_send, sendco); @@ -1265,7 +1266,7 @@ static void colo_compare_complete(UserCreatable *uc, Error **errp) } if (!s->expired_scan_cycle) { - /* Set default value to 3000 MS */ + /* Set default value to 1000 MS */ s->expired_scan_cycle = REGULAR_PACKET_CHECK_MS; } @@ -1322,7 +1323,7 @@ static void colo_compare_complete(UserCreatable *uc, Error **errp) s->connection_track_table = g_hash_table_new_full(connection_key_hash, connection_key_equal, g_free, - connection_destroy); + NULL); colo_compare_iothread(s); @@ -1345,7 +1346,7 @@ static void colo_flush_packets(void *opaque, void *user_data) Packet *pkt = NULL; while (!g_queue_is_empty(&conn->primary_list)) { - pkt = g_queue_pop_head(&conn->primary_list); + pkt = g_queue_pop_tail(&conn->primary_list); compare_chr_send(s, pkt->data, pkt->size, @@ -1355,7 +1356,7 @@ static void colo_flush_packets(void *opaque, void *user_data) packet_destroy_partial(pkt, NULL); } while (!g_queue_is_empty(&conn->secondary_list)) { - pkt = g_queue_pop_head(&conn->secondary_list); + pkt = g_queue_pop_tail(&conn->secondary_list); packet_destroy(pkt, NULL); } } diff --git a/net/colo.c b/net/colo.c index 3a3e6e89a0..fb2c36a026 100644 --- a/net/colo.c +++ b/net/colo.c @@ -44,14 +44,28 @@ int parse_packet_early(Packet *pkt) { int network_length; static const uint8_t vlan[] = {0x81, 0x00}; - uint8_t *data = pkt->data + pkt->vnet_hdr_len; + uint8_t *data = pkt->data; uint16_t l3_proto; - ssize_t l2hdr_len = eth_get_l2_hdr_length(data); + ssize_t l2hdr_len; - if (pkt->size < ETH_HLEN + pkt->vnet_hdr_len) { - trace_colo_proxy_main("pkt->size < ETH_HLEN"); + assert(data); + + /* Check the received vnet_hdr_len then add the offset */ + if ((pkt->vnet_hdr_len > sizeof(struct virtio_net_hdr_v1_hash)) || + (pkt->size < sizeof(struct eth_header) + sizeof(struct vlan_header) + + pkt->vnet_hdr_len)) { + /* + * The received remote packet maybe misconfiguration here, + * Please enable/disable filter module's the vnet_hdr flag at + * the same time. + */ + trace_colo_proxy_main_vnet_info("This received packet load wrong ", + pkt->vnet_hdr_len, pkt->size); return 1; } + data += pkt->vnet_hdr_len; + + l2hdr_len = eth_get_l2_hdr_length(data); /* * TODO: support vlan. 
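The added bounds check above is what protects COLO against a vnet_hdr mismatch between the two filter instances: the announced virtio-net header must not exceed the largest header layout QEMU knows about (virtio_net_hdr_v1_hash), and the packet must still contain at least an Ethernet plus optional VLAN header behind it. A standalone restatement of that predicate, with all sizes passed in so the sketch stays self-contained (the function name is made up):

#include <stdbool.h>
#include <stddef.h>

/*
 * Mirror of the sanity check in parse_packet_early(): reject a packet whose
 * claimed vnet header is larger than anything we support, or which is too
 * short to hold an (optionally VLAN-tagged) Ethernet frame after it.
 */
static bool colo_packet_plausible(size_t pkt_size, size_t vnet_hdr_len,
                                  size_t max_vnet_hdr,
                                  size_t eth_hdr, size_t vlan_hdr)
{
    if (vnet_hdr_len > max_vnet_hdr) {
        return false;       /* peer filter configured with a bigger header */
    }
    if (pkt_size < eth_hdr + vlan_hdr + vnet_hdr_len) {
        return false;       /* no room left for the L2 header itself */
    }
    return true;
}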
@@ -83,19 +97,26 @@ int parse_packet_early(Packet *pkt) return 0; } -void extract_ip_and_port(uint32_t tmp_ports, ConnectionKey *key, Packet *pkt) +void extract_ip_and_port(uint32_t tmp_ports, ConnectionKey *key, + Packet *pkt, bool reverse) { + if (reverse) { + key->src = pkt->ip->ip_dst; + key->dst = pkt->ip->ip_src; + key->src_port = ntohs(tmp_ports & 0xffff); + key->dst_port = ntohs(tmp_ports >> 16); + } else { key->src = pkt->ip->ip_src; key->dst = pkt->ip->ip_dst; key->src_port = ntohs(tmp_ports >> 16); key->dst_port = ntohs(tmp_ports & 0xffff); + } } -void fill_connection_key(Packet *pkt, ConnectionKey *key) +void fill_connection_key(Packet *pkt, ConnectionKey *key, bool reverse) { - uint32_t tmp_ports; + uint32_t tmp_ports = 0; - memset(key, 0, sizeof(*key)); key->ip_proto = pkt->ip->ip_p; switch (key->ip_proto) { @@ -106,29 +127,15 @@ void fill_connection_key(Packet *pkt, ConnectionKey *key) case IPPROTO_SCTP: case IPPROTO_UDPLITE: tmp_ports = *(uint32_t *)(pkt->transport_header); - extract_ip_and_port(tmp_ports, key, pkt); break; case IPPROTO_AH: tmp_ports = *(uint32_t *)(pkt->transport_header + 4); - extract_ip_and_port(tmp_ports, key, pkt); break; default: break; } -} -void reverse_connection_key(ConnectionKey *key) -{ - struct in_addr tmp_ip; - uint16_t tmp_port; - - tmp_ip = key->src; - key->src = key->dst; - key->dst = tmp_ip; - - tmp_port = key->src_port; - key->src_port = key->dst_port; - key->dst_port = tmp_port; + extract_ip_and_port(tmp_ports, key, pkt, reverse); } Connection *connection_new(ConnectionKey *key) @@ -225,7 +232,7 @@ Connection *connection_get(GHashTable *connection_track_table, /* * clear the conn_list */ - while (!g_queue_is_empty(conn_list)) { + while (conn_list && !g_queue_is_empty(conn_list)) { connection_destroy(g_queue_pop_head(conn_list)); } } diff --git a/net/colo.h b/net/colo.h index d91cd245c4..22fc3031f7 100644 --- a/net/colo.h +++ b/net/colo.h @@ -18,6 +18,7 @@ #include "qemu/jhash.h" #include "qemu/timer.h" #include "net/eth.h" +#include "standard-headers/linux/virtio_net.h" #define HASHTABLE_MAX_SIZE 16384 @@ -89,9 +90,9 @@ typedef struct Connection { uint32_t connection_key_hash(const void *opaque); int connection_key_equal(const void *opaque1, const void *opaque2); int parse_packet_early(Packet *pkt); -void extract_ip_and_port(uint32_t tmp_ports, ConnectionKey *key, Packet *pkt); -void fill_connection_key(Packet *pkt, ConnectionKey *key); -void reverse_connection_key(ConnectionKey *key); +void extract_ip_and_port(uint32_t tmp_ports, ConnectionKey *key, + Packet *pkt, bool reverse); +void fill_connection_key(Packet *pkt, ConnectionKey *key, bool reverse); Connection *connection_new(ConnectionKey *key); void connection_destroy(void *opaque); Connection *connection_get(GHashTable *connection_track_table, diff --git a/net/dgram.c b/net/dgram.c new file mode 100644 index 0000000000..9f7bf38376 --- /dev/null +++ b/net/dgram.c @@ -0,0 +1,623 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2022 Red Hat, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" + +#include "net/net.h" +#include "clients.h" +#include "monitor/monitor.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/option.h" +#include "qemu/sockets.h" +#include "qemu/iov.h" +#include "qemu/main-loop.h" +#include "qemu/cutils.h" + +typedef struct NetDgramState { + NetClientState nc; + int fd; + SocketReadState rs; + bool read_poll; /* waiting to receive data? */ + bool write_poll; /* waiting to transmit data? */ + /* contains destination iff connectionless */ + struct sockaddr *dest_addr; + socklen_t dest_len; +} NetDgramState; + +static void net_dgram_send(void *opaque); +static void net_dgram_writable(void *opaque); + +static void net_dgram_update_fd_handler(NetDgramState *s) +{ + qemu_set_fd_handler(s->fd, + s->read_poll ? net_dgram_send : NULL, + s->write_poll ? 
net_dgram_writable : NULL, + s); +} + +static void net_dgram_read_poll(NetDgramState *s, bool enable) +{ + s->read_poll = enable; + net_dgram_update_fd_handler(s); +} + +static void net_dgram_write_poll(NetDgramState *s, bool enable) +{ + s->write_poll = enable; + net_dgram_update_fd_handler(s); +} + +static void net_dgram_writable(void *opaque) +{ + NetDgramState *s = opaque; + + net_dgram_write_poll(s, false); + + qemu_flush_queued_packets(&s->nc); +} + +static ssize_t net_dgram_receive(NetClientState *nc, + const uint8_t *buf, size_t size) +{ + NetDgramState *s = DO_UPCAST(NetDgramState, nc, nc); + ssize_t ret; + + do { + if (s->dest_addr) { + ret = sendto(s->fd, buf, size, 0, s->dest_addr, s->dest_len); + } else { + ret = send(s->fd, buf, size, 0); + } + } while (ret == -1 && errno == EINTR); + + if (ret == -1 && errno == EAGAIN) { + net_dgram_write_poll(s, true); + return 0; + } + return ret; +} + +static void net_dgram_send_completed(NetClientState *nc, ssize_t len) +{ + NetDgramState *s = DO_UPCAST(NetDgramState, nc, nc); + + if (!s->read_poll) { + net_dgram_read_poll(s, true); + } +} + +static void net_dgram_rs_finalize(SocketReadState *rs) +{ + NetDgramState *s = container_of(rs, NetDgramState, rs); + + if (qemu_send_packet_async(&s->nc, rs->buf, + rs->packet_len, + net_dgram_send_completed) == 0) { + net_dgram_read_poll(s, false); + } +} + +static void net_dgram_send(void *opaque) +{ + NetDgramState *s = opaque; + int size; + + size = recv(s->fd, s->rs.buf, sizeof(s->rs.buf), 0); + if (size < 0) { + return; + } + if (size == 0) { + /* end of connection */ + net_dgram_read_poll(s, false); + net_dgram_write_poll(s, false); + return; + } + if (qemu_send_packet_async(&s->nc, s->rs.buf, size, + net_dgram_send_completed) == 0) { + net_dgram_read_poll(s, false); + } +} + +static int net_dgram_mcast_create(struct sockaddr_in *mcastaddr, + struct in_addr *localaddr, + Error **errp) +{ + struct ip_mreq imr; + int fd; + int val, ret; +#ifdef __OpenBSD__ + unsigned char loop; +#else + int loop; +#endif + + if (!IN_MULTICAST(ntohl(mcastaddr->sin_addr.s_addr))) { + error_setg(errp, "specified mcastaddr %s (0x%08x) " + "does not contain a multicast address", + inet_ntoa(mcastaddr->sin_addr), + (int)ntohl(mcastaddr->sin_addr.s_addr)); + return -1; + } + + fd = qemu_socket(PF_INET, SOCK_DGRAM, 0); + if (fd < 0) { + error_setg_errno(errp, errno, "can't create datagram socket"); + return -1; + } + + /* + * Allow multiple sockets to bind the same multicast ip and port by setting + * SO_REUSEADDR. This is the only situation where SO_REUSEADDR should be set + * on windows. Use socket_set_fast_reuse otherwise as it sets SO_REUSEADDR + * only on posix systems. 
+ */ + val = 1; + ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val)); + if (ret < 0) { + error_setg_errno(errp, errno, "can't set socket option SO_REUSEADDR"); + goto fail; + } + + ret = bind(fd, (struct sockaddr *)mcastaddr, sizeof(*mcastaddr)); + if (ret < 0) { + error_setg_errno(errp, errno, "can't bind ip=%s to socket", + inet_ntoa(mcastaddr->sin_addr)); + goto fail; + } + + /* Add host to multicast group */ + imr.imr_multiaddr = mcastaddr->sin_addr; + if (localaddr) { + imr.imr_interface = *localaddr; + } else { + imr.imr_interface.s_addr = htonl(INADDR_ANY); + } + + ret = setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, + &imr, sizeof(struct ip_mreq)); + if (ret < 0) { + error_setg_errno(errp, errno, + "can't add socket to multicast group %s", + inet_ntoa(imr.imr_multiaddr)); + goto fail; + } + + /* Force mcast msgs to loopback (eg. several QEMUs in same host */ + loop = 1; + ret = setsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP, + &loop, sizeof(loop)); + if (ret < 0) { + error_setg_errno(errp, errno, + "can't force multicast message to loopback"); + goto fail; + } + + /* If a bind address is given, only send packets from that address */ + if (localaddr != NULL) { + ret = setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, + localaddr, sizeof(*localaddr)); + if (ret < 0) { + error_setg_errno(errp, errno, + "can't set the default network send interface"); + goto fail; + } + } + + qemu_socket_set_nonblock(fd); + return fd; +fail: + if (fd >= 0) { + closesocket(fd); + } + return -1; +} + +static void net_dgram_cleanup(NetClientState *nc) +{ + NetDgramState *s = DO_UPCAST(NetDgramState, nc, nc); + if (s->fd != -1) { + net_dgram_read_poll(s, false); + net_dgram_write_poll(s, false); + close(s->fd); + s->fd = -1; + } + g_free(s->dest_addr); + s->dest_addr = NULL; + s->dest_len = 0; +} + +static NetClientInfo net_dgram_socket_info = { + .type = NET_CLIENT_DRIVER_DGRAM, + .size = sizeof(NetDgramState), + .receive = net_dgram_receive, + .cleanup = net_dgram_cleanup, +}; + +static NetDgramState *net_dgram_fd_init(NetClientState *peer, + const char *model, + const char *name, + int fd, + Error **errp) +{ + NetClientState *nc; + NetDgramState *s; + + nc = qemu_new_net_client(&net_dgram_socket_info, peer, model, name); + + s = DO_UPCAST(NetDgramState, nc, nc); + + s->fd = fd; + net_socket_rs_init(&s->rs, net_dgram_rs_finalize, false); + net_dgram_read_poll(s, true); + + return s; +} + +static int net_dgram_mcast_init(NetClientState *peer, + const char *model, + const char *name, + SocketAddress *remote, + SocketAddress *local, + Error **errp) +{ + NetDgramState *s; + int fd, ret; + struct sockaddr_in *saddr; + + if (remote->type != SOCKET_ADDRESS_TYPE_INET) { + error_setg(errp, "multicast only support inet type"); + return -1; + } + + saddr = g_new(struct sockaddr_in, 1); + if (convert_host_port(saddr, remote->u.inet.host, remote->u.inet.port, + errp) < 0) { + g_free(saddr); + return -1; + } + + if (!local) { + fd = net_dgram_mcast_create(saddr, NULL, errp); + if (fd < 0) { + g_free(saddr); + return -1; + } + } else { + switch (local->type) { + case SOCKET_ADDRESS_TYPE_INET: { + struct in_addr localaddr; + + if (inet_aton(local->u.inet.host, &localaddr) == 0) { + g_free(saddr); + error_setg(errp, "localaddr '%s' is not a valid IPv4 address", + local->u.inet.host); + return -1; + } + + fd = net_dgram_mcast_create(saddr, &localaddr, errp); + if (fd < 0) { + g_free(saddr); + return -1; + } + break; + } + case SOCKET_ADDRESS_TYPE_FD: { + int newfd; + + fd = monitor_fd_param(monitor_cur(), 
local->u.fd.str, errp); + if (fd == -1) { + g_free(saddr); + return -1; + } + ret = qemu_socket_try_set_nonblock(fd); + if (ret < 0) { + g_free(saddr); + error_setg_errno(errp, -ret, "%s: Can't use file descriptor %d", + name, fd); + return -1; + } + + /* + * fd passed: multicast: "learn" dest_addr address from bound + * address and save it. Because this may be "shared" socket from a + * "master" process, datagrams would be recv() by ONLY ONE process: + * we must "clone" this dgram socket --jjo + */ + + saddr = g_new(struct sockaddr_in, 1); + + if (convert_host_port(saddr, local->u.inet.host, local->u.inet.port, + errp) < 0) { + g_free(saddr); + closesocket(fd); + return -1; + } + + /* must be bound */ + if (saddr->sin_addr.s_addr == 0) { + error_setg(errp, "can't setup multicast destination address"); + g_free(saddr); + closesocket(fd); + return -1; + } + /* clone dgram socket */ + newfd = net_dgram_mcast_create(saddr, NULL, errp); + if (newfd < 0) { + g_free(saddr); + closesocket(fd); + return -1; + } + /* clone newfd to fd, close newfd */ + dup2(newfd, fd); + close(newfd); + break; + } + default: + g_free(saddr); + error_setg(errp, "only support inet or fd type for local"); + return -1; + } + } + + s = net_dgram_fd_init(peer, model, name, fd, errp); + if (!s) { + g_free(saddr); + return -1; + } + + g_assert(s->dest_addr == NULL); + s->dest_addr = (struct sockaddr *)saddr; + s->dest_len = sizeof(*saddr); + + if (!local) { + qemu_set_info_str(&s->nc, "mcast=%s:%d", + inet_ntoa(saddr->sin_addr), + ntohs(saddr->sin_port)); + } else { + switch (local->type) { + case SOCKET_ADDRESS_TYPE_INET: + qemu_set_info_str(&s->nc, "mcast=%s:%d", + inet_ntoa(saddr->sin_addr), + ntohs(saddr->sin_port)); + break; + case SOCKET_ADDRESS_TYPE_FD: + qemu_set_info_str(&s->nc, "fd=%d (cloned mcast=%s:%d)", + fd, inet_ntoa(saddr->sin_addr), + ntohs(saddr->sin_port)); + break; + default: + g_assert_not_reached(); + } + } + + return 0; + +} + + +int net_init_dgram(const Netdev *netdev, const char *name, + NetClientState *peer, Error **errp) +{ + NetDgramState *s; + int fd, ret; + SocketAddress *remote, *local; + struct sockaddr *dest_addr; + struct sockaddr_in laddr_in, raddr_in; + struct sockaddr_un laddr_un, raddr_un; + socklen_t dest_len; + + assert(netdev->type == NET_CLIENT_DRIVER_DGRAM); + + remote = netdev->u.dgram.remote; + local = netdev->u.dgram.local; + + /* detect multicast address */ + if (remote && remote->type == SOCKET_ADDRESS_TYPE_INET) { + struct sockaddr_in mcastaddr; + + if (convert_host_port(&mcastaddr, remote->u.inet.host, + remote->u.inet.port, errp) < 0) { + return -1; + } + + if (IN_MULTICAST(ntohl(mcastaddr.sin_addr.s_addr))) { + return net_dgram_mcast_init(peer, "dram", name, remote, local, + errp); + } + } + + /* unicast address */ + if (!local) { + error_setg(errp, "dgram requires local= parameter"); + return -1; + } + + if (remote) { + if (local->type == SOCKET_ADDRESS_TYPE_FD) { + error_setg(errp, "don't set remote with local.fd"); + return -1; + } + if (remote->type != local->type) { + error_setg(errp, "remote and local types must be the same"); + return -1; + } + } else { + if (local->type != SOCKET_ADDRESS_TYPE_FD) { + error_setg(errp, + "type=inet or type=unix requires remote parameter"); + return -1; + } + } + + switch (local->type) { + case SOCKET_ADDRESS_TYPE_INET: + if (convert_host_port(&laddr_in, local->u.inet.host, local->u.inet.port, + errp) < 0) { + return -1; + } + + if (convert_host_port(&raddr_in, remote->u.inet.host, + remote->u.inet.port, errp) < 0) { + return -1; + 
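For the unicast inet case, the setup around this point reduces to: resolve both endpoints, bind the local one, and remember the remote one as the destination that net_dgram_receive() later hands to sendto(). A compressed sketch of that flow with plain BSD sockets (the helper name and error handling are made up):

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Bind @local and remember @remote as the default datagram destination. */
static int dgram_unicast_socket(const struct sockaddr_in *local,
                                const struct sockaddr_in *remote,
                                struct sockaddr_in *dest_out)
{
    int fd = socket(PF_INET, SOCK_DGRAM, 0);

    if (fd < 0) {
        return -1;
    }
    if (bind(fd, (const struct sockaddr *)local, sizeof(*local)) < 0) {
        close(fd);
        return -1;
    }
    /* every later transmit does sendto(fd, buf, len, 0, dest_out, ...) */
    memcpy(dest_out, remote, sizeof(*dest_out));
    return fd;
}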
} + + fd = qemu_socket(PF_INET, SOCK_DGRAM, 0); + if (fd < 0) { + error_setg_errno(errp, errno, "can't create datagram socket"); + return -1; + } + + ret = socket_set_fast_reuse(fd); + if (ret < 0) { + error_setg_errno(errp, errno, + "can't set socket option SO_REUSEADDR"); + closesocket(fd); + return -1; + } + ret = bind(fd, (struct sockaddr *)&laddr_in, sizeof(laddr_in)); + if (ret < 0) { + error_setg_errno(errp, errno, "can't bind ip=%s to socket", + inet_ntoa(laddr_in.sin_addr)); + closesocket(fd); + return -1; + } + qemu_socket_set_nonblock(fd); + + dest_len = sizeof(raddr_in); + dest_addr = g_malloc(dest_len); + memcpy(dest_addr, &raddr_in, dest_len); + break; + case SOCKET_ADDRESS_TYPE_UNIX: + ret = unlink(local->u.q_unix.path); + if (ret < 0 && errno != ENOENT) { + error_setg_errno(errp, errno, "failed to unlink socket %s", + local->u.q_unix.path); + return -1; + } + + laddr_un.sun_family = PF_UNIX; + ret = snprintf(laddr_un.sun_path, sizeof(laddr_un.sun_path), "%s", + local->u.q_unix.path); + if (ret < 0 || ret >= sizeof(laddr_un.sun_path)) { + error_setg(errp, "UNIX socket path '%s' is too long", + local->u.q_unix.path); + error_append_hint(errp, "Path must be less than %zu bytes\n", + sizeof(laddr_un.sun_path)); + } + + raddr_un.sun_family = PF_UNIX; + ret = snprintf(raddr_un.sun_path, sizeof(raddr_un.sun_path), "%s", + remote->u.q_unix.path); + if (ret < 0 || ret >= sizeof(raddr_un.sun_path)) { + error_setg(errp, "UNIX socket path '%s' is too long", + remote->u.q_unix.path); + error_append_hint(errp, "Path must be less than %zu bytes\n", + sizeof(raddr_un.sun_path)); + } + + fd = qemu_socket(PF_UNIX, SOCK_DGRAM, 0); + if (fd < 0) { + error_setg_errno(errp, errno, "can't create datagram socket"); + return -1; + } + + ret = bind(fd, (struct sockaddr *)&laddr_un, sizeof(laddr_un)); + if (ret < 0) { + error_setg_errno(errp, errno, "can't bind unix=%s to socket", + laddr_un.sun_path); + closesocket(fd); + return -1; + } + qemu_socket_set_nonblock(fd); + + dest_len = sizeof(raddr_un); + dest_addr = g_malloc(dest_len); + memcpy(dest_addr, &raddr_un, dest_len); + break; + case SOCKET_ADDRESS_TYPE_FD: + fd = monitor_fd_param(monitor_cur(), local->u.fd.str, errp); + if (fd == -1) { + return -1; + } + ret = qemu_socket_try_set_nonblock(fd); + if (ret < 0) { + error_setg_errno(errp, -ret, "%s: Can't use file descriptor %d", + name, fd); + return -1; + } + dest_addr = NULL; + dest_len = 0; + break; + default: + error_setg(errp, "only support inet or fd type for local"); + return -1; + } + + s = net_dgram_fd_init(peer, "dgram", name, fd, errp); + if (!s) { + return -1; + } + + if (remote) { + g_assert(s->dest_addr == NULL); + s->dest_addr = dest_addr; + s->dest_len = dest_len; + } + + switch (local->type) { + case SOCKET_ADDRESS_TYPE_INET: + qemu_set_info_str(&s->nc, "udp=%s:%d/%s:%d", + inet_ntoa(laddr_in.sin_addr), + ntohs(laddr_in.sin_port), + inet_ntoa(raddr_in.sin_addr), + ntohs(raddr_in.sin_port)); + break; + case SOCKET_ADDRESS_TYPE_UNIX: + qemu_set_info_str(&s->nc, "udp=%s:%s", + laddr_un.sun_path, raddr_un.sun_path); + break; + case SOCKET_ADDRESS_TYPE_FD: { + SocketAddress *sa; + SocketAddressType sa_type; + + sa = socket_local_address(fd, errp); + if (sa) { + sa_type = sa->type; + qapi_free_SocketAddress(sa); + + qemu_set_info_str(&s->nc, "fd=%d %s", fd, + SocketAddressType_str(sa_type)); + } else { + qemu_set_info_str(&s->nc, "fd=%d", fd); + } + break; + } + default: + g_assert_not_reached(); + } + + return 0; +} diff --git a/net/dump.c b/net/dump.c index a07ba62401..6a63b15359 
100644 --- a/net/dump.c +++ b/net/dump.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "clients.h" #include "qapi/error.h" #include "qemu/error-report.h" @@ -33,6 +32,7 @@ #include "qapi/visitor.h" #include "net/filter.h" #include "qom/object.h" +#include "sysemu/rtc.h" typedef struct DumpState { int64_t start_ts; diff --git a/net/eth.c b/net/eth.c index fe876d1a55..f074b2f9f3 100644 --- a/net/eth.c +++ b/net/eth.c @@ -389,7 +389,6 @@ eth_is_ip6_extension_header_type(uint8_t hdr_type) case IP6_HOP_BY_HOP: case IP6_ROUTING: case IP6_FRAGMENT: - case IP6_ESP: case IP6_AUTHENTICATION: case IP6_DESTINATON: case IP6_MOBILITY: diff --git a/net/filter-mirror.c b/net/filter-mirror.c index f20240cc9f..34a63b5dbb 100644 --- a/net/filter-mirror.c +++ b/net/filter-mirror.c @@ -20,6 +20,7 @@ #include "chardev/char-fe.h" #include "qemu/iov.h" #include "qemu/sockets.h" +#include "block/aio-wait.h" #define TYPE_FILTER_MIRROR "filter-mirror" typedef struct MirrorState MirrorState; @@ -42,20 +43,21 @@ struct MirrorState { bool vnet_hdr; }; -static int filter_send(MirrorState *s, - const struct iovec *iov, - int iovcnt) +typedef struct FilterSendCo { + MirrorState *s; + char *buf; + ssize_t size; + bool done; + int ret; +} FilterSendCo; + +static int _filter_send(MirrorState *s, + char *buf, + ssize_t size) { NetFilterState *nf = NETFILTER(s); int ret = 0; - ssize_t size = 0; uint32_t len = 0; - char *buf; - - size = iov_size(iov, iovcnt); - if (!size) { - return 0; - } len = htonl(size); ret = qemu_chr_fe_write_all(&s->chr_out, (uint8_t *)&len, sizeof(len)); @@ -80,10 +82,7 @@ static int filter_send(MirrorState *s, } } - buf = g_malloc(size); - iov_to_buf(iov, iovcnt, 0, buf, size); ret = qemu_chr_fe_write_all(&s->chr_out, (uint8_t *)buf, size); - g_free(buf); if (ret != size) { goto err; } @@ -94,6 +93,47 @@ err: return ret < 0 ? ret : -EIO; } +static void coroutine_fn filter_send_co(void *opaque) +{ + FilterSendCo *data = opaque; + + data->ret = _filter_send(data->s, data->buf, data->size); + data->done = true; + g_free(data->buf); + aio_wait_kick(); +} + +static int filter_send(MirrorState *s, + const struct iovec *iov, + int iovcnt) +{ + ssize_t size = iov_size(iov, iovcnt); + char *buf = NULL; + + if (!size) { + return 0; + } + + buf = g_malloc(size); + iov_to_buf(iov, iovcnt, 0, buf, size); + + FilterSendCo data = { + .s = s, + .size = size, + .buf = buf, + .ret = 0, + }; + + Coroutine *co = qemu_coroutine_create(filter_send_co, &data); + qemu_coroutine_enter(co); + + while (!data.done) { + aio_poll(qemu_get_aio_context(), true); + } + + return data.ret; +} + static void redirector_to_filter(NetFilterState *nf, const uint8_t *buf, int len) diff --git a/net/filter-rewriter.c b/net/filter-rewriter.c index cb3a96cde1..c18c4c2019 100644 --- a/net/filter-rewriter.c +++ b/net/filter-rewriter.c @@ -279,15 +279,7 @@ static ssize_t colo_rewriter_receive_iov(NetFilterState *nf, */ if (pkt && is_tcp_packet(pkt)) { - fill_connection_key(pkt, &key); - - if (sender == nf->netdev) { - /* - * We need make tcp TX and RX packet - * into one connection. 
- */ - reverse_connection_key(&key); - } + fill_connection_key(pkt, &key, sender == nf->netdev); /* After failover we needn't change new TCP packet */ if (s->failover_mode && @@ -391,7 +383,7 @@ static void colo_rewriter_setup(NetFilterState *nf, Error **errp) s->connection_track_table = g_hash_table_new_full(connection_key_hash, connection_key_equal, g_free, - connection_destroy); + NULL); s->incoming_queue = qemu_new_net_queue(qemu_netfilter_pass_to_next, nf); } diff --git a/net/hub.c b/net/hub.c index 1375738bf1..67ca534856 100644 --- a/net/hub.c +++ b/net/hub.c @@ -313,6 +313,8 @@ void net_hub_check_clients(void) case NET_CLIENT_DRIVER_USER: case NET_CLIENT_DRIVER_TAP: case NET_CLIENT_DRIVER_SOCKET: + case NET_CLIENT_DRIVER_STREAM: + case NET_CLIENT_DRIVER_DGRAM: case NET_CLIENT_DRIVER_VDE: case NET_CLIENT_DRIVER_VHOST_USER: has_host_dev = 1; diff --git a/net/l2tpv3.c b/net/l2tpv3.c index e4d4218db6..350041a0d6 100644 --- a/net/l2tpv3.c +++ b/net/l2tpv3.c @@ -34,7 +34,7 @@ #include "qemu/sockets.h" #include "qemu/iov.h" #include "qemu/main-loop.h" - +#include "qemu/memalign.h" /* The buffer size needs to be investigated for optimum numbers and * optimum means of paging in on different systems. This size is @@ -716,15 +716,14 @@ int net_init_l2tpv3(const Netdev *netdev, s->vec = g_new(struct iovec, MAX_L2TPV3_IOVCNT); s->header_buf = g_malloc(s->header_size); - qemu_set_nonblock(fd); + qemu_socket_set_nonblock(fd); s->fd = fd; s->counter = 0; l2tpv3_read_poll(s, true); - snprintf(s->nc.info_str, sizeof(s->nc.info_str), - "l2tpv3: connected"); + qemu_set_info_str(&s->nc, "l2tpv3: connected"); return 0; outerr: qemu_del_net_client(nc); diff --git a/net/meson.build b/net/meson.build index 21ef96dbfa..a75a56092e 100644 --- a/net/meson.build +++ b/net/meson.build @@ -13,19 +13,25 @@ softmmu_ss.add(files( 'net.c', 'queue.c', 'socket.c', + 'stream.c', + 'dgram.c', 'util.c', )) softmmu_ss.add(when: 'CONFIG_TCG', if_true: files('filter-replay.c')) -softmmu_ss.add(when: 'CONFIG_L2TPV3', if_true: files('l2tpv3.c')) +if have_l2tpv3 + softmmu_ss.add(files('l2tpv3.c')) +endif softmmu_ss.add(when: slirp, if_true: files('slirp.c')) -softmmu_ss.add(when: ['CONFIG_VDE', vde], if_true: files('vde.c')) -softmmu_ss.add(when: 'CONFIG_NETMAP', if_true: files('netmap.c')) -vhost_user_ss = ss.source_set() -vhost_user_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost-user.c'), if_false: files('vhost-user-stub.c')) -softmmu_ss.add_all(when: 'CONFIG_VHOST_NET_USER', if_true: vhost_user_ss) -softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-user-stub.c')) +softmmu_ss.add(when: vde, if_true: files('vde.c')) +if have_netmap + softmmu_ss.add(files('netmap.c')) +endif +if have_vhost_net_user + softmmu_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost-user.c'), if_false: files('vhost-user-stub.c')) + softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-user-stub.c')) +endif softmmu_ss.add(when: 'CONFIG_LINUX', if_true: files('tap-linux.c')) softmmu_ss.add(when: 'CONFIG_BSD', if_true: files('tap-bsd.c')) @@ -36,8 +42,18 @@ if not config_host.has_key('CONFIG_LINUX') and not config_host.has_key('CONFIG_B endif softmmu_ss.add(when: 'CONFIG_POSIX', if_true: files(tap_posix)) softmmu_ss.add(when: 'CONFIG_WIN32', if_true: files('tap-win32.c')) -softmmu_ss.add(when: 'CONFIG_VHOST_NET_VDPA', if_true: files('vhost-vdpa.c')) softmmu_ss.add([libpcap, files('pcap.c')]) softmmu_ss.add(when: 'CONFIG_WIN32', if_true: files('capture_win_ifnames.c')) +if have_vhost_net_vdpa + softmmu_ss.add(when: 
'CONFIG_VIRTIO_NET', if_true: files('vhost-vdpa.c'), if_false: files('vhost-vdpa-stub.c')) + softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-vdpa-stub.c')) +endif +vmnet_files = files( + 'vmnet-common.m', + 'vmnet-bridged.m', + 'vmnet-host.c', + 'vmnet-shared.c' +) +softmmu_ss.add(when: vmnet, if_true: vmnet_files) subdir('can') diff --git a/net/net.c b/net/net.c index 134060cfb3..108685f4f8 100644 --- a/net/net.c +++ b/net/net.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "net/net.h" #include "clients.h" @@ -49,12 +48,14 @@ #include "qemu/qemu-print.h" #include "qemu/main-loop.h" #include "qemu/option.h" +#include "qemu/keyval.h" #include "qapi/error.h" #include "qapi/opts-visitor.h" #include "sysemu/runstate.h" #include "net/colo-compare.h" #include "net/filter.h" #include "qapi/string-output-visitor.h" +#include "qapi/qobject-input-visitor.h" /* Net bridge is currently not supported for W32. */ #if !defined(_WIN32) @@ -64,16 +65,60 @@ static VMChangeStateEntry *net_change_state_entry; static QTAILQ_HEAD(, NetClientState) net_clients; +typedef struct NetdevQueueEntry { + Netdev *nd; + Location loc; + QSIMPLEQ_ENTRY(NetdevQueueEntry) entry; +} NetdevQueueEntry; + +typedef QSIMPLEQ_HEAD(, NetdevQueueEntry) NetdevQueue; + +static NetdevQueue nd_queue = QSIMPLEQ_HEAD_INITIALIZER(nd_queue); + /***********************************************************/ /* network device redirectors */ +int convert_host_port(struct sockaddr_in *saddr, const char *host, + const char *port, Error **errp) +{ + struct hostent *he; + const char *r; + long p; + + memset(saddr, 0, sizeof(*saddr)); + + saddr->sin_family = AF_INET; + if (host[0] == '\0') { + saddr->sin_addr.s_addr = 0; + } else { + if (qemu_isdigit(host[0])) { + if (!inet_aton(host, &saddr->sin_addr)) { + error_setg(errp, "host address '%s' is not a valid " + "IPv4 address", host); + return -1; + } + } else { + he = gethostbyname(host); + if (he == NULL) { + error_setg(errp, "can't resolve host address '%s'", host); + return -1; + } + saddr->sin_addr = *(struct in_addr *)he->h_addr; + } + } + if (qemu_strtol(port, &r, 0, &p) != 0) { + error_setg(errp, "port number '%s' is invalid", port); + return -1; + } + saddr->sin_port = htons(p); + return 0; +} + int parse_host_port(struct sockaddr_in *saddr, const char *str, Error **errp) { gchar **substrings; - struct hostent *he; - const char *addr, *p, *r; - int port, ret = 0; + int ret; substrings = g_strsplit(str, ":", 2); if (!substrings || !substrings[0] || !substrings[1]) { @@ -83,37 +128,7 @@ int parse_host_port(struct sockaddr_in *saddr, const char *str, goto out; } - addr = substrings[0]; - p = substrings[1]; - - saddr->sin_family = AF_INET; - if (addr[0] == '\0') { - saddr->sin_addr.s_addr = 0; - } else { - if (qemu_isdigit(addr[0])) { - if (!inet_aton(addr, &saddr->sin_addr)) { - error_setg(errp, "host address '%s' is not a valid " - "IPv4 address", addr); - ret = -1; - goto out; - } - } else { - he = gethostbyname(addr); - if (he == NULL) { - error_setg(errp, "can't resolve host address '%s'", addr); - ret = -1; - goto out; - } - saddr->sin_addr = *(struct in_addr *)he->h_addr; - } - } - port = strtol(p, (char **)&r, 0); - if (r == p) { - error_setg(errp, "port number '%s' is invalid", p); - ret = -1; - goto out; - } - saddr->sin_port = htons(port); + ret = convert_host_port(saddr, substrings[0], substrings[1], errp); out: g_strfreev(substrings); @@ -127,13 +142,20 @@ char *qemu_mac_strdup_printf(const uint8_t *macaddr) macaddr[3], macaddr[4], 
macaddr[5]); } +void qemu_set_info_str(NetClientState *nc, const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + vsnprintf(nc->info_str, sizeof(nc->info_str), fmt, ap); + va_end(ap); +} + void qemu_format_nic_info_str(NetClientState *nc, uint8_t macaddr[6]) { - snprintf(nc->info_str, sizeof(nc->info_str), - "model=%s,macaddr=%02x:%02x:%02x:%02x:%02x:%02x", - nc->model, - macaddr[0], macaddr[1], macaddr[2], - macaddr[3], macaddr[4], macaddr[5]); + qemu_set_info_str(nc, "model=%s,macaddr=%02x:%02x:%02x:%02x:%02x:%02x", + nc->model, macaddr[0], macaddr[1], macaddr[2], + macaddr[3], macaddr[4], macaddr[5]); } static int mac_table[256] = {0}; @@ -237,7 +259,8 @@ static void qemu_net_client_setup(NetClientState *nc, NetClientState *peer, const char *model, const char *name, - NetClientDestructor *destructor) + NetClientDestructor *destructor, + bool is_datapath) { nc->info = info; nc->model = g_strdup(model); @@ -256,6 +279,7 @@ static void qemu_net_client_setup(NetClientState *nc, nc->incoming_queue = qemu_new_net_queue(qemu_deliver_packet_iov, nc); nc->destructor = destructor; + nc->is_datapath = is_datapath; QTAILQ_INIT(&nc->filters); } @@ -270,7 +294,23 @@ NetClientState *qemu_new_net_client(NetClientInfo *info, nc = g_malloc0(info->size); qemu_net_client_setup(nc, info, peer, model, name, - qemu_net_client_destructor); + qemu_net_client_destructor, true); + + return nc; +} + +NetClientState *qemu_new_net_control_client(NetClientInfo *info, + NetClientState *peer, + const char *model, + const char *name) +{ + NetClientState *nc; + + assert(info->size >= sizeof(NetClientState)); + + nc = g_malloc0(info->size); + qemu_net_client_setup(nc, info, peer, model, name, + qemu_net_client_destructor, false); return nc; } @@ -295,7 +335,7 @@ NICState *qemu_new_nic(NetClientInfo *info, for (i = 0; i < queues; i++) { qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name, - NULL); + NULL, true); nic->ncs[i].queue_index = i; } @@ -504,7 +544,7 @@ void qemu_set_vnet_hdr_len(NetClientState *nc, int len) int qemu_set_vnet_le(NetClientState *nc, bool is_le) { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN if (!nc || !nc->info->set_vnet_le) { return -ENOSYS; } @@ -517,7 +557,7 @@ int qemu_set_vnet_le(NetClientState *nc, bool is_le) int qemu_set_vnet_be(NetClientState *nc, bool is_be) { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN return 0; #else if (!nc || !nc->info->set_vnet_be) { @@ -982,6 +1022,8 @@ static int (* const net_client_init_fun[NET_CLIENT_DRIVER__MAX])( #endif [NET_CLIENT_DRIVER_TAP] = net_init_tap, [NET_CLIENT_DRIVER_SOCKET] = net_init_socket, + [NET_CLIENT_DRIVER_STREAM] = net_init_stream, + [NET_CLIENT_DRIVER_DGRAM] = net_init_dgram, #ifdef CONFIG_VDE [NET_CLIENT_DRIVER_VDE] = net_init_vde, #endif @@ -1002,6 +1044,11 @@ static int (* const net_client_init_fun[NET_CLIENT_DRIVER__MAX])( [NET_CLIENT_DRIVER_L2TPV3] = net_init_l2tpv3, #endif [NET_CLIENT_DRIVER_PCAP] = net_init_pcap, +#ifdef CONFIG_VMNET + [NET_CLIENT_DRIVER_VMNET_HOST] = net_init_vmnet_host, + [NET_CLIENT_DRIVER_VMNET_SHARED] = net_init_vmnet_shared, + [NET_CLIENT_DRIVER_VMNET_BRIDGED] = net_init_vmnet_bridged, +#endif /* CONFIG_VMNET */ }; @@ -1013,19 +1060,23 @@ static int net_client_init1(const Netdev *netdev, bool is_netdev, Error **errp) if (is_netdev) { if (netdev->type == NET_CLIENT_DRIVER_NIC || !net_client_init_fun[netdev->type]) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "type", - "a netdev backend type"); + error_setg(errp, "network backend '%s' is not compiled into this binary", + 
NetClientDriver_str(netdev->type)); return -1; } } else { if (netdev->type == NET_CLIENT_DRIVER_NONE) { return 0; /* nothing to do */ } - if (netdev->type == NET_CLIENT_DRIVER_HUBPORT || - !net_client_init_fun[netdev->type]) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "type", - "a net backend type (maybe it is not compiled " - "into this binary)"); + if (netdev->type == NET_CLIENT_DRIVER_HUBPORT) { + error_setg(errp, "network backend '%s' is only supported with -netdev/-nic", + NetClientDriver_str(netdev->type)); + return -1; + } + + if (!net_client_init_fun[netdev->type]) { + error_setg(errp, "network backend '%s' is not compiled into this binary", + NetClientDriver_str(netdev->type)); return -1; } @@ -1065,6 +1116,8 @@ void show_netdevs(void) int idx; const char *available_netdevs[] = { "socket", + "stream", + "dgram", "hubport", "tap", #ifdef CONFIG_SLIRP @@ -1087,6 +1140,11 @@ void show_netdevs(void) #endif #ifdef CONFIG_VHOST_VDPA "vhost-vdpa", +#endif +#ifdef CONFIG_VMNET + "vmnet-host", + "vmnet-shared", + "vmnet-bridged", #endif }; @@ -1532,36 +1590,97 @@ out: return ret; } -int net_init_clients(Error **errp) +static void netdev_init_modern(void) +{ + while (!QSIMPLEQ_EMPTY(&nd_queue)) { + NetdevQueueEntry *nd = QSIMPLEQ_FIRST(&nd_queue); + + QSIMPLEQ_REMOVE_HEAD(&nd_queue, entry); + loc_push_restore(&nd->loc); + net_client_init1(nd->nd, true, &error_fatal); + loc_pop(&nd->loc); + qapi_free_Netdev(nd->nd); + g_free(nd); + } +} + +void net_init_clients(void) { net_change_state_entry = qemu_add_vm_change_state_handler(net_vm_change_state_handler, NULL); QTAILQ_INIT(&net_clients); - if (qemu_opts_foreach(qemu_find_opts("netdev"), - net_init_netdev, NULL, errp)) { - return -1; - } + netdev_init_modern(); - if (qemu_opts_foreach(qemu_find_opts("nic"), net_param_nic, NULL, errp)) { - return -1; - } + qemu_opts_foreach(qemu_find_opts("netdev"), net_init_netdev, NULL, + &error_fatal); - if (qemu_opts_foreach(qemu_find_opts("net"), net_init_client, NULL, errp)) { - return -1; - } + qemu_opts_foreach(qemu_find_opts("nic"), net_param_nic, NULL, + &error_fatal); - return 0; + qemu_opts_foreach(qemu_find_opts("net"), net_init_client, NULL, + &error_fatal); } -int net_client_parse(QemuOptsList *opts_list, const char *optarg) +/* + * Does this -netdev argument use modern rather than traditional syntax? + * Modern syntax is to be parsed with netdev_parse_modern(). + * Traditional syntax is to be parsed with net_client_parse(). + */ +bool netdev_is_modern(const char *optarg) { - if (!qemu_opts_parse_noisily(opts_list, optarg, true)) { - return -1; + QemuOpts *opts; + bool is_modern; + const char *type; + static QemuOptsList dummy_opts = { + .name = "netdev", + .implied_opt_name = "type", + .head = QTAILQ_HEAD_INITIALIZER(dummy_opts.head), + .desc = { { } }, + }; + + if (optarg[0] == '{') { + /* This is JSON, which means it's modern syntax */ + return true; } - return 0; + opts = qemu_opts_create(&dummy_opts, NULL, false, &error_abort); + qemu_opts_do_parse(opts, optarg, dummy_opts.implied_opt_name, + &error_abort); + type = qemu_opt_get(opts, "type"); + is_modern = !g_strcmp0(type, "stream") || !g_strcmp0(type, "dgram"); + + qemu_opts_reset(&dummy_opts); + + return is_modern; +} + +/* + * netdev_parse_modern() uses modern, more expressive syntax than + * net_client_parse(), but supports only the -netdev option. + * netdev_parse_modern() appends to @nd_queue, whereas net_client_parse() + * appends to @qemu_netdev_opts. 
+ */ +void netdev_parse_modern(const char *optarg) +{ + Visitor *v; + NetdevQueueEntry *nd; + + v = qobject_input_visitor_new_str(optarg, "type", &error_fatal); + nd = g_new(NetdevQueueEntry, 1); + visit_type_Netdev(v, NULL, &nd->nd, &error_fatal); + visit_free(v); + loc_save(&nd->loc); + + QSIMPLEQ_INSERT_TAIL(&nd_queue, nd, entry); +} + +void net_client_parse(QemuOptsList *opts_list, const char *optarg) +{ + if (!qemu_opts_parse_noisily(opts_list, optarg, true)) { + exit(1); + } } /* From FreeBSD */ diff --git a/net/slirp.c b/net/slirp.c index d5f6d621c1..a32a5f3661 100644 --- a/net/slirp.c +++ b/net/slirp.c @@ -27,7 +27,7 @@ #include "net/slirp.h" -#if defined(CONFIG_SLIRP_SMBD) +#if defined(CONFIG_SMBD_COMMAND) #include #include #endif @@ -91,7 +91,7 @@ typedef struct SlirpState { Slirp *slirp; Notifier poll_notifier; Notifier exit_notifier; -#if defined(CONFIG_SLIRP_SMBD) +#if defined(CONFIG_SMBD_COMMAND) gchar *smb_dir; #endif GSList *fwd; @@ -104,7 +104,7 @@ static QTAILQ_HEAD(, SlirpState) slirp_stacks = static int slirp_hostfwd(SlirpState *s, const char *redir_str, Error **errp); static int slirp_guestfwd(SlirpState *s, const char *config_str, Error **errp); -#if defined(CONFIG_SLIRP_SMBD) +#if defined(CONFIG_SMBD_COMMAND) static int slirp_smb(SlirpState *s, const char *exported_dir, struct in_addr vserver_addr, Error **errp); static void slirp_smb_cleanup(SlirpState *s); @@ -184,23 +184,66 @@ static int64_t net_slirp_clock_get_ns(void *opaque) return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); } +typedef struct SlirpTimer SlirpTimer; +struct SlirpTimer { + QEMUTimer timer; +#if SLIRP_CHECK_VERSION(4,7,0) + Slirp *slirp; + SlirpTimerId id; + void *cb_opaque; +#endif +}; + +#if SLIRP_CHECK_VERSION(4,7,0) +static void net_slirp_init_completed(Slirp *slirp, void *opaque) +{ + SlirpState *s = opaque; + s->slirp = slirp; +} + +static void net_slirp_timer_cb(void *opaque) +{ + SlirpTimer *t = opaque; + slirp_handle_timer(t->slirp, t->id, t->cb_opaque); +} + +static void *net_slirp_timer_new_opaque(SlirpTimerId id, + void *cb_opaque, void *opaque) +{ + SlirpState *s = opaque; + SlirpTimer *t = g_new(SlirpTimer, 1); + t->slirp = s->slirp; + t->id = id; + t->cb_opaque = cb_opaque; + timer_init_full(&t->timer, NULL, QEMU_CLOCK_VIRTUAL, + SCALE_MS, QEMU_TIMER_ATTR_EXTERNAL, + net_slirp_timer_cb, t); + return t; +} +#else static void *net_slirp_timer_new(SlirpTimerCb cb, void *cb_opaque, void *opaque) { - return timer_new_full(NULL, QEMU_CLOCK_VIRTUAL, - SCALE_MS, QEMU_TIMER_ATTR_EXTERNAL, - cb, cb_opaque); + SlirpTimer *t = g_new(SlirpTimer, 1); + timer_init_full(&t->timer, NULL, QEMU_CLOCK_VIRTUAL, + SCALE_MS, QEMU_TIMER_ATTR_EXTERNAL, + cb, cb_opaque); + return t; } +#endif static void net_slirp_timer_free(void *timer, void *opaque) { - timer_free(timer); + SlirpTimer *t = timer; + timer_del(&t->timer); + g_free(t); } static void net_slirp_timer_mod(void *timer, int64_t expire_timer, void *opaque) { - timer_mod(timer, expire_timer); + SlirpTimer *t = timer; + timer_mod(&t->timer, expire_timer); } static void net_slirp_register_poll_fd(int fd, void *opaque) @@ -222,7 +265,12 @@ static const SlirpCb slirp_cb = { .send_packet = net_slirp_send_packet, .guest_error = net_slirp_guest_error, .clock_get_ns = net_slirp_clock_get_ns, +#if SLIRP_CHECK_VERSION(4,7,0) + .init_completed = net_slirp_init_completed, + .timer_new_opaque = net_slirp_timer_new_opaque, +#else .timer_new = net_slirp_timer_new, +#endif .timer_free = net_slirp_timer_free, .timer_mod = net_slirp_timer_mod, .register_poll_fd = 
net_slirp_register_poll_fd, @@ -377,9 +425,10 @@ static int net_slirp_init(NetClientState *peer, const char *model, struct in6_addr ip6_prefix; struct in6_addr ip6_host; struct in6_addr ip6_dns; -#if defined(CONFIG_SLIRP_SMBD) +#if defined(CONFIG_SMBD_COMMAND) struct in_addr smbsrv = { .s_addr = 0 }; #endif + SlirpConfig cfg = { 0 }; NetClientState *nc; SlirpState *s; char buf[20]; @@ -487,7 +536,7 @@ static int net_slirp_init(NetClientState *peer, const char *model, return -1; } -#if defined(CONFIG_SLIRP_SMBD) +#if defined(CONFIG_SMBD_COMMAND) if (vsmbserver && !inet_aton(vsmbserver, &smbsrv)) { error_setg(errp, "Failed to parse SMB address"); return -1; @@ -562,18 +611,31 @@ static int net_slirp_init(NetClientState *peer, const char *model, nc = qemu_new_net_client(&net_slirp_info, peer, model, name); - snprintf(nc->info_str, sizeof(nc->info_str), - "net=%s,restrict=%s", inet_ntoa(net), - restricted ? "on" : "off"); + qemu_set_info_str(nc, "net=%s,restrict=%s", inet_ntoa(net), + restricted ? "on" : "off"); s = DO_UPCAST(SlirpState, nc, nc); - s->slirp = slirp_init(restricted, ipv4, net, mask, host, - ipv6, ip6_prefix, vprefix6_len, ip6_host, - vhostname, tftp_server_name, - tftp_export, bootfile, dhcp, - dns, ip6_dns, dnssearch, vdomainname, - &slirp_cb, s); + cfg.version = SLIRP_CHECK_VERSION(4,7,0) ? 4 : 1; + cfg.restricted = restricted; + cfg.in_enabled = ipv4; + cfg.vnetwork = net; + cfg.vnetmask = mask; + cfg.vhost = host; + cfg.in6_enabled = ipv6; + cfg.vprefix_addr6 = ip6_prefix; + cfg.vprefix_len = vprefix6_len; + cfg.vhost6 = ip6_host; + cfg.vhostname = vhostname; + cfg.tftp_server_name = tftp_server_name; + cfg.tftp_path = tftp_export; + cfg.bootfile = bootfile; + cfg.vdhcp_start = dhcp; + cfg.vnameserver = dns; + cfg.vnameserver6 = ip6_dns; + cfg.vdnssearch = dnssearch; + cfg.vdomainname = vdomainname; + s->slirp = slirp_new(&cfg, &slirp_cb, s); QTAILQ_INSERT_TAIL(&slirp_stacks, s, entry); /* @@ -602,7 +664,7 @@ static int net_slirp_init(NetClientState *peer, const char *model, } } } -#if defined(CONFIG_SLIRP_SMBD) +#if defined(CONFIG_SMBD_COMMAND) if (smb_export) { if (slirp_smb(s, smb_export, smbsrv, errp) < 0) { goto error; @@ -803,7 +865,7 @@ void hmp_hostfwd_add(Monitor *mon, const QDict *qdict) } -#if defined(CONFIG_SLIRP_SMBD) +#if defined(CONFIG_SMBD_COMMAND) /* automatic user mode samba server configuration */ static void slirp_smb_cleanup(SlirpState *s) @@ -918,7 +980,7 @@ static int slirp_smb(SlirpState* s, const char *exported_dir, return 0; } -#endif /* defined(CONFIG_SLIRP_SMBD) */ +#endif /* defined(CONFIG_SMBD_COMMAND) */ static int guestfwd_can_read(void *opaque) { diff --git a/net/socket.c b/net/socket.c index 15b410e8d8..e62137c839 100644 --- a/net/socket.c +++ b/net/socket.c @@ -27,7 +27,6 @@ #include "clients.h" #include "monitor/monitor.h" #include "qapi/error.h" -#include "qemu-common.h" #include "qemu/error-report.h" #include "qemu/option.h" #include "qemu/sockets.h" @@ -120,9 +119,9 @@ static ssize_t net_socket_receive_dgram(NetClientState *nc, const uint8_t *buf, do { if (s->dgram_dst.sin_family != AF_UNIX) { - ret = qemu_sendto(s->fd, buf, size, 0, - (struct sockaddr *)&s->dgram_dst, - sizeof(s->dgram_dst)); + ret = sendto(s->fd, buf, size, 0, + (struct sockaddr *)&s->dgram_dst, + sizeof(s->dgram_dst)); } else { ret = send(s->fd, buf, size, 0); } @@ -163,7 +162,7 @@ static void net_socket_send(void *opaque) uint8_t buf1[NET_BUFSIZE]; const uint8_t *buf; - size = qemu_recv(s->fd, buf1, sizeof(buf1), 0); + size = recv(s->fd, buf1, sizeof(buf1), 0); if 
(size < 0) { if (errno != EWOULDBLOCK) goto eoc; @@ -180,7 +179,7 @@ static void net_socket_send(void *opaque) s->fd = -1; net_socket_rs_init(&s->rs, net_socket_rs_finalize, false); s->nc.link_down = true; - memset(s->nc.info_str, 0, sizeof(s->nc.info_str)); + qemu_set_info_str(&s->nc, "%s", ""); return; } @@ -198,7 +197,7 @@ static void net_socket_send_dgram(void *opaque) NetSocketState *s = opaque; int size; - size = qemu_recv(s->fd, s->rs.buf, sizeof(s->rs.buf), 0); + size = recv(s->fd, s->rs.buf, sizeof(s->rs.buf), 0); if (size < 0) return; if (size == 0) { @@ -246,7 +245,7 @@ static int net_socket_mcast_create(struct sockaddr_in *mcastaddr, * only on posix systems. */ val = 1; - ret = qemu_setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val)); + ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val)); if (ret < 0) { error_setg_errno(errp, errno, "can't set socket option SO_REUSEADDR"); @@ -268,8 +267,8 @@ static int net_socket_mcast_create(struct sockaddr_in *mcastaddr, imr.imr_interface.s_addr = htonl(INADDR_ANY); } - ret = qemu_setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, - &imr, sizeof(struct ip_mreq)); + ret = setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, + &imr, sizeof(struct ip_mreq)); if (ret < 0) { error_setg_errno(errp, errno, "can't add socket to multicast group %s", @@ -279,8 +278,8 @@ static int net_socket_mcast_create(struct sockaddr_in *mcastaddr, /* Force mcast msgs to loopback (eg. several QEMUs in same host */ loop = 1; - ret = qemu_setsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP, - &loop, sizeof(loop)); + ret = setsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP, + &loop, sizeof(loop)); if (ret < 0) { error_setg_errno(errp, errno, "can't force multicast message to loopback"); @@ -289,8 +288,8 @@ static int net_socket_mcast_create(struct sockaddr_in *mcastaddr, /* If a bind address is given, only send packets from that address */ if (localaddr != NULL) { - ret = qemu_setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, - localaddr, sizeof(*localaddr)); + ret = setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, + localaddr, sizeof(*localaddr)); if (ret < 0) { error_setg_errno(errp, errno, "can't set the default network send interface"); @@ -298,7 +297,7 @@ static int net_socket_mcast_create(struct sockaddr_in *mcastaddr, } } - qemu_set_nonblock(fd); + qemu_socket_set_nonblock(fd); return fd; fail: if (fd >= 0) @@ -388,16 +387,15 @@ static NetSocketState *net_socket_fd_init_dgram(NetClientState *peer, /* mcast: save bound address as dst */ if (is_connected && mcast != NULL) { s->dgram_dst = saddr; - snprintf(nc->info_str, sizeof(nc->info_str), - "socket: fd=%d (cloned mcast=%s:%d)", - fd, inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port)); + qemu_set_info_str(nc, "socket: fd=%d (cloned mcast=%s:%d)", fd, + inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port)); } else { if (sa_type == SOCKET_ADDRESS_TYPE_UNIX) { s->dgram_dst.sin_family = AF_UNIX; } - snprintf(nc->info_str, sizeof(nc->info_str), - "socket: fd=%d %s", fd, SocketAddressType_str(sa_type)); + qemu_set_info_str(nc, "socket: fd=%d %s", fd, + SocketAddressType_str(sa_type)); } return s; @@ -431,7 +429,7 @@ static NetSocketState *net_socket_fd_init_stream(NetClientState *peer, nc = qemu_new_net_client(&net_socket_info, peer, model, name); - snprintf(nc->info_str, sizeof(nc->info_str), "socket: fd=%d", fd); + qemu_set_info_str(nc, "socket: fd=%d", fd); s = DO_UPCAST(NetSocketState, nc, nc); @@ -498,9 +496,8 @@ static void net_socket_accept(void *opaque) s->fd = fd; s->nc.link_down = false; net_socket_connect(s); - 
snprintf(s->nc.info_str, sizeof(s->nc.info_str), - "socket: connection from %s:%d", - inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port)); + qemu_set_info_str(&s->nc, "socket: connection from %s:%d", + inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port)); } static int net_socket_listen_init(NetClientState *peer, @@ -523,7 +520,7 @@ static int net_socket_listen_init(NetClientState *peer, error_setg_errno(errp, errno, "can't create stream socket"); return -1; } - qemu_set_nonblock(fd); + qemu_socket_set_nonblock(fd); socket_set_fast_reuse(fd); @@ -571,7 +568,7 @@ static int net_socket_connect_init(NetClientState *peer, error_setg_errno(errp, errno, "can't create stream socket"); return -1; } - qemu_set_nonblock(fd); + qemu_socket_set_nonblock(fd); connected = 0; for(;;) { @@ -580,8 +577,7 @@ static int net_socket_connect_init(NetClientState *peer, if (errno == EINTR || errno == EWOULDBLOCK) { /* continue */ } else if (errno == EINPROGRESS || - errno == EALREADY || - errno == EINVAL) { + errno == EALREADY) { break; } else { error_setg_errno(errp, errno, "can't connect socket"); @@ -598,9 +594,8 @@ static int net_socket_connect_init(NetClientState *peer, return -1; } - snprintf(s->nc.info_str, sizeof(s->nc.info_str), - "socket: connect to %s:%d", - inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port)); + qemu_set_info_str(&s->nc, "socket: connect to %s:%d", + inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port)); return 0; } @@ -643,9 +638,8 @@ static int net_socket_mcast_init(NetClientState *peer, s->dgram_dst = saddr; - snprintf(s->nc.info_str, sizeof(s->nc.info_str), - "socket: mcast=%s:%d", - inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port)); + qemu_set_info_str(&s->nc, "socket: mcast=%s:%d", + inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port)); return 0; } @@ -689,7 +683,7 @@ static int net_socket_udp_init(NetClientState *peer, closesocket(fd); return -1; } - qemu_set_nonblock(fd); + qemu_socket_set_nonblock(fd); s = net_socket_fd_init(peer, model, name, fd, 0, NULL, errp); if (!s) { @@ -698,9 +692,8 @@ static int net_socket_udp_init(NetClientState *peer, s->dgram_dst = raddr; - snprintf(s->nc.info_str, sizeof(s->nc.info_str), - "socket: udp=%s:%d", - inet_ntoa(raddr.sin_addr), ntohs(raddr.sin_port)); + qemu_set_info_str(&s->nc, "socket: udp=%s:%d", inet_ntoa(raddr.sin_addr), + ntohs(raddr.sin_port)); return 0; } @@ -731,7 +724,7 @@ int net_init_socket(const Netdev *netdev, const char *name, if (fd == -1) { return -1; } - ret = qemu_try_set_nonblock(fd); + ret = qemu_socket_try_set_nonblock(fd); if (ret < 0) { error_setg_errno(errp, -ret, "%s: Can't use file descriptor %d", name, fd); diff --git a/net/stream.c b/net/stream.c new file mode 100644 index 0000000000..37ff727e0c --- /dev/null +++ b/net/stream.c @@ -0,0 +1,386 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2022 Red Hat, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" + +#include "net/net.h" +#include "clients.h" +#include "monitor/monitor.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/option.h" +#include "qemu/sockets.h" +#include "qemu/iov.h" +#include "qemu/main-loop.h" +#include "qemu/cutils.h" +#include "io/channel.h" +#include "io/channel-socket.h" +#include "io/net-listener.h" +#include "qapi/qapi-events-net.h" + +typedef struct NetStreamState { + NetClientState nc; + QIOChannel *listen_ioc; + QIONetListener *listener; + QIOChannel *ioc; + guint ioc_read_tag; + guint ioc_write_tag; + SocketReadState rs; + unsigned int send_index; /* number of bytes sent*/ +} NetStreamState; + +static void net_stream_listen(QIONetListener *listener, + QIOChannelSocket *cioc, + void *opaque); + +static gboolean net_stream_writable(QIOChannel *ioc, + GIOCondition condition, + gpointer data) +{ + NetStreamState *s = data; + + s->ioc_write_tag = 0; + + qemu_flush_queued_packets(&s->nc); + + return G_SOURCE_REMOVE; +} + +static ssize_t net_stream_receive(NetClientState *nc, const uint8_t *buf, + size_t size) +{ + NetStreamState *s = DO_UPCAST(NetStreamState, nc, nc); + uint32_t len = htonl(size); + struct iovec iov[] = { + { + .iov_base = &len, + .iov_len = sizeof(len), + }, { + .iov_base = (void *)buf, + .iov_len = size, + }, + }; + struct iovec local_iov[2]; + unsigned int nlocal_iov; + size_t remaining; + ssize_t ret; + + remaining = iov_size(iov, 2) - s->send_index; + nlocal_iov = iov_copy(local_iov, 2, iov, 2, s->send_index, remaining); + ret = qio_channel_writev(s->ioc, local_iov, nlocal_iov, NULL); + if (ret == QIO_CHANNEL_ERR_BLOCK) { + ret = 0; /* handled further down */ + } + if (ret == -1) { + s->send_index = 0; + return -errno; + } + if (ret < (ssize_t)remaining) { + s->send_index += ret; + s->ioc_write_tag = qio_channel_add_watch(s->ioc, G_IO_OUT, + net_stream_writable, s, NULL); + return 0; + } + s->send_index = 0; + return size; +} + +static gboolean net_stream_send(QIOChannel *ioc, + GIOCondition condition, + gpointer data); + +static void net_stream_send_completed(NetClientState *nc, ssize_t len) +{ + NetStreamState *s = DO_UPCAST(NetStreamState, nc, nc); + + if (!s->ioc_read_tag) { + s->ioc_read_tag = qio_channel_add_watch(s->ioc, G_IO_IN, + net_stream_send, s, NULL); + } +} + +static void net_stream_rs_finalize(SocketReadState *rs) +{ + NetStreamState *s = container_of(rs, NetStreamState, rs); + + if (qemu_send_packet_async(&s->nc, rs->buf, + rs->packet_len, + net_stream_send_completed) == 0) { + if (s->ioc_read_tag) { + g_source_remove(s->ioc_read_tag); + s->ioc_read_tag = 0; + } + } +} + +static gboolean net_stream_send(QIOChannel *ioc, + GIOCondition condition, + gpointer data) +{ + NetStreamState *s = data; + int size; + int ret; + char buf1[NET_BUFSIZE]; + const char *buf; + + size = qio_channel_read(s->ioc, buf1, sizeof(buf1), NULL); + if (size < 0) { + if (errno != EWOULDBLOCK) { + goto eoc; + } + } else if (size == 0) { + /* end of connection */ + eoc: + s->ioc_read_tag = 0; + if (s->ioc_write_tag) 
{ + g_source_remove(s->ioc_write_tag); + s->ioc_write_tag = 0; + } + if (s->listener) { + qio_net_listener_set_client_func(s->listener, net_stream_listen, + s, NULL); + } + object_unref(OBJECT(s->ioc)); + s->ioc = NULL; + + net_socket_rs_init(&s->rs, net_stream_rs_finalize, false); + s->nc.link_down = true; + qemu_set_info_str(&s->nc, "%s", ""); + + qapi_event_send_netdev_stream_disconnected(s->nc.name); + + return G_SOURCE_REMOVE; + } + buf = buf1; + + ret = net_fill_rstate(&s->rs, (const uint8_t *)buf, size); + + if (ret == -1) { + goto eoc; + } + + return G_SOURCE_CONTINUE; +} + +static void net_stream_cleanup(NetClientState *nc) +{ + NetStreamState *s = DO_UPCAST(NetStreamState, nc, nc); + if (s->ioc) { + if (QIO_CHANNEL_SOCKET(s->ioc)->fd != -1) { + if (s->ioc_read_tag) { + g_source_remove(s->ioc_read_tag); + s->ioc_read_tag = 0; + } + if (s->ioc_write_tag) { + g_source_remove(s->ioc_write_tag); + s->ioc_write_tag = 0; + } + } + object_unref(OBJECT(s->ioc)); + s->ioc = NULL; + } + if (s->listen_ioc) { + if (s->listener) { + qio_net_listener_disconnect(s->listener); + object_unref(OBJECT(s->listener)); + s->listener = NULL; + } + object_unref(OBJECT(s->listen_ioc)); + s->listen_ioc = NULL; + } +} + +static NetClientInfo net_stream_info = { + .type = NET_CLIENT_DRIVER_STREAM, + .size = sizeof(NetStreamState), + .receive = net_stream_receive, + .cleanup = net_stream_cleanup, +}; + +static void net_stream_listen(QIONetListener *listener, + QIOChannelSocket *cioc, + void *opaque) +{ + NetStreamState *s = opaque; + SocketAddress *addr; + char *uri; + + object_ref(OBJECT(cioc)); + + qio_net_listener_set_client_func(s->listener, NULL, s, NULL); + + s->ioc = QIO_CHANNEL(cioc); + qio_channel_set_name(s->ioc, "stream-server"); + s->nc.link_down = false; + + s->ioc_read_tag = qio_channel_add_watch(s->ioc, G_IO_IN, net_stream_send, + s, NULL); + + if (cioc->localAddr.ss_family == AF_UNIX) { + addr = qio_channel_socket_get_local_address(cioc, NULL); + } else { + addr = qio_channel_socket_get_remote_address(cioc, NULL); + } + g_assert(addr != NULL); + uri = socket_uri(addr); + qemu_set_info_str(&s->nc, "%s", uri); + g_free(uri); + qapi_event_send_netdev_stream_connected(s->nc.name, addr); + qapi_free_SocketAddress(addr); +} + +static void net_stream_server_listening(QIOTask *task, gpointer opaque) +{ + NetStreamState *s = opaque; + QIOChannelSocket *listen_sioc = QIO_CHANNEL_SOCKET(s->listen_ioc); + SocketAddress *addr; + int ret; + + if (listen_sioc->fd < 0) { + qemu_set_info_str(&s->nc, "connection error"); + return; + } + + addr = qio_channel_socket_get_local_address(listen_sioc, NULL); + g_assert(addr != NULL); + ret = qemu_socket_try_set_nonblock(listen_sioc->fd); + if (addr->type == SOCKET_ADDRESS_TYPE_FD && ret < 0) { + qemu_set_info_str(&s->nc, "can't use file descriptor %s (errno %d)", + addr->u.fd.str, -ret); + return; + } + g_assert(ret == 0); + qapi_free_SocketAddress(addr); + + s->nc.link_down = true; + s->listener = qio_net_listener_new(); + + net_socket_rs_init(&s->rs, net_stream_rs_finalize, false); + qio_net_listener_set_client_func(s->listener, net_stream_listen, s, NULL); + qio_net_listener_add(s->listener, listen_sioc); +} + +static int net_stream_server_init(NetClientState *peer, + const char *model, + const char *name, + SocketAddress *addr, + Error **errp) +{ + NetClientState *nc; + NetStreamState *s; + QIOChannelSocket *listen_sioc = qio_channel_socket_new(); + + nc = qemu_new_net_client(&net_stream_info, peer, model, name); + s = DO_UPCAST(NetStreamState, nc, nc); + + 
s->listen_ioc = QIO_CHANNEL(listen_sioc); + qio_channel_socket_listen_async(listen_sioc, addr, 0, + net_stream_server_listening, s, + NULL, NULL); + + return 0; +} + +static void net_stream_client_connected(QIOTask *task, gpointer opaque) +{ + NetStreamState *s = opaque; + QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(s->ioc); + SocketAddress *addr; + gchar *uri; + int ret; + + if (sioc->fd < 0) { + qemu_set_info_str(&s->nc, "connection error"); + goto error; + } + + addr = qio_channel_socket_get_remote_address(sioc, NULL); + g_assert(addr != NULL); + uri = socket_uri(addr); + qemu_set_info_str(&s->nc, "%s", uri); + g_free(uri); + + ret = qemu_socket_try_set_nonblock(sioc->fd); + if (addr->type == SOCKET_ADDRESS_TYPE_FD && ret < 0) { + qemu_set_info_str(&s->nc, "can't use file descriptor %s (errno %d)", + addr->u.fd.str, -ret); + qapi_free_SocketAddress(addr); + goto error; + } + g_assert(ret == 0); + + net_socket_rs_init(&s->rs, net_stream_rs_finalize, false); + + /* Disable Nagle algorithm on TCP sockets to reduce latency */ + qio_channel_set_delay(s->ioc, false); + + s->ioc_read_tag = qio_channel_add_watch(s->ioc, G_IO_IN, net_stream_send, + s, NULL); + s->nc.link_down = false; + qapi_event_send_netdev_stream_connected(s->nc.name, addr); + qapi_free_SocketAddress(addr); + + return; +error: + object_unref(OBJECT(s->ioc)); + s->ioc = NULL; +} + +static int net_stream_client_init(NetClientState *peer, + const char *model, + const char *name, + SocketAddress *addr, + Error **errp) +{ + NetStreamState *s; + NetClientState *nc; + QIOChannelSocket *sioc = qio_channel_socket_new(); + + nc = qemu_new_net_client(&net_stream_info, peer, model, name); + s = DO_UPCAST(NetStreamState, nc, nc); + + s->ioc = QIO_CHANNEL(sioc); + s->nc.link_down = true; + + qio_channel_socket_connect_async(sioc, addr, + net_stream_client_connected, s, + NULL, NULL); + + return 0; +} + +int net_init_stream(const Netdev *netdev, const char *name, + NetClientState *peer, Error **errp) +{ + const NetdevStreamOptions *sock; + + assert(netdev->type == NET_CLIENT_DRIVER_STREAM); + sock = &netdev->u.stream; + + if (!sock->has_server || !sock->server) { + return net_stream_client_init(peer, "stream", name, sock->addr, errp); + } + return net_stream_server_init(peer, "stream", name, sock->addr, errp); +} diff --git a/net/tap-bsd.c b/net/tap-bsd.c index e45a6d124e..005ce05c6e 100644 --- a/net/tap-bsd.c +++ b/net/tap-bsd.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/error.h" #include "tap_int.h" #include "qemu/cutils.h" @@ -99,7 +98,7 @@ int tap_open(char *ifname, int ifname_size, int *vnet_hdr, return -1; } } - fcntl(fd, F_SETFL, O_NONBLOCK); + g_unix_set_fd_nonblocking(fd, true, NULL); return fd; } @@ -190,7 +189,7 @@ int tap_open(char *ifname, int ifname_size, int *vnet_hdr, goto error; } - fcntl(fd, F_SETFL, O_NONBLOCK); + g_unix_set_fd_nonblocking(fd, true, NULL); return fd; error: diff --git a/net/tap-linux.c b/net/tap-linux.c index 9584769740..304ff45071 100644 --- a/net/tap-linux.c +++ b/net/tap-linux.c @@ -24,7 +24,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "tap_int.h" #include "tap-linux.h" #include "net/tap.h" @@ -114,7 +113,7 @@ int tap_open(char *ifname, int ifname_size, int *vnet_hdr, return -1; } pstrcpy(ifname, ifname_size, ifr.ifr_name); - fcntl(fd, F_SETFL, O_NONBLOCK); + g_unix_set_fd_nonblocking(fd, true, NULL); return fd; } @@ -150,6 +149,7 @@ void tap_set_sndbuf(int fd, const NetdevTapOptions *tap, Error **errp) int tap_probe_vnet_hdr(int fd, 
Error **errp) { struct ifreq ifr; + memset(&ifr, 0, sizeof(ifr)); if (ioctl(fd, TUNGETIFF, &ifr) != 0) { /* TUNGETIFF is available since kernel v2.6.27 */ diff --git a/net/tap-linux.h b/net/tap-linux.h index 1d06fe0de6..bbbb62c2a7 100644 --- a/net/tap-linux.h +++ b/net/tap-linux.h @@ -45,10 +45,10 @@ #define IFF_DETACH_QUEUE 0x0400 /* Features for GSO (TUNSETOFFLOAD). */ -#define TUN_F_CSUM 0x01 /* You can hand me unchecksummed packets. */ -#define TUN_F_TSO4 0x02 /* I can handle TSO for IPv4 packets */ -#define TUN_F_TSO6 0x04 /* I can handle TSO for IPv6 packets */ -#define TUN_F_TSO_ECN 0x08 /* I can handle TSO with ECN bits. */ -#define TUN_F_UFO 0x10 /* I can handle UFO packets */ +#define TUN_F_CSUM 0x01 /* You can hand me unchecksummed packets. */ +#define TUN_F_TSO4 0x02 /* I can handle TSO for IPv4 packets */ +#define TUN_F_TSO6 0x04 /* I can handle TSO for IPv6 packets */ +#define TUN_F_TSO_ECN 0x08 /* I can handle TSO with ECN bits. */ +#define TUN_F_UFO 0x10 /* I can handle UFO packets */ #endif /* QEMU_TAP_LINUX_H */ diff --git a/net/tap-solaris.c b/net/tap-solaris.c index d85224242b..a44f8805c2 100644 --- a/net/tap-solaris.c +++ b/net/tap-solaris.c @@ -27,7 +27,6 @@ #include "tap_int.h" #include "qemu/ctype.h" #include "qemu/cutils.h" -#include "qemu-common.h" #include #include @@ -199,7 +198,7 @@ int tap_open(char *ifname, int ifname_size, int *vnet_hdr, return -1; } } - fcntl(fd, F_SETFL, O_NONBLOCK); + g_unix_set_fd_nonblocking(fd, true, NULL); return fd; } diff --git a/net/tap-win32.c b/net/tap-win32.c index 6096972f5d..a49c28ba5d 100644 --- a/net/tap-win32.c +++ b/net/tap-win32.c @@ -29,7 +29,6 @@ #include "qemu/osdep.h" #include "tap_int.h" -#include "qemu-common.h" #include "clients.h" /* net_init_tap */ #include "net/eth.h" #include "net/net.h" @@ -790,8 +789,7 @@ static int tap_win32_init(NetClientState *peer, const char *model, s = DO_UPCAST(TAPState, nc, nc); - snprintf(s->nc.info_str, sizeof(s->nc.info_str), - "tap: ifname=%s", ifname); + qemu_set_info_str(&s->nc, "tap: ifname=%s", ifname); s->handle = handle; diff --git a/net/tap.c b/net/tap.c index f5686bbf77..1210a0436d 100644 --- a/net/tap.c +++ b/net/tap.c @@ -38,7 +38,6 @@ #include "monitor/monitor.h" #include "sysemu/sysemu.h" #include "qapi/error.h" -#include "qemu-common.h" #include "qemu/cutils.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" @@ -620,7 +619,10 @@ int net_init_bridge(const Netdev *netdev, const char *name, return -1; } - qemu_set_nonblock(fd); + if (!g_unix_set_fd_nonblocking(fd, true, NULL)) { + error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + return -1; + } vnet_hdr = tap_probe_vnet_hdr(fd, errp); if (vnet_hdr < 0) { close(fd); @@ -628,8 +630,7 @@ int net_init_bridge(const Netdev *netdev, const char *name, } s = net_tap_fd_init(peer, "bridge", name, fd, vnet_hdr); - snprintf(s->nc.info_str, sizeof(s->nc.info_str), "helper=%s,br=%s", helper, - br); + qemu_set_info_str(&s->nc, "helper=%s,br=%s", helper, br); return 0; } @@ -684,18 +685,16 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, tap_set_sndbuf(s->fd, tap, &err); if (err) { error_propagate(errp, err); - return; + goto failed; } if (tap->has_fd || tap->has_fds) { - snprintf(s->nc.info_str, sizeof(s->nc.info_str), "fd=%d", fd); + qemu_set_info_str(&s->nc, "fd=%d", fd); } else if (tap->has_helper) { - snprintf(s->nc.info_str, sizeof(s->nc.info_str), "helper=%s", - tap->helper); + qemu_set_info_str(&s->nc, "helper=%s", tap->helper); } else { - snprintf(s->nc.info_str, 
sizeof(s->nc.info_str), - "ifname=%s,script=%s,downscript=%s", ifname, script, - downscript); + qemu_set_info_str(&s->nc, "ifname=%s,script=%s,downscript=%s", ifname, + script, downscript); if (strcmp(downscript, "no") != 0) { snprintf(s->down_script, sizeof(s->down_script), "%s", downscript); @@ -717,8 +716,6 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, } if (vhostfdname) { - int ret; - vhostfd = monitor_fd_param(monitor_cur(), vhostfdname, &err); if (vhostfd == -1) { if (tap->has_vhostforce && tap->vhostforce) { @@ -726,13 +723,12 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, } else { warn_report_err(err); } - return; + goto failed; } - ret = qemu_try_set_nonblock(vhostfd); - if (ret < 0) { - error_setg_errno(errp, -ret, "%s: Can't use file descriptor %d", + if (!g_unix_set_fd_nonblocking(vhostfd, true, NULL)) { + error_setg_errno(errp, errno, "%s: Can't use file descriptor %d", name, fd); - return; + goto failed; } } else { vhostfd = open("/dev/vhost-net", O_RDWR); @@ -744,11 +740,15 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, warn_report("tap: open vhost char device failed: %s", strerror(errno)); } - return; + goto failed; + } + if (!g_unix_set_fd_nonblocking(vhostfd, true, NULL)) { + error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + goto failed; } - qemu_set_nonblock(vhostfd); } options.opaque = (void *)(uintptr_t)vhostfd; + options.nvqs = 2; s->vhost_net = vhost_net_init(&options); if (!s->vhost_net) { @@ -757,11 +757,17 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, } else { warn_report(VHOST_NET_INIT_FAILED); } - return; + goto failed; } } else if (vhostfdname) { error_setg(errp, "vhostfd(s)= is not valid without vhost"); + goto failed; } + + return; + +failed: + qemu_del_net_client(&s->nc); } static int get_fds(char *str, char *fds[], int max) @@ -832,9 +838,8 @@ int net_init_tap(const Netdev *netdev, const char *name, return -1; } - ret = qemu_try_set_nonblock(fd); - if (ret < 0) { - error_setg_errno(errp, -ret, "%s: Can't use file descriptor %d", + if (!g_unix_set_fd_nonblocking(fd, true, NULL)) { + error_setg_errno(errp, errno, "%s: Can't use file descriptor %d", name, fd); close(fd); return -1; @@ -889,9 +894,9 @@ int net_init_tap(const Netdev *netdev, const char *name, goto free_fail; } - ret = qemu_try_set_nonblock(fd); - if (ret < 0) { - error_setg_errno(errp, -ret, "%s: Can't use file descriptor %d", + ret = g_unix_set_fd_nonblocking(fd, true, NULL); + if (!ret) { + error_setg_errno(errp, errno, "%s: Can't use file descriptor %d", name, fd); goto free_fail; } @@ -899,6 +904,7 @@ int net_init_tap(const Netdev *netdev, const char *name, if (i == 0) { vnet_hdr = tap_probe_vnet_hdr(fd, errp); if (vnet_hdr < 0) { + ret = -1; goto free_fail; } } else if (vnet_hdr != tap_probe_vnet_hdr(fd, NULL)) { @@ -945,7 +951,10 @@ free_fail: return -1; } - qemu_set_nonblock(fd); + if (!g_unix_set_fd_nonblocking(fd, true, NULL)) { + error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + return -1; + } vnet_hdr = tap_probe_vnet_hdr(fd, errp); if (vnet_hdr < 0) { close(fd); diff --git a/net/trace-events b/net/trace-events index d7a17256cc..823a071bdc 100644 --- a/net/trace-events +++ b/net/trace-events @@ -9,6 +9,7 @@ vhost_user_event(const char *chr, int event) "chr: %s got event: %d" # colo.c colo_proxy_main(const char *chr) ": %s" +colo_proxy_main_vnet_info(const char *sta, uint32_t vnet_hdr, int size) ": %s 
pkt->vnet_hdr_len = %u, pkt->size = %d" # colo-compare.c colo_compare_main(const char *chr) ": %s" diff --git a/net/util.h b/net/util.h index 358185fd50..288312979f 100644 --- a/net/util.h +++ b/net/util.h @@ -30,7 +30,7 @@ * Structure of an internet header, naked of options. */ struct ip { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN uint8_t ip_v:4, /* version */ ip_hl:4; /* header length */ #else diff --git a/net/vde.c b/net/vde.c index 99189cccb6..c0a08662cc 100644 --- a/net/vde.c +++ b/net/vde.c @@ -27,7 +27,6 @@ #include "net/net.h" #include "clients.h" -#include "qemu-common.h" #include "qemu/option.h" #include "qemu/main-loop.h" #include "qapi/error.h" @@ -99,8 +98,7 @@ static int net_vde_init(NetClientState *peer, const char *model, nc = qemu_new_net_client(&net_vde_info, peer, model, name); - snprintf(nc->info_str, sizeof(nc->info_str), "sock=%s,fd=%d", - sock, vde_datafd(vde)); + qemu_set_info_str(nc, "sock=%s,fd=%d", sock, vde_datafd(vde)); s = DO_UPCAST(VDEState, nc, nc); diff --git a/net/vhost-user.c b/net/vhost-user.c index 6adfcd623a..3a6b90da86 100644 --- a/net/vhost-user.c +++ b/net/vhost-user.c @@ -85,6 +85,7 @@ static int vhost_user_start(int queues, NetClientState *ncs[], options.net_backend = ncs[i]; options.opaque = be; options.busyloop_timeout = 0; + options.nvqs = 2; net = vhost_net_init(&options); if (!net) { error_report("failed to init vhost_net for queue %d", i); @@ -197,6 +198,19 @@ static bool vhost_user_has_ufo(NetClientState *nc) return true; } +static bool vhost_user_check_peer_type(NetClientState *nc, ObjectClass *oc, + Error **errp) +{ + const char *driver = object_class_get_name(oc); + + if (!g_str_has_prefix(driver, "virtio-net-")) { + error_setg(errp, "vhost-user requires frontend driver virtio-net-*"); + return false; + } + + return true; +} + static NetClientInfo net_vhost_user_info = { .type = NET_CLIENT_DRIVER_VHOST_USER, .size = sizeof(NetVhostUserState), @@ -206,6 +220,7 @@ static NetClientInfo net_vhost_user_info = { .has_ufo = vhost_user_has_ufo, .set_vnet_be = vhost_user_set_vnet_endianness, .set_vnet_le = vhost_user_set_vnet_endianness, + .check_peer_type = vhost_user_check_peer_type, }; static gboolean net_vhost_user_watch(void *do_not_use, GIOCondition cond, @@ -326,8 +341,7 @@ static int net_vhost_user_init(NetClientState *peer, const char *device, user = g_new0(struct VhostUserState, 1); for (i = 0; i < queues; i++) { nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name); - snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s", - i, chr->label); + qemu_set_info_str(nc, "vhost-user%d to %s", i, chr->label); nc->queue_index = i; if (!nc0) { nc0 = nc; @@ -396,27 +410,6 @@ static Chardev *net_vhost_claim_chardev( return chr; } -static int net_vhost_check_net(void *opaque, QemuOpts *opts, Error **errp) -{ - const char *name = opaque; - const char *driver, *netdev; - - driver = qemu_opt_get(opts, "driver"); - netdev = qemu_opt_get(opts, "netdev"); - - if (!driver || !netdev) { - return 0; - } - - if (strcmp(netdev, name) == 0 && - !g_str_has_prefix(driver, "virtio-net-")) { - error_setg(errp, "vhost-user requires frontend driver virtio-net-*"); - return -1; - } - - return 0; -} - int net_init_vhost_user(const Netdev *netdev, const char *name, NetClientState *peer, Error **errp) { @@ -432,12 +425,6 @@ int net_init_vhost_user(const Netdev *netdev, const char *name, return -1; } - /* verify net frontend */ - if (qemu_opts_foreach(qemu_find_opts("device"), net_vhost_check_net, - (char *)name, errp)) { - return -1; - } 
- queues = vhost_user_opts->has_queues ? vhost_user_opts->queues : 1; if (queues < 1 || queues > MAX_QUEUE_NUM) { error_setg(errp, diff --git a/net/vhost-vdpa-stub.c b/net/vhost-vdpa-stub.c new file mode 100644 index 0000000000..1732ed2443 --- /dev/null +++ b/net/vhost-vdpa-stub.c @@ -0,0 +1,21 @@ +/* + * vhost-vdpa-stub.c + * + * Copyright (c) 2022 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "clients.h" +#include "net/vhost-vdpa.h" +#include "qapi/error.h" + +int net_init_vhost_vdpa(const Netdev *netdev, const char *name, + NetClientState *peer, Error **errp) +{ + error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*"); + return -1; +} diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c index 19187dce8c..e533f8a348 100644 --- a/net/vhost-vdpa.c +++ b/net/vhost-vdpa.c @@ -11,13 +11,17 @@ #include "qemu/osdep.h" #include "clients.h" +#include "hw/virtio/virtio-net.h" #include "net/vhost_net.h" #include "net/vhost-vdpa.h" #include "hw/virtio/vhost-vdpa.h" #include "qemu/config-file.h" #include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/memalign.h" #include "qemu/option.h" #include "qapi/error.h" +#include #include #include #include "standard-headers/linux/virtio_net.h" @@ -29,7 +33,11 @@ typedef struct VhostVDPAState { NetClientState nc; struct vhost_vdpa vhost_vdpa; VHostNetState *vhost_net; - uint64_t acked_features; + + /* Control commands shadow buffers */ + void *cvq_cmd_out_buffer; + virtio_net_ctrl_ack *status; + bool started; } VhostVDPAState; @@ -41,6 +49,7 @@ const int vdpa_feature_bits[] = { VIRTIO_F_VERSION_1, VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, + VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, VIRTIO_NET_F_GSO, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, @@ -52,8 +61,16 @@ const int vdpa_feature_bits[] = { VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_MTU, + VIRTIO_NET_F_CTRL_RX, + VIRTIO_NET_F_CTRL_RX_EXTRA, + VIRTIO_NET_F_CTRL_VLAN, + VIRTIO_NET_F_CTRL_MAC_ADDR, + VIRTIO_NET_F_RSS, + VIRTIO_NET_F_MQ, + VIRTIO_NET_F_CTRL_VQ, VIRTIO_F_IOMMU_PLATFORM, VIRTIO_F_RING_PACKED, + VIRTIO_F_RING_RESET, VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_GUEST_ANNOUNCE, @@ -61,6 +78,29 @@ const int vdpa_feature_bits[] = { VHOST_INVALID_FEATURE_BIT }; +/** Supported device specific feature bits with SVQ */ +static const uint64_t vdpa_svq_device_features = + BIT_ULL(VIRTIO_NET_F_CSUM) | + BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) | + BIT_ULL(VIRTIO_NET_F_MTU) | + BIT_ULL(VIRTIO_NET_F_MAC) | + BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) | + BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) | + BIT_ULL(VIRTIO_NET_F_GUEST_ECN) | + BIT_ULL(VIRTIO_NET_F_GUEST_UFO) | + BIT_ULL(VIRTIO_NET_F_HOST_TSO4) | + BIT_ULL(VIRTIO_NET_F_HOST_TSO6) | + BIT_ULL(VIRTIO_NET_F_HOST_ECN) | + BIT_ULL(VIRTIO_NET_F_HOST_UFO) | + BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) | + BIT_ULL(VIRTIO_NET_F_STATUS) | + BIT_ULL(VIRTIO_NET_F_CTRL_VQ) | + BIT_ULL(VIRTIO_NET_F_MQ) | + BIT_ULL(VIRTIO_F_ANY_LAYOUT) | + BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) | + BIT_ULL(VIRTIO_NET_F_RSC_EXT) | + BIT_ULL(VIRTIO_NET_F_STANDBY); + VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc) { VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); @@ -82,17 +122,8 @@ static int vhost_vdpa_net_check_device_id(struct vhost_net *net) return ret; } -static void vhost_vdpa_del(NetClientState *ncs) -{ - VhostVDPAState *s; - assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); - s = 
DO_UPCAST(VhostVDPAState, nc, ncs); - if (s->vhost_net) { - vhost_net_cleanup(s->vhost_net); - } -} - -static int vhost_vdpa_add(NetClientState *ncs, void *be) +static int vhost_vdpa_add(NetClientState *ncs, void *be, + int queue_pair_index, int nvqs) { VhostNetOptions options; struct vhost_net *net = NULL; @@ -105,34 +136,44 @@ static int vhost_vdpa_add(NetClientState *ncs, void *be) options.net_backend = ncs; options.opaque = be; options.busyloop_timeout = 0; + options.nvqs = nvqs; net = vhost_net_init(&options); if (!net) { error_report("failed to init vhost_net for queue"); - goto err; - } - if (s->vhost_net) { - vhost_net_cleanup(s->vhost_net); - g_free(s->vhost_net); + goto err_init; } s->vhost_net = net; ret = vhost_vdpa_net_check_device_id(net); if (ret) { - goto err; + goto err_check; } return 0; -err: - if (net) { - vhost_net_cleanup(net); - } - vhost_vdpa_del(ncs); +err_check: + vhost_net_cleanup(net); + g_free(net); +err_init: return -1; } static void vhost_vdpa_cleanup(NetClientState *nc) { VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); + struct vhost_dev *dev = &s->vhost_net->dev; + /* + * If a peer NIC is attached, do not cleanup anything. + * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup() + * when the guest is shutting down. + */ + if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) { + return; + } + qemu_vfree(s->cvq_cmd_out_buffer); + qemu_vfree(s->status); + if (dev->vq_index + dev->nvqs == dev->vq_index_end) { + g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete); + } if (s->vhost_net) { vhost_net_cleanup(s->vhost_net); g_free(s->vhost_net); @@ -162,65 +203,535 @@ static bool vhost_vdpa_has_ufo(NetClientState *nc) } +static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc, + Error **errp) +{ + const char *driver = object_class_get_name(oc); + + if (!g_str_has_prefix(driver, "virtio-net-")) { + error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*"); + return false; + } + + return true; +} + +/** Dummy receive in case qemu falls back to userland tap networking */ +static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf, + size_t size) +{ + return size; +} + static NetClientInfo net_vhost_vdpa_info = { .type = NET_CLIENT_DRIVER_VHOST_VDPA, .size = sizeof(VhostVDPAState), + .receive = vhost_vdpa_receive, .cleanup = vhost_vdpa_cleanup, .has_vnet_hdr = vhost_vdpa_has_vnet_hdr, .has_ufo = vhost_vdpa_has_ufo, + .check_peer_type = vhost_vdpa_check_peer_type, }; -static int net_vhost_vdpa_init(NetClientState *peer, const char *device, - const char *name, const char *vhostdev) +static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr) +{ + VhostIOVATree *tree = v->iova_tree; + DMAMap needle = { + /* + * No need to specify size or to look for more translations since + * this contiguous chunk was allocated by us. + */ + .translated_addr = (hwaddr)(uintptr_t)addr, + }; + const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle); + int r; + + if (unlikely(!map)) { + error_report("Cannot locate expected map"); + return; + } + + r = vhost_vdpa_dma_unmap(v, map->iova, map->size + 1); + if (unlikely(r != 0)) { + error_report("Device cannot unmap: %s(%d)", g_strerror(r), r); + } + + vhost_iova_tree_remove(tree, *map); +} + +static size_t vhost_vdpa_net_cvq_cmd_len(void) +{ + /* + * MAC_TABLE_SET is the ctrl command that produces the longer out buffer. 
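For a concrete feel of the worst case being sized here, the arithmetic can be spelled out as below; the numeric values (2-byte control header, 4-byte virtio_net_ctrl_mac, 64 MAC table entries, 6-byte ETH_ALEN) are assumptions taken from the usual virtio-net definitions, so check the headers this tree actually builds against:

    /* Assumed sizes: virtio_net_ctrl_hdr = 2, virtio_net_ctrl_mac = 4,
     * MAC_TABLE_ENTRIES = 64, ETH_ALEN = 6. */
    size_t worst_case = 2 + 2 * 4 + 64 * 6;   /* 394 bytes for MAC_TABLE_SET */

vhost_vdpa_net_cvq_cmd_page_len() then rounds this up to a whole host page before the buffer is mapped for the device.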
+ * In buffer is always 1 byte, so it should fit here + */ + return sizeof(struct virtio_net_ctrl_hdr) + + 2 * sizeof(struct virtio_net_ctrl_mac) + + MAC_TABLE_ENTRIES * ETH_ALEN; +} + +static size_t vhost_vdpa_net_cvq_cmd_page_len(void) +{ + return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size()); +} + +/** Map CVQ buffer. */ +static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size, + bool write) +{ + DMAMap map = {}; + int r; + + map.translated_addr = (hwaddr)(uintptr_t)buf; + map.size = size - 1; + map.perm = write ? IOMMU_RW : IOMMU_RO, + r = vhost_iova_tree_map_alloc(v->iova_tree, &map); + if (unlikely(r != IOVA_OK)) { + error_report("Cannot map injected element"); + return r; + } + + r = vhost_vdpa_dma_map(v, map.iova, vhost_vdpa_net_cvq_cmd_page_len(), buf, + !write); + if (unlikely(r < 0)) { + goto dma_map_err; + } + + return 0; + +dma_map_err: + vhost_iova_tree_remove(v->iova_tree, map); + return r; +} + +static int vhost_vdpa_net_cvq_start(NetClientState *nc) +{ + VhostVDPAState *s; + int r; + + assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); + + s = DO_UPCAST(VhostVDPAState, nc, nc); + if (!s->vhost_vdpa.shadow_vqs_enabled) { + return 0; + } + + r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer, + vhost_vdpa_net_cvq_cmd_page_len(), false); + if (unlikely(r < 0)) { + return r; + } + + r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status, + vhost_vdpa_net_cvq_cmd_page_len(), true); + if (unlikely(r < 0)) { + vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer); + } + + return r; +} + +static void vhost_vdpa_net_cvq_stop(NetClientState *nc) +{ + VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); + + assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); + + if (s->vhost_vdpa.shadow_vqs_enabled) { + vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer); + vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status); + } +} + +static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len, + size_t in_len) +{ + /* Buffers for the device */ + const struct iovec out = { + .iov_base = s->cvq_cmd_out_buffer, + .iov_len = out_len, + }; + const struct iovec in = { + .iov_base = s->status, + .iov_len = sizeof(virtio_net_ctrl_ack), + }; + VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0); + int r; + + r = vhost_svq_add(svq, &out, 1, &in, 1, NULL); + if (unlikely(r != 0)) { + if (unlikely(r == -ENOSPC)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n", + __func__); + } + return r; + } + + /* + * We can poll here since we've had BQL from the time we sent the + * descriptor. 
Also, we need to take the answer before SVQ pulls by itself, + * when BQL is released + */ + return vhost_svq_poll(svq); +} + +static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class, + uint8_t cmd, const void *data, + size_t data_size) +{ + const struct virtio_net_ctrl_hdr ctrl = { + .class = class, + .cmd = cmd, + }; + + assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl)); + + memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl)); + memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size); + + return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size, + sizeof(virtio_net_ctrl_ack)); +} + +static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n) +{ + uint64_t features = n->parent_obj.guest_features; + if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) { + ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC, + VIRTIO_NET_CTRL_MAC_ADDR_SET, + n->mac, sizeof(n->mac)); + if (unlikely(dev_written < 0)) { + return dev_written; + } + + return *s->status != VIRTIO_NET_OK; + } + + return 0; +} + +static int vhost_vdpa_net_load_mq(VhostVDPAState *s, + const VirtIONet *n) +{ + struct virtio_net_ctrl_mq mq; + uint64_t features = n->parent_obj.guest_features; + ssize_t dev_written; + + if (!(features & BIT_ULL(VIRTIO_NET_F_MQ))) { + return 0; + } + + mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs); + dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ, + VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &mq, + sizeof(mq)); + if (unlikely(dev_written < 0)) { + return dev_written; + } + + return *s->status != VIRTIO_NET_OK; +} + +static int vhost_vdpa_net_load(NetClientState *nc) +{ + VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); + struct vhost_vdpa *v = &s->vhost_vdpa; + const VirtIONet *n; + int r; + + assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); + + if (!v->shadow_vqs_enabled) { + return 0; + } + + n = VIRTIO_NET(v->dev->vdev); + r = vhost_vdpa_net_load_mac(s, n); + if (unlikely(r < 0)) { + return r; + } + r = vhost_vdpa_net_load_mq(s, n); + if (unlikely(r)) { + return r; + } + + return 0; +} + +static NetClientInfo net_vhost_vdpa_cvq_info = { + .type = NET_CLIENT_DRIVER_VHOST_VDPA, + .size = sizeof(VhostVDPAState), + .receive = vhost_vdpa_receive, + .start = vhost_vdpa_net_cvq_start, + .load = vhost_vdpa_net_load, + .stop = vhost_vdpa_net_cvq_stop, + .cleanup = vhost_vdpa_cleanup, + .has_vnet_hdr = vhost_vdpa_has_vnet_hdr, + .has_ufo = vhost_vdpa_has_ufo, + .check_peer_type = vhost_vdpa_check_peer_type, +}; + +/** + * Validate and copy control virtqueue commands. + * + * Following QEMU guidelines, we offer a copy of the buffers to the device to + * prevent TOCTOU bugs. 
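The copy-before-use rule referenced here is a general defence against time-of-check/time-of-use races on guest-visible memory; a standalone sketch of the shape (plain C, not QEMU API):

    #include <string.h>

    /* Snapshot a guest-shared request into private storage, then only
     * ever validate and act on the private copy. */
    static int handle_shared_request(const void *shared, size_t len,
                                     unsigned char *priv, size_t priv_len)
    {
        if (len > priv_len) {
            return -1;                 /* reject oversized requests */
        }
        memcpy(priv, shared, len);     /* copy before any validation */
        /* ... parse and validate 'priv'; never re-read 'shared' ... */
        return 0;
    }

In the handler below the same idea appears as iov_to_buf() into cvq_cmd_out_buffer before the command is forwarded to the device and to the device model.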
+ */ +static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq, + VirtQueueElement *elem, + void *opaque) +{ + VhostVDPAState *s = opaque; + size_t in_len; + virtio_net_ctrl_ack status = VIRTIO_NET_ERR; + /* Out buffer sent to both the vdpa device and the device model */ + struct iovec out = { + .iov_base = s->cvq_cmd_out_buffer, + }; + /* in buffer used for device model */ + const struct iovec in = { + .iov_base = &status, + .iov_len = sizeof(status), + }; + ssize_t dev_written = -EINVAL; + + out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0, + s->cvq_cmd_out_buffer, + vhost_vdpa_net_cvq_cmd_len()); + dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status)); + if (unlikely(dev_written < 0)) { + goto out; + } + + if (unlikely(dev_written < sizeof(status))) { + error_report("Insufficient written data (%zu)", dev_written); + goto out; + } + + if (*s->status != VIRTIO_NET_OK) { + goto out; + } + + status = VIRTIO_NET_ERR; + virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1); + if (status != VIRTIO_NET_OK) { + error_report("Bad CVQ processing in model"); + } + +out: + in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, + sizeof(status)); + if (unlikely(in_len < sizeof(status))) { + error_report("Bad device CVQ written length"); + } + vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status))); + g_free(elem); + return dev_written < 0 ? dev_written : 0; +} + +static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = { + .avail_handler = vhost_vdpa_net_handle_ctrl_avail, +}; + +static NetClientState *net_vhost_vdpa_init(NetClientState *peer, + const char *device, + const char *name, + int vdpa_device_fd, + int queue_pair_index, + int nvqs, + bool is_datapath, + bool svq, + VhostIOVATree *iova_tree) { NetClientState *nc = NULL; VhostVDPAState *s; - int vdpa_device_fd = -1; int ret = 0; assert(name); - nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device, name); - snprintf(nc->info_str, sizeof(nc->info_str), TYPE_VHOST_VDPA); - nc->queue_index = 0; - s = DO_UPCAST(VhostVDPAState, nc, nc); - vdpa_device_fd = qemu_open_old(vhostdev, O_RDWR); - if (vdpa_device_fd == -1) { - return -errno; + if (is_datapath) { + nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device, + name); + } else { + nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer, + device, name); } + qemu_set_info_str(nc, TYPE_VHOST_VDPA); + s = DO_UPCAST(VhostVDPAState, nc, nc); + s->vhost_vdpa.device_fd = vdpa_device_fd; - ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa); - assert(s->vhost_net); + s->vhost_vdpa.index = queue_pair_index; + s->vhost_vdpa.shadow_vqs_enabled = svq; + s->vhost_vdpa.iova_tree = iova_tree; + if (!is_datapath) { + s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(), + vhost_vdpa_net_cvq_cmd_page_len()); + memset(s->cvq_cmd_out_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len()); + s->status = qemu_memalign(qemu_real_host_page_size(), + vhost_vdpa_net_cvq_cmd_page_len()); + memset(s->status, 0, vhost_vdpa_net_cvq_cmd_page_len()); + + s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops; + s->vhost_vdpa.shadow_vq_ops_opaque = s; + } + ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs); + if (ret) { + qemu_del_net_client(nc); + return NULL; + } + return nc; +} + +static int vhost_vdpa_get_iova_range(int fd, + struct vhost_vdpa_iova_range *iova_range) +{ + int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range); + + return ret < 0 ? 
-errno : 0; +} + +static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp) +{ + int ret = ioctl(fd, VHOST_GET_FEATURES, features); + if (unlikely(ret < 0)) { + error_setg_errno(errp, errno, + "Fail to query features from vhost-vDPA device"); + } return ret; } -static int net_vhost_check_net(void *opaque, QemuOpts *opts, Error **errp) +static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features, + int *has_cvq, Error **errp) { - const char *name = opaque; - const char *driver, *netdev; + unsigned long config_size = offsetof(struct vhost_vdpa_config, buf); + g_autofree struct vhost_vdpa_config *config = NULL; + __virtio16 *max_queue_pairs; + int ret; - driver = qemu_opt_get(opts, "driver"); - netdev = qemu_opt_get(opts, "netdev"); - if (!driver || !netdev) { - return 0; + if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) { + *has_cvq = 1; + } else { + *has_cvq = 0; } - if (strcmp(netdev, name) == 0 && - !g_str_has_prefix(driver, "virtio-net-")) { - error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*"); - return -1; + + if (features & (1 << VIRTIO_NET_F_MQ)) { + config = g_malloc0(config_size + sizeof(*max_queue_pairs)); + config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs); + config->len = sizeof(*max_queue_pairs); + + ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config); + if (ret) { + error_setg(errp, "Fail to get config from vhost-vDPA device"); + return -ret; + } + + max_queue_pairs = (__virtio16 *)&config->buf; + + return lduw_le_p(max_queue_pairs); } - return 0; + + return 1; } int net_init_vhost_vdpa(const Netdev *netdev, const char *name, NetClientState *peer, Error **errp) { const NetdevVhostVDPAOptions *opts; + uint64_t features; + int vdpa_device_fd; + g_autofree NetClientState **ncs = NULL; + g_autoptr(VhostIOVATree) iova_tree = NULL; + NetClientState *nc; + int queue_pairs, r, i = 0, has_cvq = 0; assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA); opts = &netdev->u.vhost_vdpa; - /* verify net frontend */ - if (qemu_opts_foreach(qemu_find_opts("device"), net_vhost_check_net, - (char *)name, errp)) { + if (!opts->has_vhostdev && !opts->has_vhostfd) { + error_setg(errp, + "vhost-vdpa: neither vhostdev= nor vhostfd= was specified"); return -1; } - return net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, opts->vhostdev); + + if (opts->has_vhostdev && opts->has_vhostfd) { + error_setg(errp, + "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive"); + return -1; + } + + if (opts->has_vhostdev) { + vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp); + if (vdpa_device_fd == -1) { + return -errno; + } + } else { + /* has_vhostfd */ + vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp); + if (vdpa_device_fd == -1) { + error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: "); + return -1; + } + } + + r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp); + if (unlikely(r < 0)) { + goto err; + } + + queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features, + &has_cvq, errp); + if (queue_pairs < 0) { + qemu_close(vdpa_device_fd); + return queue_pairs; + } + + if (opts->x_svq) { + struct vhost_vdpa_iova_range iova_range; + + uint64_t invalid_dev_features = + features & ~vdpa_svq_device_features & + /* Transport are all accepted at this point */ + ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START, + VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START); + + if (invalid_dev_features) { + error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64, + invalid_dev_features); + goto err_svq; + } + + 
vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range); + iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last); + } + + ncs = g_malloc0(sizeof(*ncs) * queue_pairs); + + for (i = 0; i < queue_pairs; i++) { + ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, + vdpa_device_fd, i, 2, true, opts->x_svq, + iova_tree); + if (!ncs[i]) + goto err; + } + + if (has_cvq) { + nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, + vdpa_device_fd, i, 1, false, + opts->x_svq, iova_tree); + if (!nc) + goto err; + } + + /* iova_tree ownership belongs to last NetClientState */ + g_steal_pointer(&iova_tree); + return 0; + +err: + if (i) { + for (i--; i >= 0; i--) { + qemu_del_net_client(ncs[i]); + } + } + +err_svq: + qemu_close(vdpa_device_fd); + + return -1; } diff --git a/net/vmnet-bridged.m b/net/vmnet-bridged.m new file mode 100644 index 0000000000..46d2282863 --- /dev/null +++ b/net/vmnet-bridged.m @@ -0,0 +1,152 @@ +/* + * vmnet-bridged.m + * + * Copyright(c) 2022 Vladislav Yaroshchuk + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qapi/qapi-types-net.h" +#include "qapi/error.h" +#include "clients.h" +#include "vmnet_int.h" + +#include + + +static bool validate_ifname(const char *ifname) +{ + xpc_object_t shared_if_list = vmnet_copy_shared_interface_list(); + bool match = false; + if (!xpc_array_get_count(shared_if_list)) { + goto done; + } + + match = !xpc_array_apply( + shared_if_list, + ^bool(size_t index, xpc_object_t value) { + return strcmp(xpc_string_get_string_ptr(value), ifname) != 0; + }); + +done: + xpc_release(shared_if_list); + return match; +} + + +static char* get_valid_ifnames() +{ + xpc_object_t shared_if_list = vmnet_copy_shared_interface_list(); + __block char *if_list = NULL; + __block char *if_list_prev = NULL; + + if (!xpc_array_get_count(shared_if_list)) { + goto done; + } + + xpc_array_apply( + shared_if_list, + ^bool(size_t index, xpc_object_t value) { + /* build list of strings like "en0 en1 en2 " */ + if_list = g_strconcat(xpc_string_get_string_ptr(value), + " ", + if_list_prev, + NULL); + g_free(if_list_prev); + if_list_prev = if_list; + return true; + }); + +done: + xpc_release(shared_if_list); + return if_list; +} + + +static bool validate_options(const Netdev *netdev, Error **errp) +{ + const NetdevVmnetBridgedOptions *options = &(netdev->u.vmnet_bridged); + char* if_list; + + if (!validate_ifname(options->ifname)) { + if_list = get_valid_ifnames(); + if (if_list) { + error_setg(errp, + "unsupported ifname '%s', expected one of [ %s]", + options->ifname, + if_list); + g_free(if_list); + } else { + error_setg(errp, + "unsupported ifname '%s', no supported " + "interfaces available", + options->ifname); + } + return false; + } + +#if !defined(MAC_OS_VERSION_11_0) || \ + MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_11_0 + if (options->has_isolated) { + error_setg(errp, + "vmnet-bridged.isolated feature is " + "unavailable: outdated vmnet.framework API"); + return false; + } +#endif + return true; +} + + +static xpc_object_t build_if_desc(const Netdev *netdev) +{ + const NetdevVmnetBridgedOptions *options = &(netdev->u.vmnet_bridged); + xpc_object_t if_desc = xpc_dictionary_create(NULL, NULL, 0); + + xpc_dictionary_set_uint64(if_desc, + vmnet_operation_mode_key, + VMNET_BRIDGED_MODE + ); + + xpc_dictionary_set_string(if_desc, + vmnet_shared_interface_name_key, + options->ifname); + +#if defined(MAC_OS_VERSION_11_0) && \ 
+ MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0 + xpc_dictionary_set_bool(if_desc, + vmnet_enable_isolation_key, + options->isolated); +#endif + return if_desc; +} + + +static NetClientInfo net_vmnet_bridged_info = { + .type = NET_CLIENT_DRIVER_VMNET_BRIDGED, + .size = sizeof(VmnetState), + .receive = vmnet_receive_common, + .cleanup = vmnet_cleanup_common, +}; + + +int net_init_vmnet_bridged(const Netdev *netdev, const char *name, + NetClientState *peer, Error **errp) +{ + NetClientState *nc = qemu_new_net_client(&net_vmnet_bridged_info, + peer, "vmnet-bridged", name); + xpc_object_t if_desc; + int result = -1; + + if (!validate_options(netdev, errp)) { + return result; + } + + if_desc = build_if_desc(netdev); + result = vmnet_if_create(nc, if_desc, errp); + xpc_release(if_desc); + return result; +} diff --git a/net/vmnet-common.m b/net/vmnet-common.m new file mode 100644 index 0000000000..2cb60b9ddd --- /dev/null +++ b/net/vmnet-common.m @@ -0,0 +1,378 @@ +/* + * vmnet-common.m - network client wrapper for Apple vmnet.framework + * + * Copyright(c) 2022 Vladislav Yaroshchuk + * Copyright(c) 2021 Phillip Tennen + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "qemu/log.h" +#include "qapi/qapi-types-net.h" +#include "vmnet_int.h" +#include "clients.h" +#include "qemu/error-report.h" +#include "qapi/error.h" + +#include +#include + + +static void vmnet_send_completed(NetClientState *nc, ssize_t len); + + +const char *vmnet_status_map_str(vmnet_return_t status) +{ + switch (status) { + case VMNET_SUCCESS: + return "success"; + case VMNET_FAILURE: + return "general failure (possibly not enough privileges)"; + case VMNET_MEM_FAILURE: + return "memory allocation failure"; + case VMNET_INVALID_ARGUMENT: + return "invalid argument specified"; + case VMNET_SETUP_INCOMPLETE: + return "interface setup is not complete"; + case VMNET_INVALID_ACCESS: + return "invalid access, permission denied"; + case VMNET_PACKET_TOO_BIG: + return "packet size is larger than MTU"; + case VMNET_BUFFER_EXHAUSTED: + return "buffers exhausted in kernel"; + case VMNET_TOO_MANY_PACKETS: + return "packet count exceeds limit"; +#if defined(MAC_OS_VERSION_11_0) && \ + MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0 + case VMNET_SHARING_SERVICE_BUSY: + return "conflict, sharing service is in use"; +#endif + default: + return "unknown vmnet error"; + } +} + + +/** + * Write packets from QEMU to vmnet interface. + * + * vmnet.framework supports iov, but writing more than + * one iov into vmnet interface fails with + * 'VMNET_INVALID_ARGUMENT'. Collecting provided iovs into + * one and passing it to vmnet works fine. That's the + * reason why receive_iov() left unimplemented. But it still + * works with good performance having .receive() only. 
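+ *
+ * As a result, the single buffer that QEMU hands to .receive() is wrapped
+ * into one struct iovec and one struct vmpktdesc (vm_pkt_iovcnt == 1)
+ * before vmnet_write() is called, and packets larger than
+ * max_packet_size are rejected up front.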
+ */ +ssize_t vmnet_receive_common(NetClientState *nc, + const uint8_t *buf, + size_t size) +{ + VmnetState *s = DO_UPCAST(VmnetState, nc, nc); + struct vmpktdesc packet; + struct iovec iov; + int pkt_cnt; + vmnet_return_t if_status; + + if (size > s->max_packet_size) { + warn_report("vmnet: packet is too big, %zu > %" PRIu64, + packet.vm_pkt_size, + s->max_packet_size); + return -1; + } + + iov.iov_base = (char *) buf; + iov.iov_len = size; + + packet.vm_pkt_iovcnt = 1; + packet.vm_flags = 0; + packet.vm_pkt_size = size; + packet.vm_pkt_iov = &iov; + pkt_cnt = 1; + + if_status = vmnet_write(s->vmnet_if, &packet, &pkt_cnt); + if (if_status != VMNET_SUCCESS) { + error_report("vmnet: write error: %s\n", + vmnet_status_map_str(if_status)); + return -1; + } + + if (pkt_cnt) { + return size; + } + return 0; +} + + +/** + * Read packets from vmnet interface and write them + * to temporary buffers in VmnetState. + * + * Returns read packets number (may be 0) on success, + * -1 on error + */ +static int vmnet_read_packets(VmnetState *s) +{ + assert(s->packets_send_current_pos == s->packets_send_end_pos); + + struct vmpktdesc *packets = s->packets_buf; + vmnet_return_t status; + int i; + + /* Read as many packets as present */ + s->packets_send_current_pos = 0; + s->packets_send_end_pos = VMNET_PACKETS_LIMIT; + for (i = 0; i < s->packets_send_end_pos; ++i) { + packets[i].vm_pkt_size = s->max_packet_size; + packets[i].vm_pkt_iovcnt = 1; + packets[i].vm_flags = 0; + } + + status = vmnet_read(s->vmnet_if, packets, &s->packets_send_end_pos); + if (status != VMNET_SUCCESS) { + error_printf("vmnet: read failed: %s\n", + vmnet_status_map_str(status)); + s->packets_send_current_pos = 0; + s->packets_send_end_pos = 0; + return -1; + } + return s->packets_send_end_pos; +} + + +/** + * Write packets from temporary buffers in VmnetState + * to QEMU. + */ +static void vmnet_write_packets_to_qemu(VmnetState *s) +{ + while (s->packets_send_current_pos < s->packets_send_end_pos) { + ssize_t size = qemu_send_packet_async(&s->nc, + s->iov_buf[s->packets_send_current_pos].iov_base, + s->packets_buf[s->packets_send_current_pos].vm_pkt_size, + vmnet_send_completed); + + if (size == 0) { + /* QEMU is not ready to consume more packets - + * stop and wait for completion callback call */ + return; + } + ++s->packets_send_current_pos; + } +} + + +/** + * Bottom half callback that transfers packets from vmnet interface + * to QEMU. + * + * The process of transferring packets is three-staged: + * 1. Handle vmnet event; + * 2. Read packets from vmnet interface into temporary buffer; + * 3. Write packets from temporary buffer to QEMU. + * + * QEMU may suspend this process on the last stage, returning 0 from + * qemu_send_packet_async function. If this happens, we should + * respectfully wait until it is ready to consume more packets, + * write left ones in temporary buffer and only after this + * continue reading more packets from vmnet interface. + * + * Packets to be transferred are stored into packets_buf, + * in the window [packets_send_current_pos..packets_send_end_pos) + * including current_pos, excluding end_pos. + * + * Thus, if QEMU is not ready, buffer is not read and + * packets_send_current_pos < packets_send_end_pos. 
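+ *
+ * Worked example: if vmnet_read_packets() reads 5 packets, the window is
+ * [0..5). If qemu_send_packet_async() then returns 0 after two packets
+ * have been queued, packets_send_current_pos == 2 and
+ * packets_send_end_pos == 5, so vmnet_send_bh() returns without reading
+ * more packets until vmnet_send_completed() advances
+ * packets_send_current_pos and drains the remaining [2..5) window.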
+ */ +static void vmnet_send_bh(void *opaque) +{ + NetClientState *nc = (NetClientState *) opaque; + VmnetState *s = DO_UPCAST(VmnetState, nc, nc); + + /* + * Do nothing if QEMU is not ready - wait + * for completion callback invocation + */ + if (s->packets_send_current_pos < s->packets_send_end_pos) { + return; + } + + /* Read packets from vmnet interface */ + if (vmnet_read_packets(s) > 0) { + /* Send them to QEMU */ + vmnet_write_packets_to_qemu(s); + } +} + + +/** + * Completion callback to be invoked by QEMU when it becomes + * ready to consume more packets. + */ +static void vmnet_send_completed(NetClientState *nc, ssize_t len) +{ + VmnetState *s = DO_UPCAST(VmnetState, nc, nc); + + /* Callback is invoked eq queued packet is sent */ + ++s->packets_send_current_pos; + + /* Complete sending packets left in VmnetState buffers */ + vmnet_write_packets_to_qemu(s); + + /* And read new ones from vmnet if VmnetState buffer is ready */ + if (s->packets_send_current_pos < s->packets_send_end_pos) { + qemu_bh_schedule(s->send_bh); + } +} + + +static void vmnet_bufs_init(VmnetState *s) +{ + struct vmpktdesc *packets = s->packets_buf; + struct iovec *iov = s->iov_buf; + int i; + + for (i = 0; i < VMNET_PACKETS_LIMIT; ++i) { + iov[i].iov_len = s->max_packet_size; + iov[i].iov_base = g_malloc0(iov[i].iov_len); + packets[i].vm_pkt_iov = iov + i; + } +} + + +int vmnet_if_create(NetClientState *nc, + xpc_object_t if_desc, + Error **errp) +{ + VmnetState *s = DO_UPCAST(VmnetState, nc, nc); + dispatch_semaphore_t if_created_sem = dispatch_semaphore_create(0); + __block vmnet_return_t if_status; + + s->if_queue = dispatch_queue_create( + "org.qemu.vmnet.if_queue", + DISPATCH_QUEUE_SERIAL + ); + + xpc_dictionary_set_bool( + if_desc, + vmnet_allocate_mac_address_key, + false + ); + +#ifdef DEBUG + qemu_log("vmnet.start.interface_desc:\n"); + xpc_dictionary_apply(if_desc, + ^bool(const char *k, xpc_object_t v) { + char *desc = xpc_copy_description(v); + qemu_log(" %s=%s\n", k, desc); + free(desc); + return true; + }); +#endif /* DEBUG */ + + s->vmnet_if = vmnet_start_interface( + if_desc, + s->if_queue, + ^(vmnet_return_t status, xpc_object_t interface_param) { + if_status = status; + if (status != VMNET_SUCCESS || !interface_param) { + dispatch_semaphore_signal(if_created_sem); + return; + } + +#ifdef DEBUG + qemu_log("vmnet.start.interface_param:\n"); + xpc_dictionary_apply(interface_param, + ^bool(const char *k, xpc_object_t v) { + char *desc = xpc_copy_description(v); + qemu_log(" %s=%s\n", k, desc); + free(desc); + return true; + }); +#endif /* DEBUG */ + + s->mtu = xpc_dictionary_get_uint64( + interface_param, + vmnet_mtu_key); + s->max_packet_size = xpc_dictionary_get_uint64( + interface_param, + vmnet_max_packet_size_key); + + dispatch_semaphore_signal(if_created_sem); + }); + + if (s->vmnet_if == NULL) { + dispatch_release(s->if_queue); + dispatch_release(if_created_sem); + error_setg(errp, + "unable to create interface with requested params"); + return -1; + } + + dispatch_semaphore_wait(if_created_sem, DISPATCH_TIME_FOREVER); + dispatch_release(if_created_sem); + + if (if_status != VMNET_SUCCESS) { + dispatch_release(s->if_queue); + error_setg(errp, + "cannot create vmnet interface: %s", + vmnet_status_map_str(if_status)); + return -1; + } + + s->send_bh = aio_bh_new(qemu_get_aio_context(), vmnet_send_bh, nc); + vmnet_bufs_init(s); + + s->packets_send_current_pos = 0; + s->packets_send_end_pos = 0; + + vmnet_interface_set_event_callback( + s->vmnet_if, + VMNET_INTERFACE_PACKETS_AVAILABLE, + 
s->if_queue, + ^(interface_event_t event_id, xpc_object_t event) { + assert(event_id == VMNET_INTERFACE_PACKETS_AVAILABLE); + /* + * This function is being called from a non qemu thread, so + * we only schedule a BH, and do the rest of the io completion + * handling from vmnet_send_bh() which runs in a qemu context. + */ + qemu_bh_schedule(s->send_bh); + }); + + return 0; +} + + +void vmnet_cleanup_common(NetClientState *nc) +{ + VmnetState *s = DO_UPCAST(VmnetState, nc, nc); + dispatch_semaphore_t if_stopped_sem; + + if (s->vmnet_if == NULL) { + return; + } + + if_stopped_sem = dispatch_semaphore_create(0); + vmnet_stop_interface( + s->vmnet_if, + s->if_queue, + ^(vmnet_return_t status) { + assert(status == VMNET_SUCCESS); + dispatch_semaphore_signal(if_stopped_sem); + }); + dispatch_semaphore_wait(if_stopped_sem, DISPATCH_TIME_FOREVER); + + qemu_purge_queued_packets(nc); + + qemu_bh_delete(s->send_bh); + dispatch_release(if_stopped_sem); + dispatch_release(s->if_queue); + + for (int i = 0; i < VMNET_PACKETS_LIMIT; ++i) { + g_free(s->iov_buf[i].iov_base); + } +} diff --git a/net/vmnet-host.c b/net/vmnet-host.c new file mode 100644 index 0000000000..05f8d78864 --- /dev/null +++ b/net/vmnet-host.c @@ -0,0 +1,128 @@ +/* + * vmnet-host.c + * + * Copyright(c) 2022 Vladislav Yaroshchuk + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qemu/uuid.h" +#include "qapi/qapi-types-net.h" +#include "qapi/error.h" +#include "clients.h" +#include "vmnet_int.h" + +#include + + +static bool validate_options(const Netdev *netdev, Error **errp) +{ + const NetdevVmnetHostOptions *options = &(netdev->u.vmnet_host); + +#if defined(MAC_OS_VERSION_11_0) && \ + MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0 + + QemuUUID net_uuid; + if (options->has_net_uuid && + qemu_uuid_parse(options->net_uuid, &net_uuid) < 0) { + error_setg(errp, "Invalid UUID provided in 'net-uuid'"); + return false; + } +#else + if (options->has_isolated) { + error_setg(errp, + "vmnet-host.isolated feature is " + "unavailable: outdated vmnet.framework API"); + return false; + } + + if (options->has_net_uuid) { + error_setg(errp, + "vmnet-host.net-uuid feature is " + "unavailable: outdated vmnet.framework API"); + return false; + } +#endif + + if ((options->has_start_address || + options->has_end_address || + options->has_subnet_mask) && + !(options->has_start_address && + options->has_end_address && + options->has_subnet_mask)) { + error_setg(errp, + "'start-address', 'end-address', 'subnet-mask' " + "should be provided together"); + return false; + } + + return true; +} + +static xpc_object_t build_if_desc(const Netdev *netdev) +{ + const NetdevVmnetHostOptions *options = &(netdev->u.vmnet_host); + xpc_object_t if_desc = xpc_dictionary_create(NULL, NULL, 0); + + xpc_dictionary_set_uint64(if_desc, + vmnet_operation_mode_key, + VMNET_HOST_MODE); + +#if defined(MAC_OS_VERSION_11_0) && \ + MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0 + + xpc_dictionary_set_bool(if_desc, + vmnet_enable_isolation_key, + options->isolated); + + QemuUUID net_uuid; + if (options->has_net_uuid) { + qemu_uuid_parse(options->net_uuid, &net_uuid); + xpc_dictionary_set_uuid(if_desc, + vmnet_network_identifier_key, + net_uuid.data); + } +#endif + + if (options->has_start_address) { + xpc_dictionary_set_string(if_desc, + vmnet_start_address_key, + options->start_address); + xpc_dictionary_set_string(if_desc, + 
vmnet_end_address_key, + options->end_address); + xpc_dictionary_set_string(if_desc, + vmnet_subnet_mask_key, + options->subnet_mask); + } + + return if_desc; +} + +static NetClientInfo net_vmnet_host_info = { + .type = NET_CLIENT_DRIVER_VMNET_HOST, + .size = sizeof(VmnetState), + .receive = vmnet_receive_common, + .cleanup = vmnet_cleanup_common, +}; + +int net_init_vmnet_host(const Netdev *netdev, const char *name, + NetClientState *peer, Error **errp) +{ + NetClientState *nc = qemu_new_net_client(&net_vmnet_host_info, + peer, "vmnet-host", name); + xpc_object_t if_desc; + int result = -1; + + if (!validate_options(netdev, errp)) { + return result; + } + + if_desc = build_if_desc(netdev); + result = vmnet_if_create(nc, if_desc, errp); + xpc_release(if_desc); + return result; +} diff --git a/net/vmnet-shared.c b/net/vmnet-shared.c new file mode 100644 index 0000000000..18cadc72bd --- /dev/null +++ b/net/vmnet-shared.c @@ -0,0 +1,114 @@ +/* + * vmnet-shared.c + * + * Copyright(c) 2022 Vladislav Yaroshchuk + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qapi/qapi-types-net.h" +#include "qapi/error.h" +#include "vmnet_int.h" +#include "clients.h" + +#include + + +static bool validate_options(const Netdev *netdev, Error **errp) +{ + const NetdevVmnetSharedOptions *options = &(netdev->u.vmnet_shared); + +#if !defined(MAC_OS_VERSION_11_0) || \ + MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_11_0 + if (options->has_isolated) { + error_setg(errp, + "vmnet-shared.isolated feature is " + "unavailable: outdated vmnet.framework API"); + return false; + } +#endif + + if ((options->has_start_address || + options->has_end_address || + options->has_subnet_mask) && + !(options->has_start_address && + options->has_end_address && + options->has_subnet_mask)) { + error_setg(errp, + "'start-address', 'end-address', 'subnet-mask' " + "should be provided together" + ); + return false; + } + + return true; +} + +static xpc_object_t build_if_desc(const Netdev *netdev) +{ + const NetdevVmnetSharedOptions *options = &(netdev->u.vmnet_shared); + xpc_object_t if_desc = xpc_dictionary_create(NULL, NULL, 0); + + xpc_dictionary_set_uint64( + if_desc, + vmnet_operation_mode_key, + VMNET_SHARED_MODE + ); + + if (options->has_nat66_prefix) { + xpc_dictionary_set_string(if_desc, + vmnet_nat66_prefix_key, + options->nat66_prefix); + } + + if (options->has_start_address) { + xpc_dictionary_set_string(if_desc, + vmnet_start_address_key, + options->start_address); + xpc_dictionary_set_string(if_desc, + vmnet_end_address_key, + options->end_address); + xpc_dictionary_set_string(if_desc, + vmnet_subnet_mask_key, + options->subnet_mask); + } + +#if defined(MAC_OS_VERSION_11_0) && \ + MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0 + xpc_dictionary_set_bool( + if_desc, + vmnet_enable_isolation_key, + options->isolated + ); +#endif + + return if_desc; +} + +static NetClientInfo net_vmnet_shared_info = { + .type = NET_CLIENT_DRIVER_VMNET_SHARED, + .size = sizeof(VmnetState), + .receive = vmnet_receive_common, + .cleanup = vmnet_cleanup_common, +}; + +int net_init_vmnet_shared(const Netdev *netdev, const char *name, + NetClientState *peer, Error **errp) +{ + NetClientState *nc = qemu_new_net_client(&net_vmnet_shared_info, + peer, "vmnet-shared", name); + xpc_object_t if_desc; + int result = -1; + + if (!validate_options(netdev, errp)) { + return result; + } + + if_desc = build_if_desc(netdev); + 
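+ /*
+  * The xpc dictionary built by build_if_desc() is only needed to create
+  * the vmnet interface; it is released right after vmnet_if_create()
+  * returns, regardless of whether interface creation succeeded.
+  */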
result = vmnet_if_create(nc, if_desc, errp); + xpc_release(if_desc); + return result; +} diff --git a/net/vmnet_int.h b/net/vmnet_int.h new file mode 100644 index 0000000000..adf6e8c20d --- /dev/null +++ b/net/vmnet_int.h @@ -0,0 +1,63 @@ +/* + * vmnet_int.h + * + * Copyright(c) 2022 Vladislav Yaroshchuk + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ +#ifndef VMNET_INT_H +#define VMNET_INT_H + +#include "qemu/osdep.h" +#include "vmnet_int.h" +#include "clients.h" + +#include +#include + +/** + * From vmnet.framework documentation + * + * Each read/write call allows up to 200 packets to be + * read or written for a maximum of 256KB. + * + * Each packet written should be a complete + * ethernet frame. + * + * https://developer.apple.com/documentation/vmnet + */ +#define VMNET_PACKETS_LIMIT 200 + +typedef struct VmnetState { + NetClientState nc; + interface_ref vmnet_if; + + uint64_t mtu; + uint64_t max_packet_size; + + dispatch_queue_t if_queue; + + QEMUBH *send_bh; + + struct vmpktdesc packets_buf[VMNET_PACKETS_LIMIT]; + int packets_send_current_pos; + int packets_send_end_pos; + + struct iovec iov_buf[VMNET_PACKETS_LIMIT]; +} VmnetState; + +const char *vmnet_status_map_str(vmnet_return_t status); + +int vmnet_if_create(NetClientState *nc, + xpc_object_t if_desc, + Error **errp); + +ssize_t vmnet_receive_common(NetClientState *nc, + const uint8_t *buf, + size_t size); + +void vmnet_cleanup_common(NetClientState *nc); + +#endif /* VMNET_INT_H */ diff --git a/os-posix.c b/os-posix.c index ae6c9f2a5e..4858650c3e 100644 --- a/os-posix.c +++ b/os-posix.c @@ -29,7 +29,6 @@ #include #include -#include "qemu-common.h" /* Needed early for CONFIG_BSD etc. */ #include "net/slirp.h" #include "qemu/qemu-options.h" @@ -40,6 +39,7 @@ #ifdef CONFIG_LINUX #include +#include "qemu/async-teardown.h" #endif /* @@ -152,11 +152,8 @@ int os_parse_cmd_args(int index, const char *optarg) daemonize = 1; break; #if defined(CONFIG_LINUX) - case QEMU_OPTION_enablefips: - warn_report("-enable-fips is deprecated, please build QEMU with " - "the `libgcrypt` library as the cryptography provider " - "to enable FIPS compliance"); - fips_set_state(true); + case QEMU_OPTION_asyncteardown: + init_async_teardown(); break; #endif default: @@ -224,7 +221,7 @@ void os_daemonize(void) pid_t pid; int fds[2]; - if (pipe(fds) == -1) { + if (!g_unix_open_pipe(fds, FD_CLOEXEC, NULL)) { exit(1); } @@ -249,7 +246,6 @@ void os_daemonize(void) close(fds[0]); daemon_pipe = fds[1]; - qemu_set_cloexec(daemon_pipe); setsid(); @@ -292,7 +288,7 @@ void os_setup_post(void) dup2(fd, 0); dup2(fd, 1); /* In case -D is given do not redirect stderr to /dev/null */ - if (!qemu_logfile) { + if (!qemu_log_enabled()) { dup2(fd, 2); } @@ -317,6 +313,12 @@ bool is_daemonized(void) return daemonize; } +int os_set_daemonize(bool d) +{ + daemonize = d; + return 0; +} + int os_mlock(void) { #ifdef HAVE_MLOCKALL diff --git a/os-win32.c b/os-win32.c index e31c921983..725ad652e8 100644 --- a/os-win32.c +++ b/os-win32.c @@ -26,7 +26,6 @@ #include "qemu/osdep.h" #include #include -#include "qemu-common.h" #include "sysemu/runstate.h" static BOOL WINAPI qemu_ctrl_handler(DWORD type) @@ -61,12 +60,3 @@ void os_set_line_buffering(void) setbuf(stdout, NULL); setbuf(stderr, NULL); } - -/* - * Parse OS specific command line options. 
- * return 0 if option handled, -1 otherwise - */ -int os_parse_cmd_args(int index, const char *optarg) -{ - return -1; -} diff --git a/page-vary-common.c b/page-vary-common.c index 9175556498..ab77672dd4 100644 --- a/page-vary-common.c +++ b/page-vary-common.c @@ -20,7 +20,6 @@ #define IN_PAGE_VARY 1 #include "qemu/osdep.h" -#include "qemu-common.h" #include "exec/page-vary.h" /* WARNING: This file must *not* be complied with -flto. */ diff --git a/page-vary.c b/page-vary.c index 057c7f1815..343b4adb95 100644 --- a/page-vary.c +++ b/page-vary.c @@ -20,7 +20,7 @@ #define IN_PAGE_VARY 1 #include "qemu/osdep.h" -#include "qemu-common.h" +#include "exec/page-vary.h" #include "exec/exec-all.h" bool set_preferred_target_page_bits(int bits) diff --git a/pc-bios/README b/pc-bios/README index db39d757b0..b94f3fb081 100644 --- a/pc-bios/README +++ b/pc-bios/README @@ -14,7 +14,7 @@ - SLOF (Slimline Open Firmware) is a free IEEE 1275 Open Firmware implementation for certain IBM POWER hardware. The sources are at https://github.com/aik/SLOF, and the image currently in qemu is - built from git tag qemu-slof-20210711. + built from git tag qemu-slof-20220719. - VOF (Virtual Open Firmware) is a minimalistic firmware to work with -machine pseries,x-vof=on. When enabled, the firmware acts as a slim shim and diff --git a/pc-bios/bios-256k.bin b/pc-bios/bios-256k.bin index 96bdeaa487..211b2a4da2 100644 Binary files a/pc-bios/bios-256k.bin and b/pc-bios/bios-256k.bin differ diff --git a/pc-bios/bios-microvm.bin b/pc-bios/bios-microvm.bin index eefd32197e..6204a714cd 100644 Binary files a/pc-bios/bios-microvm.bin and b/pc-bios/bios-microvm.bin differ diff --git a/pc-bios/bios.bin b/pc-bios/bios.bin index 3ecaf4a709..12d6a037be 100644 Binary files a/pc-bios/bios.bin and b/pc-bios/bios.bin differ diff --git a/pc-bios/descriptors/meson.build b/pc-bios/descriptors/meson.build index 29efa16d99..66f85d01c4 100644 --- a/pc-bios/descriptors/meson.build +++ b/pc-bios/descriptors/meson.build @@ -1,4 +1,4 @@ -if install_edk2_blobs +if unpack_edk2_blobs and get_option('install_blobs') foreach f: [ '50-edk2-i386-secure.json', '50-edk2-x86_64-secure.json', @@ -10,7 +10,7 @@ if install_edk2_blobs configure_file(input: files(f), output: f, configuration: {'DATADIR': get_option('prefix') / qemu_datadir}, - install: get_option('install_blobs'), + install: true, install_dir: qemu_datadir / 'firmware') endforeach endif diff --git a/pc-bios/edk2-aarch64-code.fd.bz2 b/pc-bios/edk2-aarch64-code.fd.bz2 index 5bf311464a..0262f5bd8f 100644 Binary files a/pc-bios/edk2-aarch64-code.fd.bz2 and b/pc-bios/edk2-aarch64-code.fd.bz2 differ diff --git a/pc-bios/edk2-arm-code.fd.bz2 b/pc-bios/edk2-arm-code.fd.bz2 index 7a98069814..4ca97b43ea 100644 Binary files a/pc-bios/edk2-arm-code.fd.bz2 and b/pc-bios/edk2-arm-code.fd.bz2 differ diff --git a/pc-bios/edk2-i386-code.fd.bz2 b/pc-bios/edk2-i386-code.fd.bz2 index e7b1befe2c..6e02c9b995 100644 Binary files a/pc-bios/edk2-i386-code.fd.bz2 and b/pc-bios/edk2-i386-code.fd.bz2 differ diff --git a/pc-bios/edk2-i386-secure-code.fd.bz2 b/pc-bios/edk2-i386-secure-code.fd.bz2 index b5df5bed30..a4b1cc92bd 100644 Binary files a/pc-bios/edk2-i386-secure-code.fd.bz2 and b/pc-bios/edk2-i386-secure-code.fd.bz2 differ diff --git a/pc-bios/edk2-x86_64-code.fd.bz2 b/pc-bios/edk2-x86_64-code.fd.bz2 index e1654d4003..37bfb0dbed 100644 Binary files a/pc-bios/edk2-x86_64-code.fd.bz2 and b/pc-bios/edk2-x86_64-code.fd.bz2 differ diff --git a/pc-bios/edk2-x86_64-microvm.fd.bz2 b/pc-bios/edk2-x86_64-microvm.fd.bz2 new file 
mode 100644 index 0000000000..1d65c61ded Binary files /dev/null and b/pc-bios/edk2-x86_64-microvm.fd.bz2 differ diff --git a/pc-bios/edk2-x86_64-secure-code.fd.bz2 b/pc-bios/edk2-x86_64-secure-code.fd.bz2 index 767274c38c..76dc6d5aad 100644 Binary files a/pc-bios/edk2-x86_64-secure-code.fd.bz2 and b/pc-bios/edk2-x86_64-secure-code.fd.bz2 differ diff --git a/pc-bios/hppa-firmware.img b/pc-bios/hppa-firmware.img index 4ba8c7f8b8..0fa3808f16 100644 Binary files a/pc-bios/hppa-firmware.img and b/pc-bios/hppa-firmware.img differ diff --git a/pc-bios/keymaps/meson.build b/pc-bios/keymaps/meson.build index 05eda6c0d2..452395b962 100644 --- a/pc-bios/keymaps/meson.build +++ b/pc-bios/keymaps/meson.build @@ -1,5 +1,5 @@ keymaps = { - 'ar': '-l ar', + 'ar': '-l ara', 'bepo': '-l fr -v dvorak', 'cz': '-l cz', 'da': '-l dk', @@ -39,9 +39,9 @@ else native_qemu_keymap = qemu_keymap endif -t = [] -foreach km, args: keymaps - if native_qemu_keymap.found() +if native_qemu_keymap.found() + t = [] + foreach km, args: keymaps # generate with qemu-kvm t += custom_target(km, build_by_default: true, @@ -49,20 +49,11 @@ foreach km, args: keymaps command: [native_qemu_keymap, '-f', '@OUTPUT@', args.split()], install: true, install_dir: qemu_datadir / 'keymaps') - else - # copy from source tree - t += custom_target(km, - build_by_default: true, - input: km, - output: km, - command: ['cp', '@INPUT@', '@OUTPUT@'], - install: true, - install_dir: qemu_datadir / 'keymaps') - endif -endforeach + endforeach -if native_qemu_keymap.found() alias_target('update-keymaps', t) +else + install_data(keymaps.keys(), install_dir: qemu_datadir / 'keymaps') endif install_data(['sl', 'sv'], install_dir: qemu_datadir / 'keymaps') diff --git a/pc-bios/meson.build b/pc-bios/meson.build index f2b32598af..388e0db6e4 100644 --- a/pc-bios/meson.build +++ b/pc-bios/meson.build @@ -1,4 +1,5 @@ -if install_edk2_blobs +roms = [] +if unpack_edk2_blobs fds = [ 'edk2-aarch64-code.fd', 'edk2-arm-code.fd', @@ -11,10 +12,10 @@ if install_edk2_blobs ] foreach f : fds - custom_target(f, + roms += custom_target(f, build_by_default: have_system, output: f, - input: '@0@.bz2'.format(f), + input: files('@0@.bz2'.format(f)), capture: true, install: get_option('install_blobs'), install_dir: qemu_datadir, @@ -22,7 +23,7 @@ if install_edk2_blobs endforeach endif -blobs = files( +blobs = [ 'bios.bin', 'bios-256k.bin', 'bios-microvm.bin', @@ -62,6 +63,7 @@ blobs = files( 'petalogix-s3adsp1800.dtb', 'petalogix-ml605.dtb', 'multiboot.bin', + 'multiboot_dma.bin', 'linuxboot.bin', 'linuxboot_dma.bin', 'kvmvapic.bin', @@ -78,10 +80,10 @@ blobs = files( 'hppa-firmware.img', 'opensbi-riscv32-generic-fw_dynamic.bin', 'opensbi-riscv64-generic-fw_dynamic.bin', - 'opensbi-riscv32-generic-fw_dynamic.elf', - 'opensbi-riscv64-generic-fw_dynamic.elf', 'npcm7xx_bootrom.bin', -) + 'vof.bin', + 'vof-nvram.bin', +] if get_option('install_blobs') install_data(blobs, install_dir: qemu_datadir) diff --git a/pc-bios/multiboot_dma.bin b/pc-bios/multiboot_dma.bin new file mode 100644 index 0000000000..c0e2c3102a Binary files /dev/null and b/pc-bios/multiboot_dma.bin differ diff --git a/pc-bios/openbios-ppc b/pc-bios/openbios-ppc index e40e1d7025..d8203a5074 100644 Binary files a/pc-bios/openbios-ppc and b/pc-bios/openbios-ppc differ diff --git a/pc-bios/openbios-sparc32 b/pc-bios/openbios-sparc32 index 0c0aa094ec..118b1cc1c0 100644 Binary files a/pc-bios/openbios-sparc32 and b/pc-bios/openbios-sparc32 differ diff --git a/pc-bios/openbios-sparc64 b/pc-bios/openbios-sparc64 index 
a3e458517a..846cb4854c 100644 Binary files a/pc-bios/openbios-sparc64 and b/pc-bios/openbios-sparc64 differ diff --git a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin index ae651e2993..81bab1adc9 100644 Binary files a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin and b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin differ diff --git a/pc-bios/opensbi-riscv32-generic-fw_dynamic.elf b/pc-bios/opensbi-riscv32-generic-fw_dynamic.elf deleted file mode 100644 index 3250d89408..0000000000 Binary files a/pc-bios/opensbi-riscv32-generic-fw_dynamic.elf and /dev/null differ diff --git a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin index f039884483..5eb0a74326 100644 Binary files a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin and b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin differ diff --git a/pc-bios/opensbi-riscv64-generic-fw_dynamic.elf b/pc-bios/opensbi-riscv64-generic-fw_dynamic.elf deleted file mode 100644 index ef261c98d1..0000000000 Binary files a/pc-bios/opensbi-riscv64-generic-fw_dynamic.elf and /dev/null differ diff --git a/pc-bios/optionrom/Makefile b/pc-bios/optionrom/Makefile index 30771f8d17..b1fff0ba6c 100644 --- a/pc-bios/optionrom/Makefile +++ b/pc-bios/optionrom/Makefile @@ -1,72 +1,69 @@ -CURRENT_MAKEFILE := $(realpath $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST))) -SRC_DIR := $(dir $(CURRENT_MAKEFILE)) -TOPSRC_DIR := $(SRC_DIR)/../.. +include config.mak +SRC_DIR := $(TOPSRC_DIR)/pc-bios/optionrom VPATH = $(SRC_DIR) -all: multiboot.bin linuxboot.bin linuxboot_dma.bin kvmvapic.bin pvh.bin +all: multiboot.bin multiboot_dma.bin linuxboot.bin linuxboot_dma.bin kvmvapic.bin pvh.bin # Dummy command so that make thinks it has done something @true -include ../../config-host.mak CFLAGS = -O2 -g -quiet-command = $(if $(V),$1,$(if $(2),@printf " %-7s %s\n" $2 $3 && $1, @$1)) -cc-option = $(if $(shell $(CC) $1 -c -o /dev/null -xc /dev/null >/dev/null 2>&1 && echo OK), $1, $2) +NULL := +SPACE := $(NULL) # +TARGET_PREFIX := $(patsubst %/,%:$(SPACE),$(TARGET_DIR)) -override CFLAGS += -march=i486 -Wall +quiet-@ = $(if $(V),,@$(if $1,printf "%s\n" "$(TARGET_PREFIX)$1" && )) +quiet-command = $(call quiet-@,$2 $@)$1 # Flags for dependency generation override CPPFLAGS += -MMD -MP -MT $@ -MF $(@D)/$(*F).d -override CFLAGS += $(filter -W%, $(QEMU_CFLAGS)) -override CFLAGS += $(CFLAGS_NOPIE) -ffreestanding -I$(TOPSRC_DIR)/include -override CFLAGS += $(call cc-option, -fno-stack-protector) -override CFLAGS += $(call cc-option, -m16) +override CFLAGS += -march=i486 -Wall $(EXTRA_CFLAGS) -m16 +override CFLAGS += -ffreestanding -I$(TOPSRC_DIR)/include -ifeq ($(filter -m16, $(CFLAGS)),) -# Attempt to work around compilers that lack -m16 (GCC <= 4.8, clang <= ??) -# On GCC we add -fno-toplevel-reorder to keep the order of asm blocks with -# respect to the rest of the code. clang does not have -fno-toplevel-reorder, -# but it places all asm blocks at the beginning and we're relying on it for -# the option ROM header. So just force clang not to use the integrated -# assembler, which doesn't support .code16gcc. 
-override CFLAGS += $(call cc-option, -fno-toplevel-reorder) -override CFLAGS += $(call cc-option, -no-integrated-as) -override CFLAGS += -m32 -include $(SRC_DIR)/code16gcc.h -endif +cc-test = $(CC) -Werror $1 -c -o /dev/null -xc /dev/null >/dev/null 2>/dev/null +cc-option = if $(call cc-test, $1); then \ + echo "$(TARGET_PREFIX)$1 detected" && echo "override CFLAGS += $1" >&3; else \ + echo "$(TARGET_PREFIX)$1 not detected" $(if $2,&& echo "override CFLAGS += $2" >&3); fi -Wa = -Wa, -override ASFLAGS += -32 -override CFLAGS += $(call cc-option, $(Wa)-32) +# If -fcf-protection is enabled in flags or compiler defaults that will +# conflict with -march=i486 +config-cc.mak: Makefile + $(quiet-@)($(call cc-option,-fcf-protection=none); \ + $(call cc-option,-fno-pie); \ + $(call cc-option,-no-pie); \ + $(call cc-option,-fno-stack-protector); \ + $(call cc-option,-Wno-array-bounds)) 3> config-cc.mak +-include config-cc.mak -LD_I386_EMULATION ?= elf_i386 -override LDFLAGS = -m $(LD_I386_EMULATION) -T $(SRC_DIR)/flat.lds - -all: multiboot.bin linuxboot.bin linuxboot_dma.bin kvmvapic.bin pvh.bin +override LDFLAGS = -nostdlib -Wl,-T,$(SRC_DIR)/flat.lds pvh.img: pvh.o pvh_main.o %.o: %.S - $(call quiet-command,$(CPP) $(CPPFLAGS) -c -o - $< | $(AS) $(ASFLAGS) -o $@,"AS","$@") + $(call quiet-command,$(CC) $(CPPFLAGS) $(CFLAGS) -c -o $@ $<,Assembling) %.o: %.c - $(call quiet-command,$(CC) $(CPPFLAGS) $(CFLAGS) -c $< -o $@,"CC","$@") + $(call quiet-command,$(CC) $(CPPFLAGS) $(CFLAGS) -c $< -o $@,Compiling) %.img: %.o - $(call quiet-command,$(LD) $(LDFLAGS) -s -o $@ $^,"BUILD","$@") + $(call quiet-command,$(CC) $(CFLAGS) $(LDFLAGS) -s -o $@ $^,Linking) %.raw: %.img - $(call quiet-command,$(OBJCOPY) -O binary -j .text $< $@,"BUILD","$@") + $(call quiet-command,$(OBJCOPY) -O binary -j .text $< $@,Extracting raw object) %.bin: %.raw - $(call quiet-command,$(PYTHON) $(TOPSRC_DIR)/scripts/signrom.py $< $@,"SIGN","$@") + $(call quiet-command,$(PYTHON) $(TOPSRC_DIR)/scripts/signrom.py $< $@,Computing checksum into) include $(wildcard *.d) clean: rm -f *.o *.d *.raw *.img *.bin *~ +distclean: + rm -f config-cc.mak + # suppress auto-removal of intermediate files .SECONDARY: -.PHONY: all clean +.PHONY: all clean distclean diff --git a/pc-bios/optionrom/code16gcc.h b/pc-bios/optionrom/code16gcc.h deleted file mode 100644 index 9c8d25d508..0000000000 --- a/pc-bios/optionrom/code16gcc.h +++ /dev/null @@ -1,3 +0,0 @@ -asm( -".code16gcc\n" -); diff --git a/pc-bios/optionrom/multiboot.S b/pc-bios/optionrom/multiboot.S index b7efe4de34..181a4b03a3 100644 --- a/pc-bios/optionrom/multiboot.S +++ b/pc-bios/optionrom/multiboot.S @@ -68,7 +68,7 @@ run_multiboot: mov %eax, %es /* Read the bootinfo struct into RAM */ - read_fw_blob(FW_CFG_INITRD) + read_fw_blob_dma(FW_CFG_INITRD) /* FS = bootinfo_struct */ read_fw FW_CFG_INITRD_ADDR @@ -188,7 +188,7 @@ prot_mode: movl %eax, %gs /* Read the kernel and modules into RAM */ - read_fw_blob(FW_CFG_KERNEL) + read_fw_blob_dma(FW_CFG_KERNEL) /* Jump off to the kernel */ read_fw FW_CFG_KERNEL_ENTRY diff --git a/pc-bios/optionrom/multiboot_dma.S b/pc-bios/optionrom/multiboot_dma.S new file mode 100644 index 0000000000..d809af3e23 --- /dev/null +++ b/pc-bios/optionrom/multiboot_dma.S @@ -0,0 +1,2 @@ +#define USE_FW_CFG_DMA 1 +#include "multiboot.S" diff --git a/pc-bios/optionrom/optionrom.h b/pc-bios/optionrom/optionrom.h index a2b612f1a7..8d74c0ddf3 100644 --- a/pc-bios/optionrom/optionrom.h +++ b/pc-bios/optionrom/optionrom.h @@ -37,6 +37,17 @@ #define BIOS_CFG_IOPORT_CFG 0x510 #define 
BIOS_CFG_IOPORT_DATA 0x511 +#define FW_CFG_DMA_CTL_ERROR 0x01 +#define FW_CFG_DMA_CTL_READ 0x02 +#define FW_CFG_DMA_CTL_SKIP 0x04 +#define FW_CFG_DMA_CTL_SELECT 0x08 +#define FW_CFG_DMA_CTL_WRITE 0x10 + +#define FW_CFG_DMA_SIGNATURE 0x51454d5520434647ULL /* "QEMU CFG" */ + +#define BIOS_CFG_DMA_ADDR_HIGH 0x514 +#define BIOS_CFG_DMA_ADDR_LOW 0x518 + /* Break the translation block flow so -d cpu shows us values */ #define DEBUG_HERE \ jmp 1f; \ @@ -62,6 +73,61 @@ bswap %eax .endm + +/* + * Read data from the fw_cfg device using DMA. + * Clobbers: %edx, %eax, ADDR, SIZE, memory[%esp-16] to memory[%esp] + */ +.macro read_fw_dma VAR, SIZE, ADDR + /* Address */ + bswapl \ADDR + pushl \ADDR + + /* We only support 32 bit target addresses */ + xorl %eax, %eax + pushl %eax + mov $BIOS_CFG_DMA_ADDR_HIGH, %dx + outl %eax, (%dx) + + /* Size */ + bswapl \SIZE + pushl \SIZE + + /* Control */ + movl $(\VAR << 16) | (FW_CFG_DMA_CTL_READ | FW_CFG_DMA_CTL_SELECT), %eax + bswapl %eax + pushl %eax + + movl %esp, %eax /* Address of the struct we generated */ + bswapl %eax + mov $BIOS_CFG_DMA_ADDR_LOW, %dx + outl %eax, (%dx) /* Initiate DMA */ + +1: mov (%esp), %eax /* Wait for completion */ + bswapl %eax + testl $~FW_CFG_DMA_CTL_ERROR, %eax + jnz 1b + addl $16, %esp +.endm + + +/* + * Read a blob from the fw_cfg device using DMA + * Requires _ADDR, _SIZE and _DATA values for the parameter. + * + * Clobbers: %eax, %edx, %es, %ecx, %edi and adresses %esp-20 to %esp + */ +#ifdef USE_FW_CFG_DMA +#define read_fw_blob_dma(var) \ + read_fw var ## _SIZE; \ + mov %eax, %ecx; \ + read_fw var ## _ADDR; \ + mov %eax, %edi ;\ + read_fw_dma var ## _DATA, %ecx, %edi +#else +#define read_fw_blob_dma(var) read_fw_blob(var) +#endif + #define read_fw_blob_pre(var) \ read_fw var ## _SIZE; \ mov %eax, %ecx; \ diff --git a/pc-bios/qboot.rom b/pc-bios/qboot.rom old mode 100644 new mode 100755 index 7634106a07..684000f57a Binary files a/pc-bios/qboot.rom and b/pc-bios/qboot.rom differ diff --git a/pc-bios/s390-ccw.img b/pc-bios/s390-ccw.img index a560c1272f..554fcbd1b7 100644 Binary files a/pc-bios/s390-ccw.img and b/pc-bios/s390-ccw.img differ diff --git a/pc-bios/s390-ccw/Makefile b/pc-bios/s390-ccw/Makefile index cee9d2c63b..10e8f5cb63 100644 --- a/pc-bios/s390-ccw/Makefile +++ b/pc-bios/s390-ccw/Makefile @@ -2,57 +2,71 @@ all: build-all # Dummy command so that make thinks it has done something @true -include ../../config-host.mak +include config-host.mak CFLAGS = -O2 -g +MAKEFLAGS += -rR -quiet-command = $(if $(V),$1,$(if $(2),@printf " %-7s %s\n" $2 $3 && $1, @$1)) -cc-option = $(if $(shell $(CC) $1 $2 -S -o /dev/null -xc /dev/null \ - >/dev/null 2>&1 && echo OK),$2,$3) +NULL := +SPACE := $(NULL) # +TARGET_PREFIX := $(patsubst %/,%:$(SPACE),$(TARGET_DIR)) + +quiet-@ = $(if $(V),,@$(if $1,printf "%s\n" "$(TARGET_PREFIX)$1" && )) +quiet-command = $(call quiet-@,$2 $@)$1 VPATH_SUFFIXES = %.c %.h %.S %.m %.mak %.sh %.rc Kconfig% %.json.in set-vpath = $(if $1,$(foreach PATTERN,$(VPATH_SUFFIXES),$(eval vpath $(PATTERN) $1))) -$(call set-vpath, $(SRC_PATH)/pc-bios/s390-ccw) +$(call set-vpath, $(SRC_PATH)) # Flags for dependency generation QEMU_DGFLAGS = -MMD -MP -MT $@ -MF $(@D)/$(*F).d %.o: %.c - $(call quiet-command,$(CC) $(QEMU_CFLAGS) $(QEMU_DGFLAGS) $(CFLAGS) \ - -c -o $@ $<,"CC","$(TARGET_DIR)$@") + $(call quiet-command,$(CC) $(EXTRA_CFLAGS) $(QEMU_DGFLAGS) $(CFLAGS) \ + -c -o $@ $<,Compiling) %.o: %.S - $(call quiet-command,$(CCAS) $(QEMU_CFLAGS) $(QEMU_DGFLAGS) $(CFLAGS) \ - -c -o $@ $<,"CCAS","$(TARGET_DIR)$@") + $(call 
quiet-command,$(CCAS) $(EXTRA_CFLAGS) $(QEMU_DGFLAGS) $(CFLAGS) \ + -c -o $@ $<,Assembling) -.PHONY : all clean build-all +.PHONY : all clean build-all distclean OBJECTS = start.o main.o bootmap.o jump2ipl.o sclp.o menu.o \ virtio.o virtio-scsi.o virtio-blkdev.o libc.o cio.o dasd-ipl.o -QEMU_CFLAGS := -Wall $(filter -W%, $(QEMU_CFLAGS)) -QEMU_CFLAGS += $(call cc-option,-Werror $(QEMU_CFLAGS),-Wno-stringop-overflow) -QEMU_CFLAGS += -ffreestanding -fno-delete-null-pointer-checks -fno-common -fPIE -QEMU_CFLAGS += -fwrapv -fno-strict-aliasing -fno-asynchronous-unwind-tables -QEMU_CFLAGS += $(call cc-option, $(QEMU_CFLAGS), -fno-stack-protector) -QEMU_CFLAGS += -msoft-float -QEMU_CFLAGS += $(call cc-option, $(QEMU_CFLAGS),-march=z900,-march=z10) -QEMU_CFLAGS += -std=gnu99 +EXTRA_CFLAGS += -Wall +EXTRA_CFLAGS += -ffreestanding -fno-delete-null-pointer-checks -fno-common -fPIE +EXTRA_CFLAGS += -fwrapv -fno-strict-aliasing -fno-asynchronous-unwind-tables +EXTRA_CFLAGS += -msoft-float +EXTRA_CFLAGS += -std=gnu99 +LDFLAGS += -Wl,-pie -nostdlib + +cc-test = $(CC) -Werror $1 -c -o /dev/null -xc /dev/null >/dev/null 2>/dev/null +cc-option = if $(call cc-test, $1); then \ + echo "$(TARGET_PREFIX)$1 detected" && echo "EXTRA_CFLAGS += $1" >&3; else \ + echo "$(TARGET_PREFIX)$1 not detected" $(if $2,&& echo "EXTRA_CFLAGS += $2" >&3); fi + +config-cc.mak: Makefile + $(quiet-@)($(call cc-option,-Wno-stringop-overflow); \ + $(call cc-option,-fno-stack-protector); \ + $(call cc-option,-Wno-array-bounds); \ + $(call cc-option,-Wno-gnu); \ + $(call cc-option,-march=z900,-march=z10)) 3> config-cc.mak +-include config-cc.mak + LDFLAGS += -Wl,-pie -nostdlib build-all: s390-ccw.img s390-netboot.img s390-ccw.elf: $(OBJECTS) - $(call quiet-command,$(CC) $(LDFLAGS) -o $@ $(OBJECTS),"BUILD","$(TARGET_DIR)$@") - -STRIP ?= strip + $(call quiet-command,$(CC) $(LDFLAGS) -o $@ $(OBJECTS),Linking) s390-ccw.img: s390-ccw.elf - $(call quiet-command,$(STRIP) --strip-unneeded $< -o $@,"STRIP","$(TARGET_DIR)$@") + $(call quiet-command,$(STRIP) --strip-unneeded $< -o $@,Stripping $< into) $(OBJECTS): Makefile -ifneq ($(wildcard $(SRC_PATH)/roms/SLOF/lib/libnet),) -include $(SRC_PATH)/pc-bios/s390-ccw/netboot.mak +ifneq ($(wildcard $(SRC_PATH)/../../roms/SLOF/lib/libnet),) +include $(SRC_PATH)/netboot.mak else s390-netboot.img: @echo "s390-netboot.img not built since roms/SLOF/ is not available." @@ -63,3 +77,6 @@ ALL_OBJS = $(sort $(OBJECTS) $(NETOBJS) $(LIBCOBJS) $(LIBNETOBJS)) clean: rm -f *.o *.d *.img *.elf *~ *.a + +distclean: + rm -f config-cc.mak diff --git a/pc-bios/s390-ccw/bootmap.c b/pc-bios/s390-ccw/bootmap.c index 56411ab3b6..994e59c0b0 100644 --- a/pc-bios/s390-ccw/bootmap.c +++ b/pc-bios/s390-ccw/bootmap.c @@ -780,18 +780,37 @@ static void ipl_iso_el_torito(void) } } +/** + * Detect whether we're trying to boot from an .ISO image. + * These always have a signature string "CD001" at offset 0x8001. 
+ */ +static bool has_iso_signature(void) +{ + int blksize = virtio_get_block_size(); + + if (!blksize || virtio_read(0x8000 / blksize, sec)) { + return false; + } + + return !memcmp("CD001", &sec[1], 5); +} + /*********************************************************************** * Bus specific IPL sequences */ static void zipl_load_vblk(void) { - if (virtio_guessed_disk_nature()) { - virtio_assume_iso9660(); - } - ipl_iso_el_torito(); + int blksize = virtio_get_block_size(); - if (virtio_guessed_disk_nature()) { + if (blksize == VIRTIO_ISO_BLOCK_SIZE || has_iso_signature()) { + if (blksize != VIRTIO_ISO_BLOCK_SIZE) { + virtio_assume_iso9660(); + } + ipl_iso_el_torito(); + } + + if (blksize != VIRTIO_DASD_DEFAULT_BLOCK_SIZE) { sclp_print("Using guessed DASD geometry.\n"); virtio_assume_eckd(); } diff --git a/pc-bios/s390-ccw/cio.h b/pc-bios/s390-ccw/cio.h index 1e5d4e92e1..88a88adfd2 100644 --- a/pc-bios/s390-ccw/cio.h +++ b/pc-bios/s390-ccw/cio.h @@ -20,7 +20,7 @@ struct pmcw { __u32 intparm; /* interruption parameter */ __u32 qf:1; /* qdio facility */ __u32 w:1; - __u32 isc:3; /* interruption sublass */ + __u32 isc:3; /* interruption subclass */ __u32 res5:3; /* reserved zeros */ __u32 ena:1; /* enabled */ __u32 lm:2; /* limit mode */ diff --git a/pc-bios/s390-ccw/iplb.h b/pc-bios/s390-ccw/iplb.h index 772d5c57c9..cb6ac8a880 100644 --- a/pc-bios/s390-ccw/iplb.h +++ b/pc-bios/s390-ccw/iplb.h @@ -81,7 +81,7 @@ extern IplParameterBlock iplb __attribute__((__aligned__(PAGE_SIZE))); #define QIPL_FLAG_BM_OPTS_ZIPL 0x40 /* - * This definition must be kept in sync with the defininition + * This definition must be kept in sync with the definition * in hw/s390x/ipl.h */ struct QemuIplParameters { diff --git a/pc-bios/s390-ccw/main.c b/pc-bios/s390-ccw/main.c index 5d2b7ba94d..a2def83e82 100644 --- a/pc-bios/s390-ccw/main.c +++ b/pc-bios/s390-ccw/main.c @@ -14,6 +14,7 @@ #include "s390-ccw.h" #include "cio.h" #include "virtio.h" +#include "virtio-scsi.h" #include "dasd-ipl.h" char stack[PAGE_SIZE * 8] __attribute__((__aligned__(PAGE_SIZE))); @@ -218,6 +219,7 @@ static int virtio_setup(void) { VDev *vdev = virtio_get_device(); QemuIplParameters *early_qipl = (QemuIplParameters *)QIPL_ADDRESS; + int ret; memcpy(&qipl, early_qipl, sizeof(QemuIplParameters)); @@ -225,18 +227,26 @@ static int virtio_setup(void) menu_setup(); } - if (virtio_get_device_type() == VIRTIO_ID_NET) { + switch (vdev->senseid.cu_model) { + case VIRTIO_ID_NET: sclp_print("Network boot device detected\n"); vdev->netboot_start_addr = qipl.netboot_start_addr; - } else { - int ret = virtio_blk_setup_device(blk_schid); - if (ret) { - return ret; - } + return 0; + case VIRTIO_ID_BLOCK: + ret = virtio_blk_setup_device(blk_schid); + break; + case VIRTIO_ID_SCSI: + ret = virtio_scsi_setup_device(blk_schid); + break; + default: + panic("\n! 
No IPL device available !\n"); + } + + if (!ret) { IPL_assert(virtio_ipl_disk_is_valid(), "No valid IPL device detected"); } - return 0; + return ret; } static void ipl_boot_device(void) @@ -281,7 +291,7 @@ static void probe_boot_device(void) sclp_print("Could not find a suitable boot device (none specified)\n"); } -int main(void) +void main(void) { sclp_setup(); css_setup(); @@ -294,5 +304,4 @@ int main(void) } panic("Failed to load OS from hard disk\n"); - return 0; /* make compiler happy */ } diff --git a/pc-bios/s390-ccw/netboot.mak b/pc-bios/s390-ccw/netboot.mak index 68b4d7edcb..046aa35587 100644 --- a/pc-bios/s390-ccw/netboot.mak +++ b/pc-bios/s390-ccw/netboot.mak @@ -1,5 +1,5 @@ -SLOF_DIR := $(SRC_PATH)/roms/SLOF +SLOF_DIR := $(SRC_PATH)/../../roms/SLOF NETOBJS := start.o sclp.o cio.o virtio.o virtio-net.o jump2ipl.o netmain.o @@ -8,55 +8,55 @@ LIBNET_INC := -I$(SLOF_DIR)/lib/libnet NETLDFLAGS := $(LDFLAGS) -Wl,-Ttext=0x7800000 -$(NETOBJS): QEMU_CFLAGS += $(LIBC_INC) $(LIBNET_INC) +$(NETOBJS): EXTRA_CFLAGS += $(LIBC_INC) $(LIBNET_INC) s390-netboot.elf: $(NETOBJS) libnet.a libc.a - $(call quiet-command,$(CC) $(NETLDFLAGS) -o $@ $^,"BUILD","$(TARGET_DIR)$@") + $(call quiet-command,$(CC) $(NETLDFLAGS) -o $@ $^,Linking) s390-netboot.img: s390-netboot.elf - $(call quiet-command,$(STRIP) --strip-unneeded $< -o $@,"STRIP","$(TARGET_DIR)$@") + $(call quiet-command,$(STRIP) --strip-unneeded $< -o $@,Stripping $< into) # libc files: -LIBC_CFLAGS = $(QEMU_CFLAGS) $(CFLAGS) $(LIBC_INC) $(LIBNET_INC) \ +LIBC_CFLAGS = $(EXTRA_CFLAGS) $(CFLAGS) $(LIBC_INC) $(LIBNET_INC) \ -MMD -MP -MT $@ -MF $(@:%.o=%.d) CTYPE_OBJS = isdigit.o isxdigit.o toupper.o %.o : $(SLOF_DIR)/lib/libc/ctype/%.c - $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,"CC","$(TARGET_DIR)$@") + $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling) STRING_OBJS = strcat.o strchr.o strrchr.o strcpy.o strlen.o strncpy.o \ strcmp.o strncmp.o strcasecmp.o strncasecmp.o strstr.o \ memset.o memcpy.o memmove.o memcmp.o %.o : $(SLOF_DIR)/lib/libc/string/%.c - $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,"CC","$(TARGET_DIR)$@") + $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling) STDLIB_OBJS = atoi.o atol.o strtoul.o strtol.o rand.o malloc.o free.o %.o : $(SLOF_DIR)/lib/libc/stdlib/%.c - $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,"CC","$(TARGET_DIR)$@") + $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling) STDIO_OBJS = sprintf.o snprintf.o vfprintf.o vsnprintf.o vsprintf.o fprintf.o \ printf.o putc.o puts.o putchar.o stdchnls.o fileno.o %.o : $(SLOF_DIR)/lib/libc/stdio/%.c - $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,"CC","$(TARGET_DIR)$@") + $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling) sbrk.o: $(SLOF_DIR)/slof/sbrk.c - $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,"CC","$(TARGET_DIR)$@") + $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling) LIBCOBJS := $(STRING_OBJS) $(CTYPE_OBJS) $(STDLIB_OBJS) $(STDIO_OBJS) sbrk.o libc.a: $(LIBCOBJS) - $(call quiet-command,$(AR) -rc $@ $^,"AR","$(TARGET_DIR)$@") + $(call quiet-command,$(AR) -rc $@ $^,Creating static library) # libnet files: LIBNETOBJS := args.o dhcp.o dns.o icmpv6.o ipv6.o tcp.o udp.o bootp.o \ dhcpv6.o ethernet.o ipv4.o ndp.o tftp.o pxelinux.o -LIBNETCFLAGS = $(QEMU_CFLAGS) $(CFLAGS) $(LIBC_INC) $(LIBNET_INC) \ +LIBNETCFLAGS = $(EXTRA_CFLAGS) $(CFLAGS) $(LIBC_INC) $(LIBNET_INC) \ -DDHCPARCH=0x1F -MMD -MP -MT $@ -MF $(@:%.o=%.d) %.o : $(SLOF_DIR)/lib/libnet/%.c - $(call 
quiet-command,$(CC) $(LIBNETCFLAGS) -c -o $@ $<,"CC","$(TARGET_DIR)$@") + $(call quiet-command,$(CC) $(LIBNETCFLAGS) -c -o $@ $<,Compiling) libnet.a: $(LIBNETOBJS) - $(call quiet-command,$(AR) -rc $@ $^,"AR","$(TARGET_DIR)$@") + $(call quiet-command,$(AR) -rc $@ $^,Creating static library) diff --git a/pc-bios/s390-ccw/s390-ccw.h b/pc-bios/s390-ccw/s390-ccw.h index 79db69ff54..b88e0550ab 100644 --- a/pc-bios/s390-ccw/s390-ccw.h +++ b/pc-bios/s390-ccw/s390-ccw.h @@ -57,6 +57,7 @@ void write_subsystem_identification(void); void write_iplb_location(void); extern char stack[PAGE_SIZE * 8] __attribute__((__aligned__(PAGE_SIZE))); unsigned int get_loadparm_index(void); +void main(void); /* sclp.c */ void sclp_print(const char *string); diff --git a/pc-bios/s390-ccw/start.S b/pc-bios/s390-ccw/start.S index 4d5ad21653..6072906df4 100644 --- a/pc-bios/s390-ccw/start.S +++ b/pc-bios/s390-ccw/start.S @@ -19,7 +19,7 @@ _start: larl %r2, __bss_start larl %r3, _end slgr %r3, %r2 /* get sizeof bss */ - ltgr %r3,%r3 /* bss emtpy? */ + ltgr %r3,%r3 /* bss empty? */ jz done aghi %r3,-1 srlg %r4,%r3,8 /* how many 256 byte chunks? */ diff --git a/pc-bios/s390-ccw/virtio-blkdev.c b/pc-bios/s390-ccw/virtio-blkdev.c index 7d35050292..794f99b42c 100644 --- a/pc-bios/s390-ccw/virtio-blkdev.c +++ b/pc-bios/s390-ccw/virtio-blkdev.c @@ -13,6 +13,9 @@ #include "virtio.h" #include "virtio-scsi.h" +#define VIRTIO_BLK_F_GEOMETRY (1 << 4) +#define VIRTIO_BLK_F_BLK_SIZE (1 << 6) + static int virtio_blk_read_many(VDev *vdev, ulong sector, void *load_addr, int sec_num) { @@ -112,23 +115,6 @@ VirtioGDN virtio_guessed_disk_nature(void) return virtio_get_device()->guessed_disk_nature; } -void virtio_assume_scsi(void) -{ - VDev *vdev = virtio_get_device(); - - switch (vdev->senseid.cu_model) { - case VIRTIO_ID_BLOCK: - vdev->guessed_disk_nature = VIRTIO_GDN_SCSI; - vdev->config.blk.blk_size = VIRTIO_SCSI_BLOCK_SIZE; - vdev->config.blk.physical_block_exp = 0; - vdev->blk_factor = 1; - break; - case VIRTIO_ID_SCSI: - vdev->scsi_block_size = VIRTIO_SCSI_BLOCK_SIZE; - break; - } -} - void virtio_assume_iso9660(void) { VDev *vdev = virtio_get_device(); @@ -155,7 +141,7 @@ void virtio_assume_eckd(void) vdev->config.blk.physical_block_exp = 0; switch (vdev->senseid.cu_model) { case VIRTIO_ID_BLOCK: - vdev->config.blk.blk_size = 4096; + vdev->config.blk.blk_size = VIRTIO_DASD_DEFAULT_BLOCK_SIZE; break; case VIRTIO_ID_SCSI: vdev->config.blk.blk_size = vdev->scsi_block_size; @@ -166,46 +152,19 @@ void virtio_assume_eckd(void) virtio_eckd_sectors_for_block_size(vdev->config.blk.blk_size); } -bool virtio_disk_is_scsi(void) -{ - VDev *vdev = virtio_get_device(); - - if (vdev->guessed_disk_nature == VIRTIO_GDN_SCSI) { - return true; - } - switch (vdev->senseid.cu_model) { - case VIRTIO_ID_BLOCK: - return (vdev->config.blk.geometry.heads == 255) - && (vdev->config.blk.geometry.sectors == 63) - && (virtio_get_block_size() == VIRTIO_SCSI_BLOCK_SIZE); - case VIRTIO_ID_SCSI: - return true; - } - return false; -} - -bool virtio_disk_is_eckd(void) -{ - VDev *vdev = virtio_get_device(); - const int block_size = virtio_get_block_size(); - - if (vdev->guessed_disk_nature == VIRTIO_GDN_DASD) { - return true; - } - switch (vdev->senseid.cu_model) { - case VIRTIO_ID_BLOCK: - return (vdev->config.blk.geometry.heads == 15) - && (vdev->config.blk.geometry.sectors == - virtio_eckd_sectors_for_block_size(block_size)); - case VIRTIO_ID_SCSI: - return false; - } - return false; -} - bool virtio_ipl_disk_is_valid(void) { - return virtio_disk_is_scsi() || 
virtio_disk_is_eckd(); + int blksize = virtio_get_block_size(); + VDev *vdev = virtio_get_device(); + + if (vdev->guessed_disk_nature == VIRTIO_GDN_SCSI || + vdev->guessed_disk_nature == VIRTIO_GDN_DASD) { + return true; + } + + return (vdev->senseid.cu_model == VIRTIO_ID_BLOCK || + vdev->senseid.cu_model == VIRTIO_ID_SCSI) && + blksize >= 512 && blksize <= 4096; } int virtio_get_block_size(void) @@ -214,7 +173,7 @@ int virtio_get_block_size(void) switch (vdev->senseid.cu_model) { case VIRTIO_ID_BLOCK: - return vdev->config.blk.blk_size << vdev->config.blk.physical_block_exp; + return vdev->config.blk.blk_size; case VIRTIO_ID_SCSI: return vdev->scsi_block_size; } @@ -266,34 +225,12 @@ uint64_t virtio_get_blocks(void) int virtio_blk_setup_device(SubChannelId schid) { VDev *vdev = virtio_get_device(); - int ret = 0; + vdev->guest_features[0] = VIRTIO_BLK_F_GEOMETRY | VIRTIO_BLK_F_BLK_SIZE; vdev->schid = schid; virtio_setup_ccw(vdev); - switch (vdev->senseid.cu_model) { - case VIRTIO_ID_BLOCK: - sclp_print("Using virtio-blk.\n"); - if (!virtio_ipl_disk_is_valid()) { - /* make sure all getters but blocksize return 0 for - * invalid IPL disk - */ - memset(&vdev->config.blk, 0, sizeof(vdev->config.blk)); - virtio_assume_scsi(); - } - break; - case VIRTIO_ID_SCSI: - IPL_assert(vdev->config.scsi.sense_size == VIRTIO_SCSI_SENSE_SIZE, - "Config: sense size mismatch"); - IPL_assert(vdev->config.scsi.cdb_size == VIRTIO_SCSI_CDB_SIZE, - "Config: CDB size mismatch"); + sclp_print("Using virtio-blk.\n"); - sclp_print("Using virtio-scsi.\n"); - ret = virtio_scsi_setup(vdev); - break; - default: - panic("\n! No IPL device available !\n"); - } - - return ret; + return 0; } diff --git a/pc-bios/s390-ccw/virtio-scsi.c b/pc-bios/s390-ccw/virtio-scsi.c index 2c8d0f3097..dcce696a33 100644 --- a/pc-bios/s390-ccw/virtio-scsi.c +++ b/pc-bios/s390-ccw/virtio-scsi.c @@ -195,7 +195,7 @@ static bool scsi_read_capacity(VDev *vdev, /* virtio-scsi routines */ /* - * Tries to locate a SCSI device and and adds the information for the found + * Tries to locate a SCSI device and adds the information for the found * device to the vdev->scsi_device structure. 
* Returns 0 if SCSI device could be located, or a error code < 0 otherwise */ @@ -329,7 +329,7 @@ static void scsi_parse_capacity_report(void *data, } } -int virtio_scsi_setup(VDev *vdev) +static int virtio_scsi_setup(VDev *vdev) { int retry_test_unit_ready = 3; uint8_t data[256]; @@ -430,3 +430,20 @@ int virtio_scsi_setup(VDev *vdev) return 0; } + +int virtio_scsi_setup_device(SubChannelId schid) +{ + VDev *vdev = virtio_get_device(); + + vdev->schid = schid; + virtio_setup_ccw(vdev); + + IPL_assert(vdev->config.scsi.sense_size == VIRTIO_SCSI_SENSE_SIZE, + "Config: sense size mismatch"); + IPL_assert(vdev->config.scsi.cdb_size == VIRTIO_SCSI_CDB_SIZE, + "Config: CDB size mismatch"); + + sclp_print("Using virtio-scsi.\n"); + + return virtio_scsi_setup(vdev); +} diff --git a/pc-bios/s390-ccw/virtio-scsi.h b/pc-bios/s390-ccw/virtio-scsi.h index 4b14c2c2f9..e6b6cd4815 100644 --- a/pc-bios/s390-ccw/virtio-scsi.h +++ b/pc-bios/s390-ccw/virtio-scsi.h @@ -67,8 +67,8 @@ static inline bool virtio_scsi_response_ok(const VirtioScsiCmdResp *r) return r->response == VIRTIO_SCSI_S_OK && r->status == CDB_STATUS_GOOD; } -int virtio_scsi_setup(VDev *vdev); int virtio_scsi_read_many(VDev *vdev, ulong sector, void *load_addr, int sec_num); +int virtio_scsi_setup_device(SubChannelId schid); #endif /* VIRTIO_SCSI_H */ diff --git a/pc-bios/s390-ccw/virtio.c b/pc-bios/s390-ccw/virtio.c index 5d2c6e3381..f37510f312 100644 --- a/pc-bios/s390-ccw/virtio.c +++ b/pc-bios/s390-ccw/virtio.c @@ -220,7 +220,7 @@ int virtio_run(VDev *vdev, int vqid, VirtioCmd *cmd) void virtio_setup_ccw(VDev *vdev) { int i, rc, cfg_size = 0; - unsigned char status = VIRTIO_CONFIG_S_DRIVER_OK; + uint8_t status; struct VirtioFeatureDesc { uint32_t features; uint8_t index; @@ -234,6 +234,10 @@ void virtio_setup_ccw(VDev *vdev) run_ccw(vdev, CCW_CMD_VDEV_RESET, NULL, 0, false); + status = VIRTIO_CONFIG_S_ACKNOWLEDGE; + rc = run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false); + IPL_assert(rc == 0, "Could not write ACKNOWLEDGE status to host"); + switch (vdev->senseid.cu_model) { case VIRTIO_ID_NET: vdev->nr_vqs = 2; @@ -253,9 +257,10 @@ void virtio_setup_ccw(VDev *vdev) default: panic("Unsupported virtio device\n"); } - IPL_assert( - run_ccw(vdev, CCW_CMD_READ_CONF, &vdev->config, cfg_size, false) == 0, - "Could not get block device configuration"); + + status |= VIRTIO_CONFIG_S_DRIVER; + rc = run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false); + IPL_assert(rc == 0, "Could not write DRIVER status to host"); /* Feature negotiation */ for (i = 0; i < ARRAY_SIZE(vdev->guest_features); i++) { @@ -269,6 +274,9 @@ void virtio_setup_ccw(VDev *vdev) IPL_assert(rc == 0, "Could not set features bits"); } + rc = run_ccw(vdev, CCW_CMD_READ_CONF, &vdev->config, cfg_size, false); + IPL_assert(rc == 0, "Could not get virtio device configuration"); + for (i = 0; i < vdev->nr_vqs; i++) { VqInfo info = { .queue = (unsigned long long) ring_area + (i * VIRTIO_RING_SIZE), @@ -281,9 +289,8 @@ void virtio_setup_ccw(VDev *vdev) .num = 0, }; - IPL_assert( - run_ccw(vdev, CCW_CMD_READ_VQ_CONF, &config, sizeof(config), false) == 0, - "Could not get block device VQ configuration"); + rc = run_ccw(vdev, CCW_CMD_READ_VQ_CONF, &config, sizeof(config), false); + IPL_assert(rc == 0, "Could not get virtio device VQ configuration"); info.num = config.num; vring_init(&vdev->vrings[i], &info); vdev->vrings[i].schid = vdev->schid; @@ -291,9 +298,10 @@ void virtio_setup_ccw(VDev *vdev) run_ccw(vdev, CCW_CMD_SET_VQ, &info, sizeof(info), false) == 0, 
"Cannot set VQ info"); } - IPL_assert( - run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false) == 0, - "Could not write status to host"); + + status |= VIRTIO_CONFIG_S_DRIVER_OK; + rc = run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false); + IPL_assert(rc == 0, "Could not write DRIVER_OK status to host"); } bool virtio_is_supported(SubChannelId schid) diff --git a/pc-bios/s390-ccw/virtio.h b/pc-bios/s390-ccw/virtio.h index 19fceb6495..e657d381ec 100644 --- a/pc-bios/s390-ccw/virtio.h +++ b/pc-bios/s390-ccw/virtio.h @@ -182,22 +182,20 @@ enum guessed_disk_nature_type { typedef enum guessed_disk_nature_type VirtioGDN; VirtioGDN virtio_guessed_disk_nature(void); -void virtio_assume_scsi(void); void virtio_assume_eckd(void); void virtio_assume_iso9660(void); -extern bool virtio_disk_is_scsi(void); -extern bool virtio_disk_is_eckd(void); -extern bool virtio_ipl_disk_is_valid(void); -extern int virtio_get_block_size(void); -extern uint8_t virtio_get_heads(void); -extern uint8_t virtio_get_sectors(void); -extern uint64_t virtio_get_blocks(void); -extern int virtio_read_many(ulong sector, void *load_addr, int sec_num); +bool virtio_ipl_disk_is_valid(void); +int virtio_get_block_size(void); +uint8_t virtio_get_heads(void); +uint8_t virtio_get_sectors(void); +uint64_t virtio_get_blocks(void); +int virtio_read_many(ulong sector, void *load_addr, int sec_num); #define VIRTIO_SECTOR_SIZE 512 #define VIRTIO_ISO_BLOCK_SIZE 2048 #define VIRTIO_SCSI_BLOCK_SIZE 512 +#define VIRTIO_DASD_DEFAULT_BLOCK_SIZE 4096 static inline ulong virtio_sector_adjust(ulong sector) { diff --git a/pc-bios/s390-netboot.img b/pc-bios/s390-netboot.img index bc34af8a28..682da24a05 100644 Binary files a/pc-bios/s390-netboot.img and b/pc-bios/s390-netboot.img differ diff --git a/pc-bios/skiboot.lid b/pc-bios/skiboot.lid index 504b95e8b6..58ec5ec38e 100644 Binary files a/pc-bios/skiboot.lid and b/pc-bios/skiboot.lid differ diff --git a/pc-bios/slof.bin b/pc-bios/slof.bin index 855521e650..ef9b81d628 100644 Binary files a/pc-bios/slof.bin and b/pc-bios/slof.bin differ diff --git a/pc-bios/vgabios-ati.bin b/pc-bios/vgabios-ati.bin index 118caacde6..39b2405148 100644 Binary files a/pc-bios/vgabios-ati.bin and b/pc-bios/vgabios-ati.bin differ diff --git a/pc-bios/vgabios-bochs-display.bin b/pc-bios/vgabios-bochs-display.bin index d79124b760..b20d67ccf5 100644 Binary files a/pc-bios/vgabios-bochs-display.bin and b/pc-bios/vgabios-bochs-display.bin differ diff --git a/pc-bios/vgabios-cirrus.bin b/pc-bios/vgabios-cirrus.bin index 37bf5e7fe7..ebe53366e4 100644 Binary files a/pc-bios/vgabios-cirrus.bin and b/pc-bios/vgabios-cirrus.bin differ diff --git a/pc-bios/vgabios-qxl.bin b/pc-bios/vgabios-qxl.bin index 0ff8ff21f0..4b5573a857 100644 Binary files a/pc-bios/vgabios-qxl.bin and b/pc-bios/vgabios-qxl.bin differ diff --git a/pc-bios/vgabios-ramfb.bin b/pc-bios/vgabios-ramfb.bin index df5e9d615a..d458ec7436 100644 Binary files a/pc-bios/vgabios-ramfb.bin and b/pc-bios/vgabios-ramfb.bin differ diff --git a/pc-bios/vgabios-stdvga.bin b/pc-bios/vgabios-stdvga.bin index 70f6094153..797e1036c9 100644 Binary files a/pc-bios/vgabios-stdvga.bin and b/pc-bios/vgabios-stdvga.bin differ diff --git a/pc-bios/vgabios-virtio.bin b/pc-bios/vgabios-virtio.bin index 65af5be59d..3f8fe9de13 100644 Binary files a/pc-bios/vgabios-virtio.bin and b/pc-bios/vgabios-virtio.bin differ diff --git a/pc-bios/vgabios-vmware.bin b/pc-bios/vgabios-vmware.bin index 89d42b32c1..d5f263a9f7 100644 Binary files a/pc-bios/vgabios-vmware.bin and 
b/pc-bios/vgabios-vmware.bin differ diff --git a/pc-bios/vgabios.bin b/pc-bios/vgabios.bin index 320471e18d..d26af416ce 100644 Binary files a/pc-bios/vgabios.bin and b/pc-bios/vgabios.bin differ diff --git a/pc-bios/vof/Makefile b/pc-bios/vof/Makefile index aa1678c4d8..d1eb6ced7e 100644 --- a/pc-bios/vof/Makefile +++ b/pc-bios/vof/Makefile @@ -1,23 +1,31 @@ -all: build-all +include config.mak +VPATH=$(SRC_DIR) +all: vof.bin -build-all: vof.bin +NULL := +SPACE := $(NULL) # +TARGET_PREFIX := $(patsubst %/,%:$(SPACE),$(TARGET_DIR)) -CROSS ?= -CC = $(CROSS)gcc -LD = $(CROSS)ld -OBJCOPY = $(CROSS)objcopy +quiet-@ = $(if $(V),,@$(if $1,,printf "%s\n" "$(TARGET_PREFIX)$1" && )) +quiet-command = $(call quiet-@,$2 $@)$1 + +EXTRA_CFLAGS += -mcpu=power4 %.o: %.S - $(CC) -m32 -mbig-endian -mcpu=power4 -c -o $@ $< + $(call quiet-command, $(CC) $(EXTRA_CFLAGS) -c -o $@ $<,Assembling) %.o: %.c - $(CC) -m32 -mbig-endian -mcpu=power4 -c -fno-stack-protector -o $@ $< + $(call quiet-command, $(CC) $(EXTRA_CFLAGS) -c -fno-stack-protector -o $@ $<,Compiling) vof.elf: entry.o main.o ci.o bootmem.o libc.o - $(LD) -nostdlib -e_start -Tvof.lds -EB -o $@ $^ + $(call quiet-command, $(LD) -nostdlib -e_start -T$(SRC_DIR)/vof.lds -EB -o $@ $^,Linking) %.bin: %.elf - $(OBJCOPY) -O binary -j .text -j .data -j .toc -j .got2 $^ $@ + $(call quiet-command, $(OBJCOPY) -O binary -j .text -j .data -j .toc -j .got2 $^ $@,Extracting raw object) clean: rm -f *.o vof.bin vof.elf *~ + +distclean: clean + +.PHONY: all clean distclean diff --git a/plugins/api.c b/plugins/api.c index 2d521e6ba8..2078b16edb 100644 --- a/plugins/api.c +++ b/plugins/api.c @@ -36,6 +36,7 @@ #include "qemu/osdep.h" #include "qemu/plugin.h" +#include "qemu/log.h" #include "tcg/tcg.h" #include "exec/exec-all.h" #include "exec/ram_addr.h" @@ -44,8 +45,12 @@ #ifndef CONFIG_USER_ONLY #include "qemu/plugin-memory.h" #include "hw/boards.h" +#else +#include "qemu.h" +#ifdef CONFIG_LINUX +#include "loader.h" +#endif #endif -#include "trace/mem.h" /* Uninstall and Reset handlers */ @@ -246,22 +251,25 @@ const char *qemu_plugin_insn_symbol(const struct qemu_plugin_insn *insn) unsigned qemu_plugin_mem_size_shift(qemu_plugin_meminfo_t info) { - return info & TRACE_MEM_SZ_SHIFT_MASK; + MemOp op = get_memop(info); + return op & MO_SIZE; } bool qemu_plugin_mem_is_sign_extended(qemu_plugin_meminfo_t info) { - return !!(info & TRACE_MEM_SE); + MemOp op = get_memop(info); + return op & MO_SIGN; } bool qemu_plugin_mem_is_big_endian(qemu_plugin_meminfo_t info) { - return !!(info & TRACE_MEM_BE); + MemOp op = get_memop(info); + return (op & MO_BSWAP) == MO_BE; } bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info) { - return !!(info & TRACE_MEM_ST); + return get_plugin_meminfo_rw(info) & QEMU_PLUGIN_MEM_W; } /* @@ -277,11 +285,14 @@ struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info, { #ifdef CONFIG_SOFTMMU CPUState *cpu = current_cpu; - unsigned int mmu_idx = info >> TRACE_MEM_MMU_SHIFT; - hwaddr_info.is_store = info & TRACE_MEM_ST; + unsigned int mmu_idx = get_mmuidx(info); + enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info); + hwaddr_info.is_store = (rw & QEMU_PLUGIN_MEM_W) != 0; + + assert(mmu_idx < NB_MMU_MODES); if (!tlb_plugin_lookup(cpu, vaddr, mmu_idx, - info & TRACE_MEM_ST, &hwaddr_info)) { + hwaddr_info.is_store, &hwaddr_info)) { error_report("invalid use of qemu_plugin_get_hwaddr"); return NULL; } @@ -383,3 +394,51 @@ void qemu_plugin_outs(const char *string) { qemu_log_mask(CPU_LOG_PLUGIN, "%s", string); } + +bool 
qemu_plugin_bool_parse(const char *name, const char *value, bool *ret) +{ + return name && value && qapi_bool_parse(name, value, ret, NULL); +} + +/* + * Binary path, start and end locations + */ +const char *qemu_plugin_path_to_binary(void) +{ + char *path = NULL; +#ifdef CONFIG_USER_ONLY + TaskState *ts = (TaskState *) current_cpu->opaque; + path = g_strdup(ts->bprm->filename); +#endif + return path; +} + +uint64_t qemu_plugin_start_code(void) +{ + uint64_t start = 0; +#ifdef CONFIG_USER_ONLY + TaskState *ts = (TaskState *) current_cpu->opaque; + start = ts->info->start_code; +#endif + return start; +} + +uint64_t qemu_plugin_end_code(void) +{ + uint64_t end = 0; +#ifdef CONFIG_USER_ONLY + TaskState *ts = (TaskState *) current_cpu->opaque; + end = ts->info->end_code; +#endif + return end; +} + +uint64_t qemu_plugin_entry_code(void) +{ + uint64_t entry = 0; +#ifdef CONFIG_USER_ONLY + TaskState *ts = (TaskState *) current_cpu->opaque; + entry = ts->info->entry; +#endif + return entry; +} diff --git a/plugins/core.c b/plugins/core.c index 6b2490f973..ccb770a485 100644 --- a/plugins/core.c +++ b/plugins/core.c @@ -27,7 +27,6 @@ #include "exec/helper-proto.h" #include "tcg/tcg.h" #include "tcg/tcg-op.h" -#include "trace/mem.h" /* mem_info macros */ #include "plugin.h" #include "qemu/compiler.h" @@ -57,7 +56,7 @@ struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id) static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data) { bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX); - cpu_tb_jmp_cache_clear(cpu); + tcg_flush_jmp_cache(cpu); } static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata) @@ -446,7 +445,8 @@ void exec_inline_op(struct qemu_plugin_dyn_cb *cb) } } -void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t info) +void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, + MemOpIdx oi, enum qemu_plugin_mem_rw rw) { GArray *arr = cpu->plugin_mem_cbs; size_t i; @@ -457,14 +457,14 @@ void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t info) for (i = 0; i < arr->len; i++) { struct qemu_plugin_dyn_cb *cb = &g_array_index(arr, struct qemu_plugin_dyn_cb, i); - int w = !!(info & TRACE_MEM_ST) + 1; - if (!(w & cb->rw)) { + if (!(rw & cb->rw)) { break; } switch (cb->type) { case PLUGIN_CB_REGULAR: - cb->f.vcpu_mem(cpu->cpu_index, info, vaddr, cb->userp); + cb->f.vcpu_mem(cpu->cpu_index, make_plugin_meminfo(oi, rw), + vaddr, cb->userp); break; case PLUGIN_CB_INLINE: exec_inline_op(cb); @@ -526,6 +526,26 @@ void qemu_plugin_user_exit(void) qemu_plugin_atexit_cb(); } +/* + * Helpers for *-user to ensure locks are sane across fork() events. + */ + +void qemu_plugin_user_prefork_lock(void) +{ + qemu_rec_mutex_lock(&plugin.lock); +} + +void qemu_plugin_user_postfork(bool is_child) +{ + if (is_child) { + /* should we just reset via plugin_init? */ + qemu_rec_mutex_init(&plugin.lock); + } else { + qemu_rec_mutex_unlock(&plugin.lock); + } +} + + /* * Call this function after longjmp'ing to the main loop. 
It's possible that the * last instruction of a TB might have used helpers, and therefore the diff --git a/plugins/loader.c b/plugins/loader.c index 05df40398d..88c30bde2d 100644 --- a/plugins/loader.c +++ b/plugins/loader.c @@ -24,8 +24,10 @@ #include "qemu/rcu_queue.h" #include "qemu/qht.h" #include "qemu/bitmap.h" +#include "qemu/cacheinfo.h" #include "qemu/xxhash.h" #include "qemu/plugin.h" +#include "qemu/memalign.h" #include "hw/core/cpu.h" #include "exec/exec-all.h" #ifndef CONFIG_USER_ONLY @@ -94,6 +96,8 @@ static int plugin_add(void *opaque, const char *name, const char *value, { struct qemu_plugin_parse_arg *arg = opaque; struct qemu_plugin_desc *p; + bool is_on; + char *fullarg; if (strcmp(name, "file") == 0) { if (strcmp(value, "") == 0) { @@ -107,18 +111,32 @@ static int plugin_add(void *opaque, const char *name, const char *value, QTAILQ_INSERT_TAIL(arg->head, p, entry); } arg->curr = p; - } else if (strcmp(name, "arg") == 0) { + } else { if (arg->curr == NULL) { error_setg(errp, "missing earlier '-plugin file=' option"); return 1; } + + if (g_strcmp0(name, "arg") == 0 && + !qapi_bool_parse(name, value, &is_on, NULL)) { + if (strchr(value, '=') == NULL) { + /* Will treat arg="argname" as "argname=on" */ + fullarg = g_strdup_printf("%s=%s", value, "on"); + } else { + fullarg = g_strdup_printf("%s", value); + } + warn_report("using 'arg=%s' is deprecated", value); + error_printf("Please use '%s' directly\n", fullarg); + } else { + fullarg = g_strdup_printf("%s=%s", name, value); + } + p = arg->curr; p->argc++; p->argv = g_realloc_n(p->argv, p->argc, sizeof(char *)); - p->argv[p->argc - 1] = g_strdup(value); - } else { - error_setg(errp, "-plugin: unexpected parameter '%s'; ignored", name); + p->argv[p->argc - 1] = fullarg; } + return 0; } diff --git a/plugins/meson.build b/plugins/meson.build index e77723010e..752377c66d 100644 --- a/plugins/meson.build +++ b/plugins/meson.build @@ -1,9 +1,16 @@ -if 'CONFIG_HAS_LD_DYNAMIC_LIST' in config_host - plugin_ldflags = ['-Wl,--dynamic-list=' + (meson.build_root() / 'qemu-plugins-ld.symbols')] -elif 'CONFIG_HAS_LD_EXPORTED_SYMBOLS_LIST' in config_host - plugin_ldflags = ['-Wl,-exported_symbols_list,' + (meson.build_root() / 'qemu-plugins-ld64.symbols')] -else - plugin_ldflags = [] +plugin_ldflags = [] +# Modules need more symbols than just those in plugins/qemu-plugins.symbols +if not enable_modules + if targetos == 'darwin' + configure_file( + input: files('qemu-plugins.symbols'), + output: 'qemu-plugins-ld64.symbols', + capture: true, + command: ['sed', '-ne', 's/^[[:space:]]*\\(qemu_.*\\);/_\\1/p', '@INPUT@']) + plugin_ldflags = ['-Wl,-exported_symbols_list,plugins/qemu-plugins-ld64.symbols'] + else + plugin_ldflags = ['-Xlinker', '--dynamic-list=' + (meson.project_source_root() / 'plugins/qemu-plugins.symbols')] + endif endif specific_ss.add(when: 'CONFIG_PLUGIN', if_true: [files( diff --git a/plugins/plugin.h b/plugins/plugin.h index b13677d0dc..5eb2fdbc85 100644 --- a/plugins/plugin.h +++ b/plugins/plugin.h @@ -9,8 +9,8 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef PLUGIN_INTERNAL_H -#define PLUGIN_INTERNAL_H +#ifndef PLUGIN_H +#define PLUGIN_H #include #include "qemu/qht.h" @@ -97,4 +97,4 @@ void plugin_register_vcpu_mem_cb(GArray **arr, void exec_inline_op(struct qemu_plugin_dyn_cb *cb); -#endif /* _PLUGIN_INTERNAL_H_ */ +#endif /* PLUGIN_H */ diff --git a/plugins/qemu-plugins.symbols b/plugins/qemu-plugins.symbols index 40b4ff3821..71f6c90549 100644 --- a/plugins/qemu-plugins.symbols +++ 
b/plugins/qemu-plugins.symbols @@ -1,37 +1,45 @@ { - qemu_plugin_uninstall; - qemu_plugin_reset; - qemu_plugin_register_vcpu_init_cb; + qemu_plugin_bool_parse; + qemu_plugin_end_code; + qemu_plugin_entry_code; + qemu_plugin_get_hwaddr; + qemu_plugin_hwaddr_device_name; + qemu_plugin_hwaddr_is_io; + qemu_plugin_hwaddr_phys_addr; + qemu_plugin_insn_data; + qemu_plugin_insn_disas; + qemu_plugin_insn_haddr; + qemu_plugin_insn_size; + qemu_plugin_insn_symbol; + qemu_plugin_insn_vaddr; + qemu_plugin_mem_is_big_endian; + qemu_plugin_mem_is_sign_extended; + qemu_plugin_mem_is_store; + qemu_plugin_mem_size_shift; + qemu_plugin_n_max_vcpus; + qemu_plugin_n_vcpus; + qemu_plugin_outs; + qemu_plugin_path_to_binary; + qemu_plugin_register_atexit_cb; + qemu_plugin_register_flush_cb; qemu_plugin_register_vcpu_exit_cb; qemu_plugin_register_vcpu_idle_cb; - qemu_plugin_register_vcpu_resume_cb; + qemu_plugin_register_vcpu_init_cb; qemu_plugin_register_vcpu_insn_exec_cb; qemu_plugin_register_vcpu_insn_exec_inline; qemu_plugin_register_vcpu_mem_cb; qemu_plugin_register_vcpu_mem_inline; - qemu_plugin_register_vcpu_tb_trans_cb; - qemu_plugin_register_vcpu_tb_exec_cb; - qemu_plugin_register_vcpu_tb_exec_inline; - qemu_plugin_register_flush_cb; + qemu_plugin_register_vcpu_resume_cb; qemu_plugin_register_vcpu_syscall_cb; qemu_plugin_register_vcpu_syscall_ret_cb; - qemu_plugin_register_atexit_cb; - qemu_plugin_tb_n_insns; + qemu_plugin_register_vcpu_tb_exec_cb; + qemu_plugin_register_vcpu_tb_exec_inline; + qemu_plugin_register_vcpu_tb_trans_cb; + qemu_plugin_reset; + qemu_plugin_start_code; qemu_plugin_tb_get_insn; + qemu_plugin_tb_n_insns; qemu_plugin_tb_vaddr; - qemu_plugin_insn_data; - qemu_plugin_insn_size; - qemu_plugin_insn_vaddr; - qemu_plugin_insn_haddr; - qemu_plugin_insn_disas; - qemu_plugin_mem_size_shift; - qemu_plugin_mem_is_sign_extended; - qemu_plugin_mem_is_big_endian; - qemu_plugin_mem_is_store; - qemu_plugin_get_hwaddr; - qemu_plugin_hwaddr_is_io; + qemu_plugin_uninstall; qemu_plugin_vcpu_for_each; - qemu_plugin_n_vcpus; - qemu_plugin_n_max_vcpus; - qemu_plugin_outs; }; diff --git a/po/LINGUAS b/po/LINGUAS index cc4b5c3b36..9b33a3659f 100644 --- a/po/LINGUAS +++ b/po/LINGUAS @@ -5,4 +5,5 @@ hu it sv tr +uk zh_CN diff --git a/po/tr.po b/po/tr.po index 632c7f3851..f4f0425c43 100644 --- a/po/tr.po +++ b/po/tr.po @@ -1,14 +1,15 @@ # Turkish translation for QEMU. # This file is put in the public domain. # Ozan Çağlayan , 2013. +# Oğuz Ersen , 2021. 
# msgid "" msgstr "" "Project-Id-Version: QEMU 1.4.50\n" "Report-Msgid-Bugs-To: qemu-devel@nongnu.org\n" "POT-Creation-Date: 2018-07-18 07:56+0200\n" -"PO-Revision-Date: 2013-04-22 18:35+0300\n" -"Last-Translator: Ozan Çağlayan \n" +"PO-Revision-Date: 2021-08-15 22:17+0300\n" +"Last-Translator: Oğuz Ersen \n" "Language-Team: Türkçe <>\n" "Language: tr\n" "MIME-Version: 1.0\n" @@ -33,24 +34,22 @@ msgid "Power _Down" msgstr "_Kapat" msgid "_Quit" -msgstr "" +msgstr "_Çıkış" msgid "_Fullscreen" -msgstr "" +msgstr "_Tam Ekran" msgid "_Copy" -msgstr "" +msgstr "_Kopyala" -#, fuzzy msgid "Zoom _In" -msgstr "Yakınlaş ve Sığ_dır" +msgstr "_Yakınlaş" -#, fuzzy msgid "Zoom _Out" -msgstr "Yakınlaş ve Sığ_dır" +msgstr "_Uzaklaş" msgid "Best _Fit" -msgstr "" +msgstr "_En Uygun" msgid "Zoom To _Fit" msgstr "Yakınlaş ve Sığ_dır" @@ -62,13 +61,13 @@ msgid "_Grab Input" msgstr "Girdiyi _Yakala" msgid "Show _Tabs" -msgstr "Se_kmeleri Göster" +msgstr "_Sekmeleri Göster" msgid "Detach Tab" -msgstr "" +msgstr "Sekmeyi Ayır" msgid "Show Menubar" -msgstr "" +msgstr "Menü Çubuğunu Göster" msgid "_Machine" msgstr "_Makine" diff --git a/po/uk.po b/po/uk.po new file mode 100644 index 0000000000..ff037808bf --- /dev/null +++ b/po/uk.po @@ -0,0 +1,75 @@ +# Ukrainian translation for QEMU. +# This file is put in the public domain. +# Andrij Mizyk , 2022. +# +msgid "" +msgstr "" +"Project-Id-Version: QEMU 1.4.50\n" +"Report-Msgid-Bugs-To: qemu-devel@nongnu.org\n" +"POT-Creation-Date: 2018-07-18 07:56+0200\n" +"PO-Revision-Date: 2022-06-13 01:33+0300\n" +"Last-Translator: Andrij Mizyk \n" +"Language-Team: Ukrainian\n" +"Language: uk\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"X-Generator: Gtranslator 2.91.6\n" + +msgid " - Press Ctrl+Alt+G to release grab" +msgstr " - Натисніть Ctrl+Alt+G, щоб відпустити захоплення" + +msgid " [Paused]" +msgstr " [Призупинено]" + +msgid "_Pause" +msgstr "_Призупинити" + +msgid "_Reset" +msgstr "_Скинути" + +msgid "Power _Down" +msgstr "Вимкнути _живлення" + +msgid "_Quit" +msgstr "_Вийти" + +msgid "_Fullscreen" +msgstr "Повний _екран" + +msgid "_Copy" +msgstr "_Копіювати" + +msgid "Zoom _In" +msgstr "_Збільшити" + +msgid "Zoom _Out" +msgstr "З_меншити" + +msgid "Best _Fit" +msgstr "Найкращий _розмір" + +msgid "Zoom To _Fit" +msgstr "Збільшити до _розміру" + +msgid "Grab On _Hover" +msgstr "Захопити при _наведенні" + +msgid "_Grab Input" +msgstr "Захопити _введення" + +msgid "Show _Tabs" +msgstr "Показувати _вкладки" + +msgid "Detach Tab" +msgstr "Відʼєднати вкладку" + +msgid "Show Menubar" +msgstr "Показувати рядок меню" + +msgid "_Machine" +msgstr "_Машина" + +msgid "_View" +msgstr "_Вигляд" diff --git a/python/.gitignore b/python/.gitignore index c8b0e67fe6..904f324bb1 100644 --- a/python/.gitignore +++ b/python/.gitignore @@ -15,3 +15,8 @@ qemu.egg-info/ .venv/ .tox/ .dev-venv/ + +# Coverage.py reports +.coverage +.coverage.* +htmlcov/ diff --git a/python/Makefile b/python/Makefile index fe27a3e12e..b170708398 100644 --- a/python/Makefile +++ b/python/Makefile @@ -29,7 +29,7 @@ help: @echo " Performs no environment setup of any kind." @echo "" @echo "make develop:" - @echo " Install deps needed for for 'make check'," + @echo " Install deps needed for 'make check'," @echo " and install the qemu package in editable mode." 
@echo " (Can be used in or outside of a venv.)" @echo "" @@ -92,6 +92,13 @@ check: check-tox: @tox $(QEMU_TOX_EXTRA_ARGS) +.PHONY: check-coverage +check-coverage: + @coverage run -m avocado --config avocado.cfg run tests/*.py + @coverage combine + @coverage html + @coverage report + .PHONY: clean clean: python3 setup.py clean --all @@ -100,3 +107,5 @@ clean: .PHONY: distclean distclean: clean rm -rf qemu.egg-info/ .venv/ .tox/ $(QEMU_VENV_DIR) dist/ + rm -f .coverage .coverage.* + rm -rf htmlcov/ diff --git a/python/Pipfile.lock b/python/Pipfile.lock index 8ab41a3f60..ce46404ce0 100644 --- a/python/Pipfile.lock +++ b/python/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "eff562a688ebc6f3ffe67494dbb804b883e2159ad81c4d55d96da9f7aec13e91" + "sha256": "f1a25654d884a5b450e38d78b1f2e3ebb9073e421cc4358d4bbb83ac251a5670" }, "pipfile-spec": 6, "requires": { @@ -34,22 +34,23 @@ "sha256:09bdb456e02564731f8b5957cdd0c98a7f01d2db5e90eb1d794c353c28bfd705", "sha256:6a8a51f64dae307f6e0c9db752b66a7951e282389d8362cc1d39a56f3feeb31d" ], - "markers": "python_version ~= '3.6'", + "index": "pypi", "version": "==2.6.0" }, "avocado-framework": { "hashes": [ - "sha256:3fca7226d7d164f124af8a741e7fa658ff4345a0738ddc32907631fd688b38ed", - "sha256:48ac254c0ae2ef0c0ceeb38e3d3df0388718eda8f48b3ab55b30b252839f42b1" + "sha256:244cb569f8eb4e50a22ac82e1a2b2bba2458999f4281efbe2651bd415d59c65b", + "sha256:6f15998b67ecd0e7dde790c4de4dd249d6df52dfe6d5cc4e2dd6596df51c3583" ], "index": "pypi", - "version": "==87.0" + "version": "==90.0" }, "distlib": { "hashes": [ "sha256:106fef6dc37dd8c0e2c0a60d3fca3e77460a48907f335fa28420463a6f799736", "sha256:23e223426b28491b1ced97dc3bbe183027419dfc7982b4fa2f05d5f3ff10711c" ], + "index": "pypi", "version": "==0.3.2" }, "filelock": { @@ -57,6 +58,7 @@ "sha256:18d82244ee114f543149c66a6e0c14e9c4f8a1044b5cdaadd0f82159d6a6ff59", "sha256:929b7d63ec5b7d6b71b0fa5ac14e030b3f70b75747cef1b10da9b879fef15836" ], + "index": "pypi", "version": "==3.0.12" }, "flake8": { @@ -88,7 +90,7 @@ "sha256:54161657e8ffc76596c4ede7080ca68cb02962a2e074a2586b695a93a925d36e", "sha256:e962bff7440364183203d179d7ae9ad90cb1f2b74dcb84300e88ecc42dca3351" ], - "markers": "python_version < '3.7'", + "index": "pypi", "version": "==5.1.4" }, "isort": { @@ -124,7 +126,7 @@ "sha256:ed361bb83436f117f9917d282a456f9e5009ea12fd6de8742d1a4752c3017e93", "sha256:f5144c75445ae3ca2057faac03fda5a902eff196702b0a24daf1d6ce0650514b" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", + "index": "pypi", "version": "==1.6.0" }, "mccabe": { @@ -136,23 +138,23 @@ }, "mypy": { "hashes": [ - "sha256:15b948e1302682e3682f11f50208b726a246ab4e6c1b39f9264a8796bb416aa2", - "sha256:219a3116ecd015f8dca7b5d2c366c973509dfb9a8fc97ef044a36e3da66144a1", - "sha256:3b1fc683fb204c6b4403a1ef23f0b1fac8e4477091585e0c8c54cbdf7d7bb164", - "sha256:3beff56b453b6ef94ecb2996bea101a08f1f8a9771d3cbf4988a61e4d9973761", - "sha256:7687f6455ec3ed7649d1ae574136835a4272b65b3ddcf01ab8704ac65616c5ce", - "sha256:7ec45a70d40ede1ec7ad7f95b3c94c9cf4c186a32f6bacb1795b60abd2f9ef27", - "sha256:86c857510a9b7c3104cf4cde1568f4921762c8f9842e987bc03ed4f160925754", - "sha256:8a627507ef9b307b46a1fea9513d5c98680ba09591253082b4c48697ba05a4ae", - "sha256:8dfb69fbf9f3aeed18afffb15e319ca7f8da9642336348ddd6cab2713ddcf8f9", - "sha256:a34b577cdf6313bf24755f7a0e3f3c326d5c1f4fe7422d1d06498eb25ad0c600", - "sha256:a8ffcd53cb5dfc131850851cc09f1c44689c2812d0beb954d8138d4f5fc17f65", - 
"sha256:b90928f2d9eb2f33162405f32dde9f6dcead63a0971ca8a1b50eb4ca3e35ceb8", - "sha256:c56ffe22faa2e51054c5f7a3bc70a370939c2ed4de308c690e7949230c995913", - "sha256:f91c7ae919bbc3f96cd5e5b2e786b2b108343d1d7972ea130f7de27fdd547cf3" + "sha256:00cb1964a7476e871d6108341ac9c1a857d6bd20bf5877f4773ac5e9d92cd3cd", + "sha256:127de5a9b817a03a98c5ae8a0c46a20dc44442af6dcfa2ae7f96cb519b312efa", + "sha256:1f3976a945ad7f0a0727aafdc5651c2d3278e3c88dee94e2bf75cd3386b7b2f4", + "sha256:2f8c098f12b402c19b735aec724cc9105cc1a9eea405d08814eb4b14a6fb1a41", + "sha256:4ef13b619a289aa025f2273e05e755f8049bb4eaba6d703a425de37d495d178d", + "sha256:5d142f219bf8c7894dfa79ebfb7d352c4c63a325e75f10dfb4c3db9417dcd135", + "sha256:62eb5dd4ea86bda8ce386f26684f7f26e4bfe6283c9f2b6ca6d17faf704dcfad", + "sha256:64c36eb0936d0bfb7d8da49f92c18e312ad2e3ed46e5548ae4ca997b0d33bd59", + "sha256:75eed74d2faf2759f79c5f56f17388defd2fc994222312ec54ee921e37b31ad4", + "sha256:974bebe3699b9b46278a7f076635d219183da26e1a675c1f8243a69221758273", + "sha256:a5e5bb12b7982b179af513dddb06fca12285f0316d74f3964078acbfcf4c68f2", + "sha256:d31291df31bafb997952dc0a17ebb2737f802c754aed31dd155a8bfe75112c57", + "sha256:d3b4941de44341227ece1caaf5b08b23e42ad4eeb8b603219afb11e9d4cfb437", + "sha256:eadb865126da4e3c4c95bdb47fe1bb087a3e3ea14d39a3b13224b8a4d9f9a102" ], "index": "pypi", - "version": "==0.770" + "version": "==0.780" }, "mypy-extensions": { "hashes": [ @@ -166,7 +168,7 @@ "sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5", "sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "index": "pypi", "version": "==20.9" }, "pluggy": { @@ -174,7 +176,7 @@ "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0", "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "index": "pypi", "version": "==0.13.1" }, "py": { @@ -182,7 +184,7 @@ "sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3", "sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "index": "pypi", "version": "==1.10.0" }, "pycodestyle": { @@ -200,6 +202,14 @@ ], "version": "==2.0.0" }, + "pygments": { + "hashes": [ + "sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f", + "sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e" + ], + "index": "pypi", + "version": "==2.9.0" + }, "pylint": { "hashes": [ "sha256:082a6d461b54f90eea49ca90fff4ee8b6e45e8029e5dbd72f6107ef84f3779c0", @@ -213,13 +223,21 @@ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "index": "pypi", "version": "==2.4.7" }, "qemu": { "editable": true, "path": "." 
}, + "setuptools": { + "hashes": [ + "sha256:22c7348c6d2976a52632c67f7ab0cdf40147db7789f9aed18734643fe9cf3373", + "sha256:4ce92f1e1f8f01233ee9952c04f6b81d1e02939d6e1b488428154974a4d0783e" + ], + "markers": "python_version >= '3.6'", + "version": "==59.6.0" + }, "six": { "hashes": [ "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", @@ -286,15 +304,29 @@ "sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342", "sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84" ], - "markers": "python_version < '3.8'", + "index": "pypi", "version": "==3.10.0.0" }, + "urwid": { + "hashes": [ + "sha256:588bee9c1cb208d0906a9f73c613d2bd32c3ed3702012f51efe318a3f2127eae" + ], + "index": "pypi", + "version": "==2.1.2" + }, + "urwid-readline": { + "hashes": [ + "sha256:018020cbc864bb5ed87be17dc26b069eae2755cb29f3a9c569aac3bded1efaf4" + ], + "index": "pypi", + "version": "==0.13" + }, "virtualenv": { "hashes": [ "sha256:14fdf849f80dbb29a4eb6caa9875d476ee2a5cf76a5f5415fa2f1606010ab467", "sha256:2b0126166ea7c9c3661f5b8e06773d28f83322de7a3ff7d06f0aed18c9de6a76" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "index": "pypi", "version": "==20.4.7" }, "wrapt": { @@ -308,7 +340,7 @@ "sha256:3607921face881ba3e026887d8150cca609d517579abe052ac81fc5aeffdbd76", "sha256:51cb66cc54621609dd593d1787f286ee42a5c0adbb4b29abea5a63edc3e03098" ], - "markers": "python_version < '3.10'", + "index": "pypi", "version": "==3.4.1" } } diff --git a/python/avocado.cfg b/python/avocado.cfg index 10dc6fb605..a460420059 100644 --- a/python/avocado.cfg +++ b/python/avocado.cfg @@ -1,3 +1,6 @@ +[run] +test_runner = nrunner + [simpletests] # Don't show stdout/stderr in the test *summary* status.failure_fields = ['status'] diff --git a/python/qemu/machine/README.rst b/python/qemu/machine/README.rst index ac2b4fffb4..8de2c3d772 100644 --- a/python/qemu/machine/README.rst +++ b/python/qemu/machine/README.rst @@ -2,7 +2,7 @@ qemu.machine package ==================== This package provides core utilities used for testing and debugging -QEMU. It is used by the iotests, vm tests, acceptance tests, and several +QEMU. It is used by the iotests, vm tests, avocado tests, and several other utilities in the ./scripts directory. It is not a fully-fledged SDK and it is subject to change at any time. diff --git a/python/qemu/machine/machine.py b/python/qemu/machine/machine.py index 971ed7e8c6..37191f433b 100644 --- a/python/qemu/machine/machine.py +++ b/python/qemu/machine/machine.py @@ -19,6 +19,7 @@ which provides facilities for managing the lifetime of a QEMU VM. import errno from itertools import chain +import locale import logging import os import shutil @@ -36,13 +37,14 @@ from typing import ( Sequence, Tuple, Type, + TypeVar, ) -from qemu.qmp import ( # pylint: disable=import-error +from qemu.qmp import SocketAddrT +from qemu.qmp.legacy import ( QEMUMonitorProtocol, QMPMessage, QMPReturnValue, - SocketAddrT, ) from . import console_socket @@ -67,12 +69,44 @@ class QEMUMachineAddDeviceError(QEMUMachineError): """ +class VMLaunchFailure(QEMUMachineError): + """ + Exception raised when a VM launch was attempted, but failed. 
+ """ + def __init__(self, exitcode: Optional[int], + command: str, output: Optional[str]): + super().__init__(exitcode, command, output) + self.exitcode = exitcode + self.command = command + self.output = output + + def __str__(self) -> str: + ret = '' + if self.__cause__ is not None: + name = type(self.__cause__).__name__ + reason = str(self.__cause__) + if reason: + ret += f"{name}: {reason}" + else: + ret += f"{name}" + ret += '\n' + + if self.exitcode is not None: + ret += f"\tExit code: {self.exitcode}\n" + ret += f"\tCommand: {self.command}\n" + ret += f"\tOutput: {self.output}\n" + return ret + + class AbnormalShutdown(QEMUMachineError): """ Exception raised when a graceful shutdown was requested, but not performed. """ +_T = TypeVar('_T', bound='QEMUMachine') + + class QEMUMachine: """ A QEMU VM. @@ -93,11 +127,11 @@ class QEMUMachine: name: Optional[str] = None, base_temp_dir: str = "/var/tmp", monitor_address: Optional[SocketAddrT] = None, - socket_scm_helper: Optional[str] = None, sock_dir: Optional[str] = None, drain_console: bool = False, console_log: Optional[str] = None, - log_dir: Optional[str] = None): + log_dir: Optional[str] = None, + qmp_timer: Optional[float] = None): ''' Initialize a QEMUMachine @@ -107,11 +141,11 @@ class QEMUMachine: @param name: prefix for socket and log file names (default: qemu-PID) @param base_temp_dir: default location where temp files are created @param monitor_address: address for QMP monitor - @param socket_scm_helper: helper program, required for send_fd_scm() @param sock_dir: where to create socket (defaults to base_temp_dir) @param drain_console: (optional) True to drain console socket to buffer @param console_log: (optional) path to console log file @param log_dir: where to create and keep log files + @param qmp_timer: (optional) default QMP socket timeout @note: Qemu process is not started until launch() is used. ''' # pylint: disable=too-many-arguments @@ -121,21 +155,20 @@ class QEMUMachine: self._binary = binary self._args = list(args) self._wrapper = wrapper + self._qmp_timer = qmp_timer - self._name = name or "qemu-%d" % os.getpid() + self._name = name or f"qemu-{os.getpid()}-{id(self):02x}" + self._temp_dir: Optional[str] = None self._base_temp_dir = base_temp_dir - self._sock_dir = sock_dir or self._base_temp_dir + self._sock_dir = sock_dir self._log_dir = log_dir - self._socket_scm_helper = socket_scm_helper if monitor_address is not None: self._monitor_address = monitor_address - self._remove_monitor_sockfile = False else: self._monitor_address = os.path.join( - self._sock_dir, f"{self._name}-monitor.sock" + self.sock_dir, f"{self._name}-monitor.sock" ) - self._remove_monitor_sockfile = True self._console_log_path = console_log if self._console_log_path: @@ -153,20 +186,20 @@ class QEMUMachine: self._qmp_set = True # Enable QMP monitor by default. self._qmp_connection: Optional[QEMUMonitorProtocol] = None self._qemu_full_args: Tuple[str, ...] 
= () - self._temp_dir: Optional[str] = None self._launched = False self._machine: Optional[str] = None self._console_index = 0 self._console_set = False self._console_device_type: Optional[str] = None self._console_address = os.path.join( - self._sock_dir, f"{self._name}-console.sock" + self.sock_dir, f"{self._name}-console.sock" ) self._console_socket: Optional[socket.socket] = None self._remove_files: List[str] = [] self._user_killed = False + self._quit_issued = False - def __enter__(self) -> 'QEMUMachine': + def __enter__(self: _T) -> _T: return self def __exit__(self, @@ -182,8 +215,8 @@ class QEMUMachine: self._args.append('-monitor') self._args.append('null') - def add_fd(self, fd: int, fdset: int, - opaque: str, opts: str = '') -> 'QEMUMachine': + def add_fd(self: _T, fd: int, fdset: int, + opaque: str, opts: str = '') -> _T: """ Pass a file descriptor to the VM """ @@ -205,48 +238,22 @@ class QEMUMachine: def send_fd_scm(self, fd: Optional[int] = None, file_path: Optional[str] = None) -> int: """ - Send an fd or file_path to socket_scm_helper. + Send an fd or file_path to the remote via SCM_RIGHTS. - Exactly one of fd and file_path must be given. - If it is file_path, the helper will open that file and pass its own fd. + Exactly one of fd and file_path must be given. If it is + file_path, the file will be opened read-only and the new file + descriptor will be sent to the remote. """ - # In iotest.py, the qmp should always use unix socket. - assert self._qmp.is_scm_available() - if self._socket_scm_helper is None: - raise QEMUMachineError("No path to socket_scm_helper set") - if not os.path.exists(self._socket_scm_helper): - raise QEMUMachineError("%s does not exist" % - self._socket_scm_helper) - - # This did not exist before 3.4, but since then it is - # mandatory for our purpose - if hasattr(os, 'set_inheritable'): - os.set_inheritable(self._qmp.get_sock_fd(), True) - if fd is not None: - os.set_inheritable(fd, True) - - fd_param = ["%s" % self._socket_scm_helper, - "%d" % self._qmp.get_sock_fd()] - if file_path is not None: assert fd is None - fd_param.append(file_path) + with open(file_path, "rb") as passfile: + fd = passfile.fileno() + self._qmp.send_fd_scm(fd) else: assert fd is not None - fd_param.append(str(fd)) + self._qmp.send_fd_scm(fd) - proc = subprocess.run( - fd_param, - stdin=subprocess.DEVNULL, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - check=False, - close_fds=False, - ) - if proc.stdout: - LOG.debug(proc.stdout) - - return proc.returncode + return 0 @staticmethod def _remove_if_exists(path: str) -> None: @@ -283,8 +290,12 @@ class QEMUMachine: return self._subp.pid def _load_io_log(self) -> None: + # Assume that the output encoding of QEMU's terminal output is + # defined by our locale. If indeterminate, allow open() to fall + # back to the platform default. 
+ _, encoding = locale.getlocale() if self._qemu_log_path is not None: - with open(self._qemu_log_path, "r") as iolog: + with open(self._qemu_log_path, "r", encoding=encoding) as iolog: self._iolog = iolog.read() @property @@ -326,8 +337,7 @@ class QEMUMachine: self._remove_files.append(self._console_address) if self._qmp_set: - if self._remove_monitor_sockfile: - assert isinstance(self._monitor_address, str) + if isinstance(self._monitor_address, str): self._remove_files.append(self._monitor_address) self._qmp_connection = QEMUMonitorProtocol( self._monitor_address, @@ -341,25 +351,39 @@ class QEMUMachine: self._qemu_log_path = os.path.join(self.log_dir, self._name + ".log") self._qemu_log_file = open(self._qemu_log_path, 'wb') + self._iolog = None + self._qemu_full_args = tuple(chain( + self._wrapper, + [self._binary], + self._base_args, + self._args + )) + def _post_launch(self) -> None: if self._qmp_connection: - self._qmp.accept() + self._qmp.accept(self._qmp_timer) + + def _close_qemu_log_file(self) -> None: + if self._qemu_log_file is not None: + self._qemu_log_file.close() + self._qemu_log_file = None def _post_shutdown(self) -> None: """ Called to cleanup the VM instance after the process has exited. May also be called after a failed launch. """ - # Comprehensive reset for the failed launch case: - self._early_cleanup() + try: + self._close_qmp_connection() + except Exception as err: # pylint: disable=broad-except + LOG.warning( + "Exception closing QMP connection: %s", + str(err) if str(err) else type(err).__name__ + ) + finally: + assert self._qmp_connection is None - if self._qmp_connection: - self._qmp.close() - self._qmp_connection = None - - if self._qemu_log_file is not None: - self._qemu_log_file.close() - self._qemu_log_file = None + self._close_qemu_log_file() self._load_io_log() @@ -382,6 +406,7 @@ class QEMUMachine: command = '' LOG.warning(msg, -int(exitcode), command) + self._quit_issued = False self._user_killed = False self._launched = False @@ -394,19 +419,28 @@ class QEMUMachine: if self._launched: raise QEMUMachineError('VM already launched') - self._iolog = None - self._qemu_full_args = () try: self._launch() - self._launched = True - except: - self._post_shutdown() + except BaseException as exc: + # We may have launched the process but it may + # have exited before we could connect via QMP. + # Assume the VM didn't launch or is exiting. + # If we don't wait for the process, exitcode() may still be + # 'None' by the time control is ceded back to the caller. + if self._launched: + self.wait() + else: + self._post_shutdown() - LOG.debug('Error launching VM') - if self._qemu_full_args: - LOG.debug('Command: %r', ' '.join(self._qemu_full_args)) - if self._iolog: - LOG.debug('Output: %r', self._iolog) + if isinstance(exc, Exception): + raise VMLaunchFailure( + exitcode=self.exitcode(), + command=' '.join(self._qemu_full_args), + output=self._iolog + ) from exc + + # Don't wrap 'BaseException'; doing so would downgrade + # that exception. However, we still want to clean up. raise def _launch(self) -> None: @@ -414,12 +448,6 @@ class QEMUMachine: Launch the VM and establish a QMP connection """ self._pre_launch() - self._qemu_full_args = tuple( - chain(self._wrapper, - [self._binary], - self._base_args, - self._args) - ) LOG.debug('VM launch command: %r', ' '.join(self._qemu_full_args)) # Cleaning up of this subprocess is guaranteed by _do_shutdown. 
@@ -430,18 +458,44 @@ class QEMUMachine: stderr=subprocess.STDOUT, shell=False, close_fds=False) + self._launched = True self._post_launch() + def _close_qmp_connection(self) -> None: + """ + Close the underlying QMP connection, if any. + + Dutifully report errors that occurred while closing, but assume + that any error encountered indicates an abnormal termination + process and not a failure to close. + """ + if self._qmp_connection is None: + return + + try: + self._qmp.close() + except EOFError: + # EOF can occur as an Exception here when using the Async + # QMP backend. It indicates that the server closed the + # stream. If we successfully issued 'quit' at any point, + # then this was expected. If the remote went away without + # our permission, it's worth reporting that as an abnormal + # shutdown case. + if not (self._user_killed or self._quit_issued): + raise + finally: + self._qmp_connection = None + def _early_cleanup(self) -> None: """ Perform any cleanup that needs to happen before the VM exits. - May be invoked by both soft and hard shutdown in failover scenarios. - Called additionally by _post_shutdown for comprehensive cleanup. + This method may be called twice upon shutdown, once each by soft + and hard shutdown in failover scenarios. """ # If we keep the console socket open, we may deadlock waiting # for QEMU to exit, while QEMU is waiting for the socket to - # become writeable. + # become writable. if self._console_socket is not None: self._console_socket.close() self._console_socket = None @@ -457,15 +511,13 @@ class QEMUMachine: self._subp.kill() self._subp.wait(timeout=60) - def _soft_shutdown(self, timeout: Optional[int], - has_quit: bool = False) -> None: + def _soft_shutdown(self, timeout: Optional[int]) -> None: """ Perform early cleanup, attempt to gracefully shut down the VM, and wait for it to terminate. :param timeout: Timeout in seconds for graceful shutdown. A value of None is an infinite wait. - :param has_quit: When True, don't attempt to issue 'quit' QMP command :raise ConnectionReset: On QMP communication errors :raise subprocess.TimeoutExpired: When timeout is exceeded waiting for @@ -474,21 +526,24 @@ class QEMUMachine: self._early_cleanup() if self._qmp_connection: - if not has_quit: - # Might raise ConnectionReset - self._qmp.cmd('quit') + try: + if not self._quit_issued: + # May raise ExecInterruptedError or StateError if the + # connection dies or has *already* died. + self.qmp('quit') + finally: + # Regardless, we want to quiesce the connection. + self._close_qmp_connection() # May raise subprocess.TimeoutExpired self._subp.wait(timeout=timeout) - def _do_shutdown(self, timeout: Optional[int], - has_quit: bool = False) -> None: + def _do_shutdown(self, timeout: Optional[int]) -> None: """ Attempt to shutdown the VM gracefully; fallback to a hard shutdown. :param timeout: Timeout in seconds for graceful shutdown. A value of None is an infinite wait. - :param has_quit: When True, don't attempt to issue 'quit' QMP command :raise AbnormalShutdown: When the VM could not be shut down gracefully. The inner exception will likely be ConnectionReset or @@ -496,13 +551,13 @@ class QEMUMachine: may result in its own exceptions, likely subprocess.TimeoutExpired. 
""" try: - self._soft_shutdown(timeout, has_quit) + self._soft_shutdown(timeout) except Exception as exc: self._hard_shutdown() raise AbnormalShutdown("Could not perform graceful shutdown") \ from exc - def shutdown(self, has_quit: bool = False, + def shutdown(self, hard: bool = False, timeout: Optional[int] = 30) -> None: """ @@ -512,7 +567,6 @@ class QEMUMachine: If the VM has not yet been launched, or shutdown(), wait(), or kill() have already been called, this method does nothing. - :param has_quit: When true, do not attempt to issue 'quit' QMP command. :param hard: When true, do not attempt graceful shutdown, and suppress the SIGKILL warning log message. :param timeout: Optional timeout in seconds for graceful shutdown. @@ -526,7 +580,7 @@ class QEMUMachine: self._user_killed = True self._hard_shutdown() else: - self._do_shutdown(timeout, has_quit) + self._do_shutdown(timeout) finally: self._post_shutdown() @@ -543,7 +597,8 @@ class QEMUMachine: :param timeout: Optional timeout in seconds. Default 30 seconds. A value of `None` is an infinite wait. """ - self.shutdown(has_quit=True, timeout=timeout) + self._quit_issued = True + self.shutdown(timeout=timeout) def set_qmp_monitor(self, enabled: bool = True) -> None: """ @@ -564,23 +619,34 @@ class QEMUMachine: return self._qmp_connection @classmethod - def _qmp_args(cls, _conv_keys: bool = True, **args: Any) -> Dict[str, Any]: - qmp_args = dict() - for key, value in args.items(): - if _conv_keys: - qmp_args[key.replace('_', '-')] = value - else: - qmp_args[key] = value - return qmp_args + def _qmp_args(cls, conv_keys: bool, + args: Dict[str, Any]) -> Dict[str, object]: + if conv_keys: + return {k.replace('_', '-'): v for k, v in args.items()} + + return args def qmp(self, cmd: str, - conv_keys: bool = True, + args_dict: Optional[Dict[str, object]] = None, + conv_keys: Optional[bool] = None, **args: Any) -> QMPMessage: """ Invoke a QMP command and return the response dict """ - qmp_args = self._qmp_args(conv_keys, **args) - return self._qmp.cmd(cmd, args=qmp_args) + if args_dict is not None: + assert not args + assert conv_keys is None + args = args_dict + conv_keys = False + + if conv_keys is None: + conv_keys = True + + qmp_args = self._qmp_args(conv_keys, args) + ret = self._qmp.cmd(cmd, args=qmp_args) + if cmd == 'quit' and 'error' not in ret and 'return' in ret: + self._quit_issued = True + return ret def command(self, cmd: str, conv_keys: bool = True, @@ -590,8 +656,11 @@ class QEMUMachine: On success return the response dict. On failure raise an exception. """ - qmp_args = self._qmp_args(conv_keys, **args) - return self._qmp.command(cmd, **qmp_args) + qmp_args = self._qmp_args(conv_keys, args) + ret = self._qmp.command(cmd, **qmp_args) + if cmd == 'quit': + self._quit_issued = True + return ret def get_qmp_event(self, wait: bool = False) -> Optional[QMPMessage]: """ @@ -608,7 +677,6 @@ class QEMUMachine: events = self._qmp.get_events(wait=wait) events.extend(self._events) del self._events[:] - self._qmp.clear_events() return events @staticmethod @@ -670,8 +738,9 @@ class QEMUMachine: :param timeout: Optional timeout, in seconds. See QEMUMonitorProtocol.pull_event. - :raise QMPTimeoutError: If timeout was non-zero and no matching events - were found. + :raise asyncio.TimeoutError: + If timeout was non-zero and no matching events were found. + :return: A QMP event matching the filter criteria. If timeout was 0 and no event matched, None. 
""" @@ -694,7 +763,7 @@ class QEMUMachine: event = self._qmp.pull_event(wait=timeout) if event is None: # NB: None is only returned when timeout is false-ish. - # Timeouts raise QMPTimeoutError instead! + # Timeouts raise asyncio.TimeoutError instead! break if _match(event): return event @@ -778,6 +847,15 @@ class QEMUMachine: dir=self._base_temp_dir) return self._temp_dir + @property + def sock_dir(self) -> str: + """ + Returns the directory used for sockfiles by this machine. + """ + if self._sock_dir: + return self._sock_dir + return self.temp_dir + @property def log_dir(self) -> str: """ diff --git a/python/qemu/machine/qtest.py b/python/qemu/machine/qtest.py index d6d9c6a34a..1a1fc6c9b0 100644 --- a/python/qemu/machine/qtest.py +++ b/python/qemu/machine/qtest.py @@ -26,7 +26,7 @@ from typing import ( TextIO, ) -from qemu.qmp import SocketAddrT # pylint: disable=import-error +from qemu.qmp import SocketAddrT from .machine import QEMUMachine @@ -112,19 +112,20 @@ class QEMUQtestMachine(QEMUMachine): def __init__(self, binary: str, args: Sequence[str] = (), + wrapper: Sequence[str] = (), name: Optional[str] = None, base_temp_dir: str = "/var/tmp", - socket_scm_helper: Optional[str] = None, - sock_dir: Optional[str] = None): + sock_dir: Optional[str] = None, + qmp_timer: Optional[float] = None): # pylint: disable=too-many-arguments if name is None: name = "qemu-%d" % os.getpid() if sock_dir is None: sock_dir = base_temp_dir - super().__init__(binary, args, name=name, base_temp_dir=base_temp_dir, - socket_scm_helper=socket_scm_helper, - sock_dir=sock_dir) + super().__init__(binary, args, wrapper=wrapper, name=name, + base_temp_dir=base_temp_dir, + sock_dir=sock_dir, qmp_timer=qmp_timer) self._qtest: Optional[QEMUQtestProtocol] = None self._qtest_path = os.path.join(sock_dir, name + "-qtest.sock") diff --git a/python/qemu/qmp/README.rst b/python/qemu/qmp/README.rst deleted file mode 100644 index c21951491c..0000000000 --- a/python/qemu/qmp/README.rst +++ /dev/null @@ -1,9 +0,0 @@ -qemu.qmp package -================ - -This package provides a library used for connecting to and communicating -with QMP servers. It is used extensively by iotests, vm tests, -acceptance tests, and other utilities in the ./scripts directory. It is -not a fully-fledged SDK and is subject to change at any time. - -See the documentation in ``__init__.py`` for more information. diff --git a/python/qemu/qmp/__init__.py b/python/qemu/qmp/__init__.py index 269516a79b..69190d057a 100644 --- a/python/qemu/qmp/__init__.py +++ b/python/qemu/qmp/__init__.py @@ -1,423 +1,59 @@ """ QEMU Monitor Protocol (QMP) development library & tooling. -This package provides a fairly low-level class for communicating to QMP -protocol servers, as implemented by QEMU, the QEMU Guest Agent, and the -QEMU Storage Daemon. This library is not intended for production use. +This package provides a fairly low-level class for communicating +asynchronously with QMP protocol servers, as implemented by QEMU, the +QEMU Guest Agent, and the QEMU Storage Daemon. -`QEMUMonitorProtocol` is the primary class of interest, and all errors -raised derive from `QMPError`. +`QMPClient` provides the main functionality of this package. All errors +raised by this library derive from `QMPError`, see `qmp.error` for +additional detail. See `qmp.events` for an in-depth tutorial on +managing QMP events. """ -# Copyright (C) 2009, 2010 Red Hat Inc. +# Copyright (C) 2020-2022 John Snow for Red Hat, Inc. 
# # Authors: -# Luiz Capitulino +# John Snow # -# This work is licensed under the terms of the GNU GPL, version 2. See -# the COPYING file in the top-level directory. +# Based on earlier work by Luiz Capitulino . +# +# This work is licensed under the terms of the GNU LGPL, version 2 or +# later. See the COPYING file in the top-level directory. -import errno -import json import logging -import socket -from types import TracebackType -from typing import ( - Any, - Dict, - List, - Optional, - TextIO, - Tuple, - Type, - TypeVar, - Union, - cast, + +from .error import QMPError +from .events import EventListener +from .message import Message +from .protocol import ( + ConnectError, + Runstate, + SocketAddrT, + StateError, ) +from .qmp_client import ExecInterruptedError, ExecuteError, QMPClient -#: QMPMessage is an entire QMP message of any kind. -QMPMessage = Dict[str, Any] +# Suppress logging unless an application engages it. +logging.getLogger('qemu.qmp').addHandler(logging.NullHandler()) -#: QMPReturnValue is the 'return' value of a command. -QMPReturnValue = object -#: QMPObject is any object in a QMP message. -QMPObject = Dict[str, object] +# The order of these fields impact the Sphinx documentation order. +__all__ = ( + # Classes, most to least important + 'QMPClient', + 'Message', + 'EventListener', + 'Runstate', -# QMPMessage can be outgoing commands or incoming events/returns. -# QMPReturnValue is usually a dict/json object, but due to QAPI's -# 'returns-whitelist', it can actually be anything. -# -# {'return': {}} is a QMPMessage, -# {} is the QMPReturnValue. + # Exceptions, most generic to most explicit + 'QMPError', + 'StateError', + 'ConnectError', + 'ExecuteError', + 'ExecInterruptedError', - -InternetAddrT = Tuple[str, int] -UnixAddrT = str -SocketAddrT = Union[InternetAddrT, UnixAddrT] - - -class QMPError(Exception): - """ - QMP base exception - """ - - -class QMPConnectError(QMPError): - """ - QMP connection exception - """ - - -class QMPCapabilitiesError(QMPError): - """ - QMP negotiate capabilities exception - """ - - -class QMPTimeoutError(QMPError): - """ - QMP timeout exception - """ - - -class QMPProtocolError(QMPError): - """ - QMP protocol error; unexpected response - """ - - -class QMPResponseError(QMPError): - """ - Represents erroneous QMP monitor reply - """ - def __init__(self, reply: QMPMessage): - try: - desc = reply['error']['desc'] - except KeyError: - desc = reply - super().__init__(desc) - self.reply = reply - - -class QMPBadPortError(QMPError): - """ - Unable to parse socket address: Port was non-numerical. - """ - - -class QEMUMonitorProtocol: - """ - Provide an API to connect to QEMU via QEMU Monitor Protocol (QMP) and then - allow to handle commands and events. - """ - - #: Logger object for debugging messages - logger = logging.getLogger('QMP') - - def __init__(self, address: SocketAddrT, - server: bool = False, - nickname: Optional[str] = None): - """ - Create a QEMUMonitorProtocol class. 
- - @param address: QEMU address, can be either a unix socket path (string) - or a tuple in the form ( address, port ) for a TCP - connection - @param server: server mode listens on the socket (bool) - @raise OSError on socket connection errors - @note No connection is established, this is done by the connect() or - accept() methods - """ - self.__events: List[QMPMessage] = [] - self.__address = address - self.__sock = self.__get_sock() - self.__sockfile: Optional[TextIO] = None - self._nickname = nickname - if self._nickname: - self.logger = logging.getLogger('QMP').getChild(self._nickname) - if server: - self.__sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - self.__sock.bind(self.__address) - self.__sock.listen(1) - - def __get_sock(self) -> socket.socket: - if isinstance(self.__address, tuple): - family = socket.AF_INET - else: - family = socket.AF_UNIX - return socket.socket(family, socket.SOCK_STREAM) - - def __negotiate_capabilities(self) -> QMPMessage: - greeting = self.__json_read() - if greeting is None or "QMP" not in greeting: - raise QMPConnectError - # Greeting seems ok, negotiate capabilities - resp = self.cmd('qmp_capabilities') - if resp and "return" in resp: - return greeting - raise QMPCapabilitiesError - - def __json_read(self, only_event: bool = False) -> Optional[QMPMessage]: - assert self.__sockfile is not None - while True: - data = self.__sockfile.readline() - if not data: - return None - # By definition, any JSON received from QMP is a QMPMessage, - # and we are asserting only at static analysis time that it - # has a particular shape. - resp: QMPMessage = json.loads(data) - if 'event' in resp: - self.logger.debug("<<< %s", resp) - self.__events.append(resp) - if not only_event: - continue - return resp - - def __get_events(self, wait: Union[bool, float] = False) -> None: - """ - Check for new events in the stream and cache them in __events. - - @param wait (bool): block until an event is available. - @param wait (float): If wait is a float, treat it as a timeout value. - - @raise QMPTimeoutError: If a timeout float is provided and the timeout - period elapses. - @raise QMPConnectError: If wait is True but no events could be - retrieved or if some other error occurred. - """ - - # Current timeout and blocking status - current_timeout = self.__sock.gettimeout() - - # Check for new events regardless and pull them into the cache: - self.__sock.settimeout(0) # i.e. setblocking(False) - try: - self.__json_read() - except OSError as err: - # EAGAIN: No data available; not critical - if err.errno != errno.EAGAIN: - raise - finally: - self.__sock.settimeout(current_timeout) - - # Wait for new events, if needed. - # if wait is 0.0, this means "no wait" and is also implicitly false. - if not self.__events and wait: - if isinstance(wait, float): - self.__sock.settimeout(wait) - try: - ret = self.__json_read(only_event=True) - except socket.timeout as err: - raise QMPTimeoutError("Timeout waiting for event") from err - except Exception as err: - msg = "Error while reading from socket" - raise QMPConnectError(msg) from err - finally: - self.__sock.settimeout(current_timeout) - - if ret is None: - raise QMPConnectError("Error while reading from socket") - - T = TypeVar('T') - - def __enter__(self: T) -> T: - # Implement context manager enter function. 
- return self - - def __exit__(self, - # pylint: disable=duplicate-code - # see https://github.com/PyCQA/pylint/issues/3619 - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType]) -> None: - # Implement context manager exit function. - self.close() - - @classmethod - def parse_address(cls, address: str) -> SocketAddrT: - """ - Parse a string into a QMP address. - - Figure out if the argument is in the port:host form. - If it's not, it's probably a file path. - """ - components = address.split(':') - if len(components) == 2: - try: - port = int(components[1]) - except ValueError: - msg = f"Bad port: '{components[1]}' in '{address}'." - raise QMPBadPortError(msg) from None - return (components[0], port) - - # Treat as filepath. - return address - - def connect(self, negotiate: bool = True) -> Optional[QMPMessage]: - """ - Connect to the QMP Monitor and perform capabilities negotiation. - - @return QMP greeting dict, or None if negotiate is false - @raise OSError on socket connection errors - @raise QMPConnectError if the greeting is not received - @raise QMPCapabilitiesError if fails to negotiate capabilities - """ - self.__sock.connect(self.__address) - self.__sockfile = self.__sock.makefile(mode='r') - if negotiate: - return self.__negotiate_capabilities() - return None - - def accept(self, timeout: Optional[float] = 15.0) -> QMPMessage: - """ - Await connection from QMP Monitor and perform capabilities negotiation. - - @param timeout: timeout in seconds (nonnegative float number, or - None). The value passed will set the behavior of the - underneath QMP socket as described in [1]. - Default value is set to 15.0. - - @return QMP greeting dict - @raise OSError on socket connection errors - @raise QMPConnectError if the greeting is not received - @raise QMPCapabilitiesError if fails to negotiate capabilities - - [1] - https://docs.python.org/3/library/socket.html#socket.socket.settimeout - """ - self.__sock.settimeout(timeout) - self.__sock, _ = self.__sock.accept() - self.__sockfile = self.__sock.makefile(mode='r') - return self.__negotiate_capabilities() - - def cmd_obj(self, qmp_cmd: QMPMessage) -> QMPMessage: - """ - Send a QMP command to the QMP Monitor. - - @param qmp_cmd: QMP command to be sent as a Python dict - @return QMP response as a Python dict - """ - self.logger.debug(">>> %s", qmp_cmd) - self.__sock.sendall(json.dumps(qmp_cmd).encode('utf-8')) - resp = self.__json_read() - if resp is None: - raise QMPConnectError("Unexpected empty reply from server") - self.logger.debug("<<< %s", resp) - return resp - - def cmd(self, name: str, - args: Optional[Dict[str, object]] = None, - cmd_id: Optional[object] = None) -> QMPMessage: - """ - Build a QMP command and send it to the QMP Monitor. 
- - @param name: command name (string) - @param args: command arguments (dict) - @param cmd_id: command id (dict, list, string or int) - """ - qmp_cmd: QMPMessage = {'execute': name} - if args: - qmp_cmd['arguments'] = args - if cmd_id: - qmp_cmd['id'] = cmd_id - return self.cmd_obj(qmp_cmd) - - def command(self, cmd: str, **kwds: object) -> QMPReturnValue: - """ - Build and send a QMP command to the monitor, report errors if any - """ - ret = self.cmd(cmd, kwds) - if 'error' in ret: - raise QMPResponseError(ret) - if 'return' not in ret: - raise QMPProtocolError( - "'return' key not found in QMP response '{}'".format(str(ret)) - ) - return cast(QMPReturnValue, ret['return']) - - def pull_event(self, - wait: Union[bool, float] = False) -> Optional[QMPMessage]: - """ - Pulls a single event. - - @param wait (bool): block until an event is available. - @param wait (float): If wait is a float, treat it as a timeout value. - - @raise QMPTimeoutError: If a timeout float is provided and the timeout - period elapses. - @raise QMPConnectError: If wait is True but no events could be - retrieved or if some other error occurred. - - @return The first available QMP event, or None. - """ - self.__get_events(wait) - - if self.__events: - return self.__events.pop(0) - return None - - def get_events(self, wait: bool = False) -> List[QMPMessage]: - """ - Get a list of available QMP events. - - @param wait (bool): block until an event is available. - @param wait (float): If wait is a float, treat it as a timeout value. - - @raise QMPTimeoutError: If a timeout float is provided and the timeout - period elapses. - @raise QMPConnectError: If wait is True but no events could be - retrieved or if some other error occurred. - - @return The list of available QMP events. - """ - self.__get_events(wait) - return self.__events - - def clear_events(self) -> None: - """ - Clear current list of pending events. - """ - self.__events = [] - - def close(self) -> None: - """ - Close the socket and socket file. - """ - if self.__sock: - self.__sock.close() - if self.__sockfile: - self.__sockfile.close() - - def settimeout(self, timeout: Optional[float]) -> None: - """ - Set the socket timeout. - - @param timeout (float): timeout in seconds (non-zero), or None. - @note This is a wrap around socket.settimeout - - @raise ValueError: if timeout was set to 0. - """ - if timeout == 0: - msg = "timeout cannot be 0; this engages non-blocking mode." - msg += " Use 'None' instead to disable timeouts." - raise ValueError(msg) - self.__sock.settimeout(timeout) - - def get_sock_fd(self) -> int: - """ - Get the socket file descriptor. - - @return The file descriptor number. - """ - return self.__sock.fileno() - - def is_scm_available(self) -> bool: - """ - Check if the socket allows for SCM_RIGHTS. - - @return True if SCM_RIGHTS is available, otherwise False. - """ - return self.__sock.family == socket.AF_UNIX + # Type aliases + 'SocketAddrT', +) diff --git a/python/qemu/qmp/error.py b/python/qemu/qmp/error.py new file mode 100644 index 0000000000..24ba4d5054 --- /dev/null +++ b/python/qemu/qmp/error.py @@ -0,0 +1,50 @@ +""" +QMP Error Classes + +This package seeks to provide semantic error classes that are intended +to be used directly by clients when they would like to handle particular +semantic failures (e.g. "failed to connect") without needing to know the +enumeration of possible reasons for that failure. 
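+
+For illustration only (a minimal sketch: the `QMPClient` instance
+``qmp`` and the socket path ``sock_path`` are assumed here, not
+defined by this module), "failed to connect" can be handled as a
+single semantic failure:
+
+.. code:: python
+
+   try:
+       await qmp.connect(sock_path)
+   except ConnectError as err:
+       # The underlying cause is not needed in order to handle the
+       # failure semantically.
+       print(f"Failed to connect: {err}")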
+ +QMPError serves as the ancestor for all exceptions raised by this +package, and is suitable for use in handling semantic errors from this +library. In most cases, individual public methods will attempt to catch +and re-encapsulate various exceptions to provide a semantic +error-handling interface. + +.. admonition:: QMP Exception Hierarchy Reference + + | `Exception` + | +-- `QMPError` + | +-- `ConnectError` + | +-- `StateError` + | +-- `ExecInterruptedError` + | +-- `ExecuteError` + | +-- `ListenerError` + | +-- `ProtocolError` + | +-- `DeserializationError` + | +-- `UnexpectedTypeError` + | +-- `ServerParseError` + | +-- `BadReplyError` + | +-- `GreetingError` + | +-- `NegotiationError` +""" + + +class QMPError(Exception): + """Abstract error class for all errors originating from this package.""" + + +class ProtocolError(QMPError): + """ + Abstract error class for protocol failures. + + Semantically, these errors are generally the fault of either the + protocol server or as a result of a bug in this library. + + :param error_message: Human-readable string describing the error. + """ + def __init__(self, error_message: str): + super().__init__(error_message) + #: Human-readable error message, without any prefix. + self.error_message: str = error_message diff --git a/python/qemu/qmp/events.py b/python/qemu/qmp/events.py new file mode 100644 index 0000000000..6199776cc6 --- /dev/null +++ b/python/qemu/qmp/events.py @@ -0,0 +1,717 @@ +""" +QMP Events and EventListeners + +Asynchronous QMP uses `EventListener` objects to listen for events. An +`EventListener` is a FIFO event queue that can be pre-filtered to listen +for only specific events. Each `EventListener` instance receives its own +copy of events that it hears, so events may be consumed without fear or +worry for depriving other listeners of events they need to hear. + + +EventListener Tutorial +---------------------- + +In all of the following examples, we assume that we have a `QMPClient` +instantiated named ``qmp`` that is already connected. + + +`listener()` context blocks with one name +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The most basic usage is by using the `listener()` context manager to +construct them: + +.. code:: python + + with qmp.listener('STOP') as listener: + await qmp.execute('stop') + await listener.get() + +The listener is active only for the duration of the ‘with’ block. This +instance listens only for ‘STOP’ events. + + +`listener()` context blocks with two or more names +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Multiple events can be selected for by providing any ``Iterable[str]``: + +.. code:: python + + with qmp.listener(('STOP', 'RESUME')) as listener: + await qmp.execute('stop') + event = await listener.get() + assert event['event'] == 'STOP' + + await qmp.execute('cont') + event = await listener.get() + assert event['event'] == 'RESUME' + + +`listener()` context blocks with no names +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By omitting names entirely, you can listen to ALL events. + +.. code:: python + + with qmp.listener() as listener: + await qmp.execute('stop') + event = await listener.get() + assert event['event'] == 'STOP' + +This isn’t a very good use case for this feature: In a non-trivial +running system, we may not know what event will arrive next. Grabbing +the top of a FIFO queue returning multiple kinds of events may be prone +to error. 
+ + +Using async iterators to retrieve events +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you’d like to simply watch what events happen to arrive, you can use +the listener as an async iterator: + +.. code:: python + + with qmp.listener() as listener: + async for event in listener: + print(f"Event arrived: {event['event']}") + +This is analogous to the following code: + +.. code:: python + + with qmp.listener() as listener: + while True: + event = listener.get() + print(f"Event arrived: {event['event']}") + +This event stream will never end, so these blocks will never terminate. + + +Using asyncio.Task to concurrently retrieve events +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Since a listener’s event stream will never terminate, it is not likely +useful to use that form in a script. For longer-running clients, we can +create event handlers by using `asyncio.Task` to create concurrent +coroutines: + +.. code:: python + + async def print_events(listener): + try: + async for event in listener: + print(f"Event arrived: {event['event']}") + except asyncio.CancelledError: + return + + with qmp.listener() as listener: + task = asyncio.Task(print_events(listener)) + await qmp.execute('stop') + await qmp.execute('cont') + task.cancel() + await task + +However, there is no guarantee that these events will be received by the +time we leave this context block. Once the context block is exited, the +listener will cease to hear any new events, and becomes inert. + +Be mindful of the timing: the above example will *probably*– but does +not *guarantee*– that both STOP/RESUMED events will be printed. The +example below outlines how to use listeners outside of a context block. + + +Using `register_listener()` and `remove_listener()` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To create a listener with a longer lifetime, beyond the scope of a +single block, create a listener and then call `register_listener()`: + +.. code:: python + + class MyClient: + def __init__(self, qmp): + self.qmp = qmp + self.listener = EventListener() + + async def print_events(self): + try: + async for event in self.listener: + print(f"Event arrived: {event['event']}") + except asyncio.CancelledError: + return + + async def run(self): + self.task = asyncio.Task(self.print_events) + self.qmp.register_listener(self.listener) + await qmp.execute('stop') + await qmp.execute('cont') + + async def stop(self): + self.task.cancel() + await self.task + self.qmp.remove_listener(self.listener) + +The listener can be deactivated by using `remove_listener()`. When it is +removed, any possible pending events are cleared and it can be +re-registered at a later time. + + +Using the built-in all events listener +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The `QMPClient` object creates its own default listener named +:py:obj:`~Events.events` that can be used for the same purpose without +having to create your own: + +.. code:: python + + async def print_events(listener): + try: + async for event in listener: + print(f"Event arrived: {event['event']}") + except asyncio.CancelledError: + return + + task = asyncio.Task(print_events(qmp.events)) + + await qmp.execute('stop') + await qmp.execute('cont') + + task.cancel() + await task + + +Using both .get() and async iterators +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The async iterator and `get()` methods pull events from the same FIFO +queue. If you mix the usage of both, be aware: Events are emitted +precisely once per listener. 
+ +If multiple contexts try to pull events from the same listener instance, +events are still emitted only precisely once. + +This restriction can be lifted by creating additional listeners. + + +Creating multiple listeners +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Additional `EventListener` objects can be created at-will. Each one +receives its own copy of events, with separate FIFO event queues. + +.. code:: python + + my_listener = EventListener() + qmp.register_listener(my_listener) + + await qmp.execute('stop') + copy1 = await my_listener.get() + copy2 = await qmp.events.get() + + assert copy1 == copy2 + +In this example, we await an event from both a user-created +`EventListener` and the built-in events listener. Both receive the same +event. + + +Clearing listeners +~~~~~~~~~~~~~~~~~~ + +`EventListener` objects can be cleared, clearing all events seen thus far: + +.. code:: python + + await qmp.execute('stop') + qmp.events.clear() + await qmp.execute('cont') + event = await qmp.events.get() + assert event['event'] == 'RESUME' + +`EventListener` objects are FIFO queues. If events are not consumed, +they will remain in the queue until they are witnessed or discarded via +`clear()`. FIFO queues will be drained automatically upon leaving a +context block, or when calling `remove_listener()`. + + +Accessing listener history +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +`EventListener` objects record their history. Even after being cleared, +you can obtain a record of all events seen so far: + +.. code:: python + + await qmp.execute('stop') + await qmp.execute('cont') + qmp.events.clear() + + assert len(qmp.events.history) == 2 + assert qmp.events.history[0]['event'] == 'STOP' + assert qmp.events.history[1]['event'] == 'RESUME' + +The history is updated immediately and does not require the event to be +witnessed first. + + +Using event filters +~~~~~~~~~~~~~~~~~~~ + +`EventListener` objects can be given complex filtering criteria if names +are not sufficient: + +.. code:: python + + def job1_filter(event) -> bool: + event_data = event.get('data', {}) + event_job_id = event_data.get('id') + return event_job_id == "job1" + + with qmp.listener('JOB_STATUS_CHANGE', job1_filter) as listener: + await qmp.execute('blockdev-backup', arguments={'job-id': 'job1', ...}) + async for event in listener: + if event['data']['status'] == 'concluded': + break + +These filters might be most useful when parameterized. `EventListener` +objects expect a function that takes only a single argument (the raw +event, as a `Message`) and returns a bool; True if the event should be +accepted into the stream. You can create a function that adapts this +signature to accept configuration parameters: + +.. code:: python + + def job_filter(job_id: str) -> EventFilter: + def filter(event: Message) -> bool: + return event['data']['id'] == job_id + return filter + + with qmp.listener('JOB_STATUS_CHANGE', job_filter('job2')) as listener: + await qmp.execute('blockdev-backup', arguments={'job-id': 'job2', ...}) + async for event in listener: + if event['data']['status'] == 'concluded': + break + + +Activating an existing listener with `listen()` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Listeners with complex, long configurations can also be created manually +and activated temporarily by using `listen()` instead of `listener()`: + +.. 
code:: python + + listener = EventListener(('BLOCK_JOB_COMPLETED', 'BLOCK_JOB_CANCELLED', + 'BLOCK_JOB_ERROR', 'BLOCK_JOB_READY', + 'BLOCK_JOB_PENDING', 'JOB_STATUS_CHANGE')) + + with qmp.listen(listener): + await qmp.execute('blockdev-backup', arguments={'job-id': 'job3', ...}) + async for event in listener: + print(event) + if event['event'] == 'BLOCK_JOB_COMPLETED': + break + +Any events that are not witnessed by the time the block is left will be +cleared from the queue; entering the block is an implicit +`register_listener()` and leaving the block is an implicit +`remove_listener()`. + + +Activating multiple existing listeners with `listen()` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +While `listener()` is only capable of creating a single listener, +`listen()` is capable of activating multiple listeners simultaneously: + +.. code:: python + + def job_filter(job_id: str) -> EventFilter: + def filter(event: Message) -> bool: + return event['data']['id'] == job_id + return filter + + jobA = EventListener('JOB_STATUS_CHANGE', job_filter('jobA')) + jobB = EventListener('JOB_STATUS_CHANGE', job_filter('jobB')) + + with qmp.listen(jobA, jobB): + qmp.execute('blockdev-create', arguments={'job-id': 'jobA', ...}) + qmp.execute('blockdev-create', arguments={'job-id': 'jobB', ...}) + + async for event in jobA.get(): + if event['data']['status'] == 'concluded': + break + async for event in jobB.get(): + if event['data']['status'] == 'concluded': + break + + +Extending the `EventListener` class +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In the case that a more specialized `EventListener` is desired to +provide either more functionality or more compact syntax for specialized +cases, it can be extended. + +One of the key methods to extend or override is +:py:meth:`~EventListener.accept()`. The default implementation checks an +incoming message for: + +1. A qualifying name, if any :py:obj:`~EventListener.names` were + specified at initialization time +2. That :py:obj:`~EventListener.event_filter()` returns True. + +This can be modified however you see fit to change the criteria for +inclusion in the stream. + +For convenience, a ``JobListener`` class could be created that simply +bakes in configuration so it does not need to be repeated: + +.. code:: python + + class JobListener(EventListener): + def __init__(self, job_id: str): + super().__init__(('BLOCK_JOB_COMPLETED', 'BLOCK_JOB_CANCELLED', + 'BLOCK_JOB_ERROR', 'BLOCK_JOB_READY', + 'BLOCK_JOB_PENDING', 'JOB_STATUS_CHANGE')) + self.job_id = job_id + + def accept(self, event) -> bool: + if not super().accept(event): + return False + if event['event'] in ('BLOCK_JOB_PENDING', 'JOB_STATUS_CHANGE'): + return event['data']['id'] == job_id + return event['data']['device'] == job_id + +From here on out, you can conjure up a custom-purpose listener that +listens only for job-related events for a specific job-id easily: + +.. code:: python + + listener = JobListener('job4') + with qmp.listener(listener): + await qmp.execute('blockdev-backup', arguments={'job-id': 'job4', ...}) + async for event in listener: + print(event) + if event['event'] == 'BLOCK_JOB_COMPLETED': + break + + +Experimental Interfaces & Design Issues +--------------------------------------- + +These interfaces are not ones I am sure I will keep or otherwise modify +heavily. + +qmp.listener()’s type signature +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +`listener()` does not return anything, because it was assumed the caller +already had a handle to the listener. 
However, for +``qmp.listener(EventListener())`` forms, the caller will not have saved +a handle to the listener. + +Because this function can accept *many* listeners, I found it hard to +accurately type in a way where it could be used in both “one” or “many” +forms conveniently and in a statically type-safe manner. + +Ultimately, I removed the return altogether, but perhaps with more time +I can work out a way to re-add it. + + +API Reference +------------- + +""" + +import asyncio +from contextlib import contextmanager +import logging +from typing import ( + AsyncIterator, + Callable, + Iterable, + Iterator, + List, + Optional, + Set, + Tuple, + Union, +) + +from .error import QMPError +from .message import Message + + +EventNames = Union[str, Iterable[str], None] +EventFilter = Callable[[Message], bool] + + +class ListenerError(QMPError): + """ + Generic error class for `EventListener`-related problems. + """ + + +class EventListener: + """ + Selectively listens for events with runtime configurable filtering. + + This class is designed to be directly usable for the most common cases, + but it can be extended to provide more rigorous control. + + :param names: + One or more names of events to listen for. + When not provided, listen for ALL events. + :param event_filter: + An optional event filtering function. + When names are also provided, this acts as a secondary filter. + + When ``names`` and ``event_filter`` are both provided, the names + will be filtered first, and then the filter function will be called + second. The event filter function can assume that the format of the + event is a known format. + """ + def __init__( + self, + names: EventNames = None, + event_filter: Optional[EventFilter] = None, + ): + # Queue of 'heard' events yet to be witnessed by a caller. + self._queue: 'asyncio.Queue[Message]' = asyncio.Queue() + + # Intended as a historical record, NOT a processing queue or backlog. + self._history: List[Message] = [] + + #: Primary event filter, based on one or more event names. + self.names: Set[str] = set() + if isinstance(names, str): + self.names.add(names) + elif names is not None: + self.names.update(names) + + #: Optional, secondary event filter. + self.event_filter: Optional[EventFilter] = event_filter + + @property + def history(self) -> Tuple[Message, ...]: + """ + A read-only history of all events seen so far. + + This represents *every* event, including those not yet witnessed + via `get()` or ``async for``. It persists between `clear()` + calls and is immutable. + """ + return tuple(self._history) + + def accept(self, event: Message) -> bool: + """ + Determine if this listener accepts this event. + + This method determines which events will appear in the stream. + The default implementation simply checks the event against the + list of names and the event_filter to decide if this + `EventListener` accepts a given event. It can be + overridden/extended to provide custom listener behavior. + + User code is not expected to need to invoke this method. + + :param event: The event under consideration. + :return: `True`, if this listener accepts this event. + """ + name_ok = (not self.names) or (event['event'] in self.names) + return name_ok and ( + (not self.event_filter) or self.event_filter(event) + ) + + async def put(self, event: Message) -> None: + """ + Conditionally put a new event into the FIFO queue. + + This method is not designed to be invoked from user code, and it + should not need to be overridden. 
It is a public interface so + that `QMPClient` has an interface by which it can inform + registered listeners of new events. + + The event will be put into the queue if + :py:meth:`~EventListener.accept()` returns `True`. + + :param event: The new event to put into the FIFO queue. + """ + if not self.accept(event): + return + + self._history.append(event) + await self._queue.put(event) + + async def get(self) -> Message: + """ + Wait for the very next event in this stream. + + If one is already available, return that one. + """ + return await self._queue.get() + + def empty(self) -> bool: + """ + Return `True` if there are no pending events. + """ + return self._queue.empty() + + def clear(self) -> List[Message]: + """ + Clear this listener of all pending events. + + Called when an `EventListener` is being unregistered, this clears the + pending FIFO queue synchronously. It can be also be used to + manually clear any pending events, if desired. + + :return: The cleared events, if any. + + .. warning:: + Take care when discarding events. Cleared events will be + silently tossed on the floor. All events that were ever + accepted by this listener are visible in `history()`. + """ + events = [] + while True: + try: + events.append(self._queue.get_nowait()) + except asyncio.QueueEmpty: + break + + return events + + def __aiter__(self) -> AsyncIterator[Message]: + return self + + async def __anext__(self) -> Message: + """ + Enables the `EventListener` to function as an async iterator. + + It may be used like this: + + .. code:: python + + async for event in listener: + print(event) + + These iterators will never terminate of their own accord; you + must provide break conditions or otherwise prepare to run them + in an `asyncio.Task` that can be cancelled. + """ + return await self.get() + + +class Events: + """ + Events is a mix-in class that adds event functionality to the QMP class. + + It's designed specifically as a mix-in for `QMPClient`, and it + relies upon the class it is being mixed into having a 'logger' + property. + """ + def __init__(self) -> None: + self._listeners: List[EventListener] = [] + + #: Default, all-events `EventListener`. + self.events: EventListener = EventListener() + self.register_listener(self.events) + + # Parent class needs to have a logger + self.logger: logging.Logger + + async def _event_dispatch(self, msg: Message) -> None: + """ + Given a new event, propagate it to all of the active listeners. + + :param msg: The event to propagate. + """ + for listener in self._listeners: + await listener.put(msg) + + def register_listener(self, listener: EventListener) -> None: + """ + Register and activate an `EventListener`. + + :param listener: The listener to activate. + :raise ListenerError: If the given listener is already registered. + """ + if listener in self._listeners: + raise ListenerError("Attempted to re-register existing listener") + self.logger.debug("Registering %s.", str(listener)) + self._listeners.append(listener) + + def remove_listener(self, listener: EventListener) -> None: + """ + Unregister and deactivate an `EventListener`. + + The removed listener will have its pending events cleared via + `clear()`. The listener can be re-registered later when + desired. + + :param listener: The listener to deactivate. + :raise ListenerError: If the given listener is not registered. 
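+
+        Note that the built-in default listener (:py:obj:`~Events.events`)
+        cannot be removed; attempting to do so also raises `ListenerError`.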
+ """ + if listener == self.events: + raise ListenerError("Cannot remove the default listener.") + self.logger.debug("Removing %s.", str(listener)) + listener.clear() + self._listeners.remove(listener) + + @contextmanager + def listen(self, *listeners: EventListener) -> Iterator[None]: + r""" + Context manager: Temporarily listen with an `EventListener`. + + Accepts one or more `EventListener` objects and registers them, + activating them for the duration of the context block. + + `EventListener` objects will have any pending events in their + FIFO queue cleared upon exiting the context block, when they are + deactivated. + + :param \*listeners: One or more EventListeners to activate. + :raise ListenerError: If the given listener(s) are already active. + """ + _added = [] + + try: + for listener in listeners: + self.register_listener(listener) + _added.append(listener) + + yield + + finally: + for listener in _added: + self.remove_listener(listener) + + @contextmanager + def listener( + self, + names: EventNames = (), + event_filter: Optional[EventFilter] = None + ) -> Iterator[EventListener]: + """ + Context manager: Temporarily listen with a new `EventListener`. + + Creates an `EventListener` object and registers it, activating + it for the duration of the context block. + + :param names: + One or more names of events to listen for. + When not provided, listen for ALL events. + :param event_filter: + An optional event filtering function. + When names are also provided, this acts as a secondary filter. + + :return: The newly created and active `EventListener`. + """ + listener = EventListener(names, event_filter) + with self.listen(listener): + yield listener diff --git a/python/qemu/qmp/legacy.py b/python/qemu/qmp/legacy.py new file mode 100644 index 0000000000..1951754455 --- /dev/null +++ b/python/qemu/qmp/legacy.py @@ -0,0 +1,315 @@ +""" +(Legacy) Sync QMP Wrapper + +This module provides the `QEMUMonitorProtocol` class, which is a +synchronous wrapper around `QMPClient`. + +Its design closely resembles that of the original QEMUMonitorProtocol +class, originally written by Luiz Capitulino. It is provided here for +compatibility with scripts inside the QEMU source tree that expect the +old interface. +""" + +# +# Copyright (C) 2009-2022 Red Hat Inc. +# +# Authors: +# Luiz Capitulino +# John Snow +# +# This work is licensed under the terms of the GNU GPL, version 2. See +# the COPYING file in the top-level directory. +# + +import asyncio +from types import TracebackType +from typing import ( + Any, + Awaitable, + Dict, + List, + Optional, + Type, + TypeVar, + Union, +) + +from .error import QMPError +from .protocol import Runstate, SocketAddrT +from .qmp_client import QMPClient + + +#: QMPMessage is an entire QMP message of any kind. +QMPMessage = Dict[str, Any] + +#: QMPReturnValue is the 'return' value of a command. +QMPReturnValue = object + +#: QMPObject is any object in a QMP message. +QMPObject = Dict[str, object] + +# QMPMessage can be outgoing commands or incoming events/returns. +# QMPReturnValue is usually a dict/json object, but due to QAPI's +# 'command-returns-exceptions', it can actually be anything. +# +# {'return': {}} is a QMPMessage, +# {} is the QMPReturnValue. + + +class QMPBadPortError(QMPError): + """ + Unable to parse socket address: Port was non-numerical. + """ + + +class QEMUMonitorProtocol: + """ + Provide an API to connect to QEMU via QEMU Monitor Protocol (QMP) + and then allow to handle commands and events. 
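+
+    A typical synchronous session looks roughly like this (the address
+    and command shown are illustrative)::
+
+        qmp = QEMUMonitorProtocol(('127.0.0.1', 4444))
+        qmp.connect()
+        print(qmp.command('query-status'))
+        qmp.close()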
+ + :param address: QEMU address, can be either a unix socket path (string) + or a tuple in the form ( address, port ) for a TCP + connection + :param server: Act as the socket server. (See 'accept') + :param nickname: Optional nickname used for logging. + """ + + def __init__(self, address: SocketAddrT, + server: bool = False, + nickname: Optional[str] = None): + + self._qmp = QMPClient(nickname) + self._aloop = asyncio.get_event_loop() + self._address = address + self._timeout: Optional[float] = None + + if server: + self._sync(self._qmp.start_server(self._address)) + + _T = TypeVar('_T') + + def _sync( + self, future: Awaitable[_T], timeout: Optional[float] = None + ) -> _T: + return self._aloop.run_until_complete( + asyncio.wait_for(future, timeout=timeout) + ) + + def _get_greeting(self) -> Optional[QMPMessage]: + if self._qmp.greeting is not None: + # pylint: disable=protected-access + return self._qmp.greeting._asdict() + return None + + def __enter__(self: _T) -> _T: + # Implement context manager enter function. + return self + + def __exit__(self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType]) -> None: + # Implement context manager exit function. + self.close() + + @classmethod + def parse_address(cls, address: str) -> SocketAddrT: + """ + Parse a string into a QMP address. + + Figure out if the argument is in the port:host form. + If it's not, it's probably a file path. + """ + components = address.split(':') + if len(components) == 2: + try: + port = int(components[1]) + except ValueError: + msg = f"Bad port: '{components[1]}' in '{address}'." + raise QMPBadPortError(msg) from None + return (components[0], port) + + # Treat as filepath. + return address + + def connect(self, negotiate: bool = True) -> Optional[QMPMessage]: + """ + Connect to the QMP Monitor and perform capabilities negotiation. + + :return: QMP greeting dict, or None if negotiate is false + :raise ConnectError: on connection errors + """ + self._qmp.await_greeting = negotiate + self._qmp.negotiate = negotiate + + self._sync( + self._qmp.connect(self._address) + ) + return self._get_greeting() + + def accept(self, timeout: Optional[float] = 15.0) -> QMPMessage: + """ + Await connection from QMP Monitor and perform capabilities negotiation. + + :param timeout: + timeout in seconds (nonnegative float number, or None). + If None, there is no timeout, and this may block forever. + + :return: QMP greeting dict + :raise ConnectError: on connection errors + """ + self._qmp.await_greeting = True + self._qmp.negotiate = True + + self._sync(self._qmp.accept(), timeout) + + ret = self._get_greeting() + assert ret is not None + return ret + + def cmd_obj(self, qmp_cmd: QMPMessage) -> QMPMessage: + """ + Send a QMP command to the QMP Monitor. + + :param qmp_cmd: QMP command to be sent as a Python dict + :return: QMP response as a Python dict + """ + return dict( + self._sync( + # pylint: disable=protected-access + + # _raw() isn't a public API, because turning off + # automatic ID assignment is discouraged. For + # compatibility with iotests *only*, do it anyway. + self._qmp._raw(qmp_cmd, assign_id=False), + self._timeout + ) + ) + + def cmd(self, name: str, + args: Optional[Dict[str, object]] = None, + cmd_id: Optional[object] = None) -> QMPMessage: + """ + Build a QMP command and send it to the QMP Monitor. 
+ + :param name: command name (string) + :param args: command arguments (dict) + :param cmd_id: command id (dict, list, string or int) + """ + qmp_cmd: QMPMessage = {'execute': name} + if args: + qmp_cmd['arguments'] = args + if cmd_id: + qmp_cmd['id'] = cmd_id + return self.cmd_obj(qmp_cmd) + + def command(self, cmd: str, **kwds: object) -> QMPReturnValue: + """ + Build and send a QMP command to the monitor, report errors if any + """ + return self._sync( + self._qmp.execute(cmd, kwds), + self._timeout + ) + + def pull_event(self, + wait: Union[bool, float] = False) -> Optional[QMPMessage]: + """ + Pulls a single event. + + :param wait: + If False or 0, do not wait. Return None if no events ready. + If True, wait forever until the next event. + Otherwise, wait for the specified number of seconds. + + :raise asyncio.TimeoutError: + When a timeout is requested and the timeout period elapses. + + :return: The first available QMP event, or None. + """ + if not wait: + # wait is False/0: "do not wait, do not except." + if self._qmp.events.empty(): + return None + + # If wait is 'True', wait forever. If wait is False/0, the events + # queue must not be empty; but it still needs some real amount + # of time to complete. + timeout = None + if wait and isinstance(wait, float): + timeout = wait + + return dict( + self._sync( + self._qmp.events.get(), + timeout + ) + ) + + def get_events(self, wait: Union[bool, float] = False) -> List[QMPMessage]: + """ + Get a list of QMP events and clear all pending events. + + :param wait: + If False or 0, do not wait. Return None if no events ready. + If True, wait until we have at least one event. + Otherwise, wait for up to the specified number of seconds for at + least one event. + + :raise asyncio.TimeoutError: + When a timeout is requested and the timeout period elapses. + + :return: A list of QMP events. + """ + events = [dict(x) for x in self._qmp.events.clear()] + if events: + return events + + event = self.pull_event(wait) + return [event] if event is not None else [] + + def clear_events(self) -> None: + """Clear current list of pending events.""" + self._qmp.events.clear() + + def close(self) -> None: + """Close the connection.""" + self._sync( + self._qmp.disconnect() + ) + + def settimeout(self, timeout: Optional[float]) -> None: + """ + Set the timeout for QMP RPC execution. + + This timeout affects the `cmd`, `cmd_obj`, and `command` methods. + The `accept`, `pull_event` and `get_event` methods have their + own configurable timeouts. + + :param timeout: + timeout in seconds, or None. + None will wait indefinitely. + """ + self._timeout = timeout + + def send_fd_scm(self, fd: int) -> None: + """ + Send a file descriptor to the remote via SCM_RIGHTS. + """ + self._qmp.send_fd_scm(fd) + + def __del__(self) -> None: + if self._qmp.runstate == Runstate.IDLE: + return + + if not self._aloop.is_running(): + self.close() + else: + # Garbage collection ran while the event loop was running. + # Nothing we can do about it now, but if we don't raise our + # own error, the user will be treated to a lot of traceback + # they might not understand. 
+ raise QMPError( + "QEMUMonitorProtocol.close()" + " was not called before object was garbage collected" + ) diff --git a/python/qemu/qmp/message.py b/python/qemu/qmp/message.py new file mode 100644 index 0000000000..f76ccc9074 --- /dev/null +++ b/python/qemu/qmp/message.py @@ -0,0 +1,209 @@ +""" +QMP Message Format + +This module provides the `Message` class, which represents a single QMP +message sent to or from the server. +""" + +import json +from json import JSONDecodeError +from typing import ( + Dict, + Iterator, + Mapping, + MutableMapping, + Optional, + Union, +) + +from .error import ProtocolError + + +class Message(MutableMapping[str, object]): + """ + Represents a single QMP protocol message. + + QMP uses JSON objects as its basic communicative unit; so this + Python object is a :py:obj:`~collections.abc.MutableMapping`. It may + be instantiated from either another mapping (like a `dict`), or from + raw `bytes` that still need to be deserialized. + + Once instantiated, it may be treated like any other MutableMapping:: + + >>> msg = Message(b'{"hello": "world"}') + >>> assert msg['hello'] == 'world' + >>> msg['id'] = 'foobar' + >>> print(msg) + { + "hello": "world", + "id": "foobar" + } + + It can be converted to `bytes`:: + + >>> msg = Message({"hello": "world"}) + >>> print(bytes(msg)) + b'{"hello":"world","id":"foobar"}' + + Or back into a garden-variety `dict`:: + + >>> dict(msg) + {'hello': 'world'} + + + :param value: Initial value, if any. + :param eager: + When `True`, attempt to serialize or deserialize the initial value + immediately, so that conversion exceptions are raised during + the call to ``__init__()``. + """ + # pylint: disable=too-many-ancestors + + def __init__(self, + value: Union[bytes, Mapping[str, object]] = b'{}', *, + eager: bool = True): + self._data: Optional[bytes] = None + self._obj: Optional[Dict[str, object]] = None + + if isinstance(value, bytes): + self._data = value + if eager: + self._obj = self._deserialize(self._data) + else: + self._obj = dict(value) + if eager: + self._data = self._serialize(self._obj) + + # Methods necessary to implement the MutableMapping interface, see: + # https://docs.python.org/3/library/collections.abc.html#collections.abc.MutableMapping + + # We get pop, popitem, clear, update, setdefault, __contains__, + # keys, items, values, get, __eq__ and __ne__ for free. + + def __getitem__(self, key: str) -> object: + return self._object[key] + + def __setitem__(self, key: str, value: object) -> None: + self._object[key] = value + self._data = None + + def __delitem__(self, key: str) -> None: + del self._object[key] + self._data = None + + def __iter__(self) -> Iterator[str]: + return iter(self._object) + + def __len__(self) -> int: + return len(self._object) + + # Dunder methods not related to MutableMapping: + + def __repr__(self) -> str: + if self._obj is not None: + return f"Message({self._object!r})" + return f"Message({bytes(self)!r})" + + def __str__(self) -> str: + """Pretty-printed representation of this QMP message.""" + return json.dumps(self._object, indent=2) + + def __bytes__(self) -> bytes: + """bytes representing this QMP message.""" + if self._data is None: + self._data = self._serialize(self._obj or {}) + return self._data + + # Conversion Methods + + @property + def _object(self) -> Dict[str, object]: + """ + A `dict` representing this QMP message. + + Generated on-demand, if required. 
This property is private + because it returns an object that could be used to invalidate + the internal state of the `Message` object. + """ + if self._obj is None: + self._obj = self._deserialize(self._data or b'{}') + return self._obj + + @classmethod + def _serialize(cls, value: object) -> bytes: + """ + Serialize a JSON object as `bytes`. + + :raise ValueError: When the object cannot be serialized. + :raise TypeError: When the object cannot be serialized. + + :return: `bytes` ready to be sent over the wire. + """ + return json.dumps(value, separators=(',', ':')).encode('utf-8') + + @classmethod + def _deserialize(cls, data: bytes) -> Dict[str, object]: + """ + Deserialize JSON `bytes` into a native Python `dict`. + + :raise DeserializationError: + If JSON deserialization fails for any reason. + :raise UnexpectedTypeError: + If the data does not represent a JSON object. + + :return: A `dict` representing this QMP message. + """ + try: + obj = json.loads(data) + except JSONDecodeError as err: + emsg = "Failed to deserialize QMP message." + raise DeserializationError(emsg, data) from err + if not isinstance(obj, dict): + raise UnexpectedTypeError( + "QMP message is not a JSON object.", + obj + ) + return obj + + +class DeserializationError(ProtocolError): + """ + A QMP message was not understood as JSON. + + When this Exception is raised, ``__cause__`` will be set to the + `json.JSONDecodeError` Exception, which can be interrogated for + further details. + + :param error_message: Human-readable string describing the error. + :param raw: The raw `bytes` that prompted the failure. + """ + def __init__(self, error_message: str, raw: bytes): + super().__init__(error_message) + #: The raw `bytes` that were not understood as JSON. + self.raw: bytes = raw + + def __str__(self) -> str: + return "\n".join([ + super().__str__(), + f" raw bytes were: {str(self.raw)}", + ]) + + +class UnexpectedTypeError(ProtocolError): + """ + A QMP message was JSON, but not a JSON object. + + :param error_message: Human-readable string describing the error. + :param value: The deserialized JSON value that wasn't an object. + """ + def __init__(self, error_message: str, value: object): + super().__init__(error_message) + #: The JSON value that was expected to be an object. + self.value: object = value + + def __str__(self) -> str: + strval = json.dumps(self.value, indent=2) + return "\n".join([ + super().__str__(), + f" json value was: {strval}", + ]) diff --git a/python/qemu/qmp/models.py b/python/qemu/qmp/models.py new file mode 100644 index 0000000000..de87f87804 --- /dev/null +++ b/python/qemu/qmp/models.py @@ -0,0 +1,146 @@ +""" +QMP Data Models + +This module provides simplistic data classes that represent the few +structures that the QMP spec mandates; they are used to verify incoming +data to make sure it conforms to spec. +""" +# pylint: disable=too-few-public-methods + +from collections import abc +import copy +from typing import ( + Any, + Dict, + Mapping, + Optional, + Sequence, +) + + +class Model: + """ + Abstract data model, representing some QMP object of some kind. + + :param raw: The raw object to be validated. + :raise KeyError: If any required fields are absent. + :raise TypeError: If any required fields have the wrong type. 
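+
+    Subclasses are expected to validate their required members in
+    ``__init__``; a minimal sketch (the member name is illustrative)::
+
+        class Example(Model):
+            def __init__(self, raw: Mapping[str, Any]):
+                super().__init__(raw)
+                self._check_member('status', str, "string")
+                self.status = self._raw['status']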
+ """ + def __init__(self, raw: Mapping[str, Any]): + self._raw = raw + + def _check_key(self, key: str) -> None: + if key not in self._raw: + raise KeyError(f"'{self._name}' object requires '{key}' member") + + def _check_value(self, key: str, type_: type, typestr: str) -> None: + assert key in self._raw + if not isinstance(self._raw[key], type_): + raise TypeError( + f"'{self._name}' member '{key}' must be a {typestr}" + ) + + def _check_member(self, key: str, type_: type, typestr: str) -> None: + self._check_key(key) + self._check_value(key, type_, typestr) + + @property + def _name(self) -> str: + return type(self).__name__ + + def __repr__(self) -> str: + return f"{self._name}({self._raw!r})" + + +class Greeting(Model): + """ + Defined in qmp-spec.txt, section 2.2, "Server Greeting". + + :param raw: The raw Greeting object. + :raise KeyError: If any required fields are absent. + :raise TypeError: If any required fields have the wrong type. + """ + def __init__(self, raw: Mapping[str, Any]): + super().__init__(raw) + #: 'QMP' member + self.QMP: QMPGreeting # pylint: disable=invalid-name + + self._check_member('QMP', abc.Mapping, "JSON object") + self.QMP = QMPGreeting(self._raw['QMP']) + + def _asdict(self) -> Dict[str, object]: + """ + For compatibility with the iotests sync QMP wrapper. + + The legacy QMP interface needs Greetings as a garden-variety Dict. + + This interface is private in the hopes that it will be able to + be dropped again in the near-future. Caller beware! + """ + return dict(copy.deepcopy(self._raw)) + + +class QMPGreeting(Model): + """ + Defined in qmp-spec.txt, section 2.2, "Server Greeting". + + :param raw: The raw QMPGreeting object. + :raise KeyError: If any required fields are absent. + :raise TypeError: If any required fields have the wrong type. + """ + def __init__(self, raw: Mapping[str, Any]): + super().__init__(raw) + #: 'version' member + self.version: Mapping[str, object] + #: 'capabilities' member + self.capabilities: Sequence[object] + + self._check_member('version', abc.Mapping, "JSON object") + self.version = self._raw['version'] + + self._check_member('capabilities', abc.Sequence, "JSON array") + self.capabilities = self._raw['capabilities'] + + +class ErrorResponse(Model): + """ + Defined in qmp-spec.txt, section 2.4.2, "error". + + :param raw: The raw ErrorResponse object. + :raise KeyError: If any required fields are absent. + :raise TypeError: If any required fields have the wrong type. + """ + def __init__(self, raw: Mapping[str, Any]): + super().__init__(raw) + #: 'error' member + self.error: ErrorInfo + #: 'id' member + self.id: Optional[object] = None # pylint: disable=invalid-name + + self._check_member('error', abc.Mapping, "JSON object") + self.error = ErrorInfo(self._raw['error']) + + if 'id' in raw: + self.id = raw['id'] + + +class ErrorInfo(Model): + """ + Defined in qmp-spec.txt, section 2.4.2, "error". + + :param raw: The raw ErrorInfo object. + :raise KeyError: If any required fields are absent. + :raise TypeError: If any required fields have the wrong type. + """ + def __init__(self, raw: Mapping[str, Any]): + super().__init__(raw) + #: 'class' member, with an underscore to avoid conflicts in Python. 
+ self.class_: str + #: 'desc' member + self.desc: str + + self._check_member('class', str, "string") + self.class_ = self._raw['class'] + + self._check_member('desc', str, "string") + self.desc = self._raw['desc'] diff --git a/python/qemu/qmp/protocol.py b/python/qemu/qmp/protocol.py new file mode 100644 index 0000000000..6ea86650ad --- /dev/null +++ b/python/qemu/qmp/protocol.py @@ -0,0 +1,1048 @@ +""" +Generic Asynchronous Message-based Protocol Support + +This module provides a generic framework for sending and receiving +messages over an asyncio stream. `AsyncProtocol` is an abstract class +that implements the core mechanisms of a simple send/receive protocol, +and is designed to be extended. + +In this package, it is used as the implementation for the `QMPClient` +class. +""" + +# It's all the docstrings ... ! It's long for a good reason ^_^; +# pylint: disable=too-many-lines + +import asyncio +from asyncio import StreamReader, StreamWriter +from enum import Enum +from functools import wraps +import logging +from ssl import SSLContext +from typing import ( + Any, + Awaitable, + Callable, + Generic, + List, + Optional, + Tuple, + TypeVar, + Union, + cast, +) + +from .error import QMPError +from .util import ( + bottom_half, + create_task, + exception_summary, + flush, + is_closing, + pretty_traceback, + upper_half, + wait_closed, +) + + +T = TypeVar('T') +_U = TypeVar('_U') +_TaskFN = Callable[[], Awaitable[None]] # aka ``async def func() -> None`` + +InternetAddrT = Tuple[str, int] +UnixAddrT = str +SocketAddrT = Union[UnixAddrT, InternetAddrT] + + +class Runstate(Enum): + """Protocol session runstate.""" + + #: Fully quiesced and disconnected. + IDLE = 0 + #: In the process of connecting or establishing a session. + CONNECTING = 1 + #: Fully connected and active session. + RUNNING = 2 + #: In the process of disconnecting. + #: Runstate may be returned to `IDLE` by calling `disconnect()`. + DISCONNECTING = 3 + + +class ConnectError(QMPError): + """ + Raised when the initial connection process has failed. + + This Exception always wraps a "root cause" exception that can be + interrogated for additional information. + + :param error_message: Human-readable string describing the error. + :param exc: The root-cause exception. + """ + def __init__(self, error_message: str, exc: Exception): + super().__init__(error_message) + #: Human-readable error string + self.error_message: str = error_message + #: Wrapped root cause exception + self.exc: Exception = exc + + def __str__(self) -> str: + cause = str(self.exc) + if not cause: + # If there's no error string, use the exception name. + cause = exception_summary(self.exc) + return f"{self.error_message}: {cause}" + + +class StateError(QMPError): + """ + An API command (connect, execute, etc) was issued at an inappropriate time. + + This error is raised when a command like + :py:meth:`~AsyncProtocol.connect()` is issued at an inappropriate + time. + + :param error_message: Human-readable string describing the state violation. + :param state: The actual `Runstate` seen at the time of the violation. + :param required: The `Runstate` required to process this command. + """ + def __init__(self, error_message: str, + state: Runstate, required: Runstate): + super().__init__(error_message) + self.error_message = error_message + self.state = state + self.required = required + + +F = TypeVar('F', bound=Callable[..., Any]) # pylint: disable=invalid-name + + +# Don't Panic. 
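+#
+# A rough illustration of how the decorator below is meant to be used
+# (the class and method names here are only examples):
+#
+#     class MyProtocol(AsyncProtocol[bytes]):
+#         @require(Runstate.RUNNING)
+#         def poke(self) -> None:
+#             ...  # Raises StateError unless the session is RUNNING.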
+def require(required_state: Runstate) -> Callable[[F], F]: + """ + Decorator: protect a method so it can only be run in a certain `Runstate`. + + :param required_state: The `Runstate` required to invoke this method. + :raise StateError: When the required `Runstate` is not met. + """ + def _decorator(func: F) -> F: + # _decorator is the decorator that is built by calling the + # require() decorator factory; e.g.: + # + # @require(Runstate.IDLE) def foo(): ... + # will replace 'foo' with the result of '_decorator(foo)'. + + @wraps(func) + def _wrapper(proto: 'AsyncProtocol[Any]', + *args: Any, **kwargs: Any) -> Any: + # _wrapper is the function that gets executed prior to the + # decorated method. + + name = type(proto).__name__ + + if proto.runstate != required_state: + if proto.runstate == Runstate.CONNECTING: + emsg = f"{name} is currently connecting." + elif proto.runstate == Runstate.DISCONNECTING: + emsg = (f"{name} is disconnecting." + " Call disconnect() to return to IDLE state.") + elif proto.runstate == Runstate.RUNNING: + emsg = f"{name} is already connected and running." + elif proto.runstate == Runstate.IDLE: + emsg = f"{name} is disconnected and idle." + else: + assert False + raise StateError(emsg, proto.runstate, required_state) + # No StateError, so call the wrapped method. + return func(proto, *args, **kwargs) + + # Return the decorated method; + # Transforming Func to Decorated[Func]. + return cast(F, _wrapper) + + # Return the decorator instance from the decorator factory. Phew! + return _decorator + + +class AsyncProtocol(Generic[T]): + """ + AsyncProtocol implements a generic async message-based protocol. + + This protocol assumes the basic unit of information transfer between + client and server is a "message", the details of which are left up + to the implementation. It assumes the sending and receiving of these + messages is full-duplex and not necessarily correlated; i.e. it + supports asynchronous inbound messages. + + It is designed to be extended by a specific protocol which provides + the implementations for how to read and send messages. These must be + defined in `_do_recv()` and `_do_send()`, respectively. + + Other callbacks have a default implementation, but are intended to be + either extended or overridden: + + - `_establish_session`: + The base implementation starts the reader/writer tasks. + A protocol implementation can override this call, inserting + actions to be taken prior to starting the reader/writer tasks + before the super() call; actions needing to occur afterwards + can be written after the super() call. + - `_on_message`: + Actions to be performed when a message is received. + - `_cb_outbound`: + Logging/Filtering hook for all outbound messages. + - `_cb_inbound`: + Logging/Filtering hook for all inbound messages. + This hook runs *before* `_on_message()`. + + :param name: + Name used for logging messages, if any. By default, messages + will log to 'qemu.qmp.protocol', but each individual connection + can be given its own logger by giving it a name; messages will + then log to 'qemu.qmp.protocol.${name}'. + """ + # pylint: disable=too-many-instance-attributes + + #: Logger object for debugging messages from this connection. + logger = logging.getLogger(__name__) + + # Maximum allowable size of read buffer + _limit = (64 * 1024) + + # ------------------------- + # Section: Public interface + # ------------------------- + + def __init__(self, name: Optional[str] = None) -> None: + #: The nickname for this connection, if any. 
+ self.name: Optional[str] = name + if self.name is not None: + self.logger = self.logger.getChild(self.name) + + # stream I/O + self._reader: Optional[StreamReader] = None + self._writer: Optional[StreamWriter] = None + + # Outbound Message queue + self._outgoing: asyncio.Queue[T] + + # Special, long-running tasks: + self._reader_task: Optional[asyncio.Future[None]] = None + self._writer_task: Optional[asyncio.Future[None]] = None + + # Aggregate of the above two tasks, used for Exception management. + self._bh_tasks: Optional[asyncio.Future[Tuple[None, None]]] = None + + #: Disconnect task. The disconnect implementation runs in a task + #: so that asynchronous disconnects (initiated by the + #: reader/writer) are allowed to wait for the reader/writers to + #: exit. + self._dc_task: Optional[asyncio.Future[None]] = None + + self._runstate = Runstate.IDLE + self._runstate_changed: Optional[asyncio.Event] = None + + # Server state for start_server() and _incoming() + self._server: Optional[asyncio.AbstractServer] = None + self._accepted: Optional[asyncio.Event] = None + + def __repr__(self) -> str: + cls_name = type(self).__name__ + tokens = [] + if self.name is not None: + tokens.append(f"name={self.name!r}") + tokens.append(f"runstate={self.runstate.name}") + return f"<{cls_name} {' '.join(tokens)}>" + + @property # @upper_half + def runstate(self) -> Runstate: + """The current `Runstate` of the connection.""" + return self._runstate + + @upper_half + async def runstate_changed(self) -> Runstate: + """ + Wait for the `runstate` to change, then return that runstate. + """ + await self._runstate_event.wait() + return self.runstate + + @upper_half + @require(Runstate.IDLE) + async def start_server_and_accept( + self, address: SocketAddrT, + ssl: Optional[SSLContext] = None + ) -> None: + """ + Accept a connection and begin processing message queues. + + If this call fails, `runstate` is guaranteed to be set back to `IDLE`. + This method is precisely equivalent to calling `start_server()` + followed by `accept()`. + + :param address: + Address to listen on; UNIX socket path or TCP address/port. + :param ssl: SSL context to use, if any. + + :raise StateError: When the `Runstate` is not `IDLE`. + :raise ConnectError: + When a connection or session cannot be established. + + This exception will wrap a more concrete one. In most cases, + the wrapped exception will be `OSError` or `EOFError`. If a + protocol-level failure occurs while establishing a new + session, the wrapped error may also be an `QMPError`. + """ + await self.start_server(address, ssl) + await self.accept() + assert self.runstate == Runstate.RUNNING + + @upper_half + @require(Runstate.IDLE) + async def start_server(self, address: SocketAddrT, + ssl: Optional[SSLContext] = None) -> None: + """ + Start listening for an incoming connection, but do not wait for a peer. + + This method starts listening for an incoming connection, but + does not block waiting for a peer. This call will return + immediately after binding and listening on a socket. A later + call to `accept()` must be made in order to finalize the + incoming connection. + + :param address: + Address to listen on; UNIX socket path or TCP address/port. + :param ssl: SSL context to use, if any. + + :raise StateError: When the `Runstate` is not `IDLE`. + :raise ConnectError: + When the server could not start listening on this address. + + This exception will wrap a more concrete one. In most cases, + the wrapped exception will be `OSError`. 
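+
+        A rough sketch of server-mode use, given ``proto``, an instance
+        of a concrete subclass (the socket path is illustrative)::
+
+            await proto.start_server('/tmp/qmp-server.sock')
+            await proto.accept()
+            assert proto.runstate == Runstate.RUNNING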
+ """ + await self._session_guard( + self._do_start_server(address, ssl), + 'Failed to establish connection') + assert self.runstate == Runstate.CONNECTING + + @upper_half + @require(Runstate.CONNECTING) + async def accept(self) -> None: + """ + Accept an incoming connection and begin processing message queues. + + If this call fails, `runstate` is guaranteed to be set back to `IDLE`. + + :raise StateError: When the `Runstate` is not `CONNECTING`. + :raise QMPError: When `start_server()` was not called yet. + :raise ConnectError: + When a connection or session cannot be established. + + This exception will wrap a more concrete one. In most cases, + the wrapped exception will be `OSError` or `EOFError`. If a + protocol-level failure occurs while establishing a new + session, the wrapped error may also be an `QMPError`. + """ + if self._accepted is None: + raise QMPError("Cannot call accept() before start_server().") + await self._session_guard( + self._do_accept(), + 'Failed to establish connection') + await self._session_guard( + self._establish_session(), + 'Failed to establish session') + assert self.runstate == Runstate.RUNNING + + @upper_half + @require(Runstate.IDLE) + async def connect(self, address: SocketAddrT, + ssl: Optional[SSLContext] = None) -> None: + """ + Connect to the server and begin processing message queues. + + If this call fails, `runstate` is guaranteed to be set back to `IDLE`. + + :param address: + Address to connect to; UNIX socket path or TCP address/port. + :param ssl: SSL context to use, if any. + + :raise StateError: When the `Runstate` is not `IDLE`. + :raise ConnectError: + When a connection or session cannot be established. + + This exception will wrap a more concrete one. In most cases, + the wrapped exception will be `OSError` or `EOFError`. If a + protocol-level failure occurs while establishing a new + session, the wrapped error may also be an `QMPError`. + """ + await self._session_guard( + self._do_connect(address, ssl), + 'Failed to establish connection') + await self._session_guard( + self._establish_session(), + 'Failed to establish session') + assert self.runstate == Runstate.RUNNING + + @upper_half + async def disconnect(self) -> None: + """ + Disconnect and wait for all tasks to fully stop. + + If there was an exception that caused the reader/writers to + terminate prematurely, it will be raised here. + + :raise Exception: When the reader or writer terminate unexpectedly. + """ + self.logger.debug("disconnect() called.") + self._schedule_disconnect() + await self._wait_disconnect() + + # -------------------------- + # Section: Session machinery + # -------------------------- + + async def _session_guard(self, coro: Awaitable[None], emsg: str) -> None: + """ + Async guard function used to roll back to `IDLE` on any error. + + On any Exception, the state machine will be reset back to + `IDLE`. Most Exceptions will be wrapped with `ConnectError`, but + `BaseException` events will be left alone (This includes + asyncio.CancelledError, even prior to Python 3.8). + + :param error_message: + Human-readable string describing what connection phase failed. + + :raise BaseException: + When `BaseException` occurs in the guarded block. + :raise ConnectError: + When any other error is encountered in the guarded block. + """ + # Note: After Python 3.6 support is removed, this should be an + # @asynccontextmanager instead of accepting a callback. 
+ try: + await coro + except BaseException as err: + self.logger.error("%s: %s", emsg, exception_summary(err)) + self.logger.debug("%s:\n%s\n", emsg, pretty_traceback()) + try: + # Reset the runstate back to IDLE. + await self.disconnect() + except: + # We don't expect any Exceptions from the disconnect function + # here, because we failed to connect in the first place. + # The disconnect() function is intended to perform + # only cannot-fail cleanup here, but you never know. + emsg = ( + "Unexpected bottom half exception. " + "This is a bug in the QMP library. " + "Please report it to and " + "CC: John Snow ." + ) + self.logger.critical("%s:\n%s\n", emsg, pretty_traceback()) + raise + + # CancelledError is an Exception with special semantic meaning; + # We do NOT want to wrap it up under ConnectError. + # NB: CancelledError is not a BaseException before Python 3.8 + if isinstance(err, asyncio.CancelledError): + raise + + # Any other kind of error can be treated as some kind of connection + # failure broadly. Inspect the 'exc' field to explore the root + # cause in greater detail. + if isinstance(err, Exception): + raise ConnectError(emsg, err) from err + + # Raise BaseExceptions un-wrapped, they're more important. + raise + + @property + def _runstate_event(self) -> asyncio.Event: + # asyncio.Event() objects should not be created prior to entrance into + # an event loop, so we can ensure we create it in the correct context. + # Create it on-demand *only* at the behest of an 'async def' method. + if not self._runstate_changed: + self._runstate_changed = asyncio.Event() + return self._runstate_changed + + @upper_half + @bottom_half + def _set_state(self, state: Runstate) -> None: + """ + Change the `Runstate` of the protocol connection. + + Signals the `runstate_changed` event. + """ + if state == self._runstate: + return + + self.logger.debug("Transitioning from '%s' to '%s'.", + str(self._runstate), str(state)) + self._runstate = state + self._runstate_event.set() + self._runstate_event.clear() + + @bottom_half + async def _stop_server(self) -> None: + """ + Stop listening for / accepting new incoming connections. + """ + if self._server is None: + return + + try: + self.logger.debug("Stopping server.") + self._server.close() + await self._server.wait_closed() + self.logger.debug("Server stopped.") + finally: + self._server = None + + @bottom_half # However, it does not run from the R/W tasks. + async def _incoming(self, + reader: asyncio.StreamReader, + writer: asyncio.StreamWriter) -> None: + """ + Accept an incoming connection and signal the upper_half. + + This method does the minimum necessary to accept a single + incoming connection. It signals back to the upper_half ASAP so + that any errors during session initialization can occur + naturally in the caller's stack. + + :param reader: Incoming `asyncio.StreamReader` + :param writer: Incoming `asyncio.StreamWriter` + """ + peer = writer.get_extra_info('peername', 'Unknown peer') + self.logger.debug("Incoming connection from %s", peer) + + if self._reader or self._writer: + # Sadly, we can have more than one pending connection + # because of https://bugs.python.org/issue46715 + # Close any extra connections we don't actually want. + self.logger.warning("Extraneous connection inadvertently accepted") + writer.close() + return + + # A connection has been accepted; stop listening for new ones. 
+ assert self._accepted is not None + await self._stop_server() + self._reader, self._writer = (reader, writer) + self._accepted.set() + + @upper_half + async def _do_start_server(self, address: SocketAddrT, + ssl: Optional[SSLContext] = None) -> None: + """ + Start listening for an incoming connection, but do not wait for a peer. + + This method starts listening for an incoming connection, but does not + block waiting for a peer. This call will return immediately after + binding and listening to a socket. A later call to accept() must be + made in order to finalize the incoming connection. + + :param address: + Address to listen on; UNIX socket path or TCP address/port. + :param ssl: SSL context to use, if any. + + :raise OSError: For stream-related errors. + """ + assert self.runstate == Runstate.IDLE + self._set_state(Runstate.CONNECTING) + + self.logger.debug("Awaiting connection on %s ...", address) + self._accepted = asyncio.Event() + + if isinstance(address, tuple): + coro = asyncio.start_server( + self._incoming, + host=address[0], + port=address[1], + ssl=ssl, + backlog=1, + limit=self._limit, + ) + else: + coro = asyncio.start_unix_server( + self._incoming, + path=address, + ssl=ssl, + backlog=1, + limit=self._limit, + ) + + # Allow runstate watchers to witness 'CONNECTING' state; some + # failures in the streaming layer are synchronous and will not + # otherwise yield. + await asyncio.sleep(0) + + # This will start the server (bind(2), listen(2)). It will also + # call accept(2) if we yield, but we don't block on that here. + self._server = await coro + self.logger.debug("Server listening on %s", address) + + @upper_half + async def _do_accept(self) -> None: + """ + Wait for and accept an incoming connection. + + Requires that we have not yet accepted an incoming connection + from the upper_half, but it's OK if the server is no longer + running because the bottom_half has already accepted the + connection. + """ + assert self._accepted is not None + await self._accepted.wait() + assert self._server is None + self._accepted = None + + self.logger.debug("Connection accepted.") + + @upper_half + async def _do_connect(self, address: SocketAddrT, + ssl: Optional[SSLContext] = None) -> None: + """ + Acting as the transport client, initiate a connection to a server. + + :param address: + Address to connect to; UNIX socket path or TCP address/port. + :param ssl: SSL context to use, if any. + + :raise OSError: For stream-related errors. + """ + assert self.runstate == Runstate.IDLE + self._set_state(Runstate.CONNECTING) + + # Allow runstate watchers to witness 'CONNECTING' state; some + # failures in the streaming layer are synchronous and will not + # otherwise yield. + await asyncio.sleep(0) + + self.logger.debug("Connecting to %s ...", address) + + if isinstance(address, tuple): + connect = asyncio.open_connection( + address[0], + address[1], + ssl=ssl, + limit=self._limit, + ) + else: + connect = asyncio.open_unix_connection( + path=address, + ssl=ssl, + limit=self._limit, + ) + self._reader, self._writer = await connect + + self.logger.debug("Connected.") + + @upper_half + async def _establish_session(self) -> None: + """ + Establish a new session. + + Starts the readers/writer tasks; subclasses may perform their + own negotiations here. The Runstate will be RUNNING upon + successful conclusion. 
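In contrast to connect(), the server-oriented path shown above splits setup across two public calls, start_server() and accept(). A sketch under the same assumptions as before (QMPClient as the concrete subclass, example socket path):

import asyncio

from qemu.qmp.qmp_client import QMPClient


async def serve_one(path: str = '/tmp/qmp-listen.sock') -> None:
    qmp = QMPClient('listener')
    await qmp.start_server(path)   # bind(2)/listen(2); returns immediately
    await qmp.accept()             # wait for one peer, then run the session
    try:
        ...                        # interact with the connected peer here
    finally:
        await qmp.disconnect()


if __name__ == '__main__':
    asyncio.run(serve_one())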
+ """ + assert self.runstate == Runstate.CONNECTING + + self._outgoing = asyncio.Queue() + + reader_coro = self._bh_loop_forever(self._bh_recv_message, 'Reader') + writer_coro = self._bh_loop_forever(self._bh_send_message, 'Writer') + + self._reader_task = create_task(reader_coro) + self._writer_task = create_task(writer_coro) + + self._bh_tasks = asyncio.gather( + self._reader_task, + self._writer_task, + ) + + self._set_state(Runstate.RUNNING) + await asyncio.sleep(0) # Allow runstate_event to process + + @upper_half + @bottom_half + def _schedule_disconnect(self) -> None: + """ + Initiate a disconnect; idempotent. + + This method is used both in the upper-half as a direct + consequence of `disconnect()`, and in the bottom-half in the + case of unhandled exceptions in the reader/writer tasks. + + It can be invoked no matter what the `runstate` is. + """ + if not self._dc_task: + self._set_state(Runstate.DISCONNECTING) + self.logger.debug("Scheduling disconnect.") + self._dc_task = create_task(self._bh_disconnect()) + + @upper_half + async def _wait_disconnect(self) -> None: + """ + Waits for a previously scheduled disconnect to finish. + + This method will gather any bottom half exceptions and re-raise + the one that occurred first; presuming it to be the root cause + of any subsequent Exceptions. It is intended to be used in the + upper half of the call chain. + + :raise Exception: + Arbitrary exception re-raised on behalf of the reader/writer. + """ + assert self.runstate == Runstate.DISCONNECTING + assert self._dc_task + + aws: List[Awaitable[object]] = [self._dc_task] + if self._bh_tasks: + aws.insert(0, self._bh_tasks) + all_defined_tasks = asyncio.gather(*aws) + + # Ensure disconnect is done; Exception (if any) is not raised here: + await asyncio.wait((self._dc_task,)) + + try: + await all_defined_tasks # Raise Exceptions from the bottom half. + finally: + self._cleanup() + self._set_state(Runstate.IDLE) + + @upper_half + def _cleanup(self) -> None: + """ + Fully reset this object to a clean state and return to `IDLE`. + """ + def _paranoid_task_erase(task: Optional['asyncio.Future[_U]'] + ) -> Optional['asyncio.Future[_U]']: + # Help to erase a task, ENSURING it is fully quiesced first. + assert (task is None) or task.done() + return None if (task and task.done()) else task + + assert self.runstate == Runstate.DISCONNECTING + self._dc_task = _paranoid_task_erase(self._dc_task) + self._reader_task = _paranoid_task_erase(self._reader_task) + self._writer_task = _paranoid_task_erase(self._writer_task) + self._bh_tasks = _paranoid_task_erase(self._bh_tasks) + + self._reader = None + self._writer = None + self._accepted = None + + # NB: _runstate_changed cannot be cleared because we still need it to + # send the final runstate changed event ...! + + # ---------------------------- + # Section: Bottom Half methods + # ---------------------------- + + @bottom_half + async def _bh_disconnect(self) -> None: + """ + Disconnect and cancel all outstanding tasks. + + It is designed to be called from its task context, + :py:obj:`~AsyncProtocol._dc_task`. By running in its own task, + it is free to wait on any pending actions that may still need to + occur in either the reader or writer tasks. + """ + assert self.runstate == Runstate.DISCONNECTING + + def _done(task: Optional['asyncio.Future[Any]']) -> bool: + return task is not None and task.done() + + # If the server is running, stop it. + await self._stop_server() + + # Are we already in an error pathway? 
If either of the tasks are + # already done, or if we have no tasks but a reader/writer; we + # must be. + # + # NB: We can't use _bh_tasks to check for premature task + # completion, because it may not yet have had a chance to run + # and gather itself. + tasks = tuple(filter(None, (self._writer_task, self._reader_task))) + error_pathway = _done(self._reader_task) or _done(self._writer_task) + if not tasks: + error_pathway |= bool(self._reader) or bool(self._writer) + + try: + # Try to flush the writer, if possible. + # This *may* cause an error and force us over into the error path. + if not error_pathway: + await self._bh_flush_writer() + except BaseException as err: + error_pathway = True + emsg = "Failed to flush the writer" + self.logger.error("%s: %s", emsg, exception_summary(err)) + self.logger.debug("%s:\n%s\n", emsg, pretty_traceback()) + raise + finally: + # Cancel any still-running tasks (Won't raise): + if self._writer_task is not None and not self._writer_task.done(): + self.logger.debug("Cancelling writer task.") + self._writer_task.cancel() + if self._reader_task is not None and not self._reader_task.done(): + self.logger.debug("Cancelling reader task.") + self._reader_task.cancel() + + # Close out the tasks entirely (Won't raise): + if tasks: + self.logger.debug("Waiting for tasks to complete ...") + await asyncio.wait(tasks) + + # Lastly, close the stream itself. (*May raise*!): + await self._bh_close_stream(error_pathway) + self.logger.debug("Disconnected.") + + @bottom_half + async def _bh_flush_writer(self) -> None: + if not self._writer_task: + return + + self.logger.debug("Draining the outbound queue ...") + await self._outgoing.join() + if self._writer is not None: + self.logger.debug("Flushing the StreamWriter ...") + await flush(self._writer) + + @bottom_half + async def _bh_close_stream(self, error_pathway: bool = False) -> None: + # NB: Closing the writer also implcitly closes the reader. + if not self._writer: + return + + if not is_closing(self._writer): + self.logger.debug("Closing StreamWriter.") + self._writer.close() + + self.logger.debug("Waiting for StreamWriter to close ...") + try: + await wait_closed(self._writer) + except Exception: # pylint: disable=broad-except + # It's hard to tell if the Stream is already closed or + # not. Even if one of the tasks has failed, it may have + # failed for a higher-layered protocol reason. The + # stream could still be open and perfectly fine. + # I don't know how to discern its health here. + + if error_pathway: + # We already know that *something* went wrong. Let's + # just trust that the Exception we already have is the + # better one to present to the user, even if we don't + # genuinely *know* the relationship between the two. + self.logger.debug( + "Discarding Exception from wait_closed:\n%s\n", + pretty_traceback(), + ) + else: + # Oops, this is a brand-new error! + raise + finally: + self.logger.debug("StreamWriter closed.") + + @bottom_half + async def _bh_loop_forever(self, async_fn: _TaskFN, name: str) -> None: + """ + Run one of the bottom-half methods in a loop forever. + + If the bottom half ever raises any exception, schedule a + disconnect that will terminate the entire loop. + + :param async_fn: The bottom-half method to run in a loop. + :param name: The name of this task, used for logging. + """ + try: + while True: + await async_fn() + except asyncio.CancelledError: + # We have been cancelled by _bh_disconnect, exit gracefully. 
+ self.logger.debug("Task.%s: cancelled.", name) + return + except BaseException as err: + self.logger.log( + logging.INFO if isinstance(err, EOFError) else logging.ERROR, + "Task.%s: %s", + name, exception_summary(err) + ) + self.logger.debug("Task.%s: failure:\n%s\n", + name, pretty_traceback()) + self._schedule_disconnect() + raise + finally: + self.logger.debug("Task.%s: exiting.", name) + + @bottom_half + async def _bh_send_message(self) -> None: + """ + Wait for an outgoing message, then send it. + + Designed to be run in `_bh_loop_forever()`. + """ + msg = await self._outgoing.get() + try: + await self._send(msg) + finally: + self._outgoing.task_done() + + @bottom_half + async def _bh_recv_message(self) -> None: + """ + Wait for an incoming message and call `_on_message` to route it. + + Designed to be run in `_bh_loop_forever()`. + """ + msg = await self._recv() + await self._on_message(msg) + + # -------------------- + # Section: Message I/O + # -------------------- + + @upper_half + @bottom_half + def _cb_outbound(self, msg: T) -> T: + """ + Callback: outbound message hook. + + This is intended for subclasses to be able to add arbitrary + hooks to filter or manipulate outgoing messages. The base + implementation does nothing but log the message without any + manipulation of the message. + + :param msg: raw outbound message + :return: final outbound message + """ + self.logger.debug("--> %s", str(msg)) + return msg + + @upper_half + @bottom_half + def _cb_inbound(self, msg: T) -> T: + """ + Callback: inbound message hook. + + This is intended for subclasses to be able to add arbitrary + hooks to filter or manipulate incoming messages. The base + implementation does nothing but log the message without any + manipulation of the message. + + This method does not "handle" incoming messages; it is a filter. + The actual "endpoint" for incoming messages is `_on_message()`. + + :param msg: raw inbound message + :return: processed inbound message + """ + self.logger.debug("<-- %s", str(msg)) + return msg + + @upper_half + @bottom_half + async def _readline(self) -> bytes: + """ + Wait for a newline from the incoming reader. + + This method is provided as a convenience for upper-layer + protocols, as many are line-based. + + This method *may* return a sequence of bytes without a trailing + newline if EOF occurs, but *some* bytes were received. In this + case, the next call will raise `EOFError`. It is assumed that + the layer 5 protocol will decide if there is anything meaningful + to be done with a partial message. + + :raise OSError: For stream-related errors. + :raise EOFError: + If the reader stream is at EOF and there are no bytes to return. + :return: bytes, including the newline. + """ + assert self._reader is not None + msg_bytes = await self._reader.readline() + + if not msg_bytes: + if self._reader.at_eof(): + raise EOFError + + return msg_bytes + + @upper_half + @bottom_half + async def _do_recv(self) -> T: + """ + Abstract: Read from the stream and return a message. + + Very low-level; intended to only be called by `_recv()`. + """ + raise NotImplementedError + + @upper_half + @bottom_half + async def _recv(self) -> T: + """ + Read an arbitrary protocol message. + + .. warning:: + This method is intended primarily for `_bh_recv_message()` + to use in an asynchronous task loop. Using it outside of + this loop will "steal" messages from the normal routing + mechanism. It is safe to use prior to `_establish_session()`, + but should not be used otherwise. 
+ + This method uses `_do_recv()` to retrieve the raw message, and + then transforms it using `_cb_inbound()`. + + :return: A single (filtered, processed) protocol message. + """ + message = await self._do_recv() + return self._cb_inbound(message) + + @upper_half + @bottom_half + def _do_send(self, msg: T) -> None: + """ + Abstract: Write a message to the stream. + + Very low-level; intended to only be called by `_send()`. + """ + raise NotImplementedError + + @upper_half + @bottom_half + async def _send(self, msg: T) -> None: + """ + Send an arbitrary protocol message. + + This method will transform any outgoing messages according to + `_cb_outbound()`. + + .. warning:: + Like `_recv()`, this method is intended to be called by + the writer task loop that processes outgoing + messages. Calling it directly may circumvent logic + implemented by the caller meant to correlate outgoing and + incoming messages. + + :raise OSError: For problems with the underlying stream. + """ + msg = self._cb_outbound(msg) + self._do_send(msg) + + @bottom_half + async def _on_message(self, msg: T) -> None: + """ + Called to handle the receipt of a new message. + + .. caution:: + This is executed from within the reader loop, so be advised + that waiting on either the reader or writer task will lead + to deadlock. Additionally, any unhandled exceptions will + directly cause the loop to halt, so logic may be best-kept + to a minimum if at all possible. + + :param msg: The incoming message, already logged/filtered. + """ + # Nothing to do in the abstract case. diff --git a/python/qemu/qmp/qmp_client.py b/python/qemu/qmp/qmp_client.py new file mode 100644 index 0000000000..5dcda04a75 --- /dev/null +++ b/python/qemu/qmp/qmp_client.py @@ -0,0 +1,655 @@ +""" +QMP Protocol Implementation + +This module provides the `QMPClient` class, which can be used to connect +and send commands to a QMP server such as QEMU. The QMP class can be +used to either connect to a listening server, or used to listen and +accept an incoming connection from that server. +""" + +import asyncio +import logging +import socket +import struct +from typing import ( + Dict, + List, + Mapping, + Optional, + Union, + cast, +) + +from .error import ProtocolError, QMPError +from .events import Events +from .message import Message +from .models import ErrorResponse, Greeting +from .protocol import AsyncProtocol, Runstate, require +from .util import ( + bottom_half, + exception_summary, + pretty_traceback, + upper_half, +) + + +class _WrappedProtocolError(ProtocolError): + """ + Abstract exception class for Protocol errors that wrap an Exception. + + :param error_message: Human-readable string describing the error. + :param exc: The root-cause exception. + """ + def __init__(self, error_message: str, exc: Exception): + super().__init__(error_message) + self.exc = exc + + def __str__(self) -> str: + return f"{self.error_message}: {self.exc!s}" + + +class GreetingError(_WrappedProtocolError): + """ + An exception occurred during the Greeting phase. + + :param error_message: Human-readable string describing the error. + :param exc: The root-cause exception. + """ + + +class NegotiationError(_WrappedProtocolError): + """ + An exception occurred during the Negotiation phase. + + :param error_message: Human-readable string describing the error. + :param exc: The root-cause exception. + """ + + +class ExecuteError(QMPError): + """ + Exception raised by `QMPClient.execute()` on RPC failure. + + :param error_response: The RPC error response object. 
+ :param sent: The sent RPC message that caused the failure. + :param received: The raw RPC error reply received. + """ + def __init__(self, error_response: ErrorResponse, + sent: Message, received: Message): + super().__init__(error_response.error.desc) + #: The sent `Message` that caused the failure + self.sent: Message = sent + #: The received `Message` that indicated failure + self.received: Message = received + #: The parsed error response + self.error: ErrorResponse = error_response + #: The QMP error class + self.error_class: str = error_response.error.class_ + + +class ExecInterruptedError(QMPError): + """ + Exception raised by `execute()` (et al) when an RPC is interrupted. + + This error is raised when an `execute()` statement could not be + completed. This can occur because the connection itself was + terminated before a reply was received. + + The true cause of the interruption will be available via `disconnect()`. + """ + + +class _MsgProtocolError(ProtocolError): + """ + Abstract error class for protocol errors that have a `Message` object. + + This Exception class is used for protocol errors where the `Message` + was mechanically understood, but was found to be inappropriate or + malformed. + + :param error_message: Human-readable string describing the error. + :param msg: The QMP `Message` that caused the error. + """ + def __init__(self, error_message: str, msg: Message): + super().__init__(error_message) + #: The received `Message` that caused the error. + self.msg: Message = msg + + def __str__(self) -> str: + return "\n".join([ + super().__str__(), + f" Message was: {str(self.msg)}\n", + ]) + + +class ServerParseError(_MsgProtocolError): + """ + The Server sent a `Message` indicating parsing failure. + + i.e. A reply has arrived from the server, but it is missing the "ID" + field, indicating a parsing error. + + :param error_message: Human-readable string describing the error. + :param msg: The QMP `Message` that caused the error. + """ + + +class BadReplyError(_MsgProtocolError): + """ + An execution reply was successfully routed, but not understood. + + If a QMP message is received with an 'id' field to allow it to be + routed, but is otherwise malformed, this exception will be raised. + + A reply message is malformed if it is missing either the 'return' or + 'error' keys, or if the 'error' value has missing keys or members of + the wrong type. + + :param error_message: Human-readable string describing the error. + :param msg: The malformed reply that was received. + :param sent: The message that was sent that prompted the error. + """ + def __init__(self, error_message: str, msg: Message, sent: Message): + super().__init__(error_message, msg) + #: The sent `Message` that caused the failure + self.sent = sent + + +class QMPClient(AsyncProtocol[Message], Events): + """ + Implements a QMP client connection. + + QMP can be used to establish a connection as either the transport + client or server, though this class always acts as the QMP client. + + :param name: Optional nickname for the connection, used for logging. + + Basic script-style usage looks like this:: + + qmp = QMPClient('my_virtual_machine_name') + await qmp.connect(('127.0.0.1', 1234)) + ... + res = await qmp.execute('block-query') + ... 
+ await qmp.disconnect() + + Basic async client-style usage looks like this:: + + class Client: + def __init__(self, name: str): + self.qmp = QMPClient(name) + + async def watch_events(self): + try: + async for event in self.qmp.events: + print(f"Event: {event['event']}") + except asyncio.CancelledError: + return + + async def run(self, address='/tmp/qemu.socket'): + await self.qmp.connect(address) + asyncio.create_task(self.watch_events()) + await self.qmp.runstate_changed.wait() + await self.disconnect() + + See `qmp.events` for more detail on event handling patterns. + """ + #: Logger object used for debugging messages. + logger = logging.getLogger(__name__) + + # Read buffer limit; large enough to accept query-qmp-schema + _limit = (256 * 1024) + + # Type alias for pending execute() result items + _PendingT = Union[Message, ExecInterruptedError] + + def __init__(self, name: Optional[str] = None) -> None: + super().__init__(name) + Events.__init__(self) + + #: Whether or not to await a greeting after establishing a connection. + self.await_greeting: bool = True + + #: Whether or not to perform capabilities negotiation upon connection. + #: Implies `await_greeting`. + self.negotiate: bool = True + + # Cached Greeting, if one was awaited. + self._greeting: Optional[Greeting] = None + + # Command ID counter + self._execute_id = 0 + + # Incoming RPC reply messages. + self._pending: Dict[ + Union[str, None], + 'asyncio.Queue[QMPClient._PendingT]' + ] = {} + + @property + def greeting(self) -> Optional[Greeting]: + """The `Greeting` from the QMP server, if any.""" + return self._greeting + + @upper_half + async def _establish_session(self) -> None: + """ + Initiate the QMP session. + + Wait for the QMP greeting and perform capabilities negotiation. + + :raise GreetingError: When the greeting is not understood. + :raise NegotiationError: If the negotiation fails. + :raise EOFError: When the server unexpectedly hangs up. + :raise OSError: For underlying stream errors. + """ + self._greeting = None + self._pending = {} + + if self.await_greeting or self.negotiate: + self._greeting = await self._get_greeting() + + if self.negotiate: + await self._negotiate() + + # This will start the reader/writers: + await super()._establish_session() + + @upper_half + async def _get_greeting(self) -> Greeting: + """ + :raise GreetingError: When the greeting is not understood. + :raise EOFError: When the server unexpectedly hangs up. + :raise OSError: For underlying stream errors. + + :return: the Greeting object given by the server. + """ + self.logger.debug("Awaiting greeting ...") + + try: + msg = await self._recv() + return Greeting(msg) + except (ProtocolError, KeyError, TypeError) as err: + emsg = "Did not understand Greeting" + self.logger.error("%s: %s", emsg, exception_summary(err)) + self.logger.debug("%s:\n%s\n", emsg, pretty_traceback()) + raise GreetingError(emsg, err) from err + except BaseException as err: + # EOFError, OSError, or something unexpected. + emsg = "Failed to receive Greeting" + self.logger.error("%s: %s", emsg, exception_summary(err)) + self.logger.debug("%s:\n%s\n", emsg, pretty_traceback()) + raise + + @upper_half + async def _negotiate(self) -> None: + """ + Perform QMP capabilities negotiation. + + :raise NegotiationError: When negotiation fails. + :raise EOFError: When the server unexpectedly hangs up. + :raise OSError: For underlying stream errors. 
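A sketch of inspecting the cached greeting and the negotiation knobs documented above; the attribute names are as defined in this class, while the address is an example:

import asyncio

from qemu.qmp.qmp_client import QMPClient


async def show_greeting(path: str = '/tmp/qmp.sock') -> None:
    qmp = QMPClient('inspect')
    # await_greeting and negotiate default to True, so the greeting is
    # cached and capabilities are negotiated during connect().
    await qmp.connect(path)
    try:
        if qmp.greeting is not None:
            print("QMP capabilities:", qmp.greeting.QMP.capabilities)
    finally:
        await qmp.disconnect()


if __name__ == '__main__':
    asyncio.run(show_greeting())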
+ """ + self.logger.debug("Negotiating capabilities ...") + + arguments: Dict[str, List[str]] = {} + if self._greeting and 'oob' in self._greeting.QMP.capabilities: + arguments.setdefault('enable', []).append('oob') + msg = self.make_execute_msg('qmp_capabilities', arguments=arguments) + + # It's not safe to use execute() here, because the reader/writers + # aren't running. AsyncProtocol *requires* that a new session + # does not fail after the reader/writers are running! + try: + await self._send(msg) + reply = await self._recv() + assert 'return' in reply + assert 'error' not in reply + except (ProtocolError, AssertionError) as err: + emsg = "Negotiation failed" + self.logger.error("%s: %s", emsg, exception_summary(err)) + self.logger.debug("%s:\n%s\n", emsg, pretty_traceback()) + raise NegotiationError(emsg, err) from err + except BaseException as err: + # EOFError, OSError, or something unexpected. + emsg = "Negotiation failed" + self.logger.error("%s: %s", emsg, exception_summary(err)) + self.logger.debug("%s:\n%s\n", emsg, pretty_traceback()) + raise + + @bottom_half + async def _bh_disconnect(self) -> None: + try: + await super()._bh_disconnect() + finally: + if self._pending: + self.logger.debug("Cancelling pending executions") + keys = self._pending.keys() + for key in keys: + self.logger.debug("Cancelling execution '%s'", key) + self._pending[key].put_nowait( + ExecInterruptedError("Disconnected") + ) + + self.logger.debug("QMP Disconnected.") + + @upper_half + def _cleanup(self) -> None: + super()._cleanup() + assert not self._pending + + @bottom_half + async def _on_message(self, msg: Message) -> None: + """ + Add an incoming message to the appropriate queue/handler. + + :raise ServerParseError: When Message indicates server parse failure. + """ + # Incoming messages are not fully parsed/validated here; + # do only light peeking to know how to route the messages. + + if 'event' in msg: + await self._event_dispatch(msg) + return + + # Below, we assume everything left is an execute/exec-oob response. + + exec_id = cast(Optional[str], msg.get('id')) + + if exec_id in self._pending: + await self._pending[exec_id].put(msg) + return + + # We have a message we can't route back to a caller. + + is_error = 'error' in msg + has_id = 'id' in msg + + if is_error and not has_id: + # This is very likely a server parsing error. + # It doesn't inherently belong to any pending execution. + # Instead of performing clever recovery, just terminate. + # See "NOTE" in qmp-spec.txt, section 2.4.2 + raise ServerParseError( + ("Server sent an error response without an ID, " + "but there are no ID-less executions pending. " + "Assuming this is a server parser failure."), + msg + ) + + # qmp-spec.txt, section 2.4: + # 'Clients should drop all the responses + # that have an unknown "id" field.' + self.logger.log( + logging.ERROR if is_error else logging.WARNING, + "Unknown ID '%s', message dropped.", + exec_id, + ) + self.logger.debug("Unroutable message: %s", str(msg)) + + @upper_half + @bottom_half + async def _do_recv(self) -> Message: + """ + :raise OSError: When a stream error is encountered. + :raise EOFError: When the stream is at EOF. + :raise ProtocolError: + When the Message is not understood. + See also `Message._deserialize`. + + :return: A single QMP `Message`. 
+ """ + msg_bytes = await self._readline() + msg = Message(msg_bytes, eager=True) + return msg + + @upper_half + @bottom_half + def _do_send(self, msg: Message) -> None: + """ + :raise ValueError: JSON serialization failure + :raise TypeError: JSON serialization failure + :raise OSError: When a stream error is encountered. + """ + assert self._writer is not None + self._writer.write(bytes(msg)) + + @upper_half + def _get_exec_id(self) -> str: + exec_id = f"__qmp#{self._execute_id:05d}" + self._execute_id += 1 + return exec_id + + @upper_half + async def _issue(self, msg: Message) -> Union[None, str]: + """ + Issue a QMP `Message` and do not wait for a reply. + + :param msg: The QMP `Message` to send to the server. + + :return: The ID of the `Message` sent. + """ + msg_id: Optional[str] = None + if 'id' in msg: + assert isinstance(msg['id'], str) + msg_id = msg['id'] + + self._pending[msg_id] = asyncio.Queue(maxsize=1) + try: + await self._outgoing.put(msg) + except: + del self._pending[msg_id] + raise + + return msg_id + + @upper_half + async def _reply(self, msg_id: Union[str, None]) -> Message: + """ + Await a reply to a previously issued QMP message. + + :param msg_id: The ID of the previously issued message. + + :return: The reply from the server. + :raise ExecInterruptedError: + When the reply could not be retrieved because the connection + was lost, or some other problem. + """ + queue = self._pending[msg_id] + + try: + result = await queue.get() + if isinstance(result, ExecInterruptedError): + raise result + return result + finally: + del self._pending[msg_id] + + @upper_half + async def _execute(self, msg: Message, assign_id: bool = True) -> Message: + """ + Send a QMP `Message` to the server and await a reply. + + This method *assumes* you are sending some kind of an execute + statement that *will* receive a reply. + + An execution ID will be assigned if assign_id is `True`. It can be + disabled, but this requires that an ID is manually assigned + instead. For manually assigned IDs, you must not use the string + '__qmp#' anywhere in the ID. + + :param msg: The QMP `Message` to execute. + :param assign_id: If True, assign a new execution ID. + + :return: Execution reply from the server. + :raise ExecInterruptedError: + When the reply could not be retrieved because the connection + was lost, or some other problem. + """ + if assign_id: + msg['id'] = self._get_exec_id() + elif 'id' in msg: + assert isinstance(msg['id'], str) + assert '__qmp#' not in msg['id'] + + exec_id = await self._issue(msg) + return await self._reply(exec_id) + + @upper_half + @require(Runstate.RUNNING) + async def _raw( + self, + msg: Union[Message, Mapping[str, object], bytes], + assign_id: bool = True, + ) -> Message: + """ + Issue a raw `Message` to the QMP server and await a reply. + + :param msg: + A Message to send to the server. It may be a `Message`, any + Mapping (including Dict), or raw bytes. + :param assign_id: + Assign an arbitrary execution ID to this message. If + `False`, the existing id must either be absent (and no other + such pending execution may omit an ID) or a string. If it is + a string, it must not start with '__qmp#' and no other such + pending execution may currently be using that ID. + + :return: Execution reply from the server. + + :raise ExecInterruptedError: + When the reply could not be retrieved because the connection + was lost, or some other problem. + :raise TypeError: + When assign_id is `False`, an ID is given, and it is not a string. 
+ :raise ValueError: + When assign_id is `False`, but the ID is not usable; + Either because it starts with '__qmp#' or it is already in-use. + """ + # 1. convert generic Mapping or bytes to a QMP Message + # 2. copy Message objects so that we assign an ID only to the copy. + msg = Message(msg) + + exec_id = msg.get('id') + if not assign_id and 'id' in msg: + if not isinstance(exec_id, str): + raise TypeError(f"ID ('{exec_id}') must be a string.") + if exec_id.startswith('__qmp#'): + raise ValueError( + f"ID ('{exec_id}') must not start with '__qmp#'." + ) + + if not assign_id and exec_id in self._pending: + raise ValueError( + f"ID '{exec_id}' is in-use and cannot be used." + ) + + return await self._execute(msg, assign_id=assign_id) + + @upper_half + @require(Runstate.RUNNING) + async def execute_msg(self, msg: Message) -> object: + """ + Execute a QMP command and return its value. + + :param msg: The QMP `Message` to execute. + + :return: + The command execution return value from the server. The type of + object returned depends on the command that was issued, + though most in QEMU return a `dict`. + :raise ValueError: + If the QMP `Message` does not have either the 'execute' or + 'exec-oob' fields set. + :raise ExecuteError: When the server returns an error response. + :raise ExecInterruptedError: if the connection was terminated early. + """ + if not ('execute' in msg or 'exec-oob' in msg): + raise ValueError("Requires 'execute' or 'exec-oob' message") + + # Copy the Message so that the ID assigned by _execute() is + # local to this method; allowing the ID to be seen in raised + # Exceptions but without modifying the caller's held copy. + msg = Message(msg) + reply = await self._execute(msg) + + if 'error' in reply: + try: + error_response = ErrorResponse(reply) + except (KeyError, TypeError) as err: + # Error response was malformed. + raise BadReplyError( + "QMP error reply is malformed", reply, msg, + ) from err + + raise ExecuteError(error_response, msg, reply) + + if 'return' not in reply: + raise BadReplyError( + "QMP reply is missing a 'error' or 'return' member", + reply, msg, + ) + + return reply['return'] + + @classmethod + def make_execute_msg(cls, cmd: str, + arguments: Optional[Mapping[str, object]] = None, + oob: bool = False) -> Message: + """ + Create an executable message to be sent by `execute_msg` later. + + :param cmd: QMP command name. + :param arguments: Arguments (if any). Must be JSON-serializable. + :param oob: If `True`, execute "out of band". + + :return: An executable QMP `Message`. + """ + msg = Message({'exec-oob' if oob else 'execute': cmd}) + if arguments is not None: + msg['arguments'] = arguments + return msg + + @upper_half + async def execute(self, cmd: str, + arguments: Optional[Mapping[str, object]] = None, + oob: bool = False) -> object: + """ + Execute a QMP command and return its value. + + :param cmd: QMP command name. + :param arguments: Arguments (if any). Must be JSON-serializable. + :param oob: If `True`, execute "out of band". + + :return: + The command execution return value from the server. The type of + object returned depends on the command that was issued, + though most in QEMU return a `dict`. + :raise ExecuteError: When the server returns an error response. + :raise ExecInterruptedError: if the connection was terminated early. 
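Tying the above together, a sketch of issuing commands with arguments (one of them out-of-band) while distinguishing the error types defined earlier in this module; the command names are ordinary QMP examples, not requirements of this API:

from qemu.qmp.qmp_client import (
    ExecInterruptedError,
    ExecuteError,
    QMPClient,
)


async def demo_commands(qmp: QMPClient) -> None:
    try:
        await qmp.execute('block_resize',
                          {'device': 'drive0', 'size': 10 * 1024 * 1024})
        await qmp.execute('migrate-pause', oob=True)  # sent as exec-oob
    except ExecuteError as err:
        # The server replied, but with an error response.
        print(f"Command failed ({err.error_class}): {err}")
    except ExecInterruptedError:
        # The connection dropped before a reply arrived.
        print("Connection lost before the reply")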
+ """ + msg = self.make_execute_msg(cmd, arguments, oob=oob) + return await self.execute_msg(msg) + + @upper_half + @require(Runstate.RUNNING) + def send_fd_scm(self, fd: int) -> None: + """ + Send a file descriptor to the remote via SCM_RIGHTS. + """ + assert self._writer is not None + sock = self._writer.transport.get_extra_info('socket') + + if sock.family != socket.AF_UNIX: + raise QMPError("Sending file descriptors requires a UNIX socket.") + + if not hasattr(sock, 'sendmsg'): + # We need to void the warranty sticker. + # Access to sendmsg is scheduled for removal in Python 3.11. + # Find the real backing socket to use it anyway. + sock = sock._sock # pylint: disable=protected-access + + sock.sendmsg( + [b' '], + [(socket.SOL_SOCKET, socket.SCM_RIGHTS, struct.pack('@i', fd))] + ) diff --git a/python/qemu/qmp/qmp_shell.py b/python/qemu/qmp/qmp_shell.py index 337acfce2d..619ab42ced 100644 --- a/python/qemu/qmp/qmp_shell.py +++ b/python/qemu/qmp/qmp_shell.py @@ -1,11 +1,12 @@ # -# Copyright (C) 2009, 2010 Red Hat Inc. +# Copyright (C) 2009-2022 Red Hat Inc. # # Authors: # Luiz Capitulino +# John Snow # -# This work is licensed under the terms of the GNU GPL, version 2. See -# the COPYING file in the top-level directory. +# This work is licensed under the terms of the GNU LGPL, version 2 or +# later. See the COPYING file in the top-level directory. # """ @@ -86,8 +87,10 @@ import logging import os import re import readline +from subprocess import Popen import sys from typing import ( + IO, Iterator, List, NoReturn, @@ -95,8 +98,13 @@ from typing import ( Sequence, ) -from qemu import qmp -from qemu.qmp import QMPMessage +from qemu.qmp import ConnectError, QMPError, SocketAddrT +from qemu.qmp.legacy import ( + QEMUMonitorProtocol, + QMPBadPortError, + QMPMessage, + QMPObject, +) LOG = logging.getLogger(__name__) @@ -125,7 +133,7 @@ class QMPCompleter: return None -class QMPShellError(qmp.QMPError): +class QMPShellError(QMPError): """ QMP Shell Base error class. """ @@ -153,7 +161,7 @@ class FuzzyJSON(ast.NodeTransformer): return node -class QMPShell(qmp.QEMUMonitorProtocol): +class QMPShell(QEMUMonitorProtocol): """ QMPShell provides a basic readline-based QMP shell. @@ -161,9 +169,12 @@ class QMPShell(qmp.QEMUMonitorProtocol): :param pretty: Pretty-print QMP messages. :param verbose: Echo outgoing QMP messages to console. """ - def __init__(self, address: qmp.SocketAddrT, - pretty: bool = False, verbose: bool = False): - super().__init__(address) + def __init__(self, address: SocketAddrT, + pretty: bool = False, + verbose: bool = False, + server: bool = False, + logfile: Optional[str] = None): + super().__init__(address, server=server) self._greeting: Optional[QMPMessage] = None self._completer = QMPCompleter() self._transmode = False @@ -172,6 +183,10 @@ class QMPShell(qmp.QEMUMonitorProtocol): '.qmp-shell_history') self.pretty = pretty self.verbose = verbose + self.logfile = None + + if logfile is not None: + self.logfile = open(logfile, "w", encoding='utf-8') def close(self) -> None: # Hook into context manager of parent to save shell history. 
@@ -237,7 +252,7 @@ class QMPShell(qmp.QEMUMonitorProtocol): def _cli_expr(self, tokens: Sequence[str], - parent: qmp.QMPObject) -> None: + parent: QMPObject) -> None: for arg in tokens: (key, sep, val) = arg.partition('=') if sep != '=': @@ -312,11 +327,11 @@ class QMPShell(qmp.QEMUMonitorProtocol): self._cli_expr(cmdargs[1:], qmpcmd['arguments']) return qmpcmd - def _print(self, qmp_message: object) -> None: + def _print(self, qmp_message: object, fh: IO[str] = sys.stdout) -> None: jsobj = json.dumps(qmp_message, indent=4 if self.pretty else None, sort_keys=self.pretty) - print(str(jsobj)) + print(str(jsobj), file=fh) def _execute_cmd(self, cmdline: str) -> bool: try: @@ -339,6 +354,9 @@ class QMPShell(qmp.QEMUMonitorProtocol): print('Disconnected') return False self._print(resp) + if self.logfile is not None: + cmd = {**qmpcmd, **resp} + self._print(cmd, fh=self.logfile) return True def connect(self, negotiate: bool = True) -> None: @@ -381,7 +399,6 @@ class QMPShell(qmp.QEMUMonitorProtocol): if cmdline == '': for event in self.get_events(): print(event) - self.clear_events() return True return self._execute_cmd(cmdline) @@ -404,9 +421,12 @@ class HMPShell(QMPShell): :param pretty: Pretty-print QMP messages. :param verbose: Echo outgoing QMP messages to console. """ - def __init__(self, address: qmp.SocketAddrT, - pretty: bool = False, verbose: bool = False): - super().__init__(address, pretty, verbose) + def __init__(self, address: SocketAddrT, + pretty: bool = False, + verbose: bool = False, + server: bool = False, + logfile: Optional[str] = None): + super().__init__(address, pretty, verbose, server, logfile) self._cpu_index = 0 def _cmd_completion(self) -> None: @@ -499,6 +519,8 @@ def main() -> None: help='Verbose (echo commands sent and received)') parser.add_argument('-p', '--pretty', action='store_true', help='Pretty-print JSON') + parser.add_argument('-l', '--logfile', + help='Save log of all QMP messages to PATH') default_server = os.environ.get('QMP_SOCKET') parser.add_argument('qmp_server', action='store', @@ -513,23 +535,76 @@ def main() -> None: try: address = shell_class.parse_address(args.qmp_server) - except qmp.QMPBadPortError: + except QMPBadPortError: parser.error(f"Bad port number: {args.qmp_server}") return # pycharm doesn't know error() is noreturn - with shell_class(address, args.pretty, args.verbose) as qemu: + with shell_class(address, args.pretty, args.verbose, args.logfile) as qemu: try: qemu.connect(negotiate=not args.skip_negotiation) - except qmp.QMPConnectError: - die("Didn't get QMP greeting message") - except qmp.QMPCapabilitiesError: - die("Couldn't negotiate capabilities") - except OSError as err: - die(f"Couldn't connect to {args.qmp_server}: {err!s}") + except ConnectError as err: + if isinstance(err.exc, OSError): + die(f"Couldn't connect to {args.qmp_server}: {err!s}") + die(str(err)) for _ in qemu.repl(): pass +def main_wrap() -> None: + """ + qmp-shell-wrap entry point: parse command line arguments and + start the REPL. 
+ """ + parser = argparse.ArgumentParser() + parser.add_argument('-H', '--hmp', action='store_true', + help='Use HMP interface') + parser.add_argument('-v', '--verbose', action='store_true', + help='Verbose (echo commands sent and received)') + parser.add_argument('-p', '--pretty', action='store_true', + help='Pretty-print JSON') + parser.add_argument('-l', '--logfile', + help='Save log of all QMP messages to PATH') + + parser.add_argument('command', nargs=argparse.REMAINDER, + help='QEMU command line to invoke') + + args = parser.parse_args() + + cmd = args.command + if len(cmd) != 0 and cmd[0] == '--': + cmd = cmd[1:] + if len(cmd) == 0: + cmd = ["qemu-system-x86_64"] + + sockpath = "qmp-shell-wrap-%d" % os.getpid() + cmd += ["-qmp", "unix:%s" % sockpath] + + shell_class = HMPShell if args.hmp else QMPShell + + try: + address = shell_class.parse_address(sockpath) + except QMPBadPortError: + parser.error(f"Bad port number: {sockpath}") + return # pycharm doesn't know error() is noreturn + + try: + with shell_class(address, args.pretty, args.verbose, + True, args.logfile) as qemu: + with Popen(cmd): + + try: + qemu.accept() + except ConnectError as err: + if isinstance(err.exc, OSError): + die(f"Couldn't connect to {args.qmp_server}: {err!s}") + die(str(err)) + + for _ in qemu.repl(): + pass + finally: + os.unlink(sockpath) + + if __name__ == '__main__': main() diff --git a/python/qemu/qmp/qmp_tui.py b/python/qemu/qmp/qmp_tui.py new file mode 100644 index 0000000000..ce239d8979 --- /dev/null +++ b/python/qemu/qmp/qmp_tui.py @@ -0,0 +1,652 @@ +# Copyright (c) 2021 +# +# Authors: +# Niteesh Babu G S +# +# This work is licensed under the terms of the GNU LGPL, version 2 or +# later. See the COPYING file in the top-level directory. +""" +QMP TUI + +QMP TUI is an asynchronous interface built on top the of the QMP library. +It is the successor of QMP-shell and is bought-in as a replacement for it. + +Example Usage: qmp-tui +Full Usage: qmp-tui --help +""" + +import argparse +import asyncio +import json +import logging +from logging import Handler, LogRecord +import signal +from typing import ( + List, + Optional, + Tuple, + Type, + Union, + cast, +) + +from pygments import lexers +from pygments import token as Token +import urwid +import urwid_readline + +from .error import ProtocolError +from .legacy import QEMUMonitorProtocol, QMPBadPortError +from .message import DeserializationError, Message, UnexpectedTypeError +from .protocol import ConnectError, Runstate +from .qmp_client import ExecInterruptedError, QMPClient +from .util import create_task, pretty_traceback + + +# The name of the signal that is used to update the history list +UPDATE_MSG: str = 'UPDATE_MSG' + + +palette = [ + (Token.Punctuation, '', '', '', 'h15,bold', 'g7'), + (Token.Text, '', '', '', '', 'g7'), + (Token.Name.Tag, '', '', '', 'bold,#f88', 'g7'), + (Token.Literal.Number.Integer, '', '', '', '#fa0', 'g7'), + (Token.Literal.String.Double, '', '', '', '#6f6', 'g7'), + (Token.Keyword.Constant, '', '', '', '#6af', 'g7'), + ('DEBUG', '', '', '', '#ddf', 'g7'), + ('INFO', '', '', '', 'g100', 'g7'), + ('WARNING', '', '', '', '#ff6', 'g7'), + ('ERROR', '', '', '', '#a00', 'g7'), + ('CRITICAL', '', '', '', '#a00', 'g7'), + ('background', '', 'black', '', '', 'g7'), +] + + +def format_json(msg: str) -> str: + """ + Formats valid/invalid multi-line JSON message into a single-line message. + + Formatting is first tried using the standard json module. 
If that fails + due to an decoding error then a simple string manipulation is done to + achieve a single line JSON string. + + Converting into single line is more asthetically pleasing when looking + along with error messages. + + Eg: + Input: + [ 1, + true, + 3 ] + The above input is not a valid QMP message and produces the following error + "QMP message is not a JSON object." + When displaying this in TUI in multiline mode we get + + [ 1, + true, + 3 ]: QMP message is not a JSON object. + + whereas in singleline mode we get the following + + [1, true, 3]: QMP message is not a JSON object. + + The single line mode is more asthetically pleasing. + + :param msg: + The message to formatted into single line. + + :return: Formatted singleline message. + """ + try: + msg = json.loads(msg) + return str(json.dumps(msg)) + except json.decoder.JSONDecodeError: + msg = msg.replace('\n', '') + words = msg.split(' ') + words = list(filter(None, words)) + return ' '.join(words) + + +def has_handler_type(logger: logging.Logger, + handler_type: Type[Handler]) -> bool: + """ + The Logger class has no interface to check if a certain type of handler is + installed or not. So we provide an interface to do so. + + :param logger: + Logger object + :param handler_type: + The type of the handler to be checked. + + :return: returns True if handler of type `handler_type`. + """ + for handler in logger.handlers: + if isinstance(handler, handler_type): + return True + return False + + +class App(QMPClient): + """ + Implements the QMP TUI. + + Initializes the widgets and starts the urwid event loop. + + :param address: + Address of the server to connect to. + :param num_retries: + The number of times to retry before stopping to reconnect. + :param retry_delay: + The delay(sec) before each retry + """ + def __init__(self, address: Union[str, Tuple[str, int]], num_retries: int, + retry_delay: Optional[int]) -> None: + urwid.register_signal(type(self), UPDATE_MSG) + self.window = Window(self) + self.address = address + self.aloop: Optional[asyncio.AbstractEventLoop] = None + self.num_retries = num_retries + self.retry_delay = retry_delay if retry_delay else 2 + self.retry: bool = False + self.exiting: bool = False + super().__init__() + + def add_to_history(self, msg: str, level: Optional[str] = None) -> None: + """ + Appends the msg to the history list. + + :param msg: + The raw message to be appended in string type. + """ + urwid.emit_signal(self, UPDATE_MSG, msg, level) + + def _cb_outbound(self, msg: Message) -> Message: + """ + Callback: outbound message hook. + + Appends the outgoing messages to the history box. + + :param msg: raw outbound message. + :return: final outbound message. + """ + str_msg = str(msg) + + if not has_handler_type(logging.getLogger(), TUILogHandler): + logging.debug('Request: %s', str_msg) + self.add_to_history('<-- ' + str_msg) + return msg + + def _cb_inbound(self, msg: Message) -> Message: + """ + Callback: outbound message hook. + + Appends the incoming messages to the history box. + + :param msg: raw inbound message. + :return: final inbound message. + """ + str_msg = str(msg) + + if not has_handler_type(logging.getLogger(), TUILogHandler): + logging.debug('Request: %s', str_msg) + self.add_to_history('--> ' + str_msg) + return msg + + async def _send_to_server(self, msg: Message) -> None: + """ + This coroutine sends the message to the server. + The message has to be pre-validated. + + :param msg: + Pre-validated message to be to sent to the server. 
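Concretely, the normalization performed by format_json() behaves roughly as follows, assuming the TUI's optional dependencies (urwid, urwid_readline, pygments) are installed so that the module imports cleanly:

from qemu.qmp.qmp_tui import format_json

# Valid JSON is round-tripped through json.loads()/json.dumps():
print(format_json('{ "execute":\n    "query-status" }'))
# {"execute": "query-status"}

# Input that is valid JSON but not a valid QMP object is still
# flattened onto a single line for display:
print(format_json('[ 1,\n  true,\n  3 ]'))
# [1, true, 3]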
+ + :raise Exception: When an unhandled exception is caught. + """ + try: + await self._raw(msg, assign_id='id' not in msg) + except ExecInterruptedError as err: + logging.info('Error server disconnected before reply %s', str(err)) + self.add_to_history('Server disconnected before reply', 'ERROR') + except Exception as err: + logging.error('Exception from _send_to_server: %s', str(err)) + raise err + + def cb_send_to_server(self, raw_msg: str) -> None: + """ + Validates and sends the message to the server. + The raw string message is first converted into a Message object + and is then sent to the server. + + :param raw_msg: + The raw string message to be sent to the server. + + :raise Exception: When an unhandled exception is caught. + """ + try: + msg = Message(bytes(raw_msg, encoding='utf-8')) + create_task(self._send_to_server(msg)) + except (DeserializationError, UnexpectedTypeError) as err: + raw_msg = format_json(raw_msg) + logging.info('Invalid message: %s', err.error_message) + self.add_to_history(f'{raw_msg}: {err.error_message}', 'ERROR') + + def unhandled_input(self, key: str) -> None: + """ + Handle's keys which haven't been handled by the child widgets. + + :param key: + Unhandled key + """ + if key == 'esc': + self.kill_app() + + def kill_app(self) -> None: + """ + Initiates killing of app. A bridge between asynchronous and synchronous + code. + """ + create_task(self._kill_app()) + + async def _kill_app(self) -> None: + """ + This coroutine initiates the actual disconnect process and calls + urwid.ExitMainLoop() to kill the TUI. + + :raise Exception: When an unhandled exception is caught. + """ + self.exiting = True + await self.disconnect() + logging.debug('Disconnect finished. Exiting app') + raise urwid.ExitMainLoop() + + async def disconnect(self) -> None: + """ + Overrides the disconnect method to handle the errors locally. + """ + try: + await super().disconnect() + except (OSError, EOFError) as err: + logging.info('disconnect: %s', str(err)) + self.retry = True + except ProtocolError as err: + logging.info('disconnect: %s', str(err)) + except Exception as err: + logging.error('disconnect: Unhandled exception %s', str(err)) + raise err + + def _set_status(self, msg: str) -> None: + """ + Sets the message as the status. + + :param msg: + The message to be displayed in the status bar. + """ + self.window.footer.set_text(msg) + + def _get_formatted_address(self) -> str: + """ + Returns a formatted version of the server's address. + + :return: formatted address + """ + if isinstance(self.address, tuple): + host, port = self.address + addr = f'{host}:{port}' + else: + addr = f'{self.address}' + return addr + + async def _initiate_connection(self) -> Optional[ConnectError]: + """ + Tries connecting to a server a number of times with a delay between + each try. If all retries failed then return the error faced during + the last retry. + + :return: Error faced during last retry. 
+ """ + current_retries = 0 + err = None + + # initial try + await self.connect_server() + while self.retry and current_retries < self.num_retries: + logging.info('Connection Failed, retrying in %d', self.retry_delay) + status = f'[Retry #{current_retries} ({self.retry_delay}s)]' + self._set_status(status) + + await asyncio.sleep(self.retry_delay) + + err = await self.connect_server() + current_retries += 1 + # If all retries failed report the last error + if err: + logging.info('All retries failed: %s', err) + return err + return None + + async def manage_connection(self) -> None: + """ + Manage the connection based on the current run state. + + A reconnect is issued when the current state is IDLE and the number + of retries is not exhausted. + A disconnect is issued when the current state is DISCONNECTING. + """ + while not self.exiting: + if self.runstate == Runstate.IDLE: + err = await self._initiate_connection() + # If retry is still true then, we have exhausted all our tries. + if err: + self._set_status(f'[Error: {err.error_message}]') + else: + addr = self._get_formatted_address() + self._set_status(f'[Connected {addr}]') + elif self.runstate == Runstate.DISCONNECTING: + self._set_status('[Disconnected]') + await self.disconnect() + # check if a retry is needed + if self.runstate == Runstate.IDLE: + continue + await self.runstate_changed() + + async def connect_server(self) -> Optional[ConnectError]: + """ + Initiates a connection to the server at address `self.address` + and in case of a failure, sets the status to the respective error. + """ + try: + await self.connect(self.address) + self.retry = False + except ConnectError as err: + logging.info('connect_server: ConnectError %s', str(err)) + self.retry = True + return err + return None + + def run(self, debug: bool = False) -> None: + """ + Starts the long running co-routines and the urwid event loop. + + :param debug: + Enables/Disables asyncio event loop debugging + """ + screen = urwid.raw_display.Screen() + screen.set_terminal_properties(256) + + self.aloop = asyncio.get_event_loop() + self.aloop.set_debug(debug) + + # Gracefully handle SIGTERM and SIGINT signals + cancel_signals = [signal.SIGTERM, signal.SIGINT] + for sig in cancel_signals: + self.aloop.add_signal_handler(sig, self.kill_app) + + event_loop = urwid.AsyncioEventLoop(loop=self.aloop) + main_loop = urwid.MainLoop(urwid.AttrMap(self.window, 'background'), + unhandled_input=self.unhandled_input, + screen=screen, + palette=palette, + handle_mouse=True, + event_loop=event_loop) + + create_task(self.manage_connection(), self.aloop) + try: + main_loop.run() + except Exception as err: + logging.error('%s\n%s\n', str(err), pretty_traceback()) + raise err + + +class StatusBar(urwid.Text): + """ + A simple statusbar modelled using the Text widget. The status can be + set using the set_text function. All text set is aligned to right. + + :param text: Initial text to be displayed. Default is empty str. + """ + def __init__(self, text: str = ''): + super().__init__(text, align='right') + + +class Editor(urwid_readline.ReadlineEdit): + """ + A simple editor modelled using the urwid_readline.ReadlineEdit widget. + Mimcs GNU readline shortcuts and provides history support. + + The readline shortcuts can be found below: + https://github.com/rr-/urwid_readline#features + + Along with the readline features, this editor also has support for + history. Pressing the 'up'/'down' switches between the prev/next messages + available in the history. 
+ + Currently there is no support to save the history to a file. The history of + previous commands is lost on exit. + + :param parent: Reference to the TUI object. + """ + def __init__(self, parent: App) -> None: + super().__init__(caption='> ', multiline=True) + self.parent = parent + self.history: List[str] = [] + self.last_index: int = -1 + self.show_history: bool = False + + def keypress(self, size: Tuple[int, int], key: str) -> Optional[str]: + """ + Handles the keypress on this widget. + + :param size: + The current size of the widget. + :param key: + The key to be handled. + + :return: Unhandled key if any. + """ + msg = self.get_edit_text() + if key == 'up' and not msg: + # Show the history when 'up arrow' is pressed with no input text. + # NOTE: The show_history logic is necessary because in 'multiline' + # mode (which we use) 'up arrow' is used to move between lines. + if not self.history: + return None + self.show_history = True + last_msg = self.history[self.last_index] + self.set_edit_text(last_msg) + self.edit_pos = len(last_msg) + elif key == 'up' and self.show_history: + self.last_index = max(self.last_index - 1, -len(self.history)) + self.set_edit_text(self.history[self.last_index]) + self.edit_pos = len(self.history[self.last_index]) + elif key == 'down' and self.show_history: + if self.last_index == -1: + self.set_edit_text('') + self.show_history = False + else: + self.last_index += 1 + self.set_edit_text(self.history[self.last_index]) + self.edit_pos = len(self.history[self.last_index]) + elif key == 'meta enter': + # When using multiline, enter inserts a new line into the editor + # send the input to the server on alt + enter + self.parent.cb_send_to_server(msg) + self.history.append(msg) + self.set_edit_text('') + self.last_index = -1 + self.show_history = False + else: + self.show_history = False + self.last_index = -1 + return cast(Optional[str], super().keypress(size, key)) + return None + + +class EditorWidget(urwid.Filler): + """ + Wrapper around the editor widget. + + The Editor is a flow widget and has to wrapped inside a box widget. + This class wraps the Editor inside filler widget. + + :param parent: Reference to the TUI object. + """ + def __init__(self, parent: App) -> None: + super().__init__(Editor(parent), valign='top') + + +class HistoryBox(urwid.ListBox): + """ + This widget is modelled using the ListBox widget, contains the list of + all messages both QMP messages and log messsages to be shown in the TUI. + + The messages are urwid.Text widgets. On every append of a message, the + focus is shifted to the last appended message. + + :param parent: Reference to the TUI object. + """ + def __init__(self, parent: App) -> None: + self.parent = parent + self.history = urwid.SimpleFocusListWalker([]) + super().__init__(self.history) + + def add_to_history(self, + history: Union[str, List[Tuple[str, str]]]) -> None: + """ + Appends a message to the list and set the focus to the last appended + message. + + :param history: + The history item(message/event) to be appended to the list. + """ + self.history.append(urwid.Text(history)) + self.history.set_focus(len(self.history) - 1) + + def mouse_event(self, size: Tuple[int, int], _event: str, button: float, + _x: int, _y: int, focus: bool) -> None: + # Unfortunately there are no urwid constants that represent the mouse + # events. 
+ if button == 4: # Scroll up event + super().keypress(size, 'up') + elif button == 5: # Scroll down event + super().keypress(size, 'down') + + +class HistoryWindow(urwid.Frame): + """ + This window composes the HistoryBox and EditorWidget in a horizontal split. + By default the first focus is given to the history box. + + :param parent: Reference to the TUI object. + """ + def __init__(self, parent: App) -> None: + self.parent = parent + self.editor_widget = EditorWidget(parent) + self.editor = urwid.LineBox(self.editor_widget) + self.history = HistoryBox(parent) + self.body = urwid.Pile([('weight', 80, self.history), + ('weight', 20, self.editor)]) + super().__init__(self.body) + urwid.connect_signal(self.parent, UPDATE_MSG, self.cb_add_to_history) + + def cb_add_to_history(self, msg: str, level: Optional[str] = None) -> None: + """ + Appends a message to the history box + + :param msg: + The message to be appended to the history box. + :param level: + The log level of the message, if it is a log message. + """ + formatted = [] + if level: + msg = f'[{level}]: {msg}' + formatted.append((level, msg)) + else: + lexer = lexers.JsonLexer() # pylint: disable=no-member + for token in lexer.get_tokens(msg): + formatted.append(token) + self.history.add_to_history(formatted) + + +class Window(urwid.Frame): + """ + This window is the top most widget of the TUI and will contain other + windows. Each child of this widget is responsible for displaying a specific + functionality. + + :param parent: Reference to the TUI object. + """ + def __init__(self, parent: App) -> None: + self.parent = parent + footer = StatusBar() + body = HistoryWindow(parent) + super().__init__(body, footer=footer) + + +class TUILogHandler(Handler): + """ + This handler routes all the log messages to the TUI screen. + It is installed to the root logger to so that the log message from all + libraries begin used is routed to the screen. + + :param tui: Reference to the TUI object. + """ + def __init__(self, tui: App) -> None: + super().__init__() + self.tui = tui + + def emit(self, record: LogRecord) -> None: + """ + Emits a record to the TUI screen. + + Appends the log message to the TUI screen + """ + level = record.levelname + msg = record.getMessage() + self.tui.add_to_history(msg, level) + + +def main() -> None: + """ + Driver of the whole script, parses arguments, initialize the TUI and + the logger. + """ + parser = argparse.ArgumentParser(description='QMP TUI') + parser.add_argument('qmp_server', help='Address of the QMP server. ' + 'Format ') + parser.add_argument('--num-retries', type=int, default=10, + help='Number of times to reconnect before giving up.') + parser.add_argument('--retry-delay', type=int, + help='Time(s) to wait before next retry. ' + 'Default action is to wait 2s between each retry.') + parser.add_argument('--log-file', help='The Log file name') + parser.add_argument('--log-level', default='WARNING', + help='Log level ') + parser.add_argument('--asyncio-debug', action='store_true', + help='Enable debug mode for asyncio loop. ' + 'Generates lot of output, makes TUI unusable when ' + 'logs are logged in the TUI. 
' + 'Use only when logging to a file.') + args = parser.parse_args() + + try: + address = QEMUMonitorProtocol.parse_address(args.qmp_server) + except QMPBadPortError as err: + parser.error(str(err)) + + app = App(address, args.num_retries, args.retry_delay) + + root_logger = logging.getLogger() + root_logger.setLevel(logging.getLevelName(args.log_level)) + + if args.log_file: + root_logger.addHandler(logging.FileHandler(args.log_file)) + else: + root_logger.addHandler(TUILogHandler(app)) + + app.run(args.asyncio_debug) + + +if __name__ == '__main__': + main() diff --git a/python/qemu/qmp/util.py b/python/qemu/qmp/util.py new file mode 100644 index 0000000000..ca6225e9cd --- /dev/null +++ b/python/qemu/qmp/util.py @@ -0,0 +1,219 @@ +""" +Miscellaneous Utilities + +This module provides asyncio utilities and compatibility wrappers for +Python 3.6 to provide some features that otherwise become available in +Python 3.7+. + +Various logging and debugging utilities are also provided, such as +`exception_summary()` and `pretty_traceback()`, used primarily for +adding information into the logging stream. +""" + +import asyncio +import sys +import traceback +from typing import ( + Any, + Coroutine, + Optional, + TypeVar, + cast, +) + + +T = TypeVar('T') + + +# -------------------------- +# Section: Utility Functions +# -------------------------- + + +async def flush(writer: asyncio.StreamWriter) -> None: + """ + Utility function to ensure a StreamWriter is *fully* drained. + + `asyncio.StreamWriter.drain` only promises we will return to below + the "high-water mark". This function ensures we flush the entire + buffer -- by setting the high water mark to 0 and then calling + drain. The flow control limits are restored after the call is + completed. + """ + transport = cast( # type: ignore[redundant-cast] + asyncio.WriteTransport, writer.transport + ) + + # https://github.com/python/typeshed/issues/5779 + low, high = transport.get_write_buffer_limits() # type: ignore + transport.set_write_buffer_limits(0, 0) + try: + await writer.drain() + finally: + transport.set_write_buffer_limits(high, low) + + +def upper_half(func: T) -> T: + """ + Do-nothing decorator that annotates a method as an "upper-half" method. + + These methods must not call bottom-half functions directly, but can + schedule them to run. + """ + return func + + +def bottom_half(func: T) -> T: + """ + Do-nothing decorator that annotates a method as a "bottom-half" method. + + These methods must take great care to handle their own exceptions whenever + possible. If they go unhandled, they will cause termination of the loop. + + These methods do not, in general, have the ability to directly + report information to a caller’s context and will usually be + collected as a Task result instead. + + They must not call upper-half functions directly. + """ + return func + + +# ------------------------------- +# Section: Compatibility Wrappers +# ------------------------------- + + +def create_task(coro: Coroutine[Any, Any, T], + loop: Optional[asyncio.AbstractEventLoop] = None + ) -> 'asyncio.Future[T]': + """ + Python 3.6-compatible `asyncio.create_task` wrapper. + + :param coro: The coroutine to execute in a task. + :param loop: Optionally, the loop to create the task in. + + :return: An `asyncio.Future` object. 
+ """ + if sys.version_info >= (3, 7): + if loop is not None: + return loop.create_task(coro) + return asyncio.create_task(coro) # pylint: disable=no-member + + # Python 3.6: + return asyncio.ensure_future(coro, loop=loop) + + +def is_closing(writer: asyncio.StreamWriter) -> bool: + """ + Python 3.6-compatible `asyncio.StreamWriter.is_closing` wrapper. + + :param writer: The `asyncio.StreamWriter` object. + :return: `True` if the writer is closing, or closed. + """ + if sys.version_info >= (3, 7): + return writer.is_closing() + + # Python 3.6: + transport = writer.transport + assert isinstance(transport, asyncio.WriteTransport) + return transport.is_closing() + + +async def wait_closed(writer: asyncio.StreamWriter) -> None: + """ + Python 3.6-compatible `asyncio.StreamWriter.wait_closed` wrapper. + + :param writer: The `asyncio.StreamWriter` to wait on. + """ + if sys.version_info >= (3, 7): + await writer.wait_closed() + return + + # Python 3.6 + transport = writer.transport + assert isinstance(transport, asyncio.WriteTransport) + + while not transport.is_closing(): + await asyncio.sleep(0) + + # This is an ugly workaround, but it's the best I can come up with. + sock = transport.get_extra_info('socket') + + if sock is None: + # Our transport doesn't have a socket? ... + # Nothing we can reasonably do. + return + + while sock.fileno() != -1: + await asyncio.sleep(0) + + +def asyncio_run(coro: Coroutine[Any, Any, T], *, debug: bool = False) -> T: + """ + Python 3.6-compatible `asyncio.run` wrapper. + + :param coro: A coroutine to execute now. + :return: The return value from the coroutine. + """ + if sys.version_info >= (3, 7): + return asyncio.run(coro, debug=debug) + + # Python 3.6 + loop = asyncio.get_event_loop() + loop.set_debug(debug) + ret = loop.run_until_complete(coro) + loop.close() + + return ret + + +# ---------------------------- +# Section: Logging & Debugging +# ---------------------------- + + +def exception_summary(exc: BaseException) -> str: + """ + Return a summary string of an arbitrary exception. + + It will be of the form "ExceptionType: Error Message", if the error + string is non-empty, and just "ExceptionType" otherwise. + """ + name = type(exc).__qualname__ + smod = type(exc).__module__ + if smod not in ("__main__", "builtins"): + name = smod + '.' + name + + error = str(exc) + if error: + return f"{name}: {error}" + return name + + +def pretty_traceback(prefix: str = " | ") -> str: + """ + Formats the current traceback, indented to provide visual distinction. + + This is useful for printing a traceback within a traceback for + debugging purposes when encapsulating errors to deliver them up the + stack; when those errors are printed, this helps provide a nice + visual grouping to quickly identify the parts of the error that + belong to the inner exception. + + :param prefix: The prefix to append to each line of the traceback. + :return: A string, formatted something like the following:: + + | Traceback (most recent call last): + | File "foobar.py", line 42, in arbitrary_example + | foo.baz() + | ArbitraryError: [Errno 42] Something bad happened! 
+ """ + output = "".join(traceback.format_exception(*sys.exc_info())) + + exc_lines = [] + for line in output.split('\n'): + exc_lines.append(prefix + line) + + # The last line is always empty, omit it + return "\n".join(exc_lines[:-1]) diff --git a/python/qemu/utils/README.rst b/python/qemu/utils/README.rst index 975fbf4d7d..d5f2da1454 100644 --- a/python/qemu/utils/README.rst +++ b/python/qemu/utils/README.rst @@ -2,6 +2,6 @@ qemu.utils package ================== This package provides miscellaneous utilities used for testing and -debugging QEMU. It is used primarily by the vm and acceptance tests. +debugging QEMU. It is used primarily by the vm and avocado tests. See the documentation in ``__init__.py`` for more information. diff --git a/python/qemu/utils/__init__.py b/python/qemu/utils/__init__.py index 7f1a5138c4..017cfdcda7 100644 --- a/python/qemu/utils/__init__.py +++ b/python/qemu/utils/__init__.py @@ -15,7 +15,11 @@ various tasks not directly related to the launching of a VM. # the COPYING file in the top-level directory. # +import os import re +import shutil +from subprocess import CalledProcessError +import textwrap from typing import Optional # pylint: disable=import-error @@ -23,6 +27,8 @@ from .accel import kvm_available, list_accel, tcg_available __all__ = ( + 'VerboseProcessError', + 'add_visual_margin', 'get_info_usernet_hostfwd_port', 'kvm_available', 'list_accel', @@ -43,3 +49,114 @@ def get_info_usernet_hostfwd_port(info_usernet_output: str) -> Optional[int]: if match is not None: return int(match[1]) return None + + +# pylint: disable=too-many-arguments +def add_visual_margin( + content: str = '', + width: Optional[int] = None, + name: Optional[str] = None, + padding: int = 1, + upper_left: str = '┏', + lower_left: str = '┗', + horizontal: str = '━', + vertical: str = '┃', +) -> str: + """ + Decorate and wrap some text with a visual decoration around it. + + This function assumes that the text decoration characters are single + characters that display using a single monospace column. + + ┏━ Example ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + ┃ This is what this function looks like with text content that's + ┃ wrapped to 66 characters. The right-hand margin is left open to + ┃ accommodate the occasional unicode character that might make + ┃ predicting the total "visual" width of a line difficult. This + ┃ provides a visual distinction that's good-enough, though. + ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + :param content: The text to wrap and decorate. + :param width: + The number of columns to use, including for the decoration + itself. The default (None) uses the available width of the + current terminal, or a fallback of 72 lines. A negative number + subtracts a fixed-width from the default size. The default obeys + the COLUMNS environment variable, if set. + :param name: A label to apply to the upper-left of the box. + :param padding: How many columns of padding to apply inside. + :param upper_left: Upper-left single-width text decoration character. + :param lower_left: Lower-left single-width text decoration character. + :param horizontal: Horizontal single-width text decoration character. + :param vertical: Vertical single-width text decoration character. 
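+
+    A minimal usage sketch (the text and label here are arbitrary)::
+
+        print(add_visual_margin("Hello, world!", width=40, name="Demo"))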
+ """ + if width is None or width < 0: + avail = shutil.get_terminal_size(fallback=(72, 24))[0] + if width is None: + _width = avail + else: + _width = avail + width + else: + _width = width + + prefix = vertical + (' ' * padding) + + def _bar(name: Optional[str], top: bool = True) -> str: + ret = upper_left if top else lower_left + if name is not None: + ret += f"{horizontal} {name} " + + filler_len = _width - len(ret) + ret += f"{horizontal * filler_len}" + return ret + + def _wrap(line: str) -> str: + return os.linesep.join( + textwrap.wrap( + line, width=_width - padding, initial_indent=prefix, + subsequent_indent=prefix, replace_whitespace=False, + drop_whitespace=True, break_on_hyphens=False) + ) + + return os.linesep.join(( + _bar(name, top=True), + os.linesep.join(_wrap(line) for line in content.splitlines()), + _bar(None, top=False), + )) + + +class VerboseProcessError(CalledProcessError): + """ + The same as CalledProcessError, but more verbose. + + This is useful for debugging failed calls during test executions. + The return code, signal (if any), and terminal output will be displayed + on unhandled exceptions. + """ + def summary(self) -> str: + """Return the normal CalledProcessError str() output.""" + return super().__str__() + + def __str__(self) -> str: + lmargin = ' ' + width = -len(lmargin) + sections = [] + + # Does self.stdout contain both stdout and stderr? + has_combined_output = self.stderr is None + + name = 'output' if has_combined_output else 'stdout' + if self.stdout: + sections.append(add_visual_margin(self.stdout, width, name)) + else: + sections.append(f"{name}: N/A") + + if self.stderr: + sections.append(add_visual_margin(self.stderr, width, 'stderr')) + elif not has_combined_output: + sections.append("stderr: N/A") + + return os.linesep.join(( + self.summary(), + textwrap.indent(os.linesep.join(sections), prefix=lmargin), + )) diff --git a/python/qemu/qmp/qemu_ga_client.py b/python/qemu/utils/qemu_ga_client.py similarity index 94% rename from python/qemu/qmp/qemu_ga_client.py rename to python/qemu/utils/qemu_ga_client.py index 67ac0b4211..8c38a7ac9c 100644 --- a/python/qemu/qmp/qemu_ga_client.py +++ b/python/qemu/utils/qemu_ga_client.py @@ -5,7 +5,7 @@ Usage: Start QEMU with: -# qemu [...] -chardev socket,path=/tmp/qga.sock,server,wait=off,id=qga0 \ +# qemu [...] -chardev socket,path=/tmp/qga.sock,server=on,wait=off,id=qga0 \ -device virtio-serial \ -device virtserialport,chardev=qga0,name=org.qemu.guest_agent.0 @@ -37,8 +37,8 @@ See also: https://wiki.qemu.org/Features/QAPI/GuestAgent # the COPYING file in the top-level directory. 
import argparse +import asyncio import base64 -import errno import os import random import sys @@ -50,8 +50,8 @@ from typing import ( Sequence, ) -from qemu import qmp -from qemu.qmp import SocketAddrT +from qemu.qmp import ConnectError, SocketAddrT +from qemu.qmp.legacy import QEMUMonitorProtocol # This script has not seen many patches or careful attention in quite @@ -61,7 +61,7 @@ from qemu.qmp import SocketAddrT # pylint: disable=missing-docstring -class QemuGuestAgent(qmp.QEMUMonitorProtocol): +class QemuGuestAgent(QEMUMonitorProtocol): def __getattr__(self, name: str) -> Callable[..., Any]: def wrapper(**kwds: object) -> object: return self.command('guest-' + name.replace('_', '-'), **kwds) @@ -149,7 +149,7 @@ class QemuGuestAgentClient: self.qga.settimeout(timeout) try: self.qga.ping() - except TimeoutError: + except asyncio.TimeoutError: return False return True @@ -172,7 +172,7 @@ class QemuGuestAgentClient: try: getattr(self.qga, 'suspend' + '_' + mode)() # On error exception will raise - except TimeoutError: + except asyncio.TimeoutError: # On success command will timed out return @@ -182,7 +182,7 @@ class QemuGuestAgentClient: try: self.qga.shutdown(mode=mode) - except TimeoutError: + except asyncio.TimeoutError: pass @@ -277,7 +277,7 @@ commands = [m.replace('_cmd_', '') for m in dir() if '_cmd_' in m] def send_command(address: str, cmd: str, args: Sequence[str]) -> None: if not os.path.exists(address): - print('%s not found' % address) + print(f"'{address}' not found. (Is QEMU running?)") sys.exit(1) if cmd not in commands: @@ -287,10 +287,10 @@ def send_command(address: str, cmd: str, args: Sequence[str]) -> None: try: client = QemuGuestAgentClient(address) - except OSError as err: + except ConnectError as err: print(err) - if err.errno == errno.ECONNREFUSED: - print('Hint: qemu is not running?') + if isinstance(err.exc, ConnectionError): + print('(Is QEMU running?)') sys.exit(1) if cmd == 'fsfreeze' and args[0] == 'freeze': diff --git a/python/qemu/qmp/qom.py b/python/qemu/utils/qom.py similarity index 99% rename from python/qemu/qmp/qom.py rename to python/qemu/utils/qom.py index 8ff28a8343..bcf192f477 100644 --- a/python/qemu/qmp/qom.py +++ b/python/qemu/utils/qom.py @@ -32,7 +32,8 @@ QOM commands: import argparse -from . import QMPResponseError +from qemu.qmp import ExecuteError + from .qom_common import QOMCommand @@ -233,7 +234,7 @@ class QOMTree(QOMCommand): rsp = self.qmp.command('qom-get', path=path, property=item.name) print(f" {item.name}: {rsp} ({item.type})") - except QMPResponseError as err: + except ExecuteError as err: print(f" {item.name}: ({item.type})") print('') for item in items: diff --git a/python/qemu/qmp/qom_common.py b/python/qemu/utils/qom_common.py similarity index 95% rename from python/qemu/qmp/qom_common.py rename to python/qemu/utils/qom_common.py index a59ae1a2a1..80da1b2304 100644 --- a/python/qemu/qmp/qom_common.py +++ b/python/qemu/utils/qom_common.py @@ -27,11 +27,8 @@ from typing import ( TypeVar, ) -from . import QEMUMonitorProtocol, QMPError - - -# The following is needed only for a type alias. -Subparsers = argparse._SubParsersAction # pylint: disable=protected-access +from qemu.qmp import QMPError +from qemu.qmp.legacy import QEMUMonitorProtocol class ObjectPropertyInfo: @@ -89,7 +86,7 @@ class QOMCommand: self.qmp.connect() @classmethod - def register(cls, subparsers: Subparsers) -> None: + def register(cls, subparsers: Any) -> None: """ Register this command with the argument parser. 
diff --git a/python/qemu/qmp/qom_fuse.py b/python/qemu/utils/qom_fuse.py similarity index 97% rename from python/qemu/qmp/qom_fuse.py rename to python/qemu/utils/qom_fuse.py index 43f4671fdb..8dcd59fcde 100644 --- a/python/qemu/qmp/qom_fuse.py +++ b/python/qemu/utils/qom_fuse.py @@ -48,7 +48,8 @@ from typing import ( import fuse from fuse import FUSE, FuseOSError, Operations -from . import QMPResponseError +from qemu.qmp import ExecuteError + from .qom_common import QOMCommand @@ -99,7 +100,7 @@ class QOMFuse(QOMCommand, Operations): try: self.qom_list(path) return True - except QMPResponseError: + except ExecuteError: return False def is_property(self, path: str) -> bool: @@ -112,7 +113,7 @@ class QOMFuse(QOMCommand, Operations): if item.name == prop: return True return False - except QMPResponseError: + except ExecuteError: return False def is_link(self, path: str) -> bool: @@ -125,7 +126,7 @@ class QOMFuse(QOMCommand, Operations): if item.name == prop and item.link: return True return False - except QMPResponseError: + except ExecuteError: return False def read(self, path: str, size: int, offset: int, fh: IO[bytes]) -> bytes: @@ -138,7 +139,7 @@ class QOMFuse(QOMCommand, Operations): try: data = str(self.qmp.command('qom-get', path=path, property=prop)) data += '\n' # make values shell friendly - except QMPResponseError as err: + except ExecuteError as err: raise FuseOSError(EPERM) from err if offset > len(data): diff --git a/python/setup.cfg b/python/setup.cfg index 14bab90288..c2c61c7519 100644 --- a/python/setup.cfg +++ b/python/setup.cfg @@ -36,28 +36,39 @@ packages = # version, use e.g. "pipenv install --dev pylint==3.0.0". # Subsequently, edit 'Pipfile' to remove e.g. 'pylint = "==3.0.0'. devel = - avocado-framework >= 87.0 + avocado-framework >= 90.0 flake8 >= 3.6.0 fusepy >= 2.0.4 isort >= 5.1.2 - mypy >= 0.770 + mypy >= 0.780 pylint >= 2.8.0 tox >= 3.18.0 + urwid >= 2.1.2 + urwid-readline >= 0.13 + Pygments >= 2.9.0 # Provides qom-fuse functionality fuse = fusepy >= 2.0.4 +# QMP TUI dependencies +tui = + urwid >= 2.1.2 + urwid-readline >= 0.13 + Pygments >= 2.9.0 + [options.entry_points] console_scripts = - qom = qemu.qmp.qom:main - qom-set = qemu.qmp.qom:QOMSet.entry_point - qom-get = qemu.qmp.qom:QOMGet.entry_point - qom-list = qemu.qmp.qom:QOMList.entry_point - qom-tree = qemu.qmp.qom:QOMTree.entry_point - qom-fuse = qemu.qmp.qom_fuse:QOMFuse.entry_point [fuse] - qemu-ga-client = qemu.qmp.qemu_ga_client:main + qom = qemu.utils.qom:main + qom-set = qemu.utils.qom:QOMSet.entry_point + qom-get = qemu.utils.qom:QOMGet.entry_point + qom-list = qemu.utils.qom:QOMList.entry_point + qom-tree = qemu.utils.qom:QOMTree.entry_point + qom-fuse = qemu.utils.qom_fuse:QOMFuse.entry_point [fuse] + qemu-ga-client = qemu.utils.qemu_ga_client:main qmp-shell = qemu.qmp.qmp_shell:main + qmp-shell-wrap = qemu.qmp.qmp_shell:main_wrap + qmp-tui = qemu.qmp.qmp_tui:main [tui] [flake8] extend-ignore = E722 # Prefer pylint's bare-except checks to flake8's @@ -68,13 +79,28 @@ strict = True python_version = 3.6 warn_unused_configs = True namespace_packages = True +warn_unused_ignores = False -[mypy-qemu.qmp.qom_fuse] +[mypy-qemu.utils.qom_fuse] # fusepy has no type stubs: allow_subclassing_any = True +[mypy-qemu.qmp.qmp_tui] +# urwid and urwid_readline have no type stubs: +allow_subclassing_any = True + +# The following missing import directives are because these libraries do not +# provide type stubs. Allow them on an as-needed basis for mypy. 
[mypy-fuse] -# fusepy has no type stubs: +ignore_missing_imports = True + +[mypy-urwid] +ignore_missing_imports = True + +[mypy-urwid_readline] +ignore_missing_imports = True + +[mypy-pygments] ignore_missing_imports = True [pylint.messages control] @@ -87,7 +113,12 @@ ignore_missing_imports = True # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use "--disable=all --enable=classes # --disable=W". -disable= +disable=consider-using-f-string, + consider-using-with, + too-many-arguments, + too-many-function-args, # mypy handles this with less false positives. + too-many-instance-attributes, + no-member, # mypy also handles this better. [pylint.basic] # Good variable names which should always be accepted, separated by a comma. @@ -100,10 +131,17 @@ good-names=i, fh, # fh = open(...) fd, # fd = os.open(...) c, # for c in string: ... + T, # for TypeVars. See pylint#3401 [pylint.similarities] # Ignore imports when computing similarities. ignore-imports=yes +ignore-signatures=yes + +# Minimum lines number of a similarity. +# TODO: Remove after we opt in to Pylint 2.8.3. See commit msg. +min-similarity-lines=6 + [isort] force_grid_wrap=4 @@ -128,5 +166,16 @@ allowlist_externals = make deps = .[devel] .[fuse] # Workaround to trigger tox venv rebuild + .[tui] # Workaround to trigger tox venv rebuild commands = make check + +# Coverage.py [https://coverage.readthedocs.io/en/latest/] is a tool for +# measuring code coverage of Python programs. It monitors your program, +# noting which parts of the code have been executed, then analyzes the +# source to identify code that could have been executed but was not. + +[coverage:run] +concurrency = multiprocessing +source = qemu/ +parallel = true diff --git a/python/setup.py b/python/setup.py index 2014f81b75..c5bc45919a 100755 --- a/python/setup.py +++ b/python/setup.py @@ -5,9 +5,26 @@ Copyright (c) 2020-2021 John Snow for Red Hat, Inc. """ import setuptools +from setuptools.command import bdist_egg +import sys import pkg_resources +class bdist_egg_guard(bdist_egg.bdist_egg): + """ + Protect against bdist_egg from being executed + + This prevents calling 'setup.py install' directly, as the 'install' + CLI option will invoke the deprecated bdist_egg hook. "pip install" + calls the more modern bdist_wheel hook, which is what we want. + """ + def run(self): + sys.exit( + 'Installation directly via setup.py is not supported.\n' + 'Please use `pip install .` instead.' + ) + + def main(): """ QEMU tooling installer @@ -16,7 +33,7 @@ def main(): # https://medium.com/@daveshawley/safely-using-setup-cfg-for-metadata-1babbe54c108 pkg_resources.require('setuptools>=39.2') - setuptools.setup() + setuptools.setup(cmdclass={'bdist_egg': bdist_egg_guard}) if __name__ == '__main__': diff --git a/python/tests/iotests-mypy.sh b/python/tests/iotests-mypy.sh new file mode 100755 index 0000000000..ee76470819 --- /dev/null +++ b/python/tests/iotests-mypy.sh @@ -0,0 +1,4 @@ +#!/bin/sh -e + +cd ../tests/qemu-iotests/ +python3 -m linters --mypy diff --git a/python/tests/iotests-pylint.sh b/python/tests/iotests-pylint.sh new file mode 100755 index 0000000000..33c5ae900a --- /dev/null +++ b/python/tests/iotests-pylint.sh @@ -0,0 +1,5 @@ +#!/bin/sh -e + +cd ../tests/qemu-iotests/ +# See commit message for environment variable explainer. 
+SETUPTOOLS_USE_DISTUTILS=stdlib python3 -m linters --pylint diff --git a/python/tests/protocol.py b/python/tests/protocol.py new file mode 100644 index 0000000000..56c4d441f9 --- /dev/null +++ b/python/tests/protocol.py @@ -0,0 +1,596 @@ +import asyncio +from contextlib import contextmanager +import os +import socket +from tempfile import TemporaryDirectory + +import avocado + +from qemu.qmp import ConnectError, Runstate +from qemu.qmp.protocol import AsyncProtocol, StateError +from qemu.qmp.util import asyncio_run, create_task + + +class NullProtocol(AsyncProtocol[None]): + """ + NullProtocol is a test mockup of an AsyncProtocol implementation. + + It adds a fake_session instance variable that enables a code path + that bypasses the actual connection logic, but still allows the + reader/writers to start. + + Because the message type is defined as None, an asyncio.Event named + 'trigger_input' is created that prohibits the reader from + incessantly being able to yield None; this event can be poked to + simulate an incoming message. + + For testing symmetry with do_recv, an interface is added to "send" a + Null message. + + For testing purposes, a "simulate_disconnection" method is also + added which allows us to trigger a bottom half disconnect without + injecting any real errors into the reader/writer loops; in essence + it performs exactly half of what disconnect() normally does. + """ + def __init__(self, name=None): + self.fake_session = False + self.trigger_input: asyncio.Event + super().__init__(name) + + async def _establish_session(self): + self.trigger_input = asyncio.Event() + await super()._establish_session() + + async def _do_start_server(self, address, ssl=None): + if self.fake_session: + self._accepted = asyncio.Event() + self._set_state(Runstate.CONNECTING) + await asyncio.sleep(0) + else: + await super()._do_start_server(address, ssl) + + async def _do_accept(self): + if self.fake_session: + self._accepted = None + else: + await super()._do_accept() + + async def _do_connect(self, address, ssl=None): + if self.fake_session: + self._set_state(Runstate.CONNECTING) + await asyncio.sleep(0) + else: + await super()._do_connect(address, ssl) + + async def _do_recv(self) -> None: + await self.trigger_input.wait() + self.trigger_input.clear() + + def _do_send(self, msg: None) -> None: + pass + + async def send_msg(self) -> None: + await self._outgoing.put(None) + + async def simulate_disconnect(self) -> None: + """ + Simulates a bottom-half disconnect. + + This method schedules a disconnection but does not wait for it + to complete. This is used to put the loop into the DISCONNECTING + state without fully quiescing it back to IDLE. This is normally + something you cannot coax AsyncProtocol to do on purpose, but it + will be similar to what happens with an unhandled Exception in + the reader/writer. + + Under normal circumstances, the library design requires you to + await on disconnect(), which awaits the disconnect task and + returns bottom half errors as a pre-condition to allowing the + loop to return back to IDLE. 
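+
+        Roughly, the tests below use it like this (``proto`` standing in
+        for a connected NullProtocol)::
+
+            await proto.simulate_disconnect()   # now DISCONNECTING
+            ...
+            await proto.disconnect()            # quiesce back to IDLE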
+ """ + self._schedule_disconnect() + + +class LineProtocol(AsyncProtocol[str]): + def __init__(self, name=None): + super().__init__(name) + self.rx_history = [] + + async def _do_recv(self) -> str: + raw = await self._readline() + msg = raw.decode() + self.rx_history.append(msg) + return msg + + def _do_send(self, msg: str) -> None: + assert self._writer is not None + self._writer.write(msg.encode() + b'\n') + + async def send_msg(self, msg: str) -> None: + await self._outgoing.put(msg) + + +def run_as_task(coro, allow_cancellation=False): + """ + Run a given coroutine as a task. + + Optionally, wrap it in a try..except block that allows this + coroutine to be canceled gracefully. + """ + async def _runner(): + try: + await coro + except asyncio.CancelledError: + if allow_cancellation: + return + raise + return create_task(_runner()) + + +@contextmanager +def jammed_socket(): + """ + Opens up a random unused TCP port on localhost, then jams it. + """ + socks = [] + + try: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind(('127.0.0.1', 0)) + sock.listen(1) + address = sock.getsockname() + + socks.append(sock) + + # I don't *fully* understand why, but it takes *two* un-accepted + # connections to start jamming the socket. + for _ in range(2): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.connect(address) + socks.append(sock) + + yield address + + finally: + for sock in socks: + sock.close() + + +class Smoke(avocado.Test): + + def setUp(self): + self.proto = NullProtocol() + + def test__repr__(self): + self.assertEqual( + repr(self.proto), + "" + ) + + def testRunstate(self): + self.assertEqual( + self.proto.runstate, + Runstate.IDLE + ) + + def testDefaultName(self): + self.assertEqual( + self.proto.name, + None + ) + + def testLogger(self): + self.assertEqual( + self.proto.logger.name, + 'qemu.qmp.protocol' + ) + + def testName(self): + self.proto = NullProtocol('Steve') + + self.assertEqual( + self.proto.name, + 'Steve' + ) + + self.assertEqual( + self.proto.logger.name, + 'qemu.qmp.protocol.Steve' + ) + + self.assertEqual( + repr(self.proto), + "" + ) + + +class TestBase(avocado.Test): + + def setUp(self): + self.proto = NullProtocol(type(self).__name__) + self.assertEqual(self.proto.runstate, Runstate.IDLE) + self.runstate_watcher = None + + def tearDown(self): + self.assertEqual(self.proto.runstate, Runstate.IDLE) + + async def _asyncSetUp(self): + pass + + async def _asyncTearDown(self): + if self.runstate_watcher: + await self.runstate_watcher + + @staticmethod + def async_test(async_test_method): + """ + Decorator; adds SetUp and TearDown to async tests. + """ + async def _wrapper(self, *args, **kwargs): + loop = asyncio.get_event_loop() + loop.set_debug(True) + + await self._asyncSetUp() + await async_test_method(self, *args, **kwargs) + await self._asyncTearDown() + + return _wrapper + + # Definitions + + # The states we expect a "bad" connect/accept attempt to transition through + BAD_CONNECTION_STATES = ( + Runstate.CONNECTING, + Runstate.DISCONNECTING, + Runstate.IDLE, + ) + + # The states we expect a "good" session to transition through + GOOD_CONNECTION_STATES = ( + Runstate.CONNECTING, + Runstate.RUNNING, + Runstate.DISCONNECTING, + Runstate.IDLE, + ) + + # Helpers + + async def _watch_runstates(self, *states): + """ + This launches a task alongside (most) tests below to confirm that + the sequence of runstate changes that occur is exactly as + anticipated. 
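+
+        For instance, a test expecting a complete, successful session
+        calls (as done elsewhere in this file)::
+
+            await self._watch_runstates(*self.GOOD_CONNECTION_STATES)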
+ """ + async def _watcher(): + for state in states: + new_state = await self.proto.runstate_changed() + self.assertEqual( + new_state, + state, + msg=f"Expected state '{state.name}'", + ) + + self.runstate_watcher = create_task(_watcher()) + # Kick the loop and force the task to block on the event. + await asyncio.sleep(0) + + +class State(TestBase): + + @TestBase.async_test + async def testSuperfluousDisconnect(self): + """ + Test calling disconnect() while already disconnected. + """ + await self._watch_runstates( + Runstate.DISCONNECTING, + Runstate.IDLE, + ) + await self.proto.disconnect() + + +class Connect(TestBase): + """ + Tests primarily related to calling Connect(). + """ + async def _bad_connection(self, family: str): + assert family in ('INET', 'UNIX') + + if family == 'INET': + await self.proto.connect(('127.0.0.1', 0)) + elif family == 'UNIX': + await self.proto.connect('/dev/null') + + async def _hanging_connection(self): + with jammed_socket() as addr: + await self.proto.connect(addr) + + async def _bad_connection_test(self, family: str): + await self._watch_runstates(*self.BAD_CONNECTION_STATES) + + with self.assertRaises(ConnectError) as context: + await self._bad_connection(family) + + self.assertIsInstance(context.exception.exc, OSError) + self.assertEqual( + context.exception.error_message, + "Failed to establish connection" + ) + + @TestBase.async_test + async def testBadINET(self): + """ + Test an immediately rejected call to an IP target. + """ + await self._bad_connection_test('INET') + + @TestBase.async_test + async def testBadUNIX(self): + """ + Test an immediately rejected call to a UNIX socket target. + """ + await self._bad_connection_test('UNIX') + + @TestBase.async_test + async def testCancellation(self): + """ + Test what happens when a connection attempt is aborted. + """ + # Note that accept() cannot be cancelled outright, as it isn't a task. + # However, we can wrap it in a task and cancel *that*. + await self._watch_runstates(*self.BAD_CONNECTION_STATES) + task = run_as_task(self._hanging_connection(), allow_cancellation=True) + + state = await self.proto.runstate_changed() + self.assertEqual(state, Runstate.CONNECTING) + + # This is insider baseball, but the connection attempt has + # yielded *just* before the actual connection attempt, so kick + # the loop to make sure it's truly wedged. + await asyncio.sleep(0) + + task.cancel() + await task + + @TestBase.async_test + async def testTimeout(self): + """ + Test what happens when a connection attempt times out. + """ + await self._watch_runstates(*self.BAD_CONNECTION_STATES) + task = run_as_task(self._hanging_connection()) + + # More insider baseball: to improve the speed of this test while + # guaranteeing that the connection even gets a chance to start, + # verify that the connection hangs *first*, then await the + # result of the task with a nearly-zero timeout. + + state = await self.proto.runstate_changed() + self.assertEqual(state, Runstate.CONNECTING) + await asyncio.sleep(0) + + with self.assertRaises(asyncio.TimeoutError): + await asyncio.wait_for(task, timeout=0) + + @TestBase.async_test + async def testRequire(self): + """ + Test what happens when a connection attempt is made while CONNECTING. 
+ """ + await self._watch_runstates(*self.BAD_CONNECTION_STATES) + task = run_as_task(self._hanging_connection(), allow_cancellation=True) + + state = await self.proto.runstate_changed() + self.assertEqual(state, Runstate.CONNECTING) + + with self.assertRaises(StateError) as context: + await self._bad_connection('UNIX') + + self.assertEqual( + context.exception.error_message, + "NullProtocol is currently connecting." + ) + self.assertEqual(context.exception.state, Runstate.CONNECTING) + self.assertEqual(context.exception.required, Runstate.IDLE) + + task.cancel() + await task + + @TestBase.async_test + async def testImplicitRunstateInit(self): + """ + Test what happens if we do not wait on the runstate event until + AFTER a connection is made, i.e., connect()/accept() themselves + initialize the runstate event. All of the above tests force the + initialization by waiting on the runstate *first*. + """ + task = run_as_task(self._hanging_connection(), allow_cancellation=True) + + # Kick the loop to coerce the state change + await asyncio.sleep(0) + assert self.proto.runstate == Runstate.CONNECTING + + # We already missed the transition to CONNECTING + await self._watch_runstates(Runstate.DISCONNECTING, Runstate.IDLE) + + task.cancel() + await task + + +class Accept(Connect): + """ + All of the same tests as Connect, but using the accept() interface. + """ + async def _bad_connection(self, family: str): + assert family in ('INET', 'UNIX') + + if family == 'INET': + await self.proto.start_server_and_accept(('example.com', 1)) + elif family == 'UNIX': + await self.proto.start_server_and_accept('/dev/null') + + async def _hanging_connection(self): + with TemporaryDirectory(suffix='.qmp') as tmpdir: + sock = os.path.join(tmpdir, type(self.proto).__name__ + ".sock") + await self.proto.start_server_and_accept(sock) + + +class FakeSession(TestBase): + + def setUp(self): + super().setUp() + self.proto.fake_session = True + + async def _asyncSetUp(self): + await super()._asyncSetUp() + await self._watch_runstates(*self.GOOD_CONNECTION_STATES) + + async def _asyncTearDown(self): + await self.proto.disconnect() + await super()._asyncTearDown() + + #### + + @TestBase.async_test + async def testFakeConnect(self): + + """Test the full state lifecycle (via connect) with a no-op session.""" + await self.proto.connect('/not/a/real/path') + self.assertEqual(self.proto.runstate, Runstate.RUNNING) + + @TestBase.async_test + async def testFakeAccept(self): + """Test the full state lifecycle (via accept) with a no-op session.""" + await self.proto.start_server_and_accept('/not/a/real/path') + self.assertEqual(self.proto.runstate, Runstate.RUNNING) + + @TestBase.async_test + async def testFakeRecv(self): + """Test receiving a fake/null message.""" + await self.proto.start_server_and_accept('/not/a/real/path') + + logname = self.proto.logger.name + with self.assertLogs(logname, level='DEBUG') as context: + self.proto.trigger_input.set() + self.proto.trigger_input.clear() + await asyncio.sleep(0) # Kick reader. + + self.assertEqual( + context.output, + [f"DEBUG:{logname}:<-- None"], + ) + + @TestBase.async_test + async def testFakeSend(self): + """Test sending a fake/null message.""" + await self.proto.start_server_and_accept('/not/a/real/path') + + logname = self.proto.logger.name + with self.assertLogs(logname, level='DEBUG') as context: + # Cheat: Send a Null message to nobody. + await self.proto.send_msg() + # Kick writer; awaiting on a queue.put isn't sufficient to yield. 
+ await asyncio.sleep(0) + + self.assertEqual( + context.output, + [f"DEBUG:{logname}:--> None"], + ) + + async def _prod_session_api( + self, + current_state: Runstate, + error_message: str, + accept: bool = True + ): + with self.assertRaises(StateError) as context: + if accept: + await self.proto.start_server_and_accept('/not/a/real/path') + else: + await self.proto.connect('/not/a/real/path') + + self.assertEqual(context.exception.error_message, error_message) + self.assertEqual(context.exception.state, current_state) + self.assertEqual(context.exception.required, Runstate.IDLE) + + @TestBase.async_test + async def testAcceptRequireRunning(self): + """Test that accept() cannot be called when Runstate=RUNNING""" + await self.proto.start_server_and_accept('/not/a/real/path') + + await self._prod_session_api( + Runstate.RUNNING, + "NullProtocol is already connected and running.", + accept=True, + ) + + @TestBase.async_test + async def testConnectRequireRunning(self): + """Test that connect() cannot be called when Runstate=RUNNING""" + await self.proto.start_server_and_accept('/not/a/real/path') + + await self._prod_session_api( + Runstate.RUNNING, + "NullProtocol is already connected and running.", + accept=False, + ) + + @TestBase.async_test + async def testAcceptRequireDisconnecting(self): + """Test that accept() cannot be called when Runstate=DISCONNECTING""" + await self.proto.start_server_and_accept('/not/a/real/path') + + # Cheat: force a disconnect. + await self.proto.simulate_disconnect() + + await self._prod_session_api( + Runstate.DISCONNECTING, + ("NullProtocol is disconnecting." + " Call disconnect() to return to IDLE state."), + accept=True, + ) + + @TestBase.async_test + async def testConnectRequireDisconnecting(self): + """Test that connect() cannot be called when Runstate=DISCONNECTING""" + await self.proto.start_server_and_accept('/not/a/real/path') + + # Cheat: force a disconnect. + await self.proto.simulate_disconnect() + + await self._prod_session_api( + Runstate.DISCONNECTING, + ("NullProtocol is disconnecting." + " Call disconnect() to return to IDLE state."), + accept=False, + ) + + +class SimpleSession(TestBase): + + def setUp(self): + super().setUp() + self.server = LineProtocol(type(self).__name__ + '-server') + + async def _asyncSetUp(self): + await super()._asyncSetUp() + await self._watch_runstates(*self.GOOD_CONNECTION_STATES) + + async def _asyncTearDown(self): + await self.proto.disconnect() + try: + await self.server.disconnect() + except EOFError: + pass + await super()._asyncTearDown() + + @TestBase.async_test + async def testSmoke(self): + with TemporaryDirectory(suffix='.qmp') as tmpdir: + sock = os.path.join(tmpdir, type(self.proto).__name__ + ".sock") + server_task = create_task(self.server.start_server_and_accept(sock)) + + # give the server a chance to start listening [...] + await asyncio.sleep(0) + await self.proto.connect(sock) diff --git a/python/tests/pylint.sh b/python/tests/pylint.sh index 4b10b34db7..03d64705a1 100755 --- a/python/tests/pylint.sh +++ b/python/tests/pylint.sh @@ -1,2 +1,3 @@ #!/bin/sh -e -python3 -m pylint qemu/ +# See commit message for environment variable explainer. 
+SETUPTOOLS_USE_DISTUTILS=stdlib python3 -m pylint qemu/ diff --git a/qapi/acpi.json b/qapi/acpi.json index 51f0d55db7..d148f6db9f 100644 --- a/qapi/acpi.json +++ b/qapi/acpi.json @@ -133,8 +133,9 @@ # Example: # # <- { "event": "ACPI_DEVICE_OST", -# "data": { "device": "d1", "slot": "0", -# "slot-type": "DIMM", "source": 1, "status": 0 } } +# "data": { "info": { "device": "d1", "slot": "0", +# "slot-type": "DIMM", "source": 1, "status": 0 } }, +# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } # ## { 'event': 'ACPI_DEVICE_OST', diff --git a/qapi/audio.json b/qapi/audio.json index 9cba0df8a4..1e0a24bdfc 100644 --- a/qapi/audio.json +++ b/qapi/audio.json @@ -1,4 +1,5 @@ # -*- mode: python -*- +# vim: filetype=python # # Copyright (C) 2015-2019 Zoltán Kővágó # @@ -105,6 +106,28 @@ '*out': 'AudiodevAlsaPerDirectionOptions', '*threshold': 'uint32' } } +## +# @AudiodevSndioOptions: +# +# Options of the sndio audio backend. +# +# @in: options of the capture stream +# +# @out: options of the playback stream +# +# @dev: the name of the sndio device to use (default 'default') +# +# @latency: play buffer size (in microseconds) +# +# Since: 7.2 +## +{ 'struct': 'AudiodevSndioOptions', + 'data': { + '*in': 'AudiodevPerDirectionOptions', + '*out': 'AudiodevPerDirectionOptions', + '*dev': 'str', + '*latency': 'uint32'} } + ## # @AudiodevCoreaudioPerDirectionOptions: # @@ -351,7 +374,6 @@ '*out': 'AudiodevPerDirectionOptions', '*path': 'str' } } - ## # @AudioFormat: # @@ -386,8 +408,8 @@ # Since: 4.0 ## { 'enum': 'AudiodevDriver', - 'data': [ 'none', 'alsa', 'coreaudio', 'dsound', 'jack', 'oss', 'pa', - 'sdl', 'spice', 'wav' ] } + 'data': [ 'none', 'alsa', 'coreaudio', 'dbus', 'dsound', 'jack', 'oss', 'pa', + 'sdl', 'sndio', 'spice', 'wav' ] } ## # @Audiodev: @@ -412,10 +434,12 @@ 'none': 'AudiodevGenericOptions', 'alsa': 'AudiodevAlsaOptions', 'coreaudio': 'AudiodevCoreaudioOptions', + 'dbus': 'AudiodevGenericOptions', 'dsound': 'AudiodevDsoundOptions', 'jack': 'AudiodevJackOptions', 'oss': 'AudiodevOssOptions', 'pa': 'AudiodevPaOptions', 'sdl': 'AudiodevSdlOptions', + 'sndio': 'AudiodevSndioOptions', 'spice': 'AudiodevGenericOptions', 'wav': 'AudiodevWavOptions' } } diff --git a/qapi/block-core.json b/qapi/block-core.json index 675d8265eb..95ac4fa634 100644 --- a/qapi/block-core.json +++ b/qapi/block-core.json @@ -139,6 +139,52 @@ '*encryption-format': 'RbdImageEncryptionFormat' } } +## +# @ImageInfoSpecificKind: +# +# @luks: Since 2.7 +# @rbd: Since 6.1 +# +# Since: 1.7 +## +{ 'enum': 'ImageInfoSpecificKind', + 'data': [ 'qcow2', 'vmdk', 'luks', 'rbd' ] } + +## +# @ImageInfoSpecificQCow2Wrapper: +# +# Since: 1.7 +## +{ 'struct': 'ImageInfoSpecificQCow2Wrapper', + 'data': { 'data': 'ImageInfoSpecificQCow2' } } + +## +# @ImageInfoSpecificVmdkWrapper: +# +# Since: 6.1 +## +{ 'struct': 'ImageInfoSpecificVmdkWrapper', + 'data': { 'data': 'ImageInfoSpecificVmdk' } } + +## +# @ImageInfoSpecificLUKSWrapper: +# +# Since: 2.7 +## +{ 'struct': 'ImageInfoSpecificLUKSWrapper', + 'data': { 'data': 'QCryptoBlockInfoLUKS' } } +# If we need to add block driver specific parameters for +# LUKS in future, then we'll subclass QCryptoBlockInfoLUKS +# to define a ImageInfoSpecificLUKS + +## +# @ImageInfoSpecificRbdWrapper: +# +# Since: 6.1 +## +{ 'struct': 'ImageInfoSpecificRbdWrapper', + 'data': { 'data': 'ImageInfoSpecificRbd' } } + ## # @ImageInfoSpecific: # @@ -147,14 +193,13 @@ # Since: 1.7 ## { 'union': 'ImageInfoSpecific', + 'base': { 'type': 'ImageInfoSpecificKind' }, + 'discriminator': 'type', 
'data': { - 'qcow2': 'ImageInfoSpecificQCow2', - 'vmdk': 'ImageInfoSpecificVmdk', - # If we need to add block driver specific parameters for - # LUKS in future, then we'll subclass QCryptoBlockInfoLUKS - # to define a ImageInfoSpecificLUKS - 'luks': 'QCryptoBlockInfoLUKS', - 'rbd': 'ImageInfoSpecificRbd' + 'qcow2': 'ImageInfoSpecificQCow2Wrapper', + 'vmdk': 'ImageInfoSpecificVmdkWrapper', + 'luks': 'ImageInfoSpecificLUKSWrapper', + 'rbd': 'ImageInfoSpecificRbdWrapper' } } ## @@ -192,7 +237,6 @@ # information (since 1.7) # # Since: 1.3 -# ## { 'struct': 'ImageInfo', 'data': {'filename': 'str', 'format': 'str', '*dirty-flag': 'bool', @@ -243,7 +287,6 @@ # supports it # # Since: 1.4 -# ## { 'struct': 'ImageCheck', 'data': {'filename': 'str', 'format': 'str', 'check-errors': 'int', @@ -283,7 +326,6 @@ # @filename: filename that is referred to by @offset # # Since: 2.6 -# ## { 'struct': 'MapEntry', 'data': {'start': 'int', 'length': 'int', 'data': 'bool', @@ -295,9 +337,9 @@ # # Cache mode information for a block device # -# @writeback: true if writeback mode is enabled -# @direct: true if the host page cache is bypassed (O_DIRECT) -# @no-flush: true if flush requests are ignored for the device +# @writeback: true if writeback mode is enabled +# @direct: true if the host page cache is bypassed (O_DIRECT) +# @no-flush: true if flush requests are ignored for the device # # Since: 2.3 ## @@ -400,7 +442,6 @@ # has one or more dirty bitmaps) (Since 4.2) # # Since: 0.14 -# ## { 'struct': 'BlockDeviceInfo', 'data': { 'file': 'str', '*node-name': 'str', 'ro': 'bool', 'drv': 'str', @@ -446,11 +487,11 @@ # @granularity: granularity of the dirty bitmap in bytes (since 1.4) # # @recording: true if the bitmap is recording new writes from the guest. -# Replaces `active` and `disabled` statuses. (since 4.0) +# Replaces ``active`` and ``disabled`` statuses. (since 4.0) # # @busy: true if the bitmap is in-use by some operation (NBD or jobs) # and cannot be modified via QMP or used by another operation. -# Replaces `locked` and `frozen` statuses. (since 4.0) +# Replaces ``locked`` and ``frozen`` statuses. (since 4.0) # # @persistent: true if the bitmap was stored on disk, is scheduled to be stored # on disk, or both. (since 4.0) @@ -563,7 +604,7 @@ # @inserted: @BlockDeviceInfo describing the device if media is # present # -# Since: 0.14 +# Since: 0.14 ## { 'struct': 'BlockInfo', 'data': {'device': 'str', '*qdev': 'str', 'type': 'str', 'removable': 'bool', @@ -696,8 +737,8 @@ # } # ## -{ 'command': 'query-block', 'returns': ['BlockInfo'] } - +{ 'command': 'query-block', 'returns': ['BlockInfo'], + 'allow-preconfig': true } ## # @BlockDeviceTimedStats: @@ -755,9 +796,9 @@ # # Statistics of a virtual block device or a block backing device. # -# @rd_bytes: The number of bytes read by the device. +# @rd_bytes: The number of bytes read by the device. # -# @wr_bytes: The number of bytes written by the device. +# @wr_bytes: The number of bytes written by the device. # # @unmap_bytes: The number of bytes unmapped by the device (Since 4.2) # @@ -914,7 +955,7 @@ 'data': { 'file': 'BlockStatsSpecificFile', 'host_device': { 'type': 'BlockStatsSpecificFile', - 'if': 'defined(HAVE_HOST_BLOCK_DEVICE)' }, + 'if': 'HAVE_HOST_BLOCK_DEVICE' }, 'nvme': 'BlockStatsSpecificNvme' } } ## @@ -930,7 +971,7 @@ # @qdev: The qdev ID, or if no ID is assigned, the QOM path of the block # device. (since 3.0) # -# @stats: A @BlockDeviceStats for the device. +# @stats: A @BlockDeviceStats for the device. 
# # @driver-specific: Optional driver-specific stats. (Since 4.2) # @@ -1073,7 +1114,8 @@ ## { 'command': 'query-blockstats', 'data': { '*query-nodes': 'bool' }, - 'returns': ['BlockStats'] } + 'returns': ['BlockStats'], + 'allow-preconfig': true } ## # @BlockdevOnError: @@ -1222,7 +1264,8 @@ # # Since: 1.1 ## -{ 'command': 'query-block-jobs', 'returns': ['BlockJobInfo'] } +{ 'command': 'query-block-jobs', 'returns': ['BlockJobInfo'], + 'allow-preconfig': true } ## # @block_resize: @@ -1235,7 +1278,7 @@ # # @node-name: graph node name to get the image resized (Since 2.0) # -# @size: new image size in bytes +# @size: new image size in bytes # # Returns: - nothing on success # - If @device is not a valid block device, DeviceNotFound @@ -1253,7 +1296,8 @@ 'data': { '*device': 'str', '*node-name': 'str', 'size': 'int' }, - 'coroutine': true } + 'coroutine': true, + 'allow-preconfig': true } ## # @NewImageMode: @@ -1393,6 +1437,9 @@ # # @x-perf: Performance options. (Since 6.0) # +# Features: +# @unstable: Member @x-perf is experimental. +# # Note: @on-source-error and @on-target-error only affect background # I/O. If an error occurs during a guest write request, the device's # rerror/werror actions will be used. @@ -1407,7 +1454,9 @@ '*on-source-error': 'BlockdevOnError', '*on-target-error': 'BlockdevOnError', '*auto-finalize': 'bool', '*auto-dismiss': 'bool', - '*filter-node-name': 'str', '*x-perf': 'BackupPerf' } } + '*filter-node-name': 'str', + '*x-perf': { 'type': 'BackupPerf', + 'features': [ 'unstable' ] } } } ## # @DriveBackup: @@ -1464,8 +1513,8 @@ # ## { 'command': 'blockdev-snapshot-sync', - 'data': 'BlockdevSnapshotSync' } - + 'data': 'BlockdevSnapshotSync', + 'allow-preconfig': true } ## # @blockdev-snapshot: @@ -1506,7 +1555,8 @@ ## { 'command': 'blockdev-snapshot', 'data': 'BlockdevSnapshot', - 'features': [ 'allow-write-only-overlay' ] } + 'features': [ 'allow-write-only-overlay' ], + 'allow-preconfig': true } ## # @change-backing-file: @@ -1538,7 +1588,8 @@ ## { 'command': 'change-backing-file', 'data': { 'device': 'str', 'image-node-name': 'str', - 'backing-file': 'str' } } + 'backing-file': 'str' }, + 'allow-preconfig': true } ## # @block-commit: @@ -1648,7 +1699,8 @@ '*backing-file': 'str', '*speed': 'int', '*on-error': 'BlockdevOnError', '*filter-node-name': 'str', - '*auto-finalize': 'bool', '*auto-dismiss': 'bool' } } + '*auto-finalize': 'bool', '*auto-dismiss': 'bool' }, + 'allow-preconfig': true } ## # @drive-backup: @@ -1659,6 +1711,9 @@ # The operation can be stopped before it has completed using the # block-job-cancel command. # +# Features: +# @deprecated: This command is deprecated. Use @blockdev-backup instead. 
+# # Returns: - nothing on success # - If @device is not a valid block device, GenericError # @@ -1674,7 +1729,8 @@ # ## { 'command': 'drive-backup', 'boxed': true, - 'data': 'DriveBackup' } + 'data': 'DriveBackup', 'features': ['deprecated'], + 'allow-preconfig': true } ## # @blockdev-backup: @@ -1691,6 +1747,7 @@ # Since: 2.3 # # Example: +# # -> { "execute": "blockdev-backup", # "arguments": { "device": "src-id", # "sync": "full", @@ -1699,8 +1756,8 @@ # ## { 'command': 'blockdev-backup', 'boxed': true, - 'data': 'BlockdevBackup' } - + 'data': 'BlockdevBackup', + 'allow-preconfig': true } ## # @query-named-block-nodes: @@ -1723,6 +1780,7 @@ # "file":"disks/test.qcow2", # "node-name": "my-node", # "backing_file_depth":1, +# "detect_zeroes":"off", # "bps":1000000, # "bps_rd":0, # "bps_wr":0, @@ -1765,7 +1823,8 @@ ## { 'command': 'query-named-block-nodes', 'returns': [ 'BlockDeviceInfo' ], - 'data': { '*flat': 'bool' } } + 'data': { '*flat': 'bool' }, + 'allow-preconfig': true } ## # @XDbgBlockGraphNodeType: @@ -1825,14 +1884,11 @@ # # @resize: This permission is required to change the size of a block node. # -# @graph-mod: This permission is required to change the node that this -# BdrvChild points to. -# # Since: 4.0 ## { 'enum': 'BlockPermission', - 'data': [ 'consistent-read', 'write', 'write-unchanged', 'resize', - 'graph-mod' ] } + 'data': [ 'consistent-read', 'write', 'write-unchanged', 'resize' ] } + ## # @XDbgBlockGraphEdge: # @@ -1871,9 +1927,14 @@ # # Get the block graph. # +# Features: +# @unstable: This command is meant for debugging. +# # Since: 4.0 ## -{ 'command': 'x-debug-query-block-graph', 'returns': 'XDbgBlockGraph' } +{ 'command': 'x-debug-query-block-graph', 'returns': 'XDbgBlockGraph', + 'features': [ 'unstable' ], + 'allow-preconfig': true } ## # @drive-mirror: @@ -1901,7 +1962,8 @@ # ## { 'command': 'drive-mirror', 'boxed': true, - 'data': 'DriveMirror' } + 'data': 'DriveMirror', + 'allow-preconfig': true } ## # @DriveMirror: @@ -1911,8 +1973,8 @@ # @job-id: identifier for the newly-created block job. If # omitted, the device name will be used. (Since 2.7) # -# @device: the device name or node-name of a root node whose writes should be -# mirrored. +# @device: the device name or node-name of a root node whose writes should be +# mirrored. # # @target: the target of the new image. If the file exists, or if it # is a device, the existing file/device will be used as the new @@ -1932,7 +1994,7 @@ # @mode: whether and how QEMU should create a new image, default is # 'absolute-paths'. # -# @speed: the maximum speed, in bytes per second +# @speed: the maximum speed, in bytes per second # # @sync: what parts of the disk image should be copied to the destination # (all the disk, only the sectors allocated in the topmost image, or @@ -1953,6 +2015,7 @@ # @on-target-error: the action to take on an error on the target, # default 'report' (no limitations, since this applies to # a different block device than @device). +# # @unmap: Whether to try to unmap target sectors where source has # only zero. If true, and target unallocated sectors will read as zero, # target image sectors will be unmapped; otherwise, zeroes will be @@ -1974,6 +2037,7 @@ # When true, this job will automatically disappear from the query # list without user intervention. # Defaults to true. 
(Since 3.1) +# # Since: 1.3 ## { 'struct': 'DriveMirror', @@ -2024,7 +2088,7 @@ '*persistent': 'bool', '*disabled': 'bool' } } ## -# @BlockDirtyBitmapMergeSource: +# @BlockDirtyBitmapOrStr: # # @local: name of the bitmap, attached to the same node as target bitmap. # @@ -2032,7 +2096,7 @@ # # Since: 4.1 ## -{ 'alternate': 'BlockDirtyBitmapMergeSource', +{ 'alternate': 'BlockDirtyBitmapOrStr', 'data': { 'local': 'str', 'external': 'BlockDirtyBitmap' } } @@ -2051,7 +2115,7 @@ ## { 'struct': 'BlockDirtyBitmapMerge', 'data': { 'node': 'str', 'target': 'str', - 'bitmaps': ['BlockDirtyBitmapMergeSource'] } } + 'bitmaps': ['BlockDirtyBitmapOrStr'] } } ## # @block-dirty-bitmap-add: @@ -2072,7 +2136,8 @@ # ## { 'command': 'block-dirty-bitmap-add', - 'data': 'BlockDirtyBitmapAdd' } + 'data': 'BlockDirtyBitmapAdd', + 'allow-preconfig': true } ## # @block-dirty-bitmap-remove: @@ -2096,7 +2161,8 @@ # ## { 'command': 'block-dirty-bitmap-remove', - 'data': 'BlockDirtyBitmap' } + 'data': 'BlockDirtyBitmap', + 'allow-preconfig': true } ## # @block-dirty-bitmap-clear: @@ -2119,7 +2185,8 @@ # ## { 'command': 'block-dirty-bitmap-clear', - 'data': 'BlockDirtyBitmap' } + 'data': 'BlockDirtyBitmap', + 'allow-preconfig': true } ## # @block-dirty-bitmap-enable: @@ -2140,7 +2207,8 @@ # ## { 'command': 'block-dirty-bitmap-enable', - 'data': 'BlockDirtyBitmap' } + 'data': 'BlockDirtyBitmap', + 'allow-preconfig': true } ## # @block-dirty-bitmap-disable: @@ -2161,7 +2229,8 @@ # ## { 'command': 'block-dirty-bitmap-disable', - 'data': 'BlockDirtyBitmap' } + 'data': 'BlockDirtyBitmap', + 'allow-preconfig': true } ## # @block-dirty-bitmap-merge: @@ -2193,7 +2262,8 @@ # ## { 'command': 'block-dirty-bitmap-merge', - 'data': 'BlockDirtyBitmapMerge' } + 'data': 'BlockDirtyBitmapMerge', + 'allow-preconfig': true } ## # @BlockDirtyBitmapSha256: @@ -2212,6 +2282,9 @@ # # Get bitmap SHA256. # +# Features: +# @unstable: This command is meant for debugging. +# # Returns: - BlockDirtyBitmapSha256 on success # - If @node is not a valid block device, DeviceNotFound # - If @name is not found or if hashing has failed, GenericError with an @@ -2220,7 +2293,9 @@ # Since: 2.10 ## { 'command': 'x-debug-block-dirty-bitmap-sha256', - 'data': 'BlockDirtyBitmap', 'returns': 'BlockDirtyBitmapSha256' } + 'data': 'BlockDirtyBitmap', 'returns': 'BlockDirtyBitmapSha256', + 'features': [ 'unstable' ], + 'allow-preconfig': true } ## # @blockdev-mirror: @@ -2241,7 +2316,7 @@ # broken Quorum files. By default, @device is replaced, although # implicitly created filters on it are kept. # -# @speed: the maximum speed, in bytes per second +# @speed: the maximum speed, in bytes per second # # @sync: what parts of the disk image should be copied to the destination # (all the disk, only the sectors allocated in the topmost image, or @@ -2283,6 +2358,7 @@ # When true, this job will automatically disappear from the query # list without user intervention. # Defaults to true. (Since 3.1) +# # Returns: nothing on success. # # Since: 2.6 @@ -2305,7 +2381,8 @@ '*on-target-error': 'BlockdevOnError', '*filter-node-name': 'str', '*copy-mode': 'MirrorCopyMode', - '*auto-finalize': 'bool', '*auto-dismiss': 'bool' } } + '*auto-finalize': 'bool', '*auto-dismiss': 'bool' }, + 'allow-preconfig': true } ## # @BlockIOThrottle: @@ -2450,27 +2527,57 @@ # # Properties for throttle-group objects. # -# The options starting with x- are aliases for the same key without x- in -# the @limits object. 
As indicated by the x- prefix, this is not a stable -# interface and may be removed or changed incompatibly in the future. Use -# @limits for a supported stable interface. -# # @limits: limits to apply for this throttle group # +# Features: +# @unstable: All members starting with x- are aliases for the same key +# without x- in the @limits object. This is not a stable +# interface and may be removed or changed incompatibly in +# the future. Use @limits for a supported stable +# interface. +# # Since: 2.11 ## { 'struct': 'ThrottleGroupProperties', 'data': { '*limits': 'ThrottleLimits', - '*x-iops-total' : 'int', '*x-iops-total-max' : 'int', - '*x-iops-total-max-length' : 'int', '*x-iops-read' : 'int', - '*x-iops-read-max' : 'int', '*x-iops-read-max-length' : 'int', - '*x-iops-write' : 'int', '*x-iops-write-max' : 'int', - '*x-iops-write-max-length' : 'int', '*x-bps-total' : 'int', - '*x-bps-total-max' : 'int', '*x-bps-total-max-length' : 'int', - '*x-bps-read' : 'int', '*x-bps-read-max' : 'int', - '*x-bps-read-max-length' : 'int', '*x-bps-write' : 'int', - '*x-bps-write-max' : 'int', '*x-bps-write-max-length' : 'int', - '*x-iops-size' : 'int' } } + '*x-iops-total': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-iops-total-max': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-iops-total-max-length': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-iops-read': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-iops-read-max': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-iops-read-max-length': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-iops-write': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-iops-write-max': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-iops-write-max-length': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-bps-total': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-bps-total-max': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-bps-total-max-length': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-bps-read': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-bps-read-max': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-bps-read-max-length': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-bps-write': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-bps-write-max': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-bps-write-max-length': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-iops-size': { 'type': 'int', + 'features': [ 'unstable' ] } } } ## # @block-stream: @@ -2577,7 +2684,8 @@ '*base-node': 'str', '*backing-file': 'str', '*bottom': 'str', '*speed': 'int', '*on-error': 'BlockdevOnError', '*filter-node-name': 'str', - '*auto-finalize': 'bool', '*auto-dismiss': 'bool' } } + '*auto-finalize': 'bool', '*auto-dismiss': 'bool' }, + 'allow-preconfig': true } ## # @block-job-set-speed: @@ -2601,7 +2709,8 @@ # Since: 1.1 ## { 'command': 'block-job-set-speed', - 'data': { 'device': 'str', 'speed': 'int' } } + 'data': { 'device': 'str', 'speed': 'int' }, + 'allow-preconfig': true } ## # @block-job-cancel: @@ -2640,7 +2749,8 @@ # # Since: 1.1 ## -{ 'command': 'block-job-cancel', 'data': { 'device': 'str', '*force': 'bool' } } +{ 'command': 'block-job-cancel', 'data': { 'device': 'str', '*force': 'bool' }, + 'allow-preconfig': true } ## # @block-job-pause: @@ -2664,7 +2774,8 @@ # # Since: 1.3 ## -{ 'command': 'block-job-pause', 'data': { 'device': 'str' } } +{ 'command': 'block-job-pause', 'data': { 'device': 'str' }, + 'allow-preconfig': true } ## # 
@block-job-resume: @@ -2686,7 +2797,8 @@ # # Since: 1.3 ## -{ 'command': 'block-job-resume', 'data': { 'device': 'str' } } +{ 'command': 'block-job-resume', 'data': { 'device': 'str' }, + 'allow-preconfig': true } ## # @block-job-complete: @@ -2714,7 +2826,8 @@ # # Since: 1.3 ## -{ 'command': 'block-job-complete', 'data': { 'device': 'str' } } +{ 'command': 'block-job-complete', 'data': { 'device': 'str' }, + 'allow-preconfig': true } ## # @block-job-dismiss: @@ -2734,7 +2847,8 @@ # # Since: 2.12 ## -{ 'command': 'block-job-dismiss', 'data': { 'id': 'str' } } +{ 'command': 'block-job-dismiss', 'data': { 'id': 'str' }, + 'allow-preconfig': true } ## # @block-job-finalize: @@ -2752,7 +2866,8 @@ # # Since: 2.12 ## -{ 'command': 'block-job-finalize', 'data': { 'id': 'str' } } +{ 'command': 'block-job-finalize', 'data': { 'id': 'str' }, + 'allow-preconfig': true } ## # @BlockdevDiscardOptions: @@ -2796,7 +2911,7 @@ ## { 'enum': 'BlockdevAioOptions', 'data': [ 'threads', 'native', - { 'name': 'io_uring', 'if': 'defined(CONFIG_LINUX_IO_URING)' } ] } + { 'name': 'io_uring', 'if': 'CONFIG_LINUX_IO_URING' } ] } ## # @BlockdevCacheOptions: @@ -2825,20 +2940,30 @@ # @blklogwrites: Since 3.0 # @blkreplay: Since 4.2 # @compress: Since 5.0 +# @copy-before-write: Since 6.2 +# @snapshot-access: Since 7.0 # # Since: 2.9 ## { 'enum': 'BlockdevDriver', 'data': [ 'blkdebug', 'blklogwrites', 'blkreplay', 'blkverify', 'bochs', - 'cloop', 'compress', 'copy-on-read', 'dmg', 'file', 'ftp', 'ftps', - 'gluster', - {'name': 'host_cdrom', 'if': 'defined(HAVE_HOST_BLOCK_DEVICE)' }, - {'name': 'host_device', 'if': 'defined(HAVE_HOST_BLOCK_DEVICE)' }, - 'http', 'https', 'iscsi', - 'luks', 'nbd', 'nfs', 'null-aio', 'null-co', 'nvme', 'parallels', - 'preallocate', 'qcow', 'qcow2', 'qed', 'quorum', 'raw', 'rbd', - { 'name': 'replication', 'if': 'defined(CONFIG_REPLICATION)' }, - 'ssh', 'throttle', 'vdi', 'vhdx', 'vmdk', 'vpc', 'vvfat' ] } + 'cloop', 'compress', 'copy-before-write', 'copy-on-read', 'dmg', + 'file', 'snapshot-access', 'ftp', 'ftps', 'gluster', + {'name': 'host_cdrom', 'if': 'HAVE_HOST_BLOCK_DEVICE' }, + {'name': 'host_device', 'if': 'HAVE_HOST_BLOCK_DEVICE' }, + 'http', 'https', + { 'name': 'io_uring', 'if': 'CONFIG_BLKIO' }, + 'iscsi', + 'luks', 'nbd', 'nfs', 'null-aio', 'null-co', 'nvme', + { 'name': 'nvme-io_uring', 'if': 'CONFIG_BLKIO' }, + 'parallels', 'preallocate', 'qcow', 'qcow2', 'qed', 'quorum', + 'raw', 'rbd', + { 'name': 'replication', 'if': 'CONFIG_REPLICATION' }, + 'ssh', 'throttle', 'vdi', 'vhdx', + { 'name': 'virtio-blk-vfio-pci', 'if': 'CONFIG_BLKIO' }, + { 'name': 'virtio-blk-vhost-user', 'if': 'CONFIG_BLKIO' }, + { 'name': 'virtio-blk-vhost-vdpa', 'if': 'CONFIG_BLKIO' }, + 'vmdk', 'vpc', 'vvfat' ] } ## # @BlockdevOptionsFile: @@ -2850,6 +2975,12 @@ # for this device (default: none, forward the commands via SG_IO; # since 2.11) # @aio: AIO backend (default: threads) (since: 2.8) +# @aio-max-batch: maximum number of requests to batch together into a single +# submission in the AIO backend. The smallest value between +# this and the aio-max-batch value of the IOThread object is +# chosen. +# 0 means that the AIO backend will handle it automatically. +# (default: 0, since 6.2) # @locking: whether to enable file locking. If set to 'auto', only enable # when Open File Descriptor (OFD) locking API is available # (default: auto, since 2.10) @@ -2870,6 +3001,7 @@ # read-only when the last writer is detached. 
This # allows giving QEMU write permissions only on demand # when an operation actually needs write access. +# @unstable: Member x-check-cache-dropped is meant for debugging. # # Since: 2.9 ## @@ -2878,11 +3010,13 @@ '*pr-manager': 'str', '*locking': 'OnOffAuto', '*aio': 'BlockdevAioOptions', + '*aio-max-batch': 'int', '*drop-cache': {'type': 'bool', - 'if': 'defined(CONFIG_LINUX)'}, - '*x-check-cache-dropped': 'bool' }, + 'if': 'CONFIG_LINUX'}, + '*x-check-cache-dropped': { 'type': 'bool', + 'features': [ 'unstable' ] } }, 'features': [ { 'name': 'dynamic-auto-read-only', - 'if': 'defined(CONFIG_POSIX)' } ] } + 'if': 'CONFIG_POSIX' } ] } ## # @BlockdevOptionsNull: @@ -2967,7 +3101,6 @@ 'base': 'BlockdevOptionsGenericFormat', 'data': { '*key-secret': 'str' } } - ## # @BlockdevOptionsGenericCOWFormat: # @@ -3082,10 +3215,9 @@ 'base': 'BlockdevOptionsGenericCOWFormat', 'data': { '*encrypt': 'BlockdevQcowEncryption' } } - - ## # @BlockdevQcow2EncryptionFormat: +# # @aes: AES-CBC with plain64 initialization vectors # # Since: 2.10 @@ -3238,15 +3370,14 @@ ## # @BlockdevOptionsSsh: # -# @server: host address +# @server: host address # -# @path: path to the image on the host +# @path: path to the image on the host # -# @user: user as which to connect, defaults to current -# local user name +# @user: user as which to connect, defaults to current local user name # -# @host-key-check: Defines how and what to check the host key against -# (default: known_hosts) +# @host-key-check: Defines how and what to check the host key against +# (default: known_hosts) # # Since: 2.9 ## @@ -3256,7 +3387,6 @@ '*user': 'str', '*host-key-check': 'SshHostKeyCheck' } } - ## # @BlkdebugEvent: # @@ -3556,6 +3686,72 @@ '*debug': 'int', '*logfile': 'str' } } +## +# @BlockdevOptionsIoUring: +# +# Driver specific block device options for the io_uring backend. +# +# @filename: path to the image file +# +# Since: 7.2 +## +{ 'struct': 'BlockdevOptionsIoUring', + 'data': { 'filename': 'str' }, + 'if': 'CONFIG_BLKIO' } + +## +# @BlockdevOptionsNvmeIoUring: +# +# Driver specific block device options for the nvme-io_uring backend. +# +# @path: path to the NVMe namespace's character device (e.g. /dev/ng0n1). +# +# Since: 7.2 +## +{ 'struct': 'BlockdevOptionsNvmeIoUring', + 'data': { 'path': 'str' }, + 'if': 'CONFIG_BLKIO' } + +## +# @BlockdevOptionsVirtioBlkVfioPci: +# +# Driver specific block device options for the virtio-blk-vfio-pci backend. +# +# @path: path to the PCI device's sysfs directory (e.g. +# /sys/bus/pci/devices/0000:00:01.0). +# +# Since: 7.2 +## +{ 'struct': 'BlockdevOptionsVirtioBlkVfioPci', + 'data': { 'path': 'str' }, + 'if': 'CONFIG_BLKIO' } + +## +# @BlockdevOptionsVirtioBlkVhostUser: +# +# Driver specific block device options for the virtio-blk-vhost-user backend. +# +# @path: path to the vhost-user UNIX domain socket. +# +# Since: 7.2 +## +{ 'struct': 'BlockdevOptionsVirtioBlkVhostUser', + 'data': { 'path': 'str' }, + 'if': 'CONFIG_BLKIO' } + +## +# @BlockdevOptionsVirtioBlkVhostVdpa: +# +# Driver specific block device options for the virtio-blk-vhost-vdpa backend. +# +# @path: path to the vhost-vdpa character device. 
+# +# Since: 7.2 +## +{ 'struct': 'BlockdevOptionsVirtioBlkVhostVdpa', + 'data': { 'path': 'str' }, + 'if': 'CONFIG_BLKIO' } + ## # @IscsiTransport: # @@ -3620,7 +3816,6 @@ '*header-digest': 'IscsiHeaderDigest', '*timeout': 'int' } } - ## # @RbdAuthMode: # @@ -3774,7 +3969,7 @@ # Since: 2.9 ## { 'enum' : 'ReplicationMode', 'data' : [ 'primary', 'secondary' ], - 'if': 'defined(CONFIG_REPLICATION)' } + 'if': 'CONFIG_REPLICATION' } ## # @BlockdevOptionsReplication: @@ -3793,7 +3988,7 @@ 'base': 'BlockdevOptionsGenericFormat', 'data': { 'mode': 'ReplicationMode', '*top-id': 'str' }, - 'if': 'defined(CONFIG_REPLICATION)' } + 'if': 'CONFIG_REPLICATION' } ## # @NFSTransport: @@ -3979,6 +4174,8 @@ # # @tls-creds: TLS credentials ID # +# @tls-hostname: TLS hostname override for certificate validation (Since 7.0) +# # @x-dirty-bitmap: A metadata context name such as "qemu:dirty-bitmap:NAME" # or "qemu:allocation-depth" to query in place of the # traditional "base:allocation" block status (see @@ -3994,14 +4191,25 @@ # future requests before a successful reconnect will # immediately fail. Default 0 (Since 4.2) # +# @open-timeout: In seconds. If zero, the nbd driver tries the connection +# only once, and fails to open if the connection fails. +# If non-zero, the nbd driver will repeat connection attempts +# until successful or until @open-timeout seconds have elapsed. +# Default 0 (Since 7.0) +# +# Features: +# @unstable: Member @x-dirty-bitmap is experimental. +# # Since: 2.9 ## { 'struct': 'BlockdevOptionsNbd', 'data': { 'server': 'SocketAddress', '*export': 'str', '*tls-creds': 'str', - '*x-dirty-bitmap': 'str', - '*reconnect-delay': 'uint32' } } + '*tls-hostname': 'str', + '*x-dirty-bitmap': { 'type': 'str', 'features': [ 'unstable' ] }, + '*reconnect-delay': 'uint32', + '*open-timeout': 'uint32' } } ## # @BlockdevOptionsRaw: @@ -4025,6 +4233,7 @@ # @throttle-group: the name of the throttle-group object to use. It # must already exist. # @file: reference to or definition of the data source block device +# # Since: 2.11 ## { 'struct': 'BlockdevOptionsThrottle', @@ -4049,6 +4258,62 @@ 'base': 'BlockdevOptionsGenericFormat', 'data': { '*bottom': 'str' } } +## +# @OnCbwError: +# +# An enumeration of possible behaviors for copy-before-write operation +# failures. +# +# @break-guest-write: report the error to the guest. This way, the guest +# will not be able to overwrite areas that cannot be +# backed up, so the backup process remains valid. +# +# @break-snapshot: continue guest write. Doing so will make the provided +# snapshot state invalid and any backup or export +# process based on it will finally fail. +# +# Since: 7.1 +## +{ 'enum': 'OnCbwError', + 'data': [ 'break-guest-write', 'break-snapshot' ] } + +## +# @BlockdevOptionsCbw: +# +# Driver specific block device options for the copy-before-write driver, +# which does so called copy-before-write operations: when data is +# written to the filter, the filter first reads corresponding blocks +# from its file child and copies them to @target child. After successfully +# copying, the write request is propagated to file child. If copying +# fails, the original write request is failed too and no data is written +# to file child. +# +# @target: The target for copy-before-write operations. +# +# @bitmap: If specified, copy-before-write filter will do +# copy-before-write operations only for dirty regions of the +# bitmap. Bitmap size must be equal to length of file and +# target child of the filter. 
Note also, that bitmap is used +# only to initialize internal bitmap of the process, so further +# modifications (or removing) of specified bitmap doesn't +# influence the filter. (Since 7.0) +# +# @on-cbw-error: Behavior on failure of copy-before-write operation. +# Default is @break-guest-write. (Since 7.1) +# +# @cbw-timeout: Zero means no limit. Non-zero sets the timeout in seconds +# for copy-before-write operation. When a timeout occurs, +# the respective copy-before-write operation will fail, and +# the @on-cbw-error parameter will decide how this failure +# is handled. Default 0. (Since 7.1) +# +# Since: 6.2 +## +{ 'struct': 'BlockdevOptionsCbw', + 'base': 'BlockdevOptionsGenericFormat', + 'data': { 'target': 'BlockdevRef', '*bitmap': 'BlockDirtyBitmap', + '*on-cbw-error': 'OnCbwError', '*cbw-timeout': 'uint32' } } + ## # @BlockdevOptions: # @@ -4101,6 +4366,7 @@ 'bochs': 'BlockdevOptionsGenericFormat', 'cloop': 'BlockdevOptionsGenericFormat', 'compress': 'BlockdevOptionsGenericFormat', + 'copy-before-write':'BlockdevOptionsCbw', 'copy-on-read':'BlockdevOptionsCor', 'dmg': 'BlockdevOptionsGenericFormat', 'file': 'BlockdevOptionsFile', @@ -4108,11 +4374,13 @@ 'ftps': 'BlockdevOptionsCurlFtps', 'gluster': 'BlockdevOptionsGluster', 'host_cdrom': { 'type': 'BlockdevOptionsFile', - 'if': 'defined(HAVE_HOST_BLOCK_DEVICE)' }, + 'if': 'HAVE_HOST_BLOCK_DEVICE' }, 'host_device': { 'type': 'BlockdevOptionsFile', - 'if': 'defined(HAVE_HOST_BLOCK_DEVICE)' }, + 'if': 'HAVE_HOST_BLOCK_DEVICE' }, 'http': 'BlockdevOptionsCurlHttp', 'https': 'BlockdevOptionsCurlHttps', + 'io_uring': { 'type': 'BlockdevOptionsIoUring', + 'if': 'CONFIG_BLKIO' }, 'iscsi': 'BlockdevOptionsIscsi', 'luks': 'BlockdevOptionsLUKS', 'nbd': 'BlockdevOptionsNbd', @@ -4120,6 +4388,8 @@ 'null-aio': 'BlockdevOptionsNull', 'null-co': 'BlockdevOptionsNull', 'nvme': 'BlockdevOptionsNVMe', + 'nvme-io_uring': { 'type': 'BlockdevOptionsNvmeIoUring', + 'if': 'CONFIG_BLKIO' }, 'parallels': 'BlockdevOptionsGenericFormat', 'preallocate':'BlockdevOptionsPreallocate', 'qcow2': 'BlockdevOptionsQcow2', @@ -4129,11 +4399,21 @@ 'raw': 'BlockdevOptionsRaw', 'rbd': 'BlockdevOptionsRbd', 'replication': { 'type': 'BlockdevOptionsReplication', - 'if': 'defined(CONFIG_REPLICATION)' }, + 'if': 'CONFIG_REPLICATION' }, + 'snapshot-access': 'BlockdevOptionsGenericFormat', 'ssh': 'BlockdevOptionsSsh', 'throttle': 'BlockdevOptionsThrottle', 'vdi': 'BlockdevOptionsGenericFormat', 'vhdx': 'BlockdevOptionsGenericFormat', + 'virtio-blk-vfio-pci': + { 'type': 'BlockdevOptionsVirtioBlkVfioPci', + 'if': 'CONFIG_BLKIO' }, + 'virtio-blk-vhost-user': + { 'type': 'BlockdevOptionsVirtioBlkVhostUser', + 'if': 'CONFIG_BLKIO' }, + 'virtio-blk-vhost-vdpa': + { 'type': 'BlockdevOptionsVirtioBlkVhostVdpa', + 'if': 'CONFIG_BLKIO' }, 'vmdk': 'BlockdevOptionsGenericCOWFormat', 'vpc': 'BlockdevOptionsGenericFormat', 'vvfat': 'BlockdevOptionsVVFAT' @@ -4219,7 +4499,8 @@ # <- { "return": {} } # ## -{ 'command': 'blockdev-add', 'data': 'BlockdevOptions', 'boxed': true } +{ 'command': 'blockdev-add', 'data': 'BlockdevOptions', 'boxed': true, + 'allow-preconfig': true } ## # @blockdev-reopen: @@ -4263,7 +4544,8 @@ # Since: 6.1 ## { 'command': 'blockdev-reopen', - 'data': { 'options': ['BlockdevOptions'] } } + 'data': { 'options': ['BlockdevOptions'] }, + 'allow-preconfig': true } ## # @blockdev-del: @@ -4296,7 +4578,8 @@ # <- { "return": {} } # ## -{ 'command': 'blockdev-del', 'data': { 'node-name': 'str' } } +{ 'command': 'blockdev-del', 'data': { 'node-name': 'str' }, + 
'allow-preconfig': true } ## # @BlockdevCreateOptionsFile: @@ -4307,8 +4590,8 @@ # @size: Size of the virtual disk in bytes # @preallocation: Preallocation mode for the new image (default: off; # allowed values: off, -# falloc (if defined CONFIG_POSIX_FALLOCATE), -# full (if defined CONFIG_POSIX)) +# falloc (if CONFIG_POSIX_FALLOCATE), +# full (if CONFIG_POSIX)) # @nocow: Turn off copy-on-write (valid only on btrfs; default: off) # @extent-size-hint: Extent size hint to add to the image file; 0 for not # adding an extent size hint (default: 1 MB, since 5.1) @@ -4331,8 +4614,8 @@ # @size: Size of the virtual disk in bytes # @preallocation: Preallocation mode for the new image (default: off; # allowed values: off, -# falloc (if defined CONFIG_GLUSTERFS_FALLOCATE), -# full (if defined CONFIG_GLUSTERFS_ZEROFILL)) +# falloc (if CONFIG_GLUSTERFS_FALLOCATE), +# full (if CONFIG_GLUSTERFS_ZEROFILL)) # # Since: 2.12 ## @@ -4412,15 +4695,14 @@ ## # @BlockdevQcow2Version: # -# @v2: The original QCOW2 format as introduced in qemu 0.10 (version 2) -# @v3: The extended QCOW2 format as introduced in qemu 1.1 (version 3) +# @v2: The original QCOW2 format as introduced in qemu 0.10 (version 2) +# @v3: The extended QCOW2 format as introduced in qemu 1.1 (version 3) # # Since: 2.12 ## { 'enum': 'BlockdevQcow2Version', 'data': [ 'v2', 'v3' ] } - ## # @Qcow2CompressionType: # @@ -4432,7 +4714,7 @@ # Since: 5.1 ## { 'enum': 'Qcow2CompressionType', - 'data': [ 'zlib', { 'name': 'zstd', 'if': 'defined(CONFIG_ZSTD)' } ] } + 'data': [ 'zlib', { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] } ## # @BlockdevCreateOptionsQcow2: @@ -4527,18 +4809,18 @@ # # Subformat options for VMDK images # -# @monolithicSparse: Single file image with sparse cluster allocation +# @monolithicSparse: Single file image with sparse cluster allocation # -# @monolithicFlat: Single flat data image and a descriptor file +# @monolithicFlat: Single flat data image and a descriptor file # # @twoGbMaxExtentSparse: Data is split into 2GB (per virtual LBA) sparse extent # files, in addition to a descriptor file # -# @twoGbMaxExtentFlat: Data is split into 2GB (per virtual LBA) flat extent -# files, in addition to a descriptor file +# @twoGbMaxExtentFlat: Data is split into 2GB (per virtual LBA) flat extent +# files, in addition to a descriptor file # -# @streamOptimized: Single file image sparse cluster allocation, optimized -# for streaming over network. +# @streamOptimized: Single file image sparse cluster allocation, optimized +# for streaming over network. # # Since: 4.0 ## @@ -4576,6 +4858,8 @@ # @adapter-type: The adapter type used to fill in the descriptor. Default: ide. # @hwversion: Hardware version. The meaningful options are "4" or "6". # Default: "4". +# @toolsversion: VMware guest tools version. +# Default: "2147483647" (Since 6.2) # @zeroed-grain: Whether to enable zeroed-grain feature for sparse subformats. # Default: false. 
 #
@@ -4589,9 +4873,9 @@
             '*backing-file': 'str',
             '*adapter-type': 'BlockdevVmdkAdapterType',
             '*hwversion': 'str',
+            '*toolsversion': 'str',
             '*zeroed-grain': 'bool' } }
 
-
 ##
 # @BlockdevCreateOptionsSsh:
 #
@@ -4627,7 +4911,7 @@
 # @BlockdevVhdxSubformat:
 #
 # @dynamic: Growing image file
-# @fixed:   Preallocated fixed-size image file
+# @fixed: Preallocated fixed-size image file
 #
 # Since: 2.12
 ##
@@ -4665,7 +4949,7 @@
 # @BlockdevVpcSubformat:
 #
 # @dynamic: Growing image file
-# @fixed:   Preallocated fixed-size image file
+# @fixed: Preallocated fixed-size image file
 #
 # Since: 2.12
 ##
@@ -4728,15 +5012,16 @@
 # Starts a job to create an image format on a given node. The job is
 # automatically finalized, but a manual job-dismiss is required.
 #
-# @job-id:          Identifier for the newly created job.
+# @job-id: Identifier for the newly created job.
 #
-# @options:         Options for the image creation.
+# @options: Options for the image creation.
 #
 # Since: 3.0
 ##
 { 'command': 'blockdev-create',
   'data': { 'job-id': 'str',
-            'options': 'BlockdevCreateOptions' } }
+            'options': 'BlockdevCreateOptions' },
+  'allow-preconfig': true }
 
 ##
 # @BlockdevAmendOptionsLUKS:
@@ -4768,7 +5053,7 @@
 #
 # Options for amending an image format
 #
-# @driver:          Block driver of the node to amend.
+# @driver: Block driver of the node to amend.
 #
 # Since: 5.1
 ##
@@ -4786,17 +5071,20 @@
 # Starts a job to amend format specific options of an existing open block device
 # The job is automatically finalized, but a manual job-dismiss is required.
 #
-# @job-id:          Identifier for the newly created job.
+# @job-id: Identifier for the newly created job.
 #
-# @node-name:       Name of the block node to work on
+# @node-name: Name of the block node to work on
 #
-# @options:         Options (driver specific)
+# @options: Options (driver specific)
 #
-# @force:           Allow unsafe operations, format specific
-#                   For luks that allows erase of the last active keyslot
-#                   (permanent loss of data),
-#                   and replacement of an active keyslot
-#                   (possible loss of data if IO error happens)
+# @force: Allow unsafe operations, format specific
+#         For luks that allows erase of the last active keyslot
+#         (permanent loss of data),
+#         and replacement of an active keyslot
+#         (possible loss of data if IO error happens)
+#
+# Features:
+# @unstable: This command is experimental.
# # Since: 5.1 ## @@ -4804,7 +5092,9 @@ 'data': { 'job-id': 'str', 'node-name': 'str', 'options': 'BlockdevAmendOptions', - '*force': 'bool' } } + '*force': 'bool' }, + 'features': [ 'unstable' ], + 'allow-preconfig': true } ## # @BlockErrorAction: @@ -4822,7 +5112,6 @@ { 'enum': 'BlockErrorAction', 'data': [ 'ignore', 'report', 'stop' ] } - ## # @BLOCK_IMAGE_CORRUPTED: # @@ -4857,10 +5146,9 @@ # Example: # # <- { "event": "BLOCK_IMAGE_CORRUPTED", -# "data": { "device": "ide0-hd0", "node-name": "node0", -# "msg": "Prevented active L1 table overwrite", "offset": 196608, -# "size": 65536 }, -# "timestamp": { "seconds": 1378126126, "microseconds": 966463 } } +# "data": { "device": "", "node-name": "drive", "fatal": false, +# "msg": "L2 table offset 0x2a2a2a00 unaligned (L1 index: 0)" }, +# "timestamp": { "seconds": 1648243240, "microseconds": 906060 } } # # Since: 1.7 ## @@ -4910,7 +5198,8 @@ # "data": { "device": "ide0-hd1", # "node-name": "#block212", # "operation": "write", -# "action": "stop" }, +# "action": "stop", +# "reason": "No space left on device" }, # "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } # ## @@ -5050,7 +5339,7 @@ # # <- { "event": "BLOCK_JOB_READY", # "data": { "device": "drive0", "type": "mirror", "speed": 0, -# "len": 2097152, "offset": 2097152 } +# "len": 2097152, "offset": 2097152 }, # "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } # ## @@ -5076,8 +5365,8 @@ # # Example: # -# <- { "event": "BLOCK_JOB_WAITING", -# "data": { "device": "drive0", "type": "mirror" }, +# <- { "event": "BLOCK_JOB_PENDING", +# "data": { "type": "mirror", "id": "backup_1" }, # "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } # ## @@ -5155,7 +5444,8 @@ # ## { 'command': 'block-set-write-threshold', - 'data': { 'node-name': 'str', 'write-threshold': 'uint64' } } + 'data': { 'node-name': 'str', 'write-threshold': 'uint64' }, + 'allow-preconfig': true } ## # @x-blockdev-change: @@ -5175,16 +5465,18 @@ # # @node: the name of the node that will be added. # -# Note: this command is experimental, and its API is not stable. It -# does not support all kinds of operations, all kinds of children, nor -# all block drivers. +# Features: +# @unstable: This command is experimental, and its API is not stable. It +# does not support all kinds of operations, all kinds of +# children, nor all block drivers. # -# FIXME Removing children from a quorum node means introducing gaps in the -# child indices. This cannot be represented in the 'children' list of -# BlockdevOptionsQuorum, as returned by .bdrv_refresh_filename(). +# FIXME Removing children from a quorum node means introducing +# gaps in the child indices. This cannot be represented in the +# 'children' list of BlockdevOptionsQuorum, as returned by +# .bdrv_refresh_filename(). # -# Warning: The data in a new quorum child MUST be consistent with that of -# the rest of the array. +# Warning: The data in a new quorum child MUST be consistent +# with that of the rest of the array. # # Since: 2.7 # @@ -5213,7 +5505,9 @@ { 'command': 'x-blockdev-change', 'data' : { 'parent': 'str', '*child': 'str', - '*node': 'str' } } + '*node': 'str' }, + 'features': [ 'unstable' ], + 'allow-preconfig': true } ## # @x-blockdev-set-iothread: @@ -5230,8 +5524,9 @@ # @force: true if the node and its children should be moved when a BlockBackend # is already attached # -# Note: this command is experimental and intended for test cases that need -# control over IOThreads only. 
+# Features:
+# @unstable: This command is experimental and intended for test cases that
+#            need control over IOThreads only.
 #
 # Since: 2.12
 #
@@ -5253,7 +5548,9 @@
 { 'command': 'x-blockdev-set-iothread',
   'data' : { 'node-name': 'str',
              'iothread': 'StrOrNull',
-             '*force': 'bool' } }
+             '*force': 'bool' },
+  'features': [ 'unstable' ],
+  'allow-preconfig': true }
 
 ##
 # @QuorumOpType:
@@ -5385,7 +5682,8 @@
 #
 ##
 { 'command': 'blockdev-snapshot-internal-sync',
-  'data': 'BlockdevSnapshotInternal' }
+  'data': 'BlockdevSnapshotInternal',
+  'allow-preconfig': true }
 
 ##
 # @blockdev-snapshot-delete-internal-sync:
@@ -5432,4 +5730,5 @@
 ##
 { 'command': 'blockdev-snapshot-delete-internal-sync',
   'data': { 'device': 'str', '*id': 'str', '*name': 'str'},
-  'returns': 'SnapshotInfo' }
+  'returns': 'SnapshotInfo',
+  'allow-preconfig': true }
diff --git a/qapi/block-export.json b/qapi/block-export.json
index 0ed63442a8..4627bbc4e6 100644
--- a/qapi/block-export.json
+++ b/qapi/block-export.json
@@ -6,6 +6,7 @@
 ##
 
 { 'include': 'sockets.json' }
+{ 'include': 'block-core.json' }
 
 ##
 # @NbdServerOptions:
@@ -21,7 +22,9 @@
 #             recreated on the fly while the NBD server is active.
 #             If missing, it will default to denying access (since 4.0).
 # @max-connections: The maximum number of connections to allow at the same
-#                   time, 0 for unlimited. (since 5.2; default: 0)
+#                   time, 0 for unlimited. Setting this to 1 also stops
+#                   the server from advertising multiple client support
+#                   (since 5.2; default: 0)
 #
 # Since: 4.2
 ##
@@ -50,7 +53,9 @@
 #             recreated on the fly while the NBD server is active.
 #             If missing, it will default to denying access (since 4.0).
 # @max-connections: The maximum number of connections to allow at the same
-#                   time, 0 for unlimited. (since 5.2; default: 0)
+#                   time, 0 for unlimited. Setting this to 1 also stops
+#                   the server from advertising multiple client support
+#                   (since 5.2; default: 0).
 #
 # Returns: error if the server is already running.
 #
@@ -60,7 +65,8 @@
   'data': { 'addr': 'SocketAddressLegacy',
             '*tls-creds': 'str',
             '*tls-authz': 'str',
-            '*max-connections': 'uint32' } }
+            '*max-connections': 'uint32' },
+  'allow-preconfig': true }
 
 ##
 # @BlockExportOptionsNbdBase:
@@ -89,6 +95,7 @@
 #           @device, so the NBD client can use NBD_OPT_SET_META_CONTEXT with
 #           the metadata context name "qemu:dirty-bitmap:BITMAP" to inspect
 #           each bitmap.
+#           Since 7.1 bitmap may be specified by node/name pair.
 #
 # @allocation-depth: Also export the allocation depth map for @device, so
 #                    the NBD client can use NBD_OPT_SET_META_CONTEXT with
@@ -99,7 +106,8 @@
 ##
 { 'struct': 'BlockExportOptionsNbd',
   'base': 'BlockExportOptionsNbdBase',
-  'data': { '*bitmaps': ['str'], '*allocation-depth': 'bool' } }
+  'data': { '*bitmaps': ['BlockDirtyBitmapOrStr'],
+            '*allocation-depth': 'bool' } }
 
 ##
 # @BlockExportOptionsVhostUserBlk:
@@ -168,7 +176,28 @@
   'data': { 'mountpoint': 'str',
             '*growable': 'bool',
             '*allow-other': 'FuseExportAllowOther' },
-  'if': 'defined(CONFIG_FUSE)' }
+  'if': 'CONFIG_FUSE' }
+
+##
+# @BlockExportOptionsVduseBlk:
+#
+# A vduse-blk block export.
+#
+# @name: the name of VDUSE device (must be unique across the host).
+# @num-queues: the number of virtqueues. Defaults to 1.
+# @queue-size: the size of virtqueue. Defaults to 256.
+# @logical-block-size: Logical block size in bytes. Range [512, PAGE_SIZE]
+#                      and must be power of 2. Defaults to 512 bytes.
+# @serial: the serial number of virtio block device. Defaults to empty string.
+# +# Since: 7.1 +## +{ 'struct': 'BlockExportOptionsVduseBlk', + 'data': { 'name': 'str', + '*num-queues': 'uint16', + '*queue-size': 'uint16', + '*logical-block-size': 'size', + '*serial': 'str' } } ## # @NbdServerAddOptions: @@ -208,7 +237,8 @@ # Since: 1.3 ## { 'command': 'nbd-server-add', - 'data': 'NbdServerAddOptions', 'boxed': true, 'features': ['deprecated'] } + 'data': 'NbdServerAddOptions', 'boxed': true, 'features': ['deprecated'], + 'allow-preconfig': true } ## # @BlockExportRemoveMode: @@ -219,13 +249,13 @@ # # @hard: Drop all connections immediately and remove export. # -# Potential additional modes to be added in the future: +# TODO: Potential additional modes to be added in the future: # -# hide: Just hide export from new clients, leave existing connections as is. -# Remove export after all clients are disconnected. +# hide: Just hide export from new clients, leave existing connections as is. +# Remove export after all clients are disconnected. # -# soft: Hide export from new clients, answer with ESHUTDOWN for all further -# requests from existing clients. +# soft: Hide export from new clients, answer with ESHUTDOWN for all further +# requests from existing clients. # # Since: 2.12 ## @@ -253,7 +283,8 @@ ## { 'command': 'nbd-server-remove', 'data': {'name': 'str', '*mode': 'BlockExportRemoveMode'}, - 'features': ['deprecated'] } + 'features': ['deprecated'], + 'allow-preconfig': true } ## # @nbd-server-stop: @@ -263,7 +294,8 @@ # # Since: 1.3 ## -{ 'command': 'nbd-server-stop' } +{ 'command': 'nbd-server-stop', + 'allow-preconfig': true } ## # @BlockExportType: @@ -273,12 +305,16 @@ # @nbd: NBD export # @vhost-user-blk: vhost-user-blk export (since 5.2) # @fuse: FUSE export (since: 6.0) +# @vduse-blk: vduse-blk export (since 7.1) # # Since: 4.2 ## { 'enum': 'BlockExportType', - 'data': [ 'nbd', 'vhost-user-blk', - { 'name': 'fuse', 'if': 'defined(CONFIG_FUSE)' } ] } + 'data': [ 'nbd', + { 'name': 'vhost-user-blk', + 'if': 'CONFIG_VHOST_USER_BLK_SERVER' }, + { 'name': 'fuse', 'if': 'CONFIG_FUSE' }, + { 'name': 'vduse-blk', 'if': 'CONFIG_VDUSE_BLK_EXPORT' } ] } ## # @BlockExportOptions: @@ -319,9 +355,12 @@ 'discriminator': 'type', 'data': { 'nbd': 'BlockExportOptionsNbd', - 'vhost-user-blk': 'BlockExportOptionsVhostUserBlk', + 'vhost-user-blk': { 'type': 'BlockExportOptionsVhostUserBlk', + 'if': 'CONFIG_VHOST_USER_BLK_SERVER' }, 'fuse': { 'type': 'BlockExportOptionsFuse', - 'if': 'defined(CONFIG_FUSE)' } + 'if': 'CONFIG_FUSE' }, + 'vduse-blk': { 'type': 'BlockExportOptionsVduseBlk', + 'if': 'CONFIG_VDUSE_BLK_EXPORT' } } } ## @@ -332,7 +371,8 @@ # Since: 5.2 ## { 'command': 'block-export-add', - 'data': 'BlockExportOptions', 'boxed': true } + 'data': 'BlockExportOptions', 'boxed': true, + 'allow-preconfig': true } ## # @block-export-del: @@ -352,7 +392,8 @@ # Since: 5.2 ## { 'command': 'block-export-del', - 'data': { 'id': 'str', '*mode': 'BlockExportRemoveMode' } } + 'data': { 'id': 'str', '*mode': 'BlockExportRemoveMode' }, + 'allow-preconfig': true } ## # @BLOCK_EXPORT_DELETED: @@ -381,7 +422,7 @@ # block-export-del command, but before the shutdown has # completed) # -# Since: 5.2 +# Since: 5.2 ## { 'struct': 'BlockExportInfo', 'data': { 'id': 'str', @@ -396,4 +437,5 @@ # # Since: 5.2 ## -{ 'command': 'query-block-exports', 'returns': ['BlockExportInfo'] } +{ 'command': 'query-block-exports', 'returns': ['BlockExportInfo'], + 'allow-preconfig': true } diff --git a/qapi/block.json b/qapi/block.json index 82fcf2c914..5fe068f903 100644 --- a/qapi/block.json +++ 
b/qapi/block.json @@ -50,9 +50,9 @@ # # Type of Floppy drive to be emulated by the Floppy Disk Controller. # -# @144: 1.44MB 3.5" drive -# @288: 2.88MB 3.5" drive -# @120: 1.2MB 5.25" drive +# @144: 1.44MB 3.5" drive +# @288: 2.88MB 3.5" drive +# @120: 1.2MB 5.25" drive # @none: No drive connected # @auto: Automatically determined by inserted media at boot # @@ -105,7 +105,8 @@ # # Returns: - Nothing on success # - If @device is not a valid block device, DeviceNotFound -# Notes: Ejecting a device with no media results in success +# +# Notes: Ejecting a device with no media results in success # # Since: 0.14 # @@ -285,7 +286,6 @@ 'data': { 'id': 'str', 'node-name': 'str'} } - ## # @BlockdevChangeReadOnlyMode: # @@ -299,12 +299,10 @@ # @read-write: Makes the device writable # # Since: 2.3 -# ## { 'enum': 'BlockdevChangeReadOnlyMode', 'data': ['retain', 'read-only', 'read-write'] } - ## # @blockdev-change-medium: # @@ -326,6 +324,11 @@ # @read-only-mode: change the read-only mode of the device; defaults # to 'retain' # +# @force: if false (the default), an eject request through blockdev-open-tray +# will be sent to the guest if it has locked the tray (and the tray +# will not be opened immediately); if true, the tray will be opened +# regardless of whether it is locked. (since 7.1) +# # Features: # @deprecated: Member @device is deprecated. Use @id instead. # @@ -367,9 +370,9 @@ '*id': 'str', 'filename': 'str', '*format': 'str', + '*force': 'bool', '*read-only-mode': 'BlockdevChangeReadOnlyMode' } } - ## # @DEVICE_TRAY_MOVED: # @@ -493,7 +496,8 @@ # <- { "return": {} } ## { 'command': 'block_set_io_throttle', 'boxed': true, - 'data': 'BlockIOThrottle' } + 'data': 'BlockIOThrottle', + 'allow-preconfig': true } ## # @block-latency-histogram-set: @@ -569,4 +573,5 @@ '*boundaries': ['uint64'], '*boundaries-read': ['uint64'], '*boundaries-write': ['uint64'], - '*boundaries-flush': ['uint64'] } } + '*boundaries-flush': ['uint64'] }, + 'allow-preconfig': true } diff --git a/qapi/char.json b/qapi/char.json index adf2685f68..923dc5056d 100644 --- a/qapi/char.json +++ b/qapi/char.json @@ -216,7 +216,7 @@ # # Configuration info for file chardevs. # -# @in: The name of the input file +# @in: The name of the input file # @out: The name of the output file # @append: Open the file in append mode (default false to # truncate) (Since 2.6) @@ -329,7 +329,6 @@ 'data': { '*signal': 'bool' }, 'base': 'ChardevCommon' } - ## # @ChardevSpiceChannel: # @@ -342,7 +341,7 @@ { 'struct': 'ChardevSpiceChannel', 'data': { 'type': 'str' }, 'base': 'ChardevCommon', - 'if': 'defined(CONFIG_SPICE)' } + 'if': 'CONFIG_SPICE' } ## # @ChardevSpicePort: @@ -356,17 +355,31 @@ { 'struct': 'ChardevSpicePort', 'data': { 'fqdn': 'str' }, 'base': 'ChardevCommon', - 'if': 'defined(CONFIG_SPICE)' } + 'if': 'CONFIG_SPICE' } + +## +# @ChardevDBus: +# +# Configuration info for DBus chardevs. +# +# @name: name of the channel (following docs/spice-port-fqdn.txt) +# +# Since: 7.0 +## +{ 'struct': 'ChardevDBus', + 'data': { 'name': 'str' }, + 'base': 'ChardevCommon', + 'if': 'CONFIG_DBUS_DISPLAY' } ## # @ChardevVC: # # Configuration info for virtual console chardevs. # -# @width: console width, in pixels +# @width: console width, in pixels # @height: console height, in pixels -# @cols: console width, in chars -# @rows: console height, in chars +# @cols: console width, in chars +# @rows: console height, in chars # # Since: 1.5 ## @@ -399,47 +412,205 @@ # @clipboard: enable/disable clipboard, default is disabled. 
 #
 # Since: 6.1
-#
 ##
 { 'struct': 'ChardevQemuVDAgent',
   'data': { '*mouse': 'bool', '*clipboard': 'bool' },
   'base': 'ChardevCommon',
-  'if': 'defined(CONFIG_SPICE_PROTOCOL)' }
+  'if': 'CONFIG_SPICE_PROTOCOL' }
+
+##
+# @ChardevBackendKind:
+#
+# @pipe: Since 1.5
+# @udp: Since 1.5
+# @mux: Since 1.5
+# @msmouse: Since 1.5
+# @wctablet: Since 2.9
+# @braille: Since 1.5
+# @testdev: Since 2.2
+# @stdio: Since 1.5
+# @console: Since 1.5
+# @spicevmc: Since 1.5
+# @spiceport: Since 1.5
+# @qemu-vdagent: Since 6.1
+# @dbus: Since 7.0
+# @vc: v1.5
+# @ringbuf: Since 1.6
+# @memory: Since 1.5
+#
+# Since: 1.4
+##
+{ 'enum': 'ChardevBackendKind',
+  'data': [ 'file',
+            'serial',
+            'parallel',
+            'pipe',
+            'socket',
+            'udp',
+            'pty',
+            'null',
+            'mux',
+            'msmouse',
+            'wctablet',
+            'braille',
+            'testdev',
+            'stdio',
+            'console',
+            { 'name': 'spicevmc', 'if': 'CONFIG_SPICE' },
+            { 'name': 'spiceport', 'if': 'CONFIG_SPICE' },
+            { 'name': 'qemu-vdagent', 'if': 'CONFIG_SPICE_PROTOCOL' },
+            { 'name': 'dbus', 'if': 'CONFIG_DBUS_DISPLAY' },
+            'vc',
+            'ringbuf',
+            # next one is just for compatibility
+            'memory' ] }
+
+##
+# @ChardevFileWrapper:
+#
+# Since: 1.4
+##
+{ 'struct': 'ChardevFileWrapper',
+  'data': { 'data': 'ChardevFile' } }
+
+##
+# @ChardevHostdevWrapper:
+#
+# Since: 1.4
+##
+{ 'struct': 'ChardevHostdevWrapper',
+  'data': { 'data': 'ChardevHostdev' } }
+
+##
+# @ChardevSocketWrapper:
+#
+# Since: 1.4
+##
+{ 'struct': 'ChardevSocketWrapper',
+  'data': { 'data': 'ChardevSocket' } }
+
+##
+# @ChardevUdpWrapper:
+#
+# Since: 1.5
+##
+{ 'struct': 'ChardevUdpWrapper',
+  'data': { 'data': 'ChardevUdp' } }
+
+##
+# @ChardevCommonWrapper:
+#
+# Since: 2.6
+##
+{ 'struct': 'ChardevCommonWrapper',
+  'data': { 'data': 'ChardevCommon' } }
+
+##
+# @ChardevMuxWrapper:
+#
+# Since: 1.5
+##
+{ 'struct': 'ChardevMuxWrapper',
+  'data': { 'data': 'ChardevMux' } }
+
+##
+# @ChardevStdioWrapper:
+#
+# Since: 1.5
+##
+{ 'struct': 'ChardevStdioWrapper',
+  'data': { 'data': 'ChardevStdio' } }
+
+##
+# @ChardevSpiceChannelWrapper:
+#
+# Since: 1.5
+##
+{ 'struct': 'ChardevSpiceChannelWrapper',
+  'data': { 'data': 'ChardevSpiceChannel' },
+  'if': 'CONFIG_SPICE' }
+
+##
+# @ChardevSpicePortWrapper:
+#
+# Since: 1.5
+##
+{ 'struct': 'ChardevSpicePortWrapper',
+  'data': { 'data': 'ChardevSpicePort' },
+  'if': 'CONFIG_SPICE' }
+
+##
+# @ChardevQemuVDAgentWrapper:
+#
+# Since: 6.1
+##
+{ 'struct': 'ChardevQemuVDAgentWrapper',
+  'data': { 'data': 'ChardevQemuVDAgent' },
+  'if': 'CONFIG_SPICE_PROTOCOL' }
+
+##
+# @ChardevDBusWrapper:
+#
+# Since: 7.0
+##
+{ 'struct': 'ChardevDBusWrapper',
+  'data': { 'data': 'ChardevDBus' },
+  'if': 'CONFIG_DBUS_DISPLAY' }
+
+##
+# @ChardevVCWrapper:
+#
+# Since: 1.5
+##
+{ 'struct': 'ChardevVCWrapper',
+  'data': { 'data': 'ChardevVC' } }
+
+##
+# @ChardevRingbufWrapper:
+#
+# Since: 1.5
+##
+{ 'struct': 'ChardevRingbufWrapper',
+  'data': { 'data': 'ChardevRingbuf' } }
 
 ##
 # @ChardevBackend:
 #
 # Configuration info for the new chardev backend.
# -# Since: 1.4 (testdev since 2.2, wctablet since 2.9, vdagent since 6.1) +# Since: 1.4 ## { 'union': 'ChardevBackend', - 'data': { 'file': 'ChardevFile', - 'serial': 'ChardevHostdev', - 'parallel': 'ChardevHostdev', - 'pipe': 'ChardevHostdev', - 'socket': 'ChardevSocket', - 'udp': 'ChardevUdp', - 'pty': 'ChardevCommon', - 'null': 'ChardevCommon', - 'mux': 'ChardevMux', - 'msmouse': 'ChardevCommon', - 'wctablet': 'ChardevCommon', - 'braille': 'ChardevCommon', - 'testdev': 'ChardevCommon', - 'stdio': 'ChardevStdio', - 'console': 'ChardevCommon', - 'spicevmc': { 'type': 'ChardevSpiceChannel', - 'if': 'defined(CONFIG_SPICE)' }, - 'spiceport': { 'type': 'ChardevSpicePort', - 'if': 'defined(CONFIG_SPICE)' }, - 'qemu-vdagent': { 'type': 'ChardevQemuVDAgent', - 'if': 'defined(CONFIG_SPICE_PROTOCOL)' }, - 'vc': 'ChardevVC', - 'ringbuf': 'ChardevRingbuf', + 'base': { 'type': 'ChardevBackendKind' }, + 'discriminator': 'type', + 'data': { 'file': 'ChardevFileWrapper', + 'serial': 'ChardevHostdevWrapper', + 'parallel': 'ChardevHostdevWrapper', + 'pipe': 'ChardevHostdevWrapper', + 'socket': 'ChardevSocketWrapper', + 'udp': 'ChardevUdpWrapper', + 'pty': 'ChardevCommonWrapper', + 'null': 'ChardevCommonWrapper', + 'mux': 'ChardevMuxWrapper', + 'msmouse': 'ChardevCommonWrapper', + 'wctablet': 'ChardevCommonWrapper', + 'braille': 'ChardevCommonWrapper', + 'testdev': 'ChardevCommonWrapper', + 'stdio': 'ChardevStdioWrapper', + 'console': 'ChardevCommonWrapper', + 'spicevmc': { 'type': 'ChardevSpiceChannelWrapper', + 'if': 'CONFIG_SPICE' }, + 'spiceport': { 'type': 'ChardevSpicePortWrapper', + 'if': 'CONFIG_SPICE' }, + 'qemu-vdagent': { 'type': 'ChardevQemuVDAgentWrapper', + 'if': 'CONFIG_SPICE_PROTOCOL' }, + 'dbus': { 'type': 'ChardevDBusWrapper', + 'if': 'CONFIG_DBUS_DISPLAY' }, + 'vc': 'ChardevVCWrapper', + 'ringbuf': 'ChardevRingbufWrapper', # next one is just for compatibility - 'memory': 'ChardevRingbuf' } } + 'memory': 'ChardevRingbufWrapper' } } ## # @ChardevReturn: diff --git a/qapi/common.json b/qapi/common.json index 7c976296f0..356db3f670 100644 --- a/qapi/common.json +++ b/qapi/common.json @@ -192,8 +192,17 @@ # Keys to toggle input-linux between host and guest. # # Since: 4.0 -# ## { 'enum': 'GrabToggleKeys', 'data': [ 'ctrl-ctrl', 'alt-alt', 'shift-shift','meta-meta', 'scrolllock', 'ctrl-scrolllock' ] } + +## +# @HumanReadableText: +# +# @human-readable-text: Formatted output intended for humans. +# +# Since: 6.2 +## +{ 'struct': 'HumanReadableText', + 'data': { 'human-readable-text': 'str' } } diff --git a/qapi/compat.json b/qapi/compat.json index ae3afc22df..39b52872d5 100644 --- a/qapi/compat.json +++ b/qapi/compat.json @@ -1,4 +1,5 @@ # -*- Mode: Python -*- +# vim: filetype=python ## # = Compatibility policy @@ -40,13 +41,22 @@ # # Limitation: covers only syntactic aspects of QMP, i.e. stuff tagged # with feature 'deprecated'. We may want to extend it to cover -# semantic aspects, CLI, and experimental features. +# semantic aspects and CLI. +# +# Limitation: deprecated-output policy @hide is not implemented for +# enumeration values. They behave the same as with policy @accept. 
# # @deprecated-input: how to handle deprecated input (default 'accept') # @deprecated-output: how to handle deprecated output (default 'accept') +# @unstable-input: how to handle unstable input (default 'accept') +# (since 6.2) +# @unstable-output: how to handle unstable output (default 'accept') +# (since 6.2) # # Since: 6.0 ## { 'struct': 'CompatPolicy', 'data': { '*deprecated-input': 'CompatPolicyInput', - '*deprecated-output': 'CompatPolicyOutput' } } + '*deprecated-output': 'CompatPolicyOutput', + '*unstable-input': 'CompatPolicyInput', + '*unstable-output': 'CompatPolicyOutput' } } diff --git a/qapi/control.json b/qapi/control.json index 71a838d49e..afca2043af 100644 --- a/qapi/control.json +++ b/qapi/control.json @@ -33,7 +33,6 @@ # all the QMP capabilities will be turned off by default. # # Since: 0.13 -# ## { 'command': 'qmp_capabilities', 'data': { '*enable': [ 'QMPCapability' ] }, @@ -49,7 +48,6 @@ # (Please refer to qmp-spec.txt for more information on OOB) # # Since: 2.12 -# ## { 'enum': 'QMPCapability', 'data': [ 'oob' ] } @@ -70,7 +68,6 @@ { 'struct': 'VersionTriple', 'data': {'major': 'int', 'minor': 'int', 'micro': 'int'} } - ## # @VersionInfo: # @@ -195,14 +192,14 @@ # # Options to be used for adding a new monitor. # -# @id: Name of the monitor +# @id: Name of the monitor # -# @mode: Selects the monitor mode (default: readline in the system -# emulator, control in qemu-storage-daemon) +# @mode: Selects the monitor mode (default: readline in the system +# emulator, control in qemu-storage-daemon) # -# @pretty: Enables pretty printing (QMP only) +# @pretty: Enables pretty printing (QMP only) # -# @chardev: Name of a character device to expose the monitor on +# @chardev: Name of a character device to expose the monitor on # # Since: 5.0 ## diff --git a/qapi/crypto.json b/qapi/crypto.json index 1ec54c15ca..653e6e3f3d 100644 --- a/qapi/crypto.json +++ b/qapi/crypto.json @@ -24,7 +24,6 @@ 'prefix': 'QCRYPTO_TLS_CREDS_ENDPOINT', 'data': ['client', 'server']} - ## # @QCryptoSecretFormat: # @@ -32,13 +31,13 @@ # # @raw: raw bytes. When encoded in JSON only valid UTF-8 sequences can be used # @base64: arbitrary base64 encoded binary data +# # Since: 2.6 ## { 'enum': 'QCryptoSecretFormat', 'prefix': 'QCRYPTO_SECRET_FORMAT', 'data': ['raw', 'base64']} - ## # @QCryptoHashAlgorithm: # @@ -51,13 +50,13 @@ # @sha384: SHA-384. (since 2.7) # @sha512: SHA-512. (since 2.7) # @ripemd160: RIPEMD-160. 
(since 2.7) +# # Since: 2.6 ## { 'enum': 'QCryptoHashAlgorithm', 'prefix': 'QCRYPTO_HASH_ALG', 'data': ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', 'ripemd160']} - ## # @QCryptoCipherAlgorithm: # @@ -75,6 +74,7 @@ # @twofish-128: Twofish with 128 bit / 16 byte keys # @twofish-192: Twofish with 192 bit / 24 byte keys # @twofish-256: Twofish with 256 bit / 32 byte keys +# # Since: 2.6 ## { 'enum': 'QCryptoCipherAlgorithm', @@ -85,7 +85,6 @@ 'serpent-128', 'serpent-192', 'serpent-256', 'twofish-128', 'twofish-192', 'twofish-256']} - ## # @QCryptoCipherMode: # @@ -95,13 +94,13 @@ # @cbc: Cipher Block Chaining # @xts: XEX with tweaked code book and ciphertext stealing # @ctr: Counter (Since 2.8) +# # Since: 2.6 ## { 'enum': 'QCryptoCipherMode', 'prefix': 'QCRYPTO_CIPHER_MODE', 'data': ['ecb', 'cbc', 'xts', 'ctr']} - ## # @QCryptoIVGenAlgorithm: # @@ -114,6 +113,7 @@ # @plain: 64-bit sector number truncated to 32-bits # @plain64: 64-bit sector number # @essiv: 64-bit sector number encrypted with a hash of the encryption key +# # Since: 2.6 ## { 'enum': 'QCryptoIVGenAlgorithm', @@ -170,12 +170,12 @@ # @key-secret: the ID of a QCryptoSecret object providing the # decryption key. Mandatory except when probing image for # metadata only. +# # Since: 2.6 ## { 'struct': 'QCryptoBlockOptionsLUKS', 'data': { '*key-secret': 'str' }} - ## # @QCryptoBlockCreateOptionsLUKS: # @@ -194,6 +194,7 @@ # @iter-time: number of milliseconds to spend in # PBKDF passphrase processing. Currently defaults # to 2000. (since 2.8) +# # Since: 2.6 ## { 'struct': 'QCryptoBlockCreateOptionsLUKS', @@ -205,7 +206,6 @@ '*hash-alg': 'QCryptoHashAlgorithm', '*iter-time': 'int'}} - ## # @QCryptoBlockOpenOptions: # @@ -220,7 +220,6 @@ 'data': { 'qcow': 'QCryptoBlockOptionsQCow', 'luks': 'QCryptoBlockOptionsLUKS' } } - ## # @QCryptoBlockCreateOptions: # @@ -235,7 +234,6 @@ 'data': { 'qcow': 'QCryptoBlockOptionsQCow', 'luks': 'QCryptoBlockCreateOptionsLUKS' } } - ## # @QCryptoBlockInfoBase: # @@ -249,7 +247,6 @@ { 'struct': 'QCryptoBlockInfoBase', 'data': { 'format': 'QCryptoBlockFormat' }} - ## # @QCryptoBlockInfoLUKSSlot: # @@ -269,7 +266,6 @@ '*stripes': 'int', 'key-offset': 'int' } } - ## # @QCryptoBlockInfoLUKS: # @@ -315,15 +311,14 @@ # # Defines state of keyslots that are affected by the update # -# @active: The slots contain the given password and marked as active -# @inactive: The slots are erased (contain garbage) and marked as inactive +# @active: The slots contain the given password and marked as active +# @inactive: The slots are erased (contain garbage) and marked as inactive # # Since: 5.1 ## { 'enum': 'QCryptoBlockLUKSKeyslotState', 'data': [ 'active', 'inactive' ] } - ## # @QCryptoBlockAmendOptionsLUKS: # @@ -332,33 +327,32 @@ # # @state: the desired state of the keyslots # -# @new-secret: The ID of a QCryptoSecret object providing the password to be -# written into added active keyslots +# @new-secret: The ID of a QCryptoSecret object providing the password to be +# written into added active keyslots # -# @old-secret: Optional (for deactivation only) -# If given will deactivate all keyslots that -# match password located in QCryptoSecret with this ID +# @old-secret: Optional (for deactivation only) +# If given will deactivate all keyslots that +# match password located in QCryptoSecret with this ID # -# @iter-time: Optional (for activation only) -# Number of milliseconds to spend in -# PBKDF passphrase processing for the newly activated keyslot. -# Currently defaults to 2000. 
+# @iter-time: Optional (for activation only)
+#             Number of milliseconds to spend in
+#             PBKDF passphrase processing for the newly activated keyslot.
+#             Currently defaults to 2000.
 #
-# @keyslot:       Optional. ID of the keyslot to activate/deactivate.
-#                 For keyslot activation, keyslot should not be active already
-#                 (this is unsafe to update an active keyslot),
-#                 but possible if 'force' parameter is given.
-#                 If keyslot is not given, first free keyslot will be written.
+# @keyslot: Optional. ID of the keyslot to activate/deactivate.
+#           For keyslot activation, keyslot should not be active already
+#           (this is unsafe to update an active keyslot),
+#           but possible if 'force' parameter is given.
+#           If keyslot is not given, first free keyslot will be written.
 #
-#                 For keyslot deactivation, this parameter specifies the exact
-#                 keyslot to deactivate
+#           For keyslot deactivation, this parameter specifies the exact
+#           keyslot to deactivate
 #
-# @secret:        Optional. The ID of a QCryptoSecret object providing the
-#                 password to use to retrieve current master key.
-#                 Defaults to the same secret that was used to open the image
+# @secret: Optional. The ID of a QCryptoSecret object providing the
+#          password to use to retrieve current master key.
+#          Defaults to the same secret that was used to open the image
 #
-#
-# Since 5.1
+# Since: 5.1
 ##
 { 'struct': 'QCryptoBlockAmendOptionsLUKS',
   'data': { 'state': 'QCryptoBlockLUKSKeyslotState',
@@ -540,3 +534,67 @@
   'data': { '*loaded': { 'type': 'bool', 'features': ['deprecated'] },
             '*sanity-check': 'bool',
             '*passwordid': 'str' } }
+##
+# @QCryptoAkCipherAlgorithm:
+#
+# The supported algorithms for asymmetric encryption ciphers
+#
+# @rsa: RSA algorithm
+#
+# Since: 7.1
+##
+{ 'enum': 'QCryptoAkCipherAlgorithm',
+  'prefix': 'QCRYPTO_AKCIPHER_ALG',
+  'data': ['rsa']}
+
+##
+# @QCryptoAkCipherKeyType:
+#
+# The type of asymmetric keys.
+#
+# Since: 7.1
+##
+{ 'enum': 'QCryptoAkCipherKeyType',
+  'prefix': 'QCRYPTO_AKCIPHER_KEY_TYPE',
+  'data': ['public', 'private']}
+
+##
+# @QCryptoRSAPaddingAlgorithm:
+#
+# The padding algorithm for RSA.
+#
+# @raw: no padding used
+# @pkcs1: pkcs1#v1.5
+#
+# Since: 7.1
+##
+{ 'enum': 'QCryptoRSAPaddingAlgorithm',
+  'prefix': 'QCRYPTO_RSA_PADDING_ALG',
+  'data': ['raw', 'pkcs1']}
+
+##
+# @QCryptoAkCipherOptionsRSA:
+#
+# Specific parameters for RSA algorithm.
+#
+# @hash-alg: QCryptoHashAlgorithm
+# @padding-alg: QCryptoRSAPaddingAlgorithm
+#
+# Since: 7.1
+##
+{ 'struct': 'QCryptoAkCipherOptionsRSA',
+  'data': { 'hash-alg':'QCryptoHashAlgorithm',
+            'padding-alg': 'QCryptoRSAPaddingAlgorithm'}}
+
+##
+# @QCryptoAkCipherOptions:
+#
+# The options that are available for all asymmetric key algorithms
+# when creating a new QCryptoAkCipher.
+# +# Since: 7.1 +## +{ 'union': 'QCryptoAkCipherOptions', + 'base': { 'alg': 'QCryptoAkCipherAlgorithm' }, + 'discriminator': 'alg', + 'data': { 'rsa': 'QCryptoAkCipherOptionsRSA' }} diff --git a/qapi/dump.json b/qapi/dump.json index f7c4267e3f..6fc215dd47 100644 --- a/qapi/dump.json +++ b/qapi/dump.json @@ -83,7 +83,7 @@ # Example: # # -> { "execute": "dump-guest-memory", -# "arguments": { "protocol": "fd:dump" } } +# "arguments": { "paging": false, "protocol": "fd:dump" } } # <- { "return": {} } # ## @@ -161,9 +161,10 @@ # # Example: # -# { "event": "DUMP_COMPLETED", -# "data": {"result": {"total": 1090650112, "status": "completed", -# "completed": 1090650112} } } +# <- { "event": "DUMP_COMPLETED", +# "data": { "result": { "total": 1090650112, "status": "completed", +# "completed": 1090650112 } }, +# "timestamp": { "seconds": 1648244171, "microseconds": 950316 } } # ## { 'event': 'DUMP_COMPLETED' , @@ -185,8 +186,8 @@ # # Returns the available formats for dump-guest-memory # -# Returns: A @DumpGuestMemoryCapability object listing available formats for -# dump-guest-memory +# Returns: A @DumpGuestMemoryCapability object listing available formats for +# dump-guest-memory # # Since: 2.0 # @@ -194,7 +195,7 @@ # # -> { "execute": "query-dump-guest-memory-capability" } # <- { "return": { "formats": -# ["elf", "kdump-zlib", "kdump-lzo", "kdump-snappy"] } +# ["elf", "kdump-zlib", "kdump-lzo", "kdump-snappy"] } } # ## { 'command': 'query-dump-guest-memory-capability', diff --git a/qapi/introspect.json b/qapi/introspect.json index 39bd303778..183148b2e9 100644 --- a/qapi/introspect.json +++ b/qapi/introspect.json @@ -142,14 +142,38 @@ # # Additional SchemaInfo members for meta-type 'enum'. # -# @values: the enumeration type's values, in no particular order. +# @members: the enum type's members, in no particular order +# (since 6.2). +# +# @values: the enumeration type's member names, in no particular order. +# Redundant with @members. Just for backward compatibility. +# +# Features: +# @deprecated: Member @values is deprecated. Use @members instead. # # Values of this type are JSON string on the wire. # # Since: 2.5 ## { 'struct': 'SchemaInfoEnum', - 'data': { 'values': ['str'] } } + 'data': { 'members': [ 'SchemaInfoEnumMember' ], + 'values': { 'type': [ 'str' ], + 'features': [ 'deprecated' ] } } } + +## +# @SchemaInfoEnumMember: +# +# An object member. +# +# @name: the member's name, as defined in the QAPI schema. +# +# @features: names of features associated with the member, in no +# particular order. 
+# +# Since: 6.2 +## +{ 'struct': 'SchemaInfoEnumMember', + 'data': { 'name': 'str', '*features': [ 'str' ] } } ## # @SchemaInfoArray: diff --git a/qapi/job.json b/qapi/job.json index 1a6ef03451..d5f84e9615 100644 --- a/qapi/job.json +++ b/qapi/job.json @@ -173,7 +173,6 @@ ## { 'command': 'job-cancel', 'data': { 'id': 'str' } } - ## # @job-complete: # diff --git a/qapi/machine-target.json b/qapi/machine-target.json index e7811654b7..2e267fa458 100644 --- a/qapi/machine-target.json +++ b/qapi/machine-target.json @@ -54,7 +54,6 @@ { 'enum': 'CpuModelExpansionType', 'data': [ 'static', 'full' ] } - ## # @CpuModelCompareResult: # @@ -89,7 +88,7 @@ ## { 'struct': 'CpuModelBaselineInfo', 'data': { 'model': 'CpuModelInfo' }, - 'if': 'defined(TARGET_S390X)' } + 'if': 'TARGET_S390X' } ## # @CpuModelCompareInfo: @@ -112,7 +111,7 @@ { 'struct': 'CpuModelCompareInfo', 'data': { 'result': 'CpuModelCompareResult', 'responsible-properties': ['str'] }, - 'if': 'defined(TARGET_S390X)' } + 'if': 'TARGET_S390X' } ## # @query-cpu-model-comparison: @@ -156,7 +155,7 @@ { 'command': 'query-cpu-model-comparison', 'data': { 'modela': 'CpuModelInfo', 'modelb': 'CpuModelInfo' }, 'returns': 'CpuModelCompareInfo', - 'if': 'defined(TARGET_S390X)' } + 'if': 'TARGET_S390X' } ## # @query-cpu-model-baseline: @@ -200,7 +199,7 @@ 'data': { 'modela': 'CpuModelInfo', 'modelb': 'CpuModelInfo' }, 'returns': 'CpuModelBaselineInfo', - 'if': 'defined(TARGET_S390X)' } + 'if': 'TARGET_S390X' } ## # @CpuModelExpansionInfo: @@ -213,7 +212,9 @@ ## { 'struct': 'CpuModelExpansionInfo', 'data': { 'model': 'CpuModelInfo' }, - 'if': 'defined(TARGET_S390X) || defined(TARGET_I386) || defined(TARGET_ARM)' } + 'if': { 'any': [ 'TARGET_S390X', + 'TARGET_I386', + 'TARGET_ARM' ] } } ## # @query-cpu-model-expansion: @@ -252,7 +253,9 @@ 'data': { 'type': 'CpuModelExpansionType', 'model': 'CpuModelInfo' }, 'returns': 'CpuModelExpansionInfo', - 'if': 'defined(TARGET_S390X) || defined(TARGET_I386) || defined(TARGET_ARM)' } + 'if': { 'any': [ 'TARGET_S390X', + 'TARGET_I386', + 'TARGET_ARM' ] } } ## # @CpuDefinitionInfo: @@ -316,7 +319,12 @@ 'typename': 'str', '*alias-of' : 'str', 'deprecated' : 'bool' }, - 'if': 'defined(TARGET_PPC) || defined(TARGET_ARM) || defined(TARGET_I386) || defined(TARGET_S390X) || defined(TARGET_MIPS)' } + 'if': { 'any': [ 'TARGET_PPC', + 'TARGET_ARM', + 'TARGET_I386', + 'TARGET_S390X', + 'TARGET_MIPS', + 'TARGET_LOONGARCH64' ] } } ## # @query-cpu-definitions: @@ -328,4 +336,9 @@ # Since: 1.2 ## { 'command': 'query-cpu-definitions', 'returns': ['CpuDefinitionInfo'], - 'if': 'defined(TARGET_PPC) || defined(TARGET_ARM) || defined(TARGET_I386) || defined(TARGET_S390X) || defined(TARGET_MIPS)' } + 'if': { 'any': [ 'TARGET_PPC', + 'TARGET_ARM', + 'TARGET_I386', + 'TARGET_S390X', + 'TARGET_MIPS', + 'TARGET_LOONGARCH64' ] } } diff --git a/qapi/machine.json b/qapi/machine.json index 157712f006..b9228a5e46 100644 --- a/qapi/machine.json +++ b/qapi/machine.json @@ -30,7 +30,7 @@ ## { 'enum' : 'SysEmuTarget', 'data' : [ 'aarch64', 'alpha', 'arm', 'avr', 'cris', 'hppa', 'i386', - 'm68k', 'microblaze', 'microblazeel', 'mips', 'mips64', + 'loongarch64', 'm68k', 'microblaze', 'microblazeel', 'mips', 'mips64', 'mips64el', 'mipsel', 'nios2', 'or1k', 'ppc', 'ppc64', 'riscv32', 'riscv64', 'rx', 's390x', 'sh4', 'sh4eb', 'sparc', 'sparc64', 'tricore', @@ -77,7 +77,6 @@ # additional fields will be listed (since 3.0) # # Since: 2.12 -# ## { 'union' : 'CpuInfoFast', 'base' : { 'cpu-index' : 'int', @@ -109,7 +108,6 @@ # "socket-id": 0 # }, # "qom-path": 
"/machine/unattached/device[0]", -# "arch":"x86", # "target":"x86_64", # "cpu-index": 0 # }, @@ -121,7 +119,6 @@ # "socket-id": 1 # }, # "qom-path": "/machine/unattached/device[2]", -# "arch":"x86", # "target":"x86_64", # "cpu-index": 1 # } @@ -301,6 +298,7 @@ # returning does not indicate that a guest has accepted the request or # that it has shut down. Many guests will respond to this command by # prompting the user in some way. +# # Example: # # -> { "execute": "system_powerdown" } @@ -317,9 +315,9 @@ # query-current-machine), wake-up guest from suspend if the guest is # in SUSPENDED state. Return an error otherwise. # -# Since: 1.1 +# Since: 1.1 # -# Returns: nothing. +# Returns: nothing. # # Note: prior to 4.0, this command does nothing in case the guest # isn't suspended. @@ -370,9 +368,9 @@ # Injects a Non-Maskable Interrupt into the default CPU (x86/s390) or all CPUs (ppc64). # The command fails when the guest doesn't support injecting. # -# Returns: If successful, nothing +# Returns: If successful, nothing # -# Since: 0.14 +# Since: 0.14 # # Note: prior to 2.1, this command was only supported for x86 and s390 VMs # @@ -504,6 +502,40 @@ 'dst': 'uint16', 'val': 'uint8' }} +## +# @CXLFixedMemoryWindowOptions: +# +# Create a CXL Fixed Memory Window +# +# @size: Size of the Fixed Memory Window in bytes. Must be a multiple +# of 256MiB. +# @interleave-granularity: Number of contiguous bytes for which +# accesses will go to a given interleave target. +# Accepted values [256, 512, 1k, 2k, 4k, 8k, 16k] +# @targets: Target root bridge IDs from -device ...,id= for each root +# bridge. +# +# Since 7.1 +## +{ 'struct': 'CXLFixedMemoryWindowOptions', + 'data': { + 'size': 'size', + '*interleave-granularity': 'size', + 'targets': ['str'] }} + +## +# @CXLFMWProperties: +# +# List of CXL Fixed Memory Windows. +# +# @cxl-fmw: List of CXLFixedMemoryWindowOptions +# +# Since 7.1 +## +{ 'struct' : 'CXLFMWProperties', + 'data': { 'cxl-fmw': ['CXLFixedMemoryWindowOptions'] } +} + ## # @X86CPURegister32: # @@ -841,6 +873,7 @@ # "merge": false, # "dump": true, # "prealloc": false, +# "share": false, # "host-nodes": [0, 1], # "policy": "bind" # }, @@ -849,6 +882,7 @@ # "merge": false, # "dump": true, # "prealloc": true, +# "share": false, # "host-nodes": [2, 3], # "policy": "preferred" # } @@ -867,11 +901,12 @@ # # @node-id: NUMA node ID the CPU belongs to # @socket-id: socket number within node/board the CPU belongs to -# @die-id: die number within node/board the CPU belongs to (Since 4.1) -# @core-id: core number within die the CPU belongs to +# @die-id: die number within socket the CPU belongs to (since 4.1) +# @cluster-id: cluster number within die the CPU belongs to (since 7.1) +# @core-id: core number within cluster the CPU belongs to # @thread-id: thread number within core the CPU belongs to # -# Note: currently there are 5 properties that could be present +# Note: currently there are 6 properties that could be present # but management should be prepared to pass through other # properties with device_add command to allow for future # interface extension. 
This also requires the filed names to be kept in @@ -883,6 +918,7 @@ 'data': { '*node-id': 'int', '*socket-id': 'int', '*die-id': 'int', + '*cluster-id': 'int', '*core-id': 'int', '*thread-id': 'int' } @@ -922,9 +958,9 @@ # # -> { "execute": "query-hotpluggable-cpus" } # <- {"return": [ -# { "props": { "core": 8 }, "type": "POWER8-spapr-cpu-core", +# { "props": { "core-id": 8 }, "type": "POWER8-spapr-cpu-core", # "vcpus-count": 1 }, -# { "props": { "core": 0 }, "type": "POWER8-spapr-cpu-core", +# { "props": { "core-id": 0 }, "type": "POWER8-spapr-cpu-core", # "vcpus-count": 1, "qom-path": "/machine/unattached/device[0]"} # ]}' # @@ -970,7 +1006,7 @@ # preconfigure stage to configure numa mapping before initializing # machine. # -# Since 3.0 +# Since: 3.0 ## { 'command': 'set-numa-node', 'boxed': true, 'data': 'NumaOptions', @@ -1019,7 +1055,6 @@ # Formula used: logical_vm_size = vm_ram_size - balloon_size # # Since: 0.14 -# ## { 'struct': 'BalloonInfo', 'data': {'actual': 'int' } } @@ -1039,7 +1074,7 @@ # # -> { "execute": "query-balloon" } # <- { "return": { -# "actual": 1073741824, +# "actual": 1073741824 # } # } # @@ -1194,24 +1229,123 @@ } } +## +# @SgxEPCDeviceInfo: +# +# Sgx EPC state information +# +# @id: device's ID +# +# @memaddr: physical address in memory, where device is mapped +# +# @size: size of memory that the device provides +# +# @memdev: memory backend linked with device +# +# @node: the numa node (Since: 7.0) +# +# Since: 6.2 +## +{ 'struct': 'SgxEPCDeviceInfo', + 'data': { '*id': 'str', + 'memaddr': 'size', + 'size': 'size', + 'node': 'int', + 'memdev': 'str' + } +} + +## +# @MemoryDeviceInfoKind: +# +# Since: 2.1 +## +{ 'enum': 'MemoryDeviceInfoKind', + 'data': [ 'dimm', 'nvdimm', 'virtio-pmem', 'virtio-mem', 'sgx-epc' ] } + +## +# @PCDIMMDeviceInfoWrapper: +# +# Since: 2.1 +## +{ 'struct': 'PCDIMMDeviceInfoWrapper', + 'data': { 'data': 'PCDIMMDeviceInfo' } } + +## +# @VirtioPMEMDeviceInfoWrapper: +# +# Since: 2.1 +## +{ 'struct': 'VirtioPMEMDeviceInfoWrapper', + 'data': { 'data': 'VirtioPMEMDeviceInfo' } } + +## +# @VirtioMEMDeviceInfoWrapper: +# +# Since: 2.1 +## +{ 'struct': 'VirtioMEMDeviceInfoWrapper', + 'data': { 'data': 'VirtioMEMDeviceInfo' } } + +## +# @SgxEPCDeviceInfoWrapper: +# +# Since: 6.2 +## +{ 'struct': 'SgxEPCDeviceInfoWrapper', + 'data': { 'data': 'SgxEPCDeviceInfo' } } + ## # @MemoryDeviceInfo: # # Union containing information about a memory device # # nvdimm is included since 2.12. virtio-pmem is included since 4.1. -# virtio-mem is included since 5.1. +# virtio-mem is included since 5.1. sgx-epc is included since 6.2. # # Since: 2.1 ## { 'union': 'MemoryDeviceInfo', - 'data': { 'dimm': 'PCDIMMDeviceInfo', - 'nvdimm': 'PCDIMMDeviceInfo', - 'virtio-pmem': 'VirtioPMEMDeviceInfo', - 'virtio-mem': 'VirtioMEMDeviceInfo' + 'base': { 'type': 'MemoryDeviceInfoKind' }, + 'discriminator': 'type', + 'data': { 'dimm': 'PCDIMMDeviceInfoWrapper', + 'nvdimm': 'PCDIMMDeviceInfoWrapper', + 'virtio-pmem': 'VirtioPMEMDeviceInfoWrapper', + 'virtio-mem': 'VirtioMEMDeviceInfoWrapper', + 'sgx-epc': 'SgxEPCDeviceInfoWrapper' } } +## +# @SgxEPC: +# +# Sgx EPC cmdline information +# +# @memdev: memory backend linked with device +# +# @node: the numa node (Since: 7.0) +# +# Since: 6.2 +## +{ 'struct': 'SgxEPC', + 'data': { 'memdev': 'str', + 'node': 'int' + } +} + +## +# @SgxEPCProperties: +# +# SGX properties of machine types. +# +# @sgx-epc: list of ids of memory-backend-epc objects. 
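+#
+# As an illustration of the sgx-epc branch of @MemoryDeviceInfo defined
+# above, a query-memory-devices reply could contain an entry shaped like
+# this (all values are made up for the example):
+#
+# <- { "return": [ { "type": "sgx-epc",
+#                    "data": { "memaddr": 2147483648,
+#                              "size": 67108864,
+#                              "node": 0,
+#                              "memdev": "/objects/epc0" } } ] }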
+# +# Since: 6.2 +## +{ 'struct': 'SgxEPCProperties', + 'data': { 'sgx-epc': ['SgxEPC'] } +} + ## # @query-memory-devices: # @@ -1245,8 +1379,11 @@ # action). # # @id: device's ID +# # @size: the new size of memory that the device provides # +# @qom-path: path to the device object in the QOM tree (since 6.2) +# # Note: this event is rate-limited. # # Since: 5.1 @@ -1254,13 +1391,13 @@ # Example: # # <- { "event": "MEMORY_DEVICE_SIZE_CHANGE", -# "data": { "id": "vm0", "size": 1073741824}, +# "data": { "id": "vm0", "size": 1073741824, +# "qom-path": "/machine/unattached/device[2]" }, # "timestamp": { "seconds": 1588168529, "microseconds": 201316 } } # ## { 'event': 'MEMORY_DEVICE_SIZE_CHANGE', - 'data': { '*id': 'str', 'size': 'size' } } - + 'data': { '*id': 'str', 'size': 'size', 'qom-path' : 'str'} } ## # @MEM_UNPLUG_ERROR: @@ -1271,11 +1408,15 @@ # # @msg: Informative message # +# Features: +# @deprecated: This event is deprecated. Use @DEVICE_UNPLUG_GUEST_ERROR +# instead. +# # Since: 2.4 # # Example: # -# <- { "event": "MEM_UNPLUG_ERROR" +# <- { "event": "MEM_UNPLUG_ERROR", # "data": { "device": "dimm1", # "msg": "acpi: device unplug for unsupported device" # }, @@ -1283,7 +1424,38 @@ # ## { 'event': 'MEM_UNPLUG_ERROR', - 'data': { 'device': 'str', 'msg': 'str' } } + 'data': { 'device': 'str', 'msg': 'str' }, + 'features': ['deprecated'] } + +## +# @BootConfiguration: +# +# Schema for virtual machine boot configuration. +# +# @order: Boot order (a=floppy, c=hard disk, d=CD-ROM, n=network) +# +# @once: Boot order to apply on first boot +# +# @menu: Whether to show a boot menu +# +# @splash: The name of the file to be passed to the firmware as logo picture, if @menu is true. +# +# @splash-time: How long to show the logo picture, in milliseconds +# +# @reboot-timeout: Timeout before guest reboots after boot fails +# +# @strict: Whether to attempt booting from devices not included in the boot order +# +# Since: 7.1 +## +{ 'struct': 'BootConfiguration', 'data': { + '*order': 'str', + '*once': 'str', + '*menu': 'bool', + '*splash': 'str', + '*splash-time': 'int', + '*reboot-timeout': 'int', + '*strict': 'bool' } } ## # @SMPConfiguration: @@ -1297,7 +1469,9 @@ # # @dies: number of dies per socket in the CPU topology # -# @cores: number of cores per thread in the CPU topology +# @clusters: number of clusters per die in the CPU topology (since 7.0) +# +# @cores: number of cores per cluster in the CPU topology # # @threads: number of threads per core in the CPU topology # @@ -1309,6 +1483,202 @@ '*cpus': 'int', '*sockets': 'int', '*dies': 'int', + '*clusters': 'int', '*cores': 'int', '*threads': 'int', '*maxcpus': 'int' } } + +## +# @x-query-irq: +# +# Query interrupt statistics +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: interrupt statistics +# +# Since: 6.2 +## +{ 'command': 'x-query-irq', + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } + +## +# @x-query-jit: +# +# Query TCG compiler statistics +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: TCG compiler statistics +# +# Since: 6.2 +## +{ 'command': 'x-query-jit', + 'returns': 'HumanReadableText', + 'if': 'CONFIG_TCG', + 'features': [ 'unstable' ] } + +## +# @x-query-numa: +# +# Query NUMA topology information +# +# Features: +# @unstable: This command is meant for debugging. 
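+#
+# Like the other x-query-* debug commands introduced here, it returns a
+# single @HumanReadableText object. A hypothetical exchange (the exact
+# text depends on the machine configuration) looks like:
+#
+# -> { "execute": "x-query-numa" }
+# <- { "return": { "human-readable-text": "2 nodes\n..." } }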
+# +# Returns: topology information +# +# Since: 6.2 +## +{ 'command': 'x-query-numa', + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } + +## +# @x-query-opcount: +# +# Query TCG opcode counters +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: TCG opcode counters +# +# Since: 6.2 +## +{ 'command': 'x-query-opcount', + 'returns': 'HumanReadableText', + 'if': 'CONFIG_TCG', + 'features': [ 'unstable' ] } + +## +# @x-query-profile: +# +# Query TCG profiling information +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: profile information +# +# Since: 6.2 +## +{ 'command': 'x-query-profile', + 'returns': 'HumanReadableText', + 'if': 'CONFIG_TCG', + 'features': [ 'unstable' ] } + +## +# @x-query-ramblock: +# +# Query system ramblock information +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: system ramblock information +# +# Since: 6.2 +## +{ 'command': 'x-query-ramblock', + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } + +## +# @x-query-rdma: +# +# Query RDMA state +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: RDMA state +# +# Since: 6.2 +## +{ 'command': 'x-query-rdma', + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } + +## +# @x-query-roms: +# +# Query information on the registered ROMS +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: registered ROMs +# +# Since: 6.2 +## +{ 'command': 'x-query-roms', + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } + +## +# @x-query-usb: +# +# Query information on the USB devices +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: USB device information +# +# Since: 6.2 +## +{ 'command': 'x-query-usb', + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } + +## +# @SmbiosEntryPointType: +# +# @32: SMBIOS version 2.1 (32-bit) Entry Point +# +# @64: SMBIOS version 3.0 (64-bit) Entry Point +# +# Since: 7.0 +## +{ 'enum': 'SmbiosEntryPointType', + 'data': [ '32', '64' ] } + +## +# @MemorySizeConfiguration: +# +# Schema for memory size configuration. +# +# @size: memory size in bytes +# +# @max-size: maximum hotpluggable memory size in bytes +# +# @slots: number of available memory slots for hotplug +# +# Since: 7.1 +## +{ 'struct': 'MemorySizeConfiguration', 'data': { + '*size': 'size', + '*max-size': 'size', + '*slots': 'uint64' } } + +## +# @dumpdtb: +# +# Save the FDT in dtb format. 
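+#
+# The file written is a flattened device tree blob; if the device tree
+# compiler is available on the host, it can be decompiled for inspection,
+# e.g. with "dtc -I dtb -O dts fdt.dtb" (using the name passed as
+# @filename below).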
+# +# @filename: name of the dtb file to be created +# +# Since: 7.2 +# +# Example: +# {"execute": "dumpdtb"} +# "arguments": { "filename": "fdt.dtb" } } +# +## +{ 'command': 'dumpdtb', + 'data': { 'filename': 'str' }, + 'if': 'CONFIG_FDT' } diff --git a/qapi/meson.build b/qapi/meson.build index c356a385e3..fbdb442fdf 100644 --- a/qapi/meson.build +++ b/qapi/meson.build @@ -10,7 +10,10 @@ util_ss.add(files( 'string-input-visitor.c', 'string-output-visitor.c', )) -if have_system or have_tools +if have_system + util_ss.add(files('qapi-type-helpers.c')) +endif +if have_system or have_tools or have_ga util_ss.add(files( 'qmp-dispatch.c', 'qmp-event.c', @@ -43,8 +46,10 @@ qapi_all_modules = [ 'replay', 'run-state', 'sockets', + 'stats', 'trace', 'transaction', + 'virtio', 'yank', ] if have_system @@ -64,21 +69,6 @@ if have_system or have_tools ] endif -qapi_storage_daemon_modules = [ - 'block-core', - 'block-export', - 'char', - 'common', - 'control', - 'crypto', - 'introspect', - 'job', - 'qom', - 'sockets', - 'pragma', - 'transaction', -] - qapi_nonmodule_outputs = [ 'qapi-introspect.c', 'qapi-introspect.h', 'qapi-types.c', 'qapi-types.h', @@ -111,6 +101,7 @@ foreach module : qapi_all_modules 'qapi-events-@0@.h'.format(module), 'qapi-commands-@0@.c'.format(module), 'qapi-commands-@0@.h'.format(module), + 'qapi-commands-@0@.trace-events'.format(module), ] endif if module.endswith('-target') @@ -134,6 +125,9 @@ foreach output : qapi_util_outputs if output.endswith('.h') genh += qapi_files[i] endif + if output.endswith('.trace-events') + qapi_trace_events += qapi_files[i] + endif util_ss.add(qapi_files[i]) i = i + 1 endforeach @@ -142,6 +136,9 @@ foreach output : qapi_specific_outputs + qapi_nonmodule_outputs if output.endswith('.h') genh += qapi_files[i] endif + if output.endswith('.trace-events') + qapi_trace_events += qapi_files[i] + endif specific_ss.add(when: 'CONFIG_SOFTMMU', if_true: qapi_files[i]) i = i + 1 endforeach diff --git a/qapi/migration.json b/qapi/migration.json index 1124a2dda8..88ecf86ac8 100644 --- a/qapi/migration.json +++ b/qapi/migration.json @@ -46,6 +46,19 @@ # @pages-per-second: the number of memory pages transferred per second # (Since 4.0) # +# @precopy-bytes: The number of bytes sent in the pre-copy phase +# (since 7.0). +# +# @downtime-bytes: The number of bytes sent while the guest is paused +# (since 7.0). +# +# @postcopy-bytes: The number of bytes sent during the post-copy phase +# (since 7.0). +# +# @dirty-sync-missed-zero-copy: Number of times dirty RAM synchronization could +# not avoid copying dirty pages. This is between +# 0 and @dirty-sync-count * @multifd-channels. +# (since 7.1) # Since: 0.14 ## { 'struct': 'MigrationStats', @@ -54,7 +67,10 @@ 'normal-bytes': 'int', 'dirty-pages-rate' : 'int', 'mbps' : 'number', 'dirty-sync-count' : 'int', 'postcopy-requests' : 'int', 'page-size' : 'int', - 'multifd-bytes' : 'uint64', 'pages-per-second' : 'uint64' } } + 'multifd-bytes' : 'uint64', 'pages-per-second' : 'uint64', + 'precopy-bytes' : 'uint64', 'downtime-bytes' : 'uint64', + 'postcopy-bytes' : 'uint64', + 'dirty-sync-missed-zero-copy' : 'uint64' } } ## # @XBZRLECacheStats: @@ -140,7 +156,6 @@ # (since 4.2) # # Since: 2.3 -# ## { 'enum': 'MigrationStatus', 'data': [ 'none', 'setup', 'cancelling', 'cancelled', @@ -155,7 +170,6 @@ # @transferred: amount of bytes transferred to the target VM by VFIO devices # # Since: 5.2 -# ## { 'struct': 'VfioStats', 'data': {'transferred': 'int' } } @@ -452,14 +466,33 @@ # procedure starts. 
The VM RAM is saved with running VM. # (since 6.0) # +# @zero-copy-send: Controls behavior on sending memory pages on migration. +# When true, enables a zero-copy mechanism for sending +# memory pages, if host supports it. +# Requires that QEMU be permitted to use locked memory +# for guest RAM pages. +# (since 7.1) +# @postcopy-preempt: If enabled, the migration process will allow postcopy +# requests to preempt precopy stream, so postcopy requests +# will be handled faster. This is a performance feature and +# should not affect the correctness of postcopy migration. +# (since 7.1) +# +# Features: +# @unstable: Members @x-colo and @x-ignore-shared are experimental. +# # Since: 1.2 ## { 'enum': 'MigrationCapability', 'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks', - 'compress', 'events', 'postcopy-ram', 'x-colo', 'release-ram', + 'compress', 'events', 'postcopy-ram', + { 'name': 'x-colo', 'features': [ 'unstable' ] }, + 'release-ram', 'block', 'return-path', 'pause-before-switchover', 'multifd', 'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate', - 'x-ignore-shared', 'validate-uuid', 'background-snapshot'] } + { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] }, + 'validate-uuid', 'background-snapshot', + 'zero-copy-send', 'postcopy-preempt'] } ## # @MigrationCapabilityStatus: @@ -529,11 +562,10 @@ # @zstd: use zstd compression method. # # Since: 5.0 -# ## { 'enum': 'MultiFDCompression', 'data': [ 'none', 'zlib', - { 'name': 'zstd', 'if': 'defined(CONFIG_ZSTD)' } ] } + { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] } ## # @BitmapMigrationBitmapAliasTransform: @@ -724,6 +756,7 @@ # will consume more CPU. # Defaults to 1. (Since 5.0) # +# # @block-bitmap-mapping: Maps block nodes and bitmaps on them to # aliases for the purpose of dirty bitmap migration. Such # aliases may for example be the corresponding names on the @@ -743,6 +776,9 @@ # block device name if there is one, and to their node name # otherwise. (Since 5.2) # +# Features: +# @unstable: Member @x-checkpoint-delay is experimental. +# # Since: 2.4 ## { 'enum': 'MigrationParameter', @@ -753,7 +789,9 @@ 'cpu-throttle-initial', 'cpu-throttle-increment', 'cpu-throttle-tailslow', 'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth', - 'downtime-limit', 'x-checkpoint-delay', 'block-incremental', + 'downtime-limit', + { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] }, + 'block-incremental', 'multifd-channels', 'xbzrle-cache-size', 'max-postcopy-bandwidth', 'max-cpu-throttle', 'multifd-compression', @@ -903,6 +941,9 @@ # block device name if there is one, and to their node name # otherwise. (Since 5.2) # +# Features: +# @unstable: Member @x-checkpoint-delay is experimental. +# # Since: 2.4 ## # TODO either fuse back into MigrationParameters, or make @@ -925,7 +966,8 @@ '*tls-authz': 'StrOrNull', '*max-bandwidth': 'size', '*downtime-limit': 'uint64', - '*x-checkpoint-delay': 'uint32', + '*x-checkpoint-delay': { 'type': 'uint32', + 'features': [ 'unstable' ] }, '*block-incremental': 'bool', '*multifd-channels': 'uint8', '*xbzrle-cache-size': 'size', @@ -1099,6 +1141,9 @@ # block device name if there is one, and to their node name # otherwise. (Since 5.2) # +# Features: +# @unstable: Member @x-checkpoint-delay is experimental. 
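+#
+# As a sketch of how such an experimental member is driven, the COLO
+# checkpoint interval could be raised with migrate-set-parameters (the
+# value, in milliseconds, is chosen purely for illustration):
+#
+# -> { "execute": "migrate-set-parameters",
+#      "arguments": { "x-checkpoint-delay": 2000 } }
+# <- { "return": {} }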
+# # Since: 2.4 ## { 'struct': 'MigrationParameters', @@ -1119,7 +1164,8 @@ '*tls-authz': 'str', '*max-bandwidth': 'size', '*downtime-limit': 'uint64', - '*x-checkpoint-delay': 'uint32', + '*x-checkpoint-delay': { 'type': 'uint32', + 'features': [ 'unstable' ] }, '*block-incremental': 'bool', '*multifd-channels': 'uint8', '*xbzrle-cache-size': 'size', @@ -1164,10 +1210,10 @@ # ask the client to automatically reconnect using the new parameters # once migration finished successfully. Only implemented for SPICE. # -# @protocol: must be "spice" -# @hostname: migration target hostname -# @port: spice tcp port for plaintext channels -# @tls-port: spice tcp port for tls-secured channels +# @protocol: must be "spice" +# @hostname: migration target hostname +# @port: spice tcp port for plaintext channels +# @tls-port: spice tcp port for tls-secured channels # @cert-subject: server certificate subject # # Since: 0.14 @@ -1351,6 +1397,9 @@ # If sent to the Secondary, the Secondary side will run failover work, # then takes over server operation to become the service VM. # +# Features: +# @unstable: This command is experimental. +# # Since: 2.8 # # Example: @@ -1359,7 +1408,8 @@ # <- { "return": {} } # ## -{ 'command': 'x-colo-lost-heartbeat' } +{ 'command': 'x-colo-lost-heartbeat', + 'features': [ 'unstable' ] } ## # @migrate_cancel: @@ -1388,7 +1438,9 @@ # @state: The state the migration is currently expected to be in # # Returns: nothing on success +# # Since: 2.11 +# # Example: # # -> { "execute": "migrate-continue" , "arguments": @@ -1562,7 +1614,7 @@ ## { 'command': 'xen-set-replication', 'data': { 'enable': 'bool', 'primary': 'bool', '*failover' : 'bool' }, - 'if': 'defined(CONFIG_REPLICATION)' } + 'if': 'CONFIG_REPLICATION' } ## # @ReplicationStatus: @@ -1578,14 +1630,14 @@ ## { 'struct': 'ReplicationStatus', 'data': { 'error': 'bool', '*desc': 'str' }, - 'if': 'defined(CONFIG_REPLICATION)' } + 'if': 'CONFIG_REPLICATION' } ## # @query-xen-replication-status: # # Query replication status while the vm is running. # -# Returns: A @ReplicationResult object showing the status. +# Returns: A @ReplicationStatus object showing the status. # # Example: # @@ -1596,7 +1648,7 @@ ## { 'command': 'query-xen-replication-status', 'returns': 'ReplicationStatus', - 'if': 'defined(CONFIG_REPLICATION)' } + 'if': 'CONFIG_REPLICATION' } ## # @xen-colo-do-checkpoint: @@ -1613,7 +1665,7 @@ # Since: 2.9 ## { 'command': 'xen-colo-do-checkpoint', - 'if': 'defined(CONFIG_REPLICATION)' } + 'if': 'CONFIG_REPLICATION' } ## # @COLOStatus: @@ -1645,7 +1697,7 @@ # Example: # # -> { "execute": "query-colo-status" } -# <- { "return": { "mode": "primary", "reason": "request" } } +# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } } # # Since: 3.1 ## @@ -1702,12 +1754,29 @@ # Since: 4.2 # # Example: -# {"event": "UNPLUG_PRIMARY", "data": {"device-id": "hostdev0"} } +# +# <- { "event": "UNPLUG_PRIMARY", +# "data": { "device-id": "hostdev0" }, +# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } # ## { 'event': 'UNPLUG_PRIMARY', 'data': { 'device-id': 'str' } } +## +# @DirtyRateVcpu: +# +# Dirty rate of vcpu. +# +# @id: vcpu index. +# +# @dirty-rate: dirty rate. +# +# Since: 6.2 +## +{ 'struct': 'DirtyRateVcpu', + 'data': { 'id': 'int', 'dirty-rate': 'int64' } } + ## # @DirtyRateStatus: # @@ -1720,11 +1789,26 @@ # @measured: the dirtyrate thread has measured and results are available. 
# # Since: 5.2 -# ## { 'enum': 'DirtyRateStatus', 'data': [ 'unstarted', 'measuring', 'measured'] } +## +# @DirtyRateMeasureMode: +# +# An enumeration of mode of measuring dirtyrate. +# +# @page-sampling: calculate dirtyrate by sampling pages. +# +# @dirty-ring: calculate dirtyrate by dirty ring. +# +# @dirty-bitmap: calculate dirtyrate by dirty bitmap. +# +# Since: 6.2 +## +{ 'enum': 'DirtyRateMeasureMode', + 'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] } + ## # @DirtyRateInfo: # @@ -1743,15 +1827,22 @@ # @sample-pages: page count per GB for sample dirty pages # the default value is 512 (since 6.1) # -# Since: 5.2 +# @mode: mode containing method of calculate dirtyrate includes +# 'page-sampling' and 'dirty-ring' (Since 6.2) # +# @vcpu-dirty-rate: dirtyrate for each vcpu if dirty-ring +# mode specified (Since 6.2) +# +# Since: 5.2 ## { 'struct': 'DirtyRateInfo', 'data': {'*dirty-rate': 'int64', 'status': 'DirtyRateStatus', 'start-time': 'int64', 'calc-time': 'int64', - 'sample-pages': 'uint64'} } + 'sample-pages': 'uint64', + 'mode': 'DirtyRateMeasureMode', + '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } } ## # @calc-dirty-rate: @@ -1763,15 +1854,20 @@ # @sample-pages: page count per GB for sample dirty pages # the default value is 512 (since 6.1) # +# @mode: mechanism of calculating dirtyrate includes +# 'page-sampling' and 'dirty-ring' (Since 6.1) +# # Since: 5.2 # # Example: -# {"command": "calc-dirty-rate", "data": {"calc-time": 1, -# 'sample-pages': 512} } +# +# {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1, +# 'sample-pages': 512} } # ## { 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64', - '*sample-pages': 'int'} } + '*sample-pages': 'int', + '*mode': 'DirtyRateMeasureMode'} } ## # @query-dirty-rate: @@ -1782,6 +1878,86 @@ ## { 'command': 'query-dirty-rate', 'returns': 'DirtyRateInfo' } +## +# @DirtyLimitInfo: +# +# Dirty page rate limit information of a virtual CPU. +# +# @cpu-index: index of a virtual CPU. +# +# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual +# CPU, 0 means unlimited. +# +# @current-rate: current dirty page rate (MB/s) for a virtual CPU. +# +# Since: 7.1 +# +## +{ 'struct': 'DirtyLimitInfo', + 'data': { 'cpu-index': 'int', + 'limit-rate': 'uint64', + 'current-rate': 'uint64' } } + +## +# @set-vcpu-dirty-limit: +# +# Set the upper limit of dirty page rate for virtual CPUs. +# +# Requires KVM with accelerator property "dirty-ring-size" set. +# A virtual CPU's dirty page rate is a measure of its memory load. +# To observe dirty page rates, use @calc-dirty-rate. +# +# @cpu-index: index of a virtual CPU, default is all. +# +# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs. +# +# Since: 7.1 +# +# Example: +# {"execute": "set-vcpu-dirty-limit"} +# "arguments": { "dirty-rate": 200, +# "cpu-index": 1 } } +# +## +{ 'command': 'set-vcpu-dirty-limit', + 'data': { '*cpu-index': 'int', + 'dirty-rate': 'uint64' } } + +## +# @cancel-vcpu-dirty-limit: +# +# Cancel the upper limit of dirty page rate for virtual CPUs. +# +# Cancel the dirty page limit for the vCPU which has been set with +# set-vcpu-dirty-limit command. Note that this command requires +# support from dirty ring, same as the "set-vcpu-dirty-limit". +# +# @cpu-index: index of a virtual CPU, default is all. 
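+#
+# Together with @set-vcpu-dirty-limit above, an illustrative exchange
+# that throttles vCPU 1 and then releases it again looks like:
+#
+# -> { "execute": "set-vcpu-dirty-limit",
+#      "arguments": { "cpu-index": 1, "dirty-rate": 200 } }
+# <- { "return": {} }
+# -> { "execute": "cancel-vcpu-dirty-limit",
+#      "arguments": { "cpu-index": 1 } }
+# <- { "return": {} }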
+# +# Since: 7.1 +# +# Example: +# {"execute": "cancel-vcpu-dirty-limit"} +# "arguments": { "cpu-index": 1 } } +# +## +{ 'command': 'cancel-vcpu-dirty-limit', + 'data': { '*cpu-index': 'int'} } + +## +# @query-vcpu-dirty-limit: +# +# Returns information about virtual CPU dirty page rate limits, if any. +# +# Since: 7.1 +# +# Example: +# {"execute": "query-vcpu-dirty-limit"} +# +## +{ 'command': 'query-vcpu-dirty-limit', + 'returns': [ 'DirtyLimitInfo' ] } + ## # @snapshot-save: # @@ -1810,7 +1986,7 @@ # Example: # # -> { "execute": "snapshot-save", -# "data": { +# "arguments": { # "job-id": "snapsave0", # "tag": "my-snap", # "vmstate": "disk0", @@ -1819,16 +1995,23 @@ # } # <- { "return": { } } # <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1432121972, "microseconds": 744001}, # "data": {"status": "created", "id": "snapsave0"}} # <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1432122172, "microseconds": 744001}, # "data": {"status": "running", "id": "snapsave0"}} -# <- {"event": "STOP"} -# <- {"event": "RESUME"} +# <- {"event": "STOP", +# "timestamp": {"seconds": 1432122372, "microseconds": 744001} } +# <- {"event": "RESUME", +# "timestamp": {"seconds": 1432122572, "microseconds": 744001} } # <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1432122772, "microseconds": 744001}, # "data": {"status": "waiting", "id": "snapsave0"}} # <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1432122972, "microseconds": 744001}, # "data": {"status": "pending", "id": "snapsave0"}} # <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1432123172, "microseconds": 744001}, # "data": {"status": "concluded", "id": "snapsave0"}} # -> {"execute": "query-jobs"} # <- {"return": [{"current-progress": 1, @@ -1871,7 +2054,7 @@ # Example: # # -> { "execute": "snapshot-load", -# "data": { +# "arguments": { # "job-id": "snapload0", # "tag": "my-snap", # "vmstate": "disk0", @@ -1880,16 +2063,23 @@ # } # <- { "return": { } } # <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1472124172, "microseconds": 744001}, # "data": {"status": "created", "id": "snapload0"}} # <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1472125172, "microseconds": 744001}, # "data": {"status": "running", "id": "snapload0"}} -# <- {"event": "STOP"} -# <- {"event": "RESUME"} +# <- {"event": "STOP", +# "timestamp": {"seconds": 1472125472, "microseconds": 744001} } +# <- {"event": "RESUME", +# "timestamp": {"seconds": 1472125872, "microseconds": 744001} } # <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1472126172, "microseconds": 744001}, # "data": {"status": "waiting", "id": "snapload0"}} # <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1472127172, "microseconds": 744001}, # "data": {"status": "pending", "id": "snapload0"}} # <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1472128172, "microseconds": 744001}, # "data": {"status": "concluded", "id": "snapload0"}} # -> {"execute": "query-jobs"} # <- {"return": [{"current-progress": 1, @@ -1924,7 +2114,7 @@ # Example: # # -> { "execute": "snapshot-delete", -# "data": { +# "arguments": { # "job-id": "snapdelete0", # "tag": "my-snap", # "devices": ["disk0", "disk1"] @@ -1932,14 +2122,19 @@ # } # <- { "return": { } } # <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1442124172, "microseconds": 744001}, # "data": {"status": "created", "id": "snapdelete0"}} # <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1442125172, "microseconds": 744001}, 
# "data": {"status": "running", "id": "snapdelete0"}} # <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1442126172, "microseconds": 744001}, # "data": {"status": "waiting", "id": "snapdelete0"}} # <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1442127172, "microseconds": 744001}, # "data": {"status": "pending", "id": "snapdelete0"}} # <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1442128172, "microseconds": 744001}, # "data": {"status": "concluded", "id": "snapdelete0"}} # -> {"execute": "query-jobs"} # <- {"return": [{"current-progress": 1, diff --git a/qapi/misc-target.json b/qapi/misc-target.json index 5573dcf8f0..4944c0528f 100644 --- a/qapi/misc-target.json +++ b/qapi/misc-target.json @@ -2,29 +2,6 @@ # vim: filetype=python # -## -# @RTC_CHANGE: -# -# Emitted when the guest changes the RTC time. -# -# @offset: offset between base RTC clock (as specified by -rtc base), and -# new RTC clock value -# -# Note: This event is rate-limited. -# -# Since: 0.13 -# -# Example: -# -# <- { "event": "RTC_CHANGE", -# "data": { "offset": 78 }, -# "timestamp": { "seconds": 1267020223, "microseconds": 435656 } } -# -## -{ 'event': 'RTC_CHANGE', - 'data': { 'offset': 'int' }, - 'if': 'defined(TARGET_ALPHA) || defined(TARGET_ARM) || defined(TARGET_HPPA) || defined(TARGET_I386) || defined(TARGET_MIPS) || defined(TARGET_MIPS64) || defined(TARGET_PPC) || defined(TARGET_PPC64) || defined(TARGET_S390X) || defined(TARGET_SH4) || defined(TARGET_SPARC)' } - ## # @rtc-reset-reinjection: # @@ -42,8 +19,7 @@ # ## { 'command': 'rtc-reset-reinjection', - 'if': 'defined(TARGET_I386)' } - + 'if': 'TARGET_I386' } ## # @SevState: @@ -69,7 +45,7 @@ { 'enum': 'SevState', 'data': ['uninit', 'launch-update', 'launch-secret', 'running', 'send-update', 'receive-update' ], - 'if': 'defined(TARGET_I386)' } + 'if': 'TARGET_I386' } ## # @SevInfo: @@ -101,7 +77,7 @@ 'state' : 'SevState', 'handle' : 'uint32' }, - 'if': 'defined(TARGET_I386)' + 'if': 'TARGET_I386' } ## @@ -122,8 +98,7 @@ # ## { 'command': 'query-sev', 'returns': 'SevInfo', - 'if': 'defined(TARGET_I386)' } - + 'if': 'TARGET_I386' } ## # @SevLaunchMeasureInfo: @@ -133,10 +108,9 @@ # @data: the measurement value encoded in base64 # # Since: 2.12 -# ## { 'struct': 'SevLaunchMeasureInfo', 'data': {'data': 'str'}, - 'if': 'defined(TARGET_I386)' } + 'if': 'TARGET_I386' } ## # @query-sev-launch-measure: @@ -154,8 +128,7 @@ # ## { 'command': 'query-sev-launch-measure', 'returns': 'SevLaunchMeasureInfo', - 'if': 'defined(TARGET_I386)' } - + 'if': 'TARGET_I386' } ## # @SevCapability: @@ -163,9 +136,11 @@ # The struct describes capability for a Secure Encrypted Virtualization # feature. 
# -# @pdh: Platform Diffie-Hellman key (base64 encoded) +# @pdh: Platform Diffie-Hellman key (base64 encoded) # -# @cert-chain: PDH certificate chain (base64 encoded) +# @cert-chain: PDH certificate chain (base64 encoded) +# +# @cpu0-id: Unique ID of CPU0 (base64 encoded) (since 7.1) # # @cbitpos: C-bit location in page table entry # @@ -177,9 +152,10 @@ { 'struct': 'SevCapability', 'data': { 'pdh': 'str', 'cert-chain': 'str', + 'cpu0-id': 'str', 'cbitpos': 'int', 'reduced-phys-bits': 'int'}, - 'if': 'defined(TARGET_I386)' } + 'if': 'TARGET_I386' } ## # @query-sev-capabilities: @@ -195,11 +171,12 @@ # # -> { "execute": "query-sev-capabilities" } # <- { "return": { "pdh": "8CCDD8DDD", "cert-chain": "888CCCDDDEE", +# "cpu0-id": "2lvmGwo+...61iEinw==", # "cbitpos": 47, "reduced-phys-bits": 5}} # ## { 'command': 'query-sev-capabilities', 'returns': 'SevCapability', - 'if': 'defined(TARGET_I386)' } + 'if': 'TARGET_I386' } ## # @sev-inject-launch-secret: @@ -213,11 +190,49 @@ # @gpa: the guest physical address where secret will be injected. # # Since: 6.0 -# ## { 'command': 'sev-inject-launch-secret', 'data': { 'packet-header': 'str', 'secret': 'str', '*gpa': 'uint64' }, - 'if': 'defined(TARGET_I386)' } + 'if': 'TARGET_I386' } + +## +# @SevAttestationReport: +# +# The struct describes attestation report for a Secure Encrypted +# Virtualization feature. +# +# @data: guest attestation report (base64 encoded) +# +# Since: 6.1 +## +{ 'struct': 'SevAttestationReport', + 'data': { 'data': 'str'}, + 'if': 'TARGET_I386' } + +## +# @query-sev-attestation-report: +# +# This command is used to get the SEV attestation report, and is +# supported on AMD X86 platforms only. +# +# @mnonce: a random 16 bytes value encoded in base64 (it will be +# included in report) +# +# Returns: SevAttestationReport objects. +# +# Since: 6.1 +# +# Example: +# +# -> { "execute" : "query-sev-attestation-report", +# "arguments": { "mnonce": "aaaaaaa" } } +# <- { "return" : { "data": "aaaaaaaabbbddddd"} } +# +## +{ 'command': 'query-sev-attestation-report', + 'data': { 'mnonce': 'str' }, + 'returns': 'SevAttestationReport', + 'if': 'TARGET_I386' } ## # @dump-skeys: @@ -239,7 +254,7 @@ ## { 'command': 'dump-skeys', 'data': { 'filename': 'str' }, - 'if': 'defined(TARGET_S390X)' } + 'if': 'TARGET_S390X' } ## # @GICCapability: @@ -264,7 +279,7 @@ 'data': { 'version': 'int', 'emulated': 'bool', 'kernel': 'bool' }, - 'if': 'defined(TARGET_ARM)' } + 'if': 'TARGET_ARM' } ## # @query-gic-capabilities: @@ -284,42 +299,92 @@ # ## { 'command': 'query-gic-capabilities', 'returns': ['GICCapability'], - 'if': 'defined(TARGET_ARM)' } - + 'if': 'TARGET_ARM' } ## -# @SevAttestationReport: +# @SGXEPCSection: # -# The struct describes attestation report for a Secure Encrypted Virtualization -# feature. +# Information about intel SGX EPC section info # -# @data: guest attestation report (base64 encoded) +# @node: the numa node # +# @size: the size of EPC section # -# Since: 6.1 +# Since: 7.0 ## -{ 'struct': 'SevAttestationReport', - 'data': { 'data': 'str'}, - 'if': 'defined(TARGET_I386)' } +{ 'struct': 'SGXEPCSection', + 'data': { 'node': 'int', + 'size': 'uint64'}} ## -# @query-sev-attestation-report: +# @SGXInfo: # -# This command is used to get the SEV attestation report, and is supported on AMD -# X86 platforms only. +# Information about intel Safe Guard eXtension (SGX) support # -# @mnonce: a random 16 bytes value encoded in base64 (it will be included in report) +# @sgx: true if SGX is supported # -# Returns: SevAttestationReport objects. 
+# @sgx1: true if SGX1 is supported # -# Since: 6.1 +# @sgx2: true if SGX2 is supported +# +# @flc: true if FLC is supported +# +# @section-size: The EPC section size for guest +# Redundant with @sections. Just for backward compatibility. +# +# @sections: The EPC sections info for guest (Since: 7.0) +# +# Features: +# @deprecated: Member @section-size is deprecated. Use @sections instead. +# +# Since: 6.2 +## +{ 'struct': 'SGXInfo', + 'data': { 'sgx': 'bool', + 'sgx1': 'bool', + 'sgx2': 'bool', + 'flc': 'bool', + 'section-size': { 'type': 'uint64', + 'features': [ 'deprecated' ] }, + 'sections': ['SGXEPCSection']}, + 'if': 'TARGET_I386' } + +## +# @query-sgx: +# +# Returns information about SGX +# +# Returns: @SGXInfo +# +# Since: 6.2 # # Example: # -# -> { "execute" : "query-sev-attestation-report", "arguments": { "mnonce": "aaaaaaa" } } -# <- { "return" : { "data": "aaaaaaaabbbddddd"} } +# -> { "execute": "query-sgx" } +# <- { "return": { "sgx": true, "sgx1" : true, "sgx2" : true, +# "flc": true, "section-size" : 96468992, +# "sections": [{"node": 0, "size": 67108864}, +# {"node": 1, "size": 29360128}]} } # ## -{ 'command': 'query-sev-attestation-report', 'data': { 'mnonce': 'str' }, - 'returns': 'SevAttestationReport', - 'if': 'defined(TARGET_I386)' } +{ 'command': 'query-sgx', 'returns': 'SGXInfo', 'if': 'TARGET_I386' } + +## +# @query-sgx-capabilities: +# +# Returns information from host SGX capabilities +# +# Returns: @SGXInfo +# +# Since: 6.2 +# +# Example: +# +# -> { "execute": "query-sgx-capabilities" } +# <- { "return": { "sgx": true, "sgx1" : true, "sgx2" : true, +# "flc": true, "section-size" : 96468992, +# "section" : [{"node": 0, "size": 67108864}, +# {"node": 1, "size": 29360128}]} } +# +## +{ 'command': 'query-sgx-capabilities', 'returns': 'SGXInfo', 'if': 'TARGET_I386' } diff --git a/qapi/misc.json b/qapi/misc.json index 5c2ca3b556..27ef5a2b20 100644 --- a/qapi/misc.json +++ b/qapi/misc.json @@ -14,8 +14,8 @@ # Allow client connections for VNC, Spice and socket based # character devices to be passed in to QEMU via SCM_RIGHTS. # -# @protocol: protocol name. Valid names are "vnc", "spice" or the -# name of a character device (eg. from -chardev id=XXXX) +# @protocol: protocol name. Valid names are "vnc", "spice", "@dbus-display" or +# the name of a character device (eg. from -chardev id=XXXX) # # @fdname: file descriptor name previously passed via 'getfd' command # @@ -136,7 +136,7 @@ # # Stop all guest VCPU execution. # -# Since: 0.14 +# Since: 0.14 # # Notes: This function will succeed even if the guest is already in the stopped # state. In "inmigrate" state, it will ensure that the guest @@ -156,9 +156,9 @@ # # Resume guest VCPU execution. # -# Since: 0.14 +# Since: 0.14 # -# Returns: If successful, nothing +# Returns: If successful, nothing # # Notes: This command will succeed if the guest is currently running. It # will also succeed if the guest is in the "inmigrate" state; in @@ -185,7 +185,10 @@ # available during the preconfig state (i.e. when the --preconfig command # line option was in use). # -# Since 3.0 +# Features: +# @unstable: This command is experimental. 
+# +# Since: 3.0 # # Returns: nothing # @@ -195,7 +198,8 @@ # <- { "return": {} } # ## -{ 'command': 'x-exit-preconfig', 'allow-preconfig': true } +{ 'command': 'x-exit-preconfig', 'allow-preconfig': true, + 'features': [ 'unstable' ] } ## # @human-monitor-command: @@ -523,3 +527,60 @@ 'data': { '*option': 'str' }, 'returns': ['CommandLineOptionInfo'], 'allow-preconfig': true } + +## +# @RTC_CHANGE: +# +# Emitted when the guest changes the RTC time. +# +# @offset: offset in seconds between base RTC clock (as specified +# by -rtc base), and new RTC clock value +# +# @qom-path: path to the RTC object in the QOM tree +# +# Note: This event is rate-limited. +# It is not guaranteed that the RTC in the system implements +# this event, or even that the system has an RTC at all. +# +# Since: 0.13 +# +# Example: +# +# <- { "event": "RTC_CHANGE", +# "data": { "offset": 78 }, +# "timestamp": { "seconds": 1267020223, "microseconds": 435656 } } +# +## +{ 'event': 'RTC_CHANGE', + 'data': { 'offset': 'int', 'qom-path': 'str' } } + +## +# @VFU_CLIENT_HANGUP: +# +# Emitted when the client of a TYPE_VFIO_USER_SERVER closes the +# communication channel +# +# @vfu-id: ID of the TYPE_VFIO_USER_SERVER object. It is the last component +# of @vfu-qom-path referenced below +# +# @vfu-qom-path: path to the TYPE_VFIO_USER_SERVER object in the QOM tree +# +# @dev-id: ID of attached PCI device +# +# @dev-qom-path: path to attached PCI device in the QOM tree +# +# Since: 7.1 +# +# Example: +# +# <- { "event": "VFU_CLIENT_HANGUP", +# "data": { "vfu-id": "vfu1", +# "vfu-qom-path": "/objects/vfu1", +# "dev-id": "sas1", +# "dev-qom-path": "/machine/peripheral/sas1" }, +# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } +# +## +{ 'event': 'VFU_CLIENT_HANGUP', + 'data': { 'vfu-id': 'str', 'vfu-qom-path': 'str', + 'dev-id': 'str', 'dev-qom-path': 'str' } } diff --git a/qapi/net.json b/qapi/net.json index ce568161f5..53acec579e 100644 --- a/qapi/net.json +++ b/qapi/net.json @@ -7,6 +7,7 @@ ## { 'include': 'common.json' } +{ 'include': 'sockets.json' } ## # @set_link: @@ -51,7 +52,7 @@ # # -> { "execute": "netdev_add", # "arguments": { "type": "user", "id": "netdev1", -# "dnssearch": "example.org" } } +# "dnssearch": [ { "str": "example.org" } ] } } # <- { "return": {} } # ## @@ -298,7 +299,7 @@ # # @udp: use the udp version of l2tpv3 encapsulation # -# @cookie64: use 64 bit coookies +# @cookie64: use 64 bit cookies # # @counter: have sequence counter # @@ -442,15 +443,193 @@ # @vhostdev: path of vhost-vdpa device # (default:'/dev/vhost-vdpa-0') # +# @vhostfd: file descriptor of an already opened vhost vdpa device +# # @queues: number of queues to be created for multiqueue vhost-vdpa # (default: 1) # +# @x-svq: Start device with (experimental) shadow virtqueue. (Since 7.1) +# (default: false) +# +# Features: +# @unstable: Member @x-svq is experimental. +# # Since: 5.1 ## { 'struct': 'NetdevVhostVDPAOptions', 'data': { '*vhostdev': 'str', - '*queues': 'int' } } + '*vhostfd': 'str', + '*queues': 'int', + '*x-svq': {'type': 'bool', 'features' : [ 'unstable'] } } } + +## +# @NetdevVmnetHostOptions: +# +# vmnet (host mode) network backend. +# +# Allows the vmnet interface to communicate with other vmnet +# interfaces that are in host mode and also with the host. +# +# @start-address: The starting IPv4 address to use for the interface. +# Must be in the private IP range (RFC 1918). Must be +# specified along with @end-address and @subnet-mask. +# This address is used as the gateway address. 
The +# subsequent address up to and including end-address are +# placed in the DHCP pool. +# +# @end-address: The DHCP IPv4 range end address to use for the +# interface. Must be in the private IP range (RFC 1918). +# Must be specified along with @start-address and +# @subnet-mask. +# +# @subnet-mask: The IPv4 subnet mask to use on the interface. Must +# be specified along with @start-address and @subnet-mask. +# +# @isolated: Enable isolation for this interface. Interface isolation +# ensures that vmnet interface is not able to communicate +# with any other vmnet interfaces. Only communication with +# host is allowed. Requires at least macOS Big Sur 11.0. +# +# @net-uuid: The identifier (UUID) to uniquely identify the isolated +# network vmnet interface should be added to. If +# set, no DHCP service is provided for this interface and +# network communication is allowed only with other interfaces +# added to this network identified by the UUID. Requires +# at least macOS Big Sur 11.0. +# +# Since: 7.1 +## +{ 'struct': 'NetdevVmnetHostOptions', + 'data': { + '*start-address': 'str', + '*end-address': 'str', + '*subnet-mask': 'str', + '*isolated': 'bool', + '*net-uuid': 'str' }, + 'if': 'CONFIG_VMNET' } + +## +# @NetdevVmnetSharedOptions: +# +# vmnet (shared mode) network backend. +# +# Allows traffic originating from the vmnet interface to reach the +# Internet through a network address translator (NAT). +# The vmnet interface can communicate with the host and with +# other shared mode interfaces on the same subnet. If no DHCP +# settings, subnet mask and IPv6 prefix specified, the interface can +# communicate with any of other interfaces in shared mode. +# +# @start-address: The starting IPv4 address to use for the interface. +# Must be in the private IP range (RFC 1918). Must be +# specified along with @end-address and @subnet-mask. +# This address is used as the gateway address. The +# subsequent address up to and including end-address are +# placed in the DHCP pool. +# +# @end-address: The DHCP IPv4 range end address to use for the +# interface. Must be in the private IP range (RFC 1918). +# Must be specified along with @start-address and @subnet-mask. +# +# @subnet-mask: The IPv4 subnet mask to use on the interface. Must +# be specified along with @start-address and @subnet-mask. +# +# @isolated: Enable isolation for this interface. Interface isolation +# ensures that vmnet interface is not able to communicate +# with any other vmnet interfaces. Only communication with +# host is allowed. Requires at least macOS Big Sur 11.0. +# +# @nat66-prefix: The IPv6 prefix to use into guest network. Must be a +# unique local address i.e. start with fd00::/8 and have +# length of 64. +# +# Since: 7.1 +## +{ 'struct': 'NetdevVmnetSharedOptions', + 'data': { + '*start-address': 'str', + '*end-address': 'str', + '*subnet-mask': 'str', + '*isolated': 'bool', + '*nat66-prefix': 'str' }, + 'if': 'CONFIG_VMNET' } + +## +# @NetdevVmnetBridgedOptions: +# +# vmnet (bridged mode) network backend. +# +# Bridges the vmnet interface with a physical network interface. +# +# @ifname: The name of the physical interface to be bridged. +# +# @isolated: Enable isolation for this interface. Interface isolation +# ensures that vmnet interface is not able to communicate +# with any other vmnet interfaces. Only communication with +# host is allowed. Requires at least macOS Big Sur 11.0. 
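+#
+# A minimal hot-add of such a backend could look as follows (the
+# interface name "en0" is only a placeholder for a host NIC):
+#
+# -> { "execute": "netdev_add",
+#      "arguments": { "type": "vmnet-bridged", "id": "net0",
+#                     "ifname": "en0" } }
+# <- { "return": {} }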
+# +# Since: 7.1 +## +{ 'struct': 'NetdevVmnetBridgedOptions', + 'data': { + 'ifname': 'str', + '*isolated': 'bool' }, + 'if': 'CONFIG_VMNET' } + +## +# @NetdevStreamOptions: +# +# Configuration info for stream socket netdev +# +# @addr: socket address to listen on (server=true) +# or connect to (server=false) +# @server: create server socket (default: false) +# +# Only SocketAddress types 'unix', 'inet' and 'fd' are supported. +# +# Since: 7.2 +## +{ 'struct': 'NetdevStreamOptions', + 'data': { + 'addr': 'SocketAddress', + '*server': 'bool' } } + +## +# @NetdevDgramOptions: +# +# Configuration info for datagram socket netdev. +# +# @remote: remote address +# @local: local address +# +# Only SocketAddress types 'unix', 'inet' and 'fd' are supported. +# +# If remote address is present and it's a multicast address, local address +# is optional. Otherwise local address is required and remote address is +# optional. +# +# .. table:: Valid parameters combination table +# :widths: auto +# +# ============= ======== ===== +# remote local okay? +# ============= ======== ===== +# absent absent no +# absent not fd no +# absent fd yes +# multicast absent yes +# multicast present yes +# not multicast absent no +# not multicast present yes +# ============= ======== ===== +# +# Since: 7.2 +## +{ 'struct': 'NetdevDgramOptions', + 'data': { + '*local': 'SocketAddress', + '*remote': 'SocketAddress' } } ## # @NetdevPcapOptions: @@ -473,11 +652,19 @@ # Since: 2.7 # # @vhost-vdpa since 5.1 +# @vmnet-host since 7.1 +# @vmnet-shared since 7.1 +# @vmnet-bridged since 7.1 +# @stream since 7.2 +# @dgram since 7.2 ## { 'enum': 'NetClientDriver', - 'data': [ 'none', 'nic', 'user', 'tap', 'l2tpv3', 'socket', 'vde', - 'bridge', 'hubport', 'netmap', 'vhost-user', 'vhost-vdpa', - 'pcap' ] } + 'data': [ 'none', 'nic', 'user', 'tap', 'l2tpv3', 'socket', 'stream', + 'dgram', 'vde', 'bridge', 'hubport', 'netmap', 'vhost-user', + 'vhost-vdpa', 'pcap', + { 'name': 'vmnet-host', 'if': 'CONFIG_VMNET' }, + { 'name': 'vmnet-shared', 'if': 'CONFIG_VMNET' }, + { 'name': 'vmnet-bridged', 'if': 'CONFIG_VMNET' }] } ## # @Netdev: @@ -491,6 +678,11 @@ # Since: 1.2 # # 'l2tpv3' - since 2.1 +# 'vmnet-host' - since 7.1 +# 'vmnet-shared' - since 7.1 +# 'vmnet-bridged' - since 7.1 +# 'stream' since 7.2 +# 'dgram' since 7.2 ## { 'union': 'Netdev', 'base': { 'id': 'str', 'type': 'NetClientDriver' }, @@ -501,13 +693,21 @@ 'tap': 'NetdevTapOptions', 'l2tpv3': 'NetdevL2TPv3Options', 'socket': 'NetdevSocketOptions', + 'stream': 'NetdevStreamOptions', + 'dgram': 'NetdevDgramOptions', 'vde': 'NetdevVdeOptions', 'bridge': 'NetdevBridgeOptions', 'hubport': 'NetdevHubPortOptions', 'netmap': 'NetdevNetmapOptions', 'vhost-user': 'NetdevVhostUserOptions', 'vhost-vdpa': 'NetdevVhostVDPAOptions', - 'pcap': 'NetdevPcapOptions' } } + 'pcap': 'NetdevPcapOptions', + 'vmnet-host': { 'type': 'NetdevVmnetHostOptions', + 'if': 'CONFIG_VMNET' }, + 'vmnet-shared': { 'type': 'NetdevVmnetSharedOptions', + 'if': 'CONFIG_VMNET' }, + 'vmnet-bridged': { 'type': 'NetdevVmnetBridgedOptions', + 'if': 'CONFIG_VMNET' } } } ## # @RxState: @@ -636,7 +836,6 @@ # "data": { "name": "vnet0", # "path": "/machine/peripheral/vnet0/virtio-backend" }, # "timestamp": { "seconds": 1368697518, "microseconds": 326866 } } -# } # ## { 'event': 'NIC_RX_FILTER_CHANGED', @@ -700,14 +899,65 @@ # Failover primary devices which were hidden (not hotplugged when requested) # before will now be hotplugged by the virtio-net standby device. 
# -# device-id: QEMU device id of the unplugged device +# @device-id: QEMU device id of the unplugged device +# # Since: 4.2 # # Example: # # <- { "event": "FAILOVER_NEGOTIATED", -# "data": "net1" } +# "data": { "device-id": "net1" }, +# "timestamp": { "seconds": 1368697518, "microseconds": 326866 } } # ## { 'event': 'FAILOVER_NEGOTIATED', 'data': {'device-id': 'str'} } + +## +# @NETDEV_STREAM_CONNECTED: +# +# Emitted when the netdev stream backend is connected +# +# @netdev-id: QEMU netdev id that is connected +# @addr: The destination address +# +# Since: 7.2 +# +# Example: +# +# <- { "event": "NETDEV_STREAM_CONNECTED", +# "data": { "netdev-id": "netdev0", +# "addr": { "port": "47666", "ipv6": true, +# "host": "::1", "type": "inet" } }, +# "timestamp": { "seconds": 1666269863, "microseconds": 311222 } } +# +# or +# +# <- { "event": "NETDEV_STREAM_CONNECTED", +# "data": { "netdev-id": "netdev0", +# "addr": { "path": "/tmp/qemu0", "type": "unix" } }, +# "timestamp": { "seconds": 1666269706, "microseconds": 413651 } } +# +## +{ 'event': 'NETDEV_STREAM_CONNECTED', + 'data': { 'netdev-id': 'str', + 'addr': 'SocketAddress' } } + +## +# @NETDEV_STREAM_DISCONNECTED: +# +# Emitted when the netdev stream backend is disconnected +# +# @netdev-id: QEMU netdev id that is disconnected +# +# Since: 7.2 +# +# Example: +# +# <- { 'event': 'NETDEV_STREAM_DISCONNECTED', +# 'data': {'netdev-id': 'netdev0'}, +# 'timestamp': {'seconds': 1663330937, 'microseconds': 526695} } +# +## +{ 'event': 'NETDEV_STREAM_DISCONNECTED', + 'data': { 'netdev-id': 'str' } } diff --git a/qapi/pragma.json b/qapi/pragma.json index 3bc0335d1f..7f810b0e97 100644 --- a/qapi/pragma.json +++ b/qapi/pragma.json @@ -1,9 +1,12 @@ +# -*- Mode: Python -*- +# vim: filetype=python + { 'pragma': { 'doc-required': true } } # Whitelists to permit QAPI rule violations; think twice before you # add to them! 
{ 'pragma': { - # Commands allowed to return a non-dictionary: + # Command names containing '_' 'command-name-exceptions': [ 'add_client', 'block_resize', @@ -21,6 +24,7 @@ 'system_powerdown', 'system_reset', 'system_wakeup' ], + # Commands allowed to return a non-dictionary 'command-returns-exceptions': [ 'human-monitor-command', 'qom-get', diff --git a/qapi/qapi-forward-visitor.c b/qapi/qapi-forward-visitor.c index a4b111e22a..e36d9bc9ba 100644 --- a/qapi/qapi-forward-visitor.c +++ b/qapi/qapi-forward-visitor.c @@ -23,7 +23,6 @@ #include "qapi/qmp/qnum.h" #include "qapi/qmp/qstring.h" #include "qemu/cutils.h" -#include "qemu/option.h" struct ForwardFieldVisitor { Visitor visitor; @@ -246,25 +245,27 @@ static void forward_field_optional(Visitor *v, const char *name, bool *present) visit_optional(ffv->target, name, present); } -static bool forward_field_deprecated_accept(Visitor *v, const char *name, - Error **errp) +static bool forward_field_policy_reject(Visitor *v, const char *name, + unsigned special_features, + Error **errp) { ForwardFieldVisitor *ffv = to_ffv(v); if (!forward_field_translate_name(ffv, &name, errp)) { - return false; + return true; } - return visit_deprecated_accept(ffv->target, name, errp); + return visit_policy_reject(ffv->target, name, special_features, errp); } -static bool forward_field_deprecated(Visitor *v, const char *name) +static bool forward_field_policy_skip(Visitor *v, const char *name, + unsigned special_features) { ForwardFieldVisitor *ffv = to_ffv(v); if (!forward_field_translate_name(ffv, &name, NULL)) { - return false; + return true; } - return visit_deprecated(ffv->target, name); + return visit_policy_skip(ffv->target, name, special_features); } static void forward_field_complete(Visitor *v, void *opaque) @@ -313,8 +314,8 @@ Visitor *visitor_forward_field(Visitor *target, const char *from, const char *to v->visitor.type_any = forward_field_type_any; v->visitor.type_null = forward_field_type_null; v->visitor.optional = forward_field_optional; - v->visitor.deprecated_accept = forward_field_deprecated_accept; - v->visitor.deprecated = forward_field_deprecated; + v->visitor.policy_reject = forward_field_policy_reject; + v->visitor.policy_skip = forward_field_policy_skip; v->visitor.complete = forward_field_complete; v->visitor.free = forward_field_free; diff --git a/qapi/qapi-schema.json b/qapi/qapi-schema.json index 4912b9744e..f000b90744 100644 --- a/qapi/qapi-schema.json +++ b/qapi/qapi-schema.json @@ -93,3 +93,5 @@ { 'include': 'audio.json' } { 'include': 'acpi.json' } { 'include': 'pci.json' } +{ 'include': 'stats.json' } +{ 'include': 'virtio.json' } diff --git a/qapi/qapi-type-helpers.c b/qapi/qapi-type-helpers.c new file mode 100644 index 0000000000..f76b34f647 --- /dev/null +++ b/qapi/qapi-type-helpers.c @@ -0,0 +1,23 @@ +/* + * QAPI common helper functions + * + * This file provides helper functions related to types defined + * in the QAPI schema. + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
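+ *
+ * A typical caller (sketched here; the command name is hypothetical)
+ * builds up a GString and hands ownership of its character buffer over:
+ *
+ *     HumanReadableText *qmp_x_query_example(Error **errp)
+ *     {
+ *         g_autoptr(GString) buf = g_string_new("");
+ *
+ *         g_string_append_printf(buf, "some state: %d\n", 42);
+ *         return human_readable_text_from_str(buf);
+ *     }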
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/type-helpers.h" + +HumanReadableText *human_readable_text_from_str(GString *str) +{ + HumanReadableText *ret = g_new0(HumanReadableText, 1); + + ret->human_readable_text = g_steal_pointer(&str->str); + + return ret; +} diff --git a/qapi/qapi-util.c b/qapi/qapi-util.c index 3c24bb3d45..63596e11c5 100644 --- a/qapi/qapi-util.c +++ b/qapi/qapi-util.c @@ -11,10 +11,53 @@ */ #include "qemu/osdep.h" +#include "qapi/compat-policy.h" #include "qapi/error.h" #include "qemu/ctype.h" #include "qapi/qmp/qerror.h" +CompatPolicy compat_policy; + +static bool compat_policy_input_ok1(const char *adjective, + CompatPolicyInput policy, + ErrorClass error_class, + const char *kind, const char *name, + Error **errp) +{ + switch (policy) { + case COMPAT_POLICY_INPUT_ACCEPT: + return true; + case COMPAT_POLICY_INPUT_REJECT: + error_set(errp, error_class, "%s %s %s disabled by policy", + adjective, kind, name); + return false; + case COMPAT_POLICY_INPUT_CRASH: + default: + abort(); + } +} + +bool compat_policy_input_ok(unsigned special_features, + const CompatPolicy *policy, + ErrorClass error_class, + const char *kind, const char *name, + Error **errp) +{ + if ((special_features & 1u << QAPI_DEPRECATED) + && !compat_policy_input_ok1("Deprecated", + policy->deprecated_input, + error_class, kind, name, errp)) { + return false; + } + if ((special_features & (1u << QAPI_UNSTABLE)) + && !compat_policy_input_ok1("Unstable", + policy->unstable_input, + error_class, kind, name, errp)) { + return false; + } + return true; +} + const char *qapi_enum_lookup(const QEnumLookup *lookup, int val) { assert(val >= 0 && val < lookup->size); @@ -70,7 +113,7 @@ bool qapi_bool_parse(const char *name, const char *value, bool *obj, Error **err * may contain only letters, digits, hyphen and period. * The special exception for enumeration names is not implemented. * See docs/devel/qapi-code-gen.txt for more on QAPI naming rules. - * Keep this consistent with scripts/qapi.py! + * Keep this consistent with scripts/qapi-gen.py! * If @complete, the parse fails unless it consumes @str completely. * Return its length on success, -1 on failure. 
*/ diff --git a/qapi/qapi-visit-core.c b/qapi/qapi-visit-core.c index a641adec51..6c13510a2b 100644 --- a/qapi/qapi-visit-core.c +++ b/qapi/qapi-visit-core.c @@ -13,12 +13,17 @@ */ #include "qemu/osdep.h" +#include "qapi/compat-policy.h" #include "qapi/error.h" #include "qapi/qmp/qerror.h" #include "qapi/visitor.h" #include "qapi/visitor-impl.h" #include "trace.h" +/* Zero-initialization must result in default policy */ +QEMU_BUILD_BUG_ON(COMPAT_POLICY_INPUT_ACCEPT || COMPAT_POLICY_OUTPUT_ACCEPT); + + void visit_complete(Visitor *v, void *opaque) { assert(v->type != VISITOR_OUTPUT || v->complete); @@ -135,22 +140,29 @@ bool visit_optional(Visitor *v, const char *name, bool *present) return *present; } -bool visit_deprecated_accept(Visitor *v, const char *name, Error **errp) +bool visit_policy_reject(Visitor *v, const char *name, + unsigned special_features, Error **errp) { - trace_visit_deprecated_accept(v, name); - if (v->deprecated_accept) { - return v->deprecated_accept(v, name, errp); + trace_visit_policy_reject(v, name); + if (v->policy_reject) { + return v->policy_reject(v, name, special_features, errp); } - return true; + return false; } -bool visit_deprecated(Visitor *v, const char *name) +bool visit_policy_skip(Visitor *v, const char *name, + unsigned special_features) { - trace_visit_deprecated(v, name); - if (v->deprecated) { - return v->deprecated(v, name); + trace_visit_policy_skip(v, name); + if (v->policy_skip) { + return v->policy_skip(v, name, special_features); } - return true; + return false; +} + +void visit_set_policy(Visitor *v, CompatPolicy *policy) +{ + v->compat_policy = *policy; } bool visit_is_input(Visitor *v) @@ -384,7 +396,7 @@ static bool input_type_enum(Visitor *v, const char *name, int *obj, const QEnumLookup *lookup, Error **errp) { int64_t value; - char *enum_str; + g_autofree char *enum_str = NULL; if (!visit_type_str(v, name, &enum_str, errp)) { return false; @@ -392,12 +404,19 @@ static bool input_type_enum(Visitor *v, const char *name, int *obj, value = qapi_enum_parse(lookup, enum_str, -1, NULL); if (value < 0) { - error_setg(errp, QERR_INVALID_PARAMETER, enum_str); - g_free(enum_str); + error_setg(errp, "Parameter '%s' does not accept value '%s'", + name ? name : "null", enum_str); + return false; + } + + if (lookup->special_features + && !compat_policy_input_ok(lookup->special_features[value], + &v->compat_policy, + ERROR_CLASS_GENERIC_ERROR, + "value", enum_str, errp)) { return false; } - g_free(enum_str); *obj = value; return true; } diff --git a/qapi/qdev.json b/qapi/qdev.json index b83178220b..2708fb4e99 100644 --- a/qapi/qdev.json +++ b/qapi/qdev.json @@ -32,17 +32,26 @@ ## # @device_add: # +# Add a device. +# # @driver: the name of the new device's driver # # @bus: the device's parent bus (device tree path) # # @id: the device's ID, must be unique # -# Additional arguments depend on the type. -# -# Add a device. +# Features: +# @json-cli: If present, the "-device" command line option supports JSON +# syntax with a structure identical to the arguments of this +# command. +# @json-cli-hotplug: If present, the "-device" command line option supports JSON +# syntax without the reference counting leak that broke +# hot-unplug # # Notes: +# +# Additional arguments depend on the type. +# # 1. For detailed information about this command, please refer to the # 'docs/qdev-device-use.txt' file. 
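#
#    As an example of the @json-cli feature described above, an equivalent
#    device can be declared on the command line in JSON syntax (driver and
#    bus names are placeholders):
#
#    -device '{"driver":"e1000","id":"net1","bus":"pci.0"}'
#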
# @@ -67,7 +76,8 @@ ## { 'command': 'device_add', 'data': {'driver': 'str', '*bus': 'str', '*id': 'str'}, - 'gen': false } # so we can get the additional arguments + 'gen': false, # so we can get the additional arguments + 'features': ['json-cli', 'json-cli-hotplug'] } ## # @device_del: @@ -84,7 +94,9 @@ # This command merely requests that the guest begin the hot removal # process. Completion of the device removal process is signaled with a # DEVICE_DELETED event. Guest reset will automatically complete removal -# for all devices. +# for all devices. If a guest-side error in the hot removal process is +# detected, the device will not be removed and a DEVICE_UNPLUG_GUEST_ERROR +# event is sent. Some errors cannot be detected. # # Since: 0.14 # @@ -108,9 +120,9 @@ # At this point, it's safe to reuse the specified device ID. Device removal can # be initiated by the guest or by HMP/QMP commands. # -# @device: device name +# @device: the device's ID if it has one # -# @path: device path +# @path: the device's QOM path # # Since: 1.5 # @@ -124,3 +136,25 @@ ## { 'event': 'DEVICE_DELETED', 'data': { '*device': 'str', 'path': 'str' } } + +## +# @DEVICE_UNPLUG_GUEST_ERROR: +# +# Emitted when a device hot unplug fails due to a guest reported error. +# +# @device: the device's ID if it has one +# +# @path: the device's QOM path +# +# Since: 6.2 +# +# Example: +# +# <- { "event": "DEVICE_UNPLUG_GUEST_ERROR", +# "data": { "device": "core1", +# "path": "/machine/peripheral/core1" }, +# "timestamp": { "seconds": 1615570772, "microseconds": 202844 } } +# +## +{ 'event': 'DEVICE_UNPLUG_GUEST_ERROR', + 'data': { '*device': 'str', 'path': 'str' } } diff --git a/qapi/qmp-dispatch.c b/qapi/qmp-dispatch.c index 59600210ce..0990873ec8 100644 --- a/qapi/qmp-dispatch.c +++ b/qapi/qmp-dispatch.c @@ -21,18 +21,15 @@ #include "qapi/qmp/qjson.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qobject-output-visitor.h" -#include "sysemu/runstate.h" #include "qapi/qmp/qbool.h" #include "qemu/coroutine.h" #include "qemu/main-loop.h" -CompatPolicy compat_policy; - Visitor *qobject_input_visitor_new_qmp(QObject *obj) { Visitor *v = qobject_input_visitor_new(obj); - qobject_input_visitor_set_policy(v, compat_policy.deprecated_input); + visit_set_policy(v, &compat_policy); return v; } @@ -40,7 +37,7 @@ Visitor *qobject_output_visitor_new_qmp(QObject **result) { Visitor *v = qobject_output_visitor_new(result); - qobject_output_visitor_set_policy(v, compat_policy.deprecated_output); + visit_set_policy(v, &compat_policy); return v; } @@ -176,19 +173,10 @@ QDict *qmp_dispatch(const QmpCommandList *cmds, QObject *request, "The command %s has not been found", command); goto out; } - if (cmd->options & QCO_DEPRECATED) { - switch (compat_policy.deprecated_input) { - case COMPAT_POLICY_INPUT_ACCEPT: - break; - case COMPAT_POLICY_INPUT_REJECT: - error_set(&err, ERROR_CLASS_COMMAND_NOT_FOUND, - "Deprecated command %s disabled by policy", - command); - goto out; - case COMPAT_POLICY_INPUT_CRASH: - default: - abort(); - } + if (!compat_policy_input_ok(cmd->special_features, &compat_policy, + ERROR_CLASS_COMMAND_NOT_FOUND, + "command", command, &err)) { + goto out; } if (!cmd->enabled) { error_set(&err, ERROR_CLASS_COMMAND_NOT_FOUND, diff --git a/qapi/qmp-event.c b/qapi/qmp-event.c index 19d3cd0038..0fe0d0a5a6 100644 --- a/qapi/qmp-event.c +++ b/qapi/qmp-event.c @@ -20,15 +20,12 @@ static void timestamp_put(QDict *qdict) { - int err; QDict *ts; - qemu_timeval tv; + int64_t rt = g_get_real_time(); - err = qemu_gettimeofday(&tv); - /* 
Put -1 to indicate failure of getting host time */ ts = qdict_from_jsonf_nofail("{ 'seconds': %lld, 'microseconds': %lld }", - err < 0 ? -1LL : (long long)tv.tv_sec, - err < 0 ? -1LL : (long long)tv.tv_usec); + (long long)rt / G_USEC_PER_SEC, + (long long)rt % G_USEC_PER_SEC); qdict_put(qdict, "timestamp", ts); } diff --git a/qapi/qmp-registry.c b/qapi/qmp-registry.c index f78c064aae..485bc5e6fc 100644 --- a/qapi/qmp-registry.c +++ b/qapi/qmp-registry.c @@ -16,7 +16,8 @@ #include "qapi/qmp/dispatch.h" void qmp_register_command(QmpCommandList *cmds, const char *name, - QmpCommandFunc *fn, QmpCommandOptions options) + QmpCommandFunc *fn, QmpCommandOptions options, + unsigned special_features) { QmpCommand *cmd = g_malloc0(sizeof(*cmd)); @@ -27,6 +28,7 @@ void qmp_register_command(QmpCommandList *cmds, const char *name, cmd->fn = fn; cmd->enabled = true; cmd->options = options; + cmd->special_features = special_features; QTAILQ_INSERT_TAIL(cmds, cmd, node); } diff --git a/qapi/qobject-input-visitor.c b/qapi/qobject-input-visitor.c index 04b790412e..3e8aca6b15 100644 --- a/qapi/qobject-input-visitor.c +++ b/qapi/qobject-input-visitor.c @@ -28,7 +28,7 @@ #include "qapi/qmp/qnum.h" #include "qapi/qmp/qstring.h" #include "qemu/cutils.h" -#include "qemu/option.h" +#include "qemu/keyval.h" typedef struct StackObject { const char *name; /* Name of @obj in its parent, if any */ @@ -44,7 +44,6 @@ typedef struct StackObject { struct QObjectInputVisitor { Visitor visitor; - CompatPolicyInput deprecated_policy; /* Root of visit at visitor creation. */ QObject *root; @@ -664,22 +663,13 @@ static void qobject_input_optional(Visitor *v, const char *name, bool *present) *present = true; } -static bool qobject_input_deprecated_accept(Visitor *v, const char *name, - Error **errp) +static bool qobject_input_policy_reject(Visitor *v, const char *name, + unsigned special_features, + Error **errp) { - QObjectInputVisitor *qiv = to_qiv(v); - - switch (qiv->deprecated_policy) { - case COMPAT_POLICY_INPUT_ACCEPT: - return true; - case COMPAT_POLICY_INPUT_REJECT: - error_setg(errp, "Deprecated parameter '%s' disabled by policy", - name); - return false; - case COMPAT_POLICY_INPUT_CRASH: - default: - abort(); - } + return !compat_policy_input_ok(special_features, &v->compat_policy, + ERROR_CLASS_GENERIC_ERROR, + "parameter", name, errp); } static void qobject_input_free(Visitor *v) @@ -716,7 +706,7 @@ static QObjectInputVisitor *qobject_input_visitor_base_new(QObject *obj) v->visitor.end_list = qobject_input_end_list; v->visitor.start_alternate = qobject_input_start_alternate; v->visitor.optional = qobject_input_optional; - v->visitor.deprecated_accept = qobject_input_deprecated_accept; + v->visitor.policy_reject = qobject_input_policy_reject; v->visitor.free = qobject_input_free; v->root = qobject_ref(obj); @@ -739,14 +729,6 @@ Visitor *qobject_input_visitor_new(QObject *obj) return &v->visitor; } -void qobject_input_visitor_set_policy(Visitor *v, - CompatPolicyInput deprecated) -{ - QObjectInputVisitor *qiv = to_qiv(v); - - qiv->deprecated_policy = deprecated; -} - Visitor *qobject_input_visitor_new_keyval(QObject *obj) { QObjectInputVisitor *v = qobject_input_visitor_base_new(obj); diff --git a/qapi/qobject-output-visitor.c b/qapi/qobject-output-visitor.c index e4873308d4..74770edd73 100644 --- a/qapi/qobject-output-visitor.c +++ b/qapi/qobject-output-visitor.c @@ -32,7 +32,6 @@ typedef struct QStackEntry { struct QObjectOutputVisitor { Visitor visitor; - CompatPolicyOutput deprecated_policy; QSLIST_HEAD(, 
QStackEntry) stack; /* Stack of unfinished containers */ QObject *root; /* Root of the output visit */ @@ -210,11 +209,15 @@ static bool qobject_output_type_null(Visitor *v, const char *name, return true; } -static bool qobject_output_deprecated(Visitor *v, const char *name) +static bool qobject_output_policy_skip(Visitor *v, const char *name, + unsigned special_features) { - QObjectOutputVisitor *qov = to_qov(v); + CompatPolicy *pol = &v->compat_policy; - return qov->deprecated_policy != COMPAT_POLICY_OUTPUT_HIDE; + return ((special_features & 1u << QAPI_DEPRECATED) + && pol->deprecated_output == COMPAT_POLICY_OUTPUT_HIDE) + || ((special_features & 1u << QAPI_UNSTABLE) + && pol->unstable_output == COMPAT_POLICY_OUTPUT_HIDE); } /* Finish building, and return the root object. @@ -266,7 +269,7 @@ Visitor *qobject_output_visitor_new(QObject **result) v->visitor.type_number = qobject_output_type_number; v->visitor.type_any = qobject_output_type_any; v->visitor.type_null = qobject_output_type_null; - v->visitor.deprecated = qobject_output_deprecated; + v->visitor.policy_skip = qobject_output_policy_skip; v->visitor.complete = qobject_output_complete; v->visitor.free = qobject_output_free; @@ -275,11 +278,3 @@ Visitor *qobject_output_visitor_new(QObject **result) return &v->visitor; } - -void qobject_output_visitor_set_policy(Visitor *v, - CompatPolicyOutput deprecated) -{ - QObjectOutputVisitor *qov = to_qov(v); - - qov->deprecated_policy = deprecated; -} diff --git a/qapi/qom.json b/qapi/qom.json index 6d5f4a88e6..30e76653ad 100644 --- a/qapi/qom.json +++ b/qapi/qom.json @@ -499,6 +499,28 @@ '*repeat': 'bool', '*grab-toggle': 'GrabToggleKeys' } } +## +# @EventLoopBaseProperties: +# +# Common properties for event loops +# +# @aio-max-batch: maximum number of requests in a batch for the AIO engine, +# 0 means that the engine will use its default. +# (default: 0) +# +# @thread-pool-min: minimum number of threads reserved in the thread pool +# (default:0) +# +# @thread-pool-max: maximum number of threads the thread pool can contain +# (default:64) +# +# Since: 7.1 +## +{ 'struct': 'EventLoopBaseProperties', + 'data': { '*aio-max-batch': 'int', + '*thread-pool-min': 'int', + '*thread-pool-max': 'int' } } + ## # @IothreadProperties: # @@ -516,17 +538,26 @@ # algorithm detects it is spending too long polling without # encountering events. 0 selects a default behaviour (default: 0) # -# @aio-max-batch: maximum number of requests in a batch for the AIO engine, -# 0 means that the engine will use its default -# (default:0, since 6.1) +# The @aio-max-batch option is available since 6.1. # # Since: 2.0 ## { 'struct': 'IothreadProperties', + 'base': 'EventLoopBaseProperties', 'data': { '*poll-max-ns': 'int', '*poll-grow': 'int', - '*poll-shrink': 'int', - '*aio-max-batch': 'int' } } + '*poll-shrink': 'int' } } + +## +# @MainLoopProperties: +# +# Properties for the main-loop object. 
+# +# Since: 7.1 +## +{ 'struct': 'MainLoopProperties', + 'base': 'EventLoopBaseProperties', + 'data': {} } ## # @MemoryBackendProperties: @@ -547,6 +578,9 @@ # # @prealloc-threads: number of CPU threads to use for prealloc (default: 1) # +# @prealloc-context: thread context to use for creation of preallocation threads +# (default: none) (since 7.2) +# # @share: if false, the memory is private to QEMU; if true, it is shared # (default: false) # @@ -555,14 +589,12 @@ # # @size: size of the memory region in bytes # -# @x-use-canonical-path-for-ramblock-id: if true, the canoncial path is used +# @x-use-canonical-path-for-ramblock-id: if true, the canonical path is used # for ramblock-id. Disable this for 4.0 # machine types or older to allow # migration with newer QEMU versions. -# This option is considered stable -# despite the x- prefix. (default: -# false generally, but true for machine -# types <= 4.0) +# (default: false generally, +# but true for machine types <= 4.0) # # Note: prealloc=true and reserve=false cannot be set at the same time. With # reserve=true, the behavior depends on the operating system: for example, @@ -579,6 +611,7 @@ '*policy': 'HostMemPolicy', '*prealloc': 'bool', '*prealloc-threads': 'uint32', + '*prealloc-context': 'str', '*share': 'bool', '*reserve': 'bool', 'size': 'size', @@ -618,7 +651,7 @@ 'data': { '*align': 'size', '*discard-data': 'bool', 'mem-path': 'str', - '*pmem': { 'type': 'bool', 'if': 'defined(CONFIG_LIBPMEM)' }, + '*pmem': { 'type': 'bool', 'if': 'CONFIG_LIBPMEM' }, '*readonly': 'bool' } } ## @@ -647,6 +680,23 @@ '*hugetlbsize': 'size', '*seal': 'bool' } } +## +# @MemoryBackendEpcProperties: +# +# Properties for memory-backend-epc objects. +# +# The @share boolean option is true by default with epc +# +# The @merge boolean option is false by default with epc +# +# The @dump boolean option is false by default with epc +# +# Since: 6.2 +## +{ 'struct': 'MemoryBackendEpcProperties', + 'base': 'MemoryBackendProperties', + 'data': {} } + ## # @PrManagerHelperProperties: # @@ -688,6 +738,20 @@ { 'struct': 'RemoteObjectProperties', 'data': { 'fd': 'str', 'devid': 'str' } } +## +# @VfioUserServerProperties: +# +# Properties for x-vfio-user-server objects. +# +# @socket: socket to be used by the libvfio-user library +# +# @device: the ID of the device to be emulated at the server +# +# Since: 7.1 +## +{ 'struct': 'VfioUserServerProperties', + 'data': { 'socket': 'SocketAddress', 'device': 'str' } } + ## # @RngProperties: # @@ -754,6 +818,10 @@ # @reduced-phys-bits: number of bits in physical addresses that become # unavailable when SEV is enabled # +# @kernel-hashes: if true, add hashes of kernel/initrd/cmdline to a +# designated guest firmware page for measured boot +# with -kernel (default: false) (since 6.2) +# # Since: 2.12 ## { 'struct': 'SevGuestProperties', @@ -763,11 +831,37 @@ '*policy': 'uint32', '*handle': 'uint32', '*cbitpos': 'uint32', - 'reduced-phys-bits': 'uint32' } } + 'reduced-phys-bits': 'uint32', + '*kernel-hashes': 'bool' } } + +## +# @ThreadContextProperties: +# +# Properties for thread context objects. +# +# @cpu-affinity: the list of host CPU numbers used as CPU affinity for all +# threads created in the thread context (default: QEMU main +# thread CPU affinity) +# +# @node-affinity: the list of host node numbers that will be resolved to a +# list of host CPU numbers used as CPU affinity. This is a +# shortcut for specifying the list of host CPU numbers +# belonging to the host nodes manually by setting +# @cpu-affinity. 
(default: QEMU main thread affinity) +# +# Since: 7.2 +## +{ 'struct': 'ThreadContextProperties', + 'data': { '*cpu-affinity': ['uint16'], + '*node-affinity': ['uint16'] } } + ## # @ObjectType: # +# Features: +# @unstable: Member @x-remote-object is experimental. +# # Since: 6.0 ## { 'enum': 'ObjectType', @@ -777,12 +871,14 @@ 'authz-pam', 'authz-simple', 'can-bus', - 'can-host-socketcan', + { 'name': 'can-host-socketcan', + 'if': 'CONFIG_LINUX' }, 'colo-compare', 'cryptodev-backend', 'cryptodev-backend-builtin', + 'cryptodev-backend-lkcf', { 'name': 'cryptodev-vhost-user', - 'if': 'defined(CONFIG_VHOST_CRYPTO)' }, + 'if': 'CONFIG_VHOST_CRYPTO' }, 'dbus-vmstate', 'filter-buffer', 'filter-dump', @@ -791,28 +887,37 @@ 'filter-replay', 'filter-rewriter', 'input-barrier', - 'input-linux', + { 'name': 'input-linux', + 'if': 'CONFIG_LINUX' }, 'iothread', + 'main-loop', + { 'name': 'memory-backend-epc', + 'if': 'CONFIG_LINUX' }, 'memory-backend-file', { 'name': 'memory-backend-memfd', - 'if': 'defined(CONFIG_LINUX)' }, + 'if': 'CONFIG_LINUX' }, 'memory-backend-ram', 'pef-guest', - 'pr-manager-helper', + { 'name': 'pr-manager-helper', + 'if': 'CONFIG_LINUX' }, 'qtest', 'rng-builtin', 'rng-egd', - 'rng-random', + { 'name': 'rng-random', + 'if': 'CONFIG_POSIX' }, 'secret', - 'secret_keyring', + { 'name': 'secret_keyring', + 'if': 'CONFIG_SECRET_KEYRING' }, 'sev-guest', + 'thread-context', 's390-pv-guest', 'throttle-group', 'tls-creds-anon', 'tls-creds-psk', 'tls-creds-x509', 'tls-cipher-suites', - 'x-remote-object' + { 'name': 'x-remote-object', 'features': [ 'unstable' ] }, + { 'name': 'x-vfio-user-server', 'features': [ 'unstable' ] } ] } ## @@ -835,12 +940,14 @@ 'authz-listfile': 'AuthZListFileProperties', 'authz-pam': 'AuthZPAMProperties', 'authz-simple': 'AuthZSimpleProperties', - 'can-host-socketcan': 'CanHostSocketcanProperties', + 'can-host-socketcan': { 'type': 'CanHostSocketcanProperties', + 'if': 'CONFIG_LINUX' }, 'colo-compare': 'ColoCompareProperties', 'cryptodev-backend': 'CryptodevBackendProperties', 'cryptodev-backend-builtin': 'CryptodevBackendProperties', + 'cryptodev-backend-lkcf': 'CryptodevBackendProperties', 'cryptodev-vhost-user': { 'type': 'CryptodevVhostUserProperties', - 'if': 'defined(CONFIG_VHOST_CRYPTO)' }, + 'if': 'CONFIG_VHOST_CRYPTO' }, 'dbus-vmstate': 'DBusVMStateProperties', 'filter-buffer': 'FilterBufferProperties', 'filter-dump': 'FilterDumpProperties', @@ -849,26 +956,35 @@ 'filter-replay': 'NetfilterProperties', 'filter-rewriter': 'FilterRewriterProperties', 'input-barrier': 'InputBarrierProperties', - 'input-linux': 'InputLinuxProperties', + 'input-linux': { 'type': 'InputLinuxProperties', + 'if': 'CONFIG_LINUX' }, 'iothread': 'IothreadProperties', + 'main-loop': 'MainLoopProperties', + 'memory-backend-epc': { 'type': 'MemoryBackendEpcProperties', + 'if': 'CONFIG_LINUX' }, 'memory-backend-file': 'MemoryBackendFileProperties', 'memory-backend-memfd': { 'type': 'MemoryBackendMemfdProperties', - 'if': 'defined(CONFIG_LINUX)' }, + 'if': 'CONFIG_LINUX' }, 'memory-backend-ram': 'MemoryBackendProperties', - 'pr-manager-helper': 'PrManagerHelperProperties', + 'pr-manager-helper': { 'type': 'PrManagerHelperProperties', + 'if': 'CONFIG_LINUX' }, 'qtest': 'QtestProperties', 'rng-builtin': 'RngProperties', 'rng-egd': 'RngEgdProperties', - 'rng-random': 'RngRandomProperties', + 'rng-random': { 'type': 'RngRandomProperties', + 'if': 'CONFIG_POSIX' }, 'secret': 'SecretProperties', - 'secret_keyring': 'SecretKeyringProperties', + 'secret_keyring': { 'type': 
'SecretKeyringProperties', + 'if': 'CONFIG_SECRET_KEYRING' }, 'sev-guest': 'SevGuestProperties', + 'thread-context': 'ThreadContextProperties', 'throttle-group': 'ThrottleGroupProperties', 'tls-creds-anon': 'TlsCredsAnonProperties', 'tls-creds-psk': 'TlsCredsPskProperties', 'tls-creds-x509': 'TlsCredsX509Properties', 'tls-cipher-suites': 'TlsCredsProperties', - 'x-remote-object': 'RemoteObjectProperties' + 'x-remote-object': 'RemoteObjectProperties', + 'x-vfio-user-server': 'VfioUserServerProperties' } } ## diff --git a/qapi/replay.json b/qapi/replay.json index bfd83d7591..729470300d 100644 --- a/qapi/replay.json +++ b/qapi/replay.json @@ -1,4 +1,5 @@ # -*- Mode: Python -*- +# vim: filetype=python # ## @@ -39,7 +40,6 @@ # @icount: current number of executed instructions. # # Since: 5.2 -# ## { 'struct': 'ReplayInfo', 'data': { 'mode': 'ReplayMode', '*filename': 'str', 'icount': 'int' } } @@ -80,7 +80,7 @@ # # Example: # -# -> { "execute": "replay-break", "data": { "icount": 220414 } } +# -> { "execute": "replay-break", "arguments": { "icount": 220414 } } # ## { 'command': 'replay-break', 'data': { 'icount': 'int' } } @@ -116,6 +116,6 @@ # # Example: # -# -> { "execute": "replay-seek", "data": { "icount": 220414 } } +# -> { "execute": "replay-seek", "arguments": { "icount": 220414 } } ## { 'command': 'replay-seek', 'data': { 'icount': 'int' } } diff --git a/qapi/run-state.json b/qapi/run-state.json index 43d66d700f..419c188dd1 100644 --- a/qapi/run-state.json +++ b/qapi/run-state.json @@ -86,12 +86,16 @@ # ignores --no-reboot. This is useful for sanitizing # hypercalls on s390 that are used during kexec/kdump/boot # +# @snapshot-load: A snapshot is being loaded by the record & replay +# subsystem. This value is used only within QEMU. It +# doesn't occur in QMP. 
(since 7.2) +# ## { 'enum': 'ShutdownCause', # Beware, shutdown_caused_by_guest() depends on enumeration order 'data': [ 'none', 'host-error', 'host-qmp-quit', 'host-qmp-system-reset', 'host-signal', 'host-ui', 'guest-shutdown', 'guest-reset', - 'guest-panic', 'subsystem-reset'] } + 'guest-panic', 'subsystem-reset', 'snapshot-load'] } ## # @StatusInfo: @@ -104,7 +108,7 @@ # # @status: the virtual machine @RunState # -# Since: 0.14 +# Since: 0.14 # # Notes: @singlestep is enabled through the GDB stub ## @@ -118,7 +122,7 @@ # # Returns: @StatusInfo reflecting all VCPUs # -# Since: 0.14 +# Since: 0.14 # # Example: # @@ -150,7 +154,8 @@ # # Example: # -# <- { "event": "SHUTDOWN", "data": { "guest": true }, +# <- { "event": "SHUTDOWN", +# "data": { "guest": true, "reason": "guest-shutdown" }, # "timestamp": { "seconds": 1267040730, "microseconds": 682951 } } # ## @@ -188,7 +193,8 @@ # # Example: # -# <- { "event": "RESET", "data": { "guest": false }, +# <- { "event": "RESET", +# "data": { "guest": false, "reason": "guest-reset" }, # "timestamp": { "seconds": 1267041653, "microseconds": 9518 } } # ## @@ -346,7 +352,7 @@ # # @poweroff: Shutdown the VM and exit # -# @pause: pause the VM# +# @pause: pause the VM # # Since: 6.0 ## @@ -362,10 +368,13 @@ # # @shutdown: Shutdown the VM and exit, according to the shutdown action # +# @exit-failure: Shutdown the VM and exit with nonzero status +# (since 7.1) +# # Since: 6.0 ## { 'enum': 'PanicAction', - 'data': [ 'pause', 'shutdown', 'none' ] } + 'data': [ 'pause', 'shutdown', 'exit-failure', 'none' ] } ## # @watchdog-set-action: @@ -424,7 +433,8 @@ # Example: # # <- { "event": "GUEST_PANICKED", -# "data": { "action": "pause" } } +# "data": { "action": "pause" }, +# "timestamp": { "seconds": 1648245231, "microseconds": 900001 } } # ## { 'event': 'GUEST_PANICKED', @@ -444,7 +454,8 @@ # Example: # # <- { "event": "GUEST_CRASHLOADED", -# "data": { "action": "run" } } +# "data": { "action": "run" }, +# "timestamp": { "seconds": 1648245259, "microseconds": 893771 } } # ## { 'event': 'GUEST_CRASHLOADED', @@ -567,7 +578,9 @@ # <- { "event": "MEMORY_FAILURE", # "data": { "recipient": "hypervisor", # "action": "fatal", -# "flags": { 'action-required': false } } +# "flags": { "action-required": false, +# "recursive": false } }, +# "timestamp": { "seconds": 1267061043, "microseconds": 959568 } } # ## { 'event': 'MEMORY_FAILURE', @@ -586,13 +599,11 @@ # @guest: memory failure at guest memory, # # Since: 5.2 -# ## { 'enum': 'MemoryFailureRecipient', 'data': [ 'hypervisor', 'guest' ] } - ## # @MemoryFailureAction: # @@ -613,7 +624,6 @@ # to handle memory failures. # # Since: 5.2 -# ## { 'enum': 'MemoryFailureAction', 'data': [ 'ignore', @@ -633,8 +643,24 @@ # failure was still in progress. # # Since: 5.2 -# ## { 'struct': 'MemoryFailureFlags', 'data': { 'action-required': 'bool', 'recursive': 'bool'} } + +## +# @NotifyVmexitOption: +# +# An enumeration of the options specified when enabling notify VM exit +# +# @run: enable the feature, do nothing and continue if the notify VM exit happens. +# +# @internal-error: enable the feature, raise a internal error if the notify +# VM exit happens. +# +# @disable: disable the feature. 
+# +# Since: 7.2 +## +{ 'enum': 'NotifyVmexitOption', + 'data': [ 'run', 'internal-error', 'disable' ] } \ No newline at end of file diff --git a/qapi/sockets.json b/qapi/sockets.json index 735eb4abb5..bad74e34d3 100644 --- a/qapi/sockets.json +++ b/qapi/sockets.json @@ -69,7 +69,7 @@ '*ipv4': 'bool', '*ipv6': 'bool', '*keep-alive': 'bool', - '*mptcp': { 'type': 'bool', 'if': 'defined(IPPROTO_MPTCP)' } } } + '*mptcp': { 'type': 'bool', 'if': 'HAVE_IPPROTO_MPTCP' } } } ## # @UnixSocketAddress: @@ -89,8 +89,8 @@ { 'struct': 'UnixSocketAddress', 'data': { 'path': 'str', - '*abstract': { 'type': 'bool', 'if': 'defined(CONFIG_LINUX)' }, - '*tight': { 'type': 'bool', 'if': 'defined(CONFIG_LINUX)' } } } + '*abstract': { 'type': 'bool', 'if': 'CONFIG_LINUX' }, + '*tight': { 'type': 'bool', 'if': 'CONFIG_LINUX' } } } ## # @VsockSocketAddress: @@ -110,6 +110,38 @@ 'cid': 'str', 'port': 'str' } } +## +# @InetSocketAddressWrapper: +# +# Since: 1.3 +## +{ 'struct': 'InetSocketAddressWrapper', + 'data': { 'data': 'InetSocketAddress' } } + +## +# @UnixSocketAddressWrapper: +# +# Since: 1.3 +## +{ 'struct': 'UnixSocketAddressWrapper', + 'data': { 'data': 'UnixSocketAddress' } } + +## +# @VsockSocketAddressWrapper: +# +# Since: 2.8 +## +{ 'struct': 'VsockSocketAddressWrapper', + 'data': { 'data': 'VsockSocketAddress' } } + +## +# @StringWrapper: +# +# Since: 1.3 +## +{ 'struct': 'StringWrapper', + 'data': { 'data': 'String' } } + ## # @SocketAddressLegacy: # @@ -117,27 +149,27 @@ # # Note: This type is deprecated in favor of SocketAddress. The # difference between SocketAddressLegacy and SocketAddress is that the -# latter is a flat union rather than a simple union. Flat is nicer -# because it avoids nesting on the wire, i.e. that form has fewer {}. - +# latter has fewer {} on the wire. # # Since: 1.3 ## { 'union': 'SocketAddressLegacy', + 'base': { 'type': 'SocketAddressType' }, + 'discriminator': 'type', 'data': { - 'inet': 'InetSocketAddress', - 'unix': 'UnixSocketAddress', - 'vsock': 'VsockSocketAddress', - 'fd': 'String' } } + 'inet': 'InetSocketAddressWrapper', + 'unix': 'UnixSocketAddressWrapper', + 'vsock': 'VsockSocketAddressWrapper', + 'fd': 'StringWrapper' } } ## # @SocketAddressType: # # Available SocketAddress types # -# @inet: Internet address +# @inet: Internet address # -# @unix: Unix domain socket +# @unix: Unix domain socket # # @vsock: VMCI address # @@ -157,7 +189,7 @@ # Captures the address of a socket, which could also be a named file # descriptor # -# @type: Transport type +# @type: Transport type # # Since: 2.9 ## diff --git a/qapi/stats.json b/qapi/stats.json new file mode 100644 index 0000000000..57db5b1c74 --- /dev/null +++ b/qapi/stats.json @@ -0,0 +1,251 @@ +# -*- Mode: Python -*- +# vim: filetype=python +# +# Copyright (c) 2022 Oracle and/or its affiliates. +# +# This work is licensed under the terms of the GNU GPL, version 2 or later. +# See the COPYING file in the top-level directory. +# +# SPDX-License-Identifier: GPL-2.0-or-later + +## +# = Statistics +## + +## +# @StatsType: +# +# Enumeration of statistics types +# +# @cumulative: stat is cumulative; value can only increase. +# @instant: stat is instantaneous; value can increase or decrease. +# @peak: stat is the peak value; value can only increase. +# @linear-histogram: stat is a linear histogram. +# @log2-histogram: stat is a logarithmic histogram, with one bucket +# for each power of two. 
+# +# Since: 7.1 +## +{ 'enum' : 'StatsType', + 'data' : [ 'cumulative', 'instant', 'peak', 'linear-histogram', + 'log2-histogram' ] } + +## +# @StatsUnit: +# +# Enumeration of unit of measurement for statistics +# +# @bytes: stat reported in bytes. +# @seconds: stat reported in seconds. +# @cycles: stat reported in clock cycles. +# @boolean: stat is a boolean value. +# +# Since: 7.1 +## +{ 'enum' : 'StatsUnit', + 'data' : [ 'bytes', 'seconds', 'cycles', 'boolean' ] } + +## +# @StatsProvider: +# +# Enumeration of statistics providers. +# +# Since: 7.1 +## +{ 'enum': 'StatsProvider', + 'data': [ 'kvm' ] } + +## +# @StatsTarget: +# +# The kinds of objects on which one can request statistics. +# +# @vm: statistics that apply to the entire virtual machine or +# the entire QEMU process. +# +# @vcpu: statistics that apply to a single virtual CPU. +# +# Since: 7.1 +## +{ 'enum': 'StatsTarget', + 'data': [ 'vm', 'vcpu' ] } + +## +# @StatsRequest: +# +# Indicates a set of statistics that should be returned by query-stats. +# +# @provider: provider for which to return statistics. + +# @names: statistics to be returned (all if omitted). +# +# Since: 7.1 +## +{ 'struct': 'StatsRequest', + 'data': { 'provider': 'StatsProvider', + '*names': [ 'str' ] } } + +## +# @StatsVCPUFilter: +# +# @vcpus: list of QOM paths for the desired vCPU objects. +# +# Since: 7.1 +## +{ 'struct': 'StatsVCPUFilter', + 'data': { '*vcpus': [ 'str' ] } } + +## +# @StatsFilter: +# +# The arguments to the query-stats command; specifies a target for which to +# request statistics and optionally the required subset of information for +# that target: +# - which vCPUs to request statistics for +# - which providers to request statistics from +# - which named values to return within each provider +# +# Since: 7.1 +## +{ 'union': 'StatsFilter', + 'base': { + 'target': 'StatsTarget', + '*providers': [ 'StatsRequest' ] }, + 'discriminator': 'target', + 'data': { 'vcpu': 'StatsVCPUFilter' } } + +## +# @StatsValue: +# +# @scalar: single unsigned 64-bit integers. +# @list: list of unsigned 64-bit integers (used for histograms). +# +# Since: 7.1 +## +{ 'alternate': 'StatsValue', + 'data': { 'scalar': 'uint64', + 'boolean': 'bool', + 'list': [ 'uint64' ] } } + +## +# @Stats: +# +# @name: name of stat. +# @value: stat value. +# +# Since: 7.1 +## +{ 'struct': 'Stats', + 'data': { 'name': 'str', + 'value' : 'StatsValue' } } + +## +# @StatsResult: +# +# @provider: provider for this set of statistics. +# +# @qom-path: Path to the object for which the statistics are returned, +# if the object is exposed in the QOM tree +# +# @stats: list of statistics. +# +# Since: 7.1 +## +{ 'struct': 'StatsResult', + 'data': { 'provider': 'StatsProvider', + '*qom-path': 'str', + 'stats': [ 'Stats' ] } } + +## +# @query-stats: +# +# Return runtime-collected statistics for objects such as the +# VM or its vCPUs. +# +# The arguments are a StatsFilter and specify the provider and objects +# to return statistics about. +# +# Returns: a list of StatsResult, one for each provider and object +# (e.g., for each vCPU). +# +# Since: 7.1 +## +{ 'command': 'query-stats', + 'data': 'StatsFilter', + 'boxed': true, + 'returns': [ 'StatsResult' ] } + +## +# @StatsSchemaValue: +# +# Schema for a single statistic. +# +# @name: name of the statistic; each element of the schema is uniquely +# identified by a target, a provider (both available in @StatsSchema) +# and the name. +# +# @type: kind of statistic. 
+# +# @unit: basic unit of measure for the statistic; if missing, the statistic +# is a simple number or counter. +# +# @base: base for the multiple of @unit in which the statistic is measured. +# Only present if @exponent is non-zero; @base and @exponent together +# form a SI prefix (e.g., _nano-_ for ``base=10`` and ``exponent=-9``) +# or IEC binary prefix (e.g. _kibi-_ for ``base=2`` and ``exponent=10``) +# +# @exponent: exponent for the multiple of @unit in which the statistic is +# expressed, or 0 for the basic unit +# +# @bucket-size: Present when @type is "linear-histogram", contains the width +# of each bucket of the histogram. +# +# Since: 7.1 +## +{ 'struct': 'StatsSchemaValue', + 'data': { 'name': 'str', + 'type': 'StatsType', + '*unit': 'StatsUnit', + '*base': 'int8', + 'exponent': 'int16', + '*bucket-size': 'uint32' } } + +## +# @StatsSchema: +# +# Schema for all available statistics for a provider and target. +# +# @provider: provider for this set of statistics. +# +# @target: the kind of object that can be queried through the provider. +# +# @stats: list of statistics. +# +# Since: 7.1 +## +{ 'struct': 'StatsSchema', + 'data': { 'provider': 'StatsProvider', + 'target': 'StatsTarget', + 'stats': [ 'StatsSchemaValue' ] } } + +## +# @query-stats-schemas: +# +# Return the schema for all available runtime-collected statistics. +# +# Note: runtime-collected statistics and their names fall outside QEMU's usual +# deprecation policies. QEMU will try to keep the set of available data +# stable, together with their names, but will not guarantee stability +# at all costs; the same is true of providers that source statistics +# externally, e.g. from Linux. For example, if the same value is being +# tracked with different names on different architectures or by different +# providers, one of them might be renamed. A statistic might go away if +# an algorithm is changed or some code is removed; changing a default +# might cause previously useful statistics to always report 0. Such +# changes, however, are expected to be rare. 
+# +# Since: 7.1 +## +{ 'command': 'query-stats-schemas', + 'data': { '*provider': 'StatsProvider' }, + 'returns': [ 'StatsSchema' ] } diff --git a/qapi/string-output-visitor.c b/qapi/string-output-visitor.c index 5506c933de..71ddc92b7b 100644 --- a/qapi/string-output-visitor.c +++ b/qapi/string-output-visitor.c @@ -14,7 +14,6 @@ #include "qemu/cutils.h" #include "qapi/string-output-visitor.h" #include "qapi/visitor-impl.h" -#include "qemu/host-utils.h" #include #include "qemu/range.h" diff --git a/qapi/tpm.json b/qapi/tpm.json index 75590979fd..4e2ea9756a 100644 --- a/qapi/tpm.json +++ b/qapi/tpm.json @@ -18,7 +18,7 @@ # Since: 1.5 ## { 'enum': 'TpmModel', 'data': [ 'tpm-tis', 'tpm-crb', 'tpm-spapr' ], - 'if': 'defined(CONFIG_TPM)' } + 'if': 'CONFIG_TPM' } ## # @query-tpm-models: @@ -36,7 +36,7 @@ # ## { 'command': 'query-tpm-models', 'returns': ['TpmModel'], - 'if': 'defined(CONFIG_TPM)' } + 'if': 'CONFIG_TPM' } ## # @TpmType: @@ -50,7 +50,7 @@ # Since: 1.5 ## { 'enum': 'TpmType', 'data': [ 'passthrough', 'emulator' ], - 'if': 'defined(CONFIG_TPM)' } + 'if': 'CONFIG_TPM' } ## # @query-tpm-types: @@ -68,7 +68,7 @@ # ## { 'command': 'query-tpm-types', 'returns': ['TpmType'], - 'if': 'defined(CONFIG_TPM)' } + 'if': 'CONFIG_TPM' } ## # @TPMPassthroughOptions: @@ -85,7 +85,7 @@ { 'struct': 'TPMPassthroughOptions', 'data': { '*path': 'str', '*cancel-path': 'str' }, - 'if': 'defined(CONFIG_TPM)' } + 'if': 'CONFIG_TPM' } ## # @TPMEmulatorOptions: @@ -97,7 +97,25 @@ # Since: 2.11 ## { 'struct': 'TPMEmulatorOptions', 'data': { 'chardev' : 'str' }, - 'if': 'defined(CONFIG_TPM)' } + 'if': 'CONFIG_TPM' } + +## +# @TPMPassthroughOptionsWrapper: +# +# Since: 1.5 +## +{ 'struct': 'TPMPassthroughOptionsWrapper', + 'data': { 'data': 'TPMPassthroughOptions' }, + 'if': 'CONFIG_TPM' } + +## +# @TPMEmulatorOptionsWrapper: +# +# Since: 2.11 +## +{ 'struct': 'TPMEmulatorOptionsWrapper', + 'data': { 'data': 'TPMEmulatorOptions' }, + 'if': 'CONFIG_TPM' } ## # @TpmTypeOptions: @@ -110,9 +128,11 @@ # Since: 1.5 ## { 'union': 'TpmTypeOptions', - 'data': { 'passthrough' : 'TPMPassthroughOptions', - 'emulator': 'TPMEmulatorOptions' }, - 'if': 'defined(CONFIG_TPM)' } + 'base': { 'type': 'TpmType' }, + 'discriminator': 'type', + 'data': { 'passthrough' : 'TPMPassthroughOptionsWrapper', + 'emulator': 'TPMEmulatorOptionsWrapper' }, + 'if': 'CONFIG_TPM' } ## # @TPMInfo: @@ -131,7 +151,7 @@ 'data': {'id': 'str', 'model': 'TpmModel', 'options': 'TpmTypeOptions' }, - 'if': 'defined(CONFIG_TPM)' } + 'if': 'CONFIG_TPM' } ## # @query-tpm: @@ -162,4 +182,4 @@ # ## { 'command': 'query-tpm', 'returns': ['TPMInfo'], - 'if': 'defined(CONFIG_TPM)' } + 'if': 'CONFIG_TPM' } diff --git a/qapi/trace-events b/qapi/trace-events index cccafc07e5..ab108c4f0e 100644 --- a/qapi/trace-events +++ b/qapi/trace-events @@ -17,8 +17,8 @@ visit_start_alternate(void *v, const char *name, void *obj, size_t size) "v=%p n visit_end_alternate(void *v, void *obj) "v=%p obj=%p" visit_optional(void *v, const char *name, bool *present) "v=%p name=%s present=%p" -visit_deprecated_accept(void *v, const char *name) "v=%p name=%s" -visit_deprecated(void *v, const char *name) "v=%p name=%s" +visit_policy_reject(void *v, const char *name) "v=%p name=%s" +visit_policy_skip(void *v, const char *name) "v=%p name=%s" visit_type_enum(void *v, const char *name, int *obj) "v=%p name=%s obj=%p" visit_type_int(void *v, const char *name, int64_t *obj) "v=%p name=%s obj=%p" diff --git a/qapi/trace.json b/qapi/trace.json index 47c68f04da..6c6982a587 100644 --- a/qapi/trace.json 
+++ b/qapi/trace.json @@ -1,4 +1,5 @@ # -*- mode: python -*- +# vim: filetype=python # # Copyright (C) 2011-2016 Lluís Vilanova # @@ -68,7 +69,7 @@ # # -> { "execute": "trace-event-get-state", # "arguments": { "name": "qemu_memalign" } } -# <- { "return": [ { "name": "qemu_memalign", "state": "disabled" } ] } +# <- { "return": [ { "name": "qemu_memalign", "state": "disabled", "vcpu": false } ] } # ## { 'command': 'trace-event-get-state', @@ -99,7 +100,7 @@ # Example: # # -> { "execute": "trace-event-set-state", -# "arguments": { "name": "qemu_memalign", "enable": "true" } } +# "arguments": { "name": "qemu_memalign", "enable": true } } # <- { "return": {} } # ## diff --git a/qapi/transaction.json b/qapi/transaction.json index 894258d9e2..381a2df782 100644 --- a/qapi/transaction.json +++ b/qapi/transaction.json @@ -38,41 +38,132 @@ { 'enum': 'ActionCompletionMode', 'data': [ 'individual', 'grouped' ] } +## +# @TransactionActionKind: +# +# @abort: Since 1.6 +# @block-dirty-bitmap-add: Since 2.5 +# @block-dirty-bitmap-remove: Since 4.2 +# @block-dirty-bitmap-clear: Since 2.5 +# @block-dirty-bitmap-enable: Since 4.0 +# @block-dirty-bitmap-disable: Since 4.0 +# @block-dirty-bitmap-merge: Since 4.0 +# @blockdev-backup: Since 2.3 +# @blockdev-snapshot: Since 2.5 +# @blockdev-snapshot-internal-sync: Since 1.7 +# @blockdev-snapshot-sync: since 1.1 +# @drive-backup: Since 1.6 +# +# Features: +# @deprecated: Member @drive-backup is deprecated. Use member +# @blockdev-backup instead. +# +# Since: 1.1 +## +{ 'enum': 'TransactionActionKind', + 'data': [ 'abort', 'block-dirty-bitmap-add', 'block-dirty-bitmap-remove', + 'block-dirty-bitmap-clear', 'block-dirty-bitmap-enable', + 'block-dirty-bitmap-disable', 'block-dirty-bitmap-merge', + 'blockdev-backup', 'blockdev-snapshot', + 'blockdev-snapshot-internal-sync', 'blockdev-snapshot-sync', + { 'name': 'drive-backup', 'features': [ 'deprecated' ] } ] } + +## +# @AbortWrapper: +# +# Since: 1.6 +## +{ 'struct': 'AbortWrapper', + 'data': { 'data': 'Abort' } } + +## +# @BlockDirtyBitmapAddWrapper: +# +# Since: 2.5 +## +{ 'struct': 'BlockDirtyBitmapAddWrapper', + 'data': { 'data': 'BlockDirtyBitmapAdd' } } + +## +# @BlockDirtyBitmapWrapper: +# +# Since: 2.5 +## +{ 'struct': 'BlockDirtyBitmapWrapper', + 'data': { 'data': 'BlockDirtyBitmap' } } + +## +# @BlockDirtyBitmapMergeWrapper: +# +# Since: 4.0 +## +{ 'struct': 'BlockDirtyBitmapMergeWrapper', + 'data': { 'data': 'BlockDirtyBitmapMerge' } } + +## +# @BlockdevBackupWrapper: +# +# Since: 2.3 +## +{ 'struct': 'BlockdevBackupWrapper', + 'data': { 'data': 'BlockdevBackup' } } + +## +# @BlockdevSnapshotWrapper: +# +# Since: 2.5 +## +{ 'struct': 'BlockdevSnapshotWrapper', + 'data': { 'data': 'BlockdevSnapshot' } } + +## +# @BlockdevSnapshotInternalWrapper: +# +# Since: 1.7 +## +{ 'struct': 'BlockdevSnapshotInternalWrapper', + 'data': { 'data': 'BlockdevSnapshotInternal' } } + +## +# @BlockdevSnapshotSyncWrapper: +# +# Since: 1.1 +## +{ 'struct': 'BlockdevSnapshotSyncWrapper', + 'data': { 'data': 'BlockdevSnapshotSync' } } + +## +# @DriveBackupWrapper: +# +# Since: 1.6 +## +{ 'struct': 'DriveBackupWrapper', + 'data': { 'data': 'DriveBackup' } } + ## # @TransactionAction: # # A discriminated record of operations that can be performed with -# @transaction. 
Action @type can be: -# -# - @abort: since 1.6 -# - @block-dirty-bitmap-add: since 2.5 -# - @block-dirty-bitmap-remove: since 4.2 -# - @block-dirty-bitmap-clear: since 2.5 -# - @block-dirty-bitmap-enable: since 4.0 -# - @block-dirty-bitmap-disable: since 4.0 -# - @block-dirty-bitmap-merge: since 4.0 -# - @blockdev-backup: since 2.3 -# - @blockdev-snapshot: since 2.5 -# - @blockdev-snapshot-internal-sync: since 1.7 -# - @blockdev-snapshot-sync: since 1.1 -# - @drive-backup: since 1.6 +# @transaction. # # Since: 1.1 ## { 'union': 'TransactionAction', + 'base': { 'type': 'TransactionActionKind' }, + 'discriminator': 'type', 'data': { - 'abort': 'Abort', - 'block-dirty-bitmap-add': 'BlockDirtyBitmapAdd', - 'block-dirty-bitmap-remove': 'BlockDirtyBitmap', - 'block-dirty-bitmap-clear': 'BlockDirtyBitmap', - 'block-dirty-bitmap-enable': 'BlockDirtyBitmap', - 'block-dirty-bitmap-disable': 'BlockDirtyBitmap', - 'block-dirty-bitmap-merge': 'BlockDirtyBitmapMerge', - 'blockdev-backup': 'BlockdevBackup', - 'blockdev-snapshot': 'BlockdevSnapshot', - 'blockdev-snapshot-internal-sync': 'BlockdevSnapshotInternal', - 'blockdev-snapshot-sync': 'BlockdevSnapshotSync', - 'drive-backup': 'DriveBackup' + 'abort': 'AbortWrapper', + 'block-dirty-bitmap-add': 'BlockDirtyBitmapAddWrapper', + 'block-dirty-bitmap-remove': 'BlockDirtyBitmapWrapper', + 'block-dirty-bitmap-clear': 'BlockDirtyBitmapWrapper', + 'block-dirty-bitmap-enable': 'BlockDirtyBitmapWrapper', + 'block-dirty-bitmap-disable': 'BlockDirtyBitmapWrapper', + 'block-dirty-bitmap-merge': 'BlockDirtyBitmapMergeWrapper', + 'blockdev-backup': 'BlockdevBackupWrapper', + 'blockdev-snapshot': 'BlockdevSnapshotWrapper', + 'blockdev-snapshot-internal-sync': 'BlockdevSnapshotInternalWrapper', + 'blockdev-snapshot-sync': 'BlockdevSnapshotSyncWrapper', + 'drive-backup': 'DriveBackupWrapper' } } ## diff --git a/qapi/ui.json b/qapi/ui.json index 7c503841a7..bf903996cb 100644 --- a/qapi/ui.json +++ b/qapi/ui.json @@ -10,20 +10,71 @@ { 'include': 'sockets.json' } ## -# @set_password: +# @DisplayProtocol: # -# Sets the password of a remote display session. +# Display protocols which support changing password options. +# +# Since: 7.0 +## +{ 'enum': 'DisplayProtocol', + 'data': [ 'vnc', 'spice' ] } + +## +# @SetPasswordAction: +# +# An action to take on changing a password on a connection with active clients. +# +# @keep: maintain existing clients +# +# @fail: fail the command if clients are connected +# +# @disconnect: disconnect existing clients +# +# Since: 7.0 +## +{ 'enum': 'SetPasswordAction', + 'data': [ 'keep', 'fail', 'disconnect' ] } + +## +# @SetPasswordOptions: +# +# Options for set_password. # # @protocol: - 'vnc' to modify the VNC server password # - 'spice' to modify the Spice server password # # @password: the new password # -# @connected: how to handle existing clients when changing the -# password. If nothing is specified, defaults to 'keep' -# 'fail' to fail the command if clients are connected -# 'disconnect' to disconnect existing clients -# 'keep' to maintain existing clients +# @connected: How to handle existing clients when changing the +# password. If nothing is specified, defaults to 'keep'. +# For VNC, only 'keep' is currently implemented. 
+# +# Since: 7.0 +## +{ 'union': 'SetPasswordOptions', + 'base': { 'protocol': 'DisplayProtocol', + 'password': 'str', + '*connected': 'SetPasswordAction' }, + 'discriminator': 'protocol', + 'data': { 'vnc': 'SetPasswordOptionsVnc' } } + +## +# @SetPasswordOptionsVnc: +# +# Options for set_password specific to the VNC procotol. +# +# @display: The id of the display where the password should be changed. +# Defaults to the first. +# +# Since: 7.0 +## +{ 'struct': 'SetPasswordOptionsVnc', + 'data': { '*display': 'str' } } + +## +# @set_password: +# +# Set the password of a remote display server. # # Returns: - Nothing on success # - If Spice is not enabled, DeviceNotFound @@ -37,15 +88,15 @@ # <- { "return": {} } # ## -{ 'command': 'set_password', - 'data': {'protocol': 'str', 'password': 'str', '*connected': 'str'} } +{ 'command': 'set_password', 'boxed': true, 'data': 'SetPasswordOptions' } ## -# @expire_password: +# @ExpirePasswordOptions: # -# Expire the password of a remote display server. +# General options for expire_password. # -# @protocol: the name of the remote display protocol 'vnc' or 'spice' +# @protocol: - 'vnc' to modify the VNC server expiration +# - 'spice' to modify the Spice server expiration # # @time: when to expire the password. # @@ -54,16 +105,42 @@ # - '+INT' where INT is the number of seconds from now (integer) # - 'INT' where INT is the absolute time in seconds # -# Returns: - Nothing on success -# - If @protocol is 'spice' and Spice is not active, DeviceNotFound -# -# Since: 0.14 -# # Notes: Time is relative to the server and currently there is no way to # coordinate server time with client time. It is not recommended to # use the absolute time version of the @time parameter unless you're # sure you are on the same machine as the QEMU instance. # +# Since: 7.0 +## +{ 'union': 'ExpirePasswordOptions', + 'base': { 'protocol': 'DisplayProtocol', + 'time': 'str' }, + 'discriminator': 'protocol', + 'data': { 'vnc': 'ExpirePasswordOptionsVnc' } } + +## +# @ExpirePasswordOptionsVnc: +# +# Options for expire_password specific to the VNC procotol. +# +# @display: The id of the display where the expiration should be changed. +# Defaults to the first. +# +# Since: 7.0 +## +{ 'struct': 'ExpirePasswordOptionsVnc', + 'data': { '*display': 'str' } } + +## +# @expire_password: +# +# Expire the password of a remote display server. +# +# Returns: - Nothing on success +# - If @protocol is 'spice' and Spice is not active, DeviceNotFound +# +# Since: 0.14 +# # Example: # # -> { "execute": "expire_password", "arguments": { "protocol": "vnc", @@ -71,14 +148,28 @@ # <- { "return": {} } # ## -{ 'command': 'expire_password', 'data': {'protocol': 'str', 'time': 'str'} } +{ 'command': 'expire_password', 'boxed': true, 'data': 'ExpirePasswordOptions' } + +## +# @ImageFormat: +# +# Supported image format types. +# +# @png: PNG format +# +# @ppm: PPM format +# +# Since: 7.1 +## +{ 'enum': 'ImageFormat', + 'data': ['ppm', 'png'] } ## # @screendump: # -# Write a PPM of the VGA screen to a file. +# Capture the contents of a screen and write it to a file. # -# @filename: the path of a new PPM file to store the image +# @filename: the path of a new file to store the image # # @device: ID of the display device that should be dumped. If this parameter # is missing, the primary display will be used. (Since 2.12) @@ -87,6 +178,8 @@ # parameter is missing, head #0 will be used. Also note that the head # can only be specified in conjunction with the device ID. 
(Since 2.12) # +# @format: image format for screendump. (default: ppm) (Since 7.1) +# # Returns: Nothing on success # # Since: 0.14 @@ -99,7 +192,8 @@ # ## { 'command': 'screendump', - 'data': {'filename': 'str', '*device': 'str', '*head': 'int'}, + 'data': {'filename': 'str', '*device': 'str', '*head': 'int', + '*format': 'ImageFormat'}, 'coroutine': true } ## @@ -123,7 +217,7 @@ 'data': { 'host': 'str', 'port': 'str', 'family': 'NetworkAddressFamily' }, - 'if': 'defined(CONFIG_SPICE)' } + 'if': 'CONFIG_SPICE' } ## # @SpiceServerInfo: @@ -137,7 +231,7 @@ { 'struct': 'SpiceServerInfo', 'base': 'SpiceBasicInfo', 'data': { '*auth': 'str' }, - 'if': 'defined(CONFIG_SPICE)' } + 'if': 'CONFIG_SPICE' } ## # @SpiceChannel: @@ -163,7 +257,7 @@ 'base': 'SpiceBasicInfo', 'data': {'connection-id': 'int', 'channel-type': 'int', 'channel-id': 'int', 'tls': 'bool'}, - 'if': 'defined(CONFIG_SPICE)' } + 'if': 'CONFIG_SPICE' } ## # @SpiceQueryMouseMode: @@ -183,7 +277,7 @@ ## { 'enum': 'SpiceQueryMouseMode', 'data': [ 'client', 'server', 'unknown' ], - 'if': 'defined(CONFIG_SPICE)' } + 'if': 'CONFIG_SPICE' } ## # @SpiceInfo: @@ -222,7 +316,7 @@ 'data': {'enabled': 'bool', 'migrated': 'bool', '*host': 'str', '*port': 'int', '*tls-port': 'int', '*auth': 'str', '*compiled-version': 'str', 'mouse-mode': 'SpiceQueryMouseMode', '*channels': ['SpiceChannel']}, - 'if': 'defined(CONFIG_SPICE)' } + 'if': 'CONFIG_SPICE' } ## # @query-spice: @@ -240,8 +334,10 @@ # "enabled": true, # "auth": "spice", # "port": 5920, +# "migrated":false, # "tls-port": 5921, # "host": "0.0.0.0", +# "mouse-mode":"client", # "channels": [ # { # "port": "54924", @@ -268,7 +364,7 @@ # ## { 'command': 'query-spice', 'returns': 'SpiceInfo', - 'if': 'defined(CONFIG_SPICE)' } + 'if': 'CONFIG_SPICE' } ## # @SPICE_CONNECTED: @@ -294,7 +390,7 @@ { 'event': 'SPICE_CONNECTED', 'data': { 'server': 'SpiceBasicInfo', 'client': 'SpiceBasicInfo' }, - 'if': 'defined(CONFIG_SPICE)' } + 'if': 'CONFIG_SPICE' } ## # @SPICE_INITIALIZED: @@ -323,7 +419,7 @@ { 'event': 'SPICE_INITIALIZED', 'data': { 'server': 'SpiceServerInfo', 'client': 'SpiceChannel' }, - 'if': 'defined(CONFIG_SPICE)' } + 'if': 'CONFIG_SPICE' } ## # @SPICE_DISCONNECTED: @@ -349,7 +445,7 @@ { 'event': 'SPICE_DISCONNECTED', 'data': { 'server': 'SpiceBasicInfo', 'client': 'SpiceBasicInfo' }, - 'if': 'defined(CONFIG_SPICE)' } + 'if': 'CONFIG_SPICE' } ## # @SPICE_MIGRATE_COMPLETED: @@ -365,7 +461,7 @@ # ## { 'event': 'SPICE_MIGRATE_COMPLETED', - 'if': 'defined(CONFIG_SPICE)' } + 'if': 'CONFIG_SPICE' } ## # == VNC @@ -393,7 +489,7 @@ 'service': 'str', 'family': 'NetworkAddressFamily', 'websocket': 'bool' }, - 'if': 'defined(CONFIG_VNC)' } + 'if': 'CONFIG_VNC' } ## # @VncServerInfo: @@ -408,7 +504,7 @@ { 'struct': 'VncServerInfo', 'base': 'VncBasicInfo', 'data': { '*auth': 'str' }, - 'if': 'defined(CONFIG_VNC)' } + 'if': 'CONFIG_VNC' } ## # @VncClientInfo: @@ -426,7 +522,7 @@ { 'struct': 'VncClientInfo', 'base': 'VncBasicInfo', 'data': { '*x509_dname': 'str', '*sasl_username': 'str' }, - 'if': 'defined(CONFIG_VNC)' } + 'if': 'CONFIG_VNC' } ## # @VncInfo: @@ -469,7 +565,7 @@ 'data': {'enabled': 'bool', '*host': 'str', '*family': 'NetworkAddressFamily', '*service': 'str', '*auth': 'str', '*clients': ['VncClientInfo']}, - 'if': 'defined(CONFIG_VNC)' } + 'if': 'CONFIG_VNC' } ## # @VncPrimaryAuth: @@ -481,7 +577,7 @@ { 'enum': 'VncPrimaryAuth', 'data': [ 'none', 'vnc', 'ra2', 'ra2ne', 'tight', 'ultra', 'tls', 'vencrypt', 'sasl' ], - 'if': 'defined(CONFIG_VNC)' } + 'if': 'CONFIG_VNC' } ## # 
@VncVencryptSubAuth: @@ -496,7 +592,7 @@ 'tls-vnc', 'x509-vnc', 'tls-plain', 'x509-plain', 'tls-sasl', 'x509-sasl' ], - 'if': 'defined(CONFIG_VNC)' } + 'if': 'CONFIG_VNC' } ## # @VncServerInfo2: @@ -514,7 +610,7 @@ 'base': 'VncBasicInfo', 'data': { 'auth' : 'VncPrimaryAuth', '*vencrypt' : 'VncVencryptSubAuth' }, - 'if': 'defined(CONFIG_VNC)' } + 'if': 'CONFIG_VNC' } ## # @VncInfo2: @@ -547,7 +643,7 @@ 'auth' : 'VncPrimaryAuth', '*vencrypt' : 'VncVencryptSubAuth', '*display' : 'str' }, - 'if': 'defined(CONFIG_VNC)' } + 'if': 'CONFIG_VNC' } ## # @query-vnc: @@ -571,7 +667,8 @@ # { # "host":"127.0.0.1", # "service":"50401", -# "family":"ipv4" +# "family":"ipv4", +# "websocket":false # } # ] # } @@ -579,7 +676,7 @@ # ## { 'command': 'query-vnc', 'returns': 'VncInfo', - 'if': 'defined(CONFIG_VNC)' } + 'if': 'CONFIG_VNC' } ## # @query-vnc-servers: # @@ -590,7 +687,7 @@ # Since: 2.3 ## { 'command': 'query-vnc-servers', 'returns': ['VncInfo2'], - 'if': 'defined(CONFIG_VNC)' } + 'if': 'CONFIG_VNC' } ## # @change-vnc-password: @@ -606,7 +703,7 @@ ## { 'command': 'change-vnc-password', 'data': { 'password': 'str' }, - 'if': 'defined(CONFIG_VNC)' } + 'if': 'CONFIG_VNC' } ## # @VNC_CONNECTED: @@ -626,17 +723,17 @@ # # <- { "event": "VNC_CONNECTED", # "data": { -# "server": { "auth": "sasl", "family": "ipv4", +# "server": { "auth": "sasl", "family": "ipv4", "websocket": false, # "service": "5901", "host": "0.0.0.0" }, # "client": { "family": "ipv4", "service": "58425", -# "host": "127.0.0.1" } }, +# "host": "127.0.0.1", "websocket": false } }, # "timestamp": { "seconds": 1262976601, "microseconds": 975795 } } # ## { 'event': 'VNC_CONNECTED', 'data': { 'server': 'VncServerInfo', 'client': 'VncBasicInfo' }, - 'if': 'defined(CONFIG_VNC)' } + 'if': 'CONFIG_VNC' } ## # @VNC_INITIALIZED: @@ -654,9 +751,9 @@ # # <- { "event": "VNC_INITIALIZED", # "data": { -# "server": { "auth": "sasl", "family": "ipv4", +# "server": { "auth": "sasl", "family": "ipv4", "websocket": false, # "service": "5901", "host": "0.0.0.0"}, -# "client": { "family": "ipv4", "service": "46089", +# "client": { "family": "ipv4", "service": "46089", "websocket": false, # "host": "127.0.0.1", "sasl_username": "luiz" } }, # "timestamp": { "seconds": 1263475302, "microseconds": 150772 } } # @@ -664,7 +761,7 @@ { 'event': 'VNC_INITIALIZED', 'data': { 'server': 'VncServerInfo', 'client': 'VncClientInfo' }, - 'if': 'defined(CONFIG_VNC)' } + 'if': 'CONFIG_VNC' } ## # @VNC_DISCONNECTED: @@ -681,9 +778,9 @@ # # <- { "event": "VNC_DISCONNECTED", # "data": { -# "server": { "auth": "sasl", "family": "ipv4", +# "server": { "auth": "sasl", "family": "ipv4", "websocket": false, # "service": "5901", "host": "0.0.0.0" }, -# "client": { "family": "ipv4", "service": "58425", +# "client": { "family": "ipv4", "service": "58425", "websocket": false, # "host": "127.0.0.1", "sasl_username": "luiz" } }, # "timestamp": { "seconds": 1262976601, "microseconds": 975795 } } # @@ -691,7 +788,7 @@ { 'event': 'VNC_DISCONNECTED', 'data': { 'server': 'VncServerInfo', 'client': 'VncClientInfo' }, - 'if': 'defined(CONFIG_VNC)' } + 'if': 'CONFIG_VNC' } ## # = Input @@ -797,7 +894,6 @@ # are effectively synonyms. 
# # Since: 1.3 -# ## { 'enum': 'QKeyCode', 'data': [ 'unmapped', @@ -824,6 +920,30 @@ 'ac_home', 'ac_back', 'ac_forward', 'ac_refresh', 'ac_bookmarks', 'lang1', 'lang2' ] } +## +# @KeyValueKind: +# +# Since: 1.3 +## +{ 'enum': 'KeyValueKind', + 'data': [ 'number', 'qcode' ] } + +## +# @IntWrapper: +# +# Since: 1.3 +## +{ 'struct': 'IntWrapper', + 'data': { 'data': 'int' } } + +## +# @QKeyCodeWrapper: +# +# Since: 1.3 +## +{ 'struct': 'QKeyCodeWrapper', + 'data': { 'data': 'QKeyCode' } } + ## # @KeyValue: # @@ -832,9 +952,11 @@ # Since: 1.3 ## { 'union': 'KeyValue', + 'base': { 'type': 'KeyValueKind' }, + 'discriminator': 'type', 'data': { - 'number': 'int', - 'qcode': 'QKeyCode' } } + 'number': 'IntWrapper', + 'qcode': 'QKeyCodeWrapper' } } ## # @send-key: @@ -879,7 +1001,7 @@ ## { 'enum' : 'InputButton', 'data' : [ 'left', 'middle', 'right', 'wheel-up', 'wheel-down', 'side', - 'extra' ] } + 'extra', 'wheel-left', 'wheel-right' ] } ## # @InputAxis: @@ -896,8 +1018,8 @@ # # Keyboard input event. # -# @key: Which key this event is for. -# @down: True for key-down and false for key-up events. +# @key: Which key this event is for. +# @down: True for key-down and false for key-up events. # # Since: 2.0 ## @@ -911,7 +1033,7 @@ # Pointer button input event. # # @button: Which button this event is for. -# @down: True for key-down and false for key-up events. +# @down: True for key-down and false for key-up events. # # Since: 2.0 ## @@ -934,6 +1056,38 @@ 'data' : { 'axis' : 'InputAxis', 'value' : 'int' } } +## +# @InputEventKind: +# +# Since: 2.0 +## +{ 'enum': 'InputEventKind', + 'data': [ 'key', 'btn', 'rel', 'abs' ] } + +## +# @InputKeyEventWrapper: +# +# Since: 2.0 +## +{ 'struct': 'InputKeyEventWrapper', + 'data': { 'data': 'InputKeyEvent' } } + +## +# @InputBtnEventWrapper: +# +# Since: 2.0 +## +{ 'struct': 'InputBtnEventWrapper', + 'data': { 'data': 'InputBtnEvent' } } + +## +# @InputMoveEventWrapper: +# +# Since: 2.0 +## +{ 'struct': 'InputMoveEventWrapper', + 'data': { 'data': 'InputMoveEvent' } } + ## # @InputEvent: # @@ -949,10 +1103,12 @@ # Since: 2.0 ## { 'union' : 'InputEvent', - 'data' : { 'key' : 'InputKeyEvent', - 'btn' : 'InputBtnEvent', - 'rel' : 'InputMoveEvent', - 'abs' : 'InputMoveEvent' } } + 'base': { 'type': 'InputEventKind' }, + 'discriminator': 'type', + 'data' : { 'key' : 'InputKeyEventWrapper', + 'btn' : 'InputBtnEventWrapper', + 'rel' : 'InputMoveEventWrapper', + 'abs' : 'InputMoveEventWrapper' } } ## # @input-send-event: @@ -1039,13 +1195,20 @@ # assuming the guest will resize the display to match # the window size then. Otherwise it defaults to "off". # Since 3.1 +# @show-tabs: Display the tab bar for switching between the various graphical +# interfaces (e.g. VGA and virtual console character devices) +# by default. +# Since 7.1 +# @show-menubar: Display the main window menubar. Defaults to "on". +# Since 8.0 # # Since: 2.12 -# ## { 'struct' : 'DisplayGTK', 'data' : { '*grab-on-hover' : 'bool', - '*zoom-to-fit' : 'bool' } } + '*zoom-to-fit' : 'bool', + '*show-tabs' : 'bool', + '*show-menubar' : 'bool' } } ## # @DisplayEGLHeadless: @@ -1056,26 +1219,47 @@ # available node on the host. # # Since: 3.1 -# ## { 'struct' : 'DisplayEGLHeadless', 'data' : { '*rendernode' : 'str' } } - ## - # @DisplayGLMode: - # - # Display OpenGL mode. - # - # @off: Disable OpenGL (default). - # @on: Use OpenGL, pick context type automatically. - # Would better be named 'auto' but is called 'on' for backward - # compatibility with bool type. 
- # @core: Use OpenGL with Core (desktop) Context. - # @es: Use OpenGL with ES (embedded systems) Context. - # - # Since: 3.0 - # - ## +## +# @DisplayDBus: +# +# DBus display options. +# +# @addr: The D-Bus bus address (default to the session bus). +# +# @rendernode: Which DRM render node should be used. Default is the first +# available node on the host. +# +# @p2p: Whether to use peer-to-peer connections (accepted through +# ``add_client``). +# +# @audiodev: Use the specified DBus audiodev to export audio. +# +# Since: 7.0 +## +{ 'struct' : 'DisplayDBus', + 'data' : { '*rendernode' : 'str', + '*addr': 'str', + '*p2p': 'bool', + '*audiodev': 'str' } } + +## +# @DisplayGLMode: +# +# Display OpenGL mode. +# +# @off: Disable OpenGL (default). +# @on: Use OpenGL, pick context type automatically. +# Would better be named 'auto' but is called 'on' for backward +# compatibility with bool type. +# @core: Use OpenGL with Core (desktop) Context. +# @es: Use OpenGL with ES (embedded systems) Context. +# +# Since: 3.0 +## { 'enum' : 'DisplayGLMode', 'data' : [ 'off', 'on', 'core', 'es' ] } @@ -1084,14 +1268,64 @@ # # Curses display options. # -# @charset: Font charset used by guest (default: CP437). +# @charset: Font charset used by guest (default: CP437). # # Since: 4.0 -# ## { 'struct' : 'DisplayCurses', 'data' : { '*charset' : 'str' } } +## +# @DisplayCocoa: +# +# Cocoa display options. +# +# @left-command-key: Enable/disable forwarding of left command key to +# guest. Allows command-tab window switching on the +# host without sending this key to the guest when +# "off". Defaults to "on" +# +# @full-grab: Capture all key presses, including system combos. This +# requires accessibility permissions, since it performs +# a global grab on key events. (default: off) +# See https://support.apple.com/en-in/guide/mac-help/mh32356/mac +# +# @swap-opt-cmd: Swap the Option and Command keys so that their key codes match +# their position on non-Mac keyboards and you can use Meta/Super +# and Alt where you expect them. (default: off) +# +# Since: 7.0 +## +{ 'struct': 'DisplayCocoa', + 'data': { + '*left-command-key': 'bool', + '*full-grab': 'bool', + '*swap-opt-cmd': 'bool' + } } + +## +# @HotKeyMod: +# +# Set of modifier keys that need to be held for shortcut key actions. +# +# Since: 7.1 +## +{ 'enum' : 'HotKeyMod', + 'data' : [ 'lctrl-lalt', 'lshift-lctrl-lalt', 'rctrl' ] } + +## +# @DisplaySDL: +# +# SDL2 display options. +# +# @grab-mod: Modifier keys that should be pressed together with the +# "G" key to release the mouse grab. +# +# Since: 7.1 +## +{ 'struct' : 'DisplaySDL', + 'data' : { '*grab-mod' : 'HotKeyMod' } } + ## # @DisplayType: # @@ -1126,36 +1360,38 @@ # application to connect to it. The server will redirect # the serial console and QEMU monitors. (Since 4.0) # -# Since: 2.12 +# @dbus: Start a D-Bus service for the display. 
(Since 7.0) # +# Since: 2.12 ## { 'enum' : 'DisplayType', 'data' : [ { 'name': 'default' }, { 'name': 'none' }, - { 'name': 'gtk', 'if': 'defined(CONFIG_GTK)' }, - { 'name': 'sdl', 'if': 'defined(CONFIG_SDL)' }, + { 'name': 'gtk', 'if': 'CONFIG_GTK' }, + { 'name': 'sdl', 'if': 'CONFIG_SDL' }, { 'name': 'egl-headless', - 'if': 'defined(CONFIG_OPENGL) && defined(CONFIG_GBM)' }, - { 'name': 'curses', 'if': 'defined(CONFIG_CURSES)' }, - { 'name': 'cocoa', 'if': 'defined(CONFIG_COCOA)' }, - { 'name': 'spice-app', 'if': 'defined(CONFIG_SPICE)'}, - { 'name': 'xemu' } ] } + 'if': { 'all': ['CONFIG_OPENGL', 'CONFIG_GBM'] } }, + { 'name': 'curses', 'if': 'CONFIG_CURSES' }, + { 'name': 'cocoa', 'if': 'CONFIG_COCOA' }, + { 'name': 'spice-app', 'if': 'CONFIG_SPICE' }, + { 'name': 'dbus', 'if': 'CONFIG_DBUS_DISPLAY' }, + { 'name': 'xemu' } + ] +} ## # @DisplayOptions: # # Display (user interface) options. # -# @type: Which DisplayType qemu should use. -# @full-screen: Start user interface in fullscreen mode (default: off). -# @window-close: Allow to quit qemu with window close button (default: on). -# @show-cursor: Force showing the mouse cursor (default: off). -# (since: 5.0) -# @gl: Enable OpenGL support (default: off). +# @type: Which DisplayType qemu should use. +# @full-screen: Start user interface in fullscreen mode (default: off). +# @window-close: Allow to quit qemu with window close button (default: on). +# @show-cursor: Force showing the mouse cursor (default: off). (since: 5.0) +# @gl: Enable OpenGL support (default: off). # # Since: 2.12 -# ## { 'union' : 'DisplayOptions', 'base' : { 'type' : 'DisplayType', @@ -1165,10 +1401,13 @@ '*gl' : 'DisplayGLMode' }, 'discriminator' : 'type', 'data' : { - 'gtk': { 'type': 'DisplayGTK', 'if': 'defined(CONFIG_GTK)' }, - 'curses': { 'type': 'DisplayCurses', 'if': 'defined(CONFIG_CURSES)' }, + 'gtk': { 'type': 'DisplayGTK', 'if': 'CONFIG_GTK' }, + 'cocoa': { 'type': 'DisplayCocoa', 'if': 'CONFIG_COCOA' }, + 'curses': { 'type': 'DisplayCurses', 'if': 'CONFIG_CURSES' }, 'egl-headless': { 'type': 'DisplayEGLHeadless', - 'if': 'defined(CONFIG_OPENGL) && defined(CONFIG_GBM)' } + 'if': { 'all': ['CONFIG_OPENGL', 'CONFIG_GBM'] } }, + 'dbus': { 'type': 'DisplayDBus', 'if': 'CONFIG_DBUS_DISPLAY' }, + 'sdl': { 'type': 'DisplaySDL', 'if': 'CONFIG_SDL' } } } @@ -1180,7 +1419,6 @@ # Returns: @DisplayOptions # # Since: 3.1 -# ## { 'command': 'query-display-options', 'returns': 'DisplayOptions' } @@ -1193,7 +1431,6 @@ # @vnc: VNC display # # Since: 6.0 -# ## { 'enum': 'DisplayReloadType', 'data': ['vnc'] } @@ -1206,7 +1443,6 @@ # @tls-certs: reload tls certs or not. # # Since: 6.0 -# ## { 'struct': 'DisplayReloadOptionsVNC', 'data': { '*tls-certs': 'bool' } } @@ -1219,7 +1455,6 @@ # @type: Specify the display type. # # Since: 6.0 -# ## { 'union': 'DisplayReloadOptions', 'base': {'type': 'DisplayReloadType'}, @@ -1245,3 +1480,65 @@ { 'command': 'display-reload', 'data': 'DisplayReloadOptions', 'boxed' : true } + +## +# @DisplayUpdateType: +# +# Available DisplayUpdate types. +# +# @vnc: VNC display +# +# Since: 7.1 +## +{ 'enum': 'DisplayUpdateType', + 'data': ['vnc'] } + +## +# @DisplayUpdateOptionsVNC: +# +# Specify the VNC reload options. +# +# @addresses: If specified, change set of addresses +# to listen for connections. Addresses configured +# for websockets are not touched. +# +# Since: 7.1 +## +{ 'struct': 'DisplayUpdateOptionsVNC', + 'data': { '*addresses': ['SocketAddress'] } } + +## +# @DisplayUpdateOptions: +# +# Options of the display configuration reload. 
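Because DisplayReloadOptions above (and the DisplayUpdateOptions union completed just below) are flat unions passed with 'boxed': true, the QMP 'arguments' object carries the 'type' discriminator right next to the branch members instead of wrapping them. A minimal sketch of the resulting payloads, with illustrative values only::

    import json

    # display-reload: ask the VNC server to re-read its TLS certificates.
    reload_vnc_certs = {
        "execute": "display-reload",
        "arguments": {"type": "vnc", "tls-certs": True},
    }

    # display-update: replace the set of listening addresses (mirrors the
    # example given with the display-update command definition).
    update_vnc_listen = {
        "execute": "display-update",
        "arguments": {
            "type": "vnc",
            "addresses": [{"type": "inet", "host": "0.0.0.0", "port": "5901"}],
        },
    }

    print(json.dumps(reload_vnc_certs, indent=2))
    print(json.dumps(update_vnc_listen, indent=2))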
+# +# @type: Specify the display type. +# +# Since: 7.1 +## +{ 'union': 'DisplayUpdateOptions', + 'base': {'type': 'DisplayUpdateType'}, + 'discriminator': 'type', + 'data': { 'vnc': 'DisplayUpdateOptionsVNC' } } + +## +# @display-update: +# +# Update display configuration. +# +# Returns: Nothing on success. +# +# Since: 7.1 +# +# Example: +# +# -> { "execute": "display-update", +# "arguments": { "type": "vnc", "addresses": +# [ { "type": "inet", "host": "0.0.0.0", +# "port": "5901" } ] } } +# <- { "return": {} } +# +## +{ 'command': 'display-update', + 'data': 'DisplayUpdateOptions', + 'boxed' : true } diff --git a/qapi/virtio.json b/qapi/virtio.json new file mode 100644 index 0000000000..019d2d1987 --- /dev/null +++ b/qapi/virtio.json @@ -0,0 +1,954 @@ +# -*- Mode: Python -*- +# vim: filetype=python +# + +## +# = Virtio devices +## + +## +# @VirtioInfo: +# +# Basic information about a given VirtIODevice +# +# @path: The VirtIODevice's canonical QOM path +# +# @name: Name of the VirtIODevice +# +# Since: 7.2 +# +## +{ 'struct': 'VirtioInfo', + 'data': { 'path': 'str', + 'name': 'str' } } + +## +# @x-query-virtio: +# +# Returns a list of all realized VirtIODevices +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: List of gathered VirtIODevices +# +# Since: 7.2 +# +# Example: +# +# -> { "execute": "x-query-virtio" } +# <- { "return": [ +# { +# "name": "virtio-input", +# "path": "/machine/peripheral-anon/device[4]/virtio-backend" +# }, +# { +# "name": "virtio-crypto", +# "path": "/machine/peripheral/crypto0/virtio-backend" +# }, +# { +# "name": "virtio-scsi", +# "path": "/machine/peripheral-anon/device[2]/virtio-backend" +# }, +# { +# "name": "virtio-net", +# "path": "/machine/peripheral-anon/device[1]/virtio-backend" +# }, +# { +# "name": "virtio-serial", +# "path": "/machine/peripheral-anon/device[0]/virtio-backend" +# } +# ] +# } +# +## + +{ 'command': 'x-query-virtio', + 'returns': [ 'VirtioInfo' ], + 'features': [ 'unstable' ] } + +## +# @VhostStatus: +# +# Information about a vhost device. This information will only be +# displayed if the vhost device is active. +# +# @n-mem-sections: vhost_dev n_mem_sections +# +# @n-tmp-sections: vhost_dev n_tmp_sections +# +# @nvqs: vhost_dev nvqs (number of virtqueues being used) +# +# @vq-index: vhost_dev vq_index +# +# @features: vhost_dev features +# +# @acked-features: vhost_dev acked_features +# +# @backend-features: vhost_dev backend_features +# +# @protocol-features: vhost_dev protocol_features +# +# @max-queues: vhost_dev max_queues +# +# @backend-cap: vhost_dev backend_cap +# +# @log-enabled: vhost_dev log_enabled flag +# +# @log-size: vhost_dev log_size +# +# Since: 7.2 +# +## + +{ 'struct': 'VhostStatus', + 'data': { 'n-mem-sections': 'int', + 'n-tmp-sections': 'int', + 'nvqs': 'uint32', + 'vq-index': 'int', + 'features': 'VirtioDeviceFeatures', + 'acked-features': 'VirtioDeviceFeatures', + 'backend-features': 'VirtioDeviceFeatures', + 'protocol-features': 'VhostDeviceProtocols', + 'max-queues': 'uint64', + 'backend-cap': 'uint64', + 'log-enabled': 'bool', + 'log-size': 'uint64' } } + +## +# @VirtioStatus: +# +# Full status of the virtio device with most VirtIODevice members. +# Also includes the full status of the corresponding vhost device +# if the vhost device is active. 
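A typical use of the new debug command is to enumerate devices with x-query-virtio and then fetch the per-device status (the VirtioStatus structure laid out below) for each returned path. The sketch assumes some QMP transport is already available and substitutes a stub built from the documented example data so it runs as-is::

    def vhost_backed(execute):
        """Return name/path of realized VirtIODevices whose vhost backend is active."""
        found = []
        for dev in execute("x-query-virtio"):
            status = execute("x-query-virtio-status", path=dev["path"])
            if status["vhost-started"]:
                found.append({"name": dev["name"], "path": dev["path"]})
        return found

    # Stand-in transport using the documented example data (not a real QMP client).
    def fake_execute(command, **arguments):
        if command == "x-query-virtio":
            return [{"name": "virtio-net",
                     "path": "/machine/peripheral-anon/device[1]/virtio-backend"}]
        return {"vhost-started": True}    # heavily abbreviated VirtioStatus

    print(vhost_backed(fake_execute))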
+# +# @name: VirtIODevice name +# +# @device-id: VirtIODevice ID +# +# @vhost-started: VirtIODevice vhost_started flag +# +# @guest-features: VirtIODevice guest_features +# +# @host-features: VirtIODevice host_features +# +# @backend-features: VirtIODevice backend_features +# +# @device-endian: VirtIODevice device_endian +# +# @num-vqs: VirtIODevice virtqueue count. This is the number of active +# virtqueues being used by the VirtIODevice. +# +# @status: VirtIODevice configuration status (VirtioDeviceStatus) +# +# @isr: VirtIODevice ISR +# +# @queue-sel: VirtIODevice queue_sel +# +# @vm-running: VirtIODevice vm_running flag +# +# @broken: VirtIODevice broken flag +# +# @disabled: VirtIODevice disabled flag +# +# @use-started: VirtIODevice use_started flag +# +# @started: VirtIODevice started flag +# +# @start-on-kick: VirtIODevice start_on_kick flag +# +# @disable-legacy-check: VirtIODevice disabled_legacy_check flag +# +# @bus-name: VirtIODevice bus_name +# +# @use-guest-notifier-mask: VirtIODevice use_guest_notifier_mask flag +# +# @vhost-dev: Corresponding vhost device info for a given VirtIODevice. +# Present if the given VirtIODevice has an active vhost +# device. +# +# Since: 7.2 +# +## + +{ 'struct': 'VirtioStatus', + 'data': { 'name': 'str', + 'device-id': 'uint16', + 'vhost-started': 'bool', + 'device-endian': 'str', + 'guest-features': 'VirtioDeviceFeatures', + 'host-features': 'VirtioDeviceFeatures', + 'backend-features': 'VirtioDeviceFeatures', + 'num-vqs': 'int', + 'status': 'VirtioDeviceStatus', + 'isr': 'uint8', + 'queue-sel': 'uint16', + 'vm-running': 'bool', + 'broken': 'bool', + 'disabled': 'bool', + 'use-started': 'bool', + 'started': 'bool', + 'start-on-kick': 'bool', + 'disable-legacy-check': 'bool', + 'bus-name': 'str', + 'use-guest-notifier-mask': 'bool', + '*vhost-dev': 'VhostStatus' } } + +## +# @x-query-virtio-status: +# +# Poll for a comprehensive status of a given virtio device +# +# @path: Canonical QOM path of the VirtIODevice +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: VirtioStatus of the virtio device +# +# Since: 7.2 +# +# Examples: +# +# 1. Poll for the status of virtio-crypto (no vhost-crypto active) +# +# -> { "execute": "x-query-virtio-status", +# "arguments": { "path": "/machine/peripheral/crypto0/virtio-backend" } +# } +# <- { "return": { +# "device-endian": "little", +# "bus-name": "", +# "disable-legacy-check": false, +# "name": "virtio-crypto", +# "started": true, +# "device-id": 20, +# "backend-features": { +# "transports": [], +# "dev-features": [] +# }, +# "start-on-kick": false, +# "isr": 1, +# "broken": false, +# "status": { +# "statuses": [ +# "VIRTIO_CONFIG_S_ACKNOWLEDGE: Valid virtio device found", +# "VIRTIO_CONFIG_S_DRIVER: Guest OS compatible with device", +# "VIRTIO_CONFIG_S_FEATURES_OK: Feature negotiation complete", +# "VIRTIO_CONFIG_S_DRIVER_OK: Driver setup and ready" +# ] +# }, +# "num-vqs": 2, +# "guest-features": { +# "dev-features": [], +# "transports": [ +# "VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled", +# "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", +# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)" +# ] +# }, +# "host-features": { +# "unknown-dev-features": 1073741824, +# "dev-features": [], +# "transports": [ +# "VIRTIO_RING_F_EVENT_IDX: Used & avail. 
event fields enabled", +# "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", +# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)", +# "VIRTIO_F_ANY_LAYOUT: Device accepts arbitrary desc. layouts", +# "VIRTIO_F_NOTIFY_ON_EMPTY: Notify when device runs out of avail. descs. on VQ" +# ] +# }, +# "use-guest-notifier-mask": true, +# "vm-running": true, +# "queue-sel": 1, +# "disabled": false, +# "vhost-started": false, +# "use-started": true +# } +# } +# +# 2. Poll for the status of virtio-net (vhost-net is active) +# +# -> { "execute": "x-query-virtio-status", +# "arguments": { "path": "/machine/peripheral-anon/device[1]/virtio-backend" } +# } +# <- { "return": { +# "device-endian": "little", +# "bus-name": "", +# "disabled-legacy-check": false, +# "name": "virtio-net", +# "started": true, +# "device-id": 1, +# "vhost-dev": { +# "n-tmp-sections": 4, +# "n-mem-sections": 4, +# "max-queues": 1, +# "backend-cap": 2, +# "log-size": 0, +# "backend-features": { +# "dev-features": [], +# "transports": [] +# }, +# "nvqs": 2, +# "protocol-features": { +# "protocols": [] +# }, +# "vq-index": 0, +# "log-enabled": false, +# "acked-features": { +# "dev-features": [ +# "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers" +# ], +# "transports": [ +# "VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled", +# "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", +# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)" +# ] +# }, +# "features": { +# "dev-features": [ +# "VHOST_F_LOG_ALL: Logging write descriptors supported", +# "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers" +# ], +# "transports": [ +# "VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled", +# "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", +# "VIRTIO_F_IOMMU_PLATFORM: Device can be used on IOMMU platform", +# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)", +# "VIRTIO_F_ANY_LAYOUT: Device accepts arbitrary desc. layouts", +# "VIRTIO_F_NOTIFY_ON_EMPTY: Notify when device runs out of avail. descs. on VQ" +# ] +# } +# }, +# "backend-features": { +# "dev-features": [ +# "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features negotiation supported", +# "VIRTIO_NET_F_GSO: Handling GSO-type packets supported", +# "VIRTIO_NET_F_CTRL_MAC_ADDR: MAC address set through control channel", +# "VIRTIO_NET_F_GUEST_ANNOUNCE: Driver sending gratuitous packets supported", +# "VIRTIO_NET_F_CTRL_RX_EXTRA: Extra RX mode control supported", +# "VIRTIO_NET_F_CTRL_VLAN: Control channel VLAN filtering supported", +# "VIRTIO_NET_F_CTRL_RX: Control channel RX mode supported", +# "VIRTIO_NET_F_CTRL_VQ: Control channel available", +# "VIRTIO_NET_F_STATUS: Configuration status field available", +# "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers", +# "VIRTIO_NET_F_HOST_UFO: Device can receive UFO", +# "VIRTIO_NET_F_HOST_ECN: Device can receive TSO with ECN", +# "VIRTIO_NET_F_HOST_TSO6: Device can receive TSOv6", +# "VIRTIO_NET_F_HOST_TSO4: Device can receive TSOv4", +# "VIRTIO_NET_F_GUEST_UFO: Driver can receive UFO", +# "VIRTIO_NET_F_GUEST_ECN: Driver can receive TSO with ECN", +# "VIRTIO_NET_F_GUEST_TSO6: Driver can receive TSOv6", +# "VIRTIO_NET_F_GUEST_TSO4: Driver can receive TSOv4", +# "VIRTIO_NET_F_MAC: Device has given MAC address", +# "VIRTIO_NET_F_CTRL_GUEST_OFFLOADS: Control channel offloading reconfig. 
supported", +# "VIRTIO_NET_F_GUEST_CSUM: Driver handling packets with partial checksum supported", +# "VIRTIO_NET_F_CSUM: Device handling packets with partial checksum supported" +# ], +# "transports": [ +# "VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled", +# "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", +# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)", +# "VIRTIO_F_ANY_LAYOUT: Device accepts arbitrary desc. layouts", +# "VIRTIO_F_NOTIFY_ON_EMPTY: Notify when device runs out of avail. descs. on VQ" +# ] +# }, +# "start-on-kick": false, +# "isr": 1, +# "broken": false, +# "status": { +# "statuses": [ +# "VIRTIO_CONFIG_S_ACKNOWLEDGE: Valid virtio device found", +# "VIRTIO_CONFIG_S_DRIVER: Guest OS compatible with device", +# "VIRTIO_CONFIG_S_FEATURES_OK: Feature negotiation complete", +# "VIRTIO_CONFIG_S_DRIVER_OK: Driver setup and ready" +# ] +# }, +# "num-vqs": 3, +# "guest-features": { +# "dev-features": [ +# "VIRTIO_NET_F_CTRL_MAC_ADDR: MAC address set through control channel", +# "VIRTIO_NET_F_GUEST_ANNOUNCE: Driver sending gratuitous packets supported", +# "VIRTIO_NET_F_CTRL_VLAN: Control channel VLAN filtering supported", +# "VIRTIO_NET_F_CTRL_RX: Control channel RX mode supported", +# "VIRTIO_NET_F_CTRL_VQ: Control channel available", +# "VIRTIO_NET_F_STATUS: Configuration status field available", +# "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers", +# "VIRTIO_NET_F_HOST_UFO: Device can receive UFO", +# "VIRTIO_NET_F_HOST_ECN: Device can receive TSO with ECN", +# "VIRTIO_NET_F_HOST_TSO6: Device can receive TSOv6", +# "VIRTIO_NET_F_HOST_TSO4: Device can receive TSOv4", +# "VIRTIO_NET_F_GUEST_UFO: Driver can receive UFO", +# "VIRTIO_NET_F_GUEST_ECN: Driver can receive TSO with ECN", +# "VIRTIO_NET_F_GUEST_TSO6: Driver can receive TSOv6", +# "VIRTIO_NET_F_GUEST_TSO4: Driver can receive TSOv4", +# "VIRTIO_NET_F_MAC: Device has given MAC address", +# "VIRTIO_NET_F_CTRL_GUEST_OFFLOADS: Control channel offloading reconfig. supported", +# "VIRTIO_NET_F_GUEST_CSUM: Driver handling packets with partial checksum supported", +# "VIRTIO_NET_F_CSUM: Device handling packets with partial checksum supported" +# ], +# "transports": [ +# "VIRTIO_RING_F_EVENT_IDX: Used & avail. 
event fields enabled", +# "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", +# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)" +# ] +# }, +# "host-features": { +# "dev-features": [ +# "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features negotiation supported", +# "VIRTIO_NET_F_GSO: Handling GSO-type packets supported", +# "VIRTIO_NET_F_CTRL_MAC_ADDR: MAC address set through control channel", +# "VIRTIO_NET_F_GUEST_ANNOUNCE: Driver sending gratuitous packets supported", +# "VIRTIO_NET_F_CTRL_RX_EXTRA: Extra RX mode control supported", +# "VIRTIO_NET_F_CTRL_VLAN: Control channel VLAN filtering supported", +# "VIRTIO_NET_F_CTRL_RX: Control channel RX mode supported", +# "VIRTIO_NET_F_CTRL_VQ: Control channel available", +# "VIRTIO_NET_F_STATUS: Configuration status field available", +# "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers", +# "VIRTIO_NET_F_HOST_UFO: Device can receive UFO", +# "VIRTIO_NET_F_HOST_ECN: Device can receive TSO with ECN", +# "VIRTIO_NET_F_HOST_TSO6: Device can receive TSOv6", +# "VIRTIO_NET_F_HOST_TSO4: Device can receive TSOv4", +# "VIRTIO_NET_F_GUEST_UFO: Driver can receive UFO", +# "VIRTIO_NET_F_GUEST_ECN: Driver can receive TSO with ECN", +# "VIRTIO_NET_F_GUEST_TSO6: Driver can receive TSOv6", +# "VIRTIO_NET_F_GUEST_TSO4: Driver can receive TSOv4", +# "VIRTIO_NET_F_MAC: Device has given MAC address", +# "VIRTIO_NET_F_CTRL_GUEST_OFFLOADS: Control channel offloading reconfig. supported", +# "VIRTIO_NET_F_GUEST_CSUM: Driver handling packets with partial checksum supported", +# "VIRTIO_NET_F_CSUM: Device handling packets with partial checksum supported" +# ], +# "transports": [ +# "VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled", +# "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", +# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)", +# "VIRTIO_F_ANY_LAYOUT: Device accepts arbitrary desc. layouts", +# "VIRTIO_F_NOTIFY_ON_EMPTY: Notify when device runs out of avail. descs. on VQ" +# ] +# }, +# "use-guest-notifier-mask": true, +# "vm-running": true, +# "queue-sel": 2, +# "disabled": false, +# "vhost-started": true, +# "use-started": true +# } +# } +# +## + +{ 'command': 'x-query-virtio-status', + 'data': { 'path': 'str' }, + 'returns': 'VirtioStatus', + 'features': [ 'unstable' ] } + +## +# @VirtioDeviceStatus: +# +# A structure defined to list the configuration statuses of a virtio +# device +# +# @statuses: List of decoded configuration statuses of the virtio +# device +# +# @unknown-statuses: Virtio device statuses bitmap that have not been decoded +# +# Since: 7.2 +## + +{ 'struct': 'VirtioDeviceStatus', + 'data': { 'statuses': [ 'str' ], + '*unknown-statuses': 'uint8' } } + +## +# @VhostDeviceProtocols: +# +# A structure defined to list the vhost user protocol features of a +# Vhost User device +# +# @protocols: List of decoded vhost user protocol features of a vhost +# user device +# +# @unknown-protocols: Vhost user device protocol features bitmap that +# have not been decoded +# +# Since: 7.2 +## + +{ 'struct': 'VhostDeviceProtocols', + 'data': { 'protocols': [ 'str' ], + '*unknown-protocols': 'uint64' } } + +## +# @VirtioDeviceFeatures: +# +# The common fields that apply to most Virtio devices. Some devices +# may not have their own device-specific features (e.g. virtio-rng). 
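The unknown-statuses, unknown-protocols and unknown-dev-features members above are plain bitmaps of whatever bits the decoder could not translate into names. A small sketch of turning such a bitmap back into bit positions, using the "unknown-dev-features": 1073741824 value from the first x-query-virtio-status example::

    def undecoded_bits(mask: int) -> list:
        """Bit positions set in an unknown-features/unknown-statuses bitmap."""
        return [bit for bit in range(mask.bit_length()) if mask & (1 << bit)]

    print(undecoded_bits(1073741824))    # -> [30], i.e. only feature bit 30 is undecoded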
+# +# @transports: List of transport features of the virtio device +# +# @dev-features: List of device-specific features (if the device has +# unique features) +# +# @unknown-dev-features: Virtio device features bitmap that have not +# been decoded +# +# Since: 7.2 +## + +{ 'struct': 'VirtioDeviceFeatures', + 'data': { 'transports': [ 'str' ], + '*dev-features': [ 'str' ], + '*unknown-dev-features': 'uint64' } } + +## +# @VirtQueueStatus: +# +# Information of a VirtIODevice VirtQueue, including most members of +# the VirtQueue data structure. +# +# @name: Name of the VirtIODevice that uses this VirtQueue +# +# @queue-index: VirtQueue queue_index +# +# @inuse: VirtQueue inuse +# +# @vring-num: VirtQueue vring.num +# +# @vring-num-default: VirtQueue vring.num_default +# +# @vring-align: VirtQueue vring.align +# +# @vring-desc: VirtQueue vring.desc (descriptor area) +# +# @vring-avail: VirtQueue vring.avail (driver area) +# +# @vring-used: VirtQueue vring.used (device area) +# +# @last-avail-idx: VirtQueue last_avail_idx or return of vhost_dev +# vhost_get_vring_base (if vhost active) +# +# @shadow-avail-idx: VirtQueue shadow_avail_idx +# +# @used-idx: VirtQueue used_idx +# +# @signalled-used: VirtQueue signalled_used +# +# @signalled-used-valid: VirtQueue signalled_used_valid flag +# +# Since: 7.2 +# +## + +{ 'struct': 'VirtQueueStatus', + 'data': { 'name': 'str', + 'queue-index': 'uint16', + 'inuse': 'uint32', + 'vring-num': 'uint32', + 'vring-num-default': 'uint32', + 'vring-align': 'uint32', + 'vring-desc': 'uint64', + 'vring-avail': 'uint64', + 'vring-used': 'uint64', + '*last-avail-idx': 'uint16', + '*shadow-avail-idx': 'uint16', + 'used-idx': 'uint16', + 'signalled-used': 'uint16', + 'signalled-used-valid': 'bool' } } + +## +# @x-query-virtio-queue-status: +# +# Return the status of a given VirtIODevice's VirtQueue +# +# @path: VirtIODevice canonical QOM path +# +# @queue: VirtQueue index to examine +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: VirtQueueStatus of the VirtQueue +# +# Notes: last_avail_idx will not be displayed in the case where +# the selected VirtIODevice has a running vhost device and +# the VirtIODevice VirtQueue index (queue) does not exist for +# the corresponding vhost device vhost_virtqueue. Also, +# shadow_avail_idx will not be displayed in the case where +# the selected VirtIODevice has a running vhost device. +# +# Since: 7.2 +# +# Examples: +# +# 1. Get VirtQueueStatus for virtio-vsock (vhost-vsock running) +# +# -> { "execute": "x-query-virtio-queue-status", +# "arguments": { "path": "/machine/peripheral/vsock0/virtio-backend", +# "queue": 1 } +# } +# <- { "return": { +# "signalled-used": 0, +# "inuse": 0, +# "name": "vhost-vsock", +# "vring-align": 4096, +# "vring-desc": 5217370112, +# "signalled-used-valid": false, +# "vring-num-default": 128, +# "vring-avail": 5217372160, +# "queue-index": 1, +# "last-avail-idx": 0, +# "vring-used": 5217372480, +# "used-idx": 0, +# "vring-num": 128 +# } +# } +# +# 2. 
Get VirtQueueStatus for virtio-serial (no vhost) +# +# -> { "execute": "x-query-virtio-queue-status", +# "arguments": { "path": "/machine/peripheral-anon/device[0]/virtio-backend", +# "queue": 20 } +# } +# <- { "return": { +# "signalled-used": 0, +# "inuse": 0, +# "name": "virtio-serial", +# "vring-align": 4096, +# "vring-desc": 5182074880, +# "signalled-used-valid": false, +# "vring-num-default": 128, +# "vring-avail": 5182076928, +# "queue-index": 20, +# "last-avail-idx": 0, +# "vring-used": 5182077248, +# "used-idx": 0, +# "shadow-avail-idx": 0, +# "vring-num": 128 +# } +# } +# +## + +{ 'command': 'x-query-virtio-queue-status', + 'data': { 'path': 'str', 'queue': 'uint16' }, + 'returns': 'VirtQueueStatus', + 'features': [ 'unstable' ] } + +## +# @VirtVhostQueueStatus: +# +# Information of a vhost device's vhost_virtqueue, including most +# members of the vhost_dev vhost_virtqueue data structure. +# +# @name: Name of the VirtIODevice that uses this vhost_virtqueue +# +# @kick: vhost_virtqueue kick +# +# @call: vhost_virtqueue call +# +# @desc: vhost_virtqueue desc +# +# @avail: vhost_virtqueue avail +# +# @used: vhost_virtqueue used +# +# @num: vhost_virtqueue num +# +# @desc-phys: vhost_virtqueue desc_phys (descriptor area phys. addr.) +# +# @desc-size: vhost_virtqueue desc_size +# +# @avail-phys: vhost_virtqueue avail_phys (driver area phys. addr.) +# +# @avail-size: vhost_virtqueue avail_size +# +# @used-phys: vhost_virtqueue used_phys (device area phys. addr.) +# +# @used-size: vhost_virtqueue used_size +# +# Since: 7.2 +# +## + +{ 'struct': 'VirtVhostQueueStatus', + 'data': { 'name': 'str', + 'kick': 'int', + 'call': 'int', + 'desc': 'uint64', + 'avail': 'uint64', + 'used': 'uint64', + 'num': 'int', + 'desc-phys': 'uint64', + 'desc-size': 'uint32', + 'avail-phys': 'uint64', + 'avail-size': 'uint32', + 'used-phys': 'uint64', + 'used-size': 'uint32' } } + +## +# @x-query-virtio-vhost-queue-status: +# +# Return information of a given vhost device's vhost_virtqueue +# +# @path: VirtIODevice canonical QOM path +# +# @queue: vhost_virtqueue index to examine +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: VirtVhostQueueStatus of the vhost_virtqueue +# +# Since: 7.2 +# +# Examples: +# +# 1. Get vhost_virtqueue status for vhost-crypto +# +# -> { "execute": "x-query-virtio-vhost-queue-status", +# "arguments": { "path": "/machine/peripheral/crypto0/virtio-backend", +# "queue": 0 } +# } +# <- { "return": { +# "avail-phys": 5216124928, +# "name": "virtio-crypto", +# "used-phys": 5216127040, +# "avail-size": 2054, +# "desc-size": 16384, +# "used-size": 8198, +# "desc": 140141447430144, +# "num": 1024, +# "call": 0, +# "avail": 140141447446528, +# "desc-phys": 5216108544, +# "used": 140141447448640, +# "kick": 0 +# } +# } +# +# 2. 
Get vhost_virtqueue status for vhost-vsock +# +# -> { "execute": "x-query-virtio-vhost-queue-status", +# "arguments": { "path": "/machine/peripheral/vsock0/virtio-backend", +# "queue": 0 } +# } +# <- { "return": { +# "avail-phys": 5182261248, +# "name": "vhost-vsock", +# "used-phys": 5182261568, +# "avail-size": 262, +# "desc-size": 2048, +# "used-size": 1030, +# "desc": 140141413580800, +# "num": 128, +# "call": 0, +# "avail": 140141413582848, +# "desc-phys": 5182259200, +# "used": 140141413583168, +# "kick": 0 +# } +# } +# +## + +{ 'command': 'x-query-virtio-vhost-queue-status', + 'data': { 'path': 'str', 'queue': 'uint16' }, + 'returns': 'VirtVhostQueueStatus', + 'features': [ 'unstable' ] } + +## +# @VirtioRingDesc: +# +# Information regarding the vring descriptor area +# +# @addr: Guest physical address of the descriptor area +# +# @len: Length of the descriptor area +# +# @flags: List of descriptor flags +# +# Since: 7.2 +# +## + +{ 'struct': 'VirtioRingDesc', + 'data': { 'addr': 'uint64', + 'len': 'uint32', + 'flags': [ 'str' ] } } + +## +# @VirtioRingAvail: +# +# Information regarding the avail vring (a.k.a. driver area) +# +# @flags: VRingAvail flags +# +# @idx: VRingAvail index +# +# @ring: VRingAvail ring[] entry at provided index +# +# Since: 7.2 +# +## + +{ 'struct': 'VirtioRingAvail', + 'data': { 'flags': 'uint16', + 'idx': 'uint16', + 'ring': 'uint16' } } + +## +# @VirtioRingUsed: +# +# Information regarding the used vring (a.k.a. device area) +# +# @flags: VRingUsed flags +# +# @idx: VRingUsed index +# +# Since: 7.2 +# +## + +{ 'struct': 'VirtioRingUsed', + 'data': { 'flags': 'uint16', + 'idx': 'uint16' } } + +## +# @VirtioQueueElement: +# +# Information regarding a VirtQueue's VirtQueueElement including +# descriptor, driver, and device areas +# +# @name: Name of the VirtIODevice that uses this VirtQueue +# +# @index: Index of the element in the queue +# +# @descs: List of descriptors (VirtioRingDesc) +# +# @avail: VRingAvail info +# +# @used: VRingUsed info +# +# Since: 7.2 +# +## + +{ 'struct': 'VirtioQueueElement', + 'data': { 'name': 'str', + 'index': 'uint32', + 'descs': [ 'VirtioRingDesc' ], + 'avail': 'VirtioRingAvail', + 'used': 'VirtioRingUsed' } } + +## +# @x-query-virtio-queue-element: +# +# Return the information about a VirtQueue's VirtQueueElement +# +# @path: VirtIODevice canonical QOM path +# +# @queue: VirtQueue index to examine +# +# @index: Index of the element in the queue +# (default: head of the queue) +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: VirtioQueueElement information +# +# Since: 7.2 +# +# Examples: +# +# 1. Introspect on virtio-net's VirtQueue 0 at index 5 +# +# -> { "execute": "x-query-virtio-queue-element", +# "arguments": { "path": "/machine/peripheral-anon/device[1]/virtio-backend", +# "queue": 0, +# "index": 5 } +# } +# <- { "return": { +# "index": 5, +# "name": "virtio-net", +# "descs": [ +# { +# "flags": ["write"], +# "len": 1536, +# "addr": 5257305600 +# } +# ], +# "avail": { +# "idx": 256, +# "flags": 0, +# "ring": 5 +# }, +# "used": { +# "idx": 13, +# "flags": 0 +# } +# } +# } +# +# 2. 
Introspect on virtio-crypto's VirtQueue 1 at head +# +# -> { "execute": "x-query-virtio-queue-element", +# "arguments": { "path": "/machine/peripheral/crypto0/virtio-backend", +# "queue": 1 } +# } +# <- { "return": { +# "index": 0, +# "name": "virtio-crypto", +# "descs": [ +# { +# "flags": [], +# "len": 0, +# "addr": 8080268923184214134 +# } +# ], +# "avail": { +# "idx": 280, +# "flags": 0, +# "ring": 0 +# }, +# "used": { +# "idx": 280, +# "flags": 0 +# } +# } +# } +# +# 3. Introspect on virtio-scsi's VirtQueue 2 at head +# +# -> { "execute": "x-query-virtio-queue-element", +# "arguments": { "path": "/machine/peripheral-anon/device[2]/virtio-backend", +# "queue": 2 } +# } +# <- { "return": { +# "index": 19, +# "name": "virtio-scsi", +# "descs": [ +# { +# "flags": ["used", "indirect", "write"], +# "len": 4099327944, +# "addr": 12055409292258155293 +# } +# ], +# "avail": { +# "idx": 1147, +# "flags": 0, +# "ring": 19 +# }, +# "used": { +# "idx": 280, +# "flags": 0 +# } +# } +# } +# +## + +{ 'command': 'x-query-virtio-queue-element', + 'data': { 'path': 'str', 'queue': 'uint16', '*index': 'uint16' }, + 'returns': 'VirtioQueueElement', + 'features': [ 'unstable' ] } diff --git a/qemu-edid.c b/qemu-edid.c index c3a9fba10d..92e1a660a7 100644 --- a/qemu-edid.c +++ b/qemu-edid.c @@ -10,8 +10,8 @@ #include "hw/display/edid.h" static qemu_edid_info info = { - .prefx = 1024, - .prefy = 768, + .prefx = 1280, + .prefy = 800, }; static void usage(FILE *out) @@ -92,6 +92,10 @@ int main(int argc, char *argv[]) fprintf(stderr, "not a number: %s\n", optarg); exit(1); } + if (dpi == 0) { + fprintf(stderr, "cannot be zero: %s\n", optarg); + exit(1); + } break; case 'v': info.vendor = optarg; diff --git a/qemu-img-cmds.hx b/qemu-img-cmds.hx index b3620f29e5..1b1dab5b17 100644 --- a/qemu-img-cmds.hx +++ b/qemu-img-cmds.hx @@ -46,15 +46,15 @@ SRST ERST DEF("convert", img_convert, - "convert [--object objectdef] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps] [-U] [-C] [-c] [-p] [-q] [-n] [-f fmt] [-t cache] [-T src_cache] [-O output_fmt] [-B backing_file] [-o options] [-l snapshot_param] [-S sparse_size] [-r rate_limit] [-m num_coroutines] [-W] [--salvage] filename [filename2 [...]] output_filename") + "convert [--object objectdef] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps] [-U] [-C] [-c] [-p] [-q] [-n] [-f fmt] [-t cache] [-T src_cache] [-O output_fmt] [-B backing_file [-F backing_fmt]] [-o options] [-l snapshot_param] [-S sparse_size] [-r rate_limit] [-m num_coroutines] [-W] [--salvage] filename [filename2 [...]] output_filename") SRST -.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] [--salvage] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME +.. 
option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE [-F BACKING_FMT]] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] [--salvage] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME ERST DEF("create", img_create, - "create [--object objectdef] [-q] [-f fmt] [-b backing_file] [-F backing_fmt] [-u] [-o options] filename [size]") + "create [--object objectdef] [-q] [-f fmt] [-b backing_file [-F backing_fmt]] [-u] [-o options] filename [size]") SRST -.. option:: create [--object OBJECTDEF] [-q] [-f FMT] [-b BACKING_FILE] [-F BACKING_FMT] [-u] [-o OPTIONS] FILENAME [SIZE] +.. option:: create [--object OBJECTDEF] [-q] [-f FMT] [-b BACKING_FILE [-F BACKING_FMT]] [-u] [-o OPTIONS] FILENAME [SIZE] ERST DEF("dd", img_dd, diff --git a/qemu-img.c b/qemu-img.c index 908fd0cce5..a9b3a8103c 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -25,7 +25,8 @@ #include "qemu/osdep.h" #include -#include "qemu-common.h" +#include "qemu/help-texts.h" +#include "qemu/qemu-progress.h" #include "qemu-version.h" #include "qapi/error.h" #include "qapi/qapi-commands-block-core.h" @@ -42,6 +43,7 @@ #include "qemu/module.h" #include "qemu/sockets.h" #include "qemu/units.h" +#include "qemu/memalign.h" #include "qom/object_interfaces.h" #include "sysemu/block-backend.h" #include "block/block_int.h" @@ -98,7 +100,8 @@ static void format_print(void *opaque, const char *name) printf(" %s", name); } -static void QEMU_NORETURN GCC_FMT_ATTR(1, 2) error_exit(const char *fmt, ...) +static G_NORETURN G_GNUC_PRINTF(1, 2) +void error_exit(const char *fmt, ...) { va_list ap; @@ -110,18 +113,21 @@ static void QEMU_NORETURN GCC_FMT_ATTR(1, 2) error_exit(const char *fmt, ...) exit(EXIT_FAILURE); } -static void QEMU_NORETURN missing_argument(const char *option) +static G_NORETURN +void missing_argument(const char *option) { error_exit("missing argument for option '%s'", option); } -static void QEMU_NORETURN unrecognized_option(const char *option) +static G_NORETURN +void unrecognized_option(const char *option) { error_exit("unrecognized option '%s'", option); } /* Please keep in synch with docs/tools/qemu-img.rst */ -static void QEMU_NORETURN help(void) +static G_NORETURN +void help(void) { const char *help_msg = QEMU_IMG_VERSION @@ -158,8 +164,8 @@ static void QEMU_NORETURN help(void) " 'output_filename' is the destination disk image filename\n" " 'output_fmt' is the destination format\n" " 'options' is a comma separated list of format specific options in a\n" - " name=value format. Use -o ? for an overview of the options supported by the\n" - " used format\n" + " name=value format. Use -o help for an overview of the options supported by\n" + " the used format\n" " 'snapshot_param' is param used for internal snapshot, format\n" " is 'snapshot.id=[ID],snapshot.name=[NAME]', or\n" " '[ID_OR_NAME]'\n" @@ -283,7 +289,7 @@ static QemuOptsList qemu_source_opts = { }, }; -static int GCC_FMT_ATTR(2, 3) qprintf(bool quiet, const char *fmt, ...) +static int G_GNUC_PRINTF(2, 3) qprintf(bool quiet, const char *fmt, ...) 
{ int ret = 0; if (!quiet) { @@ -902,13 +908,14 @@ static void common_block_job_cb(void *opaque, int ret) static void run_block_job(BlockJob *job, Error **errp) { uint64_t progress_current, progress_total; - AioContext *aio_context = blk_get_aio_context(job->blk); + AioContext *aio_context = block_job_get_aio_context(job); int ret = 0; - aio_context_acquire(aio_context); - job_ref(&job->job); + job_lock(); + job_ref_locked(&job->job); do { float progress = 0.0f; + job_unlock(); aio_poll(aio_context, true); progress_get_snapshot(&job->job.progress, &progress_current, @@ -917,15 +924,17 @@ static void run_block_job(BlockJob *job, Error **errp) progress = (float)progress_current / progress_total * 100.f; } qemu_progress_print(progress, 0); - } while (!job_is_ready(&job->job) && !job_is_completed(&job->job)); + job_lock(); + } while (!job_is_ready_locked(&job->job) && + !job_is_completed_locked(&job->job)); - if (!job_is_completed(&job->job)) { - ret = job_complete_sync(&job->job, errp); + if (!job_is_completed_locked(&job->job)) { + ret = job_complete_sync_locked(&job->job, errp); } else { ret = job->job.ret; } - job_unref(&job->job); - aio_context_release(aio_context); + job_unref_locked(&job->job); + job_unlock(); /* publish completion progress only when success */ if (!ret) { @@ -1171,19 +1180,34 @@ static int is_allocated_sectors(const uint8_t *buf, int n, int *pnum, } } + if (i == n) { + /* + * The whole buf is the same. + * No reason to split it into chunks, so return now. + */ + *pnum = i; + return !is_zero; + } + tail = (sector_num + i) & (alignment - 1); if (tail) { if (is_zero && i <= tail) { - /* treat unallocated areas which only consist - * of a small tail as allocated. */ + /* + * For sure next sector after i is data, and it will rewrite this + * tail anyway due to RMW. So, let's just write data now. + */ is_zero = false; } if (!is_zero) { - /* align up end offset of allocated areas. */ + /* If possible, align up end offset of allocated areas. */ i += alignment - tail; i = MIN(i, n); } else { - /* align down end offset of zero areas. */ + /* + * For sure next sector after i is data, and it will rewrite this + * tail anyway due to RMW. Better is avoid RMW and write zeroes up + * to aligned bound. 
+ */ i -= tail; } } @@ -1288,7 +1312,7 @@ static int check_empty_sectors(BlockBackend *blk, int64_t offset, int ret = 0; int64_t idx; - ret = blk_pread(blk, offset, buffer, bytes); + ret = blk_pread(blk, offset, bytes, buffer, 0); if (ret < 0) { error_report("Error while reading offset %" PRId64 " of %s: %s", offset, filename, strerror(-ret)); @@ -1505,7 +1529,7 @@ static int img_compare(int argc, char **argv) int64_t pnum; chunk = MIN(chunk, IO_BUF_SIZE); - ret = blk_pread(blk1, offset, buf1, chunk); + ret = blk_pread(blk1, offset, chunk, buf1, 0); if (ret < 0) { error_report("Error while reading offset %" PRId64 " of %s: %s", @@ -1513,7 +1537,7 @@ static int img_compare(int argc, char **argv) ret = 4; goto out; } - ret = blk_pread(blk2, offset, buf2, chunk); + ret = blk_pread(blk2, offset, chunk, buf2, 0); if (ret < 0) { error_report("Error while reading offset %" PRId64 " of %s: %s", @@ -1602,16 +1626,16 @@ static void do_dirty_bitmap_merge(const char *dst_node, const char *dst_name, const char *src_node, const char *src_name, Error **errp) { - BlockDirtyBitmapMergeSource *merge_src; - BlockDirtyBitmapMergeSourceList *list = NULL; + BlockDirtyBitmapOrStr *merge_src; + BlockDirtyBitmapOrStrList *list = NULL; - merge_src = g_new0(BlockDirtyBitmapMergeSource, 1); + merge_src = g_new0(BlockDirtyBitmapOrStr, 1); merge_src->type = QTYPE_QDICT; merge_src->u.external.node = g_strdup(src_node); merge_src->u.external.name = g_strdup(src_name); QAPI_LIST_PREPEND(list, merge_src); qmp_block_dirty_bitmap_merge(dst_node, dst_name, list, errp); - qapi_free_BlockDirtyBitmapMergeSourceList(list); + qapi_free_BlockDirtyBitmapOrStrList(list); } enum ImgConvertBlockStatus { @@ -2093,7 +2117,7 @@ static int convert_do_copy(ImgConvertState *s) if (s->compressed && !s->ret) { /* signal EOF to align */ - ret = blk_pwrite_compressed(s->target, 0, NULL, 0); + ret = blk_pwrite_compressed(s->target, 0, 0, NULL); if (ret < 0) { return ret; } @@ -2183,7 +2207,8 @@ static int img_convert(int argc, char **argv) int c, bs_i, flags, src_flags = BDRV_O_NO_SHARE; const char *fmt = NULL, *out_fmt = NULL, *cache = "unsafe", *src_cache = BDRV_DEFAULT_CACHE, *out_baseimg = NULL, - *out_filename, *out_baseimg_param, *snapshot_name = NULL; + *out_filename, *out_baseimg_param, *snapshot_name = NULL, + *backing_fmt = NULL; BlockDriver *drv = NULL, *proto_drv = NULL; BlockDriverInfo bdi; BlockDriverState *out_bs; @@ -2223,7 +2248,7 @@ static int img_convert(int argc, char **argv) {"skip-broken-bitmaps", no_argument, 0, OPTION_SKIP_BROKEN}, {0, 0, 0, 0} }; - c = getopt_long(argc, argv, ":hf:O:B:Cco:l:S:pt:T:qnm:WUr:", + c = getopt_long(argc, argv, ":hf:O:B:CcF:o:l:S:pt:T:qnm:WUr:", long_options, NULL); if (c == -1) { break; @@ -2253,6 +2278,9 @@ static int img_convert(int argc, char **argv) case 'c': s.compressed = true; break; + case 'F': + backing_fmt = optarg; + break; case 'o': if (accumulate_options(&options, optarg) < 0) { goto fail_getopt; @@ -2521,7 +2549,7 @@ static int img_convert(int argc, char **argv) qemu_opt_set_number(opts, BLOCK_OPT_SIZE, s.total_sectors * BDRV_SECTOR_SIZE, &error_abort); - ret = add_old_style_options(out_fmt, opts, out_baseimg, NULL); + ret = add_old_style_options(out_fmt, opts, out_baseimg, backing_fmt); if (ret < 0) { goto out; } @@ -2628,6 +2656,14 @@ static int img_convert(int argc, char **argv) goto out; } + if (flags & BDRV_O_NOCACHE) { + /* + * If we open the target with O_DIRECT, it may be necessary to + * extend its size to align to the physical sector size. 
+ */ + flags |= BDRV_O_RESIZE; + } + if (skip_create) { s.target = img_open(tgt_image_opts, out_filename, out_fmt, flags, writethrough, s.quiet, false); @@ -3277,11 +3313,11 @@ static int img_snapshot(int argc, char **argv) char *filename, *snapshot_name = NULL; int c, ret = 0, bdrv_oflags; int action = 0; - qemu_timeval tv; bool quiet = false; Error *err = NULL; bool image_opts = false; bool force_share = false; + int64_t rt; bdrv_oflags = BDRV_O_RDWR; /* Parse commandline parameters */ @@ -3378,9 +3414,9 @@ static int img_snapshot(int argc, char **argv) memset(&sn, 0, sizeof(sn)); pstrcpy(sn.name, sizeof(sn.name), snapshot_name); - qemu_gettimeofday(&tv); - sn.date_sec = tv.tv_sec; - sn.date_nsec = tv.tv_usec * 1000; + rt = g_get_real_time(); + sn.date_sec = rt / G_USEC_PER_SEC; + sn.date_nsec = (rt % G_USEC_PER_SEC) * 1000; ret = bdrv_snapshot_create(bs, &sn); if (ret) { @@ -3746,7 +3782,7 @@ static int img_rebase(int argc, char **argv) n = old_backing_size - offset; } - ret = blk_pread(blk_old_backing, offset, buf_old, n); + ret = blk_pread(blk_old_backing, offset, n, buf_old, 0); if (ret < 0) { error_report("error while reading from old backing file"); goto out; @@ -3760,7 +3796,7 @@ static int img_rebase(int argc, char **argv) n = new_backing_size - offset; } - ret = blk_pread(blk_new_backing, offset, buf_new, n); + ret = blk_pread(blk_new_backing, offset, n, buf_new, 0); if (ret < 0) { error_report("error while reading from new backing file"); goto out; @@ -3779,8 +3815,8 @@ static int img_rebase(int argc, char **argv) if (buf_old_is_zero) { ret = blk_pwrite_zeroes(blk, offset + written, pnum, 0); } else { - ret = blk_pwrite(blk, offset + written, - buf_old + written, pnum, 0); + ret = blk_pwrite(blk, offset + written, pnum, + buf_old + written, 0); } if (ret < 0) { error_report("Error while writing to COW image: %s", @@ -4335,7 +4371,7 @@ static int img_bench(int argc, char **argv) struct timeval t1, t2; int i; bool force_share = false; - size_t buf_size; + size_t buf_size = 0; for (;;) { static const struct option long_options[] = { @@ -4534,7 +4570,7 @@ static int img_bench(int argc, char **argv) data.buf = blk_blockalign(blk, buf_size); memset(data.buf, pattern, data.nrreq * data.bufsize); - blk_register_buf(blk, data.buf, buf_size); + blk_register_buf(blk, data.buf, buf_size, &error_fatal); data.qiov = g_new(QEMUIOVector, data.nrreq); for (i = 0; i < data.nrreq; i++) { @@ -4557,7 +4593,7 @@ static int img_bench(int argc, char **argv) out: if (data.buf) { - blk_unregister_buf(blk, data.buf); + blk_unregister_buf(blk, data.buf, buf_size); } qemu_vfree(data.buf); blk_unref(blk); @@ -4886,7 +4922,7 @@ static int img_dd(int argc, char **argv) const char *out_fmt = "raw"; const char *fmt = NULL; int64_t size = 0; - int64_t block_count = 0, out_pos, in_pos; + int64_t out_pos, in_pos; bool force_share = false; struct DdInfo dd = { .flags = 0, @@ -5086,31 +5122,24 @@ static int img_dd(int argc, char **argv) in.buf = g_new(uint8_t, in.bsz); - for (out_pos = 0; in_pos < size; block_count++) { - int in_ret, out_ret; + for (out_pos = 0; in_pos < size; ) { + int bytes = (in_pos + in.bsz > size) ? 
size - in_pos : in.bsz; - if (in_pos + in.bsz > size) { - in_ret = blk_pread(blk1, in_pos, in.buf, size - in_pos); - } else { - in_ret = blk_pread(blk1, in_pos, in.buf, in.bsz); - } - if (in_ret < 0) { + ret = blk_pread(blk1, in_pos, bytes, in.buf, 0); + if (ret < 0) { error_report("error while reading from input image file: %s", - strerror(-in_ret)); - ret = -1; + strerror(-ret)); goto out; } - in_pos += in_ret; + in_pos += bytes; - out_ret = blk_pwrite(blk2, out_pos, in.buf, in_ret, 0); - - if (out_ret < 0) { + ret = blk_pwrite(blk2, out_pos, bytes, in.buf, 0); + if (ret < 0) { error_report("error while writing to output image file: %s", - strerror(-out_ret)); - ret = -1; + strerror(-ret)); goto out; } - out_pos += out_ret; + out_pos += bytes; } out: @@ -5350,7 +5379,6 @@ int main(int argc, char **argv) { const img_cmd_t *cmd; const char *cmdname; - Error *local_error = NULL; int c; static const struct option long_options[] = { {"help", no_argument, 0, 'h'}, @@ -5368,10 +5396,7 @@ int main(int argc, char **argv) module_call_init(MODULE_INIT_TRACE); qemu_init_exec_dir(argv[0]); - if (qemu_init_main_loop(&local_error)) { - error_report_err(local_error); - exit(EXIT_FAILURE); - } + qemu_init_main_loop(&error_fatal); qcrypto_init(&error_fatal); @@ -5418,7 +5443,7 @@ int main(int argc, char **argv) exit(1); } trace_init_file(); - qemu_set_log(LOG_TRACE); + qemu_set_log(LOG_TRACE, &error_fatal); /* find the command */ for (cmd = img_cmds; cmd->name != NULL; cmd++) { diff --git a/qemu-io-cmds.c b/qemu-io-cmds.c index 46593d632d..952dc940f1 100644 --- a/qemu-io-cmds.c +++ b/qemu-io-cmds.c @@ -21,6 +21,7 @@ #include "qemu/option.h" #include "qemu/timer.h" #include "qemu/cutils.h" +#include "qemu/memalign.h" #define CMD_NOFILE_OK 0x01 @@ -540,28 +541,34 @@ fail: static int do_pread(BlockBackend *blk, char *buf, int64_t offset, int64_t bytes, int64_t *total) { + int ret; + if (bytes > INT_MAX) { return -ERANGE; } - *total = blk_pread(blk, offset, (uint8_t *)buf, bytes); - if (*total < 0) { - return *total; + ret = blk_pread(blk, offset, bytes, (uint8_t *)buf, 0); + if (ret < 0) { + return ret; } + *total = bytes; return 1; } static int do_pwrite(BlockBackend *blk, char *buf, int64_t offset, int64_t bytes, int flags, int64_t *total) { + int ret; + if (bytes > INT_MAX) { return -ERANGE; } - *total = blk_pwrite(blk, offset, (uint8_t *)buf, bytes, flags); - if (*total < 0) { - return *total; + ret = blk_pwrite(blk, offset, bytes, (uint8_t *)buf, flags); + if (ret < 0) { + return ret; } + *total = bytes; return 1; } @@ -603,10 +610,6 @@ static int do_co_pwrite_zeroes(BlockBackend *blk, int64_t offset, .done = false, }; - if (bytes > INT_MAX) { - return -ERANGE; - } - co = qemu_coroutine_create(co_pwrite_zeroes_entry, &data); bdrv_coroutine_enter(blk_bs(blk), co); while (!data.done) { @@ -628,7 +631,7 @@ static int do_write_compressed(BlockBackend *blk, char *buf, int64_t offset, return -ERANGE; } - ret = blk_pwrite_compressed(blk, offset, buf, bytes); + ret = blk_pwrite_compressed(blk, offset, bytes, buf); if (ret < 0) { return ret; } @@ -1160,8 +1163,9 @@ static int write_f(BlockBackend *blk, int argc, char **argv) if (count < 0) { print_cvtnum_err(count, argv[optind]); return count; - } else if (count > BDRV_REQUEST_MAX_BYTES) { - printf("length cannot exceed %" PRIu64 ", given %s\n", + } else if (count > BDRV_REQUEST_MAX_BYTES && + !(flags & BDRV_REQ_NO_FALLBACK)) { + printf("length cannot exceed %" PRIu64 " without -n, given %s\n", (uint64_t)BDRV_REQUEST_MAX_BYTES, argv[optind]); return -EINVAL; } 
@@ -1993,11 +1997,9 @@ static int map_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes, int64_t *pnum) { int64_t num; - int num_checked; int ret, firstret; - num_checked = MIN(bytes, BDRV_REQUEST_MAX_BYTES); - ret = bdrv_is_allocated(bs, offset, num_checked, &num); + ret = bdrv_is_allocated(bs, offset, bytes, &num); if (ret < 0) { return ret; } @@ -2009,8 +2011,7 @@ static int map_is_allocated(BlockDriverState *bs, int64_t offset, offset += num; bytes -= num; - num_checked = MIN(bytes, BDRV_REQUEST_MAX_BYTES); - ret = bdrv_is_allocated(bs, offset, num_checked, &num); + ret = bdrv_is_allocated(bs, offset, bytes, &num); if (ret == firstret && num) { *pnum += num; } else { diff --git a/qemu-io.c b/qemu-io.c index 57f07501df..2bd7bfb650 100644 --- a/qemu-io.c +++ b/qemu-io.c @@ -15,7 +15,8 @@ #include #endif -#include "qemu-common.h" +#include "qemu/help-texts.h" +#include "qemu/cutils.h" #include "qapi/error.h" #include "qemu-io.h" #include "qemu/error-report.h" @@ -323,13 +324,13 @@ static char *get_prompt(void) static char prompt[FILENAME_MAX + 2 /*"> "*/ + 1 /*"\0"*/ ]; if (!prompt[0]) { - snprintf(prompt, sizeof(prompt), "%s> ", error_get_progname()); + snprintf(prompt, sizeof(prompt), "%s> ", g_get_prgname()); } return prompt; } -static void GCC_FMT_ATTR(2, 3) readline_printf_func(void *opaque, +static void G_GNUC_PRINTF(2, 3) readline_printf_func(void *opaque, const char *fmt, ...) { va_list ap; @@ -529,7 +530,6 @@ int main(int argc, char **argv) int flags = BDRV_O_UNMAP; int ret; bool writethrough = true; - Error *local_error = NULL; QDict *opts = NULL; const char *format = NULL; bool force_share = false; @@ -599,10 +599,10 @@ int main(int argc, char **argv) break; case 'V': printf("%s version " QEMU_FULL_VERSION "\n" - QEMU_COPYRIGHT "\n", error_get_progname()); + QEMU_COPYRIGHT "\n", g_get_prgname()); exit(0); case 'h': - usage(error_get_progname()); + usage(g_get_prgname()); exit(0); case 'U': force_share = true; @@ -614,13 +614,13 @@ int main(int argc, char **argv) imageOpts = true; break; default: - usage(error_get_progname()); + usage(g_get_prgname()); exit(1); } } if ((argc - optind) > 1) { - usage(error_get_progname()); + usage(g_get_prgname()); exit(1); } @@ -629,16 +629,13 @@ int main(int argc, char **argv) exit(1); } - if (qemu_init_main_loop(&local_error)) { - error_report_err(local_error); - exit(1); - } + qemu_init_main_loop(&error_fatal); if (!trace_init_backends()) { exit(1); } trace_init_file(); - qemu_set_log(LOG_TRACE); + qemu_set_log(LOG_TRACE, &error_fatal); /* initialize commands */ qemuio_add_command(&quit_cmd); diff --git a/qemu-keymap.c b/qemu-keymap.c index 536e8f2385..4095b654a6 100644 --- a/qemu-keymap.c +++ b/qemu-keymap.c @@ -187,6 +187,7 @@ int main(int argc, char *argv[]) } fprintf(outfile, + "# SPDX-License-Identifier: GPL-2.0-or-later\n" "#\n" "# generated by qemu-keymap\n" "# model : %s\n" diff --git a/qemu-nbd.c b/qemu-nbd.c index 26ffbf15af..0cd5aa6f02 100644 --- a/qemu-nbd.c +++ b/qemu-nbd.c @@ -21,7 +21,7 @@ #include #include -#include "qemu-common.h" +#include "qemu/help-texts.h" #include "qapi/error.h" #include "qemu/cutils.h" #include "sysemu/block-backend.h" @@ -47,6 +47,10 @@ #include "trace/control.h" #include "qemu-version.h" +#ifdef CONFIG_SELINUX +#include +#endif + #ifdef __linux__ #define HAVE_NBD_DEVICE 1 #else @@ -64,6 +68,8 @@ #define QEMU_NBD_OPT_FORK 263 #define QEMU_NBD_OPT_TLSAUTHZ 264 #define QEMU_NBD_OPT_PID_FILE 265 +#define QEMU_NBD_OPT_SELINUX_LABEL 266 +#define QEMU_NBD_OPT_TLSHOSTNAME 267 #define MBR_SIZE 
512 @@ -116,6 +122,9 @@ static void usage(const char *name) " --fork fork off the server process and exit the parent\n" " once the server is running\n" " --pid-file=PATH store the server's process ID in the given file\n" +#ifdef CONFIG_SELINUX +" --selinux-label=LABEL set SELinux process label on listening socket\n" +#endif #if HAVE_NBD_DEVICE "\n" "Kernel NBD client support:\n" @@ -135,7 +144,9 @@ static void usage(const char *name) " 'snapshot.id=[ID],snapshot.name=[NAME]', or\n" " '[ID_OR_NAME]'\n" " -n, --nocache disable host cache\n" -" --cache=MODE set cache mode (none, writeback, ...)\n" +" --cache=MODE set cache mode used to access the disk image, the\n" +" valid options are: 'none', 'writeback' (default),\n" +" 'writethrough', 'directsync' and 'unsafe'\n" " --aio=MODE set AIO mode (native, io_uring or threads)\n" " --discard=MODE set discard mode (ignore, unmap)\n" " --detect-zeroes=MODE set detect-zeroes mode (off, on, unmap)\n" @@ -452,6 +463,7 @@ static const char *socket_activation_validate_opts(const char *device, const char *sockpath, const char *address, const char *port, + const char *selinux, bool list) { if (device != NULL) { @@ -470,6 +482,10 @@ static const char *socket_activation_validate_opts(const char *device, return "TCP port number can't be set when using socket activation"; } + if (selinux != NULL) { + return "SELinux label can't be set when using socket activation"; + } + if (list) { return "List mode is incompatible with socket activation"; } @@ -527,11 +543,14 @@ int main(int argc, char **argv) { "export-name", required_argument, NULL, 'x' }, { "description", required_argument, NULL, 'D' }, { "tls-creds", required_argument, NULL, QEMU_NBD_OPT_TLSCREDS }, + { "tls-hostname", required_argument, NULL, QEMU_NBD_OPT_TLSHOSTNAME }, { "tls-authz", required_argument, NULL, QEMU_NBD_OPT_TLSAUTHZ }, { "image-opts", no_argument, NULL, QEMU_NBD_OPT_IMAGE_OPTS }, { "trace", required_argument, NULL, 'T' }, { "fork", no_argument, NULL, QEMU_NBD_OPT_FORK }, { "pid-file", required_argument, NULL, QEMU_NBD_OPT_PID_FILE }, + { "selinux-label", required_argument, NULL, + QEMU_NBD_OPT_SELINUX_LABEL }, { NULL, 0, NULL, 0 } }; int ch; @@ -548,16 +567,18 @@ int main(int argc, char **argv) QDict *options = NULL; const char *export_name = NULL; /* defaults to "" later for server mode */ const char *export_description = NULL; - strList *bitmaps = NULL; + BlockDirtyBitmapOrStrList *bitmaps = NULL; bool alloc_depth = false; const char *tlscredsid = NULL; + const char *tlshostname = NULL; bool imageOpts = false; - bool writethrough = true; + bool writethrough = false; /* Client will flush as needed. 
*/ bool fork_process = false; bool list = false; int old_stderr = -1; unsigned socket_activation; const char *pid_file_name = NULL; + const char *selinux_label = NULL; BlockExportOptions *export_opts; #ifdef CONFIG_POSIX @@ -666,7 +687,14 @@ int main(int argc, char **argv) alloc_depth = true; break; case 'B': - QAPI_LIST_PREPEND(bitmaps, g_strdup(optarg)); + { + BlockDirtyBitmapOrStr *el = g_new(BlockDirtyBitmapOrStr, 1); + *el = (BlockDirtyBitmapOrStr) { + .type = QTYPE_QSTRING, + .u.local = g_strdup(optarg), + }; + QAPI_LIST_PREPEND(bitmaps, el); + } break; case 'k': sockpath = optarg; @@ -729,6 +757,9 @@ int main(int argc, char **argv) case QEMU_NBD_OPT_TLSCREDS: tlscredsid = optarg; break; + case QEMU_NBD_OPT_TLSHOSTNAME: + tlshostname = optarg; + break; case QEMU_NBD_OPT_IMAGE_OPTS: imageOpts = true; break; @@ -747,6 +778,9 @@ int main(int argc, char **argv) case QEMU_NBD_OPT_PID_FILE: pid_file_name = optarg; break; + case QEMU_NBD_OPT_SELINUX_LABEL: + selinux_label = optarg; + break; } } @@ -777,15 +811,18 @@ int main(int argc, char **argv) exit(1); } trace_init_file(); - qemu_set_log(LOG_TRACE); + qemu_set_log(LOG_TRACE, &error_fatal); socket_activation = check_socket_activation(); if (socket_activation == 0) { - setup_address_and_port(&bindto, &port); + if (!sockpath) { + setup_address_and_port(&bindto, &port); + } } else { /* Using socket activation - check user didn't use -p etc. */ const char *err_msg = socket_activation_validate_opts(device, sockpath, bindto, port, + selinux_label, list); if (err_msg != NULL) { error_report("%s", err_msg); @@ -801,10 +838,6 @@ int main(int argc, char **argv) } if (tlscredsid) { - if (sockpath) { - error_report("TLS is only supported with IPv4/IPv6"); - exit(EXIT_FAILURE); - } if (device) { error_report("TLS is not supported with a host device"); exit(EXIT_FAILURE); @@ -813,6 +846,10 @@ int main(int argc, char **argv) error_report("TLS authorization is incompatible with export list"); exit(EXIT_FAILURE); } + if (tlshostname && !list) { + error_report("TLS hostname is only supported with export list"); + exit(EXIT_FAILURE); + } tlscreds = nbd_get_tls_creds(tlscredsid, list, &local_err); if (local_err) { error_reportf_err(local_err, "Failed to get TLS creds: "); @@ -823,11 +860,28 @@ int main(int argc, char **argv) error_report("--tls-authz is not permitted without --tls-creds"); exit(EXIT_FAILURE); } + if (tlshostname) { + error_report("--tls-hostname is not permitted without --tls-creds"); + exit(EXIT_FAILURE); + } + } + + if (selinux_label) { +#ifdef CONFIG_SELINUX + if (sockpath == NULL && device == NULL) { + error_report("--selinux-label is not permitted without --socket"); + exit(EXIT_FAILURE); + } +#else + error_report("SELinux support not enabled in this binary"); + exit(EXIT_FAILURE); +#endif } if (list) { saddr = nbd_build_socket_address(sockpath, bindto, port); - return qemu_nbd_client_list(saddr, tlscreds, bindto); + return qemu_nbd_client_list(saddr, tlscreds, + tlshostname ? 
tlshostname : bindto); } #if !HAVE_NBD_DEVICE @@ -855,13 +909,14 @@ int main(int argc, char **argv) if ((device && !verbose) || fork_process) { #ifndef WIN32 + g_autoptr(GError) err = NULL; int stderr_fd[2]; pid_t pid; int ret; - if (qemu_pipe(stderr_fd) < 0) { + if (!g_unix_open_pipe(stderr_fd, FD_CLOEXEC, &err)) { error_report("Error setting up communication pipe: %s", - strerror(errno)); + err->message); exit(EXIT_FAILURE); } @@ -938,6 +993,13 @@ int main(int argc, char **argv) } else { backlog = MIN(shared, SOMAXCONN); } +#ifdef CONFIG_SELINUX + if (selinux_label && setsockcreatecon_raw(selinux_label) == -1) { + error_report("Cannot set SELinux socket create context to %s: %s", + selinux_label, strerror(errno)); + exit(EXIT_FAILURE); + } +#endif saddr = nbd_build_socket_address(sockpath, bindto, port); if (qio_net_listener_open_sync(server, saddr, backlog, &local_err) < 0) { @@ -945,6 +1007,13 @@ int main(int argc, char **argv) error_report_err(local_err); exit(EXIT_FAILURE); } +#ifdef CONFIG_SELINUX + if (selinux_label && setsockcreatecon_raw(NULL) == -1) { + error_report("Cannot clear SELinux socket create context: %s", + strerror(errno)); + exit(EXIT_FAILURE); + } +#endif } else { size_t i; /* See comment in check_socket_activation above. */ @@ -963,10 +1032,7 @@ int main(int argc, char **argv) } } - if (qemu_init_main_loop(&local_err)) { - error_report_err(local_err); - exit(EXIT_FAILURE); - } + qemu_init_main_loop(&error_fatal); bdrv_init(); atexit(qemu_nbd_shutdown); @@ -1029,7 +1095,7 @@ int main(int argc, char **argv) bs->detect_zeroes = detect_zeroes; - nbd_server_is_qemu_nbd(true); + nbd_server_is_qemu_nbd(shared); export_opts = g_new(BlockExportOptions, 1); *export_opts = (BlockExportOptions) { diff --git a/qemu-options.hx b/qemu-options.hx index 83aa59a920..e52289479b 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -36,7 +36,8 @@ DEF("machine", HAS_ARG, QEMU_OPTION_machine, \ " nvdimm=on|off controls NVDIMM support (default=off)\n" " memory-encryption=@var{} memory encryption object to use (default=none)\n" " hmat=on|off controls ACPI HMAT support (default=off)\n" - " memory-backend='backend-id' specifies explicitly provided backend for main RAM (default=none)\n", + " memory-backend='backend-id' specifies explicitly provided backend for main RAM (default=none)\n" + " cxl-fmw.0.targets.0=firsttarget,cxl-fmw.0.targets.1=secondtarget,cxl-fmw.0.size=size[,cxl-fmw.0.interleave-granularity=granularity]\n", QEMU_ARCH_ALL) SRST ``-machine [type=]name[,prop=value[,...]]`` @@ -124,10 +125,48 @@ SRST -object memory-backend-ram,id=pc.ram,size=512M,x-use-canonical-path-for-ramblock-id=off -machine memory-backend=pc.ram -m 512M + + ``cxl-fmw.0.targets.0=firsttarget,cxl-fmw.0.targets.1=secondtarget,cxl-fmw.0.size=size[,cxl-fmw.0.interleave-granularity=granularity]`` + Define a CXL Fixed Memory Window (CFMW). + + Described in the CXL 2.0 ECN: CEDT CFMWS & QTG _DSM. + + They are regions of Host Physical Addresses (HPA) on a system which + may be interleaved across one or more CXL host bridges. The system + software will assign particular devices into these windows and + configure the downstream Host-managed Device Memory (HDM) decoders + in root ports, switch ports and devices appropriately to meet the + interleave requirements before enabling the memory devices. + + ``targets.X=target`` provides the mapping to CXL host bridges + which may be identified by the id provided in the -device entry. 
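Putting the qemu-nbd changes above together: --selinux-label is only accepted for a UNIX-socket (or device) export, and the SELinux create-context is set immediately before the listener is opened and cleared again afterwards. A hedged sketch of starting such an export from a script; the image path, socket path and label are placeholders rather than values taken from the patch::

    import subprocess

    cmd = [
        "qemu-nbd",
        "--fork",                          # daemonize once the socket is ready
        "-k", "/run/qemu-nbd/disk.sock",   # UNIX socket export (required for the label)
        "--selinux-label", "system_u:object_r:svirt_socket_t:s0",   # placeholder context
        "disk.qcow2",                      # placeholder image
    ]
    subprocess.run(cmd, check=True)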
+ Multiple entries are needed to specify all the targets when + the fixed memory window represents interleaved memory. X is the + target index from 0. + + ``size=size`` sets the size of the CFMW. This must be a multiple of + 256MiB. The region will be aligned to 256MiB but the location is + platform and configuration dependent. + + ``interleave-granularity=granularity`` sets the granularity of + interleave. Default 256KiB. Only 256KiB, 512KiB, 1024KiB, 2048KiB, + 4096KiB, 8192KiB and 16384KiB granularities are supported. + + Example: + + :: + + -machine cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.targets.1=cxl.1,cxl-fmw.0.size=128G,cxl-fmw.0.interleave-granularity=512k ERST -HXCOMM Deprecated by -machine -DEF("M", HAS_ARG, QEMU_OPTION_M, "", QEMU_ARCH_ALL) +DEF("M", HAS_ARG, QEMU_OPTION_M, + " sgx-epc.0.memdev=memid,sgx-epc.0.node=numaid\n", + QEMU_ARCH_ALL) + +SRST +``sgx-epc.0.memdev=@var{memid},sgx-epc.0.node=@var{numaid}`` + Define an SGX EPC section. +ERST DEF("cpu", HAS_ARG, QEMU_OPTION_cpu, "-cpu cpu select CPU ('-cpu help' for list)\n", QEMU_ARCH_ALL) @@ -146,6 +185,7 @@ DEF("accel", HAS_ARG, QEMU_OPTION_accel, " split-wx=on|off (enable TCG split w^x mapping)\n" " tb-size=n (TCG translation block cache size)\n" " dirty-ring-size=n (KVM dirty ring GFN count, default 0)\n" + " notify-vmexit=run|internal-error|disable,notify-window=n (enable notify VM exit and set notify window, x86 only)\n" + " thread=single|multi (enable multi-threaded TCG)\n", QEMU_ARCH_ALL) SRST ``-accel name[,prop=value[,...]]`` @@ -197,41 +237,112 @@ SRST is disabled (dirty-ring-size=0). When enabled, KVM will instead record dirty pages in a bitmap. + ``notify-vmexit=run|internal-error|disable,notify-window=n`` + Enables or disables notify VM exit support on x86 hosts and specifies + the corresponding notify window to trigger the VM exit if enabled. + The ``run`` option enables the feature; it does nothing and continues + if the exit happens. The ``internal-error`` option enables the feature; + it raises an internal error. The ``disable`` option doesn't enable the feature. + This feature can mitigate CPU stuck issues caused by event windows that don't + open up for a specified amount of time (i.e. the notify-window). + Default: notify-vmexit=run,notify-window=0. + ERST DEF("smp", HAS_ARG, QEMU_OPTION_smp, - "-smp [[cpus=]n][,maxcpus=cpus][,sockets=sockets][,dies=dies][,cores=cores][,threads=threads]\n" - " set the number of CPUs to 'n' [default=1]\n" + "-smp [[cpus=]n][,maxcpus=maxcpus][,sockets=sockets][,dies=dies][,clusters=clusters][,cores=cores][,threads=threads]\n" + " set the number of initial CPUs to 'n' [default=1]\n" " maxcpus= maximum number of total CPUs, including\n" " offline CPUs for hotplug, etc\n" - " sockets= number of discrete sockets in the system\n" - " dies= number of CPU dies on one socket (for PC only)\n" - " cores= number of CPU cores on one socket (for PC, it's on one die)\n" - " threads= number of threads on one CPU core\n", - QEMU_ARCH_ALL) + " sockets= number of sockets on the machine board\n" + " dies= number of dies in one socket\n" + " clusters= number of clusters in one die\n" + " cores= number of cores in one cluster\n" + " threads= number of threads in one core\n" + "Note: Different machines may have different subsets of the CPU topology\n" + " parameters supported, so the actual meaning of the supported parameters\n" + " will vary accordingly. 
For example, for a machine type that supports a\n" + " three-level CPU hierarchy of sockets/cores/threads, the parameters will\n" + " sequentially mean as below:\n" + " sockets means the number of sockets on the machine board\n" + " cores means the number of cores in one socket\n" + " threads means the number of threads in one core\n" + " For a particular machine type board, an expected CPU topology hierarchy\n" + " can be defined through the supported sub-option. Unsupported parameters\n" + " can also be provided in addition to the sub-option, but their values\n" + " must be set as 1 for the purpose of correct parsing.\n", + QEMU_ARCH_ALL) SRST -``-smp [[cpus=]n][,maxcpus=maxcpus][,sockets=sockets][,dies=dies][,cores=cores][,threads=threads]`` +``-smp [[cpus=]n][,maxcpus=maxcpus][,sockets=sockets][,dies=dies][,clusters=clusters][,cores=cores][,threads=threads]`` Simulate a SMP system with '\ ``n``\ ' CPUs initially present on the machine type board. On boards supporting CPU hotplug, the optional '\ ``maxcpus``\ ' parameter can be set to enable further CPUs to be - added at runtime. If omitted the maximum number of CPUs will be - set to match the initial CPU count. Both parameters are subject to - an upper limit that is determined by the specific machine type chosen. + added at runtime. When both parameters are omitted, the maximum number + of CPUs will be calculated from the provided topology members and the + initial CPU count will match the maximum number. When only one of them + is given then the omitted one will be set to its counterpart's value. + Both parameters may be specified, but the maximum number of CPUs must + be equal to or greater than the initial CPU count. The product of the + CPU topology hierarchy must be equal to the maximum number of CPUs. + Both parameters are subject to an upper limit that is determined by + the specific machine type chosen. - To control reporting of CPU topology information, the number of sockets, - dies per socket, cores per die, and threads per core can be specified. - The sum `` sockets * cores * dies * threads `` must be equal to the - maximum CPU count. CPU targets may only support a subset of the topology - parameters. Where a CPU target does not support use of a particular - topology parameter, its value should be assumed to be 1 for the purpose - of computing the CPU maximum count. + To control reporting of CPU topology information, values of the topology + parameters can be specified. Machines may only support a subset of the + parameters and different machines may have different subsets supported + which vary depending on the capacity of the corresponding CPU targets. So + for a particular machine type board, an expected topology hierarchy can + be defined through the supported sub-option. Unsupported parameters can + also be provided in addition to the sub-option, but their values must be + set as 1 for the purpose of correct parsing. Either the initial CPU count, or at least one of the topology parameters - must be specified. Values for any omitted parameters will be computed - from those which are given. Historically preference was given to the - coarsest topology parameters when computing missing values (ie sockets - preferred over cores, which were preferred over threads), however, this - behaviour is considered liable to change. + must be specified. The specified parameters must be greater than zero; + explicit configuration like "cpus=0" is not allowed. Values for any + omitted parameters will be computed from those which are given. 
+ + For example, the following sub-option defines a CPU topology hierarchy + (2 sockets totally on the machine, 2 cores per socket, 2 threads per + core) for a machine that only supports sockets/cores/threads. + Some members of the option can be omitted but their values will be + automatically computed: + + :: + + -smp 8,sockets=2,cores=2,threads=2,maxcpus=8 + + The following sub-option defines a CPU topology hierarchy (2 sockets + totally on the machine, 2 dies per socket, 2 cores per die, 2 threads + per core) for PC machines which support sockets/dies/cores/threads. + Some members of the option can be omitted but their values will be + automatically computed: + + :: + + -smp 16,sockets=2,dies=2,cores=2,threads=2,maxcpus=16 + + The following sub-option defines a CPU topology hierarchy (2 sockets + totally on the machine, 2 clusters per socket, 2 cores per cluster, + 2 threads per core) for ARM virt machines which support sockets/clusters + /cores/threads. Some members of the option can be omitted but their values + will be automatically computed: + + :: + + -smp 16,sockets=2,clusters=2,cores=2,threads=2,maxcpus=16 + + Historically preference was given to the coarsest topology parameters + when computing missing values (ie sockets preferred over cores, which + were preferred over threads), however, this behaviour is considered + liable to change. Prior to 6.2 the preference was sockets over cores + over threads. Since 6.2 the preference is cores over sockets over threads. + + For example, the following option defines a machine board with 2 sockets + of 1 core before 6.2 and 1 socket of 2 cores after 6.2: + + :: + + -smp 2 ERST DEF("numa", HAS_ARG, QEMU_OPTION_numa, @@ -251,7 +362,7 @@ SRST \ ``-numa cpu,node-id=node[,socket-id=x][,core-id=y][,thread-id=z]`` \ -``-numa hmat-lb,initiator=node,target=node,hierarchy=hierarchy,data-type=tpye[,latency=lat][,bandwidth=bw]`` +``-numa hmat-lb,initiator=node,target=node,hierarchy=hierarchy,data-type=type[,latency=lat][,bandwidth=bw]`` \ ``-numa hmat-cache,node-id=node,size=size,level=level[,associativity=str][,policy=str][,line=size]`` Define a NUMA node and assign RAM and VCPUs to it. Set the NUMA @@ -387,7 +498,7 @@ SRST -m 2G \ -object memory-backend-ram,size=1G,id=m0 \ -object memory-backend-ram,size=1G,id=m1 \ - -smp 2 \ + -smp 2,sockets=2,maxcpus=2 \ -numa node,nodeid=0,memdev=m0 \ -numa node,nodeid=1,memdev=m1,initiator=0 \ -numa cpu,node-id=0,socket-id=0 \ @@ -594,9 +705,35 @@ SRST (deprecated) environment variables. ERST +DEF("audio", HAS_ARG, QEMU_OPTION_audio, + "-audio [driver=]driver,model=value[,prop[=value][,...]]\n" + " specifies the audio backend and device to use;\n" + " apart from 'model', options are the same as for -audiodev.\n" + " use '-audio model=help' to show possible devices.\n", + QEMU_ARCH_ALL) +SRST +``-audio [driver=]driver,model=value[,prop[=value][,...]]`` + This option is a shortcut for configuring both the guest audio + hardware and the host audio backend in one go. + The driver option is the same as with the corresponding ``-audiodev`` option below. + The guest hardware model can be set with ``model=modelname``. + + Use ``driver=help`` to list the available drivers, + and ``model=help`` to list the available device types. + + The following two example do exactly the same, to show how ``-audio`` + can be used to shorten the command line length: + + .. 
parsed-literal:: + + |qemu_system| -audiodev pa,id=pa -device sb16,audiodev=pa + |qemu_system| -audio pa,model=sb16 +ERST + DEF("audiodev", HAS_ARG, QEMU_OPTION_audiodev, "-audiodev [driver=]driver,id=id[,prop[=value][,...]]\n" " specifies the audio backend to use\n" + " Use ``-audiodev help`` to list the available drivers\n" " id= identifier of the backend\n" " timer-period= timer period in microseconds\n" " in|out.mixing-engine= use mixing engine to mix streams inside QEMU\n" @@ -643,8 +780,14 @@ DEF("audiodev", HAS_ARG, QEMU_OPTION_audiodev, "-audiodev sdl,id=id[,prop[=value][,...]]\n" " in|out.buffer-count= number of buffers\n" #endif +#ifdef CONFIG_AUDIO_SNDIO + "-audiodev sndio,id=id[,prop[=value][,...]]\n" +#endif #ifdef CONFIG_SPICE "-audiodev spice,id=id[,prop[=value][,...]]\n" +#endif +#ifdef CONFIG_DBUS_DISPLAY + "-audiodev dbus,id=id[,prop[=value][,...]]\n" #endif "-audiodev wav,id=id[,prop[=value][,...]]\n" " path= path of wav file to record\n", @@ -806,6 +949,19 @@ SRST ``in|out.buffer-count=count`` Sets the count of the buffers. +``-audiodev sndio,id=id[,prop[=value][,...]]`` + Creates a backend using SNDIO. This backend is available on + OpenBSD and most other Unix-like systems. + + Sndio specific options are: + + ``in|out.dev=device`` + Specify the sndio device to use for input and/or output. Default + is ``default``. + + ``in|out.latency=usecs`` + Sets the desired period length in microseconds. + ``-audiodev spice,id=id[,prop[=value][,...]]`` Creates a backend that sends audio through SPICE. This backend requires ``-spice`` and automatically selected in that case, so @@ -822,33 +978,6 @@ SRST ``qemu.wav``. ERST -DEF("soundhw", HAS_ARG, QEMU_OPTION_soundhw, - "-soundhw c1,... enable audio support\n" - " and only specified sound cards (comma separated list)\n" - " use '-soundhw help' to get the list of supported cards\n" - " use '-soundhw all' to enable all of them\n", QEMU_ARCH_ALL) -SRST -``-soundhw card1[,card2,...] or -soundhw all`` - Enable audio and selected sound hardware. Use 'help' to print all - available sound hardware. For example: - - .. parsed-literal:: - - |qemu_system_x86| -soundhw sb16,adlib disk.img - |qemu_system_x86| -soundhw es1370 disk.img - |qemu_system_x86| -soundhw ac97 disk.img - |qemu_system_x86| -soundhw hda disk.img - |qemu_system_x86| -soundhw all disk.img - |qemu_system_x86| -soundhw help - - Note that Linux's i810\_audio OSS kernel (for AC97) module might - require manually specifying clocking. - - :: - - modprobe i810_audio clocking=48000 -ERST - DEF("device", HAS_ARG, QEMU_OPTION_device, "-device driver[,prop[=value][,...]]\n" " add device (based on driver)\n" @@ -916,7 +1045,7 @@ SRST details on the external interface. ``-device isa-ipmi-kcs,bmc=id[,ioport=val][,irq=val]`` - Add a KCS IPMI interafce on the ISA bus. This also adds a + Add a KCS IPMI interface on the ISA bus. This also adds a corresponding ACPI and SMBIOS entries, if appropriate. ``bmc=id`` @@ -936,7 +1065,7 @@ SRST is 0xe4 and the default interrupt is 5. ``-device pci-ipmi-kcs,bmc=id`` - Add a KCS IPMI interafce on the PCI bus. + Add a KCS IPMI interface on the PCI bus. ``bmc=id`` The BMC to connect to, one of ipmi-bmc-sim or ipmi-bmc-extern above. @@ -1005,6 +1134,31 @@ DEFHEADING() DEFHEADING(Block device options:) +SRST +The QEMU block device handling options have a long history and +have gone through several iterations as the feature set and complexity +of the block layer have grown. 
Many online guides to QEMU often +reference older and deprecated options, which can lead to confusion. + +The most explicit way to describe disks is to use a combination of +``-device`` to specify the hardware device and ``-blockdev`` to +describe the backend. The device defines what the guest sees and the +backend describes how QEMU handles the data. It is the only guaranteed +stable interface for describing block devices and as such is +recommended for management tools and scripting. + +The ``-drive`` option combines the device and backend into a single +command line option which is a more human friendly. There is however no +interface stability guarantee although some older board models still +need updating to work with the modern blockdev forms. + +Older options like ``-hda`` are essentially macros which expand into +``-drive`` options for various drive interfaces. The original forms +bake in a lot of assumptions from the days when QEMU was emulating a +legacy PC, they are not recommended for modern configurations. + +ERST + DEF("fda", HAS_ARG, QEMU_OPTION_fda, "-fda/-fdb file use 'file' as floppy disk 0/1 image\n", QEMU_ARCH_ALL) DEF("fdb", HAS_ARG, QEMU_OPTION_fdb, "", QEMU_ARCH_ALL) @@ -1307,7 +1461,7 @@ SRST the bus number and the unit id. ``index=index`` - This option defines where is connected the drive by using an + This option defines where the drive is connected by using an index in the list of available connectors of a given interface type. @@ -1485,13 +1639,6 @@ SRST Use file as SecureDigital card image. ERST -DEF("pflash", HAS_ARG, QEMU_OPTION_pflash, - "-pflash file use 'file' as a parallel flash image\n", QEMU_ARCH_ALL) -SRST -``-pflash file`` - Use file as a parallel flash image. -ERST - DEF("snapshot", 0, QEMU_OPTION_snapshot, "-snapshot write to temporary files instead of disk image files\n", QEMU_ARCH_ALL) @@ -1501,6 +1648,14 @@ SRST the raw disk image you use is not written back. You can however force the write back by pressing C-a s (see the :ref:`disk images` chapter in the System Emulation Users Guide). + + .. warning:: + snapshot is incompatible with ``-blockdev`` (instead use qemu-img + to manually create snapshot images to attach to your blockdev). + If you have mixed ``-blockdev`` and ``-drive`` declarations you + can use the 'snapshot' property on your drive declarations + instead of this global option. + ERST DEF("fsdev", HAS_ARG, QEMU_OPTION_fsdev, @@ -1650,7 +1805,7 @@ SRST directory on host is made directly accessible by guest as a pass-through file system by using the 9P network protocol for communication between host and guests, if desired even accessible, shared by several guests - simultaniously. + simultaneously. Note that ``-virtfs`` is actually just a convenience shortcut for its generalized form ``-fsdev -device virtio-9p-pci``. 
@@ -1833,12 +1988,13 @@ DEF("display", HAS_ARG, QEMU_OPTION_display, "-display spice-app[,gl=on|off]\n" #endif #if defined(CONFIG_SDL) - "-display sdl[,alt_grab=on|off][,ctrl_grab=on|off][,gl=on|core|es|off]\n" - " [,show-cursor=on|off][,window-close=on|off]\n" + "-display sdl[,gl=on|core|es|off][,grab-mod=][,show-cursor=on|off]\n" + " [,window-close=on|off]\n" #endif #if defined(CONFIG_GTK) "-display gtk[,full-screen=on|off][,gl=on|off][,grab-on-hover=on|off]\n" - " [,show-cursor=on|off][,window-close=on|off]\n" + " [,show-tabs=on|off][,show-cursor=on|off][,window-close=on|off]\n" + " [,show-menubar=on|off]\n" #endif #if defined(CONFIG_VNC) "-display vnc=[,]\n" @@ -1846,8 +2002,18 @@ DEF("display", HAS_ARG, QEMU_OPTION_display, #if defined(CONFIG_CURSES) "-display curses[,charset=]\n" #endif +#if defined(CONFIG_COCOA) + "-display cocoa[,full-grab=on|off][,swap-opt-cmd=on|off]\n" +#endif #if defined(CONFIG_OPENGL) "-display egl-headless[,rendernode=]\n" +#endif +#if defined(CONFIG_DBUS_DISPLAY) + "-display dbus[,addr=]\n" + " [,gl=on|core|es|off][,rendernode=]\n" +#endif +#if defined(CONFIG_COCOA) + "-display cocoa[,show-cursor=on|off][,left-command-key=on|off]\n" #endif "-display none\n" " select display backend type\n" @@ -1866,23 +2032,35 @@ DEF("display", HAS_ARG, QEMU_OPTION_display, , QEMU_ARCH_ALL) SRST ``-display type`` - Select type of display to use. This option is a replacement for the - old style -sdl/-curses/... options. Use ``-display help`` to list - the available display types. Valid values for type are + Select type of display to use. Use ``-display help`` to list the available + display types. Valid values for type are ``spice-app[,gl=on|off]`` Start QEMU as a Spice server and launch the default Spice client application. The Spice server will redirect the serial consoles and QEMU monitors. (Since 4.0) + ``dbus`` + Export the display over D-Bus interfaces. (Since 7.0) + + The connection is registered with the "org.qemu" name (and queued when + already owned). + + ``addr=`` : D-Bus bus address to connect to. + + ``p2p=yes|no`` : Use peer-to-peer connection, accepted via QMP ``add_client``. + + ``gl=on|off|core|es`` : Use OpenGL for rendering (the D-Bus interface + will share framebuffers with DMABUF file descriptors). + ``sdl`` Display video output via SDL (usually in a separate graphics window; see the SDL documentation for other possibilities). Valid parameters are: - ``alt_grab=on|off`` : Use Control+Alt+Shift-g to toggle mouse grabbing - - ``ctrl_grab=on|off`` : Use Right-Control-g to toggle mouse grabbing + ``grab-mod=`` : Used to select the modifier keys for toggling + the mouse grabbing in conjunction with the "g" key. ```` can be + either ``lshift-lctrl-lalt`` or ``rctrl``. ``gl=on|off|core|es`` : Use OpenGL for displaying @@ -1901,10 +2079,16 @@ SRST ``grab-on-hover=on|off`` : Grab keyboard input on mouse hover + ``show-tabs=on|off`` : Display the tab bar for switching between the + various graphical interfaces (e.g. VGA and + virtual console character devices) by default. + ``show-cursor=on|off`` : Force showing the mouse cursor ``window-close=on|off`` : Allow to quit qemu with window close button + ``show-menubar=on|off`` : Display the main window menubar, defaults to "on" + ``curses[,charset=]`` Display video output via curses. For graphics device models which support a text mode, QEMU can display this output using a @@ -1916,6 +2100,15 @@ SRST ``charset=CP850`` for IBM CP850 encoding. The default is ``CP437``. 
+ ``cocoa`` + Display video output in a Cocoa window. Mac only. This interface + provides drop-down menus and other UI elements to configure and + control the VM during runtime. Valid parameters are: + + ``show-cursor=on|off`` : Force showing the mouse cursor + + ``left-command-key=on|off`` : Disable forwarding left command key to host + ``egl-headless[,rendernode=]`` Offload all OpenGL operations to a local DRI device. For any graphical display, this display needs to be paired with either @@ -1948,53 +2141,7 @@ SRST Use C-a h for help on switching between the console and monitor. ERST -DEF("curses", 0, QEMU_OPTION_curses, - "-curses shorthand for -display curses\n", - QEMU_ARCH_ALL) -SRST -``-curses`` - Normally, if QEMU is compiled with graphical window support, it - displays output such as guest graphics, guest console, and the QEMU - monitor in a window. With this option, QEMU can display the VGA - output when in text mode using a curses/ncurses interface. Nothing - is displayed in graphical mode. -ERST - -DEF("alt-grab", 0, QEMU_OPTION_alt_grab, - "-alt-grab use Ctrl-Alt-Shift to grab mouse (instead of Ctrl-Alt)\n", - QEMU_ARCH_ALL) -SRST -``-alt-grab`` - Use Ctrl-Alt-Shift to grab mouse (instead of Ctrl-Alt). Note that - this also affects the special keys (for fullscreen, monitor-mode - switching, etc). -ERST - -DEF("ctrl-grab", 0, QEMU_OPTION_ctrl_grab, - "-ctrl-grab use Right-Ctrl to grab mouse (instead of Ctrl-Alt)\n", - QEMU_ARCH_ALL) -SRST -``-ctrl-grab`` - Use Right-Ctrl to grab mouse (instead of Ctrl-Alt). Note that this - also affects the special keys (for fullscreen, monitor-mode - switching, etc). -ERST - -DEF("no-quit", 0, QEMU_OPTION_no_quit, - "-no-quit disable SDL/GTK window close capability (deprecated)\n", QEMU_ARCH_ALL) -SRST -``-no-quit`` - Disable window close capability (SDL and GTK only). This option is - deprecated, please use ``-display ...,window-close=off`` instead. -ERST - -DEF("sdl", 0, QEMU_OPTION_sdl, - "-sdl shorthand for -display sdl\n", QEMU_ARCH_ALL) -SRST -``-sdl`` - Enable SDL. -ERST - +#ifdef CONFIG_SPICE DEF("spice", HAS_ARG, QEMU_OPTION_spice, "-spice [port=port][,tls-port=secured-port][,x509-dir=]\n" " [,x509-key-file=][,x509-key-password=]\n" @@ -2016,6 +2163,7 @@ DEF("spice", HAS_ARG, QEMU_OPTION_spice, " enable spice\n" " at least one of {port, tls-port} is mandatory\n", QEMU_ARCH_ALL) +#endif SRST ``-spice option[,option[,...]]`` Enable the spice remote desktop protocol. 
Valid options are @@ -2450,7 +2598,10 @@ DEF("smbios", HAS_ARG, QEMU_OPTION_smbios, " specify SMBIOS type 3 fields\n" "-smbios type=4[,sock_pfx=str][,manufacturer=str][,version=str][,serial=str]\n" " [,asset=str][,part=str][,max-speed=%d][,current-speed=%d]\n" + " [,processor-id=%d]\n" " specify SMBIOS type 4 fields\n" + "-smbios type=8[,external_reference=str][,internal_reference=str][,connector_type=%d][,port_type=%d]\n" + " specify SMBIOS type 8 fields\n" "-smbios type=11[,value=str][,path=filename]\n" " specify SMBIOS type 11 fields\n" "-smbios type=17[,loc_pfx=str][,bank=str][,manufacturer=str][,serial=str]\n" @@ -2475,7 +2626,7 @@ SRST ``-smbios type=3[,manufacturer=str][,version=str][,serial=str][,asset=str][,sku=str]`` Specify SMBIOS type 3 fields -``-smbios type=4[,sock_pfx=str][,manufacturer=str][,version=str][,serial=str][,asset=str][,part=str]`` +``-smbios type=4[,sock_pfx=str][,manufacturer=str][,version=str][,serial=str][,asset=str][,part=str][,processor-id=%d]`` Specify SMBIOS type 4 fields ``-smbios type=11[,value=str][,path=filename]`` @@ -2635,6 +2786,20 @@ DEF("netdev", HAS_ARG, QEMU_OPTION_netdev, "-netdev socket,id=str[,fd=h][,udp=host:port][,localaddr=host:port]\n" " configure a network backend to connect to another network\n" " using an UDP tunnel\n" + "-netdev stream,id=str[,server=on|off],addr.type=inet,addr.host=host,addr.port=port[,to=maxport][,numeric=on|off][,keep-alive=on|off][,mptcp=on|off][,addr.ipv4=on|off][,addr.ipv6=on|off]\n" + "-netdev stream,id=str[,server=on|off],addr.type=unix,addr.path=path[,abstract=on|off][,tight=on|off]\n" + "-netdev stream,id=str[,server=on|off],addr.type=fd,addr.str=file-descriptor\n" + " configure a network backend to connect to another network\n" + " using a socket connection in stream mode.\n" + "-netdev dgram,id=str,remote.type=inet,remote.host=maddr,remote.port=port[,local.type=inet,local.host=addr]\n" + "-netdev dgram,id=str,remote.type=inet,remote.host=maddr,remote.port=port[,local.type=fd,local.str=file-descriptor]\n" + " configure a network backend to connect to a multicast maddr and port\n" + " use ``local.host=addr`` to specify the host address to send packets from\n" + "-netdev dgram,id=str,local.type=inet,local.host=addr,local.port=port[,remote.type=inet,remote.host=addr,remote.port=port]\n" + "-netdev dgram,id=str,local.type=unix,local.path=path[,remote.type=unix,remote.path=path]\n" + "-netdev dgram,id=str,local.type=fd,local.str=file-descriptor\n" + " configure a network backend to connect to another network\n" + " using an UDP tunnel\n" #ifdef CONFIG_VDE "-netdev vde,id=str[,sock=socketpath][,port=n][,group=groupname][,mode=octalmode]\n" " configure a network backend to connect to port 'n' of a vde switch\n" @@ -2653,8 +2818,29 @@ DEF("netdev", HAS_ARG, QEMU_OPTION_netdev, " configure a vhost-user network, backed by a chardev 'dev'\n" #endif #ifdef __linux__ - "-netdev vhost-vdpa,id=str,vhostdev=/path/to/dev\n" + "-netdev vhost-vdpa,id=str[,vhostdev=/path/to/dev][,vhostfd=h]\n" " configure a vhost-vdpa network,Establish a vhost-vdpa netdev\n" + " use 'vhostdev=/path/to/dev' to open a vhost vdpa device\n" + " use 'vhostfd=h' to connect to an already opened vhost vdpa device\n" +#endif +#ifdef CONFIG_VMNET + "-netdev vmnet-host,id=str[,isolated=on|off][,net-uuid=uuid]\n" + " [,start-address=addr,end-address=addr,subnet-mask=mask]\n" + " configure a vmnet network backend in host mode with ID 'str',\n" + " isolate this interface from others with 'isolated',\n" + " configure the address range and choose a subnet 
mask,\n" + " specify network UUID 'uuid' to disable DHCP and interact with\n" + " vmnet-host interfaces within this isolated network\n" + "-netdev vmnet-shared,id=str[,isolated=on|off][,nat66-prefix=addr]\n" + " [,start-address=addr,end-address=addr,subnet-mask=mask]\n" + " configure a vmnet network backend in shared mode with ID 'str',\n" + " configure the address range and choose a subnet mask,\n" + " set IPv6 ULA prefix (of length 64) to use for internal network,\n" + " isolate this interface from others with 'isolated'\n" + "-netdev vmnet-bridged,id=str,ifname=name[,isolated=on|off]\n" + " configure a vmnet network backend in bridged mode with ID 'str',\n" + " use 'ifname=name' to select a physical network interface to be bridged,\n" + " isolate this interface from others with 'isolated'\n" #endif "-netdev hubport,id=str,hubid=n[,netdev=nd]\n" " configure a hub port on the hub with ID 'n'\n", QEMU_ARCH_ALL) @@ -2674,6 +2860,9 @@ DEF("nic", HAS_ARG, QEMU_OPTION_nic, #endif #ifdef CONFIG_POSIX "vhost-user|" +#endif +#ifdef CONFIG_VMNET + "vmnet-host|vmnet-shared|vmnet-bridged|" #endif "socket][,option][,...][mac=macaddr]\n" " initialize an on-board / default host NIC (using MAC address\n" @@ -2696,6 +2885,9 @@ DEF("net", HAS_ARG, QEMU_OPTION_net, #endif #ifdef CONFIG_NETMAP "netmap|" +#endif +#ifdef CONFIG_VMNET + "vmnet-host|vmnet-shared|vmnet-bridged|" #endif "socket][,option][,option][,...]\n" " old way to initialize a host network interface\n" @@ -3134,7 +3326,7 @@ SRST -netdev type=vhost-user,id=net0,chardev=chr0 \ -device virtio-net-pci,netdev=net0 -``-netdev vhost-vdpa,vhostdev=/path/to/dev`` +``-netdev vhost-vdpa[,vhostdev=/path/to/dev][,vhostfd=h]`` Establish a vhost-vdpa netdev. vDPA device is a device that uses a datapath which complies with @@ -3180,7 +3372,7 @@ DEFHEADING(Character device options:) DEF("chardev", HAS_ARG, QEMU_OPTION_chardev, "-chardev help\n" "-chardev null,id=id[,mux=on|off][,logfile=PATH][,logappend=on|off]\n" - "-chardev socket,id=id[,host=host],port=port[,to=to][,ipv4=on|off][,ipv6=on|off][,nodelay=on|off][,reconnect=seconds]\n" + "-chardev socket,id=id[,host=host],port=port[,to=to][,ipv4=on|off][,ipv6=on|off][,nodelay=on|off]\n" " [,server=on|off][,wait=on|off][,telnet=on|off][,websocket=on|off][,reconnect=seconds][,mux=on|off]\n" " [,logfile=PATH][,logappend=on|off][,tls-creds=ID][,tls-authz=ID] (tcp)\n" "-chardev socket,id=id,path=path[,server=on|off][,wait=on|off][,telnet=on|off][,websocket=on|off][,reconnect=seconds]\n" @@ -3568,12 +3760,67 @@ DEFHEADING() #endif -DEFHEADING(Linux/Multiboot boot specific:) +DEFHEADING(Boot Image or Kernel specific:) SRST -When using these options, you can use a given Linux or Multiboot kernel -without installing it in the disk image. It can be useful for easier -testing of various kernels. +There are broadly 4 ways you can boot a system with QEMU. + - specify a firmware and let it control finding a kernel + - specify a firmware and pass a hint to the kernel to boot + - direct kernel image boot + - manually load files into the guest's address space + +The third method is useful for quickly testing kernels but as there is +no firmware to pass configuration information to the kernel, the +hardware must either be probeable, the kernel built for the exact +configuration or passed some configuration data (e.g. a DTB blob) +which tells the kernel what drivers it needs. These details are +often hardware specific. 
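As a hedged sketch of the third (direct kernel) method, with purely illustrative file names and kernel arguments, a Linux guest might be started along these lines; boards that need a device tree can additionally be given one with ``-dtb``:

::

    -kernel vmlinuz -initrd initrd.img -append "console=ttyS0 root=/dev/vda"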
+ +The final method is the most generic way of loading images into the +guest address space and is used mostly for ``bare metal`` type +development where the reset vectors of the processor are taken into +account. + +ERST + +SRST + +For x86 machines and some other architectures ``-bios`` will generally +do the right thing with whatever it is given. For other machines the +more strict ``-pflash`` option needs an image that is sized for the +flash device for the given machine type. + +Please see the :ref:`system-targets-ref` section of the manual for +more detailed documentation. + +ERST + +DEF("bios", HAS_ARG, QEMU_OPTION_bios, \ + "-bios file set the filename for the BIOS\n", QEMU_ARCH_ALL) +SRST +``-bios file`` + Set the filename for the BIOS. +ERST + +DEF("pflash", HAS_ARG, QEMU_OPTION_pflash, + "-pflash file use 'file' as a parallel flash image\n", QEMU_ARCH_ALL) +SRST +``-pflash file`` + Use file as a parallel flash image. +ERST + +SRST + +The kernel options were designed to work with Linux kernels although +other things (like hypervisors) can be packaged up as a kernel +executable image. The exact format of an executable image is usually +architecture specific. + +The way in which the kernel is started (what address it is loaded at, +what if any information is passed to it via CPU registers, the state +of the hardware when it is started, and so on) is also architecture +specific. Typically it follows the specification laid down by the +Linux kernel for how kernels for that architecture must be started. ERST @@ -3613,13 +3860,34 @@ SRST kernel on boot. ERST +SRST + +Finally you can also manually load images directly into the address +space of the guest. This is most useful for developers who already +know the layout of their guest and take care to ensure something sane +will happen when the reset vector executes. + +The generic loader can be invoked by using the loader device: + +``-device loader,addr=,data=,data-len=[,data-be=][,cpu-num=]`` + +There is also the guest loader, which operates in a similar way but +tweaks the DTB so a hypervisor loaded via ``-kernel`` can find where +the guest image is: + +``-device guest-loader,addr=[,kernel=,[bootargs=]][,initrd=]`` + +ERST + DEFHEADING() DEFHEADING(Debug/Expert options:) DEF("compat", HAS_ARG, QEMU_OPTION_compat, "-compat [deprecated-input=accept|reject|crash][,deprecated-output=accept|hide]\n" - " Policy for handling deprecated management interfaces\n", + " Policy for handling deprecated management interfaces\n" + "-compat [unstable-input=accept|reject|crash][,unstable-output=accept|hide]\n" + " Policy for handling unstable management interfaces\n", QEMU_ARCH_ALL) SRST ``-compat [deprecated-input=@var{input-policy}][,deprecated-output=@var{output-policy}]`` @@ -3637,6 +3905,22 @@ SRST Suppress deprecated command results and events Limitation: covers only syntactic aspects of QMP. + +``-compat [unstable-input=@var{input-policy}][,unstable-output=@var{output-policy}]`` + Set policy for handling unstable management interfaces (experimental): + + ``unstable-input=accept`` (default) + Accept unstable commands and arguments + ``unstable-input=reject`` + Reject unstable commands and arguments + ``unstable-input=crash`` + Crash on unstable commands and arguments + ``unstable-output=accept`` (default) + Emit unstable command results and events + ``unstable-output=hide`` + Suppress unstable command results and events + + Limitation: covers only syntactic aspects of QMP. 
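As an illustrative sketch using only the policy names documented above, a test run that rejects use of unstable interfaces and hides their outputs could pass:

::

    -compat unstable-input=reject,unstable-output=hide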
ERST DEF("fw_cfg", HAS_ARG, QEMU_OPTION_fwcfg, @@ -4045,15 +4329,10 @@ SRST To list all the data directories, use ``-L help``. ERST -DEF("bios", HAS_ARG, QEMU_OPTION_bios, \ - "-bios file set the filename for the BIOS\n", QEMU_ARCH_ALL) -SRST -``-bios file`` - Set the filename for the BIOS. -ERST - DEF("enable-kvm", 0, QEMU_OPTION_enable_kvm, \ - "-enable-kvm enable KVM full virtualization support\n", QEMU_ARCH_ALL) + "-enable-kvm enable KVM full virtualization support\n", + QEMU_ARCH_ARM | QEMU_ARCH_I386 | QEMU_ARCH_MIPS | QEMU_ARCH_PPC | + QEMU_ARCH_RISCV | QEMU_ARCH_S390X) SRST ``-enable-kvm`` Enable KVM full virtualization support. This option is only @@ -4061,16 +4340,17 @@ SRST ERST DEF("xen-domid", HAS_ARG, QEMU_OPTION_xen_domid, - "-xen-domid id specify xen guest domain id\n", QEMU_ARCH_ALL) + "-xen-domid id specify xen guest domain id\n", + QEMU_ARCH_ARM | QEMU_ARCH_I386) DEF("xen-attach", 0, QEMU_OPTION_xen_attach, "-xen-attach attach to existing xen domain\n" " libxl will use this when starting QEMU\n", - QEMU_ARCH_ALL) + QEMU_ARCH_ARM | QEMU_ARCH_I386) DEF("xen-domid-restrict", 0, QEMU_OPTION_xen_domid_restrict, "-xen-domid-restrict restrict set of available xen operations\n" " to specified domain id. (Does not affect\n" " xenpv machine type).\n", - QEMU_ARCH_ALL) + QEMU_ARCH_ARM | QEMU_ARCH_I386) SRST ``-xen-domid id`` Specify xen guest domain id (XEN only). @@ -4102,7 +4382,7 @@ DEF("action", HAS_ARG, QEMU_OPTION_action, " action when guest reboots [default=reset]\n" "-action shutdown=poweroff|pause\n" " action when guest shuts down [default=poweroff]\n" - "-action panic=pause|shutdown|none\n" + "-action panic=pause|shutdown|exit-failure|none\n" " action when guest panics [default=shutdown]\n" "-action watchdog=reset|shutdown|poweroff|inject-nmi|pause|debug|none\n" " action when watchdog fires [default=reset]\n", @@ -4118,7 +4398,7 @@ SRST ``-action panic=none`` ``-action reboot=shutdown,shutdown=pause`` - ``-watchdog i6300esb -action watchdog=pause`` + ``-device i6300esb -action watchdog=pause`` ERST @@ -4236,35 +4516,6 @@ SRST specifies the snapshot name used to load the initial VM state. ERST -DEF("watchdog", HAS_ARG, QEMU_OPTION_watchdog, \ - "-watchdog model\n" \ - " enable virtual hardware watchdog [default=none]\n", - QEMU_ARCH_ALL) -SRST -``-watchdog model`` - Create a virtual hardware watchdog device. Once enabled (by a guest - action), the watchdog must be periodically polled by an agent inside - the guest or else the guest will be restarted. Choose a model for - which your guest has drivers. - - The model is the model of hardware watchdog to emulate. Use - ``-watchdog help`` to list available hardware models. Only one - watchdog can be enabled for a guest. - - The following models may be available: - - ``ib700`` - iBASE 700 is a very simple ISA watchdog with a single timer. - - ``i6300esb`` - Intel 6300ESB I/O controller hub is a much more featureful - PCI-based dual-timer watchdog. - - ``diag288`` - A virtual watchdog for s390x backed by the diagnose 288 - hypercall (currently KVM only). -ERST - DEF("watchdog-action", HAS_ARG, QEMU_OPTION_watchdog_action, \ "-watchdog-action reset|shutdown|poweroff|inject-nmi|pause|debug|none\n" \ " action when watchdog fires [default=reset]\n", @@ -4286,7 +4537,7 @@ SRST Examples: - ``-watchdog i6300esb -watchdog-action pause``; \ ``-watchdog ib700`` + ``-device i6300esb -watchdog-action pause`` ERST @@ -4417,12 +4668,12 @@ SRST information about the facilities this enables. 
ERST DEF("semihosting-config", HAS_ARG, QEMU_OPTION_semihosting_config, - "-semihosting-config [enable=on|off][,target=native|gdb|auto][,chardev=id][,arg=str[,...]]\n" \ + "-semihosting-config [enable=on|off][,target=native|gdb|auto][,chardev=id][,userspace=on|off][,arg=str[,...]]\n" \ " semihosting configuration\n", QEMU_ARCH_ARM | QEMU_ARCH_M68K | QEMU_ARCH_XTENSA | QEMU_ARCH_MIPS | QEMU_ARCH_NIOS2 | QEMU_ARCH_RISCV) SRST -``-semihosting-config [enable=on|off][,target=native|gdb|auto][,chardev=id][,arg=str[,...]]`` +``-semihosting-config [enable=on|off][,target=native|gdb|auto][,chardev=id][,userspace=on|off][,arg=str[,...]]`` Enable and configure semihosting (ARM, M68K, Xtensa, MIPS, Nios II, RISC-V only). @@ -4449,6 +4700,13 @@ SRST Send the output to a chardev backend output for native or auto output when not in gdb + ``userspace=on|off`` + Allows code running in guest userspace to access the semihosting + interface. The default is that only privileged guest code can + make semihosting calls. Note that setting ``userspace=on`` should + only be used if all guest code is trusted (for example, in + bare-metal test case code). + ``arg=str1,arg=str2,...`` Allows the user to pass input arguments, and can be used multiple times to build up a list. The old-style @@ -4499,18 +4757,14 @@ SRST ERST DEF("readconfig", HAS_ARG, QEMU_OPTION_readconfig, - "-readconfig \n", QEMU_ARCH_ALL) + "-readconfig \n" + " read config file\n", QEMU_ARCH_ALL) SRST ``-readconfig file`` Read device configuration from file. This approach is useful when you want to spawn QEMU process with many command line options but you don't want to exceed the command line character limit. ERST -DEF("writeconfig", HAS_ARG, QEMU_OPTION_writeconfig, - "-writeconfig \n" - " read/write config file (deprecated)\n", QEMU_ARCH_ALL) -SRST -ERST DEF("no-user-config", 0, QEMU_OPTION_nouserconfig, "-no-user-config\n" @@ -4532,19 +4786,18 @@ SRST ERST DEF("plugin", HAS_ARG, QEMU_OPTION_plugin, - "-plugin [file=][,arg=]\n" + "-plugin [file=][,=]\n" " load a plugin\n", QEMU_ARCH_ALL) SRST -``-plugin file=file[,arg=string]`` +``-plugin file=file[,argname=argvalue]`` Load a plugin. ``file=file`` Load the given plugin from a shared library file. - ``arg=string`` - Argument string passed to the plugin. (Can be given multiple - times.) + ``argname=argvalue`` + Argument passed to the plugin. (Can be given multiple times.) ERST HXCOMM Internal use @@ -4552,13 +4805,22 @@ DEF("qtest", HAS_ARG, QEMU_OPTION_qtest, "", QEMU_ARCH_ALL) DEF("qtest-log", HAS_ARG, QEMU_OPTION_qtest_log, "", QEMU_ARCH_ALL) #ifdef __linux__ -DEF("enable-fips", 0, QEMU_OPTION_enablefips, - "-enable-fips enable FIPS 140-2 compliance\n", +DEF("async-teardown", 0, QEMU_OPTION_asyncteardown, + "-async-teardown enable asynchronous teardown\n", QEMU_ARCH_ALL) #endif SRST -``-enable-fips`` - Enable FIPS 140-2 compliance mode. +``-async-teardown`` + Enable asynchronous teardown. A new process called "cleanup/" + will be created at startup sharing the address space with the main qemu + process, using clone. It will wait for the main qemu process to + terminate completely, and then exit. + This allows qemu to terminate very quickly even if the guest was + huge, leaving the teardown of the address space to the cleanup + process. Since the cleanup process shares the same cgroups as the + main qemu process, accounting is performed correctly. This only + works if the cleanup process is not forcefully killed with SIGKILL + before the main qemu process has terminated completely. 
ERST DEF("msg", HAS_ARG, QEMU_OPTION_msg, @@ -5035,8 +5297,8 @@ SRST read the colo-compare git log. ``-object cryptodev-backend-builtin,id=id[,queues=queues]`` - Creates a cryptodev backend which executes crypto opreation from - the QEMU cipher APIS. The id parameter is a unique ID that will + Creates a cryptodev backend which executes crypto operations from + the QEMU cipher APIs. The id parameter is a unique ID that will be used to reference this cryptodev backend from the ``virtio-crypto`` device. The queues parameter is optional, which specify the queue number of cryptodev backend, the default @@ -5150,7 +5412,7 @@ SRST -object secret,id=sec0,keyid=secmaster0,format=base64,\\ data=$SECRET,iv=$( #include "qapi/error.h" #include "qemu/sockets.h" #include "channel.h" +#include "cutils.h" #ifdef CONFIG_SOLARIS #include @@ -34,7 +36,7 @@ static gboolean ga_channel_listen_accept(GIOChannel *channel, g_warning("error converting fd to gsocket: %s", strerror(errno)); goto out; } - qemu_set_nonblock(client_fd); + qemu_socket_set_nonblock(client_fd); ret = ga_channel_client_add(c, client_fd); if (ret) { g_warning("error setting up connection"); @@ -119,7 +121,7 @@ static int ga_channel_client_add(GAChannel *c, int fd) } static gboolean ga_channel_open(GAChannel *c, const gchar *path, - GAChannelMethod method, int fd) + GAChannelMethod method, int fd, Error **errp) { int ret; c->method = method; @@ -127,27 +129,48 @@ static gboolean ga_channel_open(GAChannel *c, const gchar *path, switch (c->method) { case GA_CHANNEL_VIRTIO_SERIAL: { assert(fd < 0); - fd = qemu_open_old(path, O_RDWR | O_NONBLOCK + fd = qga_open_cloexec( + path, #ifndef CONFIG_SOLARIS - | O_ASYNC + O_ASYNC | #endif - ); + O_RDWR | O_NONBLOCK, + 0 + ); if (fd == -1) { - g_critical("error opening channel: %s", strerror(errno)); + error_setg_errno(errp, errno, "error opening channel '%s'", path); return false; } #ifdef CONFIG_SOLARIS ret = ioctl(fd, I_SETSIG, S_OUTPUT | S_INPUT | S_HIPRI); if (ret == -1) { - g_critical("error setting event mask for channel: %s", - strerror(errno)); + error_setg_errno(errp, errno, "error setting event mask for channel"); close(fd); return false; } #endif +#ifdef __FreeBSD__ + /* + * In the default state channel sends echo of every command to a + * client. The client programm doesn't expect this and raises an + * error. Suppress echo by resetting ECHO terminal flag. 
+ */ + struct termios tio; + if (tcgetattr(fd, &tio) < 0) { + error_setg_errno(errp, errno, "error getting channel termios attrs"); + close(fd); + return false; + } + tio.c_lflag &= ~ECHO; + if (tcsetattr(fd, TCSAFLUSH, &tio) < 0) { + error_setg_errno(errp, errno, "error setting channel termios attrs"); + close(fd); + return false; + } +#endif /* __FreeBSD__ */ ret = ga_channel_client_add(c, fd); if (ret) { - g_critical("error adding channel to main loop"); + error_setg(errp, "error adding channel to main loop"); close(fd); return false; } @@ -157,9 +180,9 @@ static gboolean ga_channel_open(GAChannel *c, const gchar *path, struct termios tio; assert(fd < 0); - fd = qemu_open_old(path, O_RDWR | O_NOCTTY | O_NONBLOCK); + fd = qga_open_cloexec(path, O_RDWR | O_NOCTTY | O_NONBLOCK, 0); if (fd == -1) { - g_critical("error opening channel: %s", strerror(errno)); + error_setg_errno(errp, errno, "error opening channel '%s'", path); return false; } tcgetattr(fd, &tio); @@ -180,7 +203,7 @@ static gboolean ga_channel_open(GAChannel *c, const gchar *path, tcsetattr(fd, TCSANOW, &tio); ret = ga_channel_client_add(c, fd); if (ret) { - g_critical("error adding channel to main loop"); + error_setg(errp, "error adding channel to main loop"); close(fd); return false; } @@ -188,12 +211,8 @@ static gboolean ga_channel_open(GAChannel *c, const gchar *path, } case GA_CHANNEL_UNIX_LISTEN: { if (fd < 0) { - Error *local_err = NULL; - - fd = unix_listen(path, &local_err); - if (local_err != NULL) { - g_critical("%s", error_get_pretty(local_err)); - error_free(local_err); + fd = unix_listen(path, errp); + if (fd < 0) { return false; } } @@ -202,24 +221,19 @@ static gboolean ga_channel_open(GAChannel *c, const gchar *path, } case GA_CHANNEL_VSOCK_LISTEN: { if (fd < 0) { - Error *local_err = NULL; SocketAddress *addr; char *addr_str; addr_str = g_strdup_printf("vsock:%s", path); - addr = socket_parse(addr_str, &local_err); + addr = socket_parse(addr_str, errp); g_free(addr_str); - if (local_err != NULL) { - g_critical("%s", error_get_pretty(local_err)); - error_free(local_err); + if (!addr) { return false; } - fd = socket_listen(addr, 1, &local_err); + fd = socket_listen(addr, 1, errp); qapi_free_SocketAddress(addr); - if (local_err != NULL) { - g_critical("%s", error_get_pretty(local_err)); - error_free(local_err); + if (fd < 0) { return false; } } @@ -227,7 +241,7 @@ static gboolean ga_channel_open(GAChannel *c, const gchar *path, break; } default: - g_critical("error binding/listening to specified socket"); + error_setg(errp, "error binding/listening to specified socket"); return false; } @@ -272,12 +286,14 @@ GIOStatus ga_channel_read(GAChannel *c, gchar *buf, gsize size, gsize *count) GAChannel *ga_channel_new(GAChannelMethod method, const gchar *path, int listen_fd, GAChannelCallback cb, gpointer opaque) { + Error *err = NULL; GAChannel *c = g_new0(GAChannel, 1); c->event_cb = cb; c->user_data = opaque; - if (!ga_channel_open(c, path, method, listen_fd)) { - g_critical("error opening channel"); + if (!ga_channel_open(c, path, method, listen_fd, &err)) { + g_critical("%s", error_get_pretty(err)); + error_free(err); ga_channel_free(c); return NULL; } diff --git a/qga/commands-bsd.c b/qga/commands-bsd.c new file mode 100644 index 0000000000..15cade2d4c --- /dev/null +++ b/qga/commands-bsd.c @@ -0,0 +1,200 @@ +/* + * QEMU Guest Agent BSD-specific command implementations + * + * Copyright (c) Virtuozzo International GmbH. 
+ * + * Authors: + * Alexander Ivanov + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qga-qapi-commands.h" +#include "qapi/qmp/qerror.h" +#include "qapi/error.h" +#include "qemu/queue.h" +#include "commands-common.h" +#include +#include +#include +#include +#include +#include +#include + +#if defined(CONFIG_FSFREEZE) || defined(CONFIG_FSTRIM) +bool build_fs_mount_list(FsMountList *mounts, Error **errp) +{ + FsMount *mount; + struct statfs *mntbuf, *mntp; + struct stat statbuf; + int i, count, ret; + + count = getmntinfo(&mntbuf, MNT_NOWAIT); + if (count == 0) { + error_setg_errno(errp, errno, "getmntinfo failed"); + return false; + } + + for (i = 0; i < count; i++) { + mntp = &mntbuf[i]; + ret = stat(mntp->f_mntonname, &statbuf); + if (ret != 0) { + error_setg_errno(errp, errno, "stat failed on %s", + mntp->f_mntonname); + return false; + } + + mount = g_new0(FsMount, 1); + + mount->dirname = g_strdup(mntp->f_mntonname); + mount->devtype = g_strdup(mntp->f_fstypename); + mount->devmajor = major(mount->dev); + mount->devminor = minor(mount->dev); + mount->fsid = mntp->f_fsid; + mount->dev = statbuf.st_dev; + + QTAILQ_INSERT_TAIL(mounts, mount, next); + } + return true; +} +#endif /* CONFIG_FSFREEZE || CONFIG_FSTRIM */ + +#if defined(CONFIG_FSFREEZE) +static int ufssuspend_fd = -1; +static int ufssuspend_cnt; + +int64_t qmp_guest_fsfreeze_do_freeze_list(bool has_mountpoints, + strList *mountpoints, + FsMountList mounts, + Error **errp) +{ + int ret; + strList *list; + struct FsMount *mount; + + if (ufssuspend_fd != -1) { + error_setg(errp, "filesystems have already frozen"); + return -1; + } + + ufssuspend_cnt = 0; + ufssuspend_fd = qemu_open(_PATH_UFSSUSPEND, O_RDWR, errp); + if (ufssuspend_fd == -1) { + return -1; + } + + QTAILQ_FOREACH_REVERSE(mount, &mounts, next) { + /* + * To issue fsfreeze in the reverse order of mounts, check if the + * mount is listed in the list here + */ + if (has_mountpoints) { + for (list = mountpoints; list; list = list->next) { + if (g_str_equal(list->value, mount->dirname)) { + break; + } + } + if (!list) { + continue; + } + } + + /* Only UFS supports suspend */ + if (!g_str_equal(mount->devtype, "ufs")) { + continue; + } + + ret = ioctl(ufssuspend_fd, UFSSUSPEND, &mount->fsid); + if (ret == -1) { + /* + * ioctl returns EBUSY for all the FS except the first one + * that was suspended + */ + if (errno == EBUSY) { + continue; + } + error_setg_errno(errp, errno, "failed to freeze %s", + mount->dirname); + goto error; + } + ufssuspend_cnt++; + } + return ufssuspend_cnt; +error: + close(ufssuspend_fd); + ufssuspend_fd = -1; + return -1; + +} + +/* + * We don't need to call UFSRESUME ioctl because all the frozen FS + * are thawed on /dev/ufssuspend closing. 
+ */ +int qmp_guest_fsfreeze_do_thaw(Error **errp) +{ + int ret = ufssuspend_cnt; + ufssuspend_cnt = 0; + if (ufssuspend_fd != -1) { + close(ufssuspend_fd); + ufssuspend_fd = -1; + } + return ret; +} + +GuestFilesystemInfoList *qmp_guest_get_fsinfo(Error **errp) +{ + error_setg(errp, QERR_UNSUPPORTED); + return NULL; +} + +GuestDiskInfoList *qmp_guest_get_disks(Error **errp) +{ + error_setg(errp, QERR_UNSUPPORTED); + return NULL; +} + +GuestDiskStatsInfoList *qmp_guest_get_diskstats(Error **errp) +{ + error_setg(errp, QERR_UNSUPPORTED); + return NULL; +} + +GuestCpuStatsList *qmp_guest_get_cpustats(Error **errp) +{ + error_setg(errp, QERR_UNSUPPORTED); + return NULL; +} +#endif /* CONFIG_FSFREEZE */ + +#ifdef HAVE_GETIFADDRS +/* + * Fill "buf" with MAC address by ifaddrs. Pointer buf must point to a + * buffer with ETHER_ADDR_LEN length at least. + * + * Returns false in case of an error, otherwise true. "obtained" arguument + * is true if a MAC address was obtained successful, otherwise false. + */ +bool guest_get_hw_addr(struct ifaddrs *ifa, unsigned char *buf, + bool *obtained, Error **errp) +{ + struct sockaddr_dl *sdp; + + *obtained = false; + + if (ifa->ifa_addr->sa_family != AF_LINK) { + /* We can get HW address only for AF_LINK family. */ + g_debug("failed to get MAC address of %s", ifa->ifa_name); + return true; + } + + sdp = (struct sockaddr_dl *)ifa->ifa_addr; + memcpy(buf, sdp->sdl_data + sdp->sdl_nlen, ETHER_ADDR_LEN); + *obtained = true; + + return true; +} +#endif /* HAVE_GETIFADDRS */ diff --git a/qga/commands-common.h b/qga/commands-common.h index 90785ed4bb..8c1c56aac9 100644 --- a/qga/commands-common.h +++ b/qga/commands-common.h @@ -10,6 +10,57 @@ #define QGA_COMMANDS_COMMON_H #include "qga-qapi-types.h" +#include "guest-agent-core.h" +#include "qemu/queue.h" + +#if defined(__linux__) +#include +#ifdef FIFREEZE +#define CONFIG_FSFREEZE +#endif +#ifdef FITRIM +#define CONFIG_FSTRIM +#endif +#endif /* __linux__ */ + +#ifdef __FreeBSD__ +#include +#ifdef UFSSUSPEND +#define CONFIG_FSFREEZE +#endif +#endif /* __FreeBSD__ */ + +#if defined(CONFIG_FSFREEZE) || defined(CONFIG_FSTRIM) +typedef struct FsMount { + char *dirname; + char *devtype; + unsigned int devmajor, devminor; +#if defined(__FreeBSD__) + dev_t dev; + fsid_t fsid; +#endif + QTAILQ_ENTRY(FsMount) next; +} FsMount; + +typedef QTAILQ_HEAD(FsMountList, FsMount) FsMountList; + +bool build_fs_mount_list(FsMountList *mounts, Error **errp); +void free_fs_mount_list(FsMountList *mounts); +#endif /* CONFIG_FSFREEZE || CONFIG_FSTRIM */ + +#if defined(CONFIG_FSFREEZE) +int64_t qmp_guest_fsfreeze_do_freeze_list(bool has_mountpoints, + strList *mountpoints, + FsMountList mounts, + Error **errp); +int qmp_guest_fsfreeze_do_thaw(Error **errp); +#endif /* CONFIG_FSFREEZE */ + +#ifdef HAVE_GETIFADDRS +#include +bool guest_get_hw_addr(struct ifaddrs *ifa, unsigned char *buf, + bool *obtained, Error **errp); +#endif typedef struct GuestFileHandle GuestFileHandle; @@ -18,4 +69,15 @@ GuestFileHandle *guest_file_handle_find(int64_t id, Error **errp); GuestFileRead *guest_file_read_unsafe(GuestFileHandle *gfh, int64_t count, Error **errp); +/** + * qga_get_host_name: + * @errp: Error object + * + * Operating system agnostic way of querying host name. + * Compared to g_get_host_name(), it doesn't cache the result. + * + * Returns allocated hostname (caller should free), NULL on failure. 
+ */ +char *qga_get_host_name(Error **errp); + #endif diff --git a/qga/commands-linux.c b/qga/commands-linux.c new file mode 100644 index 0000000000..214e408fcd --- /dev/null +++ b/qga/commands-linux.c @@ -0,0 +1,286 @@ +/* + * QEMU Guest Agent Linux-specific command implementations + * + * Copyright IBM Corp. 2011 + * + * Authors: + * Michael Roth + * Michal Privoznik + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "commands-common.h" +#include "cutils.h" +#include +#include + +#if defined(CONFIG_FSFREEZE) || defined(CONFIG_FSTRIM) +static int dev_major_minor(const char *devpath, + unsigned int *devmajor, unsigned int *devminor) +{ + struct stat st; + + *devmajor = 0; + *devminor = 0; + + if (stat(devpath, &st) < 0) { + slog("failed to stat device file '%s': %s", devpath, strerror(errno)); + return -1; + } + if (S_ISDIR(st.st_mode)) { + /* It is bind mount */ + return -2; + } + if (S_ISBLK(st.st_mode)) { + *devmajor = major(st.st_rdev); + *devminor = minor(st.st_rdev); + return 0; + } + return -1; +} + +static bool build_fs_mount_list_from_mtab(FsMountList *mounts, Error **errp) +{ + struct mntent *ment; + FsMount *mount; + char const *mtab = "/proc/self/mounts"; + FILE *fp; + unsigned int devmajor, devminor; + + fp = setmntent(mtab, "r"); + if (!fp) { + error_setg(errp, "failed to open mtab file: '%s'", mtab); + return false; + } + + while ((ment = getmntent(fp))) { + /* + * An entry which device name doesn't start with a '/' is + * either a dummy file system or a network file system. + * Add special handling for smbfs and cifs as is done by + * coreutils as well. + */ + if ((ment->mnt_fsname[0] != '/') || + (strcmp(ment->mnt_type, "smbfs") == 0) || + (strcmp(ment->mnt_type, "cifs") == 0)) { + continue; + } + if (dev_major_minor(ment->mnt_fsname, &devmajor, &devminor) == -2) { + /* Skip bind mounts */ + continue; + } + + mount = g_new0(FsMount, 1); + mount->dirname = g_strdup(ment->mnt_dir); + mount->devtype = g_strdup(ment->mnt_type); + mount->devmajor = devmajor; + mount->devminor = devminor; + + QTAILQ_INSERT_TAIL(mounts, mount, next); + } + + endmntent(fp); + return true; +} + +static void decode_mntname(char *name, int len) +{ + int i, j = 0; + for (i = 0; i <= len; i++) { + if (name[i] != '\\') { + name[j++] = name[i]; + } else if (name[i + 1] == '\\') { + name[j++] = '\\'; + i++; + } else if (name[i + 1] >= '0' && name[i + 1] <= '3' && + name[i + 2] >= '0' && name[i + 2] <= '7' && + name[i + 3] >= '0' && name[i + 3] <= '7') { + name[j++] = (name[i + 1] - '0') * 64 + + (name[i + 2] - '0') * 8 + + (name[i + 3] - '0'); + i += 3; + } else { + name[j++] = name[i]; + } + } +} + +/* + * Walk the mount table and build a list of local file systems + */ +bool build_fs_mount_list(FsMountList *mounts, Error **errp) +{ + FsMount *mount; + char const *mountinfo = "/proc/self/mountinfo"; + FILE *fp; + char *line = NULL, *dash; + size_t n; + char check; + unsigned int devmajor, devminor; + int ret, dir_s, dir_e, type_s, type_e, dev_s, dev_e; + + fp = fopen(mountinfo, "r"); + if (!fp) { + return build_fs_mount_list_from_mtab(mounts, errp); + } + + while (getline(&line, &n, fp) != -1) { + ret = sscanf(line, "%*u %*u %u:%u %*s %n%*s%n%c", + &devmajor, &devminor, &dir_s, &dir_e, &check); + if (ret < 3) { + continue; + } + dash = strstr(line + dir_e, " - "); + if (!dash) { + continue; + } + ret = sscanf(dash, " - %n%*s%n %n%*s%n%c", + &type_s, 
&type_e, &dev_s, &dev_e, &check); + if (ret < 1) { + continue; + } + line[dir_e] = 0; + dash[type_e] = 0; + dash[dev_e] = 0; + decode_mntname(line + dir_s, dir_e - dir_s); + decode_mntname(dash + dev_s, dev_e - dev_s); + if (devmajor == 0) { + /* btrfs reports major number = 0 */ + if (strcmp("btrfs", dash + type_s) != 0 || + dev_major_minor(dash + dev_s, &devmajor, &devminor) < 0) { + continue; + } + } + + mount = g_new0(FsMount, 1); + mount->dirname = g_strdup(line + dir_s); + mount->devtype = g_strdup(dash + type_s); + mount->devmajor = devmajor; + mount->devminor = devminor; + + QTAILQ_INSERT_TAIL(mounts, mount, next); + } + free(line); + + fclose(fp); + return true; +} +#endif /* CONFIG_FSFREEZE || CONFIG_FSTRIM */ + +#ifdef CONFIG_FSFREEZE +/* + * Walk list of mounted file systems in the guest, and freeze the ones which + * are real local file systems. + */ +int64_t qmp_guest_fsfreeze_do_freeze_list(bool has_mountpoints, + strList *mountpoints, + FsMountList mounts, + Error **errp) +{ + struct FsMount *mount; + strList *list; + int fd, ret, i = 0; + + QTAILQ_FOREACH_REVERSE(mount, &mounts, next) { + /* To issue fsfreeze in the reverse order of mounts, check if the + * mount is listed in the list here */ + if (has_mountpoints) { + for (list = mountpoints; list; list = list->next) { + if (strcmp(list->value, mount->dirname) == 0) { + break; + } + } + if (!list) { + continue; + } + } + + fd = qga_open_cloexec(mount->dirname, O_RDONLY, 0); + if (fd == -1) { + error_setg_errno(errp, errno, "failed to open %s", mount->dirname); + return -1; + } + + /* we try to cull filesystems we know won't work in advance, but other + * filesystems may not implement fsfreeze for less obvious reasons. + * these will report EOPNOTSUPP. we simply ignore these when tallying + * the number of frozen filesystems. + * if a filesystem is mounted more than once (aka bind mount) a + * consecutive attempt to freeze an already frozen filesystem will + * return EBUSY. + * + * any other error means a failure to freeze a filesystem we + * expect to be freezable, so return an error in those cases + * and return system to thawed state. + */ + ret = ioctl(fd, FIFREEZE); + if (ret == -1) { + if (errno != EOPNOTSUPP && errno != EBUSY) { + error_setg_errno(errp, errno, "failed to freeze %s", + mount->dirname); + close(fd); + return -1; + } + } else { + i++; + } + close(fd); + } + return i; +} + +int qmp_guest_fsfreeze_do_thaw(Error **errp) +{ + int ret; + FsMountList mounts; + FsMount *mount; + int fd, i = 0, logged; + Error *local_err = NULL; + + QTAILQ_INIT(&mounts); + if (!build_fs_mount_list(&mounts, &local_err)) { + error_propagate(errp, local_err); + return -1; + } + + QTAILQ_FOREACH(mount, &mounts, next) { + logged = false; + fd = qga_open_cloexec(mount->dirname, O_RDONLY, 0); + if (fd == -1) { + continue; + } + /* we have no way of knowing whether a filesystem was actually unfrozen + * as a result of a successful call to FITHAW, only that if an error + * was returned the filesystem was *not* unfrozen by that particular + * call. + * + * since multiple preceding FIFREEZEs require multiple calls to FITHAW + * to unfreeze, continuing issuing FITHAW until an error is returned, + * in which case either the filesystem is in an unfreezable state, or, + * more likely, it was thawed previously (and remains so afterward). 
+ * + * also, since the most recent successful call is the one that did + * the actual unfreeze, we can use this to provide an accurate count + * of the number of filesystems unfrozen by guest-fsfreeze-thaw, which + * may * be useful for determining whether a filesystem was unfrozen + * during the freeze/thaw phase by a process other than qemu-ga. + */ + do { + ret = ioctl(fd, FITHAW); + if (ret == 0 && !logged) { + i++; + logged = true; + } + } while (ret == 0); + close(fd); + } + + free_fs_mount_list(&mounts); + + return i; +} +#endif /* CONFIG_FSFREEZE */ diff --git a/qga/commands-posix-ssh.c b/qga/commands-posix-ssh.c index 2dda136d64..f3a580b8cc 100644 --- a/qga/commands-posix-ssh.c +++ b/qga/commands-posix-ssh.c @@ -45,8 +45,6 @@ get_passwd_entry(const char *username, Error **errp) g_autoptr(GError) err = NULL; struct passwd *p; - ERRP_GUARD(); - p = g_unix_get_passwd_entry_qemu(username, &err); if (p == NULL) { error_setg(errp, "failed to lookup user '%s': %s", @@ -61,8 +59,6 @@ static bool mkdir_for_user(const char *path, const struct passwd *p, mode_t mode, Error **errp) { - ERRP_GUARD(); - if (g_mkdir(path, mode) == -1) { error_setg(errp, "failed to create directory '%s': %s", path, g_strerror(errno)); @@ -87,8 +83,6 @@ mkdir_for_user(const char *path, const struct passwd *p, static bool check_openssh_pub_key(const char *key, Error **errp) { - ERRP_GUARD(); - /* simple sanity-check, we may want more? */ if (!key || key[0] == '#' || strchr(key, '\n')) { error_setg(errp, "invalid OpenSSH public key: '%s'", key); @@ -104,8 +98,6 @@ check_openssh_pub_keys(strList *keys, size_t *nkeys, Error **errp) size_t n = 0; strList *k; - ERRP_GUARD(); - for (k = keys; k != NULL; k = k->next) { if (!check_openssh_pub_key(k->value, errp)) { return false; @@ -126,8 +118,6 @@ write_authkeys(const char *path, const GStrv keys, g_autofree char *contents = NULL; g_autoptr(GError) err = NULL; - ERRP_GUARD(); - contents = g_strjoinv("\n", keys); if (!g_file_set_contents(path, contents, -1, &err)) { error_setg(errp, "failed to write to '%s': %s", path, err->message); @@ -155,8 +145,6 @@ read_authkeys(const char *path, Error **errp) g_autoptr(GError) err = NULL; g_autofree char *contents = NULL; - ERRP_GUARD(); - if (!g_file_get_contents(path, &contents, NULL, &err)) { error_setg(errp, "failed to read '%s': %s", path, err->message); return NULL; @@ -178,7 +166,6 @@ qmp_guest_ssh_add_authorized_keys(const char *username, strList *keys, strList *k; size_t nkeys, nauthkeys; - ERRP_GUARD(); reset = has_reset && reset; if (!check_openssh_pub_keys(keys, &nkeys, errp)) { @@ -228,8 +215,6 @@ qmp_guest_ssh_remove_authorized_keys(const char *username, strList *keys, GStrv a; size_t nkeys = 0; - ERRP_GUARD(); - if (!check_openssh_pub_keys(keys, NULL, errp)) { return; } @@ -277,8 +262,6 @@ qmp_guest_ssh_get_authorized_keys(const char *username, Error **errp) g_autoptr(GuestAuthorizedKeys) ret = NULL; int i; - ERRP_GUARD(); - p = get_passwd_entry(username, errp); if (p == NULL) { return NULL; diff --git a/qga/commands-posix.c b/qga/commands-posix.c index 75dbaab68e..182eba4a38 100644 --- a/qga/commands-posix.c +++ b/qga/commands-posix.c @@ -16,49 +16,39 @@ #include #include #include -#include "qemu-common.h" -#include "guest-agent-core.h" #include "qga-qapi-commands.h" #include "qapi/error.h" #include "qapi/qmp/qerror.h" -#include "qemu/queue.h" #include "qemu/host-utils.h" #include "qemu/sockets.h" #include "qemu/base64.h" #include "qemu/cutils.h" #include "commands-common.h" +#include "block/nvme.h" +#include 
"cutils.h" #ifdef HAVE_UTMPX #include #endif -#ifndef CONFIG_HAS_ENVIRON -#ifdef __APPLE__ -#include -#define environ (*_NSGetEnviron()) -#else -extern char **environ; -#endif -#endif - #if defined(__linux__) #include -#include -#include -#include -#include -#include #include +#include #ifdef CONFIG_LIBUDEV #include #endif - -#ifdef FIFREEZE -#define CONFIG_FSFREEZE #endif -#ifdef FITRIM -#define CONFIG_FSTRIM + +#ifdef HAVE_GETIFADDRS +#include +#include +#include +#include +#include +#ifdef CONFIG_SOLARIS +#include #endif #endif @@ -88,13 +78,27 @@ void qmp_guest_shutdown(bool has_mode, const char *mode, Error **errp) pid_t pid; int status; +#ifdef CONFIG_SOLARIS + const char *powerdown_flag = "-i5"; + const char *halt_flag = "-i0"; + const char *reboot_flag = "-i6"; +#elif defined(CONFIG_BSD) + const char *powerdown_flag = "-p"; + const char *halt_flag = "-h"; + const char *reboot_flag = "-r"; +#else + const char *powerdown_flag = "-P"; + const char *halt_flag = "-H"; + const char *reboot_flag = "-r"; +#endif + slog("guest-shutdown called, mode: %s", mode); if (!has_mode || strcmp(mode, "powerdown") == 0) { - shutdown_flag = "-P"; + shutdown_flag = powerdown_flag; } else if (strcmp(mode, "halt") == 0) { - shutdown_flag = "-H"; + shutdown_flag = halt_flag; } else if (strcmp(mode, "reboot") == 0) { - shutdown_flag = "-r"; + shutdown_flag = reboot_flag; } else { error_setg(errp, "mode is invalid (valid values are: halt|powerdown|reboot"); @@ -109,8 +113,16 @@ void qmp_guest_shutdown(bool has_mode, const char *mode, Error **errp) reopen_fd_to_null(1); reopen_fd_to_null(2); - execle("/sbin/shutdown", "shutdown", "-h", shutdown_flag, "+0", - "hypervisor initiated shutdown", (char *)NULL, environ); +#ifdef CONFIG_SOLARIS + execl("/sbin/shutdown", "shutdown", shutdown_flag, "-g0", "-y", + "hypervisor initiated shutdown", (char *)NULL); +#elif defined(CONFIG_BSD) + execl("/sbin/shutdown", "shutdown", shutdown_flag, "+0", + "hypervisor initiated shutdown", (char *)NULL); +#else + execl("/sbin/shutdown", "shutdown", "-h", shutdown_flag, "+0", + "hypervisor initiated shutdown", (char *)NULL); +#endif _exit(EXIT_FAILURE); } else if (pid < 0) { error_setg_errno(errp, errno, "failed to create child process"); @@ -136,20 +148,6 @@ void qmp_guest_shutdown(bool has_mode, const char *mode, Error **errp) /* succeeded */ } -int64_t qmp_guest_get_time(Error **errp) -{ - int ret; - qemu_timeval tq; - - ret = qemu_gettimeofday(&tq); - if (ret < 0) { - error_setg_errno(errp, errno, "Failed to get time"); - return -1; - } - - return tq.tv_sec * 1000000000LL + tq.tv_usec * 1000; -} - void qmp_guest_set_time(bool has_time, int64_t time_ns, Error **errp) { int ret; @@ -207,8 +205,7 @@ void qmp_guest_set_time(bool has_time, int64_t time_ns, Error **errp) /* Use '/sbin/hwclock -w' to set RTC from the system time, * or '/sbin/hwclock -s' to set the system time from RTC. */ - execle(hwclock_path, "hwclock", has_time ? "-w" : "-s", - NULL, environ); + execl(hwclock_path, "hwclock", has_time ? 
"-w" : "-s", NULL); _exit(EXIT_FAILURE); } else if (pid < 0) { error_setg_errno(errp, errno, "failed to create child process"); @@ -340,73 +337,71 @@ find_open_flag(const char *mode_str, Error **errp) static FILE * safe_open_or_create(const char *path, const char *mode, Error **errp) { - Error *local_err = NULL; int oflag; + int fd = -1; + FILE *f = NULL; - oflag = find_open_flag(mode, &local_err); - if (local_err == NULL) { - int fd; - - /* If the caller wants / allows creation of a new file, we implement it - * with a two step process: open() + (open() / fchmod()). - * - * First we insist on creating the file exclusively as a new file. If - * that succeeds, we're free to set any file-mode bits on it. (The - * motivation is that we want to set those file-mode bits independently - * of the current umask.) - * - * If the exclusive creation fails because the file already exists - * (EEXIST is not possible for any other reason), we just attempt to - * open the file, but in this case we won't be allowed to change the - * file-mode bits on the preexistent file. - * - * The pathname should never disappear between the two open()s in - * practice. If it happens, then someone very likely tried to race us. - * In this case just go ahead and report the ENOENT from the second - * open() to the caller. - * - * If the caller wants to open a preexistent file, then the first - * open() is decisive and its third argument is ignored, and the second - * open() and the fchmod() are never called. - */ - fd = open(path, oflag | ((oflag & O_CREAT) ? O_EXCL : 0), 0); - if (fd == -1 && errno == EEXIST) { - oflag &= ~(unsigned)O_CREAT; - fd = open(path, oflag); - } - - if (fd == -1) { - error_setg_errno(&local_err, errno, "failed to open file '%s' " - "(mode: '%s')", path, mode); - } else { - qemu_set_cloexec(fd); - - if ((oflag & O_CREAT) && fchmod(fd, DEFAULT_NEW_FILE_MODE) == -1) { - error_setg_errno(&local_err, errno, "failed to set permission " - "0%03o on new file '%s' (mode: '%s')", - (unsigned)DEFAULT_NEW_FILE_MODE, path, mode); - } else { - FILE *f; - - f = fdopen(fd, mode); - if (f == NULL) { - error_setg_errno(&local_err, errno, "failed to associate " - "stdio stream with file descriptor %d, " - "file '%s' (mode: '%s')", fd, path, mode); - } else { - return f; - } - } - - close(fd); - if (oflag & O_CREAT) { - unlink(path); - } - } + oflag = find_open_flag(mode, errp); + if (oflag < 0) { + goto end; } - error_propagate(errp, local_err); - return NULL; + /* If the caller wants / allows creation of a new file, we implement it + * with a two step process: open() + (open() / fchmod()). + * + * First we insist on creating the file exclusively as a new file. If + * that succeeds, we're free to set any file-mode bits on it. (The + * motivation is that we want to set those file-mode bits independently + * of the current umask.) + * + * If the exclusive creation fails because the file already exists + * (EEXIST is not possible for any other reason), we just attempt to + * open the file, but in this case we won't be allowed to change the + * file-mode bits on the preexistent file. + * + * The pathname should never disappear between the two open()s in + * practice. If it happens, then someone very likely tried to race us. + * In this case just go ahead and report the ENOENT from the second + * open() to the caller. + * + * If the caller wants to open a preexistent file, then the first + * open() is decisive and its third argument is ignored, and the second + * open() and the fchmod() are never called. 
+ */ + fd = qga_open_cloexec(path, oflag | ((oflag & O_CREAT) ? O_EXCL : 0), 0); + if (fd == -1 && errno == EEXIST) { + oflag &= ~(unsigned)O_CREAT; + fd = qga_open_cloexec(path, oflag, 0); + } + if (fd == -1) { + error_setg_errno(errp, errno, + "failed to open file '%s' (mode: '%s')", + path, mode); + goto end; + } + + if ((oflag & O_CREAT) && fchmod(fd, DEFAULT_NEW_FILE_MODE) == -1) { + error_setg_errno(errp, errno, "failed to set permission " + "0%03o on new file '%s' (mode: '%s')", + (unsigned)DEFAULT_NEW_FILE_MODE, path, mode); + goto end; + } + + f = fdopen(fd, mode); + if (f == NULL) { + error_setg_errno(errp, errno, "failed to associate stdio stream with " + "file descriptor %d, file '%s' (mode: '%s')", + fd, path, mode); + } + +end: + if (f == NULL && fd != -1) { + close(fd); + if (oflag & O_CREAT) { + unlink(path); + } + } + return f; } int64_t qmp_guest_file_open(const char *path, bool has_mode, const char *mode, @@ -429,7 +424,11 @@ int64_t qmp_guest_file_open(const char *path, bool has_mode, const char *mode, /* set fd non-blocking to avoid common use cases (like reading from a * named pipe) from hanging the agent */ - qemu_set_nonblock(fileno(fh)); + if (!g_unix_set_fd_nonblocking(fileno(fh), true, NULL)) { + fclose(fh); + error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + return -1; + } handle = guest_file_handle_add(fh, errp); if (handle < 0) { @@ -615,20 +614,8 @@ void qmp_guest_file_flush(int64_t handle, Error **errp) } } -/* linux-specific implementations. avoid this if at all possible. */ -#if defined(__linux__) - #if defined(CONFIG_FSFREEZE) || defined(CONFIG_FSTRIM) -typedef struct FsMount { - char *dirname; - char *devtype; - unsigned int devmajor, devminor; - QTAILQ_ENTRY(FsMount) next; -} FsMount; - -typedef QTAILQ_HEAD(FsMountList, FsMount) FsMountList; - -static void free_fs_mount_list(FsMountList *mounts) +void free_fs_mount_list(FsMountList *mounts) { FsMount *mount, *temp; @@ -643,158 +630,158 @@ static void free_fs_mount_list(FsMountList *mounts) g_free(mount); } } +#endif -static int dev_major_minor(const char *devpath, - unsigned int *devmajor, unsigned int *devminor) +#if defined(CONFIG_FSFREEZE) +typedef enum { + FSFREEZE_HOOK_THAW = 0, + FSFREEZE_HOOK_FREEZE, +} FsfreezeHookArg; + +static const char *fsfreeze_hook_arg_string[] = { + "thaw", + "freeze", +}; + +static void execute_fsfreeze_hook(FsfreezeHookArg arg, Error **errp) { - struct stat st; + int status; + pid_t pid; + const char *hook; + const char *arg_str = fsfreeze_hook_arg_string[arg]; + Error *local_err = NULL; - *devmajor = 0; - *devminor = 0; + hook = ga_fsfreeze_hook(ga_state); + if (!hook) { + return; + } + if (access(hook, X_OK) != 0) { + error_setg_errno(errp, errno, "can't access fsfreeze hook '%s'", hook); + return; + } - if (stat(devpath, &st) < 0) { - slog("failed to stat device file '%s': %s", devpath, strerror(errno)); - return -1; + slog("executing fsfreeze hook with arg '%s'", arg_str); + pid = fork(); + if (pid == 0) { + setsid(); + reopen_fd_to_null(0); + reopen_fd_to_null(1); + reopen_fd_to_null(2); + + execl(hook, hook, arg_str, NULL); + _exit(EXIT_FAILURE); + } else if (pid < 0) { + error_setg_errno(errp, errno, "failed to create child process"); + return; } - if (S_ISDIR(st.st_mode)) { - /* It is bind mount */ - return -2; + + ga_wait_child(pid, &status, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; } - if (S_ISBLK(st.st_mode)) { - *devmajor = major(st.st_rdev); - *devminor = minor(st.st_rdev); - return 0; + + if 
(!WIFEXITED(status)) { + error_setg(errp, "fsfreeze hook has terminated abnormally"); + return; + } + + status = WEXITSTATUS(status); + if (status) { + error_setg(errp, "fsfreeze hook has failed with status %d", status); + return; } - return -1; } /* - * Walk the mount table and build a list of local file systems + * Return status of freeze/thaw */ -static void build_fs_mount_list_from_mtab(FsMountList *mounts, Error **errp) +GuestFsfreezeStatus qmp_guest_fsfreeze_status(Error **errp) { - struct mntent *ment; - FsMount *mount; - char const *mtab = "/proc/self/mounts"; - FILE *fp; - unsigned int devmajor, devminor; - - fp = setmntent(mtab, "r"); - if (!fp) { - error_setg(errp, "failed to open mtab file: '%s'", mtab); - return; + if (ga_is_frozen(ga_state)) { + return GUEST_FSFREEZE_STATUS_FROZEN; } - while ((ment = getmntent(fp))) { - /* - * An entry which device name doesn't start with a '/' is - * either a dummy file system or a network file system. - * Add special handling for smbfs and cifs as is done by - * coreutils as well. - */ - if ((ment->mnt_fsname[0] != '/') || - (strcmp(ment->mnt_type, "smbfs") == 0) || - (strcmp(ment->mnt_type, "cifs") == 0)) { - continue; - } - if (dev_major_minor(ment->mnt_fsname, &devmajor, &devminor) == -2) { - /* Skip bind mounts */ - continue; - } - - mount = g_new0(FsMount, 1); - mount->dirname = g_strdup(ment->mnt_dir); - mount->devtype = g_strdup(ment->mnt_type); - mount->devmajor = devmajor; - mount->devminor = devminor; - - QTAILQ_INSERT_TAIL(mounts, mount, next); - } - - endmntent(fp); + return GUEST_FSFREEZE_STATUS_THAWED; } -static void decode_mntname(char *name, int len) +int64_t qmp_guest_fsfreeze_freeze(Error **errp) { - int i, j = 0; - for (i = 0; i <= len; i++) { - if (name[i] != '\\') { - name[j++] = name[i]; - } else if (name[i + 1] == '\\') { - name[j++] = '\\'; - i++; - } else if (name[i + 1] >= '0' && name[i + 1] <= '3' && - name[i + 2] >= '0' && name[i + 2] <= '7' && - name[i + 3] >= '0' && name[i + 3] <= '7') { - name[j++] = (name[i + 1] - '0') * 64 + - (name[i + 2] - '0') * 8 + - (name[i + 3] - '0'); - i += 3; - } else { - name[j++] = name[i]; - } - } + return qmp_guest_fsfreeze_freeze_list(false, NULL, errp); } -static void build_fs_mount_list(FsMountList *mounts, Error **errp) +int64_t qmp_guest_fsfreeze_freeze_list(bool has_mountpoints, + strList *mountpoints, + Error **errp) { - FsMount *mount; - char const *mountinfo = "/proc/self/mountinfo"; - FILE *fp; - char *line = NULL, *dash; - size_t n; - char check; - unsigned int devmajor, devminor; - int ret, dir_s, dir_e, type_s, type_e, dev_s, dev_e; + int ret; + FsMountList mounts; + Error *local_err = NULL; - fp = fopen(mountinfo, "r"); - if (!fp) { - build_fs_mount_list_from_mtab(mounts, errp); - return; + slog("guest-fsfreeze called"); + + execute_fsfreeze_hook(FSFREEZE_HOOK_FREEZE, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return -1; } - while (getline(&line, &n, fp) != -1) { - ret = sscanf(line, "%*u %*u %u:%u %*s %n%*s%n%c", - &devmajor, &devminor, &dir_s, &dir_e, &check); - if (ret < 3) { - continue; - } - dash = strstr(line + dir_e, " - "); - if (!dash) { - continue; - } - ret = sscanf(dash, " - %n%*s%n %n%*s%n%c", - &type_s, &type_e, &dev_s, &dev_e, &check); - if (ret < 1) { - continue; - } - line[dir_e] = 0; - dash[type_e] = 0; - dash[dev_e] = 0; - decode_mntname(line + dir_s, dir_e - dir_s); - decode_mntname(dash + dev_s, dev_e - dev_s); - if (devmajor == 0) { - /* btrfs reports major number = 0 */ - if (strcmp("btrfs", dash + type_s) != 0 || - 
dev_major_minor(dash + dev_s, &devmajor, &devminor) < 0) { - continue; - } - } - - mount = g_new0(FsMount, 1); - mount->dirname = g_strdup(line + dir_s); - mount->devtype = g_strdup(dash + type_s); - mount->devmajor = devmajor; - mount->devminor = devminor; - - QTAILQ_INSERT_TAIL(mounts, mount, next); + QTAILQ_INIT(&mounts); + if (!build_fs_mount_list(&mounts, &local_err)) { + error_propagate(errp, local_err); + return -1; } - free(line); - fclose(fp); + /* cannot risk guest agent blocking itself on a write in this state */ + ga_set_frozen(ga_state); + + ret = qmp_guest_fsfreeze_do_freeze_list(has_mountpoints, mountpoints, + mounts, errp); + + free_fs_mount_list(&mounts); + /* We may not issue any FIFREEZE here. + * Just unset ga_state here and ready for the next call. + */ + if (ret == 0) { + ga_unset_frozen(ga_state); + } else if (ret < 0) { + qmp_guest_fsfreeze_thaw(NULL); + } + return ret; +} + +int64_t qmp_guest_fsfreeze_thaw(Error **errp) +{ + int ret; + + ret = qmp_guest_fsfreeze_do_thaw(errp); + if (ret >= 0) { + ga_unset_frozen(ga_state); + execute_fsfreeze_hook(FSFREEZE_HOOK_THAW, errp); + } else { + ret = 0; + } + + return ret; +} + +static void guest_fsfreeze_cleanup(void) +{ + Error *err = NULL; + + if (ga_is_frozen(ga_state) == GUEST_FSFREEZE_STATUS_FROZEN) { + qmp_guest_fsfreeze_thaw(&err); + if (err) { + slog("failed to clean up frozen filesystems: %s", + error_get_pretty(err)); + error_free(err); + } + } } #endif +/* linux-specific implementations. avoid this if at all possible. */ +#if defined(__linux__) #if defined(CONFIG_FSFREEZE) static char *get_pci_driver(char const *syspath, int pathlen, Error **errp) @@ -889,7 +876,8 @@ static bool build_guest_fsinfo_for_pci_dev(char const *syspath, if (driver && (g_str_equal(driver, "ata_piix") || g_str_equal(driver, "sym53c8xx") || g_str_equal(driver, "virtio-pci") || - g_str_equal(driver, "ahci"))) { + g_str_equal(driver, "ahci") || + g_str_equal(driver, "nvme"))) { break; } @@ -984,6 +972,8 @@ static bool build_guest_fsinfo_for_pci_dev(char const *syspath, g_debug("no host for '%s' (driver '%s')", syspath, driver); goto cleanup; } + } else if (strcmp(driver, "nvme") == 0) { + disk->bus_type = GUEST_DISK_BUS_TYPE_NVME; } else { g_debug("unknown driver '%s' (sysfs path '%s')", driver, syspath); goto cleanup; @@ -1201,7 +1191,15 @@ static void build_guest_fsinfo_for_device(char const *devpath, syspath = realpath(devpath, NULL); if (!syspath) { - error_setg_errno(errp, errno, "realpath(\"%s\")", devpath); + if (errno != ENOENT) { + error_setg_errno(errp, errno, "realpath(\"%s\")", devpath); + return; + } + + /* ENOENT: This devpath may not exist because of container config */ + if (!fs->name) { + fs->name = g_path_get_basename(devpath); + } return; } @@ -1387,6 +1385,76 @@ static GuestDiskInfoList *get_disk_partitions( return ret; } +static void get_nvme_smart(GuestDiskInfo *disk) +{ + int fd; + GuestNVMeSmart *smart; + NvmeSmartLog log = {0}; + struct nvme_admin_cmd cmd = { + .opcode = NVME_ADM_CMD_GET_LOG_PAGE, + .nsid = NVME_NSID_BROADCAST, + .addr = (uintptr_t)&log, + .data_len = sizeof(log), + .cdw10 = NVME_LOG_SMART_INFO | (1 << 15) /* RAE bit */ + | (((sizeof(log) >> 2) - 1) << 16) + }; + + fd = qga_open_cloexec(disk->name, O_RDONLY, 0); + if (fd == -1) { + g_debug("Failed to open device: %s: %s", disk->name, g_strerror(errno)); + return; + } + + if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd)) { + g_debug("Failed to get smart: %s: %s", disk->name, g_strerror(errno)); + close(fd); + return; + } + + disk->has_smart = true; + 
disk->smart = g_new0(GuestDiskSmart, 1); + disk->smart->type = GUEST_DISK_BUS_TYPE_NVME; + + smart = &disk->smart->u.nvme; + smart->critical_warning = log.critical_warning; + smart->temperature = lduw_le_p(&log.temperature); /* unaligned field */ + smart->available_spare = log.available_spare; + smart->available_spare_threshold = log.available_spare_threshold; + smart->percentage_used = log.percentage_used; + smart->data_units_read_lo = le64_to_cpu(log.data_units_read[0]); + smart->data_units_read_hi = le64_to_cpu(log.data_units_read[1]); + smart->data_units_written_lo = le64_to_cpu(log.data_units_written[0]); + smart->data_units_written_hi = le64_to_cpu(log.data_units_written[1]); + smart->host_read_commands_lo = le64_to_cpu(log.host_read_commands[0]); + smart->host_read_commands_hi = le64_to_cpu(log.host_read_commands[1]); + smart->host_write_commands_lo = le64_to_cpu(log.host_write_commands[0]); + smart->host_write_commands_hi = le64_to_cpu(log.host_write_commands[1]); + smart->controller_busy_time_lo = le64_to_cpu(log.controller_busy_time[0]); + smart->controller_busy_time_hi = le64_to_cpu(log.controller_busy_time[1]); + smart->power_cycles_lo = le64_to_cpu(log.power_cycles[0]); + smart->power_cycles_hi = le64_to_cpu(log.power_cycles[1]); + smart->power_on_hours_lo = le64_to_cpu(log.power_on_hours[0]); + smart->power_on_hours_hi = le64_to_cpu(log.power_on_hours[1]); + smart->unsafe_shutdowns_lo = le64_to_cpu(log.unsafe_shutdowns[0]); + smart->unsafe_shutdowns_hi = le64_to_cpu(log.unsafe_shutdowns[1]); + smart->media_errors_lo = le64_to_cpu(log.media_errors[0]); + smart->media_errors_hi = le64_to_cpu(log.media_errors[1]); + smart->number_of_error_log_entries_lo = + le64_to_cpu(log.number_of_error_log_entries[0]); + smart->number_of_error_log_entries_hi = + le64_to_cpu(log.number_of_error_log_entries[1]); + + close(fd); +} + +static void get_disk_smart(GuestDiskInfo *disk) +{ + if (disk->has_address + && (disk->address->bus_type == GUEST_DISK_BUS_TYPE_NVME)) { + get_nvme_smart(disk); + } +} + GuestDiskInfoList *qmp_guest_get_disks(Error **errp) { GuestDiskInfoList *ret = NULL; @@ -1460,6 +1528,7 @@ GuestDiskInfoList *qmp_guest_get_disks(Error **errp) } get_disk_deps(disk_dir, disk); + get_disk_smart(disk); ret = get_disk_partitions(ret, de->d_name, disk_dir, dev_name); } @@ -1516,8 +1585,7 @@ GuestFilesystemInfoList *qmp_guest_get_fsinfo(Error **errp) Error *local_err = NULL; QTAILQ_INIT(&mounts); - build_fs_mount_list(&mounts, &local_err); - if (local_err) { + if (!build_fs_mount_list(&mounts, &local_err)) { error_propagate(errp, local_err); return NULL; } @@ -1537,250 +1605,6 @@ GuestFilesystemInfoList *qmp_guest_get_fsinfo(Error **errp) free_fs_mount_list(&mounts); return ret; } - - -typedef enum { - FSFREEZE_HOOK_THAW = 0, - FSFREEZE_HOOK_FREEZE, -} FsfreezeHookArg; - -static const char *fsfreeze_hook_arg_string[] = { - "thaw", - "freeze", -}; - -static void execute_fsfreeze_hook(FsfreezeHookArg arg, Error **errp) -{ - int status; - pid_t pid; - const char *hook; - const char *arg_str = fsfreeze_hook_arg_string[arg]; - Error *local_err = NULL; - - hook = ga_fsfreeze_hook(ga_state); - if (!hook) { - return; - } - if (access(hook, X_OK) != 0) { - error_setg_errno(errp, errno, "can't access fsfreeze hook '%s'", hook); - return; - } - - slog("executing fsfreeze hook with arg '%s'", arg_str); - pid = fork(); - if (pid == 0) { - setsid(); - reopen_fd_to_null(0); - reopen_fd_to_null(1); - reopen_fd_to_null(2); - - execle(hook, hook, arg_str, NULL, environ); - _exit(EXIT_FAILURE); - } else 
if (pid < 0) { - error_setg_errno(errp, errno, "failed to create child process"); - return; - } - - ga_wait_child(pid, &status, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - - if (!WIFEXITED(status)) { - error_setg(errp, "fsfreeze hook has terminated abnormally"); - return; - } - - status = WEXITSTATUS(status); - if (status) { - error_setg(errp, "fsfreeze hook has failed with status %d", status); - return; - } -} - -/* - * Return status of freeze/thaw - */ -GuestFsfreezeStatus qmp_guest_fsfreeze_status(Error **errp) -{ - if (ga_is_frozen(ga_state)) { - return GUEST_FSFREEZE_STATUS_FROZEN; - } - - return GUEST_FSFREEZE_STATUS_THAWED; -} - -int64_t qmp_guest_fsfreeze_freeze(Error **errp) -{ - return qmp_guest_fsfreeze_freeze_list(false, NULL, errp); -} - -/* - * Walk list of mounted file systems in the guest, and freeze the ones which - * are real local file systems. - */ -int64_t qmp_guest_fsfreeze_freeze_list(bool has_mountpoints, - strList *mountpoints, - Error **errp) -{ - int ret = 0, i = 0; - strList *list; - FsMountList mounts; - struct FsMount *mount; - Error *local_err = NULL; - int fd; - - slog("guest-fsfreeze called"); - - execute_fsfreeze_hook(FSFREEZE_HOOK_FREEZE, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return -1; - } - - QTAILQ_INIT(&mounts); - build_fs_mount_list(&mounts, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return -1; - } - - /* cannot risk guest agent blocking itself on a write in this state */ - ga_set_frozen(ga_state); - - QTAILQ_FOREACH_REVERSE(mount, &mounts, next) { - /* To issue fsfreeze in the reverse order of mounts, check if the - * mount is listed in the list here */ - if (has_mountpoints) { - for (list = mountpoints; list; list = list->next) { - if (strcmp(list->value, mount->dirname) == 0) { - break; - } - } - if (!list) { - continue; - } - } - - fd = qemu_open_old(mount->dirname, O_RDONLY); - if (fd == -1) { - error_setg_errno(errp, errno, "failed to open %s", mount->dirname); - goto error; - } - - /* we try to cull filesystems we know won't work in advance, but other - * filesystems may not implement fsfreeze for less obvious reasons. - * these will report EOPNOTSUPP. we simply ignore these when tallying - * the number of frozen filesystems. - * if a filesystem is mounted more than once (aka bind mount) a - * consecutive attempt to freeze an already frozen filesystem will - * return EBUSY. - * - * any other error means a failure to freeze a filesystem we - * expect to be freezable, so return an error in those cases - * and return system to thawed state. - */ - ret = ioctl(fd, FIFREEZE); - if (ret == -1) { - if (errno != EOPNOTSUPP && errno != EBUSY) { - error_setg_errno(errp, errno, "failed to freeze %s", - mount->dirname); - close(fd); - goto error; - } - } else { - i++; - } - close(fd); - } - - free_fs_mount_list(&mounts); - /* We may not issue any FIFREEZE here. - * Just unset ga_state here and ready for the next call. - */ - if (i == 0) { - ga_unset_frozen(ga_state); - } - return i; - -error: - free_fs_mount_list(&mounts); - qmp_guest_fsfreeze_thaw(NULL); - return 0; -} - -/* - * Walk list of frozen file systems in the guest, and thaw them. 
- */ -int64_t qmp_guest_fsfreeze_thaw(Error **errp) -{ - int ret; - FsMountList mounts; - FsMount *mount; - int fd, i = 0, logged; - Error *local_err = NULL; - - QTAILQ_INIT(&mounts); - build_fs_mount_list(&mounts, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return 0; - } - - QTAILQ_FOREACH(mount, &mounts, next) { - logged = false; - fd = qemu_open_old(mount->dirname, O_RDONLY); - if (fd == -1) { - continue; - } - /* we have no way of knowing whether a filesystem was actually unfrozen - * as a result of a successful call to FITHAW, only that if an error - * was returned the filesystem was *not* unfrozen by that particular - * call. - * - * since multiple preceding FIFREEZEs require multiple calls to FITHAW - * to unfreeze, continuing issuing FITHAW until an error is returned, - * in which case either the filesystem is in an unfreezable state, or, - * more likely, it was thawed previously (and remains so afterward). - * - * also, since the most recent successful call is the one that did - * the actual unfreeze, we can use this to provide an accurate count - * of the number of filesystems unfrozen by guest-fsfreeze-thaw, which - * may * be useful for determining whether a filesystem was unfrozen - * during the freeze/thaw phase by a process other than qemu-ga. - */ - do { - ret = ioctl(fd, FITHAW); - if (ret == 0 && !logged) { - i++; - logged = true; - } - } while (ret == 0); - close(fd); - } - - ga_unset_frozen(ga_state); - free_fs_mount_list(&mounts); - - execute_fsfreeze_hook(FSFREEZE_HOOK_THAW, errp); - - return i; -} - -static void guest_fsfreeze_cleanup(void) -{ - Error *err = NULL; - - if (ga_is_frozen(ga_state) == GUEST_FSFREEZE_STATUS_FROZEN) { - qmp_guest_fsfreeze_thaw(&err); - if (err) { - slog("failed to clean up frozen filesystems: %s", - error_get_pretty(err)); - error_free(err); - } - } -} #endif /* CONFIG_FSFREEZE */ #if defined(CONFIG_FSTRIM) @@ -1796,15 +1620,12 @@ qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **errp) FsMountList mounts; struct FsMount *mount; int fd; - Error *local_err = NULL; struct fstrim_range r; slog("guest-fstrim called"); QTAILQ_INIT(&mounts); - build_fs_mount_list(&mounts, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!build_fs_mount_list(&mounts, errp)) { return NULL; } @@ -1816,7 +1637,7 @@ qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **errp) QAPI_LIST_PREPEND(response->paths, result); - fd = qemu_open_old(mount->dirname, O_RDONLY); + fd = qga_open_cloexec(mount->dirname, O_RDONLY, 0); if (fd == -1) { result->error = g_strdup_printf("failed to open: %s", strerror(errno)); @@ -1888,7 +1709,7 @@ static int run_process_child(const char *command[], Error **errp) spawn_flag = G_SPAWN_SEARCH_PATH | G_SPAWN_STDOUT_TO_DEV_NULL | G_SPAWN_STDERR_TO_DEV_NULL; - success = g_spawn_sync(NULL, (char **)command, environ, spawn_flag, + success = g_spawn_sync(NULL, (char **)command, NULL, spawn_flag, NULL, NULL, NULL, NULL, &exit_status, &g_err); @@ -2104,10 +1925,10 @@ static void guest_suspend(SuspendMode mode, Error **errp) if (systemd_supports_mode(mode, &local_err)) { mode_supported = true; systemd_suspend(mode, &local_err); - } - if (!local_err) { - return; + if (!local_err) { + return; + } } error_free(local_err); @@ -2116,10 +1937,10 @@ static void guest_suspend(SuspendMode mode, Error **errp) if (pmutils_supports_mode(mode, &local_err)) { mode_supported = true; pmutils_suspend(mode, &local_err); - } - if (!local_err) { - return; + if (!local_err) { + return; + } } 
error_free(local_err); @@ -2154,223 +1975,6 @@ void qmp_guest_suspend_hybrid(Error **errp) guest_suspend(SUSPEND_MODE_HYBRID, errp); } -static GuestNetworkInterface * -guest_find_interface(GuestNetworkInterfaceList *head, - const char *name) -{ - for (; head; head = head->next) { - if (strcmp(head->value->name, name) == 0) { - return head->value; - } - } - - return NULL; -} - -static int guest_get_network_stats(const char *name, - GuestNetworkInterfaceStat *stats) -{ - int name_len; - char const *devinfo = "/proc/net/dev"; - FILE *fp; - char *line = NULL, *colon; - size_t n = 0; - fp = fopen(devinfo, "r"); - if (!fp) { - return -1; - } - name_len = strlen(name); - while (getline(&line, &n, fp) != -1) { - long long dummy; - long long rx_bytes; - long long rx_packets; - long long rx_errs; - long long rx_dropped; - long long tx_bytes; - long long tx_packets; - long long tx_errs; - long long tx_dropped; - char *trim_line; - trim_line = g_strchug(line); - if (trim_line[0] == '\0') { - continue; - } - colon = strchr(trim_line, ':'); - if (!colon) { - continue; - } - if (colon - name_len == trim_line && - strncmp(trim_line, name, name_len) == 0) { - if (sscanf(colon + 1, - "%lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld", - &rx_bytes, &rx_packets, &rx_errs, &rx_dropped, - &dummy, &dummy, &dummy, &dummy, - &tx_bytes, &tx_packets, &tx_errs, &tx_dropped, - &dummy, &dummy, &dummy, &dummy) != 16) { - continue; - } - stats->rx_bytes = rx_bytes; - stats->rx_packets = rx_packets; - stats->rx_errs = rx_errs; - stats->rx_dropped = rx_dropped; - stats->tx_bytes = tx_bytes; - stats->tx_packets = tx_packets; - stats->tx_errs = tx_errs; - stats->tx_dropped = tx_dropped; - fclose(fp); - g_free(line); - return 0; - } - } - fclose(fp); - g_free(line); - g_debug("/proc/net/dev: Interface '%s' not found", name); - return -1; -} - -/* - * Build information about guest interfaces - */ -GuestNetworkInterfaceList *qmp_guest_network_get_interfaces(Error **errp) -{ - GuestNetworkInterfaceList *head = NULL, **tail = &head; - struct ifaddrs *ifap, *ifa; - - if (getifaddrs(&ifap) < 0) { - error_setg_errno(errp, errno, "getifaddrs failed"); - goto error; - } - - for (ifa = ifap; ifa; ifa = ifa->ifa_next) { - GuestNetworkInterface *info; - GuestIpAddressList **address_tail; - GuestIpAddress *address_item = NULL; - GuestNetworkInterfaceStat *interface_stat = NULL; - char addr4[INET_ADDRSTRLEN]; - char addr6[INET6_ADDRSTRLEN]; - int sock; - struct ifreq ifr; - unsigned char *mac_addr; - void *p; - - g_debug("Processing %s interface", ifa->ifa_name); - - info = guest_find_interface(head, ifa->ifa_name); - - if (!info) { - info = g_malloc0(sizeof(*info)); - info->name = g_strdup(ifa->ifa_name); - - QAPI_LIST_APPEND(tail, info); - } - - if (!info->has_hardware_address && ifa->ifa_flags & SIOCGIFHWADDR) { - /* we haven't obtained HW address yet */ - sock = socket(PF_INET, SOCK_STREAM, 0); - if (sock == -1) { - error_setg_errno(errp, errno, "failed to create socket"); - goto error; - } - - memset(&ifr, 0, sizeof(ifr)); - pstrcpy(ifr.ifr_name, IF_NAMESIZE, info->name); - if (ioctl(sock, SIOCGIFHWADDR, &ifr) == -1) { - error_setg_errno(errp, errno, - "failed to get MAC address of %s", - ifa->ifa_name); - close(sock); - goto error; - } - - close(sock); - mac_addr = (unsigned char *) &ifr.ifr_hwaddr.sa_data; - - info->hardware_address = - g_strdup_printf("%02x:%02x:%02x:%02x:%02x:%02x", - (int) mac_addr[0], (int) mac_addr[1], - (int) mac_addr[2], (int) mac_addr[3], - (int) mac_addr[4], (int) 
mac_addr[5]); - - info->has_hardware_address = true; - } - - if (ifa->ifa_addr && - ifa->ifa_addr->sa_family == AF_INET) { - /* interface with IPv4 address */ - p = &((struct sockaddr_in *)ifa->ifa_addr)->sin_addr; - if (!inet_ntop(AF_INET, p, addr4, sizeof(addr4))) { - error_setg_errno(errp, errno, "inet_ntop failed"); - goto error; - } - - address_item = g_malloc0(sizeof(*address_item)); - address_item->ip_address = g_strdup(addr4); - address_item->ip_address_type = GUEST_IP_ADDRESS_TYPE_IPV4; - - if (ifa->ifa_netmask) { - /* Count the number of set bits in netmask. - * This is safe as '1' and '0' cannot be shuffled in netmask. */ - p = &((struct sockaddr_in *)ifa->ifa_netmask)->sin_addr; - address_item->prefix = ctpop32(((uint32_t *) p)[0]); - } - } else if (ifa->ifa_addr && - ifa->ifa_addr->sa_family == AF_INET6) { - /* interface with IPv6 address */ - p = &((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr; - if (!inet_ntop(AF_INET6, p, addr6, sizeof(addr6))) { - error_setg_errno(errp, errno, "inet_ntop failed"); - goto error; - } - - address_item = g_malloc0(sizeof(*address_item)); - address_item->ip_address = g_strdup(addr6); - address_item->ip_address_type = GUEST_IP_ADDRESS_TYPE_IPV6; - - if (ifa->ifa_netmask) { - /* Count the number of set bits in netmask. - * This is safe as '1' and '0' cannot be shuffled in netmask. */ - p = &((struct sockaddr_in6 *)ifa->ifa_netmask)->sin6_addr; - address_item->prefix = - ctpop32(((uint32_t *) p)[0]) + - ctpop32(((uint32_t *) p)[1]) + - ctpop32(((uint32_t *) p)[2]) + - ctpop32(((uint32_t *) p)[3]); - } - } - - if (!address_item) { - continue; - } - - address_tail = &info->ip_addresses; - while (*address_tail) { - address_tail = &(*address_tail)->next; - } - QAPI_LIST_APPEND(address_tail, address_item); - - info->has_ip_addresses = true; - - if (!info->has_statistics) { - interface_stat = g_malloc0(sizeof(*interface_stat)); - if (guest_get_network_stats(info->name, interface_stat) == -1) { - info->has_statistics = false; - g_free(interface_stat); - } else { - info->statistics = interface_stat; - info->has_statistics = true; - } - } - } - - freeifaddrs(ifap); - return head; - -error: - freeifaddrs(ifap); - qapi_free_GuestNetworkInterfaceList(head); - return NULL; -} - /* Transfer online/offline status between @vcpu and the guest system. * * On input either @errp or *@errp must be NULL. @@ -2510,7 +2114,9 @@ int64_t qmp_guest_set_vcpus(GuestLogicalProcessorList *vcpus, Error **errp) return processed; } +#endif /* __linux__ */ +#if defined(__linux__) || defined(__FreeBSD__) void qmp_guest_set_user_password(const char *username, const char *password, bool crypted, @@ -2544,17 +2150,22 @@ void qmp_guest_set_user_password(const char *username, goto out; } +#ifdef __FreeBSD__ + chpasswddata = g_strdup(rawpasswddata); + passwd_path = g_find_program_in_path("pw"); +#else chpasswddata = g_strdup_printf("%s:%s\n", username, rawpasswddata); - chpasswdlen = strlen(chpasswddata); - passwd_path = g_find_program_in_path("chpasswd"); +#endif + + chpasswdlen = strlen(chpasswddata); if (!passwd_path) { error_setg(errp, "cannot find 'passwd' program in PATH"); goto out; } - if (pipe(datafd) < 0) { + if (!g_unix_open_pipe(datafd, FD_CLOEXEC, NULL)) { error_setg(errp, "cannot create pipe FDs"); goto out; } @@ -2568,11 +2179,17 @@ void qmp_guest_set_user_password(const char *username, reopen_fd_to_null(1); reopen_fd_to_null(2); +#ifdef __FreeBSD__ + const char *h_arg; + h_arg = (crypted) ? 
"-H" : "-h"; + execl(passwd_path, "pw", "usermod", "-n", username, h_arg, "0", NULL); +#else if (crypted) { - execle(passwd_path, "chpasswd", "-e", NULL, environ); + execl(passwd_path, "chpasswd", "-e", NULL); } else { - execle(passwd_path, "chpasswd", NULL, environ); + execl(passwd_path, "chpasswd", NULL); } +#endif _exit(EXIT_FAILURE); } else if (pid < 0) { error_setg_errno(errp, errno, "failed to create child process"); @@ -2615,7 +2232,17 @@ out: close(datafd[1]); } } +#else /* __linux__ || __FreeBSD__ */ +void qmp_guest_set_user_password(const char *username, + const char *password, + bool crypted, + Error **errp) +{ + error_setg(errp, QERR_UNSUPPORTED); +} +#endif /* __linux__ || __FreeBSD__ */ +#ifdef __linux__ static void ga_read_sysfs_file(int dirfd, const char *pathname, char *buf, int size, Error **errp) { @@ -2923,6 +2550,206 @@ GuestMemoryBlockInfo *qmp_guest_get_memory_block_info(Error **errp) return info; } +#define MAX_NAME_LEN 128 +static GuestDiskStatsInfoList *guest_get_diskstats(Error **errp) +{ +#ifdef CONFIG_LINUX + GuestDiskStatsInfoList *head = NULL, **tail = &head; + const char *diskstats = "/proc/diskstats"; + FILE *fp; + size_t n; + char *line = NULL; + + fp = fopen(diskstats, "r"); + if (fp == NULL) { + error_setg_errno(errp, errno, "open(\"%s\")", diskstats); + return NULL; + } + + while (getline(&line, &n, fp) != -1) { + g_autofree GuestDiskStatsInfo *diskstatinfo = NULL; + g_autofree GuestDiskStats *diskstat = NULL; + char dev_name[MAX_NAME_LEN]; + unsigned int ios_pgr, tot_ticks, rq_ticks, wr_ticks, dc_ticks, fl_ticks; + unsigned long rd_ios, rd_merges_or_rd_sec, rd_ticks_or_wr_sec, wr_ios; + unsigned long wr_merges, rd_sec_or_wr_ios, wr_sec; + unsigned long dc_ios, dc_merges, dc_sec, fl_ios; + unsigned int major, minor; + int i; + + i = sscanf(line, "%u %u %s %lu %lu %lu" + "%lu %lu %lu %lu %u %u %u %u" + "%lu %lu %lu %u %lu %u", + &major, &minor, dev_name, + &rd_ios, &rd_merges_or_rd_sec, &rd_sec_or_wr_ios, + &rd_ticks_or_wr_sec, &wr_ios, &wr_merges, &wr_sec, + &wr_ticks, &ios_pgr, &tot_ticks, &rq_ticks, + &dc_ios, &dc_merges, &dc_sec, &dc_ticks, + &fl_ios, &fl_ticks); + + if (i < 7) { + continue; + } + + diskstatinfo = g_new0(GuestDiskStatsInfo, 1); + diskstatinfo->name = g_strdup(dev_name); + diskstatinfo->major = major; + diskstatinfo->minor = minor; + + diskstat = g_new0(GuestDiskStats, 1); + if (i == 7) { + diskstat->has_read_ios = true; + diskstat->read_ios = rd_ios; + diskstat->has_read_sectors = true; + diskstat->read_sectors = rd_merges_or_rd_sec; + diskstat->has_write_ios = true; + diskstat->write_ios = rd_sec_or_wr_ios; + diskstat->has_write_sectors = true; + diskstat->write_sectors = rd_ticks_or_wr_sec; + } + if (i >= 14) { + diskstat->has_read_ios = true; + diskstat->read_ios = rd_ios; + diskstat->has_read_sectors = true; + diskstat->read_sectors = rd_sec_or_wr_ios; + diskstat->has_read_merges = true; + diskstat->read_merges = rd_merges_or_rd_sec; + diskstat->has_read_ticks = true; + diskstat->read_ticks = rd_ticks_or_wr_sec; + diskstat->has_write_ios = true; + diskstat->write_ios = wr_ios; + diskstat->has_write_sectors = true; + diskstat->write_sectors = wr_sec; + diskstat->has_write_merges = true; + diskstat->write_merges = wr_merges; + diskstat->has_write_ticks = true; + diskstat->write_ticks = wr_ticks; + diskstat->has_ios_pgr = true; + diskstat->ios_pgr = ios_pgr; + diskstat->has_total_ticks = true; + diskstat->total_ticks = tot_ticks; + diskstat->has_weight_ticks = true; + diskstat->weight_ticks = rq_ticks; + } + if (i >= 18) { + 
diskstat->has_discard_ios = true; + diskstat->discard_ios = dc_ios; + diskstat->has_discard_merges = true; + diskstat->discard_merges = dc_merges; + diskstat->has_discard_sectors = true; + diskstat->discard_sectors = dc_sec; + diskstat->has_discard_ticks = true; + diskstat->discard_ticks = dc_ticks; + } + if (i >= 20) { + diskstat->has_flush_ios = true; + diskstat->flush_ios = fl_ios; + diskstat->has_flush_ticks = true; + diskstat->flush_ticks = fl_ticks; + } + + diskstatinfo->stats = g_steal_pointer(&diskstat); + QAPI_LIST_APPEND(tail, diskstatinfo); + diskstatinfo = NULL; + } + free(line); + fclose(fp); + return head; +#else + g_debug("disk stats reporting available only for Linux"); + return NULL; +#endif +} + +GuestDiskStatsInfoList *qmp_guest_get_diskstats(Error **errp) +{ + return guest_get_diskstats(errp); +} + +GuestCpuStatsList *qmp_guest_get_cpustats(Error **errp) +{ + GuestCpuStatsList *head = NULL, **tail = &head; + const char *cpustats = "/proc/stat"; + int clk_tck = sysconf(_SC_CLK_TCK); + FILE *fp; + size_t n; + char *line = NULL; + + fp = fopen(cpustats, "r"); + if (fp == NULL) { + error_setg_errno(errp, errno, "open(\"%s\")", cpustats); + return NULL; + } + + while (getline(&line, &n, fp) != -1) { + GuestCpuStats *cpustat = NULL; + GuestLinuxCpuStats *linuxcpustat; + int i; + unsigned long user, system, idle, iowait, irq, softirq, steal, guest; + unsigned long nice, guest_nice; + char name[64]; + + i = sscanf(line, "%s %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu", + name, &user, &nice, &system, &idle, &iowait, &irq, &softirq, + &steal, &guest, &guest_nice); + + /* drop "cpu 1 2 3 ...", get "cpuX 1 2 3 ..." only */ + if ((i == EOF) || strncmp(name, "cpu", 3) || (name[3] == '\0')) { + continue; + } + + if (i < 5) { + slog("Parsing cpu stat from %s failed, see \"man proc\"", cpustats); + break; + } + + cpustat = g_new0(GuestCpuStats, 1); + cpustat->type = GUEST_CPU_STATS_TYPE_LINUX; + + linuxcpustat = &cpustat->u.q_linux; + linuxcpustat->cpu = atoi(&name[3]); + linuxcpustat->user = user * 1000 / clk_tck; + linuxcpustat->nice = nice * 1000 / clk_tck; + linuxcpustat->system = system * 1000 / clk_tck; + linuxcpustat->idle = idle * 1000 / clk_tck; + + if (i > 5) { + linuxcpustat->has_iowait = true; + linuxcpustat->iowait = iowait * 1000 / clk_tck; + } + + if (i > 6) { + linuxcpustat->has_irq = true; + linuxcpustat->irq = irq * 1000 / clk_tck; + linuxcpustat->has_softirq = true; + linuxcpustat->softirq = softirq * 1000 / clk_tck; + } + + if (i > 8) { + linuxcpustat->has_steal = true; + linuxcpustat->steal = steal * 1000 / clk_tck; + } + + if (i > 9) { + linuxcpustat->has_guest = true; + linuxcpustat->guest = guest * 1000 / clk_tck; + } + + if (i > 10) { + linuxcpustat->has_guest = true; + linuxcpustat->guest = guest * 1000 / clk_tck; + linuxcpustat->has_guestnice = true; + linuxcpustat->guestnice = guest_nice * 1000 / clk_tck; + } + + QAPI_LIST_APPEND(tail, cpustat); + } + + free(line); + fclose(fp); + return head; +} + #else /* defined(__linux__) */ void qmp_guest_suspend_disk(Error **errp) @@ -2940,12 +2767,6 @@ void qmp_guest_suspend_hybrid(Error **errp) error_setg(errp, QERR_UNSUPPORTED); } -GuestNetworkInterfaceList *qmp_guest_network_get_interfaces(Error **errp) -{ - error_setg(errp, QERR_UNSUPPORTED); - return NULL; -} - GuestLogicalProcessorList *qmp_guest_get_vcpus(Error **errp) { error_setg(errp, QERR_UNSUPPORTED); @@ -2958,14 +2779,6 @@ int64_t qmp_guest_set_vcpus(GuestLogicalProcessorList *vcpus, Error **errp) return -1; } -void qmp_guest_set_user_password(const char 
*username, - const char *password, - bool crypted, - Error **errp) -{ - error_setg(errp, QERR_UNSUPPORTED); -} - GuestMemoryBlockList *qmp_guest_get_memory_blocks(Error **errp) { error_setg(errp, QERR_UNSUPPORTED); @@ -2987,6 +2800,274 @@ GuestMemoryBlockInfo *qmp_guest_get_memory_block_info(Error **errp) #endif +#ifdef HAVE_GETIFADDRS +static GuestNetworkInterface * +guest_find_interface(GuestNetworkInterfaceList *head, + const char *name) +{ + for (; head; head = head->next) { + if (strcmp(head->value->name, name) == 0) { + return head->value; + } + } + + return NULL; +} + +static int guest_get_network_stats(const char *name, + GuestNetworkInterfaceStat *stats) +{ +#ifdef CONFIG_LINUX + int name_len; + char const *devinfo = "/proc/net/dev"; + FILE *fp; + char *line = NULL, *colon; + size_t n = 0; + fp = fopen(devinfo, "r"); + if (!fp) { + g_debug("failed to open network stats %s: %s", devinfo, + g_strerror(errno)); + return -1; + } + name_len = strlen(name); + while (getline(&line, &n, fp) != -1) { + long long dummy; + long long rx_bytes; + long long rx_packets; + long long rx_errs; + long long rx_dropped; + long long tx_bytes; + long long tx_packets; + long long tx_errs; + long long tx_dropped; + char *trim_line; + trim_line = g_strchug(line); + if (trim_line[0] == '\0') { + continue; + } + colon = strchr(trim_line, ':'); + if (!colon) { + continue; + } + if (colon - name_len == trim_line && + strncmp(trim_line, name, name_len) == 0) { + if (sscanf(colon + 1, + "%lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld", + &rx_bytes, &rx_packets, &rx_errs, &rx_dropped, + &dummy, &dummy, &dummy, &dummy, + &tx_bytes, &tx_packets, &tx_errs, &tx_dropped, + &dummy, &dummy, &dummy, &dummy) != 16) { + continue; + } + stats->rx_bytes = rx_bytes; + stats->rx_packets = rx_packets; + stats->rx_errs = rx_errs; + stats->rx_dropped = rx_dropped; + stats->tx_bytes = tx_bytes; + stats->tx_packets = tx_packets; + stats->tx_errs = tx_errs; + stats->tx_dropped = tx_dropped; + fclose(fp); + g_free(line); + return 0; + } + } + fclose(fp); + g_free(line); + g_debug("/proc/net/dev: Interface '%s' not found", name); +#else /* !CONFIG_LINUX */ + g_debug("Network stats reporting available only for Linux"); +#endif /* !CONFIG_LINUX */ + return -1; +} + +#ifndef __FreeBSD__ +/* + * Fill "buf" with MAC address by ifaddrs. Pointer buf must point to a + * buffer with ETHER_ADDR_LEN length at least. + * + * Returns false in case of an error, otherwise true. "obtained" argument + * is true if a MAC address was obtained successful, otherwise false. + */ +bool guest_get_hw_addr(struct ifaddrs *ifa, unsigned char *buf, + bool *obtained, Error **errp) +{ + struct ifreq ifr; + int sock; + + *obtained = false; + + /* we haven't obtained HW address yet */ + sock = socket(PF_INET, SOCK_STREAM, 0); + if (sock == -1) { + error_setg_errno(errp, errno, "failed to create socket"); + return false; + } + + memset(&ifr, 0, sizeof(ifr)); + pstrcpy(ifr.ifr_name, IF_NAMESIZE, ifa->ifa_name); + if (ioctl(sock, SIOCGIFHWADDR, &ifr) == -1) { + /* + * We can't get the hw addr of this interface, but that's not a + * fatal error. + */ + if (errno == EADDRNOTAVAIL) { + /* The interface doesn't have a hw addr (e.g. loopback). 
*/ + g_debug("failed to get MAC address of %s: %s", + ifa->ifa_name, strerror(errno)); + } else{ + g_warning("failed to get MAC address of %s: %s", + ifa->ifa_name, strerror(errno)); + } + } else { +#ifdef CONFIG_SOLARIS + memcpy(buf, &ifr.ifr_addr.sa_data, ETHER_ADDR_LEN); +#else + memcpy(buf, &ifr.ifr_hwaddr.sa_data, ETHER_ADDR_LEN); +#endif + *obtained = true; + } + close(sock); + return true; +} +#endif /* __FreeBSD__ */ + +/* + * Build information about guest interfaces + */ +GuestNetworkInterfaceList *qmp_guest_network_get_interfaces(Error **errp) +{ + GuestNetworkInterfaceList *head = NULL, **tail = &head; + struct ifaddrs *ifap, *ifa; + + if (getifaddrs(&ifap) < 0) { + error_setg_errno(errp, errno, "getifaddrs failed"); + goto error; + } + + for (ifa = ifap; ifa; ifa = ifa->ifa_next) { + GuestNetworkInterface *info; + GuestIpAddressList **address_tail; + GuestIpAddress *address_item = NULL; + GuestNetworkInterfaceStat *interface_stat = NULL; + char addr4[INET_ADDRSTRLEN]; + char addr6[INET6_ADDRSTRLEN]; + unsigned char mac_addr[ETHER_ADDR_LEN]; + bool obtained; + void *p; + + g_debug("Processing %s interface", ifa->ifa_name); + + info = guest_find_interface(head, ifa->ifa_name); + + if (!info) { + info = g_malloc0(sizeof(*info)); + info->name = g_strdup(ifa->ifa_name); + + QAPI_LIST_APPEND(tail, info); + } + + if (!info->has_hardware_address) { + if (!guest_get_hw_addr(ifa, mac_addr, &obtained, errp)) { + goto error; + } + if (obtained) { + info->hardware_address = + g_strdup_printf("%02x:%02x:%02x:%02x:%02x:%02x", + (int) mac_addr[0], (int) mac_addr[1], + (int) mac_addr[2], (int) mac_addr[3], + (int) mac_addr[4], (int) mac_addr[5]); + info->has_hardware_address = true; + } + } + + if (ifa->ifa_addr && + ifa->ifa_addr->sa_family == AF_INET) { + /* interface with IPv4 address */ + p = &((struct sockaddr_in *)ifa->ifa_addr)->sin_addr; + if (!inet_ntop(AF_INET, p, addr4, sizeof(addr4))) { + error_setg_errno(errp, errno, "inet_ntop failed"); + goto error; + } + + address_item = g_malloc0(sizeof(*address_item)); + address_item->ip_address = g_strdup(addr4); + address_item->ip_address_type = GUEST_IP_ADDRESS_TYPE_IPV4; + + if (ifa->ifa_netmask) { + /* Count the number of set bits in netmask. + * This is safe as '1' and '0' cannot be shuffled in netmask. */ + p = &((struct sockaddr_in *)ifa->ifa_netmask)->sin_addr; + address_item->prefix = ctpop32(((uint32_t *) p)[0]); + } + } else if (ifa->ifa_addr && + ifa->ifa_addr->sa_family == AF_INET6) { + /* interface with IPv6 address */ + p = &((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr; + if (!inet_ntop(AF_INET6, p, addr6, sizeof(addr6))) { + error_setg_errno(errp, errno, "inet_ntop failed"); + goto error; + } + + address_item = g_malloc0(sizeof(*address_item)); + address_item->ip_address = g_strdup(addr6); + address_item->ip_address_type = GUEST_IP_ADDRESS_TYPE_IPV6; + + if (ifa->ifa_netmask) { + /* Count the number of set bits in netmask. + * This is safe as '1' and '0' cannot be shuffled in netmask. 
*/ + p = &((struct sockaddr_in6 *)ifa->ifa_netmask)->sin6_addr; + address_item->prefix = + ctpop32(((uint32_t *) p)[0]) + + ctpop32(((uint32_t *) p)[1]) + + ctpop32(((uint32_t *) p)[2]) + + ctpop32(((uint32_t *) p)[3]); + } + } + + if (!address_item) { + continue; + } + + address_tail = &info->ip_addresses; + while (*address_tail) { + address_tail = &(*address_tail)->next; + } + QAPI_LIST_APPEND(address_tail, address_item); + + info->has_ip_addresses = true; + + if (!info->has_statistics) { + interface_stat = g_malloc0(sizeof(*interface_stat)); + if (guest_get_network_stats(info->name, interface_stat) == -1) { + info->has_statistics = false; + g_free(interface_stat); + } else { + info->statistics = interface_stat; + info->has_statistics = true; + } + } + } + + freeifaddrs(ifap); + return head; + +error: + freeifaddrs(ifap); + qapi_free_GuestNetworkInterfaceList(head); + return NULL; +} + +#else + +GuestNetworkInterfaceList *qmp_guest_network_get_interfaces(Error **errp) +{ + error_setg(errp, QERR_UNSUPPORTED); + return NULL; +} + +#endif /* HAVE_GETIFADDRS */ + #if !defined(CONFIG_FSFREEZE) GuestFilesystemInfoList *qmp_guest_get_fsinfo(Error **errp) @@ -3031,6 +3112,18 @@ GuestDiskInfoList *qmp_guest_get_disks(Error **errp) return NULL; } +GuestDiskStatsInfoList *qmp_guest_get_diskstats(Error **errp) +{ + error_setg(errp, QERR_UNSUPPORTED); + return NULL; +} + +GuestCpuStatsList *qmp_guest_get_cpustats(Error **errp) +{ + error_setg(errp, QERR_UNSUPPORTED); + return NULL; +} + #endif /* CONFIG_FSFREEZE */ #if !defined(CONFIG_FSTRIM) @@ -3042,26 +3135,30 @@ qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **errp) } #endif -/* add unsupported commands to the blacklist */ -GList *ga_command_blacklist_init(GList *blacklist) +/* add unsupported commands to the list of blocked RPCs */ +GList *ga_command_init_blockedrpcs(GList *blockedrpcs) { #if !defined(__linux__) { const char *list[] = { "guest-suspend-disk", "guest-suspend-ram", - "guest-suspend-hybrid", "guest-network-get-interfaces", - "guest-get-vcpus", "guest-set-vcpus", + "guest-suspend-hybrid", "guest-get-vcpus", "guest-set-vcpus", "guest-get-memory-blocks", "guest-set-memory-blocks", "guest-get-memory-block-size", "guest-get-memory-block-info", NULL}; char **p = (char **)list; while (*p) { - blacklist = g_list_append(blacklist, g_strdup(*p++)); + blockedrpcs = g_list_append(blockedrpcs, g_strdup(*p++)); } } #endif +#if !defined(HAVE_GETIFADDRS) + blockedrpcs = g_list_append(blockedrpcs, + g_strdup("guest-network-get-interfaces")); +#endif + #if !defined(CONFIG_FSFREEZE) { const char *list[] = { @@ -3072,18 +3169,18 @@ GList *ga_command_blacklist_init(GList *blacklist) char **p = (char **)list; while (*p) { - blacklist = g_list_append(blacklist, g_strdup(*p++)); + blockedrpcs = g_list_append(blockedrpcs, g_strdup(*p++)); } } #endif #if !defined(CONFIG_FSTRIM) - blacklist = g_list_append(blacklist, g_strdup("guest-fstrim")); + blockedrpcs = g_list_append(blockedrpcs, g_strdup("guest-fstrim")); #endif - blacklist = g_list_append(blacklist, g_strdup("guest-get-devices")); + blockedrpcs = g_list_append(blockedrpcs, g_strdup("guest-get-devices")); - return blacklist; + return blockedrpcs; } /* register init/cleanup routines for stateful command groups */ @@ -3303,3 +3400,38 @@ GuestDeviceInfoList *qmp_guest_get_devices(Error **errp) return NULL; } + +#ifndef HOST_NAME_MAX +# ifdef _POSIX_HOST_NAME_MAX +# define HOST_NAME_MAX _POSIX_HOST_NAME_MAX +# else +# define HOST_NAME_MAX 255 +# endif +#endif + +char *qga_get_host_name(Error 
**errp) +{ + long len = -1; + g_autofree char *hostname = NULL; + +#ifdef _SC_HOST_NAME_MAX + len = sysconf(_SC_HOST_NAME_MAX); +#endif /* _SC_HOST_NAME_MAX */ + + if (len < 0) { + len = HOST_NAME_MAX; + } + + /* Unfortunately, gethostname() below does not guarantee a + * NULL terminated string. Therefore, allocate one byte more + * to be sure. */ + hostname = g_new0(char, len + 1); + + if (gethostname(hostname, len) < 0) { + error_setg_errno(errp, errno, + "cannot get hostname"); + return NULL; + } + + return g_steal_pointer(&hostname); +} diff --git a/qga/commands-win32.c b/qga/commands-win32.c index 7bac0c5d42..ec9f55b453 100644 --- a/qga/commands-win32.c +++ b/qga/commands-win32.c @@ -18,10 +18,8 @@ #include #include #include -#ifdef CONFIG_QGA_NTDDSCSI #include #include -#endif #include #include #include @@ -474,8 +472,6 @@ void qmp_guest_file_flush(int64_t handle, Error **errp) } } -#ifdef CONFIG_QGA_NTDDSCSI - static GuestDiskBusType win2qemu[] = { [BusTypeUnknown] = GUEST_DISK_BUS_TYPE_UNKNOWN, [BusTypeScsi] = GUEST_DISK_BUS_TYPE_SCSI, @@ -494,6 +490,11 @@ static GuestDiskBusType win2qemu[] = { #if (_WIN32_WINNT >= 0x0601) [BusTypeVirtual] = GUEST_DISK_BUS_TYPE_VIRTUAL, [BusTypeFileBackedVirtual] = GUEST_DISK_BUS_TYPE_FILE_BACKED_VIRTUAL, + /* + * BusTypeSpaces currently is not suported + */ + [BusTypeSpaces] = GUEST_DISK_BUS_TYPE_UNKNOWN, + [BusTypeNvme] = GUEST_DISK_BUS_TYPE_NVME, #endif }; @@ -512,15 +513,102 @@ DEFINE_GUID(GUID_DEVINTERFACE_STORAGEPORT, 0x2accfe60L, 0xc130, 0x11d2, 0xb0, 0x82, 0x00, 0xa0, 0xc9, 0x1e, 0xfb, 0x8b); +static void get_pci_address_for_device(GuestPCIAddress *pci, + HDEVINFO dev_info) +{ + SP_DEVINFO_DATA dev_info_data; + DWORD j; + DWORD size; + bool partial_pci = false; + + dev_info_data.cbSize = sizeof(SP_DEVINFO_DATA); + + for (j = 0; + SetupDiEnumDeviceInfo(dev_info, j, &dev_info_data); + j++) { + DWORD addr, bus, ui_slot, type; + int func, slot; + size = sizeof(DWORD); + + /* + * There is no need to allocate buffer in the next functions. The + * size is known and ULONG according to + * https://msdn.microsoft.com/en-us/library/windows/hardware/ff543095(v=vs.85).aspx + */ + if (!SetupDiGetDeviceRegistryProperty( + dev_info, &dev_info_data, SPDRP_BUSNUMBER, + &type, (PBYTE)&bus, size, NULL)) { + debug_error("failed to get PCI bus"); + bus = -1; + partial_pci = true; + } + + /* + * The function retrieves the device's address. This value will be + * transformed into device function and number + */ + if (!SetupDiGetDeviceRegistryProperty( + dev_info, &dev_info_data, SPDRP_ADDRESS, + &type, (PBYTE)&addr, size, NULL)) { + debug_error("failed to get PCI address"); + addr = -1; + partial_pci = true; + } + + /* + * This call returns UINumber of DEVICE_CAPABILITIES structure. + * This number is typically a user-perceived slot number. + */ + if (!SetupDiGetDeviceRegistryProperty( + dev_info, &dev_info_data, SPDRP_UI_NUMBER, + &type, (PBYTE)&ui_slot, size, NULL)) { + debug_error("failed to get PCI slot"); + ui_slot = -1; + partial_pci = true; + } + + /* + * SetupApi gives us the same information as driver with + * IoGetDeviceProperty. According to Microsoft: + * + * FunctionNumber = (USHORT)((propertyAddress) & 0x0000FFFF) + * DeviceNumber = (USHORT)(((propertyAddress) >> 16) & 0x0000FFFF) + * SPDRP_ADDRESS is propertyAddress, so we do the same. 
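+         *
+         * For example, a property address of 0x00030002 decodes to
+         * DeviceNumber (slot) 3 and FunctionNumber 2.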
+ * + * https://docs.microsoft.com/en-us/windows/desktop/api/setupapi/nf-setupapi-setupdigetdeviceregistrypropertya + */ + if (partial_pci) { + pci->domain = -1; + pci->slot = -1; + pci->function = -1; + pci->bus = -1; + continue; + } else { + func = ((int)addr == -1) ? -1 : addr & 0x0000FFFF; + slot = ((int)addr == -1) ? -1 : (addr >> 16) & 0x0000FFFF; + if ((int)ui_slot != slot) { + g_debug("mismatch with reported slot values: %d vs %d", + (int)ui_slot, slot); + } + pci->domain = 0; + pci->slot = (int)ui_slot; + pci->function = func; + pci->bus = (int)bus; + return; + } + } +} + static GuestPCIAddress *get_pci_info(int number, Error **errp) { - HDEVINFO dev_info; + HDEVINFO dev_info = INVALID_HANDLE_VALUE; + HDEVINFO parent_dev_info = INVALID_HANDLE_VALUE; + SP_DEVINFO_DATA dev_info_data; SP_DEVICE_INTERFACE_DATA dev_iface_data; HANDLE dev_file; int i; GuestPCIAddress *pci = NULL; - bool partial_pci = false; pci = g_malloc0(sizeof(*pci)); pci->domain = -1; @@ -532,29 +620,27 @@ static GuestPCIAddress *get_pci_info(int number, Error **errp) DIGCF_PRESENT | DIGCF_DEVICEINTERFACE); if (dev_info == INVALID_HANDLE_VALUE) { error_setg_win32(errp, GetLastError(), "failed to get devices tree"); - goto out; + goto end; } g_debug("enumerating devices"); dev_info_data.cbSize = sizeof(SP_DEVINFO_DATA); dev_iface_data.cbSize = sizeof(SP_DEVICE_INTERFACE_DATA); for (i = 0; SetupDiEnumDeviceInfo(dev_info, i, &dev_info_data); i++) { - PSP_DEVICE_INTERFACE_DETAIL_DATA pdev_iface_detail_data = NULL; + g_autofree PSP_DEVICE_INTERFACE_DETAIL_DATA pdev_iface_detail_data = NULL; STORAGE_DEVICE_NUMBER sdn; - char *parent_dev_id = NULL; - HDEVINFO parent_dev_info; + g_autofree char *parent_dev_id = NULL; SP_DEVINFO_DATA parent_dev_info_data; - DWORD j; DWORD size = 0; g_debug("getting device path"); if (SetupDiEnumDeviceInterfaces(dev_info, &dev_info_data, &GUID_DEVINTERFACE_DISK, 0, &dev_iface_data)) { - while (!SetupDiGetDeviceInterfaceDetail(dev_info, &dev_iface_data, - pdev_iface_detail_data, - size, &size, - &dev_info_data)) { + if (!SetupDiGetDeviceInterfaceDetail(dev_info, &dev_iface_data, + pdev_iface_detail_data, + size, &size, + &dev_info_data)) { if (GetLastError() == ERROR_INSUFFICIENT_BUFFER) { pdev_iface_detail_data = g_malloc(size); pdev_iface_detail_data->cbSize = @@ -562,21 +648,30 @@ static GuestPCIAddress *get_pci_info(int number, Error **errp) } else { error_setg_win32(errp, GetLastError(), "failed to get device interfaces"); - goto free_dev_info; + goto end; } } + if (!SetupDiGetDeviceInterfaceDetail(dev_info, &dev_iface_data, + pdev_iface_detail_data, + size, &size, + &dev_info_data)) { + // pdev_iface_detail_data already is allocated + error_setg_win32(errp, GetLastError(), + "failed to get device interfaces"); + goto end; + } + dev_file = CreateFile(pdev_iface_detail_data->DevicePath, 0, FILE_SHARE_READ, NULL, OPEN_EXISTING, 0, NULL); - g_free(pdev_iface_detail_data); if (!DeviceIoControl(dev_file, IOCTL_STORAGE_GET_DEVICE_NUMBER, NULL, 0, &sdn, sizeof(sdn), &size, NULL)) { CloseHandle(dev_file); error_setg_win32(errp, GetLastError(), "failed to get device slot number"); - goto free_dev_info; + goto end; } CloseHandle(dev_file); @@ -586,7 +681,7 @@ static GuestPCIAddress *get_pci_info(int number, Error **errp) } else { error_setg_win32(errp, GetLastError(), "failed to get device interfaces"); - goto free_dev_info; + goto end; } g_debug("found device slot %d. 
Getting storage controller", number); @@ -596,17 +691,25 @@ static GuestPCIAddress *get_pci_info(int number, Error **errp) ULONG dev_id_size = 0; size = 0; - while (!SetupDiGetDeviceInstanceId(dev_info, &dev_info_data, - parent_dev_id, size, &size)) { + if (!SetupDiGetDeviceInstanceId(dev_info, &dev_info_data, + parent_dev_id, size, &size)) { if (GetLastError() == ERROR_INSUFFICIENT_BUFFER) { parent_dev_id = g_malloc(size); } else { error_setg_win32(errp, GetLastError(), "failed to get device instance ID"); - goto out; + goto end; } } + if (!SetupDiGetDeviceInstanceId(dev_info, &dev_info_data, + parent_dev_id, size, &size)) { + // parent_dev_id already is allocated + error_setg_win32(errp, GetLastError(), + "failed to get device instance ID"); + goto end; + } + /* * CM API used here as opposed to * SetupDiGetDeviceProperty(..., DEVPKEY_Device_Parent, ...) @@ -617,14 +720,14 @@ static GuestPCIAddress *get_pci_info(int number, Error **errp) g_error("CM_Locate_DevInst failed with code %lx", cr); error_setg_win32(errp, GetLastError(), "failed to get device instance"); - goto out; + goto end; } cr = CM_Get_Parent(&parent_dev_inst, dev_inst, 0); if (cr != CR_SUCCESS) { g_error("CM_Get_Parent failed with code %lx", cr); error_setg_win32(errp, GetLastError(), "failed to get parent device instance"); - goto out; + goto end; } cr = CM_Get_Device_ID_Size(&dev_id_size, parent_dev_inst, 0); @@ -632,7 +735,7 @@ static GuestPCIAddress *get_pci_info(int number, Error **errp) g_error("CM_Get_Device_ID_Size failed with code %lx", cr); error_setg_win32(errp, GetLastError(), "failed to get parent device ID length"); - goto out; + goto end; } ++dev_id_size; @@ -647,7 +750,7 @@ static GuestPCIAddress *get_pci_info(int number, Error **errp) g_error("CM_Get_Device_ID failed with code %lx", cr); error_setg_win32(errp, GetLastError(), "failed to get parent device ID"); - goto out; + goto end; } } @@ -656,101 +759,32 @@ static GuestPCIAddress *get_pci_info(int number, Error **errp) parent_dev_info = SetupDiGetClassDevs(&GUID_DEVINTERFACE_STORAGEPORT, parent_dev_id, NULL, DIGCF_PRESENT | DIGCF_DEVICEINTERFACE); - g_free(parent_dev_id); if (parent_dev_info == INVALID_HANDLE_VALUE) { error_setg_win32(errp, GetLastError(), "failed to get parent device"); - goto out; + goto end; } parent_dev_info_data.cbSize = sizeof(SP_DEVINFO_DATA); if (!SetupDiEnumDeviceInfo(parent_dev_info, 0, &parent_dev_info_data)) { error_setg_win32(errp, GetLastError(), "failed to get parent device data"); - goto out; + goto end; } - for (j = 0; - SetupDiEnumDeviceInfo(parent_dev_info, j, &parent_dev_info_data); - j++) { - DWORD addr, bus, ui_slot, type; - int func, slot; + get_pci_address_for_device(pci, parent_dev_info); - /* - * There is no need to allocate buffer in the next functions. The - * size is known and ULONG according to - * https://msdn.microsoft.com/en-us/library/windows/hardware/ff543095(v=vs.85).aspx - */ - if (!SetupDiGetDeviceRegistryProperty( - parent_dev_info, &parent_dev_info_data, SPDRP_BUSNUMBER, - &type, (PBYTE)&bus, size, NULL)) { - debug_error("failed to get PCI bus"); - bus = -1; - partial_pci = true; - } - - /* - * The function retrieves the device's address. This value will be - * transformed into device function and number - */ - if (!SetupDiGetDeviceRegistryProperty( - parent_dev_info, &parent_dev_info_data, SPDRP_ADDRESS, - &type, (PBYTE)&addr, size, NULL)) { - debug_error("failed to get PCI address"); - addr = -1; - partial_pci = true; - } - - /* - * This call returns UINumber of DEVICE_CAPABILITIES structure. 
- * This number is typically a user-perceived slot number. - */ - if (!SetupDiGetDeviceRegistryProperty( - parent_dev_info, &parent_dev_info_data, SPDRP_UI_NUMBER, - &type, (PBYTE)&ui_slot, size, NULL)) { - debug_error("failed to get PCI slot"); - ui_slot = -1; - partial_pci = true; - } - - /* - * SetupApi gives us the same information as driver with - * IoGetDeviceProperty. According to Microsoft: - * - * FunctionNumber = (USHORT)((propertyAddress) & 0x0000FFFF) - * DeviceNumber = (USHORT)(((propertyAddress) >> 16) & 0x0000FFFF) - * SPDRP_ADDRESS is propertyAddress, so we do the same. - * - * https://docs.microsoft.com/en-us/windows/desktop/api/setupapi/nf-setupapi-setupdigetdeviceregistrypropertya - */ - if (partial_pci) { - pci->domain = -1; - pci->slot = -1; - pci->function = -1; - pci->bus = -1; - continue; - } else { - func = ((int)addr == -1) ? -1 : addr & 0x0000FFFF; - slot = ((int)addr == -1) ? -1 : (addr >> 16) & 0x0000FFFF; - if ((int)ui_slot != slot) { - g_debug("mismatch with reported slot values: %d vs %d", - (int)ui_slot, slot); - } - pci->domain = 0; - pci->slot = (int)ui_slot; - pci->function = func; - pci->bus = (int)bus; - break; - } - } - SetupDiDestroyDeviceInfoList(parent_dev_info); break; } -free_dev_info: - SetupDiDestroyDeviceInfoList(dev_info); -out: +end: + if (parent_dev_info != INVALID_HANDLE_VALUE) { + SetupDiDestroyDeviceInfoList(parent_dev_info); + } + if (dev_info != INVALID_HANDLE_VALUE) { + SetupDiDestroyDeviceInfoList(dev_info); + } return pci; } @@ -916,7 +950,7 @@ static GuestDiskAddressList *build_guest_disk_info(char *guid, Error **errp) } else if (last_err == ERROR_INVALID_FUNCTION) { /* Possibly CD-ROM or a shared drive. Try to pass the volume */ g_debug("volume not on disk"); - disk = g_malloc0(sizeof(GuestDiskAddress)); + disk = g_new0(GuestDiskAddress, 1); disk->has_dev = true; disk->dev = g_strdup(name); get_single_disk_info(0xffffffff, disk, &local_err); @@ -939,7 +973,7 @@ static GuestDiskAddressList *build_guest_disk_info(char *guid, Error **errp) /* Go through each extent */ for (i = 0; i < extents->NumberOfDiskExtents; i++) { - disk = g_malloc0(sizeof(GuestDiskAddress)); + disk = g_new0(GuestDiskAddress, 1); /* Disk numbers directly correspond to numbers used in UNCs * @@ -976,7 +1010,6 @@ out: GuestDiskInfoList *qmp_guest_get_disks(Error **errp) { - ERRP_GUARD(); GuestDiskInfoList *ret = NULL; HDEVINFO dev_info; SP_DEVICE_INTERFACE_DATA dev_iface_data; @@ -1044,7 +1077,7 @@ GuestDiskInfoList *qmp_guest_get_disks(Error **errp) sdn.DeviceNumber); g_debug(" number: %lu", sdn.DeviceNumber); - address = g_malloc0(sizeof(GuestDiskAddress)); + address = g_new0(GuestDiskAddress, 1); address->has_dev = true; address->dev = g_strdup(disk->name); get_single_disk_info(sdn.DeviceNumber, address, &local_err); @@ -1066,21 +1099,6 @@ GuestDiskInfoList *qmp_guest_get_disks(Error **errp) return ret; } -#else - -static GuestDiskAddressList *build_guest_disk_info(char *guid, Error **errp) -{ - return NULL; -} - -GuestDiskInfoList *qmp_guest_get_disks(Error **errp) -{ - error_setg(errp, QERR_UNSUPPORTED); - return NULL; -} - -#endif /* CONFIG_QGA_NTDDSCSI */ - static GuestFilesystemInfo *build_guest_fsinfo(char *guid, Error **errp) { DWORD info_size; @@ -1336,7 +1354,7 @@ qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **errp) continue; } - uc_path = g_malloc(sizeof(WCHAR) * char_count); + uc_path = g_new(WCHAR, char_count); if (!GetVolumePathNamesForVolumeNameW(guid, uc_path, char_count, &char_count) || !*uc_path) { /* strange, but this condition 
could be faced even with size == 2 */ @@ -1719,25 +1737,6 @@ static int64_t filetime_to_ns(const FILETIME *tf) - W32_FT_OFFSET) * 100; } -int64_t qmp_guest_get_time(Error **errp) -{ - SYSTEMTIME ts = {0}; - FILETIME tf; - - GetSystemTime(&ts); - if (ts.wYear < 1601 || ts.wYear > 30827) { - error_setg(errp, "Failed to get time"); - return -1; - } - - if (!SystemTimeToFileTime(&ts, &tf)) { - error_setg(errp, "Failed to convert system time: %d", (int)GetLastError()); - return -1; - } - - return filetime_to_ns(&tf); -} - void qmp_guest_set_time(bool has_time, int64_t time_ns, Error **errp) { Error *local_err = NULL; @@ -2006,8 +2005,8 @@ GuestMemoryBlockInfo *qmp_guest_get_memory_block_info(Error **errp) return NULL; } -/* add unsupported commands to the blacklist */ -GList *ga_command_blacklist_init(GList *blacklist) +/* add unsupported commands to the list of blocked RPCs */ +GList *ga_command_init_blockedrpcs(GList *blockedrpcs) { const char *list_unsupported[] = { "guest-suspend-hybrid", @@ -2018,7 +2017,7 @@ GList *ga_command_blacklist_init(GList *blacklist) char **p = (char **)list_unsupported; while (*p) { - blacklist = g_list_append(blacklist, g_strdup(*p++)); + blockedrpcs = g_list_append(blockedrpcs, g_strdup(*p++)); } if (!vss_init(true)) { @@ -2029,11 +2028,11 @@ GList *ga_command_blacklist_init(GList *blacklist) p = (char **)list; while (*p) { - blacklist = g_list_append(blacklist, g_strdup(*p++)); + blockedrpcs = g_list_append(blockedrpcs, g_strdup(*p++)); } } - return blacklist; + return blockedrpcs; } /* register init/cleanup routines for stateful command groups */ @@ -2138,7 +2137,7 @@ typedef struct _ga_matrix_lookup_t { char const *version_id; } ga_matrix_lookup_t; -static ga_matrix_lookup_t const WIN_VERSION_MATRIX[2][8] = { +static ga_matrix_lookup_t const WIN_VERSION_MATRIX[2][7] = { { /* Desktop editions */ { 5, 0, "Microsoft Windows 2000", "2000"}, @@ -2147,7 +2146,6 @@ static ga_matrix_lookup_t const WIN_VERSION_MATRIX[2][8] = { { 6, 1, "Microsoft Windows 7" "7"}, { 6, 2, "Microsoft Windows 8", "8"}, { 6, 3, "Microsoft Windows 8.1", "8.1"}, - {10, 0, "Microsoft Windows 10", "10"}, { 0, 0, 0} },{ /* Server editions */ @@ -2157,24 +2155,29 @@ static ga_matrix_lookup_t const WIN_VERSION_MATRIX[2][8] = { { 6, 2, "Microsoft Windows Server 2012", "2012"}, { 6, 3, "Microsoft Windows Server 2012 R2", "2012r2"}, { 0, 0, 0}, - { 0, 0, 0}, { 0, 0, 0} } }; -typedef struct _ga_win_10_0_server_t { - int final_build; +typedef struct _ga_win_10_0_t { + int first_build; char const *version; char const *version_id; -} ga_win_10_0_server_t; +} ga_win_10_0_t; -static ga_win_10_0_server_t const WIN_10_0_SERVER_VERSION_MATRIX[4] = { +static ga_win_10_0_t const WIN_10_0_SERVER_VERSION_MATRIX[4] = { {14393, "Microsoft Windows Server 2016", "2016"}, {17763, "Microsoft Windows Server 2019", "2019"}, {20344, "Microsoft Windows Server 2022", "2022"}, {0, 0} }; +static ga_win_10_0_t const WIN_10_0_CLIENT_VERSION_MATRIX[3] = { + {10240, "Microsoft Windows 10", "10"}, + {22000, "Microsoft Windows 11", "11"}, + {0, 0} +}; + static void ga_get_win_version(RTL_OSVERSIONINFOEXW *info, Error **errp) { typedef NTSTATUS(WINAPI *rtl_get_version_t)( @@ -2202,19 +2205,24 @@ static char *ga_get_win_name(OSVERSIONINFOEXW const *os_version, bool id) DWORD build = os_version->dwBuildNumber; int tbl_idx = (os_version->wProductType != VER_NT_WORKSTATION); ga_matrix_lookup_t const *table = WIN_VERSION_MATRIX[tbl_idx]; - ga_win_10_0_server_t const *win_10_0_table = WIN_10_0_SERVER_VERSION_MATRIX; + ga_win_10_0_t 
const *win_10_0_table = tbl_idx ? + WIN_10_0_SERVER_VERSION_MATRIX : WIN_10_0_CLIENT_VERSION_MATRIX; + ga_win_10_0_t const *win_10_0_version = NULL; while (table->version != NULL) { - if (major == 10 && minor == 0 && tbl_idx) { + if (major == 10 && minor == 0) { while (win_10_0_table->version != NULL) { - if (build <= win_10_0_table->final_build) { - if (id) { - return g_strdup(win_10_0_table->version_id); - } else { - return g_strdup(win_10_0_table->version); - } + if (build >= win_10_0_table->first_build) { + win_10_0_version = win_10_0_table; } win_10_0_table++; } + if (win_10_0_version) { + if (id) { + return g_strdup(win_10_0_version->version_id); + } else { + return g_strdup(win_10_0_version->version); + } + } } else if (major == table->major && minor == table->minor) { if (id) { return g_strdup(table->version_id); @@ -2516,3 +2524,28 @@ GuestDeviceInfoList *qmp_guest_get_devices(Error **errp) } return head; } + +char *qga_get_host_name(Error **errp) +{ + wchar_t tmp[MAX_COMPUTERNAME_LENGTH + 1]; + DWORD size = G_N_ELEMENTS(tmp); + + if (GetComputerNameW(tmp, &size) == 0) { + error_setg_win32(errp, GetLastError(), "failed to get computer name"); + return NULL; + } + + return g_utf16_to_utf8(tmp, size, NULL, NULL, NULL); +} + +GuestDiskStatsInfoList *qmp_guest_get_diskstats(Error **errp) +{ + error_setg(errp, QERR_UNSUPPORTED); + return NULL; +} + +GuestCpuStatsList *qmp_guest_get_cpustats(Error **errp) +{ + error_setg(errp, QERR_UNSUPPORTED); + return NULL; +} diff --git a/qga/commands.c b/qga/commands.c index 80501e4a73..6cf978322e 100644 --- a/qga/commands.c +++ b/qga/commands.c @@ -18,7 +18,6 @@ #include "qapi/qmp/qerror.h" #include "qemu/base64.h" #include "qemu/cutils.h" -#include "qemu/atomic.h" #include "commands-common.h" /* Maximum captured guest-exec out_data/err_data - 16MB */ @@ -33,9 +32,8 @@ #define GUEST_FILE_READ_COUNT_MAX (48 * MiB) /* Note: in some situations, like with the fsfreeze, logging may be - * temporarilly disabled. if it is necessary that a command be able - * to log for accounting purposes, check ga_logging_enabled() beforehand, - * and use the QERR_QGA_LOGGING_DISABLED to generate an error + * temporarily disabled. if it is necessary that a command be able + * to log for accounting purposes, check ga_logging_enabled() beforehand. */ void slog(const gchar *fmt, ...)
{ @@ -162,13 +160,12 @@ GuestExecStatus *qmp_guest_exec_status(int64_t pid, Error **errp) ges = g_new0(GuestExecStatus, 1); - bool finished = qatomic_mb_read(&gei->finished); + bool finished = gei->finished; /* need to wait till output channels are closed * to be sure we captured all output at this point */ if (gei->has_output) { - finished = finished && qatomic_mb_read(&gei->out.closed); - finished = finished && qatomic_mb_read(&gei->err.closed); + finished &= gei->out.closed && gei->err.closed; } ges->exited = finished; @@ -244,7 +241,7 @@ static char **guest_exec_get_args(const strList *entry, bool log) str = g_malloc(str_size); *str = 0; - args = g_malloc(count * sizeof(char *)); + args = g_new(char *, count); for (it = entry; it != NULL; it = it->next) { args[i++] = it->value; pstrcat(str, str_size, it->value); @@ -270,7 +267,7 @@ static void guest_exec_child_watch(GPid pid, gint status, gpointer data) (int32_t)gpid_to_int64(pid), (uint32_t)status); gei->status = status; - qatomic_mb_set(&gei->finished, true); + gei->finished = true; g_spawn_close_pid(pid); } @@ -326,7 +323,7 @@ static gboolean guest_exec_input_watch(GIOChannel *ch, done: g_io_channel_shutdown(ch, true, NULL); g_io_channel_unref(ch); - qatomic_mb_set(&p->closed, true); + p->closed = true; g_free(p->data); return false; @@ -380,7 +377,7 @@ static gboolean guest_exec_output_watch(GIOChannel *ch, close: g_io_channel_shutdown(ch, true, NULL); g_io_channel_unref(ch); - qatomic_mb_set(&p->closed, true); + p->closed = true; return false; } @@ -511,7 +508,7 @@ int ga_parse_whence(GuestFileWhence *whence, Error **errp) GuestHostName *qmp_guest_get_host_name(Error **errp) { GuestHostName *result = NULL; - g_autofree char *hostname = qemu_get_host_name(errp); + g_autofree char *hostname = qga_get_host_name(errp); /* * We want to avoid using g_get_host_name() because that @@ -585,3 +582,8 @@ GuestFileRead *qmp_guest_file_read(int64_t handle, bool has_count, return read_data; } + +int64_t qmp_guest_get_time(Error **errp) +{ + return g_get_real_time() * 1000; +} diff --git a/qga/cutils.c b/qga/cutils.c new file mode 100644 index 0000000000..b8e142ef64 --- /dev/null +++ b/qga/cutils.c @@ -0,0 +1,33 @@ +/* + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include "cutils.h" + +#include "qapi/error.h" + +/** + * qga_open_cloexec: + * @name: the pathname to open + * @flags: as in open() + * @mode: as in open() + * + * A wrapper for open() function which sets O_CLOEXEC. + * + * On error, -1 is returned. 
+ */ +int qga_open_cloexec(const char *name, int flags, mode_t mode) +{ + int ret; + +#ifdef O_CLOEXEC + ret = open(name, flags | O_CLOEXEC, mode); +#else + ret = open(name, flags, mode); + if (ret >= 0) { + qemu_set_cloexec(ret); + } +#endif + + return ret; +} diff --git a/qga/cutils.h b/qga/cutils.h new file mode 100644 index 0000000000..f0f30a7d28 --- /dev/null +++ b/qga/cutils.h @@ -0,0 +1,8 @@ +#ifndef CUTILS_H_ +#define CUTILS_H_ + +#include "qemu/osdep.h" + +int qga_open_cloexec(const char *name, int flags, mode_t mode); + +#endif /* CUTILS_H_ */ diff --git a/qga/guest-agent-core.h b/qga/guest-agent-core.h index 9d01ea9c82..b4e7c52c61 100644 --- a/qga/guest-agent-core.h +++ b/qga/guest-agent-core.h @@ -24,7 +24,7 @@ typedef struct GACommandState GACommandState; extern GAState *ga_state; extern QmpCommandList ga_commands; -GList *ga_command_blacklist_init(GList *blacklist); +GList *ga_command_init_blockedrpcs(GList *blockedrpcs); void ga_command_state_init(GAState *s, GACommandState *cs); void ga_command_state_add(GACommandState *cs, void (*init)(void), @@ -36,7 +36,7 @@ void ga_command_state_free(GACommandState *cs); bool ga_logging_enabled(GAState *s); void ga_disable_logging(GAState *s); void ga_enable_logging(GAState *s); -void GCC_FMT_ATTR(1, 2) slog(const gchar *fmt, ...); +void G_GNUC_PRINTF(1, 2) slog(const gchar *fmt, ...); void ga_set_response_delimited(GAState *s); bool ga_is_frozen(GAState *s); void ga_set_frozen(GAState *s); diff --git a/qga/installer/qemu-ga.wxs b/qga/installer/qemu-ga.wxs index 0950e8c6be..3442383627 100644 --- a/qga/installer/qemu-ga.wxs +++ b/qga/installer/qemu-ga.wxs @@ -1,63 +1,37 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + NOT VersionNT64 - + + @@ -66,7 +40,7 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + Key="Software\$(var.QEMU_GA_MANUFACTURER)\$(var.QEMU_GA_DISTRO)\Tools\QemuGA"> - + diff --git a/qga/main.c b/qga/main.c index 15fd3a4149..b3580508fa 100644 --- a/qga/main.c +++ b/qga/main.c @@ -18,7 +18,7 @@ #include #include #endif -#include "qemu-common.h" +#include "qemu/help-texts.h" #include "qapi/qmp/json-parser.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qjson.h" @@ -27,7 +27,6 @@ #include "qapi/qmp/qerror.h" #include "qapi/error.h" #include "channel.h" -#include "qemu/bswap.h" #include "qemu/cutils.h" #include "qemu/help_option.h" #include "qemu/sockets.h" @@ -38,17 +37,16 @@ #include "qga/service-win32.h" #include "qga/vss-win32.h" #endif -#ifdef __linux__ -#include -#ifdef FIFREEZE -#define CONFIG_FSFREEZE -#endif -#endif +#include "commands-common.h" #ifndef _WIN32 +#ifdef __FreeBSD__ +#define QGA_VIRTIO_PATH_DEFAULT "/dev/vtcon/org.qemu.guest_agent.0" +#else /* __FreeBSD__ */ #define QGA_VIRTIO_PATH_DEFAULT "/dev/virtio-ports/org.qemu.guest_agent.0" -#define QGA_STATE_RELATIVE_DIR "run" +#endif /* __FreeBSD__ */ #define QGA_SERIAL_PATH_DEFAULT "/dev/ttyS0" +#define QGA_STATE_RELATIVE_DIR "run" #else #define QGA_VIRTIO_PATH_DEFAULT "\\\\.\\Global\\org.qemu.guest_agent.0" #define QGA_STATE_RELATIVE_DIR "qemu-ga" @@ -88,7 +86,7 @@ struct GAState { #endif bool delimit_response; bool frozen; - GList *blacklist; + GList *blockedrpcs; char *state_filepath_isfrozen; struct { const char *log_filepath; @@ -108,7 +106,7 @@ struct GAState *ga_state; QmpCommandList ga_commands; /* commands that are safe to issue while filesystems are frozen */ -static const char *ga_freeze_whitelist[] = { +static const char *ga_freeze_allowlist[] = { "guest-ping", "guest-info", "guest-sync", @@ 
-130,12 +128,12 @@ static void stop_agent(GAState *s, bool requested); static void init_dfl_pathnames(void) { + g_autofree char *state = qemu_get_local_state_dir(); + g_assert(dfl_pathnames.state_dir == NULL); g_assert(dfl_pathnames.pidfile == NULL); - dfl_pathnames.state_dir = qemu_get_local_state_pathname( - QGA_STATE_RELATIVE_DIR); - dfl_pathnames.pidfile = qemu_get_local_state_pathname( - QGA_STATE_RELATIVE_DIR G_DIR_SEPARATOR_S "qemu-ga.pid"); + dfl_pathnames.state_dir = g_build_filename(state, QGA_STATE_RELATIVE_DIR, NULL); + dfl_pathnames.pidfile = g_build_filename(state, QGA_STATE_RELATIVE_DIR, "qemu-ga.pid", NULL); } static void quit_handler(int sig) @@ -224,6 +222,10 @@ void reopen_fd_to_null(int fd) static void usage(const char *cmd) { +#ifdef CONFIG_FSFREEZE + g_autofree char *fsfreeze_hook = get_relocated_path(QGA_FSFREEZE_HOOK_DEFAULT); +#endif + printf( "Usage: %s [-m -p ] []\n" "QEMU Guest Agent " QEMU_FULL_VERSION "\n" @@ -257,8 +259,8 @@ QEMU_COPYRIGHT "\n" #ifdef _WIN32 " -s, --service service commands: install, uninstall, vss-install, vss-uninstall\n" #endif -" -b, --blacklist comma-separated list of RPCs to disable (no spaces, \"?\"\n" -" to list available RPCs)\n" +" -b, --block-rpcs comma-separated list of RPCs to disable (no spaces,\n" +" use \"help\" to list available RPCs)\n" " -D, --dump-conf dump a qemu-ga config file based on current config\n" " options / command-line parameters to stdout\n" " -r, --retry-path attempt re-opening path if it's unavailable or closed\n" @@ -271,7 +273,7 @@ QEMU_HELP_BOTTOM "\n" , cmd, QGA_VIRTIO_PATH_DEFAULT, QGA_SERIAL_PATH_DEFAULT, dfl_pathnames.pidfile, #ifdef CONFIG_FSFREEZE - QGA_FSFREEZE_HOOK_DEFAULT, + fsfreeze_hook, #endif dfl_pathnames.state_dir); } @@ -315,7 +317,6 @@ static void ga_log(const gchar *domain, GLogLevelFlags level, const gchar *msg, gpointer opaque) { GAState *s = opaque; - GTimeVal time; const char *level_str = ga_log_level_str(level); if (!ga_logging_enabled(s)) { @@ -330,9 +331,9 @@ static void ga_log(const gchar *domain, GLogLevelFlags level, #else if (level & s->log_level) { #endif - g_get_current_time(&time); - fprintf(s->log_file, - "%lu.%lu: %s: %s\n", time.tv_sec, time.tv_usec, level_str, msg); + g_autoptr(GDateTime) now = g_date_time_new_now_utc(); + g_autofree char *nowstr = g_date_time_format(now, "%s.%f"); + fprintf(s->log_file, "%s: %s: %s\n", nowstr, level_str, msg); fflush(s->log_file); } } @@ -361,31 +362,31 @@ static gint ga_strcmp(gconstpointer str1, gconstpointer str2) } /* disable commands that aren't safe for fsfreeze */ -static void ga_disable_non_whitelisted(const QmpCommand *cmd, void *opaque) +static void ga_disable_not_allowed(const QmpCommand *cmd, void *opaque) { - bool whitelisted = false; + bool allowed = false; int i = 0; const char *name = qmp_command_name(cmd); - while (ga_freeze_whitelist[i] != NULL) { - if (strcmp(name, ga_freeze_whitelist[i]) == 0) { - whitelisted = true; + while (ga_freeze_allowlist[i] != NULL) { + if (strcmp(name, ga_freeze_allowlist[i]) == 0) { + allowed = true; } i++; } - if (!whitelisted) { + if (!allowed) { g_debug("disabling command: %s", name); qmp_disable_command(&ga_commands, name, "the agent is in frozen state"); } } -/* [re-]enable all commands, except those explicitly blacklisted by user */ -static void ga_enable_non_blacklisted(const QmpCommand *cmd, void *opaque) +/* [re-]enable all commands, except those explicitly blocked by user */ +static void ga_enable_non_blocked(const QmpCommand *cmd, void *opaque) { - GList *blacklist = opaque; + 
GList *blockedrpcs = opaque; const char *name = qmp_command_name(cmd); - if (g_list_find_custom(blacklist, name, ga_strcmp) == NULL && + if (g_list_find_custom(blockedrpcs, name, ga_strcmp) == NULL && !qmp_command_is_enabled(cmd)) { g_debug("enabling command: %s", name); qmp_enable_command(&ga_commands, name); @@ -424,8 +425,8 @@ void ga_set_frozen(GAState *s) if (ga_is_frozen(s)) { return; } - /* disable all non-whitelisted (for frozen state) commands */ - qmp_for_each_command(&ga_commands, ga_disable_non_whitelisted, NULL); + /* disable all forbidden (for frozen state) commands */ + qmp_for_each_command(&ga_commands, ga_disable_not_allowed, NULL); g_warning("disabling logging due to filesystem freeze"); ga_disable_logging(s); s->frozen = true; @@ -463,8 +464,8 @@ void ga_unset_frozen(GAState *s) s->deferred_options.pid_filepath = NULL; } - /* enable all disabled, non-blacklisted commands */ - qmp_for_each_command(&ga_commands, ga_enable_non_blacklisted, s->blacklist); + /* enable all disabled, non-blocked commands */ + qmp_for_each_command(&ga_commands, ga_enable_non_blocked, s->blockedrpcs); s->frozen = false; if (!ga_delete_file(s->state_filepath_isfrozen)) { g_warning("unable to delete %s, fsfreeze may not function properly", @@ -610,7 +611,7 @@ static gboolean channel_event_cb(GIOCondition condition, gpointer data) * host-side chardev. sleep a bit to mitigate this */ if (s->virtio) { - usleep(100 * 1000); + g_usleep(G_USEC_PER_SEC / 10); } return true; default: @@ -894,7 +895,8 @@ int64_t ga_get_fd_handle(GAState *s, Error **errp) int64_t handle; g_assert(s->pstate_filepath); - /* we blacklist commands and avoid operations that potentially require + /* + * We block commands and avoid operations that potentially require * writing to disk when we're in a frozen state. 
this includes opening * new files, so we should never get here in that situation */ @@ -948,8 +950,8 @@ struct GAConfig { #ifdef _WIN32 const char *service; #endif - gchar *bliststr; /* blacklist may point to this string */ - GList *blacklist; + gchar *bliststr; /* blockedrpcs may point to this string */ + GList *blockedrpcs; int daemonize; GLogLevelFlags log_level; int dumpconf; @@ -961,6 +963,7 @@ static void config_load(GAConfig *config) GError *gerr = NULL; GKeyFile *keyfile; g_autofree char *conf = g_strdup(g_getenv("QGA_CONF")) ?: get_relocated_path(QGA_CONF_DEFAULT); + const gchar *blockrpcs_key = "block-rpcs"; /* read system config */ keyfile = g_key_file_new(); @@ -1007,10 +1010,16 @@ static void config_load(GAConfig *config) config->retry_path = g_key_file_get_boolean(keyfile, "general", "retry-path", &gerr); } + if (g_key_file_has_key(keyfile, "general", "blacklist", NULL)) { + g_warning("config using deprecated 'blacklist' key, should be replaced" + " with the 'block-rpcs' key."); + blockrpcs_key = "blacklist"; + } + if (g_key_file_has_key(keyfile, "general", blockrpcs_key, NULL)) { config->bliststr = - g_key_file_get_string(keyfile, "general", "blacklist", &gerr); - config->blacklist = g_list_concat(config->blacklist, + g_key_file_get_string(keyfile, "general", blockrpcs_key, &gerr); + config->blockedrpcs = g_list_concat(config->blockedrpcs, split_list(config->bliststr, ",")); } @@ -1070,8 +1079,8 @@ static void config_dump(GAConfig *config) config->log_level == G_LOG_LEVEL_MASK); g_key_file_set_boolean(keyfile, "general", "retry-path", config->retry_path); - tmp = list_join(config->blacklist, ','); - g_key_file_set_string(keyfile, "general", "blacklist", tmp); + tmp = list_join(config->blockedrpcs, ','); + g_key_file_set_string(keyfile, "general", "block-rpcs", tmp); g_free(tmp); tmp = g_key_file_to_data(keyfile, NULL, &error); @@ -1103,7 +1112,8 @@ static void config_parse(GAConfig *config, int argc, char **argv) { "method", 1, NULL, 'm' }, { "path", 1, NULL, 'p' }, { "daemonize", 0, NULL, 'd' }, - { "blacklist", 1, NULL, 'b' }, + { "block-rpcs", 1, NULL, 'b' }, + { "blacklist", 1, NULL, 'b' }, /* deprecated alias for 'block-rpcs' */ #ifdef _WIN32 { "service", 1, NULL, 's' }, #endif @@ -1161,8 +1171,8 @@ static void config_parse(GAConfig *config, int argc, char **argv) qmp_for_each_command(&ga_commands, ga_print_cmd, NULL); exit(EXIT_SUCCESS); } - config->blacklist = g_list_concat(config->blacklist, - split_list(optarg, ",")); + config->blockedrpcs = g_list_concat(config->blockedrpcs, + split_list(optarg, ",")); break; } #ifdef _WIN32 @@ -1216,7 +1226,7 @@ static void config_free(GAConfig *config) #ifdef CONFIG_FSFREEZE g_free(config->fsfreeze_hook); #endif - g_list_free_full(config->blacklist, g_free); + g_list_free_full(config->blockedrpcs, g_free); g_free(config); } @@ -1273,6 +1283,8 @@ static GAState *initialize_agent(GAConfig *config, int socket_activation) g_log_set_fatal_mask(NULL, G_LOG_LEVEL_ERROR); ga_enable_logging(s); + g_debug("Guest agent version %s started", QEMU_FULL_VERSION); + #ifdef _WIN32 /* On win32 the state directory is application specific (be it the default * or a user override). 
We got past the command line parsing; let's create @@ -1298,7 +1310,7 @@ static GAState *initialize_agent(GAConfig *config, int socket_activation) s->deferred_options.log_filepath = config->log_filepath; } ga_disable_logging(s); - qmp_for_each_command(&ga_commands, ga_disable_non_whitelisted, NULL); + qmp_for_each_command(&ga_commands, ga_disable_not_allowed, NULL); } else { if (config->daemonize) { become_daemon(config->pid_filepath); @@ -1322,10 +1334,10 @@ static GAState *initialize_agent(GAConfig *config, int socket_activation) return NULL; } - config->blacklist = ga_command_blacklist_init(config->blacklist); - if (config->blacklist) { - GList *l = config->blacklist; - s->blacklist = config->blacklist; + config->blockedrpcs = ga_command_init_blockedrpcs(config->blockedrpcs); + if (config->blockedrpcs) { + GList *l = config->blockedrpcs; + s->blockedrpcs = config->blockedrpcs; do { g_debug("disabling command: %s", (char *)l->data); qmp_disable_command(&ga_commands, l->data, NULL); diff --git a/qga/meson.build b/qga/meson.build index cfb1fbc085..3cfb9166e5 100644 --- a/qga/meson.build +++ b/qga/meson.build @@ -1,3 +1,33 @@ +if not have_ga + if get_option('guest_agent_msi').enabled() + error('Guest agent MSI requested, but the guest agent is not being built') + endif + have_qga_vss = false + subdir_done() +endif + +have_qga_vss = get_option('qga_vss') \ + .require(targetos == 'windows', + error_message: 'VSS support requires Windows') \ + .require(link_language == 'cpp', + error_message: 'VSS support requires a C++ compiler') \ + .require(have_vss, error_message: '''VSS support requires VSS headers. + If your Visual Studio installation doesn't have the VSS headers, + Please download and install Microsoft VSS SDK: + http://www.microsoft.com/en-us/download/details.aspx?id=23490 + On POSIX-systems, MinGW should provide headers in >=10.0 releases. + you can extract the SDK headers by: + $ scripts/extract-vsssdk-headers setup.exe + The headers are extracted in the directory 'inc/win2003'. + Then run configure with: --extra-cxxflags="-isystem /path/to/vss/inc/win2003"''') \ + .require(midl.found() or widl.found(), + error_message: 'VSS support requires midl or widl') \ + .require(not enable_static, + error_message: 'VSS support requires dynamic linking with GLib') \ + .allowed() + +all_qga = [] + qga_qapi_outputs = [ 'qga-qapi-commands.c', 'qga-qapi-commands.h', @@ -15,10 +45,18 @@ qga_qapi_outputs = [ 'qga-qapi-visit.h', ] +# Problem: to generate trace events, we'd have to add the .trace-events +# file to qapi_trace_events like we do in qapi/meson.build. Since +# qapi_trace_events is used by trace/meson.build, we'd have to move +# subdir('qga') above subdir('trace') in the top-level meson.build. +# Can't, because it would break the dependency of qga on qemuutil (which +# depends on trace_ss). Not worth solving now; simply suppress trace +# event generation instead. 
qga_qapi_files = custom_target('QGA QAPI files', output: qga_qapi_outputs, input: 'qapi-schema.json', - command: [ qapi_gen, '-o', 'qga', '-p', 'qga-', '@INPUT0@' ], + command: [ qapi_gen, '-o', 'qga', '-p', 'qga-', '@INPUT0@', + '--suppress-tracing' ], depend_files: qapi_gen_depends) qga_ss = ss.source_set() @@ -27,12 +65,19 @@ qga_ss.add(files( 'commands.c', 'guest-agent-command-state.c', 'main.c', + 'cutils.c', )) qga_ss.add(when: 'CONFIG_POSIX', if_true: files( 'channel-posix.c', 'commands-posix.c', 'commands-posix-ssh.c', )) +qga_ss.add(when: 'CONFIG_LINUX', if_true: files( + 'commands-linux.c', +)) +qga_ss.add(when: 'CONFIG_BSD', if_true: files( + 'commands-bsd.c', +)) qga_ss.add(when: 'CONFIG_WIN32', if_true: files( 'channel-win32.c', 'commands-win32.c', @@ -42,19 +87,24 @@ qga_ss.add(when: 'CONFIG_WIN32', if_true: files( qga_ss = qga_ss.apply(config_host, strict: false) +gen_tlb = [] +qga_libs = [] +if targetos == 'windows' + qga_libs += ['-lws2_32', '-lwinmm', '-lpowrprof', '-lwtsapi32', '-lwininet', '-liphlpapi', '-lnetapi32', + '-lsetupapi', '-lcfgmgr32'] + if have_qga_vss + qga_libs += ['-lole32', '-loleaut32', '-lshlwapi', '-lstdc++', '-Wl,--enable-stdcall-fixup'] + subdir('vss-win32') + endif +endif + qga = executable('qemu-ga', qga_ss.sources(), - link_args: config_host['LIBS_QGA'].split(), + link_args: qga_libs, dependencies: [qemuutil, libudev], install: true) -all_qga = [qga] +all_qga += qga if targetos == 'windows' - if 'CONFIG_QGA_VSS' in config_host - subdir('vss-win32') - else - gen_tlb = [] - endif - qemu_ga_msi_arch = { 'x86': ['-D', 'Arch=32'], 'x86_64': ['-a', 'x64', '-D', 'Arch=64'] @@ -69,24 +119,23 @@ if targetos == 'windows' if wixl.found() deps = [gen_tlb, qga] qemu_ga_msi_vss = [] - if 'CONFIG_QGA_VSS' in config_host + if have_qga_vss qemu_ga_msi_vss = ['-D', 'InstallVss'] deps += qga_vss endif qga_msi = custom_target('QGA MSI', input: files('installer/qemu-ga.wxs'), - output: 'qemu-ga-@0@.msi'.format(config_host['ARCH']), + output: 'qemu-ga-@0@.msi'.format(host_arch), depends: deps, command: [ - find_program('env'), - 'QEMU_GA_VERSION=' + config_host['QEMU_GA_VERSION'], - 'QEMU_GA_MANUFACTURER=' + config_host['QEMU_GA_MANUFACTURER'], - 'QEMU_GA_DISTRO=' + config_host['QEMU_GA_DISTRO'], - 'BUILD_DIR=' + meson.build_root(), wixl, '-o', '@OUTPUT0@', '@INPUT0@', qemu_ga_msi_arch[cpu], qemu_ga_msi_vss, - '-D', 'Mingw_dlls=' + config_host['QEMU_GA_MSI_MINGW_DLL_PATH'], + '-D', 'BUILD_DIR=' + meson.project_build_root(), + '-D', 'BIN_DIR=' + glib.get_variable('bindir'), + '-D', 'QEMU_GA_VERSION=' + config_host['QEMU_GA_VERSION'], + '-D', 'QEMU_GA_MANUFACTURER=' + config_host['QEMU_GA_MANUFACTURER'], + '-D', 'QEMU_GA_DISTRO=' + config_host['QEMU_GA_DISTRO'], ]) all_qga += [qga_msi] alias_target('msi', qga_msi) @@ -95,7 +144,7 @@ else if get_option('guest_agent_msi').enabled() error('MSI guest agent package is available only for MinGW Windows cross-compilation') endif - install_subdir('run', install_dir: get_option('localstatedir')) + install_emptydir(get_option('localstatedir') / 'run') endif alias_target('qemu-ga', all_qga) diff --git a/qga/qapi-schema.json b/qga/qapi-schema.json index fb17eebde3..796434ed34 100644 --- a/qga/qapi-schema.json +++ b/qga/qapi-schema.json @@ -16,8 +16,8 @@ { 'pragma': { 'doc-required': true } } -# Whitelists to permit QAPI rule violations; think twice before you -# add to them! +# Lists with items allowed to permit QAPI rule violations; think twice +# before you add to them! 
{ 'pragma': { # Types whose member names may use '_' 'member-name-exceptions': [ @@ -392,7 +392,7 @@ ## # @guest-file-flush: # -# Write file changes bufferred in userspace to disk/kernel buffers +# Write file changes buffered in userspace to disk/kernel buffers # # @handle: filehandle returned by guest-file-open # @@ -827,13 +827,14 @@ # @mmc: Win multimedia card (MMC) bus type # @virtual: Win virtual bus type # @file-backed-virtual: Win file-backed bus type +# @nvme: NVMe disks (since 7.1) # # Since: 2.2; 'Unknown' and all entries below since 2.4 ## { 'enum': 'GuestDiskBusType', 'data': [ 'ide', 'fdc', 'scsi', 'virtio', 'xen', 'usb', 'uml', 'sata', 'sd', 'unknown', 'ieee1394', 'ssa', 'fibre', 'raid', 'iscsi', - 'sas', 'mmc', 'virtual', 'file-backed-virtual' ] } + 'sas', 'mmc', 'virtual', 'file-backed-virtual', 'nvme' ] } ## @@ -887,6 +888,55 @@ '*serial': 'str', '*dev': 'str', '*ccw-address': 'GuestCCWAddress'} } +## +# @GuestNVMeSmart: +# +# NVMe smart informations, based on NVMe specification, +# section +# +# Since: 7.1 +## +{ 'struct': 'GuestNVMeSmart', + 'data': {'critical-warning': 'int', + 'temperature': 'int', + 'available-spare': 'int', + 'available-spare-threshold': 'int', + 'percentage-used': 'int', + 'data-units-read-lo': 'uint64', + 'data-units-read-hi': 'uint64', + 'data-units-written-lo': 'uint64', + 'data-units-written-hi': 'uint64', + 'host-read-commands-lo': 'uint64', + 'host-read-commands-hi': 'uint64', + 'host-write-commands-lo': 'uint64', + 'host-write-commands-hi': 'uint64', + 'controller-busy-time-lo': 'uint64', + 'controller-busy-time-hi': 'uint64', + 'power-cycles-lo': 'uint64', + 'power-cycles-hi': 'uint64', + 'power-on-hours-lo': 'uint64', + 'power-on-hours-hi': 'uint64', + 'unsafe-shutdowns-lo': 'uint64', + 'unsafe-shutdowns-hi': 'uint64', + 'media-errors-lo': 'uint64', + 'media-errors-hi': 'uint64', + 'number-of-error-log-entries-lo': 'uint64', + 'number-of-error-log-entries-hi': 'uint64' } } + +## +# @GuestDiskSmart: +# +# Disk type related smart information. +# +# - @nvme: NVMe disk smart +# +# Since: 7.1 +## +{ 'union': 'GuestDiskSmart', + 'base': { 'type': 'GuestDiskBusType' }, + 'discriminator': 'type', + 'data': { 'nvme': 'GuestNVMeSmart' } } + ## # @GuestDiskInfo: # @@ -898,12 +948,14 @@ # @address: disk address information (only for non-virtual devices) # @alias: optional alias assigned to the disk, on Linux this is a name assigned # by device mapper +# @smart: disk smart information (Since 7.1) # -# Since 5.2 +# Since: 5.2 ## { 'struct': 'GuestDiskInfo', 'data': {'name': 'str', 'partition': 'bool', '*dependencies': ['str'], - '*address': 'GuestDiskAddress', '*alias': 'str'} } + '*address': 'GuestDiskAddress', '*alias': 'str', + '*smart': 'GuestDiskSmart'} } ## # @guest-get-disks: @@ -1140,6 +1192,7 @@ ## # @GuestExec: +# # @pid: pid of child process in guest OS # # Since: 2.5 @@ -1171,6 +1224,7 @@ ## # @GuestHostName: +# # @host-name: Fully qualified domain name of the guest OS # # Since: 2.10 @@ -1197,6 +1251,7 @@ ## # @GuestUser: +# # @user: Username # @domain: Logon domain (windows only) # @login-time: Time of login of this user on the computer. 
If multiple @@ -1380,7 +1435,7 @@ 'data': { 'keys': ['str'] }, - 'if': 'defined(CONFIG_POSIX)' } + 'if': 'CONFIG_POSIX' } ## @@ -1398,7 +1453,7 @@ { 'command': 'guest-ssh-get-authorized-keys', 'data': { 'username': 'str' }, 'returns': 'GuestAuthorizedKeys', - 'if': 'defined(CONFIG_POSIX)' } + 'if': 'CONFIG_POSIX' } ## # @guest-ssh-add-authorized-keys: @@ -1416,7 +1471,7 @@ ## { 'command': 'guest-ssh-add-authorized-keys', 'data': { 'username': 'str', 'keys': ['str'], '*reset': 'bool' }, - 'if': 'defined(CONFIG_POSIX)' } + 'if': 'CONFIG_POSIX' } ## # @guest-ssh-remove-authorized-keys: @@ -1434,4 +1489,171 @@ ## { 'command': 'guest-ssh-remove-authorized-keys', 'data': { 'username': 'str', 'keys': ['str'] }, - 'if': 'defined(CONFIG_POSIX)' } + 'if': 'CONFIG_POSIX' } + +## +# @GuestDiskStats: +# +# @read-sectors: sectors read +# +# @read-ios: reads completed successfully +# +# @read-merges: read requests merged +# +# @write-sectors: sectors written +# +# @write-ios: writes completed +# +# @write-merges: write requests merged +# +# @discard-sectors: sectors discarded +# +# @discard-ios: discards completed successfully +# +# @discard-merges: discard requests merged +# +# @flush-ios: flush requests completed successfully +# +# @read-ticks: time spent reading (ms) +# +# @write-ticks: time spent writing (ms) +# +# @discard-ticks: time spent discarding (ms) +# +# @flush-ticks: time spent flushing (ms) +# +# @ios-pgr: number of I/Os currently in flight +# +# @total-ticks: time spent doing I/Os (ms) +# +# @weight-ticks: weighted time spent doing I/Os since the last update of this field (ms) +# +# Since: 7.1 +## +{ 'struct': 'GuestDiskStats', + 'data': {'*read-sectors': 'uint64', + '*read-ios': 'uint64', + '*read-merges': 'uint64', + '*write-sectors': 'uint64', + '*write-ios': 'uint64', + '*write-merges': 'uint64', + '*discard-sectors': 'uint64', + '*discard-ios': 'uint64', + '*discard-merges': 'uint64', + '*flush-ios': 'uint64', + '*read-ticks': 'uint64', + '*write-ticks': 'uint64', + '*discard-ticks': 'uint64', + '*flush-ticks': 'uint64', + '*ios-pgr': 'uint64', + '*total-ticks': 'uint64', + '*weight-ticks': 'uint64' + } } + +## +# @GuestDiskStatsInfo: +# +# @name: disk name +# +# @major: major device number of disk +# +# @minor: minor device number of disk +# +# @stats: disk I/O statistics +# +# Since: 7.1 +## +{ 'struct': 'GuestDiskStatsInfo', + 'data': {'name': 'str', + 'major': 'uint64', + 'minor': 'uint64', + 'stats': 'GuestDiskStats' } } + +## +# @guest-get-diskstats: +# +# Retrieve information about disk stats. +# Returns: List of disk stats of guest.
+# +# Since: 7.1 +## +{ 'command': 'guest-get-diskstats', + 'returns': ['GuestDiskStatsInfo'] +} + +## +# @GuestCpuStatsType: +# +# An enumeration of OS type +# +# Since: 7.1 +## +{ 'enum': 'GuestCpuStatsType', + 'data': [ 'linux' ] } + + +## +# @GuestLinuxCpuStats: +# +# CPU statistics of Linux +# +# @cpu: CPU index in guest OS +# +# @user: Time spent in user mode +# +# @nice: Time spent in user mode with low priority (nice) +# +# @system: Time spent in system mode +# +# @idle: Time spent in the idle task +# +# @iowait: Time waiting for I/O to complete (since Linux 2.5.41) +# +# @irq: Time servicing interrupts (since Linux 2.6.0-test4) +# +# @softirq: Time servicing softirqs (since Linux 2.6.0-test4) +# +# @steal: Stolen time by host (since Linux 2.6.11) +# +# @guest: Time spent running a virtual CPU for guest operating systems under +# the control of the Linux kernel (since Linux 2.6.24) +# +# @guestnice: Time spent running a niced guest (since Linux 2.6.33) +# +# Since: 7.1 +## +{ 'struct': 'GuestLinuxCpuStats', + 'data': {'cpu': 'int', + 'user': 'uint64', + 'nice': 'uint64', + 'system': 'uint64', + 'idle': 'uint64', + '*iowait': 'uint64', + '*irq': 'uint64', + '*softirq': 'uint64', + '*steal': 'uint64', + '*guest': 'uint64', + '*guestnice': 'uint64' + } } + +## +# @GuestCpuStats: +# +# Get statistics of each CPU in milliseconds. +# +# - @linux: Linux style CPU statistics +# +# Since: 7.1 +## +{ 'union': 'GuestCpuStats', + 'base': { 'type': 'GuestCpuStatsType' }, + 'discriminator': 'type', + 'data': { 'linux': 'GuestLinuxCpuStats' } } + +## +# @guest-get-cpustats: +# +# Retrieve information about CPU stats. +# Returns: List of CPU stats of guest. +# +# Since: 7.1 +## +{ 'command': 'guest-get-cpustats', + 'returns': ['GuestCpuStats'] +} diff --git a/qga/vss-win32/install.cpp b/qga/vss-win32/install.cpp index 40de133774..b8087e5baa 100644 --- a/qga/vss-win32/install.cpp +++ b/qga/vss-win32/install.cpp @@ -13,7 +13,11 @@ #include "qemu/osdep.h" #include "vss-common.h" -#include +#ifdef HAVE_VSS_SDK +#include +#else +#include +#endif #include "install.h" #include #include @@ -42,7 +46,8 @@ void errmsg(DWORD err, const char *text) * If text doesn't contains '(', negative precision is given, which is * treated as though it were missing. */ - char *msg = NULL, *nul = strchr(text, '('); + char *msg = NULL; + const char *nul = strchr(text, '('); int len = nul ?
nul - text : -1; FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | @@ -513,7 +518,7 @@ namespace _com_util /* Stop QGA VSS provider service using Winsvc API */ STDAPI StopService(void) { - HRESULT hr; + HRESULT hr = S_OK; SC_HANDLE manager = OpenSCManager(NULL, NULL, SC_MANAGER_ALL_ACCESS); SC_HANDLE service = NULL; diff --git a/qga/vss-win32/meson.build b/qga/vss-win32/meson.build index 90825edef3..9483ccd3b8 100644 --- a/qga/vss-win32/meson.build +++ b/qga/vss-win32/meson.build @@ -1,36 +1,38 @@ -if add_languages('cpp', required: false) - glib_dynamic = dependency('glib-2.0', static: false) - link_args = cc.get_supported_link_arguments(['-fstack-protector-all', '-fstack-protector-strong', - '-Wl,--add-stdcall-alias', '-Wl,--enable-stdcall-fixup']) +link_args = cc.get_supported_link_arguments([ + '-fstack-protector-all', + '-fstack-protector-strong', + '-Wl,--add-stdcall-alias', + '-Wl,--enable-stdcall-fixup' +]) - qga_vss = shared_module('qga-vss', ['requester.cpp', 'provider.cpp', 'install.cpp'], - name_prefix: '', - cpp_args: ['-Wno-unknown-pragmas', '-Wno-delete-non-virtual-dtor', '-Wno-non-virtual-dtor'], - link_args: link_args, - vs_module_defs: 'qga-vss.def', - dependencies: [glib_dynamic, socket, - cc.find_library('ole32'), - cc.find_library('oleaut32'), - cc.find_library('shlwapi'), - cc.find_library('uuid'), - cc.find_library('intl')]) +qga_vss = shared_module( + 'qga-vss', + ['requester.cpp', 'provider.cpp', 'install.cpp', genh], + name_prefix: '', + cpp_args: ['-Wno-unknown-pragmas', '-Wno-delete-non-virtual-dtor', '-Wno-non-virtual-dtor'], + link_args: link_args, + vs_module_defs: 'qga-vss.def', + dependencies: [ + glib, + socket, + cc.find_library('ole32'), + cc.find_library('oleaut32'), + cc.find_library('shlwapi'), + cc.find_library('uuid'), + cc.find_library('intl') + ] +) - all_qga += qga_vss -endif - -# rules to build qga-vss.tlb -# Currently, only native build is supported because building .tlb -# (TypeLibrary) from .idl requires WindowsSDK and MIDL (and cl.exe in VC++). 
-midl = find_program('midl', required: false) if midl.found() gen_tlb = custom_target('gen-tlb', input: 'qga-vss.idl', output: 'qga-vss.tlb', - command: [midl, '-tlb', '-I' + config_host['WIN_SDK'], - '@INPUT@', '@OUTPUT@']) + command: [midl, '@INPUT@', '/tlb', '@OUTPUT@']) else gen_tlb = custom_target('gen-tlb', - input: 'qga-vss.tlb', + input: 'qga-vss.idl', output: 'qga-vss.tlb', - command: ['cp', '@INPUT@', '@OUTPUT@']) + command: [widl, '-t', '@INPUT@', '-o', '@OUTPUT@']) endif + +all_qga += [ qga_vss, gen_tlb ] diff --git a/qga/vss-win32/provider.cpp b/qga/vss-win32/provider.cpp index 72d8b0e19d..1b885e24ee 100644 --- a/qga/vss-win32/provider.cpp +++ b/qga/vss-win32/provider.cpp @@ -12,8 +12,12 @@ #include "qemu/osdep.h" #include "vss-common.h" -#include -#include +#ifdef HAVE_VSS_SDK +#include +#else +#include +#endif +#include #define VSS_TIMEOUT_MSEC (60*1000) diff --git a/qga/vss-win32/qga-vss.tlb b/qga/vss-win32/qga-vss.tlb deleted file mode 100644 index 226452a186..0000000000 Binary files a/qga/vss-win32/qga-vss.tlb and /dev/null differ diff --git a/qga/vss-win32/requester.cpp b/qga/vss-win32/requester.cpp index 940a2c8f55..b371affeab 100644 --- a/qga/vss-win32/requester.cpp +++ b/qga/vss-win32/requester.cpp @@ -14,8 +14,8 @@ #include "vss-common.h" #include "requester.h" #include "install.h" -#include -#include +#include +#include /* Max wait time for frozen event (VSS can only hold writes for 10 seconds) */ #define VSS_TIMEOUT_FREEZE_MSEC 60000 @@ -354,12 +354,12 @@ void requester_freeze(int *num_vols, void *mountpoints, ErrorSet *errset) if (FAILED(hr)) { err_set(errset, hr, "failed to add %S to snapshot set", volume_name_wchar); - delete volume_name_wchar; + delete[] volume_name_wchar; goto out; } num_mount_points++; - delete volume_name_wchar; + delete[] volume_name_wchar; } if (num_mount_points == 0) { diff --git a/qga/vss-win32/requester.h b/qga/vss-win32/requester.h index 5a8e8faf0c..ecc5f2acb4 100644 --- a/qga/vss-win32/requester.h +++ b/qga/vss-win32/requester.h @@ -25,7 +25,7 @@ struct Error; typedef void (*ErrorSetFunc)(struct Error **errp, const char *src, int line, const char *func, int win32_err, const char *fmt, ...) 
- GCC_FMT_ATTR(6, 7); + G_GNUC_PRINTF(6, 7); typedef struct ErrorSet { ErrorSetFunc error_setg_win32_wrapper; struct Error **errp; /* restriction: must not be null */ diff --git a/qga/vss-win32/vss-common.h b/qga/vss-win32/vss-common.h index 61c170b52e..0e67e7822c 100644 --- a/qga/vss-win32/vss-common.h +++ b/qga/vss-win32/vss-common.h @@ -46,11 +46,7 @@ #undef VSS_E_MAXIMUM_NUMBER_OF_VOLUMES_REACHED #undef VSS_E_MAXIMUM_NUMBER_OF_SNAPSHOTS_REACHED -/* - * VSS headers must be installed from Microsoft VSS SDK 7.2 available at: - * http://www.microsoft.com/en-us/download/details.aspx?id=23490 - */ -#include +#include #include "vss-handles.h" /* Macros to convert char definitions to wchar */ @@ -68,12 +64,13 @@ const CLSID CLSID_QGAVSSProvider = { 0x6e6a3492, 0x8d4d, 0x440c, const TCHAR g_szClsid[] = TEXT("{6E6A3492-8D4D-440C-9619-5E5D0CC31CA8}"); const TCHAR g_szProgid[] = TEXT("QGAVSSProvider"); +#ifdef HAVE_VSS_SDK /* Enums undefined in VSS SDK 7.2 but defined in newer Windows SDK */ enum __VSS_VOLUME_SNAPSHOT_ATTRIBUTES { VSS_VOLSNAP_ATTR_NO_AUTORECOVERY = 0x00000002, VSS_VOLSNAP_ATTR_TXF_RECOVERY = 0x02000000 }; - +#endif /* COM pointer utility; call ->Release() when it goes out of scope */ template diff --git a/qobject/block-qdict.c b/qobject/block-qdict.c index 1487cc5dd8..4a83bda2c3 100644 --- a/qobject/block-qdict.c +++ b/qobject/block-qdict.c @@ -251,12 +251,12 @@ void qdict_array_split(QDict *src, QList **dst) if (is_subqdict) { qdict_extract_subqdict(src, &subqdict, prefix); assert(qdict_size(subqdict) > 0); + qlist_append_obj(*dst, QOBJECT(subqdict)); } else { qobject_ref(subqobj); qdict_del(src, indexstr); + qlist_append_obj(*dst, subqobj); } - - qlist_append_obj(*dst, subqobj ?: QOBJECT(subqdict)); } } diff --git a/qobject/json-parser.c b/qobject/json-parser.c index 008b326fb8..d498db6e70 100644 --- a/qobject/json-parser.c +++ b/qobject/json-parser.c @@ -54,7 +54,7 @@ static QObject *parse_value(JSONParserContext *ctxt); /** * Error handler */ -static void GCC_FMT_ATTR(3, 4) parse_error(JSONParserContext *ctxt, +static void G_GNUC_PRINTF(3, 4) parse_error(JSONParserContext *ctxt, JSONToken *token, const char *msg, ...) 
{ va_list ap; diff --git a/qobject/qbool.c b/qobject/qbool.c index 16a600abb9..c7049c0c50 100644 --- a/qobject/qbool.c +++ b/qobject/qbool.c @@ -56,3 +56,8 @@ void qbool_destroy_obj(QObject *obj) assert(obj != NULL); g_free(qobject_to(QBool, obj)); } + +void qbool_unref(QBool *q) +{ + qobject_unref(q); +} diff --git a/qobject/qdict.c b/qobject/qdict.c index 0216ca7ac1..8faff230d3 100644 --- a/qobject/qdict.c +++ b/qobject/qdict.c @@ -442,3 +442,8 @@ void qdict_destroy_obj(QObject *obj) g_free(qdict); } + +void qdict_unref(QDict *q) +{ + qobject_unref(q); +} diff --git a/qobject/qlist.c b/qobject/qlist.c index 60562a1f52..356ad946b0 100644 --- a/qobject/qlist.c +++ b/qobject/qlist.c @@ -182,3 +182,8 @@ void qlist_destroy_obj(QObject *obj) g_free(qlist); } + +void qlist_unref(QList *q) +{ + qobject_unref(q); +} diff --git a/qobject/qnull.c b/qobject/qnull.c index b26b368219..445a5db7f3 100644 --- a/qobject/qnull.c +++ b/qobject/qnull.c @@ -29,3 +29,8 @@ bool qnull_is_equal(const QObject *x, const QObject *y) { return true; } + +void qnull_unref(QNull *q) +{ + qobject_unref(q); +} diff --git a/qobject/qnum.c b/qobject/qnum.c index 5dd66938dd..2bbeaedc7b 100644 --- a/qobject/qnum.c +++ b/qobject/qnum.c @@ -239,3 +239,8 @@ void qnum_destroy_obj(QObject *obj) assert(obj != NULL); g_free(qobject_to(QNum, obj)); } + +void qnum_unref(QNum *q) +{ + qobject_unref(q); +} diff --git a/qobject/qstring.c b/qobject/qstring.c index b4613899b9..794f8c9357 100644 --- a/qobject/qstring.c +++ b/qobject/qstring.c @@ -100,3 +100,8 @@ void qstring_destroy_obj(QObject *obj) g_free((char *)qs->string); g_free(qs); } + +void qstring_unref(QString *q) +{ + qobject_unref(q); +} diff --git a/qom/object.c b/qom/object.c index e86cb05b84..e25f1e96db 100644 --- a/qom/object.c +++ b/qom/object.c @@ -16,6 +16,7 @@ #include "qom/object.h" #include "qom/object_interfaces.h" #include "qemu/cutils.h" +#include "qemu/memalign.h" #include "qapi/visitor.h" #include "qapi/string-input-visitor.h" #include "qapi/string-output-visitor.h" @@ -525,8 +526,13 @@ void object_initialize(void *data, size_t size, const char *typename) #ifdef CONFIG_MODULES if (!type) { - module_load_qom_one(typename); - type = type_get_by_name(typename); + int rv = module_load_qom(typename, &error_fatal); + if (rv > 0) { + type = type_get_by_name(typename); + } else { + error_report("missing object type '%s'", typename); + exit(1); + } } #endif if (!type) { @@ -1032,8 +1038,13 @@ ObjectClass *module_object_class_by_name(const char *typename) oc = object_class_by_name(typename); #ifdef CONFIG_MODULES if (!oc) { - module_load_qom_one(typename); - oc = object_class_by_name(typename); + Error *local_err = NULL; + int rv = module_load_qom(typename, &local_err); + if (rv > 0) { + oc = object_class_by_name(typename); + } else if (rv < 0) { + error_report_err(local_err); + } } #endif return oc; @@ -1167,10 +1178,14 @@ GSList *object_class_get_list_sorted(const char *implements_type, Object *object_ref(void *objptr) { Object *obj = OBJECT(objptr); + uint32_t ref; + if (!obj) { return NULL; } - qatomic_inc(&obj->ref); + ref = qatomic_fetch_inc(&obj->ref); + /* Assert waaay before the integer overflows */ + g_assert(ref < INT_MAX); return obj; } @@ -1378,7 +1393,8 @@ bool object_property_get(Object *obj, const char *name, Visitor *v, } if (!prop->get) { - error_setg(errp, QERR_PERMISSION_DENIED); + error_setg(errp, "Property '%s.%s' is not readable", + object_get_typename(obj), name); return false; } prop->get(obj, v, name, prop->opaque, &err); @@ -1389,7 +1405,7 @@ 
bool object_property_get(Object *obj, const char *name, Visitor *v, bool object_property_set(Object *obj, const char *name, Visitor *v, Error **errp) { - Error *err = NULL; + ERRP_GUARD(); ObjectProperty *prop = object_property_find_err(obj, name, errp); if (prop == NULL) { @@ -1397,12 +1413,12 @@ bool object_property_set(Object *obj, const char *name, Visitor *v, } if (!prop->set) { - error_setg(errp, QERR_PERMISSION_DENIED); + error_setg(errp, "Property '%s.%s' is not writable", + object_get_typename(obj), name); return false; } - prop->set(obj, v, name, prop->opaque, &err); - error_propagate(errp, err); - return !err; + prop->set(obj, v, name, prop->opaque, errp); + return !*errp; } bool object_property_set_str(Object *obj, const char *name, @@ -2145,6 +2161,17 @@ Object *object_resolve_path(const char *path, bool *ambiguous) return object_resolve_path_type(path, TYPE_OBJECT, ambiguous); } +Object *object_resolve_path_at(Object *parent, const char *path) +{ + g_auto(GStrv) parts = g_strsplit(path, "/", 0); + + if (*path == '/') { + return object_resolve_abs_path(object_get_root(), parts + 1, + TYPE_OBJECT); + } + return object_resolve_abs_path(parent, parts, TYPE_OBJECT); +} + typedef struct StringProperty { char *(*get)(Object *, Error **); @@ -2783,13 +2810,13 @@ static void object_class_init(ObjectClass *klass, void *data) static void register_types(void) { - static TypeInfo interface_info = { + static const TypeInfo interface_info = { .name = TYPE_INTERFACE, .class_size = sizeof(InterfaceClass), .abstract = true, }; - static TypeInfo object_info = { + static const TypeInfo object_info = { .name = TYPE_OBJECT, .instance_size = sizeof(Object), .class_init = object_class_init, diff --git a/qom/object_interfaces.c b/qom/object_interfaces.c index ad9b56b59a..f94b6c3193 100644 --- a/qom/object_interfaces.c +++ b/qom/object_interfaces.c @@ -17,6 +17,7 @@ #include "qemu/qemu-print.h" #include "qapi/opts-visitor.h" #include "qemu/config-file.h" +#include "qemu/keyval.h" bool user_creatable_complete(UserCreatable *uc, Error **errp) { @@ -46,25 +47,18 @@ static void object_set_properties_from_qdict(Object *obj, const QDict *qdict, Visitor *v, Error **errp) { const QDictEntry *e; - Error *local_err = NULL; - if (!visit_start_struct(v, NULL, NULL, 0, &local_err)) { - goto out; + if (!visit_start_struct(v, NULL, NULL, 0, errp)) { + return; } for (e = qdict_first(qdict); e; e = qdict_next(qdict, e)) { - if (!object_property_set(obj, e->key, v, &local_err)) { - break; + if (!object_property_set(obj, e->key, v, errp)) { + goto out; } } - if (!local_err) { - visit_check_struct(v, &local_err); - } - visit_end_struct(v, NULL); - + visit_check_struct(v, errp); out: - if (local_err) { - error_propagate(errp, local_err); - } + visit_end_struct(v, NULL); } void object_set_properties_from_keyval(Object *obj, const QDict *qdict, diff --git a/qom/qom-qmp-cmds.c b/qom/qom-qmp-cmds.c index 2d6f41ecc7..2e63a4c184 100644 --- a/qom/qom-qmp-cmds.c +++ b/qom/qom-qmp-cmds.c @@ -49,7 +49,7 @@ ObjectPropertyInfoList *qmp_qom_list(const char *path, Error **errp) object_property_iter_init(&iter, obj); while ((prop = object_property_iter_next(&iter))) { - ObjectPropertyInfo *value = g_malloc0(sizeof(ObjectPropertyInfo)); + ObjectPropertyInfo *value = g_new0(ObjectPropertyInfo, 1); QAPI_LIST_PREPEND(props, value); diff --git a/replay/replay-char.c b/replay/replay-char.c index dc0002367e..a31aded032 100644 --- a/replay/replay-char.c +++ b/replay/replay-char.c @@ -48,9 +48,9 @@ void replay_register_char_driver(Chardev *chr) 
char_drivers[drivers_count++] = chr; } -void replay_chr_be_write(Chardev *s, uint8_t *buf, int len) +void replay_chr_be_write(Chardev *s, const uint8_t *buf, int len) { - CharEvent *event = g_malloc0(sizeof(CharEvent)); + CharEvent *event = g_new0(CharEvent, 1); event->id = find_char_driver(s); if (event->id < 0) { @@ -85,7 +85,7 @@ void replay_event_char_read_save(void *opaque) void *replay_event_char_read_load(void) { - CharEvent *event = g_malloc0(sizeof(CharEvent)); + CharEvent *event = g_new0(CharEvent, 1); event->id = replay_get_byte(); replay_get_array_alloc(&event->buf, &event->len); diff --git a/replay/replay-events.c b/replay/replay-events.c index 15983dd250..af0721cc1a 100644 --- a/replay/replay-events.c +++ b/replay/replay-events.c @@ -119,7 +119,7 @@ void replay_add_event(ReplayAsyncEventKind event_kind, return; } - Event *event = g_malloc0(sizeof(Event)); + Event *event = g_new0(Event, 1); event->event_kind = event_kind; event->opaque = opaque; event->opaque2 = opaque2; @@ -170,13 +170,12 @@ void replay_block_event(QEMUBH *bh, uint64_t id) } } -static void replay_save_event(Event *event, int checkpoint) +static void replay_save_event(Event *event) { if (replay_mode != REPLAY_MODE_PLAY) { /* put the event into the file */ - replay_put_event(EVENT_ASYNC); - replay_put_byte(checkpoint); - replay_put_byte(event->event_kind); + g_assert(event->event_kind < REPLAY_ASYNC_COUNT); + replay_put_event(EVENT_ASYNC + event->event_kind); /* save event-specific data */ switch (event->event_kind) { @@ -206,36 +205,25 @@ static void replay_save_event(Event *event, int checkpoint) } /* Called with replay mutex locked */ -void replay_save_events(int checkpoint) +void replay_save_events(void) { g_assert(replay_mutex_locked()); - g_assert(checkpoint != CHECKPOINT_CLOCK_WARP_START); - g_assert(checkpoint != CHECKPOINT_CLOCK_VIRTUAL); while (!QTAILQ_EMPTY(&events_list)) { Event *event = QTAILQ_FIRST(&events_list); - replay_save_event(event, checkpoint); + replay_save_event(event); replay_run_event(event); QTAILQ_REMOVE(&events_list, event, events); g_free(event); } } -static Event *replay_read_event(int checkpoint) +static Event *replay_read_event(void) { Event *event; - if (replay_state.read_event_kind == -1) { - replay_state.read_event_checkpoint = replay_get_byte(); - replay_state.read_event_kind = replay_get_byte(); - replay_state.read_event_id = -1; - replay_check_error(); - } - - if (checkpoint != replay_state.read_event_checkpoint) { - return NULL; - } + ReplayAsyncEventKind event_kind = replay_state.data_kind - EVENT_ASYNC; /* Events that has not to be in the queue */ - switch (replay_state.read_event_kind) { + switch (event_kind) { case REPLAY_ASYNC_EVENT_BH: case REPLAY_ASYNC_EVENT_BH_ONESHOT: if (replay_state.read_event_id == -1) { @@ -243,18 +231,18 @@ static Event *replay_read_event(int checkpoint) } break; case REPLAY_ASYNC_EVENT_INPUT: - event = g_malloc0(sizeof(Event)); - event->event_kind = replay_state.read_event_kind; + event = g_new0(Event, 1); + event->event_kind = event_kind; event->opaque = replay_read_input_event(); return event; case REPLAY_ASYNC_EVENT_INPUT_SYNC: - event = g_malloc0(sizeof(Event)); - event->event_kind = replay_state.read_event_kind; + event = g_new0(Event, 1); + event->event_kind = event_kind; event->opaque = 0; return event; case REPLAY_ASYNC_EVENT_CHAR_READ: - event = g_malloc0(sizeof(Event)); - event->event_kind = replay_state.read_event_kind; + event = g_new0(Event, 1); + event->event_kind = event_kind; event->opaque = replay_event_char_read_load(); 
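The replay-events.c and replay-internal.h hunks around here stop writing a checkpoint byte plus a kind byte after EVENT_ASYNC and instead fold the asynchronous event kind into the log event ID itself, with EVENT_ASYNC_LAST = EVENT_ASYNC + REPLAY_ASYNC_COUNT - 1 bounding the range (hence the REPLAY_VERSION bump later in the series). The standalone sketch below shows the encode/decode arithmetic; the enum members and values are illustrative only and do not reproduce QEMU's real ReplayEvents numbering.

```c
/*
 * Minimal standalone sketch of the event-ID encoding used above: the async
 * kind is folded into the event ID instead of being written as a separate
 * byte.  Enum values here are illustrative, not QEMU's actual numbering.
 */
#include <assert.h>
#include <stdio.h>

typedef enum {
    REPLAY_ASYNC_EVENT_BH,
    REPLAY_ASYNC_EVENT_INPUT,
    REPLAY_ASYNC_EVENT_CHAR_READ,
    REPLAY_ASYNC_COUNT
} AsyncEventKind;

enum {
    EVENT_INSTRUCTION,
    EVENT_ASYNC,
    EVENT_ASYNC_LAST = EVENT_ASYNC + REPLAY_ASYNC_COUNT - 1,
    EVENT_SHUTDOWN,     /* later ranges simply start after EVENT_ASYNC_LAST */
};

/* Recording side: one event ID carries both "async" and the kind. */
static unsigned encode_async_event(AsyncEventKind kind)
{
    assert(kind < REPLAY_ASYNC_COUNT);
    return EVENT_ASYNC + kind;
}

/* Replay side: range-check first, then subtract the base to get the kind. */
static int decode_async_event(unsigned data_kind, AsyncEventKind *kind)
{
    if (data_kind < EVENT_ASYNC || data_kind > EVENT_ASYNC_LAST) {
        return 0;       /* not an async event */
    }
    *kind = (AsyncEventKind)(data_kind - EVENT_ASYNC);
    return 1;
}

int main(void)
{
    AsyncEventKind kind;
    unsigned id = encode_async_event(REPLAY_ASYNC_EVENT_CHAR_READ);

    if (decode_async_event(id, &kind)) {
        printf("event %u decodes to async kind %d\n", id, kind);
    }
    printf("EVENT_SHUTDOWN range now starts at %d\n", EVENT_SHUTDOWN);
    return 0;
}
```

The g_new0(Event, 1) conversions in the same hunks are behaviour-preserving: g_new0(T, 1) allocates the same zeroed sizeof(T) block as g_malloc0(sizeof(T)), but returns a T * the compiler can type-check.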
return event; case REPLAY_ASYNC_EVENT_BLOCK: @@ -263,19 +251,18 @@ static Event *replay_read_event(int checkpoint) } break; case REPLAY_ASYNC_EVENT_NET: - event = g_malloc0(sizeof(Event)); - event->event_kind = replay_state.read_event_kind; + event = g_new0(Event, 1); + event->event_kind = event_kind; event->opaque = replay_event_net_load(); return event; default: - error_report("Unknown ID %d of replay event", - replay_state.read_event_kind); + error_report("Unknown ID %d of replay event", event_kind); exit(1); break; } QTAILQ_FOREACH(event, &events_list, events) { - if (event->event_kind == replay_state.read_event_kind + if (event->event_kind == event_kind && (replay_state.read_event_id == -1 || replay_state.read_event_id == event->id)) { break; @@ -284,26 +271,23 @@ static Event *replay_read_event(int checkpoint) if (event) { QTAILQ_REMOVE(&events_list, event, events); - } else { - return NULL; } - /* Read event-specific data */ - return event; } /* Called with replay mutex locked */ -void replay_read_events(int checkpoint) +void replay_read_events(void) { g_assert(replay_mutex_locked()); - while (replay_state.data_kind == EVENT_ASYNC) { - Event *event = replay_read_event(checkpoint); + while (replay_state.data_kind >= EVENT_ASYNC + && replay_state.data_kind <= EVENT_ASYNC_LAST) { + Event *event = replay_read_event(); if (!event) { break; } replay_finish_event(); - replay_state.read_event_kind = -1; + replay_state.read_event_id = -1; replay_run_event(event); g_free(event); @@ -312,7 +296,7 @@ void replay_read_events(int checkpoint) void replay_init_events(void) { - replay_state.read_event_kind = -1; + replay_state.read_event_id = -1; } void replay_finish_events(void) diff --git a/replay/replay-internal.h b/replay/replay-internal.h index 97649ed8d7..b6836354ac 100644 --- a/replay/replay-internal.h +++ b/replay/replay-internal.h @@ -12,6 +12,19 @@ * */ +/* Asynchronous events IDs */ + +typedef enum ReplayAsyncEventKind { + REPLAY_ASYNC_EVENT_BH, + REPLAY_ASYNC_EVENT_BH_ONESHOT, + REPLAY_ASYNC_EVENT_INPUT, + REPLAY_ASYNC_EVENT_INPUT_SYNC, + REPLAY_ASYNC_EVENT_CHAR_READ, + REPLAY_ASYNC_EVENT_BLOCK, + REPLAY_ASYNC_EVENT_NET, + REPLAY_ASYNC_COUNT +} ReplayAsyncEventKind; + /* Any changes to order/number of events will need to bump REPLAY_VERSION */ enum ReplayEvents { /* for instruction event */ @@ -22,6 +35,7 @@ enum ReplayEvents { EVENT_EXCEPTION, /* for async events */ EVENT_ASYNC, + EVENT_ASYNC_LAST = EVENT_ASYNC + REPLAY_ASYNC_COUNT - 1, /* for shutdown requests, range allows recovery of ShutdownCause */ EVENT_SHUTDOWN, EVENT_SHUTDOWN_LAST = EVENT_SHUTDOWN + SHUTDOWN_CAUSE__MAX, @@ -49,21 +63,6 @@ enum ReplayEvents { EVENT_COUNT }; -/* Asynchronous events IDs */ - -enum ReplayAsyncEventKind { - REPLAY_ASYNC_EVENT_BH, - REPLAY_ASYNC_EVENT_BH_ONESHOT, - REPLAY_ASYNC_EVENT_INPUT, - REPLAY_ASYNC_EVENT_INPUT_SYNC, - REPLAY_ASYNC_EVENT_CHAR_READ, - REPLAY_ASYNC_EVENT_BLOCK, - REPLAY_ASYNC_EVENT_NET, - REPLAY_ASYNC_COUNT -}; - -typedef enum ReplayAsyncEventKind ReplayAsyncEventKind; - typedef struct ReplayState { /*! Cached clock values. */ int64_t cached_clock[REPLAY_CLOCK_COUNT]; @@ -83,12 +82,8 @@ typedef struct ReplayState { uint64_t block_request_id; /*! Prior value of the host clock */ uint64_t host_clock_last; - /*! Asynchronous event type read from the log */ - int32_t read_event_kind; /*! Asynchronous event id read from the log */ uint64_t read_event_id; - /*! 
Asynchronous event checkpoint id read from the log */ - int32_t read_event_checkpoint; } ReplayState; extern ReplayState replay_state; @@ -141,7 +136,7 @@ bool replay_next_event_is(int event); /*! Reads next clock value from the file. If clock kind read from the file is different from the parameter, the value is not used. */ -void replay_read_next_clock(unsigned int kind); +void replay_read_next_clock(ReplayClockKind kind); /* Asynchronous events queue */ @@ -152,9 +147,9 @@ void replay_finish_events(void); /*! Returns true if there are any unsaved events in the queue */ bool replay_has_events(void); /*! Saves events from queue into the file */ -void replay_save_events(int checkpoint); +void replay_save_events(void); /*! Read events from the file into the input queue */ -void replay_read_events(int checkpoint); +void replay_read_events(void); /*! Adds specified async event to the queue */ void replay_add_event(ReplayAsyncEventKind event_kind, void *opaque, void *opaque2, uint64_t id); diff --git a/replay/replay-snapshot.c b/replay/replay-snapshot.c index e8767a1937..10a7cf7992 100644 --- a/replay/replay-snapshot.c +++ b/replay/replay-snapshot.c @@ -59,9 +59,7 @@ static const VMStateDescription vmstate_replay = { VMSTATE_UINT32(has_unread_data, ReplayState), VMSTATE_UINT64(file_offset, ReplayState), VMSTATE_UINT64(block_request_id, ReplayState), - VMSTATE_INT32(read_event_kind, ReplayState), VMSTATE_UINT64(read_event_id, ReplayState), - VMSTATE_INT32(read_event_checkpoint, ReplayState), VMSTATE_END_OF_LIST() }, }; diff --git a/replay/replay.c b/replay/replay.c index 6df2abc18c..9a0dc1cf44 100644 --- a/replay/replay.c +++ b/replay/replay.c @@ -22,7 +22,7 @@ /* Current version of the replay mechanism. Increase it when file format changes. */ -#define REPLAY_VERSION 0xe0200a +#define REPLAY_VERSION 0xe0200c /* Size of replay log header */ #define HEADER_SIZE (sizeof(uint32_t) + sizeof(uint64_t)) @@ -171,64 +171,49 @@ void replay_shutdown_request(ShutdownCause cause) bool replay_checkpoint(ReplayCheckpoint checkpoint) { - bool res = false; - static bool in_checkpoint; assert(EVENT_CHECKPOINT + checkpoint <= EVENT_CHECKPOINT_LAST); - if (!replay_file) { - return true; - } - - if (in_checkpoint) { - /* - Recursion occurs when HW event modifies timers. - Prevent performing icount warp in this case and - wait for another invocation of the checkpoint. - */ - g_assert(replay_mode == REPLAY_MODE_PLAY); - return false; - } - in_checkpoint = true; - replay_save_instructions(); if (replay_mode == REPLAY_MODE_PLAY) { g_assert(replay_mutex_locked()); if (replay_next_event_is(EVENT_CHECKPOINT + checkpoint)) { replay_finish_event(); - } else if (replay_state.data_kind != EVENT_ASYNC) { - res = false; - goto out; + } else { + return false; } - replay_read_events(checkpoint); - /* replay_read_events may leave some unread events. - Return false if not all of the events associated with - checkpoint were processed */ - res = replay_state.data_kind != EVENT_ASYNC; } else if (replay_mode == REPLAY_MODE_RECORD) { g_assert(replay_mutex_locked()); replay_put_event(EVENT_CHECKPOINT + checkpoint); - /* This checkpoint belongs to several threads. - Processing events from different threads is - non-deterministic */ - if (checkpoint != CHECKPOINT_CLOCK_WARP_START - /* FIXME: this is temporary fix, other checkpoints - may also be invoked from the different threads someday. 
- Asynchronous event processing should be refactored - to create additional replay event kind which is - nailed to the one of the threads and which processes - the event queue. */ - && checkpoint != CHECKPOINT_CLOCK_VIRTUAL) { - replay_save_events(checkpoint); - } - res = true; } -out: - in_checkpoint = false; - return res; + return true; } -bool replay_has_checkpoint(void) +void replay_async_events(void) +{ + static bool processing = false; + /* + * If we are already processing the events, recursion may occur + * in case of incorrect implementation when HW event modifies timers. + * Timer modification may invoke the icount warp, event processing, + * and cause the recursion. + */ + g_assert(!processing); + processing = true; + + replay_save_instructions(); + + if (replay_mode == REPLAY_MODE_PLAY) { + g_assert(replay_mutex_locked()); + replay_read_events(); + } else if (replay_mode == REPLAY_MODE_RECORD) { + g_assert(replay_mutex_locked()); + replay_save_events(); + } + processing = false; +} + +bool replay_has_event(void) { bool res = false; if (replay_mode == REPLAY_MODE_PLAY) { @@ -236,6 +221,8 @@ bool replay_has_checkpoint(void) replay_account_executed_instructions(); res = EVENT_CHECKPOINT <= replay_state.data_kind && replay_state.data_kind <= EVENT_CHECKPOINT_LAST; + res = res || (EVENT_ASYNC <= replay_state.data_kind + && replay_state.data_kind <= EVENT_ASYNC_LAST); } return res; } @@ -379,17 +366,14 @@ void replay_finish(void) fclose(replay_file); replay_file = NULL; } - if (replay_filename) { - g_free(replay_filename); - replay_filename = NULL; - } + g_free(replay_filename); + replay_filename = NULL; g_free(replay_snapshot); replay_snapshot = NULL; - replay_mode = REPLAY_MODE_NONE; - replay_finish_events(); + replay_mode = REPLAY_MODE_NONE; } void replay_add_blocker(Error *reason) diff --git a/roms/Makefile b/roms/Makefile index eeb5970348..5e44d97890 100644 --- a/roms/Makefile +++ b/roms/Makefile @@ -143,7 +143,8 @@ build-efi-roms: build-pxe-roms # efirom # edk2-basetools: - cd edk2/BaseTools && git submodule update --init --force + cd edk2/BaseTools && git submodule update --init --force \ + Source/C/BrotliCompress/brotli $(MAKE) -C edk2/BaseTools \ PYTHON_COMMAND=$${EDK2_PYTHON_COMMAND:-python3} \ EXTRA_OPTFLAGS='$(EDK2_BASETOOLS_OPTFLAGS)' \ @@ -177,14 +178,12 @@ opensbi32-generic: CROSS_COMPILE=$(riscv32_cross_prefix) \ PLATFORM="generic" cp opensbi/build/platform/generic/firmware/fw_dynamic.bin ../pc-bios/opensbi-riscv32-generic-fw_dynamic.bin - cp opensbi/build/platform/generic/firmware/fw_dynamic.elf ../pc-bios/opensbi-riscv32-generic-fw_dynamic.elf opensbi64-generic: $(MAKE) -C opensbi \ CROSS_COMPILE=$(riscv64_cross_prefix) \ PLATFORM="generic" cp opensbi/build/platform/generic/firmware/fw_dynamic.bin ../pc-bios/opensbi-riscv64-generic-fw_dynamic.bin - cp opensbi/build/platform/generic/firmware/fw_dynamic.elf ../pc-bios/opensbi-riscv64-generic-fw_dynamic.elf MESON = meson NINJA = ninja diff --git a/roms/Makefile.edk2 b/roms/Makefile.edk2 index a8ed325575..485f2244b1 100644 --- a/roms/Makefile.edk2 +++ b/roms/Makefile.edk2 @@ -13,6 +13,7 @@ SHELL = /bin/bash +target = RELEASE toolchain = $(shell source ./edk2-funcs.sh && qemu_edk2_get_toolchain $(1)) licenses := \ @@ -32,6 +33,7 @@ flashdevs := \ i386-secure-code \ x86_64-code \ x86_64-secure-code \ + x86_64-microvm \ \ arm-vars \ i386-vars @@ -50,8 +52,13 @@ all: $(foreach flashdev,$(flashdevs),../pc-bios/edk2-$(flashdev).fd.bz2) \ # we're building from a tarball and that they've already been fetched by # 
make-release/tarball scripts. submodules: - if test -d edk2/.git; then \ - cd edk2 && git submodule update --init --force; \ + if test -e edk2/.git; then \ + cd edk2 && git submodule update --init --force -- \ + ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3 \ + BaseTools/Source/C/BrotliCompress/brotli \ + CryptoPkg/Library/OpensslLib/openssl \ + MdeModulePkg/Library/BrotliCustomDecompressLib/brotli \ + ; \ fi # See notes on the ".NOTPARALLEL" target and the "+" indicator in @@ -68,7 +75,7 @@ submodules: -D NETWORK_TLS_ENABLE \ -D TPM2_ENABLE \ -D TPM2_CONFIG_ENABLE - cp edk2/Build/ArmVirtQemu-AARCH64/DEBUG_$(call toolchain,aarch64)/FV/QEMU_EFI.fd \ + cp edk2/Build/ArmVirtQemu-AARCH64/$(target)_$(call toolchain,aarch64)/FV/QEMU_EFI.fd \ $@ truncate --size=64M $@ @@ -82,7 +89,7 @@ submodules: -D NETWORK_TLS_ENABLE \ -D TPM2_ENABLE \ -D TPM2_CONFIG_ENABLE - cp edk2/Build/ArmVirtQemu-ARM/DEBUG_$(call toolchain,arm)/FV/QEMU_EFI.fd \ + cp edk2/Build/ArmVirtQemu-ARM/$(target)_$(call toolchain,arm)/FV/QEMU_EFI.fd \ $@ truncate --size=64M $@ @@ -96,7 +103,7 @@ submodules: -D NETWORK_TLS_ENABLE \ -D TPM_ENABLE \ -D TPM_CONFIG_ENABLE - cp edk2/Build/OvmfIa32/DEBUG_$(call toolchain,i386)/FV/OVMF_CODE.fd $@ + cp edk2/Build/OvmfIa32/$(target)_$(call toolchain,i386)/FV/OVMF_CODE.fd $@ ../pc-bios/edk2-i386-secure-code.fd: submodules +./edk2-build.sh \ @@ -110,7 +117,7 @@ submodules: -D TPM_CONFIG_ENABLE \ -D SECURE_BOOT_ENABLE \ -D SMM_REQUIRE - cp edk2/Build/OvmfIa32/DEBUG_$(call toolchain,i386)/FV/OVMF_CODE.fd $@ + cp edk2/Build/OvmfIa32/$(target)_$(call toolchain,i386)/FV/OVMF_CODE.fd $@ ../pc-bios/edk2-x86_64-code.fd: submodules +./edk2-build.sh \ @@ -122,7 +129,7 @@ submodules: -D NETWORK_TLS_ENABLE \ -D TPM_ENABLE \ -D TPM_CONFIG_ENABLE - cp edk2/Build/OvmfX64/DEBUG_$(call toolchain,x86_64)/FV/OVMF_CODE.fd $@ + cp edk2/Build/OvmfX64/$(target)_$(call toolchain,x86_64)/FV/OVMF_CODE.fd $@ ../pc-bios/edk2-x86_64-secure-code.fd: submodules +./edk2-build.sh \ @@ -137,15 +144,25 @@ submodules: -D TPM_CONFIG_ENABLE \ -D SECURE_BOOT_ENABLE \ -D SMM_REQUIRE - cp edk2/Build/Ovmf3264/DEBUG_$(call toolchain,x86_64)/FV/OVMF_CODE.fd $@ + cp edk2/Build/Ovmf3264/$(target)_$(call toolchain,x86_64)/FV/OVMF_CODE.fd $@ + +../pc-bios/edk2-x86_64-microvm.fd: submodules + +./edk2-build.sh \ + x86_64 \ + --arch=X64 \ + --platform=OvmfPkg/Microvm/MicrovmX64.dsc \ + -D NETWORK_IP6_ENABLE \ + -D NETWORK_HTTP_BOOT_ENABLE \ + -D NETWORK_TLS_ENABLE + cp edk2/Build/MicrovmX64/$(target)_$(call toolchain,x86_64)/FV/MICROVM.fd $@ ../pc-bios/edk2-arm-vars.fd: ../pc-bios/edk2-arm-code.fd - cp edk2/Build/ArmVirtQemu-ARM/DEBUG_$(call toolchain,arm)/FV/QEMU_VARS.fd \ + cp edk2/Build/ArmVirtQemu-ARM/$(target)_$(call toolchain,arm)/FV/QEMU_VARS.fd \ $@ truncate --size=64M $@ ../pc-bios/edk2-i386-vars.fd: ../pc-bios/edk2-i386-code.fd - cp edk2/Build/OvmfIa32/DEBUG_$(call toolchain,i386)/FV/OVMF_VARS.fd $@ + cp edk2/Build/OvmfIa32/$(target)_$(call toolchain,i386)/FV/OVMF_VARS.fd $@ # The license file accumulates several individual licenses from under edk2, # prefixing each individual license with a header (generated by "tail") that diff --git a/roms/SLOF b/roms/SLOF index dd0dcaa1c1..6b6c16b4b4 160000 --- a/roms/SLOF +++ b/roms/SLOF @@ -1 +1 @@ -Subproject commit dd0dcaa1c1085c159ddab709c7f274b3917be8bd +Subproject commit 6b6c16b4b40763507cf1f518096f3c3883c5cf2d diff --git a/roms/edk2 b/roms/edk2 index 06dc822d04..b24306f15d 160000 --- a/roms/edk2 +++ b/roms/edk2 @@ -1 +1 @@ -Subproject commit 06dc822d045c2bb42e497487935485302486e151 
+Subproject commit b24306f15daa2ff8510b06702114724b33895d3c diff --git a/roms/edk2-build.sh b/roms/edk2-build.sh index d5391c7637..ea79dc27a2 100755 --- a/roms/edk2-build.sh +++ b/roms/edk2-build.sh @@ -50,6 +50,6 @@ qemu_edk2_set_cross_env "$emulation_target" build \ --cmd-len=65536 \ -n "$edk2_thread_count" \ - --buildtarget=DEBUG \ + --buildtarget=RELEASE \ --tagname="$edk2_toolchain" \ "${args[@]}" diff --git a/roms/openbios b/roms/openbios index 4a0041107b..0e0afae657 160000 --- a/roms/openbios +++ b/roms/openbios @@ -1 +1 @@ -Subproject commit 4a0041107b8ef77e0e8337bfcb5f8078887261a7 +Subproject commit 0e0afae6579c1efe9f0d85505b75ffe989554133 diff --git a/roms/opensbi b/roms/opensbi index 234ed8e427..4489876e93 160000 --- a/roms/opensbi +++ b/roms/opensbi @@ -1 +1 @@ -Subproject commit 234ed8e427f4d92903123199f6590d144e0d9351 +Subproject commit 4489876e933d8ba0d8bc6c64bae71e295d45faac diff --git a/roms/qboot b/roms/qboot index a5300c4949..8ca302e86d 160000 --- a/roms/qboot +++ b/roms/qboot @@ -1 +1 @@ -Subproject commit a5300c4949b8d4de2d34bedfaed66793f48ec948 +Subproject commit 8ca302e86d685fa05b16e2b208888243da319941 diff --git a/roms/seabios b/roms/seabios index 155821a199..3208b098f5 160000 --- a/roms/seabios +++ b/roms/seabios @@ -1 +1 @@ -Subproject commit 155821a1990b6de78dde5f98fa5ab90e802021e0 +Subproject commit 3208b098f51a9ef96d0dfa71d5ec3a3eaec88f0a diff --git a/roms/seabios-hppa b/roms/seabios-hppa index 73b740f771..673d2595d4 160000 --- a/roms/seabios-hppa +++ b/roms/seabios-hppa @@ -1 +1 @@ -Subproject commit 73b740f77190643b2ada5ee97a9a108c6ef2a37b +Subproject commit 673d2595d4f773cc266cbf8dbaf2f475a6adb949 diff --git a/roms/skiboot b/roms/skiboot index 3a6fdede6c..24a7eb3596 160000 --- a/roms/skiboot +++ b/roms/skiboot @@ -1 +1 @@ -Subproject commit 3a6fdede6ce117facec0108afe716cf5d0472c3f +Subproject commit 24a7eb35966d93455520bc2debdd7954314b638b diff --git a/scripts/analyze-inclusions b/scripts/analyze-inclusions index 14806e18c6..45c821de32 100644 --- a/scripts/analyze-inclusions +++ b/scripts/analyze-inclusions @@ -46,7 +46,6 @@ grep_include() { } echo Found $(find . 
-name "*.d" | wc -l) object files -echo $(grep_include -F 'include/qemu-common.h') files include qemu-common.h echo $(grep_include -F 'hw/hw.h') files include hw/hw.h echo $(grep_include 'target/[a-z0-9]*/cpu\.h') files include cpu.h echo $(grep_include -F 'qapi-types.h') files include qapi-types.h @@ -86,9 +85,6 @@ analyze() { echo osdep.h: analyze ../include/qemu/osdep.h -echo qemu-common.h: -analyze -include ../include/qemu/osdep.h ../include/qemu-common.h - echo hw/hw.h: analyze -include ../include/qemu/osdep.h ../include/hw/hw.h diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py index d7177b212c..b82a1b0c58 100755 --- a/scripts/analyze-migration.py +++ b/scripts/analyze-migration.py @@ -588,7 +588,7 @@ if args.extract: dump.read(desc_only = True) print("desc.json") - f = open("desc.json", "wb") + f = open("desc.json", "w") f.truncate() f.write(jsonenc.encode(dump.vmsd_desc)) f.close() @@ -596,7 +596,7 @@ if args.extract: dump.read(write_memory = True) dict = dump.getDict() print("state.json") - f = open("state.json", "wb") + f = open("state.json", "w") f.truncate() f.write(jsonenc.encode(dict)) f.close() @@ -610,4 +610,4 @@ elif args.dump == "desc": dump.read(desc_only = True) print(jsonenc.encode(dump.vmsd_desc)) else: - raise Exception("Please specify either -x, -d state or -d dump") + raise Exception("Please specify either -x, -d state or -d desc") diff --git a/scripts/archive-source.sh b/scripts/archive-source.sh index b38bb2726a..ef51d3b784 100755 --- a/scripts/archive-source.sh +++ b/scripts/archive-source.sh @@ -26,7 +26,7 @@ sub_file="${sub_tdir}/submodule.tar" # independent of what the developer currently has initialized # in their checkout, because the build environment is completely # different to the host OS. -submodules="dtc slirp meson ui/keycodemapdb" +submodules="dtc meson ui/keycodemapdb" submodules="$submodules tests/fp/berkeley-softfloat-3 tests/fp/berkeley-testfloat-3" submodules="$submodules ui/thirdparty/imgui ui/thirdparty/implot util/xxHash tomlplusplus genconfig" # xemu extras submodules="$submodules hw/xbox/nv2a/thirdparty/nv2a_vsh_cpu" diff --git a/scripts/block-coroutine-wrapper.py b/scripts/block-coroutine-wrapper.py index 85dbeb9ecf..08be813407 100644 --- a/scripts/block-coroutine-wrapper.py +++ b/scripts/block-coroutine-wrapper.py @@ -100,12 +100,20 @@ def snake_to_camel(func_name: str) -> str: def gen_wrapper(func: FuncDecl) -> str: assert not '_co_' in func.name assert func.return_type == 'int' - assert func.args[0].type in ['BlockDriverState *', 'BdrvChild *'] + assert func.args[0].type in ['BlockDriverState *', 'BdrvChild *', + 'BlockBackend *'] subsystem, subname = func.name.split('_', 1) name = f'{subsystem}_co_{subname}' - bs = 'bs' if func.args[0].type == 'BlockDriverState *' else 'child->bs' + + t = func.args[0].type + if t == 'BlockDriverState *': + bs = 'bs' + elif t == 'BdrvChild *': + bs = 'child->bs' + else: + bs = 'blk_bs(blk)' struct_name = snake_to_camel(name) return f"""\ diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index cb8eff233e..6ecabfb2b5 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -223,11 +223,11 @@ our $Sparse = qr{ our $Attribute = qr{ const| volatile| - QEMU_NORETURN| - QEMU_WARN_UNUSED_RESULT| - QEMU_SENTINEL| + G_NORETURN| + G_GNUC_WARN_UNUSED_RESULT| + G_GNUC_NULL_TERMINATED| QEMU_PACKED| - GCC_FMT_ATTR + G_GNUC_PRINTF }x; our $Modifier; our $Inline = qr{inline}; @@ -307,7 +307,6 @@ our @typeList = ( qr{target_(?:u)?long}, qr{hwaddr}, # external libraries - 
qr{xml${Ident}}, qr{xen\w+_handle}, # Glib definitions qr{gchar}, @@ -1668,6 +1667,7 @@ sub process { # some scripts we imported from other projects. next if ($realfile =~ /\.(s|S)$/); next if ($realfile =~ /(checkpatch|get_maintainer)\.pl$/); + next if ($realfile =~ /^target\/hexagon\/imported\/*/); if ($rawline =~ /^\+.*\t/) { my $herevet = "$here\n" . cat_vet($rawline) . "\n"; @@ -1681,8 +1681,10 @@ sub process { # Block comment styles # Block comments use /* on a line of its own - if ($rawline !~ m@^\+.*/\*.*\*/[ \t)}]*$@ && #inline /*...*/ - $rawline =~ m@^\+.*/\*\*?+[ \t]*[^ \t]@) { # /* or /** non-blank + my $commentline = $rawline; + while ($commentline =~ s@^(\+.*)/\*.*\*/@$1@o) { # remove inline /*...*/ + } + if ($commentline =~ m@^\+.*/\*\*?+[ \t]*[^ \t]@) { # /* or /** non-blank WARN("Block comments use a leading /* on a separate line\n" . $herecurr); } @@ -2832,8 +2834,8 @@ sub process { } # check for pointless casting of g_malloc return - if ($line =~ /\*\s*\)\s*g_(try)?(m|re)alloc(0?)(_n)?\b/) { - if ($2 == 'm') { + if ($line =~ /\*\s*\)\s*g_(try|)(m|re)alloc(0?)(_n)?\b/) { + if ($2 eq 'm') { ERROR("unnecessary cast may hide bugs, use g_$1new$3 instead\n" . $herecurr); } else { ERROR("unnecessary cast may hide bugs, use g_$1renew$3 instead\n" . $herecurr); @@ -2850,6 +2852,11 @@ sub process { WARN("consider using g_path_get_$1() in preference to g_strdup($1())\n" . $herecurr); } +# enforce g_memdup2() over g_memdup() + if ($line =~ /\bg_memdup\s*\(/) { + ERROR("use g_memdup2() instead of unsafe g_memdup()\n" . $herecurr); + } + # recommend qemu_strto* over strto* for numeric conversions if ($line =~ /\b(strto[^kd].*?)\s*\(/) { ERROR("consider using qemu_$1 in preference to $1\n" . $herecurr); @@ -2878,6 +2885,7 @@ sub process { SCSIBusInfo| SCSIReqOps| Spice[A-Z][a-zA-Z0-9]*Interface| + TypeInfo| USBDesc[A-Z][a-zA-Z0-9]*| VhostOps| VMStateDescription| @@ -2969,10 +2977,10 @@ sub process { ERROR("use memset() instead of bzero()\n" . $herecurr); } if ($line =~ /\bgetpagesize\(\)/) { - ERROR("use qemu_real_host_page_size instead of getpagesize()\n" . $herecurr); + ERROR("use qemu_real_host_page_size() instead of getpagesize()\n" . $herecurr); } if ($line =~ /\bsysconf\(_SC_PAGESIZE\)/) { - ERROR("use qemu_real_host_page_size instead of sysconf(_SC_PAGESIZE)\n" . $herecurr); + ERROR("use qemu_real_host_page_size() instead of sysconf(_SC_PAGESIZE)\n" . 
$herecurr); } my $non_exit_glib_asserts = qr{g_assert_cmpstr| g_assert_cmpint| diff --git a/scripts/ci/org.centos/stream/8/build-environment.yml b/scripts/ci/org.centos/stream/8/build-environment.yml new file mode 100644 index 0000000000..42b0471634 --- /dev/null +++ b/scripts/ci/org.centos/stream/8/build-environment.yml @@ -0,0 +1,51 @@ +--- +- name: Installation of extra packages to build QEMU + hosts: all + tasks: + - name: Extra check for CentOS Stream 8 + lineinfile: + path: /etc/redhat-release + line: CentOS Stream release 8 + state: present + check_mode: yes + register: centos_stream_8 + + - name: Enable PowerTools repo on CentOS Stream 8 + ini_file: + path: /etc/yum.repos.d/CentOS-Stream-PowerTools.repo + section: powertools + option: enabled + value: "1" + when: + - ansible_facts['distribution'] == 'CentOS' + - ansible_facts['distribution_major_version'] == '8' + - centos_stream_8 + + - name: Install basic packages to build QEMU on CentOS Stream 8 + dnf: + name: + - device-mapper-multipath-devel + - glusterfs-api-devel + - gnutls-devel + - libcap-ng-devel + - libcurl-devel + - libfdt-devel + - libiscsi-devel + - libpmem-devel + - librados-devel + - librbd-devel + - libseccomp-devel + - libssh-devel + - libxkbcommon-devel + - ninja-build + - numactl-devel + - python3-sphinx + - redhat-rpm-config + - snappy-devel + - spice-server-devel + - systemd-devel + state: present + when: + - ansible_facts['distribution'] == 'CentOS' + - ansible_facts['distribution_major_version'] == '8' + - centos_stream_8 diff --git a/scripts/ci/org.centos/stream/8/x86_64/configure b/scripts/ci/org.centos/stream/8/x86_64/configure new file mode 100755 index 0000000000..a7f92aff90 --- /dev/null +++ b/scripts/ci/org.centos/stream/8/x86_64/configure @@ -0,0 +1,203 @@ +#!/bin/sh -e +# +# Configuration for QEMU based on CentOS Stream 8 x86_64 builds +# +# The "configure" command line is based on: +# +# https://git.centos.org/rpms/qemu-kvm/blob/c8s-stream-rhel/f/SPECS/qemu-kvm.spec +# +# But, because the SPEC file contains a number of conditionals and +# variable and expansions only available at RPM build time, this version +# was initially generated from an actual RPM build on an x86_64 platform. +# +# From that initial version, options that are required or are a +# consequence of non-upstream patches have been adapted. One example +# is "--without-default-devices" which is *not* present here, given +# that patches adding downstream specific devices are not available. 
+# +../configure \ +--prefix="/usr" \ +--libdir="/usr/lib64" \ +--datadir="/usr/share" \ +--sysconfdir="/etc" \ +--interp-prefix=/usr/qemu-%M \ +--localstatedir="/var" \ +--docdir="/usr/share/doc" \ +--libexecdir="/usr/libexec" \ +--extra-ldflags="-Wl,--build-id -Wl,-z,relro -Wl,-z,now" \ +--extra-cflags="-O2 -g -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fexceptions -fstack-protector-strong -grecord-gcc-switches -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -m64 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection" \ +--with-suffix="qemu-kvm" \ +--firmwarepath=/usr/share/qemu-firmware \ +--with-git=meson \ +--with-git-submodules=update \ +--target-list="x86_64-softmmu" \ +--block-drv-rw-whitelist="qcow2,raw,file,host_device,nbd,iscsi,rbd,blkdebug,luks,null-co,nvme,copy-on-read,throttle,gluster" \ +--audio-drv-list="" \ +--block-drv-ro-whitelist="vmdk,vhdx,vpc,https,ssh" \ +--with-coroutine=ucontext \ +--with-git=git \ +--tls-priority=@QEMU,SYSTEM \ +--disable-attr \ +--disable-auth-pam \ +--disable-avx2 \ +--disable-avx512f \ +--disable-bochs \ +--disable-bpf \ +--disable-brlapi \ +--disable-bsd-user \ +--disable-bzip2 \ +--disable-cap-ng \ +--disable-capstone \ +--disable-cfi \ +--disable-cfi-debug \ +--disable-cloop \ +--disable-cocoa \ +--disable-coroutine-pool \ +--disable-crypto-afalg \ +--disable-curl \ +--disable-curses \ +--disable-debug-info \ +--disable-debug-mutex \ +--disable-debug-tcg \ +--disable-dmg \ +--disable-docs \ +--disable-fuse \ +--disable-fuse-lseek \ +--disable-gcrypt \ +--disable-gio \ +--disable-glusterfs \ +--disable-gnutls \ +--disable-gtk \ +--disable-guest-agent \ +--disable-guest-agent-msi \ +--disable-hax \ +--disable-hvf \ +--disable-iconv \ +--disable-kvm \ +--disable-libdaxctl \ +--disable-libiscsi \ +--disable-libnfs \ +--disable-libpmem \ +--disable-libssh \ +--disable-libudev \ +--disable-libusb \ +--disable-linux-aio \ +--disable-linux-io-uring \ +--disable-linux-user \ +--disable-live-block-migration \ +--disable-lto \ +--disable-lzfse \ +--disable-lzo \ +--disable-malloc-trim \ +--disable-membarrier \ +--disable-modules \ +--disable-module-upgrades \ +--disable-mpath \ +--disable-multiprocess \ +--disable-netmap \ +--disable-nettle \ +--disable-numa \ +--disable-nvmm \ +--disable-opengl \ +--disable-parallels \ +--disable-pie \ +--disable-pvrdma \ +--disable-qcow1 \ +--disable-qed \ +--disable-qom-cast-debug \ +--disable-rbd \ +--disable-rdma \ +--disable-replication \ +--disable-rng-none \ +--disable-safe-stack \ +--disable-sanitizers \ +--disable-sdl \ +--disable-sdl-image \ +--disable-seccomp \ +--disable-slirp-smbd \ +--disable-smartcard \ +--disable-snappy \ +--disable-sparse \ +--disable-spice \ +--disable-strip \ +--disable-system \ +--disable-tcg \ +--disable-tools \ +--disable-tpm \ +--disable-u2f \ +--disable-usb-redir \ +--disable-user \ +--disable-vde \ +--disable-vdi \ +--disable-vhost-crypto \ +--disable-vhost-kernel \ +--disable-vhost-net \ +--disable-vhost-user \ +--disable-vhost-user-blk-server \ +--disable-vhost-vdpa \ +--disable-virglrenderer \ +--disable-virtfs \ +--disable-virtiofsd \ +--disable-vnc \ +--disable-vnc-jpeg \ +--disable-png \ +--disable-vnc-sasl \ +--disable-vte \ +--disable-vvfat \ +--disable-werror \ +--disable-whpx \ +--disable-xen \ +--disable-xen-pci-passthrough \ +--disable-xkbcommon \ +--disable-zstd \ +--enable-attr \ +--enable-avx2 \ +--enable-cap-ng \ +--enable-capstone \ 
+--enable-coroutine-pool \ +--enable-curl \ +--enable-debug-info \ +--enable-docs \ +--enable-fdt \ +--enable-gcrypt \ +--enable-glusterfs \ +--enable-gnutls \ +--enable-guest-agent \ +--enable-iconv \ +--enable-kvm \ +--enable-libiscsi \ +--enable-libpmem \ +--enable-libssh \ +--enable-libusb \ +--enable-libudev \ +--enable-linux-aio \ +--enable-lzo \ +--enable-malloc-trim \ +--enable-modules \ +--enable-mpath \ +--enable-numa \ +--enable-opengl \ +--enable-pie \ +--enable-rbd \ +--enable-rdma \ +--enable-seccomp \ +--enable-snappy \ +--enable-smartcard \ +--enable-spice \ +--enable-system \ +--enable-tcg \ +--enable-tools \ +--enable-tpm \ +--enable-trace-backend=dtrace \ +--enable-usb-redir \ +--enable-virtiofsd \ +--enable-vhost-kernel \ +--enable-vhost-net \ +--enable-vhost-user \ +--enable-vhost-user-blk-server \ +--enable-vhost-vdpa \ +--enable-vnc \ +--enable-png \ +--enable-vnc-sasl \ +--enable-werror \ +--enable-xkbcommon diff --git a/scripts/ci/org.centos/stream/8/x86_64/test-avocado b/scripts/ci/org.centos/stream/8/x86_64/test-avocado new file mode 100755 index 0000000000..7aeecbcfb8 --- /dev/null +++ b/scripts/ci/org.centos/stream/8/x86_64/test-avocado @@ -0,0 +1,70 @@ +#!/bin/sh -e +# +# Runs a previously vetted list of tests, either marked explicitly for +# KVM and x86_64, or tests that are generic enough to be valid for all +# targets. Such a test list can be generated with: +# +# ./tests/venv/bin/avocado list --filter-by-tags-include-empty \ +# --filter-by-tags-include-empty-key -t accel:kvm,arch:x86_64 \ +# tests/avocado/ +# +# This is almost the complete list of avocado based tests available at +# the time this was compile, with the following exceptions: +# +# * Require machine type "x-remote": +# - tests/avocado/multiprocess.py:Multiprocess.test_multiprocess_x86_64 +# +# * Needs superuser privileges: +# - tests/avocado/virtiofs_submounts.py:VirtiofsSubmountsTest.test_pre_virtiofsd_set_up +# - tests/avocado/virtiofs_submounts.py:VirtiofsSubmountsTest.test_pre_launch_set_up +# - tests/avocado/virtiofs_submounts.py:VirtiofsSubmountsTest.test_post_launch_set_up +# - tests/avocado/virtiofs_submounts.py:VirtiofsSubmountsTest.test_post_mount_set_up +# - tests/avocado/virtiofs_submounts.py:VirtiofsSubmountsTest.test_two_runs +# +# * Requires display type "egl-headless": +# - tests/avocado/virtio-gpu.py:VirtioGPUx86.test_virtio_vga_virgl +# - tests/avocado/virtio-gpu.py:VirtioGPUx86.test_vhost_user_vga_virgl +# +# * Test is marked (unconditionally) to be skipped: +# - tests/avocado/virtio_check_params.py:VirtioMaxSegSettingsCheck.test_machine_types +# +make get-vm-images +./tests/venv/bin/avocado run \ + --job-results-dir=tests/results/ \ + tests/avocado/boot_linux.py:BootLinuxX8664.test_pc_i440fx_kvm \ + tests/avocado/boot_linux.py:BootLinuxX8664.test_pc_q35_kvm \ + tests/avocado/boot_linux_console.py:BootLinuxConsole.test_x86_64_pc \ + tests/avocado/cpu_queries.py:QueryCPUModelExpansion.test \ + tests/avocado/empty_cpu_model.py:EmptyCPUModel.test \ + tests/avocado/hotplug_cpu.py:HotPlugCPU.test \ + tests/avocado/info_usernet.py:InfoUsernet.test_hostfwd \ + tests/avocado/intel_iommu.py:IntelIOMMU.test_intel_iommu \ + tests/avocado/intel_iommu.py:IntelIOMMU.test_intel_iommu_pt \ + tests/avocado/intel_iommu.py:IntelIOMMU.test_intel_iommu_strict \ + tests/avocado/intel_iommu.py:IntelIOMMU.test_intel_iommu_strict_cm \ + tests/avocado/linux_initrd.py:LinuxInitrd.test_with_2gib_file_should_exit_error_msg_with_linux_v3_6 \ + 
tests/avocado/linux_initrd.py:LinuxInitrd.test_with_2gib_file_should_work_with_linux_v4_16 \ + tests/avocado/migration.py:Migration.test_migration_with_exec \ + tests/avocado/migration.py:Migration.test_migration_with_tcp_localhost \ + tests/avocado/migration.py:Migration.test_migration_with_unix \ + tests/avocado/pc_cpu_hotplug_props.py:OmittedCPUProps.test_no_die_id \ + tests/avocado/replay_kernel.py:ReplayKernelNormal.test_x86_64_pc \ + tests/avocado/reverse_debugging.py:ReverseDebugging_X86_64.test_x86_64_pc \ + tests/avocado/version.py:Version.test_qmp_human_info_version \ + tests/avocado/virtio_version.py:VirtioVersionCheck.test_conventional_devs \ + tests/avocado/virtio_version.py:VirtioVersionCheck.test_modern_only_devs \ + tests/avocado/vnc.py:Vnc.test_change_password \ + tests/avocado/vnc.py:Vnc.test_change_password_requires_a_password \ + tests/avocado/vnc.py:Vnc.test_no_vnc \ + tests/avocado/vnc.py:Vnc.test_no_vnc_change_password \ + tests/avocado/x86_cpu_model_versions.py:CascadelakeArchCapabilities.test_4_0 \ + tests/avocado/x86_cpu_model_versions.py:CascadelakeArchCapabilities.test_4_1 \ + tests/avocado/x86_cpu_model_versions.py:CascadelakeArchCapabilities.test_set_4_0 \ + tests/avocado/x86_cpu_model_versions.py:CascadelakeArchCapabilities.test_unset_4_1 \ + tests/avocado/x86_cpu_model_versions.py:CascadelakeArchCapabilities.test_v1_4_0 \ + tests/avocado/x86_cpu_model_versions.py:CascadelakeArchCapabilities.test_v1_set_4_0 \ + tests/avocado/x86_cpu_model_versions.py:CascadelakeArchCapabilities.test_v2_4_0 \ + tests/avocado/x86_cpu_model_versions.py:CascadelakeArchCapabilities.test_v2_unset_4_1 \ + tests/avocado/x86_cpu_model_versions.py:X86CPUModelAliases.test_4_0_alias_compatibility \ + tests/avocado/x86_cpu_model_versions.py:X86CPUModelAliases.test_4_1_alias \ + tests/avocado/x86_cpu_model_versions.py:X86CPUModelAliases.test_none_alias diff --git a/scripts/ci/org.centos/stream/README b/scripts/ci/org.centos/stream/README new file mode 100644 index 0000000000..e3eadfe3ea --- /dev/null +++ b/scripts/ci/org.centos/stream/README @@ -0,0 +1,17 @@ +This directory contains scripts for generating a build of QEMU that +closely matches the CentOS Stream[1] builds of the qemu-kvm package. + +To have the environment ready to configure, build QEMU and run tests, +please start with a CentOS Stream machine and: + + * apply the generic "build-environment.yml" playbook located at + scripts/ci/setup + + * apply the "build-environment.yml" in the directory following the + CentOS Stream version (such as "8"). + +This currently only covers CentOS Stream 8 environments and +packages[2]. 
+ +[1] https://www.centos.org/centos-stream/ +[2] https://git.centos.org/rpms/qemu-kvm/commits/c8s-stream-rhel diff --git a/scripts/ci/setup/build-environment.yml b/scripts/ci/setup/build-environment.yml index 581c1c75d1..b04c2b7cee 100644 --- a/scripts/ci/setup/build-environment.yml +++ b/scripts/ci/setup/build-environment.yml @@ -19,6 +19,13 @@ - '((ansible_version.major == 2) and (ansible_version.minor >= 8)) or (ansible_version.major >= 3)' msg: "Unsuitable ansible version, please use version 2.8.0 or later" + - name: Add armhf foreign architecture to aarch64 hosts + command: dpkg --add-architecture armhf + when: + - ansible_facts['distribution'] == 'Ubuntu' + - ansible_facts['architecture'] == 'aarch64' + - ansible_facts['distribution_version'] == '20.04' + - name: Update apt cache / upgrade packages via apt apt: update_cache: yes @@ -26,10 +33,9 @@ when: - ansible_facts['distribution'] == 'Ubuntu' - - name: Install basic packages to build QEMU on Ubuntu 18.04/20.04 + - name: Install basic packages to build QEMU on Ubuntu 20.04 package: name: - # Originally from tests/docker/dockerfiles/ubuntu1804.docker - ccache - gcc - gettext @@ -83,7 +89,7 @@ when: - ansible_facts['distribution'] == 'Ubuntu' - - name: Install packages to build QEMU on Ubuntu 18.04/20.04 on non-s390x + - name: Install packages to build QEMU on Ubuntu 20.04 on non-s390x package: name: - libspice-server-dev @@ -91,16 +97,7 @@ state: present when: - ansible_facts['distribution'] == 'Ubuntu' - - ansible_facts['architecture'] != 's390x' - - - name: Install basic packages to build QEMU on Ubuntu 18.04 - package: - name: - # Originally from tests/docker/dockerfiles/ubuntu1804.docker - - clang - when: - - ansible_facts['distribution'] == 'Ubuntu' - - ansible_facts['distribution_version'] == '18.04' + - ansible_facts['architecture'] == 'aarch64' or ansible_facts['architecture'] == 'x86_64' - name: Install basic packages to build QEMU on Ubuntu 20.04 package: @@ -114,3 +111,70 @@ when: - ansible_facts['distribution'] == 'Ubuntu' - ansible_facts['distribution_version'] == '20.04' + + - name: Install armhf cross-compile packages to build QEMU on AArch64 Ubuntu 20.04 + package: + name: + - binutils-arm-linux-gnueabihf + - gcc-arm-linux-gnueabihf + - libblkid-dev:armhf + - libc6-dev:armhf + - libffi-dev:armhf + - libglib2.0-dev:armhf + - libmount-dev:armhf + - libpcre2-dev:armhf + - libpixman-1-dev:armhf + - zlib1g-dev:armhf + when: + - ansible_facts['distribution'] == 'Ubuntu' + - ansible_facts['distribution_version'] == '20.04' + - ansible_facts['architecture'] == 'aarch64' + + - name: Install basic packages to build QEMU on EL8 + dnf: + # This list of packages start with tests/docker/dockerfiles/centos8.docker + # but only include files that are common to all distro variants and present + # in the standard repos (no add-ons) + name: + - bzip2 + - bzip2-devel + - dbus-daemon + - diffutils + - gcc + - gcc-c++ + - genisoimage + - gettext + - git + - glib2-devel + - libaio-devel + - libepoxy-devel + - libgcrypt-devel + - lzo-devel + - make + - mesa-libEGL-devel + - nettle-devel + - ninja-build + - nmap-ncat + - perl-Test-Harness + - pixman-devel + - python36 + - rdma-core-devel + - spice-glib-devel + - systemtap-sdt-devel + - tar + - zlib-devel + state: present + when: + - ansible_facts['distribution_file_variety'] == 'RedHat' + - ansible_facts['distribution_version'] == '8' + + - name: Install packages only available on x86 and aarch64 + dnf: + # Spice server not available in ppc64le + name: + - spice-server + state: present + 
when: + - ansible_facts['distribution_file_variety'] == 'RedHat' + - ansible_facts['distribution_version'] == '8' + - ansible_facts['architecture'] == 'aarch64' or ansible_facts['architecture'] == 'x86_64' diff --git a/scripts/ci/setup/gitlab-runner.yml b/scripts/ci/setup/gitlab-runner.yml index 1127db516f..33128be85d 100644 --- a/scripts/ci/setup/gitlab-runner.yml +++ b/scripts/ci/setup/gitlab-runner.yml @@ -69,3 +69,41 @@ name: gitlab-runner state: started enabled: yes + + - name: Download secondary gitlab-runner + get_url: + dest: /usr/local/bin/gitlab-runner-arm + url: "https://s3.amazonaws.com/gitlab-runner-downloads/v{{ gitlab_runner_version }}/binaries/gitlab-runner-{{ gitlab_runner_os }}-arm" + owner: gitlab-runner + group: gitlab-runner + mode: u=rwx,g=rwx,o=rx + when: + - ansible_facts['distribution'] == 'Ubuntu' + - ansible_facts['architecture'] == 'aarch64' + - ansible_facts['distribution_version'] == '20.04' + + - name: Register secondary gitlab-runner + command: "/usr/local/bin/gitlab-runner-arm register --non-interactive --url {{ gitlab_runner_server_url }} --registration-token {{ gitlab_runner_registration_token }} --executor shell --tag-list aarch32,{{ ansible_facts[\"distribution\"]|lower }}_{{ ansible_facts[\"distribution_version\"] }} --description '{{ ansible_facts[\"distribution\"] }} {{ ansible_facts[\"distribution_version\"] }} {{ ansible_facts[\"architecture\"] }} ({{ ansible_facts[\"os_family\"] }})'" + when: + - ansible_facts['distribution'] == 'Ubuntu' + - ansible_facts['architecture'] == 'aarch64' + - ansible_facts['distribution_version'] == '20.04' + + - name: Install the secondary gitlab-runner service using its own functionality + command: /usr/local/bin/gitlab-runner-arm install --user gitlab-runner --working-directory /home/gitlab-runner/arm -n gitlab-runner-arm + register: gitlab_runner_install_service_result + failed_when: "gitlab_runner_install_service_result.rc != 0 and \"already exists\" not in gitlab_runner_install_service_result.stderr" + when: + - ansible_facts['distribution'] == 'Ubuntu' + - ansible_facts['architecture'] == 'aarch64' + - ansible_facts['distribution_version'] == '20.04' + + - name: Enable the secondary gitlab-runner service + service: + name: gitlab-runner-arm + state: started + enabled: yes + when: + - ansible_facts['distribution'] == 'Ubuntu' + - ansible_facts['architecture'] == 'aarch64' + - ansible_facts['distribution_version'] == '20.04' diff --git a/scripts/clean-header-guards.pl b/scripts/clean-header-guards.pl index a6680253b1..a7fd8dc99f 100755 --- a/scripts/clean-header-guards.pl +++ b/scripts/clean-header-guards.pl @@ -32,8 +32,8 @@ use warnings; use Getopt::Std; # Stuff we don't want to clean because we import it into our tree: -my $exclude = qr,^(disas/libvixl/|include/standard-headers/ - |linux-headers/|pc-bios/|tests/tcg/|tests/multiboot/),x; +my $exclude = qr,^(include/standard-headers/|linux-headers/ + |pc-bios/|tests/tcg/|tests/multiboot/),x; # Stuff that is expected to fail the preprocessing test: my $exclude_cpp = qr,^include/libdecnumber/decNumberLocal.h,; diff --git a/scripts/clean-includes b/scripts/clean-includes index aaa7d4ceb3..d37bd4f692 100755 --- a/scripts/clean-includes +++ b/scripts/clean-includes @@ -51,7 +51,7 @@ GIT=no DUPHEAD=no # Extended regular expression defining files to ignore when using --all -XDIRREGEX='^(tests/tcg|tests/multiboot|pc-bios|disas/libvixl)' +XDIRREGEX='^(tests/tcg|tests/multiboot|pc-bios)' while true do diff --git a/scripts/cocci-macro-file.h b/scripts/cocci-macro-file.h 
index 20eea6b708..d247a5086e 100644 --- a/scripts/cocci-macro-file.h +++ b/scripts/cocci-macro-file.h @@ -19,9 +19,9 @@ */ /* From qemu/compiler.h */ -#define QEMU_NORETURN __attribute__ ((__noreturn__)) -#define QEMU_WARN_UNUSED_RESULT __attribute__((warn_unused_result)) -#define QEMU_SENTINEL __attribute__((sentinel)) +#define G_NORETURN __attribute__ ((__noreturn__)) +#define G_GNUC_WARN_UNUSED_RESULT __attribute__((warn_unused_result)) +#define G_GNUC_NULL_TERMINATED __attribute__((sentinel)) #if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__)) # define QEMU_PACKED __attribute__((gcc_struct, packed)) @@ -34,7 +34,7 @@ #define QEMU_BUILD_BUG_ON(x) \ typedef char cat2(qemu_build_bug_on__,__LINE__)[(x)?-1:1] __attribute__((unused)); -#define GCC_FMT_ATTR(n, m) __attribute__((format(gnu_printf, n, m))) +#define G_GNUC_PRINTF(n, m) __attribute__((format(gnu_printf, n, m))) #define xglue(x, y) x ## y #define glue(x, y) xglue(x, y) diff --git a/scripts/coccinelle/use-g_new-etc.cocci b/scripts/coccinelle/use-g_new-etc.cocci new file mode 100644 index 0000000000..e2280e93b3 --- /dev/null +++ b/scripts/coccinelle/use-g_new-etc.cocci @@ -0,0 +1,75 @@ +// Use g_new() & friends where that makes obvious sense +@@ +type T; +@@ +-g_malloc(sizeof(T)) ++g_new(T, 1) +@@ +type T; +@@ +-g_try_malloc(sizeof(T)) ++g_try_new(T, 1) +@@ +type T; +@@ +-g_malloc0(sizeof(T)) ++g_new0(T, 1) +@@ +type T; +@@ +-g_try_malloc0(sizeof(T)) ++g_try_new0(T, 1) +@@ +type T; +expression n; +@@ +-g_malloc(sizeof(T) * (n)) ++g_new(T, n) +@@ +type T; +expression n; +@@ +-g_try_malloc(sizeof(T) * (n)) ++g_try_new(T, n) +@@ +type T; +expression n; +@@ +-g_malloc0(sizeof(T) * (n)) ++g_new0(T, n) +@@ +type T; +expression n; +@@ +-g_try_malloc0(sizeof(T) * (n)) ++g_try_new0(T, n) +@@ +type T; +expression p, n; +@@ +-g_realloc(p, sizeof(T) * (n)) ++g_renew(T, p, n) +@@ +type T; +expression p, n; +@@ +-g_try_realloc(p, sizeof(T) * (n)) ++g_try_renew(T, p, n) +@@ +type T; +expression n; +@@ +-(T *)g_new(T, n) ++g_new(T, n) +@@ +type T; +expression n; +@@ +-(T *)g_new0(T, n) ++g_new0(T, n) +@@ +type T; +expression p, n; +@@ +-(T *)g_renew(T, p, n) ++g_renew(T, p, n) diff --git a/scripts/coverity-scan/COMPONENTS.md b/scripts/coverity-scan/COMPONENTS.md index 183f26a32c..0e6ab4936e 100644 --- a/scripts/coverity-scan/COMPONENTS.md +++ b/scripts/coverity-scan/COMPONENTS.md @@ -22,7 +22,7 @@ i386 ~ (/qemu)?((/include)?/hw/i386/.*|/target/i386/.*|/hw/intc/[^/]*apic[^/]*\.c) m68k - ~ (/qemu)?((/include)?/hw/m68k/.*|/target/m68k/.*|(/include)?/hw(/.*)?/mcf.*) + ~ (/qemu)?((/include)?/hw/m68k/.*|/target/m68k/.*|(/include)?/hw(/.*)?/mcf.*|(/include)?/hw/nubus/.*) microblaze ~ (/qemu)?((/include)?/hw/microblaze/.*|/target/microblaze/.*) @@ -87,9 +87,6 @@ io ipmi ~ (/qemu)?((/include)?/hw/ipmi/.*) -libvixl - ~ (/qemu)?(/disas/libvixl/.*) - migration ~ (/qemu)?((/include)?/migration/.*) @@ -111,8 +108,8 @@ qemu-ga scsi ~ (/qemu)?(/scsi/.*|/hw/scsi/.*|/include/hw/scsi/.*) -slirp - ~ (/qemu)?(/.*slirp.*) +slirp (component should be ignored in analysis) + ~ (/qemu)?(/slirp/.*) tcg ~ (/qemu)?(/accel/tcg/.*|/replay/.*|/(.*/)?softmmu.*) @@ -146,3 +143,9 @@ testlibs tests ~ (/qemu)?(/tests/.*) + +loongarch + ~ (/qemu)?((/include)?/hw/(loongarch/.*|.*/loongarch.*)|/target/loongarch/.*) + +riscv + ~ (/qemu)?((/include)?/hw/riscv/.*|/target/riscv/.*|/hw/.*/(riscv_|ibex_|sifive_).*) diff --git a/scripts/coverity-scan/coverity-scan.docker b/scripts/coverity-scan/coverity-scan.docker index ecff6ac5b4..6f60a52d23 100644 --- 
a/scripts/coverity-scan/coverity-scan.docker +++ b/scripts/coverity-scan/coverity-scan.docker @@ -59,7 +59,6 @@ ENV PACKAGES \ libubsan \ libudev-devel \ libusbx-devel \ - libxml2-devel \ libzstd-devel \ llvm \ lzo-devel \ diff --git a/scripts/coverity-scan/model.c b/scripts/coverity-scan/model.c index 9d4fba53d9..686d1a3008 100644 --- a/scripts/coverity-scan/model.c +++ b/scripts/coverity-scan/model.c @@ -356,7 +356,8 @@ int g_poll (GPollFD *fds, unsigned nfds, int timeout) typedef struct _GIOChannel GIOChannel; GIOChannel *g_io_channel_unix_new(int fd) { - GIOChannel *c = g_malloc0(sizeof(GIOChannel)); + /* cannot use incomplete type, the actual struct is roughly this size. */ + GIOChannel *c = g_malloc0(20 * sizeof(void *)); __coverity_escape__(fd); return c; } diff --git a/scripts/coverity-scan/run-coverity-scan b/scripts/coverity-scan/run-coverity-scan index 7395bbfad4..129672c86f 100755 --- a/scripts/coverity-scan/run-coverity-scan +++ b/scripts/coverity-scan/run-coverity-scan @@ -394,15 +394,15 @@ echo "Configuring..." --enable-opengl --enable-vte --enable-gnutls \ --enable-nettle --enable-curses --enable-curl \ --audio-drv-list=oss,alsa,sdl,pa --enable-virtfs \ - --enable-vnc --enable-vnc-sasl --enable-vnc-jpeg --enable-vnc-png \ + --enable-vnc --enable-vnc-sasl --enable-vnc-jpeg --enable-png \ --enable-xen --enable-brlapi \ --enable-linux-aio --enable-attr \ --enable-cap-ng --enable-trace-backends=log --enable-spice --enable-rbd \ - --enable-xfsctl --enable-libusb --enable-usb-redir \ + --enable-libusb --enable-usb-redir \ --enable-libiscsi --enable-libnfs --enable-seccomp \ --enable-tpm --enable-libssh --enable-lzo --enable-snappy --enable-bzip2 \ --enable-numa --enable-rdma --enable-smartcard --enable-virglrenderer \ - --enable-mpath --enable-libxml2 --enable-glusterfs \ + --enable-mpath --enable-glusterfs \ --enable-virtfs --enable-zstd echo "Running cov-build..." diff --git a/scripts/cpu-x86-uarch-abi.py b/scripts/cpu-x86-uarch-abi.py index 08acc52a81..82ff07582f 100644 --- a/scripts/cpu-x86-uarch-abi.py +++ b/scripts/cpu-x86-uarch-abi.py @@ -6,10 +6,10 @@ # compatibility levels for each CPU model. 
# -from qemu import qmp +from qemu.qmp.legacy import QEMUMonitorProtocol import sys -if len(sys.argv) != 1: +if len(sys.argv) != 2: print("syntax: %s QMP-SOCK\n\n" % __file__ + "Where QMP-SOCK points to a QEMU process such as\n\n" + " # qemu-system-x86_64 -qmp unix:/tmp/qmp,server,nowait " + @@ -66,8 +66,7 @@ levels = [ sock = sys.argv[1] -cmd = sys.argv[2] -shell = qmp.QEMUMonitorProtocol(sock) +shell = QEMUMonitorProtocol(sock) shell.connect() models = shell.cmd("query-cpu-definitions") diff --git a/scripts/device-crash-test b/scripts/device-crash-test index 8331c057b8..b74d887331 100755 --- a/scripts/device-crash-test +++ b/scripts/device-crash-test @@ -33,9 +33,18 @@ import re import random import argparse from itertools import chain +from pathlib import Path -sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'python')) -from qemu.machine import QEMUMachine +try: + from qemu.machine import QEMUMachine + from qemu.qmp import ConnectError +except ModuleNotFoundError as exc: + path = Path(__file__).resolve() + print(f"Module '{exc.name}' not found.") + print(" Try 'make check-venv' from your build directory,") + print(" and then one way to run this script is like so:") + print(f' > $builddir/tests/venv/bin/python3 "{path}"') + sys.exit(1) logger = logging.getLogger('device-crash-test') dbg = logger.debug @@ -92,6 +101,7 @@ ERROR_RULE_LIST = [ {'device':'pci-bridge', 'expected':True}, # Bridge chassis not specified. Each bridge is required to be assigned a unique chassis id > 0. {'device':'pci-bridge-seat', 'expected':True}, # Bridge chassis not specified. Each bridge is required to be assigned a unique chassis id > 0. {'device':'pxb', 'expected':True}, # Bridge chassis not specified. Each bridge is required to be assigned a unique chassis id > 0. + {'device':'pxb-cxl', 'expected':True}, # pxb-cxl devices cannot reside on a PCI bus. 
{'device':'scsi-block', 'expected':True}, # drive property not set {'device':'scsi-generic', 'expected':True}, # drive property not set {'device':'scsi-hd', 'expected':True}, # drive property not set @@ -175,7 +185,6 @@ ERROR_RULE_LIST = [ {'log':r"Multiple VT220 operator consoles are not supported"}, {'log':r"core 0 already populated"}, {'log':r"could not find stage1 bootloader"}, - {'log':r"No '.*' bus found for device"}, # other exitcode=1 failures not listed above will just generate INFO messages: {'exitcode':1, 'loglevel':logging.INFO}, @@ -317,9 +326,7 @@ class QemuBinaryInfo(object): try: vm.launch() mi['runnable'] = True - except KeyboardInterrupt: - raise - except: + except Exception: dbg("exception trying to run binary=%s machine=%s", self.binary, machine, exc_info=sys.exc_info()) dbg("log: %r", vm.get_log()) mi['runnable'] = False @@ -355,14 +362,14 @@ def checkOneCase(args, testcase): '-device', qemuOptsEscape(device)] cmdline = ' '.join([binary] + args) dbg("will launch QEMU: %s", cmdline) - vm = QEMUMachine(binary=binary, args=args) + vm = QEMUMachine(binary=binary, args=args, qmp_timer=15) + exc = None exc_traceback = None try: vm.launch() - except KeyboardInterrupt: - raise - except: + except Exception as this_exc: + exc = this_exc exc_traceback = traceback.format_exc() dbg("Exception while running test case") finally: @@ -370,8 +377,9 @@ def checkOneCase(args, testcase): ec = vm.exitcode() log = vm.get_log() - if exc_traceback is not None or ec != 0: - return {'exc_traceback':exc_traceback, + if exc is not None or ec != 0: + return {'exc': exc, + 'exc_traceback':exc_traceback, 'exitcode':ec, 'log':log, 'testcase':testcase, @@ -389,7 +397,7 @@ def binariesToTest(args, testcase): def accelsToTest(args, testcase): - if getBinaryInfo(args, testcase['binary']).kvm_available: + if getBinaryInfo(args, testcase['binary']).kvm_available and not args.tcg_only: yield 'kvm' yield 'tcg' @@ -459,6 +467,17 @@ def logFailure(f, level): for l in f['log'].strip().split('\n'): logger.log(level, "log: %s", l) logger.log(level, "exit code: %r", f['exitcode']) + + # If the Exception is merely a QMP connect error, + # reduce the logging level for its traceback to + # improve visual clarity. + if isinstance(f.get('exc'), ConnectError): + logger.log(level, "%s.%s: %s", + type(f['exc']).__module__, + type(f['exc']).__qualname__, + str(f['exc'])) + level = logging.DEBUG + if f['exc_traceback']: logger.log(level, "exception:") for l in f['exc_traceback'].split('\n'): @@ -491,6 +510,8 @@ def main(): help="Full mode: test cases that are expected to fail") parser.add_argument('--strict', action='store_true', dest='strict', help="Treat all warnings as fatal") + parser.add_argument('--tcg-only', action='store_true', dest='tcg_only', + help="Only test with TCG accelerator") parser.add_argument('qemu', nargs='*', metavar='QEMU', help='QEMU binary to run') args = parser.parse_args() @@ -503,6 +524,12 @@ def main(): lvl = logging.WARN logging.basicConfig(stream=sys.stdout, level=lvl, format='%(levelname)s: %(message)s') + if not args.debug: + # Async QMP, when in use, is chatty about connection failures. + # This script knowingly generates a ton of connection errors. + # Silence this logger. 
+ logging.getLogger('qemu.qmp.qmp_client').setLevel(logging.CRITICAL) + fatal_failures = [] wl_stats = {} skipped = 0 diff --git a/scripts/download-macos-libs.py b/scripts/download-macos-libs.py index a47be41b29..6333e23cc0 100755 --- a/scripts/download-macos-libs.py +++ b/scripts/download-macos-libs.py @@ -182,7 +182,8 @@ def main(): 'libpixman', 'libepoxy', 'openssl11', - 'libpcap']) + 'libpcap', + 'libslirp']) if __name__ == '__main__': main() diff --git a/scripts/entitlement.sh b/scripts/entitlement.sh index e2c956a3ac..0f412949ec 100755 --- a/scripts/entitlement.sh +++ b/scripts/entitlement.sh @@ -15,7 +15,7 @@ ENTITLEMENT="$4" if $in_place; then trap 'rm "$DST.tmp"' exit - cp -af "$SRC" "$DST.tmp" + cp -pPf "$SRC" "$DST.tmp" SRC="$DST.tmp" else cd "$MESON_INSTALL_DESTDIR_PREFIX" diff --git a/scripts/feature_to_c.sh b/scripts/feature_to_c.sh index b1169899c1..c1f67c8f6a 100644 --- a/scripts/feature_to_c.sh +++ b/scripts/feature_to_c.sh @@ -56,6 +56,7 @@ for input; do done echo +echo '#include "exec/gdbstub.h"' echo "const char *const xml_builtin[][2] = {" for input; do diff --git a/scripts/gen-license.py b/scripts/gen-license.py index cc11d110d1..d0372e91ba 100755 --- a/scripts/gen-license.py +++ b/scripts/gen-license.py @@ -174,7 +174,7 @@ Lib('qemu', 'https://www.qemu.org/', Lib('slirp', 'https://gitlab.freedesktop.org/slirp', bsd_3clause, 'https://gitlab.freedesktop.org/slirp/libslirp/-/raw/master/COPYRIGHT', license_lines=(16,39), ships_static=all_platforms, - submodule=Submodule('slirp') + pkgconfig=PkgConfig('slirp'), pkg_win='libslirp', pkg_mac='libslirp', pkg_ubuntu='libslirp-dev' ), Lib('imgui', 'https://github.com/ocornut/imgui', diff --git a/scripts/gensyscalls.sh b/scripts/gensyscalls.sh index 8fb450e3c9..a2f7664b7b 100755 --- a/scripts/gensyscalls.sh +++ b/scripts/gensyscalls.sh @@ -44,6 +44,7 @@ read_includes() cpp -P -nostdinc -fdirectives-only \ -D_UAPI_ASM_$(upper ${arch})_BITSPERLONG_H \ + -D__ASM_$(upper ${arch})_BITSPERLONG_H \ -D__BITS_PER_LONG=${bits} \ -I${linux}/arch/${arch}/include/uapi/ \ -I${linux}/include/uapi \ @@ -99,4 +100,5 @@ generate_syscall_nr openrisc 32 "$output/linux-user/openrisc/syscall_nr.h" generate_syscall_nr riscv 32 "$output/linux-user/riscv/syscall32_nr.h" generate_syscall_nr riscv 64 "$output/linux-user/riscv/syscall64_nr.h" generate_syscall_nr hexagon 32 "$output/linux-user/hexagon/syscall_nr.h" +generate_syscall_nr loongarch 64 "$output/linux-user/loongarch64/syscall_nr.h" rm -fr "$TMP" diff --git a/scripts/git-submodule.sh b/scripts/git-submodule.sh index e225d3a963..7be41f5948 100755 --- a/scripts/git-submodule.sh +++ b/scripts/git-submodule.sh @@ -51,6 +51,12 @@ validate_error() { exit 1 } +if test -n "$maybe_modules" && ! test -e ".git" +then + echo "$0: unexpectedly called with submodules but no git checkout exists" + exit 1 +fi + modules="" for m in $maybe_modules do @@ -63,12 +69,6 @@ do fi done -if test -n "$maybe_modules" && ! test -e ".git" -then - echo "$0: unexpectedly called with submodules but no git checkout exists" - exit 1 -fi - case "$command" in status|validate) if test -z "$maybe_modules" diff --git a/scripts/hxtool-conv.pl b/scripts/hxtool-conv.pl deleted file mode 100755 index eede40b346..0000000000 --- a/scripts/hxtool-conv.pl +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/perl -w -# -# Script to convert .hx file STEXI/ETEXI blocks to SRST/ERST -# -# Copyright (C) 2020 Linaro -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# (at your option) any later version. 
See the COPYING file in the -# top-level directory. - -# This script was only ever intended as a one-off conversion operation. -# Please excuse the places where it is a bit hacky. -# Some manual intervention after the conversion is expected, as are -# some warnings from makeinfo. -# Warning: this script is not idempotent: don't try to run it on -# a .hx file that already has SRST/ERST sections. - -# Expected usage: -# scripts/hxtool-conv.pl file.hx > file.hx.new - -use utf8; - -my $reading_texi = 0; -my $texiblock = ''; -my @tables = (); - -sub update_tables($) { - my ($texi) = @_; - # Update our list of open table directives: every @table - # line in the texi fragment is added to the list, and every - # @end table line means we remove an entry from the list. - # If this fragment had a completely self contained table with - # both the @table and @end table lines, this will be a no-op. - foreach (split(/\n/, $texi)) { - push @tables, $_ if /^\@table/; - pop @tables if /^\@end table/; - } -} - -sub only_table_directives($) { - # Return true if every line in the fragment is a start or end table directive - my ($texi) = @_; - foreach (split(/\n/, $texi)) { - return 0 unless /^\@table/ or /^\@end table/; - } - return 1; -} - -sub output_rstblock($) { - # Write the output to /tmp/frag.texi, wrapped in whatever current @table - # lines we need. - my ($texi) = @_; - - # As a special case, if this fragment is only table directives and - # nothing else, update our set of open table directives but otherwise - # ignore it. This avoids emitting an empty SRST/ERST block. - if (only_table_directives($texi)) { - update_tables($texi); - return; - } - - open(my $fragfh, '>', '/tmp/frag.texi'); - # First output the currently active set of open table directives - print $fragfh join("\n", @tables); - # Next, update our list of open table directives. - # We need to do this before we emit the closing table directives - # so that we emit the right number if this fragment had an - # unbalanced set of directives. - update_tables($texi); - # Then emit the texi fragment itself. - print $fragfh "\n$texi\n"; - # Finally, add the necessary closing table directives. - print $fragfh "\@end table\n" x scalar @tables; - close $fragfh; - - # Now invoke makeinfo/pandoc on it and slurp the results into a string - open(my $fh, '-|', "makeinfo --force -o - --docbook " - . "-D 'qemu_system_x86 QEMU_SYSTEM_X86_MACRO' " - . "-D 'qemu_system QEMU_SYSTEM_MACRO' /tmp/frag.texi " - . " | pandoc -f docbook -t rst") - or die "can't start makeinfo/pandoc: $!"; - - binmode $fh, ':encoding(utf8)'; - - print "SRST\n"; - - # Slurp the whole thing into a string so we can do multiline - # string matches on it. - my $rst = do { - local $/ = undef; - <$fh>; - }; - $rst =~ s/^- − /- /gm; - $rst =~ s/“/"/gm; - $rst =~ s/”/"/gm; - $rst =~ s/‘/'/gm; - $rst =~ s/’/'/gm; - $rst =~ s/QEMU_SYSTEM_MACRO/|qemu_system|/g; - $rst =~ s/QEMU_SYSTEM_X86_MACRO/|qemu_system_x86|/g; - $rst =~ s/(?=::\n\n +\|qemu)/.. parsed-literal/g; - $rst =~ s/:\n\n::$/::/gm; - - # Fix up the invalid reference format makeinfo/pandoc emit: - # `Some string here <#anchorname>`__ - # should be: - # :ref:`anchorname` - $rst =~ s/\`[^<`]+\<\#([^>]+)\>\`__/:ref:`$1`/gm; - print $rst; - - close $fh or die "error on close: $!"; - print "ERST\n"; -} - -# Read the whole .hx input file. 
-while (<>) { - # Always print the current line - print; - if (/STEXI/) { - $reading_texi = 1; - $texiblock = ''; - next; - } - if (/ETEXI/) { - $reading_texi = 0; - # dump RST version of block - output_rstblock($texiblock); - next; - } - if ($reading_texi) { - # Accumulate the texi into a string - # but drop findex entries as they will confuse makeinfo - next if /^\@findex/; - $texiblock .= $_; - } -} - -die "Unexpectedly still in texi block at EOF" if $reading_texi; diff --git a/scripts/kvm/vmxcap b/scripts/kvm/vmxcap index 6fe66d5f57..ce27f5e635 100755 --- a/scripts/kvm/vmxcap +++ b/scripts/kvm/vmxcap @@ -23,6 +23,7 @@ MSR_IA32_VMX_TRUE_PROCBASED_CTLS = 0x48E MSR_IA32_VMX_TRUE_EXIT_CTLS = 0x48F MSR_IA32_VMX_TRUE_ENTRY_CTLS = 0x490 MSR_IA32_VMX_VMFUNC = 0x491 +MSR_IA32_VMX_PROCBASED_CTLS3 = 0x492 class msr(object): def __init__(self): @@ -71,6 +72,13 @@ class Control(object): s = 'yes' print(' %-40s %s' % (self.bits[bit], s)) +# All 64 bits in the tertiary controls MSR are allowed-1 +class Allowed1Control(Control): + def read2(self, nr): + m = msr() + val = m.read(nr, 0) + return (0, val) + class Misc(object): def __init__(self, name, bits, msr): self.name = name @@ -135,6 +143,7 @@ controls = [ 12: 'RDTSC exiting', 15: 'CR3-load exiting', 16: 'CR3-store exiting', + 17: 'Activate tertiary controls', 19: 'CR8-load exiting', 20: 'CR8-store exiting', 21: 'Use TPR shadow', @@ -186,6 +195,14 @@ controls = [ cap_msr = MSR_IA32_VMX_PROCBASED_CTLS2, ), + Allowed1Control( + name = 'tertiary processor-based controls', + bits = { + 4: 'Enable IPI virtualization' + }, + cap_msr = MSR_IA32_VMX_PROCBASED_CTLS3, + ), + Control( name = 'VM-Exit controls', bits = { @@ -249,6 +266,7 @@ controls = [ bits = { 0: 'Execute-only EPT translations', 6: 'Page-walk length 4', + 7: 'Page-walk length 5', 8: 'Paging-structure memory type UC', 14: 'Paging-structure memory type WB', 16: '2MB EPT pages', diff --git a/scripts/main.c b/scripts/main.c new file mode 100644 index 0000000000..b552c8e4ed --- /dev/null +++ b/scripts/main.c @@ -0,0 +1 @@ +int main(void) {} diff --git a/scripts/make-config-poison.sh b/scripts/make-config-poison.sh new file mode 100755 index 0000000000..d222a04304 --- /dev/null +++ b/scripts/make-config-poison.sh @@ -0,0 +1,16 @@ +#! /bin/sh + +if test $# = 0; then + exit 0 +fi + +# Create list of config switches that should be poisoned in common code... +# but filter out CONFIG_TCG and CONFIG_USER_ONLY which are special. +exec sed -n \ + -e' /CONFIG_TCG/d' \ + -e '/CONFIG_USER_ONLY/d' \ + -e '/^#define / {' \ + -e 's///' \ + -e 's/ .*//' \ + -e 's/^/#pragma GCC poison /p' \ + -e '}' "$@" diff --git a/scripts/make-release b/scripts/make-release index a2a8cda33c..05b14ecc95 100755 --- a/scripts/make-release +++ b/scripts/make-release @@ -27,7 +27,12 @@ git submodule update --init # don't necessarily have much control over how a submodule handles its # submodule dependencies, so we continue to handle these on a case-by-case # basis for now. 
-(cd roms/edk2 && git submodule update --init) +(cd roms/edk2 && \ + git submodule update --init -- \ + ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3 \ + BaseTools/Source/C/BrotliCompress/brotli \ + CryptoPkg/Library/OpensslLib/openssl \ + MdeModulePkg/Library/BrotliCustomDecompressLib/brotli) popd tar --exclude=.git -cjf ${destination}.tar.bz2 ${destination} rm -rf ${destination} diff --git a/scripts/meson-buildoptions.py b/scripts/meson-buildoptions.py new file mode 100755 index 0000000000..3e2b478538 --- /dev/null +++ b/scripts/meson-buildoptions.py @@ -0,0 +1,224 @@ +#! /usr/bin/env python3 + +# Generate configure command line options handling code, based on Meson's +# user build options introspection data +# +# Copyright (C) 2021 Red Hat, Inc. +# +# Author: Paolo Bonzini +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import json +import textwrap +import shlex +import sys + +SKIP_OPTIONS = { + "default_devices", + "fuzzing_engine", + "qemu_suffix", + "smbd", +} + +OPTION_NAMES = { + "b_coverage": "gcov", + "b_lto": "lto", + "malloc": "enable-malloc", + "pkgversion": "with-pkgversion", + "qemu_firmwarepath": "firmwarepath", + "trace_backends": "enable-trace-backends", + "trace_file": "with-trace-file", +} + +BUILTIN_OPTIONS = { + "b_coverage", + "b_lto", + "datadir", + "includedir", + "libdir", + "libexecdir", + "localedir", + "localstatedir", + "mandir", + "strip", + "sysconfdir", +} + +LINE_WIDTH = 76 + + +# Convert the default value of an option to the string used in +# the help message +def value_to_help(value): + if isinstance(value, list): + return ",".join(value) + if isinstance(value, bool): + return "enabled" if value else "disabled" + return str(value) + + +def wrap(left, text, indent): + spaces = " " * indent + if len(left) >= indent: + yield left + left = spaces + else: + left = (left + spaces)[0:indent] + yield from textwrap.wrap( + text, width=LINE_WIDTH, initial_indent=left, subsequent_indent=spaces + ) + + +def sh_print(line=""): + print(' printf "%s\\n"', shlex.quote(line)) + + +def help_line(left, opt, indent, long): + right = f'{opt["description"]}' + if long: + value = value_to_help(opt["value"]) + if value != "auto" and value != "": + right += f" [{value}]" + if "choices" in opt and long: + choices = "/".join(sorted(opt["choices"])) + right += f" (choices: {choices})" + for x in wrap(" " + left, right, indent): + sh_print(x) + + +# Return whether the option (a dictionary) can be used with +# arguments. Booleans can never be used with arguments; +# combos allow an argument only if they accept other values +# than "auto", "enabled", and "disabled". +def allow_arg(opt): + if opt["type"] == "boolean": + return False + if opt["type"] != "combo": + return True + return not (set(opt["choices"]) <= {"auto", "disabled", "enabled"}) + + +# Return whether the option (a dictionary) can be used without +# arguments. 
Booleans can only be used without arguments; +# combos require an argument if they accept neither "enabled" +# nor "disabled" +def require_arg(opt): + if opt["type"] == "boolean": + return False + if opt["type"] != "combo": + return True + return not ({"enabled", "disabled"}.intersection(opt["choices"])) + + +def filter_options(json): + if ":" in json["name"]: + return False + if json["section"] == "user": + return json["name"] not in SKIP_OPTIONS + else: + return json["name"] in BUILTIN_OPTIONS + + +def load_options(json): + json = [x for x in json if filter_options(x)] + return sorted(json, key=lambda x: x["name"]) + + +def cli_option(opt): + name = opt["name"] + if name in OPTION_NAMES: + return OPTION_NAMES[name] + return name.replace("_", "-") + + +def cli_help_key(opt): + key = cli_option(opt) + if require_arg(opt): + return key + if opt["type"] == "boolean" and opt["value"]: + return f"disable-{key}" + return f"enable-{key}" + + +def cli_metavar(opt): + if opt["type"] == "string": + return "VALUE" + if opt["type"] == "array": + return "CHOICES" if "choices" in opt else "VALUES" + return "CHOICE" + + +def print_help(options): + print("meson_options_help() {") + for opt in sorted(options, key=cli_help_key): + key = cli_help_key(opt) + # The first section includes options that have an arguments, + # and booleans (i.e., only one of enable/disable makes sense) + if require_arg(opt): + metavar = cli_metavar(opt) + left = f"--{key}={metavar}" + help_line(left, opt, 27, True) + elif opt["type"] == "boolean": + left = f"--{key}" + help_line(left, opt, 27, False) + elif allow_arg(opt): + if opt["type"] == "combo" and "enabled" in opt["choices"]: + left = f"--{key}[=CHOICE]" + else: + left = f"--{key}=CHOICE" + help_line(left, opt, 27, True) + + sh_print() + sh_print("Optional features, enabled with --enable-FEATURE and") + sh_print("disabled with --disable-FEATURE, default is enabled if available") + sh_print("(unless built with --without-default-features):") + sh_print() + for opt in options: + key = opt["name"].replace("_", "-") + if opt["type"] != "boolean" and not allow_arg(opt): + help_line(key, opt, 18, False) + print("}") + + +def print_parse(options): + print("_meson_option_parse() {") + print(" case $1 in") + for opt in options: + key = cli_option(opt) + name = opt["name"] + if require_arg(opt): + if opt["type"] == "array" and not "choices" in opt: + print(f' --{key}=*) quote_sh "-D{name}=$(meson_option_build_array $2)" ;;') + else: + print(f' --{key}=*) quote_sh "-D{name}=$2" ;;') + elif opt["type"] == "boolean": + print(f' --enable-{key}) printf "%s" -D{name}=true ;;') + print(f' --disable-{key}) printf "%s" -D{name}=false ;;') + else: + if opt["type"] == "combo" and "enabled" in opt["choices"]: + print(f' --enable-{key}) printf "%s" -D{name}=enabled ;;') + if opt["type"] == "combo" and "disabled" in opt["choices"]: + print(f' --disable-{key}) printf "%s" -D{name}=disabled ;;') + if allow_arg(opt): + print(f' --enable-{key}=*) quote_sh "-D{name}=$2" ;;') + print(" *) return 1 ;;") + print(" esac") + print("}") + + +options = load_options(json.load(sys.stdin)) +print("# This file is generated by meson-buildoptions.py, do not edit!") +print_help(options) +print_parse(options) diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh new file mode 100644 index 0000000000..5272bc0277 --- /dev/null +++ b/scripts/meson-buildoptions.sh @@ -0,0 +1,479 @@ +# This file is generated by meson-buildoptions.py, do not edit! 
+meson_options_help() { + printf "%s\n" ' --audio-drv-list=CHOICES Set audio driver list [default] (choices: alsa/co' + printf "%s\n" ' reaudio/default/dsound/jack/oss/pa/sdl/sndio)' + printf "%s\n" ' --block-drv-ro-whitelist=VALUE' + printf "%s\n" ' set block driver read-only whitelist (by default' + printf "%s\n" ' affects only QEMU, not tools like qemu-img)' + printf "%s\n" ' --block-drv-rw-whitelist=VALUE' + printf "%s\n" ' set block driver read-write whitelist (by default' + printf "%s\n" ' affects only QEMU, not tools like qemu-img)' + printf "%s\n" ' --datadir=VALUE Data file directory [share]' + printf "%s\n" ' --disable-coroutine-pool coroutine freelist (better performance)' + printf "%s\n" ' --disable-install-blobs install provided firmware blobs' + printf "%s\n" ' --docdir=VALUE Base directory for documentation installation' + printf "%s\n" ' (can be empty) [share/doc]' + printf "%s\n" ' --enable-block-drv-whitelist-in-tools' + printf "%s\n" ' use block whitelist also in tools instead of only' + printf "%s\n" ' QEMU' + printf "%s\n" ' --enable-cfi Control-Flow Integrity (CFI)' + printf "%s\n" ' --enable-cfi-debug Verbose errors in case of CFI violation' + printf "%s\n" ' --enable-debug-mutex mutex debugging support' + printf "%s\n" ' --enable-debug-stack-usage' + printf "%s\n" ' measure coroutine stack usage' + printf "%s\n" ' --enable-fdt[=CHOICE] Whether and how to find the libfdt library' + printf "%s\n" ' (choices: auto/disabled/enabled/internal/system)' + printf "%s\n" ' --enable-fuzzing build fuzzing targets' + printf "%s\n" ' --enable-gcov Enable coverage tracking.' + printf "%s\n" ' --enable-gprof QEMU profiling with gprof' + printf "%s\n" ' --enable-lto Use link time optimization' + printf "%s\n" ' --enable-malloc=CHOICE choose memory allocator to use [system] (choices:' + printf "%s\n" ' jemalloc/system/tcmalloc)' + printf "%s\n" ' --enable-module-upgrades try to load modules from alternate paths for' + printf "%s\n" ' upgrades' + printf "%s\n" ' --enable-profiler profiler support' + printf "%s\n" ' --enable-qom-cast-debug cast debugging support' + printf "%s\n" ' --enable-rng-none dummy RNG, avoid using /dev/(u)random and' + printf "%s\n" ' getrandom()' + printf "%s\n" ' --enable-strip Strip targets on install' + printf "%s\n" ' --enable-tcg-interpreter TCG with bytecode interpreter (slow)' + printf "%s\n" ' --enable-trace-backends=CHOICES' + printf "%s\n" ' Set available tracing backends [log] (choices:' + printf "%s\n" ' dtrace/ftrace/log/nop/simple/syslog/ust)' + printf "%s\n" ' --firmwarepath=VALUES search PATH for firmware files [share/qemu-' + printf "%s\n" ' firmware]' + printf "%s\n" ' --iasl=VALUE Path to ACPI disassembler' + printf "%s\n" ' --includedir=VALUE Header file directory [include]' + printf "%s\n" ' --interp-prefix=VALUE where to find shared libraries etc., use %M for' + printf "%s\n" ' cpu name [/usr/gnemul/qemu-%M]' + printf "%s\n" ' --libdir=VALUE Library directory [lib/x86_64-linux-gnu]' + printf "%s\n" ' --libexecdir=VALUE Library executable directory [libexec]' + printf "%s\n" ' --localedir=VALUE Locale data directory [share/locale]' + printf "%s\n" ' --localstatedir=VALUE Localstate data directory [/var/local]' + printf "%s\n" ' --mandir=VALUE Manual page directory [share/man]' + printf "%s\n" ' --sphinx-build=VALUE Use specified sphinx-build for building document' + printf "%s\n" ' --sysconfdir=VALUE Sysconf data directory [etc]' + printf "%s\n" ' --tls-priority=VALUE Default TLS protocol/cipher priority string' + printf "%s\n" ' [NORMAL]' 
+ printf "%s\n" ' --with-pkgversion=VALUE use specified string as sub-version of the' + printf "%s\n" ' package' + printf "%s\n" ' --with-trace-file=VALUE Trace file prefix for simple backend [trace]' + printf "%s\n" '' + printf "%s\n" 'Optional features, enabled with --enable-FEATURE and' + printf "%s\n" 'disabled with --disable-FEATURE, default is enabled if available' + printf "%s\n" '(unless built with --without-default-features):' + printf "%s\n" '' + printf "%s\n" ' alsa ALSA sound support' + printf "%s\n" ' attr attr/xattr support' + printf "%s\n" ' auth-pam PAM access control' + printf "%s\n" ' avx2 AVX2 optimizations' + printf "%s\n" ' avx512f AVX512F optimizations' + printf "%s\n" ' blkio libblkio block device driver' + printf "%s\n" ' bochs bochs image format support' + printf "%s\n" ' bpf eBPF support' + printf "%s\n" ' brlapi brlapi character device driver' + printf "%s\n" ' bzip2 bzip2 support for DMG images' + printf "%s\n" ' canokey CanoKey support' + printf "%s\n" ' cap-ng cap_ng support' + printf "%s\n" ' capstone Whether and how to find the capstone library' + printf "%s\n" ' cloop cloop image format support' + printf "%s\n" ' cocoa Cocoa user interface (macOS only)' + printf "%s\n" ' coreaudio CoreAudio sound support' + printf "%s\n" ' crypto-afalg Linux AF_ALG crypto backend driver' + printf "%s\n" ' curl CURL block device driver' + printf "%s\n" ' curses curses UI' + printf "%s\n" ' dbus-display -display dbus support' + printf "%s\n" ' dmg dmg image format support' + printf "%s\n" ' docs Documentations build support' + printf "%s\n" ' dsound DirectSound sound support' + printf "%s\n" ' fuse FUSE block device export' + printf "%s\n" ' fuse-lseek SEEK_HOLE/SEEK_DATA support for FUSE exports' + printf "%s\n" ' gcrypt libgcrypt cryptography support' + printf "%s\n" ' gettext Localization of the GTK+ user interface' + printf "%s\n" ' gio use libgio for D-Bus support' + printf "%s\n" ' glusterfs Glusterfs block device driver' + printf "%s\n" ' gnutls GNUTLS cryptography support' + printf "%s\n" ' gtk-clipboard clipboard support for the gtk UI (EXPERIMENTAL, MAY HANG)' + printf "%s\n" ' guest-agent Build QEMU Guest Agent' + printf "%s\n" ' guest-agent-msi Build MSI package for the QEMU Guest Agent' + printf "%s\n" ' hax HAX acceleration support' + printf "%s\n" ' hvf HVF acceleration support' + printf "%s\n" ' iconv Font glyph conversion support' + printf "%s\n" ' jack JACK sound support' + printf "%s\n" ' keyring Linux keyring support' + printf "%s\n" ' kvm KVM acceleration support' + printf "%s\n" ' l2tpv3 l2tpv3 network backend support' + printf "%s\n" ' libdaxctl libdaxctl support' + printf "%s\n" ' libiscsi libiscsi userspace initiator' + printf "%s\n" ' libnfs libnfs block device driver' + printf "%s\n" ' libpmem libpmem support' + printf "%s\n" ' libssh ssh block device support' + printf "%s\n" ' libudev Use libudev to enumerate host devices' + printf "%s\n" ' libusb libusb support for USB passthrough' + printf "%s\n" ' libvduse build VDUSE Library' + printf "%s\n" ' linux-aio Linux AIO support' + printf "%s\n" ' linux-io-uring Linux io_uring support' + printf "%s\n" ' live-block-migration' + printf "%s\n" ' block migration in the main migration stream' + printf "%s\n" ' lzfse lzfse support for DMG images' + printf "%s\n" ' lzo lzo compression support' + printf "%s\n" ' malloc-trim enable libc malloc_trim() for memory optimization' + printf "%s\n" ' membarrier membarrier system call (for Linux 4.14+ or Windows' + printf "%s\n" ' mpath Multipath persistent reservation 
passthrough' + printf "%s\n" ' multiprocess Out of process device emulation support' + printf "%s\n" ' netmap netmap network backend support' + printf "%s\n" ' nettle nettle cryptography support' + printf "%s\n" ' numa libnuma support' + printf "%s\n" ' nvmm NVMM acceleration support' + printf "%s\n" ' opengl OpenGL support' + printf "%s\n" ' oss OSS sound support' + printf "%s\n" ' pa PulseAudio sound support' + printf "%s\n" ' parallels parallels image format support' + printf "%s\n" ' png PNG support with libpng' + printf "%s\n" ' pvrdma Enable PVRDMA support' + printf "%s\n" ' qcow1 qcow1 image format support' + printf "%s\n" ' qed qed image format support' + printf "%s\n" ' qga-vss build QGA VSS support (broken with MinGW)' + printf "%s\n" ' rbd Ceph block device driver' + printf "%s\n" ' rdma Enable RDMA-based migration' + printf "%s\n" ' renderdoc improved RenderDoc frame capture support' + printf "%s\n" ' replication replication support' + printf "%s\n" ' sdl SDL user interface' + printf "%s\n" ' sdl-image SDL Image support for icons' + printf "%s\n" ' seccomp seccomp support' + printf "%s\n" ' selinux SELinux support in qemu-nbd' + printf "%s\n" ' slirp libslirp user mode network backend support' + printf "%s\n" ' slirp-smbd use smbd (at path --smbd=*) in slirp networking' + printf "%s\n" ' smartcard CA smartcard emulation support' + printf "%s\n" ' snappy snappy compression support' + printf "%s\n" ' sndio sndio sound support' + printf "%s\n" ' sparse sparse checker' + printf "%s\n" ' spice Spice server support' + printf "%s\n" ' spice-protocol Spice protocol support' + printf "%s\n" ' tcg TCG support' + printf "%s\n" ' tools build support utilities that come with QEMU' + printf "%s\n" ' tpm TPM support' + printf "%s\n" ' u2f U2F emulation support' + printf "%s\n" ' usb-redir libusbredir support' + printf "%s\n" ' vde vde network backend support' + printf "%s\n" ' vdi vdi image format support' + printf "%s\n" ' vduse-blk-export' + printf "%s\n" ' VDUSE block export support' + printf "%s\n" ' vfio-user-server' + printf "%s\n" ' vfio-user server support' + printf "%s\n" ' vhost-crypto vhost-user crypto backend support' + printf "%s\n" ' vhost-kernel vhost kernel backend support' + printf "%s\n" ' vhost-net vhost-net kernel acceleration support' + printf "%s\n" ' vhost-user vhost-user backend support' + printf "%s\n" ' vhost-user-blk-server' + printf "%s\n" ' build vhost-user-blk server' + printf "%s\n" ' vhost-vdpa vhost-vdpa kernel backend support' + printf "%s\n" ' virglrenderer virgl rendering support' + printf "%s\n" ' virtfs virtio-9p support' + printf "%s\n" ' virtiofsd build virtiofs daemon (virtiofsd)' + printf "%s\n" ' vmnet vmnet.framework network backend support' + printf "%s\n" ' vnc VNC server' + printf "%s\n" ' vnc-jpeg JPEG lossy compression for VNC server' + printf "%s\n" ' vnc-sasl SASL authentication for VNC server' + printf "%s\n" ' vte vte support for the gtk UI' + printf "%s\n" ' vvfat vvfat image format support' + printf "%s\n" ' whpx WHPX acceleration support' + printf "%s\n" ' xen Xen backend support' + printf "%s\n" ' xen-pci-passthrough' + printf "%s\n" ' Xen PCI passthrough support' + printf "%s\n" ' xkbcommon xkbcommon support' + printf "%s\n" ' zstd zstd compression support' +} +_meson_option_parse() { + case $1 in + --enable-alsa) printf "%s" -Dalsa=enabled ;; + --disable-alsa) printf "%s" -Dalsa=disabled ;; + --enable-attr) printf "%s" -Dattr=enabled ;; + --disable-attr) printf "%s" -Dattr=disabled ;; + --audio-drv-list=*) quote_sh 
"-Daudio_drv_list=$2" ;; + --enable-auth-pam) printf "%s" -Dauth_pam=enabled ;; + --disable-auth-pam) printf "%s" -Dauth_pam=disabled ;; + --enable-avx2) printf "%s" -Davx2=enabled ;; + --disable-avx2) printf "%s" -Davx2=disabled ;; + --enable-avx512f) printf "%s" -Davx512f=enabled ;; + --disable-avx512f) printf "%s" -Davx512f=disabled ;; + --enable-gcov) printf "%s" -Db_coverage=true ;; + --disable-gcov) printf "%s" -Db_coverage=false ;; + --enable-lto) printf "%s" -Db_lto=true ;; + --disable-lto) printf "%s" -Db_lto=false ;; + --enable-blkio) printf "%s" -Dblkio=enabled ;; + --disable-blkio) printf "%s" -Dblkio=disabled ;; + --block-drv-ro-whitelist=*) quote_sh "-Dblock_drv_ro_whitelist=$2" ;; + --block-drv-rw-whitelist=*) quote_sh "-Dblock_drv_rw_whitelist=$2" ;; + --enable-block-drv-whitelist-in-tools) printf "%s" -Dblock_drv_whitelist_in_tools=true ;; + --disable-block-drv-whitelist-in-tools) printf "%s" -Dblock_drv_whitelist_in_tools=false ;; + --enable-bochs) printf "%s" -Dbochs=enabled ;; + --disable-bochs) printf "%s" -Dbochs=disabled ;; + --enable-bpf) printf "%s" -Dbpf=enabled ;; + --disable-bpf) printf "%s" -Dbpf=disabled ;; + --enable-brlapi) printf "%s" -Dbrlapi=enabled ;; + --disable-brlapi) printf "%s" -Dbrlapi=disabled ;; + --enable-bzip2) printf "%s" -Dbzip2=enabled ;; + --disable-bzip2) printf "%s" -Dbzip2=disabled ;; + --enable-canokey) printf "%s" -Dcanokey=enabled ;; + --disable-canokey) printf "%s" -Dcanokey=disabled ;; + --enable-cap-ng) printf "%s" -Dcap_ng=enabled ;; + --disable-cap-ng) printf "%s" -Dcap_ng=disabled ;; + --enable-capstone) printf "%s" -Dcapstone=enabled ;; + --disable-capstone) printf "%s" -Dcapstone=disabled ;; + --enable-cfi) printf "%s" -Dcfi=true ;; + --disable-cfi) printf "%s" -Dcfi=false ;; + --enable-cfi-debug) printf "%s" -Dcfi_debug=true ;; + --disable-cfi-debug) printf "%s" -Dcfi_debug=false ;; + --enable-cloop) printf "%s" -Dcloop=enabled ;; + --disable-cloop) printf "%s" -Dcloop=disabled ;; + --enable-cocoa) printf "%s" -Dcocoa=enabled ;; + --disable-cocoa) printf "%s" -Dcocoa=disabled ;; + --enable-coreaudio) printf "%s" -Dcoreaudio=enabled ;; + --disable-coreaudio) printf "%s" -Dcoreaudio=disabled ;; + --enable-coroutine-pool) printf "%s" -Dcoroutine_pool=true ;; + --disable-coroutine-pool) printf "%s" -Dcoroutine_pool=false ;; + --enable-crypto-afalg) printf "%s" -Dcrypto_afalg=enabled ;; + --disable-crypto-afalg) printf "%s" -Dcrypto_afalg=disabled ;; + --enable-curl) printf "%s" -Dcurl=enabled ;; + --disable-curl) printf "%s" -Dcurl=disabled ;; + --enable-curses) printf "%s" -Dcurses=enabled ;; + --disable-curses) printf "%s" -Dcurses=disabled ;; + --datadir=*) quote_sh "-Ddatadir=$2" ;; + --enable-dbus-display) printf "%s" -Ddbus_display=enabled ;; + --disable-dbus-display) printf "%s" -Ddbus_display=disabled ;; + --enable-debug-mutex) printf "%s" -Ddebug_mutex=true ;; + --disable-debug-mutex) printf "%s" -Ddebug_mutex=false ;; + --enable-debug-stack-usage) printf "%s" -Ddebug_stack_usage=true ;; + --disable-debug-stack-usage) printf "%s" -Ddebug_stack_usage=false ;; + --enable-dmg) printf "%s" -Ddmg=enabled ;; + --disable-dmg) printf "%s" -Ddmg=disabled ;; + --docdir=*) quote_sh "-Ddocdir=$2" ;; + --enable-docs) printf "%s" -Ddocs=enabled ;; + --disable-docs) printf "%s" -Ddocs=disabled ;; + --enable-dsound) printf "%s" -Ddsound=enabled ;; + --disable-dsound) printf "%s" -Ddsound=disabled ;; + --enable-fdt) printf "%s" -Dfdt=enabled ;; + --disable-fdt) printf "%s" -Dfdt=disabled ;; + --enable-fdt=*) quote_sh "-Dfdt=$2" ;; + 
--enable-fuse) printf "%s" -Dfuse=enabled ;; + --disable-fuse) printf "%s" -Dfuse=disabled ;; + --enable-fuse-lseek) printf "%s" -Dfuse_lseek=enabled ;; + --disable-fuse-lseek) printf "%s" -Dfuse_lseek=disabled ;; + --enable-fuzzing) printf "%s" -Dfuzzing=true ;; + --disable-fuzzing) printf "%s" -Dfuzzing=false ;; + --enable-gcrypt) printf "%s" -Dgcrypt=enabled ;; + --disable-gcrypt) printf "%s" -Dgcrypt=disabled ;; + --enable-gettext) printf "%s" -Dgettext=enabled ;; + --disable-gettext) printf "%s" -Dgettext=disabled ;; + --enable-gio) printf "%s" -Dgio=enabled ;; + --disable-gio) printf "%s" -Dgio=disabled ;; + --enable-glusterfs) printf "%s" -Dglusterfs=enabled ;; + --disable-glusterfs) printf "%s" -Dglusterfs=disabled ;; + --enable-gnutls) printf "%s" -Dgnutls=enabled ;; + --disable-gnutls) printf "%s" -Dgnutls=disabled ;; + --enable-gprof) printf "%s" -Dgprof=true ;; + --disable-gprof) printf "%s" -Dgprof=false ;; + --enable-gtk-clipboard) printf "%s" -Dgtk_clipboard=enabled ;; + --disable-gtk-clipboard) printf "%s" -Dgtk_clipboard=disabled ;; + --enable-guest-agent) printf "%s" -Dguest_agent=enabled ;; + --disable-guest-agent) printf "%s" -Dguest_agent=disabled ;; + --enable-guest-agent-msi) printf "%s" -Dguest_agent_msi=enabled ;; + --disable-guest-agent-msi) printf "%s" -Dguest_agent_msi=disabled ;; + --enable-hax) printf "%s" -Dhax=enabled ;; + --disable-hax) printf "%s" -Dhax=disabled ;; + --enable-hvf) printf "%s" -Dhvf=enabled ;; + --disable-hvf) printf "%s" -Dhvf=disabled ;; + --iasl=*) quote_sh "-Diasl=$2" ;; + --enable-iconv) printf "%s" -Diconv=enabled ;; + --disable-iconv) printf "%s" -Diconv=disabled ;; + --includedir=*) quote_sh "-Dincludedir=$2" ;; + --enable-install-blobs) printf "%s" -Dinstall_blobs=true ;; + --disable-install-blobs) printf "%s" -Dinstall_blobs=false ;; + --interp-prefix=*) quote_sh "-Dinterp_prefix=$2" ;; + --enable-jack) printf "%s" -Djack=enabled ;; + --disable-jack) printf "%s" -Djack=disabled ;; + --enable-keyring) printf "%s" -Dkeyring=enabled ;; + --disable-keyring) printf "%s" -Dkeyring=disabled ;; + --enable-kvm) printf "%s" -Dkvm=enabled ;; + --disable-kvm) printf "%s" -Dkvm=disabled ;; + --enable-l2tpv3) printf "%s" -Dl2tpv3=enabled ;; + --disable-l2tpv3) printf "%s" -Dl2tpv3=disabled ;; + --enable-libdaxctl) printf "%s" -Dlibdaxctl=enabled ;; + --disable-libdaxctl) printf "%s" -Dlibdaxctl=disabled ;; + --libdir=*) quote_sh "-Dlibdir=$2" ;; + --libexecdir=*) quote_sh "-Dlibexecdir=$2" ;; + --enable-libiscsi) printf "%s" -Dlibiscsi=enabled ;; + --disable-libiscsi) printf "%s" -Dlibiscsi=disabled ;; + --enable-libnfs) printf "%s" -Dlibnfs=enabled ;; + --disable-libnfs) printf "%s" -Dlibnfs=disabled ;; + --enable-libpmem) printf "%s" -Dlibpmem=enabled ;; + --disable-libpmem) printf "%s" -Dlibpmem=disabled ;; + --enable-libssh) printf "%s" -Dlibssh=enabled ;; + --disable-libssh) printf "%s" -Dlibssh=disabled ;; + --enable-libudev) printf "%s" -Dlibudev=enabled ;; + --disable-libudev) printf "%s" -Dlibudev=disabled ;; + --enable-libusb) printf "%s" -Dlibusb=enabled ;; + --disable-libusb) printf "%s" -Dlibusb=disabled ;; + --enable-libvduse) printf "%s" -Dlibvduse=enabled ;; + --disable-libvduse) printf "%s" -Dlibvduse=disabled ;; + --enable-linux-aio) printf "%s" -Dlinux_aio=enabled ;; + --disable-linux-aio) printf "%s" -Dlinux_aio=disabled ;; + --enable-linux-io-uring) printf "%s" -Dlinux_io_uring=enabled ;; + --disable-linux-io-uring) printf "%s" -Dlinux_io_uring=disabled ;; + --enable-live-block-migration) printf "%s" 
-Dlive_block_migration=enabled ;; + --disable-live-block-migration) printf "%s" -Dlive_block_migration=disabled ;; + --localedir=*) quote_sh "-Dlocaledir=$2" ;; + --localstatedir=*) quote_sh "-Dlocalstatedir=$2" ;; + --enable-lzfse) printf "%s" -Dlzfse=enabled ;; + --disable-lzfse) printf "%s" -Dlzfse=disabled ;; + --enable-lzo) printf "%s" -Dlzo=enabled ;; + --disable-lzo) printf "%s" -Dlzo=disabled ;; + --enable-malloc=*) quote_sh "-Dmalloc=$2" ;; + --enable-malloc-trim) printf "%s" -Dmalloc_trim=enabled ;; + --disable-malloc-trim) printf "%s" -Dmalloc_trim=disabled ;; + --mandir=*) quote_sh "-Dmandir=$2" ;; + --enable-membarrier) printf "%s" -Dmembarrier=enabled ;; + --disable-membarrier) printf "%s" -Dmembarrier=disabled ;; + --enable-module-upgrades) printf "%s" -Dmodule_upgrades=true ;; + --disable-module-upgrades) printf "%s" -Dmodule_upgrades=false ;; + --enable-mpath) printf "%s" -Dmpath=enabled ;; + --disable-mpath) printf "%s" -Dmpath=disabled ;; + --enable-multiprocess) printf "%s" -Dmultiprocess=enabled ;; + --disable-multiprocess) printf "%s" -Dmultiprocess=disabled ;; + --enable-netmap) printf "%s" -Dnetmap=enabled ;; + --disable-netmap) printf "%s" -Dnetmap=disabled ;; + --enable-nettle) printf "%s" -Dnettle=enabled ;; + --disable-nettle) printf "%s" -Dnettle=disabled ;; + --enable-numa) printf "%s" -Dnuma=enabled ;; + --disable-numa) printf "%s" -Dnuma=disabled ;; + --enable-nvmm) printf "%s" -Dnvmm=enabled ;; + --disable-nvmm) printf "%s" -Dnvmm=disabled ;; + --enable-opengl) printf "%s" -Dopengl=enabled ;; + --disable-opengl) printf "%s" -Dopengl=disabled ;; + --enable-oss) printf "%s" -Doss=enabled ;; + --disable-oss) printf "%s" -Doss=disabled ;; + --enable-pa) printf "%s" -Dpa=enabled ;; + --disable-pa) printf "%s" -Dpa=disabled ;; + --enable-parallels) printf "%s" -Dparallels=enabled ;; + --disable-parallels) printf "%s" -Dparallels=disabled ;; + --with-pkgversion=*) quote_sh "-Dpkgversion=$2" ;; + --enable-png) printf "%s" -Dpng=enabled ;; + --disable-png) printf "%s" -Dpng=disabled ;; + --enable-profiler) printf "%s" -Dprofiler=true ;; + --disable-profiler) printf "%s" -Dprofiler=false ;; + --enable-pvrdma) printf "%s" -Dpvrdma=enabled ;; + --disable-pvrdma) printf "%s" -Dpvrdma=disabled ;; + --enable-qcow1) printf "%s" -Dqcow1=enabled ;; + --disable-qcow1) printf "%s" -Dqcow1=disabled ;; + --enable-qed) printf "%s" -Dqed=enabled ;; + --disable-qed) printf "%s" -Dqed=disabled ;; + --firmwarepath=*) quote_sh "-Dqemu_firmwarepath=$(meson_option_build_array $2)" ;; + --enable-qga-vss) printf "%s" -Dqga_vss=enabled ;; + --disable-qga-vss) printf "%s" -Dqga_vss=disabled ;; + --enable-qom-cast-debug) printf "%s" -Dqom_cast_debug=true ;; + --disable-qom-cast-debug) printf "%s" -Dqom_cast_debug=false ;; + --enable-rbd) printf "%s" -Drbd=enabled ;; + --disable-rbd) printf "%s" -Drbd=disabled ;; + --enable-rdma) printf "%s" -Drdma=enabled ;; + --disable-rdma) printf "%s" -Drdma=disabled ;; + --enable-renderdoc) printf "%s" -Drenderdoc=enabled ;; + --disable-renderdoc) printf "%s" -Drenderdoc=disabled ;; + --enable-replication) printf "%s" -Dreplication=enabled ;; + --disable-replication) printf "%s" -Dreplication=disabled ;; + --enable-rng-none) printf "%s" -Drng_none=true ;; + --disable-rng-none) printf "%s" -Drng_none=false ;; + --enable-sdl) printf "%s" -Dsdl=enabled ;; + --disable-sdl) printf "%s" -Dsdl=disabled ;; + --enable-sdl-image) printf "%s" -Dsdl_image=enabled ;; + --disable-sdl-image) printf "%s" -Dsdl_image=disabled ;; + --enable-seccomp) printf "%s" 
-Dseccomp=enabled ;; + --disable-seccomp) printf "%s" -Dseccomp=disabled ;; + --enable-selinux) printf "%s" -Dselinux=enabled ;; + --disable-selinux) printf "%s" -Dselinux=disabled ;; + --enable-slirp) printf "%s" -Dslirp=enabled ;; + --disable-slirp) printf "%s" -Dslirp=disabled ;; + --enable-slirp-smbd) printf "%s" -Dslirp_smbd=enabled ;; + --disable-slirp-smbd) printf "%s" -Dslirp_smbd=disabled ;; + --enable-smartcard) printf "%s" -Dsmartcard=enabled ;; + --disable-smartcard) printf "%s" -Dsmartcard=disabled ;; + --enable-snappy) printf "%s" -Dsnappy=enabled ;; + --disable-snappy) printf "%s" -Dsnappy=disabled ;; + --enable-sndio) printf "%s" -Dsndio=enabled ;; + --disable-sndio) printf "%s" -Dsndio=disabled ;; + --enable-sparse) printf "%s" -Dsparse=enabled ;; + --disable-sparse) printf "%s" -Dsparse=disabled ;; + --sphinx-build=*) quote_sh "-Dsphinx_build=$2" ;; + --enable-spice) printf "%s" -Dspice=enabled ;; + --disable-spice) printf "%s" -Dspice=disabled ;; + --enable-spice-protocol) printf "%s" -Dspice_protocol=enabled ;; + --disable-spice-protocol) printf "%s" -Dspice_protocol=disabled ;; + --enable-strip) printf "%s" -Dstrip=true ;; + --disable-strip) printf "%s" -Dstrip=false ;; + --sysconfdir=*) quote_sh "-Dsysconfdir=$2" ;; + --enable-tcg) printf "%s" -Dtcg=enabled ;; + --disable-tcg) printf "%s" -Dtcg=disabled ;; + --enable-tcg-interpreter) printf "%s" -Dtcg_interpreter=true ;; + --disable-tcg-interpreter) printf "%s" -Dtcg_interpreter=false ;; + --tls-priority=*) quote_sh "-Dtls_priority=$2" ;; + --enable-tools) printf "%s" -Dtools=enabled ;; + --disable-tools) printf "%s" -Dtools=disabled ;; + --enable-tpm) printf "%s" -Dtpm=enabled ;; + --disable-tpm) printf "%s" -Dtpm=disabled ;; + --enable-trace-backends=*) quote_sh "-Dtrace_backends=$2" ;; + --with-trace-file=*) quote_sh "-Dtrace_file=$2" ;; + --enable-u2f) printf "%s" -Du2f=enabled ;; + --disable-u2f) printf "%s" -Du2f=disabled ;; + --enable-usb-redir) printf "%s" -Dusb_redir=enabled ;; + --disable-usb-redir) printf "%s" -Dusb_redir=disabled ;; + --enable-vde) printf "%s" -Dvde=enabled ;; + --disable-vde) printf "%s" -Dvde=disabled ;; + --enable-vdi) printf "%s" -Dvdi=enabled ;; + --disable-vdi) printf "%s" -Dvdi=disabled ;; + --enable-vduse-blk-export) printf "%s" -Dvduse_blk_export=enabled ;; + --disable-vduse-blk-export) printf "%s" -Dvduse_blk_export=disabled ;; + --enable-vfio-user-server) printf "%s" -Dvfio_user_server=enabled ;; + --disable-vfio-user-server) printf "%s" -Dvfio_user_server=disabled ;; + --enable-vhost-crypto) printf "%s" -Dvhost_crypto=enabled ;; + --disable-vhost-crypto) printf "%s" -Dvhost_crypto=disabled ;; + --enable-vhost-kernel) printf "%s" -Dvhost_kernel=enabled ;; + --disable-vhost-kernel) printf "%s" -Dvhost_kernel=disabled ;; + --enable-vhost-net) printf "%s" -Dvhost_net=enabled ;; + --disable-vhost-net) printf "%s" -Dvhost_net=disabled ;; + --enable-vhost-user) printf "%s" -Dvhost_user=enabled ;; + --disable-vhost-user) printf "%s" -Dvhost_user=disabled ;; + --enable-vhost-user-blk-server) printf "%s" -Dvhost_user_blk_server=enabled ;; + --disable-vhost-user-blk-server) printf "%s" -Dvhost_user_blk_server=disabled ;; + --enable-vhost-vdpa) printf "%s" -Dvhost_vdpa=enabled ;; + --disable-vhost-vdpa) printf "%s" -Dvhost_vdpa=disabled ;; + --enable-virglrenderer) printf "%s" -Dvirglrenderer=enabled ;; + --disable-virglrenderer) printf "%s" -Dvirglrenderer=disabled ;; + --enable-virtfs) printf "%s" -Dvirtfs=enabled ;; + --disable-virtfs) printf "%s" -Dvirtfs=disabled ;; + 
--enable-virtiofsd) printf "%s" -Dvirtiofsd=enabled ;; + --disable-virtiofsd) printf "%s" -Dvirtiofsd=disabled ;; + --enable-vmnet) printf "%s" -Dvmnet=enabled ;; + --disable-vmnet) printf "%s" -Dvmnet=disabled ;; + --enable-vnc) printf "%s" -Dvnc=enabled ;; + --disable-vnc) printf "%s" -Dvnc=disabled ;; + --enable-vnc-jpeg) printf "%s" -Dvnc_jpeg=enabled ;; + --disable-vnc-jpeg) printf "%s" -Dvnc_jpeg=disabled ;; + --enable-vnc-sasl) printf "%s" -Dvnc_sasl=enabled ;; + --disable-vnc-sasl) printf "%s" -Dvnc_sasl=disabled ;; + --enable-vte) printf "%s" -Dvte=enabled ;; + --disable-vte) printf "%s" -Dvte=disabled ;; + --enable-vvfat) printf "%s" -Dvvfat=enabled ;; + --disable-vvfat) printf "%s" -Dvvfat=disabled ;; + --enable-whpx) printf "%s" -Dwhpx=enabled ;; + --disable-whpx) printf "%s" -Dwhpx=disabled ;; + --enable-xen) printf "%s" -Dxen=enabled ;; + --disable-xen) printf "%s" -Dxen=disabled ;; + --enable-xen-pci-passthrough) printf "%s" -Dxen_pci_passthrough=enabled ;; + --disable-xen-pci-passthrough) printf "%s" -Dxen_pci_passthrough=disabled ;; + --enable-xkbcommon) printf "%s" -Dxkbcommon=enabled ;; + --disable-xkbcommon) printf "%s" -Dxkbcommon=disabled ;; + --enable-zstd) printf "%s" -Dzstd=enabled ;; + --disable-zstd) printf "%s" -Dzstd=disabled ;; + *) return 1 ;; + esac +} diff --git a/scripts/meson.build b/scripts/meson.build index e8cc63896d..1c89e10a76 100644 --- a/scripts/meson.build +++ b/scripts/meson.build @@ -1,3 +1,3 @@ -if 'CONFIG_TRACE_SYSTEMTAP' in config_host +if stap.found() install_data('qemu-trace-stap', install_dir: get_option('bindir')) endif diff --git a/scripts/modinfo-collect.py b/scripts/modinfo-collect.py index 4acb188c3e..4e7584df66 100755 --- a/scripts/modinfo-collect.py +++ b/scripts/modinfo-collect.py @@ -18,13 +18,8 @@ def find_command(src, target, compile_commands): def process_command(src, command): skip = False - arg = False out = [] for item in shlex.split(command): - if arg: - out.append(x) - arg = False - continue if skip: skip = False continue @@ -51,6 +46,9 @@ def main(args): with open('compile_commands.json') as f: compile_commands = json.load(f) for src in args: + if not src.endswith('.c'): + print("MODINFO_DEBUG skip %s" % src) + continue print("MODINFO_DEBUG src %s" % src) command = find_command(src, target, compile_commands) cmdline = process_command(src, command) diff --git a/scripts/modinfo-generate.py b/scripts/modinfo-generate.py index f559eed007..b1538fcced 100755 --- a/scripts/modinfo-generate.py +++ b/scripts/modinfo-generate.py @@ -32,7 +32,7 @@ def parse_line(line): continue return (kind, data) -def generate(name, lines): +def generate(name, lines, enabled): arch = "" objs = [] deps = [] @@ -48,6 +48,14 @@ def generate(name, lines): opts.append(data) elif kind == 'arch': arch = data; + elif kind == 'kconfig': + # don't add a module which dependency is not enabled + # in kconfig + if data.strip() not in enabled: + print(" /* module {} isn't enabled in Kconfig. 
*/" + .format(data.strip())) + print("/* },{ */") + return None else: print("unknown:", kind) exit(1) @@ -58,8 +66,8 @@ def generate(name, lines): print_array("objs", objs) print_array("deps", deps) print_array("opts", opts) - print("},{"); - return deps + print("},{") + return {dep.strip('" ') for dep in deps} def print_pre(): print("/* generated by scripts/modinfo-generate.py */") @@ -72,23 +80,38 @@ def print_post(): print("}};") def main(args): - deps = {} + if len(args) < 3 or args[0] != '--devices': + print('Expected: modinfo-generate.py --devices ' + 'config-device.mak [modinfo files]', file=sys.stderr) + exit(1) + + # get all devices enabled in kconfig, from *-config-device.mak + enabled = set() + with open(args[1]) as file: + for line in file.readlines(): + config = line.split('=') + if config[1].rstrip() == 'y': + enabled.add(config[0][7:]) # remove CONFIG_ + + deps = set() + modules = set() print_pre() - for modinfo in args: + for modinfo in args[2:]: with open(modinfo) as f: lines = f.readlines() print(" /* %s */" % modinfo) - (basename, ext) = os.path.splitext(modinfo) - deps[basename] = generate(basename, lines) + (basename, _) = os.path.splitext(modinfo) + moddeps = generate(basename, lines, enabled) + if moddeps is not None: + modules.add(basename) + deps.update(moddeps) print_post() - flattened_deps = {flat.strip('" ') for dep in deps.values() for flat in dep} error = False - for dep in flattened_deps: - if dep not in deps.keys(): - print("Dependency {} cannot be satisfied".format(dep), - file=sys.stderr) - error = True + for dep in deps.difference(modules): + print("Dependency {} cannot be satisfied".format(dep), + file=sys.stderr) + error = True if error: exit(1) diff --git a/scripts/mtest2make.py b/scripts/mtest2make.py index ee072c0502..0fe81efbbc 100644 --- a/scripts/mtest2make.py +++ b/scripts/mtest2make.py @@ -13,104 +13,87 @@ import sys class Suite(object): def __init__(self): - self.tests = list() - self.slow_tests = list() - self.executables = set() + self.deps = set() + self.speeds = ['quick'] + + def names(self, base): + return [base if speed == 'quick' else f'{base}-{speed}' for speed in self.speeds] + print(''' SPEED = quick -# $1 = environment, $2 = test command, $3 = test name, $4 = dir -.test-human-tap = $1 $(if $4,(cd $4 && $2),$2) -m $(SPEED) < /dev/null | ./scripts/tap-driver.pl --test-name="$3" $(if $(V),,--show-failures-only) -.test-human-exitcode = $1 $(PYTHON) scripts/test-driver.py $(if $4,-C$4) $(if $(V),--verbose) -- $2 < /dev/null -.test-tap-tap = $1 $(if $4,(cd $4 && $2),$2) < /dev/null | sed "s/^[a-z][a-z]* [0-9]*/& $3/" || true -.test-tap-exitcode = printf "%s\\n" 1..1 "`$1 $(if $4,(cd $4 && $2),$2) < /dev/null > /dev/null || echo "not "`ok 1 $3" -.test.human-print = echo $(if $(V),'$1 $2','Running test $3') && -.test.env = MALLOC_PERTURB_=$${MALLOC_PERTURB_:-$$(( $${RANDOM:-0} % 255 + 1))} +.speed.quick = $(foreach s,$(sort $(filter-out %-slow %-thorough, $1)), --suite $s) +.speed.slow = $(foreach s,$(sort $(filter-out %-thorough, $1)), --suite $s) +.speed.thorough = $(foreach s,$(sort $1), --suite $s) -# $1 = test name, $2 = test target (human or tap) -.test.run = $(call .test.$2-print,$(.test.env.$1),$(.test.cmd.$1),$(.test.name.$1)) $(call .test-$2-$(.test.driver.$1),$(.test.env.$1),$(.test.cmd.$1),$(.test.name.$1),$(.test.dir.$1)) +.mtestargs = --no-rebuild -t 0 +ifneq ($(SPEED), quick) +.mtestargs += --setup $(SPEED) +endif +.mtestargs += $(subst -j,--num-processes , $(filter-out -j, $(lastword -j1 $(filter -j%, $(MAKEFLAGS))))) 
-.test.output-format = human -''') +.check.mtestargs = $(MTESTARGS) $(.mtestargs) $(if $(V),--verbose,--print-errorlogs) +.bench.mtestargs = $(MTESTARGS) $(.mtestargs) --benchmark --verbose''') introspect = json.load(sys.stdin) -i = 0 def process_tests(test, targets, suites): - global i - env = ' '.join(('%s=%s' % (shlex.quote(k), shlex.quote(v)) - for k, v in test['env'].items())) executable = test['cmd'][0] try: executable = os.path.relpath(executable) except: pass - if test['workdir'] is not None: - try: - test['cmd'][0] = os.path.relpath(executable, test['workdir']) - except: - test['cmd'][0] = executable - else: - test['cmd'][0] = executable - cmd = ' '.join((shlex.quote(x) for x in test['cmd'])) - driver = test['protocol'] if 'protocol' in test else 'exitcode' - i += 1 - if test['workdir'] is not None: - print('.test.dir.%d := %s' % (i, shlex.quote(test['workdir']))) - - if 'depends' in test: - deps = (targets.get(x, []) for x in test['depends']) - deps = itertools.chain.from_iterable(deps) - else: - deps = ['all'] - - print('.test.name.%d := %s' % (i, test['name'])) - print('.test.driver.%d := %s' % (i, driver)) - print('.test.env.%d := $(.test.env) %s' % (i, env)) - print('.test.cmd.%d := %s' % (i, cmd)) - print('.test.deps.%d := %s' % (i, ' '.join(deps))) - print('.PHONY: run-test-%d' % (i,)) - print('run-test-%d: $(.test.deps.%d)' % (i,i)) - print('\t@$(call .test.run,%d,$(.test.output-format))' % (i,)) + deps = (targets.get(x, []) for x in test['depends']) + deps = itertools.chain.from_iterable(deps) + deps = list(deps) test_suites = test['suite'] or ['default'] - is_slow = any(s.endswith('-slow') for s in test_suites) for s in test_suites: # The suite name in the introspection info is "PROJECT:SUITE" s = s.split(':')[1] + if s == 'slow' or s == 'thorough': + continue if s.endswith('-slow'): s = s[:-5] - if is_slow: - suites[s].slow_tests.append(i) - else: - suites[s].tests.append(i) - suites[s].executables.add(executable) + suites[s].speeds.append('slow') + if s.endswith('-thorough'): + s = s[:-9] + suites[s].speeds.append('thorough') + suites[s].deps.update(deps) def emit_prolog(suites, prefix): - all_tap = ' '.join(('%s-report-%s.tap' % (prefix, k) for k in suites.keys())) - print('.PHONY: %s %s-report.tap %s' % (prefix, prefix, all_tap)) - print('%s: run-tests' % (prefix,)) - print('%s-report.tap %s: %s-report%%.tap: all' % (prefix, all_tap, prefix)) - print('''\t$(MAKE) .test.output-format=tap --quiet -Otarget V=1 %s$* | ./scripts/tap-merge.pl | tee "$@" \\ - | ./scripts/tap-driver.pl $(if $(V),, --show-failures-only)''' % (prefix, )) + all_targets = ' '.join((f'{prefix}-{k}' for k in suites.keys())) + all_xml = ' '.join((f'{prefix}-report-{k}.junit.xml' for k in suites.keys())) + print() + print(f'all-{prefix}-targets = {all_targets}') + print(f'all-{prefix}-xml = {all_xml}') + print(f'.PHONY: {prefix} do-meson-{prefix} {prefix}-report.junit.xml $(all-{prefix}-targets) $(all-{prefix}-xml)') + print(f'ifeq ($(filter {prefix}, $(MAKECMDGOALS)),)') + print(f'.{prefix}.mtestargs += $(call .speed.$(SPEED), $(.{prefix}.mtest-suites))') + print(f'endif') + print(f'{prefix}-build: run-ninja') + print(f'{prefix} $(all-{prefix}-targets): do-meson-{prefix}') + print(f'do-meson-{prefix}: run-ninja; $(if $(MAKE.n),,+)$(MESON) test $(.{prefix}.mtestargs)') + print(f'{prefix}-report.junit.xml $(all-{prefix}-xml): {prefix}-report%.junit.xml: run-ninja') + print(f'\t$(MAKE) {prefix}$* MTESTARGS="$(MTESTARGS) --logbase {prefix}-report$*" && ln -f meson-logs/$@ .') + +def 
emit_suite_deps(name, suite, prefix): + deps = ' '.join(suite.deps) + targets = [f'{prefix}-{name}', f'{prefix}-report-{name}.junit.xml', f'{prefix}', f'{prefix}-report.junit.xml', + f'{prefix}-build'] + print() + print(f'.{prefix}-{name}.deps = {deps}') + for t in targets: + print(f'.ninja-goals.{t} += $(.{prefix}-{name}.deps)') def emit_suite(name, suite, prefix): - executables = ' '.join(suite.executables) - slow_test_numbers = ' '.join((str(x) for x in suite.slow_tests)) - test_numbers = ' '.join((str(x) for x in suite.tests)) - target = '%s-%s' % (prefix, name) - print('.test.quick.%s := %s' % (target, test_numbers)) - print('.test.slow.%s := $(.test.quick.%s) %s' % (target, target, slow_test_numbers)) - print('%s-build: %s' % (prefix, executables)) - print('.PHONY: %s' % (target, )) - print('.PHONY: %s-report-%s.tap' % (prefix, name)) - print('%s: run-tests' % (target, )) - print('ifneq ($(filter %s %s, $(MAKECMDGOALS)),)' % (target, prefix)) - print('.tests += $(.test.$(SPEED).%s)' % (target, )) - print('endif') - print('all-%s-targets += %s' % (prefix, target)) + emit_suite_deps(name, suite, prefix) + targets = f'{prefix}-{name} {prefix}-report-{name}.junit.xml {prefix} {prefix}-report.junit.xml' + print(f'ifneq ($(filter {targets}, $(MAKECMDGOALS)),)') + print(f'.{prefix}.mtest-suites += ' + ' '.join(suite.names(name))) + print(f'endif') targets = {t['id']: [os.path.relpath(f) for f in t['filename']] for t in introspect['targets']} @@ -128,5 +111,3 @@ for test in introspect['benchmarks']: emit_prolog(benchsuites, 'bench') for name, suite in benchsuites.items(): emit_suite(name, suite, 'bench') - -print('run-tests: $(patsubst %, run-test-%, $(.tests))') diff --git a/scripts/nsis.py b/scripts/nsis.py index 5135a05831..03ed7608a2 100644 --- a/scripts/nsis.py +++ b/scripts/nsis.py @@ -18,25 +18,53 @@ def signcode(path): return subprocess.run([cmd, path]) +def find_deps(exe_or_dll, search_path, analyzed_deps): + deps = [exe_or_dll] + output = subprocess.check_output(["objdump", "-p", exe_or_dll], text=True) + output = output.split("\n") + for line in output: + if not line.startswith("\tDLL Name: "): + continue + + dep = line.split("DLL Name: ")[1].strip() + if dep in analyzed_deps: + continue + + dll = os.path.join(search_path, dep) + if not os.path.exists(dll): + # assume it's a Windows provided dll, skip it + continue + + analyzed_deps.add(dep) + # locate the dll dependencies recursively + rdeps = find_deps(dll, search_path, analyzed_deps) + deps.extend(rdeps) + + return deps def main(): parser = argparse.ArgumentParser(description="QEMU NSIS build helper.") parser.add_argument("outfile") parser.add_argument("prefix") parser.add_argument("srcdir") + parser.add_argument("dlldir") parser.add_argument("cpu") parser.add_argument("nsisargs", nargs="*") args = parser.parse_args() + # canonicalize the Windows native prefix path + prefix = os.path.splitdrive(args.prefix)[1] destdir = tempfile.mkdtemp() try: - subprocess.run(["make", "install", "DESTDIR=" + destdir + os.path.sep]) + subprocess.run(["make", "install", "DESTDIR=" + destdir]) with open( - os.path.join(destdir + args.prefix, "system-emulations.nsh"), "w" - ) as nsh: - for exe in glob.glob( - os.path.join(destdir + args.prefix, "qemu-system-*.exe") - ): + os.path.join(destdir + prefix, "system-emulations.nsh"), "w" + ) as nsh, open( + os.path.join(destdir + prefix, "system-mui-text.nsh"), "w" + ) as muinsh: + for exe in sorted(glob.glob( + os.path.join(destdir + prefix, "qemu-system-*.exe") + )): exe = os.path.basename(exe) arch = 
exe[12:-4] nsh.write( @@ -49,23 +77,46 @@ def main(): arch, exe ) ) + if arch.endswith('w'): + desc = arch[:-1] + " emulation (GUI)." + else: + desc = arch + " emulation." - for exe in glob.glob(os.path.join(destdir + args.prefix, "*.exe")): + muinsh.write( + """ + !insertmacro MUI_DESCRIPTION_TEXT ${{Section_{0}}} "{1}" + """.format(arch, desc)) + + search_path = args.dlldir + print("Searching '%s' for the dependent dlls ..." % search_path) + dlldir = os.path.join(destdir + prefix, "dll") + os.mkdir(dlldir) + + for exe in glob.glob(os.path.join(destdir + prefix, "*.exe")): signcode(exe) + # find all dll dependencies + deps = set(find_deps(exe, search_path, set())) + deps.remove(exe) + + # copy all dlls to the DLLDIR + for dep in deps: + dllfile = os.path.join(dlldir, os.path.basename(dep)) + if (os.path.exists(dllfile)): + continue + print("Copying '%s' to '%s'" % (dep, dllfile)) + shutil.copy(dep, dllfile) + makensis = [ "makensis", "-V2", "-NOCD", "-DSRCDIR=" + args.srcdir, - "-DBINDIR=" + destdir + args.prefix, + "-DBINDIR=" + destdir + prefix, ] - dlldir = "w32" if args.cpu == "x86_64": - dlldir = "w64" makensis += ["-DW64"] - if os.path.exists(os.path.join(args.srcdir, "dll")): - makensis += ["-DDLLDIR={0}/dll/{1}".format(args.srcdir, dlldir)] + makensis += ["-DDLLDIR=" + dlldir] makensis += ["-DOUTFILE=" + args.outfile] + args.nsisargs subprocess.run(makensis) diff --git a/scripts/oss-fuzz/build.sh b/scripts/oss-fuzz/build.sh index 98b56e0521..3bda0d72c7 100755 --- a/scripts/oss-fuzz/build.sh +++ b/scripts/oss-fuzz/build.sh @@ -1,4 +1,4 @@ -#!/bin/sh -e +#!/bin/bash -e # # OSS-Fuzz build script. See: # https://google.github.io/oss-fuzz/getting-started/new-project-guide/#buildsh @@ -64,7 +64,7 @@ mkdir -p "$DEST_DIR/lib/" # Copy the shared libraries here # Build once to get the list of dynamic lib paths, and copy them over ../configure --disable-werror --cc="$CC" --cxx="$CXX" --enable-fuzzing \ - --prefix="$DEST_DIR" --bindir="$DEST_DIR" --datadir="$DEST_DIR/data/" \ + --prefix="/opt/qemu-oss-fuzz" \ --extra-cflags="$EXTRA_CFLAGS" --target-list="i386-softmmu" if ! make "-j$(nproc)" qemu-fuzz-i386; then @@ -81,16 +81,18 @@ if [ "$GITLAB_CI" != "true" ]; then # Build a second time to build the final binary with correct rpath ../configure --disable-werror --cc="$CC" --cxx="$CXX" --enable-fuzzing \ - --prefix="$DEST_DIR" --bindir="$DEST_DIR" --datadir="$DEST_DIR/data/" \ + --prefix="/opt/qemu-oss-fuzz" \ --extra-cflags="$EXTRA_CFLAGS" --extra-ldflags="-Wl,-rpath,\$ORIGIN/lib" \ --target-list="i386-softmmu" make "-j$(nproc)" qemu-fuzz-i386 V=1 fi -# Copy over the datadir -cp -r ../pc-bios/ "$DEST_DIR/pc-bios" +# Place data files in the preinstall tree +make install DESTDIR=$DEST_DIR/qemu-bundle +rm -rf $DEST_DIR/qemu-bundle/opt/qemu-oss-fuzz/bin +rm -rf $DEST_DIR/qemu-bundle/opt/qemu-oss-fuzz/libexec -targets=$(./qemu-fuzz-i386 | awk '$1 ~ /\*/ {print $2}') +targets=$(./qemu-fuzz-i386 | grep generic-fuzz | awk '$1 ~ /\*/ {print $2}') base_copy="$DEST_DIR/qemu-fuzz-i386-target-$(echo "$targets" | head -n 1)" cp "./qemu-fuzz-i386" "$base_copy" @@ -105,7 +107,7 @@ do # to be configured. We have some generic-fuzz-{pc-q35, floppy, ...} targets # that are thin wrappers around this target that set the required # environment variables according to predefined configs. 
- if [ "$target" != "generic-fuzz" ]; then + if [[ $target == "generic-fuzz-"* ]]; then ln $base_copy \ "$DEST_DIR/qemu-fuzz-i386-target-$target" fi diff --git a/scripts/oss-fuzz/instrumentation-filter-template b/scripts/oss-fuzz/instrumentation-filter-template new file mode 100644 index 0000000000..76d2b6139a --- /dev/null +++ b/scripts/oss-fuzz/instrumentation-filter-template @@ -0,0 +1,15 @@ +# Code that we actually want the fuzzer to target +# See: https://clang.llvm.org/docs/SanitizerCoverage.html#disabling-instrumentation-without-source-modification +# +src:*/hw/* +src:*/include/hw/* +src:*/slirp/* +src:*/net/* + +# We don't care about coverage over fuzzer-specific code, however we should +# instrument the fuzzer entry-point so libFuzzer always sees at least some +# coverage - otherwise it will exit after the first input +src:*/tests/qtest/fuzz/fuzz.c + +# Enable instrumentation for all functions in those files +fun:* diff --git a/scripts/oss-fuzz/output_reproducer.py b/scripts/oss-fuzz/output_reproducer.py index 3608b0600e..e8ef76b341 100755 --- a/scripts/oss-fuzz/output_reproducer.py +++ b/scripts/oss-fuzz/output_reproducer.py @@ -36,7 +36,7 @@ def c_header(owner): #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" """.format(date=date.today().year, owner=owner) diff --git a/scripts/qapi/commands.py b/scripts/qapi/commands.py index 0e13d51054..38ca38a7b9 100644 --- a/scripts/qapi/commands.py +++ b/scripts/qapi/commands.py @@ -17,7 +17,6 @@ from typing import ( Dict, List, Optional, - Sequence, Set, ) @@ -26,11 +25,13 @@ from .gen import ( QAPIGenC, QAPISchemaModularCVisitor, build_params, + gen_special_features, ifcontext, ) from .schema import ( QAPISchema, QAPISchemaFeature, + QAPISchemaIfCond, QAPISchemaObjectType, QAPISchemaType, ) @@ -52,7 +53,8 @@ def gen_command_decl(name: str, def gen_call(name: str, arg_type: Optional[QAPISchemaObjectType], boxed: bool, - ret_type: Optional[QAPISchemaType]) -> str: + ret_type: Optional[QAPISchemaType], + gen_tracing: bool) -> str: ret = '' argstr = '' @@ -70,21 +72,67 @@ def gen_call(name: str, if ret_type: lhs = 'retval = ' - ret = mcgen(''' + name = c_name(name) + upper = name.upper() - %(lhs)sqmp_%(c_name)s(%(args)s&err); - error_propagate(errp, err); -''', - c_name=c_name(name), args=argstr, lhs=lhs) - if ret_type: + if gen_tracing: ret += mcgen(''' + + if (trace_event_get_state_backends(TRACE_QMP_ENTER_%(upper)s)) { + g_autoptr(GString) req_json = qobject_to_json(QOBJECT(args)); + + trace_qmp_enter_%(name)s(req_json->str); + } + ''', + upper=upper, name=name) + + ret += mcgen(''' + + %(lhs)sqmp_%(name)s(%(args)s&err); +''', + name=name, args=argstr, lhs=lhs) + + ret += mcgen(''' if (err) { +''') + + if gen_tracing: + ret += mcgen(''' + trace_qmp_exit_%(name)s(error_get_pretty(err), false); +''', + name=name) + + ret += mcgen(''' + error_propagate(errp, err); goto out; } +''') + + if ret_type: + ret += mcgen(''' qmp_marshal_output_%(c_name)s(retval, ret, errp); ''', c_name=ret_type.c_name()) + + if gen_tracing: + if ret_type: + ret += mcgen(''' + + if (trace_event_get_state_backends(TRACE_QMP_EXIT_%(upper)s)) { + g_autoptr(GString) ret_json = qobject_to_json(*ret); + + trace_qmp_exit_%(name)s(ret_json->str, true); + } + ''', + upper=upper, name=name) + else: + ret += mcgen(''' + + trace_qmp_exit_%(name)s("{}", true); + ''', + name=name) + return ret @@ -121,10 +169,19 @@ def gen_marshal_decl(name: str) -> str: proto=build_marshal_proto(name)) +def gen_trace(name: str) -> str: + return mcgen(''' 
+qmp_enter_%(name)s(const char *json) "%%s" +qmp_exit_%(name)s(const char *result, bool succeeded) "%%s %%d" +''', + name=c_name(name)) + + def gen_marshal(name: str, arg_type: Optional[QAPISchemaObjectType], boxed: bool, - ret_type: Optional[QAPISchemaType]) -> str: + ret_type: Optional[QAPISchemaType], + gen_tracing: bool) -> str: have_args = boxed or (arg_type and not arg_type.is_empty()) if have_args: assert arg_type is not None @@ -179,7 +236,7 @@ def gen_marshal(name: str, } ''') - ret += gen_call(name, arg_type, boxed, ret_type) + ret += gen_call(name, arg_type, boxed, ret_type, gen_tracing) ret += mcgen(''' @@ -217,9 +274,6 @@ def gen_register_command(name: str, coroutine: bool) -> str: options = [] - if 'deprecated' in [f.name for f in features]: - options += ['QCO_DEPRECATED'] - if not success_response: options += ['QCO_NO_SUCCESS_RESP'] if allow_oob: @@ -229,24 +283,24 @@ def gen_register_command(name: str, if coroutine: options += ['QCO_COROUTINE'] - if not options: - options = ['QCO_NO_OPTIONS'] - ret = mcgen(''' qmp_register_command(cmds, "%(name)s", - qmp_marshal_%(c_name)s, %(opts)s); + qmp_marshal_%(c_name)s, %(opts)s, %(feats)s); ''', name=name, c_name=c_name(name), - opts=" | ".join(options)) + opts=' | '.join(options) or 0, + feats=gen_special_features(features)) return ret class QAPISchemaGenCommandVisitor(QAPISchemaModularCVisitor): - def __init__(self, prefix: str): + def __init__(self, prefix: str, gen_tracing: bool): super().__init__( prefix, 'qapi-commands', - ' * Schema-defined QAPI/QMP commands', None, __doc__) + ' * Schema-defined QAPI/QMP commands', None, __doc__, + gen_tracing=gen_tracing) self._visited_ret_types: Dict[QAPIGenC, Set[QAPISchemaType]] = {} + self._gen_tracing = gen_tracing def _begin_user_module(self, name: str) -> None: self._visited_ret_types[self._genc] = set() @@ -265,6 +319,16 @@ class QAPISchemaGenCommandVisitor(QAPISchemaModularCVisitor): ''', commands=commands, visit=visit)) + + if self._gen_tracing and commands != 'qapi-commands': + self._genc.add(mcgen(''' +#include "qapi/qmp/qjson.h" +#include "trace/trace-%(nm)s_trace_events.h" +''', + nm=c_name(commands, protect=False))) + # We use c_name(commands, protect=False) to turn '-' into '_', to + # match .underscorify() in trace/meson.build + self._genh.add(mcgen(''' #include "%(types)s.h" @@ -301,7 +365,7 @@ void %(c_prefix)sqmp_init_marshal(QmpCommandList *cmds) def visit_command(self, name: str, info: Optional[QAPISourceInfo], - ifcond: Sequence[str], + ifcond: QAPISchemaIfCond, features: List[QAPISchemaFeature], arg_type: Optional[QAPISchemaObjectType], ret_type: Optional[QAPISchemaType], @@ -326,7 +390,10 @@ void %(c_prefix)sqmp_init_marshal(QmpCommandList *cmds) with ifcontext(ifcond, self._genh, self._genc): self._genh.add(gen_command_decl(name, arg_type, boxed, ret_type)) self._genh.add(gen_marshal_decl(name)) - self._genc.add(gen_marshal(name, arg_type, boxed, ret_type)) + self._genc.add(gen_marshal(name, arg_type, boxed, ret_type, + self._gen_tracing)) + if self._gen_tracing: + self._gen_trace_events.add(gen_trace(name)) with self._temp_module('./init'): with ifcontext(ifcond, self._genh, self._genc): self._genc.add(gen_register_command( @@ -336,7 +403,8 @@ void %(c_prefix)sqmp_init_marshal(QmpCommandList *cmds) def gen_commands(schema: QAPISchema, output_dir: str, - prefix: str) -> None: - vis = QAPISchemaGenCommandVisitor(prefix) + prefix: str, + gen_tracing: bool) -> None: + vis = QAPISchemaGenCommandVisitor(prefix, gen_tracing) schema.visit(vis) vis.write(output_dir) diff 
--git a/scripts/qapi/common.py b/scripts/qapi/common.py index 6ad1eeb61d..737b059e62 100644 --- a/scripts/qapi/common.py +++ b/scripts/qapi/common.py @@ -12,7 +12,14 @@ # See the COPYING file in the top-level directory. import re -from typing import Match, Optional, Sequence +from typing import ( + Any, + Dict, + Match, + Optional, + Sequence, + Union, +) #: Magic string that gets removed along with all space to its right. @@ -107,7 +114,7 @@ def c_name(name: str, protect: bool = True) -> str: 'and', 'and_eq', 'bitand', 'bitor', 'compl', 'not', 'not_eq', 'or', 'or_eq', 'xor', 'xor_eq']) # namespace pollution: - polluted_words = set(['unix', 'errno', 'mips', 'sparc', 'i386']) + polluted_words = set(['unix', 'errno', 'mips', 'sparc', 'i386', 'linux']) name = re.sub(r'[^A-Za-z0-9_]', '_', name) if protect and (name in (c89_words | c99_words | c11_words | gcc_words | cpp_words | polluted_words) @@ -125,9 +132,6 @@ class Indentation: def __init__(self, initial: int = 0) -> None: self._level = initial - def __int__(self) -> int: - return self._level - def __repr__(self) -> str: return "{}({:d})".format(type(self).__name__, self._level) @@ -135,19 +139,13 @@ class Indentation: """Return the current indentation as a string of spaces.""" return ' ' * self._level - def __bool__(self) -> bool: - """True when there is a non-zero indentation.""" - return bool(self._level) - def increase(self, amount: int = 4) -> None: """Increase the indentation level by ``amount``, default 4.""" self._level += amount def decrease(self, amount: int = 4) -> None: """Decrease the indentation level by ``amount``, default 4.""" - if self._level < amount: - raise ArithmeticError( - f"Can't remove {amount:d} spaces from {self!r}") + assert amount <= self._level self._level -= amount @@ -162,8 +160,9 @@ def cgen(code: str, **kwds: object) -> str: Obey `indent`, and strip `EATSPACE`. 
""" raw = code % kwds - if indent: - raw = re.sub(r'^(?!(#|$))', str(indent), raw, flags=re.MULTILINE) + pfx = str(indent) + if pfx: + raw = re.sub(r'^(?!(#|$))', pfx, raw, flags=re.MULTILINE) return re.sub(re.escape(EATSPACE) + r' *', '', raw) @@ -194,22 +193,56 @@ def guardend(name: str) -> str: name=c_fname(name).upper()) -def gen_if(ifcond: Sequence[str]) -> str: - ret = '' - for ifc in ifcond: - ret += mcgen(''' +def gen_ifcond(ifcond: Optional[Union[str, Dict[str, Any]]], + cond_fmt: str, not_fmt: str, + all_operator: str, any_operator: str) -> str: + + def do_gen(ifcond: Union[str, Dict[str, Any]], + need_parens: bool) -> str: + if isinstance(ifcond, str): + return cond_fmt % ifcond + assert isinstance(ifcond, dict) and len(ifcond) == 1 + if 'not' in ifcond: + return not_fmt % do_gen(ifcond['not'], True) + if 'all' in ifcond: + gen = gen_infix(all_operator, ifcond['all']) + else: + gen = gen_infix(any_operator, ifcond['any']) + if need_parens: + gen = '(' + gen + ')' + return gen + + def gen_infix(operator: str, operands: Sequence[Any]) -> str: + return operator.join([do_gen(o, True) for o in operands]) + + if not ifcond: + return '' + return do_gen(ifcond, False) + + +def cgen_ifcond(ifcond: Optional[Union[str, Dict[str, Any]]]) -> str: + return gen_ifcond(ifcond, 'defined(%s)', '!%s', ' && ', ' || ') + + +def docgen_ifcond(ifcond: Optional[Union[str, Dict[str, Any]]]) -> str: + # TODO Doc generated for conditions needs polish + return gen_ifcond(ifcond, '%s', 'not %s', ' and ', ' or ') + + +def gen_if(cond: str) -> str: + if not cond: + return '' + return mcgen(''' #if %(cond)s -''', cond=ifc) - return ret +''', cond=cond) -def gen_endif(ifcond: Sequence[str]) -> str: - ret = '' - for ifc in reversed(ifcond): - ret += mcgen(''' +def gen_endif(cond: str) -> str: + if not cond: + return '' + return mcgen(''' #endif /* %(cond)s */ -''', cond=ifc) - return ret +''', cond=cond) def must_match(pattern: str, string: str) -> Match[str]: diff --git a/scripts/qapi/events.py b/scripts/qapi/events.py index fee8c671e7..27b44c49f5 100644 --- a/scripts/qapi/events.py +++ b/scripts/qapi/events.py @@ -12,7 +12,7 @@ This work is licensed under the terms of the GNU GPL, version 2. See the COPYING file in the top-level directory. 
""" -from typing import List, Optional, Sequence +from typing import List, Optional from .common import c_enum_const, c_name, mcgen from .gen import QAPISchemaModularCVisitor, build_params, ifcontext @@ -20,6 +20,7 @@ from .schema import ( QAPISchema, QAPISchemaEnumMember, QAPISchemaFeature, + QAPISchemaIfCond, QAPISchemaObjectType, ) from .source import QAPISourceInfo @@ -108,13 +109,15 @@ def gen_event_send(name: str, if not boxed: ret += gen_param_var(arg_type) - if 'deprecated' in [f.name for f in features]: - ret += mcgen(''' + for f in features: + if f.is_special(): + ret += mcgen(''' - if (compat_policy.deprecated_output == COMPAT_POLICY_OUTPUT_HIDE) { + if (compat_policy.%(feat)s_output == COMPAT_POLICY_OUTPUT_HIDE) { return; } -''') +''', + feat=f.name) ret += mcgen(''' @@ -227,7 +230,7 @@ void %(event_emit)s(%(event_enum)s event, QDict *qdict); def visit_event(self, name: str, info: Optional[QAPISourceInfo], - ifcond: Sequence[str], + ifcond: QAPISchemaIfCond, features: List[QAPISchemaFeature], arg_type: Optional[QAPISchemaObjectType], boxed: bool) -> None: diff --git a/scripts/qapi/expr.py b/scripts/qapi/expr.py index cf98923fa6..5a1782b57e 100644 --- a/scripts/qapi/expr.py +++ b/scripts/qapi/expr.py @@ -171,7 +171,7 @@ def check_defn_name_str(name: str, info: QAPISourceInfo, meta: str) -> None: - 'event' names adhere to `check_name_upper()`. - 'command' names adhere to `check_name_lower()`. - Else, meta is a type, and must pass `check_name_camel()`. - These names must not end with ``Kind`` nor ``List``. + These names must not end with ``List``. :param name: Name to check. :param info: QAPI schema source file information. @@ -187,9 +187,9 @@ def check_defn_name_str(name: str, info: QAPISourceInfo, meta: str) -> None: permit_underscore=name in info.pragma.command_name_exceptions) else: check_name_camel(name, info, meta) - if name.endswith('Kind') or name.endswith('List'): + if name.endswith('List'): raise QAPISemError( - info, "%s name should not end in '%s'" % (meta, name[-4:])) + info, "%s name should not end in 'List'" % meta) def check_keys(value: _JSONObject, @@ -259,14 +259,9 @@ def check_flags(expr: _JSONObject, info: QAPISourceInfo) -> None: def check_if(expr: _JSONObject, info: QAPISourceInfo, source: str) -> None: """ - Normalize and validate the ``if`` member of an object. + Validate the ``if`` member of an object. - The ``if`` member may be either a ``str`` or a ``List[str]``. - A ``str`` value will be normalized to ``List[str]``. - - :forms: - :sugared: ``Union[str, List[str]]`` - :canonical: ``List[str]`` + The ``if`` member may be either a ``str`` or a dict. :param expr: The expression containing the ``if`` member to validate. :param info: QAPI schema source file information. @@ -275,31 +270,53 @@ def check_if(expr: _JSONObject, info: QAPISourceInfo, source: str) -> None: :raise QAPISemError: When the "if" member fails validation, or when there are no non-empty conditions. - :return: None, ``expr`` is normalized in-place as needed. 
+ :return: None """ + + def _check_if(cond: Union[str, object]) -> None: + if isinstance(cond, str): + if not re.fullmatch(r'[A-Z][A-Z0-9_]*', cond): + raise QAPISemError( + info, + "'if' condition '%s' of %s is not a valid identifier" + % (cond, source)) + return + + if not isinstance(cond, dict): + raise QAPISemError( + info, + "'if' condition of %s must be a string or an object" % source) + check_keys(cond, info, "'if' condition of %s" % source, [], + ["all", "any", "not"]) + if len(cond) != 1: + raise QAPISemError( + info, + "'if' condition of %s has conflicting keys" % source) + + if 'not' in cond: + _check_if(cond['not']) + elif 'all' in cond: + _check_infix('all', cond['all']) + else: + _check_infix('any', cond['any']) + + def _check_infix(operator: str, operands: object) -> None: + if not isinstance(operands, list): + raise QAPISemError( + info, + "'%s' condition of %s must be an array" + % (operator, source)) + if not operands: + raise QAPISemError( + info, "'if' condition [] of %s is useless" % source) + for operand in operands: + _check_if(operand) + ifcond = expr.get('if') if ifcond is None: return - if isinstance(ifcond, list): - if not ifcond: - raise QAPISemError( - info, "'if' condition [] of %s is useless" % source) - else: - # Normalize to a list - ifcond = expr['if'] = [ifcond] - - for elt in ifcond: - if not isinstance(elt, str): - raise QAPISemError( - info, - "'if' condition of %s must be a string or a list of strings" - % source) - if not elt.strip(): - raise QAPISemError( - info, - "'if' condition '%s' of %s makes no sense" - % (elt, source)) + _check_if(ifcond) def normalize_members(members: object) -> None: @@ -426,7 +443,7 @@ def check_features(features: Optional[object], check_keys(feat, info, source, ['name'], ['if']) check_name_is_str(feat['name'], info, source) source = "%s '%s'" % (source, feat['name']) - check_name_str(feat['name'], info, source) + check_name_lower(feat['name'], info, source) check_if(feat, info, source) @@ -455,7 +472,7 @@ def check_enum(expr: _JSONObject, info: QAPISourceInfo) -> None: for m in members] for member in members: source = "'data' member" - check_keys(member, info, source, ['name'], ['if']) + check_keys(member, info, source, ['name'], ['if', 'features']) member_name = member['name'] check_name_is_str(member_name, info, source) source = "%s '%s'" % (source, member_name) @@ -466,6 +483,7 @@ def check_enum(expr: _JSONObject, info: QAPISourceInfo) -> None: permit_upper=permissive, permit_underscore=permissive) check_if(member, info, source) + check_features(member.get('features'), info) def check_struct(expr: _JSONObject, info: QAPISourceInfo) -> None: @@ -496,27 +514,18 @@ def check_union(expr: _JSONObject, info: QAPISourceInfo) -> None: :return: None, ``expr`` is normalized in-place as needed. 
""" name = cast(str, expr['union']) # Checked in check_exprs - base = expr.get('base') - discriminator = expr.get('discriminator') + base = expr['base'] + discriminator = expr['discriminator'] members = expr['data'] - if discriminator is None: # simple union - if base is not None: - raise QAPISemError(info, "'base' requires 'discriminator'") - else: # flat union - check_type(base, info, "'base'", allow_dict=name) - if not base: - raise QAPISemError(info, "'discriminator' requires 'base'") - check_name_is_str(discriminator, info, "'discriminator'") + check_type(base, info, "'base'", allow_dict=name) + check_name_is_str(discriminator, info, "'discriminator'") if not isinstance(members, dict): raise QAPISemError(info, "'data' must be an object") for (key, value) in members.items(): source = "'data' member '%s'" % key - if discriminator is None: - check_name_lower(key, info, source) - # else: name is in discriminator enum, which gets checked check_keys(value, info, source, ['type'], ['if']) check_if(value, info, source) check_type(value['type'], info, source, allow_array=not base) @@ -545,7 +554,7 @@ def check_alternate(expr: _JSONObject, info: QAPISourceInfo) -> None: check_name_lower(key, info, source) check_keys(value, info, source, ['type'], ['if']) check_if(value, info, source) - check_type(value['type'], info, source) + check_type(value['type'], info, source, allow_array=True) def check_command(expr: _JSONObject, info: QAPISourceInfo) -> None: @@ -617,20 +626,15 @@ def check_exprs(exprs: List[_JSONObject]) -> List[_JSONObject]: if 'include' in expr: continue - if 'enum' in expr: - meta = 'enum' - elif 'union' in expr: - meta = 'union' - elif 'alternate' in expr: - meta = 'alternate' - elif 'struct' in expr: - meta = 'struct' - elif 'command' in expr: - meta = 'command' - elif 'event' in expr: - meta = 'event' - else: - raise QAPISemError(info, "expression is missing metatype") + metas = expr.keys() & {'enum', 'struct', 'union', 'alternate', + 'command', 'event'} + if len(metas) != 1: + raise QAPISemError( + info, + "expression must have exactly one key" + " 'enum', 'struct', 'union', 'alternate'," + " 'command', 'event'") + meta = metas.pop() check_name_is_str(expr[meta], info, "'%s'" % meta) name = cast(str, expr[meta]) @@ -652,8 +656,8 @@ def check_exprs(exprs: List[_JSONObject]) -> List[_JSONObject]: check_enum(expr, info) elif meta == 'union': check_keys(expr, info, meta, - ['union', 'data'], - ['base', 'discriminator', 'if', 'features']) + ['union', 'base', 'discriminator', 'data'], + ['if', 'features']) normalize_members(expr.get('base')) normalize_members(expr['data']) check_union(expr, info) diff --git a/scripts/qapi/gen.py b/scripts/qapi/gen.py index 1fa503bdbd..113b49134d 100644 --- a/scripts/qapi/gen.py +++ b/scripts/qapi/gen.py @@ -25,13 +25,13 @@ from typing import ( from .common import ( c_fname, c_name, - gen_endif, - gen_if, guardend, guardstart, mcgen, ) from .schema import ( + QAPISchemaFeature, + QAPISchemaIfCond, QAPISchemaModule, QAPISchemaObjectType, QAPISchemaVisitor, @@ -39,6 +39,12 @@ from .schema import ( from .source import QAPISourceInfo +def gen_special_features(features: Sequence[QAPISchemaFeature]) -> str: + special_features = [f"1u << QAPI_{feat.name.upper()}" + for feat in features if feat.is_special()] + return ' | '.join(special_features) or '0' + + class QAPIGen: def __init__(self, fname: str): self.fname = fname @@ -85,7 +91,7 @@ class QAPIGen: fp.write(text) -def _wrap_ifcond(ifcond: Sequence[str], before: str, after: str) -> str: +def 
_wrap_ifcond(ifcond: QAPISchemaIfCond, before: str, after: str) -> str: if before == after: return after # suppress empty #if ... #endif @@ -95,9 +101,9 @@ def _wrap_ifcond(ifcond: Sequence[str], before: str, after: str) -> str: if added[0] == '\n': out += '\n' added = added[1:] - out += gen_if(ifcond) + out += ifcond.gen_if() out += added - out += gen_endif(ifcond) + out += ifcond.gen_endif() return out @@ -127,9 +133,9 @@ def build_params(arg_type: Optional[QAPISchemaObjectType], class QAPIGenCCode(QAPIGen): def __init__(self, fname: str): super().__init__(fname) - self._start_if: Optional[Tuple[Sequence[str], str, str]] = None + self._start_if: Optional[Tuple[QAPISchemaIfCond, str, str]] = None - def start_if(self, ifcond: Sequence[str]) -> None: + def start_if(self, ifcond: QAPISchemaIfCond) -> None: assert self._start_if is None self._start_if = (ifcond, self._body, self._preamble) @@ -186,8 +192,13 @@ class QAPIGenH(QAPIGenC): return guardend(self.fname) +class QAPIGenTrace(QAPIGen): + def _top(self) -> str: + return super()._top() + '# AUTOMATICALLY GENERATED, DO NOT MODIFY\n\n' + + @contextmanager -def ifcontext(ifcond: Sequence[str], *args: QAPIGenCCode) -> Iterator[None]: +def ifcontext(ifcond: QAPISchemaIfCond, *args: QAPIGenCCode) -> Iterator[None]: """ A with-statement context manager that wraps with `start_if()` / `end_if()`. @@ -238,15 +249,18 @@ class QAPISchemaModularCVisitor(QAPISchemaVisitor): what: str, user_blurb: str, builtin_blurb: Optional[str], - pydoc: str): + pydoc: str, + gen_tracing: bool = False): self._prefix = prefix self._what = what self._user_blurb = user_blurb self._builtin_blurb = builtin_blurb self._pydoc = pydoc self._current_module: Optional[str] = None - self._module: Dict[str, Tuple[QAPIGenC, QAPIGenH]] = {} + self._module: Dict[str, Tuple[QAPIGenC, QAPIGenH, + Optional[QAPIGenTrace]]] = {} self._main_module: Optional[str] = None + self._gen_tracing = gen_tracing @property def _genc(self) -> QAPIGenC: @@ -258,6 +272,14 @@ class QAPISchemaModularCVisitor(QAPISchemaVisitor): assert self._current_module is not None return self._module[self._current_module][1] + @property + def _gen_trace_events(self) -> QAPIGenTrace: + assert self._gen_tracing + assert self._current_module is not None + gent = self._module[self._current_module][2] + assert gent is not None + return gent + @staticmethod def _module_dirname(name: str) -> str: if QAPISchemaModule.is_user_module(name): @@ -287,7 +309,12 @@ class QAPISchemaModularCVisitor(QAPISchemaVisitor): basename = self._module_filename(self._what, name) genc = QAPIGenC(basename + '.c', blurb, self._pydoc) genh = QAPIGenH(basename + '.h', blurb, self._pydoc) - self._module[name] = (genc, genh) + + gent: Optional[QAPIGenTrace] = None + if self._gen_tracing: + gent = QAPIGenTrace(basename + '.trace-events') + + self._module[name] = (genc, genh, gent) self._current_module = name @contextmanager @@ -298,12 +325,13 @@ class QAPISchemaModularCVisitor(QAPISchemaVisitor): self._current_module = old_module def write(self, output_dir: str, opt_builtins: bool = False) -> None: - for name in self._module: + for name, (genc, genh, gent) in self._module.items(): if QAPISchemaModule.is_builtin_module(name) and not opt_builtins: continue - (genc, genh) = self._module[name] genc.write(output_dir) genh.write(output_dir) + if gent is not None: + gent.write(output_dir) def _begin_builtin_module(self) -> None: pass diff --git a/scripts/qapi/introspect.py b/scripts/qapi/introspect.py index 9a348ca2e5..67c7d89aae 100644 --- 
a/scripts/qapi/introspect.py +++ b/scripts/qapi/introspect.py @@ -15,21 +15,14 @@ from typing import ( Any, Dict, Generic, - Iterable, List, Optional, Sequence, - Tuple, TypeVar, Union, ) -from .common import ( - c_name, - gen_endif, - gen_if, - mcgen, -) +from .common import c_name, mcgen from .gen import QAPISchemaMonolithicCVisitor from .schema import ( QAPISchema, @@ -38,6 +31,7 @@ from .schema import ( QAPISchemaEntity, QAPISchemaEnumMember, QAPISchemaFeature, + QAPISchemaIfCond, QAPISchemaObjectType, QAPISchemaObjectTypeMember, QAPISchemaType, @@ -74,6 +68,7 @@ JSONValue = Union[_Value, 'Annotated[_Value]'] # TypedDict constructs, so they are broadly typed here as simple # Python Dicts. SchemaInfo = Dict[str, object] +SchemaInfoEnumMember = Dict[str, object] SchemaInfoObject = Dict[str, object] SchemaInfoObjectVariant = Dict[str, object] SchemaInfoObjectMember = Dict[str, object] @@ -91,11 +86,11 @@ class Annotated(Generic[_ValueT]): """ # TODO: Remove after Python 3.7 adds @dataclass: # pylint: disable=too-few-public-methods - def __init__(self, value: _ValueT, ifcond: Iterable[str], + def __init__(self, value: _ValueT, ifcond: QAPISchemaIfCond, comment: Optional[str] = None): self.value = value self.comment: Optional[str] = comment - self.ifcond: Tuple[str, ...] = tuple(ifcond) + self.ifcond = ifcond def _tree_to_qlit(obj: JSONValue, @@ -124,11 +119,11 @@ def _tree_to_qlit(obj: JSONValue, ret = '' if obj.comment: ret += indent(level) + f"/* {obj.comment} */\n" - if obj.ifcond: - ret += gen_if(obj.ifcond) + if obj.ifcond.is_present(): + ret += obj.ifcond.gen_if() ret += _tree_to_qlit(obj.value, level) - if obj.ifcond: - ret += '\n' + gen_endif(obj.ifcond) + if obj.ifcond.is_present(): + ret += '\n' + obj.ifcond.gen_endif() return ret ret = '' @@ -254,7 +249,7 @@ const QLitObject %(c_name)s = %(c_string)s; return [Annotated(f.name, f.ifcond) for f in features] def _gen_tree(self, name: str, mtype: str, obj: Dict[str, object], - ifcond: Sequence[str] = (), + ifcond: QAPISchemaIfCond = QAPISchemaIfCond(), features: Sequence[QAPISchemaFeature] = ()) -> None: """ Build and append a SchemaInfo object to self._trees. 
@@ -280,8 +275,17 @@ const QLitObject %(c_name)s = %(c_string)s; obj['features'] = self._gen_features(features) self._trees.append(Annotated(obj, ifcond, comment)) - def _gen_member(self, member: QAPISchemaObjectTypeMember - ) -> Annotated[SchemaInfoObjectMember]: + def _gen_enum_member(self, member: QAPISchemaEnumMember + ) -> Annotated[SchemaInfoEnumMember]: + obj: SchemaInfoEnumMember = { + 'name': member.name, + } + if member.features: + obj['features'] = self._gen_features(member.features) + return Annotated(obj, member.ifcond) + + def _gen_object_member(self, member: QAPISchemaObjectTypeMember + ) -> Annotated[SchemaInfoObjectMember]: obj: SchemaInfoObjectMember = { 'name': member.name, 'type': self._use_type(member.type) @@ -305,30 +309,31 @@ const QLitObject %(c_name)s = %(c_string)s; self._gen_tree(name, 'builtin', {'json-type': json_type}) def visit_enum_type(self, name: str, info: Optional[QAPISourceInfo], - ifcond: Sequence[str], + ifcond: QAPISchemaIfCond, features: List[QAPISchemaFeature], members: List[QAPISchemaEnumMember], prefix: Optional[str]) -> None: self._gen_tree( name, 'enum', - {'values': [Annotated(m.name, m.ifcond) for m in members]}, + {'members': [self._gen_enum_member(m) for m in members], + 'values': [Annotated(m.name, m.ifcond) for m in members]}, ifcond, features ) def visit_array_type(self, name: str, info: Optional[QAPISourceInfo], - ifcond: Sequence[str], + ifcond: QAPISchemaIfCond, element_type: QAPISchemaType) -> None: element = self._use_type(element_type) self._gen_tree('[' + element + ']', 'array', {'element-type': element}, ifcond) def visit_object_type_flat(self, name: str, info: Optional[QAPISourceInfo], - ifcond: Sequence[str], + ifcond: QAPISchemaIfCond, features: List[QAPISchemaFeature], members: List[QAPISchemaObjectTypeMember], variants: Optional[QAPISchemaVariants]) -> None: obj: SchemaInfoObject = { - 'members': [self._gen_member(m) for m in members] + 'members': [self._gen_object_member(m) for m in members] } if variants: obj['tag'] = variants.tag_member.name @@ -336,7 +341,7 @@ const QLitObject %(c_name)s = %(c_string)s; self._gen_tree(name, 'object', obj, ifcond, features) def visit_alternate_type(self, name: str, info: Optional[QAPISourceInfo], - ifcond: Sequence[str], + ifcond: QAPISchemaIfCond, features: List[QAPISchemaFeature], variants: QAPISchemaVariants) -> None: self._gen_tree( @@ -348,7 +353,7 @@ const QLitObject %(c_name)s = %(c_string)s; ) def visit_command(self, name: str, info: Optional[QAPISourceInfo], - ifcond: Sequence[str], + ifcond: QAPISchemaIfCond, features: List[QAPISchemaFeature], arg_type: Optional[QAPISchemaObjectType], ret_type: Optional[QAPISchemaType], gen: bool, @@ -367,7 +372,8 @@ const QLitObject %(c_name)s = %(c_string)s; self._gen_tree(name, 'command', obj, ifcond, features) def visit_event(self, name: str, info: Optional[QAPISourceInfo], - ifcond: Sequence[str], features: List[QAPISchemaFeature], + ifcond: QAPISchemaIfCond, + features: List[QAPISchemaFeature], arg_type: Optional[QAPISchemaObjectType], boxed: bool) -> None: assert self._schema is not None diff --git a/scripts/qapi/main.py b/scripts/qapi/main.py index f2ea6e0ce4..fc216a53d3 100644 --- a/scripts/qapi/main.py +++ b/scripts/qapi/main.py @@ -32,7 +32,8 @@ def generate(schema_file: str, output_dir: str, prefix: str, unmask: bool = False, - builtins: bool = False) -> None: + builtins: bool = False, + gen_tracing: bool = False) -> None: """ Generate C code for the given schema into the target directory. 
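# Editor's illustration (not part of the patch): when generate() is called with
# gen_tracing=True (the default unless --suppress-tracing is passed, see the
# following main.py hunk), gen_commands() also emits companion .trace-events
# files via QAPIGenTrace above. Based on gen_trace() in the commands.py hunk,
# a command named, say, 'example-command' (hypothetical) would get:
#
#     qmp_enter_example_command(const char *json) "%s"
#     qmp_exit_example_command(const char *result, bool succeeded) "%s %d"
#
# and the generated marshal code only serializes arguments/results to JSON
# when the corresponding trace event is enabled, as guarded by
# trace_event_get_state_backends().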
@@ -49,7 +50,7 @@ def generate(schema_file: str, schema = QAPISchema(schema_file) gen_types(schema, output_dir, prefix, builtins) gen_visit(schema, output_dir, prefix, builtins) - gen_commands(schema, output_dir, prefix) + gen_commands(schema, output_dir, prefix, gen_tracing) gen_events(schema, output_dir, prefix) gen_introspect(schema, output_dir, prefix, unmask) @@ -74,6 +75,12 @@ def main() -> int: parser.add_argument('-u', '--unmask-non-abi-names', action='store_true', dest='unmask', help="expose non-ABI names in introspection") + + # Option --suppress-tracing exists so we can avoid solving build system + # problems. TODO Drop it when we no longer need it. + parser.add_argument('--suppress-tracing', action='store_true', + help="suppress adding trace events to qmp marshals") + parser.add_argument('schema', action='store') args = parser.parse_args() @@ -88,7 +95,8 @@ def main() -> int: output_dir=args.output_dir, prefix=args.prefix, unmask=args.unmask, - builtins=args.builtins) + builtins=args.builtins, + gen_tracing=not args.suppress_tracing) except QAPIError as err: print(f"{sys.argv[0]}: {str(err)}", file=sys.stderr) return 1 diff --git a/scripts/qapi/mypy.ini b/scripts/qapi/mypy.ini index 54ca4483d6..6625356429 100644 --- a/scripts/qapi/mypy.ini +++ b/scripts/qapi/mypy.ini @@ -3,11 +3,6 @@ strict = True disallow_untyped_calls = False python_version = 3.6 -[mypy-qapi.parser] -disallow_untyped_defs = False -disallow_incomplete_defs = False -check_untyped_defs = False - [mypy-qapi.schema] disallow_untyped_defs = False disallow_incomplete_defs = False diff --git a/scripts/qapi/parser.py b/scripts/qapi/parser.py index f03ba2cfec..1b006cdc13 100644 --- a/scripts/qapi/parser.py +++ b/scripts/qapi/parser.py @@ -18,6 +18,7 @@ from collections import OrderedDict import os import re from typing import ( + TYPE_CHECKING, Dict, List, Optional, @@ -30,9 +31,22 @@ from .error import QAPISemError, QAPISourceError from .source import QAPISourceInfo +if TYPE_CHECKING: + # pylint: disable=cyclic-import + # TODO: Remove cycle. [schema -> expr -> parser -> schema] + from .schema import QAPISchemaFeature, QAPISchemaMember + + +#: Represents a single Top Level QAPI schema expression. +TopLevelExpr = Dict[str, object] + # Return value alias for get_expr(). _ExprValue = Union[List[object], Dict[str, object], str, bool] +# FIXME: Consolidate and centralize definitions for TopLevelExpr, +# _ExprValue, _JSONValue, and _JSONObject; currently scattered across +# several modules. + class QAPIParseError(QAPISourceError): """Error class for all QAPI schema parsing errors.""" @@ -447,7 +461,10 @@ class QAPIDoc: """ class Section: - def __init__(self, parser, name=None, indent=0): + # pylint: disable=too-few-public-methods + def __init__(self, parser: QAPISchemaParser, + name: Optional[str] = None, indent: int = 0): + # parser, for error messages about indentation self._parser = parser # optional section name (argument/member or section name) @@ -456,7 +473,7 @@ class QAPIDoc: # the expected indent level of the text of this section self._indent = indent - def append(self, line): + def append(self, line: str) -> None: # Strip leading spaces corresponding to the expected indent level # Blank lines are always OK. 
if line: @@ -471,39 +488,47 @@ class QAPIDoc: self.text += line.rstrip() + '\n' class ArgSection(Section): - def __init__(self, parser, name, indent=0): + def __init__(self, parser: QAPISchemaParser, + name: str, indent: int = 0): super().__init__(parser, name, indent) - self.member = None + self.member: Optional['QAPISchemaMember'] = None - def connect(self, member): + def connect(self, member: 'QAPISchemaMember') -> None: self.member = member - def __init__(self, parser, info): + class NullSection(Section): + """ + Immutable dummy section for use at the end of a doc block. + """ + # pylint: disable=too-few-public-methods + def append(self, line: str) -> None: + assert False, "Text appended after end_comment() called." + + def __init__(self, parser: QAPISchemaParser, info: QAPISourceInfo): # self._parser is used to report errors with QAPIParseError. The # resulting error position depends on the state of the parser. # It happens to be the beginning of the comment. More or less # servicable, but action at a distance. self._parser = parser self.info = info - self.symbol = None + self.symbol: Optional[str] = None self.body = QAPIDoc.Section(parser) - # dict mapping parameter name to ArgSection - self.args = OrderedDict() - self.features = OrderedDict() - # a list of Section - self.sections = [] + # dicts mapping parameter/feature names to their ArgSection + self.args: Dict[str, QAPIDoc.ArgSection] = OrderedDict() + self.features: Dict[str, QAPIDoc.ArgSection] = OrderedDict() + self.sections: List[QAPIDoc.Section] = [] # the current section self._section = self.body self._append_line = self._append_body_line - def has_section(self, name): + def has_section(self, name: str) -> bool: """Return True if we have a section with this name.""" for i in self.sections: if i.name == name: return True return False - def append(self, line): + def append(self, line: str) -> None: """ Parse a comment line and add it to the documentation. @@ -524,18 +549,18 @@ class QAPIDoc: line = line[1:] self._append_line(line) - def end_comment(self): - self._end_section() + def end_comment(self) -> None: + self._switch_section(QAPIDoc.NullSection(self._parser)) @staticmethod - def _is_section_tag(name): + def _is_section_tag(name: str) -> bool: return name in ('Returns:', 'Since:', # those are often singular or plural 'Note:', 'Notes:', 'Example:', 'Examples:', 'TODO:') - def _append_body_line(self, line): + def _append_body_line(self, line: str) -> None: """ Process a line of documentation text in the body section. @@ -556,9 +581,11 @@ class QAPIDoc: if not line.endswith(':'): raise QAPIParseError(self._parser, "line should end with ':'") self.symbol = line[1:-1] - # FIXME invalid names other than the empty string aren't flagged + # Invalid names are not checked here, but the name provided MUST + # match the following definition, which *is* validated in expr.py. if not self.symbol: - raise QAPIParseError(self._parser, "invalid name") + raise QAPIParseError( + self._parser, "name required after '@'") elif self.symbol: # This is a definition documentation block if name.startswith('@') and name.endswith(':'): @@ -575,7 +602,7 @@ class QAPIDoc: # This is a free-form documentation block self._append_freeform(line) - def _append_args_line(self, line): + def _append_args_line(self, line: str) -> None: """ Process a line of documentation text in an argument section. 
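# Editor's illustration (hypothetical schema snippet, not from this patch): the
# QAPIDoc machinery above splits a definition doc comment into a body, argument
# sections (ArgSection), feature sections, and tagged sections such as
# 'Returns:' or 'Since:'. The general shape being parsed is roughly:
#
#     ##
#     # @example-command:
#     #
#     # Free-form body text describing the command.
#     #
#     # @arg-one: description of an argument (becomes an ArgSection)
#     #
#     # Features:
#     # @unstable: description of a feature
#     #
#     # Returns: nothing on success
#     #
#     # Since: 7.0
#     ##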
@@ -621,7 +648,7 @@ class QAPIDoc: self._append_freeform(line) - def _append_features_line(self, line): + def _append_features_line(self, line: str) -> None: name = line.split(' ', 1)[0] if name.startswith('@') and name.endswith(':'): @@ -653,7 +680,7 @@ class QAPIDoc: self._append_freeform(line) - def _append_various_line(self, line): + def _append_various_line(self, line: str) -> None: """ Process a line of documentation text in an additional section. @@ -689,7 +716,11 @@ class QAPIDoc: self._append_freeform(line) - def _start_symbol_section(self, symbols_dict, name, indent): + def _start_symbol_section( + self, + symbols_dict: Dict[str, 'QAPIDoc.ArgSection'], + name: str, + indent: int) -> None: # FIXME invalid names other than the empty string aren't flagged if not name: raise QAPIParseError(self._parser, "invalid parameter name") @@ -697,34 +728,41 @@ class QAPIDoc: raise QAPIParseError(self._parser, "'%s' parameter name duplicated" % name) assert not self.sections - self._end_section() - self._section = QAPIDoc.ArgSection(self._parser, name, indent) - symbols_dict[name] = self._section + new_section = QAPIDoc.ArgSection(self._parser, name, indent) + self._switch_section(new_section) + symbols_dict[name] = new_section - def _start_args_section(self, name, indent): + def _start_args_section(self, name: str, indent: int) -> None: self._start_symbol_section(self.args, name, indent) - def _start_features_section(self, name, indent): + def _start_features_section(self, name: str, indent: int) -> None: self._start_symbol_section(self.features, name, indent) - def _start_section(self, name=None, indent=0): + def _start_section(self, name: Optional[str] = None, + indent: int = 0) -> None: if name in ('Returns', 'Since') and self.has_section(name): raise QAPIParseError(self._parser, "duplicated '%s' section" % name) - self._end_section() - self._section = QAPIDoc.Section(self._parser, name, indent) - self.sections.append(self._section) + new_section = QAPIDoc.Section(self._parser, name, indent) + self._switch_section(new_section) + self.sections.append(new_section) - def _end_section(self): - if self._section: - text = self._section.text = self._section.text.strip() - if self._section.name and (not text or text.isspace()): - raise QAPIParseError( - self._parser, - "empty doc section '%s'" % self._section.name) - self._section = None + def _switch_section(self, new_section: 'QAPIDoc.Section') -> None: + text = self._section.text = self._section.text.strip() - def _append_freeform(self, line): + # Only the 'body' section is allowed to have an empty body. + # All other sections, including anonymous ones, must have text. + if self._section != self.body and not text: + # We do not create anonymous sections unless there is + # something to put in them; this is a parser bug. 
+ assert self._section.name + raise QAPIParseError( + self._parser, + "empty doc section '%s'" % self._section.name) + + self._section = new_section + + def _append_freeform(self, line: str) -> None: match = re.match(r'(@\S+:)', line) if match: raise QAPIParseError(self._parser, @@ -732,37 +770,41 @@ class QAPIDoc: % match.group(1)) self._section.append(line) - def connect_member(self, member): + def connect_member(self, member: 'QAPISchemaMember') -> None: if member.name not in self.args: # Undocumented TODO outlaw self.args[member.name] = QAPIDoc.ArgSection(self._parser, member.name) self.args[member.name].connect(member) - def connect_feature(self, feature): + def connect_feature(self, feature: 'QAPISchemaFeature') -> None: if feature.name not in self.features: raise QAPISemError(feature.info, "feature '%s' lacks documentation" % feature.name) self.features[feature.name].connect(feature) - def check_expr(self, expr): + def check_expr(self, expr: TopLevelExpr) -> None: if self.has_section('Returns') and 'command' not in expr: raise QAPISemError(self.info, "'Returns:' is only valid for commands") - def check(self): + def check(self) -> None: - def check_args_section(args, info, what): + def check_args_section( + args: Dict[str, QAPIDoc.ArgSection], what: str + ) -> None: bogus = [name for name, section in args.items() if not section.member] if bogus: raise QAPISemError( self.info, - "documented member%s '%s' %s not exist" - % ("s" if len(bogus) > 1 else "", - "', '".join(bogus), - "do" if len(bogus) > 1 else "does")) + "documented %s%s '%s' %s not exist" % ( + what, + "s" if len(bogus) > 1 else "", + "', '".join(bogus), + "do" if len(bogus) > 1 else "does" + )) - check_args_section(self.args, self.info, 'members') - check_args_section(self.features, self.info, 'features') + check_args_section(self.args, 'member') + check_args_section(self.features, 'feature') diff --git a/scripts/qapi/pylintrc b/scripts/qapi/pylintrc index c5275d5f59..a724628203 100644 --- a/scripts/qapi/pylintrc +++ b/scripts/qapi/pylintrc @@ -2,8 +2,7 @@ # Add files or directories matching the regex patterns to the ignore list. # The regex matches against base names, not paths. -ignore-patterns=parser.py, - schema.py, +ignore-patterns=schema.py, [MESSAGES CONTROL] @@ -23,6 +22,7 @@ disable=fixme, too-many-branches, too-many-statements, too-many-instance-attributes, + consider-using-f-string, [REPORTS] @@ -34,16 +34,12 @@ disable=fixme, [BASIC] -# Good variable names which should always be accepted, separated by a comma. -good-names=i, - j, - k, - ex, - Run, - _, - fp, # fp = open(...) - fd, # fd = os.open(...) - ch, +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted. +# +# Suppress complaints about short names. PEP-8 is cool with them, +# and so are we. 
+good-names-rgxs=^[_a-z][_a-z0-9]?$ [VARIABLES] diff --git a/scripts/qapi/schema.py b/scripts/qapi/schema.py index d1d27ff7ee..3728340c37 100644 --- a/scripts/qapi/schema.py +++ b/scripts/qapi/schema.py @@ -19,12 +19,39 @@ import os import re from typing import Optional -from .common import POINTER_SUFFIX, c_name +from .common import ( + POINTER_SUFFIX, + c_name, + cgen_ifcond, + docgen_ifcond, + gen_endif, + gen_if, +) from .error import QAPIError, QAPISemError, QAPISourceError from .expr import check_exprs from .parser import QAPISchemaParser +class QAPISchemaIfCond: + def __init__(self, ifcond=None): + self.ifcond = ifcond + + def _cgen(self): + return cgen_ifcond(self.ifcond) + + def gen_if(self): + return gen_if(self._cgen()) + + def gen_endif(self): + return gen_endif(self._cgen()) + + def docgen(self): + return docgen_ifcond(self.ifcond) + + def is_present(self): + return bool(self.ifcond) + + class QAPISchemaEntity: meta: Optional[str] = None @@ -42,7 +69,7 @@ class QAPISchemaEntity: # such place). self.info = info self.doc = doc - self._ifcond = ifcond or [] + self._ifcond = ifcond or QAPISchemaIfCond() self.features = features or [] self._checked = False @@ -216,6 +243,7 @@ class QAPISchemaType(QAPISchemaEntity): 'number': 'QTYPE_QNUM', 'int': 'QTYPE_QNUM', 'boolean': 'QTYPE_QBOOL', + 'array': 'QTYPE_QLIST', 'object': 'QTYPE_QDICT' } return json2qtype.get(self.json_type()) @@ -227,9 +255,11 @@ class QAPISchemaType(QAPISchemaEntity): def check(self, schema): QAPISchemaEntity.check(self, schema) - if 'deprecated' in [f.name for f in self.features]: - raise QAPISemError( - self.info, "feature 'deprecated' is not supported for types") + for feat in self.features: + if feat.is_special(): + raise QAPISemError( + self.info, + f"feature '{feat.name}' is not supported for types") def describe(self): assert self.meta @@ -294,8 +324,8 @@ class QAPISchemaEnumType(QAPISchemaType): m.connect_doc(doc) def is_implicit(self): - # See QAPISchema._make_implicit_enum_type() and ._def_predefineds() - return self.name.endswith('Kind') or self.name == 'QType' + # See QAPISchema._def_predefineds() + return self.name == 'QType' def c_type(self): return c_name(self.name) @@ -366,8 +396,7 @@ class QAPISchemaObjectType(QAPISchemaType): def __init__(self, name, info, doc, ifcond, features, base, local_members, variants): # struct has local_members, optional base, and no variants - # flat union has base, variants, and no local_members - # simple union has local_members, variants, and no base + # union has base, variants, and no local_members super().__init__(name, info, doc, ifcond, features) self.meta = 'union' if variants else 'struct' assert base is None or isinstance(base, str) @@ -438,15 +467,6 @@ class QAPISchemaObjectType(QAPISchemaType): for m in self.local_members: m.connect_doc(doc) - @property - def ifcond(self): - assert self._checked - if isinstance(self._ifcond, QAPISchemaType): - # Simple union wrapper type inherits from wrapped type; - # see _make_implicit_object_type() - return self._ifcond.ifcond - return self._ifcond - def is_implicit(self): # See QAPISchema._make_implicit_object_type(), as well as # _def_predefineds() @@ -549,10 +569,9 @@ class QAPISchemaAlternateType(QAPISchemaType): class QAPISchemaVariants: def __init__(self, tag_name, info, tag_member, variants): - # Flat unions pass tag_name but not tag_member. - # Simple unions and alternates pass tag_member but not tag_name. 
- # After check(), tag_member is always set, and tag_name remains - # a reliable witness of being used by a flat union. + # Unions pass tag_name but not tag_member. + # Alternates pass tag_member but not tag_name. + # After check(), tag_member is always set. assert bool(tag_member) != bool(tag_name) assert (isinstance(tag_name, str) or isinstance(tag_member, QAPISchemaObjectTypeMember)) @@ -568,7 +587,7 @@ class QAPISchemaVariants: v.set_defined_in(name) def check(self, schema, seen): - if not self.tag_member: # flat union + if self._tag_name: # union self.tag_member = seen.get(c_name(self._tag_name)) base = "'base'" # Pointing to the base type when not implicit would be @@ -593,16 +612,16 @@ class QAPISchemaVariants: self.info, "discriminator member '%s' of %s must not be optional" % (self._tag_name, base)) - if self.tag_member.ifcond: + if self.tag_member.ifcond.is_present(): raise QAPISemError( self.info, "discriminator member '%s' of %s must not be conditional" % (self._tag_name, base)) - else: # simple union + else: # alternate assert isinstance(self.tag_member.type, QAPISchemaEnumType) assert not self.tag_member.optional - assert self.tag_member.ifcond == [] - if self._tag_name: # flat union + assert not self.tag_member.ifcond.is_present() + if self._tag_name: # union # branches that are not explicitly covered get an empty type cases = {v.name for v in self.variants} for m in self.tag_member.type.members: @@ -646,7 +665,7 @@ class QAPISchemaMember: assert isinstance(name, str) self.name = name self.info = info - self.ifcond = ifcond or [] + self.ifcond = ifcond or QAPISchemaIfCond() self.defined_in = None def set_defined_in(self, name): @@ -680,18 +699,10 @@ class QAPISchemaMember: assert role == 'member' role = 'parameter' elif defined_in.endswith('-base'): - # Implicit type created for a flat union's dict 'base' + # Implicit type created for a union's dict 'base' role = 'base ' + role else: - # Implicit type created for a simple union's branch - assert defined_in.endswith('-wrapper') - # Unreachable and not implemented assert False - elif defined_in.endswith('Kind'): - # See QAPISchema._make_implicit_enum_type() - # Implicit enum created for simple union's branches - assert role == 'value' - role = 'branch' elif defined_in != info.defn_name: return "%s '%s' of type '%s'" % (role, self.name, defined_in) return "%s '%s'" % (role, self.name) @@ -700,10 +711,26 @@ class QAPISchemaMember: class QAPISchemaEnumMember(QAPISchemaMember): role = 'value' + def __init__(self, name, info, ifcond=None, features=None): + super().__init__(name, info, ifcond) + for f in features or []: + assert isinstance(f, QAPISchemaFeature) + f.set_defined_in(name) + self.features = features or [] + + def connect_doc(self, doc): + super().connect_doc(doc) + if doc: + for f in self.features: + doc.connect_feature(f) + class QAPISchemaFeature(QAPISchemaMember): role = 'feature' + def is_special(self): + return self.name in ('deprecated', 'unstable') + class QAPISchemaObjectTypeMember(QAPISchemaMember): def __init__(self, name, info, typ, optional, ifcond=None, features=None): @@ -968,21 +995,19 @@ class QAPISchema: def _make_features(self, features, info): if features is None: return [] - return [QAPISchemaFeature(f['name'], info, f.get('if')) + return [QAPISchemaFeature(f['name'], info, + QAPISchemaIfCond(f.get('if'))) for f in features] - def _make_enum_members(self, values, info): - return [QAPISchemaEnumMember(v['name'], info, v.get('if')) - for v in values] + def _make_enum_member(self, name, ifcond, 
features, info): + return QAPISchemaEnumMember(name, info, + QAPISchemaIfCond(ifcond), + self._make_features(features, info)) - def _make_implicit_enum_type(self, name, info, ifcond, values): - # See also QAPISchemaObjectTypeMember.describe() - name = name + 'Kind' # reserved by check_defn_name_str() - self._def_entity(QAPISchemaEnumType( - name, info, None, ifcond, None, - self._make_enum_members(values, info), - None)) - return name + def _make_enum_members(self, values, info): + return [self._make_enum_member(v['name'], v.get('if'), + v.get('features'), info) + for v in values] def _make_array_type(self, element_type, info): name = element_type + 'List' # reserved by check_defn_name_str() @@ -998,17 +1023,9 @@ class QAPISchema: typ = self.lookup_entity(name, QAPISchemaObjectType) if typ: # The implicit object type has multiple users. This can - # happen only for simple unions' implicit wrapper types. - # Its ifcond should be the disjunction of its user's - # ifconds. Not implemented. Instead, we always pass the - # wrapped type's ifcond, which is trivially the same for all - # users. It's also necessary for the wrapper to compile. - # But it's not tight: the disjunction need not imply it. We - # may end up compiling useless wrapper types. - # TODO kill simple unions or implement the disjunction - - # pylint: disable=protected-access - assert (ifcond or []) == typ._ifcond + # only be a duplicate definition, which will be flagged + # later. + pass else: self._def_entity(QAPISchemaObjectType( name, info, None, ifcond, None, None, members, None)) @@ -1018,7 +1035,7 @@ class QAPISchema: name = expr['enum'] data = expr['data'] prefix = expr.get('prefix') - ifcond = expr.get('if') + ifcond = QAPISchemaIfCond(expr.get('if')) features = self._make_features(expr.get('features'), info) self._def_entity(QAPISchemaEnumType( name, info, doc, ifcond, features, @@ -1036,7 +1053,8 @@ class QAPISchema: self._make_features(features, info)) def _make_members(self, data, info): - return [self._make_member(key, value['type'], value.get('if'), + return [self._make_member(key, value['type'], + QAPISchemaIfCond(value.get('if')), value.get('features'), info) for (key, value) in data.items()] @@ -1044,7 +1062,7 @@ class QAPISchema: name = expr['struct'] base = expr.get('base') data = expr['data'] - ifcond = expr.get('if') + ifcond = QAPISchemaIfCond(expr.get('if')) features = self._make_features(expr.get('features'), info) self._def_entity(QAPISchemaObjectType( name, info, doc, ifcond, features, base, @@ -1052,56 +1070,44 @@ class QAPISchema: None)) def _make_variant(self, case, typ, ifcond, info): - return QAPISchemaVariant(case, info, typ, ifcond) - - def _make_simple_variant(self, case, typ, ifcond, info): if isinstance(typ, list): assert len(typ) == 1 typ = self._make_array_type(typ[0], info) - typ = self._make_implicit_object_type( - typ, info, self.lookup_type(typ), - 'wrapper', [self._make_member('data', typ, None, None, info)]) return QAPISchemaVariant(case, info, typ, ifcond) def _def_union_type(self, expr, info, doc): name = expr['union'] + base = expr['base'] + tag_name = expr['discriminator'] data = expr['data'] - base = expr.get('base') - ifcond = expr.get('if') + ifcond = QAPISchemaIfCond(expr.get('if')) features = self._make_features(expr.get('features'), info) - tag_name = expr.get('discriminator') - tag_member = None if isinstance(base, dict): base = self._make_implicit_object_type( name, info, ifcond, 'base', self._make_members(base, info)) - if tag_name: - variants = [self._make_variant(key, 
value['type'], - value.get('if'), info) - for (key, value) in data.items()] - members = [] - else: - variants = [self._make_simple_variant(key, value['type'], - value.get('if'), info) - for (key, value) in data.items()] - enum = [{'name': v.name, 'if': v.ifcond} for v in variants] - typ = self._make_implicit_enum_type(name, info, ifcond, enum) - tag_member = QAPISchemaObjectTypeMember('type', info, typ, False) - members = [tag_member] + variants = [ + self._make_variant(key, value['type'], + QAPISchemaIfCond(value.get('if')), + info) + for (key, value) in data.items()] + members = [] self._def_entity( QAPISchemaObjectType(name, info, doc, ifcond, features, base, members, QAPISchemaVariants( - tag_name, info, tag_member, variants))) + tag_name, info, None, variants))) def _def_alternate_type(self, expr, info, doc): name = expr['alternate'] data = expr['data'] - ifcond = expr.get('if') + ifcond = QAPISchemaIfCond(expr.get('if')) features = self._make_features(expr.get('features'), info) - variants = [self._make_variant(key, value['type'], value.get('if'), - info) - for (key, value) in data.items()] + variants = [ + self._make_variant(key, value['type'], + QAPISchemaIfCond(value.get('if')), + info) + for (key, value) in data.items()] tag_member = QAPISchemaObjectTypeMember('type', info, 'QType', False) self._def_entity( QAPISchemaAlternateType(name, info, doc, ifcond, features, @@ -1118,7 +1124,7 @@ class QAPISchema: allow_oob = expr.get('allow-oob', False) allow_preconfig = expr.get('allow-preconfig', False) coroutine = expr.get('coroutine', False) - ifcond = expr.get('if') + ifcond = QAPISchemaIfCond(expr.get('if')) features = self._make_features(expr.get('features'), info) if isinstance(data, OrderedDict): data = self._make_implicit_object_type( @@ -1137,7 +1143,7 @@ class QAPISchema: name = expr['event'] data = expr.get('data') boxed = expr.get('boxed', False) - ifcond = expr.get('if') + ifcond = QAPISchemaIfCond(expr.get('if')) features = self._make_features(expr.get('features'), info) if isinstance(data, OrderedDict): data = self._make_implicit_object_type( diff --git a/scripts/qapi/types.py b/scripts/qapi/types.py index 20d572a23a..477d027001 100644 --- a/scripts/qapi/types.py +++ b/scripts/qapi/types.py @@ -13,20 +13,19 @@ This work is licensed under the terms of the GNU GPL, version 2. # See the COPYING file in the top-level directory. 
""" -from typing import List, Optional, Sequence +from typing import List, Optional -from .common import ( - c_enum_const, - c_name, - gen_endif, - gen_if, - mcgen, +from .common import c_enum_const, c_name, mcgen +from .gen import ( + QAPISchemaModularCVisitor, + gen_special_features, + ifcontext, ) -from .gen import QAPISchemaModularCVisitor, ifcontext from .schema import ( QAPISchema, QAPISchemaEnumMember, QAPISchemaFeature, + QAPISchemaIfCond, QAPISchemaObjectType, QAPISchemaObjectTypeMember, QAPISchemaType, @@ -43,6 +42,8 @@ objects_seen = set() def gen_enum_lookup(name: str, members: List[QAPISchemaEnumMember], prefix: Optional[str] = None) -> str: + max_index = c_enum_const(name, '_MAX', prefix) + feats = '' ret = mcgen(''' const QEnumLookup %(c_name)s_lookup = { @@ -50,20 +51,35 @@ const QEnumLookup %(c_name)s_lookup = { ''', c_name=c_name(name)) for memb in members: - ret += gen_if(memb.ifcond) + ret += memb.ifcond.gen_if() index = c_enum_const(name, memb.name, prefix) ret += mcgen(''' [%(index)s] = "%(name)s", ''', index=index, name=memb.name) - ret += gen_endif(memb.ifcond) + ret += memb.ifcond.gen_endif() + + special_features = gen_special_features(memb.features) + if special_features != '0': + feats += mcgen(''' + [%(index)s] = %(special_features)s, +''', + index=index, special_features=special_features) + + if feats: + ret += mcgen(''' + }, + .special_features = (const unsigned char[%(max_index)s]) { +''', + max_index=max_index) + ret += feats ret += mcgen(''' }, .size = %(max_index)s }; ''', - max_index=c_enum_const(name, '_MAX', prefix)) + max_index=max_index) return ret @@ -80,12 +96,12 @@ typedef enum %(c_name)s { c_name=c_name(name)) for memb in enum_members: - ret += gen_if(memb.ifcond) + ret += memb.ifcond.gen_if() ret += mcgen(''' %(c_enum)s, ''', c_enum=c_enum_const(name, memb.name, prefix)) - ret += gen_endif(memb.ifcond) + ret += memb.ifcond.gen_endif() ret += mcgen(''' } %(c_name)s; @@ -125,7 +141,7 @@ struct %(c_name)s { def gen_struct_members(members: List[QAPISchemaObjectTypeMember]) -> str: ret = '' for memb in members: - ret += gen_if(memb.ifcond) + ret += memb.ifcond.gen_if() if memb.optional: ret += mcgen(''' bool has_%(c_name)s; @@ -135,11 +151,11 @@ def gen_struct_members(members: List[QAPISchemaObjectTypeMember]) -> str: %(c_type)s %(c_name)s; ''', c_type=memb.type.c_type(), c_name=c_name(memb.name)) - ret += gen_endif(memb.ifcond) + ret += memb.ifcond.gen_endif() return ret -def gen_object(name: str, ifcond: Sequence[str], +def gen_object(name: str, ifcond: QAPISchemaIfCond, base: Optional[QAPISchemaObjectType], members: List[QAPISchemaObjectTypeMember], variants: Optional[QAPISchemaVariants]) -> str: @@ -158,7 +174,7 @@ def gen_object(name: str, ifcond: Sequence[str], ret += mcgen(''' ''') - ret += gen_if(ifcond) + ret += ifcond.gen_if() ret += mcgen(''' struct %(c_name)s { ''', @@ -192,7 +208,7 @@ struct %(c_name)s { ret += mcgen(''' }; ''') - ret += gen_endif(ifcond) + ret += ifcond.gen_endif() return ret @@ -219,13 +235,13 @@ def gen_variants(variants: QAPISchemaVariants) -> str: for var in variants.variants: if var.type.name == 'q_empty': continue - ret += gen_if(var.ifcond) + ret += var.ifcond.gen_if() ret += mcgen(''' %(c_type)s %(c_name)s; ''', c_type=var.type.c_unboxed_type(), c_name=c_name(var.name)) - ret += gen_endif(var.ifcond) + ret += var.ifcond.gen_endif() ret += mcgen(''' } u; @@ -307,7 +323,7 @@ class QAPISchemaGenTypeVisitor(QAPISchemaModularCVisitor): def visit_enum_type(self, name: str, info: Optional[QAPISourceInfo], - ifcond: 
Sequence[str], + ifcond: QAPISchemaIfCond, features: List[QAPISchemaFeature], members: List[QAPISchemaEnumMember], prefix: Optional[str]) -> None: @@ -318,7 +334,7 @@ class QAPISchemaGenTypeVisitor(QAPISchemaModularCVisitor): def visit_array_type(self, name: str, info: Optional[QAPISourceInfo], - ifcond: Sequence[str], + ifcond: QAPISchemaIfCond, element_type: QAPISchemaType) -> None: with ifcontext(ifcond, self._genh, self._genc): self._genh.preamble_add(gen_fwd_object_or_array(name)) @@ -328,7 +344,7 @@ class QAPISchemaGenTypeVisitor(QAPISchemaModularCVisitor): def visit_object_type(self, name: str, info: Optional[QAPISourceInfo], - ifcond: Sequence[str], + ifcond: QAPISchemaIfCond, features: List[QAPISchemaFeature], base: Optional[QAPISchemaObjectType], members: List[QAPISchemaObjectTypeMember], @@ -351,7 +367,7 @@ class QAPISchemaGenTypeVisitor(QAPISchemaModularCVisitor): def visit_alternate_type(self, name: str, info: Optional[QAPISourceInfo], - ifcond: Sequence[str], + ifcond: QAPISchemaIfCond, features: List[QAPISchemaFeature], variants: QAPISchemaVariants) -> None: with ifcontext(ifcond, self._genh): diff --git a/scripts/qapi/visit.py b/scripts/qapi/visit.py index 9e96f3c566..380fa197f5 100644 --- a/scripts/qapi/visit.py +++ b/scripts/qapi/visit.py @@ -13,22 +13,25 @@ This work is licensed under the terms of the GNU GPL, version 2. See the COPYING file in the top-level directory. """ -from typing import List, Optional, Sequence +from typing import List, Optional from .common import ( c_enum_const, c_name, - gen_endif, - gen_if, indent, mcgen, ) -from .gen import QAPISchemaModularCVisitor, ifcontext +from .gen import ( + QAPISchemaModularCVisitor, + gen_special_features, + ifcontext, +) from .schema import ( QAPISchema, QAPISchemaEnumMember, QAPISchemaEnumType, QAPISchemaFeature, + QAPISchemaIfCond, QAPISchemaObjectType, QAPISchemaObjectTypeMember, QAPISchemaType, @@ -77,22 +80,22 @@ bool visit_type_%(c_name)s_members(Visitor *v, %(c_name)s *obj, Error **errp) c_type=base.c_name()) for memb in members: - deprecated = 'deprecated' in [f.name for f in memb.features] - ret += gen_if(memb.ifcond) + ret += memb.ifcond.gen_if() if memb.optional: ret += mcgen(''' if (visit_optional(v, "%(name)s", &obj->has_%(c_name)s)) { ''', name=memb.name, c_name=c_name(memb.name)) indent.increase() - if deprecated: + special_features = gen_special_features(memb.features) + if special_features != '0': ret += mcgen(''' - if (!visit_deprecated_accept(v, "%(name)s", errp)) { + if (visit_policy_reject(v, "%(name)s", %(special_features)s, errp)) { return false; } - if (visit_deprecated(v, "%(name)s")) { + if (!visit_policy_skip(v, "%(name)s", %(special_features)s)) { ''', - name=memb.name) + name=memb.name, special_features=special_features) indent.increase() ret += mcgen(''' if (!visit_type_%(c_type)s(v, "%(name)s", &obj->%(c_name)s, errp)) { @@ -101,7 +104,7 @@ bool visit_type_%(c_name)s_members(Visitor *v, %(c_name)s *obj, Error **errp) ''', c_type=memb.type.c_name(), name=memb.name, c_name=c_name(memb.name)) - if deprecated: + if special_features != '0': indent.decrease() ret += mcgen(''' } @@ -111,7 +114,7 @@ bool visit_type_%(c_name)s_members(Visitor *v, %(c_name)s *obj, Error **errp) ret += mcgen(''' } ''') - ret += gen_endif(memb.ifcond) + ret += memb.ifcond.gen_endif() if variants: tag_member = variants.tag_member @@ -125,7 +128,7 @@ bool visit_type_%(c_name)s_members(Visitor *v, %(c_name)s *obj, Error **errp) for var in variants.variants: case_str = c_enum_const(tag_member.type.name, var.name, 
tag_member.type.prefix) - ret += gen_if(var.ifcond) + ret += var.ifcond.gen_if() if var.type.name == 'q_empty': # valid variant and nothing to do ret += mcgen(''' @@ -141,7 +144,7 @@ bool visit_type_%(c_name)s_members(Visitor *v, %(c_name)s *obj, Error **errp) case=case_str, c_type=var.type.c_name(), c_name=c_name(var.name)) - ret += gen_endif(var.ifcond) + ret += var.ifcond.gen_endif() ret += mcgen(''' default: abort(); @@ -227,7 +230,7 @@ bool visit_type_%(c_name)s(Visitor *v, const char *name, c_name=c_name(name)) for var in variants.variants: - ret += gen_if(var.ifcond) + ret += var.ifcond.gen_if() ret += mcgen(''' case %(case)s: ''', @@ -253,7 +256,7 @@ bool visit_type_%(c_name)s(Visitor *v, const char *name, ret += mcgen(''' break; ''') - ret += gen_endif(var.ifcond) + ret += var.ifcond.gen_endif() ret += mcgen(''' case QTYPE_NONE: @@ -352,7 +355,7 @@ class QAPISchemaGenVisitVisitor(QAPISchemaModularCVisitor): def visit_enum_type(self, name: str, info: Optional[QAPISourceInfo], - ifcond: Sequence[str], + ifcond: QAPISchemaIfCond, features: List[QAPISchemaFeature], members: List[QAPISchemaEnumMember], prefix: Optional[str]) -> None: @@ -363,7 +366,7 @@ class QAPISchemaGenVisitVisitor(QAPISchemaModularCVisitor): def visit_array_type(self, name: str, info: Optional[QAPISourceInfo], - ifcond: Sequence[str], + ifcond: QAPISchemaIfCond, element_type: QAPISchemaType) -> None: with ifcontext(ifcond, self._genh, self._genc): self._genh.add(gen_visit_decl(name)) @@ -372,7 +375,7 @@ class QAPISchemaGenVisitVisitor(QAPISchemaModularCVisitor): def visit_object_type(self, name: str, info: Optional[QAPISourceInfo], - ifcond: Sequence[str], + ifcond: QAPISchemaIfCond, features: List[QAPISchemaFeature], base: Optional[QAPISchemaObjectType], members: List[QAPISchemaObjectTypeMember], @@ -394,7 +397,7 @@ class QAPISchemaGenVisitVisitor(QAPISchemaModularCVisitor): def visit_alternate_type(self, name: str, info: Optional[QAPISourceInfo], - ifcond: Sequence[str], + ifcond: QAPISchemaIfCond, features: List[QAPISchemaFeature], variants: QAPISchemaVariants) -> None: with ifcontext(ifcond, self._genh, self._genc): diff --git a/scripts/qemu-binfmt-conf.sh b/scripts/qemu-binfmt-conf.sh index 7de996d536..6ef9f118d9 100755 --- a/scripts/qemu-binfmt-conf.sh +++ b/scripts/qemu-binfmt-conf.sh @@ -4,7 +4,7 @@ qemu_target_list="i386 i486 alpha arm armeb sparc sparc32plus sparc64 \ ppc ppc64 ppc64le m68k mips mipsel mipsn32 mipsn32el mips64 mips64el \ sh4 sh4eb s390x aarch64 aarch64_be hppa riscv32 riscv64 xtensa xtensaeb \ -microblaze microblazeel or1k x86_64 hexagon" +microblaze microblazeel or1k x86_64 hexagon loongarch64" i386_magic='\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x03\x00' i386_mask='\xff\xff\xff\xff\xff\xfe\xfe\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' @@ -60,28 +60,28 @@ m68k_family=m68k # FIXME: We could use the other endianness on a MIPS host. 
-mips_magic='\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08' -mips_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff' +mips_magic='\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +mips_mask='\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20' mips_family=mips -mipsel_magic='\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00' -mipsel_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' +mipsel_magic='\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +mipsel_mask='\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00' mipsel_family=mips -mipsn32_magic='\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08' -mipsn32_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff' +mipsn32_magic='\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20' +mipsn32_mask='\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20' mipsn32_family=mips -mipsn32el_magic='\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00' -mipsn32el_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' +mipsn32el_magic='\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00' +mipsn32el_mask='\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00' mipsn32el_family=mips mips64_magic='\x7fELF\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08' -mips64_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff' +mips64_mask='\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff' mips64_family=mips mips64el_magic='\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00' -mips64el_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' +mips64el_mask='\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' mips64el_family=mips sh4_magic='\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x2a\x00' @@ -140,6 +140,10 @@ hexagon_magic='\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x hexagon_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' hexagon_family=hexagon +loongarch64_magic='\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x02\x01' +loongarch64_mask='\xff\xff\xff\xff\xff\xff\xff\xfc\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' +loongarch64_family=loongarch + qemu_get_family() { cpu=${HOST_ARCH:-$(uname -m)} case "$cpu" in @@ -167,6 +171,9 @@ qemu_get_family() { riscv*) echo "riscv" ;; + loongarch*) + echo "loongarch" + ;; *) echo "$cpu" ;; @@ -340,7 +347,9 @@ PERSISTENT=no 
PRESERVE_ARG0=no QEMU_SUFFIX="" -options=$(getopt -o ds:Q:S:e:hc:p:g: -l debian,systemd:,qemu-path:,qemu-suffix:,exportdir:,help,credential:,persistent:,preserve-argv0: -- "$@") +_longopts="debian,systemd:,qemu-path:,qemu-suffix:,exportdir:,help,credential:,\ +persistent:,preserve-argv0:" +options=$(getopt -o ds:Q:S:e:hc:p:g:F: -l ${_longopts} -- "$@") eval set -- "$options" while true ; do diff --git a/scripts/qemu-stamp.py b/scripts/qemu-stamp.py new file mode 100644 index 0000000000..7beeeb07ed --- /dev/null +++ b/scripts/qemu-stamp.py @@ -0,0 +1,24 @@ +#! /usr/bin/env python3 + +# Usage: scripts/qemu-stamp.py STRING1 STRING2... -- FILE1 FILE2... +import hashlib +import os +import sys + +sha = hashlib.sha1() +is_file = False +for arg in sys.argv[1:]: + if arg == '--': + is_file = True + continue + if is_file: + with open(arg, 'rb') as f: + for chunk in iter(lambda: f.read(65536), b''): + sha.update(chunk) + else: + sha.update(os.fsencode(arg)) + sha.update(b'\n') + +# The hash can start with a digit, which the compiler doesn't +# like as an symbol. So prefix it with an underscore +print("_" + sha.hexdigest()) diff --git a/scripts/qmp/qemu-ga-client b/scripts/qmp/qemu-ga-client index 102fd2cad9..56edd0234a 100755 --- a/scripts/qmp/qemu-ga-client +++ b/scripts/qmp/qemu-ga-client @@ -4,7 +4,7 @@ import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) -from qemu.qmp import qemu_ga_client +from qemu.utils import qemu_ga_client if __name__ == '__main__': diff --git a/scripts/qmp/qmp-shell-wrap b/scripts/qmp/qmp-shell-wrap new file mode 100755 index 0000000000..9e94da114f --- /dev/null +++ b/scripts/qmp/qmp-shell-wrap @@ -0,0 +1,11 @@ +#!/usr/bin/env python3 + +import os +import sys + +sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) +from qemu.qmp import qmp_shell + + +if __name__ == '__main__': + qmp_shell.main_wrap() diff --git a/scripts/qmp/qom-fuse b/scripts/qmp/qom-fuse index a58c8ef979..d453807b27 100755 --- a/scripts/qmp/qom-fuse +++ b/scripts/qmp/qom-fuse @@ -4,7 +4,7 @@ import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) -from qemu.qmp.qom_fuse import QOMFuse +from qemu.utils.qom_fuse import QOMFuse if __name__ == '__main__': diff --git a/scripts/qmp/qom-get b/scripts/qmp/qom-get index e4f3e0c013..04ebe052e8 100755 --- a/scripts/qmp/qom-get +++ b/scripts/qmp/qom-get @@ -4,7 +4,7 @@ import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) -from qemu.qmp.qom import QOMGet +from qemu.utils.qom import QOMGet if __name__ == '__main__': diff --git a/scripts/qmp/qom-list b/scripts/qmp/qom-list index 7a071a54e1..853b85a8d3 100755 --- a/scripts/qmp/qom-list +++ b/scripts/qmp/qom-list @@ -4,7 +4,7 @@ import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) -from qemu.qmp.qom import QOMList +from qemu.utils.qom import QOMList if __name__ == '__main__': diff --git a/scripts/qmp/qom-set b/scripts/qmp/qom-set index 9ca9e2ba10..06820feec4 100755 --- a/scripts/qmp/qom-set +++ b/scripts/qmp/qom-set @@ -4,7 +4,7 @@ import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) -from qemu.qmp.qom import QOMSet +from qemu.utils.qom import QOMSet if __name__ == '__main__': diff --git a/scripts/qmp/qom-tree b/scripts/qmp/qom-tree index 7d0ccca3a4..760e172277 100755 --- a/scripts/qmp/qom-tree +++ b/scripts/qmp/qom-tree @@ -4,7 +4,7 @@ import os import sys 
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) -from qemu.qmp.qom import QOMTree +from qemu.utils.qom import QOMTree if __name__ == '__main__': diff --git a/scripts/render_block_graph.py b/scripts/render_block_graph.py index da6acf050d..8f731a5cfe 100755 --- a/scripts/render_block_graph.py +++ b/scripts/render_block_graph.py @@ -25,17 +25,14 @@ import json from graphviz import Digraph sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'python')) -from qemu.qmp import ( - QEMUMonitorProtocol, - QMPResponseError, -) +from qemu.qmp import QMPError +from qemu.qmp.legacy import QEMUMonitorProtocol def perm(arr): s = 'w' if 'write' in arr else '_' s += 'r' if 'consistent-read' in arr else '_' s += 'u' if 'write-unchanged' in arr else '_' - s += 'g' if 'graph-mod' in arr else '_' s += 's' if 'resize' in arr else '_' return s @@ -105,7 +102,7 @@ class LibvirtGuest(): reply = json.loads(subprocess.check_output(ar)) if 'error' in reply: - raise QMPResponseError(reply) + raise QMPError(reply) return reply['return'] diff --git a/scripts/shaderinclude.pl b/scripts/shaderinclude.pl deleted file mode 100644 index cd3bb40b12..0000000000 --- a/scripts/shaderinclude.pl +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env perl -use strict; -use warnings; - -my $file = shift; -open FILE, "<", $file or die "open $file: $!"; -my $name = $file; -$name =~ s|.*/||; -$name =~ s/[-.]/_/g; -print "static GLchar ${name}_src[] =\n"; -while () { - chomp; - printf " \"%s\\n\"\n", $_; -} -print " \"\\n\";\n"; -close FILE; diff --git a/scripts/shaderinclude.py b/scripts/shaderinclude.py new file mode 100644 index 0000000000..ab2aade2cd --- /dev/null +++ b/scripts/shaderinclude.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2023 Red Hat, Inc. +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import sys +import os + + +def main(args): + file_path = args[1] + basename = os.path.basename(file_path) + varname = basename.replace('-', '_').replace('.', '_') + + with os.fdopen(sys.stdout.fileno(), "wt", closefd=False, newline='\n') as stdout: + with open(file_path, "r", encoding='utf-8') as file: + print(f'static GLchar {varname}_src[] =', file=stdout) + for line in file: + line = line.rstrip() + print(f' "{line}\\n"', file=stdout) + print(' "\\n";', file=stdout) + + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff --git a/scripts/show-fixed-bugs.sh b/scripts/show-fixed-bugs.sh deleted file mode 100755 index a095a4d6ba..0000000000 --- a/scripts/show-fixed-bugs.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/bin/sh - -# This script checks the git log for URLs to the QEMU launchpad bugtracker -# and optionally checks whether the corresponding bugs are not closed yet. - -show_help () { - echo "Usage:" - echo " -s : Start searching at this commit" - echo " -e : End searching at this commit" - echo " -c : Check if bugs are still open" - echo " -b : Open bugs in browser" -} - -while getopts "s:e:cbh" opt; do - case "$opt" in - s) start="$OPTARG" ;; - e) end="$OPTARG" ;; - c) check_if_open=1 ;; - b) show_in_browser=1 ;; - h) show_help ; exit 0 ;; - *) echo "Use -h for help." ; exit 1 ;; - esac -done - -if [ "x$start" = "x" ]; then - start=$(git tag -l 'v[0-9]*\.[0-9]*\.0' | tail -n 2 | head -n 1) -fi -if [ "x$end" = "x" ]; then - end=$(git tag -l 'v[0-9]*\.[0-9]*\.0' | tail -n 1) -fi - -if [ "x$start" = "x" ] || [ "x$end" = "x" ]; then - echo "Could not determine start or end revision ... Please note that this" - echo "script must be run from a checked out git repository of QEMU." 
- exit 1 -fi - -echo "Searching git log for bugs in the range $start..$end" - -urlstr='https://bugs.launchpad.net/\(bugs\|qemu/+bug\)/' -bug_urls=$(git log $start..$end \ - | sed -n '\,'"$urlstr"', s,\(.*\)\('"$urlstr"'\)\([0-9]*\).*,\2\4,p' \ - | sort -u) - -echo Found bug URLs: -for i in $bug_urls ; do echo " $i" ; done - -if [ "x$check_if_open" = "x1" ]; then - echo - echo "Checking which ones are still open..." - for i in $bug_urls ; do - if ! curl -s -L "$i" | grep "value status" | grep -q "Fix Released" ; then - echo " $i" - final_bug_urls="$final_bug_urls $i" - fi - done -else - final_bug_urls=$bug_urls -fi - -if [ "x$final_bug_urls" = "x" ]; then - echo "No open bugs found." -elif [ "x$show_in_browser" = "x1" ]; then - # Try to determine which browser we should use - if [ "x$BROWSER" != "x" ]; then - bugbrowser="$BROWSER" - elif command -v xdg-open >/dev/null 2>&1; then - bugbrowser=xdg-open - elif command -v gnome-open >/dev/null 2>&1; then - bugbrowser=gnome-open - elif [ "$(uname)" = "Darwin" ]; then - bugbrowser=open - elif command -v sensible-browser >/dev/null 2>&1; then - bugbrowser=sensible-browser - else - echo "Please set the BROWSER variable to the browser of your choice." - exit 1 - fi - # Now show the bugs in the browser - first=1 - for i in $final_bug_urls; do - "$bugbrowser" "$i" - if [ $first = 1 ]; then - # if it is the first entry, give the browser some time to start - # (to avoid messages like "Firefox is already running, but is - # not responding...") - sleep 4 - first=0 - fi - done -fi diff --git a/scripts/simplebench/bench-example.py b/scripts/simplebench/bench-example.py index 4864435f39..fc370691e0 100644 --- a/scripts/simplebench/bench-example.py +++ b/scripts/simplebench/bench-example.py @@ -25,7 +25,7 @@ from bench_block_job import bench_block_copy, drv_file, drv_nbd def bench_func(env, case): """ Handle one "cell" of benchmarking table. """ - return bench_block_copy(env['qemu_binary'], env['cmd'], {} + return bench_block_copy(env['qemu_binary'], env['cmd'], {}, case['source'], case['target']) diff --git a/scripts/simplebench/bench_block_job.py b/scripts/simplebench/bench_block_job.py index 4f03c12169..56191db44b 100755 --- a/scripts/simplebench/bench_block_job.py +++ b/scripts/simplebench/bench_block_job.py @@ -27,7 +27,7 @@ import json sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) from qemu.machine import QEMUMachine -from qemu.qmp import QMPConnectError +from qemu.qmp import ConnectError def bench_block_job(cmd, cmd_args, qemu_args): @@ -49,7 +49,7 @@ def bench_block_job(cmd, cmd_args, qemu_args): vm.launch() except OSError as e: return {'error': 'popen failed: ' + str(e)} - except (QMPConnectError, socket.timeout): + except (ConnectError, socket.timeout): return {'error': 'qemu failed: ' + str(vm.get_log())} try: diff --git a/scripts/simplebench/img_bench_templater.py b/scripts/simplebench/img_bench_templater.py new file mode 100755 index 0000000000..f8e1540ada --- /dev/null +++ b/scripts/simplebench/img_bench_templater.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python3 +# +# Process img-bench test templates +# +# Copyright (c) 2021 Virtuozzo International GmbH. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + + +import sys +import subprocess +import re +import json + +import simplebench +from results_to_text import results_to_text +from table_templater import Templater + + +def bench_func(env, case): + test = templater.gen(env['data'], case['data']) + + p = subprocess.run(test, shell=True, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, universal_newlines=True) + + if p.returncode == 0: + try: + m = re.search(r'Run completed in (\d+.\d+) seconds.', p.stdout) + return {'seconds': float(m.group(1))} + except Exception: + return {'error': f'failed to parse qemu-img output: {p.stdout}'} + else: + return {'error': f'qemu-img failed: {p.returncode}: {p.stdout}'} + + +if __name__ == '__main__': + if len(sys.argv) > 1: + print(""" +Usage: img_bench_templater.py < path/to/test-template.sh + +This script generates performance tests from a test template (example below), +runs them, and displays the results in a table. The template is read from +stdin. It must be written in bash and end with a `qemu-img bench` invocation +(whose result is parsed to get the test instance’s result). + +Use the following syntax in the template to create the various different test +instances: + + column templating: {var1|var2|...} - test will use different values in + different columns. You may use several {} constructions in the test, in this + case product of all choice-sets will be used. + + row templating: [var1|var2|...] - similar thing to define rows (test-cases) + +Test template example: + +Assume you want to compare two qemu-img binaries, called qemu-img-old and +qemu-img-new in your build directory in two test-cases with 4K writes and 64K +writes. The template may look like this: + +qemu_img=/path/to/qemu/build/qemu-img-{old|new} +$qemu_img create -f qcow2 /ssd/x.qcow2 1G +$qemu_img bench -c 100 -d 8 [-s 4K|-s 64K] -w -t none -n /ssd/x.qcow2 + +When passing this to stdin of img_bench_templater.py, the resulting comparison +table will contain two columns (for two binaries) and two rows (for two +test-cases). + +In addition to displaying the results, script also stores results in JSON +format into results.json file in current directory. +""") + sys.exit() + + templater = Templater(sys.stdin.read()) + + envs = [{'id': ' / '.join(x), 'data': x} for x in templater.columns] + cases = [{'id': ' / '.join(x), 'data': x} for x in templater.rows] + + result = simplebench.bench(bench_func, envs, cases, count=5, + initial_run=False) + print(results_to_text(result)) + with open('results.json', 'w') as f: + json.dump(result, f, indent=4) diff --git a/scripts/simplebench/table_templater.py b/scripts/simplebench/table_templater.py new file mode 100644 index 0000000000..950f3b3024 --- /dev/null +++ b/scripts/simplebench/table_templater.py @@ -0,0 +1,62 @@ +# Parser for test templates +# +# Copyright (c) 2021 Virtuozzo International GmbH. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
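Editor's note: the Templater class defined just below implements the {column|...} / [row|...] syntax that the img_bench_templater.py help text above describes. A minimal usage sketch, assuming the third-party lark package is installed and that table_templater.py is importable from the current directory; the template string, binary names and block sizes are placeholders, not values from the patch, and the "expected" comments show the output I would anticipate rather than captured results.

# Hypothetical usage of the Templater class defined below.
from table_templater import Templater

t = Templater("qemu-img-{old|new} bench [-s 4K|-s 64K] test.img")

# One tuple per column ({...} choices) and per row ([...] choices).
print(t.columns)   # expected: [('old',), ('new',)]
print(t.rows)      # expected: [('-s 4K',), ('-s 64K',)]

# gen() substitutes one column tuple and one row tuple back into the text.
print(t.gen(t.columns[0], t.rows[1]))
# expected: qemu-img-old bench -s 64K test.img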
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import itertools +from lark import Lark + +grammar = """ +start: ( text | column_switch | row_switch )+ + +column_switch: "{" text ["|" text]+ "}" +row_switch: "[" text ["|" text]+ "]" +text: /[^|{}\[\]]+/ +""" + +parser = Lark(grammar) + +class Templater: + def __init__(self, template): + self.tree = parser.parse(template) + + c_switches = [] + r_switches = [] + for x in self.tree.children: + if x.data == 'column_switch': + c_switches.append([el.children[0].value for el in x.children]) + elif x.data == 'row_switch': + r_switches.append([el.children[0].value for el in x.children]) + + self.columns = list(itertools.product(*c_switches)) + self.rows = list(itertools.product(*r_switches)) + + def gen(self, column, row): + i = 0 + j = 0 + result = [] + + for x in self.tree.children: + if x.data == 'text': + result.append(x.children[0].value) + elif x.data == 'column_switch': + result.append(column[i]) + i += 1 + elif x.data == 'row_switch': + result.append(row[j]) + j += 1 + + return ''.join(result) diff --git a/scripts/switch-timer-api b/scripts/switch-timer-api deleted file mode 100755 index 41736d11dd..0000000000 --- a/scripts/switch-timer-api +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/env perl - -use strict; -use warnings; -use Getopt::Long; -use FindBin; - -my @legacy = qw(qemu_clock_ptr qemu_get_clock_ns qemu_get_clock_ms qemu_register_clock_reset_notifier qemu_unregister_clock_reset_notifier qemu_new_timer qemu_free_timer qemu_del_timer qemu_mod_timer_ns qemu_mod_timer qemu_run_timers qemu_new_timer_ns qemu_new_timer_us qemu_new_timer_ms); -my $legacyre = '\b('.join('|', @legacy).')\b'; -my $option_git; -my $option_dryrun; -my $option_quiet; -my $option_rtc; -my $suffix=".tmp.$$"; -my @files; -my $getfiles = 'git grep -l -E \'\b((host|rt|vm|rtc)_clock\b|qemu_\w*timer)\' | egrep \'\.[ch]$\' | egrep -v \'qemu-timer\.c$|include/qemu/timer\.h$\''; - -sub Syntax -{ - print STDERR < \$option_dryrun, - "git|g" => \$option_git, - "quiet|q" => \$option_quiet, - "rtc|r" => \$option_rtc, - "help|h" => sub { Syntax(); exit(0); } - )) - { - Syntax(); - die "Bad options"; - } - - if ($#ARGV >=0) - { - @files = @ARGV; - } - else - { - @files = split(/\s+/, `$getfiles`); - } - - foreach my $file (@files) - { - die "Cannot find $file" unless (-f $file && -r $file); - } -} - -sub DoWarn -{ - my $text = shift @_; - my $line = shift @_; - return if ($option_quiet); - chomp ($line); - print STDERR "$text\n"; - print STDERR "$line\n\n"; -} - -sub Process -{ - my $ifn = shift @_; - my $ofn = $ifn.$suffix; - - my $intext; - my $outtext; - my $linenum = 0; - - open my $input, "<", $ifn || die "Cannot open $ifn for read: $!"; - - while (<$input>) - { - my $line = $_; - $intext .= $line; - $linenum++; - - # fix the specific uses - unless ($option_rtc) - { - $line =~ s/\bqemu_new_timer(_[num]s)\s*\((vm_|rt_|host_)clock\b/timer_new$1(XXX_$2clock/g; - $line =~ s/\bqemu_new_timer\s*\((vm_|rt_|host_)clock\b/timer_new(XXX_$1clock/g; - $line =~ s/\bqemu_get_clock(_[num]s)\s*\((vm_|rt_|host_)clock\b/qemu_clock_get$1(XXX_$2clock/g; - } - - # rtc is different - $line =~ s/\bqemu_new_timer(_[num]s)\s*\(rtc_clock\b/timer_new$1(rtc_clock/g; - $line =~ 
s/\bqemu_new_timer\s*\(rtc_clock\b/timer_new(rtc_clock/g; - $line =~ s/\bqemu_get_clock(_[num]s)\s*\(rtc_clock\b/qemu_clock_get$1(rtc_clock/g; - $line =~ s/\bqemu_register_clock_reset_notifier\s*\(rtc_clock\b/qemu_register_clock_reset_notifier(qemu_clock_ptr(rtc_clock)/g; - - unless ($option_rtc) - { - # fix up comments - $line =~ s/\b(vm_|rt_|host_)clock\b/XXX_$1clock/g if ($line =~ m,^[/ ]+\*,); - - # spurious fprintf error reporting - $line =~ s/: qemu_new_timer_ns failed/: timer_new_ns failed/g; - - # these have just changed name - $line =~ s/\bqemu_mod_timer\b/timer_mod/g; - $line =~ s/\bqemu_mod_timer_(ns|us|ms)\b/timer_mod_$1/g; - $line =~ s/\bqemu_free_timer\b/timer_free/g; - $line =~ s/\bqemu_del_timer\b/timer_del/g; - } - - # fix up rtc_clock - $line =~ s/QEMUClock \*rtc_clock;/QEMUClockType rtc_clock;/g; - $line =~ s/\brtc_clock = (vm_|rt_|host_)clock\b/rtc_clock = XXX_$1clock/g; - - unless ($option_rtc) - { - # replace any more general uses - $line =~ s/\b(vm_|rt_|host_)clock\b/qemu_clock_ptr(XXX_$1clock)/g; - } - - # fix up the place holders - $line =~ s/\bXXX_vm_clock\b/QEMU_CLOCK_VIRTUAL/g; - $line =~ s/\bXXX_rt_clock\b/QEMU_CLOCK_REALTIME/g; - $line =~ s/\bXXX_host_clock\b/QEMU_CLOCK_HOST/g; - - unless ($option_rtc) - { - DoWarn("$ifn:$linenum WARNING: timer $1 not fixed up", $line) if ($line =~ /\b((vm_|rt_|host_)clock)\b/); - DoWarn("$ifn:$linenum WARNING: function $1 not fixed up", $line) if ($line =~ /\b(qemu_new_timer\w+)\b/); - DoWarn("$ifn:$linenum WARNING: legacy function $1 remains", $line) if ($line =~ /$legacyre/o); - } - - $outtext .= $line; - } - - close $input; - - if ($intext ne $outtext) - { - print STDERR "Patching $ifn\n" unless ($option_quiet); - unless ($option_dryrun) - { - open my $output, ">", $ofn || die "Cannot open $ofn for write: $!"; - print $output $outtext; - close $output; - rename ($ofn, $ifn) || die "Cannot rename temp file to $ifn: $!"; - return 1; - } - } - return 0; -} - -sub DoCommit -{ - my $file = shift @_; - open (my $git, "| git commit -F - $file") || die "Cannot run git commit on $file: $!"; - print $git "timers api: use new timer api in $file\n\nConvert $file to use new timer API.\nThis is an automated commit made by scripts/switch-timer-api\n"; - close ($git); -} - -ParseOptions; - -foreach my $file (@files) -{ - my $changed = Process ($file); - DoCommit($file) if ($changed && $option_git); -} diff --git a/scripts/symlink-install-tree.py b/scripts/symlink-install-tree.py new file mode 100644 index 0000000000..67cb86dd52 --- /dev/null +++ b/scripts/symlink-install-tree.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 + +from pathlib import PurePath +import errno +import json +import os +import subprocess +import sys + +def destdir_join(d1: str, d2: str) -> str: + if not d1: + return d2 + # c:\destdir + c:\prefix must produce c:\destdir\prefix + return str(PurePath(d1, *PurePath(d2).parts[1:])) + +introspect = os.environ.get('MESONINTROSPECT') +out = subprocess.run([*introspect.split(' '), '--installed'], + stdout=subprocess.PIPE, check=True).stdout +for source, dest in json.loads(out).items(): + bundle_dest = destdir_join('qemu-bundle', dest) + path = os.path.dirname(bundle_dest) + try: + os.makedirs(path, exist_ok=True) + except BaseException as e: + print(f'error making directory {path}', file=sys.stderr) + raise e + try: + os.symlink(source, bundle_dest) + except BaseException as e: + if not isinstance(e, OSError) or e.errno != errno.EEXIST: + print(f'error making symbolic link {dest}', file=sys.stderr) + raise e diff --git 
a/scripts/tap-driver.pl b/scripts/tap-driver.pl deleted file mode 100755 index b1d3880c50..0000000000 --- a/scripts/tap-driver.pl +++ /dev/null @@ -1,379 +0,0 @@ -#! /usr/bin/env perl -# Copyright (C) 2011-2013 Free Software Foundation, Inc. -# Copyright (C) 2018 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2, or (at your option) -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. - -# ---------------------------------- # -# Imports, static data, and setup. # -# ---------------------------------- # - -use warnings FATAL => 'all'; -use strict; -use Getopt::Long (); -use TAP::Parser; -use Term::ANSIColor qw(:constants); - -my $ME = "tap-driver.pl"; -my $VERSION = "2018-11-30"; - -my $USAGE = <<'END'; -Usage: - tap-driver [--test-name=TEST] [--color={always|never|auto}] - [--verbose] [--show-failures-only] -END - -my $HELP = "$ME: TAP-aware test driver for QEMU testsuite harness." . - "\n" . $USAGE; - -# It's important that NO_PLAN evaluates "false" as a boolean. -use constant NO_PLAN => 0; -use constant EARLY_PLAN => 1; -use constant LATE_PLAN => 2; - -use constant DIAG_STRING => "#"; - -# ------------------- # -# Global variables. # -# ------------------- # - -my $testno = 0; # Number of test results seen so far. -my $bailed_out = 0; # Whether a "Bail out!" directive has been seen. -my $failed = 0; # Final exit code - -# Whether the TAP plan has been seen or not, and if yes, which kind -# it is ("early" is seen before any test result, "late" otherwise). -my $plan_seen = NO_PLAN; - -# ----------------- # -# Option parsing. # -# ----------------- # - -my %cfg = ( - "color" => 0, - "verbose" => 0, - "show-failures-only" => 0, -); - -my $color = "auto"; -my $test_name = undef; - -# Perl's Getopt::Long allows options to take optional arguments after a space. -# Prevent --color by itself from consuming other arguments -foreach (@ARGV) { - if ($_ eq "--color" || $_ eq "-color") { - $_ = "--color=$color"; - } -} - -Getopt::Long::GetOptions - ( - 'help' => sub { print $HELP; exit 0; }, - 'version' => sub { print "$ME $VERSION\n"; exit 0; }, - 'test-name=s' => \$test_name, - 'color=s' => \$color, - 'show-failures-only' => sub { $cfg{"show-failures-only"} = 1; }, - 'verbose' => sub { $cfg{"verbose"} = 1; }, - ) or exit 1; - -if ($color =~ /^always$/i) { - $cfg{'color'} = 1; -} elsif ($color =~ /^never$/i) { - $cfg{'color'} = 0; -} elsif ($color =~ /^auto$/i) { - $cfg{'color'} = (-t STDOUT); -} else { - die "Invalid color mode: $color\n"; -} - -# ------------- # -# Prototypes. 
# -# ------------- # - -sub colored ($$); -sub decorate_result ($); -sub extract_tap_comment ($); -sub handle_tap_bailout ($); -sub handle_tap_plan ($); -sub handle_tap_result ($); -sub is_null_string ($); -sub main (); -sub report ($;$); -sub stringify_result_obj ($); -sub testsuite_error ($); - -# -------------- # -# Subroutines. # -# -------------- # - -# If the given string is undefined or empty, return true, otherwise -# return false. This function is useful to avoid pitfalls like: -# if ($message) { print "$message\n"; } -# which wouldn't print anything if $message is the literal "0". -sub is_null_string ($) -{ - my $str = shift; - return ! (defined $str and length $str); -} - -sub stringify_result_obj ($) -{ - my $result_obj = shift; - if ($result_obj->is_unplanned || $result_obj->number != $testno) - { - return "ERROR"; - } - elsif ($plan_seen == LATE_PLAN) - { - return "ERROR"; - } - elsif (!$result_obj->directive) - { - return $result_obj->is_ok ? "PASS" : "FAIL"; - } - elsif ($result_obj->has_todo) - { - return $result_obj->is_actual_ok ? "XPASS" : "XFAIL"; - } - elsif ($result_obj->has_skip) - { - return $result_obj->is_ok ? "SKIP" : "FAIL"; - } - die "$ME: INTERNAL ERROR"; # NOTREACHED -} - -sub colored ($$) -{ - my ($color_string, $text) = @_; - return $color_string . $text . RESET; -} - -sub decorate_result ($) -{ - my $result = shift; - return $result unless $cfg{"color"}; - my %color_for_result = - ( - "ERROR" => BOLD.MAGENTA, - "PASS" => GREEN, - "XPASS" => BOLD.YELLOW, - "FAIL" => BOLD.RED, - "XFAIL" => YELLOW, - "SKIP" => BLUE, - ); - if (my $color = $color_for_result{$result}) - { - return colored ($color, $result); - } - else - { - return $result; # Don't colorize unknown stuff. - } -} - -sub report ($;$) -{ - my ($msg, $result, $explanation) = (undef, @_); - if ($result =~ /^(?:X?(?:PASS|FAIL)|SKIP|ERROR)/) - { - # Output on console might be colorized. - $msg = decorate_result($result); - if ($result =~ /^(?:PASS|XFAIL|SKIP)/) - { - return if $cfg{"show-failures-only"}; - } - else - { - $failed = 1; - } - } - elsif ($result eq "#") - { - $msg = " "; - } - else - { - die "$ME: INTERNAL ERROR"; # NOTREACHED - } - $msg .= " $explanation" if defined $explanation; - print $msg . "\n"; -} - -sub testsuite_error ($) -{ - report "ERROR", "$test_name - $_[0]"; -} - -sub handle_tap_result ($) -{ - $testno++; - my $result_obj = shift; - - my $test_result = stringify_result_obj $result_obj; - my $string = $result_obj->number; - - my $description = $result_obj->description; - $string .= " $test_name" unless is_null_string $test_name; - $string .= " $description" unless is_null_string $description; - - if ($plan_seen == LATE_PLAN) - { - $string .= " # AFTER LATE PLAN"; - } - elsif ($result_obj->is_unplanned) - { - $string .= " # UNPLANNED"; - } - elsif ($result_obj->number != $testno) - { - $string .= " # OUT-OF-ORDER (expecting $testno)"; - } - elsif (my $directive = $result_obj->directive) - { - $string .= " # $directive"; - my $explanation = $result_obj->explanation; - $string .= " $explanation" - unless is_null_string $explanation; - } - - report $test_result, $string; -} - -sub handle_tap_plan ($) -{ - my $plan = shift; - if ($plan_seen) - { - # Error, only one plan per stream is acceptable. - testsuite_error "multiple test plans"; - return; - } - # The TAP plan can come before or after *all* the TAP results; we speak - # respectively of an "early" or a "late" plan. 
If we see the plan line - # after at least one TAP result has been seen, assume we have a late - # plan; in this case, any further test result seen after the plan will - # be flagged as an error. - $plan_seen = ($testno >= 1 ? LATE_PLAN : EARLY_PLAN); - # If $testno > 0, we have an error ("too many tests run") that will be - # automatically dealt with later, so don't worry about it here. If - # $plan_seen is true, we have an error due to a repeated plan, and that - # has already been dealt with above. Otherwise, we have a valid "plan - # with SKIP" specification, and should report it as a particular kind - # of SKIP result. - if ($plan->directive && $testno == 0) - { - my $explanation = is_null_string ($plan->explanation) ? - undef : "- " . $plan->explanation; - report "SKIP", $explanation; - } -} - -sub handle_tap_bailout ($) -{ - my ($bailout, $msg) = ($_[0], "Bail out!"); - $bailed_out = 1; - $msg .= " " . $bailout->explanation - unless is_null_string $bailout->explanation; - testsuite_error $msg; -} - -sub extract_tap_comment ($) -{ - my $line = shift; - if (index ($line, DIAG_STRING) == 0) - { - # Strip leading `DIAG_STRING' from `$line'. - $line = substr ($line, length (DIAG_STRING)); - # And strip any leading and trailing whitespace left. - $line =~ s/(?:^\s*|\s*$)//g; - # Return what is left (if any). - return $line; - } - return ""; -} - -sub main () -{ - my $iterator = TAP::Parser::Iterator::Stream->new(\*STDIN); - my $parser = TAP::Parser->new ({iterator => $iterator }); - - STDOUT->autoflush(1); - while (defined (my $cur = $parser->next)) - { - # Parsing of TAP input should stop after a "Bail out!" directive. - next if $bailed_out; - - if ($cur->is_plan) - { - handle_tap_plan ($cur); - } - elsif ($cur->is_test) - { - handle_tap_result ($cur); - } - elsif ($cur->is_bailout) - { - handle_tap_bailout ($cur); - } - elsif ($cfg{"verbose"}) - { - my $comment = extract_tap_comment ($cur->raw); - report "#", "$comment" if length $comment; - } - } - # A "Bail out!" directive should cause us to ignore any following TAP - # error. - if (!$bailed_out) - { - if (!$plan_seen) - { - testsuite_error "missing test plan"; - } - elsif ($parser->tests_planned != $parser->tests_run) - { - my ($planned, $run) = ($parser->tests_planned, $parser->tests_run); - my $bad_amount = $run > $planned ? "many" : "few"; - testsuite_error (sprintf "too %s tests run (expected %d, got %d)", - $bad_amount, $planned, $run); - } - } -} - -# ----------- # -# Main code. # -# ----------- # - -main; -exit($failed); - -# Local Variables: -# perl-indent-level: 2 -# perl-continued-statement-offset: 2 -# perl-continued-brace-offset: 0 -# perl-brace-offset: 0 -# perl-brace-imaginary-offset: 0 -# perl-label-offset: -2 -# cperl-indent-level: 2 -# cperl-brace-offset: 0 -# cperl-continued-brace-offset: 0 -# cperl-label-offset: -2 -# cperl-extra-newline-before-brace: t -# cperl-merge-trailing-else: nil -# cperl-continued-statement-offset: 2 -# End: diff --git a/scripts/tap-merge.pl b/scripts/tap-merge.pl deleted file mode 100755 index 10ccf57bb2..0000000000 --- a/scripts/tap-merge.pl +++ /dev/null @@ -1,111 +0,0 @@ -#! /usr/bin/env perl -# Copyright (C) 2018 Red Hat, Inc. -# -# Author: Paolo Bonzini -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2, or (at your option) -# any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -# ---------------------------------- # -# Imports, static data, and setup. # -# ---------------------------------- # - -use warnings FATAL => 'all'; -use strict; -use Getopt::Long (); -use TAP::Parser; - -my $ME = "tap-merge.pl"; -my $VERSION = "2018-11-30"; - -my $HELP = "$ME: merge multiple TAP inputs from stdin."; - -use constant DIAG_STRING => "#"; - -# ----------------- # -# Option parsing. # -# ----------------- # - -Getopt::Long::GetOptions - ( - 'help' => sub { print $HELP; exit 0; }, - 'version' => sub { print "$ME $VERSION\n"; exit 0; }, - ); - -# -------------- # -# Subroutines. # -# -------------- # - -sub main () -{ - my $iterator = TAP::Parser::Iterator::Stream->new(\*STDIN); - my $parser = TAP::Parser->new ({iterator => $iterator }); - my $testno = 0; # Number of test results seen so far. - my $bailed_out = 0; # Whether a "Bail out!" directive has been seen. - - STDOUT->autoflush(1); - while (defined (my $cur = $parser->next)) - { - if ($cur->is_bailout) - { - $bailed_out = 1; - print DIAG_STRING . " " . $cur->as_string . "\n"; - next; - } - elsif ($cur->is_plan) - { - $bailed_out = 0; - next; - } - elsif ($cur->is_test) - { - $bailed_out = 0 if $cur->number == 1; - $testno++; - $cur = TAP::Parser::Result::Test->new({ - ok => $cur->ok, - test_num => $testno, - directive => $cur->directive, - explanation => $cur->explanation, - description => $cur->description - }); - } - elsif ($cur->is_version) - { - next if $testno > 0; - } - print $cur->as_string . "\n" unless $bailed_out; - } - print "1..$testno\n"; -} - -# ----------- # -# Main code. # -# ----------- # - -main; - -# Local Variables: -# perl-indent-level: 2 -# perl-continued-statement-offset: 2 -# perl-continued-brace-offset: 0 -# perl-brace-offset: 0 -# perl-brace-imaginary-offset: 0 -# perl-label-offset: -2 -# cperl-indent-level: 2 -# cperl-brace-offset: 0 -# cperl-continued-brace-offset: 0 -# cperl-label-offset: -2 -# cperl-extra-newline-before-brace: t -# cperl-merge-trailing-else: nil -# cperl-continued-statement-offset: 2 -# End: diff --git a/scripts/tracetool/__init__.py b/scripts/tracetool/__init__.py index 5bc94d95cf..5393c7fc5c 100644 --- a/scripts/tracetool/__init__.py +++ b/scripts/tracetool/__init__.py @@ -87,8 +87,6 @@ ALLOWED_TYPES = [ "ssize_t", "uintptr_t", "ptrdiff_t", - # Magic substitution is done by tracetool - "TCGv", ] def validate_type(name): @@ -232,7 +230,7 @@ class Event(object): "(?:(?:(?P\".+),)?\s*(?P\".+))?" 
"\s*") - _VALID_PROPS = set(["disable", "tcg", "tcg-trans", "tcg-exec", "vcpu"]) + _VALID_PROPS = set(["disable", "vcpu"]) def __init__(self, name, props, fmt, args, lineno, filename, orig=None, event_trans=None, event_exec=None): @@ -321,15 +319,6 @@ class Event(object): fmt = [fmt_trans, fmt] args = Arguments.build(groups["args"]) - if "tcg-trans" in props: - raise ValueError("Invalid property 'tcg-trans'") - if "tcg-exec" in props: - raise ValueError("Invalid property 'tcg-exec'") - if "tcg" not in props and not isinstance(fmt, str): - raise ValueError("Only events with 'tcg' property can have two format strings") - if "tcg" in props and isinstance(fmt, str): - raise ValueError("Events with 'tcg' property must have two format strings") - event = Event(name, props, fmt, args, lineno, filename) # add implicit arguments when using the 'vcpu' property @@ -409,33 +398,7 @@ def read_events(fobj, fname): e.args = (arg0,) + e.args[1:] raise - # transform TCG-enabled events - if "tcg" not in event.properties: - events.append(event) - else: - event_trans = event.copy() - event_trans.name += "_trans" - event_trans.properties += ["tcg-trans"] - event_trans.fmt = event.fmt[0] - # ignore TCG arguments - args_trans = [] - for atrans, aorig in zip( - event_trans.transform(tracetool.transform.TCG_2_HOST).args, - event.args): - if atrans == aorig: - args_trans.append(atrans) - event_trans.args = Arguments(args_trans) - - event_exec = event.copy() - event_exec.name += "_exec" - event_exec.properties += ["tcg-exec"] - event_exec.fmt = event.fmt[1] - event_exec.args = event_exec.args.transform(tracetool.transform.TCG_2_HOST) - - new_event = [event_trans, event_exec] - event.event_trans, event.event_exec = new_event - - events.extend(new_event) + events.append(event) return events diff --git a/scripts/tracetool/format/tcg_h.py b/scripts/tracetool/format/tcg_h.py deleted file mode 100644 index 4d84440aff..0000000000 --- a/scripts/tracetool/format/tcg_h.py +++ /dev/null @@ -1,83 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -Generate .h file for TCG code generation. -""" - -__author__ = "Lluís Vilanova " -__copyright__ = "Copyright 2012-2017, Lluís Vilanova " -__license__ = "GPL version 2 or (at your option) any later version" - -__maintainer__ = "Stefan Hajnoczi" -__email__ = "stefanha@redhat.com" - - -from tracetool import out, Arguments -import tracetool.vcpu - - -def vcpu_transform_args(args): - assert len(args) == 1 - return Arguments([ - args, - # NOTE: this name must be kept in sync with the one in "tcg_h" - # NOTE: Current helper code uses TCGv_env (CPUArchState*) - ("TCGv_env", "__tcg_" + args.names()[0]), - ]) - - -def generate(events, backend, group): - if group == "root": - header = "trace/trace-root.h" - else: - header = "trace.h" - - out('/* This file is autogenerated by tracetool, do not edit. 
*/', - '/* You must include this file after the inclusion of helper.h */', - '', - '#ifndef TRACE_%s_GENERATED_TCG_TRACERS_H' % group.upper(), - '#define TRACE_%s_GENERATED_TCG_TRACERS_H' % group.upper(), - '', - '#include "exec/helper-proto.h"', - '#include "%s"' % header, - '', - ) - - for e in events: - # just keep one of them - if "tcg-exec" not in e.properties: - continue - - out('static inline void %(name_tcg)s(%(args)s)', - '{', - name_tcg=e.original.api(e.QEMU_TRACE_TCG), - args=tracetool.vcpu.transform_args("tcg_h", e.original)) - - if "disable" not in e.properties: - args_trans = e.original.event_trans.args - args_exec = tracetool.vcpu.transform_args( - "tcg_helper_c", e.original.event_exec, "wrapper") - if "vcpu" in e.properties: - trace_cpu = e.args.names()[0] - cond = "trace_event_get_vcpu_state(%(cpu)s,"\ - " TRACE_%(id)s)"\ - % dict( - cpu=trace_cpu, - id=e.original.event_exec.name.upper()) - else: - cond = "true" - - out(' %(name_trans)s(%(argnames_trans)s);', - ' if (%(cond)s) {', - ' gen_helper_%(name_exec)s(%(argnames_exec)s);', - ' }', - name_trans=e.original.event_trans.api(e.QEMU_TRACE), - name_exec=e.original.event_exec.api(e.QEMU_TRACE), - argnames_trans=", ".join(args_trans.names()), - argnames_exec=", ".join(args_exec.names()), - cond=cond) - - out('}') - - out('', - '#endif /* TRACE_%s_GENERATED_TCG_TRACERS_H */' % group.upper()) diff --git a/scripts/tracetool/format/tcg_helper_c.py b/scripts/tracetool/format/tcg_helper_c.py deleted file mode 100644 index 72576e67d1..0000000000 --- a/scripts/tracetool/format/tcg_helper_c.py +++ /dev/null @@ -1,79 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -Generate trace/generated-helpers.c. -""" - -__author__ = "Lluís Vilanova " -__copyright__ = "Copyright 2012-2017, Lluís Vilanova " -__license__ = "GPL version 2 or (at your option) any later version" - -__maintainer__ = "Stefan Hajnoczi" -__email__ = "stefanha@redhat.com" - - -from tracetool import Arguments, out -from tracetool.transform import * -import tracetool.vcpu - - -def vcpu_transform_args(args, mode): - assert len(args) == 1 - # NOTE: this name must be kept in sync with the one in "tcg_h" - args = Arguments([(args.types()[0], "__tcg_" + args.names()[0])]) - if mode == "code": - return Arguments([ - # Does cast from helper requirements to tracing types - ("CPUState *", "env_cpu(%s)" % args.names()[0]), - ]) - else: - args = Arguments([ - # NOTE: Current helper code uses TCGv_env (CPUArchState*) - ("CPUArchState *", args.names()[0]), - ]) - if mode == "header": - return args - elif mode == "wrapper": - return args.transform(HOST_2_TCG) - else: - assert False - - -def generate(events, backend, group): - if group == "root": - header = "trace/trace-root.h" - else: - header = "trace.h" - - events = [e for e in events - if "disable" not in e.properties] - - out('/* This file is autogenerated by tracetool, do not edit. 
*/', - '', - '#include "qemu/osdep.h"', - '#include "cpu.h"', - '#include "exec/helper-proto.h"', - '#include "%s"' % header, - '', - ) - - for e in events: - if "tcg-exec" not in e.properties: - continue - - e_args_api = tracetool.vcpu.transform_args( - "tcg_helper_c", e.original, "header").transform( - HOST_2_TCG_COMPAT, TCG_2_TCG_HELPER_DEF) - e_args_call = tracetool.vcpu.transform_args( - "tcg_helper_c", e, "code") - - out('void %(name_tcg)s(%(args_api)s)', - '{', - # NOTE: the check was already performed at TCG-generation time - ' %(name)s(%(args_call)s);', - '}', - name_tcg="helper_%s_proxy" % e.api(), - name=e.api(e.QEMU_TRACE_NOCHECK), - args_api=e_args_api, - args_call=", ".join(e_args_call.casted()), - ) diff --git a/scripts/tracetool/format/tcg_helper_h.py b/scripts/tracetool/format/tcg_helper_h.py deleted file mode 100644 index 08554fbc85..0000000000 --- a/scripts/tracetool/format/tcg_helper_h.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -Generate trace/generated-helpers.h. -""" - -__author__ = "Lluís Vilanova " -__copyright__ = "Copyright 2012-2016, Lluís Vilanova " -__license__ = "GPL version 2 or (at your option) any later version" - -__maintainer__ = "Stefan Hajnoczi" -__email__ = "stefanha@redhat.com" - - -from tracetool import out -from tracetool.transform import * -import tracetool.vcpu - - -def generate(events, backend, group): - events = [e for e in events - if "disable" not in e.properties] - - out('/* This file is autogenerated by tracetool, do not edit. */', - '', - ) - - for e in events: - if "tcg-exec" not in e.properties: - continue - - # TCG helper proxy declaration - fmt = "DEF_HELPER_FLAGS_%(argc)d(%(name)s, %(flags)svoid%(types)s)" - e_args = tracetool.vcpu.transform_args("tcg_helper_c", e.original, "header") - args = e_args.transform(HOST_2_TCG_COMPAT, HOST_2_TCG, - TCG_2_TCG_HELPER_DECL) - types = ", ".join(args.types()) - if types != "": - types = ", " + types - - flags = "TCG_CALL_NO_RWG, " - - out(fmt, - flags=flags, - argc=len(args), - name=e.api() + "_proxy", - types=types, - ) diff --git a/scripts/tracetool/format/tcg_helper_wrapper_h.py b/scripts/tracetool/format/tcg_helper_wrapper_h.py deleted file mode 100644 index 0c5a9797d1..0000000000 --- a/scripts/tracetool/format/tcg_helper_wrapper_h.py +++ /dev/null @@ -1,70 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -Generate trace/generated-helpers-wrappers.h. -""" - -__author__ = "Lluís Vilanova " -__copyright__ = "Copyright 2012-2016, Lluís Vilanova " -__license__ = "GPL version 2 or (at your option) any later version" - -__maintainer__ = "Stefan Hajnoczi" -__email__ = "stefanha@redhat.com" - - -from tracetool import out -from tracetool.transform import * -import tracetool.vcpu - - -def generate(events, backend, group): - events = [e for e in events - if "disable" not in e.properties] - - out('/* This file is autogenerated by tracetool, do not edit. 
*/', - '', - '#define tcg_temp_new_nop(v) (v)', - '#define tcg_temp_free_nop(v)', - '', - ) - - for e in events: - if "tcg-exec" not in e.properties: - continue - - # tracetool.generate always transforms types to host - e_args = tracetool.vcpu.transform_args("tcg_helper_c", e.original, "wrapper") - - # mixed-type to TCG helper bridge - args_tcg_compat = e_args.transform(HOST_2_TCG_COMPAT) - - code_new = [ - "%(tcg_type)s __%(name)s = %(tcg_func)s(%(name)s);" % - {"tcg_type": transform_type(type_, HOST_2_TCG), - "tcg_func": transform_type(type_, HOST_2_TCG_TMP_NEW), - "name": name} - for (type_, name) in args_tcg_compat - ] - - code_free = [ - "%(tcg_func)s(__%(name)s);" % - {"tcg_func": transform_type(type_, HOST_2_TCG_TMP_FREE), - "name": name} - for (type_, name) in args_tcg_compat - ] - - gen_name = "gen_helper_" + e.api() - - out('static inline void %(name)s(%(args)s)', - '{', - ' %(code_new)s', - ' %(proxy_name)s(%(tmp_names)s);', - ' %(code_free)s', - '}', - name=gen_name, - args=e_args, - proxy_name=gen_name + "_proxy", - code_new="\n ".join(code_new), - code_free="\n ".join(code_free), - tmp_names=", ".join(["__%s" % name for _, name in e_args]), - ) diff --git a/scripts/tracetool/format/ust_events_h.py b/scripts/tracetool/format/ust_events_h.py index 6ce559f6cc..b99fe6896b 100644 --- a/scripts/tracetool/format/ust_events_h.py +++ b/scripts/tracetool/format/ust_events_h.py @@ -29,8 +29,8 @@ def generate(events, backend, group): '#undef TRACEPOINT_PROVIDER', '#define TRACEPOINT_PROVIDER qemu', '', - '#undef TRACEPOINT_INCLUDE_FILE', - '#define TRACEPOINT_INCLUDE_FILE ./%s' % include, + '#undef TRACEPOINT_INCLUDE', + '#define TRACEPOINT_INCLUDE "./%s"' % include, '', '#if !defined (TRACE_%s_GENERATED_UST_H) || \\' % group.upper(), ' defined(TRACEPOINT_HEADER_MULTI_READ)', diff --git a/scripts/tracetool/vcpu.py b/scripts/tracetool/vcpu.py index 868b4cb04c..d232cb1d06 100644 --- a/scripts/tracetool/vcpu.py +++ b/scripts/tracetool/vcpu.py @@ -19,19 +19,9 @@ from tracetool import Arguments, try_import def transform_event(event): """Transform event to comply with the 'vcpu' property (if present).""" if "vcpu" in event.properties: - # events with 'tcg-trans' and 'tcg-exec' are auto-generated from - # already-patched events - assert "tcg-trans" not in event.properties - assert "tcg-exec" not in event.properties - event.args = Arguments([("void *", "__cpu"), event.args]) - if "tcg" in event.properties: - fmt = "\"cpu=%p \"" - event.fmt = [fmt + event.fmt[0], - fmt + event.fmt[1]] - else: - fmt = "\"cpu=%p \"" - event.fmt = fmt + event.fmt + fmt = "\"cpu=%p \"" + event.fmt = fmt + event.fmt return event diff --git a/scripts/update-linux-headers.sh b/scripts/update-linux-headers.sh index fea4d6eb65..b1ad99cba8 100755 --- a/scripts/update-linux-headers.sh +++ b/scripts/update-linux-headers.sh @@ -9,6 +9,22 @@ # # This work is licensed under the terms of the GNU GPL version 2. # See the COPYING file in the top-level directory. +# +# The script will copy the headers into two target folders: +# +# - linux-headers/ for files that are required for compiling for a +# Linux host. Generally we have these so we can use kernel structs +# and defines that are more recent than the headers that might be +# installed on the host system. Usually this script can do simple +# file copies for these headers. +# +# - include/standard-headers/ for files that are used for guest +# device emulation and are required on all hosts. 
For instance, we +# get our definitions of the virtio structures from the Linux +# kernel headers, but we need those definitions regardless of which +# host OS we are building for. This script has to be careful to +# sanitize the headers to remove any use of Linux-specifics such as +# types like "__u64". This work is done in the cp_portable function. tmpdir=$(mktemp -d) linux="$1" @@ -145,7 +161,7 @@ done rm -rf "$output/linux-headers/linux" mkdir -p "$output/linux-headers/linux" for header in kvm.h vfio.h vfio_ccw.h vfio_zdev.h vhost.h \ - psci.h psp-sev.h userfaultfd.h mman.h; do + psci.h psp-sev.h userfaultfd.h mman.h vduse.h; do cp "$tmpdir/include/linux/$header" "$output/linux-headers/linux" done @@ -198,7 +214,8 @@ for i in "$tmpdir"/include/linux/*virtio*.h \ "$tmpdir/include/linux/const.h" \ "$tmpdir/include/linux/kernel.h" \ "$tmpdir/include/linux/vhost_types.h" \ - "$tmpdir/include/linux/sysinfo.h"; do + "$tmpdir/include/linux/sysinfo.h" \ + "$tmpdir/include/misc/pvpanic.h"; do cp_portable "$i" "$output/include/standard-headers/linux" done mkdir -p "$output/include/standard-headers/drm" diff --git a/scripts/vmstate-static-checker.py b/scripts/vmstate-static-checker.py index 539ead62b4..dfeee8231a 100755 --- a/scripts/vmstate-static-checker.py +++ b/scripts/vmstate-static-checker.py @@ -40,7 +40,7 @@ def check_fields_match(name, s_field, d_field): return True # Some fields changed names between qemu versions. This list - # is used to whitelist such changes in each section / description. + # is used to allow such changes in each section / description. changed_names = { 'apic': ['timer', 'timer_expiry'], 'e1000': ['dev', 'parent_obj'], @@ -367,7 +367,6 @@ def check_machine_type(s, d): if s["Name"] != d["Name"]: print("Warning: checking incompatible machine types:", end=' ') print("\"" + s["Name"] + "\", \"" + d["Name"] + "\"") - return def main(): diff --git a/scripts/xen-detect.c b/scripts/xen-detect.c new file mode 100644 index 0000000000..85e8206490 --- /dev/null +++ b/scripts/xen-detect.c @@ -0,0 +1,203 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* Test programs for various Xen versions that QEMU supports. */ +#if CONFIG_XEN_CTRL_INTERFACE_VERSION == 41100 + #undef XC_WANT_COMPAT_DEVICEMODEL_API + #define __XEN_TOOLS__ + #include + #include + int main(void) { + xendevicemodel_handle *xd; + xenforeignmemory_handle *xfmem; + + xd = xendevicemodel_open(0, 0); + xendevicemodel_pin_memory_cacheattr(xd, 0, 0, 0, 0); + + xfmem = xenforeignmemory_open(0, 0); + xenforeignmemory_map_resource(xfmem, 0, 0, 0, 0, 0, NULL, 0, 0); + + return 0; + } + +#elif CONFIG_XEN_CTRL_INTERFACE_VERSION == 41000 + #undef XC_WANT_COMPAT_MAP_FOREIGN_API + #include + #include + int main(void) { + xenforeignmemory_handle *xfmem; + + xfmem = xenforeignmemory_open(0, 0); + xenforeignmemory_map2(xfmem, 0, 0, 0, 0, 0, 0, 0); + xentoolcore_restrict_all(0); + + return 0; + } + +#elif CONFIG_XEN_CTRL_INTERFACE_VERSION == 40900 + #undef XC_WANT_COMPAT_DEVICEMODEL_API + #define __XEN_TOOLS__ + #include + int main(void) { + xendevicemodel_handle *xd; + + xd = xendevicemodel_open(0, 0); + xendevicemodel_close(xd); + + return 0; + } + +#elif CONFIG_XEN_CTRL_INTERFACE_VERSION == 40800 + /* + * If we have stable libs the we don't want the libxc compat + * layers, regardless of what CFLAGS we may have been given. + * + * Also, check if xengnttab_grant_copy_segment_t is defined and + * grant copy operation is implemented. 
+ */ + #undef XC_WANT_COMPAT_EVTCHN_API + #undef XC_WANT_COMPAT_GNTTAB_API + #undef XC_WANT_COMPAT_MAP_FOREIGN_API + #include + #include + #include + #include + #include + #include + #include + #if !defined(HVM_MAX_VCPUS) + # error HVM_MAX_VCPUS not defined + #endif + int main(void) { + xc_interface *xc = NULL; + xenforeignmemory_handle *xfmem; + xenevtchn_handle *xe; + xengnttab_handle *xg; + xengnttab_grant_copy_segment_t* seg = NULL; + + xs_daemon_open(); + + xc = xc_interface_open(0, 0, 0); + xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0); + xc_domain_add_to_physmap(0, 0, XENMAPSPACE_gmfn, 0, 0); + xc_hvm_inject_msi(xc, 0, 0xf0000000, 0x00000000); + xc_hvm_create_ioreq_server(xc, 0, HVM_IOREQSRV_BUFIOREQ_ATOMIC, NULL); + + xfmem = xenforeignmemory_open(0, 0); + xenforeignmemory_map(xfmem, 0, 0, 0, 0, 0); + + xe = xenevtchn_open(0, 0); + xenevtchn_fd(xe); + + xg = xengnttab_open(0, 0); + xengnttab_grant_copy(xg, 0, seg); + + return 0; + } + +#elif CONFIG_XEN_CTRL_INTERFACE_VERSION == 40701 + /* + * If we have stable libs the we don't want the libxc compat + * layers, regardless of what CFLAGS we may have been given. + */ + #undef XC_WANT_COMPAT_EVTCHN_API + #undef XC_WANT_COMPAT_GNTTAB_API + #undef XC_WANT_COMPAT_MAP_FOREIGN_API + #include + #include + #include + #include + #include + #include + #include + #if !defined(HVM_MAX_VCPUS) + # error HVM_MAX_VCPUS not defined + #endif + int main(void) { + xc_interface *xc = NULL; + xenforeignmemory_handle *xfmem; + xenevtchn_handle *xe; + xengnttab_handle *xg; + + xs_daemon_open(); + + xc = xc_interface_open(0, 0, 0); + xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0); + xc_domain_add_to_physmap(0, 0, XENMAPSPACE_gmfn, 0, 0); + xc_hvm_inject_msi(xc, 0, 0xf0000000, 0x00000000); + xc_hvm_create_ioreq_server(xc, 0, HVM_IOREQSRV_BUFIOREQ_ATOMIC, NULL); + + xfmem = xenforeignmemory_open(0, 0); + xenforeignmemory_map(xfmem, 0, 0, 0, 0, 0); + + xe = xenevtchn_open(0, 0); + xenevtchn_fd(xe); + + xg = xengnttab_open(0, 0); + xengnttab_map_grant_ref(xg, 0, 0, 0); + + return 0; + } + +#elif CONFIG_XEN_CTRL_INTERFACE_VERSION == 40600 + #include + #include + #include + #include + #if !defined(HVM_MAX_VCPUS) + # error HVM_MAX_VCPUS not defined + #endif + int main(void) { + xc_interface *xc; + xs_daemon_open(); + xc = xc_interface_open(0, 0, 0); + xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0); + xc_gnttab_open(NULL, 0); + xc_domain_add_to_physmap(0, 0, XENMAPSPACE_gmfn, 0, 0); + xc_hvm_inject_msi(xc, 0, 0xf0000000, 0x00000000); + xc_hvm_create_ioreq_server(xc, 0, HVM_IOREQSRV_BUFIOREQ_ATOMIC, NULL); + xc_reserved_device_memory_map(xc, 0, 0, 0, 0, NULL, 0); + return 0; + } + +#elif CONFIG_XEN_CTRL_INTERFACE_VERSION == 40500 + #include + #include + #include + #include + #if !defined(HVM_MAX_VCPUS) + # error HVM_MAX_VCPUS not defined + #endif + int main(void) { + xc_interface *xc; + xs_daemon_open(); + xc = xc_interface_open(0, 0, 0); + xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0); + xc_gnttab_open(NULL, 0); + xc_domain_add_to_physmap(0, 0, XENMAPSPACE_gmfn, 0, 0); + xc_hvm_inject_msi(xc, 0, 0xf0000000, 0x00000000); + xc_hvm_create_ioreq_server(xc, 0, 0, NULL); + return 0; + } + +#elif CONFIG_XEN_CTRL_INTERFACE_VERSION == 40200 + #include + #include + #include + #include + #if !defined(HVM_MAX_VCPUS) + # error HVM_MAX_VCPUS not defined + #endif + int main(void) { + xc_interface *xc; + xs_daemon_open(); + xc = xc_interface_open(0, 0, 0); + xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0); + xc_gnttab_open(NULL, 0); + xc_domain_add_to_physmap(0, 0, 
XENMAPSPACE_gmfn, 0, 0); + xc_hvm_inject_msi(xc, 0, 0xf0000000, 0x00000000); + return 0; + } + +#else +#error invalid CONFIG_XEN_CTRL_INTERFACE_VERSION +#endif diff --git a/scsi/pr-manager-helper.c b/scsi/pr-manager-helper.c index 451c7631b7..3be52a98d5 100644 --- a/scsi/pr-manager-helper.c +++ b/scsi/pr-manager-helper.c @@ -77,7 +77,7 @@ static int pr_manager_helper_write(PRManagerHelper *pr_mgr, iov.iov_base = (void *)buf; iov.iov_len = sz; n_written = qio_channel_writev_full(QIO_CHANNEL(pr_mgr->ioc), &iov, 1, - nfds ? &fd : NULL, nfds, errp); + nfds ? &fd : NULL, nfds, 0, errp); if (n_written <= 0) { assert(n_written != QIO_CHANNEL_ERR_BLOCK); diff --git a/scsi/qemu-pr-helper.c b/scsi/qemu-pr-helper.c index 7b9389b47b..196b78c00d 100644 --- a/scsi/qemu-pr-helper.c +++ b/scsi/qemu-pr-helper.c @@ -36,7 +36,7 @@ #include #endif -#include "qemu-common.h" +#include "qemu/help-texts.h" #include "qapi/error.h" #include "qemu/cutils.h" #include "qemu/main-loop.h" @@ -77,8 +77,10 @@ static int gid = -1; static void compute_default_paths(void) { - socket_path = qemu_get_local_state_pathname("run/qemu-pr-helper.sock"); - pidfile = qemu_get_local_state_pathname("run/qemu-pr-helper.pid"); + g_autofree char *state = qemu_get_local_state_dir(); + + socket_path = g_build_filename(state, "run", "qemu-pr-helper.sock", NULL); + pidfile = g_build_filename(state, "run", "qemu-pr-helper.pid", NULL); } static void usage(const char *name) @@ -1001,7 +1003,7 @@ int main(int argc, char **argv) exit(EXIT_FAILURE); } trace_init_file(); - qemu_set_log(LOG_TRACE); + qemu_set_log(LOG_TRACE, &error_fatal); #ifdef CONFIG_MPATH dm_init(); @@ -1044,10 +1046,7 @@ int main(int argc, char **argv) } } - if (qemu_init_main_loop(&local_err)) { - error_report_err(local_err); - exit(EXIT_FAILURE); - } + qemu_init_main_loop(&error_fatal); server_watch = qio_channel_add_watch(QIO_CHANNEL(server_ioc), G_IO_IN, @@ -1061,10 +1060,8 @@ int main(int argc, char **argv) } } - if ((daemonize || pidfile_specified) && - !qemu_write_pidfile(pidfile, &local_err)) { - error_report_err(local_err); - exit(EXIT_FAILURE); + if (daemonize || pidfile_specified) { + qemu_write_pidfile(pidfile, &error_fatal); } #ifdef CONFIG_LIBCAP_NG diff --git a/semihosting/arm-compat-semi.c b/semihosting/arm-compat-semi.c index 1c29146dcf..62d8bae97f 100644 --- a/semihosting/arm-compat-semi.c +++ b/semihosting/arm-compat-semi.c @@ -24,7 +24,7 @@ * * ARM Semihosting is documented in: * Semihosting for AArch32 and AArch64 Release 2.0 - * https://static.docs.arm.com/100863/0200/semihosting.pdf + * https://github.com/ARM-software/abi-aa/blob/main/semihosting/semihosting.rst * * RISC-V Semihosting is documented in: * RISC-V Semihosting @@ -32,21 +32,21 @@ */ #include "qemu/osdep.h" - +#include "qemu/timer.h" +#include "exec/gdbstub.h" #include "semihosting/semihost.h" #include "semihosting/console.h" #include "semihosting/common-semi.h" -#include "qemu/timer.h" +#include "semihosting/guestfd.h" +#include "semihosting/syscalls.h" + #ifdef CONFIG_USER_ONLY #include "qemu.h" #define COMMON_SEMI_HEAP_SIZE (128 * 1024 * 1024) #else -#include "exec/gdbstub.h" #include "qemu/cutils.h" -#ifdef TARGET_ARM -#include "hw/arm/boot.h" -#endif +#include "hw/loader.h" #include "hw/boards.h" #endif @@ -84,277 +84,118 @@ #define O_BINARY 0 #endif -#define GDB_O_RDONLY 0x000 -#define GDB_O_WRONLY 0x001 -#define GDB_O_RDWR 0x002 -#define GDB_O_APPEND 0x008 -#define GDB_O_CREAT 0x200 -#define GDB_O_TRUNC 0x400 -#define GDB_O_BINARY 0 - static int gdb_open_modeflags[12] = { GDB_O_RDONLY, - 
GDB_O_RDONLY | GDB_O_BINARY, + GDB_O_RDONLY, + GDB_O_RDWR, GDB_O_RDWR, - GDB_O_RDWR | GDB_O_BINARY, GDB_O_WRONLY | GDB_O_CREAT | GDB_O_TRUNC, - GDB_O_WRONLY | GDB_O_CREAT | GDB_O_TRUNC | GDB_O_BINARY, + GDB_O_WRONLY | GDB_O_CREAT | GDB_O_TRUNC, + GDB_O_RDWR | GDB_O_CREAT | GDB_O_TRUNC, GDB_O_RDWR | GDB_O_CREAT | GDB_O_TRUNC, - GDB_O_RDWR | GDB_O_CREAT | GDB_O_TRUNC | GDB_O_BINARY, GDB_O_WRONLY | GDB_O_CREAT | GDB_O_APPEND, - GDB_O_WRONLY | GDB_O_CREAT | GDB_O_APPEND | GDB_O_BINARY, + GDB_O_WRONLY | GDB_O_CREAT | GDB_O_APPEND, + GDB_O_RDWR | GDB_O_CREAT | GDB_O_APPEND, GDB_O_RDWR | GDB_O_CREAT | GDB_O_APPEND, - GDB_O_RDWR | GDB_O_CREAT | GDB_O_APPEND | GDB_O_BINARY }; -static int open_modeflags[12] = { - O_RDONLY, - O_RDONLY | O_BINARY, - O_RDWR, - O_RDWR | O_BINARY, - O_WRONLY | O_CREAT | O_TRUNC, - O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, - O_RDWR | O_CREAT | O_TRUNC, - O_RDWR | O_CREAT | O_TRUNC | O_BINARY, - O_WRONLY | O_CREAT | O_APPEND, - O_WRONLY | O_CREAT | O_APPEND | O_BINARY, - O_RDWR | O_CREAT | O_APPEND, - O_RDWR | O_CREAT | O_APPEND | O_BINARY -}; - -typedef enum GuestFDType { - GuestFDUnused = 0, - GuestFDHost = 1, - GuestFDGDB = 2, - GuestFDFeatureFile = 3, -} GuestFDType; - -/* - * Guest file descriptors are integer indexes into an array of - * these structures (we will dynamically resize as necessary). - */ -typedef struct GuestFD { - GuestFDType type; - union { - int hostfd; - target_ulong featurefile_offset; - }; -} GuestFD; - -static GArray *guestfd_array; - #ifndef CONFIG_USER_ONLY -#include "exec/address-spaces.h" -/* - * Find the base of a RAM region containing the specified address + +/** + * common_semi_find_bases: find information about ram and heap base + * + * This function attempts to provide meaningful numbers for RAM and + * HEAP base addresses. The rambase is simply the lowest addressable + * RAM position. For the heapbase we ask the loader to scan the + * address space and the largest available gap by querying the "ROM" + * regions. + * + * Returns: a structure with the numbers we need. */ -static inline hwaddr -common_semi_find_region_base(hwaddr addr) + +typedef struct LayoutInfo { + target_ulong rambase; + size_t ramsize; + hwaddr heapbase; + hwaddr heaplimit; +} LayoutInfo; + +static bool find_ram_cb(Int128 start, Int128 len, const MemoryRegion *mr, + hwaddr offset_in_region, void *opaque) { - MemoryRegion *subregion; + LayoutInfo *info = (LayoutInfo *) opaque; + uint64_t size = int128_get64(len); + + if (!mr->ram || mr->readonly) { + return false; + } + + if (size > info->ramsize) { + info->rambase = int128_get64(start); + info->ramsize = size; + } + + /* search exhaustively for largest RAM */ + return false; +} + +static LayoutInfo common_semi_find_bases(CPUState *cs) +{ + FlatView *fv; + LayoutInfo info = { 0, 0, 0, 0 }; + + RCU_READ_LOCK_GUARD(); + + fv = address_space_to_flatview(cs->as); + flatview_for_each_range(fv, find_ram_cb, &info); /* - * Find the chunk of R/W memory containing the address. This is - * used for the SYS_HEAPINFO semihosting call, which should - * probably be using information from the loaded application. + * If we have found the RAM lets iterate through the ROM blobs to + * work out the best place for the remainder of RAM and split it + * equally between stack and heap. 
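The LayoutInfo filled in by common_semi_find_bases() above ultimately feeds the TARGET_SYS_HEAPINFO handler later in this file, which writes four words back into a block supplied by the guest. A guest-side sketch of that block (illustrative only; 0x16 is the SYS_HEAPINFO call number from the Arm semihosting specification):

    /* Layout of the block the guest passes to SYS_HEAPINFO (0x16): */
    struct heapinfo_block {
        unsigned long heap_base;    /* start of the largest ROM-free gap   */
        unsigned long heap_limit;   /* end of that gap                     */
        unsigned long stack_base;   /* == heap_limit in the softmmu case   */
        unsigned long stack_limit;  /* == heap_base in the softmmu case    */
    };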
*/ - QTAILQ_FOREACH(subregion, &get_system_memory()->subregions, - subregions_link) { - if (subregion->ram && !subregion->readonly) { - Int128 top128 = int128_add(int128_make64(subregion->addr), - subregion->size); - Int128 addr128 = int128_make64(addr); - if (subregion->addr <= addr && int128_lt(addr128, top128)) { - return subregion->addr; - } - } - } - return 0; -} -#endif - -#ifdef TARGET_ARM -static inline target_ulong -common_semi_arg(CPUState *cs, int argno) -{ - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - if (is_a64(env)) { - return env->xregs[argno]; - } else { - return env->regs[argno]; - } -} - -static inline void -common_semi_set_ret(CPUState *cs, target_ulong ret) -{ - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - if (is_a64(env)) { - env->xregs[0] = ret; - } else { - env->regs[0] = ret; - } -} - -static inline bool -common_semi_sys_exit_extended(CPUState *cs, int nr) -{ - return (nr == TARGET_SYS_EXIT_EXTENDED || is_a64(cs->env_ptr)); -} - -#ifndef CONFIG_USER_ONLY -#include "hw/arm/boot.h" -static inline target_ulong -common_semi_rambase(CPUState *cs) -{ - CPUArchState *env = cs->env_ptr; - const struct arm_boot_info *info = env->boot_info; - target_ulong sp; - - if (info) { - return info->loader_start; + if (info.rambase || info.ramsize > 0) { + RomGap gap = rom_find_largest_gap_between(info.rambase, info.ramsize); + info.heapbase = gap.base; + info.heaplimit = gap.base + gap.size; } - if (is_a64(env)) { - sp = env->xregs[31]; - } else { - sp = env->regs[13]; - } - return common_semi_find_region_base(sp); + return info; } -#endif - -#endif /* TARGET_ARM */ - -#ifdef TARGET_RISCV -static inline target_ulong -common_semi_arg(CPUState *cs, int argno) -{ - RISCVCPU *cpu = RISCV_CPU(cs); - CPURISCVState *env = &cpu->env; - return env->gpr[xA0 + argno]; -} - -static inline void -common_semi_set_ret(CPUState *cs, target_ulong ret) -{ - RISCVCPU *cpu = RISCV_CPU(cs); - CPURISCVState *env = &cpu->env; - env->gpr[xA0] = ret; -} - -static inline bool -common_semi_sys_exit_extended(CPUState *cs, int nr) -{ - return (nr == TARGET_SYS_EXIT_EXTENDED || sizeof(target_ulong) == 8); -} - -#ifndef CONFIG_USER_ONLY - -static inline target_ulong -common_semi_rambase(CPUState *cs) -{ - RISCVCPU *cpu = RISCV_CPU(cs); - CPURISCVState *env = &cpu->env; - return common_semi_find_region_base(env->gpr[xSP]); -} -#endif #endif -/* - * Allocate a new guest file descriptor and return it; if we - * couldn't allocate a new fd then return -1. - * This is a fairly simplistic implementation because we don't - * expect that most semihosting guest programs will make very - * heavy use of opening and closing fds. - */ -static int alloc_guestfd(void) -{ - guint i; - - if (!guestfd_array) { - /* New entries zero-initialized, i.e. type GuestFDUnused */ - guestfd_array = g_array_new(FALSE, TRUE, sizeof(GuestFD)); - } - - /* SYS_OPEN should return nonzero handle on success. Start guestfd from 1 */ - for (i = 1; i < guestfd_array->len; i++) { - GuestFD *gf = &g_array_index(guestfd_array, GuestFD, i); - - if (gf->type == GuestFDUnused) { - return i; - } - } - - /* All elements already in use: expand the array */ - g_array_set_size(guestfd_array, i + 1); - return i; -} +#include "common-semi-target.h" /* - * Look up the guestfd in the data structure; return NULL - * for out of bounds, but don't check whether the slot is unused. - * This is used internally by the other guestfd functions. 
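The per-target helpers deleted above are not gone; they move into the new common-semi-target.h pulled in here, so that arm-compat-semi.c itself stays target-agnostic. A sketch of the Arm flavour, reconstructed from the code removed in this hunk (the real header may differ in detail):

    static inline target_ulong common_semi_arg(CPUState *cs, int argno)
    {
        CPUARMState *env = &ARM_CPU(cs)->env;
        return is_a64(env) ? env->xregs[argno] : env->regs[argno];
    }

    static inline void common_semi_set_ret(CPUState *cs, target_ulong ret)
    {
        CPUARMState *env = &ARM_CPU(cs)->env;
        if (is_a64(env)) {
            env->xregs[0] = ret;
        } else {
            env->regs[0] = ret;
        }
    }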
+ * Read the input value from the argument block; fail the semihosting + * call if the memory read fails. Eventually we could use a generic + * CPUState helper function here. + * Note that GET_ARG() handles memory access errors by jumping to + * do_fault, so must be used as the first thing done in handling a + * semihosting call, to avoid accidentally leaking allocated resources. + * SET_ARG(), since it unavoidably happens late, instead returns an + * error indication (0 on success, non-0 for error) which the caller + * should check. */ -static GuestFD *do_get_guestfd(int guestfd) -{ - if (!guestfd_array) { - return NULL; - } - if (guestfd <= 0 || guestfd >= guestfd_array->len) { - return NULL; - } +#define GET_ARG(n) do { \ + if (is_64bit_semihosting(env)) { \ + if (get_user_u64(arg ## n, args + (n) * 8)) { \ + goto do_fault; \ + } \ + } else { \ + if (get_user_u32(arg ## n, args + (n) * 4)) { \ + goto do_fault; \ + } \ + } \ +} while (0) - return &g_array_index(guestfd_array, GuestFD, guestfd); -} +#define SET_ARG(n, val) \ + (is_64bit_semihosting(env) ? \ + put_user_u64(val, args + (n) * 8) : \ + put_user_u32(val, args + (n) * 4)) -/* - * Associate the specified guest fd (which must have been - * allocated via alloc_fd() and not previously used) with - * the specified host/gdb fd. - */ -static void associate_guestfd(int guestfd, int hostfd) -{ - GuestFD *gf = do_get_guestfd(guestfd); - - assert(gf); - gf->type = use_gdb_syscalls() ? GuestFDGDB : GuestFDHost; - gf->hostfd = hostfd; -} - -/* - * Deallocate the specified guest file descriptor. This doesn't - * close the host fd, it merely undoes the work of alloc_fd(). - */ -static void dealloc_guestfd(int guestfd) -{ - GuestFD *gf = do_get_guestfd(guestfd); - - assert(gf); - gf->type = GuestFDUnused; -} - -/* - * Given a guest file descriptor, get the associated struct. - * If the fd is not valid, return NULL. This is the function - * used by the various semihosting calls to validate a handle - * from the guest. - * Note: calling alloc_guestfd() or dealloc_guestfd() will - * invalidate any GuestFD* obtained by calling this function. - */ -static GuestFD *get_guestfd(int guestfd) -{ - GuestFD *gf = do_get_guestfd(guestfd); - - if (!gf || gf->type == GuestFDUnused) { - return NULL; - } - return gf; -} /* * The semihosting API has no concept of its errno being thread-safe, @@ -366,23 +207,9 @@ static GuestFD *get_guestfd(int guestfd) #ifndef CONFIG_USER_ONLY static target_ulong syscall_err; -#include "exec/softmmu-semi.h" +#include "semihosting/softmmu-uaccess.h" #endif -static inline uint32_t set_swi_errno(CPUState *cs, uint32_t code) -{ - if (code == (uint32_t)-1) { -#ifdef CONFIG_USER_ONLY - TaskState *ts = cs->opaque; - - ts->swi_errno = errno; -#else - syscall_err = errno; -#endif - } - return code; -} - static inline uint32_t get_swi_errno(CPUState *cs) { #ifdef CONFIG_USER_ONLY @@ -394,254 +221,116 @@ static inline uint32_t get_swi_errno(CPUState *cs) #endif } -static target_ulong common_semi_syscall_len; - -static void common_semi_cb(CPUState *cs, target_ulong ret, target_ulong err) +static void common_semi_cb(CPUState *cs, uint64_t ret, int err) { - target_ulong reg0 = common_semi_arg(cs, 0); - - if (ret == (target_ulong)-1) { - errno = err; - set_swi_errno(cs, -1); - reg0 = ret; - } else { - /* Fixup syscalls that use nonstardard return conventions. 
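A minimal usage sketch of the pattern the comment above describes -- read every argument with GET_ARG() before doing anything that allocates, because a failed read jumps straight to the shared fault path (TARGET_SYS_EXAMPLE is a placeholder, not a real call number; the do_fault label sits at the end of do_common_semihosting()):

    case TARGET_SYS_EXAMPLE:             /* placeholder call number         */
        GET_ARG(0);                      /* may 'goto do_fault'...          */
        GET_ARG(1);                      /* ...so fetch all arguments first */
        semihost_sys_remove(cs, common_semi_cb, arg0, arg1 + 1);
        break;

    do_fault:                            /* shared fault path               */
        common_semi_cb(cs, -1, EFAULT);  /* report EFAULT to the guest      */
        break;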
*/ - switch (reg0) { - case TARGET_SYS_WRITE: - case TARGET_SYS_READ: - reg0 = common_semi_syscall_len - ret; - break; - case TARGET_SYS_SEEK: - reg0 = 0; - break; - default: - reg0 = ret; - break; - } - } - common_semi_set_ret(cs, reg0); -} - -static target_ulong common_semi_flen_buf(CPUState *cs) -{ - target_ulong sp; -#ifdef TARGET_ARM - /* Return an address in target memory of 64 bytes where the remote - * gdb should write its stat struct. (The format of this structure - * is defined by GDB's remote protocol and is not target-specific.) - * We put this on the guest's stack just below SP. - */ - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - - if (is_a64(env)) { - sp = env->xregs[31]; - } else { - sp = env->regs[13]; - } + if (err) { +#ifdef CONFIG_USER_ONLY + TaskState *ts = cs->opaque; + ts->swi_errno = err; +#else + syscall_err = err; #endif -#ifdef TARGET_RISCV - RISCVCPU *cpu = RISCV_CPU(cs); - CPURISCVState *env = &cpu->env; - - sp = env->gpr[xSP]; -#endif - - return sp - 64; -} - -static void -common_semi_flen_cb(CPUState *cs, target_ulong ret, target_ulong err) -{ - /* The size is always stored in big-endian order, extract - the value. We assume the size always fit in 32 bits. */ - uint32_t size; - cpu_memory_rw_debug(cs, common_semi_flen_buf(cs) + 32, - (uint8_t *)&size, 4, 0); - size = be32_to_cpu(size); - common_semi_set_ret(cs, size); - errno = err; - set_swi_errno(cs, -1); -} - -static int common_semi_open_guestfd; - -static void -common_semi_open_cb(CPUState *cs, target_ulong ret, target_ulong err) -{ - if (ret == (target_ulong)-1) { - errno = err; - set_swi_errno(cs, -1); - dealloc_guestfd(common_semi_open_guestfd); - } else { - associate_guestfd(common_semi_open_guestfd, ret); - ret = common_semi_open_guestfd; } common_semi_set_ret(cs, ret); } -static target_ulong -common_semi_gdb_syscall(CPUState *cs, gdb_syscall_complete_cb cb, - const char *fmt, ...) +/* + * Use 0xdeadbeef as the return value when there isn't a defined + * return value for the call. + */ +static void common_semi_dead_cb(CPUState *cs, uint64_t ret, int err) { - va_list va; - - va_start(va, fmt); - gdb_do_syscallv(cb, fmt, va); - va_end(va); - - /* - * FIXME: in softmmu mode, the gdbstub will schedule our callback - * to occur, but will not actually call it to complete the syscall - * until after this function has returned and we are back in the - * CPU main loop. Therefore callers to this function must not - * do anything with its return value, because it is not necessarily - * the result of the syscall, but could just be the old value of X0. - * The only thing safe to do with this is that the callers of - * do_common_semihosting() will write it straight back into X0. - * (In linux-user mode, the callback will have happened before - * gdb_do_syscallv() returns.) - * - * We should tidy this up so neither this function nor - * do_common_semihosting() return a value, so the mistake of - * doing something with the return value is not possible to make. - */ - - return common_semi_arg(cs, 0); + common_semi_set_ret(cs, 0xdeadbeef); } /* - * Types for functions implementing various semihosting calls - * for specific types of guest file descriptor. These must all - * do the work and return the required return value for the guest, - * setting the guest errno if appropriate. + * SYS_READ and SYS_WRITE always return the number of bytes not read/written. + * There is no error condition, other than returning the original length. 
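A worked example of the return convention stated above, matching what common_semi_rw_cb() below computes (values are illustrative):

    uint64_t ret = 70;          /* bytes the host actually transferred      */
    target_ulong arg2 = 100;    /* length the guest asked for               */
    target_ulong guest_ret = arg2 - ret;   /* 30 bytes NOT written: 0 means */
                                           /* full success, 100 means none  */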
*/ -typedef uint32_t sys_closefn(CPUState *cs, GuestFD *gf); -typedef uint32_t sys_writefn(CPUState *cs, GuestFD *gf, - target_ulong buf, uint32_t len); -typedef uint32_t sys_readfn(CPUState *cs, GuestFD *gf, - target_ulong buf, uint32_t len); -typedef uint32_t sys_isattyfn(CPUState *cs, GuestFD *gf); -typedef uint32_t sys_seekfn(CPUState *cs, GuestFD *gf, - target_ulong offset); -typedef uint32_t sys_flenfn(CPUState *cs, GuestFD *gf); - -static uint32_t host_closefn(CPUState *cs, GuestFD *gf) +static void common_semi_rw_cb(CPUState *cs, uint64_t ret, int err) { - /* - * Only close the underlying host fd if it's one we opened on behalf - * of the guest in SYS_OPEN. - */ - if (gf->hostfd == STDIN_FILENO || - gf->hostfd == STDOUT_FILENO || - gf->hostfd == STDERR_FILENO) { - return 0; + /* Recover the original length from the third argument. */ + CPUArchState *env G_GNUC_UNUSED = cs->env_ptr; + target_ulong args = common_semi_arg(cs, 1); + target_ulong arg2; + GET_ARG(2); + + if (err) { + do_fault: + ret = 0; /* error: no bytes transmitted */ } - return set_swi_errno(cs, close(gf->hostfd)); + common_semi_set_ret(cs, arg2 - ret); } -static uint32_t host_writefn(CPUState *cs, GuestFD *gf, - target_ulong buf, uint32_t len) +/* + * Convert from Posix ret+errno to Arm SYS_ISTTY return values. + * With gdbstub, err is only ever set for protocol errors to EIO. + */ +static void common_semi_istty_cb(CPUState *cs, uint64_t ret, int err) { - CPUArchState *env = cs->env_ptr; - uint32_t ret; - char *s = lock_user(VERIFY_READ, buf, len, 1); - (void) env; /* Used in arm softmmu lock_user implicitly */ - if (!s) { - /* Return bytes not written on error */ - return len; + if (err) { + ret = (err == ENOTTY ? 0 : -1); } - ret = set_swi_errno(cs, write(gf->hostfd, s, len)); - unlock_user(s, buf, 0); - if (ret == (uint32_t)-1) { + common_semi_cb(cs, ret, err); +} + +/* + * SYS_SEEK returns 0 on success, not the resulting offset. + */ +static void common_semi_seek_cb(CPUState *cs, uint64_t ret, int err) +{ + if (!err) { ret = 0; } - /* Return bytes not written */ - return len - ret; + common_semi_cb(cs, ret, err); } -static uint32_t host_readfn(CPUState *cs, GuestFD *gf, - target_ulong buf, uint32_t len) +/* + * Return an address in target memory of 64 bytes where the remote + * gdb should write its stat struct. (The format of this structure + * is defined by GDB's remote protocol and is not target-specific.) + * We put this on the guest's stack just below SP. + */ +static target_ulong common_semi_flen_buf(CPUState *cs) { - CPUArchState *env = cs->env_ptr; - uint32_t ret; - char *s = lock_user(VERIFY_WRITE, buf, len, 0); - (void) env; /* Used in arm softmmu lock_user implicitly */ - if (!s) { - /* return bytes not read */ - return len; + target_ulong sp = common_semi_stack_bottom(cs); + return sp - 64; +} + +static void +common_semi_flen_fstat_cb(CPUState *cs, uint64_t ret, int err) +{ + if (!err) { + /* The size is always stored in big-endian order, extract the value. 
*/ + uint64_t size; + if (cpu_memory_rw_debug(cs, common_semi_flen_buf(cs) + + offsetof(struct gdb_stat, gdb_st_size), + &size, 8, 0)) { + ret = -1, err = EFAULT; + } else { + size = be64_to_cpu(size); + if (ret != size) { + ret = -1, err = EOVERFLOW; + } + } } - do { - ret = set_swi_errno(cs, read(gf->hostfd, s, len)); - } while (ret == -1 && errno == EINTR); - unlock_user(s, buf, len); - if (ret == (uint32_t)-1) { - ret = 0; + common_semi_cb(cs, ret, err); +} + +static void +common_semi_readc_cb(CPUState *cs, uint64_t ret, int err) +{ + if (!err) { + CPUArchState *env G_GNUC_UNUSED = cs->env_ptr; + uint8_t ch; + + if (get_user_u8(ch, common_semi_stack_bottom(cs) - 1)) { + ret = -1, err = EFAULT; + } else { + ret = ch; + } } - /* Return bytes not read */ - return len - ret; -} - -static uint32_t host_isattyfn(CPUState *cs, GuestFD *gf) -{ - return isatty(gf->hostfd); -} - -static uint32_t host_seekfn(CPUState *cs, GuestFD *gf, target_ulong offset) -{ - uint32_t ret = set_swi_errno(cs, lseek(gf->hostfd, offset, SEEK_SET)); - if (ret == (uint32_t)-1) { - return -1; - } - return 0; -} - -static uint32_t host_flenfn(CPUState *cs, GuestFD *gf) -{ - struct stat buf; - uint32_t ret = set_swi_errno(cs, fstat(gf->hostfd, &buf)); - if (ret == (uint32_t)-1) { - return -1; - } - return buf.st_size; -} - -static uint32_t gdb_closefn(CPUState *cs, GuestFD *gf) -{ - return common_semi_gdb_syscall(cs, common_semi_cb, "close,%x", gf->hostfd); -} - -static uint32_t gdb_writefn(CPUState *cs, GuestFD *gf, - target_ulong buf, uint32_t len) -{ - common_semi_syscall_len = len; - return common_semi_gdb_syscall(cs, common_semi_cb, "write,%x,%x,%x", - gf->hostfd, buf, len); -} - -static uint32_t gdb_readfn(CPUState *cs, GuestFD *gf, - target_ulong buf, uint32_t len) -{ - common_semi_syscall_len = len; - return common_semi_gdb_syscall(cs, common_semi_cb, "read,%x,%x,%x", - gf->hostfd, buf, len); -} - -static uint32_t gdb_isattyfn(CPUState *cs, GuestFD *gf) -{ - return common_semi_gdb_syscall(cs, common_semi_cb, "isatty,%x", gf->hostfd); -} - -static uint32_t gdb_seekfn(CPUState *cs, GuestFD *gf, target_ulong offset) -{ - return common_semi_gdb_syscall(cs, common_semi_cb, "lseek,%x,%x,0", - gf->hostfd, offset); -} - -static uint32_t gdb_flenfn(CPUState *cs, GuestFD *gf) -{ - return common_semi_gdb_syscall(cs, common_semi_flen_cb, "fstat,%x,%x", - gf->hostfd, common_semi_flen_buf(cs)); + common_semi_cb(cs, ret, err); } #define SHFB_MAGIC_0 0x53 @@ -661,157 +350,15 @@ static const uint8_t featurefile_data[] = { SH_EXT_EXIT_EXTENDED | SH_EXT_STDOUT_STDERR, /* Feature byte 0 */ }; -static void init_featurefile_guestfd(int guestfd) -{ - GuestFD *gf = do_get_guestfd(guestfd); - - assert(gf); - gf->type = GuestFDFeatureFile; - gf->featurefile_offset = 0; -} - -static uint32_t featurefile_closefn(CPUState *cs, GuestFD *gf) -{ - /* Nothing to do */ - return 0; -} - -static uint32_t featurefile_writefn(CPUState *cs, GuestFD *gf, - target_ulong buf, uint32_t len) -{ - /* This fd can never be open for writing */ - - errno = EBADF; - return set_swi_errno(cs, -1); -} - -static uint32_t featurefile_readfn(CPUState *cs, GuestFD *gf, - target_ulong buf, uint32_t len) -{ - CPUArchState *env = cs->env_ptr; - uint32_t i; - char *s; - - (void) env; /* Used in arm softmmu lock_user implicitly */ - s = lock_user(VERIFY_WRITE, buf, len, 0); - if (!s) { - return len; - } - - for (i = 0; i < len; i++) { - if (gf->featurefile_offset >= sizeof(featurefile_data)) { - break; - } - s[i] = featurefile_data[gf->featurefile_offset]; - 
gf->featurefile_offset++; - } - - unlock_user(s, buf, len); - - /* Return number of bytes not read */ - return len - i; -} - -static uint32_t featurefile_isattyfn(CPUState *cs, GuestFD *gf) -{ - return 0; -} - -static uint32_t featurefile_seekfn(CPUState *cs, GuestFD *gf, - target_ulong offset) -{ - gf->featurefile_offset = offset; - return 0; -} - -static uint32_t featurefile_flenfn(CPUState *cs, GuestFD *gf) -{ - return sizeof(featurefile_data); -} - -typedef struct GuestFDFunctions { - sys_closefn *closefn; - sys_writefn *writefn; - sys_readfn *readfn; - sys_isattyfn *isattyfn; - sys_seekfn *seekfn; - sys_flenfn *flenfn; -} GuestFDFunctions; - -static const GuestFDFunctions guestfd_fns[] = { - [GuestFDHost] = { - .closefn = host_closefn, - .writefn = host_writefn, - .readfn = host_readfn, - .isattyfn = host_isattyfn, - .seekfn = host_seekfn, - .flenfn = host_flenfn, - }, - [GuestFDGDB] = { - .closefn = gdb_closefn, - .writefn = gdb_writefn, - .readfn = gdb_readfn, - .isattyfn = gdb_isattyfn, - .seekfn = gdb_seekfn, - .flenfn = gdb_flenfn, - }, - [GuestFDFeatureFile] = { - .closefn = featurefile_closefn, - .writefn = featurefile_writefn, - .readfn = featurefile_readfn, - .isattyfn = featurefile_isattyfn, - .seekfn = featurefile_seekfn, - .flenfn = featurefile_flenfn, - }, -}; - -/* - * Read the input value from the argument block; fail the semihosting - * call if the memory read fails. Eventually we could use a generic - * CPUState helper function here. - */ -static inline bool is_64bit_semihosting(CPUArchState *env) -{ -#if defined(TARGET_ARM) - return is_a64(env); -#elif defined(TARGET_RISCV) - return !riscv_cpu_is_32bit(env); -#else -#error un-handled architecture -#endif -} - - -#define GET_ARG(n) do { \ - if (is_64bit_semihosting(env)) { \ - if (get_user_u64(arg ## n, args + (n) * 8)) { \ - errno = EFAULT; \ - return set_swi_errno(cs, -1); \ - } \ - } else { \ - if (get_user_u32(arg ## n, args + (n) * 4)) { \ - errno = EFAULT; \ - return set_swi_errno(cs, -1); \ - } \ - } \ -} while (0) - -#define SET_ARG(n, val) \ - (is_64bit_semihosting(env) ? \ - put_user_u64(val, args + (n) * 8) : \ - put_user_u32(val, args + (n) * 4)) - - /* * Do a semihosting call. * * The specification always says that the "return register" either * returns a specific value or is corrupted, so we don't need to * report to our caller whether we are returning a value or trying to - * leave the register unchanged. We use 0xdeadbeef as the return value - * when there isn't a defined return value for the call. + * leave the register unchanged. 
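For context on the ':semihosting-features' pseudo-file that the SYS_OPEN handler below serves out of featurefile_data: a guest reads a four-byte magic followed by feature bytes. The expected contents, reconstructed from the SHFB_MAGIC_* defines and featurefile_data above (bit assignments per the Arm semihosting 2.0 spec; treat this as a sketch):

    static const uint8_t semihosting_features_expected[] = {
        0x53, 0x48, 0x46, 0x42,      /* "SHFB" magic                        */
        (1 << 0) | (1 << 1),         /* bit 0: SH_EXT_EXIT_EXTENDED         */
                                     /* bit 1: SH_EXT_STDOUT_STDERR         */
    };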
*/ -target_ulong do_common_semihosting(CPUState *cs) +void do_common_semihosting(CPUState *cs) { CPUArchState *env = cs->env_ptr; target_ulong args; @@ -820,43 +367,31 @@ target_ulong do_common_semihosting(CPUState *cs) char * s; int nr; uint32_t ret; - uint32_t len; - GuestFD *gf; int64_t elapsed; - (void) env; /* Used implicitly by arm lock_user macro */ nr = common_semi_arg(cs, 0) & 0xffffffffU; args = common_semi_arg(cs, 1); switch (nr) { case TARGET_SYS_OPEN: { - int guestfd; + int ret, err = 0; + int hostfd; GET_ARG(0); GET_ARG(1); GET_ARG(2); s = lock_user_string(arg0); if (!s) { - errno = EFAULT; - return set_swi_errno(cs, -1); + goto do_fault; } if (arg1 >= 12) { unlock_user(s, arg0, 0); - errno = EINVAL; - return set_swi_errno(cs, -1); - } - - guestfd = alloc_guestfd(); - if (guestfd < 0) { - unlock_user(s, arg0, 0); - errno = EMFILE; - return set_swi_errno(cs, -1); + common_semi_cb(cs, -1, EINVAL); + break; } if (strcmp(s, ":tt") == 0) { - int result_fileno; - /* * We implement SH_EXT_STDOUT_STDERR, so: * open for read == stdin @@ -864,206 +399,170 @@ target_ulong do_common_semihosting(CPUState *cs) * open for append == stderr */ if (arg1 < 4) { - result_fileno = STDIN_FILENO; + hostfd = STDIN_FILENO; } else if (arg1 < 8) { - result_fileno = STDOUT_FILENO; + hostfd = STDOUT_FILENO; } else { - result_fileno = STDERR_FILENO; + hostfd = STDERR_FILENO; } - associate_guestfd(guestfd, result_fileno); - unlock_user(s, arg0, 0); - return guestfd; - } - if (strcmp(s, ":semihosting-features") == 0) { - unlock_user(s, arg0, 0); + ret = alloc_guestfd(); + associate_guestfd(ret, hostfd); + } else if (strcmp(s, ":semihosting-features") == 0) { /* We must fail opens for modes other than 0 ('r') or 1 ('rb') */ if (arg1 != 0 && arg1 != 1) { - dealloc_guestfd(guestfd); - errno = EACCES; - return set_swi_errno(cs, -1); - } - init_featurefile_guestfd(guestfd); - return guestfd; - } - - if (use_gdb_syscalls()) { - common_semi_open_guestfd = guestfd; - ret = common_semi_gdb_syscall(cs, common_semi_open_cb, - "open,%s,%x,1a4", arg0, (int)arg2 + 1, - gdb_open_modeflags[arg1]); - } else { - ret = set_swi_errno(cs, open(s, open_modeflags[arg1], 0644)); - if (ret == (uint32_t)-1) { - dealloc_guestfd(guestfd); + ret = -1; + err = EACCES; } else { - associate_guestfd(guestfd, ret); - ret = guestfd; + ret = alloc_guestfd(); + staticfile_guestfd(ret, featurefile_data, + sizeof(featurefile_data)); } + } else { + unlock_user(s, arg0, 0); + semihost_sys_open(cs, common_semi_cb, arg0, arg2 + 1, + gdb_open_modeflags[arg1], 0644); + break; } unlock_user(s, arg0, 0); - return ret; + common_semi_cb(cs, ret, err); + break; } + case TARGET_SYS_CLOSE: GET_ARG(0); + semihost_sys_close(cs, common_semi_cb, arg0); + break; - gf = get_guestfd(arg0); - if (!gf) { - errno = EBADF; - return set_swi_errno(cs, -1); - } - - ret = guestfd_fns[gf->type].closefn(cs, gf); - dealloc_guestfd(arg0); - return ret; case TARGET_SYS_WRITEC: - qemu_semihosting_console_outc(cs->env_ptr, args); - return 0xdeadbeef; + /* + * FIXME: the byte to be written is in a target_ulong slot, + * which means this is wrong for a big-endian guest. 
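A worked example of the ':tt' handling in the SYS_OPEN case above: arg1 indexes the twelve ISO C fopen modes ("r", "rb", "r+", ..., "a+b"), so an index below 4 is a read mode, below 8 a write mode, and 8 or above an append mode. The same decision in isolation (a sketch of the logic, not code from this patch):

    static int tt_host_fd(unsigned mode_index)
    {
        if (mode_index < 4) {
            return STDIN_FILENO;    /* "r"-family modes                     */
        } else if (mode_index < 8) {
            return STDOUT_FILENO;   /* "w"-family modes                     */
        }
        return STDERR_FILENO;       /* "a"-family, per SH_EXT_STDOUT_STDERR */
    }

For instance, SYS_OPEN(":tt", 8, 3) -- mode "a" -- hands the guest a descriptor bound to the host's stderr.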
+ */ + semihost_sys_write_gf(cs, common_semi_dead_cb, + &console_out_gf, args, 1); + break; + case TARGET_SYS_WRITE0: - return qemu_semihosting_console_outs(cs->env_ptr, args); + { + ssize_t len = target_strlen(args); + if (len < 0) { + common_semi_dead_cb(cs, -1, EFAULT); + } else { + semihost_sys_write_gf(cs, common_semi_dead_cb, + &console_out_gf, args, len); + } + } + break; + case TARGET_SYS_WRITE: GET_ARG(0); GET_ARG(1); GET_ARG(2); - len = arg2; + semihost_sys_write(cs, common_semi_rw_cb, arg0, arg1, arg2); + break; - gf = get_guestfd(arg0); - if (!gf) { - errno = EBADF; - return set_swi_errno(cs, -1); - } - - return guestfd_fns[gf->type].writefn(cs, gf, arg1, len); case TARGET_SYS_READ: GET_ARG(0); GET_ARG(1); GET_ARG(2); - len = arg2; + semihost_sys_read(cs, common_semi_rw_cb, arg0, arg1, arg2); + break; - gf = get_guestfd(arg0); - if (!gf) { - errno = EBADF; - return set_swi_errno(cs, -1); - } - - return guestfd_fns[gf->type].readfn(cs, gf, arg1, len); case TARGET_SYS_READC: - return qemu_semihosting_console_inc(cs->env_ptr); + semihost_sys_read_gf(cs, common_semi_readc_cb, &console_in_gf, + common_semi_stack_bottom(cs) - 1, 1); + break; + case TARGET_SYS_ISERROR: GET_ARG(0); - return (target_long) arg0 < 0 ? 1 : 0; + common_semi_set_ret(cs, (target_long)arg0 < 0); + break; + case TARGET_SYS_ISTTY: GET_ARG(0); + semihost_sys_isatty(cs, common_semi_istty_cb, arg0); + break; - gf = get_guestfd(arg0); - if (!gf) { - errno = EBADF; - return set_swi_errno(cs, -1); - } - - return guestfd_fns[gf->type].isattyfn(cs, gf); case TARGET_SYS_SEEK: GET_ARG(0); GET_ARG(1); + semihost_sys_lseek(cs, common_semi_seek_cb, arg0, arg1, GDB_SEEK_SET); + break; - gf = get_guestfd(arg0); - if (!gf) { - errno = EBADF; - return set_swi_errno(cs, -1); - } - - return guestfd_fns[gf->type].seekfn(cs, gf, arg1); case TARGET_SYS_FLEN: GET_ARG(0); + semihost_sys_flen(cs, common_semi_flen_fstat_cb, common_semi_cb, + arg0, common_semi_flen_buf(cs)); + break; - gf = get_guestfd(arg0); - if (!gf) { - errno = EBADF; - return set_swi_errno(cs, -1); - } - - return guestfd_fns[gf->type].flenfn(cs, gf); case TARGET_SYS_TMPNAM: + { + int len; + char *p; + GET_ARG(0); GET_ARG(1); GET_ARG(2); - if (asprintf(&s, "/tmp/qemu-%x%02x", getpid(), - (int) (arg1 & 0xff)) < 0) { - return -1; + len = asprintf(&s, "%s/qemu-%x%02x", g_get_tmp_dir(), + getpid(), (int)arg1 & 0xff); + if (len < 0) { + common_semi_set_ret(cs, -1); + break; } - ul_ret = (target_ulong) -1; + /* Allow for trailing NUL */ + len++; /* Make sure there's enough space in the buffer */ - if (strlen(s) < arg2) { - char *output = lock_user(VERIFY_WRITE, arg0, arg2, 0); - strcpy(output, s); - unlock_user(output, arg0, arg2); - ul_ret = 0; + if (len > arg2) { + free(s); + common_semi_set_ret(cs, -1); + break; } + p = lock_user(VERIFY_WRITE, arg0, len, 0); + if (!p) { + free(s); + goto do_fault; + } + memcpy(p, s, len); + unlock_user(p, arg0, len); free(s); - return ul_ret; + common_semi_set_ret(cs, 0); + break; + } + case TARGET_SYS_REMOVE: GET_ARG(0); GET_ARG(1); - if (use_gdb_syscalls()) { - ret = common_semi_gdb_syscall(cs, common_semi_cb, "unlink,%s", - arg0, (int)arg1 + 1); - } else { - s = lock_user_string(arg0); - if (!s) { - errno = EFAULT; - return set_swi_errno(cs, -1); - } - ret = set_swi_errno(cs, remove(s)); - unlock_user(s, arg0, 0); - } - return ret; + semihost_sys_remove(cs, common_semi_cb, arg0, arg1 + 1); + break; + case TARGET_SYS_RENAME: GET_ARG(0); GET_ARG(1); GET_ARG(2); GET_ARG(3); - if (use_gdb_syscalls()) { - return common_semi_gdb_syscall(cs, 
common_semi_cb, "rename,%s,%s", - arg0, (int)arg1 + 1, arg2, - (int)arg3 + 1); - } else { - char *s2; - s = lock_user_string(arg0); - s2 = lock_user_string(arg2); - if (!s || !s2) { - errno = EFAULT; - ret = set_swi_errno(cs, -1); - } else { - ret = set_swi_errno(cs, rename(s, s2)); - } - if (s2) - unlock_user(s2, arg2, 0); - if (s) - unlock_user(s, arg0, 0); - return ret; - } + semihost_sys_rename(cs, common_semi_cb, arg0, arg1 + 1, arg2, arg3 + 1); + break; + case TARGET_SYS_CLOCK: - return clock() / (CLOCKS_PER_SEC / 100); + common_semi_set_ret(cs, clock() / (CLOCKS_PER_SEC / 100)); + break; + case TARGET_SYS_TIME: - return set_swi_errno(cs, time(NULL)); + ul_ret = time(NULL); + common_semi_cb(cs, ul_ret, ul_ret == -1 ? errno : 0); + break; + case TARGET_SYS_SYSTEM: GET_ARG(0); GET_ARG(1); - if (use_gdb_syscalls()) { - return common_semi_gdb_syscall(cs, common_semi_cb, "system,%s", - arg0, (int)arg1 + 1); - } else { - s = lock_user_string(arg0); - if (!s) { - errno = EFAULT; - return set_swi_errno(cs, -1); - } - ret = set_swi_errno(cs, system(s)); - unlock_user(s, arg0, 0); - return ret; - } + semihost_sys_system(cs, common_semi_cb, arg0, arg1 + 1); + break; + case TARGET_SYS_ERRNO: - return get_swi_errno(cs); + common_semi_set_ret(cs, get_swi_errno(cs)); + break; + case TARGET_SYS_GET_CMDLINE: { /* Build a command-line from the original argv. @@ -1102,7 +601,7 @@ target_ulong do_common_semihosting(CPUState *cs) #else unsigned int i; - output_size = ts->info->arg_end - ts->info->arg_start; + output_size = ts->info->env_strings - ts->info->arg_strings; if (!output_size) { /* * We special-case the "empty command line" case (argc==0). @@ -1114,22 +613,20 @@ target_ulong do_common_semihosting(CPUState *cs) if (output_size > input_size) { /* Not enough space to store command-line arguments. */ - errno = E2BIG; - return set_swi_errno(cs, -1); + common_semi_cb(cs, -1, E2BIG); + break; } /* Adjust the command-line length. */ if (SET_ARG(1, output_size - 1)) { /* Couldn't write back to argument block */ - errno = EFAULT; - return set_swi_errno(cs, -1); + goto do_fault; } /* Lock the buffer on the ARM side. */ output_buffer = lock_user(VERIFY_WRITE, arg0, output_size, 0); if (!output_buffer) { - errno = EFAULT; - return set_swi_errno(cs, -1); + goto do_fault; } /* Copy the command-line arguments. */ @@ -1142,11 +639,10 @@ target_ulong do_common_semihosting(CPUState *cs) goto out; } - if (copy_from_user(output_buffer, ts->info->arg_start, + if (copy_from_user(output_buffer, ts->info->arg_strings, output_size)) { - errno = EFAULT; - status = set_swi_errno(cs, -1); - goto out; + unlock_user(output_buffer, arg0, 0); + goto do_fault; } /* Separate arguments by white spaces. */ @@ -1159,18 +655,19 @@ target_ulong do_common_semihosting(CPUState *cs) #endif /* Unlock the buffer on the ARM side. */ unlock_user(output_buffer, arg0, output_size); - - return status; + common_semi_cb(cs, status, 0); } + break; + case TARGET_SYS_HEAPINFO: { target_ulong retvals[4]; - target_ulong limit; int i; #ifdef CONFIG_USER_ONLY TaskState *ts = cs->opaque; + target_ulong limit; #else - target_ulong rambase = common_semi_rambase(cs); + LayoutInfo info = common_semi_find_bases(cs); #endif GET_ARG(0); @@ -1201,12 +698,10 @@ target_ulong do_common_semihosting(CPUState *cs) retvals[2] = ts->stack_base; retvals[3] = 0; /* Stack limit. */ #else - limit = current_machine->ram_size; - /* TODO: Make this use the limit of the loaded application. 
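To make the SYS_TMPNAM buffer-size check earlier in this hunk concrete (all values illustrative):

    /* Suppose g_get_tmp_dir() == "/tmp", getpid() == 0x1a2b, arg1 == 0x07:
     *   generated name: "/tmp/qemu-1a2b07" -> 16 characters, len == 17
     *                                         once the trailing NUL is counted
     *   arg2 >= 17 : name plus terminator copied to the guest, returns 0
     *   arg2 <  17 : nothing copied, returns -1                            */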
*/ - retvals[0] = rambase + limit / 2; - retvals[1] = rambase + limit; - retvals[2] = rambase + limit; /* Stack base */ - retvals[3] = rambase; /* Stack limit. */ + retvals[0] = info.heapbase; /* Heap Base */ + retvals[1] = info.heaplimit; /* Heap Limit */ + retvals[2] = info.heaplimit; /* Stack base */ + retvals[3] = info.heapbase; /* Stack limit. */ #endif for (i = 0; i < ARRAY_SIZE(retvals); i++) { @@ -1220,12 +715,13 @@ target_ulong do_common_semihosting(CPUState *cs) if (fail) { /* Couldn't write back to argument block */ - errno = EFAULT; - return set_swi_errno(cs, -1); + goto do_fault; } } - return 0; + common_semi_set_ret(cs, 0); } + break; + case TARGET_SYS_EXIT: case TARGET_SYS_EXIT_EXTENDED: if (common_semi_sys_exit_extended(cs, nr)) { @@ -1255,36 +751,45 @@ target_ulong do_common_semihosting(CPUState *cs) } gdb_exit(ret); exit(ret); + case TARGET_SYS_ELAPSED: elapsed = get_clock() - clock_start; if (sizeof(target_ulong) == 8) { - SET_ARG(0, elapsed); + if (SET_ARG(0, elapsed)) { + goto do_fault; + } } else { - SET_ARG(0, (uint32_t) elapsed); - SET_ARG(1, (uint32_t) (elapsed >> 32)); + if (SET_ARG(0, (uint32_t) elapsed) || + SET_ARG(1, (uint32_t) (elapsed >> 32))) { + goto do_fault; + } } - return 0; + common_semi_set_ret(cs, 0); + break; + case TARGET_SYS_TICKFREQ: /* qemu always uses nsec */ - return 1000000000; + common_semi_set_ret(cs, 1000000000); + break; + case TARGET_SYS_SYNCCACHE: /* * Clean the D-cache and invalidate the I-cache for the specified * virtual address range. This is a nop for us since we don't * implement caches. This is only present on A64. */ -#ifdef TARGET_ARM - if (is_a64(cs->env_ptr)) { - return 0; + if (common_semi_has_synccache(env)) { + common_semi_set_ret(cs, 0); + break; } -#endif -#ifdef TARGET_RISCV - return 0; -#endif - /* fall through -- invalid for A32/T32 */ + /* fall through */ default: fprintf(stderr, "qemu: Unsupported SemiHosting SWI 0x%02x\n", nr); cpu_dump_state(cs, stderr, 0); abort(); + + do_fault: + common_semi_cb(cs, -1, EFAULT); + break; } } diff --git a/semihosting/config.c b/semihosting/config.c index 137171b717..89a1759687 100644 --- a/semihosting/config.c +++ b/semihosting/config.c @@ -27,12 +27,16 @@ QemuOptsList qemu_semihosting_config_opts = { .name = "semihosting-config", + .merge_lists = true, .implied_opt_name = "enable", .head = QTAILQ_HEAD_INITIALIZER(qemu_semihosting_config_opts.head), .desc = { { .name = "enable", .type = QEMU_OPT_BOOL, + }, { + .name = "userspace", + .type = QEMU_OPT_BOOL, }, { .name = "target", .type = QEMU_OPT_STRING, @@ -49,9 +53,9 @@ QemuOptsList qemu_semihosting_config_opts = { typedef struct SemihostingConfig { bool enabled; + bool userspace_enabled; SemihostingTarget target; - Chardev *chardev; - const char **argv; + char **argv; int argc; const char *cmdline; /* concatenated argv */ } SemihostingConfig; @@ -59,9 +63,9 @@ typedef struct SemihostingConfig { static SemihostingConfig semihosting; static const char *semihost_chardev; -bool semihosting_enabled(void) +bool semihosting_enabled(bool is_user) { - return semihosting.enabled; + return semihosting.enabled && (!is_user || semihosting.userspace_enabled); } SemihostingTarget semihosting_get_target(void) @@ -98,8 +102,8 @@ static int add_semihosting_arg(void *opaque, if (strcmp(name, "arg") == 0) { s->argc++; /* one extra element as g_strjoinv() expects NULL-terminated array */ - s->argv = g_realloc(s->argv, (s->argc + 1) * sizeof(void *)); - s->argv[s->argc - 1] = val; + s->argv = g_renew(char *, s->argv, s->argc + 1); + s->argv[s->argc - 
1] = g_strdup(val); s->argv[s->argc] = NULL; } return 0; @@ -121,11 +125,6 @@ void semihosting_arg_fallback(const char *file, const char *cmd) } } -Chardev *semihosting_get_chardev(void) -{ - return semihosting.chardev; -} - void qemu_semihosting_enable(void) { semihosting.enabled = true; @@ -142,6 +141,8 @@ int qemu_semihosting_config_options(const char *optarg) if (opts != NULL) { semihosting.enabled = qemu_opt_get_bool(opts, "enable", true); + semihosting.userspace_enabled = qemu_opt_get_bool(opts, "userspace", + false); const char *target = qemu_opt_get(opts, "target"); /* setup of chardev is deferred until they are initialised */ semihost_chardev = qemu_opt_get(opts, "chardev"); @@ -171,16 +172,19 @@ int qemu_semihosting_config_options(const char *optarg) return 0; } -void qemu_semihosting_connect_chardevs(void) +/* We had to defer this until chardevs were created */ +void qemu_semihosting_chardev_init(void) { - /* We had to defer this until chardevs were created */ + Chardev *chr = NULL; + if (semihost_chardev) { - Chardev *chr = qemu_chr_find(semihost_chardev); + chr = qemu_chr_find(semihost_chardev); if (chr == NULL) { error_report("semihosting chardev '%s' not found", semihost_chardev); exit(1); } - semihosting.chardev = chr; } + + qemu_semihosting_console_init(chr); } diff --git a/semihosting/console.c b/semihosting/console.c index ef6958d844..0f976fe8cb 100644 --- a/semihosting/console.c +++ b/semihosting/console.c @@ -27,89 +27,10 @@ #include "qapi/error.h" #include "qemu/fifo8.h" -int qemu_semihosting_log_out(const char *s, int len) -{ - Chardev *chardev = semihosting_get_chardev(); - if (chardev) { - return qemu_chr_write_all(chardev, (uint8_t *) s, len); - } else { - return write(STDERR_FILENO, s, len); - } -} - -/* - * A re-implementation of lock_user_string that we can use locally - * instead of relying on softmmu-semi. Hopefully we can deprecate that - * in time. Copy string until we find a 0 or address error. 
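The new "userspace" option registered above is what the reworked semihosting_enabled(is_user) checks: guest userspace code only gets semihosting access when it is explicitly switched on. An illustrative invocation (the machine and kernel arguments are placeholders; only the -semihosting-config spelling comes from this patch):

    qemu-system-arm -M virt -kernel app.elf \
        -semihosting-config enable=on,target=auto,userspace=on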
- */ -static GString *copy_user_string(CPUArchState *env, target_ulong addr) -{ - CPUState *cpu = env_cpu(env); - GString *s = g_string_sized_new(128); - uint8_t c; - - do { - if (cpu_memory_rw_debug(cpu, addr++, &c, 1, 0) == 0) { - if (c) { - s = g_string_append_c(s, c); - } - } else { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: passed inaccessible address " TARGET_FMT_lx, - __func__, addr); - break; - } - } while (c!=0); - - return s; -} - -static void semihosting_cb(CPUState *cs, target_ulong ret, target_ulong err) -{ - if (ret == (target_ulong) -1) { - qemu_log("%s: gdb console output failed ("TARGET_FMT_ld")", - __func__, err); - } -} - -int qemu_semihosting_console_outs(CPUArchState *env, target_ulong addr) -{ - GString *s = copy_user_string(env, addr); - int out = s->len; - - if (use_gdb_syscalls()) { - gdb_do_syscall(semihosting_cb, "write,2,%x,%x", addr, s->len); - } else { - out = qemu_semihosting_log_out(s->str, s->len); - } - - g_string_free(s, true); - return out; -} - -void qemu_semihosting_console_outc(CPUArchState *env, target_ulong addr) -{ - CPUState *cpu = env_cpu(env); - uint8_t c; - - if (cpu_memory_rw_debug(cpu, addr, &c, 1, 0) == 0) { - if (use_gdb_syscalls()) { - gdb_do_syscall(semihosting_cb, "write,2,%x,%x", addr, 1); - } else { - qemu_semihosting_log_out((const char *) &c, 1); - } - } else { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: passed inaccessible address " TARGET_FMT_lx, - __func__, addr); - } -} - -#define FIFO_SIZE 1024 - /* Access to this structure is protected by the BQL */ typedef struct SemihostingConsole { CharBackend backend; + Chardev *chr; GSList *sleeping_cpus; bool got; Fifo8 fifo; @@ -117,6 +38,8 @@ typedef struct SemihostingConsole { static SemihostingConsole console; +#define FIFO_SIZE 1024 + static int console_can_read(void *opaque) { SemihostingConsole *c = opaque; @@ -145,27 +68,59 @@ static void console_read(void *opaque, const uint8_t *buf, int size) c->sleeping_cpus = NULL; } -target_ulong qemu_semihosting_console_inc(CPUArchState *env) +bool qemu_semihosting_console_ready(void) { - uint8_t ch; SemihostingConsole *c = &console; + g_assert(qemu_mutex_iothread_locked()); - g_assert(current_cpu); - if (fifo8_is_empty(&c->fifo)) { - c->sleeping_cpus = g_slist_prepend(c->sleeping_cpus, current_cpu); - current_cpu->halted = 1; - current_cpu->exception_index = EXCP_HALTED; - cpu_loop_exit(current_cpu); - /* never returns */ - } - ch = fifo8_pop(&c->fifo); - return (target_ulong) ch; + return !fifo8_is_empty(&c->fifo); } -void qemu_semihosting_console_init(void) +void qemu_semihosting_console_block_until_ready(CPUState *cs) { - Chardev *chr = semihosting_get_chardev(); + SemihostingConsole *c = &console; + g_assert(qemu_mutex_iothread_locked()); + + /* Block if the fifo is completely empty. */ + if (fifo8_is_empty(&c->fifo)) { + c->sleeping_cpus = g_slist_prepend(c->sleeping_cpus, cs); + cs->halted = 1; + cs->exception_index = EXCP_HALTED; + cpu_loop_exit(cs); + /* never returns */ + } +} + +int qemu_semihosting_console_read(CPUState *cs, void *buf, int len) +{ + SemihostingConsole *c = &console; + int ret = 0; + + qemu_semihosting_console_block_until_ready(cs); + + /* Read until buffer full or fifo exhausted. */ + do { + *(char *)(buf + ret) = fifo8_pop(&c->fifo); + ret++; + } while (ret < len && !fifo8_is_empty(&c->fifo)); + + return ret; +} + +int qemu_semihosting_console_write(void *buf, int len) +{ + if (console.chr) { + int r = qemu_chr_write_all(console.chr, (uint8_t *)buf, len); + return r < 0 ? 
0 : r; + } else { + return fwrite(buf, 1, len, stderr); + } +} + +void qemu_semihosting_console_init(Chardev *chr) +{ + console.chr = chr; if (chr) { fifo8_create(&console.fifo, FIFO_SIZE); qemu_chr_fe_init(&console.backend, chr, &error_abort); @@ -175,4 +130,6 @@ void qemu_semihosting_console_init(void) NULL, NULL, &console, NULL, true); } + + qemu_semihosting_guestfd_init(); } diff --git a/semihosting/guestfd.c b/semihosting/guestfd.c new file mode 100644 index 0000000000..b05c52f26f --- /dev/null +++ b/semihosting/guestfd.c @@ -0,0 +1,160 @@ +/* + * Hosted file support for semihosting syscalls. + * + * Copyright (c) 2005, 2007 CodeSourcery. + * Copyright (c) 2019 Linaro + * Copyright © 2020 by Keith Packard + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "exec/gdbstub.h" +#include "semihosting/semihost.h" +#include "semihosting/guestfd.h" +#ifdef CONFIG_USER_ONLY +#include "qemu.h" +#else +#include "semihosting/softmmu-uaccess.h" +#include CONFIG_DEVICES +#endif + +static GArray *guestfd_array; + +#ifdef CONFIG_ARM_COMPATIBLE_SEMIHOSTING +GuestFD console_in_gf; +GuestFD console_out_gf; +#endif + +void qemu_semihosting_guestfd_init(void) +{ + /* New entries zero-initialized, i.e. type GuestFDUnused */ + guestfd_array = g_array_new(FALSE, TRUE, sizeof(GuestFD)); + +#ifdef CONFIG_ARM_COMPATIBLE_SEMIHOSTING + /* For ARM-compat, the console is in a separate namespace. */ + if (use_gdb_syscalls()) { + console_in_gf.type = GuestFDGDB; + console_in_gf.hostfd = 0; + console_out_gf.type = GuestFDGDB; + console_out_gf.hostfd = 2; + } else { + console_in_gf.type = GuestFDConsole; + console_out_gf.type = GuestFDConsole; + } +#else + /* Otherwise, the stdio file descriptors apply. */ + guestfd_array = g_array_set_size(guestfd_array, 3); +#ifndef CONFIG_USER_ONLY + if (!use_gdb_syscalls()) { + GuestFD *gf = &g_array_index(guestfd_array, GuestFD, 0); + gf[0].type = GuestFDConsole; + gf[1].type = GuestFDConsole; + gf[2].type = GuestFDConsole; + return; + } +#endif + associate_guestfd(0, 0); + associate_guestfd(1, 1); + associate_guestfd(2, 2); +#endif +} + +/* + * Allocate a new guest file descriptor and return it; if we + * couldn't allocate a new fd then return -1. + * This is a fairly simplistic implementation because we don't + * expect that most semihosting guest programs will make very + * heavy use of opening and closing fds. + */ +int alloc_guestfd(void) +{ + guint i; + + /* SYS_OPEN should return nonzero handle on success. Start guestfd from 1 */ + for (i = 1; i < guestfd_array->len; i++) { + GuestFD *gf = &g_array_index(guestfd_array, GuestFD, i); + + if (gf->type == GuestFDUnused) { + return i; + } + } + + /* All elements already in use: expand the array */ + g_array_set_size(guestfd_array, i + 1); + return i; +} + +static void do_dealloc_guestfd(GuestFD *gf) +{ + gf->type = GuestFDUnused; +} + +/* + * Look up the guestfd in the data structure; return NULL + * for out of bounds, but don't check whether the slot is unused. + * This is used internally by the other guestfd functions. + */ +static GuestFD *do_get_guestfd(int guestfd) +{ + if (guestfd < 0 || guestfd >= guestfd_array->len) { + return NULL; + } + + return &g_array_index(guestfd_array, GuestFD, guestfd); +} + +/* + * Given a guest file descriptor, get the associated struct. + * If the fd is not valid, return NULL. This is the function + * used by the various semihosting calls to validate a handle + * from the guest. 
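Taken together, the helpers in this file are intended to be used in the order below; a condensed lifecycle sketch (error handling omitted, and note that alloc/dealloc can invalidate any GuestFD pointer previously returned):

    int guestfd = alloc_guestfd();        /* reserve a slot; handles start at 1 */
    associate_guestfd(guestfd, hostfd);   /* bind it to a host or gdb fd        */

    GuestFD *gf = get_guestfd(guestfd);   /* validate a handle from the guest;  */
                                          /* NULL if unused or out of range     */

    dealloc_guestfd(guestfd);             /* release the slot only -- the host  */
                                          /* fd itself is not closed here       */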
+ * Note: calling alloc_guestfd() or dealloc_guestfd() will + * invalidate any GuestFD* obtained by calling this function. + */ +GuestFD *get_guestfd(int guestfd) +{ + GuestFD *gf = do_get_guestfd(guestfd); + + if (!gf || gf->type == GuestFDUnused) { + return NULL; + } + return gf; +} + +/* + * Associate the specified guest fd (which must have been + * allocated via alloc_fd() and not previously used) with + * the specified host/gdb fd. + */ +void associate_guestfd(int guestfd, int hostfd) +{ + GuestFD *gf = do_get_guestfd(guestfd); + + assert(gf); + gf->type = use_gdb_syscalls() ? GuestFDGDB : GuestFDHost; + gf->hostfd = hostfd; +} + +void staticfile_guestfd(int guestfd, const uint8_t *data, size_t len) +{ + GuestFD *gf = do_get_guestfd(guestfd); + + assert(gf); + gf->type = GuestFDStatic; + gf->staticfile.data = data; + gf->staticfile.len = len; + gf->staticfile.off = 0; +} + +/* + * Deallocate the specified guest file descriptor. This doesn't + * close the host fd, it merely undoes the work of alloc_fd(). + */ +void dealloc_guestfd(int guestfd) +{ + GuestFD *gf = do_get_guestfd(guestfd); + + assert(gf); + do_dealloc_guestfd(gf); +} diff --git a/semihosting/meson.build b/semihosting/meson.build index ea8090abe3..8057db5494 100644 --- a/semihosting/meson.build +++ b/semihosting/meson.build @@ -1,6 +1,12 @@ specific_ss.add(when: 'CONFIG_SEMIHOSTING', if_true: files( + 'guestfd.c', + 'syscalls.c', +)) + +specific_ss.add(when: ['CONFIG_SEMIHOSTING', 'CONFIG_SOFTMMU'], if_true: files( 'config.c', 'console.c', + 'uaccess.c', )) specific_ss.add(when: ['CONFIG_ARM_COMPATIBLE_SEMIHOSTING'], diff --git a/semihosting/syscalls.c b/semihosting/syscalls.c new file mode 100644 index 0000000000..508a0ad88c --- /dev/null +++ b/semihosting/syscalls.c @@ -0,0 +1,978 @@ +/* + * Syscall implementations for semihosting. + * + * Copyright (c) 2022 Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "exec/gdbstub.h" +#include "semihosting/guestfd.h" +#include "semihosting/syscalls.h" +#include "semihosting/console.h" +#ifdef CONFIG_USER_ONLY +#include "qemu.h" +#else +#include "semihosting/softmmu-uaccess.h" +#endif + + +/* + * Validate or compute the length of the string (including terminator). + */ +static int validate_strlen(CPUState *cs, target_ulong str, target_ulong tlen) +{ + CPUArchState *env G_GNUC_UNUSED = cs->env_ptr; + char c; + + if (tlen == 0) { + ssize_t slen = target_strlen(str); + + if (slen < 0) { + return -EFAULT; + } + if (slen >= INT32_MAX) { + return -ENAMETOOLONG; + } + return slen + 1; + } + if (tlen > INT32_MAX) { + return -ENAMETOOLONG; + } + if (get_user_u8(c, str + tlen - 1)) { + return -EFAULT; + } + if (c != 0) { + return -EINVAL; + } + return tlen; +} + +static int validate_lock_user_string(char **pstr, CPUState *cs, + target_ulong tstr, target_ulong tlen) +{ + int ret = validate_strlen(cs, tstr, tlen); + CPUArchState *env G_GNUC_UNUSED = cs->env_ptr; + char *str = NULL; + + if (ret > 0) { + str = lock_user(VERIFY_READ, tstr, ret, true); + ret = str ? 0 : -EFAULT; + } + *pstr = str; + return ret; +} + +/* + * TODO: Note that gdb always stores the stat structure big-endian. + * So far, that's ok, as the only two targets using this are also + * big-endian. Until we do something with gdb, also produce the + * same big-endian result from the host. 
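A note on the length convention validate_strlen() above expects, since it explains the many "arg + 1" call sites in arm-compat-semi.c: the Arm semihosting ABI reports string lengths without the NUL terminator, while these helpers want the size including it. Worked example (illustrative values):

    /* "file.bin" in guest memory: 8 characters followed by a NUL byte.
     *   The guest's SYS_REMOVE block says length == 8 (no terminator),
     *   so the caller passes 8 + 1 == 9 and validate_strlen() checks that
     *   the byte at fname + 9 - 1 really is '\0'.
     *   Passing tlen == 0 instead asks validate_strlen() to measure the
     *   string itself with target_strlen().                               */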
+ */ +static int copy_stat_to_user(CPUState *cs, target_ulong addr, + const struct stat *s) +{ + CPUArchState *env G_GNUC_UNUSED = cs->env_ptr; + struct gdb_stat *p; + + if (s->st_dev != (uint32_t)s->st_dev || + s->st_ino != (uint32_t)s->st_ino) { + return -EOVERFLOW; + } + + p = lock_user(VERIFY_WRITE, addr, sizeof(struct gdb_stat), 0); + if (!p) { + return -EFAULT; + } + + p->gdb_st_dev = cpu_to_be32(s->st_dev); + p->gdb_st_ino = cpu_to_be32(s->st_ino); + p->gdb_st_mode = cpu_to_be32(s->st_mode); + p->gdb_st_nlink = cpu_to_be32(s->st_nlink); + p->gdb_st_uid = cpu_to_be32(s->st_uid); + p->gdb_st_gid = cpu_to_be32(s->st_gid); + p->gdb_st_rdev = cpu_to_be32(s->st_rdev); + p->gdb_st_size = cpu_to_be64(s->st_size); +#ifdef _WIN32 + /* Windows stat is missing some fields. */ + p->gdb_st_blksize = 0; + p->gdb_st_blocks = 0; +#else + p->gdb_st_blksize = cpu_to_be64(s->st_blksize); + p->gdb_st_blocks = cpu_to_be64(s->st_blocks); +#endif + p->gdb_st_atime = cpu_to_be32(s->st_atime); + p->gdb_st_mtime = cpu_to_be32(s->st_mtime); + p->gdb_st_ctime = cpu_to_be32(s->st_ctime); + + unlock_user(p, addr, sizeof(struct gdb_stat)); + return 0; +} + +/* + * GDB semihosting syscall implementations. + */ + +static gdb_syscall_complete_cb gdb_open_complete; + +static void gdb_open_cb(CPUState *cs, uint64_t ret, int err) +{ + if (!err) { + int guestfd = alloc_guestfd(); + associate_guestfd(guestfd, ret); + ret = guestfd; + } + gdb_open_complete(cs, ret, err); +} + +static void gdb_open(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong fname, target_ulong fname_len, + int gdb_flags, int mode) +{ + int len = validate_strlen(cs, fname, fname_len); + if (len < 0) { + complete(cs, -1, -len); + return; + } + + gdb_open_complete = complete; + gdb_do_syscall(gdb_open_cb, "open,%s,%x,%x", + fname, len, (target_ulong)gdb_flags, (target_ulong)mode); +} + +static void gdb_close(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf) +{ + gdb_do_syscall(complete, "close,%x", (target_ulong)gf->hostfd); +} + +static void gdb_read(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, target_ulong buf, target_ulong len) +{ + gdb_do_syscall(complete, "read,%x,%x,%x", + (target_ulong)gf->hostfd, buf, len); +} + +static void gdb_write(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, target_ulong buf, target_ulong len) +{ + gdb_do_syscall(complete, "write,%x,%x,%x", + (target_ulong)gf->hostfd, buf, len); +} + +static void gdb_lseek(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, int64_t off, int gdb_whence) +{ + gdb_do_syscall(complete, "lseek,%x,%lx,%x", + (target_ulong)gf->hostfd, off, (target_ulong)gdb_whence); +} + +static void gdb_isatty(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf) +{ + gdb_do_syscall(complete, "isatty,%x", (target_ulong)gf->hostfd); +} + +static void gdb_fstat(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, target_ulong addr) +{ + gdb_do_syscall(complete, "fstat,%x,%x", (target_ulong)gf->hostfd, addr); +} + +static void gdb_stat(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong fname, target_ulong fname_len, + target_ulong addr) +{ + int len = validate_strlen(cs, fname, fname_len); + if (len < 0) { + complete(cs, -1, -len); + return; + } + + gdb_do_syscall(complete, "stat,%s,%x", fname, len, addr); +} + +static void gdb_remove(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong fname, target_ulong fname_len) +{ + int len = validate_strlen(cs, fname, fname_len); + if (len < 0) { + complete(cs, -1, 
-len); + return; + } + + gdb_do_syscall(complete, "unlink,%s", fname, len); +} + +static void gdb_rename(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong oname, target_ulong oname_len, + target_ulong nname, target_ulong nname_len) +{ + int olen, nlen; + + olen = validate_strlen(cs, oname, oname_len); + if (olen < 0) { + complete(cs, -1, -olen); + return; + } + nlen = validate_strlen(cs, nname, nname_len); + if (nlen < 0) { + complete(cs, -1, -nlen); + return; + } + + gdb_do_syscall(complete, "rename,%s,%s", oname, olen, nname, nlen); +} + +static void gdb_system(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong cmd, target_ulong cmd_len) +{ + int len = validate_strlen(cs, cmd, cmd_len); + if (len < 0) { + complete(cs, -1, -len); + return; + } + + gdb_do_syscall(complete, "system,%s", cmd, len); +} + +static void gdb_gettimeofday(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong tv_addr, target_ulong tz_addr) +{ + gdb_do_syscall(complete, "gettimeofday,%x,%x", tv_addr, tz_addr); +} + +/* + * Host semihosting syscall implementations. + */ + +static void host_open(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong fname, target_ulong fname_len, + int gdb_flags, int mode) +{ + CPUArchState *env G_GNUC_UNUSED = cs->env_ptr; + char *p; + int ret, host_flags; + + ret = validate_lock_user_string(&p, cs, fname, fname_len); + if (ret < 0) { + complete(cs, -1, -ret); + return; + } + + if (gdb_flags & GDB_O_WRONLY) { + host_flags = O_WRONLY; + } else if (gdb_flags & GDB_O_RDWR) { + host_flags = O_RDWR; + } else { + host_flags = O_RDONLY; + } + if (gdb_flags & GDB_O_CREAT) { + host_flags |= O_CREAT; + } + if (gdb_flags & GDB_O_TRUNC) { + host_flags |= O_TRUNC; + } + if (gdb_flags & GDB_O_EXCL) { + host_flags |= O_EXCL; + } + + ret = open(p, host_flags, mode); + if (ret < 0) { + complete(cs, -1, errno); + } else { + int guestfd = alloc_guestfd(); + associate_guestfd(guestfd, ret); + complete(cs, guestfd, 0); + } + unlock_user(p, fname, 0); +} + +static void host_close(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf) +{ + /* + * Only close the underlying host fd if it's one we opened on behalf + * of the guest in SYS_OPEN. + */ + if (gf->hostfd != STDIN_FILENO && + gf->hostfd != STDOUT_FILENO && + gf->hostfd != STDERR_FILENO && + close(gf->hostfd) < 0) { + complete(cs, -1, errno); + } else { + complete(cs, 0, 0); + } +} + +static void host_read(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, target_ulong buf, target_ulong len) +{ + CPUArchState *env G_GNUC_UNUSED = cs->env_ptr; + void *ptr = lock_user(VERIFY_WRITE, buf, len, 0); + ssize_t ret; + + if (!ptr) { + complete(cs, -1, EFAULT); + return; + } + do { + ret = read(gf->hostfd, ptr, len); + } while (ret == -1 && errno == EINTR); + if (ret == -1) { + complete(cs, -1, errno); + unlock_user(ptr, buf, 0); + } else { + complete(cs, ret, 0); + unlock_user(ptr, buf, ret); + } +} + +static void host_write(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, target_ulong buf, target_ulong len) +{ + CPUArchState *env G_GNUC_UNUSED = cs->env_ptr; + void *ptr = lock_user(VERIFY_READ, buf, len, 1); + ssize_t ret; + + if (!ptr) { + complete(cs, -1, EFAULT); + return; + } + ret = write(gf->hostfd, ptr, len); + complete(cs, ret, ret == -1 ? errno : 0); + unlock_user(ptr, buf, 0); +} + +static void host_lseek(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, int64_t off, int whence) +{ + /* So far, all hosts use the same values. 
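A worked example of the flag translation host_open() above performs, taking mode index 4 ("w") from the gdb_open_modeflags table in arm-compat-semi.c:

    /* gdb_open_modeflags[4] == GDB_O_WRONLY | GDB_O_CREAT | GDB_O_TRUNC   */
    int host_flags = O_WRONLY;   /* GDB_O_WRONLY wins over RDONLY/RDWR     */
    host_flags |= O_CREAT;       /* GDB_O_CREAT                            */
    host_flags |= O_TRUNC;       /* GDB_O_TRUNC                            */
    /* ...ending in open(path, host_flags, 0644); the 0644 comes from the
     * semihost_sys_open() call sites in arm-compat-semi.c                 */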
*/ + QEMU_BUILD_BUG_ON(GDB_SEEK_SET != SEEK_SET); + QEMU_BUILD_BUG_ON(GDB_SEEK_CUR != SEEK_CUR); + QEMU_BUILD_BUG_ON(GDB_SEEK_END != SEEK_END); + + off_t ret = off; + int err = 0; + + if (ret == off) { + ret = lseek(gf->hostfd, ret, whence); + if (ret == -1) { + err = errno; + } + } else { + ret = -1; + err = EINVAL; + } + complete(cs, ret, err); +} + +static void host_isatty(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf) +{ + int ret = isatty(gf->hostfd); + complete(cs, ret, ret ? 0 : errno); +} + +static void host_flen(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf) +{ + struct stat buf; + + if (fstat(gf->hostfd, &buf) < 0) { + complete(cs, -1, errno); + } else { + complete(cs, buf.st_size, 0); + } +} + +static void host_fstat(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, target_ulong addr) +{ + struct stat buf; + int ret; + + ret = fstat(gf->hostfd, &buf); + if (ret) { + complete(cs, -1, errno); + return; + } + ret = copy_stat_to_user(cs, addr, &buf); + complete(cs, ret ? -1 : 0, ret ? -ret : 0); +} + +static void host_stat(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong fname, target_ulong fname_len, + target_ulong addr) +{ + CPUArchState *env G_GNUC_UNUSED = cs->env_ptr; + struct stat buf; + char *name; + int ret, err; + + ret = validate_lock_user_string(&name, cs, fname, fname_len); + if (ret < 0) { + complete(cs, -1, -ret); + return; + } + + ret = stat(name, &buf); + if (ret) { + err = errno; + } else { + ret = copy_stat_to_user(cs, addr, &buf); + err = 0; + if (ret < 0) { + err = -ret; + ret = -1; + } + } + complete(cs, ret, err); + unlock_user(name, fname, 0); +} + +static void host_remove(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong fname, target_ulong fname_len) +{ + CPUArchState *env G_GNUC_UNUSED = cs->env_ptr; + char *p; + int ret; + + ret = validate_lock_user_string(&p, cs, fname, fname_len); + if (ret < 0) { + complete(cs, -1, -ret); + return; + } + + ret = remove(p); + complete(cs, ret, ret ? errno : 0); + unlock_user(p, fname, 0); +} + +static void host_rename(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong oname, target_ulong oname_len, + target_ulong nname, target_ulong nname_len) +{ + CPUArchState *env G_GNUC_UNUSED = cs->env_ptr; + char *ostr, *nstr; + int ret; + + ret = validate_lock_user_string(&ostr, cs, oname, oname_len); + if (ret < 0) { + complete(cs, -1, -ret); + return; + } + ret = validate_lock_user_string(&nstr, cs, nname, nname_len); + if (ret < 0) { + unlock_user(ostr, oname, 0); + complete(cs, -1, -ret); + return; + } + + ret = rename(ostr, nstr); + complete(cs, ret, ret ? errno : 0); + unlock_user(ostr, oname, 0); + unlock_user(nstr, nname, 0); +} + +static void host_system(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong cmd, target_ulong cmd_len) +{ + CPUArchState *env G_GNUC_UNUSED = cs->env_ptr; + char *p; + int ret; + + ret = validate_lock_user_string(&p, cs, cmd, cmd_len); + if (ret < 0) { + complete(cs, -1, -ret); + return; + } + + ret = system(p); + complete(cs, ret, ret == -1 ? errno : 0); + unlock_user(p, cmd, 0); +} + +static void host_gettimeofday(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong tv_addr, target_ulong tz_addr) +{ + CPUArchState *env G_GNUC_UNUSED = cs->env_ptr; + struct gdb_timeval *p; + int64_t rt; + + /* GDB fails on non-null TZ, so be consistent. 
*/ + if (tz_addr != 0) { + complete(cs, -1, EINVAL); + return; + } + + p = lock_user(VERIFY_WRITE, tv_addr, sizeof(struct gdb_timeval), 0); + if (!p) { + complete(cs, -1, EFAULT); + return; + } + + /* TODO: Like stat, gdb always produces big-endian results; match it. */ + rt = g_get_real_time(); + p->tv_sec = cpu_to_be32(rt / G_USEC_PER_SEC); + p->tv_usec = cpu_to_be64(rt % G_USEC_PER_SEC); + unlock_user(p, tv_addr, sizeof(struct gdb_timeval)); +} + +#ifndef CONFIG_USER_ONLY +static void host_poll_one(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, GIOCondition cond, int timeout) +{ + /* + * Since this is only used by xtensa in system mode, and stdio is + * handled through GuestFDConsole, and there are no semihosting + * system calls for sockets and the like, that means this descriptor + * must be a normal file. Normal files never block and are thus + * always ready. + */ + complete(cs, cond & (G_IO_IN | G_IO_OUT), 0); +} +#endif + +/* + * Static file semihosting syscall implementations. + */ + +static void staticfile_read(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, target_ulong buf, target_ulong len) +{ + CPUArchState *env G_GNUC_UNUSED = cs->env_ptr; + target_ulong rest = gf->staticfile.len - gf->staticfile.off; + void *ptr; + + if (len > rest) { + len = rest; + } + ptr = lock_user(VERIFY_WRITE, buf, len, 0); + if (!ptr) { + complete(cs, -1, EFAULT); + return; + } + memcpy(ptr, gf->staticfile.data + gf->staticfile.off, len); + gf->staticfile.off += len; + complete(cs, len, 0); + unlock_user(ptr, buf, len); +} + +static void staticfile_lseek(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, int64_t off, int gdb_whence) +{ + int64_t ret; + + switch (gdb_whence) { + case GDB_SEEK_SET: + ret = off; + break; + case GDB_SEEK_CUR: + ret = gf->staticfile.off + off; + break; + case GDB_SEEK_END: + ret = gf->staticfile.len + off; + break; + default: + ret = -1; + break; + } + if (ret >= 0 && ret <= gf->staticfile.len) { + gf->staticfile.off = ret; + complete(cs, ret, 0); + } else { + complete(cs, -1, EINVAL); + } +} + +static void staticfile_flen(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf) +{ + complete(cs, gf->staticfile.len, 0); +} + +/* + * Console semihosting syscall implementations. + */ + +static void console_read(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, target_ulong buf, target_ulong len) +{ + CPUArchState *env G_GNUC_UNUSED = cs->env_ptr; + char *ptr; + int ret; + + ptr = lock_user(VERIFY_WRITE, buf, len, 0); + if (!ptr) { + complete(cs, -1, EFAULT); + return; + } + ret = qemu_semihosting_console_read(cs, ptr, len); + complete(cs, ret, 0); + unlock_user(ptr, buf, ret); +} + +static void console_write(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, target_ulong buf, target_ulong len) +{ + CPUArchState *env G_GNUC_UNUSED = cs->env_ptr; + char *ptr = lock_user(VERIFY_READ, buf, len, 1); + int ret; + + if (!ptr) { + complete(cs, -1, EFAULT); + return; + } + ret = qemu_semihosting_console_write(ptr, len); + complete(cs, ret ? ret : -1, ret ? 0 : EIO); + unlock_user(ptr, buf, 0); +} + +static void console_fstat(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, target_ulong addr) +{ + static const struct stat tty_buf = { + .st_mode = 020666, /* S_IFCHR, ugo+rw */ + .st_rdev = 5, /* makedev(5, 0) -- linux /dev/tty */ + }; + int ret; + + ret = copy_stat_to_user(cs, addr, &tty_buf); + complete(cs, ret ? -1 : 0, ret ? 
-ret : 0); +} + +#ifndef CONFIG_USER_ONLY +static void console_poll_one(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, GIOCondition cond, int timeout) +{ + /* The semihosting console does not support urgent data or errors. */ + cond &= G_IO_IN | G_IO_OUT; + + /* + * Since qemu_semihosting_console_write never blocks, we can + * consider output always ready -- leave G_IO_OUT alone. + * All that remains is to conditionally signal input ready. + * Since output ready causes an immediate return, only block + * for G_IO_IN alone. + * + * TODO: Implement proper timeout. For now, only support + * indefinite wait or immediate poll. + */ + if (cond == G_IO_IN && timeout < 0) { + qemu_semihosting_console_block_until_ready(cs); + /* We returned -- input must be ready. */ + } else if ((cond & G_IO_IN) && !qemu_semihosting_console_ready()) { + cond &= ~G_IO_IN; + } + + complete(cs, cond, 0); +} +#endif + +/* + * Syscall entry points. + */ + +void semihost_sys_open(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong fname, target_ulong fname_len, + int gdb_flags, int mode) +{ + if (use_gdb_syscalls()) { + gdb_open(cs, complete, fname, fname_len, gdb_flags, mode); + } else { + host_open(cs, complete, fname, fname_len, gdb_flags, mode); + } +} + +void semihost_sys_close(CPUState *cs, gdb_syscall_complete_cb complete, int fd) +{ + GuestFD *gf = get_guestfd(fd); + + if (!gf) { + complete(cs, -1, EBADF); + return; + } + switch (gf->type) { + case GuestFDGDB: + gdb_close(cs, complete, gf); + break; + case GuestFDHost: + host_close(cs, complete, gf); + break; + case GuestFDStatic: + case GuestFDConsole: + complete(cs, 0, 0); + break; + default: + g_assert_not_reached(); + } + dealloc_guestfd(fd); +} + +void semihost_sys_read_gf(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, target_ulong buf, target_ulong len) +{ + /* + * Bound length for 64-bit guests on 32-bit hosts, not overflowing ssize_t. + * Note the Linux kernel does this with MAX_RW_COUNT, so it's not a bad + * idea to do this unconditionally. + */ + if (len > INT32_MAX) { + len = INT32_MAX; + } + switch (gf->type) { + case GuestFDGDB: + gdb_read(cs, complete, gf, buf, len); + break; + case GuestFDHost: + host_read(cs, complete, gf, buf, len); + break; + case GuestFDStatic: + staticfile_read(cs, complete, gf, buf, len); + break; + case GuestFDConsole: + console_read(cs, complete, gf, buf, len); + break; + default: + g_assert_not_reached(); + } +} + +void semihost_sys_read(CPUState *cs, gdb_syscall_complete_cb complete, + int fd, target_ulong buf, target_ulong len) +{ + GuestFD *gf = get_guestfd(fd); + + if (gf) { + semihost_sys_read_gf(cs, complete, gf, buf, len); + } else { + complete(cs, -1, EBADF); + } +} + +void semihost_sys_write_gf(CPUState *cs, gdb_syscall_complete_cb complete, + GuestFD *gf, target_ulong buf, target_ulong len) +{ + /* + * Bound length for 64-bit guests on 32-bit hosts, not overflowing ssize_t. + * Note the Linux kernel does this with MAX_RW_COUNT, so it's not a bad + * idea to do this unconditionally. + */ + if (len > INT32_MAX) { + len = INT32_MAX; + } + switch (gf->type) { + case GuestFDGDB: + gdb_write(cs, complete, gf, buf, len); + break; + case GuestFDHost: + host_write(cs, complete, gf, buf, len); + break; + case GuestFDConsole: + console_write(cs, complete, gf, buf, len); + break; + case GuestFDStatic: + /* Static files are never open for writing: EBADF. 
*/ + complete(cs, -1, EBADF); + break; + default: + g_assert_not_reached(); + } +} + +void semihost_sys_write(CPUState *cs, gdb_syscall_complete_cb complete, + int fd, target_ulong buf, target_ulong len) +{ + GuestFD *gf = get_guestfd(fd); + + if (gf) { + semihost_sys_write_gf(cs, complete, gf, buf, len); + } else { + complete(cs, -1, EBADF); + } +} + +void semihost_sys_lseek(CPUState *cs, gdb_syscall_complete_cb complete, + int fd, int64_t off, int gdb_whence) +{ + GuestFD *gf = get_guestfd(fd); + + if (!gf) { + complete(cs, -1, EBADF); + return; + } + switch (gf->type) { + case GuestFDGDB: + gdb_lseek(cs, complete, gf, off, gdb_whence); + return; + case GuestFDHost: + host_lseek(cs, complete, gf, off, gdb_whence); + break; + case GuestFDStatic: + staticfile_lseek(cs, complete, gf, off, gdb_whence); + break; + case GuestFDConsole: + complete(cs, -1, ESPIPE); + break; + default: + g_assert_not_reached(); + } +} + +void semihost_sys_isatty(CPUState *cs, gdb_syscall_complete_cb complete, int fd) +{ + GuestFD *gf = get_guestfd(fd); + + if (!gf) { + complete(cs, 0, EBADF); + return; + } + switch (gf->type) { + case GuestFDGDB: + gdb_isatty(cs, complete, gf); + break; + case GuestFDHost: + host_isatty(cs, complete, gf); + break; + case GuestFDStatic: + complete(cs, 0, ENOTTY); + break; + case GuestFDConsole: + complete(cs, 1, 0); + break; + default: + g_assert_not_reached(); + } +} + +void semihost_sys_flen(CPUState *cs, gdb_syscall_complete_cb fstat_cb, + gdb_syscall_complete_cb flen_cb, int fd, + target_ulong fstat_addr) +{ + GuestFD *gf = get_guestfd(fd); + + if (!gf) { + flen_cb(cs, -1, EBADF); + return; + } + switch (gf->type) { + case GuestFDGDB: + gdb_fstat(cs, fstat_cb, gf, fstat_addr); + break; + case GuestFDHost: + host_flen(cs, flen_cb, gf); + break; + case GuestFDStatic: + staticfile_flen(cs, flen_cb, gf); + break; + case GuestFDConsole: + default: + g_assert_not_reached(); + } +} + +void semihost_sys_fstat(CPUState *cs, gdb_syscall_complete_cb complete, + int fd, target_ulong addr) +{ + GuestFD *gf = get_guestfd(fd); + + if (!gf) { + complete(cs, -1, EBADF); + return; + } + switch (gf->type) { + case GuestFDGDB: + gdb_fstat(cs, complete, gf, addr); + break; + case GuestFDHost: + host_fstat(cs, complete, gf, addr); + break; + case GuestFDConsole: + console_fstat(cs, complete, gf, addr); + break; + case GuestFDStatic: + default: + g_assert_not_reached(); + } +} + +void semihost_sys_stat(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong fname, target_ulong fname_len, + target_ulong addr) +{ + if (use_gdb_syscalls()) { + gdb_stat(cs, complete, fname, fname_len, addr); + } else { + host_stat(cs, complete, fname, fname_len, addr); + } +} + +void semihost_sys_remove(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong fname, target_ulong fname_len) +{ + if (use_gdb_syscalls()) { + gdb_remove(cs, complete, fname, fname_len); + } else { + host_remove(cs, complete, fname, fname_len); + } +} + +void semihost_sys_rename(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong oname, target_ulong oname_len, + target_ulong nname, target_ulong nname_len) +{ + if (use_gdb_syscalls()) { + gdb_rename(cs, complete, oname, oname_len, nname, nname_len); + } else { + host_rename(cs, complete, oname, oname_len, nname, nname_len); + } +} + +void semihost_sys_system(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong cmd, target_ulong cmd_len) +{ + if (use_gdb_syscalls()) { + gdb_system(cs, complete, cmd, cmd_len); + } else { + host_system(cs, complete, cmd, 
cmd_len); + } +} + +void semihost_sys_gettimeofday(CPUState *cs, gdb_syscall_complete_cb complete, + target_ulong tv_addr, target_ulong tz_addr) +{ + if (use_gdb_syscalls()) { + gdb_gettimeofday(cs, complete, tv_addr, tz_addr); + } else { + host_gettimeofday(cs, complete, tv_addr, tz_addr); + } +} + +#ifndef CONFIG_USER_ONLY +void semihost_sys_poll_one(CPUState *cs, gdb_syscall_complete_cb complete, + int fd, GIOCondition cond, int timeout) +{ + GuestFD *gf = get_guestfd(fd); + + if (!gf) { + complete(cs, G_IO_NVAL, 1); + return; + } + switch (gf->type) { + case GuestFDGDB: + complete(cs, G_IO_NVAL, 1); + break; + case GuestFDHost: + host_poll_one(cs, complete, gf, cond, timeout); + break; + case GuestFDConsole: + console_poll_one(cs, complete, gf, cond, timeout); + break; + case GuestFDStatic: + default: + g_assert_not_reached(); + } +} +#endif diff --git a/semihosting/uaccess.c b/semihosting/uaccess.c new file mode 100644 index 0000000000..8018828069 --- /dev/null +++ b/semihosting/uaccess.c @@ -0,0 +1,91 @@ +/* + * Helper routines to provide target memory access for semihosting + * syscalls in system emulation mode. + * + * Copyright (c) 2007 CodeSourcery. + * + * This code is licensed under the GPL + */ + +#include "qemu/osdep.h" +#include "exec/exec-all.h" +#include "semihosting/softmmu-uaccess.h" + +void *softmmu_lock_user(CPUArchState *env, target_ulong addr, + target_ulong len, bool copy) +{ + void *p = malloc(len); + if (p && copy) { + if (cpu_memory_rw_debug(env_cpu(env), addr, p, len, 0)) { + free(p); + p = NULL; + } + } + return p; +} + +ssize_t softmmu_strlen_user(CPUArchState *env, target_ulong addr) +{ + int mmu_idx = cpu_mmu_index(env, false); + size_t len = 0; + + while (1) { + size_t left_in_page; + int flags; + void *h; + + /* Find the number of bytes remaining in the page. */ + left_in_page = TARGET_PAGE_SIZE - (addr & ~TARGET_PAGE_MASK); + + flags = probe_access_flags(env, addr, MMU_DATA_LOAD, + mmu_idx, true, &h, 0); + if (flags & TLB_INVALID_MASK) { + return -1; + } + if (flags & TLB_MMIO) { + do { + uint8_t c; + if (cpu_memory_rw_debug(env_cpu(env), addr, &c, 1, 0)) { + return -1; + } + if (c == 0) { + return len; + } + addr++; + len++; + if (len > INT32_MAX) { + return -1; + } + } while (--left_in_page != 0); + } else { + char *p = memchr(h, 0, left_in_page); + if (p) { + len += p - (char *)h; + return len <= INT32_MAX ? (ssize_t)len : -1; + } + addr += left_in_page; + len += left_in_page; + if (len > INT32_MAX) { + return -1; + } + } + } +} + +char *softmmu_lock_user_string(CPUArchState *env, target_ulong addr) +{ + ssize_t len = softmmu_strlen_user(env, addr); + if (len < 0) { + return NULL; + } + return softmmu_lock_user(env, addr, len + 1, true); +} + +void softmmu_unlock_user(CPUArchState *env, void *p, + target_ulong addr, target_ulong len) +{ + if (len) { + cpu_memory_rw_debug(env_cpu(env), addr, p, len, 1); + } + free(p); +} diff --git a/slirp b/slirp deleted file mode 160000 index a88d9ace23..0000000000 --- a/slirp +++ /dev/null @@ -1 +0,0 @@ -Subproject commit a88d9ace234a24ce1c17189642ef9104799425e0 diff --git a/softmmu/arch_init.c b/softmmu/arch_init.c index 6ff9f30bad..79716f959b 100644 --- a/softmmu/arch_init.c +++ b/softmmu/arch_init.c @@ -22,14 +22,8 @@ * THE SOFTWARE. 
*/ #include "qemu/osdep.h" +#include "qemu/module.h" #include "sysemu/arch_init.h" -#include "hw/pci/pci.h" -#include "hw/audio/soundhw.h" -#include "qapi/error.h" -#include "qemu/config-file.h" -#include "qemu/error-report.h" -#include "hw/acpi/acpi.h" -#include "qemu/help_option.h" #ifdef TARGET_SPARC int graphic_width = 1024; @@ -45,63 +39,12 @@ int graphic_height = 600; int graphic_depth = 32; #endif - -#if defined(TARGET_ALPHA) -#define QEMU_ARCH QEMU_ARCH_ALPHA -#elif defined(TARGET_ARM) -#define QEMU_ARCH QEMU_ARCH_ARM -#elif defined(TARGET_CRIS) -#define QEMU_ARCH QEMU_ARCH_CRIS -#elif defined(TARGET_HPPA) -#define QEMU_ARCH QEMU_ARCH_HPPA -#elif defined(TARGET_I386) -#define QEMU_ARCH QEMU_ARCH_I386 -#elif defined(TARGET_M68K) -#define QEMU_ARCH QEMU_ARCH_M68K -#elif defined(TARGET_MICROBLAZE) -#define QEMU_ARCH QEMU_ARCH_MICROBLAZE -#elif defined(TARGET_MIPS) -#define QEMU_ARCH QEMU_ARCH_MIPS -#elif defined(TARGET_NIOS2) -#define QEMU_ARCH QEMU_ARCH_NIOS2 -#elif defined(TARGET_OPENRISC) -#define QEMU_ARCH QEMU_ARCH_OPENRISC -#elif defined(TARGET_PPC) -#define QEMU_ARCH QEMU_ARCH_PPC -#elif defined(TARGET_RISCV) -#define QEMU_ARCH QEMU_ARCH_RISCV -#elif defined(TARGET_RX) -#define QEMU_ARCH QEMU_ARCH_RX -#elif defined(TARGET_S390X) -#define QEMU_ARCH QEMU_ARCH_S390X -#elif defined(TARGET_SH4) -#define QEMU_ARCH QEMU_ARCH_SH4 -#elif defined(TARGET_SPARC) -#define QEMU_ARCH QEMU_ARCH_SPARC -#elif defined(TARGET_TRICORE) -#define QEMU_ARCH QEMU_ARCH_TRICORE -#elif defined(TARGET_XTENSA) -#define QEMU_ARCH QEMU_ARCH_XTENSA -#elif defined(TARGET_AVR) -#define QEMU_ARCH QEMU_ARCH_AVR -#endif - const uint32_t arch_type = QEMU_ARCH; -int kvm_available(void) +void qemu_init_arch_modules(void) { -#ifdef CONFIG_KVM - return 1; -#else - return 0; -#endif -} - -int xen_available(void) -{ -#ifdef CONFIG_XEN - return 1; -#else - return 0; +#ifdef CONFIG_MODULES + module_init_info(qemu_modinfo); + module_allow_arch(TARGET_NAME); #endif } diff --git a/softmmu/bootdevice.c b/softmmu/bootdevice.c index add4e3d2d1..2106f1026f 100644 --- a/softmmu/bootdevice.c +++ b/softmmu/bootdevice.c @@ -166,7 +166,7 @@ void add_boot_device_path(int32_t bootindex, DeviceState *dev, del_boot_device_path(dev, suffix); - node = g_malloc0(sizeof(FWBootEntry)); + node = g_new0(FWBootEntry, 1); node->bootindex = bootindex; node->suffix = g_strdup(suffix); node->dev = dev; @@ -268,7 +268,8 @@ char *get_boot_devices_list(size_t *size) *size = total; - if (boot_strict && *size > 0) { + if (current_machine->boot_config.has_strict && + current_machine->boot_config.strict && *size > 0) { list[total-1] = '\n'; list = g_realloc(list, total + 5); memcpy(&list[total], "HALT", 5); @@ -367,7 +368,7 @@ void add_boot_device_lchs(DeviceState *dev, const char *suffix, assert(dev != NULL || suffix != NULL); - node = g_malloc0(sizeof(FWLCHSEntry)); + node = g_new0(FWLCHSEntry, 1); node->suffix = g_strdup(suffix); node->dev = dev; node->lcyls = lcyls; diff --git a/softmmu/cpu-throttle.c b/softmmu/cpu-throttle.c index 8c2144ab95..d9bb30a223 100644 --- a/softmmu/cpu-throttle.c +++ b/softmmu/cpu-throttle.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/thread.h" #include "hw/core/cpu.h" #include "qemu/main-loop.h" diff --git a/softmmu/cpu-timers.c b/softmmu/cpu-timers.c index 34ddfa02f1..117408cb83 100644 --- a/softmmu/cpu-timers.c +++ b/softmmu/cpu-timers.c @@ -23,12 +23,10 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/cutils.h" #include "migration/vmstate.h" #include 
"qapi/error.h" #include "qemu/error-report.h" -#include "exec/exec-all.h" #include "sysemu/cpus.h" #include "qemu/main-loop.h" #include "qemu/option.h" diff --git a/softmmu/cpus.c b/softmmu/cpus.c index 071085f840..5a584a8d57 100644 --- a/softmmu/cpus.c +++ b/softmmu/cpus.c @@ -23,8 +23,8 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "monitor/monitor.h" +#include "qemu/coroutine-tls.h" #include "qapi/error.h" #include "qapi/qapi-commands-machine.h" #include "qapi/qapi-commands-misc.h" @@ -32,7 +32,7 @@ #include "qapi/qmp/qerror.h" #include "exec/gdbstub.h" #include "sysemu/hw_accel.h" -#include "exec/exec-all.h" +#include "exec/cpu-common.h" #include "qemu/thread.h" #include "qemu/plugin.h" #include "sysemu/cpus.h" @@ -66,6 +66,11 @@ static QemuMutex qemu_global_mutex; +/* + * The chosen accelerator is supposed to register this. + */ +static const AccelOpsClass *cpus_accel; + bool cpu_is_stopped(CPUState *cpu) { return cpu->stopped || !runstate_is_running(); @@ -73,12 +78,7 @@ bool cpu_is_stopped(CPUState *cpu) bool cpu_work_list_empty(CPUState *cpu) { - bool ret; - - qemu_mutex_lock(&cpu->work_mutex); - ret = QSIMPLEQ_EMPTY(&cpu->work_list); - qemu_mutex_unlock(&cpu->work_mutex); - return ret; + return QSIMPLEQ_EMPTY_ATOMIC(&cpu->work_list); } bool cpu_thread_is_idle(CPUState *cpu) @@ -89,10 +89,12 @@ bool cpu_thread_is_idle(CPUState *cpu) if (cpu_is_stopped(cpu)) { return true; } - if (!cpu->halted || cpu_has_work(cpu) || - kvm_halt_in_kernel() || whpx_apic_in_platform()) { + if (!cpu->halted || cpu_has_work(cpu)) { return false; } + if (cpus_accel->cpu_thread_is_idle) { + return cpus_accel->cpu_thread_is_idle(cpu); + } return true; } @@ -126,11 +128,6 @@ void hw_error(const char *fmt, ...) abort(); } -/* - * The chosen accelerator is supposed to register this. - */ -static const AccelOpsClass *cpus_accel; - void cpu_synchronize_all_states(void) { CPUState *cpu; @@ -197,7 +194,10 @@ void cpu_synchronize_pre_loadvm(CPUState *cpu) bool cpus_are_resettable(void) { - return cpu_check_are_resettable(); + if (cpus_accel->cpus_are_resettable) { + return cpus_accel->cpus_are_resettable(); + } + return true; } int64_t cpus_get_virtual_clock(void) @@ -352,6 +352,10 @@ static void qemu_init_sigbus(void) { struct sigaction action; + /* + * ALERT: when modifying this, take care that SIGBUS forwarding in + * qemu_prealloc_mem() will continue working as expected. 
+ */ memset(&action, 0, sizeof(action)); action.sa_flags = SA_SIGINFO; action.sa_sigaction = sigbus_handler; @@ -433,18 +437,19 @@ void qemu_wait_io_event(CPUState *cpu) void cpus_kick_thread(CPUState *cpu) { -#ifndef _WIN32 - int err; - if (cpu->thread_kicked) { return; } cpu->thread_kicked = true; - err = pthread_kill(cpu->thread->thread, SIG_IPI); + +#ifndef _WIN32 + int err = pthread_kill(cpu->thread->thread, SIG_IPI); if (err && err != ESRCH) { fprintf(stderr, "qemu:%s: %s", __func__, strerror(err)); exit(1); } +#else + qemu_sem_post(&cpu->sem); #endif } @@ -474,11 +479,16 @@ bool qemu_in_vcpu_thread(void) return current_cpu && qemu_cpu_is_self(current_cpu); } -static __thread bool iothread_locked = false; +QEMU_DEFINE_STATIC_CO_TLS(bool, iothread_locked) bool qemu_mutex_iothread_locked(void) { - return iothread_locked; + return get_iothread_locked(); +} + +bool qemu_in_main_thread(void) +{ + return qemu_mutex_iothread_locked(); } /* @@ -491,13 +501,13 @@ void qemu_mutex_lock_iothread_impl(const char *file, int line) g_assert(!qemu_mutex_iothread_locked()); bql_lock(&qemu_global_mutex, file, line); - iothread_locked = true; + set_iothread_locked(true); } void qemu_mutex_unlock_iothread(void) { g_assert(qemu_mutex_iothread_locked()); - iothread_locked = false; + set_iothread_locked(false); qemu_mutex_unlock(&qemu_global_mutex); } @@ -608,6 +618,13 @@ void cpus_register_accel(const AccelOpsClass *ops) cpus_accel = ops; } +const AccelOpsClass *cpus_get_accel(void) +{ + /* broken if we call this early */ + assert(cpus_accel); + return cpus_accel; +} + void qemu_init_vcpu(CPUState *cpu) { MachineState *ms = MACHINE(qdev_get_machine()); @@ -663,7 +680,7 @@ int vm_stop(RunState state) * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already * running or in case of an error condition), 0 otherwise. */ -int vm_prepare_start(void) +int vm_prepare_start(bool step_pending) { RunState requested; @@ -683,6 +700,14 @@ int vm_prepare_start(void) return -1; } + /* + * WHPX accelerator needs to know whether we are going to step + * any CPUs, before starting the first one. + */ + if (cpus_accel->synchronize_pre_resume) { + cpus_accel->synchronize_pre_resume(step_pending); + } + /* We are sending this now, but the CPUs will be resumed shortly later */ qapi_event_send_resume(); @@ -694,7 +719,7 @@ int vm_prepare_start(void) void vm_start(void) { - if (!vm_prepare_start()) { + if (!vm_prepare_start(false)) { resume_all_vcpus(); } } @@ -718,14 +743,6 @@ int vm_stop_force_state(RunState state) } } -void list_cpus(const char *optarg) -{ - /* XXX: implement xxx_cpu_list for targets that still miss it */ -#if defined(cpu_list) - cpu_list(); -#endif -} - void qmp_memsave(int64_t addr, int64_t size, const char *filename, bool has_cpu, int64_t cpu_index, Error **errp) { diff --git a/softmmu/datadir.c b/softmmu/datadir.c index de156e4b6f..a48066ac32 100644 --- a/softmmu/datadir.c +++ b/softmmu/datadir.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "qemu/cutils.h" #include "trace.h" @@ -84,40 +83,22 @@ void qemu_add_data_dir(char *path) data_dir[data_dir_idx++] = path; } -/* - * Find a likely location for support files using the location of the binary. - * When running from the build tree this will be "$bindir/pc-bios". - * Otherwise, this is CONFIG_QEMU_DATADIR (possibly relocated). - * - * The caller must use g_free() to free the returned data when it is - * no longer required. 
- */ -static char *find_datadir(void) -{ - g_autofree char *dir = NULL; - - dir = g_build_filename(qemu_get_exec_dir(), "pc-bios", NULL); - if (g_file_test(dir, G_FILE_TEST_IS_DIR)) { - return g_steal_pointer(&dir); - } - - return get_relocated_path(CONFIG_QEMU_DATADIR); -} - void qemu_add_default_firmwarepath(void) { - char **dirs; + static const char * const dirs[] = { + CONFIG_QEMU_FIRMWAREPATH + NULL + }; + size_t i; /* add configured firmware directories */ - dirs = g_strsplit(CONFIG_QEMU_FIRMWAREPATH, G_SEARCHPATH_SEPARATOR_S, 0); for (i = 0; dirs[i] != NULL; i++) { qemu_add_data_dir(get_relocated_path(dirs[i])); } - g_strfreev(dirs); /* try to find datadir relative to the executable path */ - qemu_add_data_dir(find_datadir()); + qemu_add_data_dir(get_relocated_path(CONFIG_QEMU_DATADIR)); } void qemu_list_data_dirs(void) diff --git a/softmmu/device_tree.c b/softmmu/device_tree.c index b621f63fba..30aa3aea9f 100644 --- a/softmmu/device_tree.c +++ b/softmmu/device_tree.c @@ -22,10 +22,14 @@ #include "qemu/option.h" #include "qemu/bswap.h" #include "qemu/cutils.h" +#include "qemu/guest-random.h" #include "sysemu/device_tree.h" #include "hw/loader.h" #include "hw/boards.h" #include "qemu/config-file.h" +#include "qapi/qapi-commands-machine.h" +#include "qapi/qmp/qdict.h" +#include "monitor/hmp.h" #include @@ -60,7 +64,8 @@ void *create_device_tree(int *sizep) } ret = fdt_open_into(fdt, fdt, *sizep); if (ret) { - error_report("Unable to copy device tree in memory"); + error_report("%s: Unable to copy device tree into memory: %s", + __func__, fdt_strerror(ret)); exit(1); } @@ -104,7 +109,8 @@ void *load_device_tree(const char *filename_path, int *sizep) ret = fdt_open_into(fdt, fdt, dt_size); if (ret) { - error_report("Unable to copy device tree in memory"); + error_report("%s: Unable to copy device tree into memory: %s", + __func__, fdt_strerror(ret)); goto fail; } @@ -540,8 +546,8 @@ int qemu_fdt_add_subnode(void *fdt, const char *name) retval = fdt_add_subnode(fdt, parent, basename); if (retval < 0) { - error_report("FDT: Failed to create subnode %s: %s", name, - fdt_strerror(retval)); + error_report("%s: Failed to create subnode %s: %s", + __func__, name, fdt_strerror(retval)); exit(1); } @@ -549,6 +555,45 @@ int qemu_fdt_add_subnode(void *fdt, const char *name) return retval; } +/* + * qemu_fdt_add_path: Like qemu_fdt_add_subnode(), but will add + * all missing subnodes from the given path. + */ +int qemu_fdt_add_path(void *fdt, const char *path) +{ + const char *name; + int namelen, retval; + int parent = 0; + + if (path[0] != '/') { + return -1; + } + + do { + name = path + 1; + path = strchr(name, '/'); + namelen = path != NULL ? 
path - name : strlen(name); + + retval = fdt_subnode_offset_namelen(fdt, parent, name, namelen); + if (retval < 0 && retval != -FDT_ERR_NOTFOUND) { + error_report("%s: Unexpected error in finding subnode %.*s: %s", + __func__, namelen, name, fdt_strerror(retval)); + exit(1); + } else if (retval == -FDT_ERR_NOTFOUND) { + retval = fdt_add_subnode_namelen(fdt, parent, name, namelen); + if (retval < 0) { + error_report("%s: Failed to create subnode %.*s: %s", + __func__, namelen, name, fdt_strerror(retval)); + exit(1); + } + } + + parent = retval; + } while (path); + + return retval; +} + void qemu_fdt_dumpdtb(void *fdt, int size) { const char *dumpdtb = current_machine->dumpdtb; @@ -602,3 +647,57 @@ out: g_free(propcells); return ret; } + +void qmp_dumpdtb(const char *filename, Error **errp) +{ + g_autoptr(GError) err = NULL; + uint32_t size; + + if (!current_machine->fdt) { + error_setg(errp, "This machine doesn't have a FDT"); + return; + } + + size = fdt_totalsize(current_machine->fdt); + + g_assert(size > 0); + + if (!g_file_set_contents(filename, current_machine->fdt, size, &err)) { + error_setg(errp, "Error saving FDT to file %s: %s", + filename, err->message); + } +} + +void hmp_dumpdtb(Monitor *mon, const QDict *qdict) +{ + const char *filename = qdict_get_str(qdict, "filename"); + Error *local_err = NULL; + + qmp_dumpdtb(filename, &local_err); + + if (hmp_handle_error(mon, local_err)) { + return; + } + + info_report("dtb dumped to %s", filename); +} + +void qemu_fdt_randomize_seeds(void *fdt) +{ + int noffset, poffset, len; + const char *name; + uint8_t *data; + + for (noffset = fdt_next_node(fdt, 0, NULL); + noffset >= 0; + noffset = fdt_next_node(fdt, noffset, NULL)) { + for (poffset = fdt_first_property_offset(fdt, noffset); + poffset >= 0; + poffset = fdt_next_property_offset(fdt, poffset)) { + data = (uint8_t *)fdt_getprop_by_offset(fdt, poffset, &name, &len); + if (!data || strcmp(name, "rng-seed")) + continue; + qemu_guest_getrandom_nofail(data, len); + } + } +} diff --git a/softmmu/dirtylimit.c b/softmmu/dirtylimit.c new file mode 100644 index 0000000000..12668555f2 --- /dev/null +++ b/softmmu/dirtylimit.c @@ -0,0 +1,601 @@ +/* + * Dirty page rate limit implementation code + * + * Copyright (c) 2022 CHINA TELECOM CO.,LTD. + * + * Authors: + * Hyman Huang(黄勇) + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/main-loop.h" +#include "qapi/qapi-commands-migration.h" +#include "qapi/qmp/qdict.h" +#include "qapi/error.h" +#include "sysemu/dirtyrate.h" +#include "sysemu/dirtylimit.h" +#include "monitor/hmp.h" +#include "monitor/monitor.h" +#include "exec/memory.h" +#include "hw/boards.h" +#include "sysemu/kvm.h" +#include "trace.h" + +/* + * Dirtylimit stop working if dirty page rate error + * value less than DIRTYLIMIT_TOLERANCE_RANGE + */ +#define DIRTYLIMIT_TOLERANCE_RANGE 25 /* MB/s */ +/* + * Plus or minus vcpu sleep time linearly if dirty + * page rate error value percentage over + * DIRTYLIMIT_LINEAR_ADJUSTMENT_PCT. + * Otherwise, plus or minus a fixed vcpu sleep time. + */ +#define DIRTYLIMIT_LINEAR_ADJUSTMENT_PCT 50 +/* + * Max vcpu sleep time percentage during a cycle + * composed of dirty ring full and sleep time. 
+ */ +#define DIRTYLIMIT_THROTTLE_PCT_MAX 99 + +struct { + VcpuStat stat; + bool running; + QemuThread thread; +} *vcpu_dirty_rate_stat; + +typedef struct VcpuDirtyLimitState { + int cpu_index; + bool enabled; + /* + * Quota dirty page rate, unit is MB/s + * zero if not enabled. + */ + uint64_t quota; +} VcpuDirtyLimitState; + +struct { + VcpuDirtyLimitState *states; + /* Max cpus number configured by user */ + int max_cpus; + /* Number of vcpu under dirtylimit */ + int limited_nvcpu; +} *dirtylimit_state; + +/* protect dirtylimit_state */ +static QemuMutex dirtylimit_mutex; + +/* dirtylimit thread quit if dirtylimit_quit is true */ +static bool dirtylimit_quit; + +static void vcpu_dirty_rate_stat_collect(void) +{ + VcpuStat stat; + int i = 0; + + /* calculate vcpu dirtyrate */ + vcpu_calculate_dirtyrate(DIRTYLIMIT_CALC_TIME_MS, + &stat, + GLOBAL_DIRTY_LIMIT, + false); + + for (i = 0; i < stat.nvcpu; i++) { + vcpu_dirty_rate_stat->stat.rates[i].id = i; + vcpu_dirty_rate_stat->stat.rates[i].dirty_rate = + stat.rates[i].dirty_rate; + } + + free(stat.rates); +} + +static void *vcpu_dirty_rate_stat_thread(void *opaque) +{ + rcu_register_thread(); + + /* start log sync */ + global_dirty_log_change(GLOBAL_DIRTY_LIMIT, true); + + while (qatomic_read(&vcpu_dirty_rate_stat->running)) { + vcpu_dirty_rate_stat_collect(); + if (dirtylimit_in_service()) { + dirtylimit_process(); + } + } + + /* stop log sync */ + global_dirty_log_change(GLOBAL_DIRTY_LIMIT, false); + + rcu_unregister_thread(); + return NULL; +} + +int64_t vcpu_dirty_rate_get(int cpu_index) +{ + DirtyRateVcpu *rates = vcpu_dirty_rate_stat->stat.rates; + return qatomic_read_i64(&rates[cpu_index].dirty_rate); +} + +void vcpu_dirty_rate_stat_start(void) +{ + if (qatomic_read(&vcpu_dirty_rate_stat->running)) { + return; + } + + qatomic_set(&vcpu_dirty_rate_stat->running, 1); + qemu_thread_create(&vcpu_dirty_rate_stat->thread, + "dirtyrate-stat", + vcpu_dirty_rate_stat_thread, + NULL, + QEMU_THREAD_JOINABLE); +} + +void vcpu_dirty_rate_stat_stop(void) +{ + qatomic_set(&vcpu_dirty_rate_stat->running, 0); + dirtylimit_state_unlock(); + qemu_mutex_unlock_iothread(); + qemu_thread_join(&vcpu_dirty_rate_stat->thread); + qemu_mutex_lock_iothread(); + dirtylimit_state_lock(); +} + +void vcpu_dirty_rate_stat_initialize(void) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + int max_cpus = ms->smp.max_cpus; + + vcpu_dirty_rate_stat = + g_malloc0(sizeof(*vcpu_dirty_rate_stat)); + + vcpu_dirty_rate_stat->stat.nvcpu = max_cpus; + vcpu_dirty_rate_stat->stat.rates = + g_new0(DirtyRateVcpu, max_cpus); + + vcpu_dirty_rate_stat->running = false; +} + +void vcpu_dirty_rate_stat_finalize(void) +{ + free(vcpu_dirty_rate_stat->stat.rates); + vcpu_dirty_rate_stat->stat.rates = NULL; + + free(vcpu_dirty_rate_stat); + vcpu_dirty_rate_stat = NULL; +} + +void dirtylimit_state_lock(void) +{ + qemu_mutex_lock(&dirtylimit_mutex); +} + +void dirtylimit_state_unlock(void) +{ + qemu_mutex_unlock(&dirtylimit_mutex); +} + +static void +__attribute__((__constructor__)) dirtylimit_mutex_init(void) +{ + qemu_mutex_init(&dirtylimit_mutex); +} + +static inline VcpuDirtyLimitState *dirtylimit_vcpu_get_state(int cpu_index) +{ + return &dirtylimit_state->states[cpu_index]; +} + +void dirtylimit_state_initialize(void) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + int max_cpus = ms->smp.max_cpus; + int i; + + dirtylimit_state = g_malloc0(sizeof(*dirtylimit_state)); + + dirtylimit_state->states = + g_new0(VcpuDirtyLimitState, max_cpus); + + for (i = 0; i < max_cpus; i++) { 
+ dirtylimit_state->states[i].cpu_index = i; + } + + dirtylimit_state->max_cpus = max_cpus; + trace_dirtylimit_state_initialize(max_cpus); +} + +void dirtylimit_state_finalize(void) +{ + free(dirtylimit_state->states); + dirtylimit_state->states = NULL; + + free(dirtylimit_state); + dirtylimit_state = NULL; + + trace_dirtylimit_state_finalize(); +} + +bool dirtylimit_in_service(void) +{ + return !!dirtylimit_state; +} + +bool dirtylimit_vcpu_index_valid(int cpu_index) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + + return !(cpu_index < 0 || + cpu_index >= ms->smp.max_cpus); +} + +static inline int64_t dirtylimit_dirty_ring_full_time(uint64_t dirtyrate) +{ + static uint64_t max_dirtyrate; + uint32_t dirty_ring_size = kvm_dirty_ring_size(); + uint64_t dirty_ring_size_meory_MB = + dirty_ring_size * TARGET_PAGE_SIZE >> 20; + + if (max_dirtyrate < dirtyrate) { + max_dirtyrate = dirtyrate; + } + + return dirty_ring_size_meory_MB * 1000000 / max_dirtyrate; +} + +static inline bool dirtylimit_done(uint64_t quota, + uint64_t current) +{ + uint64_t min, max; + + min = MIN(quota, current); + max = MAX(quota, current); + + return ((max - min) <= DIRTYLIMIT_TOLERANCE_RANGE) ? true : false; +} + +static inline bool +dirtylimit_need_linear_adjustment(uint64_t quota, + uint64_t current) +{ + uint64_t min, max; + + min = MIN(quota, current); + max = MAX(quota, current); + + return ((max - min) * 100 / max) > DIRTYLIMIT_LINEAR_ADJUSTMENT_PCT; +} + +static void dirtylimit_set_throttle(CPUState *cpu, + uint64_t quota, + uint64_t current) +{ + int64_t ring_full_time_us = 0; + uint64_t sleep_pct = 0; + uint64_t throttle_us = 0; + + if (current == 0) { + cpu->throttle_us_per_full = 0; + return; + } + + ring_full_time_us = dirtylimit_dirty_ring_full_time(current); + + if (dirtylimit_need_linear_adjustment(quota, current)) { + if (quota < current) { + sleep_pct = (current - quota) * 100 / current; + throttle_us = + ring_full_time_us * sleep_pct / (double)(100 - sleep_pct); + cpu->throttle_us_per_full += throttle_us; + } else { + sleep_pct = (quota - current) * 100 / quota; + throttle_us = + ring_full_time_us * sleep_pct / (double)(100 - sleep_pct); + cpu->throttle_us_per_full -= throttle_us; + } + + trace_dirtylimit_throttle_pct(cpu->cpu_index, + sleep_pct, + throttle_us); + } else { + if (quota < current) { + cpu->throttle_us_per_full += ring_full_time_us / 10; + } else { + cpu->throttle_us_per_full -= ring_full_time_us / 10; + } + } + + /* + * TODO: in the big kvm_dirty_ring_size case (eg: 65536, or other scenario), + * current dirty page rate may never reach the quota, we should stop + * increasing sleep time? 
+ */ + cpu->throttle_us_per_full = MIN(cpu->throttle_us_per_full, + ring_full_time_us * DIRTYLIMIT_THROTTLE_PCT_MAX); + + cpu->throttle_us_per_full = MAX(cpu->throttle_us_per_full, 0); +} + +static void dirtylimit_adjust_throttle(CPUState *cpu) +{ + uint64_t quota = 0; + uint64_t current = 0; + int cpu_index = cpu->cpu_index; + + quota = dirtylimit_vcpu_get_state(cpu_index)->quota; + current = vcpu_dirty_rate_get(cpu_index); + + if (!dirtylimit_done(quota, current)) { + dirtylimit_set_throttle(cpu, quota, current); + } + + return; +} + +void dirtylimit_process(void) +{ + CPUState *cpu; + + if (!qatomic_read(&dirtylimit_quit)) { + dirtylimit_state_lock(); + + if (!dirtylimit_in_service()) { + dirtylimit_state_unlock(); + return; + } + + CPU_FOREACH(cpu) { + if (!dirtylimit_vcpu_get_state(cpu->cpu_index)->enabled) { + continue; + } + dirtylimit_adjust_throttle(cpu); + } + dirtylimit_state_unlock(); + } +} + +void dirtylimit_change(bool start) +{ + if (start) { + qatomic_set(&dirtylimit_quit, 0); + } else { + qatomic_set(&dirtylimit_quit, 1); + } +} + +void dirtylimit_set_vcpu(int cpu_index, + uint64_t quota, + bool enable) +{ + trace_dirtylimit_set_vcpu(cpu_index, quota); + + if (enable) { + dirtylimit_state->states[cpu_index].quota = quota; + if (!dirtylimit_vcpu_get_state(cpu_index)->enabled) { + dirtylimit_state->limited_nvcpu++; + } + } else { + dirtylimit_state->states[cpu_index].quota = 0; + if (dirtylimit_state->states[cpu_index].enabled) { + dirtylimit_state->limited_nvcpu--; + } + } + + dirtylimit_state->states[cpu_index].enabled = enable; +} + +void dirtylimit_set_all(uint64_t quota, + bool enable) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + int max_cpus = ms->smp.max_cpus; + int i; + + for (i = 0; i < max_cpus; i++) { + dirtylimit_set_vcpu(i, quota, enable); + } +} + +void dirtylimit_vcpu_execute(CPUState *cpu) +{ + if (dirtylimit_in_service() && + dirtylimit_vcpu_get_state(cpu->cpu_index)->enabled && + cpu->throttle_us_per_full) { + trace_dirtylimit_vcpu_execute(cpu->cpu_index, + cpu->throttle_us_per_full); + usleep(cpu->throttle_us_per_full); + } +} + +static void dirtylimit_init(void) +{ + dirtylimit_state_initialize(); + dirtylimit_change(true); + vcpu_dirty_rate_stat_initialize(); + vcpu_dirty_rate_stat_start(); +} + +static void dirtylimit_cleanup(void) +{ + vcpu_dirty_rate_stat_stop(); + vcpu_dirty_rate_stat_finalize(); + dirtylimit_change(false); + dirtylimit_state_finalize(); +} + +void qmp_cancel_vcpu_dirty_limit(bool has_cpu_index, + int64_t cpu_index, + Error **errp) +{ + if (!kvm_enabled() || !kvm_dirty_ring_enabled()) { + return; + } + + if (has_cpu_index && !dirtylimit_vcpu_index_valid(cpu_index)) { + error_setg(errp, "incorrect cpu index specified"); + return; + } + + if (!dirtylimit_in_service()) { + return; + } + + dirtylimit_state_lock(); + + if (has_cpu_index) { + dirtylimit_set_vcpu(cpu_index, 0, false); + } else { + dirtylimit_set_all(0, false); + } + + if (!dirtylimit_state->limited_nvcpu) { + dirtylimit_cleanup(); + } + + dirtylimit_state_unlock(); +} + +void hmp_cancel_vcpu_dirty_limit(Monitor *mon, const QDict *qdict) +{ + int64_t cpu_index = qdict_get_try_int(qdict, "cpu_index", -1); + Error *err = NULL; + + qmp_cancel_vcpu_dirty_limit(!!(cpu_index != -1), cpu_index, &err); + if (err) { + hmp_handle_error(mon, err); + return; + } + + monitor_printf(mon, "[Please use 'info vcpu_dirty_limit' to query " + "dirty limit for virtual CPU]\n"); +} + +void qmp_set_vcpu_dirty_limit(bool has_cpu_index, + int64_t cpu_index, + uint64_t dirty_rate, + Error 
**errp) +{ + if (!kvm_enabled() || !kvm_dirty_ring_enabled()) { + error_setg(errp, "dirty page limit feature requires KVM with" + " accelerator property 'dirty-ring-size' set'"); + return; + } + + if (has_cpu_index && !dirtylimit_vcpu_index_valid(cpu_index)) { + error_setg(errp, "incorrect cpu index specified"); + return; + } + + if (!dirty_rate) { + qmp_cancel_vcpu_dirty_limit(has_cpu_index, cpu_index, errp); + return; + } + + dirtylimit_state_lock(); + + if (!dirtylimit_in_service()) { + dirtylimit_init(); + } + + if (has_cpu_index) { + dirtylimit_set_vcpu(cpu_index, dirty_rate, true); + } else { + dirtylimit_set_all(dirty_rate, true); + } + + dirtylimit_state_unlock(); +} + +void hmp_set_vcpu_dirty_limit(Monitor *mon, const QDict *qdict) +{ + int64_t dirty_rate = qdict_get_int(qdict, "dirty_rate"); + int64_t cpu_index = qdict_get_try_int(qdict, "cpu_index", -1); + Error *err = NULL; + + qmp_set_vcpu_dirty_limit(!!(cpu_index != -1), cpu_index, dirty_rate, &err); + if (err) { + hmp_handle_error(mon, err); + return; + } + + monitor_printf(mon, "[Please use 'info vcpu_dirty_limit' to query " + "dirty limit for virtual CPU]\n"); +} + +static struct DirtyLimitInfo *dirtylimit_query_vcpu(int cpu_index) +{ + DirtyLimitInfo *info = NULL; + + info = g_malloc0(sizeof(*info)); + info->cpu_index = cpu_index; + info->limit_rate = dirtylimit_vcpu_get_state(cpu_index)->quota; + info->current_rate = vcpu_dirty_rate_get(cpu_index); + + return info; +} + +static struct DirtyLimitInfoList *dirtylimit_query_all(void) +{ + int i, index; + DirtyLimitInfo *info = NULL; + DirtyLimitInfoList *head = NULL, **tail = &head; + + dirtylimit_state_lock(); + + if (!dirtylimit_in_service()) { + dirtylimit_state_unlock(); + return NULL; + } + + for (i = 0; i < dirtylimit_state->max_cpus; i++) { + index = dirtylimit_state->states[i].cpu_index; + if (dirtylimit_vcpu_get_state(index)->enabled) { + info = dirtylimit_query_vcpu(index); + QAPI_LIST_APPEND(tail, info); + } + } + + dirtylimit_state_unlock(); + + return head; +} + +struct DirtyLimitInfoList *qmp_query_vcpu_dirty_limit(Error **errp) +{ + if (!dirtylimit_in_service()) { + return NULL; + } + + return dirtylimit_query_all(); +} + +void hmp_info_vcpu_dirty_limit(Monitor *mon, const QDict *qdict) +{ + DirtyLimitInfoList *limit, *head, *info = NULL; + Error *err = NULL; + + if (!dirtylimit_in_service()) { + monitor_printf(mon, "Dirty page limit not enabled!\n"); + return; + } + + info = qmp_query_vcpu_dirty_limit(&err); + if (err) { + hmp_handle_error(mon, err); + return; + } + + head = info; + for (limit = head; limit != NULL; limit = limit->next) { + monitor_printf(mon, "vcpu[%"PRIi64"], limit rate %"PRIi64 " (MB/s)," + " current rate %"PRIi64 " (MB/s)\n", + limit->value->cpu_index, + limit->value->limit_rate, + limit->value->current_rate); + } + + g_free(info); +} diff --git a/softmmu/dma-helpers.c b/softmmu/dma-helpers.c index 7d766a5e89..7820fec54c 100644 --- a/softmmu/dma-helpers.c +++ b/softmmu/dma-helpers.c @@ -19,31 +19,17 @@ /* #define DEBUG_IOMMU */ MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr, - uint8_t c, dma_addr_t len) + uint8_t c, dma_addr_t len, MemTxAttrs attrs) { dma_barrier(as, DMA_DIRECTION_FROM_DEVICE); -#define FILLBUF_SIZE 512 - uint8_t fillbuf[FILLBUF_SIZE]; - int l; - MemTxResult error = MEMTX_OK; - - memset(fillbuf, c, FILLBUF_SIZE); - while (len > 0) { - l = len < FILLBUF_SIZE ? 
len : FILLBUF_SIZE; - error |= address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, - fillbuf, l); - len -= l; - addr += l; - } - - return error; + return address_space_set(as, addr, c, len, attrs); } void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint, AddressSpace *as) { - qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry)); + qsg->sg = g_new(ScatterGatherEntry, alloc_hint); qsg->nsg = 0; qsg->nalloc = alloc_hint; qsg->size = 0; @@ -56,7 +42,7 @@ void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len) { if (qsg->nsg == qsg->nalloc) { qsg->nalloc = 2 * qsg->nalloc + 1; - qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry)); + qsg->sg = g_renew(ScatterGatherEntry, qsg->sg, qsg->nalloc); } qsg->sg[qsg->nsg].base = base; qsg->sg[qsg->nsg].len = len; @@ -144,7 +130,8 @@ static void dma_blk_cb(void *opaque, int ret) while (dbs->sg_cur_index < dbs->sg->nsg) { cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte; cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte; - mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir); + mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir, + MEMTXATTRS_UNSPECIFIED); /* * Make reads deterministic in icount mode. Windows sometimes issues * disk read requests with overlapping SGs. It leads @@ -294,35 +281,43 @@ BlockAIOCB *dma_blk_write(BlockBackend *blk, } -static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg, - DMADirection dir) +static MemTxResult dma_buf_rw(void *buf, dma_addr_t len, dma_addr_t *residual, + QEMUSGList *sg, DMADirection dir, + MemTxAttrs attrs) { - uint64_t resid; + uint8_t *ptr = buf; + dma_addr_t xresidual; int sg_cur_index; + MemTxResult res = MEMTX_OK; - resid = sg->size; + xresidual = sg->size; sg_cur_index = 0; - len = MIN(len, resid); + len = MIN(len, xresidual); while (len > 0) { ScatterGatherEntry entry = sg->sg[sg_cur_index++]; - int32_t xfer = MIN(len, entry.len); - dma_memory_rw(sg->as, entry.base, ptr, xfer, dir); + dma_addr_t xfer = MIN(len, entry.len); + res |= dma_memory_rw(sg->as, entry.base, ptr, xfer, dir, attrs); ptr += xfer; len -= xfer; - resid -= xfer; + xresidual -= xfer; } - return resid; + if (residual) { + *residual = xresidual; + } + return res; } -uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg) +MemTxResult dma_buf_read(void *ptr, dma_addr_t len, dma_addr_t *residual, + QEMUSGList *sg, MemTxAttrs attrs) { - return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE); + return dma_buf_rw(ptr, len, residual, sg, DMA_DIRECTION_FROM_DEVICE, attrs); } -uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg) +MemTxResult dma_buf_write(void *ptr, dma_addr_t len, dma_addr_t *residual, + QEMUSGList *sg, MemTxAttrs attrs) { - return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE); + return dma_buf_rw(ptr, len, residual, sg, DMA_DIRECTION_TO_DEVICE, attrs); } void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie, diff --git a/softmmu/globals.c b/softmmu/globals.c index 7d0fc81183..527edbefdd 100644 --- a/softmmu/globals.c +++ b/softmmu/globals.c @@ -25,8 +25,6 @@ #include "qemu/osdep.h" #include "exec/cpu-common.h" #include "hw/display/vga.h" -#include "hw/i386/pc.h" -#include "hw/i386/x86.h" #include "hw/loader.h" #include "hw/xen/xen.h" #include "net/net.h" @@ -42,6 +40,7 @@ int nb_nics; NICInfo nd_table[MAX_NICS]; int autostart = 1; int vga_interface_type = VGA_NONE; +bool vga_interface_created; Chardev *parallel_hds[MAX_PARALLEL_PORTS]; int win2k_install_hack; int 
singlestep; @@ -51,12 +50,8 @@ QEMUOptionRom option_rom[MAX_OPTION_ROMS]; int nb_option_roms; int old_param; const char *qemu_name; -int alt_grab; -int ctrl_grab; unsigned int nb_prom_envs; const char *prom_envs[MAX_PROM_ENVS]; -int boot_menu; -bool boot_strict; uint8_t *boot_splash_filedata; int only_migratable; /* turn it off unless user states otherwise */ int icount_align_option; diff --git a/softmmu/icount.c b/softmmu/icount.c index 21341a4ce4..a5cef9c60a 100644 --- a/softmmu/icount.c +++ b/softmmu/icount.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/cutils.h" #include "migration/vmstate.h" #include "qapi/error.h" @@ -260,11 +259,16 @@ static void icount_warp_rt(void) warp_delta = clock - timers_state.vm_clock_warp_start; if (icount_enabled() == 2) { /* - * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too - * far ahead of real time. + * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too far + * ahead of real time (it might already be ahead so careful not + * to go backwards). */ int64_t cur_icount = icount_get_locked(); int64_t delta = clock - cur_icount; + + if (delta < 0) { + delta = 0; + } warp_delta = MIN(warp_delta, delta); } qatomic_set_i64(&timers_state.qemu_icount_bias, @@ -323,7 +327,7 @@ void icount_start_warp_timer(void) * to vCPU was processed in advance and vCPU went to sleep. * Therefore we have to wake it up for doing someting. */ - if (replay_has_checkpoint()) { + if (replay_has_event()) { qemu_clock_notify(QEMU_CLOCK_VIRTUAL); } return; @@ -405,6 +409,8 @@ void icount_account_warp_timer(void) return; } + replay_async_events(); + /* warp clock deterministically in record/replay mode */ if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) { return; @@ -487,3 +493,11 @@ void icount_configure(QemuOpts *opts, Error **errp) qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + NANOSECONDS_PER_SECOND / 10); } + +void icount_notify_exit(void) +{ + if (icount_enabled() && current_cpu) { + qemu_cpu_kick(current_cpu); + qemu_clock_notify(QEMU_CLOCK_VIRTUAL); + } +} diff --git a/softmmu/main.c b/softmmu/main.c index b1b57934c9..e84cdc9438 100644 --- a/softmmu/main.c +++ b/softmmu/main.c @@ -23,38 +23,31 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" +#include "qemu-main.h" #include "sysemu/sysemu.h" -#ifdef XBOX -#undef main -int qemu_main(int argc, char **argv, char **envp) -#else - +#ifndef XBOX #ifdef CONFIG_SDL -#if defined(__APPLE__) || defined(main) #include -static int qemu_main(int argc, char **argv, char **envp); -int main(int argc, char **argv) -{ - return qemu_main(argc, argv, NULL); -} -#undef main -#define main qemu_main #endif -#endif /* CONFIG_SDL */ +#endif -#ifdef CONFIG_COCOA -#undef main -#define main qemu_main -#endif /* CONFIG_COCOA */ - -int main(int argc, char **argv, char **envp) -#endif // ifdef XBOX +int qemu_default_main(void) { - qemu_init(argc, argv, envp); - qemu_main_loop(); + int status; + + status = qemu_main_loop(); qemu_cleanup(); - return 0; + return status; } + +int (*qemu_main)(void) = qemu_default_main; + +#ifndef XBOX +int main(int argc, char **argv) +{ + qemu_init(argc, argv); + return qemu_main(); +} +#endif \ No newline at end of file diff --git a/softmmu/memory.c b/softmmu/memory.c index f662816c19..7eefde9914 100644 --- a/softmmu/memory.c +++ b/softmmu/memory.c @@ -33,13 +33,14 @@ #include "qemu/accel.h" #include "hw/boards.h" #include "migration/vmstate.h" +#include "exec/address-spaces.h" //#define DEBUG_UNASSIGNED static unsigned memory_region_transaction_depth; static bool 
memory_region_update_pending; static bool ioeventfd_update_pending; -bool global_dirty_log; +unsigned int global_dirty_tracking; static QTAILQ_HEAD(, MemoryListener) memory_listeners = QTAILQ_HEAD_INITIALIZER(memory_listeners); @@ -350,7 +351,7 @@ static void flatview_simplify(FlatView *view) static bool memory_region_big_endian(MemoryRegion *mr) { -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN return mr->ops->endianness != DEVICE_LITTLE_ENDIAN; #else return mr->ops->endianness == DEVICE_BIG_ENDIAN; @@ -1378,17 +1379,17 @@ bool memory_region_access_valid(MemoryRegion *mr, { if (mr->ops->valid.accepts && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) { - qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr " - "0x%" HWADDR_PRIX ", size %u, " - "region '%s', reason: rejected\n", + qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX + ", size %u, region '%s', reason: rejected\n", + is_write ? "write" : "read", addr, size, memory_region_name(mr)); return false; } if (!mr->ops->valid.unaligned && (addr & (size - 1))) { - qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr " - "0x%" HWADDR_PRIX ", size %u, " - "region '%s', reason: unaligned\n", + qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX + ", size %u, region '%s', reason: unaligned\n", + is_write ? "write" : "read", addr, size, memory_region_name(mr)); return false; } @@ -1400,10 +1401,10 @@ bool memory_region_access_valid(MemoryRegion *mr, if (size > mr->ops->valid.max_access_size || size < mr->ops->valid.min_access_size) { - qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr " - "0x%" HWADDR_PRIX ", size %u, " - "region '%s', reason: invalid size " - "(min:%u max:%u)\n", + qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX + ", size %u, region '%s', reason: invalid size " + "(min:%u max:%u)\n", + is_write ? 
"write" : "read", addr, size, memory_region_name(mr), mr->ops->valid.min_access_size, mr->ops->valid.max_access_size); @@ -1444,6 +1445,11 @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr, unsigned size = memop_size(op); MemTxResult r; + if (mr->alias) { + return memory_region_dispatch_read(mr->alias, + mr->alias_offset + addr, + pval, op, attrs); + } if (!memory_region_access_valid(mr, addr, size, false, attrs)) { *pval = unassigned_mem_read(mr, addr, size); return MEMTX_DECODE_ERROR; @@ -1488,6 +1494,11 @@ MemTxResult memory_region_dispatch_write(MemoryRegion *mr, { unsigned size = memop_size(op); + if (mr->alias) { + return memory_region_dispatch_write(mr->alias, + mr->alias_offset + addr, + data, op, attrs); + } if (!memory_region_access_valid(mr, addr, size, true, attrs)) { unassigned_mem_write(mr, addr, data, size); return MEMTX_DECODE_ERROR; @@ -1821,12 +1832,17 @@ bool memory_region_is_ram_device(MemoryRegion *mr) return mr->ram_device; } +bool memory_region_is_protected(MemoryRegion *mr) +{ + return mr->ram && (mr->ram_block->flags & RAM_PROTECTED); +} + uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr) { uint8_t mask = mr->dirty_log_mask; RAMBlock *rb = mr->ram_block; - if (global_dirty_log && ((rb && qemu_ram_is_migratable(rb)) || + if (global_dirty_tracking && ((rb && qemu_ram_is_migratable(rb)) || memory_region_is_iommu(mr))) { mask |= (1 << DIRTY_MEMORY_MIGRATION); } @@ -2086,6 +2102,17 @@ int ram_discard_manager_replay_populated(const RamDiscardManager *rdm, return rdmc->replay_populated(rdm, section, replay_fn, opaque); } +void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm, + MemoryRegionSection *section, + ReplayRamDiscard replay_fn, + void *opaque) +{ + RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm); + + g_assert(rdmc->replay_discarded); + rdmc->replay_discarded(rdm, section, replay_fn, opaque); +} + void ram_discard_manager_register_listener(RamDiscardManager *rdm, RamDiscardListener *rdl, MemoryRegionSection *section) @@ -2105,6 +2132,77 @@ void ram_discard_manager_unregister_listener(RamDiscardManager *rdm, rdmc->unregister_listener(rdm, rdl); } +/* Called with rcu_read_lock held. */ +bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr, + ram_addr_t *ram_addr, bool *read_only, + bool *mr_has_discard_manager) +{ + MemoryRegion *mr; + hwaddr xlat; + hwaddr len = iotlb->addr_mask + 1; + bool writable = iotlb->perm & IOMMU_WO; + + if (mr_has_discard_manager) { + *mr_has_discard_manager = false; + } + /* + * The IOMMU TLB entry we have just covers translation through + * this IOMMU to its immediate target. We need to translate + * it the rest of the way through to memory. + */ + mr = address_space_translate(&address_space_memory, iotlb->translated_addr, + &xlat, &len, writable, MEMTXATTRS_UNSPECIFIED); + if (!memory_region_is_ram(mr)) { + error_report("iommu map to non memory area %" HWADDR_PRIx "", xlat); + return false; + } else if (memory_region_has_ram_discard_manager(mr)) { + RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr); + MemoryRegionSection tmp = { + .mr = mr, + .offset_within_region = xlat, + .size = int128_make64(len), + }; + if (mr_has_discard_manager) { + *mr_has_discard_manager = true; + } + /* + * Malicious VMs can map memory into the IOMMU, which is expected + * to remain discarded. vfio will pin all pages, populating memory. + * Disallow that. vmstate priorities make sure any RamDiscardManager + * were already restored before IOMMUs are restored. 
+ */ + if (!ram_discard_manager_is_populated(rdm, &tmp)) { + error_report("iommu map to discarded memory (e.g., unplugged via" + " virtio-mem): %" HWADDR_PRIx "", + iotlb->translated_addr); + return false; + } + } + + /* + * Translation truncates length to the IOMMU page size, + * check that it did not truncate too much. + */ + if (len & iotlb->addr_mask) { + error_report("iommu has granularity incompatible with target AS"); + return false; + } + + if (vaddr) { + *vaddr = memory_region_get_ram_ptr(mr) + xlat; + } + + if (ram_addr) { + *ram_addr = memory_region_get_ram_addr(mr) + xlat; + } + + if (read_only) { + *read_only = !writable || mr->readonly; + } + + return true; +} + void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client) { uint8_t mask = 1 << client; @@ -2188,6 +2286,7 @@ static void memory_region_sync_dirty_bitmap(MemoryRegion *mr) } } flatview_unref(view); + trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 0); } else if (listener->log_sync_global) { /* * No matter whether MR is specified, what we can do here @@ -2195,6 +2294,7 @@ static void memory_region_sync_dirty_bitmap(MemoryRegion *mr) * sync in a finer granularity. */ listener->log_sync_global(listener); + trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 1); } } } @@ -2586,8 +2686,13 @@ static void memory_region_add_subregion_common(MemoryRegion *mr, hwaddr offset, MemoryRegion *subregion) { + MemoryRegion *alias; + assert(!subregion->container); subregion->container = mr; + for (alias = subregion->alias; alias; alias = alias->alias) { + alias->mapped_via_alias++; + } subregion->addr = offset; memory_region_update_container_subregions(subregion); } @@ -2612,9 +2717,15 @@ void memory_region_add_subregion_overlap(MemoryRegion *mr, void memory_region_del_subregion(MemoryRegion *mr, MemoryRegion *subregion) { + MemoryRegion *alias; + memory_region_transaction_begin(); assert(subregion->container == mr); subregion->container = NULL; + for (alias = subregion->alias; alias; alias = alias->alias) { + alias->mapped_via_alias--; + assert(alias->mapped_via_alias >= 0); + } QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link); memory_region_unref(subregion); memory_region_update_pending |= mr->enabled && subregion->enabled; @@ -2656,8 +2767,7 @@ static void memory_region_readd_subregion(MemoryRegion *mr) memory_region_transaction_begin(); memory_region_ref(mr); memory_region_del_subregion(container, mr); - mr->container = container; - memory_region_update_container_subregions(mr); + memory_region_add_subregion_common(container, mr->addr, mr); memory_region_unref(mr); memory_region_transaction_commit(); } @@ -2711,7 +2821,7 @@ static FlatRange *flatview_lookup(FlatView *view, AddrRange addr) bool memory_region_is_mapped(MemoryRegion *mr) { - return mr->container ? true : false; + return !!mr->container || mr->mapped_via_alias; } /* Same as memory_region_find, but it does not add a reference to the @@ -2820,62 +2930,102 @@ void memory_global_after_dirty_log_sync(void) MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward); } +/* + * Dirty track stop flags that are postponed due to VM being stopped. Should + * only be used within vmstate_change hook. 
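memory_get_xlat_addr() factors the translate-IOTLB-entry-to-RAM logic out of vfio-style callers. A rough sketch of how a caller might use it from an IOMMU map notifier, under RCU, is shown below; the notifier itself and its registration are assumptions, only the helper's signature comes from the patch (its prototype is assumed to live in "exec/memory.h"):

#include "qemu/osdep.h"
#include "exec/memory.h"
#include "qemu/rcu.h"

/* Hypothetical callback for IOMMU_NOTIFIER_MAP events. */
static void demo_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    void *vaddr;
    bool read_only, has_rdm;

    RCU_READ_LOCK_GUARD();

    if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, &has_rdm)) {
        return;   /* not RAM, or currently discarded memory: refuse it */
    }

    /*
     * vaddr now points at the host memory backing the guest IOVA range
     * [iotlb->iova, iotlb->iova + iotlb->addr_mask]; a real back end
     * would map it here, honouring read_only.
     */
    (void)vaddr;
    (void)read_only;
    (void)has_rdm;
}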
+ */ +static unsigned int postponed_stop_flags; static VMChangeStateEntry *vmstate_change; +static void memory_global_dirty_log_stop_postponed_run(void); -void memory_global_dirty_log_start(void) +void memory_global_dirty_log_start(unsigned int flags) { + unsigned int old_flags; + + assert(flags && !(flags & (~GLOBAL_DIRTY_MASK))); + if (vmstate_change) { - qemu_del_vm_change_state_handler(vmstate_change); - vmstate_change = NULL; + /* If there is postponed stop(), operate on it first */ + postponed_stop_flags &= ~flags; + memory_global_dirty_log_stop_postponed_run(); } - global_dirty_log = true; + flags &= ~global_dirty_tracking; + if (!flags) { + return; + } - MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward); + old_flags = global_dirty_tracking; + global_dirty_tracking |= flags; + trace_global_dirty_changed(global_dirty_tracking); - /* Refresh DIRTY_MEMORY_MIGRATION bit. */ - memory_region_transaction_begin(); - memory_region_update_pending = true; - memory_region_transaction_commit(); + if (!old_flags) { + MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward); + memory_region_transaction_begin(); + memory_region_update_pending = true; + memory_region_transaction_commit(); + } } -static void memory_global_dirty_log_do_stop(void) +static void memory_global_dirty_log_do_stop(unsigned int flags) { - global_dirty_log = false; + assert(flags && !(flags & (~GLOBAL_DIRTY_MASK))); + assert((global_dirty_tracking & flags) == flags); + global_dirty_tracking &= ~flags; - /* Refresh DIRTY_MEMORY_MIGRATION bit. */ - memory_region_transaction_begin(); - memory_region_update_pending = true; - memory_region_transaction_commit(); + trace_global_dirty_changed(global_dirty_tracking); - MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse); + if (!global_dirty_tracking) { + memory_region_transaction_begin(); + memory_region_update_pending = true; + memory_region_transaction_commit(); + MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse); + } +} + +/* + * Execute the postponed dirty log stop operations if there is, then reset + * everything (including the flags and the vmstate change hook). 
+ */ +static void memory_global_dirty_log_stop_postponed_run(void) +{ + /* This must be called with the vmstate handler registered */ + assert(vmstate_change); + + /* Note: postponed_stop_flags can be cleared in log start routine */ + if (postponed_stop_flags) { + memory_global_dirty_log_do_stop(postponed_stop_flags); + postponed_stop_flags = 0; + } + + qemu_del_vm_change_state_handler(vmstate_change); + vmstate_change = NULL; } static void memory_vm_change_state_handler(void *opaque, bool running, RunState state) { if (running) { - memory_global_dirty_log_do_stop(); - - if (vmstate_change) { - qemu_del_vm_change_state_handler(vmstate_change); - vmstate_change = NULL; - } + memory_global_dirty_log_stop_postponed_run(); } } -void memory_global_dirty_log_stop(void) +void memory_global_dirty_log_stop(unsigned int flags) { if (!runstate_is_running()) { + /* Postpone the dirty log stop, e.g., to when VM starts again */ if (vmstate_change) { - return; + /* Batch with previous postponed flags */ + postponed_stop_flags |= flags; + } else { + postponed_stop_flags = flags; + vmstate_change = qemu_add_vm_change_state_handler( + memory_vm_change_state_handler, NULL); } - vmstate_change = qemu_add_vm_change_state_handler( - memory_vm_change_state_handler, NULL); return; } - memory_global_dirty_log_do_stop(); + memory_global_dirty_log_do_stop(flags); } static void listener_add_address_space(MemoryListener *listener, @@ -2887,7 +3037,7 @@ static void listener_add_address_space(MemoryListener *listener, if (listener->begin) { listener->begin(listener); } - if (global_dirty_log) { + if (global_dirty_tracking) { if (listener->log_global_start) { listener->log_global_start(listener); } @@ -3315,58 +3465,115 @@ static gboolean mtree_info_flatview_free(gpointer key, gpointer value, return true; } -void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled) +static void mtree_info_flatview(bool dispatch_tree, bool owner) +{ + struct FlatViewInfo fvi = { + .counter = 0, + .dispatch_tree = dispatch_tree, + .owner = owner, + }; + AddressSpace *as; + FlatView *view; + GArray *fv_address_spaces; + GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal); + AccelClass *ac = ACCEL_GET_CLASS(current_accel()); + + if (ac->has_memory) { + fvi.ac = ac; + } + + /* Gather all FVs in one table */ + QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { + view = address_space_get_flatview(as); + + fv_address_spaces = g_hash_table_lookup(views, view); + if (!fv_address_spaces) { + fv_address_spaces = g_array_new(false, false, sizeof(as)); + g_hash_table_insert(views, view, fv_address_spaces); + } + + g_array_append_val(fv_address_spaces, as); + } + + /* Print */ + g_hash_table_foreach(views, mtree_print_flatview, &fvi); + + /* Free */ + g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0); + g_hash_table_unref(views); +} + +struct AddressSpaceInfo { + MemoryRegionListHead *ml_head; + bool owner; + bool disabled; +}; + +/* Returns negative value if a < b; zero if a = b; positive value if a > b. 
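With global_dirty_log replaced by the global_dirty_tracking bitmask above, independent users (migration, dirty-rate measurement, and so on) can enable and disable tracking without stepping on each other, and a stop issued while the VM is paused is postponed until it runs again. A minimal sketch of a hypothetical second user follows; the flag name GLOBAL_DIRTY_DIRTY_RATE is an assumption, the patch only shows GLOBAL_DIRTY_MASK:

#include "qemu/osdep.h"
#include "exec/memory.h"

static void demo_dirty_rate_measurement(void)
{
    /* Turn on dirty tracking for this user's bit only (assumed flag name). */
    memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE);

    /* ... sample dirty bitmaps for a while ... */

    /*
     * Clearing this bit does not disturb other users: if migration still
     * holds its own bit, listeners keep logging. If the VM is stopped,
     * the stop is queued and runs from the vmstate-change hook above.
     */
    memory_global_dirty_log_stop(GLOBAL_DIRTY_DIRTY_RATE);
}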
*/ +static gint address_space_compare_name(gconstpointer a, gconstpointer b) +{ + const AddressSpace *as_a = a; + const AddressSpace *as_b = b; + + return g_strcmp0(as_a->name, as_b->name); +} + +static void mtree_print_as_name(gpointer data, gpointer user_data) +{ + AddressSpace *as = data; + + qemu_printf("address-space: %s\n", as->name); +} + +static void mtree_print_as(gpointer key, gpointer value, gpointer user_data) +{ + MemoryRegion *mr = key; + GSList *as_same_root_mr_list = value; + struct AddressSpaceInfo *asi = user_data; + + g_slist_foreach(as_same_root_mr_list, mtree_print_as_name, NULL); + mtree_print_mr(mr, 1, 0, asi->ml_head, asi->owner, asi->disabled); + qemu_printf("\n"); +} + +static gboolean mtree_info_as_free(gpointer key, gpointer value, + gpointer user_data) +{ + GSList *as_same_root_mr_list = value; + + g_slist_free(as_same_root_mr_list); + + return true; +} + +static void mtree_info_as(bool dispatch_tree, bool owner, bool disabled) { MemoryRegionListHead ml_head; MemoryRegionList *ml, *ml2; AddressSpace *as; - - if (flatview) { - FlatView *view; - struct FlatViewInfo fvi = { - .counter = 0, - .dispatch_tree = dispatch_tree, - .owner = owner, - }; - GArray *fv_address_spaces; - GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal); - AccelClass *ac = ACCEL_GET_CLASS(current_accel()); - - if (ac->has_memory) { - fvi.ac = ac; - } - - /* Gather all FVs in one table */ - QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { - view = address_space_get_flatview(as); - - fv_address_spaces = g_hash_table_lookup(views, view); - if (!fv_address_spaces) { - fv_address_spaces = g_array_new(false, false, sizeof(as)); - g_hash_table_insert(views, view, fv_address_spaces); - } - - g_array_append_val(fv_address_spaces, as); - } - - /* Print */ - g_hash_table_foreach(views, mtree_print_flatview, &fvi); - - /* Free */ - g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0); - g_hash_table_unref(views); - - return; - } + GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal); + GSList *as_same_root_mr_list; + struct AddressSpaceInfo asi = { + .ml_head = &ml_head, + .owner = owner, + .disabled = disabled, + }; QTAILQ_INIT(&ml_head); QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { - qemu_printf("address-space: %s\n", as->name); - mtree_print_mr(as->root, 1, 0, &ml_head, owner, disabled); - qemu_printf("\n"); + /* Create hashtable, key=AS root MR, value = list of AS */ + as_same_root_mr_list = g_hash_table_lookup(views, as->root); + as_same_root_mr_list = g_slist_insert_sorted(as_same_root_mr_list, as, + address_space_compare_name); + g_hash_table_insert(views, as->root, as_same_root_mr_list); } + /* print address spaces */ + g_hash_table_foreach(views, mtree_print_as, &asi); + g_hash_table_foreach_remove(views, mtree_info_as_free, 0); + g_hash_table_unref(views); + /* print aliased regions */ QTAILQ_FOREACH(ml, &ml_head, mrqueue) { qemu_printf("memory-region: %s\n", memory_region_name(ml->mr)); @@ -3379,6 +3586,15 @@ void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled) } } +void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled) +{ + if (flatview) { + mtree_info_flatview(dispatch_tree, owner); + } else { + mtree_info_as(dispatch_tree, owner, disabled); + } +} + void memory_region_init_ram(MemoryRegion *mr, Object *owner, const char *name, diff --git a/softmmu/memory_mapping.c b/softmmu/memory_mapping.c index e7af276546..f6f0a829fd 100644 --- a/softmmu/memory_mapping.c +++ 
b/softmmu/memory_mapping.c @@ -17,6 +17,7 @@ #include "sysemu/memory_mapping.h" #include "exec/memory.h" #include "exec/address-spaces.h" +#include "hw/core/cpu.h" //#define DEBUG_GUEST_PHYS_REGION_ADD @@ -41,7 +42,7 @@ static void create_new_memory_mapping(MemoryMappingList *list, { MemoryMapping *memory_mapping; - memory_mapping = g_malloc(sizeof(MemoryMapping)); + memory_mapping = g_new(MemoryMapping, 1); memory_mapping->phys_addr = phys_addr; memory_mapping->virt_addr = virt_addr; memory_mapping->length = length; @@ -193,29 +194,14 @@ typedef struct GuestPhysListener { MemoryListener listener; } GuestPhysListener; -static void guest_phys_blocks_region_add(MemoryListener *listener, +static void guest_phys_block_add_section(GuestPhysListener *g, MemoryRegionSection *section) { - GuestPhysListener *g; - uint64_t section_size; - hwaddr target_start, target_end; - uint8_t *host_addr; - GuestPhysBlock *predecessor; - - /* we only care about RAM */ - if (!memory_region_is_ram(section->mr) || - memory_region_is_ram_device(section->mr) || - memory_region_is_nonvolatile(section->mr)) { - return; - } - - g = container_of(listener, GuestPhysListener, listener); - section_size = int128_get64(section->size); - target_start = section->offset_within_address_space; - target_end = target_start + section_size; - host_addr = memory_region_get_ram_ptr(section->mr) + - section->offset_within_region; - predecessor = NULL; + const hwaddr target_start = section->offset_within_address_space; + const hwaddr target_end = target_start + int128_get64(section->size); + uint8_t *host_addr = memory_region_get_ram_ptr(section->mr) + + section->offset_within_region; + GuestPhysBlock *predecessor = NULL; /* find continuity in guest physical address space */ if (!QTAILQ_EMPTY(&g->list->head)) { @@ -229,7 +215,8 @@ static void guest_phys_blocks_region_add(MemoryListener *listener, /* we want continuity in both guest-physical and host-virtual memory */ if (predecessor->target_end < target_start || - predecessor->host_addr + predecessor_size != host_addr) { + predecessor->host_addr + predecessor_size != host_addr || + predecessor->mr != section->mr) { predecessor = NULL; } } @@ -260,6 +247,40 @@ static void guest_phys_blocks_region_add(MemoryListener *listener, #endif } +static int guest_phys_ram_populate_cb(MemoryRegionSection *section, + void *opaque) +{ + GuestPhysListener *g = opaque; + + guest_phys_block_add_section(g, section); + return 0; +} + +static void guest_phys_blocks_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + GuestPhysListener *g = container_of(listener, GuestPhysListener, listener); + + /* we only care about RAM */ + if (!memory_region_is_ram(section->mr) || + memory_region_is_ram_device(section->mr) || + memory_region_is_nonvolatile(section->mr)) { + return; + } + + /* for special sparse regions, only add populated parts */ + if (memory_region_has_ram_discard_manager(section->mr)) { + RamDiscardManager *rdm; + + rdm = memory_region_get_ram_discard_manager(section->mr); + ram_discard_manager_replay_populated(rdm, section, + guest_phys_ram_populate_cb, g); + return; + } + + guest_phys_block_add_section(g, section); +} + void guest_phys_blocks_append(GuestPhysBlockList *list) { GuestPhysListener g = { 0 }; diff --git a/softmmu/meson.build b/softmmu/meson.build index d8e03018ab..3272af1f31 100644 --- a/softmmu/meson.build +++ b/softmmu/meson.build @@ -1,20 +1,10 @@ specific_ss.add(when: 'CONFIG_SOFTMMU', if_true: [files( 'arch_init.c', - 'balloon.c', - 'cpus.c', - 'cpu-throttle.c', 
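guest_phys_blocks_region_add() above shows the intended pattern for sparse regions: rather than adding the whole section, walk only the populated parts via the RamDiscardManager. A hypothetical reuse of the same pattern, here just counting populated bytes in a section, might look like this sketch:

#include "qemu/osdep.h"
#include "exec/memory.h"

static int demo_count_populated_cb(MemoryRegionSection *section, void *opaque)
{
    uint64_t *bytes = opaque;

    *bytes += int128_get64(section->size);
    return 0;   /* a non-zero return is treated as an error by the walk */
}

/* Returns the number of currently populated bytes in @section. */
static uint64_t demo_count_populated(MemoryRegionSection *section)
{
    uint64_t bytes = 0;

    if (memory_region_has_ram_discard_manager(section->mr)) {
        RamDiscardManager *rdm =
            memory_region_get_ram_discard_manager(section->mr);

        ram_discard_manager_replay_populated(rdm, section,
                                             demo_count_populated_cb, &bytes);
    } else {
        bytes = int128_get64(section->size);
    }
    return bytes;
}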
- 'datadir.c', - 'globals.c', - 'physmem.c', 'ioport.c', - 'rtc.c', - 'runstate.c', 'memory.c', - 'memory_mapping.c', + 'physmem.c', 'qtest.c', - 'vl.c', - 'cpu-timers.c', - 'runstate-action.c', + 'dirtylimit.c', )]) specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: [files( @@ -22,11 +12,25 @@ specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: [files( )]) softmmu_ss.add(files( + 'balloon.c', 'bootdevice.c', + 'cpus.c', + 'cpu-throttle.c', + 'cpu-timers.c', + 'datadir.c', 'dma-helpers.c', + 'globals.c', + 'memory_mapping.c', 'qdev-monitor.c', + 'rtc.c', + 'runstate-action.c', + 'runstate.c', + 'vl.c', ), sdl, libpmem, libdaxctl) -softmmu_ss.add(when: 'CONFIG_TPM', if_true: files('tpm.c')) +if have_tpm + softmmu_ss.add(files('tpm.c')) +endif + softmmu_ss.add(when: seccomp, if_true: files('qemu-seccomp.c')) softmmu_ss.add(when: fdt, if_true: files('device_tree.c')) diff --git a/softmmu/physmem.c b/softmmu/physmem.c index 009f17e4a1..ca2a20d43b 100644 --- a/softmmu/physmem.c +++ b/softmmu/physmem.c @@ -18,11 +18,12 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" +#include "exec/page-vary.h" #include "qapi/error.h" #include "qemu/cutils.h" #include "qemu/cacheflush.h" +#include "qemu/madvise.h" #ifdef CONFIG_TCG #include "hw/core/tcg-cpu-ops.h" @@ -41,6 +42,8 @@ #include "qemu/config-file.h" #include "qemu/error-report.h" #include "qemu/qemu-print.h" +#include "qemu/log.h" +#include "qemu/memalign.h" #include "exec/memory.h" #include "exec/ioport.h" #include "sysemu/dma.h" @@ -60,7 +63,6 @@ #include "exec/memory-internal.h" #include "exec/ram_addr.h" -#include "exec/log.h" #include "qemu/pmem.h" @@ -667,7 +669,7 @@ void tcg_iommu_init_notifier_list(CPUState *cpu) /* Called from RCU critical section */ MemoryRegionSection * -address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, +address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr, hwaddr *xlat, hwaddr *plen, MemTxAttrs attrs, int *prot) { @@ -676,6 +678,7 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, IOMMUMemoryRegionClass *imrc; IOMMUTLBEntry iotlb; int iommu_idx; + hwaddr addr = orig_addr; AddressSpaceDispatch *d = qatomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch); @@ -720,6 +723,16 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, return section; translate_fail: + /* + * We should be given a page-aligned address -- certainly + * tlb_set_page_with_attrs() does so. The page offset of xlat + * is used to index sections[], and PHYS_SECTION_UNASSIGNED = 0. + * The page portion of xlat will be logged by memory_region_access_valid() + * when this memory access is rejected, so use the original untranslated + * physical address. 
+ */ + assert((orig_addr & ~TARGET_PAGE_MASK) == 0); + *xlat = orig_addr; return &d->map.sections[PHYS_SECTION_UNASSIGNED]; } @@ -756,6 +769,7 @@ void cpu_address_space_init(CPUState *cpu, int asidx, if (tcg_enabled()) { newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync; newas->tcg_as_listener.commit = tcg_commit; + newas->tcg_as_listener.name = "tcg"; memory_listener_register(&newas->tcg_as_listener, as); } } @@ -901,9 +915,9 @@ void mem_access_callback_remove_by_ref(CPUState *cpu, MemAccessCallback *cb) void mem_check_access_callback_vaddr(CPUState *cpu, vaddr addr, vaddr len, int flags, - void *iotlbentry) + void *tlbentryfull) { - ram_addr_t ram_addr = (((CPUIOTLBEntry *)iotlbentry)->addr + ram_addr_t ram_addr = (((CPUTLBEntryFull *)tlbentryfull)->xlat_section & TARGET_PAGE_MASK) + addr; mem_check_access_callback_ramaddr(cpu, ram_addr, len, flags); } @@ -995,7 +1009,7 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, */ if (!cpu->can_do_io) { /* Force execution of one insn next time. */ - cpu->cflags_next_tb = 1 | CF_LAST_IO | curr_cflags(cpu); + cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(cpu); cpu_loop_exit_restore(cpu, ra); } /* @@ -1012,29 +1026,26 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, } wp->hitaddr = MAX(addr, wp->vaddr); wp->hitattrs = attrs; - if (!cpu->watchpoint_hit) { - if (wp->flags & BP_CPU && cc->tcg_ops->debug_check_watchpoint && - !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) { - wp->flags &= ~BP_WATCHPOINT_HIT; - continue; - } - cpu->watchpoint_hit = wp; - mmap_lock(); - tb_check_watchpoint(cpu, ra); - if (wp->flags & BP_STOP_BEFORE_ACCESS) { - cpu->exception_index = EXCP_DEBUG; - mmap_unlock(); - cpu_loop_exit_restore(cpu, ra); - } else { - /* Force execution of one insn next time. */ - cpu->cflags_next_tb = 1 | curr_cflags(cpu); - mmap_unlock(); - if (ra) { - cpu_restore_state(cpu, ra, true); - } - cpu_loop_exit_noexc(cpu); - } + if (wp->flags & BP_CPU && cc->tcg_ops->debug_check_watchpoint && + !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) { + wp->flags &= ~BP_WATCHPOINT_HIT; + continue; + } + cpu->watchpoint_hit = wp; + + mmap_lock(); + /* This call also restores vCPU state */ + tb_check_watchpoint(cpu, ra); + if (wp->flags & BP_STOP_BEFORE_ACCESS) { + cpu->exception_index = EXCP_DEBUG; + mmap_unlock(); + cpu_loop_exit(cpu); + } else { + /* Force execution of one insn next time. 
*/ + cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(cpu); + mmap_unlock(); + cpu_loop_exit_noexc(cpu); } } else { wp->flags &= ~BP_WATCHPOINT_HIT; @@ -1382,32 +1393,28 @@ void qemu_mutex_unlock_ramlist(void) qemu_mutex_unlock(&ram_list.mutex); } -void ram_block_dump(Monitor *mon) +GString *ram_block_format(void) { RAMBlock *block; char *psize; + GString *buf = g_string_new(""); RCU_READ_LOCK_GUARD(); - monitor_printf(mon, "%24s %8s %18s %18s %18s\n", - "Block Name", "PSize", "Offset", "Used", "Total"); + g_string_append_printf(buf, "%24s %8s %18s %18s %18s\n", + "Block Name", "PSize", "Offset", "Used", "Total"); RAMBLOCK_FOREACH(block) { psize = size_to_str(block->page_size); - monitor_printf(mon, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64 - " 0x%016" PRIx64 "\n", block->idstr, psize, - (uint64_t)block->offset, - (uint64_t)block->used_length, - (uint64_t)block->max_length); + g_string_append_printf(buf, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64 + " 0x%016" PRIx64 "\n", block->idstr, psize, + (uint64_t)block->offset, + (uint64_t)block->used_length, + (uint64_t)block->max_length); g_free(psize); } + + return buf; } -#ifdef __linux__ -/* - * FIXME TOCTTOU: this iterates over memory backends' mem-path, which - * may or may not name the same files / on the same filesystem now as - * when we actually open and map them. Iterate over the file - * descriptors instead, and use qemu_fd_getpagesize(). - */ static int find_min_backend_pagesize(Object *obj, void *opaque) { long *hpsize_min = opaque; @@ -1461,16 +1468,6 @@ long qemu_maxrampagesize(void) object_child_foreach(memdev_root, find_max_backend_pagesize, &pagesize); return pagesize; } -#else -long qemu_minrampagesize(void) -{ - return qemu_real_host_page_size; -} -long qemu_maxrampagesize(void) -{ - return qemu_real_host_page_size; -} -#endif #ifdef CONFIG_POSIX static int64_t get_file_size(int fd) @@ -1535,6 +1532,9 @@ static int64_t get_file_align(int fd) path = g_strdup_printf("/sys/dev/char/%d:%d", major(st.st_rdev), minor(st.st_rdev)); rpath = realpath(path, NULL); + if (!rpath) { + return -errno; + } rc = daxctl_new(&ctx); if (rc) { @@ -1832,6 +1832,11 @@ void qemu_ram_unset_migratable(RAMBlock *rb) rb->flags &= ~RAM_MIGRATABLE; } +int qemu_ram_get_fd(RAMBlock *rb) +{ + return rb->fd; +} + /* Called with iothread lock held. */ void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev) { @@ -2136,7 +2141,8 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr, int64_t file_size, file_align; /* Just support these ram flags by now. 
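ram_block_dump() used to print straight to a Monitor; ram_block_format() instead builds a GString so the table can be reused outside HMP. The caller owns the returned string. A small usage sketch (the include is an assumption about where the prototype lives):

#include "qemu/osdep.h"
#include "exec/cpu-common.h"   /* assumed location of ram_block_format() */

static void demo_dump_ram_blocks(void)
{
    GString *buf = ram_block_format();

    /* Print the table, then free the GString together with its buffer. */
    printf("%s", buf->str);
    g_string_free(buf, TRUE);
}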
*/ - assert((ram_flags & ~(RAM_SHARED | RAM_PMEM | RAM_NORESERVE)) == 0); + assert((ram_flags & ~(RAM_SHARED | RAM_PMEM | RAM_NORESERVE | + RAM_PROTECTED)) == 0); if (xen_enabled()) { error_setg(errp, "-mem-path not supported with Xen"); @@ -2159,7 +2165,7 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr, } file_align = get_file_align(fd); - if (file_align > 0 && mr && file_align > mr->align) { + if (file_align > 0 && file_align > mr->align) { error_setg(errp, "backing store align 0x%" PRIx64 " is larger than 'align' option 0x%" PRIx64, file_align, mr->align); @@ -2240,7 +2246,7 @@ RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size, new_block->max_length = max_size; assert(max_size >= size); new_block->fd = -1; - new_block->page_size = qemu_real_host_page_size; + new_block->page_size = qemu_real_host_page_size(); new_block->host = host; new_block->flags = ram_flags; ram_block_add(new_block, &local_err); @@ -2526,6 +2532,18 @@ ram_addr_t qemu_ram_addr_from_host(void *ptr) return block->offset + offset; } +ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) +{ + ram_addr_t ram_addr; + + ram_addr = qemu_ram_addr_from_host(ptr); + if (ram_addr == RAM_ADDR_INVALID) { + error_report("Bad ram pointer %p", ptr); + abort(); + } + return ram_addr; +} + static MemTxResult flatview_read(FlatView *fv, hwaddr addr, MemTxAttrs attrs, void *buf, hwaddr len); static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, @@ -2712,7 +2730,7 @@ static void tcg_log_global_after_sync(MemoryListener *listener) * In record/replay mode this causes a deadlock, because * run_on_cpu waits for rr mutex. Therefore no races are possible * in this case and no need for making run_on_cpu when - * record/replay is not enabled. + * record/replay is enabled. */ cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL); @@ -2796,7 +2814,7 @@ void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size) invalidate_and_set_dirty(mr, addr, size); } -static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) +int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) { unsigned access_size_max = mr->ops->valid.max_access_size; @@ -2823,7 +2841,7 @@ static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) return l; } -static bool prepare_mmio_access(MemoryRegion *mr) +bool prepare_mmio_access(MemoryRegion *mr) { bool release_lock = false; @@ -2838,6 +2856,33 @@ static bool prepare_mmio_access(MemoryRegion *mr) return release_lock; } +/** + * flatview_access_allowed + * @mr: #MemoryRegion to be accessed + * @attrs: memory transaction attributes + * @addr: address within that memory region + * @len: the number of bytes to access + * + * Check if a memory transaction is allowed. + * + * Returns: true if transaction is allowed, false if denied. + */ +static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs, + hwaddr addr, hwaddr len) +{ + if (likely(!attrs.memory)) { + return true; + } + if (memory_region_is_ram(mr)) { + return true; + } + qemu_log_mask(LOG_GUEST_ERROR, + "Invalid access to non-RAM device at " + "addr 0x%" HWADDR_PRIX ", size %" HWADDR_PRIu ", " + "region '%s'\n", addr, len, memory_region_name(mr)); + return false; +} + /* Called within RCU critical section. 
*/ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, MemTxAttrs attrs, @@ -2858,7 +2903,10 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, #endif for (;;) { - if (!memory_access_is_direct(mr, true)) { + if (!flatview_access_allowed(mr, attrs, addr1, l)) { + result |= MEMTX_ACCESS_ERROR; + /* Keep going. */ + } else if (!memory_access_is_direct(mr, true)) { release_lock |= prepare_mmio_access(mr); l = memory_access_size(mr, l, addr1); /* XXX: could force current_cpu to NULL to avoid @@ -2900,14 +2948,14 @@ static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, hwaddr l; hwaddr addr1; MemoryRegion *mr; - MemTxResult result = MEMTX_OK; l = len; mr = flatview_translate(fv, addr, &addr1, &l, true, attrs); - result = flatview_write_continue(fv, addr, attrs, buf, len, - addr1, l, mr); - - return result; + if (!flatview_access_allowed(mr, attrs, addr, len)) { + return MEMTX_ACCESS_ERROR; + } + return flatview_write_continue(fv, addr, attrs, buf, len, + addr1, l, mr); } /* Called within RCU critical section. */ @@ -2930,7 +2978,10 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, fuzz_dma_read_cb(addr, len, mr); for (;;) { - if (!memory_access_is_direct(mr, false)) { + if (!flatview_access_allowed(mr, attrs, addr1, l)) { + result |= MEMTX_ACCESS_ERROR; + /* Keep going. */ + } else if (!memory_access_is_direct(mr, false)) { /* I/O case */ release_lock |= prepare_mmio_access(mr); l = memory_access_size(mr, l, addr1); @@ -2973,6 +3024,9 @@ static MemTxResult flatview_read(FlatView *fv, hwaddr addr, l = len; mr = flatview_translate(fv, addr, &addr1, &l, false, attrs); + if (!flatview_access_allowed(mr, attrs, addr, len)) { + return MEMTX_ACCESS_ERROR; + } return flatview_read_continue(fv, addr, attrs, buf, len, addr1, l, mr); } @@ -3018,6 +3072,25 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, } } +MemTxResult address_space_set(AddressSpace *as, hwaddr addr, + uint8_t c, hwaddr len, MemTxAttrs attrs) +{ +#define FILLBUF_SIZE 512 + uint8_t fillbuf[FILLBUF_SIZE]; + int l; + MemTxResult error = MEMTX_OK; + + memset(fillbuf, c, FILLBUF_SIZE); + while (len > 0) { + l = len < FILLBUF_SIZE ? 
len : FILLBUF_SIZE; + error |= address_space_write(as, addr, attrs, fillbuf, l); + len -= l; + addr += l; + } + + return error; +} + void cpu_physical_memory_rw(hwaddr addr, void *buf, hwaddr len, bool is_write) { @@ -3210,12 +3283,10 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, MemTxAttrs attrs) { FlatView *fv; - bool result; RCU_READ_LOCK_GUARD(); fv = address_space_to_flatview(as); - result = flatview_access_valid(fv, addr, len, is_write, attrs); - return result; + return flatview_access_valid(fv, addr, len, is_write, attrs); } static hwaddr @@ -3507,11 +3578,11 @@ address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr, #include "memory_ldst.c.inc" /* virtual memory access for debug (includes writing to ROM) */ -int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, - void *ptr, target_ulong len, bool is_write) +int cpu_memory_rw_debug(CPUState *cpu, vaddr addr, + void *ptr, size_t len, bool is_write) { hwaddr phys_addr; - target_ulong l, page; + vaddr l, page; uint8_t *buf = ptr; cpu_synchronize_state(cpu); @@ -3737,7 +3808,7 @@ void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root) " %s%s%s%s%s", i, s->offset_within_address_space, - s->offset_within_address_space + MR_SIZE(s->mr->size), + s->offset_within_address_space + MR_SIZE(s->size), s->mr->name ? s->mr->name : "(noname)", i < ARRAY_SIZE(names) ? names[i] : "", s->mr == root ? " [ROOT]" : "", diff --git a/softmmu/qdev-monitor.c b/softmmu/qdev-monitor.c index 721dec2d82..4b0ef65780 100644 --- a/softmmu/qdev-monitor.c +++ b/softmmu/qdev-monitor.c @@ -28,6 +28,8 @@ #include "qapi/qmp/dispatch.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qerror.h" +#include "qapi/qmp/qstring.h" +#include "qapi/qobject-input-visitor.h" #include "qemu/config-file.h" #include "qemu/error-report.h" #include "qemu/help_option.h" @@ -40,6 +42,7 @@ #include "qemu/cutils.h" #include "hw/qdev-properties.h" #include "hw/clock.h" +#include "hw/boards.h" /* * Aliases were a bad idea from the start. Let's keep them @@ -52,6 +55,16 @@ typedef struct QDevAlias uint32_t arch_mask; } QDevAlias; +/* default virtio transport per architecture */ +#define QEMU_ARCH_VIRTIO_PCI (QEMU_ARCH_ALPHA | QEMU_ARCH_ARM | \ + QEMU_ARCH_HPPA | QEMU_ARCH_I386 | \ + QEMU_ARCH_MIPS | QEMU_ARCH_PPC | \ + QEMU_ARCH_RISCV | QEMU_ARCH_SH4 | \ + QEMU_ARCH_SPARC | QEMU_ARCH_XTENSA | \ + QEMU_ARCH_LOONGARCH) +#define QEMU_ARCH_VIRTIO_CCW (QEMU_ARCH_S390X) +#define QEMU_ARCH_VIRTIO_MMIO (QEMU_ARCH_M68K) + /* Please keep this table sorted by typename. 
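Two small additions above are easy to miss: flatview_access_allowed() lets a transaction set attrs.memory to insist on plain RAM (anything else fails with MEMTX_ACCESS_ERROR), and address_space_set() fills a guest-physical range with a byte value. A hedged usage sketch, with an arbitrary example address:

#include "qemu/osdep.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

static void demo_ram_only_io(void)
{
    uint8_t data[16] = { 0 };
    MemTxAttrs attrs = { .memory = 1 };   /* refuse MMIO destinations */
    MemTxResult res;

    res = address_space_write(&address_space_memory, 0x40000000, attrs,
                              data, sizeof(data));
    if (res & MEMTX_ACCESS_ERROR) {
        /* The range was (at least partly) backed by a device, not RAM. */
    }

    /* Zero a page of guest memory without staging a buffer by hand. */
    address_space_set(&address_space_memory, 0x40000000, 0, 4096,
                      MEMTXATTRS_UNSPECIFIED);
}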
*/ static const QDevAlias qdev_alias_table[] = { { "AC97", "ac97" }, /* -soundhw name */ @@ -71,6 +84,8 @@ static const QDevAlias qdev_alias_table[] = { { "virtio-gpu-device", "virtio-gpu", QEMU_ARCH_VIRTIO_MMIO }, { "virtio-gpu-ccw", "virtio-gpu", QEMU_ARCH_VIRTIO_CCW }, { "virtio-gpu-pci", "virtio-gpu", QEMU_ARCH_VIRTIO_PCI }, + { "virtio-gpu-gl-device", "virtio-gpu-gl", QEMU_ARCH_VIRTIO_MMIO }, + { "virtio-gpu-gl-pci", "virtio-gpu-gl", QEMU_ARCH_VIRTIO_PCI }, { "virtio-input-host-device", "virtio-input-host", QEMU_ARCH_VIRTIO_MMIO }, { "virtio-input-host-ccw", "virtio-input-host", QEMU_ARCH_VIRTIO_CCW }, { "virtio-input-host-pci", "virtio-input-host", QEMU_ARCH_VIRTIO_PCI }, @@ -153,6 +168,7 @@ static void qdev_print_devinfos(bool show_no_user) [DEVICE_CATEGORY_SOUND] = "Sound", [DEVICE_CATEGORY_MISC] = "Misc", [DEVICE_CATEGORY_CPU] = "CPU", + [DEVICE_CATEGORY_WATCHDOG]= "Watchdog", [DEVICE_CATEGORY_MAX] = "Uncategorized", }; GSList *list, *elt; @@ -185,22 +201,6 @@ static void qdev_print_devinfos(bool show_no_user) g_slist_free(list); } -static int set_property(void *opaque, const char *name, const char *value, - Error **errp) -{ - Object *obj = opaque; - - if (strcmp(name, "driver") == 0) - return 0; - if (strcmp(name, "bus") == 0) - return 0; - - if (!object_property_parse(obj, name, value, errp)) { - return -1; - } - return 0; -} - static const char *find_typename_by_alias(const char *alias) { int i; @@ -259,6 +259,16 @@ static DeviceClass *qdev_get_device_class(const char **driver, Error **errp) return NULL; } + if (object_class_dynamic_cast(oc, TYPE_SYS_BUS_DEVICE)) { + /* sysbus devices need to be allowed by the machine */ + MachineClass *mc = MACHINE_CLASS(object_get_class(qdev_get_machine())); + if (!device_type_is_dynamic_sysbus(mc, *driver)) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "driver", + "a dynamic sysbus device type for the machine"); + return NULL; + } + } + return dc; } @@ -426,7 +436,12 @@ static DeviceState *qbus_find_dev(BusState *bus, char *elem) static inline bool qbus_is_full(BusState *bus) { - BusClass *bus_class = BUS_GET_CLASS(bus); + BusClass *bus_class; + + if (bus->full) { + return true; + } + bus_class = BUS_GET_CLASS(bus); return bus_class->max_dev && bus->num_children >= bus_class->max_dev; } @@ -564,32 +579,49 @@ static BusState *qbus_find(const char *path, Error **errp) return bus; } -void qdev_set_id(DeviceState *dev, const char *id) +/* Takes ownership of @id, will be freed when deleting the device */ +const char *qdev_set_id(DeviceState *dev, char *id, Error **errp) { - if (id) { - dev->id = id; - } + ObjectProperty *prop; - if (dev->id) { - object_property_add_child(qdev_get_peripheral(), dev->id, - OBJECT(dev)); + assert(!dev->id && !dev->realized); + + /* + * object_property_[try_]add_child() below will assert the device + * has no parent + */ + if (id) { + prop = object_property_try_add_child(qdev_get_peripheral(), id, + OBJECT(dev), NULL); + if (prop) { + dev->id = id; + } else { + error_setg(errp, "Duplicate device ID '%s'", id); + g_free(id); + return NULL; + } } else { static int anon_count; gchar *name = g_strdup_printf("device[%d]", anon_count++); - object_property_add_child(qdev_get_peripheral_anon(), name, - OBJECT(dev)); + prop = object_property_add_child(qdev_get_peripheral_anon(), name, + OBJECT(dev)); g_free(name); } + + return prop->name; } -DeviceState *qdev_device_add(QemuOpts *opts, Error **errp) +DeviceState *qdev_device_add_from_qdict(const QDict *opts, + bool from_json, Error **errp) { + ERRP_GUARD(); DeviceClass 
*dc; const char *driver, *path; + char *id; DeviceState *dev = NULL; BusState *bus = NULL; - driver = qemu_opt_get(opts, "driver"); + driver = qdict_get_try_str(opts, "driver"); if (!driver) { error_setg(errp, QERR_MISSING_PARAMETER, "driver"); return NULL; @@ -602,7 +634,7 @@ DeviceState *qdev_device_add(QemuOpts *opts, Error **errp) } /* find bus */ - path = qemu_opt_get(opts, "bus"); + path = qdict_get_try_str(opts, "bus"); if (path != NULL) { bus = qbus_find(path, errp); if (!bus) { @@ -622,17 +654,13 @@ DeviceState *qdev_device_add(QemuOpts *opts, Error **errp) } } - if (qemu_opt_get(opts, "failover_pair_id")) { - if (!opts->id) { - error_setg(errp, "Device with failover_pair_id don't have id"); - return NULL; - } - if (qdev_should_hide_device(opts)) { - if (bus && !qbus_is_hotpluggable(bus)) { - error_setg(errp, QERR_BUS_NO_HOTPLUG, bus->name); - } - return NULL; + if (qdev_should_hide_device(opts, from_json, errp)) { + if (bus && !qbus_is_hotpluggable(bus)) { + error_setg(errp, QERR_BUS_NO_HOTPLUG, bus->name); } + return NULL; + } else if (*errp) { + return NULL; } if (phase_check(PHASE_MACHINE_READY) && bus && !qbus_is_hotpluggable(bus)) { @@ -662,16 +690,28 @@ DeviceState *qdev_device_add(QemuOpts *opts, Error **errp) } } - qdev_set_id(dev, qemu_opts_id(opts)); - - /* set properties */ - if (qemu_opt_foreach(opts, set_property, dev, errp)) { + /* + * set dev's parent and register its id. + * If it fails it means the id is already taken. + */ + id = g_strdup(qdict_get_try_str(opts, "id")); + if (!qdev_set_id(dev, id, errp)) { + goto err_del_dev; + } + + /* set properties */ + dev->opts = qdict_clone_shallow(opts); + qdict_del(dev->opts, "driver"); + qdict_del(dev->opts, "bus"); + qdict_del(dev->opts, "id"); + + object_set_properties_from_keyval(&dev->parent_obj, dev->opts, from_json, + errp); + if (*errp) { goto err_del_dev; } - dev->opts = opts; if (!qdev_realize(DEVICE(dev), bus, errp)) { - dev->opts = NULL; goto err_del_dev; } return dev; @@ -684,6 +724,19 @@ err_del_dev: return NULL; } +/* Takes ownership of @opts on success */ +DeviceState *qdev_device_add(QemuOpts *opts, Error **errp) +{ + QDict *qdict = qemu_opts_to_qdict(opts, NULL); + DeviceState *ret; + + ret = qdev_device_add_from_qdict(qdict, false, errp); + if (ret) { + qemu_opts_del(opts); + } + qobject_unref(qdict); + return ret; +} #define qdev_printf(fmt, ...) 
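The qdev_set_id() change above makes the function take ownership of the caller-allocated id string and report duplicate IDs through errp instead of failing later. A sketch of the calling convention; the surrounding device creation and the include for the prototype are assumptions, not part of the patch:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/qdev-core.h"
#include "monitor/qdev.h"   /* assumed home of the qdev_set_id() prototype */

static DeviceState *demo_create_named(const char *driver, const char *id,
                                      Error **errp)
{
    DeviceState *dev = qdev_new(driver);

    /* qdev_set_id() frees the string on failure and keeps it on success. */
    if (!qdev_set_id(dev, g_strdup(id), errp)) {
        object_unref(OBJECT(dev));
        return NULL;
    }
    return dev;
}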
monitor_printf(mon, "%*s" fmt, indent, "", ## __VA_ARGS__) static void qbus_print(Monitor *mon, BusState *bus, int indent); @@ -821,18 +874,8 @@ void qmp_device_add(QDict *qdict, QObject **ret_data, Error **errp) static DeviceState *find_device_state(const char *id, Error **errp) { - Object *obj; - - if (id[0] == '/') { - obj = object_resolve_path(id, NULL); - } else { - char *root_path = object_get_canonical_path(qdev_get_peripheral()); - char *path = g_strdup_printf("%s/%s", root_path, id); - - g_free(root_path); - obj = object_resolve_path_type(path, TYPE_DEVICE, NULL); - g_free(path); - } + Object *obj = object_resolve_path_at(qdev_get_peripheral(), id); + DeviceState *dev; if (!obj) { error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, @@ -840,12 +883,13 @@ static DeviceState *find_device_state(const char *id, Error **errp) return NULL; } - if (!object_dynamic_cast(obj, TYPE_DEVICE)) { + dev = (DeviceState *)object_dynamic_cast(obj, TYPE_DEVICE); + if (!dev) { error_setg(errp, "%s is not a hotpluggable device", id); return NULL; } - return DEVICE(obj); + return dev; } void qdev_unplug(DeviceState *dev, Error **errp) @@ -855,6 +899,10 @@ void qdev_unplug(DeviceState *dev, Error **errp) HotplugHandlerClass *hdc; Error *local_err = NULL; + if (qdev_unplug_blocked(dev, errp)) { + return; + } + if (dev->parent_bus && !qbus_is_hotpluggable(dev->parent_bus)) { error_setg(errp, QERR_BUS_NO_HOTPLUG, dev->parent_bus->name); return; @@ -896,7 +944,9 @@ void qmp_device_del(const char *id, Error **errp) { DeviceState *dev = find_device_state(id, errp); if (dev != NULL) { - if (dev->pending_deleted_event) { + if (dev->pending_deleted_event && + (dev->pending_deleted_expires_ms == 0 || + dev->pending_deleted_expires_ms > qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL))) { error_setg(errp, "Device %s is already in the " "process of unplug", id); return; @@ -928,6 +978,8 @@ BlockBackend *blk_by_qdev_id(const char *id, Error **errp) DeviceState *dev; BlockBackend *blk; + GLOBAL_STATE_CODE(); + dev = find_device_state(id, errp); if (dev == NULL) { return NULL; @@ -991,6 +1043,13 @@ int qemu_global_option(const char *str) if (!opts) { return -1; } + if (!qemu_opt_get(opts, "driver") + || !qemu_opt_get(opts, "property") + || !qemu_opt_get(opts, "value")) { + error_report("options 'driver', 'property', and 'value'" + " are required"); + return -1; + } return 0; } diff --git a/softmmu/qemu-seccomp.c b/softmmu/qemu-seccomp.c index f50026778c..d66a2a1226 100644 --- a/softmmu/qemu-seccomp.c +++ b/softmmu/qemu-seccomp.c @@ -38,6 +38,7 @@ struct QemuSeccompSyscall { uint8_t set; uint8_t narg; const struct scmp_arg_cmp *arg_cmp; + uint32_t action; }; const struct scmp_arg_cmp sched_setscheduler_arg[] = { @@ -45,63 +46,223 @@ const struct scmp_arg_cmp sched_setscheduler_arg[] = { { .arg = 1, .op = SCMP_CMP_NE, .datum_a = SCHED_IDLE } }; +/* + * See 'NOTES' in 'man 2 clone' - s390 & cross have 'flags' in + * different position to other architectures + */ +#if defined(HOST_S390X) || defined(HOST_S390) || defined(HOST_CRIS) +#define CLONE_FLAGS_ARG 1 +#else +#define CLONE_FLAGS_ARG 0 +#endif + +#ifndef CLONE_PIDFD +# define CLONE_PIDFD 0x00001000 +#endif + +#define REQUIRE_CLONE_FLAG(flag) \ + const struct scmp_arg_cmp clone_arg ## flag[] = { \ + { .arg = CLONE_FLAGS_ARG, \ + .op = SCMP_CMP_MASKED_EQ, \ + .datum_a = flag, .datum_b = 0 } } + +#define FORBID_CLONE_FLAG(flag) \ + const struct scmp_arg_cmp clone_arg ## flag[] = { \ + { .arg = CLONE_FLAGS_ARG, \ + .op = SCMP_CMP_MASKED_EQ, \ + .datum_a = flag, .datum_b = flag } } + 
+#define RULE_CLONE_FLAG(flag) \ + { SCMP_SYS(clone), QEMU_SECCOMP_SET_SPAWN, \ + ARRAY_SIZE(clone_arg ## flag), clone_arg ## flag, SCMP_ACT_TRAP } + +/* If no CLONE_* flags are set, except CSIGNAL, deny */ +const struct scmp_arg_cmp clone_arg_none[] = { + { .arg = CLONE_FLAGS_ARG, + .op = SCMP_CMP_MASKED_EQ, + .datum_a = ~(CSIGNAL), .datum_b = 0 } +}; + +/* + * pthread_create should always set all of these. + */ +REQUIRE_CLONE_FLAG(CLONE_VM); +REQUIRE_CLONE_FLAG(CLONE_FS); +REQUIRE_CLONE_FLAG(CLONE_FILES); +REQUIRE_CLONE_FLAG(CLONE_SIGHAND); +REQUIRE_CLONE_FLAG(CLONE_THREAD); +REQUIRE_CLONE_FLAG(CLONE_SYSVSEM); +REQUIRE_CLONE_FLAG(CLONE_SETTLS); +REQUIRE_CLONE_FLAG(CLONE_PARENT_SETTID); +REQUIRE_CLONE_FLAG(CLONE_CHILD_CLEARTID); +/* + * Musl sets this in pthread_create too, but it is + * obsolete and harmless since its behaviour is + * subsumed under CLONE_THREAD + */ +/*REQUIRE_CLONE_FLAG(CLONE_DETACHED);*/ + + +/* + * These all indicate an attempt to spawn a process + * instead of a thread, or other undesirable scenarios + */ +FORBID_CLONE_FLAG(CLONE_PIDFD); +FORBID_CLONE_FLAG(CLONE_PTRACE); +FORBID_CLONE_FLAG(CLONE_VFORK); +FORBID_CLONE_FLAG(CLONE_PARENT); +FORBID_CLONE_FLAG(CLONE_NEWNS); +FORBID_CLONE_FLAG(CLONE_UNTRACED); +FORBID_CLONE_FLAG(CLONE_NEWCGROUP); +FORBID_CLONE_FLAG(CLONE_NEWUTS); +FORBID_CLONE_FLAG(CLONE_NEWIPC); +FORBID_CLONE_FLAG(CLONE_NEWUSER); +FORBID_CLONE_FLAG(CLONE_NEWPID); +FORBID_CLONE_FLAG(CLONE_NEWNET); +FORBID_CLONE_FLAG(CLONE_IO); + + static const struct QemuSeccompSyscall denylist[] = { /* default set of syscalls that should get blocked */ - { SCMP_SYS(reboot), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(swapon), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(swapoff), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(syslog), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(mount), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(umount), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(kexec_load), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(afs_syscall), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(break), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(ftime), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(getpmsg), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(gtty), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(lock), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(mpx), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(prof), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(profil), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(putpmsg), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(security), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(stty), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(tuxcall), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(ulimit), QEMU_SECCOMP_SET_DEFAULT }, - { SCMP_SYS(vserver), QEMU_SECCOMP_SET_DEFAULT }, + { SCMP_SYS(reboot), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(swapon), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(swapoff), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(syslog), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(mount), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(umount), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(kexec_load), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(afs_syscall), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(break), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(ftime), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(getpmsg), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { 
SCMP_SYS(gtty), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(lock), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(mpx), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(prof), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(profil), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(putpmsg), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(security), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(stty), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(tuxcall), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(ulimit), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(vserver), QEMU_SECCOMP_SET_DEFAULT, + 0, NULL, SCMP_ACT_TRAP }, /* obsolete */ - { SCMP_SYS(readdir), QEMU_SECCOMP_SET_OBSOLETE }, - { SCMP_SYS(_sysctl), QEMU_SECCOMP_SET_OBSOLETE }, - { SCMP_SYS(bdflush), QEMU_SECCOMP_SET_OBSOLETE }, - { SCMP_SYS(create_module), QEMU_SECCOMP_SET_OBSOLETE }, - { SCMP_SYS(get_kernel_syms), QEMU_SECCOMP_SET_OBSOLETE }, - { SCMP_SYS(query_module), QEMU_SECCOMP_SET_OBSOLETE }, - { SCMP_SYS(sgetmask), QEMU_SECCOMP_SET_OBSOLETE }, - { SCMP_SYS(ssetmask), QEMU_SECCOMP_SET_OBSOLETE }, - { SCMP_SYS(sysfs), QEMU_SECCOMP_SET_OBSOLETE }, - { SCMP_SYS(uselib), QEMU_SECCOMP_SET_OBSOLETE }, - { SCMP_SYS(ustat), QEMU_SECCOMP_SET_OBSOLETE }, + { SCMP_SYS(readdir), QEMU_SECCOMP_SET_OBSOLETE, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(_sysctl), QEMU_SECCOMP_SET_OBSOLETE, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(bdflush), QEMU_SECCOMP_SET_OBSOLETE, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(create_module), QEMU_SECCOMP_SET_OBSOLETE, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(get_kernel_syms), QEMU_SECCOMP_SET_OBSOLETE, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(query_module), QEMU_SECCOMP_SET_OBSOLETE, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(sgetmask), QEMU_SECCOMP_SET_OBSOLETE, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(ssetmask), QEMU_SECCOMP_SET_OBSOLETE, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(sysfs), QEMU_SECCOMP_SET_OBSOLETE, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(uselib), QEMU_SECCOMP_SET_OBSOLETE, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(ustat), QEMU_SECCOMP_SET_OBSOLETE, + 0, NULL, SCMP_ACT_TRAP }, /* privileged */ - { SCMP_SYS(setuid), QEMU_SECCOMP_SET_PRIVILEGED }, - { SCMP_SYS(setgid), QEMU_SECCOMP_SET_PRIVILEGED }, - { SCMP_SYS(setpgid), QEMU_SECCOMP_SET_PRIVILEGED }, - { SCMP_SYS(setsid), QEMU_SECCOMP_SET_PRIVILEGED }, - { SCMP_SYS(setreuid), QEMU_SECCOMP_SET_PRIVILEGED }, - { SCMP_SYS(setregid), QEMU_SECCOMP_SET_PRIVILEGED }, - { SCMP_SYS(setresuid), QEMU_SECCOMP_SET_PRIVILEGED }, - { SCMP_SYS(setresgid), QEMU_SECCOMP_SET_PRIVILEGED }, - { SCMP_SYS(setfsuid), QEMU_SECCOMP_SET_PRIVILEGED }, - { SCMP_SYS(setfsgid), QEMU_SECCOMP_SET_PRIVILEGED }, + { SCMP_SYS(setuid), QEMU_SECCOMP_SET_PRIVILEGED, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(setgid), QEMU_SECCOMP_SET_PRIVILEGED, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(setpgid), QEMU_SECCOMP_SET_PRIVILEGED, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(setsid), QEMU_SECCOMP_SET_PRIVILEGED, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(setreuid), QEMU_SECCOMP_SET_PRIVILEGED, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(setregid), QEMU_SECCOMP_SET_PRIVILEGED, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(setresuid), QEMU_SECCOMP_SET_PRIVILEGED, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(setresgid), QEMU_SECCOMP_SET_PRIVILEGED, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(setfsuid), 
QEMU_SECCOMP_SET_PRIVILEGED, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(setfsgid), QEMU_SECCOMP_SET_PRIVILEGED, + 0, NULL, SCMP_ACT_TRAP }, /* spawn */ - { SCMP_SYS(fork), QEMU_SECCOMP_SET_SPAWN }, - { SCMP_SYS(vfork), QEMU_SECCOMP_SET_SPAWN }, - { SCMP_SYS(execve), QEMU_SECCOMP_SET_SPAWN }, + { SCMP_SYS(fork), QEMU_SECCOMP_SET_SPAWN, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(vfork), QEMU_SECCOMP_SET_SPAWN, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(execve), QEMU_SECCOMP_SET_SPAWN, + 0, NULL, SCMP_ACT_TRAP }, + { SCMP_SYS(clone), QEMU_SECCOMP_SET_SPAWN, + ARRAY_SIZE(clone_arg_none), clone_arg_none, SCMP_ACT_TRAP }, + RULE_CLONE_FLAG(CLONE_VM), + RULE_CLONE_FLAG(CLONE_FS), + RULE_CLONE_FLAG(CLONE_FILES), + RULE_CLONE_FLAG(CLONE_SIGHAND), + RULE_CLONE_FLAG(CLONE_THREAD), + RULE_CLONE_FLAG(CLONE_SYSVSEM), + RULE_CLONE_FLAG(CLONE_SETTLS), + RULE_CLONE_FLAG(CLONE_PARENT_SETTID), + RULE_CLONE_FLAG(CLONE_CHILD_CLEARTID), + /*RULE_CLONE_FLAG(CLONE_DETACHED),*/ + RULE_CLONE_FLAG(CLONE_PIDFD), + RULE_CLONE_FLAG(CLONE_PTRACE), + RULE_CLONE_FLAG(CLONE_VFORK), + RULE_CLONE_FLAG(CLONE_PARENT), + RULE_CLONE_FLAG(CLONE_NEWNS), + RULE_CLONE_FLAG(CLONE_UNTRACED), + RULE_CLONE_FLAG(CLONE_NEWCGROUP), + RULE_CLONE_FLAG(CLONE_NEWUTS), + RULE_CLONE_FLAG(CLONE_NEWIPC), + RULE_CLONE_FLAG(CLONE_NEWUSER), + RULE_CLONE_FLAG(CLONE_NEWPID), + RULE_CLONE_FLAG(CLONE_NEWNET), + RULE_CLONE_FLAG(CLONE_IO), +#ifdef __SNR_clone3 + { SCMP_SYS(clone3), QEMU_SECCOMP_SET_SPAWN, + 0, NULL, SCMP_ACT_ERRNO(ENOSYS) }, +#endif +#ifdef __SNR_execveat + { SCMP_SYS(execveat), QEMU_SECCOMP_SET_SPAWN }, +#endif + { SCMP_SYS(setns), QEMU_SECCOMP_SET_SPAWN }, + { SCMP_SYS(unshare), QEMU_SECCOMP_SET_SPAWN }, /* resource control */ - { SCMP_SYS(setpriority), QEMU_SECCOMP_SET_RESOURCECTL }, - { SCMP_SYS(sched_setparam), QEMU_SECCOMP_SET_RESOURCECTL }, + { SCMP_SYS(setpriority), QEMU_SECCOMP_SET_RESOURCECTL, + 0, NULL, SCMP_ACT_ERRNO(EPERM) }, + { SCMP_SYS(sched_setparam), QEMU_SECCOMP_SET_RESOURCECTL, + 0, NULL, SCMP_ACT_ERRNO(EPERM) }, { SCMP_SYS(sched_setscheduler), QEMU_SECCOMP_SET_RESOURCECTL, - ARRAY_SIZE(sched_setscheduler_arg), sched_setscheduler_arg }, - { SCMP_SYS(sched_setaffinity), QEMU_SECCOMP_SET_RESOURCECTL }, + ARRAY_SIZE(sched_setscheduler_arg), sched_setscheduler_arg, + SCMP_ACT_ERRNO(EPERM) }, + { SCMP_SYS(sched_setaffinity), QEMU_SECCOMP_SET_RESOURCECTL, + 0, NULL, SCMP_ACT_ERRNO(EPERM) }, }; static inline __attribute__((unused)) int @@ -115,15 +276,11 @@ qemu_seccomp(unsigned int operation, unsigned int flags, void *args) #endif } -static uint32_t qemu_seccomp_get_action(int set) +static uint32_t qemu_seccomp_update_action(uint32_t action) { - switch (set) { - case QEMU_SECCOMP_SET_DEFAULT: - case QEMU_SECCOMP_SET_OBSOLETE: - case QEMU_SECCOMP_SET_PRIVILEGED: - case QEMU_SECCOMP_SET_SPAWN: { #if defined(SECCOMP_GET_ACTION_AVAIL) && defined(SCMP_ACT_KILL_PROCESS) && \ defined(SECCOMP_RET_KILL_PROCESS) + if (action == SCMP_ACT_TRAP) { static int kill_process = -1; if (kill_process == -1) { uint32_t action = SECCOMP_RET_KILL_PROCESS; @@ -137,16 +294,9 @@ static uint32_t qemu_seccomp_get_action(int set) if (kill_process == 1) { return SCMP_ACT_KILL_PROCESS; } + } #endif - return SCMP_ACT_TRAP; - } - - case QEMU_SECCOMP_SET_RESOURCECTL: - return SCMP_ACT_ERRNO(EPERM); - - default: - g_assert_not_reached(); - } + return action; } @@ -162,6 +312,19 @@ static int seccomp_start(uint32_t seccomp_opts, Error **errp) goto seccomp_return; } +#if defined(CONFIG_SECCOMP_SYSRAWRC) + /* + * This must be the first seccomp_attr_set() call to have 
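The spawn set now carries an action per rule: thread-style clone() flag combinations are allowed, process-style ones trap, and clone3() fails with ENOSYS so libc falls back to the filtered clone() path. Outside of QEMU, the same ENOSYS trick with plain libseccomp looks roughly like this (standalone sketch, not QEMU code):

#include <errno.h>
#include <stdio.h>
#include <seccomp.h>

int main(void)
{
    /* Allow everything by default, then degrade clone3() to ENOSYS. */
    scmp_filter_ctx ctx = seccomp_init(SCMP_ACT_ALLOW);
    int rc;

    if (!ctx) {
        return 1;
    }
#ifdef __SNR_clone3
    rc = seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOSYS), SCMP_SYS(clone3), 0);
    if (rc < 0) {
        fprintf(stderr, "seccomp_rule_add: %d\n", rc);
    }
#endif
    rc = seccomp_load(ctx);
    seccomp_release(ctx);
    return rc < 0;
}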
full + * error propagation from subsequent seccomp APIs. + */ + rc = seccomp_attr_set(ctx, SCMP_FLTATR_API_SYSRAWRC, 1); + if (rc != 0) { + error_setg_errno(errp, -rc, + "failed to set seccomp rawrc attribute"); + goto seccomp_return; + } +#endif + rc = seccomp_attr_set(ctx, SCMP_FLTATR_CTL_TSYNC, 1); if (rc != 0) { error_setg_errno(errp, -rc, @@ -175,7 +338,7 @@ static int seccomp_start(uint32_t seccomp_opts, Error **errp) continue; } - action = qemu_seccomp_get_action(denylist[i].set); + action = qemu_seccomp_update_action(denylist[i].action); rc = seccomp_rule_add_array(ctx, action, denylist[i].num, denylist[i].narg, denylist[i].arg_cmp); if (rc < 0) { diff --git a/softmmu/qtest.c b/softmmu/qtest.c index 72751e1fd8..d3e0ab4eda 100644 --- a/softmmu/qtest.c +++ b/softmmu/qtest.c @@ -19,6 +19,7 @@ #include "chardev/char-fe.h" #include "exec/ioport.h" #include "exec/memory.h" +#include "hw/qdev-core.h" #include "hw/irq.h" #include "qemu/accel.h" #include "sysemu/cpu-timers.h" @@ -57,12 +58,12 @@ static FILE *qtest_log_fp; static QTest *qtest; static GString *inbuf; static int irq_levels[MAX_IRQ]; -static qemu_timeval start_time; +static GTimer *timer; static bool qtest_opened; static void (*qtest_server_send)(void*, const char*); static void *qtest_server_send_opaque; -#define FMT_timeval "%ld.%06ld" +#define FMT_timeval "%.06f" /** * DOC: QTest Protocol @@ -263,31 +264,16 @@ static int hex2nib(char ch) } } -static void qtest_get_time(qemu_timeval *tv) -{ - qemu_gettimeofday(tv); - tv->tv_sec -= start_time.tv_sec; - tv->tv_usec -= start_time.tv_usec; - if (tv->tv_usec < 0) { - tv->tv_usec += 1000000; - tv->tv_sec -= 1; - } -} - static void qtest_send_prefix(CharBackend *chr) { - qemu_timeval tv; - if (!qtest_log_fp || !qtest_opened) { return; } - qtest_get_time(&tv); - fprintf(qtest_log_fp, "[S +" FMT_timeval "] ", - (long) tv.tv_sec, (long) tv.tv_usec); + fprintf(qtest_log_fp, "[S +" FMT_timeval "] ", g_timer_elapsed(timer, NULL)); } -static void GCC_FMT_ATTR(1, 2) qtest_log_send(const char *fmt, ...) +static void G_GNUC_PRINTF(1, 2) qtest_log_send(const char *fmt, ...) { va_list ap; @@ -317,7 +303,7 @@ static void qtest_send(CharBackend *chr, const char *str) qtest_server_send(qtest_server_send_opaque, str); } -static void GCC_FMT_ATTR(2, 3) qtest_sendf(CharBackend *chr, +static void G_GNUC_PRINTF(2, 3) qtest_sendf(CharBackend *chr, const char *fmt, ...) 
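The qtest log switches from qemu_timeval arithmetic to a GLib GTimer, so timestamps become elapsed seconds formatted with "%.06f". The pattern in isolation, as a standalone sketch:

#include <glib.h>
#include <stdio.h>

int main(void)
{
    GTimer *timer = g_timer_new();      /* starts running immediately */

    g_usleep(1500);                     /* stand-in for real work */
    printf("[S +%.06f] event\n", g_timer_elapsed(timer, NULL));

    g_clear_pointer(&timer, g_timer_destroy);
    return 0;
}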
{ va_list ap; @@ -385,12 +371,9 @@ static void qtest_process_command(CharBackend *chr, gchar **words) command = words[0]; if (qtest_log_fp) { - qemu_timeval tv; int i; - qtest_get_time(&tv); - fprintf(qtest_log_fp, "[R +" FMT_timeval "]", - (long) tv.tv_sec, (long) tv.tv_usec); + fprintf(qtest_log_fp, "[R +" FMT_timeval "]", g_timer_elapsed(timer, NULL)); for (i = 0; words[i]; i++) { fprintf(qtest_log_fp, " %s", words[i]); } @@ -731,7 +714,7 @@ static void qtest_process_command(CharBackend *chr, gchar **words) qtest_send(chr, "OK\n"); } else if (strcmp(words[0], "endianness") == 0) { qtest_send_prefix(chr); -#if defined(TARGET_WORDS_BIGENDIAN) +#if TARGET_BIG_ENDIAN qtest_sendf(chr, "OK big\n"); #else qtest_sendf(chr, "OK little\n"); @@ -770,12 +753,18 @@ static void qtest_process_command(CharBackend *chr, gchar **words) qtest_sendf(chr, "OK %"PRIi64"\n", (int64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); } else if (strcmp(words[0], "module_load") == 0) { + Error *local_err = NULL; + int rv; g_assert(words[1] && words[2]); qtest_send_prefix(chr); - if (module_load_one(words[1], words[2], false)) { + rv = module_load(words[1], words[2], &local_err); + if (rv > 0) { qtest_sendf(chr, "OK\n"); } else { + if (rv < 0) { + error_report_err(local_err); + } qtest_sendf(chr, "FAIL\n"); } } else if (qtest_enabled() && strcmp(words[0], "clock_set") == 0) { @@ -845,21 +834,20 @@ static void qtest_event(void *opaque, QEMUChrEvent event) for (i = 0; i < ARRAY_SIZE(irq_levels); i++) { irq_levels[i] = 0; } - qemu_gettimeofday(&start_time); + + g_clear_pointer(&timer, g_timer_destroy); + timer = g_timer_new(); qtest_opened = true; if (qtest_log_fp) { - fprintf(qtest_log_fp, "[I " FMT_timeval "] OPENED\n", - (long) start_time.tv_sec, (long) start_time.tv_usec); + fprintf(qtest_log_fp, "[I " FMT_timeval "] OPENED\n", g_timer_elapsed(timer, NULL)); } break; case CHR_EVENT_CLOSED: qtest_opened = false; if (qtest_log_fp) { - qemu_timeval tv; - qtest_get_time(&tv); - fprintf(qtest_log_fp, "[I +" FMT_timeval "] CLOSED\n", - (long) tv.tv_sec, (long) tv.tv_usec); + fprintf(qtest_log_fp, "[I +" FMT_timeval "] CLOSED\n", g_timer_elapsed(timer, NULL)); } + g_clear_pointer(&timer, g_timer_destroy); break; default: break; @@ -995,7 +983,7 @@ static void qtest_set_log(Object *obj, const char *value, Error **errp) QTest *q = QTEST(obj); if (qtest == q) { - error_setg(errp, QERR_PERMISSION_DENIED); + error_setg(errp, "Property 'log' can not be set now"); } else { g_free(q->log); q->log = g_strdup(value); @@ -1015,7 +1003,7 @@ static void qtest_set_chardev(Object *obj, const char *value, Error **errp) Chardev *chr; if (qtest == q) { - error_setg(errp, QERR_PERMISSION_DENIED); + error_setg(errp, "Property 'chardev' can not be set now"); return; } diff --git a/softmmu/rtc.c b/softmmu/rtc.c index 5632684fc9..7e2956f81e 100644 --- a/softmmu/rtc.c +++ b/softmmu/rtc.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/cutils.h" #include "qapi/error.h" #include "qapi/qmp/qerror.h" @@ -33,6 +32,7 @@ #include "qom/object.h" #include "sysemu/replay.h" #include "sysemu/sysemu.h" +#include "sysemu/rtc.h" static enum { RTC_BASE_UTC, diff --git a/softmmu/runstate.c b/softmmu/runstate.c index c1f0d7bcc5..b70a741158 100644 --- a/softmmu/runstate.c +++ b/softmmu/runstate.c @@ -30,7 +30,6 @@ #include "crypto/cipher.h" #include "crypto/init.h" #include "exec/cpu-common.h" -#include "exec/exec-all.h" #include "exec/gdbstub.h" #include "hw/boards.h" #include "migration/misc.h" @@ -41,9 +40,10 @@ #include 
"qapi/error.h" #include "qapi/qapi-commands-run-state.h" #include "qapi/qapi-events-run-state.h" -#include "qemu-common.h" #include "qemu/error-report.h" +#include "qemu/log.h" #include "qemu/job.h" +#include "qemu/log.h" #include "qemu/module.h" #include "qemu/plugin.h" #include "qemu/sockets.h" @@ -126,6 +126,7 @@ static const RunStateTransition runstate_transitions_def[] = { { RUN_STATE_RESTORE_VM, RUN_STATE_PRELAUNCH }, { RUN_STATE_COLO, RUN_STATE_RUNNING }, + { RUN_STATE_COLO, RUN_STATE_PRELAUNCH }, { RUN_STATE_COLO, RUN_STATE_SHUTDOWN}, { RUN_STATE_RUNNING, RUN_STATE_DEBUG }, @@ -440,11 +441,16 @@ void qemu_system_reset(ShutdownCause reason) cpu_synchronize_all_states(); if (mc && mc->reset) { - mc->reset(current_machine); + mc->reset(current_machine, reason); } else { - qemu_devices_reset(); + qemu_devices_reset(reason); } - if (reason && reason != SHUTDOWN_CAUSE_SUBSYSTEM_RESET) { + switch (reason) { + case SHUTDOWN_CAUSE_NONE: + case SHUTDOWN_CAUSE_SUBSYSTEM_RESET: + case SHUTDOWN_CAUSE_SNAPSHOT_LOAD: + break; + default: qapi_event_send_reset(shutdown_caused_by_guest(reason), reason); } cpu_synchronize_all_post_reset(); @@ -481,7 +487,8 @@ void qemu_system_guest_panicked(GuestPanicInformation *info) qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE, !!info, info); vm_stop(RUN_STATE_GUEST_PANICKED); - } else if (panic_action == PANIC_ACTION_SHUTDOWN) { + } else if (panic_action == PANIC_ACTION_SHUTDOWN || + panic_action == PANIC_ACTION_EXIT_FAILURE) { qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_POWEROFF, !!info, info); vm_stop(RUN_STATE_GUEST_PANICKED); @@ -661,7 +668,7 @@ void qemu_system_debug_request(void) qemu_notify_event(); } -static bool main_loop_should_exit(void) +static bool main_loop_should_exit(int *status) { RunState r; ShutdownCause request; @@ -679,6 +686,10 @@ static bool main_loop_should_exit(void) if (shutdown_action == SHUTDOWN_ACTION_PAUSE) { vm_stop(RUN_STATE_SHUTDOWN); } else { + if (request == SHUTDOWN_CAUSE_GUEST_PANIC && + panic_action == PANIC_ACTION_EXIT_FAILURE) { + *status = EXIT_FAILURE; + } return true; } } @@ -714,12 +725,14 @@ static bool main_loop_should_exit(void) return false; } -void qemu_main_loop(void) +int qemu_main_loop(void) { + int status = EXIT_SUCCESS; #ifdef CONFIG_PROFILER int64_t ti; #endif - while (!main_loop_should_exit()) { + + while (!main_loop_should_exit(&status)) { #ifdef CONFIG_PROFILER ti = profile_getclock(); #endif @@ -728,6 +741,8 @@ void qemu_main_loop(void) dev_time += profile_getclock() - ti; #endif } + + return status; } void qemu_add_exit_notifier(Notifier *notify) @@ -747,7 +762,9 @@ static void qemu_run_exit_notifiers(void) void qemu_init_subsystems(void) { +#ifndef XBOX Error *err = NULL; +#endif os_set_line_buffering(); diff --git a/softmmu/trace-events b/softmmu/trace-events index 7b278590a0..22606dc27b 100644 --- a/softmmu/trace-events +++ b/softmmu/trace-events @@ -15,9 +15,11 @@ memory_region_subpage_read(int cpu_index, void *mr, uint64_t offset, uint64_t va memory_region_subpage_write(int cpu_index, void *mr, uint64_t offset, uint64_t value, unsigned size) "cpu %d mr %p offset 0x%"PRIx64" value 0x%"PRIx64" size %u" memory_region_ram_device_read(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u" memory_region_ram_device_write(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u" +memory_region_sync_dirty(const char *mr, const char *listener, int 
global) "mr '%s' listener '%s' synced (global=%d)" flatview_new(void *view, void *root) "%p (root %p)" flatview_destroy(void *view, void *root) "%p (root %p)" flatview_destroy_rcu(void *view, void *root) "%p (root %p)" +global_dirty_changed(unsigned int bitmask) "bitmask 0x%"PRIx32 # softmmu.c vm_stop_flush_all(int ret) "ret %d" @@ -29,3 +31,10 @@ runstate_set(int current_state, const char *current_state_str, int new_state, co system_wakeup_request(int reason) "reason=%d" qemu_system_shutdown_request(int reason) "reason=%d" qemu_system_powerdown_request(void) "" + +#dirtylimit.c +dirtylimit_state_initialize(int max_cpus) "dirtylimit state initialize: max cpus %d" +dirtylimit_state_finalize(void) +dirtylimit_throttle_pct(int cpu_index, uint64_t pct, int64_t time_us) "CPU[%d] throttle percent: %" PRIu64 ", throttle adjust time %"PRIi64 " us" +dirtylimit_set_vcpu(int cpu_index, uint64_t quota) "CPU[%d] set dirty page rate limit %"PRIu64 +dirtylimit_vcpu_execute(int cpu_index, int64_t sleep_time_us) "CPU[%d] sleep %"PRIi64 " us" diff --git a/softmmu/vl.c b/softmmu/vl.c index 1ca15676fe..cdc485e3d3 100644 --- a/softmmu/vl.c +++ b/softmmu/vl.c @@ -23,10 +23,11 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" +#include "qemu/help-texts.h" #include "qemu/datadir.h" #include "qemu/units.h" #include "exec/cpu-common.h" +#include "exec/page-vary.h" #include "hw/qdev-properties.h" #include "qapi/compat-policy.h" #include "qapi/error.h" @@ -36,6 +37,7 @@ #include "qemu-version.h" #include "qemu/cutils.h" #include "qemu/help_option.h" +#include "qemu/hw-version.h" #include "qemu/uuid.h" #include "sysemu/reset.h" #include "sysemu/runstate.h" @@ -51,7 +53,6 @@ #include "hw/isa/isa.h" #include "hw/scsi/scsi.h" #include "hw/display/vga.h" -#include "sysemu/watchdog.h" #include "hw/firmware/smbios.h" #include "hw/acpi/acpi.h" #include "hw/xen/xen.h" @@ -114,8 +115,11 @@ #include "crypto/init.h" #include "sysemu/replay.h" #include "qapi/qapi-events-run-state.h" +#include "qapi/qapi-types-audio.h" +#include "qapi/qapi-visit-audio.h" #include "qapi/qapi-visit-block-core.h" #include "qapi/qapi-visit-compat.h" +#include "qapi/qapi-visit-machine.h" #include "qapi/qapi-visit-ui.h" #include "qapi/qapi-commands-block-core.h" #include "qapi/qapi-commands-migration.h" @@ -123,9 +127,11 @@ #include "qapi/qapi-visit-qom.h" #include "qapi/qapi-commands-ui.h" #include "qapi/qmp/qdict.h" +#include "block/qdict.h" #include "qapi/qmp/qerror.h" #include "sysemu/iothread.h" #include "qemu/guest-random.h" +#include "qemu/keyval.h" #include "config-host.h" @@ -150,15 +156,22 @@ typedef struct ObjectOption { QTAILQ_ENTRY(ObjectOption) next; } ObjectOption; +typedef struct DeviceOption { + QDict *opts; + Location loc; + QTAILQ_ENTRY(DeviceOption) next; +} DeviceOption; + static const char *cpu_option; static const char *mem_path; static const char *incoming; static const char *loadvm; static const char *accelerators; +static bool have_custom_ram_size; +static const char *ram_memdev_id; static QDict *machine_opts_dict; static QTAILQ_HEAD(, ObjectOption) object_opts = QTAILQ_HEAD_INITIALIZER(object_opts); -static ram_addr_t maxram_size; -static uint64_t ram_slots; +static QTAILQ_HEAD(, DeviceOption) device_opts = QTAILQ_HEAD_INITIALIZER(device_opts); static int display_remote; static int snapshot; static bool preconfig_requested; @@ -166,7 +179,6 @@ static QemuPluginList plugin_list = QTAILQ_HEAD_INITIALIZER(plugin_list); static BlockdevOptionsQueue bdo_queue = QSIMPLEQ_HEAD_INITIALIZER(bdo_queue); static bool nographic = 
false; static int mem_prealloc; /* force preallocation of physical target memory */ -static ram_addr_t ram_size; static const char *vga_model = NULL; static DisplayOptions dpy; static int num_serial_hds; @@ -174,7 +186,6 @@ static Chardev **serial_hds; static const char *log_mask; static const char *log_file; static bool list_data_dirs; -static const char *watchdog; static const char *qtest_chrdev; static const char *qtest_log; @@ -500,21 +511,39 @@ const char *qemu_get_vm_name(void) return qemu_name; } -static int default_driver_check(void *opaque, QemuOpts *opts, Error **errp) +static void default_driver_disable(const char *driver) { - const char *driver = qemu_opt_get(opts, "driver"); int i; - if (!driver) - return 0; + if (!driver) { + return; + } + for (i = 0; i < ARRAY_SIZE(default_list); i++) { if (strcmp(default_list[i].driver, driver) != 0) continue; *(default_list[i].flag) = 0; } +} + +static int default_driver_check(void *opaque, QemuOpts *opts, Error **errp) +{ + const char *driver = qemu_opt_get(opts, "driver"); + + default_driver_disable(driver); return 0; } +static void default_driver_check_json(void) +{ + DeviceOption *opt; + + QTAILQ_FOREACH(opt, &device_opts, next) { + const char *driver = qdict_get_try_str(opt->opts, "driver"); + default_driver_disable(driver); + } +} + static int parse_name(void *opaque, QemuOpts *opts, Error **errp) { const char *proc_name; @@ -707,6 +736,9 @@ static QemuOptsList qemu_smp_opts = { }, { .name = "dies", .type = QEMU_OPT_NUMBER, + }, { + .name = "clusters", + .type = QEMU_OPT_NUMBER, }, { .name = "cores", .type = QEMU_OPT_NUMBER, @@ -813,7 +845,7 @@ static void help(int exitcode) version(); printf("usage: %s [options] [disk_image]\n\n" "'disk_image' is a raw hard disk image for IDE hard disk 0\n\n", - error_get_progname()); + g_get_prgname()); #define DEF(option, opt_arg, opt_enum, opt_help, arch_mask) \ if ((arch_mask) & arch_type) \ @@ -908,10 +940,12 @@ static const VGAInterfaceInfo vga_interfaces[VGA_TYPE_MAX] = { .name = "CG3 framebuffer", .class_names = { "cgthree" }, }, +#ifdef CONFIG_XEN_BACKEND [VGA_XENFB] = { .opt_name = "xenfb", .name = "Xen paravirtualized framebuffer", }, +#endif }; static bool vga_interface_available(VGAInterfaceType t) @@ -951,7 +985,8 @@ static void select_vgahw(const MachineClass *machine_class, const char *p) if (vga_interface_available(t) && ti->opt_name) { printf("%-20s %s%s\n", ti->opt_name, ti->name ?: "", - g_str_equal(ti->opt_name, def) ? " (default)" : ""); + (def && g_str_equal(ti->opt_name, def)) ? + " (default)" : ""); } } exit(0); @@ -1017,89 +1052,7 @@ static void parse_display(const char *p) exit(0); } - if (strstart(p, "sdl", &opts)) { - /* - * sdl DisplayType needs hand-crafted parser instead of - * parse_display_qapi() due to some options not in - * DisplayOptions, specifically: - * - ctrl_grab + alt_grab - * Not clear yet what happens to them long-term. Should - * replaced by something better or deprecated and dropped. 
- */ -#if defined(CONFIG_SDL) - dpy.type = DISPLAY_TYPE_SDL; - while (*opts) { - const char *nextopt; - - if (strstart(opts, ",alt_grab=", &nextopt)) { - opts = nextopt; - if (strstart(opts, "on", &nextopt)) { - alt_grab = 1; - } else if (strstart(opts, "off", &nextopt)) { - alt_grab = 0; - } else { - goto invalid_sdl_args; - } - } else if (strstart(opts, ",ctrl_grab=", &nextopt)) { - opts = nextopt; - if (strstart(opts, "on", &nextopt)) { - ctrl_grab = 1; - } else if (strstart(opts, "off", &nextopt)) { - ctrl_grab = 0; - } else { - goto invalid_sdl_args; - } - } else if (strstart(opts, ",window_close=", &nextopt) || - strstart(opts, ",window-close=", &nextopt)) { - if (strstart(opts, ",window_close=", NULL)) { - warn_report("window_close with an underscore is deprecated," - " please use window-close instead."); - } - opts = nextopt; - dpy.has_window_close = true; - if (strstart(opts, "on", &nextopt)) { - dpy.window_close = true; - } else if (strstart(opts, "off", &nextopt)) { - dpy.window_close = false; - } else { - goto invalid_sdl_args; - } - } else if (strstart(opts, ",show-cursor=", &nextopt)) { - opts = nextopt; - dpy.has_show_cursor = true; - if (strstart(opts, "on", &nextopt)) { - dpy.show_cursor = true; - } else if (strstart(opts, "off", &nextopt)) { - dpy.show_cursor = false; - } else { - goto invalid_sdl_args; - } - } else if (strstart(opts, ",gl=", &nextopt)) { - opts = nextopt; - dpy.has_gl = true; - if (strstart(opts, "on", &nextopt)) { - dpy.gl = DISPLAYGL_MODE_ON; - } else if (strstart(opts, "core", &nextopt)) { - dpy.gl = DISPLAYGL_MODE_CORE; - } else if (strstart(opts, "es", &nextopt)) { - dpy.gl = DISPLAYGL_MODE_ES; - } else if (strstart(opts, "off", &nextopt)) { - dpy.gl = DISPLAYGL_MODE_OFF; - } else { - goto invalid_sdl_args; - } - } else { - invalid_sdl_args: - error_report("invalid SDL option string"); - exit(1); - } - opts = nextopt; - } -#else - error_report("SDL display supported is not available in this binary"); - exit(1); -#endif - } else if (strstart(p, "vnc", &opts)) { + if (strstart(p, "vnc", &opts)) { /* * vnc isn't a (local) DisplayType but a protocol for remote * display access. 
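The hunk above drops the hand-rolled SDL sub-option parser from parse_display(); non-vnc display strings are presumably routed through the QAPI-generated DisplayOptions machinery instead (the old alt_grab/ctrl_grab switches have no DisplayOptions member, which is why the hand-written parser existed in the first place). A minimal sketch of that visitor-based parse, assuming the generated visit_type_DisplayOptions() from "qapi/qapi-visit-ui.h" and qobject_input_visitor_new_str(); the helper name parse_display_sketch is made up for illustration:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-ui.h"   /* generated visit_type_DisplayOptions() */

/*
 * Illustrative only: parse e.g. "sdl,gl=on,window-close=off" into a
 * heap-allocated DisplayOptions.  "type" is the implied key, so a bare
 * "sdl" is read as "type=sdl".
 */
static DisplayOptions *parse_display_sketch(const char *optstr)
{
    DisplayOptions *opts = NULL;
    Visitor *v;

    v = qobject_input_visitor_new_str(optstr, "type", &error_fatal);
    visit_type_DisplayOptions(v, NULL, &opts, &error_fatal);
    visit_free(v);
    return opts;   /* caller frees with qapi_free_DisplayOptions() */
}

Backend-specific switches such as SDL's alt_grab/ctrl_grab would have to become proper QAPI members before they could be expressed this way, which matches the deprecation note in the removed comment.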
@@ -1306,6 +1259,7 @@ static void qemu_disable_default_devices(void) { MachineClass *machine_class = MACHINE_GET_CLASS(current_machine); + default_driver_check_json(); qemu_opts_foreach(qemu_find_opts("device"), default_driver_check, NULL, NULL); qemu_opts_foreach(qemu_find_opts("global"), @@ -1313,6 +1267,7 @@ static void qemu_disable_default_devices(void) if (!vga_model && !default_vga) { vga_interface_type = VGA_DEVICE; + vga_interface_created = true; } if (!has_defaults || machine_class->no_serial) { default_serial = 0; @@ -1558,31 +1513,33 @@ machine_merge_property(const char *propname, QDict *prop, Error **errp) static void machine_parse_property_opt(QemuOptsList *opts_list, const char *propname, - const char *arg, Error **errp) + const char *arg) { QDict *prop = NULL; bool help = false; - prop = keyval_parse(arg, opts_list->implied_opt_name, &help, errp); + prop = keyval_parse(arg, opts_list->implied_opt_name, &help, &error_fatal); if (help) { qemu_opts_print_help(opts_list, true); exit(0); } - if (!prop) { - return; - } - machine_merge_property(propname, prop, errp); + machine_merge_property(propname, prop, &error_fatal); qobject_unref(prop); } static const char *pid_file; -static Notifier qemu_unlink_pidfile_notifier; +struct UnlinkPidfileNotifier { + Notifier notifier; + char *pid_file_realpath; +}; +static struct UnlinkPidfileNotifier qemu_unlink_pidfile_notifier; static void qemu_unlink_pidfile(Notifier *n, void *data) { - if (pid_file) { - unlink(pid_file); - } + struct UnlinkPidfileNotifier *upn; + + upn = DO_UPCAST(struct UnlinkPidfileNotifier, notifier, n); + unlink(upn->pid_file_realpath); } static const QEMUOption *lookup_opt(int argc, char **argv, @@ -1700,6 +1657,7 @@ static void keyval_dashify(QDict *qdict, Error **errp) static void qemu_apply_legacy_machine_options(QDict *qdict) { const char *value; + QObject *prop; keyval_dashify(qdict, &error_fatal); @@ -1732,6 +1690,26 @@ static void qemu_apply_legacy_machine_options(QDict *qdict) false); qdict_del(qdict, "kernel-irqchip"); } + + value = qdict_get_try_str(qdict, "memory-backend"); + if (value) { + if (mem_path) { + error_report("'-mem-path' can't be used together with" + "'-machine memory-backend'"); + exit(EXIT_FAILURE); + } + + /* Resolved later. */ + ram_memdev_id = g_strdup(value); + qdict_del(qdict, "memory-backend"); + } + + prop = qdict_get(qdict, "memory"); + if (prop) { + have_custom_ram_size = + qobject_type(prop) == QTYPE_QDICT && + qdict_haskey(qobject_to(QDict, prop), "size"); + } } static void object_option_foreach_add(bool (*type_opt_predicate)(const char *)) @@ -1789,6 +1767,27 @@ static void object_option_parse(const char *optarg) visit_free(v); } +/* + * Very early object creation, before the sandbox options have been activated. + */ +static bool object_create_pre_sandbox(const char *type) +{ + /* + * Objects should in general not get initialized "too early" without + * a reason. If you add one, state the reason in a comment! + */ + + /* + * Reason: -sandbox on,resourcecontrol=deny disallows setting CPU + * affinity of threads. + */ + if (g_str_equal(type, "thread-context")) { + return true; + } + + return false; +} + /* * Initial object creation happens before all other * QEMU data types are created. The majority of objects @@ -1803,6 +1802,11 @@ static bool object_create_early(const char *type) * add one, state the reason in a comment! */ + /* Reason: already created. 
*/ + if (object_create_pre_sandbox(type)) { + return false; + } + /* Reason: property "chardev" */ if (g_str_equal(type, "rng-egd") || g_str_equal(type, "qtest")) { @@ -1848,40 +1852,9 @@ static bool object_create_early(const char *type) static void qemu_apply_machine_options(QDict *qdict) { - MachineClass *machine_class = MACHINE_GET_CLASS(current_machine); - const char *boot_order = NULL; - const char *boot_once = NULL; - QemuOpts *opts; - object_set_properties_from_keyval(OBJECT(current_machine), qdict, false, &error_fatal); - current_machine->ram_size = ram_size; - current_machine->maxram_size = maxram_size; - current_machine->ram_slots = ram_slots; - opts = qemu_opts_find(qemu_find_opts("boot-opts"), NULL); - if (opts) { - boot_order = qemu_opt_get(opts, "order"); - if (boot_order) { - validate_bootdevices(boot_order, &error_fatal); - } - - boot_once = qemu_opt_get(opts, "once"); - if (boot_once) { - validate_bootdevices(boot_once, &error_fatal); - } - - boot_menu = qemu_opt_get_bool(opts, "menu", boot_menu); - boot_strict = qemu_opt_get_bool(opts, "strict", false); - } - - if (!boot_order) { - boot_order = machine_class->default_boot_order; - } - - current_machine->boot_order = boot_order; - current_machine->boot_once = boot_once; - - if (semihosting_enabled() && !semihosting_get_argc()) { + if (semihosting_enabled(false) && !semihosting_get_argc()) { /* fall back to the -kernel/-append */ semihosting_arg_fallback(current_machine->kernel_filename, current_machine->kernel_cmdline); } @@ -1907,12 +1880,8 @@ static void qemu_create_early_backends(void) const bool use_gtk = false; #endif - if ((alt_grab || ctrl_grab) && !use_sdl) { - error_report("-alt-grab and -ctrl-grab are only valid " - "for SDL, ignoring option"); - } if (dpy.has_window_close && !use_gtk && !use_sdl) { - error_report("-no-quit is only valid for GTK and SDL, " + error_report("window-close is only valid for GTK and SDL, " "ignoring option"); } @@ -1931,7 +1900,7 @@ static void qemu_create_early_backends(void) object_option_foreach_add(object_create_early); /* spice needs the timers to be initialized by this point */ - /* spice must initialize before audio as it changes the default auiodev */ + /* spice must initialize before audio as it changes the default audiodev */ /* spice must initialize before chardevs (for spicevmc and spiceport) */ qemu_spice.init(); @@ -1948,7 +1917,9 @@ static void qemu_create_early_backends(void) * setting machine properties, so they can be referred to. 
*/ configure_blockdev(&bdo_queue, machine_class, snapshot); - audio_init_audiodevs(); + if (!audio_init_audiodevs()) { + exit(1); + } } @@ -1958,7 +1929,7 @@ static void qemu_create_early_backends(void) */ static bool object_create_late(const char *type) { - return !object_create_early(type); + return !object_create_early(type) && !object_create_pre_sandbox(type); } static void qemu_create_late_backends(void) @@ -1967,7 +1938,7 @@ static void qemu_create_late_backends(void) qtest_server_init(qtest_chrdev, qtest_log, &error_fatal); } - net_init_clients(&error_fatal); + net_init_clients(); #ifdef XBOX if (g_config.net.enable) { @@ -1992,128 +1963,64 @@ static void qemu_create_late_backends(void) exit(1); /* now chardevs have been created we may have semihosting to connect */ - qemu_semihosting_connect_chardevs(); - qemu_semihosting_console_init(); -} - -static bool have_custom_ram_size(void) -{ - QemuOpts *opts = qemu_find_opts_singleton("memory"); - return !!qemu_opt_get_size(opts, "size", 0); + qemu_semihosting_chardev_init(); } static void qemu_resolve_machine_memdev(void) { - if (current_machine->ram_memdev_id) { + if (ram_memdev_id) { Object *backend; ram_addr_t backend_size; - backend = object_resolve_path_type(current_machine->ram_memdev_id, + backend = object_resolve_path_type(ram_memdev_id, TYPE_MEMORY_BACKEND, NULL); if (!backend) { - error_report("Memory backend '%s' not found", - current_machine->ram_memdev_id); + error_report("Memory backend '%s' not found", ram_memdev_id); exit(EXIT_FAILURE); } - backend_size = object_property_get_uint(backend, "size", &error_abort); - if (have_custom_ram_size() && backend_size != ram_size) { - error_report("Size specified by -m option must match size of " - "explicitly specified 'memory-backend' property"); - exit(EXIT_FAILURE); - } - if (mem_path) { - error_report("'-mem-path' can't be used together with" - "'-machine memory-backend'"); - exit(EXIT_FAILURE); - } - ram_size = backend_size; - } - - if (!xen_enabled()) { - /* On 32-bit hosts, QEMU is limited by virtual address space */ - if (ram_size > (2047 << 20) && HOST_LONG_BITS == 32) { - error_report("at most 2047 MB RAM can be simulated"); - exit(1); + if (!have_custom_ram_size) { + backend_size = object_property_get_uint(backend, "size", &error_abort); + current_machine->ram_size = backend_size; } + object_property_set_link(OBJECT(current_machine), + "memory-backend", backend, &error_fatal); } } -static void set_memory_options(MachineClass *mc) +static void parse_memory_options(void) { - uint64_t sz; - const char *mem_str; - const ram_addr_t default_ram_size = mc->default_ram_size; QemuOpts *opts = qemu_find_opts_singleton("memory"); + QDict *dict, *prop; + const char *mem_str; Location loc; loc_push_none(&loc); qemu_opts_loc_restore(opts); - sz = 0; - mem_str = qemu_opt_get(opts, "size"); - if (mem_str) { - if (!*mem_str) { - error_report("missing 'size' option value"); - exit(EXIT_FAILURE); - } - - sz = qemu_opt_get_size(opts, "size", ram_size); + prop = qdict_new(); + if (qemu_opt_get_size(opts, "size", 0) != 0) { /* Fix up legacy suffix-less format */ + mem_str = qemu_opt_get(opts, "size"); if (g_ascii_isdigit(mem_str[strlen(mem_str) - 1])) { - uint64_t overflow_check = sz; - - sz *= MiB; - if (sz / MiB != overflow_check) { - error_report("too large 'size' option value"); - exit(EXIT_FAILURE); - } + g_autofree char *mib_str = g_strdup_printf("%sM", mem_str); + qdict_put_str(prop, "size", mib_str); + } else { + qdict_put_str(prop, "size", mem_str); } } - /* backward compatibility 
behaviour for case "-m 0" */ - if (sz == 0) { - sz = default_ram_size; - } - - sz = QEMU_ALIGN_UP(sz, 8192); - if (mc->fixup_ram_size) { - sz = mc->fixup_ram_size(sz); - } - ram_size = sz; - if (ram_size != sz) { - error_report("ram size too large"); - exit(EXIT_FAILURE); - } - - maxram_size = ram_size; - if (qemu_opt_get(opts, "maxmem")) { - uint64_t slots; - - sz = qemu_opt_get_size(opts, "maxmem", 0); - slots = qemu_opt_get_number(opts, "slots", 0); - if (sz < ram_size) { - error_report("invalid value of -m option maxmem: " - "maximum memory size (0x%" PRIx64 ") must be at least " - "the initial memory size (0x" RAM_ADDR_FMT ")", - sz, ram_size); - exit(EXIT_FAILURE); - } else if (slots && sz == ram_size) { - error_report("invalid value of -m option maxmem: " - "memory slots were specified but maximum memory size " - "(0x%" PRIx64 ") is equal to the initial memory size " - "(0x" RAM_ADDR_FMT ")", sz, ram_size); - exit(EXIT_FAILURE); - } - - maxram_size = sz; - ram_slots = slots; - } else if (qemu_opt_get(opts, "slots")) { - error_report("invalid -m option value: missing 'maxmem' option"); - exit(EXIT_FAILURE); + qdict_put_str(prop, "max-size", qemu_opt_get(opts, "maxmem")); + } + if (qemu_opt_get(opts, "slots")) { + qdict_put_str(prop, "slots", qemu_opt_get(opts, "slots")); } + dict = qdict_new(); + qdict_put(dict, "memory", prop); + keyval_merge(machine_opts_dict, dict, &error_fatal); + qobject_unref(dict); loc_pop(&loc); } @@ -2122,8 +2029,6 @@ static void qemu_create_machine(QDict *qdict) MachineClass *machine_class = select_machine(qdict, &error_fatal); object_set_machine_compat_props(machine_class->compat_props); - set_memory_options(machine_class); - current_machine = MACHINE(object_new_with_class(OBJECT_CLASS(machine_class))); object_property_add_child(object_get_root(), "machine", OBJECT(current_machine)); @@ -2182,7 +2087,8 @@ static bool is_qemuopts_group(const char *group) { if (g_str_equal(group, "object") || g_str_equal(group, "machine") || - g_str_equal(group, "smp-opts")) { + g_str_equal(group, "smp-opts") || + g_str_equal(group, "boot-opts")) { return false; } return true; @@ -2204,6 +2110,8 @@ static void qemu_record_config_group(const char *group, QDict *dict, keyval_merge(machine_opts_dict, dict, errp); } else if (g_str_equal(group, "smp-opts")) { machine_merge_property("smp", dict, &error_fatal); + } else if (g_str_equal(group, "boot-opts")) { + machine_merge_property("boot", dict, &error_fatal); } else { abort(); } @@ -2400,8 +2308,7 @@ static void configure_accelerators(const char *progname) } if (init_failed && !qtest_chrdev) { - AccelClass *ac = ACCEL_GET_CLASS(current_accel()); - error_report("falling back to %s", ac->name); + error_report("falling back to %s", current_accel_name()); } if (icount_enabled() && !tcg_enabled()) { @@ -2410,27 +2317,6 @@ static void configure_accelerators(const char *progname) } } -static void create_default_memdev(MachineState *ms, const char *path) -{ - Object *obj; - MachineClass *mc = MACHINE_GET_CLASS(ms); - - obj = object_new(path ? 
TYPE_MEMORY_BACKEND_FILE : TYPE_MEMORY_BACKEND_RAM); - if (path) { - object_property_set_str(obj, "mem-path", path, &error_fatal); - } - object_property_set_int(obj, "size", ms->ram_size, &error_fatal); - object_property_add_child(object_get_objects_root(), mc->default_ram_id, - obj); - /* Ensure backend's memory region name is equal to mc->default_ram_id */ - object_property_set_bool(obj, "x-use-canonical-path-for-ramblock-id", - false, &error_fatal); - user_creatable_complete(USER_CREATABLE(obj), &error_fatal); - object_unref(obj); - object_property_set_str(OBJECT(ms), "memory-backend", mc->default_ram_id, - &error_fatal); -} - /* Return 1 if file fails to open */ static int xemu_check_file(const char *path) { @@ -2514,12 +2400,6 @@ static void qemu_process_sugar_options(void) } object_register_sugar_prop("memory-backend", "prealloc", "on", false); } - - if (watchdog) { - int i = select_watchdog(watchdog); - if (i > 0) - exit (i == 1 ? 1 : 0); - } } /* -action processing */ @@ -2544,6 +2424,11 @@ static int process_runstate_actions(void *opaque, QemuOpts *opts, Error **errp) static void qemu_process_early_options(void) { + qemu_opts_foreach(qemu_find_opts("name"), + parse_name, NULL, &error_fatal); + + object_option_foreach_add(object_create_pre_sandbox); + #ifdef CONFIG_SECCOMP QemuOptsList *olist = qemu_find_opts_err("sandbox", NULL); if (olist) { @@ -2551,9 +2436,6 @@ static void qemu_process_early_options(void) } #endif - qemu_opts_foreach(qemu_find_opts("name"), - parse_name, NULL, &error_fatal); - if (qemu_opts_foreach(qemu_find_opts("action"), process_runstate_actions, NULL, &error_fatal)) { exit(1); @@ -2568,19 +2450,16 @@ static void qemu_process_early_options(void) #endif /* Open the logfile at this point and set the log mask if necessary. 
*/ - if (log_file) { - qemu_set_log_filename(log_file, &error_fatal); - } - if (log_mask) { - int mask; - mask = qemu_str_to_log_mask(log_mask); - if (!mask) { - qemu_print_log_usage(stdout); - exit(1); + { + int mask = 0; + if (log_mask) { + mask = qemu_str_to_log_mask(log_mask); + if (!mask) { + qemu_print_log_usage(stdout); + exit(1); + } } - qemu_set_log(mask); - } else { - qemu_set_log(0); + qemu_set_log_filename_flags(log_file, mask, &error_fatal); } qemu_add_default_firmwarepath(); @@ -2618,13 +2497,30 @@ static void qemu_maybe_daemonize(const char *pid_file) os_daemonize(); rcu_disable_atfork(); - if (pid_file && !qemu_write_pidfile(pid_file, &err)) { - error_reportf_err(err, "cannot create PID file: "); - exit(1); - } + if (pid_file) { + char *pid_file_realpath = NULL; - qemu_unlink_pidfile_notifier.notify = qemu_unlink_pidfile; - qemu_add_exit_notifier(&qemu_unlink_pidfile_notifier); + if (!qemu_write_pidfile(pid_file, &err)) { + error_reportf_err(err, "cannot create PID file: "); + exit(1); + } + + pid_file_realpath = g_malloc0(PATH_MAX); + if (!realpath(pid_file, pid_file_realpath)) { + error_report("cannot resolve PID file path: %s: %s", + pid_file, strerror(errno)); + unlink(pid_file); + exit(1); + } + + qemu_unlink_pidfile_notifier = (struct UnlinkPidfileNotifier) { + .notifier = { + .notify = qemu_unlink_pidfile, + }, + .pid_file_realpath = pid_file_realpath, + }; + qemu_add_exit_notifier(&qemu_unlink_pidfile_notifier.notifier); + } } static void qemu_init_displays(void) @@ -2651,18 +2547,11 @@ static void qemu_init_displays(void) static void qemu_init_board(void) { - MachineClass *machine_class = MACHINE_GET_CLASS(current_machine); - - if (machine_class->default_ram_id && current_machine->ram_size && - numa_uses_legacy_mem() && !current_machine->ram_memdev_id) { - create_default_memdev(current_machine, mem_path); - } - /* process plugin before CPUs are created, but once -smp has been parsed */ qemu_plugin_load_list(&plugin_list, &error_fatal); /* From here on we enter MACHINE_PHASE_INITIALIZED. */ - machine_run_board_init(current_machine); + machine_run_board_init(current_machine, mem_path, &error_fatal); drive_check_orphaned(); @@ -2676,6 +2565,8 @@ static void qemu_init_board(void) static void qemu_create_cli_devices(void) { + DeviceOption *opt; + soundhw_init(); qemu_opts_foreach(qemu_find_opts("fw_cfg"), @@ -2691,6 +2582,20 @@ static void qemu_create_cli_devices(void) rom_set_order_override(FW_CFG_ORDER_OVERRIDE_DEVICE); qemu_opts_foreach(qemu_find_opts("device"), device_init_func, NULL, &error_fatal); + QTAILQ_FOREACH(opt, &device_opts, next) { + DeviceState *dev; + loc_push_restore(&opt->loc); + /* + * TODO Eventually we should call qmp_device_add() here to make sure it + * behaves the same, but QMP still has to accept incorrectly typed + * options until libvirt is fixed and we want to be strict on the CLI + * from the start, so call qdev_device_add_from_qdict() directly for + * now. 
+ */ + dev = qdev_device_add_from_qdict(opt->opts, true, &error_fatal); + object_unref(OBJECT(dev)); + loc_pop(&opt->loc); + } rom_reset_order_override(); } @@ -2727,6 +2632,12 @@ static void qemu_machine_creation_done(void) if (foreach_device_config(DEV_GDB, gdbserver_start) < 0) { exit(1); } + if (!vga_interface_created && !default_vga && + vga_interface_type != VGA_NONE) { + warn_report("A -vga option was passed but this machine " + "type does not use that option; " + "No VGA device has been created"); + } } void qmp_x_exit_preconfig(Error **errp) @@ -2741,12 +2652,7 @@ void qmp_x_exit_preconfig(Error **errp) qemu_machine_creation_done(); if (loadvm) { - Error *local_err = NULL; - if (!load_snapshot(loadvm, NULL, false, NULL, &local_err)) { - error_report_err(local_err); - autostart = 0; - exit(1); - } + load_snapshot(loadvm, NULL, false, NULL, &error_fatal); } if (replay_mode != REPLAY_MODE_NONE) { replay_vmstate_init(); @@ -2808,7 +2714,7 @@ static const char *get_eeprom_path(void) return path; } -void qemu_init(int argc, char **argv, char **envp) +void qemu_init(int argc, char **argv) { QemuOpts *opts; QemuOpts *icount_opts = NULL, *accel_opts = NULL; @@ -3030,10 +2936,7 @@ void qemu_init(int argc, char **argv, char **envp) error_init(argv[0]); qemu_init_exec_dir(argv[0]); -#ifdef CONFIG_MODULES - module_init_info(qemu_modinfo); - module_allow_arch(TARGET_NAME); -#endif + qemu_init_arch_modules(); qemu_init_subsystems(); @@ -3105,7 +3008,9 @@ void qemu_init(int argc, char **argv, char **envp) break; } case QEMU_OPTION_drive: - if (drive_def(optarg) == NULL) { + opts = qemu_opts_parse_noisily(qemu_find_opts("drive"), + optarg, false); + if (opts == NULL) { exit(1); } break; @@ -3149,14 +3054,6 @@ void qemu_init(int argc, char **argv, char **envp) nographic = true; dpy.type = DISPLAY_TYPE_NONE; break; - case QEMU_OPTION_curses: -#ifdef CONFIG_CURSES - dpy.type = DISPLAY_TYPE_CURSES; -#else - error_report("curses or iconv support is disabled"); - exit(1); -#endif - break; case QEMU_OPTION_portrait: graphic_rotate = 90; break; @@ -3184,11 +3081,7 @@ void qemu_init(int argc, char **argv, char **envp) drive_add(IF_DEFAULT, 2, optarg, CDROM_OPTS); break; case QEMU_OPTION_boot: - opts = qemu_opts_parse_noisily(qemu_find_opts("boot-opts"), - optarg, true); - if (!opts) { - exit(1); - } + machine_parse_property_opt(qemu_find_opts("boot-opts"), "boot", optarg); break; case QEMU_OPTION_fda: case QEMU_OPTION_fdb: @@ -3200,21 +3093,19 @@ void qemu_init(int argc, char **argv, char **envp) break; case QEMU_OPTION_netdev: default_net = 0; - if (net_client_parse(qemu_find_opts("netdev"), optarg) == -1) { - exit(1); + if (netdev_is_modern(optarg)) { + netdev_parse_modern(optarg); + } else { + net_client_parse(qemu_find_opts("netdev"), optarg); } break; case QEMU_OPTION_nic: default_net = 0; - if (net_client_parse(qemu_find_opts("nic"), optarg) == -1) { - exit(1); - } + net_client_parse(qemu_find_opts("nic"), optarg); break; case QEMU_OPTION_net: default_net = 0; - if (net_client_parse(qemu_find_opts("net"), optarg) == -1) { - exit(1); - } + net_client_parse(qemu_find_opts("net"), optarg); break; #ifdef CONFIG_LIBISCSI case QEMU_OPTION_iscsi: @@ -3232,9 +3123,38 @@ void qemu_init(int argc, char **argv, char **envp) case QEMU_OPTION_audiodev: audio_parse_option(optarg); break; - case QEMU_OPTION_soundhw: - select_soundhw (optarg); + case QEMU_OPTION_audio: { + bool help; + char *model; + Audiodev *dev = NULL; + Visitor *v; + QDict *dict = keyval_parse(optarg, "driver", &help, &error_fatal); + if (help || 
(qdict_haskey(dict, "driver") && + is_help_option(qdict_get_str(dict, "driver")))) { + audio_help(); + exit(EXIT_SUCCESS); + } + if (!qdict_haskey(dict, "id")) { + qdict_put_str(dict, "id", "audiodev0"); + } + if (!qdict_haskey(dict, "model")) { + error_setg(&error_fatal, "Parameter 'model' is missing"); + } + model = g_strdup(qdict_get_str(dict, "model")); + qdict_del(dict, "model"); + if (is_help_option(model)) { + show_valid_soundhw(); + exit(0); + } + v = qobject_input_visitor_new_keyval(QOBJECT(dict)); + qobject_unref(dict); + visit_type_Audiodev(v, NULL, &dev, &error_fatal); + visit_free(v); + audio_define(dev); + select_soundhw(model, dev->id); + g_free(model); break; + } case QEMU_OPTION_h: help(0); break; @@ -3243,10 +3163,9 @@ void qemu_init(int argc, char **argv, char **envp) exit(0); break; case QEMU_OPTION_m: - opts = qemu_opts_parse_noisily(qemu_find_opts("memory"), - optarg, true); - if (!opts) { - exit(EXIT_FAILURE); + opts = qemu_opts_parse_noisily(qemu_find_opts("memory"), optarg, true); + if (opts == NULL) { + exit(1); } break; #ifdef CONFIG_TPM @@ -3470,25 +3389,18 @@ void qemu_init(int argc, char **argv, char **envp) default_monitor = 0; } break; - case QEMU_OPTION_watchdog: - if (watchdog) { - error_report("only one watchdog option may be given"); - exit(1); - } - watchdog = optarg; - break; case QEMU_OPTION_action: olist = qemu_find_opts("action"); if (!qemu_opts_parse_noisily(olist, optarg, false)) { exit(1); } break; - case QEMU_OPTION_watchdog_action: - if (select_watchdog_action(optarg) == -1) { - error_report("unknown -watchdog-action parameter"); - exit(1); - } + case QEMU_OPTION_watchdog_action: { + QemuOpts *opts; + opts = qemu_opts_create(qemu_find_opts("action"), NULL, 0, &error_abort); + qemu_opt_set(opts, "watchdog", optarg, &error_abort); break; + } case QEMU_OPTION_parallel: add_device_config(DEV_PARALLEL, optarg); default_parallel = 0; @@ -3506,26 +3418,6 @@ void qemu_init(int argc, char **argv, char **envp) dpy.has_full_screen = true; dpy.full_screen = true; break; - case QEMU_OPTION_alt_grab: - alt_grab = 1; - break; - case QEMU_OPTION_ctrl_grab: - ctrl_grab = 1; - break; - case QEMU_OPTION_no_quit: - dpy.has_window_close = true; - dpy.window_close = false; - warn_report("-no-quit is deprecated, please use " - "-display ...,window-close=off instead."); - break; - case QEMU_OPTION_sdl: -#ifdef CONFIG_SDL - dpy.type = DISPLAY_TYPE_SDL; - break; -#else - error_report("SDL support is disabled"); - exit(1); -#endif case QEMU_OPTION_pidfile: pid_file = optarg; break; @@ -3606,13 +3498,23 @@ void qemu_init(int argc, char **argv, char **envp) add_device_config(DEV_USB, optarg); break; case QEMU_OPTION_device: - if (!qemu_opts_parse_noisily(qemu_find_opts("device"), - optarg, true)) { - exit(1); + if (optarg[0] == '{') { + QObject *obj = qobject_from_json(optarg, &error_fatal); + DeviceOption *opt = g_new0(DeviceOption, 1); + opt->opts = qobject_to(QDict, obj); + loc_save(&opt->loc); + assert(opt->opts != NULL); + QTAILQ_INSERT_TAIL(&device_opts, opt, next); + } else { + if (!qemu_opts_parse_noisily(qemu_find_opts("device"), + optarg, true)) { + exit(1); + } } break; case QEMU_OPTION_smp: - machine_parse_property_opt(qemu_find_opts("smp-opts"), "smp", optarg, &error_fatal); + machine_parse_property_opt(qemu_find_opts("smp-opts"), + "smp", optarg); break; case QEMU_OPTION_vnc: vnc_parse(optarg); @@ -3712,21 +3614,21 @@ void qemu_init(int argc, char **argv, char **envp) has_defaults = 0; break; case QEMU_OPTION_xen_domid: - if (!(xen_available())) { + if 
(!(accel_find("xen"))) { error_report("Option not supported for this target"); exit(1); } xen_domid = atoi(optarg); break; case QEMU_OPTION_xen_attach: - if (!(xen_available())) { + if (!(accel_find("xen"))) { error_report("Option not supported for this target"); exit(1); } xen_mode = XEN_ATTACH; break; case QEMU_OPTION_xen_domid_restrict: - if (!(xen_available())) { + if (!(accel_find("xen"))) { error_report("Option not supported for this target"); exit(1); } @@ -3741,6 +3643,7 @@ void qemu_init(int argc, char **argv, char **envp) case QEMU_OPTION_readconfig: qemu_read_config_file(optarg, qemu_parse_config_group, &error_fatal); break; +#ifdef CONFIG_SPICE case QEMU_OPTION_spice: olist = qemu_find_opts_err("spice", NULL); if (!olist) { @@ -3753,26 +3656,7 @@ void qemu_init(int argc, char **argv, char **envp) } display_remote++; break; - case QEMU_OPTION_writeconfig: - { - FILE *fp; - warn_report("-writeconfig is deprecated and will go away without a replacement"); - if (strcmp(optarg, "-") == 0) { - fp = stdout; - } else { - fp = qemu_fopen(optarg, "w"); - if (fp == NULL) { - error_report("open %s: %s", optarg, - strerror(errno)); - exit(1); - } - } - qemu_config_write(fp); - if (fp != stdout) { - fclose(fp); - } - break; - } +#endif case QEMU_OPTION_qtest: qtest_chrdev = optarg; break; @@ -3907,6 +3791,9 @@ void qemu_init(int argc, char **argv, char **envp) configure_rtc(qemu_find_opts_singleton("rtc")); + /* Transfer QemuOpts options into machine options */ + parse_memory_options(); + qemu_create_machine(machine_opts_dict); suspend_mux_open(); @@ -3945,7 +3832,7 @@ void qemu_init(int argc, char **argv, char **envp) machine_class = MACHINE_GET_CLASS(current_machine); if (!qtest_enabled() && machine_class->deprecation_reason) { - error_report("Machine type '%s' is deprecated: %s", + warn_report("Machine type '%s' is deprecated: %s", machine_class->name, machine_class->deprecation_reason); } @@ -3969,6 +3856,7 @@ void qemu_init(int argc, char **argv, char **envp) if (vmstate_dump_file) { /* dump and exit */ + module_load_qom_all(); dump_vmstate_json_to_file(vmstate_dump_file); exit(0); } diff --git a/storage-daemon/qemu-storage-daemon.c b/storage-daemon/qemu-storage-daemon.c index fc8b150629..7718f6dcda 100644 --- a/storage-daemon/qemu-storage-daemon.c +++ b/storage-daemon/qemu-storage-daemon.c @@ -42,8 +42,9 @@ #include "qapi/qmp/qstring.h" #include "qapi/qobject-input-visitor.h" -#include "qemu-common.h" +#include "qemu/help-texts.h" #include "qemu-version.h" +#include "qemu/cutils.h" #include "qemu/config-file.h" #include "qemu/error-report.h" #include "qemu/help_option.h" @@ -60,6 +61,7 @@ #include "trace/control.h" static const char *pid_file; +static char *pid_file_realpath; static volatile bool exit_requested = false; void qemu_system_killed(int signal, pid_t pid) @@ -93,15 +95,43 @@ static void help(void) " --chardev configure a character device backend\n" " (see the qemu(1) man page for possible options)\n" "\n" +" --daemonize daemonize the process, and have the parent exit\n" +" once startup is complete\n" +"\n" " --export [type=]nbd,id=,node-name=[,name=]\n" " [,writable=on|off][,bitmap=]\n" " export the specified block node over NBD\n" " (requires --nbd-server)\n" "\n" +#ifdef CONFIG_FUSE " --export [type=]fuse,id=,node-name=,mountpoint=\n" -" [,growable=on|off][,writable=on|off]\n" +" [,growable=on|off][,writable=on|off][,allow-other=on|off|auto]\n" " export the specified block node over FUSE\n" "\n" +#endif /* CONFIG_FUSE */ +#ifdef CONFIG_VHOST_USER_BLK_SERVER +" --export 
[type=]vhost-user-blk,id=,node-name=,\n" +" addr.type=unix,addr.path=[,writable=on|off]\n" +" [,logical-block-size=][,num-queues=]\n" +" export the specified block node as a\n" +" vhost-user-blk device over UNIX domain socket\n" +" --export [type=]vhost-user-blk,id=,node-name=,\n" +" addr.type=fd,addr.str=[,writable=on|off]\n" +" [,logical-block-size=][,num-queues=]\n" +" export the specified block node as a\n" +" vhost-user-blk device over file descriptor\n" +"\n" +#endif /* CONFIG_VHOST_USER_BLK_SERVER */ +#ifdef CONFIG_VDUSE_BLK_EXPORT +" --export [type=]vduse-blk,id=,node-name=\n" +" ,name=[,writable=on|off]\n" +" [,num-queues=][,queue-size=]\n" +" [,logical-block-size=]\n" +" [,serial=]\n" +" export the specified block node as a\n" +" vduse-blk device\n" +"\n" +#endif /* CONFIG_VDUSE_BLK_EXPORT */ " --monitor [chardev=]name[,mode=control][,pretty[=on|off]]\n" " configure a QMP monitor\n" "\n" @@ -123,12 +153,13 @@ static void help(void) " --pidfile write process ID to a file after startup\n" "\n" QEMU_HELP_BOTTOM "\n", - error_get_progname()); + g_get_prgname()); } enum { OPTION_BLOCKDEV = 256, OPTION_CHARDEV, + OPTION_DAEMONIZE, OPTION_EXPORT, OPTION_MONITOR, OPTION_NBD_SERVER, @@ -144,7 +175,8 @@ static void init_qmp_commands(void) QTAILQ_INIT(&qmp_cap_negotiation_commands); qmp_register_command(&qmp_cap_negotiation_commands, "qmp_capabilities", - qmp_marshal_qmp_capabilities, QCO_ALLOW_PRECONFIG); + qmp_marshal_qmp_capabilities, + QCO_ALLOW_PRECONFIG, 0); } static int getopt_set_loc(int argc, char **argv, const char *optstring, @@ -161,13 +193,30 @@ static int getopt_set_loc(int argc, char **argv, const char *optstring, return c; } -static void process_options(int argc, char *argv[]) +/** + * Process QSD command-line arguments. + * + * This is done in two passes: + * + * First (@pre_init_pass is true), we do a pass where all global + * arguments pertaining to the QSD process (like --help or --daemonize) + * are processed. This pass is done before most of the QEMU-specific + * initialization steps (e.g. initializing the block layer or QMP), and + * so must only process arguments that are not really QEMU-specific. + * + * Second (@pre_init_pass is false), we (sequentially) process all + * QEMU/QSD-specific arguments. Many of these arguments are effectively + * translated to QMP commands (like --blockdev for blockdev-add, or + * --export for block-export-add). + */ +static void process_options(int argc, char *argv[], bool pre_init_pass) { int c; static const struct option long_options[] = { {"blockdev", required_argument, NULL, OPTION_BLOCKDEV}, {"chardev", required_argument, NULL, OPTION_CHARDEV}, + {"daemonize", no_argument, NULL, OPTION_DAEMONIZE}, {"export", required_argument, NULL, OPTION_EXPORT}, {"help", no_argument, NULL, 'h'}, {"monitor", required_argument, NULL, OPTION_MONITOR}, @@ -180,11 +229,27 @@ static void process_options(int argc, char *argv[]) }; /* - * In contrast to the system emulator, options are processed in the order - * they are given on the command lines. This means that things must be - * defined first before they can be referenced in another option. + * In contrast to the system emulator, QEMU-specific options are processed + * in the order they are given on the command lines. This means that things + * must be defined first before they can be referenced in another option. */ + optind = 1; while ((c = getopt_set_loc(argc, argv, "-hT:V", long_options)) != -1) { + bool handle_option_pre_init; + + /* Should this argument be processed in the pre-init pass? 
*/ + handle_option_pre_init = + c == '?' || + c == 'h' || + c == 'V' || + c == OPTION_DAEMONIZE || + c == OPTION_PIDFILE; + + /* Process every option only in its respective pass */ + if (pre_init_pass != handle_option_pre_init) { + continue; + } + switch (c) { case '?': exit(EXIT_FAILURE); @@ -230,6 +295,16 @@ static void process_options(int argc, char *argv[]) qemu_opts_del(opts); break; } + case OPTION_DAEMONIZE: + if (os_set_daemonize(true) < 0) { + /* + * --daemonize is parsed before monitor_init_globals_core(), so + * error_report() does not work yet + */ + fprintf(stderr, "--daemonize not supported in this build\n"); + exit(EXIT_FAILURE); + } + break; case OPTION_EXPORT: { Visitor *v; @@ -289,7 +364,7 @@ static void process_options(int argc, char *argv[]) static void pid_file_cleanup(void) { - unlink(pid_file); + unlink(pid_file_realpath); } static void pid_file_init(void) @@ -305,6 +380,14 @@ static void pid_file_init(void) exit(EXIT_FAILURE); } + pid_file_realpath = g_malloc(PATH_MAX); + if (!realpath(pid_file, pid_file_realpath)) { + error_report("cannot resolve PID file path: %s: %s", + pid_file, strerror(errno)); + unlink(pid_file); + exit(EXIT_FAILURE); + } + atexit(pid_file_cleanup); } @@ -318,6 +401,10 @@ int main(int argc, char *argv[]) qemu_init_exec_dir(argv[0]); os_setup_signal_handling(); + process_options(argc, argv, true); + + os_daemonize(); + module_call_init(MODULE_INIT_QOM); module_call_init(MODULE_INIT_TRACE); qemu_add_opts(&qemu_trace_opts); @@ -329,10 +416,10 @@ int main(int argc, char *argv[]) if (!trace_init_backends()) { return EXIT_FAILURE; } - qemu_set_log(LOG_TRACE); + qemu_set_log(LOG_TRACE, &error_fatal); qemu_init_main_loop(&error_fatal); - process_options(argc, argv); + process_options(argc, argv, false); /* * Write the pid file after creating chardevs, exports, and NBD servers but @@ -340,6 +427,7 @@ int main(int argc, char *argv[]) * it. 
*/ pid_file_init(); + os_setup_post(); while (!exit_requested) { main_loop_wait(false); diff --git a/stubs/arch_type.c b/stubs/arch_type.c deleted file mode 100644 index fc5423bc98..0000000000 --- a/stubs/arch_type.c +++ /dev/null @@ -1,4 +0,0 @@ -#include "qemu/osdep.h" -#include "sysemu/arch_init.h" - -const uint32_t arch_type = QEMU_ARCH_NONE; diff --git a/stubs/error-printf.c b/stubs/error-printf.c index a2f61521a1..0e326d8010 100644 --- a/stubs/error-printf.c +++ b/stubs/error-printf.c @@ -1,5 +1,6 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" +#include "monitor/monitor.h" int error_vprintf(const char *fmt, va_list ap) { diff --git a/stubs/get-vm-name.c b/stubs/get-vm-name.c index fa990136b0..0906303f73 100644 --- a/stubs/get-vm-name.c +++ b/stubs/get-vm-name.c @@ -1,5 +1,5 @@ #include "qemu/osdep.h" -#include "qemu-common.h" +#include "sysemu/sysemu.h" const char *qemu_get_vm_name(void) { diff --git a/stubs/icount.c b/stubs/icount.c index f13c43568b..6df8c2bf7d 100644 --- a/stubs/icount.c +++ b/stubs/icount.c @@ -43,3 +43,7 @@ void icount_account_warp_timer(void) { abort(); } + +void icount_notify_exit(void) +{ +} diff --git a/stubs/iothread-lock-block.c b/stubs/iothread-lock-block.c new file mode 100644 index 0000000000..c88ed70462 --- /dev/null +++ b/stubs/iothread-lock-block.c @@ -0,0 +1,8 @@ +#include "qemu/osdep.h" +#include "qemu/main-loop.h" + +bool qemu_in_main_thread(void) +{ + return qemu_get_current_aio_context() == qemu_get_aio_context(); +} + diff --git a/stubs/meson.build b/stubs/meson.build index d3fa8646b3..c96a74f095 100644 --- a/stubs/meson.build +++ b/stubs/meson.build @@ -1,4 +1,3 @@ -stub_ss.add(files('arch_type.c')) stub_ss.add(files('bdrv-next-monitor-owned.c')) stub_ss.add(files('blk-commit-all.c')) stub_ss.add(files('blk-exp-close-all.c')) @@ -12,25 +11,29 @@ stub_ss.add(files('icount.c')) stub_ss.add(files('dump.c')) stub_ss.add(files('error-printf.c')) stub_ss.add(files('fdset.c')) -stub_ss.add(files('fw_cfg.c')) stub_ss.add(files('gdbstub.c')) stub_ss.add(files('get-vm-name.c')) if linux_io_uring.found() stub_ss.add(files('io_uring.c')) endif stub_ss.add(files('iothread-lock.c')) +if have_block + stub_ss.add(files('iothread-lock-block.c')) +endif stub_ss.add(files('isa-bus.c')) stub_ss.add(files('is-daemonized.c')) -stub_ss.add(when: 'CONFIG_LINUX_AIO', if_true: files('linux-aio.c')) +if libaio.found() + stub_ss.add(files('linux-aio.c')) +endif stub_ss.add(files('migr-blocker.c')) stub_ss.add(files('module-opts.c')) stub_ss.add(files('monitor.c')) stub_ss.add(files('monitor-core.c')) -stub_ss.add(files('pci-bus.c')) -stub_ss.add(files('pci-host-piix.c')) +stub_ss.add(files('physmem.c')) stub_ss.add(files('qemu-timer-notify-cb.c')) stub_ss.add(files('qmp_memory_device.c')) stub_ss.add(files('qmp-command-available.c')) +stub_ss.add(files('qmp-quit.c')) stub_ss.add(files('qtest.c')) stub_ss.add(files('ram-block.c')) stub_ss.add(files('ramfb.c')) @@ -46,14 +49,16 @@ stub_ss.add(files('vmstate.c')) stub_ss.add(files('vm-stop.c')) stub_ss.add(files('win32-kbd-hook.c')) stub_ss.add(files('cpu-synchronize-state.c')) -if have_block +if have_block or have_ga stub_ss.add(files('replay-tools.c')) endif if have_system + stub_ss.add(files('fw_cfg.c')) + stub_ss.add(files('pci-bus.c')) stub_ss.add(files('semihost.c')) stub_ss.add(files('usb-dev-stub.c')) stub_ss.add(files('xen-hw-stub.c')) - stub_ss.add(files('virtio-gpu-udmabuf.c')) else stub_ss.add(files('qdev.c')) endif +stub_ss.add(when: 'CONFIG_VFIO_USER_SERVER', if_false: files('vfio-user-obj.c')) diff 
--git a/stubs/monitor-core.c b/stubs/monitor-core.c index d058a2a00d..afa477aae6 100644 --- a/stubs/monitor-core.c +++ b/stubs/monitor-core.c @@ -1,6 +1,5 @@ #include "qemu/osdep.h" #include "monitor/monitor.h" -#include "qemu-common.h" #include "qapi/qapi-emit-events.h" Monitor *monitor_cur(void) diff --git a/stubs/pci-host-piix.c b/stubs/pci-host-piix.c deleted file mode 100644 index 93975adbfe..0000000000 --- a/stubs/pci-host-piix.c +++ /dev/null @@ -1,7 +0,0 @@ -#include "qemu/osdep.h" -#include "hw/pci-host/i440fx.h" - -PCIBus *find_i440fx(void) -{ - return NULL; -} diff --git a/stubs/physmem.c b/stubs/physmem.c new file mode 100644 index 0000000000..1fc5f2df29 --- /dev/null +++ b/stubs/physmem.c @@ -0,0 +1,13 @@ +#include "qemu/osdep.h" +#include "exec/cpu-common.h" + +RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset, + ram_addr_t *offset) +{ + return NULL; +} + +int qemu_ram_get_fd(RAMBlock *rb) +{ + return -1; +} diff --git a/stubs/qdev.c b/stubs/qdev.c index 92e6143134..187659f707 100644 --- a/stubs/qdev.c +++ b/stubs/qdev.c @@ -21,3 +21,10 @@ void qapi_event_send_device_deleted(bool has_device, { /* Nothing to do. */ } + +void qapi_event_send_device_unplug_guest_error(bool has_device, + const char *device, + const char *path) +{ + /* Nothing to do. */ +} diff --git a/stubs/qmp-quit.c b/stubs/qmp-quit.c new file mode 100644 index 0000000000..a3ff47f7bd --- /dev/null +++ b/stubs/qmp-quit.c @@ -0,0 +1,8 @@ +#include "qemu/osdep.h" +#include "qapi/qapi-commands-control.h" +#include "qapi/qmp/dispatch.h" + +void qmp_quit(Error **errp) +{ + g_assert_not_reached(); +} diff --git a/stubs/replay-tools.c b/stubs/replay-tools.c index 43296b3d4e..3e8ca3212d 100644 --- a/stubs/replay-tools.c +++ b/stubs/replay-tools.c @@ -7,13 +7,14 @@ bool replay_events_enabled(void) return false; } -int64_t replay_save_clock(unsigned int kind, int64_t clock, int64_t raw_icount) +int64_t replay_save_clock(ReplayClockKind kind, + int64_t clock, int64_t raw_icount) { abort(); return 0; } -int64_t replay_read_clock(unsigned int kind, int64_t raw_icount) +int64_t replay_read_clock(ReplayClockKind kind, int64_t raw_icount) { abort(); return 0; @@ -48,11 +49,11 @@ void replay_mutex_unlock(void) { } -void replay_register_char_driver(Chardev *chr) +void replay_register_char_driver(struct Chardev *chr) { } -void replay_chr_be_write(Chardev *s, uint8_t *buf, int len) +void replay_chr_be_write(struct Chardev *s, const uint8_t *buf, int len) { abort(); } diff --git a/stubs/semihost.c b/stubs/semihost.c index 4bf2cf71b9..d65c9fd5dc 100644 --- a/stubs/semihost.c +++ b/stubs/semihost.c @@ -23,7 +23,7 @@ QemuOptsList qemu_semihosting_config_opts = { }; /* Queries to config status default to off */ -bool semihosting_enabled(void) +bool semihosting_enabled(bool is_user) { return false; } @@ -65,10 +65,6 @@ void semihosting_arg_fallback(const char *file, const char *cmd) { } -void qemu_semihosting_connect_chardevs(void) -{ -} - -void qemu_semihosting_console_init(void) +void qemu_semihosting_chardev_init(void) { } diff --git a/stubs/usb-dev-stub.c b/stubs/usb-dev-stub.c index b1adeeb454..aa557692b7 100644 --- a/stubs/usb-dev-stub.c +++ b/stubs/usb-dev-stub.c @@ -8,6 +8,8 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" #include "sysemu/sysemu.h" #include "monitor/monitor.h" #include "hw/usb.h" @@ -19,6 +21,12 @@ USBDevice *usbdevice_create(const char *driver) return NULL; } +HumanReadableText *qmp_x_query_usb(Error **errp) +{ + 
error_setg(errp, "Support for USB devices not built-in"); + return NULL; +} + void hmp_info_usb(Monitor *mon, const QDict *qdict) { monitor_printf(mon, "Support for USB devices not built-in\n"); diff --git a/stubs/vfio-user-obj.c b/stubs/vfio-user-obj.c new file mode 100644 index 0000000000..79100d768e --- /dev/null +++ b/stubs/vfio-user-obj.c @@ -0,0 +1,6 @@ +#include "qemu/osdep.h" +#include "hw/remote/vfio-user-obj.h" + +void vfu_object_set_bus_irq(PCIBus *pci_bus) +{ +} diff --git a/stubs/xen-hw-stub.c b/stubs/xen-hw-stub.c index 15f3921a76..34a22f2ad7 100644 --- a/stubs/xen-hw-stub.c +++ b/stubs/xen-hw-stub.c @@ -19,8 +19,9 @@ void xen_piix3_set_irq(void *opaque, int irq_num, int level) { } -void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len) +int xen_set_pci_link_route(uint8_t link, uint8_t irq) { + return -1; } void xen_hvm_inject_msi(uint64_t addr, uint32_t data) diff --git a/subprojects/libvduse/include/atomic.h b/subprojects/libvduse/include/atomic.h new file mode 120000 index 0000000000..8c2be64f7b --- /dev/null +++ b/subprojects/libvduse/include/atomic.h @@ -0,0 +1 @@ +../../../include/qemu/atomic.h \ No newline at end of file diff --git a/subprojects/libvduse/include/compiler.h b/subprojects/libvduse/include/compiler.h new file mode 120000 index 0000000000..de7b70697c --- /dev/null +++ b/subprojects/libvduse/include/compiler.h @@ -0,0 +1 @@ +../../../include/qemu/compiler.h \ No newline at end of file diff --git a/subprojects/libvduse/libvduse.c b/subprojects/libvduse/libvduse.c new file mode 100644 index 0000000000..e089d4d546 --- /dev/null +++ b/subprojects/libvduse/libvduse.c @@ -0,0 +1,1376 @@ +/* + * VDUSE (vDPA Device in Userspace) library + * + * Copyright (C) 2022 Bytedance Inc. and/or its affiliates. All rights reserved. + * Portions of codes and concepts borrowed from libvhost-user.c, so: + * Copyright IBM, Corp. 2007 + * Copyright (c) 2016 Red Hat, Inc. + * + * Author: + * Xie Yongji + * Anthony Liguori + * Marc-André Lureau + * Victor Kaplansky + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * later. See the COPYING file in the top-level directory. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "include/atomic.h" +#include "linux-headers/linux/virtio_ring.h" +#include "linux-headers/linux/virtio_config.h" +#include "linux-headers/linux/vduse.h" +#include "libvduse.h" + +#define VDUSE_VQ_ALIGN 4096 +#define MAX_IOVA_REGIONS 256 + +#define LOG_ALIGNMENT 64 + +/* Round number down to multiple */ +#define ALIGN_DOWN(n, m) ((n) / (m) * (m)) + +/* Round number up to multiple */ +#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m)) + +#ifndef unlikely +#define unlikely(x) __builtin_expect(!!(x), 0) +#endif + +typedef struct VduseDescStateSplit { + uint8_t inflight; + uint8_t padding[5]; + uint16_t next; + uint64_t counter; +} VduseDescStateSplit; + +typedef struct VduseVirtqLogInflight { + uint64_t features; + uint16_t version; + uint16_t desc_num; + uint16_t last_batch_head; + uint16_t used_idx; + VduseDescStateSplit desc[]; +} VduseVirtqLogInflight; + +typedef struct VduseVirtqLog { + VduseVirtqLogInflight inflight; +} VduseVirtqLog; + +typedef struct VduseVirtqInflightDesc { + uint16_t index; + uint64_t counter; +} VduseVirtqInflightDesc; + +typedef struct VduseRing { + unsigned int num; + uint64_t desc_addr; + uint64_t avail_addr; + uint64_t used_addr; + struct vring_desc *desc; + struct vring_avail *avail; + struct vring_used *used; +} VduseRing; + +struct VduseVirtq { + VduseRing vring; + uint16_t last_avail_idx; + uint16_t shadow_avail_idx; + uint16_t used_idx; + uint16_t signalled_used; + bool signalled_used_valid; + int index; + int inuse; + bool ready; + int fd; + VduseDev *dev; + VduseVirtqInflightDesc *resubmit_list; + uint16_t resubmit_num; + uint64_t counter; + VduseVirtqLog *log; +}; + +typedef struct VduseIovaRegion { + uint64_t iova; + uint64_t size; + uint64_t mmap_offset; + uint64_t mmap_addr; +} VduseIovaRegion; + +struct VduseDev { + VduseVirtq *vqs; + VduseIovaRegion regions[MAX_IOVA_REGIONS]; + int num_regions; + char *name; + uint32_t device_id; + uint32_t vendor_id; + uint16_t num_queues; + uint16_t queue_size; + uint64_t features; + const VduseOps *ops; + int fd; + int ctrl_fd; + void *priv; + void *log; +}; + +static inline size_t vduse_vq_log_size(uint16_t queue_size) +{ + return ALIGN_UP(sizeof(VduseDescStateSplit) * queue_size + + sizeof(VduseVirtqLogInflight), LOG_ALIGNMENT); +} + +static void *vduse_log_get(const char *filename, size_t size) +{ + void *ptr = MAP_FAILED; + int fd; + + fd = open(filename, O_RDWR | O_CREAT, 0600); + if (fd == -1) { + return MAP_FAILED; + } + + if (ftruncate(fd, size) == -1) { + goto out; + } + + ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + +out: + close(fd); + return ptr; +} + +static inline bool has_feature(uint64_t features, unsigned int fbit) +{ + assert(fbit < 64); + return !!(features & (1ULL << fbit)); +} + +static inline bool vduse_dev_has_feature(VduseDev *dev, unsigned int fbit) +{ + return has_feature(dev->features, fbit); +} + +uint64_t vduse_get_virtio_features(void) +{ + return (1ULL << VIRTIO_F_IOMMU_PLATFORM) | + (1ULL << VIRTIO_F_VERSION_1) | + (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | + (1ULL << VIRTIO_RING_F_EVENT_IDX) | + (1ULL << VIRTIO_RING_F_INDIRECT_DESC); +} + +VduseDev *vduse_queue_get_dev(VduseVirtq *vq) +{ + return vq->dev; +} + +int vduse_queue_get_fd(VduseVirtq *vq) +{ + return vq->fd; +} + +void *vduse_dev_get_priv(VduseDev *dev) +{ + return dev->priv; +} + +VduseVirtq *vduse_dev_get_queue(VduseDev *dev, int 
index) +{ + return &dev->vqs[index]; +} + +int vduse_dev_get_fd(VduseDev *dev) +{ + return dev->fd; +} + +static int vduse_inject_irq(VduseDev *dev, int index) +{ + return ioctl(dev->fd, VDUSE_VQ_INJECT_IRQ, &index); +} + +static int inflight_desc_compare(const void *a, const void *b) +{ + VduseVirtqInflightDesc *desc0 = (VduseVirtqInflightDesc *)a, + *desc1 = (VduseVirtqInflightDesc *)b; + + if (desc1->counter > desc0->counter && + (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) { + return 1; + } + + return -1; +} + +static int vduse_queue_check_inflights(VduseVirtq *vq) +{ + int i = 0; + VduseDev *dev = vq->dev; + + vq->used_idx = le16toh(vq->vring.used->idx); + vq->resubmit_num = 0; + vq->resubmit_list = NULL; + vq->counter = 0; + + if (unlikely(vq->log->inflight.used_idx != vq->used_idx)) { + if (vq->log->inflight.last_batch_head > VIRTQUEUE_MAX_SIZE) { + return -1; + } + + vq->log->inflight.desc[vq->log->inflight.last_batch_head].inflight = 0; + + barrier(); + + vq->log->inflight.used_idx = vq->used_idx; + } + + for (i = 0; i < vq->log->inflight.desc_num; i++) { + if (vq->log->inflight.desc[i].inflight == 1) { + vq->inuse++; + } + } + + vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx; + + if (vq->inuse) { + vq->resubmit_list = calloc(vq->inuse, sizeof(VduseVirtqInflightDesc)); + if (!vq->resubmit_list) { + return -1; + } + + for (i = 0; i < vq->log->inflight.desc_num; i++) { + if (vq->log->inflight.desc[i].inflight) { + vq->resubmit_list[vq->resubmit_num].index = i; + vq->resubmit_list[vq->resubmit_num].counter = + vq->log->inflight.desc[i].counter; + vq->resubmit_num++; + } + } + + if (vq->resubmit_num > 1) { + qsort(vq->resubmit_list, vq->resubmit_num, + sizeof(VduseVirtqInflightDesc), inflight_desc_compare); + } + vq->counter = vq->resubmit_list[0].counter + 1; + } + + vduse_inject_irq(dev, vq->index); + + return 0; +} + +static int vduse_queue_inflight_get(VduseVirtq *vq, int desc_idx) +{ + vq->log->inflight.desc[desc_idx].counter = vq->counter++; + + barrier(); + + vq->log->inflight.desc[desc_idx].inflight = 1; + + return 0; +} + +static int vduse_queue_inflight_pre_put(VduseVirtq *vq, int desc_idx) +{ + vq->log->inflight.last_batch_head = desc_idx; + + return 0; +} + +static int vduse_queue_inflight_post_put(VduseVirtq *vq, int desc_idx) +{ + vq->log->inflight.desc[desc_idx].inflight = 0; + + barrier(); + + vq->log->inflight.used_idx = vq->used_idx; + + return 0; +} + +static void vduse_iova_remove_region(VduseDev *dev, uint64_t start, + uint64_t last) +{ + int i; + + if (last == start) { + return; + } + + for (i = 0; i < MAX_IOVA_REGIONS; i++) { + if (!dev->regions[i].mmap_addr) { + continue; + } + + if (start <= dev->regions[i].iova && + last >= (dev->regions[i].iova + dev->regions[i].size - 1)) { + munmap((void *)(uintptr_t)dev->regions[i].mmap_addr, + dev->regions[i].mmap_offset + dev->regions[i].size); + dev->regions[i].mmap_addr = 0; + dev->num_regions--; + } + } +} + +static int vduse_iova_add_region(VduseDev *dev, int fd, + uint64_t offset, uint64_t start, + uint64_t last, int prot) +{ + int i; + uint64_t size = last - start + 1; + void *mmap_addr = mmap(0, size + offset, prot, MAP_SHARED, fd, 0); + + if (mmap_addr == MAP_FAILED) { + close(fd); + return -EINVAL; + } + + for (i = 0; i < MAX_IOVA_REGIONS; i++) { + if (!dev->regions[i].mmap_addr) { + dev->regions[i].mmap_addr = (uint64_t)(uintptr_t)mmap_addr; + dev->regions[i].mmap_offset = offset; + dev->regions[i].iova = start; + dev->regions[i].size = size; + dev->num_regions++; + break; 
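/*
 * Editorial sketch of the ordering the inflight log above relies on for
 * crash recovery: the submit counter must be visible before the inflight
 * flag is set, and the inflight flag must be cleared before the recorded
 * used index moves.  The structures and demo_ names are illustrative
 * stand-ins, not part of libvduse.
 */
#include <stdint.h>

#define DEMO_QUEUE_SIZE 16
#define demo_barrier() __asm__ __volatile__("" ::: "memory")

typedef struct DemoInflightDesc {
    uint8_t inflight;           /* 1 while the backend owns the request */
    uint64_t counter;           /* global submit order, used to sort resubmits */
} DemoInflightDesc;

typedef struct DemoInflightLog {
    uint16_t used_idx;          /* last used index the backend published */
    uint16_t last_batch_head;   /* head recorded just before the used bump */
    DemoInflightDesc desc[DEMO_QUEUE_SIZE];
} DemoInflightLog;

static uint64_t demo_counter;

/* Mirrors vduse_queue_inflight_get(): counter first, then the flag. */
static void demo_inflight_get(DemoInflightLog *log, int head)
{
    log->desc[head].counter = demo_counter++;
    demo_barrier();
    log->desc[head].inflight = 1;
}

/* Mirrors the pre_put/post_put pair wrapped around the used-ring update. */
static void demo_inflight_put(DemoInflightLog *log, int head, uint16_t used_idx)
{
    log->last_batch_head = head;    /* pre_put: note what is being retired */
    /* ... the used ring entry and used->idx are written in between ... */
    log->desc[head].inflight = 0;   /* post_put: no longer in flight */
    demo_barrier();
    log->used_idx = used_idx;       /* only now admit the used index moved */
}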
+ } + } + assert(i < MAX_IOVA_REGIONS); + close(fd); + + return 0; +} + +static int perm_to_prot(uint8_t perm) +{ + int prot = 0; + + switch (perm) { + case VDUSE_ACCESS_WO: + prot |= PROT_WRITE; + break; + case VDUSE_ACCESS_RO: + prot |= PROT_READ; + break; + case VDUSE_ACCESS_RW: + prot |= PROT_READ | PROT_WRITE; + break; + default: + break; + } + + return prot; +} + +static inline void *iova_to_va(VduseDev *dev, uint64_t *plen, uint64_t iova) +{ + int i, ret; + struct vduse_iotlb_entry entry; + + for (i = 0; i < MAX_IOVA_REGIONS; i++) { + VduseIovaRegion *r = &dev->regions[i]; + + if (!r->mmap_addr) { + continue; + } + + if ((iova >= r->iova) && (iova < (r->iova + r->size))) { + if ((iova + *plen) > (r->iova + r->size)) { + *plen = r->iova + r->size - iova; + } + return (void *)(uintptr_t)(iova - r->iova + + r->mmap_addr + r->mmap_offset); + } + } + + entry.start = iova; + entry.last = iova + 1; + ret = ioctl(dev->fd, VDUSE_IOTLB_GET_FD, &entry); + if (ret < 0) { + return NULL; + } + + if (!vduse_iova_add_region(dev, ret, entry.offset, entry.start, + entry.last, perm_to_prot(entry.perm))) { + return iova_to_va(dev, plen, iova); + } + + return NULL; +} + +static inline uint16_t vring_avail_flags(VduseVirtq *vq) +{ + return le16toh(vq->vring.avail->flags); +} + +static inline uint16_t vring_avail_idx(VduseVirtq *vq) +{ + vq->shadow_avail_idx = le16toh(vq->vring.avail->idx); + + return vq->shadow_avail_idx; +} + +static inline uint16_t vring_avail_ring(VduseVirtq *vq, int i) +{ + return le16toh(vq->vring.avail->ring[i]); +} + +static inline uint16_t vring_get_used_event(VduseVirtq *vq) +{ + return vring_avail_ring(vq, vq->vring.num); +} + +static bool vduse_queue_get_head(VduseVirtq *vq, unsigned int idx, + unsigned int *head) +{ + /* + * Grab the next descriptor number they're advertising, and increment + * the index we've seen. + */ + *head = vring_avail_ring(vq, idx % vq->vring.num); + + /* If their number is silly, that's a fatal mistake. */ + if (*head >= vq->vring.num) { + fprintf(stderr, "Guest says index %u is available\n", *head); + return false; + } + + return true; +} + +static int +vduse_queue_read_indirect_desc(VduseDev *dev, struct vring_desc *desc, + uint64_t addr, size_t len) +{ + struct vring_desc *ori_desc; + uint64_t read_len; + + if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) { + return -1; + } + + if (len == 0) { + return -1; + } + + while (len) { + read_len = len; + ori_desc = iova_to_va(dev, &read_len, addr); + if (!ori_desc) { + return -1; + } + + memcpy(desc, ori_desc, read_len); + len -= read_len; + addr += read_len; + desc += read_len; + } + + return 0; +} + +enum { + VIRTQUEUE_READ_DESC_ERROR = -1, + VIRTQUEUE_READ_DESC_DONE = 0, /* end of chain */ + VIRTQUEUE_READ_DESC_MORE = 1, /* more buffers in chain */ +}; + +static int vduse_queue_read_next_desc(struct vring_desc *desc, int i, + unsigned int max, unsigned int *next) +{ + /* If this descriptor says it doesn't chain, we're done. */ + if (!(le16toh(desc[i].flags) & VRING_DESC_F_NEXT)) { + return VIRTQUEUE_READ_DESC_DONE; + } + + /* Check they're not leading us off end of descriptors. */ + *next = desc[i].next; + /* Make sure compiler knows to grab that: we don't want it changing! */ + smp_wmb(); + + if (*next >= max) { + fprintf(stderr, "Desc next is %u\n", *next); + return VIRTQUEUE_READ_DESC_ERROR; + } + + return VIRTQUEUE_READ_DESC_MORE; +} + +/* + * Fetch avail_idx from VQ memory only when we really need to know if + * guest has added some buffers. 
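/*
 * Editorial sketch of the translation step at the core of iova_to_va():
 * find the region covering the IOVA and clamp the returned length to the
 * region boundary.  The table layout is simplified and the on-demand
 * VDUSE_IOTLB_GET_FD fallback is omitted; names are illustrative.
 */
#include <stddef.h>
#include <stdint.h>

typedef struct DemoRegion {
    uint64_t iova;      /* start of the mapping in IOVA space */
    uint64_t size;      /* length of the mapping */
    uint8_t *va;        /* host address the region is mmap()ed at */
} DemoRegion;

static void *demo_iova_to_va(DemoRegion *regs, size_t n,
                             uint64_t iova, uint64_t *plen)
{
    for (size_t i = 0; i < n; i++) {
        DemoRegion *r = &regs[i];

        if (iova < r->iova || iova >= r->iova + r->size) {
            continue;
        }
        /* Never hand back more bytes than the region actually covers. */
        if (iova + *plen > r->iova + r->size) {
            *plen = r->iova + r->size - iova;
        }
        return r->va + (iova - r->iova);
    }
    return NULL;    /* a real backend would now ask the kernel for a mapping */
}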
+ */ +static bool vduse_queue_empty(VduseVirtq *vq) +{ + if (unlikely(!vq->vring.avail)) { + return true; + } + + if (vq->shadow_avail_idx != vq->last_avail_idx) { + return false; + } + + return vring_avail_idx(vq) == vq->last_avail_idx; +} + +static bool vduse_queue_should_notify(VduseVirtq *vq) +{ + VduseDev *dev = vq->dev; + uint16_t old, new; + bool v; + + /* We need to expose used array entries before checking used event. */ + smp_mb(); + + /* Always notify when queue is empty (when feature acknowledge) */ + if (vduse_dev_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) && + !vq->inuse && vduse_queue_empty(vq)) { + return true; + } + + if (!vduse_dev_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { + return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT); + } + + v = vq->signalled_used_valid; + vq->signalled_used_valid = true; + old = vq->signalled_used; + new = vq->signalled_used = vq->used_idx; + return !v || vring_need_event(vring_get_used_event(vq), new, old); +} + +void vduse_queue_notify(VduseVirtq *vq) +{ + VduseDev *dev = vq->dev; + + if (unlikely(!vq->vring.avail)) { + return; + } + + if (!vduse_queue_should_notify(vq)) { + return; + } + + if (vduse_inject_irq(dev, vq->index) < 0) { + fprintf(stderr, "Error inject irq for vq %d: %s\n", + vq->index, strerror(errno)); + } +} + +static inline void vring_set_avail_event(VduseVirtq *vq, uint16_t val) +{ + *((uint16_t *)&vq->vring.used->ring[vq->vring.num]) = htole16(val); +} + +static bool vduse_queue_map_single_desc(VduseVirtq *vq, unsigned int *p_num_sg, + struct iovec *iov, unsigned int max_num_sg, + bool is_write, uint64_t pa, size_t sz) +{ + unsigned num_sg = *p_num_sg; + VduseDev *dev = vq->dev; + + assert(num_sg <= max_num_sg); + + if (!sz) { + fprintf(stderr, "virtio: zero sized buffers are not allowed\n"); + return false; + } + + while (sz) { + uint64_t len = sz; + + if (num_sg == max_num_sg) { + fprintf(stderr, + "virtio: too many descriptors in indirect table\n"); + return false; + } + + iov[num_sg].iov_base = iova_to_va(dev, &len, pa); + if (iov[num_sg].iov_base == NULL) { + fprintf(stderr, "virtio: invalid address for buffers\n"); + return false; + } + iov[num_sg++].iov_len = len; + sz -= len; + pa += len; + } + + *p_num_sg = num_sg; + return true; +} + +static void *vduse_queue_alloc_element(size_t sz, unsigned out_num, + unsigned in_num) +{ + VduseVirtqElement *elem; + size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0])); + size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]); + size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]); + + assert(sz >= sizeof(VduseVirtqElement)); + elem = malloc(out_sg_end); + if (!elem) { + return NULL; + } + elem->out_num = out_num; + elem->in_num = in_num; + elem->in_sg = (void *)elem + in_sg_ofs; + elem->out_sg = (void *)elem + out_sg_ofs; + return elem; +} + +static void *vduse_queue_map_desc(VduseVirtq *vq, unsigned int idx, size_t sz) +{ + struct vring_desc *desc = vq->vring.desc; + VduseDev *dev = vq->dev; + uint64_t desc_addr, read_len; + unsigned int desc_len; + unsigned int max = vq->vring.num; + unsigned int i = idx; + VduseVirtqElement *elem; + struct iovec iov[VIRTQUEUE_MAX_SIZE]; + struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE]; + unsigned int out_num = 0, in_num = 0; + int rc; + + if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) { + if (le32toh(desc[i].len) % sizeof(struct vring_desc)) { + fprintf(stderr, "Invalid size for indirect buffer table\n"); + return NULL; + } + + /* loop over the indirect descriptor table */ + desc_addr = 
le64toh(desc[i].addr); + desc_len = le32toh(desc[i].len); + max = desc_len / sizeof(struct vring_desc); + read_len = desc_len; + desc = iova_to_va(dev, &read_len, desc_addr); + if (unlikely(desc && read_len != desc_len)) { + /* Failed to use zero copy */ + desc = NULL; + if (!vduse_queue_read_indirect_desc(dev, desc_buf, + desc_addr, + desc_len)) { + desc = desc_buf; + } + } + if (!desc) { + fprintf(stderr, "Invalid indirect buffer table\n"); + return NULL; + } + i = 0; + } + + /* Collect all the descriptors */ + do { + if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) { + if (!vduse_queue_map_single_desc(vq, &in_num, iov + out_num, + VIRTQUEUE_MAX_SIZE - out_num, + true, le64toh(desc[i].addr), + le32toh(desc[i].len))) { + return NULL; + } + } else { + if (in_num) { + fprintf(stderr, "Incorrect order for descriptors\n"); + return NULL; + } + if (!vduse_queue_map_single_desc(vq, &out_num, iov, + VIRTQUEUE_MAX_SIZE, false, + le64toh(desc[i].addr), + le32toh(desc[i].len))) { + return NULL; + } + } + + /* If we've got too many, that implies a descriptor loop. */ + if ((in_num + out_num) > max) { + fprintf(stderr, "Looped descriptor\n"); + return NULL; + } + rc = vduse_queue_read_next_desc(desc, i, max, &i); + } while (rc == VIRTQUEUE_READ_DESC_MORE); + + if (rc == VIRTQUEUE_READ_DESC_ERROR) { + fprintf(stderr, "read descriptor error\n"); + return NULL; + } + + /* Now copy what we have collected and mapped */ + elem = vduse_queue_alloc_element(sz, out_num, in_num); + if (!elem) { + fprintf(stderr, "read descriptor error\n"); + return NULL; + } + elem->index = idx; + for (i = 0; i < out_num; i++) { + elem->out_sg[i] = iov[i]; + } + for (i = 0; i < in_num; i++) { + elem->in_sg[i] = iov[out_num + i]; + } + + return elem; +} + +void *vduse_queue_pop(VduseVirtq *vq, size_t sz) +{ + unsigned int head; + VduseVirtqElement *elem; + VduseDev *dev = vq->dev; + int i; + + if (unlikely(!vq->vring.avail)) { + return NULL; + } + + if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) { + i = (--vq->resubmit_num); + elem = vduse_queue_map_desc(vq, vq->resubmit_list[i].index, sz); + + if (!vq->resubmit_num) { + free(vq->resubmit_list); + vq->resubmit_list = NULL; + } + + return elem; + } + + if (vduse_queue_empty(vq)) { + return NULL; + } + /* Needed after virtio_queue_empty() */ + smp_rmb(); + + if (vq->inuse >= vq->vring.num) { + fprintf(stderr, "Virtqueue size exceeded: %d\n", vq->inuse); + return NULL; + } + + if (!vduse_queue_get_head(vq, vq->last_avail_idx++, &head)) { + return NULL; + } + + if (vduse_dev_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { + vring_set_avail_event(vq, vq->last_avail_idx); + } + + elem = vduse_queue_map_desc(vq, head, sz); + + if (!elem) { + return NULL; + } + + vq->inuse++; + + vduse_queue_inflight_get(vq, head); + + return elem; +} + +static inline void vring_used_write(VduseVirtq *vq, + struct vring_used_elem *uelem, int i) +{ + struct vring_used *used = vq->vring.used; + + used->ring[i] = *uelem; +} + +static void vduse_queue_fill(VduseVirtq *vq, const VduseVirtqElement *elem, + unsigned int len, unsigned int idx) +{ + struct vring_used_elem uelem; + + if (unlikely(!vq->vring.used)) { + return; + } + + idx = (idx + vq->used_idx) % vq->vring.num; + + uelem.id = htole32(elem->index); + uelem.len = htole32(len); + vring_used_write(vq, &uelem, idx); +} + +static inline void vring_used_idx_set(VduseVirtq *vq, uint16_t val) +{ + vq->vring.used->idx = htole16(val); + vq->used_idx = val; +} + +static void vduse_queue_flush(VduseVirtq *vq, unsigned int count) +{ + uint16_t old, new; 
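/*
 * Editorial sketch of how a backend consumes the functions above: embed
 * VduseVirtqElement at the start of a private request structure, pass that
 * size to vduse_queue_pop(), and free the element after pushing it back.
 * DemoReq and demo_handle() are hypothetical; only the vduse_* calls are
 * taken from this library.
 */
#include <stdint.h>
#include <stdlib.h>
#include "libvduse.h"

typedef struct DemoReq {
    VduseVirtqElement elem;     /* must be the first member */
    uint32_t status;            /* example of backend-private request state */
} DemoReq;

static uint32_t demo_handle(DemoReq *req)
{
    /*
     * req->elem.out_sg[0..out_num-1] holds driver-written buffers,
     * req->elem.in_sg[0..in_num-1] is where the device writes results.
     */
    return 0;                   /* bytes written into in_sg */
}

static void demo_drain_queue(VduseVirtq *vq)
{
    DemoReq *req;

    while ((req = vduse_queue_pop(vq, sizeof(DemoReq))) != NULL) {
        uint32_t written = demo_handle(req);

        vduse_queue_push(vq, &req->elem, written);
        free(req);              /* the element is malloc()ed by pop */
    }
    vduse_queue_notify(vq);     /* one interrupt for the whole batch */
}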
+ + if (unlikely(!vq->vring.used)) { + return; + } + + /* Make sure buffer is written before we update index. */ + smp_wmb(); + + old = vq->used_idx; + new = old + count; + vring_used_idx_set(vq, new); + vq->inuse -= count; + if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) { + vq->signalled_used_valid = false; + } +} + +void vduse_queue_push(VduseVirtq *vq, const VduseVirtqElement *elem, + unsigned int len) +{ + vduse_queue_fill(vq, elem, len, 0); + vduse_queue_inflight_pre_put(vq, elem->index); + vduse_queue_flush(vq, 1); + vduse_queue_inflight_post_put(vq, elem->index); +} + +static int vduse_queue_update_vring(VduseVirtq *vq, uint64_t desc_addr, + uint64_t avail_addr, uint64_t used_addr) +{ + struct VduseDev *dev = vq->dev; + uint64_t len; + + len = sizeof(struct vring_desc); + vq->vring.desc = iova_to_va(dev, &len, desc_addr); + if (len != sizeof(struct vring_desc)) { + return -EINVAL; + } + + len = sizeof(struct vring_avail); + vq->vring.avail = iova_to_va(dev, &len, avail_addr); + if (len != sizeof(struct vring_avail)) { + return -EINVAL; + } + + len = sizeof(struct vring_used); + vq->vring.used = iova_to_va(dev, &len, used_addr); + if (len != sizeof(struct vring_used)) { + return -EINVAL; + } + + if (!vq->vring.desc || !vq->vring.avail || !vq->vring.used) { + fprintf(stderr, "Failed to get vq[%d] iova mapping\n", vq->index); + return -EINVAL; + } + + return 0; +} + +static void vduse_queue_enable(VduseVirtq *vq) +{ + struct VduseDev *dev = vq->dev; + struct vduse_vq_info vq_info; + struct vduse_vq_eventfd vq_eventfd; + int fd; + + vq_info.index = vq->index; + if (ioctl(dev->fd, VDUSE_VQ_GET_INFO, &vq_info)) { + fprintf(stderr, "Failed to get vq[%d] info: %s\n", + vq->index, strerror(errno)); + return; + } + + if (!vq_info.ready) { + return; + } + + vq->vring.num = vq_info.num; + vq->vring.desc_addr = vq_info.desc_addr; + vq->vring.avail_addr = vq_info.driver_addr; + vq->vring.used_addr = vq_info.device_addr; + + if (vduse_queue_update_vring(vq, vq_info.desc_addr, + vq_info.driver_addr, vq_info.device_addr)) { + fprintf(stderr, "Failed to update vring for vq[%d]\n", vq->index); + return; + } + + fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); + if (fd < 0) { + fprintf(stderr, "Failed to init eventfd for vq[%d]\n", vq->index); + return; + } + + vq_eventfd.index = vq->index; + vq_eventfd.fd = fd; + if (ioctl(dev->fd, VDUSE_VQ_SETUP_KICKFD, &vq_eventfd)) { + fprintf(stderr, "Failed to setup kick fd for vq[%d]\n", vq->index); + close(fd); + return; + } + + vq->fd = fd; + vq->signalled_used_valid = false; + vq->ready = true; + + if (vduse_queue_check_inflights(vq)) { + fprintf(stderr, "Failed to check inflights for vq[%d]\n", vq->index); + close(fd); + return; + } + + dev->ops->enable_queue(dev, vq); +} + +static void vduse_queue_disable(VduseVirtq *vq) +{ + struct VduseDev *dev = vq->dev; + struct vduse_vq_eventfd eventfd; + + if (!vq->ready) { + return; + } + + dev->ops->disable_queue(dev, vq); + + eventfd.index = vq->index; + eventfd.fd = VDUSE_EVENTFD_DEASSIGN; + ioctl(dev->fd, VDUSE_VQ_SETUP_KICKFD, &eventfd); + close(vq->fd); + + assert(vq->inuse == 0); + + vq->vring.num = 0; + vq->vring.desc_addr = 0; + vq->vring.avail_addr = 0; + vq->vring.used_addr = 0; + vq->vring.desc = 0; + vq->vring.avail = 0; + vq->vring.used = 0; + vq->ready = false; + vq->fd = -1; +} + +static void vduse_dev_start_dataplane(VduseDev *dev) +{ + int i; + + if (ioctl(dev->fd, VDUSE_DEV_GET_FEATURES, &dev->features)) { + fprintf(stderr, "Failed to get features: %s\n", strerror(errno)); 
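/*
 * Editorial sketch of what typically sits behind the enable_queue callback
 * invoked above: wait for the kick eventfd that VDUSE_VQ_SETUP_KICKFD
 * registered, drain its 8-byte counter, then service the queue.  The poll
 * loop is illustrative, not part of libvduse.
 */
#include <poll.h>
#include <stdint.h>
#include <unistd.h>
#include "libvduse.h"

static void demo_wait_for_kick(VduseVirtq *vq)
{
    struct pollfd pfd = {
        .fd = vduse_queue_get_fd(vq),   /* the eventfd set up in queue enable */
        .events = POLLIN,
    };
    uint64_t cnt;

    if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
        if (read(pfd.fd, &cnt, sizeof(cnt)) == sizeof(cnt)) {
            /* pop, process and push requests here, then notify */
        }
    }
}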
+ return; + } + assert(vduse_dev_has_feature(dev, VIRTIO_F_VERSION_1)); + + for (i = 0; i < dev->num_queues; i++) { + vduse_queue_enable(&dev->vqs[i]); + } +} + +static void vduse_dev_stop_dataplane(VduseDev *dev) +{ + size_t log_size = dev->num_queues * vduse_vq_log_size(VIRTQUEUE_MAX_SIZE); + int i; + + for (i = 0; i < dev->num_queues; i++) { + vduse_queue_disable(&dev->vqs[i]); + } + if (dev->log) { + memset(dev->log, 0, log_size); + } + dev->features = 0; + vduse_iova_remove_region(dev, 0, ULONG_MAX); +} + +int vduse_dev_handler(VduseDev *dev) +{ + struct vduse_dev_request req; + struct vduse_dev_response resp = { 0 }; + VduseVirtq *vq; + int i, ret; + + ret = read(dev->fd, &req, sizeof(req)); + if (ret != sizeof(req)) { + fprintf(stderr, "Read request error [%d]: %s\n", + ret, strerror(errno)); + return -errno; + } + resp.request_id = req.request_id; + + switch (req.type) { + case VDUSE_GET_VQ_STATE: + vq = &dev->vqs[req.vq_state.index]; + resp.vq_state.split.avail_index = vq->last_avail_idx; + resp.result = VDUSE_REQ_RESULT_OK; + break; + case VDUSE_SET_STATUS: + if (req.s.status & VIRTIO_CONFIG_S_DRIVER_OK) { + vduse_dev_start_dataplane(dev); + } else if (req.s.status == 0) { + vduse_dev_stop_dataplane(dev); + } + resp.result = VDUSE_REQ_RESULT_OK; + break; + case VDUSE_UPDATE_IOTLB: + /* The iova will be updated by iova_to_va() later, so just remove it */ + vduse_iova_remove_region(dev, req.iova.start, req.iova.last); + for (i = 0; i < dev->num_queues; i++) { + VduseVirtq *vq = &dev->vqs[i]; + if (vq->ready) { + if (vduse_queue_update_vring(vq, vq->vring.desc_addr, + vq->vring.avail_addr, + vq->vring.used_addr)) { + fprintf(stderr, "Failed to update vring for vq[%d]\n", + vq->index); + } + } + } + resp.result = VDUSE_REQ_RESULT_OK; + break; + default: + resp.result = VDUSE_REQ_RESULT_FAILED; + break; + } + + ret = write(dev->fd, &resp, sizeof(resp)); + if (ret != sizeof(resp)) { + fprintf(stderr, "Write request %d error [%d]: %s\n", + req.type, ret, strerror(errno)); + return -errno; + } + return 0; +} + +int vduse_dev_update_config(VduseDev *dev, uint32_t size, + uint32_t offset, char *buffer) +{ + int ret; + struct vduse_config_data *data; + + data = malloc(offsetof(struct vduse_config_data, buffer) + size); + if (!data) { + return -ENOMEM; + } + + data->offset = offset; + data->length = size; + memcpy(data->buffer, buffer, size); + + ret = ioctl(dev->fd, VDUSE_DEV_SET_CONFIG, data); + free(data); + + if (ret) { + return -errno; + } + + if (ioctl(dev->fd, VDUSE_DEV_INJECT_CONFIG_IRQ)) { + return -errno; + } + + return 0; +} + +int vduse_dev_setup_queue(VduseDev *dev, int index, int max_size) +{ + VduseVirtq *vq = &dev->vqs[index]; + struct vduse_vq_config vq_config = { 0 }; + + if (max_size > VIRTQUEUE_MAX_SIZE) { + return -EINVAL; + } + + vq_config.index = vq->index; + vq_config.max_size = max_size; + + if (ioctl(dev->fd, VDUSE_VQ_SETUP, &vq_config)) { + return -errno; + } + + vduse_queue_enable(vq); + + return 0; +} + +int vduse_set_reconnect_log_file(VduseDev *dev, const char *filename) +{ + + size_t log_size = dev->num_queues * vduse_vq_log_size(VIRTQUEUE_MAX_SIZE); + void *log; + int i; + + dev->log = log = vduse_log_get(filename, log_size); + if (log == MAP_FAILED) { + fprintf(stderr, "Failed to get vduse log\n"); + return -EINVAL; + } + + for (i = 0; i < dev->num_queues; i++) { + dev->vqs[i].log = log; + dev->vqs[i].log->inflight.desc_num = VIRTQUEUE_MAX_SIZE; + log = (void *)((char *)log + vduse_vq_log_size(VIRTQUEUE_MAX_SIZE)); + } + + return 0; +} + +static int 
vduse_dev_init_vqs(VduseDev *dev, uint16_t num_queues) +{ + VduseVirtq *vqs; + int i; + + vqs = calloc(sizeof(VduseVirtq), num_queues); + if (!vqs) { + return -ENOMEM; + } + + for (i = 0; i < num_queues; i++) { + vqs[i].index = i; + vqs[i].dev = dev; + vqs[i].fd = -1; + } + dev->vqs = vqs; + + return 0; +} + +static int vduse_dev_init(VduseDev *dev, const char *name, + uint16_t num_queues, const VduseOps *ops, + void *priv) +{ + char *dev_path, *dev_name; + int ret, fd; + + dev_path = malloc(strlen(name) + strlen("/dev/vduse/") + 1); + if (!dev_path) { + return -ENOMEM; + } + sprintf(dev_path, "/dev/vduse/%s", name); + + fd = open(dev_path, O_RDWR); + free(dev_path); + if (fd < 0) { + fprintf(stderr, "Failed to open vduse dev %s: %s\n", + name, strerror(errno)); + return -errno; + } + + if (ioctl(fd, VDUSE_DEV_GET_FEATURES, &dev->features)) { + fprintf(stderr, "Failed to get features: %s\n", strerror(errno)); + close(fd); + return -errno; + } + + dev_name = strdup(name); + if (!dev_name) { + close(fd); + return -ENOMEM; + } + + ret = vduse_dev_init_vqs(dev, num_queues); + if (ret) { + free(dev_name); + close(fd); + return ret; + } + + dev->name = dev_name; + dev->num_queues = num_queues; + dev->fd = fd; + dev->ops = ops; + dev->priv = priv; + + return 0; +} + +static inline bool vduse_name_is_invalid(const char *name) +{ + return strlen(name) >= VDUSE_NAME_MAX || strstr(name, ".."); +} + +VduseDev *vduse_dev_create_by_fd(int fd, uint16_t num_queues, + const VduseOps *ops, void *priv) +{ + VduseDev *dev; + int ret; + + if (!ops || !ops->enable_queue || !ops->disable_queue) { + fprintf(stderr, "Invalid parameter for vduse\n"); + return NULL; + } + + dev = calloc(sizeof(VduseDev), 1); + if (!dev) { + fprintf(stderr, "Failed to allocate vduse device\n"); + return NULL; + } + + if (ioctl(fd, VDUSE_DEV_GET_FEATURES, &dev->features)) { + fprintf(stderr, "Failed to get features: %s\n", strerror(errno)); + free(dev); + return NULL; + } + + ret = vduse_dev_init_vqs(dev, num_queues); + if (ret) { + fprintf(stderr, "Failed to init vqs\n"); + free(dev); + return NULL; + } + + dev->num_queues = num_queues; + dev->fd = fd; + dev->ops = ops; + dev->priv = priv; + + return dev; +} + +VduseDev *vduse_dev_create_by_name(const char *name, uint16_t num_queues, + const VduseOps *ops, void *priv) +{ + VduseDev *dev; + int ret; + + if (!name || vduse_name_is_invalid(name) || !ops || + !ops->enable_queue || !ops->disable_queue) { + fprintf(stderr, "Invalid parameter for vduse\n"); + return NULL; + } + + dev = calloc(sizeof(VduseDev), 1); + if (!dev) { + fprintf(stderr, "Failed to allocate vduse device\n"); + return NULL; + } + + ret = vduse_dev_init(dev, name, num_queues, ops, priv); + if (ret < 0) { + fprintf(stderr, "Failed to init vduse device %s: %s\n", + name, strerror(-ret)); + free(dev); + return NULL; + } + + return dev; +} + +VduseDev *vduse_dev_create(const char *name, uint32_t device_id, + uint32_t vendor_id, uint64_t features, + uint16_t num_queues, uint32_t config_size, + char *config, const VduseOps *ops, void *priv) +{ + VduseDev *dev; + int ret, ctrl_fd; + uint64_t version; + struct vduse_dev_config *dev_config; + size_t size = offsetof(struct vduse_dev_config, config); + + if (!name || vduse_name_is_invalid(name) || + !has_feature(features, VIRTIO_F_VERSION_1) || !config || + !config_size || !ops || !ops->enable_queue || !ops->disable_queue) { + fprintf(stderr, "Invalid parameter for vduse\n"); + return NULL; + } + + dev = calloc(sizeof(VduseDev), 1); + if (!dev) { + fprintf(stderr, "Failed to 
allocate vduse device\n"); + return NULL; + } + + ctrl_fd = open("/dev/vduse/control", O_RDWR); + if (ctrl_fd < 0) { + fprintf(stderr, "Failed to open /dev/vduse/control: %s\n", + strerror(errno)); + goto err_ctrl; + } + + version = VDUSE_API_VERSION; + if (ioctl(ctrl_fd, VDUSE_SET_API_VERSION, &version)) { + fprintf(stderr, "Failed to set api version %" PRIu64 ": %s\n", + version, strerror(errno)); + goto err_dev; + } + + dev_config = calloc(size + config_size, 1); + if (!dev_config) { + fprintf(stderr, "Failed to allocate config space\n"); + goto err_dev; + } + + assert(!vduse_name_is_invalid(name)); + strcpy(dev_config->name, name); + dev_config->device_id = device_id; + dev_config->vendor_id = vendor_id; + dev_config->features = features; + dev_config->vq_num = num_queues; + dev_config->vq_align = VDUSE_VQ_ALIGN; + dev_config->config_size = config_size; + memcpy(dev_config->config, config, config_size); + + ret = ioctl(ctrl_fd, VDUSE_CREATE_DEV, dev_config); + free(dev_config); + if (ret && errno != EEXIST) { + fprintf(stderr, "Failed to create vduse device %s: %s\n", + name, strerror(errno)); + goto err_dev; + } + dev->ctrl_fd = ctrl_fd; + + ret = vduse_dev_init(dev, name, num_queues, ops, priv); + if (ret < 0) { + fprintf(stderr, "Failed to init vduse device %s: %s\n", + name, strerror(-ret)); + goto err; + } + + return dev; +err: + ioctl(ctrl_fd, VDUSE_DESTROY_DEV, name); +err_dev: + close(ctrl_fd); +err_ctrl: + free(dev); + + return NULL; +} + +int vduse_dev_destroy(VduseDev *dev) +{ + size_t log_size = dev->num_queues * vduse_vq_log_size(VIRTQUEUE_MAX_SIZE); + int i, ret = 0; + + if (dev->log) { + munmap(dev->log, log_size); + } + for (i = 0; i < dev->num_queues; i++) { + free(dev->vqs[i].resubmit_list); + } + free(dev->vqs); + if (dev->fd >= 0) { + close(dev->fd); + dev->fd = -1; + } + if (dev->ctrl_fd >= 0) { + if (ioctl(dev->ctrl_fd, VDUSE_DESTROY_DEV, dev->name)) { + ret = -errno; + } + close(dev->ctrl_fd); + dev->ctrl_fd = -1; + } + free(dev->name); + free(dev); + + return ret; +} diff --git a/subprojects/libvduse/libvduse.h b/subprojects/libvduse/libvduse.h new file mode 100644 index 0000000000..32f19e7b48 --- /dev/null +++ b/subprojects/libvduse/libvduse.h @@ -0,0 +1,247 @@ +/* + * VDUSE (vDPA Device in Userspace) library + * + * Copyright (C) 2022 Bytedance Inc. and/or its affiliates. All rights reserved. + * + * Author: + * Xie Yongji + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * later. See the COPYING file in the top-level directory. 
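/*
 * Editorial sketch of the lifecycle implemented by vduse_dev_create() and
 * vduse_dev_destroy() above, for a hypothetical single-queue device.  The
 * device id, config layout and "demo0" name are made up for illustration;
 * a real backend would use the values defined for its virtio device type.
 */
#include <poll.h>
#include <stdint.h>
#include "libvduse.h"

static void demo_enable(VduseDev *dev, VduseVirtq *vq)
{
    /* add vduse_queue_get_fd(vq) to the backend's event loop */
}

static void demo_disable(VduseDev *dev, VduseVirtq *vq)
{
    /* remove the kick fd from the event loop */
}

static const VduseOps demo_ops = {
    .enable_queue = demo_enable,
    .disable_queue = demo_disable,
};

static int demo_run(void)
{
    char config[8] = { 0 };     /* hypothetical 8-byte device config space */
    VduseDev *dev;

    dev = vduse_dev_create("demo0", 42 /* device id */, 0 /* vendor id */,
                           vduse_get_virtio_features(), 1 /* num_queues */,
                           sizeof(config), config, &demo_ops, NULL);
    if (!dev) {
        return -1;
    }

    vduse_dev_setup_queue(dev, 0, 256);     /* <= VIRTQUEUE_MAX_SIZE */

    for (;;) {
        struct pollfd pfd = { .fd = vduse_dev_get_fd(dev), .events = POLLIN };

        if (poll(&pfd, 1, -1) <= 0 || vduse_dev_handler(dev) < 0) {
            break;
        }
    }

    return vduse_dev_destroy(dev);
}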
+ */ + +#ifndef LIBVDUSE_H +#define LIBVDUSE_H + +#include +#include + +#define VIRTQUEUE_MAX_SIZE 1024 + +/* VDUSE device structure */ +typedef struct VduseDev VduseDev; + +/* Virtqueue structure */ +typedef struct VduseVirtq VduseVirtq; + +/* Some operation of VDUSE backend */ +typedef struct VduseOps { + /* Called when virtqueue can be processed */ + void (*enable_queue)(VduseDev *dev, VduseVirtq *vq); + /* Called when virtqueue processing should be stopped */ + void (*disable_queue)(VduseDev *dev, VduseVirtq *vq); +} VduseOps; + +/* Describing elements of the I/O buffer */ +typedef struct VduseVirtqElement { + /* Descriptor table index */ + unsigned int index; + /* Number of physically-contiguous device-readable descriptors */ + unsigned int out_num; + /* Number of physically-contiguous device-writable descriptors */ + unsigned int in_num; + /* Array to store physically-contiguous device-writable descriptors */ + struct iovec *in_sg; + /* Array to store physically-contiguous device-readable descriptors */ + struct iovec *out_sg; +} VduseVirtqElement; + + +/** + * vduse_get_virtio_features: + * + * Get supported virtio features + * + * Returns: supported feature bits + */ +uint64_t vduse_get_virtio_features(void); + +/** + * vduse_queue_get_dev: + * @vq: specified virtqueue + * + * Get corresponding VDUSE device from the virtqueue. + * + * Returns: a pointer to VDUSE device on success, NULL on failure. + */ +VduseDev *vduse_queue_get_dev(VduseVirtq *vq); + +/** + * vduse_queue_get_fd: + * @vq: specified virtqueue + * + * Get the kick fd for the virtqueue. + * + * Returns: file descriptor on success, -1 on failure. + */ +int vduse_queue_get_fd(VduseVirtq *vq); + +/** + * vduse_queue_pop: + * @vq: specified virtqueue + * @sz: the size of struct to return (must be >= VduseVirtqElement) + * + * Pop an element from virtqueue available ring. + * + * Returns: a pointer to a structure containing VduseVirtqElement on success, + * NULL on failure. + */ +void *vduse_queue_pop(VduseVirtq *vq, size_t sz); + +/** + * vduse_queue_push: + * @vq: specified virtqueue + * @elem: pointer to VduseVirtqElement returned by vduse_queue_pop() + * @len: length in bytes to write + * + * Push an element to virtqueue used ring. + */ +void vduse_queue_push(VduseVirtq *vq, const VduseVirtqElement *elem, + unsigned int len); +/** + * vduse_queue_notify: + * @vq: specified virtqueue + * + * Request to notify the queue. + */ +void vduse_queue_notify(VduseVirtq *vq); + +/** + * vduse_dev_get_priv: + * @dev: VDUSE device + * + * Get the private pointer passed to vduse_dev_create(). + * + * Returns: private pointer on success, NULL on failure. + */ +void *vduse_dev_get_priv(VduseDev *dev); + +/** + * vduse_dev_get_queue: + * @dev: VDUSE device + * @index: virtqueue index + * + * Get the specified virtqueue. + * + * Returns: a pointer to the virtqueue on success, NULL on failure. + */ +VduseVirtq *vduse_dev_get_queue(VduseDev *dev, int index); + +/** + * vduse_dev_get_fd: + * @dev: VDUSE device + * + * Get the control message fd for the VDUSE device. + * + * Returns: file descriptor on success, -1 on failure. + */ +int vduse_dev_get_fd(VduseDev *dev); + +/** + * vduse_dev_handler: + * @dev: VDUSE device + * + * Used to process the control message. + * + * Returns: file descriptor on success, -errno on failure. 
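/*
 * Editorial sketch built on the element layout documented above: total up
 * the device-readable and device-writable bytes of a request.  The demo_*
 * helpers are illustrative only.
 */
#include <stddef.h>
#include <sys/uio.h>
#include "libvduse.h"

static size_t demo_iov_size(const struct iovec *sg, unsigned int num)
{
    size_t total = 0;

    for (unsigned int i = 0; i < num; i++) {
        total += sg[i].iov_len;
    }
    return total;
}

static void demo_request_sizes(const VduseVirtqElement *elem,
                               size_t *to_read, size_t *to_write)
{
    *to_read = demo_iov_size(elem->out_sg, elem->out_num);   /* driver -> device */
    *to_write = demo_iov_size(elem->in_sg, elem->in_num);    /* device -> driver */
}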
+ */ +int vduse_dev_handler(VduseDev *dev); + +/** + * vduse_dev_update_config: + * @dev: VDUSE device + * @size: the size to write to configuration space + * @offset: the offset from the beginning of configuration space + * @buffer: the buffer used to write from + * + * Update device configuration space and inject a config interrupt. + * + * Returns: 0 on success, -errno on failure. + */ +int vduse_dev_update_config(VduseDev *dev, uint32_t size, + uint32_t offset, char *buffer); + +/** + * vduse_dev_setup_queue: + * @dev: VDUSE device + * @index: virtqueue index + * @max_size: the max size of virtqueue + * + * Setup the specified virtqueue. + * + * Returns: 0 on success, -errno on failure. + */ +int vduse_dev_setup_queue(VduseDev *dev, int index, int max_size); + +/** + * vduse_set_reconnect_log_file: + * @dev: VDUSE device + * @file: filename of reconnect log + * + * Specify the file to store log for reconnecting. It should + * be called before vduse_dev_setup_queue(). + * + * Returns: 0 on success, -errno on failure. + */ +int vduse_set_reconnect_log_file(VduseDev *dev, const char *filename); + +/** + * vduse_dev_create_by_fd: + * @fd: passed file descriptor + * @num_queues: the number of virtqueues + * @ops: the operation of VDUSE backend + * @priv: private pointer + * + * Create VDUSE device from a passed file descriptor. + * + * Returns: pointer to VDUSE device on success, NULL on failure. + */ +VduseDev *vduse_dev_create_by_fd(int fd, uint16_t num_queues, + const VduseOps *ops, void *priv); + +/** + * vduse_dev_create_by_name: + * @name: VDUSE device name + * @num_queues: the number of virtqueues + * @ops: the operation of VDUSE backend + * @priv: private pointer + * + * Create VDUSE device on /dev/vduse/$NAME. + * + * Returns: pointer to VDUSE device on success, NULL on failure. + */ +VduseDev *vduse_dev_create_by_name(const char *name, uint16_t num_queues, + const VduseOps *ops, void *priv); + +/** + * vduse_dev_create: + * @name: VDUSE device name + * @device_id: virtio device id + * @vendor_id: virtio vendor id + * @features: virtio features + * @num_queues: the number of virtqueues + * @config_size: the size of the configuration space + * @config: the buffer of the configuration space + * @ops: the operation of VDUSE backend + * @priv: private pointer + * + * Create VDUSE device. + * + * Returns: pointer to VDUSE device on success, NULL on failure. + */ +VduseDev *vduse_dev_create(const char *name, uint32_t device_id, + uint32_t vendor_id, uint64_t features, + uint16_t num_queues, uint32_t config_size, + char *config, const VduseOps *ops, void *priv); + +/** + * vduse_dev_destroy: + * @dev: VDUSE device + * + * Destroy the VDUSE device. + * + * Returns: 0 on success, -errno on failure. 
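/*
 * Editorial sketch of vduse_dev_update_config() as documented above: rewrite
 * part of the config space and let the library raise the config interrupt.
 * The "64-bit field at offset 0" layout is a hypothetical example, not tied
 * to any particular device type; a little-endian host is assumed for brevity.
 */
#include <stdint.h>
#include <string.h>
#include "libvduse.h"

static int demo_update_capacity(VduseDev *dev, uint64_t new_capacity)
{
    char buf[sizeof(new_capacity)];

    memcpy(buf, &new_capacity, sizeof(new_capacity));
    /* arguments are size, then offset, per the prototype above */
    return vduse_dev_update_config(dev, sizeof(buf), 0, buf);
}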
+ */ +int vduse_dev_destroy(VduseDev *dev); + +#endif diff --git a/subprojects/libvduse/linux-headers/linux b/subprojects/libvduse/linux-headers/linux new file mode 120000 index 0000000000..04f3304f79 --- /dev/null +++ b/subprojects/libvduse/linux-headers/linux @@ -0,0 +1 @@ +../../../linux-headers/linux/ \ No newline at end of file diff --git a/subprojects/libvduse/meson.build b/subprojects/libvduse/meson.build new file mode 100644 index 0000000000..ba08f5ee1a --- /dev/null +++ b/subprojects/libvduse/meson.build @@ -0,0 +1,10 @@ +project('libvduse', 'c', + license: 'GPL-2.0-or-later', + default_options: ['c_std=gnu99']) + +libvduse = static_library('vduse', + files('libvduse.c'), + c_args: '-D_GNU_SOURCE') + +libvduse_dep = declare_dependency(link_with: libvduse, + include_directories: include_directories('.')) diff --git a/subprojects/libvduse/standard-headers/linux b/subprojects/libvduse/standard-headers/linux new file mode 120000 index 0000000000..c416f068ac --- /dev/null +++ b/subprojects/libvduse/standard-headers/linux @@ -0,0 +1 @@ +../../../include/standard-headers/linux/ \ No newline at end of file diff --git a/subprojects/libvfio-user b/subprojects/libvfio-user new file mode 160000 index 0000000000..0b28d20557 --- /dev/null +++ b/subprojects/libvfio-user @@ -0,0 +1 @@ +Subproject commit 0b28d205572c80b568a1003db2c8f37ca333e4d7 diff --git a/subprojects/libvhost-user/include/compiler.h b/subprojects/libvhost-user/include/compiler.h new file mode 120000 index 0000000000..de7b70697c --- /dev/null +++ b/subprojects/libvhost-user/include/compiler.h @@ -0,0 +1 @@ +../../../include/qemu/compiler.h \ No newline at end of file diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c index bf09693255..b17e82b2b0 100644 --- a/subprojects/libvhost-user/libvhost-user.c +++ b/subprojects/libvhost-user/libvhost-user.c @@ -45,6 +45,17 @@ #include "libvhost-user.h" /* usually provided by GLib */ +#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ > 4) +#if !defined(__clang__) && (__GNUC__ == 4 && __GNUC_MINOR__ == 4) +#define G_GNUC_PRINTF(format_idx, arg_idx) \ + __attribute__((__format__(gnu_printf, format_idx, arg_idx))) +#else +#define G_GNUC_PRINTF(format_idx, arg_idx) \ + __attribute__((__format__(__printf__, format_idx, arg_idx))) +#endif +#else /* !__GNUC__ */ +#define G_GNUC_PRINTF(format_idx, arg_idx) +#endif /* !__GNUC__ */ #ifndef MIN #define MIN(x, y) ({ \ typeof(x) _min1 = (x); \ @@ -99,7 +110,7 @@ static inline bool vu_has_protocol_feature(VuDev *dev, unsigned int fbit) return has_feature(dev->protocol_features, fbit); } -static const char * +const char * vu_request_to_string(unsigned int req) { #define REQ(req) [req] = #req @@ -151,7 +162,7 @@ vu_request_to_string(unsigned int req) } } -static void +static void G_GNUC_PRINTF(2, 3) vu_panic(VuDev *dev, const char *msg, ...) 
{ char *buf = NULL; @@ -651,7 +662,8 @@ generate_faults(VuDev *dev) { if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, ®_struct)) { vu_panic(dev, "%s: Failed to userfault region %d " - "@%p + size:%zx offset: %zx: (ufd=%d)%s\n", + "@%" PRIx64 " + size:%" PRIx64 " offset: %" PRIx64 + ": (ufd=%d)%s\n", __func__, i, dev_region->mmap_addr, dev_region->size, dev_region->mmap_offset, @@ -690,6 +702,29 @@ vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { VuDevRegion *dev_region = &dev->regions[dev->nregions]; void *mmap_addr; + if (vmsg->fd_num != 1) { + vmsg_close_fds(vmsg); + vu_panic(dev, "VHOST_USER_ADD_MEM_REG received %d fds - only 1 fd " + "should be sent for this message type", vmsg->fd_num); + return false; + } + + if (vmsg->size < VHOST_USER_MEM_REG_SIZE) { + close(vmsg->fds[0]); + vu_panic(dev, "VHOST_USER_ADD_MEM_REG requires a message size of at " + "least %zu bytes and only %d bytes were received", + VHOST_USER_MEM_REG_SIZE, vmsg->size); + return false; + } + + if (dev->nregions == VHOST_USER_MAX_RAM_SLOTS) { + close(vmsg->fds[0]); + vu_panic(dev, "failing attempt to hot add memory via " + "VHOST_USER_ADD_MEM_REG message because the backend has " + "no free ram slots available"); + return false; + } + /* * If we are in postcopy mode and we receive a u64 payload with a 0 value * we know all the postcopy client bases have been received, and we @@ -728,12 +763,12 @@ vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { * accessing it before we userfault. */ mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset, - PROT_NONE, MAP_SHARED, + PROT_NONE, MAP_SHARED | MAP_NORESERVE, vmsg->fds[0], 0); } else { mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset, - PROT_READ | PROT_WRITE, MAP_SHARED, vmsg->fds[0], - 0); + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, + vmsg->fds[0], 0); } if (mmap_addr == MAP_FAILED) { @@ -756,15 +791,9 @@ vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { /* Send the message back to qemu with the addresses filled in. 
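/*
 * Editorial sketch of the validation order the VHOST_USER_ADD_MEM_REG checks
 * above follow: verify the fd count and the payload size before reading any
 * region field, and release the passed fd on every early-exit path.  Types
 * and limits are simplified stand-ins, not the libvhost-user definitions.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>

#define DEMO_MAX_RAM_SLOTS 8

typedef struct DemoRegionMsg {
    uint32_t size;      /* payload bytes actually received */
    int fd;             /* fd carried with the message, or -1 if none */
    /* the memory-region fields follow in the real message */
} DemoRegionMsg;

static bool demo_add_region(DemoRegionMsg *msg, unsigned int *nregions,
                            size_t min_payload)
{
    if (msg->fd < 0) {
        return false;               /* exactly one fd must accompany it */
    }
    if (msg->size < min_payload) {
        close(msg->fd);             /* do not leak the fd on error */
        return false;
    }
    if (*nregions == DEMO_MAX_RAM_SLOTS) {
        close(msg->fd);             /* no free RAM slot left */
        return false;
    }
    /* only now is it safe to read the region fields and mmap(msg->fd) */
    (*nregions)++;
    return true;
}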
*/ vmsg->fd_num = 0; - if (!vu_send_reply(dev, dev->sock, vmsg)) { - vu_panic(dev, "failed to respond to add-mem-region for postcopy"); - return false; - } - DPRINT("Successfully added new region in postcopy\n"); dev->nregions++; - return false; - + return true; } else { for (i = 0; i < dev->max_queues; i++) { if (dev->vq[i].vring.desc) { @@ -777,8 +806,7 @@ vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { DPRINT("Successfully added new region\n"); dev->nregions++; - vmsg_set_reply_u64(vmsg, 0); - return true; + return false; } } @@ -796,10 +824,24 @@ static inline bool reg_equal(VuDevRegion *vudev_reg, static bool vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { - int i, j; - bool found = false; - VuDevRegion shadow_regions[VHOST_USER_MAX_RAM_SLOTS] = {}; VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m; + int i; + bool found = false; + + if (vmsg->fd_num > 1) { + vmsg_close_fds(vmsg); + vu_panic(dev, "VHOST_USER_REM_MEM_REG received %d fds - at most 1 fd " + "should be sent for this message type", vmsg->fd_num); + return false; + } + + if (vmsg->size < VHOST_USER_MEM_REG_SIZE) { + vmsg_close_fds(vmsg); + vu_panic(dev, "VHOST_USER_REM_MEM_REG requires a message size of at " + "least %zu bytes and only %d bytes were received", + VHOST_USER_MEM_REG_SIZE, vmsg->size); + return false; + } DPRINT("Removing region:\n"); DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n", @@ -811,35 +853,39 @@ vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { DPRINT(" mmap_offset 0x%016"PRIx64"\n", msg_region->mmap_offset); - for (i = 0, j = 0; i < dev->nregions; i++) { - if (!reg_equal(&dev->regions[i], msg_region)) { - shadow_regions[j].gpa = dev->regions[i].gpa; - shadow_regions[j].size = dev->regions[i].size; - shadow_regions[j].qva = dev->regions[i].qva; - shadow_regions[j].mmap_offset = dev->regions[i].mmap_offset; - j++; - } else { - found = true; + for (i = 0; i < dev->nregions; i++) { + if (reg_equal(&dev->regions[i], msg_region)) { VuDevRegion *r = &dev->regions[i]; void *m = (void *) (uintptr_t) r->mmap_addr; if (m) { munmap(m, r->size + r->mmap_offset); } + + /* + * Shift all affected entries by 1 to close the hole at index i and + * zero out the last entry. + */ + memmove(dev->regions + i, dev->regions + i + 1, + sizeof(VuDevRegion) * (dev->nregions - i - 1)); + memset(dev->regions + dev->nregions - 1, 0, sizeof(VuDevRegion)); + DPRINT("Successfully removed a region\n"); + dev->nregions--; + i--; + + found = true; + + /* Continue the search for eventual duplicates. */ } } - if (found) { - memcpy(dev->regions, shadow_regions, - sizeof(VuDevRegion) * VHOST_USER_MAX_RAM_SLOTS); - DPRINT("Successfully removed a region\n"); - dev->nregions--; - vmsg_set_reply_u64(vmsg, 0); - } else { + if (!found) { vu_panic(dev, "Specified region not found\n"); } - return true; + vmsg_close_fds(vmsg); + + return false; } static bool @@ -877,7 +923,7 @@ vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg) * accessing it before we userfault */ mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset, - PROT_NONE, MAP_SHARED, + PROT_NONE, MAP_SHARED | MAP_NORESERVE, vmsg->fds[i], 0); if (mmap_addr == MAP_FAILED) { @@ -964,7 +1010,7 @@ vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg) * mapped address has to be page aligned, and we use huge * pages. 
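/*
 * Editorial sketch of the removal idiom VHOST_USER_REM_MEM_REG now uses:
 * shift the tail of the array down by one to close the hole, clear the last
 * slot, and step the index back so duplicates of the same region are caught
 * as well.  The element type is a simplified stand-in.
 */
#include <stdint.h>
#include <string.h>

typedef struct DemoSlot {
    uint64_t gpa;
    uint64_t size;
} DemoSlot;

static int demo_remove_matching(DemoSlot *slots, int n, uint64_t gpa)
{
    for (int i = 0; i < n; i++) {
        if (slots[i].gpa != gpa) {
            continue;
        }
        /* Close the hole at index i and zero the now-unused last entry. */
        memmove(&slots[i], &slots[i + 1], sizeof(*slots) * (n - i - 1));
        memset(&slots[n - 1], 0, sizeof(*slots));
        n--;
        i--;    /* re-examine index i: it now holds the shifted-down element */
    }
    return n;   /* new number of populated slots */
}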
*/ mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset, - PROT_READ | PROT_WRITE, MAP_SHARED, + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, vmsg->fds[i], 0); if (mmap_addr == MAP_FAILED) { @@ -1787,18 +1833,11 @@ vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg) static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg) { - vmsg->flags = VHOST_USER_REPLY_MASK | VHOST_USER_VERSION; - vmsg->size = sizeof(vmsg->payload.u64); - vmsg->payload.u64 = VHOST_USER_MAX_RAM_SLOTS; - vmsg->fd_num = 0; - - if (!vu_message_write(dev, dev->sock, vmsg)) { - vu_panic(dev, "Failed to send max ram slots: %s\n", strerror(errno)); - } + vmsg_set_reply_u64(vmsg, VHOST_USER_MAX_RAM_SLOTS); DPRINT("u64: 0x%016"PRIx64"\n", (uint64_t) VHOST_USER_MAX_RAM_SLOTS); - return false; + return true; } static bool @@ -2508,6 +2547,10 @@ virtqueue_alloc_element(size_t sz, assert(sz >= sizeof(VuVirtqElement)); elem = malloc(out_sg_end); + if (!elem) { + DPRINT("%s: failed to malloc virtqueue element\n", __func__); + return NULL; + } elem->out_num = out_num; elem->in_num = in_num; elem->in_sg = (void *)elem + in_sg_ofs; @@ -2594,6 +2637,9 @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz) /* Now copy what we have collected and mapped */ elem = virtqueue_alloc_element(sz, out_num, in_num); + if (!elem) { + return NULL; + } elem->index = idx; for (i = 0; i < out_num; i++) { elem->out_sg[i] = iov[i]; diff --git a/subprojects/libvhost-user/libvhost-user.h b/subprojects/libvhost-user/libvhost-user.h index 3d13dfadde..aea7ec5061 100644 --- a/subprojects/libvhost-user/libvhost-user.h +++ b/subprojects/libvhost-user/libvhost-user.h @@ -129,6 +129,8 @@ typedef struct VhostUserMemoryRegion { uint64_t mmap_offset; } VhostUserMemoryRegion; +#define VHOST_USER_MEM_REG_SIZE (sizeof(VhostUserMemoryRegion)) + typedef struct VhostUserMemory { uint32_t nregions; uint32_t padding; @@ -471,6 +473,15 @@ bool vu_init(VuDev *dev, */ void vu_deinit(VuDev *dev); + +/** + * vu_request_to_string: return string for vhost message request + * @req: VhostUserMsg request + * + * Returns a const string, do not free. 
+ */ +const char *vu_request_to_string(unsigned int req); + /** * vu_dispatch: * @dev: a VuDev context diff --git a/target/Kconfig b/target/Kconfig index ae7f24fc66..83da0bd293 100644 --- a/target/Kconfig +++ b/target/Kconfig @@ -4,6 +4,7 @@ source avr/Kconfig source cris/Kconfig source hppa/Kconfig source i386/Kconfig +source loongarch/Kconfig source m68k/Kconfig source microblaze/Kconfig source mips/Kconfig diff --git a/target/alpha/cpu-param.h b/target/alpha/cpu-param.h index 1153992e42..17cd14e590 100644 --- a/target/alpha/cpu-param.h +++ b/target/alpha/cpu-param.h @@ -6,7 +6,7 @@ */ #ifndef ALPHA_CPU_PARAM_H -#define ALPHA_CPU_PARAM_H 1 +#define ALPHA_CPU_PARAM_H #define TARGET_LONG_BITS 64 #define TARGET_PAGE_BITS 13 diff --git a/target/alpha/cpu-qom.h b/target/alpha/cpu-qom.h index 7bb9173c57..1f200724b6 100644 --- a/target/alpha/cpu-qom.h +++ b/target/alpha/cpu-qom.h @@ -25,8 +25,7 @@ #define TYPE_ALPHA_CPU "alpha-cpu" -OBJECT_DECLARE_TYPE(AlphaCPU, AlphaCPUClass, - ALPHA_CPU) +OBJECT_DECLARE_CPU_TYPE(AlphaCPU, AlphaCPUClass, ALPHA_CPU) /** * AlphaCPUClass: diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c index 4871ad0c0a..270ae787b1 100644 --- a/target/alpha/cpu.c +++ b/target/alpha/cpu.c @@ -33,6 +33,22 @@ static void alpha_cpu_set_pc(CPUState *cs, vaddr value) cpu->env.pc = value; } +static vaddr alpha_cpu_get_pc(CPUState *cs) +{ + AlphaCPU *cpu = ALPHA_CPU(cs); + + return cpu->env.pc; +} + +static void alpha_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + AlphaCPU *cpu = ALPHA_CPU(cs); + + cpu->env.pc = data[0]; +} + static bool alpha_cpu_has_work(CPUState *cs) { /* Here we are checking to see if the CPU should wake up from HALT. @@ -218,10 +234,14 @@ static const struct SysemuCPUOps alpha_sysemu_ops = { static const struct TCGCPUOps alpha_tcg_ops = { .initialize = alpha_translate_init, - .cpu_exec_interrupt = alpha_cpu_exec_interrupt, - .tlb_fill = alpha_cpu_tlb_fill, + .restore_state_to_opc = alpha_restore_state_to_opc, -#ifndef CONFIG_USER_ONLY +#ifdef CONFIG_USER_ONLY + .record_sigsegv = alpha_cpu_record_sigsegv, + .record_sigbus = alpha_cpu_record_sigbus, +#else + .tlb_fill = alpha_cpu_tlb_fill, + .cpu_exec_interrupt = alpha_cpu_exec_interrupt, .do_interrupt = alpha_cpu_do_interrupt, .do_transaction_failed = alpha_cpu_do_transaction_failed, .do_unaligned_access = alpha_cpu_do_unaligned_access, @@ -241,6 +261,7 @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data) cc->has_work = alpha_cpu_has_work; cc->dump_state = alpha_cpu_dump_state; cc->set_pc = alpha_cpu_set_pc; + cc->get_pc = alpha_cpu_get_pc; cc->gdb_read_register = alpha_cpu_gdb_read_register; cc->gdb_write_register = alpha_cpu_gdb_write_register; #ifndef CONFIG_USER_ONLY diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h index 82df108967..d0abc949a8 100644 --- a/target/alpha/cpu.h +++ b/target/alpha/cpu.h @@ -22,6 +22,7 @@ #include "cpu-qom.h" #include "exec/cpu-defs.h" +#include "qemu/cpu-float.h" /* Alpha processors have a weak memory model */ #define TCG_GUEST_DEFAULT_MO (0) @@ -197,9 +198,7 @@ enum { #define MMU_USER_IDX 1 #define MMU_PHYS_IDX 2 -typedef struct CPUAlphaState CPUAlphaState; - -struct CPUAlphaState { +typedef struct CPUArchState { uint64_t ir[31]; float64 fir[31]; uint64_t pc; @@ -251,7 +250,7 @@ struct CPUAlphaState { uint32_t features; uint32_t amask; int implver; -}; +} CPUAlphaState; /** * AlphaCPU: @@ -259,7 +258,7 @@ struct CPUAlphaState { * * An Alpha CPU. 
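/*
 * Editorial sketch of the contract behind the new restore_state_to_opc hook
 * above: at translation time the front end records one target word per guest
 * instruction (for Alpha, just the PC), and on an exception the core hands
 * the same array back so the interrupted PC can be restored.  This is a
 * simplified model, not the QEMU data structures.
 */
#include <stdint.h>

typedef struct DemoEnv {
    uint64_t pc;
} DemoEnv;

/* What the translator conceptually stores at each instruction start. */
static void demo_record_insn_start(uint64_t *data, uint64_t pc)
{
    data[0] = pc;
}

/* What alpha_restore_state_to_opc() does with that record on unwind. */
static void demo_restore_state_to_opc(DemoEnv *env, const uint64_t *data)
{
    env->pc = data[0];
}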
*/ -struct AlphaCPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ @@ -274,23 +273,16 @@ struct AlphaCPU { #ifndef CONFIG_USER_ONLY extern const VMStateDescription vmstate_alpha_cpu; -#endif void alpha_cpu_do_interrupt(CPUState *cpu); bool alpha_cpu_exec_interrupt(CPUState *cpu, int int_req); +#endif /* !CONFIG_USER_ONLY */ void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags); hwaddr alpha_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); int alpha_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int alpha_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -void alpha_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); #define cpu_list alpha_cpu_list -#define cpu_signal_handler cpu_alpha_signal_handler - -typedef CPUAlphaState CPUArchState; -typedef AlphaCPU ArchCPU; #include "exec/cpu-all.h" @@ -387,6 +379,8 @@ enum { #define ENV_FLAG_TB_MASK \ (ENV_FLAG_PAL_MODE | ENV_FLAG_PS_USER | ENV_FLAG_FEN) +#define TB_FLAG_UNALIGN (1u << 1) + static inline int cpu_mmu_index(CPUAlphaState *env, bool ifetch) { int ret = env->flags & ENV_FLAG_PS_USER ? MMU_USER_IDX : MMU_KERNEL_IDX; @@ -440,22 +434,27 @@ void alpha_translate_init(void); #define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU void alpha_cpu_list(void); -/* you can call this signal handler from your SIGBUS and SIGSEGV - signal handlers to inform the virtual CPU of exceptions. non zero - is returned if the signal was handled by the virtual CPU. */ -int cpu_alpha_signal_handler(int host_signum, void *pinfo, - void *puc); -bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); -void QEMU_NORETURN dynamic_excp(CPUAlphaState *, uintptr_t, int, int); -void QEMU_NORETURN arith_excp(CPUAlphaState *, uintptr_t, int, uint64_t); +G_NORETURN void dynamic_excp(CPUAlphaState *, uintptr_t, int, int); +G_NORETURN void arith_excp(CPUAlphaState *, uintptr_t, int, uint64_t); uint64_t cpu_alpha_load_fpcr (CPUAlphaState *env); void cpu_alpha_store_fpcr (CPUAlphaState *env, uint64_t val); uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg); void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val); -#ifndef CONFIG_USER_ONLY + +#ifdef CONFIG_USER_ONLY +void alpha_cpu_record_sigsegv(CPUState *cs, vaddr address, + MMUAccessType access_type, + bool maperr, uintptr_t retaddr); +void alpha_cpu_record_sigbus(CPUState *cs, vaddr address, + MMUAccessType access_type, uintptr_t retaddr); +#else +bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); +G_NORETURN void alpha_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr); void alpha_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, @@ -469,6 +468,9 @@ static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, target_ulong *pc, *pc = env->pc; *cs_base = 0; *pflags = env->flags & ENV_FLAG_TB_MASK; +#ifdef CONFIG_USER_ONLY + *pflags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus; +#endif } #ifdef CONFIG_USER_ONLY diff --git a/target/alpha/helper.c b/target/alpha/helper.c index 4f56fe4d23..970c869771 100644 --- a/target/alpha/helper.c +++ b/target/alpha/helper.c @@ -18,7 +18,7 @@ */ #include "qemu/osdep.h" - +#include "qemu/log.h" #include "cpu.h" #include "exec/exec-all.h" #include 
"fpu/softfloat-types.h" @@ -120,15 +120,44 @@ void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val) } #if defined(CONFIG_USER_ONLY) -bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) +void alpha_cpu_record_sigsegv(CPUState *cs, vaddr address, + MMUAccessType access_type, + bool maperr, uintptr_t retaddr) { AlphaCPU *cpu = ALPHA_CPU(cs); + target_ulong mmcsr, cause; - cs->exception_index = EXCP_MMFAULT; + /* Assuming !maperr, infer the missing protection. */ + switch (access_type) { + case MMU_DATA_LOAD: + mmcsr = MM_K_FOR; + cause = 0; + break; + case MMU_DATA_STORE: + mmcsr = MM_K_FOW; + cause = 1; + break; + case MMU_INST_FETCH: + mmcsr = MM_K_FOE; + cause = -1; + break; + default: + g_assert_not_reached(); + } + if (maperr) { + if (address < BIT_ULL(TARGET_VIRT_ADDR_SPACE_BITS - 1)) { + /* Userspace address, therefore page not mapped. */ + mmcsr = MM_K_TNV; + } else { + /* Kernel or invalid address. */ + mmcsr = MM_K_ACV; + } + } + + /* Record the arguments that PALcode would give to the kernel. */ cpu->env.trap_arg0 = address; - cpu_loop_exit_restore(cs, retaddr); + cpu->env.trap_arg1 = mmcsr; + cpu->env.trap_arg2 = cause; } #else /* Returns the OSF/1 entMM failure indication, or -1 on success. */ @@ -293,7 +322,6 @@ bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, prot, mmu_idx, TARGET_PAGE_SIZE); return true; } -#endif /* USER_ONLY */ void alpha_cpu_do_interrupt(CPUState *cs) { @@ -348,7 +376,6 @@ void alpha_cpu_do_interrupt(CPUState *cs) cs->exception_index = -1; -#if !defined(CONFIG_USER_ONLY) switch (i) { case EXCP_RESET: i = 0x0000; @@ -404,7 +431,6 @@ void alpha_cpu_do_interrupt(CPUState *cs) /* Switch to PALmode. */ env->flags |= ENV_FLAG_PAL_MODE; -#endif /* !USER_ONLY */ } bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request) @@ -451,6 +477,8 @@ bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request) return false; } +#endif /* !CONFIG_USER_ONLY */ + void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags) { static const char linux_reg_names[31][4] = { @@ -486,7 +514,7 @@ void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags) /* This should only be called from translate, via gen_excp. We expect that ENV->PC has already been updated. */ -void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error) +G_NORETURN void helper_excp(CPUAlphaState *env, int excp, int error) { CPUState *cs = env_cpu(env); @@ -496,23 +524,23 @@ void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error) } /* This may be called from any of the helpers to set up EXCEPTION_INDEX. */ -void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr, - int excp, int error) +G_NORETURN void dynamic_excp(CPUAlphaState *env, uintptr_t retaddr, + int excp, int error) { CPUState *cs = env_cpu(env); cs->exception_index = excp; env->error_code = error; if (retaddr) { - cpu_restore_state(cs, retaddr, true); + cpu_restore_state(cs, retaddr); /* Floating-point exceptions (our only users) point to the next PC. 
*/ env->pc += 4; } cpu_loop_exit(cs); } -void QEMU_NORETURN arith_excp(CPUAlphaState *env, uintptr_t retaddr, - int exc, uint64_t mask) +G_NORETURN void arith_excp(CPUAlphaState *env, uintptr_t retaddr, + int exc, uint64_t mask) { env->trap_arg0 = exc; env->trap_arg1 = mask; diff --git a/target/alpha/mem_helper.c b/target/alpha/mem_helper.c index 75e72bc337..a39b52c5dd 100644 --- a/target/alpha/mem_helper.c +++ b/target/alpha/mem_helper.c @@ -23,18 +23,12 @@ #include "exec/exec-all.h" #include "exec/cpu_ldst.h" -/* Softmmu support */ -#ifndef CONFIG_USER_ONLY -void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr) +static void do_unaligned_access(CPUAlphaState *env, vaddr addr, uintptr_t retaddr) { - AlphaCPU *cpu = ALPHA_CPU(cs); - CPUAlphaState *env = &cpu->env; uint64_t pc; uint32_t insn; - cpu_restore_state(cs, retaddr, true); + cpu_restore_state(env_cpu(env), retaddr); pc = env->pc; insn = cpu_ldl_code(env, pc); @@ -42,6 +36,26 @@ void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr, env->trap_arg0 = addr; env->trap_arg1 = insn >> 26; /* opcode */ env->trap_arg2 = (insn >> 21) & 31; /* dest regno */ +} + +#ifdef CONFIG_USER_ONLY +void alpha_cpu_record_sigbus(CPUState *cs, vaddr addr, + MMUAccessType access_type, uintptr_t retaddr) +{ + AlphaCPU *cpu = ALPHA_CPU(cs); + CPUAlphaState *env = &cpu->env; + + do_unaligned_access(env, addr, retaddr); +} +#else +void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr) +{ + AlphaCPU *cpu = ALPHA_CPU(cs); + CPUAlphaState *env = &cpu->env; + + do_unaligned_access(env, addr, retaddr); cs->exception_index = EXCP_UNALIGN; env->error_code = 0; cpu_loop_exit(cs); diff --git a/target/alpha/translate.c b/target/alpha/translate.c index de6c0a8439..f9bcdeb717 100644 --- a/target/alpha/translate.c +++ b/target/alpha/translate.c @@ -20,7 +20,6 @@ #include "qemu/osdep.h" #include "cpu.h" #include "sysemu/cpus.h" -#include "sysemu/cpu-timers.h" #include "disas/disas.h" #include "qemu/host-utils.h" #include "exec/exec-all.h" @@ -45,7 +44,9 @@ typedef struct DisasContext DisasContext; struct DisasContext { DisasContextBase base; -#ifndef CONFIG_USER_ONLY +#ifdef CONFIG_USER_ONLY + MemOp unalign; +#else uint64_t palbr; #endif uint32_t tbflags; @@ -68,6 +69,12 @@ struct DisasContext { TCGv sink; }; +#ifdef CONFIG_USER_ONLY +#define UNALIGN(C) (C)->unalign +#else +#define UNALIGN(C) 0 +#endif + /* Target-specific return values from translate_one, indicating the state of the TB. Note that DISAS_NEXT indicates that we are not exiting the TB. 
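/*
 * Editorial sketch of the flag plumbing behind the UNALIGN() macro above,
 * under the assumption that ctx->unalign is derived from TB_FLAG_UNALIGN
 * (whose computation from the prctl_unalign_sigbus state appears in the
 * cpu.h hunk earlier in this patch).  Constants and types are simplified
 * stand-ins for the TCG MemOp values.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_TB_FLAG_UNALIGN (1u << 1)
#define DEMO_MO_ALIGN        (1u << 4)      /* placeholder for TCG's MO_ALIGN */

typedef struct DemoDisasContext {
    unsigned int unalign;   /* ORed into every MemOp, like UNALIGN(ctx) */
} DemoDisasContext;

static uint32_t demo_tb_flags(bool prctl_unalign_sigbus)
{
    /* mirrors: *pflags |= TB_FLAG_UNALIGN * !prctl_unalign_sigbus */
    return prctl_unalign_sigbus ? 0 : DEMO_TB_FLAG_UNALIGN;
}

static void demo_init_disas(DemoDisasContext *ctx, uint32_t tb_flags)
{
    /* unaligned access allowed: no alignment check; otherwise trap to SIGBUS */
    ctx->unalign = (tb_flags & DEMO_TB_FLAG_UNALIGN) ? 0 : DEMO_MO_ALIGN;
}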
*/ @@ -228,7 +235,7 @@ static TCGv dest_fpr(DisasContext *ctx, unsigned reg) static int get_flag_ofs(unsigned shift) { int ofs = offsetof(CPUAlphaState, flags); -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN ofs += 3 - (shift / 8); #else ofs += shift / 8; @@ -267,51 +274,51 @@ static inline DisasJumpType gen_invalid(DisasContext *ctx) return gen_excp(ctx, EXCP_OPCDEC, 0); } -static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags) +static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr) { TCGv_i32 tmp32 = tcg_temp_new_i32(); - tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL); - gen_helper_memory_to_f(t0, tmp32); + tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); + gen_helper_memory_to_f(dest, tmp32); tcg_temp_free_i32(tmp32); } -static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags) +static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr) { TCGv tmp = tcg_temp_new(); - tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ); - gen_helper_memory_to_g(t0, tmp); + tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); + gen_helper_memory_to_g(dest, tmp); tcg_temp_free(tmp); } -static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags) +static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr) { TCGv_i32 tmp32 = tcg_temp_new_i32(); - tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL); - gen_helper_memory_to_s(t0, tmp32); + tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); + gen_helper_memory_to_s(dest, tmp32); tcg_temp_free_i32(tmp32); } -static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags) +static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr) { - tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL); - tcg_gen_mov_i64(cpu_lock_addr, t1); - tcg_gen_mov_i64(cpu_lock_value, t0); + tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); } -static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags) +static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16, + void (*func)(DisasContext *, TCGv, TCGv)) { - tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ); - tcg_gen_mov_i64(cpu_lock_addr, t1); - tcg_gen_mov_i64(cpu_lock_value, t0); + /* Loads to $f31 are prefetches, which we can treat as nops. */ + if (likely(ra != 31)) { + TCGv addr = tcg_temp_new(); + tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); + func(ctx, cpu_fir[ra], addr); + tcg_temp_free(addr); + } } -static inline void gen_load_mem(DisasContext *ctx, - void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1, - int flags), - int ra, int rb, int32_t disp16, bool fp, - bool clear) +static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16, + MemOp op, bool clear, bool locked) { - TCGv tmp, addr, va; + TCGv addr, dest; /* LDQ_U with ra $31 is UNOP. Other various loads are forms of prefetches, which we can treat as nops. No worries about @@ -320,72 +327,79 @@ static inline void gen_load_mem(DisasContext *ctx, return; } - tmp = tcg_temp_new(); - addr = load_gpr(ctx, rb); - - if (disp16) { - tcg_gen_addi_i64(tmp, addr, disp16); - addr = tmp; - } + addr = tcg_temp_new(); + tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); if (clear) { - tcg_gen_andi_i64(tmp, addr, ~0x7); - addr = tmp; + tcg_gen_andi_i64(addr, addr, ~0x7); + } else if (!locked) { + op |= UNALIGN(ctx); } - va = (fp ? 
cpu_fir[ra] : ctx->ir[ra]); - tcg_gen_qemu_load(va, addr, ctx->mem_idx); + dest = ctx->ir[ra]; + tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op); - tcg_temp_free(tmp); + if (locked) { + tcg_gen_mov_i64(cpu_lock_addr, addr); + tcg_gen_mov_i64(cpu_lock_value, dest); + } + tcg_temp_free(addr); } -static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags) +static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr) { TCGv_i32 tmp32 = tcg_temp_new_i32(); - gen_helper_f_to_memory(tmp32, t0); - tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL); + gen_helper_f_to_memory(tmp32, addr); + tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); tcg_temp_free_i32(tmp32); } -static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags) +static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr) { TCGv tmp = tcg_temp_new(); - gen_helper_g_to_memory(tmp, t0); - tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ); + gen_helper_g_to_memory(tmp, src); + tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); tcg_temp_free(tmp); } -static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags) +static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr) { TCGv_i32 tmp32 = tcg_temp_new_i32(); - gen_helper_s_to_memory(tmp32, t0); - tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL); + gen_helper_s_to_memory(tmp32, src); + tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); tcg_temp_free_i32(tmp32); } -static inline void gen_store_mem(DisasContext *ctx, - void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1, - int flags), - int ra, int rb, int32_t disp16, bool fp, - bool clear) +static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr) { - TCGv tmp, addr, va; + tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); +} - tmp = tcg_temp_new(); - addr = load_gpr(ctx, rb); +static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16, + void (*func)(DisasContext *, TCGv, TCGv)) +{ + TCGv addr = tcg_temp_new(); + tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); + func(ctx, load_fpr(ctx, ra), addr); + tcg_temp_free(addr); +} - if (disp16) { - tcg_gen_addi_i64(tmp, addr, disp16); - addr = tmp; - } +static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16, + MemOp op, bool clear) +{ + TCGv addr, src; + + addr = tcg_temp_new(); + tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); if (clear) { - tcg_gen_andi_i64(tmp, addr, ~0x7); - addr = tmp; + tcg_gen_andi_i64(addr, addr, ~0x7); + } else { + op |= UNALIGN(ctx); } - va = (fp ? 
load_fpr(ctx, ra) : load_gpr(ctx, ra)); - tcg_gen_qemu_store(va, addr, ctx->mem_idx); + src = load_gpr(ctx, ra); + tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op); - tcg_temp_free(tmp); + tcg_temp_free(addr); } static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb, @@ -1480,30 +1494,30 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) case 0x0A: /* LDBU */ REQUIRE_AMASK(BWX); - gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0); + gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0); break; case 0x0B: /* LDQ_U */ - gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1); + gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 1, 0); break; case 0x0C: /* LDWU */ REQUIRE_AMASK(BWX); - gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0); + gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0); break; case 0x0D: /* STW */ REQUIRE_AMASK(BWX); - gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0); + gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0); break; case 0x0E: /* STB */ REQUIRE_AMASK(BWX); - gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0); + gen_store_int(ctx, ra, rb, disp16, MO_UB, 0); break; case 0x0F: /* STQ_U */ - gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1); + gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 1); break; case 0x10: @@ -2454,15 +2468,19 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) break; case 0x1: /* Quadword physical access (hw_ldq/p) */ - tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ); + tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ); break; case 0x2: /* Longword physical access with lock (hw_ldl_l/p) */ - gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX); + tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL); + tcg_gen_mov_i64(cpu_lock_addr, addr); + tcg_gen_mov_i64(cpu_lock_value, va); break; case 0x3: /* Quadword physical access with lock (hw_ldq_l/p) */ - gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX); + tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ); + tcg_gen_mov_i64(cpu_lock_addr, addr); + tcg_gen_mov_i64(cpu_lock_value, va); break; case 0x4: /* Longword virtual PTE fetch (hw_ldl/v) */ @@ -2489,7 +2507,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) break; case 0xB: /* Quadword virtual access with protection check (hw_ldq/w) */ - tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ); + tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEUQ); break; case 0xC: /* Longword virtual access with alt access mode (hw_ldl/a)*/ @@ -2505,7 +2523,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) case 0xF: /* Quadword virtual access with alternate access mode and protection checks (hw_ldq/wa) */ - tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ); + tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEUQ); break; } tcg_temp_free(addr); @@ -2718,7 +2736,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) vb = load_gpr(ctx, rb); tmp = tcg_temp_new(); tcg_gen_addi_i64(tmp, vb, disp12); - tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ); + tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ); tcg_temp_free(tmp); break; case 0x2: @@ -2729,7 +2747,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) case 0x3: /* Quadword physical access with lock */ ret = gen_store_conditional(ctx, ra, rb, disp12, - MMU_PHYS_IDX, MO_LEQ); + MMU_PHYS_IDX, MO_LEUQ); break; case 0x4: /* Longword virtual access */ @@ -2776,66 +2794,66 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) case 
0x20: /* LDF */ REQUIRE_FEN; - gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0); + gen_load_fp(ctx, ra, rb, disp16, gen_ldf); break; case 0x21: /* LDG */ REQUIRE_FEN; - gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0); + gen_load_fp(ctx, ra, rb, disp16, gen_ldg); break; case 0x22: /* LDS */ REQUIRE_FEN; - gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0); + gen_load_fp(ctx, ra, rb, disp16, gen_lds); break; case 0x23: /* LDT */ REQUIRE_FEN; - gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0); + gen_load_fp(ctx, ra, rb, disp16, gen_ldt); break; case 0x24: /* STF */ REQUIRE_FEN; - gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0); + gen_store_fp(ctx, ra, rb, disp16, gen_stf); break; case 0x25: /* STG */ REQUIRE_FEN; - gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0); + gen_store_fp(ctx, ra, rb, disp16, gen_stg); break; case 0x26: /* STS */ REQUIRE_FEN; - gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0); + gen_store_fp(ctx, ra, rb, disp16, gen_sts); break; case 0x27: /* STT */ REQUIRE_FEN; - gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0); + gen_store_fp(ctx, ra, rb, disp16, gen_stt); break; case 0x28: /* LDL */ - gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0); + gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0); break; case 0x29: /* LDQ */ - gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0); + gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 0); break; case 0x2A: /* LDL_L */ - gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0); + gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 1); break; case 0x2B: /* LDQ_L */ - gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0); + gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 1); break; case 0x2C: /* STL */ - gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0); + gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0); break; case 0x2D: /* STQ */ - gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0); + gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 0); break; case 0x2E: /* STL_C */ @@ -2845,7 +2863,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) case 0x2F: /* STQ_C */ ret = gen_store_conditional(ctx, ra, rb, disp16, - ctx->mem_idx, MO_LEQ); + ctx->mem_idx, MO_LEUQ); break; case 0x30: /* BR */ @@ -2935,6 +2953,7 @@ static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) #ifdef CONFIG_USER_ONLY ctx->ir = cpu_std_ir; + ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN); #else ctx->palbr = env->palbr; ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? 
cpu_pal_ir : cpu_std_ir); @@ -2971,7 +2990,7 @@ static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *ctx = container_of(dcbase, DisasContext, base); CPUAlphaState *env = cpu->env_ptr; - uint32_t insn = translator_ldl(env, ctx->base.pc_next); + uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next); ctx->base.pc_next += 4; ctx->base.is_jmp = translate_one(ctx, insn); @@ -2998,27 +3017,21 @@ static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); /* FALLTHRU */ case DISAS_PC_UPDATED: - if (!ctx->base.singlestep_enabled) { - tcg_gen_lookup_and_goto_ptr(); - break; - } - /* FALLTHRU */ + tcg_gen_lookup_and_goto_ptr(); + break; case DISAS_PC_UPDATED_NOCHAIN: - if (ctx->base.singlestep_enabled) { - gen_excp_1(EXCP_DEBUG, 0); - } else { - tcg_gen_exit_tb(NULL, 0); - } + tcg_gen_exit_tb(NULL, 0); break; default: g_assert_not_reached(); } } -static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu) +static void alpha_tr_disas_log(const DisasContextBase *dcbase, + CPUState *cpu, FILE *logfile) { - qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first)); - log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first)); + target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size); } static const TranslatorOps alpha_tr_ops = { @@ -3030,14 +3043,9 @@ static const TranslatorOps alpha_tr_ops = { .disas_log = alpha_tr_disas_log, }; -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext dc; - translator_loop(&alpha_tr_ops, &dc.base, cpu, tb, max_insns); -} - -void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc = data[0]; + translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base); } diff --git a/target/arm/a32.decode b/target/arm/a32.decode index fcd8cd4f7d..f2ca480949 100644 --- a/target/arm/a32.decode +++ b/target/arm/a32.decode @@ -187,13 +187,17 @@ SMULTT .... 0001 0110 .... 0000 .... 1110 .... @rd0mn { { - YIELD ---- 0011 0010 0000 1111 ---- 0000 0001 - WFE ---- 0011 0010 0000 1111 ---- 0000 0010 - WFI ---- 0011 0010 0000 1111 ---- 0000 0011 + [ + YIELD ---- 0011 0010 0000 1111 ---- 0000 0001 + WFE ---- 0011 0010 0000 1111 ---- 0000 0010 + WFI ---- 0011 0010 0000 1111 ---- 0000 0011 - # TODO: Implement SEV, SEVL; may help SMP performance. - # SEV ---- 0011 0010 0000 1111 ---- 0000 0100 - # SEVL ---- 0011 0010 0000 1111 ---- 0000 0101 + # TODO: Implement SEV, SEVL; may help SMP performance. + # SEV ---- 0011 0010 0000 1111 ---- 0000 0100 + # SEVL ---- 0011 0010 0000 1111 ---- 0000 0101 + + ESB ---- 0011 0010 0000 1111 ---- 0001 0000 + ] # The canonical nop ends in 00000000, but the whole of the # rest of the space executes as nop if otherwise unsupported. 
diff --git a/target/arm/arch_dump.c b/target/arm/arch_dump.c index 0184845310..2d8e41ab8a 100644 --- a/target/arm/arch_dump.c +++ b/target/arm/arch_dump.c @@ -166,7 +166,7 @@ static off_t sve_fpcr_offset(uint32_t vq) static uint32_t sve_current_vq(CPUARMState *env) { - return sve_zcr_len_for_el(env, arm_current_el(env)) + 1; + return sve_vqm1_for_el(env, arm_current_el(env)) + 1; } static size_t sve_size_vq(uint32_t vq) @@ -232,12 +232,11 @@ static int aarch64_write_elf64_sve(WriteCoreDumpFunction f, #endif int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, - int cpuid, void *opaque) + int cpuid, DumpState *s) { struct aarch64_note note; ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; - DumpState *s = opaque; uint64_t pstate, sp; int ret, i; @@ -360,12 +359,11 @@ static int arm_write_elf32_vfp(WriteCoreDumpFunction f, CPUARMState *env, } int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, - int cpuid, void *opaque) + int cpuid, DumpState *s) { struct arm_note note; ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; - DumpState *s = opaque; int ret, i; bool fpvalid = cpu_isar_feature(aa32_vfp_simd, cpu); diff --git a/target/arm/arm_ldst.h b/target/arm/arm_ldst.h index 057160e8da..cee0548a1c 100644 --- a/target/arm/arm_ldst.h +++ b/target/arm/arm_ldst.h @@ -24,15 +24,15 @@ #include "qemu/bswap.h" /* Load an instruction and return it in the standard little-endian order */ -static inline uint32_t arm_ldl_code(CPUARMState *env, target_ulong addr, - bool sctlr_b) +static inline uint32_t arm_ldl_code(CPUARMState *env, DisasContextBase *s, + target_ulong addr, bool sctlr_b) { - return translator_ldl_swap(env, addr, bswap_code(sctlr_b)); + return translator_ldl_swap(env, s, addr, bswap_code(sctlr_b)); } /* Ditto, for a halfword (Thumb) instruction */ -static inline uint16_t arm_lduw_code(CPUARMState *env, target_ulong addr, - bool sctlr_b) +static inline uint16_t arm_lduw_code(CPUARMState *env, DisasContextBase* s, + target_ulong addr, bool sctlr_b) { #ifndef CONFIG_USER_ONLY /* In big-endian (BE32) mode, adjacent Thumb instructions have been swapped @@ -41,7 +41,7 @@ static inline uint16_t arm_lduw_code(CPUARMState *env, target_ulong addr, addr ^= 2; } #endif - return translator_lduw_swap(env, addr, bswap_code(sctlr_b)); + return translator_lduw_swap(env, s, addr, bswap_code(sctlr_b)); } #endif diff --git a/target/arm/common-semi-target.h b/target/arm/common-semi-target.h new file mode 100644 index 0000000000..629d75ca5a --- /dev/null +++ b/target/arm/common-semi-target.h @@ -0,0 +1,62 @@ +/* + * Target-specific parts of semihosting/arm-compat-semi.c. + * + * Copyright (c) 2005, 2007 CodeSourcery. 
+ * Copyright (c) 2019, 2022 Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef TARGET_ARM_COMMON_SEMI_TARGET_H +#define TARGET_ARM_COMMON_SEMI_TARGET_H + +#ifndef CONFIG_USER_ONLY +#include "hw/arm/boot.h" +#endif + +static inline target_ulong common_semi_arg(CPUState *cs, int argno) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + if (is_a64(env)) { + return env->xregs[argno]; + } else { + return env->regs[argno]; + } +} + +static inline void common_semi_set_ret(CPUState *cs, target_ulong ret) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + if (is_a64(env)) { + env->xregs[0] = ret; + } else { + env->regs[0] = ret; + } +} + +static inline bool common_semi_sys_exit_extended(CPUState *cs, int nr) +{ + return (nr == TARGET_SYS_EXIT_EXTENDED || is_a64(cs->env_ptr)); +} + +static inline bool is_64bit_semihosting(CPUArchState *env) +{ + return is_a64(env); +} + +static inline target_ulong common_semi_stack_bottom(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + return is_a64(env) ? env->xregs[31] : env->regs[13]; +} + +static inline bool common_semi_has_synccache(CPUArchState *env) +{ + /* Ok for A64, invalid for A32/T32 */ + return is_a64(env); +} + +#endif diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h new file mode 100644 index 0000000000..7e78c2c05c --- /dev/null +++ b/target/arm/cpregs.h @@ -0,0 +1,496 @@ +/* + * QEMU ARM CP Register access and descriptions + * + * Copyright (c) 2022 Linaro Ltd + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see + * + */ + +#ifndef TARGET_ARM_CPREGS_H +#define TARGET_ARM_CPREGS_H + +/* + * ARMCPRegInfo type field bits: + */ +enum { + /* + * Register must be handled specially during translation. + * The method is one of the values below: + */ + ARM_CP_SPECIAL_MASK = 0x000f, + /* Special: no change to PE state: writes ignored, reads ignored. */ + ARM_CP_NOP = 0x0001, + /* Special: sysreg is WFI, for v5 and v6. */ + ARM_CP_WFI = 0x0002, + /* Special: sysreg is NZCV. */ + ARM_CP_NZCV = 0x0003, + /* Special: sysreg is CURRENTEL. */ + ARM_CP_CURRENTEL = 0x0004, + /* Special: sysreg is DC ZVA or similar. */ + ARM_CP_DC_ZVA = 0x0005, + ARM_CP_DC_GVA = 0x0006, + ARM_CP_DC_GZVA = 0x0007, + + /* Flag: reads produce resetvalue; writes ignored. */ + ARM_CP_CONST = 1 << 4, + /* Flag: For ARM_CP_STATE_AA32, sysreg is 64-bit. */ + ARM_CP_64BIT = 1 << 5, + /* + * Flag: TB should not be ended after a write to this register + * (the default is that the TB ends after cp writes). + */ + ARM_CP_SUPPRESS_TB_END = 1 << 6, + /* + * Flag: Permit a register definition to override a previous definition + * for the same (cp, is64, crn, crm, opc1, opc2) tuple: either the new + * or the old must have the ARM_CP_OVERRIDE bit set. 
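+ * As a purely illustrative (hypothetical) sketch, a later, more specific
+ * definition could replace a generic one for the same encoding like so:
+ *     { .name = "FOOREG", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0,
+ *       .opc2 = 0, .access = PL1_RW,
+ *       .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
+ * where FOOREG and its encoding are invented for the example.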
+ */ + ARM_CP_OVERRIDE = 1 << 7, + /* + * Flag: Register is an alias view of some underlying state which is also + * visible via another register, and that the other register is handling + * migration and reset; registers marked ARM_CP_ALIAS will not be migrated + * but may have their state set by syncing of register state from KVM. + */ + ARM_CP_ALIAS = 1 << 8, + /* + * Flag: Register does I/O and therefore its accesses need to be marked + * with gen_io_start() and also end the TB. In particular, registers which + * implement clocks or timers require this. + */ + ARM_CP_IO = 1 << 9, + /* + * Flag: Register has no underlying state and does not support raw access + * for state saving/loading; it will not be used for either migration or + * KVM state synchronization. Typically this is for "registers" which are + * actually used as instructions for cache maintenance and so on. + */ + ARM_CP_NO_RAW = 1 << 10, + /* + * Flag: The read or write hook might raise an exception; the generated + * code will synchronize the CPU state before calling the hook so that it + * is safe for the hook to call raise_exception(). + */ + ARM_CP_RAISES_EXC = 1 << 11, + /* + * Flag: Writes to the sysreg might change the exception level - typically + * on older ARM chips. For those cases we need to re-read the new el when + * recomputing the translation flags. + */ + ARM_CP_NEWEL = 1 << 12, + /* + * Flag: Access check for this sysreg is identical to accessing FPU state + * from an instruction: use translation fp_access_check(). + */ + ARM_CP_FPU = 1 << 13, + /* + * Flag: Access check for this sysreg is identical to accessing SVE state + * from an instruction: use translation sve_access_check(). + */ + ARM_CP_SVE = 1 << 14, + /* Flag: Do not expose in gdb sysreg xml. */ + ARM_CP_NO_GDB = 1 << 15, + /* + * Flags: If EL3 but not EL2... + * - UNDEF: discard the cpreg, + * - KEEP: retain the cpreg as is, + * - C_NZ: set const on the cpreg, but retain resetvalue, + * - else: set const on the cpreg, zero resetvalue, aka RES0. + * See rule RJFFP in section D1.1.3 of DDI0487H.a. + */ + ARM_CP_EL3_NO_EL2_UNDEF = 1 << 16, + ARM_CP_EL3_NO_EL2_KEEP = 1 << 17, + ARM_CP_EL3_NO_EL2_C_NZ = 1 << 18, + /* + * Flag: Access check for this sysreg is constrained by the + * ARM pseudocode function CheckSMEAccess(). + */ + ARM_CP_SME = 1 << 19, +}; + +/* + * Valid values for ARMCPRegInfo state field, indicating which of + * the AArch32 and AArch64 execution states this register is visible in. + * If the reginfo doesn't explicitly specify then it is AArch32 only. + * If the reginfo is declared to be visible in both states then a second + * reginfo is synthesised for the AArch32 view of the AArch64 register, + * such that the AArch32 view is the lower 32 bits of the AArch64 one. + * Note that we rely on the values of these enums as we iterate through + * the various states in some places. + */ +typedef enum { + ARM_CP_STATE_AA32 = 0, + ARM_CP_STATE_AA64 = 1, + ARM_CP_STATE_BOTH = 2, +} CPState; + +/* + * ARM CP register secure state flags. These flags identify security state + * attributes for a given CP register entry. + * The existence of both or neither secure and non-secure flags indicates that + * the register has both a secure and non-secure hash entry. A single one of + * these flags causes the register to only be hashed for the specified + * security state. + * Although definitions may have any combination of the S/NS bits, each + * registered entry will only have one to identify whether the entry is secure + * or non-secure. 
+ */ +typedef enum { + ARM_CP_SECSTATE_BOTH = 0, /* define one cpreg for each secstate */ + ARM_CP_SECSTATE_S = (1 << 0), /* bit[0]: Secure state register */ + ARM_CP_SECSTATE_NS = (1 << 1), /* bit[1]: Non-secure state register */ +} CPSecureState; + +/* + * Access rights: + * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM + * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and + * PL2 (hyp). The other level which has Read and Write bits is Secure PL1 + * (ie any of the privileged modes in Secure state, or Monitor mode). + * If a register is accessible in one privilege level it's always accessible + * in higher privilege levels too. Since "Secure PL1" also follows this rule + * (ie anything visible in PL2 is visible in S-PL1, some things are only + * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the + * terminology a little and call this PL3. + * In AArch64 things are somewhat simpler as the PLx bits line up exactly + * with the ELx exception levels. + * + * If access permissions for a register are more complex than can be + * described with these bits, then use a laxer set of restrictions, and + * do the more restrictive/complex check inside a helper function. + */ +typedef enum { + PL3_R = 0x80, + PL3_W = 0x40, + PL2_R = 0x20 | PL3_R, + PL2_W = 0x10 | PL3_W, + PL1_R = 0x08 | PL2_R, + PL1_W = 0x04 | PL2_W, + PL0_R = 0x02 | PL1_R, + PL0_W = 0x01 | PL1_W, + + /* + * For user-mode some registers are accessible to EL0 via a kernel + * trap-and-emulate ABI. In this case we define the read permissions + * as actually being PL0_R. However some bits of any given register + * may still be masked. + */ +#ifdef CONFIG_USER_ONLY + PL0U_R = PL0_R, +#else + PL0U_R = PL1_R, +#endif + + PL3_RW = PL3_R | PL3_W, + PL2_RW = PL2_R | PL2_W, + PL1_RW = PL1_R | PL1_W, + PL0_RW = PL0_R | PL0_W, +} CPAccessRights; + +typedef enum CPAccessResult { + /* Access is permitted */ + CP_ACCESS_OK = 0, + + /* + * Combined with one of the following, the low 2 bits indicate the + * target exception level. If 0, the exception is taken to the usual + * target EL (EL1 or PL1 if in EL0, otherwise to the current EL). + */ + CP_ACCESS_EL_MASK = 3, + + /* + * Access fails due to a configurable trap or enable which would + * result in a categorized exception syndrome giving information about + * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6, + * 0xc or 0x18). + */ + CP_ACCESS_TRAP = (1 << 2), + CP_ACCESS_TRAP_EL2 = CP_ACCESS_TRAP | 2, + CP_ACCESS_TRAP_EL3 = CP_ACCESS_TRAP | 3, + + /* + * Access fails and results in an exception syndrome 0x0 ("uncategorized"). + * Note that this is not a catch-all case -- the set of cases which may + * result in this failure is specifically defined by the architecture. + */ + CP_ACCESS_TRAP_UNCATEGORIZED = (2 << 2), + CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = CP_ACCESS_TRAP_UNCATEGORIZED | 2, + CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = CP_ACCESS_TRAP_UNCATEGORIZED | 3, +} CPAccessResult; + +typedef struct ARMCPRegInfo ARMCPRegInfo; + +/* + * Access functions for coprocessor registers. These cannot fail and + * may not raise exceptions. + */ +typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque); +typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque, + uint64_t value); +/* Access permission check functions for coprocessor registers. 
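+ * As a minimal sketch (hypothetical, for illustration only), an accessfn
+ * that traps any access from below EL2 to EL2 could look like:
+ *     static CPAccessResult access_foo(CPUARMState *env,
+ *                                      const ARMCPRegInfo *ri, bool isread)
+ *     {
+ *         return arm_current_el(env) < 2 ? CP_ACCESS_TRAP_EL2 : CP_ACCESS_OK;
+ *     }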
*/ +typedef CPAccessResult CPAccessFn(CPUARMState *env, + const ARMCPRegInfo *opaque, + bool isread); +/* Hook function for register reset */ +typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque); + +#define CP_ANY 0xff + +/* Definition of an ARM coprocessor register */ +struct ARMCPRegInfo { + /* Name of register (useful mainly for debugging, need not be unique) */ + const char *name; + /* + * Location of register: coprocessor number and (crn,crm,opc1,opc2) + * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a + * 'wildcard' field -- any value of that field in the MRC/MCR insn + * will be decoded to this register. The register read and write + * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2 + * used by the program, so it is possible to register a wildcard and + * then behave differently on read/write if necessary. + * For 64 bit registers, only crm and opc1 are relevant; crn and opc2 + * must both be zero. + * For AArch64-visible registers, opc0 is also used. + * Since there are no "coprocessors" in AArch64, cp is purely used as a + * way to distinguish (for KVM's benefit) guest-visible system registers + * from demuxed ones provided to preserve the "no side effects on + * KVM register read/write from QEMU" semantics. cp==0x13 is guest + * visible (to match KVM's encoding); cp==0 will be converted to + * cp==0x13 when the ARMCPRegInfo is registered, for convenience. + */ + uint8_t cp; + uint8_t crn; + uint8_t crm; + uint8_t opc0; + uint8_t opc1; + uint8_t opc2; + /* Execution state in which this register is visible: ARM_CP_STATE_* */ + CPState state; + /* Register type: ARM_CP_* bits/values */ + int type; + /* Access rights: PL*_[RW] */ + CPAccessRights access; + /* Security state: ARM_CP_SECSTATE_* bits/values */ + CPSecureState secure; + /* + * The opaque pointer passed to define_arm_cp_regs_with_opaque() when + * this register was defined: can be used to hand data through to the + * register read/write functions, since they are passed the ARMCPRegInfo*. + */ + void *opaque; + /* + * Value of this register, if it is ARM_CP_CONST. Otherwise, if + * fieldoffset is non-zero, the reset value of the register. + */ + uint64_t resetvalue; + /* + * Offset of the field in CPUARMState for this register. + * This is not needed if either: + * 1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs + * 2. both readfn and writefn are specified + */ + ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */ + + /* + * Offsets of the secure and non-secure fields in CPUARMState for the + * register if it is banked. These fields are only used during the static + * registration of a register. During hashing the bank associated + * with a given security state is copied to fieldoffset which is used from + * there on out. + * + * It is expected that register definitions use either fieldoffset or + * bank_fieldoffsets in the definition but not both. It is also expected + * that both bank offsets are set when defining a banked register. This + * use indicates that a register is banked. + */ + ptrdiff_t bank_fieldoffsets[2]; + + /* + * Function for making any access checks for this register in addition to + * those specified by the 'access' permissions bits. If NULL, no extra + * checks required. The access check is performed at runtime, not at + * translate time. + */ + CPAccessFn *accessfn; + /* + * Function for handling reads of this register. If NULL, then reads + * will be done by loading from the offset into CPUARMState specified + * by fieldoffset. 
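+ * (arm_cp_read_zero(), declared further down in this header, is the
+ * simplest possible readfn: it ignores the register state and reads
+ * as zero.)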
+ */ + CPReadFn *readfn; + /* + * Function for handling writes of this register. If NULL, then writes + * will be done by writing to the offset into CPUARMState specified + * by fieldoffset. + */ + CPWriteFn *writefn; + /* + * Function for doing a "raw" read; used when we need to copy + * coprocessor state to the kernel for KVM or out for + * migration. This only needs to be provided if there is also a + * readfn and it has side effects (for instance clear-on-read bits). + */ + CPReadFn *raw_readfn; + /* + * Function for doing a "raw" write; used when we need to copy KVM + * kernel coprocessor state into userspace, or for inbound + * migration. This only needs to be provided if there is also a + * writefn and it masks out "unwritable" bits or has write-one-to-clear + * or similar behaviour. + */ + CPWriteFn *raw_writefn; + /* + * Function for resetting the register. If NULL, then reset will be done + * by writing resetvalue to the field specified in fieldoffset. If + * fieldoffset is 0 then no reset will be done. + */ + CPResetFn *resetfn; + + /* + * "Original" writefn and readfn. + * For ARMv8.1-VHE register aliases, we overwrite the read/write + * accessor functions of various EL1/EL0 to perform the runtime + * check for which sysreg should actually be modified, and then + * forwards the operation. Before overwriting the accessors, + * the original function is copied here, so that accesses that + * really do go to the EL1/EL0 version proceed normally. + * (The corresponding EL2 register is linked via opaque.) + */ + CPReadFn *orig_readfn; + CPWriteFn *orig_writefn; +}; + +/* + * Macros which are lvalues for the field in CPUARMState for the + * ARMCPRegInfo *ri. + */ +#define CPREG_FIELD32(env, ri) \ + (*(uint32_t *)((char *)(env) + (ri)->fieldoffset)) +#define CPREG_FIELD64(env, ri) \ + (*(uint64_t *)((char *)(env) + (ri)->fieldoffset)) + +void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, const ARMCPRegInfo *reg, + void *opaque); + +static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs) +{ + define_one_arm_cp_reg_with_opaque(cpu, regs, NULL); +} + +void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs, + void *opaque, size_t len); + +#define define_arm_cp_regs_with_opaque(CPU, REGS, OPAQUE) \ + do { \ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(REGS) == 0); \ + define_arm_cp_regs_with_opaque_len(CPU, REGS, OPAQUE, \ + ARRAY_SIZE(REGS)); \ + } while (0) + +#define define_arm_cp_regs(CPU, REGS) \ + define_arm_cp_regs_with_opaque(CPU, REGS, NULL) + +const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp); + +/* + * Definition of an ARM co-processor register as viewed from + * userspace. This is used for presenting sanitised versions of + * registers to userspace when emulating the Linux AArch64 CPU + * ID/feature ABI (advertised as HWCAP_CPUID). 
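+ * A hypothetical entry in such a modification table might expose only the
+ * low nibble of an ID register and force everything else to zero, e.g.:
+ *     { .name = "ID_FOO_EL1", .exported_bits = 0xf, .fixed_bits = 0 },
+ * (the register name and mask are invented for the example).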
+ */ +typedef struct ARMCPRegUserSpaceInfo { + /* Name of register */ + const char *name; + + /* Is the name actually a glob pattern */ + bool is_glob; + + /* Only some bits are exported to user space */ + uint64_t exported_bits; + + /* Fixed bits are applied after the mask */ + uint64_t fixed_bits; +} ARMCPRegUserSpaceInfo; + +void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len, + const ARMCPRegUserSpaceInfo *mods, + size_t mods_len); + +#define modify_arm_cp_regs(REGS, MODS) \ + do { \ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(REGS) == 0); \ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(MODS) == 0); \ + modify_arm_cp_regs_with_len(REGS, ARRAY_SIZE(REGS), \ + MODS, ARRAY_SIZE(MODS)); \ + } while (0) + +/* CPWriteFn that can be used to implement writes-ignored behaviour */ +void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value); +/* CPReadFn that can be used for read-as-zero behaviour */ +uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri); + +/* CPWriteFn that just writes the value to ri->fieldoffset */ +void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value); + +/* + * CPResetFn that does nothing, for use if no reset is required even + * if fieldoffset is non zero. + */ +void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque); + +/* + * Return true if this reginfo struct's field in the cpu state struct + * is 64 bits wide. + */ +static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri) +{ + return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT); +} + +static inline bool cp_access_ok(int current_el, + const ARMCPRegInfo *ri, int isread) +{ + return (ri->access >> ((current_el * 2) + isread)) & 1; +} + +/* Raw read of a coprocessor register (as needed for migration, etc) */ +uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri); + +/* + * Return true if the cp register encoding is in the "feature ID space" as + * defined by FEAT_IDST (and thus should be reported with ER_ELx.EC + * as EC_SYSTEMREGISTERTRAP rather than EC_UNCATEGORIZED). + */ +static inline bool arm_cpreg_encoding_in_idspace(uint8_t opc0, uint8_t opc1, + uint8_t opc2, + uint8_t crn, uint8_t crm) +{ + return opc0 == 3 && (opc1 == 0 || opc1 == 1 || opc1 == 3) && + crn == 0 && crm < 8; +} + +/* + * As arm_cpreg_encoding_in_idspace(), but take the encoding from an + * ARMCPRegInfo. + */ +static inline bool arm_cpreg_in_idspace(const ARMCPRegInfo *ri) +{ + return ri->state == ARM_CP_STATE_AA64 && + arm_cpreg_encoding_in_idspace(ri->opc0, ri->opc1, ri->opc2, + ri->crn, ri->crm); +} + +#endif /* TARGET_ARM_CPREGS_H */ diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h index 7f38d33b8e..53cac9c89b 100644 --- a/target/arm/cpu-param.h +++ b/target/arm/cpu-param.h @@ -6,12 +6,12 @@ */ #ifndef ARM_CPU_PARAM_H -#define ARM_CPU_PARAM_H 1 +#define ARM_CPU_PARAM_H #ifdef TARGET_AARCH64 # define TARGET_LONG_BITS 64 -# define TARGET_PHYS_ADDR_SPACE_BITS 48 -# define TARGET_VIRT_ADDR_SPACE_BITS 48 +# define TARGET_PHYS_ADDR_SPACE_BITS 52 +# define TARGET_VIRT_ADDR_SPACE_BITS 52 #else # define TARGET_LONG_BITS 32 # define TARGET_PHYS_ADDR_SPACE_BITS 40 @@ -30,8 +30,23 @@ */ # define TARGET_PAGE_BITS_VARY # define TARGET_PAGE_BITS_MIN 10 + +# define TARGET_TB_PCREL 1 + +/* + * Cache the attrs and shareability fields from the page table entry. + * + * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2]. + * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format. 
+ * For shareability and guarded, as in the SH and GP fields respectively + * of the VMSAv8-64 PTEs. + */ +# define TARGET_PAGE_ENTRY_EXTRA \ + uint8_t pte_attrs; \ + uint8_t shareability; \ + bool guarded; #endif -#define NB_MMU_MODES 15 +#define NB_MMU_MODES 12 #endif diff --git a/target/arm/cpu-qom.h b/target/arm/cpu-qom.h index a22bd506d0..64c44cef2d 100644 --- a/target/arm/cpu-qom.h +++ b/target/arm/cpu-qom.h @@ -27,8 +27,7 @@ struct arm_boot_info; #define TYPE_ARM_CPU "arm-cpu" -OBJECT_DECLARE_TYPE(ARMCPU, ARMCPUClass, - ARM_CPU) +OBJECT_DECLARE_CPU_TYPE(ARMCPU, ARMCPUClass, ARM_CPU) #define TYPE_ARM_MAX_CPU "max-" TYPE_ARM_CPU diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 2866dd7658..38d066c294 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -20,7 +20,9 @@ #include "qemu/osdep.h" #include "qemu/qemu-print.h" -#include "qemu-common.h" +#include "qemu/timer.h" +#include "qemu/log.h" +#include "exec/page-vary.h" #include "target/arm/idau.h" #include "qemu/module.h" #include "qapi/error.h" @@ -37,10 +39,12 @@ #include "hw/boards.h" #endif #include "sysemu/tcg.h" +#include "sysemu/qtest.h" #include "sysemu/hw_accel.h" #include "kvm_arm.h" #include "disas/capstone.h" #include "fpu/softfloat.h" +#include "cpregs.h" static void arm_cpu_set_pc(CPUState *cs, vaddr value) { @@ -49,28 +53,66 @@ static void arm_cpu_set_pc(CPUState *cs, vaddr value) if (is_a64(env)) { env->pc = value; - env->thumb = 0; + env->thumb = false; } else { env->regs[15] = value & ~1; env->thumb = value & 1; } } +static vaddr arm_cpu_get_pc(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + + if (is_a64(env)) { + return env->pc; + } else { + return env->regs[15]; + } +} + #ifdef CONFIG_TCG void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; + /* The program counter is always up to date with TARGET_TB_PCREL. */ + if (!TARGET_TB_PCREL) { + CPUARMState *env = cs->env_ptr; + /* + * It's OK to look at env for the current mode here, because it's + * never possible for an AArch64 TB to chain to an AArch32 TB. + */ + if (is_a64(env)) { + env->pc = tb_pc(tb); + } else { + env->regs[15] = tb_pc(tb); + } + } +} + +void arm_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + CPUARMState *env = cs->env_ptr; - /* - * It's OK to look at env for the current mode here, because it's - * never possible for an AArch64 TB to chain to an AArch32 TB. 
- */ if (is_a64(env)) { - env->pc = tb->pc; + if (TARGET_TB_PCREL) { + env->pc = (env->pc & TARGET_PAGE_MASK) | data[0]; + } else { + env->pc = data[0]; + } + env->condexec_bits = 0; + env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT; } else { - env->regs[15] = tb->pc; + if (TARGET_TB_PCREL) { + env->regs[15] = (env->regs[15] & TARGET_PAGE_MASK) | data[0]; + } else { + env->regs[15] = data[0]; + } + env->condexec_bits = data[1]; + env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT; } } #endif /* CONFIG_TCG */ @@ -82,7 +124,7 @@ static bool arm_cpu_has_work(CPUState *cs) return (cpu->power_state != PSCI_OFF) && cs->interrupt_request & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD - | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ + | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR | CPU_INTERRUPT_EXITTB); } @@ -114,7 +156,7 @@ static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque) ARMCPRegInfo *ri = value; ARMCPU *cpu = opaque; - if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS)) { + if (ri->type & (ARM_CP_SPECIAL_MASK | ARM_CP_ALIAS)) { return; } @@ -150,7 +192,7 @@ static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque) ARMCPU *cpu = opaque; uint64_t oldvalue, newvalue; - if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS | ARM_CP_NO_RAW)) { + if (ri->type & (ARM_CP_SPECIAL_MASK | ARM_CP_ALIAS | ARM_CP_NO_RAW)) { return; } @@ -187,7 +229,7 @@ static void arm_cpu_reset(DeviceState *dev) if (arm_feature(env, ARM_FEATURE_AARCH64)) { /* 64 bit CPUs always start in 64 bit mode */ - env->aarch64 = 1; + env->aarch64 = true; #if defined(CONFIG_USER_ONLY) env->pstate = PSTATE_MODE_EL0t; /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */ @@ -195,20 +237,34 @@ static void arm_cpu_reset(DeviceState *dev) /* Enable all PAC keys. */ env->cp15.sctlr_el[1] |= (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB); + /* Trap on btype=3 for PACIxSP. */ + env->cp15.sctlr_el[1] |= SCTLR_BT0; /* and to the FP/Neon instructions */ - env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3); - /* and to the SVE instructions */ - env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 16, 2, 3); - /* with reasonable vector length */ + env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1, + CPACR_EL1, FPEN, 3); + /* and to the SVE instructions, with default vector length */ if (cpu_isar_feature(aa64_sve, cpu)) { - env->vfp.zcr_el[1] = - aarch64_sve_zcr_get_valid_len(cpu, cpu->sve_default_vq - 1); + env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1, + CPACR_EL1, ZEN, 3); + env->vfp.zcr_el[1] = cpu->sve_default_vq - 1; + } + /* and for SME instructions, with default vector length, and TPIDR2 */ + if (cpu_isar_feature(aa64_sme, cpu)) { + env->cp15.sctlr_el[1] |= SCTLR_EnTP2; + env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1, + CPACR_EL1, SMEN, 3); + env->vfp.smcr_el[1] = cpu->sme_default_vq - 1; + if (cpu_isar_feature(aa64_sme_fa64, cpu)) { + env->vfp.smcr_el[1] = FIELD_DP64(env->vfp.smcr_el[1], + SMCR, FA64, 1); + } } /* + * Enable 48-bit address space (TODO: take reserved_va into account). * Enable TBI0 but not TBI1. * Note that this must match useronly_clean_ptr. */ - env->cp15.tcr_el[1].raw_tcr = (1ULL << 37); + env->cp15.tcr_el[1] = 5 | (1ULL << 37); /* Enable MTE */ if (cpu_isar_feature(aa64_mte, cpu)) { @@ -224,6 +280,11 @@ static void arm_cpu_reset(DeviceState *dev) */ env->cp15.gcr_el1 = 0x1ffff; } + /* + * Disable access to SCXTNUM_EL0 from CSV2_1p2. + * This is not yet exposed from the Linux kernel in any way. 
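+ * (Setting SCTLR_EL1.TSCXT below causes EL0 accesses to SCXTNUM_EL0
+ * to trap.)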
+ */ + env->cp15.sctlr_el[1] |= SCTLR_TSCXT; #else /* Reset into the highest available EL */ if (arm_feature(env, ARM_FEATURE_EL3)) { @@ -233,12 +294,18 @@ static void arm_cpu_reset(DeviceState *dev) } else { env->pstate = PSTATE_MODE_EL1h; } - env->pc = cpu->rvbar; + + /* Sample rvbar at reset. */ + env->cp15.rvbar = cpu->rvbar_prop; + env->pc = env->cp15.rvbar; #endif } else { #if defined(CONFIG_USER_ONLY) /* Userspace expects access to cp10 and cp11 for FP/Neon */ - env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 4, 0xf); + env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1, + CPACR, CP10, 3); + env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1, + CPACR, CP11, 3); #endif } @@ -266,11 +333,24 @@ static void arm_cpu_reset(DeviceState *dev) } env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F; + /* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently + * executing as AArch32 then check if highvecs are enabled and + * adjust the PC accordingly. + */ + if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { + env->regs[15] = 0xFFFF0000; + } + + env->vfp.xregs[ARM_VFP_FPEXC] = 0; +#endif + if (arm_feature(env, ARM_FEATURE_M)) { +#ifndef CONFIG_USER_ONLY uint32_t initial_msp; /* Loaded from 0x0 */ uint32_t initial_pc; /* Loaded from 0x4 */ uint8_t *rom; uint32_t vecbase; +#endif if (cpu_isar_feature(aa32_lob, cpu)) { /* @@ -324,6 +404,8 @@ static void arm_cpu_reset(DeviceState *dev) env->v7m.fpccr[M_REG_S] = R_V7M_FPCCR_ASPEN_MASK | R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK; } + +#ifndef CONFIG_USER_ONLY /* Unlike A/R profile, M profile defines the reset LR value */ env->regs[14] = 0xffffffff; @@ -349,17 +431,26 @@ static void arm_cpu_reset(DeviceState *dev) initial_pc = ldl_phys(s->as, vecbase + 4); } + qemu_log_mask(CPU_LOG_INT, + "Loaded reset SP 0x%x PC 0x%x from vector table\n", + initial_msp, initial_pc); + env->regs[13] = initial_msp & 0xFFFFFFFC; env->regs[15] = initial_pc & ~1; env->thumb = initial_pc & 1; - } - - /* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently - * executing as AArch32 then check if highvecs are enabled and - * adjust the PC accordingly. - */ - if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { - env->regs[15] = 0xFFFF0000; +#else + /* + * For user mode we run non-secure and with access to the FPU. + * The FPU context is active (ie does not need further setup) + * and is owned by non-secure. + */ + env->v7m.secure = false; + env->v7m.nsacr = 0xcff; + env->v7m.cpacr[M_REG_NS] = 0xf0ffff; + env->v7m.fpccr[M_REG_S] &= + ~(R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK); + env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK; +#endif } /* M profile requires that reset clears the exclusive monitor; @@ -368,9 +459,6 @@ static void arm_cpu_reset(DeviceState *dev) */ arm_clear_exclusive(env); - env->vfp.xregs[ARM_VFP_FPEXC] = 0; -#endif - if (arm_feature(env, ARM_FEATURE_PMSA)) { if (cpu->pmsav7_dregion > 0) { if (arm_feature(env, ARM_FEATURE_V8)) { @@ -440,6 +528,8 @@ static void arm_cpu_reset(DeviceState *dev) arm_rebuild_hflags(env); } +#ifndef CONFIG_USER_ONLY + static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx, unsigned int target_el, unsigned int cur_el, bool secure, @@ -479,6 +569,12 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx, return false; } return !(env->daif & PSTATE_I); + case EXCP_VSERR: + if (!(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) { + /* VIRQs are only taken when hypervized. 
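+ * That is, a virtual SError is only delivered when HCR_EL2.AMO is set
+ * and HCR_EL2.TGE is clear, which is what the enclosing check tests.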
*/ + return false; + } + return !(env->daif & PSTATE_A); default: g_assert_not_reached(); } @@ -491,14 +587,24 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx, if ((target_el > cur_el) && (target_el != 1)) { /* Exceptions targeting a higher EL may not be maskable */ if (arm_feature(env, ARM_FEATURE_AARCH64)) { - /* - * 64-bit masking rules are simple: exceptions to EL3 - * can't be masked, and exceptions to EL2 can only be - * masked from Secure state. The HCR and SCR settings - * don't affect the masking logic, only the interrupt routing. - */ - if (target_el == 3 || !secure || (env->cp15.scr_el3 & SCR_EEL2)) { + switch (target_el) { + case 2: + /* + * According to ARM DDI 0487H.a, an interrupt can be masked + * when HCR_E2H and HCR_TGE are both set regardless of the + * current Security state. Note that we need to revisit this + * part again once we need to support NMI. + */ + if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { + unmasked = true; + } + break; + case 3: + /* Interrupt cannot be masked when the target EL is 3 */ unmasked = true; + break; + default: + g_assert_not_reached(); } } else { /* @@ -556,7 +662,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx, return unmasked || pstate_unmasked; } -bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { CPUClass *cc = CPU_GET_CLASS(cs); CPUARMState *env = cs->env_ptr; @@ -600,6 +706,17 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request) goto found; } } + if (interrupt_request & CPU_INTERRUPT_VSERR) { + excp_idx = EXCP_VSERR; + target_el = 1; + if (arm_excp_unmasked(cs, excp_idx, target_el, + cur_el, secure, hcr_el2)) { + /* Taking a virtual abort clears HCR_EL2.VSE */ + env->cp15.hcr_el2 &= ~HCR_VSE; + cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR); + goto found; + } + } return false; found: @@ -608,6 +725,7 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request) cc->tcg_ops->do_interrupt(cs); return true; } +#endif /* !CONFIG_USER_ONLY */ void arm_cpu_update_virq(ARMCPU *cpu) { @@ -651,6 +769,25 @@ void arm_cpu_update_vfiq(ARMCPU *cpu) } } +void arm_cpu_update_vserr(ARMCPU *cpu) +{ + /* + * Update the interrupt level for VSERR, which is the HCR_EL2.VSE bit. + */ + CPUARMState *env = &cpu->env; + CPUState *cs = CPU(cpu); + + bool new_state = env->cp15.hcr_el2 & HCR_VSE; + + if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VSERR) != 0)) { + if (new_state) { + cpu_interrupt(cs, CPU_INTERRUPT_VSERR); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR); + } + } +} + #ifndef CONFIG_USER_ONLY static void arm_cpu_set_irq(void *opaque, int irq, int level) { @@ -664,6 +801,16 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level) [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ }; + if (!arm_feature(env, ARM_FEATURE_EL2) && + (irq == ARM_CPU_VIRQ || irq == ARM_CPU_VFIQ)) { + /* + * The GIC might tell us about VIRQ and VFIQ state, but if we don't + * have EL2 support we don't care. (Unless the guest is doing something + * silly this will only be calls saying "level is still 0".) 
+ */ + return; + } + if (level) { env->irq_line_state |= mask[irq]; } else { @@ -672,11 +819,9 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level) switch (irq) { case ARM_CPU_VIRQ: - assert(arm_feature(env, ARM_FEATURE_EL2)); arm_cpu_update_virq(cpu); break; case ARM_CPU_VFIQ: - assert(arm_feature(env, ARM_FEATURE_EL2)); arm_cpu_update_vfiq(cpu); break; case ARM_CPU_IRQ: @@ -734,12 +879,6 @@ static bool arm_cpu_virtio_is_big_endian(CPUState *cs) #endif -static int -print_insn_thumb1(bfd_vma pc, disassemble_info *info) -{ - return print_insn_arm(pc | 1, info); -} - static void arm_disas_set_info(CPUState *cpu, disassemble_info *info) { ARMCPU *ac = ARM_CPU(cpu); @@ -747,25 +886,16 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info) bool sctlr_b; if (is_a64(env)) { - /* We might not be compiled with the A64 disassembler - * because it needs a C++ compiler. Leave print_insn - * unset in this case to use the caller default behaviour. - */ -#if defined(CONFIG_ARM_A64_DIS) - info->print_insn = print_insn_arm_a64; -#endif info->cap_arch = CS_ARCH_ARM64; info->cap_insn_unit = 4; info->cap_insn_split = 4; } else { int cap_mode; if (env->thumb) { - info->print_insn = print_insn_thumb1; info->cap_insn_unit = 2; info->cap_insn_split = 4; cap_mode = CS_MODE_THUMB; } else { - info->print_insn = print_insn_arm; info->cap_insn_unit = 4; info->cap_insn_split = 4; cap_mode = CS_MODE_ARM; @@ -782,7 +912,7 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info) sctlr_b = arm_sctlr_b(env); if (bswap_code(sctlr_b)) { -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN info->endian = BFD_ENDIAN_LITTLE; #else info->endian = BFD_ENDIAN_BIG; @@ -806,6 +936,7 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) int i; int el = arm_current_el(env); const char *ns_status; + bool sve; qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc); for (i = 0; i < 32; i++) { @@ -832,6 +963,12 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) el, psr & PSTATE_SP ? 'h' : 't'); + if (cpu_isar_feature(aa64_sme, cpu)) { + qemu_fprintf(f, " SVCR=%08" PRIx64 " %c%c", + env->svcr, + (FIELD_EX64(env->svcr, SVCR, ZA) ? 'Z' : '-'), + (FIELD_EX64(env->svcr, SVCR, SM) ? 'S' : '-')); + } if (cpu_isar_feature(aa64_bti, cpu)) { qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10); } @@ -846,8 +983,16 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n", vfp_get_fpcr(env), vfp_get_fpsr(env)); - if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) { - int j, zcr_len = sve_zcr_len_for_el(env, el); + if (cpu_isar_feature(aa64_sme, cpu) && FIELD_EX64(env->svcr, SVCR, SM)) { + sve = sme_exception_el(env, el) == 0; + } else if (cpu_isar_feature(aa64_sve, cpu)) { + sve = sve_exception_el(env, el) == 0; + } else { + sve = false; + } + + if (sve) { + int j, zcr_len = sve_vqm1_for_el(env, el); for (i = 0; i <= FFR_PRED_NUM; i++) { bool eol; @@ -1017,6 +1162,9 @@ static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags) i, v); } qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env)); + if (cpu_isar_feature(aa32_mve, cpu)) { + qemu_fprintf(f, "VPR: %08x\n", env->v7m.vpr); + } } } @@ -1027,27 +1175,13 @@ uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz) return (Aff1 << ARM_AFF1_SHIFT) | Aff0; } -static void cpreg_hashtable_data_destroy(gpointer data) -{ - /* - * Destroy function for cpu->cp_regs hashtable data entries. 
- * We must free the name string because it was g_strdup()ed in - * add_cpreg_to_hashtable(). It's OK to cast away the 'const' - * from r->name because we know we definitely allocated it. - */ - ARMCPRegInfo *r = data; - - g_free((void *)r->name); - g_free(r); -} - static void arm_cpu_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); cpu_set_cpustate_pointers(cpu); - cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal, - g_free, cpreg_hashtable_data_destroy); + cpu->cp_regs = g_hash_table_new_full(g_direct_hash, g_direct_equal, + NULL, g_free); QLIST_INIT(&cpu->pre_el_change_hooks); QLIST_INIT(&cpu->el_change_hooks); @@ -1055,11 +1189,13 @@ static void arm_cpu_initfn(Object *obj) #ifdef CONFIG_USER_ONLY # ifdef TARGET_AARCH64 /* - * The linux kernel defaults to 512-bit vectors, when sve is supported. - * See documentation for /proc/sys/abi/sve_default_vector_length, and - * our corresponding sve-default-vector-length cpu property. + * The linux kernel defaults to 512-bit for SVE, and 256-bit for SME. + * These values were chosen to fit within the default signal frame. + * See documentation for /proc/sys/abi/{sve,sme}_default_vector_length, + * and our corresponding cpu property. */ cpu->sve_default_vq = 4; + cpu->sme_default_vq = 2; # endif #else /* Our inbound IRQ and FIQ lines */ @@ -1086,11 +1222,12 @@ static void arm_cpu_initfn(Object *obj) * picky DTB consumer will also provide a helpful error message. */ cpu->dtb_compatible = "qemu,unknown"; - cpu->psci_version = 1; /* By default assume PSCI v0.1 */ + cpu->psci_version = QEMU_PSCI_VERSION_0_1; /* By default assume PSCI v0.1 */ cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE; - if (tcg_enabled()) { - cpu->psci_version = 2; /* TCG implements PSCI 0.2 */ + if (tcg_enabled() || hvf_enabled()) { + /* TCG and HVF implement PSCI 1.1 */ + cpu->psci_version = QEMU_PSCI_VERSION_1_1; } } @@ -1104,9 +1241,6 @@ static Property arm_cpu_reset_cbar_property = static Property arm_cpu_reset_hivecs_property = DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false); -static Property arm_cpu_rvbar_property = - DEFINE_PROP_UINT64("rvbar", ARMCPU, rvbar, 0); - #ifndef CONFIG_USER_ONLY static Property arm_cpu_has_el2_property = DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true); @@ -1209,7 +1343,9 @@ void arm_cpu_post_init(Object *obj) } if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - qdev_property_add_static(DEVICE(obj), &arm_cpu_rvbar_property); + object_property_add_uint64_ptr(obj, "rvbar", + &cpu->rvbar_prop, + OBJ_PROP_FLAG_READWRITE); } #ifndef CONFIG_USER_ONLY @@ -1293,6 +1429,11 @@ void arm_cpu_post_init(Object *obj) OBJ_PROP_FLAG_READWRITE); } + /* Not DEFINE_PROP_UINT32: we want this to be settable after realize */ + object_property_add_uint32_ptr(obj, "psci-conduit", + &cpu->psci_conduit, + OBJ_PROP_FLAG_READWRITE); + qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property); if (arm_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER)) { @@ -1349,6 +1490,7 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp) { Error *local_err = NULL; +#ifdef TARGET_AARCH64 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { arm_cpu_sve_finalize(cpu, &local_err); if (local_err != NULL) { @@ -1356,19 +1498,25 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp) return; } - /* - * KVM does not support modifications to this feature. - * We have not registered the cpu properties when KVM - * is in use, so the user will not be able to set them. 
- */ - if (!kvm_enabled()) { - arm_cpu_pauth_finalize(cpu, &local_err); - if (local_err != NULL) { - error_propagate(errp, local_err); - return; - } + arm_cpu_sme_finalize(cpu, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; + } + + arm_cpu_pauth_finalize(cpu, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; + } + + arm_cpu_lpa2_finalize(cpu, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; } } +#endif if (kvm_enabled()) { kvm_arm_steal_time_finalize(cpu, &local_err); @@ -1394,8 +1542,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) * this is the first point where we can report it. */ if (cpu->host_cpu_probe_failed) { - if (!kvm_enabled()) { - error_setg(errp, "The 'host' CPU type can only be used with KVM"); + if (!kvm_enabled() && !hvf_enabled()) { + error_setg(errp, "The 'host' CPU type can only be used with KVM or HVF"); } else { error_setg(errp, "Failed to retrieve host CPU features"); } @@ -1419,6 +1567,36 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) } } + if (!tcg_enabled() && !qtest_enabled()) { + /* + * We assume that no accelerator except TCG (and the "not really an + * accelerator" qtest) can handle these features, because Arm hardware + * virtualization can't virtualize them. + * + * Catch all the cases which might cause us to create more than one + * address space for the CPU (otherwise we will assert() later in + * cpu_address_space_init()). + */ + if (arm_feature(env, ARM_FEATURE_M)) { + error_setg(errp, + "Cannot enable %s when using an M-profile guest CPU", + current_accel_name()); + return; + } + if (cpu->has_el3) { + error_setg(errp, + "Cannot enable %s when guest CPU has EL3 enabled", + current_accel_name()); + return; + } + if (cpu->tag_memory) { + error_setg(errp, + "Cannot enable %s when guest CPUs has MTE enabled", + current_accel_name()); + return; + } + } + { uint64_t scale; @@ -1519,6 +1697,12 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) unset_feature(env, ARM_FEATURE_NEON); t = cpu->isar.id_aa64isar0; + t = FIELD_DP64(t, ID_AA64ISAR0, AES, 0); + t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 0); + t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 0); + t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 0); + t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 0); + t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 0); t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0); cpu->isar.id_aa64isar0 = t; @@ -1533,6 +1717,9 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) cpu->isar.id_aa64pfr0 = t; u = cpu->isar.id_isar5; + u = FIELD_DP32(u, ID_ISAR5, AES, 0); + u = FIELD_DP32(u, ID_ISAR5, SHA1, 0); + u = FIELD_DP32(u, ID_ISAR5, SHA2, 0); u = FIELD_DP32(u, ID_ISAR5, RDM, 0); u = FIELD_DP32(u, ID_ISAR5, VCMA, 0); cpu->isar.id_isar5 = u; @@ -1735,11 +1922,14 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) */ unset_feature(env, ARM_FEATURE_EL3); - /* Disable the security extension feature bits in the processor feature - * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12]. + /* + * Disable the security extension feature bits in the processor + * feature registers as well. 
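+ * These are ID_PFR1.Security, ID_DFR0.CopSDbg and ID_AA64PFR0.EL3,
+ * cleared with FIELD_DP32/FIELD_DP64 below.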
*/ - cpu->isar.id_pfr1 &= ~0xf0; - cpu->isar.id_aa64pfr0 &= ~0xf000; + cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1, ID_PFR1, SECURITY, 0); + cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPSDBG, 0); + cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0, + ID_AA64PFR0, EL3, 0); } if (!cpu->has_el2) { @@ -1770,12 +1960,14 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) } if (!arm_feature(env, ARM_FEATURE_EL2)) { - /* Disable the hypervisor feature bits in the processor feature - * registers if we don't have EL2. These are id_pfr1[15:12] and - * id_aa64pfr0_el1[11:8]. + /* + * Disable the hypervisor feature bits in the processor feature + * registers if we don't have EL2. */ - cpu->isar.id_aa64pfr0 &= ~0xf00; - cpu->isar.id_pfr1 &= ~0xf000; + cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0, + ID_AA64PFR0, EL2, 0); + cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1, + ID_PFR1, VIRTUALIZATION, 0); } #ifndef CONFIG_USER_ONLY @@ -1789,6 +1981,17 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) } #endif + if (tcg_enabled()) { + /* + * Don't report the Statistical Profiling Extension in the ID + * registers, because TCG doesn't implement it yet (not even a + * minimal stub version) and guests will fall over when they + * try to access the non-existent system registers for it. + */ + cpu->isar.id_aa64dfr0 = + FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMSVER, 0); + } + /* MPU can be configured out of a PMSA CPU either by setting has-mpu * to false or by setting pmsav7-dregion to 0. */ @@ -1947,7 +2150,6 @@ static ObjectClass *arm_cpu_class_by_name(const char *cpu_model) } static Property arm_cpu_properties[] = { - DEFINE_PROP_UINT32("psci-conduit", ARMCPU, psci_conduit, 0), DEFINE_PROP_UINT64("midr", ARMCPU, midr, 0), DEFINE_PROP_UINT64("mp-affinity", ARMCPU, mp_affinity, ARM64_AFFINITY_INVALID), @@ -1984,11 +2186,15 @@ static const struct SysemuCPUOps arm_sysemu_ops = { static const struct TCGCPUOps arm_tcg_ops = { .initialize = arm_translate_init, .synchronize_from_tb = arm_cpu_synchronize_from_tb, - .cpu_exec_interrupt = arm_cpu_exec_interrupt, - .tlb_fill = arm_cpu_tlb_fill, .debug_excp_handler = arm_debug_excp_handler, + .restore_state_to_opc = arm_restore_state_to_opc, -#if !defined(CONFIG_USER_ONLY) +#ifdef CONFIG_USER_ONLY + .record_sigsegv = arm_cpu_record_sigsegv, + .record_sigbus = arm_cpu_record_sigbus, +#else + .tlb_fill = arm_cpu_tlb_fill, + .cpu_exec_interrupt = arm_cpu_exec_interrupt, .do_interrupt = arm_cpu_do_interrupt, .do_transaction_failed = arm_cpu_do_transaction_failed, .do_unaligned_access = arm_cpu_do_unaligned_access, @@ -2015,6 +2221,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data) cc->has_work = arm_cpu_has_work; cc->dump_state = arm_cpu_dump_state; cc->set_pc = arm_cpu_set_pc; + cc->get_pc = arm_cpu_get_pc; cc->gdb_read_register = arm_cpu_gdb_read_register; cc->gdb_write_register = arm_cpu_gdb_write_register; #ifndef CONFIG_USER_ONLY @@ -2032,26 +2239,6 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data) #endif /* CONFIG_TCG */ } -#ifdef CONFIG_KVM -static void arm_host_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - kvm_arm_set_cpu_features_from_host(cpu); - if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - aarch64_add_sve_properties(obj); - } - arm_cpu_post_init(obj); -} - -static const TypeInfo host_arm_cpu_type_info = { - .name = TYPE_ARM_HOST_CPU, - .parent = TYPE_AARCH64_CPU, - .instance_init = arm_host_initfn, -}; - -#endif - static void 
arm_cpu_instance_init(Object *obj) { ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj); @@ -2099,10 +2286,6 @@ static const TypeInfo arm_cpu_type_info = { static void arm_cpu_register_types(void) { type_register_static(&arm_cpu_type_info); - -#ifdef CONFIG_KVM - type_register_static(&host_arm_cpu_type_info); -#endif } type_init(arm_cpu_register_types) diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 9f0a5f84d5..a9cd7178f8 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -21,6 +21,7 @@ #define ARM_CPU_H #include "kvm-consts.h" +#include "qemu/cpu-float.h" #include "hw/registerfields.h" #include "cpu-qom.h" #include "exec/cpu-defs.h" @@ -54,6 +55,8 @@ #define EXCP_LAZYFP 20 /* v7M fault during lazy FP stacking */ #define EXCP_LSERR 21 /* v8M LSERR SecureFault */ #define EXCP_UNALIGNED 22 /* v7M UNALIGNED UsageFault */ +#define EXCP_DIVBYZERO 23 /* v7M DIVBYZERO UsageFault */ +#define EXCP_VSERR 24 /* NB: add new EXCP_ defines to the array in arm_log_exception() too */ #define ARMV7M_EXCP_RESET 1 @@ -87,6 +90,7 @@ enum { #define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1 #define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2 #define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3 +#define CPU_INTERRUPT_VSERR CPU_INTERRUPT_TGT_INT_0 /* The usual mapping for an AArch64 system register to its AArch32 * counterpart is for the 32 bit world to have access to the lower @@ -94,7 +98,7 @@ enum { * therefore useful to be able to pass TCG the offset of the least * significant half of a uint64_t struct member. */ -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN #define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t)) #define offsetofhigh32(S, M) offsetof(S, M) #else @@ -162,12 +166,6 @@ typedef struct ARMGenericTimer { #define GTIMER_HYPVIRT 4 #define NUM_GTIMERS 5 -typedef struct { - uint64_t raw_tcr; - uint32_t mask; - uint32_t base_mask; -} TCR; - #define VTCR_NSW (1u << 29) #define VTCR_NSA (1u << 30) #define VSTCR_SW VTCR_NSW @@ -201,12 +199,8 @@ typedef struct { #ifdef TARGET_AARCH64 # define ARM_MAX_VQ 16 -void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp); -void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp); #else # define ARM_MAX_VQ 1 -static inline void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) { } -static inline void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp) { } #endif typedef struct ARMVectorReg { @@ -231,7 +225,9 @@ typedef struct CPUARMTBFlags { target_ulong flags2; } CPUARMTBFlags; -typedef struct CPUARMState { +typedef struct ARMMMUFaultInfo ARMMMUFaultInfo; + +typedef struct CPUArchState { /* Regs for current mode. */ uint32_t regs[16]; @@ -252,10 +248,12 @@ typedef struct CPUARMState { * nRW (also known as M[4]) is kept, inverted, in env->aarch64 * DAIF (exception masks) are kept in env->daif * BTYPE is kept in env->btype + * SM and ZA are kept in env->svcr * all other bits are stored in their correct places in env->pstate */ uint32_t pstate; - uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */ + bool aarch64; /* True if CPU is in aarch64 state; inverse of PSTATE.nRW */ + bool thumb; /* True if CPU is in thumb mode; cpsr[5] */ /* Cached TBFLAGS state. See below for which bits are included. */ CPUARMTBFlags hflags; @@ -282,10 +280,10 @@ typedef struct CPUARMState { uint32_t ZF; /* Z set if zero. */ uint32_t QF; /* 0 or 1 */ uint32_t GE; /* cpsr[19:16] */ - uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */ uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */ uint32_t btype; /* BTI branch type. spsr[11:10]. 
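With the TCR wrapper struct removed above, TCR_ELx values are kept as plain 64-bit registers (the tcr_el[] members change type in a later hunk of this header). Callers presumably switch from the wrapped field to the raw value, roughly as sketched here (illustrative, not code from the patch):

    /* before:  uint64_t tcr = env->cp15.tcr_el[1].raw_tcr;  */
    /* after:   uint64_t tcr = env->cp15.tcr_el[1];          */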
*/ uint64_t daif; /* exception masks, in the bits they are in PSTATE */ + uint64_t svcr; /* PSTATE.{SM,ZA} in the bits they are in SVCR */ uint64_t elr_el[4]; /* AArch64 exception link regs */ uint64_t sp_el[4]; /* AArch64 banked stack pointers */ @@ -337,9 +335,9 @@ typedef struct CPUARMState { uint64_t vttbr_el2; /* Virtualization Translation Table Base. */ uint64_t vsttbr_el2; /* Secure Virtualization Translation Table. */ /* MMU translation table base control. */ - TCR tcr_el[4]; - TCR vtcr_el2; /* Virtualization Translation Control. */ - TCR vstcr_el2; /* Secure Virtualization Translation Control. */ + uint64_t tcr_el[4]; + uint64_t vtcr_el2; /* Virtualization Translation Control. */ + uint64_t vstcr_el2; /* Secure Virtualization Translation Control. */ uint32_t c2_data; /* MPU data cacheable bits. */ uint32_t c2_insn; /* MPU instruction cacheable bits. */ union { /* MMU domain access control register @@ -356,6 +354,7 @@ typedef struct CPUARMState { uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */ uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */ uint64_t hcr_el2; /* Hypervisor configuration register */ + uint64_t hcrx_el2; /* Extended Hypervisor configuration register */ uint64_t scr_el3; /* Secure configuration register. */ union { /* Fault status registers. */ struct { @@ -379,7 +378,7 @@ typedef struct CPUARMState { union { /* Fault address registers. */ struct { uint64_t _unused_far0; -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN uint32_t ifar_ns; uint32_t dfar_ns; uint32_t ifar_s; @@ -416,7 +415,7 @@ typedef struct CPUARMState { uint64_t c9_pminten; /* perf monitor interrupt enables */ union { /* Memory attribute redirection */ struct { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN uint64_t _unused_mair_0; uint32_t mair1_ns; uint32_t mair0_ns; @@ -444,6 +443,7 @@ typedef struct CPUARMState { uint64_t vbar_el[4]; }; uint32_t mvbar; /* (monitor) vector base address register */ + uint64_t rvbar; /* rvbar sampled from rvbar property at reset */ struct { /* FCSE PID. */ uint32_t fcseidr_ns; uint32_t fcseidr_s; @@ -466,6 +466,7 @@ typedef struct CPUARMState { }; uint64_t tpidr_el[4]; }; + uint64_t tpidr2_el0; /* The secure banks of these registers don't map anywhere */ uint64_t tpidrurw_s; uint64_t tpidrprw_s; @@ -495,6 +496,7 @@ typedef struct CPUARMState { uint64_t dbgwcr[16]; /* watchpoint control registers */ uint64_t mdscr_el1; uint64_t oslsr_el1; /* OS Lock Status */ + uint64_t osdlr_el1; /* OS DoubleLock status */ uint64_t mdcr_el2; uint64_t mdcr_el3; /* Stores the architectural value of the counter *the last time it was @@ -520,6 +522,11 @@ typedef struct CPUARMState { uint64_t tfsr_el[4]; /* tfsre0_el1 is index 0. */ uint64_t gcr_el1; uint64_t rgsr_el1; + + /* Minimal RAS registers */ + uint64_t disr_el1; + uint64_t vdisr_el2; + uint64_t vsesr_el2; } cp15; struct { @@ -653,8 +660,8 @@ typedef struct CPUARMState { float_status standard_fp_status; float_status standard_fp_status_f16; - /* ZCR_EL[1-3] */ - uint64_t zcr_el[4]; + uint64_t zcr_el[4]; /* ZCR_EL[1-3] */ + uint64_t smcr_el[4]; /* SMCR_EL[1-3] */ } vfp; uint64_t exclusive_addr; uint64_t exclusive_val; @@ -676,6 +683,30 @@ typedef struct CPUARMState { ARMPACKey apdb; ARMPACKey apga; } keys; + + uint64_t scxtnum_el[4]; + + /* + * SME ZA storage -- 256 x 256 byte array, with bytes in host word order, + * as we do with vfp.zregs[]. This corresponds to the architectural ZA + * array, where ZA[N] is in the least-significant bytes of env->zarray[N]. 
+ * When SVL is less than the architectural maximum, the accessible + * storage is restricted, such that if the SVL is X bytes the guest can + * see only the bottom X elements of zarray[], and only the least + * significant X bytes of each element of the array. (In other words, + * the observable part is always square.) + * + * The ZA storage can also be considered as a set of square tiles of + * elements of different sizes. The mapping from tiles to the ZA array + * is architecturally defined, such that for tiles of elements of esz + * bytes, the Nth row (or "horizontal slice") of tile T is in + * ZA[T + N * esz]. Note that this means that each tile is not contiguous + * in the ZA storage, because its rows are striped through the ZA array. + * + * Because this is so large, keep this toward the end of the reset area, + * to keep the offsets into the rest of the structure smaller. + */ + ARMVectorReg zarray[ARM_MAX_VQ * 16]; #endif #if defined(CONFIG_USER_ONLY) @@ -686,6 +717,9 @@ typedef struct CPUARMState { struct CPUBreakpoint *cpu_breakpoint[16]; struct CPUWatchpoint *cpu_watchpoint[16]; + /* Optional fault info across tlb lookup. */ + ARMMMUFaultInfo *tlb_fi; + /* Fields up to this point are cleared by a CPU reset */ struct {} end_reset_fields; @@ -767,13 +801,26 @@ typedef enum ARMPSCIState { typedef struct ARMISARegisters ARMISARegisters; +/* + * In map, each set bit is a supported vector length of (bit-number + 1) * 16 + * bytes, i.e. each bit number + 1 is the vector length in quadwords. + * + * While processing properties during initialization, corresponding init bits + * are set for bits in sve_vq_map that have been set by properties. + * + * Bits set in supported represent valid vector lengths for the CPU type. + */ +typedef struct { + uint32_t map, init, supported; +} ARMVQMap; + /** * ARMCPU: * @env: #CPUARMState * * An ARM CPU core. */ -struct ARMCPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ @@ -933,6 +980,7 @@ struct ARMCPU { uint32_t id_mmfr2; uint32_t id_mmfr3; uint32_t id_mmfr4; + uint32_t id_mmfr5; uint32_t id_pfr0; uint32_t id_pfr1; uint32_t id_pfr2; @@ -940,7 +988,10 @@ struct ARMCPU { uint32_t mvfr1; uint32_t mvfr2; uint32_t id_dfr0; + uint32_t id_dfr1; uint32_t dbgdidr; + uint32_t dbgdevid; + uint32_t dbgdevid1; uint64_t id_aa64isar0; uint64_t id_aa64isar1; uint64_t id_aa64pfr0; @@ -951,6 +1002,8 @@ struct ARMCPU { uint64_t id_aa64dfr0; uint64_t id_aa64dfr1; uint64_t id_aa64zfr0; + uint64_t id_aa64smfr0; + uint64_t reset_pmcr_el0; } isar; uint64_t midr; uint32_t revidr; @@ -974,19 +1027,21 @@ struct ARMCPU { /* * Intermediate values used during property parsing. - * Once finalized, the values should be read from ID_AA64ISAR1. + * Once finalized, the values should be read from ID_AA64*. */ bool prop_pauth; bool prop_pauth_impdef; + bool prop_lpa2; /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */ uint32_t dcz_blocksize; - uint64_t rvbar; + uint64_t rvbar_prop; /* Property/input signals. */ /* Configurable aspects of GIC cpu interface (which is part of the CPU) */ int gic_num_lrs; /* number of list registers */ int gic_vpribits; /* number of virtual priority bits */ int gic_vprebits; /* number of virtual preemption bits */ + int gic_pribits; /* number of physical priority bits */ /* Whether the cfgend input is high (i.e. this CPU should reset into * big-endian mode). 
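A minimal sketch of the tile-to-row mapping spelled out in the ZA comment above; the helper name is hypothetical and not part of this patch, and it assumes the surrounding cpu.h types are in scope:

    /* Row N of the tile of esz-byte elements numbered T lives in
     * env->zarray[T + N * esz]; rows of one tile are therefore strided,
     * not contiguous, through the ZA array. */
    static inline ARMVectorReg *za_tile_row(CPUARMState *env,
                                            unsigned tile, unsigned row,
                                            unsigned esz)
    {
        return &env->zarray[tile + row * esz];
    }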
This setting isn't used directly: instead it modifies @@ -1009,19 +1064,11 @@ struct ARMCPU { #ifdef CONFIG_USER_ONLY /* Used to set the default vector length at process start. */ uint32_t sve_default_vq; + uint32_t sme_default_vq; #endif - /* - * In sve_vq_map each set bit is a supported vector length of - * (bit-number + 1) * 16 bytes, i.e. each bit number + 1 is the vector - * length in quadwords. - * - * While processing properties during initialization, corresponding - * sve_vq_init bits are set for bits in sve_vq_map that have been - * set by properties. - */ - DECLARE_BITMAP(sve_vq_map, ARM_MAX_VQ); - DECLARE_BITMAP(sve_vq_init, ARM_MAX_VQ); + ARMVQMap sve_vq; + ARMVQMap sme_vq; /* Generic timer counter frequency, in Hz */ uint64_t gt_cntfrq_hz; @@ -1035,11 +1082,10 @@ uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz); #ifndef CONFIG_USER_ONLY extern const VMStateDescription vmstate_arm_cpu; -#endif void arm_cpu_do_interrupt(CPUState *cpu); void arm_v7m_cpu_do_interrupt(CPUState *cpu); -bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req); +#endif /* !CONFIG_USER_ONLY */ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, MemTxAttrs *attrs); @@ -1061,9 +1107,9 @@ int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg); const char *arm_gdb_get_dynamic_xml(CPUState *cpu, const char *xmlname); int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, - int cpuid, void *opaque); + int cpuid, DumpState *s); int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, - int cpuid, void *opaque); + int cpuid, DumpState *s); #ifdef TARGET_AARCH64 int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); @@ -1071,7 +1117,7 @@ int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq); void aarch64_sve_change_el(CPUARMState *env, int old_el, int new_el, bool el0_a64); -void aarch64_add_sve_properties(Object *obj); +void arm_reset_sve_state(CPUARMState *env); /* * SVE registers are encoded in KVM's memory in an endianness-invariant format. @@ -1084,7 +1130,7 @@ void aarch64_add_sve_properties(Object *obj); */ static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr) { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN int i; for (i = 0; i < nr; ++i) { @@ -1102,7 +1148,6 @@ static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { } static inline void aarch64_sve_change_el(CPUARMState *env, int o, int n, bool a) { } -static inline void aarch64_add_sve_properties(Object *obj) { } #endif void aarch64_sync_32_to_64(CPUARMState *env); @@ -1110,19 +1155,28 @@ void aarch64_sync_64_to_32(CPUARMState *env); int fp_exception_el(CPUARMState *env, int cur_el); int sve_exception_el(CPUARMState *env, int cur_el); -uint32_t sve_zcr_len_for_el(CPUARMState *env, int el); +int sme_exception_el(CPUARMState *env, int cur_el); + +/** + * sve_vqm1_for_el_sm: + * @env: CPUARMState + * @el: exception level + * @sm: streaming mode + * + * Compute the current vector length for @el & @sm, in units of + * Quadwords Minus 1 -- the same scale used for ZCR_ELx.LEN. + * If @sm, compute for SVL, otherwise NVL. + */ +uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm); + +/* Likewise, but using @sm = PSTATE.SM. 
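For the ARMVQMap introduced above (now used for both sve_vq and sme_vq), bit (vq - 1) of 'map' set means a vector length of vq quadwords, i.e. vq * 16 bytes. An illustrative sketch with a hypothetical helper name:

    static inline bool vq_map_has(const ARMVQMap *m, unsigned vq)
    {
        /* vq is 1-based; bit 0 of map corresponds to vq == 1 (16 bytes). */
        return vq >= 1 && vq <= 32 && (m->map & (1u << (vq - 1)));
    }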
*/ +uint32_t sve_vqm1_for_el(CPUARMState *env, int el); static inline bool is_a64(CPUARMState *env) { return env->aarch64; } -/* you can call this signal handler from your SIGBUS and SIGSEGV - signal handlers to inform the virtual CPU of exceptions. non zero - is returned if the signal was handled by the virtual CPU. */ -int cpu_arm_signal_handler(int host_signum, void *pinfo, - void *puc); - /** * pmu_op_start/finish * @env: CPUARMState @@ -1200,6 +1254,7 @@ void pmu_init(ARMCPU *cpu); #define SCTLR_WXN (1U << 19) #define SCTLR_ST (1U << 20) /* up to ??, RAZ in v6 */ #define SCTLR_UWXN (1U << 20) /* v7 onward, AArch32 only */ +#define SCTLR_TSCXT (1U << 20) /* FEAT_CSV2_1p2, AArch64 only */ #define SCTLR_FI (1U << 21) /* up to v7, v8 RES0 */ #define SCTLR_IESB (1U << 21) /* v8.2-IESB, AArch64 only */ #define SCTLR_U (1U << 22) /* up to v6, RAO in v7 */ @@ -1229,15 +1284,70 @@ void pmu_init(ARMCPU *cpu); #define SCTLR_ATA0 (1ULL << 42) /* v8.5-MemTag */ #define SCTLR_ATA (1ULL << 43) /* v8.5-MemTag */ #define SCTLR_DSSBS_64 (1ULL << 44) /* v8.5, AArch64 only */ +#define SCTLR_TWEDEn (1ULL << 45) /* FEAT_TWED */ +#define SCTLR_TWEDEL MAKE_64_MASK(46, 4) /* FEAT_TWED */ +#define SCTLR_TMT0 (1ULL << 50) /* FEAT_TME */ +#define SCTLR_TMT (1ULL << 51) /* FEAT_TME */ +#define SCTLR_TME0 (1ULL << 52) /* FEAT_TME */ +#define SCTLR_TME (1ULL << 53) /* FEAT_TME */ +#define SCTLR_EnASR (1ULL << 54) /* FEAT_LS64_V */ +#define SCTLR_EnAS0 (1ULL << 55) /* FEAT_LS64_ACCDATA */ +#define SCTLR_EnALS (1ULL << 56) /* FEAT_LS64 */ +#define SCTLR_EPAN (1ULL << 57) /* FEAT_PAN3 */ +#define SCTLR_EnTP2 (1ULL << 60) /* FEAT_SME */ +#define SCTLR_NMI (1ULL << 61) /* FEAT_NMI */ +#define SCTLR_SPINTMASK (1ULL << 62) /* FEAT_NMI */ +#define SCTLR_TIDCP (1ULL << 63) /* FEAT_TIDCP1 */ -#define CPTR_TCPAC (1U << 31) -#define CPTR_TTA (1U << 20) -#define CPTR_TFP (1U << 10) -#define CPTR_TZ (1U << 8) /* CPTR_EL2 */ -#define CPTR_EZ (1U << 8) /* CPTR_EL3 */ +/* Bit definitions for CPACR (AArch32 only) */ +FIELD(CPACR, CP10, 20, 2) +FIELD(CPACR, CP11, 22, 2) +FIELD(CPACR, TRCDIS, 28, 1) /* matches CPACR_EL1.TTA */ +FIELD(CPACR, D32DIS, 30, 1) /* up to v7; RAZ in v8 */ +FIELD(CPACR, ASEDIS, 31, 1) +/* Bit definitions for CPACR_EL1 (AArch64 only) */ +FIELD(CPACR_EL1, ZEN, 16, 2) +FIELD(CPACR_EL1, FPEN, 20, 2) +FIELD(CPACR_EL1, SMEN, 24, 2) +FIELD(CPACR_EL1, TTA, 28, 1) /* matches CPACR.TRCDIS */ + +/* Bit definitions for HCPTR (AArch32 only) */ +FIELD(HCPTR, TCP10, 10, 1) +FIELD(HCPTR, TCP11, 11, 1) +FIELD(HCPTR, TASE, 15, 1) +FIELD(HCPTR, TTA, 20, 1) +FIELD(HCPTR, TAM, 30, 1) /* matches CPTR_EL2.TAM */ +FIELD(HCPTR, TCPAC, 31, 1) /* matches CPTR_EL2.TCPAC */ + +/* Bit definitions for CPTR_EL2 (AArch64 only) */ +FIELD(CPTR_EL2, TZ, 8, 1) /* !E2H */ +FIELD(CPTR_EL2, TFP, 10, 1) /* !E2H, matches HCPTR.TCP10 */ +FIELD(CPTR_EL2, TSM, 12, 1) /* !E2H */ +FIELD(CPTR_EL2, ZEN, 16, 2) /* E2H */ +FIELD(CPTR_EL2, FPEN, 20, 2) /* E2H */ +FIELD(CPTR_EL2, SMEN, 24, 2) /* E2H */ +FIELD(CPTR_EL2, TTA, 28, 1) +FIELD(CPTR_EL2, TAM, 30, 1) /* matches HCPTR.TAM */ +FIELD(CPTR_EL2, TCPAC, 31, 1) /* matches HCPTR.TCPAC */ + +/* Bit definitions for CPTR_EL3 (AArch64 only) */ +FIELD(CPTR_EL3, EZ, 8, 1) +FIELD(CPTR_EL3, TFP, 10, 1) +FIELD(CPTR_EL3, ESM, 12, 1) +FIELD(CPTR_EL3, TTA, 20, 1) +FIELD(CPTR_EL3, TAM, 30, 1) +FIELD(CPTR_EL3, TCPAC, 31, 1) + +#define MDCR_MTPME (1U << 28) +#define MDCR_TDCC (1U << 27) +#define MDCR_HLP (1U << 26) /* MDCR_EL2 */ +#define MDCR_SCCD (1U << 23) /* MDCR_EL3 */ +#define MDCR_HCCD (1U << 23) /* MDCR_EL2 */ #define 
MDCR_EPMAD (1U << 21) #define MDCR_EDAD (1U << 20) +#define MDCR_TTRF (1U << 19) +#define MDCR_STE (1U << 18) /* MDCR_EL3 */ #define MDCR_SPME (1U << 17) /* MDCR_EL3 */ #define MDCR_HPMD (1U << 17) /* MDCR_EL2 */ #define MDCR_SDD (1U << 16) @@ -1252,7 +1362,9 @@ void pmu_init(ARMCPU *cpu); #define MDCR_HPMN (0x1fU) /* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */ -#define SDCR_VALID_MASK (MDCR_EPMAD | MDCR_EDAD | MDCR_SPME | MDCR_SPD) +#define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \ + MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \ + MDCR_STE | MDCR_SPME | MDCR_SPD) #define CPSR_M (0x1fU) #define CPSR_T (1U << 5) @@ -1316,6 +1428,25 @@ void pmu_init(ARMCPU *cpu); #define TTBCR_SH1 (1U << 28) #define TTBCR_EAE (1U << 31) +FIELD(VTCR, T0SZ, 0, 6) +FIELD(VTCR, SL0, 6, 2) +FIELD(VTCR, IRGN0, 8, 2) +FIELD(VTCR, ORGN0, 10, 2) +FIELD(VTCR, SH0, 12, 2) +FIELD(VTCR, TG0, 14, 2) +FIELD(VTCR, PS, 16, 3) +FIELD(VTCR, VS, 19, 1) +FIELD(VTCR, HA, 21, 1) +FIELD(VTCR, HD, 22, 1) +FIELD(VTCR, HWU59, 25, 1) +FIELD(VTCR, HWU60, 26, 1) +FIELD(VTCR, HWU61, 27, 1) +FIELD(VTCR, HWU62, 28, 1) +FIELD(VTCR, NSW, 29, 1) +FIELD(VTCR, NSA, 30, 1) +FIELD(VTCR, DS, 32, 1) +FIELD(VTCR, SL2, 33, 1) + /* Bit definitions for ARMv8 SPSR (PSTATE) format. * Only these are valid when in AArch64 mode; in * AArch32 mode SPSRs are basically CPSR-format. @@ -1351,6 +1482,14 @@ void pmu_init(ARMCPU *cpu); #define PSTATE_MODE_EL1t 4 #define PSTATE_MODE_EL0t 0 +/* PSTATE bits that are accessed via SVCR and not stored in SPSR_ELx. */ +FIELD(SVCR, SM, 0, 1) +FIELD(SVCR, ZA, 1, 1) + +/* Fields for SMCR_ELx. */ +FIELD(SMCR, LEN, 0, 4) +FIELD(SMCR, FA64, 31, 1) + /* Write a new value to v7m.exception, thus transitioning into or out * of Handler mode; this may result in a change of active stack pointer. */ @@ -1393,11 +1532,17 @@ uint32_t cpsr_read(CPUARMState *env); typedef enum CPSRWriteType { CPSRWriteByInstr = 0, /* from guest MSR or CPS */ CPSRWriteExceptionReturn = 1, /* from guest exception return insn */ - CPSRWriteRaw = 2, /* trust values, do not switch reg banks */ + CPSRWriteRaw = 2, + /* trust values, no reg bank switch, no hflags rebuild */ CPSRWriteByGDBStub = 3, /* from the GDB stub */ } CPSRWriteType; -/* Set the CPSR. Note that some bits of mask must be all-set or all-clear.*/ +/* + * Set the CPSR. Note that some bits of mask must be all-set or all-clear. + * This will do an arm_rebuild_hflags() if any of the bits in @mask + * correspond to TB flags bits cached in the hflags, unless @write_type + * is CPSRWriteRaw. 
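Illustrative use of the SVCR and SMCR fields defined above, together with the env->svcr and vfp.smcr_el[] state added earlier in this header; a sketch using the generic FIELD_EX64() accessor from hw/registerfields.h, not code from this patch:

    bool sm  = FIELD_EX64(env->svcr, SVCR, SM);   /* PSTATE.SM */
    bool za  = FIELD_EX64(env->svcr, SVCR, ZA);   /* PSTATE.ZA */
    int  len = FIELD_EX64(env->vfp.smcr_el[1], SMCR, LEN);
    /* LEN is on the same "quadwords minus 1" scale as ZCR_ELx.LEN. */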
+ */ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, CPSRWriteType write_type); @@ -1509,32 +1654,60 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) #define HCR_TWEDEN (1ULL << 59) #define HCR_TWEDEL MAKE_64BIT_MASK(60, 4) +#define HCRX_ENAS0 (1ULL << 0) +#define HCRX_ENALS (1ULL << 1) +#define HCRX_ENASR (1ULL << 2) +#define HCRX_FNXS (1ULL << 3) +#define HCRX_FGTNXS (1ULL << 4) +#define HCRX_SMPME (1ULL << 5) +#define HCRX_TALLINT (1ULL << 6) +#define HCRX_VINMI (1ULL << 7) +#define HCRX_VFNMI (1ULL << 8) +#define HCRX_CMOW (1ULL << 9) +#define HCRX_MCE2 (1ULL << 10) +#define HCRX_MSCEN (1ULL << 11) + #define HPFAR_NS (1ULL << 63) -#define SCR_NS (1U << 0) -#define SCR_IRQ (1U << 1) -#define SCR_FIQ (1U << 2) -#define SCR_EA (1U << 3) -#define SCR_FW (1U << 4) -#define SCR_AW (1U << 5) -#define SCR_NET (1U << 6) -#define SCR_SMD (1U << 7) -#define SCR_HCE (1U << 8) -#define SCR_SIF (1U << 9) -#define SCR_RW (1U << 10) -#define SCR_ST (1U << 11) -#define SCR_TWI (1U << 12) -#define SCR_TWE (1U << 13) -#define SCR_TLOR (1U << 14) -#define SCR_TERR (1U << 15) -#define SCR_APK (1U << 16) -#define SCR_API (1U << 17) -#define SCR_EEL2 (1U << 18) -#define SCR_EASE (1U << 19) -#define SCR_NMEA (1U << 20) -#define SCR_FIEN (1U << 21) -#define SCR_ENSCXT (1U << 25) -#define SCR_ATA (1U << 26) +#define SCR_NS (1ULL << 0) +#define SCR_IRQ (1ULL << 1) +#define SCR_FIQ (1ULL << 2) +#define SCR_EA (1ULL << 3) +#define SCR_FW (1ULL << 4) +#define SCR_AW (1ULL << 5) +#define SCR_NET (1ULL << 6) +#define SCR_SMD (1ULL << 7) +#define SCR_HCE (1ULL << 8) +#define SCR_SIF (1ULL << 9) +#define SCR_RW (1ULL << 10) +#define SCR_ST (1ULL << 11) +#define SCR_TWI (1ULL << 12) +#define SCR_TWE (1ULL << 13) +#define SCR_TLOR (1ULL << 14) +#define SCR_TERR (1ULL << 15) +#define SCR_APK (1ULL << 16) +#define SCR_API (1ULL << 17) +#define SCR_EEL2 (1ULL << 18) +#define SCR_EASE (1ULL << 19) +#define SCR_NMEA (1ULL << 20) +#define SCR_FIEN (1ULL << 21) +#define SCR_ENSCXT (1ULL << 25) +#define SCR_ATA (1ULL << 26) +#define SCR_FGTEN (1ULL << 27) +#define SCR_ECVEN (1ULL << 28) +#define SCR_TWEDEN (1ULL << 29) +#define SCR_TWEDEL MAKE_64BIT_MASK(30, 4) +#define SCR_TME (1ULL << 34) +#define SCR_AMVOFFEN (1ULL << 35) +#define SCR_ENAS0 (1ULL << 36) +#define SCR_ADEN (1ULL << 37) +#define SCR_HXEN (1ULL << 38) +#define SCR_TRNDR (1ULL << 40) +#define SCR_ENTP2 (1ULL << 41) +#define SCR_GPF (1ULL << 48) + +#define HSTR_TTEE (1 << 16) +#define HSTR_TJDBX (1 << 17) /* Return the current FPSCR value. 
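The SCR_* constants above switch from 1U to 1ULL because SCR_EL3 now has assignable bits above bit 31; a 32-bit constant cannot express those shifts. A short illustration (not from the patch):

    /* (1U << 40) shifts a 32-bit value by more than its width: undefined
     * behaviour in C.  (1ULL << 40) is a well-defined 64-bit constant, as
     * used for SCR_TRNDR above. */
    uint64_t scr = SCR_HXEN | SCR_ENTP2;   /* bits 38 and 41 need 64-bit masks */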
*/ uint32_t vfp_get_fpscr(CPUARMState *env); @@ -1921,6 +2094,7 @@ FIELD(ID_MMFR4, CCIDX, 24, 4) FIELD(ID_MMFR4, EVT, 28, 4) FIELD(ID_MMFR5, ETS, 0, 4) +FIELD(ID_MMFR5, NTLBPA, 4, 4) FIELD(ID_PFR0, STATE0, 0, 4) FIELD(ID_PFR0, STATE1, 4, 4) @@ -1973,6 +2147,16 @@ FIELD(ID_AA64ISAR1, SPECRES, 40, 4) FIELD(ID_AA64ISAR1, BF16, 44, 4) FIELD(ID_AA64ISAR1, DGH, 48, 4) FIELD(ID_AA64ISAR1, I8MM, 52, 4) +FIELD(ID_AA64ISAR1, XS, 56, 4) +FIELD(ID_AA64ISAR1, LS64, 60, 4) + +FIELD(ID_AA64ISAR2, WFXT, 0, 4) +FIELD(ID_AA64ISAR2, RPRES, 4, 4) +FIELD(ID_AA64ISAR2, GPA3, 8, 4) +FIELD(ID_AA64ISAR2, APA3, 12, 4) +FIELD(ID_AA64ISAR2, MOPS, 16, 4) +FIELD(ID_AA64ISAR2, BC, 20, 4) +FIELD(ID_AA64ISAR2, PAC_FRAC, 24, 4) FIELD(ID_AA64PFR0, EL0, 0, 4) FIELD(ID_AA64PFR0, EL1, 4, 4) @@ -1995,6 +2179,10 @@ FIELD(ID_AA64PFR1, SSBS, 4, 4) FIELD(ID_AA64PFR1, MTE, 8, 4) FIELD(ID_AA64PFR1, RAS_FRAC, 12, 4) FIELD(ID_AA64PFR1, MPAM_FRAC, 16, 4) +FIELD(ID_AA64PFR1, SME, 24, 4) +FIELD(ID_AA64PFR1, RNDR_TRAP, 28, 4) +FIELD(ID_AA64PFR1, CSV2_FRAC, 32, 4) +FIELD(ID_AA64PFR1, NMI, 36, 4) FIELD(ID_AA64MMFR0, PARANGE, 0, 4) FIELD(ID_AA64MMFR0, ASIDBITS, 4, 4) @@ -2021,6 +2209,11 @@ FIELD(ID_AA64MMFR1, SPECSEI, 24, 4) FIELD(ID_AA64MMFR1, XNX, 28, 4) FIELD(ID_AA64MMFR1, TWED, 32, 4) FIELD(ID_AA64MMFR1, ETS, 36, 4) +FIELD(ID_AA64MMFR1, HCX, 40, 4) +FIELD(ID_AA64MMFR1, AFP, 44, 4) +FIELD(ID_AA64MMFR1, NTLBPA, 48, 4) +FIELD(ID_AA64MMFR1, TIDCP1, 52, 4) +FIELD(ID_AA64MMFR1, CMOW, 56, 4) FIELD(ID_AA64MMFR2, CNP, 0, 4) FIELD(ID_AA64MMFR2, UAO, 4, 4) @@ -2047,7 +2240,10 @@ FIELD(ID_AA64DFR0, CTX_CMPS, 28, 4) FIELD(ID_AA64DFR0, PMSVER, 32, 4) FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4) FIELD(ID_AA64DFR0, TRACEFILT, 40, 4) +FIELD(ID_AA64DFR0, TRACEBUFFER, 44, 4) FIELD(ID_AA64DFR0, MTPMU, 48, 4) +FIELD(ID_AA64DFR0, BRBE, 52, 4) +FIELD(ID_AA64DFR0, HPMN0, 60, 4) FIELD(ID_AA64ZFR0, SVEVER, 0, 4) FIELD(ID_AA64ZFR0, AES, 4, 4) @@ -2059,6 +2255,15 @@ FIELD(ID_AA64ZFR0, I8MM, 44, 4) FIELD(ID_AA64ZFR0, F32MM, 52, 4) FIELD(ID_AA64ZFR0, F64MM, 56, 4) +FIELD(ID_AA64SMFR0, F32F32, 32, 1) +FIELD(ID_AA64SMFR0, B16F32, 34, 1) +FIELD(ID_AA64SMFR0, F16F32, 35, 1) +FIELD(ID_AA64SMFR0, I8I32, 36, 4) +FIELD(ID_AA64SMFR0, F64F64, 48, 1) +FIELD(ID_AA64SMFR0, I16I64, 52, 4) +FIELD(ID_AA64SMFR0, SMEVER, 56, 4) +FIELD(ID_AA64SMFR0, FA64, 63, 1) + FIELD(ID_DFR0, COPDBG, 0, 4) FIELD(ID_DFR0, COPSDBG, 4, 4) FIELD(ID_DFR0, MMAPDBG, 8, 4) @@ -2069,6 +2274,7 @@ FIELD(ID_DFR0, PERFMON, 24, 4) FIELD(ID_DFR0, TRACEFILT, 28, 4) FIELD(ID_DFR1, MTPMU, 0, 4) +FIELD(ID_DFR1, HPMN0, 4, 4) FIELD(DBGDIDR, SE_IMP, 12, 1) FIELD(DBGDIDR, NSUHD_IMP, 14, 1) @@ -2077,6 +2283,15 @@ FIELD(DBGDIDR, CTX_CMPS, 20, 4) FIELD(DBGDIDR, BRPS, 24, 4) FIELD(DBGDIDR, WRPS, 28, 4) +FIELD(DBGDEVID, PCSAMPLE, 0, 4) +FIELD(DBGDEVID, WPADDRMASK, 4, 4) +FIELD(DBGDEVID, BPADDRMASK, 8, 4) +FIELD(DBGDEVID, VECTORCATCH, 12, 4) +FIELD(DBGDEVID, VIRTEXTNS, 16, 4) +FIELD(DBGDEVID, DOUBLELOCK, 20, 4) +FIELD(DBGDEVID, AUXREGS, 24, 4) +FIELD(DBGDEVID, CIDMASK, 28, 4) + FIELD(MVFR0, SIMDREG, 0, 4) FIELD(MVFR0, FPSP, 4, 4) FIELD(MVFR0, FPDP, 8, 4) @@ -2192,6 +2407,9 @@ static inline bool arm_is_el3_or_mon(CPUARMState *env) /* Return true if the processor is in secure state */ static inline bool arm_is_secure(CPUARMState *env) { + if (arm_feature(env, ARM_FEATURE_M)) { + return env->v7m.secure; + } if (arm_is_el3_or_mon(env)) { return true; } @@ -2202,15 +2420,15 @@ static inline bool arm_is_secure(CPUARMState *env) * Return true if the current security state has AArch64 EL2 or AArch32 Hyp. 
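The new ID register fields above are consumed by feature-test helpers in the style of the isar_feature_* functions later in this header. A sketch for the SME bit, written to mirror how the existing helpers look (the actual helper is not shown in this hunk, so treat it as an assumption):

    static inline bool isar_feature_aa64_sme(const ARMISARegisters *id)
    {
        return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SME) != 0;
    }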
* This corresponds to the pseudocode EL2Enabled() */ +static inline bool arm_is_el2_enabled_secstate(CPUARMState *env, bool secure) +{ + return arm_feature(env, ARM_FEATURE_EL2) + && (!secure || (env->cp15.scr_el3 & SCR_EEL2)); +} + static inline bool arm_is_el2_enabled(CPUARMState *env) { - if (arm_feature(env, ARM_FEATURE_EL2)) { - if (arm_is_secure_below_el3(env)) { - return (env->cp15.scr_el3 & SCR_EEL2) != 0; - } - return true; - } - return false; + return arm_is_el2_enabled_secstate(env, arm_is_secure_below_el3(env)); } #else @@ -2224,6 +2442,11 @@ static inline bool arm_is_secure(CPUARMState *env) return false; } +static inline bool arm_is_el2_enabled_secstate(CPUARMState *env, bool secure) +{ + return false; +} + static inline bool arm_is_el2_enabled(CPUARMState *env) { return false; @@ -2236,7 +2459,9 @@ static inline bool arm_is_el2_enabled(CPUARMState *env) * "for all purposes other than a direct read or write access of HCR_EL2." * Not included here is HCR_RW. */ +uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, bool secure); uint64_t arm_hcr_el2_eff(CPUARMState *env); +uint64_t arm_hcrx_el2_eff(CPUARMState *env); /* Return true if the specified exception level is running in AArch64 state. */ static inline bool arm_el_is_aa64(CPUARMState *env, int el) @@ -2532,144 +2757,6 @@ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid) return kvmid; } -/* ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a - * special-behaviour cp reg and bits [11..8] indicate what behaviour - * it has. Otherwise it is a simple cp reg, where CONST indicates that - * TCG can assume the value to be constant (ie load at translate time) - * and 64BIT indicates a 64 bit wide coprocessor register. SUPPRESS_TB_END - * indicates that the TB should not be ended after a write to this register - * (the default is that the TB ends after cp writes). OVERRIDE permits - * a register definition to override a previous definition for the - * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the - * old must have the OVERRIDE bit set. - * ALIAS indicates that this register is an alias view of some underlying - * state which is also visible via another register, and that the other - * register is handling migration and reset; registers marked ALIAS will not be - * migrated but may have their state set by syncing of register state from KVM. - * NO_RAW indicates that this register has no underlying state and does not - * support raw access for state saving/loading; it will not be used for either - * migration or KVM state synchronization. (Typically this is for "registers" - * which are actually used as instructions for cache maintenance and so on.) - * IO indicates that this register does I/O and therefore its accesses - * need to be marked with gen_io_start() and also end the TB. In particular, - * registers which implement clocks or timers require this. - * RAISES_EXC is for when the read or write hook might raise an exception; - * the generated code will synchronize the CPU state before calling the hook - * so that it is safe for the hook to call raise_exception(). - * NEWEL is for writes to registers that might change the exception - * level - typically on older ARM chips. For those cases we need to - * re-read the new el when recomputing the translation flags. 
- */ -#define ARM_CP_SPECIAL 0x0001 -#define ARM_CP_CONST 0x0002 -#define ARM_CP_64BIT 0x0004 -#define ARM_CP_SUPPRESS_TB_END 0x0008 -#define ARM_CP_OVERRIDE 0x0010 -#define ARM_CP_ALIAS 0x0020 -#define ARM_CP_IO 0x0040 -#define ARM_CP_NO_RAW 0x0080 -#define ARM_CP_NOP (ARM_CP_SPECIAL | 0x0100) -#define ARM_CP_WFI (ARM_CP_SPECIAL | 0x0200) -#define ARM_CP_NZCV (ARM_CP_SPECIAL | 0x0300) -#define ARM_CP_CURRENTEL (ARM_CP_SPECIAL | 0x0400) -#define ARM_CP_DC_ZVA (ARM_CP_SPECIAL | 0x0500) -#define ARM_CP_DC_GVA (ARM_CP_SPECIAL | 0x0600) -#define ARM_CP_DC_GZVA (ARM_CP_SPECIAL | 0x0700) -#define ARM_LAST_SPECIAL ARM_CP_DC_GZVA -#define ARM_CP_FPU 0x1000 -#define ARM_CP_SVE 0x2000 -#define ARM_CP_NO_GDB 0x4000 -#define ARM_CP_RAISES_EXC 0x8000 -#define ARM_CP_NEWEL 0x10000 -/* Used only as a terminator for ARMCPRegInfo lists */ -#define ARM_CP_SENTINEL 0xfffff -/* Mask of only the flag bits in a type field */ -#define ARM_CP_FLAG_MASK 0x1f0ff - -/* Valid values for ARMCPRegInfo state field, indicating which of - * the AArch32 and AArch64 execution states this register is visible in. - * If the reginfo doesn't explicitly specify then it is AArch32 only. - * If the reginfo is declared to be visible in both states then a second - * reginfo is synthesised for the AArch32 view of the AArch64 register, - * such that the AArch32 view is the lower 32 bits of the AArch64 one. - * Note that we rely on the values of these enums as we iterate through - * the various states in some places. - */ -enum { - ARM_CP_STATE_AA32 = 0, - ARM_CP_STATE_AA64 = 1, - ARM_CP_STATE_BOTH = 2, -}; - -/* ARM CP register secure state flags. These flags identify security state - * attributes for a given CP register entry. - * The existence of both or neither secure and non-secure flags indicates that - * the register has both a secure and non-secure hash entry. A single one of - * these flags causes the register to only be hashed for the specified - * security state. - * Although definitions may have any combination of the S/NS bits, each - * registered entry will only have one to identify whether the entry is secure - * or non-secure. - */ -enum { - ARM_CP_SECSTATE_S = (1 << 0), /* bit[0]: Secure state register */ - ARM_CP_SECSTATE_NS = (1 << 1), /* bit[1]: Non-secure state register */ -}; - -/* Return true if cptype is a valid type field. This is used to try to - * catch errors where the sentinel has been accidentally left off the end - * of a list of registers. - */ -static inline bool cptype_valid(int cptype) -{ - return ((cptype & ~ARM_CP_FLAG_MASK) == 0) - || ((cptype & ARM_CP_SPECIAL) && - ((cptype & ~ARM_CP_FLAG_MASK) <= ARM_LAST_SPECIAL)); -} - -/* Access rights: - * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM - * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and - * PL2 (hyp). The other level which has Read and Write bits is Secure PL1 - * (ie any of the privileged modes in Secure state, or Monitor mode). - * If a register is accessible in one privilege level it's always accessible - * in higher privilege levels too. Since "Secure PL1" also follows this rule - * (ie anything visible in PL2 is visible in S-PL1, some things are only - * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the - * terminology a little and call this PL3. - * In AArch64 things are somewhat simpler as the PLx bits line up exactly - * with the ELx exception levels. 
- * - * If access permissions for a register are more complex than can be - * described with these bits, then use a laxer set of restrictions, and - * do the more restrictive/complex check inside a helper function. - */ -#define PL3_R 0x80 -#define PL3_W 0x40 -#define PL2_R (0x20 | PL3_R) -#define PL2_W (0x10 | PL3_W) -#define PL1_R (0x08 | PL2_R) -#define PL1_W (0x04 | PL2_W) -#define PL0_R (0x02 | PL1_R) -#define PL0_W (0x01 | PL1_W) - -/* - * For user-mode some registers are accessible to EL0 via a kernel - * trap-and-emulate ABI. In this case we define the read permissions - * as actually being PL0_R. However some bits of any given register - * may still be masked. - */ -#ifdef CONFIG_USER_ONLY -#define PL0U_R PL0_R -#else -#define PL0U_R PL1_R -#endif - -#define PL3_RW (PL3_R | PL3_W) -#define PL2_RW (PL2_R | PL2_W) -#define PL1_RW (PL1_R | PL1_W) -#define PL0_RW (PL0_R | PL0_W) - /* Return the highest implemented Exception Level */ static inline int arm_highest_el(CPUARMState *env) { @@ -2721,241 +2808,6 @@ static inline int arm_current_el(CPUARMState *env) } } -typedef struct ARMCPRegInfo ARMCPRegInfo; - -typedef enum CPAccessResult { - /* Access is permitted */ - CP_ACCESS_OK = 0, - /* Access fails due to a configurable trap or enable which would - * result in a categorized exception syndrome giving information about - * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6, - * 0xc or 0x18). The exception is taken to the usual target EL (EL1 or - * PL1 if in EL0, otherwise to the current EL). - */ - CP_ACCESS_TRAP = 1, - /* Access fails and results in an exception syndrome 0x0 ("uncategorized"). - * Note that this is not a catch-all case -- the set of cases which may - * result in this failure is specifically defined by the architecture. - */ - CP_ACCESS_TRAP_UNCATEGORIZED = 2, - /* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */ - CP_ACCESS_TRAP_EL2 = 3, - CP_ACCESS_TRAP_EL3 = 4, - /* As CP_ACCESS_UNCATEGORIZED, but for traps directly to EL2 or EL3 */ - CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5, - CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6, - /* Access fails and results in an exception syndrome for an FP access, - * trapped directly to EL2 or EL3 - */ - CP_ACCESS_TRAP_FP_EL2 = 7, - CP_ACCESS_TRAP_FP_EL3 = 8, -} CPAccessResult; - -/* Access functions for coprocessor registers. These cannot fail and - * may not raise exceptions. - */ -typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque); -typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque, - uint64_t value); -/* Access permission check functions for coprocessor registers. */ -typedef CPAccessResult CPAccessFn(CPUARMState *env, - const ARMCPRegInfo *opaque, - bool isread); -/* Hook function for register reset */ -typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque); - -#define CP_ANY 0xff - -/* Definition of an ARM coprocessor register */ -struct ARMCPRegInfo { - /* Name of register (useful mainly for debugging, need not be unique) */ - const char *name; - /* Location of register: coprocessor number and (crn,crm,opc1,opc2) - * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a - * 'wildcard' field -- any value of that field in the MRC/MCR insn - * will be decoded to this register. The register read and write - * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2 - * used by the program, so it is possible to register a wildcard and - * then behave differently on read/write if necessary. 
- * For 64 bit registers, only crm and opc1 are relevant; crn and opc2 - * must both be zero. - * For AArch64-visible registers, opc0 is also used. - * Since there are no "coprocessors" in AArch64, cp is purely used as a - * way to distinguish (for KVM's benefit) guest-visible system registers - * from demuxed ones provided to preserve the "no side effects on - * KVM register read/write from QEMU" semantics. cp==0x13 is guest - * visible (to match KVM's encoding); cp==0 will be converted to - * cp==0x13 when the ARMCPRegInfo is registered, for convenience. - */ - uint8_t cp; - uint8_t crn; - uint8_t crm; - uint8_t opc0; - uint8_t opc1; - uint8_t opc2; - /* Execution state in which this register is visible: ARM_CP_STATE_* */ - int state; - /* Register type: ARM_CP_* bits/values */ - int type; - /* Access rights: PL*_[RW] */ - int access; - /* Security state: ARM_CP_SECSTATE_* bits/values */ - int secure; - /* The opaque pointer passed to define_arm_cp_regs_with_opaque() when - * this register was defined: can be used to hand data through to the - * register read/write functions, since they are passed the ARMCPRegInfo*. - */ - void *opaque; - /* Value of this register, if it is ARM_CP_CONST. Otherwise, if - * fieldoffset is non-zero, the reset value of the register. - */ - uint64_t resetvalue; - /* Offset of the field in CPUARMState for this register. - * - * This is not needed if either: - * 1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs - * 2. both readfn and writefn are specified - */ - ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */ - - /* Offsets of the secure and non-secure fields in CPUARMState for the - * register if it is banked. These fields are only used during the static - * registration of a register. During hashing the bank associated - * with a given security state is copied to fieldoffset which is used from - * there on out. - * - * It is expected that register definitions use either fieldoffset or - * bank_fieldoffsets in the definition but not both. It is also expected - * that both bank offsets are set when defining a banked register. This - * use indicates that a register is banked. - */ - ptrdiff_t bank_fieldoffsets[2]; - - /* Function for making any access checks for this register in addition to - * those specified by the 'access' permissions bits. If NULL, no extra - * checks required. The access check is performed at runtime, not at - * translate time. - */ - CPAccessFn *accessfn; - /* Function for handling reads of this register. If NULL, then reads - * will be done by loading from the offset into CPUARMState specified - * by fieldoffset. - */ - CPReadFn *readfn; - /* Function for handling writes of this register. If NULL, then writes - * will be done by writing to the offset into CPUARMState specified - * by fieldoffset. - */ - CPWriteFn *writefn; - /* Function for doing a "raw" read; used when we need to copy - * coprocessor state to the kernel for KVM or out for - * migration. This only needs to be provided if there is also a - * readfn and it has side effects (for instance clear-on-read bits). - */ - CPReadFn *raw_readfn; - /* Function for doing a "raw" write; used when we need to copy KVM - * kernel coprocessor state into userspace, or for inbound - * migration. This only needs to be provided if there is also a - * writefn and it masks out "unwritable" bits or has write-one-to-clear - * or similar behaviour. - */ - CPWriteFn *raw_writefn; - /* Function for resetting the register. 
If NULL, then reset will be done - * by writing resetvalue to the field specified in fieldoffset. If - * fieldoffset is 0 then no reset will be done. - */ - CPResetFn *resetfn; - - /* - * "Original" writefn and readfn. - * For ARMv8.1-VHE register aliases, we overwrite the read/write - * accessor functions of various EL1/EL0 to perform the runtime - * check for which sysreg should actually be modified, and then - * forwards the operation. Before overwriting the accessors, - * the original function is copied here, so that accesses that - * really do go to the EL1/EL0 version proceed normally. - * (The corresponding EL2 register is linked via opaque.) - */ - CPReadFn *orig_readfn; - CPWriteFn *orig_writefn; -}; - -/* Macros which are lvalues for the field in CPUARMState for the - * ARMCPRegInfo *ri. - */ -#define CPREG_FIELD32(env, ri) \ - (*(uint32_t *)((char *)(env) + (ri)->fieldoffset)) -#define CPREG_FIELD64(env, ri) \ - (*(uint64_t *)((char *)(env) + (ri)->fieldoffset)) - -#define REGINFO_SENTINEL { .type = ARM_CP_SENTINEL } - -void define_arm_cp_regs_with_opaque(ARMCPU *cpu, - const ARMCPRegInfo *regs, void *opaque); -void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, - const ARMCPRegInfo *regs, void *opaque); -static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs) -{ - define_arm_cp_regs_with_opaque(cpu, regs, 0); -} -static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs) -{ - define_one_arm_cp_reg_with_opaque(cpu, regs, 0); -} -const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp); - -/* - * Definition of an ARM co-processor register as viewed from - * userspace. This is used for presenting sanitised versions of - * registers to userspace when emulating the Linux AArch64 CPU - * ID/feature ABI (advertised as HWCAP_CPUID). - */ -typedef struct ARMCPRegUserSpaceInfo { - /* Name of register */ - const char *name; - - /* Is the name actually a glob pattern */ - bool is_glob; - - /* Only some bits are exported to user space */ - uint64_t exported_bits; - - /* Fixed bits are applied after the mask */ - uint64_t fixed_bits; -} ARMCPRegUserSpaceInfo; - -#define REGUSERINFO_SENTINEL { .name = NULL } - -void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods); - -/* CPWriteFn that can be used to implement writes-ignored behaviour */ -void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value); -/* CPReadFn that can be used for read-as-zero behaviour */ -uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri); - -/* CPResetFn that does nothing, for use if no reset is required even - * if fieldoffset is non zero. - */ -void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque); - -/* Return true if this reginfo struct's field in the cpu state struct - * is 64 bits wide. 
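The fields documented above (which this patch removes from cpu.h) combine into definitions like the following sketch. The register name and encoding here are made up purely for illustration:

    static const ARMCPRegInfo example_reginfo = {
        .name = "EXAMPLE_EL1",               /* hypothetical register */
        .state = ARM_CP_STATE_AA64,
        .opc0 = 3, .opc1 = 0, .crn = 11, .crm = 0, .opc2 = 7,
        .access = PL1_RW,
        .type = ARM_CP_CONST, .resetvalue = 0,
    };
    /* Registered with e.g. define_one_arm_cp_reg(cpu, &example_reginfo); */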
- */ -static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri) -{ - return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT); -} - -static inline bool cp_access_ok(int current_el, - const ARMCPRegInfo *ri, int isread) -{ - return (ri->access >> ((current_el * 2) + isread)) & 1; -} - -/* Raw read of a coprocessor register (as needed for migration, etc) */ -uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri); - /** * write_list_to_cpustate * @cpu: ARMCPU @@ -3002,7 +2854,8 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync); #define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX) #define CPU_RESOLVING_TYPE TYPE_ARM_CPU -#define cpu_signal_handler cpu_arm_signal_handler +#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU + #define cpu_list arm_cpu_list /* ARM has the following "translation regimes" (as the ARM ARM calls them): @@ -3045,26 +2898,29 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync); * table over and over. * 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access * Never (PAN) bit within PSTATE. + * 7. we fold together the secure and non-secure regimes for A-profile, + * because there are no banked system registers for aarch64, so the + * process of switching between secure and non-secure is + * already heavyweight. * * This gives us the following list of cases: * - * NS EL0 EL1&0 stage 1+2 (aka NS PL0) - * NS EL1 EL1&0 stage 1+2 (aka NS PL1) - * NS EL1 EL1&0 stage 1+2 +PAN - * NS EL0 EL2&0 - * NS EL2 EL2&0 - * NS EL2 EL2&0 +PAN - * NS EL2 (aka NS PL2) - * S EL0 EL1&0 (aka S PL0) - * S EL1 EL1&0 (not used if EL3 is 32 bit) - * S EL1 EL1&0 +PAN - * S EL3 (aka S PL1) + * EL0 EL1&0 stage 1+2 (aka NS PL0) + * EL1 EL1&0 stage 1+2 (aka NS PL1) + * EL1 EL1&0 stage 1+2 +PAN + * EL0 EL2&0 + * EL2 EL2&0 + * EL2 EL2&0 +PAN + * EL2 (aka NS PL2) + * EL3 (aka S PL1) + * Physical (NS & S) + * Stage2 (NS & S) * - * for a total of 11 different mmu_idx. + * for a total of 12 different mmu_idx. * * R profile CPUs have an MPU, but can use the same set of MMU indexes - * as A profile. They only need to distinguish NS EL0 and NS EL1 (and - * NS EL2 if we ever model a Cortex-R52). + * as A profile. They only need to distinguish EL0 and EL1 (and + * EL2 if we ever model a Cortex-R52). * * M profile CPUs are rather different as they do not have a true MMU. * They have the following different MMU indexes: @@ -3103,9 +2959,6 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync); #define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */ #define ARM_MMU_IDX_M 0x40 /* M profile */ -/* Meanings of the bits for A profile mmu idx values */ -#define ARM_MMU_IDX_A_NS 0x8 - /* Meanings of the bits for M profile mmu idx values */ #define ARM_MMU_IDX_M_PRIV 0x1 #define ARM_MMU_IDX_M_NEGPRI 0x2 @@ -3119,22 +2972,27 @@ typedef enum ARMMMUIdx { /* * A-profile. 
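A worked example of the access encoding being removed above: with the PL*_R/PL*_W bit values defined earlier, PL1_RW expands to 0xfc, and cp_access_ok() simply tests bit (2 * current_el + isread). Sketch (not part of the patch):

    /* PL1_RW == 0xfc: readable and writable from EL1, EL2 and EL3, not EL0. */
    /* EL0 read:  (0xfc >> (2 * 0 + 1)) & 1  ->  0  (access denied)   */
    /* EL1 read:  (0xfc >> (2 * 1 + 1)) & 1  ->  1  (access allowed)  */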
*/ - ARMMMUIdx_SE10_0 = 0 | ARM_MMU_IDX_A, - ARMMMUIdx_SE20_0 = 1 | ARM_MMU_IDX_A, - ARMMMUIdx_SE10_1 = 2 | ARM_MMU_IDX_A, - ARMMMUIdx_SE20_2 = 3 | ARM_MMU_IDX_A, - ARMMMUIdx_SE10_1_PAN = 4 | ARM_MMU_IDX_A, - ARMMMUIdx_SE20_2_PAN = 5 | ARM_MMU_IDX_A, - ARMMMUIdx_SE2 = 6 | ARM_MMU_IDX_A, - ARMMMUIdx_SE3 = 7 | ARM_MMU_IDX_A, + ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A, + ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A, + ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A, + ARMMMUIdx_E20_2 = 3 | ARM_MMU_IDX_A, + ARMMMUIdx_E10_1_PAN = 4 | ARM_MMU_IDX_A, + ARMMMUIdx_E20_2_PAN = 5 | ARM_MMU_IDX_A, + ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A, + ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A, - ARMMMUIdx_E10_0 = ARMMMUIdx_SE10_0 | ARM_MMU_IDX_A_NS, - ARMMMUIdx_E20_0 = ARMMMUIdx_SE20_0 | ARM_MMU_IDX_A_NS, - ARMMMUIdx_E10_1 = ARMMMUIdx_SE10_1 | ARM_MMU_IDX_A_NS, - ARMMMUIdx_E20_2 = ARMMMUIdx_SE20_2 | ARM_MMU_IDX_A_NS, - ARMMMUIdx_E10_1_PAN = ARMMMUIdx_SE10_1_PAN | ARM_MMU_IDX_A_NS, - ARMMMUIdx_E20_2_PAN = ARMMMUIdx_SE20_2_PAN | ARM_MMU_IDX_A_NS, - ARMMMUIdx_E2 = ARMMMUIdx_SE2 | ARM_MMU_IDX_A_NS, + /* TLBs with 1-1 mapping to the physical address spaces. */ + ARMMMUIdx_Phys_NS = 8 | ARM_MMU_IDX_A, + ARMMMUIdx_Phys_S = 9 | ARM_MMU_IDX_A, + + /* + * Used for second stage of an S12 page table walk, or for descriptor + * loads during first stage of an S1 page table walk. Note that both + * are in use simultaneously for SecureEL2: the security state for + * the S2 ptw is selected by the NS bit from the S1 ptw. + */ + ARMMMUIdx_Stage2 = 10 | ARM_MMU_IDX_A, + ARMMMUIdx_Stage2_S = 11 | ARM_MMU_IDX_A, /* * These are not allocated TLBs and are used only for AT system @@ -3143,18 +3001,6 @@ typedef enum ARMMMUIdx { ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB, ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB, ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB, - ARMMMUIdx_Stage1_SE0 = 3 | ARM_MMU_IDX_NOTLB, - ARMMMUIdx_Stage1_SE1 = 4 | ARM_MMU_IDX_NOTLB, - ARMMMUIdx_Stage1_SE1_PAN = 5 | ARM_MMU_IDX_NOTLB, - /* - * Not allocated a TLB: used only for second stage of an S12 page - * table walk, or for descriptor loads during first stage of an S1 - * page table walk. Note that if we ever want to have a TLB for this - * then various TLB flush insns which currently are no-ops or flush - * only stage 1 MMU indexes will need to change to flush stage 2. - */ - ARMMMUIdx_Stage2 = 6 | ARM_MMU_IDX_NOTLB, - ARMMMUIdx_Stage2_S = 7 | ARM_MMU_IDX_NOTLB, /* * M-profile. @@ -3184,14 +3030,9 @@ typedef enum ARMMMUIdxBit { TO_CORE_BIT(E2), TO_CORE_BIT(E20_2), TO_CORE_BIT(E20_2_PAN), - TO_CORE_BIT(SE10_0), - TO_CORE_BIT(SE20_0), - TO_CORE_BIT(SE10_1), - TO_CORE_BIT(SE20_2), - TO_CORE_BIT(SE10_1_PAN), - TO_CORE_BIT(SE20_2_PAN), - TO_CORE_BIT(SE2), - TO_CORE_BIT(SE3), + TO_CORE_BIT(E3), + TO_CORE_BIT(Stage2), + TO_CORE_BIT(Stage2_S), TO_CORE_BIT(MUser), TO_CORE_BIT(MPriv), @@ -3215,27 +3056,6 @@ typedef enum ARMASIdx { ARMASIdx_TagS = 3, } ARMASIdx; -/* Return the Exception Level targeted by debug exceptions. 
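Since ARMMMUIdx_Stage2 and ARMMMUIdx_Stage2_S are now real, TLB-backed indexes (and gain TO_CORE_BIT entries above), stage-2 translations presumably have to be included explicitly when flushing, roughly along these lines (illustrative only, not taken from this patch):

    tlb_flush_by_mmuidx(env_cpu(env),
                        ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_0 |
                        ARMMMUIdxBit_Stage2 |
                        ARMMMUIdxBit_Stage2_S);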
*/ -static inline int arm_debug_target_el(CPUARMState *env) -{ - bool secure = arm_is_secure(env); - bool route_to_el2 = false; - - if (arm_is_el2_enabled(env)) { - route_to_el2 = env->cp15.hcr_el2 & HCR_TGE || - env->cp15.mdcr_el2 & MDCR_TDE; - } - - if (route_to_el2) { - return 2; - } else if (arm_feature(env, ARM_FEATURE_EL3) && - !arm_el_is_aa64(env, 3) && secure) { - return 3; - } else { - return 1; - } -} - static inline bool arm_v7m_csselr_razwi(ARMCPU *cpu) { /* If all the CLIDR.Ctypem bits are 0 there are no caches, and @@ -3244,107 +3064,6 @@ static inline bool arm_v7m_csselr_razwi(ARMCPU *cpu) return (cpu->clidr & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0; } -/* See AArch64.GenerateDebugExceptionsFrom() in ARM ARM pseudocode */ -static inline bool aa64_generate_debug_exceptions(CPUARMState *env) -{ - int cur_el = arm_current_el(env); - int debug_el; - - if (cur_el == 3) { - return false; - } - - /* MDCR_EL3.SDD disables debug events from Secure state */ - if (arm_is_secure_below_el3(env) - && extract32(env->cp15.mdcr_el3, 16, 1)) { - return false; - } - - /* - * Same EL to same EL debug exceptions need MDSCR_KDE enabled - * while not masking the (D)ebug bit in DAIF. - */ - debug_el = arm_debug_target_el(env); - - if (cur_el == debug_el) { - return extract32(env->cp15.mdscr_el1, 13, 1) - && !(env->daif & PSTATE_D); - } - - /* Otherwise the debug target needs to be a higher EL */ - return debug_el > cur_el; -} - -static inline bool aa32_generate_debug_exceptions(CPUARMState *env) -{ - int el = arm_current_el(env); - - if (el == 0 && arm_el_is_aa64(env, 1)) { - return aa64_generate_debug_exceptions(env); - } - - if (arm_is_secure(env)) { - int spd; - - if (el == 0 && (env->cp15.sder & 1)) { - /* SDER.SUIDEN means debug exceptions from Secure EL0 - * are always enabled. Otherwise they are controlled by - * SDCR.SPD like those from other Secure ELs. - */ - return true; - } - - spd = extract32(env->cp15.mdcr_el3, 14, 2); - switch (spd) { - case 1: - /* SPD == 0b01 is reserved, but behaves as 0b00. */ - case 0: - /* For 0b00 we return true if external secure invasive debug - * is enabled. On real hardware this is controlled by external - * signals to the core. QEMU always permits debug, and behaves - * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high. - */ - return true; - case 2: - return false; - case 3: - return true; - } - } - - return el != 2; -} - -/* Return true if debugging exceptions are currently enabled. - * This corresponds to what in ARM ARM pseudocode would be - * if UsingAArch32() then - * return AArch32.GenerateDebugExceptions() - * else - * return AArch64.GenerateDebugExceptions() - * We choose to push the if() down into this function for clarity, - * since the pseudocode has it at all callsites except for the one in - * CheckSoftwareStep(), where it is elided because both branches would - * always return the same value. - */ -static inline bool arm_generate_debug_exceptions(CPUARMState *env) -{ - if (env->aarch64) { - return aa64_generate_debug_exceptions(env); - } else { - return aa32_generate_debug_exceptions(env); - } -} - -/* Is single-stepping active? (Note that the "is EL_D AArch64?" check - * implicitly means this always returns false in pre-v8 CPUs.) 
- */ -static inline bool arm_singlestep_active(CPUARMState *env) -{ - return extract32(env->cp15.mdscr_el1, 0, 1) - && arm_el_is_aa64(env, arm_debug_target_el(env)) - && arm_generate_debug_exceptions(env); -} - static inline bool arm_sctlr_b(CPUARMState *env) { return @@ -3401,9 +3120,6 @@ static inline bool arm_cpu_data_is_big_endian(CPUARMState *env) } } -typedef CPUARMState CPUArchState; -typedef ARMCPU ArchCPU; - #include "exec/cpu-all.h" /* @@ -3426,7 +3142,7 @@ typedef ARMCPU ArchCPU; * | TBFLAG_AM32 | +-----+----------+ * | | |TBFLAG_M32| * +-------------+----------------+----------+ - * 31 23 5 4 0 + * 31 23 6 5 0 * * Unless otherwise noted, these bits are cached in env->hflags. */ @@ -3437,10 +3153,9 @@ FIELD(TBFLAG_ANY, BE_DATA, 3, 1) FIELD(TBFLAG_ANY, MMUIDX, 4, 4) /* Target EL if we take a floating-point-disabled exception */ FIELD(TBFLAG_ANY, FPEXC_EL, 8, 2) -/* For A-profile only, target EL for debug exceptions. */ -FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 10, 2) /* Memory operations require alignment: SCTLR_ELx.A or CCR.UNALIGN_TRP */ -FIELD(TBFLAG_ANY, ALIGN_MEM, 12, 1) +FIELD(TBFLAG_ANY, ALIGN_MEM, 10, 1) +FIELD(TBFLAG_ANY, PSTATE__IL, 11, 1) /* * Bit usage when in AArch32 state, both A- and M-profile. @@ -3469,6 +3184,11 @@ FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1) * the same thing as the current security state of the processor! */ FIELD(TBFLAG_A32, NS, 10, 1) +/* + * Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not. + * This requires an SME trap from AArch32 mode when using NEON. + */ +FIELD(TBFLAG_A32, SME_TRAP_NONSTREAMING, 11, 1) /* * Bit usage when in AArch32 state, for M-profile only. @@ -3483,13 +3203,18 @@ FIELD(TBFLAG_M32, LSPACT, 2, 1) /* Not cached. */ FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 3, 1) /* Not cached. */ /* Set if FPCCR.S does not match current security state */ FIELD(TBFLAG_M32, FPCCR_S_WRONG, 4, 1) /* Not cached. */ +/* Set if MVE insns are definitely not predicated by VPR or LTPSIZE */ +FIELD(TBFLAG_M32, MVE_NO_PRED, 5, 1) /* Not cached. */ +/* Set if in secure mode */ +FIELD(TBFLAG_M32, SECURE, 6, 1) /* * Bit usage when in AArch64 state */ FIELD(TBFLAG_A64, TBII, 0, 2) FIELD(TBFLAG_A64, SVEEXC_EL, 2, 2) -FIELD(TBFLAG_A64, ZCR_LEN, 4, 4) +/* The current vector length, either NVL or SVL. */ +FIELD(TBFLAG_A64, VL, 4, 4) FIELD(TBFLAG_A64, PAUTH_ACTIVE, 8, 1) FIELD(TBFLAG_A64, BT, 9, 1) FIELD(TBFLAG_A64, BTYPE, 10, 2) /* Not cached. */ @@ -3499,6 +3224,12 @@ FIELD(TBFLAG_A64, ATA, 15, 1) FIELD(TBFLAG_A64, TCMA, 16, 2) FIELD(TBFLAG_A64, MTE_ACTIVE, 18, 1) FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1) +FIELD(TBFLAG_A64, SMEEXC_EL, 20, 2) +FIELD(TBFLAG_A64, PSTATE_SM, 22, 1) +FIELD(TBFLAG_A64, PSTATE_ZA, 23, 1) +FIELD(TBFLAG_A64, SVL, 24, 4) +/* Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not. */ +FIELD(TBFLAG_A64, SME_TRAP_NONSTREAMING, 28, 1) /* * Helpers for using the above. @@ -3533,15 +3264,37 @@ static inline int cpu_mmu_index(CPUARMState *env, bool ifetch) return EX_TBFLAG_ANY(env->hflags, MMUIDX); } +/** + * sve_vq + * @env: the cpu context + * + * Return the VL cached within env->hflags, in units of quadwords. + */ +static inline int sve_vq(CPUARMState *env) +{ + return EX_TBFLAG_A64(env->hflags, VL) + 1; +} + +/** + * sme_vq + * @env: the cpu context + * + * Return the SVL cached within env->hflags, in units of quadwords. 
+ */ +static inline int sme_vq(CPUARMState *env) +{ + return EX_TBFLAG_A64(env->hflags, SVL) + 1; +} + static inline bool bswap_code(bool sctlr_b) { #ifdef CONFIG_USER_ONLY - /* BE8 (SCTLR.B = 0, TARGET_WORDS_BIGENDIAN = 1) is mixed endian. - * The invalid combination SCTLR.B=1/CPSR.E=1/TARGET_WORDS_BIGENDIAN=0 + /* BE8 (SCTLR.B = 0, TARGET_BIG_ENDIAN = 1) is mixed endian. + * The invalid combination SCTLR.B=1/CPSR.E=1/TARGET_BIG_ENDIAN=0 * would also end up as a mixed-endian mode with BE code, LE data. */ return -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN 1 ^ #endif sctlr_b; @@ -3557,7 +3310,7 @@ static inline bool bswap_code(bool sctlr_b) static inline bool arm_cpu_bswap_data(CPUARMState *env) { return -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN 1 ^ #endif arm_cpu_data_is_big_endian(env); @@ -3649,27 +3402,24 @@ static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno) } /* Shared between translate-sve.c and sve_helper.c. */ -extern const uint64_t pred_esz_masks[4]; - -/* Helper for the macros below, validating the argument type. */ -static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x) -{ - return x; -} - -/* - * Lvalue macros for ARM TLB bits that we must cache in the TCG TLB. - * Using these should be a bit more self-documenting than using the - * generic target bits directly. - */ -#define arm_tlb_bti_gp(x) (typecheck_memtxattrs(x)->target_tlb_bit0) -#define arm_tlb_mte_tagged(x) (typecheck_memtxattrs(x)->target_tlb_bit1) +extern const uint64_t pred_esz_masks[5]; /* * AArch64 usage of the PAGE_TARGET_* bits for linux-user. + * Note that with the Linux kernel, PROT_MTE may not be cleared by mprotect + * mprotect but PROT_BTI may be cleared. C.f. the kernel's VM_ARCH_CLEAR. */ -#define PAGE_BTI PAGE_TARGET_1 -#define PAGE_MTE PAGE_TARGET_2 +#define PAGE_BTI PAGE_TARGET_1 +#define PAGE_MTE PAGE_TARGET_2 +#define PAGE_TARGET_STICKY PAGE_MTE + +/* We associate one allocation tag per 16 bytes, the minimum. 
*/ +#define LOG2_TAG_GRANULE 4 +#define TAG_GRANULE (1 << LOG2_TAG_GRANULE) + +#ifdef CONFIG_USER_ONLY +#define TARGET_PAGE_DATA_SIZE (TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1)) +#endif #ifdef TARGET_TAGGED_ADDRESSES /** @@ -3969,20 +3719,27 @@ static inline bool isar_feature_aa32_ats1e1(const ARMISARegisters *id) return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) >= 2; } -static inline bool isar_feature_aa32_pmu_8_1(const ARMISARegisters *id) +static inline bool isar_feature_aa32_pmuv3p1(const ARMISARegisters *id) { /* 0xf means "non-standard IMPDEF PMU" */ return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 4 && FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; } -static inline bool isar_feature_aa32_pmu_8_4(const ARMISARegisters *id) +static inline bool isar_feature_aa32_pmuv3p4(const ARMISARegisters *id) { /* 0xf means "non-standard IMPDEF PMU" */ return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 5 && FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; } +static inline bool isar_feature_aa32_pmuv3p5(const ARMISARegisters *id) +{ + /* 0xf means "non-standard IMPDEF PMU" */ + return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 6 && + FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; +} + static inline bool isar_feature_aa32_hpd(const ARMISARegisters *id) { return FIELD_EX32(id->id_mmfr4, ID_MMFR4, HPDS) != 0; @@ -4013,6 +3770,21 @@ static inline bool isar_feature_aa32_ssbs(const ARMISARegisters *id) return FIELD_EX32(id->id_pfr2, ID_PFR2, SSBS) != 0; } +static inline bool isar_feature_aa32_debugv7p1(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_dfr0, ID_DFR0, COPDBG) >= 5; +} + +static inline bool isar_feature_aa32_debugv8p2(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_dfr0, ID_DFR0, COPDBG) >= 8; +} + +static inline bool isar_feature_aa32_doublelock(const ARMISARegisters *id) +{ + return FIELD_EX32(id->dbgdevid, DBGDEVID, DOUBLELOCK) > 0; +} + /* * 64-bit feature tests via id registers. 
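The TARGET_PAGE_DATA_SIZE definition introduced in this hunk follows from the tag granule: one 4-bit MTE allocation tag covers each 16-byte granule, and two tags pack into one byte of storage, which is where the extra +1 in the shift comes from. A small worked illustration; the page sizes used below are examples, not QEMU constants:

    #include <assert.h>
    #include <stddef.h>

    #define LOG2_TAG_GRANULE 4                       /* one tag per 16-byte granule */

    static size_t tag_storage_bytes(size_t page_size)
    {
        /* tags_per_page / 2, since two 4-bit tags fit in each byte */
        return page_size >> (LOG2_TAG_GRANULE + 1);
    }

    int main(void)
    {
        assert(tag_storage_bytes(4096) == 128);      /* 256 granules -> 128 bytes */
        assert(tag_storage_bytes(65536) == 2048);    /* 64 KiB pages -> 2 KiB of tags */
        return 0;
    }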
*/ @@ -4190,6 +3962,21 @@ static inline bool isar_feature_aa64_aa32_el1(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL1) >= 2; } +static inline bool isar_feature_aa64_aa32_el2(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL2) >= 2; +} + +static inline bool isar_feature_aa64_ras(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RAS) != 0; +} + +static inline bool isar_feature_aa64_doublefault(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RAS) >= 2; +} + static inline bool isar_feature_aa64_sve(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0; @@ -4220,6 +4007,11 @@ static inline bool isar_feature_aa64_ats1e1(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 2; } +static inline bool isar_feature_aa64_hcx(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HCX) != 0; +} + static inline bool isar_feature_aa64_uao(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, UAO) != 0; @@ -4230,6 +4022,16 @@ static inline bool isar_feature_aa64_st(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, ST) != 0; } +static inline bool isar_feature_aa64_fwb(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, FWB) != 0; +} + +static inline bool isar_feature_aa64_ids(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, IDS) != 0; +} + static inline bool isar_feature_aa64_bti(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0; @@ -4245,18 +4047,29 @@ static inline bool isar_feature_aa64_mte(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) >= 2; } -static inline bool isar_feature_aa64_pmu_8_1(const ARMISARegisters *id) +static inline bool isar_feature_aa64_sme(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SME) != 0; +} + +static inline bool isar_feature_aa64_pmuv3p1(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 4 && FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; } -static inline bool isar_feature_aa64_pmu_8_4(const ARMISARegisters *id) +static inline bool isar_feature_aa64_pmuv3p4(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 5 && FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; } +static inline bool isar_feature_aa64_pmuv3p5(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 6 && + FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; +} + static inline bool isar_feature_aa64_rcpc_8_3(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) != 0; @@ -4272,11 +4085,86 @@ static inline bool isar_feature_aa64_i8mm(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, I8MM) != 0; } +static inline bool isar_feature_aa64_tgran4_lpa2(const ARMISARegisters *id) +{ + return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 1; +} + +static inline bool isar_feature_aa64_tgran4_2_lpa2(const ARMISARegisters *id) +{ + unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4_2); + return t >= 3 || (t == 0 && isar_feature_aa64_tgran4_lpa2(id)); +} + +static inline bool isar_feature_aa64_tgran16_lpa2(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16) 
>= 2; +} + +static inline bool isar_feature_aa64_tgran16_2_lpa2(const ARMISARegisters *id) +{ + unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16_2); + return t >= 3 || (t == 0 && isar_feature_aa64_tgran16_lpa2(id)); +} + +static inline bool isar_feature_aa64_tgran4(const ARMISARegisters *id) +{ + return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 0; +} + +static inline bool isar_feature_aa64_tgran16(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16) >= 1; +} + +static inline bool isar_feature_aa64_tgran64(const ARMISARegisters *id) +{ + return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64) >= 0; +} + +static inline bool isar_feature_aa64_tgran4_2(const ARMISARegisters *id) +{ + unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4_2); + return t >= 2 || (t == 0 && isar_feature_aa64_tgran4(id)); +} + +static inline bool isar_feature_aa64_tgran16_2(const ARMISARegisters *id) +{ + unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16_2); + return t >= 2 || (t == 0 && isar_feature_aa64_tgran16(id)); +} + +static inline bool isar_feature_aa64_tgran64_2(const ARMISARegisters *id) +{ + unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64_2); + return t >= 2 || (t == 0 && isar_feature_aa64_tgran64(id)); +} + static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0; } +static inline bool isar_feature_aa64_lva(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, VARANGE) != 0; +} + +static inline bool isar_feature_aa64_e0pd(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, E0PD) != 0; +} + +static inline bool isar_feature_aa64_hafs(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HAFDBS) != 0; +} + +static inline bool isar_feature_aa64_hdbs(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HAFDBS) >= 2; +} + static inline bool isar_feature_aa64_tts2uxn(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, XNX) != 0; @@ -4287,11 +4175,29 @@ static inline bool isar_feature_aa64_dit(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, DIT) != 0; } +static inline bool isar_feature_aa64_scxtnum(const ARMISARegisters *id) +{ + int key = FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, CSV2); + if (key >= 2) { + return true; /* FEAT_CSV2_2 */ + } + if (key == 1) { + key = FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, CSV2_FRAC); + return key >= 2; /* FEAT_CSV2_1p2 */ + } + return false; +} + static inline bool isar_feature_aa64_ssbs(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SSBS) != 0; } +static inline bool isar_feature_aa64_debugv8p2(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, DEBUGVER) >= 8; +} + static inline bool isar_feature_aa64_sve2(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SVEVER) != 0; @@ -4342,6 +4248,26 @@ static inline bool isar_feature_aa64_sve_f64mm(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F64MM) != 0; } +static inline bool isar_feature_aa64_sme_f64f64(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, F64F64); +} + +static inline bool isar_feature_aa64_sme_i16i64(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, I16I64) == 0xf; +} + +static inline bool 
isar_feature_aa64_sme_fa64(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, FA64); +} + +static inline bool isar_feature_aa64_doublelock(const ARMISARegisters *id) +{ + return FIELD_SEX64(id->id_aa64dfr0, ID_AA64DFR0, DOUBLELOCK) >= 0; +} + /* * Feature tests for "does this exist in either 32-bit or 64-bit?" */ @@ -4355,14 +4281,19 @@ static inline bool isar_feature_any_predinv(const ARMISARegisters *id) return isar_feature_aa64_predinv(id) || isar_feature_aa32_predinv(id); } -static inline bool isar_feature_any_pmu_8_1(const ARMISARegisters *id) +static inline bool isar_feature_any_pmuv3p1(const ARMISARegisters *id) { - return isar_feature_aa64_pmu_8_1(id) || isar_feature_aa32_pmu_8_1(id); + return isar_feature_aa64_pmuv3p1(id) || isar_feature_aa32_pmuv3p1(id); } -static inline bool isar_feature_any_pmu_8_4(const ARMISARegisters *id) +static inline bool isar_feature_any_pmuv3p4(const ARMISARegisters *id) { - return isar_feature_aa64_pmu_8_4(id) || isar_feature_aa32_pmu_8_4(id); + return isar_feature_aa64_pmuv3p4(id) || isar_feature_aa32_pmuv3p4(id); +} + +static inline bool isar_feature_any_pmuv3p5(const ARMISARegisters *id) +{ + return isar_feature_aa64_pmuv3p5(id) || isar_feature_aa32_pmuv3p5(id); } static inline bool isar_feature_any_ccidx(const ARMISARegisters *id) @@ -4375,6 +4306,16 @@ static inline bool isar_feature_any_tts2uxn(const ARMISARegisters *id) return isar_feature_aa64_tts2uxn(id) || isar_feature_aa32_tts2uxn(id); } +static inline bool isar_feature_any_debugv8p2(const ARMISARegisters *id) +{ + return isar_feature_aa64_debugv8p2(id) || isar_feature_aa32_debugv8p2(id); +} + +static inline bool isar_feature_any_ras(const ARMISARegisters *id) +{ + return isar_feature_aa64_ras(id) || isar_feature_aa32_ras(id); +} + /* * Forward to the above feature tests given an ARMCPU pointer. 
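Several of the new feature tests above use a signed field extract (FIELD_SEX64) rather than the unsigned one: fields such as TGRAN4 and DOUBLELOCK encode 0xf as "not implemented", and sign-extending the 4-bit value turns that into -1, so ">= 0" reads as "present" and ">= 1" as "present in its extended form". A compilable restatement of that idiom, using a locally defined extract helper rather than QEMU's macro:

    #include <assert.h>
    #include <stdint.h>

    /* Sign-extend a 4-bit field at bit position 'pos'; relies on the usual
     * two's-complement conversion, as the common sextract pattern does. */
    static int64_t sextract4(uint64_t reg, unsigned pos)
    {
        return (int64_t)(reg << (64 - pos - 4)) >> 60;
    }

    int main(void)
    {
        assert(sextract4(0xf, 0) == -1);   /* 0xf => feature absent */
        assert(sextract4(0x0, 0) == 0);    /* 0x0 => base implementation */
        assert(sextract4(0x1, 0) == 1);    /* 0x1 => extended variant */
        return 0;
    }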
*/ diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c index c690318a9b..3d74f134f5 100644 --- a/target/arm/cpu64.c +++ b/target/arm/cpu64.c @@ -29,73 +29,18 @@ #include "hw/loader.h" #endif #include "sysemu/kvm.h" +#include "sysemu/hvf.h" #include "kvm_arm.h" +#include "hvf_arm.h" #include "qapi/visitor.h" #include "hw/qdev-properties.h" +#include "internals.h" - -#ifndef CONFIG_USER_ONLY -static uint64_t a57_a53_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - ARMCPU *cpu = env_archcpu(env); - - /* Number of cores is in [25:24]; otherwise we RAZ */ - return (cpu->core_count - 1) << 24; -} -#endif - -static const ARMCPRegInfo cortex_a72_a57_a53_cp_reginfo[] = { -#ifndef CONFIG_USER_ONLY - { .name = "L2CTLR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 2, - .access = PL1_RW, .readfn = a57_a53_l2ctlr_read, - .writefn = arm_cp_write_ignore }, - { .name = "L2CTLR", - .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 2, - .access = PL1_RW, .readfn = a57_a53_l2ctlr_read, - .writefn = arm_cp_write_ignore }, -#endif - { .name = "L2ECTLR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 3, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "L2ECTLR", - .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 3, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "L2ACTLR", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 0, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "CPUACTLR", - .cp = 15, .opc1 = 0, .crm = 15, - .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, - { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 1, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "CPUECTLR", - .cp = 15, .opc1 = 1, .crm = 15, - .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, - { .name = "CPUMERRSR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 2, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "CPUMERRSR", - .cp = 15, .opc1 = 2, .crm = 15, - .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, - { .name = "L2MERRSR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 3, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "L2MERRSR", - .cp = 15, .opc1 = 3, .crm = 15, - .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, - REGINFO_SENTINEL -}; - -static void aarch64_a57_initfn(Object *obj) +static void aarch64_a35_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); - cpu->dtb_compatible = "arm,cortex-a57"; + cpu->dtb_compatible = "arm,cortex-a35"; set_feature(&cpu->env, ARM_FEATURE_V8); set_feature(&cpu->env, ARM_FEATURE_NEON); set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); @@ -104,124 +49,15 @@ static void aarch64_a57_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_EL2); set_feature(&cpu->env, ARM_FEATURE_EL3); set_feature(&cpu->env, ARM_FEATURE_PMU); - cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57; - cpu->midr = 0x411fd070; - cpu->revidr = 0x00000000; - cpu->reset_fpsid = 0x41034070; - cpu->isar.mvfr0 = 0x10110222; - cpu->isar.mvfr1 = 0x12111111; - 
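The cp-register table removed at the top of this file included a read hook that synthesized L2CTLR from the configured core count, storing (cores - 1) in bits [25:24] with every other bit reading as zero. A standalone restatement of that encoding, not QEMU code:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t sketch_l2ctlr(unsigned core_count)
    {
        return (uint64_t)(core_count - 1) << 24;     /* [25:24] = cores - 1 */
    }

    int main(void)
    {
        assert(sketch_l2ctlr(1) == 0);                     /* single core reads 0 */
        assert(((sketch_l2ctlr(4) >> 24) & 3) == 3);       /* quad core => 0b11 */
        return 0;
    }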
cpu->isar.mvfr2 = 0x00000043; - cpu->ctr = 0x8444c004; - cpu->reset_sctlr = 0x00c50838; + + /* From B2.2 AArch64 identification registers. */ + cpu->midr = 0x411fd040; + cpu->revidr = 0; + cpu->ctr = 0x84448004; cpu->isar.id_pfr0 = 0x00000131; cpu->isar.id_pfr1 = 0x00011011; cpu->isar.id_dfr0 = 0x03010066; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x10101105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02102211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00011142; - cpu->isar.id_isar5 = 0x00011121; - cpu->isar.id_isar6 = 0; - cpu->isar.id_aa64pfr0 = 0x00002222; - cpu->isar.id_aa64dfr0 = 0x10305106; - cpu->isar.id_aa64isar0 = 0x00011120; - cpu->isar.id_aa64mmfr0 = 0x00001124; - cpu->isar.dbgdidr = 0x3516d000; - cpu->clidr = 0x0a200023; - cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ - cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ - cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */ - cpu->dcz_blocksize = 4; /* 64 bytes */ - cpu->gic_num_lrs = 4; - cpu->gic_vpribits = 5; - cpu->gic_vprebits = 5; - define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo); -} - -static void aarch64_a53_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - cpu->dtb_compatible = "arm,cortex-a53"; - set_feature(&cpu->env, ARM_FEATURE_V8); - set_feature(&cpu->env, ARM_FEATURE_NEON); - set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); - set_feature(&cpu->env, ARM_FEATURE_AARCH64); - set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); - set_feature(&cpu->env, ARM_FEATURE_EL2); - set_feature(&cpu->env, ARM_FEATURE_EL3); - set_feature(&cpu->env, ARM_FEATURE_PMU); - cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A53; - cpu->midr = 0x410fd034; - cpu->revidr = 0x00000000; - cpu->reset_fpsid = 0x41034070; - cpu->isar.mvfr0 = 0x10110222; - cpu->isar.mvfr1 = 0x12111111; - cpu->isar.mvfr2 = 0x00000043; - cpu->ctr = 0x84448004; /* L1Ip = VIPT */ - cpu->reset_sctlr = 0x00c50838; - cpu->isar.id_pfr0 = 0x00000131; - cpu->isar.id_pfr1 = 0x00011011; - cpu->isar.id_dfr0 = 0x03010066; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x10101105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02102211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00011142; - cpu->isar.id_isar5 = 0x00011121; - cpu->isar.id_isar6 = 0; - cpu->isar.id_aa64pfr0 = 0x00002222; - cpu->isar.id_aa64dfr0 = 0x10305106; - cpu->isar.id_aa64isar0 = 0x00011120; - cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */ - cpu->isar.dbgdidr = 0x3516d000; - cpu->clidr = 0x0a200023; - cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ - cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */ - cpu->ccsidr[2] = 0x707fe07a; /* 1024KB L2 cache */ - cpu->dcz_blocksize = 4; /* 64 bytes */ - cpu->gic_num_lrs = 4; - cpu->gic_vpribits = 5; - cpu->gic_vprebits = 5; - define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo); -} - -static void aarch64_a72_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - cpu->dtb_compatible = "arm,cortex-a72"; - set_feature(&cpu->env, ARM_FEATURE_V8); - set_feature(&cpu->env, ARM_FEATURE_NEON); - set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); - set_feature(&cpu->env, ARM_FEATURE_AARCH64); - set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); - set_feature(&cpu->env, ARM_FEATURE_EL2); - 
set_feature(&cpu->env, ARM_FEATURE_EL3); - set_feature(&cpu->env, ARM_FEATURE_PMU); - cpu->midr = 0x410fd083; - cpu->revidr = 0x00000000; - cpu->reset_fpsid = 0x41034080; - cpu->isar.mvfr0 = 0x10110222; - cpu->isar.mvfr1 = 0x12111111; - cpu->isar.mvfr2 = 0x00000043; - cpu->ctr = 0x8444c004; - cpu->reset_sctlr = 0x00c50838; - cpu->isar.id_pfr0 = 0x00000131; - cpu->isar.id_pfr1 = 0x00011011; - cpu->isar.id_dfr0 = 0x03010066; - cpu->id_afr0 = 0x00000000; + cpu->id_afr0 = 0; cpu->isar.id_mmfr0 = 0x10201105; cpu->isar.id_mmfr1 = 0x40000000; cpu->isar.id_mmfr2 = 0x01260000; @@ -233,19 +69,51 @@ static void aarch64_a72_initfn(Object *obj) cpu->isar.id_isar4 = 0x00011142; cpu->isar.id_isar5 = 0x00011121; cpu->isar.id_aa64pfr0 = 0x00002222; + cpu->isar.id_aa64pfr1 = 0; cpu->isar.id_aa64dfr0 = 0x10305106; + cpu->isar.id_aa64dfr1 = 0; cpu->isar.id_aa64isar0 = 0x00011120; - cpu->isar.id_aa64mmfr0 = 0x00001124; - cpu->isar.dbgdidr = 0x3516d000; + cpu->isar.id_aa64isar1 = 0; + cpu->isar.id_aa64mmfr0 = 0x00101122; + cpu->isar.id_aa64mmfr1 = 0; cpu->clidr = 0x0a200023; - cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ - cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ - cpu->ccsidr[2] = 0x707fe07a; /* 1MB L2 cache */ - cpu->dcz_blocksize = 4; /* 64 bytes */ + cpu->dcz_blocksize = 4; + + /* From B2.4 AArch64 Virtual Memory control registers */ + cpu->reset_sctlr = 0x00c50838; + + /* From B2.10 AArch64 performance monitor registers */ + cpu->isar.reset_pmcr_el0 = 0x410a3000; + + /* From B2.29 Cache ID registers */ + cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ + cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */ + cpu->ccsidr[2] = 0x703fe03a; /* 512KB L2 cache */ + + /* From B3.5 VGIC Type register */ cpu->gic_num_lrs = 4; cpu->gic_vpribits = 5; cpu->gic_vprebits = 5; - define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo); + cpu->gic_pribits = 5; + + /* From C6.4 Debug ID Register */ + cpu->isar.dbgdidr = 0x3516d000; + /* From C6.5 Debug Device ID Register */ + cpu->isar.dbgdevid = 0x00110f13; + /* From C6.6 Debug Device ID Register 1 */ + cpu->isar.dbgdevid1 = 0x2; + + /* From Cortex-A35 SIMD and Floating-point Support r1p0 */ + /* From 3.2 AArch32 register summary */ + cpu->reset_fpsid = 0x41034043; + + /* From 2.2 AArch64 register summary */ + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x12111111; + cpu->isar.mvfr2 = 0x00000043; + + /* These values are the same with A53/A57/A72. */ + define_cortex_a72_a57_a53_cp_reginfo(cpu); } void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) @@ -265,16 +133,28 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) * any of the above. Finally, if SVE is not disabled, then at least one * vector length must be enabled. */ - DECLARE_BITMAP(kvm_supported, ARM_MAX_VQ); - DECLARE_BITMAP(tmp, ARM_MAX_VQ); - uint32_t vq, max_vq = 0; + uint32_t vq_map = cpu->sve_vq.map; + uint32_t vq_init = cpu->sve_vq.init; + uint32_t vq_supported; + uint32_t vq_mask = 0; + uint32_t tmp, vq, max_vq = 0; - /* Collect the set of vector lengths supported by KVM. */ - bitmap_zero(kvm_supported, ARM_MAX_VQ); - if (kvm_enabled() && kvm_arm_sve_supported()) { - kvm_arm_sve_get_vls(CPU(cpu), kvm_supported); - } else if (kvm_enabled()) { - assert(!cpu_isar_feature(aa64_sve, cpu)); + /* + * CPU models specify a set of supported vector lengths which are + * enabled by default. Attempting to enable any vector length not set + * in the supported bitmap results in an error. When KVM is enabled we + * fetch the supported bitmap from the host. 
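The finalize logic that follows replaces the old DECLARE_BITMAP bookkeeping with plain uint32_t maps, one bit per VQ (bit N means (N+1)*128-bit vectors). A sketch of the KVM-branch arithmetic with invented values; clz32() is re-implemented locally via a GCC/Clang builtin, and the host-supported set is made up for the walk-through:

    #include <assert.h>
    #include <stdint.h>

    static unsigned sketch_clz32(uint32_t x)          /* local stand-in for clz32() */
    {
        return x ? (unsigned)__builtin_clz(x) : 32;
    }

    int main(void)
    {
        uint32_t vq_supported = 0x0000ffff;                    /* pretend host: vq 1..16 */
        uint32_t vq_map  = 1u << (8 - 1);                      /* user enabled sve1024 */
        uint32_t vq_init = (1u << (8 - 1)) | (1u << (2 - 1));  /* ...and disabled sve256 */

        unsigned max_vq  = 32 - sketch_clz32(vq_map);          /* highest enabled: 8 */
        uint32_t vq_mask = (1u << max_vq) - 1;

        /* KVM branch: auto-enable every supported, untouched length below max. */
        vq_map |= vq_supported & ~vq_init & vq_mask;

        assert(max_vq == 8);
        assert(vq_map & (1u << (1 - 1)));      /* sve128 picked up automatically */
        assert(!(vq_map & (1u << (2 - 1))));   /* the explicit sve256=off is kept */
        return 0;
    }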
+ */ + if (kvm_enabled()) { + if (kvm_arm_sve_supported()) { + cpu->sve_vq.supported = kvm_arm_sve_get_vls(CPU(cpu)); + vq_supported = cpu->sve_vq.supported; + } else { + assert(!cpu_isar_feature(aa64_sve, cpu)); + vq_supported = 0; + } + } else { + vq_supported = cpu->sve_vq.supported; } /* @@ -282,8 +162,9 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) * From the properties, sve_vq_map implies sve_vq_init. * Check first for any sve enabled. */ - if (!bitmap_empty(cpu->sve_vq_map, ARM_MAX_VQ)) { - max_vq = find_last_bit(cpu->sve_vq_map, ARM_MAX_VQ) + 1; + if (vq_map != 0) { + max_vq = 32 - clz32(vq_map); + vq_mask = MAKE_64BIT_MASK(0, max_vq); if (cpu->sve_max_vq && max_vq > cpu->sve_max_vq) { error_setg(errp, "cannot enable sve%d", max_vq * 128); @@ -299,15 +180,10 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) * For KVM we have to automatically enable all supported unitialized * lengths, even when the smaller lengths are not all powers-of-two. */ - bitmap_andnot(tmp, kvm_supported, cpu->sve_vq_init, max_vq); - bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq); + vq_map |= vq_supported & ~vq_init & vq_mask; } else { /* Propagate enabled bits down through required powers-of-two. */ - for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) { - if (!test_bit(vq - 1, cpu->sve_vq_init)) { - set_bit(vq - 1, cpu->sve_vq_map); - } - } + vq_map |= SVE_VQ_POW2_MAP & ~vq_init & vq_mask; } } else if (cpu->sve_max_vq == 0) { /* @@ -320,44 +196,29 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) if (kvm_enabled()) { /* Disabling a supported length disables all larger lengths. */ - for (vq = 1; vq <= ARM_MAX_VQ; ++vq) { - if (test_bit(vq - 1, cpu->sve_vq_init) && - test_bit(vq - 1, kvm_supported)) { - break; - } - } - max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ; - bitmap_andnot(cpu->sve_vq_map, kvm_supported, - cpu->sve_vq_init, max_vq); - if (max_vq == 0 || bitmap_empty(cpu->sve_vq_map, max_vq)) { - error_setg(errp, "cannot disable sve%d", vq * 128); - error_append_hint(errp, "Disabling sve%d results in all " - "vector lengths being disabled.\n", - vq * 128); - error_append_hint(errp, "With SVE enabled, at least one " - "vector length must be enabled.\n"); - return; - } + tmp = vq_init & vq_supported; } else { /* Disabling a power-of-two disables all larger lengths. */ - if (test_bit(0, cpu->sve_vq_init)) { - error_setg(errp, "cannot disable sve128"); - error_append_hint(errp, "Disabling sve128 results in all " - "vector lengths being disabled.\n"); - error_append_hint(errp, "With SVE enabled, at least one " - "vector length must be enabled.\n"); - return; - } - for (vq = 2; vq <= ARM_MAX_VQ; vq <<= 1) { - if (test_bit(vq - 1, cpu->sve_vq_init)) { - break; - } - } - max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ; - bitmap_complement(cpu->sve_vq_map, cpu->sve_vq_init, max_vq); + tmp = vq_init & SVE_VQ_POW2_MAP; + } + vq = ctz32(tmp) + 1; + + max_vq = vq <= ARM_MAX_VQ ? 
vq - 1 : ARM_MAX_VQ; + vq_mask = MAKE_64BIT_MASK(0, max_vq); + vq_map = vq_supported & ~vq_init & vq_mask; + + if (max_vq == 0 || vq_map == 0) { + error_setg(errp, "cannot disable sve%d", vq * 128); + error_append_hint(errp, "Disabling sve%d results in all " + "vector lengths being disabled.\n", + vq * 128); + error_append_hint(errp, "With SVE enabled, at least one " + "vector length must be enabled.\n"); + return; } - max_vq = find_last_bit(cpu->sve_vq_map, max_vq) + 1; + max_vq = 32 - clz32(vq_map); + vq_mask = MAKE_64BIT_MASK(0, max_vq); } /* @@ -367,9 +228,9 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) */ if (cpu->sve_max_vq != 0) { max_vq = cpu->sve_max_vq; + vq_mask = MAKE_64BIT_MASK(0, max_vq); - if (!test_bit(max_vq - 1, cpu->sve_vq_map) && - test_bit(max_vq - 1, cpu->sve_vq_init)) { + if (vq_init & ~vq_map & (1 << (max_vq - 1))) { error_setg(errp, "cannot disable sve%d", max_vq * 128); error_append_hint(errp, "The maximum vector length must be " "enabled, sve-max-vq=%d (%d bits)\n", @@ -378,8 +239,7 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) } /* Set all bits not explicitly set within sve-max-vq. */ - bitmap_complement(tmp, cpu->sve_vq_init, max_vq); - bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq); + vq_map |= ~vq_init & vq_mask; } /* @@ -388,48 +248,52 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) * are clear, just in case anybody looks. */ assert(max_vq != 0); - bitmap_clear(cpu->sve_vq_map, max_vq, ARM_MAX_VQ - max_vq); + assert(vq_mask != 0); + vq_map &= vq_mask; - if (kvm_enabled()) { - /* Ensure the set of lengths matches what KVM supports. */ - bitmap_xor(tmp, cpu->sve_vq_map, kvm_supported, max_vq); - if (!bitmap_empty(tmp, max_vq)) { - vq = find_last_bit(tmp, max_vq) + 1; - if (test_bit(vq - 1, cpu->sve_vq_map)) { - if (cpu->sve_max_vq) { - error_setg(errp, "cannot set sve-max-vq=%d", - cpu->sve_max_vq); - error_append_hint(errp, "This KVM host does not support " - "the vector length %d-bits.\n", - vq * 128); - error_append_hint(errp, "It may not be possible to use " - "sve-max-vq with this KVM host. Try " - "using only sve properties.\n"); - } else { - error_setg(errp, "cannot enable sve%d", vq * 128); - error_append_hint(errp, "This KVM host does not support " - "the vector length %d-bits.\n", - vq * 128); - } + /* Ensure the set of lengths matches what is supported. */ + tmp = vq_map ^ (vq_supported & vq_mask); + if (tmp) { + vq = 32 - clz32(tmp); + if (vq_map & (1 << (vq - 1))) { + if (cpu->sve_max_vq) { + error_setg(errp, "cannot set sve-max-vq=%d", cpu->sve_max_vq); + error_append_hint(errp, "This CPU does not support " + "the vector length %d-bits.\n", vq * 128); + error_append_hint(errp, "It may not be possible to use " + "sve-max-vq with this CPU. Try " + "using only sve properties.\n"); } else { + error_setg(errp, "cannot enable sve%d", vq * 128); + if (vq_supported) { + error_append_hint(errp, "This CPU does not support " + "the vector length %d-bits.\n", vq * 128); + } else { + error_append_hint(errp, "SVE not supported by KVM " + "on this host\n"); + } + } + return; + } else { + if (kvm_enabled()) { error_setg(errp, "cannot disable sve%d", vq * 128); error_append_hint(errp, "The KVM host requires all " "supported vector lengths smaller " "than %d bits to also be enabled.\n", max_vq * 128); - } - return; - } - } else { - /* Ensure all required powers-of-two are enabled. 
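The same rewrite covers the "user only disabled lengths" case: on the KVM path, the lowest disabled host-supported VQ (found with ctz32) caps max_vq at one below it, because disabling a supported length disables all larger ones. A walk-through with invented values, using local stand-ins for ctz32 and ARM_MAX_VQ:

    #include <assert.h>
    #include <stdint.h>

    #define SKETCH_ARM_MAX_VQ 16

    static unsigned sketch_ctz32(uint32_t x)
    {
        return x ? (unsigned)__builtin_ctz(x) : 32;
    }

    int main(void)
    {
        uint32_t vq_supported = 0x0000ffff;       /* pretend host: vq 1..16 */
        uint32_t vq_init = 1u << (4 - 1);         /* user set sve512=off, nothing else */

        uint32_t tmp = vq_init & vq_supported;    /* disabled lengths the host has */
        unsigned vq = sketch_ctz32(tmp) + 1;      /* lowest such length: vq 4 */
        unsigned max_vq = vq <= SKETCH_ARM_MAX_VQ ? vq - 1 : SKETCH_ARM_MAX_VQ;
        uint32_t vq_mask = (1u << max_vq) - 1;
        uint32_t vq_map = vq_supported & ~vq_init & vq_mask;

        assert(max_vq == 3);                      /* everything from sve512 up is off */
        assert(vq_map == 0x7);                    /* vq 1..3 remain enabled */
        return 0;
    }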
*/ - for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) { - if (!test_bit(vq - 1, cpu->sve_vq_map)) { - error_setg(errp, "cannot disable sve%d", vq * 128); - error_append_hint(errp, "sve%d is required as it " - "is a power-of-two length smaller than " - "the maximum, sve%d\n", - vq * 128, max_vq * 128); return; + } else { + /* Ensure all required powers-of-two are enabled. */ + tmp = SVE_VQ_POW2_MAP & vq_mask & ~vq_map; + if (tmp) { + vq = 32 - clz32(tmp); + error_setg(errp, "cannot disable sve%d", vq * 128); + error_append_hint(errp, "sve%d is required as it " + "is a power-of-two length smaller " + "than the maximum, sve%d\n", + vq * 128, max_vq * 128); + return; + } } } } @@ -448,6 +312,7 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) /* From now on sve_max_vq is the actual maximum supported length. */ cpu->sve_max_vq = max_vq; + cpu->sve_vq.map = vq_map; } static void cpu_max_get_sve_max_vq(Object *obj, Visitor *v, const char *name, @@ -492,31 +357,34 @@ static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name, } /* - * Note that cpu_arm_get/set_sve_vq cannot use the simpler - * object_property_add_bool interface because they make use - * of the contents of "name" to determine which bit on which - * to operate. + * Note that cpu_arm_{get,set}_vq cannot use the simpler + * object_property_add_bool interface because they make use of the + * contents of "name" to determine which bit on which to operate. */ -static void cpu_arm_get_sve_vq(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) +static void cpu_arm_get_vq(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) { ARMCPU *cpu = ARM_CPU(obj); + ARMVQMap *vq_map = opaque; uint32_t vq = atoi(&name[3]) / 128; + bool sve = vq_map == &cpu->sve_vq; bool value; - /* All vector lengths are disabled when SVE is off. */ - if (!cpu_isar_feature(aa64_sve, cpu)) { + /* All vector lengths are disabled when feature is off. */ + if (sve + ? !cpu_isar_feature(aa64_sve, cpu) + : !cpu_isar_feature(aa64_sme, cpu)) { value = false; } else { - value = test_bit(vq - 1, cpu->sve_vq_map); + value = extract32(vq_map->map, vq - 1, 1); } visit_type_bool(v, name, &value, errp); } -static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) +static void cpu_arm_set_vq(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) { - ARMCPU *cpu = ARM_CPU(obj); + ARMVQMap *vq_map = opaque; uint32_t vq = atoi(&name[3]) / 128; bool value; @@ -524,18 +392,8 @@ static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name, return; } - if (value && kvm_enabled() && !kvm_arm_sve_supported()) { - error_setg(errp, "cannot enable %s", name); - error_append_hint(errp, "SVE not supported by KVM on this host\n"); - return; - } - - if (value) { - set_bit(vq - 1, cpu->sve_vq_map); - } else { - clear_bit(vq - 1, cpu->sve_vq_map); - } - set_bit(vq - 1, cpu->sve_vq_init); + vq_map->map = deposit32(vq_map->map, vq - 1, 1, value); + vq_map->init |= 1 << (vq - 1); } static bool cpu_arm_get_sve(Object *obj, Error **errp) @@ -559,13 +417,85 @@ static void cpu_arm_set_sve(Object *obj, bool value, Error **errp) cpu->isar.id_aa64pfr0 = t; } -#ifdef CONFIG_USER_ONLY -/* Mirror linux /proc/sys/abi/sve_default_vector_length. 
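With the ARMVQMap passed as the property's opaque pointer, a single getter/setter pair now serves both the sveN and smeN properties, and the vector length is recovered from the property name itself: atoi(&name[3]) skips the three-character prefix and dividing by 128 converts bits to quadwords. A standalone sketch of the setter's bookkeeping; the struct and helper are local stand-ins, not QEMU's types:

    #include <assert.h>
    #include <stdlib.h>

    typedef struct {                 /* local stand-in for QEMU's ARMVQMap */
        unsigned int map, init;
    } SketchVQMap;

    static void sketch_set_vq(SketchVQMap *vq, const char *name, int value)
    {
        unsigned bit = 1u << (atoi(&name[3]) / 128 - 1);   /* "sve256" -> bit 1 */

        vq->map = value ? (vq->map | bit) : (vq->map & ~bit);
        vq->init |= bit;             /* remember that the user touched this length */
    }

    int main(void)
    {
        SketchVQMap sve = { 0, 0 }, sme = { 0, 0 };

        sketch_set_vq(&sve, "sve256", 1);   /* enable the 256-bit SVE length */
        sketch_set_vq(&sme, "sme512", 0);   /* explicitly disable 512-bit SME */

        assert(sve.map == 0x2 && sve.init == 0x2);
        assert(sme.map == 0x0 && sme.init == 0x8);
        return 0;
    }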
*/ -static void cpu_arm_set_sve_default_vec_len(Object *obj, Visitor *v, - const char *name, void *opaque, - Error **errp) +void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp) +{ + uint32_t vq_map = cpu->sme_vq.map; + uint32_t vq_init = cpu->sme_vq.init; + uint32_t vq_supported = cpu->sme_vq.supported; + uint32_t vq; + + if (vq_map == 0) { + if (!cpu_isar_feature(aa64_sme, cpu)) { + cpu->isar.id_aa64smfr0 = 0; + return; + } + + /* TODO: KVM will require limitations via SMCR_EL2. */ + vq_map = vq_supported & ~vq_init; + + if (vq_map == 0) { + vq = ctz32(vq_supported) + 1; + error_setg(errp, "cannot disable sme%d", vq * 128); + error_append_hint(errp, "All SME vector lengths are disabled.\n"); + error_append_hint(errp, "With SME enabled, at least one " + "vector length must be enabled.\n"); + return; + } + } else { + if (!cpu_isar_feature(aa64_sme, cpu)) { + vq = 32 - clz32(vq_map); + error_setg(errp, "cannot enable sme%d", vq * 128); + error_append_hint(errp, "SME must be enabled to enable " + "vector lengths.\n"); + error_append_hint(errp, "Add sme=on to the CPU property list.\n"); + return; + } + /* TODO: KVM will require limitations via SMCR_EL2. */ + } + + cpu->sme_vq.map = vq_map; +} + +static bool cpu_arm_get_sme(Object *obj, Error **errp) { ARMCPU *cpu = ARM_CPU(obj); + return cpu_isar_feature(aa64_sme, cpu); +} + +static void cpu_arm_set_sme(Object *obj, bool value, Error **errp) +{ + ARMCPU *cpu = ARM_CPU(obj); + uint64_t t; + + t = cpu->isar.id_aa64pfr1; + t = FIELD_DP64(t, ID_AA64PFR1, SME, value); + cpu->isar.id_aa64pfr1 = t; +} + +static bool cpu_arm_get_sme_fa64(Object *obj, Error **errp) +{ + ARMCPU *cpu = ARM_CPU(obj); + return cpu_isar_feature(aa64_sme, cpu) && + cpu_isar_feature(aa64_sme_fa64, cpu); +} + +static void cpu_arm_set_sme_fa64(Object *obj, bool value, Error **errp) +{ + ARMCPU *cpu = ARM_CPU(obj); + uint64_t t; + + t = cpu->isar.id_aa64smfr0; + t = FIELD_DP64(t, ID_AA64SMFR0, FA64, value); + cpu->isar.id_aa64smfr0 = t; +} + +#ifdef CONFIG_USER_ONLY +/* Mirror linux /proc/sys/abi/{sve,sme}_default_vector_length. */ +static void cpu_arm_set_default_vec_len(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + uint32_t *ptr_default_vq = opaque; int32_t default_len, default_vq, remainder; if (!visit_type_int32(v, name, &default_len, errp)) { @@ -574,7 +504,7 @@ static void cpu_arm_set_sve_default_vec_len(Object *obj, Visitor *v, /* Undocumented, but the kernel allows -1 to indicate "maximum". */ if (default_len == -1) { - cpu->sve_default_vq = ARM_MAX_VQ; + *ptr_default_vq = ARM_MAX_VQ; return; } @@ -586,7 +516,11 @@ static void cpu_arm_set_sve_default_vec_len(Object *obj, Visitor *v, * and is the maximum architectural width of ZCR_ELx.LEN. */ if (remainder || default_vq < 1 || default_vq > 512) { - error_setg(errp, "cannot set sve-default-vector-length"); + ARMCPU *cpu = ARM_CPU(obj); + const char *which = + (ptr_default_vq == &cpu->sve_default_vq ? 
"sve" : "sme"); + + error_setg(errp, "cannot set %s-default-vector-length", which); if (remainder) { error_append_hint(errp, "Vector length not a multiple of 16\n"); } else if (default_vq < 1) { @@ -598,22 +532,23 @@ static void cpu_arm_set_sve_default_vec_len(Object *obj, Visitor *v, return; } - cpu->sve_default_vq = default_vq; + *ptr_default_vq = default_vq; } -static void cpu_arm_get_sve_default_vec_len(Object *obj, Visitor *v, - const char *name, void *opaque, - Error **errp) +static void cpu_arm_get_default_vec_len(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) { - ARMCPU *cpu = ARM_CPU(obj); - int32_t value = cpu->sve_default_vq * 16; + uint32_t *ptr_default_vq = opaque; + int32_t value = *ptr_default_vq * 16; visit_type_int32(v, name, &value, errp); } #endif -void aarch64_add_sve_properties(Object *obj) +static void aarch64_add_sve_properties(Object *obj) { + ARMCPU *cpu = ARM_CPU(obj); uint32_t vq; object_property_add_bool(obj, "sve", cpu_arm_get_sve, cpu_arm_set_sve); @@ -621,15 +556,41 @@ void aarch64_add_sve_properties(Object *obj) for (vq = 1; vq <= ARM_MAX_VQ; ++vq) { char name[8]; sprintf(name, "sve%d", vq * 128); - object_property_add(obj, name, "bool", cpu_arm_get_sve_vq, - cpu_arm_set_sve_vq, NULL, NULL); + object_property_add(obj, name, "bool", cpu_arm_get_vq, + cpu_arm_set_vq, NULL, &cpu->sve_vq); } #ifdef CONFIG_USER_ONLY /* Mirror linux /proc/sys/abi/sve_default_vector_length. */ object_property_add(obj, "sve-default-vector-length", "int32", - cpu_arm_get_sve_default_vec_len, - cpu_arm_set_sve_default_vec_len, NULL, NULL); + cpu_arm_get_default_vec_len, + cpu_arm_set_default_vec_len, NULL, + &cpu->sve_default_vq); +#endif +} + +static void aarch64_add_sme_properties(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + uint32_t vq; + + object_property_add_bool(obj, "sme", cpu_arm_get_sme, cpu_arm_set_sme); + object_property_add_bool(obj, "sme_fa64", cpu_arm_get_sme_fa64, + cpu_arm_set_sme_fa64); + + for (vq = 1; vq <= ARM_MAX_VQ; vq <<= 1) { + char name[8]; + sprintf(name, "sme%d", vq * 128); + object_property_add(obj, name, "bool", cpu_arm_get_vq, + cpu_arm_set_vq, NULL, &cpu->sme_vq); + } + +#ifdef CONFIG_USER_ONLY + /* Mirror linux /proc/sys/abi/sme_default_vector_length. */ + object_property_add(obj, "sme-default-vector-length", "int32", + cpu_arm_get_default_vec_len, + cpu_arm_set_default_vec_len, NULL, + &cpu->sme_default_vq); #endif } @@ -638,6 +599,16 @@ void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp) int arch_val = 0, impdef_val = 0; uint64_t t; + /* Exit early if PAuth is enabled, and fall through to disable it */ + if ((kvm_enabled() || hvf_enabled()) && cpu->prop_pauth) { + if (!cpu_isar_feature(aa64_pauth, cpu)) { + error_setg(errp, "'pauth' feature not supported by %s on this host", + kvm_enabled() ? "KVM" : "hvf"); + } + + return; + } + /* TODO: Handle HaveEnhancedPAC, HaveEnhancedPAC2, HaveFPAC. */ if (cpu->prop_pauth) { if (cpu->prop_pauth_impdef) { @@ -663,6 +634,424 @@ static Property arm_cpu_pauth_property = static Property arm_cpu_pauth_impdef_property = DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false); +static void aarch64_add_pauth_properties(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + /* Default to PAUTH on, with the architected algorithm on TCG. */ + qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_property); + if (kvm_enabled() || hvf_enabled()) { + /* + * Mirror PAuth support from the probed sysregs back into the + * property for KVM or hvf. Is it just a bit backward? Yes it is! 
+ * Note that prop_pauth is true whether the host CPU supports the + * architected QARMA5 algorithm or the IMPDEF one. We don't + * provide the separate pauth-impdef property for KVM or hvf, + * only for TCG. + */ + cpu->prop_pauth = cpu_isar_feature(aa64_pauth, cpu); + } else { + qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_impdef_property); + } +} + +static Property arm_cpu_lpa2_property = + DEFINE_PROP_BOOL("lpa2", ARMCPU, prop_lpa2, true); + +void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp) +{ + uint64_t t; + + /* + * We only install the property for tcg -cpu max; this is the + * only situation in which the cpu field can be true. + */ + if (!cpu->prop_lpa2) { + return; + } + + t = cpu->isar.id_aa64mmfr0; + t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 2); /* 16k pages w/ LPA2 */ + t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4, 1); /* 4k pages w/ LPA2 */ + t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 3); /* 16k stage2 w/ LPA2 */ + t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 3); /* 4k stage2 w/ LPA2 */ + cpu->isar.id_aa64mmfr0 = t; +} + +static void aarch64_a57_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + cpu->dtb_compatible = "arm,cortex-a57"; + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57; + cpu->midr = 0x411fd070; + cpu->revidr = 0x00000000; + cpu->reset_fpsid = 0x41034070; + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x12111111; + cpu->isar.mvfr2 = 0x00000043; + cpu->ctr = 0x8444c004; + cpu->reset_sctlr = 0x00c50838; + cpu->isar.id_pfr0 = 0x00000131; + cpu->isar.id_pfr1 = 0x00011011; + cpu->isar.id_dfr0 = 0x03010066; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x10101105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02102211; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00011142; + cpu->isar.id_isar5 = 0x00011121; + cpu->isar.id_isar6 = 0; + cpu->isar.id_aa64pfr0 = 0x00002222; + cpu->isar.id_aa64dfr0 = 0x10305106; + cpu->isar.id_aa64isar0 = 0x00011120; + cpu->isar.id_aa64mmfr0 = 0x00001124; + cpu->isar.dbgdidr = 0x3516d000; + cpu->isar.dbgdevid = 0x01110f13; + cpu->isar.dbgdevid1 = 0x2; + cpu->isar.reset_pmcr_el0 = 0x41013000; + cpu->clidr = 0x0a200023; + cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ + cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ + cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */ + cpu->dcz_blocksize = 4; /* 64 bytes */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; + cpu->gic_pribits = 5; + define_cortex_a72_a57_a53_cp_reginfo(cpu); +} + +static void aarch64_a53_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + cpu->dtb_compatible = "arm,cortex-a53"; + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A53; + cpu->midr = 
0x410fd034; + cpu->revidr = 0x00000000; + cpu->reset_fpsid = 0x41034070; + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x12111111; + cpu->isar.mvfr2 = 0x00000043; + cpu->ctr = 0x84448004; /* L1Ip = VIPT */ + cpu->reset_sctlr = 0x00c50838; + cpu->isar.id_pfr0 = 0x00000131; + cpu->isar.id_pfr1 = 0x00011011; + cpu->isar.id_dfr0 = 0x03010066; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x10101105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02102211; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00011142; + cpu->isar.id_isar5 = 0x00011121; + cpu->isar.id_isar6 = 0; + cpu->isar.id_aa64pfr0 = 0x00002222; + cpu->isar.id_aa64dfr0 = 0x10305106; + cpu->isar.id_aa64isar0 = 0x00011120; + cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */ + cpu->isar.dbgdidr = 0x3516d000; + cpu->isar.dbgdevid = 0x00110f13; + cpu->isar.dbgdevid1 = 0x1; + cpu->isar.reset_pmcr_el0 = 0x41033000; + cpu->clidr = 0x0a200023; + cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ + cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */ + cpu->ccsidr[2] = 0x707fe07a; /* 1024KB L2 cache */ + cpu->dcz_blocksize = 4; /* 64 bytes */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; + cpu->gic_pribits = 5; + define_cortex_a72_a57_a53_cp_reginfo(cpu); +} + +static void aarch64_a72_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + cpu->dtb_compatible = "arm,cortex-a72"; + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + cpu->midr = 0x410fd083; + cpu->revidr = 0x00000000; + cpu->reset_fpsid = 0x41034080; + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x12111111; + cpu->isar.mvfr2 = 0x00000043; + cpu->ctr = 0x8444c004; + cpu->reset_sctlr = 0x00c50838; + cpu->isar.id_pfr0 = 0x00000131; + cpu->isar.id_pfr1 = 0x00011011; + cpu->isar.id_dfr0 = 0x03010066; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x10201105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02102211; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00011142; + cpu->isar.id_isar5 = 0x00011121; + cpu->isar.id_aa64pfr0 = 0x00002222; + cpu->isar.id_aa64dfr0 = 0x10305106; + cpu->isar.id_aa64isar0 = 0x00011120; + cpu->isar.id_aa64mmfr0 = 0x00001124; + cpu->isar.dbgdidr = 0x3516d000; + cpu->isar.dbgdevid = 0x01110f13; + cpu->isar.dbgdevid1 = 0x2; + cpu->isar.reset_pmcr_el0 = 0x41023000; + cpu->clidr = 0x0a200023; + cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ + cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ + cpu->ccsidr[2] = 0x707fe07a; /* 1MB L2 cache */ + cpu->dcz_blocksize = 4; /* 64 bytes */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; + cpu->gic_pribits = 5; + define_cortex_a72_a57_a53_cp_reginfo(cpu); +} + +static void aarch64_a76_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + cpu->dtb_compatible = "arm,cortex-a76"; + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, 
ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + + /* Ordered by B2.4 AArch64 registers by functional group */ + cpu->clidr = 0x82000023; + cpu->ctr = 0x8444C004; + cpu->dcz_blocksize = 4; + cpu->isar.id_aa64dfr0 = 0x0000000010305408ull; + cpu->isar.id_aa64isar0 = 0x0000100010211120ull; + cpu->isar.id_aa64isar1 = 0x0000000000100001ull; + cpu->isar.id_aa64mmfr0 = 0x0000000000101122ull; + cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull; + cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull; + cpu->isar.id_aa64pfr0 = 0x1100000010111112ull; /* GIC filled in later */ + cpu->isar.id_aa64pfr1 = 0x0000000000000010ull; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_dfr0 = 0x04010088; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00010142; + cpu->isar.id_isar5 = 0x01011121; + cpu->isar.id_isar6 = 0x00000010; + cpu->isar.id_mmfr0 = 0x10201105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02122211; + cpu->isar.id_mmfr4 = 0x00021110; + cpu->isar.id_pfr0 = 0x10010131; + cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */ + cpu->isar.id_pfr2 = 0x00000011; + cpu->midr = 0x414fd0b1; /* r4p1 */ + cpu->revidr = 0; + + /* From B2.18 CCSIDR_EL1 */ + cpu->ccsidr[0] = 0x701fe01a; /* 64KB L1 dcache */ + cpu->ccsidr[1] = 0x201fe01a; /* 64KB L1 icache */ + cpu->ccsidr[2] = 0x707fe03a; /* 512KB L2 cache */ + + /* From B2.93 SCTLR_EL3 */ + cpu->reset_sctlr = 0x30c50838; + + /* From B4.23 ICH_VTR_EL2 */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; + cpu->gic_pribits = 5; + + /* From B5.1 AdvSIMD AArch64 register summary */ + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x13211111; + cpu->isar.mvfr2 = 0x00000043; + + /* From D5.1 AArch64 PMU register summary */ + cpu->isar.reset_pmcr_el0 = 0x410b3000; +} + +static void aarch64_a64fx_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + cpu->dtb_compatible = "arm,a64fx"; + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + cpu->midr = 0x461f0010; + cpu->revidr = 0x00000000; + cpu->ctr = 0x86668006; + cpu->reset_sctlr = 0x30000180; + cpu->isar.id_aa64pfr0 = 0x0000000101111111; /* No RAS Extensions */ + cpu->isar.id_aa64pfr1 = 0x0000000000000000; + cpu->isar.id_aa64dfr0 = 0x0000000010305408; + cpu->isar.id_aa64dfr1 = 0x0000000000000000; + cpu->id_aa64afr0 = 0x0000000000000000; + cpu->id_aa64afr1 = 0x0000000000000000; + cpu->isar.id_aa64mmfr0 = 0x0000000000001122; + cpu->isar.id_aa64mmfr1 = 0x0000000011212100; + cpu->isar.id_aa64mmfr2 = 0x0000000000001011; + cpu->isar.id_aa64isar0 = 0x0000000010211120; + cpu->isar.id_aa64isar1 = 0x0000000000010001; + cpu->isar.id_aa64zfr0 = 0x0000000000000000; + cpu->clidr = 0x0000000080000023; + cpu->ccsidr[0] = 0x7007e01c; /* 64KB L1 dcache */ + cpu->ccsidr[1] = 0x2007e01c; /* 64KB L1 icache */ + cpu->ccsidr[2] = 0x70ffe07c; /* 8MB L2 cache */ + cpu->dcz_blocksize = 6; /* 256 bytes */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; + cpu->gic_pribits = 
5; + + /* The A64FX supports only 128, 256 and 512 bit vector lengths */ + aarch64_add_sve_properties(obj); + cpu->sve_vq.supported = (1 << 0) /* 128bit */ + | (1 << 1) /* 256bit */ + | (1 << 3); /* 512bit */ + + cpu->isar.reset_pmcr_el0 = 0x46014040; + + /* TODO: Add A64FX specific HPC extension registers */ +} + +static void aarch64_neoverse_n1_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + cpu->dtb_compatible = "arm,neoverse-n1"; + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + + /* Ordered by B2.4 AArch64 registers by functional group */ + cpu->clidr = 0x82000023; + cpu->ctr = 0x8444c004; + cpu->dcz_blocksize = 4; + cpu->isar.id_aa64dfr0 = 0x0000000110305408ull; + cpu->isar.id_aa64isar0 = 0x0000100010211120ull; + cpu->isar.id_aa64isar1 = 0x0000000000100001ull; + cpu->isar.id_aa64mmfr0 = 0x0000000000101125ull; + cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull; + cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull; + cpu->isar.id_aa64pfr0 = 0x1100000010111112ull; /* GIC filled in later */ + cpu->isar.id_aa64pfr1 = 0x0000000000000020ull; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_dfr0 = 0x04010088; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00010142; + cpu->isar.id_isar5 = 0x01011121; + cpu->isar.id_isar6 = 0x00000010; + cpu->isar.id_mmfr0 = 0x10201105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02122211; + cpu->isar.id_mmfr4 = 0x00021110; + cpu->isar.id_pfr0 = 0x10010131; + cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */ + cpu->isar.id_pfr2 = 0x00000011; + cpu->midr = 0x414fd0c1; /* r4p1 */ + cpu->revidr = 0; + + /* From B2.23 CCSIDR_EL1 */ + cpu->ccsidr[0] = 0x701fe01a; /* 64KB L1 dcache */ + cpu->ccsidr[1] = 0x201fe01a; /* 64KB L1 icache */ + cpu->ccsidr[2] = 0x70ffe03a; /* 1MB L2 cache */ + + /* From B2.98 SCTLR_EL3 */ + cpu->reset_sctlr = 0x30c50838; + + /* From B4.23 ICH_VTR_EL2 */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; + cpu->gic_pribits = 5; + + /* From B5.1 AdvSIMD AArch64 register summary */ + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x13211111; + cpu->isar.mvfr2 = 0x00000043; + + /* From D5.1 AArch64 PMU register summary */ + cpu->isar.reset_pmcr_el0 = 0x410c3000; +} + +static void aarch64_host_initfn(Object *obj) +{ +#if defined(CONFIG_KVM) + ARMCPU *cpu = ARM_CPU(obj); + kvm_arm_set_cpu_features_from_host(cpu); + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + aarch64_add_sve_properties(obj); + aarch64_add_pauth_properties(obj); + } +#elif defined(CONFIG_HVF) + ARMCPU *cpu = ARM_CPU(obj); + hvf_arm_set_cpu_features_from_host(cpu); + aarch64_add_pauth_properties(obj); +#else + g_assert_not_reached(); +#endif +} + /* -cpu max: if KVM is enabled, like -cpu host (best possible with this host); * otherwise, a CPU with as many features enabled as our emulation supports. 
* The version of '-cpu max' for qemu-system-arm is defined in cpu.c; @@ -671,187 +1060,197 @@ static Property arm_cpu_pauth_impdef_property = static void aarch64_max_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + uint64_t t; + uint32_t u; - if (kvm_enabled()) { - kvm_arm_set_cpu_features_from_host(cpu); - } else { - uint64_t t; - uint32_t u; - aarch64_a57_initfn(obj); - - /* - * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real - * one and try to apply errata workarounds or use impdef features we - * don't provide. - * An IMPLEMENTER field of 0 means "reserved for software use"; - * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers - * to see which features are present"; - * the VARIANT, PARTNUM and REVISION fields are all implementation - * defined and we choose to define PARTNUM just in case guest - * code needs to distinguish this QEMU CPU from other software - * implementations, though this shouldn't be needed. - */ - t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0); - t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf); - t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q'); - t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0); - t = FIELD_DP64(t, MIDR_EL1, REVISION, 0); - cpu->midr = t; - - t = cpu->isar.id_aa64isar0; - t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */ - t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); - t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* SHA512 */ - t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1); - t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2); - t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1); - t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1); - t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1); - t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1); - t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1); - t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1); - t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* v8.5-CondM */ - t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */ - t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1); - cpu->isar.id_aa64isar0 = t; - - t = cpu->isar.id_aa64isar1; - t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2); - t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1); - t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1); - t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1); - t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1); - t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1); - t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1); - t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2); /* ARMv8.4-RCPC */ - t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1); - cpu->isar.id_aa64isar1 = t; - - t = cpu->isar.id_aa64pfr0; - t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1); - t = FIELD_DP64(t, ID_AA64PFR0, FP, 1); - t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1); - t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1); - t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1); - cpu->isar.id_aa64pfr0 = t; - - t = cpu->isar.id_aa64pfr1; - t = FIELD_DP64(t, ID_AA64PFR1, BT, 1); - t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2); - /* - * Begin with full support for MTE. This will be downgraded to MTE=0 - * during realize if the board provides no tag memory, much like - * we do for EL2 with the virtualization=on property. 
- */ - t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3); - cpu->isar.id_aa64pfr1 = t; - - t = cpu->isar.id_aa64mmfr0; - t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 5); /* PARange: 48 bits */ - cpu->isar.id_aa64mmfr0 = t; - - t = cpu->isar.id_aa64mmfr1; - t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */ - t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1); - t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1); - t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* ATS1E1 */ - t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* VMID16 */ - t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* TTS2UXN */ - cpu->isar.id_aa64mmfr1 = t; - - t = cpu->isar.id_aa64mmfr2; - t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1); - t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* TTCNP */ - t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* TTST */ - cpu->isar.id_aa64mmfr2 = t; - - t = cpu->isar.id_aa64zfr0; - t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1); - t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2); /* PMULL */ - t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1); - t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1); - t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1); - t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1); - t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1); - t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1); - t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1); - cpu->isar.id_aa64zfr0 = t; - - /* Replicate the same data to the 32-bit id registers. */ - u = cpu->isar.id_isar5; - u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */ - u = FIELD_DP32(u, ID_ISAR5, SHA1, 1); - u = FIELD_DP32(u, ID_ISAR5, SHA2, 1); - u = FIELD_DP32(u, ID_ISAR5, CRC32, 1); - u = FIELD_DP32(u, ID_ISAR5, RDM, 1); - u = FIELD_DP32(u, ID_ISAR5, VCMA, 1); - cpu->isar.id_isar5 = u; - - u = cpu->isar.id_isar6; - u = FIELD_DP32(u, ID_ISAR6, JSCVT, 1); - u = FIELD_DP32(u, ID_ISAR6, DP, 1); - u = FIELD_DP32(u, ID_ISAR6, FHM, 1); - u = FIELD_DP32(u, ID_ISAR6, SB, 1); - u = FIELD_DP32(u, ID_ISAR6, SPECRES, 1); - u = FIELD_DP32(u, ID_ISAR6, BF16, 1); - u = FIELD_DP32(u, ID_ISAR6, I8MM, 1); - cpu->isar.id_isar6 = u; - - u = cpu->isar.id_pfr0; - u = FIELD_DP32(u, ID_PFR0, DIT, 1); - cpu->isar.id_pfr0 = u; - - u = cpu->isar.id_pfr2; - u = FIELD_DP32(u, ID_PFR2, SSBS, 1); - cpu->isar.id_pfr2 = u; - - u = cpu->isar.id_mmfr3; - u = FIELD_DP32(u, ID_MMFR3, PAN, 2); /* ATS1E1 */ - cpu->isar.id_mmfr3 = u; - - u = cpu->isar.id_mmfr4; - u = FIELD_DP32(u, ID_MMFR4, HPDS, 1); /* AA32HPD */ - u = FIELD_DP32(u, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */ - u = FIELD_DP32(u, ID_MMFR4, CNP, 1); /* TTCNP */ - u = FIELD_DP32(u, ID_MMFR4, XNX, 1); /* TTS2UXN */ - cpu->isar.id_mmfr4 = u; - - t = cpu->isar.id_aa64dfr0; - t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */ - cpu->isar.id_aa64dfr0 = t; - - u = cpu->isar.id_dfr0; - u = FIELD_DP32(u, ID_DFR0, PERFMON, 5); /* v8.4-PMU */ - cpu->isar.id_dfr0 = u; - - u = cpu->isar.mvfr1; - u = FIELD_DP32(u, MVFR1, FPHP, 3); /* v8.2-FP16 */ - u = FIELD_DP32(u, MVFR1, SIMDHP, 2); /* v8.2-FP16 */ - cpu->isar.mvfr1 = u; - -#ifdef CONFIG_USER_ONLY - /* For usermode -cpu max we can use a larger and more efficient DCZ - * blocksize since we don't have to follow what the hardware does. - */ - cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */ - cpu->dcz_blocksize = 7; /* 512 bytes */ -#endif - - /* Default to PAUTH on, with the architected algorithm. 
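The rebuilt '-cpu max' body just below constructs MIDR_EL1 field by field with FIELD_DP64(), exactly as the removed version did. A compilable restatement of that deposit pattern with a locally defined deposit64; the field offsets follow the architectural MIDR layout, and the helper is a stand-in rather than QEMU's macro:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t deposit64(uint64_t reg, unsigned pos, unsigned len, uint64_t val)
    {
        uint64_t mask = ((1ull << len) - 1) << pos;
        return (reg & ~mask) | ((val << pos) & mask);
    }

    int main(void)
    {
        uint64_t midr = 0;
        midr = deposit64(midr, 24, 8, 0);     /* IMPLEMENTER: reserved for software */
        midr = deposit64(midr, 20, 4, 0);     /* VARIANT */
        midr = deposit64(midr, 16, 4, 0xf);   /* ARCHITECTURE: "check ID registers" */
        midr = deposit64(midr, 4, 12, 'Q');   /* PARTNUM: arbitrary, 'Q' for QEMU */
        midr = deposit64(midr, 0, 4, 0);      /* REVISION */

        assert(midr == 0x000f0510);           /* 0xf << 16 | 0x51 ('Q') << 4 */
        return 0;
    }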
*/ - qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_property); - qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_impdef_property); + if (kvm_enabled() || hvf_enabled()) { + /* With KVM or HVF, '-cpu max' is identical to '-cpu host' */ + aarch64_host_initfn(obj); + return; } + /* '-cpu max' for TCG: we currently do this as "A57 with extra things" */ + + aarch64_a57_initfn(obj); + + /* + * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real + * one and try to apply errata workarounds or use impdef features we + * don't provide. + * An IMPLEMENTER field of 0 means "reserved for software use"; + * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers + * to see which features are present"; + * the VARIANT, PARTNUM and REVISION fields are all implementation + * defined and we choose to define PARTNUM just in case guest + * code needs to distinguish this QEMU CPU from other software + * implementations, though this shouldn't be needed. + */ + t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0); + t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf); + t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q'); + t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0); + t = FIELD_DP64(t, MIDR_EL1, REVISION, 0); + cpu->midr = t; + + /* + * We're going to set FEAT_S2FWB, which mandates that CLIDR_EL1.{LoUU,LoUIS} + * are zero. + */ + u = cpu->clidr; + u = FIELD_DP32(u, CLIDR_EL1, LOUIS, 0); + u = FIELD_DP32(u, CLIDR_EL1, LOUU, 0); + cpu->clidr = u; + + t = cpu->isar.id_aa64isar0; + t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* FEAT_PMULL */ + t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); /* FEAT_SHA1 */ + t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* FEAT_SHA512 */ + t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1); + t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2); /* FEAT_LSE */ + t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1); /* FEAT_RDM */ + t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1); /* FEAT_SHA3 */ + t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1); /* FEAT_SM3 */ + t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1); /* FEAT_SM4 */ + t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1); /* FEAT_DotProd */ + t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1); /* FEAT_FHM */ + t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* FEAT_FlagM2 */ + t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */ + t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1); /* FEAT_RNG */ + cpu->isar.id_aa64isar0 = t; + + t = cpu->isar.id_aa64isar1; + t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2); /* FEAT_DPB2 */ + t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1); /* FEAT_JSCVT */ + t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1); /* FEAT_FCMA */ + t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2); /* FEAT_LRCPC2 */ + t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1); /* FEAT_FRINTTS */ + t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1); /* FEAT_SB */ + t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1); /* FEAT_SPECRES */ + t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1); /* FEAT_BF16 */ + t = FIELD_DP64(t, ID_AA64ISAR1, DGH, 1); /* FEAT_DGH */ + t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1); /* FEAT_I8MM */ + cpu->isar.id_aa64isar1 = t; + + t = cpu->isar.id_aa64pfr0; + t = FIELD_DP64(t, ID_AA64PFR0, FP, 1); /* FEAT_FP16 */ + t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1); /* FEAT_FP16 */ + t = FIELD_DP64(t, ID_AA64PFR0, RAS, 2); /* FEAT_RASv1p1 + FEAT_DoubleFault */ + t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1); + t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1); /* FEAT_SEL2 */ + t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1); /* FEAT_DIT */ + t = FIELD_DP64(t, ID_AA64PFR0, CSV2, 2); /* FEAT_CSV2_2 */ + t = FIELD_DP64(t, ID_AA64PFR0, CSV3, 1); /* FEAT_CSV3 */ + cpu->isar.id_aa64pfr0 = 
t; + + t = cpu->isar.id_aa64pfr1; + t = FIELD_DP64(t, ID_AA64PFR1, BT, 1); /* FEAT_BTI */ + t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2); /* FEAT_SSBS2 */ + /* + * Begin with full support for MTE. This will be downgraded to MTE=0 + * during realize if the board provides no tag memory, much like + * we do for EL2 with the virtualization=on property. + */ + t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3); /* FEAT_MTE3 */ + t = FIELD_DP64(t, ID_AA64PFR1, RAS_FRAC, 0); /* FEAT_RASv1p1 + FEAT_DoubleFault */ + t = FIELD_DP64(t, ID_AA64PFR1, SME, 1); /* FEAT_SME */ + t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_2 */ + cpu->isar.id_aa64pfr1 = t; + + t = cpu->isar.id_aa64mmfr0; + t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 6); /* FEAT_LPA: 52 bits */ + t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 1); /* 16k pages supported */ + t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 2); /* 16k stage2 supported */ + t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN64_2, 2); /* 64k stage2 supported */ + t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 2); /* 4k stage2 supported */ + cpu->isar.id_aa64mmfr0 = t; + + t = cpu->isar.id_aa64mmfr1; + t = FIELD_DP64(t, ID_AA64MMFR1, HAFDBS, 2); /* FEAT_HAFDBS */ + t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* FEAT_VMID16 */ + t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1); /* FEAT_VHE */ + t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* FEAT_HPDS */ + t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1); /* FEAT_LOR */ + t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* FEAT_PAN2 */ + t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* FEAT_XNX */ + t = FIELD_DP64(t, ID_AA64MMFR1, ETS, 1); /* FEAT_ETS */ + t = FIELD_DP64(t, ID_AA64MMFR1, HCX, 1); /* FEAT_HCX */ + cpu->isar.id_aa64mmfr1 = t; + + t = cpu->isar.id_aa64mmfr2; + t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* FEAT_TTCNP */ + t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1); /* FEAT_UAO */ + t = FIELD_DP64(t, ID_AA64MMFR2, IESB, 1); /* FEAT_IESB */ + t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1); /* FEAT_LVA */ + t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* FEAT_TTST */ + t = FIELD_DP64(t, ID_AA64MMFR2, IDS, 1); /* FEAT_IDST */ + t = FIELD_DP64(t, ID_AA64MMFR2, FWB, 1); /* FEAT_S2FWB */ + t = FIELD_DP64(t, ID_AA64MMFR2, TTL, 1); /* FEAT_TTL */ + t = FIELD_DP64(t, ID_AA64MMFR2, BBM, 2); /* FEAT_BBM at level 2 */ + t = FIELD_DP64(t, ID_AA64MMFR2, E0PD, 1); /* FEAT_E0PD */ + cpu->isar.id_aa64mmfr2 = t; + + t = cpu->isar.id_aa64zfr0; + t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1); + t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2); /* FEAT_SVE_PMULL128 */ + t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1); /* FEAT_SVE_BitPerm */ + t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1); /* FEAT_BF16 */ + t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1); /* FEAT_SVE_SHA3 */ + t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1); /* FEAT_SVE_SM4 */ + t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1); /* FEAT_I8MM */ + t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1); /* FEAT_F32MM */ + t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1); /* FEAT_F64MM */ + cpu->isar.id_aa64zfr0 = t; + + t = cpu->isar.id_aa64dfr0; + t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 9); /* FEAT_Debugv8p4 */ + t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 6); /* FEAT_PMUv3p5 */ + cpu->isar.id_aa64dfr0 = t; + + t = cpu->isar.id_aa64smfr0; + t = FIELD_DP64(t, ID_AA64SMFR0, F32F32, 1); /* FEAT_SME */ + t = FIELD_DP64(t, ID_AA64SMFR0, B16F32, 1); /* FEAT_SME */ + t = FIELD_DP64(t, ID_AA64SMFR0, F16F32, 1); /* FEAT_SME */ + t = FIELD_DP64(t, ID_AA64SMFR0, I8I32, 0xf); /* FEAT_SME */ + t = FIELD_DP64(t, ID_AA64SMFR0, F64F64, 1); /* FEAT_SME_F64F64 */ + t = FIELD_DP64(t, ID_AA64SMFR0, I16I64, 0xf); /* FEAT_SME_I16I64 
*/ + t = FIELD_DP64(t, ID_AA64SMFR0, FA64, 1); /* FEAT_SME_FA64 */ + cpu->isar.id_aa64smfr0 = t; + + /* Replicate the same data to the 32-bit id registers. */ + aa32_max_features(cpu); + +#ifdef CONFIG_USER_ONLY + /* + * For usermode -cpu max we can use a larger and more efficient DCZ + * blocksize since we don't have to follow what the hardware does. + */ + cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */ + cpu->dcz_blocksize = 7; /* 512 bytes */ +#endif + + cpu->sve_vq.supported = MAKE_64BIT_MASK(0, ARM_MAX_VQ); + cpu->sme_vq.supported = SVE_VQ_POW2_MAP; + + aarch64_add_pauth_properties(obj); aarch64_add_sve_properties(obj); + aarch64_add_sme_properties(obj); object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq, cpu_max_set_sve_max_vq, NULL, NULL); + qdev_property_add_static(DEVICE(obj), &arm_cpu_lpa2_property); } static const ARMCPUInfo aarch64_cpus[] = { + { .name = "cortex-a35", .initfn = aarch64_a35_initfn }, { .name = "cortex-a57", .initfn = aarch64_a57_initfn }, { .name = "cortex-a53", .initfn = aarch64_a53_initfn }, { .name = "cortex-a72", .initfn = aarch64_a72_initfn }, + { .name = "cortex-a76", .initfn = aarch64_a76_initfn }, + { .name = "a64fx", .initfn = aarch64_a64fx_initfn }, + { .name = "neoverse-n1", .initfn = aarch64_neoverse_n1_initfn }, { .name = "max", .initfn = aarch64_max_initfn }, +#if defined(CONFIG_KVM) || defined(CONFIG_HVF) + { .name = "host", .initfn = aarch64_host_initfn }, +#endif }; static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp) diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c index ed444bf436..9a2cef7d05 100644 --- a/target/arm/cpu_tcg.c +++ b/target/arm/cpu_tcg.c @@ -18,11 +18,140 @@ #if !defined(CONFIG_USER_ONLY) #include "hw/boards.h" #endif +#include "cpregs.h" + + +/* Share AArch32 -cpu max features with AArch64. 
*/ +void aa32_max_features(ARMCPU *cpu) +{ + uint32_t t; + + /* Add additional features supported by QEMU */ + t = cpu->isar.id_isar5; + t = FIELD_DP32(t, ID_ISAR5, AES, 2); /* FEAT_PMULL */ + t = FIELD_DP32(t, ID_ISAR5, SHA1, 1); /* FEAT_SHA1 */ + t = FIELD_DP32(t, ID_ISAR5, SHA2, 1); /* FEAT_SHA256 */ + t = FIELD_DP32(t, ID_ISAR5, CRC32, 1); + t = FIELD_DP32(t, ID_ISAR5, RDM, 1); /* FEAT_RDM */ + t = FIELD_DP32(t, ID_ISAR5, VCMA, 1); /* FEAT_FCMA */ + cpu->isar.id_isar5 = t; + + t = cpu->isar.id_isar6; + t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1); /* FEAT_JSCVT */ + t = FIELD_DP32(t, ID_ISAR6, DP, 1); /* Feat_DotProd */ + t = FIELD_DP32(t, ID_ISAR6, FHM, 1); /* FEAT_FHM */ + t = FIELD_DP32(t, ID_ISAR6, SB, 1); /* FEAT_SB */ + t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1); /* FEAT_SPECRES */ + t = FIELD_DP32(t, ID_ISAR6, BF16, 1); /* FEAT_AA32BF16 */ + t = FIELD_DP32(t, ID_ISAR6, I8MM, 1); /* FEAT_AA32I8MM */ + cpu->isar.id_isar6 = t; + + t = cpu->isar.mvfr1; + t = FIELD_DP32(t, MVFR1, FPHP, 3); /* FEAT_FP16 */ + t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* FEAT_FP16 */ + cpu->isar.mvfr1 = t; + + t = cpu->isar.mvfr2; + t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */ + t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */ + cpu->isar.mvfr2 = t; + + t = cpu->isar.id_mmfr3; + t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* FEAT_PAN2 */ + cpu->isar.id_mmfr3 = t; + + t = cpu->isar.id_mmfr4; + t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* FEAT_AA32HPD */ + t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */ + t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* FEAT_TTCNP */ + t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* FEAT_XNX */ + cpu->isar.id_mmfr4 = t; + + t = cpu->isar.id_mmfr5; + t = FIELD_DP32(t, ID_MMFR5, ETS, 1); /* FEAT_ETS */ + cpu->isar.id_mmfr5 = t; + + t = cpu->isar.id_pfr0; + t = FIELD_DP32(t, ID_PFR0, CSV2, 2); /* FEAT_CVS2 */ + t = FIELD_DP32(t, ID_PFR0, DIT, 1); /* FEAT_DIT */ + t = FIELD_DP32(t, ID_PFR0, RAS, 1); /* FEAT_RAS */ + cpu->isar.id_pfr0 = t; + + t = cpu->isar.id_pfr2; + t = FIELD_DP32(t, ID_PFR2, CSV3, 1); /* FEAT_CSV3 */ + t = FIELD_DP32(t, ID_PFR2, SSBS, 1); /* FEAT_SSBS */ + cpu->isar.id_pfr2 = t; + + t = cpu->isar.id_dfr0; + t = FIELD_DP32(t, ID_DFR0, COPDBG, 9); /* FEAT_Debugv8p4 */ + t = FIELD_DP32(t, ID_DFR0, COPSDBG, 9); /* FEAT_Debugv8p4 */ + t = FIELD_DP32(t, ID_DFR0, PERFMON, 6); /* FEAT_PMUv3p5 */ + cpu->isar.id_dfr0 = t; +} + +#ifndef CONFIG_USER_ONLY +static uint64_t l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + ARMCPU *cpu = env_archcpu(env); + + /* Number of cores is in [25:24]; otherwise we RAZ */ + return (cpu->core_count - 1) << 24; +} + +static const ARMCPRegInfo cortex_a72_a57_a53_cp_reginfo[] = { + { .name = "L2CTLR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 2, + .access = PL1_RW, .readfn = l2ctlr_read, + .writefn = arm_cp_write_ignore }, + { .name = "L2CTLR", + .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 2, + .access = PL1_RW, .readfn = l2ctlr_read, + .writefn = arm_cp_write_ignore }, + { .name = "L2ECTLR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 3, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "L2ECTLR", + .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 3, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "L2ACTLR", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64, + 
.opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPUACTLR", + .cp = 15, .opc1 = 0, .crm = 15, + .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, + { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 1, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPUECTLR", + .cp = 15, .opc1 = 1, .crm = 15, + .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, + { .name = "CPUMERRSR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 2, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPUMERRSR", + .cp = 15, .opc1 = 2, .crm = 15, + .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, + { .name = "L2MERRSR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 3, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "L2MERRSR", + .cp = 15, .opc1 = 3, .crm = 15, + .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, +}; + +void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) +{ + define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo); +} +#endif /* !CONFIG_USER_ONLY */ /* CPU models. These are not needed for the AArch64 linux-user build. */ #if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) -#ifdef CONFIG_TCG +#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG) static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { CPUClass *cc = CPU_GET_CLASS(cs); @@ -46,7 +175,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request) } return ret; } -#endif /* CONFIG_TCG */ +#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */ static void arm926_initfn(Object *obj) { @@ -263,7 +392,6 @@ static const ARMCPRegInfo cortexa8_cp_reginfo[] = { .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "L2AUXCR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2, .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - REGINFO_SENTINEL }; static void cortex_a8_initfn(Object *obj) @@ -301,6 +429,7 @@ static void cortex_a8_initfn(Object *obj) cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */ cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */ cpu->reset_auxcr = 2; + cpu->isar.reset_pmcr_el0 = 0x41002000; define_arm_cp_regs(cpu, cortexa8_cp_reginfo); } @@ -331,7 +460,6 @@ static const ARMCPRegInfo cortexa9_cp_reginfo[] = { .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST }, { .name = "TLB_ATTR", .cp = 15, .crn = 15, .crm = 7, .opc1 = 5, .opc2 = 2, .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST }, - REGINFO_SENTINEL }; static void cortex_a9_initfn(Object *obj) @@ -373,6 +501,7 @@ static void cortex_a9_initfn(Object *obj) cpu->clidr = (1 << 27) | (1 << 24) | 3; cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */ cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. 
*/ + cpu->isar.reset_pmcr_el0 = 0x41093000; define_arm_cp_regs(cpu, cortexa9_cp_reginfo); } @@ -397,7 +526,6 @@ static const ARMCPRegInfo cortexa15_cp_reginfo[] = { #endif { .name = "L2ECTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 3, .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - REGINFO_SENTINEL }; static void cortex_a7_initfn(Object *obj) @@ -439,10 +567,13 @@ static void cortex_a7_initfn(Object *obj) cpu->isar.id_isar3 = 0x11112131; cpu->isar.id_isar4 = 0x10011142; cpu->isar.dbgdidr = 0x3515f005; + cpu->isar.dbgdevid = 0x01110f13; + cpu->isar.dbgdevid1 = 0x1; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */ + cpu->isar.reset_pmcr_el0 = 0x41072000; define_arm_cp_regs(cpu, cortexa15_cp_reginfo); /* Same as A15 */ } @@ -461,7 +592,9 @@ static void cortex_a15_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_EL3); set_feature(&cpu->env, ARM_FEATURE_PMU); cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15; - cpu->midr = 0x412fc0f1; + /* r4p0 cpu, not requiring expensive tlb flush errata */ + cpu->midr = 0x414fc0f0; + cpu->revidr = 0x0; cpu->reset_fpsid = 0x410430f0; cpu->isar.mvfr0 = 0x10110222; cpu->isar.mvfr1 = 0x11111111; @@ -481,10 +614,13 @@ static void cortex_a15_initfn(Object *obj) cpu->isar.id_isar3 = 0x11112131; cpu->isar.id_isar4 = 0x10011142; cpu->isar.dbgdidr = 0x3515f021; + cpu->isar.dbgdevid = 0x01110f13; + cpu->isar.dbgdevid1 = 0x0; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */ + cpu->isar.reset_pmcr_el0 = 0x410F3000; define_arm_cp_regs(cpu, cortexa15_cp_reginfo); } @@ -654,12 +790,9 @@ static void cortex_m55_initfn(Object *obj) cpu->revidr = 0; cpu->pmsav7_dregion = 16; cpu->sau_sregion = 8; - /* - * These are the MVFR* values for the FPU, no MVE configuration; - * we will update them later when we implement MVE - */ + /* These are the MVFR* values for the FPU + full MVE configuration */ cpu->isar.mvfr0 = 0x10110221; - cpu->isar.mvfr1 = 0x12100011; + cpu->isar.mvfr1 = 0x12100211; cpu->isar.mvfr2 = 0x00000040; cpu->isar.id_pfr0 = 0x20000030; cpu->isar.id_pfr1 = 0x00000230; @@ -688,7 +821,6 @@ static const ARMCPRegInfo cortexr5_cp_reginfo[] = { .access = PL1_RW, .type = ARM_CP_CONST }, { .name = "DCACHE_INVAL", .cp = 15, .opc1 = 0, .crn = 15, .crm = 5, .opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP }, - REGINFO_SENTINEL }; static void cortex_r5_initfn(Object *obj) @@ -717,6 +849,7 @@ static void cortex_r5_initfn(Object *obj) cpu->isar.id_isar6 = 0x0; cpu->mp_is_up = true; cpu->pmsav7_dregion = 16; + cpu->isar.reset_pmcr_el0 = 0x41151800; define_arm_cp_regs(cpu, cortexr5_cp_reginfo); } @@ -901,11 +1034,15 @@ static void pxa270c5_initfn(Object *obj) static const struct TCGCPUOps arm_v7m_tcg_ops = { .initialize = arm_translate_init, .synchronize_from_tb = arm_cpu_synchronize_from_tb, - .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt, - .tlb_fill = arm_cpu_tlb_fill, .debug_excp_handler = arm_debug_excp_handler, + .restore_state_to_opc = arm_restore_state_to_opc, -#if !defined(CONFIG_USER_ONLY) +#ifdef CONFIG_USER_ONLY + .record_sigsegv = arm_cpu_record_sigsegv, + .record_sigbus = arm_cpu_record_sigbus, +#else + .tlb_fill = arm_cpu_tlb_fill, + .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt, .do_interrupt = arm_v7m_cpu_do_interrupt, .do_transaction_failed = arm_cpu_do_transaction_failed, 
.do_unaligned_access = arm_cpu_do_unaligned_access, @@ -939,70 +1076,58 @@ static void arm_max_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); - cortex_a15_initfn(obj); + /* aarch64_a57_initfn, advertising none of the aarch64 features */ + cpu->dtb_compatible = "arm,cortex-a57"; + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + cpu->midr = 0x411fd070; + cpu->revidr = 0x00000000; + cpu->reset_fpsid = 0x41034070; + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x12111111; + cpu->isar.mvfr2 = 0x00000043; + cpu->ctr = 0x8444c004; + cpu->reset_sctlr = 0x00c50838; + cpu->isar.id_pfr0 = 0x00000131; + cpu->isar.id_pfr1 = 0x00011011; + cpu->isar.id_dfr0 = 0x03010066; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x10101105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02102211; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00011142; + cpu->isar.id_isar5 = 0x00011121; + cpu->isar.id_isar6 = 0; + cpu->isar.dbgdidr = 0x3516d000; + cpu->isar.dbgdevid = 0x00110f13; + cpu->isar.dbgdevid1 = 0x2; + cpu->isar.reset_pmcr_el0 = 0x41013000; + cpu->clidr = 0x0a200023; + cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ + cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ + cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */ + define_cortex_a72_a57_a53_cp_reginfo(cpu); - /* old-style VFP short-vector support */ - cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1); + aa32_max_features(cpu); #ifdef CONFIG_USER_ONLY /* - * We don't set these in system emulation mode for the moment, - * since we don't correctly set (all of) the ID registers to - * advertise them. + * Break with true ARMv8 and add back old-style VFP short-vector support. + * Only do this for user-mode, where -cpu max is the default, so that + * older v6 and v7 programs are more likely to work without adjustment. 
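+     * (A real ARMv8 CPU reports MVFR0.FPShVec as 0, since v8 floating
+     * point does not include short vectors, so ID-register-aware guests
+     * will not expect this behaviour.)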
*/ - set_feature(&cpu->env, ARM_FEATURE_V8); - { - uint32_t t; - - t = cpu->isar.id_isar5; - t = FIELD_DP32(t, ID_ISAR5, AES, 2); - t = FIELD_DP32(t, ID_ISAR5, SHA1, 1); - t = FIELD_DP32(t, ID_ISAR5, SHA2, 1); - t = FIELD_DP32(t, ID_ISAR5, CRC32, 1); - t = FIELD_DP32(t, ID_ISAR5, RDM, 1); - t = FIELD_DP32(t, ID_ISAR5, VCMA, 1); - cpu->isar.id_isar5 = t; - - t = cpu->isar.id_isar6; - t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1); - t = FIELD_DP32(t, ID_ISAR6, DP, 1); - t = FIELD_DP32(t, ID_ISAR6, FHM, 1); - t = FIELD_DP32(t, ID_ISAR6, SB, 1); - t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1); - t = FIELD_DP32(t, ID_ISAR6, BF16, 1); - t = FIELD_DP32(t, ID_ISAR6, I8MM, 1); - cpu->isar.id_isar6 = t; - - t = cpu->isar.mvfr1; - t = FIELD_DP32(t, MVFR1, FPHP, 3); /* v8.2-FP16 */ - t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* v8.2-FP16 */ - cpu->isar.mvfr1 = t; - - t = cpu->isar.mvfr2; - t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */ - t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */ - cpu->isar.mvfr2 = t; - - t = cpu->isar.id_mmfr3; - t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */ - cpu->isar.id_mmfr3 = t; - - t = cpu->isar.id_mmfr4; - t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */ - t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */ - t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */ - t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */ - cpu->isar.id_mmfr4 = t; - - t = cpu->isar.id_pfr0; - t = FIELD_DP32(t, ID_PFR0, DIT, 1); - cpu->isar.id_pfr0 = t; - - t = cpu->isar.id_pfr2; - t = FIELD_DP32(t, ID_PFR2, SSBS, 1); - cpu->isar.id_pfr2 = t; - } -#endif /* CONFIG_USER_ONLY */ + cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1); +#endif } #endif /* !TARGET_AARCH64 */ diff --git a/target/arm/crypto_helper.c b/target/arm/crypto_helper.c index 28a84c2dbd..d28690321f 100644 --- a/target/arm/crypto_helper.c +++ b/target/arm/crypto_helper.c @@ -15,6 +15,7 @@ #include "exec/helper-proto.h" #include "tcg/tcg-gvec-desc.h" #include "crypto/aes.h" +#include "crypto/sm4.h" #include "vec_internal.h" union CRYPTO_STATE { @@ -23,7 +24,7 @@ union CRYPTO_STATE { uint64_t l[2]; }; -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN #define CR_ST_BYTE(state, i) ((state).bytes[(15 - (i)) ^ 8]) #define CR_ST_WORD(state, i) ((state).words[(3 - (i)) ^ 2]) #else @@ -694,41 +695,6 @@ DO_SM3TT(crypto_sm3tt2b, 3) #undef DO_SM3TT -static uint8_t const sm4_sbox[] = { - 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, - 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05, - 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, - 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99, - 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, - 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62, - 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, - 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6, - 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, - 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8, - 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, - 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35, - 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, - 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87, - 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, - 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e, - 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, - 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1, - 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, - 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3, - 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, - 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f, - 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, - 0x03, 
0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51, - 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, - 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8, - 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, - 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0, - 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, - 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84, - 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, - 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48, -}; - static void do_crypto_sm4e(uint64_t *rd, uint64_t *rn, uint64_t *rm) { union CRYPTO_STATE d = { .l = { rn[0], rn[1] } }; diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c index 2983e36dd3..c21739242c 100644 --- a/target/arm/debug_helper.c +++ b/target/arm/debug_helper.c @@ -6,11 +6,163 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "internals.h" +#include "cpregs.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" + +/* Return the Exception Level targeted by debug exceptions. */ +static int arm_debug_target_el(CPUARMState *env) +{ + bool secure = arm_is_secure(env); + bool route_to_el2 = false; + + if (arm_is_el2_enabled(env)) { + route_to_el2 = env->cp15.hcr_el2 & HCR_TGE || + env->cp15.mdcr_el2 & MDCR_TDE; + } + + if (route_to_el2) { + return 2; + } else if (arm_feature(env, ARM_FEATURE_EL3) && + !arm_el_is_aa64(env, 3) && secure) { + return 3; + } else { + return 1; + } +} + +/* + * Raise an exception to the debug target el. + * Modify syndrome to indicate when origin and target EL are the same. + */ +G_NORETURN static void +raise_exception_debug(CPUARMState *env, uint32_t excp, uint32_t syndrome) +{ + int debug_el = arm_debug_target_el(env); + int cur_el = arm_current_el(env); + + /* + * If singlestep is targeting a lower EL than the current one, then + * DisasContext.ss_active must be false and we can never get here. + * Similarly for watchpoint and breakpoint matches. + */ + assert(debug_el >= cur_el); + syndrome |= (debug_el == cur_el) << ARM_EL_EC_SHIFT; + raise_exception(env, excp, syndrome, debug_el); +} + +/* See AArch64.GenerateDebugExceptionsFrom() in ARM ARM pseudocode */ +static bool aa64_generate_debug_exceptions(CPUARMState *env) +{ + int cur_el = arm_current_el(env); + int debug_el; + + if (cur_el == 3) { + return false; + } + + /* MDCR_EL3.SDD disables debug events from Secure state */ + if (arm_is_secure_below_el3(env) + && extract32(env->cp15.mdcr_el3, 16, 1)) { + return false; + } + + /* + * Same EL to same EL debug exceptions need MDSCR_KDE enabled + * while not masking the (D)ebug bit in DAIF. + */ + debug_el = arm_debug_target_el(env); + + if (cur_el == debug_el) { + return extract32(env->cp15.mdscr_el1, 13, 1) + && !(env->daif & PSTATE_D); + } + + /* Otherwise the debug target needs to be a higher EL */ + return debug_el > cur_el; +} + +static bool aa32_generate_debug_exceptions(CPUARMState *env) +{ + int el = arm_current_el(env); + + if (el == 0 && arm_el_is_aa64(env, 1)) { + return aa64_generate_debug_exceptions(env); + } + + if (arm_is_secure(env)) { + int spd; + + if (el == 0 && (env->cp15.sder & 1)) { + /* + * SDER.SUIDEN means debug exceptions from Secure EL0 + * are always enabled. Otherwise they are controlled by + * SDCR.SPD like those from other Secure ELs. + */ + return true; + } + + spd = extract32(env->cp15.mdcr_el3, 14, 2); + switch (spd) { + case 1: + /* SPD == 0b01 is reserved, but behaves as 0b00. */ + case 0: + /* + * For 0b00 we return true if external secure invasive debug + * is enabled. 
On real hardware this is controlled by external + * signals to the core. QEMU always permits debug, and behaves + * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high. + */ + return true; + case 2: + return false; + case 3: + return true; + } + } + + return el != 2; +} + +/* + * Return true if debugging exceptions are currently enabled. + * This corresponds to what in ARM ARM pseudocode would be + * if UsingAArch32() then + * return AArch32.GenerateDebugExceptions() + * else + * return AArch64.GenerateDebugExceptions() + * We choose to push the if() down into this function for clarity, + * since the pseudocode has it at all callsites except for the one in + * CheckSoftwareStep(), where it is elided because both branches would + * always return the same value. + */ +bool arm_generate_debug_exceptions(CPUARMState *env) +{ + if ((env->cp15.oslsr_el1 & 1) || (env->cp15.osdlr_el1 & 1)) { + return false; + } + if (is_a64(env)) { + return aa64_generate_debug_exceptions(env); + } else { + return aa32_generate_debug_exceptions(env); + } +} + +/* + * Is single-stepping active? (Note that the "is EL_D AArch64?" check + * implicitly means this always returns false in pre-v8 CPUs.) + */ +bool arm_singlestep_active(CPUARMState *env) +{ + return extract32(env->cp15.mdscr_el1, 0, 1) + && arm_el_is_aa64(env, arm_debug_target_el(env)) + && arm_generate_debug_exceptions(env); +} + /* Return true if the linked breakpoint entry lbn passes its checks */ static bool linked_bp_matches(ARMCPU *cpu, int lbn) { @@ -143,9 +295,9 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp) * Non-Secure to simplify the code slightly compared to the full * table in the ARM ARM. */ - pac = extract64(cr, 1, 2); - hmc = extract64(cr, 13, 1); - ssc = extract64(cr, 14, 2); + pac = FIELD_EX64(cr, DBGWCR, PAC); + hmc = FIELD_EX64(cr, DBGWCR, HMC); + ssc = FIELD_EX64(cr, DBGWCR, SSC); switch (ssc) { case 0: @@ -184,8 +336,8 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp) g_assert_not_reached(); } - wt = extract64(cr, 20, 1); - lbn = extract64(cr, 16, 4); + wt = FIELD_EX64(cr, DBGWCR, WT); + lbn = FIELD_EX64(cr, DBGWCR, LBN); if (wt && !linked_bp_matches(cpu, lbn)) { return false; @@ -220,6 +372,7 @@ bool arm_debug_check_breakpoint(CPUState *cs) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; + target_ulong pc; int n; /* @@ -231,6 +384,28 @@ bool arm_debug_check_breakpoint(CPUState *cs) return false; } + /* + * Single-step exceptions have priority over breakpoint exceptions. + * If single-step state is active-pending, suppress the bp. + */ + if (arm_singlestep_active(env) && !(env->pstate & PSTATE_SS)) { + return false; + } + + /* + * PC alignment faults have priority over breakpoint exceptions. + */ + pc = is_a64(env) ? env->pc : env->regs[15]; + if ((is_a64(env) || !env->thumb) && (pc & 3) != 0) { + return false; + } + + /* + * Instruction aborts have priority over breakpoint exceptions. + * TODO: We would need to look up the page for PC and verify that + * it is present and executable. + */ + for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) { if (bp_wp_matches(cpu, n, false)) { return true; @@ -250,6 +425,32 @@ bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp) return check_watchpoints(cpu); } +/* + * Return the FSR value for a debug exception (watchpoint, hardware + * breakpoint or BKPT insn) targeting the specified exception level. 
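+ * The FSR format must match the translation regime in use at the target
+ * EL: we return the long-descriptor (LPAE) encoding when that EL is
+ * AArch64 or is EL2, or when an AArch32 EL with LPAE has TTBCR.EAE set,
+ * and the short-descriptor encoding otherwise.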
+ */ +static uint32_t arm_debug_exception_fsr(CPUARMState *env) +{ + ARMMMUFaultInfo fi = { .type = ARMFault_Debug }; + int target_el = arm_debug_target_el(env); + bool using_lpae = false; + + if (target_el == 2 || arm_el_is_aa64(env, target_el)) { + using_lpae = true; + } else { + if (arm_feature(env, ARM_FEATURE_LPAE) && + (env->cp15.tcr_el[target_el] & TTBCR_EAE)) { + using_lpae = true; + } + } + + if (using_lpae) { + return arm_fi_to_lfsc(&fi); + } else { + return arm_fi_to_sfsc(&fi); + } +} + void arm_debug_excp_handler(CPUState *cs) { /* @@ -263,19 +464,16 @@ void arm_debug_excp_handler(CPUState *cs) if (wp_hit) { if (wp_hit->flags & BP_CPU) { bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0; - bool same_el = arm_debug_target_el(env) == arm_current_el(env); cs->watchpoint_hit = NULL; env->exception.fsr = arm_debug_exception_fsr(env); env->exception.vaddress = wp_hit->hitaddr; - raise_exception(env, EXCP_DATA_ABORT, - syn_watchpoint(same_el, 0, wnr), - arm_debug_target_el(env)); + raise_exception_debug(env, EXCP_DATA_ABORT, + syn_watchpoint(0, 0, wnr)); } } else { uint64_t pc = is_a64(env) ? env->pc : env->regs[15]; - bool same_el = (arm_debug_target_el(env) == arm_current_el(env)); /* * (1) GDB breakpoints should be handled first. @@ -295,9 +493,618 @@ void arm_debug_excp_handler(CPUState *cs) * exception/security level. */ env->exception.vaddress = 0; - raise_exception(env, EXCP_PREFETCH_ABORT, - syn_breakpoint(same_el), - arm_debug_target_el(env)); + raise_exception_debug(env, EXCP_PREFETCH_ABORT, syn_breakpoint(0)); + } +} + +/* + * Raise an EXCP_BKPT with the specified syndrome register value, + * targeting the correct exception level for debug exceptions. + */ +void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome) +{ + int debug_el = arm_debug_target_el(env); + int cur_el = arm_current_el(env); + + /* FSR will only be used if the debug target EL is AArch32. */ + env->exception.fsr = arm_debug_exception_fsr(env); + /* + * FAR is UNKNOWN: clear vaddress to avoid potentially exposing + * values to the guest that it shouldn't be able to see at its + * exception/security level. + */ + env->exception.vaddress = 0; + /* + * Other kinds of architectural debug exception are ignored if + * they target an exception level below the current one (in QEMU + * this is checked by arm_generate_debug_exceptions()). Breakpoint + * instructions are special because they always generate an exception + * to somewhere: if they can't go to the configured debug exception + * level they are taken to the current exception level. + */ + if (debug_el < cur_el) { + debug_el = cur_el; + } + raise_exception(env, EXCP_BKPT, syndrome, debug_el); +} + +void HELPER(exception_swstep)(CPUARMState *env, uint32_t syndrome) +{ + raise_exception_debug(env, EXCP_UDEF, syndrome); +} + +/* + * Check for traps to "powerdown debug" registers, which are controlled + * by MDCR.TDOSA + */ +static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + int el = arm_current_el(env); + uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); + bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) || + (arm_hcr_el2_eff(env) & HCR_TGE); + + if (el < 2 && mdcr_el2_tdosa) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +/* + * Check for traps to "debug ROM" registers, which are controlled + * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3. 
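+ * As with the other debug register trap checks, MDCR_EL2.TDE and
+ * HCR_EL2.TGE also force the trap to EL2.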
+ */ +static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + int el = arm_current_el(env); + uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); + bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) || + (arm_hcr_el2_eff(env) & HCR_TGE); + + if (el < 2 && mdcr_el2_tdra) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +/* + * Check for traps to general debug registers, which are controlled + * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3. + */ +static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + int el = arm_current_el(env); + uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); + bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) || + (arm_hcr_el2_eff(env) & HCR_TGE); + + if (el < 2 && mdcr_el2_tda) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * Writes to OSLAR_EL1 may update the OS lock status, which can be + * read via a bit in OSLSR_EL1. + */ + int oslock; + + if (ri->state == ARM_CP_STATE_AA32) { + oslock = (value == 0xC5ACCE55); + } else { + oslock = value & 1; + } + + env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); +} + +static void osdlr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + /* + * Only defined bit is bit 0 (DLK); if Feat_DoubleLock is not + * implemented this is RAZ/WI. + */ + if(arm_feature(env, ARM_FEATURE_AARCH64) + ? cpu_isar_feature(aa64_doublelock, cpu) + : cpu_isar_feature(aa32_doublelock, cpu)) { + env->cp15.osdlr_el1 = value & 1; + } +} + +static const ARMCPRegInfo debug_cp_reginfo[] = { + /* + * DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped + * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; + * unlike DBGDRAR it is never accessible from EL0. + * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 + * accessor. + */ + { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL0_R, .accessfn = access_tdra, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, + .access = PL1_R, .accessfn = access_tdra, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL0_R, .accessfn = access_tdra, + .type = ARM_CP_CONST, .resetvalue = 0 }, + /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ + { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, + .access = PL1_RW, .accessfn = access_tda, + .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), + .resetvalue = 0 }, + /* + * MDCCSR_EL0[30:29] map to EDSCR[30:29]. Simply RAZ as the external + * Debug Communication Channel is not implemented. + */ + { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0, + .access = PL0_R, .accessfn = access_tda, + .type = ARM_CP_CONST, .resetvalue = 0 }, + /* + * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2]. Map all bits as + * it is unlikely a guest will care. + * We don't implement the configurable EL0 access. 
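+     * DBGDSCRint is the read-only internal view of the debug status and
+     * control register; the writable external view, DBGDSCRext, is the
+     * 32-bit alias of MDSCR_EL1 defined above.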
+ */ + { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32, + .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, + .type = ARM_CP_ALIAS, + .access = PL1_R, .accessfn = access_tda, + .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, + { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, + .access = PL1_W, .type = ARM_CP_NO_RAW, + .accessfn = access_tdosa, + .writefn = oslar_write }, + { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, + .access = PL1_R, .resetvalue = 10, + .accessfn = access_tdosa, + .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, + /* Dummy OSDLR_EL1: 32-bit Linux will read this */ + { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, + .access = PL1_RW, .accessfn = access_tdosa, + .writefn = osdlr_write, + .fieldoffset = offsetof(CPUARMState, cp15.osdlr_el1) }, + /* + * Dummy DBGVCR: Linux wants to clear this on startup, but we don't + * implement vector catch debug events yet. + */ + { .name = "DBGVCR", + .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tda, + .type = ARM_CP_NOP }, + /* + * Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor + * to save and restore a 32-bit guest's DBGVCR) + */ + { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, + .access = PL2_RW, .accessfn = access_tda, + .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP }, + /* + * Dummy MDCCINT_EL1, since we don't implement the Debug Communications + * Channel but Linux may try to access this register. The 32-bit + * alias is DBGDCCINT. + */ + { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tda, + .type = ARM_CP_NOP }, +}; + +static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { + /* 64 bit access versions of the (dummy) debug registers */ + { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, + .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, + { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, + .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, +}; + +void hw_watchpoint_update(ARMCPU *cpu, int n) +{ + CPUARMState *env = &cpu->env; + vaddr len = 0; + vaddr wvr = env->cp15.dbgwvr[n]; + uint64_t wcr = env->cp15.dbgwcr[n]; + int mask; + int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; + + if (env->cpu_watchpoint[n]) { + cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); + env->cpu_watchpoint[n] = NULL; + } + + if (!FIELD_EX64(wcr, DBGWCR, E)) { + /* E bit clear : watchpoint disabled */ + return; + } + + switch (FIELD_EX64(wcr, DBGWCR, LSC)) { + case 0: + /* LSC 00 is reserved and must behave as if the wp is disabled */ + return; + case 1: + flags |= BP_MEM_READ; + break; + case 2: + flags |= BP_MEM_WRITE; + break; + case 3: + flags |= BP_MEM_ACCESS; + break; + } + + /* + * Attempts to use both MASK and BAS fields simultaneously are + * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, + * thus generating a watchpoint for every byte in the masked region. + */ + mask = FIELD_EX64(wcr, DBGWCR, MASK); + if (mask == 1 || mask == 2) { + /* + * Reserved values of MASK; we must act as if the mask value was + * some non-reserved value, or as if the watchpoint were disabled. + * We choose the latter. 
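+         * (For contrast, in the non-reserved case below a MASK of 3
+         *  watches the 8-byte-aligned region containing WVR: len becomes
+         *  1ULL << 3 and the low three bits of WVR are then cleared.)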
+ */ + return; + } else if (mask) { + /* Watchpoint covers an aligned area up to 2GB in size */ + len = 1ULL << mask; + /* + * If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE + * whether the watchpoint fires when the unmasked bits match; we opt + * to generate the exceptions. + */ + wvr &= ~(len - 1); + } else { + /* Watchpoint covers bytes defined by the byte address select bits */ + int bas = FIELD_EX64(wcr, DBGWCR, BAS); + int basstart; + + if (extract64(wvr, 2, 1)) { + /* + * Deprecated case of an only 4-aligned address. BAS[7:4] are + * ignored, and BAS[3:0] define which bytes to watch. + */ + bas &= 0xf; + } + + if (bas == 0) { + /* This must act as if the watchpoint is disabled */ + return; + } + + /* + * The BAS bits are supposed to be programmed to indicate a contiguous + * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether + * we fire for each byte in the word/doubleword addressed by the WVR. + * We choose to ignore any non-zero bits after the first range of 1s. + */ + basstart = ctz32(bas); + len = cto32(bas >> basstart); + wvr += basstart; + } + + cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, + &env->cpu_watchpoint[n]); +} + +void hw_watchpoint_update_all(ARMCPU *cpu) +{ + int i; + CPUARMState *env = &cpu->env; + + /* + * Completely clear out existing QEMU watchpoints and our array, to + * avoid possible stale entries following migration load. + */ + cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); + memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); + + for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { + hw_watchpoint_update(cpu, i); + } +} + +static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + int i = ri->crm; + + /* + * Bits [1:0] are RES0. + * + * It is IMPLEMENTATION DEFINED whether [63:49] ([63:53] with FEAT_LVA) + * are hardwired to the value of bit [48] ([52] with FEAT_LVA), or if + * they contain the value written. It is CONSTRAINED UNPREDICTABLE + * whether the RESS bits are ignored when comparing an address. + * + * Therefore we are allowed to compare the entire register, which lets + * us avoid considering whether or not FEAT_LVA is actually enabled. + */ + value &= ~3ULL; + + raw_write(env, ri, value); + hw_watchpoint_update(cpu, i); +} + +static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + int i = ri->crm; + + raw_write(env, ri, value); + hw_watchpoint_update(cpu, i); +} + +void hw_breakpoint_update(ARMCPU *cpu, int n) +{ + CPUARMState *env = &cpu->env; + uint64_t bvr = env->cp15.dbgbvr[n]; + uint64_t bcr = env->cp15.dbgbcr[n]; + vaddr addr; + int bt; + int flags = BP_CPU; + + if (env->cpu_breakpoint[n]) { + cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); + env->cpu_breakpoint[n] = NULL; + } + + if (!extract64(bcr, 0, 1)) { + /* E bit clear : watchpoint disabled */ + return; + } + + bt = extract64(bcr, 20, 4); + + switch (bt) { + case 4: /* unlinked address mismatch (reserved if AArch64) */ + case 5: /* linked address mismatch (reserved if AArch64) */ + qemu_log_mask(LOG_UNIMP, + "arm: address mismatch breakpoint types not implemented\n"); + return; + case 0: /* unlinked address match */ + case 1: /* linked address match */ + { + /* + * Bits [1:0] are RES0. 
+ * + * It is IMPLEMENTATION DEFINED whether bits [63:49] + * ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit + * of the VA field ([48] or [52] for FEAT_LVA), or whether the + * value is read as written. It is CONSTRAINED UNPREDICTABLE + * whether the RESS bits are ignored when comparing an address. + * Therefore we are allowed to compare the entire register, which + * lets us avoid considering whether FEAT_LVA is actually enabled. + * + * The BAS field is used to allow setting breakpoints on 16-bit + * wide instructions; it is CONSTRAINED UNPREDICTABLE whether + * a bp will fire if the addresses covered by the bp and the addresses + * covered by the insn overlap but the insn doesn't start at the + * start of the bp address range. We choose to require the insn and + * the bp to have the same address. The constraints on writing to + * BAS enforced in dbgbcr_write mean we have only four cases: + * 0b0000 => no breakpoint + * 0b0011 => breakpoint on addr + * 0b1100 => breakpoint on addr + 2 + * 0b1111 => breakpoint on addr + * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). + */ + int bas = extract64(bcr, 5, 4); + addr = bvr & ~3ULL; + if (bas == 0) { + return; + } + if (bas == 0xc) { + addr += 2; + } + break; + } + case 2: /* unlinked context ID match */ + case 8: /* unlinked VMID match (reserved if no EL2) */ + case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ + qemu_log_mask(LOG_UNIMP, + "arm: unlinked context breakpoint types not implemented\n"); + return; + case 9: /* linked VMID match (reserved if no EL2) */ + case 11: /* linked context ID and VMID match (reserved if no EL2) */ + case 3: /* linked context ID match */ + default: + /* + * We must generate no events for Linked context matches (unless + * they are linked to by some other bp/wp, which is handled in + * updates for the linking bp/wp). We choose to also generate no events + * for reserved values. + */ + return; + } + + cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); +} + +void hw_breakpoint_update_all(ARMCPU *cpu) +{ + int i; + CPUARMState *env = &cpu->env; + + /* + * Completely clear out existing QEMU breakpoints and our array, to + * avoid possible stale entries following migration load. + */ + cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); + memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); + + for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { + hw_breakpoint_update(cpu, i); + } +} + +static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + int i = ri->crm; + + raw_write(env, ri, value); + hw_breakpoint_update(cpu, i); +} + +static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + int i = ri->crm; + + /* + * BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only + * copy of BAS[0]. + */ + value = deposit64(value, 6, 1, extract64(value, 5, 1)); + value = deposit64(value, 8, 1, extract64(value, 7, 1)); + + raw_write(env, ri, value); + hw_breakpoint_update(cpu, i); +} + +void define_debug_regs(ARMCPU *cpu) +{ + /* + * Define v7 and v8 architectural debug registers. + * These are just dummy implementations for now. + */ + int i; + int wrps, brps, ctx_cmps; + + /* + * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot + * use AArch32. Given that bit 15 is RES1, if the value is 0 then + * the register must not exist for this cpu. 
+ */ + if (cpu->isar.dbgdidr != 0) { + ARMCPRegInfo dbgdidr = { + .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, + .opc1 = 0, .opc2 = 0, + .access = PL0_R, .accessfn = access_tda, + .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr, + }; + define_one_arm_cp_reg(cpu, &dbgdidr); + } + + /* + * DBGDEVID is present in the v7 debug architecture if + * DBGDIDR.DEVID_imp is 1 (bit 15); from v7.1 and on it is + * mandatory (and bit 15 is RES1). DBGDEVID1 and DBGDEVID2 exist + * from v7.1 of the debug architecture. Because no fields have yet + * been defined in DBGDEVID2 (and quite possibly none will ever + * be) we don't define an ARMISARegisters field for it. + * These registers exist only if EL1 can use AArch32, but that + * happens naturally because they are only PL1 accessible anyway. + */ + if (extract32(cpu->isar.dbgdidr, 15, 1)) { + ARMCPRegInfo dbgdevid = { + .name = "DBGDEVID", + .cp = 14, .opc1 = 0, .crn = 7, .opc2 = 2, .crn = 7, + .access = PL1_R, .accessfn = access_tda, + .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdevid, + }; + define_one_arm_cp_reg(cpu, &dbgdevid); + } + if (cpu_isar_feature(aa32_debugv7p1, cpu)) { + ARMCPRegInfo dbgdevid12[] = { + { + .name = "DBGDEVID1", + .cp = 14, .opc1 = 0, .crn = 7, .opc2 = 1, .crn = 7, + .access = PL1_R, .accessfn = access_tda, + .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdevid1, + }, { + .name = "DBGDEVID2", + .cp = 14, .opc1 = 0, .crn = 7, .opc2 = 0, .crn = 7, + .access = PL1_R, .accessfn = access_tda, + .type = ARM_CP_CONST, .resetvalue = 0, + }, + }; + define_arm_cp_regs(cpu, dbgdevid12); + } + + brps = arm_num_brps(cpu); + wrps = arm_num_wrps(cpu); + ctx_cmps = arm_num_ctx_cmps(cpu); + + assert(ctx_cmps <= brps); + + define_arm_cp_regs(cpu, debug_cp_reginfo); + + if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { + define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); + } + + for (i = 0; i < brps; i++) { + char *dbgbvr_el1_name = g_strdup_printf("DBGBVR%d_EL1", i); + char *dbgbcr_el1_name = g_strdup_printf("DBGBCR%d_EL1", i); + ARMCPRegInfo dbgregs[] = { + { .name = dbgbvr_el1_name, .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, + .access = PL1_RW, .accessfn = access_tda, + .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), + .writefn = dbgbvr_write, .raw_writefn = raw_write + }, + { .name = dbgbcr_el1_name, .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, + .access = PL1_RW, .accessfn = access_tda, + .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), + .writefn = dbgbcr_write, .raw_writefn = raw_write + }, + }; + define_arm_cp_regs(cpu, dbgregs); + g_free(dbgbvr_el1_name); + g_free(dbgbcr_el1_name); + } + + for (i = 0; i < wrps; i++) { + char *dbgwvr_el1_name = g_strdup_printf("DBGWVR%d_EL1", i); + char *dbgwcr_el1_name = g_strdup_printf("DBGWCR%d_EL1", i); + ARMCPRegInfo dbgregs[] = { + { .name = dbgwvr_el1_name, .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, + .access = PL1_RW, .accessfn = access_tda, + .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), + .writefn = dbgwvr_write, .raw_writefn = raw_write + }, + { .name = dbgwcr_el1_name, .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, + .access = PL1_RW, .accessfn = access_tda, + .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), + .writefn = dbgwcr_write, .raw_writefn = raw_write + }, + }; + define_arm_cp_regs(cpu, dbgregs); + g_free(dbgwvr_el1_name); + g_free(dbgwcr_el1_name); } } diff 
--git a/target/arm/gdbstub.c b/target/arm/gdbstub.c index 826601b341..2f806512d0 100644 --- a/target/arm/gdbstub.c +++ b/target/arm/gdbstub.c @@ -20,6 +20,8 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/gdbstub.h" +#include "internals.h" +#include "cpregs.h" typedef struct RegisterSysregXmlParam { CPUState *cs; @@ -76,8 +78,13 @@ int arm_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) tmp = ldl_p(mem_buf); - /* Mask out low bit of PC to workaround gdb bugs. This will probably - cause problems if we ever implement the Jazelle DBX extensions. */ + /* + * Mask out low bits of PC to workaround gdb bugs. + * This avoids an assert in thumb_tr_translate_insn, because it is + * architecturally impossible to misalign the pc. + * This will probably cause problems if we ever implement the + * Jazelle DBX extensions. + */ if (n == 15) { tmp &= ~1; } @@ -111,7 +118,7 @@ int arm_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) /* * Don't allow writing to XPSR.Exception as it can cause * a transition into or out of handler mode (it's not - * writeable via the MSR insn so this is a reasonable + * writable via the MSR insn so this is a reasonable * restriction). Other fields are safe to update. */ xpsr_write(env, tmp, ~XPSR_EXCP); @@ -124,6 +131,133 @@ int arm_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) return 0; } +static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg) +{ + ARMCPU *cpu = env_archcpu(env); + int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16; + + /* VFP data registers are always little-endian. */ + if (reg < nregs) { + return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg)); + } + if (arm_feature(env, ARM_FEATURE_NEON)) { + /* Aliases for Q regs. */ + nregs += 16; + if (reg < nregs) { + uint64_t *q = aa32_vfp_qreg(env, reg - 32); + return gdb_get_reg128(buf, q[0], q[1]); + } + } + switch (reg - nregs) { + case 0: + return gdb_get_reg32(buf, vfp_get_fpscr(env)); + } + return 0; +} + +static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) +{ + ARMCPU *cpu = env_archcpu(env); + int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 
32 : 16; + + if (reg < nregs) { + *aa32_vfp_dreg(env, reg) = ldq_le_p(buf); + return 8; + } + if (arm_feature(env, ARM_FEATURE_NEON)) { + nregs += 16; + if (reg < nregs) { + uint64_t *q = aa32_vfp_qreg(env, reg - 32); + q[0] = ldq_le_p(buf); + q[1] = ldq_le_p(buf + 8); + return 16; + } + } + switch (reg - nregs) { + case 0: + vfp_set_fpscr(env, ldl_p(buf)); + return 4; + } + return 0; +} + +static int vfp_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg) +{ + switch (reg) { + case 0: + return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]); + case 1: + return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]); + } + return 0; +} + +static int vfp_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg) +{ + switch (reg) { + case 0: + env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); + return 4; + case 1: + env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); + return 4; + } + return 0; +} + +static int mve_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg) +{ + switch (reg) { + case 0: + return gdb_get_reg32(buf, env->v7m.vpr); + default: + return 0; + } +} + +static int mve_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) +{ + switch (reg) { + case 0: + env->v7m.vpr = ldl_p(buf); + return 4; + default: + return 0; + } +} + +/** + * arm_get/set_gdb_*: get/set a gdb register + * @env: the CPU state + * @buf: a buffer to copy to/from + * @reg: register number (offset from start of group) + * + * We return the number of bytes copied + */ + +static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg) +{ + ARMCPU *cpu = env_archcpu(env); + const ARMCPRegInfo *ri; + uint32_t key; + + key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg]; + ri = get_arm_cp_reginfo(cpu->cp_regs, key); + if (ri) { + if (cpreg_field_is_64bit(ri)) { + return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri)); + } else { + return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri)); + } + } + return 0; +} + +static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg) +{ + return 0; +} + static void arm_gen_one_xml_sysreg_tag(GString *s, DynamicGDBXMLInfo *dyn_xml, ARMCPRegInfo *ri, uint32_t ri_key, int bitsize, int regnum) @@ -139,7 +273,7 @@ static void arm_gen_one_xml_sysreg_tag(GString *s, DynamicGDBXMLInfo *dyn_xml, static void arm_register_sysreg_for_xml(gpointer key, gpointer value, gpointer p) { - uint32_t ri_key = *(uint32_t *)key; + uint32_t ri_key = (uintptr_t)key; ARMCPRegInfo *ri = value; RegisterSysregXmlParam *param = (RegisterSysregXmlParam *)p; GString *s = param->s; @@ -319,3 +453,54 @@ const char *arm_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname) } return NULL; } + +void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) +{ + CPUState *cs = CPU(cpu); + CPUARMState *env = &cpu->env; + + if (arm_feature(env, ARM_FEATURE_AARCH64)) { + /* + * The lower part of each SVE register aliases to the FPU + * registers so we don't need to include both. 
+ */ +#ifdef TARGET_AARCH64 + if (isar_feature_aa64_sve(&cpu->isar)) { + gdb_register_coprocessor(cs, arm_gdb_get_svereg, arm_gdb_set_svereg, + arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs), + "sve-registers.xml", 0); + } else { + gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, + aarch64_fpu_gdb_set_reg, + 34, "aarch64-fpu.xml", 0); + } +#endif + } else { + if (arm_feature(env, ARM_FEATURE_NEON)) { + gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, + 49, "arm-neon.xml", 0); + } else if (cpu_isar_feature(aa32_simd_r32, cpu)) { + gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, + 33, "arm-vfp3.xml", 0); + } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) { + gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, + 17, "arm-vfp.xml", 0); + } + if (!arm_feature(env, ARM_FEATURE_M)) { + /* + * A and R profile have FP sysregs FPEXC and FPSID that we + * expose to gdb. + */ + gdb_register_coprocessor(cs, vfp_gdb_get_sysreg, vfp_gdb_set_sysreg, + 2, "arm-vfp-sysregs.xml", 0); + } + } + if (cpu_isar_feature(aa32_mve, cpu)) { + gdb_register_coprocessor(cs, mve_gdb_get_reg, mve_gdb_set_reg, + 1, "arm-m-profile-mve.xml", 0); + } + gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg, + arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs), + "system-registers.xml", 0); + +} diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c index 251539ef79..07a6746944 100644 --- a/target/arm/gdbstub64.c +++ b/target/arm/gdbstub64.c @@ -17,7 +17,9 @@ * License along with this library; if not, see . */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" +#include "internals.h" #include "exec/gdbstub.h" int aarch64_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) @@ -69,3 +71,141 @@ int aarch64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) /* Unknown register. */ return 0; } + +int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg) +{ + switch (reg) { + case 0 ... 31: + { + /* 128 bit FP register - quads are in LE order */ + uint64_t *q = aa64_vfp_qreg(env, reg); + return gdb_get_reg128(buf, q[1], q[0]); + } + case 32: + /* FPSR */ + return gdb_get_reg32(buf, vfp_get_fpsr(env)); + case 33: + /* FPCR */ + return gdb_get_reg32(buf, vfp_get_fpcr(env)); + default: + return 0; + } +} + +int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) +{ + switch (reg) { + case 0 ... 31: + /* 128 bit FP register */ + { + uint64_t *q = aa64_vfp_qreg(env, reg); + q[0] = ldq_le_p(buf); + q[1] = ldq_le_p(buf + 8); + return 16; + } + case 32: + /* FPSR */ + vfp_set_fpsr(env, ldl_p(buf)); + return 4; + case 33: + /* FPCR */ + vfp_set_fpcr(env, ldl_p(buf)); + return 4; + default: + return 0; + } +} + +int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg) +{ + ARMCPU *cpu = env_archcpu(env); + + switch (reg) { + /* The first 32 registers are the zregs */ + case 0 ... 31: + { + int vq, len = 0; + for (vq = 0; vq < cpu->sve_max_vq; vq++) { + len += gdb_get_reg128(buf, + env->vfp.zregs[reg].d[vq * 2 + 1], + env->vfp.zregs[reg].d[vq * 2]); + } + return len; + } + case 32: + return gdb_get_reg32(buf, vfp_get_fpsr(env)); + case 33: + return gdb_get_reg32(buf, vfp_get_fpcr(env)); + /* then 16 predicates and the ffr */ + case 34 ... 
50: + { + int preg = reg - 34; + int vq, len = 0; + for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) { + len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]); + } + return len; + } + case 51: + { + /* + * We report in Vector Granules (VG) which is 64bit in a Z reg + * while the ZCR works in Vector Quads (VQ) which is 128bit chunks. + */ + int vq = sve_vqm1_for_el(env, arm_current_el(env)) + 1; + return gdb_get_reg64(buf, vq * 2); + } + default: + /* gdbstub asked for something out our range */ + qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg); + break; + } + + return 0; +} + +int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg) +{ + ARMCPU *cpu = env_archcpu(env); + + /* The first 32 registers are the zregs */ + switch (reg) { + /* The first 32 registers are the zregs */ + case 0 ... 31: + { + int vq, len = 0; + uint64_t *p = (uint64_t *) buf; + for (vq = 0; vq < cpu->sve_max_vq; vq++) { + env->vfp.zregs[reg].d[vq * 2 + 1] = *p++; + env->vfp.zregs[reg].d[vq * 2] = *p++; + len += 16; + } + return len; + } + case 32: + vfp_set_fpsr(env, *(uint32_t *)buf); + return 4; + case 33: + vfp_set_fpcr(env, *(uint32_t *)buf); + return 4; + case 34 ... 50: + { + int preg = reg - 34; + int vq, len = 0; + uint64_t *p = (uint64_t *) buf; + for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) { + env->vfp.pregs[preg].p[vq / 4] = *p++; + len += 8; + } + return len; + } + case 51: + /* cannot set vg via gdbstub */ + return 0; + default: + /* gdbstub asked for something out our range */ + break; + } + + return 0; +} diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c index 26f79f9141..77a8502b6b 100644 --- a/target/arm/helper-a64.c +++ b/target/arm/helper-a64.c @@ -32,7 +32,6 @@ #include "exec/cpu_ldst.h" #include "qemu/int128.h" #include "qemu/atomic128.h" -#include "tcg/tcg.h" #include "fpu/softfloat.h" #include /* For crc32 */ @@ -84,12 +83,14 @@ void HELPER(msr_i_daifset)(CPUARMState *env, uint32_t imm) { daif_check(env, 0x1e, imm, GETPC()); env->daif |= (imm << 6) & PSTATE_DAIF; + arm_rebuild_hflags(env); } void HELPER(msr_i_daifclear)(CPUARMState *env, uint32_t imm) { daif_check(env, 0x1f, imm, GETPC()); env->daif &= ~((imm << 6) & PSTATE_DAIF); + arm_rebuild_hflags(env); } /* Convert a softfloat float_relation_ (as returned by @@ -513,37 +514,19 @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr, uintptr_t ra = GETPC(); uint64_t o0, o1; bool success; - -#ifdef CONFIG_USER_ONLY - /* ??? Enforce alignment. 
*/ - uint64_t *haddr = g2h(env_cpu(env), addr); - - set_helper_retaddr(ra); - o0 = ldq_le_p(haddr + 0); - o1 = ldq_le_p(haddr + 1); - oldv = int128_make128(o0, o1); - - success = int128_eq(oldv, cmpv); - if (success) { - stq_le_p(haddr + 0, int128_getlo(newv)); - stq_le_p(haddr + 1, int128_gethi(newv)); - } - clear_helper_retaddr(); -#else int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); - TCGMemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx); + MemOpIdx oi0 = make_memop_idx(MO_LEUQ | MO_ALIGN_16, mem_idx); + MemOpIdx oi1 = make_memop_idx(MO_LEUQ, mem_idx); - o0 = helper_le_ldq_mmu(env, addr + 0, oi0, ra); - o1 = helper_le_ldq_mmu(env, addr + 8, oi1, ra); + o0 = cpu_ldq_le_mmu(env, addr + 0, oi0, ra); + o1 = cpu_ldq_le_mmu(env, addr + 8, oi1, ra); oldv = int128_make128(o0, o1); success = int128_eq(oldv, cmpv); if (success) { - helper_le_stq_mmu(env, addr + 0, int128_getlo(newv), oi1, ra); - helper_le_stq_mmu(env, addr + 8, int128_gethi(newv), oi1, ra); + cpu_stq_le_mmu(env, addr + 0, int128_getlo(newv), oi1, ra); + cpu_stq_le_mmu(env, addr + 8, int128_gethi(newv), oi1, ra); } -#endif return !success; } @@ -555,12 +538,12 @@ uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr, uintptr_t ra = GETPC(); bool success; int mem_idx; - TCGMemOpIdx oi; + MemOpIdx oi; assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); + oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx); cmpv = int128_make128(env->exclusive_val, env->exclusive_high); newv = int128_make128(new_lo, new_hi); @@ -583,37 +566,19 @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr, uintptr_t ra = GETPC(); uint64_t o0, o1; bool success; - -#ifdef CONFIG_USER_ONLY - /* ??? Enforce alignment. 
*/ - uint64_t *haddr = g2h(env_cpu(env), addr); - - set_helper_retaddr(ra); - o1 = ldq_be_p(haddr + 0); - o0 = ldq_be_p(haddr + 1); - oldv = int128_make128(o0, o1); - - success = int128_eq(oldv, cmpv); - if (success) { - stq_be_p(haddr + 0, int128_gethi(newv)); - stq_be_p(haddr + 1, int128_getlo(newv)); - } - clear_helper_retaddr(); -#else int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx); - TCGMemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx); + MemOpIdx oi0 = make_memop_idx(MO_BEUQ | MO_ALIGN_16, mem_idx); + MemOpIdx oi1 = make_memop_idx(MO_BEUQ, mem_idx); - o1 = helper_be_ldq_mmu(env, addr + 0, oi0, ra); - o0 = helper_be_ldq_mmu(env, addr + 8, oi1, ra); + o1 = cpu_ldq_be_mmu(env, addr + 0, oi0, ra); + o0 = cpu_ldq_be_mmu(env, addr + 8, oi1, ra); oldv = int128_make128(o0, o1); success = int128_eq(oldv, cmpv); if (success) { - helper_be_stq_mmu(env, addr + 0, int128_gethi(newv), oi1, ra); - helper_be_stq_mmu(env, addr + 8, int128_getlo(newv), oi1, ra); + cpu_stq_be_mmu(env, addr + 0, int128_gethi(newv), oi1, ra); + cpu_stq_be_mmu(env, addr + 8, int128_getlo(newv), oi1, ra); } -#endif return !success; } @@ -625,12 +590,12 @@ uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr, uintptr_t ra = GETPC(); bool success; int mem_idx; - TCGMemOpIdx oi; + MemOpIdx oi; assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx); + oi = make_memop_idx(MO_BE | MO_128 | MO_ALIGN, mem_idx); /* * High and low need to be switched here because this is not actually a @@ -651,12 +616,12 @@ void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr, Int128 oldv, cmpv, newv; uintptr_t ra = GETPC(); int mem_idx; - TCGMemOpIdx oi; + MemOpIdx oi; assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); + oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx); cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]); newv = int128_make128(new_lo, new_hi); @@ -672,12 +637,12 @@ void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr, Int128 oldv, cmpv, newv; uintptr_t ra = GETPC(); int mem_idx; - TCGMemOpIdx oi; + MemOpIdx oi; assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); + oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx); cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]); newv = int128_make128(new_lo, new_hi); @@ -987,7 +952,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) qemu_mutex_unlock_iothread(); if (!return_to_aa64) { - env->aarch64 = 0; + env->aarch64 = false; /* We do a raw CPSR write because aarch64_sync_64_to_32() * will sort the register banks out for us, and we've already * caught all the bad-mode cases in el_from_spsr(). 
@@ -1010,7 +975,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) } else { int tbii; - env->aarch64 = 1; + env->aarch64 = true; spsr &= aarch64_pstate_valid_mask(&env_archcpu(env)->isar); pstate_write(env, spsr); if (!arm_singlestep_active(env)) { @@ -1071,6 +1036,7 @@ illegal_return: if (!arm_singlestep_active(env)) { env->pstate &= ~PSTATE_SS; } + helper_rebuild_hflags_a64(env, cur_el); qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: " "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc); } diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h index 56e40844ad..76bd25006d 100644 --- a/target/arm/helper-mve.h +++ b/target/arm/helper-mve.h @@ -33,8 +33,105 @@ DEF_HELPER_FLAGS_3(mve_vstrb_h, TCG_CALL_NO_WG, void, env, ptr, i32) DEF_HELPER_FLAGS_3(mve_vstrb_w, TCG_CALL_NO_WG, void, env, ptr, i32) DEF_HELPER_FLAGS_3(mve_vstrh_w, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vldrb_sg_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vldrb_sg_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vldrh_sg_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vldrb_sg_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vldrb_sg_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vldrb_sg_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vldrh_sg_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vldrh_sg_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vldrw_sg_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vldrd_sg_ud, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vstrb_sg_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vstrb_sg_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vstrb_sg_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vstrh_sg_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vstrh_sg_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vstrw_sg_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vstrd_sg_ud, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vldrh_sg_os_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vldrh_sg_os_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vldrh_sg_os_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vldrw_sg_os_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vldrd_sg_os_ud, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vstrh_sg_os_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vstrh_sg_os_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vstrw_sg_os_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vstrd_sg_os_ud, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vldrw_sg_wb_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vldrd_sg_wb_ud, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vstrw_sg_wb_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vstrd_sg_wb_ud, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vld20b, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vld20h, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vld20w, TCG_CALL_NO_WG, void, env, i32, 
i32) + +DEF_HELPER_FLAGS_3(mve_vld21b, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vld21h, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vld21w, TCG_CALL_NO_WG, void, env, i32, i32) + +DEF_HELPER_FLAGS_3(mve_vld40b, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vld40h, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vld40w, TCG_CALL_NO_WG, void, env, i32, i32) + +DEF_HELPER_FLAGS_3(mve_vld41b, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vld41h, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vld41w, TCG_CALL_NO_WG, void, env, i32, i32) + +DEF_HELPER_FLAGS_3(mve_vld42b, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vld42h, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vld42w, TCG_CALL_NO_WG, void, env, i32, i32) + +DEF_HELPER_FLAGS_3(mve_vld43b, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vld43h, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vld43w, TCG_CALL_NO_WG, void, env, i32, i32) + +DEF_HELPER_FLAGS_3(mve_vst20b, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vst20h, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vst20w, TCG_CALL_NO_WG, void, env, i32, i32) + +DEF_HELPER_FLAGS_3(mve_vst21b, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vst21h, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vst21w, TCG_CALL_NO_WG, void, env, i32, i32) + +DEF_HELPER_FLAGS_3(mve_vst40b, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vst40h, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vst40w, TCG_CALL_NO_WG, void, env, i32, i32) + +DEF_HELPER_FLAGS_3(mve_vst41b, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vst41h, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vst41w, TCG_CALL_NO_WG, void, env, i32, i32) + +DEF_HELPER_FLAGS_3(mve_vst42b, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vst42h, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vst42w, TCG_CALL_NO_WG, void, env, i32, i32) + +DEF_HELPER_FLAGS_3(mve_vst43b, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vst43h, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mve_vst43w, TCG_CALL_NO_WG, void, env, i32, i32) + DEF_HELPER_FLAGS_3(mve_vdup, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vidupb, TCG_CALL_NO_WG, i32, env, ptr, i32, i32) +DEF_HELPER_FLAGS_4(mve_viduph, TCG_CALL_NO_WG, i32, env, ptr, i32, i32) +DEF_HELPER_FLAGS_4(mve_vidupw, TCG_CALL_NO_WG, i32, env, ptr, i32, i32) + +DEF_HELPER_FLAGS_5(mve_viwdupb, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32) +DEF_HELPER_FLAGS_5(mve_viwduph, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32) +DEF_HELPER_FLAGS_5(mve_viwdupw, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32) + +DEF_HELPER_FLAGS_5(mve_vdwdupb, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32) +DEF_HELPER_FLAGS_5(mve_vdwduph, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32) +DEF_HELPER_FLAGS_5(mve_vdwdupw, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32) + DEF_HELPER_FLAGS_3(mve_vclsb, TCG_CALL_NO_WG, void, env, ptr, ptr) DEF_HELPER_FLAGS_3(mve_vclsh, TCG_CALL_NO_WG, void, env, ptr, ptr) DEF_HELPER_FLAGS_3(mve_vclsw, TCG_CALL_NO_WG, void, env, ptr, ptr) @@ -64,12 +161,63 @@ DEF_HELPER_FLAGS_3(mve_vnegw, TCG_CALL_NO_WG, void, env, ptr, ptr) DEF_HELPER_FLAGS_3(mve_vfnegh, TCG_CALL_NO_WG, void, env, ptr, ptr) DEF_HELPER_FLAGS_3(mve_vfnegs, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vqabsb, TCG_CALL_NO_WG, void, env, 
ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vqabsh, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vqabsw, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vqnegb, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vqnegh, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vqnegw, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vmaxab, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vmaxah, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vmaxaw, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vminab, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vminah, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vminaw, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_4(mve_vcvt_rm_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vcvt_rm_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vcvt_rm_ss, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vcvt_rm_us, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vcvtb_sh, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcvtt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcvtb_hs, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcvtt_hs, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vmovnbb, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vmovnbh, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vmovntb, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vmovnth, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vqmovunbb, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vqmovunbh, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vqmovuntb, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vqmovunth, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vqmovnbsb, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vqmovnbsh, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vqmovntsb, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vqmovntsh, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vqmovnbub, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vqmovnbuh, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vqmovntub, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vqmovntuh, TCG_CALL_NO_WG, void, env, ptr, ptr) + DEF_HELPER_FLAGS_4(mve_vand, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) DEF_HELPER_FLAGS_4(mve_vbic, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) DEF_HELPER_FLAGS_4(mve_vorr, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) DEF_HELPER_FLAGS_4(mve_vorn, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) DEF_HELPER_FLAGS_4(mve_veor, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vpsel, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_1(mve_vpnot, TCG_CALL_NO_WG, void, env) + +DEF_HELPER_FLAGS_2(mve_vctp, TCG_CALL_NO_WG, void, env, i32) + DEF_HELPER_FLAGS_4(mve_vaddb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) DEF_HELPER_FLAGS_4(mve_vaddh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) DEF_HELPER_FLAGS_4(mve_vaddw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) @@ -145,6 +293,11 @@ DEF_HELPER_FLAGS_4(mve_vmulltub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) DEF_HELPER_FLAGS_4(mve_vmulltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) DEF_HELPER_FLAGS_4(mve_vmulltuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) 
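A note on the block of DEF_HELPER_FLAGS_* additions above: these lines are declaration macros, not definitions. QEMU expands each of them twice, once via "exec/helper-proto.h" into the C prototype the helper implementation must match ("env" becomes CPUARMState *, "ptr" becomes void *, "i32"/"i64" become uint32_t/uint64_t), and once via "exec/helper-gen.h" into a gen_helper_*() emitter for the translator. Roughly, and with the parameter names vd/vn/vm chosen here only for illustration, one of the new MVE entries amounts to:

    /* DEF_HELPER_FLAGS_4(mve_vmullpbh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
     * yields, approximately, the prototype the implementation must provide
     * (types come from cpu.h and the generated helper headers): */
    void HELPER(mve_vmullpbh)(CPUARMState *env, void *vd, void *vn, void *vm);

    /* ... and a matching emitter that translate-time code can call: */
    void gen_helper_mve_vmullpbh(TCGv_ptr env, TCGv_ptr vd, TCGv_ptr vn, TCGv_ptr vm);

TCG_CALL_NO_WG records that the helper writes no TCG globals, so the code generator need not treat them as clobbered across the call; TCG_CALL_NO_RWG, used by some of the SVE/SME helpers later in this patch, additionally promises that no globals are read. The exact expansion lives in the generated headers; the sketch above is paraphrased, not copied from them.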
+DEF_HELPER_FLAGS_4(mve_vmullpbh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vmullpth, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vmullpbw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vmullptw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) + DEF_HELPER_FLAGS_4(mve_vqdmulhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) DEF_HELPER_FLAGS_4(mve_vqdmulhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) DEF_HELPER_FLAGS_4(mve_vqdmulhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) @@ -267,6 +420,60 @@ DEF_HELPER_FLAGS_4(mve_vhcadd270b, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) DEF_HELPER_FLAGS_4(mve_vhcadd270h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) DEF_HELPER_FLAGS_4(mve_vhcadd270w, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vfaddh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vfadds, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_4(mve_vfsubh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vfsubs, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_4(mve_vfmulh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vfmuls, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_4(mve_vfabdh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vfabds, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_4(mve_vmaxnmh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vmaxnms, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_4(mve_vminnmh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vminnms, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_4(mve_vmaxnmah, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vmaxnmas, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_4(mve_vminnmah, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vminnmas, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_4(mve_vfcadd90h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vfcadd90s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_4(mve_vfcadd270h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vfcadd270s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_4(mve_vfmah, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vfmas, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_4(mve_vfmsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vfmss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_4(mve_vcmul0h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vcmul0s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vcmul90h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vcmul90s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vcmul180h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vcmul180s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vcmul270h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vcmul270s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_4(mve_vcmla0h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vcmla0s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vcmla90h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vcmla90s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vcmla180h, 
TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vcmla180s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vcmla270h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vcmla270s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) + DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) @@ -328,6 +535,30 @@ DEF_HELPER_FLAGS_4(mve_vqdmullb_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i3 DEF_HELPER_FLAGS_4(mve_vqdmullt_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vqdmullt_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vmlab, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vmlah, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vmlaw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vmlasb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vmlash, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vmlasw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vqdmlahb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqdmlahh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqdmlahw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vqrdmlahb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqrdmlahh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqrdmlahw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vqdmlashb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqdmlashh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqdmlashw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vqrdmlashb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqrdmlashh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqrdmlashw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + DEF_HELPER_FLAGS_4(mve_vmlaldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64) DEF_HELPER_FLAGS_4(mve_vmlaldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64) DEF_HELPER_FLAGS_4(mve_vmlaldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64) @@ -349,6 +580,23 @@ DEF_HELPER_FLAGS_4(mve_vrmlaldavhuw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64) DEF_HELPER_FLAGS_4(mve_vrmlsldavhsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64) DEF_HELPER_FLAGS_4(mve_vrmlsldavhxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64) +DEF_HELPER_FLAGS_4(mve_vmladavsb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vmladavsh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vmladavsw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vmladavub, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vmladavuh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vmladavuw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vmlsdavb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vmlsdavh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vmlsdavw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vmladavsxb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vmladavsxh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vmladavsxw, TCG_CALL_NO_WG, i32, env, ptr, ptr, 
i32) +DEF_HELPER_FLAGS_4(mve_vmlsdavxb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vmlsdavxh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vmlsdavxw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) + DEF_HELPER_FLAGS_3(mve_vaddvsb, TCG_CALL_NO_WG, i32, env, ptr, i32) DEF_HELPER_FLAGS_3(mve_vaddvub, TCG_CALL_NO_WG, i32, env, ptr, i32) DEF_HELPER_FLAGS_3(mve_vaddvsh, TCG_CALL_NO_WG, i32, env, ptr, i32) @@ -356,9 +604,48 @@ DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32) DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32) DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vmaxvsb, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vmaxvsh, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vmaxvsw, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vmaxvub, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vmaxvuh, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vmaxvuw, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vmaxavb, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vmaxavh, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vmaxavw, TCG_CALL_NO_WG, i32, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vminvsb, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vminvsh, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vminvsw, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vminvub, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vminvuh, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vminvuw, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vminavb, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vminavh, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vminavw, TCG_CALL_NO_WG, i32, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vmaxnmvh, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vmaxnmvs, TCG_CALL_NO_WG, i32, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vminnmvh, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vminnmvs, TCG_CALL_NO_WG, i32, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vmaxnmavh, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vmaxnmavs, TCG_CALL_NO_WG, i32, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vminnmavh, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vminnmavs, TCG_CALL_NO_WG, i32, env, ptr, i32) + DEF_HELPER_FLAGS_3(mve_vaddlv_s, TCG_CALL_NO_WG, i64, env, ptr, i64) DEF_HELPER_FLAGS_3(mve_vaddlv_u, TCG_CALL_NO_WG, i64, env, ptr, i64) +DEF_HELPER_FLAGS_4(mve_vabavsb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vabavsh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vabavsw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vabavub, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vabavuh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vabavuw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32) + DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64) DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64) DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64) @@ -391,6 +678,14 @@ DEF_HELPER_FLAGS_4(mve_vrshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vrshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vrshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqrshli_sb, 
TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqrshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqrshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vqrshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqrshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqrshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + DEF_HELPER_FLAGS_4(mve_vshllbsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vshllbsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vshllbub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) @@ -463,3 +758,133 @@ DEF_HELPER_FLAGS_3(mve_uqshl, TCG_CALL_NO_RWG, i32, env, i32, i32) DEF_HELPER_FLAGS_3(mve_sqshl, TCG_CALL_NO_RWG, i32, env, i32, i32) DEF_HELPER_FLAGS_3(mve_uqrshl, TCG_CALL_NO_RWG, i32, env, i32, i32) DEF_HELPER_FLAGS_3(mve_sqrshr, TCG_CALL_NO_RWG, i32, env, i32, i32) + +DEF_HELPER_FLAGS_3(mve_vcmpeqb, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcmpeqh, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcmpeqw, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vcmpneb, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcmpneh, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcmpnew, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vcmpcsb, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcmpcsh, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcmpcsw, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vcmphib, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcmphih, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcmphiw, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vcmpgeb, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcmpgeh, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcmpgew, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vcmpltb, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcmplth, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcmpltw, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vcmpgtb, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcmpgth, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcmpgtw, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vcmpleb, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcmpleh, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vcmplew, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vcmpeq_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vcmpeq_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vcmpeq_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vcmpne_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vcmpne_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vcmpne_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vcmpcs_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vcmpcs_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vcmpcs_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vcmphi_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vcmphi_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vcmphi_scalarw, 
TCG_CALL_NO_WG, void, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vcmpge_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vcmpge_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vcmpge_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vcmplt_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vcmplt_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vcmplt_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vcmpgt_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vcmpgt_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vcmpgt_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vcmple_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vcmple_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vcmple_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vfcmpeqh, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vfcmpeqs, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vfcmpneh, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vfcmpnes, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vfcmpgeh, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vfcmpges, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vfcmplth, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vfcmplts, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vfcmpgth, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vfcmpgts, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vfcmpleh, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vfcmples, TCG_CALL_NO_WG, void, env, ptr, ptr) + +DEF_HELPER_FLAGS_3(mve_vfcmpeq_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vfcmpeq_scalars, TCG_CALL_NO_WG, void, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vfcmpne_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vfcmpne_scalars, TCG_CALL_NO_WG, void, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vfcmpge_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vfcmpge_scalars, TCG_CALL_NO_WG, void, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vfcmplt_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vfcmplt_scalars, TCG_CALL_NO_WG, void, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vfcmpgt_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vfcmpgt_scalars, TCG_CALL_NO_WG, void, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vfcmple_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vfcmple_scalars, TCG_CALL_NO_WG, void, env, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vfadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vfadd_scalars, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vfsub_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vfsub_scalars, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vfmul_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vfmul_scalars, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vfma_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vfma_scalars, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vfmas_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) 
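The *_scalar helpers in this group differ from the plain vector forms in that the trailing i32 argument carries the scalar operand, sourced from a general-purpose register, which is applied against every active lane of the vector input. A deliberately simplified model of that shape, ignoring the MVE predicate mask, element sizes other than 32 bits and floating-point rounding state, with names that are illustrative rather than QEMU's:

    #include <stdint.h>

    /* Simplified integer model of an MVE "op (vector, scalar)" helper:
     * every lane of the 128-bit source gets the same scalar applied. */
    static void vadd_scalar_model(uint32_t d[4], const uint32_t n[4],
                                  uint32_t scalar)
    {
        for (int i = 0; i < 4; i++) {
            d[i] = n[i] + scalar;
        }
    }

The real helpers additionally consult the VPR predicate and beat state so that masked-out lanes are left unchanged, which is one reason they all take env as their first argument.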
+DEF_HELPER_FLAGS_4(mve_vfmas_scalars, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vcvt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vcvt_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vcvt_hs, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vcvt_hu, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vcvt_sf, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vcvt_uf, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vcvt_fs, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vcvt_fu, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vrint_rm_h, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vrint_rm_s, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vrintx_h, TCG_CALL_NO_WG, void, env, ptr, ptr) +DEF_HELPER_FLAGS_3(mve_vrintx_s, TCG_CALL_NO_WG, void, env, ptr, ptr) diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h new file mode 100644 index 0000000000..d2d544a696 --- /dev/null +++ b/target/arm/helper-sme.h @@ -0,0 +1,147 @@ +/* + * AArch64 SME specific helper definitions + * + * Copyright (c) 2022 Linaro, Ltd + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +DEF_HELPER_FLAGS_2(set_pstate_sm, TCG_CALL_NO_RWG, void, env, i32) +DEF_HELPER_FLAGS_2(set_pstate_za, TCG_CALL_NO_RWG, void, env, i32) + +DEF_HELPER_FLAGS_3(sme_zero, TCG_CALL_NO_RWG, void, env, i32, i32) + +/* Move to/from vertical array slices, i.e. columns, so 'c'. 
*/ +DEF_HELPER_FLAGS_4(sme_mova_cz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_mova_zc_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_mova_cz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_mova_zc_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_mova_cz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_mova_zc_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_mova_cz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_mova_cz_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_mova_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sme_ld1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_ld1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_ld1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_ld1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_ld1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1q_le_h_mte, 
TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_st1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_st1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_st1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_st1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_st1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) + 
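Of the ZA-tile helpers declared above, sme_addha_* and sme_addva_* accumulate one SVE vector into an entire square tile: ADDHA adds the vector element-wise along each horizontal slice (column c of every active row receives Zn[c]), while ADDVA adds Zn[row] to every active element of that row. Ignoring the two governing predicates and QEMU's actual tile and vertical-slice indexing, the operation is roughly the following standalone model (not the QEMU code; dim stands for the vector length in 32-bit elements):

    #include <stdint.h>

    /* Rough model of ADDHA on a dim x dim tile of 32-bit elements.
     * Predication and the real ZA storage layout are omitted. */
    static void addha_model(uint32_t *za, const uint32_t *zn, int dim)
    {
        for (int row = 0; row < dim; row++) {
            for (int col = 0; col < dim; col++) {
                /* ADDVA would accumulate zn[row] here instead of zn[col]. */
                za[row * dim + col] += zn[col];
            }
        }
    }

The *mopa helpers declared next generalise this pattern to an outer product, broadly ZA[row][col] += Zn[row] * Zm[col] for active elements, with the half-precision and bfloat16 widening forms accumulating sums of element pairs into each 32-bit tile element.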
+DEF_HELPER_FLAGS_7(sme_fmopa_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme_bfmopa, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme_smopa_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme_umopa_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme_sumopa_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme_usmopa_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme_smopa_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme_umopa_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme_sumopa_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme_usmopa_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h index dc629f851a..cc4e1d8948 100644 --- a/target/arm/helper-sve.h +++ b/target/arm/helper-sve.h @@ -325,6 +325,8 @@ DEF_HELPER_FLAGS_5(sve_sel_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_sel_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_sel_zpzz_q, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve2_addp_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) @@ -717,6 +719,8 @@ DEF_HELPER_FLAGS_4(sve_revh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_revw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_revd_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + DEF_HELPER_FLAGS_4(sve_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_rbit_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_rbit_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) diff --git a/target/arm/helper.c b/target/arm/helper.c index 155d8bf239..22bc935242 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -8,14 +8,14 @@ #include "qemu/osdep.h" #include "qemu/units.h" -#include "target/arm/idau.h" +#include "qemu/log.h" #include "trace.h" #include "cpu.h" #include "internals.h" -#include "exec/gdbstub.h" #include "exec/helper-proto.h" #include "qemu/host-utils.h" #include "qemu/main-loop.h" +#include "qemu/timer.h" #include "qemu/bitops.h" #include "qemu/crc32c.h" #include "qemu/qemu-print.h" @@ -26,7 +26,6 @@ #include "sysemu/cpus.h" #include "sysemu/cpu-timers.h" #include "sysemu/kvm.h" -#include "sysemu/tcg.h" #include "qemu/range.h" #include "qapi/qapi-commands-machine-target.h" #include "qapi/error.h" @@ -36,118 +35,11 @@ #include "exec/cpu_ldst.h" #include "semihosting/common-semi.h" #endif +#include "cpregs.h" #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */ -#define PMCR_NUM_COUNTERS 4 /* QEMU IMPDEF choice */ - -#ifndef CONFIG_USER_ONLY - -static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - bool s1_is_el0, - hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, - target_ulong *page_size_ptr, - ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) - __attribute__((nonnull)); -#endif static void switch_mode(CPUARMState *env, int mode); -static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx); - -static int vfp_gdb_get_reg(CPUARMState 
*env, GByteArray *buf, int reg) -{ - ARMCPU *cpu = env_archcpu(env); - int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16; - - /* VFP data registers are always little-endian. */ - if (reg < nregs) { - return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg)); - } - if (arm_feature(env, ARM_FEATURE_NEON)) { - /* Aliases for Q regs. */ - nregs += 16; - if (reg < nregs) { - uint64_t *q = aa32_vfp_qreg(env, reg - 32); - return gdb_get_reg128(buf, q[0], q[1]); - } - } - switch (reg - nregs) { - case 0: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]); break; - case 1: return gdb_get_reg32(buf, vfp_get_fpscr(env)); break; - case 2: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]); break; - } - return 0; -} - -static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) -{ - ARMCPU *cpu = env_archcpu(env); - int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16; - - if (reg < nregs) { - *aa32_vfp_dreg(env, reg) = ldq_le_p(buf); - return 8; - } - if (arm_feature(env, ARM_FEATURE_NEON)) { - nregs += 16; - if (reg < nregs) { - uint64_t *q = aa32_vfp_qreg(env, reg - 32); - q[0] = ldq_le_p(buf); - q[1] = ldq_le_p(buf + 8); - return 16; - } - } - switch (reg - nregs) { - case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4; - case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4; - case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4; - } - return 0; -} - -static int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg) -{ - switch (reg) { - case 0 ... 31: - { - /* 128 bit FP register - quads are in LE order */ - uint64_t *q = aa64_vfp_qreg(env, reg); - return gdb_get_reg128(buf, q[1], q[0]); - } - case 32: - /* FPSR */ - return gdb_get_reg32(buf, vfp_get_fpsr(env)); - case 33: - /* FPCR */ - return gdb_get_reg32(buf,vfp_get_fpcr(env)); - default: - return 0; - } -} - -static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) -{ - switch (reg) { - case 0 ... 
31: - /* 128 bit FP register */ - { - uint64_t *q = aa64_vfp_qreg(env, reg); - q[0] = ldq_le_p(buf); - q[1] = ldq_le_p(buf + 8); - return 16; - } - case 32: - /* FPSR */ - vfp_set_fpsr(env, ldl_p(buf)); - return 4; - case 33: - /* FPCR */ - vfp_set_fpcr(env, ldl_p(buf)); - return 4; - default: - return 0; - } -} static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) { @@ -159,8 +51,7 @@ static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) } } -static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { assert(ri->fieldoffset); if (cpreg_field_is_64bit(ri)) { @@ -208,134 +99,6 @@ static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri, } } -/** - * arm_get/set_gdb_*: get/set a gdb register - * @env: the CPU state - * @buf: a buffer to copy to/from - * @reg: register number (offset from start of group) - * - * We return the number of bytes copied - */ - -static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg) -{ - ARMCPU *cpu = env_archcpu(env); - const ARMCPRegInfo *ri; - uint32_t key; - - key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg]; - ri = get_arm_cp_reginfo(cpu->cp_regs, key); - if (ri) { - if (cpreg_field_is_64bit(ri)) { - return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri)); - } else { - return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri)); - } - } - return 0; -} - -static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg) -{ - return 0; -} - -#ifdef TARGET_AARCH64 -static int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg) -{ - ARMCPU *cpu = env_archcpu(env); - - switch (reg) { - /* The first 32 registers are the zregs */ - case 0 ... 31: - { - int vq, len = 0; - for (vq = 0; vq < cpu->sve_max_vq; vq++) { - len += gdb_get_reg128(buf, - env->vfp.zregs[reg].d[vq * 2 + 1], - env->vfp.zregs[reg].d[vq * 2]); - } - return len; - } - case 32: - return gdb_get_reg32(buf, vfp_get_fpsr(env)); - case 33: - return gdb_get_reg32(buf, vfp_get_fpcr(env)); - /* then 16 predicates and the ffr */ - case 34 ... 50: - { - int preg = reg - 34; - int vq, len = 0; - for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) { - len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]); - } - return len; - } - case 51: - { - /* - * We report in Vector Granules (VG) which is 64bit in a Z reg - * while the ZCR works in Vector Quads (VQ) which is 128bit chunks. - */ - int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1; - return gdb_get_reg64(buf, vq * 2); - } - default: - /* gdbstub asked for something out our range */ - qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg); - break; - } - - return 0; -} - -static int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg) -{ - ARMCPU *cpu = env_archcpu(env); - - /* The first 32 registers are the zregs */ - switch (reg) { - /* The first 32 registers are the zregs */ - case 0 ... 31: - { - int vq, len = 0; - uint64_t *p = (uint64_t *) buf; - for (vq = 0; vq < cpu->sve_max_vq; vq++) { - env->vfp.zregs[reg].d[vq * 2 + 1] = *p++; - env->vfp.zregs[reg].d[vq * 2] = *p++; - len += 16; - } - return len; - } - case 32: - vfp_set_fpsr(env, *(uint32_t *)buf); - return 4; - case 33: - vfp_set_fpcr(env, *(uint32_t *)buf); - return 4; - case 34 ... 
50: - { - int preg = reg - 34; - int vq, len = 0; - uint64_t *p = (uint64_t *) buf; - for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) { - env->vfp.pregs[preg].p[vq / 4] = *p++; - len += 8; - } - return len; - } - case 51: - /* cannot set vg via gdbstub */ - return 0; - default: - /* gdbstub asked for something out our range */ - break; - } - - return 0; -} -#endif /* TARGET_AARCH64 */ - static bool raw_accessors_invalid(const ARMCPRegInfo *ri) { /* Return true if the regdef would cause an assertion if you called @@ -436,11 +199,8 @@ bool write_list_to_cpustate(ARMCPU *cpu) static void add_cpreg_to_list(gpointer key, gpointer opaque) { ARMCPU *cpu = opaque; - uint64_t regidx; - const ARMCPRegInfo *ri; - - regidx = *(uint32_t *)key; - ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); + uint32_t regidx = (uintptr_t)key; + const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) { cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx); @@ -452,11 +212,9 @@ static void add_cpreg_to_list(gpointer key, gpointer opaque) static void count_cpreg(gpointer key, gpointer opaque) { ARMCPU *cpu = opaque; - uint64_t regidx; const ARMCPRegInfo *ri; - regidx = *(uint32_t *)key; - ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); + ri = g_hash_table_lookup(cpu->cp_regs, key); if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) { cpu->cpreg_array_len++; @@ -465,8 +223,8 @@ static void count_cpreg(gpointer key, gpointer opaque) static gint cpreg_key_compare(gconstpointer a, gconstpointer b) { - uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a); - uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b); + uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a); + uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b); if (aidx > bidx) { return 1; @@ -543,71 +301,6 @@ static CPAccessResult access_trap_aa32s_el1(CPUARMState *env, return CP_ACCESS_TRAP_UNCATEGORIZED; } -static uint64_t arm_mdcr_el2_eff(CPUARMState *env) -{ - return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0; -} - -/* Check for traps to "powerdown debug" registers, which are controlled - * by MDCR.TDOSA - */ -static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ - int el = arm_current_el(env); - uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); - bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) || - (arm_hcr_el2_eff(env) & HCR_TGE); - - if (el < 2 && mdcr_el2_tdosa) { - return CP_ACCESS_TRAP_EL2; - } - if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) { - return CP_ACCESS_TRAP_EL3; - } - return CP_ACCESS_OK; -} - -/* Check for traps to "debug ROM" registers, which are controlled - * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3. - */ -static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ - int el = arm_current_el(env); - uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); - bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) || - (arm_hcr_el2_eff(env) & HCR_TGE); - - if (el < 2 && mdcr_el2_tdra) { - return CP_ACCESS_TRAP_EL2; - } - if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { - return CP_ACCESS_TRAP_EL3; - } - return CP_ACCESS_OK; -} - -/* Check for traps to general debug registers, which are controlled - * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3. 
- */
-static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
-                                 bool isread)
-{
-    int el = arm_current_el(env);
-    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
-    bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
-        (arm_hcr_el2_eff(env) & HCR_TGE);
-
-    if (el < 2 && mdcr_el2_tda) {
-        return CP_ACCESS_TRAP_EL2;
-    }
-    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
-        return CP_ACCESS_TRAP_EL3;
-    }
-    return CP_ACCESS_OK;
-}
-
 /* Check for traps to performance monitor registers, which are controlled
  * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
  */
@@ -706,6 +399,21 @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
     raw_write(env, ri, value);
 }
+static int alle1_tlbmask(CPUARMState *env)
+{
+    /*
+     * Note that the 'ALL' scope must invalidate both stage 1 and
+     * stage 2 translations, whereas most other scopes only invalidate
+     * stage 1 translations.
+     */
+    return (ARMMMUIdxBit_E10_1 |
+            ARMMMUIdxBit_E10_1_PAN |
+            ARMMMUIdxBit_E10_0 |
+            ARMMMUIdxBit_Stage2 |
+            ARMMMUIdxBit_Stage2_S);
+}
+
+
 /* IS variants of TLB operations must affect all cores */
 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
@@ -808,10 +516,7 @@ static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     CPUState *cs = env_cpu(env);
-    tlb_flush_by_mmuidx(cs,
-                        ARMMMUIdxBit_E10_1 |
-                        ARMMMUIdxBit_E10_1_PAN |
-                        ARMMMUIdxBit_E10_0);
+    tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
 }
 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -819,10 +524,7 @@
 {
     CPUState *cs = env_cpu(env);
-    tlb_flush_by_mmuidx_all_cpus_synced(cs,
-                                        ARMMMUIdxBit_E10_1 |
-                                        ARMMMUIdxBit_E10_1_PAN |
-                                        ARMMMUIdxBit_E10_0);
+    tlb_flush_by_mmuidx_all_cpus_synced(cs, alle1_tlbmask(env));
 }
@@ -861,6 +563,24 @@ static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                              ARMMMUIdxBit_E2);
 }
+static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                uint64_t value)
+{
+    CPUState *cs = env_cpu(env);
+    uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;
+
+    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
+}
+
+static void tlbiipas2is_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                  uint64_t value)
+{
+    CPUState *cs = env_cpu(env);
+    uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;
+
+    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_Stage2);
+}
+
 static const ARMCPRegInfo cp_reginfo[] = {
     /* Define the secure and non-secure FCSE identifier CP registers
      * separately because there is no secure bank in V8 (no _EL3).
This allows @@ -895,7 +615,6 @@ static const ARMCPRegInfo cp_reginfo[] = { .secure = ARM_CP_SECSTATE_S, .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s), .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, - REGINFO_SENTINEL }; static const ARMCPRegInfo not_v8_cp_reginfo[] = { @@ -924,7 +643,6 @@ static const ARMCPRegInfo not_v8_cp_reginfo[] = { { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_OVERRIDE }, - REGINFO_SENTINEL }; static const ARMCPRegInfo not_v6_cp_reginfo[] = { @@ -933,7 +651,6 @@ static const ARMCPRegInfo not_v6_cp_reginfo[] = { */ { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2, .access = PL1_W, .type = ARM_CP_WFI }, - REGINFO_SENTINEL }; static const ARMCPRegInfo not_v7_cp_reginfo[] = { @@ -982,7 +699,6 @@ static const ARMCPRegInfo not_v7_cp_reginfo[] = { .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP }, { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2, .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP }, - REGINFO_SENTINEL }; static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -998,11 +714,14 @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, */ if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) { /* VFP coprocessor: cp10 & cp11 [23:20] */ - mask |= (1 << 31) | (1 << 30) | (0xf << 20); + mask |= R_CPACR_ASEDIS_MASK | + R_CPACR_D32DIS_MASK | + R_CPACR_CP11_MASK | + R_CPACR_CP10_MASK; if (!arm_feature(env, ARM_FEATURE_NEON)) { /* ASEDIS [31] bit is RAO/WI */ - value |= (1 << 31); + value |= R_CPACR_ASEDIS_MASK; } /* VFPv3 and upwards with NEON implement 32 double precision @@ -1010,7 +729,7 @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, */ if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) { /* D32DIS [30] is RAO/WI if D16-31 are not implemented. 
*/ - value |= (1 << 30); + value |= R_CPACR_D32DIS_MASK; } } value &= mask; @@ -1022,8 +741,8 @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, */ if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { - value &= ~(0xf << 20); - value |= env->cp15.cpacr_el1 & (0xf << 20); + mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK; + value = (value & ~mask) | (env->cp15.cpacr_el1 & mask); } env->cp15.cpacr_el1 = value; @@ -1039,7 +758,7 @@ static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri) if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { - value &= ~(0xf << 20); + value = ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK); } return value; } @@ -1059,11 +778,11 @@ static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri, if (arm_feature(env, ARM_FEATURE_V8)) { /* Check if CPACR accesses are to be trapped to EL2 */ if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) && - (env->cp15.cptr_el[2] & CPTR_TCPAC)) { + FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) { return CP_ACCESS_TRAP_EL2; /* Check if CPACR accesses are to be trapped to EL3 */ } else if (arm_current_el(env) < 3 && - (env->cp15.cptr_el[3] & CPTR_TCPAC)) { + FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) { return CP_ACCESS_TRAP_EL3; } } @@ -1075,7 +794,8 @@ static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { /* Check if CPTR accesses are set to trap to EL3 */ - if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) { + if (arm_current_el(env) == 2 && + FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) { return CP_ACCESS_TRAP_EL3; } @@ -1111,53 +831,8 @@ static const ARMCPRegInfo v6_cp_reginfo[] = { .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1), .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read }, - REGINFO_SENTINEL }; -/* Definitions for the PMU registers */ -#define PMCRN_MASK 0xf800 -#define PMCRN_SHIFT 11 -#define PMCRLC 0x40 -#define PMCRDP 0x20 -#define PMCRX 0x10 -#define PMCRD 0x8 -#define PMCRC 0x4 -#define PMCRP 0x2 -#define PMCRE 0x1 -/* - * Mask of PMCR bits writeable by guest (not including WO bits like C, P, - * which can be written as 1 to trigger behaviour but which stay RAZ). 
- */ -#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE) - -#define PMXEVTYPER_P 0x80000000 -#define PMXEVTYPER_U 0x40000000 -#define PMXEVTYPER_NSK 0x20000000 -#define PMXEVTYPER_NSU 0x10000000 -#define PMXEVTYPER_NSH 0x08000000 -#define PMXEVTYPER_M 0x04000000 -#define PMXEVTYPER_MT 0x02000000 -#define PMXEVTYPER_EVTCOUNT 0x0000ffff -#define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \ - PMXEVTYPER_NSU | PMXEVTYPER_NSH | \ - PMXEVTYPER_M | PMXEVTYPER_MT | \ - PMXEVTYPER_EVTCOUNT) - -#define PMCCFILTR 0xf8000000 -#define PMCCFILTR_M PMXEVTYPER_M -#define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M) - -static inline uint32_t pmu_num_counters(CPUARMState *env) -{ - return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT; -} - -/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */ -static inline uint64_t pmu_counter_mask(CPUARMState *env) -{ - return (1 << 31) | ((1 << pmu_num_counters(env)) - 1); -} - typedef struct pm_event { uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */ /* If the event is supported on this CPU (used to generate PMCEID[01]) */ @@ -1231,16 +906,16 @@ static int64_t instructions_ns_per(uint64_t icount) } #endif -static bool pmu_8_1_events_supported(CPUARMState *env) +static bool pmuv3p1_events_supported(CPUARMState *env) { /* For events which are supported in any v8.1 PMU */ - return cpu_isar_feature(any_pmu_8_1, env_archcpu(env)); + return cpu_isar_feature(any_pmuv3p1, env_archcpu(env)); } -static bool pmu_8_4_events_supported(CPUARMState *env) +static bool pmuv3p4_events_supported(CPUARMState *env) { /* For events which are supported in any v8.1 PMU */ - return cpu_isar_feature(any_pmu_8_4, env_archcpu(env)); + return cpu_isar_feature(any_pmuv3p4, env_archcpu(env)); } static uint64_t zero_event_get_count(CPUARMState *env) @@ -1274,17 +949,17 @@ static const pm_event pm_events[] = { }, #endif { .number = 0x023, /* STALL_FRONTEND */ - .supported = pmu_8_1_events_supported, + .supported = pmuv3p1_events_supported, .get_count = zero_event_get_count, .ns_per_count = zero_event_ns_per, }, { .number = 0x024, /* STALL_BACKEND */ - .supported = pmu_8_1_events_supported, + .supported = pmuv3p1_events_supported, .get_count = zero_event_get_count, .ns_per_count = zero_event_ns_per, }, { .number = 0x03c, /* STALL */ - .supported = pmu_8_4_events_supported, + .supported = pmuv3p4_events_supported, .get_count = zero_event_get_count, .ns_per_count = zero_event_ns_per, }, @@ -1431,6 +1106,15 @@ static CPAccessResult pmreg_access_ccntr(CPUARMState *env, return pmreg_access(env, ri, isread); } +/* + * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at. + * We use these to decide whether we need to wrap a write to MDCR_EL2 + * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls. + */ +#define MDCR_EL2_PMU_ENABLE_BITS \ + (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP) +#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD) + /* Returns true if the counter (pass 31 for PMCCNTR) should count events using * the current EL, security state, and register configuration. 
*/ @@ -1438,7 +1122,7 @@ static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter) { uint64_t filter; bool e, p, u, nsk, nsu, nsh, m; - bool enabled, prohibited, filtered; + bool enabled, prohibited = false, filtered; bool secure = arm_is_secure(env); int el = arm_current_el(env); uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); @@ -1456,19 +1140,29 @@ static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter) } enabled = e && (env->cp15.c9_pmcnten & (1 << counter)); - if (!secure) { - if (el == 2 && (counter < hpmn || counter == 31)) { - prohibited = mdcr_el2 & MDCR_HPMD; - } else { - prohibited = false; - } - } else { - prohibited = arm_feature(env, ARM_FEATURE_EL3) && - !(env->cp15.mdcr_el3 & MDCR_SPME); + /* Is event counting prohibited? */ + if (el == 2 && (counter < hpmn || counter == 31)) { + prohibited = mdcr_el2 & MDCR_HPMD; + } + if (secure) { + prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME); } - if (prohibited && counter == 31) { - prohibited = env->cp15.c9_pmcr & PMCRDP; + if (counter == 31) { + /* + * The cycle counter defaults to running. PMCR.DP says "disable + * the cycle counter when event counting is prohibited". + * Some MDCR bits disable the cycle counter specifically. + */ + prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP; + if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { + if (secure) { + prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD); + } + if (el == 2) { + prohibited = prohibited || (mdcr_el2 & MDCR_HCCD); + } + } } if (counter == 31) { @@ -1516,6 +1210,43 @@ static void pmu_update_irq(CPUARMState *env) (env->cp15.c9_pminten & env->cp15.c9_pmovsr)); } +static bool pmccntr_clockdiv_enabled(CPUARMState *env) +{ + /* + * Return true if the clock divider is enabled and the cycle counter + * is supposed to tick only once every 64 clock cycles. This is + * controlled by PMCR.D, but if PMCR.LC is set to enable the long + * (64-bit) cycle counter PMCR.D has no effect. + */ + return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD; +} + +static bool pmevcntr_is_64_bit(CPUARMState *env, int counter) +{ + /* Return true if the specified event counter is configured to be 64 bit */ + + /* This isn't intended to be used with the cycle counter */ + assert(counter < 31); + + if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { + return false; + } + + if (arm_feature(env, ARM_FEATURE_EL2)) { + /* + * MDCR_EL2.HLP still applies even when EL2 is disabled in the + * current security state, so we don't use arm_mdcr_el2_eff() here. + */ + bool hlp = env->cp15.mdcr_el2 & MDCR_HLP; + int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN; + + if (hpmn != 0 && counter >= hpmn) { + return hlp; + } + } + return env->cp15.c9_pmcr & PMCRLP; +} + /* * Ensure c15_ccnt is the guest-visible count so that operations such as * enabling/disabling the counter or filtering, modifying the count itself, @@ -1528,8 +1259,7 @@ static void pmccntr_op_start(CPUARMState *env) if (pmu_counter_enabled(env, 31)) { uint64_t eff_cycles = cycles; - if (env->cp15.c9_pmcr & PMCRD) { - /* Increment once every 64 processor clock cycles */ + if (pmccntr_clockdiv_enabled(env)) { eff_cycles /= 64; } @@ -1538,7 +1268,7 @@ static void pmccntr_op_start(CPUARMState *env) uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? 
\ 1ull << 63 : 1ull << 31; if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) { - env->cp15.c9_pmovsr |= (1 << 31); + env->cp15.c9_pmovsr |= (1ULL << 31); pmu_update_irq(env); } @@ -1564,16 +1294,18 @@ static void pmccntr_op_finish(CPUARMState *env) int64_t overflow_in = cycles_ns_per(remaining_cycles); if (overflow_in > 0) { - int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - overflow_in; - ARMCPU *cpu = env_archcpu(env); - timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); + int64_t overflow_at; + + if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + overflow_in, &overflow_at)) { + ARMCPU *cpu = env_archcpu(env); + timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); + } } #endif uint64_t prev_cycles = env->cp15.c15_ccnt_delta; - if (env->cp15.c9_pmcr & PMCRD) { - /* Increment once every 64 processor clock cycles */ + if (pmccntr_clockdiv_enabled(env)) { prev_cycles /= 64; } env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt; @@ -1591,9 +1323,11 @@ static void pmevcntr_op_start(CPUARMState *env, uint8_t counter) } if (pmu_counter_enabled(env, counter)) { - uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter]; + uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter]; + uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ? + 1ULL << 63 : 1ULL << 31; - if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) { + if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) { env->cp15.c9_pmovsr |= (1 << counter); pmu_update_irq(env); } @@ -1608,15 +1342,22 @@ static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter) #ifndef CONFIG_USER_ONLY uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; uint16_t event_idx = supported_event_map[event]; - uint64_t delta = UINT32_MAX - - (uint32_t)env->cp15.c14_pmevcntr[counter] + 1; - int64_t overflow_in = pm_events[event_idx].ns_per_count(delta); + uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1); + int64_t overflow_in; + + if (!pmevcntr_is_64_bit(env, counter)) { + delta = (uint32_t)delta; + } + overflow_in = pm_events[event_idx].ns_per_count(delta); if (overflow_in > 0) { - int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - overflow_in; - ARMCPU *cpu = env_archcpu(env); - timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); + int64_t overflow_at; + + if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + overflow_in, &overflow_at)) { + ARMCPU *cpu = env_archcpu(env); + timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); + } } #endif @@ -1684,8 +1425,8 @@ static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, } } - env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK; - env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK); + env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK; + env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK); pmu_op_finish(env); } @@ -1694,6 +1435,8 @@ static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { unsigned int i; + uint64_t overflow_mask, new_pmswinc; + for (i = 0; i < pmu_num_counters(env); i++) { /* Increment a counter's count iff: */ if ((value & (1 << i)) && /* counter's bit is set */ @@ -1707,9 +1450,12 @@ static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri, * Detect if this write causes an overflow since we can't predict * PMSWINC overflows like we can for other events */ - uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1; + new_pmswinc = env->cp15.c14_pmevcntr[i] + 1; - if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) { + 
overflow_mask = pmevcntr_is_64_bit(env, i) ? + 1ULL << 63 : 1ULL << 31; + + if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) { env->cp15.c9_pmovsr |= (1 << i); pmu_update_irq(env); } @@ -1784,15 +1530,19 @@ static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri) static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { + pmu_op_start(env); value &= pmu_counter_mask(env); env->cp15.c9_pmcnten |= value; + pmu_op_finish(env); } static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { + pmu_op_start(env); value &= pmu_counter_mask(env); env->cp15.c9_pmcnten &= ~value; + pmu_op_finish(env); } static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -1912,6 +1662,10 @@ static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri) static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value, uint8_t counter) { + if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { + /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */ + value &= MAKE_64BIT_MASK(0, 32); + } if (counter < pmu_num_counters(env)) { pmevcntr_op_start(env, counter); env->cp15.c14_pmevcntr[counter] = value; @@ -1931,6 +1685,10 @@ static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri, pmevcntr_op_start(env, counter); ret = env->cp15.c14_pmevcntr[counter]; pmevcntr_op_finish(env, counter); + if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { + /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */ + ret &= MAKE_64BIT_MASK(0, 32); + } return ret; } else { /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR @@ -2021,16 +1779,26 @@ static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri, static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* Begin with base v8.0 state. */ - uint32_t valid_mask = 0x3fff; + uint64_t valid_mask = 0x3fff; ARMCPU *cpu = env_archcpu(env); + uint64_t changed; - if (ri->state == ARM_CP_STATE_AA64) { - if (arm_feature(env, ARM_FEATURE_AARCH64) && - !cpu_isar_feature(aa64_aa32_el1, cpu)) { - value |= SCR_FW | SCR_AW; /* these two bits are RES1. */ + /* + * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always + * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64. + * Instead, choose the format based on the mode of EL3. 
+ */ + if (arm_el_is_aa64(env, 3)) { + value |= SCR_FW | SCR_AW; /* RES1 */ + valid_mask &= ~SCR_NET; /* RES0 */ + + if (!cpu_isar_feature(aa64_aa32_el1, cpu) && + !cpu_isar_feature(aa64_aa32_el2, cpu)) { + value |= SCR_RW; /* RAO/WI */ + } + if (cpu_isar_feature(aa64_ras, cpu)) { + valid_mask |= SCR_TERR; } - valid_mask &= ~SCR_NET; - if (cpu_isar_feature(aa64_lor, cpu)) { valid_mask |= SCR_TLOR; } @@ -2043,8 +1811,23 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) if (cpu_isar_feature(aa64_mte, cpu)) { valid_mask |= SCR_ATA; } + if (cpu_isar_feature(aa64_scxtnum, cpu)) { + valid_mask |= SCR_ENSCXT; + } + if (cpu_isar_feature(aa64_doublefault, cpu)) { + valid_mask |= SCR_EASE | SCR_NMEA; + } + if (cpu_isar_feature(aa64_sme, cpu)) { + valid_mask |= SCR_ENTP2; + } + if (cpu_isar_feature(aa64_hcx, cpu)) { + valid_mask |= SCR_HXEN; + } } else { valid_mask &= ~(SCR_RW | SCR_ST); + if (cpu_isar_feature(aa32_ras, cpu)) { + valid_mask |= SCR_TERR; + } } if (!arm_feature(env, ARM_FEATURE_EL2)) { @@ -2064,7 +1847,22 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) /* Clear all-context RES0 bits. */ value &= valid_mask; - raw_write(env, ri, value); + changed = env->cp15.scr_el3 ^ value; + env->cp15.scr_el3 = value; + + /* + * If SCR_EL3.NS changes, i.e. arm_is_secure_below_el3, then + * we must invalidate all TLBs below EL3. + */ + if (changed & SCR_NS) { + tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 | + ARMMMUIdxBit_E20_0 | + ARMMMUIdxBit_E10_1 | + ARMMMUIdxBit_E20_2 | + ARMMMUIdxBit_E10_1_PAN | + ARMMMUIdxBit_E20_2_PAN | + ARMMMUIdxBit_E2)); + } } static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri) @@ -2133,7 +1931,12 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) } } - /* External aborts are not possible in QEMU so A bit is always clear */ + if (hcr_el2 & HCR_AMO) { + if (cs->interrupt_request & CPU_INTERRUPT_VSERR) { + ret |= CPSR_A; + } + } + return ret; } @@ -2173,12 +1976,12 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { * or PL0_RO as appropriate and then check PMUSERENR in the helper fn. 
*/ { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, - .access = PL0_RW, .type = ARM_CP_ALIAS, + .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO, .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), .writefn = pmcntenset_write, .accessfn = pmreg_access, .raw_writefn = raw_write }, - { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, + { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1, .access = PL0_RW, .accessfn = pmreg_access, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0, @@ -2188,11 +1991,11 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), .accessfn = pmreg_access, .writefn = pmcntenclr_write, - .type = ARM_CP_ALIAS }, + .type = ARM_CP_ALIAS | ARM_CP_IO }, { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2, .access = PL0_RW, .accessfn = pmreg_access, - .type = ARM_CP_ALIAS, + .type = ARM_CP_ALIAS | ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .writefn = pmcntenclr_write }, { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, @@ -2401,7 +2204,6 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbimvaa_write }, - REGINFO_SENTINEL }; static const ARMCPRegInfo v7mp_cp_reginfo[] = { @@ -2418,7 +2220,6 @@ static const ARMCPRegInfo v7mp_cp_reginfo[] = { { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbimvaa_is_write }, - REGINFO_SENTINEL }; static const ARMCPRegInfo pmovsset_cp_reginfo[] = { @@ -2436,7 +2237,6 @@ static const ARMCPRegInfo pmovsset_cp_reginfo[] = { .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), .writefn = pmovsset_write, .raw_writefn = raw_write }, - REGINFO_SENTINEL }; static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -2446,24 +2246,37 @@ static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, env->teecr = value; } +static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + /* + * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE + * at all, so we don't need to check whether we're v8A. 
+ */ + if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) && + (env->cp15.hstr_el2 & HSTR_TTEE)) { + return CP_ACCESS_TRAP_EL2; + } + return CP_ACCESS_OK; +} + static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_current_el(env) == 0 && (env->teecr & 1)) { return CP_ACCESS_TRAP; } - return CP_ACCESS_OK; + return teecr_access(env, ri, isread); } static const ARMCPRegInfo t2ee_cp_reginfo[] = { { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr), .resetvalue = 0, - .writefn = teecr_write }, + .writefn = teecr_write, .accessfn = teecr_access }, { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0, .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr), .accessfn = teehbr_access, .resetvalue = 0 }, - REGINFO_SENTINEL }; static const ARMCPRegInfo v6k_cp_reginfo[] = { @@ -2495,7 +2308,6 @@ static const ARMCPRegInfo v6k_cp_reginfo[] = { .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s), offsetoflow32(CPUARMState, cp15.tpidrprw_ns) }, .resetvalue = 0 }, - REGINFO_SENTINEL }; #ifndef CONFIG_USER_ONLY @@ -2881,9 +2693,6 @@ static int gt_phys_redir_timeridx(CPUARMState *env) case ARMMMUIdx_E20_0: case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: - case ARMMMUIdx_SE20_0: - case ARMMMUIdx_SE20_2: - case ARMMMUIdx_SE20_2_PAN: return GTIMER_HYP; default: return GTIMER_PHYS; @@ -2896,9 +2705,6 @@ static int gt_virt_redir_timeridx(CPUARMState *env) case ARMMMUIdx_E20_0: case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: - case ARMMMUIdx_SE20_0: - case ARMMMUIdx_SE20_2: - case ARMMMUIdx_SE20_2_PAN: return GTIMER_HYPVIRT; default: return GTIMER_VIRT; @@ -3343,7 +3149,6 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = { .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), .writefn = gt_sec_cval_write, .raw_writefn = raw_write, }, - REGINFO_SENTINEL }; static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri, @@ -3384,7 +3189,6 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = { .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, .readfn = gt_virt_cnt_read, }, - REGINFO_SENTINEL }; #endif @@ -3427,20 +3231,23 @@ static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, #ifdef CONFIG_TCG static uint64_t do_ats_write(CPUARMState *env, uint64_t value, - MMUAccessType access_type, ARMMMUIdx mmu_idx) + MMUAccessType access_type, ARMMMUIdx mmu_idx, + bool is_secure) { - hwaddr phys_addr; - target_ulong page_size; - int prot; bool ret; uint64_t par64; bool format64 = false; - MemTxAttrs attrs = {}; ARMMMUFaultInfo fi = {}; - ARMCacheAttrs cacheattrs = {}; + GetPhysAddrResult res = {}; - ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs, - &prot, &page_size, &fi, &cacheattrs); + ret = get_phys_addr_with_secure(env, value, access_type, mmu_idx, + is_secure, &res, &fi); + + /* + * ATS operations only do S1 or S1+S2 translations, so we never + * have to deal with the ARMCacheAttrs format for S2 only. 
+ */ + assert(!res.cacheattrs.is_s2_format); if (ret) { /* @@ -3546,12 +3353,12 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value, /* Create a 64-bit PAR */ par64 = (1 << 11); /* LPAE bit always set */ if (!ret) { - par64 |= phys_addr & ~0xfffULL; - if (!attrs.secure) { + par64 |= res.f.phys_addr & ~0xfffULL; + if (!res.f.attrs.secure) { par64 |= (1 << 9); /* NS */ } - par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */ - par64 |= cacheattrs.shareability << 7; /* SH */ + par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */ + par64 |= res.cacheattrs.shareability << 7; /* SH */ } else { uint32_t fsr = arm_fi_to_lfsc(&fi); @@ -3571,13 +3378,13 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value, */ if (!ret) { /* We do not set any attribute bits in the PAR */ - if (page_size == (1 << 24) + if (res.f.lg_page_size == 24 && arm_feature(env, ARM_FEATURE_V7)) { - par64 = (phys_addr & 0xff000000) | (1 << 1); + par64 = (res.f.phys_addr & 0xff000000) | (1 << 1); } else { - par64 = phys_addr & 0xfffff000; + par64 = res.f.phys_addr & 0xfffff000; } - if (!attrs.secure) { + if (!res.f.attrs.secure) { par64 |= (1 << 9); /* NS */ } } else { @@ -3605,17 +3412,17 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */ switch (el) { case 3: - mmu_idx = ARMMMUIdx_SE3; + mmu_idx = ARMMMUIdx_E3; + secure = true; break; case 2: g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */ /* fall through */ case 1: if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) { - mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN - : ARMMMUIdx_Stage1_E1_PAN); + mmu_idx = ARMMMUIdx_Stage1_E1_PAN; } else { - mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1; + mmu_idx = ARMMMUIdx_Stage1_E1; } break; default: @@ -3626,14 +3433,15 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ switch (el) { case 3: - mmu_idx = ARMMMUIdx_SE10_0; + mmu_idx = ARMMMUIdx_E10_0; + secure = true; break; case 2: g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */ mmu_idx = ARMMMUIdx_Stage1_E0; break; case 1: - mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0; + mmu_idx = ARMMMUIdx_Stage1_E0; break; default: g_assert_not_reached(); @@ -3642,16 +3450,18 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) case 4: /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ mmu_idx = ARMMMUIdx_E10_1; + secure = false; break; case 6: /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ mmu_idx = ARMMMUIdx_E10_0; + secure = false; break; default: g_assert_not_reached(); } - par64 = do_ats_write(env, value, access_type, mmu_idx); + par64 = do_ats_write(env, value, access_type, mmu_idx, secure); A32_BANKED_CURRENT_REG_SET(env, par, par64); #else @@ -3667,7 +3477,8 @@ static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; uint64_t par64; - par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2); + /* There is no SecureEL2 for AArch32. */ + par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2, false); A32_BANKED_CURRENT_REG_SET(env, par, par64); #else @@ -3693,42 +3504,46 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, MMUAccessType access_type = ri->opc2 & 1 ? 
MMU_DATA_STORE : MMU_DATA_LOAD; ARMMMUIdx mmu_idx; int secure = arm_is_secure_below_el3(env); + uint64_t hcr_el2 = arm_hcr_el2_eff(env); + bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE); switch (ri->opc2 & 6) { case 0: switch (ri->opc1) { case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */ if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) { - mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN - : ARMMMUIdx_Stage1_E1_PAN); + mmu_idx = regime_e20 ? + ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN; } else { - mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1; + mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1; } break; case 4: /* AT S1E2R, AT S1E2W */ - mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2; + mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2; break; case 6: /* AT S1E3R, AT S1E3W */ - mmu_idx = ARMMMUIdx_SE3; + mmu_idx = ARMMMUIdx_E3; + secure = true; break; default: g_assert_not_reached(); } break; case 2: /* AT S1E0R, AT S1E0W */ - mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0; + mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0; break; case 4: /* AT S12E1R, AT S12E1W */ - mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1; + mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1; break; case 6: /* AT S12E0R, AT S12E0W */ - mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0; + mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0; break; default: g_assert_not_reached(); } - env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx); + env->cp15.par_el[1] = do_ats_write(env, value, access_type, + mmu_idx, secure); #else /* Handled by hardware accelerator. */ g_assert_not_reached(); @@ -3748,7 +3563,6 @@ static const ARMCPRegInfo vapa_cp_reginfo[] = { .access = PL1_W, .accessfn = ats_access, .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, #endif - REGINFO_SENTINEL }; /* Return basic MPU access permission bits. */ @@ -3871,7 +3685,6 @@ static const ARMCPRegInfo pmsav7_cp_reginfo[] = { .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]), .writefn = pmsav7_rgnr_write, .resetfn = arm_cp_reset_ignore }, - REGINFO_SENTINEL }; static const ARMCPRegInfo pmsav5_cp_reginfo[] = { @@ -3922,22 +3735,23 @@ static const ARMCPRegInfo pmsav5_cp_reginfo[] = { { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) }, - REGINFO_SENTINEL }; -static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - TCR *tcr = raw_ptr(env, ri); - int maskshift = extract32(value, 0, 3); + ARMCPU *cpu = env_archcpu(env); if (!arm_feature(env, ARM_FEATURE_V8)) { if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) { - /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when - * using Long-desciptor translation table format */ + /* + * Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when + * using Long-descriptor translation table format + */ value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); } else if (arm_feature(env, ARM_FEATURE_EL3)) { - /* In an implementation that includes the Security Extensions + /* + * In an implementation that includes the Security Extensions * TTBCR has additional fields PD0 [4] and PD1 [5] for * Short-descriptor translation table format. 
*/ @@ -3947,55 +3761,23 @@ static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri, } } - /* Update the masks corresponding to the TCR bank being written - * Note that we always calculate mask and base_mask, but - * they are only used for short-descriptor tables (ie if EAE is 0); - * for long-descriptor tables the TCR fields are used differently - * and the mask and base_mask values are meaningless. - */ - tcr->raw_tcr = value; - tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift); - tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift); -} - -static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = env_archcpu(env); - TCR *tcr = raw_ptr(env, ri); - if (arm_feature(env, ARM_FEATURE_LPAE)) { /* With LPAE the TTBCR could result in a change of ASID * via the TTBCR.A1 bit, so do a TLB flush. */ tlb_flush(CPU(cpu)); } - /* Preserve the high half of TCR_EL1, set via TTBCR2. */ - value = deposit64(tcr->raw_tcr, 0, 32, value); - vmsa_ttbcr_raw_write(env, ri, value); -} - -static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) -{ - TCR *tcr = raw_ptr(env, ri); - - /* Reset both the TCR as well as the masks corresponding to the bank of - * the TCR being reset. - */ - tcr->raw_tcr = 0; - tcr->mask = 0; - tcr->base_mask = 0xffffc000u; + raw_write(env, ri, value); } static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); - TCR *tcr = raw_ptr(env, ri); /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ tlb_flush(CPU(cpu)); - tcr->raw_tcr = value; + raw_write(env, ri, value); } static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -4024,11 +3806,6 @@ static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, uint16_t mask = ARMMMUIdxBit_E20_2 | ARMMMUIdxBit_E20_2_PAN | ARMMMUIdxBit_E20_0; - - if (arm_is_secure_below_el3(env)) { - mask >>= ARM_MMU_IDX_A_NS; - } - tlb_flush_by_mmuidx(env_cpu(env), mask); } raw_write(env, ri, value); @@ -4042,20 +3819,12 @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, /* * A change in VMID to the stage2 page table (Stage2) invalidates - * the combined stage 1&2 tlbs (EL10_1 and EL10_0). + * the stage2 and combined stage 1&2 tlbs (EL10_1 and EL10_0). 
*/ - if (raw_read(env, ri) != value) { - uint16_t mask = ARMMMUIdxBit_E10_1 | - ARMMMUIdxBit_E10_1_PAN | - ARMMMUIdxBit_E10_0; - - if (arm_is_secure_below_el3(env)) { - mask >>= ARM_MMU_IDX_A_NS; - } - - tlb_flush_by_mmuidx(cs, mask); - raw_write(env, ri, value); + if (extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { + tlb_flush_by_mmuidx(cs, alle1_tlbmask(env)); } + raw_write(env, ri, value); } static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { @@ -4076,7 +3845,6 @@ static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { .access = PL1_RW, .accessfn = access_tvm_trvm, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), .resetvalue = 0, }, - REGINFO_SENTINEL }; static const ARMCPRegInfo vmsa_cp_reginfo[] = { @@ -4100,16 +3868,15 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = { .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, .access = PL1_RW, .accessfn = access_tvm_trvm, .writefn = vmsa_tcr_el12_write, - .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write, + .raw_writefn = raw_write, + .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, - .raw_writefn = vmsa_ttbcr_raw_write, - /* No offsetoflow32 -- pass the entire TCR to writefn/raw_writefn. */ - .bank_fieldoffsets = { offsetof(CPUARMState, cp15.tcr_el[3]), - offsetof(CPUARMState, cp15.tcr_el[1])} }, - REGINFO_SENTINEL + .raw_writefn = raw_write, + .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), + offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, }; /* Note that unlike TTBCR, writing to TTBCR2 does not require flushing @@ -4120,8 +3887,8 @@ static const ARMCPRegInfo ttbcr2_reginfo = { .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS, .bank_fieldoffsets = { - offsetofhigh32(CPUARMState, cp15.tcr_el[3].raw_tcr), - offsetofhigh32(CPUARMState, cp15.tcr_el[1].raw_tcr), + offsetofhigh32(CPUARMState, cp15.tcr_el[3]), + offsetofhigh32(CPUARMState, cp15.tcr_el[1]), }, }; @@ -4194,7 +3961,6 @@ static const ARMCPRegInfo omap_cp_reginfo[] = { { .name = "C9", .cp = 15, .crn = 9, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, - REGINFO_SENTINEL }; static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -4227,7 +3993,6 @@ static const ARMCPRegInfo xscale_cp_reginfo[] = { { .name = "XSCALE_UNLOCK_DCACHE", .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, .access = PL1_W, .type = ARM_CP_NOP }, - REGINFO_SENTINEL }; static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { @@ -4241,7 +4006,6 @@ static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE, .resetvalue = 0 }, - REGINFO_SENTINEL }; static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { @@ -4249,11 +4013,10 @@ static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, .resetvalue = 0 }, - REGINFO_SENTINEL }; static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { - /* We never have a a block transfer operation in progress */ + /* We never have a block transfer operation in progress */ { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4, .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, .resetvalue = 0 }, @@ -4270,7 +4033,6 @@ static const 
ARMCPRegInfo cache_block_ops_cp_reginfo[] = { .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0, .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, - REGINFO_SENTINEL }; static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { @@ -4283,7 +4045,6 @@ static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3, .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, .resetvalue = (1 << 30) }, - REGINFO_SENTINEL }; static const ARMCPRegInfo strongarm_cp_reginfo[] = { @@ -4292,7 +4053,6 @@ static const ARMCPRegInfo strongarm_cp_reginfo[] = { .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW }, - REGINFO_SENTINEL }; static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri) @@ -4359,7 +4119,6 @@ static const ARMCPRegInfo lpae_cp_reginfo[] = { .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), offsetof(CPUARMState, cp15.ttbr1_ns) }, .writefn = vmsa_ttbr_write, }, - REGINFO_SENTINEL }; static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) @@ -4533,11 +4292,6 @@ static int vae1_tlbmask(CPUARMState *env) ARMMMUIdxBit_E10_1_PAN | ARMMMUIdxBit_E10_0; } - - if (arm_is_secure_below_el3(env)) { - mask >>= ARM_MMU_IDX_A_NS; - } - return mask; } @@ -4545,7 +4299,7 @@ static int vae1_tlbmask(CPUARMState *env) static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx, uint64_t addr) { - uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; + uint64_t tcr = regime_tcr(env, mmu_idx); int tbi = aa64_va_parameter_tbi(tcr, mmu_idx); int select = extract64(addr, 55, 1); @@ -4564,10 +4318,6 @@ static int vae1_tlbbits(CPUARMState *env, uint64_t addr) mmu_idx = ARMMMUIdx_E10_0; } - if (arm_is_secure_below_el3(env)) { - mmu_idx &= ~ARM_MMU_IDX_A_NS; - } - return tlbbits_for_regime(env, mmu_idx, addr); } @@ -4593,37 +4343,12 @@ static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, } } -static int alle1_tlbmask(CPUARMState *env) -{ - /* - * Note that the 'ALL' scope must invalidate both stage 1 and - * stage 2 translations, whereas most other scopes only invalidate - * stage 1 translations. 
- */ - if (arm_is_secure_below_el3(env)) { - return ARMMMUIdxBit_SE10_1 | - ARMMMUIdxBit_SE10_1_PAN | - ARMMMUIdxBit_SE10_0; - } else { - return ARMMMUIdxBit_E10_1 | - ARMMMUIdxBit_E10_1_PAN | - ARMMMUIdxBit_E10_0; - } -} - static int e2_tlbmask(CPUARMState *env) { - if (arm_is_secure_below_el3(env)) { - return ARMMMUIdxBit_SE20_0 | - ARMMMUIdxBit_SE20_2 | - ARMMMUIdxBit_SE20_2_PAN | - ARMMMUIdxBit_SE2; - } else { - return ARMMMUIdxBit_E20_0 | - ARMMMUIdxBit_E20_2 | - ARMMMUIdxBit_E20_2_PAN | - ARMMMUIdxBit_E2; - } + return (ARMMMUIdxBit_E20_0 | + ARMMMUIdxBit_E20_2 | + ARMMMUIdxBit_E20_2_PAN | + ARMMMUIdxBit_E2); } static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -4650,7 +4375,7 @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, ARMCPU *cpu = env_archcpu(env); CPUState *cs = CPU(cpu); - tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3); + tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3); } static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -4676,7 +4401,7 @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, { CPUState *cs = env_cpu(env); - tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3); + tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3); } static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -4704,7 +4429,7 @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, CPUState *cs = CPU(cpu); uint64_t pageaddr = sextract64(value << 12, 0, 56); - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3); + tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3); } static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -4743,12 +4468,10 @@ static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, { CPUState *cs = env_cpu(env); uint64_t pageaddr = sextract64(value << 12, 0, 56); - bool secure = arm_is_secure_below_el3(env); - int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2; - int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2, - pageaddr); + int bits = tlbbits_for_regime(env, ARMMMUIdx_E2, pageaddr); - tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); + tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, + ARMMMUIdxBit_E2, bits); } static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -4756,77 +4479,137 @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, { CPUState *cs = env_cpu(env); uint64_t pageaddr = sextract64(value << 12, 0, 56); - int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr); + int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr); tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, - ARMMMUIdxBit_SE3, bits); + ARMMMUIdxBit_E3, bits); +} + +static int ipas2e1_tlbmask(CPUARMState *env, int64_t value) +{ + /* + * The MSB of value is the NS field, which only applies if SEL2 + * is implemented and SCR_EL3.NS is not set (i.e. in secure mode). + */ + return (value >= 0 + && cpu_isar_feature(aa64_sel2, env_archcpu(env)) + && arm_is_secure_below_el3(env) + ? 
ARMMMUIdxBit_Stage2_S + : ARMMMUIdxBit_Stage2); +} + +static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = ipas2e1_tlbmask(env, value); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + + if (tlb_force_broadcast(env)) { + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); + } else { + tlb_flush_page_by_mmuidx(cs, pageaddr, mask); + } +} + +static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = ipas2e1_tlbmask(env, value); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); } #ifdef TARGET_AARCH64 -static uint64_t tlbi_aa64_range_get_length(CPUARMState *env, - uint64_t value) -{ - unsigned int page_shift; - unsigned int page_size_granule; - uint64_t num; - uint64_t scale; - uint64_t exponent; +typedef struct { + uint64_t base; uint64_t length; +} TLBIRange; - num = extract64(value, 39, 4); - scale = extract64(value, 44, 2); - page_size_granule = extract64(value, 46, 2); - - page_shift = page_size_granule * 2 + 12; - - if (page_size_granule == 0) { - qemu_log_mask(LOG_GUEST_ERROR, "Invalid page size granule %d\n", - page_size_granule); - return 0; +static ARMGranuleSize tlbi_range_tg_to_gran_size(int tg) +{ + /* + * Note that the TLBI range TG field encoding differs from both + * TG0 and TG1 encodings. + */ + switch (tg) { + case 1: + return Gran4K; + case 2: + return Gran16K; + case 3: + return Gran64K; + default: + return GranInvalid; } - - exponent = (5 * scale) + 1; - length = (num + 1) << (exponent + page_shift); - - return length; } -static uint64_t tlbi_aa64_range_get_base(CPUARMState *env, uint64_t value, - bool two_ranges) +static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx, + uint64_t value) { - /* TODO: ARMv8.7 FEAT_LPA2 */ - uint64_t pageaddr; + unsigned int page_size_granule, page_shift, num, scale, exponent; + /* Extract one bit to represent the va selector in use. */ + uint64_t select = sextract64(value, 36, 1); + ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true); + TLBIRange ret = { }; + ARMGranuleSize gran; - if (two_ranges) { - pageaddr = sextract64(value, 0, 37) << TARGET_PAGE_BITS; - } else { - pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS; + page_size_granule = extract64(value, 46, 2); + gran = tlbi_range_tg_to_gran_size(page_size_granule); + + /* The granule encoded in value must match the granule in use. */ + if (gran != param.gran) { + qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n", + page_size_granule); + return ret; } - return pageaddr; + page_shift = arm_granule_bits(gran); + num = extract64(value, 39, 5); + scale = extract64(value, 44, 2); + exponent = (5 * scale) + 1; + + ret.length = (num + 1) << (exponent + page_shift); + + if (param.select) { + ret.base = sextract64(value, 0, 37); + } else { + ret.base = extract64(value, 0, 37); + } + if (param.ds) { + /* + * With DS=1, BaseADDR is always shifted 16 so that it is able + * to address all 52 va bits. The input address is perforce + * aligned on a 64k boundary regardless of translation granule. 
+ */ + page_shift = 16; + } + ret.base <<= page_shift; + + return ret; } static void do_rvae_write(CPUARMState *env, uint64_t value, int idxmap, bool synced) { ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap); - bool two_ranges = regime_has_2_ranges(one_idx); - uint64_t baseaddr, length; + TLBIRange range; int bits; - baseaddr = tlbi_aa64_range_get_base(env, value, two_ranges); - length = tlbi_aa64_range_get_length(env, value); - bits = tlbbits_for_regime(env, one_idx, baseaddr); + range = tlbi_aa64_get_range(env, one_idx, value); + bits = tlbbits_for_regime(env, one_idx, range.base); if (synced) { tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env), - baseaddr, - length, + range.base, + range.length, idxmap, bits); } else { - tlb_flush_range_by_mmuidx(env_cpu(env), baseaddr, - length, idxmap, bits); + tlb_flush_range_by_mmuidx(env_cpu(env), range.base, + range.length, idxmap, bits); } } @@ -4862,8 +4645,7 @@ static void tlbi_aa64_rvae1is_write(CPUARMState *env, static int vae2_tlbmask(CPUARMState *env) { - return (arm_is_secure_below_el3(env) - ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2); + return ARMMMUIdxBit_E2; } static void tlbi_aa64_rvae2_write(CPUARMState *env, @@ -4909,8 +4691,7 @@ static void tlbi_aa64_rvae3_write(CPUARMState *env, * flush-last-level-only. */ - do_rvae_write(env, value, ARMMMUIdxBit_SE3, - tlb_force_broadcast(env)); + do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env)); } static void tlbi_aa64_rvae3is_write(CPUARMState *env, @@ -4924,7 +4705,21 @@ static void tlbi_aa64_rvae3is_write(CPUARMState *env, * flush-last-level-only or inner/outer specific flushes. */ - do_rvae_write(env, value, ARMMMUIdxBit_SE3, true); + do_rvae_write(env, value, ARMMMUIdxBit_E3, true); +} + +static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + do_rvae_write(env, value, ipas2e1_tlbmask(env, value), + tlb_force_broadcast(env)); +} + +static void tlbi_aa64_ripas2e1is_write(CPUARMState *env, + const ARMCPRegInfo *ri, + uint64_t value) +{ + do_rvae_write(env, value, ipas2e1_tlbmask(env, value), true); } #endif @@ -5034,22 +4829,49 @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, } } -static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) +static void mdcr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) { - return CP_ACCESS_TRAP_FP_EL2; + /* + * Some MDCR_EL3 bits affect whether PMU counters are running: + * if we are trying to change any of those then we must + * bracket this update with PMU start/finish calls. + */ + bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS; + + if (pmu_op) { + pmu_op_start(env); } - if (env->cp15.cptr_el[3] & CPTR_TFP) { - return CP_ACCESS_TRAP_FP_EL3; + env->cp15.mdcr_el3 = value; + if (pmu_op) { + pmu_op_finish(env); } - return CP_ACCESS_OK; } static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - env->cp15.mdcr_el3 = value & SDCR_VALID_MASK; + /* Not all bits defined for MDCR_EL3 exist in the AArch32 SDCR */ + mdcr_el3_write(env, ri, value & SDCR_VALID_MASK); +} + +static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * Some MDCR_EL2 bits affect whether PMU counters are running: + * if we are trying to change any of those then we must + * bracket this update with PMU start/finish calls. 
+ */ + bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS; + + if (pmu_op) { + pmu_op_start(env); + } + env->cp15.mdcr_el2 = value; + if (pmu_op) { + pmu_op_finish(env); + } } static const ARMCPRegInfo v8_cp_reginfo[] = { @@ -5177,10 +4999,12 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .writefn = tlbi_aa64_vae1_write }, { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NOP }, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_ipas2e1is_write }, { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NOP }, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_ipas2e1is_write }, { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, .access = PL2_W, .type = ARM_CP_NO_RAW, @@ -5191,10 +5015,12 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .writefn = tlbi_aa64_alle1is_write }, { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NOP }, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_ipas2e1_write }, { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NOP }, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_ipas2e1_write }, { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, .access = PL2_W, .type = ARM_CP_NO_RAW, @@ -5275,16 +5101,20 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .writefn = tlbimva_hyp_is_write }, { .name = "TLBIIPAS2", .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, - .type = ARM_CP_NOP, .access = PL2_W }, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiipas2_hyp_write }, { .name = "TLBIIPAS2IS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, - .type = ARM_CP_NOP, .access = PL2_W }, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiipas2is_hyp_write }, { .name = "TLBIIPAS2L", .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, - .type = ARM_CP_NOP, .access = PL2_W }, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiipas2_hyp_write }, { .name = "TLBIIPAS2LIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, - .type = ARM_CP_NOP, .access = PL2_W }, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiipas2is_hyp_write }, /* 32 bit cache operations */ { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, @@ -5339,7 +5169,7 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, - .access = PL2_RW, .type = ARM_CP_ALIAS, + .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_KEEP, .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, { .name = "SPSel", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, @@ -5347,17 +5177,17 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, - .type = ARM_CP_ALIAS, - .fieldoffset = offsetof(CPUARMState, 
vfp.xregs[ARM_VFP_FPEXC]), - .access = PL2_RW, .accessfn = fpexc32_access }, + .access = PL2_RW, + .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP, + .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) }, { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, - .access = PL2_RW, .resetvalue = 0, + .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP, .writefn = dacr_write, .raw_writefn = raw_write, .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, - .access = PL2_RW, .resetvalue = 0, + .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP, .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, @@ -5380,135 +5210,17 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_IO, .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, .resetvalue = 0, - .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, - { .name = "SDCR", .type = ARM_CP_ALIAS, + .access = PL3_RW, + .writefn = mdcr_el3_write, + .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, + { .name = "SDCR", .type = ARM_CP_ALIAS | ARM_CP_IO, .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, .access = PL1_RW, .accessfn = access_trap_aa32s_el1, .writefn = sdcr_write, .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, - REGINFO_SENTINEL -}; - -/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */ -static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = { - { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, - .access = PL2_RW, - .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, - { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, - .access = PL2_RW, - .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, - .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, - .access = PL2_RW, - .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, - .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, - .access = PL2_RW, .type = ARM_CP_CONST, - .resetvalue = 0 }, - { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, - .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, - .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, - .access = PL2_RW, .type = ARM_CP_CONST, - .resetvalue = 0 }, - { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, - .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, - .access = PL2_RW, .type = ARM_CP_CONST, - .resetvalue = 0 }, - { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, - .access = PL2_RW, .type = ARM_CP_CONST, - .resetvalue = 0 }, - { .name = "AFSR1_EL2", .state = 
ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, - .access = PL2_RW, .type = ARM_CP_CONST, - .resetvalue = 0 }, - { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, - .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, - .access = PL2_RW, .accessfn = access_el3_aa32ns, - .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "VTTBR", .state = ARM_CP_STATE_AA32, - .cp = 15, .opc1 = 6, .crm = 2, - .access = PL2_RW, .accessfn = access_el3_aa32ns, - .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, - { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, - .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, - .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, - .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, - .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, - .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, - .resetvalue = 0 }, - { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, - .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, - .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, - .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, - .resetvalue = 0 }, - { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, - .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, - .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, - .resetvalue = 0 }, - { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, - .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, - .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, - .access = PL2_RW, .accessfn = access_tda, - .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, - .access = PL2_RW, .accessfn = access_el3_aa32ns, - .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, - .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, - .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "HIFAR", .state = ARM_CP_STATE_AA32, - .type = ARM_CP_CONST, - .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, - .access = PL2_RW, .resetvalue = 0 }, - 
REGINFO_SENTINEL -}; - -/* Ditto, but for registers which exist in ARMv8 but not v7 */ -static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = { - { .name = "HCR2", .state = ARM_CP_STATE_AA32, - .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, - .access = PL2_RW, - .type = ARM_CP_CONST, .resetvalue = 0 }, - REGINFO_SENTINEL }; static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) @@ -5538,6 +5250,9 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) if (cpu_isar_feature(aa64_vh, cpu)) { valid_mask |= HCR_E2H; } + if (cpu_isar_feature(aa64_ras, cpu)) { + valid_mask |= HCR_TERR | HCR_TEA; + } if (cpu_isar_feature(aa64_lor, cpu)) { valid_mask |= HCR_TLOR; } @@ -5547,6 +5262,12 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) if (cpu_isar_feature(aa64_mte, cpu)) { valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5; } + if (cpu_isar_feature(aa64_scxtnum, cpu)) { + valid_mask |= HCR_ENSCXT; + } + if (cpu_isar_feature(aa64_fwb, cpu)) { + valid_mask |= HCR_FWB; + } } /* Clear RES0 bits. */ @@ -5558,8 +5279,10 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) * HCR_PTW forbids certain page-table setups * HCR_DC disables stage1 and enables stage2 translation * HCR_DCT enables tagging on (disabled) stage1 translation + * HCR_FWB changes the interpretation of stage2 descriptor bits */ - if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT)) { + if ((env->cp15.hcr_el2 ^ value) & + (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB)) { tlb_flush(CPU(cpu)); } env->cp15.hcr_el2 = value; @@ -5578,6 +5301,7 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) g_assert(qemu_mutex_iothread_locked()); arm_cpu_update_virq(cpu); arm_cpu_update_vfiq(cpu); + arm_cpu_update_vserr(cpu); } static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) @@ -5602,15 +5326,15 @@ static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, } /* - * Return the effective value of HCR_EL2. + * Return the effective value of HCR_EL2, at the given security state. * Bits that are not included here: * RW (read from SCR_EL3.RW as needed) */ -uint64_t arm_hcr_el2_eff(CPUARMState *env) +uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, bool secure) { uint64_t ret = env->cp15.hcr_el2; - if (!arm_is_el2_enabled(env)) { + if (!arm_is_el2_enabled_secstate(env, secure)) { /* * "This register has no effect if EL2 is not enabled in the * current Security state". This is ARMv8.4-SecEL2 speak for @@ -5669,6 +5393,85 @@ uint64_t arm_hcr_el2_eff(CPUARMState *env) return ret; } +uint64_t arm_hcr_el2_eff(CPUARMState *env) +{ + return arm_hcr_el2_eff_secstate(env, arm_is_secure_below_el3(env)); +} + +/* + * Corresponds to ARM pseudocode function ELIsInHost(). + */ +bool el_is_in_host(CPUARMState *env, int el) +{ + uint64_t mask; + + /* + * Since we only care about E2H and TGE, we can skip arm_hcr_el2_eff(). + * Perform the simplest bit tests first, and validate EL2 afterward. + */ + if (el & 1) { + return false; /* EL1 or EL3 */ + } + + /* + * Note that hcr_write() checks isar_feature_aa64_vh(), + * aka HaveVirtHostExt(), in allowing HCR_E2H to be set. + */ + mask = el ? HCR_E2H : HCR_E2H | HCR_TGE; + if ((env->cp15.hcr_el2 & mask) != mask) { + return false; + } + + /* TGE and/or E2H set: double check those bits are currently legal. 
*/ + return arm_is_el2_enabled(env) && arm_el_is_aa64(env, 2); +} + +static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + uint64_t valid_mask = 0; + + /* No features adding bits to HCRX are implemented. */ + + /* Clear RES0 bits. */ + env->cp15.hcrx_el2 = value & valid_mask; +} + +static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) < 3 + && arm_feature(env, ARM_FEATURE_EL3) + && !(env->cp15.scr_el3 & SCR_HXEN)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +static const ARMCPRegInfo hcrx_el2_reginfo = { + .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2, + .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen, + .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2), +}; + +/* Return the effective value of HCRX_EL2. */ +uint64_t arm_hcrx_el2_eff(CPUARMState *env) +{ + /* + * The bits in this register behave as 0 for all purposes other than + * direct reads of the register if: + * - EL2 is not enabled in the current security state, + * - SCR_EL3.HXEn is 0. + */ + if (!arm_is_el2_enabled(env) + || (arm_feature(env, ARM_FEATURE_EL3) + && !(env->cp15.scr_el3 & SCR_HXEN))) { + return 0; + } + return env->cp15.hcrx_el2; +} + static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { @@ -5678,8 +5481,8 @@ static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, */ if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { - value &= ~(0x3 << 10); - value |= env->cp15.cptr_el[2] & (0x3 << 10); + uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK; + value = (value & ~mask) | (env->cp15.cptr_el[2] & mask); } env->cp15.cptr_el[2] = value; } @@ -5694,7 +5497,7 @@ static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri) if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { - value |= 0x3 << 10; + value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK; } return value; } @@ -5776,19 +5579,16 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, .access = PL2_RW, .writefn = vmsa_tcr_el12_write, - /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */ .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, { .name = "VTCR", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, .type = ARM_CP_ALIAS, .access = PL2_RW, .accessfn = access_el3_aa32ns, - .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, + .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) }, { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, .access = PL2_RW, - /* no .writefn needed as this can't cause an ASID change; - * no .raw_writefn or .resetfn needed as we never use mask/base_mask - */ + /* no .writefn needed as this can't cause an ASID change */ .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, { .name = "VTTBR", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 6, .crm = 2, @@ -5837,27 +5637,27 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { .writefn = tlbimva_hyp_is_write }, { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, - .type = ARM_CP_NO_RAW, .access = PL2_W, + .access = PL2_W, .type = ARM_CP_NO_RAW | 
ARM_CP_EL3_NO_EL2_UNDEF, .writefn = tlbi_aa64_alle2_write }, { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, - .type = ARM_CP_NO_RAW, .access = PL2_W, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, .writefn = tlbi_aa64_vae2_write }, { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, .writefn = tlbi_aa64_vae2_write }, { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, - .access = PL2_W, .type = ARM_CP_NO_RAW, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, .writefn = tlbi_aa64_alle2is_write }, { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, - .type = ARM_CP_NO_RAW, .access = PL2_W, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, .writefn = tlbi_aa64_vae2is_write }, { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, .writefn = tlbi_aa64_vae2is_write }, #ifndef CONFIG_USER_ONLY /* Unlike the other EL2-related AT operations, these must @@ -5867,11 +5667,13 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, .access = PL2_W, .accessfn = at_s1e2_access, - .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, + .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = ats_write64 }, { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, .access = PL2_W, .accessfn = at_s1e2_access, - .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, + .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = ats_write64 }, /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose @@ -5922,13 +5724,6 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { .resetvalue = 0, .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write }, #endif - /* The only field of MDCR_EL2 that has a defined architectural reset value - * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N. 
- */ - { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, - .access = PL2_RW, .resetvalue = PMCR_NUM_COUNTERS, - .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), }, { .name = "HPFAR", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, .access = PL2_RW, .accessfn = access_el3_aa32ns, @@ -5941,7 +5736,6 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) }, - REGINFO_SENTINEL }; static const ARMCPRegInfo el2_v8_cp_reginfo[] = { @@ -5951,7 +5745,6 @@ static const ARMCPRegInfo el2_v8_cp_reginfo[] = { .access = PL2_RW, .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2), .writefn = hcr_writehigh }, - REGINFO_SENTINEL }; static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri, @@ -5972,7 +5765,6 @@ static const ARMCPRegInfo el2_sec_cp_reginfo[] = { .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2, .access = PL2_RW, .accessfn = sel2_access, .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) }, - REGINFO_SENTINEL }; static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, @@ -6026,12 +5818,8 @@ static const ARMCPRegInfo el3_cp_reginfo[] = { { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, .access = PL3_RW, - /* no .writefn needed as this can't cause an ASID change; - * we must provide a .raw_writefn and .resetfn because we handle - * reset and migration for the AArch32 TTBCR(S), which might be - * using mask and base_mask. - */ - .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write, + /* no .writefn needed as this can't cause an ASID change */ + .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) }, { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, @@ -6098,7 +5886,6 @@ static const ARMCPRegInfo el3_cp_reginfo[] = { .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae3_write }, - REGINFO_SENTINEL }; #ifndef CONFIG_USER_ONLY @@ -6195,10 +5982,16 @@ static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu) */ { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0), "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve }, + { K(3, 0, 1, 2, 6), K(3, 4, 1, 2, 6), K(3, 5, 1, 2, 6), + "SMCR_EL1", "SMCR_EL2", "SMCR_EL12", isar_feature_aa64_sme }, { K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0), "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte }, + { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7), + "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12", + isar_feature_aa64_scxtnum }, + /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */ /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */ }; @@ -6208,14 +6001,17 @@ static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu) for (i = 0; i < ARRAY_SIZE(aliases); i++) { const struct E2HAlias *a = &aliases[i]; - ARMCPRegInfo *src_reg, *dst_reg; + ARMCPRegInfo *src_reg, *dst_reg, *new_reg; + bool ok; if (a->feature && !a->feature(&cpu->isar)) { continue; } - src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key); - dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key); + src_reg = g_hash_table_lookup(cpu->cp_regs, + (gpointer)(uintptr_t)a->src_key); + dst_reg = g_hash_table_lookup(cpu->cp_regs, + (gpointer)(uintptr_t)a->dst_key); g_assert(src_reg != NULL); g_assert(dst_reg != NULL); @@ -6227,19 +6023,16 @@ static void 
define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu) g_assert(src_reg->opaque == NULL); /* Create alias before redirection so we dup the right data. */ - if (a->new_key) { - ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo)); - uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t)); - bool ok; + new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo)); - new_reg->name = a->new_name; - new_reg->type |= ARM_CP_ALIAS; - /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */ - new_reg->access &= PL2_RW | PL3_RW; + new_reg->name = a->new_name; + new_reg->type |= ARM_CP_ALIAS; + /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */ + new_reg->access &= PL2_RW | PL3_RW; - ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg); - g_assert(ok); - } + ok = g_hash_table_insert(cpu->cp_regs, + (gpointer)(uintptr_t)a->new_key, new_reg); + g_assert(ok); src_reg->opaque = dst_reg; src_reg->orig_readfn = src_reg->readfn ?: raw_read; @@ -6289,214 +6082,262 @@ static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, return CP_ACCESS_OK; } -static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +/* + * Check for traps to RAS registers, which are controlled + * by HCR_EL2.TERR and SCR_EL3.TERR. + */ +static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) { - /* Writes to OSLAR_EL1 may update the OS lock status, which can be - * read via a bit in OSLSR_EL1. - */ - int oslock; + int el = arm_current_el(env); - if (ri->state == ARM_CP_STATE_AA32) { - oslock = (value == 0xC5ACCE55); - } else { - oslock = value & 1; + if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) { + return CP_ACCESS_TRAP_EL2; } - - env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); + if (el < 3 && (env->cp15.scr_el3 & SCR_TERR)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; } -static const ARMCPRegInfo debug_cp_reginfo[] = { - /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped - * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; - * unlike DBGDRAR it is never accessible from EL0. - * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 - * accessor. - */ - { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, - .access = PL0_R, .accessfn = access_tdra, +static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + int el = arm_current_el(env); + + if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) { + return env->cp15.vdisr_el2; + } + if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) { + return 0; /* RAZ/WI */ + } + return env->cp15.disr_el1; +} + +static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) +{ + int el = arm_current_el(env); + + if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) { + env->cp15.vdisr_el2 = val; + return; + } + if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) { + return; /* RAZ/WI */ + } + env->cp15.disr_el1 = val; +} + +/* + * Minimal RAS implementation with no Error Records. + * Which means that all of the Error Record registers: + * ERXADDR_EL1 + * ERXCTLR_EL1 + * ERXFR_EL1 + * ERXMISC0_EL1 + * ERXMISC1_EL1 + * ERXMISC2_EL1 + * ERXMISC3_EL1 + * ERXPFGCDN_EL1 (RASv1p1) + * ERXPFGCTL_EL1 (RASv1p1) + * ERXPFGF_EL1 (RASv1p1) + * ERXSTATUS_EL1 + * and + * ERRSELR_EL1 + * may generate UNDEFINED, which is the effect we get by not + * listing them at all. 
+ */ +static const ARMCPRegInfo minimal_ras_reginfo[] = { + { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1, + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1), + .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write }, + { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0, + .access = PL1_R, .accessfn = access_terr, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, - .access = PL1_R, .accessfn = access_tdra, - .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, - .access = PL0_R, .accessfn = access_tdra, - .type = ARM_CP_CONST, .resetvalue = 0 }, - /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ - { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, - .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, - .access = PL1_RW, .accessfn = access_tda, - .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), - .resetvalue = 0 }, - /* - * MDCCSR_EL0[30:29] map to EDSCR[30:29]. Simply RAZ as the external - * Debug Communication Channel is not implemented. - */ - { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64, - .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0, - .access = PL0_R, .accessfn = access_tda, - .type = ARM_CP_CONST, .resetvalue = 0 }, - /* - * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2]. Map all bits as - * it is unlikely a guest will care. - * We don't implement the configurable EL0 access. - */ - { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32, - .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, - .type = ARM_CP_ALIAS, - .access = PL1_R, .accessfn = access_tda, - .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, - { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, - .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, - .access = PL1_W, .type = ARM_CP_NO_RAW, - .accessfn = access_tdosa, - .writefn = oslar_write }, - { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, - .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, - .access = PL1_R, .resetvalue = 10, - .accessfn = access_tdosa, - .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, - /* Dummy OSDLR_EL1: 32-bit Linux will read this */ - { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, - .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, - .access = PL1_RW, .accessfn = access_tdosa, - .type = ARM_CP_NOP }, - /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't - * implement vector catch debug events yet. - */ - { .name = "DBGVCR", - .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, - .access = PL1_RW, .accessfn = access_tda, - .type = ARM_CP_NOP }, - /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor - * to save and restore a 32-bit guest's DBGVCR) - */ - { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, - .access = PL2_RW, .accessfn = access_tda, - .type = ARM_CP_NOP }, - /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications - * Channel but Linux may try to access this register. The 32-bit - * alias is DBGDCCINT. 
- */ - { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, - .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, - .access = PL1_RW, .accessfn = access_tda, - .type = ARM_CP_NOP }, - REGINFO_SENTINEL + { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1, + .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) }, + { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3, + .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) }, }; -static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { - /* 64 bit access versions of the (dummy) debug registers */ - { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, - .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, - { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, - .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, - REGINFO_SENTINEL -}; - -/* Return the exception level to which exceptions should be taken - * via SVEAccessTrap. If an exception should be routed through - * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should - * take care of raising that exception. - * C.f. the ARM pseudocode function CheckSVEEnabled. +/* + * Return the exception level to which exceptions should be taken + * via SVEAccessTrap. This excludes the check for whether the exception + * should be routed through AArch64.AdvSIMDFPAccessTrap. That can easily + * be found by testing 0 < fp_exception_el < sve_exception_el. + * + * C.f. the ARM pseudocode function CheckSVEEnabled. Note that the + * pseudocode does *not* separate out the FP trap checks, but has them + * all in one function. */ int sve_exception_el(CPUARMState *env, int el) { #ifndef CONFIG_USER_ONLY - uint64_t hcr_el2 = arm_hcr_el2_eff(env); - - if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { - bool disabled = false; - - /* The CPACR.ZEN controls traps to EL1: - * 0, 2 : trap EL0 and EL1 accesses - * 1 : trap only EL0 accesses - * 3 : trap no accesses - */ - if (!extract32(env->cp15.cpacr_el1, 16, 1)) { - disabled = true; - } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) { - disabled = el == 0; - } - if (disabled) { - /* route_to_el2 */ - return hcr_el2 & HCR_TGE ? 2 : 1; - } - - /* Check CPACR.FPEN. */ - if (!extract32(env->cp15.cpacr_el1, 20, 1)) { - disabled = true; - } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) { - disabled = el == 0; - } - if (disabled) { - return 0; + if (el <= 1 && !el_is_in_host(env, el)) { + switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) { + case 1: + if (el != 0) { + break; + } + /* fall through */ + case 0: + case 2: + return 1; } } - /* CPTR_EL2. Since TZ and TFP are positive, - * they will be zero when EL2 is not present. - */ if (el <= 2 && arm_is_el2_enabled(env)) { - if (env->cp15.cptr_el[2] & CPTR_TZ) { - return 2; - } - if (env->cp15.cptr_el[2] & CPTR_TFP) { - return 0; + /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */ + if (env->cp15.hcr_el2 & HCR_E2H) { + switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) { + case 1: + if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) { + break; + } + /* fall through */ + case 0: + case 2: + return 2; + } + } else { + if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) { + return 2; + } } } /* CPTR_EL3. Since EZ is negative we must check for EL3. 
*/ if (arm_feature(env, ARM_FEATURE_EL3) - && !(env->cp15.cptr_el[3] & CPTR_EZ)) { + && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) { return 3; } #endif return 0; } -uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len) +/* + * Return the exception level to which exceptions should be taken for SME. + * C.f. the ARM pseudocode function CheckSMEAccess. + */ +int sme_exception_el(CPUARMState *env, int el) { - uint32_t end_len; - - start_len = MIN(start_len, ARM_MAX_VQ - 1); - end_len = start_len; - - if (!test_bit(start_len, cpu->sve_vq_map)) { - end_len = find_last_bit(cpu->sve_vq_map, start_len); - assert(end_len < start_len); +#ifndef CONFIG_USER_ONLY + if (el <= 1 && !el_is_in_host(env, el)) { + switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) { + case 1: + if (el != 0) { + break; + } + /* fall through */ + case 0: + case 2: + return 1; + } } - return end_len; + + if (el <= 2 && arm_is_el2_enabled(env)) { + /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */ + if (env->cp15.hcr_el2 & HCR_E2H) { + switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) { + case 1: + if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) { + break; + } + /* fall through */ + case 0: + case 2: + return 2; + } + } else { + if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) { + return 2; + } + } + } + + /* CPTR_EL3. Since ESM is negative we must check for EL3. */ + if (arm_feature(env, ARM_FEATURE_EL3) + && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { + return 3; + } +#endif + return 0; +} + +/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */ +static bool sme_fa64(CPUARMState *env, int el) +{ + if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) { + return false; + } + + if (el <= 1 && !el_is_in_host(env, el)) { + if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) { + return false; + } + } + if (el <= 2 && arm_is_el2_enabled(env)) { + if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) { + return false; + } + } + if (arm_feature(env, ARM_FEATURE_EL3)) { + if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) { + return false; + } + } + + return true; } /* * Given that SVE is enabled, return the vector length for EL. */ -uint32_t sve_zcr_len_for_el(CPUARMState *env, int el) +uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm) { ARMCPU *cpu = env_archcpu(env); - uint32_t zcr_len = cpu->sve_max_vq - 1; + uint64_t *cr = env->vfp.zcr_el; + uint32_t map = cpu->sve_vq.map; + uint32_t len = ARM_MAX_VQ - 1; - if (el <= 1) { - zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]); + if (sm) { + cr = env->vfp.smcr_el; + map = cpu->sme_vq.map; + } + + if (el <= 1 && !el_is_in_host(env, el)) { + len = MIN(len, 0xf & (uint32_t)cr[1]); } if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) { - zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]); + len = MIN(len, 0xf & (uint32_t)cr[2]); } if (arm_feature(env, ARM_FEATURE_EL3)) { - zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]); + len = MIN(len, 0xf & (uint32_t)cr[3]); } - return aarch64_sve_zcr_get_valid_len(cpu, zcr_len); + map &= MAKE_64BIT_MASK(0, len + 1); + if (map != 0) { + return 31 - clz32(map); + } + + /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. 
*/ + assert(sm); + return ctz32(cpu->sme_vq.map); +} + +uint32_t sve_vqm1_for_el(CPUARMState *env, int el) +{ + return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM)); } static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { int cur_el = arm_current_el(env); - int old_len = sve_zcr_len_for_el(env, cur_el); + int old_len = sve_vqm1_for_el(env, cur_el); int new_len; /* Bits other than [3:0] are RAZ/WI. */ @@ -6507,362 +6348,144 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, * Because we arrived here, we know both FP and SVE are enabled; * otherwise we would have trapped access to the ZCR_ELn register. */ - new_len = sve_zcr_len_for_el(env, cur_el); + new_len = sve_vqm1_for_el(env, cur_el); if (new_len < old_len) { aarch64_sve_narrow_vq(env, new_len + 1); } } -static const ARMCPRegInfo zcr_el1_reginfo = { - .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_SVE, - .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), - .writefn = zcr_write, .raw_writefn = raw_write +static const ARMCPRegInfo zcr_reginfo[] = { + { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_SVE, + .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), + .writefn = zcr_write, .raw_writefn = raw_write }, + { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, + .access = PL2_RW, .type = ARM_CP_SVE, + .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]), + .writefn = zcr_write, .raw_writefn = raw_write }, + { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0, + .access = PL3_RW, .type = ARM_CP_SVE, + .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]), + .writefn = zcr_write, .raw_writefn = raw_write }, }; -static const ARMCPRegInfo zcr_el2_reginfo = { - .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, - .access = PL2_RW, .type = ARM_CP_SVE, - .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]), - .writefn = zcr_write, .raw_writefn = raw_write -}; - -static const ARMCPRegInfo zcr_no_el2_reginfo = { - .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, - .access = PL2_RW, .type = ARM_CP_SVE, - .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore -}; - -static const ARMCPRegInfo zcr_el3_reginfo = { - .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0, - .access = PL3_RW, .type = ARM_CP_SVE, - .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]), - .writefn = zcr_write, .raw_writefn = raw_write -}; - -void hw_watchpoint_update(ARMCPU *cpu, int n) +#ifdef TARGET_AARCH64 +static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) { - CPUARMState *env = &cpu->env; - vaddr len = 0; - vaddr wvr = env->cp15.dbgwvr[n]; - uint64_t wcr = env->cp15.dbgwcr[n]; - int mask; - int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; + int el = arm_current_el(env); - if (env->cpu_watchpoint[n]) { - cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); - env->cpu_watchpoint[n] = NULL; - } - - if (!extract64(wcr, 0, 1)) { - /* E bit clear : watchpoint disabled */ - return; - } - - switch (extract64(wcr, 3, 2)) { - case 0: - /* LSC 00 is reserved and must behave as if the wp is disabled */ - return; - case 1: - flags |= BP_MEM_READ; - 
break; - case 2: - flags |= BP_MEM_WRITE; - break; - case 3: - flags |= BP_MEM_ACCESS; - break; - } - - /* Attempts to use both MASK and BAS fields simultaneously are - * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, - * thus generating a watchpoint for every byte in the masked region. - */ - mask = extract64(wcr, 24, 4); - if (mask == 1 || mask == 2) { - /* Reserved values of MASK; we must act as if the mask value was - * some non-reserved value, or as if the watchpoint were disabled. - * We choose the latter. - */ - return; - } else if (mask) { - /* Watchpoint covers an aligned area up to 2GB in size */ - len = 1ULL << mask; - /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE - * whether the watchpoint fires when the unmasked bits match; we opt - * to generate the exceptions. - */ - wvr &= ~(len - 1); - } else { - /* Watchpoint covers bytes defined by the byte address select bits */ - int bas = extract64(wcr, 5, 8); - int basstart; - - if (extract64(wvr, 2, 1)) { - /* Deprecated case of an only 4-aligned address. BAS[7:4] are - * ignored, and BAS[3:0] define which bytes to watch. - */ - bas &= 0xf; + if (el == 0) { + uint64_t sctlr = arm_sctlr(env, el); + if (!(sctlr & SCTLR_EnTP2)) { + return CP_ACCESS_TRAP; } - - if (bas == 0) { - /* This must act as if the watchpoint is disabled */ - return; - } - - /* The BAS bits are supposed to be programmed to indicate a contiguous - * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether - * we fire for each byte in the word/doubleword addressed by the WVR. - * We choose to ignore any non-zero bits after the first range of 1s. - */ - basstart = ctz32(bas); - len = cto32(bas >> basstart); - wvr += basstart; } - - cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, - &env->cpu_watchpoint[n]); + /* TODO: FEAT_FGT */ + if (el < 3 + && arm_feature(env, ARM_FEATURE_EL3) + && !(env->cp15.scr_el3 & SCR_ENTP2)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; } -void hw_watchpoint_update_all(ARMCPU *cpu) +static CPAccessResult access_esm(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) { - int i; - CPUARMState *env = &cpu->env; - - /* Completely clear out existing QEMU watchpoints and our array, to - * avoid possible stale entries following migration load. - */ - cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); - memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); - - for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { - hw_watchpoint_update(cpu, i); + /* TODO: FEAT_FGT for SMPRI_EL1 but not SMPRIMAP_EL2 */ + if (arm_current_el(env) < 3 + && arm_feature(env, ARM_FEATURE_EL3) + && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { + return CP_ACCESS_TRAP_EL3; } + return CP_ACCESS_OK; } -static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - ARMCPU *cpu = env_archcpu(env); - int i = ri->crm; + helper_set_pstate_sm(env, FIELD_EX64(value, SVCR, SM)); + helper_set_pstate_za(env, FIELD_EX64(value, SVCR, ZA)); + arm_rebuild_hflags(env); +} - /* Bits [63:49] are hardwired to the value of bit [48]; that is, the - * register reads and behaves as if values written are sign extended. - * Bits [1:0] are RES0. 
- */ - value = sextract64(value, 0, 49) & ~3ULL; +static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + int cur_el = arm_current_el(env); + int old_len = sve_vqm1_for_el(env, cur_el); + int new_len; + QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1); + value &= R_SMCR_LEN_MASK | R_SMCR_FA64_MASK; raw_write(env, ri, value); - hw_watchpoint_update(cpu, i); -} - -static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = env_archcpu(env); - int i = ri->crm; - - raw_write(env, ri, value); - hw_watchpoint_update(cpu, i); -} - -void hw_breakpoint_update(ARMCPU *cpu, int n) -{ - CPUARMState *env = &cpu->env; - uint64_t bvr = env->cp15.dbgbvr[n]; - uint64_t bcr = env->cp15.dbgbcr[n]; - vaddr addr; - int bt; - int flags = BP_CPU; - - if (env->cpu_breakpoint[n]) { - cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); - env->cpu_breakpoint[n] = NULL; - } - - if (!extract64(bcr, 0, 1)) { - /* E bit clear : watchpoint disabled */ - return; - } - - bt = extract64(bcr, 20, 4); - - switch (bt) { - case 4: /* unlinked address mismatch (reserved if AArch64) */ - case 5: /* linked address mismatch (reserved if AArch64) */ - qemu_log_mask(LOG_UNIMP, - "arm: address mismatch breakpoint types not implemented\n"); - return; - case 0: /* unlinked address match */ - case 1: /* linked address match */ - { - /* Bits [63:49] are hardwired to the value of bit [48]; that is, - * we behave as if the register was sign extended. Bits [1:0] are - * RES0. The BAS field is used to allow setting breakpoints on 16 - * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether - * a bp will fire if the addresses covered by the bp and the addresses - * covered by the insn overlap but the insn doesn't start at the - * start of the bp address range. We choose to require the insn and - * the bp to have the same address. The constraints on writing to - * BAS enforced in dbgbcr_write mean we have only four cases: - * 0b0000 => no breakpoint - * 0b0011 => breakpoint on addr - * 0b1100 => breakpoint on addr + 2 - * 0b1111 => breakpoint on addr - * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). - */ - int bas = extract64(bcr, 5, 4); - addr = sextract64(bvr, 0, 49) & ~3ULL; - if (bas == 0) { - return; - } - if (bas == 0xc) { - addr += 2; - } - break; - } - case 2: /* unlinked context ID match */ - case 8: /* unlinked VMID match (reserved if no EL2) */ - case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ - qemu_log_mask(LOG_UNIMP, - "arm: unlinked context breakpoint types not implemented\n"); - return; - case 9: /* linked VMID match (reserved if no EL2) */ - case 11: /* linked context ID and VMID match (reserved if no EL2) */ - case 3: /* linked context ID match */ - default: - /* We must generate no events for Linked context matches (unless - * they are linked to by some other bp/wp, which is handled in - * updates for the linking bp/wp). We choose to also generate no events - * for reserved values. - */ - return; - } - - cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); -} - -void hw_breakpoint_update_all(ARMCPU *cpu) -{ - int i; - CPUARMState *env = &cpu->env; - - /* Completely clear out existing QEMU breakpoints and our array, to - * avoid possible stale entries following migration load. 
- */ - cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); - memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); - - for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { - hw_breakpoint_update(cpu, i); - } -} - -static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = env_archcpu(env); - int i = ri->crm; - - raw_write(env, ri, value); - hw_breakpoint_update(cpu, i); -} - -static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = env_archcpu(env); - int i = ri->crm; - - /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only - * copy of BAS[0]. - */ - value = deposit64(value, 6, 1, extract64(value, 5, 1)); - value = deposit64(value, 8, 1, extract64(value, 7, 1)); - - raw_write(env, ri, value); - hw_breakpoint_update(cpu, i); -} - -static void define_debug_regs(ARMCPU *cpu) -{ - /* Define v7 and v8 architectural debug registers. - * These are just dummy implementations for now. - */ - int i; - int wrps, brps, ctx_cmps; /* - * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot - * use AArch32. Given that bit 15 is RES1, if the value is 0 then - * the register must not exist for this cpu. + * Note that it is CONSTRAINED UNPREDICTABLE what happens to ZA storage + * when SVL is widened (old values kept, or zeros). Choose to keep the + * current values for simplicity. But for QEMU internals, we must still + * apply the narrower SVL to the Zregs and Pregs -- see the comment + * above aarch64_sve_narrow_vq. */ - if (cpu->isar.dbgdidr != 0) { - ARMCPRegInfo dbgdidr = { - .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, - .opc1 = 0, .opc2 = 0, - .access = PL0_R, .accessfn = access_tda, - .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr, - }; - define_one_arm_cp_reg(cpu, &dbgdidr); - } - - /* Note that all these register fields hold "number of Xs minus 1". 
*/ - brps = arm_num_brps(cpu); - wrps = arm_num_wrps(cpu); - ctx_cmps = arm_num_ctx_cmps(cpu); - - assert(ctx_cmps <= brps); - - define_arm_cp_regs(cpu, debug_cp_reginfo); - - if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { - define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); - } - - for (i = 0; i < brps; i++) { - ARMCPRegInfo dbgregs[] = { - { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, - .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, - .access = PL1_RW, .accessfn = access_tda, - .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), - .writefn = dbgbvr_write, .raw_writefn = raw_write - }, - { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH, - .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, - .access = PL1_RW, .accessfn = access_tda, - .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), - .writefn = dbgbcr_write, .raw_writefn = raw_write - }, - REGINFO_SENTINEL - }; - define_arm_cp_regs(cpu, dbgregs); - } - - for (i = 0; i < wrps; i++) { - ARMCPRegInfo dbgregs[] = { - { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH, - .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, - .access = PL1_RW, .accessfn = access_tda, - .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), - .writefn = dbgwvr_write, .raw_writefn = raw_write - }, - { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH, - .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, - .access = PL1_RW, .accessfn = access_tda, - .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), - .writefn = dbgwcr_write, .raw_writefn = raw_write - }, - REGINFO_SENTINEL - }; - define_arm_cp_regs(cpu, dbgregs); + new_len = sve_vqm1_for_el(env, cur_el); + if (new_len < old_len) { + aarch64_sve_narrow_vq(env, new_len + 1); } } +static const ARMCPRegInfo sme_reginfo[] = { + { .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5, + .access = PL0_RW, .accessfn = access_tpidr2, + .fieldoffset = offsetof(CPUARMState, cp15.tpidr2_el0) }, + { .name = "SVCR", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 2, + .access = PL0_RW, .type = ARM_CP_SME, + .fieldoffset = offsetof(CPUARMState, svcr), + .writefn = svcr_write, .raw_writefn = raw_write }, + { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6, + .access = PL1_RW, .type = ARM_CP_SME, + .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]), + .writefn = smcr_write, .raw_writefn = raw_write }, + { .name = "SMCR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 6, + .access = PL2_RW, .type = ARM_CP_SME, + .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[2]), + .writefn = smcr_write, .raw_writefn = raw_write }, + { .name = "SMCR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 6, + .access = PL3_RW, .type = ARM_CP_SME, + .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[3]), + .writefn = smcr_write, .raw_writefn = raw_write }, + { .name = "SMIDR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 6, + .access = PL1_R, .accessfn = access_aa64_tid1, + /* + * IMPLEMENTOR = 0 (software) + * REVISION = 0 (implementation defined) + * SMPS = 0 (no streaming execution priority in QEMU) + * AFFINITY = 0 (streaming sve mode not shared with other PEs) + */ + .type = ARM_CP_CONST, .resetvalue = 0, }, + /* + * Because SMIDR_EL1.SMPS is 0, SMPRI_EL1 and SMPRIMAP_EL2 are RES 0. 
+ */ + { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4, + .access = PL1_RW, .accessfn = access_esm, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5, + .access = PL2_RW, .accessfn = access_esm, + .type = ARM_CP_CONST, .resetvalue = 0 }, +}; +#endif /* TARGET_AARCH64 */ + static void define_pmu_regs(ARMCPU *cpu) { /* @@ -6870,7 +6493,7 @@ static void define_pmu_regs(ARMCPU *cpu) * field as main ID register, and we implement four counters in * addition to the cycle count register. */ - unsigned int i, pmcrn = PMCR_NUM_COUNTERS; + unsigned int i, pmcrn = pmu_num_counters(&cpu->env); ARMCPRegInfo pmcr = { .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, .access = PL0_RW, @@ -6885,10 +6508,10 @@ static void define_pmu_regs(ARMCPU *cpu) .access = PL0_RW, .accessfn = pmreg_access, .type = ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), - .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) | - PMCRLC, + .resetvalue = cpu->isar.reset_pmcr_el0, .writefn = pmcr_write, .raw_writefn = raw_write, }; + define_one_arm_cp_reg(cpu, &pmcr); define_one_arm_cp_reg(cpu, &pmcr64); for (i = 0; i < pmcrn; i++) { @@ -6901,10 +6524,10 @@ static void define_pmu_regs(ARMCPU *cpu) .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, - .accessfn = pmreg_access }, + .accessfn = pmreg_access_xevcntr }, { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)), - .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, + .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr, .type = ARM_CP_IO, .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, .raw_readfn = pmevcntr_rawread, @@ -6920,7 +6543,6 @@ static void define_pmu_regs(ARMCPU *cpu) .type = ARM_CP_IO, .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, .raw_writefn = pmevtyper_rawwrite }, - REGINFO_SENTINEL }; define_arm_cp_regs(cpu, pmev_regs); g_free(pmevcntr_name); @@ -6928,7 +6550,7 @@ static void define_pmu_regs(ARMCPU *cpu) g_free(pmevtyper_name); g_free(pmevtyper_el0_name); } - if (cpu_isar_feature(aa32_pmu_8_1, cpu)) { + if (cpu_isar_feature(aa32_pmuv3p1, cpu)) { ARMCPRegInfo v81_pmu_regs[] = { { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, @@ -6938,11 +6560,10 @@ static void define_pmu_regs(ARMCPU *cpu) .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5, .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, .resetvalue = extract64(cpu->pmceid1, 32, 32) }, - REGINFO_SENTINEL }; define_arm_cp_regs(cpu, v81_pmu_regs); } - if (cpu_isar_feature(any_pmu_8_4, cpu)) { + if (cpu_isar_feature(any_pmuv3p4, cpu)) { static const ARMCPRegInfo v84_pmmir = { .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6, @@ -7035,7 +6656,6 @@ static const ARMCPRegInfo lor_reginfo[] = { .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7, .access = PL1_R, .accessfn = access_lor_ns, .type = ARM_CP_CONST, .resetvalue = 0 }, - REGINFO_SENTINEL }; #ifdef TARGET_AARCH64 @@ -7045,7 +6665,7 @@ static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri, int el = arm_current_el(env); if (el < 2 && - arm_feature(env, ARM_FEATURE_EL2) && + arm_is_el2_enabled(env) && 
!(arm_hcr_el2_eff(env) & HCR_APK)) { return CP_ACCESS_TRAP_EL2; } @@ -7098,93 +6718,96 @@ static const ARMCPRegInfo pauth_reginfo[] = { .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3, .access = PL1_RW, .accessfn = access_pauth, .fieldoffset = offsetof(CPUARMState, keys.apib.hi) }, - REGINFO_SENTINEL }; static const ARMCPRegInfo tlbirange_reginfo[] = { { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1, - .access = PL1_W, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1is_write }, { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3, - .access = PL1_W, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1is_write }, { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5, - .access = PL1_W, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1is_write }, { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7, - .access = PL1_W, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1is_write }, { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, - .access = PL1_W, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1is_write }, { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3, - .access = PL1_W, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1is_write }, { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5, - .access = PL1_W, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1is_write }, { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7, - .access = PL1_W, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1is_write }, { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, - .access = PL1_W, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1_write }, { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3, - .access = PL1_W, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1_write }, { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5, - .access = PL1_W, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1_write }, { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7, - .access = PL1_W, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1_write }, { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2, - .access = PL2_W, .type = ARM_CP_NOP }, 
+ .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_ripas2e1is_write }, { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6, - .access = PL2_W, .type = ARM_CP_NOP }, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_ripas2e1is_write }, { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, .writefn = tlbi_aa64_rvae2is_write }, { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, .writefn = tlbi_aa64_rvae2is_write }, { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2, - .access = PL2_W, .type = ARM_CP_NOP }, - { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_ripas2e1_write }, + { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6, - .access = PL2_W, .type = ARM_CP_NOP }, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_ripas2e1_write }, { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, .writefn = tlbi_aa64_rvae2is_write }, { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, .writefn = tlbi_aa64_rvae2is_write }, { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, .writefn = tlbi_aa64_rvae2_write }, { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, .writefn = tlbi_aa64_rvae2_write }, { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1, @@ -7210,26 +6833,49 @@ static const ARMCPRegInfo tlbirange_reginfo[] = { .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5, .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae3_write }, - REGINFO_SENTINEL }; static const ARMCPRegInfo tlbios_reginfo[] = { { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0, - .access = PL1_W, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vmalle1is_write }, + { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae1is_write }, { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2, - .access = PL1_W, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vmalle1is_write }, + { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, 
.opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae1is_write }, + { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae1is_write }, + { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae1is_write }, { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0, - .access = PL2_W, .type = ARM_CP_NO_RAW, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, .writefn = tlbi_aa64_alle2is_write }, + { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = tlbi_aa64_vae2is_write }, { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4, .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_alle1is_write }, + { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = tlbi_aa64_vae2is_write }, { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6, .access = PL2_W, .type = ARM_CP_NO_RAW, @@ -7250,7 +6896,14 @@ static const ARMCPRegInfo tlbios_reginfo[] = { .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0, .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_alle3is_write }, - REGINFO_SENTINEL + { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1, + .access = PL3_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae3is_write }, + { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5, + .access = PL3_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae3is_write }, }; static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) @@ -7289,7 +6942,6 @@ static const ARMCPRegInfo rndr_reginfo[] = { .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1, .access = PL0_R, .readfn = rndr_readfn }, - REGINFO_SENTINEL }; #ifndef CONFIG_USER_ONLY @@ -7325,7 +6977,6 @@ static const ARMCPRegInfo dcpop_reg[] = { .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1, .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn }, - REGINFO_SENTINEL }; static const ARMCPRegInfo dcpodp_reg[] = { @@ -7333,7 +6984,6 @@ static const ARMCPRegInfo dcpodp_reg[] = { .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1, .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn }, - REGINFO_SENTINEL }; #endif /*CONFIG_USER_ONLY*/ @@ -7352,7 +7002,7 @@ static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri, { int el = arm_current_el(env); - if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) { + if (el < 2 && arm_is_el2_enabled(env)) { uint64_t hcr = arm_hcr_el2_eff(env); if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) { return CP_ACCESS_TRAP_EL2; @@ -7435,14 +7085,12 @@ static 
const ARMCPRegInfo mte_reginfo[] = { { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6, .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, - REGINFO_SENTINEL }; static const ARMCPRegInfo mte_tco_ro_reginfo[] = { { .name = "TCO", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7, .type = ARM_CP_CONST, .access = PL0_RW, }, - REGINFO_SENTINEL }; static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = { @@ -7494,10 +7142,54 @@ static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = { .accessfn = aa64_zva_access, #endif }, - REGINFO_SENTINEL }; -#endif +static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + uint64_t hcr = arm_hcr_el2_eff(env); + int el = arm_current_el(env); + + if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) { + if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) { + if (hcr & HCR_TGE) { + return CP_ACCESS_TRAP_EL2; + } + return CP_ACCESS_TRAP; + } + } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 3 + && arm_feature(env, ARM_FEATURE_EL3) + && !(env->cp15.scr_el3 & SCR_ENSCXT)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +static const ARMCPRegInfo scxtnum_reginfo[] = { + { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7, + .access = PL0_RW, .accessfn = access_scxtnum, + .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) }, + { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7, + .access = PL1_RW, .accessfn = access_scxtnum, + .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) }, + { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7, + .access = PL2_RW, .accessfn = access_scxtnum, + .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) }, + { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7, + .access = PL3_RW, + .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) }, +}; +#endif /* TARGET_AARCH64 */ static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) @@ -7540,7 +7232,6 @@ static const ARMCPRegInfo predinv_reginfo[] = { { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7, .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, - REGINFO_SENTINEL }; static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri) @@ -7555,7 +7246,6 @@ static const ARMCPRegInfo ccsidr2_reginfo[] = { .access = PL1_R, .accessfn = access_aa64_tid2, .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW }, - REGINFO_SENTINEL }; static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri, @@ -7588,6 +7278,21 @@ static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri, return CP_ACCESS_OK; } +static CPAccessResult access_joscr_jmcr(CPUARMState *env, + const ARMCPRegInfo *ri, bool isread) +{ + /* + * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only + * in v7A, not in v8A. 
+ */ + if (!arm_feature(env, ARM_FEATURE_V8) && + arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) && + (env->cp15.hstr_el2 & HSTR_TJDBX)) { + return CP_ACCESS_TRAP_EL2; + } + return CP_ACCESS_OK; +} + static const ARMCPRegInfo jazelle_regs[] = { { .name = "JIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0, @@ -7595,18 +7300,22 @@ static const ARMCPRegInfo jazelle_regs[] = { .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "JOSCR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0, + .accessfn = access_joscr_jmcr, .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "JMCR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0, + .accessfn = access_joscr_jmcr, .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - REGINFO_SENTINEL +}; + +static const ARMCPRegInfo contextidr_el2 = { + .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1, + .access = PL2_RW, + .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) }; static const ARMCPRegInfo vhe_reginfo[] = { - { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1, - .access = PL2_RW, - .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) }, { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1, .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write, @@ -7664,7 +7373,6 @@ static const ARMCPRegInfo vhe_reginfo[] = { .access = PL2_RW, .accessfn = e2h_access, .writefn = gt_virt_cval_write, .raw_writefn = raw_write }, #endif - REGINFO_SENTINEL }; #ifndef CONFIG_USER_ONLY @@ -7677,7 +7385,6 @@ static const ARMCPRegInfo ats1e1_reginfo[] = { .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, - REGINFO_SENTINEL }; static const ARMCPRegInfo ats1cp_reginfo[] = { @@ -7689,7 +7396,6 @@ static const ARMCPRegInfo ats1cp_reginfo[] = { .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write }, - REGINFO_SENTINEL }; #endif @@ -7711,7 +7417,6 @@ static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = { .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - REGINFO_SENTINEL }; void register_cp_regs_for_features(ARMCPU *cpu) @@ -7818,7 +7523,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, .resetvalue = cpu->isar.id_isar6 }, - REGINFO_SENTINEL }; define_arm_cp_regs(cpu, v6_idregs); define_arm_cp_regs(cpu, v6_cp_reginfo); @@ -7851,11 +7555,16 @@ void register_cp_regs_for_features(ARMCPU *cpu) define_arm_cp_regs(cpu, not_v7_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_V8)) { - /* AArch64 ID registers, which all have impdef reset values. + /* + * v8 ID registers, which all have impdef reset values. * Note that within the ID register ranges the unused slots * must all RAZ, not UNDEF; future architecture versions may * define new registers here. + * ID registers which are AArch64 views of the AArch32 ID registers + * which already existed in v6 and v7 are handled elsewhere, + * in v6_idregs[]. 
*/ + int i; ARMCPRegInfo v8_idregs[] = { /* * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system @@ -7895,11 +7604,11 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->isar.id_aa64zfr0 }, - { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = 0 }, + .resetvalue = cpu->isar.id_aa64smfr0 }, { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, @@ -8045,7 +7754,34 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->isar.mvfr2 }, - { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + /* + * "0, c0, c3, {0,1,2}" are the encodings corresponding to + * AArch64 MVFR[012]_EL1. Define the STATE_AA32 encoding + * as RAZ, since it is in the "reserved for future ID + * registers, RAZ" part of the AArch32 encoding space. + */ + { .name = "RES_0_C0_C3_0", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "RES_0_C0_C3_1", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "RES_0_C0_C3_2", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + /* + * Other encodings in "0, c0, c3, ..." are STATE_BOTH because + * they're also RAZ for AArch64, and in v8 are gradually + * being filled with AArch64-view-of-AArch32-ID-register + * for new ID registers. 
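
Most of the ID-register entries in this table delegate their permission check to access_aa64_tid3, whose body is unchanged by this patch and therefore not shown here. The pattern is the usual HCR_EL2 fine-grained trap: an EL1 access is redirected to EL2 when the matching trap bit (HCR_EL2.TID3 for the ID group) is set. A minimal sketch of that shape, for orientation only and not taken from this hunk:

    static CPAccessResult access_aa64_tid3(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
    {
        /* Trap EL1 reads of the ID registers to EL2 when HCR_EL2.TID3 is set */
        if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID3)) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_OK;
    }

The access_ttlb hook added to the TLBI entries earlier in this patch follows the same shape, keyed on HCR_EL2.TTLB instead.
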
+ */ + { .name = "RES_0_C0_C3_3", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, @@ -8055,17 +7791,17 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->isar.id_pfr2 }, - { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = 0 }, - { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .resetvalue = cpu->isar.id_dfr1 }, + { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = 0 }, - { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .resetvalue = cpu->isar.id_mmfr5 }, + { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, @@ -8086,10 +7822,9 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, .resetvalue = cpu->pmceid1 }, - REGINFO_SENTINEL }; #ifdef CONFIG_USER_ONLY - ARMCPRegUserSpaceInfo v8_user_idregs[] = { + static const ARMCPRegUserSpaceInfo v8_user_idregs[] = { { .name = "ID_AA64PFR0_EL1", .exported_bits = 0x000f000f00ff0000, .fixed_bits = 0x0000000000000011 }, @@ -8116,7 +7851,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) .exported_bits = 0x000000f0ffffffff }, { .name = "ID_AA64ISAR*_EL1_RESERVED", .is_glob = true }, - REGUSERINFO_SENTINEL }; modify_arm_cp_regs(v8_idregs, v8_user_idregs); #endif @@ -8126,37 +7860,85 @@ void register_cp_regs_for_features(ARMCPU *cpu) ARMCPRegInfo rvbar = { .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, - .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar + .access = PL1_R, + .fieldoffset = offsetof(CPUARMState, cp15.rvbar), }; define_one_arm_cp_reg(cpu, &rvbar); } define_arm_cp_regs(cpu, v8_idregs); define_arm_cp_regs(cpu, v8_cp_reginfo); + + for (i = 4; i < 16; i++) { + /* + * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32. + * For pre-v8 cores there are RAZ patterns for these in + * id_pre_v8_midr_cp_reginfo[]; for v8 we do that here. + * v8 extends the "must RAZ" part of the ID register space + * to also cover c0, 0, c{8-15}, {0-7}. + * These are STATE_AA32 because in the AArch64 sysreg space + * c4-c7 is where the AArch64 ID registers live (and we've + * already defined those in v8_idregs[]), and c8-c15 are not + * "must RAZ" for AArch64. + */ + g_autofree char *name = g_strdup_printf("RES_0_C0_C%d_X", i); + ARMCPRegInfo v8_aa32_raz_idregs = { + .name = name, + .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 0, .crm = i, .opc2 = CP_ANY, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }; + define_one_arm_cp_reg(cpu, &v8_aa32_raz_idregs); + } } - if (arm_feature(env, ARM_FEATURE_EL2)) { + + /* + * Register the base EL2 cpregs. + * Pre v8, these registers are implemented only as part of the + * Virtualization Extensions (EL2 present). 
Beginning with v8, + * if EL2 is missing but EL3 is enabled, mostly these become + * RES0 from EL3, with some specific exceptions. + */ + if (arm_feature(env, ARM_FEATURE_EL2) + || (arm_feature(env, ARM_FEATURE_EL3) + && arm_feature(env, ARM_FEATURE_V8))) { uint64_t vmpidr_def = mpidr_read_val(env); ARMCPRegInfo vpidr_regs[] = { { .name = "VPIDR", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, .access = PL2_RW, .accessfn = access_el3_aa32ns, - .resetvalue = cpu->midr, .type = ARM_CP_ALIAS, + .resetvalue = cpu->midr, + .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ, .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) }, { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, .access = PL2_RW, .resetvalue = cpu->midr, + .type = ARM_CP_EL3_NO_EL2_C_NZ, .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, .access = PL2_RW, .accessfn = access_el3_aa32ns, - .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS, + .resetvalue = vmpidr_def, + .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ, .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) }, { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, - .access = PL2_RW, - .resetvalue = vmpidr_def, + .access = PL2_RW, .resetvalue = vmpidr_def, + .type = ARM_CP_EL3_NO_EL2_C_NZ, .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, - REGINFO_SENTINEL }; + /* + * The only field of MDCR_EL2 that has a defined architectural reset + * value is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N. + */ + ARMCPRegInfo mdcr_el2 = { + .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, .type = ARM_CP_IO, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, + .writefn = mdcr_el2_write, + .access = PL2_RW, .resetvalue = pmu_num_counters(env), + .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), + }; + define_one_arm_cp_reg(cpu, &mdcr_el2); define_arm_cp_regs(cpu, vpidr_regs); define_arm_cp_regs(cpu, el2_cp_reginfo); if (arm_feature(env, ARM_FEATURE_V8)) { @@ -8170,51 +7952,28 @@ void register_cp_regs_for_features(ARMCPU *cpu) ARMCPRegInfo rvbar = { .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, - .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar + .access = PL2_R, + .fieldoffset = offsetof(CPUARMState, cp15.rvbar), }; define_one_arm_cp_reg(cpu, &rvbar); } - } else { - /* If EL2 is missing but higher ELs are enabled, we need to - * register the no_el2 reginfos. - */ - if (arm_feature(env, ARM_FEATURE_EL3)) { - /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value - * of MIDR_EL1 and MPIDR_EL1. 
- */ - ARMCPRegInfo vpidr_regs[] = { - { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, - .access = PL2_RW, .accessfn = access_el3_aa32ns, - .type = ARM_CP_CONST, .resetvalue = cpu->midr, - .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, - { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, - .access = PL2_RW, .accessfn = access_el3_aa32ns, - .type = ARM_CP_NO_RAW, - .writefn = arm_cp_write_ignore, .readfn = mpidr_read }, - REGINFO_SENTINEL - }; - define_arm_cp_regs(cpu, vpidr_regs); - define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo); - if (arm_feature(env, ARM_FEATURE_V8)) { - define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo); - } - } } + + /* Register the base EL3 cpregs. */ if (arm_feature(env, ARM_FEATURE_EL3)) { define_arm_cp_regs(cpu, el3_cp_reginfo); ARMCPRegInfo el3_regs[] = { { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1, - .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar }, + .access = PL3_R, + .fieldoffset = offsetof(CPUARMState, cp15.rvbar), + }, { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0, .access = PL3_RW, .raw_writefn = raw_write, .writefn = sctlr_write, .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]), .resetvalue = cpu->reset_sctlr }, - REGINFO_SENTINEL }; define_arm_cp_regs(cpu, el3_regs); @@ -8229,7 +7988,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) */ if (arm_feature(env, ARM_FEATURE_EL3)) { if (arm_feature(env, ARM_FEATURE_AARCH64)) { - ARMCPRegInfo nsacr = { + static const ARMCPRegInfo nsacr = { .name = "NSACR", .type = ARM_CP_CONST, .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, .access = PL1_RW, .accessfn = nsacr_access, @@ -8237,7 +7996,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) }; define_one_arm_cp_reg(cpu, &nsacr); } else { - ARMCPRegInfo nsacr = { + static const ARMCPRegInfo nsacr = { .name = "NSACR", .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, .access = PL3_RW | PL1_R, @@ -8248,7 +8007,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) } } else { if (arm_feature(env, ARM_FEATURE_V8)) { - ARMCPRegInfo nsacr = { + static const ARMCPRegInfo nsacr = { .name = "NSACR", .type = ARM_CP_CONST, .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, .access = PL1_R, @@ -8349,7 +8108,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, - REGINFO_SENTINEL }; ARMCPRegInfo id_v8_midr_cp_reginfo[] = { { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, @@ -8369,7 +8127,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, .accessfn = access_aa64_tid1, .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, - REGINFO_SENTINEL }; ARMCPRegInfo id_cp_reginfo[] = { /* These are common to v8 and pre-v8 */ @@ -8387,7 +8144,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, .accessfn = access_aa32_tid1, .type = ARM_CP_CONST, .resetvalue = 0 }, - REGINFO_SENTINEL }; /* TLBTR is specific to VMSA */ ARMCPRegInfo id_tlbtr_reginfo = { @@ -8404,35 +8160,33 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->pmsav7_dregion << 8 }; - ARMCPRegInfo crn0_wi_reginfo = { + static const ARMCPRegInfo crn0_wi_reginfo = { .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = 
CP_ANY, .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_OVERRIDE }; #ifdef CONFIG_USER_ONLY - ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = { + static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = { { .name = "MIDR_EL1", .exported_bits = 0x00000000ffffffff }, { .name = "REVIDR_EL1" }, - REGUSERINFO_SENTINEL }; modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo); #endif if (arm_feature(env, ARM_FEATURE_OMAPCP) || arm_feature(env, ARM_FEATURE_STRONGARM)) { - ARMCPRegInfo *r; + size_t i; /* Register the blanket "writes ignored" value first to cover the * whole space. Then update the specific ID registers to allow write * access, so that they ignore writes rather than causing them to * UNDEF. */ define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); - for (r = id_pre_v8_midr_cp_reginfo; - r->type != ARM_CP_SENTINEL; r++) { - r->access = PL1_RW; + for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) { + id_pre_v8_midr_cp_reginfo[i].access = PL1_RW; } - for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { - r->access = PL1_RW; + for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) { + id_cp_reginfo[i].access = PL1_RW; } id_mpuir_reginfo.access = PL1_RW; id_tlbtr_reginfo.access = PL1_RW; @@ -8455,13 +8209,11 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW }, - REGINFO_SENTINEL }; #ifdef CONFIG_USER_ONLY - ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = { + static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = { { .name = "MPIDR_EL1", .fixed_bits = 0x0000000080000000 }, - REGUSERINFO_SENTINEL }; modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo); #endif @@ -8482,7 +8234,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - REGINFO_SENTINEL }; define_arm_cp_regs(cpu, auxcr_reginfo); if (cpu_isar_feature(aa32_ac2, cpu)) { @@ -8517,7 +8268,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) .type = ARM_CP_CONST, .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, .access = PL1_R, .resetvalue = cpu->reset_cbar }, - REGINFO_SENTINEL }; /* We don't implement a r/w 64 bit CBAR currently */ assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); @@ -8540,14 +8290,13 @@ void register_cp_regs_for_features(ARMCPU *cpu) } if (arm_feature(env, ARM_FEATURE_VBAR)) { - ARMCPRegInfo vbar_cp_reginfo[] = { + static const ARMCPRegInfo vbar_cp_reginfo[] = { { .name = "VBAR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .writefn = vbar_write, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), offsetof(CPUARMState, cp15.vbar_ns) }, .resetvalue = 0 }, - REGINFO_SENTINEL }; define_arm_cp_regs(cpu, vbar_cp_reginfo); } @@ -8597,24 +8346,30 @@ void register_cp_regs_for_features(ARMCPU *cpu) if (cpu_isar_feature(aa64_ssbs, cpu)) { define_one_arm_cp_reg(cpu, &ssbs_reginfo); } + if (cpu_isar_feature(any_ras, cpu)) { + define_arm_cp_regs(cpu, minimal_ras_reginfo); + } + if (cpu_isar_feature(aa64_vh, cpu) || + cpu_isar_feature(aa64_debugv8p2, cpu)) { + define_one_arm_cp_reg(cpu, &contextidr_el2); + } if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { define_arm_cp_regs(cpu, vhe_reginfo); } if (cpu_isar_feature(aa64_sve, cpu)) { - define_one_arm_cp_reg(cpu, &zcr_el1_reginfo); - if (arm_feature(env, ARM_FEATURE_EL2)) { - 
define_one_arm_cp_reg(cpu, &zcr_el2_reginfo); - } else { - define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo); - } - if (arm_feature(env, ARM_FEATURE_EL3)) { - define_one_arm_cp_reg(cpu, &zcr_el3_reginfo); - } + define_arm_cp_regs(cpu, zcr_reginfo); + } + + if (cpu_isar_feature(aa64_hcx, cpu)) { + define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo); } #ifdef TARGET_AARCH64 + if (cpu_isar_feature(aa64_sme, cpu)) { + define_arm_cp_regs(cpu, sme_reginfo); + } if (cpu_isar_feature(aa64_pauth, cpu)) { define_arm_cp_regs(cpu, pauth_reginfo); } @@ -8650,6 +8405,10 @@ void register_cp_regs_for_features(ARMCPU *cpu) define_arm_cp_regs(cpu, mte_tco_ro_reginfo); define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo); } + + if (cpu_isar_feature(aa64_scxtnum, cpu)) { + define_arm_cp_regs(cpu, scxtnum_reginfo); + } #endif if (cpu_isar_feature(any_predinv, cpu)) { @@ -8671,44 +8430,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) #endif } -void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) -{ - CPUState *cs = CPU(cpu); - CPUARMState *env = &cpu->env; - - if (arm_feature(env, ARM_FEATURE_AARCH64)) { - /* - * The lower part of each SVE register aliases to the FPU - * registers so we don't need to include both. - */ -#ifdef TARGET_AARCH64 - if (isar_feature_aa64_sve(&cpu->isar)) { - gdb_register_coprocessor(cs, arm_gdb_get_svereg, arm_gdb_set_svereg, - arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs), - "sve-registers.xml", 0); - } else -#endif - { - gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, - aarch64_fpu_gdb_set_reg, - 34, "aarch64-fpu.xml", 0); - } - } else if (arm_feature(env, ARM_FEATURE_NEON)) { - gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, - 51, "arm-neon.xml", 0); - } else if (cpu_isar_feature(aa32_simd_r32, cpu)) { - gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, - 35, "arm-vfp3.xml", 0); - } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) { - gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, - 19, "arm-vfp.xml", 0); - } - gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg, - arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs), - "system-registers.xml", 0); - -} - /* Sort alphabetically by type name, except for "any". */ static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) { @@ -8730,12 +8451,17 @@ static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) static void arm_cpu_list_entry(gpointer data, gpointer user_data) { ObjectClass *oc = data; + CPUClass *cc = CPU_CLASS(oc); const char *typename; char *name; typename = object_class_get_name(oc); name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); - qemu_printf(" %s\n", name); + if (cc->deprecation_note) { + qemu_printf(" %s (deprecated)\n", name); + } else { + qemu_printf(" %s\n", name); + } g_free(name); } @@ -8778,106 +8504,186 @@ CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) return cpu_list; } +/* + * Private utility function for define_one_arm_cp_reg_with_opaque(): + * add a single reginfo struct to the hash table. + */ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, - void *opaque, int state, int secstate, + void *opaque, CPState state, + CPSecureState secstate, int crm, int opc1, int opc2, const char *name) { - /* Private utility function for define_one_arm_cp_reg_with_opaque(): - * add a single reginfo struct to the hash table. - */ - uint32_t *key = g_new(uint32_t, 1); - ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo)); - int is64 = (r->type & ARM_CP_64BIT) ? 
1 : 0; - int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0; + CPUARMState *env = &cpu->env; + uint32_t key; + ARMCPRegInfo *r2; + bool is64 = r->type & ARM_CP_64BIT; + bool ns = secstate & ARM_CP_SECSTATE_NS; + int cp = r->cp; + size_t name_len; + bool make_const; - r2->name = g_strdup(name); - /* Reset the secure state to the specific incoming state. This is - * necessary as the register may have been defined with both states. - */ - r2->secure = secstate; - - if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { - /* Register is banked (using both entries in array). - * Overwriting fieldoffset as the array is only used to define - * banked registers but later only fieldoffset is used. + switch (state) { + case ARM_CP_STATE_AA32: + /* We assume it is a cp15 register if the .cp field is left unset. */ + if (cp == 0 && r->state == ARM_CP_STATE_BOTH) { + cp = 15; + } + key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2); + break; + case ARM_CP_STATE_AA64: + /* + * To allow abbreviation of ARMCPRegInfo definitions, we treat + * cp == 0 as equivalent to the value for "standard guest-visible + * sysreg". STATE_BOTH definitions are also always "standard sysreg" + * in their AArch64 view (the .cp value may be non-zero for the + * benefit of the AArch32 view). */ - r2->fieldoffset = r->bank_fieldoffsets[ns]; + if (cp == 0 || r->state == ARM_CP_STATE_BOTH) { + cp = CP_REG_ARM64_SYSREG_CP; + } + key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2); + break; + default: + g_assert_not_reached(); } - if (state == ARM_CP_STATE_AA32) { - if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { - /* If the register is banked then we don't need to migrate or - * reset the 32-bit instance in certain cases: - * - * 1) If the register has both 32-bit and 64-bit instances then we - * can count on the 64-bit instance taking care of the - * non-secure bank. - * 2) If ARMv8 is enabled then we can count on a 64-bit version - * taking care of the secure bank. This requires that separate - * 32 and 64-bit definitions are provided. - */ - if ((r->state == ARM_CP_STATE_BOTH && ns) || - (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) { - r2->type |= ARM_CP_ALIAS; - } - } else if ((secstate != r->secure) && !ns) { - /* The register is not banked so we only want to allow migration of - * the non-secure instance. - */ - r2->type |= ARM_CP_ALIAS; - } - - if (r->state == ARM_CP_STATE_BOTH) { - /* We assume it is a cp15 register if the .cp field is left unset. - */ - if (r2->cp == 0) { - r2->cp = 15; - } - -#ifdef HOST_WORDS_BIGENDIAN - if (r2->fieldoffset) { - r2->fieldoffset += sizeof(uint32_t); - } -#endif + /* Overriding of an existing definition must be explicitly requested. */ + if (!(r->type & ARM_CP_OVERRIDE)) { + const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key); + if (oldreg) { + assert(oldreg->type & ARM_CP_OVERRIDE); } } - if (state == ARM_CP_STATE_AA64) { - /* To allow abbreviation of ARMCPRegInfo - * definitions, we treat cp == 0 as equivalent to - * the value for "standard guest-visible sysreg". - * STATE_BOTH definitions are also always "standard - * sysreg" in their AArch64 view (the .cp value may - * be non-zero for the benefit of the AArch32 view). + + /* + * Eliminate registers that are not present because the EL is missing. + * Doing this here makes it easier to put all registers for a given + * feature into the same ARMCPRegInfo array and define them all at once. 
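
Related housekeeping: the REGINFO_SENTINEL removals scattered through this patch work because list registration is now length-based (define_arm_cp_regs_with_opaque_len and modify_arm_cp_regs_with_len appear later in this file), so "put all registers for a feature in one array and define them at once" no longer needs a terminator entry. The header-side wrappers are outside this hunk; a plausible sketch, assuming they simply forward ARRAY_SIZE of the array:

    /* Hypothetical wrapper shape -- the real macros live in the cpregs header */
    #define define_arm_cp_regs_with_opaque(CPU, REGS, OPAQUE) \
        define_arm_cp_regs_with_opaque_len(CPU, REGS, OPAQUE, ARRAY_SIZE(REGS))
    #define define_arm_cp_regs(CPU, REGS) \
        define_arm_cp_regs_with_opaque(CPU, REGS, NULL)

Under that assumption the arrays handed to these macros must be real arrays rather than pointers, since ARRAY_SIZE is evaluated at the call site.
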
+ */ + make_const = false; + if (arm_feature(env, ARM_FEATURE_EL3)) { + /* + * An EL2 register without EL2 but with EL3 is (usually) RES0. + * See rule RJFFP in section D1.1.3 of DDI0487H.a. */ - if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) { - r2->cp = CP_REG_ARM64_SYSREG_CP; + int min_el = ctz32(r->access) / 2; + if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) { + if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) { + return; + } + make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP); } - *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm, - r2->opc0, opc1, opc2); } else { - *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2); + CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2) + ? PL2_RW : PL1_RW); + if ((r->access & max_el) == 0) { + return; + } } - if (opaque) { - r2->opaque = opaque; - } - /* reginfo passed to helpers is correct for the actual access, - * and is never ARM_CP_STATE_BOTH: - */ - r2->state = state; - /* Make sure reginfo passed to helpers for wildcarded regs - * has the correct crm/opc1/opc2 for this reg, not CP_ANY: + + /* Combine cpreg and name into one allocation. */ + name_len = strlen(name) + 1; + r2 = g_malloc(sizeof(*r2) + name_len); + *r2 = *r; + r2->name = memcpy(r2 + 1, name, name_len); + + /* + * Update fields to match the instantiation, overwiting wildcards + * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH. */ + r2->cp = cp; r2->crm = crm; r2->opc1 = opc1; r2->opc2 = opc2; - /* By convention, for wildcarded registers only the first + r2->state = state; + r2->secure = secstate; + if (opaque) { + r2->opaque = opaque; + } + + if (make_const) { + /* This should not have been a very special register to begin. */ + int old_special = r2->type & ARM_CP_SPECIAL_MASK; + assert(old_special == 0 || old_special == ARM_CP_NOP); + /* + * Set the special function to CONST, retaining the other flags. + * This is important for e.g. ARM_CP_SVE so that we still + * take the SVE trap if CPTR_EL3.EZ == 0. + */ + r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST; + /* + * Usually, these registers become RES0, but there are a few + * special cases like VPIDR_EL2 which have a constant non-zero + * value with writes ignored. + */ + if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) { + r2->resetvalue = 0; + } + /* + * ARM_CP_CONST has precedence, so removing the callbacks and + * offsets are not strictly necessary, but it is potentially + * less confusing to debug later. + */ + r2->readfn = NULL; + r2->writefn = NULL; + r2->raw_readfn = NULL; + r2->raw_writefn = NULL; + r2->resetfn = NULL; + r2->fieldoffset = 0; + r2->bank_fieldoffsets[0] = 0; + r2->bank_fieldoffsets[1] = 0; + } else { + bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]; + + if (isbanked) { + /* + * Register is banked (using both entries in array). + * Overwriting fieldoffset as the array is only used to define + * banked registers but later only fieldoffset is used. + */ + r2->fieldoffset = r->bank_fieldoffsets[ns]; + } + if (state == ARM_CP_STATE_AA32) { + if (isbanked) { + /* + * If the register is banked then we don't need to migrate or + * reset the 32-bit instance in certain cases: + * + * 1) If the register has both 32-bit and 64-bit instances + * then we can count on the 64-bit instance taking care + * of the non-secure bank. + * 2) If ARMv8 is enabled then we can count on a 64-bit + * version taking care of the secure bank. This requires + * that separate 32 and 64-bit definitions are provided. 
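
A note on the min_el computation above: it relies on the PL*_R/PL*_W access masks allocating two bits per exception level, least privileged level in the least significant bits, with each PLn mask also setting the bits for the more privileged levels. That layout lives in a header outside this hunk, so treat it as an assumption; under it, the lowest set bit of .access identifies the least privileged level that can reach the register:

    /*
     * Worked example, assuming EL0 occupies access bits 0-1, EL1 bits 2-3,
     * EL2 bits 4-5 and EL3 bits 6-7:
     *   PL1_RW -> lowest set bit is 2 -> ctz32()/2 == 1 -> min_el == 1
     *   PL2_W  -> lowest set bit is 4 -> ctz32()/2 == 2 -> min_el == 2
     */

So a register reachable only from EL2 (min_el == 2) on a CPU with EL3 but no EL2 is either skipped outright (ARM_CP_EL3_NO_EL2_UNDEF) or demoted to a constant, per the rule RJFFP reference above.
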
+ */ + if ((r->state == ARM_CP_STATE_BOTH && ns) || + (arm_feature(env, ARM_FEATURE_V8) && !ns)) { + r2->type |= ARM_CP_ALIAS; + } + } else if ((secstate != r->secure) && !ns) { + /* + * The register is not banked so we only want to allow + * migration of the non-secure instance. + */ + r2->type |= ARM_CP_ALIAS; + } + + if (HOST_BIG_ENDIAN && + r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) { + r2->fieldoffset += sizeof(uint32_t); + } + } + } + + /* + * By convention, for wildcarded registers only the first * entry is used for migration; the others are marked as * ALIAS so we don't try to transfer the register * multiple times. Special registers (ie NOP/WFI) are * never migratable and not even raw-accessible. */ - if ((r->type & ARM_CP_SPECIAL)) { + if (r2->type & ARM_CP_SPECIAL_MASK) { r2->type |= ARM_CP_NO_RAW; } if (((r->crm == CP_ANY) && crm != 0) || @@ -8886,7 +8692,8 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; } - /* Check that raw accesses are either forbidden or handled. Note that + /* + * Check that raw accesses are either forbidden or handled. Note that * we can't assert this earlier because the setup of fieldoffset for * banked registers has to be done first. */ @@ -8894,22 +8701,7 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, assert(!raw_accessors_invalid(r2)); } - /* Overriding of an existing definition must be explicitly - * requested. - */ - if (!(r->type & ARM_CP_OVERRIDE)) { - ARMCPRegInfo *oldreg; - oldreg = g_hash_table_lookup(cpu->cp_regs, key); - if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) { - fprintf(stderr, "Register redefined: cp=%d %d bit " - "crn=%d crm=%d opc1=%d opc2=%d, " - "was %s, now %s\n", r2->cp, 32 + 32 * is64, - r2->crn, r2->crm, r2->opc1, r2->opc2, - oldreg->name, r2->name); - g_assert_not_reached(); - } - } - g_hash_table_insert(cpu->cp_regs, key, r2); + g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2); } @@ -8939,13 +8731,15 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of * the register, if any. */ - int crm, opc1, opc2, state; + int crm, opc1, opc2; int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; + CPState state; + /* 64 bit registers have only CRm and Opc1 fields */ assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); /* op0 only exists in the AArch64 encodings */ @@ -8988,7 +8782,7 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, * to encompass the generic architectural permission check. */ if (r->state != ARM_CP_STATE_AA32) { - int mask = 0; + CPAccessRights mask; switch (r->opc1) { case 0: /* min_EL EL1, but some accessible to EL0 via kernel ABI */ @@ -9017,8 +8811,7 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, break; default: /* broken reginfo with out-of-range opc1 */ - assert(false); - break; + g_assert_not_reached(); } /* assert our permissions are not too lax (stricter is fine) */ assert((r->access & ~mask) == 0); @@ -9027,7 +8820,7 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, /* Check that the register definition has enough info to handle * reads and writes if they are permitted. 
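
The switch to g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2) above, and the matching cast in get_arm_cp_reginfo further down, stores the 32-bit encoded key directly in the pointer value instead of in a separately allocated uint32_t. That only works if the table hashes and compares keys by pointer value; the table construction is not part of this hunk, but it would have to look roughly like this sketch:

    /* Sketch only: the real setup happens where cpu->cp_regs is created */
    GHashTable *cp_regs = g_hash_table_new_full(g_direct_hash, g_direct_equal,
                                                NULL,     /* key needs no free */
                                                g_free);  /* r2 is g_malloc'ed */

GLib's g_direct_hash/g_direct_equal treat the gpointer itself as the key, so no per-key allocation or key destructor is needed any more.
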
*/ - if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) { + if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) { if (r->access & PL3_R) { assert((r->fieldoffset || (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || @@ -9039,8 +8832,7 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, r->writefn); } } - /* Bad type field probably means missing sentinel at end of reg list */ - assert(cptype_valid(r->type)); + for (crm = crmmin; crm <= crmmax; crm++) { for (opc1 = opc1min; opc1 <= opc1max; opc1++) { for (opc2 = opc2min; opc2 <= opc2max; opc2++) { @@ -9062,7 +8854,7 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, r->secure, crm, opc1, opc2, r->name); break; - default: + case ARM_CP_SECSTATE_BOTH: name = g_strdup_printf("%s_S", r->name); add_cpreg_to_hashtable(cpu, r, opaque, state, ARM_CP_SECSTATE_S, @@ -9072,6 +8864,8 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, ARM_CP_SECSTATE_NS, crm, opc1, opc2, r->name); break; + default: + g_assert_not_reached(); } } else { /* AArch64 registers get mapped to non-secure instance @@ -9086,13 +8880,13 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, } } -void define_arm_cp_regs_with_opaque(ARMCPU *cpu, - const ARMCPRegInfo *regs, void *opaque) +/* Define a whole list of registers */ +void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs, + void *opaque, size_t len) { - /* Define a whole list of registers */ - const ARMCPRegInfo *r; - for (r = regs; r->type != ARM_CP_SENTINEL; r++) { - define_one_arm_cp_reg_with_opaque(cpu, r, opaque); + size_t i; + for (i = 0; i < len; ++i) { + define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque); } } @@ -9104,17 +8898,20 @@ void define_arm_cp_regs_with_opaque(ARMCPU *cpu, * user-space cannot alter any values and dynamic values pertaining to * execution state are hidden from user space view anyway. 
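
The length-based variant that follows iterates both lists by index rather than scanning for sentinels. For mod entries flagged .is_glob, such as "ID_AA64ISAR*_EL1_RESERVED" in v8_user_idregs above, GLib's GPatternSpec performs shell-style wildcard matching against each register name. A standalone illustration of that GLib API (not QEMU code):

    /* Shell-style glob matching as used by the user-space reginfo mods */
    GPatternSpec *pat = g_pattern_spec_new("ID_AA64ISAR*_EL1_RESERVED");
    gboolean hit = g_pattern_match_string(pat, "ID_AA64ISAR1_EL1_RESERVED"); /* TRUE */
    g_pattern_spec_free(pat);
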
*/ -void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods) +void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len, + const ARMCPRegUserSpaceInfo *mods, + size_t mods_len) { - const ARMCPRegUserSpaceInfo *m; - ARMCPRegInfo *r; - - for (m = mods; m->name; m++) { + for (size_t mi = 0; mi < mods_len; ++mi) { + const ARMCPRegUserSpaceInfo *m = mods + mi; GPatternSpec *pat = NULL; + if (m->is_glob) { pat = g_pattern_spec_new(m->name); } - for (r = regs; r->type != ARM_CP_SENTINEL; r++) { + for (size_t ri = 0; ri < regs_len; ++ri) { + ARMCPRegInfo *r = regs + ri; + if (pat && g_pattern_match_string(pat, r->name)) { r->type = ARM_CP_CONST; r->access = PL0U_R; @@ -9136,7 +8933,7 @@ void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods) const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp) { - return g_hash_table_lookup(cpregs, &encoded_cp); + return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp); } void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, @@ -9215,6 +9012,8 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, CPSRWriteType write_type) { uint32_t changed_daif; + bool rebuild_hflags = (write_type != CPSRWriteRaw) && + (mask & (CPSR_M | CPSR_E | CPSR_IL)); if (mask & CPSR_NZCV) { env->ZF = (~val) & CPSR_Z; @@ -9334,6 +9133,9 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, } mask &= ~CACHED_CPSR_BITS; env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); + if (rebuild_hflags) { + arm_rebuild_hflags(env); + } } /* Sign/zero extend */ @@ -9345,6 +9147,18 @@ uint32_t HELPER(sxtb16)(uint32_t x) return res; } +static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra) +{ + /* + * Take a division-by-zero exception if necessary; otherwise return + * to get the usual non-trapping division behaviour (result of 0) + */ + if (arm_feature(env, ARM_FEATURE_M) + && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) { + raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra); + } +} + uint32_t HELPER(uxtb16)(uint32_t x) { uint32_t res; @@ -9353,19 +9167,24 @@ uint32_t HELPER(uxtb16)(uint32_t x) return res; } -int32_t HELPER(sdiv)(int32_t num, int32_t den) +int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den) { - if (den == 0) - return 0; - if (num == INT_MIN && den == -1) - return INT_MIN; + if (den == 0) { + handle_possible_div0_trap(env, GETPC()); + return 0; + } + if (num == INT_MIN && den == -1) { + return INT_MIN; + } return num / den; } -uint32_t HELPER(udiv)(uint32_t num, uint32_t den) +uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den) { - if (den == 0) - return 0; + if (den == 0) { + handle_possible_div0_trap(env, GETPC()); + return 0; + } return num / den; } @@ -9538,8 +9357,10 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, return target_el; } -void arm_log_exception(int idx) +void arm_log_exception(CPUState *cs) { + int idx = cs->exception_index; + if (qemu_loglevel_mask(CPU_LOG_INT)) { const char *exc = NULL; static const char * const excnames[] = { @@ -9564,6 +9385,8 @@ void arm_log_exception(int idx) [EXCP_LAZYFP] = "v7M exception during lazy FP stacking", [EXCP_LSERR] = "v8M LSERR UsageFault", [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault", + [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault", + [EXCP_VSERR] = "Virtual SERR", }; if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { @@ -9572,7 +9395,8 @@ void arm_log_exception(int idx) if (!exc) { exc = "unknown"; } - 
qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc); + qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n", + idx, exc, cs->cpu_index); } } @@ -9875,7 +9699,7 @@ static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs) * separately here. * * The vector table entry used is always the 0x14 Hyp mode entry point, - * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp. + * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp. * The offset applied to the preferred return address is always zero * (see DDI0487C.a section G1.12.3). * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values. @@ -9889,7 +9713,7 @@ static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs) addr = 0x04; break; case EXCP_SWI: - addr = 0x14; + addr = 0x08; break; case EXCP_BKPT: /* Fall through to prefetch abort. */ @@ -10075,6 +9899,31 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs) mask = CPSR_A | CPSR_I | CPSR_F; offset = 4; break; + case EXCP_VSERR: + { + /* + * Note that this is reported as a data abort, but the DFAR + * has an UNKNOWN value. Construct the SError syndrome from + * AET and ExT fields. + */ + ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, }; + + if (extended_addresses_enabled(env)) { + env->exception.fsr = arm_fi_to_lfsc(&fi); + } else { + env->exception.fsr = arm_fi_to_sfsc(&fi); + } + env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000; + A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); + qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x\n", + env->exception.fsr); + + new_mode = ARM_CPU_MODE_ABT; + addr = 0x10; + mask = CPSR_A | CPSR_I; + offset = 8; + } + break; case EXCP_SMC: new_mode = ARM_CPU_MODE_MON; addr = 0x08; @@ -10182,6 +10031,31 @@ static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env) return ret; } +static bool syndrome_is_sync_extabt(uint32_t syndrome) +{ + /* Return true if this syndrome value is a synchronous external abort */ + switch (syn_get_ec(syndrome)) { + case EC_INSNABORT: + case EC_INSNABORT_SAME_EL: + case EC_DATAABORT: + case EC_DATAABORT_SAME_EL: + /* Look at fault status code for all the synchronous ext abort cases */ + switch (syndrome & 0x3f) { + case 0x10: + case 0x13: + case 0x14: + case 0x15: + case 0x16: + case 0x17: + return true; + default: + return false; + } + default: + return false; + } +} + /* Handle exception entry to a target EL which is using AArch64 */ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) { @@ -10237,6 +10111,14 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) switch (cs->exception_index) { case EXCP_PREFETCH_ABORT: case EXCP_DATA_ABORT: + /* + * FEAT_DoubleFault allows synchronous external aborts taken to EL3 + * to be taken to the SError vector entrypoint. + */ + if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) && + syndrome_is_sync_extabt(env->exception.syndrome)) { + addr += 0x180; + } env->cp15.far_el[new_el] = env->exception.vaddress; qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n", env->cp15.far_el[new_el]); @@ -10295,6 +10177,12 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) case EXCP_VFIQ: addr += 0x100; break; + case EXCP_VSERR: + addr += 0x180; + /* Construct the SError syndrome from IDS and ISS fields. 
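
For readers not steeped in the exception model: the addr adjustments in arm_cpu_do_interrupt_aarch64() index into the AArch64 vector table, whose four entries per source sit at fixed offsets from the selected base. That is why both the new FEAT_DoubleFault path (SCR_EL3.EASE routing a synchronous external abort to the SError entry) and the EXCP_VSERR case just above add 0x180. As a comment-style summary of the architectural layout:

    /*
     * AArch64 vector table layout (offsets from the selected base):
     *   base + 0x000  Synchronous
     *   base + 0x080  IRQ  / vIRQ
     *   base + 0x100  FIQ  / vFIQ
     *   base + 0x180  SError / vSError
     */
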
*/ + env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff); + env->cp15.esr_el[new_el] = env->exception.syndrome; + break; default: cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); } @@ -10349,7 +10237,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) } pstate_write(env, PSTATE_DAIF | new_mode); - env->aarch64 = 1; + env->aarch64 = true; aarch64_restore_sp(env, new_el); helper_rebuild_hflags_a64(env, new_el); @@ -10376,13 +10264,13 @@ static void handle_semihosting(CPUState *cs) qemu_log_mask(CPU_LOG_INT, "...handling as semihosting call 0x%" PRIx64 "\n", env->xregs[0]); - env->xregs[0] = do_common_semihosting(cs); + do_common_semihosting(cs); env->pc += 4; } else { qemu_log_mask(CPU_LOG_INT, "...handling as semihosting call 0x%x\n", env->regs[0]); - env->regs[0] = do_common_semihosting(cs); + do_common_semihosting(cs); env->regs[15] += env->thumb ? 2 : 4; } } @@ -10405,7 +10293,7 @@ void arm_cpu_do_interrupt(CPUState *cs) assert(!arm_feature(env, ARM_FEATURE_M)); - arm_log_exception(cs->exception_index); + arm_log_exception(cs); qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env), new_el); if (qemu_loglevel_mask(CPU_LOG_INT) @@ -10461,892 +10349,16 @@ uint64_t arm_sctlr(CPUARMState *env, int el) /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */ if (el == 0) { ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0); - el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0) - ? 2 : 1; + el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1; } return env->cp15.sctlr_el[el]; } -/* Return the SCTLR value which controls this address translation regime */ -static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) -{ - return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; -} - -#ifndef CONFIG_USER_ONLY - -/* Return true if the specified stage of address translation is disabled */ -static inline bool regime_translation_disabled(CPUARMState *env, - ARMMMUIdx mmu_idx) -{ - uint64_t hcr_el2; - - if (arm_feature(env, ARM_FEATURE_M)) { - switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] & - (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { - case R_V7M_MPU_CTRL_ENABLE_MASK: - /* Enabled, but not for HardFault and NMI */ - return mmu_idx & ARM_MMU_IDX_M_NEGPRI; - case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: - /* Enabled for all cases */ - return false; - case 0: - default: - /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but - * we warned about that in armv7m_nvic.c when the guest set it. 
- */ - return true; - } - } - - hcr_el2 = arm_hcr_el2_eff(env); - - if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { - /* HCR.DC means HCR.VM behaves as 1 */ - return (hcr_el2 & (HCR_DC | HCR_VM)) == 0; - } - - if (hcr_el2 & HCR_TGE) { - /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */ - if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) { - return true; - } - } - - if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { - /* HCR.DC means SCTLR_EL1.M behaves as 0 */ - return true; - } - - return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; -} - -static inline bool regime_translation_big_endian(CPUARMState *env, - ARMMMUIdx mmu_idx) -{ - return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; -} - -/* Return the TTBR associated with this translation regime */ -static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, - int ttbrn) -{ - if (mmu_idx == ARMMMUIdx_Stage2) { - return env->cp15.vttbr_el2; - } - if (mmu_idx == ARMMMUIdx_Stage2_S) { - return env->cp15.vsttbr_el2; - } - if (ttbrn == 0) { - return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; - } else { - return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; - } -} - -#endif /* !CONFIG_USER_ONLY */ - -/* Convert a possible stage1+2 MMU index into the appropriate - * stage 1 MMU index - */ -static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) -{ - switch (mmu_idx) { - case ARMMMUIdx_SE10_0: - return ARMMMUIdx_Stage1_SE0; - case ARMMMUIdx_SE10_1: - return ARMMMUIdx_Stage1_SE1; - case ARMMMUIdx_SE10_1_PAN: - return ARMMMUIdx_Stage1_SE1_PAN; - case ARMMMUIdx_E10_0: - return ARMMMUIdx_Stage1_E0; - case ARMMMUIdx_E10_1: - return ARMMMUIdx_Stage1_E1; - case ARMMMUIdx_E10_1_PAN: - return ARMMMUIdx_Stage1_E1_PAN; - default: - return mmu_idx; - } -} - -/* Return true if the translation regime is using LPAE format page tables */ -static inline bool regime_using_lpae_format(CPUARMState *env, - ARMMMUIdx mmu_idx) -{ - int el = regime_el(env, mmu_idx); - if (el == 2 || arm_el_is_aa64(env, el)) { - return true; - } - if (arm_feature(env, ARM_FEATURE_LPAE) - && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) { - return true; - } - return false; -} - -/* Returns true if the stage 1 translation regime is using LPAE format page - * tables. Used when raising alignment exceptions, whose FSR changes depending - * on whether the long or short descriptor format is in use. 
*/ -bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) -{ - mmu_idx = stage_1_mmu_idx(mmu_idx); - - return regime_using_lpae_format(env, mmu_idx); -} - -#ifndef CONFIG_USER_ONLY -static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) -{ - switch (mmu_idx) { - case ARMMMUIdx_SE10_0: - case ARMMMUIdx_E20_0: - case ARMMMUIdx_SE20_0: - case ARMMMUIdx_Stage1_E0: - case ARMMMUIdx_Stage1_SE0: - case ARMMMUIdx_MUser: - case ARMMMUIdx_MSUser: - case ARMMMUIdx_MUserNegPri: - case ARMMMUIdx_MSUserNegPri: - return true; - default: - return false; - case ARMMMUIdx_E10_0: - case ARMMMUIdx_E10_1: - case ARMMMUIdx_E10_1_PAN: - g_assert_not_reached(); - } -} - -/* Translate section/page access permissions to page - * R/W protection flags - * - * @env: CPUARMState - * @mmu_idx: MMU index indicating required translation regime - * @ap: The 3-bit access permissions (AP[2:0]) - * @domain_prot: The 2-bit domain access permissions - */ -static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, - int ap, int domain_prot) -{ - bool is_user = regime_is_user(env, mmu_idx); - - if (domain_prot == 3) { - return PAGE_READ | PAGE_WRITE; - } - - switch (ap) { - case 0: - if (arm_feature(env, ARM_FEATURE_V7)) { - return 0; - } - switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { - case SCTLR_S: - return is_user ? 0 : PAGE_READ; - case SCTLR_R: - return PAGE_READ; - default: - return 0; - } - case 1: - return is_user ? 0 : PAGE_READ | PAGE_WRITE; - case 2: - if (is_user) { - return PAGE_READ; - } else { - return PAGE_READ | PAGE_WRITE; - } - case 3: - return PAGE_READ | PAGE_WRITE; - case 4: /* Reserved. */ - return 0; - case 5: - return is_user ? 0 : PAGE_READ; - case 6: - return PAGE_READ; - case 7: - if (!arm_feature(env, ARM_FEATURE_V6K)) { - return 0; - } - return PAGE_READ; - default: - g_assert_not_reached(); - } -} - -/* Translate section/page access permissions to page - * R/W protection flags. - * - * @ap: The 2-bit simple AP (AP[2:1]) - * @is_user: TRUE if accessing from PL0 - */ -static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user) -{ - switch (ap) { - case 0: - return is_user ? 0 : PAGE_READ | PAGE_WRITE; - case 1: - return PAGE_READ | PAGE_WRITE; - case 2: - return is_user ? 
0 : PAGE_READ; - case 3: - return PAGE_READ; - default: - g_assert_not_reached(); - } -} - -static inline int -simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) -{ - return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); -} - -/* Translate S2 section/page access permissions to protection flags - * - * @env: CPUARMState - * @s2ap: The 2-bit stage2 access permissions (S2AP) - * @xn: XN (execute-never) bits - * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0 - */ -static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0) -{ - int prot = 0; - - if (s2ap & 1) { - prot |= PAGE_READ; - } - if (s2ap & 2) { - prot |= PAGE_WRITE; - } - - if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) { - switch (xn) { - case 0: - prot |= PAGE_EXEC; - break; - case 1: - if (s1_is_el0) { - prot |= PAGE_EXEC; - } - break; - case 2: - break; - case 3: - if (!s1_is_el0) { - prot |= PAGE_EXEC; - } - break; - default: - g_assert_not_reached(); - } - } else { - if (!extract32(xn, 1, 1)) { - if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { - prot |= PAGE_EXEC; - } - } - } - return prot; -} - -/* Translate section/page access permissions to protection flags - * - * @env: CPUARMState - * @mmu_idx: MMU index indicating required translation regime - * @is_aa64: TRUE if AArch64 - * @ap: The 2-bit simple AP (AP[2:1]) - * @ns: NS (non-secure) bit - * @xn: XN (execute-never) bit - * @pxn: PXN (privileged execute-never) bit - */ -static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, - int ap, int ns, int xn, int pxn) -{ - bool is_user = regime_is_user(env, mmu_idx); - int prot_rw, user_rw; - bool have_wxn; - int wxn = 0; - - assert(mmu_idx != ARMMMUIdx_Stage2); - assert(mmu_idx != ARMMMUIdx_Stage2_S); - - user_rw = simple_ap_to_rw_prot_is_user(ap, true); - if (is_user) { - prot_rw = user_rw; - } else { - if (user_rw && regime_is_pan(env, mmu_idx)) { - /* PAN forbids data accesses but doesn't affect insn fetch */ - prot_rw = 0; - } else { - prot_rw = simple_ap_to_rw_prot_is_user(ap, false); - } - } - - if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { - return prot_rw; - } - - /* TODO have_wxn should be replaced with - * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2) - * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE - * compatible processors have EL2, which is required for [U]WXN. 
- */ - have_wxn = arm_feature(env, ARM_FEATURE_LPAE); - - if (have_wxn) { - wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; - } - - if (is_aa64) { - if (regime_has_2_ranges(mmu_idx) && !is_user) { - xn = pxn || (user_rw & PAGE_WRITE); - } - } else if (arm_feature(env, ARM_FEATURE_V7)) { - switch (regime_el(env, mmu_idx)) { - case 1: - case 3: - if (is_user) { - xn = xn || !(user_rw & PAGE_READ); - } else { - int uwxn = 0; - if (have_wxn) { - uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; - } - xn = xn || !(prot_rw & PAGE_READ) || pxn || - (uwxn && (user_rw & PAGE_WRITE)); - } - break; - case 2: - break; - } - } else { - xn = wxn = 0; - } - - if (xn || (wxn && (prot_rw & PAGE_WRITE))) { - return prot_rw; - } - return prot_rw | PAGE_EXEC; -} - -static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, - uint32_t *table, uint32_t address) -{ - /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ - TCR *tcr = regime_tcr(env, mmu_idx); - - if (address & tcr->mask) { - if (tcr->raw_tcr & TTBCR_PD1) { - /* Translation table walk disabled for TTBR1 */ - return false; - } - *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; - } else { - if (tcr->raw_tcr & TTBCR_PD0) { - /* Translation table walk disabled for TTBR0 */ - return false; - } - *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask; - } - *table |= (address >> 18) & 0x3ffc; - return true; -} - -/* Translate a S1 pagetable walk through S2 if needed. */ -static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, - hwaddr addr, bool *is_secure, - ARMMMUFaultInfo *fi) -{ - if (arm_mmu_idx_is_stage1_of_2(mmu_idx) && - !regime_translation_disabled(env, ARMMMUIdx_Stage2)) { - target_ulong s2size; - hwaddr s2pa; - int s2prot; - int ret; - ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S - : ARMMMUIdx_Stage2; - ARMCacheAttrs cacheattrs = {}; - MemTxAttrs txattrs = {}; - - ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false, - &s2pa, &txattrs, &s2prot, &s2size, fi, - &cacheattrs); - if (ret) { - assert(fi->type != ARMFault_None); - fi->s2addr = addr; - fi->stage2 = true; - fi->s1ptw = true; - fi->s1ns = !*is_secure; - return ~0; - } - if ((arm_hcr_el2_eff(env) & HCR_PTW) && - (cacheattrs.attrs & 0xf0) == 0) { - /* - * PTW set and S1 walk touched S2 Device memory: - * generate Permission fault. - */ - fi->type = ARMFault_Permission; - fi->s2addr = addr; - fi->stage2 = true; - fi->s1ptw = true; - fi->s1ns = !*is_secure; - return ~0; - } - - if (arm_is_secure_below_el3(env)) { - /* Check if page table walk is to secure or non-secure PA space. */ - if (*is_secure) { - *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW); - } else { - *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW); - } - } else { - assert(!*is_secure); - } - - addr = s2pa; - } - return addr; -} - -/* All loads done in the course of a page table walk go through here. 
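/*
 * Editor's note (not part of the patch): for the short-descriptor format
 * used by get_level1_table_address() above, the level-1 descriptor address
 * is just the (16KB-aligned) table base plus four bytes per 1MB of input
 * address.  Illustrative sketch for the TTBCR.N == 0 case; the name is
 * made up here:
 */
#include <stdint.h>

static uint32_t ex_l1_desc_addr(uint32_t ttbr, uint32_t va)
{
    uint32_t table = ttbr & 0xffffc000u;    /* 16KB-aligned table base */
    return table | ((va >> 18) & 0x3ffcu);  /* VA[31:20] * 4           */
}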
*/ -static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, - ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) -{ - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - MemTxAttrs attrs = {}; - MemTxResult result = MEMTX_OK; - AddressSpace *as; - uint32_t data; - - addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi); - attrs.secure = is_secure; - as = arm_addressspace(cs, attrs); - if (fi->s1ptw) { - return 0; - } - if (regime_translation_big_endian(env, mmu_idx)) { - data = address_space_ldl_be(as, addr, attrs, &result); - } else { - data = address_space_ldl_le(as, addr, attrs, &result); - } - if (result == MEMTX_OK) { - return data; - } - fi->type = ARMFault_SyncExternalOnWalk; - fi->ea = arm_extabort_type(result); - return 0; -} - -static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, - ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) -{ - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - MemTxAttrs attrs = {}; - MemTxResult result = MEMTX_OK; - AddressSpace *as; - uint64_t data; - - addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi); - attrs.secure = is_secure; - as = arm_addressspace(cs, attrs); - if (fi->s1ptw) { - return 0; - } - if (regime_translation_big_endian(env, mmu_idx)) { - data = address_space_ldq_be(as, addr, attrs, &result); - } else { - data = address_space_ldq_le(as, addr, attrs, &result); - } - if (result == MEMTX_OK) { - return data; - } - fi->type = ARMFault_SyncExternalOnWalk; - fi->ea = arm_extabort_type(result); - return 0; -} - -static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - hwaddr *phys_ptr, int *prot, - target_ulong *page_size, - ARMMMUFaultInfo *fi) -{ - CPUState *cs = env_cpu(env); - int level = 1; - uint32_t table; - uint32_t desc; - int type; - int ap; - int domain = 0; - int domain_prot; - hwaddr phys_addr; - uint32_t dacr; - - /* Pagetable walk. */ - /* Lookup l1 descriptor. */ - if (!get_level1_table_address(env, mmu_idx, &table, address)) { - /* Section translation fault if page walk is disabled by PD0 or PD1 */ - fi->type = ARMFault_Translation; - goto do_fault; - } - desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), - mmu_idx, fi); - if (fi->type != ARMFault_None) { - goto do_fault; - } - type = (desc & 3); - domain = (desc >> 5) & 0x0f; - if (regime_el(env, mmu_idx) == 1) { - dacr = env->cp15.dacr_ns; - } else { - dacr = env->cp15.dacr_s; - } - domain_prot = (dacr >> (domain * 2)) & 3; - if (type == 0) { - /* Section translation fault. */ - fi->type = ARMFault_Translation; - goto do_fault; - } - if (type != 2) { - level = 2; - } - if (domain_prot == 0 || domain_prot == 2) { - fi->type = ARMFault_Domain; - goto do_fault; - } - if (type == 2) { - /* 1Mb section. */ - phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); - ap = (desc >> 10) & 3; - *page_size = 1024 * 1024; - } else { - /* Lookup l2 entry. */ - if (type == 1) { - /* Coarse pagetable. */ - table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); - } else { - /* Fine pagetable. */ - table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); - } - desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), - mmu_idx, fi); - if (fi->type != ARMFault_None) { - goto do_fault; - } - switch (desc & 3) { - case 0: /* Page translation fault. */ - fi->type = ARMFault_Translation; - goto do_fault; - case 1: /* 64k page. 
*/ - phys_addr = (desc & 0xffff0000) | (address & 0xffff); - ap = (desc >> (4 + ((address >> 13) & 6))) & 3; - *page_size = 0x10000; - break; - case 2: /* 4k page. */ - phys_addr = (desc & 0xfffff000) | (address & 0xfff); - ap = (desc >> (4 + ((address >> 9) & 6))) & 3; - *page_size = 0x1000; - break; - case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ - if (type == 1) { - /* ARMv6/XScale extended small page format */ - if (arm_feature(env, ARM_FEATURE_XSCALE) - || arm_feature(env, ARM_FEATURE_V6)) { - phys_addr = (desc & 0xfffff000) | (address & 0xfff); - *page_size = 0x1000; - } else { - /* UNPREDICTABLE in ARMv5; we choose to take a - * page translation fault. - */ - fi->type = ARMFault_Translation; - goto do_fault; - } - } else { - phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); - *page_size = 0x400; - } - ap = (desc >> 4) & 3; - break; - default: - /* Never happens, but compiler isn't smart enough to tell. */ - abort(); - } - } - *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); - *prot |= *prot ? PAGE_EXEC : 0; - if (!(*prot & (1 << access_type))) { - /* Access permission fault. */ - fi->type = ARMFault_Permission; - goto do_fault; - } - *phys_ptr = phys_addr; - return false; -do_fault: - fi->domain = domain; - fi->level = level; - return true; -} - -static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, - target_ulong *page_size, ARMMMUFaultInfo *fi) -{ - CPUState *cs = env_cpu(env); - ARMCPU *cpu = env_archcpu(env); - int level = 1; - uint32_t table; - uint32_t desc; - uint32_t xn; - uint32_t pxn = 0; - int type; - int ap; - int domain = 0; - int domain_prot; - hwaddr phys_addr; - uint32_t dacr; - bool ns; - - /* Pagetable walk. */ - /* Lookup l1 descriptor. */ - if (!get_level1_table_address(env, mmu_idx, &table, address)) { - /* Section translation fault if page walk is disabled by PD0 or PD1 */ - fi->type = ARMFault_Translation; - goto do_fault; - } - desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), - mmu_idx, fi); - if (fi->type != ARMFault_None) { - goto do_fault; - } - type = (desc & 3); - if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) { - /* Section translation fault, or attempt to use the encoding - * which is Reserved on implementations without PXN. - */ - fi->type = ARMFault_Translation; - goto do_fault; - } - if ((type == 1) || !(desc & (1 << 18))) { - /* Page or Section. */ - domain = (desc >> 5) & 0x0f; - } - if (regime_el(env, mmu_idx) == 1) { - dacr = env->cp15.dacr_ns; - } else { - dacr = env->cp15.dacr_s; - } - if (type == 1) { - level = 2; - } - domain_prot = (dacr >> (domain * 2)) & 3; - if (domain_prot == 0 || domain_prot == 2) { - /* Section or Page domain fault */ - fi->type = ARMFault_Domain; - goto do_fault; - } - if (type != 1) { - if (desc & (1 << 18)) { - /* Supersection. */ - phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); - phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; - phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; - *page_size = 0x1000000; - } else { - /* Section. */ - phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); - *page_size = 0x100000; - } - ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); - xn = desc & (1 << 4); - pxn = desc & 1; - ns = extract32(desc, 19, 1); - } else { - if (cpu_isar_feature(aa32_pxn, cpu)) { - pxn = (desc >> 2) & 1; - } - ns = extract32(desc, 3, 1); - /* Lookup l2 entry. 
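/*
 * Editor's note (not part of the patch): every case in the v5/v6 walks
 * above composes the output address the same way -- high bits from the
 * descriptor, low bits from the input address -- only the block size
 * changes.  A minimal sketch (illustrative name, and ignoring the
 * supersection's extended address bits above bit 31):
 */
#include <stdint.h>

static uint32_t ex_compose_pa(uint32_t desc, uint32_t va, uint32_t page_size)
{
    uint32_t mask = page_size - 1;          /* page_size is a power of two */
    return (desc & ~mask) | (va & mask);
}

/* e.g. a 1MB section: ex_compose_pa(desc, va, 0x100000) */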
*/ - table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); - desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), - mmu_idx, fi); - if (fi->type != ARMFault_None) { - goto do_fault; - } - ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); - switch (desc & 3) { - case 0: /* Page translation fault. */ - fi->type = ARMFault_Translation; - goto do_fault; - case 1: /* 64k page. */ - phys_addr = (desc & 0xffff0000) | (address & 0xffff); - xn = desc & (1 << 15); - *page_size = 0x10000; - break; - case 2: case 3: /* 4k page. */ - phys_addr = (desc & 0xfffff000) | (address & 0xfff); - xn = desc & 1; - *page_size = 0x1000; - break; - default: - /* Never happens, but compiler isn't smart enough to tell. */ - abort(); - } - } - if (domain_prot == 3) { - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - } else { - if (pxn && !regime_is_user(env, mmu_idx)) { - xn = 1; - } - if (xn && access_type == MMU_INST_FETCH) { - fi->type = ARMFault_Permission; - goto do_fault; - } - - if (arm_feature(env, ARM_FEATURE_V6K) && - (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { - /* The simplified model uses AP[0] as an access control bit. */ - if ((ap & 1) == 0) { - /* Access flag fault. */ - fi->type = ARMFault_AccessFlag; - goto do_fault; - } - *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); - } else { - *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); - } - if (*prot && !xn) { - *prot |= PAGE_EXEC; - } - if (!(*prot & (1 << access_type))) { - /* Access permission fault. */ - fi->type = ARMFault_Permission; - goto do_fault; - } - } - if (ns) { - /* The NS bit will (as required by the architecture) have no effect if - * the CPU doesn't support TZ or this is a non-secure translation - * regime, because the attribute will already be non-secure. - */ - attrs->secure = false; - } - *phys_ptr = phys_addr; - return false; -do_fault: - fi->domain = domain; - fi->level = level; - return true; -} - -/* - * check_s2_mmu_setup - * @cpu: ARMCPU - * @is_aa64: True if the translation regime is in AArch64 state - * @startlevel: Suggested starting level - * @inputsize: Bitsize of IPAs - * @stride: Page-table stride (See the ARM ARM) - * - * Returns true if the suggested S2 translation parameters are OK and - * false otherwise. - */ -static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, - int inputsize, int stride) -{ - const int grainsize = stride + 3; - int startsizecheck; - - /* Negative levels are never allowed. */ - if (level < 0) { - return false; - } - - startsizecheck = inputsize - ((3 - level) * stride + grainsize); - if (startsizecheck < 1 || startsizecheck > stride + 4) { - return false; - } - - if (is_aa64) { - CPUARMState *env = &cpu->env; - unsigned int pamax = arm_pamax(cpu); - - switch (stride) { - case 13: /* 64KB Pages. */ - if (level == 0 || (level == 1 && pamax <= 42)) { - return false; - } - break; - case 11: /* 16KB Pages. */ - if (level == 0 || (level == 1 && pamax <= 40)) { - return false; - } - break; - case 9: /* 4KB Pages. */ - if (level == 0 && pamax <= 42) { - return false; - } - break; - default: - g_assert_not_reached(); - } - - /* Inputsize checks. */ - if (inputsize > pamax && - (arm_el_is_aa64(env, 1) || inputsize > 40)) { - /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */ - return false; - } - } else { - /* AArch32 only supports 4KB pages. Assert on that. 
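/*
 * Editor's note (not part of the patch): the bound that check_s2_mmu_setup()
 * applies above can be read as "the bits left over for the top-level table
 * index must fit in one descriptor page's worth of entries".  Standalone
 * sketch with a worked value (illustrative name):
 */
static int ex_s2_start_level_ok(int level, int inputsize, int stride)
{
    int grainsize = stride + 3;   /* log2 of the page size */
    int topbits = inputsize - ((3 - level) * stride + grainsize);

    return level >= 0 && topbits >= 1 && topbits <= stride + 4;
}

/* 4KB granule (stride 9), 40-bit IPA, level 1: topbits = 40 - 30 = 10 -> OK */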
*/ - assert(stride == 9); - - if (level == 0) { - return false; - } - } - return true; -} - -/* Translate from the 4-bit stage 2 representation of - * memory attributes (without cache-allocation hints) to - * the 8-bit representation of the stage 1 MAIR registers - * (which includes allocation hints). - * - * ref: shared/translation/attrs/S2AttrDecode() - * .../S2ConvertAttrsHints() - */ -static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs) -{ - uint8_t hiattr = extract32(s2attrs, 2, 2); - uint8_t loattr = extract32(s2attrs, 0, 2); - uint8_t hihint = 0, lohint = 0; - - if (hiattr != 0) { /* normal memory */ - if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */ - hiattr = loattr = 1; /* non-cacheable */ - } else { - if (hiattr != 1) { /* Write-through or write-back */ - hihint = 3; /* RW allocate */ - } - if (loattr != 1) { /* Write-through or write-back */ - lohint = 3; /* RW allocate */ - } - } - } - - return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; -} -#endif /* !CONFIG_USER_ONLY */ - -static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx) +int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx) { if (regime_has_2_ranges(mmu_idx)) { return extract64(tcr, 37, 2); - } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { + } else if (regime_is_stage2(mmu_idx)) { return 0; /* VTCR_EL2 */ } else { /* Replicate the single TBI bit so we always have 2 bits. */ @@ -11354,11 +10366,11 @@ static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx) } } -static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx) +int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx) { if (regime_has_2_ranges(mmu_idx)) { return extract64(tcr, 51, 2); - } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { + } else if (regime_is_stage2(mmu_idx)) { return 0; /* VTCR_EL2 */ } else { /* Replicate the single TBID bit so we always have 2 bits. */ @@ -11376,26 +10388,119 @@ static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx) } } +static ARMGranuleSize tg0_to_gran_size(int tg) +{ + switch (tg) { + case 0: + return Gran4K; + case 1: + return Gran64K; + case 2: + return Gran16K; + default: + return GranInvalid; + } +} + +static ARMGranuleSize tg1_to_gran_size(int tg) +{ + switch (tg) { + case 1: + return Gran16K; + case 2: + return Gran4K; + case 3: + return Gran64K; + default: + return GranInvalid; + } +} + +static inline bool have4k(ARMCPU *cpu, bool stage2) +{ + return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu) + : cpu_isar_feature(aa64_tgran4, cpu); +} + +static inline bool have16k(ARMCPU *cpu, bool stage2) +{ + return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu) + : cpu_isar_feature(aa64_tgran16, cpu); +} + +static inline bool have64k(ARMCPU *cpu, bool stage2) +{ + return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu) + : cpu_isar_feature(aa64_tgran64, cpu); +} + +static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran, + bool stage2) +{ + switch (gran) { + case Gran4K: + if (have4k(cpu, stage2)) { + return gran; + } + break; + case Gran16K: + if (have16k(cpu, stage2)) { + return gran; + } + break; + case Gran64K: + if (have64k(cpu, stage2)) { + return gran; + } + break; + case GranInvalid: + break; + } + /* + * If the guest selects a granule size that isn't implemented, + * the architecture requires that we behave as if it selected one + * that is (with an IMPDEF choice of which one to pick). We choose + * to implement the smallest supported granule size. 
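/*
 * Editor's note (not part of the patch): the IMPDEF fallback described in
 * the comment above (and implemented just below) always picks the smallest
 * implemented granule.  Standalone sketch, with made-up has_4k/has_16k/
 * has_64k flags standing in for the ID-register checks:
 */
static int ex_pick_granule_kb(int requested_kb, int has_4k, int has_16k,
                              int has_64k)
{
    if ((requested_kb == 4 && has_4k) ||
        (requested_kb == 16 && has_16k) ||
        (requested_kb == 64 && has_64k)) {
        return requested_kb;      /* the guest's choice is implemented */
    }
    /* IMPDEF fallback: smallest implemented granule size */
    if (has_4k) {
        return 4;
    }
    if (has_16k) {
        return 16;
    }
    return 64;                    /* at least one size must be present */
}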
+ */ + if (have4k(cpu, stage2)) { + return Gran4K; + } + if (have16k(cpu, stage2)) { + return Gran16K; + } + assert(have64k(cpu, stage2)); + return Gran64K; +} + ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, ARMMMUIdx mmu_idx, bool data) { - uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; - bool epd, hpd, using16k, using64k; - int select, tsz, tbi, max_tsz; + uint64_t tcr = regime_tcr(env, mmu_idx); + bool epd, hpd, tsz_oob, ds, ha, hd; + int select, tsz, tbi, max_tsz, min_tsz, ps, sh; + ARMGranuleSize gran; + ARMCPU *cpu = env_archcpu(env); + bool stage2 = regime_is_stage2(mmu_idx); if (!regime_has_2_ranges(mmu_idx)) { select = 0; tsz = extract32(tcr, 0, 6); - using64k = extract32(tcr, 14, 1); - using16k = extract32(tcr, 15, 1); - if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { + gran = tg0_to_gran_size(extract32(tcr, 14, 2)); + if (stage2) { /* VTCR_EL2 */ hpd = false; } else { hpd = extract32(tcr, 24, 1); } epd = false; + sh = extract32(tcr, 12, 2); + ps = extract32(tcr, 16, 3); + ha = extract32(tcr, 21, 1) && cpu_isar_feature(aa64_hafs, cpu); + hd = extract32(tcr, 22, 1) && cpu_isar_feature(aa64_hdbs, cpu); + ds = extract64(tcr, 32, 1); } else { + bool e0pd; + /* * Bit 55 is always between the two regions, and is canonical for * determining if address tagging is enabled. @@ -11403,28 +10508,76 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, select = extract64(va, 55, 1); if (!select) { tsz = extract32(tcr, 0, 6); + gran = tg0_to_gran_size(extract32(tcr, 14, 2)); epd = extract32(tcr, 7, 1); - using64k = extract32(tcr, 14, 1); - using16k = extract32(tcr, 15, 1); + sh = extract32(tcr, 12, 2); hpd = extract64(tcr, 41, 1); + e0pd = extract64(tcr, 55, 1); } else { - int tg = extract32(tcr, 30, 2); - using16k = tg == 1; - using64k = tg == 3; tsz = extract32(tcr, 16, 6); + gran = tg1_to_gran_size(extract32(tcr, 30, 2)); epd = extract32(tcr, 23, 1); + sh = extract32(tcr, 28, 2); hpd = extract64(tcr, 42, 1); + e0pd = extract64(tcr, 56, 1); + } + ps = extract64(tcr, 32, 3); + ha = extract64(tcr, 39, 1) && cpu_isar_feature(aa64_hafs, cpu); + hd = extract64(tcr, 40, 1) && cpu_isar_feature(aa64_hdbs, cpu); + ds = extract64(tcr, 59, 1); + + if (e0pd && cpu_isar_feature(aa64_e0pd, cpu) && + regime_is_user(env, mmu_idx)) { + epd = true; } } - if (cpu_isar_feature(aa64_st, env_archcpu(env))) { - max_tsz = 48 - using64k; + gran = sanitize_gran_size(cpu, gran, stage2); + + if (cpu_isar_feature(aa64_st, cpu)) { + max_tsz = 48 - (gran == Gran64K); } else { max_tsz = 39; } - tsz = MIN(tsz, max_tsz); - tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */ + /* + * DS is RES0 unless FEAT_LPA2 is supported for the given page size; + * adjust the effective value of DS, as documented. + */ + min_tsz = 16; + if (gran == Gran64K) { + if (cpu_isar_feature(aa64_lva, cpu)) { + min_tsz = 12; + } + ds = false; + } else if (ds) { + if (regime_is_stage2(mmu_idx)) { + if (gran == Gran16K) { + ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu); + } else { + ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu); + } + } else { + if (gran == Gran16K) { + ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu); + } else { + ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu); + } + } + if (ds) { + min_tsz = 12; + } + } + + if (tsz > max_tsz) { + tsz = max_tsz; + tsz_oob = true; + } else if (tsz < min_tsz) { + tsz = min_tsz; + tsz_oob = true; + } else { + tsz_oob = false; + } /* Present TBI as a composite with TBID. 
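/*
 * Editor's note (not part of the patch): the new T0SZ/T1SZ handling above
 * clamps the field into [min_tsz, max_tsz] and additionally records whether
 * it was out of range, instead of clamping silently.  Minimal sketch
 * (illustrative name):
 */
static int ex_clamp_tsz(int tsz, int min_tsz, int max_tsz, int *out_of_range)
{
    *out_of_range = (tsz < min_tsz || tsz > max_tsz);
    if (tsz > max_tsz) {
        return max_tsz;
    }
    if (tsz < min_tsz) {
        return min_tsz;
    }
    return tsz;
}

/* e.g. tsz = 10 with min_tsz 16 -> returns 16 and flags it out of range */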
*/ tbi = aa64_va_parameter_tbi(tcr, mmu_idx); @@ -11435,1427 +10588,20 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, return (ARMVAParameters) { .tsz = tsz, + .ps = ps, + .sh = sh, .select = select, .tbi = tbi, .epd = epd, .hpd = hpd, - .using16k = using16k, - .using64k = using64k, + .tsz_oob = tsz_oob, + .ds = ds, + .ha = ha, + .hd = ha && hd, + .gran = gran, }; } -#ifndef CONFIG_USER_ONLY -static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, - ARMMMUIdx mmu_idx) -{ - uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; - uint32_t el = regime_el(env, mmu_idx); - int select, tsz; - bool epd, hpd; - - assert(mmu_idx != ARMMMUIdx_Stage2_S); - - if (mmu_idx == ARMMMUIdx_Stage2) { - /* VTCR */ - bool sext = extract32(tcr, 4, 1); - bool sign = extract32(tcr, 3, 1); - - /* - * If the sign-extend bit is not the same as t0sz[3], the result - * is unpredictable. Flag this as a guest error. - */ - if (sign != sext) { - qemu_log_mask(LOG_GUEST_ERROR, - "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); - } - tsz = sextract32(tcr, 0, 4) + 8; - select = 0; - hpd = false; - epd = false; - } else if (el == 2) { - /* HTCR */ - tsz = extract32(tcr, 0, 3); - select = 0; - hpd = extract64(tcr, 24, 1); - epd = false; - } else { - int t0sz = extract32(tcr, 0, 3); - int t1sz = extract32(tcr, 16, 3); - - if (t1sz == 0) { - select = va > (0xffffffffu >> t0sz); - } else { - /* Note that we will detect errors later. */ - select = va >= ~(0xffffffffu >> t1sz); - } - if (!select) { - tsz = t0sz; - epd = extract32(tcr, 7, 1); - hpd = extract64(tcr, 41, 1); - } else { - tsz = t1sz; - epd = extract32(tcr, 23, 1); - hpd = extract64(tcr, 42, 1); - } - /* For aarch32, hpd0 is not enabled without t2e as well. */ - hpd &= extract32(tcr, 6, 1); - } - - return (ARMVAParameters) { - .tsz = tsz, - .select = select, - .epd = epd, - .hpd = hpd, - }; -} - -/** - * get_phys_addr_lpae: perform one stage of page table walk, LPAE format - * - * Returns false if the translation was successful. Otherwise, phys_ptr, attrs, - * prot and page_size may not be filled in, and the populated fsr value provides - * information on why the translation aborted, in the format of a long-format - * DFSR/IFSR fault register, with the following caveats: - * * the WnR bit is never set (the caller must do this). - * - * @env: CPUARMState - * @address: virtual address to get physical address for - * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH - * @mmu_idx: MMU index indicating required translation regime - * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page table - * walk), must be true if this is stage 2 of a stage 1+2 walk for an - * EL0 access). If @mmu_idx is anything else, @s1_is_el0 is ignored. - * @phys_ptr: set to the physical address corresponding to the virtual address - * @attrs: set to the memory transaction attributes to use - * @prot: set to the permissions for the page containing phys_ptr - * @page_size_ptr: set to the size of the page containing phys_ptr - * @fi: set to fault info if the translation fails - * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes - */ -static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - bool s1_is_el0, - hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, - target_ulong *page_size_ptr, - ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) -{ - ARMCPU *cpu = env_archcpu(env); - CPUState *cs = CPU(cpu); - /* Read an LPAE long-descriptor translation table. 
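/*
 * Editor's note (not part of the patch): aa32_va_parameters() above selects
 * TTBR0 vs TTBR1 purely by comparing the VA against the T0SZ/T1SZ limits.
 * Minimal sketch of that select (illustrative name):
 */
#include <stdint.h>

static int ex_aa32_ttbr_select(uint32_t va, int t0sz, int t1sz)
{
    if (t1sz == 0) {
        return va > (0xffffffffu >> t0sz);   /* above the TTBR0 span -> TTBR1 */
    }
    return va >= ~(0xffffffffu >> t1sz);     /* inside the top T1 region      */
}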
*/ - ARMFaultType fault_type = ARMFault_Translation; - uint32_t level; - ARMVAParameters param; - uint64_t ttbr; - hwaddr descaddr, indexmask, indexmask_grainsize; - uint32_t tableattrs; - target_ulong page_size; - uint32_t attrs; - int32_t stride; - int addrsize, inputsize; - TCR *tcr = regime_tcr(env, mmu_idx); - int ap, ns, xn, pxn; - uint32_t el = regime_el(env, mmu_idx); - uint64_t descaddrmask; - bool aarch64 = arm_el_is_aa64(env, el); - bool guarded = false; - - /* TODO: This code does not support shareability levels. */ - if (aarch64) { - param = aa64_va_parameters(env, address, mmu_idx, - access_type != MMU_INST_FETCH); - level = 0; - addrsize = 64 - 8 * param.tbi; - inputsize = 64 - param.tsz; - } else { - param = aa32_va_parameters(env, address, mmu_idx); - level = 1; - addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32); - inputsize = addrsize - param.tsz; - } - - /* - * We determined the region when collecting the parameters, but we - * have not yet validated that the address is valid for the region. - * Extract the top bits and verify that they all match select. - * - * For aa32, if inputsize == addrsize, then we have selected the - * region by exclusion in aa32_va_parameters and there is no more - * validation to do here. - */ - if (inputsize < addrsize) { - target_ulong top_bits = sextract64(address, inputsize, - addrsize - inputsize); - if (-top_bits != param.select) { - /* The gap between the two regions is a Translation fault */ - fault_type = ARMFault_Translation; - goto do_fault; - } - } - - if (param.using64k) { - stride = 13; - } else if (param.using16k) { - stride = 11; - } else { - stride = 9; - } - - /* Note that QEMU ignores shareability and cacheability attributes, - * so we don't need to do anything with the SH, ORGN, IRGN fields - * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the - * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently - * implement any ASID-like capability so we can ignore it (instead - * we will always flush the TLB any time the ASID is changed). - */ - ttbr = regime_ttbr(env, mmu_idx, param.select); - - /* Here we should have set up all the parameters for the translation: - * inputsize, ttbr, epd, stride, tbi - */ - - if (param.epd) { - /* Translation table walk disabled => Translation fault on TLB miss - * Note: This is always 0 on 64-bit EL2 and EL3. - */ - goto do_fault; - } - - if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) { - /* The starting level depends on the virtual address size (which can - * be up to 48 bits) and the translation granule size. It indicates - * the number of strides (stride bits at a time) needed to - * consume the bits of the input address. In the pseudocode this is: - * level = 4 - RoundUp((inputsize - grainsize) / stride) - * where their 'inputsize' is our 'inputsize', 'grainsize' is - * our 'stride + 3' and 'stride' is our 'stride'. 
- * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: - * = 4 - (inputsize - stride - 3 + stride - 1) / stride - * = 4 - (inputsize - 4) / stride; - */ - level = 4 - (inputsize - 4) / stride; - } else { - /* For stage 2 translations the starting level is specified by the - * VTCR_EL2.SL0 field (whose interpretation depends on the page size) - */ - uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2); - uint32_t startlevel; - bool ok; - - if (!aarch64 || stride == 9) { - /* AArch32 or 4KB pages */ - startlevel = 2 - sl0; - - if (cpu_isar_feature(aa64_st, cpu)) { - startlevel &= 3; - } - } else { - /* 16KB or 64KB pages */ - startlevel = 3 - sl0; - } - - /* Check that the starting level is valid. */ - ok = check_s2_mmu_setup(cpu, aarch64, startlevel, - inputsize, stride); - if (!ok) { - fault_type = ARMFault_Translation; - goto do_fault; - } - level = startlevel; - } - - indexmask_grainsize = (1ULL << (stride + 3)) - 1; - indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; - - /* Now we can extract the actual base address from the TTBR */ - descaddr = extract64(ttbr, 0, 48); - /* - * We rely on this masking to clear the RES0 bits at the bottom of the TTBR - * and also to mask out CnP (bit 0) which could validly be non-zero. - */ - descaddr &= ~indexmask; - - /* The address field in the descriptor goes up to bit 39 for ARMv7 - * but up to bit 47 for ARMv8, but we use the descaddrmask - * up to bit 39 for AArch32, because we don't need other bits in that case - * to construct next descriptor address (anyway they should be all zeroes). - */ - descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) & - ~indexmask_grainsize; - - /* Secure accesses start with the page table in secure memory and - * can be downgraded to non-secure at any step. Non-secure accesses - * remain non-secure. We implement this by just ORing in the NSTable/NS - * bits at each step. - */ - tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4); - for (;;) { - uint64_t descriptor; - bool nstable; - - descaddr |= (address >> (stride * (4 - level))) & indexmask; - descaddr &= ~7ULL; - nstable = extract32(tableattrs, 4, 1); - descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi); - if (fi->type != ARMFault_None) { - goto do_fault; - } - - if (!(descriptor & 1) || - (!(descriptor & 2) && (level == 3))) { - /* Invalid, or the Reserved level 3 encoding */ - goto do_fault; - } - descaddr = descriptor & descaddrmask; - - if ((descriptor & 2) && (level < 3)) { - /* Table entry. The top five bits are attributes which may - * propagate down through lower levels of the table (and - * which are all arranged so that 0 means "no effect", so - * we can gather them up by ORing in the bits at each level). - */ - tableattrs |= extract64(descriptor, 59, 5); - level++; - indexmask = indexmask_grainsize; - continue; - } - /* Block entry at level 1 or 2, or page entry at level 3. - * These are basically the same thing, although the number - * of bits we pull in from the vaddr varies. 
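/*
 * Editor's note (not part of the patch): the start-level derivation in the
 * comment above collapses to a one-liner.  Sketch plus two worked values
 * (illustrative name):
 */
static int ex_s1_start_level(int inputsize, int stride)
{
    /* level = 4 - RoundUp((inputsize - grainsize) / stride), simplified */
    return 4 - (inputsize - 4) / stride;
}

/* 4KB granule (stride 9): 48-bit VA -> level 0, 39-bit VA -> level 1 */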
- */ - page_size = (1ULL << ((stride * (4 - level)) + 3)); - descaddr |= (address & (page_size - 1)); - /* Extract attributes from the descriptor */ - attrs = extract64(descriptor, 2, 10) - | (extract64(descriptor, 52, 12) << 10); - - if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { - /* Stage 2 table descriptors do not include any attribute fields */ - break; - } - /* Merge in attributes from table descriptors */ - attrs |= nstable << 3; /* NS */ - guarded = extract64(descriptor, 50, 1); /* GP */ - if (param.hpd) { - /* HPD disables all the table attributes except NSTable. */ - break; - } - attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ - /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 - * means "force PL1 access only", which means forcing AP[1] to 0. - */ - attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */ - attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */ - break; - } - /* Here descaddr is the final physical address, and attributes - * are all in attrs. - */ - fault_type = ARMFault_AccessFlag; - if ((attrs & (1 << 8)) == 0) { - /* Access flag */ - goto do_fault; - } - - ap = extract32(attrs, 4, 2); - - if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { - ns = mmu_idx == ARMMMUIdx_Stage2; - xn = extract32(attrs, 11, 2); - *prot = get_S2prot(env, ap, xn, s1_is_el0); - } else { - ns = extract32(attrs, 3, 1); - xn = extract32(attrs, 12, 1); - pxn = extract32(attrs, 11, 1); - *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); - } - - fault_type = ARMFault_Permission; - if (!(*prot & (1 << access_type))) { - goto do_fault; - } - - if (ns) { - /* The NS bit will (as required by the architecture) have no effect if - * the CPU doesn't support TZ or this is a non-secure translation - * regime, because the attribute will already be non-secure. - */ - txattrs->secure = false; - } - /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */ - if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) { - arm_tlb_bti_gp(txattrs) = true; - } - - if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { - cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4)); - } else { - /* Index into MAIR registers for cache attributes */ - uint8_t attrindx = extract32(attrs, 0, 3); - uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; - assert(attrindx <= 7); - cacheattrs->attrs = extract64(mair, attrindx * 8, 8); - } - cacheattrs->shareability = extract32(attrs, 6, 2); - - *phys_ptr = descaddr; - *page_size_ptr = page_size; - return false; - -do_fault: - fi->type = fault_type; - fi->level = level; - /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */ - fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 || - mmu_idx == ARMMMUIdx_Stage2_S); - fi->s1ns = mmu_idx == ARMMMUIdx_Stage2; - return true; -} - -static inline void get_phys_addr_pmsav7_default(CPUARMState *env, - ARMMMUIdx mmu_idx, - int32_t address, int *prot) -{ - if (!arm_feature(env, ARM_FEATURE_M)) { - *prot = PAGE_READ | PAGE_WRITE; - switch (address) { - case 0xF0000000 ... 0xFFFFFFFF: - if (regime_sctlr(env, mmu_idx) & SCTLR_V) { - /* hivecs execing is ok */ - *prot |= PAGE_EXEC; - } - break; - case 0x00000000 ... 0x7FFFFFFF: - *prot |= PAGE_EXEC; - break; - } - } else { - /* Default system address map for M profile cores. - * The architecture specifies which regions are execute-never; - * at the MPU level no other checks are defined. 
- */ - switch (address) { - case 0x00000000 ... 0x1fffffff: /* ROM */ - case 0x20000000 ... 0x3fffffff: /* SRAM */ - case 0x60000000 ... 0x7fffffff: /* RAM */ - case 0x80000000 ... 0x9fffffff: /* RAM */ - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - break; - case 0x40000000 ... 0x5fffffff: /* Peripheral */ - case 0xa0000000 ... 0xbfffffff: /* Device */ - case 0xc0000000 ... 0xdfffffff: /* Device */ - case 0xe0000000 ... 0xffffffff: /* System */ - *prot = PAGE_READ | PAGE_WRITE; - break; - default: - g_assert_not_reached(); - } - } -} - -static bool pmsav7_use_background_region(ARMCPU *cpu, - ARMMMUIdx mmu_idx, bool is_user) -{ - /* Return true if we should use the default memory map as a - * "background" region if there are no hits against any MPU regions. - */ - CPUARMState *env = &cpu->env; - - if (is_user) { - return false; - } - - if (arm_feature(env, ARM_FEATURE_M)) { - return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] - & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; - } else { - return regime_sctlr(env, mmu_idx) & SCTLR_BR; - } -} - -static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address) -{ - /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ - return arm_feature(env, ARM_FEATURE_M) && - extract32(address, 20, 12) == 0xe00; -} - -static inline bool m_is_system_region(CPUARMState *env, uint32_t address) -{ - /* True if address is in the M profile system region - * 0xe0000000 - 0xffffffff - */ - return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; -} - -static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - hwaddr *phys_ptr, int *prot, - target_ulong *page_size, - ARMMMUFaultInfo *fi) -{ - ARMCPU *cpu = env_archcpu(env); - int n; - bool is_user = regime_is_user(env, mmu_idx); - - *phys_ptr = address; - *page_size = TARGET_PAGE_SIZE; - *prot = 0; - - if (regime_translation_disabled(env, mmu_idx) || - m_is_ppb_region(env, address)) { - /* MPU disabled or M profile PPB access: use default memory map. - * The other case which uses the default memory map in the - * v7M ARM ARM pseudocode is exception vector reads from the vector - * table. In QEMU those accesses are done in arm_v7m_load_vector(), - * which always does a direct read using address_space_ldl(), rather - * than going via this function, so we don't need to check that here. - */ - get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); - } else { /* MPU enabled */ - for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { - /* region search */ - uint32_t base = env->pmsav7.drbar[n]; - uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); - uint32_t rmask; - bool srdis = false; - - if (!(env->pmsav7.drsr[n] & 0x1)) { - continue; - } - - if (!rsize) { - qemu_log_mask(LOG_GUEST_ERROR, - "DRSR[%d]: Rsize field cannot be 0\n", n); - continue; - } - rsize++; - rmask = (1ull << rsize) - 1; - - if (base & rmask) { - qemu_log_mask(LOG_GUEST_ERROR, - "DRBAR[%d]: 0x%" PRIx32 " misaligned " - "to DRSR region size, mask = 0x%" PRIx32 "\n", - n, base, rmask); - continue; - } - - if (address < base || address > base + rmask) { - /* - * Address not in this region. We must check whether the - * region covers addresses in the same page as our address. - * In that case we must not report a size that covers the - * whole page for a subsequent hit against a different MPU - * region or the background region, because it would result in - * incorrect TLB hits for subsequent accesses to addresses that - * are in this MPU region. 
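/*
 * Editor's note (not part of the patch): the PMSAv7 DRSR.SIZE decode above
 * turns the 5-bit field into a span of 2^(SIZE+1) bytes, and the base must
 * be aligned to that span.  Standalone sketch of the match test, assuming
 * the region is enabled and SIZE != 0 (illustrative name):
 */
#include <stdint.h>

static int ex_pmsav7_region_match(uint32_t base, uint32_t size_field,
                                  uint32_t addr)
{
    uint64_t rmask = (1ull << (size_field + 1)) - 1;   /* 2^(SIZE+1) - 1 */

    if (base & rmask) {
        return 0;                 /* misaligned base: region is ignored */
    }
    return addr >= base && addr <= base + rmask;
}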
- */ - if (ranges_overlap(base, rmask, - address & TARGET_PAGE_MASK, - TARGET_PAGE_SIZE)) { - *page_size = 1; - } - continue; - } - - /* Region matched */ - - if (rsize >= 8) { /* no subregions for regions < 256 bytes */ - int i, snd; - uint32_t srdis_mask; - - rsize -= 3; /* sub region size (power of 2) */ - snd = ((address - base) >> rsize) & 0x7; - srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); - - srdis_mask = srdis ? 0x3 : 0x0; - for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { - /* This will check in groups of 2, 4 and then 8, whether - * the subregion bits are consistent. rsize is incremented - * back up to give the region size, considering consistent - * adjacent subregions as one region. Stop testing if rsize - * is already big enough for an entire QEMU page. - */ - int snd_rounded = snd & ~(i - 1); - uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], - snd_rounded + 8, i); - if (srdis_mask ^ srdis_multi) { - break; - } - srdis_mask = (srdis_mask << i) | srdis_mask; - rsize++; - } - } - if (srdis) { - continue; - } - if (rsize < TARGET_PAGE_BITS) { - *page_size = 1 << rsize; - } - break; - } - - if (n == -1) { /* no hits */ - if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) { - /* background fault */ - fi->type = ARMFault_Background; - return true; - } - get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); - } else { /* a MPU hit! */ - uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); - uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); - - if (m_is_system_region(env, address)) { - /* System space is always execute never */ - xn = 1; - } - - if (is_user) { /* User mode AP bit decoding */ - switch (ap) { - case 0: - case 1: - case 5: - break; /* no access */ - case 3: - *prot |= PAGE_WRITE; - /* fall through */ - case 2: - case 6: - *prot |= PAGE_READ | PAGE_EXEC; - break; - case 7: - /* for v7M, same as 6; for R profile a reserved value */ - if (arm_feature(env, ARM_FEATURE_M)) { - *prot |= PAGE_READ | PAGE_EXEC; - break; - } - /* fall through */ - default: - qemu_log_mask(LOG_GUEST_ERROR, - "DRACR[%d]: Bad value for AP bits: 0x%" - PRIx32 "\n", n, ap); - } - } else { /* Priv. mode AP bits decoding */ - switch (ap) { - case 0: - break; /* no access */ - case 1: - case 2: - case 3: - *prot |= PAGE_WRITE; - /* fall through */ - case 5: - case 6: - *prot |= PAGE_READ | PAGE_EXEC; - break; - case 7: - /* for v7M, same as 6; for R profile a reserved value */ - if (arm_feature(env, ARM_FEATURE_M)) { - *prot |= PAGE_READ | PAGE_EXEC; - break; - } - /* fall through */ - default: - qemu_log_mask(LOG_GUEST_ERROR, - "DRACR[%d]: Bad value for AP bits: 0x%" - PRIx32 "\n", n, ap); - } - } - - /* execute never */ - if (xn) { - *prot &= ~PAGE_EXEC; - } - } - } - - fi->type = ARMFault_Permission; - fi->level = 1; - return !(*prot & (1 << access_type)); -} - -static bool v8m_is_sau_exempt(CPUARMState *env, - uint32_t address, MMUAccessType access_type) -{ - /* The architecture specifies that certain address ranges are - * exempt from v8M SAU/IDAU checks. 
- */ - return - (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || - (address >= 0xe0000000 && address <= 0xe0002fff) || - (address >= 0xe000e000 && address <= 0xe000efff) || - (address >= 0xe002e000 && address <= 0xe002efff) || - (address >= 0xe0040000 && address <= 0xe0041fff) || - (address >= 0xe00ff000 && address <= 0xe00fffff); -} - -void v8m_security_lookup(CPUARMState *env, uint32_t address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - V8M_SAttributes *sattrs) -{ - /* Look up the security attributes for this address. Compare the - * pseudocode SecurityCheck() function. - * We assume the caller has zero-initialized *sattrs. - */ - ARMCPU *cpu = env_archcpu(env); - int r; - bool idau_exempt = false, idau_ns = true, idau_nsc = true; - int idau_region = IREGION_NOTVALID; - uint32_t addr_page_base = address & TARGET_PAGE_MASK; - uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); - - if (cpu->idau) { - IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); - IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); - - iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, - &idau_nsc); - } - - if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { - /* 0xf0000000..0xffffffff is always S for insn fetches */ - return; - } - - if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { - sattrs->ns = !regime_is_secure(env, mmu_idx); - return; - } - - if (idau_region != IREGION_NOTVALID) { - sattrs->irvalid = true; - sattrs->iregion = idau_region; - } - - switch (env->sau.ctrl & 3) { - case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ - break; - case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ - sattrs->ns = true; - break; - default: /* SAU.ENABLE == 1 */ - for (r = 0; r < cpu->sau_sregion; r++) { - if (env->sau.rlar[r] & 1) { - uint32_t base = env->sau.rbar[r] & ~0x1f; - uint32_t limit = env->sau.rlar[r] | 0x1f; - - if (base <= address && limit >= address) { - if (base > addr_page_base || limit < addr_page_limit) { - sattrs->subpage = true; - } - if (sattrs->srvalid) { - /* If we hit in more than one region then we must report - * as Secure, not NS-Callable, with no valid region - * number info. - */ - sattrs->ns = false; - sattrs->nsc = false; - sattrs->sregion = 0; - sattrs->srvalid = false; - break; - } else { - if (env->sau.rlar[r] & 2) { - sattrs->nsc = true; - } else { - sattrs->ns = true; - } - sattrs->srvalid = true; - sattrs->sregion = r; - } - } else { - /* - * Address not in this region. We must check whether the - * region covers addresses in the same page as our address. - * In that case we must not report a size that covers the - * whole page for a subsequent hit against a different MPU - * region or the background region, because it would result - * in incorrect TLB hits for subsequent accesses to - * addresses that are in this MPU region. - */ - if (limit >= base && - ranges_overlap(base, limit - base + 1, - addr_page_base, - TARGET_PAGE_SIZE)) { - sattrs->subpage = true; - } - } - } - } - break; - } - - /* - * The IDAU will override the SAU lookup results if it specifies - * higher security than the SAU does. 
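/*
 * Editor's note (not part of the patch): the v8-M SAU regions matched above
 * are 32-byte granular -- the base drops RBAR[4:0] and the limit fills
 * RLAR[4:0] with ones.  Minimal sketch of the hit test (illustrative name):
 */
#include <stdint.h>

static int ex_sau_region_hit(uint32_t rbar, uint32_t rlar, uint32_t addr)
{
    uint32_t base  = rbar & ~0x1fu;
    uint32_t limit = rlar | 0x1fu;

    return (rlar & 1) &&                   /* region enabled             */
           addr >= base && addr <= limit;  /* inclusive, 32-byte granule */
}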
- */ - if (!idau_ns) { - if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { - sattrs->ns = false; - sattrs->nsc = idau_nsc; - } - } -} - -bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - hwaddr *phys_ptr, MemTxAttrs *txattrs, - int *prot, bool *is_subpage, - ARMMMUFaultInfo *fi, uint32_t *mregion) -{ - /* Perform a PMSAv8 MPU lookup (without also doing the SAU check - * that a full phys-to-virt translation does). - * mregion is (if not NULL) set to the region number which matched, - * or -1 if no region number is returned (MPU off, address did not - * hit a region, address hit in multiple regions). - * We set is_subpage to true if the region hit doesn't cover the - * entire TARGET_PAGE the address is within. - */ - ARMCPU *cpu = env_archcpu(env); - bool is_user = regime_is_user(env, mmu_idx); - uint32_t secure = regime_is_secure(env, mmu_idx); - int n; - int matchregion = -1; - bool hit = false; - uint32_t addr_page_base = address & TARGET_PAGE_MASK; - uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); - - *is_subpage = false; - *phys_ptr = address; - *prot = 0; - if (mregion) { - *mregion = -1; - } - - /* Unlike the ARM ARM pseudocode, we don't need to check whether this - * was an exception vector read from the vector table (which is always - * done using the default system address map), because those accesses - * are done in arm_v7m_load_vector(), which always does a direct - * read using address_space_ldl(), rather than going via this function. - */ - if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */ - hit = true; - } else if (m_is_ppb_region(env, address)) { - hit = true; - } else { - if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) { - hit = true; - } - - for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { - /* region search */ - /* Note that the base address is bits [31:5] from the register - * with bits [4:0] all zeroes, but the limit address is bits - * [31:5] from the register with bits [4:0] all ones. - */ - uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f; - uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f; - - if (!(env->pmsav8.rlar[secure][n] & 0x1)) { - /* Region disabled */ - continue; - } - - if (address < base || address > limit) { - /* - * Address not in this region. We must check whether the - * region covers addresses in the same page as our address. - * In that case we must not report a size that covers the - * whole page for a subsequent hit against a different MPU - * region or the background region, because it would result in - * incorrect TLB hits for subsequent accesses to addresses that - * are in this MPU region. 
- */ - if (limit >= base && - ranges_overlap(base, limit - base + 1, - addr_page_base, - TARGET_PAGE_SIZE)) { - *is_subpage = true; - } - continue; - } - - if (base > addr_page_base || limit < addr_page_limit) { - *is_subpage = true; - } - - if (matchregion != -1) { - /* Multiple regions match -- always a failure (unlike - * PMSAv7 where highest-numbered-region wins) - */ - fi->type = ARMFault_Permission; - fi->level = 1; - return true; - } - - matchregion = n; - hit = true; - } - } - - if (!hit) { - /* background fault */ - fi->type = ARMFault_Background; - return true; - } - - if (matchregion == -1) { - /* hit using the background region */ - get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); - } else { - uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2); - uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1); - bool pxn = false; - - if (arm_feature(env, ARM_FEATURE_V8_1M)) { - pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1); - } - - if (m_is_system_region(env, address)) { - /* System space is always execute never */ - xn = 1; - } - - *prot = simple_ap_to_rw_prot(env, mmu_idx, ap); - if (*prot && !xn && !(pxn && !is_user)) { - *prot |= PAGE_EXEC; - } - /* We don't need to look the attribute up in the MAIR0/MAIR1 - * registers because that only tells us about cacheability. - */ - if (mregion) { - *mregion = matchregion; - } - } - - fi->type = ARMFault_Permission; - fi->level = 1; - return !(*prot & (1 << access_type)); -} - - -static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - hwaddr *phys_ptr, MemTxAttrs *txattrs, - int *prot, target_ulong *page_size, - ARMMMUFaultInfo *fi) -{ - uint32_t secure = regime_is_secure(env, mmu_idx); - V8M_SAttributes sattrs = {}; - bool ret; - bool mpu_is_subpage; - - if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { - v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); - if (access_type == MMU_INST_FETCH) { - /* Instruction fetches always use the MMU bank and the - * transaction attribute determined by the fetch address, - * regardless of CPU state. This is painful for QEMU - * to handle, because it would mean we need to encode - * into the mmu_idx not just the (user, negpri) information - * for the current security state but also that for the - * other security state, which would balloon the number - * of mmu_idx values needed alarmingly. - * Fortunately we can avoid this because it's not actually - * possible to arbitrarily execute code from memory with - * the wrong security attribute: it will always generate - * an exception of some kind or another, apart from the - * special case of an NS CPU executing an SG instruction - * in S&NSC memory. So we always just fail the translation - * here and sort things out in the exception handler - * (including possibly emulating an SG instruction). - */ - if (sattrs.ns != !secure) { - if (sattrs.nsc) { - fi->type = ARMFault_QEMU_NSCExec; - } else { - fi->type = ARMFault_QEMU_SFault; - } - *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; - *phys_ptr = address; - *prot = 0; - return true; - } - } else { - /* For data accesses we always use the MMU bank indicated - * by the current CPU state, but the security attributes - * might downgrade a secure access to nonsecure. - */ - if (sattrs.ns) { - txattrs->secure = false; - } else if (!secure) { - /* NS access to S memory must fault. 
- * Architecturally we should first check whether the - * MPU information for this address indicates that we - * are doing an unaligned access to Device memory, which - * should generate a UsageFault instead. QEMU does not - * currently check for that kind of unaligned access though. - * If we added it we would need to do so as a special case - * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). - */ - fi->type = ARMFault_QEMU_SFault; - *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; - *phys_ptr = address; - *prot = 0; - return true; - } - } - } - - ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, - txattrs, prot, &mpu_is_subpage, fi, NULL); - *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE; - return ret; -} - -static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - hwaddr *phys_ptr, int *prot, - ARMMMUFaultInfo *fi) -{ - int n; - uint32_t mask; - uint32_t base; - bool is_user = regime_is_user(env, mmu_idx); - - if (regime_translation_disabled(env, mmu_idx)) { - /* MPU disabled. */ - *phys_ptr = address; - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - return false; - } - - *phys_ptr = address; - for (n = 7; n >= 0; n--) { - base = env->cp15.c6_region[n]; - if ((base & 1) == 0) { - continue; - } - mask = 1 << ((base >> 1) & 0x1f); - /* Keep this shift separate from the above to avoid an - (undefined) << 32. */ - mask = (mask << 1) - 1; - if (((base ^ address) & ~mask) == 0) { - break; - } - } - if (n < 0) { - fi->type = ARMFault_Background; - return true; - } - - if (access_type == MMU_INST_FETCH) { - mask = env->cp15.pmsav5_insn_ap; - } else { - mask = env->cp15.pmsav5_data_ap; - } - mask = (mask >> (n * 4)) & 0xf; - switch (mask) { - case 0: - fi->type = ARMFault_Permission; - fi->level = 1; - return true; - case 1: - if (is_user) { - fi->type = ARMFault_Permission; - fi->level = 1; - return true; - } - *prot = PAGE_READ | PAGE_WRITE; - break; - case 2: - *prot = PAGE_READ; - if (!is_user) { - *prot |= PAGE_WRITE; - } - break; - case 3: - *prot = PAGE_READ | PAGE_WRITE; - break; - case 5: - if (is_user) { - fi->type = ARMFault_Permission; - fi->level = 1; - return true; - } - *prot = PAGE_READ; - break; - case 6: - *prot = PAGE_READ; - break; - default: - /* Bad permission. */ - fi->type = ARMFault_Permission; - fi->level = 1; - return true; - } - *prot |= PAGE_EXEC; - return false; -} - -/* Combine either inner or outer cacheability attributes for normal - * memory, according to table D4-42 and pseudocode procedure - * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). - * - * NB: only stage 1 includes allocation hints (RW bits), leading to - * some asymmetry. 
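/*
 * Editor's note (not part of the patch): the PMSAv5 match above encodes the
 * region size in bits [5:1] of the base register, giving a span of
 * 2^(SIZE+1) bytes, and compares base against address under that mask.
 * Standalone sketch mirroring that decode (illustrative name):
 */
#include <stdint.h>

static int ex_pmsav5_region_hit(uint32_t region_reg, uint32_t addr)
{
    uint32_t mask;

    if (!(region_reg & 1)) {
        return 0;                            /* region disabled */
    }
    mask = 1u << ((region_reg >> 1) & 0x1f);
    mask = (mask << 1) - 1;                  /* 2^(SIZE+1) - 1, two steps
                                              * to avoid a 32-bit shift  */
    return ((region_reg ^ addr) & ~mask) == 0;
}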
- */ -static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) -{ - if (s1 == 4 || s2 == 4) { - /* non-cacheable has precedence */ - return 4; - } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { - /* stage 1 write-through takes precedence */ - return s1; - } else if (extract32(s2, 2, 2) == 2) { - /* stage 2 write-through takes precedence, but the allocation hint - * is still taken from stage 1 - */ - return (2 << 2) | extract32(s1, 0, 2); - } else { /* write-back */ - return s1; - } -} - -/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 - * and CombineS1S2Desc() - * - * @s1: Attributes from stage 1 walk - * @s2: Attributes from stage 2 walk - */ -static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2) -{ - uint8_t s1lo, s2lo, s1hi, s2hi; - ARMCacheAttrs ret; - bool tagged = false; - - if (s1.attrs == 0xf0) { - tagged = true; - s1.attrs = 0xff; - } - - s1lo = extract32(s1.attrs, 0, 4); - s2lo = extract32(s2.attrs, 0, 4); - s1hi = extract32(s1.attrs, 4, 4); - s2hi = extract32(s2.attrs, 4, 4); - - /* Combine shareability attributes (table D4-43) */ - if (s1.shareability == 2 || s2.shareability == 2) { - /* if either are outer-shareable, the result is outer-shareable */ - ret.shareability = 2; - } else if (s1.shareability == 3 || s2.shareability == 3) { - /* if either are inner-shareable, the result is inner-shareable */ - ret.shareability = 3; - } else { - /* both non-shareable */ - ret.shareability = 0; - } - - /* Combine memory type and cacheability attributes */ - if (s1hi == 0 || s2hi == 0) { - /* Device has precedence over normal */ - if (s1lo == 0 || s2lo == 0) { - /* nGnRnE has precedence over anything */ - ret.attrs = 0; - } else if (s1lo == 4 || s2lo == 4) { - /* non-Reordering has precedence over Reordering */ - ret.attrs = 4; /* nGnRE */ - } else if (s1lo == 8 || s2lo == 8) { - /* non-Gathering has precedence over Gathering */ - ret.attrs = 8; /* nGRE */ - } else { - ret.attrs = 0xc; /* GRE */ - } - - /* Any location for which the resultant memory type is any - * type of Device memory is always treated as Outer Shareable. - */ - ret.shareability = 2; - } else { /* Normal memory */ - /* Outer/inner cacheability combine independently */ - ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 - | combine_cacheattr_nibble(s1lo, s2lo); - - if (ret.attrs == 0x44) { - /* Any location for which the resultant memory type is Normal - * Inner Non-cacheable, Outer Non-cacheable is always treated - * as Outer Shareable. - */ - ret.shareability = 2; - } - } - - /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */ - if (tagged && ret.attrs == 0xff) { - ret.attrs = 0xf0; - } - - return ret; -} - - -/* get_phys_addr - get the physical address for this virtual address - * - * Find the physical address corresponding to the given virtual address, - * by doing a translation table walk on MMU based systems or using the - * MPU state on MPU based systems. - * - * Returns false if the translation was successful. Otherwise, phys_ptr, attrs, - * prot and page_size may not be filled in, and the populated fsr value provides - * information on why the translation aborted, in the format of a - * DFSR/IFSR fault register, with the following caveats: - * * we honour the short vs long DFSR format differences. - * * the WnR bit is never set (the caller must do this). - * * for PSMAv5 based systems we don't bother to return a full FSR format - * value. 
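/*
 * Editor's note (not part of the patch): combine_cacheattr_nibble() above
 * implements the precedence "Non-cacheable beats Write-Through beats
 * Write-Back", with the allocation hints always taken from stage 1.  A
 * deliberately simplified sketch of just that precedence, using small
 * illustrative values rather than QEMU's MAIR nibble encoding:
 */
enum { EX_NONCACHEABLE, EX_WRITETHROUGH, EX_WRITEBACK };

static int ex_combine_cacheability(int s1, int s2)
{
    /* The more restrictive (numerically smaller) type wins. */
    return s1 < s2 ? s1 : s2;
}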
- * - * @env: CPUARMState - * @address: virtual address to get physical address for - * @access_type: 0 for read, 1 for write, 2 for execute - * @mmu_idx: MMU index indicating required translation regime - * @phys_ptr: set to the physical address corresponding to the virtual address - * @attrs: set to the memory transaction attributes to use - * @prot: set to the permissions for the page containing phys_ptr - * @page_size: set to the size of the page containing phys_ptr - * @fi: set to fault info if the translation fails - * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes - */ -bool get_phys_addr(CPUARMState *env, target_ulong address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, - target_ulong *page_size, - ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) -{ - ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx); - - if (mmu_idx != s1_mmu_idx) { - /* Call ourselves recursively to do the stage 1 and then stage 2 - * translations if mmu_idx is a two-stage regime. - */ - if (arm_feature(env, ARM_FEATURE_EL2)) { - hwaddr ipa; - int s2_prot; - int ret; - ARMCacheAttrs cacheattrs2 = {}; - ARMMMUIdx s2_mmu_idx; - bool is_el0; - - ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa, - attrs, prot, page_size, fi, cacheattrs); - - /* If S1 fails or S2 is disabled, return early. */ - if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) { - *phys_ptr = ipa; - return ret; - } - - s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; - is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0; - - /* S1 is done. Now do S2 translation. */ - ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0, - phys_ptr, attrs, &s2_prot, - page_size, fi, &cacheattrs2); - fi->s2addr = ipa; - /* Combine the S1 and S2 perms. */ - *prot &= s2_prot; - - /* If S2 fails, return early. */ - if (ret) { - return ret; - } - - /* Combine the S1 and S2 cache attributes. */ - if (arm_hcr_el2_eff(env) & HCR_DC) { - /* - * HCR.DC forces the first stage attributes to - * Normal Non-Shareable, - * Inner Write-Back Read-Allocate Write-Allocate, - * Outer Write-Back Read-Allocate Write-Allocate. - * Do not overwrite Tagged within attrs. - */ - if (cacheattrs->attrs != 0xf0) { - cacheattrs->attrs = 0xff; - } - cacheattrs->shareability = 0; - } - *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2); - - /* Check if IPA translates to secure or non-secure PA space. */ - if (arm_is_secure_below_el3(env)) { - if (attrs->secure) { - attrs->secure = - !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW)); - } else { - attrs->secure = - !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW)) - || (env->cp15.vstcr_el2.raw_tcr & VSTCR_SA)); - } - } - return 0; - } else { - /* - * For non-EL2 CPUs a stage1+stage2 translation is just stage 1. - */ - mmu_idx = stage_1_mmu_idx(mmu_idx); - } - } - - /* The page table entries may downgrade secure to non-secure, but - * cannot upgrade an non-secure translation regime's attributes - * to secure. - */ - attrs->secure = regime_is_secure(env, mmu_idx); - attrs->user = regime_is_user(env, mmu_idx); - - /* Fast Context Switch Extension. This doesn't exist at all in v8. - * In v7 and earlier it affects all stage 1 translations. 
- */ - if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2 - && !arm_feature(env, ARM_FEATURE_V8)) { - if (regime_el(env, mmu_idx) == 3) { - address += env->cp15.fcseidr_s; - } else { - address += env->cp15.fcseidr_ns; - } - } - - if (arm_feature(env, ARM_FEATURE_PMSA)) { - bool ret; - *page_size = TARGET_PAGE_SIZE; - - if (arm_feature(env, ARM_FEATURE_V8)) { - /* PMSAv8 */ - ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, - phys_ptr, attrs, prot, page_size, fi); - } else if (arm_feature(env, ARM_FEATURE_V7)) { - /* PMSAv7 */ - ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, - phys_ptr, prot, page_size, fi); - } else { - /* Pre-v7 MPU */ - ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, - phys_ptr, prot, fi); - } - qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 - " mmu_idx %u -> %s (prot %c%c%c)\n", - access_type == MMU_DATA_LOAD ? "reading" : - (access_type == MMU_DATA_STORE ? "writing" : "execute"), - (uint32_t)address, mmu_idx, - ret ? "Miss" : "Hit", - *prot & PAGE_READ ? 'r' : '-', - *prot & PAGE_WRITE ? 'w' : '-', - *prot & PAGE_EXEC ? 'x' : '-'); - - return ret; - } - - /* Definitely a real MMU, not an MPU */ - - if (regime_translation_disabled(env, mmu_idx)) { - uint64_t hcr; - uint8_t memattr; - - /* - * MMU disabled. S1 addresses within aa64 translation regimes are - * still checked for bounds -- see AArch64.TranslateAddressS1Off. - */ - if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) { - int r_el = regime_el(env, mmu_idx); - if (arm_el_is_aa64(env, r_el)) { - int pamax = arm_pamax(env_archcpu(env)); - uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr; - int addrtop, tbi; - - tbi = aa64_va_parameter_tbi(tcr, mmu_idx); - if (access_type == MMU_INST_FETCH) { - tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); - } - tbi = (tbi >> extract64(address, 55, 1)) & 1; - addrtop = (tbi ? 55 : 63); - - if (extract64(address, pamax, addrtop - pamax + 1) != 0) { - fi->type = ARMFault_AddressSize; - fi->level = 0; - fi->stage2 = false; - return 1; - } - - /* - * When TBI is disabled, we've just validated that all of the - * bits above PAMax are zero, so logically we only need to - * clear the top byte for TBI. But it's clearer to follow - * the pseudocode set of addrdesc.paddress. - */ - address = extract64(address, 0, 52); - } - } - *phys_ptr = address; - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - *page_size = TARGET_PAGE_SIZE; - - /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. 
*/ - hcr = arm_hcr_el2_eff(env); - cacheattrs->shareability = 0; - if (hcr & HCR_DC) { - if (hcr & HCR_DCT) { - memattr = 0xf0; /* Tagged, Normal, WB, RWA */ - } else { - memattr = 0xff; /* Normal, WB, RWA */ - } - } else if (access_type == MMU_INST_FETCH) { - if (regime_sctlr(env, mmu_idx) & SCTLR_I) { - memattr = 0xee; /* Normal, WT, RA, NT */ - } else { - memattr = 0x44; /* Normal, NC, No */ - } - cacheattrs->shareability = 2; /* outer sharable */ - } else { - memattr = 0x00; /* Device, nGnRnE */ - } - cacheattrs->attrs = memattr; - return 0; - } - - if (regime_using_lpae_format(env, mmu_idx)) { - return get_phys_addr_lpae(env, address, access_type, mmu_idx, false, - phys_ptr, attrs, prot, page_size, - fi, cacheattrs); - } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) { - return get_phys_addr_v6(env, address, access_type, mmu_idx, - phys_ptr, attrs, prot, page_size, fi); - } else { - return get_phys_addr_v5(env, address, access_type, mmu_idx, - phys_ptr, prot, page_size, fi); - } -} - -hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, - MemTxAttrs *attrs) -{ - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - hwaddr phys_addr; - target_ulong page_size; - int prot; - bool ret; - ARMMMUFaultInfo fi = {}; - ARMMMUIdx mmu_idx = arm_mmu_idx(env); - ARMCacheAttrs cacheattrs = {}; - - *attrs = (MemTxAttrs) {}; - - ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr, - attrs, &prot, &page_size, &fi, &cacheattrs); - - if (ret) { - return -1; - } - return phys_addr; -} - -#endif - /* Note that signed overflow is undefined in C. The following routines are careful to use unsigned types where modulo arithmetic is required. Failure to do so _will_ break on newer gcc. */ @@ -13130,6 +10876,8 @@ uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) int fp_exception_el(CPUARMState *env, int cur_el) { #ifndef CONFIG_USER_ONLY + uint64_t hcr_el2; + /* CPACR and the CPTR registers don't exist before v6, so FP is * always accessible */ @@ -13153,37 +10901,34 @@ int fp_exception_el(CPUARMState *env, int cur_el) return 0; } + hcr_el2 = arm_hcr_el2_eff(env); + /* The CPACR controls traps to EL1, or PL1 if we're 32 bit: * 0, 2 : trap EL0 and EL1/PL1 accesses * 1 : trap only EL0 accesses * 3 : trap no accesses * This register is ignored if E2H+TGE are both set. */ - if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { - int fpen = extract32(env->cp15.cpacr_el1, 20, 2); + if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { + int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN); switch (fpen) { + case 1: + if (cur_el != 0) { + break; + } + /* fall through */ case 0: case 2: - if (cur_el == 0 || cur_el == 1) { - /* Trap to PL1, which might be EL1 or EL3 */ - if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { - return 3; - } - return 1; - } - if (cur_el == 3 && !is_a64(env)) { - /* Secure PL1 running at EL3 */ + /* Trap from Secure PL0 or PL1 to Secure PL1. */ + if (!arm_el_is_aa64(env, 3) + && (cur_el == 3 || arm_is_secure_below_el3(env))) { return 3; } - break; - case 1: - if (cur_el == 0) { + if (cur_el <= 1) { return 1; } break; - case 3: - break; } } @@ -13200,19 +10945,31 @@ int fp_exception_el(CPUARMState *env, int cur_el) } } - /* For the CPTR registers we don't need to guard with an ARM_FEATURE - * check because zero bits in the registers mean "don't trap". + /* + * CPTR_EL2 is present in v7VE or v8, and changes format + * with HCR_EL2.E2H (regardless of TGE). 
*/ - - /* CPTR_EL2 : present in v7VE or v8 */ - if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1) - && arm_is_el2_enabled(env)) { - /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */ - return 2; + if (cur_el <= 2) { + if (hcr_el2 & HCR_E2H) { + switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) { + case 1: + if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) { + break; + } + /* fall through */ + case 0: + case 2: + return 2; + } + } else if (arm_is_el2_enabled(env)) { + if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) { + return 2; + } + } } /* CPTR_EL3 : present in v8 */ - if (extract32(env->cp15.cptr_el[3], 10, 1)) { + if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) { /* Trap all FP ops to EL3 */ return 3; } @@ -13230,22 +10987,15 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx) switch (mmu_idx) { case ARMMMUIdx_E10_0: case ARMMMUIdx_E20_0: - case ARMMMUIdx_SE10_0: - case ARMMMUIdx_SE20_0: return 0; case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: - case ARMMMUIdx_SE10_1: - case ARMMMUIdx_SE10_1_PAN: return 1; case ARMMMUIdx_E2: case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: - case ARMMMUIdx_SE2: - case ARMMMUIdx_SE20_2: - case ARMMMUIdx_SE20_2_PAN: return 2; - case ARMMMUIdx_SE3: + case ARMMMUIdx_E3: return 3; default: g_assert_not_reached(); @@ -13259,6 +11009,15 @@ ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate) } #endif +static bool arm_pan_enabled(CPUARMState *env) +{ + if (is_a64(env)) { + return env->pstate & PSTATE_PAN; + } else { + return env->uncached_cpsr & CPSR_PAN; + } +} + ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) { ARMMMUIdx idx; @@ -13279,7 +11038,7 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) } break; case 1: - if (env->pstate & PSTATE_PAN) { + if (arm_pan_enabled(env)) { idx = ARMMMUIdx_E10_1_PAN; } else { idx = ARMMMUIdx_E10_1; @@ -13288,7 +11047,7 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) case 2: /* Note that TGE does not apply at EL2. 
*/ if (arm_hcr_el2_eff(env) & HCR_E2H) { - if (env->pstate & PSTATE_PAN) { + if (arm_pan_enabled(env)) { idx = ARMMMUIdx_E20_2_PAN; } else { idx = ARMMMUIdx_E20_2; @@ -13298,15 +11057,11 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) } break; case 3: - return ARMMMUIdx_SE3; + return ARMMMUIdx_E3; default: g_assert_not_reached(); } - if (arm_is_secure_below_el3(env)) { - idx &= ~ARM_MMU_IDX_A_NS; - } - return idx; } @@ -13315,13 +11070,6 @@ ARMMMUIdx arm_mmu_idx(CPUARMState *env) return arm_mmu_idx_el(env, arm_current_el(env)); } -#ifndef CONFIG_USER_ONLY -ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env) -{ - return stage_1_mmu_idx(arm_mmu_idx(env)); -} -#endif - static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el, ARMMMUIdx mmu_idx, CPUARMTBFlags flags) @@ -13378,21 +11126,17 @@ static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el, DP_TBFLAG_M32(flags, STACKCHECK, 1); } + if (arm_feature(env, ARM_FEATURE_M_SECURITY) && env->v7m.secure) { + DP_TBFLAG_M32(flags, SECURE, 1); + } + return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); } -static CPUARMTBFlags rebuild_hflags_aprofile(CPUARMState *env) -{ - CPUARMTBFlags flags = {}; - - DP_TBFLAG_ANY(flags, DEBUG_TARGET_EL, arm_debug_target_el(env)); - return flags; -} - static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el, ARMMMUIdx mmu_idx) { - CPUARMTBFlags flags = rebuild_hflags_aprofile(env); + CPUARMTBFlags flags = {}; int el = arm_current_el(env); if (arm_sctlr(env, el) & SCTLR_A) { @@ -13408,15 +11152,33 @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el, DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1); } + if (env->uncached_cpsr & CPSR_IL) { + DP_TBFLAG_ANY(flags, PSTATE__IL, 1); + } + + /* + * The SME exception we are testing for is raised via + * AArch64.CheckFPAdvSIMDEnabled(), as called from + * AArch32.CheckAdvSIMDOrFPEnabled(). + */ + if (el == 0 + && FIELD_EX64(env->svcr, SVCR, SM) + && (!arm_is_el2_enabled(env) + || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE))) + && arm_el_is_aa64(env, 1) + && !sme_fa64(env, el)) { + DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1); + } + return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); } static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, ARMMMUIdx mmu_idx) { - CPUARMTBFlags flags = rebuild_hflags_aprofile(env); + CPUARMTBFlags flags = {}; ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx); - uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; + uint64_t tcr = regime_tcr(env, mmu_idx); uint64_t sctlr; int tbii, tbid; @@ -13431,19 +11193,41 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { int sve_el = sve_exception_el(env, el); - uint32_t zcr_len; /* - * If SVE is disabled, but FP is enabled, - * then the effective len is 0. + * If either FP or SVE are disabled, translator does not need len. + * If SVE EL > FP EL, FP exception has precedence, and translator + * does not need SVE EL. Save potential re-translations by forcing + * the unneeded data to zero. 
*/ - if (sve_el != 0 && fp_el == 0) { - zcr_len = 0; - } else { - zcr_len = sve_zcr_len_for_el(env, el); + if (fp_el != 0) { + if (sve_el > fp_el) { + sve_el = 0; + } + } else if (sve_el == 0) { + DP_TBFLAG_A64(flags, VL, sve_vqm1_for_el(env, el)); } DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el); - DP_TBFLAG_A64(flags, ZCR_LEN, zcr_len); + } + if (cpu_isar_feature(aa64_sme, env_archcpu(env))) { + int sme_el = sme_exception_el(env, el); + bool sm = FIELD_EX64(env->svcr, SVCR, SM); + + DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el); + if (sme_el == 0) { + /* Similarly, do not compute SVL if SME is disabled. */ + int svl = sve_vqm1_for_el_sm(env, el, true); + DP_TBFLAG_A64(flags, SVL, svl); + if (sm) { + /* If SVE is disabled, we will not have set VL above. */ + DP_TBFLAG_A64(flags, VL, svl); + } + } + if (sm) { + DP_TBFLAG_A64(flags, PSTATE_SM, 1); + DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el)); + } + DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA)); } sctlr = regime_sctlr(env, stage1); @@ -13480,15 +11264,11 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, switch (mmu_idx) { case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: - case ARMMMUIdx_SE10_1: - case ARMMMUIdx_SE10_1_PAN: /* TODO: ARMv8.3-NV */ DP_TBFLAG_A64(flags, UNPRIV, 1); break; case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: - case ARMMMUIdx_SE20_2: - case ARMMMUIdx_SE20_2_PAN: /* * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is * gated by HCR_EL2. == '11', and so is LDTR. @@ -13502,6 +11282,10 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, } } + if (env->pstate & PSTATE_IL) { + DP_TBFLAG_ANY(flags, PSTATE__IL, 1); + } + if (cpu_isar_feature(aa64_mte, env_archcpu(env))) { /* * Set MTE_ACTIVE if any access may be Checked, and leave clear @@ -13619,6 +11403,35 @@ static inline void assert_hflags_rebuild_correctly(CPUARMState *env) #endif } +static bool mve_no_pred(CPUARMState *env) +{ + /* + * Return true if there is definitely no predication of MVE + * instructions by VPR or LTPSIZE. (Returning false even if there + * isn't any predication is OK; generated code will just be + * a little worse.) + * If the CPU does not implement MVE then this TB flag is always 0. + * + * NOTE: if you change this logic, the "recalculate s->mve_no_pred" + * logic in gen_update_fp_context() needs to be updated to match. + * + * We do not include the effect of the ECI bits here -- they are + * tracked in other TB flags. This simplifies the logic for + * "when did we emit code that changes the MVE_NO_PRED TB flag + * and thus need to end the TB?". + */ + if (cpu_isar_feature(aa32_mve, env_archcpu(env))) { + return false; + } + if (env->v7m.vpr) { + return false; + } + if (env->v7m.ltpsize < 4) { + return false; + } + return true; +} + void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, target_ulong *cs_base, uint32_t *pflags) { @@ -13658,6 +11471,10 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) { DP_TBFLAG_M32(flags, LSPACT, 1); } + + if (mve_no_pred(env)) { + DP_TBFLAG_M32(flags, MVE_NO_PRED, 1); + } } else { /* * Note that XSCALE_CPAR shares bits with VECSTRIDE. 
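The rebuild_hflags_a64() hunks above follow one rule: any piece of state the translator will not consult is forced to zero before it is folded into the TB flag word, so two CPU states that would generate identical code also produce identical flags and can share a cached translation (the "Save potential re-translations by forcing the unneeded data to zero" comment). A minimal sketch of that canonicalisation, assuming a hypothetical plain-struct key rather than QEMU's real DP_TBFLAG_* packing:

#include <stdint.h>

/* Hypothetical flag layout, for illustration only. */
struct tb_key {
    uint32_t fp_excp_el;   /* 0 means FP is usable without trapping */
    uint32_t sve_excp_el;  /* 0 means SVE is usable without trapping */
    uint32_t vl;           /* vector length; only read when SVE is usable */
};

static struct tb_key make_tb_key(uint32_t fp_el, uint32_t sve_el, uint32_t vl)
{
    struct tb_key k = { .fp_excp_el = fp_el };

    /*
     * When the SVE trap EL is higher than the FP trap EL, the FP exception
     * takes precedence and the SVE EL is never consulted; when either unit
     * traps, the vector length is never consulted.  Zeroing the unused
     * fields makes such states compare equal, so an existing translation
     * is reused instead of being regenerated with a different but
     * irrelevant flag word.
     */
    if (fp_el != 0 && sve_el > fp_el) {
        sve_el = 0;
    }
    k.sve_excp_el = sve_el;
    if (fp_el == 0 && sve_el == 0) {
        k.vl = vl;
    }
    return k;
}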
@@ -13736,6 +11553,21 @@ void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) } } +static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm) +{ + int exc_el; + + if (sm) { + exc_el = sme_exception_el(env, el); + } else { + exc_el = sve_exception_el(env, el); + } + if (exc_el) { + return 0; /* disabled */ + } + return sve_vqm1_for_el_sm(env, el, sm); +} + /* * Notice a change in SVE vector size when changing EL. */ @@ -13744,7 +11576,7 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el, { ARMCPU *cpu = env_archcpu(env); int old_len, new_len; - bool old_a64, new_a64; + bool old_a64, new_a64, sm; /* Nothing to do if no SVE. */ if (!cpu_isar_feature(aa64_sve, cpu)) { @@ -13756,6 +11588,20 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el, return; } + old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64; + new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64; + + /* + * Both AArch64.TakeException and AArch64.ExceptionReturn + * invoke ResetSVEState when taking an exception from, or + * returning to, AArch32 state when PSTATE.SM is enabled. + */ + sm = FIELD_EX64(env->svcr, SVCR, SM); + if (old_a64 != new_a64 && sm) { + arm_reset_sve_state(env); + return; + } + /* * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped * at ELx, or not available because the EL is in AArch32 state, then @@ -13768,12 +11614,13 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el, * we already have the correct register contents when encountering the * vq0->vq0 transition between EL0->EL1. */ - old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64; - old_len = (old_a64 && !sve_exception_el(env, old_el) - ? sve_zcr_len_for_el(env, old_el) : 0); - new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64; - new_len = (new_a64 && !sve_exception_el(env, new_el) - ? sve_zcr_len_for_el(env, new_el) : 0); + old_len = new_len = 0; + if (old_a64) { + old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm); + } + if (new_a64) { + new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm); + } /* When changing vector length, clear inaccessible state. 
*/ if (new_len < old_len) { diff --git a/target/arm/helper.h b/target/arm/helper.h index 248569b0cd..92f36d9dbb 100644 --- a/target/arm/helper.h +++ b/target/arm/helper.h @@ -6,8 +6,8 @@ DEF_HELPER_3(add_saturate, i32, env, i32, i32) DEF_HELPER_3(sub_saturate, i32, env, i32, i32) DEF_HELPER_3(add_usaturate, i32, env, i32, i32) DEF_HELPER_3(sub_usaturate, i32, env, i32, i32) -DEF_HELPER_FLAGS_2(sdiv, TCG_CALL_NO_RWG_SE, s32, s32, s32) -DEF_HELPER_FLAGS_2(udiv, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_3(sdiv, TCG_CALL_NO_RWG, s32, env, s32, s32) +DEF_HELPER_FLAGS_3(udiv, TCG_CALL_NO_RWG, i32, env, i32, i32) DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32) #define PAS_OP(pfx) \ @@ -44,15 +44,19 @@ DEF_HELPER_FLAGS_2(usad8, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32) -DEF_HELPER_2(exception_internal, void, env, i32) -DEF_HELPER_4(exception_with_syndrome, void, env, i32, i32, i32) -DEF_HELPER_2(exception_bkpt_insn, void, env, i32) +DEF_HELPER_2(exception_internal, noreturn, env, i32) +DEF_HELPER_3(exception_with_syndrome, noreturn, env, i32, i32) +DEF_HELPER_4(exception_with_syndrome_el, noreturn, env, i32, i32, i32) +DEF_HELPER_2(exception_bkpt_insn, noreturn, env, i32) +DEF_HELPER_2(exception_swstep, noreturn, env, i32) +DEF_HELPER_2(exception_pc_alignment, noreturn, env, tl) DEF_HELPER_1(setend, void, env) DEF_HELPER_2(wfi, void, env, i32) DEF_HELPER_1(wfe, void, env) DEF_HELPER_1(yield, void, env) DEF_HELPER_1(pre_hvc, void, env) DEF_HELPER_2(pre_smc, void, env, i32) +DEF_HELPER_1(vesb, void, env) DEF_HELPER_3(cpsr_write, void, env, i32, i32) DEF_HELPER_2(cpsr_write_eret, void, env, i32) @@ -73,6 +77,8 @@ DEF_HELPER_2(v7m_vlldm, void, env, i32) DEF_HELPER_2(v8m_stackcheck, void, env, i32) +DEF_HELPER_FLAGS_2(check_bxj_trap, TCG_CALL_NO_WG, void, env, i32) + DEF_HELPER_4(access_check_cp_reg, void, env, ptr, i32, i32) DEF_HELPER_3(set_cp_reg, void, env, ptr, i32) DEF_HELPER_2(get_cp_reg, i32, env, ptr) @@ -1013,9 +1019,28 @@ DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sclamp_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sclamp_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sclamp_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sclamp_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_uclamp_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uclamp_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uclamp_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uclamp_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + #ifdef TARGET_AARCH64 #include "helper-a64.h" #include "helper-sve.h" +#include "helper-sme.h" #endif #include "helper-mve.h" diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c new file mode 100644 index 0000000000..060aa0ccf4 --- /dev/null +++ b/target/arm/hvf/hvf.c @@ -0,0 +1,1349 @@ +/* + * QEMU Hypervisor.framework support for Apple Silicon + + * Copyright 2020 Alexander Graf + * Copyright 2020 Google LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" + +#include "sysemu/runstate.h" +#include "sysemu/hvf.h" +#include "sysemu/hvf_int.h" +#include "sysemu/hw_accel.h" +#include "hvf_arm.h" +#include "cpregs.h" + +#include + +#include "exec/address-spaces.h" +#include "hw/irq.h" +#include "qemu/main-loop.h" +#include "sysemu/cpus.h" +#include "arm-powerctl.h" +#include "target/arm/cpu.h" +#include "target/arm/internals.h" +#include "trace/trace-target_arm_hvf.h" +#include "migration/vmstate.h" + +#define HVF_SYSREG(crn, crm, op0, op1, op2) \ + ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2) +#define PL1_WRITE_MASK 0x4 + +#define SYSREG_OP0_SHIFT 20 +#define SYSREG_OP0_MASK 0x3 +#define SYSREG_OP0(sysreg) ((sysreg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK) +#define SYSREG_OP1_SHIFT 14 +#define SYSREG_OP1_MASK 0x7 +#define SYSREG_OP1(sysreg) ((sysreg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK) +#define SYSREG_CRN_SHIFT 10 +#define SYSREG_CRN_MASK 0xf +#define SYSREG_CRN(sysreg) ((sysreg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK) +#define SYSREG_CRM_SHIFT 1 +#define SYSREG_CRM_MASK 0xf +#define SYSREG_CRM(sysreg) ((sysreg >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK) +#define SYSREG_OP2_SHIFT 17 +#define SYSREG_OP2_MASK 0x7 +#define SYSREG_OP2(sysreg) ((sysreg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK) + +#define SYSREG(op0, op1, crn, crm, op2) \ + ((op0 << SYSREG_OP0_SHIFT) | \ + (op1 << SYSREG_OP1_SHIFT) | \ + (crn << SYSREG_CRN_SHIFT) | \ + (crm << SYSREG_CRM_SHIFT) | \ + (op2 << SYSREG_OP2_SHIFT)) +#define SYSREG_MASK \ + SYSREG(SYSREG_OP0_MASK, \ + SYSREG_OP1_MASK, \ + SYSREG_CRN_MASK, \ + SYSREG_CRM_MASK, \ + SYSREG_OP2_MASK) +#define SYSREG_OSLAR_EL1 SYSREG(2, 0, 1, 0, 4) +#define SYSREG_OSLSR_EL1 SYSREG(2, 0, 1, 1, 4) +#define SYSREG_OSDLR_EL1 SYSREG(2, 0, 1, 3, 4) +#define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 14, 0, 1) +#define SYSREG_PMCR_EL0 SYSREG(3, 3, 9, 12, 0) +#define SYSREG_PMUSERENR_EL0 SYSREG(3, 3, 9, 14, 0) +#define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1) +#define SYSREG_PMCNTENCLR_EL0 SYSREG(3, 3, 9, 12, 2) +#define SYSREG_PMINTENCLR_EL1 SYSREG(3, 0, 9, 14, 2) +#define SYSREG_PMOVSCLR_EL0 SYSREG(3, 3, 9, 12, 3) +#define SYSREG_PMSWINC_EL0 SYSREG(3, 3, 9, 12, 4) +#define SYSREG_PMSELR_EL0 SYSREG(3, 3, 9, 12, 5) +#define SYSREG_PMCEID0_EL0 SYSREG(3, 3, 9, 12, 6) +#define SYSREG_PMCEID1_EL0 SYSREG(3, 3, 9, 12, 7) +#define SYSREG_PMCCNTR_EL0 SYSREG(3, 3, 9, 13, 0) +#define SYSREG_PMCCFILTR_EL0 SYSREG(3, 3, 14, 15, 7) + +#define WFX_IS_WFE (1 << 0) + +#define TMR_CTL_ENABLE (1 << 0) +#define TMR_CTL_IMASK (1 << 1) +#define TMR_CTL_ISTATUS (1 << 2) + +static void hvf_wfi(CPUState *cpu); + +typedef struct HVFVTimer { + /* Vtimer value during migration and paused state */ + uint64_t vtimer_val; +} HVFVTimer; + +static HVFVTimer vtimer; + +typedef struct ARMHostCPUFeatures { + ARMISARegisters isar; + uint64_t features; + uint64_t midr; + uint32_t reset_sctlr; + const char *dtb_compatible; +} ARMHostCPUFeatures; + +static ARMHostCPUFeatures arm_host_cpu_features; + +struct hvf_reg_match { + int reg; + uint64_t offset; +}; + +static const struct hvf_reg_match hvf_reg_match[] = { + { HV_REG_X0, offsetof(CPUARMState, xregs[0]) }, + { HV_REG_X1, offsetof(CPUARMState, xregs[1]) }, + { HV_REG_X2, offsetof(CPUARMState, xregs[2]) }, + { HV_REG_X3, offsetof(CPUARMState, xregs[3]) }, + { HV_REG_X4, offsetof(CPUARMState, xregs[4]) }, + { HV_REG_X5, offsetof(CPUARMState, xregs[5]) }, + { HV_REG_X6, offsetof(CPUARMState, xregs[6]) }, + { HV_REG_X7, offsetof(CPUARMState, 
xregs[7]) }, + { HV_REG_X8, offsetof(CPUARMState, xregs[8]) }, + { HV_REG_X9, offsetof(CPUARMState, xregs[9]) }, + { HV_REG_X10, offsetof(CPUARMState, xregs[10]) }, + { HV_REG_X11, offsetof(CPUARMState, xregs[11]) }, + { HV_REG_X12, offsetof(CPUARMState, xregs[12]) }, + { HV_REG_X13, offsetof(CPUARMState, xregs[13]) }, + { HV_REG_X14, offsetof(CPUARMState, xregs[14]) }, + { HV_REG_X15, offsetof(CPUARMState, xregs[15]) }, + { HV_REG_X16, offsetof(CPUARMState, xregs[16]) }, + { HV_REG_X17, offsetof(CPUARMState, xregs[17]) }, + { HV_REG_X18, offsetof(CPUARMState, xregs[18]) }, + { HV_REG_X19, offsetof(CPUARMState, xregs[19]) }, + { HV_REG_X20, offsetof(CPUARMState, xregs[20]) }, + { HV_REG_X21, offsetof(CPUARMState, xregs[21]) }, + { HV_REG_X22, offsetof(CPUARMState, xregs[22]) }, + { HV_REG_X23, offsetof(CPUARMState, xregs[23]) }, + { HV_REG_X24, offsetof(CPUARMState, xregs[24]) }, + { HV_REG_X25, offsetof(CPUARMState, xregs[25]) }, + { HV_REG_X26, offsetof(CPUARMState, xregs[26]) }, + { HV_REG_X27, offsetof(CPUARMState, xregs[27]) }, + { HV_REG_X28, offsetof(CPUARMState, xregs[28]) }, + { HV_REG_X29, offsetof(CPUARMState, xregs[29]) }, + { HV_REG_X30, offsetof(CPUARMState, xregs[30]) }, + { HV_REG_PC, offsetof(CPUARMState, pc) }, +}; + +static const struct hvf_reg_match hvf_fpreg_match[] = { + { HV_SIMD_FP_REG_Q0, offsetof(CPUARMState, vfp.zregs[0]) }, + { HV_SIMD_FP_REG_Q1, offsetof(CPUARMState, vfp.zregs[1]) }, + { HV_SIMD_FP_REG_Q2, offsetof(CPUARMState, vfp.zregs[2]) }, + { HV_SIMD_FP_REG_Q3, offsetof(CPUARMState, vfp.zregs[3]) }, + { HV_SIMD_FP_REG_Q4, offsetof(CPUARMState, vfp.zregs[4]) }, + { HV_SIMD_FP_REG_Q5, offsetof(CPUARMState, vfp.zregs[5]) }, + { HV_SIMD_FP_REG_Q6, offsetof(CPUARMState, vfp.zregs[6]) }, + { HV_SIMD_FP_REG_Q7, offsetof(CPUARMState, vfp.zregs[7]) }, + { HV_SIMD_FP_REG_Q8, offsetof(CPUARMState, vfp.zregs[8]) }, + { HV_SIMD_FP_REG_Q9, offsetof(CPUARMState, vfp.zregs[9]) }, + { HV_SIMD_FP_REG_Q10, offsetof(CPUARMState, vfp.zregs[10]) }, + { HV_SIMD_FP_REG_Q11, offsetof(CPUARMState, vfp.zregs[11]) }, + { HV_SIMD_FP_REG_Q12, offsetof(CPUARMState, vfp.zregs[12]) }, + { HV_SIMD_FP_REG_Q13, offsetof(CPUARMState, vfp.zregs[13]) }, + { HV_SIMD_FP_REG_Q14, offsetof(CPUARMState, vfp.zregs[14]) }, + { HV_SIMD_FP_REG_Q15, offsetof(CPUARMState, vfp.zregs[15]) }, + { HV_SIMD_FP_REG_Q16, offsetof(CPUARMState, vfp.zregs[16]) }, + { HV_SIMD_FP_REG_Q17, offsetof(CPUARMState, vfp.zregs[17]) }, + { HV_SIMD_FP_REG_Q18, offsetof(CPUARMState, vfp.zregs[18]) }, + { HV_SIMD_FP_REG_Q19, offsetof(CPUARMState, vfp.zregs[19]) }, + { HV_SIMD_FP_REG_Q20, offsetof(CPUARMState, vfp.zregs[20]) }, + { HV_SIMD_FP_REG_Q21, offsetof(CPUARMState, vfp.zregs[21]) }, + { HV_SIMD_FP_REG_Q22, offsetof(CPUARMState, vfp.zregs[22]) }, + { HV_SIMD_FP_REG_Q23, offsetof(CPUARMState, vfp.zregs[23]) }, + { HV_SIMD_FP_REG_Q24, offsetof(CPUARMState, vfp.zregs[24]) }, + { HV_SIMD_FP_REG_Q25, offsetof(CPUARMState, vfp.zregs[25]) }, + { HV_SIMD_FP_REG_Q26, offsetof(CPUARMState, vfp.zregs[26]) }, + { HV_SIMD_FP_REG_Q27, offsetof(CPUARMState, vfp.zregs[27]) }, + { HV_SIMD_FP_REG_Q28, offsetof(CPUARMState, vfp.zregs[28]) }, + { HV_SIMD_FP_REG_Q29, offsetof(CPUARMState, vfp.zregs[29]) }, + { HV_SIMD_FP_REG_Q30, offsetof(CPUARMState, vfp.zregs[30]) }, + { HV_SIMD_FP_REG_Q31, offsetof(CPUARMState, vfp.zregs[31]) }, +}; + +struct hvf_sreg_match { + int reg; + uint32_t key; + uint32_t cp_idx; +}; + +static struct hvf_sreg_match hvf_sreg_match[] = { + { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 4) }, + { 
HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 5) }, + { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 6) }, + { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 7) }, + + { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 4) }, + { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 5) }, + { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 6) }, + { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 7) }, + + { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 4) }, + { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 5) }, + { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 6) }, + { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 7) }, + + { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 4) }, + { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 5) }, + { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 6) }, + { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 7) }, + + { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 4) }, + { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 5) }, + { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 6) }, + { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 7) }, + + { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 4) }, + { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 5) }, + { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 6) }, + { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 7) }, + + { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 4) }, + { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 5) }, + { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 6) }, + { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 7) }, + + { HV_SYS_REG_DBGBVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 4) }, + { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 5) }, + { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 6) }, + { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 7) }, + + { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 4) }, + { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 5) }, + { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 6) }, + { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 7) }, + + { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 4) }, + { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 5) }, + { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 6) }, + { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 7) }, + + { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 4) }, + { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 5) }, + { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 6) }, + { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 7) }, + + { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 4) }, + { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 5) }, + { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 6) }, + { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 7) }, + + { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 4) }, + { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 5) }, + { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 6) }, + { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 7) }, + + { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 4) }, + { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 5) }, + { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 6) }, + { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 7) }, + + { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 4) }, + { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 5) }, + { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 6) }, + { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 7) }, + + { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 4) }, + { 
HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 5) }, + { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 6) }, + { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 7) }, + +#ifdef SYNC_NO_RAW_REGS + /* + * The registers below are manually synced on init because they are + * marked as NO_RAW. We still list them to make number space sync easier. + */ + { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) }, + { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) }, + { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) }, + { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) }, +#endif + { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 2) }, + { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) }, + { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) }, + { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) }, + { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) }, +#ifdef SYNC_NO_MMFR0 + /* We keep the hardware MMFR0 around. HW limits are there anyway */ + { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) }, +#endif + { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) }, + { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) }, + + { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) }, + { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) }, + { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) }, + { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) }, + { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) }, + { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) }, + + { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) }, + { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) }, + { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) }, + { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) }, + { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) }, + { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) }, + { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) }, + { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) }, + { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) }, + { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) }, + + { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 0, 0) }, + { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) }, + { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) }, + { HV_SYS_REG_AFSR0_EL1, HVF_SYSREG(5, 1, 3, 0, 0) }, + { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) }, + { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) }, + { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) }, + { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) }, + { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) }, + { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) }, + { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) }, + { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) }, + { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) }, + { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) }, + { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) }, + { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) }, + { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) }, + { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) }, + { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) }, + { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) }, +}; + +int hvf_get_registers(CPUState *cpu) +{ + ARMCPU *arm_cpu = ARM_CPU(cpu); + CPUARMState *env = &arm_cpu->env; + hv_return_t ret; + uint64_t val; + hv_simd_fp_uchar16_t fpval; + int i; + + for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) { + ret = hv_vcpu_get_reg(cpu->hvf->fd, hvf_reg_match[i].reg, &val); + *(uint64_t *)((void 
*)env + hvf_reg_match[i].offset) = val; + assert_hvf_ok(ret); + } + + for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) { + ret = hv_vcpu_get_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg, + &fpval); + memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval)); + assert_hvf_ok(ret); + } + + val = 0; + ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPCR, &val); + assert_hvf_ok(ret); + vfp_set_fpcr(env, val); + + val = 0; + ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPSR, &val); + assert_hvf_ok(ret); + vfp_set_fpsr(env, val); + + ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_CPSR, &val); + assert_hvf_ok(ret); + pstate_write(env, val); + + for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) { + if (hvf_sreg_match[i].cp_idx == -1) { + continue; + } + + ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, &val); + assert_hvf_ok(ret); + + arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val; + } + assert(write_list_to_cpustate(arm_cpu)); + + aarch64_restore_sp(env, arm_current_el(env)); + + return 0; +} + +int hvf_put_registers(CPUState *cpu) +{ + ARMCPU *arm_cpu = ARM_CPU(cpu); + CPUARMState *env = &arm_cpu->env; + hv_return_t ret; + uint64_t val; + hv_simd_fp_uchar16_t fpval; + int i; + + for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) { + val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset); + ret = hv_vcpu_set_reg(cpu->hvf->fd, hvf_reg_match[i].reg, val); + assert_hvf_ok(ret); + } + + for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) { + memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval)); + ret = hv_vcpu_set_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg, + fpval); + assert_hvf_ok(ret); + } + + ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPCR, vfp_get_fpcr(env)); + assert_hvf_ok(ret); + + ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPSR, vfp_get_fpsr(env)); + assert_hvf_ok(ret); + + ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_CPSR, pstate_read(env)); + assert_hvf_ok(ret); + + aarch64_save_sp(env, arm_current_el(env)); + + assert(write_cpustate_to_list(arm_cpu, false)); + for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) { + if (hvf_sreg_match[i].cp_idx == -1) { + continue; + } + + val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx]; + ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, val); + assert_hvf_ok(ret); + } + + ret = hv_vcpu_set_vtimer_offset(cpu->hvf->fd, hvf_state->vtimer_offset); + assert_hvf_ok(ret); + + return 0; +} + +static void flush_cpu_state(CPUState *cpu) +{ + if (cpu->vcpu_dirty) { + hvf_put_registers(cpu); + cpu->vcpu_dirty = false; + } +} + +static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val) +{ + hv_return_t r; + + flush_cpu_state(cpu); + + if (rt < 31) { + r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_X0 + rt, val); + assert_hvf_ok(r); + } +} + +static uint64_t hvf_get_reg(CPUState *cpu, int rt) +{ + uint64_t val = 0; + hv_return_t r; + + flush_cpu_state(cpu); + + if (rt < 31) { + r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_X0 + rt, &val); + assert_hvf_ok(r); + } + + return val; +} + +static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) +{ + ARMISARegisters host_isar = {}; + const struct isar_regs { + int reg; + uint64_t *val; + } regs[] = { + { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 }, + { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 }, + { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 }, + { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 }, + { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 }, + { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 }, + { 
HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 }, + { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 }, + { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 }, + }; + hv_vcpu_t fd; + hv_return_t r = HV_SUCCESS; + hv_vcpu_exit_t *exit; + int i; + + ahcf->dtb_compatible = "arm,arm-v8"; + ahcf->features = (1ULL << ARM_FEATURE_V8) | + (1ULL << ARM_FEATURE_NEON) | + (1ULL << ARM_FEATURE_AARCH64) | + (1ULL << ARM_FEATURE_PMU) | + (1ULL << ARM_FEATURE_GENERIC_TIMER); + + /* We set up a small vcpu to extract host registers */ + + if (hv_vcpu_create(&fd, &exit, NULL) != HV_SUCCESS) { + return false; + } + + for (i = 0; i < ARRAY_SIZE(regs); i++) { + r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val); + } + r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr); + r |= hv_vcpu_destroy(fd); + + ahcf->isar = host_isar; + + /* + * A scratch vCPU returns SCTLR 0, so let's fill our default with the M1 + * boot SCTLR from https://github.com/AsahiLinux/m1n1/issues/97 + */ + ahcf->reset_sctlr = 0x30100180; + /* + * SPAN is disabled by default when SCTLR.SPAN=1. To improve compatibility, + * let's disable it on boot and then allow guest software to turn it on by + * setting it to 0. + */ + ahcf->reset_sctlr |= 0x00800000; + + /* Make sure we don't advertise AArch32 support for EL0/EL1 */ + if ((host_isar.id_aa64pfr0 & 0xff) != 0x11) { + return false; + } + + return r == HV_SUCCESS; +} + +void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu) +{ + if (!arm_host_cpu_features.dtb_compatible) { + if (!hvf_enabled() || + !hvf_arm_get_host_cpu_features(&arm_host_cpu_features)) { + /* + * We can't report this error yet, so flag that we need to + * in arm_cpu_realizefn(). + */ + cpu->host_cpu_probe_failed = true; + return; + } + } + + cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible; + cpu->isar = arm_host_cpu_features.isar; + cpu->env.features = arm_host_cpu_features.features; + cpu->midr = arm_host_cpu_features.midr; + cpu->reset_sctlr = arm_host_cpu_features.reset_sctlr; +} + +void hvf_arch_vcpu_destroy(CPUState *cpu) +{ +} + +int hvf_arch_init_vcpu(CPUState *cpu) +{ + ARMCPU *arm_cpu = ARM_CPU(cpu); + CPUARMState *env = &arm_cpu->env; + uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_match); + uint32_t sregs_cnt = 0; + uint64_t pfr; + hv_return_t ret; + int i; + + env->aarch64 = true; + asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz)); + + /* Allocate enough space for our sysreg sync */ + arm_cpu->cpreg_indexes = g_renew(uint64_t, arm_cpu->cpreg_indexes, + sregs_match_len); + arm_cpu->cpreg_values = g_renew(uint64_t, arm_cpu->cpreg_values, + sregs_match_len); + arm_cpu->cpreg_vmstate_indexes = g_renew(uint64_t, + arm_cpu->cpreg_vmstate_indexes, + sregs_match_len); + arm_cpu->cpreg_vmstate_values = g_renew(uint64_t, + arm_cpu->cpreg_vmstate_values, + sregs_match_len); + + memset(arm_cpu->cpreg_values, 0, sregs_match_len * sizeof(uint64_t)); + + /* Populate cp list for all known sysregs */ + for (i = 0; i < sregs_match_len; i++) { + const ARMCPRegInfo *ri; + uint32_t key = hvf_sreg_match[i].key; + + ri = get_arm_cp_reginfo(arm_cpu->cp_regs, key); + if (ri) { + assert(!(ri->type & ARM_CP_NO_RAW)); + hvf_sreg_match[i].cp_idx = sregs_cnt; + arm_cpu->cpreg_indexes[sregs_cnt++] = cpreg_to_kvm_id(key); + } else { + hvf_sreg_match[i].cp_idx = -1; + } + } + arm_cpu->cpreg_array_len = sregs_cnt; + arm_cpu->cpreg_vmstate_array_len = sregs_cnt; + + assert(write_cpustate_to_list(arm_cpu, false)); + + /* Set CP_NO_RAW system registers on init */ + ret = 
hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MIDR_EL1, + arm_cpu->midr); + assert_hvf_ok(ret); + + ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MPIDR_EL1, + arm_cpu->mp_affinity); + assert_hvf_ok(ret); + + ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr); + assert_hvf_ok(ret); + pfr |= env->gicv3state ? (1 << 24) : 0; + ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr); + assert_hvf_ok(ret); + + /* We're limited to underlying hardware caps, override internal versions */ + ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64MMFR0_EL1, + &arm_cpu->isar.id_aa64mmfr0); + assert_hvf_ok(ret); + + return 0; +} + +void hvf_kick_vcpu_thread(CPUState *cpu) +{ + cpus_kick_thread(cpu); + hv_vcpus_exit(&cpu->hvf->fd, 1); +} + +static void hvf_raise_exception(CPUState *cpu, uint32_t excp, + uint32_t syndrome) +{ + ARMCPU *arm_cpu = ARM_CPU(cpu); + CPUARMState *env = &arm_cpu->env; + + cpu->exception_index = excp; + env->exception.target_el = 1; + env->exception.syndrome = syndrome; + + arm_cpu_do_interrupt(cpu); +} + +static void hvf_psci_cpu_off(ARMCPU *arm_cpu) +{ + int32_t ret = arm_set_cpu_off(arm_cpu->mp_affinity); + assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS); +} + +/* + * Handle a PSCI call. + * + * Returns 0 on success + * -1 when the PSCI call is unknown, + */ +static bool hvf_handle_psci_call(CPUState *cpu) +{ + ARMCPU *arm_cpu = ARM_CPU(cpu); + CPUARMState *env = &arm_cpu->env; + uint64_t param[4] = { + env->xregs[0], + env->xregs[1], + env->xregs[2], + env->xregs[3] + }; + uint64_t context_id, mpidr; + bool target_aarch64 = true; + CPUState *target_cpu_state; + ARMCPU *target_cpu; + target_ulong entry; + int target_el = 1; + int32_t ret = 0; + + trace_hvf_psci_call(param[0], param[1], param[2], param[3], + arm_cpu->mp_affinity); + + switch (param[0]) { + case QEMU_PSCI_0_2_FN_PSCI_VERSION: + ret = QEMU_PSCI_VERSION_1_1; + break; + case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE: + ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */ + break; + case QEMU_PSCI_0_2_FN_AFFINITY_INFO: + case QEMU_PSCI_0_2_FN64_AFFINITY_INFO: + mpidr = param[1]; + + switch (param[2]) { + case 0: + target_cpu_state = arm_get_cpu_by_id(mpidr); + if (!target_cpu_state) { + ret = QEMU_PSCI_RET_INVALID_PARAMS; + break; + } + target_cpu = ARM_CPU(target_cpu_state); + + ret = target_cpu->power_state; + break; + default: + /* Everything above affinity level 0 is always on. */ + ret = 0; + } + break; + case QEMU_PSCI_0_2_FN_SYSTEM_RESET: + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + /* + * QEMU reset and shutdown are async requests, but PSCI + * mandates that we never return from the reset/shutdown + * call, so power the CPU off now so it doesn't execute + * anything further. 
+ */ + hvf_psci_cpu_off(arm_cpu); + break; + case QEMU_PSCI_0_2_FN_SYSTEM_OFF: + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + hvf_psci_cpu_off(arm_cpu); + break; + case QEMU_PSCI_0_1_FN_CPU_ON: + case QEMU_PSCI_0_2_FN_CPU_ON: + case QEMU_PSCI_0_2_FN64_CPU_ON: + mpidr = param[1]; + entry = param[2]; + context_id = param[3]; + ret = arm_set_cpu_on(mpidr, entry, context_id, + target_el, target_aarch64); + break; + case QEMU_PSCI_0_1_FN_CPU_OFF: + case QEMU_PSCI_0_2_FN_CPU_OFF: + hvf_psci_cpu_off(arm_cpu); + break; + case QEMU_PSCI_0_1_FN_CPU_SUSPEND: + case QEMU_PSCI_0_2_FN_CPU_SUSPEND: + case QEMU_PSCI_0_2_FN64_CPU_SUSPEND: + /* Affinity levels are not supported in QEMU */ + if (param[1] & 0xfffe0000) { + ret = QEMU_PSCI_RET_INVALID_PARAMS; + break; + } + /* Powerdown is not supported, we always go into WFI */ + env->xregs[0] = 0; + hvf_wfi(cpu); + break; + case QEMU_PSCI_0_1_FN_MIGRATE: + case QEMU_PSCI_0_2_FN_MIGRATE: + ret = QEMU_PSCI_RET_NOT_SUPPORTED; + break; + case QEMU_PSCI_1_0_FN_PSCI_FEATURES: + switch (param[1]) { + case QEMU_PSCI_0_2_FN_PSCI_VERSION: + case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE: + case QEMU_PSCI_0_2_FN_AFFINITY_INFO: + case QEMU_PSCI_0_2_FN64_AFFINITY_INFO: + case QEMU_PSCI_0_2_FN_SYSTEM_RESET: + case QEMU_PSCI_0_2_FN_SYSTEM_OFF: + case QEMU_PSCI_0_1_FN_CPU_ON: + case QEMU_PSCI_0_2_FN_CPU_ON: + case QEMU_PSCI_0_2_FN64_CPU_ON: + case QEMU_PSCI_0_1_FN_CPU_OFF: + case QEMU_PSCI_0_2_FN_CPU_OFF: + case QEMU_PSCI_0_1_FN_CPU_SUSPEND: + case QEMU_PSCI_0_2_FN_CPU_SUSPEND: + case QEMU_PSCI_0_2_FN64_CPU_SUSPEND: + case QEMU_PSCI_1_0_FN_PSCI_FEATURES: + ret = 0; + break; + case QEMU_PSCI_0_1_FN_MIGRATE: + case QEMU_PSCI_0_2_FN_MIGRATE: + default: + ret = QEMU_PSCI_RET_NOT_SUPPORTED; + } + break; + default: + return false; + } + + env->xregs[0] = ret; + return true; +} + +static bool is_id_sysreg(uint32_t reg) +{ + return SYSREG_OP0(reg) == 3 && + SYSREG_OP1(reg) == 0 && + SYSREG_CRN(reg) == 0 && + SYSREG_CRM(reg) >= 1 && + SYSREG_CRM(reg) < 8; +} + +static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt) +{ + ARMCPU *arm_cpu = ARM_CPU(cpu); + CPUARMState *env = &arm_cpu->env; + uint64_t val = 0; + + switch (reg) { + case SYSREG_CNTPCT_EL0: + val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / + gt_cntfrq_period_ns(arm_cpu); + break; + case SYSREG_PMCR_EL0: + val = env->cp15.c9_pmcr; + break; + case SYSREG_PMCCNTR_EL0: + pmu_op_start(env); + val = env->cp15.c15_ccnt; + pmu_op_finish(env); + break; + case SYSREG_PMCNTENCLR_EL0: + val = env->cp15.c9_pmcnten; + break; + case SYSREG_PMOVSCLR_EL0: + val = env->cp15.c9_pmovsr; + break; + case SYSREG_PMSELR_EL0: + val = env->cp15.c9_pmselr; + break; + case SYSREG_PMINTENCLR_EL1: + val = env->cp15.c9_pminten; + break; + case SYSREG_PMCCFILTR_EL0: + val = env->cp15.pmccfiltr_el0; + break; + case SYSREG_PMCNTENSET_EL0: + val = env->cp15.c9_pmcnten; + break; + case SYSREG_PMUSERENR_EL0: + val = env->cp15.c9_pmuserenr; + break; + case SYSREG_PMCEID0_EL0: + case SYSREG_PMCEID1_EL0: + /* We can't really count anything yet, declare all events invalid */ + val = 0; + break; + case SYSREG_OSLSR_EL1: + val = env->cp15.oslsr_el1; + break; + case SYSREG_OSDLR_EL1: + /* Dummy register */ + break; + default: + if (is_id_sysreg(reg)) { + /* ID system registers read as RES0 */ + val = 0; + break; + } + cpu_synchronize_state(cpu); + trace_hvf_unhandled_sysreg_read(env->pc, reg, + SYSREG_OP0(reg), + SYSREG_OP1(reg), + SYSREG_CRN(reg), + SYSREG_CRM(reg), + SYSREG_OP2(reg)); + hvf_raise_exception(cpu, EXCP_UDEF, 
syn_uncategorized()); + return 1; + } + + trace_hvf_sysreg_read(reg, + SYSREG_OP0(reg), + SYSREG_OP1(reg), + SYSREG_CRN(reg), + SYSREG_CRM(reg), + SYSREG_OP2(reg), + val); + hvf_set_reg(cpu, rt, val); + + return 0; +} + +static void pmu_update_irq(CPUARMState *env) +{ + ARMCPU *cpu = env_archcpu(env); + qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) && + (env->cp15.c9_pminten & env->cp15.c9_pmovsr)); +} + +static bool pmu_event_supported(uint16_t number) +{ + return false; +} + +/* Returns true if the counter (pass 31 for PMCCNTR) should count events using + * the current EL, security state, and register configuration. + */ +static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter) +{ + uint64_t filter; + bool enabled, filtered = true; + int el = arm_current_el(env); + + enabled = (env->cp15.c9_pmcr & PMCRE) && + (env->cp15.c9_pmcnten & (1 << counter)); + + if (counter == 31) { + filter = env->cp15.pmccfiltr_el0; + } else { + filter = env->cp15.c14_pmevtyper[counter]; + } + + if (el == 0) { + filtered = filter & PMXEVTYPER_U; + } else if (el == 1) { + filtered = filter & PMXEVTYPER_P; + } + + if (counter != 31) { + /* + * If not checking PMCCNTR, ensure the counter is setup to an event we + * support + */ + uint16_t event = filter & PMXEVTYPER_EVTCOUNT; + if (!pmu_event_supported(event)) { + return false; + } + } + + return enabled && !filtered; +} + +static void pmswinc_write(CPUARMState *env, uint64_t value) +{ + unsigned int i; + for (i = 0; i < pmu_num_counters(env); i++) { + /* Increment a counter's count iff: */ + if ((value & (1 << i)) && /* counter's bit is set */ + /* counter is enabled and not filtered */ + pmu_counter_enabled(env, i) && + /* counter is SW_INCR */ + (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) { + /* + * Detect if this write causes an overflow since we can't predict + * PMSWINC overflows like we can for other events + */ + uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1; + + if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) { + env->cp15.c9_pmovsr |= (1 << i); + pmu_update_irq(env); + } + + env->cp15.c14_pmevcntr[i] = new_pmswinc; + } + } +} + +static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val) +{ + ARMCPU *arm_cpu = ARM_CPU(cpu); + CPUARMState *env = &arm_cpu->env; + + trace_hvf_sysreg_write(reg, + SYSREG_OP0(reg), + SYSREG_OP1(reg), + SYSREG_CRN(reg), + SYSREG_CRM(reg), + SYSREG_OP2(reg), + val); + + switch (reg) { + case SYSREG_PMCCNTR_EL0: + pmu_op_start(env); + env->cp15.c15_ccnt = val; + pmu_op_finish(env); + break; + case SYSREG_PMCR_EL0: + pmu_op_start(env); + + if (val & PMCRC) { + /* The counter has been reset */ + env->cp15.c15_ccnt = 0; + } + + if (val & PMCRP) { + unsigned int i; + for (i = 0; i < pmu_num_counters(env); i++) { + env->cp15.c14_pmevcntr[i] = 0; + } + } + + env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK; + env->cp15.c9_pmcr |= (val & PMCR_WRITABLE_MASK); + + pmu_op_finish(env); + break; + case SYSREG_PMUSERENR_EL0: + env->cp15.c9_pmuserenr = val & 0xf; + break; + case SYSREG_PMCNTENSET_EL0: + env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env)); + break; + case SYSREG_PMCNTENCLR_EL0: + env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env)); + break; + case SYSREG_PMINTENCLR_EL1: + pmu_op_start(env); + env->cp15.c9_pminten |= val; + pmu_op_finish(env); + break; + case SYSREG_PMOVSCLR_EL0: + pmu_op_start(env); + env->cp15.c9_pmovsr &= ~val; + pmu_op_finish(env); + break; + case SYSREG_PMSWINC_EL0: + pmu_op_start(env); + pmswinc_write(env, val); + pmu_op_finish(env); + break; 
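/*
 * The PMSWINC handling just above detects a 32-bit counter wrap with a
 * sign-bit test: bit 31 set before the increment and clear after it can
 * only happen (for a +1 step) on the 0xffffffff -> 0 transition, which is
 * exactly when the overflow status bit has to be raised.  A standalone
 * sketch of the same check, using hypothetical names rather than the QEMU
 * PMU state:
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Increment a 32-bit software event counter; return true if it wrapped. */
static bool sw_increment(uint32_t *counter)
{
    uint32_t old = *counter;
    uint32_t new = old + 1;

    *counter = new;
    /* Bit 31 was set before and is clear after: the counter wrapped to 0. */
    return (old & ~new & (1u << 31)) != 0;
}

static void sw_increment_demo(void)
{
    uint32_t c = 0xfffffffeu;

    assert(!sw_increment(&c));  /* 0xfffffffe -> 0xffffffff: no overflow */
    assert(sw_increment(&c));   /* 0xffffffff -> 0x00000000: overflow */
    assert(c == 0);
}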
+ case SYSREG_PMSELR_EL0: + env->cp15.c9_pmselr = val & 0x1f; + break; + case SYSREG_PMCCFILTR_EL0: + pmu_op_start(env); + env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0; + pmu_op_finish(env); + break; + case SYSREG_OSLAR_EL1: + env->cp15.oslsr_el1 = val & 1; + break; + case SYSREG_OSDLR_EL1: + /* Dummy register */ + break; + default: + cpu_synchronize_state(cpu); + trace_hvf_unhandled_sysreg_write(env->pc, reg, + SYSREG_OP0(reg), + SYSREG_OP1(reg), + SYSREG_CRN(reg), + SYSREG_CRM(reg), + SYSREG_OP2(reg)); + hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized()); + return 1; + } + + return 0; +} + +static int hvf_inject_interrupts(CPUState *cpu) +{ + if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) { + trace_hvf_inject_fiq(); + hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_FIQ, + true); + } + + if (cpu->interrupt_request & CPU_INTERRUPT_HARD) { + trace_hvf_inject_irq(); + hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_IRQ, + true); + } + + return 0; +} + +static uint64_t hvf_vtimer_val_raw(void) +{ + /* + * mach_absolute_time() returns the vtimer value without the VM + * offset that we define. Add our own offset on top. + */ + return mach_absolute_time() - hvf_state->vtimer_offset; +} + +static uint64_t hvf_vtimer_val(void) +{ + if (!runstate_is_running()) { + /* VM is paused, the vtimer value is in vtimer.vtimer_val */ + return vtimer.vtimer_val; + } + + return hvf_vtimer_val_raw(); +} + +static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts) +{ + /* + * Use pselect to sleep so that other threads can IPI us while we're + * sleeping. + */ + qatomic_mb_set(&cpu->thread_kicked, false); + qemu_mutex_unlock_iothread(); + pselect(0, 0, 0, 0, ts, &cpu->hvf->unblock_ipi_mask); + qemu_mutex_lock_iothread(); +} + +static void hvf_wfi(CPUState *cpu) +{ + ARMCPU *arm_cpu = ARM_CPU(cpu); + struct timespec ts; + hv_return_t r; + uint64_t ctl; + uint64_t cval; + int64_t ticks_to_sleep; + uint64_t seconds; + uint64_t nanos; + uint32_t cntfrq; + + if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) { + /* Interrupt pending, no need to wait */ + return; + } + + r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl); + assert_hvf_ok(r); + + if (!(ctl & 1) || (ctl & 2)) { + /* Timer disabled or masked, just wait for an IPI. */ + hvf_wait_for_ipi(cpu, NULL); + return; + } + + r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval); + assert_hvf_ok(r); + + ticks_to_sleep = cval - hvf_vtimer_val(); + if (ticks_to_sleep < 0) { + return; + } + + cntfrq = gt_cntfrq_period_ns(arm_cpu); + seconds = muldiv64(ticks_to_sleep, cntfrq, NANOSECONDS_PER_SECOND); + ticks_to_sleep -= muldiv64(seconds, NANOSECONDS_PER_SECOND, cntfrq); + nanos = ticks_to_sleep * cntfrq; + + /* + * Don't sleep for less than the time a context switch would take, + * so that we can satisfy fast timer requests on the same CPU. + * Measurements on M1 show the sweet spot to be ~2ms. 
+ */ + if (!seconds && nanos < (2 * SCALE_MS)) { + return; + } + + ts = (struct timespec) { seconds, nanos }; + hvf_wait_for_ipi(cpu, &ts); +} + +static void hvf_sync_vtimer(CPUState *cpu) +{ + ARMCPU *arm_cpu = ARM_CPU(cpu); + hv_return_t r; + uint64_t ctl; + bool irq_state; + + if (!cpu->hvf->vtimer_masked) { + /* We will get notified on vtimer changes by hvf, nothing to do */ + return; + } + + r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl); + assert_hvf_ok(r); + + irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) == + (TMR_CTL_ENABLE | TMR_CTL_ISTATUS); + qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], irq_state); + + if (!irq_state) { + /* Timer no longer asserting, we can unmask it */ + hv_vcpu_set_vtimer_mask(cpu->hvf->fd, false); + cpu->hvf->vtimer_masked = false; + } +} + +int hvf_vcpu_exec(CPUState *cpu) +{ + ARMCPU *arm_cpu = ARM_CPU(cpu); + CPUARMState *env = &arm_cpu->env; + hv_vcpu_exit_t *hvf_exit = cpu->hvf->exit; + hv_return_t r; + bool advance_pc = false; + + if (hvf_inject_interrupts(cpu)) { + return EXCP_INTERRUPT; + } + + if (cpu->halted) { + return EXCP_HLT; + } + + flush_cpu_state(cpu); + + qemu_mutex_unlock_iothread(); + assert_hvf_ok(hv_vcpu_run(cpu->hvf->fd)); + + /* handle VMEXIT */ + uint64_t exit_reason = hvf_exit->reason; + uint64_t syndrome = hvf_exit->exception.syndrome; + uint32_t ec = syn_get_ec(syndrome); + + qemu_mutex_lock_iothread(); + switch (exit_reason) { + case HV_EXIT_REASON_EXCEPTION: + /* This is the main one, handle below. */ + break; + case HV_EXIT_REASON_VTIMER_ACTIVATED: + qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1); + cpu->hvf->vtimer_masked = true; + return 0; + case HV_EXIT_REASON_CANCELED: + /* we got kicked, no exit to process */ + return 0; + default: + g_assert_not_reached(); + } + + hvf_sync_vtimer(cpu); + + switch (ec) { + case EC_DATAABORT: { + bool isv = syndrome & ARM_EL_ISV; + bool iswrite = (syndrome >> 6) & 1; + bool s1ptw = (syndrome >> 7) & 1; + uint32_t sas = (syndrome >> 22) & 3; + uint32_t len = 1 << sas; + uint32_t srt = (syndrome >> 16) & 0x1f; + uint32_t cm = (syndrome >> 8) & 0x1; + uint64_t val = 0; + + trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address, + hvf_exit->exception.physical_address, isv, + iswrite, s1ptw, len, srt); + + if (cm) { + /* We don't cache MMIO regions */ + advance_pc = true; + break; + } + + assert(isv); + + if (iswrite) { + val = hvf_get_reg(cpu, srt); + address_space_write(&address_space_memory, + hvf_exit->exception.physical_address, + MEMTXATTRS_UNSPECIFIED, &val, len); + } else { + address_space_read(&address_space_memory, + hvf_exit->exception.physical_address, + MEMTXATTRS_UNSPECIFIED, &val, len); + hvf_set_reg(cpu, srt, val); + } + + advance_pc = true; + break; + } + case EC_SYSTEMREGISTERTRAP: { + bool isread = (syndrome >> 0) & 1; + uint32_t rt = (syndrome >> 5) & 0x1f; + uint32_t reg = syndrome & SYSREG_MASK; + uint64_t val; + int ret = 0; + + if (isread) { + ret = hvf_sysreg_read(cpu, reg, rt); + } else { + val = hvf_get_reg(cpu, rt); + ret = hvf_sysreg_write(cpu, reg, val); + } + + advance_pc = !ret; + break; + } + case EC_WFX_TRAP: + advance_pc = true; + if (!(syndrome & WFX_IS_WFE)) { + hvf_wfi(cpu); + } + break; + case EC_AA64_HVC: + cpu_synchronize_state(cpu); + if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) { + if (!hvf_handle_psci_call(cpu)) { + trace_hvf_unknown_hvc(env->xregs[0]); + /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */ + env->xregs[0] = -1; + } + } else { + 
trace_hvf_unknown_hvc(env->xregs[0]); + hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized()); + } + break; + case EC_AA64_SMC: + cpu_synchronize_state(cpu); + if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_SMC) { + advance_pc = true; + + if (!hvf_handle_psci_call(cpu)) { + trace_hvf_unknown_smc(env->xregs[0]); + /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */ + env->xregs[0] = -1; + } + } else { + trace_hvf_unknown_smc(env->xregs[0]); + hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized()); + } + break; + default: + cpu_synchronize_state(cpu); + trace_hvf_exit(syndrome, ec, env->pc); + error_report("0x%llx: unhandled exception ec=0x%x", env->pc, ec); + } + + if (advance_pc) { + uint64_t pc; + + flush_cpu_state(cpu); + + r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_PC, &pc); + assert_hvf_ok(r); + pc += 4; + r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_PC, pc); + assert_hvf_ok(r); + } + + return 0; +} + +static const VMStateDescription vmstate_hvf_vtimer = { + .name = "hvf-vtimer", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(vtimer_val, HVFVTimer), + VMSTATE_END_OF_LIST() + }, +}; + +static void hvf_vm_state_change(void *opaque, bool running, RunState state) +{ + HVFVTimer *s = opaque; + + if (running) { + /* Update vtimer offset on all CPUs */ + hvf_state->vtimer_offset = mach_absolute_time() - s->vtimer_val; + cpu_synchronize_all_states(); + } else { + /* Remember vtimer value on every pause */ + s->vtimer_val = hvf_vtimer_val_raw(); + } +} + +int hvf_arch_init(void) +{ + hvf_state->vtimer_offset = mach_absolute_time(); + vmstate_register(NULL, 0, &vmstate_hvf_vtimer, &vtimer); + qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer); + return 0; +} diff --git a/target/arm/hvf/meson.build b/target/arm/hvf/meson.build new file mode 100644 index 0000000000..855e6cce5a --- /dev/null +++ b/target/arm/hvf/meson.build @@ -0,0 +1,3 @@ +arm_softmmu_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files( + 'hvf.c', +)) diff --git a/target/arm/hvf/trace-events b/target/arm/hvf/trace-events new file mode 100644 index 0000000000..820e8e0297 --- /dev/null +++ b/target/arm/hvf/trace-events @@ -0,0 +1,11 @@ +hvf_unhandled_sysreg_read(uint64_t pc, uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) "unhandled sysreg read at pc=0x%"PRIx64": 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d)" +hvf_unhandled_sysreg_write(uint64_t pc, uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) "unhandled sysreg write at pc=0x%"PRIx64": 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d)" +hvf_inject_fiq(void) "injecting FIQ" +hvf_inject_irq(void) "injecting IRQ" +hvf_data_abort(uint64_t pc, uint64_t va, uint64_t pa, bool isv, bool iswrite, bool s1ptw, uint32_t len, uint32_t srt) "data abort: [pc=0x%"PRIx64" va=0x%016"PRIx64" pa=0x%016"PRIx64" isv=%d iswrite=%d s1ptw=%d len=%d srt=%d]" +hvf_sysreg_read(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg read 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d) = 0x%016"PRIx64 +hvf_sysreg_write(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg write 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d, val=0x%016"PRIx64")" +hvf_unknown_hvc(uint64_t x0) "unknown HVC! 0x%016"PRIx64 +hvf_unknown_smc(uint64_t x0) "unknown SMC! 
0x%016"PRIx64 +hvf_exit(uint64_t syndrome, uint32_t ec, uint64_t pc) "exit: 0x%"PRIx64" [ec=0x%x pc=0x%"PRIx64"]" +hvf_psci_call(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3, uint32_t cpuid) "PSCI Call x0=0x%016"PRIx64" x1=0x%016"PRIx64" x2=0x%016"PRIx64" x3=0x%016"PRIx64" cpu=0x%x" diff --git a/target/arm/hvf_arm.h b/target/arm/hvf_arm.h new file mode 100644 index 0000000000..9a9d1a0bf5 --- /dev/null +++ b/target/arm/hvf_arm.h @@ -0,0 +1,18 @@ +/* + * QEMU Hypervisor.framework (HVF) support -- ARM specifics + * + * Copyright (c) 2021 Alexander Graf + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_HVF_ARM_H +#define QEMU_HVF_ARM_H + +#include "cpu.h" + +void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu); + +#endif diff --git a/target/arm/internals.h b/target/arm/internals.h index cd2ea8a388..161e42d50f 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -81,6 +81,18 @@ FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */ */ #define FNC_RETURN_MIN_MAGIC 0xfefffffe +/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */ +FIELD(DBGWCR, E, 0, 1) +FIELD(DBGWCR, PAC, 1, 2) +FIELD(DBGWCR, LSC, 3, 2) +FIELD(DBGWCR, BAS, 5, 8) +FIELD(DBGWCR, HMC, 13, 1) +FIELD(DBGWCR, SSC, 14, 2) +FIELD(DBGWCR, LBN, 16, 4) +FIELD(DBGWCR, WT, 20, 1) +FIELD(DBGWCR, MASK, 24, 5) +FIELD(DBGWCR, SSCE, 29, 1) + /* We use a few fake FSR values for internal purposes in M profile. * M profile cores don't have A/R format FSRs, but currently our * get_phys_addr() code assumes A/R profile and reports failures via @@ -102,13 +114,13 @@ FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */ * and target exception level. This should be called from helper functions, * and never returns because we will longjump back up to the CPU main loop. */ -void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp, - uint32_t syndrome, uint32_t target_el); +G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp, + uint32_t syndrome, uint32_t target_el); /* * Similarly, but also use unwinding to restore cpu state. */ -void QEMU_NORETURN raise_exception_ra(CPUARMState *env, uint32_t excp, +G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome, uint32_t target_el, uintptr_t ra); @@ -173,21 +185,14 @@ static inline int r14_bank_number(int mode) void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu); void arm_translate_init(void); +void arm_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data); + #ifdef CONFIG_TCG void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb); #endif /* CONFIG_TCG */ -/** - * aarch64_sve_zcr_get_valid_len: - * @cpu: cpu context - * @start_len: maximum len to consider - * - * Return the maximum supported sve vector length <= @start_len. - * Note that both @start_len and the return value are in units - * of ZCR_ELx.LEN, so the vector bit length is (x + 1) * 128. - */ -uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len); - enum arm_fprounding { FPROUNDING_TIEEVEN, FPROUNDING_POSINF, @@ -243,24 +248,7 @@ static inline void update_spsel(CPUARMState *env, uint32_t imm) * Returns the implementation defined bit-width of physical addresses. * The ARMv8 reference manuals refer to this as PAMax(). 
*/ -static inline unsigned int arm_pamax(ARMCPU *cpu) -{ - static const unsigned int pamax_map[] = { - [0] = 32, - [1] = 36, - [2] = 40, - [3] = 42, - [4] = 44, - [5] = 48, - }; - unsigned int parange = - FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE); - - /* id_aa64mmfr0 is a read-only register so values outside of the - * supported mappings can be considered an implementation error. */ - assert(parange < ARRAY_SIZE(pamax_map)); - return pamax_map[parange]; -} +unsigned int arm_pamax(ARMCPU *cpu); /* Return true if extended addresses are enabled. * This is always the case if our translation regime is 64 bit, @@ -268,9 +256,9 @@ static inline unsigned int arm_pamax(ARMCPU *cpu) */ static inline bool extended_addresses_enabled(CPUARMState *env) { - TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1]; + uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1]; return arm_el_is_aa64(env, 1) || - (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE)); + (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE)); } /* Update a QEMU watchpoint based on the information the guest has set in the @@ -354,6 +342,7 @@ typedef enum ARMFaultType { ARMFault_AsyncExternal, ARMFault_Debug, ARMFault_TLBConflict, + ARMFault_UnsuppAtomicUpdate, ARMFault_Lockdown, ARMFault_Exclusive, ARMFault_ICacheMaint, @@ -479,28 +468,51 @@ static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi) case ARMFault_None: return 0; case ARMFault_AddressSize: - fsc = fi->level & 3; + assert(fi->level >= -1 && fi->level <= 3); + if (fi->level < 0) { + fsc = 0b101001; + } else { + fsc = fi->level; + } break; case ARMFault_AccessFlag: - fsc = (fi->level & 3) | (0x2 << 2); + assert(fi->level >= 0 && fi->level <= 3); + fsc = 0b001000 | fi->level; break; case ARMFault_Permission: - fsc = (fi->level & 3) | (0x3 << 2); + assert(fi->level >= 0 && fi->level <= 3); + fsc = 0b001100 | fi->level; break; case ARMFault_Translation: - fsc = (fi->level & 3) | (0x1 << 2); + assert(fi->level >= -1 && fi->level <= 3); + if (fi->level < 0) { + fsc = 0b101011; + } else { + fsc = 0b000100 | fi->level; + } break; case ARMFault_SyncExternal: fsc = 0x10 | (fi->ea << 12); break; case ARMFault_SyncExternalOnWalk: - fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12); + assert(fi->level >= -1 && fi->level <= 3); + if (fi->level < 0) { + fsc = 0b010011; + } else { + fsc = 0b010100 | fi->level; + } + fsc |= fi->ea << 12; break; case ARMFault_SyncParity: fsc = 0x18; break; case ARMFault_SyncParityOnWalk: - fsc = (fi->level & 3) | (0x7 << 2); + assert(fi->level >= -1 && fi->level <= 3); + if (fi->level < 0) { + fsc = 0b011011; + } else { + fsc = 0b011100 | fi->level; + } break; case ARMFault_AsyncParity: fsc = 0x19; @@ -517,6 +529,9 @@ static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi) case ARMFault_TLBConflict: fsc = 0x30; break; + case ARMFault_UnsuppAtomicUpdate: + fsc = 0x31; + break; case ARMFault_Lockdown: fsc = 0x34; break; @@ -544,9 +559,17 @@ static inline bool arm_extabort_type(MemTxResult result) return result != MEMTX_DECODE_ERROR; } +#ifdef CONFIG_USER_ONLY +void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + bool maperr, uintptr_t ra); +void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr, + MMUAccessType access_type, uintptr_t ra); +#else bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); +#endif static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx) { @@ -587,14 +610,19 @@ ARMMMUIdx 
arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env, /* Return the MMU index for a v7M CPU in the specified security state */ ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate); -/* Return true if the stage 1 translation regime is using LPAE format page - * tables */ +/* Return true if the translation regime is using LPAE format page tables */ +bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx); + +/* + * Return true if the stage 1 translation regime is using LPAE + * format page tables + */ bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx); /* Raise a data fault alignment exception for the specified virtual address */ -void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); +G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr); /* arm_cpu_do_transaction_failed: handle a memory system error response * (eg "no device/memory present at address") by raising an external abort @@ -629,112 +657,53 @@ static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx) case ARMMMUIdx_Stage1_E0: case ARMMMUIdx_Stage1_E1: case ARMMMUIdx_Stage1_E1_PAN: - case ARMMMUIdx_Stage1_SE0: - case ARMMMUIdx_Stage1_SE1: - case ARMMMUIdx_Stage1_SE1_PAN: case ARMMMUIdx_E10_0: case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: case ARMMMUIdx_E20_0: case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: - case ARMMMUIdx_SE10_0: - case ARMMMUIdx_SE10_1: - case ARMMMUIdx_SE10_1_PAN: - case ARMMMUIdx_SE20_0: - case ARMMMUIdx_SE20_2: - case ARMMMUIdx_SE20_2_PAN: return true; default: return false; } } -/* Return true if this address translation regime is secure */ -static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx) -{ - switch (mmu_idx) { - case ARMMMUIdx_E10_0: - case ARMMMUIdx_E10_1: - case ARMMMUIdx_E10_1_PAN: - case ARMMMUIdx_E20_0: - case ARMMMUIdx_E20_2: - case ARMMMUIdx_E20_2_PAN: - case ARMMMUIdx_Stage1_E0: - case ARMMMUIdx_Stage1_E1: - case ARMMMUIdx_Stage1_E1_PAN: - case ARMMMUIdx_E2: - case ARMMMUIdx_Stage2: - case ARMMMUIdx_MPrivNegPri: - case ARMMMUIdx_MUserNegPri: - case ARMMMUIdx_MPriv: - case ARMMMUIdx_MUser: - return false; - case ARMMMUIdx_SE3: - case ARMMMUIdx_SE10_0: - case ARMMMUIdx_SE10_1: - case ARMMMUIdx_SE10_1_PAN: - case ARMMMUIdx_SE20_0: - case ARMMMUIdx_SE20_2: - case ARMMMUIdx_SE20_2_PAN: - case ARMMMUIdx_Stage1_SE0: - case ARMMMUIdx_Stage1_SE1: - case ARMMMUIdx_Stage1_SE1_PAN: - case ARMMMUIdx_SE2: - case ARMMMUIdx_Stage2_S: - case ARMMMUIdx_MSPrivNegPri: - case ARMMMUIdx_MSUserNegPri: - case ARMMMUIdx_MSPriv: - case ARMMMUIdx_MSUser: - return true; - default: - g_assert_not_reached(); - } -} - static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx) { switch (mmu_idx) { case ARMMMUIdx_Stage1_E1_PAN: - case ARMMMUIdx_Stage1_SE1_PAN: case ARMMMUIdx_E10_1_PAN: case ARMMMUIdx_E20_2_PAN: - case ARMMMUIdx_SE10_1_PAN: - case ARMMMUIdx_SE20_2_PAN: return true; default: return false; } } +static inline bool regime_is_stage2(ARMMMUIdx mmu_idx) +{ + return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S; +} + /* Return the exception level which controls this address translation regime */ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) { switch (mmu_idx) { - case ARMMMUIdx_SE20_0: - case ARMMMUIdx_SE20_2: - case ARMMMUIdx_SE20_2_PAN: case ARMMMUIdx_E20_0: case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: case ARMMMUIdx_Stage2: case 
ARMMMUIdx_Stage2_S: - case ARMMMUIdx_SE2: case ARMMMUIdx_E2: return 2; - case ARMMMUIdx_SE3: + case ARMMMUIdx_E3: return 3; - case ARMMMUIdx_SE10_0: - case ARMMMUIdx_Stage1_SE0: - return arm_el_is_aa64(env, 3) ? 1 : 3; - case ARMMMUIdx_SE10_1: - case ARMMMUIdx_SE10_1_PAN: + case ARMMMUIdx_E10_0: case ARMMMUIdx_Stage1_E0: + return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3; case ARMMMUIdx_Stage1_E1: case ARMMMUIdx_Stage1_E1_PAN: - case ARMMMUIdx_Stage1_SE1: - case ARMMMUIdx_Stage1_SE1_PAN: - case ARMMMUIdx_E10_0: case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: case ARMMMUIdx_MPrivNegPri: @@ -751,45 +720,61 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) } } -/* Return the TCR controlling this translation regime */ -static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx) +static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) +{ + switch (mmu_idx) { + case ARMMMUIdx_E20_0: + case ARMMMUIdx_Stage1_E0: + case ARMMMUIdx_MUser: + case ARMMMUIdx_MSUser: + case ARMMMUIdx_MUserNegPri: + case ARMMMUIdx_MSUserNegPri: + return true; + default: + return false; + case ARMMMUIdx_E10_0: + case ARMMMUIdx_E10_1: + case ARMMMUIdx_E10_1_PAN: + g_assert_not_reached(); + } +} + +/* Return the SCTLR value which controls this address translation regime */ +static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) +{ + return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; +} + +/* + * These are the fields in VTCR_EL2 which affect both the Secure stage 2 + * and the Non-Secure stage 2 translation regimes (and hence which are + * not present in VSTCR_EL2). + */ +#define VTCR_SHARED_FIELD_MASK \ + (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \ + R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \ + R_VTCR_DS_MASK) + +/* Return the value of the TCR controlling this translation regime */ +static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx) { if (mmu_idx == ARMMMUIdx_Stage2) { - return &env->cp15.vtcr_el2; + return env->cp15.vtcr_el2; } if (mmu_idx == ARMMMUIdx_Stage2_S) { /* - * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but - * those are not currently used by QEMU, so just return VSTCR_EL2. + * Secure stage 2 shares fields from VTCR_EL2. We merge those + * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format + * value so the callers don't need to special case this. + * + * If a future architecture change defines bits in VSTCR_EL2 that + * overlap with these VTCR_EL2 fields we may need to revisit this. */ - return &env->cp15.vstcr_el2; - } - return &env->cp15.tcr_el[regime_el(env, mmu_idx)]; -} - -/* Return the FSR value for a debug exception (watchpoint, hardware - * breakpoint or BKPT insn) targeting the specified exception level. 
- */ -static inline uint32_t arm_debug_exception_fsr(CPUARMState *env) -{ - ARMMMUFaultInfo fi = { .type = ARMFault_Debug }; - int target_el = arm_debug_target_el(env); - bool using_lpae = false; - - if (target_el == 2 || arm_el_is_aa64(env, target_el)) { - using_lpae = true; - } else { - if (arm_feature(env, ARM_FEATURE_LPAE) && - (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) { - using_lpae = true; - } - } - - if (using_lpae) { - return arm_fi_to_lfsc(&fi); - } else { - return arm_fi_to_sfsc(&fi); + uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK; + v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK; + return v; } + return env->cp15.tcr_el[regime_el(env, mmu_idx)]; } /** @@ -921,6 +906,14 @@ void arm_cpu_update_virq(ARMCPU *cpu); */ void arm_cpu_update_vfiq(ARMCPU *cpu); +/** + * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit + * + * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request, + * following a change to the HCR_EL2.VSE bit. + */ +void arm_cpu_update_vserr(ARMCPU *cpu); + /** * arm_mmu_idx_el: * @env: The cpu environment @@ -945,11 +938,16 @@ ARMMMUIdx arm_mmu_idx(CPUARMState *env); * Return the ARMMMUIdx for the stage1 traversal for the current regime. */ #ifdef CONFIG_USER_ONLY +static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) +{ + return ARMMMUIdx_Stage1_E0; +} static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env) { return ARMMMUIdx_Stage1_E0; } #else +ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx); ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env); #endif @@ -966,9 +964,6 @@ static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx) case ARMMMUIdx_Stage1_E0: case ARMMMUIdx_Stage1_E1: case ARMMMUIdx_Stage1_E1_PAN: - case ARMMMUIdx_Stage1_SE0: - case ARMMMUIdx_Stage1_SE1: - case ARMMMUIdx_Stage1_SE1_PAN: return true; default: return false; @@ -1035,37 +1030,59 @@ static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id) return valid; } +/* Granule size (i.e. page size) */ +typedef enum ARMGranuleSize { + /* Same order as TG0 encoding */ + Gran4K, + Gran64K, + Gran16K, + GranInvalid, +} ARMGranuleSize; + +/** + * arm_granule_bits: Return address size of the granule in bits + * + * Return the address size of the granule in bits. This corresponds + * to the pseudocode TGxGranuleBits(). + */ +static inline int arm_granule_bits(ARMGranuleSize gran) +{ + switch (gran) { + case Gran64K: + return 16; + case Gran16K: + return 14; + case Gran4K: + return 12; + default: + g_assert_not_reached(); + } +} + /* * Parameters of a given virtual address, as extracted from the * translation control register (TCR) for a given regime. */ typedef struct ARMVAParameters { unsigned tsz : 8; + unsigned ps : 3; + unsigned sh : 2; unsigned select : 1; bool tbi : 1; bool epd : 1; bool hpd : 1; - bool using16k : 1; - bool using64k : 1; + bool tsz_oob : 1; /* tsz has been clamped to legal range */ + bool ds : 1; + bool ha : 1; + bool hd : 1; + ARMGranuleSize gran : 2; } ARMVAParameters; ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, ARMMMUIdx mmu_idx, bool data); -static inline int exception_target_el(CPUARMState *env) -{ - int target_el = MAX(1, arm_current_el(env)); - - /* - * No such thing as secure EL1 if EL3 is aarch32, - * so update the target EL to EL3 in this case. 
-     */
-    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
-        target_el = 3;
-    }
-
-    return target_el;
-}
+int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
+int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
 
 /* Determine if allocation tags are available. */
 static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
@@ -1076,7 +1093,7 @@ static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
         && !(env->cp15.scr_el3 & SCR_ATA)) {
         return false;
     }
-    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
+    if (el < 2 && arm_is_el2_enabled(env)) {
         uint64_t hcr = arm_hcr_el2_eff(env);
         if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
             return false;
@@ -1101,28 +1118,77 @@ typedef struct V8M_SAttributes {
 
 void v8m_security_lookup(CPUARMState *env, uint32_t address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                         V8M_SAttributes *sattrs);
-
-bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
-                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
-                       int *prot, bool *is_subpage,
-                       ARMMMUFaultInfo *fi, uint32_t *mregion);
+                         bool secure, V8M_SAttributes *sattrs);
 
 /* Cacheability and shareability attributes for a memory access */
 typedef struct ARMCacheAttrs {
-    unsigned int attrs:8; /* as in the MAIR register encoding */
+    /*
+     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
+     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
+     */
+    unsigned int attrs:8;
     unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
+    bool is_s2_format:1;
+    bool guarded:1;              /* guarded bit of the v8-64 PTE */
 } ARMCacheAttrs;
 
-bool get_phys_addr(CPUARMState *env, target_ulong address,
-                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
-                   target_ulong *page_size,
-                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+/* Fields that are valid upon success. */
+typedef struct GetPhysAddrResult {
+    CPUTLBEntryFull f;
+    ARMCacheAttrs cacheattrs;
+} GetPhysAddrResult;
+
+/**
+ * get_phys_addr_with_secure: get the physical address for a virtual address
+ * @env: CPUARMState
+ * @address: virtual address to get physical address for
+ * @access_type: 0 for read, 1 for write, 2 for execute
+ * @mmu_idx: MMU index indicating required translation regime
+ * @is_secure: security state for the access
+ * @result: set on translation success.
+ * @fi: set to fault info if the translation fails
+ *
+ * Find the physical address corresponding to the given virtual address,
+ * by doing a translation table walk on MMU based systems or using the
+ * MPU state on MPU based systems.
+ *
+ * Returns false if the translation was successful. Otherwise, the fields of
+ * @result may not be filled in, and the fault information in @fi describes
+ * why the translation aborted; when that is converted to a DFSR/IFSR fault
+ * register value, the following caveats apply:
+ *  * we honour the short vs long DFSR format differences.
+ *  * the WnR bit is never set (the caller must do this).
+ *  * for PMSAv5 based systems we don't bother to return a full FSR format
+ *    value.
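+ *
+ * Minimal caller sketch (illustrative only; the updated m_helper.c callers
+ * later in this patch follow the same pattern via get_phys_addr()):
+ *
+ *   GetPhysAddrResult res = {};
+ *   ARMMMUFaultInfo fi = {};
+ *   if (!get_phys_addr_with_secure(env, addr, MMU_DATA_LOAD, mmu_idx,
+ *                                  is_secure, &res, &fi)) {
+ *       translation succeeded: use res.f.phys_addr, res.f.attrs,
+ *       res.f.prot and res.cacheattrs
+ *   }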
+ */ +bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address, + MMUAccessType access_type, + ARMMMUIdx mmu_idx, bool is_secure, + GetPhysAddrResult *result, ARMMMUFaultInfo *fi) __attribute__((nonnull)); -void arm_log_exception(int idx); +/** + * get_phys_addr: get the physical address for a virtual address + * @env: CPUARMState + * @address: virtual address to get physical address for + * @access_type: 0 for read, 1 for write, 2 for execute + * @mmu_idx: MMU index indicating required translation regime + * @result: set on translation success. + * @fi: set to fault info if the translation fails + * + * Similarly, but use the security regime of @mmu_idx. + */ +bool get_phys_addr(CPUARMState *env, target_ulong address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + GetPhysAddrResult *result, ARMMMUFaultInfo *fi) + __attribute__((nonnull)); + +bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + bool is_secure, GetPhysAddrResult *result, + ARMMMUFaultInfo *fi, uint32_t *mregion); + +void arm_log_exception(CPUState *cs); #endif /* !CONFIG_USER_ONLY */ @@ -1132,10 +1198,6 @@ void arm_log_exception(int idx); */ #define GMID_EL1_BS 6 -/* We associate one allocation tag per 16 bytes, the minimum. */ -#define LOG2_TAG_GRANULE 4 -#define TAG_GRANULE (1 << LOG2_TAG_GRANULE) - /* * SVE predicates are 1/8 the size of SVE vectors, and cannot use * the same simd_desc() encoding due to restrictions on size. @@ -1226,4 +1288,89 @@ enum MVEECIState { /* All other values reserved */ }; +/* Definitions for the PMU registers */ +#define PMCRN_MASK 0xf800 +#define PMCRN_SHIFT 11 +#define PMCRLP 0x80 +#define PMCRLC 0x40 +#define PMCRDP 0x20 +#define PMCRX 0x10 +#define PMCRD 0x8 +#define PMCRC 0x4 +#define PMCRP 0x2 +#define PMCRE 0x1 +/* + * Mask of PMCR bits writable by guest (not including WO bits like C, P, + * which can be written as 1 to trigger behaviour but which stay RAZ). 
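+ *
+ * Illustrative sketch (not from this change) of how such a mask is
+ * typically applied when the guest writes PMCR:
+ *
+ *   env->cp15.c9_pmcr = (env->cp15.c9_pmcr & ~PMCR_WRITABLE_MASK)
+ *                       | (value & PMCR_WRITABLE_MASK);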
+ */ +#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE) + +#define PMXEVTYPER_P 0x80000000 +#define PMXEVTYPER_U 0x40000000 +#define PMXEVTYPER_NSK 0x20000000 +#define PMXEVTYPER_NSU 0x10000000 +#define PMXEVTYPER_NSH 0x08000000 +#define PMXEVTYPER_M 0x04000000 +#define PMXEVTYPER_MT 0x02000000 +#define PMXEVTYPER_EVTCOUNT 0x0000ffff +#define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \ + PMXEVTYPER_NSU | PMXEVTYPER_NSH | \ + PMXEVTYPER_M | PMXEVTYPER_MT | \ + PMXEVTYPER_EVTCOUNT) + +#define PMCCFILTR 0xf8000000 +#define PMCCFILTR_M PMXEVTYPER_M +#define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M) + +static inline uint32_t pmu_num_counters(CPUARMState *env) +{ + ARMCPU *cpu = env_archcpu(env); + + return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT; +} + +/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */ +static inline uint64_t pmu_counter_mask(CPUARMState *env) +{ + return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1); +} + +#ifdef TARGET_AARCH64 +int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg); +int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg); +int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg); +int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg); +void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp); +void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp); +void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp); +void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp); +#endif + +#ifdef CONFIG_USER_ONLY +static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { } +#else +void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu); +#endif + +bool el_is_in_host(CPUARMState *env, int el); + +void aa32_max_features(ARMCPU *cpu); +int exception_target_el(CPUARMState *env); +bool arm_singlestep_active(CPUARMState *env); +bool arm_generate_debug_exceptions(CPUARMState *env); + +/* Add the cpreg definitions for debug related system registers */ +void define_debug_regs(ARMCPU *cpu); + +/* Effective value of MDCR_EL2 */ +static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env) +{ + return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0; +} + +/* Powers of 2 for sve_vq_map et al. 
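+ *
+ * For reference, this expands to 0x808b: bits set for vq = 1, 2, 4, 8
+ * and 16, i.e. vector lengths of 128, 256, 512, 1024 and 2048 bits.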
*/ +#define SVE_VQ_POW2_MAP \ + ((1 << (1 - 1)) | (1 << (2 - 1)) | \ + (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1))) + #endif diff --git a/target/arm/kvm-consts.h b/target/arm/kvm-consts.h index 580f1c1fee..faacf96fdc 100644 --- a/target/arm/kvm-consts.h +++ b/target/arm/kvm-consts.h @@ -77,6 +77,8 @@ MISMATCH_CHECK(QEMU_PSCI_0_1_FN_MIGRATE, KVM_PSCI_FN_MIGRATE); #define QEMU_PSCI_0_2_FN64_AFFINITY_INFO QEMU_PSCI_0_2_FN64(4) #define QEMU_PSCI_0_2_FN64_MIGRATE QEMU_PSCI_0_2_FN64(5) +#define QEMU_PSCI_1_0_FN_PSCI_FEATURES QEMU_PSCI_0_2_FN(10) + MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_SUSPEND, PSCI_0_2_FN_CPU_SUSPEND); MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_OFF, PSCI_0_2_FN_CPU_OFF); MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_ON, PSCI_0_2_FN_CPU_ON); @@ -84,18 +86,22 @@ MISMATCH_CHECK(QEMU_PSCI_0_2_FN_MIGRATE, PSCI_0_2_FN_MIGRATE); MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_SUSPEND, PSCI_0_2_FN64_CPU_SUSPEND); MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_ON, PSCI_0_2_FN64_CPU_ON); MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_MIGRATE, PSCI_0_2_FN64_MIGRATE); +MISMATCH_CHECK(QEMU_PSCI_1_0_FN_PSCI_FEATURES, PSCI_1_0_FN_PSCI_FEATURES); /* PSCI v0.2 return values used by TCG emulation of PSCI */ /* No Trusted OS migration to worry about when offlining CPUs */ #define QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED 2 -/* We implement version 0.2 only */ -#define QEMU_PSCI_0_2_RET_VERSION_0_2 2 +#define QEMU_PSCI_VERSION_0_1 0x00001 +#define QEMU_PSCI_VERSION_0_2 0x00002 +#define QEMU_PSCI_VERSION_1_0 0x10000 +#define QEMU_PSCI_VERSION_1_1 0x10001 MISMATCH_CHECK(QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED, PSCI_0_2_TOS_MP); -MISMATCH_CHECK(QEMU_PSCI_0_2_RET_VERSION_0_2, - (PSCI_VERSION_MAJOR(0) | PSCI_VERSION_MINOR(2))); +/* We don't bother to check every possible version value */ +MISMATCH_CHECK(QEMU_PSCI_VERSION_0_2, PSCI_VERSION(0, 2)); +MISMATCH_CHECK(QEMU_PSCI_VERSION_1_1, PSCI_VERSION(1, 1)); /* PSCI return values (inclusive of all PSCI versions) */ #define QEMU_PSCI_RET_SUCCESS 0 diff --git a/target/arm/kvm-stub.c b/target/arm/kvm-stub.c index 56a7099e6b..965a486b32 100644 --- a/target/arm/kvm-stub.c +++ b/target/arm/kvm-stub.c @@ -15,10 +15,10 @@ bool write_kvmstate_to_list(ARMCPU *cpu) { - abort(); + g_assert_not_reached(); } bool write_list_to_kvmstate(ARMCPU *cpu, int level) { - abort(); + g_assert_not_reached(); } diff --git a/target/arm/kvm.c b/target/arm/kvm.c index d8381ba224..84da49332c 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -13,7 +13,6 @@ #include -#include "qemu-common.h" #include "qemu/timer.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" @@ -70,12 +69,19 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, struct kvm_vcpu_init *init) { int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1; + int max_vm_pa_size; kvmfd = qemu_open_old("/dev/kvm", O_RDWR); if (kvmfd < 0) { goto err; } - vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0); + max_vm_pa_size = ioctl(kvmfd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE); + if (max_vm_pa_size < 0) { + max_vm_pa_size = 0; + } + do { + vmfd = ioctl(kvmfd, KVM_CREATE_VM, max_vm_pa_size); + } while (vmfd == -1 && errno == EINTR); if (vmfd < 0) { goto err; } @@ -274,6 +280,8 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } } + kvm_arm_init_debug(s); + return ret; } @@ -330,6 +338,7 @@ static void kvm_arm_devlistener_del(MemoryListener *listener, } static MemoryListener devlistener = { + .name = "kvm-arm", .region_add = kvm_arm_devlistener_add, .region_del = kvm_arm_devlistener_del, }; @@ -535,7 +544,7 @@ bool write_kvmstate_to_list(ARMCPU *cpu) 
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r); break; default: - abort(); + g_assert_not_reached(); } if (ret) { ok = false; @@ -570,7 +579,7 @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level) r.addr = (uintptr_t)(cpu->cpreg_values + i); break; default: - abort(); + g_assert_not_reached(); } ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r); if (ret) { @@ -954,7 +963,7 @@ void kvm_arch_init_irq_routing(KVMState *s) int kvm_arch_irqchip_create(KVMState *s) { if (kvm_kernel_irqchip_split()) { - perror("-machine kernel_irqchip=split is not supported on ARM."); + error_report("-machine kernel_irqchip=split is not supported on ARM."); exit(1); } @@ -998,7 +1007,6 @@ int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, hwaddr xlat, len, doorbell_gpa; MemoryRegionSection mrs; MemoryRegion *mr; - int ret = 1; if (as == &address_space_memory) { return 0; @@ -1006,15 +1014,19 @@ int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, /* MSI doorbell address is translated by an IOMMU */ - rcu_read_lock(); + RCU_READ_LOCK_GUARD(); + mr = address_space_translate(as, address, &xlat, &len, true, MEMTXATTRS_UNSPECIFIED); + if (!mr) { - goto unlock; + return 1; } + mrs = memory_region_find(mr, xlat, 1); + if (!mrs.mr) { - goto unlock; + return 1; } doorbell_gpa = mrs.offset_within_address_space; @@ -1025,11 +1037,7 @@ int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, trace_kvm_arm_fixup_msi_route(address, doorbell_gpa); - ret = 0; - -unlock: - rcu_read_unlock(); - return ret; + return 0; } int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, @@ -1052,3 +1060,7 @@ bool kvm_arch_cpu_check_are_resettable(void) { return true; } + +void kvm_arch_accel_class_init(ObjectClass *oc) +{ +} diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c index 59982d470d..810db33ccb 100644 --- a/target/arm/kvm64.c +++ b/target/arm/kvm64.c @@ -16,7 +16,6 @@ #include #include -#include "qemu-common.h" #include "qapi/error.h" #include "cpu.h" #include "qemu/timer.h" @@ -75,24 +74,16 @@ GArray *hw_breakpoints, *hw_watchpoints; #define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i)) #define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i)) -/** - * kvm_arm_init_debug() - check for guest debug capabilities - * @cs: CPUState - * - * kvm_check_extension returns the number of debug registers we have - * or 0 if we have none. 
- * - */ -static void kvm_arm_init_debug(CPUState *cs) +void kvm_arm_init_debug(KVMState *s) { - have_guest_debug = kvm_check_extension(cs->kvm_state, + have_guest_debug = kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG); - max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS); + max_hw_wps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_WPS); hw_watchpoints = g_array_sized_new(true, true, sizeof(HWWatchpoint), max_hw_wps); - max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS); + max_hw_bps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_BPS); hw_breakpoints = g_array_sized_new(true, true, sizeof(HWBreakpoint), max_hw_bps); return; @@ -209,7 +200,7 @@ static int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type) { HWWatchpoint wp = { - .wcr = 1, /* E=1, enable */ + .wcr = R_DBGWCR_E_MASK, /* E=1, enable */ .wvr = addr & (~0x7ULL), .details = { .vaddr = addr, .len = len } }; @@ -222,19 +213,19 @@ static int insert_hw_watchpoint(target_ulong addr, * HMC=0 SSC=0 PAC=3 will hit EL0 or EL1, any security state, * valid whether EL3 is implemented or not */ - wp.wcr = deposit32(wp.wcr, 1, 2, 3); + wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, PAC, 3); switch (type) { case GDB_WATCHPOINT_READ: - wp.wcr = deposit32(wp.wcr, 3, 2, 1); + wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, LSC, 1); wp.details.flags = BP_MEM_READ; break; case GDB_WATCHPOINT_WRITE: - wp.wcr = deposit32(wp.wcr, 3, 2, 2); + wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, LSC, 2); wp.details.flags = BP_MEM_WRITE; break; case GDB_WATCHPOINT_ACCESS: - wp.wcr = deposit32(wp.wcr, 3, 2, 3); + wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, LSC, 3); wp.details.flags = BP_MEM_ACCESS; break; default: @@ -253,8 +244,8 @@ static int insert_hw_watchpoint(target_ulong addr, int bits = ctz64(len); wp.wvr &= ~((1 << bits) - 1); - wp.wcr = deposit32(wp.wcr, 24, 4, bits); - wp.wcr = deposit32(wp.wcr, 5, 8, 0xff); + wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, MASK, bits); + wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, BAS, 0xff); } else { return -ENOBUFS; } @@ -491,6 +482,12 @@ static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id) return ioctl(fd, KVM_GET_ONE_REG, &idreg); } +static bool kvm_arm_pauth_supported(void) +{ + return (kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_ADDRESS) && + kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC)); +} + bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) { /* Identify the feature bits corresponding to the host CPU, and @@ -500,8 +497,8 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) */ int fdarray[3]; bool sve_supported; + bool pmu_supported = false; uint64_t features = 0; - uint64_t t; int err; /* Old kernels may not know about the PREFERRED_TARGET ioctl: however @@ -521,6 +518,29 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) */ struct kvm_vcpu_init init = { .target = -1, }; + /* + * Ask for SVE if supported, so that we can query ID_AA64ZFR0, + * which is otherwise RAZ. + */ + sve_supported = kvm_arm_sve_supported(); + if (sve_supported) { + init.features[0] |= 1 << KVM_ARM_VCPU_SVE; + } + + /* + * Ask for Pointer Authentication if supported, so that we get + * the unsanitized field values for AA64ISAR1_EL1. 
+ */ + if (kvm_arm_pauth_supported()) { + init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS | + 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC); + } + + if (kvm_arm_pmu_supported()) { + init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3; + pmu_supported = true; + } + if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) { return false; } @@ -552,6 +572,8 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) } else { err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1, ARM64_SYS_REG(3, 0, 0, 4, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0, + ARM64_SYS_REG(3, 0, 0, 4, 5)); err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0, ARM64_SYS_REG(3, 0, 0, 5, 0)); err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1, @@ -578,8 +600,6 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) ARM64_SYS_REG(3, 0, 0, 1, 0)); err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1, ARM64_SYS_REG(3, 0, 0, 1, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2, - ARM64_SYS_REG(3, 0, 0, 3, 4)); err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0, ARM64_SYS_REG(3, 0, 0, 1, 2)); err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0, @@ -613,6 +633,12 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) ARM64_SYS_REG(3, 0, 0, 3, 1)); err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2, ARM64_SYS_REG(3, 0, 0, 3, 2)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2, + ARM64_SYS_REG(3, 0, 0, 3, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1, + ARM64_SYS_REG(3, 0, 0, 3, 5)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5, + ARM64_SYS_REG(3, 0, 0, 3, 6)); /* * DBGDIDR is a bit complicated because the kernel doesn't @@ -643,24 +669,24 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) dbgdidr |= (1 << 15); /* RES1 bit */ ahcf->isar.dbgdidr = dbgdidr; } - } - sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0; + if (pmu_supported) { + /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */ + err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0, + ARM64_SYS_REG(3, 3, 9, 12, 0)); + } - /* Add feature bits that can't appear until after VCPU init. */ - if (sve_supported) { - t = ahcf->isar.id_aa64pfr0; - t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1); - ahcf->isar.id_aa64pfr0 = t; - - /* - * Before v5.1, KVM did not support SVE and did not expose - * ID_AA64ZFR0_EL1 even as RAZ. After v5.1, KVM still does - * not expose the register to "user" requests like this - * unless the host supports SVE. - */ - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0, - ARM64_SYS_REG(3, 0, 0, 4, 4)); + if (sve_supported) { + /* + * There is a range of kernels between kernel commit 73433762fcae + * and f81cb2c3ad41 which have a bug where the kernel doesn't + * expose SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has + * enabled SVE support, which resulted in an error rather than RAZ. + * So only read the register if we set KVM_ARM_VCPU_SVE above. + */ + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0, + ARM64_SYS_REG(3, 0, 0, 4, 4)); + } } kvm_arm_destroy_scratch_host_vcpu(fdarray); @@ -732,15 +758,13 @@ bool kvm_arm_steal_time_supported(void) QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1); -void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map) +uint32_t kvm_arm_sve_get_vls(CPUState *cs) { /* Only call this function if kvm_arm_sve_supported() returns true. 
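 *
 * Illustrative example (not from this change): a returned map of 0xb
 * (bits 0, 1 and 3 set) means the host supports SVE vector lengths of
 * 128, 256 and 512 bits.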
*/ static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS]; static bool probed; uint32_t vq = 0; - int i, j; - - bitmap_clear(map, 0, ARM_MAX_VQ); + int i; /* * KVM ensures all host CPUs support the same set of vector lengths. @@ -781,46 +805,24 @@ void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map) if (vq > ARM_MAX_VQ) { warn_report("KVM supports vector lengths larger than " "QEMU can enable"); + vls[0] &= MAKE_64BIT_MASK(0, ARM_MAX_VQ); } } - for (i = 0; i < KVM_ARM64_SVE_VLS_WORDS; ++i) { - if (!vls[i]) { - continue; - } - for (j = 1; j <= 64; ++j) { - vq = j + i * 64; - if (vq > ARM_MAX_VQ) { - return; - } - if (vls[i] & (1UL << (j - 1))) { - set_bit(vq - 1, map); - } - } - } + return vls[0]; } static int kvm_arm_sve_set_vls(CPUState *cs) { - uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = {0}; + ARMCPU *cpu = ARM_CPU(cs); + uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map }; struct kvm_one_reg reg = { .id = KVM_REG_ARM64_SVE_VLS, .addr = (uint64_t)&vls[0], }; - ARMCPU *cpu = ARM_CPU(cs); - uint32_t vq; - int i, j; assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX); - for (vq = 1; vq <= cpu->sve_max_vq; ++vq) { - if (test_bit(vq - 1, cpu->sve_vq_map)) { - i = (vq - 1) / 64; - j = (vq - 1) % 64; - vls[i] |= 1UL << j; - } - } - return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); } @@ -832,6 +834,7 @@ int kvm_arch_init_vcpu(CPUState *cs) uint64_t mpidr; ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; + uint64_t psciver; if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE || !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) { @@ -847,7 +850,7 @@ int kvm_arch_init_vcpu(CPUState *cs) cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF; } if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) { - cpu->psci_version = 2; + cpu->psci_version = QEMU_PSCI_VERSION_0_2; cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2; } if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { @@ -865,6 +868,10 @@ int kvm_arch_init_vcpu(CPUState *cs) assert(kvm_arm_sve_supported()); cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE; } + if (cpu_isar_feature(aa64_pauth, cpu)) { + cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS | + 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC); + } /* Do KVM_ARM_VCPU_INIT ioctl */ ret = kvm_arm_vcpu_init(cs); @@ -883,6 +890,17 @@ int kvm_arch_init_vcpu(CPUState *cs) } } + /* + * KVM reports the exact PSCI version it is implementing via a + * special sysreg. If it is present, use its contents to determine + * what to report to the guest in the dtb (it is the PSCI version, + * in the same 15-bits major 16-bits minor format that PSCI_VERSION + * returns). + */ + if (!kvm_get_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver)) { + cpu->psci_version = psciver; + } + /* * When KVM is in use, PSCI is emulated in-kernel and not by qemu. 
* Currently KVM has its own idea about MPIDR assignment, so we @@ -894,8 +912,6 @@ int kvm_arch_init_vcpu(CPUState *cs) } cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK; - kvm_arm_init_debug(cs); - /* Check whether user space can specify guest syndrome value */ kvm_arm_init_serror_injection(cs); @@ -990,7 +1006,7 @@ static int kvm_arch_put_fpsimd(CPUState *cs) for (i = 0; i < 32; i++) { uint64_t *q = aa64_vfp_qreg(env, i); -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN uint64_t fp_val[2] = { q[1], q[0] }; reg.addr = (uintptr_t)fp_val; #else @@ -1209,7 +1225,7 @@ static int kvm_arch_get_fpsimd(CPUState *cs) if (ret) { return ret; } else { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN uint64_t t; t = q[0], q[0] = q[1], q[1] = t; #endif diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h index 34f8daa377..330fbe5c72 100644 --- a/target/arm/kvm_arm.h +++ b/target/arm/kvm_arm.h @@ -18,6 +18,14 @@ #define KVM_ARM_VGIC_V2 (1 << 0) #define KVM_ARM_VGIC_V3 (1 << 1) +/** + * kvm_arm_init_debug() - initialize guest debug capabilities + * @s: KVMState + * + * Should be called only once before using guest debug capabilities. + */ +void kvm_arm_init_debug(KVMState *s); + /** * kvm_arm_vcpu_init: * @cs: CPUState @@ -214,8 +222,6 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, */ void kvm_arm_destroy_scratch_host_vcpu(int *fdarray); -#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU - /** * ARMHostCPUFeatures: information about the host CPU (identified * by asking the host kernel) @@ -241,13 +247,12 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf); /** * kvm_arm_sve_get_vls: * @cs: CPUState - * @map: bitmap to fill in * * Get all the SVE vector lengths supported by the KVM host, setting * the bits corresponding to their length in quadwords minus one - * (vq - 1) in @map up to ARM_MAX_VQ. + * (vq - 1) up to ARM_MAX_VQ. Return the resulting map. */ -void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map); +uint32_t kvm_arm_sve_get_vls(CPUState *cs); /** * kvm_arm_set_cpu_features_from_host: @@ -441,7 +446,7 @@ static inline void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp) g_assert_not_reached(); } -static inline void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map) +static inline uint32_t kvm_arm_sve_get_vls(CPUState *cs) { g_assert_not_reached(); } @@ -525,8 +530,8 @@ static inline const char *its_class_name(void) /* KVM implementation requires this capability */ return kvm_direct_msi_enabled() ? 
"arm-its-kvm" : NULL; } else { - /* Software emulation is not implemented yet */ - return NULL; + /* Software emulation based model */ + return "arm-gicv3-its"; } } diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c index 20761c9487..355cd4d60a 100644 --- a/target/arm/m_helper.c +++ b/target/arm/m_helper.c @@ -19,6 +19,7 @@ #include "qemu/bitops.h" #include "qemu/crc32c.h" #include "qemu/qemu-print.h" +#include "qemu/log.h" #include "exec/exec-all.h" #include /* For crc32 */ #include "semihosting/semihost.h" @@ -182,19 +183,14 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value, { CPUState *cs = CPU(cpu); CPUARMState *env = &cpu->env; - MemTxAttrs attrs = {}; MemTxResult txres; - target_ulong page_size; - hwaddr physaddr; - int prot; + GetPhysAddrResult res = {}; ARMMMUFaultInfo fi = {}; - ARMCacheAttrs cacheattrs = {}; bool secure = mmu_idx & ARM_MMU_IDX_M_S; int exc; bool exc_secure; - if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr, - &attrs, &prot, &page_size, &fi, &cacheattrs)) { + if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &res, &fi)) { /* MPU/SAU lookup failed */ if (fi.type == ARMFault_QEMU_SFault) { if (mode == STACK_LAZYFP) { @@ -227,8 +223,8 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value, } goto pend_fault; } - address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value, - attrs, &txres); + address_space_stl_le(arm_addressspace(cs, res.f.attrs), res.f.phys_addr, + value, res.f.attrs, &txres); if (txres != MEMTX_OK) { /* BusFault trying to write the data */ if (mode == STACK_LAZYFP) { @@ -275,20 +271,15 @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr, { CPUState *cs = CPU(cpu); CPUARMState *env = &cpu->env; - MemTxAttrs attrs = {}; MemTxResult txres; - target_ulong page_size; - hwaddr physaddr; - int prot; + GetPhysAddrResult res = {}; ARMMMUFaultInfo fi = {}; - ARMCacheAttrs cacheattrs = {}; bool secure = mmu_idx & ARM_MMU_IDX_M_S; int exc; bool exc_secure; uint32_t value; - if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr, - &attrs, &prot, &page_size, &fi, &cacheattrs)) { + if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) { /* MPU/SAU lookup failed */ if (fi.type == ARMFault_QEMU_SFault) { qemu_log_mask(CPU_LOG_INT, @@ -307,8 +298,8 @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr, goto pend_fault; } - value = address_space_ldl(arm_addressspace(cs, attrs), physaddr, - attrs, &txres); + value = address_space_ldl(arm_addressspace(cs, res.f.attrs), + res.f.phys_addr, res.f.attrs, &txres); if (txres != MEMTX_OK) { /* BusFault trying to read the data */ qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n"); @@ -563,7 +554,7 @@ void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest) env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; } switch_v7m_security_state(env, dest & 1); - env->thumb = 1; + env->thumb = true; env->regs[15] = dest & ~1; arm_rebuild_hflags(env); } @@ -589,7 +580,7 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) * except that the low bit doesn't indicate Thumb/not. 
*/ env->regs[14] = nextinst; - env->thumb = 1; + env->thumb = true; env->regs[15] = dest & ~1; return; } @@ -625,7 +616,7 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) } env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; switch_v7m_security_state(env, 0); - env->thumb = 1; + env->thumb = true; env->regs[15] = dest; arm_rebuild_hflags(env); } @@ -678,6 +669,10 @@ static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure, ARMMMUIdx mmu_idx; bool exc_secure; + qemu_log_mask(CPU_LOG_INT, + "...loading from element %d of %s vector table at 0x%x\n", + exc, targets_secure ? "secure" : "non-secure", addr); + mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true); /* @@ -694,7 +689,8 @@ static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure, if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { V8M_SAttributes sattrs = {}; - v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs); + v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, + targets_secure, &sattrs); if (sattrs.ns) { attrs.secure = false; } else if (!targets_secure) { @@ -718,6 +714,7 @@ static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure, goto load_fail; } *pvec = vector_entry; + qemu_log_mask(CPU_LOG_INT, "...loaded new PC 0x%x\n", *pvec); return true; load_fail: @@ -1930,7 +1927,7 @@ static bool do_v7m_function_return(ARMCPU *cpu) { bool threadmode, spsel; - TCGMemOpIdx oi; + MemOpIdx oi; ARMMMUIdx mmu_idx; uint32_t *frame_sp_p; uint32_t frameptr; @@ -1947,9 +1944,9 @@ static bool do_v7m_function_return(ARMCPU *cpu) * do them as secure, so work out what MMU index that is. */ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); - oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx)); - newpc = helper_le_ldul_mmu(env, frameptr, oi, 0); - newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0); + oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx)); + newpc = cpu_ldl_le_mmu(env, frameptr, oi, 0); + newpsr = cpu_ldl_le_mmu(env, frameptr + 4, oi, 0); /* Consistency checks on new IPSR */ newpsr_exc = newpsr & XPSR_EXCP; @@ -1984,7 +1981,7 @@ static bool do_v7m_function_return(ARMCPU *cpu) return true; } -static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, +static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure, uint32_t addr, uint16_t *insn) { /* @@ -2002,15 +1999,11 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, CPUState *cs = CPU(cpu); CPUARMState *env = &cpu->env; V8M_SAttributes sattrs = {}; - MemTxAttrs attrs = {}; + GetPhysAddrResult res = {}; ARMMMUFaultInfo fi = {}; - ARMCacheAttrs cacheattrs = {}; MemTxResult txres; - target_ulong page_size; - hwaddr physaddr; - int prot; - v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs); + v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, secure, &sattrs); if (!sattrs.nsc || sattrs.ns) { /* * This must be the second half of the insn, and it straddles a @@ -2022,16 +2015,15 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, "...really SecureFault with SFSR.INVEP\n"); return false; } - if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &physaddr, - &attrs, &prot, &page_size, &fi, &cacheattrs)) { + if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &res, &fi)) { /* the MPU lookup failed */ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure); qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n"); return false; } - 
*insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr, - attrs, &txres); + *insn = address_space_lduw_le(arm_addressspace(cs, res.f.attrs), + res.f.phys_addr, res.f.attrs, &txres); if (txres != MEMTX_OK) { env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK; armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); @@ -2054,17 +2046,12 @@ static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx, */ CPUState *cs = CPU(cpu); CPUARMState *env = &cpu->env; - MemTxAttrs attrs = {}; MemTxResult txres; - target_ulong page_size; - hwaddr physaddr; - int prot; + GetPhysAddrResult res = {}; ARMMMUFaultInfo fi = {}; - ARMCacheAttrs cacheattrs = {}; uint32_t value; - if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr, - &attrs, &prot, &page_size, &fi, &cacheattrs)) { + if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) { /* MPU/SAU lookup failed */ if (fi.type == ARMFault_QEMU_SFault) { qemu_log_mask(CPU_LOG_INT, @@ -2082,8 +2069,8 @@ static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx, } return false; } - value = address_space_ldl(arm_addressspace(cs, attrs), physaddr, - attrs, &txres); + value = address_space_ldl(arm_addressspace(cs, res.f.attrs), + res.f.phys_addr, res.f.attrs, &txres); if (txres != MEMTX_OK) { /* BusFault trying to read the data */ qemu_log_mask(CPU_LOG_INT, @@ -2121,7 +2108,7 @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu) /* We want to do the MPU lookup as secure; work out what mmu_idx that is */ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); - if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) { + if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15], &insn)) { return false; } @@ -2137,7 +2124,7 @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu) goto gen_invep; } - if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) { + if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15] + 2, &insn)) { return false; } @@ -2206,7 +2193,7 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs) uint32_t lr; bool ignore_stackfaults; - arm_log_exception(cs->exception_index); + arm_log_exception(cs); /* * For exceptions we just mark as pending on the NVIC, and let that @@ -2252,6 +2239,10 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs) armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK; break; + case EXCP_DIVBYZERO: + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_DIVBYZERO_MASK; + break; case EXCP_SWI: /* The PC already points to the next instruction. */ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure); @@ -2262,7 +2253,13 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs) * Note that for M profile we don't have a guest facing FSR, but * the env->exception.fsr will be populated by the code that * raises the fault, in the A profile short-descriptor format. + * + * Log the exception.vaddress now regardless of subtype, because + * logging below only logs it when it goes into a guest visible + * register. 
*/ + qemu_log_mask(CPU_LOG_INT, "...at fault address 0x%x\n", + (uint32_t)env->exception.vaddress); switch (env->exception.fsr & 0xf) { case M_FAKE_FSR_NSC_EXEC: /* @@ -2357,7 +2354,7 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs) "...handling as semihosting call 0x%x\n", env->regs[0]); #ifdef CONFIG_TCG - env->regs[0] = do_common_semihosting(cs); + do_common_semihosting(cs); #else g_assert_not_reached(); #endif @@ -2774,15 +2771,10 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) V8M_SAttributes sattrs = {}; uint32_t tt_resp; bool r, rw, nsr, nsrw, mrvalid; - int prot; - ARMMMUFaultInfo fi = {}; - MemTxAttrs attrs = {}; - hwaddr phys_addr; ARMMMUIdx mmu_idx; uint32_t mregion; bool targetpriv; bool targetsec = env->v7m.secure; - bool is_subpage; /* * Work out what the security state and privilege level we're @@ -2813,18 +2805,20 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) * inspecting the other MPU state. */ if (arm_current_el(env) != 0 || alt) { + GetPhysAddrResult res = {}; + ARMMMUFaultInfo fi = {}; + /* We can ignore the return value as prot is always set */ - pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, - &phys_addr, &attrs, &prot, &is_subpage, - &fi, &mregion); + pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, targetsec, + &res, &fi, &mregion); if (mregion == -1) { mrvalid = false; mregion = 0; } else { mrvalid = true; } - r = prot & PAGE_READ; - rw = prot & PAGE_WRITE; + r = res.f.prot & PAGE_READ; + rw = res.f.prot & PAGE_WRITE; } else { r = false; rw = false; @@ -2833,7 +2827,8 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) } if (env->v7m.secure) { - v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs); + v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, + targetsec, &sattrs); nsr = sattrs.ns && r; nsrw = sattrs.ns && rw; } else { diff --git a/target/arm/machine.c b/target/arm/machine.c index 81e30de824..54c5c62433 100644 --- a/target/arm/machine.c +++ b/target/arm/machine.c @@ -167,6 +167,39 @@ static const VMStateDescription vmstate_sve = { VMSTATE_END_OF_LIST() } }; + +static const VMStateDescription vmstate_vreg = { + .name = "vreg", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64_ARRAY(d, ARMVectorReg, ARM_MAX_VQ * 2), + VMSTATE_END_OF_LIST() + } +}; + +static bool za_needed(void *opaque) +{ + ARMCPU *cpu = opaque; + + /* + * When ZA storage is disabled, its contents are discarded. + * It will be zeroed when ZA storage is re-enabled. + */ + return FIELD_EX64(cpu->env.svcr, SVCR, ZA); +} + +static const VMStateDescription vmstate_za = { + .name = "cpu/sme", + .version_id = 1, + .minimum_version_id = 1, + .needed = za_needed, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(env.zarray, ARMCPU, ARM_MAX_VQ * 16, 0, + vmstate_vreg, ARMVectorReg), + VMSTATE_END_OF_LIST() + } +}; #endif /* AARCH64 */ static bool serror_needed(void *opaque) @@ -661,7 +694,7 @@ static int cpu_pre_save(void *opaque) if (kvm_enabled()) { if (!write_kvmstate_to_list(cpu)) { /* This should never fail */ - abort(); + g_assert_not_reached(); } /* @@ -672,7 +705,7 @@ static int cpu_pre_save(void *opaque) } else { if (!write_cpustate_to_list(cpu, false)) { /* This should never fail. 
*/ - abort(); + g_assert_not_reached(); } } @@ -781,6 +814,29 @@ static int cpu_post_load(void *opaque, int version_id) hw_breakpoint_update_all(cpu); hw_watchpoint_update_all(cpu); + /* + * TCG gen_update_fp_context() relies on the invariant that + * FPDSCR.LTPSIZE is constant 4 for M-profile with the LOB extension; + * forbid bogus incoming data with some other value. + */ + if (arm_feature(env, ARM_FEATURE_M) && cpu_isar_feature(aa32_lob, cpu)) { + if (extract32(env->v7m.fpdscr[M_REG_NS], + FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4 || + extract32(env->v7m.fpdscr[M_REG_S], + FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4) { + return -1; + } + } + + /* + * Misaligned thumb pc is architecturally impossible. + * We have an assert in thumb_tr_translate_insn to verify this. + * Fail an incoming migrate to avoid this assert. + */ + if (!is_a64(env) && env->thumb && (env->regs[15] & 1)) { + return -1; + } + if (!kvm_enabled()) { pmu_op_finish(&cpu->env); } @@ -861,6 +917,7 @@ const VMStateDescription vmstate_arm_cpu = { &vmstate_m_security, #ifdef TARGET_AARCH64 &vmstate_sve, + &vmstate_za, #endif &vmstate_serror, &vmstate_irq_line_state, diff --git a/target/arm/meson.build b/target/arm/meson.build index 25a02bf276..87e911b27f 100644 --- a/target/arm/meson.build +++ b/target/arm/meson.build @@ -1,5 +1,7 @@ gen = [ decodetree.process('sve.decode', extra_args: '--decode=disas_sve'), + decodetree.process('sme.decode', extra_args: '--decode=disas_sme'), + decodetree.process('sme-fa64.decode', extra_args: '--static-decode=disas_sme_fa64'), decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'), decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'), decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'), @@ -47,8 +49,10 @@ arm_ss.add(when: 'TARGET_AARCH64', if_true: files( 'mte_helper.c', 'pauth_helper.c', 'sve_helper.c', + 'sme_helper.c', 'translate-a64.c', 'translate-sve.c', + 'translate-sme.c', )) arm_softmmu_ss = ss.source_set() @@ -58,7 +62,10 @@ arm_softmmu_ss.add(files( 'machine.c', 'monitor.c', 'psci.c', + 'ptw.c', )) +subdir('hvf') + target_arch += {'arm': arm_ss} target_softmmu_arch += {'arm': arm_softmmu_ss} diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c index 724175210b..86b3754838 100644 --- a/target/arm/mte_helper.c +++ b/target/arm/mte_helper.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "internals.h" #include "exec/exec-all.h" @@ -84,10 +85,8 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, uintptr_t index; if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) { - /* SIGSEGV */ - arm_cpu_tlb_fill(env_cpu(env), ptr, ptr_size, ptr_access, - ptr_mmu_idx, false, ra); - g_assert_not_reached(); + cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access, + !(flags & PAGE_VALID), ra); } /* Require both MAP_ANON and PROT_MTE for the page. 
*/ @@ -96,20 +95,14 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, } tags = page_get_target_data(clean_ptr); - if (tags == NULL) { - size_t alloc_size = TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1); - tags = page_alloc_target_data(clean_ptr, alloc_size); - assert(tags != NULL); - } index = extract32(ptr, LOG2_TAG_GRANULE + 1, TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1); return tags + index; #else - uintptr_t index; - CPUIOTLBEntry *iotlbentry; + CPUTLBEntryFull *full; + MemTxAttrs attrs; int in_page, flags; - ram_addr_t ptr_ra; hwaddr ptr_paddr, tag_paddr, xlat; MemoryRegion *mr; ARMASIdx tag_asi; @@ -125,30 +118,12 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, * valid. Indicate to probe_access_flags no-fault, then assert that * we received a valid page. */ - flags = probe_access_flags(env, ptr, ptr_access, ptr_mmu_idx, - ra == 0, &host, ra); + flags = probe_access_full(env, ptr, ptr_access, ptr_mmu_idx, + ra == 0, &host, &full, ra); assert(!(flags & TLB_INVALID_MASK)); - /* - * Find the iotlbentry for ptr. This *must* be present in the TLB - * because we just found the mapping. - * TODO: Perhaps there should be a cputlb helper that returns a - * matching tlb entry + iotlb entry. - */ - index = tlb_index(env, ptr_mmu_idx, ptr); -# ifdef CONFIG_DEBUG_TCG - { - CPUTLBEntry *entry = tlb_entry(env, ptr_mmu_idx, ptr); - target_ulong comparator = (ptr_access == MMU_DATA_LOAD - ? entry->addr_read - : tlb_addr_write(entry)); - g_assert(tlb_hit(comparator, ptr)); - } -# endif - iotlbentry = &env_tlb(env)->d[ptr_mmu_idx].iotlb[index]; - /* If the virtual page MemAttr != Tagged, access unchecked. */ - if (!arm_tlb_mte_tagged(&iotlbentry->attrs)) { + if (full->pte_attrs != 0xf0) { return NULL; } @@ -163,6 +138,14 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, return NULL; } + /* + * Remember these values across the second lookup below, + * which may invalidate this pointer via tlb resize. + */ + ptr_paddr = full->phys_addr; + attrs = full->attrs; + full = NULL; + /* * The Normal memory access can extend to the next page. E.g. a single * 8-byte access to the last byte of a page will check only the last @@ -171,9 +154,8 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, */ in_page = -(ptr | TARGET_PAGE_MASK); if (unlikely(ptr_size > in_page)) { - void *ignore; - flags |= probe_access_flags(env, ptr + in_page, ptr_access, - ptr_mmu_idx, ra == 0, &ignore, ra); + flags |= probe_access_full(env, ptr + in_page, ptr_access, + ptr_mmu_idx, ra == 0, &host, &full, ra); assert(!(flags & TLB_INVALID_MASK)); } @@ -181,33 +163,17 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, if (unlikely(flags & TLB_WATCHPOINT)) { int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE; assert(ra != 0); - cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, - iotlbentry->attrs, wp, ra); + cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra); } - /* - * Find the physical address within the normal mem space. - * The memory region lookup must succeed because TLB_MMIO was - * not set in the cputlb lookup above. - */ - mr = memory_region_from_host(host, &ptr_ra); - tcg_debug_assert(mr != NULL); - tcg_debug_assert(memory_region_is_ram(mr)); - ptr_paddr = ptr_ra; - do { - ptr_paddr += mr->addr; - mr = mr->container; - } while (mr); - /* Convert to the physical address in tag space. */ tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1); /* Look up the address in tag space. 
*/ - tag_asi = iotlbentry->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS; + tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS; tag_as = cpu_get_address_space(env_cpu(env), tag_asi); mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL, - tag_access == MMU_DATA_STORE, - iotlbentry->attrs); + tag_access == MMU_DATA_STORE, attrs); /* * Note that @mr will never be NULL. If there is nothing in the address diff --git a/target/arm/mve.decode b/target/arm/mve.decode index 595d97568e..14a4f39802 100644 --- a/target/arm/mve.decode +++ b/target/arm/mve.decode @@ -26,6 +26,14 @@ # VQDMULL has size in bit 28: 0 for 16 bit, 1 for 32 bit %size_28 28:1 !function=plus_1 +# 2 operand fp insns have size in bit 20: 1 for 16 bit, 0 for 32 bit, +# like Neon FP insns. +%2op_fp_size 20:1 !function=neon_3same_fp_size +# VCADD is an exception, where bit 20 is 0 for 16 bit and 1 for 32 bit +%2op_fp_size_rev 20:1 !function=plus_1 +# FP scalars have size in bit 28, 1 for 16 bit, 0 for 32 bit +%2op_fp_scalar_size 28:1 !function=neon_3same_fp_size + # 1imm format immediate %imm_28_16_0 28:1 16:3 0:4 @@ -35,11 +43,35 @@ &2scalar qd qn rm size &1imm qd imm cmode op &2shift qd qm shift size +&vidup qd rn size imm +&viwdup qd rn rm size imm +&vcmp qm qn size mask +&vcmp_scalar qn rm size mask +&shl_scalar qda rm size +&vmaxv qm rda size +&vabav qn qm rda size +&vldst_sg qd qm rn size msize os +&vldst_sg_imm qd qm a w imm +&vldst_il qd rn size pat w + +# scatter-gather memory size is in bits 6:4 +%sg_msize 6:1 4:1 @vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0 # Note that both Rn and Qd are 3 bits only (no D bit) @vldst_wn ... u:1 ... . . . . l:1 . rn:3 qd:3 . ... .. imm:7 &vldr_vstr +@vldst_sg .... .... .... rn:4 .... ... size:2 ... ... os:1 &vldst_sg \ + qd=%qd qm=%qm msize=%sg_msize + +# Qm is in the fields usually labeled Qn +@vldst_sg_imm .... .... a:1 . w:1 . .... .... .... . imm:7 &vldst_sg_imm \ + qd=%qd qm=%qn + +# Deinterleaving load/interleaving store +@vldst_il .... .... .. w:1 . rn:4 .... ... size:2 pat:2 ..... &vldst_il \ + qd=%qd + @1op .... .... .... size:2 .. .... .... .... .... &1op qd=%qd qm=%qm @1op_nosz .... .... .... .... .... .... .... .... &1op qd=%qd qm=%qm size=0 @2op .... .... .. size:2 .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn @@ -84,6 +116,42 @@ @2_shr_w .... .... .. 1 ..... .... .... .... .... &2shift qd=%qd qm=%qm \ size=2 shift=%rshift_i5 +@shl_scalar .... .... .... size:2 .. .... .... .... rm:4 &shl_scalar qda=%qd + +# Vector comparison; 4-bit Qm but 3-bit Qn +%mask_22_13 22:1 13:3 +@vcmp .... .... .. size:2 qn:3 . .... .... .... .... &vcmp qm=%qm mask=%mask_22_13 +@vcmp_scalar .... .... .. size:2 qn:3 . .... .... .... rm:4 &vcmp_scalar \ + mask=%mask_22_13 + +@vcmp_fp .... .... .... qn:3 . .... .... .... .... &vcmp \ + qm=%qm size=%2op_fp_scalar_size mask=%mask_22_13 + +# Bit 28 is a 2op_fp_scalar_size bit, but we do not decode it in this +# format to avoid complicated overlapping-instruction-groups +@vcmp_fp_scalar .... .... .... qn:3 . .... .... .... rm:4 &vcmp_scalar \ + mask=%mask_22_13 + +@vmaxv .... .... .... size:2 .. rda:4 .... .... .... &vmaxv qm=%qm + +@2op_fp .... .... .... .... .... .... .... .... &2op \ + qd=%qd qn=%qn qm=%qm size=%2op_fp_size + +@2op_fp_size_rev .... .... .... .... .... .... .... .... &2op \ + qd=%qd qn=%qn qm=%qm size=%2op_fp_size_rev + +# 2-operand, but Qd and Qn share a field. Size is in bit 28, but we +# don't decode it in this format +@vmaxnma .... .... .... .... .... .... .... .... 
&2op \ + qd=%qd qn=%qd qm=%qm + +# Here also we don't decode the bit 28 size in the format to avoid +# awkward nested overlap groups +@vmaxnmv .... .... .... .... rda:4 .... .... .... &vmaxv qm=%qm + +@2op_fp_scalar .... .... .... .... .... .... .... rm:4 &2scalar \ + qd=%qd qn=%qn size=%2op_fp_scalar_size + # Vector loads and stores # Widening loads and narrowing stores: @@ -119,6 +187,26 @@ VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111101 ....... @vldr_vstr \ VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111110 ....... @vldr_vstr \ size=2 p=1 +# gather loads/scatter stores +VLDR_S_sg 111 0 1100 1 . 01 .... ... 0 111 . .... .... @vldst_sg +VLDR_U_sg 111 1 1100 1 . 01 .... ... 0 111 . .... .... @vldst_sg +VSTR_sg 111 0 1100 1 . 00 .... ... 0 111 . .... .... @vldst_sg + +VLDRW_sg_imm 111 1 1101 ... 1 ... 0 ... 1 1110 .... .... @vldst_sg_imm +VLDRD_sg_imm 111 1 1101 ... 1 ... 0 ... 1 1111 .... .... @vldst_sg_imm +VSTRW_sg_imm 111 1 1101 ... 0 ... 0 ... 1 1110 .... .... @vldst_sg_imm +VSTRD_sg_imm 111 1 1101 ... 0 ... 0 ... 1 1111 .... .... @vldst_sg_imm + +# deinterleaving loads/interleaving stores +VLD2 1111 1100 1 .. 1 .... ... 1 111 .. .. 00000 @vldst_il +VLD4 1111 1100 1 .. 1 .... ... 1 111 .. .. 00001 @vldst_il +VST2 1111 1100 1 .. 0 .... ... 1 111 .. .. 00000 @vldst_il +VST4 1111 1100 1 .. 0 .... ... 1 111 .. .. 00001 @vldst_il + +# Moves between 2 32-bit vector lanes and 2 general purpose registers +VMOV_to_2gp 1110 1100 0 . 00 rt2:4 ... 0 1111 000 idx:1 rt:4 qd=%qd +VMOV_from_2gp 1110 1100 0 . 01 rt2:4 ... 0 1111 000 idx:1 rt:4 qd=%qd + # Vector 2-op VAND 1110 1111 0 . 00 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz VBIC 1110 1111 0 . 01 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz @@ -133,30 +221,60 @@ VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op # The VSHLL T2 encoding is not a @2op pattern, but is here because it # overlaps what would be size=0b11 VMULH/VRMULH { + VCVTB_SH 111 0 1110 0 . 11 1111 ... 0 1110 0 0 . 0 ... 1 @1op_nosz + + VMAXNMA 111 0 1110 0 . 11 1111 ... 0 1110 1 0 . 0 ... 1 @vmaxnma size=2 + VSHLL_BS 111 0 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_b VSHLL_BS 111 0 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_h + VQMOVUNB 111 0 1110 0 . 11 .. 01 ... 0 1110 1 0 . 0 ... 1 @1op + VQMOVN_BS 111 0 1110 0 . 11 .. 11 ... 0 1110 0 0 . 0 ... 1 @1op + + VMAXA 111 0 1110 0 . 11 .. 11 ... 0 1110 1 0 . 0 ... 1 @1op + VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op } { + VCVTB_HS 111 1 1110 0 . 11 1111 ... 0 1110 0 0 . 0 ... 1 @1op_nosz + + VMAXNMA 111 1 1110 0 . 11 1111 ... 0 1110 1 0 . 0 ... 1 @vmaxnma size=1 + VSHLL_BU 111 1 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_b VSHLL_BU 111 1 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_h + VMOVNB 111 1 1110 0 . 11 .. 01 ... 0 1110 1 0 . 0 ... 1 @1op + VQMOVN_BU 111 1 1110 0 . 11 .. 11 ... 0 1110 0 0 . 0 ... 1 @1op + VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op } { + VCVTT_SH 111 0 1110 0 . 11 1111 ... 1 1110 0 0 . 0 ... 1 @1op_nosz + + VMINNMA 111 0 1110 0 . 11 1111 ... 1 1110 1 0 . 0 ... 1 @vmaxnma size=2 VSHLL_TS 111 0 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_b VSHLL_TS 111 0 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_h + VQMOVUNT 111 0 1110 0 . 11 .. 01 ... 1 1110 1 0 . 0 ... 1 @1op + VQMOVN_TS 111 0 1110 0 . 11 .. 11 ... 1 1110 0 0 . 0 ... 1 @1op + + VMINA 111 0 1110 0 . 11 .. 11 ... 1 1110 1 0 . 0 ... 1 @1op + VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op } { + VCVTT_HS 111 1 1110 0 . 11 1111 ... 
1 1110 0 0 . 0 ... 1 @1op_nosz + + VMINNMA 111 1 1110 0 . 11 1111 ... 1 1110 1 0 . 0 ... 1 @vmaxnma size=1 VSHLL_TU 111 1 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_b VSHLL_TU 111 1 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_h + VMOVNT 111 1 1110 0 . 11 .. 01 ... 1 1110 1 0 . 0 ... 1 @1op + VQMOVN_TU 111 1 1110 0 . 11 .. 11 ... 1 1110 0 0 . 0 ... 1 @1op + VRMULH_U 111 1 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op } @@ -173,10 +291,16 @@ VHADD_U 111 1 1111 0 . .. ... 0 ... 0 0000 . 1 . 0 ... 0 @2op VHSUB_S 111 0 1111 0 . .. ... 0 ... 0 0010 . 1 . 0 ... 0 @2op VHSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 0 ... 0 @2op -VMULL_BS 111 0 1110 0 . .. ... 1 ... 0 1110 . 0 . 0 ... 0 @2op -VMULL_BU 111 1 1110 0 . .. ... 1 ... 0 1110 . 0 . 0 ... 0 @2op -VMULL_TS 111 0 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op -VMULL_TU 111 1 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op +{ + VMULLP_B 111 . 1110 0 . 11 ... 1 ... 0 1110 . 0 . 0 ... 0 @2op_sz28 + VMULL_BS 111 0 1110 0 . .. ... 1 ... 0 1110 . 0 . 0 ... 0 @2op + VMULL_BU 111 1 1110 0 . .. ... 1 ... 0 1110 . 0 . 0 ... 0 @2op +} +{ + VMULLP_T 111 . 1110 0 . 11 ... 1 ... 1 1110 . 0 . 0 ... 0 @2op_sz28 + VMULL_TS 111 0 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op + VMULL_TU 111 1 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op +} VQDMULH 1110 1111 0 . .. ... 0 ... 0 1011 . 1 . 0 ... 0 @2op VQRDMULH 1111 1111 0 . .. ... 0 ... 0 1011 . 1 . 0 ... 0 @2op @@ -198,15 +322,29 @@ VQSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev VQRSHL_S 111 0 1111 0 . .. ... 0 ... 0 0101 . 1 . 1 ... 0 @2op_rev VQRSHL_U 111 1 1111 0 . .. ... 0 ... 0 0101 . 1 . 1 ... 0 @2op_rev -VQDMLADH 1110 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 0 @2op -VQDMLADHX 1110 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op -VQRDMLADH 1110 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op -VQRDMLADHX 1110 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op +{ + VCMUL0 111 . 1110 0 . 11 ... 0 ... 0 1110 . 0 . 0 ... 0 @2op_sz28 + VQDMLADH 1110 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 0 @2op + VQDMLSDH 1111 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 0 @2op +} -VQDMLSDH 1111 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 0 @2op -VQDMLSDHX 1111 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op -VQRDMLSDH 1111 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op -VQRDMLSDHX 1111 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op +{ + VCMUL180 111 . 1110 0 . 11 ... 0 ... 1 1110 . 0 . 0 ... 0 @2op_sz28 + VQDMLADHX 111 0 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op + VQDMLSDHX 111 1 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op +} + +{ + VCMUL90 111 . 1110 0 . 11 ... 0 ... 0 1110 . 0 . 0 ... 1 @2op_sz28 + VQRDMLADH 111 0 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op + VQRDMLSDH 111 1 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op +} + +{ + VCMUL270 111 . 1110 0 . 11 ... 0 ... 1 1110 . 0 . 0 ... 1 @2op_sz28 + VQRDMLADHX 111 0 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op + VQRDMLSDHX 111 1 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op +} VQDMULLB 111 . 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 1 @2op_sz28 VQDMULLT 111 . 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 1 @2op_sz28 @@ -244,6 +382,9 @@ VABS_fp 1111 1111 1 . 11 .. 01 ... 0 0111 01 . 0 ... 0 @1op VNEG 1111 1111 1 . 11 .. 01 ... 0 0011 11 . 0 ... 0 @1op VNEG_fp 1111 1111 1 . 11 .. 01 ... 0 0111 11 . 0 ... 0 @1op +VQABS 1111 1111 1 . 11 .. 00 ... 0 0111 01 . 0 ... 0 @1op +VQNEG 1111 1111 1 . 11 .. 00 ... 0 0111 11 . 0 ... 0 @1op + &vdup qd rt size # Qd is in the fields usually named Qn @vdup .... .... . . .. ... . 
rt:4 .... . . . . .... qd=%qn &vdup @@ -253,6 +394,31 @@ VDUP 1110 1110 1 1 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=0 VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 1 1 0000 @vdup size=1 VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=2 +# Incrementing and decrementing dup + +# VIDUP, VDDUP format immediate: 1 << (immh:imml) +%imm_vidup 7:1 0:1 !function=vidup_imm + +# VIDUP, VDDUP registers: Rm bits [3:1] from insn, bit 0 is 1; +# Rn bits [3:1] from insn, bit 0 is 0 +%vidup_rm 1:3 !function=times_2_plus_1 +%vidup_rn 17:3 !function=times_2 + +@vidup .... .... . . size:2 .... .... .... .... .... \ + qd=%qd imm=%imm_vidup rn=%vidup_rn &vidup +@viwdup .... .... . . size:2 .... .... .... .... .... \ + qd=%qd imm=%imm_vidup rm=%vidup_rm rn=%vidup_rn &viwdup +{ + VIDUP 1110 1110 0 . .. ... 1 ... 0 1111 . 110 111 . @vidup + VIWDUP 1110 1110 0 . .. ... 1 ... 0 1111 . 110 ... . @viwdup +} +{ + VCMPGT_fp_scalar 1110 1110 0 . 11 ... 1 ... 1 1111 0110 .... @vcmp_fp_scalar size=2 + VCMPLE_fp_scalar 1110 1110 0 . 11 ... 1 ... 1 1111 1110 .... @vcmp_fp_scalar size=2 + VDDUP 1110 1110 0 . .. ... 1 ... 1 1111 . 110 111 . @vidup + VDWDUP 1110 1110 0 . .. ... 1 ... 1 1111 . 110 ... . @viwdup +} + # multiply-add long dual accumulate # rdahi: bits [3:1] from insn, bit 0 is 1 # rdalo: bits [3:1] from insn, bit 0 is 0 @@ -262,30 +428,112 @@ VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=2 %size_16 16:1 !function=plus_1 &vmlaldav rdahi rdalo size qn qm x a +&vmladav rda size qn qm x a -@vmlaldav .... .... . ... ... . ... . .... .... qm:3 . \ +@vmlaldav .... .... . ... ... . ... x:1 .... .. a:1 . qm:3 . \ qn=%qn rdahi=%rdahi rdalo=%rdalo size=%size_16 &vmlaldav -@vmlaldav_nosz .... .... . ... ... . ... . .... .... qm:3 . \ +@vmlaldav_nosz .... .... . ... ... . ... x:1 .... .. a:1 . qm:3 . \ qn=%qn rdahi=%rdahi rdalo=%rdalo size=0 &vmlaldav -VMLALDAV_S 1110 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 0 @vmlaldav -VMLALDAV_U 1111 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 0 @vmlaldav +@vmladav .... .... .... ... . ... x:1 .... . . a:1 . qm:3 . \ + qn=%qn rda=%rdalo size=%size_16 &vmladav +@vmladav_nosz .... .... .... ... . ... x:1 .... . . a:1 . qm:3 . \ + qn=%qn rda=%rdalo size=0 &vmladav -VMLSLDAV 1110 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 1 @vmlaldav +{ + VMLADAV_S 1110 1110 1111 ... . ... . 1110 . 0 . 0 ... 0 @vmladav + VMLALDAV_S 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav +} +{ + VMLADAV_U 1111 1110 1111 ... . ... . 1110 . 0 . 0 ... 0 @vmladav + VMLALDAV_U 1111 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav +} -VRMLALDAVH_S 1110 1110 1 ... ... 0 ... x:1 1111 . 0 a:1 0 ... 0 @vmlaldav_nosz -VRMLALDAVH_U 1111 1110 1 ... ... 0 ... x:1 1111 . 0 a:1 0 ... 0 @vmlaldav_nosz +{ + VMLSDAV 1110 1110 1111 ... . ... . 1110 . 0 . 0 ... 1 @vmladav + VMLSLDAV 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 1 @vmlaldav +} -VRMLSLDAVH 1111 1110 1 ... ... 0 ... x:1 1110 . 0 a:1 0 ... 1 @vmlaldav_nosz +{ + VMLSDAV 1111 1110 1111 ... 0 ... . 1110 . 0 . 0 ... 1 @vmladav_nosz + VRMLSLDAVH 1111 1110 1 ... ... 0 ... . 1110 . 0 . 0 ... 1 @vmlaldav_nosz +} + +VMLADAV_S 1110 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 1 @vmladav_nosz +VMLADAV_U 1111 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 1 @vmladav_nosz + +{ + [ + VMAXNMAV 1110 1110 1110 11 00 .... 1111 0 0 . 0 ... 0 @vmaxnmv size=2 + VMINNMAV 1110 1110 1110 11 00 .... 1111 1 0 . 0 ... 0 @vmaxnmv size=2 + VMAXNMV 1110 1110 1110 11 10 .... 1111 0 0 . 0 ... 0 @vmaxnmv size=2 + VMINNMV 1110 1110 1110 11 10 .... 1111 1 0 . 0 ... 
0 @vmaxnmv size=2 + ] + [ + VMAXV_S 1110 1110 1110 .. 10 .... 1111 0 0 . 0 ... 0 @vmaxv + VMINV_S 1110 1110 1110 .. 10 .... 1111 1 0 . 0 ... 0 @vmaxv + VMAXAV 1110 1110 1110 .. 00 .... 1111 0 0 . 0 ... 0 @vmaxv + VMINAV 1110 1110 1110 .. 00 .... 1111 1 0 . 0 ... 0 @vmaxv + ] + VMLADAV_S 1110 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 0 @vmladav_nosz + VRMLALDAVH_S 1110 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz +} + +{ + [ + VMAXNMAV 1111 1110 1110 11 00 .... 1111 0 0 . 0 ... 0 @vmaxnmv size=1 + VMINNMAV 1111 1110 1110 11 00 .... 1111 1 0 . 0 ... 0 @vmaxnmv size=1 + VMAXNMV 1111 1110 1110 11 10 .... 1111 0 0 . 0 ... 0 @vmaxnmv size=1 + VMINNMV 1111 1110 1110 11 10 .... 1111 1 0 . 0 ... 0 @vmaxnmv size=1 + ] + [ + VMAXV_U 1111 1110 1110 .. 10 .... 1111 0 0 . 0 ... 0 @vmaxv + VMINV_U 1111 1110 1110 .. 10 .... 1111 1 0 . 0 ... 0 @vmaxv + ] + VMLADAV_U 1111 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 0 @vmladav_nosz + VRMLALDAVH_U 1111 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz +} # Scalar operations -VADD_scalar 1110 1110 0 . .. ... 1 ... 0 1111 . 100 .... @2scalar -VSUB_scalar 1110 1110 0 . .. ... 1 ... 1 1111 . 100 .... @2scalar -VMUL_scalar 1110 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar -VHADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar -VHADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar -VHSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar -VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar +{ + VCMPEQ_fp_scalar 1110 1110 0 . 11 ... 1 ... 0 1111 0100 .... @vcmp_fp_scalar size=2 + VCMPNE_fp_scalar 1110 1110 0 . 11 ... 1 ... 0 1111 1100 .... @vcmp_fp_scalar size=2 + VADD_scalar 1110 1110 0 . .. ... 1 ... 0 1111 . 100 .... @2scalar +} + +{ + VCMPLT_fp_scalar 1110 1110 0 . 11 ... 1 ... 1 1111 1100 .... @vcmp_fp_scalar size=2 + VCMPGE_fp_scalar 1110 1110 0 . 11 ... 1 ... 1 1111 0100 .... @vcmp_fp_scalar size=2 + VSUB_scalar 1110 1110 0 . .. ... 1 ... 1 1111 . 100 .... @2scalar +} + +{ + VSHL_S_scalar 1110 1110 0 . 11 .. 01 ... 1 1110 0110 .... @shl_scalar + VRSHL_S_scalar 1110 1110 0 . 11 .. 11 ... 1 1110 0110 .... @shl_scalar + VQSHL_S_scalar 1110 1110 0 . 11 .. 01 ... 1 1110 1110 .... @shl_scalar + VQRSHL_S_scalar 1110 1110 0 . 11 .. 11 ... 1 1110 1110 .... @shl_scalar + VMUL_scalar 1110 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar +} + +{ + VSHL_U_scalar 1111 1110 0 . 11 .. 01 ... 1 1110 0110 .... @shl_scalar + VRSHL_U_scalar 1111 1110 0 . 11 .. 11 ... 1 1110 0110 .... @shl_scalar + VQSHL_U_scalar 1111 1110 0 . 11 .. 01 ... 1 1110 1110 .... @shl_scalar + VQRSHL_U_scalar 1111 1110 0 . 11 .. 11 ... 1 1110 1110 .... @shl_scalar + VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar +} + +{ + VADD_fp_scalar 111 . 1110 0 . 11 ... 0 ... 0 1111 . 100 .... @2op_fp_scalar + VHADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar + VHADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar +} + +{ + VSUB_fp_scalar 111 . 1110 0 . 11 ... 0 ... 1 1111 . 100 .... @2op_fp_scalar + VHSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar + VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar +} { VQADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar @@ -301,10 +549,28 @@ VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar size=%size_28 } -VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar +{ + VMUL_fp_scalar 111 . 1110 0 . 11 ... 1 ... 0 1110 . 110 .... @2op_fp_scalar + VQDMULH_scalar 1110 1110 0 . .. ... 
1 ... 0 1110 . 110 .... @2scalar + VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar +} -VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar -VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar +{ + VFMA_scalar 111 . 1110 0 . 11 ... 1 ... 0 1110 . 100 .... @2op_fp_scalar + # The U bit (28) is don't-care because it does not affect the result + VMLA 111 - 1110 0 . .. ... 1 ... 0 1110 . 100 .... @2scalar +} + +{ + VFMAS_scalar 111 . 1110 0 . 11 ... 1 ... 1 1110 . 100 .... @2op_fp_scalar + # The U bit (28) is don't-care because it does not affect the result + VMLAS 111 - 1110 0 . .. ... 1 ... 1 1110 . 100 .... @2scalar +} + +VQRDMLAH 1110 1110 0 . .. ... 0 ... 0 1110 . 100 .... @2scalar +VQRDMLASH 1110 1110 0 . .. ... 0 ... 1 1110 . 100 .... @2scalar +VQDMLAH 1110 1110 0 . .. ... 0 ... 0 1110 . 110 .... @2scalar +VQDMLASH 1110 1110 0 . .. ... 0 ... 1 1110 . 110 .... @2scalar # Vector add across vector { @@ -313,9 +579,10 @@ VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar rdahi=%rdahi rdalo=%rdalo } -# Predicate operations -%mask_22_13 22:1 13:3 -VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13 +@vabav .... .... .. size:2 .... rda:4 .... .... .... &vabav qn=%qn qm=%qm + +VABAV_S 111 0 1110 10 .. ... 0 .... 1111 . 0 . 0 ... 1 @vabav +VABAV_U 111 1 1110 10 .. ... 0 .... 1111 . 0 . 0 ... 1 @vabav # Logical immediate operations (1 reg and modified-immediate) @@ -364,6 +631,8 @@ VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w # VSHLL T1 encoding; the T2 VSHLL encoding is elsewhere in this file +# Note that VMOVL is encoded as "VSHLL with a zero shift count"; we +# implement it that way rather than special-casing it in the decode. VSHLL_BS 111 0 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_b VSHLL_BS 111 0 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_h @@ -425,3 +694,139 @@ VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h VSHLC 111 0 1110 1 . 1 imm:5 ... 0 1111 1100 rdm:4 qd=%qd + +# Comparisons. We expand out the conditions which are split across +# encodings T1, T2, T3 and the fc bits. These include VPT, which is +# effectively "VCMP then VPST". A plain "VCMP" has a mask field of zero. +{ + VCMPEQ_fp 111 . 1110 0 . 11 ... 1 ... 0 1111 0 0 . 0 ... 0 @vcmp_fp + VCMPEQ 111 1 1110 0 . .. ... 1 ... 0 1111 0 0 . 0 ... 0 @vcmp +} + +{ + VCMPNE_fp 111 . 1110 0 . 11 ... 1 ... 0 1111 1 0 . 0 ... 0 @vcmp_fp + VCMPNE 111 1 1110 0 . .. ... 1 ... 0 1111 1 0 . 0 ... 0 @vcmp +} + +{ + VCMPGE_fp 111 . 1110 0 . 11 ... 1 ... 1 1111 0 0 . 0 ... 0 @vcmp_fp + VCMPGE 111 1 1110 0 . .. ... 1 ... 1 1111 0 0 . 0 ... 0 @vcmp +} + +{ + VCMPLT_fp 111 . 1110 0 . 11 ... 1 ... 1 1111 1 0 . 0 ... 0 @vcmp_fp + VCMPLT 111 1 1110 0 . .. ... 1 ... 1 1111 1 0 . 0 ... 0 @vcmp +} + +{ + VCMPGT_fp 111 . 1110 0 . 11 ... 1 ... 1 1111 0 0 . 0 ... 1 @vcmp_fp + VCMPGT 111 1 1110 0 . .. ... 1 ... 1 1111 0 0 . 0 ... 1 @vcmp +} + +{ + VCMPLE_fp 111 . 1110 0 . 11 ... 1 ... 1 1111 1 0 . 0 ... 1 @vcmp_fp + VCMPLE 1111 1110 0 . .. ... 1 ... 1 1111 1 0 . 0 ... 1 @vcmp +} + +{ + VPSEL 1111 1110 0 . 11 ... 1 ... 0 1111 . 0 . 0 ... 1 @2op_nosz + VCMPCS 1111 1110 0 . .. ... 1 ... 0 1111 0 0 . 0 ... 1 @vcmp + VCMPHI 1111 1110 0 . .. ... 1 ... 0 1111 1 0 . 0 ... 1 @vcmp +} + +{ + VPNOT 1111 1110 0 0 11 000 1 000 0 1111 0100 1101 + VPST 1111 1110 0 . 11 000 1 ... 
0 1111 0100 1101 mask=%mask_22_13 + VCMPEQ_fp_scalar 1111 1110 0 . 11 ... 1 ... 0 1111 0100 .... @vcmp_fp_scalar size=1 + VCMPEQ_scalar 1111 1110 0 . .. ... 1 ... 0 1111 0100 .... @vcmp_scalar +} + +{ + VCMPNE_fp_scalar 1111 1110 0 . 11 ... 1 ... 0 1111 1100 .... @vcmp_fp_scalar size=1 + VCMPNE_scalar 1111 1110 0 . .. ... 1 ... 0 1111 1100 .... @vcmp_scalar +} + +{ + VCMPGT_fp_scalar 1111 1110 0 . 11 ... 1 ... 1 1111 0110 .... @vcmp_fp_scalar size=1 + VCMPGT_scalar 1111 1110 0 . .. ... 1 ... 1 1111 0110 .... @vcmp_scalar +} + +{ + VCMPLE_fp_scalar 1111 1110 0 . 11 ... 1 ... 1 1111 1110 .... @vcmp_fp_scalar size=1 + VCMPLE_scalar 1111 1110 0 . .. ... 1 ... 1 1111 1110 .... @vcmp_scalar +} + +{ + VCMPGE_fp_scalar 1111 1110 0 . 11 ... 1 ... 1 1111 0100 .... @vcmp_fp_scalar size=1 + VCMPGE_scalar 1111 1110 0 . .. ... 1 ... 1 1111 0100 .... @vcmp_scalar +} +{ + VCMPLT_fp_scalar 1111 1110 0 . 11 ... 1 ... 1 1111 1100 .... @vcmp_fp_scalar size=1 + VCMPLT_scalar 1111 1110 0 . .. ... 1 ... 1 1111 1100 .... @vcmp_scalar +} + +VCMPCS_scalar 1111 1110 0 . .. ... 1 ... 0 1111 0 1 1 0 .... @vcmp_scalar +VCMPHI_scalar 1111 1110 0 . .. ... 1 ... 0 1111 1 1 1 0 .... @vcmp_scalar + +# 2-operand FP +VADD_fp 1110 1111 0 . 0 . ... 0 ... 0 1101 . 1 . 0 ... 0 @2op_fp +VSUB_fp 1110 1111 0 . 1 . ... 0 ... 0 1101 . 1 . 0 ... 0 @2op_fp +VMUL_fp 1111 1111 0 . 0 . ... 0 ... 0 1101 . 1 . 1 ... 0 @2op_fp +VABD_fp 1111 1111 0 . 1 . ... 0 ... 0 1101 . 1 . 0 ... 0 @2op_fp + +VMAXNM 1111 1111 0 . 0 . ... 0 ... 0 1111 . 1 . 1 ... 0 @2op_fp +VMINNM 1111 1111 0 . 1 . ... 0 ... 0 1111 . 1 . 1 ... 0 @2op_fp + +VCADD90_fp 1111 1100 1 . 0 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev +VCADD270_fp 1111 1101 1 . 0 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev + +VFMA 1110 1111 0 . 0 . ... 0 ... 0 1100 . 1 . 1 ... 0 @2op_fp +VFMS 1110 1111 0 . 1 . ... 0 ... 0 1100 . 1 . 1 ... 0 @2op_fp + +VCMLA0 1111 110 00 . 1 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev +VCMLA90 1111 110 01 . 1 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev +VCMLA180 1111 110 10 . 1 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev +VCMLA270 1111 110 11 . 1 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev + +# floating-point <-> fixed-point conversions. Naming convention: +# VCVT_, S = signed int, U = unsigned int, H = halfprec, F = singleprec +@vcvt .... .... .. 1 ..... .... .. 1 . .... .... &2shift \ + qd=%qd qm=%qm shift=%rshift_i5 size=2 +@vcvt_f16 .... .... .. 11 .... .... .. 0 . .... .... &2shift \ + qd=%qd qm=%qm shift=%rshift_i4 size=1 + +VCVT_SH_fixed 1110 1111 1 . ...... ... 0 11 . 0 01 . 1 ... 0 @vcvt_f16 +VCVT_UH_fixed 1111 1111 1 . ...... ... 0 11 . 0 01 . 1 ... 0 @vcvt_f16 + +VCVT_HS_fixed 1110 1111 1 . ...... ... 0 11 . 1 01 . 1 ... 0 @vcvt_f16 +VCVT_HU_fixed 1111 1111 1 . ...... ... 0 11 . 1 01 . 1 ... 0 @vcvt_f16 + +VCVT_SF_fixed 1110 1111 1 . ...... ... 0 11 . 0 01 . 1 ... 0 @vcvt +VCVT_UF_fixed 1111 1111 1 . ...... ... 0 11 . 0 01 . 1 ... 0 @vcvt + +VCVT_FS_fixed 1110 1111 1 . ...... ... 0 11 . 1 01 . 1 ... 0 @vcvt +VCVT_FU_fixed 1111 1111 1 . ...... ... 0 11 . 1 01 . 1 ... 0 @vcvt + +# VCVT between floating point and integer (halfprec and single); +# VCVT_, S = signed int, U = unsigned int, F = float +VCVT_SF 1111 1111 1 . 11 .. 11 ... 0 011 00 1 . 0 ... 0 @1op +VCVT_UF 1111 1111 1 . 11 .. 11 ... 0 011 01 1 . 0 ... 0 @1op +VCVT_FS 1111 1111 1 . 11 .. 11 ... 0 011 10 1 . 0 ... 0 @1op +VCVT_FU 1111 1111 1 . 11 .. 11 ... 0 011 11 1 . 0 ... 
0 @1op + +# VCVT from floating point to integer with specified rounding mode +VCVTAS 1111 1111 1 . 11 .. 11 ... 000 00 0 1 . 0 ... 0 @1op +VCVTAU 1111 1111 1 . 11 .. 11 ... 000 00 1 1 . 0 ... 0 @1op +VCVTNS 1111 1111 1 . 11 .. 11 ... 000 01 0 1 . 0 ... 0 @1op +VCVTNU 1111 1111 1 . 11 .. 11 ... 000 01 1 1 . 0 ... 0 @1op +VCVTPS 1111 1111 1 . 11 .. 11 ... 000 10 0 1 . 0 ... 0 @1op +VCVTPU 1111 1111 1 . 11 .. 11 ... 000 10 1 1 . 0 ... 0 @1op +VCVTMS 1111 1111 1 . 11 .. 11 ... 000 11 0 1 . 0 ... 0 @1op +VCVTMU 1111 1111 1 . 11 .. 11 ... 000 11 1 1 . 0 ... 0 @1op + +VRINTN 1111 1111 1 . 11 .. 10 ... 001 000 1 . 0 ... 0 @1op +VRINTX 1111 1111 1 . 11 .. 10 ... 001 001 1 . 0 ... 0 @1op +VRINTA 1111 1111 1 . 11 .. 10 ... 001 010 1 . 0 ... 0 @1op +VRINTZ 1111 1111 1 . 11 .. 10 ... 001 011 1 . 0 ... 0 @1op +VRINTM 1111 1111 1 . 11 .. 10 ... 001 101 1 . 0 ... 0 @1op +VRINTP 1111 1111 1 . 11 .. 10 ... 001 111 1 . 0 ... 0 @1op diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c index db5d622085..403b345ea3 100644 --- a/target/arm/mve_helper.c +++ b/target/arm/mve_helper.c @@ -25,6 +25,36 @@ #include "exec/cpu_ldst.h" #include "exec/exec-all.h" #include "tcg/tcg.h" +#include "fpu/softfloat.h" + +static uint16_t mve_eci_mask(CPUARMState *env) +{ + /* + * Return the mask of which elements in the MVE vector correspond + * to beats being executed. The mask has 1 bits for executed lanes + * and 0 bits where ECI says this beat was already executed. + */ + int eci; + + if ((env->condexec_bits & 0xf) != 0) { + return 0xffff; + } + + eci = env->condexec_bits >> 4; + switch (eci) { + case ECI_NONE: + return 0xffff; + case ECI_A0: + return 0xfff0; + case ECI_A0A1: + return 0xff00; + case ECI_A0A1A2: + case ECI_A0A1A2B0: + return 0xf000; + default: + g_assert_not_reached(); + } +} static uint16_t mve_element_mask(CPUARMState *env) { @@ -64,33 +94,15 @@ static uint16_t mve_element_mask(CPUARMState *env) */ int masklen = env->regs[14] << env->v7m.ltpsize; assert(masklen <= 16); - mask &= MAKE_64BIT_MASK(0, masklen); - } - - if ((env->condexec_bits & 0xf) == 0) { - /* - * ECI bits indicate which beats are already executed; - * we handle this by effectively predicating them out. - */ - int eci = env->condexec_bits >> 4; - switch (eci) { - case ECI_NONE: - break; - case ECI_A0: - mask &= 0xfff0; - break; - case ECI_A0A1: - mask &= 0xff00; - break; - case ECI_A0A1A2: - case ECI_A0A1A2B0: - mask &= 0xf000; - break; - default: - g_assert_not_reached(); - } + uint16_t ltpmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0; + mask &= ltpmask; } + /* + * ECI bits indicate which beats are already executed; + * we handle this by effectively predicating them out. + */ + mask &= mve_eci_mask(env); return mask; } @@ -99,6 +111,8 @@ static void mve_advance_vpt(CPUARMState *env) /* Advance the VPT and ECI state if necessary */ uint32_t vpr = env->v7m.vpr; unsigned mask01, mask23; + uint16_t inv_mask; + uint16_t eci_mask = mve_eci_mask(env); if ((env->condexec_bits & 0xf) == 0) { env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ? 
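Note (not part of the patch): the new mve_eci_mask() boils down to "clear four mask bits
for every beat that ECI says has already executed", since each of the four beats covers
4 bytes of the 16-byte vector. A minimal sketch of that mapping, using a hypothetical
helper name:

    /*
     * Illustrative only: 0 beats done -> 0xffff, 1 -> 0xfff0,
     * 2 -> 0xff00, 3 -> 0xf000 (beats_done assumed to be 0..3).
     */
    static uint16_t eci_mask_from_beats_done(unsigned beats_done)
    {
        return (uint16_t)(0xffff << (4 * beats_done));
    }

ECI_A0A1A2 and ECI_A0A1A2B0 both mean three beats of the current insn are already done,
which is why the switch above returns 0xf000 for both.
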
@@ -110,27 +124,36 @@ static void mve_advance_vpt(CPUARMState *env) return; } + /* Invert P0 bits if needed, but only for beats we actually executed */ mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01); mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23); - if (mask01 > 8) { - /* high bit set, but not 0b1000: invert the relevant half of P0 */ - vpr ^= 0xff; + /* Start by assuming we invert all bits corresponding to executed beats */ + inv_mask = eci_mask; + if (mask01 <= 8) { + /* MASK01 says don't invert low half of P0 */ + inv_mask &= ~0xff; } - if (mask23 > 8) { - /* high bit set, but not 0b1000: invert the relevant half of P0 */ - vpr ^= 0xff00; + if (mask23 <= 8) { + /* MASK23 says don't invert high half of P0 */ + inv_mask &= ~0xff00; } - vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1); + vpr ^= inv_mask; + /* Only update MASK01 if beat 1 executed */ + if (eci_mask & 0xf0) { + vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1); + } + /* Beat 3 always executes, so update MASK23 */ vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1); env->v7m.vpr = vpr; } - +/* For loads, predicated lanes are zeroed instead of keeping their old values */ #define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE) \ void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \ { \ TYPE *d = vd; \ uint16_t mask = mve_element_mask(env); \ + uint16_t eci_mask = mve_eci_mask(env); \ unsigned b, e; \ /* \ * R_SXTM allows the dest reg to become UNKNOWN for abandoned \ @@ -138,8 +161,9 @@ static void mve_advance_vpt(CPUARMState *env) * then take an exception. \ */ \ for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \ - if (mask & (1 << b)) { \ - d[H##ESIZE(e)] = cpu_##LDTYPE##_data_ra(env, addr, GETPC()); \ + if (eci_mask & (1 << b)) { \ + d[H##ESIZE(e)] = (mask & (1 << b)) ? \ + cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0; \ } \ addr += MSIZE; \ } \ @@ -183,6 +207,504 @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t) #undef DO_VLDR #undef DO_VSTR +/* + * Gather loads/scatter stores. Here each element of Qm specifies + * an offset to use from the base register Rm. In the _os_ versions + * that offset is scaled by the element size. + * For loads, predicated lanes are zeroed instead of retaining + * their previous values. + */ +#define DO_VLDR_SG(OP, LDTYPE, ESIZE, TYPE, OFFTYPE, ADDRFN, WB) \ + void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \ + uint32_t base) \ + { \ + TYPE *d = vd; \ + OFFTYPE *m = vm; \ + uint16_t mask = mve_element_mask(env); \ + uint16_t eci_mask = mve_eci_mask(env); \ + unsigned e; \ + uint32_t addr; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \ + if (!(eci_mask & 1)) { \ + continue; \ + } \ + addr = ADDRFN(base, m[H##ESIZE(e)]); \ + d[H##ESIZE(e)] = (mask & 1) ? 
\ + cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0; \ + if (WB) { \ + m[H##ESIZE(e)] = addr; \ + } \ + } \ + mve_advance_vpt(env); \ + } + +/* We know here TYPE is unsigned so always the same as the offset type */ +#define DO_VSTR_SG(OP, STTYPE, ESIZE, TYPE, ADDRFN, WB) \ + void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \ + uint32_t base) \ + { \ + TYPE *d = vd; \ + TYPE *m = vm; \ + uint16_t mask = mve_element_mask(env); \ + uint16_t eci_mask = mve_eci_mask(env); \ + unsigned e; \ + uint32_t addr; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \ + if (!(eci_mask & 1)) { \ + continue; \ + } \ + addr = ADDRFN(base, m[H##ESIZE(e)]); \ + if (mask & 1) { \ + cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \ + } \ + if (WB) { \ + m[H##ESIZE(e)] = addr; \ + } \ + } \ + mve_advance_vpt(env); \ + } + +/* + * 64-bit accesses are slightly different: they are done as two 32-bit + * accesses, controlled by the predicate mask for the relevant beat, + * and with a single 32-bit offset in the first of the two Qm elements. + * Note that for QEMU our IMPDEF AIRCR.ENDIANNESS is always 0 (little). + * Address writeback happens on the odd beats and updates the address + * stored in the even-beat element. + */ +#define DO_VLDR64_SG(OP, ADDRFN, WB) \ + void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \ + uint32_t base) \ + { \ + uint32_t *d = vd; \ + uint32_t *m = vm; \ + uint16_t mask = mve_element_mask(env); \ + uint16_t eci_mask = mve_eci_mask(env); \ + unsigned e; \ + uint32_t addr; \ + for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \ + if (!(eci_mask & 1)) { \ + continue; \ + } \ + addr = ADDRFN(base, m[H4(e & ~1)]); \ + addr += 4 * (e & 1); \ + d[H4(e)] = (mask & 1) ? cpu_ldl_data_ra(env, addr, GETPC()) : 0; \ + if (WB && (e & 1)) { \ + m[H4(e & ~1)] = addr - 4; \ + } \ + } \ + mve_advance_vpt(env); \ + } + +#define DO_VSTR64_SG(OP, ADDRFN, WB) \ + void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \ + uint32_t base) \ + { \ + uint32_t *d = vd; \ + uint32_t *m = vm; \ + uint16_t mask = mve_element_mask(env); \ + uint16_t eci_mask = mve_eci_mask(env); \ + unsigned e; \ + uint32_t addr; \ + for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \ + if (!(eci_mask & 1)) { \ + continue; \ + } \ + addr = ADDRFN(base, m[H4(e & ~1)]); \ + addr += 4 * (e & 1); \ + if (mask & 1) { \ + cpu_stl_data_ra(env, addr, d[H4(e)], GETPC()); \ + } \ + if (WB && (e & 1)) { \ + m[H4(e & ~1)] = addr - 4; \ + } \ + } \ + mve_advance_vpt(env); \ + } + +#define ADDR_ADD(BASE, OFFSET) ((BASE) + (OFFSET)) +#define ADDR_ADD_OSH(BASE, OFFSET) ((BASE) + ((OFFSET) << 1)) +#define ADDR_ADD_OSW(BASE, OFFSET) ((BASE) + ((OFFSET) << 2)) +#define ADDR_ADD_OSD(BASE, OFFSET) ((BASE) + ((OFFSET) << 3)) + +DO_VLDR_SG(vldrb_sg_sh, ldsb, 2, int16_t, uint16_t, ADDR_ADD, false) +DO_VLDR_SG(vldrb_sg_sw, ldsb, 4, int32_t, uint32_t, ADDR_ADD, false) +DO_VLDR_SG(vldrh_sg_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD, false) + +DO_VLDR_SG(vldrb_sg_ub, ldub, 1, uint8_t, uint8_t, ADDR_ADD, false) +DO_VLDR_SG(vldrb_sg_uh, ldub, 2, uint16_t, uint16_t, ADDR_ADD, false) +DO_VLDR_SG(vldrb_sg_uw, ldub, 4, uint32_t, uint32_t, ADDR_ADD, false) +DO_VLDR_SG(vldrh_sg_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD, false) +DO_VLDR_SG(vldrh_sg_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD, false) +DO_VLDR_SG(vldrw_sg_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, false) +DO_VLDR64_SG(vldrd_sg_ud, ADDR_ADD, false) + +DO_VLDR_SG(vldrh_sg_os_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD_OSH, false) 
+DO_VLDR_SG(vldrh_sg_os_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD_OSH, false) +DO_VLDR_SG(vldrh_sg_os_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD_OSH, false) +DO_VLDR_SG(vldrw_sg_os_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD_OSW, false) +DO_VLDR64_SG(vldrd_sg_os_ud, ADDR_ADD_OSD, false) + +DO_VSTR_SG(vstrb_sg_ub, stb, 1, uint8_t, ADDR_ADD, false) +DO_VSTR_SG(vstrb_sg_uh, stb, 2, uint16_t, ADDR_ADD, false) +DO_VSTR_SG(vstrb_sg_uw, stb, 4, uint32_t, ADDR_ADD, false) +DO_VSTR_SG(vstrh_sg_uh, stw, 2, uint16_t, ADDR_ADD, false) +DO_VSTR_SG(vstrh_sg_uw, stw, 4, uint32_t, ADDR_ADD, false) +DO_VSTR_SG(vstrw_sg_uw, stl, 4, uint32_t, ADDR_ADD, false) +DO_VSTR64_SG(vstrd_sg_ud, ADDR_ADD, false) + +DO_VSTR_SG(vstrh_sg_os_uh, stw, 2, uint16_t, ADDR_ADD_OSH, false) +DO_VSTR_SG(vstrh_sg_os_uw, stw, 4, uint32_t, ADDR_ADD_OSH, false) +DO_VSTR_SG(vstrw_sg_os_uw, stl, 4, uint32_t, ADDR_ADD_OSW, false) +DO_VSTR64_SG(vstrd_sg_os_ud, ADDR_ADD_OSD, false) + +DO_VLDR_SG(vldrw_sg_wb_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, true) +DO_VLDR64_SG(vldrd_sg_wb_ud, ADDR_ADD, true) +DO_VSTR_SG(vstrw_sg_wb_uw, stl, 4, uint32_t, ADDR_ADD, true) +DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true) + +/* + * Deinterleaving loads/interleaving stores. + * + * For these helpers we are passed the index of the first Qreg + * (VLD2/VST2 will also access Qn+1, VLD4/VST4 access Qn .. Qn+3) + * and the value of the base address register Rn. + * The helpers are specialized for pattern and element size, so + * for instance vld42h is VLD4 with pattern 2, element size MO_16. + * + * These insns are beatwise but not predicated, so we must honour ECI, + * but need not look at mve_element_mask(). + * + * The pseudocode implements these insns with multiple memory accesses + * of the element size, but rules R_VVVG and R_FXDM permit us to make + * one 32-bit memory access per beat. 
+ */ +#define DO_VLD4B(OP, O1, O2, O3, O4) \ + void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ + uint32_t base) \ + { \ + int beat, e; \ + uint16_t mask = mve_eci_mask(env); \ + static const uint8_t off[4] = { O1, O2, O3, O4 }; \ + uint32_t addr, data; \ + for (beat = 0; beat < 4; beat++, mask >>= 4) { \ + if ((mask & 1) == 0) { \ + /* ECI says skip this beat */ \ + continue; \ + } \ + addr = base + off[beat] * 4; \ + data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ + for (e = 0; e < 4; e++, data >>= 8) { \ + uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \ + qd[H1(off[beat])] = data; \ + } \ + } \ + } + +#define DO_VLD4H(OP, O1, O2) \ + void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ + uint32_t base) \ + { \ + int beat; \ + uint16_t mask = mve_eci_mask(env); \ + static const uint8_t off[4] = { O1, O1, O2, O2 }; \ + uint32_t addr, data; \ + int y; /* y counts 0 2 0 2 */ \ + uint16_t *qd; \ + for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) { \ + if ((mask & 1) == 0) { \ + /* ECI says skip this beat */ \ + continue; \ + } \ + addr = base + off[beat] * 8 + (beat & 1) * 4; \ + data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ + qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y); \ + qd[H2(off[beat])] = data; \ + data >>= 16; \ + qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1); \ + qd[H2(off[beat])] = data; \ + } \ + } + +#define DO_VLD4W(OP, O1, O2, O3, O4) \ + void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ + uint32_t base) \ + { \ + int beat; \ + uint16_t mask = mve_eci_mask(env); \ + static const uint8_t off[4] = { O1, O2, O3, O4 }; \ + uint32_t addr, data; \ + uint32_t *qd; \ + int y; \ + for (beat = 0; beat < 4; beat++, mask >>= 4) { \ + if ((mask & 1) == 0) { \ + /* ECI says skip this beat */ \ + continue; \ + } \ + addr = base + off[beat] * 4; \ + data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ + y = (beat + (O1 & 2)) & 3; \ + qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y); \ + qd[H4(off[beat] >> 2)] = data; \ + } \ + } + +DO_VLD4B(vld40b, 0, 1, 10, 11) +DO_VLD4B(vld41b, 2, 3, 12, 13) +DO_VLD4B(vld42b, 4, 5, 14, 15) +DO_VLD4B(vld43b, 6, 7, 8, 9) + +DO_VLD4H(vld40h, 0, 5) +DO_VLD4H(vld41h, 1, 6) +DO_VLD4H(vld42h, 2, 7) +DO_VLD4H(vld43h, 3, 4) + +DO_VLD4W(vld40w, 0, 1, 10, 11) +DO_VLD4W(vld41w, 2, 3, 12, 13) +DO_VLD4W(vld42w, 4, 5, 14, 15) +DO_VLD4W(vld43w, 6, 7, 8, 9) + +#define DO_VLD2B(OP, O1, O2, O3, O4) \ + void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ + uint32_t base) \ + { \ + int beat, e; \ + uint16_t mask = mve_eci_mask(env); \ + static const uint8_t off[4] = { O1, O2, O3, O4 }; \ + uint32_t addr, data; \ + uint8_t *qd; \ + for (beat = 0; beat < 4; beat++, mask >>= 4) { \ + if ((mask & 1) == 0) { \ + /* ECI says skip this beat */ \ + continue; \ + } \ + addr = base + off[beat] * 2; \ + data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ + for (e = 0; e < 4; e++, data >>= 8) { \ + qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1)); \ + qd[H1(off[beat] + (e >> 1))] = data; \ + } \ + } \ + } + +#define DO_VLD2H(OP, O1, O2, O3, O4) \ + void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ + uint32_t base) \ + { \ + int beat; \ + uint16_t mask = mve_eci_mask(env); \ + static const uint8_t off[4] = { O1, O2, O3, O4 }; \ + uint32_t addr, data; \ + int e; \ + uint16_t *qd; \ + for (beat = 0; beat < 4; beat++, mask >>= 4) { \ + if ((mask & 1) == 0) { \ + /* ECI says skip this beat */ \ + continue; \ + } \ + addr = base + off[beat] * 4; \ + data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ + for (e = 0; e < 2; e++, data >>= 16) 
{ \ + qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e); \ + qd[H2(off[beat])] = data; \ + } \ + } \ + } + +#define DO_VLD2W(OP, O1, O2, O3, O4) \ + void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ + uint32_t base) \ + { \ + int beat; \ + uint16_t mask = mve_eci_mask(env); \ + static const uint8_t off[4] = { O1, O2, O3, O4 }; \ + uint32_t addr, data; \ + uint32_t *qd; \ + for (beat = 0; beat < 4; beat++, mask >>= 4) { \ + if ((mask & 1) == 0) { \ + /* ECI says skip this beat */ \ + continue; \ + } \ + addr = base + off[beat]; \ + data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ + qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1)); \ + qd[H4(off[beat] >> 3)] = data; \ + } \ + } + +DO_VLD2B(vld20b, 0, 2, 12, 14) +DO_VLD2B(vld21b, 4, 6, 8, 10) + +DO_VLD2H(vld20h, 0, 1, 6, 7) +DO_VLD2H(vld21h, 2, 3, 4, 5) + +DO_VLD2W(vld20w, 0, 4, 24, 28) +DO_VLD2W(vld21w, 8, 12, 16, 20) + +#define DO_VST4B(OP, O1, O2, O3, O4) \ + void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ + uint32_t base) \ + { \ + int beat, e; \ + uint16_t mask = mve_eci_mask(env); \ + static const uint8_t off[4] = { O1, O2, O3, O4 }; \ + uint32_t addr, data; \ + for (beat = 0; beat < 4; beat++, mask >>= 4) { \ + if ((mask & 1) == 0) { \ + /* ECI says skip this beat */ \ + continue; \ + } \ + addr = base + off[beat] * 4; \ + data = 0; \ + for (e = 3; e >= 0; e--) { \ + uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \ + data = (data << 8) | qd[H1(off[beat])]; \ + } \ + cpu_stl_le_data_ra(env, addr, data, GETPC()); \ + } \ + } + +#define DO_VST4H(OP, O1, O2) \ + void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ + uint32_t base) \ + { \ + int beat; \ + uint16_t mask = mve_eci_mask(env); \ + static const uint8_t off[4] = { O1, O1, O2, O2 }; \ + uint32_t addr, data; \ + int y; /* y counts 0 2 0 2 */ \ + uint16_t *qd; \ + for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) { \ + if ((mask & 1) == 0) { \ + /* ECI says skip this beat */ \ + continue; \ + } \ + addr = base + off[beat] * 8 + (beat & 1) * 4; \ + qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y); \ + data = qd[H2(off[beat])]; \ + qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1); \ + data |= qd[H2(off[beat])] << 16; \ + cpu_stl_le_data_ra(env, addr, data, GETPC()); \ + } \ + } + +#define DO_VST4W(OP, O1, O2, O3, O4) \ + void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ + uint32_t base) \ + { \ + int beat; \ + uint16_t mask = mve_eci_mask(env); \ + static const uint8_t off[4] = { O1, O2, O3, O4 }; \ + uint32_t addr, data; \ + uint32_t *qd; \ + int y; \ + for (beat = 0; beat < 4; beat++, mask >>= 4) { \ + if ((mask & 1) == 0) { \ + /* ECI says skip this beat */ \ + continue; \ + } \ + addr = base + off[beat] * 4; \ + y = (beat + (O1 & 2)) & 3; \ + qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y); \ + data = qd[H4(off[beat] >> 2)]; \ + cpu_stl_le_data_ra(env, addr, data, GETPC()); \ + } \ + } + +DO_VST4B(vst40b, 0, 1, 10, 11) +DO_VST4B(vst41b, 2, 3, 12, 13) +DO_VST4B(vst42b, 4, 5, 14, 15) +DO_VST4B(vst43b, 6, 7, 8, 9) + +DO_VST4H(vst40h, 0, 5) +DO_VST4H(vst41h, 1, 6) +DO_VST4H(vst42h, 2, 7) +DO_VST4H(vst43h, 3, 4) + +DO_VST4W(vst40w, 0, 1, 10, 11) +DO_VST4W(vst41w, 2, 3, 12, 13) +DO_VST4W(vst42w, 4, 5, 14, 15) +DO_VST4W(vst43w, 6, 7, 8, 9) + +#define DO_VST2B(OP, O1, O2, O3, O4) \ + void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ + uint32_t base) \ + { \ + int beat, e; \ + uint16_t mask = mve_eci_mask(env); \ + static const uint8_t off[4] = { O1, O2, O3, O4 }; \ + uint32_t addr, data; \ + uint8_t *qd; \ + for (beat = 0; beat 
< 4; beat++, mask >>= 4) { \ + if ((mask & 1) == 0) { \ + /* ECI says skip this beat */ \ + continue; \ + } \ + addr = base + off[beat] * 2; \ + data = 0; \ + for (e = 3; e >= 0; e--) { \ + qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1)); \ + data = (data << 8) | qd[H1(off[beat] + (e >> 1))]; \ + } \ + cpu_stl_le_data_ra(env, addr, data, GETPC()); \ + } \ + } + +#define DO_VST2H(OP, O1, O2, O3, O4) \ + void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ + uint32_t base) \ + { \ + int beat; \ + uint16_t mask = mve_eci_mask(env); \ + static const uint8_t off[4] = { O1, O2, O3, O4 }; \ + uint32_t addr, data; \ + int e; \ + uint16_t *qd; \ + for (beat = 0; beat < 4; beat++, mask >>= 4) { \ + if ((mask & 1) == 0) { \ + /* ECI says skip this beat */ \ + continue; \ + } \ + addr = base + off[beat] * 4; \ + data = 0; \ + for (e = 1; e >= 0; e--) { \ + qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e); \ + data = (data << 16) | qd[H2(off[beat])]; \ + } \ + cpu_stl_le_data_ra(env, addr, data, GETPC()); \ + } \ + } + +#define DO_VST2W(OP, O1, O2, O3, O4) \ + void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ + uint32_t base) \ + { \ + int beat; \ + uint16_t mask = mve_eci_mask(env); \ + static const uint8_t off[4] = { O1, O2, O3, O4 }; \ + uint32_t addr, data; \ + uint32_t *qd; \ + for (beat = 0; beat < 4; beat++, mask >>= 4) { \ + if ((mask & 1) == 0) { \ + /* ECI says skip this beat */ \ + continue; \ + } \ + addr = base + off[beat]; \ + qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1)); \ + data = qd[H4(off[beat] >> 3)]; \ + cpu_stl_le_data_ra(env, addr, data, GETPC()); \ + } \ + } + +DO_VST2B(vst20b, 0, 2, 12, 14) +DO_VST2B(vst21b, 4, 6, 8, 10) + +DO_VST2H(vst20h, 0, 1, 6, 7) +DO_VST2H(vst21h, 2, 3, 4, 5) + +DO_VST2W(vst20w, 0, 4, 24, 28) +DO_VST2W(vst21w, 8, 12, 16, 20) + /* * The mergemask(D, R, M) macro performs the operation "*D = R" but * storing only the bytes which correspond to 1 bits in M, @@ -204,7 +726,7 @@ static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask) static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask) { - uint16_t bmask = expand_pred_b_data[mask & 3]; + uint16_t bmask = expand_pred_b(mask); *d = (*d & ~bmask) | (r & bmask); } @@ -215,7 +737,7 @@ static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask) static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask) { - uint32_t bmask = expand_pred_b_data[mask & 0xf]; + uint32_t bmask = expand_pred_b(mask); *d = (*d & ~bmask) | (r & bmask); } @@ -226,7 +748,7 @@ static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask) static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask) { - uint64_t bmask = expand_pred_b_data[mask & 0xff]; + uint64_t bmask = expand_pred_b(mask); *d = (*d & ~bmask) | (r & bmask); } @@ -458,6 +980,22 @@ DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL) DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL) DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL) +/* + * Polynomial multiply. We can always do this generating 64 bits + * of the result at a time, so we don't need to use DO_2OP_L. 
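+ * The masks below keep only the even-numbered elements of each 64-bit
+ * input before it is handed to pmull_h()/pmull_w(); the _T (top)
+ * variants first shift N and M right by one element so that the
+ * odd-numbered elements are multiplied instead.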
+ */ +#define VMULLPH_MASK 0x00ff00ff00ff00ffULL +#define VMULLPW_MASK 0x0000ffff0000ffffULL +#define DO_VMULLPBH(N, M) pmull_h((N) & VMULLPH_MASK, (M) & VMULLPH_MASK) +#define DO_VMULLPTH(N, M) DO_VMULLPBH((N) >> 8, (M) >> 8) +#define DO_VMULLPBW(N, M) pmull_w((N) & VMULLPW_MASK, (M) & VMULLPW_MASK) +#define DO_VMULLPTW(N, M) DO_VMULLPBW((N) >> 16, (M) >> 16) + +DO_2OP(vmullpbh, 8, uint64_t, DO_VMULLPBH) +DO_2OP(vmullpth, 8, uint64_t, DO_VMULLPTH) +DO_2OP(vmullpbw, 8, uint64_t, DO_VMULLPBW) +DO_2OP(vmullptw, 8, uint64_t, DO_VMULLPTW) + /* * Because the computation type is at least twice as large as required, * these work for both signed and unsigned source types. @@ -909,6 +1447,44 @@ DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w) mve_advance_vpt(env); \ } +/* "accumulating" version where FN takes d as well as n and m */ +#define DO_2OP_ACC_SCALAR(OP, ESIZE, TYPE, FN) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ + uint32_t rm) \ + { \ + TYPE *d = vd, *n = vn; \ + TYPE m = rm; \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + mergemask(&d[H##ESIZE(e)], \ + FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m), mask); \ + } \ + mve_advance_vpt(env); \ + } + +#define DO_2OP_SAT_ACC_SCALAR(OP, ESIZE, TYPE, FN) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ + uint32_t rm) \ + { \ + TYPE *d = vd, *n = vn; \ + TYPE m = rm; \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + bool qc = false; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + bool sat = false; \ + mergemask(&d[H##ESIZE(e)], \ + FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m, &sat), \ + mask); \ + qc |= sat & mask & 1; \ + } \ + if (qc) { \ + env->vfp.qc[0] = qc; \ + } \ + mve_advance_vpt(env); \ + } + /* provide unsigned 2-op scalar helpers for all sizes */ #define DO_2OP_SCALAR_U(OP, FN) \ DO_2OP_SCALAR(OP##b, 1, uint8_t, FN) \ @@ -919,6 +1495,11 @@ DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w) DO_2OP_SCALAR(OP##h, 2, int16_t, FN) \ DO_2OP_SCALAR(OP##w, 4, int32_t, FN) +#define DO_2OP_ACC_SCALAR_U(OP, FN) \ + DO_2OP_ACC_SCALAR(OP##b, 1, uint8_t, FN) \ + DO_2OP_ACC_SCALAR(OP##h, 2, uint16_t, FN) \ + DO_2OP_ACC_SCALAR(OP##w, 4, uint32_t, FN) + DO_2OP_SCALAR_U(vadd_scalar, DO_ADD) DO_2OP_SCALAR_U(vsub_scalar, DO_SUB) DO_2OP_SCALAR_U(vmul_scalar, DO_MUL) @@ -948,6 +1529,89 @@ DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B) DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H) DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W) +static int8_t do_vqdmlah_b(int8_t a, int8_t b, int8_t c, int round, bool *sat) +{ + int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 8) + (round << 7); + return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8; +} + +static int16_t do_vqdmlah_h(int16_t a, int16_t b, int16_t c, + int round, bool *sat) +{ + int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 16) + (round << 15); + return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16; +} + +static int32_t do_vqdmlah_w(int32_t a, int32_t b, int32_t c, + int round, bool *sat) +{ + /* + * Architecturally we should do the entire add, double, round + * and then check for saturation. We do three saturating adds, + * but we need to be careful about the order. If the first + * m1 + m2 saturates then it's impossible for the *2+rc to + * bring it back into the non-saturated range. 
However, if + * m1 + m2 is negative then it's possible that doing the doubling + * would take the intermediate result below INT64_MAX and the + * addition of the rounding constant then brings it back in range. + * So we add half the rounding constant and half the "c << esize" + * before doubling rather than adding the rounding constant after + * the doubling. + */ + int64_t m1 = (int64_t)a * b; + int64_t m2 = (int64_t)c << 31; + int64_t r; + if (sadd64_overflow(m1, m2, &r) || + sadd64_overflow(r, (round << 30), &r) || + sadd64_overflow(r, r, &r)) { + *sat = true; + return r < 0 ? INT32_MAX : INT32_MIN; + } + return r >> 32; +} + +/* + * The *MLAH insns are vector * scalar + vector; + * the *MLASH insns are vector * vector + scalar + */ +#define DO_VQDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 0, S) +#define DO_VQDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 0, S) +#define DO_VQDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 0, S) +#define DO_VQRDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 1, S) +#define DO_VQRDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 1, S) +#define DO_VQRDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 1, S) + +#define DO_VQDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 0, S) +#define DO_VQDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 0, S) +#define DO_VQDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 0, S) +#define DO_VQRDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 1, S) +#define DO_VQRDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 1, S) +#define DO_VQRDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 1, S) + +DO_2OP_SAT_ACC_SCALAR(vqdmlahb, 1, int8_t, DO_VQDMLAH_B) +DO_2OP_SAT_ACC_SCALAR(vqdmlahh, 2, int16_t, DO_VQDMLAH_H) +DO_2OP_SAT_ACC_SCALAR(vqdmlahw, 4, int32_t, DO_VQDMLAH_W) +DO_2OP_SAT_ACC_SCALAR(vqrdmlahb, 1, int8_t, DO_VQRDMLAH_B) +DO_2OP_SAT_ACC_SCALAR(vqrdmlahh, 2, int16_t, DO_VQRDMLAH_H) +DO_2OP_SAT_ACC_SCALAR(vqrdmlahw, 4, int32_t, DO_VQRDMLAH_W) + +DO_2OP_SAT_ACC_SCALAR(vqdmlashb, 1, int8_t, DO_VQDMLASH_B) +DO_2OP_SAT_ACC_SCALAR(vqdmlashh, 2, int16_t, DO_VQDMLASH_H) +DO_2OP_SAT_ACC_SCALAR(vqdmlashw, 4, int32_t, DO_VQDMLASH_W) +DO_2OP_SAT_ACC_SCALAR(vqrdmlashb, 1, int8_t, DO_VQRDMLASH_B) +DO_2OP_SAT_ACC_SCALAR(vqrdmlashh, 2, int16_t, DO_VQRDMLASH_H) +DO_2OP_SAT_ACC_SCALAR(vqrdmlashw, 4, int32_t, DO_VQRDMLASH_W) + +/* Vector by scalar plus vector */ +#define DO_VMLA(D, N, M) ((N) * (M) + (D)) + +DO_2OP_ACC_SCALAR_U(vmla, DO_VMLA) + +/* Vector by vector plus scalar */ +#define DO_VMLAS(D, N, M) ((N) * (D) + (M)) + +DO_2OP_ACC_SCALAR_U(vmlas, DO_VMLAS) + /* * Long saturating scalar ops. As with DO_2OP_L, TYPE and H are for the * input (smaller) type and LESIZE, LTYPE, LH for the output (long) type. 
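Aside (not part of the patch): the folding in do_vqdmlah_w() relies on the plain
arithmetic identity 2*a*b + (c << 32) + (round << 31) == 2*(a*b + (c << 31) + (round << 30));
the reordering only changes where the three saturating adds can catch an overflow.
A throwaway standalone check of that identity (assumes the GCC/Clang __int128 extension;
the function name is made up for this sketch):

    #include <assert.h>
    #include <stdint.h>

    /* Verify 2*a*b + (c<<32) + (round<<31) == 2*(a*b + (c<<31) + (round<<30)) */
    static void check_vqdmlah_w_identity(int32_t a, int32_t b, int32_t c, int round)
    {
        __int128 full   = 2 * (__int128)a * b + ((__int128)c << 32)
                          + ((__int128)round << 31);
        __int128 folded = 2 * ((__int128)a * b + ((__int128)c << 31)
                               + ((__int128)round << 30));
        assert(full == folded);
    }

    int main(void)
    {
        check_vqdmlah_w_identity(INT32_MIN, INT32_MAX, INT32_MIN, 1);
        check_vqdmlah_w_identity(INT32_MAX, INT32_MAX, INT32_MAX, 1);
        check_vqdmlah_w_identity(12345, -67890, 4242, 0);
        return 0;
    }
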
@@ -1124,6 +1788,47 @@ DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=) DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=) DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=) +/* + * Multiply add dual accumulate ops + */ +#define DO_DAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \ + uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ + void *vm, uint32_t a) \ + { \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + TYPE *n = vn, *m = vm; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + if (mask & 1) { \ + if (e & 1) { \ + a ODDACC \ + n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \ + } else { \ + a EVENACC \ + n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \ + } \ + } \ + } \ + mve_advance_vpt(env); \ + return a; \ + } + +#define DO_DAV_S(INSN, XCHG, EVENACC, ODDACC) \ + DO_DAV(INSN##b, 1, int8_t, XCHG, EVENACC, ODDACC) \ + DO_DAV(INSN##h, 2, int16_t, XCHG, EVENACC, ODDACC) \ + DO_DAV(INSN##w, 4, int32_t, XCHG, EVENACC, ODDACC) + +#define DO_DAV_U(INSN, XCHG, EVENACC, ODDACC) \ + DO_DAV(INSN##b, 1, uint8_t, XCHG, EVENACC, ODDACC) \ + DO_DAV(INSN##h, 2, uint16_t, XCHG, EVENACC, ODDACC) \ + DO_DAV(INSN##w, 4, uint32_t, XCHG, EVENACC, ODDACC) + +DO_DAV_S(vmladavs, false, +=, +=) +DO_DAV_U(vmladavu, false, +=, +=) +DO_DAV_S(vmlsdav, false, +=, -=) +DO_DAV_S(vmladavsx, true, +=, +=) +DO_DAV_S(vmlsdavx, true, +=, -=) + /* * Rounding multiply add long dual accumulate high. In the pseudocode * this is implemented with a 72-bit internal accumulator value of which @@ -1182,13 +1887,105 @@ DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true) return ra; \ } \ -DO_VADDV(vaddvsb, 1, uint8_t) -DO_VADDV(vaddvsh, 2, uint16_t) -DO_VADDV(vaddvsw, 4, uint32_t) +DO_VADDV(vaddvsb, 1, int8_t) +DO_VADDV(vaddvsh, 2, int16_t) +DO_VADDV(vaddvsw, 4, int32_t) DO_VADDV(vaddvub, 1, uint8_t) DO_VADDV(vaddvuh, 2, uint16_t) DO_VADDV(vaddvuw, 4, uint32_t) +/* + * Vector max/min across vector. Unlike VADDV, we must + * read ra as the element size, not its full width. + * We work with int64_t internally for simplicity. + */ +#define DO_VMAXMINV(OP, ESIZE, TYPE, RATYPE, FN) \ + uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \ + uint32_t ra_in) \ + { \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + TYPE *m = vm; \ + int64_t ra = (RATYPE)ra_in; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + if (mask & 1) { \ + ra = FN(ra, m[H##ESIZE(e)]); \ + } \ + } \ + mve_advance_vpt(env); \ + return ra; \ + } \ + +#define DO_VMAXMINV_U(INSN, FN) \ + DO_VMAXMINV(INSN##b, 1, uint8_t, uint8_t, FN) \ + DO_VMAXMINV(INSN##h, 2, uint16_t, uint16_t, FN) \ + DO_VMAXMINV(INSN##w, 4, uint32_t, uint32_t, FN) +#define DO_VMAXMINV_S(INSN, FN) \ + DO_VMAXMINV(INSN##b, 1, int8_t, int8_t, FN) \ + DO_VMAXMINV(INSN##h, 2, int16_t, int16_t, FN) \ + DO_VMAXMINV(INSN##w, 4, int32_t, int32_t, FN) + +/* + * Helpers for max and min of absolute values across vector: + * note that we only take the absolute value of 'm', not 'n' + */ +static int64_t do_maxa(int64_t n, int64_t m) +{ + if (m < 0) { + m = -m; + } + return MAX(n, m); +} + +static int64_t do_mina(int64_t n, int64_t m) +{ + if (m < 0) { + m = -m; + } + return MIN(n, m); +} + +DO_VMAXMINV_S(vmaxvs, DO_MAX) +DO_VMAXMINV_U(vmaxvu, DO_MAX) +DO_VMAXMINV_S(vminvs, DO_MIN) +DO_VMAXMINV_U(vminvu, DO_MIN) +/* + * VMAXAV, VMINAV treat the general purpose input as unsigned + * and the vector elements as signed. 
+ */ +DO_VMAXMINV(vmaxavb, 1, int8_t, uint8_t, do_maxa) +DO_VMAXMINV(vmaxavh, 2, int16_t, uint16_t, do_maxa) +DO_VMAXMINV(vmaxavw, 4, int32_t, uint32_t, do_maxa) +DO_VMAXMINV(vminavb, 1, int8_t, uint8_t, do_mina) +DO_VMAXMINV(vminavh, 2, int16_t, uint16_t, do_mina) +DO_VMAXMINV(vminavw, 4, int32_t, uint32_t, do_mina) + +#define DO_VABAV(OP, ESIZE, TYPE) \ + uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ + void *vm, uint32_t ra) \ + { \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + TYPE *m = vm, *n = vn; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + if (mask & 1) { \ + int64_t n0 = n[H##ESIZE(e)]; \ + int64_t m0 = m[H##ESIZE(e)]; \ + uint32_t r = n0 >= m0 ? (n0 - m0) : (m0 - n0); \ + ra += r; \ + } \ + } \ + mve_advance_vpt(env); \ + return ra; \ + } + +DO_VABAV(vabavsb, 1, int8_t) +DO_VABAV(vabavsh, 2, int16_t) +DO_VABAV(vabavsw, 4, int32_t) +DO_VABAV(vabavub, 1, uint8_t) +DO_VABAV(vabavuh, 2, uint16_t) +DO_VABAV(vabavuw, 4, uint32_t) + #define DO_VADDLV(OP, TYPE, LTYPE) \ uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \ uint64_t ra) \ @@ -1269,6 +2066,8 @@ DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP) DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP) DO_2SHIFT_U(vrshli_u, DO_VRSHLU) DO_2SHIFT_S(vrshli_s, DO_VRSHLS) +DO_2SHIFT_SAT_U(vqrshli_u, DO_UQRSHL_OP) +DO_2SHIFT_SAT_S(vqrshli_s, DO_SQRSHL_OP) /* Shift-and-insert; we always work with 64 bits at a time */ #define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN) \ @@ -1279,11 +2078,12 @@ DO_2SHIFT_S(vrshli_s, DO_VRSHLS) uint16_t mask; \ uint64_t shiftmask; \ unsigned e; \ - if (shift == 0 || shift == ESIZE * 8) { \ + if (shift == ESIZE * 8) { \ /* \ - * Only VSLI can shift by 0; only VSRI can shift by
<dt>. \ - * The generic logic would give the right answer for 0 but \ - * fails for <dt>
. \ + * Only VSRI can shift by <dt>
; it should mean "don't \ + * update the destination". The generic logic can't handle \ + * this because it would try to shift by an out-of-range \ + * amount, so special case it here. \ */ \ goto done; \ } \ @@ -1357,6 +2157,7 @@ DO_VSHLL_ALL(vshllt, true) TYPE *d = vd; \ uint16_t mask = mve_element_mask(env); \ unsigned le; \ + mask >>= ESIZE * TOP; \ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ TYPE r = FN(m[H##LESIZE(le)], shift); \ mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \ @@ -1418,11 +2219,12 @@ static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max, uint16_t mask = mve_element_mask(env); \ bool qc = false; \ unsigned le; \ + mask >>= ESIZE * TOP; \ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ bool sat = false; \ TYPE r = FN(m[H##LESIZE(le)], shift, &sat); \ mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \ - qc |= sat && (mask & 1 << (TOP * ESIZE)); \ + qc |= sat & mask & 1; \ } \ if (qc) { \ env->vfp.qc[0] = qc; \ @@ -1488,6 +2290,84 @@ DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH) DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B) DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H) +#define DO_VMOVN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \ + void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ + { \ + LTYPE *m = vm; \ + TYPE *d = vd; \ + uint16_t mask = mve_element_mask(env); \ + unsigned le; \ + mask >>= ESIZE * TOP; \ + for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ + mergemask(&d[H##ESIZE(le * 2 + TOP)], \ + m[H##LESIZE(le)], mask); \ + } \ + mve_advance_vpt(env); \ + } + +DO_VMOVN(vmovnbb, false, 1, uint8_t, 2, uint16_t) +DO_VMOVN(vmovnbh, false, 2, uint16_t, 4, uint32_t) +DO_VMOVN(vmovntb, true, 1, uint8_t, 2, uint16_t) +DO_VMOVN(vmovnth, true, 2, uint16_t, 4, uint32_t) + +#define DO_VMOVN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \ + void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ + { \ + LTYPE *m = vm; \ + TYPE *d = vd; \ + uint16_t mask = mve_element_mask(env); \ + bool qc = false; \ + unsigned le; \ + mask >>= ESIZE * TOP; \ + for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ + bool sat = false; \ + TYPE r = FN(m[H##LESIZE(le)], &sat); \ + mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \ + qc |= sat & mask & 1; \ + } \ + if (qc) { \ + env->vfp.qc[0] = qc; \ + } \ + mve_advance_vpt(env); \ + } + +#define DO_VMOVN_SAT_UB(BOP, TOP, FN) \ + DO_VMOVN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \ + DO_VMOVN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN) + +#define DO_VMOVN_SAT_UH(BOP, TOP, FN) \ + DO_VMOVN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \ + DO_VMOVN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN) + +#define DO_VMOVN_SAT_SB(BOP, TOP, FN) \ + DO_VMOVN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \ + DO_VMOVN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN) + +#define DO_VMOVN_SAT_SH(BOP, TOP, FN) \ + DO_VMOVN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \ + DO_VMOVN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN) + +#define DO_VQMOVN_SB(N, SATP) \ + do_sat_bhs((int64_t)(N), INT8_MIN, INT8_MAX, SATP) +#define DO_VQMOVN_UB(N, SATP) \ + do_sat_bhs((uint64_t)(N), 0, UINT8_MAX, SATP) +#define DO_VQMOVUN_B(N, SATP) \ + do_sat_bhs((int64_t)(N), 0, UINT8_MAX, SATP) + +#define DO_VQMOVN_SH(N, SATP) \ + do_sat_bhs((int64_t)(N), INT16_MIN, INT16_MAX, SATP) +#define DO_VQMOVN_UH(N, SATP) \ + do_sat_bhs((uint64_t)(N), 0, UINT16_MAX, SATP) +#define DO_VQMOVUN_H(N, SATP) \ + do_sat_bhs((int64_t)(N), 0, UINT16_MAX, SATP) + +DO_VMOVN_SAT_SB(vqmovnbsb, vqmovntsb, DO_VQMOVN_SB) 
+DO_VMOVN_SAT_SH(vqmovnbsh, vqmovntsh, DO_VQMOVN_SH) +DO_VMOVN_SAT_UB(vqmovnbub, vqmovntub, DO_VQMOVN_UB) +DO_VMOVN_SAT_UH(vqmovnbuh, vqmovntuh, DO_VQMOVN_UH) +DO_VMOVN_SAT_SB(vqmovunbb, vqmovuntb, DO_VQMOVUN_B) +DO_VMOVN_SAT_SH(vqmovunbh, vqmovunth, DO_VQMOVUN_H) + uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm, uint32_t shift) { @@ -1560,6 +2440,8 @@ uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift) static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift, bool round, uint32_t *sat) { + int64_t val, extval; + if (shift <= -48) { /* Rounding the sign bit always produces 0. */ if (round) { @@ -1569,21 +2451,25 @@ static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift, } else if (shift < 0) { if (round) { src >>= -shift - 1; - return (src >> 1) + (src & 1); + val = (src >> 1) + (src & 1); + } else { + val = src >> -shift; } - return src >> -shift; - } else if (shift < 48) { - int64_t val = src << shift; - int64_t extval = sextract64(val, 0, 48); + extval = sextract64(val, 0, 48); if (!sat || val == extval) { return extval; } + } else if (shift < 48) { + int64_t extval = sextract64(src << shift, 0, 48); + if (!sat || src == (extval >> shift)) { + return extval; + } } else if (!sat || src == 0) { return 0; } *sat = 1; - return (1ULL << 47) - (src >= 0); + return src >= 0 ? MAKE_64BIT_MASK(0, 47) : MAKE_64BIT_MASK(47, 17); } /* Operate on 64-bit values, but saturate at 48 bits */ @@ -1606,9 +2492,8 @@ static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift, return extval; } } else if (shift < 48) { - uint64_t val = src << shift; - uint64_t extval = extract64(val, 0, 48); - if (!sat || val == extval) { + uint64_t extval = extract64(src << shift, 0, 48); + if (!sat || src == (extval >> shift)) { return extval; } } else if (!sat || src == 0) { @@ -1648,3 +2533,918 @@ uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift) { return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF); } + +#define DO_VIDUP(OP, ESIZE, TYPE, FN) \ + uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd, \ + uint32_t offset, uint32_t imm) \ + { \ + TYPE *d = vd; \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + mergemask(&d[H##ESIZE(e)], offset, mask); \ + offset = FN(offset, imm); \ + } \ + mve_advance_vpt(env); \ + return offset; \ + } + +#define DO_VIWDUP(OP, ESIZE, TYPE, FN) \ + uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd, \ + uint32_t offset, uint32_t wrap, \ + uint32_t imm) \ + { \ + TYPE *d = vd; \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + mergemask(&d[H##ESIZE(e)], offset, mask); \ + offset = FN(offset, wrap, imm); \ + } \ + mve_advance_vpt(env); \ + return offset; \ + } + +#define DO_VIDUP_ALL(OP, FN) \ + DO_VIDUP(OP##b, 1, int8_t, FN) \ + DO_VIDUP(OP##h, 2, int16_t, FN) \ + DO_VIDUP(OP##w, 4, int32_t, FN) + +#define DO_VIWDUP_ALL(OP, FN) \ + DO_VIWDUP(OP##b, 1, int8_t, FN) \ + DO_VIWDUP(OP##h, 2, int16_t, FN) \ + DO_VIWDUP(OP##w, 4, int32_t, FN) + +static uint32_t do_add_wrap(uint32_t offset, uint32_t wrap, uint32_t imm) +{ + offset += imm; + if (offset == wrap) { + offset = 0; + } + return offset; +} + +static uint32_t do_sub_wrap(uint32_t offset, uint32_t wrap, uint32_t imm) +{ + if (offset == 0) { + offset = wrap; + } + offset -= imm; + return offset; +} + +DO_VIDUP_ALL(vidup, DO_ADD) +DO_VIWDUP_ALL(viwdup, do_add_wrap) +DO_VIWDUP_ALL(vdwdup, do_sub_wrap) + +/* + * Vector 
comparison. + * P0 bits for non-executed beats (where eci_mask is 0) are unchanged. + * P0 bits for predicated lanes in executed beats (where mask is 0) are 0. + * P0 bits otherwise are updated with the results of the comparisons. + * We must also keep unchanged the MASK fields at the top of v7m.vpr. + */ +#define DO_VCMP(OP, ESIZE, TYPE, FN) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm) \ + { \ + TYPE *n = vn, *m = vm; \ + uint16_t mask = mve_element_mask(env); \ + uint16_t eci_mask = mve_eci_mask(env); \ + uint16_t beatpred = 0; \ + uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \ + unsigned e; \ + for (e = 0; e < 16 / ESIZE; e++) { \ + bool r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)]); \ + /* Comparison sets 0/1 bits for each byte in the element */ \ + beatpred |= r * emask; \ + emask <<= ESIZE; \ + } \ + beatpred &= mask; \ + env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \ + (beatpred & eci_mask); \ + mve_advance_vpt(env); \ + } + +#define DO_VCMP_SCALAR(OP, ESIZE, TYPE, FN) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ + uint32_t rm) \ + { \ + TYPE *n = vn; \ + uint16_t mask = mve_element_mask(env); \ + uint16_t eci_mask = mve_eci_mask(env); \ + uint16_t beatpred = 0; \ + uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \ + unsigned e; \ + for (e = 0; e < 16 / ESIZE; e++) { \ + bool r = FN(n[H##ESIZE(e)], (TYPE)rm); \ + /* Comparison sets 0/1 bits for each byte in the element */ \ + beatpred |= r * emask; \ + emask <<= ESIZE; \ + } \ + beatpred &= mask; \ + env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \ + (beatpred & eci_mask); \ + mve_advance_vpt(env); \ + } + +#define DO_VCMP_S(OP, FN) \ + DO_VCMP(OP##b, 1, int8_t, FN) \ + DO_VCMP(OP##h, 2, int16_t, FN) \ + DO_VCMP(OP##w, 4, int32_t, FN) \ + DO_VCMP_SCALAR(OP##_scalarb, 1, int8_t, FN) \ + DO_VCMP_SCALAR(OP##_scalarh, 2, int16_t, FN) \ + DO_VCMP_SCALAR(OP##_scalarw, 4, int32_t, FN) + +#define DO_VCMP_U(OP, FN) \ + DO_VCMP(OP##b, 1, uint8_t, FN) \ + DO_VCMP(OP##h, 2, uint16_t, FN) \ + DO_VCMP(OP##w, 4, uint32_t, FN) \ + DO_VCMP_SCALAR(OP##_scalarb, 1, uint8_t, FN) \ + DO_VCMP_SCALAR(OP##_scalarh, 2, uint16_t, FN) \ + DO_VCMP_SCALAR(OP##_scalarw, 4, uint32_t, FN) + +#define DO_EQ(N, M) ((N) == (M)) +#define DO_NE(N, M) ((N) != (M)) +#define DO_EQ(N, M) ((N) == (M)) +#define DO_EQ(N, M) ((N) == (M)) +#define DO_GE(N, M) ((N) >= (M)) +#define DO_LT(N, M) ((N) < (M)) +#define DO_GT(N, M) ((N) > (M)) +#define DO_LE(N, M) ((N) <= (M)) + +DO_VCMP_U(vcmpeq, DO_EQ) +DO_VCMP_U(vcmpne, DO_NE) +DO_VCMP_U(vcmpcs, DO_GE) +DO_VCMP_U(vcmphi, DO_GT) +DO_VCMP_S(vcmpge, DO_GE) +DO_VCMP_S(vcmplt, DO_LT) +DO_VCMP_S(vcmpgt, DO_GT) +DO_VCMP_S(vcmple, DO_LE) + +void HELPER(mve_vpsel)(CPUARMState *env, void *vd, void *vn, void *vm) +{ + /* + * Qd[n] = VPR.P0[n] ? Qn[n] : Qm[n] + * but note that whether bytes are written to Qd is still subject + * to (all forms of) predication in the usual way. + */ + uint64_t *d = vd, *n = vn, *m = vm; + uint16_t mask = mve_element_mask(env); + uint16_t p0 = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0); + unsigned e; + for (e = 0; e < 16 / 8; e++, mask >>= 8, p0 >>= 8) { + uint64_t r = m[H8(e)]; + mergemask(&r, n[H8(e)], p0); + mergemask(&d[H8(e)], r, mask); + } + mve_advance_vpt(env); +} + +void HELPER(mve_vpnot)(CPUARMState *env) +{ + /* + * P0 bits for unexecuted beats (where eci_mask is 0) are unchanged. + * P0 bits for predicated lanes in executed bits (where mask is 0) are 0. + * P0 bits otherwise are inverted. + * (This is the same logic as VCMP.) 
+ * This insn is itself subject to predication and to beat-wise execution, + * and after it executes VPT state advances in the usual way. + */ + uint16_t mask = mve_element_mask(env); + uint16_t eci_mask = mve_eci_mask(env); + uint16_t beatpred = ~env->v7m.vpr & mask; + env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (beatpred & eci_mask); + mve_advance_vpt(env); +} + +/* + * VCTP: P0 unexecuted bits unchanged, predicated bits zeroed, + * otherwise set according to value of Rn. The calculation of + * newmask here works in the same way as the calculation of the + * ltpmask in mve_element_mask(), but we have pre-calculated + * the masklen in the generated code. + */ +void HELPER(mve_vctp)(CPUARMState *env, uint32_t masklen) +{ + uint16_t mask = mve_element_mask(env); + uint16_t eci_mask = mve_eci_mask(env); + uint16_t newmask; + + assert(masklen <= 16); + newmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0; + newmask &= mask; + env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (newmask & eci_mask); + mve_advance_vpt(env); +} + +#define DO_1OP_SAT(OP, ESIZE, TYPE, FN) \ + void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ + { \ + TYPE *d = vd, *m = vm; \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + bool qc = false; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + bool sat = false; \ + mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)], &sat), mask); \ + qc |= sat & mask & 1; \ + } \ + if (qc) { \ + env->vfp.qc[0] = qc; \ + } \ + mve_advance_vpt(env); \ + } + +#define DO_VQABS_B(N, SATP) \ + do_sat_bhs(DO_ABS((int64_t)N), INT8_MIN, INT8_MAX, SATP) +#define DO_VQABS_H(N, SATP) \ + do_sat_bhs(DO_ABS((int64_t)N), INT16_MIN, INT16_MAX, SATP) +#define DO_VQABS_W(N, SATP) \ + do_sat_bhs(DO_ABS((int64_t)N), INT32_MIN, INT32_MAX, SATP) + +#define DO_VQNEG_B(N, SATP) do_sat_bhs(-(int64_t)N, INT8_MIN, INT8_MAX, SATP) +#define DO_VQNEG_H(N, SATP) do_sat_bhs(-(int64_t)N, INT16_MIN, INT16_MAX, SATP) +#define DO_VQNEG_W(N, SATP) do_sat_bhs(-(int64_t)N, INT32_MIN, INT32_MAX, SATP) + +DO_1OP_SAT(vqabsb, 1, int8_t, DO_VQABS_B) +DO_1OP_SAT(vqabsh, 2, int16_t, DO_VQABS_H) +DO_1OP_SAT(vqabsw, 4, int32_t, DO_VQABS_W) + +DO_1OP_SAT(vqnegb, 1, int8_t, DO_VQNEG_B) +DO_1OP_SAT(vqnegh, 2, int16_t, DO_VQNEG_H) +DO_1OP_SAT(vqnegw, 4, int32_t, DO_VQNEG_W) + +/* + * VMAXA, VMINA: vd is unsigned; vm is signed, and we take its + * absolute value; we then do an unsigned comparison. + */ +#define DO_VMAXMINA(OP, ESIZE, STYPE, UTYPE, FN) \ + void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ + { \ + UTYPE *d = vd; \ + STYPE *m = vm; \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + UTYPE r = DO_ABS(m[H##ESIZE(e)]); \ + r = FN(d[H##ESIZE(e)], r); \ + mergemask(&d[H##ESIZE(e)], r, mask); \ + } \ + mve_advance_vpt(env); \ + } + +DO_VMAXMINA(vmaxab, 1, int8_t, uint8_t, DO_MAX) +DO_VMAXMINA(vmaxah, 2, int16_t, uint16_t, DO_MAX) +DO_VMAXMINA(vmaxaw, 4, int32_t, uint32_t, DO_MAX) +DO_VMAXMINA(vminab, 1, int8_t, uint8_t, DO_MIN) +DO_VMAXMINA(vminah, 2, int16_t, uint16_t, DO_MIN) +DO_VMAXMINA(vminaw, 4, int32_t, uint32_t, DO_MIN) + +/* + * 2-operand floating point. Note that if an element is partially + * predicated we must do the FP operation to update the non-predicated + * bytes, but we must be careful to avoid updating the FP exception + * state unless byte 0 of the element was unpredicated. 
+ */ +#define DO_2OP_FP(OP, ESIZE, TYPE, FN) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, \ + void *vd, void *vn, void *vm) \ + { \ + TYPE *d = vd, *n = vn, *m = vm; \ + TYPE r; \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + float_status *fpst; \ + float_status scratch_fpst; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ + continue; \ + } \ + fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ + &env->vfp.standard_fp_status; \ + if (!(mask & 1)) { \ + /* We need the result but without updating flags */ \ + scratch_fpst = *fpst; \ + fpst = &scratch_fpst; \ + } \ + r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], fpst); \ + mergemask(&d[H##ESIZE(e)], r, mask); \ + } \ + mve_advance_vpt(env); \ + } + +#define DO_2OP_FP_ALL(OP, FN) \ + DO_2OP_FP(OP##h, 2, float16, float16_##FN) \ + DO_2OP_FP(OP##s, 4, float32, float32_##FN) + +DO_2OP_FP_ALL(vfadd, add) +DO_2OP_FP_ALL(vfsub, sub) +DO_2OP_FP_ALL(vfmul, mul) + +static inline float16 float16_abd(float16 a, float16 b, float_status *s) +{ + return float16_abs(float16_sub(a, b, s)); +} + +static inline float32 float32_abd(float32 a, float32 b, float_status *s) +{ + return float32_abs(float32_sub(a, b, s)); +} + +DO_2OP_FP_ALL(vfabd, abd) +DO_2OP_FP_ALL(vmaxnm, maxnum) +DO_2OP_FP_ALL(vminnm, minnum) + +static inline float16 float16_maxnuma(float16 a, float16 b, float_status *s) +{ + return float16_maxnum(float16_abs(a), float16_abs(b), s); +} + +static inline float32 float32_maxnuma(float32 a, float32 b, float_status *s) +{ + return float32_maxnum(float32_abs(a), float32_abs(b), s); +} + +static inline float16 float16_minnuma(float16 a, float16 b, float_status *s) +{ + return float16_minnum(float16_abs(a), float16_abs(b), s); +} + +static inline float32 float32_minnuma(float32 a, float32 b, float_status *s) +{ + return float32_minnum(float32_abs(a), float32_abs(b), s); +} + +DO_2OP_FP_ALL(vmaxnma, maxnuma) +DO_2OP_FP_ALL(vminnma, minnuma) + +#define DO_VCADD_FP(OP, ESIZE, TYPE, FN0, FN1) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, \ + void *vd, void *vn, void *vm) \ + { \ + TYPE *d = vd, *n = vn, *m = vm; \ + TYPE r[16 / ESIZE]; \ + uint16_t tm, mask = mve_element_mask(env); \ + unsigned e; \ + float_status *fpst; \ + float_status scratch_fpst; \ + /* Calculate all results first to avoid overwriting inputs */ \ + for (e = 0, tm = mask; e < 16 / ESIZE; e++, tm >>= ESIZE) { \ + if ((tm & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ + r[e] = 0; \ + continue; \ + } \ + fpst = (ESIZE == 2) ? 
&env->vfp.standard_fp_status_f16 : \ + &env->vfp.standard_fp_status; \ + if (!(tm & 1)) { \ + /* We need the result but without updating flags */ \ + scratch_fpst = *fpst; \ + fpst = &scratch_fpst; \ + } \ + if (!(e & 1)) { \ + r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)], fpst); \ + } else { \ + r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)], fpst); \ + } \ + } \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + mergemask(&d[H##ESIZE(e)], r[e], mask); \ + } \ + mve_advance_vpt(env); \ + } + +DO_VCADD_FP(vfcadd90h, 2, float16, float16_sub, float16_add) +DO_VCADD_FP(vfcadd90s, 4, float32, float32_sub, float32_add) +DO_VCADD_FP(vfcadd270h, 2, float16, float16_add, float16_sub) +DO_VCADD_FP(vfcadd270s, 4, float32, float32_add, float32_sub) + +#define DO_VFMA(OP, ESIZE, TYPE, CHS) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, \ + void *vd, void *vn, void *vm) \ + { \ + TYPE *d = vd, *n = vn, *m = vm; \ + TYPE r; \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + float_status *fpst; \ + float_status scratch_fpst; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ + continue; \ + } \ + fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ + &env->vfp.standard_fp_status; \ + if (!(mask & 1)) { \ + /* We need the result but without updating flags */ \ + scratch_fpst = *fpst; \ + fpst = &scratch_fpst; \ + } \ + r = n[H##ESIZE(e)]; \ + if (CHS) { \ + r = TYPE##_chs(r); \ + } \ + r = TYPE##_muladd(r, m[H##ESIZE(e)], d[H##ESIZE(e)], \ + 0, fpst); \ + mergemask(&d[H##ESIZE(e)], r, mask); \ + } \ + mve_advance_vpt(env); \ + } + +DO_VFMA(vfmah, 2, float16, false) +DO_VFMA(vfmas, 4, float32, false) +DO_VFMA(vfmsh, 2, float16, true) +DO_VFMA(vfmss, 4, float32, true) + +#define DO_VCMLA(OP, ESIZE, TYPE, ROT, FN) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, \ + void *vd, void *vn, void *vm) \ + { \ + TYPE *d = vd, *n = vn, *m = vm; \ + TYPE r0, r1, e1, e2, e3, e4; \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + float_status *fpst0, *fpst1; \ + float_status scratch_fpst; \ + /* We loop through pairs of elements at a time */ \ + for (e = 0; e < 16 / ESIZE; e += 2, mask >>= ESIZE * 2) { \ + if ((mask & MAKE_64BIT_MASK(0, ESIZE * 2)) == 0) { \ + continue; \ + } \ + fpst0 = (ESIZE == 2) ? 
&env->vfp.standard_fp_status_f16 : \ + &env->vfp.standard_fp_status; \ + fpst1 = fpst0; \ + if (!(mask & 1)) { \ + scratch_fpst = *fpst0; \ + fpst0 = &scratch_fpst; \ + } \ + if (!(mask & (1 << ESIZE))) { \ + scratch_fpst = *fpst1; \ + fpst1 = &scratch_fpst; \ + } \ + switch (ROT) { \ + case 0: \ + e1 = m[H##ESIZE(e)]; \ + e2 = n[H##ESIZE(e)]; \ + e3 = m[H##ESIZE(e + 1)]; \ + e4 = n[H##ESIZE(e)]; \ + break; \ + case 1: \ + e1 = TYPE##_chs(m[H##ESIZE(e + 1)]); \ + e2 = n[H##ESIZE(e + 1)]; \ + e3 = m[H##ESIZE(e)]; \ + e4 = n[H##ESIZE(e + 1)]; \ + break; \ + case 2: \ + e1 = TYPE##_chs(m[H##ESIZE(e)]); \ + e2 = n[H##ESIZE(e)]; \ + e3 = TYPE##_chs(m[H##ESIZE(e + 1)]); \ + e4 = n[H##ESIZE(e)]; \ + break; \ + case 3: \ + e1 = m[H##ESIZE(e + 1)]; \ + e2 = n[H##ESIZE(e + 1)]; \ + e3 = TYPE##_chs(m[H##ESIZE(e)]); \ + e4 = n[H##ESIZE(e + 1)]; \ + break; \ + default: \ + g_assert_not_reached(); \ + } \ + r0 = FN(e2, e1, d[H##ESIZE(e)], fpst0); \ + r1 = FN(e4, e3, d[H##ESIZE(e + 1)], fpst1); \ + mergemask(&d[H##ESIZE(e)], r0, mask); \ + mergemask(&d[H##ESIZE(e + 1)], r1, mask >> ESIZE); \ + } \ + mve_advance_vpt(env); \ + } + +#define DO_VCMULH(N, M, D, S) float16_mul(N, M, S) +#define DO_VCMULS(N, M, D, S) float32_mul(N, M, S) + +#define DO_VCMLAH(N, M, D, S) float16_muladd(N, M, D, 0, S) +#define DO_VCMLAS(N, M, D, S) float32_muladd(N, M, D, 0, S) + +DO_VCMLA(vcmul0h, 2, float16, 0, DO_VCMULH) +DO_VCMLA(vcmul0s, 4, float32, 0, DO_VCMULS) +DO_VCMLA(vcmul90h, 2, float16, 1, DO_VCMULH) +DO_VCMLA(vcmul90s, 4, float32, 1, DO_VCMULS) +DO_VCMLA(vcmul180h, 2, float16, 2, DO_VCMULH) +DO_VCMLA(vcmul180s, 4, float32, 2, DO_VCMULS) +DO_VCMLA(vcmul270h, 2, float16, 3, DO_VCMULH) +DO_VCMLA(vcmul270s, 4, float32, 3, DO_VCMULS) + +DO_VCMLA(vcmla0h, 2, float16, 0, DO_VCMLAH) +DO_VCMLA(vcmla0s, 4, float32, 0, DO_VCMLAS) +DO_VCMLA(vcmla90h, 2, float16, 1, DO_VCMLAH) +DO_VCMLA(vcmla90s, 4, float32, 1, DO_VCMLAS) +DO_VCMLA(vcmla180h, 2, float16, 2, DO_VCMLAH) +DO_VCMLA(vcmla180s, 4, float32, 2, DO_VCMLAS) +DO_VCMLA(vcmla270h, 2, float16, 3, DO_VCMLAH) +DO_VCMLA(vcmla270s, 4, float32, 3, DO_VCMLAS) + +#define DO_2OP_FP_SCALAR(OP, ESIZE, TYPE, FN) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, \ + void *vd, void *vn, uint32_t rm) \ + { \ + TYPE *d = vd, *n = vn; \ + TYPE r, m = rm; \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + float_status *fpst; \ + float_status scratch_fpst; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ + continue; \ + } \ + fpst = (ESIZE == 2) ? 
&env->vfp.standard_fp_status_f16 : \ + &env->vfp.standard_fp_status; \ + if (!(mask & 1)) { \ + /* We need the result but without updating flags */ \ + scratch_fpst = *fpst; \ + fpst = &scratch_fpst; \ + } \ + r = FN(n[H##ESIZE(e)], m, fpst); \ + mergemask(&d[H##ESIZE(e)], r, mask); \ + } \ + mve_advance_vpt(env); \ + } + +#define DO_2OP_FP_SCALAR_ALL(OP, FN) \ + DO_2OP_FP_SCALAR(OP##h, 2, float16, float16_##FN) \ + DO_2OP_FP_SCALAR(OP##s, 4, float32, float32_##FN) + +DO_2OP_FP_SCALAR_ALL(vfadd_scalar, add) +DO_2OP_FP_SCALAR_ALL(vfsub_scalar, sub) +DO_2OP_FP_SCALAR_ALL(vfmul_scalar, mul) + +#define DO_2OP_FP_ACC_SCALAR(OP, ESIZE, TYPE, FN) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, \ + void *vd, void *vn, uint32_t rm) \ + { \ + TYPE *d = vd, *n = vn; \ + TYPE r, m = rm; \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + float_status *fpst; \ + float_status scratch_fpst; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ + continue; \ + } \ + fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ + &env->vfp.standard_fp_status; \ + if (!(mask & 1)) { \ + /* We need the result but without updating flags */ \ + scratch_fpst = *fpst; \ + fpst = &scratch_fpst; \ + } \ + r = FN(n[H##ESIZE(e)], m, d[H##ESIZE(e)], 0, fpst); \ + mergemask(&d[H##ESIZE(e)], r, mask); \ + } \ + mve_advance_vpt(env); \ + } + +/* VFMAS is vector * vector + scalar, so swap op2 and op3 */ +#define DO_VFMAS_SCALARH(N, M, D, F, S) float16_muladd(N, D, M, F, S) +#define DO_VFMAS_SCALARS(N, M, D, F, S) float32_muladd(N, D, M, F, S) + +/* VFMA is vector * scalar + vector */ +DO_2OP_FP_ACC_SCALAR(vfma_scalarh, 2, float16, float16_muladd) +DO_2OP_FP_ACC_SCALAR(vfma_scalars, 4, float32, float32_muladd) +DO_2OP_FP_ACC_SCALAR(vfmas_scalarh, 2, float16, DO_VFMAS_SCALARH) +DO_2OP_FP_ACC_SCALAR(vfmas_scalars, 4, float32, DO_VFMAS_SCALARS) + +/* Floating point max/min across vector. */ +#define DO_FP_VMAXMINV(OP, ESIZE, TYPE, ABS, FN) \ + uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \ + uint32_t ra_in) \ + { \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + TYPE *m = vm; \ + TYPE ra = (TYPE)ra_in; \ + float_status *fpst = (ESIZE == 2) ? 
\ + &env->vfp.standard_fp_status_f16 : \ + &env->vfp.standard_fp_status; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + if (mask & 1) { \ + TYPE v = m[H##ESIZE(e)]; \ + if (TYPE##_is_signaling_nan(ra, fpst)) { \ + ra = TYPE##_silence_nan(ra, fpst); \ + float_raise(float_flag_invalid, fpst); \ + } \ + if (TYPE##_is_signaling_nan(v, fpst)) { \ + v = TYPE##_silence_nan(v, fpst); \ + float_raise(float_flag_invalid, fpst); \ + } \ + if (ABS) { \ + v = TYPE##_abs(v); \ + } \ + ra = FN(ra, v, fpst); \ + } \ + } \ + mve_advance_vpt(env); \ + return ra; \ + } \ + +#define NOP(X) (X) + +DO_FP_VMAXMINV(vmaxnmvh, 2, float16, false, float16_maxnum) +DO_FP_VMAXMINV(vmaxnmvs, 4, float32, false, float32_maxnum) +DO_FP_VMAXMINV(vminnmvh, 2, float16, false, float16_minnum) +DO_FP_VMAXMINV(vminnmvs, 4, float32, false, float32_minnum) +DO_FP_VMAXMINV(vmaxnmavh, 2, float16, true, float16_maxnum) +DO_FP_VMAXMINV(vmaxnmavs, 4, float32, true, float32_maxnum) +DO_FP_VMAXMINV(vminnmavh, 2, float16, true, float16_minnum) +DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum) + +/* FP compares; note that all comparisons signal InvalidOp for QNaNs */ +#define DO_VCMP_FP(OP, ESIZE, TYPE, FN) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm) \ + { \ + TYPE *n = vn, *m = vm; \ + uint16_t mask = mve_element_mask(env); \ + uint16_t eci_mask = mve_eci_mask(env); \ + uint16_t beatpred = 0; \ + uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \ + unsigned e; \ + float_status *fpst; \ + float_status scratch_fpst; \ + bool r; \ + for (e = 0; e < 16 / ESIZE; e++, emask <<= ESIZE) { \ + if ((mask & emask) == 0) { \ + continue; \ + } \ + fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ + &env->vfp.standard_fp_status; \ + if (!(mask & (1 << (e * ESIZE)))) { \ + /* We need the result but without updating flags */ \ + scratch_fpst = *fpst; \ + fpst = &scratch_fpst; \ + } \ + r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], fpst); \ + /* Comparison sets 0/1 bits for each byte in the element */ \ + beatpred |= r * emask; \ + } \ + beatpred &= mask; \ + env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \ + (beatpred & eci_mask); \ + mve_advance_vpt(env); \ + } + +#define DO_VCMP_FP_SCALAR(OP, ESIZE, TYPE, FN) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ + uint32_t rm) \ + { \ + TYPE *n = vn; \ + uint16_t mask = mve_element_mask(env); \ + uint16_t eci_mask = mve_eci_mask(env); \ + uint16_t beatpred = 0; \ + uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \ + unsigned e; \ + float_status *fpst; \ + float_status scratch_fpst; \ + bool r; \ + for (e = 0; e < 16 / ESIZE; e++, emask <<= ESIZE) { \ + if ((mask & emask) == 0) { \ + continue; \ + } \ + fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ + &env->vfp.standard_fp_status; \ + if (!(mask & (1 << (e * ESIZE)))) { \ + /* We need the result but without updating flags */ \ + scratch_fpst = *fpst; \ + fpst = &scratch_fpst; \ + } \ + r = FN(n[H##ESIZE(e)], (TYPE)rm, fpst); \ + /* Comparison sets 0/1 bits for each byte in the element */ \ + beatpred |= r * emask; \ + } \ + beatpred &= mask; \ + env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \ + (beatpred & eci_mask); \ + mve_advance_vpt(env); \ + } + +#define DO_VCMP_FP_BOTH(VOP, SOP, ESIZE, TYPE, FN) \ + DO_VCMP_FP(VOP, ESIZE, TYPE, FN) \ + DO_VCMP_FP_SCALAR(SOP, ESIZE, TYPE, FN) + +/* + * Some care is needed here to get the correct result for the unordered case. 
+ * Architecturally EQ, GE and GT are defined to be false for unordered, but + * the NE, LT and LE comparisons are defined as simple logical inverses of + * EQ, GE and GT and so they must return true for unordered. The softfloat + * comparison functions float*_{eq,le,lt} all return false for unordered. + */ +#define DO_GE16(X, Y, S) float16_le(Y, X, S) +#define DO_GE32(X, Y, S) float32_le(Y, X, S) +#define DO_GT16(X, Y, S) float16_lt(Y, X, S) +#define DO_GT32(X, Y, S) float32_lt(Y, X, S) + +DO_VCMP_FP_BOTH(vfcmpeqh, vfcmpeq_scalarh, 2, float16, float16_eq) +DO_VCMP_FP_BOTH(vfcmpeqs, vfcmpeq_scalars, 4, float32, float32_eq) + +DO_VCMP_FP_BOTH(vfcmpneh, vfcmpne_scalarh, 2, float16, !float16_eq) +DO_VCMP_FP_BOTH(vfcmpnes, vfcmpne_scalars, 4, float32, !float32_eq) + +DO_VCMP_FP_BOTH(vfcmpgeh, vfcmpge_scalarh, 2, float16, DO_GE16) +DO_VCMP_FP_BOTH(vfcmpges, vfcmpge_scalars, 4, float32, DO_GE32) + +DO_VCMP_FP_BOTH(vfcmplth, vfcmplt_scalarh, 2, float16, !DO_GE16) +DO_VCMP_FP_BOTH(vfcmplts, vfcmplt_scalars, 4, float32, !DO_GE32) + +DO_VCMP_FP_BOTH(vfcmpgth, vfcmpgt_scalarh, 2, float16, DO_GT16) +DO_VCMP_FP_BOTH(vfcmpgts, vfcmpgt_scalars, 4, float32, DO_GT32) + +DO_VCMP_FP_BOTH(vfcmpleh, vfcmple_scalarh, 2, float16, !DO_GT16) +DO_VCMP_FP_BOTH(vfcmples, vfcmple_scalars, 4, float32, !DO_GT32) + +#define DO_VCVT_FIXED(OP, ESIZE, TYPE, FN) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vm, \ + uint32_t shift) \ + { \ + TYPE *d = vd, *m = vm; \ + TYPE r; \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + float_status *fpst; \ + float_status scratch_fpst; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ + continue; \ + } \ + fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ + &env->vfp.standard_fp_status; \ + if (!(mask & 1)) { \ + /* We need the result but without updating flags */ \ + scratch_fpst = *fpst; \ + fpst = &scratch_fpst; \ + } \ + r = FN(m[H##ESIZE(e)], shift, fpst); \ + mergemask(&d[H##ESIZE(e)], r, mask); \ + } \ + mve_advance_vpt(env); \ + } + +DO_VCVT_FIXED(vcvt_sh, 2, int16_t, helper_vfp_shtoh) +DO_VCVT_FIXED(vcvt_uh, 2, uint16_t, helper_vfp_uhtoh) +DO_VCVT_FIXED(vcvt_hs, 2, int16_t, helper_vfp_toshh_round_to_zero) +DO_VCVT_FIXED(vcvt_hu, 2, uint16_t, helper_vfp_touhh_round_to_zero) +DO_VCVT_FIXED(vcvt_sf, 4, int32_t, helper_vfp_sltos) +DO_VCVT_FIXED(vcvt_uf, 4, uint32_t, helper_vfp_ultos) +DO_VCVT_FIXED(vcvt_fs, 4, int32_t, helper_vfp_tosls_round_to_zero) +DO_VCVT_FIXED(vcvt_fu, 4, uint32_t, helper_vfp_touls_round_to_zero) + +/* VCVT with specified rmode */ +#define DO_VCVT_RMODE(OP, ESIZE, TYPE, FN) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, \ + void *vd, void *vm, uint32_t rmode) \ + { \ + TYPE *d = vd, *m = vm; \ + TYPE r; \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + float_status *fpst; \ + float_status scratch_fpst; \ + float_status *base_fpst = (ESIZE == 2) ? 
\ + &env->vfp.standard_fp_status_f16 : \ + &env->vfp.standard_fp_status; \ + uint32_t prev_rmode = get_float_rounding_mode(base_fpst); \ + set_float_rounding_mode(rmode, base_fpst); \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ + continue; \ + } \ + fpst = base_fpst; \ + if (!(mask & 1)) { \ + /* We need the result but without updating flags */ \ + scratch_fpst = *fpst; \ + fpst = &scratch_fpst; \ + } \ + r = FN(m[H##ESIZE(e)], 0, fpst); \ + mergemask(&d[H##ESIZE(e)], r, mask); \ + } \ + set_float_rounding_mode(prev_rmode, base_fpst); \ + mve_advance_vpt(env); \ + } + +DO_VCVT_RMODE(vcvt_rm_sh, 2, uint16_t, helper_vfp_toshh) +DO_VCVT_RMODE(vcvt_rm_uh, 2, uint16_t, helper_vfp_touhh) +DO_VCVT_RMODE(vcvt_rm_ss, 4, uint32_t, helper_vfp_tosls) +DO_VCVT_RMODE(vcvt_rm_us, 4, uint32_t, helper_vfp_touls) + +#define DO_VRINT_RM_H(M, F, S) helper_rinth(M, S) +#define DO_VRINT_RM_S(M, F, S) helper_rints(M, S) + +DO_VCVT_RMODE(vrint_rm_h, 2, uint16_t, DO_VRINT_RM_H) +DO_VCVT_RMODE(vrint_rm_s, 4, uint32_t, DO_VRINT_RM_S) + +/* + * VCVT between halfprec and singleprec. As usual for halfprec + * conversions, FZ16 is ignored and AHP is observed. + */ +static void do_vcvt_sh(CPUARMState *env, void *vd, void *vm, int top) +{ + uint16_t *d = vd; + uint32_t *m = vm; + uint16_t r; + uint16_t mask = mve_element_mask(env); + bool ieee = !(env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_AHP); + unsigned e; + float_status *fpst; + float_status scratch_fpst; + float_status *base_fpst = &env->vfp.standard_fp_status; + bool old_fz = get_flush_to_zero(base_fpst); + set_flush_to_zero(false, base_fpst); + for (e = 0; e < 16 / 4; e++, mask >>= 4) { + if ((mask & MAKE_64BIT_MASK(0, 4)) == 0) { + continue; + } + fpst = base_fpst; + if (!(mask & 1)) { + /* We need the result but without updating flags */ + scratch_fpst = *fpst; + fpst = &scratch_fpst; + } + r = float32_to_float16(m[H4(e)], ieee, fpst); + mergemask(&d[H2(e * 2 + top)], r, mask >> (top * 2)); + } + set_flush_to_zero(old_fz, base_fpst); + mve_advance_vpt(env); +} + +static void do_vcvt_hs(CPUARMState *env, void *vd, void *vm, int top) +{ + uint32_t *d = vd; + uint16_t *m = vm; + uint32_t r; + uint16_t mask = mve_element_mask(env); + bool ieee = !(env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_AHP); + unsigned e; + float_status *fpst; + float_status scratch_fpst; + float_status *base_fpst = &env->vfp.standard_fp_status; + bool old_fiz = get_flush_inputs_to_zero(base_fpst); + set_flush_inputs_to_zero(false, base_fpst); + for (e = 0; e < 16 / 4; e++, mask >>= 4) { + if ((mask & MAKE_64BIT_MASK(0, 4)) == 0) { + continue; + } + fpst = base_fpst; + if (!(mask & (1 << (top * 2)))) { + /* We need the result but without updating flags */ + scratch_fpst = *fpst; + fpst = &scratch_fpst; + } + r = float16_to_float32(m[H2(e * 2 + top)], ieee, fpst); + mergemask(&d[H4(e)], r, mask); + } + set_flush_inputs_to_zero(old_fiz, base_fpst); + mve_advance_vpt(env); +} + +void HELPER(mve_vcvtb_sh)(CPUARMState *env, void *vd, void *vm) +{ + do_vcvt_sh(env, vd, vm, 0); +} +void HELPER(mve_vcvtt_sh)(CPUARMState *env, void *vd, void *vm) +{ + do_vcvt_sh(env, vd, vm, 1); +} +void HELPER(mve_vcvtb_hs)(CPUARMState *env, void *vd, void *vm) +{ + do_vcvt_hs(env, vd, vm, 0); +} +void HELPER(mve_vcvtt_hs)(CPUARMState *env, void *vd, void *vm) +{ + do_vcvt_hs(env, vd, vm, 1); +} + +#define DO_1OP_FP(OP, ESIZE, TYPE, FN) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vm) \ + { \ + TYPE *d = vd, *m = vm; \ + TYPE r; \ + uint16_t mask = 
mve_element_mask(env); \ + unsigned e; \ + float_status *fpst; \ + float_status scratch_fpst; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ + continue; \ + } \ + fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ + &env->vfp.standard_fp_status; \ + if (!(mask & 1)) { \ + /* We need the result but without updating flags */ \ + scratch_fpst = *fpst; \ + fpst = &scratch_fpst; \ + } \ + r = FN(m[H##ESIZE(e)], fpst); \ + mergemask(&d[H##ESIZE(e)], r, mask); \ + } \ + mve_advance_vpt(env); \ + } + +DO_1OP_FP(vrintx_h, 2, float16, float16_round_to_int) +DO_1OP_FP(vrintx_s, 4, float32, float32_round_to_int) diff --git a/target/arm/neon_helper.c b/target/arm/neon_helper.c index 338b9189d5..bc6c4a54e9 100644 --- a/target/arm/neon_helper.c +++ b/target/arm/neon_helper.c @@ -23,7 +23,7 @@ typedef struct \ { \ type v1; \ } neon_##name; -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN #define NEON_TYPE2(name, type) \ typedef struct \ { \ diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c index e98fd86305..70672bcd9f 100644 --- a/target/arm/op_helper.c +++ b/target/arm/op_helper.c @@ -23,10 +23,26 @@ #include "internals.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" +#include "cpregs.h" #define SIGNBIT (uint32_t)0x80000000 #define SIGNBIT64 ((uint64_t)1 << 63) +int exception_target_el(CPUARMState *env) +{ + int target_el = MAX(1, arm_current_el(env)); + + /* + * No such thing as secure EL1 if EL3 is aarch32, + * so update the target EL to EL3 in this case. + */ + if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) { + target_el = 3; + } + + return target_el; +} + void raise_exception(CPUARMState *env, uint32_t excp, uint32_t syndrome, uint32_t target_el) { @@ -62,7 +78,7 @@ void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome, * we must restore CPU state here before setting the syndrome * the caller passed us, and cannot use cpu_loop_exit_restore(). */ - cpu_restore_state(cs, ra, true); + cpu_restore_state(cs, ra); raise_exception(env, excp, syndrome, target_el); } @@ -224,6 +240,22 @@ void HELPER(setend)(CPUARMState *env) arm_rebuild_hflags(env); } +void HELPER(check_bxj_trap)(CPUARMState *env, uint32_t rm) +{ + /* + * Only called if in NS EL0 or EL1 for a BXJ for a v7A CPU; + * check if HSTR.TJDBX means we need to trap to EL2. + */ + if (env->cp15.hstr_el2 & HSTR_TJDBX) { + /* + * We know the condition code check passed, so take the IMPDEF + * choice to always report CV=1 COND 0xe + */ + uint32_t syn = syn_bxjtrap(1, 0xe, rm); + raise_exception_ra(env, EXCP_HYP_TRAP, syn, 2, GETPC()); + } +} + #ifndef CONFIG_USER_ONLY /* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped. * The function returns the target EL (1-3) if the instruction is to be trapped; @@ -349,7 +381,7 @@ void HELPER(yield)(CPUARMState *env) * those EXCP values which are special cases for QEMU to interrupt * execution and not to be used for exceptions which are passed to * the guest (those must all have syndrome information and thus should - * use exception_with_syndrome). + * use exception_with_syndrome*). 
*/ void HELPER(exception_internal)(CPUARMState *env, uint32_t excp) { @@ -361,39 +393,20 @@ void HELPER(exception_internal)(CPUARMState *env, uint32_t excp) } /* Raise an exception with the specified syndrome register value */ -void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp, - uint32_t syndrome, uint32_t target_el) +void HELPER(exception_with_syndrome_el)(CPUARMState *env, uint32_t excp, + uint32_t syndrome, uint32_t target_el) { raise_exception(env, excp, syndrome, target_el); } -/* Raise an EXCP_BKPT with the specified syndrome register value, - * targeting the correct exception level for debug exceptions. +/* + * Raise an exception with the specified syndrome register value + * to the default target el. */ -void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome) +void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp, + uint32_t syndrome) { - int debug_el = arm_debug_target_el(env); - int cur_el = arm_current_el(env); - - /* FSR will only be used if the debug target EL is AArch32. */ - env->exception.fsr = arm_debug_exception_fsr(env); - /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing - * values to the guest that it shouldn't be able to see at its - * exception/security level. - */ - env->exception.vaddress = 0; - /* - * Other kinds of architectural debug exception are ignored if - * they target an exception level below the current one (in QEMU - * this is checked by arm_generate_debug_exceptions()). Breakpoint - * instructions are special because they always generate an exception - * to somewhere: if they can't go to the configured debug exception - * level they are taken to the current exception level. - */ - if (debug_el < cur_el) { - debug_el = cur_el; - } - raise_exception(env, EXCP_BKPT, syndrome, debug_el); + raise_exception(env, excp, syndrome, exception_target_el(env)); } uint32_t HELPER(cpsr_read)(CPUARMState *env) @@ -614,12 +627,15 @@ uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno) void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome, uint32_t isread) { + ARMCPU *cpu = env_archcpu(env); const ARMCPRegInfo *ri = rip; + CPAccessResult res = CP_ACCESS_OK; int target_el; if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14 && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) { - raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env)); + res = CP_ACCESS_TRAP; + goto fail; } /* @@ -638,61 +654,54 @@ void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome, mask &= ~((1 << 4) | (1 << 14)); if (env->cp15.hstr_el2 & mask) { - target_el = 2; - goto exept; + res = CP_ACCESS_TRAP_EL2; + goto fail; } } - if (!ri->accessfn) { + if (ri->accessfn) { + res = ri->accessfn(env, ri, isread); + } + if (likely(res == CP_ACCESS_OK)) { return; } - switch (ri->accessfn(env, ri, isread)) { - case CP_ACCESS_OK: - return; + fail: + switch (res & ~CP_ACCESS_EL_MASK) { case CP_ACCESS_TRAP: - target_el = exception_target_el(env); - break; - case CP_ACCESS_TRAP_EL2: - /* Requesting a trap to EL2 when we're in EL3 is - * a bug in the access function. 
- */ - assert(arm_current_el(env) != 3); - target_el = 2; - break; - case CP_ACCESS_TRAP_EL3: - target_el = 3; break; case CP_ACCESS_TRAP_UNCATEGORIZED: - target_el = exception_target_el(env); + if (cpu_isar_feature(aa64_ids, cpu) && isread && + arm_cpreg_in_idspace(ri)) { + /* + * FEAT_IDST says this should be reported as EC_SYSTEMREGISTERTRAP, + * not EC_UNCATEGORIZED + */ + break; + } syndrome = syn_uncategorized(); break; - case CP_ACCESS_TRAP_UNCATEGORIZED_EL2: - target_el = 2; - syndrome = syn_uncategorized(); - break; - case CP_ACCESS_TRAP_UNCATEGORIZED_EL3: - target_el = 3; - syndrome = syn_uncategorized(); - break; - case CP_ACCESS_TRAP_FP_EL2: - target_el = 2; - /* Since we are an implementation that takes exceptions on a trapped - * conditional insn only if the insn has passed its condition code - * check, we take the IMPDEF choice to always report CV=1 COND=0xe - * (which is also the required value for AArch64 traps). - */ - syndrome = syn_fp_access_trap(1, 0xe, false); - break; - case CP_ACCESS_TRAP_FP_EL3: - target_el = 3; - syndrome = syn_fp_access_trap(1, 0xe, false); - break; default: g_assert_not_reached(); } -exept: + target_el = res & CP_ACCESS_EL_MASK; + switch (target_el) { + case 0: + target_el = exception_target_el(env); + break; + case 2: + assert(arm_current_el(env) != 3); + assert(arm_is_el2_enabled(env)); + break; + case 3: + assert(arm_feature(env, ARM_FEATURE_EL3)); + break; + default: + /* No "direct" traps to EL1 */ + g_assert_not_reached(); + } + raise_exception(env, EXCP_UDEF, syndrome, target_el); } @@ -956,3 +965,46 @@ void HELPER(probe_access)(CPUARMState *env, target_ulong ptr, access_type, mmu_idx, ra); } } + +/* + * This function corresponds to AArch64.vESBOperation(). + * Note that the AArch32 version is not functionally different. + */ +void HELPER(vesb)(CPUARMState *env) +{ + /* + * The EL2Enabled() check is done inside arm_hcr_el2_eff, + * and will return HCR_EL2.VSE == 0, so nothing happens. + */ + uint64_t hcr = arm_hcr_el2_eff(env); + bool enabled = !(hcr & HCR_TGE) && (hcr & HCR_AMO); + bool pending = enabled && (hcr & HCR_VSE); + bool masked = (env->daif & PSTATE_A); + + /* If VSE pending and masked, defer the exception. */ + if (pending && masked) { + uint32_t syndrome; + + if (arm_el_is_aa64(env, 1)) { + /* Copy across IDS and ISS from VSESR. */ + syndrome = env->cp15.vsesr_el2 & 0x1ffffff; + } else { + ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal }; + + if (extended_addresses_enabled(env)) { + syndrome = arm_fi_to_lfsc(&fi); + } else { + syndrome = arm_fi_to_sfsc(&fi); + } + /* Copy across AET and ExT from VSESR. */ + syndrome |= env->cp15.vsesr_el2 & 0xd000; + } + + /* Set VDISR_EL2.A along with the syndrome. 
*/ + env->cp15.vdisr_el2 = syndrome | (1u << 31); + + /* Clear pending virtual SError */ + env->cp15.hcr_el2 &= ~HCR_VSE; + cpu_reset_interrupt(env_cpu(env), CPU_INTERRUPT_VSERR); + } +} diff --git a/target/arm/pauth_helper.c b/target/arm/pauth_helper.c index cd6df18150..d0483bf051 100644 --- a/target/arm/pauth_helper.c +++ b/target/arm/pauth_helper.c @@ -382,15 +382,15 @@ static uint64_t pauth_strip(CPUARMState *env, uint64_t ptr, bool data) return pauth_original_ptr(ptr, param); } -static void QEMU_NORETURN pauth_trap(CPUARMState *env, int target_el, - uintptr_t ra) +static G_NORETURN +void pauth_trap(CPUARMState *env, int target_el, uintptr_t ra) { raise_exception_ra(env, EXCP_UDEF, syn_pactrap(), target_el, ra); } static void pauth_check_trap(CPUARMState *env, int el, uintptr_t ra) { - if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) { + if (el < 2 && arm_is_el2_enabled(env)) { uint64_t hcr = arm_hcr_el2_eff(env); bool trap = !(hcr & HCR_API); if (el == 0) { diff --git a/target/arm/psci.c b/target/arm/psci.c index 6709e28013..6c1239bb96 100644 --- a/target/arm/psci.c +++ b/target/arm/psci.c @@ -27,15 +27,13 @@ bool arm_is_psci_call(ARMCPU *cpu, int excp_type) { - /* Return true if the r0/x0 value indicates a PSCI call and - * the exception type matches the configured PSCI conduit. This is - * called before the SMC/HVC instruction is executed, to decide whether - * we should treat it as a PSCI call or with the architecturally + /* + * Return true if the exception type matches the configured PSCI conduit. + * This is called before the SMC/HVC instruction is executed, to decide + * whether we should treat it as a PSCI call or with the architecturally * defined behaviour for an SMC or HVC (which might be UNDEF or trap * to EL2 or to EL3). */ - CPUARMState *env = &cpu->env; - uint64_t param = is_a64(env) ? env->xregs[0] : env->regs[0]; switch (excp_type) { case EXCP_HVC: @@ -52,34 +50,14 @@ bool arm_is_psci_call(ARMCPU *cpu, int excp_type) return false; } - switch (param) { - case QEMU_PSCI_0_2_FN_PSCI_VERSION: - case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE: - case QEMU_PSCI_0_2_FN_AFFINITY_INFO: - case QEMU_PSCI_0_2_FN64_AFFINITY_INFO: - case QEMU_PSCI_0_2_FN_SYSTEM_RESET: - case QEMU_PSCI_0_2_FN_SYSTEM_OFF: - case QEMU_PSCI_0_1_FN_CPU_ON: - case QEMU_PSCI_0_2_FN_CPU_ON: - case QEMU_PSCI_0_2_FN64_CPU_ON: - case QEMU_PSCI_0_1_FN_CPU_OFF: - case QEMU_PSCI_0_2_FN_CPU_OFF: - case QEMU_PSCI_0_1_FN_CPU_SUSPEND: - case QEMU_PSCI_0_2_FN_CPU_SUSPEND: - case QEMU_PSCI_0_2_FN64_CPU_SUSPEND: - case QEMU_PSCI_0_1_FN_MIGRATE: - case QEMU_PSCI_0_2_FN_MIGRATE: - return true; - default: - return false; - } + return true; } void arm_handle_psci_call(ARMCPU *cpu) { /* * This function partially implements the logic for dispatching Power State - * Coordination Interface (PSCI) calls (as described in ARM DEN 0022B.b), + * Coordination Interface (PSCI) calls (as described in ARM DEN 0022D.b), * to the extent required for bringing up and taking down secondary cores, * and for handling reset and poweroff requests. 
* Additional information about the calling convention used is available in @@ -102,7 +80,7 @@ void arm_handle_psci_call(ARMCPU *cpu) } if ((param[0] & QEMU_PSCI_0_2_64BIT) && !is_a64(env)) { - ret = QEMU_PSCI_RET_INVALID_PARAMS; + ret = QEMU_PSCI_RET_NOT_SUPPORTED; goto err; } @@ -111,7 +89,7 @@ void arm_handle_psci_call(ARMCPU *cpu) ARMCPU *target_cpu; case QEMU_PSCI_0_2_FN_PSCI_VERSION: - ret = QEMU_PSCI_0_2_RET_VERSION_0_2; + ret = QEMU_PSCI_VERSION_1_1; break; case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE: ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */ @@ -192,12 +170,40 @@ void arm_handle_psci_call(ARMCPU *cpu) } helper_wfi(env, 4); break; + case QEMU_PSCI_1_0_FN_PSCI_FEATURES: + switch (param[1]) { + case QEMU_PSCI_0_2_FN_PSCI_VERSION: + case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE: + case QEMU_PSCI_0_2_FN_AFFINITY_INFO: + case QEMU_PSCI_0_2_FN64_AFFINITY_INFO: + case QEMU_PSCI_0_2_FN_SYSTEM_RESET: + case QEMU_PSCI_0_2_FN_SYSTEM_OFF: + case QEMU_PSCI_0_1_FN_CPU_ON: + case QEMU_PSCI_0_2_FN_CPU_ON: + case QEMU_PSCI_0_2_FN64_CPU_ON: + case QEMU_PSCI_0_1_FN_CPU_OFF: + case QEMU_PSCI_0_2_FN_CPU_OFF: + case QEMU_PSCI_0_1_FN_CPU_SUSPEND: + case QEMU_PSCI_0_2_FN_CPU_SUSPEND: + case QEMU_PSCI_0_2_FN64_CPU_SUSPEND: + case QEMU_PSCI_1_0_FN_PSCI_FEATURES: + if (!(param[1] & QEMU_PSCI_0_2_64BIT) || is_a64(env)) { + ret = 0; + break; + } + /* fallthrough */ + case QEMU_PSCI_0_1_FN_MIGRATE: + case QEMU_PSCI_0_2_FN_MIGRATE: + default: + ret = QEMU_PSCI_RET_NOT_SUPPORTED; + break; + } + break; case QEMU_PSCI_0_1_FN_MIGRATE: case QEMU_PSCI_0_2_FN_MIGRATE: + default: ret = QEMU_PSCI_RET_NOT_SUPPORTED; break; - default: - g_assert_not_reached(); } err: diff --git a/target/arm/ptw.c b/target/arm/ptw.c new file mode 100644 index 0000000000..0b16068557 --- /dev/null +++ b/target/arm/ptw.c @@ -0,0 +1,2895 @@ +/* + * ARM page table walking. + * + * This code is licensed under the GNU GPL v2 or later. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/range.h" +#include "qemu/main-loop.h" +#include "exec/exec-all.h" +#include "cpu.h" +#include "internals.h" +#include "idau.h" + + +typedef struct S1Translate { + ARMMMUIdx in_mmu_idx; + ARMMMUIdx in_ptw_idx; + bool in_secure; + bool in_debug; + bool out_secure; + bool out_rw; + bool out_be; + hwaddr out_virt; + hwaddr out_phys; + void *out_host; +} S1Translate; + +static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, + uint64_t address, + MMUAccessType access_type, bool s1_is_el0, + GetPhysAddrResult *result, ARMMMUFaultInfo *fi) + __attribute__((nonnull)); + +static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw, + target_ulong address, + MMUAccessType access_type, + GetPhysAddrResult *result, + ARMMMUFaultInfo *fi) + __attribute__((nonnull)); + +/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */ +static const uint8_t pamax_map[] = { + [0] = 32, + [1] = 36, + [2] = 40, + [3] = 42, + [4] = 44, + [5] = 48, + [6] = 52, +}; + +/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */ +unsigned int arm_pamax(ARMCPU *cpu) +{ + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + unsigned int parange = + FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE); + + /* + * id_aa64mmfr0 is a read-only register so values outside of the + * supported mappings can be considered an implementation error. 
+ */ + assert(parange < ARRAY_SIZE(pamax_map)); + return pamax_map[parange]; + } + + /* + * In machvirt_init, we call arm_pamax on a cpu that is not fully + * initialized, so we can't rely on the propagation done in realize. + */ + if (arm_feature(&cpu->env, ARM_FEATURE_LPAE) || + arm_feature(&cpu->env, ARM_FEATURE_V7VE)) { + /* v7 with LPAE */ + return 40; + } + /* Anything else */ + return 32; +} + +/* + * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index + */ +ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) +{ + switch (mmu_idx) { + case ARMMMUIdx_E10_0: + return ARMMMUIdx_Stage1_E0; + case ARMMMUIdx_E10_1: + return ARMMMUIdx_Stage1_E1; + case ARMMMUIdx_E10_1_PAN: + return ARMMMUIdx_Stage1_E1_PAN; + default: + return mmu_idx; + } +} + +ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env) +{ + return stage_1_mmu_idx(arm_mmu_idx(env)); +} + +static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx) +{ + return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; +} + +/* Return the TTBR associated with this translation regime */ +static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn) +{ + if (mmu_idx == ARMMMUIdx_Stage2) { + return env->cp15.vttbr_el2; + } + if (mmu_idx == ARMMMUIdx_Stage2_S) { + return env->cp15.vsttbr_el2; + } + if (ttbrn == 0) { + return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; + } else { + return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; + } +} + +/* Return true if the specified stage of address translation is disabled */ +static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx, + bool is_secure) +{ + uint64_t hcr_el2; + + if (arm_feature(env, ARM_FEATURE_M)) { + switch (env->v7m.mpu_ctrl[is_secure] & + (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { + case R_V7M_MPU_CTRL_ENABLE_MASK: + /* Enabled, but not for HardFault and NMI */ + return mmu_idx & ARM_MMU_IDX_M_NEGPRI; + case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: + /* Enabled for all cases */ + return false; + case 0: + default: + /* + * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but + * we warned about that in armv7m_nvic.c when the guest set it. + */ + return true; + } + } + + hcr_el2 = arm_hcr_el2_eff_secstate(env, is_secure); + + switch (mmu_idx) { + case ARMMMUIdx_Stage2: + case ARMMMUIdx_Stage2_S: + /* HCR.DC means HCR.VM behaves as 1 */ + return (hcr_el2 & (HCR_DC | HCR_VM)) == 0; + + case ARMMMUIdx_E10_0: + case ARMMMUIdx_E10_1: + case ARMMMUIdx_E10_1_PAN: + /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */ + if (hcr_el2 & HCR_TGE) { + return true; + } + break; + + case ARMMMUIdx_Stage1_E0: + case ARMMMUIdx_Stage1_E1: + case ARMMMUIdx_Stage1_E1_PAN: + /* HCR.DC means SCTLR_EL1.M behaves as 0 */ + if (hcr_el2 & HCR_DC) { + return true; + } + break; + + case ARMMMUIdx_E20_0: + case ARMMMUIdx_E20_2: + case ARMMMUIdx_E20_2_PAN: + case ARMMMUIdx_E2: + case ARMMMUIdx_E3: + break; + + case ARMMMUIdx_Phys_NS: + case ARMMMUIdx_Phys_S: + /* No translation for physical address spaces. */ + return true; + + default: + g_assert_not_reached(); + } + + return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; +} + +static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs) +{ + /* + * For an S1 page table walk, the stage 1 attributes are always + * some form of "this is Normal memory". The combined S1+S2 + * attributes are therefore only Device if stage 2 specifies Device. + * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00, + * ie when cacheattrs.attrs bits [3:2] are 0b00. 
+ * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie + * when cacheattrs.attrs bit [2] is 0. + */ + if (hcr & HCR_FWB) { + return (attrs & 0x4) == 0; + } else { + return (attrs & 0xc) == 0; + } +} + +/* Translate a S1 pagetable walk through S2 if needed. */ +static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, + hwaddr addr, ARMMMUFaultInfo *fi) +{ + bool is_secure = ptw->in_secure; + ARMMMUIdx mmu_idx = ptw->in_mmu_idx; + ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx; + uint8_t pte_attrs; + bool pte_secure; + + ptw->out_virt = addr; + + if (unlikely(ptw->in_debug)) { + /* + * From gdbstub, do not use softmmu so that we don't modify the + * state of the cpu at all, including softmmu tlb contents. + */ + if (regime_is_stage2(s2_mmu_idx)) { + S1Translate s2ptw = { + .in_mmu_idx = s2_mmu_idx, + .in_ptw_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS, + .in_secure = is_secure, + .in_debug = true, + }; + GetPhysAddrResult s2 = { }; + + if (get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD, + false, &s2, fi)) { + goto fail; + } + ptw->out_phys = s2.f.phys_addr; + pte_attrs = s2.cacheattrs.attrs; + pte_secure = s2.f.attrs.secure; + } else { + /* Regime is physical. */ + ptw->out_phys = addr; + pte_attrs = 0; + pte_secure = is_secure; + } + ptw->out_host = NULL; + ptw->out_rw = false; + } else { + CPUTLBEntryFull *full; + int flags; + + env->tlb_fi = fi; + flags = probe_access_full(env, addr, MMU_DATA_LOAD, + arm_to_core_mmu_idx(s2_mmu_idx), + true, &ptw->out_host, &full, 0); + env->tlb_fi = NULL; + + if (unlikely(flags & TLB_INVALID_MASK)) { + goto fail; + } + ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK); + ptw->out_rw = full->prot & PAGE_WRITE; + pte_attrs = full->pte_attrs; + pte_secure = full->attrs.secure; + } + + if (regime_is_stage2(s2_mmu_idx)) { + uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure); + + if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) { + /* + * PTW set and S1 walk touched S2 Device memory: + * generate Permission fault. + */ + fi->type = ARMFault_Permission; + fi->s2addr = addr; + fi->stage2 = true; + fi->s1ptw = true; + fi->s1ns = !is_secure; + return false; + } + } + + /* Check if page table walk is to secure or non-secure PA space. */ + ptw->out_secure = (is_secure + && !(pte_secure + ? env->cp15.vstcr_el2 & VSTCR_SW + : env->cp15.vtcr_el2 & VTCR_NSW)); + ptw->out_be = regime_translation_big_endian(env, mmu_idx); + return true; + + fail: + assert(fi->type != ARMFault_None); + fi->s2addr = addr; + fi->stage2 = true; + fi->s1ptw = true; + fi->s1ns = !is_secure; + return false; +} + +/* All loads done in the course of a page table walk go through here. */ +static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw, + ARMMMUFaultInfo *fi) +{ + CPUState *cs = env_cpu(env); + void *host = ptw->out_host; + uint32_t data; + + if (likely(host)) { + /* Page tables are in RAM, and we have the host address. */ + data = qatomic_read((uint32_t *)host); + if (ptw->out_be) { + data = be32_to_cpu(data); + } else { + data = le32_to_cpu(data); + } + } else { + /* Page tables are in MMIO. 
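+         * Go through the memory API (address_space_ldl_*) so that any
+         * device side effects take place and a failed transaction is
+         * reported as ARMFault_SyncExternalOnWalk below.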
*/ + MemTxAttrs attrs = { .secure = ptw->out_secure }; + AddressSpace *as = arm_addressspace(cs, attrs); + MemTxResult result = MEMTX_OK; + + if (ptw->out_be) { + data = address_space_ldl_be(as, ptw->out_phys, attrs, &result); + } else { + data = address_space_ldl_le(as, ptw->out_phys, attrs, &result); + } + if (unlikely(result != MEMTX_OK)) { + fi->type = ARMFault_SyncExternalOnWalk; + fi->ea = arm_extabort_type(result); + return 0; + } + } + return data; +} + +static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw, + ARMMMUFaultInfo *fi) +{ + CPUState *cs = env_cpu(env); + void *host = ptw->out_host; + uint64_t data; + + if (likely(host)) { + /* Page tables are in RAM, and we have the host address. */ +#ifdef CONFIG_ATOMIC64 + data = qatomic_read__nocheck((uint64_t *)host); + if (ptw->out_be) { + data = be64_to_cpu(data); + } else { + data = le64_to_cpu(data); + } +#else + if (ptw->out_be) { + data = ldq_be_p(host); + } else { + data = ldq_le_p(host); + } +#endif + } else { + /* Page tables are in MMIO. */ + MemTxAttrs attrs = { .secure = ptw->out_secure }; + AddressSpace *as = arm_addressspace(cs, attrs); + MemTxResult result = MEMTX_OK; + + if (ptw->out_be) { + data = address_space_ldq_be(as, ptw->out_phys, attrs, &result); + } else { + data = address_space_ldq_le(as, ptw->out_phys, attrs, &result); + } + if (unlikely(result != MEMTX_OK)) { + fi->type = ARMFault_SyncExternalOnWalk; + fi->ea = arm_extabort_type(result); + return 0; + } + } + return data; +} + +static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, + uint64_t new_val, S1Translate *ptw, + ARMMMUFaultInfo *fi) +{ + uint64_t cur_val; + void *host = ptw->out_host; + + if (unlikely(!host)) { + fi->type = ARMFault_UnsuppAtomicUpdate; + fi->s1ptw = true; + return 0; + } + + /* + * Raising a stage2 Protection fault for an atomic update to a read-only + * page is delayed until it is certain that there is a change to make. + */ + if (unlikely(!ptw->out_rw)) { + int flags; + void *discard; + + env->tlb_fi = fi; + flags = probe_access_flags(env, ptw->out_virt, MMU_DATA_STORE, + arm_to_core_mmu_idx(ptw->in_ptw_idx), + true, &discard, 0); + env->tlb_fi = NULL; + + if (unlikely(flags & TLB_INVALID_MASK)) { + assert(fi->type != ARMFault_None); + fi->s2addr = ptw->out_virt; + fi->stage2 = true; + fi->s1ptw = true; + fi->s1ns = !ptw->in_secure; + return 0; + } + + /* In case CAS mismatches and we loop, remember writability. */ + ptw->out_rw = true; + } + +#ifdef CONFIG_ATOMIC64 + if (ptw->out_be) { + old_val = cpu_to_be64(old_val); + new_val = cpu_to_be64(new_val); + cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val); + cur_val = be64_to_cpu(cur_val); + } else { + old_val = cpu_to_le64(old_val); + new_val = cpu_to_le64(new_val); + cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val); + cur_val = le64_to_cpu(cur_val); + } +#else + /* + * We can't support the full 64-bit atomic cmpxchg on the host. + * Because this is only used for FEAT_HAFDBS, which is only for AA64, + * we know that TCG_OVERSIZED_GUEST is set, which means that we are + * running in round-robin mode and could only race with dma i/o. 
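+     * Taking the iothread lock around the load/compare/store below is
+     * therefore enough to serialize this update against those DMA
+     * accesses.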
+ */ +#ifndef TCG_OVERSIZED_GUEST +# error "Unexpected configuration" +#endif + bool locked = qemu_mutex_iothread_locked(); + if (!locked) { + qemu_mutex_lock_iothread(); + } + if (ptw->out_be) { + cur_val = ldq_be_p(host); + if (cur_val == old_val) { + stq_be_p(host, new_val); + } + } else { + cur_val = ldq_le_p(host); + if (cur_val == old_val) { + stq_le_p(host, new_val); + } + } + if (!locked) { + qemu_mutex_unlock_iothread(); + } +#endif + + return cur_val; +} + +static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, + uint32_t *table, uint32_t address) +{ + /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ + uint64_t tcr = regime_tcr(env, mmu_idx); + int maskshift = extract32(tcr, 0, 3); + uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift); + uint32_t base_mask; + + if (address & mask) { + if (tcr & TTBCR_PD1) { + /* Translation table walk disabled for TTBR1 */ + return false; + } + *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; + } else { + if (tcr & TTBCR_PD0) { + /* Translation table walk disabled for TTBR0 */ + return false; + } + base_mask = ~((uint32_t)0x3fffu >> maskshift); + *table = regime_ttbr(env, mmu_idx, 0) & base_mask; + } + *table |= (address >> 18) & 0x3ffc; + return true; +} + +/* + * Translate section/page access permissions to page R/W protection flags + * @env: CPUARMState + * @mmu_idx: MMU index indicating required translation regime + * @ap: The 3-bit access permissions (AP[2:0]) + * @domain_prot: The 2-bit domain access permissions + * @is_user: TRUE if accessing from PL0 + */ +static int ap_to_rw_prot_is_user(CPUARMState *env, ARMMMUIdx mmu_idx, + int ap, int domain_prot, bool is_user) +{ + if (domain_prot == 3) { + return PAGE_READ | PAGE_WRITE; + } + + switch (ap) { + case 0: + if (arm_feature(env, ARM_FEATURE_V7)) { + return 0; + } + switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { + case SCTLR_S: + return is_user ? 0 : PAGE_READ; + case SCTLR_R: + return PAGE_READ; + default: + return 0; + } + case 1: + return is_user ? 0 : PAGE_READ | PAGE_WRITE; + case 2: + if (is_user) { + return PAGE_READ; + } else { + return PAGE_READ | PAGE_WRITE; + } + case 3: + return PAGE_READ | PAGE_WRITE; + case 4: /* Reserved. */ + return 0; + case 5: + return is_user ? 0 : PAGE_READ; + case 6: + return PAGE_READ; + case 7: + if (!arm_feature(env, ARM_FEATURE_V6K)) { + return 0; + } + return PAGE_READ; + default: + g_assert_not_reached(); + } +} + +/* + * Translate section/page access permissions to page R/W protection flags + * @env: CPUARMState + * @mmu_idx: MMU index indicating required translation regime + * @ap: The 3-bit access permissions (AP[2:0]) + * @domain_prot: The 2-bit domain access permissions + */ +static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, + int ap, int domain_prot) +{ + return ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, + regime_is_user(env, mmu_idx)); +} + +/* + * Translate section/page access permissions to page R/W protection flags. + * @ap: The 2-bit simple AP (AP[2:1]) + * @is_user: TRUE if accessing from PL0 + */ +static int simple_ap_to_rw_prot_is_user(int ap, bool is_user) +{ + switch (ap) { + case 0: + return is_user ? 0 : PAGE_READ | PAGE_WRITE; + case 1: + return PAGE_READ | PAGE_WRITE; + case 2: + return is_user ? 
0 : PAGE_READ; + case 3: + return PAGE_READ; + default: + g_assert_not_reached(); + } +} + +static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) +{ + return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); +} + +static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw, + uint32_t address, MMUAccessType access_type, + GetPhysAddrResult *result, ARMMMUFaultInfo *fi) +{ + int level = 1; + uint32_t table; + uint32_t desc; + int type; + int ap; + int domain = 0; + int domain_prot; + hwaddr phys_addr; + uint32_t dacr; + + /* Pagetable walk. */ + /* Lookup l1 descriptor. */ + if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) { + /* Section translation fault if page walk is disabled by PD0 or PD1 */ + fi->type = ARMFault_Translation; + goto do_fault; + } + if (!S1_ptw_translate(env, ptw, table, fi)) { + goto do_fault; + } + desc = arm_ldl_ptw(env, ptw, fi); + if (fi->type != ARMFault_None) { + goto do_fault; + } + type = (desc & 3); + domain = (desc >> 5) & 0x0f; + if (regime_el(env, ptw->in_mmu_idx) == 1) { + dacr = env->cp15.dacr_ns; + } else { + dacr = env->cp15.dacr_s; + } + domain_prot = (dacr >> (domain * 2)) & 3; + if (type == 0) { + /* Section translation fault. */ + fi->type = ARMFault_Translation; + goto do_fault; + } + if (type != 2) { + level = 2; + } + if (domain_prot == 0 || domain_prot == 2) { + fi->type = ARMFault_Domain; + goto do_fault; + } + if (type == 2) { + /* 1Mb section. */ + phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); + ap = (desc >> 10) & 3; + result->f.lg_page_size = 20; /* 1MB */ + } else { + /* Lookup l2 entry. */ + if (type == 1) { + /* Coarse pagetable. */ + table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); + } else { + /* Fine pagetable. */ + table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); + } + if (!S1_ptw_translate(env, ptw, table, fi)) { + goto do_fault; + } + desc = arm_ldl_ptw(env, ptw, fi); + if (fi->type != ARMFault_None) { + goto do_fault; + } + switch (desc & 3) { + case 0: /* Page translation fault. */ + fi->type = ARMFault_Translation; + goto do_fault; + case 1: /* 64k page. */ + phys_addr = (desc & 0xffff0000) | (address & 0xffff); + ap = (desc >> (4 + ((address >> 13) & 6))) & 3; + result->f.lg_page_size = 16; + break; + case 2: /* 4k page. */ + phys_addr = (desc & 0xfffff000) | (address & 0xfff); + ap = (desc >> (4 + ((address >> 9) & 6))) & 3; + result->f.lg_page_size = 12; + break; + case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ + if (type == 1) { + /* ARMv6/XScale extended small page format */ + if (arm_feature(env, ARM_FEATURE_XSCALE) + || arm_feature(env, ARM_FEATURE_V6)) { + phys_addr = (desc & 0xfffff000) | (address & 0xfff); + result->f.lg_page_size = 12; + } else { + /* + * UNPREDICTABLE in ARMv5; we choose to take a + * page translation fault. + */ + fi->type = ARMFault_Translation; + goto do_fault; + } + } else { + phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); + result->f.lg_page_size = 10; + } + ap = (desc >> 4) & 3; + break; + default: + /* Never happens, but compiler isn't smart enough to tell. */ + g_assert_not_reached(); + } + } + result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot); + result->f.prot |= result->f.prot ? PAGE_EXEC : 0; + if (!(result->f.prot & (1 << access_type))) { + /* Access permission fault. 
*/ + fi->type = ARMFault_Permission; + goto do_fault; + } + result->f.phys_addr = phys_addr; + return false; +do_fault: + fi->domain = domain; + fi->level = level; + return true; +} + +static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw, + uint32_t address, MMUAccessType access_type, + GetPhysAddrResult *result, ARMMMUFaultInfo *fi) +{ + ARMCPU *cpu = env_archcpu(env); + ARMMMUIdx mmu_idx = ptw->in_mmu_idx; + int level = 1; + uint32_t table; + uint32_t desc; + uint32_t xn; + uint32_t pxn = 0; + int type; + int ap; + int domain = 0; + int domain_prot; + hwaddr phys_addr; + uint32_t dacr; + bool ns; + int user_prot; + + /* Pagetable walk. */ + /* Lookup l1 descriptor. */ + if (!get_level1_table_address(env, mmu_idx, &table, address)) { + /* Section translation fault if page walk is disabled by PD0 or PD1 */ + fi->type = ARMFault_Translation; + goto do_fault; + } + if (!S1_ptw_translate(env, ptw, table, fi)) { + goto do_fault; + } + desc = arm_ldl_ptw(env, ptw, fi); + if (fi->type != ARMFault_None) { + goto do_fault; + } + type = (desc & 3); + if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) { + /* Section translation fault, or attempt to use the encoding + * which is Reserved on implementations without PXN. + */ + fi->type = ARMFault_Translation; + goto do_fault; + } + if ((type == 1) || !(desc & (1 << 18))) { + /* Page or Section. */ + domain = (desc >> 5) & 0x0f; + } + if (regime_el(env, mmu_idx) == 1) { + dacr = env->cp15.dacr_ns; + } else { + dacr = env->cp15.dacr_s; + } + if (type == 1) { + level = 2; + } + domain_prot = (dacr >> (domain * 2)) & 3; + if (domain_prot == 0 || domain_prot == 2) { + /* Section or Page domain fault */ + fi->type = ARMFault_Domain; + goto do_fault; + } + if (type != 1) { + if (desc & (1 << 18)) { + /* Supersection. */ + phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); + phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; + phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; + result->f.lg_page_size = 24; /* 16MB */ + } else { + /* Section. */ + phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); + result->f.lg_page_size = 20; /* 1MB */ + } + ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); + xn = desc & (1 << 4); + pxn = desc & 1; + ns = extract32(desc, 19, 1); + } else { + if (cpu_isar_feature(aa32_pxn, cpu)) { + pxn = (desc >> 2) & 1; + } + ns = extract32(desc, 3, 1); + /* Lookup l2 entry. */ + table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); + if (!S1_ptw_translate(env, ptw, table, fi)) { + goto do_fault; + } + desc = arm_ldl_ptw(env, ptw, fi); + if (fi->type != ARMFault_None) { + goto do_fault; + } + ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); + switch (desc & 3) { + case 0: /* Page translation fault. */ + fi->type = ARMFault_Translation; + goto do_fault; + case 1: /* 64k page. */ + phys_addr = (desc & 0xffff0000) | (address & 0xffff); + xn = desc & (1 << 15); + result->f.lg_page_size = 16; + break; + case 2: case 3: /* 4k page. */ + phys_addr = (desc & 0xfffff000) | (address & 0xfff); + xn = desc & 1; + result->f.lg_page_size = 12; + break; + default: + /* Never happens, but compiler isn't smart enough to tell. 
*/ + g_assert_not_reached(); + } + } + if (domain_prot == 3) { + result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + } else { + if (pxn && !regime_is_user(env, mmu_idx)) { + xn = 1; + } + if (xn && access_type == MMU_INST_FETCH) { + fi->type = ARMFault_Permission; + goto do_fault; + } + + if (arm_feature(env, ARM_FEATURE_V6K) && + (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { + /* The simplified model uses AP[0] as an access control bit. */ + if ((ap & 1) == 0) { + /* Access flag fault. */ + fi->type = ARMFault_AccessFlag; + goto do_fault; + } + result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); + user_prot = simple_ap_to_rw_prot_is_user(ap >> 1, 1); + } else { + result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); + user_prot = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1); + } + if (result->f.prot && !xn) { + result->f.prot |= PAGE_EXEC; + } + if (!(result->f.prot & (1 << access_type))) { + /* Access permission fault. */ + fi->type = ARMFault_Permission; + goto do_fault; + } + if (regime_is_pan(env, mmu_idx) && + !regime_is_user(env, mmu_idx) && + user_prot && + access_type != MMU_INST_FETCH) { + /* Privileged Access Never fault */ + fi->type = ARMFault_Permission; + goto do_fault; + } + } + if (ns) { + /* The NS bit will (as required by the architecture) have no effect if + * the CPU doesn't support TZ or this is a non-secure translation + * regime, because the attribute will already be non-secure. + */ + result->f.attrs.secure = false; + } + result->f.phys_addr = phys_addr; + return false; +do_fault: + fi->domain = domain; + fi->level = level; + return true; +} + +/* + * Translate S2 section/page access permissions to protection flags + * @env: CPUARMState + * @s2ap: The 2-bit stage2 access permissions (S2AP) + * @xn: XN (execute-never) bits + * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0 + */ +static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0) +{ + int prot = 0; + + if (s2ap & 1) { + prot |= PAGE_READ; + } + if (s2ap & 2) { + prot |= PAGE_WRITE; + } + + if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) { + switch (xn) { + case 0: + prot |= PAGE_EXEC; + break; + case 1: + if (s1_is_el0) { + prot |= PAGE_EXEC; + } + break; + case 2: + break; + case 3: + if (!s1_is_el0) { + prot |= PAGE_EXEC; + } + break; + default: + g_assert_not_reached(); + } + } else { + if (!extract32(xn, 1, 1)) { + if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { + prot |= PAGE_EXEC; + } + } + } + return prot; +} + +/* + * Translate section/page access permissions to protection flags + * @env: CPUARMState + * @mmu_idx: MMU index indicating required translation regime + * @is_aa64: TRUE if AArch64 + * @ap: The 2-bit simple AP (AP[2:1]) + * @ns: NS (non-secure) bit + * @xn: XN (execute-never) bit + * @pxn: PXN (privileged execute-never) bit + */ +static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, + int ap, int ns, int xn, int pxn) +{ + bool is_user = regime_is_user(env, mmu_idx); + int prot_rw, user_rw; + bool have_wxn; + int wxn = 0; + + assert(!regime_is_stage2(mmu_idx)); + + user_rw = simple_ap_to_rw_prot_is_user(ap, true); + if (is_user) { + prot_rw = user_rw; + } else { + if (user_rw && regime_is_pan(env, mmu_idx)) { + /* PAN forbids data accesses but doesn't affect insn fetch */ + prot_rw = 0; + } else { + prot_rw = simple_ap_to_rw_prot_is_user(ap, false); + } + } + + if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { + return prot_rw; + } + + /* TODO have_wxn should be replaced with + * ARM_FEATURE_V8 || 
(ARM_FEATURE_V7 && ARM_FEATURE_EL2) + * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE + * compatible processors have EL2, which is required for [U]WXN. + */ + have_wxn = arm_feature(env, ARM_FEATURE_LPAE); + + if (have_wxn) { + wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; + } + + if (is_aa64) { + if (regime_has_2_ranges(mmu_idx) && !is_user) { + xn = pxn || (user_rw & PAGE_WRITE); + } + } else if (arm_feature(env, ARM_FEATURE_V7)) { + switch (regime_el(env, mmu_idx)) { + case 1: + case 3: + if (is_user) { + xn = xn || !(user_rw & PAGE_READ); + } else { + int uwxn = 0; + if (have_wxn) { + uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; + } + xn = xn || !(prot_rw & PAGE_READ) || pxn || + (uwxn && (user_rw & PAGE_WRITE)); + } + break; + case 2: + break; + } + } else { + xn = wxn = 0; + } + + if (xn || (wxn && (prot_rw & PAGE_WRITE))) { + return prot_rw; + } + return prot_rw | PAGE_EXEC; +} + +static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, + ARMMMUIdx mmu_idx) +{ + uint64_t tcr = regime_tcr(env, mmu_idx); + uint32_t el = regime_el(env, mmu_idx); + int select, tsz; + bool epd, hpd; + + assert(mmu_idx != ARMMMUIdx_Stage2_S); + + if (mmu_idx == ARMMMUIdx_Stage2) { + /* VTCR */ + bool sext = extract32(tcr, 4, 1); + bool sign = extract32(tcr, 3, 1); + + /* + * If the sign-extend bit is not the same as t0sz[3], the result + * is unpredictable. Flag this as a guest error. + */ + if (sign != sext) { + qemu_log_mask(LOG_GUEST_ERROR, + "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); + } + tsz = sextract32(tcr, 0, 4) + 8; + select = 0; + hpd = false; + epd = false; + } else if (el == 2) { + /* HTCR */ + tsz = extract32(tcr, 0, 3); + select = 0; + hpd = extract64(tcr, 24, 1); + epd = false; + } else { + int t0sz = extract32(tcr, 0, 3); + int t1sz = extract32(tcr, 16, 3); + + if (t1sz == 0) { + select = va > (0xffffffffu >> t0sz); + } else { + /* Note that we will detect errors later. */ + select = va >= ~(0xffffffffu >> t1sz); + } + if (!select) { + tsz = t0sz; + epd = extract32(tcr, 7, 1); + hpd = extract64(tcr, 41, 1); + } else { + tsz = t1sz; + epd = extract32(tcr, 23, 1); + hpd = extract64(tcr, 42, 1); + } + /* For aarch32, hpd0 is not enabled without t2e as well. */ + hpd &= extract32(tcr, 6, 1); + } + + return (ARMVAParameters) { + .tsz = tsz, + .select = select, + .epd = epd, + .hpd = hpd, + }; +} + +/* + * check_s2_mmu_setup + * @cpu: ARMCPU + * @is_aa64: True if the translation regime is in AArch64 state + * @startlevel: Suggested starting level + * @inputsize: Bitsize of IPAs + * @stride: Page-table stride (See the ARM ARM) + * + * Returns true if the suggested S2 translation parameters are OK and + * false otherwise. + */ +static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, + int inputsize, int stride, int outputsize) +{ + const int grainsize = stride + 3; + int startsizecheck; + + /* + * Negative levels are usually not allowed... + * Except for FEAT_LPA2, 4k page table, 52-bit address space, which + * begins with level -1. Note that previous feature tests will have + * eliminated this combination if it is not enabled. + */ + if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) { + return false; + } + + startsizecheck = inputsize - ((3 - level) * stride + grainsize); + if (startsizecheck < 1 || startsizecheck > stride + 4) { + return false; + } + + if (is_aa64) { + switch (stride) { + case 13: /* 64KB Pages. */ + if (level == 0 || (level == 1 && outputsize <= 42)) { + return false; + } + break; + case 11: /* 16KB Pages. 
*/ + if (level == 0 || (level == 1 && outputsize <= 40)) { + return false; + } + break; + case 9: /* 4KB Pages. */ + if (level == 0 && outputsize <= 42) { + return false; + } + break; + default: + g_assert_not_reached(); + } + + /* Inputsize checks. */ + if (inputsize > outputsize && + (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) { + /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */ + return false; + } + } else { + /* AArch32 only supports 4KB pages. Assert on that. */ + assert(stride == 9); + + if (level == 0) { + return false; + } + } + return true; +} + +/** + * get_phys_addr_lpae: perform one stage of page table walk, LPAE format + * + * Returns false if the translation was successful. Otherwise, phys_ptr, + * attrs, prot and page_size may not be filled in, and the populated fsr + * value provides information on why the translation aborted, in the format + * of a long-format DFSR/IFSR fault register, with the following caveat: + * the WnR bit is never set (the caller must do this). + * + * @env: CPUARMState + * @ptw: Current and next stage parameters for the walk. + * @address: virtual address to get physical address for + * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH + * @s1_is_el0: if @ptw->in_mmu_idx is ARMMMUIdx_Stage2 + * (so this is a stage 2 page table walk), + * must be true if this is stage 2 of a stage 1+2 + * walk for an EL0 access. If @mmu_idx is anything else, + * @s1_is_el0 is ignored. + * @result: set on translation success, + * @fi: set to fault info if the translation fails + */ +static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, + uint64_t address, + MMUAccessType access_type, bool s1_is_el0, + GetPhysAddrResult *result, ARMMMUFaultInfo *fi) +{ + ARMCPU *cpu = env_archcpu(env); + ARMMMUIdx mmu_idx = ptw->in_mmu_idx; + bool is_secure = ptw->in_secure; + int32_t level; + ARMVAParameters param; + uint64_t ttbr; + hwaddr descaddr, indexmask, indexmask_grainsize; + uint32_t tableattrs; + target_ulong page_size; + uint64_t attrs; + int32_t stride; + int addrsize, inputsize, outputsize; + uint64_t tcr = regime_tcr(env, mmu_idx); + int ap, ns, xn, pxn; + uint32_t el = regime_el(env, mmu_idx); + uint64_t descaddrmask; + bool aarch64 = arm_el_is_aa64(env, el); + uint64_t descriptor, new_descriptor; + bool nstable; + + /* TODO: This code does not support shareability levels. */ + if (aarch64) { + int ps; + + param = aa64_va_parameters(env, address, mmu_idx, + access_type != MMU_INST_FETCH); + level = 0; + + /* + * If TxSZ is programmed to a value larger than the maximum, + * or smaller than the effective minimum, it is IMPLEMENTATION + * DEFINED whether we behave as if the field were programmed + * within bounds, or if a level 0 Translation fault is generated. + * + * With FEAT_LVA, fault on less than minimum becomes required, + * so our choice is to always raise the fault. + */ + if (param.tsz_oob) { + goto do_translation_fault; + } + + addrsize = 64 - 8 * param.tbi; + inputsize = 64 - param.tsz; + + /* + * Bound PS by PARANGE to find the effective output address size. + * ID_AA64MMFR0 is a read-only register so values outside of the + * supported mappings can be considered an implementation error. 
+ */ + ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE); + ps = MIN(ps, param.ps); + assert(ps < ARRAY_SIZE(pamax_map)); + outputsize = pamax_map[ps]; + + /* + * With LPA2, the effective output address (OA) size is at most 48 bits + * unless TCR.DS == 1 + */ + if (!param.ds && param.gran != Gran64K) { + outputsize = MIN(outputsize, 48); + } + } else { + param = aa32_va_parameters(env, address, mmu_idx); + level = 1; + addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32); + inputsize = addrsize - param.tsz; + outputsize = 40; + } + + /* + * We determined the region when collecting the parameters, but we + * have not yet validated that the address is valid for the region. + * Extract the top bits and verify that they all match select. + * + * For aa32, if inputsize == addrsize, then we have selected the + * region by exclusion in aa32_va_parameters and there is no more + * validation to do here. + */ + if (inputsize < addrsize) { + target_ulong top_bits = sextract64(address, inputsize, + addrsize - inputsize); + if (-top_bits != param.select) { + /* The gap between the two regions is a Translation fault */ + goto do_translation_fault; + } + } + + stride = arm_granule_bits(param.gran) - 3; + + /* + * Note that QEMU ignores shareability and cacheability attributes, + * so we don't need to do anything with the SH, ORGN, IRGN fields + * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the + * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently + * implement any ASID-like capability so we can ignore it (instead + * we will always flush the TLB any time the ASID is changed). + */ + ttbr = regime_ttbr(env, mmu_idx, param.select); + + /* + * Here we should have set up all the parameters for the translation: + * inputsize, ttbr, epd, stride, tbi + */ + + if (param.epd) { + /* + * Translation table walk disabled => Translation fault on TLB miss + * Note: This is always 0 on 64-bit EL2 and EL3. + */ + goto do_translation_fault; + } + + if (!regime_is_stage2(mmu_idx)) { + /* + * The starting level depends on the virtual address size (which can + * be up to 48 bits) and the translation granule size. It indicates + * the number of strides (stride bits at a time) needed to + * consume the bits of the input address. In the pseudocode this is: + * level = 4 - RoundUp((inputsize - grainsize) / stride) + * where their 'inputsize' is our 'inputsize', 'grainsize' is + * our 'stride + 3' and 'stride' is our 'stride'. + * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: + * = 4 - (inputsize - stride - 3 + stride - 1) / stride + * = 4 - (inputsize - 4) / stride; + */ + level = 4 - (inputsize - 4) / stride; + } else { + /* + * For stage 2 translations the starting level is specified by the + * VTCR_EL2.SL0 field (whose interpretation depends on the page size) + */ + uint32_t sl0 = extract32(tcr, 6, 2); + uint32_t sl2 = extract64(tcr, 33, 1); + int32_t startlevel; + bool ok; + + /* SL2 is RES0 unless DS=1 & 4kb granule. */ + if (param.ds && stride == 9 && sl2) { + if (sl0 != 0) { + level = 0; + goto do_translation_fault; + } + startlevel = -1; + } else if (!aarch64 || stride == 9) { + /* AArch32 or 4KB pages */ + startlevel = 2 - sl0; + + if (cpu_isar_feature(aa64_st, cpu)) { + startlevel &= 3; + } + } else { + /* 16KB or 64KB pages */ + startlevel = 3 - sl0; + } + + /* Check that the starting level is valid. 
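+         * check_s2_mmu_setup() rejects level/input-size/granule
+         * combinations the architecture does not permit; if it fails
+         * we take a Translation fault.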
*/ + ok = check_s2_mmu_setup(cpu, aarch64, startlevel, + inputsize, stride, outputsize); + if (!ok) { + goto do_translation_fault; + } + level = startlevel; + } + + indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3); + indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level))); + + /* Now we can extract the actual base address from the TTBR */ + descaddr = extract64(ttbr, 0, 48); + + /* + * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR. + * + * Otherwise, if the base address is out of range, raise AddressSizeFault. + * In the pseudocode, this is !IsZero(baseregister<47:outputsize>), + * but we've just cleared the bits above 47, so simplify the test. + */ + if (outputsize > 48) { + descaddr |= extract64(ttbr, 2, 4) << 48; + } else if (descaddr >> outputsize) { + level = 0; + fi->type = ARMFault_AddressSize; + goto do_fault; + } + + /* + * We rely on this masking to clear the RES0 bits at the bottom of the TTBR + * and also to mask out CnP (bit 0) which could validly be non-zero. + */ + descaddr &= ~indexmask; + + /* + * For AArch32, the address field in the descriptor goes up to bit 39 + * for both v7 and v8. However, for v8 the SBZ bits [47:40] must be 0 + * or an AddressSize fault is raised. So for v8 we extract those SBZ + * bits as part of the address, which will be checked via outputsize. + * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2; + * the highest bits of a 52-bit output are placed elsewhere. + */ + if (param.ds) { + descaddrmask = MAKE_64BIT_MASK(0, 50); + } else if (arm_feature(env, ARM_FEATURE_V8)) { + descaddrmask = MAKE_64BIT_MASK(0, 48); + } else { + descaddrmask = MAKE_64BIT_MASK(0, 40); + } + descaddrmask &= ~indexmask_grainsize; + + /* + * Secure accesses start with the page table in secure memory and + * can be downgraded to non-secure at any step. Non-secure accesses + * remain non-secure. We implement this by just ORing in the NSTable/NS + * bits at each step. + */ + tableattrs = is_secure ? 0 : (1 << 4); + + next_level: + descaddr |= (address >> (stride * (4 - level))) & indexmask; + descaddr &= ~7ULL; + nstable = extract32(tableattrs, 4, 1); + if (nstable) { + /* + * Stage2_S -> Stage2 or Phys_S -> Phys_NS + * Assert that the non-secure idx are even, and relative order. + */ + QEMU_BUILD_BUG_ON((ARMMMUIdx_Phys_NS & 1) != 0); + QEMU_BUILD_BUG_ON((ARMMMUIdx_Stage2 & 1) != 0); + QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_NS + 1 != ARMMMUIdx_Phys_S); + QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2 + 1 != ARMMMUIdx_Stage2_S); + ptw->in_ptw_idx &= ~1; + ptw->in_secure = false; + } + if (!S1_ptw_translate(env, ptw, descaddr, fi)) { + goto do_fault; + } + descriptor = arm_ldq_ptw(env, ptw, fi); + if (fi->type != ARMFault_None) { + goto do_fault; + } + new_descriptor = descriptor; + + restart_atomic_update: + if (!(descriptor & 1) || (!(descriptor & 2) && (level == 3))) { + /* Invalid, or the Reserved level 3 encoding */ + goto do_translation_fault; + } + + descaddr = descriptor & descaddrmask; + + /* + * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12] + * of descriptor. For FEAT_LPA2 and effective DS, bits [51:50] of + * descaddr are in [9:8]. Otherwise, if descaddr is out of range, + * raise AddressSizeFault. 
+ */ + if (outputsize > 48) { + if (param.ds) { + descaddr |= extract64(descriptor, 8, 2) << 50; + } else { + descaddr |= extract64(descriptor, 12, 4) << 48; + } + } else if (descaddr >> outputsize) { + fi->type = ARMFault_AddressSize; + goto do_fault; + } + + if ((descriptor & 2) && (level < 3)) { + /* + * Table entry. The top five bits are attributes which may + * propagate down through lower levels of the table (and + * which are all arranged so that 0 means "no effect", so + * we can gather them up by ORing in the bits at each level). + */ + tableattrs |= extract64(descriptor, 59, 5); + level++; + indexmask = indexmask_grainsize; + goto next_level; + } + + /* + * Block entry at level 1 or 2, or page entry at level 3. + * These are basically the same thing, although the number + * of bits we pull in from the vaddr varies. Note that although + * descaddrmask masks enough of the low bits of the descriptor + * to give a correct page or table address, the address field + * in a block descriptor is smaller; so we need to explicitly + * clear the lower bits here before ORing in the low vaddr bits. + * + * Afterward, descaddr is the final physical address. + */ + page_size = (1ULL << ((stride * (4 - level)) + 3)); + descaddr &= ~(hwaddr)(page_size - 1); + descaddr |= (address & (page_size - 1)); + + if (likely(!ptw->in_debug)) { + /* + * Access flag. + * If HA is enabled, prepare to update the descriptor below. + * Otherwise, pass the access fault on to software. + */ + if (!(descriptor & (1 << 10))) { + if (param.ha) { + new_descriptor |= 1 << 10; /* AF */ + } else { + fi->type = ARMFault_AccessFlag; + goto do_fault; + } + } + + /* + * Dirty Bit. + * If HD is enabled, pre-emptively set/clear the appropriate AP/S2AP + * bit for writeback. The actual write protection test may still be + * overridden by tableattrs, to be merged below. + */ + if (param.hd + && extract64(descriptor, 51, 1) /* DBM */ + && access_type == MMU_DATA_STORE) { + if (regime_is_stage2(mmu_idx)) { + new_descriptor |= 1ull << 7; /* set S2AP[1] */ + } else { + new_descriptor &= ~(1ull << 7); /* clear AP[2] */ + } + } + } + + /* + * Extract attributes from the (modified) descriptor, and apply + * table descriptors. Stage 2 table descriptors do not include + * any attribute fields. HPD disables all the table attributes + * except NSTable. + */ + attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14)); + if (!regime_is_stage2(mmu_idx)) { + attrs |= nstable << 5; /* NS */ + if (!param.hpd) { + attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */ + /* + * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 + * means "force PL1 access only", which means forcing AP[1] to 0. + */ + attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */ + attrs |= extract32(tableattrs, 3, 1) << 7; /* APT[1] => AP[2] */ + } + } + + ap = extract32(attrs, 6, 2); + if (regime_is_stage2(mmu_idx)) { + ns = mmu_idx == ARMMMUIdx_Stage2; + xn = extract64(attrs, 53, 2); + result->f.prot = get_S2prot(env, ap, xn, s1_is_el0); + } else { + ns = extract32(attrs, 5, 1); + xn = extract64(attrs, 54, 1); + pxn = extract64(attrs, 53, 1); + result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); + } + + if (!(result->f.prot & (1 << access_type))) { + fi->type = ARMFault_Permission; + goto do_fault; + } + + /* If FEAT_HAFDBS has made changes, update the PTE. 
*/ + if (new_descriptor != descriptor) { + new_descriptor = arm_casq_ptw(env, descriptor, new_descriptor, ptw, fi); + if (fi->type != ARMFault_None) { + goto do_fault; + } + /* + * I_YZSVV says that if the in-memory descriptor has changed, + * then we must use the information in that new value + * (which might include a different output address, different + * attributes, or generate a fault). + * Restart the handling of the descriptor value from scratch. + */ + if (new_descriptor != descriptor) { + descriptor = new_descriptor; + goto restart_atomic_update; + } + } + + if (ns) { + /* + * The NS bit will (as required by the architecture) have no effect if + * the CPU doesn't support TZ or this is a non-secure translation + * regime, because the attribute will already be non-secure. + */ + result->f.attrs.secure = false; + } + + /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */ + if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) { + result->f.guarded = extract64(attrs, 50, 1); /* GP */ + } + + if (regime_is_stage2(mmu_idx)) { + result->cacheattrs.is_s2_format = true; + result->cacheattrs.attrs = extract32(attrs, 2, 4); + } else { + /* Index into MAIR registers for cache attributes */ + uint8_t attrindx = extract32(attrs, 2, 3); + uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; + assert(attrindx <= 7); + result->cacheattrs.is_s2_format = false; + result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8); + } + + /* + * For FEAT_LPA2 and effective DS, the SH field in the attributes + * was re-purposed for output address bits. The SH attribute in + * that case comes from TCR_ELx, which we extracted earlier. + */ + if (param.ds) { + result->cacheattrs.shareability = param.sh; + } else { + result->cacheattrs.shareability = extract32(attrs, 8, 2); + } + + result->f.phys_addr = descaddr; + result->f.lg_page_size = ctz64(page_size); + return false; + + do_translation_fault: + fi->type = ARMFault_Translation; + do_fault: + fi->level = level; + /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */ + fi->stage2 = fi->s1ptw || regime_is_stage2(mmu_idx); + fi->s1ns = mmu_idx == ARMMMUIdx_Stage2; + return true; +} + +static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + bool is_secure, GetPhysAddrResult *result, + ARMMMUFaultInfo *fi) +{ + int n; + uint32_t mask; + uint32_t base; + bool is_user = regime_is_user(env, mmu_idx); + + if (regime_translation_disabled(env, mmu_idx, is_secure)) { + /* MPU disabled. */ + result->f.phys_addr = address; + result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return false; + } + + result->f.phys_addr = address; + for (n = 7; n >= 0; n--) { + base = env->cp15.c6_region[n]; + if ((base & 1) == 0) { + continue; + } + mask = 1 << ((base >> 1) & 0x1f); + /* Keep this shift separate from the above to avoid an + (undefined) << 32. 
*/ + mask = (mask << 1) - 1; + if (((base ^ address) & ~mask) == 0) { + break; + } + } + if (n < 0) { + fi->type = ARMFault_Background; + return true; + } + + if (access_type == MMU_INST_FETCH) { + mask = env->cp15.pmsav5_insn_ap; + } else { + mask = env->cp15.pmsav5_data_ap; + } + mask = (mask >> (n * 4)) & 0xf; + switch (mask) { + case 0: + fi->type = ARMFault_Permission; + fi->level = 1; + return true; + case 1: + if (is_user) { + fi->type = ARMFault_Permission; + fi->level = 1; + return true; + } + result->f.prot = PAGE_READ | PAGE_WRITE; + break; + case 2: + result->f.prot = PAGE_READ; + if (!is_user) { + result->f.prot |= PAGE_WRITE; + } + break; + case 3: + result->f.prot = PAGE_READ | PAGE_WRITE; + break; + case 5: + if (is_user) { + fi->type = ARMFault_Permission; + fi->level = 1; + return true; + } + result->f.prot = PAGE_READ; + break; + case 6: + result->f.prot = PAGE_READ; + break; + default: + /* Bad permission. */ + fi->type = ARMFault_Permission; + fi->level = 1; + return true; + } + result->f.prot |= PAGE_EXEC; + return false; +} + +static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx, + int32_t address, uint8_t *prot) +{ + if (!arm_feature(env, ARM_FEATURE_M)) { + *prot = PAGE_READ | PAGE_WRITE; + switch (address) { + case 0xF0000000 ... 0xFFFFFFFF: + if (regime_sctlr(env, mmu_idx) & SCTLR_V) { + /* hivecs execing is ok */ + *prot |= PAGE_EXEC; + } + break; + case 0x00000000 ... 0x7FFFFFFF: + *prot |= PAGE_EXEC; + break; + } + } else { + /* Default system address map for M profile cores. + * The architecture specifies which regions are execute-never; + * at the MPU level no other checks are defined. + */ + switch (address) { + case 0x00000000 ... 0x1fffffff: /* ROM */ + case 0x20000000 ... 0x3fffffff: /* SRAM */ + case 0x60000000 ... 0x7fffffff: /* RAM */ + case 0x80000000 ... 0x9fffffff: /* RAM */ + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + break; + case 0x40000000 ... 0x5fffffff: /* Peripheral */ + case 0xa0000000 ... 0xbfffffff: /* Device */ + case 0xc0000000 ... 0xdfffffff: /* Device */ + case 0xe0000000 ... 0xffffffff: /* System */ + *prot = PAGE_READ | PAGE_WRITE; + break; + default: + g_assert_not_reached(); + } + } +} + +static bool m_is_ppb_region(CPUARMState *env, uint32_t address) +{ + /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ + return arm_feature(env, ARM_FEATURE_M) && + extract32(address, 20, 12) == 0xe00; +} + +static bool m_is_system_region(CPUARMState *env, uint32_t address) +{ + /* + * True if address is in the M profile system region + * 0xe0000000 - 0xffffffff + */ + return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; +} + +static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx, + bool is_secure, bool is_user) +{ + /* + * Return true if we should use the default memory map as a + * "background" region if there are no hits against any MPU regions. 
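+     * For M profile this is controlled by MPU_CTRL.PRIVDEFENA; for
+     * R profile it is SCTLR.BR. Unprivileged accesses never use the
+     * background map.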
+ */ + CPUARMState *env = &cpu->env; + + if (is_user) { + return false; + } + + if (arm_feature(env, ARM_FEATURE_M)) { + return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; + } else { + return regime_sctlr(env, mmu_idx) & SCTLR_BR; + } +} + +static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + bool secure, GetPhysAddrResult *result, + ARMMMUFaultInfo *fi) +{ + ARMCPU *cpu = env_archcpu(env); + int n; + bool is_user = regime_is_user(env, mmu_idx); + + result->f.phys_addr = address; + result->f.lg_page_size = TARGET_PAGE_BITS; + result->f.prot = 0; + + if (regime_translation_disabled(env, mmu_idx, secure) || + m_is_ppb_region(env, address)) { + /* + * MPU disabled or M profile PPB access: use default memory map. + * The other case which uses the default memory map in the + * v7M ARM ARM pseudocode is exception vector reads from the vector + * table. In QEMU those accesses are done in arm_v7m_load_vector(), + * which always does a direct read using address_space_ldl(), rather + * than going via this function, so we don't need to check that here. + */ + get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot); + } else { /* MPU enabled */ + for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { + /* region search */ + uint32_t base = env->pmsav7.drbar[n]; + uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); + uint32_t rmask; + bool srdis = false; + + if (!(env->pmsav7.drsr[n] & 0x1)) { + continue; + } + + if (!rsize) { + qemu_log_mask(LOG_GUEST_ERROR, + "DRSR[%d]: Rsize field cannot be 0\n", n); + continue; + } + rsize++; + rmask = (1ull << rsize) - 1; + + if (base & rmask) { + qemu_log_mask(LOG_GUEST_ERROR, + "DRBAR[%d]: 0x%" PRIx32 " misaligned " + "to DRSR region size, mask = 0x%" PRIx32 "\n", + n, base, rmask); + continue; + } + + if (address < base || address > base + rmask) { + /* + * Address not in this region. We must check whether the + * region covers addresses in the same page as our address. + * In that case we must not report a size that covers the + * whole page for a subsequent hit against a different MPU + * region or the background region, because it would result in + * incorrect TLB hits for subsequent accesses to addresses that + * are in this MPU region. + */ + if (ranges_overlap(base, rmask, + address & TARGET_PAGE_MASK, + TARGET_PAGE_SIZE)) { + result->f.lg_page_size = 0; + } + continue; + } + + /* Region matched */ + + if (rsize >= 8) { /* no subregions for regions < 256 bytes */ + int i, snd; + uint32_t srdis_mask; + + rsize -= 3; /* sub region size (power of 2) */ + snd = ((address - base) >> rsize) & 0x7; + srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); + + srdis_mask = srdis ? 0x3 : 0x0; + for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { + /* + * This will check in groups of 2, 4 and then 8, whether + * the subregion bits are consistent. rsize is incremented + * back up to give the region size, considering consistent + * adjacent subregions as one region. Stop testing if rsize + * is already big enough for an entire QEMU page. 
+ */ + int snd_rounded = snd & ~(i - 1); + uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], + snd_rounded + 8, i); + if (srdis_mask ^ srdis_multi) { + break; + } + srdis_mask = (srdis_mask << i) | srdis_mask; + rsize++; + } + } + if (srdis) { + continue; + } + if (rsize < TARGET_PAGE_BITS) { + result->f.lg_page_size = rsize; + } + break; + } + + if (n == -1) { /* no hits */ + if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) { + /* background fault */ + fi->type = ARMFault_Background; + return true; + } + get_phys_addr_pmsav7_default(env, mmu_idx, address, + &result->f.prot); + } else { /* a MPU hit! */ + uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); + uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); + + if (m_is_system_region(env, address)) { + /* System space is always execute never */ + xn = 1; + } + + if (is_user) { /* User mode AP bit decoding */ + switch (ap) { + case 0: + case 1: + case 5: + break; /* no access */ + case 3: + result->f.prot |= PAGE_WRITE; + /* fall through */ + case 2: + case 6: + result->f.prot |= PAGE_READ | PAGE_EXEC; + break; + case 7: + /* for v7M, same as 6; for R profile a reserved value */ + if (arm_feature(env, ARM_FEATURE_M)) { + result->f.prot |= PAGE_READ | PAGE_EXEC; + break; + } + /* fall through */ + default: + qemu_log_mask(LOG_GUEST_ERROR, + "DRACR[%d]: Bad value for AP bits: 0x%" + PRIx32 "\n", n, ap); + } + } else { /* Priv. mode AP bits decoding */ + switch (ap) { + case 0: + break; /* no access */ + case 1: + case 2: + case 3: + result->f.prot |= PAGE_WRITE; + /* fall through */ + case 5: + case 6: + result->f.prot |= PAGE_READ | PAGE_EXEC; + break; + case 7: + /* for v7M, same as 6; for R profile a reserved value */ + if (arm_feature(env, ARM_FEATURE_M)) { + result->f.prot |= PAGE_READ | PAGE_EXEC; + break; + } + /* fall through */ + default: + qemu_log_mask(LOG_GUEST_ERROR, + "DRACR[%d]: Bad value for AP bits: 0x%" + PRIx32 "\n", n, ap); + } + } + + /* execute never */ + if (xn) { + result->f.prot &= ~PAGE_EXEC; + } + } + } + + fi->type = ARMFault_Permission; + fi->level = 1; + return !(result->f.prot & (1 << access_type)); +} + +bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + bool secure, GetPhysAddrResult *result, + ARMMMUFaultInfo *fi, uint32_t *mregion) +{ + /* + * Perform a PMSAv8 MPU lookup (without also doing the SAU check + * that a full phys-to-virt translation does). + * mregion is (if not NULL) set to the region number which matched, + * or -1 if no region number is returned (MPU off, address did not + * hit a region, address hit in multiple regions). + * If the region hit doesn't cover the entire TARGET_PAGE the address + * is within, then we set the result page_size to 1 to force the + * memory system to use a subpage. 
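+     * (In terms of the result fields used below this means
+     * lg_page_size == 0, i.e. a page size of 2^0 == 1 byte.)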
+ */ + ARMCPU *cpu = env_archcpu(env); + bool is_user = regime_is_user(env, mmu_idx); + int n; + int matchregion = -1; + bool hit = false; + uint32_t addr_page_base = address & TARGET_PAGE_MASK; + uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); + + result->f.lg_page_size = TARGET_PAGE_BITS; + result->f.phys_addr = address; + result->f.prot = 0; + if (mregion) { + *mregion = -1; + } + + /* + * Unlike the ARM ARM pseudocode, we don't need to check whether this + * was an exception vector read from the vector table (which is always + * done using the default system address map), because those accesses + * are done in arm_v7m_load_vector(), which always does a direct + * read using address_space_ldl(), rather than going via this function. + */ + if (regime_translation_disabled(env, mmu_idx, secure)) { /* MPU disabled */ + hit = true; + } else if (m_is_ppb_region(env, address)) { + hit = true; + } else { + if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) { + hit = true; + } + + for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { + /* region search */ + /* + * Note that the base address is bits [31:5] from the register + * with bits [4:0] all zeroes, but the limit address is bits + * [31:5] from the register with bits [4:0] all ones. + */ + uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f; + uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f; + + if (!(env->pmsav8.rlar[secure][n] & 0x1)) { + /* Region disabled */ + continue; + } + + if (address < base || address > limit) { + /* + * Address not in this region. We must check whether the + * region covers addresses in the same page as our address. + * In that case we must not report a size that covers the + * whole page for a subsequent hit against a different MPU + * region or the background region, because it would result in + * incorrect TLB hits for subsequent accesses to addresses that + * are in this MPU region. + */ + if (limit >= base && + ranges_overlap(base, limit - base + 1, + addr_page_base, + TARGET_PAGE_SIZE)) { + result->f.lg_page_size = 0; + } + continue; + } + + if (base > addr_page_base || limit < addr_page_limit) { + result->f.lg_page_size = 0; + } + + if (matchregion != -1) { + /* + * Multiple regions match -- always a failure (unlike + * PMSAv7 where highest-numbered-region wins) + */ + fi->type = ARMFault_Permission; + fi->level = 1; + return true; + } + + matchregion = n; + hit = true; + } + } + + if (!hit) { + /* background fault */ + fi->type = ARMFault_Background; + return true; + } + + if (matchregion == -1) { + /* hit using the background region */ + get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot); + } else { + uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2); + uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1); + bool pxn = false; + + if (arm_feature(env, ARM_FEATURE_V8_1M)) { + pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1); + } + + if (m_is_system_region(env, address)) { + /* System space is always execute never */ + xn = 1; + } + + result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap); + if (result->f.prot && !xn && !(pxn && !is_user)) { + result->f.prot |= PAGE_EXEC; + } + /* + * We don't need to look the attribute up in the MAIR0/MAIR1 + * registers because that only tells us about cacheability. 
+ */ + if (mregion) { + *mregion = matchregion; + } + } + + fi->type = ARMFault_Permission; + fi->level = 1; + return !(result->f.prot & (1 << access_type)); +} + +static bool v8m_is_sau_exempt(CPUARMState *env, + uint32_t address, MMUAccessType access_type) +{ + /* + * The architecture specifies that certain address ranges are + * exempt from v8M SAU/IDAU checks. + */ + return + (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || + (address >= 0xe0000000 && address <= 0xe0002fff) || + (address >= 0xe000e000 && address <= 0xe000efff) || + (address >= 0xe002e000 && address <= 0xe002efff) || + (address >= 0xe0040000 && address <= 0xe0041fff) || + (address >= 0xe00ff000 && address <= 0xe00fffff); +} + +void v8m_security_lookup(CPUARMState *env, uint32_t address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + bool is_secure, V8M_SAttributes *sattrs) +{ + /* + * Look up the security attributes for this address. Compare the + * pseudocode SecurityCheck() function. + * We assume the caller has zero-initialized *sattrs. + */ + ARMCPU *cpu = env_archcpu(env); + int r; + bool idau_exempt = false, idau_ns = true, idau_nsc = true; + int idau_region = IREGION_NOTVALID; + uint32_t addr_page_base = address & TARGET_PAGE_MASK; + uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); + + if (cpu->idau) { + IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); + IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); + + iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, + &idau_nsc); + } + + if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { + /* 0xf0000000..0xffffffff is always S for insn fetches */ + return; + } + + if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { + sattrs->ns = !is_secure; + return; + } + + if (idau_region != IREGION_NOTVALID) { + sattrs->irvalid = true; + sattrs->iregion = idau_region; + } + + switch (env->sau.ctrl & 3) { + case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ + break; + case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ + sattrs->ns = true; + break; + default: /* SAU.ENABLE == 1 */ + for (r = 0; r < cpu->sau_sregion; r++) { + if (env->sau.rlar[r] & 1) { + uint32_t base = env->sau.rbar[r] & ~0x1f; + uint32_t limit = env->sau.rlar[r] | 0x1f; + + if (base <= address && limit >= address) { + if (base > addr_page_base || limit < addr_page_limit) { + sattrs->subpage = true; + } + if (sattrs->srvalid) { + /* + * If we hit in more than one region then we must report + * as Secure, not NS-Callable, with no valid region + * number info. + */ + sattrs->ns = false; + sattrs->nsc = false; + sattrs->sregion = 0; + sattrs->srvalid = false; + break; + } else { + if (env->sau.rlar[r] & 2) { + sattrs->nsc = true; + } else { + sattrs->ns = true; + } + sattrs->srvalid = true; + sattrs->sregion = r; + } + } else { + /* + * Address not in this region. We must check whether the + * region covers addresses in the same page as our address. + * In that case we must not report a size that covers the + * whole page for a subsequent hit against a different MPU + * region or the background region, because it would result + * in incorrect TLB hits for subsequent accesses to + * addresses that are in this MPU region. + */ + if (limit >= base && + ranges_overlap(base, limit - base + 1, + addr_page_base, + TARGET_PAGE_SIZE)) { + sattrs->subpage = true; + } + } + } + } + break; + } + + /* + * The IDAU will override the SAU lookup results if it specifies + * higher security than the SAU does. 
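+     * That is, the IDAU can only move a result towards Secure/NSC; it
+     * never turns a Secure result into Non-secure.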
+ */ + if (!idau_ns) { + if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { + sattrs->ns = false; + sattrs->nsc = idau_nsc; + } + } +} + +static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + bool secure, GetPhysAddrResult *result, + ARMMMUFaultInfo *fi) +{ + V8M_SAttributes sattrs = {}; + bool ret; + + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + v8m_security_lookup(env, address, access_type, mmu_idx, + secure, &sattrs); + if (access_type == MMU_INST_FETCH) { + /* + * Instruction fetches always use the MMU bank and the + * transaction attribute determined by the fetch address, + * regardless of CPU state. This is painful for QEMU + * to handle, because it would mean we need to encode + * into the mmu_idx not just the (user, negpri) information + * for the current security state but also that for the + * other security state, which would balloon the number + * of mmu_idx values needed alarmingly. + * Fortunately we can avoid this because it's not actually + * possible to arbitrarily execute code from memory with + * the wrong security attribute: it will always generate + * an exception of some kind or another, apart from the + * special case of an NS CPU executing an SG instruction + * in S&NSC memory. So we always just fail the translation + * here and sort things out in the exception handler + * (including possibly emulating an SG instruction). + */ + if (sattrs.ns != !secure) { + if (sattrs.nsc) { + fi->type = ARMFault_QEMU_NSCExec; + } else { + fi->type = ARMFault_QEMU_SFault; + } + result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS; + result->f.phys_addr = address; + result->f.prot = 0; + return true; + } + } else { + /* + * For data accesses we always use the MMU bank indicated + * by the current CPU state, but the security attributes + * might downgrade a secure access to nonsecure. + */ + if (sattrs.ns) { + result->f.attrs.secure = false; + } else if (!secure) { + /* + * NS access to S memory must fault. + * Architecturally we should first check whether the + * MPU information for this address indicates that we + * are doing an unaligned access to Device memory, which + * should generate a UsageFault instead. QEMU does not + * currently check for that kind of unaligned access though. + * If we added it we would need to do so as a special case + * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). + */ + fi->type = ARMFault_QEMU_SFault; + result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS; + result->f.phys_addr = address; + result->f.prot = 0; + return true; + } + } + } + + ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure, + result, fi, NULL); + if (sattrs.subpage) { + result->f.lg_page_size = 0; + } + return ret; +} + +/* + * Translate from the 4-bit stage 2 representation of + * memory attributes (without cache-allocation hints) to + * the 8-bit representation of the stage 1 MAIR registers + * (which includes allocation hints). 
+ * + * ref: shared/translation/attrs/S2AttrDecode() + * .../S2ConvertAttrsHints() + */ +static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs) +{ + uint8_t hiattr = extract32(s2attrs, 2, 2); + uint8_t loattr = extract32(s2attrs, 0, 2); + uint8_t hihint = 0, lohint = 0; + + if (hiattr != 0) { /* normal memory */ + if (hcr & HCR_CD) { /* cache disabled */ + hiattr = loattr = 1; /* non-cacheable */ + } else { + if (hiattr != 1) { /* Write-through or write-back */ + hihint = 3; /* RW allocate */ + } + if (loattr != 1) { /* Write-through or write-back */ + lohint = 3; /* RW allocate */ + } + } + } + + return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; +} + +/* + * Combine either inner or outer cacheability attributes for normal + * memory, according to table D4-42 and pseudocode procedure + * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). + * + * NB: only stage 1 includes allocation hints (RW bits), leading to + * some asymmetry. + */ +static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) +{ + if (s1 == 4 || s2 == 4) { + /* non-cacheable has precedence */ + return 4; + } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { + /* stage 1 write-through takes precedence */ + return s1; + } else if (extract32(s2, 2, 2) == 2) { + /* stage 2 write-through takes precedence, but the allocation hint + * is still taken from stage 1 + */ + return (2 << 2) | extract32(s1, 0, 2); + } else { /* write-back */ + return s1; + } +} + +/* + * Combine the memory type and cacheability attributes of + * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the + * combined attributes in MAIR_EL1 format. + */ +static uint8_t combined_attrs_nofwb(uint64_t hcr, + ARMCacheAttrs s1, ARMCacheAttrs s2) +{ + uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs; + + s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs); + + s1lo = extract32(s1.attrs, 0, 4); + s2lo = extract32(s2_mair_attrs, 0, 4); + s1hi = extract32(s1.attrs, 4, 4); + s2hi = extract32(s2_mair_attrs, 4, 4); + + /* Combine memory type and cacheability attributes */ + if (s1hi == 0 || s2hi == 0) { + /* Device has precedence over normal */ + if (s1lo == 0 || s2lo == 0) { + /* nGnRnE has precedence over anything */ + ret_attrs = 0; + } else if (s1lo == 4 || s2lo == 4) { + /* non-Reordering has precedence over Reordering */ + ret_attrs = 4; /* nGnRE */ + } else if (s1lo == 8 || s2lo == 8) { + /* non-Gathering has precedence over Gathering */ + ret_attrs = 8; /* nGRE */ + } else { + ret_attrs = 0xc; /* GRE */ + } + } else { /* Normal memory */ + /* Outer/inner cacheability combine independently */ + ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 + | combine_cacheattr_nibble(s1lo, s2lo); + } + return ret_attrs; +} + +static uint8_t force_cacheattr_nibble_wb(uint8_t attr) +{ + /* + * Given the 4 bits specifying the outer or inner cacheability + * in MAIR format, return a value specifying Normal Write-Back, + * with the allocation and transient hints taken from the input + * if the input specified some kind of cacheable attribute. + */ + if (attr == 0 || attr == 4) { + /* + * 0 == an UNPREDICTABLE encoding + * 4 == Non-cacheable + * Either way, force Write-Back RW allocate non-transient + */ + return 0xf; + } + /* Change WriteThrough to WriteBack, keep allocation and transient hints */ + return attr | 4; +} + +/* + * Combine the memory type and cacheability attributes of + * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the + * combined attributes in MAIR_EL1 format. 
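+ * With FWB the stage 2 descriptor encodes the combining rule directly:
+ * attrs 7 passes the stage 1 attributes through, 6 forces Normal
+ * Write-Back, 5 gives Normal Non-cacheable unless stage 1 is Device,
+ * and 0..3 force Device of the subtype chosen by stage 2.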
+ */ +static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2) +{ + switch (s2.attrs) { + case 7: + /* Use stage 1 attributes */ + return s1.attrs; + case 6: + /* + * Force Normal Write-Back. Note that if S1 is Normal cacheable + * then we take the allocation hints from it; otherwise it is + * RW allocate, non-transient. + */ + if ((s1.attrs & 0xf0) == 0) { + /* S1 is Device */ + return 0xff; + } + /* Need to check the Inner and Outer nibbles separately */ + return force_cacheattr_nibble_wb(s1.attrs & 0xf) | + force_cacheattr_nibble_wb(s1.attrs >> 4) << 4; + case 5: + /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */ + if ((s1.attrs & 0xf0) == 0) { + return s1.attrs; + } + return 0x44; + case 0 ... 3: + /* Force Device, of subtype specified by S2 */ + return s2.attrs << 2; + default: + /* + * RESERVED values (including RES0 descriptor bit [5] being nonzero); + * arbitrarily force Device. + */ + return 0; + } +} + +/* + * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 + * and CombineS1S2Desc() + * + * @env: CPUARMState + * @s1: Attributes from stage 1 walk + * @s2: Attributes from stage 2 walk + */ +static ARMCacheAttrs combine_cacheattrs(uint64_t hcr, + ARMCacheAttrs s1, ARMCacheAttrs s2) +{ + ARMCacheAttrs ret; + bool tagged = false; + + assert(s2.is_s2_format && !s1.is_s2_format); + ret.is_s2_format = false; + + if (s1.attrs == 0xf0) { + tagged = true; + s1.attrs = 0xff; + } + + /* Combine shareability attributes (table D4-43) */ + if (s1.shareability == 2 || s2.shareability == 2) { + /* if either are outer-shareable, the result is outer-shareable */ + ret.shareability = 2; + } else if (s1.shareability == 3 || s2.shareability == 3) { + /* if either are inner-shareable, the result is inner-shareable */ + ret.shareability = 3; + } else { + /* both non-shareable */ + ret.shareability = 0; + } + + /* Combine memory type and cacheability attributes */ + if (hcr & HCR_FWB) { + ret.attrs = combined_attrs_fwb(s1, s2); + } else { + ret.attrs = combined_attrs_nofwb(hcr, s1, s2); + } + + /* + * Any location for which the resultant memory type is any + * type of Device memory is always treated as Outer Shareable. + * Any location for which the resultant memory type is Normal + * Inner Non-cacheable, Outer Non-cacheable is always treated + * as Outer Shareable. + * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC + */ + if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) { + ret.shareability = 2; + } + + /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */ + if (tagged && ret.attrs == 0xff) { + ret.attrs = 0xf0; + } + + return ret; +} + +/* + * MMU disabled. S1 addresses within aa64 translation regimes are + * still checked for bounds -- see AArch64.S1DisabledOutput(). 
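+ * Otherwise the result is a flat mapping with full RWX permission;
+ * only the memory attributes and shareability vary, derived from
+ * HCR_EL2.{DC,DCT} and SCTLR.I below.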
+ */ +static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address, + MMUAccessType access_type, + ARMMMUIdx mmu_idx, bool is_secure, + GetPhysAddrResult *result, + ARMMMUFaultInfo *fi) +{ + uint8_t memattr = 0x00; /* Device nGnRnE */ + uint8_t shareability = 0; /* non-sharable */ + int r_el; + + switch (mmu_idx) { + case ARMMMUIdx_Stage2: + case ARMMMUIdx_Stage2_S: + case ARMMMUIdx_Phys_NS: + case ARMMMUIdx_Phys_S: + break; + + default: + r_el = regime_el(env, mmu_idx); + if (arm_el_is_aa64(env, r_el)) { + int pamax = arm_pamax(env_archcpu(env)); + uint64_t tcr = env->cp15.tcr_el[r_el]; + int addrtop, tbi; + + tbi = aa64_va_parameter_tbi(tcr, mmu_idx); + if (access_type == MMU_INST_FETCH) { + tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); + } + tbi = (tbi >> extract64(address, 55, 1)) & 1; + addrtop = (tbi ? 55 : 63); + + if (extract64(address, pamax, addrtop - pamax + 1) != 0) { + fi->type = ARMFault_AddressSize; + fi->level = 0; + fi->stage2 = false; + return 1; + } + + /* + * When TBI is disabled, we've just validated that all of the + * bits above PAMax are zero, so logically we only need to + * clear the top byte for TBI. But it's clearer to follow + * the pseudocode set of addrdesc.paddress. + */ + address = extract64(address, 0, 52); + } + + /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */ + if (r_el == 1) { + uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure); + if (hcr & HCR_DC) { + if (hcr & HCR_DCT) { + memattr = 0xf0; /* Tagged, Normal, WB, RWA */ + } else { + memattr = 0xff; /* Normal, WB, RWA */ + } + } + } + if (memattr == 0 && access_type == MMU_INST_FETCH) { + if (regime_sctlr(env, mmu_idx) & SCTLR_I) { + memattr = 0xee; /* Normal, WT, RA, NT */ + } else { + memattr = 0x44; /* Normal, NC, No */ + } + shareability = 2; /* outer sharable */ + } + result->cacheattrs.is_s2_format = false; + break; + } + + result->f.phys_addr = address; + result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + result->f.lg_page_size = TARGET_PAGE_BITS; + result->cacheattrs.shareability = shareability; + result->cacheattrs.attrs = memattr; + return false; +} + +static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, + target_ulong address, + MMUAccessType access_type, + GetPhysAddrResult *result, + ARMMMUFaultInfo *fi) +{ + hwaddr ipa; + int s1_prot, s1_lgpgsz; + bool is_secure = ptw->in_secure; + bool ret, ipa_secure, s2walk_secure; + ARMCacheAttrs cacheattrs1; + bool is_el0; + uint64_t hcr; + + ret = get_phys_addr_with_struct(env, ptw, address, access_type, result, fi); + + /* If S1 fails, return early. */ + if (ret) { + return ret; + } + + ipa = result->f.phys_addr; + ipa_secure = result->f.attrs.secure; + if (is_secure) { + /* Select TCR based on the NS bit from the S1 walk. */ + s2walk_secure = !(ipa_secure + ? env->cp15.vstcr_el2 & VSTCR_SW + : env->cp15.vtcr_el2 & VTCR_NSW); + } else { + assert(!ipa_secure); + s2walk_secure = false; + } + + is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0; + ptw->in_mmu_idx = s2walk_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; + ptw->in_ptw_idx = s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS; + ptw->in_secure = s2walk_secure; + + /* + * S1 is done, now do S2 translation. + * Save the stage1 results so that we may merge prot and cacheattrs later. 
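+ * The stage 2 walk reuses *result, so the stage 1 prot, page size and
+ * cache attributes are stashed in locals before it is cleared.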
+ */ + s1_prot = result->f.prot; + s1_lgpgsz = result->f.lg_page_size; + cacheattrs1 = result->cacheattrs; + memset(result, 0, sizeof(*result)); + + ret = get_phys_addr_lpae(env, ptw, ipa, access_type, is_el0, result, fi); + fi->s2addr = ipa; + + /* Combine the S1 and S2 perms. */ + result->f.prot &= s1_prot; + + /* If S2 fails, return early. */ + if (ret) { + return ret; + } + + /* + * Use the maximum of the S1 & S2 page size, so that invalidation + * of pages > TARGET_PAGE_SIZE works correctly. + */ + if (result->f.lg_page_size < s1_lgpgsz) { + result->f.lg_page_size = s1_lgpgsz; + } + + /* Combine the S1 and S2 cache attributes. */ + hcr = arm_hcr_el2_eff_secstate(env, is_secure); + if (hcr & HCR_DC) { + /* + * HCR.DC forces the first stage attributes to + * Normal Non-Shareable, + * Inner Write-Back Read-Allocate Write-Allocate, + * Outer Write-Back Read-Allocate Write-Allocate. + * Do not overwrite Tagged within attrs. + */ + if (cacheattrs1.attrs != 0xf0) { + cacheattrs1.attrs = 0xff; + } + cacheattrs1.shareability = 0; + } + result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1, + result->cacheattrs); + + /* + * Check if IPA translates to secure or non-secure PA space. + * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA. + */ + result->f.attrs.secure = + (is_secure + && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)) + && (ipa_secure + || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)))); + + return false; +} + +static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw, + target_ulong address, + MMUAccessType access_type, + GetPhysAddrResult *result, + ARMMMUFaultInfo *fi) +{ + ARMMMUIdx mmu_idx = ptw->in_mmu_idx; + bool is_secure = ptw->in_secure; + ARMMMUIdx s1_mmu_idx; + + /* + * The page table entries may downgrade secure to non-secure, but + * cannot upgrade an non-secure translation regime's attributes + * to secure. + */ + result->f.attrs.secure = is_secure; + + switch (mmu_idx) { + case ARMMMUIdx_Phys_S: + case ARMMMUIdx_Phys_NS: + /* Checking Phys early avoids special casing later vs regime_el. */ + return get_phys_addr_disabled(env, address, access_type, mmu_idx, + is_secure, result, fi); + + case ARMMMUIdx_Stage1_E0: + case ARMMMUIdx_Stage1_E1: + case ARMMMUIdx_Stage1_E1_PAN: + /* First stage lookup uses second stage for ptw. */ + ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; + break; + + case ARMMMUIdx_E10_0: + s1_mmu_idx = ARMMMUIdx_Stage1_E0; + goto do_twostage; + case ARMMMUIdx_E10_1: + s1_mmu_idx = ARMMMUIdx_Stage1_E1; + goto do_twostage; + case ARMMMUIdx_E10_1_PAN: + s1_mmu_idx = ARMMMUIdx_Stage1_E1_PAN; + do_twostage: + /* + * Call ourselves recursively to do the stage 1 and then stage 2 + * translations if mmu_idx is a two-stage regime, and EL2 present. + * Otherwise, a stage1+stage2 translation is just stage 1. + */ + ptw->in_mmu_idx = mmu_idx = s1_mmu_idx; + if (arm_feature(env, ARM_FEATURE_EL2) && + !regime_translation_disabled(env, ARMMMUIdx_Stage2, is_secure)) { + return get_phys_addr_twostage(env, ptw, address, access_type, + result, fi); + } + /* fall through */ + + default: + /* Single stage and second stage uses physical for ptw. */ + ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS; + break; + } + + result->f.attrs.user = regime_is_user(env, mmu_idx); + + /* + * Fast Context Switch Extension. This doesn't exist at all in v8. + * In v7 and earlier it affects all stage 1 translations. 
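+ * When it applies, addresses below 32MB are remapped by adding the
+ * banked FCSEIDR value, as done just below.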
+ */ + if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2 + && !arm_feature(env, ARM_FEATURE_V8)) { + if (regime_el(env, mmu_idx) == 3) { + address += env->cp15.fcseidr_s; + } else { + address += env->cp15.fcseidr_ns; + } + } + + if (arm_feature(env, ARM_FEATURE_PMSA)) { + bool ret; + result->f.lg_page_size = TARGET_PAGE_BITS; + + if (arm_feature(env, ARM_FEATURE_V8)) { + /* PMSAv8 */ + ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, + is_secure, result, fi); + } else if (arm_feature(env, ARM_FEATURE_V7)) { + /* PMSAv7 */ + ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, + is_secure, result, fi); + } else { + /* Pre-v7 MPU */ + ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, + is_secure, result, fi); + } + qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 + " mmu_idx %u -> %s (prot %c%c%c)\n", + access_type == MMU_DATA_LOAD ? "reading" : + (access_type == MMU_DATA_STORE ? "writing" : "execute"), + (uint32_t)address, mmu_idx, + ret ? "Miss" : "Hit", + result->f.prot & PAGE_READ ? 'r' : '-', + result->f.prot & PAGE_WRITE ? 'w' : '-', + result->f.prot & PAGE_EXEC ? 'x' : '-'); + + return ret; + } + + /* Definitely a real MMU, not an MPU */ + + if (regime_translation_disabled(env, mmu_idx, is_secure)) { + return get_phys_addr_disabled(env, address, access_type, mmu_idx, + is_secure, result, fi); + } + + if (regime_using_lpae_format(env, mmu_idx)) { + return get_phys_addr_lpae(env, ptw, address, access_type, false, + result, fi); + } else if (arm_feature(env, ARM_FEATURE_V7) || + regime_sctlr(env, mmu_idx) & SCTLR_XP) { + return get_phys_addr_v6(env, ptw, address, access_type, result, fi); + } else { + return get_phys_addr_v5(env, ptw, address, access_type, result, fi); + } +} + +bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + bool is_secure, GetPhysAddrResult *result, + ARMMMUFaultInfo *fi) +{ + S1Translate ptw = { + .in_mmu_idx = mmu_idx, + .in_secure = is_secure, + }; + return get_phys_addr_with_struct(env, &ptw, address, access_type, + result, fi); +} + +bool get_phys_addr(CPUARMState *env, target_ulong address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + GetPhysAddrResult *result, ARMMMUFaultInfo *fi) +{ + bool is_secure; + + switch (mmu_idx) { + case ARMMMUIdx_E10_0: + case ARMMMUIdx_E10_1: + case ARMMMUIdx_E10_1_PAN: + case ARMMMUIdx_E20_0: + case ARMMMUIdx_E20_2: + case ARMMMUIdx_E20_2_PAN: + case ARMMMUIdx_Stage1_E0: + case ARMMMUIdx_Stage1_E1: + case ARMMMUIdx_Stage1_E1_PAN: + case ARMMMUIdx_E2: + is_secure = arm_is_secure_below_el3(env); + break; + case ARMMMUIdx_Stage2: + case ARMMMUIdx_Phys_NS: + case ARMMMUIdx_MPrivNegPri: + case ARMMMUIdx_MUserNegPri: + case ARMMMUIdx_MPriv: + case ARMMMUIdx_MUser: + is_secure = false; + break; + case ARMMMUIdx_E3: + case ARMMMUIdx_Stage2_S: + case ARMMMUIdx_Phys_S: + case ARMMMUIdx_MSPrivNegPri: + case ARMMMUIdx_MSUserNegPri: + case ARMMMUIdx_MSPriv: + case ARMMMUIdx_MSUser: + is_secure = true; + break; + default: + g_assert_not_reached(); + } + return get_phys_addr_with_secure(env, address, access_type, mmu_idx, + is_secure, result, fi); +} + +hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, + MemTxAttrs *attrs) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + S1Translate ptw = { + .in_mmu_idx = arm_mmu_idx(env), + .in_secure = arm_is_secure(env), + .in_debug = true, + }; + GetPhysAddrResult res = {}; + ARMMMUFaultInfo fi = {}; + bool ret; + + ret = 
get_phys_addr_with_struct(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi); + *attrs = res.f.attrs; + + if (ret) { + return -1; + } + return res.f.phys_addr; +} diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode new file mode 100644 index 0000000000..47708ccc8d --- /dev/null +++ b/target/arm/sme-fa64.decode @@ -0,0 +1,60 @@ +# AArch64 SME allowed instruction decoding +# +# Copyright (c) 2022 Linaro, Ltd +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, see . + +# +# This file is processed by scripts/decodetree.py +# + +# These patterns are taken from Appendix E1.1 of DDI0616 A.a, +# Arm Architecture Reference Manual Supplement, +# The Scalable Matrix Extension (SME), for Armv9-A + +{ + [ + OK 0-00 1110 0000 0001 0010 11-- ---- ---- # SMOV W|Xd,Vn.B[0] + OK 0-00 1110 0000 0010 0010 11-- ---- ---- # SMOV W|Xd,Vn.H[0] + OK 0100 1110 0000 0100 0010 11-- ---- ---- # SMOV Xd,Vn.S[0] + OK 0000 1110 0000 0001 0011 11-- ---- ---- # UMOV Wd,Vn.B[0] + OK 0000 1110 0000 0010 0011 11-- ---- ---- # UMOV Wd,Vn.H[0] + OK 0000 1110 0000 0100 0011 11-- ---- ---- # UMOV Wd,Vn.S[0] + OK 0100 1110 0000 1000 0011 11-- ---- ---- # UMOV Xd,Vn.D[0] + ] + FAIL 0--0 111- ---- ---- ---- ---- ---- ---- # Advanced SIMD vector operations +} + +{ + [ + OK 0101 1110 --1- ---- 11-1 11-- ---- ---- # FMULX/FRECPS/FRSQRTS (scalar) + OK 0101 1110 -10- ---- 00-1 11-- ---- ---- # FMULX/FRECPS/FRSQRTS (scalar, FP16) + OK 01-1 1110 1-10 0001 11-1 10-- ---- ---- # FRECPE/FRSQRTE/FRECPX (scalar) + OK 01-1 1110 1111 1001 11-1 10-- ---- ---- # FRECPE/FRSQRTE/FRECPX (scalar, FP16) + ] + FAIL 01-1 111- ---- ---- ---- ---- ---- ---- # Advanced SIMD single-element operations +} + +FAIL 0-00 110- ---- ---- ---- ---- ---- ---- # Advanced SIMD structure load/store +FAIL 1100 1110 ---- ---- ---- ---- ---- ---- # Advanced SIMD cryptography extensions +FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS + +# These are the "avoidance of doubt" final table of Illegal Advanced SIMD instructions +# We don't actually need to include these, as the default is OK. 
+# -001 111- ---- ---- ---- ---- ---- ---- # Scalar floating-point operations +# --10 110- ---- ---- ---- ---- ---- ---- # Load/store pair of FP registers +# --01 1100 ---- ---- ---- ---- ---- ---- # Load FP register (PC-relative literal) +# --11 1100 --0- ---- ---- ---- ---- ---- # Load/store FP register (unscaled imm) +# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) +# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) diff --git a/target/arm/sme.decode b/target/arm/sme.decode new file mode 100644 index 0000000000..628804e37a --- /dev/null +++ b/target/arm/sme.decode @@ -0,0 +1,88 @@ +# AArch64 SME instruction descriptions +# +# Copyright (c) 2022 Linaro, Ltd +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, see . + +# +# This file is processed by scripts/decodetree.py +# + +### SME Misc + +ZERO 11000000 00 001 00000000000 imm:8 + +### SME Move into/from Array + +%mova_rs 13:2 !function=plus_12 +&mova esz rs pg zr za_imm v:bool to_vec:bool + +MOVA 11000000 esz:2 00000 0 v:1 .. pg:3 zr:5 0 za_imm:4 \ + &mova to_vec=0 rs=%mova_rs +MOVA 11000000 11 00000 1 v:1 .. pg:3 zr:5 0 za_imm:4 \ + &mova to_vec=0 rs=%mova_rs esz=4 + +MOVA 11000000 esz:2 00001 0 v:1 .. pg:3 0 za_imm:4 zr:5 \ + &mova to_vec=1 rs=%mova_rs +MOVA 11000000 11 00001 1 v:1 .. pg:3 0 za_imm:4 zr:5 \ + &mova to_vec=1 rs=%mova_rs esz=4 + +### SME Memory + +&ldst esz rs pg rn rm za_imm v:bool st:bool + +LDST1 1110000 0 esz:2 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \ + &ldst rs=%mova_rs +LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \ + &ldst esz=4 rs=%mova_rs + +&ldstr rv rn imm +@ldstr ....... ... . ...... .. ... rn:5 . imm:4 \ + &ldstr rv=%mova_rs + +LDR 1110000 100 0 000000 .. 000 ..... 0 .... @ldstr +STR 1110000 100 1 000000 .. 000 ..... 0 .... @ldstr + +### SME Add Vector to Array + +&adda zad zn pm pn +@adda_32 ........ .. ..... . pm:3 pn:3 zn:5 ... zad:2 &adda +@adda_64 ........ .. ..... . pm:3 pn:3 zn:5 .. zad:3 &adda + +ADDHA_s 11000000 10 01000 0 ... ... ..... 000 .. @adda_32 +ADDVA_s 11000000 10 01000 1 ... ... ..... 000 .. @adda_32 +ADDHA_d 11000000 11 01000 0 ... ... ..... 00 ... @adda_64 +ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64 + +### SME Outer Product + +&op zad zn zm pm pn sub:bool +@op_32 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 .. zad:2 &op +@op_64 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 . zad:3 &op + +FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32 +FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64 + +BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32 +FMOPA_h 10000001 101 ..... ... ... ..... . 00 .. @op_32 + +SMOPA_s 1010000 0 10 0 ..... ... ... ..... . 00 .. @op_32 +SUMOPA_s 1010000 0 10 1 ..... ... ... ..... . 00 .. @op_32 +USMOPA_s 1010000 1 10 0 ..... ... ... ..... . 00 .. @op_32 +UMOPA_s 1010000 1 10 1 ..... ... ... ..... . 00 .. @op_32 + +SMOPA_d 1010000 0 11 0 ..... ... ... ..... . 0 ... @op_64 +SUMOPA_d 1010000 0 11 1 ..... 
... ... ..... . 0 ... @op_64 +USMOPA_d 1010000 1 11 0 ..... ... ... ..... . 0 ... @op_64 +UMOPA_d 1010000 1 11 1 ..... ... ... ..... . 0 ... @op_64 diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c new file mode 100644 index 0000000000..f891306bb9 --- /dev/null +++ b/target/arm/sme_helper.c @@ -0,0 +1,1201 @@ +/* + * ARM SME Operations + * + * Copyright (c) 2022 Linaro, Ltd. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internals.h" +#include "tcg/tcg-gvec-desc.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" +#include "exec/exec-all.h" +#include "qemu/int128.h" +#include "fpu/softfloat.h" +#include "vec_internal.h" +#include "sve_ldst_internal.h" + +/* ResetSVEState */ +void arm_reset_sve_state(CPUARMState *env) +{ + memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs)); + /* Recall that FFR is stored as pregs[16]. */ + memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs)); + vfp_set_fpcr(env, 0x0800009f); +} + +void helper_set_pstate_sm(CPUARMState *env, uint32_t i) +{ + if (i == FIELD_EX64(env->svcr, SVCR, SM)) { + return; + } + env->svcr ^= R_SVCR_SM_MASK; + arm_reset_sve_state(env); +} + +void helper_set_pstate_za(CPUARMState *env, uint32_t i) +{ + if (i == FIELD_EX64(env->svcr, SVCR, ZA)) { + return; + } + env->svcr ^= R_SVCR_ZA_MASK; + + /* + * ResetSMEState. + * + * SetPSTATE_ZA zeros on enable and disable. We can zero this only + * on enable: while disabled, the storage is inaccessible and the + * value does not matter. We're not saving the storage in vmstate + * when disabled either. + */ + if (i) { + memset(env->zarray, 0, sizeof(env->zarray)); + } +} + +void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl) +{ + uint32_t i; + + /* + * Special case clearing the entire ZA space. + * This falls into the CONSTRAINED UNPREDICTABLE zeroing of any + * parts of the ZA storage outside of SVL. + */ + if (imm == 0xff) { + memset(env->zarray, 0, sizeof(env->zarray)); + return; + } + + /* + * Recall that ZAnH.D[m] is spread across ZA[n+8*m], + * so each row is discontiguous within ZA[]. + */ + for (i = 0; i < svl; i++) { + if (imm & (1 << (i % 8))) { + memset(&env->zarray[i], 0, svl); + } + } +} + + +/* + * When considering the ZA storage as an array of elements of + * type T, the index within that array of the Nth element of + * a vertical slice of a tile can be calculated like this, + * regardless of the size of type T. This is because the tiles + * are interleaved, so if type T is size N bytes then row 1 of + * the tile is N rows away from row 0. The division by N to + * convert a byte offset into an array index and the multiplication + * by N to convert from vslice-index-within-the-tile to + * the index within the ZA storage cancel out. 
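+ * For example, with 4-byte elements, element i of a vertical slice is
+ * 4*i ZA rows below element 0, i.e. 4*i*sizeof(ARMVectorReg) bytes
+ * away; dividing by the 4-byte element size gives an array index of
+ * i*sizeof(ARMVectorReg), independent of the element size.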
+ */ +#define tile_vslice_index(i) ((i) * sizeof(ARMVectorReg)) + +/* + * When doing byte arithmetic on the ZA storage, the element + * byteoff bytes away in a tile vertical slice is always this + * many bytes away in the ZA storage, regardless of the + * size of the tile element, assuming that byteoff is a multiple + * of the element size. Again this is because of the interleaving + * of the tiles. For instance if we have 1 byte per element then + * each row of the ZA storage has one byte of the vslice data, + * and (counting from 0) byte 8 goes in row 8 of the storage + * at offset (8 * row-size-in-bytes). + * If we have 8 bytes per element then each row of the ZA storage + * has 8 bytes of the data, but there are 8 interleaved tiles and + * so byte 8 of the data goes into row 1 of the tile, + * which is again row 8 of the storage, so the offset is still + * (8 * row-size-in-bytes). Similarly for other element sizes. + */ +#define tile_vslice_offset(byteoff) ((byteoff) * sizeof(ARMVectorReg)) + + +/* + * Move Zreg vector to ZArray column. + */ +#define DO_MOVA_C(NAME, TYPE, H) \ +void HELPER(NAME)(void *za, void *vn, void *vg, uint32_t desc) \ +{ \ + int i, oprsz = simd_oprsz(desc); \ + for (i = 0; i < oprsz; ) { \ + uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \ + do { \ + if (pg & 1) { \ + *(TYPE *)(za + tile_vslice_offset(i)) = *(TYPE *)(vn + H(i)); \ + } \ + i += sizeof(TYPE); \ + pg >>= sizeof(TYPE); \ + } while (i & 15); \ + } \ +} + +DO_MOVA_C(sme_mova_cz_b, uint8_t, H1) +DO_MOVA_C(sme_mova_cz_h, uint16_t, H1_2) +DO_MOVA_C(sme_mova_cz_s, uint32_t, H1_4) + +void HELPER(sme_mova_cz_d)(void *za, void *vn, void *vg, uint32_t desc) +{ + int i, oprsz = simd_oprsz(desc) / 8; + uint8_t *pg = vg; + uint64_t *n = vn; + uint64_t *a = za; + + for (i = 0; i < oprsz; i++) { + if (pg[H1(i)] & 1) { + a[tile_vslice_index(i)] = n[i]; + } + } +} + +void HELPER(sme_mova_cz_q)(void *za, void *vn, void *vg, uint32_t desc) +{ + int i, oprsz = simd_oprsz(desc) / 16; + uint16_t *pg = vg; + Int128 *n = vn; + Int128 *a = za; + + /* + * Int128 is used here simply to copy 16 bytes, and to simplify + * the address arithmetic. + */ + for (i = 0; i < oprsz; i++) { + if (pg[H2(i)] & 1) { + a[tile_vslice_index(i)] = n[i]; + } + } +} + +#undef DO_MOVA_C + +/* + * Move ZArray column to Zreg vector. + */ +#define DO_MOVA_Z(NAME, TYPE, H) \ +void HELPER(NAME)(void *vd, void *za, void *vg, uint32_t desc) \ +{ \ + int i, oprsz = simd_oprsz(desc); \ + for (i = 0; i < oprsz; ) { \ + uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \ + do { \ + if (pg & 1) { \ + *(TYPE *)(vd + H(i)) = *(TYPE *)(za + tile_vslice_offset(i)); \ + } \ + i += sizeof(TYPE); \ + pg >>= sizeof(TYPE); \ + } while (i & 15); \ + } \ +} + +DO_MOVA_Z(sme_mova_zc_b, uint8_t, H1) +DO_MOVA_Z(sme_mova_zc_h, uint16_t, H1_2) +DO_MOVA_Z(sme_mova_zc_s, uint32_t, H1_4) + +void HELPER(sme_mova_zc_d)(void *vd, void *za, void *vg, uint32_t desc) +{ + int i, oprsz = simd_oprsz(desc) / 8; + uint8_t *pg = vg; + uint64_t *d = vd; + uint64_t *a = za; + + for (i = 0; i < oprsz; i++) { + if (pg[H1(i)] & 1) { + d[i] = a[tile_vslice_index(i)]; + } + } +} + +void HELPER(sme_mova_zc_q)(void *vd, void *za, void *vg, uint32_t desc) +{ + int i, oprsz = simd_oprsz(desc) / 16; + uint16_t *pg = vg; + Int128 *d = vd; + Int128 *a = za; + + /* + * Int128 is used here simply to copy 16 bytes, and to simplify + * the address arithmetic. 
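+ * Only the predicate bit for the first byte of each 16-byte element
+ * is tested, via pg[H2(i)] & 1.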
+ */ + for (i = 0; i < oprsz; i++, za += sizeof(ARMVectorReg)) { + if (pg[H2(i)] & 1) { + d[i] = a[tile_vslice_index(i)]; + } + } +} + +#undef DO_MOVA_Z + +/* + * Clear elements in a tile slice comprising len bytes. + */ + +typedef void ClearFn(void *ptr, size_t off, size_t len); + +static void clear_horizontal(void *ptr, size_t off, size_t len) +{ + memset(ptr + off, 0, len); +} + +static void clear_vertical_b(void *vptr, size_t off, size_t len) +{ + for (size_t i = 0; i < len; ++i) { + *(uint8_t *)(vptr + tile_vslice_offset(i + off)) = 0; + } +} + +static void clear_vertical_h(void *vptr, size_t off, size_t len) +{ + for (size_t i = 0; i < len; i += 2) { + *(uint16_t *)(vptr + tile_vslice_offset(i + off)) = 0; + } +} + +static void clear_vertical_s(void *vptr, size_t off, size_t len) +{ + for (size_t i = 0; i < len; i += 4) { + *(uint32_t *)(vptr + tile_vslice_offset(i + off)) = 0; + } +} + +static void clear_vertical_d(void *vptr, size_t off, size_t len) +{ + for (size_t i = 0; i < len; i += 8) { + *(uint64_t *)(vptr + tile_vslice_offset(i + off)) = 0; + } +} + +static void clear_vertical_q(void *vptr, size_t off, size_t len) +{ + for (size_t i = 0; i < len; i += 16) { + memset(vptr + tile_vslice_offset(i + off), 0, 16); + } +} + +/* + * Copy elements from an array into a tile slice comprising len bytes. + */ + +typedef void CopyFn(void *dst, const void *src, size_t len); + +static void copy_horizontal(void *dst, const void *src, size_t len) +{ + memcpy(dst, src, len); +} + +static void copy_vertical_b(void *vdst, const void *vsrc, size_t len) +{ + const uint8_t *src = vsrc; + uint8_t *dst = vdst; + size_t i; + + for (i = 0; i < len; ++i) { + dst[tile_vslice_index(i)] = src[i]; + } +} + +static void copy_vertical_h(void *vdst, const void *vsrc, size_t len) +{ + const uint16_t *src = vsrc; + uint16_t *dst = vdst; + size_t i; + + for (i = 0; i < len / 2; ++i) { + dst[tile_vslice_index(i)] = src[i]; + } +} + +static void copy_vertical_s(void *vdst, const void *vsrc, size_t len) +{ + const uint32_t *src = vsrc; + uint32_t *dst = vdst; + size_t i; + + for (i = 0; i < len / 4; ++i) { + dst[tile_vslice_index(i)] = src[i]; + } +} + +static void copy_vertical_d(void *vdst, const void *vsrc, size_t len) +{ + const uint64_t *src = vsrc; + uint64_t *dst = vdst; + size_t i; + + for (i = 0; i < len / 8; ++i) { + dst[tile_vslice_index(i)] = src[i]; + } +} + +static void copy_vertical_q(void *vdst, const void *vsrc, size_t len) +{ + for (size_t i = 0; i < len; i += 16) { + memcpy(vdst + tile_vslice_offset(i), vsrc + i, 16); + } +} + +/* + * Host and TLB primitives for vertical tile slice addressing. 
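+ * Each expansion below provides a _host form that reads or writes via
+ * a host pointer and a _tlb form that goes through the
+ * cpu_ld/st*_data_ra accessors; the vertical (_v) forms route the ZA
+ * offset through tile_vslice_offset().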
+ */ + +#define DO_LD(NAME, TYPE, HOST, TLB) \ +static inline void sme_##NAME##_v_host(void *za, intptr_t off, void *host) \ +{ \ + TYPE val = HOST(host); \ + *(TYPE *)(za + tile_vslice_offset(off)) = val; \ +} \ +static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za, \ + intptr_t off, target_ulong addr, uintptr_t ra) \ +{ \ + TYPE val = TLB(env, useronly_clean_ptr(addr), ra); \ + *(TYPE *)(za + tile_vslice_offset(off)) = val; \ +} + +#define DO_ST(NAME, TYPE, HOST, TLB) \ +static inline void sme_##NAME##_v_host(void *za, intptr_t off, void *host) \ +{ \ + TYPE val = *(TYPE *)(za + tile_vslice_offset(off)); \ + HOST(host, val); \ +} \ +static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za, \ + intptr_t off, target_ulong addr, uintptr_t ra) \ +{ \ + TYPE val = *(TYPE *)(za + tile_vslice_offset(off)); \ + TLB(env, useronly_clean_ptr(addr), val, ra); \ +} + +/* + * The ARMVectorReg elements are stored in host-endian 64-bit units. + * For 128-bit quantities, the sequence defined by the Elem[] pseudocode + * corresponds to storing the two 64-bit pieces in little-endian order. + */ +#define DO_LDQ(HNAME, VNAME, BE, HOST, TLB) \ +static inline void HNAME##_host(void *za, intptr_t off, void *host) \ +{ \ + uint64_t val0 = HOST(host), val1 = HOST(host + 8); \ + uint64_t *ptr = za + off; \ + ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1; \ +} \ +static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \ +{ \ + HNAME##_host(za, tile_vslice_offset(off), host); \ +} \ +static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \ + target_ulong addr, uintptr_t ra) \ +{ \ + uint64_t val0 = TLB(env, useronly_clean_ptr(addr), ra); \ + uint64_t val1 = TLB(env, useronly_clean_ptr(addr + 8), ra); \ + uint64_t *ptr = za + off; \ + ptr[0] = BE ? val1 : val0, ptr[1] = BE ? 
val0 : val1; \ +} \ +static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \ + target_ulong addr, uintptr_t ra) \ +{ \ + HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra); \ +} + +#define DO_STQ(HNAME, VNAME, BE, HOST, TLB) \ +static inline void HNAME##_host(void *za, intptr_t off, void *host) \ +{ \ + uint64_t *ptr = za + off; \ + HOST(host, ptr[BE]); \ + HOST(host + 1, ptr[!BE]); \ +} \ +static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \ +{ \ + HNAME##_host(za, tile_vslice_offset(off), host); \ +} \ +static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \ + target_ulong addr, uintptr_t ra) \ +{ \ + uint64_t *ptr = za + off; \ + TLB(env, useronly_clean_ptr(addr), ptr[BE], ra); \ + TLB(env, useronly_clean_ptr(addr + 8), ptr[!BE], ra); \ +} \ +static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \ + target_ulong addr, uintptr_t ra) \ +{ \ + HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra); \ +} + +DO_LD(ld1b, uint8_t, ldub_p, cpu_ldub_data_ra) +DO_LD(ld1h_be, uint16_t, lduw_be_p, cpu_lduw_be_data_ra) +DO_LD(ld1h_le, uint16_t, lduw_le_p, cpu_lduw_le_data_ra) +DO_LD(ld1s_be, uint32_t, ldl_be_p, cpu_ldl_be_data_ra) +DO_LD(ld1s_le, uint32_t, ldl_le_p, cpu_ldl_le_data_ra) +DO_LD(ld1d_be, uint64_t, ldq_be_p, cpu_ldq_be_data_ra) +DO_LD(ld1d_le, uint64_t, ldq_le_p, cpu_ldq_le_data_ra) + +DO_LDQ(sve_ld1qq_be, sme_ld1q_be, 1, ldq_be_p, cpu_ldq_be_data_ra) +DO_LDQ(sve_ld1qq_le, sme_ld1q_le, 0, ldq_le_p, cpu_ldq_le_data_ra) + +DO_ST(st1b, uint8_t, stb_p, cpu_stb_data_ra) +DO_ST(st1h_be, uint16_t, stw_be_p, cpu_stw_be_data_ra) +DO_ST(st1h_le, uint16_t, stw_le_p, cpu_stw_le_data_ra) +DO_ST(st1s_be, uint32_t, stl_be_p, cpu_stl_be_data_ra) +DO_ST(st1s_le, uint32_t, stl_le_p, cpu_stl_le_data_ra) +DO_ST(st1d_be, uint64_t, stq_be_p, cpu_stq_be_data_ra) +DO_ST(st1d_le, uint64_t, stq_le_p, cpu_stq_le_data_ra) + +DO_STQ(sve_st1qq_be, sme_st1q_be, 1, stq_be_p, cpu_stq_be_data_ra) +DO_STQ(sve_st1qq_le, sme_st1q_le, 0, stq_le_p, cpu_stq_le_data_ra) + +#undef DO_LD +#undef DO_ST +#undef DO_LDQ +#undef DO_STQ + +/* + * Common helper for all contiguous predicated loads. + */ + +static inline QEMU_ALWAYS_INLINE +void sme_ld1(CPUARMState *env, void *za, uint64_t *vg, + const target_ulong addr, uint32_t desc, const uintptr_t ra, + const int esz, uint32_t mtedesc, bool vertical, + sve_ldst1_host_fn *host_fn, + sve_ldst1_tlb_fn *tlb_fn, + ClearFn *clr_fn, + CopyFn *cpy_fn) +{ + const intptr_t reg_max = simd_oprsz(desc); + const intptr_t esize = 1 << esz; + intptr_t reg_off, reg_last; + SVEContLdSt info; + void *host; + int flags; + + /* Find the active elements. */ + if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, esize)) { + /* The entire predicate was false; no load occurs. */ + clr_fn(za, 0, reg_max); + return; + } + + /* Probe the page(s). Exit with exception for any invalid page. */ + sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, ra); + + /* Handle watchpoints for all active elements. */ + sve_cont_ldst_watchpoints(&info, env, vg, addr, esize, esize, + BP_MEM_READ, ra); + + /* + * Handle mte checks for all active elements. + * Since TBI must be set for MTE, !mtedesc => !mte_active. + */ + if (mtedesc) { + sve_cont_ldst_mte_check(&info, env, vg, addr, esize, esize, + mtedesc, ra); + } + + flags = info.page[0].flags | info.page[1].flags; + if (unlikely(flags != 0)) { +#ifdef CONFIG_USER_ONLY + g_assert_not_reached(); +#else + /* + * At least one page includes MMIO. 
+ * Any bus operation can fail with cpu_transaction_failed, + * which for ARM will raise SyncExternal. Perform the load + * into scratch memory to preserve register state until the end. + */ + ARMVectorReg scratch = { }; + + reg_off = info.reg_off_first[0]; + reg_last = info.reg_off_last[1]; + if (reg_last < 0) { + reg_last = info.reg_off_split; + if (reg_last < 0) { + reg_last = info.reg_off_last[0]; + } + } + + do { + uint64_t pg = vg[reg_off >> 6]; + do { + if ((pg >> (reg_off & 63)) & 1) { + tlb_fn(env, &scratch, reg_off, addr + reg_off, ra); + } + reg_off += esize; + } while (reg_off & 63); + } while (reg_off <= reg_last); + + cpy_fn(za, &scratch, reg_max); + return; +#endif + } + + /* The entire operation is in RAM, on valid pages. */ + + reg_off = info.reg_off_first[0]; + reg_last = info.reg_off_last[0]; + host = info.page[0].host; + + if (!vertical) { + memset(za, 0, reg_max); + } else if (reg_off) { + clr_fn(za, 0, reg_off); + } + + while (reg_off <= reg_last) { + uint64_t pg = vg[reg_off >> 6]; + do { + if ((pg >> (reg_off & 63)) & 1) { + host_fn(za, reg_off, host + reg_off); + } else if (vertical) { + clr_fn(za, reg_off, esize); + } + reg_off += esize; + } while (reg_off <= reg_last && (reg_off & 63)); + } + + /* + * Use the slow path to manage the cross-page misalignment. + * But we know this is RAM and cannot trap. + */ + reg_off = info.reg_off_split; + if (unlikely(reg_off >= 0)) { + tlb_fn(env, za, reg_off, addr + reg_off, ra); + } + + reg_off = info.reg_off_first[1]; + if (unlikely(reg_off >= 0)) { + reg_last = info.reg_off_last[1]; + host = info.page[1].host; + + do { + uint64_t pg = vg[reg_off >> 6]; + do { + if ((pg >> (reg_off & 63)) & 1) { + host_fn(za, reg_off, host + reg_off); + } else if (vertical) { + clr_fn(za, reg_off, esize); + } + reg_off += esize; + } while (reg_off & 63); + } while (reg_off <= reg_last); + } +} + +static inline QEMU_ALWAYS_INLINE +void sme_ld1_mte(CPUARMState *env, void *za, uint64_t *vg, + target_ulong addr, uint32_t desc, uintptr_t ra, + const int esz, bool vertical, + sve_ldst1_host_fn *host_fn, + sve_ldst1_tlb_fn *tlb_fn, + ClearFn *clr_fn, + CopyFn *cpy_fn) +{ + uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + int bit55 = extract64(addr, 55, 1); + + /* Remove mtedesc from the normal sve descriptor. */ + desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + + /* Perform gross MTE suppression early. 
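+ * If TBI is not enabled for this address, or TCMA suppresses checking
+ * for its tag, mtedesc is zeroed so that sme_ld1() skips MTE checks.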
*/ + if (!tbi_check(desc, bit55) || + tcma_check(desc, bit55, allocation_tag_from_addr(addr))) { + mtedesc = 0; + } + + sme_ld1(env, za, vg, addr, desc, ra, esz, mtedesc, vertical, + host_fn, tlb_fn, clr_fn, cpy_fn); +} + +#define DO_LD(L, END, ESZ) \ +void HELPER(sme_ld1##L##END##_h)(CPUARMState *env, void *za, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \ + sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \ + clear_horizontal, copy_horizontal); \ +} \ +void HELPER(sme_ld1##L##END##_v)(CPUARMState *env, void *za, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \ + sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \ + clear_vertical_##L, copy_vertical_##L); \ +} \ +void HELPER(sme_ld1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \ + sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \ + clear_horizontal, copy_horizontal); \ +} \ +void HELPER(sme_ld1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \ + sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \ + clear_vertical_##L, copy_vertical_##L); \ +} + +DO_LD(b, , MO_8) +DO_LD(h, _be, MO_16) +DO_LD(h, _le, MO_16) +DO_LD(s, _be, MO_32) +DO_LD(s, _le, MO_32) +DO_LD(d, _be, MO_64) +DO_LD(d, _le, MO_64) +DO_LD(q, _be, MO_128) +DO_LD(q, _le, MO_128) + +#undef DO_LD + +/* + * Common helper for all contiguous predicated stores. + */ + +static inline QEMU_ALWAYS_INLINE +void sme_st1(CPUARMState *env, void *za, uint64_t *vg, + const target_ulong addr, uint32_t desc, const uintptr_t ra, + const int esz, uint32_t mtedesc, bool vertical, + sve_ldst1_host_fn *host_fn, + sve_ldst1_tlb_fn *tlb_fn) +{ + const intptr_t reg_max = simd_oprsz(desc); + const intptr_t esize = 1 << esz; + intptr_t reg_off, reg_last; + SVEContLdSt info; + void *host; + int flags; + + /* Find the active elements. */ + if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, esize)) { + /* The entire predicate was false; no store occurs. */ + return; + } + + /* Probe the page(s). Exit with exception for any invalid page. */ + sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_STORE, ra); + + /* Handle watchpoints for all active elements. */ + sve_cont_ldst_watchpoints(&info, env, vg, addr, esize, esize, + BP_MEM_WRITE, ra); + + /* + * Handle mte checks for all active elements. + * Since TBI must be set for MTE, !mtedesc => !mte_active. + */ + if (mtedesc) { + sve_cont_ldst_mte_check(&info, env, vg, addr, esize, esize, + mtedesc, ra); + } + + flags = info.page[0].flags | info.page[1].flags; + if (unlikely(flags != 0)) { +#ifdef CONFIG_USER_ONLY + g_assert_not_reached(); +#else + /* + * At least one page includes MMIO. + * Any bus operation can fail with cpu_transaction_failed, + * which for ARM will raise SyncExternal. We cannot avoid + * this fault and will leave with the store incomplete. 
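+ * Unlike the load path there is no scratch buffer: each active
+ * element is simply stored through tlb_fn below.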
+ */ + reg_off = info.reg_off_first[0]; + reg_last = info.reg_off_last[1]; + if (reg_last < 0) { + reg_last = info.reg_off_split; + if (reg_last < 0) { + reg_last = info.reg_off_last[0]; + } + } + + do { + uint64_t pg = vg[reg_off >> 6]; + do { + if ((pg >> (reg_off & 63)) & 1) { + tlb_fn(env, za, reg_off, addr + reg_off, ra); + } + reg_off += esize; + } while (reg_off & 63); + } while (reg_off <= reg_last); + return; +#endif + } + + reg_off = info.reg_off_first[0]; + reg_last = info.reg_off_last[0]; + host = info.page[0].host; + + while (reg_off <= reg_last) { + uint64_t pg = vg[reg_off >> 6]; + do { + if ((pg >> (reg_off & 63)) & 1) { + host_fn(za, reg_off, host + reg_off); + } + reg_off += 1 << esz; + } while (reg_off <= reg_last && (reg_off & 63)); + } + + /* + * Use the slow path to manage the cross-page misalignment. + * But we know this is RAM and cannot trap. + */ + reg_off = info.reg_off_split; + if (unlikely(reg_off >= 0)) { + tlb_fn(env, za, reg_off, addr + reg_off, ra); + } + + reg_off = info.reg_off_first[1]; + if (unlikely(reg_off >= 0)) { + reg_last = info.reg_off_last[1]; + host = info.page[1].host; + + do { + uint64_t pg = vg[reg_off >> 6]; + do { + if ((pg >> (reg_off & 63)) & 1) { + host_fn(za, reg_off, host + reg_off); + } + reg_off += 1 << esz; + } while (reg_off & 63); + } while (reg_off <= reg_last); + } +} + +static inline QEMU_ALWAYS_INLINE +void sme_st1_mte(CPUARMState *env, void *za, uint64_t *vg, target_ulong addr, + uint32_t desc, uintptr_t ra, int esz, bool vertical, + sve_ldst1_host_fn *host_fn, + sve_ldst1_tlb_fn *tlb_fn) +{ + uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + int bit55 = extract64(addr, 55, 1); + + /* Remove mtedesc from the normal sve descriptor. */ + desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + + /* Perform gross MTE suppression early. 
*/ + if (!tbi_check(desc, bit55) || + tcma_check(desc, bit55, allocation_tag_from_addr(addr))) { + mtedesc = 0; + } + + sme_st1(env, za, vg, addr, desc, ra, esz, mtedesc, + vertical, host_fn, tlb_fn); +} + +#define DO_ST(L, END, ESZ) \ +void HELPER(sme_st1##L##END##_h)(CPUARMState *env, void *za, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \ + sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \ +} \ +void HELPER(sme_st1##L##END##_v)(CPUARMState *env, void *za, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \ + sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \ +} \ +void HELPER(sme_st1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \ + sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \ +} \ +void HELPER(sme_st1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \ + sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \ +} + +DO_ST(b, , MO_8) +DO_ST(h, _be, MO_16) +DO_ST(h, _le, MO_16) +DO_ST(s, _be, MO_32) +DO_ST(s, _le, MO_32) +DO_ST(d, _be, MO_64) +DO_ST(d, _le, MO_64) +DO_ST(q, _be, MO_128) +DO_ST(q, _le, MO_128) + +#undef DO_ST + +void HELPER(sme_addha_s)(void *vzda, void *vzn, void *vpn, + void *vpm, uint32_t desc) +{ + intptr_t row, col, oprsz = simd_oprsz(desc) / 4; + uint64_t *pn = vpn, *pm = vpm; + uint32_t *zda = vzda, *zn = vzn; + + for (row = 0; row < oprsz; ) { + uint64_t pa = pn[row >> 4]; + do { + if (pa & 1) { + for (col = 0; col < oprsz; ) { + uint64_t pb = pm[col >> 4]; + do { + if (pb & 1) { + zda[tile_vslice_index(row) + H4(col)] += zn[H4(col)]; + } + pb >>= 4; + } while (++col & 15); + } + } + pa >>= 4; + } while (++row & 15); + } +} + +void HELPER(sme_addha_d)(void *vzda, void *vzn, void *vpn, + void *vpm, uint32_t desc) +{ + intptr_t row, col, oprsz = simd_oprsz(desc) / 8; + uint8_t *pn = vpn, *pm = vpm; + uint64_t *zda = vzda, *zn = vzn; + + for (row = 0; row < oprsz; ++row) { + if (pn[H1(row)] & 1) { + for (col = 0; col < oprsz; ++col) { + if (pm[H1(col)] & 1) { + zda[tile_vslice_index(row) + col] += zn[col]; + } + } + } + } +} + +void HELPER(sme_addva_s)(void *vzda, void *vzn, void *vpn, + void *vpm, uint32_t desc) +{ + intptr_t row, col, oprsz = simd_oprsz(desc) / 4; + uint64_t *pn = vpn, *pm = vpm; + uint32_t *zda = vzda, *zn = vzn; + + for (row = 0; row < oprsz; ) { + uint64_t pa = pn[row >> 4]; + do { + if (pa & 1) { + uint32_t zn_row = zn[H4(row)]; + for (col = 0; col < oprsz; ) { + uint64_t pb = pm[col >> 4]; + do { + if (pb & 1) { + zda[tile_vslice_index(row) + H4(col)] += zn_row; + } + pb >>= 4; + } while (++col & 15); + } + } + pa >>= 4; + } while (++row & 15); + } +} + +void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn, + void *vpm, uint32_t desc) +{ + intptr_t row, col, oprsz = simd_oprsz(desc) / 8; + uint8_t *pn = vpn, *pm = vpm; + uint64_t *zda = vzda, *zn = vzn; + + for (row = 0; row < oprsz; ++row) { + if (pn[H1(row)] & 1) { + uint64_t zn_row = zn[row]; + for (col = 0; col < oprsz; ++col) { + if (pm[H1(col)] & 1) { + zda[tile_vslice_index(row) + col] += zn_row; + } + } + } + } +} + +void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, void *vst, uint32_t desc) +{ + intptr_t row, col, oprsz = simd_maxsz(desc); + uint32_t neg = simd_data(desc) << 31; + 
uint16_t *pn = vpn, *pm = vpm; + float_status fpst; + + /* + * Make a copy of float_status because this operation does not + * update the cumulative fp exception status. It also produces + * default nans. + */ + fpst = *(float_status *)vst; + set_default_nan_mode(true, &fpst); + + for (row = 0; row < oprsz; ) { + uint16_t pa = pn[H2(row >> 4)]; + do { + if (pa & 1) { + void *vza_row = vza + tile_vslice_offset(row); + uint32_t n = *(uint32_t *)(vzn + H1_4(row)) ^ neg; + + for (col = 0; col < oprsz; ) { + uint16_t pb = pm[H2(col >> 4)]; + do { + if (pb & 1) { + uint32_t *a = vza_row + H1_4(col); + uint32_t *m = vzm + H1_4(col); + *a = float32_muladd(n, *m, *a, 0, vst); + } + col += 4; + pb >>= 4; + } while (col & 15); + } + } + row += 4; + pa >>= 4; + } while (row & 15); + } +} + +void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, void *vst, uint32_t desc) +{ + intptr_t row, col, oprsz = simd_oprsz(desc) / 8; + uint64_t neg = (uint64_t)simd_data(desc) << 63; + uint64_t *za = vza, *zn = vzn, *zm = vzm; + uint8_t *pn = vpn, *pm = vpm; + float_status fpst = *(float_status *)vst; + + set_default_nan_mode(true, &fpst); + + for (row = 0; row < oprsz; ++row) { + if (pn[H1(row)] & 1) { + uint64_t *za_row = &za[tile_vslice_index(row)]; + uint64_t n = zn[row] ^ neg; + + for (col = 0; col < oprsz; ++col) { + if (pm[H1(col)] & 1) { + uint64_t *a = &za_row[col]; + *a = float64_muladd(n, zm[col], *a, 0, &fpst); + } + } + } + } +} + +/* + * Alter PAIR as needed for controlling predicates being false, + * and for NEG on an enabled row element. + */ +static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg) +{ + /* + * The pseudocode uses a conditional negate after the conditional zero. + * It is simpler here to unconditionally negate before conditional zero. + */ + pair ^= neg; + if (!(pg & 1)) { + pair &= 0xffff0000u; + } + if (!(pg & 4)) { + pair &= 0x0000ffffu; + } + return pair; +} + +static float32 f16_dotadd(float32 sum, uint32_t e1, uint32_t e2, + float_status *s_std, float_status *s_odd) +{ + float64 e1r = float16_to_float64(e1 & 0xffff, true, s_std); + float64 e1c = float16_to_float64(e1 >> 16, true, s_std); + float64 e2r = float16_to_float64(e2 & 0xffff, true, s_std); + float64 e2c = float16_to_float64(e2 >> 16, true, s_std); + float64 t64; + float32 t32; + + /* + * The ARM pseudocode function FPDot performs both multiplies + * and the add with a single rounding operation. Emulate this + * by performing the first multiply in round-to-odd, then doing + * the second multiply as fused multiply-add, and rounding to + * float32 all in one step. + */ + t64 = float64_mul(e1r, e2r, s_odd); + t64 = float64r32_muladd(e1c, e2c, t64, 0, s_std); + + /* This conversion is exact, because we've already rounded. */ + t32 = float64_to_float32(t64, s_std); + + /* The final accumulation step is not fused. */ + return float32_add(sum, t32, s_std); +} + +void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, void *vst, uint32_t desc) +{ + intptr_t row, col, oprsz = simd_maxsz(desc); + uint32_t neg = simd_data(desc) * 0x80008000u; + uint16_t *pn = vpn, *pm = vpm; + float_status fpst_odd, fpst_std; + + /* + * Make a copy of float_status because this operation does not + * update the cumulative fp exception status. It also produces + * default nans. Make a second copy with round-to-odd -- see above. 
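+ * fpst_std keeps the normal rounding mode; fpst_odd is switched to
+ * round-to-odd and is used for the first multiply in f16_dotadd().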
+ */ + fpst_std = *(float_status *)vst; + set_default_nan_mode(true, &fpst_std); + fpst_odd = fpst_std; + set_float_rounding_mode(float_round_to_odd, &fpst_odd); + + for (row = 0; row < oprsz; ) { + uint16_t prow = pn[H2(row >> 4)]; + do { + void *vza_row = vza + tile_vslice_offset(row); + uint32_t n = *(uint32_t *)(vzn + H1_4(row)); + + n = f16mop_adj_pair(n, prow, neg); + + for (col = 0; col < oprsz; ) { + uint16_t pcol = pm[H2(col >> 4)]; + do { + if (prow & pcol & 0b0101) { + uint32_t *a = vza_row + H1_4(col); + uint32_t m = *(uint32_t *)(vzm + H1_4(col)); + + m = f16mop_adj_pair(m, pcol, 0); + *a = f16_dotadd(*a, n, m, &fpst_std, &fpst_odd); + + col += 4; + pcol >>= 4; + } + } while (col & 15); + } + row += 4; + prow >>= 4; + } while (row & 15); + } +} + +void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, uint32_t desc) +{ + intptr_t row, col, oprsz = simd_maxsz(desc); + uint32_t neg = simd_data(desc) * 0x80008000u; + uint16_t *pn = vpn, *pm = vpm; + + for (row = 0; row < oprsz; ) { + uint16_t prow = pn[H2(row >> 4)]; + do { + void *vza_row = vza + tile_vslice_offset(row); + uint32_t n = *(uint32_t *)(vzn + H1_4(row)); + + n = f16mop_adj_pair(n, prow, neg); + + for (col = 0; col < oprsz; ) { + uint16_t pcol = pm[H2(col >> 4)]; + do { + if (prow & pcol & 0b0101) { + uint32_t *a = vza_row + H1_4(col); + uint32_t m = *(uint32_t *)(vzm + H1_4(col)); + + m = f16mop_adj_pair(m, pcol, 0); + *a = bfdotadd(*a, n, m); + + col += 4; + pcol >>= 4; + } + } while (col & 15); + } + row += 4; + prow >>= 4; + } while (row & 15); + } +} + +typedef uint64_t IMOPFn(uint64_t, uint64_t, uint64_t, uint8_t, bool); + +static inline void do_imopa(uint64_t *za, uint64_t *zn, uint64_t *zm, + uint8_t *pn, uint8_t *pm, + uint32_t desc, IMOPFn *fn) +{ + intptr_t row, col, oprsz = simd_oprsz(desc) / 8; + bool neg = simd_data(desc); + + for (row = 0; row < oprsz; ++row) { + uint8_t pa = pn[H1(row)]; + uint64_t *za_row = &za[tile_vslice_index(row)]; + uint64_t n = zn[row]; + + for (col = 0; col < oprsz; ++col) { + uint8_t pb = pm[H1(col)]; + uint64_t *a = &za_row[col]; + + *a = fn(n, zm[col], *a, pa & pb, neg); + } + } +} + +#define DEF_IMOP_32(NAME, NTYPE, MTYPE) \ +static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \ +{ \ + uint32_t sum0 = 0, sum1 = 0; \ + /* Apply P to N as a mask, making the inactive elements 0. */ \ + n &= expand_pred_b(p); \ + sum0 += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \ + sum0 += (NTYPE)(n >> 8) * (MTYPE)(m >> 8); \ + sum0 += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \ + sum0 += (NTYPE)(n >> 24) * (MTYPE)(m >> 24); \ + sum1 += (NTYPE)(n >> 32) * (MTYPE)(m >> 32); \ + sum1 += (NTYPE)(n >> 40) * (MTYPE)(m >> 40); \ + sum1 += (NTYPE)(n >> 48) * (MTYPE)(m >> 48); \ + sum1 += (NTYPE)(n >> 56) * (MTYPE)(m >> 56); \ + if (neg) { \ + sum0 = (uint32_t)a - sum0, sum1 = (uint32_t)(a >> 32) - sum1; \ + } else { \ + sum0 = (uint32_t)a + sum0, sum1 = (uint32_t)(a >> 32) + sum1; \ + } \ + return ((uint64_t)sum1 << 32) | sum0; \ +} + +#define DEF_IMOP_64(NAME, NTYPE, MTYPE) \ +static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \ +{ \ + uint64_t sum = 0; \ + /* Apply P to N as a mask, making the inactive elements 0. */ \ + n &= expand_pred_h(p); \ + sum += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \ + sum += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \ + sum += (NTYPE)(n >> 32) * (MTYPE)(m >> 32); \ + sum += (NTYPE)(n >> 48) * (MTYPE)(m >> 48); \ + return neg ? 
a - sum : a + sum; \ +} + +DEF_IMOP_32(smopa_s, int8_t, int8_t) +DEF_IMOP_32(umopa_s, uint8_t, uint8_t) +DEF_IMOP_32(sumopa_s, int8_t, uint8_t) +DEF_IMOP_32(usmopa_s, uint8_t, int8_t) + +DEF_IMOP_64(smopa_d, int16_t, int16_t) +DEF_IMOP_64(umopa_d, uint16_t, uint16_t) +DEF_IMOP_64(sumopa_d, int16_t, uint16_t) +DEF_IMOP_64(usmopa_d, uint16_t, int16_t) + +#define DEF_IMOPH(NAME) \ + void HELPER(sme_##NAME)(void *vza, void *vzn, void *vzm, void *vpn, \ + void *vpm, uint32_t desc) \ + { do_imopa(vza, vzn, vzm, vpn, vpm, desc, NAME); } + +DEF_IMOPH(smopa_s) +DEF_IMOPH(umopa_s) +DEF_IMOPH(sumopa_s) +DEF_IMOPH(usmopa_s) +DEF_IMOPH(smopa_d) +DEF_IMOPH(umopa_d) +DEF_IMOPH(sumopa_d) +DEF_IMOPH(usmopa_d) diff --git a/target/arm/sve.decode b/target/arm/sve.decode index c60b9f0fec..14b3a69c36 100644 --- a/target/arm/sve.decode +++ b/target/arm/sve.decode @@ -449,14 +449,17 @@ INDEX_ri 00000100 esz:2 1 imm:s5 010001 rn:5 rd:5 # SVE index generation (register start, register increment) INDEX_rr 00000100 .. 1 ..... 010011 ..... ..... @rd_rn_rm -### SVE Stack Allocation Group +### SVE / Streaming SVE Stack Allocation Group # SVE stack frame adjustment ADDVL 00000100 001 ..... 01010 ...... ..... @rd_rn_i6 +ADDSVL 00000100 001 ..... 01011 ...... ..... @rd_rn_i6 ADDPL 00000100 011 ..... 01010 ...... ..... @rd_rn_i6 +ADDSPL 00000100 011 ..... 01011 ...... ..... @rd_rn_i6 # SVE stack frame size RDVL 00000100 101 11111 01010 imm:s6 rd:5 +RDSVL 00000100 101 11111 01011 imm:s6 rd:5 ### SVE Bitwise Shift - Unpredicated Group @@ -528,8 +531,14 @@ DUPM 00000101 11 0000 dbm:13 rd:5 FCPY 00000101 .. 01 .... 110 imm:8 ..... @rdn_pg4 # SVE copy integer immediate (predicated) -CPY_m_i 00000101 .. 01 .... 01 . ........ ..... @rdn_pg4 imm=%sh8_i8s -CPY_z_i 00000101 .. 01 .... 00 . ........ ..... @rdn_pg4 imm=%sh8_i8s +{ + INVALID 00000101 00 01 ---- 01 1 -------- ----- + CPY_m_i 00000101 .. 01 .... 01 . ........ ..... @rdn_pg4 imm=%sh8_i8s +} +{ + INVALID 00000101 00 01 ---- 00 1 -------- ----- + CPY_z_i 00000101 .. 01 .... 00 . ........ ..... @rdn_pg4 imm=%sh8_i8s +} ### SVE Permute - Extract Group @@ -643,6 +652,7 @@ REVB 00000101 .. 1001 00 100 ... ..... ..... @rd_pg_rn REVH 00000101 .. 1001 01 100 ... ..... ..... @rd_pg_rn REVW 00000101 .. 1001 10 100 ... ..... ..... @rd_pg_rn RBIT 00000101 .. 1001 11 100 ... ..... ..... @rd_pg_rn +REVD 00000101 00 1011 10 100 ... ..... ..... @rd_pg_rn_e0 # SVE vector splice (predicated, destructive) SPLICE 00000101 .. 101 100 100 ... ..... ..... @rdn_pg_rm @@ -787,16 +797,40 @@ WHILE_ptr 00100101 esz:2 1 rm:5 001 100 rn:5 rw:1 rd:4 FDUP 00100101 esz:2 111 00 1110 imm:8 rd:5 # SVE broadcast integer immediate (unpredicated) -DUP_i 00100101 esz:2 111 00 011 . ........ rd:5 imm=%sh8_i8s +{ + INVALID 00100101 00 111 00 011 1 -------- ----- + DUP_i 00100101 esz:2 111 00 011 . ........ rd:5 imm=%sh8_i8s +} # SVE integer add/subtract immediate (unpredicated) -ADD_zzi 00100101 .. 100 000 11 . ........ ..... @rdn_sh_i8u -SUB_zzi 00100101 .. 100 001 11 . ........ ..... @rdn_sh_i8u -SUBR_zzi 00100101 .. 100 011 11 . ........ ..... @rdn_sh_i8u -SQADD_zzi 00100101 .. 100 100 11 . ........ ..... @rdn_sh_i8u -UQADD_zzi 00100101 .. 100 101 11 . ........ ..... @rdn_sh_i8u -SQSUB_zzi 00100101 .. 100 110 11 . ........ ..... @rdn_sh_i8u -UQSUB_zzi 00100101 .. 100 111 11 . ........ ..... @rdn_sh_i8u +{ + INVALID 00100101 00 100 000 11 1 -------- ----- + ADD_zzi 00100101 .. 100 000 11 . ........ ..... @rdn_sh_i8u +} +{ + INVALID 00100101 00 100 001 11 1 -------- ----- + SUB_zzi 00100101 .. 100 001 11 . 
........ ..... @rdn_sh_i8u +} +{ + INVALID 00100101 00 100 011 11 1 -------- ----- + SUBR_zzi 00100101 .. 100 011 11 . ........ ..... @rdn_sh_i8u +} +{ + INVALID 00100101 00 100 100 11 1 -------- ----- + SQADD_zzi 00100101 .. 100 100 11 . ........ ..... @rdn_sh_i8u +} +{ + INVALID 00100101 00 100 101 11 1 -------- ----- + UQADD_zzi 00100101 .. 100 101 11 . ........ ..... @rdn_sh_i8u +} +{ + INVALID 00100101 00 100 110 11 1 -------- ----- + SQSUB_zzi 00100101 .. 100 110 11 . ........ ..... @rdn_sh_i8u +} +{ + INVALID 00100101 00 100 111 11 1 -------- ----- + UQSUB_zzi 00100101 .. 100 111 11 . ........ ..... @rdn_sh_i8u +} # SVE integer min/max immediate (unpredicated) SMAX_zzi 00100101 .. 101 000 110 ........ ..... @rdn_i8s @@ -1153,10 +1187,10 @@ LD1RO_zpri 1010010 .. 01 0.... 001 ... ..... ..... \ @rpri_load_msz nreg=0 # SVE 32-bit gather prefetch (scalar plus 32-bit scaled offsets) -PRF 1000010 00 -1 ----- 0-- --- ----- 0 ---- +PRF_ns 1000010 00 -1 ----- 0-- --- ----- 0 ---- # SVE 32-bit gather prefetch (vector plus immediate) -PRF 1000010 -- 00 ----- 111 --- ----- 0 ---- +PRF_ns 1000010 -- 00 ----- 111 --- ----- 0 ---- # SVE contiguous prefetch (scalar plus immediate) PRF 1000010 11 1- ----- 0-- --- ----- 0 ---- @@ -1193,13 +1227,13 @@ LD1_zpiz 1100010 .. 01 ..... 1.. ... ..... ..... \ @rpri_g_load esz=3 # SVE 64-bit gather prefetch (scalar plus 64-bit scaled offsets) -PRF 1100010 00 11 ----- 1-- --- ----- 0 ---- +PRF_ns 1100010 00 11 ----- 1-- --- ----- 0 ---- # SVE 64-bit gather prefetch (scalar plus unpacked 32-bit scaled offsets) -PRF 1100010 00 -1 ----- 0-- --- ----- 0 ---- +PRF_ns 1100010 00 -1 ----- 0-- --- ----- 0 ---- # SVE 64-bit gather prefetch (vector plus immediate) -PRF 1100010 -- 00 ----- 111 --- ----- 0 ---- +PRF_ns 1100010 -- 00 ----- 111 --- ----- 0 ---- ### SVE Memory Store Group @@ -1568,17 +1602,15 @@ SQRDCMLAH_zzzz 01000100 esz:2 0 rm:5 0011 rot:2 rn:5 rd:5 ra=%reg_movprfx USDOT_zzzz 01000100 .. 0 ..... 011 110 ..... ..... @rda_rn_rm ### SVE2 floating point matrix multiply accumulate -{ - BFMMLA 01100100 01 1 ..... 111 001 ..... ..... @rda_rn_rm_e0 - FMMLA 01100100 .. 1 ..... 111 001 ..... ..... @rda_rn_rm -} +BFMMLA 01100100 01 1 ..... 111 001 ..... ..... @rda_rn_rm_e0 +FMMLA_s 01100100 10 1 ..... 111 001 ..... ..... @rda_rn_rm_e0 +FMMLA_d 01100100 11 1 ..... 111 001 ..... ..... @rda_rn_rm_e0 ### SVE2 Memory Gather Load Group -# SVE2 64-bit gather non-temporal load -# (scalar plus unpacked 32-bit unscaled offsets) +# SVE2 64-bit gather non-temporal load (scalar plus 64-bit unscaled offsets) LDNT1_zprz 1100010 msz:2 00 rm:5 1 u:1 0 pg:3 rn:5 rd:5 \ - &rprr_gather_load xs=0 esz=3 scale=0 ff=0 + &rprr_gather_load xs=2 esz=3 scale=0 ff=0 # SVE2 32-bit gather non-temporal load (scalar plus 32-bit unscaled offsets) LDNT1_zprz 1000010 msz:2 00 rm:5 10 u:1 pg:3 rn:5 rd:5 \ @@ -1643,3 +1675,28 @@ BFMLALT_zzxw 01100100 11 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2 ### SVE2 floating-point bfloat16 dot-product (indexed) BFDOT_zzxz 01100100 01 1 ..... 010000 ..... ..... @rrxr_2 esz=2 + +### SVE broadcast predicate element + +&psel esz pd pn pm rv imm +%psel_rv 16:2 !function=plus_12 +%psel_imm_b 22:2 19:2 +%psel_imm_h 22:2 20:1 +%psel_imm_s 22:2 +%psel_imm_d 23:1 +@psel ........ .. . ... .. .. pn:4 . pm:4 . pd:4 \ + &psel rv=%psel_rv + +PSEL 00100101 .. 1 ..1 .. 01 .... 0 .... 0 .... \ + @psel esz=0 imm=%psel_imm_b +PSEL 00100101 .. 1 .10 .. 01 .... 0 .... 0 .... \ + @psel esz=1 imm=%psel_imm_h +PSEL 00100101 .. 1 100 .. 01 .... 0 .... 0 .... 
\ + @psel esz=2 imm=%psel_imm_s +PSEL 00100101 .1 1 000 .. 01 .... 0 .... 0 .... \ + @psel esz=3 imm=%psel_imm_d + +### SVE clamp + +SCLAMP 01000100 .. 0 ..... 110000 ..... ..... @rda_rn_rm +UCLAMP 01000100 .. 0 ..... 110001 ..... ..... @rda_rn_rm diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c index dab5f1d1cd..27838fb6e2 100644 --- a/target/arm/sve_helper.c +++ b/target/arm/sve_helper.c @@ -21,12 +21,12 @@ #include "cpu.h" #include "internals.h" #include "exec/exec-all.h" -#include "exec/cpu_ldst.h" #include "exec/helper-proto.h" #include "tcg/tcg-gvec-desc.h" #include "fpu/softfloat.h" #include "tcg/tcg.h" #include "vec_internal.h" +#include "sve_ldst_internal.h" /* Return a value for NZCV as per the ARM PredTest pseudofunction. @@ -103,44 +103,6 @@ uint32_t HELPER(sve_predtest)(void *vd, void *vg, uint32_t words) return flags; } -/* - * Expand active predicate bits to bytes, for byte elements. - * (The data table itself is in vec_helper.c as MVE also needs it.) - */ -static inline uint64_t expand_pred_b(uint8_t byte) -{ - return expand_pred_b_data[byte]; -} - -/* Similarly for half-word elements. - * for (i = 0; i < 256; ++i) { - * unsigned long m = 0; - * if (i & 0xaa) { - * continue; - * } - * for (j = 0; j < 8; j += 2) { - * if ((i >> j) & 1) { - * m |= 0xfffful << (j << 3); - * } - * } - * printf("[0x%x] = 0x%016lx,\n", i, m); - * } - */ -static inline uint64_t expand_pred_h(uint8_t byte) -{ - static const uint64_t word[] = { - [0x01] = 0x000000000000ffff, [0x04] = 0x00000000ffff0000, - [0x05] = 0x00000000ffffffff, [0x10] = 0x0000ffff00000000, - [0x11] = 0x0000ffff0000ffff, [0x14] = 0x0000ffffffff0000, - [0x15] = 0x0000ffffffffffff, [0x40] = 0xffff000000000000, - [0x41] = 0xffff00000000ffff, [0x44] = 0xffff0000ffff0000, - [0x45] = 0xffff0000ffffffff, [0x50] = 0xffffffff00000000, - [0x51] = 0xffffffff0000ffff, [0x54] = 0xffffffffffff0000, - [0x55] = 0xffffffffffffffff, - }; - return word[byte & 0x55]; -} - /* Similarly for single word elements. */ static inline uint64_t expand_pred_s(uint8_t byte) { @@ -969,6 +931,22 @@ DO_ZPZ_D(sve_revh_d, uint64_t, hswap64) DO_ZPZ_D(sve_revw_d, uint64_t, wswap64) +void HELPER(sme_revd_q)(void *vd, void *vn, void *vg, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t *d = vd, *n = vn; + uint8_t *pg = vg; + + for (i = 0; i < opr_sz; i += 2) { + if (pg[H1(i)] & 1) { + uint64_t n0 = n[i + 0]; + uint64_t n1 = n[i + 1]; + d[i + 0] = n1; + d[i + 1] = n0; + } + } +} + DO_ZPZ(sve_rbit_b, uint8_t, H1, revbit8) DO_ZPZ(sve_rbit_h, uint16_t, H1_2, revbit16) DO_ZPZ(sve_rbit_s, uint32_t, H1_4, revbit32) @@ -2802,7 +2780,7 @@ static void swap_memmove(void *vd, void *vs, size_t n) uintptr_t o = (d | s | n) & 7; size_t i; -#ifndef HOST_WORDS_BIGENDIAN +#if !HOST_BIG_ENDIAN o = 0; #endif switch (o) { @@ -2864,7 +2842,7 @@ static void swap_memzero(void *vd, size_t n) return; } -#ifndef HOST_WORDS_BIGENDIAN +#if !HOST_BIG_ENDIAN o = 0; #endif switch (o) { @@ -3382,19 +3360,21 @@ void HELPER(sve_punpk_p)(void *vd, void *vn, uint32_t pred_desc) void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ { \ intptr_t oprsz = simd_oprsz(desc); \ + intptr_t odd_ofs = simd_data(desc); \ intptr_t i, oprsz_2 = oprsz / 2; \ ARMVectorReg tmp_n, tmp_m; \ /* We produce output faster than we consume input. \ Therefore we must be mindful of possible overlap. 
*/ \ if (unlikely((vn - vd) < (uintptr_t)oprsz)) { \ - vn = memcpy(&tmp_n, vn, oprsz_2); \ + vn = memcpy(&tmp_n, vn, oprsz); \ } \ if (unlikely((vm - vd) < (uintptr_t)oprsz)) { \ - vm = memcpy(&tmp_m, vm, oprsz_2); \ + vm = memcpy(&tmp_m, vm, oprsz); \ } \ for (i = 0; i < oprsz_2; i += sizeof(TYPE)) { \ - *(TYPE *)(vd + H(2 * i + 0)) = *(TYPE *)(vn + H(i)); \ - *(TYPE *)(vd + H(2 * i + sizeof(TYPE))) = *(TYPE *)(vm + H(i)); \ + *(TYPE *)(vd + H(2 * i + 0)) = *(TYPE *)(vn + odd_ofs + H(i)); \ + *(TYPE *)(vd + H(2 * i + sizeof(TYPE))) = \ + *(TYPE *)(vm + odd_ofs + H(i)); \ } \ if (sizeof(TYPE) == 16 && unlikely(oprsz & 16)) { \ memset(vd + oprsz - 16, 0, 16); \ @@ -3601,6 +3581,18 @@ void HELPER(sve_sel_zpzz_d)(void *vd, void *vn, void *vm, } } +void HELPER(sve_sel_zpzz_q)(void *vd, void *vn, void *vm, + void *vg, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 16; + Int128 *d = vd, *n = vn, *m = vm; + uint16_t *pg = vg; + + for (i = 0; i < opr_sz; i += 1) { + d[i] = (pg[H2(i)] & 1 ? n : m)[i]; + } +} + /* Two operand comparison controlled by a predicate. * ??? It is very tempting to want to be able to expand this inline * with x86 instructions, e.g. @@ -5299,111 +5291,6 @@ void HELPER(sve_fcmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va, * Load contiguous data, protected by a governing predicate. */ -/* - * Load one element into @vd + @reg_off from @host. - * The controlling predicate is known to be true. - */ -typedef void sve_ldst1_host_fn(void *vd, intptr_t reg_off, void *host); - -/* - * Load one element into @vd + @reg_off from (@env, @vaddr, @ra). - * The controlling predicate is known to be true. - */ -typedef void sve_ldst1_tlb_fn(CPUARMState *env, void *vd, intptr_t reg_off, - target_ulong vaddr, uintptr_t retaddr); - -/* - * Generate the above primitives. 
- */ - -#define DO_LD_HOST(NAME, H, TYPEE, TYPEM, HOST) \ -static void sve_##NAME##_host(void *vd, intptr_t reg_off, void *host) \ -{ \ - TYPEM val = HOST(host); \ - *(TYPEE *)(vd + H(reg_off)) = val; \ -} - -#define DO_ST_HOST(NAME, H, TYPEE, TYPEM, HOST) \ -static void sve_##NAME##_host(void *vd, intptr_t reg_off, void *host) \ -{ HOST(host, (TYPEM)*(TYPEE *)(vd + H(reg_off))); } - -#define DO_LD_TLB(NAME, H, TYPEE, TYPEM, TLB) \ -static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \ - target_ulong addr, uintptr_t ra) \ -{ \ - *(TYPEE *)(vd + H(reg_off)) = \ - (TYPEM)TLB(env, useronly_clean_ptr(addr), ra); \ -} - -#define DO_ST_TLB(NAME, H, TYPEE, TYPEM, TLB) \ -static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \ - target_ulong addr, uintptr_t ra) \ -{ \ - TLB(env, useronly_clean_ptr(addr), \ - (TYPEM)*(TYPEE *)(vd + H(reg_off)), ra); \ -} - -#define DO_LD_PRIM_1(NAME, H, TE, TM) \ - DO_LD_HOST(NAME, H, TE, TM, ldub_p) \ - DO_LD_TLB(NAME, H, TE, TM, cpu_ldub_data_ra) - -DO_LD_PRIM_1(ld1bb, H1, uint8_t, uint8_t) -DO_LD_PRIM_1(ld1bhu, H1_2, uint16_t, uint8_t) -DO_LD_PRIM_1(ld1bhs, H1_2, uint16_t, int8_t) -DO_LD_PRIM_1(ld1bsu, H1_4, uint32_t, uint8_t) -DO_LD_PRIM_1(ld1bss, H1_4, uint32_t, int8_t) -DO_LD_PRIM_1(ld1bdu, H1_8, uint64_t, uint8_t) -DO_LD_PRIM_1(ld1bds, H1_8, uint64_t, int8_t) - -#define DO_ST_PRIM_1(NAME, H, TE, TM) \ - DO_ST_HOST(st1##NAME, H, TE, TM, stb_p) \ - DO_ST_TLB(st1##NAME, H, TE, TM, cpu_stb_data_ra) - -DO_ST_PRIM_1(bb, H1, uint8_t, uint8_t) -DO_ST_PRIM_1(bh, H1_2, uint16_t, uint8_t) -DO_ST_PRIM_1(bs, H1_4, uint32_t, uint8_t) -DO_ST_PRIM_1(bd, H1_8, uint64_t, uint8_t) - -#define DO_LD_PRIM_2(NAME, H, TE, TM, LD) \ - DO_LD_HOST(ld1##NAME##_be, H, TE, TM, LD##_be_p) \ - DO_LD_HOST(ld1##NAME##_le, H, TE, TM, LD##_le_p) \ - DO_LD_TLB(ld1##NAME##_be, H, TE, TM, cpu_##LD##_be_data_ra) \ - DO_LD_TLB(ld1##NAME##_le, H, TE, TM, cpu_##LD##_le_data_ra) - -#define DO_ST_PRIM_2(NAME, H, TE, TM, ST) \ - DO_ST_HOST(st1##NAME##_be, H, TE, TM, ST##_be_p) \ - DO_ST_HOST(st1##NAME##_le, H, TE, TM, ST##_le_p) \ - DO_ST_TLB(st1##NAME##_be, H, TE, TM, cpu_##ST##_be_data_ra) \ - DO_ST_TLB(st1##NAME##_le, H, TE, TM, cpu_##ST##_le_data_ra) - -DO_LD_PRIM_2(hh, H1_2, uint16_t, uint16_t, lduw) -DO_LD_PRIM_2(hsu, H1_4, uint32_t, uint16_t, lduw) -DO_LD_PRIM_2(hss, H1_4, uint32_t, int16_t, lduw) -DO_LD_PRIM_2(hdu, H1_8, uint64_t, uint16_t, lduw) -DO_LD_PRIM_2(hds, H1_8, uint64_t, int16_t, lduw) - -DO_ST_PRIM_2(hh, H1_2, uint16_t, uint16_t, stw) -DO_ST_PRIM_2(hs, H1_4, uint32_t, uint16_t, stw) -DO_ST_PRIM_2(hd, H1_8, uint64_t, uint16_t, stw) - -DO_LD_PRIM_2(ss, H1_4, uint32_t, uint32_t, ldl) -DO_LD_PRIM_2(sdu, H1_8, uint64_t, uint32_t, ldl) -DO_LD_PRIM_2(sds, H1_8, uint64_t, int32_t, ldl) - -DO_ST_PRIM_2(ss, H1_4, uint32_t, uint32_t, stl) -DO_ST_PRIM_2(sd, H1_8, uint64_t, uint32_t, stl) - -DO_LD_PRIM_2(dd, H1_8, uint64_t, uint64_t, ldq) -DO_ST_PRIM_2(dd, H1_8, uint64_t, uint64_t, stq) - -#undef DO_LD_TLB -#undef DO_ST_TLB -#undef DO_LD_HOST -#undef DO_LD_PRIM_1 -#undef DO_ST_PRIM_1 -#undef DO_LD_PRIM_2 -#undef DO_ST_PRIM_2 - /* * Skip through a sequence of inactive elements in the guarding predicate @vg, * beginning at @reg_off bounded by @reg_max. Return the offset of the active @@ -5444,16 +5331,9 @@ static intptr_t find_next_active(uint64_t *vg, intptr_t reg_off, * exit via page fault exception. 
*/ -typedef struct { - void *host; - int flags; - MemTxAttrs attrs; -} SVEHostPage; - -static bool sve_probe_page(SVEHostPage *info, bool nofault, - CPUARMState *env, target_ulong addr, - int mem_off, MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr) +bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env, + target_ulong addr, int mem_off, MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr) { int flags; @@ -5471,8 +5351,14 @@ static bool sve_probe_page(SVEHostPage *info, bool nofault, */ addr = useronly_clean_ptr(addr); +#ifdef CONFIG_USER_ONLY flags = probe_access_flags(env, addr, access_type, mmu_idx, nofault, &info->host, retaddr); +#else + CPUTLBEntryFull *full; + flags = probe_access_full(env, addr, access_type, mmu_idx, nofault, + &info->host, &full, retaddr); +#endif info->flags = flags; if (flags & TLB_INVALID_MASK) { @@ -5480,88 +5366,27 @@ static bool sve_probe_page(SVEHostPage *info, bool nofault, return false; } - /* Ensure that info->host[] is relative to addr, not addr + mem_off. */ - info->host -= mem_off; - #ifdef CONFIG_USER_ONLY memset(&info->attrs, 0, sizeof(info->attrs)); + /* Require both ANON and MTE; see allocation_tag_mem(). */ + info->tagged = (flags & PAGE_ANON) && (flags & PAGE_MTE); #else - /* - * Find the iotlbentry for addr and return the transaction attributes. - * This *must* be present in the TLB because we just found the mapping. - */ - { - uintptr_t index = tlb_index(env, mmu_idx, addr); - -# ifdef CONFIG_DEBUG_TCG - CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); - target_ulong comparator = (access_type == MMU_DATA_LOAD - ? entry->addr_read - : tlb_addr_write(entry)); - g_assert(tlb_hit(comparator, addr)); -# endif - - CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; - info->attrs = iotlbentry->attrs; - } + info->attrs = full->attrs; + info->tagged = full->pte_attrs == 0xf0; #endif + /* Ensure that info->host[] is relative to addr, not addr + mem_off. */ + info->host -= mem_off; return true; } - -/* - * Analyse contiguous data, protected by a governing predicate. - */ - -typedef enum { - FAULT_NO, - FAULT_FIRST, - FAULT_ALL, -} SVEContFault; - -typedef struct { - /* - * First and last element wholly contained within the two pages. - * mem_off_first[0] and reg_off_first[0] are always set >= 0. - * reg_off_last[0] may be < 0 if the first element crosses pages. - * All of mem_off_first[1], reg_off_first[1] and reg_off_last[1] - * are set >= 0 only if there are complete elements on a second page. - * - * The reg_off_* offsets are relative to the internal vector register. - * The mem_off_first offset is relative to the memory address; the - * two offsets are different when a load operation extends, a store - * operation truncates, or for multi-register operations. - */ - int16_t mem_off_first[2]; - int16_t reg_off_first[2]; - int16_t reg_off_last[2]; - - /* - * One element that is misaligned and spans both pages, - * or -1 if there is no such active element. - */ - int16_t mem_off_split; - int16_t reg_off_split; - - /* - * The byte offset at which the entire operation crosses a page boundary. - * Set >= 0 if and only if the entire operation spans two pages. - */ - int16_t page_split; - - /* TLB data for the two pages. */ - SVEHostPage page[2]; -} SVEContLdSt; - /* * Find first active element on each page, and a loose bound for the * final element on each page. Identify any single element that spans * the page boundary. Return true if there are any active elements. 
*/ -static bool sve_cont_ldst_elements(SVEContLdSt *info, target_ulong addr, - uint64_t *vg, intptr_t reg_max, - int esz, int msize) +bool sve_cont_ldst_elements(SVEContLdSt *info, target_ulong addr, uint64_t *vg, + intptr_t reg_max, int esz, int msize) { const int esize = 1 << esz; const uint64_t pg_mask = pred_esz_masks[esz]; @@ -5651,9 +5476,9 @@ static bool sve_cont_ldst_elements(SVEContLdSt *info, target_ulong addr, * Control the generation of page faults with @fault. Return false if * there is no work to do, which can only happen with @fault == FAULT_NO. */ -static bool sve_cont_ldst_pages(SVEContLdSt *info, SVEContFault fault, - CPUARMState *env, target_ulong addr, - MMUAccessType access_type, uintptr_t retaddr) +bool sve_cont_ldst_pages(SVEContLdSt *info, SVEContFault fault, + CPUARMState *env, target_ulong addr, + MMUAccessType access_type, uintptr_t retaddr) { int mmu_idx = cpu_mmu_index(env, false); int mem_off = info->mem_off_first[0]; @@ -5709,12 +5534,12 @@ static bool sve_cont_ldst_pages(SVEContLdSt *info, SVEContFault fault, return have_work; } -static void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env, - uint64_t *vg, target_ulong addr, - int esize, int msize, int wp_access, - uintptr_t retaddr) -{ #ifndef CONFIG_USER_ONLY +void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env, + uint64_t *vg, target_ulong addr, + int esize, int msize, int wp_access, + uintptr_t retaddr) +{ intptr_t mem_off, reg_off, reg_last; int flags0 = info->page[0].flags; int flags1 = info->page[1].flags; @@ -5770,17 +5595,17 @@ static void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env, } while (reg_off & 63); } while (reg_off <= reg_last); } -#endif } +#endif -static void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env, - uint64_t *vg, target_ulong addr, int esize, - int msize, uint32_t mtedesc, uintptr_t ra) +void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env, + uint64_t *vg, target_ulong addr, int esize, + int msize, uint32_t mtedesc, uintptr_t ra) { intptr_t mem_off, reg_off, reg_last; /* Process the page only if MemAttr == Tagged. */ - if (arm_tlb_mte_tagged(&info->page[0].attrs)) { + if (info->page[0].tagged) { mem_off = info->mem_off_first[0]; reg_off = info->reg_off_first[0]; reg_last = info->reg_off_split; @@ -5801,7 +5626,7 @@ static void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env, } mem_off = info->mem_off_first[1]; - if (mem_off >= 0 && arm_tlb_mte_tagged(&info->page[1].attrs)) { + if (mem_off >= 0 && info->page[1].tagged) { reg_off = info->reg_off_first[1]; reg_last = info->reg_off_last[1]; @@ -6118,7 +5943,7 @@ DO_LDN_2(4, dd, MO_64) * linux-user/ in its get_user/put_user macros. * * TODO: Construct some helpers, written in assembly, that interact with - * handle_cpu_signal to produce memory ops which can properly report errors + * host_signal_handler to produce memory ops which can properly report errors * without racing. */ @@ -6180,7 +6005,7 @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr, * Disable MTE checking if the Tagged bit is not set. Since TBI must * be set within MTEDESC for MTE, !mtedesc => !mte_active. 
*/ - if (arm_tlb_mte_tagged(&info.page[0].attrs)) { + if (!info.page[0].tagged) { mtedesc = 0; } @@ -6731,10 +6556,14 @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, cpu_check_watchpoint(env_cpu(env), addr, msize, info.attrs, BP_MEM_READ, retaddr); } - if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) { + if (mtedesc && info.tagged) { mte_check(env, mtedesc, addr, retaddr); } - host_fn(&scratch, reg_off, info.host); + if (unlikely(info.flags & TLB_MMIO)) { + tlb_fn(env, &scratch, reg_off, addr, retaddr); + } else { + host_fn(&scratch, reg_off, info.host); + } } else { /* Element crosses the page boundary. */ sve_probe_page(&info2, false, env, addr + in_page, 0, @@ -6744,7 +6573,7 @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, msize, info.attrs, BP_MEM_READ, retaddr); } - if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) { + if (mtedesc && info.tagged) { mte_check(env, mtedesc, addr, retaddr); } tlb_fn(env, &scratch, reg_off, addr, retaddr); @@ -6897,6 +6726,7 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, intptr_t reg_off; SVEHostPage info; target_ulong addr, in_page; + ARMVectorReg scratch; /* Skip to the first true predicate. */ reg_off = find_next_active(vg, 0, reg_max, esz); @@ -6906,6 +6736,11 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, return; } + /* Protect against overlap between vd and vm. */ + if (unlikely(vd == vm)) { + vm = memcpy(&scratch, vm, reg_max); + } + /* * Probe the first element, allowing faults. */ @@ -6945,9 +6780,7 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, (env_cpu(env), addr, msize) & BP_MEM_READ)) { goto fault; } - if (mtedesc && - arm_tlb_mte_tagged(&info.attrs) && - !mte_probe(env, mtedesc, addr)) { + if (mtedesc && info.tagged && !mte_probe(env, mtedesc, addr)) { goto fault; } @@ -7112,7 +6945,9 @@ void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, if (likely(in_page >= msize)) { sve_probe_page(&info, false, env, addr, 0, MMU_DATA_STORE, mmu_idx, retaddr); - host[i] = info.host; + if (!(info.flags & TLB_MMIO)) { + host[i] = info.host; + } } else { /* * Element crosses the page boundary. @@ -7131,7 +6966,7 @@ void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, info.attrs, BP_MEM_WRITE, retaddr); } - if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) { + if (mtedesc && info.tagged) { mte_check(env, mtedesc, addr, retaddr); } } diff --git a/target/arm/sve_ldst_internal.h b/target/arm/sve_ldst_internal.h new file mode 100644 index 0000000000..4f159ec4ad --- /dev/null +++ b/target/arm/sve_ldst_internal.h @@ -0,0 +1,222 @@ +/* + * ARM SVE Load/Store Helpers + * + * Copyright (c) 2018-2022 Linaro + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#ifndef TARGET_ARM_SVE_LDST_INTERNAL_H +#define TARGET_ARM_SVE_LDST_INTERNAL_H + +#include "exec/cpu_ldst.h" + +/* + * Load one element into @vd + @reg_off from @host. 
+ * The controlling predicate is known to be true. + */ +typedef void sve_ldst1_host_fn(void *vd, intptr_t reg_off, void *host); + +/* + * Load one element into @vd + @reg_off from (@env, @vaddr, @ra). + * The controlling predicate is known to be true. + */ +typedef void sve_ldst1_tlb_fn(CPUARMState *env, void *vd, intptr_t reg_off, + target_ulong vaddr, uintptr_t retaddr); + +/* + * Generate the above primitives. + */ + +#define DO_LD_HOST(NAME, H, TYPEE, TYPEM, HOST) \ +static inline void sve_##NAME##_host(void *vd, intptr_t reg_off, void *host) \ +{ TYPEM val = HOST(host); *(TYPEE *)(vd + H(reg_off)) = val; } + +#define DO_ST_HOST(NAME, H, TYPEE, TYPEM, HOST) \ +static inline void sve_##NAME##_host(void *vd, intptr_t reg_off, void *host) \ +{ TYPEM val = *(TYPEE *)(vd + H(reg_off)); HOST(host, val); } + +#define DO_LD_TLB(NAME, H, TYPEE, TYPEM, TLB) \ +static inline void sve_##NAME##_tlb(CPUARMState *env, void *vd, \ + intptr_t reg_off, target_ulong addr, uintptr_t ra) \ +{ \ + TYPEM val = TLB(env, useronly_clean_ptr(addr), ra); \ + *(TYPEE *)(vd + H(reg_off)) = val; \ +} + +#define DO_ST_TLB(NAME, H, TYPEE, TYPEM, TLB) \ +static inline void sve_##NAME##_tlb(CPUARMState *env, void *vd, \ + intptr_t reg_off, target_ulong addr, uintptr_t ra) \ +{ \ + TYPEM val = *(TYPEE *)(vd + H(reg_off)); \ + TLB(env, useronly_clean_ptr(addr), val, ra); \ +} + +#define DO_LD_PRIM_1(NAME, H, TE, TM) \ + DO_LD_HOST(NAME, H, TE, TM, ldub_p) \ + DO_LD_TLB(NAME, H, TE, TM, cpu_ldub_data_ra) + +DO_LD_PRIM_1(ld1bb, H1, uint8_t, uint8_t) +DO_LD_PRIM_1(ld1bhu, H1_2, uint16_t, uint8_t) +DO_LD_PRIM_1(ld1bhs, H1_2, uint16_t, int8_t) +DO_LD_PRIM_1(ld1bsu, H1_4, uint32_t, uint8_t) +DO_LD_PRIM_1(ld1bss, H1_4, uint32_t, int8_t) +DO_LD_PRIM_1(ld1bdu, H1_8, uint64_t, uint8_t) +DO_LD_PRIM_1(ld1bds, H1_8, uint64_t, int8_t) + +#define DO_ST_PRIM_1(NAME, H, TE, TM) \ + DO_ST_HOST(st1##NAME, H, TE, TM, stb_p) \ + DO_ST_TLB(st1##NAME, H, TE, TM, cpu_stb_data_ra) + +DO_ST_PRIM_1(bb, H1, uint8_t, uint8_t) +DO_ST_PRIM_1(bh, H1_2, uint16_t, uint8_t) +DO_ST_PRIM_1(bs, H1_4, uint32_t, uint8_t) +DO_ST_PRIM_1(bd, H1_8, uint64_t, uint8_t) + +#define DO_LD_PRIM_2(NAME, H, TE, TM, LD) \ + DO_LD_HOST(ld1##NAME##_be, H, TE, TM, LD##_be_p) \ + DO_LD_HOST(ld1##NAME##_le, H, TE, TM, LD##_le_p) \ + DO_LD_TLB(ld1##NAME##_be, H, TE, TM, cpu_##LD##_be_data_ra) \ + DO_LD_TLB(ld1##NAME##_le, H, TE, TM, cpu_##LD##_le_data_ra) + +#define DO_ST_PRIM_2(NAME, H, TE, TM, ST) \ + DO_ST_HOST(st1##NAME##_be, H, TE, TM, ST##_be_p) \ + DO_ST_HOST(st1##NAME##_le, H, TE, TM, ST##_le_p) \ + DO_ST_TLB(st1##NAME##_be, H, TE, TM, cpu_##ST##_be_data_ra) \ + DO_ST_TLB(st1##NAME##_le, H, TE, TM, cpu_##ST##_le_data_ra) + +DO_LD_PRIM_2(hh, H1_2, uint16_t, uint16_t, lduw) +DO_LD_PRIM_2(hsu, H1_4, uint32_t, uint16_t, lduw) +DO_LD_PRIM_2(hss, H1_4, uint32_t, int16_t, lduw) +DO_LD_PRIM_2(hdu, H1_8, uint64_t, uint16_t, lduw) +DO_LD_PRIM_2(hds, H1_8, uint64_t, int16_t, lduw) + +DO_ST_PRIM_2(hh, H1_2, uint16_t, uint16_t, stw) +DO_ST_PRIM_2(hs, H1_4, uint32_t, uint16_t, stw) +DO_ST_PRIM_2(hd, H1_8, uint64_t, uint16_t, stw) + +DO_LD_PRIM_2(ss, H1_4, uint32_t, uint32_t, ldl) +DO_LD_PRIM_2(sdu, H1_8, uint64_t, uint32_t, ldl) +DO_LD_PRIM_2(sds, H1_8, uint64_t, int32_t, ldl) + +DO_ST_PRIM_2(ss, H1_4, uint32_t, uint32_t, stl) +DO_ST_PRIM_2(sd, H1_8, uint64_t, uint32_t, stl) + +DO_LD_PRIM_2(dd, H1_8, uint64_t, uint64_t, ldq) +DO_ST_PRIM_2(dd, H1_8, uint64_t, uint64_t, stq) + +#undef DO_LD_TLB +#undef DO_ST_TLB +#undef DO_LD_HOST +#undef DO_LD_PRIM_1 +#undef DO_ST_PRIM_1 +#undef 
DO_LD_PRIM_2 +#undef DO_ST_PRIM_2 + +/* + * Resolve the guest virtual address to info->host and info->flags. + * If @nofault, return false if the page is invalid, otherwise + * exit via page fault exception. + */ + +typedef struct { + void *host; + int flags; + MemTxAttrs attrs; + bool tagged; +} SVEHostPage; + +bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env, + target_ulong addr, int mem_off, MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr); + +/* + * Analyse contiguous data, protected by a governing predicate. + */ + +typedef enum { + FAULT_NO, + FAULT_FIRST, + FAULT_ALL, +} SVEContFault; + +typedef struct { + /* + * First and last element wholly contained within the two pages. + * mem_off_first[0] and reg_off_first[0] are always set >= 0. + * reg_off_last[0] may be < 0 if the first element crosses pages. + * All of mem_off_first[1], reg_off_first[1] and reg_off_last[1] + * are set >= 0 only if there are complete elements on a second page. + * + * The reg_off_* offsets are relative to the internal vector register. + * The mem_off_first offset is relative to the memory address; the + * two offsets are different when a load operation extends, a store + * operation truncates, or for multi-register operations. + */ + int16_t mem_off_first[2]; + int16_t reg_off_first[2]; + int16_t reg_off_last[2]; + + /* + * One element that is misaligned and spans both pages, + * or -1 if there is no such active element. + */ + int16_t mem_off_split; + int16_t reg_off_split; + + /* + * The byte offset at which the entire operation crosses a page boundary. + * Set >= 0 if and only if the entire operation spans two pages. + */ + int16_t page_split; + + /* TLB data for the two pages. */ + SVEHostPage page[2]; +} SVEContLdSt; + +/* + * Find first active element on each page, and a loose bound for the + * final element on each page. Identify any single element that spans + * the page boundary. Return true if there are any active elements. + */ +bool sve_cont_ldst_elements(SVEContLdSt *info, target_ulong addr, uint64_t *vg, + intptr_t reg_max, int esz, int msize); + +/* + * Resolve the guest virtual addresses to info->page[]. + * Control the generation of page faults with @fault. Return false if + * there is no work to do, which can only happen with @fault == FAULT_NO. 
+ */ +bool sve_cont_ldst_pages(SVEContLdSt *info, SVEContFault fault, + CPUARMState *env, target_ulong addr, + MMUAccessType access_type, uintptr_t retaddr); + +#ifdef CONFIG_USER_ONLY +static inline void +sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env, uint64_t *vg, + target_ulong addr, int esize, int msize, + int wp_access, uintptr_t retaddr) +{ } +#else +void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env, + uint64_t *vg, target_ulong addr, + int esize, int msize, int wp_access, + uintptr_t retaddr); +#endif + +void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env, uint64_t *vg, + target_ulong addr, int esize, int msize, + uint32_t mtedesc, uintptr_t ra); + +#endif /* TARGET_ARM_SVE_LDST_INTERNAL_H */ diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h index 39a31260f2..73df5e3793 100644 --- a/target/arm/syndrome.h +++ b/target/arm/syndrome.h @@ -36,6 +36,7 @@ enum arm_exception_class { EC_ADVSIMDFPACCESSTRAP = 0x07, EC_FPIDTRAP = 0x08, EC_PACTRAP = 0x09, + EC_BXJTRAP = 0x0a, EC_CP14RRTTRAP = 0x0c, EC_BTITRAP = 0x0d, EC_ILLEGALSTATE = 0x0e, @@ -47,6 +48,7 @@ enum arm_exception_class { EC_AA64_SMC = 0x17, EC_SYSTEMREGISTERTRAP = 0x18, EC_SVEACCESSTRAP = 0x19, + EC_SMETRAP = 0x1d, EC_INSNABORT = 0x20, EC_INSNABORT_SAME_EL = 0x21, EC_PCALIGNMENT = 0x22, @@ -67,6 +69,13 @@ enum arm_exception_class { EC_AA64_BKPT = 0x3c, }; +typedef enum { + SME_ET_AccessTrap, + SME_ET_Streaming, + SME_ET_NotStreaming, + SME_ET_InactiveZA, +} SMEExceptionType; + #define ARM_EL_EC_SHIFT 26 #define ARM_EL_IL_SHIFT 25 #define ARM_EL_ISV_SHIFT 24 @@ -184,12 +193,13 @@ static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm, | (rt2 << 10) | (rt << 5) | (crm << 1) | isread; } -static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit) +static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit, + int coproc) { - /* AArch32 FP trap or any AArch64 FP/SIMD trap: TA == 0 coproc == 0xa */ + /* AArch32 FP trap or any AArch64 FP/SIMD trap: TA == 0 */ return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT) | (is_16bit ? 0 : ARM_EL_IL) - | (cv << 24) | (cond << 20) | 0xa; + | (cv << 24) | (cond << 20) | coproc; } static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit) @@ -205,6 +215,12 @@ static inline uint32_t syn_sve_access_trap(void) return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT; } +static inline uint32_t syn_smetrap(SMEExceptionType etype, bool is_16bit) +{ + return (EC_SMETRAP << ARM_EL_EC_SHIFT) + | (is_16bit ? 
0 : ARM_EL_IL) | etype; +} + static inline uint32_t syn_pactrap(void) { return EC_PACTRAP << ARM_EL_EC_SHIFT; @@ -215,6 +231,12 @@ static inline uint32_t syn_btitrap(int btype) return (EC_BTITRAP << ARM_EL_EC_SHIFT) | btype; } +static inline uint32_t syn_bxjtrap(int cv, int cond, int rm) +{ + return (EC_BXJTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL | + (cv << 24) | (cond << 20) | rm; +} + static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc) { return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) @@ -270,4 +292,19 @@ static inline uint32_t syn_wfx(int cv, int cond, int ti, bool is_16bit) (cv << 24) | (cond << 20) | ti; } +static inline uint32_t syn_illegalstate(void) +{ + return (EC_ILLEGALSTATE << ARM_EL_EC_SHIFT) | ARM_EL_IL; +} + +static inline uint32_t syn_pcalignment(void) +{ + return (EC_PCALIGNMENT << ARM_EL_EC_SHIFT) | ARM_EL_IL; +} + +static inline uint32_t syn_serror(uint32_t extra) +{ + return (EC_SERROR << ARM_EL_EC_SHIFT) | ARM_EL_IL | extra; +} + #endif /* TARGET_ARM_SYNDROME_H */ diff --git a/target/arm/t32.decode b/target/arm/t32.decode index 2d47f31f14..f21ad0167a 100644 --- a/target/arm/t32.decode +++ b/target/arm/t32.decode @@ -364,17 +364,17 @@ CLZ 1111 1010 1011 ---- 1111 .... 1000 .... @rdm [ # Hints, and CPS { - YIELD 1111 0011 1010 1111 1000 0000 0000 0001 - WFE 1111 0011 1010 1111 1000 0000 0000 0010 - WFI 1111 0011 1010 1111 1000 0000 0000 0011 + [ + YIELD 1111 0011 1010 1111 1000 0000 0000 0001 + WFE 1111 0011 1010 1111 1000 0000 0000 0010 + WFI 1111 0011 1010 1111 1000 0000 0000 0011 - # TODO: Implement SEV, SEVL; may help SMP performance. - # SEV 1111 0011 1010 1111 1000 0000 0000 0100 - # SEVL 1111 0011 1010 1111 1000 0000 0000 0101 + # TODO: Implement SEV, SEVL; may help SMP performance. + # SEV 1111 0011 1010 1111 1000 0000 0000 0100 + # SEVL 1111 0011 1010 1111 1000 0000 0000 0101 - # For M-profile minimal-RAS ESB can be a NOP, which is the - # default behaviour since it is in the hint space. - # ESB 1111 0011 1010 1111 1000 0000 0001 0000 + ESB 1111 0011 1010 1111 1000 0000 0001 0000 + ] # The canonical nop ends in 0000 0000, but the whole rest # of the space is "reserved hint, behaves as nop". @@ -748,5 +748,6 @@ BL 1111 0. .......... 11.1 ............ @branch24 # This is DLSTP DLS 1111 0 0000 0 size:2 rn:4 1110 0000 0000 0001 } + VCTP 1111 0 0000 0 size:2 rn:4 1110 1000 0000 0001 ] } diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c index 3107f9823e..1384fe6f98 100644 --- a/target/arm/tlb_helper.c +++ b/target/arm/tlb_helper.c @@ -9,6 +9,33 @@ #include "cpu.h" #include "internals.h" #include "exec/exec-all.h" +#include "exec/helper-proto.h" + + +/* Return true if the translation regime is using LPAE format page tables */ +bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) +{ + int el = regime_el(env, mmu_idx); + if (el == 2 || arm_el_is_aa64(env, el)) { + return true; + } + if (arm_feature(env, ARM_FEATURE_LPAE) + && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) { + return true; + } + return false; +} + +/* + * Returns true if the stage 1 translation regime is using LPAE format page + * tables. Used when raising alignment exceptions, whose FSR changes depending + * on whether the long or short descriptor format is in use. 
+ */ +bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) +{ + mmu_idx = stage_1_mmu_idx(mmu_idx); + return regime_using_lpae_format(env, mmu_idx); +} static inline uint32_t merge_syn_data_abort(uint32_t template_syn, unsigned int target_el, @@ -49,28 +76,23 @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn, return syn; } -static void QEMU_NORETURN arm_deliver_fault(ARMCPU *cpu, vaddr addr, - MMUAccessType access_type, - int mmu_idx, ARMMMUFaultInfo *fi) +static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi, + int target_el, int mmu_idx, uint32_t *ret_fsc) { - CPUARMState *env = &cpu->env; - int target_el; - bool same_el; - uint32_t syn, exc, fsr, fsc; ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx); + uint32_t fsr, fsc; - target_el = exception_target_el(env); - if (fi->stage2) { - target_el = 2; - env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4; - if (arm_is_secure_below_el3(env) && fi->s1ns) { - env->cp15.hpfar_el2 |= HPFAR_NS; - } - } - same_el = (arm_current_el(env) == target_el); - - if (target_el == 2 || arm_el_is_aa64(env, target_el) || - arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) { + /* + * For M-profile there is no guest-facing FSR. We compute a + * short-form value for env->exception.fsr which we will then + * examine in arm_v7m_cpu_do_interrupt(). In theory we could + * use the LPAE format instead as long as both bits of code agree + * (and arm_fi_to_lfsc() handled the M-profile specific + * ARMFault_QEMU_NSCExec and ARMFault_QEMU_SFault cases). + */ + if (!arm_feature(env, ARM_FEATURE_M) && + (target_el == 2 || arm_el_is_aa64(env, target_el) || + arm_s1_regime_using_lpae_format(env, arm_mmu_idx))) { /* * LPAE format fault status register : bottom 6 bits are * status code in the same form as needed for syndrome @@ -88,6 +110,32 @@ static void QEMU_NORETURN arm_deliver_fault(ARMCPU *cpu, vaddr addr, fsc = 0x3f; } + *ret_fsc = fsc; + return fsr; +} + +static G_NORETURN +void arm_deliver_fault(ARMCPU *cpu, vaddr addr, + MMUAccessType access_type, + int mmu_idx, ARMMMUFaultInfo *fi) +{ + CPUARMState *env = &cpu->env; + int target_el; + bool same_el; + uint32_t syn, exc, fsr, fsc; + + target_el = exception_target_el(env); + if (fi->stage2) { + target_el = 2; + env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4; + if (arm_is_secure_below_el3(env) && fi->s1ns) { + env->cp15.hpfar_el2 |= HPFAR_NS; + } + } + same_el = (arm_current_el(env) == target_el); + + fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc); + if (access_type == MMU_INST_FETCH) { syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc); exc = EXCP_PREFETCH_ABORT; @@ -117,12 +165,29 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, ARMMMUFaultInfo fi = {}; /* now we have a real cpu fault */ - cpu_restore_state(cs, retaddr, true); + cpu_restore_state(cs, retaddr); fi.type = ARMFault_Alignment; arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi); } +void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc) +{ + ARMMMUFaultInfo fi = { .type = ARMFault_Alignment }; + int target_el = exception_target_el(env); + int mmu_idx = cpu_mmu_index(env, true); + uint32_t fsc; + + env->exception.vaddress = pc; + + /* + * Note that the fsc is not applicable to this exception, + * since any syndrome is pcalignment not insn_abort. 
+ */ + env->exception.fsr = compute_fsr_fsc(env, &fi, target_el, mmu_idx, &fsc); + raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el); +} + #if !defined(CONFIG_USER_ONLY) /* @@ -140,40 +205,32 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, ARMMMUFaultInfo fi = {}; /* now we have a real cpu fault */ - cpu_restore_state(cs, retaddr, true); + cpu_restore_state(cs, retaddr); fi.ea = arm_extabort_type(response); fi.type = ARMFault_SyncExternal; arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi); } -#endif /* !defined(CONFIG_USER_ONLY) */ - bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { ARMCPU *cpu = ARM_CPU(cs); - ARMMMUFaultInfo fi = {}; + GetPhysAddrResult res = {}; + ARMMMUFaultInfo local_fi, *fi; + int ret; -#ifdef CONFIG_USER_ONLY - int flags = page_get_flags(useronly_clean_ptr(address)); - if (flags & PAGE_VALID) { - fi.type = ARMFault_Permission; + /* + * Allow S1_ptw_translate to see any fault generated here. + * Since this may recurse, read and clear. + */ + fi = cpu->env.tlb_fi; + if (fi) { + cpu->env.tlb_fi = NULL; } else { - fi.type = ARMFault_Translation; + fi = memset(&local_fi, 0, sizeof(local_fi)); } - fi.level = 3; - - /* now we have a real cpu fault */ - cpu_restore_state(cs, retaddr, true); - arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi); -#else - hwaddr phys_addr; - target_ulong page_size; - int prot, ret; - MemTxAttrs attrs = {}; - ARMCacheAttrs cacheattrs = {}; /* * Walk the page table and (if the mapping exists) add the page @@ -183,32 +240,53 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size, */ ret = get_phys_addr(&cpu->env, address, access_type, core_to_arm_mmu_idx(&cpu->env, mmu_idx), - &phys_addr, &attrs, &prot, &page_size, - &fi, &cacheattrs); + &res, fi); if (likely(!ret)) { /* * Map a single [sub]page. Regions smaller than our declared * target page size are handled specially, so for those we * pass in the exact addresses. */ - if (page_size >= TARGET_PAGE_SIZE) { - phys_addr &= TARGET_PAGE_MASK; + if (res.f.lg_page_size >= TARGET_PAGE_BITS) { + res.f.phys_addr &= TARGET_PAGE_MASK; address &= TARGET_PAGE_MASK; } - /* Notice and record tagged memory. */ - if (cpu_isar_feature(aa64_mte, cpu) && cacheattrs.attrs == 0xf0) { - arm_tlb_mte_tagged(&attrs) = true; - } - tlb_set_page_with_attrs(cs, address, phys_addr, attrs, - prot, mmu_idx, page_size); + res.f.pte_attrs = res.cacheattrs.attrs; + res.f.shareability = res.cacheattrs.shareability; + + tlb_set_page_full(cs, mmu_idx, address, &res.f); return true; } else if (probe) { return false; } else { /* now we have a real cpu fault */ - cpu_restore_state(cs, retaddr, true); - arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi); + cpu_restore_state(cs, retaddr); + arm_deliver_fault(cpu, address, access_type, mmu_idx, fi); } -#endif } +#else +void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr, + MMUAccessType access_type, + bool maperr, uintptr_t ra) +{ + ARMMMUFaultInfo fi = { + .type = maperr ? ARMFault_Translation : ARMFault_Permission, + .level = 3, + }; + ARMCPU *cpu = ARM_CPU(cs); + + /* + * We report both ESR and FAR to signal handlers. + * For now, it's easiest to deliver the fault normally. 
+ */ + cpu_restore_state(cs, ra); + arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi); +} + +void arm_cpu_record_sigbus(CPUState *cs, vaddr addr, + MMUAccessType access_type, uintptr_t ra) +{ + arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra); +} +#endif /* !defined(CONFIG_USER_ONLY) */ diff --git a/target/arm/translate-a32.h b/target/arm/translate-a32.h index 6dfcafe179..99eea85fa8 100644 --- a/target/arm/translate-a32.h +++ b/target/arm/translate-a32.h @@ -17,8 +17,8 @@ * License along with this library; if not, see . */ -#ifndef TARGET_ARM_TRANSLATE_A64_H -#define TARGET_ARM_TRANSLATE_A64_H +#ifndef TARGET_ARM_TRANSLATE_A32_H +#define TARGET_ARM_TRANSLATE_A32_H /* Prototypes for autogenerated disassembler functions */ bool disas_m_nocp(DisasContext *dc, uint32_t insn); @@ -40,7 +40,7 @@ void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop); TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs); void gen_set_cpsr(TCGv_i32 var, uint32_t mask); void gen_set_condexec(DisasContext *s); -void gen_set_pc_im(DisasContext *s, target_ulong val); +void gen_update_pc(DisasContext *s, target_long diff); void gen_lookup_tb(DisasContext *s); long vfp_reg_offset(bool dp, unsigned reg); long neon_full_reg_offset(unsigned reg); @@ -48,7 +48,9 @@ long neon_element_offset(int reg, int element, MemOp memop); void gen_rev16(TCGv_i32 dest, TCGv_i32 var); void clear_eci_state(DisasContext *s); bool mve_eci_check(DisasContext *s); +void mve_update_eci(DisasContext *s); void mve_update_and_store_eci(DisasContext *s); +bool mve_skip_vmov(DisasContext *s, int vn, int index, int size); static inline TCGv_i32 load_cpu_offset(int offset) { @@ -59,14 +61,21 @@ static inline TCGv_i32 load_cpu_offset(int offset) #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name)) -static inline void store_cpu_offset(TCGv_i32 var, int offset) -{ - tcg_gen_st_i32(var, cpu_env, offset); - tcg_temp_free_i32(var); -} +/* Load from the low half of a 64-bit field to a TCGv_i32 */ +#define load_cpu_field_low32(name) \ + ({ \ + QEMU_BUILD_BUG_ON(sizeof_field(CPUARMState, name) != 8); \ + load_cpu_offset(offsetoflow32(CPUARMState, name)); \ + }) -#define store_cpu_field(var, name) \ - store_cpu_offset(var, offsetof(CPUARMState, name)) +void store_cpu_offset(TCGv_i32 var, int offset, int size); + +#define store_cpu_field(var, name) \ + store_cpu_offset(var, offsetof(CPUARMState, name), \ + sizeof_field(CPUARMState, name)) + +#define store_cpu_field_constant(val, name) \ + store_cpu_field(tcg_constant_i32(val), name) /* Create a new temporary and set it to the value of a CPU register. 
*/ static inline TCGv_i32 load_reg(DisasContext *s, int reg) @@ -112,13 +121,13 @@ void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, int index) { - gen_aa32_ld_i64(s, val, a32, index, MO_Q); + gen_aa32_ld_i64(s, val, a32, index, MO_UQ); } static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, int index) { - gen_aa32_st_i64(s, val, a32, index, MO_Q); + gen_aa32_st_i64(s, val, a32, index, MO_UQ); } DO_GEN_LD(8u, MO_UB) diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index 422e2ac0c9..f0b8db7ce5 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -27,14 +27,12 @@ #include "translate.h" #include "internals.h" #include "qemu/host-utils.h" - #include "semihosting/semihost.h" #include "exec/gen-icount.h" - #include "exec/helper-proto.h" #include "exec/helper-gen.h" #include "exec/log.h" - +#include "cpregs.h" #include "translate-a64.h" #include "qemu/atomic128.h" @@ -113,14 +111,6 @@ static int get_a64_user_mem_index(DisasContext *s) case ARMMMUIdx_E20_2_PAN: useridx = ARMMMUIdx_E20_0; break; - case ARMMMUIdx_SE10_1: - case ARMMMUIdx_SE10_1_PAN: - useridx = ARMMMUIdx_SE10_0; - break; - case ARMMMUIdx_SE20_2: - case ARMMMUIdx_SE20_2_PAN: - useridx = ARMMMUIdx_SE20_0; - break; default: g_assert_not_reached(); } @@ -128,32 +118,42 @@ static int get_a64_user_mem_index(DisasContext *s) return arm_to_core_mmu_idx(useridx); } -static void reset_btype(DisasContext *s) +static void set_btype_raw(int val) { - if (s->btype != 0) { - TCGv_i32 zero = tcg_const_i32(0); - tcg_gen_st_i32(zero, cpu_env, offsetof(CPUARMState, btype)); - tcg_temp_free_i32(zero); - s->btype = 0; - } + tcg_gen_st_i32(tcg_constant_i32(val), cpu_env, + offsetof(CPUARMState, btype)); } static void set_btype(DisasContext *s, int val) { - TCGv_i32 tcg_val; - /* BTYPE is a 2-bit field, and 0 should be done with reset_btype. */ tcg_debug_assert(val >= 1 && val <= 3); - - tcg_val = tcg_const_i32(val); - tcg_gen_st_i32(tcg_val, cpu_env, offsetof(CPUARMState, btype)); - tcg_temp_free_i32(tcg_val); + set_btype_raw(val); s->btype = -1; } -void gen_a64_set_pc_im(uint64_t val) +static void reset_btype(DisasContext *s) { - tcg_gen_movi_i64(cpu_pc, val); + if (s->btype != 0) { + set_btype_raw(0); + s->btype = 0; + } +} + +static void gen_pc_plus_diff(DisasContext *s, TCGv_i64 dest, target_long diff) +{ + assert(s->pc_save != -1); + if (TARGET_TB_PCREL) { + tcg_gen_addi_i64(dest, cpu_pc, (s->pc_curr - s->pc_save) + diff); + } else { + tcg_gen_movi_i64(dest, s->pc_curr + diff); + } +} + +void gen_a64_update_pc(DisasContext *s, target_long diff) +{ + gen_pc_plus_diff(s, cpu_pc, diff); + s->pc_save = s->pc_curr + diff; } /* @@ -207,6 +207,7 @@ static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src) * then loading an address into the PC will clear out any tag. 
*/ gen_top_byte_ignore(s, cpu_pc, src, s->tbii); + s->pc_save = -1; } /* @@ -241,14 +242,10 @@ static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src) static void gen_probe_access(DisasContext *s, TCGv_i64 ptr, MMUAccessType acc, int log2_size) { - TCGv_i32 t_acc = tcg_const_i32(acc); - TCGv_i32 t_idx = tcg_const_i32(get_mem_index(s)); - TCGv_i32 t_size = tcg_const_i32(1 << log2_size); - - gen_helper_probe_access(cpu_env, ptr, t_acc, t_idx, t_size); - tcg_temp_free_i32(t_acc); - tcg_temp_free_i32(t_idx); - tcg_temp_free_i32(t_size); + gen_helper_probe_access(cpu_env, ptr, + tcg_constant_i32(acc), + tcg_constant_i32(get_mem_index(s)), + tcg_constant_i32(1 << log2_size)); } /* @@ -263,7 +260,6 @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr, int core_idx) { if (tag_checked && s->mte_active[is_unpriv]) { - TCGv_i32 tcg_desc; TCGv_i64 ret; int desc = 0; @@ -272,11 +268,9 @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr, desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1); - tcg_desc = tcg_const_i32(desc); ret = new_tmp_a64(s); - gen_helper_mte_check(ret, cpu_env, tcg_desc, addr); - tcg_temp_free_i32(tcg_desc); + gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr); return ret; } @@ -297,7 +291,6 @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, bool tag_checked, int size) { if (tag_checked && s->mte_active[0]) { - TCGv_i32 tcg_desc; TCGv_i64 ret; int desc = 0; @@ -306,11 +299,9 @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1); - tcg_desc = tcg_const_i32(desc); ret = new_tmp_a64(s); - gen_helper_mte_check(ret, cpu_env, tcg_desc, addr); - tcg_temp_free_i32(tcg_desc); + gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr); return ret; } @@ -342,30 +333,28 @@ static void a64_free_cc(DisasCompare64 *c64) tcg_temp_free_i64(c64->value); } -static void gen_exception_internal(int excp) +static void gen_rebuild_hflags(DisasContext *s) { - TCGv_i32 tcg_excp = tcg_const_i32(excp); - - assert(excp_is_internal(excp)); - gen_helper_exception_internal(cpu_env, tcg_excp); - tcg_temp_free_i32(tcg_excp); + gen_helper_rebuild_hflags_a64(cpu_env, tcg_constant_i32(s->current_el)); } -static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp) +static void gen_exception_internal(int excp) { - gen_a64_set_pc_im(pc); + assert(excp_is_internal(excp)); + gen_helper_exception_internal(cpu_env, tcg_constant_i32(excp)); +} + +static void gen_exception_internal_insn(DisasContext *s, int excp) +{ + gen_a64_update_pc(s, 0); gen_exception_internal(excp); s->base.is_jmp = DISAS_NORETURN; } static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome) { - TCGv_i32 tcg_syn; - - gen_a64_set_pc_im(s->pc_curr); - tcg_syn = tcg_const_i32(syndrome); - gen_helper_exception_bkpt_insn(cpu_env, tcg_syn); - tcg_temp_free_i32(tcg_syn); + gen_a64_update_pc(s, 0); + gen_helper_exception_bkpt_insn(cpu_env, tcg_constant_i32(syndrome)); s->base.is_jmp = DISAS_NORETURN; } @@ -393,19 +382,30 @@ static inline bool use_goto_tb(DisasContext *s, uint64_t dest) return translator_use_goto_tb(&s->base, dest); } -static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest) +static void gen_goto_tb(DisasContext *s, int n, 
int64_t diff) { - if (use_goto_tb(s, dest)) { - tcg_gen_goto_tb(n); - gen_a64_set_pc_im(dest); + if (use_goto_tb(s, s->pc_curr + diff)) { + /* + * For pcrel, the pc must always be up-to-date on entry to + * the linked TB, so that it can use simple additions for all + * further adjustments. For !pcrel, the linked TB is compiled + * to know its full virtual address, so we can delay the + * update to pc to the unlinked path. A long chain of links + * can thus avoid many updates to the PC. + */ + if (TARGET_TB_PCREL) { + gen_a64_update_pc(s, diff); + tcg_gen_goto_tb(n); + } else { + tcg_gen_goto_tb(n); + gen_a64_update_pc(s, diff); + } tcg_gen_exit_tb(s->base.tb, n); s->base.is_jmp = DISAS_NORETURN; } else { - gen_a64_set_pc_im(dest); + gen_a64_update_pc(s, diff); if (s->ss_active) { gen_step_complete_exception(s); - } else if (s->base.singlestep_enabled) { - gen_exception_internal(EXCP_DEBUG); } else { tcg_gen_lookup_and_goto_ptr(); s->base.is_jmp = DISAS_NORETURN; @@ -829,15 +829,15 @@ static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) { if (sf) { - TCGv_i64 result, cf_64, vf_64, tmp; - result = tcg_temp_new_i64(); - cf_64 = tcg_temp_new_i64(); - vf_64 = tcg_temp_new_i64(); - tmp = tcg_const_i64(0); + TCGv_i64 result = tcg_temp_new_i64(); + TCGv_i64 cf_64 = tcg_temp_new_i64(); + TCGv_i64 vf_64 = tcg_temp_new_i64(); + TCGv_i64 tmp = tcg_temp_new_i64(); + TCGv_i64 zero = tcg_constant_i64(0); tcg_gen_extu_i32_i64(cf_64, cpu_CF); - tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp); - tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp); + tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero); + tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero); tcg_gen_extrl_i64_i32(cpu_CF, cf_64); gen_set_NZ64(result); @@ -853,15 +853,15 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) tcg_temp_free_i64(cf_64); tcg_temp_free_i64(result); } else { - TCGv_i32 t0_32, t1_32, tmp; - t0_32 = tcg_temp_new_i32(); - t1_32 = tcg_temp_new_i32(); - tmp = tcg_const_i32(0); + TCGv_i32 t0_32 = tcg_temp_new_i32(); + TCGv_i32 t1_32 = tcg_temp_new_i32(); + TCGv_i32 tmp = tcg_temp_new_i32(); + TCGv_i32 zero = tcg_constant_i32(0); tcg_gen_extrl_i64_i32(t0_32, t0); tcg_gen_extrl_i64_i32(t1_32, t1); - tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp); - tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp); + tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero); + tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero); tcg_gen_mov_i32(cpu_ZF, cpu_NF); tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32); @@ -975,7 +975,7 @@ static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size) tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx)); - mop = s->be_data | MO_Q; + mop = s->be_data | MO_UQ; tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s), mop | (s->align_mem ? MO_ALIGN_16 : 0)); tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8); @@ -1009,7 +1009,7 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) tmphi = tcg_temp_new_i64(); tcg_hiaddr = tcg_temp_new_i64(); - mop = s->be_data | MO_Q; + mop = s->be_data | MO_UQ; tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s), mop | (s->align_mem ? 
MO_ALIGN_16 : 0)); tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8); @@ -1045,7 +1045,7 @@ static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx, int element, MemOp memop) { int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE); - switch (memop) { + switch ((unsigned)memop) { case MO_8: tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off); break; @@ -1172,35 +1172,109 @@ static void do_vec_ld(DisasContext *s, int destidx, int element, * unallocated-encoding checks (otherwise the syndrome information * for the resulting exception will be incorrect). */ -static bool fp_access_check(DisasContext *s) +static bool fp_access_check_only(DisasContext *s) { if (s->fp_excp_el) { assert(!s->fp_access_checked); s->fp_access_checked = true; - gen_exception_insn(s, s->pc_curr, EXCP_UDEF, - syn_fp_access_trap(1, 0xe, false), s->fp_excp_el); + gen_exception_insn_el(s, 0, EXCP_UDEF, + syn_fp_access_trap(1, 0xe, false, 0), + s->fp_excp_el); return false; } s->fp_access_checked = true; return true; } -/* Check that SVE access is enabled. If it is, return true. +static bool fp_access_check(DisasContext *s) +{ + if (!fp_access_check_only(s)) { + return false; + } + if (s->sme_trap_nonstreaming && s->is_nonstreaming) { + gen_exception_insn(s, 0, EXCP_UDEF, + syn_smetrap(SME_ET_Streaming, false)); + return false; + } + return true; +} + +/* + * Check that SVE access is enabled. If it is, return true. * If not, emit code to generate an appropriate exception and return false. + * This function corresponds to CheckSVEEnabled(). */ bool sve_access_check(DisasContext *s) { - if (s->sve_excp_el) { - assert(!s->sve_access_checked); - s->sve_access_checked = true; - - gen_exception_insn(s, s->pc_curr, EXCP_UDEF, - syn_sve_access_trap(), s->sve_excp_el); - return false; + if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) { + assert(dc_isar_feature(aa64_sme, s)); + if (!sme_sm_enabled_check(s)) { + goto fail_exit; + } + } else if (s->sve_excp_el) { + gen_exception_insn_el(s, 0, EXCP_UDEF, + syn_sve_access_trap(), s->sve_excp_el); + goto fail_exit; } s->sve_access_checked = true; return fp_access_check(s); + + fail_exit: + /* Assert that we only raise one exception per instruction. */ + assert(!s->sve_access_checked); + s->sve_access_checked = true; + return false; +} + +/* + * Check that SME access is enabled, raise an exception if not. + * Note that this function corresponds to CheckSMEAccess and is + * only used directly for cpregs. + */ +static bool sme_access_check(DisasContext *s) +{ + if (s->sme_excp_el) { + gen_exception_insn_el(s, 0, EXCP_UDEF, + syn_smetrap(SME_ET_AccessTrap, false), + s->sme_excp_el); + return false; + } + return true; +} + +/* This function corresponds to CheckSMEEnabled. */ +bool sme_enabled_check(DisasContext *s) +{ + /* + * Note that unlike sve_excp_el, we have not constrained sme_excp_el + * to be zero when fp_excp_el has priority. This is because we need + * sme_excp_el by itself for cpregs access checks. + */ + if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) { + s->fp_access_checked = true; + return sme_access_check(s); + } + return fp_access_check_only(s); +} + +/* Common subroutine for CheckSMEAnd*Enabled. 
*/ +bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req) +{ + if (!sme_enabled_check(s)) { + return false; + } + if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) { + gen_exception_insn(s, 0, EXCP_UDEF, + syn_smetrap(SME_ET_NotStreaming, false)); + return false; + } + if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) { + gen_exception_insn(s, 0, EXCP_UDEF, + syn_smetrap(SME_ET_InactiveZA, false)); + return false; + } + return true; } /* @@ -1305,16 +1379,16 @@ static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table, */ static void disas_uncond_b_imm(DisasContext *s, uint32_t insn) { - uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4; + int64_t diff = sextract32(insn, 0, 26) * 4; if (insn & (1U << 31)) { /* BL Branch with link */ - tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next); + gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s)); } /* B Branch / BL Branch with link */ reset_btype(s); - gen_goto_tb(s, 0, addr); + gen_goto_tb(s, 0, diff); } /* Compare and branch (immediate) @@ -1326,25 +1400,24 @@ static void disas_uncond_b_imm(DisasContext *s, uint32_t insn) static void disas_comp_b_imm(DisasContext *s, uint32_t insn) { unsigned int sf, op, rt; - uint64_t addr; - TCGLabel *label_match; + int64_t diff; + DisasLabel match; TCGv_i64 tcg_cmp; sf = extract32(insn, 31, 1); op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */ rt = extract32(insn, 0, 5); - addr = s->pc_curr + sextract32(insn, 5, 19) * 4; + diff = sextract32(insn, 5, 19) * 4; tcg_cmp = read_cpu_reg(s, rt, sf); - label_match = gen_new_label(); - reset_btype(s); - tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ, - tcg_cmp, 0, label_match); - gen_goto_tb(s, 0, s->base.pc_next); - gen_set_label(label_match); - gen_goto_tb(s, 1, addr); + match = gen_disas_label(s); + tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ, + tcg_cmp, 0, match.label); + gen_goto_tb(s, 0, 4); + set_disas_label(s, match); + gen_goto_tb(s, 1, diff); } /* Test and branch (immediate) @@ -1356,26 +1429,27 @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn) static void disas_test_b_imm(DisasContext *s, uint32_t insn) { unsigned int bit_pos, op, rt; - uint64_t addr; - TCGLabel *label_match; + int64_t diff; + DisasLabel match; TCGv_i64 tcg_cmp; bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5); op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */ - addr = s->pc_curr + sextract32(insn, 5, 14) * 4; + diff = sextract32(insn, 5, 14) * 4; rt = extract32(insn, 0, 5); tcg_cmp = tcg_temp_new_i64(); tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos)); - label_match = gen_new_label(); reset_btype(s); + + match = gen_disas_label(s); tcg_gen_brcondi_i64(op ? 
TCG_COND_NE : TCG_COND_EQ, - tcg_cmp, 0, label_match); + tcg_cmp, 0, match.label); tcg_temp_free_i64(tcg_cmp); - gen_goto_tb(s, 0, s->base.pc_next); - gen_set_label(label_match); - gen_goto_tb(s, 1, addr); + gen_goto_tb(s, 0, 4); + set_disas_label(s, match); + gen_goto_tb(s, 1, diff); } /* Conditional branch (immediate) @@ -1387,26 +1461,26 @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn) static void disas_cond_b_imm(DisasContext *s, uint32_t insn) { unsigned int cond; - uint64_t addr; + int64_t diff; if ((insn & (1 << 4)) || (insn & (1 << 24))) { unallocated_encoding(s); return; } - addr = s->pc_curr + sextract32(insn, 5, 19) * 4; + diff = sextract32(insn, 5, 19) * 4; cond = extract32(insn, 0, 4); reset_btype(s); if (cond < 0x0e) { /* genuinely conditional branches */ - TCGLabel *label_match = gen_new_label(); - arm_gen_test_cc(cond, label_match); - gen_goto_tb(s, 0, s->base.pc_next); - gen_set_label(label_match); - gen_goto_tb(s, 1, addr); + DisasLabel match = gen_disas_label(s); + arm_gen_test_cc(cond, match.label); + gen_goto_tb(s, 0, 4); + set_disas_label(s, match); + gen_goto_tb(s, 1, diff); } else { /* 0xe and 0xf are both "always" conditions */ - gen_goto_tb(s, 0, addr); + gen_goto_tb(s, 0, diff); } } @@ -1444,6 +1518,7 @@ static void handle_hint(DisasContext *s, uint32_t insn, break; case 0b00100: /* SEV */ case 0b00101: /* SEVL */ + case 0b00110: /* DGH */ /* we treat all as NOP at least for now */ break; case 0b00111: /* XPACLRI */ @@ -1471,6 +1546,23 @@ static void handle_hint(DisasContext *s, uint32_t insn, gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]); } break; + case 0b10000: /* ESB */ + /* Without RAS, we must implement this as NOP. */ + if (dc_isar_feature(aa64_ras, s)) { + /* + * QEMU does not have a source of physical SErrors, + * so we are only concerned with virtual SErrors. + * The pseudocode in the ARM for this case is + * if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then + * AArch64.vESBOperation(); + * Most of the condition can be evaluated at translation time. + * Test for EL2 present, and defer test for SEL2 to runtime. + */ + if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) { + gen_helper_vesb(cpu_env); + } + } + break; case 0b11000: /* PACIAZ */ if (s->pauth_active) { gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], @@ -1562,7 +1654,7 @@ static void handle_sync(DisasContext *s, uint32_t insn, * any pending interrupts immediately. */ reset_btype(s); - gen_goto_tb(s, 0, s->base.pc_next); + gen_goto_tb(s, 0, 4); return; case 7: /* SB */ @@ -1574,7 +1666,7 @@ static void handle_sync(DisasContext *s, uint32_t insn, * MB and end the TB instead. */ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); - gen_goto_tb(s, 0, s->base.pc_next); + gen_goto_tb(s, 0, 4); return; default: @@ -1630,7 +1722,6 @@ static void gen_axflag(void) static void handle_msr_i(DisasContext *s, uint32_t insn, unsigned int op1, unsigned int op2, unsigned int crm) { - TCGv_i32 t1; int op = op1 << 3 | op2; /* End the TB by default, chaining is ok. 
*/ @@ -1670,9 +1761,7 @@ static void handle_msr_i(DisasContext *s, uint32_t insn, } else { clear_pstate_bits(PSTATE_UAO); } - t1 = tcg_const_i32(s->current_el); - gen_helper_rebuild_hflags_a64(cpu_env, t1); - tcg_temp_free_i32(t1); + gen_rebuild_hflags(s); break; case 0x04: /* PAN */ @@ -1684,18 +1773,14 @@ static void handle_msr_i(DisasContext *s, uint32_t insn, } else { clear_pstate_bits(PSTATE_PAN); } - t1 = tcg_const_i32(s->current_el); - gen_helper_rebuild_hflags_a64(cpu_env, t1); - tcg_temp_free_i32(t1); + gen_rebuild_hflags(s); break; case 0x05: /* SPSel */ if (s->current_el == 0) { goto do_unallocated; } - t1 = tcg_const_i32(crm & PSTATE_SP); - gen_helper_msr_i_spsel(cpu_env, t1); - tcg_temp_free_i32(t1); + gen_helper_msr_i_spsel(cpu_env, tcg_constant_i32(crm & PSTATE_SP)); break; case 0x19: /* SSBS */ @@ -1723,15 +1808,11 @@ static void handle_msr_i(DisasContext *s, uint32_t insn, break; case 0x1e: /* DAIFSet */ - t1 = tcg_const_i32(crm); - gen_helper_msr_i_daifset(cpu_env, t1); - tcg_temp_free_i32(t1); + gen_helper_msr_i_daifset(cpu_env, tcg_constant_i32(crm)); break; case 0x1f: /* DAIFClear */ - t1 = tcg_const_i32(crm); - gen_helper_msr_i_daifclear(cpu_env, t1); - tcg_temp_free_i32(t1); + gen_helper_msr_i_daifclear(cpu_env, tcg_constant_i32(crm)); /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs. */ s->base.is_jmp = DISAS_UPDATE_EXIT; break; @@ -1744,9 +1825,7 @@ static void handle_msr_i(DisasContext *s, uint32_t insn, } else { clear_pstate_bits(PSTATE_TCO); } - t1 = tcg_const_i32(s->current_el); - gen_helper_rebuild_hflags_a64(cpu_env, t1); - tcg_temp_free_i32(t1); + gen_rebuild_hflags(s); /* Many factors, including TCO, go into MTE_ACTIVE. */ s->base.is_jmp = DISAS_UPDATE_NOCHAIN; } else if (dc_isar_feature(aa64_mte_insn_reg, s)) { @@ -1757,6 +1836,30 @@ static void handle_msr_i(DisasContext *s, uint32_t insn, } break; + case 0x1b: /* SVCR* */ + if (!dc_isar_feature(aa64_sme, s) || crm < 2 || crm > 7) { + goto do_unallocated; + } + if (sme_access_check(s)) { + bool i = crm & 1; + bool changed = false; + + if ((crm & 2) && i != s->pstate_sm) { + gen_helper_set_pstate_sm(cpu_env, tcg_constant_i32(i)); + changed = true; + } + if ((crm & 4) && i != s->pstate_za) { + gen_helper_set_pstate_za(cpu_env, tcg_constant_i32(i)); + changed = true; + } + if (changed) { + gen_rebuild_hflags(s); + } else { + s->base.is_jmp = DISAS_NEXT; + } + } + break; + default: do_unallocated: unallocated_encoding(s); @@ -1807,6 +1910,29 @@ static void gen_set_nzcv(TCGv_i64 tcg_rt) tcg_temp_free_i32(nzcv); } +static void gen_sysreg_undef(DisasContext *s, bool isread, + uint8_t op0, uint8_t op1, uint8_t op2, + uint8_t crn, uint8_t crm, uint8_t rt) +{ + /* + * Generate code to emit an UNDEF with correct syndrome + * information for a failed system register access. + * This is EC_UNCATEGORIZED (ie a standard UNDEF) in most cases, + * but if FEAT_IDST is implemented then read accesses to registers + * in the feature ID space are reported with the EC_SYSTEMREGISTERTRAP + * syndrome. 
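+ * Writes, and reads of registers outside the ID space, keep the plain
+ * EC_UNCATEGORIZED syndrome.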
+ */ + uint32_t syndrome; + + if (isread && dc_isar_feature(aa64_ids, s) && + arm_cpreg_encoding_in_idspace(op0, op1, op2, crn, crm)) { + syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread); + } else { + syndrome = syn_uncategorized(); + } + gen_exception_insn(s, 0, EXCP_UDEF, syndrome); +} + /* MRS - move from system register * MSR (register) - move to system register * SYS @@ -1832,13 +1958,13 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 " "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n", isread ? "read" : "write", op0, op1, crn, crm, op2); - unallocated_encoding(s); + gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt); return; } /* Check access permissions */ if (!cp_access_ok(s->current_el, ri, isread)) { - unallocated_encoding(s); + gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt); return; } @@ -1846,29 +1972,26 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, /* Emit code to perform further access permissions checks at * runtime; this may result in an exception. */ - TCGv_ptr tmpptr; - TCGv_i32 tcg_syn, tcg_isread; uint32_t syndrome; - gen_a64_set_pc_im(s->pc_curr); - tmpptr = tcg_const_ptr(ri); syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread); - tcg_syn = tcg_const_i32(syndrome); - tcg_isread = tcg_const_i32(isread); - gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread); - tcg_temp_free_ptr(tmpptr); - tcg_temp_free_i32(tcg_syn); - tcg_temp_free_i32(tcg_isread); + gen_a64_update_pc(s, 0); + gen_helper_access_check_cp_reg(cpu_env, + tcg_constant_ptr(ri), + tcg_constant_i32(syndrome), + tcg_constant_i32(isread)); } else if (ri->type & ARM_CP_RAISES_EXC) { /* * The readfn or writefn might raise an exception; * synchronize the CPU state in case it does. */ - gen_a64_set_pc_im(s->pc_curr); + gen_a64_update_pc(s, 0); } /* Handle special cases first */ - switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) { + switch (ri->type & ARM_CP_SPECIAL_MASK) { + case 0: + break; case ARM_CP_NOP: return; case ARM_CP_NZCV: @@ -1889,17 +2012,15 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, case ARM_CP_DC_ZVA: /* Writes clear the aligned block of memory which rt points into. 
*/ if (s->mte_active[0]) { - TCGv_i32 t_desc; int desc = 0; desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s)); desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); - t_desc = tcg_const_i32(desc); tcg_rt = new_tmp_a64(s); - gen_helper_mte_check_zva(tcg_rt, cpu_env, t_desc, cpu_reg(s, rt)); - tcg_temp_free_i32(t_desc); + gen_helper_mte_check_zva(tcg_rt, cpu_env, + tcg_constant_i32(desc), cpu_reg(s, rt)); } else { tcg_rt = clean_data_tbi(s, cpu_reg(s, rt)); } @@ -1945,12 +2066,14 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, } return; default: - break; + g_assert_not_reached(); } - if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) { + if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) { return; } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) { return; + } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) { + return; } if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) { @@ -1963,10 +2086,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, if (ri->type & ARM_CP_CONST) { tcg_gen_movi_i64(tcg_rt, ri->resetvalue); } else if (ri->readfn) { - TCGv_ptr tmpptr; - tmpptr = tcg_const_ptr(ri); - gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr); - tcg_temp_free_ptr(tmpptr); + gen_helper_get_cp_reg64(tcg_rt, cpu_env, tcg_constant_ptr(ri)); } else { tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset); } @@ -1975,10 +2095,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, /* If not forbidden by access permissions, treat as WI */ return; } else if (ri->writefn) { - TCGv_ptr tmpptr; - tmpptr = tcg_const_ptr(ri); - gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt); - tcg_temp_free_ptr(tmpptr); + gen_helper_set_cp_reg64(cpu_env, tcg_constant_ptr(ri), tcg_rt); } else { tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset); } @@ -1993,9 +2110,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, * A write to any coprocessor regiser that ends a TB * must rebuild the hflags for the next TB. */ - TCGv_i32 tcg_el = tcg_const_i32(s->current_el); - gen_helper_rebuild_hflags_a64(cpu_env, tcg_el); - tcg_temp_free_i32(tcg_el); + gen_rebuild_hflags(s); /* * We default to ending the TB on a coprocessor register write, * but allow this to be suppressed by the register definition @@ -2058,7 +2173,6 @@ static void disas_exc(DisasContext *s, uint32_t insn) int opc = extract32(insn, 21, 3); int op2_ll = extract32(insn, 0, 5); int imm16 = extract32(insn, 5, 16); - TCGv_i32 tmp; switch (opc) { case 0: @@ -2070,8 +2184,7 @@ static void disas_exc(DisasContext *s, uint32_t insn) switch (op2_ll) { case 1: /* SVC */ gen_ss_advance(s); - gen_exception_insn(s, s->base.pc_next, EXCP_SWI, - syn_aa64_svc(imm16), default_exception_el(s)); + gen_exception_insn(s, 4, EXCP_SWI, syn_aa64_svc(imm16)); break; case 2: /* HVC */ if (s->current_el == 0) { @@ -2081,24 +2194,20 @@ static void disas_exc(DisasContext *s, uint32_t insn) /* The pre HVC helper handles cases when HVC gets trapped * as an undefined insn by runtime configuration. 
*/ - gen_a64_set_pc_im(s->pc_curr); + gen_a64_update_pc(s, 0); gen_helper_pre_hvc(cpu_env); gen_ss_advance(s); - gen_exception_insn(s, s->base.pc_next, EXCP_HVC, - syn_aa64_hvc(imm16), 2); + gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(imm16), 2); break; case 3: /* SMC */ if (s->current_el == 0) { unallocated_encoding(s); break; } - gen_a64_set_pc_im(s->pc_curr); - tmp = tcg_const_i32(syn_aa64_smc(imm16)); - gen_helper_pre_smc(cpu_env, tmp); - tcg_temp_free_i32(tmp); + gen_a64_update_pc(s, 0); + gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa64_smc(imm16))); gen_ss_advance(s); - gen_exception_insn(s, s->base.pc_next, EXCP_SMC, - syn_aa64_smc(imm16), 3); + gen_exception_insn_el(s, 4, EXCP_SMC, syn_aa64_smc(imm16), 3); break; default: unallocated_encoding(s); @@ -2124,20 +2233,10 @@ static void disas_exc(DisasContext *s, uint32_t insn) * it is required for halting debug disabled: it will UNDEF. * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction. */ - if (semihosting_enabled() && imm16 == 0xf000) { -#ifndef CONFIG_USER_ONLY - /* In system mode, don't allow userspace access to semihosting, - * to provide some semblance of security (and for consistency - * with our 32-bit semihosting). - */ - if (s->current_el == 0) { - unsupported_encoding(s, insn); - break; - } -#endif - gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST); + if (semihosting_enabled(s->current_el == 0) && imm16 == 0xf000) { + gen_exception_internal_insn(s, EXCP_SEMIHOST); } else { - unsupported_encoding(s, insn); + unallocated_encoding(s); } break; case 5: @@ -2146,7 +2245,7 @@ static void disas_exc(DisasContext *s, uint32_t insn) break; } /* DCPS1, DCPS2, DCPS3 */ - unsupported_encoding(s, insn); + unallocated_encoding(s); break; default: unallocated_encoding(s); @@ -2225,11 +2324,17 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) default: goto do_unallocated; } - gen_a64_set_pc(s, dst); /* BLR also needs to load return address */ if (opc == 1) { - tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next); + TCGv_i64 lr = cpu_reg(s, 30); + if (dst == lr) { + TCGv_i64 tmp = new_tmp_a64(s); + tcg_gen_mov_i64(tmp, dst); + dst = tmp; + } + gen_pc_plus_diff(s, lr, curr_insn_len(s)); } + gen_a64_set_pc(s, dst); break; case 8: /* BRAA */ @@ -2252,11 +2357,17 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) } else { dst = cpu_reg(s, rn); } - gen_a64_set_pc(s, dst); /* BLRAA also needs to load return address */ if (opc == 9) { - tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next); + TCGv_i64 lr = cpu_reg(s, 30); + if (dst == lr) { + TCGv_i64 tmp = new_tmp_a64(s); + tcg_gen_mov_i64(tmp, dst); + dst = tmp; + } + gen_pc_plus_diff(s, lr, curr_insn_len(s)); } + gen_a64_set_pc(s, dst); break; case 4: /* ERET */ @@ -2311,7 +2422,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) if (op3 != 0 || op4 != 0 || rn != 0x1f) { goto do_unallocated; } else { - unsupported_encoding(s, insn); + unallocated_encoding(s); } return; @@ -2472,7 +2583,12 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, } else if (tb_cflags(s->base.tb) & CF_PARALLEL) { if (!HAVE_CMPXCHG128) { gen_helper_exit_atomic(cpu_env); - s->base.is_jmp = DISAS_NORETURN; + /* + * Produce a result so we have a well-formed opcode + * stream when the following (dead) code uses 'tmp'. + * TCG will remove the dead ops for us. 
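+ * (gen_helper_exit_atomic() exits to the main loop and does not return to
+ * this generated code, so the dummy value is never observed at run time.)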
+ */ + tcg_gen_movi_i64(tmp, 0); } else if (s->be_data == MO_LE) { gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env, cpu_exclusive_addr, @@ -2564,7 +2680,7 @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt, tcg_temp_free_i64(cmp); } else if (tb_cflags(s->base.tb) & CF_PARALLEL) { if (HAVE_CMPXCHG128) { - TCGv_i32 tcg_rs = tcg_const_i32(rs); + TCGv_i32 tcg_rs = tcg_constant_i32(rs); if (s->be_data == MO_LE) { gen_helper_casp_le_parallel(cpu_env, tcg_rs, clean_addr, t1, t2); @@ -2572,7 +2688,6 @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt, gen_helper_casp_be_parallel(cpu_env, tcg_rs, clean_addr, t1, t2); } - tcg_temp_free_i32(tcg_rs); } else { gen_helper_exit_atomic(cpu_env); s->base.is_jmp = DISAS_NORETURN; @@ -2583,7 +2698,7 @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt, TCGv_i64 a2 = tcg_temp_new_i64(); TCGv_i64 c1 = tcg_temp_new_i64(); TCGv_i64 c2 = tcg_temp_new_i64(); - TCGv_i64 zero = tcg_const_i64(0); + TCGv_i64 zero = tcg_constant_i64(0); /* Load the two words, in memory order. */ tcg_gen_qemu_ld_i64(d1, clean_addr, memidx, @@ -2604,7 +2719,6 @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt, tcg_temp_free_i64(a2); tcg_temp_free_i64(c1); tcg_temp_free_i64(c2); - tcg_temp_free_i64(zero); /* Write back the data from memory to Rs. */ tcg_gen_mov_i64(s1, d1); @@ -2821,7 +2935,8 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn) tcg_rt = cpu_reg(s, rt); - clean_addr = tcg_const_i64(s->pc_curr + imm); + clean_addr = new_tmp_a64(s); + gen_pc_plus_diff(s, clean_addr, imm); if (is_vector) { do_fp_ld(s, rt, clean_addr, size); } else { @@ -2831,7 +2946,6 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn) do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN, false, true, rt, iss_sf, false); } - tcg_temp_free_i64(clean_addr); } /* @@ -3041,7 +3155,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, bool is_store = false; bool is_extended = false; bool is_unpriv = (idx == 2); - bool iss_valid = !is_vector; + bool iss_valid; bool post_index; bool writeback; int memidx; @@ -3094,6 +3208,8 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, g_assert_not_reached(); } + iss_valid = !is_vector && !writeback; + if (rn == 31) { gen_check_sp_alignment(s); } @@ -3420,8 +3536,22 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn, */ fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop); - if ((mop & MO_SIGN) && size != MO_64) { - tcg_gen_ext32u_i64(tcg_rt, tcg_rt); + if (mop & MO_SIGN) { + switch (size) { + case MO_8: + tcg_gen_ext8u_i64(tcg_rt, tcg_rt); + break; + case MO_16: + tcg_gen_ext16u_i64(tcg_rt, tcg_rt); + break; + case MO_32: + tcg_gen_ext32u_i64(tcg_rt, tcg_rt); + break; + case MO_64: + break; + default: + g_assert_not_reached(); + } } } @@ -3737,7 +3867,7 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) mop = endian | size | align; elements = (is_q ? 
16 : 8) >> size; - tcg_ebytes = tcg_const_i64(1 << size); + tcg_ebytes = tcg_constant_i64(1 << size); for (r = 0; r < rpt; r++) { int e; for (e = 0; e < elements; e++) { @@ -3753,7 +3883,6 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) } } } - tcg_temp_free_i64(tcg_ebytes); if (!is_store) { /* For non-quad operations, setting a slice of the low @@ -3883,7 +4012,7 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) total); mop = finalize_memop(s, scale); - tcg_ebytes = tcg_const_i64(1 << scale); + tcg_ebytes = tcg_constant_i64(1 << scale); for (xs = 0; xs < selem; xs++) { if (replicate) { /* Load and replicate to all elements */ @@ -3905,7 +4034,6 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes); rt = (rt + 1) % 32; } - tcg_temp_free_i64(tcg_ebytes); if (is_postidx) { if (rm == 31) { @@ -4062,9 +4190,13 @@ static void disas_ldst_tag(DisasContext *s, uint32_t insn) if (s->ata) { gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt); } else { + /* + * Tag access disabled: we must check for aborts on the load + * load from [rn+offset], and then insert a 0 tag into rt. + */ clean_addr = clean_data_tbi(s, addr); gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8); - gen_address_with_allocation_tag0(tcg_rt, addr); + gen_address_with_allocation_tag0(tcg_rt, tcg_rt); } } else { tcg_rt = cpu_reg_sp(s, rt); @@ -4096,17 +4228,16 @@ static void disas_ldst_tag(DisasContext *s, uint32_t insn) if (is_zero) { TCGv_i64 clean_addr = clean_data_tbi(s, addr); - TCGv_i64 tcg_zero = tcg_const_i64(0); + TCGv_i64 tcg_zero = tcg_constant_i64(0); int mem_index = get_mem_index(s); int i, n = (1 + is_pair) << LOG2_TAG_GRANULE; tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, - MO_Q | MO_ALIGN_16); + MO_UQ | MO_ALIGN_16); for (i = 8; i < n; i += 8) { tcg_gen_addi_i64(clean_addr, clean_addr, 8); - tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, MO_Q); + tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, MO_UQ); } - tcg_temp_free_i64(tcg_zero); } if (index != 0) { @@ -4167,23 +4298,22 @@ static void disas_ldst(DisasContext *s, uint32_t insn) static void disas_pc_rel_adr(DisasContext *s, uint32_t insn) { unsigned int page, rd; - uint64_t base; - uint64_t offset; + int64_t offset; page = extract32(insn, 31, 1); /* SignExtend(immhi:immlo) -> offset */ offset = sextract64(insn, 5, 19); offset = offset << 2 | extract32(insn, 29, 2); rd = extract32(insn, 0, 5); - base = s->pc_curr; if (page) { /* ADRP (page based) */ - base &= ~0xfff; offset <<= 12; + /* The page offset is ok for TARGET_TB_PCREL. 
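+ * ADRP produces (PC & ~0xfff) + (imm << 12); folding the known low bits of
+ * pc_curr into the diff, e.g. diff = (imm << 12) - (pc_curr & 0xfff), keeps
+ * the addition emitted by gen_pc_plus_diff() purely PC-relative.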
*/ + offset -= s->pc_curr & 0xfff; } - tcg_gen_movi_i64(cpu_reg(s, rd), base + offset); + gen_pc_plus_diff(s, cpu_reg(s, rd), offset); } /* @@ -4225,13 +4355,12 @@ static void disas_add_sub_imm(DisasContext *s, uint32_t insn) tcg_gen_addi_i64(tcg_result, tcg_rn, imm); } } else { - TCGv_i64 tcg_imm = tcg_const_i64(imm); + TCGv_i64 tcg_imm = tcg_constant_i64(imm); if (sub_op) { gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm); } else { gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm); } - tcg_temp_free_i64(tcg_imm); } if (is_64bit) { @@ -4279,12 +4408,9 @@ static void disas_add_sub_imm_with_tags(DisasContext *s, uint32_t insn) tcg_rd = cpu_reg_sp(s, rd); if (s->ata) { - TCGv_i32 offset = tcg_const_i32(imm); - TCGv_i32 tag_offset = tcg_const_i32(uimm4); - - gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn, offset, tag_offset); - tcg_temp_free_i32(tag_offset); - tcg_temp_free_i32(offset); + gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn, + tcg_constant_i32(imm), + tcg_constant_i32(uimm4)); } else { tcg_gen_addi_i64(tcg_rd, tcg_rn, imm); gen_address_with_allocation_tag0(tcg_rd, tcg_rd); @@ -4470,7 +4596,6 @@ static void disas_movw_imm(DisasContext *s, uint32_t insn) int opc = extract32(insn, 29, 2); int pos = extract32(insn, 21, 2) << 4; TCGv_i64 tcg_rd = cpu_reg(s, rd); - TCGv_i64 tcg_imm; if (!sf && (pos >= 32)) { unallocated_encoding(s); @@ -4490,9 +4615,7 @@ static void disas_movw_imm(DisasContext *s, uint32_t insn) tcg_gen_movi_i64(tcg_rd, imm); break; case 3: /* MOVK */ - tcg_imm = tcg_const_i64(imm); - tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16); - tcg_temp_free_i64(tcg_imm); + tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_constant_i64(imm), pos, 16); if (!sf) { tcg_gen_ext32u_i64(tcg_rd, tcg_rd); } @@ -4732,11 +4855,7 @@ static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf, if (shift_i == 0) { tcg_gen_mov_i64(dst, src); } else { - TCGv_i64 shift_const; - - shift_const = tcg_const_i64(shift_i); - shift_reg(dst, src, sf, shift_type, shift_const); - tcg_temp_free_i64(shift_const); + shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i)); } } @@ -5313,7 +5432,7 @@ static void disas_cond_select(DisasContext *s, uint32_t insn) tcg_rd = cpu_reg(s, rd); a64_test_cc(&c, cond); - zero = tcg_const_i64(0); + zero = tcg_constant_i64(0); if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) { /* CSET & CSETM. */ @@ -5334,7 +5453,6 @@ static void disas_cond_select(DisasContext *s, uint32_t insn) tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false); } - tcg_temp_free_i64(zero); a64_free_cc(&c); if (!sf) { @@ -5431,7 +5549,7 @@ static void handle_rev16(DisasContext *s, unsigned int sf, TCGv_i64 tcg_rd = cpu_reg(s, rd); TCGv_i64 tcg_tmp = tcg_temp_new_i64(); TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf); - TCGv_i64 mask = tcg_const_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff); + TCGv_i64 mask = tcg_constant_i64(sf ? 
0x00ff00ff00ff00ffull : 0x00ff00ff); tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8); tcg_gen_and_i64(tcg_rd, tcg_rn, mask); @@ -5439,7 +5557,6 @@ static void handle_rev16(DisasContext *s, unsigned int sf, tcg_gen_shli_i64(tcg_rd, tcg_rd, 8); tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp); - tcg_temp_free_i64(mask); tcg_temp_free_i64(tcg_tmp); } @@ -5722,15 +5839,13 @@ static void handle_crc32(DisasContext *s, } tcg_acc = cpu_reg(s, rn); - tcg_bytes = tcg_const_i32(1 << sz); + tcg_bytes = tcg_constant_i32(1 << sz); if (crc32c) { gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes); } else { gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes); } - - tcg_temp_free_i32(tcg_bytes); } /* Data-processing (2 source) @@ -5796,15 +5911,13 @@ static void disas_data_proc_2src(DisasContext *s, uint32_t insn) if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) { goto do_unallocated; } else { - TCGv_i64 t1 = tcg_const_i64(1); - TCGv_i64 t2 = tcg_temp_new_i64(); + TCGv_i64 t = tcg_temp_new_i64(); - tcg_gen_extract_i64(t2, cpu_reg_sp(s, rn), 56, 4); - tcg_gen_shl_i64(t1, t1, t2); - tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t1); + tcg_gen_extract_i64(t, cpu_reg_sp(s, rn), 56, 4); + tcg_gen_shl_i64(t, tcg_constant_i64(1), t); + tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); + tcg_temp_free_i64(t); } break; case 8: /* LSLV */ @@ -5939,7 +6052,7 @@ static void handle_fp_compare(DisasContext *s, int size, tcg_vn = read_fp_dreg(s, rn); if (cmp_with_zero) { - tcg_vm = tcg_const_i64(0); + tcg_vm = tcg_constant_i64(0); } else { tcg_vm = read_fp_dreg(s, rm); } @@ -6049,7 +6162,6 @@ static void disas_fp_compare(DisasContext *s, uint32_t insn) static void disas_fp_ccomp(DisasContext *s, uint32_t insn) { unsigned int mos, type, rm, cond, rn, op, nzcv; - TCGv_i64 tcg_flags; TCGLabel *label_continue = NULL; int size; @@ -6093,9 +6205,7 @@ static void disas_fp_ccomp(DisasContext *s, uint32_t insn) label_continue = gen_new_label(); arm_gen_test_cc(cond, label_match); /* nomatch: */ - tcg_flags = tcg_const_i64(nzcv << 28); - gen_set_nzcv(tcg_flags); - tcg_temp_free_i64(tcg_flags); + gen_set_nzcv(tcg_constant_i64(nzcv << 28)); tcg_gen_br(label_continue); gen_set_label(label_match); } @@ -6116,7 +6226,7 @@ static void disas_fp_ccomp(DisasContext *s, uint32_t insn) static void disas_fp_csel(DisasContext *s, uint32_t insn) { unsigned int mos, type, rm, cond, rn, rd; - TCGv_i64 t_true, t_false, t_zero; + TCGv_i64 t_true, t_false; DisasCompare64 c; MemOp sz; @@ -6161,9 +6271,8 @@ static void disas_fp_csel(DisasContext *s, uint32_t insn) read_vec_element(s, t_false, rm, 0, sz); a64_test_cc(&c, cond); - t_zero = tcg_const_i64(0); - tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false); - tcg_temp_free_i64(t_zero); + tcg_gen_movcond_i64(c.cond, t_true, c.value, tcg_constant_i64(0), + t_true, t_false); tcg_temp_free_i64(t_false); a64_free_cc(&c); @@ -6219,7 +6328,7 @@ static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn) gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst); break; default: - abort(); + g_assert_not_reached(); } write_fp_sreg(s, rd, tcg_res); @@ -6460,7 +6569,7 @@ static void handle_fp_fcvt(DisasContext *s, int opcode, break; } default: - abort(); + g_assert_not_reached(); } } @@ -6945,7 +7054,6 @@ static void disas_fp_imm(DisasContext *s, uint32_t insn) int type = extract32(insn, 22, 2); int mos = extract32(insn, 29, 3); uint64_t imm; - TCGv_i64 tcg_res; MemOp sz; if (mos || imm5) { @@ -6976,10 +7084,7 @@ static 
void disas_fp_imm(DisasContext *s, uint32_t insn) } imm = vfp_expand_imm(sz, imm8); - - tcg_res = tcg_const_i64(imm); - write_fp_dreg(s, rd, tcg_res); - tcg_temp_free_i64(tcg_res); + write_fp_dreg(s, rd, tcg_constant_i64(imm)); } /* Handle floating point <=> fixed point conversions. Note that we can @@ -6997,7 +7102,7 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR); - tcg_shift = tcg_const_i32(64 - scale); + tcg_shift = tcg_constant_i32(64 - scale); if (itof) { TCGv_i64 tcg_int = cpu_reg(s, rn); @@ -7156,7 +7261,6 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, } tcg_temp_free_ptr(tcg_fpstatus); - tcg_temp_free_i32(tcg_shift); } /* Floating point <-> fixed point conversions @@ -8427,7 +8531,7 @@ static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src, /* Deal with the rounding step */ if (round) { if (extended_result) { - TCGv_i64 tcg_zero = tcg_const_i64(0); + TCGv_i64 tcg_zero = tcg_constant_i64(0); if (!is_u) { /* take care of sign extending tcg_res */ tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63); @@ -8439,7 +8543,6 @@ static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src, tcg_src, tcg_zero, tcg_rnd, tcg_zero); } - tcg_temp_free_i64(tcg_zero); } else { tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd); } @@ -8525,8 +8628,7 @@ static void handle_scalar_simd_shri(DisasContext *s, } if (round) { - uint64_t round_const = 1ULL << (shift - 1); - tcg_round = tcg_const_i64(round_const); + tcg_round = tcg_constant_i64(1ULL << (shift - 1)); } else { tcg_round = NULL; } @@ -8552,9 +8654,6 @@ static void handle_scalar_simd_shri(DisasContext *s, tcg_temp_free_i64(tcg_rn); tcg_temp_free_i64(tcg_rd); - if (round) { - tcg_temp_free_i64(tcg_round); - } } /* SHL/SLI - Scalar shift left */ @@ -8652,8 +8751,7 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, tcg_final = tcg_const_i64(0); if (round) { - uint64_t round_const = 1ULL << (shift - 1); - tcg_round = tcg_const_i64(round_const); + tcg_round = tcg_constant_i64(1ULL << (shift - 1)); } else { tcg_round = NULL; } @@ -8673,9 +8771,6 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, write_vec_element(s, tcg_final, rd, 1, MO_64); } - if (round) { - tcg_temp_free_i64(tcg_round); - } tcg_temp_free_i64(tcg_rn); tcg_temp_free_i64(tcg_rd); tcg_temp_free_i32(tcg_rd_narrowed); @@ -8727,7 +8822,7 @@ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q, } if (size == 3) { - TCGv_i64 tcg_shift = tcg_const_i64(shift); + TCGv_i64 tcg_shift = tcg_constant_i64(shift); static NeonGenTwo64OpEnvFn * const fns[2][2] = { { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 }, { NULL, gen_helper_neon_qshl_u64 }, @@ -8744,10 +8839,9 @@ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q, tcg_temp_free_i64(tcg_op); } - tcg_temp_free_i64(tcg_shift); clear_vec_high(s, is_q, rd); } else { - TCGv_i32 tcg_shift = tcg_const_i32(shift); + TCGv_i32 tcg_shift = tcg_constant_i32(shift); static NeonGenTwoOpEnvFn * const fns[2][2][3] = { { { gen_helper_neon_qshl_s8, @@ -8792,7 +8886,6 @@ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q, tcg_temp_free_i32(tcg_op); } - tcg_temp_free_i32(tcg_shift); if (!scalar) { clear_vec_high(s, is_q, rd); @@ -8812,7 +8905,7 @@ static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn, int pass; if (fracbits || size == MO_64) { - tcg_shift = tcg_const_i32(fracbits); + tcg_shift = 
tcg_constant_i32(fracbits); } if (size == MO_64) { @@ -8897,9 +8990,6 @@ static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn, } tcg_temp_free_ptr(tcg_fpst); - if (tcg_shift) { - tcg_temp_free_i32(tcg_shift); - } clear_vec_high(s, elements << size == 16, rd); } @@ -8989,7 +9079,7 @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar, tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); fracbits = (16 << size) - immhb; - tcg_shift = tcg_const_i32(fracbits); + tcg_shift = tcg_constant_i32(fracbits); if (size == MO_64) { int maxpass = is_scalar ? 1 : 2; @@ -9047,9 +9137,8 @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar, } } - tcg_temp_free_ptr(tcg_fpstatus); - tcg_temp_free_i32(tcg_shift); gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); + tcg_temp_free_ptr(tcg_fpstatus); tcg_temp_free_i32(tcg_rmode); } @@ -9919,23 +10008,15 @@ static void handle_2misc_64(DisasContext *s, int opcode, bool u, case 0x1c: /* FCVTAS */ case 0x3a: /* FCVTPS */ case 0x3b: /* FCVTZS */ - { - TCGv_i32 tcg_shift = tcg_const_i32(0); - gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus); - tcg_temp_free_i32(tcg_shift); + gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus); break; - } case 0x5a: /* FCVTNU */ case 0x5b: /* FCVTMU */ case 0x5c: /* FCVTAU */ case 0x7a: /* FCVTPU */ case 0x7b: /* FCVTZU */ - { - TCGv_i32 tcg_shift = tcg_const_i32(0); - gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus); - tcg_temp_free_i32(tcg_shift); + gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus); break; - } case 0x18: /* FRINTN */ case 0x19: /* FRINTM */ case 0x38: /* FRINTP */ @@ -9975,7 +10056,7 @@ static void handle_2misc_fcmp_zero(DisasContext *s, int opcode, if (is_double) { TCGv_i64 tcg_op = tcg_temp_new_i64(); - TCGv_i64 tcg_zero = tcg_const_i64(0); + TCGv_i64 tcg_zero = tcg_constant_i64(0); TCGv_i64 tcg_res = tcg_temp_new_i64(); NeonGenTwoDoubleOpFn *genfn; bool swap = false; @@ -10011,13 +10092,12 @@ static void handle_2misc_fcmp_zero(DisasContext *s, int opcode, write_vec_element(s, tcg_res, rd, pass, MO_64); } tcg_temp_free_i64(tcg_res); - tcg_temp_free_i64(tcg_zero); tcg_temp_free_i64(tcg_op); clear_vec_high(s, !is_scalar, rd); } else { TCGv_i32 tcg_op = tcg_temp_new_i32(); - TCGv_i32 tcg_zero = tcg_const_i32(0); + TCGv_i32 tcg_zero = tcg_constant_i32(0); TCGv_i32 tcg_res = tcg_temp_new_i32(); NeonGenTwoSingleOpFn *genfn; bool swap = false; @@ -10086,7 +10166,6 @@ static void handle_2misc_fcmp_zero(DisasContext *s, int opcode, } } tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_zero); tcg_temp_free_i32(tcg_op); if (!is_scalar) { clear_vec_high(s, is_q, rd); @@ -10187,7 +10266,7 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar, int passes = scalar ? 
1 : 2; if (scalar) { - tcg_res[1] = tcg_const_i32(0); + tcg_res[1] = tcg_constant_i32(0); } for (pass = 0; pass < passes; pass++) { @@ -10365,9 +10444,7 @@ static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u, } if (is_scalar) { - TCGv_i64 tcg_zero = tcg_const_i64(0); - write_vec_element(s, tcg_zero, rd, 0, MO_64); - tcg_temp_free_i64(tcg_zero); + write_vec_element(s, tcg_constant_i64(0), rd, 0, MO_64); } write_vec_element_i32(s, tcg_rd, rd, pass, MO_32); } @@ -10550,23 +10627,17 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) case 0x1c: /* FCVTAS */ case 0x3a: /* FCVTPS */ case 0x3b: /* FCVTZS */ - { - TCGv_i32 tcg_shift = tcg_const_i32(0); - gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus); - tcg_temp_free_i32(tcg_shift); + gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_constant_i32(0), + tcg_fpstatus); break; - } case 0x5a: /* FCVTNU */ case 0x5b: /* FCVTMU */ case 0x5c: /* FCVTAU */ case 0x7a: /* FCVTPU */ case 0x7b: /* FCVTZU */ - { - TCGv_i32 tcg_shift = tcg_const_i32(0); - gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus); - tcg_temp_free_i32(tcg_shift); + gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_constant_i32(0), + tcg_fpstatus); break; - } default: g_assert_not_reached(); } @@ -10738,8 +10809,7 @@ static void handle_vec_simd_shrn(DisasContext *s, bool is_q, read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64); if (round) { - uint64_t round_const = 1ULL << (shift - 1); - tcg_round = tcg_const_i64(round_const); + tcg_round = tcg_constant_i64(1ULL << (shift - 1)); } else { tcg_round = NULL; } @@ -10757,9 +10827,6 @@ static void handle_vec_simd_shrn(DisasContext *s, bool is_q, } else { write_vec_element(s, tcg_final, rd, 1, MO_64); } - if (round) { - tcg_temp_free_i64(tcg_round); - } tcg_temp_free_i64(tcg_rn); tcg_temp_free_i64(tcg_rd); tcg_temp_free_i64(tcg_final); @@ -12463,7 +12530,7 @@ static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u, } } if (!is_q) { - tcg_res[1] = tcg_const_i64(0); + tcg_res[1] = tcg_constant_i64(0); } for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); @@ -12896,25 +12963,17 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) case 0x1c: /* FCVTAS */ case 0x3a: /* FCVTPS */ case 0x3b: /* FCVTZS */ - { - TCGv_i32 tcg_shift = tcg_const_i32(0); gen_helper_vfp_tosls(tcg_res, tcg_op, - tcg_shift, tcg_fpstatus); - tcg_temp_free_i32(tcg_shift); + tcg_constant_i32(0), tcg_fpstatus); break; - } case 0x5a: /* FCVTNU */ case 0x5b: /* FCVTMU */ case 0x5c: /* FCVTAU */ case 0x7a: /* FCVTPU */ case 0x7b: /* FCVTZU */ - { - TCGv_i32 tcg_shift = tcg_const_i32(0); gen_helper_vfp_touls(tcg_res, tcg_op, - tcg_shift, tcg_fpstatus); - tcg_temp_free_i32(tcg_shift); + tcg_constant_i32(0), tcg_fpstatus); break; - } case 0x18: /* FRINTN */ case 0x19: /* FRINTM */ case 0x38: /* FRINTP */ @@ -14012,7 +14071,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) } if (is_scalar) { - tcg_res[1] = tcg_const_i64(0); + tcg_res[1] = tcg_constant_i64(0); } for (pass = 0; pass < 2; pass++) { @@ -14416,7 +14475,7 @@ static void disas_crypto_four_reg(DisasContext *s, uint32_t insn) tcg_op2 = tcg_temp_new_i32(); tcg_op3 = tcg_temp_new_i32(); tcg_res = tcg_temp_new_i32(); - tcg_zero = tcg_const_i32(0); + tcg_zero = tcg_constant_i32(0); read_vec_element_i32(s, tcg_op1, rn, 3, MO_32); read_vec_element_i32(s, tcg_op2, rm, 3, MO_32); @@ -14436,7 +14495,6 @@ static void disas_crypto_four_reg(DisasContext *s, uint32_t insn) 
tcg_temp_free_i32(tcg_op2); tcg_temp_free_i32(tcg_op3); tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_zero); } } @@ -14565,6 +14623,23 @@ static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn) } } +/* + * Include the generated SME FA64 decoder. + */ + +#include "decode-sme-fa64.c.inc" + +static bool trans_OK(DisasContext *s, arg_OK *a) +{ + return true; +} + +static bool trans_FAIL(DisasContext *s, arg_OK *a) +{ + s->is_nonstreaming = true; + return true; +} + /** * is_guarded_page: * @env: The cpu environment @@ -14578,22 +14653,21 @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s) #ifdef CONFIG_USER_ONLY return page_get_flags(addr) & PAGE_BTI; #else + CPUTLBEntryFull *full; + void *host; int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx); - unsigned int index = tlb_index(env, mmu_idx, addr); - CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + int flags; /* * We test this immediately after reading an insn, which means - * that any normal page must be in the TLB. The only exception - * would be for executing from flash or device memory, which - * does not retain the TLB entry. - * - * FIXME: Assume false for those, for now. We could use - * arm_cpu_get_phys_page_attrs_debug to re-read the page - * table entry even for that case. + * that the TLB entry must be present and valid, and thus this + * access will never raise an exception. */ - return (tlb_hit(entry->addr_code, addr) && - arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].iotlb[index].attrs)); + flags = probe_access_full(env, addr, MMU_INST_FETCH, mmu_idx, + false, &host, &full, 0); + assert(!(flags & TLB_INVALID_MASK)); + + return full->guarded; #endif } @@ -14649,19 +14723,165 @@ static bool btype_destination_ok(uint32_t insn, bool bt, int btype) return false; } -/* C3.1 A64 instruction index by encoding */ -static void disas_a64_insn(CPUARMState *env, DisasContext *s) +static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, + CPUState *cpu) { + DisasContext *dc = container_of(dcbase, DisasContext, base); + CPUARMState *env = cpu->env_ptr; + ARMCPU *arm_cpu = env_archcpu(env); + CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb); + int bound, core_mmu_idx; + + dc->isar = &arm_cpu->isar; + dc->condjmp = 0; + dc->pc_save = dc->base.pc_first; + dc->aarch64 = true; + dc->thumb = false; + dc->sctlr_b = 0; + dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? 
MO_BE : MO_LE; + dc->condexec_mask = 0; + dc->condexec_cond = 0; + core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX); + dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx); + dc->tbii = EX_TBFLAG_A64(tb_flags, TBII); + dc->tbid = EX_TBFLAG_A64(tb_flags, TBID); + dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA); + dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx); +#if !defined(CONFIG_USER_ONLY) + dc->user = (dc->current_el == 0); +#endif + dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL); + dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM); + dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL); + dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL); + dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL); + dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16; + dc->svl = (EX_TBFLAG_A64(tb_flags, SVL) + 1) * 16; + dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE); + dc->bt = EX_TBFLAG_A64(tb_flags, BT); + dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE); + dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV); + dc->ata = EX_TBFLAG_A64(tb_flags, ATA); + dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE); + dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE); + dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM); + dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA); + dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING); + dc->vec_len = 0; + dc->vec_stride = 0; + dc->cp_regs = arm_cpu->cp_regs; + dc->features = env->features; + dc->dcz_blocksize = arm_cpu->dcz_blocksize; + +#ifdef CONFIG_USER_ONLY + /* In sve_probe_page, we assume TBI is enabled. */ + tcg_debug_assert(dc->tbid & 1); +#endif + + /* Single step state. The code-generation logic here is: + * SS_ACTIVE == 0: + * generate code with no special handling for single-stepping (except + * that anything that can make us go to SS_ACTIVE == 1 must end the TB; + * this happens anyway because those changes are all system register or + * PSTATE writes). + * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending) + * emit code for one insn + * emit code to clear PSTATE.SS + * emit code to generate software step exception for completed step + * end TB (as usual for having generated an exception) + * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending) + * emit code to generate a software step exception + * end the TB + */ + dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE); + dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS); + dc->is_ldex = false; + + /* Bound the number of insns to execute to those left on the page. */ + bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4; + + /* If architectural single step active, limit to 1. */ + if (dc->ss_active) { + bound = 1; + } + dc->base.max_insns = MIN(dc->base.max_insns, bound); + + init_tmp_a64_array(dc); +} + +static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu) +{ +} + +static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + target_ulong pc_arg = dc->base.pc_next; + + if (TARGET_TB_PCREL) { + pc_arg &= ~TARGET_PAGE_MASK; + } + tcg_gen_insn_start(pc_arg, 0, 0); + dc->insn_start = tcg_last_op(); +} + +static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *s = container_of(dcbase, DisasContext, base); + CPUARMState *env = cpu->env_ptr; + uint64_t pc = s->base.pc_next; uint32_t insn; - s->pc_curr = s->base.pc_next; - insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b); + /* Singlestep exceptions have the highest priority. 
*/ + if (s->ss_active && !s->pstate_ss) { + /* Singlestep state is Active-pending. + * If we're in this state at the start of a TB then either + * a) we just took an exception to an EL which is being debugged + * and this is the first insn in the exception handler + * b) debug exceptions were masked and we just unmasked them + * without changing EL (eg by clearing PSTATE.D) + * In either case we're going to take a swstep exception in the + * "did not step an insn" case, and so the syndrome ISV and EX + * bits should be zero. + */ + assert(s->base.num_insns == 1); + gen_swstep_exception(s, 0, 0); + s->base.is_jmp = DISAS_NORETURN; + s->base.pc_next = pc + 4; + return; + } + + if (pc & 3) { + /* + * PC alignment fault. This has priority over the instruction abort + * that we would receive from a translation fault via arm_ldl_code. + * This should only be possible after an indirect branch, at the + * start of the TB. + */ + assert(s->base.num_insns == 1); + gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc)); + s->base.is_jmp = DISAS_NORETURN; + s->base.pc_next = QEMU_ALIGN_UP(pc, 4); + return; + } + + s->pc_curr = pc; + insn = arm_ldl_code(env, &s->base, pc, s->sctlr_b); s->insn = insn; - s->base.pc_next += 4; + s->base.pc_next = pc + 4; s->fp_access_checked = false; s->sve_access_checked = false; + if (s->pstate_il) { + /* + * Illegal execution state. This has priority over BTI + * exceptions, but comes after instruction abort exceptions. + */ + gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate()); + return; + } + if (dc_isar_feature(aa64_bti, s)) { if (s->base.num_insns == 1) { /* @@ -14689,9 +14909,7 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s) if (s->btype != 0 && s->guarded_page && !btype_destination_ok(insn, s->bt, s->btype)) { - gen_exception_insn(s, s->pc_curr, EXCP_UDEF, - syn_btitrap(s->btype), - default_exception_el(s)); + gen_exception_insn(s, 0, EXCP_UDEF, syn_btitrap(s->btype)); return; } } else { @@ -14700,12 +14918,22 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s) } } + s->is_nonstreaming = false; + if (s->sme_trap_nonstreaming) { + disas_sme_fa64(s, insn); + } + switch (extract32(insn, 25, 4)) { - case 0x0: case 0x1: case 0x3: /* UNALLOCATED */ + case 0x0: + if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) { + unallocated_encoding(s); + } + break; + case 0x1: case 0x3: /* UNALLOCATED */ unallocated_encoding(s); break; case 0x2: - if (!dc_isar_feature(aa64_sve, s) || !disas_sve(s, insn)) { + if (!disas_sve(s, insn)) { unallocated_encoding(s); } break; @@ -14744,137 +14972,15 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s) if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) { reset_btype(s); } -} -static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, - CPUState *cpu) -{ - DisasContext *dc = container_of(dcbase, DisasContext, base); - CPUARMState *env = cpu->env_ptr; - ARMCPU *arm_cpu = env_archcpu(env); - CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb); - int bound, core_mmu_idx; - - dc->isar = &arm_cpu->isar; - dc->condjmp = 0; - - dc->aarch64 = 1; - /* If we are coming from secure EL0 in a system with a 32-bit EL3, then - * there is no secure EL1, so we route exceptions to EL3. - */ - dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) && - !arm_el_is_aa64(env, 3); - dc->thumb = 0; - dc->sctlr_b = 0; - dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? 
MO_BE : MO_LE; - dc->condexec_mask = 0; - dc->condexec_cond = 0; - core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX); - dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx); - dc->tbii = EX_TBFLAG_A64(tb_flags, TBII); - dc->tbid = EX_TBFLAG_A64(tb_flags, TBID); - dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA); - dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx); -#if !defined(CONFIG_USER_ONLY) - dc->user = (dc->current_el == 0); -#endif - dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL); - dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM); - dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL); - dc->sve_len = (EX_TBFLAG_A64(tb_flags, ZCR_LEN) + 1) * 16; - dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE); - dc->bt = EX_TBFLAG_A64(tb_flags, BT); - dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE); - dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV); - dc->ata = EX_TBFLAG_A64(tb_flags, ATA); - dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE); - dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE); - dc->vec_len = 0; - dc->vec_stride = 0; - dc->cp_regs = arm_cpu->cp_regs; - dc->features = env->features; - dc->dcz_blocksize = arm_cpu->dcz_blocksize; - -#ifdef CONFIG_USER_ONLY - /* In sve_probe_page, we assume TBI is enabled. */ - tcg_debug_assert(dc->tbid & 1); -#endif - - /* Single step state. The code-generation logic here is: - * SS_ACTIVE == 0: - * generate code with no special handling for single-stepping (except - * that anything that can make us go to SS_ACTIVE == 1 must end the TB; - * this happens anyway because those changes are all system register or - * PSTATE writes). - * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending) - * emit code for one insn - * emit code to clear PSTATE.SS - * emit code to generate software step exception for completed step - * end TB (as usual for having generated an exception) - * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending) - * emit code to generate a software step exception - * end the TB - */ - dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE); - dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS); - dc->is_ldex = false; - dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL); - - /* Bound the number of insns to execute to those left on the page. */ - bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4; - - /* If architectural single step active, limit to 1. */ - if (dc->ss_active) { - bound = 1; - } - dc->base.max_insns = MIN(dc->base.max_insns, bound); - - init_tmp_a64_array(dc); -} - -static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu) -{ -} - -static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) -{ - DisasContext *dc = container_of(dcbase, DisasContext, base); - - tcg_gen_insn_start(dc->base.pc_next, 0, 0); - dc->insn_start = tcg_last_op(); -} - -static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) -{ - DisasContext *dc = container_of(dcbase, DisasContext, base); - CPUARMState *env = cpu->env_ptr; - - if (dc->ss_active && !dc->pstate_ss) { - /* Singlestep state is Active-pending. - * If we're in this state at the start of a TB then either - * a) we just took an exception to an EL which is being debugged - * and this is the first insn in the exception handler - * b) debug exceptions were masked and we just unmasked them - * without changing EL (eg by clearing PSTATE.D) - * In either case we're going to take a swstep exception in the - * "did not step an insn" case, and so the syndrome ISV and EX - * bits should be zero. 
- */ - assert(dc->base.num_insns == 1); - gen_swstep_exception(dc, 0, 0); - dc->base.is_jmp = DISAS_NORETURN; - } else { - disas_a64_insn(env, dc); - } - - translator_loop_temp_check(&dc->base); + translator_loop_temp_check(&s->base); } static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); - if (unlikely(dc->base.singlestep_enabled || dc->ss_active)) { + if (unlikely(dc->ss_active)) { /* Note that this means single stepping WFI doesn't halt the CPU. * For conditional branch insns this is harmless unreachable code as * gen_goto_tb() has already handled emitting the debug exception @@ -14882,15 +14988,11 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) */ switch (dc->base.is_jmp) { default: - gen_a64_set_pc_im(dc->base.pc_next); + gen_a64_update_pc(dc, 4); /* fall through */ case DISAS_EXIT: case DISAS_JUMP: - if (dc->base.singlestep_enabled) { - gen_exception_internal(EXCP_DEBUG); - } else { - gen_step_complete_exception(dc); - } + gen_step_complete_exception(dc); break; case DISAS_NORETURN: break; @@ -14899,17 +15001,17 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) switch (dc->base.is_jmp) { case DISAS_NEXT: case DISAS_TOO_MANY: - gen_goto_tb(dc, 1, dc->base.pc_next); + gen_goto_tb(dc, 1, 4); break; default: case DISAS_UPDATE_EXIT: - gen_a64_set_pc_im(dc->base.pc_next); + gen_a64_update_pc(dc, 4); /* fall through */ case DISAS_EXIT: tcg_gen_exit_tb(NULL, 0); break; case DISAS_UPDATE_NOCHAIN: - gen_a64_set_pc_im(dc->base.pc_next); + gen_a64_update_pc(dc, 4); /* fall through */ case DISAS_JUMP: tcg_gen_lookup_and_goto_ptr(); @@ -14918,40 +15020,37 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) case DISAS_SWI: break; case DISAS_WFE: - gen_a64_set_pc_im(dc->base.pc_next); + gen_a64_update_pc(dc, 4); gen_helper_wfe(cpu_env); break; case DISAS_YIELD: - gen_a64_set_pc_im(dc->base.pc_next); + gen_a64_update_pc(dc, 4); gen_helper_yield(cpu_env); break; case DISAS_WFI: - { - /* This is a special case because we don't want to just halt the CPU - * if trying to debug across a WFI. + /* + * This is a special case because we don't want to just halt + * the CPU if trying to debug across a WFI. */ - TCGv_i32 tmp = tcg_const_i32(4); - - gen_a64_set_pc_im(dc->base.pc_next); - gen_helper_wfi(cpu_env, tmp); - tcg_temp_free_i32(tmp); - /* The helper doesn't necessarily throw an exception, but we + gen_a64_update_pc(dc, 4); + gen_helper_wfi(cpu_env, tcg_constant_i32(4)); + /* + * The helper doesn't necessarily throw an exception, but we * must go back to the main loop to check for interrupts anyway. 
*/ tcg_gen_exit_tb(NULL, 0); break; } - } } } static void aarch64_tr_disas_log(const DisasContextBase *dcbase, - CPUState *cpu) + CPUState *cpu, FILE *logfile) { DisasContext *dc = container_of(dcbase, DisasContext, base); - qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first)); - log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first)); + target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size); } const TranslatorOps aarch64_translator_ops = { diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h index 58f50abca4..ad3762d1ac 100644 --- a/target/arm/translate-a64.h +++ b/target/arm/translate-a64.h @@ -18,15 +18,6 @@ #ifndef TARGET_ARM_TRANSLATE_A64_H #define TARGET_ARM_TRANSLATE_A64_H -#define unsupported_encoding(s, insn) \ - do { \ - qemu_log_mask(LOG_UNIMP, \ - "%s:%d: unsupported instruction encoding 0x%08x " \ - "at pc=%016" PRIx64 "\n", \ - __FILE__, __LINE__, insn, s->pc_curr); \ - unallocated_encoding(s); \ - } while (0) - TCGv_i64 new_tmp_a64(DisasContext *s); TCGv_i64 new_tmp_a64_local(DisasContext *s); TCGv_i64 new_tmp_a64_zero(DisasContext *s); @@ -38,6 +29,27 @@ void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v); bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn, unsigned int imms, unsigned int immr); bool sve_access_check(DisasContext *s); +bool sme_enabled_check(DisasContext *s); +bool sme_enabled_check_with_svcr(DisasContext *s, unsigned); + +/* This function corresponds to CheckStreamingSVEEnabled. */ +static inline bool sme_sm_enabled_check(DisasContext *s) +{ + return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK); +} + +/* This function corresponds to CheckSMEAndZAEnabled. */ +static inline bool sme_za_enabled_check(DisasContext *s) +{ + return sme_enabled_check_with_svcr(s, R_SVCR_ZA_MASK); +} + +/* Note that this function corresponds to CheckStreamingSVEAndZAEnabled. */ +static inline bool sme_smza_enabled_check(DisasContext *s) +{ + return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK | R_SVCR_ZA_MASK); +} + TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr); TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write, bool tag_checked, int log2_size); @@ -71,7 +83,7 @@ static inline int vec_reg_offset(DisasContext *s, int regno, { int element_size = 1 << size; int offs = element * element_size; -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN /* This is complicated slightly because vfp.zregs[n].d[0] is * still the lowest and vfp.zregs[n].d[15] the highest of the * 256 byte vector, even on big endian systems. @@ -113,10 +125,69 @@ static inline TCGv_ptr vec_full_reg_ptr(DisasContext *s, int regno) /* Return the byte size of the "whole" vector register, VL / 8. */ static inline int vec_full_reg_size(DisasContext *s) { - return s->sve_len; + return s->vl; +} + +/* Return the byte size of the vector register, SVL / 8. */ +static inline int streaming_vec_reg_size(DisasContext *s) +{ + return s->svl; +} + +/* + * Return the offset info CPUARMState of the predicate vector register Pn. + * Note for this purpose, FFR is P16. + */ +static inline int pred_full_reg_offset(DisasContext *s, int regno) +{ + return offsetof(CPUARMState, vfp.pregs[regno]); +} + +/* Return the byte size of the whole predicate register, VL / 64. */ +static inline int pred_full_reg_size(DisasContext *s) +{ + return s->vl >> 3; +} + +/* Return the byte size of the predicate register, SVL / 64. 
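+ * (s->svl is SVL in bytes, so e.g. SVL = 256 bits gives 32 >> 3 = 4 bytes,
+ * i.e. one predicate bit per byte of the streaming vector.)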
*/ +static inline int streaming_pred_reg_size(DisasContext *s) +{ + return s->svl >> 3; +} + +/* + * Round up the size of a register to a size allowed by + * the tcg vector infrastructure. Any operation which uses this + * size may assume that the bits above pred_full_reg_size are zero, + * and must leave them the same way. + * + * Note that this is not needed for the vector registers as they + * are always properly sized for tcg vectors. + */ +static inline int size_for_gvec(int size) +{ + if (size <= 8) { + return 8; + } else { + return QEMU_ALIGN_UP(size, 16); + } +} + +static inline int pred_gvec_reg_size(DisasContext *s) +{ + return size_for_gvec(pred_full_reg_size(s)); +} + +/* Return a newly allocated pointer to the predicate register. */ +static inline TCGv_ptr pred_full_reg_ptr(DisasContext *s, int regno) +{ + TCGv_ptr ret = tcg_temp_new_ptr(); + tcg_gen_addi_ptr(ret, cpu_env, pred_full_reg_offset(s, regno)); + return ret; } bool disas_sve(DisasContext *, uint32_t); +bool disas_sme(DisasContext *, uint32_t); void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz); @@ -124,4 +195,7 @@ void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs, int64_t shift, uint32_t opr_sz, uint32_t max_sz); +void gen_sve_ldr(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm); +void gen_sve_str(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm); + #endif /* TARGET_ARM_TRANSLATE_A64_H */ diff --git a/target/arm/translate-m-nocp.c b/target/arm/translate-m-nocp.c index 5eab04832c..5df7d46120 100644 --- a/target/arm/translate-m-nocp.c +++ b/target/arm/translate-m-nocp.c @@ -95,7 +95,10 @@ static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a) clear_eci_state(s); - /* End the TB, because we have updated FP control bits */ + /* + * End the TB, because we have updated FP control bits, + * and possibly VPR or LTPSIZE. + */ s->base.is_jmp = DISAS_UPDATE_EXIT; return true; } @@ -137,11 +140,11 @@ static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a) tcg_gen_andi_i32(sfpa, sfpa, R_V7M_CONTROL_SFPA_MASK); tcg_gen_or_i32(sfpa, sfpa, aspen); arm_gen_condlabel(s); - tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel); + tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel.label); if (s->fp_excp_el != 0) { - gen_exception_insn(s, s->pc_curr, EXCP_NOCP, - syn_uncategorized(), s->fp_excp_el); + gen_exception_insn_el(s, 0, EXCP_NOCP, + syn_uncategorized(), s->fp_excp_el); return true; } @@ -170,7 +173,7 @@ static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a) } /* Zero the Sregs from btmreg to topreg inclusive. 
*/ - zero = tcg_const_i64(0); + zero = tcg_constant_i64(0); if (btmreg & 1) { write_neon_element64(zero, btmreg >> 1, 1, MO_32); btmreg++; @@ -184,8 +187,7 @@ static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a) } assert(btmreg == topreg + 1); if (dc_isar_feature(aa32_mve, s)) { - TCGv_i32 z32 = tcg_const_i32(0); - store_cpu_field(z32, v7m.vpr); + store_cpu_field(tcg_constant_i32(0), v7m.vpr); } clear_eci_state(s); @@ -374,7 +376,7 @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno, if (!vfp_access_check_m(s, true)) { /* * This was only a conditional exception, so override - * gen_exception_insn()'s default to DISAS_NORETURN + * gen_exception_insn_el()'s default to DISAS_NORETURN */ s->base.is_jmp = DISAS_NEXT; break; @@ -397,6 +399,7 @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno, store_cpu_field(control, v7m.control[M_REG_S]); tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK); gen_helper_vfp_set_fpscr(cpu_env, tmp); + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; tcg_temp_free_i32(tmp); tcg_temp_free_i32(sfpa); break; @@ -409,6 +412,7 @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno, } tmp = loadfn(s, opaque, true); store_cpu_field(tmp, v7m.vpr); + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; break; case ARM_VFP_P0: { @@ -418,6 +422,7 @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno, tcg_gen_deposit_i32(vpr, vpr, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH); store_cpu_field(vpr, v7m.vpr); + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; tcg_temp_free_i32(tmp); break; } @@ -506,7 +511,7 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno, } case ARM_VFP_FPCXT_NS: { - TCGv_i32 control, sfpa, fpscr, fpdscr, zero; + TCGv_i32 control, sfpa, fpscr, fpdscr; TCGLabel *lab_active = gen_new_label(); lookup_tb = true; @@ -527,7 +532,7 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno, if (!vfp_access_check_m(s, true)) { /* * This was only a conditional exception, so override - * gen_exception_insn()'s default to DISAS_NORETURN + * gen_exception_insn_el()'s default to DISAS_NORETURN */ s->base.is_jmp = DISAS_NEXT; break; @@ -546,10 +551,9 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno, storefn(s, opaque, tmp, true); /* If SFPA is zero then set FPSCR from FPDSCR_NS */ fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]); - zero = tcg_const_i32(0); - tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, zero, fpdscr, fpscr); + tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, tcg_constant_i32(0), + fpdscr, fpscr); gen_helper_vfp_set_fpscr(cpu_env, fpscr); - tcg_temp_free_i32(zero); tcg_temp_free_i32(sfpa); tcg_temp_free_i32(fpdscr); tcg_temp_free_i32(fpscr); @@ -761,14 +765,13 @@ static bool trans_NOCP(DisasContext *s, arg_nocp *a) } if (a->cp != 10) { - gen_exception_insn(s, s->pc_curr, EXCP_NOCP, - syn_uncategorized(), default_exception_el(s)); + gen_exception_insn(s, 0, EXCP_NOCP, syn_uncategorized()); return true; } if (s->fp_excp_el != 0) { - gen_exception_insn(s, s->pc_curr, EXCP_NOCP, - syn_uncategorized(), s->fp_excp_el); + gen_exception_insn_el(s, 0, EXCP_NOCP, + syn_uncategorized(), s->fp_excp_el); return true; } diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c index a2a45036a0..db7ea3f603 100644 --- a/target/arm/translate-mve.c +++ b/target/arm/translate-mve.c @@ -25,17 +25,31 @@ #include "translate.h" #include "translate-a32.h" +static inline int vidup_imm(DisasContext *s, int x) +{ + return 1 << x; +} + /* Include the generated decoder */ #include "decode-mve.c.inc" typedef void MVEGenLdStFn(TCGv_ptr, 
TCGv_ptr, TCGv_i32); +typedef void MVEGenLdStSGFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); +typedef void MVEGenLdStIlFn(TCGv_ptr, TCGv_i32, TCGv_i32); typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr); typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr); typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); typedef void MVEGenTwoOpShiftFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); -typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64); +typedef void MVEGenLongDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64); typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32); typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64); +typedef void MVEGenVIDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32); +typedef void MVEGenVIWDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32); +typedef void MVEGenCmpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr); +typedef void MVEGenScalarCmpFn(TCGv_ptr, TCGv_ptr, TCGv_i32); +typedef void MVEGenVABAVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); +typedef void MVEGenDualAccOpFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); +typedef void MVEGenVCVTRmodeFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); /* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */ static inline long mve_qreg_offset(unsigned reg) @@ -50,6 +64,16 @@ static TCGv_ptr mve_qreg_ptr(unsigned reg) return ret; } +static bool mve_no_predication(DisasContext *s) +{ + /* + * Return true if we are executing the entire MVE instruction + * with no predication or partial-execution, and so we can safely + * use an inline TCG vector implementation. + */ + return s->eci == 0 && s->mve_no_pred; +} + static bool mve_check_qreg_bank(DisasContext *s, int qmask) { /* @@ -76,13 +100,12 @@ bool mve_eci_check(DisasContext *s) return true; default: /* Reserved value: INVSTATE UsageFault */ - gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(), - default_exception_el(s)); + gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized()); return false; } } -static void mve_update_eci(DisasContext *s) +void mve_update_eci(DisasContext *s) { /* * The helper function will always update the CPUState field, @@ -198,6 +221,267 @@ DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h, MO_8) DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w, MO_8) DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w, MO_16) +static bool do_ldst_sg(DisasContext *s, arg_vldst_sg *a, MVEGenLdStSGFn fn) +{ + TCGv_i32 addr; + TCGv_ptr qd, qm; + + if (!dc_isar_feature(aa32_mve, s) || + !mve_check_qreg_bank(s, a->qd | a->qm) || + !fn || a->rn == 15) { + /* Rn case is UNPREDICTABLE */ + return false; + } + + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + addr = load_reg(s, a->rn); + + qd = mve_qreg_ptr(a->qd); + qm = mve_qreg_ptr(a->qm); + fn(cpu_env, qd, qm, addr); + tcg_temp_free_ptr(qd); + tcg_temp_free_ptr(qm); + tcg_temp_free_i32(addr); + mve_update_eci(s); + return true; +} + +/* + * The naming scheme here is "vldrb_sg_sh == in-memory byte loads + * signextended to halfword elements in register". _os_ indicates that + * the offsets in Qm should be scaled by the element size. 
+ */ +/* This macro is just to make the arrays more compact in these functions */ +#define F(N) gen_helper_mve_##N + +/* VLDRB/VSTRB (ie msize 1) with OS=1 is UNPREDICTABLE; we UNDEF */ +static bool trans_VLDR_S_sg(DisasContext *s, arg_vldst_sg *a) +{ + static MVEGenLdStSGFn * const fns[2][4][4] = { { + { NULL, F(vldrb_sg_sh), F(vldrb_sg_sw), NULL }, + { NULL, NULL, F(vldrh_sg_sw), NULL }, + { NULL, NULL, NULL, NULL }, + { NULL, NULL, NULL, NULL } + }, { + { NULL, NULL, NULL, NULL }, + { NULL, NULL, F(vldrh_sg_os_sw), NULL }, + { NULL, NULL, NULL, NULL }, + { NULL, NULL, NULL, NULL } + } + }; + if (a->qd == a->qm) { + return false; /* UNPREDICTABLE */ + } + return do_ldst_sg(s, a, fns[a->os][a->msize][a->size]); +} + +static bool trans_VLDR_U_sg(DisasContext *s, arg_vldst_sg *a) +{ + static MVEGenLdStSGFn * const fns[2][4][4] = { { + { F(vldrb_sg_ub), F(vldrb_sg_uh), F(vldrb_sg_uw), NULL }, + { NULL, F(vldrh_sg_uh), F(vldrh_sg_uw), NULL }, + { NULL, NULL, F(vldrw_sg_uw), NULL }, + { NULL, NULL, NULL, F(vldrd_sg_ud) } + }, { + { NULL, NULL, NULL, NULL }, + { NULL, F(vldrh_sg_os_uh), F(vldrh_sg_os_uw), NULL }, + { NULL, NULL, F(vldrw_sg_os_uw), NULL }, + { NULL, NULL, NULL, F(vldrd_sg_os_ud) } + } + }; + if (a->qd == a->qm) { + return false; /* UNPREDICTABLE */ + } + return do_ldst_sg(s, a, fns[a->os][a->msize][a->size]); +} + +static bool trans_VSTR_sg(DisasContext *s, arg_vldst_sg *a) +{ + static MVEGenLdStSGFn * const fns[2][4][4] = { { + { F(vstrb_sg_ub), F(vstrb_sg_uh), F(vstrb_sg_uw), NULL }, + { NULL, F(vstrh_sg_uh), F(vstrh_sg_uw), NULL }, + { NULL, NULL, F(vstrw_sg_uw), NULL }, + { NULL, NULL, NULL, F(vstrd_sg_ud) } + }, { + { NULL, NULL, NULL, NULL }, + { NULL, F(vstrh_sg_os_uh), F(vstrh_sg_os_uw), NULL }, + { NULL, NULL, F(vstrw_sg_os_uw), NULL }, + { NULL, NULL, NULL, F(vstrd_sg_os_ud) } + } + }; + return do_ldst_sg(s, a, fns[a->os][a->msize][a->size]); +} + +#undef F + +static bool do_ldst_sg_imm(DisasContext *s, arg_vldst_sg_imm *a, + MVEGenLdStSGFn *fn, unsigned msize) +{ + uint32_t offset; + TCGv_ptr qd, qm; + + if (!dc_isar_feature(aa32_mve, s) || + !mve_check_qreg_bank(s, a->qd | a->qm) || + !fn) { + return false; + } + + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + offset = a->imm << msize; + if (!a->a) { + offset = -offset; + } + + qd = mve_qreg_ptr(a->qd); + qm = mve_qreg_ptr(a->qm); + fn(cpu_env, qd, qm, tcg_constant_i32(offset)); + tcg_temp_free_ptr(qd); + tcg_temp_free_ptr(qm); + mve_update_eci(s); + return true; +} + +static bool trans_VLDRW_sg_imm(DisasContext *s, arg_vldst_sg_imm *a) +{ + static MVEGenLdStSGFn * const fns[] = { + gen_helper_mve_vldrw_sg_uw, + gen_helper_mve_vldrw_sg_wb_uw, + }; + if (a->qd == a->qm) { + return false; /* UNPREDICTABLE */ + } + return do_ldst_sg_imm(s, a, fns[a->w], MO_32); +} + +static bool trans_VLDRD_sg_imm(DisasContext *s, arg_vldst_sg_imm *a) +{ + static MVEGenLdStSGFn * const fns[] = { + gen_helper_mve_vldrd_sg_ud, + gen_helper_mve_vldrd_sg_wb_ud, + }; + if (a->qd == a->qm) { + return false; /* UNPREDICTABLE */ + } + return do_ldst_sg_imm(s, a, fns[a->w], MO_64); +} + +static bool trans_VSTRW_sg_imm(DisasContext *s, arg_vldst_sg_imm *a) +{ + static MVEGenLdStSGFn * const fns[] = { + gen_helper_mve_vstrw_sg_uw, + gen_helper_mve_vstrw_sg_wb_uw, + }; + return do_ldst_sg_imm(s, a, fns[a->w], MO_32); +} + +static bool trans_VSTRD_sg_imm(DisasContext *s, arg_vldst_sg_imm *a) +{ + static MVEGenLdStSGFn * const fns[] = { + gen_helper_mve_vstrd_sg_ud, + gen_helper_mve_vstrd_sg_wb_ud, + }; + return 
do_ldst_sg_imm(s, a, fns[a->w], MO_64); +} + +static bool do_vldst_il(DisasContext *s, arg_vldst_il *a, MVEGenLdStIlFn *fn, + int addrinc) +{ + TCGv_i32 rn; + + if (!dc_isar_feature(aa32_mve, s) || + !mve_check_qreg_bank(s, a->qd) || + !fn || (a->rn == 13 && a->w) || a->rn == 15) { + /* Variously UNPREDICTABLE or UNDEF or related-encoding */ + return false; + } + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + rn = load_reg(s, a->rn); + /* + * We pass the index of Qd, not a pointer, because the helper must + * access multiple Q registers starting at Qd and working up. + */ + fn(cpu_env, tcg_constant_i32(a->qd), rn); + + if (a->w) { + tcg_gen_addi_i32(rn, rn, addrinc); + store_reg(s, a->rn, rn); + } else { + tcg_temp_free_i32(rn); + } + mve_update_and_store_eci(s); + return true; +} + +/* This macro is just to make the arrays more compact in these functions */ +#define F(N) gen_helper_mve_##N + +static bool trans_VLD2(DisasContext *s, arg_vldst_il *a) +{ + static MVEGenLdStIlFn * const fns[4][4] = { + { F(vld20b), F(vld20h), F(vld20w), NULL, }, + { F(vld21b), F(vld21h), F(vld21w), NULL, }, + { NULL, NULL, NULL, NULL }, + { NULL, NULL, NULL, NULL }, + }; + if (a->qd > 6) { + return false; + } + return do_vldst_il(s, a, fns[a->pat][a->size], 32); +} + +static bool trans_VLD4(DisasContext *s, arg_vldst_il *a) +{ + static MVEGenLdStIlFn * const fns[4][4] = { + { F(vld40b), F(vld40h), F(vld40w), NULL, }, + { F(vld41b), F(vld41h), F(vld41w), NULL, }, + { F(vld42b), F(vld42h), F(vld42w), NULL, }, + { F(vld43b), F(vld43h), F(vld43w), NULL, }, + }; + if (a->qd > 4) { + return false; + } + return do_vldst_il(s, a, fns[a->pat][a->size], 64); +} + +static bool trans_VST2(DisasContext *s, arg_vldst_il *a) +{ + static MVEGenLdStIlFn * const fns[4][4] = { + { F(vst20b), F(vst20h), F(vst20w), NULL, }, + { F(vst21b), F(vst21h), F(vst21w), NULL, }, + { NULL, NULL, NULL, NULL }, + { NULL, NULL, NULL, NULL }, + }; + if (a->qd > 6) { + return false; + } + return do_vldst_il(s, a, fns[a->pat][a->size], 32); +} + +static bool trans_VST4(DisasContext *s, arg_vldst_il *a) +{ + static MVEGenLdStIlFn * const fns[4][4] = { + { F(vst40b), F(vst40h), F(vst40w), NULL, }, + { F(vst41b), F(vst41h), F(vst41w), NULL, }, + { F(vst42b), F(vst42h), F(vst42w), NULL, }, + { F(vst43b), F(vst43h), F(vst43w), NULL, }, + }; + if (a->qd > 4) { + return false; + } + return do_vldst_il(s, a, fns[a->pat][a->size], 64); +} + +#undef F + static bool trans_VDUP(DisasContext *s, arg_VDUP *a) { TCGv_ptr qd; @@ -215,17 +499,22 @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a) return true; } - qd = mve_qreg_ptr(a->qd); rt = load_reg(s, a->rt); - tcg_gen_dup_i32(a->size, rt, rt); - gen_helper_mve_vdup(cpu_env, qd, rt); - tcg_temp_free_ptr(qd); + if (mve_no_predication(s)) { + tcg_gen_gvec_dup_i32(a->size, mve_qreg_offset(a->qd), 16, 16, rt); + } else { + qd = mve_qreg_ptr(a->qd); + tcg_gen_dup_i32(a->size, rt, rt); + gen_helper_mve_vdup(cpu_env, qd, rt); + tcg_temp_free_ptr(qd); + } tcg_temp_free_i32(rt); mve_update_eci(s); return true; } -static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn) +static bool do_1op_vec(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn, + GVecGen2Fn vecfn) { TCGv_ptr qd, qm; @@ -239,16 +528,25 @@ static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn) return true; } - qd = mve_qreg_ptr(a->qd); - qm = mve_qreg_ptr(a->qm); - fn(cpu_env, qd, qm); - tcg_temp_free_ptr(qd); - tcg_temp_free_ptr(qm); + if (vecfn && mve_no_predication(s)) { + vecfn(a->size, 
mve_qreg_offset(a->qd), mve_qreg_offset(a->qm), 16, 16); + } else { + qd = mve_qreg_ptr(a->qd); + qm = mve_qreg_ptr(a->qm); + fn(cpu_env, qd, qm); + tcg_temp_free_ptr(qd); + tcg_temp_free_ptr(qm); + } mve_update_eci(s); return true; } -#define DO_1OP(INSN, FN) \ +static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn) +{ + return do_1op_vec(s, a, fn, NULL); +} + +#define DO_1OP_VEC(INSN, FN, VECFN) \ static bool trans_##INSN(DisasContext *s, arg_1op *a) \ { \ static MVEGenOneOpFn * const fns[] = { \ @@ -257,13 +555,183 @@ static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn) gen_helper_mve_##FN##w, \ NULL, \ }; \ - return do_1op(s, a, fns[a->size]); \ + return do_1op_vec(s, a, fns[a->size], VECFN); \ } +#define DO_1OP(INSN, FN) DO_1OP_VEC(INSN, FN, NULL) + DO_1OP(VCLZ, vclz) DO_1OP(VCLS, vcls) -DO_1OP(VABS, vabs) -DO_1OP(VNEG, vneg) +DO_1OP_VEC(VABS, vabs, tcg_gen_gvec_abs) +DO_1OP_VEC(VNEG, vneg, tcg_gen_gvec_neg) +DO_1OP(VQABS, vqabs) +DO_1OP(VQNEG, vqneg) +DO_1OP(VMAXA, vmaxa) +DO_1OP(VMINA, vmina) + +/* + * For simple float/int conversions we use the fixed-point + * conversion helpers with a zero shift count + */ +#define DO_VCVT(INSN, HFN, SFN) \ + static void gen_##INSN##h(TCGv_ptr env, TCGv_ptr qd, TCGv_ptr qm) \ + { \ + gen_helper_mve_##HFN(env, qd, qm, tcg_constant_i32(0)); \ + } \ + static void gen_##INSN##s(TCGv_ptr env, TCGv_ptr qd, TCGv_ptr qm) \ + { \ + gen_helper_mve_##SFN(env, qd, qm, tcg_constant_i32(0)); \ + } \ + static bool trans_##INSN(DisasContext *s, arg_1op *a) \ + { \ + static MVEGenOneOpFn * const fns[] = { \ + NULL, \ + gen_##INSN##h, \ + gen_##INSN##s, \ + NULL, \ + }; \ + if (!dc_isar_feature(aa32_mve_fp, s)) { \ + return false; \ + } \ + return do_1op(s, a, fns[a->size]); \ + } + +DO_VCVT(VCVT_SF, vcvt_sh, vcvt_sf) +DO_VCVT(VCVT_UF, vcvt_uh, vcvt_uf) +DO_VCVT(VCVT_FS, vcvt_hs, vcvt_fs) +DO_VCVT(VCVT_FU, vcvt_hu, vcvt_fu) + +static bool do_vcvt_rmode(DisasContext *s, arg_1op *a, + enum arm_fprounding rmode, bool u) +{ + /* + * Handle VCVT fp to int with specified rounding mode. + * This is a 1op fn but we must pass the rounding mode as + * an immediate to the helper. 
+ */ + TCGv_ptr qd, qm; + static MVEGenVCVTRmodeFn * const fns[4][2] = { + { NULL, NULL }, + { gen_helper_mve_vcvt_rm_sh, gen_helper_mve_vcvt_rm_uh }, + { gen_helper_mve_vcvt_rm_ss, gen_helper_mve_vcvt_rm_us }, + { NULL, NULL }, + }; + MVEGenVCVTRmodeFn *fn = fns[a->size][u]; + + if (!dc_isar_feature(aa32_mve_fp, s) || + !mve_check_qreg_bank(s, a->qd | a->qm) || + !fn) { + return false; + } + + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + qd = mve_qreg_ptr(a->qd); + qm = mve_qreg_ptr(a->qm); + fn(cpu_env, qd, qm, tcg_constant_i32(arm_rmode_to_sf(rmode))); + tcg_temp_free_ptr(qd); + tcg_temp_free_ptr(qm); + mve_update_eci(s); + return true; +} + +#define DO_VCVT_RMODE(INSN, RMODE, U) \ + static bool trans_##INSN(DisasContext *s, arg_1op *a) \ + { \ + return do_vcvt_rmode(s, a, RMODE, U); \ + } \ + +DO_VCVT_RMODE(VCVTAS, FPROUNDING_TIEAWAY, false) +DO_VCVT_RMODE(VCVTAU, FPROUNDING_TIEAWAY, true) +DO_VCVT_RMODE(VCVTNS, FPROUNDING_TIEEVEN, false) +DO_VCVT_RMODE(VCVTNU, FPROUNDING_TIEEVEN, true) +DO_VCVT_RMODE(VCVTPS, FPROUNDING_POSINF, false) +DO_VCVT_RMODE(VCVTPU, FPROUNDING_POSINF, true) +DO_VCVT_RMODE(VCVTMS, FPROUNDING_NEGINF, false) +DO_VCVT_RMODE(VCVTMU, FPROUNDING_NEGINF, true) + +#define DO_VCVT_SH(INSN, FN) \ + static bool trans_##INSN(DisasContext *s, arg_1op *a) \ + { \ + if (!dc_isar_feature(aa32_mve_fp, s)) { \ + return false; \ + } \ + return do_1op(s, a, gen_helper_mve_##FN); \ + } \ + +DO_VCVT_SH(VCVTB_SH, vcvtb_sh) +DO_VCVT_SH(VCVTT_SH, vcvtt_sh) +DO_VCVT_SH(VCVTB_HS, vcvtb_hs) +DO_VCVT_SH(VCVTT_HS, vcvtt_hs) + +#define DO_VRINT(INSN, RMODE) \ + static void gen_##INSN##h(TCGv_ptr env, TCGv_ptr qd, TCGv_ptr qm) \ + { \ + gen_helper_mve_vrint_rm_h(env, qd, qm, \ + tcg_constant_i32(arm_rmode_to_sf(RMODE))); \ + } \ + static void gen_##INSN##s(TCGv_ptr env, TCGv_ptr qd, TCGv_ptr qm) \ + { \ + gen_helper_mve_vrint_rm_s(env, qd, qm, \ + tcg_constant_i32(arm_rmode_to_sf(RMODE))); \ + } \ + static bool trans_##INSN(DisasContext *s, arg_1op *a) \ + { \ + static MVEGenOneOpFn * const fns[] = { \ + NULL, \ + gen_##INSN##h, \ + gen_##INSN##s, \ + NULL, \ + }; \ + if (!dc_isar_feature(aa32_mve_fp, s)) { \ + return false; \ + } \ + return do_1op(s, a, fns[a->size]); \ + } + +DO_VRINT(VRINTN, FPROUNDING_TIEEVEN) +DO_VRINT(VRINTA, FPROUNDING_TIEAWAY) +DO_VRINT(VRINTZ, FPROUNDING_ZERO) +DO_VRINT(VRINTM, FPROUNDING_NEGINF) +DO_VRINT(VRINTP, FPROUNDING_POSINF) + +static bool trans_VRINTX(DisasContext *s, arg_1op *a) +{ + static MVEGenOneOpFn * const fns[] = { + NULL, + gen_helper_mve_vrintx_h, + gen_helper_mve_vrintx_s, + NULL, + }; + if (!dc_isar_feature(aa32_mve_fp, s)) { + return false; + } + return do_1op(s, a, fns[a->size]); +} + +/* Narrowing moves: only size 0 and 1 are valid */ +#define DO_VMOVN(INSN, FN) \ + static bool trans_##INSN(DisasContext *s, arg_1op *a) \ + { \ + static MVEGenOneOpFn * const fns[] = { \ + gen_helper_mve_##FN##b, \ + gen_helper_mve_##FN##h, \ + NULL, \ + NULL, \ + }; \ + return do_1op(s, a, fns[a->size]); \ + } + +DO_VMOVN(VMOVNB, vmovnb) +DO_VMOVN(VMOVNT, vmovnt) +DO_VMOVN(VQMOVUNB, vqmovunb) +DO_VMOVN(VQMOVUNT, vqmovunt) +DO_VMOVN(VQMOVN_BS, vqmovnbs) +DO_VMOVN(VQMOVN_TS, vqmovnts) +DO_VMOVN(VQMOVN_BU, vqmovnbu) +DO_VMOVN(VQMOVN_TU, vqmovntu) static bool trans_VREV16(DisasContext *s, arg_1op *a) { @@ -300,7 +768,7 @@ static bool trans_VREV64(DisasContext *s, arg_1op *a) static bool trans_VMVN(DisasContext *s, arg_1op *a) { - return do_1op(s, a, gen_helper_mve_vmvn); + return do_1op_vec(s, a, gen_helper_mve_vmvn, tcg_gen_gvec_not); } 
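/*
 * Editor's sketch, not part of the patch itself: for illustration of the
 * DO_1OP_VEC pattern above, DO_1OP_VEC(VABS, vabs, tcg_gen_gvec_abs)
 * expands to roughly the function below (shown under a hypothetical name
 * so it does not collide with the generated trans_VABS). do_1op_vec()
 * only takes the inline tcg_gen_gvec_abs path when mve_no_predication(s)
 * is true, i.e. when no predication or partial execution applies and the
 * whole 16-byte Q register can be processed with host vector ops;
 * otherwise it falls back to the out-of-line per-element helper.
 */
static bool trans_VABS_sketch(DisasContext *s, arg_1op *a)
{
    /* Per-element helpers, indexed by a->size (MO_8/MO_16/MO_32) */
    static MVEGenOneOpFn * const fns[] = {
        gen_helper_mve_vabsb,
        gen_helper_mve_vabsh,
        gen_helper_mve_vabsw,
        NULL,
    };
    return do_1op_vec(s, a, fns[a->size], tcg_gen_gvec_abs);
}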
static bool trans_VABS_fp(DisasContext *s, arg_1op *a) @@ -331,7 +799,8 @@ static bool trans_VNEG_fp(DisasContext *s, arg_1op *a) return do_1op(s, a, fns[a->size]); } -static bool do_2op(DisasContext *s, arg_2op *a, MVEGenTwoOpFn fn) +static bool do_2op_vec(DisasContext *s, arg_2op *a, MVEGenTwoOpFn fn, + GVecGen3Fn *vecfn) { TCGv_ptr qd, qn, qm; @@ -344,30 +813,47 @@ static bool do_2op(DisasContext *s, arg_2op *a, MVEGenTwoOpFn fn) return true; } - qd = mve_qreg_ptr(a->qd); - qn = mve_qreg_ptr(a->qn); - qm = mve_qreg_ptr(a->qm); - fn(cpu_env, qd, qn, qm); - tcg_temp_free_ptr(qd); - tcg_temp_free_ptr(qn); - tcg_temp_free_ptr(qm); + if (vecfn && mve_no_predication(s)) { + vecfn(a->size, mve_qreg_offset(a->qd), mve_qreg_offset(a->qn), + mve_qreg_offset(a->qm), 16, 16); + } else { + qd = mve_qreg_ptr(a->qd); + qn = mve_qreg_ptr(a->qn); + qm = mve_qreg_ptr(a->qm); + fn(cpu_env, qd, qn, qm); + tcg_temp_free_ptr(qd); + tcg_temp_free_ptr(qn); + tcg_temp_free_ptr(qm); + } mve_update_eci(s); return true; } -#define DO_LOGIC(INSN, HELPER) \ +static bool do_2op(DisasContext *s, arg_2op *a, MVEGenTwoOpFn *fn) +{ + return do_2op_vec(s, a, fn, NULL); +} + +#define DO_LOGIC(INSN, HELPER, VECFN) \ static bool trans_##INSN(DisasContext *s, arg_2op *a) \ { \ - return do_2op(s, a, HELPER); \ + return do_2op_vec(s, a, HELPER, VECFN); \ } -DO_LOGIC(VAND, gen_helper_mve_vand) -DO_LOGIC(VBIC, gen_helper_mve_vbic) -DO_LOGIC(VORR, gen_helper_mve_vorr) -DO_LOGIC(VORN, gen_helper_mve_vorn) -DO_LOGIC(VEOR, gen_helper_mve_veor) +DO_LOGIC(VAND, gen_helper_mve_vand, tcg_gen_gvec_and) +DO_LOGIC(VBIC, gen_helper_mve_vbic, tcg_gen_gvec_andc) +DO_LOGIC(VORR, gen_helper_mve_vorr, tcg_gen_gvec_or) +DO_LOGIC(VORN, gen_helper_mve_vorn, tcg_gen_gvec_orc) +DO_LOGIC(VEOR, gen_helper_mve_veor, tcg_gen_gvec_xor) -#define DO_2OP(INSN, FN) \ +static bool trans_VPSEL(DisasContext *s, arg_2op *a) +{ + /* This insn updates predication bits */ + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; + return do_2op(s, a, gen_helper_mve_vpsel); +} + +#define DO_2OP_VEC(INSN, FN, VECFN) \ static bool trans_##INSN(DisasContext *s, arg_2op *a) \ { \ static MVEGenTwoOpFn * const fns[] = { \ @@ -376,20 +862,22 @@ DO_LOGIC(VEOR, gen_helper_mve_veor) gen_helper_mve_##FN##w, \ NULL, \ }; \ - return do_2op(s, a, fns[a->size]); \ + return do_2op_vec(s, a, fns[a->size], VECFN); \ } -DO_2OP(VADD, vadd) -DO_2OP(VSUB, vsub) -DO_2OP(VMUL, vmul) +#define DO_2OP(INSN, FN) DO_2OP_VEC(INSN, FN, NULL) + +DO_2OP_VEC(VADD, vadd, tcg_gen_gvec_add) +DO_2OP_VEC(VSUB, vsub, tcg_gen_gvec_sub) +DO_2OP_VEC(VMUL, vmul, tcg_gen_gvec_mul) DO_2OP(VMULH_S, vmulhs) DO_2OP(VMULH_U, vmulhu) DO_2OP(VRMULH_S, vrmulhs) DO_2OP(VRMULH_U, vrmulhu) -DO_2OP(VMAX_S, vmaxs) -DO_2OP(VMAX_U, vmaxu) -DO_2OP(VMIN_S, vmins) -DO_2OP(VMIN_U, vminu) +DO_2OP_VEC(VMAX_S, vmaxs, tcg_gen_gvec_smax) +DO_2OP_VEC(VMAX_U, vmaxu, tcg_gen_gvec_umax) +DO_2OP_VEC(VMIN_S, vmins, tcg_gen_gvec_smin) +DO_2OP_VEC(VMIN_U, vminu, tcg_gen_gvec_umin) DO_2OP(VABD_S, vabds) DO_2OP(VABD_U, vabdu) DO_2OP(VHADD_S, vhadds) @@ -464,6 +952,34 @@ static bool trans_VQDMULLT(DisasContext *s, arg_2op *a) return do_2op(s, a, fns[a->size]); } +static bool trans_VMULLP_B(DisasContext *s, arg_2op *a) +{ + /* + * Note that a->size indicates the output size, ie VMULL.P8 + * is the 8x8->16 operation and a->size is MO_16; VMULL.P16 + * is the 16x16->32 operation and a->size is MO_32. 
+ */ + static MVEGenTwoOpFn * const fns[] = { + NULL, + gen_helper_mve_vmullpbh, + gen_helper_mve_vmullpbw, + NULL, + }; + return do_2op(s, a, fns[a->size]); +} + +static bool trans_VMULLP_T(DisasContext *s, arg_2op *a) +{ + /* a->size is as for trans_VMULLP_B */ + static MVEGenTwoOpFn * const fns[] = { + NULL, + gen_helper_mve_vmullpth, + gen_helper_mve_vmullptw, + NULL, + }; + return do_2op(s, a, fns[a->size]); +} + /* * VADC and VSBC: these perform an add-with-carry or subtract-with-carry * of the 32-bit elements in each lane of the input vectors, where the @@ -501,6 +1017,42 @@ static bool trans_VSBCI(DisasContext *s, arg_2op *a) return do_2op(s, a, gen_helper_mve_vsbci); } +#define DO_2OP_FP(INSN, FN) \ + static bool trans_##INSN(DisasContext *s, arg_2op *a) \ + { \ + static MVEGenTwoOpFn * const fns[] = { \ + NULL, \ + gen_helper_mve_##FN##h, \ + gen_helper_mve_##FN##s, \ + NULL, \ + }; \ + if (!dc_isar_feature(aa32_mve_fp, s)) { \ + return false; \ + } \ + return do_2op(s, a, fns[a->size]); \ + } + +DO_2OP_FP(VADD_fp, vfadd) +DO_2OP_FP(VSUB_fp, vfsub) +DO_2OP_FP(VMUL_fp, vfmul) +DO_2OP_FP(VABD_fp, vfabd) +DO_2OP_FP(VMAXNM, vmaxnm) +DO_2OP_FP(VMINNM, vminnm) +DO_2OP_FP(VCADD90_fp, vfcadd90) +DO_2OP_FP(VCADD270_fp, vfcadd270) +DO_2OP_FP(VFMA, vfma) +DO_2OP_FP(VFMS, vfms) +DO_2OP_FP(VCMUL0, vcmul0) +DO_2OP_FP(VCMUL90, vcmul90) +DO_2OP_FP(VCMUL180, vcmul180) +DO_2OP_FP(VCMUL270, vcmul270) +DO_2OP_FP(VCMLA0, vcmla0) +DO_2OP_FP(VCMLA90, vcmla90) +DO_2OP_FP(VCMLA180, vcmla180) +DO_2OP_FP(VCMLA270, vcmla270) +DO_2OP_FP(VMAXNMA, vmaxnma) +DO_2OP_FP(VMINNMA, vminnma) + static bool do_2op_scalar(DisasContext *s, arg_2scalar *a, MVEGenTwoOpScalarFn fn) { @@ -531,7 +1083,7 @@ static bool do_2op_scalar(DisasContext *s, arg_2scalar *a, return true; } -#define DO_2OP_SCALAR(INSN, FN) \ +#define DO_2OP_SCALAR(INSN, FN) \ static bool trans_##INSN(DisasContext *s, arg_2scalar *a) \ { \ static MVEGenTwoOpScalarFn * const fns[] = { \ @@ -557,6 +1109,12 @@ DO_2OP_SCALAR(VQSUB_U_scalar, vqsubu_scalar) DO_2OP_SCALAR(VQDMULH_scalar, vqdmulh_scalar) DO_2OP_SCALAR(VQRDMULH_scalar, vqrdmulh_scalar) DO_2OP_SCALAR(VBRSR, vbrsr) +DO_2OP_SCALAR(VMLA, vmla) +DO_2OP_SCALAR(VMLAS, vmlas) +DO_2OP_SCALAR(VQDMLAH, vqdmlah) +DO_2OP_SCALAR(VQRDMLAH, vqrdmlah) +DO_2OP_SCALAR(VQDMLASH, vqdmlash) +DO_2OP_SCALAR(VQRDMLASH, vqrdmlash) static bool trans_VQDMULLB_scalar(DisasContext *s, arg_2scalar *a) { @@ -588,8 +1146,30 @@ static bool trans_VQDMULLT_scalar(DisasContext *s, arg_2scalar *a) return do_2op_scalar(s, a, fns[a->size]); } + +#define DO_2OP_FP_SCALAR(INSN, FN) \ + static bool trans_##INSN(DisasContext *s, arg_2scalar *a) \ + { \ + static MVEGenTwoOpScalarFn * const fns[] = { \ + NULL, \ + gen_helper_mve_##FN##h, \ + gen_helper_mve_##FN##s, \ + NULL, \ + }; \ + if (!dc_isar_feature(aa32_mve_fp, s)) { \ + return false; \ + } \ + return do_2op_scalar(s, a, fns[a->size]); \ + } + +DO_2OP_FP_SCALAR(VADD_fp_scalar, vfadd_scalar) +DO_2OP_FP_SCALAR(VSUB_fp_scalar, vfsub_scalar) +DO_2OP_FP_SCALAR(VMUL_fp_scalar, vfmul_scalar) +DO_2OP_FP_SCALAR(VFMA_scalar, vfma_scalar) +DO_2OP_FP_SCALAR(VFMAS_scalar, vfmas_scalar) + static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a, - MVEGenDualAccOpFn *fn) + MVEGenLongDualAccOpFn *fn) { TCGv_ptr qn, qm; TCGv_i64 rda; @@ -647,7 +1227,7 @@ static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a, static bool trans_VMLALDAV_S(DisasContext *s, arg_vmlaldav *a) { - static MVEGenDualAccOpFn * const fns[4][2] = { + static MVEGenLongDualAccOpFn * const fns[4][2] = { { NULL, NULL }, 
{ gen_helper_mve_vmlaldavsh, gen_helper_mve_vmlaldavxsh }, { gen_helper_mve_vmlaldavsw, gen_helper_mve_vmlaldavxsw }, @@ -658,7 +1238,7 @@ static bool trans_VMLALDAV_S(DisasContext *s, arg_vmlaldav *a) static bool trans_VMLALDAV_U(DisasContext *s, arg_vmlaldav *a) { - static MVEGenDualAccOpFn * const fns[4][2] = { + static MVEGenLongDualAccOpFn * const fns[4][2] = { { NULL, NULL }, { gen_helper_mve_vmlaldavuh, NULL }, { gen_helper_mve_vmlaldavuw, NULL }, @@ -669,7 +1249,7 @@ static bool trans_VMLALDAV_U(DisasContext *s, arg_vmlaldav *a) static bool trans_VMLSLDAV(DisasContext *s, arg_vmlaldav *a) { - static MVEGenDualAccOpFn * const fns[4][2] = { + static MVEGenLongDualAccOpFn * const fns[4][2] = { { NULL, NULL }, { gen_helper_mve_vmlsldavsh, gen_helper_mve_vmlsldavxsh }, { gen_helper_mve_vmlsldavsw, gen_helper_mve_vmlsldavxsw }, @@ -680,7 +1260,7 @@ static bool trans_VMLSLDAV(DisasContext *s, arg_vmlaldav *a) static bool trans_VRMLALDAVH_S(DisasContext *s, arg_vmlaldav *a) { - static MVEGenDualAccOpFn * const fns[] = { + static MVEGenLongDualAccOpFn * const fns[] = { gen_helper_mve_vrmlaldavhsw, gen_helper_mve_vrmlaldavhxsw, }; return do_long_dual_acc(s, a, fns[a->x]); @@ -688,7 +1268,7 @@ static bool trans_VRMLALDAVH_S(DisasContext *s, arg_vmlaldav *a) static bool trans_VRMLALDAVH_U(DisasContext *s, arg_vmlaldav *a) { - static MVEGenDualAccOpFn * const fns[] = { + static MVEGenLongDualAccOpFn * const fns[] = { gen_helper_mve_vrmlaldavhuw, NULL, }; return do_long_dual_acc(s, a, fns[a->x]); @@ -696,39 +1276,93 @@ static bool trans_VRMLALDAVH_U(DisasContext *s, arg_vmlaldav *a) static bool trans_VRMLSLDAVH(DisasContext *s, arg_vmlaldav *a) { - static MVEGenDualAccOpFn * const fns[] = { + static MVEGenLongDualAccOpFn * const fns[] = { gen_helper_mve_vrmlsldavhsw, gen_helper_mve_vrmlsldavhxsw, }; return do_long_dual_acc(s, a, fns[a->x]); } -static bool trans_VPST(DisasContext *s, arg_VPST *a) +static bool do_dual_acc(DisasContext *s, arg_vmladav *a, MVEGenDualAccOpFn *fn) { - TCGv_i32 vpr; + TCGv_ptr qn, qm; + TCGv_i32 rda; - /* mask == 0 is a "related encoding" */ - if (!dc_isar_feature(aa32_mve, s) || !a->mask) { + if (!dc_isar_feature(aa32_mve, s) || + !mve_check_qreg_bank(s, a->qn) || + !fn) { return false; } if (!mve_eci_check(s) || !vfp_access_check(s)) { return true; } + + qn = mve_qreg_ptr(a->qn); + qm = mve_qreg_ptr(a->qm); + + /* + * This insn is subject to beat-wise execution. Partial execution + * of an A=0 (no-accumulate) insn which does not execute the first + * beat must start with the current rda value, not 0. 
+ */ + if (a->a || mve_skip_first_beat(s)) { + rda = load_reg(s, a->rda); + } else { + rda = tcg_const_i32(0); + } + + fn(rda, cpu_env, qn, qm, rda); + store_reg(s, a->rda, rda); + tcg_temp_free_ptr(qn); + tcg_temp_free_ptr(qm); + + mve_update_eci(s); + return true; +} + +#define DO_DUAL_ACC(INSN, FN) \ + static bool trans_##INSN(DisasContext *s, arg_vmladav *a) \ + { \ + static MVEGenDualAccOpFn * const fns[4][2] = { \ + { gen_helper_mve_##FN##b, gen_helper_mve_##FN##xb }, \ + { gen_helper_mve_##FN##h, gen_helper_mve_##FN##xh }, \ + { gen_helper_mve_##FN##w, gen_helper_mve_##FN##xw }, \ + { NULL, NULL }, \ + }; \ + return do_dual_acc(s, a, fns[a->size][a->x]); \ + } + +DO_DUAL_ACC(VMLADAV_S, vmladavs) +DO_DUAL_ACC(VMLSDAV, vmlsdav) + +static bool trans_VMLADAV_U(DisasContext *s, arg_vmladav *a) +{ + static MVEGenDualAccOpFn * const fns[4][2] = { + { gen_helper_mve_vmladavub, NULL }, + { gen_helper_mve_vmladavuh, NULL }, + { gen_helper_mve_vmladavuw, NULL }, + { NULL, NULL }, + }; + return do_dual_acc(s, a, fns[a->size][a->x]); +} + +static void gen_vpst(DisasContext *s, uint32_t mask) +{ /* * Set the VPR mask fields. We take advantage of MASK01 and MASK23 * being adjacent fields in the register. * - * This insn is not predicated, but it is subject to beat-wise + * Updating the masks is not predicated, but it is subject to beat-wise * execution, and the mask is updated on the odd-numbered beats. * So if PSR.ECI says we should skip beat 1, we mustn't update the * 01 mask field. */ - vpr = load_cpu_field(v7m.vpr); + TCGv_i32 vpr = load_cpu_field(v7m.vpr); switch (s->eci) { case ECI_NONE: case ECI_A0: /* Update both 01 and 23 fields */ tcg_gen_deposit_i32(vpr, vpr, - tcg_constant_i32(a->mask | (a->mask << 4)), + tcg_constant_i32(mask | (mask << 4)), R_V7M_VPR_MASK01_SHIFT, R_V7M_VPR_MASK01_LENGTH + R_V7M_VPR_MASK23_LENGTH); break; @@ -737,17 +1371,50 @@ static bool trans_VPST(DisasContext *s, arg_VPST *a) case ECI_A0A1A2B0: /* Update only the 23 mask field */ tcg_gen_deposit_i32(vpr, vpr, - tcg_constant_i32(a->mask), + tcg_constant_i32(mask), R_V7M_VPR_MASK23_SHIFT, R_V7M_VPR_MASK23_LENGTH); break; default: g_assert_not_reached(); } store_cpu_field(vpr, v7m.vpr); +} + +static bool trans_VPST(DisasContext *s, arg_VPST *a) +{ + /* mask == 0 is a "related encoding" */ + if (!dc_isar_feature(aa32_mve, s) || !a->mask) { + return false; + } + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + gen_vpst(s, a->mask); mve_update_and_store_eci(s); return true; } +static bool trans_VPNOT(DisasContext *s, arg_VPNOT *a) +{ + /* + * Invert the predicate in VPR.P0. We have call out to + * a helper because this insn itself is beatwise and can + * be predicated. 
+ */ + if (!dc_isar_feature(aa32_mve, s)) { + return false; + } + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + gen_helper_mve_vpnot(cpu_env); + /* This insn updates predication bits */ + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; + mve_update_eci(s); + return true; +} + static bool trans_VADDV(DisasContext *s, arg_VADDV *a) { /* VADDV: vector add across vector */ @@ -853,7 +1520,8 @@ static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a) return true; } -static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn) +static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn, + GVecGen2iFn *vecfn) { TCGv_ptr qd; uint64_t imm; @@ -869,17 +1537,29 @@ static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn) imm = asimd_imm_const(a->imm, a->cmode, a->op); - qd = mve_qreg_ptr(a->qd); - fn(cpu_env, qd, tcg_constant_i64(imm)); - tcg_temp_free_ptr(qd); + if (vecfn && mve_no_predication(s)) { + vecfn(MO_64, mve_qreg_offset(a->qd), mve_qreg_offset(a->qd), + imm, 16, 16); + } else { + qd = mve_qreg_ptr(a->qd); + fn(cpu_env, qd, tcg_constant_i64(imm)); + tcg_temp_free_ptr(qd); + } mve_update_eci(s); return true; } +static void gen_gvec_vmovi(unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t c, uint32_t oprsz, uint32_t maxsz) +{ + tcg_gen_gvec_dup_imm(vece, dofs, oprsz, maxsz, c); +} + static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a) { /* Handle decode of cmode/op here between VORR/VBIC/VMOV */ MVEGenOneOpImmFn *fn; + GVecGen2iFn *vecfn; if ((a->cmode & 1) && a->cmode < 12) { if (a->op) { @@ -888,8 +1568,10 @@ static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a) * so the VBIC becomes a logical AND operation. */ fn = gen_helper_mve_vandi; + vecfn = tcg_gen_gvec_andi; } else { fn = gen_helper_mve_vorri; + vecfn = tcg_gen_gvec_ori; } } else { /* There is one unallocated cmode/op combination in this space */ @@ -898,12 +1580,13 @@ static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a) } /* asimd_imm_const() sorts out VMVNI vs VMOVI for us */ fn = gen_helper_mve_vmovi; + vecfn = gen_gvec_vmovi; } - return do_1imm(s, a, fn); + return do_1imm(s, a, fn, vecfn); } -static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn, - bool negateshift) +static bool do_2shift_vec(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn, + bool negateshift, GVecGen2iFn vecfn) { TCGv_ptr qd, qm; int shift = a->shift; @@ -926,50 +1609,208 @@ static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn, shift = -shift; } - qd = mve_qreg_ptr(a->qd); - qm = mve_qreg_ptr(a->qm); - fn(cpu_env, qd, qm, tcg_constant_i32(shift)); - tcg_temp_free_ptr(qd); - tcg_temp_free_ptr(qm); + if (vecfn && mve_no_predication(s)) { + vecfn(a->size, mve_qreg_offset(a->qd), mve_qreg_offset(a->qm), + shift, 16, 16); + } else { + qd = mve_qreg_ptr(a->qd); + qm = mve_qreg_ptr(a->qm); + fn(cpu_env, qd, qm, tcg_constant_i32(shift)); + tcg_temp_free_ptr(qd); + tcg_temp_free_ptr(qm); + } mve_update_eci(s); return true; } -#define DO_2SHIFT(INSN, FN, NEGATESHIFT) \ - static bool trans_##INSN(DisasContext *s, arg_2shift *a) \ - { \ - static MVEGenTwoOpShiftFn * const fns[] = { \ - gen_helper_mve_##FN##b, \ - gen_helper_mve_##FN##h, \ - gen_helper_mve_##FN##w, \ - NULL, \ - }; \ - return do_2shift(s, a, fns[a->size], NEGATESHIFT); \ +static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn, + bool negateshift) +{ + return do_2shift_vec(s, a, fn, negateshift, NULL); +} + +#define DO_2SHIFT_VEC(INSN, FN, NEGATESHIFT, VECFN) \ + static 
bool trans_##INSN(DisasContext *s, arg_2shift *a) \ + { \ + static MVEGenTwoOpShiftFn * const fns[] = { \ + gen_helper_mve_##FN##b, \ + gen_helper_mve_##FN##h, \ + gen_helper_mve_##FN##w, \ + NULL, \ + }; \ + return do_2shift_vec(s, a, fns[a->size], NEGATESHIFT, VECFN); \ } -DO_2SHIFT(VSHLI, vshli_u, false) +#define DO_2SHIFT(INSN, FN, NEGATESHIFT) \ + DO_2SHIFT_VEC(INSN, FN, NEGATESHIFT, NULL) + +static void do_gvec_shri_s(unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t shift, uint32_t oprsz, uint32_t maxsz) +{ + /* + * We get here with a negated shift count, and we must handle + * shifts by the element size, which tcg_gen_gvec_sari() does not do. + */ + shift = -shift; + if (shift == (8 << vece)) { + shift--; + } + tcg_gen_gvec_sari(vece, dofs, aofs, shift, oprsz, maxsz); +} + +static void do_gvec_shri_u(unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t shift, uint32_t oprsz, uint32_t maxsz) +{ + /* + * We get here with a negated shift count, and we must handle + * shifts by the element size, which tcg_gen_gvec_shri() does not do. + */ + shift = -shift; + if (shift == (8 << vece)) { + tcg_gen_gvec_dup_imm(vece, dofs, oprsz, maxsz, 0); + } else { + tcg_gen_gvec_shri(vece, dofs, aofs, shift, oprsz, maxsz); + } +} + +DO_2SHIFT_VEC(VSHLI, vshli_u, false, tcg_gen_gvec_shli) DO_2SHIFT(VQSHLI_S, vqshli_s, false) DO_2SHIFT(VQSHLI_U, vqshli_u, false) DO_2SHIFT(VQSHLUI, vqshlui_s, false) /* These right shifts use a left-shift helper with negated shift count */ -DO_2SHIFT(VSHRI_S, vshli_s, true) -DO_2SHIFT(VSHRI_U, vshli_u, true) +DO_2SHIFT_VEC(VSHRI_S, vshli_s, true, do_gvec_shri_s) +DO_2SHIFT_VEC(VSHRI_U, vshli_u, true, do_gvec_shri_u) DO_2SHIFT(VRSHRI_S, vrshli_s, true) DO_2SHIFT(VRSHRI_U, vrshli_u, true) -DO_2SHIFT(VSRI, vsri, false) -DO_2SHIFT(VSLI, vsli, false) +DO_2SHIFT_VEC(VSRI, vsri, false, gen_gvec_sri) +DO_2SHIFT_VEC(VSLI, vsli, false, gen_gvec_sli) -#define DO_VSHLL(INSN, FN) \ +#define DO_2SHIFT_FP(INSN, FN) \ static bool trans_##INSN(DisasContext *s, arg_2shift *a) \ { \ - static MVEGenTwoOpShiftFn * const fns[] = { \ - gen_helper_mve_##FN##b, \ - gen_helper_mve_##FN##h, \ - }; \ - return do_2shift(s, a, fns[a->size], false); \ + if (!dc_isar_feature(aa32_mve_fp, s)) { \ + return false; \ + } \ + return do_2shift(s, a, gen_helper_mve_##FN, false); \ } +DO_2SHIFT_FP(VCVT_SH_fixed, vcvt_sh) +DO_2SHIFT_FP(VCVT_UH_fixed, vcvt_uh) +DO_2SHIFT_FP(VCVT_HS_fixed, vcvt_hs) +DO_2SHIFT_FP(VCVT_HU_fixed, vcvt_hu) +DO_2SHIFT_FP(VCVT_SF_fixed, vcvt_sf) +DO_2SHIFT_FP(VCVT_UF_fixed, vcvt_uf) +DO_2SHIFT_FP(VCVT_FS_fixed, vcvt_fs) +DO_2SHIFT_FP(VCVT_FU_fixed, vcvt_fu) + +static bool do_2shift_scalar(DisasContext *s, arg_shl_scalar *a, + MVEGenTwoOpShiftFn *fn) +{ + TCGv_ptr qda; + TCGv_i32 rm; + + if (!dc_isar_feature(aa32_mve, s) || + !mve_check_qreg_bank(s, a->qda) || + a->rm == 13 || a->rm == 15 || !fn) { + /* Rm cases are UNPREDICTABLE */ + return false; + } + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + qda = mve_qreg_ptr(a->qda); + rm = load_reg(s, a->rm); + fn(cpu_env, qda, qda, rm); + tcg_temp_free_ptr(qda); + tcg_temp_free_i32(rm); + mve_update_eci(s); + return true; +} + +#define DO_2SHIFT_SCALAR(INSN, FN) \ + static bool trans_##INSN(DisasContext *s, arg_shl_scalar *a) \ + { \ + static MVEGenTwoOpShiftFn * const fns[] = { \ + gen_helper_mve_##FN##b, \ + gen_helper_mve_##FN##h, \ + gen_helper_mve_##FN##w, \ + NULL, \ + }; \ + return do_2shift_scalar(s, a, fns[a->size]); \ + } + +DO_2SHIFT_SCALAR(VSHL_S_scalar, vshli_s) +DO_2SHIFT_SCALAR(VSHL_U_scalar, 
vshli_u)
+DO_2SHIFT_SCALAR(VRSHL_S_scalar, vrshli_s)
+DO_2SHIFT_SCALAR(VRSHL_U_scalar, vrshli_u)
+DO_2SHIFT_SCALAR(VQSHL_S_scalar, vqshli_s)
+DO_2SHIFT_SCALAR(VQSHL_U_scalar, vqshli_u)
+DO_2SHIFT_SCALAR(VQRSHL_S_scalar, vqrshli_s)
+DO_2SHIFT_SCALAR(VQRSHL_U_scalar, vqrshli_u)
+
+#define DO_VSHLL(INSN, FN)                                      \
+    static bool trans_##INSN(DisasContext *s, arg_2shift *a)   \
+    {                                                           \
+        static MVEGenTwoOpShiftFn * const fns[] = {             \
+            gen_helper_mve_##FN##b,                             \
+            gen_helper_mve_##FN##h,                             \
+        };                                                      \
+        return do_2shift_vec(s, a, fns[a->size], false, do_gvec_##FN); \
+    }
+
+/*
+ * For the VSHLL vector helpers, the vece is the size of the input
+ * (ie MO_8 or MO_16); the helpers want to work in the output size.
+ * The shift count can be 0..<esize>, inclusive. (0 is VMOVL.)
+ */
+static void do_gvec_vshllbs(unsigned vece, uint32_t dofs, uint32_t aofs,
+                            int64_t shift, uint32_t oprsz, uint32_t maxsz)
+{
+    unsigned ovece = vece + 1;
+    unsigned ibits = vece == MO_8 ? 8 : 16;
+    tcg_gen_gvec_shli(ovece, dofs, aofs, ibits, oprsz, maxsz);
+    tcg_gen_gvec_sari(ovece, dofs, dofs, ibits - shift, oprsz, maxsz);
+}
+
+static void do_gvec_vshllbu(unsigned vece, uint32_t dofs, uint32_t aofs,
+                            int64_t shift, uint32_t oprsz, uint32_t maxsz)
+{
+    unsigned ovece = vece + 1;
+    tcg_gen_gvec_andi(ovece, dofs, aofs,
+                      ovece == MO_16 ? 0xff : 0xffff, oprsz, maxsz);
+    tcg_gen_gvec_shli(ovece, dofs, dofs, shift, oprsz, maxsz);
+}
+
+static void do_gvec_vshllts(unsigned vece, uint32_t dofs, uint32_t aofs,
+                            int64_t shift, uint32_t oprsz, uint32_t maxsz)
+{
+    unsigned ovece = vece + 1;
+    unsigned ibits = vece == MO_8 ? 8 : 16;
+    if (shift == 0) {
+        tcg_gen_gvec_sari(ovece, dofs, aofs, ibits, oprsz, maxsz);
+    } else {
+        tcg_gen_gvec_andi(ovece, dofs, aofs,
+                          ovece == MO_16 ? 0xff00 : 0xffff0000, oprsz, maxsz);
+        tcg_gen_gvec_sari(ovece, dofs, dofs, ibits - shift, oprsz, maxsz);
+    }
+}
+
+static void do_gvec_vshlltu(unsigned vece, uint32_t dofs, uint32_t aofs,
+                            int64_t shift, uint32_t oprsz, uint32_t maxsz)
+{
+    unsigned ovece = vece + 1;
+    unsigned ibits = vece == MO_8 ? 8 : 16;
+    if (shift == 0) {
+        tcg_gen_gvec_shri(ovece, dofs, aofs, ibits, oprsz, maxsz);
+    } else {
+        tcg_gen_gvec_andi(ovece, dofs, aofs,
+                          ovece == MO_16 ? 0xff00 : 0xffff0000, oprsz, maxsz);
+        tcg_gen_gvec_shri(ovece, dofs, dofs, ibits - shift, oprsz, maxsz);
+    }
+}
+
 DO_VSHLL(VSHLL_BS, vshllbs)
 DO_VSHLL(VSHLL_BU, vshllbu)
 DO_VSHLL(VSHLL_TS, vshllts)
@@ -1031,3 +1872,439 @@ static bool trans_VSHLC(DisasContext *s, arg_VSHLC *a)
     mve_update_eci(s);
     return true;
 }
+
+static bool do_vidup(DisasContext *s, arg_vidup *a, MVEGenVIDUPFn *fn)
+{
+    TCGv_ptr qd;
+    TCGv_i32 rn;
+
+    /*
+     * Vector increment/decrement with wrap and duplicate (VIDUP, VDDUP).
+     * This fills the vector with elements of successively increasing
+     * or decreasing values, starting from Rn.
+ */ + if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd)) { + return false; + } + if (a->size == MO_64) { + /* size 0b11 is another encoding */ + return false; + } + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + qd = mve_qreg_ptr(a->qd); + rn = load_reg(s, a->rn); + fn(rn, cpu_env, qd, rn, tcg_constant_i32(a->imm)); + store_reg(s, a->rn, rn); + tcg_temp_free_ptr(qd); + mve_update_eci(s); + return true; +} + +static bool do_viwdup(DisasContext *s, arg_viwdup *a, MVEGenVIWDUPFn *fn) +{ + TCGv_ptr qd; + TCGv_i32 rn, rm; + + /* + * Vector increment/decrement with wrap and duplicate (VIWDUp, VDWDUP) + * This fills the vector with elements of successively increasing + * or decreasing values, starting from Rn. Rm specifies a point where + * the count wraps back around to 0. The updated offset is written back + * to Rn. + */ + if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd)) { + return false; + } + if (!fn || a->rm == 13 || a->rm == 15) { + /* + * size 0b11 is another encoding; Rm == 13 is UNPREDICTABLE; + * Rm == 13 is VIWDUP, VDWDUP. + */ + return false; + } + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + qd = mve_qreg_ptr(a->qd); + rn = load_reg(s, a->rn); + rm = load_reg(s, a->rm); + fn(rn, cpu_env, qd, rn, rm, tcg_constant_i32(a->imm)); + store_reg(s, a->rn, rn); + tcg_temp_free_ptr(qd); + tcg_temp_free_i32(rm); + mve_update_eci(s); + return true; +} + +static bool trans_VIDUP(DisasContext *s, arg_vidup *a) +{ + static MVEGenVIDUPFn * const fns[] = { + gen_helper_mve_vidupb, + gen_helper_mve_viduph, + gen_helper_mve_vidupw, + NULL, + }; + return do_vidup(s, a, fns[a->size]); +} + +static bool trans_VDDUP(DisasContext *s, arg_vidup *a) +{ + static MVEGenVIDUPFn * const fns[] = { + gen_helper_mve_vidupb, + gen_helper_mve_viduph, + gen_helper_mve_vidupw, + NULL, + }; + /* VDDUP is just like VIDUP but with a negative immediate */ + a->imm = -a->imm; + return do_vidup(s, a, fns[a->size]); +} + +static bool trans_VIWDUP(DisasContext *s, arg_viwdup *a) +{ + static MVEGenVIWDUPFn * const fns[] = { + gen_helper_mve_viwdupb, + gen_helper_mve_viwduph, + gen_helper_mve_viwdupw, + NULL, + }; + return do_viwdup(s, a, fns[a->size]); +} + +static bool trans_VDWDUP(DisasContext *s, arg_viwdup *a) +{ + static MVEGenVIWDUPFn * const fns[] = { + gen_helper_mve_vdwdupb, + gen_helper_mve_vdwduph, + gen_helper_mve_vdwdupw, + NULL, + }; + return do_viwdup(s, a, fns[a->size]); +} + +static bool do_vcmp(DisasContext *s, arg_vcmp *a, MVEGenCmpFn *fn) +{ + TCGv_ptr qn, qm; + + if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qm) || + !fn) { + return false; + } + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + qn = mve_qreg_ptr(a->qn); + qm = mve_qreg_ptr(a->qm); + fn(cpu_env, qn, qm); + tcg_temp_free_ptr(qn); + tcg_temp_free_ptr(qm); + if (a->mask) { + /* VPT */ + gen_vpst(s, a->mask); + } + /* This insn updates predication bits */ + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; + mve_update_eci(s); + return true; +} + +static bool do_vcmp_scalar(DisasContext *s, arg_vcmp_scalar *a, + MVEGenScalarCmpFn *fn) +{ + TCGv_ptr qn; + TCGv_i32 rm; + + if (!dc_isar_feature(aa32_mve, s) || !fn || a->rm == 13) { + return false; + } + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + qn = mve_qreg_ptr(a->qn); + if (a->rm == 15) { + /* Encoding Rm=0b1111 means "constant zero" */ + rm = tcg_constant_i32(0); + } else { + rm = load_reg(s, a->rm); + } + fn(cpu_env, qn, rm); + 
tcg_temp_free_ptr(qn); + tcg_temp_free_i32(rm); + if (a->mask) { + /* VPT */ + gen_vpst(s, a->mask); + } + /* This insn updates predication bits */ + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; + mve_update_eci(s); + return true; +} + +#define DO_VCMP(INSN, FN) \ + static bool trans_##INSN(DisasContext *s, arg_vcmp *a) \ + { \ + static MVEGenCmpFn * const fns[] = { \ + gen_helper_mve_##FN##b, \ + gen_helper_mve_##FN##h, \ + gen_helper_mve_##FN##w, \ + NULL, \ + }; \ + return do_vcmp(s, a, fns[a->size]); \ + } \ + static bool trans_##INSN##_scalar(DisasContext *s, \ + arg_vcmp_scalar *a) \ + { \ + static MVEGenScalarCmpFn * const fns[] = { \ + gen_helper_mve_##FN##_scalarb, \ + gen_helper_mve_##FN##_scalarh, \ + gen_helper_mve_##FN##_scalarw, \ + NULL, \ + }; \ + return do_vcmp_scalar(s, a, fns[a->size]); \ + } + +DO_VCMP(VCMPEQ, vcmpeq) +DO_VCMP(VCMPNE, vcmpne) +DO_VCMP(VCMPCS, vcmpcs) +DO_VCMP(VCMPHI, vcmphi) +DO_VCMP(VCMPGE, vcmpge) +DO_VCMP(VCMPLT, vcmplt) +DO_VCMP(VCMPGT, vcmpgt) +DO_VCMP(VCMPLE, vcmple) + +#define DO_VCMP_FP(INSN, FN) \ + static bool trans_##INSN(DisasContext *s, arg_vcmp *a) \ + { \ + static MVEGenCmpFn * const fns[] = { \ + NULL, \ + gen_helper_mve_##FN##h, \ + gen_helper_mve_##FN##s, \ + NULL, \ + }; \ + if (!dc_isar_feature(aa32_mve_fp, s)) { \ + return false; \ + } \ + return do_vcmp(s, a, fns[a->size]); \ + } \ + static bool trans_##INSN##_scalar(DisasContext *s, \ + arg_vcmp_scalar *a) \ + { \ + static MVEGenScalarCmpFn * const fns[] = { \ + NULL, \ + gen_helper_mve_##FN##_scalarh, \ + gen_helper_mve_##FN##_scalars, \ + NULL, \ + }; \ + if (!dc_isar_feature(aa32_mve_fp, s)) { \ + return false; \ + } \ + return do_vcmp_scalar(s, a, fns[a->size]); \ + } + +DO_VCMP_FP(VCMPEQ_fp, vfcmpeq) +DO_VCMP_FP(VCMPNE_fp, vfcmpne) +DO_VCMP_FP(VCMPGE_fp, vfcmpge) +DO_VCMP_FP(VCMPLT_fp, vfcmplt) +DO_VCMP_FP(VCMPGT_fp, vfcmpgt) +DO_VCMP_FP(VCMPLE_fp, vfcmple) + +static bool do_vmaxv(DisasContext *s, arg_vmaxv *a, MVEGenVADDVFn fn) +{ + /* + * MIN/MAX operations across a vector: compute the min or + * max of the initial value in a general purpose register + * and all the elements in the vector, and store it back + * into the general purpose register. 
+ */ + TCGv_ptr qm; + TCGv_i32 rda; + + if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qm) || + !fn || a->rda == 13 || a->rda == 15) { + /* Rda cases are UNPREDICTABLE */ + return false; + } + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + qm = mve_qreg_ptr(a->qm); + rda = load_reg(s, a->rda); + fn(rda, cpu_env, qm, rda); + store_reg(s, a->rda, rda); + tcg_temp_free_ptr(qm); + mve_update_eci(s); + return true; +} + +#define DO_VMAXV(INSN, FN) \ + static bool trans_##INSN(DisasContext *s, arg_vmaxv *a) \ + { \ + static MVEGenVADDVFn * const fns[] = { \ + gen_helper_mve_##FN##b, \ + gen_helper_mve_##FN##h, \ + gen_helper_mve_##FN##w, \ + NULL, \ + }; \ + return do_vmaxv(s, a, fns[a->size]); \ + } + +DO_VMAXV(VMAXV_S, vmaxvs) +DO_VMAXV(VMAXV_U, vmaxvu) +DO_VMAXV(VMAXAV, vmaxav) +DO_VMAXV(VMINV_S, vminvs) +DO_VMAXV(VMINV_U, vminvu) +DO_VMAXV(VMINAV, vminav) + +#define DO_VMAXV_FP(INSN, FN) \ + static bool trans_##INSN(DisasContext *s, arg_vmaxv *a) \ + { \ + static MVEGenVADDVFn * const fns[] = { \ + NULL, \ + gen_helper_mve_##FN##h, \ + gen_helper_mve_##FN##s, \ + NULL, \ + }; \ + if (!dc_isar_feature(aa32_mve_fp, s)) { \ + return false; \ + } \ + return do_vmaxv(s, a, fns[a->size]); \ + } + +DO_VMAXV_FP(VMAXNMV, vmaxnmv) +DO_VMAXV_FP(VMINNMV, vminnmv) +DO_VMAXV_FP(VMAXNMAV, vmaxnmav) +DO_VMAXV_FP(VMINNMAV, vminnmav) + +static bool do_vabav(DisasContext *s, arg_vabav *a, MVEGenVABAVFn *fn) +{ + /* Absolute difference accumulated across vector */ + TCGv_ptr qn, qm; + TCGv_i32 rda; + + if (!dc_isar_feature(aa32_mve, s) || + !mve_check_qreg_bank(s, a->qm | a->qn) || + !fn || a->rda == 13 || a->rda == 15) { + /* Rda cases are UNPREDICTABLE */ + return false; + } + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + qm = mve_qreg_ptr(a->qm); + qn = mve_qreg_ptr(a->qn); + rda = load_reg(s, a->rda); + fn(rda, cpu_env, qn, qm, rda); + store_reg(s, a->rda, rda); + tcg_temp_free_ptr(qm); + tcg_temp_free_ptr(qn); + mve_update_eci(s); + return true; +} + +#define DO_VABAV(INSN, FN) \ + static bool trans_##INSN(DisasContext *s, arg_vabav *a) \ + { \ + static MVEGenVABAVFn * const fns[] = { \ + gen_helper_mve_##FN##b, \ + gen_helper_mve_##FN##h, \ + gen_helper_mve_##FN##w, \ + NULL, \ + }; \ + return do_vabav(s, a, fns[a->size]); \ + } + +DO_VABAV(VABAV_S, vabavs) +DO_VABAV(VABAV_U, vabavu) + +static bool trans_VMOV_to_2gp(DisasContext *s, arg_VMOV_to_2gp *a) +{ + /* + * VMOV two 32-bit vector lanes to two general-purpose registers. + * This insn is not predicated but it is subject to beat-wise + * execution if it is not in an IT block. For us this means + * only that if PSR.ECI says we should not be executing the beat + * corresponding to the lane of the vector register being accessed + * then we should skip perfoming the move, and that we need to do + * the usual check for bad ECI state and advance of ECI state. + * (If PSR.ECI is non-zero then we cannot be in an IT block.) 
+ */ + TCGv_i32 tmp; + int vd; + + if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd) || + a->rt == 13 || a->rt == 15 || a->rt2 == 13 || a->rt2 == 15 || + a->rt == a->rt2) { + /* Rt/Rt2 cases are UNPREDICTABLE */ + return false; + } + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + /* Convert Qreg index to Dreg for read_neon_element32() etc */ + vd = a->qd * 2; + + if (!mve_skip_vmov(s, vd, a->idx, MO_32)) { + tmp = tcg_temp_new_i32(); + read_neon_element32(tmp, vd, a->idx, MO_32); + store_reg(s, a->rt, tmp); + } + if (!mve_skip_vmov(s, vd + 1, a->idx, MO_32)) { + tmp = tcg_temp_new_i32(); + read_neon_element32(tmp, vd + 1, a->idx, MO_32); + store_reg(s, a->rt2, tmp); + } + + mve_update_and_store_eci(s); + return true; +} + +static bool trans_VMOV_from_2gp(DisasContext *s, arg_VMOV_to_2gp *a) +{ + /* + * VMOV two general-purpose registers to two 32-bit vector lanes. + * This insn is not predicated but it is subject to beat-wise + * execution if it is not in an IT block. For us this means + * only that if PSR.ECI says we should not be executing the beat + * corresponding to the lane of the vector register being accessed + * then we should skip perfoming the move, and that we need to do + * the usual check for bad ECI state and advance of ECI state. + * (If PSR.ECI is non-zero then we cannot be in an IT block.) + */ + TCGv_i32 tmp; + int vd; + + if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd) || + a->rt == 13 || a->rt == 15 || a->rt2 == 13 || a->rt2 == 15) { + /* Rt/Rt2 cases are UNPREDICTABLE */ + return false; + } + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + /* Convert Qreg idx to Dreg for read_neon_element32() etc */ + vd = a->qd * 2; + + if (!mve_skip_vmov(s, vd, a->idx, MO_32)) { + tmp = load_reg(s, a->rt); + write_neon_element32(tmp, vd, a->idx, MO_32); + tcg_temp_free_i32(tmp); + } + if (!mve_skip_vmov(s, vd + 1, a->idx, MO_32)) { + tmp = load_reg(s, a->rt2); + write_neon_element32(tmp, vd + 1, a->idx, MO_32); + tcg_temp_free_i32(tmp); + } + + mve_update_and_store_eci(s); + return true; +} diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c index c53ab20fa4..4016339d46 100644 --- a/target/arm/translate-neon.c +++ b/target/arm/translate-neon.c @@ -28,12 +28,6 @@ #include "translate.h" #include "translate-a32.h" -static inline int neon_3same_fp_size(DisasContext *s, int x) -{ - /* Convert 0==fp32, 1==fp16 into a MO_* value */ - return MO_32 - x; -} - /* Include the generated Neon decoder */ #include "decode-neon-dp.c.inc" #include "decode-neon-ls.c.inc" @@ -79,7 +73,7 @@ static void neon_load_element64(TCGv_i64 var, int reg, int ele, MemOp mop) case MO_UL: tcg_gen_ld32u_i64(var, cpu_env, offset); break; - case MO_Q: + case MO_UQ: tcg_gen_ld_i64(var, cpu_env, offset); break; default: @@ -453,7 +447,7 @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a) int mmu_idx = get_mem_index(s); int size = a->size; TCGv_i64 tmp64; - TCGv_i32 addr, tmp; + TCGv_i32 addr; if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { return false; @@ -519,7 +513,6 @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a) tmp64 = tcg_temp_new_i64(); addr = tcg_temp_new_i32(); - tmp = tcg_const_i32(1 << size); load_reg_var(s, addr, a->rn); mop = endian | size | align; @@ -536,7 +529,7 @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a) neon_load_element64(tmp64, tt, n, size); gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx, mop); } - 
tcg_gen_add_i32(addr, addr, tmp); + tcg_gen_addi_i32(addr, addr, 1 << size); /* Subsequent memory operations inherit alignment */ mop &= ~MO_AMASK; @@ -544,7 +537,6 @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a) } } tcg_temp_free_i32(addr); - tcg_temp_free_i32(tmp); tcg_temp_free_i64(tmp64); gen_neon_ldst_base_update(s, a->rm, a->rn, nregs * interleave * 8); @@ -592,7 +584,11 @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a) case 3: return false; case 4: - align = pow2_align(size + 2); + if (size == 2) { + align = pow2_align(3); + } else { + align = pow2_align(size + 2); + } break; default: g_assert_not_reached(); @@ -663,28 +659,31 @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a) /* Catch the UNDEF cases. This is unavoidably a bit messy. */ switch (nregs) { case 1: + if (a->stride != 1) { + return false; + } if (((a->align & (1 << a->size)) != 0) || (a->size == 2 && (a->align == 1 || a->align == 2))) { return false; } break; - case 3: - if ((a->align & 1) != 0) { - return false; - } - /* fall through */ case 2: if (a->size == 2 && (a->align & 2) != 0) { return false; } break; + case 3: + if (a->align != 0) { + return false; + } + break; case 4: if (a->size == 2 && a->align == 3) { return false; } break; default: - abort(); + g_assert_not_reached(); } if ((vd + a->stride * (nregs - 1)) > 31) { /* @@ -1351,7 +1350,7 @@ static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a, * To avoid excessive duplication of ops we implement shift * by immediate using the variable shift operations. */ - constimm = tcg_const_i64(dup_const(a->size, a->shift)); + constimm = tcg_constant_i64(dup_const(a->size, a->shift)); for (pass = 0; pass < a->q + 1; pass++) { TCGv_i64 tmp = tcg_temp_new_i64(); @@ -1361,7 +1360,6 @@ static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a, write_neon_element64(tmp, a->vd, pass, MO_64); tcg_temp_free_i64(tmp); } - tcg_temp_free_i64(constimm); return true; } @@ -1397,7 +1395,7 @@ static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a, * To avoid excessive duplication of ops we implement shift * by immediate using the variable shift operations. */ - constimm = tcg_const_i32(dup_const(a->size, a->shift)); + constimm = tcg_constant_i32(dup_const(a->size, a->shift)); tmp = tcg_temp_new_i32(); for (pass = 0; pass < (a->q ? 4 : 2); pass++) { @@ -1406,7 +1404,6 @@ static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a, write_neon_element32(tmp, a->vd, pass, MO_32); } tcg_temp_free_i32(tmp); - tcg_temp_free_i32(constimm); return true; } @@ -1460,7 +1457,7 @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a, * This is always a right shift, and the shiftfn is always a * left-shift helper, which thus needs the negated shift count. 
*/ - constimm = tcg_const_i64(-a->shift); + constimm = tcg_constant_i64(-a->shift); rm1 = tcg_temp_new_i64(); rm2 = tcg_temp_new_i64(); rd = tcg_temp_new_i32(); @@ -1480,7 +1477,6 @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a, tcg_temp_free_i32(rd); tcg_temp_free_i64(rm1); tcg_temp_free_i64(rm2); - tcg_temp_free_i64(constimm); return true; } @@ -1524,7 +1520,7 @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a, /* size == 2 */ imm = -a->shift; } - constimm = tcg_const_i32(imm); + constimm = tcg_constant_i32(imm); /* Load all inputs first to avoid potential overwrite */ rm1 = tcg_temp_new_i32(); @@ -1549,7 +1545,6 @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a, shiftfn(rm3, rm3, constimm); shiftfn(rm4, rm4, constimm); - tcg_temp_free_i32(constimm); tcg_gen_concat_i32_i64(rtmp, rm3, rm4); tcg_temp_free_i32(rm4); @@ -1836,7 +1831,7 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, return false; } - if ((a->vd & 1) || (src1_mop == MO_Q && (a->vn & 1))) { + if ((a->vd & 1) || (src1_mop == MO_UQ && (a->vn & 1))) { return false; } @@ -1916,7 +1911,7 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, }; \ int narrow_mop = a->size == MO_32 ? MO_32 | SIGN : -1; \ return do_prewiden_3d(s, a, widenfn[a->size], addfn[a->size], \ - SRC1WIDE ? MO_Q : narrow_mop, \ + SRC1WIDE ? MO_UQ : narrow_mop, \ narrow_mop); \ } @@ -2914,7 +2909,7 @@ static bool trans_VTBL(DisasContext *s, arg_VTBL *a) return true; } - desc = tcg_const_i32((a->vn << 2) | a->len); + desc = tcg_constant_i32((a->vn << 2) | a->len); def = tcg_temp_new_i64(); if (a->op) { read_neon_element64(def, a->vd, 0, MO_64); @@ -2929,7 +2924,6 @@ static bool trans_VTBL(DisasContext *s, arg_VTBL *a) tcg_temp_free_i64(def); tcg_temp_free_i64(val); - tcg_temp_free_i32(desc); return true; } diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c new file mode 100644 index 0000000000..7b87a9df63 --- /dev/null +++ b/target/arm/translate-sme.c @@ -0,0 +1,373 @@ +/* + * AArch64 SME translation + * + * Copyright (c) 2022 Linaro, Ltd + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "tcg/tcg-op.h" +#include "tcg/tcg-op-gvec.h" +#include "tcg/tcg-gvec-desc.h" +#include "translate.h" +#include "exec/helper-gen.h" +#include "translate-a64.h" +#include "fpu/softfloat.h" + + +/* + * Include the generated decoder. + */ + +#include "decode-sme.c.inc" + + +/* + * Resolve tile.size[index] to a host pointer, where tile and index + * are always decoded together, dependent on the element size. + */ +static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs, + int tile_index, bool vertical) +{ + int tile = tile_index >> (4 - esz); + int index = esz == MO_128 ? 0 : extract32(tile_index, 0, 4 - esz); + int pos, len, offset; + TCGv_i32 tmp; + TCGv_ptr addr; + + /* Compute the final index, which is Rs+imm. 
*/ + tmp = tcg_temp_new_i32(); + tcg_gen_trunc_tl_i32(tmp, cpu_reg(s, rs)); + tcg_gen_addi_i32(tmp, tmp, index); + + /* Prepare a power-of-two modulo via extraction of @len bits. */ + len = ctz32(streaming_vec_reg_size(s)) - esz; + + if (vertical) { + /* + * Compute the byte offset of the index within the tile: + * (index % (svl / size)) * size + * = (index % (svl >> esz)) << esz + * Perform the power-of-two modulo via extraction of the low @len bits. + * Perform the multiply by shifting left by @pos bits. + * Perform these operations simultaneously via deposit into zero. + */ + pos = esz; + tcg_gen_deposit_z_i32(tmp, tmp, pos, len); + + /* + * For big-endian, adjust the indexed column byte offset within + * the uint64_t host words that make up env->zarray[]. + */ + if (HOST_BIG_ENDIAN && esz < MO_64) { + tcg_gen_xori_i32(tmp, tmp, 8 - (1 << esz)); + } + } else { + /* + * Compute the byte offset of the index within the tile: + * (index % (svl / size)) * (size * sizeof(row)) + * = (index % (svl >> esz)) << (esz + log2(sizeof(row))) + */ + pos = esz + ctz32(sizeof(ARMVectorReg)); + tcg_gen_deposit_z_i32(tmp, tmp, pos, len); + + /* Row slices are always aligned and need no endian adjustment. */ + } + + /* The tile byte offset within env->zarray is the row. */ + offset = tile * sizeof(ARMVectorReg); + + /* Include the byte offset of zarray to make this relative to env. */ + offset += offsetof(CPUARMState, zarray); + tcg_gen_addi_i32(tmp, tmp, offset); + + /* Add the byte offset to env to produce the final pointer. */ + addr = tcg_temp_new_ptr(); + tcg_gen_ext_i32_ptr(addr, tmp); + tcg_temp_free_i32(tmp); + tcg_gen_add_ptr(addr, addr, cpu_env); + + return addr; +} + +static bool trans_ZERO(DisasContext *s, arg_ZERO *a) +{ + if (!dc_isar_feature(aa64_sme, s)) { + return false; + } + if (sme_za_enabled_check(s)) { + gen_helper_sme_zero(cpu_env, tcg_constant_i32(a->imm), + tcg_constant_i32(streaming_vec_reg_size(s))); + } + return true; +} + +static bool trans_MOVA(DisasContext *s, arg_MOVA *a) +{ + static gen_helper_gvec_4 * const h_fns[5] = { + gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h, + gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d, + gen_helper_sve_sel_zpzz_q + }; + static gen_helper_gvec_3 * const cz_fns[5] = { + gen_helper_sme_mova_cz_b, gen_helper_sme_mova_cz_h, + gen_helper_sme_mova_cz_s, gen_helper_sme_mova_cz_d, + gen_helper_sme_mova_cz_q, + }; + static gen_helper_gvec_3 * const zc_fns[5] = { + gen_helper_sme_mova_zc_b, gen_helper_sme_mova_zc_h, + gen_helper_sme_mova_zc_s, gen_helper_sme_mova_zc_d, + gen_helper_sme_mova_zc_q, + }; + + TCGv_ptr t_za, t_zr, t_pg; + TCGv_i32 t_desc; + int svl; + + if (!dc_isar_feature(aa64_sme, s)) { + return false; + } + if (!sme_smza_enabled_check(s)) { + return true; + } + + t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v); + t_zr = vec_full_reg_ptr(s, a->zr); + t_pg = pred_full_reg_ptr(s, a->pg); + + svl = streaming_vec_reg_size(s); + t_desc = tcg_constant_i32(simd_desc(svl, svl, 0)); + + if (a->v) { + /* Vertical slice -- use sme mova helpers. */ + if (a->to_vec) { + zc_fns[a->esz](t_zr, t_za, t_pg, t_desc); + } else { + cz_fns[a->esz](t_za, t_zr, t_pg, t_desc); + } + } else { + /* Horizontal slice -- reuse sve sel helpers. 
*/ + if (a->to_vec) { + h_fns[a->esz](t_zr, t_za, t_zr, t_pg, t_desc); + } else { + h_fns[a->esz](t_za, t_zr, t_za, t_pg, t_desc); + } + } + + tcg_temp_free_ptr(t_za); + tcg_temp_free_ptr(t_zr); + tcg_temp_free_ptr(t_pg); + + return true; +} + +static bool trans_LDST1(DisasContext *s, arg_LDST1 *a) +{ + typedef void GenLdSt1(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv, TCGv_i32); + + /* + * Indexed by [esz][be][v][mte][st], which is (except for load/store) + * also the order in which the elements appear in the function names, + * and so how we must concatenate the pieces. + */ + +#define FN_LS(F) { gen_helper_sme_ld1##F, gen_helper_sme_st1##F } +#define FN_MTE(F) { FN_LS(F), FN_LS(F##_mte) } +#define FN_HV(F) { FN_MTE(F##_h), FN_MTE(F##_v) } +#define FN_END(L, B) { FN_HV(L), FN_HV(B) } + + static GenLdSt1 * const fns[5][2][2][2][2] = { + FN_END(b, b), + FN_END(h_le, h_be), + FN_END(s_le, s_be), + FN_END(d_le, d_be), + FN_END(q_le, q_be), + }; + +#undef FN_LS +#undef FN_MTE +#undef FN_HV +#undef FN_END + + TCGv_ptr t_za, t_pg; + TCGv_i64 addr; + int svl, desc = 0; + bool be = s->be_data == MO_BE; + bool mte = s->mte_active[0]; + + if (!dc_isar_feature(aa64_sme, s)) { + return false; + } + if (!sme_smza_enabled_check(s)) { + return true; + } + + t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v); + t_pg = pred_full_reg_ptr(s, a->pg); + addr = tcg_temp_new_i64(); + + tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->esz); + tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); + + if (mte) { + desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s)); + desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); + desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); + desc = FIELD_DP32(desc, MTEDESC, WRITE, a->st); + desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << a->esz) - 1); + desc <<= SVE_MTEDESC_SHIFT; + } else { + addr = clean_data_tbi(s, addr); + } + svl = streaming_vec_reg_size(s); + desc = simd_desc(svl, svl, desc); + + fns[a->esz][be][a->v][mte][a->st](cpu_env, t_za, t_pg, addr, + tcg_constant_i32(desc)); + + tcg_temp_free_ptr(t_za); + tcg_temp_free_ptr(t_pg); + tcg_temp_free_i64(addr); + return true; +} + +typedef void GenLdStR(DisasContext *, TCGv_ptr, int, int, int, int); + +static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn) +{ + int svl = streaming_vec_reg_size(s); + int imm = a->imm; + TCGv_ptr base; + + if (!sme_za_enabled_check(s)) { + return true; + } + + /* ZA[n] equates to ZA0H.B[n]. */ + base = get_tile_rowcol(s, MO_8, a->rv, imm, false); + + fn(s, base, 0, svl, a->rn, imm * svl); + + tcg_temp_free_ptr(base); + return true; +} + +TRANS_FEAT(LDR, aa64_sme, do_ldst_r, a, gen_sve_ldr) +TRANS_FEAT(STR, aa64_sme, do_ldst_r, a, gen_sve_str) + +static bool do_adda(DisasContext *s, arg_adda *a, MemOp esz, + gen_helper_gvec_4 *fn) +{ + int svl = streaming_vec_reg_size(s); + uint32_t desc = simd_desc(svl, svl, 0); + TCGv_ptr za, zn, pn, pm; + + if (!sme_smza_enabled_check(s)) { + return true; + } + + /* Sum XZR+zad to find ZAd. 
*/ + za = get_tile_rowcol(s, esz, 31, a->zad, false); + zn = vec_full_reg_ptr(s, a->zn); + pn = pred_full_reg_ptr(s, a->pn); + pm = pred_full_reg_ptr(s, a->pm); + + fn(za, zn, pn, pm, tcg_constant_i32(desc)); + + tcg_temp_free_ptr(za); + tcg_temp_free_ptr(zn); + tcg_temp_free_ptr(pn); + tcg_temp_free_ptr(pm); + return true; +} + +TRANS_FEAT(ADDHA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addha_s) +TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s) +TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d) +TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d) + +static bool do_outprod(DisasContext *s, arg_op *a, MemOp esz, + gen_helper_gvec_5 *fn) +{ + int svl = streaming_vec_reg_size(s); + uint32_t desc = simd_desc(svl, svl, a->sub); + TCGv_ptr za, zn, zm, pn, pm; + + if (!sme_smza_enabled_check(s)) { + return true; + } + + /* Sum XZR+zad to find ZAd. */ + za = get_tile_rowcol(s, esz, 31, a->zad, false); + zn = vec_full_reg_ptr(s, a->zn); + zm = vec_full_reg_ptr(s, a->zm); + pn = pred_full_reg_ptr(s, a->pn); + pm = pred_full_reg_ptr(s, a->pm); + + fn(za, zn, zm, pn, pm, tcg_constant_i32(desc)); + + tcg_temp_free_ptr(za); + tcg_temp_free_ptr(zn); + tcg_temp_free_ptr(pn); + tcg_temp_free_ptr(pm); + return true; +} + +static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz, + gen_helper_gvec_5_ptr *fn) +{ + int svl = streaming_vec_reg_size(s); + uint32_t desc = simd_desc(svl, svl, a->sub); + TCGv_ptr za, zn, zm, pn, pm, fpst; + + if (!sme_smza_enabled_check(s)) { + return true; + } + + /* Sum XZR+zad to find ZAd. */ + za = get_tile_rowcol(s, esz, 31, a->zad, false); + zn = vec_full_reg_ptr(s, a->zn); + zm = vec_full_reg_ptr(s, a->zm); + pn = pred_full_reg_ptr(s, a->pn); + pm = pred_full_reg_ptr(s, a->pm); + fpst = fpstatus_ptr(FPST_FPCR); + + fn(za, zn, zm, pn, pm, fpst, tcg_constant_i32(desc)); + + tcg_temp_free_ptr(za); + tcg_temp_free_ptr(zn); + tcg_temp_free_ptr(pn); + tcg_temp_free_ptr(pm); + tcg_temp_free_ptr(fpst); + return true; +} + +TRANS_FEAT(FMOPA_h, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_h) +TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s) +TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d) + +/* TODO: FEAT_EBF16 */ +TRANS_FEAT(BFMOPA, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_bfmopa) + +TRANS_FEAT(SMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_smopa_s) +TRANS_FEAT(UMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_umopa_s) +TRANS_FEAT(SUMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_sumopa_s) +TRANS_FEAT(USMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_usmopa_s) + +TRANS_FEAT(SMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_smopa_d) +TRANS_FEAT(UMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_umopa_d) +TRANS_FEAT(SUMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_sumopa_d) +TRANS_FEAT(USMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_usmopa_d) diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c index bc91a64171..621a2abb22 100644 --- a/target/arm/translate-sve.c +++ b/target/arm/translate-sve.c @@ -100,133 +100,363 @@ static inline int msz_dtype(DisasContext *s, int msz) * Implement all of the translator functions referenced by the decoder. */ -/* Return the offset info CPUARMState of the predicate vector register Pn. - * Note for this purpose, FFR is P16. 
- */ -static inline int pred_full_reg_offset(DisasContext *s, int regno) -{ - return offsetof(CPUARMState, vfp.pregs[regno]); -} - -/* Return the byte size of the whole predicate register, VL / 64. */ -static inline int pred_full_reg_size(DisasContext *s) -{ - return s->sve_len >> 3; -} - -/* Round up the size of a register to a size allowed by - * the tcg vector infrastructure. Any operation which uses this - * size may assume that the bits above pred_full_reg_size are zero, - * and must leave them the same way. - * - * Note that this is not needed for the vector registers as they - * are always properly sized for tcg vectors. - */ -static int size_for_gvec(int size) -{ - if (size <= 8) { - return 8; - } else { - return QEMU_ALIGN_UP(size, 16); - } -} - -static int pred_gvec_reg_size(DisasContext *s) -{ - return size_for_gvec(pred_full_reg_size(s)); -} - /* Invoke an out-of-line helper on 2 Zregs. */ -static void gen_gvec_ool_zz(DisasContext *s, gen_helper_gvec_2 *fn, +static bool gen_gvec_ool_zz(DisasContext *s, gen_helper_gvec_2 *fn, int rd, int rn, int data) { - unsigned vsz = vec_full_reg_size(s); - tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd), - vec_full_reg_offset(s, rn), - vsz, vsz, data, fn); + if (fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + vsz, vsz, data, fn); + } + return true; +} + +static bool gen_gvec_fpst_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn, + int rd, int rn, int data, + ARMFPStatusFlavour flavour) +{ + if (fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr status = fpstatus_ptr(flavour); + + tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + status, vsz, vsz, data, fn); + tcg_temp_free_ptr(status); + } + return true; +} + +static bool gen_gvec_fpst_arg_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn, + arg_rr_esz *a, int data) +{ + return gen_gvec_fpst_zz(s, fn, a->rd, a->rn, data, + a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); } /* Invoke an out-of-line helper on 3 Zregs. */ -static void gen_gvec_ool_zzz(DisasContext *s, gen_helper_gvec_3 *fn, +static bool gen_gvec_ool_zzz(DisasContext *s, gen_helper_gvec_3 *fn, int rd, int rn, int rm, int data) { - unsigned vsz = vec_full_reg_size(s); - tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd), - vec_full_reg_offset(s, rn), - vec_full_reg_offset(s, rm), - vsz, vsz, data, fn); + if (fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), + vsz, vsz, data, fn); + } + return true; +} + +static bool gen_gvec_ool_arg_zzz(DisasContext *s, gen_helper_gvec_3 *fn, + arg_rrr_esz *a, int data) +{ + return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, data); +} + +/* Invoke an out-of-line helper on 3 Zregs, plus float_status. 
*/ +static bool gen_gvec_fpst_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn, + int rd, int rn, int rm, + int data, ARMFPStatusFlavour flavour) +{ + if (fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr status = fpstatus_ptr(flavour); + + tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), + status, vsz, vsz, data, fn); + + tcg_temp_free_ptr(status); + } + return true; +} + +static bool gen_gvec_fpst_arg_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn, + arg_rrr_esz *a, int data) +{ + return gen_gvec_fpst_zzz(s, fn, a->rd, a->rn, a->rm, data, + a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); } /* Invoke an out-of-line helper on 4 Zregs. */ -static void gen_gvec_ool_zzzz(DisasContext *s, gen_helper_gvec_4 *fn, +static bool gen_gvec_ool_zzzz(DisasContext *s, gen_helper_gvec_4 *fn, int rd, int rn, int rm, int ra, int data) { - unsigned vsz = vec_full_reg_size(s); - tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd), - vec_full_reg_offset(s, rn), - vec_full_reg_offset(s, rm), - vec_full_reg_offset(s, ra), - vsz, vsz, data, fn); + if (fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), + vec_full_reg_offset(s, ra), + vsz, vsz, data, fn); + } + return true; +} + +static bool gen_gvec_ool_arg_zzzz(DisasContext *s, gen_helper_gvec_4 *fn, + arg_rrrr_esz *a, int data) +{ + return gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data); +} + +static bool gen_gvec_ool_arg_zzxz(DisasContext *s, gen_helper_gvec_4 *fn, + arg_rrxr_esz *a) +{ + return gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index); +} + +/* Invoke an out-of-line helper on 4 Zregs, plus a pointer. */ +static bool gen_gvec_ptr_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, + int rd, int rn, int rm, int ra, + int data, TCGv_ptr ptr) +{ + if (fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), + vec_full_reg_offset(s, ra), + ptr, vsz, vsz, data, fn); + } + return true; +} + +static bool gen_gvec_fpst_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, + int rd, int rn, int rm, int ra, + int data, ARMFPStatusFlavour flavour) +{ + TCGv_ptr status = fpstatus_ptr(flavour); + bool ret = gen_gvec_ptr_zzzz(s, fn, rd, rn, rm, ra, data, status); + tcg_temp_free_ptr(status); + return ret; +} + +/* Invoke an out-of-line helper on 4 Zregs, 1 Preg, plus fpst. */ +static bool gen_gvec_fpst_zzzzp(DisasContext *s, gen_helper_gvec_5_ptr *fn, + int rd, int rn, int rm, int ra, int pg, + int data, ARMFPStatusFlavour flavour) +{ + if (fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr status = fpstatus_ptr(flavour); + + tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), + vec_full_reg_offset(s, ra), + pred_full_reg_offset(s, pg), + status, vsz, vsz, data, fn); + + tcg_temp_free_ptr(status); + } + return true; } /* Invoke an out-of-line helper on 2 Zregs and a predicate. 
*/ -static void gen_gvec_ool_zzp(DisasContext *s, gen_helper_gvec_3 *fn, +static bool gen_gvec_ool_zzp(DisasContext *s, gen_helper_gvec_3 *fn, int rd, int rn, int pg, int data) { - unsigned vsz = vec_full_reg_size(s); - tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd), - vec_full_reg_offset(s, rn), - pred_full_reg_offset(s, pg), - vsz, vsz, data, fn); + if (fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + pred_full_reg_offset(s, pg), + vsz, vsz, data, fn); + } + return true; +} + +static bool gen_gvec_ool_arg_zpz(DisasContext *s, gen_helper_gvec_3 *fn, + arg_rpr_esz *a, int data) +{ + return gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, data); +} + +static bool gen_gvec_ool_arg_zpzi(DisasContext *s, gen_helper_gvec_3 *fn, + arg_rpri_esz *a) +{ + return gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, a->imm); +} + +static bool gen_gvec_fpst_zzp(DisasContext *s, gen_helper_gvec_3_ptr *fn, + int rd, int rn, int pg, int data, + ARMFPStatusFlavour flavour) +{ + if (fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr status = fpstatus_ptr(flavour); + + tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + pred_full_reg_offset(s, pg), + status, vsz, vsz, data, fn); + tcg_temp_free_ptr(status); + } + return true; +} + +static bool gen_gvec_fpst_arg_zpz(DisasContext *s, gen_helper_gvec_3_ptr *fn, + arg_rpr_esz *a, int data, + ARMFPStatusFlavour flavour) +{ + return gen_gvec_fpst_zzp(s, fn, a->rd, a->rn, a->pg, data, flavour); } /* Invoke an out-of-line helper on 3 Zregs and a predicate. */ -static void gen_gvec_ool_zzzp(DisasContext *s, gen_helper_gvec_4 *fn, +static bool gen_gvec_ool_zzzp(DisasContext *s, gen_helper_gvec_4 *fn, int rd, int rn, int rm, int pg, int data) { - unsigned vsz = vec_full_reg_size(s); - tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd), - vec_full_reg_offset(s, rn), - vec_full_reg_offset(s, rm), - pred_full_reg_offset(s, pg), - vsz, vsz, data, fn); + if (fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), + pred_full_reg_offset(s, pg), + vsz, vsz, data, fn); + } + return true; } -/* Invoke a vector expander on two Zregs. */ -static void gen_gvec_fn_zz(DisasContext *s, GVecGen2Fn *gvec_fn, - int esz, int rd, int rn) +static bool gen_gvec_ool_arg_zpzz(DisasContext *s, gen_helper_gvec_4 *fn, + arg_rprr_esz *a, int data) { - unsigned vsz = vec_full_reg_size(s); - gvec_fn(esz, vec_full_reg_offset(s, rd), - vec_full_reg_offset(s, rn), vsz, vsz); + return gen_gvec_ool_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, data); +} + +/* Invoke an out-of-line helper on 3 Zregs and a predicate. 
*/ +static bool gen_gvec_fpst_zzzp(DisasContext *s, gen_helper_gvec_4_ptr *fn, + int rd, int rn, int rm, int pg, int data, + ARMFPStatusFlavour flavour) +{ + if (fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr status = fpstatus_ptr(flavour); + + tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), + pred_full_reg_offset(s, pg), + status, vsz, vsz, data, fn); + tcg_temp_free_ptr(status); + } + return true; +} + +static bool gen_gvec_fpst_arg_zpzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, + arg_rprr_esz *a) +{ + return gen_gvec_fpst_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, 0, + a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); +} + +/* Invoke a vector expander on two Zregs and an immediate. */ +static bool gen_gvec_fn_zzi(DisasContext *s, GVecGen2iFn *gvec_fn, + int esz, int rd, int rn, uint64_t imm) +{ + if (gvec_fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + gvec_fn(esz, vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), imm, vsz, vsz); + } + return true; +} + +static bool gen_gvec_fn_arg_zzi(DisasContext *s, GVecGen2iFn *gvec_fn, + arg_rri_esz *a) +{ + if (a->esz < 0) { + /* Invalid tsz encoding -- see tszimm_esz. */ + return false; + } + return gen_gvec_fn_zzi(s, gvec_fn, a->esz, a->rd, a->rn, a->imm); } /* Invoke a vector expander on three Zregs. */ -static void gen_gvec_fn_zzz(DisasContext *s, GVecGen3Fn *gvec_fn, +static bool gen_gvec_fn_zzz(DisasContext *s, GVecGen3Fn *gvec_fn, int esz, int rd, int rn, int rm) { - unsigned vsz = vec_full_reg_size(s); - gvec_fn(esz, vec_full_reg_offset(s, rd), - vec_full_reg_offset(s, rn), - vec_full_reg_offset(s, rm), vsz, vsz); + if (gvec_fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + gvec_fn(esz, vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), vsz, vsz); + } + return true; +} + +static bool gen_gvec_fn_arg_zzz(DisasContext *s, GVecGen3Fn *fn, + arg_rrr_esz *a) +{ + return gen_gvec_fn_zzz(s, fn, a->esz, a->rd, a->rn, a->rm); } /* Invoke a vector expander on four Zregs. */ -static void gen_gvec_fn_zzzz(DisasContext *s, GVecGen4Fn *gvec_fn, - int esz, int rd, int rn, int rm, int ra) +static bool gen_gvec_fn_arg_zzzz(DisasContext *s, GVecGen4Fn *gvec_fn, + arg_rrrr_esz *a) { - unsigned vsz = vec_full_reg_size(s); - gvec_fn(esz, vec_full_reg_offset(s, rd), - vec_full_reg_offset(s, rn), - vec_full_reg_offset(s, rm), - vec_full_reg_offset(s, ra), vsz, vsz); + if (gvec_fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + gvec_fn(a->esz, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, a->rm), + vec_full_reg_offset(s, a->ra), vsz, vsz); + } + return true; } /* Invoke a vector move on two Zregs. */ static bool do_mov_z(DisasContext *s, int rd, int rn) { if (sve_access_check(s)) { - gen_gvec_fn_zz(s, tcg_gen_gvec_mov, MO_8, rd, rn); + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_mov(MO_8, vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), vsz, vsz); } return true; } @@ -239,13 +469,16 @@ static void do_dupi_z(DisasContext *s, int rd, uint64_t word) } /* Invoke a vector expander on three Pregs. 
*/ -static void gen_gvec_fn_ppp(DisasContext *s, GVecGen3Fn *gvec_fn, +static bool gen_gvec_fn_ppp(DisasContext *s, GVecGen3Fn *gvec_fn, int rd, int rn, int rm) { - unsigned psz = pred_gvec_reg_size(s); - gvec_fn(MO_64, pred_full_reg_offset(s, rd), - pred_full_reg_offset(s, rn), - pred_full_reg_offset(s, rm), psz, psz); + if (sve_access_check(s)) { + unsigned psz = pred_gvec_reg_size(s); + gvec_fn(MO_64, pred_full_reg_offset(s, rd), + pred_full_reg_offset(s, rn), + pred_full_reg_offset(s, rm), psz, psz); + } + return true; } /* Invoke a vector move on two Pregs. */ @@ -282,13 +515,12 @@ static void do_predtest(DisasContext *s, int dofs, int gofs, int words) { TCGv_ptr dptr = tcg_temp_new_ptr(); TCGv_ptr gptr = tcg_temp_new_ptr(); - TCGv_i32 t; + TCGv_i32 t = tcg_temp_new_i32(); tcg_gen_addi_ptr(dptr, cpu_env, dofs); tcg_gen_addi_ptr(gptr, cpu_env, gofs); - t = tcg_const_i32(words); - gen_helper_sve_predtest(t, dptr, gptr, t); + gen_helper_sve_predtest(t, dptr, gptr, tcg_constant_i32(words)); tcg_temp_free_ptr(dptr); tcg_temp_free_ptr(gptr); @@ -297,42 +529,26 @@ static void do_predtest(DisasContext *s, int dofs, int gofs, int words) } /* For each element size, the bits within a predicate word that are active. */ -const uint64_t pred_esz_masks[4] = { +const uint64_t pred_esz_masks[5] = { 0xffffffffffffffffull, 0x5555555555555555ull, - 0x1111111111111111ull, 0x0101010101010101ull + 0x1111111111111111ull, 0x0101010101010101ull, + 0x0001000100010001ull, }; +static bool trans_INVALID(DisasContext *s, arg_INVALID *a) +{ + unallocated_encoding(s); + return true; +} + /* *** SVE Logical - Unpredicated Group */ -static bool do_zzz_fn(DisasContext *s, arg_rrr_esz *a, GVecGen3Fn *gvec_fn) -{ - if (sve_access_check(s)) { - gen_gvec_fn_zzz(s, gvec_fn, a->esz, a->rd, a->rn, a->rm); - } - return true; -} - -static bool trans_AND_zzz(DisasContext *s, arg_rrr_esz *a) -{ - return do_zzz_fn(s, a, tcg_gen_gvec_and); -} - -static bool trans_ORR_zzz(DisasContext *s, arg_rrr_esz *a) -{ - return do_zzz_fn(s, a, tcg_gen_gvec_or); -} - -static bool trans_EOR_zzz(DisasContext *s, arg_rrr_esz *a) -{ - return do_zzz_fn(s, a, tcg_gen_gvec_xor); -} - -static bool trans_BIC_zzz(DisasContext *s, arg_rrr_esz *a) -{ - return do_zzz_fn(s, a, tcg_gen_gvec_andc); -} +TRANS_FEAT(AND_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_and, a) +TRANS_FEAT(ORR_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_or, a) +TRANS_FEAT(EOR_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_xor, a) +TRANS_FEAT(BIC_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_andc, a) static void gen_xar8_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh) { @@ -438,17 +654,6 @@ static bool trans_XAR(DisasContext *s, arg_rrri_esz *a) return true; } -static bool do_sve2_zzzz_fn(DisasContext *s, arg_rrrr_esz *a, GVecGen4Fn *fn) -{ - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_fn_zzzz(s, fn, a->esz, a->rd, a->rn, a->rm, a->ra); - } - return true; -} - static void gen_eor3_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) { tcg_gen_xor_i64(d, n, m); @@ -475,10 +680,7 @@ static void gen_eor3(unsigned vece, uint32_t d, uint32_t n, uint32_t m, tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op); } -static bool trans_EOR3(DisasContext *s, arg_rrrr_esz *a) -{ - return do_sve2_zzzz_fn(s, a, gen_eor3); -} +TRANS_FEAT(EOR3, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_eor3, a) static void gen_bcax_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) { @@ -506,10 +708,7 @@ static void gen_bcax(unsigned vece, uint32_t d, uint32_t n, 
uint32_t m, tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op); } -static bool trans_BCAX(DisasContext *s, arg_rrrr_esz *a) -{ - return do_sve2_zzzz_fn(s, a, gen_bcax); -} +TRANS_FEAT(BCAX, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bcax, a) static void gen_bsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m, uint32_t a, uint32_t oprsz, uint32_t maxsz) @@ -518,10 +717,7 @@ static void gen_bsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m, tcg_gen_gvec_bitsel(vece, d, a, n, m, oprsz, maxsz); } -static bool trans_BSL(DisasContext *s, arg_rrrr_esz *a) -{ - return do_sve2_zzzz_fn(s, a, gen_bsl); -} +TRANS_FEAT(BSL, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl, a) static void gen_bsl1n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) { @@ -556,10 +752,7 @@ static void gen_bsl1n(unsigned vece, uint32_t d, uint32_t n, uint32_t m, tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op); } -static bool trans_BSL1N(DisasContext *s, arg_rrrr_esz *a) -{ - return do_sve2_zzzz_fn(s, a, gen_bsl1n); -} +TRANS_FEAT(BSL1N, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl1n, a) static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) { @@ -603,10 +796,7 @@ static void gen_bsl2n(unsigned vece, uint32_t d, uint32_t n, uint32_t m, tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op); } -static bool trans_BSL2N(DisasContext *s, arg_rrrr_esz *a) -{ - return do_sve2_zzzz_fn(s, a, gen_bsl2n); -} +TRANS_FEAT(BSL2N, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl2n, a) static void gen_nbsl_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) { @@ -635,239 +825,136 @@ static void gen_nbsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m, tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op); } -static bool trans_NBSL(DisasContext *s, arg_rrrr_esz *a) -{ - return do_sve2_zzzz_fn(s, a, gen_nbsl); -} +TRANS_FEAT(NBSL, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_nbsl, a) /* *** SVE Integer Arithmetic - Unpredicated Group */ -static bool trans_ADD_zzz(DisasContext *s, arg_rrr_esz *a) -{ - return do_zzz_fn(s, a, tcg_gen_gvec_add); -} - -static bool trans_SUB_zzz(DisasContext *s, arg_rrr_esz *a) -{ - return do_zzz_fn(s, a, tcg_gen_gvec_sub); -} - -static bool trans_SQADD_zzz(DisasContext *s, arg_rrr_esz *a) -{ - return do_zzz_fn(s, a, tcg_gen_gvec_ssadd); -} - -static bool trans_SQSUB_zzz(DisasContext *s, arg_rrr_esz *a) -{ - return do_zzz_fn(s, a, tcg_gen_gvec_sssub); -} - -static bool trans_UQADD_zzz(DisasContext *s, arg_rrr_esz *a) -{ - return do_zzz_fn(s, a, tcg_gen_gvec_usadd); -} - -static bool trans_UQSUB_zzz(DisasContext *s, arg_rrr_esz *a) -{ - return do_zzz_fn(s, a, tcg_gen_gvec_ussub); -} +TRANS_FEAT(ADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_add, a) +TRANS_FEAT(SUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_sub, a) +TRANS_FEAT(SQADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_ssadd, a) +TRANS_FEAT(SQSUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_sssub, a) +TRANS_FEAT(UQADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_usadd, a) +TRANS_FEAT(UQSUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_ussub, a) /* *** SVE Integer Arithmetic - Binary Predicated Group */ -static bool do_zpzz_ool(DisasContext *s, arg_rprr_esz *a, gen_helper_gvec_4 *fn) -{ - if (fn == NULL) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, 0); - } - return true; -} - /* Select active elememnts from Zn and inactive elements from Zm, * storing the result in Zd. 
*/ -static void do_sel_z(DisasContext *s, int rd, int rn, int rm, int pg, int esz) +static bool do_sel_z(DisasContext *s, int rd, int rn, int rm, int pg, int esz) { static gen_helper_gvec_4 * const fns[4] = { gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h, gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d }; - gen_gvec_ool_zzzp(s, fns[esz], rd, rn, rm, pg, 0); + return gen_gvec_ool_zzzp(s, fns[esz], rd, rn, rm, pg, 0); } -#define DO_ZPZZ(NAME, name) \ -static bool trans_##NAME##_zpzz(DisasContext *s, arg_rprr_esz *a) \ -{ \ - static gen_helper_gvec_4 * const fns[4] = { \ - gen_helper_sve_##name##_zpzz_b, gen_helper_sve_##name##_zpzz_h, \ - gen_helper_sve_##name##_zpzz_s, gen_helper_sve_##name##_zpzz_d, \ +#define DO_ZPZZ(NAME, FEAT, name) \ + static gen_helper_gvec_4 * const name##_zpzz_fns[4] = { \ + gen_helper_##name##_zpzz_b, gen_helper_##name##_zpzz_h, \ + gen_helper_##name##_zpzz_s, gen_helper_##name##_zpzz_d, \ }; \ - return do_zpzz_ool(s, a, fns[a->esz]); \ -} + TRANS_FEAT(NAME, FEAT, gen_gvec_ool_arg_zpzz, \ + name##_zpzz_fns[a->esz], a, 0) -DO_ZPZZ(AND, and) -DO_ZPZZ(EOR, eor) -DO_ZPZZ(ORR, orr) -DO_ZPZZ(BIC, bic) +DO_ZPZZ(AND_zpzz, aa64_sve, sve_and) +DO_ZPZZ(EOR_zpzz, aa64_sve, sve_eor) +DO_ZPZZ(ORR_zpzz, aa64_sve, sve_orr) +DO_ZPZZ(BIC_zpzz, aa64_sve, sve_bic) -DO_ZPZZ(ADD, add) -DO_ZPZZ(SUB, sub) +DO_ZPZZ(ADD_zpzz, aa64_sve, sve_add) +DO_ZPZZ(SUB_zpzz, aa64_sve, sve_sub) -DO_ZPZZ(SMAX, smax) -DO_ZPZZ(UMAX, umax) -DO_ZPZZ(SMIN, smin) -DO_ZPZZ(UMIN, umin) -DO_ZPZZ(SABD, sabd) -DO_ZPZZ(UABD, uabd) +DO_ZPZZ(SMAX_zpzz, aa64_sve, sve_smax) +DO_ZPZZ(UMAX_zpzz, aa64_sve, sve_umax) +DO_ZPZZ(SMIN_zpzz, aa64_sve, sve_smin) +DO_ZPZZ(UMIN_zpzz, aa64_sve, sve_umin) +DO_ZPZZ(SABD_zpzz, aa64_sve, sve_sabd) +DO_ZPZZ(UABD_zpzz, aa64_sve, sve_uabd) -DO_ZPZZ(MUL, mul) -DO_ZPZZ(SMULH, smulh) -DO_ZPZZ(UMULH, umulh) +DO_ZPZZ(MUL_zpzz, aa64_sve, sve_mul) +DO_ZPZZ(SMULH_zpzz, aa64_sve, sve_smulh) +DO_ZPZZ(UMULH_zpzz, aa64_sve, sve_umulh) -DO_ZPZZ(ASR, asr) -DO_ZPZZ(LSR, lsr) -DO_ZPZZ(LSL, lsl) +DO_ZPZZ(ASR_zpzz, aa64_sve, sve_asr) +DO_ZPZZ(LSR_zpzz, aa64_sve, sve_lsr) +DO_ZPZZ(LSL_zpzz, aa64_sve, sve_lsl) -static bool trans_SDIV_zpzz(DisasContext *s, arg_rprr_esz *a) -{ - static gen_helper_gvec_4 * const fns[4] = { - NULL, NULL, gen_helper_sve_sdiv_zpzz_s, gen_helper_sve_sdiv_zpzz_d - }; - return do_zpzz_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_4 * const sdiv_fns[4] = { + NULL, NULL, gen_helper_sve_sdiv_zpzz_s, gen_helper_sve_sdiv_zpzz_d +}; +TRANS_FEAT(SDIV_zpzz, aa64_sve, gen_gvec_ool_arg_zpzz, sdiv_fns[a->esz], a, 0) -static bool trans_UDIV_zpzz(DisasContext *s, arg_rprr_esz *a) -{ - static gen_helper_gvec_4 * const fns[4] = { - NULL, NULL, gen_helper_sve_udiv_zpzz_s, gen_helper_sve_udiv_zpzz_d - }; - return do_zpzz_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_4 * const udiv_fns[4] = { + NULL, NULL, gen_helper_sve_udiv_zpzz_s, gen_helper_sve_udiv_zpzz_d +}; +TRANS_FEAT(UDIV_zpzz, aa64_sve, gen_gvec_ool_arg_zpzz, udiv_fns[a->esz], a, 0) -static bool trans_SEL_zpzz(DisasContext *s, arg_rprr_esz *a) -{ - if (sve_access_check(s)) { - do_sel_z(s, a->rd, a->rn, a->rm, a->pg, a->esz); - } - return true; -} - -#undef DO_ZPZZ +TRANS_FEAT(SEL_zpzz, aa64_sve, do_sel_z, a->rd, a->rn, a->rm, a->pg, a->esz) /* *** SVE Integer Arithmetic - Unary Predicated Group */ -static bool do_zpz_ool(DisasContext *s, arg_rpr_esz *a, gen_helper_gvec_3 *fn) -{ - if (fn == NULL) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, 0); - } - return true; -} - 
-#define DO_ZPZ(NAME, name) \ -static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \ -{ \ - static gen_helper_gvec_3 * const fns[4] = { \ - gen_helper_sve_##name##_b, gen_helper_sve_##name##_h, \ - gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ +#define DO_ZPZ(NAME, FEAT, name) \ + static gen_helper_gvec_3 * const name##_fns[4] = { \ + gen_helper_##name##_b, gen_helper_##name##_h, \ + gen_helper_##name##_s, gen_helper_##name##_d, \ }; \ - return do_zpz_ool(s, a, fns[a->esz]); \ -} + TRANS_FEAT(NAME, FEAT, gen_gvec_ool_arg_zpz, name##_fns[a->esz], a, 0) -DO_ZPZ(CLS, cls) -DO_ZPZ(CLZ, clz) -DO_ZPZ(CNT_zpz, cnt_zpz) -DO_ZPZ(CNOT, cnot) -DO_ZPZ(NOT_zpz, not_zpz) -DO_ZPZ(ABS, abs) -DO_ZPZ(NEG, neg) +DO_ZPZ(CLS, aa64_sve, sve_cls) +DO_ZPZ(CLZ, aa64_sve, sve_clz) +DO_ZPZ(CNT_zpz, aa64_sve, sve_cnt_zpz) +DO_ZPZ(CNOT, aa64_sve, sve_cnot) +DO_ZPZ(NOT_zpz, aa64_sve, sve_not_zpz) +DO_ZPZ(ABS, aa64_sve, sve_abs) +DO_ZPZ(NEG, aa64_sve, sve_neg) +DO_ZPZ(RBIT, aa64_sve, sve_rbit) -static bool trans_FABS(DisasContext *s, arg_rpr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - NULL, - gen_helper_sve_fabs_h, - gen_helper_sve_fabs_s, - gen_helper_sve_fabs_d - }; - return do_zpz_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_3 * const fabs_fns[4] = { + NULL, gen_helper_sve_fabs_h, + gen_helper_sve_fabs_s, gen_helper_sve_fabs_d, +}; +TRANS_FEAT(FABS, aa64_sve, gen_gvec_ool_arg_zpz, fabs_fns[a->esz], a, 0) -static bool trans_FNEG(DisasContext *s, arg_rpr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - NULL, - gen_helper_sve_fneg_h, - gen_helper_sve_fneg_s, - gen_helper_sve_fneg_d - }; - return do_zpz_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_3 * const fneg_fns[4] = { + NULL, gen_helper_sve_fneg_h, + gen_helper_sve_fneg_s, gen_helper_sve_fneg_d, +}; +TRANS_FEAT(FNEG, aa64_sve, gen_gvec_ool_arg_zpz, fneg_fns[a->esz], a, 0) -static bool trans_SXTB(DisasContext *s, arg_rpr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - NULL, - gen_helper_sve_sxtb_h, - gen_helper_sve_sxtb_s, - gen_helper_sve_sxtb_d - }; - return do_zpz_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_3 * const sxtb_fns[4] = { + NULL, gen_helper_sve_sxtb_h, + gen_helper_sve_sxtb_s, gen_helper_sve_sxtb_d, +}; +TRANS_FEAT(SXTB, aa64_sve, gen_gvec_ool_arg_zpz, sxtb_fns[a->esz], a, 0) -static bool trans_UXTB(DisasContext *s, arg_rpr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - NULL, - gen_helper_sve_uxtb_h, - gen_helper_sve_uxtb_s, - gen_helper_sve_uxtb_d - }; - return do_zpz_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_3 * const uxtb_fns[4] = { + NULL, gen_helper_sve_uxtb_h, + gen_helper_sve_uxtb_s, gen_helper_sve_uxtb_d, +}; +TRANS_FEAT(UXTB, aa64_sve, gen_gvec_ool_arg_zpz, uxtb_fns[a->esz], a, 0) -static bool trans_SXTH(DisasContext *s, arg_rpr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - NULL, NULL, - gen_helper_sve_sxth_s, - gen_helper_sve_sxth_d - }; - return do_zpz_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_3 * const sxth_fns[4] = { + NULL, NULL, gen_helper_sve_sxth_s, gen_helper_sve_sxth_d +}; +TRANS_FEAT(SXTH, aa64_sve, gen_gvec_ool_arg_zpz, sxth_fns[a->esz], a, 0) -static bool trans_UXTH(DisasContext *s, arg_rpr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - NULL, NULL, - gen_helper_sve_uxth_s, - gen_helper_sve_uxth_d - }; - return do_zpz_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_3 * const uxth_fns[4] = { + NULL, NULL, gen_helper_sve_uxth_s, gen_helper_sve_uxth_d +}; +TRANS_FEAT(UXTH, aa64_sve, gen_gvec_ool_arg_zpz, 
uxth_fns[a->esz], a, 0) -static bool trans_SXTW(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ool(s, a, a->esz == 3 ? gen_helper_sve_sxtw_d : NULL); -} - -static bool trans_UXTW(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ool(s, a, a->esz == 3 ? gen_helper_sve_uxtw_d : NULL); -} - -#undef DO_ZPZ +TRANS_FEAT(SXTW, aa64_sve, gen_gvec_ool_arg_zpz, + a->esz == 3 ? gen_helper_sve_sxtw_d : NULL, a, 0) +TRANS_FEAT(UXTW, aa64_sve, gen_gvec_ool_arg_zpz, + a->esz == 3 ? gen_helper_sve_uxtw_d : NULL, a, 0) /* *** SVE Integer Reduction Group @@ -889,7 +976,7 @@ static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a, return true; } - desc = tcg_const_i32(simd_desc(vsz, vsz, 0)); + desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); temp = tcg_temp_new_i64(); t_zn = tcg_temp_new_ptr(); t_pg = tcg_temp_new_ptr(); @@ -899,7 +986,6 @@ static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a, fn(temp, t_zn, t_pg, desc); tcg_temp_free_ptr(t_zn); tcg_temp_free_ptr(t_pg); - tcg_temp_free_i32(desc); write_fp_dreg(s, a->rd, temp); tcg_temp_free_i64(temp); @@ -907,14 +993,11 @@ static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a, } #define DO_VPZ(NAME, name) \ -static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \ -{ \ - static gen_helper_gvec_reduc * const fns[4] = { \ + static gen_helper_gvec_reduc * const name##_fns[4] = { \ gen_helper_sve_##name##_b, gen_helper_sve_##name##_h, \ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ }; \ - return do_vpz_ool(s, a, fns[a->esz]); \ -} + TRANS_FEAT(NAME, aa64_sve, do_vpz_ool, a, name##_fns[a->esz]) DO_VPZ(ORV, orv) DO_VPZ(ANDV, andv) @@ -926,14 +1009,11 @@ DO_VPZ(UMAXV, umaxv) DO_VPZ(SMINV, sminv) DO_VPZ(UMINV, uminv) -static bool trans_SADDV(DisasContext *s, arg_rpr_esz *a) -{ - static gen_helper_gvec_reduc * const fns[4] = { - gen_helper_sve_saddv_b, gen_helper_sve_saddv_h, - gen_helper_sve_saddv_s, NULL - }; - return do_vpz_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_reduc * const saddv_fns[4] = { + gen_helper_sve_saddv_b, gen_helper_sve_saddv_h, + gen_helper_sve_saddv_s, NULL +}; +TRANS_FEAT(SADDV, aa64_sve, do_vpz_ool, a, saddv_fns[a->esz]) #undef DO_VPZ @@ -952,168 +1032,105 @@ static bool do_movz_zpz(DisasContext *s, int rd, int rn, int pg, gen_helper_sve_movz_b, gen_helper_sve_movz_h, gen_helper_sve_movz_s, gen_helper_sve_movz_d, }; - - if (sve_access_check(s)) { - gen_gvec_ool_zzp(s, fns[esz], rd, rn, pg, invert); - } - return true; + return gen_gvec_ool_zzp(s, fns[esz], rd, rn, pg, invert); } -static bool do_zpzi_ool(DisasContext *s, arg_rpri_esz *a, - gen_helper_gvec_3 *fn) +static bool do_shift_zpzi(DisasContext *s, arg_rpri_esz *a, bool asr, + gen_helper_gvec_3 * const fns[4]) { - if (sve_access_check(s)) { - gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, a->imm); - } - return true; -} + int max; -static bool trans_ASR_zpzi(DisasContext *s, arg_rpri_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve_asr_zpzi_b, gen_helper_sve_asr_zpzi_h, - gen_helper_sve_asr_zpzi_s, gen_helper_sve_asr_zpzi_d, - }; if (a->esz < 0) { /* Invalid tsz encoding -- see tszimm_esz. */ return false; } - /* Shift by element size is architecturally valid. For - arithmetic right-shift, it's the same as by one less. */ - a->imm = MIN(a->imm, (8 << a->esz) - 1); - return do_zpzi_ool(s, a, fns[a->esz]); + + /* + * Shift by element size is architecturally valid. + * For arithmetic right-shift, it's the same as by one less. + * For logical shifts and ASRD, it is a zeroing operation. 
+ */ + max = 8 << a->esz; + if (a->imm >= max) { + if (asr) { + a->imm = max - 1; + } else { + return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true); + } + } + return gen_gvec_ool_arg_zpzi(s, fns[a->esz], a); } -static bool trans_LSR_zpzi(DisasContext *s, arg_rpri_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve_lsr_zpzi_b, gen_helper_sve_lsr_zpzi_h, - gen_helper_sve_lsr_zpzi_s, gen_helper_sve_lsr_zpzi_d, - }; - if (a->esz < 0) { - return false; - } - /* Shift by element size is architecturally valid. - For logical shifts, it is a zeroing operation. */ - if (a->imm >= (8 << a->esz)) { - return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true); - } else { - return do_zpzi_ool(s, a, fns[a->esz]); - } -} +static gen_helper_gvec_3 * const asr_zpzi_fns[4] = { + gen_helper_sve_asr_zpzi_b, gen_helper_sve_asr_zpzi_h, + gen_helper_sve_asr_zpzi_s, gen_helper_sve_asr_zpzi_d, +}; +TRANS_FEAT(ASR_zpzi, aa64_sve, do_shift_zpzi, a, true, asr_zpzi_fns) -static bool trans_LSL_zpzi(DisasContext *s, arg_rpri_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve_lsl_zpzi_b, gen_helper_sve_lsl_zpzi_h, - gen_helper_sve_lsl_zpzi_s, gen_helper_sve_lsl_zpzi_d, - }; - if (a->esz < 0) { - return false; - } - /* Shift by element size is architecturally valid. - For logical shifts, it is a zeroing operation. */ - if (a->imm >= (8 << a->esz)) { - return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true); - } else { - return do_zpzi_ool(s, a, fns[a->esz]); - } -} +static gen_helper_gvec_3 * const lsr_zpzi_fns[4] = { + gen_helper_sve_lsr_zpzi_b, gen_helper_sve_lsr_zpzi_h, + gen_helper_sve_lsr_zpzi_s, gen_helper_sve_lsr_zpzi_d, +}; +TRANS_FEAT(LSR_zpzi, aa64_sve, do_shift_zpzi, a, false, lsr_zpzi_fns) -static bool trans_ASRD(DisasContext *s, arg_rpri_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve_asrd_b, gen_helper_sve_asrd_h, - gen_helper_sve_asrd_s, gen_helper_sve_asrd_d, - }; - if (a->esz < 0) { - return false; - } - /* Shift by element size is architecturally valid. For arithmetic - right shift for division, it is a zeroing operation. 
*/ - if (a->imm >= (8 << a->esz)) { - return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true); - } else { - return do_zpzi_ool(s, a, fns[a->esz]); - } -} +static gen_helper_gvec_3 * const lsl_zpzi_fns[4] = { + gen_helper_sve_lsl_zpzi_b, gen_helper_sve_lsl_zpzi_h, + gen_helper_sve_lsl_zpzi_s, gen_helper_sve_lsl_zpzi_d, +}; +TRANS_FEAT(LSL_zpzi, aa64_sve, do_shift_zpzi, a, false, lsl_zpzi_fns) -static bool trans_SQSHL_zpzi(DisasContext *s, arg_rpri_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve2_sqshl_zpzi_b, gen_helper_sve2_sqshl_zpzi_h, - gen_helper_sve2_sqshl_zpzi_s, gen_helper_sve2_sqshl_zpzi_d, - }; - if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) { - return false; - } - return do_zpzi_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_3 * const asrd_fns[4] = { + gen_helper_sve_asrd_b, gen_helper_sve_asrd_h, + gen_helper_sve_asrd_s, gen_helper_sve_asrd_d, +}; +TRANS_FEAT(ASRD, aa64_sve, do_shift_zpzi, a, false, asrd_fns) -static bool trans_UQSHL_zpzi(DisasContext *s, arg_rpri_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve2_uqshl_zpzi_b, gen_helper_sve2_uqshl_zpzi_h, - gen_helper_sve2_uqshl_zpzi_s, gen_helper_sve2_uqshl_zpzi_d, - }; - if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) { - return false; - } - return do_zpzi_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_3 * const sqshl_zpzi_fns[4] = { + gen_helper_sve2_sqshl_zpzi_b, gen_helper_sve2_sqshl_zpzi_h, + gen_helper_sve2_sqshl_zpzi_s, gen_helper_sve2_sqshl_zpzi_d, +}; +TRANS_FEAT(SQSHL_zpzi, aa64_sve2, gen_gvec_ool_arg_zpzi, + a->esz < 0 ? NULL : sqshl_zpzi_fns[a->esz], a) -static bool trans_SRSHR(DisasContext *s, arg_rpri_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve2_srshr_b, gen_helper_sve2_srshr_h, - gen_helper_sve2_srshr_s, gen_helper_sve2_srshr_d, - }; - if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) { - return false; - } - return do_zpzi_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_3 * const uqshl_zpzi_fns[4] = { + gen_helper_sve2_uqshl_zpzi_b, gen_helper_sve2_uqshl_zpzi_h, + gen_helper_sve2_uqshl_zpzi_s, gen_helper_sve2_uqshl_zpzi_d, +}; +TRANS_FEAT(UQSHL_zpzi, aa64_sve2, gen_gvec_ool_arg_zpzi, + a->esz < 0 ? NULL : uqshl_zpzi_fns[a->esz], a) -static bool trans_URSHR(DisasContext *s, arg_rpri_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve2_urshr_b, gen_helper_sve2_urshr_h, - gen_helper_sve2_urshr_s, gen_helper_sve2_urshr_d, - }; - if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) { - return false; - } - return do_zpzi_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_3 * const srshr_fns[4] = { + gen_helper_sve2_srshr_b, gen_helper_sve2_srshr_h, + gen_helper_sve2_srshr_s, gen_helper_sve2_srshr_d, +}; +TRANS_FEAT(SRSHR, aa64_sve2, gen_gvec_ool_arg_zpzi, + a->esz < 0 ? NULL : srshr_fns[a->esz], a) -static bool trans_SQSHLU(DisasContext *s, arg_rpri_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve2_sqshlu_b, gen_helper_sve2_sqshlu_h, - gen_helper_sve2_sqshlu_s, gen_helper_sve2_sqshlu_d, - }; - if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) { - return false; - } - return do_zpzi_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_3 * const urshr_fns[4] = { + gen_helper_sve2_urshr_b, gen_helper_sve2_urshr_h, + gen_helper_sve2_urshr_s, gen_helper_sve2_urshr_d, +}; +TRANS_FEAT(URSHR, aa64_sve2, gen_gvec_ool_arg_zpzi, + a->esz < 0 ? 
NULL : urshr_fns[a->esz], a) + +static gen_helper_gvec_3 * const sqshlu_fns[4] = { + gen_helper_sve2_sqshlu_b, gen_helper_sve2_sqshlu_h, + gen_helper_sve2_sqshlu_s, gen_helper_sve2_sqshlu_d, +}; +TRANS_FEAT(SQSHLU, aa64_sve2, gen_gvec_ool_arg_zpzi, + a->esz < 0 ? NULL : sqshlu_fns[a->esz], a) /* *** SVE Bitwise Shift - Predicated Group */ #define DO_ZPZW(NAME, name) \ -static bool trans_##NAME##_zpzw(DisasContext *s, arg_rprr_esz *a) \ -{ \ - static gen_helper_gvec_4 * const fns[3] = { \ + static gen_helper_gvec_4 * const name##_zpzw_fns[4] = { \ gen_helper_sve_##name##_zpzw_b, gen_helper_sve_##name##_zpzw_h, \ - gen_helper_sve_##name##_zpzw_s, \ + gen_helper_sve_##name##_zpzw_s, NULL \ }; \ - if (a->esz < 0 || a->esz >= 3) { \ - return false; \ - } \ - return do_zpzz_ool(s, a, fns[a->esz]); \ -} + TRANS_FEAT(NAME##_zpzw, aa64_sve, gen_gvec_ool_arg_zpzz, \ + a->esz < 0 ? NULL : name##_zpzw_fns[a->esz], a, 0) DO_ZPZW(ASR, asr) DO_ZPZW(LSR, lsr) @@ -1152,45 +1169,21 @@ static bool do_shift_imm(DisasContext *s, arg_rri_esz *a, bool asr, return true; } -static bool trans_ASR_zzi(DisasContext *s, arg_rri_esz *a) -{ - return do_shift_imm(s, a, true, tcg_gen_gvec_sari); -} - -static bool trans_LSR_zzi(DisasContext *s, arg_rri_esz *a) -{ - return do_shift_imm(s, a, false, tcg_gen_gvec_shri); -} - -static bool trans_LSL_zzi(DisasContext *s, arg_rri_esz *a) -{ - return do_shift_imm(s, a, false, tcg_gen_gvec_shli); -} - -static bool do_zzw_ool(DisasContext *s, arg_rrr_esz *a, gen_helper_gvec_3 *fn) -{ - if (fn == NULL) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, 0); - } - return true; -} +TRANS_FEAT(ASR_zzi, aa64_sve, do_shift_imm, a, true, tcg_gen_gvec_sari) +TRANS_FEAT(LSR_zzi, aa64_sve, do_shift_imm, a, false, tcg_gen_gvec_shri) +TRANS_FEAT(LSL_zzi, aa64_sve, do_shift_imm, a, false, tcg_gen_gvec_shli) #define DO_ZZW(NAME, name) \ -static bool trans_##NAME##_zzw(DisasContext *s, arg_rrr_esz *a) \ -{ \ - static gen_helper_gvec_3 * const fns[4] = { \ + static gen_helper_gvec_3 * const name##_zzw_fns[4] = { \ gen_helper_sve_##name##_zzw_b, gen_helper_sve_##name##_zzw_h, \ gen_helper_sve_##name##_zzw_s, NULL \ }; \ - return do_zzw_ool(s, a, fns[a->esz]); \ -} + TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_arg_zzz, \ + name##_zzw_fns[a->esz], a, 0) -DO_ZZW(ASR, asr) -DO_ZZW(LSR, lsr) -DO_ZZW(LSL, lsl) +DO_ZZW(ASR_zzw, asr) +DO_ZZW(LSR_zzw, lsr) +DO_ZZW(LSL_zzw, lsl) #undef DO_ZZW @@ -1213,31 +1206,36 @@ static bool do_zpzzz_ool(DisasContext *s, arg_rprrr_esz *a, return true; } -#define DO_ZPZZZ(NAME, name) \ -static bool trans_##NAME(DisasContext *s, arg_rprrr_esz *a) \ -{ \ - static gen_helper_gvec_5 * const fns[4] = { \ - gen_helper_sve_##name##_b, gen_helper_sve_##name##_h, \ - gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ - }; \ - return do_zpzzz_ool(s, a, fns[a->esz]); \ -} +static gen_helper_gvec_5 * const mla_fns[4] = { + gen_helper_sve_mla_b, gen_helper_sve_mla_h, + gen_helper_sve_mla_s, gen_helper_sve_mla_d, +}; +TRANS_FEAT(MLA, aa64_sve, do_zpzzz_ool, a, mla_fns[a->esz]) -DO_ZPZZZ(MLA, mla) -DO_ZPZZZ(MLS, mls) - -#undef DO_ZPZZZ +static gen_helper_gvec_5 * const mls_fns[4] = { + gen_helper_sve_mls_b, gen_helper_sve_mls_h, + gen_helper_sve_mls_s, gen_helper_sve_mls_d, +}; +TRANS_FEAT(MLS, aa64_sve, do_zpzzz_ool, a, mls_fns[a->esz]) /* *** SVE Index Generation Group */ -static void do_index(DisasContext *s, int esz, int rd, +static bool do_index(DisasContext *s, int esz, int rd, TCGv_i64 start, TCGv_i64 incr) { - unsigned vsz = 
vec_full_reg_size(s); - TCGv_i32 desc = tcg_const_i32(simd_desc(vsz, vsz, 0)); - TCGv_ptr t_zd = tcg_temp_new_ptr(); + unsigned vsz; + TCGv_i32 desc; + TCGv_ptr t_zd; + + if (!sve_access_check(s)) { + return true; + } + + vsz = vec_full_reg_size(s); + desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); + t_zd = tcg_temp_new_ptr(); tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd)); if (esz == 3) { @@ -1260,52 +1258,17 @@ static void do_index(DisasContext *s, int esz, int rd, tcg_temp_free_i32(i32); } tcg_temp_free_ptr(t_zd); - tcg_temp_free_i32(desc); -} - -static bool trans_INDEX_ii(DisasContext *s, arg_INDEX_ii *a) -{ - if (sve_access_check(s)) { - TCGv_i64 start = tcg_const_i64(a->imm1); - TCGv_i64 incr = tcg_const_i64(a->imm2); - do_index(s, a->esz, a->rd, start, incr); - tcg_temp_free_i64(start); - tcg_temp_free_i64(incr); - } return true; } -static bool trans_INDEX_ir(DisasContext *s, arg_INDEX_ir *a) -{ - if (sve_access_check(s)) { - TCGv_i64 start = tcg_const_i64(a->imm); - TCGv_i64 incr = cpu_reg(s, a->rm); - do_index(s, a->esz, a->rd, start, incr); - tcg_temp_free_i64(start); - } - return true; -} - -static bool trans_INDEX_ri(DisasContext *s, arg_INDEX_ri *a) -{ - if (sve_access_check(s)) { - TCGv_i64 start = cpu_reg(s, a->rn); - TCGv_i64 incr = tcg_const_i64(a->imm); - do_index(s, a->esz, a->rd, start, incr); - tcg_temp_free_i64(incr); - } - return true; -} - -static bool trans_INDEX_rr(DisasContext *s, arg_INDEX_rr *a) -{ - if (sve_access_check(s)) { - TCGv_i64 start = cpu_reg(s, a->rn); - TCGv_i64 incr = cpu_reg(s, a->rm); - do_index(s, a->esz, a->rd, start, incr); - } - return true; -} +TRANS_FEAT(INDEX_ii, aa64_sve, do_index, a->esz, a->rd, + tcg_constant_i64(a->imm1), tcg_constant_i64(a->imm2)) +TRANS_FEAT(INDEX_ir, aa64_sve, do_index, a->esz, a->rd, + tcg_constant_i64(a->imm), cpu_reg(s, a->rm)) +TRANS_FEAT(INDEX_ri, aa64_sve, do_index, a->esz, a->rd, + cpu_reg(s, a->rn), tcg_constant_i64(a->imm)) +TRANS_FEAT(INDEX_rr, aa64_sve, do_index, a->esz, a->rd, + cpu_reg(s, a->rn), cpu_reg(s, a->rm)) /* *** SVE Stack Allocation Group @@ -1313,6 +1276,9 @@ static bool trans_INDEX_rr(DisasContext *s, arg_INDEX_rr *a) static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { TCGv_i64 rd = cpu_reg_sp(s, a->rd); TCGv_i64 rn = cpu_reg_sp(s, a->rn); @@ -1321,8 +1287,24 @@ static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a) return true; } +static bool trans_ADDSVL(DisasContext *s, arg_ADDSVL *a) +{ + if (!dc_isar_feature(aa64_sme, s)) { + return false; + } + if (sme_enabled_check(s)) { + TCGv_i64 rd = cpu_reg_sp(s, a->rd); + TCGv_i64 rn = cpu_reg_sp(s, a->rn); + tcg_gen_addi_i64(rd, rn, a->imm * streaming_vec_reg_size(s)); + } + return true; +} + static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { TCGv_i64 rd = cpu_reg_sp(s, a->rd); TCGv_i64 rn = cpu_reg_sp(s, a->rn); @@ -1331,8 +1313,24 @@ static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a) return true; } +static bool trans_ADDSPL(DisasContext *s, arg_ADDSPL *a) +{ + if (!dc_isar_feature(aa64_sme, s)) { + return false; + } + if (sme_enabled_check(s)) { + TCGv_i64 rd = cpu_reg_sp(s, a->rd); + TCGv_i64 rn = cpu_reg_sp(s, a->rn); + tcg_gen_addi_i64(rd, rn, a->imm * streaming_pred_reg_size(s)); + } + return true; +} + static bool trans_RDVL(DisasContext *s, arg_RDVL *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { 
TCGv_i64 reg = cpu_reg(s, a->rd); tcg_gen_movi_i64(reg, a->imm * vec_full_reg_size(s)); @@ -1340,75 +1338,49 @@ static bool trans_RDVL(DisasContext *s, arg_RDVL *a) return true; } +static bool trans_RDSVL(DisasContext *s, arg_RDSVL *a) +{ + if (!dc_isar_feature(aa64_sme, s)) { + return false; + } + if (sme_enabled_check(s)) { + TCGv_i64 reg = cpu_reg(s, a->rd); + tcg_gen_movi_i64(reg, a->imm * streaming_vec_reg_size(s)); + } + return true; +} + /* *** SVE Compute Vector Address Group */ static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn) { - if (sve_access_check(s)) { - gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm); - } - return true; + return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm); } -static bool trans_ADR_p32(DisasContext *s, arg_rrri *a) -{ - return do_adr(s, a, gen_helper_sve_adr_p32); -} - -static bool trans_ADR_p64(DisasContext *s, arg_rrri *a) -{ - return do_adr(s, a, gen_helper_sve_adr_p64); -} - -static bool trans_ADR_s32(DisasContext *s, arg_rrri *a) -{ - return do_adr(s, a, gen_helper_sve_adr_s32); -} - -static bool trans_ADR_u32(DisasContext *s, arg_rrri *a) -{ - return do_adr(s, a, gen_helper_sve_adr_u32); -} +TRANS_FEAT_NONSTREAMING(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32) +TRANS_FEAT_NONSTREAMING(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64) +TRANS_FEAT_NONSTREAMING(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32) +TRANS_FEAT_NONSTREAMING(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32) /* *** SVE Integer Misc - Unpredicated Group */ -static bool trans_FEXPA(DisasContext *s, arg_rr_esz *a) -{ - static gen_helper_gvec_2 * const fns[4] = { - NULL, - gen_helper_sve_fexpa_h, - gen_helper_sve_fexpa_s, - gen_helper_sve_fexpa_d, - }; - if (a->esz == 0) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zz(s, fns[a->esz], a->rd, a->rn, 0); - } - return true; -} +static gen_helper_gvec_2 * const fexpa_fns[4] = { + NULL, gen_helper_sve_fexpa_h, + gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d, +}; +TRANS_FEAT_NONSTREAMING(FEXPA, aa64_sve, gen_gvec_ool_zz, + fexpa_fns[a->esz], a->rd, a->rn, 0) -static bool trans_FTSSEL(DisasContext *s, arg_rrr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - NULL, - gen_helper_sve_ftssel_h, - gen_helper_sve_ftssel_s, - gen_helper_sve_ftssel_d, - }; - if (a->esz == 0) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zzz(s, fns[a->esz], a->rd, a->rn, a->rm, 0); - } - return true; -} +static gen_helper_gvec_3 * const ftssel_fns[4] = { + NULL, gen_helper_sve_ftssel_h, + gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d, +}; +TRANS_FEAT_NONSTREAMING(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, + ftssel_fns[a->esz], a, 0) /* *** SVE Predicate Logical Operations Group @@ -1492,20 +1464,17 @@ static bool trans_AND_pppp(DisasContext *s, arg_rprr_s *a) .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (!a->s) { - if (!sve_access_check(s)) { - return true; - } if (a->rn == a->rm) { if (a->pg == a->rn) { - do_mov_p(s, a->rd, a->rn); - } else { - gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->pg); + return do_mov_p(s, a->rd, a->rn); } - return true; + return gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->pg); } else if (a->pg == a->rn || a->pg == a->rm) { - gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->rm); - return true; + return gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->rm); } } return do_pppp_flags(s, a, &op); @@ -1533,11 +1502,11 @@ static bool 
trans_BIC_pppp(DisasContext *s, arg_rprr_s *a) .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (!a->s && a->pg == a->rn) { - if (sve_access_check(s)) { - gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->rn, a->rm); - } - return true; + return gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->rn, a->rm); } return do_pppp_flags(s, a, &op); } @@ -1563,12 +1532,20 @@ static bool trans_EOR_pppp(DisasContext *s, arg_rprr_s *a) .fno = gen_helper_sve_eor_pppp, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; + + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } + /* Alias NOT (predicate) is EOR Pd.B, Pg/Z, Pn.B, Pg.B */ + if (!a->s && a->pg == a->rm) { + return gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->pg, a->rn); + } return do_pppp_flags(s, a, &op); } static bool trans_SEL_pppp(DisasContext *s, arg_rprr_s *a) { - if (a->s) { + if (a->s || !dc_isar_feature(aa64_sve, s)) { return false; } if (sve_access_check(s)) { @@ -1603,6 +1580,9 @@ static bool trans_ORR_pppp(DisasContext *s, arg_rprr_s *a) .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (!a->s && a->pg == a->rn && a->rn == a->rm) { return do_mov_p(s, a->rd, a->rn); } @@ -1630,6 +1610,10 @@ static bool trans_ORN_pppp(DisasContext *s, arg_rprr_s *a) .fno = gen_helper_sve_orn_pppp, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; + + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } return do_pppp_flags(s, a, &op); } @@ -1654,6 +1638,10 @@ static bool trans_NOR_pppp(DisasContext *s, arg_rprr_s *a) .fno = gen_helper_sve_nor_pppp, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; + + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } return do_pppp_flags(s, a, &op); } @@ -1678,6 +1666,10 @@ static bool trans_NAND_pppp(DisasContext *s, arg_rprr_s *a) .fno = gen_helper_sve_nand_pppp, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; + + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } return do_pppp_flags(s, a, &op); } @@ -1687,6 +1679,9 @@ static bool trans_NAND_pppp(DisasContext *s, arg_rprr_s *a) static bool trans_PTEST(DisasContext *s, arg_PTEST *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { int nofs = pred_full_reg_offset(s, a->rn); int gofs = pred_full_reg_offset(s, a->pg); @@ -1827,22 +1822,14 @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag) return true; } -static bool trans_PTRUE(DisasContext *s, arg_PTRUE *a) -{ - return do_predset(s, a->esz, a->rd, a->pat, a->s); -} +TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s) -static bool trans_SETFFR(DisasContext *s, arg_SETFFR *a) -{ - /* Note pat == 31 is #all, to set all elements. */ - return do_predset(s, 0, FFR_PRED_NUM, 31, false); -} +/* Note pat == 31 is #all, to set all elements. */ +TRANS_FEAT_NONSTREAMING(SETFFR, aa64_sve, + do_predset, 0, FFR_PRED_NUM, 31, false) -static bool trans_PFALSE(DisasContext *s, arg_PFALSE *a) -{ - /* Note pat == 32 is #unimp, to set no elements. */ - return do_predset(s, 0, a->rd, 32, false); -} +/* Note pat == 32 is #unimp, to set no elements. 
*/ +TRANS_FEAT(PFALSE, aa64_sve, do_predset, 0, a->rd, 32, false) static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a) { @@ -1853,18 +1840,13 @@ static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a) .rd = a->rd, .pg = a->pg, .s = a->s, .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM, }; + + s->is_nonstreaming = true; return trans_AND_pppp(s, &alt_a); } -static bool trans_RDFFR(DisasContext *s, arg_RDFFR *a) -{ - return do_mov_p(s, a->rd, FFR_PRED_NUM); -} - -static bool trans_WRFFR(DisasContext *s, arg_WRFFR *a) -{ - return do_mov_p(s, FFR_PRED_NUM, a->rn); -} +TRANS_FEAT_NONSTREAMING(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM) +TRANS_FEAT_NONSTREAMING(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn) static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a, void (*gen_fn)(TCGv_i32, TCGv_ptr, @@ -1884,9 +1866,9 @@ static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a, tcg_gen_addi_ptr(t_pd, cpu_env, pred_full_reg_offset(s, a->rd)); tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->rn)); - t = tcg_const_i32(desc); + t = tcg_temp_new_i32(); - gen_fn(t, t_pd, t_pg, t); + gen_fn(t, t_pd, t_pg, tcg_constant_i32(desc)); tcg_temp_free_ptr(t_pd); tcg_temp_free_ptr(t_pg); @@ -1895,15 +1877,8 @@ static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a, return true; } -static bool trans_PFIRST(DisasContext *s, arg_rr_esz *a) -{ - return do_pfirst_pnext(s, a, gen_helper_sve_pfirst); -} - -static bool trans_PNEXT(DisasContext *s, arg_rr_esz *a) -{ - return do_pfirst_pnext(s, a, gen_helper_sve_pnext); -} +TRANS_FEAT(PFIRST, aa64_sve, do_pfirst_pnext, a, gen_helper_sve_pfirst) +TRANS_FEAT(PNEXT, aa64_sve, do_pfirst_pnext, a, gen_helper_sve_pnext) /* *** SVE Element Count Group @@ -1916,8 +1891,6 @@ static bool trans_PNEXT(DisasContext *s, arg_rr_esz *a) static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d) { int64_t ibound; - TCGv_i64 bound; - TCGCond cond; /* Use normal 64-bit arithmetic to detect 32-bit overflow. */ if (u) { @@ -1928,35 +1901,32 @@ static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d) if (d) { tcg_gen_sub_i64(reg, reg, val); ibound = (u ? 0 : INT32_MIN); - cond = TCG_COND_LT; + tcg_gen_smax_i64(reg, reg, tcg_constant_i64(ibound)); } else { tcg_gen_add_i64(reg, reg, val); ibound = (u ? UINT32_MAX : INT32_MAX); - cond = TCG_COND_GT; + tcg_gen_smin_i64(reg, reg, tcg_constant_i64(ibound)); } - bound = tcg_const_i64(ibound); - tcg_gen_movcond_i64(cond, reg, reg, bound, bound, reg); - tcg_temp_free_i64(bound); } /* Similarly with 64-bit values. */ static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d) { TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2; if (u) { if (d) { tcg_gen_sub_i64(t0, reg, val); - tcg_gen_movi_i64(t1, 0); - tcg_gen_movcond_i64(TCG_COND_LTU, reg, reg, val, t1, t0); + t2 = tcg_constant_i64(0); + tcg_gen_movcond_i64(TCG_COND_LTU, reg, reg, val, t2, t0); } else { tcg_gen_add_i64(t0, reg, val); - tcg_gen_movi_i64(t1, -1); - tcg_gen_movcond_i64(TCG_COND_LTU, reg, t0, reg, t1, t0); + t2 = tcg_constant_i64(-1); + tcg_gen_movcond_i64(TCG_COND_LTU, reg, t0, reg, t2, t0); } } else { + TCGv_i64 t1 = tcg_temp_new_i64(); if (d) { /* Detect signed overflow for subtraction. */ tcg_gen_xor_i64(t0, reg, val); @@ -1966,7 +1936,7 @@ static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d) /* Bound the result. 
*/ tcg_gen_movi_i64(reg, INT64_MIN); - t2 = tcg_const_i64(0); + t2 = tcg_constant_i64(0); tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, reg, t1); } else { /* Detect signed overflow for addition. */ @@ -1977,13 +1947,12 @@ static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d) /* Bound the result. */ tcg_gen_movi_i64(t1, INT64_MAX); - t2 = tcg_const_i64(0); + t2 = tcg_constant_i64(0); tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, t1, reg); } - tcg_temp_free_i64(t2); + tcg_temp_free_i64(t1); } tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } /* Similarly with a vector and a scalar operand. */ @@ -1999,7 +1968,7 @@ static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn, nptr = tcg_temp_new_ptr(); tcg_gen_addi_ptr(dptr, cpu_env, vec_full_reg_offset(s, rd)); tcg_gen_addi_ptr(nptr, cpu_env, vec_full_reg_offset(s, rn)); - desc = tcg_const_i32(simd_desc(vsz, vsz, 0)); + desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); switch (esz) { case MO_8: @@ -2068,11 +2037,13 @@ static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn, tcg_temp_free_ptr(dptr); tcg_temp_free_ptr(nptr); - tcg_temp_free_i32(desc); } static bool trans_CNT_r(DisasContext *s, arg_CNT_r *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { unsigned fullsz = vec_full_reg_size(s); unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); @@ -2083,6 +2054,9 @@ static bool trans_CNT_r(DisasContext *s, arg_CNT_r *a) static bool trans_INCDEC_r(DisasContext *s, arg_incdec_cnt *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { unsigned fullsz = vec_full_reg_size(s); unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); @@ -2096,6 +2070,9 @@ static bool trans_INCDEC_r(DisasContext *s, arg_incdec_cnt *a) static bool trans_SINCDEC_r_32(DisasContext *s, arg_incdec_cnt *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (!sve_access_check(s)) { return true; } @@ -2113,15 +2090,16 @@ static bool trans_SINCDEC_r_32(DisasContext *s, arg_incdec_cnt *a) tcg_gen_ext32s_i64(reg, reg); } } else { - TCGv_i64 t = tcg_const_i64(inc); - do_sat_addsub_32(reg, t, a->u, a->d); - tcg_temp_free_i64(t); + do_sat_addsub_32(reg, tcg_constant_i64(inc), a->u, a->d); } return true; } static bool trans_SINCDEC_r_64(DisasContext *s, arg_incdec_cnt *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (!sve_access_check(s)) { return true; } @@ -2132,16 +2110,14 @@ static bool trans_SINCDEC_r_64(DisasContext *s, arg_incdec_cnt *a) TCGv_i64 reg = cpu_reg(s, a->rd); if (inc != 0) { - TCGv_i64 t = tcg_const_i64(inc); - do_sat_addsub_64(reg, t, a->u, a->d); - tcg_temp_free_i64(t); + do_sat_addsub_64(reg, tcg_constant_i64(inc), a->u, a->d); } return true; } static bool trans_INCDEC_v(DisasContext *s, arg_incdec2_cnt *a) { - if (a->esz == 0) { + if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { return false; } @@ -2151,11 +2127,10 @@ static bool trans_INCDEC_v(DisasContext *s, arg_incdec2_cnt *a) if (inc != 0) { if (sve_access_check(s)) { - TCGv_i64 t = tcg_const_i64(a->d ? -inc : inc); tcg_gen_gvec_adds(a->esz, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), - t, fullsz, fullsz); - tcg_temp_free_i64(t); + tcg_constant_i64(a->d ? 
-inc : inc), + fullsz, fullsz); } } else { do_mov_z(s, a->rd, a->rn); @@ -2165,7 +2140,7 @@ static bool trans_INCDEC_v(DisasContext *s, arg_incdec2_cnt *a) static bool trans_SINCDEC_v(DisasContext *s, arg_incdec2_cnt *a) { - if (a->esz == 0) { + if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { return false; } @@ -2175,9 +2150,8 @@ static bool trans_SINCDEC_v(DisasContext *s, arg_incdec2_cnt *a) if (inc != 0) { if (sve_access_check(s)) { - TCGv_i64 t = tcg_const_i64(inc); - do_sat_addsub_vec(s, a->esz, a->rd, a->rn, t, a->u, a->d); - tcg_temp_free_i64(t); + do_sat_addsub_vec(s, a->esz, a->rd, a->rn, + tcg_constant_i64(inc), a->u, a->d); } } else { do_mov_z(s, a->rd, a->rn); @@ -2197,32 +2171,20 @@ static bool do_zz_dbm(DisasContext *s, arg_rr_dbm *a, GVecGen2iFn *gvec_fn) extract32(a->dbm, 6, 6))) { return false; } - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - gvec_fn(MO_64, vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), imm, vsz, vsz); - } - return true; + return gen_gvec_fn_zzi(s, gvec_fn, MO_64, a->rd, a->rn, imm); } -static bool trans_AND_zzi(DisasContext *s, arg_rr_dbm *a) -{ - return do_zz_dbm(s, a, tcg_gen_gvec_andi); -} - -static bool trans_ORR_zzi(DisasContext *s, arg_rr_dbm *a) -{ - return do_zz_dbm(s, a, tcg_gen_gvec_ori); -} - -static bool trans_EOR_zzi(DisasContext *s, arg_rr_dbm *a) -{ - return do_zz_dbm(s, a, tcg_gen_gvec_xori); -} +TRANS_FEAT(AND_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_andi) +TRANS_FEAT(ORR_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_ori) +TRANS_FEAT(EOR_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_xori) static bool trans_DUPM(DisasContext *s, arg_DUPM *a) { uint64_t imm; + + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1), extract32(a->dbm, 0, 6), extract32(a->dbm, 6, 6))) { @@ -2250,7 +2212,7 @@ static void do_cpy_m(DisasContext *s, int esz, int rd, int rn, int pg, gen_helper_sve_cpy_m_s, gen_helper_sve_cpy_m_d, }; unsigned vsz = vec_full_reg_size(s); - TCGv_i32 desc = tcg_const_i32(simd_desc(vsz, vsz, 0)); + TCGv_i32 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); TCGv_ptr t_zd = tcg_temp_new_ptr(); TCGv_ptr t_zn = tcg_temp_new_ptr(); TCGv_ptr t_pg = tcg_temp_new_ptr(); @@ -2264,33 +2226,28 @@ static void do_cpy_m(DisasContext *s, int esz, int rd, int rn, int pg, tcg_temp_free_ptr(t_zd); tcg_temp_free_ptr(t_zn); tcg_temp_free_ptr(t_pg); - tcg_temp_free_i32(desc); } static bool trans_FCPY(DisasContext *s, arg_FCPY *a) { - if (a->esz == 0) { + if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { return false; } if (sve_access_check(s)) { /* Decode the VFP immediate. 
*/ uint64_t imm = vfp_expand_imm(a->esz, a->imm); - TCGv_i64 t_imm = tcg_const_i64(imm); - do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, t_imm); - tcg_temp_free_i64(t_imm); + do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, tcg_constant_i64(imm)); } return true; } static bool trans_CPY_m_i(DisasContext *s, arg_rpri_esz *a) { - if (a->esz == 0 && extract32(s->insn, 13, 1)) { + if (!dc_isar_feature(aa64_sve, s)) { return false; } if (sve_access_check(s)) { - TCGv_i64 t_imm = tcg_const_i64(a->imm); - do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, t_imm); - tcg_temp_free_i64(t_imm); + do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, tcg_constant_i64(a->imm)); } return true; } @@ -2302,16 +2259,15 @@ static bool trans_CPY_z_i(DisasContext *s, arg_CPY_z_i *a) gen_helper_sve_cpy_z_s, gen_helper_sve_cpy_z_d, }; - if (a->esz == 0 && extract32(s->insn, 13, 1)) { + if (!dc_isar_feature(aa64_sve, s)) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); - TCGv_i64 t_imm = tcg_const_i64(a->imm); tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd), pred_full_reg_offset(s, a->pg), - t_imm, vsz, vsz, 0, fns[a->esz]); - tcg_temp_free_i64(t_imm); + tcg_constant_i64(a->imm), + vsz, vsz, 0, fns[a->esz]); } return true; } @@ -2350,18 +2306,8 @@ static bool do_EXT(DisasContext *s, int rd, int rn, int rm, int imm) return true; } -static bool trans_EXT(DisasContext *s, arg_EXT *a) -{ - return do_EXT(s, a->rd, a->rn, a->rm, a->imm); -} - -static bool trans_EXT_sve2(DisasContext *s, arg_rri *a) -{ - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - return do_EXT(s, a->rd, a->rn, (a->rn + 1) % 32, a->imm); -} +TRANS_FEAT(EXT, aa64_sve, do_EXT, a->rd, a->rn, a->rm, a->imm) +TRANS_FEAT(EXT_sve2, aa64_sve2, do_EXT, a->rd, a->rn, (a->rn + 1) % 32, a->imm) /* *** SVE Permute - Unpredicated Group @@ -2369,6 +2315,9 @@ static bool trans_EXT_sve2(DisasContext *s, arg_rri *a) static bool trans_DUP_s(DisasContext *s, arg_DUP_s *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_dup_i64(a->esz, vec_full_reg_offset(s, a->rd), @@ -2379,6 +2328,9 @@ static bool trans_DUP_s(DisasContext *s, arg_DUP_s *a) static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if ((a->imm & 0x1f) == 0) { return false; } @@ -2412,7 +2364,7 @@ static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val) gen_helper_sve_insr_s, gen_helper_sve_insr_d, }; unsigned vsz = vec_full_reg_size(s); - TCGv_i32 desc = tcg_const_i32(simd_desc(vsz, vsz, 0)); + TCGv_i32 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); TCGv_ptr t_zd = tcg_temp_new_ptr(); TCGv_ptr t_zn = tcg_temp_new_ptr(); @@ -2423,11 +2375,13 @@ static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val) tcg_temp_free_ptr(t_zd); tcg_temp_free_ptr(t_zn); - tcg_temp_free_i32(desc); } static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { TCGv_i64 t = tcg_temp_new_i64(); tcg_gen_ld_i64(t, cpu_env, vec_reg_offset(s, a->rm, 0, MO_64)); @@ -2439,70 +2393,39 @@ static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a) static bool trans_INSR_r(DisasContext *s, arg_rrr_esz *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { do_insr_i64(s, a, cpu_reg(s, a->rm)); } return true; } -static bool trans_REV_v(DisasContext *s, arg_rr_esz *a) -{ - static gen_helper_gvec_2 * const fns[4] = { - 
gen_helper_sve_rev_b, gen_helper_sve_rev_h, - gen_helper_sve_rev_s, gen_helper_sve_rev_d - }; +static gen_helper_gvec_2 * const rev_fns[4] = { + gen_helper_sve_rev_b, gen_helper_sve_rev_h, + gen_helper_sve_rev_s, gen_helper_sve_rev_d +}; +TRANS_FEAT(REV_v, aa64_sve, gen_gvec_ool_zz, rev_fns[a->esz], a->rd, a->rn, 0) - if (sve_access_check(s)) { - gen_gvec_ool_zz(s, fns[a->esz], a->rd, a->rn, 0); - } - return true; -} +static gen_helper_gvec_3 * const sve_tbl_fns[4] = { + gen_helper_sve_tbl_b, gen_helper_sve_tbl_h, + gen_helper_sve_tbl_s, gen_helper_sve_tbl_d +}; +TRANS_FEAT(TBL, aa64_sve, gen_gvec_ool_arg_zzz, sve_tbl_fns[a->esz], a, 0) -static bool trans_TBL(DisasContext *s, arg_rrr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve_tbl_b, gen_helper_sve_tbl_h, - gen_helper_sve_tbl_s, gen_helper_sve_tbl_d - }; +static gen_helper_gvec_4 * const sve2_tbl_fns[4] = { + gen_helper_sve2_tbl_b, gen_helper_sve2_tbl_h, + gen_helper_sve2_tbl_s, gen_helper_sve2_tbl_d +}; +TRANS_FEAT(TBL_sve2, aa64_sve2, gen_gvec_ool_zzzz, sve2_tbl_fns[a->esz], + a->rd, a->rn, (a->rn + 1) % 32, a->rm, 0) - if (sve_access_check(s)) { - gen_gvec_ool_zzz(s, fns[a->esz], a->rd, a->rn, a->rm, 0); - } - return true; -} - -static bool trans_TBL_sve2(DisasContext *s, arg_rrr_esz *a) -{ - static gen_helper_gvec_4 * const fns[4] = { - gen_helper_sve2_tbl_b, gen_helper_sve2_tbl_h, - gen_helper_sve2_tbl_s, gen_helper_sve2_tbl_d - }; - - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zzzz(s, fns[a->esz], a->rd, a->rn, - (a->rn + 1) % 32, a->rm, 0); - } - return true; -} - -static bool trans_TBX(DisasContext *s, arg_rrr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve2_tbx_b, gen_helper_sve2_tbx_h, - gen_helper_sve2_tbx_s, gen_helper_sve2_tbx_d - }; - - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zzz(s, fns[a->esz], a->rd, a->rn, a->rm, 0); - } - return true; -} +static gen_helper_gvec_3 * const tbx_fns[4] = { + gen_helper_sve2_tbx_b, gen_helper_sve2_tbx_h, + gen_helper_sve2_tbx_s, gen_helper_sve2_tbx_d +}; +TRANS_FEAT(TBX, aa64_sve2, gen_gvec_ool_arg_zzz, tbx_fns[a->esz], a, 0) static bool trans_UNPK(DisasContext *s, arg_UNPK *a) { @@ -2513,7 +2436,7 @@ static bool trans_UNPK(DisasContext *s, arg_UNPK *a) { gen_helper_sve_sunpk_d, gen_helper_sve_uunpk_d }, }; - if (a->esz == 0) { + if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { return false; } if (sve_access_check(s)) { @@ -2542,7 +2465,6 @@ static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd, TCGv_ptr t_d = tcg_temp_new_ptr(); TCGv_ptr t_n = tcg_temp_new_ptr(); TCGv_ptr t_m = tcg_temp_new_ptr(); - TCGv_i32 t_desc; uint32_t desc = 0; desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz); @@ -2552,14 +2474,12 @@ static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd, tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd)); tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn)); tcg_gen_addi_ptr(t_m, cpu_env, pred_full_reg_offset(s, a->rm)); - t_desc = tcg_const_i32(desc); - fn(t_d, t_n, t_m, t_desc); + fn(t_d, t_n, t_m, tcg_constant_i32(desc)); tcg_temp_free_ptr(t_d); tcg_temp_free_ptr(t_n); tcg_temp_free_ptr(t_m); - tcg_temp_free_i32(t_desc); return true; } @@ -2573,7 +2493,6 @@ static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd, unsigned vsz = pred_full_reg_size(s); TCGv_ptr t_d = tcg_temp_new_ptr(); TCGv_ptr t_n = tcg_temp_new_ptr(); - TCGv_i32 
t_desc; uint32_t desc = 0; tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd)); @@ -2582,201 +2501,83 @@ static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd, desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz); desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd); - t_desc = tcg_const_i32(desc); - fn(t_d, t_n, t_desc); + fn(t_d, t_n, tcg_constant_i32(desc)); - tcg_temp_free_i32(t_desc); tcg_temp_free_ptr(t_d); tcg_temp_free_ptr(t_n); return true; } -static bool trans_ZIP1_p(DisasContext *s, arg_rrr_esz *a) -{ - return do_perm_pred3(s, a, 0, gen_helper_sve_zip_p); -} +TRANS_FEAT(ZIP1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_zip_p) +TRANS_FEAT(ZIP2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_zip_p) +TRANS_FEAT(UZP1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_uzp_p) +TRANS_FEAT(UZP2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_uzp_p) +TRANS_FEAT(TRN1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_trn_p) +TRANS_FEAT(TRN2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_trn_p) -static bool trans_ZIP2_p(DisasContext *s, arg_rrr_esz *a) -{ - return do_perm_pred3(s, a, 1, gen_helper_sve_zip_p); -} - -static bool trans_UZP1_p(DisasContext *s, arg_rrr_esz *a) -{ - return do_perm_pred3(s, a, 0, gen_helper_sve_uzp_p); -} - -static bool trans_UZP2_p(DisasContext *s, arg_rrr_esz *a) -{ - return do_perm_pred3(s, a, 1, gen_helper_sve_uzp_p); -} - -static bool trans_TRN1_p(DisasContext *s, arg_rrr_esz *a) -{ - return do_perm_pred3(s, a, 0, gen_helper_sve_trn_p); -} - -static bool trans_TRN2_p(DisasContext *s, arg_rrr_esz *a) -{ - return do_perm_pred3(s, a, 1, gen_helper_sve_trn_p); -} - -static bool trans_REV_p(DisasContext *s, arg_rr_esz *a) -{ - return do_perm_pred2(s, a, 0, gen_helper_sve_rev_p); -} - -static bool trans_PUNPKLO(DisasContext *s, arg_PUNPKLO *a) -{ - return do_perm_pred2(s, a, 0, gen_helper_sve_punpk_p); -} - -static bool trans_PUNPKHI(DisasContext *s, arg_PUNPKHI *a) -{ - return do_perm_pred2(s, a, 1, gen_helper_sve_punpk_p); -} +TRANS_FEAT(REV_p, aa64_sve, do_perm_pred2, a, 0, gen_helper_sve_rev_p) +TRANS_FEAT(PUNPKLO, aa64_sve, do_perm_pred2, a, 0, gen_helper_sve_punpk_p) +TRANS_FEAT(PUNPKHI, aa64_sve, do_perm_pred2, a, 1, gen_helper_sve_punpk_p) /* *** SVE Permute - Interleaving Group */ -static bool do_zip(DisasContext *s, arg_rrr_esz *a, bool high) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve_zip_b, gen_helper_sve_zip_h, - gen_helper_sve_zip_s, gen_helper_sve_zip_d, - }; +static gen_helper_gvec_3 * const zip_fns[4] = { + gen_helper_sve_zip_b, gen_helper_sve_zip_h, + gen_helper_sve_zip_s, gen_helper_sve_zip_d, +}; +TRANS_FEAT(ZIP1_z, aa64_sve, gen_gvec_ool_arg_zzz, + zip_fns[a->esz], a, 0) +TRANS_FEAT(ZIP2_z, aa64_sve, gen_gvec_ool_arg_zzz, + zip_fns[a->esz], a, vec_full_reg_size(s) / 2) - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - unsigned high_ofs = high ? 
vsz / 2 : 0; - tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn) + high_ofs, - vec_full_reg_offset(s, a->rm) + high_ofs, - vsz, vsz, 0, fns[a->esz]); - } - return true; -} - -static bool do_zzz_data_ool(DisasContext *s, arg_rrr_esz *a, int data, - gen_helper_gvec_3 *fn) -{ - if (sve_access_check(s)) { - gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, data); - } - return true; -} - -static bool trans_ZIP1_z(DisasContext *s, arg_rrr_esz *a) -{ - return do_zip(s, a, false); -} - -static bool trans_ZIP2_z(DisasContext *s, arg_rrr_esz *a) -{ - return do_zip(s, a, true); -} - -static bool do_zip_q(DisasContext *s, arg_rrr_esz *a, bool high) -{ - if (!dc_isar_feature(aa64_sve_f64mm, s)) { - return false; - } - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - unsigned high_ofs = high ? QEMU_ALIGN_DOWN(vsz, 32) / 2 : 0; - tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn) + high_ofs, - vec_full_reg_offset(s, a->rm) + high_ofs, - vsz, vsz, 0, gen_helper_sve2_zip_q); - } - return true; -} - -static bool trans_ZIP1_q(DisasContext *s, arg_rrr_esz *a) -{ - return do_zip_q(s, a, false); -} - -static bool trans_ZIP2_q(DisasContext *s, arg_rrr_esz *a) -{ - return do_zip_q(s, a, true); -} +TRANS_FEAT(ZIP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, + gen_helper_sve2_zip_q, a, 0) +TRANS_FEAT(ZIP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, + gen_helper_sve2_zip_q, a, + QEMU_ALIGN_DOWN(vec_full_reg_size(s), 32) / 2) static gen_helper_gvec_3 * const uzp_fns[4] = { gen_helper_sve_uzp_b, gen_helper_sve_uzp_h, gen_helper_sve_uzp_s, gen_helper_sve_uzp_d, }; -static bool trans_UZP1_z(DisasContext *s, arg_rrr_esz *a) -{ - return do_zzz_data_ool(s, a, 0, uzp_fns[a->esz]); -} +TRANS_FEAT(UZP1_z, aa64_sve, gen_gvec_ool_arg_zzz, + uzp_fns[a->esz], a, 0) +TRANS_FEAT(UZP2_z, aa64_sve, gen_gvec_ool_arg_zzz, + uzp_fns[a->esz], a, 1 << a->esz) -static bool trans_UZP2_z(DisasContext *s, arg_rrr_esz *a) -{ - return do_zzz_data_ool(s, a, 1 << a->esz, uzp_fns[a->esz]); -} - -static bool trans_UZP1_q(DisasContext *s, arg_rrr_esz *a) -{ - if (!dc_isar_feature(aa64_sve_f64mm, s)) { - return false; - } - return do_zzz_data_ool(s, a, 0, gen_helper_sve2_uzp_q); -} - -static bool trans_UZP2_q(DisasContext *s, arg_rrr_esz *a) -{ - if (!dc_isar_feature(aa64_sve_f64mm, s)) { - return false; - } - return do_zzz_data_ool(s, a, 16, gen_helper_sve2_uzp_q); -} +TRANS_FEAT(UZP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, + gen_helper_sve2_uzp_q, a, 0) +TRANS_FEAT(UZP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, + gen_helper_sve2_uzp_q, a, 16) static gen_helper_gvec_3 * const trn_fns[4] = { gen_helper_sve_trn_b, gen_helper_sve_trn_h, gen_helper_sve_trn_s, gen_helper_sve_trn_d, }; -static bool trans_TRN1_z(DisasContext *s, arg_rrr_esz *a) -{ - return do_zzz_data_ool(s, a, 0, trn_fns[a->esz]); -} +TRANS_FEAT(TRN1_z, aa64_sve, gen_gvec_ool_arg_zzz, + trn_fns[a->esz], a, 0) +TRANS_FEAT(TRN2_z, aa64_sve, gen_gvec_ool_arg_zzz, + trn_fns[a->esz], a, 1 << a->esz) -static bool trans_TRN2_z(DisasContext *s, arg_rrr_esz *a) -{ - return do_zzz_data_ool(s, a, 1 << a->esz, trn_fns[a->esz]); -} - -static bool trans_TRN1_q(DisasContext *s, arg_rrr_esz *a) -{ - if (!dc_isar_feature(aa64_sve_f64mm, s)) { - return false; - } - return do_zzz_data_ool(s, a, 0, gen_helper_sve2_trn_q); -} - -static bool trans_TRN2_q(DisasContext *s, arg_rrr_esz *a) -{ - if (!dc_isar_feature(aa64_sve_f64mm, s)) { - return false; - } - return do_zzz_data_ool(s, a, 16, gen_helper_sve2_trn_q); -} +TRANS_FEAT(TRN1_q, 
aa64_sve_f64mm, gen_gvec_ool_arg_zzz, + gen_helper_sve2_trn_q, a, 0) +TRANS_FEAT(TRN2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, + gen_helper_sve2_trn_q, a, 16) /* *** SVE Permute Vector - Predicated Group */ -static bool trans_COMPACT(DisasContext *s, arg_rpr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d - }; - return do_zpz_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_3 * const compact_fns[4] = { + NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d +}; +TRANS_FEAT_NONSTREAMING(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz, + compact_fns[a->esz], a, 0) /* Call the helper that computes the ARM LastActiveElement pseudocode * function, scaled by the element size. This includes the not found @@ -2788,18 +2589,15 @@ static void find_last_active(DisasContext *s, TCGv_i32 ret, int esz, int pg) * round up, as we do elsewhere, because we need the exact size. */ TCGv_ptr t_p = tcg_temp_new_ptr(); - TCGv_i32 t_desc; unsigned desc = 0; desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s)); desc = FIELD_DP32(desc, PREDDESC, ESZ, esz); tcg_gen_addi_ptr(t_p, cpu_env, pred_full_reg_offset(s, pg)); - t_desc = tcg_const_i32(desc); - gen_helper_sve_last_active_element(ret, t_p, t_desc); + gen_helper_sve_last_active_element(ret, t_p, tcg_constant_i32(desc)); - tcg_temp_free_i32(t_desc); tcg_temp_free_ptr(t_p); } @@ -2814,11 +2612,9 @@ static void incr_last_active(DisasContext *s, TCGv_i32 last, int esz) if (is_power_of_2(vsz)) { tcg_gen_andi_i32(last, last, vsz - 1); } else { - TCGv_i32 max = tcg_const_i32(vsz); - TCGv_i32 zero = tcg_const_i32(0); + TCGv_i32 max = tcg_constant_i32(vsz); + TCGv_i32 zero = tcg_constant_i32(0); tcg_gen_movcond_i32(TCG_COND_GEU, last, last, max, zero, last); - tcg_temp_free_i32(max); - tcg_temp_free_i32(zero); } } @@ -2830,11 +2626,9 @@ static void wrap_last_active(DisasContext *s, TCGv_i32 last, int esz) if (is_power_of_2(vsz)) { tcg_gen_andi_i32(last, last, vsz - 1); } else { - TCGv_i32 max = tcg_const_i32(vsz - (1 << esz)); - TCGv_i32 zero = tcg_const_i32(0); + TCGv_i32 max = tcg_constant_i32(vsz - (1 << esz)); + TCGv_i32 zero = tcg_constant_i32(0); tcg_gen_movcond_i32(TCG_COND_LT, last, last, zero, max, last); - tcg_temp_free_i32(max); - tcg_temp_free_i32(zero); } } @@ -2873,7 +2667,7 @@ static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last, * The final adjustment for the vector register base * is added via constant offset to the load. */ -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN /* Adjust for element ordering. See vec_reg_offset. */ if (esz < 3) { tcg_gen_xori_i32(last, last, 8 - (1 << esz)); @@ -2936,22 +2730,15 @@ static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before) return true; } -static bool trans_CLASTA_z(DisasContext *s, arg_rprr_esz *a) -{ - return do_clast_vector(s, a, false); -} - -static bool trans_CLASTB_z(DisasContext *s, arg_rprr_esz *a) -{ - return do_clast_vector(s, a, true); -} +TRANS_FEAT(CLASTA_z, aa64_sve, do_clast_vector, a, false) +TRANS_FEAT(CLASTB_z, aa64_sve, do_clast_vector, a, true) /* Compute CLAST for a scalar. 
*/ static void do_clast_scalar(DisasContext *s, int esz, int pg, int rm, bool before, TCGv_i64 reg_val) { TCGv_i32 last = tcg_temp_new_i32(); - TCGv_i64 ele, cmp, zero; + TCGv_i64 ele, cmp; find_last_active(s, last, esz, pg); @@ -2971,10 +2758,9 @@ static void do_clast_scalar(DisasContext *s, int esz, int pg, int rm, ele = load_last_active(s, last, rm, esz); tcg_temp_free_i32(last); - zero = tcg_const_i64(0); - tcg_gen_movcond_i64(TCG_COND_GE, reg_val, cmp, zero, ele, reg_val); + tcg_gen_movcond_i64(TCG_COND_GE, reg_val, cmp, tcg_constant_i64(0), + ele, reg_val); - tcg_temp_free_i64(zero); tcg_temp_free_i64(cmp); tcg_temp_free_i64(ele); } @@ -2994,15 +2780,8 @@ static bool do_clast_fp(DisasContext *s, arg_rpr_esz *a, bool before) return true; } -static bool trans_CLASTA_v(DisasContext *s, arg_rpr_esz *a) -{ - return do_clast_fp(s, a, false); -} - -static bool trans_CLASTB_v(DisasContext *s, arg_rpr_esz *a) -{ - return do_clast_fp(s, a, true); -} +TRANS_FEAT(CLASTA_v, aa64_sve, do_clast_fp, a, false) +TRANS_FEAT(CLASTB_v, aa64_sve, do_clast_fp, a, true) /* Compute CLAST for a Xreg. */ static bool do_clast_general(DisasContext *s, arg_rpr_esz *a, bool before) @@ -3034,15 +2813,8 @@ static bool do_clast_general(DisasContext *s, arg_rpr_esz *a, bool before) return true; } -static bool trans_CLASTA_r(DisasContext *s, arg_rpr_esz *a) -{ - return do_clast_general(s, a, false); -} - -static bool trans_CLASTB_r(DisasContext *s, arg_rpr_esz *a) -{ - return do_clast_general(s, a, true); -} +TRANS_FEAT(CLASTA_r, aa64_sve, do_clast_general, a, false) +TRANS_FEAT(CLASTB_r, aa64_sve, do_clast_general, a, true) /* Compute LAST for a scalar. */ static TCGv_i64 do_last_scalar(DisasContext *s, int esz, @@ -3074,15 +2846,8 @@ static bool do_last_fp(DisasContext *s, arg_rpr_esz *a, bool before) return true; } -static bool trans_LASTA_v(DisasContext *s, arg_rpr_esz *a) -{ - return do_last_fp(s, a, false); -} - -static bool trans_LASTB_v(DisasContext *s, arg_rpr_esz *a) -{ - return do_last_fp(s, a, true); -} +TRANS_FEAT(LASTA_v, aa64_sve, do_last_fp, a, false) +TRANS_FEAT(LASTB_v, aa64_sve, do_last_fp, a, true) /* Compute LAST for a Xreg. 
*/ static bool do_last_general(DisasContext *s, arg_rpr_esz *a, bool before) @@ -3095,18 +2860,14 @@ static bool do_last_general(DisasContext *s, arg_rpr_esz *a, bool before) return true; } -static bool trans_LASTA_r(DisasContext *s, arg_rpr_esz *a) -{ - return do_last_general(s, a, false); -} - -static bool trans_LASTB_r(DisasContext *s, arg_rpr_esz *a) -{ - return do_last_general(s, a, true); -} +TRANS_FEAT(LASTA_r, aa64_sve, do_last_general, a, false) +TRANS_FEAT(LASTB_r, aa64_sve, do_last_general, a, true) static bool trans_CPY_m_r(DisasContext *s, arg_rpr_esz *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, cpu_reg_sp(s, a->rn)); } @@ -3115,6 +2876,9 @@ static bool trans_CPY_m_r(DisasContext *s, arg_rpr_esz *a) static bool trans_CPY_m_v(DisasContext *s, arg_rpr_esz *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { int ofs = vec_reg_offset(s, a->rn, 0, a->esz); TCGv_i64 t = load_esz(cpu_env, ofs, a->esz); @@ -3124,64 +2888,27 @@ static bool trans_CPY_m_v(DisasContext *s, arg_rpr_esz *a) return true; } -static bool trans_REVB(DisasContext *s, arg_rpr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - NULL, - gen_helper_sve_revb_h, - gen_helper_sve_revb_s, - gen_helper_sve_revb_d, - }; - return do_zpz_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_3 * const revb_fns[4] = { + NULL, gen_helper_sve_revb_h, + gen_helper_sve_revb_s, gen_helper_sve_revb_d, +}; +TRANS_FEAT(REVB, aa64_sve, gen_gvec_ool_arg_zpz, revb_fns[a->esz], a, 0) -static bool trans_REVH(DisasContext *s, arg_rpr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - NULL, - NULL, - gen_helper_sve_revh_s, - gen_helper_sve_revh_d, - }; - return do_zpz_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_3 * const revh_fns[4] = { + NULL, NULL, gen_helper_sve_revh_s, gen_helper_sve_revh_d, +}; +TRANS_FEAT(REVH, aa64_sve, gen_gvec_ool_arg_zpz, revh_fns[a->esz], a, 0) -static bool trans_REVW(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ool(s, a, a->esz == 3 ? gen_helper_sve_revw_d : NULL); -} +TRANS_FEAT(REVW, aa64_sve, gen_gvec_ool_arg_zpz, + a->esz == 3 ? 
gen_helper_sve_revw_d : NULL, a, 0) -static bool trans_RBIT(DisasContext *s, arg_rpr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve_rbit_b, - gen_helper_sve_rbit_h, - gen_helper_sve_rbit_s, - gen_helper_sve_rbit_d, - }; - return do_zpz_ool(s, a, fns[a->esz]); -} +TRANS_FEAT(REVD, aa64_sme, gen_gvec_ool_arg_zpz, gen_helper_sme_revd_q, a, 0) -static bool trans_SPLICE(DisasContext *s, arg_rprr_esz *a) -{ - if (sve_access_check(s)) { - gen_gvec_ool_zzzp(s, gen_helper_sve_splice, - a->rd, a->rn, a->rm, a->pg, a->esz); - } - return true; -} +TRANS_FEAT(SPLICE, aa64_sve, gen_gvec_ool_arg_zpzz, + gen_helper_sve_splice, a, a->esz) -static bool trans_SPLICE_sve2(DisasContext *s, arg_rpr_esz *a) -{ - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zzzp(s, gen_helper_sve_splice, - a->rd, a->rn, (a->rn + 1) % 32, a->pg, a->esz); - } - return true; -} +TRANS_FEAT(SPLICE_sve2, aa64_sve2, gen_gvec_ool_zzzp, gen_helper_sve_splice, + a->rd, a->rn, (a->rn + 1) % 32, a->pg, a->esz) /* *** SVE Integer Compare - Vectors Group @@ -3202,7 +2929,7 @@ static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a, } vsz = vec_full_reg_size(s); - t = tcg_const_i32(simd_desc(vsz, vsz, 0)); + t = tcg_temp_new_i32(); pd = tcg_temp_new_ptr(); zn = tcg_temp_new_ptr(); zm = tcg_temp_new_ptr(); @@ -3213,7 +2940,7 @@ static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a, tcg_gen_addi_ptr(zm, cpu_env, vec_full_reg_offset(s, a->rm)); tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg)); - gen_fn(t, pd, zn, zm, pg, t); + gen_fn(t, pd, zn, zm, pg, tcg_constant_i32(simd_desc(vsz, vsz, 0))); tcg_temp_free_ptr(pd); tcg_temp_free_ptr(zn); @@ -3227,14 +2954,12 @@ static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a, } #define DO_PPZZ(NAME, name) \ -static bool trans_##NAME##_ppzz(DisasContext *s, arg_rprr_esz *a) \ -{ \ - static gen_helper_gvec_flags_4 * const fns[4] = { \ - gen_helper_sve_##name##_ppzz_b, gen_helper_sve_##name##_ppzz_h, \ - gen_helper_sve_##name##_ppzz_s, gen_helper_sve_##name##_ppzz_d, \ - }; \ - return do_ppzz_flags(s, a, fns[a->esz]); \ -} + static gen_helper_gvec_flags_4 * const name##_ppzz_fns[4] = { \ + gen_helper_sve_##name##_ppzz_b, gen_helper_sve_##name##_ppzz_h, \ + gen_helper_sve_##name##_ppzz_s, gen_helper_sve_##name##_ppzz_d, \ + }; \ + TRANS_FEAT(NAME##_ppzz, aa64_sve, do_ppzz_flags, \ + a, name##_ppzz_fns[a->esz]) DO_PPZZ(CMPEQ, cmpeq) DO_PPZZ(CMPNE, cmpne) @@ -3246,14 +2971,12 @@ DO_PPZZ(CMPHS, cmphs) #undef DO_PPZZ #define DO_PPZW(NAME, name) \ -static bool trans_##NAME##_ppzw(DisasContext *s, arg_rprr_esz *a) \ -{ \ - static gen_helper_gvec_flags_4 * const fns[4] = { \ - gen_helper_sve_##name##_ppzw_b, gen_helper_sve_##name##_ppzw_h, \ - gen_helper_sve_##name##_ppzw_s, NULL \ - }; \ - return do_ppzz_flags(s, a, fns[a->esz]); \ -} + static gen_helper_gvec_flags_4 * const name##_ppzw_fns[4] = { \ + gen_helper_sve_##name##_ppzw_b, gen_helper_sve_##name##_ppzw_h, \ + gen_helper_sve_##name##_ppzw_s, NULL \ + }; \ + TRANS_FEAT(NAME##_ppzw, aa64_sve, do_ppzz_flags, \ + a, name##_ppzw_fns[a->esz]) DO_PPZW(CMPEQ, cmpeq) DO_PPZW(CMPNE, cmpne) @@ -3287,7 +3010,7 @@ static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a, } vsz = vec_full_reg_size(s); - t = tcg_const_i32(simd_desc(vsz, vsz, a->imm)); + t = tcg_temp_new_i32(); pd = tcg_temp_new_ptr(); zn = tcg_temp_new_ptr(); pg = tcg_temp_new_ptr(); @@ -3296,7 +3019,7 @@ static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a, tcg_gen_addi_ptr(zn, cpu_env, 
vec_full_reg_offset(s, a->rn)); tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg)); - gen_fn(t, pd, zn, pg, t); + gen_fn(t, pd, zn, pg, tcg_constant_i32(simd_desc(vsz, vsz, a->imm))); tcg_temp_free_ptr(pd); tcg_temp_free_ptr(zn); @@ -3309,14 +3032,12 @@ static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a, } #define DO_PPZI(NAME, name) \ -static bool trans_##NAME##_ppzi(DisasContext *s, arg_rpri_esz *a) \ -{ \ - static gen_helper_gvec_flags_3 * const fns[4] = { \ + static gen_helper_gvec_flags_3 * const name##_ppzi_fns[4] = { \ gen_helper_sve_##name##_ppzi_b, gen_helper_sve_##name##_ppzi_h, \ gen_helper_sve_##name##_ppzi_s, gen_helper_sve_##name##_ppzi_d, \ }; \ - return do_ppzi_flags(s, a, fns[a->esz]); \ -} + TRANS_FEAT(NAME##_ppzi, aa64_sve, do_ppzi_flags, a, \ + name##_ppzi_fns[a->esz]) DO_PPZI(CMPEQ, cmpeq) DO_PPZI(CMPNE, cmpne) @@ -3349,7 +3070,7 @@ static bool do_brk3(DisasContext *s, arg_rprr_s *a, TCGv_ptr n = tcg_temp_new_ptr(); TCGv_ptr m = tcg_temp_new_ptr(); TCGv_ptr g = tcg_temp_new_ptr(); - TCGv_i32 t = tcg_const_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz)); + TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz)); tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd)); tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn)); @@ -3357,16 +3078,17 @@ static bool do_brk3(DisasContext *s, arg_rprr_s *a, tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg)); if (a->s) { - fn_s(t, d, n, m, g, t); + TCGv_i32 t = tcg_temp_new_i32(); + fn_s(t, d, n, m, g, desc); do_pred_flags(t); + tcg_temp_free_i32(t); } else { - fn(d, n, m, g, t); + fn(d, n, m, g, desc); } tcg_temp_free_ptr(d); tcg_temp_free_ptr(n); tcg_temp_free_ptr(m); tcg_temp_free_ptr(g); - tcg_temp_free_i32(t); return true; } @@ -3383,59 +3105,43 @@ static bool do_brk2(DisasContext *s, arg_rpr_s *a, TCGv_ptr d = tcg_temp_new_ptr(); TCGv_ptr n = tcg_temp_new_ptr(); TCGv_ptr g = tcg_temp_new_ptr(); - TCGv_i32 t = tcg_const_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz)); + TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz)); tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd)); tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn)); tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg)); if (a->s) { - fn_s(t, d, n, g, t); + TCGv_i32 t = tcg_temp_new_i32(); + fn_s(t, d, n, g, desc); do_pred_flags(t); + tcg_temp_free_i32(t); } else { - fn(d, n, g, t); + fn(d, n, g, desc); } tcg_temp_free_ptr(d); tcg_temp_free_ptr(n); tcg_temp_free_ptr(g); - tcg_temp_free_i32(t); return true; } -static bool trans_BRKPA(DisasContext *s, arg_rprr_s *a) -{ - return do_brk3(s, a, gen_helper_sve_brkpa, gen_helper_sve_brkpas); -} +TRANS_FEAT(BRKPA, aa64_sve, do_brk3, a, + gen_helper_sve_brkpa, gen_helper_sve_brkpas) +TRANS_FEAT(BRKPB, aa64_sve, do_brk3, a, + gen_helper_sve_brkpb, gen_helper_sve_brkpbs) -static bool trans_BRKPB(DisasContext *s, arg_rprr_s *a) -{ - return do_brk3(s, a, gen_helper_sve_brkpb, gen_helper_sve_brkpbs); -} +TRANS_FEAT(BRKA_m, aa64_sve, do_brk2, a, + gen_helper_sve_brka_m, gen_helper_sve_brkas_m) +TRANS_FEAT(BRKB_m, aa64_sve, do_brk2, a, + gen_helper_sve_brkb_m, gen_helper_sve_brkbs_m) -static bool trans_BRKA_m(DisasContext *s, arg_rpr_s *a) -{ - return do_brk2(s, a, gen_helper_sve_brka_m, gen_helper_sve_brkas_m); -} +TRANS_FEAT(BRKA_z, aa64_sve, do_brk2, a, + gen_helper_sve_brka_z, gen_helper_sve_brkas_z) +TRANS_FEAT(BRKB_z, aa64_sve, do_brk2, a, + gen_helper_sve_brkb_z, gen_helper_sve_brkbs_z) -static bool trans_BRKB_m(DisasContext *s, arg_rpr_s *a) 
-{ - return do_brk2(s, a, gen_helper_sve_brkb_m, gen_helper_sve_brkbs_m); -} - -static bool trans_BRKA_z(DisasContext *s, arg_rpr_s *a) -{ - return do_brk2(s, a, gen_helper_sve_brka_z, gen_helper_sve_brkas_z); -} - -static bool trans_BRKB_z(DisasContext *s, arg_rpr_s *a) -{ - return do_brk2(s, a, gen_helper_sve_brkb_z, gen_helper_sve_brkbs_z); -} - -static bool trans_BRKN(DisasContext *s, arg_rpr_s *a) -{ - return do_brk2(s, a, gen_helper_sve_brkn, gen_helper_sve_brkns); -} +TRANS_FEAT(BRKN, aa64_sve, do_brk2, a, + gen_helper_sve_brkn, gen_helper_sve_brkns) /* *** SVE Predicate Count Group @@ -3467,24 +3173,24 @@ static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg) TCGv_ptr t_pn = tcg_temp_new_ptr(); TCGv_ptr t_pg = tcg_temp_new_ptr(); unsigned desc = 0; - TCGv_i32 t_desc; desc = FIELD_DP32(desc, PREDDESC, OPRSZ, psz); desc = FIELD_DP32(desc, PREDDESC, ESZ, esz); tcg_gen_addi_ptr(t_pn, cpu_env, pred_full_reg_offset(s, pn)); tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg)); - t_desc = tcg_const_i32(desc); - gen_helper_sve_cntp(val, t_pn, t_pg, t_desc); + gen_helper_sve_cntp(val, t_pn, t_pg, tcg_constant_i32(desc)); tcg_temp_free_ptr(t_pn); tcg_temp_free_ptr(t_pg); - tcg_temp_free_i32(t_desc); } } static bool trans_CNTP(DisasContext *s, arg_CNTP *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { do_cntp(s, cpu_reg(s, a->rd), a->esz, a->rn, a->pg); } @@ -3493,6 +3199,9 @@ static bool trans_CNTP(DisasContext *s, arg_CNTP *a) static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { TCGv_i64 reg = cpu_reg(s, a->rd); TCGv_i64 val = tcg_temp_new_i64(); @@ -3510,7 +3219,7 @@ static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a) static bool trans_INCDECP_z(DisasContext *s, arg_incdec2_pred *a) { - if (a->esz == 0) { + if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { return false; } if (sve_access_check(s)) { @@ -3527,6 +3236,9 @@ static bool trans_INCDECP_z(DisasContext *s, arg_incdec2_pred *a) static bool trans_SINCDECP_r_32(DisasContext *s, arg_incdec_pred *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { TCGv_i64 reg = cpu_reg(s, a->rd); TCGv_i64 val = tcg_temp_new_i64(); @@ -3539,6 +3251,9 @@ static bool trans_SINCDECP_r_32(DisasContext *s, arg_incdec_pred *a) static bool trans_SINCDECP_r_64(DisasContext *s, arg_incdec_pred *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { TCGv_i64 reg = cpu_reg(s, a->rd); TCGv_i64 val = tcg_temp_new_i64(); @@ -3551,7 +3266,7 @@ static bool trans_SINCDECP_r_64(DisasContext *s, arg_incdec_pred *a) static bool trans_SINCDECP_z(DisasContext *s, arg_incdec2_pred *a) { - if (a->esz == 0) { + if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { return false; } if (sve_access_check(s)) { @@ -3568,6 +3283,9 @@ static bool trans_SINCDECP_z(DisasContext *s, arg_incdec2_pred *a) static bool trans_CTERM(DisasContext *s, arg_CTERM *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (!sve_access_check(s)) { return true; } @@ -3594,7 +3312,7 @@ static bool trans_CTERM(DisasContext *s, arg_CTERM *a) static bool trans_WHILE(DisasContext *s, arg_WHILE *a) { TCGv_i64 op0, op1, t0, t1, tmax; - TCGv_i32 t2, t3; + TCGv_i32 t2; TCGv_ptr ptr; unsigned vsz = vec_full_reg_size(s); unsigned desc = 0; @@ -3604,7 +3322,9 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a) bool eq = a->eq == a->lt; /* The 
greater-than conditions are all SVE2. */ - if (!a->lt && !dc_isar_feature(aa64_sve2, s)) { + if (a->lt + ? !dc_isar_feature(aa64_sve, s) + : !dc_isar_feature(aa64_sve2, s)) { return false; } if (!sve_access_check(s)) { @@ -3650,7 +3370,7 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a) } } - tmax = tcg_const_i64(vsz >> a->esz); + tmax = tcg_constant_i64(vsz >> a->esz); if (eq) { /* Equality means one more iteration. */ tcg_gen_addi_i64(t0, t0, 1); @@ -3670,7 +3390,6 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a) /* Bound to the maximum. */ tcg_gen_umin_i64(t0, t0, tmax); - tcg_temp_free_i64(tmax); /* Set the count to zero if the condition is false. */ tcg_gen_movi_i64(t1, 0); @@ -3687,28 +3406,26 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a) desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8); desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); - t3 = tcg_const_i32(desc); ptr = tcg_temp_new_ptr(); tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd)); if (a->lt) { - gen_helper_sve_whilel(t2, ptr, t2, t3); + gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc)); } else { - gen_helper_sve_whileg(t2, ptr, t2, t3); + gen_helper_sve_whileg(t2, ptr, t2, tcg_constant_i32(desc)); } do_pred_flags(t2); tcg_temp_free_ptr(ptr); tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); return true; } static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) { TCGv_i64 op0, op1, diff, t1, tmax; - TCGv_i32 t2, t3; + TCGv_i32 t2; TCGv_ptr ptr; unsigned vsz = vec_full_reg_size(s); unsigned desc = 0; @@ -3723,7 +3440,7 @@ static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) op0 = read_cpu_reg(s, a->rn, 1); op1 = read_cpu_reg(s, a->rm, 1); - tmax = tcg_const_i64(vsz); + tmax = tcg_constant_i64(vsz); diff = tcg_temp_new_i64(); if (a->rw) { @@ -3749,7 +3466,6 @@ static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) /* Bound to the maximum. */ tcg_gen_umin_i64(diff, diff, tmax); - tcg_temp_free_i64(tmax); /* Since we're bounded, pass as a 32-bit type. 
*/ t2 = tcg_temp_new_i32(); @@ -3758,17 +3474,15 @@ static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8); desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); - t3 = tcg_const_i32(desc); ptr = tcg_temp_new_ptr(); tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd)); - gen_helper_sve_whilel(t2, ptr, t2, t3); + gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc)); do_pred_flags(t2); tcg_temp_free_ptr(ptr); tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); return true; } @@ -3778,7 +3492,7 @@ static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) static bool trans_FDUP(DisasContext *s, arg_FDUP *a) { - if (a->esz == 0) { + if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { return false; } if (sve_access_check(s)) { @@ -3795,30 +3509,18 @@ static bool trans_FDUP(DisasContext *s, arg_FDUP *a) static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a) { - if (a->esz == 0 && extract32(s->insn, 13, 1)) { + if (!dc_isar_feature(aa64_sve, s)) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); int dofs = vec_full_reg_offset(s, a->rd); - tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, a->imm); } return true; } -static bool trans_ADD_zzi(DisasContext *s, arg_rri_esz *a) -{ - if (a->esz == 0 && extract32(s->insn, 13, 1)) { - return false; - } - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - tcg_gen_gvec_addi(a->esz, vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz); - } - return true; -} +TRANS_FEAT(ADD_zzi, aa64_sve, gen_gvec_fn_arg_zzi, tcg_gen_gvec_addi, a) static bool trans_SUB_zzi(DisasContext *s, arg_rri_esz *a) { @@ -3857,86 +3559,51 @@ static bool trans_SUBR_zzi(DisasContext *s, arg_rri_esz *a) .scalar_first = true } }; - if (a->esz == 0 && extract32(s->insn, 13, 1)) { + if (!dc_isar_feature(aa64_sve, s)) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); - TCGv_i64 c = tcg_const_i64(a->imm); tcg_gen_gvec_2s(vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), - vsz, vsz, c, &op[a->esz]); - tcg_temp_free_i64(c); + vsz, vsz, tcg_constant_i64(a->imm), &op[a->esz]); } return true; } -static bool trans_MUL_zzi(DisasContext *s, arg_rri_esz *a) -{ - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - tcg_gen_gvec_muli(a->esz, vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz); - } - return true; -} +TRANS_FEAT(MUL_zzi, aa64_sve, gen_gvec_fn_arg_zzi, tcg_gen_gvec_muli, a) static bool do_zzi_sat(DisasContext *s, arg_rri_esz *a, bool u, bool d) { - if (a->esz == 0 && extract32(s->insn, 13, 1)) { - return false; - } if (sve_access_check(s)) { - TCGv_i64 val = tcg_const_i64(a->imm); - do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, u, d); - tcg_temp_free_i64(val); + do_sat_addsub_vec(s, a->esz, a->rd, a->rn, + tcg_constant_i64(a->imm), u, d); } return true; } -static bool trans_SQADD_zzi(DisasContext *s, arg_rri_esz *a) -{ - return do_zzi_sat(s, a, false, false); -} - -static bool trans_UQADD_zzi(DisasContext *s, arg_rri_esz *a) -{ - return do_zzi_sat(s, a, true, false); -} - -static bool trans_SQSUB_zzi(DisasContext *s, arg_rri_esz *a) -{ - return do_zzi_sat(s, a, false, true); -} - -static bool trans_UQSUB_zzi(DisasContext *s, arg_rri_esz *a) -{ - return do_zzi_sat(s, a, true, true); -} +TRANS_FEAT(SQADD_zzi, aa64_sve, do_zzi_sat, a, false, false) +TRANS_FEAT(UQADD_zzi, aa64_sve, do_zzi_sat, a, true, false) +TRANS_FEAT(SQSUB_zzi, aa64_sve, do_zzi_sat, a, false, true) 
+TRANS_FEAT(UQSUB_zzi, aa64_sve, do_zzi_sat, a, true, true) static bool do_zzi_ool(DisasContext *s, arg_rri_esz *a, gen_helper_gvec_2i *fn) { if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); - TCGv_i64 c = tcg_const_i64(a->imm); - tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), - c, vsz, vsz, 0, fn); - tcg_temp_free_i64(c); + tcg_constant_i64(a->imm), vsz, vsz, 0, fn); } return true; } #define DO_ZZI(NAME, name) \ -static bool trans_##NAME##_zzi(DisasContext *s, arg_rri_esz *a) \ -{ \ - static gen_helper_gvec_2i * const fns[4] = { \ + static gen_helper_gvec_2i * const name##i_fns[4] = { \ gen_helper_sve_##name##i_b, gen_helper_sve_##name##i_h, \ gen_helper_sve_##name##i_s, gen_helper_sve_##name##i_d, \ }; \ - return do_zzi_ool(s, a, fns[a->esz]); \ -} + TRANS_FEAT(NAME##_zzi, aa64_sve, do_zzi_ool, a, name##i_fns[a->esz]) DO_ZZI(SMAX, smax) DO_ZZI(UMAX, umax) @@ -3945,204 +3612,130 @@ DO_ZZI(UMIN, umin) #undef DO_ZZI -static bool trans_DOT_zzzz(DisasContext *s, arg_DOT_zzzz *a) -{ - static gen_helper_gvec_4 * const fns[2][2] = { - { gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h }, - { gen_helper_gvec_udot_b, gen_helper_gvec_udot_h } - }; - - if (sve_access_check(s)) { - gen_gvec_ool_zzzz(s, fns[a->u][a->sz], a->rd, a->rn, a->rm, a->ra, 0); - } - return true; -} +static gen_helper_gvec_4 * const dot_fns[2][2] = { + { gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h }, + { gen_helper_gvec_udot_b, gen_helper_gvec_udot_h } +}; +TRANS_FEAT(DOT_zzzz, aa64_sve, gen_gvec_ool_zzzz, + dot_fns[a->u][a->sz], a->rd, a->rn, a->rm, a->ra, 0) /* * SVE Multiply - Indexed */ -static bool do_zzxz_ool(DisasContext *s, arg_rrxr_esz *a, - gen_helper_gvec_4 *fn) -{ - if (fn == NULL) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index); - } - return true; -} +TRANS_FEAT(SDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz, + gen_helper_gvec_sdot_idx_b, a) +TRANS_FEAT(SDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz, + gen_helper_gvec_sdot_idx_h, a) +TRANS_FEAT(UDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz, + gen_helper_gvec_udot_idx_b, a) +TRANS_FEAT(UDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz, + gen_helper_gvec_udot_idx_h, a) -#define DO_RRXR(NAME, FUNC) \ - static bool NAME(DisasContext *s, arg_rrxr_esz *a) \ - { return do_zzxz_ool(s, a, FUNC); } - -DO_RRXR(trans_SDOT_zzxw_s, gen_helper_gvec_sdot_idx_b) -DO_RRXR(trans_SDOT_zzxw_d, gen_helper_gvec_sdot_idx_h) -DO_RRXR(trans_UDOT_zzxw_s, gen_helper_gvec_udot_idx_b) -DO_RRXR(trans_UDOT_zzxw_d, gen_helper_gvec_udot_idx_h) - -static bool trans_SUDOT_zzxw_s(DisasContext *s, arg_rrxr_esz *a) -{ - if (!dc_isar_feature(aa64_sve_i8mm, s)) { - return false; - } - return do_zzxz_ool(s, a, gen_helper_gvec_sudot_idx_b); -} - -static bool trans_USDOT_zzxw_s(DisasContext *s, arg_rrxr_esz *a) -{ - if (!dc_isar_feature(aa64_sve_i8mm, s)) { - return false; - } - return do_zzxz_ool(s, a, gen_helper_gvec_usdot_idx_b); -} - -#undef DO_RRXR - -static bool do_sve2_zzz_data(DisasContext *s, int rd, int rn, int rm, int data, - gen_helper_gvec_3 *fn) -{ - if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) { - return false; - } - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd), - vec_full_reg_offset(s, rn), - vec_full_reg_offset(s, rm), - vsz, vsz, data, fn); - } - return true; -} +TRANS_FEAT(SUDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz, + gen_helper_gvec_sudot_idx_b, a) +TRANS_FEAT(USDOT_zzxw_s, aa64_sve_i8mm, 
gen_gvec_ool_arg_zzxz, + gen_helper_gvec_usdot_idx_b, a) #define DO_SVE2_RRX(NAME, FUNC) \ - static bool NAME(DisasContext *s, arg_rrx_esz *a) \ - { return do_sve2_zzz_data(s, a->rd, a->rn, a->rm, a->index, FUNC); } + TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_zzz, FUNC, \ + a->rd, a->rn, a->rm, a->index) -DO_SVE2_RRX(trans_MUL_zzx_h, gen_helper_gvec_mul_idx_h) -DO_SVE2_RRX(trans_MUL_zzx_s, gen_helper_gvec_mul_idx_s) -DO_SVE2_RRX(trans_MUL_zzx_d, gen_helper_gvec_mul_idx_d) +DO_SVE2_RRX(MUL_zzx_h, gen_helper_gvec_mul_idx_h) +DO_SVE2_RRX(MUL_zzx_s, gen_helper_gvec_mul_idx_s) +DO_SVE2_RRX(MUL_zzx_d, gen_helper_gvec_mul_idx_d) -DO_SVE2_RRX(trans_SQDMULH_zzx_h, gen_helper_sve2_sqdmulh_idx_h) -DO_SVE2_RRX(trans_SQDMULH_zzx_s, gen_helper_sve2_sqdmulh_idx_s) -DO_SVE2_RRX(trans_SQDMULH_zzx_d, gen_helper_sve2_sqdmulh_idx_d) +DO_SVE2_RRX(SQDMULH_zzx_h, gen_helper_sve2_sqdmulh_idx_h) +DO_SVE2_RRX(SQDMULH_zzx_s, gen_helper_sve2_sqdmulh_idx_s) +DO_SVE2_RRX(SQDMULH_zzx_d, gen_helper_sve2_sqdmulh_idx_d) -DO_SVE2_RRX(trans_SQRDMULH_zzx_h, gen_helper_sve2_sqrdmulh_idx_h) -DO_SVE2_RRX(trans_SQRDMULH_zzx_s, gen_helper_sve2_sqrdmulh_idx_s) -DO_SVE2_RRX(trans_SQRDMULH_zzx_d, gen_helper_sve2_sqrdmulh_idx_d) +DO_SVE2_RRX(SQRDMULH_zzx_h, gen_helper_sve2_sqrdmulh_idx_h) +DO_SVE2_RRX(SQRDMULH_zzx_s, gen_helper_sve2_sqrdmulh_idx_s) +DO_SVE2_RRX(SQRDMULH_zzx_d, gen_helper_sve2_sqrdmulh_idx_d) #undef DO_SVE2_RRX #define DO_SVE2_RRX_TB(NAME, FUNC, TOP) \ - static bool NAME(DisasContext *s, arg_rrx_esz *a) \ - { \ - return do_sve2_zzz_data(s, a->rd, a->rn, a->rm, \ - (a->index << 1) | TOP, FUNC); \ - } + TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_zzz, FUNC, \ + a->rd, a->rn, a->rm, (a->index << 1) | TOP) -DO_SVE2_RRX_TB(trans_SQDMULLB_zzx_s, gen_helper_sve2_sqdmull_idx_s, false) -DO_SVE2_RRX_TB(trans_SQDMULLB_zzx_d, gen_helper_sve2_sqdmull_idx_d, false) -DO_SVE2_RRX_TB(trans_SQDMULLT_zzx_s, gen_helper_sve2_sqdmull_idx_s, true) -DO_SVE2_RRX_TB(trans_SQDMULLT_zzx_d, gen_helper_sve2_sqdmull_idx_d, true) +DO_SVE2_RRX_TB(SQDMULLB_zzx_s, gen_helper_sve2_sqdmull_idx_s, false) +DO_SVE2_RRX_TB(SQDMULLB_zzx_d, gen_helper_sve2_sqdmull_idx_d, false) +DO_SVE2_RRX_TB(SQDMULLT_zzx_s, gen_helper_sve2_sqdmull_idx_s, true) +DO_SVE2_RRX_TB(SQDMULLT_zzx_d, gen_helper_sve2_sqdmull_idx_d, true) -DO_SVE2_RRX_TB(trans_SMULLB_zzx_s, gen_helper_sve2_smull_idx_s, false) -DO_SVE2_RRX_TB(trans_SMULLB_zzx_d, gen_helper_sve2_smull_idx_d, false) -DO_SVE2_RRX_TB(trans_SMULLT_zzx_s, gen_helper_sve2_smull_idx_s, true) -DO_SVE2_RRX_TB(trans_SMULLT_zzx_d, gen_helper_sve2_smull_idx_d, true) +DO_SVE2_RRX_TB(SMULLB_zzx_s, gen_helper_sve2_smull_idx_s, false) +DO_SVE2_RRX_TB(SMULLB_zzx_d, gen_helper_sve2_smull_idx_d, false) +DO_SVE2_RRX_TB(SMULLT_zzx_s, gen_helper_sve2_smull_idx_s, true) +DO_SVE2_RRX_TB(SMULLT_zzx_d, gen_helper_sve2_smull_idx_d, true) -DO_SVE2_RRX_TB(trans_UMULLB_zzx_s, gen_helper_sve2_umull_idx_s, false) -DO_SVE2_RRX_TB(trans_UMULLB_zzx_d, gen_helper_sve2_umull_idx_d, false) -DO_SVE2_RRX_TB(trans_UMULLT_zzx_s, gen_helper_sve2_umull_idx_s, true) -DO_SVE2_RRX_TB(trans_UMULLT_zzx_d, gen_helper_sve2_umull_idx_d, true) +DO_SVE2_RRX_TB(UMULLB_zzx_s, gen_helper_sve2_umull_idx_s, false) +DO_SVE2_RRX_TB(UMULLB_zzx_d, gen_helper_sve2_umull_idx_d, false) +DO_SVE2_RRX_TB(UMULLT_zzx_s, gen_helper_sve2_umull_idx_s, true) +DO_SVE2_RRX_TB(UMULLT_zzx_d, gen_helper_sve2_umull_idx_d, true) #undef DO_SVE2_RRX_TB -static bool do_sve2_zzzz_data(DisasContext *s, int rd, int rn, int rm, int ra, - int data, gen_helper_gvec_4 *fn) -{ - if (fn == NULL || 
!dc_isar_feature(aa64_sve2, s)) { - return false; - } - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd), - vec_full_reg_offset(s, rn), - vec_full_reg_offset(s, rm), - vec_full_reg_offset(s, ra), - vsz, vsz, data, fn); - } - return true; -} - #define DO_SVE2_RRXR(NAME, FUNC) \ - static bool NAME(DisasContext *s, arg_rrxr_esz *a) \ - { return do_sve2_zzzz_data(s, a->rd, a->rn, a->rm, a->ra, a->index, FUNC); } + TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_arg_zzxz, FUNC, a) -DO_SVE2_RRXR(trans_MLA_zzxz_h, gen_helper_gvec_mla_idx_h) -DO_SVE2_RRXR(trans_MLA_zzxz_s, gen_helper_gvec_mla_idx_s) -DO_SVE2_RRXR(trans_MLA_zzxz_d, gen_helper_gvec_mla_idx_d) +DO_SVE2_RRXR(MLA_zzxz_h, gen_helper_gvec_mla_idx_h) +DO_SVE2_RRXR(MLA_zzxz_s, gen_helper_gvec_mla_idx_s) +DO_SVE2_RRXR(MLA_zzxz_d, gen_helper_gvec_mla_idx_d) -DO_SVE2_RRXR(trans_MLS_zzxz_h, gen_helper_gvec_mls_idx_h) -DO_SVE2_RRXR(trans_MLS_zzxz_s, gen_helper_gvec_mls_idx_s) -DO_SVE2_RRXR(trans_MLS_zzxz_d, gen_helper_gvec_mls_idx_d) +DO_SVE2_RRXR(MLS_zzxz_h, gen_helper_gvec_mls_idx_h) +DO_SVE2_RRXR(MLS_zzxz_s, gen_helper_gvec_mls_idx_s) +DO_SVE2_RRXR(MLS_zzxz_d, gen_helper_gvec_mls_idx_d) -DO_SVE2_RRXR(trans_SQRDMLAH_zzxz_h, gen_helper_sve2_sqrdmlah_idx_h) -DO_SVE2_RRXR(trans_SQRDMLAH_zzxz_s, gen_helper_sve2_sqrdmlah_idx_s) -DO_SVE2_RRXR(trans_SQRDMLAH_zzxz_d, gen_helper_sve2_sqrdmlah_idx_d) +DO_SVE2_RRXR(SQRDMLAH_zzxz_h, gen_helper_sve2_sqrdmlah_idx_h) +DO_SVE2_RRXR(SQRDMLAH_zzxz_s, gen_helper_sve2_sqrdmlah_idx_s) +DO_SVE2_RRXR(SQRDMLAH_zzxz_d, gen_helper_sve2_sqrdmlah_idx_d) -DO_SVE2_RRXR(trans_SQRDMLSH_zzxz_h, gen_helper_sve2_sqrdmlsh_idx_h) -DO_SVE2_RRXR(trans_SQRDMLSH_zzxz_s, gen_helper_sve2_sqrdmlsh_idx_s) -DO_SVE2_RRXR(trans_SQRDMLSH_zzxz_d, gen_helper_sve2_sqrdmlsh_idx_d) +DO_SVE2_RRXR(SQRDMLSH_zzxz_h, gen_helper_sve2_sqrdmlsh_idx_h) +DO_SVE2_RRXR(SQRDMLSH_zzxz_s, gen_helper_sve2_sqrdmlsh_idx_s) +DO_SVE2_RRXR(SQRDMLSH_zzxz_d, gen_helper_sve2_sqrdmlsh_idx_d) #undef DO_SVE2_RRXR #define DO_SVE2_RRXR_TB(NAME, FUNC, TOP) \ - static bool NAME(DisasContext *s, arg_rrxr_esz *a) \ - { \ - return do_sve2_zzzz_data(s, a->rd, a->rn, a->rm, a->rd, \ - (a->index << 1) | TOP, FUNC); \ - } + TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_zzzz, FUNC, \ + a->rd, a->rn, a->rm, a->ra, (a->index << 1) | TOP) -DO_SVE2_RRXR_TB(trans_SQDMLALB_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, false) -DO_SVE2_RRXR_TB(trans_SQDMLALB_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, false) -DO_SVE2_RRXR_TB(trans_SQDMLALT_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, true) -DO_SVE2_RRXR_TB(trans_SQDMLALT_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, true) +DO_SVE2_RRXR_TB(SQDMLALB_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, false) +DO_SVE2_RRXR_TB(SQDMLALB_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, false) +DO_SVE2_RRXR_TB(SQDMLALT_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, true) +DO_SVE2_RRXR_TB(SQDMLALT_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, true) -DO_SVE2_RRXR_TB(trans_SQDMLSLB_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, false) -DO_SVE2_RRXR_TB(trans_SQDMLSLB_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, false) -DO_SVE2_RRXR_TB(trans_SQDMLSLT_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, true) -DO_SVE2_RRXR_TB(trans_SQDMLSLT_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, true) +DO_SVE2_RRXR_TB(SQDMLSLB_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, false) +DO_SVE2_RRXR_TB(SQDMLSLB_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, false) +DO_SVE2_RRXR_TB(SQDMLSLT_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, true) +DO_SVE2_RRXR_TB(SQDMLSLT_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, true) 
-DO_SVE2_RRXR_TB(trans_SMLALB_zzxw_s, gen_helper_sve2_smlal_idx_s, false) -DO_SVE2_RRXR_TB(trans_SMLALB_zzxw_d, gen_helper_sve2_smlal_idx_d, false) -DO_SVE2_RRXR_TB(trans_SMLALT_zzxw_s, gen_helper_sve2_smlal_idx_s, true) -DO_SVE2_RRXR_TB(trans_SMLALT_zzxw_d, gen_helper_sve2_smlal_idx_d, true) +DO_SVE2_RRXR_TB(SMLALB_zzxw_s, gen_helper_sve2_smlal_idx_s, false) +DO_SVE2_RRXR_TB(SMLALB_zzxw_d, gen_helper_sve2_smlal_idx_d, false) +DO_SVE2_RRXR_TB(SMLALT_zzxw_s, gen_helper_sve2_smlal_idx_s, true) +DO_SVE2_RRXR_TB(SMLALT_zzxw_d, gen_helper_sve2_smlal_idx_d, true) -DO_SVE2_RRXR_TB(trans_UMLALB_zzxw_s, gen_helper_sve2_umlal_idx_s, false) -DO_SVE2_RRXR_TB(trans_UMLALB_zzxw_d, gen_helper_sve2_umlal_idx_d, false) -DO_SVE2_RRXR_TB(trans_UMLALT_zzxw_s, gen_helper_sve2_umlal_idx_s, true) -DO_SVE2_RRXR_TB(trans_UMLALT_zzxw_d, gen_helper_sve2_umlal_idx_d, true) +DO_SVE2_RRXR_TB(UMLALB_zzxw_s, gen_helper_sve2_umlal_idx_s, false) +DO_SVE2_RRXR_TB(UMLALB_zzxw_d, gen_helper_sve2_umlal_idx_d, false) +DO_SVE2_RRXR_TB(UMLALT_zzxw_s, gen_helper_sve2_umlal_idx_s, true) +DO_SVE2_RRXR_TB(UMLALT_zzxw_d, gen_helper_sve2_umlal_idx_d, true) -DO_SVE2_RRXR_TB(trans_SMLSLB_zzxw_s, gen_helper_sve2_smlsl_idx_s, false) -DO_SVE2_RRXR_TB(trans_SMLSLB_zzxw_d, gen_helper_sve2_smlsl_idx_d, false) -DO_SVE2_RRXR_TB(trans_SMLSLT_zzxw_s, gen_helper_sve2_smlsl_idx_s, true) -DO_SVE2_RRXR_TB(trans_SMLSLT_zzxw_d, gen_helper_sve2_smlsl_idx_d, true) +DO_SVE2_RRXR_TB(SMLSLB_zzxw_s, gen_helper_sve2_smlsl_idx_s, false) +DO_SVE2_RRXR_TB(SMLSLB_zzxw_d, gen_helper_sve2_smlsl_idx_d, false) +DO_SVE2_RRXR_TB(SMLSLT_zzxw_s, gen_helper_sve2_smlsl_idx_s, true) +DO_SVE2_RRXR_TB(SMLSLT_zzxw_d, gen_helper_sve2_smlsl_idx_d, true) -DO_SVE2_RRXR_TB(trans_UMLSLB_zzxw_s, gen_helper_sve2_umlsl_idx_s, false) -DO_SVE2_RRXR_TB(trans_UMLSLB_zzxw_d, gen_helper_sve2_umlsl_idx_d, false) -DO_SVE2_RRXR_TB(trans_UMLSLT_zzxw_s, gen_helper_sve2_umlsl_idx_s, true) -DO_SVE2_RRXR_TB(trans_UMLSLT_zzxw_d, gen_helper_sve2_umlsl_idx_d, true) +DO_SVE2_RRXR_TB(UMLSLB_zzxw_s, gen_helper_sve2_umlsl_idx_s, false) +DO_SVE2_RRXR_TB(UMLSLB_zzxw_d, gen_helper_sve2_umlsl_idx_d, false) +DO_SVE2_RRXR_TB(UMLSLT_zzxw_s, gen_helper_sve2_umlsl_idx_s, true) +DO_SVE2_RRXR_TB(UMLSLT_zzxw_d, gen_helper_sve2_umlsl_idx_d, true) #undef DO_SVE2_RRXR_TB #define DO_SVE2_RRXR_ROT(NAME, FUNC) \ - static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \ - { \ - return do_sve2_zzzz_data(s, a->rd, a->rn, a->rm, a->ra, \ - (a->index << 2) | a->rot, FUNC); \ - } + TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_zzzz, FUNC, \ + a->rd, a->rn, a->rm, a->ra, (a->index << 2) | a->rot) DO_SVE2_RRXR_ROT(CMLA_zzxz_h, gen_helper_sve2_cmla_idx_h) DO_SVE2_RRXR_ROT(CMLA_zzxz_s, gen_helper_sve2_cmla_idx_s) @@ -4161,59 +3754,31 @@ DO_SVE2_RRXR_ROT(CDOT_zzxw_d, gen_helper_sve2_cdot_idx_d) static bool do_FMLA_zzxz(DisasContext *s, arg_rrxr_esz *a, bool sub) { - static gen_helper_gvec_4_ptr * const fns[3] = { + static gen_helper_gvec_4_ptr * const fns[4] = { + NULL, gen_helper_gvec_fmla_idx_h, gen_helper_gvec_fmla_idx_s, gen_helper_gvec_fmla_idx_d, }; - - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? 
FPST_FPCR_F16 : FPST_FPCR); - tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - vec_full_reg_offset(s, a->rm), - vec_full_reg_offset(s, a->ra), - status, vsz, vsz, (a->index << 1) | sub, - fns[a->esz - 1]); - tcg_temp_free_ptr(status); - } - return true; + return gen_gvec_fpst_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra, + (a->index << 1) | sub, + a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); } -static bool trans_FMLA_zzxz(DisasContext *s, arg_FMLA_zzxz *a) -{ - return do_FMLA_zzxz(s, a, false); -} - -static bool trans_FMLS_zzxz(DisasContext *s, arg_FMLA_zzxz *a) -{ - return do_FMLA_zzxz(s, a, true); -} +TRANS_FEAT(FMLA_zzxz, aa64_sve, do_FMLA_zzxz, a, false) +TRANS_FEAT(FMLS_zzxz, aa64_sve, do_FMLA_zzxz, a, true) /* *** SVE Floating Point Multiply Indexed Group */ -static bool trans_FMUL_zzx(DisasContext *s, arg_FMUL_zzx *a) -{ - static gen_helper_gvec_3_ptr * const fns[3] = { - gen_helper_gvec_fmul_idx_h, - gen_helper_gvec_fmul_idx_s, - gen_helper_gvec_fmul_idx_d, - }; - - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); - tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - vec_full_reg_offset(s, a->rm), - status, vsz, vsz, a->index, fns[a->esz - 1]); - tcg_temp_free_ptr(status); - } - return true; -} +static gen_helper_gvec_3_ptr * const fmul_idx_fns[4] = { + NULL, gen_helper_gvec_fmul_idx_h, + gen_helper_gvec_fmul_idx_s, gen_helper_gvec_fmul_idx_d, +}; +TRANS_FEAT(FMUL_zzx, aa64_sve, gen_gvec_fpst_zzz, + fmul_idx_fns[a->esz], a->rd, a->rn, a->rm, a->index, + a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) /* *** SVE Floating Point Fast Reduction Group @@ -4222,15 +3787,24 @@ static bool trans_FMUL_zzx(DisasContext *s, arg_FMUL_zzx *a) typedef void gen_helper_fp_reduce(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); -static void do_reduce(DisasContext *s, arg_rpr_esz *a, +static bool do_reduce(DisasContext *s, arg_rpr_esz *a, gen_helper_fp_reduce *fn) { - unsigned vsz = vec_full_reg_size(s); - unsigned p2vsz = pow2ceil(vsz); - TCGv_i32 t_desc = tcg_const_i32(simd_desc(vsz, vsz, p2vsz)); + unsigned vsz, p2vsz; + TCGv_i32 t_desc; TCGv_ptr t_zn, t_pg, status; TCGv_i64 temp; + if (fn == NULL) { + return false; + } + if (!sve_access_check(s)) { + return true; + } + + vsz = vec_full_reg_size(s); + p2vsz = pow2ceil(vsz); + t_desc = tcg_constant_i32(simd_desc(vsz, vsz, p2vsz)); temp = tcg_temp_new_i64(); t_zn = tcg_temp_new_ptr(); t_pg = tcg_temp_new_ptr(); @@ -4243,28 +3817,18 @@ static void do_reduce(DisasContext *s, arg_rpr_esz *a, tcg_temp_free_ptr(t_zn); tcg_temp_free_ptr(t_pg); tcg_temp_free_ptr(status); - tcg_temp_free_i32(t_desc); write_fp_dreg(s, a->rd, temp); tcg_temp_free_i64(temp); + return true; } #define DO_VPZ(NAME, name) \ -static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \ -{ \ - static gen_helper_fp_reduce * const fns[3] = { \ - gen_helper_sve_##name##_h, \ - gen_helper_sve_##name##_s, \ - gen_helper_sve_##name##_d, \ + static gen_helper_fp_reduce * const name##_fns[4] = { \ + NULL, gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ }; \ - if (a->esz == 0) { \ - return false; \ - } \ - if (sve_access_check(s)) { \ - do_reduce(s, a, fns[a->esz - 1]); \ - } \ - return true; \ -} + TRANS_FEAT(NAME, aa64_sve, do_reduce, a, name##_fns[a->esz]) DO_VPZ(FADDV, faddv) DO_VPZ(FMINNMV, fminnmv) @@ -4272,86 +3836,54 @@ DO_VPZ(FMAXNMV, fmaxnmv) DO_VPZ(FMINV, fminv) DO_VPZ(FMAXV, 
fmaxv) +#undef DO_VPZ + /* *** SVE Floating Point Unary Operations - Unpredicated Group */ -static void do_zz_fp(DisasContext *s, arg_rr_esz *a, gen_helper_gvec_2_ptr *fn) -{ - unsigned vsz = vec_full_reg_size(s); - TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); +static gen_helper_gvec_2_ptr * const frecpe_fns[] = { + NULL, gen_helper_gvec_frecpe_h, + gen_helper_gvec_frecpe_s, gen_helper_gvec_frecpe_d, +}; +TRANS_FEAT(FRECPE, aa64_sve, gen_gvec_fpst_arg_zz, frecpe_fns[a->esz], a, 0) - tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - status, vsz, vsz, 0, fn); - tcg_temp_free_ptr(status); -} - -static bool trans_FRECPE(DisasContext *s, arg_rr_esz *a) -{ - static gen_helper_gvec_2_ptr * const fns[3] = { - gen_helper_gvec_frecpe_h, - gen_helper_gvec_frecpe_s, - gen_helper_gvec_frecpe_d, - }; - if (a->esz == 0) { - return false; - } - if (sve_access_check(s)) { - do_zz_fp(s, a, fns[a->esz - 1]); - } - return true; -} - -static bool trans_FRSQRTE(DisasContext *s, arg_rr_esz *a) -{ - static gen_helper_gvec_2_ptr * const fns[3] = { - gen_helper_gvec_frsqrte_h, - gen_helper_gvec_frsqrte_s, - gen_helper_gvec_frsqrte_d, - }; - if (a->esz == 0) { - return false; - } - if (sve_access_check(s)) { - do_zz_fp(s, a, fns[a->esz - 1]); - } - return true; -} +static gen_helper_gvec_2_ptr * const frsqrte_fns[] = { + NULL, gen_helper_gvec_frsqrte_h, + gen_helper_gvec_frsqrte_s, gen_helper_gvec_frsqrte_d, +}; +TRANS_FEAT(FRSQRTE, aa64_sve, gen_gvec_fpst_arg_zz, frsqrte_fns[a->esz], a, 0) /* *** SVE Floating Point Compare with Zero Group */ -static void do_ppz_fp(DisasContext *s, arg_rpr_esz *a, +static bool do_ppz_fp(DisasContext *s, arg_rpr_esz *a, gen_helper_gvec_3_ptr *fn) { - unsigned vsz = vec_full_reg_size(s); - TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); + if (fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr status = + fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); - tcg_gen_gvec_3_ptr(pred_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - pred_full_reg_offset(s, a->pg), - status, vsz, vsz, 0, fn); - tcg_temp_free_ptr(status); + tcg_gen_gvec_3_ptr(pred_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + pred_full_reg_offset(s, a->pg), + status, vsz, vsz, 0, fn); + tcg_temp_free_ptr(status); + } + return true; } #define DO_PPZ(NAME, name) \ -static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \ -{ \ - static gen_helper_gvec_3_ptr * const fns[3] = { \ - gen_helper_sve_##name##_h, \ - gen_helper_sve_##name##_s, \ - gen_helper_sve_##name##_d, \ + static gen_helper_gvec_3_ptr * const name##_fns[] = { \ + NULL, gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ }; \ - if (a->esz == 0) { \ - return false; \ - } \ - if (sve_access_check(s)) { \ - do_ppz_fp(s, a, fns[a->esz - 1]); \ - } \ - return true; \ -} + TRANS_FEAT(NAME, aa64_sve, do_ppz_fp, a, name##_fns[a->esz]) DO_PPZ(FCMGE_ppz0, fcmge0) DO_PPZ(FCMGT_ppz0, fcmgt0) @@ -4366,28 +3898,13 @@ DO_PPZ(FCMNE_ppz0, fcmne0) *** SVE floating-point trig multiply-add coefficient */ -static bool trans_FTMAD(DisasContext *s, arg_FTMAD *a) -{ - static gen_helper_gvec_3_ptr * const fns[3] = { - gen_helper_sve_ftmad_h, - gen_helper_sve_ftmad_s, - gen_helper_sve_ftmad_d, - }; - - if (a->esz == 0) { - return false; - } - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? 
FPST_FPCR_F16 : FPST_FPCR); - tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - vec_full_reg_offset(s, a->rm), - status, vsz, vsz, a->imm, fns[a->esz - 1]); - tcg_temp_free_ptr(status); - } - return true; -} +static gen_helper_gvec_3_ptr * const ftmad_fns[4] = { + NULL, gen_helper_sve_ftmad_h, + gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d, +}; +TRANS_FEAT_NONSTREAMING(FTMAD, aa64_sve, gen_gvec_fpst_zzz, + ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm, + a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) /* *** SVE Floating Point Accumulating Reduction Group @@ -4407,9 +3924,10 @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) TCGv_i64 t_val; TCGv_i32 t_desc; - if (a->esz == 0) { + if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { return false; } + s->is_nonstreaming = true; if (!sve_access_check(s)) { return true; } @@ -4420,11 +3938,10 @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) tcg_gen_addi_ptr(t_rm, cpu_env, vec_full_reg_offset(s, a->rm)); tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg)); t_fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); - t_desc = tcg_const_i32(simd_desc(vsz, vsz, 0)); + t_desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); fns[a->esz - 1](t_val, t_val, t_rm, t_pg, t_fpst, t_desc); - tcg_temp_free_i32(t_desc); tcg_temp_free_ptr(t_fpst); tcg_temp_free_ptr(t_pg); tcg_temp_free_ptr(t_rm); @@ -4438,90 +3955,50 @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) *** SVE Floating Point Arithmetic - Unpredicated Group */ -static bool do_zzz_fp(DisasContext *s, arg_rrr_esz *a, - gen_helper_gvec_3_ptr *fn) -{ - if (fn == NULL) { - return false; - } - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); - tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - vec_full_reg_offset(s, a->rm), - status, vsz, vsz, 0, fn); - tcg_temp_free_ptr(status); - } - return true; -} - - #define DO_FP3(NAME, name) \ -static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \ -{ \ - static gen_helper_gvec_3_ptr * const fns[4] = { \ + static gen_helper_gvec_3_ptr * const name##_fns[4] = { \ NULL, gen_helper_gvec_##name##_h, \ gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d \ }; \ - return do_zzz_fp(s, a, fns[a->esz]); \ -} + TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_arg_zzz, name##_fns[a->esz], a, 0) DO_FP3(FADD_zzz, fadd) DO_FP3(FSUB_zzz, fsub) DO_FP3(FMUL_zzz, fmul) -DO_FP3(FTSMUL, ftsmul) DO_FP3(FRECPS, recps) DO_FP3(FRSQRTS, rsqrts) #undef DO_FP3 +static gen_helper_gvec_3_ptr * const ftsmul_fns[4] = { + NULL, gen_helper_gvec_ftsmul_h, + gen_helper_gvec_ftsmul_s, gen_helper_gvec_ftsmul_d +}; +TRANS_FEAT_NONSTREAMING(FTSMUL, aa64_sve, gen_gvec_fpst_arg_zzz, + ftsmul_fns[a->esz], a, 0) + /* *** SVE Floating Point Arithmetic - Predicated Group */ -static bool do_zpzz_fp(DisasContext *s, arg_rprr_esz *a, - gen_helper_gvec_4_ptr *fn) -{ - if (fn == NULL) { - return false; - } - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? 
FPST_FPCR_F16 : FPST_FPCR); - tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - vec_full_reg_offset(s, a->rm), - pred_full_reg_offset(s, a->pg), - status, vsz, vsz, 0, fn); - tcg_temp_free_ptr(status); - } - return true; -} +#define DO_ZPZZ_FP(NAME, FEAT, name) \ + static gen_helper_gvec_4_ptr * const name##_zpzz_fns[4] = { \ + NULL, gen_helper_##name##_h, \ + gen_helper_##name##_s, gen_helper_##name##_d \ + }; \ + TRANS_FEAT(NAME, FEAT, gen_gvec_fpst_arg_zpzz, name##_zpzz_fns[a->esz], a) -#define DO_FP3(NAME, name) \ -static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \ -{ \ - static gen_helper_gvec_4_ptr * const fns[4] = { \ - NULL, gen_helper_sve_##name##_h, \ - gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ - }; \ - return do_zpzz_fp(s, a, fns[a->esz]); \ -} - -DO_FP3(FADD_zpzz, fadd) -DO_FP3(FSUB_zpzz, fsub) -DO_FP3(FMUL_zpzz, fmul) -DO_FP3(FMIN_zpzz, fmin) -DO_FP3(FMAX_zpzz, fmax) -DO_FP3(FMINNM_zpzz, fminnum) -DO_FP3(FMAXNM_zpzz, fmaxnum) -DO_FP3(FABD, fabd) -DO_FP3(FSCALE, fscalbn) -DO_FP3(FDIV, fdiv) -DO_FP3(FMULX, fmulx) - -#undef DO_FP3 +DO_ZPZZ_FP(FADD_zpzz, aa64_sve, sve_fadd) +DO_ZPZZ_FP(FSUB_zpzz, aa64_sve, sve_fsub) +DO_ZPZZ_FP(FMUL_zpzz, aa64_sve, sve_fmul) +DO_ZPZZ_FP(FMIN_zpzz, aa64_sve, sve_fmin) +DO_ZPZZ_FP(FMAX_zpzz, aa64_sve, sve_fmax) +DO_ZPZZ_FP(FMINNM_zpzz, aa64_sve, sve_fminnum) +DO_ZPZZ_FP(FMAXNM_zpzz, aa64_sve, sve_fmaxnum) +DO_ZPZZ_FP(FABD, aa64_sve, sve_fabd) +DO_ZPZZ_FP(FSCALE, aa64_sve, sve_fscalbn) +DO_ZPZZ_FP(FDIV, aa64_sve, sve_fdiv) +DO_ZPZZ_FP(FMULX, aa64_sve, sve_fmulx) typedef void gen_helper_sve_fp2scalar(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr, TCGv_i32); @@ -4541,45 +4018,42 @@ static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16, tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg)); status = fpstatus_ptr(is_fp16 ? 
FPST_FPCR_F16 : FPST_FPCR); - desc = tcg_const_i32(simd_desc(vsz, vsz, 0)); + desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); fn(t_zd, t_zn, t_pg, scalar, status, desc); - tcg_temp_free_i32(desc); tcg_temp_free_ptr(status); tcg_temp_free_ptr(t_pg); tcg_temp_free_ptr(t_zn); tcg_temp_free_ptr(t_zd); } -static void do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm, +static bool do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm, gen_helper_sve_fp2scalar *fn) { - TCGv_i64 temp = tcg_const_i64(imm); - do_fp_scalar(s, a->rd, a->rn, a->pg, a->esz == MO_16, temp, fn); - tcg_temp_free_i64(temp); + if (fn == NULL) { + return false; + } + if (sve_access_check(s)) { + do_fp_scalar(s, a->rd, a->rn, a->pg, a->esz == MO_16, + tcg_constant_i64(imm), fn); + } + return true; } -#define DO_FP_IMM(NAME, name, const0, const1) \ -static bool trans_##NAME##_zpzi(DisasContext *s, arg_rpri_esz *a) \ -{ \ - static gen_helper_sve_fp2scalar * const fns[3] = { \ - gen_helper_sve_##name##_h, \ - gen_helper_sve_##name##_s, \ - gen_helper_sve_##name##_d \ - }; \ - static uint64_t const val[3][2] = { \ - { float16_##const0, float16_##const1 }, \ - { float32_##const0, float32_##const1 }, \ - { float64_##const0, float64_##const1 }, \ - }; \ - if (a->esz == 0) { \ - return false; \ - } \ - if (sve_access_check(s)) { \ - do_fp_imm(s, a, val[a->esz - 1][a->imm], fns[a->esz - 1]); \ - } \ - return true; \ -} +#define DO_FP_IMM(NAME, name, const0, const1) \ + static gen_helper_sve_fp2scalar * const name##_fns[4] = { \ + NULL, gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_s, \ + gen_helper_sve_##name##_d \ + }; \ + static uint64_t const name##_const[4][2] = { \ + { -1, -1 }, \ + { float16_##const0, float16_##const1 }, \ + { float32_##const0, float32_##const1 }, \ + { float64_##const0, float64_##const1 }, \ + }; \ + TRANS_FEAT(NAME##_zpzi, aa64_sve, do_fp_imm, a, \ + name##_const[a->esz][a->imm], name##_fns[a->esz]) DO_FP_IMM(FADD, fadds, half, one) DO_FP_IMM(FSUB, fsubs, half, one) @@ -4612,14 +4086,11 @@ static bool do_fp_cmp(DisasContext *s, arg_rprr_esz *a, } #define DO_FPCMP(NAME, name) \ -static bool trans_##NAME##_ppzz(DisasContext *s, arg_rprr_esz *a) \ -{ \ - static gen_helper_gvec_4_ptr * const fns[4] = { \ + static gen_helper_gvec_4_ptr * const name##_fns[4] = { \ NULL, gen_helper_sve_##name##_h, \ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ }; \ - return do_fp_cmp(s, a, fns[a->esz]); \ -} + TRANS_FEAT(NAME##_ppzz, aa64_sve, do_fp_cmp, a, name##_fns[a->esz]) DO_FPCMP(FCMGE, fcmge) DO_FPCMP(FCMGT, fcmgt) @@ -4631,59 +4102,22 @@ DO_FPCMP(FACGT, facgt) #undef DO_FPCMP -static bool trans_FCADD(DisasContext *s, arg_FCADD *a) -{ - static gen_helper_gvec_4_ptr * const fns[3] = { - gen_helper_sve_fcadd_h, - gen_helper_sve_fcadd_s, - gen_helper_sve_fcadd_d - }; - - if (a->esz == 0) { - return false; - } - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); - tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - vec_full_reg_offset(s, a->rm), - pred_full_reg_offset(s, a->pg), - status, vsz, vsz, a->rot, fns[a->esz - 1]); - tcg_temp_free_ptr(status); - } - return true; -} - -static bool do_fmla(DisasContext *s, arg_rprrr_esz *a, - gen_helper_gvec_5_ptr *fn) -{ - if (a->esz == 0) { - return false; - } - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? 
FPST_FPCR_F16 : FPST_FPCR); - tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - vec_full_reg_offset(s, a->rm), - vec_full_reg_offset(s, a->ra), - pred_full_reg_offset(s, a->pg), - status, vsz, vsz, 0, fn); - tcg_temp_free_ptr(status); - } - return true; -} +static gen_helper_gvec_4_ptr * const fcadd_fns[] = { + NULL, gen_helper_sve_fcadd_h, + gen_helper_sve_fcadd_s, gen_helper_sve_fcadd_d, +}; +TRANS_FEAT(FCADD, aa64_sve, gen_gvec_fpst_zzzp, fcadd_fns[a->esz], + a->rd, a->rn, a->rm, a->pg, a->rot, + a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) #define DO_FMLA(NAME, name) \ -static bool trans_##NAME(DisasContext *s, arg_rprrr_esz *a) \ -{ \ - static gen_helper_gvec_5_ptr * const fns[4] = { \ - NULL, gen_helper_sve_##name##_h, \ - gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ - }; \ - return do_fmla(s, a, fns[a->esz]); \ -} + static gen_helper_gvec_5_ptr * const name##_fns[4] = { \ + NULL, gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ + }; \ + TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_zzzzp, name##_fns[a->esz], \ + a->rd, a->rn, a->rm, a->ra, a->pg, 0, \ + a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) DO_FMLA(FMLA_zpzzz, fmla_zpzzz) DO_FMLA(FMLS_zpzzz, fmls_zpzzz) @@ -4692,368 +4126,180 @@ DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz) #undef DO_FMLA -static bool trans_FCMLA_zpzzz(DisasContext *s, arg_FCMLA_zpzzz *a) -{ - static gen_helper_gvec_5_ptr * const fns[4] = { - NULL, - gen_helper_sve_fcmla_zpzzz_h, - gen_helper_sve_fcmla_zpzzz_s, - gen_helper_sve_fcmla_zpzzz_d, - }; +static gen_helper_gvec_5_ptr * const fcmla_fns[4] = { + NULL, gen_helper_sve_fcmla_zpzzz_h, + gen_helper_sve_fcmla_zpzzz_s, gen_helper_sve_fcmla_zpzzz_d, +}; +TRANS_FEAT(FCMLA_zpzzz, aa64_sve, gen_gvec_fpst_zzzzp, fcmla_fns[a->esz], + a->rd, a->rn, a->rm, a->ra, a->pg, a->rot, + a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) - if (a->esz == 0) { - return false; - } - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); - tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - vec_full_reg_offset(s, a->rm), - vec_full_reg_offset(s, a->ra), - pred_full_reg_offset(s, a->pg), - status, vsz, vsz, a->rot, fns[a->esz]); - tcg_temp_free_ptr(status); - } - return true; -} - -static bool trans_FCMLA_zzxz(DisasContext *s, arg_FCMLA_zzxz *a) -{ - static gen_helper_gvec_4_ptr * const fns[2] = { - gen_helper_gvec_fcmlah_idx, - gen_helper_gvec_fcmlas_idx, - }; - - tcg_debug_assert(a->esz == 1 || a->esz == 2); - tcg_debug_assert(a->rd == a->ra); - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); - tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - vec_full_reg_offset(s, a->rm), - vec_full_reg_offset(s, a->ra), - status, vsz, vsz, - a->index * 4 + a->rot, - fns[a->esz - 1]); - tcg_temp_free_ptr(status); - } - return true; -} +static gen_helper_gvec_4_ptr * const fcmla_idx_fns[4] = { + NULL, gen_helper_gvec_fcmlah_idx, gen_helper_gvec_fcmlas_idx, NULL +}; +TRANS_FEAT(FCMLA_zzxz, aa64_sve, gen_gvec_fpst_zzzz, fcmla_idx_fns[a->esz], + a->rd, a->rn, a->rm, a->ra, a->index * 4 + a->rot, + a->esz == MO_16 ? 
FPST_FPCR_F16 : FPST_FPCR) /* *** SVE Floating Point Unary Operations Predicated Group */ -static bool do_zpz_ptr(DisasContext *s, int rd, int rn, int pg, - bool is_fp16, gen_helper_gvec_3_ptr *fn) -{ - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - TCGv_ptr status = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR); - tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd), - vec_full_reg_offset(s, rn), - pred_full_reg_offset(s, pg), - status, vsz, vsz, 0, fn); - tcg_temp_free_ptr(status); - } - return true; -} +TRANS_FEAT(FCVT_sh, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvt_sh, a, 0, FPST_FPCR) +TRANS_FEAT(FCVT_hs, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvt_hs, a, 0, FPST_FPCR) -static bool trans_FCVT_sh(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_sh); -} +TRANS_FEAT(BFCVT, aa64_sve_bf16, gen_gvec_fpst_arg_zpz, + gen_helper_sve_bfcvt, a, 0, FPST_FPCR) -static bool trans_FCVT_hs(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_hs); -} +TRANS_FEAT(FCVT_dh, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvt_dh, a, 0, FPST_FPCR) +TRANS_FEAT(FCVT_hd, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvt_hd, a, 0, FPST_FPCR) +TRANS_FEAT(FCVT_ds, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvt_ds, a, 0, FPST_FPCR) +TRANS_FEAT(FCVT_sd, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvt_sd, a, 0, FPST_FPCR) -static bool trans_BFCVT(DisasContext *s, arg_rpr_esz *a) -{ - if (!dc_isar_feature(aa64_sve_bf16, s)) { - return false; - } - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_bfcvt); -} +TRANS_FEAT(FCVTZS_hh, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvtzs_hh, a, 0, FPST_FPCR_F16) +TRANS_FEAT(FCVTZU_hh, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvtzu_hh, a, 0, FPST_FPCR_F16) +TRANS_FEAT(FCVTZS_hs, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvtzs_hs, a, 0, FPST_FPCR_F16) +TRANS_FEAT(FCVTZU_hs, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvtzu_hs, a, 0, FPST_FPCR_F16) +TRANS_FEAT(FCVTZS_hd, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvtzs_hd, a, 0, FPST_FPCR_F16) +TRANS_FEAT(FCVTZU_hd, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvtzu_hd, a, 0, FPST_FPCR_F16) -static bool trans_FCVT_dh(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_dh); -} +TRANS_FEAT(FCVTZS_ss, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvtzs_ss, a, 0, FPST_FPCR) +TRANS_FEAT(FCVTZU_ss, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvtzu_ss, a, 0, FPST_FPCR) +TRANS_FEAT(FCVTZS_sd, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvtzs_sd, a, 0, FPST_FPCR) +TRANS_FEAT(FCVTZU_sd, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvtzu_sd, a, 0, FPST_FPCR) +TRANS_FEAT(FCVTZS_ds, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvtzs_ds, a, 0, FPST_FPCR) +TRANS_FEAT(FCVTZU_ds, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvtzu_ds, a, 0, FPST_FPCR) -static bool trans_FCVT_hd(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_hd); -} +TRANS_FEAT(FCVTZS_dd, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvtzs_dd, a, 0, FPST_FPCR) +TRANS_FEAT(FCVTZU_dd, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_fcvtzu_dd, a, 0, FPST_FPCR) -static bool trans_FCVT_ds(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, 
gen_helper_sve_fcvt_ds); -} - -static bool trans_FCVT_sd(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_sd); -} - -static bool trans_FCVTZS_hh(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hh); -} - -static bool trans_FCVTZU_hh(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hh); -} - -static bool trans_FCVTZS_hs(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hs); -} - -static bool trans_FCVTZU_hs(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hs); -} - -static bool trans_FCVTZS_hd(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hd); -} - -static bool trans_FCVTZU_hd(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hd); -} - -static bool trans_FCVTZS_ss(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_ss); -} - -static bool trans_FCVTZU_ss(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_ss); -} - -static bool trans_FCVTZS_sd(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_sd); -} - -static bool trans_FCVTZU_sd(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_sd); -} - -static bool trans_FCVTZS_ds(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_ds); -} - -static bool trans_FCVTZU_ds(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_ds); -} - -static bool trans_FCVTZS_dd(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_dd); -} - -static bool trans_FCVTZU_dd(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_dd); -} - -static gen_helper_gvec_3_ptr * const frint_fns[3] = { +static gen_helper_gvec_3_ptr * const frint_fns[] = { + NULL, gen_helper_sve_frint_h, gen_helper_sve_frint_s, gen_helper_sve_frint_d }; +TRANS_FEAT(FRINTI, aa64_sve, gen_gvec_fpst_arg_zpz, frint_fns[a->esz], + a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) -static bool trans_FRINTI(DisasContext *s, arg_rpr_esz *a) -{ - if (a->esz == 0) { - return false; - } - return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, - frint_fns[a->esz - 1]); -} - -static bool trans_FRINTX(DisasContext *s, arg_rpr_esz *a) -{ - static gen_helper_gvec_3_ptr * const fns[3] = { - gen_helper_sve_frintx_h, - gen_helper_sve_frintx_s, - gen_helper_sve_frintx_d - }; - if (a->esz == 0) { - return false; - } - return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]); -} +static gen_helper_gvec_3_ptr * const frintx_fns[] = { + NULL, + gen_helper_sve_frintx_h, + gen_helper_sve_frintx_s, + gen_helper_sve_frintx_d +}; +TRANS_FEAT(FRINTX, aa64_sve, gen_gvec_fpst_arg_zpz, frintx_fns[a->esz], + a, 0, a->esz == MO_16 ? 
FPST_FPCR_F16 : FPST_FPCR); static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a, int mode, gen_helper_gvec_3_ptr *fn) { - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - TCGv_i32 tmode = tcg_const_i32(mode); - TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); + unsigned vsz; + TCGv_i32 tmode; + TCGv_ptr status; - gen_helper_set_rmode(tmode, tmode, status); - - tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - pred_full_reg_offset(s, a->pg), - status, vsz, vsz, 0, fn); - - gen_helper_set_rmode(tmode, tmode, status); - tcg_temp_free_i32(tmode); - tcg_temp_free_ptr(status); + if (fn == NULL) { + return false; } + if (!sve_access_check(s)) { + return true; + } + + vsz = vec_full_reg_size(s); + tmode = tcg_const_i32(mode); + status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); + + gen_helper_set_rmode(tmode, tmode, status); + + tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + pred_full_reg_offset(s, a->pg), + status, vsz, vsz, 0, fn); + + gen_helper_set_rmode(tmode, tmode, status); + tcg_temp_free_i32(tmode); + tcg_temp_free_ptr(status); return true; } -static bool trans_FRINTN(DisasContext *s, arg_rpr_esz *a) -{ - if (a->esz == 0) { - return false; - } - return do_frint_mode(s, a, float_round_nearest_even, frint_fns[a->esz - 1]); -} +TRANS_FEAT(FRINTN, aa64_sve, do_frint_mode, a, + float_round_nearest_even, frint_fns[a->esz]) +TRANS_FEAT(FRINTP, aa64_sve, do_frint_mode, a, + float_round_up, frint_fns[a->esz]) +TRANS_FEAT(FRINTM, aa64_sve, do_frint_mode, a, + float_round_down, frint_fns[a->esz]) +TRANS_FEAT(FRINTZ, aa64_sve, do_frint_mode, a, + float_round_to_zero, frint_fns[a->esz]) +TRANS_FEAT(FRINTA, aa64_sve, do_frint_mode, a, + float_round_ties_away, frint_fns[a->esz]) -static bool trans_FRINTP(DisasContext *s, arg_rpr_esz *a) -{ - if (a->esz == 0) { - return false; - } - return do_frint_mode(s, a, float_round_up, frint_fns[a->esz - 1]); -} +static gen_helper_gvec_3_ptr * const frecpx_fns[] = { + NULL, gen_helper_sve_frecpx_h, + gen_helper_sve_frecpx_s, gen_helper_sve_frecpx_d, +}; +TRANS_FEAT(FRECPX, aa64_sve, gen_gvec_fpst_arg_zpz, frecpx_fns[a->esz], + a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) -static bool trans_FRINTM(DisasContext *s, arg_rpr_esz *a) -{ - if (a->esz == 0) { - return false; - } - return do_frint_mode(s, a, float_round_down, frint_fns[a->esz - 1]); -} +static gen_helper_gvec_3_ptr * const fsqrt_fns[] = { + NULL, gen_helper_sve_fsqrt_h, + gen_helper_sve_fsqrt_s, gen_helper_sve_fsqrt_d, +}; +TRANS_FEAT(FSQRT, aa64_sve, gen_gvec_fpst_arg_zpz, fsqrt_fns[a->esz], + a, 0, a->esz == MO_16 ? 
FPST_FPCR_F16 : FPST_FPCR) -static bool trans_FRINTZ(DisasContext *s, arg_rpr_esz *a) -{ - if (a->esz == 0) { - return false; - } - return do_frint_mode(s, a, float_round_to_zero, frint_fns[a->esz - 1]); -} +TRANS_FEAT(SCVTF_hh, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_scvt_hh, a, 0, FPST_FPCR_F16) +TRANS_FEAT(SCVTF_sh, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_scvt_sh, a, 0, FPST_FPCR_F16) +TRANS_FEAT(SCVTF_dh, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_scvt_dh, a, 0, FPST_FPCR_F16) -static bool trans_FRINTA(DisasContext *s, arg_rpr_esz *a) -{ - if (a->esz == 0) { - return false; - } - return do_frint_mode(s, a, float_round_ties_away, frint_fns[a->esz - 1]); -} +TRANS_FEAT(SCVTF_ss, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_scvt_ss, a, 0, FPST_FPCR) +TRANS_FEAT(SCVTF_ds, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_scvt_ds, a, 0, FPST_FPCR) -static bool trans_FRECPX(DisasContext *s, arg_rpr_esz *a) -{ - static gen_helper_gvec_3_ptr * const fns[3] = { - gen_helper_sve_frecpx_h, - gen_helper_sve_frecpx_s, - gen_helper_sve_frecpx_d - }; - if (a->esz == 0) { - return false; - } - return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]); -} +TRANS_FEAT(SCVTF_sd, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_scvt_sd, a, 0, FPST_FPCR) +TRANS_FEAT(SCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_scvt_dd, a, 0, FPST_FPCR) -static bool trans_FSQRT(DisasContext *s, arg_rpr_esz *a) -{ - static gen_helper_gvec_3_ptr * const fns[3] = { - gen_helper_sve_fsqrt_h, - gen_helper_sve_fsqrt_s, - gen_helper_sve_fsqrt_d - }; - if (a->esz == 0) { - return false; - } - return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]); -} +TRANS_FEAT(UCVTF_hh, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_ucvt_hh, a, 0, FPST_FPCR_F16) +TRANS_FEAT(UCVTF_sh, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_ucvt_sh, a, 0, FPST_FPCR_F16) +TRANS_FEAT(UCVTF_dh, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_ucvt_dh, a, 0, FPST_FPCR_F16) -static bool trans_SCVTF_hh(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_hh); -} +TRANS_FEAT(UCVTF_ss, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_ucvt_ss, a, 0, FPST_FPCR) +TRANS_FEAT(UCVTF_ds, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_ucvt_ds, a, 0, FPST_FPCR) +TRANS_FEAT(UCVTF_sd, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_ucvt_sd, a, 0, FPST_FPCR) -static bool trans_SCVTF_sh(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_sh); -} - -static bool trans_SCVTF_dh(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_dh); -} - -static bool trans_SCVTF_ss(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_ss); -} - -static bool trans_SCVTF_ds(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_ds); -} - -static bool trans_SCVTF_sd(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_sd); -} - -static bool trans_SCVTF_dd(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_dd); -} - -static bool trans_UCVTF_hh(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_hh); -} - -static bool trans_UCVTF_sh(DisasContext *s, arg_rpr_esz *a) -{ - return 
do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_sh); -} - -static bool trans_UCVTF_dh(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_dh); -} - -static bool trans_UCVTF_ss(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_ss); -} - -static bool trans_UCVTF_ds(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_ds); -} - -static bool trans_UCVTF_sd(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_sd); -} - -static bool trans_UCVTF_dd(DisasContext *s, arg_rpr_esz *a) -{ - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_dd); -} +TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz, + gen_helper_sve_ucvt_dd, a, 0, FPST_FPCR) /* *** SVE Memory - 32-bit Gather and Unsized Contiguous Group @@ -5063,7 +4309,8 @@ static bool trans_UCVTF_dd(DisasContext *s, arg_rpr_esz *a) * The load should begin at the address Rn + IMM. */ -static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) +void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, + int len, int rn, int imm) { int len_align = QEMU_ALIGN_DOWN(len, 8); int len_remain = len % 8; @@ -5088,8 +4335,8 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) t0 = tcg_temp_new_i64(); for (i = 0; i < len_align; i += 8) { - tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEQ); - tcg_gen_st_i64(t0, cpu_env, vofs + i); + tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ); + tcg_gen_st_i64(t0, base, vofs + i); tcg_gen_addi_i64(clean_addr, clean_addr, 8); } tcg_temp_free_i64(t0); @@ -5102,14 +4349,20 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) clean_addr = new_tmp_a64_local(s); tcg_gen_mov_i64(clean_addr, t0); + if (base != cpu_env) { + TCGv_ptr b = tcg_temp_local_new_ptr(); + tcg_gen_mov_ptr(b, base); + base = b; + } + gen_set_label(loop); t0 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEQ); + tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ); tcg_gen_addi_i64(clean_addr, clean_addr, 8); tp = tcg_temp_new_ptr(); - tcg_gen_add_ptr(tp, cpu_env, i); + tcg_gen_add_ptr(tp, base, i); tcg_gen_addi_ptr(i, i, 8); tcg_gen_st_i64(t0, tp, vofs); tcg_temp_free_ptr(tp); @@ -5117,6 +4370,11 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); tcg_temp_free_ptr(i); + + if (base != cpu_env) { + tcg_temp_free_ptr(base); + assert(len_remain == 0); + } } /* @@ -5145,13 +4403,14 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) default: g_assert_not_reached(); } - tcg_gen_st_i64(t0, cpu_env, vofs + len_align); + tcg_gen_st_i64(t0, base, vofs + len_align); tcg_temp_free_i64(t0); } } /* Similarly for stores. 
*/ -static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) +void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, + int len, int rn, int imm) { int len_align = QEMU_ALIGN_DOWN(len, 8); int len_remain = len % 8; @@ -5177,8 +4436,8 @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) t0 = tcg_temp_new_i64(); for (i = 0; i < len_align; i += 8) { - tcg_gen_ld_i64(t0, cpu_env, vofs + i); - tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEQ); + tcg_gen_ld_i64(t0, base, vofs + i); + tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ); tcg_gen_addi_i64(clean_addr, clean_addr, 8); } tcg_temp_free_i64(t0); @@ -5191,27 +4450,38 @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) clean_addr = new_tmp_a64_local(s); tcg_gen_mov_i64(clean_addr, t0); + if (base != cpu_env) { + TCGv_ptr b = tcg_temp_local_new_ptr(); + tcg_gen_mov_ptr(b, base); + base = b; + } + gen_set_label(loop); t0 = tcg_temp_new_i64(); tp = tcg_temp_new_ptr(); - tcg_gen_add_ptr(tp, cpu_env, i); + tcg_gen_add_ptr(tp, base, i); tcg_gen_ld_i64(t0, tp, vofs); tcg_gen_addi_ptr(i, i, 8); tcg_temp_free_ptr(tp); - tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEQ); + tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ); tcg_gen_addi_i64(clean_addr, clean_addr, 8); tcg_temp_free_i64(t0); tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); tcg_temp_free_ptr(i); + + if (base != cpu_env) { + tcg_temp_free_ptr(base); + assert(len_remain == 0); + } } /* Predicate register stores can be any multiple of 2. */ if (len_remain) { t0 = tcg_temp_new_i64(); - tcg_gen_ld_i64(t0, cpu_env, vofs + len_align); + tcg_gen_ld_i64(t0, base, vofs + len_align); switch (len_remain) { case 2: @@ -5237,40 +4507,52 @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) static bool trans_LDR_zri(DisasContext *s, arg_rri *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { int size = vec_full_reg_size(s); int off = vec_full_reg_offset(s, a->rd); - do_ldr(s, off, size, a->rn, a->imm * size); + gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size); } return true; } static bool trans_LDR_pri(DisasContext *s, arg_rri *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { int size = pred_full_reg_size(s); int off = pred_full_reg_offset(s, a->rd); - do_ldr(s, off, size, a->rn, a->imm * size); + gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size); } return true; } static bool trans_STR_zri(DisasContext *s, arg_rri *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { int size = vec_full_reg_size(s); int off = vec_full_reg_offset(s, a->rd); - do_str(s, off, size, a->rn, a->imm * size); + gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size); } return true; } static bool trans_STR_pri(DisasContext *s, arg_rri *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { int size = pred_full_reg_size(s); int off = pred_full_reg_offset(s, a->rd); - do_str(s, off, size, a->rn, a->imm * size); + gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size); } return true; } @@ -5284,7 +4566,7 @@ static const MemOp dtype_mop[16] = { MO_UB, MO_UB, MO_UB, MO_UB, MO_SL, MO_UW, MO_UW, MO_UW, MO_SW, MO_SW, MO_UL, MO_UL, - MO_SB, MO_SB, MO_SB, MO_Q + MO_SB, MO_SB, MO_SB, MO_UQ }; #define dtype_msz(x) (dtype_mop[x] & MO_SIZE) @@ -5303,7 +4585,6 @@ static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, { unsigned vsz = 
vec_full_reg_size(s); TCGv_ptr t_pg; - TCGv_i32 t_desc; int desc = 0; /* @@ -5325,14 +4606,12 @@ static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, } desc = simd_desc(vsz, vsz, zt | desc); - t_desc = tcg_const_i32(desc); t_pg = tcg_temp_new_ptr(); tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg)); - fn(cpu_env, t_pg, addr, t_desc); + fn(cpu_env, t_pg, addr, tcg_constant_i32(desc)); tcg_temp_free_ptr(t_pg); - tcg_temp_free_i32(t_desc); } /* Indexed by [mte][be][dtype][nreg] */ @@ -5470,7 +4749,7 @@ static void do_ld_zpa(DisasContext *s, int zt, int pg, static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a) { - if (a->rm == 31) { + if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) { return false; } if (sve_access_check(s)) { @@ -5484,6 +4763,9 @@ static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a) static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { int vsz = vec_full_reg_size(s); int elements = vsz >> dtype_esz[a->dtype]; @@ -5585,6 +4867,10 @@ static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a) gen_helper_sve_ldff1dd_be_r_mte } }, }; + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } + s->is_nonstreaming = true; if (sve_access_check(s)) { TCGv_i64 addr = new_tmp_a64(s); tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); @@ -5683,6 +4969,10 @@ static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a) gen_helper_sve_ldnf1dd_be_r_mte } }, }; + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } + s->is_nonstreaming = true; if (sve_access_check(s)) { int vsz = vec_full_reg_size(s); int elements = vsz >> dtype_esz[a->dtype]; @@ -5712,7 +5002,7 @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) * for this load operation. */ TCGv_i64 tmp = tcg_temp_new_i64(); -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN poff += 6; #endif tcg_gen_ld16u_i64(tmp, cpu_env, poff); @@ -5740,7 +5030,7 @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) static bool trans_LD1RQ_zprr(DisasContext *s, arg_rprr_load *a) { - if (a->rm == 31) { + if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) { return false; } if (sve_access_check(s)) { @@ -5755,6 +5045,9 @@ static bool trans_LD1RQ_zprr(DisasContext *s, arg_rprr_load *a) static bool trans_LD1RQ_zpri(DisasContext *s, arg_rpri_load *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (sve_access_check(s)) { TCGv_i64 addr = new_tmp_a64(s); tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 16); @@ -5791,7 +5084,7 @@ static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) * for this load operation. 
*/ TCGv_i64 tmp = tcg_temp_new_i64(); -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN poff += 4; #endif tcg_gen_ld32u_i64(tmp, cpu_env, poff); @@ -5834,6 +5127,7 @@ static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a) if (a->rm == 31) { return false; } + s->is_nonstreaming = true; if (sve_access_check(s)) { TCGv_i64 addr = new_tmp_a64(s); tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); @@ -5848,6 +5142,7 @@ static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a) if (!dc_isar_feature(aa64_sve_f64mm, s)) { return false; } + s->is_nonstreaming = true; if (sve_access_check(s)) { TCGv_i64 addr = new_tmp_a64(s); tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32); @@ -5866,6 +5161,9 @@ static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a) TCGLabel *over; TCGv_i64 temp, clean_addr; + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (!sve_access_check(s)) { return true; } @@ -6034,6 +5332,9 @@ static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (a->rm == 31 || a->msz > a->esz) { return false; } @@ -6048,6 +5349,9 @@ static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a) static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } if (a->msz > a->esz) { return false; } @@ -6075,7 +5379,6 @@ static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm, TCGv_ptr t_zm = tcg_temp_new_ptr(); TCGv_ptr t_pg = tcg_temp_new_ptr(); TCGv_ptr t_zt = tcg_temp_new_ptr(); - TCGv_i32 t_desc; int desc = 0; if (s->mte_active[0]) { @@ -6087,17 +5390,15 @@ static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm, desc <<= SVE_MTEDESC_SHIFT; } desc = simd_desc(vsz, vsz, desc | scale); - t_desc = tcg_const_i32(desc); tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg)); tcg_gen_addi_ptr(t_zm, cpu_env, vec_full_reg_offset(s, zm)); tcg_gen_addi_ptr(t_zt, cpu_env, vec_full_reg_offset(s, zt)); - fn(cpu_env, t_zt, t_pg, t_zm, scalar, t_desc); + fn(cpu_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i32(desc)); tcg_temp_free_ptr(t_zt); tcg_temp_free_ptr(t_zm); tcg_temp_free_ptr(t_pg); - tcg_temp_free_i32(t_desc); } /* Indexed by [mte][be][ff][xs][u][msz]. */ @@ -6434,6 +5735,10 @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a) bool be = s->be_data == MO_BE; bool mte = s->mte_active[0]; + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } + s->is_nonstreaming = true; if (!sve_access_check(s)) { return true; } @@ -6458,11 +5763,14 @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a) gen_helper_gvec_mem_scatter *fn = NULL; bool be = s->be_data == MO_BE; bool mte = s->mte_active[0]; - TCGv_i64 imm; if (a->esz < a->msz || (a->esz == a->msz && !a->u)) { return false; } + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } + s->is_nonstreaming = true; if (!sve_access_check(s)) { return true; } @@ -6480,18 +5788,41 @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a) /* Treat LD1_zpiz (zn[x] + imm) the same way as LD1_zprz (rn + zm[x]) * by loading the immediate into the scalar parameter. 
*/ - imm = tcg_const_i64(a->imm << a->msz); - do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, a->msz, false, fn); - tcg_temp_free_i64(imm); + do_mem_zpz(s, a->rd, a->pg, a->rn, 0, + tcg_constant_i64(a->imm << a->msz), a->msz, false, fn); return true; } static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a) { + gen_helper_gvec_mem_scatter *fn = NULL; + bool be = s->be_data == MO_BE; + bool mte = s->mte_active[0]; + + if (a->esz < a->msz + !a->u) { + return false; + } if (!dc_isar_feature(aa64_sve2, s)) { return false; } - return trans_LD1_zprz(s, a); + s->is_nonstreaming = true; + if (!sve_access_check(s)) { + return true; + } + + switch (a->esz) { + case MO_32: + fn = gather_load_fn32[mte][be][0][0][a->u][a->msz]; + break; + case MO_64: + fn = gather_load_fn64[mte][be][0][2][a->u][a->msz]; + break; + } + assert(fn != NULL); + + do_mem_zpz(s, a->rd, a->pg, a->rn, 0, + cpu_reg(s, a->rm), a->msz, false, fn); + return true; } /* Indexed by [mte][be][xs][msz]. */ @@ -6595,6 +5926,10 @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a) if (a->esz < a->msz || (a->msz == 0 && a->scale)) { return false; } + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } + s->is_nonstreaming = true; if (!sve_access_check(s)) { return true; } @@ -6618,11 +5953,14 @@ static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a) gen_helper_gvec_mem_scatter *fn = NULL; bool be = s->be_data == MO_BE; bool mte = s->mte_active[0]; - TCGv_i64 imm; if (a->esz < a->msz) { return false; } + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } + s->is_nonstreaming = true; if (!sve_access_check(s)) { return true; } @@ -6640,18 +5978,42 @@ static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a) /* Treat ST1_zpiz (zn[x] + imm) the same way as ST1_zprz (rn + zm[x]) * by loading the immediate into the scalar parameter. */ - imm = tcg_const_i64(a->imm << a->msz); - do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, a->msz, true, fn); - tcg_temp_free_i64(imm); + do_mem_zpz(s, a->rd, a->pg, a->rn, 0, + tcg_constant_i64(a->imm << a->msz), a->msz, true, fn); return true; } static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a) { + gen_helper_gvec_mem_scatter *fn; + bool be = s->be_data == MO_BE; + bool mte = s->mte_active[0]; + + if (a->esz < a->msz) { + return false; + } if (!dc_isar_feature(aa64_sve2, s)) { return false; } - return trans_ST1_zprz(s, a); + s->is_nonstreaming = true; + if (!sve_access_check(s)) { + return true; + } + + switch (a->esz) { + case MO_32: + fn = scatter_store_fn32[mte][be][0][a->msz]; + break; + case MO_64: + fn = scatter_store_fn64[mte][be][2][a->msz]; + break; + default: + g_assert_not_reached(); + } + + do_mem_zpz(s, a->rd, a->pg, a->rn, 0, + cpu_reg(s, a->rm), a->msz, true, fn); + return true; } /* @@ -6660,6 +6022,9 @@ static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a) static bool trans_PRF(DisasContext *s, arg_PRF *a) { + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } /* Prefetch is a nop within QEMU. */ (void)sve_access_check(s); return true; @@ -6667,7 +6032,7 @@ static bool trans_PRF(DisasContext *s, arg_PRF *a) static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a) { - if (a->rm == 31) { + if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) { return false; } /* Prefetch is a nop within QEMU. */ @@ -6675,6 +6040,17 @@ static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a) return true; } +static bool trans_PRF_ns(DisasContext *s, arg_PRF_ns *a) +{ + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } + /* Prefetch is a nop within QEMU. 
*/ + s->is_nonstreaming = true; + (void)sve_access_check(s); + return true; +} + /* * Move Prefix * @@ -6689,294 +6065,213 @@ static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a) * In the meantime, just emit the moves. */ -static bool trans_MOVPRFX(DisasContext *s, arg_MOVPRFX *a) -{ - return do_mov_z(s, a->rd, a->rn); -} - -static bool trans_MOVPRFX_m(DisasContext *s, arg_rpr_esz *a) -{ - if (sve_access_check(s)) { - do_sel_z(s, a->rd, a->rn, a->rd, a->pg, a->esz); - } - return true; -} - -static bool trans_MOVPRFX_z(DisasContext *s, arg_rpr_esz *a) -{ - return do_movz_zpz(s, a->rd, a->rn, a->pg, a->esz, false); -} +TRANS_FEAT(MOVPRFX, aa64_sve, do_mov_z, a->rd, a->rn) +TRANS_FEAT(MOVPRFX_m, aa64_sve, do_sel_z, a->rd, a->rn, a->rd, a->pg, a->esz) +TRANS_FEAT(MOVPRFX_z, aa64_sve, do_movz_zpz, a->rd, a->rn, a->pg, a->esz, false) /* * SVE2 Integer Multiply - Unpredicated */ -static bool trans_MUL_zzz(DisasContext *s, arg_rrr_esz *a) -{ - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_fn_zzz(s, tcg_gen_gvec_mul, a->esz, a->rd, a->rn, a->rm); - } - return true; -} +TRANS_FEAT(MUL_zzz, aa64_sve2, gen_gvec_fn_arg_zzz, tcg_gen_gvec_mul, a) -static bool do_sve2_zzz_ool(DisasContext *s, arg_rrr_esz *a, - gen_helper_gvec_3 *fn) -{ - if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, 0); - } - return true; -} +static gen_helper_gvec_3 * const smulh_zzz_fns[4] = { + gen_helper_gvec_smulh_b, gen_helper_gvec_smulh_h, + gen_helper_gvec_smulh_s, gen_helper_gvec_smulh_d, +}; +TRANS_FEAT(SMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, + smulh_zzz_fns[a->esz], a, 0) -static bool trans_SMULH_zzz(DisasContext *s, arg_rrr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_gvec_smulh_b, gen_helper_gvec_smulh_h, - gen_helper_gvec_smulh_s, gen_helper_gvec_smulh_d, - }; - return do_sve2_zzz_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_3 * const umulh_zzz_fns[4] = { + gen_helper_gvec_umulh_b, gen_helper_gvec_umulh_h, + gen_helper_gvec_umulh_s, gen_helper_gvec_umulh_d, +}; +TRANS_FEAT(UMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, + umulh_zzz_fns[a->esz], a, 0) -static bool trans_UMULH_zzz(DisasContext *s, arg_rrr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_gvec_umulh_b, gen_helper_gvec_umulh_h, - gen_helper_gvec_umulh_s, gen_helper_gvec_umulh_d, - }; - return do_sve2_zzz_ool(s, a, fns[a->esz]); -} +TRANS_FEAT(PMUL_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, + gen_helper_gvec_pmul_b, a, 0) -static bool trans_PMUL_zzz(DisasContext *s, arg_rrr_esz *a) -{ - return do_sve2_zzz_ool(s, a, gen_helper_gvec_pmul_b); -} +static gen_helper_gvec_3 * const sqdmulh_zzz_fns[4] = { + gen_helper_sve2_sqdmulh_b, gen_helper_sve2_sqdmulh_h, + gen_helper_sve2_sqdmulh_s, gen_helper_sve2_sqdmulh_d, +}; +TRANS_FEAT(SQDMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, + sqdmulh_zzz_fns[a->esz], a, 0) -static bool trans_SQDMULH_zzz(DisasContext *s, arg_rrr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve2_sqdmulh_b, gen_helper_sve2_sqdmulh_h, - gen_helper_sve2_sqdmulh_s, gen_helper_sve2_sqdmulh_d, - }; - return do_sve2_zzz_ool(s, a, fns[a->esz]); -} - -static bool trans_SQRDMULH_zzz(DisasContext *s, arg_rrr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve2_sqrdmulh_b, gen_helper_sve2_sqrdmulh_h, - gen_helper_sve2_sqrdmulh_s, gen_helper_sve2_sqrdmulh_d, - }; - return do_sve2_zzz_ool(s, a, fns[a->esz]); -} +static 
gen_helper_gvec_3 * const sqrdmulh_zzz_fns[4] = { + gen_helper_sve2_sqrdmulh_b, gen_helper_sve2_sqrdmulh_h, + gen_helper_sve2_sqrdmulh_s, gen_helper_sve2_sqrdmulh_d, +}; +TRANS_FEAT(SQRDMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, + sqrdmulh_zzz_fns[a->esz], a, 0) /* * SVE2 Integer - Predicated */ -static bool do_sve2_zpzz_ool(DisasContext *s, arg_rprr_esz *a, - gen_helper_gvec_4 *fn) -{ - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - return do_zpzz_ool(s, a, fn); -} +static gen_helper_gvec_4 * const sadlp_fns[4] = { + NULL, gen_helper_sve2_sadalp_zpzz_h, + gen_helper_sve2_sadalp_zpzz_s, gen_helper_sve2_sadalp_zpzz_d, +}; +TRANS_FEAT(SADALP_zpzz, aa64_sve2, gen_gvec_ool_arg_zpzz, + sadlp_fns[a->esz], a, 0) -static bool trans_SADALP_zpzz(DisasContext *s, arg_rprr_esz *a) -{ - static gen_helper_gvec_4 * const fns[3] = { - gen_helper_sve2_sadalp_zpzz_h, - gen_helper_sve2_sadalp_zpzz_s, - gen_helper_sve2_sadalp_zpzz_d, - }; - if (a->esz == 0) { - return false; - } - return do_sve2_zpzz_ool(s, a, fns[a->esz - 1]); -} - -static bool trans_UADALP_zpzz(DisasContext *s, arg_rprr_esz *a) -{ - static gen_helper_gvec_4 * const fns[3] = { - gen_helper_sve2_uadalp_zpzz_h, - gen_helper_sve2_uadalp_zpzz_s, - gen_helper_sve2_uadalp_zpzz_d, - }; - if (a->esz == 0) { - return false; - } - return do_sve2_zpzz_ool(s, a, fns[a->esz - 1]); -} +static gen_helper_gvec_4 * const uadlp_fns[4] = { + NULL, gen_helper_sve2_uadalp_zpzz_h, + gen_helper_sve2_uadalp_zpzz_s, gen_helper_sve2_uadalp_zpzz_d, +}; +TRANS_FEAT(UADALP_zpzz, aa64_sve2, gen_gvec_ool_arg_zpzz, + uadlp_fns[a->esz], a, 0) /* * SVE2 integer unary operations (predicated) */ -static bool do_sve2_zpz_ool(DisasContext *s, arg_rpr_esz *a, - gen_helper_gvec_3 *fn) -{ - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - return do_zpz_ool(s, a, fn); -} +TRANS_FEAT(URECPE, aa64_sve2, gen_gvec_ool_arg_zpz, + a->esz == 2 ? gen_helper_sve2_urecpe_s : NULL, a, 0) -static bool trans_URECPE(DisasContext *s, arg_rpr_esz *a) -{ - if (a->esz != 2) { - return false; - } - return do_sve2_zpz_ool(s, a, gen_helper_sve2_urecpe_s); -} +TRANS_FEAT(URSQRTE, aa64_sve2, gen_gvec_ool_arg_zpz, + a->esz == 2 ? 
gen_helper_sve2_ursqrte_s : NULL, a, 0) -static bool trans_URSQRTE(DisasContext *s, arg_rpr_esz *a) -{ - if (a->esz != 2) { - return false; - } - return do_sve2_zpz_ool(s, a, gen_helper_sve2_ursqrte_s); -} +static gen_helper_gvec_3 * const sqabs_fns[4] = { + gen_helper_sve2_sqabs_b, gen_helper_sve2_sqabs_h, + gen_helper_sve2_sqabs_s, gen_helper_sve2_sqabs_d, +}; +TRANS_FEAT(SQABS, aa64_sve2, gen_gvec_ool_arg_zpz, sqabs_fns[a->esz], a, 0) -static bool trans_SQABS(DisasContext *s, arg_rpr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve2_sqabs_b, gen_helper_sve2_sqabs_h, - gen_helper_sve2_sqabs_s, gen_helper_sve2_sqabs_d, - }; - return do_sve2_zpz_ool(s, a, fns[a->esz]); -} +static gen_helper_gvec_3 * const sqneg_fns[4] = { + gen_helper_sve2_sqneg_b, gen_helper_sve2_sqneg_h, + gen_helper_sve2_sqneg_s, gen_helper_sve2_sqneg_d, +}; +TRANS_FEAT(SQNEG, aa64_sve2, gen_gvec_ool_arg_zpz, sqneg_fns[a->esz], a, 0) -static bool trans_SQNEG(DisasContext *s, arg_rpr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve2_sqneg_b, gen_helper_sve2_sqneg_h, - gen_helper_sve2_sqneg_s, gen_helper_sve2_sqneg_d, - }; - return do_sve2_zpz_ool(s, a, fns[a->esz]); -} +DO_ZPZZ(SQSHL, aa64_sve2, sve2_sqshl) +DO_ZPZZ(SQRSHL, aa64_sve2, sve2_sqrshl) +DO_ZPZZ(SRSHL, aa64_sve2, sve2_srshl) -#define DO_SVE2_ZPZZ(NAME, name) \ -static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \ -{ \ - static gen_helper_gvec_4 * const fns[4] = { \ - gen_helper_sve2_##name##_zpzz_b, gen_helper_sve2_##name##_zpzz_h, \ - gen_helper_sve2_##name##_zpzz_s, gen_helper_sve2_##name##_zpzz_d, \ - }; \ - return do_sve2_zpzz_ool(s, a, fns[a->esz]); \ -} +DO_ZPZZ(UQSHL, aa64_sve2, sve2_uqshl) +DO_ZPZZ(UQRSHL, aa64_sve2, sve2_uqrshl) +DO_ZPZZ(URSHL, aa64_sve2, sve2_urshl) -DO_SVE2_ZPZZ(SQSHL, sqshl) -DO_SVE2_ZPZZ(SQRSHL, sqrshl) -DO_SVE2_ZPZZ(SRSHL, srshl) +DO_ZPZZ(SHADD, aa64_sve2, sve2_shadd) +DO_ZPZZ(SRHADD, aa64_sve2, sve2_srhadd) +DO_ZPZZ(SHSUB, aa64_sve2, sve2_shsub) -DO_SVE2_ZPZZ(UQSHL, uqshl) -DO_SVE2_ZPZZ(UQRSHL, uqrshl) -DO_SVE2_ZPZZ(URSHL, urshl) +DO_ZPZZ(UHADD, aa64_sve2, sve2_uhadd) +DO_ZPZZ(URHADD, aa64_sve2, sve2_urhadd) +DO_ZPZZ(UHSUB, aa64_sve2, sve2_uhsub) -DO_SVE2_ZPZZ(SHADD, shadd) -DO_SVE2_ZPZZ(SRHADD, srhadd) -DO_SVE2_ZPZZ(SHSUB, shsub) +DO_ZPZZ(ADDP, aa64_sve2, sve2_addp) +DO_ZPZZ(SMAXP, aa64_sve2, sve2_smaxp) +DO_ZPZZ(UMAXP, aa64_sve2, sve2_umaxp) +DO_ZPZZ(SMINP, aa64_sve2, sve2_sminp) +DO_ZPZZ(UMINP, aa64_sve2, sve2_uminp) -DO_SVE2_ZPZZ(UHADD, uhadd) -DO_SVE2_ZPZZ(URHADD, urhadd) -DO_SVE2_ZPZZ(UHSUB, uhsub) - -DO_SVE2_ZPZZ(ADDP, addp) -DO_SVE2_ZPZZ(SMAXP, smaxp) -DO_SVE2_ZPZZ(UMAXP, umaxp) -DO_SVE2_ZPZZ(SMINP, sminp) -DO_SVE2_ZPZZ(UMINP, uminp) - -DO_SVE2_ZPZZ(SQADD_zpzz, sqadd) -DO_SVE2_ZPZZ(UQADD_zpzz, uqadd) -DO_SVE2_ZPZZ(SQSUB_zpzz, sqsub) -DO_SVE2_ZPZZ(UQSUB_zpzz, uqsub) -DO_SVE2_ZPZZ(SUQADD, suqadd) -DO_SVE2_ZPZZ(USQADD, usqadd) +DO_ZPZZ(SQADD_zpzz, aa64_sve2, sve2_sqadd) +DO_ZPZZ(UQADD_zpzz, aa64_sve2, sve2_uqadd) +DO_ZPZZ(SQSUB_zpzz, aa64_sve2, sve2_sqsub) +DO_ZPZZ(UQSUB_zpzz, aa64_sve2, sve2_uqsub) +DO_ZPZZ(SUQADD, aa64_sve2, sve2_suqadd) +DO_ZPZZ(USQADD, aa64_sve2, sve2_usqadd) /* * SVE2 Widening Integer Arithmetic */ -static bool do_sve2_zzw_ool(DisasContext *s, arg_rrr_esz *a, - gen_helper_gvec_3 *fn, int data) -{ - if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) { - return false; - } - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - vec_full_reg_offset(s, 
a->rm), - vsz, vsz, data, fn); - } - return true; -} +static gen_helper_gvec_3 * const saddl_fns[4] = { + NULL, gen_helper_sve2_saddl_h, + gen_helper_sve2_saddl_s, gen_helper_sve2_saddl_d, +}; +TRANS_FEAT(SADDLB, aa64_sve2, gen_gvec_ool_arg_zzz, + saddl_fns[a->esz], a, 0) +TRANS_FEAT(SADDLT, aa64_sve2, gen_gvec_ool_arg_zzz, + saddl_fns[a->esz], a, 3) +TRANS_FEAT(SADDLBT, aa64_sve2, gen_gvec_ool_arg_zzz, + saddl_fns[a->esz], a, 2) -#define DO_SVE2_ZZZ_TB(NAME, name, SEL1, SEL2) \ -static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \ -{ \ - static gen_helper_gvec_3 * const fns[4] = { \ - NULL, gen_helper_sve2_##name##_h, \ - gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \ - }; \ - return do_sve2_zzw_ool(s, a, fns[a->esz], (SEL2 << 1) | SEL1); \ -} +static gen_helper_gvec_3 * const ssubl_fns[4] = { + NULL, gen_helper_sve2_ssubl_h, + gen_helper_sve2_ssubl_s, gen_helper_sve2_ssubl_d, +}; +TRANS_FEAT(SSUBLB, aa64_sve2, gen_gvec_ool_arg_zzz, + ssubl_fns[a->esz], a, 0) +TRANS_FEAT(SSUBLT, aa64_sve2, gen_gvec_ool_arg_zzz, + ssubl_fns[a->esz], a, 3) +TRANS_FEAT(SSUBLBT, aa64_sve2, gen_gvec_ool_arg_zzz, + ssubl_fns[a->esz], a, 2) +TRANS_FEAT(SSUBLTB, aa64_sve2, gen_gvec_ool_arg_zzz, + ssubl_fns[a->esz], a, 1) -DO_SVE2_ZZZ_TB(SADDLB, saddl, false, false) -DO_SVE2_ZZZ_TB(SSUBLB, ssubl, false, false) -DO_SVE2_ZZZ_TB(SABDLB, sabdl, false, false) +static gen_helper_gvec_3 * const sabdl_fns[4] = { + NULL, gen_helper_sve2_sabdl_h, + gen_helper_sve2_sabdl_s, gen_helper_sve2_sabdl_d, +}; +TRANS_FEAT(SABDLB, aa64_sve2, gen_gvec_ool_arg_zzz, + sabdl_fns[a->esz], a, 0) +TRANS_FEAT(SABDLT, aa64_sve2, gen_gvec_ool_arg_zzz, + sabdl_fns[a->esz], a, 3) -DO_SVE2_ZZZ_TB(UADDLB, uaddl, false, false) -DO_SVE2_ZZZ_TB(USUBLB, usubl, false, false) -DO_SVE2_ZZZ_TB(UABDLB, uabdl, false, false) +static gen_helper_gvec_3 * const uaddl_fns[4] = { + NULL, gen_helper_sve2_uaddl_h, + gen_helper_sve2_uaddl_s, gen_helper_sve2_uaddl_d, +}; +TRANS_FEAT(UADDLB, aa64_sve2, gen_gvec_ool_arg_zzz, + uaddl_fns[a->esz], a, 0) +TRANS_FEAT(UADDLT, aa64_sve2, gen_gvec_ool_arg_zzz, + uaddl_fns[a->esz], a, 3) -DO_SVE2_ZZZ_TB(SADDLT, saddl, true, true) -DO_SVE2_ZZZ_TB(SSUBLT, ssubl, true, true) -DO_SVE2_ZZZ_TB(SABDLT, sabdl, true, true) +static gen_helper_gvec_3 * const usubl_fns[4] = { + NULL, gen_helper_sve2_usubl_h, + gen_helper_sve2_usubl_s, gen_helper_sve2_usubl_d, +}; +TRANS_FEAT(USUBLB, aa64_sve2, gen_gvec_ool_arg_zzz, + usubl_fns[a->esz], a, 0) +TRANS_FEAT(USUBLT, aa64_sve2, gen_gvec_ool_arg_zzz, + usubl_fns[a->esz], a, 3) -DO_SVE2_ZZZ_TB(UADDLT, uaddl, true, true) -DO_SVE2_ZZZ_TB(USUBLT, usubl, true, true) -DO_SVE2_ZZZ_TB(UABDLT, uabdl, true, true) +static gen_helper_gvec_3 * const uabdl_fns[4] = { + NULL, gen_helper_sve2_uabdl_h, + gen_helper_sve2_uabdl_s, gen_helper_sve2_uabdl_d, +}; +TRANS_FEAT(UABDLB, aa64_sve2, gen_gvec_ool_arg_zzz, + uabdl_fns[a->esz], a, 0) +TRANS_FEAT(UABDLT, aa64_sve2, gen_gvec_ool_arg_zzz, + uabdl_fns[a->esz], a, 3) -DO_SVE2_ZZZ_TB(SADDLBT, saddl, false, true) -DO_SVE2_ZZZ_TB(SSUBLBT, ssubl, false, true) -DO_SVE2_ZZZ_TB(SSUBLTB, ssubl, true, false) +static gen_helper_gvec_3 * const sqdmull_fns[4] = { + NULL, gen_helper_sve2_sqdmull_zzz_h, + gen_helper_sve2_sqdmull_zzz_s, gen_helper_sve2_sqdmull_zzz_d, +}; +TRANS_FEAT(SQDMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, + sqdmull_fns[a->esz], a, 0) +TRANS_FEAT(SQDMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, + sqdmull_fns[a->esz], a, 3) -DO_SVE2_ZZZ_TB(SQDMULLB_zzz, sqdmull_zzz, false, false) -DO_SVE2_ZZZ_TB(SQDMULLT_zzz, sqdmull_zzz, true, true) +static 
gen_helper_gvec_3 * const smull_fns[4] = { + NULL, gen_helper_sve2_smull_zzz_h, + gen_helper_sve2_smull_zzz_s, gen_helper_sve2_smull_zzz_d, +}; +TRANS_FEAT(SMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, + smull_fns[a->esz], a, 0) +TRANS_FEAT(SMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, + smull_fns[a->esz], a, 3) -DO_SVE2_ZZZ_TB(SMULLB_zzz, smull_zzz, false, false) -DO_SVE2_ZZZ_TB(SMULLT_zzz, smull_zzz, true, true) +static gen_helper_gvec_3 * const umull_fns[4] = { + NULL, gen_helper_sve2_umull_zzz_h, + gen_helper_sve2_umull_zzz_s, gen_helper_sve2_umull_zzz_d, +}; +TRANS_FEAT(UMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, + umull_fns[a->esz], a, 0) +TRANS_FEAT(UMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, + umull_fns[a->esz], a, 3) -DO_SVE2_ZZZ_TB(UMULLB_zzz, umull_zzz, false, false) -DO_SVE2_ZZZ_TB(UMULLT_zzz, umull_zzz, true, true) - -static bool do_eor_tb(DisasContext *s, arg_rrr_esz *a, bool sel1) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve2_eoril_b, gen_helper_sve2_eoril_h, - gen_helper_sve2_eoril_s, gen_helper_sve2_eoril_d, - }; - return do_sve2_zzw_ool(s, a, fns[a->esz], (!sel1 << 1) | sel1); -} - -static bool trans_EORBT(DisasContext *s, arg_rrr_esz *a) -{ - return do_eor_tb(s, a, false); -} - -static bool trans_EORTB(DisasContext *s, arg_rrr_esz *a) -{ - return do_eor_tb(s, a, true); -} +static gen_helper_gvec_3 * const eoril_fns[4] = { + gen_helper_sve2_eoril_b, gen_helper_sve2_eoril_h, + gen_helper_sve2_eoril_s, gen_helper_sve2_eoril_d, +}; +TRANS_FEAT(EORBT, aa64_sve2, gen_gvec_ool_arg_zzz, eoril_fns[a->esz], a, 2) +TRANS_FEAT(EORTB, aa64_sve2, gen_gvec_ool_arg_zzz, eoril_fns[a->esz], a, 1) static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel) { @@ -6984,41 +6279,48 @@ static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel) gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h, NULL, gen_helper_sve2_pmull_d, }; - if (a->esz == 0 && !dc_isar_feature(aa64_sve2_pmull128, s)) { + + if (a->esz == 0) { + if (!dc_isar_feature(aa64_sve2_pmull128, s)) { + return false; + } + s->is_nonstreaming = true; + } else if (!dc_isar_feature(aa64_sve, s)) { return false; } - return do_sve2_zzw_ool(s, a, fns[a->esz], sel); + return gen_gvec_ool_arg_zzz(s, fns[a->esz], a, sel); } -static bool trans_PMULLB(DisasContext *s, arg_rrr_esz *a) -{ - return do_trans_pmull(s, a, false); -} +TRANS_FEAT(PMULLB, aa64_sve2, do_trans_pmull, a, false) +TRANS_FEAT(PMULLT, aa64_sve2, do_trans_pmull, a, true) -static bool trans_PMULLT(DisasContext *s, arg_rrr_esz *a) -{ - return do_trans_pmull(s, a, true); -} +static gen_helper_gvec_3 * const saddw_fns[4] = { + NULL, gen_helper_sve2_saddw_h, + gen_helper_sve2_saddw_s, gen_helper_sve2_saddw_d, +}; +TRANS_FEAT(SADDWB, aa64_sve2, gen_gvec_ool_arg_zzz, saddw_fns[a->esz], a, 0) +TRANS_FEAT(SADDWT, aa64_sve2, gen_gvec_ool_arg_zzz, saddw_fns[a->esz], a, 1) -#define DO_SVE2_ZZZ_WTB(NAME, name, SEL2) \ -static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \ -{ \ - static gen_helper_gvec_3 * const fns[4] = { \ - NULL, gen_helper_sve2_##name##_h, \ - gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \ - }; \ - return do_sve2_zzw_ool(s, a, fns[a->esz], SEL2); \ -} +static gen_helper_gvec_3 * const ssubw_fns[4] = { + NULL, gen_helper_sve2_ssubw_h, + gen_helper_sve2_ssubw_s, gen_helper_sve2_ssubw_d, +}; +TRANS_FEAT(SSUBWB, aa64_sve2, gen_gvec_ool_arg_zzz, ssubw_fns[a->esz], a, 0) +TRANS_FEAT(SSUBWT, aa64_sve2, gen_gvec_ool_arg_zzz, ssubw_fns[a->esz], a, 1) -DO_SVE2_ZZZ_WTB(SADDWB, saddw, false) -DO_SVE2_ZZZ_WTB(SADDWT, saddw, 
true) -DO_SVE2_ZZZ_WTB(SSUBWB, ssubw, false) -DO_SVE2_ZZZ_WTB(SSUBWT, ssubw, true) +static gen_helper_gvec_3 * const uaddw_fns[4] = { + NULL, gen_helper_sve2_uaddw_h, + gen_helper_sve2_uaddw_s, gen_helper_sve2_uaddw_d, +}; +TRANS_FEAT(UADDWB, aa64_sve2, gen_gvec_ool_arg_zzz, uaddw_fns[a->esz], a, 0) +TRANS_FEAT(UADDWT, aa64_sve2, gen_gvec_ool_arg_zzz, uaddw_fns[a->esz], a, 1) -DO_SVE2_ZZZ_WTB(UADDWB, uaddw, false) -DO_SVE2_ZZZ_WTB(UADDWT, uaddw, true) -DO_SVE2_ZZZ_WTB(USUBWB, usubw, false) -DO_SVE2_ZZZ_WTB(USUBWT, usubw, true) +static gen_helper_gvec_3 * const usubw_fns[4] = { + NULL, gen_helper_sve2_usubw_h, + gen_helper_sve2_usubw_s, gen_helper_sve2_usubw_d, +}; +TRANS_FEAT(USUBWB, aa64_sve2, gen_gvec_ool_arg_zzz, usubw_fns[a->esz], a, 0) +TRANS_FEAT(USUBWT, aa64_sve2, gen_gvec_ool_arg_zzz, usubw_fns[a->esz], a, 1) static void gen_sshll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm) { @@ -7107,46 +6409,11 @@ static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm) } } -static bool do_sve2_shll_tb(DisasContext *s, arg_rri_esz *a, - bool sel, bool uns) +static bool do_shll_tb(DisasContext *s, arg_rri_esz *a, + const GVecGen2i ops[3], bool sel) { - static const TCGOpcode sshll_list[] = { - INDEX_op_shli_vec, INDEX_op_sari_vec, 0 - }; - static const TCGOpcode ushll_list[] = { - INDEX_op_shli_vec, INDEX_op_shri_vec, 0 - }; - static const GVecGen2i ops[2][3] = { - { { .fniv = gen_sshll_vec, - .opt_opc = sshll_list, - .fno = gen_helper_sve2_sshll_h, - .vece = MO_16 }, - { .fniv = gen_sshll_vec, - .opt_opc = sshll_list, - .fno = gen_helper_sve2_sshll_s, - .vece = MO_32 }, - { .fniv = gen_sshll_vec, - .opt_opc = sshll_list, - .fno = gen_helper_sve2_sshll_d, - .vece = MO_64 } }, - { { .fni8 = gen_ushll16_i64, - .fniv = gen_ushll_vec, - .opt_opc = ushll_list, - .fno = gen_helper_sve2_ushll_h, - .vece = MO_16 }, - { .fni8 = gen_ushll32_i64, - .fniv = gen_ushll_vec, - .opt_opc = ushll_list, - .fno = gen_helper_sve2_ushll_s, - .vece = MO_32 }, - { .fni8 = gen_ushll64_i64, - .fniv = gen_ushll_vec, - .opt_opc = ushll_list, - .fno = gen_helper_sve2_ushll_d, - .vece = MO_64 } }, - }; - if (a->esz < 0 || a->esz > 2 || !dc_isar_feature(aa64_sve2, s)) { + if (a->esz < 0 || a->esz > 2) { return false; } if (sve_access_check(s)) { @@ -7154,140 +6421,106 @@ static bool do_sve2_shll_tb(DisasContext *s, arg_rri_esz *a, tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vsz, vsz, (a->imm << 1) | sel, - &ops[uns][a->esz]); + &ops[a->esz]); } return true; } -static bool trans_SSHLLB(DisasContext *s, arg_rri_esz *a) -{ - return do_sve2_shll_tb(s, a, false, false); -} +static const TCGOpcode sshll_list[] = { + INDEX_op_shli_vec, INDEX_op_sari_vec, 0 +}; +static const GVecGen2i sshll_ops[3] = { + { .fniv = gen_sshll_vec, + .opt_opc = sshll_list, + .fno = gen_helper_sve2_sshll_h, + .vece = MO_16 }, + { .fniv = gen_sshll_vec, + .opt_opc = sshll_list, + .fno = gen_helper_sve2_sshll_s, + .vece = MO_32 }, + { .fniv = gen_sshll_vec, + .opt_opc = sshll_list, + .fno = gen_helper_sve2_sshll_d, + .vece = MO_64 } +}; +TRANS_FEAT(SSHLLB, aa64_sve2, do_shll_tb, a, sshll_ops, false) +TRANS_FEAT(SSHLLT, aa64_sve2, do_shll_tb, a, sshll_ops, true) -static bool trans_SSHLLT(DisasContext *s, arg_rri_esz *a) -{ - return do_sve2_shll_tb(s, a, true, false); -} +static const TCGOpcode ushll_list[] = { + INDEX_op_shli_vec, INDEX_op_shri_vec, 0 +}; +static const GVecGen2i ushll_ops[3] = { + { .fni8 = gen_ushll16_i64, + .fniv = gen_ushll_vec, + .opt_opc = ushll_list, + .fno = 
gen_helper_sve2_ushll_h, + .vece = MO_16 }, + { .fni8 = gen_ushll32_i64, + .fniv = gen_ushll_vec, + .opt_opc = ushll_list, + .fno = gen_helper_sve2_ushll_s, + .vece = MO_32 }, + { .fni8 = gen_ushll64_i64, + .fniv = gen_ushll_vec, + .opt_opc = ushll_list, + .fno = gen_helper_sve2_ushll_d, + .vece = MO_64 }, +}; +TRANS_FEAT(USHLLB, aa64_sve2, do_shll_tb, a, ushll_ops, false) +TRANS_FEAT(USHLLT, aa64_sve2, do_shll_tb, a, ushll_ops, true) -static bool trans_USHLLB(DisasContext *s, arg_rri_esz *a) -{ - return do_sve2_shll_tb(s, a, false, true); -} +static gen_helper_gvec_3 * const bext_fns[4] = { + gen_helper_sve2_bext_b, gen_helper_sve2_bext_h, + gen_helper_sve2_bext_s, gen_helper_sve2_bext_d, +}; +TRANS_FEAT_NONSTREAMING(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, + bext_fns[a->esz], a, 0) -static bool trans_USHLLT(DisasContext *s, arg_rri_esz *a) -{ - return do_sve2_shll_tb(s, a, true, true); -} +static gen_helper_gvec_3 * const bdep_fns[4] = { + gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h, + gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d, +}; +TRANS_FEAT_NONSTREAMING(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, + bdep_fns[a->esz], a, 0) -static bool trans_BEXT(DisasContext *s, arg_rrr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve2_bext_b, gen_helper_sve2_bext_h, - gen_helper_sve2_bext_s, gen_helper_sve2_bext_d, - }; - if (!dc_isar_feature(aa64_sve2_bitperm, s)) { - return false; - } - return do_sve2_zzw_ool(s, a, fns[a->esz], 0); -} +static gen_helper_gvec_3 * const bgrp_fns[4] = { + gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h, + gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d, +}; +TRANS_FEAT_NONSTREAMING(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, + bgrp_fns[a->esz], a, 0) -static bool trans_BDEP(DisasContext *s, arg_rrr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h, - gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d, - }; - if (!dc_isar_feature(aa64_sve2_bitperm, s)) { - return false; - } - return do_sve2_zzw_ool(s, a, fns[a->esz], 0); -} +static gen_helper_gvec_3 * const cadd_fns[4] = { + gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h, + gen_helper_sve2_cadd_s, gen_helper_sve2_cadd_d, +}; +TRANS_FEAT(CADD_rot90, aa64_sve2, gen_gvec_ool_arg_zzz, + cadd_fns[a->esz], a, 0) +TRANS_FEAT(CADD_rot270, aa64_sve2, gen_gvec_ool_arg_zzz, + cadd_fns[a->esz], a, 1) -static bool trans_BGRP(DisasContext *s, arg_rrr_esz *a) -{ - static gen_helper_gvec_3 * const fns[4] = { - gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h, - gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d, - }; - if (!dc_isar_feature(aa64_sve2_bitperm, s)) { - return false; - } - return do_sve2_zzw_ool(s, a, fns[a->esz], 0); -} +static gen_helper_gvec_3 * const sqcadd_fns[4] = { + gen_helper_sve2_sqcadd_b, gen_helper_sve2_sqcadd_h, + gen_helper_sve2_sqcadd_s, gen_helper_sve2_sqcadd_d, +}; +TRANS_FEAT(SQCADD_rot90, aa64_sve2, gen_gvec_ool_arg_zzz, + sqcadd_fns[a->esz], a, 0) +TRANS_FEAT(SQCADD_rot270, aa64_sve2, gen_gvec_ool_arg_zzz, + sqcadd_fns[a->esz], a, 1) -static bool do_cadd(DisasContext *s, arg_rrr_esz *a, bool sq, bool rot) -{ - static gen_helper_gvec_3 * const fns[2][4] = { - { gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h, - gen_helper_sve2_cadd_s, gen_helper_sve2_cadd_d }, - { gen_helper_sve2_sqcadd_b, gen_helper_sve2_sqcadd_h, - gen_helper_sve2_sqcadd_s, gen_helper_sve2_sqcadd_d }, - }; - return do_sve2_zzw_ool(s, a, fns[sq][a->esz], rot); -} +static gen_helper_gvec_4 * const sabal_fns[4] = { + NULL, gen_helper_sve2_sabal_h, + 
gen_helper_sve2_sabal_s, gen_helper_sve2_sabal_d, +}; +TRANS_FEAT(SABALB, aa64_sve2, gen_gvec_ool_arg_zzzz, sabal_fns[a->esz], a, 0) +TRANS_FEAT(SABALT, aa64_sve2, gen_gvec_ool_arg_zzzz, sabal_fns[a->esz], a, 1) -static bool trans_CADD_rot90(DisasContext *s, arg_rrr_esz *a) -{ - return do_cadd(s, a, false, false); -} - -static bool trans_CADD_rot270(DisasContext *s, arg_rrr_esz *a) -{ - return do_cadd(s, a, false, true); -} - -static bool trans_SQCADD_rot90(DisasContext *s, arg_rrr_esz *a) -{ - return do_cadd(s, a, true, false); -} - -static bool trans_SQCADD_rot270(DisasContext *s, arg_rrr_esz *a) -{ - return do_cadd(s, a, true, true); -} - -static bool do_sve2_zzzz_ool(DisasContext *s, arg_rrrr_esz *a, - gen_helper_gvec_4 *fn, int data) -{ - if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data); - } - return true; -} - -static bool do_abal(DisasContext *s, arg_rrrr_esz *a, bool uns, bool sel) -{ - static gen_helper_gvec_4 * const fns[2][4] = { - { NULL, gen_helper_sve2_sabal_h, - gen_helper_sve2_sabal_s, gen_helper_sve2_sabal_d }, - { NULL, gen_helper_sve2_uabal_h, - gen_helper_sve2_uabal_s, gen_helper_sve2_uabal_d }, - }; - return do_sve2_zzzz_ool(s, a, fns[uns][a->esz], sel); -} - -static bool trans_SABALB(DisasContext *s, arg_rrrr_esz *a) -{ - return do_abal(s, a, false, false); -} - -static bool trans_SABALT(DisasContext *s, arg_rrrr_esz *a) -{ - return do_abal(s, a, false, true); -} - -static bool trans_UABALB(DisasContext *s, arg_rrrr_esz *a) -{ - return do_abal(s, a, true, false); -} - -static bool trans_UABALT(DisasContext *s, arg_rrrr_esz *a) -{ - return do_abal(s, a, true, true); -} +static gen_helper_gvec_4 * const uabal_fns[4] = { + NULL, gen_helper_sve2_uabal_h, + gen_helper_sve2_uabal_s, gen_helper_sve2_uabal_d, +}; +TRANS_FEAT(UABALB, aa64_sve2, gen_gvec_ool_arg_zzzz, uabal_fns[a->esz], a, 0) +TRANS_FEAT(UABALT, aa64_sve2, gen_gvec_ool_arg_zzzz, uabal_fns[a->esz], a, 1) static bool do_adcl(DisasContext *s, arg_rrrr_esz *a, bool sel) { @@ -7299,89 +6532,26 @@ static bool do_adcl(DisasContext *s, arg_rrrr_esz *a, bool sel) * Note that in this case the ESZ field encodes both size and sign. * Split out 'subtract' into bit 1 of the data field for the helper. 
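 *
 * A minimal sketch of that packing, assuming only what is visible in
 * the call below (fns[a->esz & 1] picks the 32-bit vs 64-bit helper):
 *
 *     int data = (a->esz & 2) | sel;
 *     bool subtract = data & 2;   // taken from bit 1 of the ESZ field
 *     bool top      = data & 1;   // bottom (B) vs top (T) element pairs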
*/ - return do_sve2_zzzz_ool(s, a, fns[a->esz & 1], (a->esz & 2) | sel); + return gen_gvec_ool_arg_zzzz(s, fns[a->esz & 1], a, (a->esz & 2) | sel); } -static bool trans_ADCLB(DisasContext *s, arg_rrrr_esz *a) -{ - return do_adcl(s, a, false); -} +TRANS_FEAT(ADCLB, aa64_sve2, do_adcl, a, false) +TRANS_FEAT(ADCLT, aa64_sve2, do_adcl, a, true) -static bool trans_ADCLT(DisasContext *s, arg_rrrr_esz *a) -{ - return do_adcl(s, a, true); -} +TRANS_FEAT(SSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_ssra, a) +TRANS_FEAT(USRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_usra, a) +TRANS_FEAT(SRSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_srsra, a) +TRANS_FEAT(URSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_ursra, a) +TRANS_FEAT(SRI, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_sri, a) +TRANS_FEAT(SLI, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_sli, a) -static bool do_sve2_fn2i(DisasContext *s, arg_rri_esz *a, GVecGen2iFn *fn) -{ - if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) { - return false; - } - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - unsigned rd_ofs = vec_full_reg_offset(s, a->rd); - unsigned rn_ofs = vec_full_reg_offset(s, a->rn); - fn(a->esz, rd_ofs, rn_ofs, a->imm, vsz, vsz); - } - return true; -} +TRANS_FEAT(SABA, aa64_sve2, gen_gvec_fn_arg_zzz, gen_gvec_saba, a) +TRANS_FEAT(UABA, aa64_sve2, gen_gvec_fn_arg_zzz, gen_gvec_uaba, a) -static bool trans_SSRA(DisasContext *s, arg_rri_esz *a) +static bool do_narrow_extract(DisasContext *s, arg_rri_esz *a, + const GVecGen2 ops[3]) { - return do_sve2_fn2i(s, a, gen_gvec_ssra); -} - -static bool trans_USRA(DisasContext *s, arg_rri_esz *a) -{ - return do_sve2_fn2i(s, a, gen_gvec_usra); -} - -static bool trans_SRSRA(DisasContext *s, arg_rri_esz *a) -{ - return do_sve2_fn2i(s, a, gen_gvec_srsra); -} - -static bool trans_URSRA(DisasContext *s, arg_rri_esz *a) -{ - return do_sve2_fn2i(s, a, gen_gvec_ursra); -} - -static bool trans_SRI(DisasContext *s, arg_rri_esz *a) -{ - return do_sve2_fn2i(s, a, gen_gvec_sri); -} - -static bool trans_SLI(DisasContext *s, arg_rri_esz *a) -{ - return do_sve2_fn2i(s, a, gen_gvec_sli); -} - -static bool do_sve2_fn_zzz(DisasContext *s, arg_rrr_esz *a, GVecGen3Fn *fn) -{ - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_fn_zzz(s, fn, a->esz, a->rd, a->rn, a->rm); - } - return true; -} - -static bool trans_SABA(DisasContext *s, arg_rrr_esz *a) -{ - return do_sve2_fn_zzz(s, a, gen_gvec_saba); -} - -static bool trans_UABA(DisasContext *s, arg_rrr_esz *a) -{ - return do_sve2_fn_zzz(s, a, gen_gvec_uaba); -} - -static bool do_sve2_narrow_extract(DisasContext *s, arg_rri_esz *a, - const GVecGen2 ops[3]) -{ - if (a->esz < 0 || a->esz > MO_32 || a->imm != 0 || - !dc_isar_feature(aa64_sve2, s)) { + if (a->esz < 0 || a->esz > MO_32 || a->imm != 0) { return false; } if (sve_access_check(s)) { @@ -7414,24 +6584,21 @@ static void gen_sqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) tcg_temp_free_vec(t); } -static bool trans_SQXTNB(DisasContext *s, arg_rri_esz *a) -{ - static const GVecGen2 ops[3] = { - { .fniv = gen_sqxtnb_vec, - .opt_opc = sqxtn_list, - .fno = gen_helper_sve2_sqxtnb_h, - .vece = MO_16 }, - { .fniv = gen_sqxtnb_vec, - .opt_opc = sqxtn_list, - .fno = gen_helper_sve2_sqxtnb_s, - .vece = MO_32 }, - { .fniv = gen_sqxtnb_vec, - .opt_opc = sqxtn_list, - .fno = gen_helper_sve2_sqxtnb_d, - .vece = MO_64 }, - }; - return do_sve2_narrow_extract(s, a, ops); -} +static const GVecGen2 sqxtnb_ops[3] = { + { .fniv = gen_sqxtnb_vec, + .opt_opc = sqxtn_list, + .fno 
= gen_helper_sve2_sqxtnb_h, + .vece = MO_16 }, + { .fniv = gen_sqxtnb_vec, + .opt_opc = sqxtn_list, + .fno = gen_helper_sve2_sqxtnb_s, + .vece = MO_32 }, + { .fniv = gen_sqxtnb_vec, + .opt_opc = sqxtn_list, + .fno = gen_helper_sve2_sqxtnb_d, + .vece = MO_64 }, +}; +TRANS_FEAT(SQXTNB, aa64_sve2, do_narrow_extract, a, sqxtnb_ops) static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) { @@ -7451,27 +6618,24 @@ static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) tcg_temp_free_vec(t); } -static bool trans_SQXTNT(DisasContext *s, arg_rri_esz *a) -{ - static const GVecGen2 ops[3] = { - { .fniv = gen_sqxtnt_vec, - .opt_opc = sqxtn_list, - .load_dest = true, - .fno = gen_helper_sve2_sqxtnt_h, - .vece = MO_16 }, - { .fniv = gen_sqxtnt_vec, - .opt_opc = sqxtn_list, - .load_dest = true, - .fno = gen_helper_sve2_sqxtnt_s, - .vece = MO_32 }, - { .fniv = gen_sqxtnt_vec, - .opt_opc = sqxtn_list, - .load_dest = true, - .fno = gen_helper_sve2_sqxtnt_d, - .vece = MO_64 }, - }; - return do_sve2_narrow_extract(s, a, ops); -} +static const GVecGen2 sqxtnt_ops[3] = { + { .fniv = gen_sqxtnt_vec, + .opt_opc = sqxtn_list, + .load_dest = true, + .fno = gen_helper_sve2_sqxtnt_h, + .vece = MO_16 }, + { .fniv = gen_sqxtnt_vec, + .opt_opc = sqxtn_list, + .load_dest = true, + .fno = gen_helper_sve2_sqxtnt_s, + .vece = MO_32 }, + { .fniv = gen_sqxtnt_vec, + .opt_opc = sqxtn_list, + .load_dest = true, + .fno = gen_helper_sve2_sqxtnt_d, + .vece = MO_64 }, +}; +TRANS_FEAT(SQXTNT, aa64_sve2, do_narrow_extract, a, sqxtnt_ops) static const TCGOpcode uqxtn_list[] = { INDEX_op_shli_vec, INDEX_op_umin_vec, 0 @@ -7488,24 +6652,21 @@ static void gen_uqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) tcg_temp_free_vec(t); } -static bool trans_UQXTNB(DisasContext *s, arg_rri_esz *a) -{ - static const GVecGen2 ops[3] = { - { .fniv = gen_uqxtnb_vec, - .opt_opc = uqxtn_list, - .fno = gen_helper_sve2_uqxtnb_h, - .vece = MO_16 }, - { .fniv = gen_uqxtnb_vec, - .opt_opc = uqxtn_list, - .fno = gen_helper_sve2_uqxtnb_s, - .vece = MO_32 }, - { .fniv = gen_uqxtnb_vec, - .opt_opc = uqxtn_list, - .fno = gen_helper_sve2_uqxtnb_d, - .vece = MO_64 }, - }; - return do_sve2_narrow_extract(s, a, ops); -} +static const GVecGen2 uqxtnb_ops[3] = { + { .fniv = gen_uqxtnb_vec, + .opt_opc = uqxtn_list, + .fno = gen_helper_sve2_uqxtnb_h, + .vece = MO_16 }, + { .fniv = gen_uqxtnb_vec, + .opt_opc = uqxtn_list, + .fno = gen_helper_sve2_uqxtnb_s, + .vece = MO_32 }, + { .fniv = gen_uqxtnb_vec, + .opt_opc = uqxtn_list, + .fno = gen_helper_sve2_uqxtnb_d, + .vece = MO_64 }, +}; +TRANS_FEAT(UQXTNB, aa64_sve2, do_narrow_extract, a, uqxtnb_ops) static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) { @@ -7520,27 +6681,24 @@ static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) tcg_temp_free_vec(t); } -static bool trans_UQXTNT(DisasContext *s, arg_rri_esz *a) -{ - static const GVecGen2 ops[3] = { - { .fniv = gen_uqxtnt_vec, - .opt_opc = uqxtn_list, - .load_dest = true, - .fno = gen_helper_sve2_uqxtnt_h, - .vece = MO_16 }, - { .fniv = gen_uqxtnt_vec, - .opt_opc = uqxtn_list, - .load_dest = true, - .fno = gen_helper_sve2_uqxtnt_s, - .vece = MO_32 }, - { .fniv = gen_uqxtnt_vec, - .opt_opc = uqxtn_list, - .load_dest = true, - .fno = gen_helper_sve2_uqxtnt_d, - .vece = MO_64 }, - }; - return do_sve2_narrow_extract(s, a, ops); -} +static const GVecGen2 uqxtnt_ops[3] = { + { .fniv = gen_uqxtnt_vec, + .opt_opc = uqxtn_list, + .load_dest = true, + .fno = gen_helper_sve2_uqxtnt_h, + .vece = MO_16 }, + { .fniv = gen_uqxtnt_vec, + 
.opt_opc = uqxtn_list, + .load_dest = true, + .fno = gen_helper_sve2_uqxtnt_s, + .vece = MO_32 }, + { .fniv = gen_uqxtnt_vec, + .opt_opc = uqxtn_list, + .load_dest = true, + .fno = gen_helper_sve2_uqxtnt_d, + .vece = MO_64 }, +}; +TRANS_FEAT(UQXTNT, aa64_sve2, do_narrow_extract, a, uqxtnt_ops) static const TCGOpcode sqxtun_list[] = { INDEX_op_shli_vec, INDEX_op_umin_vec, INDEX_op_smax_vec, 0 @@ -7559,24 +6717,21 @@ static void gen_sqxtunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) tcg_temp_free_vec(t); } -static bool trans_SQXTUNB(DisasContext *s, arg_rri_esz *a) -{ - static const GVecGen2 ops[3] = { - { .fniv = gen_sqxtunb_vec, - .opt_opc = sqxtun_list, - .fno = gen_helper_sve2_sqxtunb_h, - .vece = MO_16 }, - { .fniv = gen_sqxtunb_vec, - .opt_opc = sqxtun_list, - .fno = gen_helper_sve2_sqxtunb_s, - .vece = MO_32 }, - { .fniv = gen_sqxtunb_vec, - .opt_opc = sqxtun_list, - .fno = gen_helper_sve2_sqxtunb_d, - .vece = MO_64 }, - }; - return do_sve2_narrow_extract(s, a, ops); -} +static const GVecGen2 sqxtunb_ops[3] = { + { .fniv = gen_sqxtunb_vec, + .opt_opc = sqxtun_list, + .fno = gen_helper_sve2_sqxtunb_h, + .vece = MO_16 }, + { .fniv = gen_sqxtunb_vec, + .opt_opc = sqxtun_list, + .fno = gen_helper_sve2_sqxtunb_s, + .vece = MO_32 }, + { .fniv = gen_sqxtunb_vec, + .opt_opc = sqxtun_list, + .fno = gen_helper_sve2_sqxtunb_d, + .vece = MO_64 }, +}; +TRANS_FEAT(SQXTUNB, aa64_sve2, do_narrow_extract, a, sqxtunb_ops) static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) { @@ -7593,32 +6748,29 @@ static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) tcg_temp_free_vec(t); } -static bool trans_SQXTUNT(DisasContext *s, arg_rri_esz *a) -{ - static const GVecGen2 ops[3] = { - { .fniv = gen_sqxtunt_vec, - .opt_opc = sqxtun_list, - .load_dest = true, - .fno = gen_helper_sve2_sqxtunt_h, - .vece = MO_16 }, - { .fniv = gen_sqxtunt_vec, - .opt_opc = sqxtun_list, - .load_dest = true, - .fno = gen_helper_sve2_sqxtunt_s, - .vece = MO_32 }, - { .fniv = gen_sqxtunt_vec, - .opt_opc = sqxtun_list, - .load_dest = true, - .fno = gen_helper_sve2_sqxtunt_d, - .vece = MO_64 }, - }; - return do_sve2_narrow_extract(s, a, ops); -} +static const GVecGen2 sqxtunt_ops[3] = { + { .fniv = gen_sqxtunt_vec, + .opt_opc = sqxtun_list, + .load_dest = true, + .fno = gen_helper_sve2_sqxtunt_h, + .vece = MO_16 }, + { .fniv = gen_sqxtunt_vec, + .opt_opc = sqxtun_list, + .load_dest = true, + .fno = gen_helper_sve2_sqxtunt_s, + .vece = MO_32 }, + { .fniv = gen_sqxtunt_vec, + .opt_opc = sqxtun_list, + .load_dest = true, + .fno = gen_helper_sve2_sqxtunt_d, + .vece = MO_64 }, +}; +TRANS_FEAT(SQXTUNT, aa64_sve2, do_narrow_extract, a, sqxtunt_ops) -static bool do_sve2_shr_narrow(DisasContext *s, arg_rri_esz *a, - const GVecGen2i ops[3]) +static bool do_shr_narrow(DisasContext *s, arg_rri_esz *a, + const GVecGen2i ops[3]) { - if (a->esz < 0 || a->esz > MO_32 || !dc_isar_feature(aa64_sve2, s)) { + if (a->esz < 0 || a->esz > MO_32) { return false; } assert(a->imm > 0 && a->imm <= (8 << a->esz)); @@ -7667,28 +6819,25 @@ static void gen_shrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) tcg_temp_free_vec(t); } -static bool trans_SHRNB(DisasContext *s, arg_rri_esz *a) -{ - static const TCGOpcode vec_list[] = { INDEX_op_shri_vec, 0 }; - static const GVecGen2i ops[3] = { - { .fni8 = gen_shrnb16_i64, - .fniv = gen_shrnb_vec, - .opt_opc = vec_list, - .fno = gen_helper_sve2_shrnb_h, - .vece = MO_16 }, - { .fni8 = gen_shrnb32_i64, - .fniv = gen_shrnb_vec, - .opt_opc = vec_list, - .fno = gen_helper_sve2_shrnb_s, - .vece 
= MO_32 }, - { .fni8 = gen_shrnb64_i64, - .fniv = gen_shrnb_vec, - .opt_opc = vec_list, - .fno = gen_helper_sve2_shrnb_d, - .vece = MO_64 }, - }; - return do_sve2_shr_narrow(s, a, ops); -} +static const TCGOpcode shrnb_vec_list[] = { INDEX_op_shri_vec, 0 }; +static const GVecGen2i shrnb_ops[3] = { + { .fni8 = gen_shrnb16_i64, + .fniv = gen_shrnb_vec, + .opt_opc = shrnb_vec_list, + .fno = gen_helper_sve2_shrnb_h, + .vece = MO_16 }, + { .fni8 = gen_shrnb32_i64, + .fniv = gen_shrnb_vec, + .opt_opc = shrnb_vec_list, + .fno = gen_helper_sve2_shrnb_s, + .vece = MO_32 }, + { .fni8 = gen_shrnb64_i64, + .fniv = gen_shrnb_vec, + .opt_opc = shrnb_vec_list, + .fno = gen_helper_sve2_shrnb_d, + .vece = MO_64 }, +}; +TRANS_FEAT(SHRNB, aa64_sve2, do_shr_narrow, a, shrnb_ops) static void gen_shrnt_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr) { @@ -7729,51 +6878,42 @@ static void gen_shrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) tcg_temp_free_vec(t); } -static bool trans_SHRNT(DisasContext *s, arg_rri_esz *a) -{ - static const TCGOpcode vec_list[] = { INDEX_op_shli_vec, 0 }; - static const GVecGen2i ops[3] = { - { .fni8 = gen_shrnt16_i64, - .fniv = gen_shrnt_vec, - .opt_opc = vec_list, - .load_dest = true, - .fno = gen_helper_sve2_shrnt_h, - .vece = MO_16 }, - { .fni8 = gen_shrnt32_i64, - .fniv = gen_shrnt_vec, - .opt_opc = vec_list, - .load_dest = true, - .fno = gen_helper_sve2_shrnt_s, - .vece = MO_32 }, - { .fni8 = gen_shrnt64_i64, - .fniv = gen_shrnt_vec, - .opt_opc = vec_list, - .load_dest = true, - .fno = gen_helper_sve2_shrnt_d, - .vece = MO_64 }, - }; - return do_sve2_shr_narrow(s, a, ops); -} +static const TCGOpcode shrnt_vec_list[] = { INDEX_op_shli_vec, 0 }; +static const GVecGen2i shrnt_ops[3] = { + { .fni8 = gen_shrnt16_i64, + .fniv = gen_shrnt_vec, + .opt_opc = shrnt_vec_list, + .load_dest = true, + .fno = gen_helper_sve2_shrnt_h, + .vece = MO_16 }, + { .fni8 = gen_shrnt32_i64, + .fniv = gen_shrnt_vec, + .opt_opc = shrnt_vec_list, + .load_dest = true, + .fno = gen_helper_sve2_shrnt_s, + .vece = MO_32 }, + { .fni8 = gen_shrnt64_i64, + .fniv = gen_shrnt_vec, + .opt_opc = shrnt_vec_list, + .load_dest = true, + .fno = gen_helper_sve2_shrnt_d, + .vece = MO_64 }, +}; +TRANS_FEAT(SHRNT, aa64_sve2, do_shr_narrow, a, shrnt_ops) -static bool trans_RSHRNB(DisasContext *s, arg_rri_esz *a) -{ - static const GVecGen2i ops[3] = { - { .fno = gen_helper_sve2_rshrnb_h }, - { .fno = gen_helper_sve2_rshrnb_s }, - { .fno = gen_helper_sve2_rshrnb_d }, - }; - return do_sve2_shr_narrow(s, a, ops); -} +static const GVecGen2i rshrnb_ops[3] = { + { .fno = gen_helper_sve2_rshrnb_h }, + { .fno = gen_helper_sve2_rshrnb_s }, + { .fno = gen_helper_sve2_rshrnb_d }, +}; +TRANS_FEAT(RSHRNB, aa64_sve2, do_shr_narrow, a, rshrnb_ops) -static bool trans_RSHRNT(DisasContext *s, arg_rri_esz *a) -{ - static const GVecGen2i ops[3] = { - { .fno = gen_helper_sve2_rshrnt_h }, - { .fno = gen_helper_sve2_rshrnt_s }, - { .fno = gen_helper_sve2_rshrnt_d }, - }; - return do_sve2_shr_narrow(s, a, ops); -} +static const GVecGen2i rshrnt_ops[3] = { + { .fno = gen_helper_sve2_rshrnt_h }, + { .fno = gen_helper_sve2_rshrnt_s }, + { .fno = gen_helper_sve2_rshrnt_d }, +}; +TRANS_FEAT(RSHRNT, aa64_sve2, do_shr_narrow, a, rshrnt_ops) static void gen_sqshrunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) @@ -7789,27 +6929,24 @@ static void gen_sqshrunb_vec(unsigned vece, TCGv_vec d, tcg_temp_free_vec(t); } -static bool trans_SQSHRUNB(DisasContext *s, arg_rri_esz *a) -{ - static const TCGOpcode vec_list[] = { - 
INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_umin_vec, 0 - }; - static const GVecGen2i ops[3] = { - { .fniv = gen_sqshrunb_vec, - .opt_opc = vec_list, - .fno = gen_helper_sve2_sqshrunb_h, - .vece = MO_16 }, - { .fniv = gen_sqshrunb_vec, - .opt_opc = vec_list, - .fno = gen_helper_sve2_sqshrunb_s, - .vece = MO_32 }, - { .fniv = gen_sqshrunb_vec, - .opt_opc = vec_list, - .fno = gen_helper_sve2_sqshrunb_d, - .vece = MO_64 }, - }; - return do_sve2_shr_narrow(s, a, ops); -} +static const TCGOpcode sqshrunb_vec_list[] = { + INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_umin_vec, 0 +}; +static const GVecGen2i sqshrunb_ops[3] = { + { .fniv = gen_sqshrunb_vec, + .opt_opc = sqshrunb_vec_list, + .fno = gen_helper_sve2_sqshrunb_h, + .vece = MO_16 }, + { .fniv = gen_sqshrunb_vec, + .opt_opc = sqshrunb_vec_list, + .fno = gen_helper_sve2_sqshrunb_s, + .vece = MO_32 }, + { .fniv = gen_sqshrunb_vec, + .opt_opc = sqshrunb_vec_list, + .fno = gen_helper_sve2_sqshrunb_d, + .vece = MO_64 }, +}; +TRANS_FEAT(SQSHRUNB, aa64_sve2, do_shr_narrow, a, sqshrunb_ops) static void gen_sqshrunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) @@ -7827,51 +6964,42 @@ static void gen_sqshrunt_vec(unsigned vece, TCGv_vec d, tcg_temp_free_vec(t); } -static bool trans_SQSHRUNT(DisasContext *s, arg_rri_esz *a) -{ - static const TCGOpcode vec_list[] = { - INDEX_op_shli_vec, INDEX_op_sari_vec, - INDEX_op_smax_vec, INDEX_op_umin_vec, 0 - }; - static const GVecGen2i ops[3] = { - { .fniv = gen_sqshrunt_vec, - .opt_opc = vec_list, - .load_dest = true, - .fno = gen_helper_sve2_sqshrunt_h, - .vece = MO_16 }, - { .fniv = gen_sqshrunt_vec, - .opt_opc = vec_list, - .load_dest = true, - .fno = gen_helper_sve2_sqshrunt_s, - .vece = MO_32 }, - { .fniv = gen_sqshrunt_vec, - .opt_opc = vec_list, - .load_dest = true, - .fno = gen_helper_sve2_sqshrunt_d, - .vece = MO_64 }, - }; - return do_sve2_shr_narrow(s, a, ops); -} +static const TCGOpcode sqshrunt_vec_list[] = { + INDEX_op_shli_vec, INDEX_op_sari_vec, + INDEX_op_smax_vec, INDEX_op_umin_vec, 0 +}; +static const GVecGen2i sqshrunt_ops[3] = { + { .fniv = gen_sqshrunt_vec, + .opt_opc = sqshrunt_vec_list, + .load_dest = true, + .fno = gen_helper_sve2_sqshrunt_h, + .vece = MO_16 }, + { .fniv = gen_sqshrunt_vec, + .opt_opc = sqshrunt_vec_list, + .load_dest = true, + .fno = gen_helper_sve2_sqshrunt_s, + .vece = MO_32 }, + { .fniv = gen_sqshrunt_vec, + .opt_opc = sqshrunt_vec_list, + .load_dest = true, + .fno = gen_helper_sve2_sqshrunt_d, + .vece = MO_64 }, +}; +TRANS_FEAT(SQSHRUNT, aa64_sve2, do_shr_narrow, a, sqshrunt_ops) -static bool trans_SQRSHRUNB(DisasContext *s, arg_rri_esz *a) -{ - static const GVecGen2i ops[3] = { - { .fno = gen_helper_sve2_sqrshrunb_h }, - { .fno = gen_helper_sve2_sqrshrunb_s }, - { .fno = gen_helper_sve2_sqrshrunb_d }, - }; - return do_sve2_shr_narrow(s, a, ops); -} +static const GVecGen2i sqrshrunb_ops[3] = { + { .fno = gen_helper_sve2_sqrshrunb_h }, + { .fno = gen_helper_sve2_sqrshrunb_s }, + { .fno = gen_helper_sve2_sqrshrunb_d }, +}; +TRANS_FEAT(SQRSHRUNB, aa64_sve2, do_shr_narrow, a, sqrshrunb_ops) -static bool trans_SQRSHRUNT(DisasContext *s, arg_rri_esz *a) -{ - static const GVecGen2i ops[3] = { - { .fno = gen_helper_sve2_sqrshrunt_h }, - { .fno = gen_helper_sve2_sqrshrunt_s }, - { .fno = gen_helper_sve2_sqrshrunt_d }, - }; - return do_sve2_shr_narrow(s, a, ops); -} +static const GVecGen2i sqrshrunt_ops[3] = { + { .fno = gen_helper_sve2_sqrshrunt_h }, + { .fno = gen_helper_sve2_sqrshrunt_s }, + { .fno = gen_helper_sve2_sqrshrunt_d }, +}; 
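/*
 * For reference while reading these conversions: TRANS_FEAT is assumed
 * to expand roughly as sketched below (its real definition lives in the
 * target/arm translate headers, not in this patch), so e.g.
 * TRANS_FEAT(SQRSHRUNT, aa64_sve2, do_shr_narrow, a, sqrshrunt_ops)
 * produces a trans_SQRSHRUNT() gated on the SVE2 feature bit:
 *
 *     #define TRANS_FEAT(NAME, FEAT, FUNC, ...)                        \
 *         static bool trans_##NAME(DisasContext *s, arg_##NAME *a)     \
 *         {                                                            \
 *             return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); \
 *         }
 *
 * TRANS_FEAT_NONSTREAMING is assumed to do the same while additionally
 * setting s->is_nonstreaming = true, as the open-coded PMULL path in
 * this patch does for the 128-bit case.
 */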
+TRANS_FEAT(SQRSHRUNT, aa64_sve2, do_shr_narrow, a, sqrshrunt_ops) static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) @@ -7891,27 +7019,24 @@ static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d, tcg_temp_free_vec(t); } -static bool trans_SQSHRNB(DisasContext *s, arg_rri_esz *a) -{ - static const TCGOpcode vec_list[] = { - INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_smin_vec, 0 - }; - static const GVecGen2i ops[3] = { - { .fniv = gen_sqshrnb_vec, - .opt_opc = vec_list, - .fno = gen_helper_sve2_sqshrnb_h, - .vece = MO_16 }, - { .fniv = gen_sqshrnb_vec, - .opt_opc = vec_list, - .fno = gen_helper_sve2_sqshrnb_s, - .vece = MO_32 }, - { .fniv = gen_sqshrnb_vec, - .opt_opc = vec_list, - .fno = gen_helper_sve2_sqshrnb_d, - .vece = MO_64 }, - }; - return do_sve2_shr_narrow(s, a, ops); -} +static const TCGOpcode sqshrnb_vec_list[] = { + INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_smin_vec, 0 +}; +static const GVecGen2i sqshrnb_ops[3] = { + { .fniv = gen_sqshrnb_vec, + .opt_opc = sqshrnb_vec_list, + .fno = gen_helper_sve2_sqshrnb_h, + .vece = MO_16 }, + { .fniv = gen_sqshrnb_vec, + .opt_opc = sqshrnb_vec_list, + .fno = gen_helper_sve2_sqshrnb_s, + .vece = MO_32 }, + { .fniv = gen_sqshrnb_vec, + .opt_opc = sqshrnb_vec_list, + .fno = gen_helper_sve2_sqshrnb_d, + .vece = MO_64 }, +}; +TRANS_FEAT(SQSHRNB, aa64_sve2, do_shr_narrow, a, sqshrnb_ops) static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) @@ -7932,51 +7057,42 @@ static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d, tcg_temp_free_vec(t); } -static bool trans_SQSHRNT(DisasContext *s, arg_rri_esz *a) -{ - static const TCGOpcode vec_list[] = { - INDEX_op_shli_vec, INDEX_op_sari_vec, - INDEX_op_smax_vec, INDEX_op_smin_vec, 0 - }; - static const GVecGen2i ops[3] = { - { .fniv = gen_sqshrnt_vec, - .opt_opc = vec_list, - .load_dest = true, - .fno = gen_helper_sve2_sqshrnt_h, - .vece = MO_16 }, - { .fniv = gen_sqshrnt_vec, - .opt_opc = vec_list, - .load_dest = true, - .fno = gen_helper_sve2_sqshrnt_s, - .vece = MO_32 }, - { .fniv = gen_sqshrnt_vec, - .opt_opc = vec_list, - .load_dest = true, - .fno = gen_helper_sve2_sqshrnt_d, - .vece = MO_64 }, - }; - return do_sve2_shr_narrow(s, a, ops); -} +static const TCGOpcode sqshrnt_vec_list[] = { + INDEX_op_shli_vec, INDEX_op_sari_vec, + INDEX_op_smax_vec, INDEX_op_smin_vec, 0 +}; +static const GVecGen2i sqshrnt_ops[3] = { + { .fniv = gen_sqshrnt_vec, + .opt_opc = sqshrnt_vec_list, + .load_dest = true, + .fno = gen_helper_sve2_sqshrnt_h, + .vece = MO_16 }, + { .fniv = gen_sqshrnt_vec, + .opt_opc = sqshrnt_vec_list, + .load_dest = true, + .fno = gen_helper_sve2_sqshrnt_s, + .vece = MO_32 }, + { .fniv = gen_sqshrnt_vec, + .opt_opc = sqshrnt_vec_list, + .load_dest = true, + .fno = gen_helper_sve2_sqshrnt_d, + .vece = MO_64 }, +}; +TRANS_FEAT(SQSHRNT, aa64_sve2, do_shr_narrow, a, sqshrnt_ops) -static bool trans_SQRSHRNB(DisasContext *s, arg_rri_esz *a) -{ - static const GVecGen2i ops[3] = { - { .fno = gen_helper_sve2_sqrshrnb_h }, - { .fno = gen_helper_sve2_sqrshrnb_s }, - { .fno = gen_helper_sve2_sqrshrnb_d }, - }; - return do_sve2_shr_narrow(s, a, ops); -} +static const GVecGen2i sqrshrnb_ops[3] = { + { .fno = gen_helper_sve2_sqrshrnb_h }, + { .fno = gen_helper_sve2_sqrshrnb_s }, + { .fno = gen_helper_sve2_sqrshrnb_d }, +}; +TRANS_FEAT(SQRSHRNB, aa64_sve2, do_shr_narrow, a, sqrshrnb_ops) -static bool trans_SQRSHRNT(DisasContext *s, arg_rri_esz *a) -{ - static const GVecGen2i ops[3] = { - { .fno = gen_helper_sve2_sqrshrnt_h }, - { .fno = 
gen_helper_sve2_sqrshrnt_s }, - { .fno = gen_helper_sve2_sqrshrnt_d }, - }; - return do_sve2_shr_narrow(s, a, ops); -} +static const GVecGen2i sqrshrnt_ops[3] = { + { .fno = gen_helper_sve2_sqrshrnt_h }, + { .fno = gen_helper_sve2_sqrshrnt_s }, + { .fno = gen_helper_sve2_sqrshrnt_d }, +}; +TRANS_FEAT(SQRSHRNT, aa64_sve2, do_shr_narrow, a, sqrshrnt_ops) static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) @@ -7990,27 +7106,24 @@ static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d, tcg_temp_free_vec(t); } -static bool trans_UQSHRNB(DisasContext *s, arg_rri_esz *a) -{ - static const TCGOpcode vec_list[] = { - INDEX_op_shri_vec, INDEX_op_umin_vec, 0 - }; - static const GVecGen2i ops[3] = { - { .fniv = gen_uqshrnb_vec, - .opt_opc = vec_list, - .fno = gen_helper_sve2_uqshrnb_h, - .vece = MO_16 }, - { .fniv = gen_uqshrnb_vec, - .opt_opc = vec_list, - .fno = gen_helper_sve2_uqshrnb_s, - .vece = MO_32 }, - { .fniv = gen_uqshrnb_vec, - .opt_opc = vec_list, - .fno = gen_helper_sve2_uqshrnb_d, - .vece = MO_64 }, - }; - return do_sve2_shr_narrow(s, a, ops); -} +static const TCGOpcode uqshrnb_vec_list[] = { + INDEX_op_shri_vec, INDEX_op_umin_vec, 0 +}; +static const GVecGen2i uqshrnb_ops[3] = { + { .fniv = gen_uqshrnb_vec, + .opt_opc = uqshrnb_vec_list, + .fno = gen_helper_sve2_uqshrnb_h, + .vece = MO_16 }, + { .fniv = gen_uqshrnb_vec, + .opt_opc = uqshrnb_vec_list, + .fno = gen_helper_sve2_uqshrnb_s, + .vece = MO_32 }, + { .fniv = gen_uqshrnb_vec, + .opt_opc = uqshrnb_vec_list, + .fno = gen_helper_sve2_uqshrnb_d, + .vece = MO_64 }, +}; +TRANS_FEAT(UQSHRNB, aa64_sve2, do_shr_narrow, a, uqshrnb_ops) static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) @@ -8026,60 +7139,49 @@ static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d, tcg_temp_free_vec(t); } -static bool trans_UQSHRNT(DisasContext *s, arg_rri_esz *a) -{ - static const TCGOpcode vec_list[] = { - INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_umin_vec, 0 - }; - static const GVecGen2i ops[3] = { - { .fniv = gen_uqshrnt_vec, - .opt_opc = vec_list, - .load_dest = true, - .fno = gen_helper_sve2_uqshrnt_h, - .vece = MO_16 }, - { .fniv = gen_uqshrnt_vec, - .opt_opc = vec_list, - .load_dest = true, - .fno = gen_helper_sve2_uqshrnt_s, - .vece = MO_32 }, - { .fniv = gen_uqshrnt_vec, - .opt_opc = vec_list, - .load_dest = true, - .fno = gen_helper_sve2_uqshrnt_d, - .vece = MO_64 }, - }; - return do_sve2_shr_narrow(s, a, ops); -} +static const TCGOpcode uqshrnt_vec_list[] = { + INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_umin_vec, 0 +}; +static const GVecGen2i uqshrnt_ops[3] = { + { .fniv = gen_uqshrnt_vec, + .opt_opc = uqshrnt_vec_list, + .load_dest = true, + .fno = gen_helper_sve2_uqshrnt_h, + .vece = MO_16 }, + { .fniv = gen_uqshrnt_vec, + .opt_opc = uqshrnt_vec_list, + .load_dest = true, + .fno = gen_helper_sve2_uqshrnt_s, + .vece = MO_32 }, + { .fniv = gen_uqshrnt_vec, + .opt_opc = uqshrnt_vec_list, + .load_dest = true, + .fno = gen_helper_sve2_uqshrnt_d, + .vece = MO_64 }, +}; +TRANS_FEAT(UQSHRNT, aa64_sve2, do_shr_narrow, a, uqshrnt_ops) -static bool trans_UQRSHRNB(DisasContext *s, arg_rri_esz *a) -{ - static const GVecGen2i ops[3] = { - { .fno = gen_helper_sve2_uqrshrnb_h }, - { .fno = gen_helper_sve2_uqrshrnb_s }, - { .fno = gen_helper_sve2_uqrshrnb_d }, - }; - return do_sve2_shr_narrow(s, a, ops); -} +static const GVecGen2i uqrshrnb_ops[3] = { + { .fno = gen_helper_sve2_uqrshrnb_h }, + { .fno = gen_helper_sve2_uqrshrnb_s }, + { .fno = gen_helper_sve2_uqrshrnb_d }, +}; +TRANS_FEAT(UQRSHRNB, 
aa64_sve2, do_shr_narrow, a, uqrshrnb_ops) -static bool trans_UQRSHRNT(DisasContext *s, arg_rri_esz *a) -{ - static const GVecGen2i ops[3] = { - { .fno = gen_helper_sve2_uqrshrnt_h }, - { .fno = gen_helper_sve2_uqrshrnt_s }, - { .fno = gen_helper_sve2_uqrshrnt_d }, - }; - return do_sve2_shr_narrow(s, a, ops); -} +static const GVecGen2i uqrshrnt_ops[3] = { + { .fno = gen_helper_sve2_uqrshrnt_h }, + { .fno = gen_helper_sve2_uqrshrnt_s }, + { .fno = gen_helper_sve2_uqrshrnt_d }, +}; +TRANS_FEAT(UQRSHRNT, aa64_sve2, do_shr_narrow, a, uqrshrnt_ops) #define DO_SVE2_ZZZ_NARROW(NAME, name) \ -static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \ -{ \ - static gen_helper_gvec_3 * const fns[4] = { \ + static gen_helper_gvec_3 * const name##_fns[4] = { \ NULL, gen_helper_sve2_##name##_h, \ gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \ }; \ - return do_sve2_zzz_ool(s, a, fns[a->esz]); \ -} + TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_arg_zzz, \ + name##_fns[a->esz], a, 0) DO_SVE2_ZZZ_NARROW(ADDHNB, addhnb) DO_SVE2_ZZZ_NARROW(ADDHNT, addhnt) @@ -8091,655 +7193,391 @@ DO_SVE2_ZZZ_NARROW(SUBHNT, subhnt) DO_SVE2_ZZZ_NARROW(RSUBHNB, rsubhnb) DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt) -static bool do_sve2_ppzz_flags(DisasContext *s, arg_rprr_esz *a, - gen_helper_gvec_flags_4 *fn) -{ - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - return do_ppzz_flags(s, a, fn); -} +static gen_helper_gvec_flags_4 * const match_fns[4] = { + gen_helper_sve2_match_ppzz_b, gen_helper_sve2_match_ppzz_h, NULL, NULL +}; +TRANS_FEAT_NONSTREAMING(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz]) -#define DO_SVE2_PPZZ_MATCH(NAME, name) \ -static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \ -{ \ - static gen_helper_gvec_flags_4 * const fns[4] = { \ - gen_helper_sve2_##name##_ppzz_b, gen_helper_sve2_##name##_ppzz_h, \ - NULL, NULL \ - }; \ - return do_sve2_ppzz_flags(s, a, fns[a->esz]); \ -} +static gen_helper_gvec_flags_4 * const nmatch_fns[4] = { + gen_helper_sve2_nmatch_ppzz_b, gen_helper_sve2_nmatch_ppzz_h, NULL, NULL +}; +TRANS_FEAT_NONSTREAMING(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz]) -DO_SVE2_PPZZ_MATCH(MATCH, match) -DO_SVE2_PPZZ_MATCH(NMATCH, nmatch) +static gen_helper_gvec_4 * const histcnt_fns[4] = { + NULL, NULL, gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d +}; +TRANS_FEAT_NONSTREAMING(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz, + histcnt_fns[a->esz], a, 0) -static bool trans_HISTCNT(DisasContext *s, arg_rprr_esz *a) -{ - static gen_helper_gvec_4 * const fns[2] = { - gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d - }; - if (a->esz < 2) { - return false; - } - return do_sve2_zpzz_ool(s, a, fns[a->esz - 2]); -} +TRANS_FEAT_NONSTREAMING(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz, + a->esz == 0 ? 
gen_helper_sve2_histseg : NULL, a, 0) -static bool trans_HISTSEG(DisasContext *s, arg_rrr_esz *a) -{ - if (a->esz != 0) { - return false; - } - return do_sve2_zzz_ool(s, a, gen_helper_sve2_histseg); -} - -static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a, - gen_helper_gvec_4_ptr *fn) -{ - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - return do_zpzz_fp(s, a, fn); -} - -#define DO_SVE2_ZPZZ_FP(NAME, name) \ -static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \ -{ \ - static gen_helper_gvec_4_ptr * const fns[4] = { \ - NULL, gen_helper_sve2_##name##_zpzz_h, \ - gen_helper_sve2_##name##_zpzz_s, gen_helper_sve2_##name##_zpzz_d \ - }; \ - return do_sve2_zpzz_fp(s, a, fns[a->esz]); \ -} - -DO_SVE2_ZPZZ_FP(FADDP, faddp) -DO_SVE2_ZPZZ_FP(FMAXNMP, fmaxnmp) -DO_SVE2_ZPZZ_FP(FMINNMP, fminnmp) -DO_SVE2_ZPZZ_FP(FMAXP, fmaxp) -DO_SVE2_ZPZZ_FP(FMINP, fminp) +DO_ZPZZ_FP(FADDP, aa64_sve2, sve2_faddp_zpzz) +DO_ZPZZ_FP(FMAXNMP, aa64_sve2, sve2_fmaxnmp_zpzz) +DO_ZPZZ_FP(FMINNMP, aa64_sve2, sve2_fminnmp_zpzz) +DO_ZPZZ_FP(FMAXP, aa64_sve2, sve2_fmaxp_zpzz) +DO_ZPZZ_FP(FMINP, aa64_sve2, sve2_fminp_zpzz) /* * SVE Integer Multiply-Add (unpredicated) */ -static bool trans_FMMLA(DisasContext *s, arg_rrrr_esz *a) -{ - gen_helper_gvec_4_ptr *fn; +TRANS_FEAT_NONSTREAMING(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz, + gen_helper_fmmla_s, a->rd, a->rn, a->rm, a->ra, + 0, FPST_FPCR) +TRANS_FEAT_NONSTREAMING(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz, + gen_helper_fmmla_d, a->rd, a->rn, a->rm, a->ra, + 0, FPST_FPCR) - switch (a->esz) { - case MO_32: - if (!dc_isar_feature(aa64_sve_f32mm, s)) { - return false; - } - fn = gen_helper_fmmla_s; - break; - case MO_64: - if (!dc_isar_feature(aa64_sve_f64mm, s)) { - return false; - } - fn = gen_helper_fmmla_d; - break; - default: - return false; - } +static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = { + NULL, gen_helper_sve2_sqdmlal_zzzw_h, + gen_helper_sve2_sqdmlal_zzzw_s, gen_helper_sve2_sqdmlal_zzzw_d, +}; +TRANS_FEAT(SQDMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, + sqdmlal_zzzw_fns[a->esz], a, 0) +TRANS_FEAT(SQDMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, + sqdmlal_zzzw_fns[a->esz], a, 3) +TRANS_FEAT(SQDMLALBT, aa64_sve2, gen_gvec_ool_arg_zzzz, + sqdmlal_zzzw_fns[a->esz], a, 2) - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - TCGv_ptr status = fpstatus_ptr(FPST_FPCR); - tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - vec_full_reg_offset(s, a->rm), - vec_full_reg_offset(s, a->ra), - status, vsz, vsz, 0, fn); - tcg_temp_free_ptr(status); - } - return true; -} +static gen_helper_gvec_4 * const sqdmlsl_zzzw_fns[] = { + NULL, gen_helper_sve2_sqdmlsl_zzzw_h, + gen_helper_sve2_sqdmlsl_zzzw_s, gen_helper_sve2_sqdmlsl_zzzw_d, +}; +TRANS_FEAT(SQDMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, + sqdmlsl_zzzw_fns[a->esz], a, 0) +TRANS_FEAT(SQDMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, + sqdmlsl_zzzw_fns[a->esz], a, 3) +TRANS_FEAT(SQDMLSLBT, aa64_sve2, gen_gvec_ool_arg_zzzz, + sqdmlsl_zzzw_fns[a->esz], a, 2) -static bool do_sqdmlal_zzzw(DisasContext *s, arg_rrrr_esz *a, - bool sel1, bool sel2) -{ - static gen_helper_gvec_4 * const fns[] = { - NULL, gen_helper_sve2_sqdmlal_zzzw_h, - gen_helper_sve2_sqdmlal_zzzw_s, gen_helper_sve2_sqdmlal_zzzw_d, - }; - return do_sve2_zzzz_ool(s, a, fns[a->esz], (sel2 << 1) | sel1); -} +static gen_helper_gvec_4 * const sqrdmlah_fns[] = { + gen_helper_sve2_sqrdmlah_b, gen_helper_sve2_sqrdmlah_h, + gen_helper_sve2_sqrdmlah_s, gen_helper_sve2_sqrdmlah_d, +}; 
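/*
 * Note on the esz-indexed fns[] tables used with TRANS_FEAT here: a
 * NULL slot marks an unsupported element size, and the shared
 * gen_gvec_ool_arg_zzz()/gen_gvec_ool_arg_zzzz() wrappers are assumed
 * to reject a NULL function pointer before sve_access_check(), which
 * is what lets these tables replace the old explicit esz checks.  A
 * sketch of the assumed shape, mirroring the removed do_sve2_zzzz_ool()
 * and the tcg_gen_gvec_4_ool() pattern elsewhere in this patch:
 *
 *     static bool gen_gvec_ool_zzzz(DisasContext *s, gen_helper_gvec_4 *fn,
 *                                   int rd, int rn, int rm, int ra, int data)
 *     {
 *         if (fn == NULL) {
 *             return false;
 *         }
 *         if (sve_access_check(s)) {
 *             unsigned vsz = vec_full_reg_size(s);
 *             tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
 *                                vec_full_reg_offset(s, rn),
 *                                vec_full_reg_offset(s, rm),
 *                                vec_full_reg_offset(s, ra),
 *                                vsz, vsz, data, fn);
 *         }
 *         return true;
 *     }
 */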
+TRANS_FEAT(SQRDMLAH_zzzz, aa64_sve2, gen_gvec_ool_arg_zzzz, + sqrdmlah_fns[a->esz], a, 0) -static bool do_sqdmlsl_zzzw(DisasContext *s, arg_rrrr_esz *a, - bool sel1, bool sel2) -{ - static gen_helper_gvec_4 * const fns[] = { - NULL, gen_helper_sve2_sqdmlsl_zzzw_h, - gen_helper_sve2_sqdmlsl_zzzw_s, gen_helper_sve2_sqdmlsl_zzzw_d, - }; - return do_sve2_zzzz_ool(s, a, fns[a->esz], (sel2 << 1) | sel1); -} +static gen_helper_gvec_4 * const sqrdmlsh_fns[] = { + gen_helper_sve2_sqrdmlsh_b, gen_helper_sve2_sqrdmlsh_h, + gen_helper_sve2_sqrdmlsh_s, gen_helper_sve2_sqrdmlsh_d, +}; +TRANS_FEAT(SQRDMLSH_zzzz, aa64_sve2, gen_gvec_ool_arg_zzzz, + sqrdmlsh_fns[a->esz], a, 0) -static bool trans_SQDMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a) -{ - return do_sqdmlal_zzzw(s, a, false, false); -} +static gen_helper_gvec_4 * const smlal_zzzw_fns[] = { + NULL, gen_helper_sve2_smlal_zzzw_h, + gen_helper_sve2_smlal_zzzw_s, gen_helper_sve2_smlal_zzzw_d, +}; +TRANS_FEAT(SMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, + smlal_zzzw_fns[a->esz], a, 0) +TRANS_FEAT(SMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, + smlal_zzzw_fns[a->esz], a, 1) -static bool trans_SQDMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a) -{ - return do_sqdmlal_zzzw(s, a, true, true); -} +static gen_helper_gvec_4 * const umlal_zzzw_fns[] = { + NULL, gen_helper_sve2_umlal_zzzw_h, + gen_helper_sve2_umlal_zzzw_s, gen_helper_sve2_umlal_zzzw_d, +}; +TRANS_FEAT(UMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, + umlal_zzzw_fns[a->esz], a, 0) +TRANS_FEAT(UMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, + umlal_zzzw_fns[a->esz], a, 1) -static bool trans_SQDMLALBT(DisasContext *s, arg_rrrr_esz *a) -{ - return do_sqdmlal_zzzw(s, a, false, true); -} +static gen_helper_gvec_4 * const smlsl_zzzw_fns[] = { + NULL, gen_helper_sve2_smlsl_zzzw_h, + gen_helper_sve2_smlsl_zzzw_s, gen_helper_sve2_smlsl_zzzw_d, +}; +TRANS_FEAT(SMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, + smlsl_zzzw_fns[a->esz], a, 0) +TRANS_FEAT(SMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, + smlsl_zzzw_fns[a->esz], a, 1) -static bool trans_SQDMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a) -{ - return do_sqdmlsl_zzzw(s, a, false, false); -} +static gen_helper_gvec_4 * const umlsl_zzzw_fns[] = { + NULL, gen_helper_sve2_umlsl_zzzw_h, + gen_helper_sve2_umlsl_zzzw_s, gen_helper_sve2_umlsl_zzzw_d, +}; +TRANS_FEAT(UMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, + umlsl_zzzw_fns[a->esz], a, 0) +TRANS_FEAT(UMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, + umlsl_zzzw_fns[a->esz], a, 1) -static bool trans_SQDMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a) -{ - return do_sqdmlsl_zzzw(s, a, true, true); -} +static gen_helper_gvec_4 * const cmla_fns[] = { + gen_helper_sve2_cmla_zzzz_b, gen_helper_sve2_cmla_zzzz_h, + gen_helper_sve2_cmla_zzzz_s, gen_helper_sve2_cmla_zzzz_d, +}; +TRANS_FEAT(CMLA_zzzz, aa64_sve2, gen_gvec_ool_zzzz, + cmla_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot) -static bool trans_SQDMLSLBT(DisasContext *s, arg_rrrr_esz *a) -{ - return do_sqdmlsl_zzzw(s, a, false, true); -} +static gen_helper_gvec_4 * const cdot_fns[] = { + NULL, NULL, gen_helper_sve2_cdot_zzzz_s, gen_helper_sve2_cdot_zzzz_d +}; +TRANS_FEAT(CDOT_zzzz, aa64_sve2, gen_gvec_ool_zzzz, + cdot_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot) -static bool trans_SQRDMLAH_zzzz(DisasContext *s, arg_rrrr_esz *a) -{ - static gen_helper_gvec_4 * const fns[] = { - gen_helper_sve2_sqrdmlah_b, gen_helper_sve2_sqrdmlah_h, - gen_helper_sve2_sqrdmlah_s, gen_helper_sve2_sqrdmlah_d, - }; - return do_sve2_zzzz_ool(s, a, fns[a->esz], 0); -} +static 
gen_helper_gvec_4 * const sqrdcmlah_fns[] = { + gen_helper_sve2_sqrdcmlah_zzzz_b, gen_helper_sve2_sqrdcmlah_zzzz_h, + gen_helper_sve2_sqrdcmlah_zzzz_s, gen_helper_sve2_sqrdcmlah_zzzz_d, +}; +TRANS_FEAT(SQRDCMLAH_zzzz, aa64_sve2, gen_gvec_ool_zzzz, + sqrdcmlah_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot) -static bool trans_SQRDMLSH_zzzz(DisasContext *s, arg_rrrr_esz *a) -{ - static gen_helper_gvec_4 * const fns[] = { - gen_helper_sve2_sqrdmlsh_b, gen_helper_sve2_sqrdmlsh_h, - gen_helper_sve2_sqrdmlsh_s, gen_helper_sve2_sqrdmlsh_d, - }; - return do_sve2_zzzz_ool(s, a, fns[a->esz], 0); -} +TRANS_FEAT(USDOT_zzzz, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, + a->esz == 2 ? gen_helper_gvec_usdot_b : NULL, a, 0) -static bool do_smlal_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel) -{ - static gen_helper_gvec_4 * const fns[] = { - NULL, gen_helper_sve2_smlal_zzzw_h, - gen_helper_sve2_smlal_zzzw_s, gen_helper_sve2_smlal_zzzw_d, - }; - return do_sve2_zzzz_ool(s, a, fns[a->esz], sel); -} +TRANS_FEAT_NONSTREAMING(AESMC, aa64_sve2_aes, gen_gvec_ool_zz, + gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt) -static bool trans_SMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a) -{ - return do_smlal_zzzw(s, a, false); -} +TRANS_FEAT_NONSTREAMING(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz, + gen_helper_crypto_aese, a, false) +TRANS_FEAT_NONSTREAMING(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz, + gen_helper_crypto_aese, a, true) -static bool trans_SMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a) -{ - return do_smlal_zzzw(s, a, true); -} +TRANS_FEAT_NONSTREAMING(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz, + gen_helper_crypto_sm4e, a, 0) +TRANS_FEAT_NONSTREAMING(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz, + gen_helper_crypto_sm4ekey, a, 0) -static bool do_umlal_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel) -{ - static gen_helper_gvec_4 * const fns[] = { - NULL, gen_helper_sve2_umlal_zzzw_h, - gen_helper_sve2_umlal_zzzw_s, gen_helper_sve2_umlal_zzzw_d, - }; - return do_sve2_zzzz_ool(s, a, fns[a->esz], sel); -} +TRANS_FEAT_NONSTREAMING(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz, + gen_gvec_rax1, a) -static bool trans_UMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a) -{ - return do_umlal_zzzw(s, a, false); -} +TRANS_FEAT(FCVTNT_sh, aa64_sve2, gen_gvec_fpst_arg_zpz, + gen_helper_sve2_fcvtnt_sh, a, 0, FPST_FPCR) +TRANS_FEAT(FCVTNT_ds, aa64_sve2, gen_gvec_fpst_arg_zpz, + gen_helper_sve2_fcvtnt_ds, a, 0, FPST_FPCR) -static bool trans_UMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a) -{ - return do_umlal_zzzw(s, a, true); -} +TRANS_FEAT(BFCVTNT, aa64_sve_bf16, gen_gvec_fpst_arg_zpz, + gen_helper_sve_bfcvtnt, a, 0, FPST_FPCR) -static bool do_smlsl_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel) -{ - static gen_helper_gvec_4 * const fns[] = { - NULL, gen_helper_sve2_smlsl_zzzw_h, - gen_helper_sve2_smlsl_zzzw_s, gen_helper_sve2_smlsl_zzzw_d, - }; - return do_sve2_zzzz_ool(s, a, fns[a->esz], sel); -} +TRANS_FEAT(FCVTLT_hs, aa64_sve2, gen_gvec_fpst_arg_zpz, + gen_helper_sve2_fcvtlt_hs, a, 0, FPST_FPCR) +TRANS_FEAT(FCVTLT_sd, aa64_sve2, gen_gvec_fpst_arg_zpz, + gen_helper_sve2_fcvtlt_sd, a, 0, FPST_FPCR) -static bool trans_SMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a) -{ - return do_smlsl_zzzw(s, a, false); -} +TRANS_FEAT(FCVTX_ds, aa64_sve2, do_frint_mode, a, + float_round_to_odd, gen_helper_sve_fcvt_ds) +TRANS_FEAT(FCVTXNT_ds, aa64_sve2, do_frint_mode, a, + float_round_to_odd, gen_helper_sve2_fcvtnt_ds) -static bool trans_SMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a) -{ - return do_smlsl_zzzw(s, a, true); -} - -static bool 
do_umlsl_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel) -{ - static gen_helper_gvec_4 * const fns[] = { - NULL, gen_helper_sve2_umlsl_zzzw_h, - gen_helper_sve2_umlsl_zzzw_s, gen_helper_sve2_umlsl_zzzw_d, - }; - return do_sve2_zzzz_ool(s, a, fns[a->esz], sel); -} - -static bool trans_UMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a) -{ - return do_umlsl_zzzw(s, a, false); -} - -static bool trans_UMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a) -{ - return do_umlsl_zzzw(s, a, true); -} - -static bool trans_CMLA_zzzz(DisasContext *s, arg_CMLA_zzzz *a) -{ - static gen_helper_gvec_4 * const fns[] = { - gen_helper_sve2_cmla_zzzz_b, gen_helper_sve2_cmla_zzzz_h, - gen_helper_sve2_cmla_zzzz_s, gen_helper_sve2_cmla_zzzz_d, - }; - - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot); - } - return true; -} - -static bool trans_CDOT_zzzz(DisasContext *s, arg_CMLA_zzzz *a) -{ - if (!dc_isar_feature(aa64_sve2, s) || a->esz < MO_32) { - return false; - } - if (sve_access_check(s)) { - gen_helper_gvec_4 *fn = (a->esz == MO_32 - ? gen_helper_sve2_cdot_zzzz_s - : gen_helper_sve2_cdot_zzzz_d); - gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->rot); - } - return true; -} - -static bool trans_SQRDCMLAH_zzzz(DisasContext *s, arg_SQRDCMLAH_zzzz *a) -{ - static gen_helper_gvec_4 * const fns[] = { - gen_helper_sve2_sqrdcmlah_zzzz_b, gen_helper_sve2_sqrdcmlah_zzzz_h, - gen_helper_sve2_sqrdcmlah_zzzz_s, gen_helper_sve2_sqrdcmlah_zzzz_d, - }; - - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot); - } - return true; -} - -static bool trans_USDOT_zzzz(DisasContext *s, arg_USDOT_zzzz *a) -{ - if (a->esz != 2 || !dc_isar_feature(aa64_sve_i8mm, s)) { - return false; - } - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - tcg_gen_gvec_4_ool(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - vec_full_reg_offset(s, a->rm), - vec_full_reg_offset(s, a->ra), - vsz, vsz, 0, gen_helper_gvec_usdot_b); - } - return true; -} - -static bool trans_AESMC(DisasContext *s, arg_AESMC *a) -{ - if (!dc_isar_feature(aa64_sve2_aes, s)) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zz(s, gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt); - } - return true; -} - -static bool do_aese(DisasContext *s, arg_rrr_esz *a, bool decrypt) -{ - if (!dc_isar_feature(aa64_sve2_aes, s)) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zzz(s, gen_helper_crypto_aese, - a->rd, a->rn, a->rm, decrypt); - } - return true; -} - -static bool trans_AESE(DisasContext *s, arg_rrr_esz *a) -{ - return do_aese(s, a, false); -} - -static bool trans_AESD(DisasContext *s, arg_rrr_esz *a) -{ - return do_aese(s, a, true); -} - -static bool do_sm4(DisasContext *s, arg_rrr_esz *a, gen_helper_gvec_3 *fn) -{ - if (!dc_isar_feature(aa64_sve2_sm4, s)) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, 0); - } - return true; -} - -static bool trans_SM4E(DisasContext *s, arg_rrr_esz *a) -{ - return do_sm4(s, a, gen_helper_crypto_sm4e); -} - -static bool trans_SM4EKEY(DisasContext *s, arg_rrr_esz *a) -{ - return do_sm4(s, a, gen_helper_crypto_sm4ekey); -} - -static bool trans_RAX1(DisasContext *s, arg_rrr_esz *a) -{ - if (!dc_isar_feature(aa64_sve2_sha3, s)) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_fn_zzz(s, gen_gvec_rax1, MO_64, 
a->rd, a->rn, a->rm); - } - return true; -} - -static bool trans_FCVTNT_sh(DisasContext *s, arg_rpr_esz *a) -{ - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtnt_sh); -} - -static bool trans_BFCVTNT(DisasContext *s, arg_rpr_esz *a) -{ - if (!dc_isar_feature(aa64_sve_bf16, s)) { - return false; - } - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_bfcvtnt); -} - -static bool trans_FCVTNT_ds(DisasContext *s, arg_rpr_esz *a) -{ - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtnt_ds); -} - -static bool trans_FCVTLT_hs(DisasContext *s, arg_rpr_esz *a) -{ - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtlt_hs); -} - -static bool trans_FCVTLT_sd(DisasContext *s, arg_rpr_esz *a) -{ - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtlt_sd); -} - -static bool trans_FCVTX_ds(DisasContext *s, arg_rpr_esz *a) -{ - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - return do_frint_mode(s, a, float_round_to_odd, gen_helper_sve_fcvt_ds); -} - -static bool trans_FCVTXNT_ds(DisasContext *s, arg_rpr_esz *a) -{ - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - return do_frint_mode(s, a, float_round_to_odd, gen_helper_sve2_fcvtnt_ds); -} - -static bool trans_FLOGB(DisasContext *s, arg_rpr_esz *a) -{ - static gen_helper_gvec_3_ptr * const fns[] = { - NULL, gen_helper_flogb_h, - gen_helper_flogb_s, gen_helper_flogb_d - }; - - if (!dc_isar_feature(aa64_sve2, s) || fns[a->esz] == NULL) { - return false; - } - if (sve_access_check(s)) { - TCGv_ptr status = - fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); - unsigned vsz = vec_full_reg_size(s); - - tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - pred_full_reg_offset(s, a->pg), - status, vsz, vsz, 0, fns[a->esz]); - tcg_temp_free_ptr(status); - } - return true; -} +static gen_helper_gvec_3_ptr * const flogb_fns[] = { + NULL, gen_helper_flogb_h, + gen_helper_flogb_s, gen_helper_flogb_d +}; +TRANS_FEAT(FLOGB, aa64_sve2, gen_gvec_fpst_arg_zpz, flogb_fns[a->esz], + a, 0, a->esz == MO_16 ? 
FPST_FPCR_F16 : FPST_FPCR) static bool do_FMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sub, bool sel) { - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - vec_full_reg_offset(s, a->rm), - vec_full_reg_offset(s, a->ra), - cpu_env, vsz, vsz, (sel << 1) | sub, - gen_helper_sve2_fmlal_zzzw_s); - } - return true; + return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzzw_s, + a->rd, a->rn, a->rm, a->ra, + (sel << 1) | sub, cpu_env); } -static bool trans_FMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a) -{ - return do_FMLAL_zzzw(s, a, false, false); -} - -static bool trans_FMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a) -{ - return do_FMLAL_zzzw(s, a, false, true); -} - -static bool trans_FMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a) -{ - return do_FMLAL_zzzw(s, a, true, false); -} - -static bool trans_FMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a) -{ - return do_FMLAL_zzzw(s, a, true, true); -} +TRANS_FEAT(FMLALB_zzzw, aa64_sve2, do_FMLAL_zzzw, a, false, false) +TRANS_FEAT(FMLALT_zzzw, aa64_sve2, do_FMLAL_zzzw, a, false, true) +TRANS_FEAT(FMLSLB_zzzw, aa64_sve2, do_FMLAL_zzzw, a, true, false) +TRANS_FEAT(FMLSLT_zzzw, aa64_sve2, do_FMLAL_zzzw, a, true, true) static bool do_FMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sub, bool sel) { - if (!dc_isar_feature(aa64_sve2, s)) { - return false; - } - if (sve_access_check(s)) { - unsigned vsz = vec_full_reg_size(s); - tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - vec_full_reg_offset(s, a->rm), - vec_full_reg_offset(s, a->ra), - cpu_env, vsz, vsz, - (a->index << 2) | (sel << 1) | sub, - gen_helper_sve2_fmlal_zzxw_s); - } - return true; + return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzxw_s, + a->rd, a->rn, a->rm, a->ra, + (a->index << 2) | (sel << 1) | sub, cpu_env); } -static bool trans_FMLALB_zzxw(DisasContext *s, arg_rrxr_esz *a) -{ - return do_FMLAL_zzxw(s, a, false, false); -} +TRANS_FEAT(FMLALB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, false) +TRANS_FEAT(FMLALT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, true) +TRANS_FEAT(FMLSLB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, false) +TRANS_FEAT(FMLSLT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, true) -static bool trans_FMLALT_zzxw(DisasContext *s, arg_rrxr_esz *a) -{ - return do_FMLAL_zzxw(s, a, false, true); -} +TRANS_FEAT_NONSTREAMING(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, + gen_helper_gvec_smmla_b, a, 0) +TRANS_FEAT_NONSTREAMING(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, + gen_helper_gvec_usmmla_b, a, 0) +TRANS_FEAT_NONSTREAMING(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, + gen_helper_gvec_ummla_b, a, 0) -static bool trans_FMLSLB_zzxw(DisasContext *s, arg_rrxr_esz *a) -{ - return do_FMLAL_zzxw(s, a, true, false); -} +TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz, + gen_helper_gvec_bfdot, a, 0) +TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_ool_arg_zzxz, + gen_helper_gvec_bfdot_idx, a) -static bool trans_FMLSLT_zzxw(DisasContext *s, arg_rrxr_esz *a) -{ - return do_FMLAL_zzxw(s, a, true, true); -} - -static bool do_i8mm_zzzz_ool(DisasContext *s, arg_rrrr_esz *a, - gen_helper_gvec_4 *fn, int data) -{ - if (!dc_isar_feature(aa64_sve_i8mm, s)) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data); - } - return true; -} - -static bool trans_SMMLA(DisasContext *s, arg_rrrr_esz *a) -{ - return do_i8mm_zzzz_ool(s, a, 
gen_helper_gvec_smmla_b, 0); -} - -static bool trans_USMMLA(DisasContext *s, arg_rrrr_esz *a) -{ - return do_i8mm_zzzz_ool(s, a, gen_helper_gvec_usmmla_b, 0); -} - -static bool trans_UMMLA(DisasContext *s, arg_rrrr_esz *a) -{ - return do_i8mm_zzzz_ool(s, a, gen_helper_gvec_ummla_b, 0); -} - -static bool trans_BFDOT_zzzz(DisasContext *s, arg_rrrr_esz *a) -{ - if (!dc_isar_feature(aa64_sve_bf16, s)) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zzzz(s, gen_helper_gvec_bfdot, - a->rd, a->rn, a->rm, a->ra, 0); - } - return true; -} - -static bool trans_BFDOT_zzxz(DisasContext *s, arg_rrxr_esz *a) -{ - if (!dc_isar_feature(aa64_sve_bf16, s)) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zzzz(s, gen_helper_gvec_bfdot_idx, - a->rd, a->rn, a->rm, a->ra, a->index); - } - return true; -} - -static bool trans_BFMMLA(DisasContext *s, arg_rrrr_esz *a) -{ - if (!dc_isar_feature(aa64_sve_bf16, s)) { - return false; - } - if (sve_access_check(s)) { - gen_gvec_ool_zzzz(s, gen_helper_gvec_bfmmla, - a->rd, a->rn, a->rm, a->ra, 0); - } - return true; -} +TRANS_FEAT_NONSTREAMING(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz, + gen_helper_gvec_bfmmla, a, 0) static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel) { - if (!dc_isar_feature(aa64_sve_bf16, s)) { - return false; - } - if (sve_access_check(s)) { - TCGv_ptr status = fpstatus_ptr(FPST_FPCR); - unsigned vsz = vec_full_reg_size(s); - - tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - vec_full_reg_offset(s, a->rm), - vec_full_reg_offset(s, a->ra), - status, vsz, vsz, sel, - gen_helper_gvec_bfmlal); - tcg_temp_free_ptr(status); - } - return true; + return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal, + a->rd, a->rn, a->rm, a->ra, sel, FPST_FPCR); } -static bool trans_BFMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a) -{ - return do_BFMLAL_zzzw(s, a, false); -} - -static bool trans_BFMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a) -{ - return do_BFMLAL_zzzw(s, a, true); -} +TRANS_FEAT(BFMLALB_zzzw, aa64_sve_bf16, do_BFMLAL_zzzw, a, false) +TRANS_FEAT(BFMLALT_zzzw, aa64_sve_bf16, do_BFMLAL_zzzw, a, true) static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel) { - if (!dc_isar_feature(aa64_sve_bf16, s)) { + return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal_idx, + a->rd, a->rn, a->rm, a->ra, + (a->index << 1) | sel, FPST_FPCR); +} + +TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false) +TRANS_FEAT(BFMLALT_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, true) + +static bool trans_PSEL(DisasContext *s, arg_psel *a) +{ + int vl = vec_full_reg_size(s); + int pl = pred_gvec_reg_size(s); + int elements = vl >> a->esz; + TCGv_i64 tmp, didx, dbit; + TCGv_ptr ptr; + + if (!dc_isar_feature(aa64_sme, s)) { return false; } - if (sve_access_check(s)) { - TCGv_ptr status = fpstatus_ptr(FPST_FPCR); - unsigned vsz = vec_full_reg_size(s); - - tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd), - vec_full_reg_offset(s, a->rn), - vec_full_reg_offset(s, a->rm), - vec_full_reg_offset(s, a->ra), - status, vsz, vsz, (a->index << 1) | sel, - gen_helper_gvec_bfmlal_idx); - tcg_temp_free_ptr(status); + if (!sve_access_check(s)) { + return true; } + + tmp = tcg_temp_new_i64(); + dbit = tcg_temp_new_i64(); + didx = tcg_temp_new_i64(); + ptr = tcg_temp_new_ptr(); + + /* Compute the predicate element. 
*/ + tcg_gen_addi_i64(tmp, cpu_reg(s, a->rv), a->imm); + if (is_power_of_2(elements)) { + tcg_gen_andi_i64(tmp, tmp, elements - 1); + } else { + tcg_gen_remu_i64(tmp, tmp, tcg_constant_i64(elements)); + } + + /* Extract the predicate byte and bit indices. */ + tcg_gen_shli_i64(tmp, tmp, a->esz); + tcg_gen_andi_i64(dbit, tmp, 7); + tcg_gen_shri_i64(didx, tmp, 3); + if (HOST_BIG_ENDIAN) { + tcg_gen_xori_i64(didx, didx, 7); + } + + /* Load the predicate word. */ + tcg_gen_trunc_i64_ptr(ptr, didx); + tcg_gen_add_ptr(ptr, ptr, cpu_env); + tcg_gen_ld8u_i64(tmp, ptr, pred_full_reg_offset(s, a->pm)); + + /* Extract the predicate bit and replicate to MO_64. */ + tcg_gen_shr_i64(tmp, tmp, dbit); + tcg_gen_andi_i64(tmp, tmp, 1); + tcg_gen_neg_i64(tmp, tmp); + + /* Apply to either copy the source, or write zeros. */ + tcg_gen_gvec_ands(MO_64, pred_full_reg_offset(s, a->pd), + pred_full_reg_offset(s, a->pn), tmp, pl, pl); + + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(dbit); + tcg_temp_free_i64(didx); + tcg_temp_free_ptr(ptr); return true; } -static bool trans_BFMLALB_zzxw(DisasContext *s, arg_rrxr_esz *a) +static void gen_sclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a) { - return do_BFMLAL_zzxw(s, a, false); + tcg_gen_smax_i32(d, a, n); + tcg_gen_smin_i32(d, d, m); } -static bool trans_BFMLALT_zzxw(DisasContext *s, arg_rrxr_esz *a) +static void gen_sclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a) { - return do_BFMLAL_zzxw(s, a, true); + tcg_gen_smax_i64(d, a, n); + tcg_gen_smin_i64(d, d, m); } + +static void gen_sclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n, + TCGv_vec m, TCGv_vec a) +{ + tcg_gen_smax_vec(vece, d, a, n); + tcg_gen_smin_vec(vece, d, d, m); +} + +static void gen_sclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m, + uint32_t a, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop[] = { + INDEX_op_smin_vec, INDEX_op_smax_vec, 0 + }; + static const GVecGen4 ops[4] = { + { .fniv = gen_sclamp_vec, + .fno = gen_helper_gvec_sclamp_b, + .opt_opc = vecop, + .vece = MO_8 }, + { .fniv = gen_sclamp_vec, + .fno = gen_helper_gvec_sclamp_h, + .opt_opc = vecop, + .vece = MO_16 }, + { .fni4 = gen_sclamp_i32, + .fniv = gen_sclamp_vec, + .fno = gen_helper_gvec_sclamp_s, + .opt_opc = vecop, + .vece = MO_32 }, + { .fni8 = gen_sclamp_i64, + .fniv = gen_sclamp_vec, + .fno = gen_helper_gvec_sclamp_d, + .opt_opc = vecop, + .vece = MO_64, + .prefer_i64 = TCG_TARGET_REG_BITS == 64 } + }; + tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]); +} + +TRANS_FEAT(SCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_sclamp, a) + +static void gen_uclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a) +{ + tcg_gen_umax_i32(d, a, n); + tcg_gen_umin_i32(d, d, m); +} + +static void gen_uclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a) +{ + tcg_gen_umax_i64(d, a, n); + tcg_gen_umin_i64(d, d, m); +} + +static void gen_uclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n, + TCGv_vec m, TCGv_vec a) +{ + tcg_gen_umax_vec(vece, d, a, n); + tcg_gen_umin_vec(vece, d, d, m); +} + +static void gen_uclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m, + uint32_t a, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop[] = { + INDEX_op_umin_vec, INDEX_op_umax_vec, 0 + }; + static const GVecGen4 ops[4] = { + { .fniv = gen_uclamp_vec, + .fno = gen_helper_gvec_uclamp_b, + .opt_opc = vecop, + .vece = MO_8 }, + { .fniv = gen_uclamp_vec, + .fno = gen_helper_gvec_uclamp_h, + .opt_opc = vecop, + .vece = MO_16 }, + { .fni4 = gen_uclamp_i32, + .fniv = gen_uclamp_vec, + .fno = 
gen_helper_gvec_uclamp_s, + .opt_opc = vecop, + .vece = MO_32 }, + { .fni8 = gen_uclamp_i64, + .fniv = gen_uclamp_vec, + .fno = gen_helper_gvec_uclamp_d, + .opt_opc = vecop, + .vece = MO_64, + .prefer_i64 = TCG_TARGET_REG_BITS == 64 } + }; + tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]); +} + +TRANS_FEAT(UCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_uclamp, a) diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c index b2991e21ec..5c5d58d2c6 100644 --- a/target/arm/translate-vfp.c +++ b/target/arm/translate-vfp.c @@ -93,7 +93,7 @@ uint64_t vfp_expand_imm(int size, uint8_t imm8) static inline long vfp_f16_offset(unsigned reg, bool top) { long offs = vfp_reg_offset(false, reg); -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN if (!top) { offs += 2; } @@ -109,7 +109,7 @@ static inline long vfp_f16_offset(unsigned reg, bool top) * Generate code for M-profile lazy FP state preservation if needed; * this corresponds to the pseudocode PreserveFPState() function. */ -static void gen_preserve_fp_state(DisasContext *s) +static void gen_preserve_fp_state(DisasContext *s, bool skip_context_update) { if (s->v7m_lspact) { /* @@ -128,6 +128,20 @@ static void gen_preserve_fp_state(DisasContext *s) * any further FP insns in this TB. */ s->v7m_lspact = false; + /* + * The helper might have zeroed VPR, so we do not know the + * correct value for the MVE_NO_PRED TB flag any more. + * If we're about to create a new fp context then that + * will precisely determine the MVE_NO_PRED value (see + * gen_update_fp_context()). Otherwise, we must: + * - set s->mve_no_pred to false, so this instruction + * is generated to use helper functions + * - end the TB now, without chaining to the next TB + */ + if (skip_context_update || !s->v7m_new_fp_ctxt_needed) { + s->mve_no_pred = false; + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; + } } } @@ -166,15 +180,21 @@ static void gen_update_fp_context(DisasContext *s) gen_helper_vfp_set_fpscr(cpu_env, fpscr); tcg_temp_free_i32(fpscr); if (dc_isar_feature(aa32_mve, s)) { - TCGv_i32 z32 = tcg_const_i32(0); - store_cpu_field(z32, v7m.vpr); + store_cpu_field(tcg_constant_i32(0), v7m.vpr); } - /* - * We don't need to arrange to end the TB, because the only - * parts of FPSCR which we cache in the TB flags are the VECLEN - * and VECSTRIDE, and those don't exist for M-profile. + * We just updated the FPSCR and VPR. Some of this state is cached + * in the MVE_NO_PRED TB flag. We want to avoid having to end the + * TB here, which means we need the new value of the MVE_NO_PRED + * flag to be exactly known here and the same for all executions. + * Luckily FPDSCR.LTPSIZE is always constant 4 and the VPR is + * always set to 0, so the new MVE_NO_PRED flag is always 1 + * if and only if we have MVE. + * + * (The other FPSCR state cached in TB flags is VECLEN and VECSTRIDE, + * but those do not exist for M-profile, so are not relevant here.) */ + s->mve_no_pred = dc_isar_feature(aa32_mve, s); if (s->v8m_secure) { bits |= R_V7M_CONTROL_SFPA_MASK; @@ -199,8 +219,30 @@ static void gen_update_fp_context(DisasContext *s) static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled) { if (s->fp_excp_el) { - gen_exception_insn(s, s->pc_curr, EXCP_UDEF, - syn_fp_access_trap(1, 0xe, false), s->fp_excp_el); + /* + * The full syndrome is only used for HSR when HCPTR traps: + * For v8, when TA==0, coproc is RES0. 
+ * For v7, any use of a Floating-point instruction or access + * to a Floating-point Extension register that is trapped to + * Hyp mode because of a trap configured in the HCPTR sets + * this field to 0xA. + */ + int coproc = arm_dc_feature(s, ARM_FEATURE_V8) ? 0 : 0xa; + uint32_t syn = syn_fp_access_trap(1, 0xe, false, coproc); + + gen_exception_insn_el(s, 0, EXCP_UDEF, syn, s->fp_excp_el); + return false; + } + + /* + * Note that rebuild_hflags_a32 has already accounted for being in EL0 + * and the higher EL in A64 mode, etc. Unlike A64 mode, there do not + * appear to be any insns which touch VFP which are allowed. + */ + if (s->sme_trap_nonstreaming) { + gen_exception_insn(s, 0, EXCP_UDEF, + syn_smetrap(SME_ET_Streaming, + curr_insn_len(s) == 2)); return false; } @@ -230,15 +272,15 @@ bool vfp_access_check_m(DisasContext *s, bool skip_context_update) * the encoding space handled by the patterns in m-nocp.decode, * and for them we may need to raise NOCP here. */ - gen_exception_insn(s, s->pc_curr, EXCP_NOCP, - syn_uncategorized(), s->fp_excp_el); + gen_exception_insn_el(s, 0, EXCP_NOCP, + syn_uncategorized(), s->fp_excp_el); return false; } /* Handle M-profile lazy FP state mechanics */ /* Trigger lazy-state preservation if necessary */ - gen_preserve_fp_state(s); + gen_preserve_fp_state(s, skip_context_update); if (!skip_context_update) { /* Update ownership of FP context and create new FP context if needed */ @@ -296,7 +338,7 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) TCGv_i64 frn, frm, dest; TCGv_i64 tmp, zero, zf, nf, vf; - zero = tcg_const_i64(0); + zero = tcg_constant_i64(0); frn = tcg_temp_new_i64(); frm = tcg_temp_new_i64(); @@ -314,27 +356,22 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) vfp_load_reg64(frm, rm); switch (a->cc) { case 0: /* eq: Z */ - tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero, - frn, frm); + tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero, frn, frm); break; case 1: /* vs: V */ - tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero, - frn, frm); + tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero, frn, frm); break; case 2: /* ge: N == V -> N ^ V == 0 */ tmp = tcg_temp_new_i64(); tcg_gen_xor_i64(tmp, vf, nf); - tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero, - frn, frm); + tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero, frn, frm); tcg_temp_free_i64(tmp); break; case 3: /* gt: !Z && N == V */ - tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero, - frn, frm); + tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero, frn, frm); tmp = tcg_temp_new_i64(); tcg_gen_xor_i64(tmp, vf, nf); - tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero, - dest, frm); + tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero, dest, frm); tcg_temp_free_i64(tmp); break; } @@ -346,13 +383,11 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) tcg_temp_free_i64(zf); tcg_temp_free_i64(nf); tcg_temp_free_i64(vf); - - tcg_temp_free_i64(zero); } else { TCGv_i32 frn, frm, dest; TCGv_i32 tmp, zero; - zero = tcg_const_i32(0); + zero = tcg_constant_i32(0); frn = tcg_temp_new_i32(); frm = tcg_temp_new_i32(); @@ -361,27 +396,22 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) vfp_load_reg32(frm, rm); switch (a->cc) { case 0: /* eq: Z */ - tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero, - frn, frm); + tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero, frn, frm); break; case 1: /* vs: V */ - tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero, - frn, frm); + tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero, frn, frm); break; case 2: /* ge: N == V -> N ^ V == 0 */ tmp = 
tcg_temp_new_i32(); tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF); - tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero, - frn, frm); + tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero, frn, frm); tcg_temp_free_i32(tmp); break; case 3: /* gt: !Z && N == V */ - tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero, - frn, frm); + tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero, frn, frm); tmp = tcg_temp_new_i32(); tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF); - tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero, - dest, frm); + tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero, dest, frm); tcg_temp_free_i32(tmp); break; } @@ -393,8 +423,6 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) tcg_temp_free_i32(frn); tcg_temp_free_i32(frm); tcg_temp_free_i32(dest); - - tcg_temp_free_i32(zero); } return true; @@ -526,7 +554,7 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a) fpst = fpstatus_ptr(FPST_FPCR); } - tcg_shift = tcg_const_i32(0); + tcg_shift = tcg_constant_i32(0); tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding)); gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); @@ -574,14 +602,12 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a) gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); tcg_temp_free_i32(tcg_rmode); - tcg_temp_free_i32(tcg_shift); - tcg_temp_free_ptr(fpst); return true; } -static bool mve_skip_vmov(DisasContext *s, int vn, int index, int size) +bool mve_skip_vmov(DisasContext *s, int vn, int index, int size) { /* * In a CPU with MVE, the VMOV (vector lane to general-purpose register) @@ -829,15 +855,11 @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a) case ARM_VFP_MVFR2: case ARM_VFP_FPSID: if (s->current_el == 1) { - TCGv_i32 tcg_reg, tcg_rt; - gen_set_condexec(s); - gen_set_pc_im(s, s->pc_curr); - tcg_reg = tcg_const_i32(a->reg); - tcg_rt = tcg_const_i32(a->rt); - gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, tcg_reg); - tcg_temp_free_i32(tcg_reg); - tcg_temp_free_i32(tcg_rt); + gen_update_pc(s, 0); + gen_helper_check_hcr_el2_trap(cpu_env, + tcg_constant_i32(a->rt), + tcg_constant_i32(a->reg)); } /* fall through */ case ARM_VFP_FPEXC: @@ -1149,11 +1171,11 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a) addr = add_reg_for_lit(s, a->rn, offset); tmp = tcg_temp_new_i64(); if (a->l) { - gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4); + gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_UQ | MO_ALIGN_4); vfp_store_reg64(tmp, a->vd); } else { vfp_load_reg64(tmp, a->vd); - gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4); + gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_UQ | MO_ALIGN_4); } tcg_temp_free_i64(tmp); tcg_temp_free_i32(addr); @@ -1301,12 +1323,12 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a) for (i = 0; i < n; i++) { if (a->l) { /* load */ - gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4); + gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_UQ | MO_ALIGN_4); vfp_store_reg64(tmp, a->vd + i); } else { /* store */ vfp_load_reg64(tmp, a->vd + i); - gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4); + gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_UQ | MO_ALIGN_4); } tcg_gen_addi_i32(addr, addr, offset); } @@ -2367,8 +2389,6 @@ MAKE_VFM_TRANS_FNS(dp) static bool trans_VMOV_imm_hp(DisasContext *s, arg_VMOV_imm_sp *a) { - TCGv_i32 fd; - if (!dc_isar_feature(aa32_fp16_arith, s)) { return false; } @@ -2381,9 +2401,7 @@ static bool trans_VMOV_imm_hp(DisasContext *s, arg_VMOV_imm_sp *a) return true; } - fd = 
tcg_const_i32(vfp_expand_imm(MO_16, a->imm)); - vfp_store_reg32(fd, a->vd); - tcg_temp_free_i32(fd); + vfp_store_reg32(tcg_constant_i32(vfp_expand_imm(MO_16, a->imm)), a->vd); return true; } @@ -2419,7 +2437,7 @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a) } } - fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm)); + fd = tcg_constant_i32(vfp_expand_imm(MO_32, a->imm)); for (;;) { vfp_store_reg32(fd, vd); @@ -2433,7 +2451,6 @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a) vd = vfp_advance_sreg(vd, delta_d); } - tcg_temp_free_i32(fd); return true; } @@ -2474,7 +2491,7 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a) } } - fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm)); + fd = tcg_constant_i64(vfp_expand_imm(MO_64, a->imm)); for (;;) { vfp_store_reg64(fd, vd); @@ -2488,7 +2505,6 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a) vd = vfp_advance_dreg(vd, delta_d); } - tcg_temp_free_i64(fd); return true; } @@ -3273,7 +3289,7 @@ static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a) vfp_load_reg32(vd, a->vd); fpst = fpstatus_ptr(FPST_FPCR_F16); - shift = tcg_const_i32(frac_bits); + shift = tcg_constant_i32(frac_bits); /* Switch on op:U:sx bits */ switch (a->opc) { @@ -3307,7 +3323,6 @@ static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a) vfp_store_reg32(vd, a->vd); tcg_temp_free_i32(vd); - tcg_temp_free_i32(shift); tcg_temp_free_ptr(fpst); return true; } @@ -3332,7 +3347,7 @@ static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a) vfp_load_reg32(vd, a->vd); fpst = fpstatus_ptr(FPST_FPCR); - shift = tcg_const_i32(frac_bits); + shift = tcg_constant_i32(frac_bits); /* Switch on op:U:sx bits */ switch (a->opc) { @@ -3366,7 +3381,6 @@ static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a) vfp_store_reg32(vd, a->vd); tcg_temp_free_i32(vd); - tcg_temp_free_i32(shift); tcg_temp_free_ptr(fpst); return true; } @@ -3397,7 +3411,7 @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a) vfp_load_reg64(vd, a->vd); fpst = fpstatus_ptr(FPST_FPCR); - shift = tcg_const_i32(frac_bits); + shift = tcg_constant_i32(frac_bits); /* Switch on op:U:sx bits */ switch (a->opc) { @@ -3431,7 +3445,6 @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a) vfp_store_reg64(vd, a->vd); tcg_temp_free_i64(vd); - tcg_temp_free_i32(shift); tcg_temp_free_ptr(fpst); return true; } diff --git a/target/arm/translate.c b/target/arm/translate.c index 80c282669f..a06da05640 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -30,11 +30,10 @@ #include "qemu/bitops.h" #include "arm_ldst.h" #include "semihosting/semihost.h" - #include "exec/helper-proto.h" #include "exec/helper-gen.h" - #include "exec/log.h" +#include "cpregs.h" #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T) @@ -163,7 +162,7 @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op) void arm_gen_condlabel(DisasContext *s) { if (!s->condjmp) { - s->condlabel = gen_new_label(); + s->condlabel = gen_disas_label(s); s->condjmp = 1; } } @@ -180,6 +179,25 @@ typedef enum ISSInfo { ISSIs16Bit = (1 << 8), } ISSInfo; +/* + * Store var into env + offset to a member with size bytes. + * Free var after use. 
+ */ +void store_cpu_offset(TCGv_i32 var, int offset, int size) +{ + switch (size) { + case 1: + tcg_gen_st8_i32(var, cpu_env, offset); + break; + case 4: + tcg_gen_st_i32(var, cpu_env, offset); + break; + default: + g_assert_not_reached(); + } + tcg_temp_free_i32(var); +} + /* Save the syndrome information for a Data Abort */ static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo) { @@ -219,16 +237,12 @@ static inline int get_a32_user_mem_index(DisasContext *s) * otherwise, access as if at PL0. */ switch (s->mmu_idx) { + case ARMMMUIdx_E3: case ARMMMUIdx_E2: /* this one is UNPREDICTABLE */ case ARMMMUIdx_E10_0: case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: return arm_to_core_mmu_idx(ARMMMUIdx_E10_0); - case ARMMMUIdx_SE3: - case ARMMMUIdx_SE10_0: - case ARMMMUIdx_SE10_1: - case ARMMMUIdx_SE10_1_PAN: - return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0); case ARMMMUIdx_MUser: case ARMMMUIdx_MPriv: return arm_to_core_mmu_idx(ARMMMUIdx_MUser); @@ -246,17 +260,27 @@ static inline int get_a32_user_mem_index(DisasContext *s) } } -/* The architectural value of PC. */ -static uint32_t read_pc(DisasContext *s) +/* The pc_curr difference for an architectural jump. */ +static target_long jmp_diff(DisasContext *s, target_long diff) { - return s->pc_curr + (s->thumb ? 4 : 8); + return diff + (s->thumb ? 4 : 8); +} + +static void gen_pc_plus_diff(DisasContext *s, TCGv_i32 var, target_long diff) +{ + assert(s->pc_save != -1); + if (TARGET_TB_PCREL) { + tcg_gen_addi_i32(var, cpu_R[15], (s->pc_curr - s->pc_save) + diff); + } else { + tcg_gen_movi_i32(var, s->pc_curr + diff); + } } /* Set a variable to the value of a CPU register. */ void load_reg_var(DisasContext *s, TCGv_i32 var, int reg) { if (reg == 15) { - tcg_gen_movi_i32(var, read_pc(s)); + gen_pc_plus_diff(s, var, jmp_diff(s, 0)); } else { tcg_gen_mov_i32(var, cpu_R[reg]); } @@ -272,7 +296,11 @@ TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs) TCGv_i32 tmp = tcg_temp_new_i32(); if (reg == 15) { - tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs); + /* + * This address is computed from an aligned PC: + * subtract off the low bits. + */ + gen_pc_plus_diff(s, tmp, jmp_diff(s, ofs - (s->pc_curr & 3))); } else { tcg_gen_addi_i32(tmp, cpu_R[reg], ofs); } @@ -291,6 +319,7 @@ void store_reg(DisasContext *s, int reg, TCGv_i32 var) */ tcg_gen_andi_i32(var, var, s->thumb ? 
~1 : ~3); s->base.is_jmp = DISAS_JUMP; + s->pc_save = -1; } else if (reg == 13 && arm_dc_feature(s, ARM_FEATURE_M)) { /* For M-profile SP bits [1:0] are always zero */ tcg_gen_andi_i32(var, var, ~3); @@ -327,21 +356,36 @@ static void store_sp_checked(DisasContext *s, TCGv_i32 var) void gen_set_cpsr(TCGv_i32 var, uint32_t mask) { - TCGv_i32 tmp_mask = tcg_const_i32(mask); - gen_helper_cpsr_write(cpu_env, var, tmp_mask); - tcg_temp_free_i32(tmp_mask); + gen_helper_cpsr_write(cpu_env, var, tcg_constant_i32(mask)); +} + +static void gen_rebuild_hflags(DisasContext *s, bool new_el) +{ + bool m_profile = arm_dc_feature(s, ARM_FEATURE_M); + + if (new_el) { + if (m_profile) { + gen_helper_rebuild_hflags_m32_newel(cpu_env); + } else { + gen_helper_rebuild_hflags_a32_newel(cpu_env); + } + } else { + TCGv_i32 tcg_el = tcg_constant_i32(s->current_el); + if (m_profile) { + gen_helper_rebuild_hflags_m32(cpu_env, tcg_el); + } else { + gen_helper_rebuild_hflags_a32(cpu_env, tcg_el); + } + } } static void gen_exception_internal(int excp) { - TCGv_i32 tcg_excp = tcg_const_i32(excp); - assert(excp_is_internal(excp)); - gen_helper_exception_internal(cpu_env, tcg_excp); - tcg_temp_free_i32(tcg_excp); + gen_helper_exception_internal(cpu_env, tcg_constant_i32(excp)); } -static void gen_step_complete_exception(DisasContext *s) +static void gen_singlestep_exception(DisasContext *s) { /* We just completed step of an insn. Move from Active-not-pending * to Active-pending, and then also take the swstep exception. @@ -357,30 +401,6 @@ static void gen_step_complete_exception(DisasContext *s) s->base.is_jmp = DISAS_NORETURN; } -static void gen_singlestep_exception(DisasContext *s) -{ - /* Generate the right kind of exception for singlestep, which is - * either the architectural singlestep or EXCP_DEBUG for QEMU's - * gdb singlestepping. - */ - if (s->ss_active) { - gen_step_complete_exception(s); - } else { - gen_exception_internal(EXCP_DEBUG); - } -} - -static inline bool is_singlestepping(DisasContext *s) -{ - /* Return true if we are singlestepping either because of - * architectural singlestep or QEMU gdbstub singlestep. This does - * not include the command line '-singlestep' mode which is rather - * misnamed as it only means "one instruction per TB" and doesn't - * affect the code we generate. - */ - return s->base.singlestep_enabled || s->ss_active; -} - void clear_eci_state(DisasContext *s) { /* @@ -388,8 +408,7 @@ void clear_eci_state(DisasContext *s) * multiple insn executes. 
*/ if (s->eci) { - TCGv_i32 tmp = tcg_const_i32(0); - store_cpu_field(tmp, condexec_bits); + store_cpu_field_constant(0, condexec_bits); s->eci = 0; } } @@ -413,13 +432,12 @@ static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b) void gen_rev16(TCGv_i32 dest, TCGv_i32 var) { TCGv_i32 tmp = tcg_temp_new_i32(); - TCGv_i32 mask = tcg_const_i32(0x00ff00ff); + TCGv_i32 mask = tcg_constant_i32(0x00ff00ff); tcg_gen_shri_i32(tmp, var, 8); tcg_gen_and_i32(tmp, tmp, mask); tcg_gen_and_i32(var, var, mask); tcg_gen_shli_i32(var, var, 8); tcg_gen_or_i32(dest, var, tmp); - tcg_temp_free_i32(mask); tcg_temp_free_i32(tmp); } @@ -539,16 +557,14 @@ static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) #define GEN_SHIFT(name) \ static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \ { \ - TCGv_i32 tmp1, tmp2, tmp3; \ - tmp1 = tcg_temp_new_i32(); \ - tcg_gen_andi_i32(tmp1, t1, 0xff); \ - tmp2 = tcg_const_i32(0); \ - tmp3 = tcg_const_i32(0x1f); \ - tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \ - tcg_temp_free_i32(tmp3); \ - tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \ - tcg_gen_##name##_i32(dest, tmp2, tmp1); \ - tcg_temp_free_i32(tmp2); \ + TCGv_i32 tmpd = tcg_temp_new_i32(); \ + TCGv_i32 tmp1 = tcg_temp_new_i32(); \ + TCGv_i32 zero = tcg_constant_i32(0); \ + tcg_gen_andi_i32(tmp1, t1, 0x1f); \ + tcg_gen_##name##_i32(tmpd, t0, tmp1); \ + tcg_gen_andi_i32(tmp1, t1, 0xe0); \ + tcg_gen_movcond_i32(TCG_COND_NE, dest, tmp1, zero, zero, tmpd); \ + tcg_temp_free_i32(tmpd); \ tcg_temp_free_i32(tmp1); \ } GEN_SHIFT(shl) @@ -557,12 +573,10 @@ GEN_SHIFT(shr) static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) { - TCGv_i32 tmp1, tmp2; - tmp1 = tcg_temp_new_i32(); + TCGv_i32 tmp1 = tcg_temp_new_i32(); + tcg_gen_andi_i32(tmp1, t1, 0xff); - tmp2 = tcg_const_i32(0x1f); - tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1); - tcg_temp_free_i32(tmp2); + tcg_gen_umin_i32(tmp1, tmp1, tcg_constant_i32(31)); tcg_gen_sar_i32(dest, t0, tmp1); tcg_temp_free_i32(tmp1); } @@ -764,15 +778,15 @@ void gen_set_condexec(DisasContext *s) { if (s->condexec_mask) { uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1); - TCGv_i32 tmp = tcg_temp_new_i32(); - tcg_gen_movi_i32(tmp, val); - store_cpu_field(tmp, condexec_bits); + + store_cpu_field_constant(val, condexec_bits); } } -void gen_set_pc_im(DisasContext *s, target_ulong val) +void gen_update_pc(DisasContext *s, target_long diff) { - tcg_gen_movi_i32(cpu_R[15], val); + gen_pc_plus_diff(s, cpu_R[15], diff); + s->pc_save = s->pc_curr + diff; } /* Set PC and Thumb state from var. var is marked as dead. */ @@ -782,6 +796,7 @@ static inline void gen_bx(DisasContext *s, TCGv_i32 var) tcg_gen_andi_i32(cpu_R[15], var, ~1); tcg_gen_andi_i32(var, var, 1); store_cpu_field(var, thumb); + s->pc_save = -1; } /* @@ -823,7 +838,7 @@ static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var) static inline void gen_bx_excret_final_code(DisasContext *s) { /* Generate the code to finish possible exception return and end the TB */ - TCGLabel *excret_label = gen_new_label(); + DisasLabel excret_label = gen_disas_label(s); uint32_t min_magic; if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) { @@ -835,14 +850,14 @@ static inline void gen_bx_excret_final_code(DisasContext *s) } /* Is the new PC value in the magic range indicating exception return? 
*/ - tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label); + tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label.label); /* No: end the TB as we would for a DISAS_JMP */ - if (is_singlestepping(s)) { + if (s->ss_active) { gen_singlestep_exception(s); } else { tcg_gen_exit_tb(NULL, 0); } - gen_set_label(excret_label); + set_disas_label(s, excret_label); /* Yes: this is an exception return. * At this point in runtime env->regs[15] and env->thumb will hold * the exception-return magic number, which do_v7m_exception_exit() @@ -864,7 +879,7 @@ static inline void gen_bxns(DisasContext *s, int rm) /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory * we need to sync state before calling it, but: - * - we don't need to do gen_set_pc_im() because the bxns helper will + * - we don't need to do gen_update_pc() because the bxns helper will * always set the PC itself * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE * unless it's outside an IT block or the last insn in an IT block, @@ -885,7 +900,7 @@ static inline void gen_blxns(DisasContext *s, int rm) * We do however need to set the PC, because the blxns helper reads it. * The blxns helper may throw an exception. */ - gen_set_pc_im(s, s->base.pc_next); + gen_update_pc(s, curr_insn_len(s)); gen_helper_v7m_blxns(cpu_env, var); tcg_temp_free_i32(var); s->base.is_jmp = DISAS_EXIT; @@ -1053,7 +1068,7 @@ static inline void gen_hvc(DisasContext *s, int imm16) * as an undefined insn by runtime configuration (ie before * the insn really executes). */ - gen_set_pc_im(s, s->pc_curr); + gen_update_pc(s, 0); gen_helper_pre_hvc(cpu_env); /* Otherwise we will treat this as a real exception which * happens after execution of the insn. (The distinction matters @@ -1061,7 +1076,7 @@ static inline void gen_hvc(DisasContext *s, int imm16) * for single stepping.) */ s->svc_imm = imm16; - gen_set_pc_im(s, s->base.pc_next); + gen_update_pc(s, curr_insn_len(s)); s->base.is_jmp = DISAS_HVC; } @@ -1070,76 +1085,88 @@ static inline void gen_smc(DisasContext *s) /* As with HVC, we may take an exception either before or after * the insn executes. 
*/ - TCGv_i32 tmp; - - gen_set_pc_im(s, s->pc_curr); - tmp = tcg_const_i32(syn_aa32_smc()); - gen_helper_pre_smc(cpu_env, tmp); - tcg_temp_free_i32(tmp); - gen_set_pc_im(s, s->base.pc_next); + gen_update_pc(s, 0); + gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa32_smc())); + gen_update_pc(s, curr_insn_len(s)); s->base.is_jmp = DISAS_SMC; } -static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp) +static void gen_exception_internal_insn(DisasContext *s, int excp) { gen_set_condexec(s); - gen_set_pc_im(s, pc); + gen_update_pc(s, 0); gen_exception_internal(excp); s->base.is_jmp = DISAS_NORETURN; } -void gen_exception_insn(DisasContext *s, uint64_t pc, int excp, - uint32_t syn, uint32_t target_el) +static void gen_exception_el_v(int excp, uint32_t syndrome, TCGv_i32 tcg_el) +{ + gen_helper_exception_with_syndrome_el(cpu_env, tcg_constant_i32(excp), + tcg_constant_i32(syndrome), tcg_el); +} + +static void gen_exception_el(int excp, uint32_t syndrome, uint32_t target_el) +{ + gen_exception_el_v(excp, syndrome, tcg_constant_i32(target_el)); +} + +static void gen_exception(int excp, uint32_t syndrome) +{ + gen_helper_exception_with_syndrome(cpu_env, tcg_constant_i32(excp), + tcg_constant_i32(syndrome)); +} + +static void gen_exception_insn_el_v(DisasContext *s, target_long pc_diff, + int excp, uint32_t syn, TCGv_i32 tcg_el) { if (s->aarch64) { - gen_a64_set_pc_im(pc); + gen_a64_update_pc(s, pc_diff); } else { gen_set_condexec(s); - gen_set_pc_im(s, pc); + gen_update_pc(s, pc_diff); } - gen_exception(excp, syn, target_el); + gen_exception_el_v(excp, syn, tcg_el); + s->base.is_jmp = DISAS_NORETURN; +} + +void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp, + uint32_t syn, uint32_t target_el) +{ + gen_exception_insn_el_v(s, pc_diff, excp, syn, + tcg_constant_i32(target_el)); +} + +void gen_exception_insn(DisasContext *s, target_long pc_diff, + int excp, uint32_t syn) +{ + if (s->aarch64) { + gen_a64_update_pc(s, pc_diff); + } else { + gen_set_condexec(s); + gen_update_pc(s, pc_diff); + } + gen_exception(excp, syn); s->base.is_jmp = DISAS_NORETURN; } static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn) { - TCGv_i32 tcg_syn; - gen_set_condexec(s); - gen_set_pc_im(s, s->pc_curr); - tcg_syn = tcg_const_i32(syn); - gen_helper_exception_bkpt_insn(cpu_env, tcg_syn); - tcg_temp_free_i32(tcg_syn); + gen_update_pc(s, 0); + gen_helper_exception_bkpt_insn(cpu_env, tcg_constant_i32(syn)); s->base.is_jmp = DISAS_NORETURN; } void unallocated_encoding(DisasContext *s) { /* Unallocated and reserved encodings are uncategorized */ - gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), - default_exception_el(s)); -} - -static void gen_exception_el(DisasContext *s, int excp, uint32_t syn, - TCGv_i32 tcg_el) -{ - TCGv_i32 tcg_excp; - TCGv_i32 tcg_syn; - - gen_set_condexec(s); - gen_set_pc_im(s, s->pc_curr); - tcg_excp = tcg_const_i32(excp); - tcg_syn = tcg_const_i32(syn); - gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn, tcg_el); - tcg_temp_free_i32(tcg_syn); - tcg_temp_free_i32(tcg_excp); - s->base.is_jmp = DISAS_NORETURN; + gen_exception_insn(s, 0, EXCP_UDEF, syn_uncategorized()); } /* Force a TB lookup after an instruction that changes the CPU state. 
*/ void gen_lookup_tb(DisasContext *s) { - tcg_gen_movi_i32(cpu_R[15], s->base.pc_next); + gen_pc_plus_diff(s, cpu_R[15], curr_insn_len(s)); s->base.is_jmp = DISAS_EXIT; } @@ -1157,12 +1184,9 @@ static inline void gen_hlt(DisasContext *s, int imm) * semihosting, to provide some semblance of security * (and for consistency with our 32-bit semihosting). */ - if (semihosting_enabled() && -#ifndef CONFIG_USER_ONLY - s->current_el != 0 && -#endif + if (semihosting_enabled(s->current_el == 0) && (imm == (s->thumb ? 0x3c : 0xf000))) { - gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST); + gen_exception_internal_insn(s, EXCP_SEMIHOST); return; } @@ -1185,7 +1209,7 @@ long neon_element_offset(int reg, int element, MemOp memop) { int element_size = 1 << (memop & MO_SIZE); int ofs = element * element_size; -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN /* * Calculate the offset assuming fully little-endian, * then XOR to account for the order of the 8-byte units. @@ -1244,7 +1268,7 @@ void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop) case MO_UL: tcg_gen_ld32u_i64(dest, cpu_env, off); break; - case MO_Q: + case MO_UQ: tcg_gen_ld_i64(dest, cpu_env, off); break; default: @@ -1855,24 +1879,21 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) gen_op_iwmmxt_movq_M0_wRn(wrd); switch ((insn >> 6) & 3) { case 0: - tmp2 = tcg_const_i32(0xff); - tmp3 = tcg_const_i32((insn & 7) << 3); + tmp2 = tcg_constant_i32(0xff); + tmp3 = tcg_constant_i32((insn & 7) << 3); break; case 1: - tmp2 = tcg_const_i32(0xffff); - tmp3 = tcg_const_i32((insn & 3) << 4); + tmp2 = tcg_constant_i32(0xffff); + tmp3 = tcg_constant_i32((insn & 3) << 4); break; case 2: - tmp2 = tcg_const_i32(0xffffffff); - tmp3 = tcg_const_i32((insn & 1) << 5); + tmp2 = tcg_constant_i32(0xffffffff); + tmp3 = tcg_constant_i32((insn & 1) << 5); break; default: - tmp2 = NULL; - tmp3 = NULL; + g_assert_not_reached(); } gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3); - tcg_temp_free_i32(tmp3); - tcg_temp_free_i32(tmp2); tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); @@ -2328,10 +2349,9 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); - tmp = tcg_const_i32((insn >> 20) & 3); iwmmxt_load_reg(cpu_V1, rd1); - gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp); - tcg_temp_free_i32(tmp); + gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, + tcg_constant_i32((insn >> 20) & 3)); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; @@ -2385,9 +2405,8 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); - tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f)); + tmp = tcg_constant_i32(((insn >> 16) & 0xf0) | (insn & 0x0f)); gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp); - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); @@ -2590,34 +2609,79 @@ static void gen_goto_ptr(void) * cpu_loop_exec. Any live exit_requests will be processed as we * enter the next TB. 
*/ -static void gen_goto_tb(DisasContext *s, int n, target_ulong dest) +static void gen_goto_tb(DisasContext *s, int n, target_long diff) { - if (translator_use_goto_tb(&s->base, dest)) { - tcg_gen_goto_tb(n); - gen_set_pc_im(s, dest); + if (translator_use_goto_tb(&s->base, s->pc_curr + diff)) { + /* + * For pcrel, the pc must always be up-to-date on entry to + * the linked TB, so that it can use simple additions for all + * further adjustments. For !pcrel, the linked TB is compiled + * to know its full virtual address, so we can delay the + * update to pc to the unlinked path. A long chain of links + * can thus avoid many updates to the PC. + */ + if (TARGET_TB_PCREL) { + gen_update_pc(s, diff); + tcg_gen_goto_tb(n); + } else { + tcg_gen_goto_tb(n); + gen_update_pc(s, diff); + } tcg_gen_exit_tb(s->base.tb, n); } else { - gen_set_pc_im(s, dest); + gen_update_pc(s, diff); gen_goto_ptr(); } s->base.is_jmp = DISAS_NORETURN; } /* Jump, specifying which TB number to use if we gen_goto_tb() */ -static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno) +static void gen_jmp_tb(DisasContext *s, target_long diff, int tbno) { - if (unlikely(is_singlestepping(s))) { + if (unlikely(s->ss_active)) { /* An indirect jump so that we still trigger the debug exception. */ - gen_set_pc_im(s, dest); + gen_update_pc(s, diff); s->base.is_jmp = DISAS_JUMP; - } else { - gen_goto_tb(s, tbno, dest); + return; + } + switch (s->base.is_jmp) { + case DISAS_NEXT: + case DISAS_TOO_MANY: + case DISAS_NORETURN: + /* + * The normal case: just go to the destination TB. + * NB: NORETURN happens if we generate code like + * gen_brcondi(l); + * gen_jmp(); + * gen_set_label(l); + * gen_jmp(); + * on the second call to gen_jmp(). + */ + gen_goto_tb(s, tbno, diff); + break; + case DISAS_UPDATE_NOCHAIN: + case DISAS_UPDATE_EXIT: + /* + * We already decided we're leaving the TB for some other reason. + * Avoid using goto_tb so we really do exit back to the main loop + * and don't chain to another TB. + */ + gen_update_pc(s, diff); + gen_goto_ptr(); + s->base.is_jmp = DISAS_NORETURN; + break; + default: + /* + * We shouldn't be emitting code for a jump and also have + * is_jmp set to one of the special cases like DISAS_SWI. + */ + g_assert_not_reached(); } } -static inline void gen_jmp(DisasContext *s, uint32_t dest) +static inline void gen_jmp(DisasContext *s, target_long diff) { - gen_jmp_tb(s, dest, 0); + gen_jmp_tb(s, diff, 0); } static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y) @@ -2719,8 +2783,6 @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn, * an exception and return false. Otherwise it will return true, * and set *tgtmode and *regno appropriately. */ - int exc_target = default_exception_el(s); - /* These instructions are present only in ARMv8, or in ARMv7 with the * Virtualization Extensions. 
*/ @@ -2824,14 +2886,15 @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn, if (arm_dc_feature(s, ARM_FEATURE_AARCH64) && dc_isar_feature(aa64_sel2, s)) { /* Target EL is EL<3 minus SCR_EL3.EEL2> */ - tcg_el = load_cpu_field(cp15.scr_el3); + tcg_el = load_cpu_field_low32(cp15.scr_el3); tcg_gen_sextract_i32(tcg_el, tcg_el, ctz32(SCR_EEL2), 1); tcg_gen_addi_i32(tcg_el, tcg_el, 3); } else { - tcg_el = tcg_const_i32(3); + tcg_el = tcg_constant_i32(3); } - gen_exception_el(s, EXCP_UDEF, syn_uncategorized(), tcg_el); + gen_exception_insn_el_v(s, 0, EXCP_UDEF, + syn_uncategorized(), tcg_el); tcg_temp_free_i32(tcg_el); return false; } @@ -2856,14 +2919,13 @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn, undef: /* If we get here then some access check did not pass */ - gen_exception_insn(s, s->pc_curr, EXCP_UDEF, - syn_uncategorized(), exc_target); + gen_exception_insn(s, 0, EXCP_UDEF, syn_uncategorized()); return false; } static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn) { - TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno; + TCGv_i32 tcg_reg; int tgtmode = 0, regno = 0; if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, ®no)) { @@ -2872,20 +2934,18 @@ static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn) /* Sync state because msr_banked() can raise exceptions */ gen_set_condexec(s); - gen_set_pc_im(s, s->pc_curr); + gen_update_pc(s, 0); tcg_reg = load_reg(s, rn); - tcg_tgtmode = tcg_const_i32(tgtmode); - tcg_regno = tcg_const_i32(regno); - gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno); - tcg_temp_free_i32(tcg_tgtmode); - tcg_temp_free_i32(tcg_regno); + gen_helper_msr_banked(cpu_env, tcg_reg, + tcg_constant_i32(tgtmode), + tcg_constant_i32(regno)); tcg_temp_free_i32(tcg_reg); s->base.is_jmp = DISAS_UPDATE_EXIT; } static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn) { - TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno; + TCGv_i32 tcg_reg; int tgtmode = 0, regno = 0; if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, ®no)) { @@ -2894,13 +2954,11 @@ static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn) /* Sync state because mrs_banked() can raise exceptions */ gen_set_condexec(s); - gen_set_pc_im(s, s->pc_curr); + gen_update_pc(s, 0); tcg_reg = tcg_temp_new_i32(); - tcg_tgtmode = tcg_const_i32(tgtmode); - tcg_regno = tcg_const_i32(regno); - gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno); - tcg_temp_free_i32(tcg_tgtmode); - tcg_temp_free_i32(tcg_regno); + gen_helper_mrs_banked(tcg_reg, cpu_env, + tcg_constant_i32(tgtmode), + tcg_constant_i32(regno)); store_reg(s, rn, tcg_reg); s->base.is_jmp = DISAS_UPDATE_EXIT; } @@ -2983,9 +3041,8 @@ void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, } \ static void gen_##NAME##0_vec(unsigned vece, TCGv_vec d, TCGv_vec a) \ { \ - TCGv_vec zero = tcg_const_zeros_vec_matching(d); \ + TCGv_vec zero = tcg_constant_vec_matching(d, vece, 0); \ tcg_gen_cmp_vec(COND, vece, d, a, zero); \ - tcg_temp_free_vec(zero); \ } \ void gen_gvec_##NAME##0(unsigned vece, uint32_t d, uint32_t m, \ uint32_t opr_sz, uint32_t max_sz) \ @@ -3975,8 +4032,8 @@ void gen_ushl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift) TCGv_i32 rval = tcg_temp_new_i32(); TCGv_i32 lsh = tcg_temp_new_i32(); TCGv_i32 rsh = tcg_temp_new_i32(); - TCGv_i32 zero = tcg_const_i32(0); - TCGv_i32 max = tcg_const_i32(32); + TCGv_i32 zero = tcg_constant_i32(0); + TCGv_i32 max = tcg_constant_i32(32); /* * Rely on the TCG guarantee that out of 
range shifts produce @@ -3994,8 +4051,6 @@ void gen_ushl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift) tcg_temp_free_i32(rval); tcg_temp_free_i32(lsh); tcg_temp_free_i32(rsh); - tcg_temp_free_i32(zero); - tcg_temp_free_i32(max); } void gen_ushl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) @@ -4004,8 +4059,8 @@ void gen_ushl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) TCGv_i64 rval = tcg_temp_new_i64(); TCGv_i64 lsh = tcg_temp_new_i64(); TCGv_i64 rsh = tcg_temp_new_i64(); - TCGv_i64 zero = tcg_const_i64(0); - TCGv_i64 max = tcg_const_i64(64); + TCGv_i64 zero = tcg_constant_i64(0); + TCGv_i64 max = tcg_constant_i64(64); /* * Rely on the TCG guarantee that out of range shifts produce @@ -4023,8 +4078,6 @@ void gen_ushl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) tcg_temp_free_i64(rval); tcg_temp_free_i64(lsh); tcg_temp_free_i64(rsh); - tcg_temp_free_i64(zero); - tcg_temp_free_i64(max); } static void gen_ushl_vec(unsigned vece, TCGv_vec dst, @@ -4119,8 +4172,8 @@ void gen_sshl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift) TCGv_i32 rval = tcg_temp_new_i32(); TCGv_i32 lsh = tcg_temp_new_i32(); TCGv_i32 rsh = tcg_temp_new_i32(); - TCGv_i32 zero = tcg_const_i32(0); - TCGv_i32 max = tcg_const_i32(31); + TCGv_i32 zero = tcg_constant_i32(0); + TCGv_i32 max = tcg_constant_i32(31); /* * Rely on the TCG guarantee that out of range shifts produce @@ -4139,8 +4192,6 @@ void gen_sshl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift) tcg_temp_free_i32(rval); tcg_temp_free_i32(lsh); tcg_temp_free_i32(rsh); - tcg_temp_free_i32(zero); - tcg_temp_free_i32(max); } void gen_sshl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) @@ -4149,8 +4200,8 @@ void gen_sshl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) TCGv_i64 rval = tcg_temp_new_i64(); TCGv_i64 lsh = tcg_temp_new_i64(); TCGv_i64 rsh = tcg_temp_new_i64(); - TCGv_i64 zero = tcg_const_i64(0); - TCGv_i64 max = tcg_const_i64(63); + TCGv_i64 zero = tcg_constant_i64(0); + TCGv_i64 max = tcg_constant_i64(63); /* * Rely on the TCG guarantee that out of range shifts produce @@ -4169,8 +4220,6 @@ void gen_sshl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) tcg_temp_free_i64(rval); tcg_temp_free_i64(lsh); tcg_temp_free_i64(rsh); - tcg_temp_free_i64(zero); - tcg_temp_free_i64(max); } static void gen_sshl_vec(unsigned vece, TCGv_vec dst, @@ -4685,8 +4734,6 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, * Note that on XScale all cp0..c13 registers do an access check * call in order to handle c15_cpar. */ - TCGv_ptr tmpptr; - TCGv_i32 tcg_syn, tcg_isread; uint32_t syndrome; /* Note that since we are an implementation which takes an @@ -4728,26 +4775,24 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, } gen_set_condexec(s); - gen_set_pc_im(s, s->pc_curr); - tmpptr = tcg_const_ptr(ri); - tcg_syn = tcg_const_i32(syndrome); - tcg_isread = tcg_const_i32(isread); - gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, - tcg_isread); - tcg_temp_free_ptr(tmpptr); - tcg_temp_free_i32(tcg_syn); - tcg_temp_free_i32(tcg_isread); + gen_update_pc(s, 0); + gen_helper_access_check_cp_reg(cpu_env, + tcg_constant_ptr(ri), + tcg_constant_i32(syndrome), + tcg_constant_i32(isread)); } else if (ri->type & ARM_CP_RAISES_EXC) { /* * The readfn or writefn might raise an exception; * synchronize the CPU state in case it does. 
*/ gen_set_condexec(s); - gen_set_pc_im(s, s->pc_curr); + gen_update_pc(s, 0); } /* Handle special cases first */ - switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) { + switch (ri->type & ARM_CP_SPECIAL_MASK) { + case 0: + break; case ARM_CP_NOP: return; case ARM_CP_WFI: @@ -4755,11 +4800,11 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, unallocated_encoding(s); return; } - gen_set_pc_im(s, s->base.pc_next); + gen_update_pc(s, curr_insn_len(s)); s->base.is_jmp = DISAS_WFI; return; default: - break; + g_assert_not_reached(); } if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) { @@ -4772,13 +4817,11 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, TCGv_i64 tmp64; TCGv_i32 tmp; if (ri->type & ARM_CP_CONST) { - tmp64 = tcg_const_i64(ri->resetvalue); + tmp64 = tcg_constant_i64(ri->resetvalue); } else if (ri->readfn) { - TCGv_ptr tmpptr; tmp64 = tcg_temp_new_i64(); - tmpptr = tcg_const_ptr(ri); - gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr); - tcg_temp_free_ptr(tmpptr); + gen_helper_get_cp_reg64(tmp64, cpu_env, + tcg_constant_ptr(ri)); } else { tmp64 = tcg_temp_new_i64(); tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset); @@ -4793,13 +4836,10 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, } else { TCGv_i32 tmp; if (ri->type & ARM_CP_CONST) { - tmp = tcg_const_i32(ri->resetvalue); + tmp = tcg_constant_i32(ri->resetvalue); } else if (ri->readfn) { - TCGv_ptr tmpptr; tmp = tcg_temp_new_i32(); - tmpptr = tcg_const_ptr(ri); - gen_helper_get_cp_reg(tmp, cpu_env, tmpptr); - tcg_temp_free_ptr(tmpptr); + gen_helper_get_cp_reg(tmp, cpu_env, tcg_constant_ptr(ri)); } else { tmp = load_cpu_offset(ri->fieldoffset); } @@ -4829,25 +4869,19 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, tcg_temp_free_i32(tmplo); tcg_temp_free_i32(tmphi); if (ri->writefn) { - TCGv_ptr tmpptr = tcg_const_ptr(ri); - gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64); - tcg_temp_free_ptr(tmpptr); + gen_helper_set_cp_reg64(cpu_env, tcg_constant_ptr(ri), + tmp64); } else { tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset); } tcg_temp_free_i64(tmp64); } else { + TCGv_i32 tmp = load_reg(s, rt); if (ri->writefn) { - TCGv_i32 tmp; - TCGv_ptr tmpptr; - tmp = load_reg(s, rt); - tmpptr = tcg_const_ptr(ri); - gen_helper_set_cp_reg(cpu_env, tmpptr, tmp); - tcg_temp_free_ptr(tmpptr); + gen_helper_set_cp_reg(cpu_env, tcg_constant_ptr(ri), tmp); tcg_temp_free_i32(tmp); } else { - TCGv_i32 tmp = load_reg(s, rt); - store_cpu_offset(tmp, ri->fieldoffset); + store_cpu_offset(tmp, ri->fieldoffset, 4); } } } @@ -4861,17 +4895,7 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, * A write to any coprocessor register that ends a TB * must rebuild the hflags for the next TB. */ - TCGv_i32 tcg_el = tcg_const_i32(s->current_el); - if (arm_dc_feature(s, ARM_FEATURE_M)) { - gen_helper_rebuild_hflags_m32(cpu_env, tcg_el); - } else { - if (ri->type & ARM_CP_NEWEL) { - gen_helper_rebuild_hflags_a32_newel(cpu_env); - } else { - gen_helper_rebuild_hflags_a32(cpu_env, tcg_el); - } - } - tcg_temp_free_i32(tcg_el); + gen_rebuild_hflags(s, ri->type & ARM_CP_NEWEL); /* * We default to ending the TB on a coprocessor register write, * but allow this to be suppressed by the register definition @@ -5119,7 +5143,7 @@ static void gen_srs(DisasContext *s, * For the UNPREDICTABLE cases we choose to UNDEF. 
*/ if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) { - gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3); + gen_exception_insn_el(s, 0, EXCP_UDEF, syn_uncategorized(), 3); return; } @@ -5160,12 +5184,10 @@ static void gen_srs(DisasContext *s, } addr = tcg_temp_new_i32(); - tmp = tcg_const_i32(mode); /* get_r13_banked() will raise an exception if called from System mode */ gen_set_condexec(s); - gen_set_pc_im(s, s->pc_curr); - gen_helper_get_r13_banked(addr, cpu_env, tmp); - tcg_temp_free_i32(tmp); + gen_update_pc(s, 0); + gen_helper_get_r13_banked(addr, cpu_env, tcg_constant_i32(mode)); switch (amode) { case 0: /* DA */ offset = -4; @@ -5180,7 +5202,7 @@ static void gen_srs(DisasContext *s, offset = 4; break; default: - abort(); + g_assert_not_reached(); } tcg_gen_addi_i32(addr, addr, offset); tmp = load_reg(s, 14); @@ -5205,12 +5227,10 @@ static void gen_srs(DisasContext *s, offset = 0; break; default: - abort(); + g_assert_not_reached(); } tcg_gen_addi_i32(addr, addr, offset); - tmp = tcg_const_i32(mode); - gen_helper_set_r13_banked(cpu_env, tmp, addr); - tcg_temp_free_i32(tmp); + gen_helper_set_r13_banked(cpu_env, tcg_constant_i32(mode), addr); } tcg_temp_free_i32(addr); s->base.is_jmp = DISAS_UPDATE_EXIT; @@ -5220,7 +5240,7 @@ static void gen_srs(DisasContext *s, static void arm_skip_unless(DisasContext *s, uint32_t cond) { arm_gen_condlabel(s); - arm_gen_test_cc(cond ^ 1, s->condlabel); + arm_gen_test_cc(cond ^ 1, s->condlabel.label); } @@ -5522,18 +5542,16 @@ static bool op_s_rri_rot(DisasContext *s, arg_s_rri_rot *a, void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32), int logic_cc, StoreRegKind kind) { - TCGv_i32 tmp1, tmp2; + TCGv_i32 tmp1; uint32_t imm; imm = ror32(a->imm, a->rot); if (logic_cc && a->rot) { tcg_gen_movi_i32(cpu_CF, imm >> 31); } - tmp2 = tcg_const_i32(imm); tmp1 = load_reg(s, a->rn); - gen(tmp1, tmp1, tmp2); - tcg_temp_free_i32(tmp2); + gen(tmp1, tmp1, tcg_constant_i32(imm)); if (logic_cc) { gen_logic_CC(tmp1); @@ -5552,9 +5570,10 @@ static bool op_s_rxi_rot(DisasContext *s, arg_s_rri_rot *a, if (logic_cc && a->rot) { tcg_gen_movi_i32(cpu_CF, imm >> 31); } - tmp = tcg_const_i32(imm); - gen(tmp, tmp); + tmp = tcg_temp_new_i32(); + gen(tmp, tcg_constant_i32(imm)); + if (logic_cc) { gen_logic_CC(tmp); } @@ -5680,14 +5699,11 @@ static bool trans_ADR(DisasContext *s, arg_ri *a) static bool trans_MOVW(DisasContext *s, arg_MOVW *a) { - TCGv_i32 tmp; - if (!ENABLE_ARCH_6T2) { return false; } - tmp = tcg_const_i32(a->imm); - store_reg(s, a->rd, tmp); + store_reg(s, a->rd, tcg_constant_i32(a->imm)); return true; } @@ -6058,14 +6074,13 @@ static bool trans_UMAAL(DisasContext *s, arg_UMAAL *a) t0 = load_reg(s, a->rm); t1 = load_reg(s, a->rn); tcg_gen_mulu2_i32(t0, t1, t0, t1); - zero = tcg_const_i32(0); + zero = tcg_constant_i32(0); t2 = load_reg(s, a->ra); tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero); tcg_temp_free_i32(t2); t2 = load_reg(s, a->rd); tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero); tcg_temp_free_i32(t2); - tcg_temp_free_i32(zero); store_reg(s, a->ra, t0); store_reg(s, a->rd, t1); return true; @@ -6240,7 +6255,7 @@ static bool trans_YIELD(DisasContext *s, arg_YIELD *a) * scheduling of other vCPUs. */ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { - gen_set_pc_im(s, s->base.pc_next); + gen_update_pc(s, curr_insn_len(s)); s->base.is_jmp = DISAS_YIELD; } return true; @@ -6256,7 +6271,7 @@ static bool trans_WFE(DisasContext *s, arg_WFE *a) * implemented so we can't sleep like WFI does. 
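gen_set_pc_im() took an absolute PC, while gen_update_pc() takes a byte displacement from the instruction being translated, which is what makes the later TARGET_TB_PCREL changes possible. The two spellings used throughout these hunks correspond as follows (sketch; curr_insn_len() is defined in translate.h further down):

/* Old: write an absolute PC. */
gen_set_pc_im(s, s->pc_curr);           /* this instruction */
gen_set_pc_im(s, s->base.pc_next);      /* the following instruction */

/* New: the same two points, expressed as displacements from pc_curr. */
gen_update_pc(s, 0);                    /* this instruction */
gen_update_pc(s, curr_insn_len(s));     /* the following instruction */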
*/ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { - gen_set_pc_im(s, s->base.pc_next); + gen_update_pc(s, curr_insn_len(s)); s->base.is_jmp = DISAS_WFE; } return true; @@ -6265,11 +6280,34 @@ static bool trans_WFE(DisasContext *s, arg_WFE *a) static bool trans_WFI(DisasContext *s, arg_WFI *a) { /* For WFI, halt the vCPU until an IRQ. */ - gen_set_pc_im(s, s->base.pc_next); + gen_update_pc(s, curr_insn_len(s)); s->base.is_jmp = DISAS_WFI; return true; } +static bool trans_ESB(DisasContext *s, arg_ESB *a) +{ + /* + * For M-profile, minimal-RAS ESB can be a NOP. + * Without RAS, we must implement this as NOP. + */ + if (!arm_dc_feature(s, ARM_FEATURE_M) && dc_isar_feature(aa32_ras, s)) { + /* + * QEMU does not have a source of physical SErrors, + * so we are only concerned with virtual SErrors. + * The pseudocode in the ARM for this case is + * if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then + * AArch32.vESBOperation(); + * Most of the condition can be evaluated at translation time. + * Test for EL2 present, and defer test for SEL2 to runtime. + */ + if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) { + gen_helper_vesb(cpu_env); + } + } + return true; +} + static bool trans_NOP(DisasContext *s, arg_NOP *a) { return true; @@ -6312,14 +6350,13 @@ static bool op_crc32(DisasContext *s, arg_rrr *a, bool c, MemOp sz) default: g_assert_not_reached(); } - t3 = tcg_const_i32(1 << sz); + t3 = tcg_constant_i32(1 << sz); if (c) { gen_helper_crc32c(t1, t1, t2, t3); } else { gen_helper_crc32(t1, t1, t2, t3); } tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); store_reg(s, a->rd, t1); return true; } @@ -6402,8 +6439,8 @@ static bool trans_MRS_v7m(DisasContext *s, arg_MRS_v7m *a) if (!arm_dc_feature(s, ARM_FEATURE_M)) { return false; } - tmp = tcg_const_i32(a->sysm); - gen_helper_v7m_mrs(tmp, cpu_env, tmp); + tmp = tcg_temp_new_i32(); + gen_helper_v7m_mrs(tmp, cpu_env, tcg_constant_i32(a->sysm)); store_reg(s, a->rd, tmp); return true; } @@ -6415,13 +6452,12 @@ static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a) if (!arm_dc_feature(s, ARM_FEATURE_M)) { return false; } - addr = tcg_const_i32((a->mask << 10) | a->sysm); + addr = tcg_constant_i32((a->mask << 10) | a->sysm); reg = load_reg(s, a->rn); gen_helper_v7m_msr(cpu_env, addr, reg); - tcg_temp_free_i32(addr); tcg_temp_free_i32(reg); /* If we wrote to CONTROL, the EL might have changed */ - gen_helper_rebuild_hflags_m32_newel(cpu_env); + gen_rebuild_hflags(s, true); gen_lookup_tb(s); return true; } @@ -6440,6 +6476,18 @@ static bool trans_BXJ(DisasContext *s, arg_BXJ *a) if (!ENABLE_ARCH_5J || arm_dc_feature(s, ARM_FEATURE_M)) { return false; } + /* + * v7A allows BXJ to be trapped via HSTR.TJDBX. We don't waste a + * TBFLAGS bit on a basically-never-happens case, so call a helper + * function to check for the trap and raise the exception if needed + * (passing it the register number for the syndrome value). + * v8A doesn't have this HSTR bit. + */ + if (!arm_dc_feature(s, ARM_FEATURE_V8) && + arm_dc_feature(s, ARM_FEATURE_EL2) && + s->current_el < 2 && s->ns) { + gen_helper_check_bxj_trap(cpu_env, tcg_constant_i32(a->rm)); + } /* Trivial implementation equivalent to bx. 
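One subtlety of the constant conversion shows up in trans_MRS_v7m here and trans_TT below: a tcg_constant_* value is read-only, so it can no longer double as the helper's destination the way the old tcg_const_i32 temp did. Restated as a sketch of the pattern:

/* Old: one temp served as both the helper argument and its result. */
tmp = tcg_const_i32(a->sysm);
gen_helper_v7m_mrs(tmp, cpu_env, tmp);

/* New: constants cannot be written, so the result gets its own temp. */
tmp = tcg_temp_new_i32();
gen_helper_v7m_mrs(tmp, cpu_env, tcg_constant_i32(a->sysm));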
*/ gen_bx(s, load_reg(s, a->rm)); return true; @@ -6453,7 +6501,7 @@ static bool trans_BLX_r(DisasContext *s, arg_BLX_r *a) return false; } tmp = load_reg(s, a->rm); - tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb); + gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb); gen_bx(s, tmp); return true; } @@ -6510,7 +6558,7 @@ static bool trans_ERET(DisasContext *s, arg_ERET *a) } if (s->current_el == 2) { /* ERET from Hyp uses ELR_Hyp, not LR */ - tmp = load_cpu_field(elr_el[2]); + tmp = load_cpu_field_low32(elr_el[2]); } else { tmp = load_reg(s, 14); } @@ -6532,12 +6580,9 @@ static bool trans_BKPT(DisasContext *s, arg_BKPT *a) /* BKPT is OK with ECI set and leaves it untouched */ s->eci_handled = true; if (arm_dc_feature(s, ARM_FEATURE_M) && - semihosting_enabled() && -#ifndef CONFIG_USER_ONLY - !IS_USER(s) && -#endif + semihosting_enabled(s->current_el == 0) && (a->imm == 0xab)) { - gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST); + gen_exception_internal_insn(s, EXCP_SEMIHOST); } else { gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false)); } @@ -6618,8 +6663,8 @@ static bool trans_TT(DisasContext *s, arg_TT *a) } addr = load_reg(s, a->rn); - tmp = tcg_const_i32((a->A << 1) | a->T); - gen_helper_v7m_tt(tmp, cpu_env, addr, tmp); + tmp = tcg_temp_new_i32(); + gen_helper_v7m_tt(tmp, cpu_env, addr, tcg_constant_i32((a->A << 1) | a->T)); tcg_temp_free_i32(addr); store_reg(s, a->rd, tmp); return true; @@ -6636,7 +6681,7 @@ static ISSInfo make_issinfo(DisasContext *s, int rd, bool p, bool w) /* ISS not valid if writeback */ if (p && !w) { ret = rd; - if (s->base.pc_next - s->pc_curr == 2) { + if (curr_insn_len(s) == 2) { ret |= ISSIs16Bit; } } else { @@ -7586,7 +7631,7 @@ static bool trans_PKH(DisasContext *s, arg_PKH *a) static bool op_sat(DisasContext *s, arg_sat *a, void (*gen)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32)) { - TCGv_i32 tmp, satimm; + TCGv_i32 tmp; int shift = a->imm; if (!ENABLE_ARCH_6) { @@ -7600,9 +7645,7 @@ static bool op_sat(DisasContext *s, arg_sat *a, tcg_gen_shli_i32(tmp, tmp, shift); } - satimm = tcg_const_i32(a->satimm); - gen(tmp, cpu_env, tmp, satimm); - tcg_temp_free_i32(satimm); + gen(tmp, cpu_env, tmp, tcg_constant_i32(a->satimm)); store_reg(s, a->rd, tmp); return true; @@ -7829,10 +7872,9 @@ static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub) t3 = tcg_temp_new_i32(); tcg_gen_sari_i32(t3, t1, 31); qf = load_cpu_field(QF); - one = tcg_const_i32(1); + one = tcg_constant_i32(1); tcg_gen_movcond_i32(TCG_COND_NE, qf, t2, t3, one, qf); store_cpu_field(qf, QF); - tcg_temp_free_i32(one); tcg_temp_free_i32(t3); tcg_temp_free_i32(t2); } @@ -7938,9 +7980,7 @@ static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub) * a non-zero multiplicand lowpart, and the correct result * lowpart for rounding. 
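The tcg_gen_sub2_i32() call just below performs a 64-bit subtraction on pairs of 32-bit halves; the borrow generated by a non-zero low half is exactly what the comment above relies on for the rounded high word. In plain, standalone C the operation amounts to (illustrative only):

#include <stdint.h>

/* 64-bit a - b computed on 32-bit halves, as sub2 does at the TCG level. */
static void sub2_u32(uint32_t *rl, uint32_t *rh,
                     uint32_t al, uint32_t ah,
                     uint32_t bl, uint32_t bh)
{
    uint32_t borrow = al < bl;      /* borrow out of the low half */

    *rl = al - bl;
    *rh = ah - bh - borrow;
}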
*/ - TCGv_i32 zero = tcg_const_i32(0); - tcg_gen_sub2_i32(t2, t1, zero, t3, t2, t1); - tcg_temp_free_i32(zero); + tcg_gen_sub2_i32(t2, t1, tcg_constant_i32(0), t3, t2, t1); } else { tcg_gen_add_i32(t1, t1, t3); } @@ -7992,9 +8032,9 @@ static bool op_div(DisasContext *s, arg_rrr *a, bool u) t1 = load_reg(s, a->rn); t2 = load_reg(s, a->rm); if (u) { - gen_helper_udiv(t1, t1, t2); + gen_helper_udiv(t1, cpu_env, t1, t2); } else { - gen_helper_sdiv(t1, t1, t2); + gen_helper_sdiv(t1, cpu_env, t1, t2); } tcg_temp_free_i32(t2); store_reg(s, a->rd, t1); @@ -8037,7 +8077,7 @@ static TCGv_i32 op_addr_block_pre(DisasContext *s, arg_ldst_block *a, int n) * If the writeback is incrementing SP rather than * decrementing it, and the initial SP is below the * stack limit but the final written-back SP would - * be above, then then we must not perform any memory + * be above, then we must not perform any memory * accesses, but it is IMPDEF whether we generate * an exception. We choose to do so in this case. * At this point 'addr' is the lowest address, so @@ -8077,7 +8117,7 @@ static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n) { int i, j, n, list, mem_idx; bool user = a->u; - TCGv_i32 addr, tmp, tmp2; + TCGv_i32 addr, tmp; if (user) { /* STM (user) */ @@ -8107,9 +8147,7 @@ static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n) if (user && i != 15) { tmp = tcg_temp_new_i32(); - tmp2 = tcg_const_i32(i); - gen_helper_get_user_reg(tmp, cpu_env, tmp2); - tcg_temp_free_i32(tmp2); + gen_helper_get_user_reg(tmp, cpu_env, tcg_constant_i32(i)); } else { tmp = load_reg(s, i); } @@ -8150,7 +8188,7 @@ static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n) bool loaded_base; bool user = a->u; bool exc_return = false; - TCGv_i32 addr, tmp, tmp2, loaded_var; + TCGv_i32 addr, tmp, loaded_var; if (user) { /* LDM (user), LDM (exception return) */ @@ -8193,9 +8231,7 @@ static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n) tmp = tcg_temp_new_i32(); gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); if (user) { - tmp2 = tcg_const_i32(i); - gen_helper_set_user_reg(cpu_env, tmp2, tmp); - tcg_temp_free_i32(tmp2); + gen_helper_set_user_reg(cpu_env, tcg_constant_i32(i), tmp); tcg_temp_free_i32(tmp); } else if (i == a->rn) { loaded_var = tmp; @@ -8288,7 +8324,7 @@ static bool trans_CLRM(DisasContext *s, arg_CLRM *a) s->eci_handled = true; - zero = tcg_const_i32(0); + zero = tcg_constant_i32(0); for (i = 0; i < 15; i++) { if (extract32(a->list, i, 1)) { /* Clear R[i] */ @@ -8300,11 +8336,8 @@ static bool trans_CLRM(DisasContext *s, arg_CLRM *a) * Clear APSR (by calling the MSR helper with the same argument * as for "MSR APSR_nzcvqg, Rn": mask = 0b1100, SYSM=0) */ - TCGv_i32 maskreg = tcg_const_i32(0xc << 8); - gen_helper_v7m_msr(cpu_env, maskreg, zero); - tcg_temp_free_i32(maskreg); + gen_helper_v7m_msr(cpu_env, tcg_constant_i32(0xc00), zero); } - tcg_temp_free_i32(zero); clear_eci_state(s); return true; } @@ -8315,7 +8348,7 @@ static bool trans_CLRM(DisasContext *s, arg_CLRM *a) static bool trans_B(DisasContext *s, arg_i *a) { - gen_jmp(s, read_pc(s) + a->imm); + gen_jmp(s, jmp_diff(s, a->imm)); return true; } @@ -8330,21 +8363,19 @@ static bool trans_B_cond_thumb(DisasContext *s, arg_ci *a) return true; } arm_skip_unless(s, a->cond); - gen_jmp(s, read_pc(s) + a->imm); + gen_jmp(s, jmp_diff(s, a->imm)); return true; } static bool trans_BL(DisasContext *s, arg_i *a) { - tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb); - gen_jmp(s, read_pc(s) + a->imm); + gen_pc_plus_diff(s, 
cpu_R[14], curr_insn_len(s) | s->thumb); + gen_jmp(s, jmp_diff(s, a->imm)); return true; } static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a) { - TCGv_i32 tmp; - /* * BLX would be useless on M-profile; the encoding space * is used for other insns from v8.1M onward, and UNDEFs before that. @@ -8357,17 +8388,17 @@ static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a) if (s->thumb && (a->imm & 2)) { return false; } - tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb); - tmp = tcg_const_i32(!s->thumb); - store_cpu_field(tmp, thumb); - gen_jmp(s, (read_pc(s) & ~3) + a->imm); + gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb); + store_cpu_field_constant(!s->thumb, thumb); + /* This jump is computed from an aligned PC: subtract off the low bits. */ + gen_jmp(s, jmp_diff(s, a->imm - (s->pc_curr & 3))); return true; } static bool trans_BL_BLX_prefix(DisasContext *s, arg_BL_BLX_prefix *a) { assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2)); - tcg_gen_movi_i32(cpu_R[14], read_pc(s) + (a->imm << 12)); + gen_pc_plus_diff(s, cpu_R[14], jmp_diff(s, a->imm << 12)); return true; } @@ -8377,7 +8408,7 @@ static bool trans_BL_suffix(DisasContext *s, arg_BL_suffix *a) assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2)); tcg_gen_addi_i32(tmp, cpu_R[14], (a->imm << 1) | 1); - tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1); + gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | 1); gen_bx(s, tmp); return true; } @@ -8393,7 +8424,7 @@ static bool trans_BLX_suffix(DisasContext *s, arg_BLX_suffix *a) tmp = tcg_temp_new_i32(); tcg_gen_addi_i32(tmp, cpu_R[14], a->imm << 1); tcg_gen_andi_i32(tmp, tmp, 0xfffffffc); - tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1); + gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | 1); gen_bx(s, tmp); return true; } @@ -8450,8 +8481,8 @@ static bool trans_DLS(DisasContext *s, arg_DLS *a) store_reg(s, 14, tmp); if (a->size != 4) { /* DLSTP: set FPSCR.LTPSIZE */ - tmp = tcg_const_i32(a->size); - store_cpu_field(tmp, v7m.ltpsize); + store_cpu_field(tcg_constant_i32(a->size), v7m.ltpsize); + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; } return true; } @@ -8460,7 +8491,7 @@ static bool trans_WLS(DisasContext *s, arg_WLS *a) { /* M-profile low-overhead while-loop start */ TCGv_i32 tmp; - TCGLabel *nextlabel; + DisasLabel nextlabel; if (!dc_isar_feature(aa32_lob, s)) { return false; @@ -8495,14 +8526,14 @@ static bool trans_WLS(DisasContext *s, arg_WLS *a) * Do the check-and-raise-exception by hand. */ if (s->fp_excp_el) { - gen_exception_insn(s, s->pc_curr, EXCP_NOCP, - syn_uncategorized(), s->fp_excp_el); + gen_exception_insn_el(s, 0, EXCP_NOCP, + syn_uncategorized(), s->fp_excp_el); return true; } } - nextlabel = gen_new_label(); - tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_R[a->rn], 0, nextlabel); + nextlabel = gen_disas_label(s); + tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_R[a->rn], 0, nextlabel.label); tmp = load_reg(s, a->rn); store_reg(s, 14, tmp); if (a->size != 4) { @@ -8515,13 +8546,16 @@ static bool trans_WLS(DisasContext *s, arg_WLS *a) */ bool ok = vfp_access_check(s); assert(ok); - tmp = tcg_const_i32(a->size); - store_cpu_field(tmp, v7m.ltpsize); + store_cpu_field(tcg_constant_i32(a->size), v7m.ltpsize); + /* + * LTPSIZE updated, but MVE_NO_PRED will always be the same thing (0) + * when we take this upcoming exit from this TB, so gen_jmp_tb() is OK. 
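A DisasLabel bundles a TCGLabel with the pc_save value captured when the branch was emitted; set_disas_label() restores that snapshot at the join point so later PC-relative updates are computed from a consistent base. The usage pattern introduced in trans_WLS() (and below in trans_LE()) is, in outline (variable names here are illustrative):

DisasLabel over = gen_disas_label(s);              /* snapshots s->pc_save */
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_R[a->rn], 0, over.label);

/* ... fall-through path; may advance s->pc_save ... */

set_disas_label(s, over);                          /* restores s->pc_save */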
+ */ } - gen_jmp_tb(s, s->base.pc_next, 1); + gen_jmp_tb(s, curr_insn_len(s), 1); - gen_set_label(nextlabel); - gen_jmp(s, read_pc(s) + a->imm); + set_disas_label(s, nextlabel); + gen_jmp(s, jmp_diff(s, a->imm)); return true; } @@ -8536,7 +8570,7 @@ static bool trans_LE(DisasContext *s, arg_LE *a) * any faster. */ TCGv_i32 tmp; - TCGLabel *loopend; + DisasLabel loopend; bool fpu_active; if (!dc_isar_feature(aa32_lob, s)) { @@ -8591,18 +8625,17 @@ static bool trans_LE(DisasContext *s, arg_LE *a) if (!a->tp && dc_isar_feature(aa32_mve, s) && fpu_active) { /* Need to do a runtime check for LTPSIZE != 4 */ - TCGLabel *skipexc = gen_new_label(); + DisasLabel skipexc = gen_disas_label(s); tmp = load_cpu_field(v7m.ltpsize); - tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 4, skipexc); + tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 4, skipexc.label); tcg_temp_free_i32(tmp); - gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(), - default_exception_el(s)); - gen_set_label(skipexc); + gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized()); + set_disas_label(s, skipexc); } if (a->f) { /* Loop-forever: just jump back to the loop start */ - gen_jmp(s, read_pc(s) - a->imm); + gen_jmp(s, jmp_diff(s, -a->imm)); return true; } @@ -8612,9 +8645,9 @@ static bool trans_LE(DisasContext *s, arg_LE *a) * loop decrement value is 1. For LETP we need to calculate the decrement * value from LTPSIZE. */ - loopend = gen_new_label(); + loopend = gen_disas_label(s); if (!a->tp) { - tcg_gen_brcondi_i32(TCG_COND_LEU, cpu_R[14], 1, loopend); + tcg_gen_brcondi_i32(TCG_COND_LEU, cpu_R[14], 1, loopend.label); tcg_gen_addi_i32(cpu_R[14], cpu_R[14], -1); } else { /* @@ -8627,22 +8660,21 @@ static bool trans_LE(DisasContext *s, arg_LE *a) tcg_gen_shl_i32(decr, tcg_constant_i32(1), decr); tcg_temp_free_i32(ltpsize); - tcg_gen_brcond_i32(TCG_COND_LEU, cpu_R[14], decr, loopend); + tcg_gen_brcond_i32(TCG_COND_LEU, cpu_R[14], decr, loopend.label); tcg_gen_sub_i32(cpu_R[14], cpu_R[14], decr); tcg_temp_free_i32(decr); } /* Jump back to the loop start */ - gen_jmp(s, read_pc(s) - a->imm); + gen_jmp(s, jmp_diff(s, -a->imm)); - gen_set_label(loopend); + set_disas_label(s, loopend); if (a->tp) { /* Exits from tail-pred loops must reset LTPSIZE to 4 */ - tmp = tcg_const_i32(4); - store_cpu_field(tmp, v7m.ltpsize); + store_cpu_field(tcg_constant_i32(4), v7m.ltpsize); } /* End TB, continuing to following insn */ - gen_jmp_tb(s, s->base.pc_next, 1); + gen_jmp_tb(s, curr_insn_len(s), 1); return true; } @@ -8653,7 +8685,6 @@ static bool trans_LCTP(DisasContext *s, arg_LCTP *a) * doesn't cache branch information, all we need to do is reset * FPSCR.LTPSIZE to 4. */ - TCGv_i32 ltpsize; if (!dc_isar_feature(aa32_lob, s) || !dc_isar_feature(aa32_mve, s)) { @@ -8664,11 +8695,45 @@ static bool trans_LCTP(DisasContext *s, arg_LCTP *a) return true; } - ltpsize = tcg_const_i32(4); - store_cpu_field(ltpsize, v7m.ltpsize); + store_cpu_field_constant(4, v7m.ltpsize); return true; } +static bool trans_VCTP(DisasContext *s, arg_VCTP *a) +{ + /* + * M-profile Create Vector Tail Predicate. This insn is itself + * predicated and is subject to beatwise execution. + */ + TCGv_i32 rn_shifted, masklen; + + if (!dc_isar_feature(aa32_mve, s) || a->rn == 13 || a->rn == 15) { + return false; + } + + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + /* + * We pre-calculate the mask length here to avoid having + * to have multiple helpers specialized for size. + * We pass the helper "rn <= (1 << (4 - size)) ? (rn << size) : 16". 
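Read concretely, the expression in that comment is min(rn << size, 16): the mask length in bytes handed to the mve_vctp helper. A standalone sketch with worked values (the function name here is illustrative, not part of the patch):

#include <stdint.h>

/* size = log2 of the element width in bytes; 16 covers the whole vector. */
static uint32_t vctp_masklen(uint32_t rn, unsigned size)
{
    return rn <= (1u << (4 - size)) ? (rn << size) : 16;
}

/* e.g. size = 1 (halfwords): rn = 3 -> 6 bytes predicated, rn = 9 -> 16. */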
+ */ + rn_shifted = tcg_temp_new_i32(); + masklen = load_reg(s, a->rn); + tcg_gen_shli_i32(rn_shifted, masklen, a->size); + tcg_gen_movcond_i32(TCG_COND_LEU, masklen, + masklen, tcg_constant_i32(1 << (4 - a->size)), + rn_shifted, tcg_constant_i32(16)); + gen_helper_mve_vctp(cpu_env, masklen); + tcg_temp_free_i32(masklen); + tcg_temp_free_i32(rn_shifted); + /* This insn updates predication bits */ + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; + mve_update_eci(s); + return true; +} static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half) { @@ -8682,10 +8747,11 @@ static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half) tcg_gen_add_i32(addr, addr, tmp); gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), half ? MO_UW : MO_UB); - tcg_temp_free_i32(addr); tcg_gen_add_i32(tmp, tmp, tmp); - tcg_gen_addi_i32(tmp, tmp, read_pc(s)); + gen_pc_plus_diff(s, addr, jmp_diff(s, 0)); + tcg_gen_add_i32(tmp, tmp, addr); + tcg_temp_free_i32(addr); store_reg(s, 15, tmp); return true; } @@ -8706,9 +8772,9 @@ static bool trans_CBZ(DisasContext *s, arg_CBZ *a) arm_gen_condlabel(s); tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE, - tmp, 0, s->condlabel); + tmp, 0, s->condlabel.label); tcg_temp_free_i32(tmp); - gen_jmp(s, read_pc(s) + a->imm); + gen_jmp(s, jmp_diff(s, a->imm)); return true; } @@ -8721,14 +8787,12 @@ static bool trans_SVC(DisasContext *s, arg_SVC *a) { const uint32_t semihost_imm = s->thumb ? 0xab : 0x123456; - if (!arm_dc_feature(s, ARM_FEATURE_M) && semihosting_enabled() && -#ifndef CONFIG_USER_ONLY - !IS_USER(s) && -#endif + if (!arm_dc_feature(s, ARM_FEATURE_M) && + semihosting_enabled(s->current_el == 0) && (a->imm == semihost_imm)) { - gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST); + gen_exception_internal_insn(s, EXCP_SEMIHOST); } else { - gen_set_pc_im(s, s->base.pc_next); + gen_update_pc(s, curr_insn_len(s)); s->svc_imm = a->imm; s->base.is_jmp = DISAS_SWI; } @@ -8827,7 +8891,7 @@ static bool trans_CPS(DisasContext *s, arg_CPS *a) static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a) { - TCGv_i32 tmp, addr, el; + TCGv_i32 tmp, addr; if (!arm_dc_feature(s, ARM_FEATURE_M)) { return false; @@ -8837,23 +8901,18 @@ static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a) return true; } - tmp = tcg_const_i32(a->im); + tmp = tcg_constant_i32(a->im); /* FAULTMASK */ if (a->F) { - addr = tcg_const_i32(19); + addr = tcg_constant_i32(19); gen_helper_v7m_msr(cpu_env, addr, tmp); - tcg_temp_free_i32(addr); } /* PRIMASK */ if (a->I) { - addr = tcg_const_i32(16); + addr = tcg_constant_i32(16); gen_helper_v7m_msr(cpu_env, addr, tmp); - tcg_temp_free_i32(addr); } - el = tcg_const_i32(s->current_el); - gen_helper_rebuild_hflags_m32(cpu_env, el); - tcg_temp_free_i32(el); - tcg_temp_free_i32(tmp); + gen_rebuild_hflags(s, false); gen_lookup_tb(s); return true; } @@ -8989,13 +9048,14 @@ static bool trans_CSEL(DisasContext *s, arg_CSEL *a) } /* In this insn input reg fields of 0b1111 mean "zero", not "PC" */ + zero = tcg_constant_i32(0); if (a->rn == 15) { - rn = tcg_const_i32(0); + rn = zero; } else { rn = load_reg(s, a->rn); } if (a->rm == 15) { - rm = tcg_const_i32(0); + rm = zero; } else { rm = load_reg(s, a->rm); } @@ -9017,10 +9077,8 @@ static bool trans_CSEL(DisasContext *s, arg_CSEL *a) } arm_test_cc(&c, a->fcond); - zero = tcg_const_i32(0); tcg_gen_movcond_i32(c.cond, rn, c.value, zero, rn, rm); arm_free_cc(&c); - tcg_temp_free_i32(zero); store_reg(s, a->rd, rn); tcg_temp_free_i32(rm); @@ -9040,8 +9098,16 @@ static void disas_arm_insn(DisasContext *s, unsigned int 
insn) * UsageFault exception. */ if (arm_dc_feature(s, ARM_FEATURE_M)) { - gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(), - default_exception_el(s)); + gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized()); + return; + } + + if (s->pstate_il) { + /* + * Illegal execution state. This has priority over BTI + * exceptions, but comes after instruction abort exceptions. + */ + gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate()); return; } @@ -9257,7 +9323,7 @@ static bool insn_crosses_page(CPUARMState *env, DisasContext *s) * boundary, so we cross the page if the first 16 bits indicate * that this is a 32 bit insn. */ - uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b); + uint16_t insn = arm_lduw_code(env, &s->base, s->base.pc_next, s->sctlr_b); return !thumb_insn_is_16bit(s, s->base.pc_next, insn); } @@ -9272,13 +9338,8 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) dc->isar = &cpu->isar; dc->condjmp = 0; - - dc->aarch64 = 0; - /* If we are coming from secure EL0 in a system with a 32-bit EL3, then - * there is no secure EL1, so we route exceptions to EL3. - */ - dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) && - !arm_el_is_aa64(env, 3); + dc->pc_save = dc->base.pc_first; + dc->aarch64 = false; dc->thumb = EX_TBFLAG_AM32(tb_flags, THUMB); dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE; condexec = EX_TBFLAG_AM32(tb_flags, CONDEXEC); @@ -9295,7 +9356,6 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) */ dc->eci = dc->condexec_mask = dc->condexec_cond = 0; dc->eci_handled = false; - dc->insn_eci_rewind = NULL; if (condexec & 0xf) { dc->condexec_mask = (condexec & 0xf) << 1; dc->condexec_cond = condexec >> 4; @@ -9313,20 +9373,20 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) #endif dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL); dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM); + dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL); if (arm_feature(env, ARM_FEATURE_M)) { dc->vfp_enabled = 1; dc->be_data = MO_TE; dc->v7m_handler_mode = EX_TBFLAG_M32(tb_flags, HANDLER); - dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) && - regime_is_secure(env, dc->mmu_idx); + dc->v8m_secure = EX_TBFLAG_M32(tb_flags, SECURE); dc->v8m_stackcheck = EX_TBFLAG_M32(tb_flags, STACKCHECK); dc->v8m_fpccr_s_wrong = EX_TBFLAG_M32(tb_flags, FPCCR_S_WRONG); dc->v7m_new_fp_ctxt_needed = EX_TBFLAG_M32(tb_flags, NEW_FP_CTXT_NEEDED); dc->v7m_lspact = EX_TBFLAG_M32(tb_flags, LSPACT); + dc->mve_no_pred = EX_TBFLAG_M32(tb_flags, MVE_NO_PRED); } else { - dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL); dc->sctlr_b = EX_TBFLAG_A32(tb_flags, SCTLR__B); dc->hstr_active = EX_TBFLAG_A32(tb_flags, HSTR_ACTIVE); dc->ns = EX_TBFLAG_A32(tb_flags, NS); @@ -9337,6 +9397,8 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN); dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE); } + dc->sme_trap_nonstreaming = + EX_TBFLAG_A32(tb_flags, SME_TRAP_NONSTREAMING); } dc->cp_regs = cpu->cp_regs; dc->features = env->features; @@ -9363,7 +9425,7 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK; /* If architectural single step active, limit to 1. 
*/ - if (is_singlestepping(dc)) { + if (dc->ss_active) { dc->base.max_insns = 1; } @@ -9416,9 +9478,7 @@ static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu) /* Reset the conditional execution bits immediately. This avoids complications trying to do it at the end of the block. */ if (dc->condexec_mask || dc->condexec_cond) { - TCGv_i32 tmp = tcg_temp_new_i32(); - tcg_gen_movi_i32(tmp, 0); - store_cpu_field(tmp, condexec_bits); + store_cpu_field_constant(0, condexec_bits); } } @@ -9431,17 +9491,21 @@ static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) * fields here. */ uint32_t condexec_bits; + target_ulong pc_arg = dc->base.pc_next; + if (TARGET_TB_PCREL) { + pc_arg &= ~TARGET_PAGE_MASK; + } if (dc->eci) { condexec_bits = dc->eci << 4; } else { condexec_bits = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1); } - tcg_gen_insn_start(dc->base.pc_next, condexec_bits, 0); + tcg_gen_insn_start(pc_arg, condexec_bits, 0); dc->insn_start = tcg_last_op(); } -static bool arm_pre_translate_insn(DisasContext *dc) +static bool arm_check_kernelpage(DisasContext *dc) { #ifdef CONFIG_USER_ONLY /* Intercept jump to the magic kernel page. */ @@ -9453,7 +9517,11 @@ static bool arm_pre_translate_insn(DisasContext *dc) return true; } #endif + return false; +} +static bool arm_check_ss_active(DisasContext *dc) +{ if (dc->ss_active && !dc->pstate_ss) { /* Singlestep state is Active-pending. * If we're in this state at the start of a TB then either @@ -9476,8 +9544,11 @@ static bool arm_pre_translate_insn(DisasContext *dc) static void arm_post_translate_insn(DisasContext *dc) { - if (dc->condjmp && !dc->base.is_jmp) { - gen_set_label(dc->condlabel); + if (dc->condjmp && dc->base.is_jmp == DISAS_NEXT) { + if (dc->pc_save != dc->condlabel.pc_save) { + gen_update_pc(dc, dc->condlabel.pc_save - dc->pc_save); + } + gen_set_label(dc->condlabel.label); dc->condjmp = 0; } translator_loop_temp_check(&dc->base); @@ -9487,17 +9558,38 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); CPUARMState *env = cpu->env_ptr; + uint32_t pc = dc->base.pc_next; unsigned int insn; - if (arm_pre_translate_insn(dc)) { - dc->base.pc_next += 4; + /* Singlestep exceptions have the highest priority. */ + if (arm_check_ss_active(dc)) { + dc->base.pc_next = pc + 4; return; } - dc->pc_curr = dc->base.pc_next; - insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b); + if (pc & 3) { + /* + * PC alignment fault. This has priority over the instruction abort + * that we would receive from a translation fault via arm_ldl_code + * (or the execution of the kernelpage entrypoint). This should only + * be possible after an indirect branch, at the start of the TB. 
+ */ + assert(dc->base.num_insns == 1); + gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc)); + dc->base.is_jmp = DISAS_NORETURN; + dc->base.pc_next = QEMU_ALIGN_UP(pc, 4); + return; + } + + if (arm_check_kernelpage(dc)) { + dc->base.pc_next = pc + 4; + return; + } + + dc->pc_curr = pc; + insn = arm_ldl_code(env, &dc->base, pc, dc->sctlr_b); dc->insn = insn; - dc->base.pc_next += 4; + dc->base.pc_next = pc + 4; disas_arm_insn(dc, insn); arm_post_translate_insn(dc); @@ -9556,26 +9648,42 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); CPUARMState *env = cpu->env_ptr; + uint32_t pc = dc->base.pc_next; uint32_t insn; bool is_16bit; + /* TCG op to rewind to if this turns out to be an invalid ECI state */ + TCGOp *insn_eci_rewind = NULL; + target_ulong insn_eci_pc_save = -1; - if (arm_pre_translate_insn(dc)) { - dc->base.pc_next += 2; + /* Misaligned thumb PC is architecturally impossible. */ + assert((dc->base.pc_next & 1) == 0); + + if (arm_check_ss_active(dc) || arm_check_kernelpage(dc)) { + dc->base.pc_next = pc + 2; return; } - dc->pc_curr = dc->base.pc_next; - insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b); + dc->pc_curr = pc; + insn = arm_lduw_code(env, &dc->base, pc, dc->sctlr_b); is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn); - dc->base.pc_next += 2; + pc += 2; if (!is_16bit) { - uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b); - + uint32_t insn2 = arm_lduw_code(env, &dc->base, pc, dc->sctlr_b); insn = insn << 16 | insn2; - dc->base.pc_next += 2; + pc += 2; } + dc->base.pc_next = pc; dc->insn = insn; + if (dc->pstate_il) { + /* + * Illegal execution state. This has priority over BTI + * exceptions, but comes after instruction abort exceptions. + */ + gen_exception_insn(dc, 0, EXCP_UDEF, syn_illegalstate()); + return; + } + if (dc->eci) { /* * For M-profile continuable instructions, ECI/ICI handling @@ -9607,7 +9715,8 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) * insn" case. We will rewind to the marker (ie throwing away * all the generated code) and instead emit "take exception". */ - dc->insn_eci_rewind = tcg_last_op(); + insn_eci_rewind = tcg_last_op(); + insn_eci_pc_save = dc->pc_save; } if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) { @@ -9643,10 +9752,10 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) * Insn wasn't valid for ECI/ICI at all: undo what we * just generated and instead emit an exception */ - tcg_remove_ops_after(dc->insn_eci_rewind); + tcg_remove_ops_after(insn_eci_rewind); + dc->pc_save = insn_eci_pc_save; dc->condjmp = 0; - gen_exception_insn(dc, dc->pc_curr, EXCP_INVSTATE, syn_uncategorized(), - default_exception_el(dc)); + gen_exception_insn(dc, 0, EXCP_INVSTATE, syn_uncategorized()); } arm_post_translate_insn(dc); @@ -9687,27 +9796,26 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) * insn codepath itself. */ gen_bx_excret_final_code(dc); - } else if (unlikely(is_singlestepping(dc))) { + } else if (unlikely(dc->ss_active)) { /* Unconditional and "condition passed" instruction codepath. 
*/ switch (dc->base.is_jmp) { case DISAS_SWI: gen_ss_advance(dc); - gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb), - default_exception_el(dc)); + gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb)); break; case DISAS_HVC: gen_ss_advance(dc); - gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2); + gen_exception_el(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2); break; case DISAS_SMC: gen_ss_advance(dc); - gen_exception(EXCP_SMC, syn_aa32_smc(), 3); + gen_exception_el(EXCP_SMC, syn_aa32_smc(), 3); break; case DISAS_NEXT: case DISAS_TOO_MANY: case DISAS_UPDATE_EXIT: case DISAS_UPDATE_NOCHAIN: - gen_set_pc_im(dc, dc->base.pc_next); + gen_update_pc(dc, curr_insn_len(dc)); /* fall through */ default: /* FIXME: Single stepping a WFI insn will not halt the CPU. */ @@ -9728,16 +9836,16 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) switch (dc->base.is_jmp) { case DISAS_NEXT: case DISAS_TOO_MANY: - gen_goto_tb(dc, 1, dc->base.pc_next); + gen_goto_tb(dc, 1, curr_insn_len(dc)); break; case DISAS_UPDATE_NOCHAIN: - gen_set_pc_im(dc, dc->base.pc_next); + gen_update_pc(dc, curr_insn_len(dc)); /* fall through */ case DISAS_JUMP: gen_goto_ptr(); break; case DISAS_UPDATE_EXIT: - gen_set_pc_im(dc, dc->base.pc_next); + gen_update_pc(dc, curr_insn_len(dc)); /* fall through */ default: /* indicate that the hash table must be used to find the next TB */ @@ -9747,18 +9855,13 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) /* nothing more to generate */ break; case DISAS_WFI: - { - TCGv_i32 tmp = tcg_const_i32((dc->thumb && - !(dc->insn & (1U << 31))) ? 2 : 4); - - gen_helper_wfi(cpu_env, tmp); - tcg_temp_free_i32(tmp); - /* The helper doesn't necessarily throw an exception, but we + gen_helper_wfi(cpu_env, tcg_constant_i32(curr_insn_len(dc))); + /* + * The helper doesn't necessarily throw an exception, but we * must go back to the main loop to check for interrupts anyway. 
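The expression deleted in the WFI case, (dc->thumb && !(dc->insn & (1U << 31))) ? 2 : 4, was just recomputing the byte length of the current instruction (a 32-bit Thumb encoding always has bit 31 of the combined word set), so curr_insn_len(dc) gives the same value directly. Side by side (sketch):

/* Both expressions yield 2 for a 16-bit Thumb insn and 4 otherwise. */
int old_len = (dc->thumb && !(dc->insn & (1U << 31))) ? 2 : 4;
int new_len = curr_insn_len(dc);    /* dc->base.pc_next - dc->pc_curr */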
*/ tcg_gen_exit_tb(NULL, 0); break; - } case DISAS_WFE: gen_helper_wfe(cpu_env); break; @@ -9766,37 +9869,37 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) gen_helper_yield(cpu_env); break; case DISAS_SWI: - gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb), - default_exception_el(dc)); + gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb)); break; case DISAS_HVC: - gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2); + gen_exception_el(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2); break; case DISAS_SMC: - gen_exception(EXCP_SMC, syn_aa32_smc(), 3); + gen_exception_el(EXCP_SMC, syn_aa32_smc(), 3); break; } } if (dc->condjmp) { /* "Condition failed" instruction codepath for the branch/trap insn */ - gen_set_label(dc->condlabel); + set_disas_label(dc, dc->condlabel); gen_set_condexec(dc); - if (unlikely(is_singlestepping(dc))) { - gen_set_pc_im(dc, dc->base.pc_next); + if (unlikely(dc->ss_active)) { + gen_update_pc(dc, curr_insn_len(dc)); gen_singlestep_exception(dc); } else { - gen_goto_tb(dc, 1, dc->base.pc_next); + gen_goto_tb(dc, 1, curr_insn_len(dc)); } } } -static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu) +static void arm_tr_disas_log(const DisasContextBase *dcbase, + CPUState *cpu, FILE *logfile) { DisasContext *dc = container_of(dcbase, DisasContext, base); - qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first)); - log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first)); + target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size); } static const TranslatorOps arm_translator_ops = { @@ -9818,7 +9921,8 @@ static const TranslatorOps thumb_translator_ops = { }; /* generate intermediate code for basic block 'tb'. */ -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext dc = { }; const TranslatorOps *ops = &arm_translator_ops; @@ -9833,19 +9937,5 @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns) } #endif - translator_loop(ops, &dc.base, cpu, tb, max_insns); -} - -void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, - target_ulong *data) -{ - if (is_a64(env)) { - env->pc = data[0]; - env->condexec_bits = 0; - env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT; - } else { - env->regs[15] = data[0]; - env->condexec_bits = data[1]; - env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT; - } + translator_loop(cpu, tb, max_insns, pc, host_pc, ops, &dc.base); } diff --git a/target/arm/translate.h b/target/arm/translate.h index 241596c5bd..3cdc7dbc2f 100644 --- a/target/arm/translate.h +++ b/target/arm/translate.h @@ -6,18 +6,42 @@ /* internal defines */ + +/* + * Save pc_save across a branch, so that we may restore the value from + * before the branch at the point the label is emitted. + */ +typedef struct DisasLabel { + TCGLabel *label; + target_ulong pc_save; +} DisasLabel; + typedef struct DisasContext { DisasContextBase base; const ARMISARegisters *isar; /* The address of the current instruction being translated. */ target_ulong pc_curr; + /* + * For TARGET_TB_PCREL, the full value of cpu_pc is not known + * (although the page offset is known). For convenience, the + * translation loop uses the full virtual address that triggered + * the translation, from base.pc_start through pc_curr. 
+ * For efficiency, we do not update cpu_pc for every instruction. + * Instead, pc_save has the value of pc_curr at the time of the + * last update to cpu_pc, which allows us to compute the addend + * needed to bring cpu_pc current: pc_curr - pc_save. + * If cpu_pc now contains the destination of an indirect branch, + * pc_save contains -1 to indicate that relative updates are no + * longer possible. + */ + target_ulong pc_save; target_ulong page_start; uint32_t insn; /* Nonzero if this instruction has been conditionally skipped. */ int condjmp; /* The label that will be jumped to when the instruction is skipped. */ - TCGLabel *condlabel; + DisasLabel condlabel; /* Thumb-2 conditional execution bits. */ int condexec_mask; int condexec_cond; @@ -28,9 +52,6 @@ typedef struct DisasContext { * after decode (ie after any UNDEF checks) */ bool eci_handled; - /* TCG op to rewind to if this turns out to be an invalid ECI state */ - TCGOp *insn_eci_rewind; - int thumb; int sctlr_b; MemOp be_data; #if !defined(CONFIG_USER_ONLY) @@ -43,9 +64,9 @@ typedef struct DisasContext { bool ns; /* Use non-secure CPREG bank on access */ int fp_excp_el; /* FP exception EL or 0 if enabled */ int sve_excp_el; /* SVE exception EL or 0 if enabled */ - int sve_len; /* SVE vector length in bytes */ - /* Flag indicating that exceptions from secure mode are routed to EL3. */ - bool secure_routed_to_el3; + int sme_excp_el; /* SME exception EL or 0 if enabled */ + int vl; /* current vector length in bytes */ + int svl; /* current streaming vector length in bytes */ bool vfp_enabled; /* FP enabled via FPSCR.EN */ int vec_len; int vec_stride; @@ -59,12 +80,11 @@ typedef struct DisasContext { * so that top level loop can generate correct syndrome information. */ uint32_t svc_imm; - int aarch64; int current_el; - /* Debug target exception level for single-step exceptions */ - int debug_target_el; GHashTable *cp_regs; uint64_t features; /* CPU features bits */ + bool aarch64; + bool thumb; /* Because unallocated encodings generate different exception syndrome * information from traps due to FP being disabled, we can't do a single * "is fp access disabled" check at a high level in the decode tree. @@ -98,6 +118,18 @@ typedef struct DisasContext { bool hstr_active; /* True if memory operations require alignment */ bool align_mem; + /* True if PSTATE.IL is set */ + bool pstate_il; + /* True if PSTATE.SM is set. */ + bool pstate_sm; + /* True if PSTATE.ZA is set. */ + bool pstate_za; + /* True if non-streaming insns should raise an SME Streaming exception. */ + bool sme_trap_nonstreaming; + /* True if the current instruction is non-streaming. */ + bool is_nonstreaming; + /* True if MVE insns are definitely not predicated by VPR or LTPSIZE */ + bool mve_no_pred; /* * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI. * < 0, set by the current instruction. 
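The addend scheme described in that comment is compact but easy to misread, so here it is in isolation. This is a sketch only: the function name and the use of cpu_R[15] as the PC global are illustrative, and the in-tree helpers additionally cover the non-PCREL case (absolute write) and the pc_save == -1 case.

/* Bring cpu_pc (last known to hold pc_save) up to pc_curr + diff. */
static void update_pc_sketch(DisasContext *s, target_long diff)
{
    target_ulong dest = s->pc_curr + diff;

    tcg_gen_addi_i32(cpu_R[15], cpu_R[15], dest - s->pc_save);
    s->pc_save = dest;
}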
@@ -146,6 +178,11 @@ static inline int plus_2(DisasContext *s, int x) return x + 2; } +static inline int plus_12(DisasContext *s, int x) +{ + return x + 12; +} + static inline int times_2(DisasContext *s, int x) { return x * 2; @@ -181,6 +218,12 @@ static inline int rsub_8(DisasContext *s, int x) return 8 - x; } +static inline int neon_3same_fp_size(DisasContext *s, int x) +{ + /* Convert 0==fp32, 1==fp16 into a MO_* value */ + return MO_32 - x; +} + static inline int arm_dc_feature(DisasContext *dc, int feature) { return (dc->features & (1ULL << feature)) != 0; @@ -191,20 +234,6 @@ static inline int get_mem_index(DisasContext *s) return arm_to_core_mmu_idx(s->mmu_idx); } -/* Function used to determine the target exception EL when otherwise not known - * or default. - */ -static inline int default_exception_el(DisasContext *s) -{ - /* If we are coming from secure EL0 in a system with a 32-bit EL3, then - * there is no secure EL1, so we route exceptions to EL3. Otherwise, - * exceptions can only be routed to ELs above 1, so we target the higher of - * 1 or the current EL. - */ - return (s->mmu_idx == ARMMMUIdx_SE10_0 && s->secure_routed_to_el3) - ? 3 : MAX(1, s->current_el); -} - static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn) { /* We don't need to save all of the syndrome so we mask and shift @@ -219,6 +248,11 @@ static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn) s->insn_start = NULL; } +static inline int curr_insn_len(DisasContext *s) +{ + return s->base.pc_next - s->pc_curr; +} + /* is_jmp field values */ #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */ /* CPU state was modified dynamically; exit to main loop for interrupts. */ @@ -242,7 +276,7 @@ static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn) * For instructions which want an immediate exit to the main loop, as opposed * to attempting to use lookup_and_goto_ptr. Unlike DISAS_UPDATE_EXIT, this * doesn't write the PC on exiting the translation loop so you need to ensure - * something (gen_a64_set_pc_im or runtime helper) has done so before we reach + * something (gen_a64_update_pc or runtime helper) has done so before we reach * return from cpu_tb_exec. 
*/ #define DISAS_EXIT DISAS_TARGET_9 @@ -251,14 +285,14 @@ static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn) #ifdef TARGET_AARCH64 void a64_translate_init(void); -void gen_a64_set_pc_im(uint64_t val); +void gen_a64_update_pc(DisasContext *s, target_long diff); extern const TranslatorOps aarch64_translator_ops; #else static inline void a64_translate_init(void) { } -static inline void gen_a64_set_pc_im(uint64_t val) +static inline void gen_a64_update_pc(DisasContext *s, target_long diff) { } #endif @@ -269,8 +303,10 @@ void arm_jump_cc(DisasCompare *cmp, TCGLabel *label); void arm_gen_test_cc(int cc, TCGLabel *label); MemOp pow2_align(unsigned i); void unallocated_encoding(DisasContext *s); -void gen_exception_insn(DisasContext *s, uint64_t pc, int excp, - uint32_t syn, uint32_t target_el); +void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp, + uint32_t syn, uint32_t target_el); +void gen_exception_insn(DisasContext *s, target_long pc_diff, + int excp, uint32_t syn); /* Return state of Alternate Half-precision flag, caller frees result */ static inline TCGv_i32 get_ahp_flag(void) @@ -319,33 +355,12 @@ static inline void gen_ss_advance(DisasContext *s) } } -static inline void gen_exception(int excp, uint32_t syndrome, - uint32_t target_el) -{ - TCGv_i32 tcg_excp = tcg_const_i32(excp); - TCGv_i32 tcg_syn = tcg_const_i32(syndrome); - TCGv_i32 tcg_el = tcg_const_i32(target_el); - - gen_helper_exception_with_syndrome(cpu_env, tcg_excp, - tcg_syn, tcg_el); - - tcg_temp_free_i32(tcg_el); - tcg_temp_free_i32(tcg_syn); - tcg_temp_free_i32(tcg_excp); -} - /* Generate an architectural singlestep exception */ static inline void gen_swstep_exception(DisasContext *s, int isv, int ex) { - bool same_el = (s->debug_target_el == s->current_el); - - /* - * If singlestep is targeting a lower EL than the current one, - * then s->ss_active must be false and we can never get here. - */ - assert(s->debug_target_el >= s->current_el); - - gen_exception(EXCP_UDEF, syn_swstep(same_el, isv, ex), s->debug_target_el); + /* Fill in the same_el field of the syndrome in the helper. */ + uint32_t syn = syn_swstep(false, isv, ex); + gen_helper_exception_swstep(cpu_env, tcg_constant_i32(syn)); } /* @@ -573,4 +588,44 @@ static inline MemOp finalize_memop(DisasContext *s, MemOp opc) */ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op); +/* + * gen_disas_label: + * Create a label and cache a copy of pc_save. + */ +static inline DisasLabel gen_disas_label(DisasContext *s) +{ + return (DisasLabel){ + .label = gen_new_label(), + .pc_save = s->pc_save, + }; +} + +/* + * set_disas_label: + * Emit a label and restore the cached copy of pc_save. + */ +static inline void set_disas_label(DisasContext *s, DisasLabel l) +{ + gen_set_label(l.label); + s->pc_save = l.pc_save; +} + +/* + * Helpers for implementing sets of trans_* functions. + * Defer the implementation of NAME to FUNC, with optional extra arguments. + */ +#define TRANS(NAME, FUNC, ...) \ + static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \ + { return FUNC(s, __VA_ARGS__); } +#define TRANS_FEAT(NAME, FEAT, FUNC, ...) \ + static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \ + { return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); } + +#define TRANS_FEAT_NONSTREAMING(NAME, FEAT, FUNC, ...) 
\ + static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \ + { \ + s->is_nonstreaming = true; \ + return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); \ + } + #endif /* TARGET_ARM_TRANSLATE_H */ diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c index 034f6b84f7..f59d3b26ea 100644 --- a/target/arm/vec_helper.c +++ b/target/arm/vec_helper.c @@ -127,6 +127,32 @@ const uint64_t expand_pred_b_data[256] = { 0xffffffffffffffff, }; +/* + * Similarly for half-word elements. + * for (i = 0; i < 256; ++i) { + * unsigned long m = 0; + * if (i & 0xaa) { + * continue; + * } + * for (j = 0; j < 8; j += 2) { + * if ((i >> j) & 1) { + * m |= 0xfffful << (j << 3); + * } + * } + * printf("[0x%x] = 0x%016lx,\n", i, m); + * } + */ +const uint64_t expand_pred_h_data[0x55 + 1] = { + [0x01] = 0x000000000000ffff, [0x04] = 0x00000000ffff0000, + [0x05] = 0x00000000ffffffff, [0x10] = 0x0000ffff00000000, + [0x11] = 0x0000ffff0000ffff, [0x14] = 0x0000ffffffff0000, + [0x15] = 0x0000ffffffffffff, [0x40] = 0xffff000000000000, + [0x41] = 0xffff00000000ffff, [0x44] = 0xffff0000ffff0000, + [0x45] = 0xffff0000ffffffff, [0x50] = 0xffffffff00000000, + [0x51] = 0xffffffff0000ffff, [0x54] = 0xffffffffffff0000, + [0x55] = 0xffffffffffffffff, +}; + /* Signed saturating rounding doubling multiply-accumulate high half, 8-bit */ int8_t do_sqrdmlah_b(int8_t src1, int8_t src2, int8_t src3, bool neg, bool round) @@ -2028,11 +2054,23 @@ static uint64_t expand_byte_to_half(uint64_t x) | ((x & 0xff000000) << 24); } -static uint64_t pmull_h(uint64_t op1, uint64_t op2) +uint64_t pmull_w(uint64_t op1, uint64_t op2) { uint64_t result = 0; int i; + for (i = 0; i < 16; ++i) { + uint64_t mask = (op1 & 0x0000000100000001ull) * 0xffffffff; + result ^= op2 & mask; + op1 >>= 1; + op2 <<= 1; + } + return result; +} +uint64_t pmull_h(uint64_t op1, uint64_t op2) +{ + uint64_t result = 0; + int i; for (i = 0; i < 8; ++i) { uint64_t mask = (op1 & 0x0001000100010001ull) * 0xffff; result ^= op2 & mask; @@ -2519,7 +2557,7 @@ DO_MMLA_B(gvec_usmmla_b, do_usmmla_b) * BFloat16 Dot Product */ -static float32 bfdotadd(float32 sum, uint32_t e1, uint32_t e2) +float32 bfdotadd(float32 sum, uint32_t e1, uint32_t e2) { /* FPCR is ignored for BFDOT and BFMMLA. */ float_status bf_status = { @@ -2652,3 +2690,27 @@ void HELPER(gvec_bfmlal_idx)(void *vd, void *vn, void *vm, } clear_tail(d, opr_sz, simd_maxsz(desc)); } + +#define DO_CLAMP(NAME, TYPE) \ +void HELPER(NAME)(void *d, void *n, void *m, void *a, uint32_t desc) \ +{ \ + intptr_t i, opr_sz = simd_oprsz(desc); \ + for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \ + TYPE aa = *(TYPE *)(a + i); \ + TYPE nn = *(TYPE *)(n + i); \ + TYPE mm = *(TYPE *)(m + i); \ + TYPE dd = MIN(MAX(aa, nn), mm); \ + *(TYPE *)(d + i) = dd; \ + } \ + clear_tail(d, opr_sz, simd_maxsz(desc)); \ +} + +DO_CLAMP(gvec_sclamp_b, int8_t) +DO_CLAMP(gvec_sclamp_h, int16_t) +DO_CLAMP(gvec_sclamp_s, int32_t) +DO_CLAMP(gvec_sclamp_d, int64_t) + +DO_CLAMP(gvec_uclamp_b, uint8_t) +DO_CLAMP(gvec_uclamp_h, uint16_t) +DO_CLAMP(gvec_uclamp_s, uint32_t) +DO_CLAMP(gvec_uclamp_d, uint64_t) diff --git a/target/arm/vec_internal.h b/target/arm/vec_internal.h index 865d213944..1f4ed80ff7 100644 --- a/target/arm/vec_internal.h +++ b/target/arm/vec_internal.h @@ -17,8 +17,8 @@ * License along with this library; if not, see . 
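A worked entry from the expand_pred_h_data table above, plus the reason for the "& 0x55" in expand_pred_h(): for .H elements only every second predicate bit is meaningful. Standalone check (illustrative):

#include <assert.h>
#include <stdint.h>

extern const uint64_t expand_pred_h_data[0x55 + 1];

static void expand_pred_h_example(void)
{
    /* Predicate bits 2 and 4 select half-word elements 1 and 2. */
    assert(expand_pred_h_data[0x14] == 0x0000ffffffff0000ull);
    /* Bit 5 falls inside element 2 and is ignored: 0x34 & 0x55 == 0x14. */
    assert((0x34 & 0x55) == 0x14);
}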
*/ -#ifndef TARGET_ARM_VEC_INTERNALS_H -#define TARGET_ARM_VEC_INTERNALS_H +#ifndef TARGET_ARM_VEC_INTERNAL_H +#define TARGET_ARM_VEC_INTERNAL_H /* * Note that vector data is stored in host-endian 64-bit chunks, @@ -29,7 +29,7 @@ * The H1_ macros are used when performing byte arithmetic and then * casting the final pointer to a type of size N. */ -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN #define H1(x) ((x) ^ 7) #define H1_2(x) ((x) ^ 6) #define H1_4(x) ((x) ^ 4) @@ -50,8 +50,21 @@ #define H8(x) (x) #define H1_8(x) (x) -/* Data for expanding active predicate bits to bytes, for byte elements. */ +/* + * Expand active predicate bits to bytes, for byte elements. + */ extern const uint64_t expand_pred_b_data[256]; +static inline uint64_t expand_pred_b(uint8_t byte) +{ + return expand_pred_b_data[byte]; +} + +/* Similarly for half-word elements. */ +extern const uint64_t expand_pred_h_data[0x55 + 1]; +static inline uint64_t expand_pred_h(uint8_t byte) +{ + return expand_pred_h_data[byte & 0x55]; +} static inline void clear_tail(void *vd, uintptr_t opr_sz, uintptr_t max_sz) { @@ -206,4 +219,28 @@ int16_t do_sqrdmlah_h(int16_t, int16_t, int16_t, bool, bool, uint32_t *); int32_t do_sqrdmlah_s(int32_t, int32_t, int32_t, bool, bool, uint32_t *); int64_t do_sqrdmlah_d(int64_t, int64_t, int64_t, bool, bool); -#endif /* TARGET_ARM_VEC_INTERNALS_H */ +/* + * 8 x 8 -> 16 vector polynomial multiply where the inputs are + * in the low 8 bits of each 16-bit element +*/ +uint64_t pmull_h(uint64_t op1, uint64_t op2); +/* + * 16 x 16 -> 32 vector polynomial multiply where the inputs are + * in the low 16 bits of each 32-bit element + */ +uint64_t pmull_w(uint64_t op1, uint64_t op2); + +/** + * bfdotadd: + * @sum: addend + * @e1, @e2: multiplicand vectors + * + * BFloat16 2-way dot product of @e1 & @e2, accumulating with @sum. + * The @e1 and @e2 operands correspond to the 32-bit source vector + * slots and contain two Bfloat16 values each. + * + * Corresponds to the ARM pseudocode function BFDotAdd. + */ +float32 bfdotadd(float32 sum, uint32_t e1, uint32_t e2); + +#endif /* TARGET_ARM_VEC_INTERNAL_H */ diff --git a/target/avr/cpu-qom.h b/target/avr/cpu-qom.h index 9fa6989c18..b5c3507d6d 100644 --- a/target/avr/cpu-qom.h +++ b/target/avr/cpu-qom.h @@ -18,22 +18,20 @@ * */ -#ifndef QEMU_AVR_QOM_H -#define QEMU_AVR_QOM_H +#ifndef TARGET_AVR_CPU_QOM_H +#define TARGET_AVR_CPU_QOM_H #include "hw/core/cpu.h" #include "qom/object.h" #define TYPE_AVR_CPU "avr-cpu" -OBJECT_DECLARE_TYPE(AVRCPU, AVRCPUClass, - AVR_CPU) +OBJECT_DECLARE_CPU_TYPE(AVRCPU, AVRCPUClass, AVR_CPU) /** * AVRCPUClass: * @parent_realize: The parent class' realize handler. * @parent_reset: The parent class' reset handler. - * @vr: Version Register value. * * A AVR CPU model. 
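pmull_h() and pmull_w(), declared above, are lane-wise carry-less (polynomial) multiplies. The per-lane scalar operation, written out as standalone C (the function name is illustrative):

#include <stdint.h>

/* Carry-less multiply of two 8-bit polynomials into a 16-bit product:
 * the per-lane operation behind pmull_h(); pmull_w() is the same idea
 * with 16-bit inputs and 32-bit products.
 */
static uint16_t clmul_8x8(uint8_t a, uint8_t b)
{
    uint16_t r = 0;

    for (int i = 0; i < 8; i++) {
        if (a & (1u << i)) {
            r ^= (uint16_t)b << i;
        }
    }
    return r;
}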
*/ @@ -46,4 +44,4 @@ struct AVRCPUClass { }; -#endif /* !defined (QEMU_AVR_CPU_QOM_H) */ +#endif /* TARGET_AVR_CPU_QOM_H */ diff --git a/target/avr/cpu.c b/target/avr/cpu.c index ea14175ca5..c7295b488d 100644 --- a/target/avr/cpu.c +++ b/target/avr/cpu.c @@ -32,6 +32,13 @@ static void avr_cpu_set_pc(CPUState *cs, vaddr value) cpu->env.pc_w = value / 2; /* internally PC points to words */ } +static vaddr avr_cpu_get_pc(CPUState *cs) +{ + AVRCPU *cpu = AVR_CPU(cs); + + return cpu->env.pc_w * 2; +} + static bool avr_cpu_has_work(CPUState *cs) { AVRCPU *cpu = AVR_CPU(cs); @@ -47,7 +54,17 @@ static void avr_cpu_synchronize_from_tb(CPUState *cs, AVRCPU *cpu = AVR_CPU(cs); CPUAVRState *env = &cpu->env; - env->pc_w = tb->pc / 2; /* internally PC points to words */ + env->pc_w = tb_pc(tb) / 2; /* internally PC points to words */ +} + +static void avr_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + AVRCPU *cpu = AVR_CPU(cs); + CPUAVRState *env = &cpu->env; + + env->pc_w = data[0]; } static void avr_cpu_reset(DeviceState *ds) @@ -195,12 +212,10 @@ static const struct SysemuCPUOps avr_sysemu_ops = { static const struct TCGCPUOps avr_tcg_ops = { .initialize = avr_cpu_tcg_init, .synchronize_from_tb = avr_cpu_synchronize_from_tb, + .restore_state_to_opc = avr_restore_state_to_opc, .cpu_exec_interrupt = avr_cpu_exec_interrupt, .tlb_fill = avr_cpu_tlb_fill, - -#ifndef CONFIG_USER_ONLY .do_interrupt = avr_cpu_do_interrupt, -#endif /* !CONFIG_USER_ONLY */ }; static void avr_cpu_class_init(ObjectClass *oc, void *data) @@ -217,7 +232,7 @@ static void avr_cpu_class_init(ObjectClass *oc, void *data) cc->has_work = avr_cpu_has_work; cc->dump_state = avr_cpu_dump_state; cc->set_pc = avr_cpu_set_pc; - cc->memory_rw_debug = avr_cpu_memory_rw_debug; + cc->get_pc = avr_cpu_get_pc; dc->vmsd = &vms_avr_cpu; cc->sysemu_ops = &avr_sysemu_ops; cc->disas_set_info = avr_cpu_disas_set_info; diff --git a/target/avr/cpu.h b/target/avr/cpu.h index 93e3faa0a9..96419c0c2b 100644 --- a/target/avr/cpu.h +++ b/target/avr/cpu.h @@ -108,9 +108,7 @@ typedef enum AVRFeature { AVR_FEATURE_RAMPZ, } AVRFeature; -typedef struct CPUAVRState CPUAVRState; - -struct CPUAVRState { +typedef struct CPUArchState { uint32_t pc_w; /* 0x003fffff up to 22 bits */ uint32_t sregC; /* 0x00000001 1 bit */ @@ -137,7 +135,7 @@ struct CPUAVRState { bool fullacc; /* CPU/MEM if true MEM only otherwise */ uint64_t features; -}; +} CPUAVRState; /** * AVRCPU: @@ -145,14 +143,14 @@ struct CPUAVRState { * * A AVR CPU. 
*/ -typedef struct AVRCPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ CPUNegativeOffsetState neg; CPUAVRState env; -} AVRCPU; +}; extern const struct VMStateDescription vms_avr_cpu; @@ -175,7 +173,6 @@ static inline void set_avr_feature(CPUAVRState *env, int feature) } #define cpu_list avr_cpu_list -#define cpu_signal_handler cpu_avr_signal_handler #define cpu_mmu_index avr_cpu_mmu_index static inline int avr_cpu_mmu_index(CPUAVRState *env, bool ifetch) @@ -187,9 +184,6 @@ void avr_cpu_tcg_init(void); void avr_cpu_list(void); int cpu_avr_exec(CPUState *cpu); -int cpu_avr_signal_handler(int host_signum, void *pinfo, void *puc); -int avr_cpu_memory_rw_debug(CPUState *cs, vaddr address, uint8_t *buf, - int len, bool is_write); enum { TB_FLAGS_FULL_ACCESS = 1, @@ -249,9 +243,6 @@ bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); -typedef CPUAVRState CPUArchState; -typedef AVRCPU ArchCPU; - #include "exec/cpu-all.h" -#endif /* !defined (QEMU_AVR_CPU_H) */ +#endif /* QEMU_AVR_CPU_H */ diff --git a/target/avr/helper.c b/target/avr/helper.c index 981c29da45..156dde4e92 100644 --- a/target/avr/helper.c +++ b/target/avr/helper.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "hw/core/tcg-cpu-ops.h" #include "exec/exec-all.h" @@ -27,36 +28,41 @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { - bool ret = false; - CPUClass *cc = CPU_GET_CLASS(cs); AVRCPU *cpu = AVR_CPU(cs); CPUAVRState *env = &cpu->env; + /* + * We cannot separate a skip from the next instruction, + * as the skip would not be preserved across the interrupt. + * Separating the two insn normally only happens at page boundaries. + */ + if (env->skip) { + return false; + } + if (interrupt_request & CPU_INTERRUPT_RESET) { if (cpu_interrupts_enabled(env)) { cs->exception_index = EXCP_RESET; - cc->tcg_ops->do_interrupt(cs); + avr_cpu_do_interrupt(cs); cs->interrupt_request &= ~CPU_INTERRUPT_RESET; - - ret = true; + return true; } } if (interrupt_request & CPU_INTERRUPT_HARD) { if (cpu_interrupts_enabled(env) && env->intsrc != 0) { int index = ctz32(env->intsrc); cs->exception_index = EXCP_INT(index); - cc->tcg_ops->do_interrupt(cs); + avr_cpu_do_interrupt(cs); env->intsrc &= env->intsrc - 1; /* clear the interrupt */ if (!env->intsrc) { cs->interrupt_request &= ~CPU_INTERRUPT_HARD; } - - ret = true; + return true; } } - return ret; + return false; } void avr_cpu_do_interrupt(CPUState *cs) @@ -92,12 +98,6 @@ void avr_cpu_do_interrupt(CPUState *cs) cs->exception_index = -1; } -int avr_cpu_memory_rw_debug(CPUState *cs, vaddr addr, uint8_t *buf, - int len, bool is_write) -{ - return cpu_memory_rw_debug(cs, addr, buf, len, is_write); -} - hwaddr avr_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { return addr; /* I assume 1:1 address correspondence */ @@ -107,38 +107,50 @@ bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { - int prot = 0; - MemTxAttrs attrs = {}; + int prot, page_size = TARGET_PAGE_SIZE; uint32_t paddr; address &= TARGET_PAGE_MASK; if (mmu_idx == MMU_CODE_IDX) { - /* access to code in flash */ + /* Access to code in flash. */ paddr = OFFSET_CODE + address; prot = PAGE_READ | PAGE_EXEC; - if (paddr + TARGET_PAGE_SIZE > OFFSET_DATA) { + if (paddr >= OFFSET_DATA) { + /* + * This should not be possible via any architectural operations. 
+             * There is certainly not an exception that we can deliver.
+             * Accept probing that might come from generic code.
+             */
+            if (probe) {
+                return false;
+            }
             error_report("execution left flash memory");
             abort();
         }
-    } else if (address < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
-        /*
-         * access to CPU registers, exit and rebuilt this TB to use full access
-         * incase it touches specially handled registers like SREG or SP
-         */
-        AVRCPU *cpu = AVR_CPU(cs);
-        CPUAVRState *env = &cpu->env;
-        env->fullacc = 1;
-        cpu_loop_exit_restore(cs, retaddr);
     } else {
-        /* access to memory. nothing special */
+        /* Access to memory. */
         paddr = OFFSET_DATA + address;
         prot = PAGE_READ | PAGE_WRITE;
+        if (address < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
+            /*
+             * Access to CPU registers, exit and rebuild this TB to use
+             * full access in case it touches specially handled registers
+             * like SREG or SP. For probing, set page_size = 1, in order
+             * to force tlb_fill to be called for the next access.
+             */
+            if (probe) {
+                page_size = 1;
+            } else {
+                AVRCPU *cpu = AVR_CPU(cs);
+                CPUAVRState *env = &cpu->env;
+                env->fullacc = 1;
+                cpu_loop_exit_restore(cs, retaddr);
+            }
+        }
     }
-    tlb_set_page_with_attrs(cs, address, paddr, attrs, prot,
-                            mmu_idx, TARGET_PAGE_SIZE);
-
+    tlb_set_page(cs, address, paddr, prot, mmu_idx, page_size);
     return true;
 }
diff --git a/target/avr/translate.c b/target/avr/translate.c
index 1111e08b83..2bed56f135 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -70,11 +70,9 @@ static const char reg_names[NUMBER_OF_CPU_REGISTERS][8] = {
 };
 #define REG(x) (cpu_r[x])
 
-enum {
-    DISAS_EXIT   = DISAS_TARGET_0,  /* We want return to the cpu main loop. */
-    DISAS_LOOKUP = DISAS_TARGET_1,  /* We have a variable condition exit. */
-    DISAS_CHAIN  = DISAS_TARGET_2,  /* We have a single condition exit. */
-};
+#define DISAS_EXIT   DISAS_TARGET_0  /* We want return to the cpu main loop. */
+#define DISAS_LOOKUP DISAS_TARGET_1  /* We have a variable condition exit. */
+#define DISAS_CHAIN  DISAS_TARGET_2  /* We have a single condition exit. */
 
 typedef struct DisasContext DisasContext;
 
@@ -1089,11 +1087,7 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
         tcg_gen_exit_tb(tb, n);
     } else {
         tcg_gen_movi_i32(cpu_pc, dest);
-        if (ctx->base.singlestep_enabled) {
-            gen_helper_debug(cpu_env);
-        } else {
-            tcg_gen_lookup_and_goto_ptr();
-        }
+        tcg_gen_lookup_and_goto_ptr();
     }
     ctx->base.is_jmp = DISAS_NORETURN;
 }
@@ -2977,8 +2971,18 @@ static void avr_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
     if (skip_label) {
         canonicalize_skip(ctx);
         gen_set_label(skip_label);
-        if (ctx->base.is_jmp == DISAS_NORETURN) {
+
+        switch (ctx->base.is_jmp) {
+        case DISAS_NORETURN:
             ctx->base.is_jmp = DISAS_CHAIN;
+            break;
+        case DISAS_NEXT:
+            if (ctx->base.tb->flags & TB_FLAGS_SKIP) {
+                ctx->base.is_jmp = DISAS_TOO_MANY;
+            }
+            break;
+        default:
+            break;
         }
     }
 
@@ -2995,6 +2999,11 @@ static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
 {
     DisasContext *ctx = container_of(dcbase, DisasContext, base);
     bool nonconst_skip = canonicalize_skip(ctx);
+    /*
+     * Because we disable interrupts while env->skip is set,
+     * we must return to the main loop to re-evaluate afterward.
+ */ + bool force_exit = ctx->base.tb->flags & TB_FLAGS_SKIP; switch (ctx->base.is_jmp) { case DISAS_NORETURN: @@ -3003,7 +3012,7 @@ static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) case DISAS_NEXT: case DISAS_TOO_MANY: case DISAS_CHAIN: - if (!nonconst_skip) { + if (!nonconst_skip && !force_exit) { /* Note gen_goto_tb checks singlestep. */ gen_goto_tb(ctx, 1, ctx->npc); break; @@ -3011,27 +3020,24 @@ static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) tcg_gen_movi_tl(cpu_pc, ctx->npc); /* fall through */ case DISAS_LOOKUP: - if (!ctx->base.singlestep_enabled) { + if (!force_exit) { tcg_gen_lookup_and_goto_ptr(); break; } /* fall through */ case DISAS_EXIT: - if (ctx->base.singlestep_enabled) { - gen_helper_debug(cpu_env); - } else { - tcg_gen_exit_tb(NULL, 0); - } + tcg_gen_exit_tb(NULL, 0); break; default: g_assert_not_reached(); } } -static void avr_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs) +static void avr_tr_disas_log(const DisasContextBase *dcbase, + CPUState *cs, FILE *logfile) { - qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first)); - log_target_disas(cs, dcbase->pc_first, dcbase->tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first)); + target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size); } static const TranslatorOps avr_tr_ops = { @@ -3043,14 +3049,9 @@ static const TranslatorOps avr_tr_ops = { .disas_log = avr_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext dc = { }; - translator_loop(&avr_tr_ops, &dc.base, cs, tb, max_insns); -} - -void restore_state_to_opc(CPUAVRState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc_w = data[0]; + translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base); } diff --git a/target/cris/cpu-param.h b/target/cris/cpu-param.h index 36a3058761..12ec22d8df 100644 --- a/target/cris/cpu-param.h +++ b/target/cris/cpu-param.h @@ -6,7 +6,7 @@ */ #ifndef CRIS_CPU_PARAM_H -#define CRIS_CPU_PARAM_H 1 +#define CRIS_CPU_PARAM_H #define TARGET_LONG_BITS 32 #define TARGET_PAGE_BITS 13 diff --git a/target/cris/cpu-qom.h b/target/cris/cpu-qom.h index 2596edc7e3..71e8af0e70 100644 --- a/target/cris/cpu-qom.h +++ b/target/cris/cpu-qom.h @@ -25,8 +25,7 @@ #define TYPE_CRIS_CPU "cris-cpu" -OBJECT_DECLARE_TYPE(CRISCPU, CRISCPUClass, - CRIS_CPU) +OBJECT_DECLARE_CPU_TYPE(CRISCPU, CRISCPUClass, CRIS_CPU) /** * CRISCPUClass: diff --git a/target/cris/cpu.c b/target/cris/cpu.c index 70932b1f8c..fb05dc6f9a 100644 --- a/target/cris/cpu.c +++ b/target/cris/cpu.c @@ -35,6 +35,22 @@ static void cris_cpu_set_pc(CPUState *cs, vaddr value) cpu->env.pc = value; } +static vaddr cris_cpu_get_pc(CPUState *cs) +{ + CRISCPU *cpu = CRIS_CPU(cs); + + return cpu->env.pc; +} + +static void cris_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + CRISCPU *cpu = CRIS_CPU(cs); + + cpu->env.pc = data[0]; +} + static bool cris_cpu_has_work(CPUState *cs) { return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); @@ -205,20 +221,22 @@ static const struct SysemuCPUOps cris_sysemu_ops = { static const struct TCGCPUOps crisv10_tcg_ops = { .initialize = cris_initialize_crisv10_tcg, - .cpu_exec_interrupt = cris_cpu_exec_interrupt, - .tlb_fill = cris_cpu_tlb_fill, + .restore_state_to_opc = cris_restore_state_to_opc, #ifndef CONFIG_USER_ONLY + .tlb_fill = 
cris_cpu_tlb_fill, + .cpu_exec_interrupt = cris_cpu_exec_interrupt, .do_interrupt = crisv10_cpu_do_interrupt, #endif /* !CONFIG_USER_ONLY */ }; static const struct TCGCPUOps crisv32_tcg_ops = { .initialize = cris_initialize_tcg, - .cpu_exec_interrupt = cris_cpu_exec_interrupt, - .tlb_fill = cris_cpu_tlb_fill, + .restore_state_to_opc = cris_restore_state_to_opc, #ifndef CONFIG_USER_ONLY + .tlb_fill = cris_cpu_tlb_fill, + .cpu_exec_interrupt = cris_cpu_exec_interrupt, .do_interrupt = cris_cpu_do_interrupt, #endif /* !CONFIG_USER_ONLY */ }; @@ -297,6 +315,7 @@ static void cris_cpu_class_init(ObjectClass *oc, void *data) cc->has_work = cris_cpu_has_work; cc->dump_state = cris_cpu_dump_state; cc->set_pc = cris_cpu_set_pc; + cc->get_pc = cris_cpu_get_pc; cc->gdb_read_register = cris_cpu_gdb_read_register; cc->gdb_write_register = cris_cpu_gdb_write_register; #ifndef CONFIG_USER_ONLY diff --git a/target/cris/cpu.h b/target/cris/cpu.h index d3b6492909..e6776f25b1 100644 --- a/target/cris/cpu.h +++ b/target/cris/cpu.h @@ -105,7 +105,7 @@ typedef struct { uint32_t lo; } TLBSet; -typedef struct CPUCRISState { +typedef struct CPUArchState { uint32_t regs[16]; /* P0 - P15 are referred to as special registers in the docs. */ uint32_t pregs[16]; @@ -173,7 +173,7 @@ typedef struct CPUCRISState { * * A CRIS CPU. */ -struct CRISCPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ @@ -185,12 +185,16 @@ struct CRISCPU { #ifndef CONFIG_USER_ONLY extern const VMStateDescription vmstate_cris_cpu; -#endif void cris_cpu_do_interrupt(CPUState *cpu); void crisv10_cpu_do_interrupt(CPUState *cpu); bool cris_cpu_exec_interrupt(CPUState *cpu, int int_req); +bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); +#endif + void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags); hwaddr cris_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); @@ -199,12 +203,6 @@ int crisv10_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int cris_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int cris_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -/* you can call this signal handler from your SIGBUS and SIGSEGV - signal handlers to inform the virtual CPU of exceptions. non zero - is returned if the signal was handled by the virtual CPU. */ -int cpu_cris_signal_handler(int host_signum, void *pinfo, - void *puc); - void cris_initialize_tcg(void); void cris_initialize_crisv10_tcg(void); @@ -250,8 +248,6 @@ enum { #define CRIS_CPU_TYPE_NAME(name) (name CRIS_CPU_TYPE_SUFFIX) #define CPU_RESOLVING_TYPE TYPE_CRIS_CPU -#define cpu_signal_handler cpu_cris_signal_handler - /* MMU modes definitions */ #define MMU_USER_IDX 1 static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch) @@ -259,10 +255,6 @@ static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch) return !!(env->pregs[PR_CCS] & U_FLAG); } -bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); - /* Support function regs. 
*/ #define SFR_RW_GC_CFG 0][0 #define SFR_RW_MM_CFG env->pregs[PR_SRS]][0 @@ -273,9 +265,6 @@ bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size, #define SFR_RW_MM_TLB_LO env->pregs[PR_SRS]][5 #define SFR_RW_MM_TLB_HI env->pregs[PR_SRS]][6 -typedef CPUCRISState CPUArchState; -typedef CRISCPU ArchCPU; - #include "exec/cpu-all.h" static inline void cpu_get_tb_cpu_state(CPUCRISState *env, target_ulong *pc, diff --git a/target/cris/helper.c b/target/cris/helper.c index 911867f3b4..81a72699b5 100644 --- a/target/cris/helper.c +++ b/target/cris/helper.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "hw/core/tcg-cpu-ops.h" #include "mmu.h" @@ -39,36 +40,6 @@ #define D_LOG(...) do { } while (0) #endif -#if defined(CONFIG_USER_ONLY) - -void cris_cpu_do_interrupt(CPUState *cs) -{ - CRISCPU *cpu = CRIS_CPU(cs); - CPUCRISState *env = &cpu->env; - - cs->exception_index = -1; - env->pregs[PR_ERP] = env->pc; -} - -void crisv10_cpu_do_interrupt(CPUState *cs) -{ - cris_cpu_do_interrupt(cs); -} - -bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ - CRISCPU *cpu = CRIS_CPU(cs); - - cs->exception_index = 0xaa; - cpu->env.pregs[PR_EDA] = address; - cpu_loop_exit_restore(cs, retaddr); -} - -#else /* !CONFIG_USER_ONLY */ - - static void cris_shift_ccs(CPUCRISState *env) { uint32_t ccs; @@ -116,7 +87,7 @@ bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size, cs->exception_index = EXCP_BUSFAULT; env->fault_vector = res.bf_vec; if (retaddr) { - if (cpu_restore_state(cs, retaddr, true)) { + if (cpu_restore_state(cs, retaddr)) { /* Evaluate flags after retranslation. */ helper_top_evaluate_flags(env); } @@ -287,7 +258,6 @@ hwaddr cris_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) D(fprintf(stderr, "%s %x -> %x\n", __func__, addr, phy)); return phy; } -#endif bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { diff --git a/target/cris/meson.build b/target/cris/meson.build index 67c3793c85..c1e326d950 100644 --- a/target/cris/meson.build +++ b/target/cris/meson.build @@ -2,13 +2,16 @@ cris_ss = ss.source_set() cris_ss.add(files( 'cpu.c', 'gdbstub.c', - 'helper.c', 'op_helper.c', 'translate.c', )) cris_softmmu_ss = ss.source_set() -cris_softmmu_ss.add(files('mmu.c', 'machine.c')) +cris_softmmu_ss.add(files( + 'helper.c', + 'machine.c', + 'mmu.c', +)) target_arch += {'cris': cris_ss} target_softmmu_arch += {'cris': cris_softmmu_ss} diff --git a/target/cris/translate.c b/target/cris/translate.c index a84b753349..fbc3fd5865 100644 --- a/target/cris/translate.c +++ b/target/cris/translate.c @@ -1047,7 +1047,7 @@ static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr) cris_store_direct_jmp(dc); } - tcg_gen_qemu_ld_i64(dst, addr, mem_index, MO_TEQ); + tcg_gen_qemu_ld_i64(dst, addr, mem_index, MO_TEUQ); } static void gen_load(DisasContext *dc, TCGv dst, TCGv addr, @@ -3249,22 +3249,6 @@ static void cris_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) } } - if (unlikely(dc->base.singlestep_enabled)) { - switch (is_jmp) { - case DISAS_TOO_MANY: - case DISAS_UPDATE_NEXT: - tcg_gen_movi_tl(env_pc, npc); - /* fall through */ - case DISAS_JUMP: - case DISAS_UPDATE: - t_gen_raise_exception(EXCP_DEBUG); - return; - default: - break; - } - g_assert_not_reached(); - } - switch (is_jmp) { case DISAS_TOO_MANY: gen_goto_tb(dc, 0, npc); @@ -3284,11 +3268,12 @@ static void cris_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) } } -static void 
cris_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
+static void cris_tr_disas_log(const DisasContextBase *dcbase,
+                              CPUState *cpu, FILE *logfile)
 {
     if (!DISAS_CRIS) {
-        qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
-        log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
+        fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
+        target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
     }
 }
 
@@ -3301,10 +3286,11 @@ static const TranslatorOps cris_tr_ops = {
     .disas_log = cris_tr_disas_log,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+                           target_ulong pc, void *host_pc)
 {
     DisasContext dc;
-    translator_loop(&cris_tr_ops, &dc.base, cs, tb, max_insns);
+    translator_loop(cs, tb, max_insns, pc, host_pc, &cris_tr_ops, &dc.base);
 }
 
 void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags)
@@ -3406,9 +3392,3 @@ void cris_initialize_tcg(void)
                                      pregnames_v32[i]);
     }
 }
-
-void restore_state_to_opc(CPUCRISState *env, TranslationBlock *tb,
-                          target_ulong *data)
-{
-    env->pc = data[0];
-}
diff --git a/target/hexagon/README b/target/hexagon/README
index b0b2435070..372e24747c 100644
--- a/target/hexagon/README
+++ b/target/hexagon/README
@@ -1,9 +1,13 @@
 Hexagon is Qualcomm's very long instruction word (VLIW) digital signal
-processor(DSP).
+processor(DSP). We also support Hexagon Vector eXtensions (HVX). HVX
+is a wide vector coprocessor designed for high performance computer vision,
+image processing, machine learning, and other workloads.
 
 The following versions of the Hexagon core are supported
     Scalar core: v67
         https://developer.qualcomm.com/downloads/qualcomm-hexagon-v67-programmer-s-reference-manual
+    HVX extension: v66
+        https://developer.qualcomm.com/downloads/qualcomm-hexagon-v66-hvx-programmer-s-reference-manual
 
 We presented an overview of the project at the 2019 KVM Forum.
     https://kvmforum2019.sched.com/event/Tmwc/qemu-hexagon-automatic-translation-of-the-isa-manual-pseudcode-to-tiny-code-instructions-of-a-vliw-architecture-niccolo-izzo-revng-taylor-simpson-qualcomm-innovation-center
@@ -124,6 +128,71 @@ There are also cases where we brute force the TCG code generation.
 Instructions with multiple definitions are examples.  These require special
 handling because qemu helpers can only return a single value.
 
+For HVX vectors, the generator behaves slightly differently.  The wide vectors
+won't fit in a TCGv or TCGv_i64, so we use TCGv_ptr variables to pass the
+address to helper functions.  Here's an example for an HVX vector-add-word
+instruction.
+ static void generate_V6_vaddw( + CPUHexagonState *env, + DisasContext *ctx, + Insn *insn, + Packet *pkt) + { + const int VdN = insn->regno[0]; + const intptr_t VdV_off = + ctx_future_vreg_off(ctx, VdN, 1, true); + TCGv_ptr VdV = tcg_temp_local_new_ptr(); + tcg_gen_addi_ptr(VdV, cpu_env, VdV_off); + const int VuN = insn->regno[1]; + const intptr_t VuV_off = + vreg_src_off(ctx, VuN); + TCGv_ptr VuV = tcg_temp_local_new_ptr(); + const int VvN = insn->regno[2]; + const intptr_t VvV_off = + vreg_src_off(ctx, VvN); + TCGv_ptr VvV = tcg_temp_local_new_ptr(); + tcg_gen_addi_ptr(VuV, cpu_env, VuV_off); + tcg_gen_addi_ptr(VvV, cpu_env, VvV_off); + TCGv slot = tcg_constant_tl(insn->slot); + gen_helper_V6_vaddw(cpu_env, VdV, VuV, VvV, slot); + tcg_temp_free(slot); + gen_log_vreg_write(ctx, VdV_off, VdN, EXT_DFL, insn->slot, false); + ctx_log_vreg_write(ctx, VdN, EXT_DFL, false); + tcg_temp_free_ptr(VdV); + tcg_temp_free_ptr(VuV); + tcg_temp_free_ptr(VvV); + } + +Notice that we also generate a variable named _off for each operand of +the instruction. This makes it easy to override the instruction semantics with +functions from tcg-op-gvec.h. Here's the override for this instruction. + #define fGEN_TCG_V6_vaddw(SHORTCODE) \ + tcg_gen_gvec_add(MO_32, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +Finally, we notice that the override doesn't use the TCGv_ptr variables, so +we don't generate them when an override is present. Here is what we generate +when the override is present. + static void generate_V6_vaddw( + CPUHexagonState *env, + DisasContext *ctx, + Insn *insn, + Packet *pkt) + { + const int VdN = insn->regno[0]; + const intptr_t VdV_off = + ctx_future_vreg_off(ctx, VdN, 1, true); + const int VuN = insn->regno[1]; + const intptr_t VuV_off = + vreg_src_off(ctx, VuN); + const int VvN = insn->regno[2]; + const intptr_t VvV_off = + vreg_src_off(ctx, VvN); + fGEN_TCG_V6_vaddw({ fHIDE(int i;) fVFOREACH(32, i) { VdV.w[i] = VuV.w[i] + VvV.w[i] ; } }); + gen_log_vreg_write(ctx, VdV_off, VdN, EXT_DFL, insn->slot, false); + ctx_log_vreg_write(ctx, VdN, EXT_DFL, false); + } + In addition to instruction semantics, we use a generator to create the decode tree. This generation is also a two step process. The first step is to run target/hexagon/gen_dectree_import.c to produce @@ -140,6 +209,7 @@ runtime information for each thread and contains stuff like the GPR and predicate registers. macros.h +mmvec/macros.h The Hexagon arch lib relies heavily on macros for the instruction semantics. This is a great advantage for qemu because we can override them for different @@ -203,6 +273,15 @@ During runtime, the following fields in CPUHexagonState (see cpu.h) are used pred_written boolean indicating if predicate was written mem_log_stores record of the stores (indexed by slot) +For Hexagon Vector eXtensions (HVX), the following fields are used + VRegs Vector registers + future_VRegs Registers to be stored during packet commit + tmp_VRegs Temporary registers *not* stored during commit + VRegs_updated Mask of predicated vector writes + QRegs Q (vector predicate) registers + future_QRegs Registers to be stored during packet commit + QRegs_updated Mask of predicated vector writes + *** Debugging *** You can turn on a lot of debugging by changing the HEX_DEBUG macro to 1 in diff --git a/target/hexagon/arch.c b/target/hexagon/arch.c index 68a55b3bd4..da79b41c4d 100644 --- a/target/hexagon/arch.c +++ b/target/hexagon/arch.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. 
All Rights Reserved. + * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -298,8 +298,8 @@ int arch_sf_recip_common(float32 *Rs, float32 *Rt, float32 *Rd, int *adjust, } else { PeV = 0x00; /* Basic checks passed */ - n_exp = float32_getexp(RsV); - d_exp = float32_getexp(RtV); + n_exp = float32_getexp_raw(RsV); + d_exp = float32_getexp_raw(RtV); if ((n_exp - d_exp + SF_BIAS) <= SF_MANTBITS) { /* Near quotient underflow / inexact Q */ PeV = 0x80; diff --git a/target/hexagon/attribs.h b/target/hexagon/attribs.h index 54576f4143..d51bb4f732 100644 --- a/target/hexagon/attribs.h +++ b/target/hexagon/attribs.h @@ -32,4 +32,4 @@ extern DECLARE_BITMAP(opcode_attribs[XX_LAST_OPCODE], A_ZZ_LASTATTRIB); #define GET_ATTRIB(opcode, attrib) \ test_bit(attrib, opcode_attribs[opcode]) -#endif /* ATTRIBS_H */ +#endif /* HEXAGON_ATTRIBS_H */ diff --git a/target/hexagon/attribs_def.h.inc b/target/hexagon/attribs_def.h.inc index 381550909d..5d2a102c18 100644 --- a/target/hexagon/attribs_def.h.inc +++ b/target/hexagon/attribs_def.h.inc @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -38,9 +38,41 @@ DEF_ATTRIB(SUBINSN, "sub-instruction", "", "") /* Load and Store attributes */ DEF_ATTRIB(LOAD, "Loads from memory", "", "") DEF_ATTRIB(STORE, "Stores to memory", "", "") +DEF_ATTRIB(STOREIMMED, "Stores immed to memory", "", "") +DEF_ATTRIB(MEMSIZE_0B, "Memory width is 0 byte", "", "") +DEF_ATTRIB(MEMSIZE_1B, "Memory width is 1 byte", "", "") +DEF_ATTRIB(MEMSIZE_2B, "Memory width is 2 bytes", "", "") +DEF_ATTRIB(MEMSIZE_4B, "Memory width is 4 bytes", "", "") +DEF_ATTRIB(MEMSIZE_8B, "Memory width is 8 bytes", "", "") +DEF_ATTRIB(SCALAR_STORE, "Store is scalar", "", "") +DEF_ATTRIB(REGWRSIZE_1B, "Memory width is 1 byte", "", "") +DEF_ATTRIB(REGWRSIZE_2B, "Memory width is 2 bytes", "", "") +DEF_ATTRIB(REGWRSIZE_4B, "Memory width is 4 bytes", "", "") +DEF_ATTRIB(REGWRSIZE_8B, "Memory width is 8 bytes", "", "") DEF_ATTRIB(MEMLIKE, "Memory-like instruction", "", "") DEF_ATTRIB(MEMLIKE_PACKET_RULES, "follows Memory-like packet rules", "", "") +/* V6 Vector attributes */ +DEF_ATTRIB(CVI, "Executes on the HVX extension", "", "") + +DEF_ATTRIB(CVI_NEW, "New value memory instruction executes on HVX", "", "") +DEF_ATTRIB(CVI_VM, "Memory instruction executes on HVX", "", "") +DEF_ATTRIB(CVI_VP, "Permute instruction executes on HVX", "", "") +DEF_ATTRIB(CVI_VP_VS, "Double vector permute/shft insn executes on HVX", "", "") +DEF_ATTRIB(CVI_VX, "Multiply instruction executes on HVX", "", "") +DEF_ATTRIB(CVI_VX_DV, "Double vector multiply insn executes on HVX", "", "") +DEF_ATTRIB(CVI_VS, "Shift instruction executes on HVX", "", "") +DEF_ATTRIB(CVI_VS_VX, "Permute/shift and multiply insn executes on HVX", "", "") +DEF_ATTRIB(CVI_VA, "ALU instruction executes on HVX", "", "") +DEF_ATTRIB(CVI_VA_DV, "Double vector alu instruction executes on HVX", "", "") +DEF_ATTRIB(CVI_4SLOT, "Consumes all the vector execution resources", "", "") +DEF_ATTRIB(CVI_TMP, "Transient Memory Load not written to register", "", "") +DEF_ATTRIB(CVI_GATHER, "CVI Gather operation", "", "") +DEF_ATTRIB(CVI_SCATTER, 
"CVI Scatter operation", "", "") +DEF_ATTRIB(CVI_SCATTER_RELEASE, "CVI Store Release for scatter", "", "") +DEF_ATTRIB(CVI_TMP_DST, "CVI instruction that doesn't write a register", "", "") +DEF_ATTRIB(CVI_SLOT23, "Can execute in slot 2 or slot 3 (HVX)", "", "") + /* Change-of-flow attributes */ DEF_ATTRIB(JUMP, "Jump-type instruction", "", "") @@ -50,6 +82,11 @@ DEF_ATTRIB(COF, "Change-of-flow instruction", "", "") DEF_ATTRIB(CONDEXEC, "May be cancelled by a predicate", "", "") DEF_ATTRIB(DOTNEWVALUE, "Uses a register value generated in this pkt", "", "") DEF_ATTRIB(NEWCMPJUMP, "Compound compare and jump", "", "") +DEF_ATTRIB(NVSTORE, "New-value store", "", "") +DEF_ATTRIB(MEMOP, "memop", "", "") + +DEF_ATTRIB(ROPS_2, "Compound instruction worth 2 RISC-ops", "", "") +DEF_ATTRIB(ROPS_3, "Compound instruction worth 3 RISC-ops", "", "") /* access to implicit registers */ DEF_ATTRIB(IMPLICIT_WRITES_LR, "Writes the link register", "", "UREG.LR") @@ -64,7 +101,11 @@ DEF_ATTRIB(IMPLICIT_WRITES_P1, "Writes Predicate 1", "", "UREG.P1") DEF_ATTRIB(IMPLICIT_WRITES_P2, "Writes Predicate 1", "", "UREG.P2") DEF_ATTRIB(IMPLICIT_WRITES_P3, "May write Predicate 3", "", "UREG.P3") DEF_ATTRIB(IMPLICIT_READS_PC, "Reads the PC register", "", "") +DEF_ATTRIB(IMPLICIT_WRITES_USR, "May write USR", "", "") DEF_ATTRIB(WRITES_PRED_REG, "Writes a predicate register", "", "") +DEF_ATTRIB(COMMUTES, "The operation is communitive", "", "") +DEF_ATTRIB(DEALLOCRET, "dealloc_return", "", "") +DEF_ATTRIB(DEALLOCFRAME, "deallocframe", "", "") DEF_ATTRIB(CRSLOT23, "Can execute in slot 2 or slot 3 (CR)", "", "") DEF_ATTRIB(IT_NOP, "nop instruction", "", "") @@ -72,20 +113,25 @@ DEF_ATTRIB(IT_EXTENDER, "constant extender instruction", "", "") /* Restrictions to make note of */ +DEF_ATTRIB(RESTRICT_COF_MAX1, "One change-of-flow per packet", "", "") +DEF_ATTRIB(RESTRICT_NOPACKET, "Not allowed in a packet", "", "") DEF_ATTRIB(RESTRICT_SLOT0ONLY, "Must execute on slot0", "", "") DEF_ATTRIB(RESTRICT_SLOT1ONLY, "Must execute on slot1", "", "") DEF_ATTRIB(RESTRICT_SLOT2ONLY, "Must execute on slot2", "", "") DEF_ATTRIB(RESTRICT_SLOT3ONLY, "Must execute on slot3", "", "") DEF_ATTRIB(RESTRICT_NOSLOT1, "No slot 1 instruction in parallel", "", "") DEF_ATTRIB(RESTRICT_PREFERSLOT0, "Try to encode into slot 0", "", "") +DEF_ATTRIB(RESTRICT_PACKET_AXOK, "May exist with A-type or X-type", "", "") DEF_ATTRIB(ICOP, "Instruction cache op", "", "") DEF_ATTRIB(HWLOOP0_END, "Ends HW loop0", "", "") DEF_ATTRIB(HWLOOP1_END, "Ends HW loop1", "", "") +DEF_ATTRIB(RET_TYPE, "return type", "", "") DEF_ATTRIB(DCZEROA, "dczeroa type", "", "") DEF_ATTRIB(ICFLUSHOP, "icflush op type", "", "") DEF_ATTRIB(DCFLUSHOP, "dcflush op type", "", "") +DEF_ATTRIB(L2FLUSHOP, "l2flush op type", "", "") DEF_ATTRIB(DCFETCH, "dcfetch type", "", "") DEF_ATTRIB(L2FETCH, "Instruction is l2fetch type", "", "") @@ -93,5 +139,18 @@ DEF_ATTRIB(L2FETCH, "Instruction is l2fetch type", "", "") DEF_ATTRIB(ICINVA, "icinva", "", "") DEF_ATTRIB(DCCLEANINVA, "dccleaninva", "", "") +/* Documentation Notes */ +DEF_ATTRIB(NOTE_CONDITIONAL, "can be conditionally executed", "", "") +DEF_ATTRIB(NOTE_NEWVAL_SLOT0, "New-value oprnd must execute on slot 0", "", "") +DEF_ATTRIB(NOTE_PRIV, "Monitor-level feature", "", "") +DEF_ATTRIB(NOTE_NOPACKET, "solo instruction", "", "") +DEF_ATTRIB(NOTE_AXOK, "May only be grouped with ALU32 or non-FP XTYPE.", "", "") +DEF_ATTRIB(NOTE_LATEPRED, "The predicate can not be used as a .new", "", "") +DEF_ATTRIB(NOTE_NVSLOT0, "Can execute only in slot 0 (ST)", "", "") + 
+/* Restrictions to make note of */ +DEF_ATTRIB(RESTRICT_NOSLOT1_STORE, "Packet must not have slot 1 store", "", "") +DEF_ATTRIB(RESTRICT_LATEPRED, "Predicate can not be used as a .new.", "", "") + /* Keep this as the last attribute: */ DEF_ATTRIB(ZZ_LASTATTRIB, "Last attribute in the file", "", "") diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c index 3338365c16..03221fbdc2 100644 --- a/target/hexagon/cpu.c +++ b/target/hexagon/cpu.c @@ -59,7 +59,7 @@ const char * const hexagon_regnames[TOTAL_PER_THREAD_REGS] = { "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", "sa0", "lc0", "sa1", "lc1", "p3_0", "c5", "m0", "m1", "usr", "pc", "ugp", "gp", "cs0", "cs1", "c14", "c15", - "c16", "c17", "c18", "c19", "pkt_cnt", "insn_cnt", "c22", "c23", + "c16", "c17", "c18", "c19", "pkt_cnt", "insn_cnt", "hvx_cnt", "c23", "c24", "c25", "c26", "c27", "c28", "c29", "c30", "c31", }; @@ -113,7 +113,66 @@ static void print_reg(FILE *f, CPUHexagonState *env, int regnum) hexagon_regnames[regnum], value); } -static void hexagon_dump(CPUHexagonState *env, FILE *f) +static void print_vreg(FILE *f, CPUHexagonState *env, int regnum, + bool skip_if_zero) +{ + if (skip_if_zero) { + bool nonzero_found = false; + for (int i = 0; i < MAX_VEC_SIZE_BYTES; i++) { + if (env->VRegs[regnum].ub[i] != 0) { + nonzero_found = true; + break; + } + } + if (!nonzero_found) { + return; + } + } + + qemu_fprintf(f, " v%d = ( ", regnum); + qemu_fprintf(f, "0x%02x", env->VRegs[regnum].ub[MAX_VEC_SIZE_BYTES - 1]); + for (int i = MAX_VEC_SIZE_BYTES - 2; i >= 0; i--) { + qemu_fprintf(f, ", 0x%02x", env->VRegs[regnum].ub[i]); + } + qemu_fprintf(f, " )\n"); +} + +void hexagon_debug_vreg(CPUHexagonState *env, int regnum) +{ + print_vreg(stdout, env, regnum, false); +} + +static void print_qreg(FILE *f, CPUHexagonState *env, int regnum, + bool skip_if_zero) +{ + if (skip_if_zero) { + bool nonzero_found = false; + for (int i = 0; i < MAX_VEC_SIZE_BYTES / 8; i++) { + if (env->QRegs[regnum].ub[i] != 0) { + nonzero_found = true; + break; + } + } + if (!nonzero_found) { + return; + } + } + + qemu_fprintf(f, " q%d = ( ", regnum); + qemu_fprintf(f, "0x%02x", + env->QRegs[regnum].ub[MAX_VEC_SIZE_BYTES / 8 - 1]); + for (int i = MAX_VEC_SIZE_BYTES / 8 - 2; i >= 0; i--) { + qemu_fprintf(f, ", 0x%02x", env->QRegs[regnum].ub[i]); + } + qemu_fprintf(f, " )\n"); +} + +void hexagon_debug_qreg(CPUHexagonState *env, int regnum) +{ + print_qreg(stdout, env, regnum, false); +} + +static void hexagon_dump(CPUHexagonState *env, FILE *f, int flags) { HexagonCPU *cpu = env_archcpu(env); @@ -159,6 +218,17 @@ static void hexagon_dump(CPUHexagonState *env, FILE *f) print_reg(f, env, HEX_REG_CS1); #endif qemu_fprintf(f, "}\n"); + + if (flags & CPU_DUMP_FPU) { + qemu_fprintf(f, "Vector Registers = {\n"); + for (int i = 0; i < NUM_VREGS; i++) { + print_vreg(f, env, i, true); + } + for (int i = 0; i < NUM_QREGS; i++) { + print_qreg(f, env, i, true); + } + qemu_fprintf(f, "}\n"); + } } static void hexagon_dump_state(CPUState *cs, FILE *f, int flags) @@ -166,12 +236,12 @@ static void hexagon_dump_state(CPUState *cs, FILE *f, int flags) HexagonCPU *cpu = HEXAGON_CPU(cs); CPUHexagonState *env = &cpu->env; - hexagon_dump(env, f); + hexagon_dump(env, f, flags); } void hexagon_debug(CPUHexagonState *env) { - hexagon_dump(env, stdout); + hexagon_dump(env, stdout, CPU_DUMP_FPU); } static void hexagon_cpu_set_pc(CPUState *cs, vaddr value) @@ -181,12 +251,19 @@ static void hexagon_cpu_set_pc(CPUState *cs, vaddr value) env->gpr[HEX_REG_PC] = value; } +static vaddr 
hexagon_cpu_get_pc(CPUState *cs) +{ + HexagonCPU *cpu = HEXAGON_CPU(cs); + CPUHexagonState *env = &cpu->env; + return env->gpr[HEX_REG_PC]; +} + static void hexagon_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { HexagonCPU *cpu = HEXAGON_CPU(cs); CPUHexagonState *env = &cpu->env; - env->gpr[HEX_REG_PC] = tb->pc; + env->gpr[HEX_REG_PC] = tb_pc(tb); } static bool hexagon_cpu_has_work(CPUState *cs) @@ -194,9 +271,13 @@ static bool hexagon_cpu_has_work(CPUState *cs) return true; } -void restore_state_to_opc(CPUHexagonState *env, TranslationBlock *tb, - target_ulong *data) +static void hexagon_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) { + HexagonCPU *cpu = HEXAGON_CPU(cs); + CPUHexagonState *env = &cpu->env; + env->gpr[HEX_REG_PC] = data[0]; } @@ -245,34 +326,12 @@ static void hexagon_cpu_init(Object *obj) qdev_property_add_static(DEVICE(obj), &hexagon_lldb_stack_adjust_property); } -static bool hexagon_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ -#ifdef CONFIG_USER_ONLY - switch (access_type) { - case MMU_INST_FETCH: - cs->exception_index = HEX_EXCP_FETCH_NO_UPAGE; - break; - case MMU_DATA_LOAD: - cs->exception_index = HEX_EXCP_PRIV_NO_UREAD; - break; - case MMU_DATA_STORE: - cs->exception_index = HEX_EXCP_PRIV_NO_UWRITE; - break; - } - cpu_loop_exit_restore(cs, retaddr); -#else -#error System mode not implemented for Hexagon -#endif -} - #include "hw/core/tcg-cpu-ops.h" static const struct TCGCPUOps hexagon_tcg_ops = { .initialize = hexagon_translate_init, .synchronize_from_tb = hexagon_cpu_synchronize_from_tb, - .tlb_fill = hexagon_tlb_fill, + .restore_state_to_opc = hexagon_restore_state_to_opc, }; static void hexagon_cpu_class_init(ObjectClass *c, void *data) @@ -290,9 +349,10 @@ static void hexagon_cpu_class_init(ObjectClass *c, void *data) cc->has_work = hexagon_cpu_has_work; cc->dump_state = hexagon_dump_state; cc->set_pc = hexagon_cpu_set_pc; + cc->get_pc = hexagon_cpu_get_pc; cc->gdb_read_register = hexagon_gdb_read_register; cc->gdb_write_register = hexagon_gdb_write_register; - cc->gdb_num_core_regs = TOTAL_PER_THREAD_REGS; + cc->gdb_num_core_regs = TOTAL_PER_THREAD_REGS + NUM_VREGS + NUM_QREGS; cc->gdb_stop_before_watchpoint = true; cc->disas_set_info = hexagon_cpu_disas_set_info; cc->tcg_ops = &hexagon_tcg_ops; diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h index 2855dd3881..2a65a57bab 100644 --- a/target/hexagon/cpu.h +++ b/target/hexagon/cpu.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -18,14 +18,13 @@ #ifndef HEXAGON_CPU_H #define HEXAGON_CPU_H -/* Forward declaration needed by some of the header files */ -typedef struct CPUHexagonState CPUHexagonState; - #include "fpu/softfloat-types.h" -#include "qemu-common.h" #include "exec/cpu-defs.h" #include "hex_regs.h" +#include "mmvec/mmvec.h" +#include "qom/object.h" +#include "hw/core/cpu.h" #define NUM_PREGS 4 #define TOTAL_PER_THREAD_REGS 64 @@ -34,6 +33,7 @@ typedef struct CPUHexagonState CPUHexagonState; #define STORES_MAX 2 #define REG_WRITES_MAX 32 #define PRED_WRITES_MAX 5 /* 4 insns + endloop */ +#define VSTORES_MAX 2 #define TYPE_HEXAGON_CPU "hexagon-cpu" @@ -52,6 +52,13 @@ typedef struct { uint64_t data64; } MemLog; +typedef struct { + target_ulong va; + int size; + DECLARE_BITMAP(mask, MAX_VEC_SIZE_BYTES) QEMU_ALIGNED(16); + MMVector data QEMU_ALIGNED(16); +} VStoreLog; + #define EXEC_STATUS_OK 0x0000 #define EXEC_STATUS_STOP 0x0002 #define EXEC_STATUS_REPLAY 0x0010 @@ -64,7 +71,10 @@ typedef struct { #define CLEAR_EXCEPTION (env->status &= (~EXEC_STATUS_EXCEPTION)) #define SET_EXCEPTION (env->status |= EXEC_STATUS_EXCEPTION) -struct CPUHexagonState { +/* Maximum number of vector temps in a packet */ +#define VECTOR_TEMPS_MAX 4 + +typedef struct CPUArchState { target_ulong gpr[TOTAL_PER_THREAD_REGS]; target_ulong pred[NUM_PREGS]; target_ulong branch_taken; @@ -97,16 +107,30 @@ struct CPUHexagonState { target_ulong llsc_val; uint64_t llsc_val_i64; - target_ulong is_gather_store_insn; - target_ulong gather_issued; -}; + MMVector VRegs[NUM_VREGS] QEMU_ALIGNED(16); + MMVector future_VRegs[VECTOR_TEMPS_MAX] QEMU_ALIGNED(16); + MMVector tmp_VRegs[VECTOR_TEMPS_MAX] QEMU_ALIGNED(16); -#define HEXAGON_CPU_CLASS(klass) \ - OBJECT_CLASS_CHECK(HexagonCPUClass, (klass), TYPE_HEXAGON_CPU) -#define HEXAGON_CPU(obj) \ - OBJECT_CHECK(HexagonCPU, (obj), TYPE_HEXAGON_CPU) -#define HEXAGON_CPU_GET_CLASS(obj) \ - OBJECT_GET_CLASS(HexagonCPUClass, (obj), TYPE_HEXAGON_CPU) + VRegMask VRegs_updated; + + MMQReg QRegs[NUM_QREGS] QEMU_ALIGNED(16); + MMQReg future_QRegs[NUM_QREGS] QEMU_ALIGNED(16); + QRegMask QRegs_updated; + + /* Temporaries used within instructions */ + MMVectorPair VuuV QEMU_ALIGNED(16); + MMVectorPair VvvV QEMU_ALIGNED(16); + MMVectorPair VxxV QEMU_ALIGNED(16); + MMVector vtmp QEMU_ALIGNED(16); + MMQReg qtmp QEMU_ALIGNED(16); + + VStoreLog vstore[VSTORES_MAX]; + target_ulong vstore_pending[VSTORES_MAX]; + bool vtcm_pending; + VTCMStoreLog vtcm_log; +} CPUHexagonState; + +OBJECT_DECLARE_CPU_TYPE(HexagonCPU, HexagonCPUClass, HEXAGON_CPU) typedef struct HexagonCPUClass { /*< private >*/ @@ -116,7 +140,7 @@ typedef struct HexagonCPUClass { DeviceReset parent_reset; } HexagonCPUClass; -typedef struct HexagonCPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ @@ -125,13 +149,10 @@ typedef struct HexagonCPU { bool lldb_compat; target_ulong lldb_stack_adjust; -} HexagonCPU; +}; #include "cpu_bits.h" -#define cpu_signal_handler cpu_hexagon_signal_handler -int cpu_hexagon_signal_handler(int host_signum, void *pinfo, void *puc); - static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, target_ulong *pc, target_ulong *cs_base, uint32_t *flags) { @@ -144,7 +165,15 @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, target_ulong *pc, #endif } -typedef struct CPUHexagonState CPUArchState; +static inline int cpu_mmu_index(CPUHexagonState *env, bool 
ifetch) +{ +#ifdef CONFIG_USER_ONLY + return MMU_USER_IDX; +#else +#error System mode not supported on Hexagon yet +#endif +} + typedef HexagonCPU ArchCPU; void hexagon_translate_init(void); diff --git a/target/hexagon/decode.c b/target/hexagon/decode.c index d424245598..6b73b5c60c 100644 --- a/target/hexagon/decode.c +++ b/target/hexagon/decode.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -22,6 +22,7 @@ #include "decode.h" #include "insn.h" #include "printinsn.h" +#include "mmvec/decode_ext_mmvec.h" #define fZXTN(N, M, VAL) ((VAL) & ((1LL << (N)) - 1)) @@ -46,6 +47,7 @@ enum { /* Name Num Table */ DEF_REGMAP(R_16, 16, 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23) DEF_REGMAP(R__8, 8, 0, 2, 4, 6, 16, 18, 20, 22) +DEF_REGMAP(R_8, 8, 0, 1, 2, 3, 4, 5, 6, 7) #define DECODE_MAPPED_REG(OPNUM, NAME) \ insn->regno[OPNUM] = DECODE_REGISTER_##NAME[insn->regno[OPNUM]]; @@ -157,6 +159,9 @@ static void decode_ext_init(void) for (i = EXT_IDX_noext; i < EXT_IDX_noext_AFTER; i++) { ext_trees[i] = &dectree_table_DECODE_EXT_EXT_noext; } + for (i = EXT_IDX_mmvec; i < EXT_IDX_mmvec_AFTER; i++) { + ext_trees[i] = &dectree_table_DECODE_EXT_EXT_mmvec; + } } typedef struct { @@ -397,10 +402,13 @@ static void decode_set_insn_attr_fields(Packet *pkt) } if (GET_ATTRIB(opcode, A_STORE)) { - if (pkt->insn[i].slot == 0) { - pkt->pkt_has_store_s0 = true; - } else { - pkt->pkt_has_store_s1 = true; + if (GET_ATTRIB(opcode, A_SCALAR_STORE) && + !GET_ATTRIB(opcode, A_MEMSIZE_0B)) { + if (pkt->insn[i].slot == 0) { + pkt->pkt_has_store_s0 = true; + } else { + pkt->pkt_has_store_s1 = true; + } } } @@ -566,8 +574,12 @@ static void decode_remove_extenders(Packet *packet) static SlotMask get_valid_slots(const Packet *pkt, unsigned int slot) { - return find_iclass_slots(pkt->insn[slot].opcode, - pkt->insn[slot].iclass); + if (GET_ATTRIB(pkt->insn[slot].opcode, A_EXTENSION)) { + return mmvec_ext_decode_find_iclass_slots(pkt->insn[slot].opcode); + } else { + return find_iclass_slots(pkt->insn[slot].opcode, + pkt->insn[slot].iclass); + } } #define DECODE_NEW_TABLE(TAG, SIZE, WHATNOT) /* NOTHING */ @@ -728,6 +740,11 @@ decode_insns_tablewalk(Insn *insn, const DectreeTable *table, } decode_op(insn, opc, encoding); return 1; + } else if (table->table[i].type == DECTREE_EXTSPACE) { + /* + * For now, HVX will be the only coproc + */ + return decode_insns_tablewalk(insn, ext_trees[EXT_IDX_mmvec], encoding); } else { return 0; } @@ -874,6 +891,7 @@ int decode_packet(int max_words, const uint32_t *words, Packet *pkt, int words_read = 0; bool end_of_packet = false; int new_insns = 0; + int i; uint32_t encoding32; /* Initialize */ @@ -901,6 +919,11 @@ int decode_packet(int max_words, const uint32_t *words, Packet *pkt, return 0; } pkt->encod_pkt_size_in_bytes = words_read * 4; + pkt->pkt_has_hvx = false; + for (i = 0; i < num_insns; i++) { + pkt->pkt_has_hvx |= + GET_ATTRIB(pkt->insn[i].opcode, A_CVI); + } /* * Check for :endloop in the parse bits @@ -931,6 +954,10 @@ int decode_packet(int max_words, const uint32_t *words, Packet *pkt, decode_set_slot_number(pkt); decode_fill_newvalue_regno(pkt); + if (pkt->pkt_has_hvx) { + mmvec_ext_decode_checks(pkt, disas_only); + } + if (!disas_only) { decode_shuffle_for_execution(pkt); decode_split_cmpjump(pkt); 
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h index e3b99a8cf4..91591d6050 100644 --- a/target/hexagon/fma_emu.h +++ b/target/hexagon/fma_emu.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -24,6 +24,10 @@ static inline bool is_finite(float64 x) } int32_t float64_getexp(float64 f64); +static inline uint32_t float32_getexp_raw(float32 f32) +{ + return extract32(f32, 23, 8); +} int32_t float32_getexp(float32 f32); float32 infinite_float32(uint8_t sign); float32 internal_fmafx(float32 a, float32 b, float32 c, diff --git a/target/hexagon/gdbstub.c b/target/hexagon/gdbstub.c index 9c8c04c961..d152d01bfe 100644 --- a/target/hexagon/gdbstub.c +++ b/target/hexagon/gdbstub.c @@ -16,7 +16,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "exec/gdbstub.h" #include "cpu.h" #include "internal.h" diff --git a/target/hexagon/gen_dectree_import.c b/target/hexagon/gen_dectree_import.c index 5b7ecfc6b3..ee354677fd 100644 --- a/target/hexagon/gen_dectree_import.c +++ b/target/hexagon/gen_dectree_import.c @@ -40,6 +40,11 @@ const char * const opcode_names[] = { * Q6INSN(A2_add,"Rd32=add(Rs32,Rt32)",ATTRIBS(), * "Add 32-bit registers", * { RdV=RsV+RtV;}) + * HVX instructions have the following form + * EXTINSN(V6_vinsertwr, "Vx32.w=vinsert(Rt32)", + * ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX,A_CVI_LATE), + * "Insert Word Scalar into Vector", + * VxV.uw[0] = RtV;) */ const char * const opcode_syntax[XX_LAST_OPCODE] = { #define Q6INSN(TAG, BEH, ATTRIBS, DESCR, SEM) \ @@ -105,6 +110,14 @@ static const char *get_opcode_enc(int opcode) static const char *get_opcode_enc_class(int opcode) { + const char *tmp = opcode_encodings[opcode].encoding; + if (tmp == NULL) { + const char *test = "V6_"; /* HVX */ + const char *name = opcode_names[opcode]; + if (strncmp(name, test, strlen(test)) == 0) { + return "EXT_mmvec"; + } + } return opcode_enc_class_names[opcode_encodings[opcode].enc_class]; } diff --git a/target/hexagon/gen_helper_funcs.py b/target/hexagon/gen_helper_funcs.py index 2b1c5d8e3e..a446c45384 100755 --- a/target/hexagon/gen_helper_funcs.py +++ b/target/hexagon/gen_helper_funcs.py @@ -48,12 +48,26 @@ def gen_helper_arg_pair(f,regtype,regid,regno): if regno >= 0 : f.write(", ") f.write("int64_t %s%sV" % (regtype,regid)) +def gen_helper_arg_ext(f,regtype,regid,regno): + if regno > 0 : f.write(", ") + f.write("void *%s%sV_void" % (regtype,regid)) + +def gen_helper_arg_ext_pair(f,regtype,regid,regno): + if regno > 0 : f.write(", ") + f.write("void *%s%sV_void" % (regtype,regid)) + def gen_helper_arg_opn(f,regtype,regid,i,tag): if (hex_common.is_pair(regid)): - gen_helper_arg_pair(f,regtype,regid,i) + if (hex_common.is_hvx_reg(regtype)): + gen_helper_arg_ext_pair(f,regtype,regid,i) + else: + gen_helper_arg_pair(f,regtype,regid,i) elif (hex_common.is_single(regid)): if hex_common.is_old_val(regtype, regid, tag): - gen_helper_arg(f,regtype,regid,i) + if (hex_common.is_hvx_reg(regtype)): + gen_helper_arg_ext(f,regtype,regid,i) + else: + gen_helper_arg(f,regtype,regid,i) elif hex_common.is_new_val(regtype, regid, tag): gen_helper_arg_new(f,regtype,regid,i) else: @@ -72,25 +86,67 @@ def gen_helper_dest_decl_pair(f,regtype,regid,regno,subfield=""): f.write(" int64_t %s%sV%s = 0;\n" % \ (regtype,regid,subfield)) 
+def gen_helper_dest_decl_ext(f,regtype,regid): + if (regtype == "Q"): + f.write(" /* %s%sV is *(MMQReg *)(%s%sV_void) */\n" % \ + (regtype,regid,regtype,regid)) + else: + f.write(" /* %s%sV is *(MMVector *)(%s%sV_void) */\n" % \ + (regtype,regid,regtype,regid)) + +def gen_helper_dest_decl_ext_pair(f,regtype,regid,regno): + f.write(" /* %s%sV is *(MMVectorPair *))%s%sV_void) */\n" % \ + (regtype,regid,regtype, regid)) + def gen_helper_dest_decl_opn(f,regtype,regid,i): if (hex_common.is_pair(regid)): - gen_helper_dest_decl_pair(f,regtype,regid,i) + if (hex_common.is_hvx_reg(regtype)): + gen_helper_dest_decl_ext_pair(f,regtype,regid, i) + else: + gen_helper_dest_decl_pair(f,regtype,regid,i) elif (hex_common.is_single(regid)): - gen_helper_dest_decl(f,regtype,regid,i) + if (hex_common.is_hvx_reg(regtype)): + gen_helper_dest_decl_ext(f,regtype,regid) + else: + gen_helper_dest_decl(f,regtype,regid,i) else: print("Bad register parse: ",regtype,regid,toss,numregs) +def gen_helper_src_var_ext(f,regtype,regid): + if (regtype == "Q"): + f.write(" /* %s%sV is *(MMQReg *)(%s%sV_void) */\n" % \ + (regtype,regid,regtype,regid)) + else: + f.write(" /* %s%sV is *(MMVector *)(%s%sV_void) */\n" % \ + (regtype,regid,regtype,regid)) + +def gen_helper_src_var_ext_pair(f,regtype,regid,regno): + f.write(" /* %s%sV%s is *(MMVectorPair *)(%s%sV%s_void) */\n" % \ + (regtype,regid,regno,regtype,regid,regno)) + def gen_helper_return(f,regtype,regid,regno): f.write(" return %s%sV;\n" % (regtype,regid)) def gen_helper_return_pair(f,regtype,regid,regno): f.write(" return %s%sV;\n" % (regtype,regid)) +def gen_helper_dst_write_ext(f,regtype,regid): + return + +def gen_helper_dst_write_ext_pair(f,regtype,regid): + return + def gen_helper_return_opn(f, regtype, regid, i): if (hex_common.is_pair(regid)): - gen_helper_return_pair(f,regtype,regid,i) + if (hex_common.is_hvx_reg(regtype)): + gen_helper_dst_write_ext_pair(f,regtype,regid) + else: + gen_helper_return_pair(f,regtype,regid,i) elif (hex_common.is_single(regid)): - gen_helper_return(f,regtype,regid,i) + if (hex_common.is_hvx_reg(regtype)): + gen_helper_dst_write_ext(f,regtype,regid) + else: + gen_helper_return(f,regtype,regid,i) else: print("Bad register parse: ",regtype,regid,toss,numregs) @@ -129,14 +185,20 @@ def gen_helper_function(f, tag, tagregs, tagimms): % (tag, tag)) else: ## The return type of the function is the type of the destination - ## register + ## register (if scalar) i=0 for regtype,regid,toss,numregs in regs: if (hex_common.is_written(regid)): if (hex_common.is_pair(regid)): - gen_helper_return_type_pair(f,regtype,regid,i) + if (hex_common.is_hvx_reg(regtype)): + continue + else: + gen_helper_return_type_pair(f,regtype,regid,i) elif (hex_common.is_single(regid)): - gen_helper_return_type(f,regtype,regid,i) + if (hex_common.is_hvx_reg(regtype)): + continue + else: + gen_helper_return_type(f,regtype,regid,i) else: print("Bad register parse: ",regtype,regid,toss,numregs) i += 1 @@ -145,16 +207,37 @@ def gen_helper_function(f, tag, tagregs, tagimms): f.write("void") f.write(" HELPER(%s)(CPUHexagonState *env" % tag) + ## Arguments include the vector destination operands i = 1 + for regtype,regid,toss,numregs in regs: + if (hex_common.is_written(regid)): + if (hex_common.is_pair(regid)): + if (hex_common.is_hvx_reg(regtype)): + gen_helper_arg_ext_pair(f,regtype,regid,i) + else: + continue + elif (hex_common.is_single(regid)): + if (hex_common.is_hvx_reg(regtype)): + gen_helper_arg_ext(f,regtype,regid,i) + else: + # This is the return value of the 
function + continue + else: + print("Bad register parse: ",regtype,regid,toss,numregs) + i += 1 ## Arguments to the helper function are the source regs and immediates for regtype,regid,toss,numregs in regs: if (hex_common.is_read(regid)): + if (hex_common.is_hvx_reg(regtype) and + hex_common.is_readwrite(regid)): + continue gen_helper_arg_opn(f,regtype,regid,i,tag) i += 1 for immlett,bits,immshift in imms: gen_helper_arg_imm(f,immlett) i += 1 + if hex_common.need_slot(tag): if i > 0: f.write(", ") f.write("uint32_t slot") @@ -173,6 +256,17 @@ def gen_helper_function(f, tag, tagregs, tagimms): gen_helper_dest_decl_opn(f,regtype,regid,i) i += 1 + for regtype,regid,toss,numregs in regs: + if (hex_common.is_read(regid)): + if (hex_common.is_pair(regid)): + if (hex_common.is_hvx_reg(regtype)): + gen_helper_src_var_ext_pair(f,regtype,regid,i) + elif (hex_common.is_single(regid)): + if (hex_common.is_hvx_reg(regtype)): + gen_helper_src_var_ext(f,regtype,regid) + else: + print("Bad register parse: ",regtype,regid,toss,numregs) + if 'A_FPOP' in hex_common.attribdict[tag]: f.write(' arch_fpop_start(env);\n'); @@ -192,11 +286,12 @@ def main(): hex_common.read_semantics_file(sys.argv[1]) hex_common.read_attribs_file(sys.argv[2]) hex_common.read_overrides_file(sys.argv[3]) + hex_common.read_overrides_file(sys.argv[4]) hex_common.calculate_attribs() tagregs = hex_common.get_tagregs() tagimms = hex_common.get_tagimms() - with open(sys.argv[4], 'w') as f: + with open(sys.argv[5], 'w') as f: for tag in hex_common.tags: ## Skip the priv instructions if ( "A_PRIV" in hex_common.attribdict[tag] ) : diff --git a/target/hexagon/gen_helper_protos.py b/target/hexagon/gen_helper_protos.py index ea41007ec9..3b4e993fd1 100755 --- a/target/hexagon/gen_helper_protos.py +++ b/target/hexagon/gen_helper_protos.py @@ -94,19 +94,33 @@ def gen_helper_prototype(f, tag, tagregs, tagimms): f.write('DEF_HELPER_%s(%s' % (def_helper_size, tag)) ## Generate the qemu DEF_HELPER type for each result + ## Iterate over this list twice + ## - Emit the scalar result + ## - Emit the vector result i=0 for regtype,regid,toss,numregs in regs: if (hex_common.is_written(regid)): - gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i) + if (not hex_common.is_hvx_reg(regtype)): + gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i) i += 1 ## Put the env between the outputs and inputs f.write(', env' ) i += 1 + # Second pass + for regtype,regid,toss,numregs in regs: + if (hex_common.is_written(regid)): + if (hex_common.is_hvx_reg(regtype)): + gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i) + i += 1 + ## Generate the qemu type for each input operand (regs and immediates) for regtype,regid,toss,numregs in regs: if (hex_common.is_read(regid)): + if (hex_common.is_hvx_reg(regtype) and + hex_common.is_readwrite(regid)): + continue gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i) i += 1 for immlett,bits,immshift in imms: @@ -121,11 +135,12 @@ def main(): hex_common.read_semantics_file(sys.argv[1]) hex_common.read_attribs_file(sys.argv[2]) hex_common.read_overrides_file(sys.argv[3]) + hex_common.read_overrides_file(sys.argv[4]) hex_common.calculate_attribs() tagregs = hex_common.get_tagregs() tagimms = hex_common.get_tagimms() - with open(sys.argv[4], 'w') as f: + with open(sys.argv[5], 'w') as f: for tag in hex_common.tags: ## Skip the priv instructions if ( "A_PRIV" in hex_common.attribdict[tag] ) : diff --git a/target/hexagon/gen_semantics.c b/target/hexagon/gen_semantics.c index c5fccecc9e..4a2bdd70e9 100644 
--- a/target/hexagon/gen_semantics.c +++ b/target/hexagon/gen_semantics.c @@ -44,6 +44,11 @@ int main(int argc, char *argv[]) * Q6INSN(A2_add,"Rd32=add(Rs32,Rt32)",ATTRIBS(), * "Add 32-bit registers", * { RdV=RsV+RtV;}) + * HVX instructions have the following form + * EXTINSN(V6_vinsertwr, "Vx32.w=vinsert(Rt32)", + * ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX), + * "Insert Word Scalar into Vector", + * VxV.uw[0] = RtV;) */ #define Q6INSN(TAG, BEH, ATTRIBS, DESCR, SEM) \ do { \ @@ -59,8 +64,23 @@ int main(int argc, char *argv[]) ")\n", \ #TAG, STRINGIZE(ATTRIBS)); \ } while (0); +#define EXTINSN(TAG, BEH, ATTRIBS, DESCR, SEM) \ + do { \ + fprintf(outfile, "SEMANTICS( \\\n" \ + " \"%s\", \\\n" \ + " %s, \\\n" \ + " \"\"\"%s\"\"\" \\\n" \ + ")\n", \ + #TAG, STRINGIZE(BEH), STRINGIZE(SEM)); \ + fprintf(outfile, "ATTRIBUTES( \\\n" \ + " \"%s\", \\\n" \ + " \"%s\" \\\n" \ + ")\n", \ + #TAG, STRINGIZE(ATTRIBS)); \ + } while (0); #include "imported/allidefs.def" #undef Q6INSN +#undef EXTINSN /* * Process the macro definitions @@ -81,6 +101,19 @@ int main(int argc, char *argv[]) ")\n", \ #MNAME, STRINGIZE(BEH), STRINGIZE(ATTRS)); #include "imported/macros.def" +#undef DEF_MACRO + +/* + * Process the macros for HVX + */ +#define DEF_MACRO(MNAME, BEH, ATTRS) \ + fprintf(outfile, "MACROATTRIB( \\\n" \ + " \"%s\", \\\n" \ + " \"\"\"%s\"\"\", \\\n" \ + " \"%s\" \\\n" \ + ")\n", \ + #MNAME, STRINGIZE(BEH), STRINGIZE(ATTRS)); +#include "imported/allext_macros.def" #undef DEF_MACRO fclose(outfile); diff --git a/target/hexagon/gen_tcg.h b/target/hexagon/gen_tcg.h index ee94c903db..50634ac459 100644 --- a/target/hexagon/gen_tcg.h +++ b/target/hexagon/gen_tcg.h @@ -66,11 +66,10 @@ } while (0) #define GET_EA_pci \ do { \ - TCGv tcgv_siV = tcg_const_tl(siV); \ + TCGv tcgv_siV = tcg_constant_tl(siV); \ tcg_gen_mov_tl(EA, RxV); \ gen_helper_fcircadd(RxV, RxV, tcgv_siV, MuV, \ hex_gpr[HEX_REG_CS0 + MuN]); \ - tcg_temp_free(tcgv_siV); \ } while (0) #define GET_EA_pcr(SHIFT) \ do { \ @@ -340,12 +339,13 @@ do { \ TCGv LSB = tcg_temp_local_new(); \ TCGLabel *label = gen_new_label(); \ - GET_EA; \ + tcg_gen_movi_tl(EA, 0); \ PRED; \ + CHECK_NOSHUF_PRED(GET_EA, SIZE, LSB); \ PRED_LOAD_CANCEL(LSB, EA); \ tcg_gen_movi_tl(RdV, 0); \ tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, label); \ - fLOAD(1, SIZE, SIGN, EA, RdV); \ + fLOAD(1, SIZE, SIGN, EA, RdV); \ gen_set_label(label); \ tcg_temp_free(LSB); \ } while (0) @@ -399,12 +399,13 @@ do { \ TCGv LSB = tcg_temp_local_new(); \ TCGLabel *label = gen_new_label(); \ - GET_EA; \ + tcg_gen_movi_tl(EA, 0); \ PRED; \ + CHECK_NOSHUF_PRED(GET_EA, 8, LSB); \ PRED_LOAD_CANCEL(LSB, EA); \ tcg_gen_movi_i64(RddV, 0); \ tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, label); \ - fLOAD(1, 8, u, EA, RddV); \ + fLOAD(1, 8, u, EA, RddV); \ gen_set_label(label); \ tcg_temp_free(LSB); \ } while (0) @@ -557,7 +558,7 @@ #define fGEN_TCG_A4_addp_c(SHORTCODE) \ do { \ TCGv_i64 carry = tcg_temp_new_i64(); \ - TCGv_i64 zero = tcg_const_i64(0); \ + TCGv_i64 zero = tcg_constant_i64(0); \ tcg_gen_extu_i32_i64(carry, PxV); \ tcg_gen_andi_i64(carry, carry, 1); \ tcg_gen_add2_i64(RddV, carry, RssV, zero, carry, zero); \ @@ -565,14 +566,13 @@ tcg_gen_extrl_i64_i32(PxV, carry); \ gen_8bitsof(PxV, PxV); \ tcg_temp_free_i64(carry); \ - tcg_temp_free_i64(zero); \ } while (0) /* r5:4 = sub(r1:0, r3:2, p1):carry */ #define fGEN_TCG_A4_subp_c(SHORTCODE) \ do { \ TCGv_i64 carry = tcg_temp_new_i64(); \ - TCGv_i64 zero = tcg_const_i64(0); \ + TCGv_i64 zero = tcg_constant_i64(0); \ TCGv_i64 not_RttV = tcg_temp_new_i64(); \ 
tcg_gen_extu_i32_i64(carry, PxV); \ tcg_gen_andi_i64(carry, carry, 1); \ @@ -582,7 +582,6 @@ tcg_gen_extrl_i64_i32(PxV, carry); \ gen_8bitsof(PxV, PxV); \ tcg_temp_free_i64(carry); \ - tcg_temp_free_i64(zero); \ tcg_temp_free_i64(not_RttV); \ } while (0) @@ -684,9 +683,8 @@ gen_helper_sfmin(RdV, cpu_env, RsV, RtV) #define fGEN_TCG_F2_sfclass(SHORTCODE) \ do { \ - TCGv imm = tcg_const_tl(uiV); \ + TCGv imm = tcg_constant_tl(uiV); \ gen_helper_sfclass(PdV, cpu_env, RsV, imm); \ - tcg_temp_free(imm); \ } while (0) #define fGEN_TCG_F2_sffixupn(SHORTCODE) \ gen_helper_sffixupn(RdV, cpu_env, RsV, RtV) @@ -712,9 +710,8 @@ gen_helper_dfcmpuo(PdV, cpu_env, RssV, RttV) #define fGEN_TCG_F2_dfclass(SHORTCODE) \ do { \ - TCGv imm = tcg_const_tl(uiV); \ + TCGv imm = tcg_constant_tl(uiV); \ gen_helper_dfclass(PdV, cpu_env, RssV, imm); \ - tcg_temp_free(imm); \ } while (0) #define fGEN_TCG_F2_sfmpy(SHORTCODE) \ gen_helper_sfmpy(RdV, cpu_env, RsV, RtV) diff --git a/target/hexagon/gen_tcg_funcs.py b/target/hexagon/gen_tcg_funcs.py index 7ceb25b5f6..6dea02b0b9 100755 --- a/target/hexagon/gen_tcg_funcs.py +++ b/target/hexagon/gen_tcg_funcs.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ## -## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. +## Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by @@ -119,10 +119,97 @@ def genptr_decl(f, tag, regtype, regid, regno): (regtype, regid, regtype, regid)) else: print("Bad register parse: ", regtype, regid) + elif (regtype == "V"): + if (regid in {"dd"}): + f.write(" const int %s%sN = insn->regno[%d];\n" %\ + (regtype, regid, regno)) + f.write(" const intptr_t %s%sV_off =\n" %\ + (regtype, regid)) + if (hex_common.is_tmp_result(tag)): + f.write(" ctx_tmp_vreg_off(ctx, %s%sN, 2, true);\n" % \ + (regtype, regid)) + else: + f.write(" ctx_future_vreg_off(ctx, %s%sN," % \ + (regtype, regid)) + f.write(" 2, true);\n") + if (not hex_common.skip_qemu_helper(tag)): + f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \ + (regtype, regid)) + f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ + (regtype, regid, regtype, regid)) + elif (regid in {"uu", "vv", "xx"}): + f.write(" const int %s%sN = insn->regno[%d];\n" % \ + (regtype, regid, regno)) + f.write(" const intptr_t %s%sV_off =\n" % \ + (regtype, regid)) + f.write(" offsetof(CPUHexagonState, %s%sV);\n" % \ + (regtype, regid)) + if (not hex_common.skip_qemu_helper(tag)): + f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \ + (regtype, regid)) + f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ + (regtype, regid, regtype, regid)) + elif (regid in {"s", "u", "v", "w"}): + f.write(" const int %s%sN = insn->regno[%d];\n" % \ + (regtype, regid, regno)) + f.write(" const intptr_t %s%sV_off =\n" % \ + (regtype, regid)) + f.write(" vreg_src_off(ctx, %s%sN);\n" % \ + (regtype, regid)) + if (not hex_common.skip_qemu_helper(tag)): + f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \ + (regtype, regid)) + elif (regid in {"d", "x", "y"}): + f.write(" const int %s%sN = insn->regno[%d];\n" % \ + (regtype, regid, regno)) + f.write(" const intptr_t %s%sV_off =\n" % \ + (regtype, regid)) + if (regid == "y"): + f.write(" offsetof(CPUHexagonState, vtmp);\n") + elif (hex_common.is_tmp_result(tag)): + f.write(" ctx_tmp_vreg_off(ctx, %s%sN, 1, true);\n" % \ + (regtype, regid)) + else: + f.write(" ctx_future_vreg_off(ctx, 
%s%sN," % \ + (regtype, regid)) + f.write(" 1, true);\n"); + if (not hex_common.skip_qemu_helper(tag)): + f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \ + (regtype, regid)) + f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ + (regtype, regid, regtype, regid)) + else: + print("Bad register parse: ", regtype, regid) + elif (regtype == "Q"): + if (regid in {"d", "e", "x"}): + f.write(" const int %s%sN = insn->regno[%d];\n" % \ + (regtype, regid, regno)) + f.write(" const intptr_t %s%sV_off =\n" % \ + (regtype, regid)) + f.write(" offsetof(CPUHexagonState,\n") + f.write(" future_QRegs[%s%sN]);\n" % \ + (regtype, regid)) + if (not hex_common.skip_qemu_helper(tag)): + f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \ + (regtype, regid)) + f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ + (regtype, regid, regtype, regid)) + elif (regid in {"s", "t", "u", "v"}): + f.write(" const int %s%sN = insn->regno[%d];\n" % \ + (regtype, regid, regno)) + f.write(" const intptr_t %s%sV_off =\n" %\ + (regtype, regid)) + f.write(" offsetof(CPUHexagonState, QRegs[%s%sN]);\n" % \ + (regtype, regid)) + if (not hex_common.skip_qemu_helper(tag)): + f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \ + (regtype, regid)) + else: + print("Bad register parse: ", regtype, regid) else: print("Bad register parse: ", regtype, regid) -def genptr_decl_new(f,regtype,regid,regno): +def genptr_decl_new(f, tag, regtype, regid, regno): if (regtype == "N"): if (regid in {"s", "t"}): f.write(" TCGv %s%sN = hex_new_value[insn->regno[%d]];\n" % \ @@ -135,6 +222,21 @@ def genptr_decl_new(f,regtype,regid,regno): (regtype, regid, regno)) else: print("Bad register parse: ", regtype, regid) + elif (regtype == "O"): + if (regid == "s"): + f.write(" const intptr_t %s%sN_num = insn->regno[%d];\n" % \ + (regtype, regid, regno)) + if (hex_common.skip_qemu_helper(tag)): + f.write(" const intptr_t %s%sN_off =\n" % \ + (regtype, regid)) + f.write(" ctx_future_vreg_off(ctx, %s%sN_num," % \ + (regtype, regid)) + f.write(" 1, true);\n") + else: + f.write(" TCGv %s%sN = tcg_constant_tl(%s%sN_num);\n" % \ + (regtype, regid, regtype, regid)) + else: + print("Bad register parse: ", regtype, regid) else: print("Bad register parse: ", regtype, regid) @@ -145,7 +247,7 @@ def genptr_decl_opn(f, tag, regtype, regid, toss, numregs, i): if hex_common.is_old_val(regtype, regid, tag): genptr_decl(f,tag, regtype, regid, i) elif hex_common.is_new_val(regtype, regid, tag): - genptr_decl_new(f,regtype,regid,i) + genptr_decl_new(f, tag, regtype, regid, i) else: print("Bad register parse: ",regtype,regid,toss,numregs) else: @@ -159,7 +261,7 @@ def genptr_decl_imm(f,immlett): f.write(" int %s = insn->immed[%d];\n" % \ (hex_common.imm_name(immlett), i)) -def genptr_free(f,regtype,regid,regno): +def genptr_free(f, tag, regtype, regid, regno): if (regtype == "R"): if (regid in {"dd", "ss", "tt", "xx", "yy"}): f.write(" tcg_temp_free_i64(%s%sV);\n" % (regtype, regid)) @@ -182,33 +284,51 @@ def genptr_free(f,regtype,regid,regno): elif (regtype == "M"): if (regid != "u"): print("Bad register parse: ", regtype, regid) + elif (regtype == "V"): + if (regid in {"dd", "uu", "vv", "xx", \ + "d", "s", "u", "v", "w", "x", "y"}): + if (not hex_common.skip_qemu_helper(tag)): + f.write(" tcg_temp_free_ptr(%s%sV);\n" % \ + (regtype, regid)) + else: + print("Bad register parse: ", regtype, regid) + elif (regtype == "Q"): + if (regid in {"d", "e", "s", "t", "u", "v", "x"}): + if (not hex_common.skip_qemu_helper(tag)): + f.write(" 
tcg_temp_free_ptr(%s%sV);\n" % \ + (regtype, regid)) + else: + print("Bad register parse: ", regtype, regid) else: print("Bad register parse: ", regtype, regid) -def genptr_free_new(f,regtype,regid,regno): +def genptr_free_new(f, tag, regtype, regid, regno): if (regtype == "N"): if (regid not in {"s", "t"}): print("Bad register parse: ", regtype, regid) elif (regtype == "P"): if (regid not in {"t", "u", "v"}): print("Bad register parse: ", regtype, regid) + elif (regtype == "O"): + if (regid != "s"): + print("Bad register parse: ", regtype, regid) else: print("Bad register parse: ", regtype, regid) def genptr_free_opn(f,regtype,regid,i,tag): if (hex_common.is_pair(regid)): - genptr_free(f,regtype,regid,i) + genptr_free(f, tag, regtype, regid, i) elif (hex_common.is_single(regid)): if hex_common.is_old_val(regtype, regid, tag): - genptr_free(f,regtype,regid,i) + genptr_free(f, tag, regtype, regid, i) elif hex_common.is_new_val(regtype, regid, tag): - genptr_free_new(f,regtype,regid,i) + genptr_free_new(f, tag, regtype, regid, i) else: print("Bad register parse: ",regtype,regid,toss,numregs) else: print("Bad register parse: ",regtype,regid,toss,numregs) -def genptr_src_read(f,regtype,regid): +def genptr_src_read(f, tag, regtype, regid): if (regtype == "R"): if (regid in {"ss", "tt", "xx", "yy"}): f.write(" tcg_gen_concat_i32_i64(%s%sV, hex_gpr[%s%sN],\n" % \ @@ -238,6 +358,44 @@ def genptr_src_read(f,regtype,regid): elif (regtype == "M"): if (regid != "u"): print("Bad register parse: ", regtype, regid) + elif (regtype == "V"): + if (regid in {"uu", "vv", "xx"}): + f.write(" tcg_gen_gvec_mov(MO_64, %s%sV_off,\n" % \ + (regtype, regid)) + f.write(" vreg_src_off(ctx, %s%sN),\n" % \ + (regtype, regid)) + f.write(" sizeof(MMVector), sizeof(MMVector));\n") + f.write(" tcg_gen_gvec_mov(MO_64,\n") + f.write(" %s%sV_off + sizeof(MMVector),\n" % \ + (regtype, regid)) + f.write(" vreg_src_off(ctx, %s%sN ^ 1),\n" % \ + (regtype, regid)) + f.write(" sizeof(MMVector), sizeof(MMVector));\n") + elif (regid in {"s", "u", "v", "w"}): + if (not hex_common.skip_qemu_helper(tag)): + f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ + (regtype, regid, regtype, regid)) + elif (regid in {"x", "y"}): + f.write(" tcg_gen_gvec_mov(MO_64, %s%sV_off,\n" % \ + (regtype, regid)) + f.write(" vreg_src_off(ctx, %s%sN),\n" % \ + (regtype, regid)) + f.write(" sizeof(MMVector), sizeof(MMVector));\n") + else: + print("Bad register parse: ", regtype, regid) + elif (regtype == "Q"): + if (regid in {"s", "t", "u", "v"}): + if (not hex_common.skip_qemu_helper(tag)): + f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ + (regtype, regid, regtype, regid)) + elif (regid in {"x"}): + f.write(" tcg_gen_gvec_mov(MO_64, %s%sV_off,\n" % \ + (regtype, regid)) + f.write(" offsetof(CPUHexagonState, QRegs[%s%sN]),\n" % \ + (regtype, regid)) + f.write(" sizeof(MMQReg), sizeof(MMQReg));\n") + else: + print("Bad register parse: ", regtype, regid) else: print("Bad register parse: ", regtype, regid) @@ -248,15 +406,18 @@ def genptr_src_read_new(f,regtype,regid): elif (regtype == "P"): if (regid not in {"t", "u", "v"}): print("Bad register parse: ", regtype, regid) + elif (regtype == "O"): + if (regid != "s"): + print("Bad register parse: ", regtype, regid) else: print("Bad register parse: ", regtype, regid) def genptr_src_read_opn(f,regtype,regid,tag): if (hex_common.is_pair(regid)): - genptr_src_read(f,regtype,regid) + genptr_src_read(f, tag, regtype, regid) elif (hex_common.is_single(regid)): if 
hex_common.is_old_val(regtype, regid, tag): - genptr_src_read(f,regtype,regid) + genptr_src_read(f, tag, regtype, regid) elif hex_common.is_new_val(regtype, regid, tag): genptr_src_read_new(f,regtype,regid) else: @@ -279,15 +440,12 @@ def gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i): print("Bad register parse: ",regtype,regid,toss,numregs) def gen_helper_decl_imm(f,immlett): - f.write(" TCGv tcgv_%s = tcg_const_tl(%s);\n" % \ + f.write(" TCGv tcgv_%s = tcg_constant_tl(%s);\n" % \ (hex_common.imm_name(immlett), hex_common.imm_name(immlett))) def gen_helper_call_imm(f,immlett): f.write(", tcgv_%s" % hex_common.imm_name(immlett)) -def gen_helper_free_imm(f,immlett): - f.write(" tcg_temp_free(tcgv_%s);\n" % hex_common.imm_name(immlett)) - def genptr_dst_write_pair(f, tag, regtype, regid): if ('A_CONDEXEC' in hex_common.attribdict[tag]): f.write(" gen_log_predicated_reg_write_pair(%s%sN, %s%sV, insn->slot);\n" % \ @@ -334,11 +492,68 @@ def genptr_dst_write(f, tag, regtype, regid): else: print("Bad register parse: ", regtype, regid) +def genptr_dst_write_ext(f, tag, regtype, regid, newv="EXT_DFL"): + if (regtype == "V"): + if (regid in {"dd", "xx", "yy"}): + if ('A_CONDEXEC' in hex_common.attribdict[tag]): + is_predicated = "true" + else: + is_predicated = "false" + f.write(" gen_log_vreg_write_pair(ctx, %s%sV_off, %s%sN, " % \ + (regtype, regid, regtype, regid)) + f.write("%s, insn->slot, %s);\n" % \ + (newv, is_predicated)) + f.write(" ctx_log_vreg_write_pair(ctx, %s%sN, %s,\n" % \ + (regtype, regid, newv)) + f.write(" %s);\n" % (is_predicated)) + elif (regid in {"d", "x", "y"}): + if ('A_CONDEXEC' in hex_common.attribdict[tag]): + is_predicated = "true" + else: + is_predicated = "false" + f.write(" gen_log_vreg_write(ctx, %s%sV_off, %s%sN, %s, " % \ + (regtype, regid, regtype, regid, newv)) + f.write("insn->slot, %s);\n" % \ + (is_predicated)) + f.write(" ctx_log_vreg_write(ctx, %s%sN, %s, %s);\n" % \ + (regtype, regid, newv, is_predicated)) + else: + print("Bad register parse: ", regtype, regid) + elif (regtype == "Q"): + if (regid in {"d", "e", "x"}): + if ('A_CONDEXEC' in hex_common.attribdict[tag]): + is_predicated = "true" + else: + is_predicated = "false" + f.write(" gen_log_qreg_write(%s%sV_off, %s%sN, %s, " % \ + (regtype, regid, regtype, regid, newv)) + f.write("insn->slot, %s);\n" % (is_predicated)) + f.write(" ctx_log_qreg_write(ctx, %s%sN, %s);\n" % \ + (regtype, regid, is_predicated)) + else: + print("Bad register parse: ", regtype, regid) + else: + print("Bad register parse: ", regtype, regid) + def genptr_dst_write_opn(f,regtype, regid, tag): if (hex_common.is_pair(regid)): - genptr_dst_write(f, tag, regtype, regid) + if (hex_common.is_hvx_reg(regtype)): + if (hex_common.is_tmp_result(tag)): + genptr_dst_write_ext(f, tag, regtype, regid, "EXT_TMP") + else: + genptr_dst_write_ext(f, tag, regtype, regid) + else: + genptr_dst_write(f, tag, regtype, regid) elif (hex_common.is_single(regid)): - genptr_dst_write(f, tag, regtype, regid) + if (hex_common.is_hvx_reg(regtype)): + if (hex_common.is_new_result(tag)): + genptr_dst_write_ext(f, tag, regtype, regid, "EXT_NEW") + elif (hex_common.is_tmp_result(tag)): + genptr_dst_write_ext(f, tag, regtype, regid, "EXT_TMP") + else: + genptr_dst_write_ext(f, tag, regtype, regid, "EXT_DFL") + else: + genptr_dst_write(f, tag, regtype, regid) else: print("Bad register parse: ",regtype,regid,toss,numregs) @@ -401,21 +616,32 @@ def gen_tcg_func(f, tag, regs, imms): for immlett,bits,immshift in imms: gen_helper_decl_imm(f,immlett) if 
hex_common.need_part1(tag): - f.write(" TCGv part1 = tcg_const_tl(insn->part1);\n") + f.write(" TCGv part1 = tcg_constant_tl(insn->part1);\n") if hex_common.need_slot(tag): - f.write(" TCGv slot = tcg_const_tl(insn->slot);\n") + f.write(" TCGv slot = tcg_constant_tl(insn->slot);\n") f.write(" gen_helper_%s(" % (tag)) i=0 ## If there is a scalar result, it is the return type for regtype,regid,toss,numregs in regs: if (hex_common.is_written(regid)): + if (hex_common.is_hvx_reg(regtype)): + continue gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i) i += 1 if (i > 0): f.write(", ") f.write("cpu_env") i=1 + for regtype,regid,toss,numregs in regs: + if (hex_common.is_written(regid)): + if (not hex_common.is_hvx_reg(regtype)): + continue + gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i) + i += 1 for regtype,regid,toss,numregs in regs: if (hex_common.is_read(regid)): + if (hex_common.is_hvx_reg(regtype) and + hex_common.is_readwrite(regid)): + continue gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i) i += 1 for immlett,bits,immshift in imms: @@ -424,12 +650,6 @@ def gen_tcg_func(f, tag, regs, imms): if hex_common.need_slot(tag): f.write(", slot") if hex_common.need_part1(tag): f.write(", part1" ) f.write(");\n") - if hex_common.need_slot(tag): - f.write(" tcg_temp_free(slot);\n") - if hex_common.need_part1(tag): - f.write(" tcg_temp_free(part1);\n") - for immlett,bits,immshift in imms: - gen_helper_free_imm(f,immlett) ## Write all the outputs for regtype,regid,toss,numregs in regs: @@ -454,11 +674,12 @@ def main(): hex_common.read_semantics_file(sys.argv[1]) hex_common.read_attribs_file(sys.argv[2]) hex_common.read_overrides_file(sys.argv[3]) + hex_common.read_overrides_file(sys.argv[4]) hex_common.calculate_attribs() tagregs = hex_common.get_tagregs() tagimms = hex_common.get_tagimms() - with open(sys.argv[4], 'w') as f: + with open(sys.argv[5], 'w') as f: f.write("#ifndef HEXAGON_TCG_FUNCS_H\n") f.write("#define HEXAGON_TCG_FUNCS_H\n\n") diff --git a/target/hexagon/gen_tcg_hvx.h b/target/hexagon/gen_tcg_hvx.h new file mode 100644 index 0000000000..cdcc9382bb --- /dev/null +++ b/target/hexagon/gen_tcg_hvx.h @@ -0,0 +1,903 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef HEXAGON_GEN_TCG_HVX_H +#define HEXAGON_GEN_TCG_HVX_H + +/* + * Histogram instructions + * + * Note that these instructions operate directly on the vector registers + * and therefore happen after commit. 
+ * + * The generate_ function is called twice + * The first time is during the normal TCG generation + * ctx->pre_commit is true + * In the masked cases, we save the mask to the qtmp temporary + * Otherwise, there is nothing to do + * The second call is at the end of gen_commit_packet + * ctx->pre_commit is false + * Generate the call to the helper + */ + +static inline void assert_vhist_tmp(DisasContext *ctx) +{ + /* vhist instructions require exactly one .tmp to be defined */ + g_assert(ctx->tmp_vregs_idx == 1); +} + +#define fGEN_TCG_V6_vhist(SHORTCODE) \ + if (!ctx->pre_commit) { \ + assert_vhist_tmp(ctx); \ + gen_helper_vhist(cpu_env); \ + } +#define fGEN_TCG_V6_vhistq(SHORTCODE) \ + do { \ + if (ctx->pre_commit) { \ + intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \ + tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + } else { \ + assert_vhist_tmp(ctx); \ + gen_helper_vhistq(cpu_env); \ + } \ + } while (0) +#define fGEN_TCG_V6_vwhist256(SHORTCODE) \ + if (!ctx->pre_commit) { \ + assert_vhist_tmp(ctx); \ + gen_helper_vwhist256(cpu_env); \ + } +#define fGEN_TCG_V6_vwhist256q(SHORTCODE) \ + do { \ + if (ctx->pre_commit) { \ + intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \ + tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + } else { \ + assert_vhist_tmp(ctx); \ + gen_helper_vwhist256q(cpu_env); \ + } \ + } while (0) +#define fGEN_TCG_V6_vwhist256_sat(SHORTCODE) \ + if (!ctx->pre_commit) { \ + assert_vhist_tmp(ctx); \ + gen_helper_vwhist256_sat(cpu_env); \ + } +#define fGEN_TCG_V6_vwhist256q_sat(SHORTCODE) \ + do { \ + if (ctx->pre_commit) { \ + intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \ + tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + } else { \ + assert_vhist_tmp(ctx); \ + gen_helper_vwhist256q_sat(cpu_env); \ + } \ + } while (0) +#define fGEN_TCG_V6_vwhist128(SHORTCODE) \ + if (!ctx->pre_commit) { \ + assert_vhist_tmp(ctx); \ + gen_helper_vwhist128(cpu_env); \ + } +#define fGEN_TCG_V6_vwhist128q(SHORTCODE) \ + do { \ + if (ctx->pre_commit) { \ + intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \ + tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + } else { \ + assert_vhist_tmp(ctx); \ + gen_helper_vwhist128q(cpu_env); \ + } \ + } while (0) +#define fGEN_TCG_V6_vwhist128m(SHORTCODE) \ + if (!ctx->pre_commit) { \ + TCGv tcgv_uiV = tcg_constant_tl(uiV); \ + assert_vhist_tmp(ctx); \ + gen_helper_vwhist128m(cpu_env, tcgv_uiV); \ + } +#define fGEN_TCG_V6_vwhist128qm(SHORTCODE) \ + do { \ + if (ctx->pre_commit) { \ + intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \ + tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + } else { \ + TCGv tcgv_uiV = tcg_constant_tl(uiV); \ + assert_vhist_tmp(ctx); \ + gen_helper_vwhist128qm(cpu_env, tcgv_uiV); \ + } \ + } while (0) + + +#define fGEN_TCG_V6_vassign(SHORTCODE) \ + tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +/* Vector conditional move */ +#define fGEN_TCG_VEC_CMOV(PRED) \ + do { \ + TCGv lsb = tcg_temp_new(); \ + TCGLabel *false_label = gen_new_label(); \ + TCGLabel *end_label = gen_new_label(); \ + tcg_gen_andi_tl(lsb, PsV, 1); \ + tcg_gen_brcondi_tl(TCG_COND_NE, lsb, PRED, false_label); \ + tcg_temp_free(lsb); \ + tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_gen_br(end_label); \ + gen_set_label(false_label); \ + tcg_gen_ori_tl(hex_slot_cancelled, 
hex_slot_cancelled, \ + 1 << insn->slot); \ + gen_set_label(end_label); \ + } while (0) + + +/* Vector conditional move (true) */ +#define fGEN_TCG_V6_vcmov(SHORTCODE) \ + fGEN_TCG_VEC_CMOV(1) + +/* Vector conditional move (false) */ +#define fGEN_TCG_V6_vncmov(SHORTCODE) \ + fGEN_TCG_VEC_CMOV(0) + +/* Vector add - various forms */ +#define fGEN_TCG_V6_vaddb(SHORTCODE) \ + tcg_gen_gvec_add(MO_8, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vaddh(SHORTCYDE) \ + tcg_gen_gvec_add(MO_16, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vaddw(SHORTCODE) \ + tcg_gen_gvec_add(MO_32, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vaddb_dv(SHORTCODE) \ + tcg_gen_gvec_add(MO_8, VddV_off, VuuV_off, VvvV_off, \ + sizeof(MMVector) * 2, sizeof(MMVector) * 2) + +#define fGEN_TCG_V6_vaddh_dv(SHORTCYDE) \ + tcg_gen_gvec_add(MO_16, VddV_off, VuuV_off, VvvV_off, \ + sizeof(MMVector) * 2, sizeof(MMVector) * 2) + +#define fGEN_TCG_V6_vaddw_dv(SHORTCODE) \ + tcg_gen_gvec_add(MO_32, VddV_off, VuuV_off, VvvV_off, \ + sizeof(MMVector) * 2, sizeof(MMVector) * 2) + +/* Vector sub - various forms */ +#define fGEN_TCG_V6_vsubb(SHORTCODE) \ + tcg_gen_gvec_sub(MO_8, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vsubh(SHORTCODE) \ + tcg_gen_gvec_sub(MO_16, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vsubw(SHORTCODE) \ + tcg_gen_gvec_sub(MO_32, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vsubb_dv(SHORTCODE) \ + tcg_gen_gvec_sub(MO_8, VddV_off, VuuV_off, VvvV_off, \ + sizeof(MMVector) * 2, sizeof(MMVector) * 2) + +#define fGEN_TCG_V6_vsubh_dv(SHORTCODE) \ + tcg_gen_gvec_sub(MO_16, VddV_off, VuuV_off, VvvV_off, \ + sizeof(MMVector) * 2, sizeof(MMVector) * 2) + +#define fGEN_TCG_V6_vsubw_dv(SHORTCODE) \ + tcg_gen_gvec_sub(MO_32, VddV_off, VuuV_off, VvvV_off, \ + sizeof(MMVector) * 2, sizeof(MMVector) * 2) + +/* Vector shift right - various forms */ +#define fGEN_TCG_V6_vasrh(SHORTCODE) \ + do { \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 15); \ + tcg_gen_gvec_sars(MO_16, VdV_off, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vasrh_acc(SHORTCODE) \ + do { \ + intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 15); \ + tcg_gen_gvec_sars(MO_16, tmpoff, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vasrw(SHORTCODE) \ + do { \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 31); \ + tcg_gen_gvec_sars(MO_32, VdV_off, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vasrw_acc(SHORTCODE) \ + do { \ + intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 31); \ + tcg_gen_gvec_sars(MO_32, tmpoff, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vlsrb(SHORTCODE) \ + do { \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 
7); \ + tcg_gen_gvec_shrs(MO_8, VdV_off, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vlsrh(SHORTCODE) \ + do { \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 15); \ + tcg_gen_gvec_shrs(MO_16, VdV_off, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vlsrw(SHORTCODE) \ + do { \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 31); \ + tcg_gen_gvec_shrs(MO_32, VdV_off, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +/* Vector shift left - various forms */ +#define fGEN_TCG_V6_vaslb(SHORTCODE) \ + do { \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 7); \ + tcg_gen_gvec_shls(MO_8, VdV_off, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vaslh(SHORTCODE) \ + do { \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 15); \ + tcg_gen_gvec_shls(MO_16, VdV_off, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vaslh_acc(SHORTCODE) \ + do { \ + intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 15); \ + tcg_gen_gvec_shls(MO_16, tmpoff, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vaslw(SHORTCODE) \ + do { \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 31); \ + tcg_gen_gvec_shls(MO_32, VdV_off, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vaslw_acc(SHORTCODE) \ + do { \ + intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 31); \ + tcg_gen_gvec_shls(MO_32, tmpoff, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +/* Vector max - various forms */ +#define fGEN_TCG_V6_vmaxw(SHORTCODE) \ + tcg_gen_gvec_smax(MO_32, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) +#define fGEN_TCG_V6_vmaxh(SHORTCODE) \ + tcg_gen_gvec_smax(MO_16, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) +#define fGEN_TCG_V6_vmaxuh(SHORTCODE) \ + tcg_gen_gvec_umax(MO_16, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) +#define fGEN_TCG_V6_vmaxb(SHORTCODE) \ + tcg_gen_gvec_smax(MO_8, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) +#define fGEN_TCG_V6_vmaxub(SHORTCODE) \ + tcg_gen_gvec_umax(MO_8, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +/* Vector min - various forms */ +#define fGEN_TCG_V6_vminw(SHORTCODE) \ + tcg_gen_gvec_smin(MO_32, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) +#define fGEN_TCG_V6_vminh(SHORTCODE) \ + tcg_gen_gvec_smin(MO_16, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) +#define fGEN_TCG_V6_vminuh(SHORTCODE) \ + tcg_gen_gvec_umin(MO_16, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) +#define fGEN_TCG_V6_vminb(SHORTCODE) \ + tcg_gen_gvec_smin(MO_8, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) 
+#define fGEN_TCG_V6_vminub(SHORTCODE) \ + tcg_gen_gvec_umin(MO_8, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +/* Vector logical ops */ +#define fGEN_TCG_V6_vxor(SHORTCODE) \ + tcg_gen_gvec_xor(MO_64, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vand(SHORTCODE) \ + tcg_gen_gvec_and(MO_64, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vor(SHORTCODE) \ + tcg_gen_gvec_or(MO_64, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vnot(SHORTCODE) \ + tcg_gen_gvec_not(MO_64, VdV_off, VuV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +/* Q register logical ops */ +#define fGEN_TCG_V6_pred_or(SHORTCODE) \ + tcg_gen_gvec_or(MO_64, QdV_off, QsV_off, QtV_off, \ + sizeof(MMQReg), sizeof(MMQReg)) + +#define fGEN_TCG_V6_pred_and(SHORTCODE) \ + tcg_gen_gvec_and(MO_64, QdV_off, QsV_off, QtV_off, \ + sizeof(MMQReg), sizeof(MMQReg)) + +#define fGEN_TCG_V6_pred_xor(SHORTCODE) \ + tcg_gen_gvec_xor(MO_64, QdV_off, QsV_off, QtV_off, \ + sizeof(MMQReg), sizeof(MMQReg)) + +#define fGEN_TCG_V6_pred_or_n(SHORTCODE) \ + tcg_gen_gvec_orc(MO_64, QdV_off, QsV_off, QtV_off, \ + sizeof(MMQReg), sizeof(MMQReg)) + +#define fGEN_TCG_V6_pred_and_n(SHORTCODE) \ + tcg_gen_gvec_andc(MO_64, QdV_off, QsV_off, QtV_off, \ + sizeof(MMQReg), sizeof(MMQReg)) + +#define fGEN_TCG_V6_pred_not(SHORTCODE) \ + tcg_gen_gvec_not(MO_64, QdV_off, QsV_off, \ + sizeof(MMQReg), sizeof(MMQReg)) + +/* Vector compares */ +#define fGEN_TCG_VEC_CMP(COND, TYPE, SIZE) \ + do { \ + intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \ + tcg_gen_gvec_cmp(COND, TYPE, tmpoff, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + vec_to_qvec(SIZE, QdV_off, tmpoff); \ + } while (0) + +#define fGEN_TCG_V6_vgtw(SHORTCODE) \ + fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_32, 4) +#define fGEN_TCG_V6_vgth(SHORTCODE) \ + fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_16, 2) +#define fGEN_TCG_V6_vgtb(SHORTCODE) \ + fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_8, 1) + +#define fGEN_TCG_V6_vgtuw(SHORTCODE) \ + fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_32, 4) +#define fGEN_TCG_V6_vgtuh(SHORTCODE) \ + fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_16, 2) +#define fGEN_TCG_V6_vgtub(SHORTCODE) \ + fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_8, 1) + +#define fGEN_TCG_V6_veqw(SHORTCODE) \ + fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_32, 4) +#define fGEN_TCG_V6_veqh(SHORTCODE) \ + fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_16, 2) +#define fGEN_TCG_V6_veqb(SHORTCODE) \ + fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_8, 1) + +#define fGEN_TCG_VEC_CMP_OP(COND, TYPE, SIZE, OP) \ + do { \ + intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \ + intptr_t qoff = offsetof(CPUHexagonState, qtmp); \ + tcg_gen_gvec_cmp(COND, TYPE, tmpoff, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + vec_to_qvec(SIZE, qoff, tmpoff); \ + OP(MO_64, QxV_off, QxV_off, qoff, sizeof(MMQReg), sizeof(MMQReg)); \ + } while (0) + +#define fGEN_TCG_V6_vgtw_and(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_and) +#define fGEN_TCG_V6_vgtw_or(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_or) +#define fGEN_TCG_V6_vgtw_xor(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_xor) + +#define fGEN_TCG_V6_vgtuw_and(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_and) +#define fGEN_TCG_V6_vgtuw_or(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_or) +#define fGEN_TCG_V6_vgtuw_xor(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 
4, tcg_gen_gvec_xor) + +#define fGEN_TCG_V6_vgth_and(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_and) +#define fGEN_TCG_V6_vgth_or(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_or) +#define fGEN_TCG_V6_vgth_xor(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_xor) + +#define fGEN_TCG_V6_vgtuh_and(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_and) +#define fGEN_TCG_V6_vgtuh_or(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_or) +#define fGEN_TCG_V6_vgtuh_xor(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_xor) + +#define fGEN_TCG_V6_vgtb_and(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_and) +#define fGEN_TCG_V6_vgtb_or(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_or) +#define fGEN_TCG_V6_vgtb_xor(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_xor) + +#define fGEN_TCG_V6_vgtub_and(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_and) +#define fGEN_TCG_V6_vgtub_or(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_or) +#define fGEN_TCG_V6_vgtub_xor(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_xor) + +#define fGEN_TCG_V6_veqw_and(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_and) +#define fGEN_TCG_V6_veqw_or(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_or) +#define fGEN_TCG_V6_veqw_xor(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_xor) + +#define fGEN_TCG_V6_veqh_and(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_and) +#define fGEN_TCG_V6_veqh_or(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_or) +#define fGEN_TCG_V6_veqh_xor(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_xor) + +#define fGEN_TCG_V6_veqb_and(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_and) +#define fGEN_TCG_V6_veqb_or(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_or) +#define fGEN_TCG_V6_veqb_xor(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_xor) + +/* Vector splat - various forms */ +#define fGEN_TCG_V6_lvsplatw(SHORTCODE) \ + tcg_gen_gvec_dup_i32(MO_32, VdV_off, \ + sizeof(MMVector), sizeof(MMVector), RtV) + +#define fGEN_TCG_V6_lvsplath(SHORTCODE) \ + tcg_gen_gvec_dup_i32(MO_16, VdV_off, \ + sizeof(MMVector), sizeof(MMVector), RtV) + +#define fGEN_TCG_V6_lvsplatb(SHORTCODE) \ + tcg_gen_gvec_dup_i32(MO_8, VdV_off, \ + sizeof(MMVector), sizeof(MMVector), RtV) + +/* Vector absolute value - various forms */ +#define fGEN_TCG_V6_vabsb(SHORTCODE) \ + tcg_gen_gvec_abs(MO_8, VdV_off, VuV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vabsh(SHORTCODE) \ + tcg_gen_gvec_abs(MO_16, VdV_off, VuV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vabsw(SHORTCODE) \ + tcg_gen_gvec_abs(MO_32, VdV_off, VuV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +/* Vector loads */ +#define fGEN_TCG_V6_vL32b_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32Ub_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_cur_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_tmp_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_nt_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_nt_cur_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_nt_tmp_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_ai(SHORTCODE) SHORTCODE 
+#define fGEN_TCG_V6_vL32Ub_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_cur_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_tmp_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_nt_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_nt_cur_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_nt_tmp_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32Ub_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_cur_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_tmp_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_nt_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_nt_cur_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_nt_tmp_ppu(SHORTCODE) SHORTCODE + +/* Predicated vector loads */ +#define fGEN_TCG_PRED_VEC_LOAD(GET_EA, PRED, DSTOFF, INC) \ + do { \ + TCGv LSB = tcg_temp_new(); \ + TCGLabel *false_label = gen_new_label(); \ + TCGLabel *end_label = gen_new_label(); \ + GET_EA; \ + PRED; \ + tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \ + tcg_temp_free(LSB); \ + gen_vreg_load(ctx, DSTOFF, EA, true); \ + INC; \ + tcg_gen_br(end_label); \ + gen_set_label(false_label); \ + tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \ + 1 << insn->slot); \ + gen_set_label(end_label); \ + } while (0) + +#define fGEN_TCG_PRED_VEC_LOAD_pred_pi \ + fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \ + fEA_REG(RxV), \ + VdV_off, \ + fPM_I(RxV, siV * sizeof(MMVector))) +#define fGEN_TCG_PRED_VEC_LOAD_npred_pi \ + fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \ + fEA_REG(RxV), \ + VdV_off, \ + fPM_I(RxV, siV * sizeof(MMVector))) + +#define fGEN_TCG_V6_vL32b_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_pi +#define fGEN_TCG_V6_vL32b_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_pi +#define fGEN_TCG_V6_vL32b_cur_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_pi +#define fGEN_TCG_V6_vL32b_cur_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_pi +#define fGEN_TCG_V6_vL32b_tmp_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_pi +#define fGEN_TCG_V6_vL32b_tmp_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_pi +#define fGEN_TCG_V6_vL32b_nt_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_pi +#define fGEN_TCG_V6_vL32b_nt_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_pi +#define fGEN_TCG_V6_vL32b_nt_cur_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_pi +#define fGEN_TCG_V6_vL32b_nt_cur_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_pi +#define fGEN_TCG_V6_vL32b_nt_tmp_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_pi +#define fGEN_TCG_V6_vL32b_nt_tmp_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_pi + +#define fGEN_TCG_PRED_VEC_LOAD_pred_ai \ + fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \ + fEA_RI(RtV, siV * sizeof(MMVector)), \ + VdV_off, \ + do {} while (0)) +#define fGEN_TCG_PRED_VEC_LOAD_npred_ai \ + fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \ + fEA_RI(RtV, siV * sizeof(MMVector)), \ + VdV_off, \ + do {} while (0)) + +#define fGEN_TCG_V6_vL32b_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ai +#define fGEN_TCG_V6_vL32b_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ai +#define fGEN_TCG_V6_vL32b_cur_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ai +#define fGEN_TCG_V6_vL32b_cur_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ai +#define fGEN_TCG_V6_vL32b_tmp_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ai +#define fGEN_TCG_V6_vL32b_tmp_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ai +#define fGEN_TCG_V6_vL32b_nt_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ai +#define 
fGEN_TCG_V6_vL32b_nt_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ai +#define fGEN_TCG_V6_vL32b_nt_cur_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ai +#define fGEN_TCG_V6_vL32b_nt_cur_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ai +#define fGEN_TCG_V6_vL32b_nt_tmp_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ai +#define fGEN_TCG_V6_vL32b_nt_tmp_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ai + +#define fGEN_TCG_PRED_VEC_LOAD_pred_ppu \ + fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \ + fEA_REG(RxV), \ + VdV_off, \ + fPM_M(RxV, MuV)) +#define fGEN_TCG_PRED_VEC_LOAD_npred_ppu \ + fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \ + fEA_REG(RxV), \ + VdV_off, \ + fPM_M(RxV, MuV)) + +#define fGEN_TCG_V6_vL32b_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ppu +#define fGEN_TCG_V6_vL32b_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ppu +#define fGEN_TCG_V6_vL32b_cur_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ppu +#define fGEN_TCG_V6_vL32b_cur_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ppu +#define fGEN_TCG_V6_vL32b_tmp_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ppu +#define fGEN_TCG_V6_vL32b_tmp_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ppu +#define fGEN_TCG_V6_vL32b_nt_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ppu +#define fGEN_TCG_V6_vL32b_nt_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ppu +#define fGEN_TCG_V6_vL32b_nt_cur_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ppu +#define fGEN_TCG_V6_vL32b_nt_cur_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ppu +#define fGEN_TCG_V6_vL32b_nt_tmp_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ppu +#define fGEN_TCG_V6_vL32b_nt_tmp_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ppu + +/* Vector stores */ +#define fGEN_TCG_V6_vS32b_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32Ub_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nt_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32Ub_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nt_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32Ub_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nt_ppu(SHORTCODE) SHORTCODE + +/* New value vector stores */ +#define fGEN_TCG_NEWVAL_VEC_STORE(GET_EA, INC) \ + do { \ + GET_EA; \ + gen_vreg_store(ctx, insn, pkt, EA, OsN_off, insn->slot, true); \ + INC; \ + } while (0) + +#define fGEN_TCG_NEWVAL_VEC_STORE_pi \ + fGEN_TCG_NEWVAL_VEC_STORE(fEA_REG(RxV), fPM_I(RxV, siV * sizeof(MMVector))) + +#define fGEN_TCG_V6_vS32b_new_pi(SHORTCODE) \ + fGEN_TCG_NEWVAL_VEC_STORE_pi +#define fGEN_TCG_V6_vS32b_nt_new_pi(SHORTCODE) \ + fGEN_TCG_NEWVAL_VEC_STORE_pi + +#define fGEN_TCG_NEWVAL_VEC_STORE_ai \ + fGEN_TCG_NEWVAL_VEC_STORE(fEA_RI(RtV, siV * sizeof(MMVector)), \ + do { } while (0)) + +#define fGEN_TCG_V6_vS32b_new_ai(SHORTCODE) \ + fGEN_TCG_NEWVAL_VEC_STORE_ai +#define fGEN_TCG_V6_vS32b_nt_new_ai(SHORTCODE) \ + fGEN_TCG_NEWVAL_VEC_STORE_ai + +#define fGEN_TCG_NEWVAL_VEC_STORE_ppu \ + fGEN_TCG_NEWVAL_VEC_STORE(fEA_REG(RxV), fPM_M(RxV, MuV)) + +#define fGEN_TCG_V6_vS32b_new_ppu(SHORTCODE) \ + fGEN_TCG_NEWVAL_VEC_STORE_ppu +#define fGEN_TCG_V6_vS32b_nt_new_ppu(SHORTCODE) \ + fGEN_TCG_NEWVAL_VEC_STORE_ppu + +/* Predicated vector stores */ +#define fGEN_TCG_PRED_VEC_STORE(GET_EA, PRED, SRCOFF, ALIGN, INC) \ + do { \ + TCGv LSB = tcg_temp_new(); \ + TCGLabel *false_label = gen_new_label(); \ + TCGLabel *end_label = gen_new_label(); \ + GET_EA; \ + PRED; \ + 
tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \ + tcg_temp_free(LSB); \ + gen_vreg_store(ctx, insn, pkt, EA, SRCOFF, insn->slot, ALIGN); \ + INC; \ + tcg_gen_br(end_label); \ + gen_set_label(false_label); \ + tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \ + 1 << insn->slot); \ + gen_set_label(end_label); \ + } while (0) + +#define fGEN_TCG_PRED_VEC_STORE_pred_pi(ALIGN) \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \ + fEA_REG(RxV), \ + VsV_off, ALIGN, \ + fPM_I(RxV, siV * sizeof(MMVector))) +#define fGEN_TCG_PRED_VEC_STORE_npred_pi(ALIGN) \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \ + fEA_REG(RxV), \ + VsV_off, ALIGN, \ + fPM_I(RxV, siV * sizeof(MMVector))) +#define fGEN_TCG_PRED_VEC_STORE_new_pred_pi \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \ + fEA_REG(RxV), \ + OsN_off, true, \ + fPM_I(RxV, siV * sizeof(MMVector))) +#define fGEN_TCG_PRED_VEC_STORE_new_npred_pi \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \ + fEA_REG(RxV), \ + OsN_off, true, \ + fPM_I(RxV, siV * sizeof(MMVector))) + +#define fGEN_TCG_V6_vS32b_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_pred_pi(true) +#define fGEN_TCG_V6_vS32b_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_npred_pi(true) +#define fGEN_TCG_V6_vS32Ub_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_pred_pi(false) +#define fGEN_TCG_V6_vS32Ub_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_npred_pi(false) +#define fGEN_TCG_V6_vS32b_nt_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_pred_pi(true) +#define fGEN_TCG_V6_vS32b_nt_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_npred_pi(true) +#define fGEN_TCG_V6_vS32b_new_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_pred_pi +#define fGEN_TCG_V6_vS32b_new_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_npred_pi +#define fGEN_TCG_V6_vS32b_nt_new_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_pred_pi +#define fGEN_TCG_V6_vS32b_nt_new_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_npred_pi + +#define fGEN_TCG_PRED_VEC_STORE_pred_ai(ALIGN) \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \ + fEA_RI(RtV, siV * sizeof(MMVector)), \ + VsV_off, ALIGN, \ + do { } while (0)) +#define fGEN_TCG_PRED_VEC_STORE_npred_ai(ALIGN) \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \ + fEA_RI(RtV, siV * sizeof(MMVector)), \ + VsV_off, ALIGN, \ + do { } while (0)) +#define fGEN_TCG_PRED_VEC_STORE_new_pred_ai \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \ + fEA_RI(RtV, siV * sizeof(MMVector)), \ + OsN_off, true, \ + do { } while (0)) +#define fGEN_TCG_PRED_VEC_STORE_new_npred_ai \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \ + fEA_RI(RtV, siV * sizeof(MMVector)), \ + OsN_off, true, \ + do { } while (0)) + +#define fGEN_TCG_V6_vS32b_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_pred_ai(true) +#define fGEN_TCG_V6_vS32b_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_npred_ai(true) +#define fGEN_TCG_V6_vS32Ub_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_pred_ai(false) +#define fGEN_TCG_V6_vS32Ub_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_npred_ai(false) +#define fGEN_TCG_V6_vS32b_nt_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_pred_ai(true) +#define fGEN_TCG_V6_vS32b_nt_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_npred_ai(true) +#define fGEN_TCG_V6_vS32b_new_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_pred_ai +#define fGEN_TCG_V6_vS32b_new_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_npred_ai +#define fGEN_TCG_V6_vS32b_nt_new_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_pred_ai +#define fGEN_TCG_V6_vS32b_nt_new_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_npred_ai + +#define 
fGEN_TCG_PRED_VEC_STORE_pred_ppu(ALIGN) \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \ + fEA_REG(RxV), \ + VsV_off, ALIGN, \ + fPM_M(RxV, MuV)) +#define fGEN_TCG_PRED_VEC_STORE_npred_ppu(ALIGN) \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \ + fEA_REG(RxV), \ + VsV_off, ALIGN, \ + fPM_M(RxV, MuV)) +#define fGEN_TCG_PRED_VEC_STORE_new_pred_ppu \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \ + fEA_REG(RxV), \ + OsN_off, true, \ + fPM_M(RxV, MuV)) +#define fGEN_TCG_PRED_VEC_STORE_new_npred_ppu \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \ + fEA_REG(RxV), \ + OsN_off, true, \ + fPM_M(RxV, MuV)) + +#define fGEN_TCG_V6_vS32b_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_pred_ppu(true) +#define fGEN_TCG_V6_vS32b_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_npred_ppu(true) +#define fGEN_TCG_V6_vS32Ub_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_pred_ppu(false) +#define fGEN_TCG_V6_vS32Ub_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_npred_ppu(false) +#define fGEN_TCG_V6_vS32b_nt_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_pred_ppu(true) +#define fGEN_TCG_V6_vS32b_nt_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_npred_ppu(true) +#define fGEN_TCG_V6_vS32b_new_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_pred_ppu +#define fGEN_TCG_V6_vS32b_new_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_npred_ppu +#define fGEN_TCG_V6_vS32b_nt_new_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_pred_ppu +#define fGEN_TCG_V6_vS32b_nt_new_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_npred_ppu + +/* Masked vector stores */ +#define fGEN_TCG_V6_vS32b_qpred_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nt_qpred_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_qpred_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nt_qpred_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_qpred_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nt_qpred_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nqpred_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nt_nqpred_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nqpred_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nt_nqpred_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nqpred_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nt_nqpred_ppu(SHORTCODE) SHORTCODE + +/* Store release not modelled in qemu, but need to suppress compiler warnings */ +#define fGEN_TCG_V6_vS32b_srls_pi(SHORTCODE) \ + do { \ + siV = siV; \ + } while (0) +#define fGEN_TCG_V6_vS32b_srls_ai(SHORTCODE) \ + do { \ + RtV = RtV; \ + siV = siV; \ + } while (0) +#define fGEN_TCG_V6_vS32b_srls_ppu(SHORTCODE) \ + do { \ + MuV = MuV; \ + } while (0) + +#endif diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c index 7333299615..806d0974ff 100644 --- a/target/hexagon/genptr.c +++ b/target/hexagon/genptr.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -19,17 +19,20 @@ #include "cpu.h" #include "internal.h" #include "tcg/tcg-op.h" +#include "tcg/tcg-op-gvec.h" #include "insn.h" #include "opcodes.h" #include "translate.h" #define QEMU_GENERATE /* Used internally by macros.h */ #include "macros.h" +#include "mmvec/macros.h" #undef QEMU_GENERATE #include "gen_tcg.h" +#include "gen_tcg_hvx.h" static inline void gen_log_predicated_reg_write(int rnum, TCGv val, int slot) { - TCGv zero = tcg_const_tl(0); + TCGv zero = tcg_constant_tl(0); TCGv slot_mask = tcg_temp_new(); tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot); @@ -47,7 +50,6 @@ static inline void gen_log_predicated_reg_write(int rnum, TCGv val, int slot) tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask); } - tcg_temp_free(zero); tcg_temp_free(slot_mask); } @@ -63,7 +65,7 @@ static inline void gen_log_reg_write(int rnum, TCGv val) static void gen_log_predicated_reg_write_pair(int rnum, TCGv_i64 val, int slot) { TCGv val32 = tcg_temp_new(); - TCGv zero = tcg_const_tl(0); + TCGv zero = tcg_constant_tl(0); TCGv slot_mask = tcg_temp_new(); tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot); @@ -92,7 +94,6 @@ static void gen_log_predicated_reg_write_pair(int rnum, TCGv_i64 val, int slot) } tcg_temp_free(val32); - tcg_temp_free(zero); tcg_temp_free(slot_mask); } @@ -167,6 +168,9 @@ static inline void gen_read_ctrl_reg(DisasContext *ctx, const int reg_num, } else if (reg_num == HEX_REG_QEMU_INSN_CNT) { tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_INSN_CNT], ctx->num_insns); + } else if (reg_num == HEX_REG_QEMU_HVX_CNT) { + tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_HVX_CNT], + ctx->num_hvx_insns); } else { tcg_gen_mov_tl(dest, hex_gpr[reg_num]); } @@ -181,9 +185,8 @@ static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num, tcg_gen_concat_i32_i64(dest, p3_0, hex_gpr[reg_num + 1]); tcg_temp_free(p3_0); } else if (reg_num == HEX_REG_PC - 1) { - TCGv pc = tcg_const_tl(ctx->base.pc_next); + TCGv pc = tcg_constant_tl(ctx->base.pc_next); tcg_gen_concat_i32_i64(dest, hex_gpr[reg_num], pc); - tcg_temp_free(pc); } else if (reg_num == HEX_REG_QEMU_PKT_CNT) { TCGv pkt_cnt = tcg_temp_new(); TCGv insn_cnt = tcg_temp_new(); @@ -194,6 +197,12 @@ static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num, tcg_gen_concat_i32_i64(dest, pkt_cnt, insn_cnt); tcg_temp_free(pkt_cnt); tcg_temp_free(insn_cnt); + } else if (reg_num == HEX_REG_QEMU_HVX_CNT) { + TCGv hvx_cnt = tcg_temp_new(); + tcg_gen_addi_tl(hvx_cnt, hex_gpr[HEX_REG_QEMU_HVX_CNT], + ctx->num_hvx_insns); + tcg_gen_concat_i32_i64(dest, hvx_cnt, hex_gpr[reg_num + 1]); + tcg_temp_free(hvx_cnt); } else { tcg_gen_concat_i32_i64(dest, hex_gpr[reg_num], @@ -201,11 +210,15 @@ static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num, } } -static inline void gen_write_p3_0(TCGv control_reg) +static void gen_write_p3_0(DisasContext *ctx, TCGv control_reg) { + TCGv hex_p8 = tcg_temp_new(); for (int i = 0; i < NUM_PREGS; i++) { - tcg_gen_extract_tl(hex_pred[i], control_reg, i * 8, 8); + tcg_gen_extract_tl(hex_p8, control_reg, i * 8, 8); + gen_log_pred_write(ctx, i, hex_p8); + ctx_log_pred_write(ctx, i); } + tcg_temp_free(hex_p8); } /* @@ -219,7 +232,7 @@ static inline void gen_write_ctrl_reg(DisasContext *ctx, int reg_num, TCGv val) { if (reg_num == HEX_REG_P3_0) { - gen_write_p3_0(val); + gen_write_p3_0(ctx, val); } else { 
gen_log_reg_write(reg_num, val); ctx_log_reg_write(ctx, reg_num); @@ -229,6 +242,9 @@ static inline void gen_write_ctrl_reg(DisasContext *ctx, int reg_num, if (reg_num == HEX_REG_QEMU_INSN_CNT) { ctx->num_insns = 0; } + if (reg_num == HEX_REG_QEMU_HVX_CNT) { + ctx->num_hvx_insns = 0; + } } } @@ -238,7 +254,7 @@ static inline void gen_write_ctrl_reg_pair(DisasContext *ctx, int reg_num, if (reg_num == HEX_REG_P3_0) { TCGv val32 = tcg_temp_new(); tcg_gen_extrl_i64_i32(val32, val); - gen_write_p3_0(val32); + gen_write_p3_0(ctx, val32); tcg_gen_extrh_i64_i32(val32, val); gen_log_reg_write(reg_num + 1, val32); tcg_temp_free(val32); @@ -250,6 +266,9 @@ static inline void gen_write_ctrl_reg_pair(DisasContext *ctx, int reg_num, ctx->num_packets = 0; ctx->num_insns = 0; } + if (reg_num == HEX_REG_QEMU_HVX_CNT) { + ctx->num_hvx_insns = 0; + } } } @@ -331,15 +350,13 @@ static inline void gen_store_conditional4(DisasContext *ctx, tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail); - one = tcg_const_tl(0xff); - zero = tcg_const_tl(0); + one = tcg_constant_tl(0xff); + zero = tcg_constant_tl(0); tmp = tcg_temp_new(); tcg_gen_atomic_cmpxchg_tl(tmp, hex_llsc_addr, hex_llsc_val, src, ctx->mem_idx, MO_32); tcg_gen_movcond_tl(TCG_COND_EQ, pred, tmp, hex_llsc_val, one, zero); - tcg_temp_free(one); - tcg_temp_free(zero); tcg_temp_free(tmp); tcg_gen_br(done); @@ -359,16 +376,14 @@ static inline void gen_store_conditional8(DisasContext *ctx, tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail); - one = tcg_const_i64(0xff); - zero = tcg_const_i64(0); + one = tcg_constant_i64(0xff); + zero = tcg_constant_i64(0); tmp = tcg_temp_new_i64(); tcg_gen_atomic_cmpxchg_i64(tmp, hex_llsc_addr, hex_llsc_val_i64, src, ctx->mem_idx, MO_64); tcg_gen_movcond_i64(TCG_COND_EQ, tmp, tmp, hex_llsc_val_i64, one, zero); tcg_gen_extrl_i64_i32(pred, tmp); - tcg_temp_free_i64(one); - tcg_temp_free_i64(zero); tcg_temp_free_i64(tmp); tcg_gen_br(done); @@ -386,78 +401,237 @@ static inline void gen_store32(TCGv vaddr, TCGv src, int width, int slot) tcg_gen_mov_tl(hex_store_val32[slot], src); } -static inline void gen_store1(TCGv_env cpu_env, TCGv vaddr, TCGv src, - DisasContext *ctx, int slot) +static inline void gen_store1(TCGv_env cpu_env, TCGv vaddr, TCGv src, int slot) { gen_store32(vaddr, src, 1, slot); - ctx->store_width[slot] = 1; } -static inline void gen_store1i(TCGv_env cpu_env, TCGv vaddr, int32_t src, - DisasContext *ctx, int slot) +static inline void gen_store1i(TCGv_env cpu_env, TCGv vaddr, int32_t src, int slot) { - TCGv tmp = tcg_const_tl(src); - gen_store1(cpu_env, vaddr, tmp, ctx, slot); - tcg_temp_free(tmp); + TCGv tmp = tcg_constant_tl(src); + gen_store1(cpu_env, vaddr, tmp, slot); } -static inline void gen_store2(TCGv_env cpu_env, TCGv vaddr, TCGv src, - DisasContext *ctx, int slot) +static inline void gen_store2(TCGv_env cpu_env, TCGv vaddr, TCGv src, int slot) { gen_store32(vaddr, src, 2, slot); - ctx->store_width[slot] = 2; } -static inline void gen_store2i(TCGv_env cpu_env, TCGv vaddr, int32_t src, - DisasContext *ctx, int slot) +static inline void gen_store2i(TCGv_env cpu_env, TCGv vaddr, int32_t src, int slot) { - TCGv tmp = tcg_const_tl(src); - gen_store2(cpu_env, vaddr, tmp, ctx, slot); - tcg_temp_free(tmp); + TCGv tmp = tcg_constant_tl(src); + gen_store2(cpu_env, vaddr, tmp, slot); } -static inline void gen_store4(TCGv_env cpu_env, TCGv vaddr, TCGv src, - DisasContext *ctx, int slot) +static inline void gen_store4(TCGv_env cpu_env, TCGv vaddr, TCGv src, int slot) { gen_store32(vaddr, src, 4, slot); - 
ctx->store_width[slot] = 4; } -static inline void gen_store4i(TCGv_env cpu_env, TCGv vaddr, int32_t src, - DisasContext *ctx, int slot) +static inline void gen_store4i(TCGv_env cpu_env, TCGv vaddr, int32_t src, int slot) { - TCGv tmp = tcg_const_tl(src); - gen_store4(cpu_env, vaddr, tmp, ctx, slot); - tcg_temp_free(tmp); + TCGv tmp = tcg_constant_tl(src); + gen_store4(cpu_env, vaddr, tmp, slot); } -static inline void gen_store8(TCGv_env cpu_env, TCGv vaddr, TCGv_i64 src, - DisasContext *ctx, int slot) +static inline void gen_store8(TCGv_env cpu_env, TCGv vaddr, TCGv_i64 src, int slot) { tcg_gen_mov_tl(hex_store_addr[slot], vaddr); tcg_gen_movi_tl(hex_store_width[slot], 8); tcg_gen_mov_i64(hex_store_val64[slot], src); - ctx->store_width[slot] = 8; } -static inline void gen_store8i(TCGv_env cpu_env, TCGv vaddr, int64_t src, - DisasContext *ctx, int slot) +static inline void gen_store8i(TCGv_env cpu_env, TCGv vaddr, int64_t src, int slot) { - TCGv_i64 tmp = tcg_const_i64(src); - gen_store8(cpu_env, vaddr, tmp, ctx, slot); - tcg_temp_free_i64(tmp); + TCGv_i64 tmp = tcg_constant_i64(src); + gen_store8(cpu_env, vaddr, tmp, slot); } static TCGv gen_8bitsof(TCGv result, TCGv value) { - TCGv zero = tcg_const_tl(0); - TCGv ones = tcg_const_tl(0xff); + TCGv zero = tcg_constant_tl(0); + TCGv ones = tcg_constant_tl(0xff); tcg_gen_movcond_tl(TCG_COND_NE, result, value, zero, ones, zero); - tcg_temp_free(zero); - tcg_temp_free(ones); return result; } +static intptr_t vreg_src_off(DisasContext *ctx, int num) +{ + intptr_t offset = offsetof(CPUHexagonState, VRegs[num]); + + if (test_bit(num, ctx->vregs_select)) { + offset = ctx_future_vreg_off(ctx, num, 1, false); + } + if (test_bit(num, ctx->vregs_updated_tmp)) { + offset = ctx_tmp_vreg_off(ctx, num, 1, false); + } + return offset; +} + +static void gen_log_vreg_write(DisasContext *ctx, intptr_t srcoff, int num, + VRegWriteType type, int slot_num, + bool is_predicated) +{ + TCGLabel *label_end = NULL; + intptr_t dstoff; + + if (is_predicated) { + TCGv cancelled = tcg_temp_local_new(); + label_end = gen_new_label(); + + /* Don't do anything if the slot was cancelled */ + tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1); + tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end); + tcg_temp_free(cancelled); + } + + if (type != EXT_TMP) { + dstoff = ctx_future_vreg_off(ctx, num, 1, true); + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, + sizeof(MMVector), sizeof(MMVector)); + tcg_gen_ori_tl(hex_VRegs_updated, hex_VRegs_updated, 1 << num); + } else { + dstoff = ctx_tmp_vreg_off(ctx, num, 1, false); + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, + sizeof(MMVector), sizeof(MMVector)); + } + + if (is_predicated) { + gen_set_label(label_end); + } +} + +static void gen_log_vreg_write_pair(DisasContext *ctx, intptr_t srcoff, int num, + VRegWriteType type, int slot_num, + bool is_predicated) +{ + gen_log_vreg_write(ctx, srcoff, num ^ 0, type, slot_num, is_predicated); + srcoff += sizeof(MMVector); + gen_log_vreg_write(ctx, srcoff, num ^ 1, type, slot_num, is_predicated); +} + +static void gen_log_qreg_write(intptr_t srcoff, int num, int vnew, + int slot_num, bool is_predicated) +{ + TCGLabel *label_end = NULL; + intptr_t dstoff; + + if (is_predicated) { + TCGv cancelled = tcg_temp_local_new(); + label_end = gen_new_label(); + + /* Don't do anything if the slot was cancelled */ + tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1); + tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end); + tcg_temp_free(cancelled); + } + + dstoff = 
offsetof(CPUHexagonState, future_QRegs[num]); + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMQReg), sizeof(MMQReg)); + + if (is_predicated) { + tcg_gen_ori_tl(hex_QRegs_updated, hex_QRegs_updated, 1 << num); + gen_set_label(label_end); + } +} + +static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src, + bool aligned) +{ + TCGv_i64 tmp = tcg_temp_new_i64(); + if (aligned) { + tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1)); + } + for (int i = 0; i < sizeof(MMVector) / 8; i++) { + tcg_gen_qemu_ld64(tmp, src, ctx->mem_idx); + tcg_gen_addi_tl(src, src, 8); + tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8); + } + tcg_temp_free_i64(tmp); +} + +static void gen_vreg_store(DisasContext *ctx, Insn *insn, Packet *pkt, + TCGv EA, intptr_t srcoff, int slot, bool aligned) +{ + intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data); + intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask); + + if (is_gather_store_insn(insn, pkt)) { + TCGv sl = tcg_constant_tl(slot); + gen_helper_gather_store(cpu_env, EA, sl); + return; + } + + tcg_gen_movi_tl(hex_vstore_pending[slot], 1); + if (aligned) { + tcg_gen_andi_tl(hex_vstore_addr[slot], EA, + ~((int32_t)sizeof(MMVector) - 1)); + } else { + tcg_gen_mov_tl(hex_vstore_addr[slot], EA); + } + tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector)); + + /* Copy the data to the vstore buffer */ + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector)); + /* Set the mask to all 1's */ + tcg_gen_gvec_dup_imm(MO_64, maskoff, sizeof(MMQReg), sizeof(MMQReg), ~0LL); +} + +static void gen_vreg_masked_store(DisasContext *ctx, TCGv EA, intptr_t srcoff, + intptr_t bitsoff, int slot, bool invert) +{ + intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data); + intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask); + + tcg_gen_movi_tl(hex_vstore_pending[slot], 1); + tcg_gen_andi_tl(hex_vstore_addr[slot], EA, + ~((int32_t)sizeof(MMVector) - 1)); + tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector)); + + /* Copy the data to the vstore buffer */ + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector)); + /* Copy the mask */ + tcg_gen_gvec_mov(MO_64, maskoff, bitsoff, sizeof(MMQReg), sizeof(MMQReg)); + if (invert) { + tcg_gen_gvec_not(MO_64, maskoff, maskoff, + sizeof(MMQReg), sizeof(MMQReg)); + } +} + +static void vec_to_qvec(size_t size, intptr_t dstoff, intptr_t srcoff) +{ + TCGv_i64 tmp = tcg_temp_new_i64(); + TCGv_i64 word = tcg_temp_new_i64(); + TCGv_i64 bits = tcg_temp_new_i64(); + TCGv_i64 mask = tcg_temp_new_i64(); + TCGv_i64 zero = tcg_constant_i64(0); + TCGv_i64 ones = tcg_constant_i64(~0); + + for (int i = 0; i < sizeof(MMVector) / 8; i++) { + tcg_gen_ld_i64(tmp, cpu_env, srcoff + i * 8); + tcg_gen_movi_i64(mask, 0); + + for (int j = 0; j < 8; j += size) { + tcg_gen_extract_i64(word, tmp, j * 8, size * 8); + tcg_gen_movcond_i64(TCG_COND_NE, bits, word, zero, ones, zero); + tcg_gen_deposit_i64(mask, mask, bits, j, size); + } + + tcg_gen_st8_i64(mask, cpu_env, dstoff + i); + } + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(word); + tcg_temp_free_i64(bits); + tcg_temp_free_i64(mask); +} + +static void probe_noshuf_load(TCGv va, int s, int mi) +{ + TCGv size = tcg_constant_tl(s); + TCGv mem_idx = tcg_constant_tl(mi); + gen_helper_probe_noshuf_load(cpu_env, va, size, mem_idx); +} + #include "tcg_funcs_generated.c.inc" #include "tcg_func_table_generated.c.inc" diff --git a/target/hexagon/helper.h b/target/hexagon/helper.h index ca201fb680..368f0b5708 100644 --- 
a/target/hexagon/helper.h +++ b/target/hexagon/helper.h @@ -23,6 +23,8 @@ DEF_HELPER_1(debug_start_packet, void, env) DEF_HELPER_FLAGS_3(debug_check_store_width, TCG_CALL_NO_WG, void, env, int, int) DEF_HELPER_FLAGS_3(debug_commit_end, TCG_CALL_NO_WG, void, env, int, int) DEF_HELPER_2(commit_store, void, env, int) +DEF_HELPER_3(gather_store, void, env, i32, int) +DEF_HELPER_1(commit_hvx_stores, void, env) DEF_HELPER_FLAGS_4(fcircadd, TCG_CALL_NO_RWG_SE, s32, s32, s32, s32, s32) DEF_HELPER_FLAGS_1(fbrev, TCG_CALL_NO_RWG_SE, i32, i32) DEF_HELPER_3(sfrecipa, i64, env, f32, f32) @@ -89,3 +91,20 @@ DEF_HELPER_4(sffms_lib, f32, env, f32, f32, f32) DEF_HELPER_3(dfmpyfix, f64, env, f64, f64) DEF_HELPER_4(dfmpyhh, f64, env, f64, f64, f64) + +/* Histogram instructions */ +DEF_HELPER_1(vhist, void, env) +DEF_HELPER_1(vhistq, void, env) +DEF_HELPER_1(vwhist256, void, env) +DEF_HELPER_1(vwhist256q, void, env) +DEF_HELPER_1(vwhist256_sat, void, env) +DEF_HELPER_1(vwhist256q_sat, void, env) +DEF_HELPER_1(vwhist128, void, env) +DEF_HELPER_1(vwhist128q, void, env) +DEF_HELPER_2(vwhist128m, void, env, s32) +DEF_HELPER_2(vwhist128qm, void, env, s32) + +DEF_HELPER_4(probe_noshuf_load, void, env, i32, int, int) +DEF_HELPER_2(probe_pkt_scalar_store_s0, void, env, int) +DEF_HELPER_2(probe_hvx_stores, void, env, int) +DEF_HELPER_3(probe_pkt_scalar_hvx_stores, void, env, int, int) diff --git a/target/hexagon/hex_arch_types.h b/target/hexagon/hex_arch_types.h index d721e1f934..885f68f760 100644 --- a/target/hexagon/hex_arch_types.h +++ b/target/hexagon/hex_arch_types.h @@ -15,10 +15,11 @@ * along with this program; if not, see . */ -#ifndef HEXAGON_ARCH_TYPES_H -#define HEXAGON_ARCH_TYPES_H +#ifndef HEXAGON_HEX_ARCH_TYPES_H +#define HEXAGON_HEX_ARCH_TYPES_H #include "qemu/osdep.h" +#include "mmvec/mmvec.h" #include "qemu/int128.h" /* @@ -35,4 +36,8 @@ typedef uint64_t size8u_t; typedef int64_t size8s_t; typedef Int128 size16s_t; +typedef MMVector mmvector_t; +typedef MMVectorPair mmvector_pair_t; +typedef MMQReg mmqret_t; + #endif diff --git a/target/hexagon/hex_common.py b/target/hexagon/hex_common.py index b3b534057d..d9ba7df786 100755 --- a/target/hexagon/hex_common.py +++ b/target/hexagon/hex_common.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ## -## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. +## Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. 
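As a reading aid for the vec_to_qvec helper added to genptr.c above: it converts a vector register into an HVX predicate by depositing `size` consecutive predicate bits per element, all ones when the element is nonzero (one predicate bit per vector byte). A minimal host-side sketch of the same loop, assuming a 128-byte vector, a little-endian host, and the hypothetical names MMVECTOR_BYTES and vec_to_qvec_model:

#include <stdint.h>
#include <string.h>

#define MMVECTOR_BYTES 128                 /* assumption: 128-byte HVX vector */

/* For every element of `size` bytes, deposit `size` mask bits,
 * all ones if the element is nonzero, then store one mask byte
 * per 8 vector bytes (cf. the tcg_gen_st8_i64 above). */
static void vec_to_qvec_model(size_t size,
                              uint8_t qreg[MMVECTOR_BYTES / 8],
                              const uint8_t vreg[MMVECTOR_BYTES])
{
    for (size_t i = 0; i < MMVECTOR_BYTES / 8; i++) {
        uint8_t mask = 0;
        for (size_t j = 0; j < 8; j += size) {
            uint64_t elem = 0;
            memcpy(&elem, &vreg[i * 8 + j], size);   /* one element */
            if (elem != 0) {
                mask |= (uint8_t)(((1u << size) - 1) << j);
            }
        }
        qreg[i] = mask;
    }
}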
## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by @@ -73,6 +73,9 @@ def calculate_attribs(): add_qemu_macro_attrib('fWRITE_P1', 'A_WRITES_PRED_REG') add_qemu_macro_attrib('fWRITE_P2', 'A_WRITES_PRED_REG') add_qemu_macro_attrib('fWRITE_P3', 'A_WRITES_PRED_REG') + add_qemu_macro_attrib('fSET_OVERFLOW', 'A_IMPLICIT_WRITES_USR') + add_qemu_macro_attrib('fSET_LPCFG', 'A_IMPLICIT_WRITES_USR') + add_qemu_macro_attrib('fSTORE', 'A_SCALAR_STORE') # Recurse down macros, find attributes from sub-macros macroValues = list(macros.values()) @@ -143,6 +146,9 @@ def compute_tag_immediates(tag): ## P predicate register ## R GPR register ## M modifier register +## Q HVX predicate vector +## V HVX vector register +## O HVX new vector register ## regid can be one of the following ## d, e destination register ## dd destination register pair @@ -178,6 +184,9 @@ def is_readwrite(regid): def is_scalar_reg(regtype): return regtype in "RPC" +def is_hvx_reg(regtype): + return regtype in "VQ" + def is_old_val(regtype, regid, tag): return regtype+regid+'V' in semdict[tag] @@ -201,6 +210,13 @@ def need_ea(tag): def skip_qemu_helper(tag): return tag in overrides.keys() +def is_tmp_result(tag): + return ('A_CVI_TMP' in attribdict[tag] or + 'A_CVI_TMP_DST' in attribdict[tag]) + +def is_new_result(tag): + return ('A_CVI_NEW' in attribdict[tag]) + def imm_name(immlett): return "%siV" % immlett diff --git a/target/hexagon/hex_regs.h b/target/hexagon/hex_regs.h index f291911eef..a63c2c0fd5 100644 --- a/target/hexagon/hex_regs.h +++ b/target/hexagon/hex_regs.h @@ -15,8 +15,8 @@ * along with this program; if not, see . */ -#ifndef HEXAGON_REGS_H -#define HEXAGON_REGS_H +#ifndef HEXAGON_HEX_REGS_H +#define HEXAGON_HEX_REGS_H enum { HEX_REG_R00 = 0, @@ -76,6 +76,7 @@ enum { /* Use reserved control registers for qemu execution counts */ HEX_REG_QEMU_PKT_CNT = 52, HEX_REG_QEMU_INSN_CNT = 53, + HEX_REG_QEMU_HVX_CNT = 54, HEX_REG_UTIMERLO = 62, HEX_REG_UTIMERHI = 63, }; diff --git a/target/hexagon/imported/allext.idef b/target/hexagon/imported/allext.idef new file mode 100644 index 0000000000..9d4b23e9de --- /dev/null +++ b/target/hexagon/imported/allext.idef @@ -0,0 +1,25 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +/* + * Top level file for all instruction set extensions + */ +#define EXTNAME mmvec +#define EXTSTR "mmvec" +#include "mmvec/ext.idef" +#undef EXTNAME +#undef EXTSTR diff --git a/target/hexagon/imported/allext_macros.def b/target/hexagon/imported/allext_macros.def new file mode 100644 index 0000000000..9c91199151 --- /dev/null +++ b/target/hexagon/imported/allext_macros.def @@ -0,0 +1,25 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. 
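For context on the commit_hvx_stores and probe_hvx_stores helpers declared in helper.h above: HVX stores are logged into a per-slot vstore buffer by gen_vreg_store earlier in this patch and only reach memory when the packet commits. A rough host-side sketch of that masked commit, assuming a 128-byte vector; the VStoreModel and commit_vstore_model names are hypothetical stand-ins for the CPU-state fields referenced in the comments:

#include <stdint.h>

#define MMVECTOR_BYTES 128                 /* assumption: 128-byte HVX vector */

typedef struct {
    uint32_t va;                           /* cf. hex_vstore_addr[slot]       */
    int      size;                         /* cf. hex_vstore_size[slot]       */
    uint8_t  data[MMVECTOR_BYTES];         /* cf. vstore[slot].data           */
    uint8_t  mask[MMVECTOR_BYTES / 8];     /* cf. vstore[slot].mask, 1 bit/byte */
} VStoreModel;

/* Only bytes whose mask bit is set are written; this is how the
 * qpred/nqpred and masked store forms take effect at commit time. */
static void commit_vstore_model(const VStoreModel *st, uint8_t *mem)
{
    for (int i = 0; i < st->size; i++) {
        if (st->mask[i / 8] & (1u << (i % 8))) {
            mem[st->va + i] = st->data[i];
        }
    }
}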
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +/* + * Top level file for all instruction set extensions + */ +#define EXTNAME mmvec +#define EXTSTR "mmvec" +#include "mmvec/macros.def" +#undef EXTNAME +#undef EXTSTR diff --git a/target/hexagon/imported/allextenc.def b/target/hexagon/imported/allextenc.def new file mode 100644 index 0000000000..39a3e93fad --- /dev/null +++ b/target/hexagon/imported/allextenc.def @@ -0,0 +1,20 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#define EXTNAME mmvec +#include "mmvec/encode_ext.def" +#undef EXTNAME diff --git a/target/hexagon/imported/allidefs.def b/target/hexagon/imported/allidefs.def index 2aace29888..ee253b83a9 100644 --- a/target/hexagon/imported/allidefs.def +++ b/target/hexagon/imported/allidefs.def @@ -28,3 +28,4 @@ #include "shift.idef" #include "system.idef" #include "subinsns.idef" +#include "allext.idef" diff --git a/target/hexagon/imported/encode.def b/target/hexagon/imported/encode.def index b9368d1284..e40e7fbffb 100644 --- a/target/hexagon/imported/encode.def +++ b/target/hexagon/imported/encode.def @@ -71,6 +71,7 @@ #include "encode_pp.def" #include "encode_subinsn.def" +#include "allextenc.def" #ifdef __SELF_DEF_FIELD32 #undef __SELF_DEF_FIELD32 diff --git a/target/hexagon/imported/encode_pp.def b/target/hexagon/imported/encode_pp.def index 939c6fc55f..d71c04cd30 100644 --- a/target/hexagon/imported/encode_pp.def +++ b/target/hexagon/imported/encode_pp.def @@ -944,13 +944,6 @@ MPY_ENC(F2_dfmpyfix, "1000","ddddd","0","0","1","0","11") MPY_ENC(F2_dfmin, "1000","ddddd","0","0","1","1","11") MPY_ENC(F2_dfmax, "1000","ddddd","0","1","0","0","11") MPY_ENC(F2_dfmpyll, "1000","ddddd","0","1","0","1","11") -#ifdef ADD_DP_OPS -MPY_ENC(F2_dfdivcheat, "1000","ddddd","0","0","0","1","00") - -MPY_ENC(F2_dffixupn, "1000","ddddd","0","1","0","1","11") -MPY_ENC(F2_dffixupd, "1000","ddddd","0","1","1","0","11") -MPY_ENC(F2_dfrecipa, "1000","ddddd","0","1","1","1","ee") -#endif MPY_ENC(M7_dcmpyrw, "1000","ddddd","0","0","0","1","10") MPY_ENC(M7_dcmpyrwc, "1000","ddddd","0","0","1","1","10") @@ -1024,15 +1017,6 @@ MPY_ENC(M5_vdmacbsu, "1010","xxxxx","0","1","0","0","01") MPY_ENC(F2_dfmpylh, "1010","xxxxx","0","0","0","0","11") MPY_ENC(F2_dfmpyhh, "1010","xxxxx","0","0","0","1","11") -#ifdef ADD_DP_OPS 
-MPY_ENC(F2_dfmpyhh, "1010","xxxxx","0","0","1","0","11") -MPY_ENC(F2_dffma, "1010","xxxxx","0","0","0","0","11") -MPY_ENC(F2_dffms, "1010","xxxxx","0","0","0","1","11") - -MPY_ENC(F2_dffma_lib, "1010","xxxxx","0","0","1","0","11") -MPY_ENC(F2_dffms_lib, "1010","xxxxx","0","0","1","1","11") -MPY_ENC(F2_dffma_sc, "1010","xxxxx","0","1","1","1","uu") -#endif MPY_ENC(M7_dcmpyrw_acc, "1010","xxxxx","0","0","0","1","10") @@ -1547,15 +1531,8 @@ SH2_RR_ENC(F2_conv_df2d, "0000","111","0","0 00","ddddd") SH2_RR_ENC(F2_conv_df2ud, "0000","111","0","0 01","ddddd") SH2_RR_ENC(F2_conv_ud2df, "0000","111","0","0 10","ddddd") SH2_RR_ENC(F2_conv_d2df, "0000","111","0","0 11","ddddd") -#ifdef ADD_DP_OPS -SH2_RR_ENC(F2_dffixupr, "0000","111","0","1 00","ddddd") -SH2_RR_ENC(F2_dfsqrtcheat, "0000","111","0","1 01","ddddd") -#endif SH2_RR_ENC(F2_conv_df2d_chop, "0000","111","0","1 10","ddddd") SH2_RR_ENC(F2_conv_df2ud_chop,"0000","111","0","1 11","ddddd") -#ifdef ADD_DP_OPS -SH2_RR_ENC(F2_dfinvsqrta, "0000","111","1","0 ee","ddddd") -#endif diff --git a/target/hexagon/imported/ldst.idef b/target/hexagon/imported/ldst.idef index 359d3b744e..237634bdd9 100644 --- a/target/hexagon/imported/ldst.idef +++ b/target/hexagon/imported/ldst.idef @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -31,12 +31,12 @@ Q6INSN(L2_##TAG##_pci, OPER"(Rx32++#s4:"SHFT":circ(Mu2))",ATTRIB,DESCR,{fEA_REG( Q6INSN(L2_##TAG##_pcr, OPER"(Rx32++I:circ(Mu2))", ATTRIB,DESCR,{fEA_REG(RxV); fPM_CIRR(RxV,fREAD_IREG(MuV)<. + */ + +#define CONCAT(A,B) A##B +#define EXTEXTNAME(X) CONCAT(EXT_,X) +#define DEF_ENC(TAG,STR) DEF_EXT_ENC(TAG,EXTEXTNAME(EXTNAME),STR) + + +#ifndef NO_MMVEC +DEF_ENC(V6_extractw, ICLASS_LD" 001 0 000sssss PP0uuuuu --1ddddd") /* coproc insn, returns Rd */ +#endif + + +#ifndef NO_MMVEC + + + +DEF_CLASS32(ICLASS_NCJ" 1--- -------- PP------ --------",COPROC_VMEM) +DEF_CLASS32(ICLASS_NCJ" 1000 0-0ttttt PPi--iii ---ddddd",BaseOffset_VMEM_Loads) +DEF_CLASS32(ICLASS_NCJ" 1000 1-0ttttt PPivviii ---ddddd",BaseOffset_if_Pv_VMEM_Loads) +DEF_CLASS32(ICLASS_NCJ" 1000 0-1ttttt PPi--iii --------",BaseOffset_VMEM_Stores1) +DEF_CLASS32(ICLASS_NCJ" 1000 1-0ttttt PPi--iii 00------",BaseOffset_VMEM_Stores2) +DEF_CLASS32(ICLASS_NCJ" 1000 1-1ttttt PPivviii --------",BaseOffset_if_Pv_VMEM_Stores) + +DEF_CLASS32(ICLASS_NCJ" 1001 0-0xxxxx PP---iii ---ddddd",PostImm_VMEM_Loads) +DEF_CLASS32(ICLASS_NCJ" 1001 1-0xxxxx PP-vviii ---ddddd",PostImm_if_Pv_VMEM_Loads) +DEF_CLASS32(ICLASS_NCJ" 1001 0-1xxxxx PP---iii --------",PostImm_VMEM_Stores1) +DEF_CLASS32(ICLASS_NCJ" 1001 1-0xxxxx PP---iii 00------",PostImm_VMEM_Stores2) +DEF_CLASS32(ICLASS_NCJ" 1001 1-1xxxxx PP-vviii --------",PostImm_if_Pv_VMEM_Stores) + +DEF_CLASS32(ICLASS_NCJ" 1011 0-0xxxxx PPu----- ---ddddd",PostM_VMEM_Loads) +DEF_CLASS32(ICLASS_NCJ" 1011 1-0xxxxx PPuvv--- ---ddddd",PostM_if_Pv_VMEM_Loads) +DEF_CLASS32(ICLASS_NCJ" 1011 0-1xxxxx PPu----- --------",PostM_VMEM_Stores1) +DEF_CLASS32(ICLASS_NCJ" 1011 1-0xxxxx PPu----- 00------",PostM_VMEM_Stores2) +DEF_CLASS32(ICLASS_NCJ" 1011 1-1xxxxx PPuvv--- --------",PostM_if_Pv_VMEM_Stores) + +DEF_CLASS32(ICLASS_NCJ" 110- 0------- PP------ --------",Z_Load) +DEF_CLASS32(ICLASS_NCJ" 110- 1------- PP------ --------",Z_Load_if_Pv) + +DEF_CLASS32(ICLASS_NCJ" 1111 000ttttt PPu--0-- 
---vvvvv",Gather) +DEF_CLASS32(ICLASS_NCJ" 1111 000ttttt PPu--1-- -ssvvvvv",Gather_if_Qs) +DEF_CLASS32(ICLASS_NCJ" 1111 001ttttt PPuvvvvv ---wwwww",Scatter) +DEF_CLASS32(ICLASS_NCJ" 1111 001ttttt PPuvvvvv -----sss",Scatter_New) +DEF_CLASS32(ICLASS_NCJ" 1111 1--ttttt PPuvvvvv -sswwwww",Scatter_if_Qs) + + +DEF_FIELD32(ICLASS_NCJ" 1--- -!------ PP------ --------",NT,"NonTemporal") + + + +DEF_FIELDROW_DESC32( ICLASS_NCJ" 1 000 --- ----- PP i --iii ----- ---","[#0] vmem(Rt+#s4)[:nt]") + +#define LDST_ENC(TAG,MAJ3,MID3,RREG,TINY6,MIN3,VREG) DEF_ENC(TAG, ICLASS_NCJ "1" #MAJ3 #MID3 #RREG "PP" #TINY6 #MIN3 #VREG) + +#define LDST_BO(TAGPRE,MID3,PRED,MIN3,VREG) LDST_ENC(TAGPRE##_ai, 000,MID3,ttttt,i PRED iii,MIN3,VREG) +#define LDST_PI(TAGPRE,MID3,PRED,MIN3,VREG) LDST_ENC(TAGPRE##_pi, 001,MID3,xxxxx,- PRED iii,MIN3,VREG) +#define LDST_PM(TAGPRE,MID3,PRED,MIN3,VREG) LDST_ENC(TAGPRE##_ppu,011,MID3,xxxxx,u PRED ---,MIN3,VREG) + +#define LDST_BASICLD(OP,TAGPRE) \ + OP(TAGPRE, 000,00,000,ddddd) \ + OP(TAGPRE##_nt, 010,00,000,ddddd) \ + OP(TAGPRE##_cur, 000,00,001,ddddd) \ + OP(TAGPRE##_nt_cur, 010,00,001,ddddd) \ + OP(TAGPRE##_tmp, 000,00,010,ddddd) \ + OP(TAGPRE##_nt_tmp, 010,00,010,ddddd) + +#define LDST_BASICST(OP,TAGPRE) \ + OP(TAGPRE, 001,--,000,sssss) \ + OP(TAGPRE##_nt, 011,--,000,sssss) \ + OP(TAGPRE##_new, 001,--,001,-0sss) \ + OP(TAGPRE##_srls, 001,--,001,-1---) \ + OP(TAGPRE##_nt_new, 011,--,001,--sss) \ + + +#define LDST_QPREDST(OP,TAGPRE) \ + OP(TAGPRE##_qpred, 100,vv,000,sssss) \ + OP(TAGPRE##_nt_qpred, 110,vv,000,sssss) \ + OP(TAGPRE##_nqpred, 100,vv,001,sssss) \ + OP(TAGPRE##_nt_nqpred,110,vv,001,sssss) \ + +#define LDST_CONDLD(OP,TAGPRE) \ + OP(TAGPRE##_pred, 100,vv,010,ddddd) \ + OP(TAGPRE##_nt_pred, 110,vv,010,ddddd) \ + OP(TAGPRE##_npred, 100,vv,011,ddddd) \ + OP(TAGPRE##_nt_npred, 110,vv,011,ddddd) \ + OP(TAGPRE##_cur_pred, 100,vv,100,ddddd) \ + OP(TAGPRE##_nt_cur_pred, 110,vv,100,ddddd) \ + OP(TAGPRE##_cur_npred, 100,vv,101,ddddd) \ + OP(TAGPRE##_nt_cur_npred, 110,vv,101,ddddd) \ + OP(TAGPRE##_tmp_pred, 100,vv,110,ddddd) \ + OP(TAGPRE##_nt_tmp_pred, 110,vv,110,ddddd) \ + OP(TAGPRE##_tmp_npred, 100,vv,111,ddddd) \ + OP(TAGPRE##_nt_tmp_npred, 110,vv,111,ddddd) \ + +#define LDST_PREDST(OP,TAGPRE,NT,MIN2) \ + OP(TAGPRE##_pred, 1 NT 1,vv,MIN2 0,sssss) \ + OP(TAGPRE##_npred, 1 NT 1,vv,MIN2 1,sssss) + +#define LDST_PREDSTNEW(OP,TAGPRE,NT,MIN2) \ + OP(TAGPRE##_pred, 1 NT 1,vv,MIN2 0,NT 0 sss) \ + OP(TAGPRE##_npred, 1 NT 1,vv,MIN2 1,NT 1 sss) + +// 0.0,vv,0--,sssss: pred st +#define LDST_BASICPREDST(OP,TAGPRE) \ + LDST_PREDST(OP,TAGPRE, 0,00) \ + LDST_PREDST(OP,TAGPRE##_nt, 1,00) \ + LDST_PREDSTNEW(OP,TAGPRE##_new, 0,01) \ + LDST_PREDSTNEW(OP,TAGPRE##_nt_new, 1,01) + + + +LDST_BASICLD(LDST_BO,V6_vL32b) +LDST_CONDLD(LDST_BO,V6_vL32b) +LDST_BASICLD(LDST_PI,V6_vL32b) +LDST_CONDLD(LDST_PI,V6_vL32b) +LDST_BASICLD(LDST_PM,V6_vL32b) +LDST_CONDLD(LDST_PM,V6_vL32b) + +// Loads + +LDST_BO(V6_vL32Ub,000,00,111,ddddd) +//Stores +LDST_BASICST(LDST_BO,V6_vS32b) + + +LDST_BO(V6_vS32Ub,001,--,111,sssss) + + + + +// Byte Enabled Stores +LDST_QPREDST(LDST_BO,V6_vS32b) + +// Scalar Predicated Stores +LDST_BASICPREDST(LDST_BO,V6_vS32b) + + +LDST_PREDST(LDST_BO,V6_vS32Ub,0,11) + + + + +DEF_FIELDROW_DESC32( ICLASS_NCJ" 1 001 --- ----- PP - ----- ddddd ---","[#1] vmem(Rx++#s3)[:nt]") + +// Loads +LDST_PI(V6_vL32Ub,000,00,111,ddddd) + +//Stores +LDST_BASICST(LDST_PI,V6_vS32b) + + + +LDST_PI(V6_vS32Ub,001,--,111,sssss) + + +// Byte Enabled Stores +LDST_QPREDST(LDST_PI,V6_vS32b) + + +// Scalar Predicated Stores 
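To make the encoding tables above easier to follow, here is one instantiation from that block expanded by hand through the visible macros (LDST_BASICLD, LDST_BO, LDST_ENC); ICLASS_NCJ is presumably defined elsewhere in the imported iclass tables:

/*
 * LDST_BASICLD(LDST_BO, V6_vL32b)                        -- first OP(...) row
 *   -> LDST_BO(V6_vL32b, 000, 00, 000, ddddd)
 *   -> LDST_ENC(V6_vL32b_ai, 000, 000, ttttt, i 00 iii, 000, ddddd)
 *   -> DEF_ENC(V6_vL32b_ai,
 *              ICLASS_NCJ "1" "000" "000" "ttttt" "PP" "i 00 iii" "000" "ddddd")
 *
 * i.e. the aligned vector load V6_vL32b_ai ("Vd32=vmem(Rt32+#s4)") lands in
 * the base+offset row described by the DEF_FIELDROW_DESC32 above, with
 * t = base register, i = offset immediate, d = destination vector register.
 */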
+LDST_BASICPREDST(LDST_PI,V6_vS32b) + + +LDST_PREDST(LDST_PI,V6_vS32Ub,0,11) + + + +DEF_FIELDROW_DESC32( ICLASS_NCJ" 1 011 --- ----- PP - ----- ----- ---","[#3] vmem(Rx++#M)[:nt]") + +// Loads +LDST_PM(V6_vL32Ub,000,00,111,ddddd) + +//Stores +LDST_BASICST(LDST_PM,V6_vS32b) + + + +LDST_PM(V6_vS32Ub,001,--,111,sssss) + +// Byte Enabled Stores +LDST_QPREDST(LDST_PM,V6_vS32b) + +// Scalar Predicated Stores +LDST_BASICPREDST(LDST_PM,V6_vS32b) + + +LDST_PREDST(LDST_PM,V6_vS32Ub,0,11) + + + +DEF_ENC(V6_vaddcarrysat, ICLASS_CJ" 1 101 100 vvvvv PP 1 uuuuu 0ss ddddd") // +DEF_ENC(V6_vaddcarryo, ICLASS_CJ" 1 101 101 vvvvv PP 1 uuuuu 0ee ddddd") // +DEF_ENC(V6_vsubcarryo, ICLASS_CJ" 1 101 101 vvvvv PP 1 uuuuu 1ee ddddd") // +DEF_ENC(V6_vsatdw, ICLASS_CJ" 1 101 100 vvvvv PP 1 uuuuu 111 ddddd") // + +DEF_FIELDROW_DESC32( ICLASS_NCJ" 1 111 --- ----- PP - ----- ----- ---","[#6] vgather,vscatter") +DEF_ENC(V6_vgathermw, ICLASS_NCJ" 1 111 000 ttttt PP u --000 --- vvvvv") // vtmp.w=vmem(Rt32,Mu2,Vv32.w).w +DEF_ENC(V6_vgathermh, ICLASS_NCJ" 1 111 000 ttttt PP u --001 --- vvvvv") // vtmp.h=vmem(Rt32,Mu2,Vv32.h).h +DEF_ENC(V6_vgathermhw, ICLASS_NCJ" 1 111 000 ttttt PP u --010 --- vvvvv") // vtmp.h=vmem(Rt32,Mu2,Vvv32.w).h + + +DEF_ENC(V6_vgathermwq, ICLASS_NCJ" 1 111 000 ttttt PP u --100 -ss vvvvv") // if (Qs4) vtmp.w=vmem(Rt32,Mu2,Vv32.w).w +DEF_ENC(V6_vgathermhq, ICLASS_NCJ" 1 111 000 ttttt PP u --101 -ss vvvvv") // if (Qs4) vtmp.h=vmem(Rt32,Mu2,Vv32.h).h +DEF_ENC(V6_vgathermhwq, ICLASS_NCJ" 1 111 000 ttttt PP u --110 -ss vvvvv") // if (Qs4) vtmp.h=vmem(Rt32,Mu2,Vvv32.w).h + + + +DEF_ENC(V6_vscattermw, ICLASS_NCJ" 1 111 001 ttttt PP u vvvvv 000 wwwww") // vmem(Rt32,Mu2,Vv32.w)=Vw32.w +DEF_ENC(V6_vscattermh, ICLASS_NCJ" 1 111 001 ttttt PP u vvvvv 001 wwwww") // vmem(Rt32,Mu2,Vv32.h)=Vw32.h +DEF_ENC(V6_vscattermhw, ICLASS_NCJ" 1 111 001 ttttt PP u vvvvv 010 wwwww") // vmem(Rt32,Mu2,Vv32.h)=Vw32.h + +DEF_ENC(V6_vscattermw_add, ICLASS_NCJ" 1 111 001 ttttt PP u vvvvv 100 wwwww") // vmem(Rt32,Mu2,Vv32.w) += Vw32.w +DEF_ENC(V6_vscattermh_add, ICLASS_NCJ" 1 111 001 ttttt PP u vvvvv 101 wwwww") // vmem(Rt32,Mu2,Vv32.h) += Vw32.h +DEF_ENC(V6_vscattermhw_add, ICLASS_NCJ" 1 111 001 ttttt PP u vvvvv 110 wwwww") // vmem(Rt32,Mu2,Vv32.h) += Vw32.h + + +DEF_ENC(V6_vscattermwq, ICLASS_NCJ" 1 111 100 ttttt PP u vvvvv 0ss wwwww") // if (Qs4) vmem(Rt32,Mu2,Vv32.w)=Vw32.w +DEF_ENC(V6_vscattermhq, ICLASS_NCJ" 1 111 100 ttttt PP u vvvvv 1ss wwwww") // if (Qs4) vmem(Rt32,Mu2,Vv32.h)=Vw32.h +DEF_ENC(V6_vscattermhwq, ICLASS_NCJ" 1 111 101 ttttt PP u vvvvv 0ss wwwww") // if (Qs4) vmem(Rt32,Mu2,Vv32.h)=Vw32.h + + + + + +DEF_CLASS32(ICLASS_CJ" 1--- -------- PP------ --------",COPROC_VX) + + + +/*************************************************************** +* +* Group #0, Uses Q6 Rt8: new in v61 +* +****************************************************************/ + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 000 --- ----- PP - ----- ----- ---","[#1] Vd32=(Vu32, Vv32, Rt8)") +DEF_ENC(V6_vasrhbsat, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vasruwuhrndsat, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vasrwuhrndsat, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vlutvvb_nm, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vlutvwh_nm, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vasruhubrndsat, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vasruwuhsat, ICLASS_CJ" 1 000 vvv vvttt PP 1 uuuuu 100 ddddd") // +DEF_ENC(V6_vasruhubsat, 
ICLASS_CJ" 1 000 vvv vvttt PP 1 uuuuu 101 ddddd") // + +/*************************************************************** +* +* Group #1, Uses Q6 Rt32 +* +****************************************************************/ + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 001 --- ----- PP - ----- ----- ---","[#1] Vd32=(Vu32, Rt32)") +DEF_ENC(V6_vtmpyb, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vtmpybus, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vdmpyhb, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vrmpyub, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vrmpybus, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vdsaduh, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vdmpybus, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vdmpybus_dv, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vdmpyhsusat, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vdmpyhsuisat, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vdmpyhsat, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vdmpyhisat, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vdmpyhb_dv, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vmpybus, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vmpabus, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vmpahb, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vmpyh, ICLASS_CJ" 1 001 010 ttttt PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vmpyhss, ICLASS_CJ" 1 001 010 ttttt PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vmpyhsrs, ICLASS_CJ" 1 001 010 ttttt PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vmpyuh, ICLASS_CJ" 1 001 010 ttttt PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vrmpybusi, ICLASS_CJ" 1 001 010 ttttt PP 0 uuuuu 10i ddddd") // +DEF_ENC(V6_vrsadubi, ICLASS_CJ" 1 001 010 ttttt PP 0 uuuuu 11i ddddd") // + +DEF_ENC(V6_vmpyihb, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vror, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vmpyuhe, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vmpabuu, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vlut4, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 100 ddddd") // + + +DEF_ENC(V6_vasrw, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vasrh, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vaslw, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vaslh, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vlsrw, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vlsrh, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vlsrb, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 011 ddddd") // + +DEF_ENC(V6_vmpauhb, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vmpyiwub, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vmpyiwh, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vmpyiwb, ICLASS_CJ" 1 001 101 ttttt PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_lvsplatw, ICLASS_CJ" 1 001 101 ttttt PP 0 ----0 001 ddddd") // + + + +DEF_ENC(V6_pred_scalar2, ICLASS_CJ" 1 001 101 ttttt PP 0 ----- 010 -01dd") // +DEF_ENC(V6_vandvrt, ICLASS_CJ" 1 001 101 ttttt PP 0 uuuuu 010 -10dd") // +DEF_ENC(V6_pred_scalar2v2, ICLASS_CJ" 1 001 101 ttttt PP 0 ----- 010 -11dd") // + +DEF_ENC(V6_vtmpyhb, ICLASS_CJ" 1 001 101 ttttt PP 0 uuuuu 100 ddddd") // 
+DEF_ENC(V6_vandqrt, ICLASS_CJ" 1 001 101 ttttt PP 0 --0uu 101 ddddd") // +DEF_ENC(V6_vandnqrt, ICLASS_CJ" 1 001 101 ttttt PP 0 --1uu 101 ddddd") // + +DEF_ENC(V6_vrmpyubi, ICLASS_CJ" 1 001 101 ttttt PP 0 uuuuu 11i ddddd") // + +DEF_ENC(V6_vmpyub, ICLASS_CJ" 1 001 110 ttttt PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_lvsplath, ICLASS_CJ" 1 001 110 ttttt PP 0 ----- 001 ddddd") // +DEF_ENC(V6_lvsplatb, ICLASS_CJ" 1 001 110 ttttt PP 0 ----- 010 ddddd") // + + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 001 --- ----- PP - ----- ----- ---","[#1] Vx32=(Vu32, Rt32)") +DEF_ENC(V6_vtmpyb_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vtmpybus_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 001 xxxxx") // +DEF_ENC(V6_vtmpyhb_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 010 xxxxx") // +DEF_ENC(V6_vdmpyhb_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 011 xxxxx") // +DEF_ENC(V6_vrmpyub_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 100 xxxxx") // +DEF_ENC(V6_vrmpybus_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 101 xxxxx") // +DEF_ENC(V6_vdmpybus_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 110 xxxxx") // +DEF_ENC(V6_vdmpybus_dv_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 111 xxxxx") // + +DEF_ENC(V6_vdmpyhsusat_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vdmpyhsuisat_acc,ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 001 xxxxx") // +DEF_ENC(V6_vdmpyhisat_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 010 xxxxx") // +DEF_ENC(V6_vdmpyhsat_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 011 xxxxx") // +DEF_ENC(V6_vdmpyhb_dv_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 100 xxxxx") // +DEF_ENC(V6_vmpybus_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 101 xxxxx") // +DEF_ENC(V6_vmpabus_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 110 xxxxx") // +DEF_ENC(V6_vmpahb_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 111 xxxxx") // + +DEF_ENC(V6_vmpyhsat_acc, ICLASS_CJ" 1 001 010 ttttt PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vmpyuh_acc, ICLASS_CJ" 1 001 010 ttttt PP 1 uuuuu 001 xxxxx") // +DEF_ENC(V6_vmpyiwb_acc, ICLASS_CJ" 1 001 010 ttttt PP 1 uuuuu 010 xxxxx") // +DEF_ENC(V6_vmpyiwh_acc, ICLASS_CJ" 1 001 010 ttttt PP 1 uuuuu 011 xxxxx") // +DEF_ENC(V6_vrmpybusi_acc, ICLASS_CJ" 1 001 010 ttttt PP 1 uuuuu 10i xxxxx") // +DEF_ENC(V6_vrsadubi_acc, ICLASS_CJ" 1 001 010 ttttt PP 1 uuuuu 11i xxxxx") // + +DEF_ENC(V6_vdsaduh_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vmpyihb_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 uuuuu 001 xxxxx") // +DEF_ENC(V6_vaslw_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 uuuuu 010 xxxxx") // +DEF_ENC(V6_vandqrt_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 --0uu 011 xxxxx") // +DEF_ENC(V6_vandnqrt_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 --1uu 011 xxxxx") // +DEF_ENC(V6_vandvrt_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 uuuuu 100 ---xx") // +DEF_ENC(V6_vasrw_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 uuuuu 101 xxxxx") // +DEF_ENC(V6_vrmpyubi_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 uuuuu 11i xxxxx") // + +DEF_ENC(V6_vmpyub_acc, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vmpyiwub_acc, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 001 xxxxx") // +DEF_ENC(V6_vmpauhb_acc, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 010 xxxxx") // +DEF_ENC(V6_vmpyuhe_acc, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 011 xxxxx") +DEF_ENC(V6_vmpahhsat, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 100 xxxxx") // +DEF_ENC(V6_vmpauhuhsat, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 101 xxxxx") // +DEF_ENC(V6_vmpsuhuhsat, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 110 xxxxx") // +DEF_ENC(V6_vasrh_acc, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 111 xxxxx") // + + + + 
+DEF_ENC(V6_vinsertwr, ICLASS_CJ" 1 001 101 ttttt PP 1 ----- 001 xxxxx") + +DEF_ENC(V6_vmpabuu_acc, ICLASS_CJ" 1 001 101 ttttt PP 1 uuuuu 100 xxxxx") // +DEF_ENC(V6_vaslh_acc, ICLASS_CJ" 1 001 101 ttttt PP 1 uuuuu 101 xxxxx") // +DEF_ENC(V6_vmpyh_acc, ICLASS_CJ" 1 001 101 ttttt PP 1 uuuuu 110 xxxxx") // + + + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 001 --- ----- PP - ----- ----- ---","[#1] (Vx32, Vy32, Rt32)") +DEF_ENC(V6_vshuff, ICLASS_CJ" 1 001 111 ttttt PP 1 yyyyy 001 xxxxx") // +DEF_ENC(V6_vdeal, ICLASS_CJ" 1 001 111 ttttt PP 1 yyyyy 010 xxxxx") // + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 010 --- ----- PP - ----- ----- ---","[#2] if (Ps) Vd=Vu") +DEF_ENC(V6_vcmov, ICLASS_CJ" 1 010 000 ----- PP - uuuuu -ss ddddd") +DEF_ENC(V6_vncmov, ICLASS_CJ" 1 010 001 ----- PP - uuuuu -ss ddddd") +DEF_ENC(V6_vnccombine, ICLASS_CJ" 1 010 010 vvvvv PP - uuuuu -ss ddddd") +DEF_ENC(V6_vccombine, ICLASS_CJ" 1 010 011 vvvvv PP - uuuuu -ss ddddd") + +DEF_ENC(V6_vrotr, ICLASS_CJ" 1 010 100 vvvvv PP 1 uuuuu 111 ddddd") +DEF_ENC(V6_vasr_into, ICLASS_CJ" 1 010 101 vvvvv PP 1 uuuuu 111 xxxxx") + +/*************************************************************** +* +* Group #3, Uses Q6 Rt8 +* +****************************************************************/ + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 011 --- ----- PP - ----- ----- ---","[#3] Vd32=(Vu32, Vv32, Rt8)") +DEF_ENC(V6_valignb, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vlalignb, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vasrwh, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vasrwhsat, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vasrwhrndsat, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vasrwuhsat, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vasrhubsat, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vasrhubrndsat, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vasrhbrndsat, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 000 ddddd") // +DEF_ENC(V6_vlutvvb, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 001 ddddd") +DEF_ENC(V6_vshuffvdd, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 011 ddddd") // +DEF_ENC(V6_vdealvdd, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 100 ddddd") // +DEF_ENC(V6_vlutvvb_oracc, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 101 xxxxx") +DEF_ENC(V6_vlutvwh, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 110 ddddd") +DEF_ENC(V6_vlutvwh_oracc, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 111 xxxxx") + + + +/*************************************************************** +* +* Group #4, No Q6 regs +* +****************************************************************/ + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 100 --- ----- PP 0 ----- ----- ---","[#4] Vd32=(Vu32, Vv32)") +DEF_ENC(V6_vrmpyubv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vrmpybv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vrmpybusv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vdmpyhvsat, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vmpybv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vmpyubv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vmpybusv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vmpyhv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vmpyuhv, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vmpyhvsrs, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vmpyhus, ICLASS_CJ" 1 100 001 vvvvv 
PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vmpabusv, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vmpyih, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vand, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vor, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vxor, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vaddw, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vaddubsat, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vadduhsat, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vaddhsat, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vaddwsat, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vsubb, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vsubh, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vsubw, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vsububsat, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vsubuhsat, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vsubhsat, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vsubwsat, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vaddb_dv, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vaddh_dv, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vaddw_dv, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vaddubsat_dv,ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vadduhsat_dv,ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vaddhsat_dv, ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vaddwsat_dv, ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vsubb_dv, ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vsubh_dv, ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vsubw_dv, ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vsububsat_dv,ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vsubuhsat_dv,ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vsubhsat_dv, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vsubwsat_dv, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vaddubh, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vadduhw, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vaddhw, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vsububh, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vsubuhw, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vsubhw, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vabsdiffub, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vabsdiffh, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vabsdiffuh, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vabsdiffw, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vavgub, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vavguh, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vavgh, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vavgw, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vnavgub, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vnavgh, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 001 ddddd") // 
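The arithmetic encodings in this group (vavg, vnavg, vadd, ...) only fix the instruction bits; the per-lane behaviour is supplied by the iterator macros in mmvec/ext.idef later in this patch, where DO_FOR_EACH_CODE wraps the per-instruction CODE body in a loop over lanes. As a hedged illustration only, a host-side model of a halfword average such as V6_vavgh, assuming 128-byte vectors and that fVAVGS reduces to a signed (a+b)>>1:

#include <stdint.h>

#define MMVECTOR_BYTES 128                 /* assumption: 128-byte HVX vector */

typedef union {
    int8_t  b[MMVECTOR_BYTES];
    int16_t h[MMVECTOR_BYTES / 2];
    int32_t w[MMVECTOR_BYTES / 4];
} MMVectorModel;

/* Per-lane loop that DO_FOR_EACH_CODE(16, ...) effectively generates for a
 * halfword op; the averaging body is an assumption standing in for fVAVGS. */
static void vavgh_model(MMVectorModel *d, const MMVectorModel *u,
                        const MMVectorModel *v)
{
    for (int i = 0; i < MMVECTOR_BYTES / 2; i++) {
        d->h[i] = (int16_t)(((int32_t)u->h[i] + (int32_t)v->h[i]) >> 1);
    }
}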
+DEF_ENC(V6_vnavgw, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vavgubrnd, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vavguhrnd, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vavghrnd, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vavgwrnd, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vmpabuuv, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 100 --- ----- PP 1 ----- ----- ---","[#4] Vx32=(Vu32, Vv32)") +DEF_ENC(V6_vrmpyubv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vrmpybv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 001 xxxxx") // +DEF_ENC(V6_vrmpybusv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 010 xxxxx") // +DEF_ENC(V6_vdmpyhvsat_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 011 xxxxx") // +DEF_ENC(V6_vmpybv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 100 xxxxx") // +DEF_ENC(V6_vmpyubv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 101 xxxxx") // +DEF_ENC(V6_vmpybusv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 110 xxxxx") // +DEF_ENC(V6_vmpyhv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 111 xxxxx") // + +DEF_ENC(V6_vmpyuhv_acc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vmpyhus_acc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 001 xxxxx") // +DEF_ENC(V6_vaddhw_acc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 010 xxxxx") // +DEF_ENC(V6_vmpyowh_64_acc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 011 xxxxx") +DEF_ENC(V6_vmpyih_acc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 100 xxxxx") // +DEF_ENC(V6_vmpyiewuh_acc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 101 xxxxx") // +DEF_ENC(V6_vmpyowh_sacc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 110 xxxxx") // +DEF_ENC(V6_vmpyowh_rnd_sacc,ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 111 xxxxx") // + +DEF_ENC(V6_vmpyiewh_acc, ICLASS_CJ" 1 100 010 vvvvv PP 1 uuuuu 000 xxxxx") // + +DEF_ENC(V6_vadduhw_acc, ICLASS_CJ" 1 100 010 vvvvv PP 1 uuuuu 100 xxxxx") // +DEF_ENC(V6_vaddubh_acc, ICLASS_CJ" 1 100 010 vvvvv PP 1 uuuuu 101 xxxxx") // + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 100 100 ----- PP 1 ----- ----- ---","[#4] Qx4=(Vu32, Vv32)") +// Grouped by element size (lsbs), operation (next-lsbs) and operation (next-lsbs) +DEF_ENC(V6_veqb_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 000 000xx") // +DEF_ENC(V6_veqh_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 000 001xx") // +DEF_ENC(V6_veqw_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 000 010xx") // + +DEF_ENC(V6_vgtb_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 000 100xx") // +DEF_ENC(V6_vgth_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 000 101xx") // +DEF_ENC(V6_vgtw_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 000 110xx") // + +DEF_ENC(V6_vgtub_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 001 000xx") // +DEF_ENC(V6_vgtuh_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 001 001xx") // +DEF_ENC(V6_vgtuw_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 001 010xx") // + +DEF_ENC(V6_veqb_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 010 000xx") // +DEF_ENC(V6_veqh_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 010 001xx") // +DEF_ENC(V6_veqw_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 010 010xx") // + +DEF_ENC(V6_vgtb_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 010 100xx") // +DEF_ENC(V6_vgth_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 010 101xx") // +DEF_ENC(V6_vgtw_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 010 110xx") // + +DEF_ENC(V6_vgtub_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 011 000xx") // +DEF_ENC(V6_vgtuh_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 011 001xx") // +DEF_ENC(V6_vgtuw_or, ICLASS_CJ" 1 100 100 
vvvvv PP 1 uuuuu 011 010xx") // + +DEF_ENC(V6_veqb_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 100 000xx") // +DEF_ENC(V6_veqh_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 100 001xx") // +DEF_ENC(V6_veqw_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 100 010xx") // + +DEF_ENC(V6_vgtb_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 100 100xx") // +DEF_ENC(V6_vgth_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 100 101xx") // +DEF_ENC(V6_vgtw_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 100 110xx") // + +DEF_ENC(V6_vgtub_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 101 000xx") // +DEF_ENC(V6_vgtuh_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 101 001xx") // +DEF_ENC(V6_vgtuw_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 101 010xx") // + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 100 101 ----- PP 1 ----- ----- ---","[#4] Qx4,Vd32=(Vu32, Vv32)") +DEF_ENC(V6_vaddcarry, ICLASS_CJ" 1 100 101 vvvvv PP 1 uuuuu 0xx ddddd") // +DEF_ENC(V6_vsubcarry, ICLASS_CJ" 1 100 101 vvvvv PP 1 uuuuu 1xx ddddd") // + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 100 11- ----- PP 1 ----- ----- ---","[#4] Vx32|=(Vu32, Vv32,#)") +DEF_ENC(V6_vlutvvb_oracci, ICLASS_CJ" 1 100 110 vvvvv PP 1 uuuuu iii xxxxx") // +DEF_ENC(V6_vlutvwh_oracci, ICLASS_CJ" 1 100 111 vvvvv PP 1 uuuuu iii xxxxx") // + + + +/*************************************************************** +* +* Group #5, Reserved/Deprecated. Uses Q6 Rx. Stupid FFT. +* +****************************************************************/ + + + + +/*************************************************************** +* +* Group #6, No Q6 regs +* +****************************************************************/ + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 110 --0 ----- PP 0 ----- ----- ---","[#6] Vd32=Vu32") +DEF_ENC(V6_vabsh, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vabsh_sat, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vabsw, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vabsw_sat, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vnot, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vdealh, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vdealb, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vunpackub, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vunpackuh, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vunpackb, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vunpackh, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vabsb, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vabsb_sat, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vshuffh, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vshuffb, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vzb, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vzh, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vsb, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vsh, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vcl0w, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vpopcounth, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vcl0h, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 111 ddddd") // + + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 110 --0 ---11 PP 0 ----- ----- ---","[#6] Qd4=Qt4, Qs4") +DEF_ENC(V6_pred_and, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 000dd") // +DEF_ENC(V6_pred_or, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 
001dd") // +DEF_ENC(V6_pred_not, ICLASS_CJ" 1 110 --0 ---11 PP 0 ---ss 000 010dd") // +DEF_ENC(V6_pred_xor, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 011dd") // +DEF_ENC(V6_pred_or_n, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 100dd") // +DEF_ENC(V6_pred_and_n, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 101dd") // +DEF_ENC(V6_shuffeqh, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 110dd") // +DEF_ENC(V6_shuffeqw, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 111dd") // + +DEF_ENC(V6_vnormamtw, ICLASS_CJ" 1 110 --0 ---11 PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vnormamth, ICLASS_CJ" 1 110 --0 ---11 PP 0 uuuuu 101 ddddd") // + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 110 --1 ----- PP 0 ----- ----- ---","[#6] Vd32=Vu32,Vv32") +DEF_ENC(V6_vlutvvbi, ICLASS_CJ" 1 110 001 vvvvv PP 0 uuuuu iii ddddd") +DEF_ENC(V6_vlutvwhi, ICLASS_CJ" 1 110 011 vvvvv PP 0 uuuuu iii ddddd") + +DEF_ENC(V6_vaddbsat_dv, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 000 ddddd") +DEF_ENC(V6_vsubbsat_dv, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 001 ddddd") +DEF_ENC(V6_vadduwsat_dv, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 010 ddddd") +DEF_ENC(V6_vsubuwsat_dv, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 011 ddddd") +DEF_ENC(V6_vaddububb_sat, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 100 ddddd") +DEF_ENC(V6_vsubububb_sat, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 101 ddddd") +DEF_ENC(V6_vmpyewuh_64, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 110 ddddd") + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 110 --0 ----- PP 1 ----- ----- ---","Vx32=Vu32") +DEF_ENC(V6_vunpackob, ICLASS_CJ" 1 110 --0 ---00 PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vunpackoh, ICLASS_CJ" 1 110 --0 ---00 PP 1 uuuuu 001 xxxxx") // +//DEF_ENC(V6_vunpackow, ICLASS_CJ" 1 110 --0 ---00 PP 1 uuuuu 010 xxxxx") // + +DEF_ENC(V6_vhist, ICLASS_CJ" 1 110 --0 ---00 PP 1 -000- 100 -----") +DEF_ENC(V6_vwhist256, ICLASS_CJ" 1 110 --0 ---00 PP 1 -0010 100 -----") +DEF_ENC(V6_vwhist256_sat, ICLASS_CJ" 1 110 --0 ---00 PP 1 -0011 100 -----") +DEF_ENC(V6_vwhist128, ICLASS_CJ" 1 110 --0 ---00 PP 1 -010- 100 -----") +DEF_ENC(V6_vwhist128m, ICLASS_CJ" 1 110 --0 ---00 PP 1 -011i 100 -----") + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 110 --0 ----- PP 1 ----- ----- ---","if (Qv4) Vx32=Vu32") +DEF_ENC(V6_vaddbq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vaddhq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 001 xxxxx") // +DEF_ENC(V6_vaddwq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 010 xxxxx") // +DEF_ENC(V6_vaddbnq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 011 xxxxx") // +DEF_ENC(V6_vaddhnq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 100 xxxxx") // +DEF_ENC(V6_vaddwnq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 101 xxxxx") // +DEF_ENC(V6_vsubbq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 110 xxxxx") // +DEF_ENC(V6_vsubhq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 111 xxxxx") // + +DEF_ENC(V6_vsubwq, ICLASS_CJ" 1 110 vv0 ---10 PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vsubbnq, ICLASS_CJ" 1 110 vv0 ---10 PP 1 uuuuu 001 xxxxx") // +DEF_ENC(V6_vsubhnq, ICLASS_CJ" 1 110 vv0 ---10 PP 1 uuuuu 010 xxxxx") // +DEF_ENC(V6_vsubwnq, ICLASS_CJ" 1 110 vv0 ---10 PP 1 uuuuu 011 xxxxx") // + +DEF_ENC(V6_vhistq, ICLASS_CJ" 1 110 vv0 ---10 PP 1 --00- 100 -----") +DEF_ENC(V6_vwhist256q, ICLASS_CJ" 1 110 vv0 ---10 PP 1 --010 100 -----") +DEF_ENC(V6_vwhist256q_sat, ICLASS_CJ" 1 110 vv0 ---10 PP 1 --011 100 -----") +DEF_ENC(V6_vwhist128q, ICLASS_CJ" 1 110 vv0 ---10 PP 1 --10- 100 -----") +DEF_ENC(V6_vwhist128qm, ICLASS_CJ" 1 110 vv0 ---10 PP 1 --11i 100 -----") + + +DEF_ENC(V6_vandvqv, ICLASS_CJ" 1 110 vv0 ---11 PP 1 uuuuu 000 ddddd") +DEF_ENC(V6_vandvnqv, ICLASS_CJ" 1 110 vv0 ---11 PP 
1 uuuuu 001 ddddd") + + +DEF_ENC(V6_vprefixqb, ICLASS_CJ" 1 110 vv0 ---11 PP 1 --000 010 ddddd") // +DEF_ENC(V6_vprefixqh, ICLASS_CJ" 1 110 vv0 ---11 PP 1 --001 010 ddddd") // +DEF_ENC(V6_vprefixqw, ICLASS_CJ" 1 110 vv0 ---11 PP 1 --010 010 ddddd") // + + + + +DEF_ENC(V6_vassign, ICLASS_CJ" 1 110 --0 ---11 PP 1 uuuuu 111 ddddd") + +DEF_ENC(V6_valignbi, ICLASS_CJ" 1 110 001 vvvvv PP 1 uuuuu iii ddddd") +DEF_ENC(V6_vlalignbi, ICLASS_CJ" 1 110 011 vvvvv PP 1 uuuuu iii ddddd") +DEF_ENC(V6_vswap, ICLASS_CJ" 1 110 101 vvvvv PP 1 uuuuu -tt ddddd") // +DEF_ENC(V6_vmux, ICLASS_CJ" 1 110 111 vvvvv PP 1 uuuuu -tt ddddd") // + + + +/*************************************************************** +* +* Group #7, No Q6 regs +* +****************************************************************/ + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 111 --- ----- PP 0 ----- ----- ---","[#7] Vd32=(Vu32, Vv32)") +DEF_ENC(V6_vaddbsat, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vminub, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vminuh, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vminh, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vminw, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vmaxub, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vmaxuh, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vmaxh, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 111 ddddd") // + + +DEF_ENC(V6_vaddclbh, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 000 ddddd") // +DEF_ENC(V6_vaddclbw, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 001 ddddd") // + +DEF_ENC(V6_vavguw, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 010 ddddd") // +DEF_ENC(V6_vavguwrnd, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 011 ddddd") // +DEF_ENC(V6_vavgb, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 100 ddddd") // +DEF_ENC(V6_vavgbrnd, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 101 ddddd") // +DEF_ENC(V6_vnavgb, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 110 ddddd") // + + +DEF_ENC(V6_vmaxw, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vdelta, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vsubbsat, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vrdelta, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vminb, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vmaxb, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vsatuwuh, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vdealb4w, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 111 ddddd") // + + +DEF_ENC(V6_vmpyowh_rnd, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vshuffeb, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vshuffob, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vshufeh, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vshufoh, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vshufoeh, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vshufoeb, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vcombine, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vmpyieoh, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vadduwsat, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vsathub, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vsatwh, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vroundwh, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 
100 ddddd") +DEF_ENC(V6_vroundwuh, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 101 ddddd") +DEF_ENC(V6_vroundhb, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 110 ddddd") +DEF_ENC(V6_vroundhub, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 111 ddddd") + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 111 100 ----- PP - ----- ----- ---","[#7] Qd4=(Vu32, Vv32)") +DEF_ENC(V6_veqb, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 000 000dd") // +DEF_ENC(V6_veqh, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 000 001dd") // +DEF_ENC(V6_veqw, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 000 010dd") // + +DEF_ENC(V6_vgtb, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 000 100dd") // +DEF_ENC(V6_vgth, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 000 101dd") // +DEF_ENC(V6_vgtw, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 000 110dd") // + +DEF_ENC(V6_vgtub, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 001 000dd") // +DEF_ENC(V6_vgtuh, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 001 001dd") // +DEF_ENC(V6_vgtuw, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 001 010dd") // + + +DEF_ENC(V6_vasrwv, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vlsrwv, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vlsrhv, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vasrhv, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vaslwv, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vaslhv, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vaddb, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vaddh, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 111 ddddd") // + + +DEF_ENC(V6_vmpyiewuh, ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 000 ddddd") +DEF_ENC(V6_vmpyiowh, ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 001 ddddd") +DEF_ENC(V6_vpackeb, ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vpackeh, ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vsubuwsat, ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vpackhub_sat,ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vpackhb_sat, ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vpackwuh_sat,ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vpackwh_sat, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vpackob, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vpackoh, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vrounduhub, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vrounduwuh, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vmpyewuh, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 101 ddddd") +DEF_ENC(V6_vmpyowh, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 111 ddddd") + + +#endif /* NO MMVEC */ diff --git a/target/hexagon/imported/mmvec/ext.idef b/target/hexagon/imported/mmvec/ext.idef new file mode 100644 index 0000000000..8ca5a606e1 --- /dev/null +++ b/target/hexagon/imported/mmvec/ext.idef @@ -0,0 +1,2606 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +/****************************************************************************** + * + * HOYA: MULTI MEDIA INSTRUCITONS + * + ******************************************************************************/ + +#ifndef EXTINSN +#define EXTINSN Q6INSN +#define __SELF_DEF_EXTINSN 1 +#endif + +#ifndef NO_MMVEC + +#define DO_FOR_EACH_CODE(WIDTH, CODE) \ +{ \ + fHIDE(int i;) \ + fVFOREACH(WIDTH, i) {\ + CODE ;\ + } \ +} + + + + +#define ITERATOR_INSN_ANY_SLOT(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + + + +#define ITERATOR_INSN2_ANY_SLOT(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_ANY_SLOT(WIDTH,TAG,SYNTAX2,DESCR,CODE) + +#define ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA_DV), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + + +#define ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX2,DESCR,CODE) + + +#define ITERATOR_INSN_SHIFT_SLOT(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + + + +#define ITERATOR_INSN_SHIFT_SLOT_VV_LATE(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_SHIFT_SLOT(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_SHIFT_SLOT(WIDTH,TAG,SYNTAX2,DESCR,CODE) + +#define ITERATOR_INSN_PERMUTE_SLOT(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_PERMUTE_SLOT(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_PERMUTE_SLOT(WIDTH,TAG,SYNTAX2,DESCR,CODE) + +#define ITERATOR_INSN_PERMUTE_SLOT_DEP(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), + + +#define ITERATOR_INSN2_PERMUTE_SLOT_DEP(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_PERMUTE_SLOT_DEP(WIDTH,TAG,SYNTAX2,DESCR,CODE) + +#define ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC_DEP(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_PERMUTE_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX2,DESCR,CODE) + +#define ITERATOR_INSN_MPY_SLOT(WIDTH,TAG, SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, \ +ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN_MPY_SLOT_LATE(WIDTH,TAG, SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, \ +ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_MPY_SLOT(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_MPY_SLOT(WIDTH,TAG,SYNTAX2,DESCR,CODE) + +#define ITERATOR_INSN2_MPY_SLOT_LATE(WIDTH,TAG, SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_MPY_SLOT_LATE(WIDTH,TAG, SYNTAX2,DESCR,CODE) + + +#define ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, 
ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX_DV), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX2,DESCR,CODE) + + + + +#define ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC2(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX_DV,A_CVI_VX_VSRC0_IS_DST), DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN_SLOT2_DOUBLE_VEC(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX_DV,A_RESTRICT_SLOT2ONLY), DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN_VHISTLIKE(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_4SLOT), \ +DESCR, fHIDE(mmvector_t input;) input = fTMPVDATA(); DO_FOR_EACH_CODE(WIDTH, CODE)) + + + + + +/****************************************************************************************** +* +* MMVECTOR MEMORY OPERATIONS - NO NAPALI V1 +* +*******************************************************************************************/ + + + +#define ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX_DV), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC_NOV1(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC_NOV1(WIDTH,TAG,SYNTAX2,DESCR,CODE) + + + +#define ITERATOR_INSN_SHIFT_SLOT_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_SHIFT_SLOT_NOV1(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_SHIFT_SLOT_NOV1(WIDTH,TAG,SYNTAX2,DESCR,CODE) + + +#define ITERATOR_INSN_ANY_SLOT_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_ANY_SLOT_NOV1(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_ANY_SLOT_NOV1(WIDTH,TAG,SYNTAX2,DESCR,CODE) + + +#define ITERATOR_INSN_MPY_SLOT_NOV1(WIDTH,TAG, SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, \ +ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN_PERMUTE_SLOT_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_PERMUTE_SLOTT_NOV1(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_PERMUTE_SLOT(WIDTH,TAG,SYNTAX2,DESCR,CODE) + +#define ITERATOR_INSN_PERMUTE_SLOT_DEPT_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), + + +#define ITERATOR_INSN2_PERMUTE_SLOT_DEPT_NOV1(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_PERMUTE_SLOT_DEP_NOV1(WIDTH,TAG,SYNTAX2,DESCR,CODE) + +#define ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC_DEPT_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_PERMUTE_SLOT_DOUBLE_VEC_NOV1(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC_NOV1(WIDTH,TAG,SYNTAX2,DESCR,CODE) + +#define 
NARROWING_SHIFT_NOV1(ITERSIZE,TAG,DSTM,DSTTYPE,SRCTYPE,SYNOPTS,SATFUNC,RNDFUNC,SHAMTMASK) \ +ITERATOR_INSN_SHIFT_SLOT_NOV1(ITERSIZE,TAG, \ +"Vd32." #DSTTYPE "=vasr(Vu32." #SRCTYPE ",Vv32." #SRCTYPE ",Rt8)" #SYNOPTS, \ +"Vector shift right and shuffle", \ + fHIDE(int )shamt = RtV & SHAMTMASK; \ + DSTM(0,VdV.SRCTYPE[i],SATFUNC(RNDFUNC(VvV.SRCTYPE[i],shamt) >> shamt)); \ + DSTM(1,VdV.SRCTYPE[i],SATFUNC(RNDFUNC(VuV.SRCTYPE[i],shamt) >> shamt))) + +#define MMVEC_AVGS_NOV1(TYPE,TYPE2,DESCR, WIDTH, DEST,SRC)\ +ITERATOR_INSN2_ANY_SLOT_NOV1(WIDTH,vavg##TYPE, "Vd32=vavg"TYPE2"(Vu32,Vv32)", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC")", "Vector Average "DESCR, VdV.DEST[i] = fVAVGS( WIDTH, VuV.SRC[i], VvV.SRC[i])) \ +ITERATOR_INSN2_ANY_SLOT_NOV1(WIDTH,vavg##TYPE##rnd, "Vd32=vavg"TYPE2"(Vu32,Vv32):rnd", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC"):rnd", "Vector Average % Round"DESCR, VdV.DEST[i] = fVAVGSRND( WIDTH, VuV.SRC[i], VvV.SRC[i])) \ +ITERATOR_INSN2_ANY_SLOT_NOV1(WIDTH,vnavg##TYPE, "Vd32=vnavg"TYPE2"(Vu32,Vv32)", "Vd32."#DEST"=vnavg(Vu32."#SRC",Vv32."#SRC")", "Vector Negative Average "DESCR, VdV.DEST[i] = fVNAVGS( WIDTH, VuV.SRC[i], VvV.SRC[i])) + + #define MMVEC_AVGU_NOV1(TYPE,TYPE2,DESCR, WIDTH, DEST,SRC)\ +ITERATOR_INSN2_ANY_SLOT_NOV1(WIDTH,vavg##TYPE, "Vd32=vavg"TYPE2"(Vu32,Vv32)", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC")", "Vector Average "DESCR, VdV.DEST[i] = fVAVGU( WIDTH, VuV.SRC[i], VvV.SRC[i])) \ +ITERATOR_INSN2_ANY_SLOT_NOV1(WIDTH,vavg##TYPE##rnd, "Vd32=vavg"TYPE2"(Vu32,Vv32):rnd", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC"):rnd", "Vector Average % Round"DESCR, VdV.DEST[i] = fVAVGURND(WIDTH, VuV.SRC[i], VvV.SRC[i])) + + + +/****************************************************************************************** +* +* MMVECTOR MEMORY OPERATIONS +* +*******************************************************************************************/ + +#define MMVEC_EACH_EA(TAG,DESCR,ATTRIB,NT,SYNTAXA,SYNTAXB,BEH) \ +EXTINSN(V6_##TAG##_pi, SYNTAXA "(Rx32++#s3)" NT SYNTAXB,ATTRIB,DESCR,{ fEA_REG(RxV); BEH; fPM_I(RxV,VEC_SCALE(siV)); }) \ +EXTINSN(V6_##TAG##_ai, SYNTAXA "(Rt32+#s4)" NT SYNTAXB,ATTRIB,DESCR,{ fEA_RI(RtV,VEC_SCALE(siV)); BEH;}) \ +EXTINSN(V6_##TAG##_ppu, SYNTAXA "(Rx32++Mu2)" NT SYNTAXB,ATTRIB,DESCR,{ fEA_REG(RxV); BEH; fPM_M(RxV,MuV); }) \ + + +#define MMVEC_COND_EACH_EA_TRUE(TAG,DESCR,ATTRIB,NT,SYNTAXA,SYNTAXB,SYNTAXP,BEH) \ +EXTINSN(V6_##TAG##_pred_pi, "if (" #SYNTAXP "4) " SYNTAXA "(Rx32++#s3)" NT SYNTAXB, ATTRIB,DESCR, { if (fLSBOLD(SYNTAXP##V)) { fEA_REG(RxV); BEH; fPM_I(RxV,siV*fVECSIZE()); } else {CANCEL;}}) \ +EXTINSN(V6_##TAG##_pred_ai, "if (" #SYNTAXP "4) " SYNTAXA "(Rt32+#s4)" NT SYNTAXB, ATTRIB,DESCR, { if (fLSBOLD(SYNTAXP##V)) { fEA_RI(RtV,siV*fVECSIZE()); BEH;} else {CANCEL;}}) \ +EXTINSN(V6_##TAG##_pred_ppu, "if (" #SYNTAXP "4) " SYNTAXA "(Rx32++Mu2)" NT SYNTAXB,ATTRIB,DESCR, { if (fLSBOLD(SYNTAXP##V)) { fEA_REG(RxV); BEH; fPM_M(RxV,MuV); } else {CANCEL;}}) \ + +#define MMVEC_COND_EACH_EA_FALSE(TAG,DESCR,ATTRIB,NT,SYNTAXA,SYNTAXB,SYNTAXP,BEH) \ +EXTINSN(V6_##TAG##_npred_pi, "if (!" #SYNTAXP "4) " SYNTAXA "(Rx32++#s3)" NT SYNTAXB,ATTRIB,DESCR,{ if (fLSBOLDNOT(SYNTAXP##V)) { fEA_REG(RxV); BEH; fPM_I(RxV,siV*fVECSIZE()); } else {CANCEL;}}) \ +EXTINSN(V6_##TAG##_npred_ai, "if (!" #SYNTAXP "4) " SYNTAXA "(Rt32+#s4)" NT SYNTAXB,ATTRIB,DESCR, { if (fLSBOLDNOT(SYNTAXP##V)) { fEA_RI(RtV,siV*fVECSIZE()); BEH;} else {CANCEL;}}) \ +EXTINSN(V6_##TAG##_npred_ppu, "if (!" 
#SYNTAXP "4) " SYNTAXA "(Rx32++Mu2)" NT SYNTAXB,ATTRIB,DESCR,{ if (fLSBOLDNOT(SYNTAXP##V)) { fEA_REG(RxV); BEH; fPM_M(RxV,MuV); } else {CANCEL;}}) + +#define MMVEC_COND_EACH_EA(TAG,DESCR,ATTRIB,NT,SYNTAXA,SYNTAXB,SYNTAXP,BEH) \ +MMVEC_COND_EACH_EA_TRUE(TAG,DESCR,ATTRIB,NT,SYNTAXA,SYNTAXB,SYNTAXP,BEH) \ +MMVEC_COND_EACH_EA_FALSE(TAG,DESCR,ATTRIB,NT,SYNTAXA,SYNTAXB,SYNTAXP,BEH) + + +#define VEC_SCALE(X) X*fVECSIZE() + + +#define MMVEC_LD(TAG,DESCR,ATTRIB,NT) MMVEC_EACH_EA(TAG,DESCR,ATTRIB,NT,"Vd32=vmem","",fLOADMMV(EA,VdV)) +#define MMVEC_LDC(TAG,DESCR,ATTRIB,NT) MMVEC_EACH_EA(TAG##_cur,DESCR,ATTRIB,NT,"Vd32.cur=vmem","",fLOADMMV(EA,VdV)) +#define MMVEC_LDT(TAG,DESCR,ATTRIB,NT) MMVEC_EACH_EA(TAG##_tmp,DESCR,ATTRIB,NT,"Vd32.tmp=vmem","",fLOADMMV(EA,VdV)) +#define MMVEC_LDU(TAG,DESCR,ATTRIB,NT) MMVEC_EACH_EA(TAG,DESCR,ATTRIB,NT,"Vd32=vmemu","",fLOADMMVU(EA,VdV)) + + +#define MMVEC_STQ(TAG,DESCR,ATTRIB,NT) \ +MMVEC_EACH_EA(TAG##_qpred,DESCR,ATTRIB,NT,"if (Qv4) vmem","=Vs32",fSTOREMMVQ(EA,VsV,QvV)) \ +MMVEC_EACH_EA(TAG##_nqpred,DESCR,ATTRIB,NT,"if (!Qv4) vmem","=Vs32",fSTOREMMVNQ(EA,VsV,QvV)) + +/**************************************************************** +* MAPPING FOR VMEMs +****************************************************************/ + +#define ATTR_VMEM A_EXTENSION,A_CVI,A_CVI_VM +#define ATTR_VMEMU A_EXTENSION,A_CVI,A_CVI_VM,A_CVI_VP + + +MMVEC_LD(vL32b, "Aligned Vector Load", ATTRIBS(ATTR_VMEM,A_LOAD,A_CVI_VA),) +MMVEC_LDC(vL32b, "Aligned Vector Load Cur", ATTRIBS(ATTR_VMEM,A_LOAD,A_CVI_NEW,A_CVI_VA),) +MMVEC_LDT(vL32b, "Aligned Vector Load Tmp", ATTRIBS(ATTR_VMEM,A_LOAD,A_CVI_TMP),) + +MMVEC_COND_EACH_EA(vL32b,"Conditional Aligned Vector Load",ATTRIBS(ATTR_VMEM,A_LOAD,A_CVI_VA),,"Vd32=vmem",,Pv,fLOADMMV(EA,VdV);) +MMVEC_COND_EACH_EA(vL32b_cur,"Conditional Aligned Vector Load Cur",ATTRIBS(ATTR_VMEM,A_LOAD,A_CVI_VA,A_CVI_NEW),,"Vd32.cur=vmem",,Pv,fLOADMMV(EA,VdV);) +MMVEC_COND_EACH_EA(vL32b_tmp,"Conditional Aligned Vector Load Tmp",ATTRIBS(ATTR_VMEM,A_LOAD,A_CVI_TMP),,"Vd32.tmp=vmem",,Pv,fLOADMMV(EA,VdV);) + +MMVEC_EACH_EA(vS32b,"Aligned Vector Store",ATTRIBS(ATTR_VMEM,A_STORE,A_RESTRICT_SLOT0ONLY,A_CVI_VA),,"vmem","=Vs32",fSTOREMMV(EA,VsV)) +MMVEC_COND_EACH_EA(vS32b,"Aligned Vector Store",ATTRIBS(ATTR_VMEM,A_STORE,A_RESTRICT_SLOT0ONLY,A_CVI_VA),,"vmem","=Vs32",Pv,fSTOREMMV(EA,VsV)) + + +MMVEC_STQ(vS32b, "Aligned Vector Store", ATTRIBS(ATTR_VMEM,A_STORE,A_RESTRICT_SLOT0ONLY,A_CVI_VA),) + +MMVEC_LDU(vL32Ub, "Unaligned Vector Load", ATTRIBS(ATTR_VMEMU,A_LOAD,A_RESTRICT_NOSLOT1),) + +MMVEC_EACH_EA(vS32Ub,"Unaligned Vector Store",ATTRIBS(ATTR_VMEMU,A_STORE,A_RESTRICT_NOSLOT1),,"vmemu","=Vs32",fSTOREMMVU(EA,VsV)) + +MMVEC_COND_EACH_EA(vS32Ub,"Unaligned Vector Store",ATTRIBS(ATTR_VMEMU,A_STORE,A_RESTRICT_NOSLOT1),,"vmemu","=Vs32",Pv,fSTOREMMVU(EA,VsV)) + +MMVEC_EACH_EA(vS32b_new,"Aligned Vector Store New",ATTRIBS(ATTR_VMEM,A_STORE,A_CVI_NEW,A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY),,"vmem","=Os8.new",fSTOREMMV(EA,fNEWVREG(OsN))) + +// V65 store relase, zero byte store +MMVEC_EACH_EA(vS32b_srls,"Aligned Vector Scatter Release",ATTRIBS(ATTR_VMEM,A_STORE,A_CVI_SCATTER_RELEASE,A_CVI_NEW,A_RESTRICT_SLOT0ONLY),,"vmem",":scatter_release",fSTORERELEASE(EA,0)) + + + +MMVEC_COND_EACH_EA(vS32b_new,"Aligned Vector Store New",ATTRIBS(ATTR_VMEM,A_STORE,A_CVI_NEW,A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY),,"vmem","=Os8.new",Pv,fSTOREMMV(EA,fNEWVREG(OsN))) + + +/****************************************************************************************** +* +* MMVECTOR MEMORY OPERATIONS - NON TEMPORAL +* 
+*******************************************************************************************/ + +#define ATTR_VMEM_NT A_EXTENSION,A_CVI,A_CVI_VM + +MMVEC_EACH_EA(vS32b_nt,"Aligned Vector Store - Non temporal",ATTRIBS(ATTR_VMEM_NT,A_STORE,A_RESTRICT_SLOT0ONLY,A_CVI_VA),":nt","vmem","=Vs32",fSTOREMMV(EA,VsV)) +MMVEC_COND_EACH_EA(vS32b_nt,"Aligned Vector Store - Non temporal",ATTRIBS(ATTR_VMEM_NT,A_STORE,A_RESTRICT_SLOT0ONLY,A_CVI_VA),":nt","vmem","=Vs32",Pv,fSTOREMMV(EA,VsV)) + +MMVEC_EACH_EA(vS32b_nt_new,"Aligned Vector Store New - Non temporal",ATTRIBS(ATTR_VMEM_NT,A_STORE,A_CVI_NEW,A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY),":nt","vmem","=Os8.new",fSTOREMMV(EA,fNEWVREG(OsN))) +MMVEC_COND_EACH_EA(vS32b_nt_new,"Aligned Vector Store New - Non temporal",ATTRIBS(ATTR_VMEM_NT,A_STORE,A_CVI_NEW,A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY),":nt","vmem","=Os8.new",Pv,fSTOREMMV(EA,fNEWVREG(OsN))) + + +MMVEC_STQ(vS32b_nt, "Aligned Vector Store - Non temporal", ATTRIBS(ATTR_VMEM_NT,A_STORE,A_RESTRICT_SLOT0ONLY,A_CVI_VA),":nt") + +MMVEC_LD(vL32b_nt, "Aligned Vector Load - Non temporal", ATTRIBS(ATTR_VMEM_NT,A_LOAD,A_CVI_VA),":nt") +MMVEC_LDC(vL32b_nt, "Aligned Vector Load Cur - Non temporal", ATTRIBS(ATTR_VMEM_NT,A_LOAD,A_CVI_NEW,A_CVI_VA),":nt") +MMVEC_LDT(vL32b_nt, "Aligned Vector Load Tmp - Non temporal", ATTRIBS(ATTR_VMEM_NT,A_LOAD,A_CVI_TMP),":nt") + +MMVEC_COND_EACH_EA(vL32b_nt,"Conditional Aligned Vector Load",ATTRIBS(ATTR_VMEM_NT,A_CVI_VA),,"Vd32=vmem",":nt",Pv,fLOADMMV(EA,VdV);) +MMVEC_COND_EACH_EA(vL32b_nt_cur,"Conditional Aligned Vector Load Cur",ATTRIBS(ATTR_VMEM_NT,A_CVI_VA,A_CVI_NEW),,"Vd32.cur=vmem",":nt",Pv,fLOADMMV(EA,VdV);) +MMVEC_COND_EACH_EA(vL32b_nt_tmp,"Conditional Aligned Vector Load Tmp",ATTRIBS(ATTR_VMEM_NT,A_CVI_TMP),,"Vd32.tmp=vmem",":nt",Pv,fLOADMMV(EA,VdV);) + + +#undef VEC_SCALE + + +/*************************************************** + * Vector Alignment + ************************************************/ + +#define VALIGNB(SHIFT) \ + fHIDE(int i;) \ + for(i = 0; i < fVBYTES(); i++) {\ + VdV.ub[i] = (i+SHIFT>=fVBYTES()) ? 
VuV.ub[i+SHIFT-fVBYTES()] : VvV.ub[i+SHIFT];\
+ }
+
+EXTINSN(V6_valignb, "Vd32=valign(Vu32,Vv32,Rt8)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP),"Align Two vectors by Rt8 as control",
+{
+ unsigned shift = RtV & (fVBYTES()-1);
+ VALIGNB(shift)
+})
+EXTINSN(V6_vlalignb, "Vd32=vlalign(Vu32,Vv32,Rt8)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP),"Align Two vectors by Rt8 as control",
+{
+ unsigned shift = fVBYTES() - (RtV & (fVBYTES()-1));
+ VALIGNB(shift)
+})
+EXTINSN(V6_valignbi, "Vd32=valign(Vu32,Vv32,#u3)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP),"Align Two vectors by #u3 as control",
+{
+ VALIGNB(uiV)
+})
+EXTINSN(V6_vlalignbi,"Vd32=vlalign(Vu32,Vv32,#u3)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP),"Align Two vectors by #u3 as control",
+{
+ unsigned shift = fVBYTES() - uiV;
+ VALIGNB(shift)
+})
+
+EXTINSN(V6_vror, "Vd32=vror(Vu32,Rt32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP),
+"Align Two vectors by Rt32 as control",
+{
+ fHIDE(int k;)
+ for (k=0;k<fVBYTES();k++) {
+ VdV.ub[k] = VuV.ub[(k+RtV)&(fVBYTES()-1)];
+ }
+})
+
+#define V_SHIFT(TYPE,DESC,SIZE,LOGSIZE,CASTTYPE) \
+ITERATOR_INSN2_SHIFT_SLOT(SIZE,vasr##TYPE, "Vd32=vasr" #TYPE "(Vu32,Rt32)","Vd32."#TYPE"=vasr(Vu32."#TYPE",Rt32)", "Vector arithmetic shift right " DESC, VdV.TYPE[i] = (VuV.TYPE[i] >> (RtV & (SIZE-1)))) \
+ITERATOR_INSN2_SHIFT_SLOT(SIZE,vasl##TYPE, "Vd32=vasl" #TYPE "(Vu32,Rt32)","Vd32."#TYPE"=vasl(Vu32."#TYPE",Rt32)", "Vector arithmetic shift left " DESC, VdV.TYPE[i] = (VuV.TYPE[i] << (RtV & (SIZE-1)))) \
+ITERATOR_INSN2_SHIFT_SLOT(SIZE,vlsr##TYPE, "Vd32=vlsr" #TYPE "(Vu32,Rt32)","Vd32.u"#TYPE"=vlsr(Vu32.u"#TYPE",Rt32)", "Vector logical shift right " DESC, VdV.u##TYPE[i] = (VuV.u##TYPE[i] >> (RtV & (SIZE-1)))) \
+ITERATOR_INSN2_SHIFT_SLOT(SIZE,vasr##TYPE##v,"Vd32=vasr" #TYPE "(Vu32,Vv32)","Vd32."#TYPE"=vasr(Vu32."#TYPE",Vv32."#TYPE")", "Vector arithmetic shift right " DESC, VdV.TYPE[i] = fBIDIR_ASHIFTR(VuV.TYPE[i], fSXTN((LOGSIZE+1),SIZE,VvV.TYPE[i]),CASTTYPE)) \
+ITERATOR_INSN2_SHIFT_SLOT(SIZE,vasl##TYPE##v,"Vd32=vasl" #TYPE "(Vu32,Vv32)","Vd32."#TYPE"=vasl(Vu32."#TYPE",Vv32."#TYPE")", "Vector arithmetic shift left " DESC, VdV.TYPE[i] = fBIDIR_ASHIFTL(VuV.TYPE[i], fSXTN((LOGSIZE+1),SIZE,VvV.TYPE[i]),CASTTYPE)) \
+ITERATOR_INSN2_SHIFT_SLOT(SIZE,vlsr##TYPE##v,"Vd32=vlsr" #TYPE "(Vu32,Vv32)","Vd32.u"#TYPE"=vlsr(Vu32.u"#TYPE",Vv32."#TYPE")", "Vector logical shift right " DESC, VdV.u##TYPE[i] = fBIDIR_LSHIFTR(VuV.u##TYPE[i], fSXTN((LOGSIZE+1),SIZE,VvV.TYPE[i]),CASTTYPE)) \
+
+V_SHIFT(w, "word", 32,5,4_4)
+V_SHIFT(h, "halfword", 16,4,2_2)
+
+ITERATOR_INSN_SHIFT_SLOT(8,vlsrb,"Vd32.ub=vlsr(Vu32.ub,Rt32)","vec log shift right bytes", VdV.b[i] = VuV.ub[i] >> (RtV & 0x7))
+
+ITERATOR_INSN2_SHIFT_SLOT(32,vrotr,"Vd32=vrotr(Vu32,Vv32)","Vd32.uw=vrotr(Vu32.uw,Vv32.uw)","Vector word rotate right", VdV.uw[i] = ((VuV.uw[i] >> (VvV.uw[i] & 0x1f)) | (VuV.uw[i] << (32 - (VvV.uw[i] & 0x1f)))))
+
+/*********************************************************************
+ * MMVECTOR SHIFT AND PERMUTE
+ * ******************************************************************/
+
+ITERATOR_INSN2_PERMUTE_SLOT_DOUBLE_VEC(32,vasr_into,"Vxx32=vasrinto(Vu32,Vv32)","Vxx32.w=vasrinto(Vu32.w,Vv32.w)","ASR vector 1 elements and overlay dropping bits to MSB of vector 2 elements",
+ fHIDE(int64_t ) shift = (fSE32_64(VuV.w[i]) << 32);
+ fHIDE(int64_t ) mask = (((fSE32_64(VxxV.v[0].w[i])) << 32) | fZE32_64(VxxV.v[0].w[i]));
+ fHIDE(int64_t) lomask = (((fSE32_64(1)) << 32) - 1);
+ fHIDE(int ) count = -(0x40 & VvV.w[i]) + (VvV.w[i] & 0x3f);
+ fHIDE(int64_t ) result = (count == -0x40) ? 0 : (((count < 0) ?
((shift << -(count)) | (mask & (lomask << -(count)))) : ((shift >> count) | (mask & (lomask >> count))))); + VxxV.v[1].w[i] = ((result >> 32) & 0xffffffff); + VxxV.v[0].w[i] = (result & 0xffffffff)) + +#define NEW_NARROWING_SHIFT 1 + +#if NEW_NARROWING_SHIFT +#define NARROWING_SHIFT(ITERSIZE,TAG,DSTM,DSTTYPE,SRCTYPE,SYNOPTS,SATFUNC,RNDFUNC,SHAMTMASK) \ +ITERATOR_INSN_SHIFT_SLOT(ITERSIZE,TAG, \ +"Vd32." #DSTTYPE "=vasr(Vu32." #SRCTYPE ",Vv32." #SRCTYPE ",Rt8)" #SYNOPTS, \ +"Vector shift right and shuffle", \ + fHIDE(int )shamt = RtV & SHAMTMASK; \ + DSTM(0,VdV.SRCTYPE[i],SATFUNC(RNDFUNC(VvV.SRCTYPE[i],shamt) >> shamt)); \ + DSTM(1,VdV.SRCTYPE[i],SATFUNC(RNDFUNC(VuV.SRCTYPE[i],shamt) >> shamt))) + + + + + +/* WORD TO HALF*/ + +NARROWING_SHIFT(32,vasrwh,fSETHALF,h,w,,fECHO,fVNOROUND,0xF) +NARROWING_SHIFT(32,vasrwhsat,fSETHALF,h,w,:sat,fVSATH,fVNOROUND,0xF) +NARROWING_SHIFT(32,vasrwhrndsat,fSETHALF,h,w,:rnd:sat,fVSATH,fVROUND,0xF) +NARROWING_SHIFT(32,vasrwuhrndsat,fSETHALF,uh,w,:rnd:sat,fVSATUH,fVROUND,0xF) +NARROWING_SHIFT(32,vasrwuhsat,fSETHALF,uh,w,:sat,fVSATUH,fVNOROUND,0xF) +NARROWING_SHIFT(32,vasruwuhrndsat,fSETHALF,uh,uw,:rnd:sat,fVSATUH,fVROUND,0xF) + +NARROWING_SHIFT_NOV1(32,vasruwuhsat,fSETHALF,uh,uw,:sat,fVSATUH,fVNOROUND,0xF) +NARROWING_SHIFT(16,vasrhubsat,fSETBYTE,ub,h,:sat,fVSATUB,fVNOROUND,0x7) +NARROWING_SHIFT(16,vasrhubrndsat,fSETBYTE,ub,h,:rnd:sat,fVSATUB,fVROUND,0x7) +NARROWING_SHIFT(16,vasrhbsat,fSETBYTE,b,h,:sat,fVSATB,fVNOROUND,0x7) +NARROWING_SHIFT(16,vasrhbrndsat,fSETBYTE,b,h,:rnd:sat,fVSATB,fVROUND,0x7) + +NARROWING_SHIFT_NOV1(16,vasruhubsat,fSETBYTE,ub,uh,:sat,fVSATUB,fVNOROUND,0x7) +NARROWING_SHIFT_NOV1(16,vasruhubrndsat,fSETBYTE,ub,uh,:rnd:sat,fVSATUB,fVROUND,0x7) + +#else +ITERATOR_INSN2_SHIFT_SLOT(32,vasrwh,"Vd32=vasrwh(Vu32,Vv32,Rt8)","Vd32.h=vasr(Vu32.w,Vv32.w,Rt8)", +"Vector arithmetic shift right words, shuffle even halfwords", + fSETHALF(0,VdV.w[i], (VvV.w[i] >> (RtV & 0xF))); + fSETHALF(1,VdV.w[i], (VuV.w[i] >> (RtV & 0xF)))) + + +ITERATOR_INSN2_SHIFT_SLOT(32,vasrwhsat,"Vd32=vasrwh(Vu32,Vv32,Rt8):sat","Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):sat", +"Vector arithmetic shift right words, shuffle even halfwords", + fSETHALF(0,VdV.w[i], fVSATH(VvV.w[i] >> (RtV & 0xF))); + fSETHALF(1,VdV.w[i], fVSATH(VuV.w[i] >> (RtV & 0xF)))) + +ITERATOR_INSN2_SHIFT_SLOT(32,vasrwhrndsat,"Vd32=vasrwh(Vu32,Vv32,Rt8):rnd:sat","Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat", +"Vector arithmetic shift right words, shuffle even halfwords", + fHIDE(int ) shamt = RtV & 0xF; + fSETHALF(0,VdV.w[i], fVSATH( (VvV.w[i] + fBIDIR_ASHIFTL(1,(shamt-1),4_8) ) >> shamt)); + fSETHALF(1,VdV.w[i], fVSATH( (VuV.w[i] + fBIDIR_ASHIFTL(1,(shamt-1),4_8) ) >> shamt))) + +ITERATOR_INSN2_SHIFT_SLOT(32,vasrwuhrndsat,"Vd32=vasrwuh(Vu32,Vv32,Rt8):rnd:sat","Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat", +"Vector arithmetic shift right words, shuffle even halfwords", + fHIDE(int ) shamt = RtV & 0xF; + fSETHALF(0,VdV.w[i], fVSATUH( (VvV.w[i] + fBIDIR_ASHIFTL(1,(shamt-1),4_8) ) >> shamt)); + fSETHALF(1,VdV.w[i], fVSATUH( (VuV.w[i] + fBIDIR_ASHIFTL(1,(shamt-1),4_8) ) >> shamt))) + +ITERATOR_INSN2_SHIFT_SLOT(32,vasrwuhsat,"Vd32=vasrwuh(Vu32,Vv32,Rt8):sat","Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):sat", +"Vector arithmetic shift right words, shuffle even halfwords", + fSETHALF(0, VdV.uw[i], fVSATUH(VvV.w[i] >> (RtV & 0xF))); + fSETHALF(1, VdV.uw[i], fVSATUH(VuV.w[i] >> (RtV & 0xF)))) + +ITERATOR_INSN2_SHIFT_SLOT(32,vasruwuhrndsat,"Vd32=vasruwuh(Vu32,Vv32,Rt8):rnd:sat","Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):rnd:sat", +"Vector arithmetic shift right 
words, shuffle even halfwords", + fHIDE(int ) shamt = RtV & 0xF; + fSETHALF(0,VdV.w[i], fVSATUH( (VvV.uw[i] + fBIDIR_ASHIFTL(1,(shamt-1),4_8) ) >> shamt)); + fSETHALF(1,VdV.w[i], fVSATUH( (VuV.uw[i] + fBIDIR_ASHIFTL(1,(shamt-1),4_8) ) >> shamt))) +#endif + + + +ITERATOR_INSN2_SHIFT_SLOT(32,vroundwh,"Vd32=vroundwh(Vu32,Vv32):sat","Vd32.h=vround(Vu32.w,Vv32.w):sat", +"Vector round words to halves, shuffle resultant halfwords", + fSETHALF(0, VdV.uw[i], fVSATH((VvV.w[i] + fCONSTLL(0x8000)) >> 16)); + fSETHALF(1, VdV.uw[i], fVSATH((VuV.w[i] + fCONSTLL(0x8000)) >> 16))) + +ITERATOR_INSN2_SHIFT_SLOT(32,vroundwuh,"Vd32=vroundwuh(Vu32,Vv32):sat","Vd32.uh=vround(Vu32.w,Vv32.w):sat", +"Vector round words to halves, shuffle resultant halfwords", + fSETHALF(0, VdV.uw[i], fVSATUH((VvV.w[i] + fCONSTLL(0x8000)) >> 16)); + fSETHALF(1, VdV.uw[i], fVSATUH((VuV.w[i] + fCONSTLL(0x8000)) >> 16))) + +ITERATOR_INSN2_SHIFT_SLOT(32,vrounduwuh,"Vd32=vrounduwuh(Vu32,Vv32):sat","Vd32.uh=vround(Vu32.uw,Vv32.uw):sat", +"Vector round words to halves, shuffle resultant halfwords", + fSETHALF(0, VdV.uw[i], fVSATUH((VvV.uw[i] + fCONSTLL(0x8000)) >> 16)); + fSETHALF(1, VdV.uw[i], fVSATUH((VuV.uw[i] + fCONSTLL(0x8000)) >> 16))) + + + + + +/* HALF TO BYTE*/ + +ITERATOR_INSN2_SHIFT_SLOT(16,vroundhb,"Vd32=vroundhb(Vu32,Vv32):sat","Vd32.b=vround(Vu32.h,Vv32.h):sat", +"Vector round words to halves, shuffle resultant halfwords", + fSETBYTE(0, VdV.uh[i], fVSATB((VvV.h[i] + 0x80) >> 8)); + fSETBYTE(1, VdV.uh[i], fVSATB((VuV.h[i] + 0x80) >> 8))) + +ITERATOR_INSN2_SHIFT_SLOT(16,vroundhub,"Vd32=vroundhub(Vu32,Vv32):sat","Vd32.ub=vround(Vu32.h,Vv32.h):sat", +"Vector round words to halves, shuffle resultant halfwords", + fSETBYTE(0, VdV.uh[i], fVSATUB((VvV.h[i] + 0x80) >> 8)); + fSETBYTE(1, VdV.uh[i], fVSATUB((VuV.h[i] + 0x80) >> 8))) + +ITERATOR_INSN2_SHIFT_SLOT(16,vrounduhub,"Vd32=vrounduhub(Vu32,Vv32):sat","Vd32.ub=vround(Vu32.uh,Vv32.uh):sat", +"Vector round words to halves, shuffle resultant halfwords", + fSETBYTE(0, VdV.uh[i], fVSATUB((VvV.uh[i] + 0x80) >> 8)); + fSETBYTE(1, VdV.uh[i], fVSATUB((VuV.uh[i] + 0x80) >> 8))) + + +ITERATOR_INSN2_SHIFT_SLOT(32,vaslw_acc,"Vx32+=vaslw(Vu32,Rt32)","Vx32.w+=vasl(Vu32.w,Rt32)", +"Vector shift add word", + VxV.w[i] += (VuV.w[i] << (RtV & (32-1)))) + +ITERATOR_INSN2_SHIFT_SLOT(32,vasrw_acc,"Vx32+=vasrw(Vu32,Rt32)","Vx32.w+=vasr(Vu32.w,Rt32)", +"Vector shift add word", + VxV.w[i] += (VuV.w[i] >> (RtV & (32-1)))) + +ITERATOR_INSN2_SHIFT_SLOT_NOV1(16,vaslh_acc,"Vx32+=vaslh(Vu32,Rt32)","Vx32.h+=vasl(Vu32.h,Rt32)", +"Vector shift add halfword", + VxV.h[i] += (VuV.h[i] << (RtV & (16-1)))) + +ITERATOR_INSN2_SHIFT_SLOT_NOV1(16,vasrh_acc,"Vx32+=vasrh(Vu32,Rt32)","Vx32.h+=vasr(Vu32.h,Rt32)", +"Vector shift add halfword", + VxV.h[i] += (VuV.h[i] >> (RtV & (16-1)))) + +/************************************************************************** +* +* MMVECTOR ELEMENT-WISE ARITHMETIC +* +**************************************************************************/ + +/************************************************************************** +* MACROS GO IN MACROS.DEF NOT HERE!!! +**************************************************************************/ + + +#define MMVEC_ABSDIFF(TYPE,TYPE2,DESCR, WIDTH, DEST,SRC)\ +ITERATOR_INSN2_MPY_SLOT(WIDTH, vabsdiff##TYPE, "Vd32=vabsdiff"TYPE2"(Vu32,Vv32)" ,"Vd32."#DEST"=vabsdiff(Vu32."#SRC",Vv32."#SRC")" , "Vector Absolute of Difference "DESCR, VdV.DEST[i] = (VuV.SRC[i] > VvV.SRC[i]) ? 
(VuV.SRC[i] - VvV.SRC[i]) : (VvV.SRC[i] - VuV.SRC[i])) + +#define MMVEC_ADDU_SAT(TYPE,TYPE2,DESCR, WIDTH, DEST,SRC)\ +ITERATOR_INSN2_ANY_SLOT(WIDTH, vadd##TYPE##sat, "Vd32=vadd"TYPE2"(Vu32,Vv32):sat" , "Vd32."#DEST"=vadd(Vu32."#SRC",Vv32."#SRC"):sat", "Vector Add & Saturate "DESCR, VdV.DEST[i] = fVUADDSAT(WIDTH, VuV.SRC[i], VvV.SRC[i]))\ +ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH, vadd##TYPE##sat_dv, "Vdd32=vadd"TYPE2"(Vuu32,Vvv32):sat", "Vdd32."#DEST"=vadd(Vuu32."#SRC",Vvv32."#SRC"):sat", "Double Vector Add & Saturate "DESCR, VddV.v[0].DEST[i] = fVUADDSAT(WIDTH, VuuV.v[0].SRC[i],VvvV.v[0].SRC[i]); VddV.v[1].DEST[i] = fVUADDSAT(WIDTH, VuuV.v[1].SRC[i],VvvV.v[1].SRC[i]))\ +ITERATOR_INSN2_ANY_SLOT(WIDTH, vsub##TYPE##sat, "Vd32=vsub"TYPE2"(Vu32,Vv32):sat", "Vd32."#DEST"=vsub(Vu32."#SRC",Vv32."#SRC"):sat", "Vector Add & Saturate "DESCR, VdV.DEST[i] = fVUSUBSAT(WIDTH, VuV.SRC[i], VvV.SRC[i]))\ +ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH, vsub##TYPE##sat_dv, "Vdd32=vsub"TYPE2"(Vuu32,Vvv32):sat", "Vdd32."#DEST"=vsub(Vuu32."#SRC",Vvv32."#SRC"):sat", "Double Vector Add & Saturate "DESCR, VddV.v[0].DEST[i] = fVUSUBSAT(WIDTH, VuuV.v[0].SRC[i],VvvV.v[0].SRC[i]); VddV.v[1].DEST[i] = fVUSUBSAT(WIDTH, VuuV.v[1].SRC[i],VvvV.v[1].SRC[i]))\ + +#define MMVEC_ADDS_SAT(TYPE,TYPE2,DESCR, WIDTH,DEST,SRC)\ +ITERATOR_INSN2_ANY_SLOT(WIDTH, vadd##TYPE##sat, "Vd32=vadd"TYPE2"(Vu32,Vv32):sat" , "Vd32."#DEST"=vadd(Vu32."#SRC",Vv32."#SRC"):sat", "Vector Add & Saturate "DESCR, VdV.DEST[i] = fVSADDSAT(WIDTH, VuV.SRC[i], VvV.SRC[i]))\ +ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH, vadd##TYPE##sat_dv, "Vdd32=vadd"TYPE2"(Vuu32,Vvv32):sat", "Vdd32."#DEST"=vadd(Vuu32."#SRC",Vvv32."#SRC"):sat", "Double Vector Add & Saturate "DESCR, VddV.v[0].DEST[i] = fVSADDSAT(WIDTH, VuuV.v[0].SRC[i], VvvV.v[0].SRC[i]); VddV.v[1].DEST[i] = fVSADDSAT(WIDTH, VuuV.v[1].SRC[i], VvvV.v[1].SRC[i]))\ +ITERATOR_INSN2_ANY_SLOT(WIDTH, vsub##TYPE##sat, "Vd32=vsub"TYPE2"(Vu32,Vv32):sat", "Vd32."#DEST"=vsub(Vu32."#SRC",Vv32."#SRC"):sat", "Vector Add & Saturate "DESCR, VdV.DEST[i] = fVSSUBSAT(WIDTH, VuV.SRC[i], VvV.SRC[i]))\ +ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH, vsub##TYPE##sat_dv, "Vdd32=vsub"TYPE2"(Vuu32,Vvv32):sat", "Vdd32."#DEST"=vsub(Vuu32."#SRC",Vvv32."#SRC"):sat", "Double Vector Add & Saturate "DESCR, VddV.v[0].DEST[i] = fVSSUBSAT(WIDTH, VuuV.v[0].SRC[i], VvvV.v[0].SRC[i]); VddV.v[1].DEST[i] = fVSSUBSAT(WIDTH, VuuV.v[1].SRC[i], VvvV.v[1].SRC[i]))\ + +#define MMVEC_AVGU(TYPE,TYPE2,DESCR, WIDTH, DEST,SRC)\ +ITERATOR_INSN2_ANY_SLOT(WIDTH,vavg##TYPE, "Vd32=vavg"TYPE2"(Vu32,Vv32)", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC")", "Vector Average "DESCR, VdV.DEST[i] = fVAVGU( WIDTH, VuV.SRC[i], VvV.SRC[i])) \ +ITERATOR_INSN2_ANY_SLOT(WIDTH,vavg##TYPE##rnd, "Vd32=vavg"TYPE2"(Vu32,Vv32):rnd", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC"):rnd", "Vector Average % Round"DESCR, VdV.DEST[i] = fVAVGURND(WIDTH, VuV.SRC[i], VvV.SRC[i])) + + + +#define MMVEC_AVGS(TYPE,TYPE2,DESCR, WIDTH, DEST,SRC)\ +ITERATOR_INSN2_ANY_SLOT(WIDTH,vavg##TYPE, "Vd32=vavg"TYPE2"(Vu32,Vv32)", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC")", "Vector Average "DESCR, VdV.DEST[i] = fVAVGS( WIDTH, VuV.SRC[i], VvV.SRC[i])) \ +ITERATOR_INSN2_ANY_SLOT(WIDTH,vavg##TYPE##rnd, "Vd32=vavg"TYPE2"(Vu32,Vv32):rnd", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC"):rnd", "Vector Average % Round"DESCR, VdV.DEST[i] = fVAVGSRND( WIDTH, VuV.SRC[i], VvV.SRC[i])) \ +ITERATOR_INSN2_ANY_SLOT(WIDTH,vnavg##TYPE, "Vd32=vnavg"TYPE2"(Vu32,Vv32)", "Vd32."#DEST"=vnavg(Vu32."#SRC",Vv32."#SRC")", "Vector Negative Average "DESCR, 
VdV.DEST[i] = fVNAVGS( WIDTH, VuV.SRC[i], VvV.SRC[i])) + + + + + + + +#define MMVEC_ADDWRAP(TYPE,TYPE2, DESCR, WIDTH , DEST,SRC)\ +ITERATOR_INSN2_ANY_SLOT(WIDTH, vadd##TYPE, "Vd32=vadd"TYPE2"(Vu32,Vv32)" , "Vd32."#DEST"=vadd(Vu32."#SRC",Vv32."#SRC")", "Vector Add "DESCR, VdV.DEST[i] = VuV.SRC[i] + VvV.SRC[i])\ +ITERATOR_INSN2_ANY_SLOT(WIDTH, vsub##TYPE, "Vd32=vsub"TYPE2"(Vu32,Vv32)" , "Vd32."#DEST"=vsub(Vu32."#SRC",Vv32."#SRC")", "Vector Sub "DESCR, VdV.DEST[i] = VuV.SRC[i] - VvV.SRC[i])\ +ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH, vadd##TYPE##_dv, "Vdd32=vadd"TYPE2"(Vuu32,Vvv32)" , "Vdd32."#DEST"=vadd(Vuu32."#SRC",Vvv32."#SRC")", "Double Vector Add "DESCR, VddV.v[0].DEST[i] = VuuV.v[0].SRC[i] + VvvV.v[0].SRC[i]; VddV.v[1].DEST[i] = VuuV.v[1].SRC[i] + VvvV.v[1].SRC[i])\ +ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH, vsub##TYPE##_dv, "Vdd32=vsub"TYPE2"(Vuu32,Vvv32)" , "Vdd32."#DEST"=vsub(Vuu32."#SRC",Vvv32."#SRC")", "Double Vector Sub "DESCR, VddV.v[0].DEST[i] = VuuV.v[0].SRC[i] - VvvV.v[0].SRC[i]; VddV.v[1].DEST[i] = VuuV.v[1].SRC[i] - VvvV.v[1].SRC[i]) \ + + + + + +/* Wrapping Adds */ +MMVEC_ADDWRAP(b, "b", "Byte", 8, b, b) +MMVEC_ADDWRAP(h, "h", "Halfword", 16, h, h) +MMVEC_ADDWRAP(w, "w", "Word", 32, w, w) + +/* Saturating Adds */ +MMVEC_ADDU_SAT(ub, "ub", "Unsigned Byte", 8, ub, ub) +MMVEC_ADDU_SAT(uh, "uh", "Unsigned Halfword", 16, uh, uh) +MMVEC_ADDU_SAT(uw, "uw", "Unsigned word", 32, uw, uw) +MMVEC_ADDS_SAT(b, "b", "byte", 8, b, b) +MMVEC_ADDS_SAT(h, "h", "Halfword", 16, h, h) +MMVEC_ADDS_SAT(w, "w", "Word", 32, w, w) + + +/* Averaging Instructions */ +MMVEC_AVGU(ub,"ub", "Unsigned Byte", 8, ub, ub) +MMVEC_AVGU(uh,"uh", "Unsigned Halfword", 16, uh, uh) +MMVEC_AVGU_NOV1(uw,"uw", "Unsigned Word", 32, uw, uw) +MMVEC_AVGS_NOV1(b, "b", "Byte", 8, b, b) +MMVEC_AVGS(h, "h", "Halfword", 16, h, h) +MMVEC_AVGS(w, "w", "Word", 32, w, w) + + +/* Absolute Difference */ +MMVEC_ABSDIFF(ub,"ub", "Unsigned Byte", 8, ub, ub) +MMVEC_ABSDIFF(uh,"uh", "Unsigned Halfword", 16, uh, uh) +MMVEC_ABSDIFF(h,"h", "Halfword", 16, uh, h) +MMVEC_ABSDIFF(w,"w", "Word", 32, uw, w) + +ITERATOR_INSN2_ANY_SLOT(8,vnavgub, "Vd32=vnavgub(Vu32,Vv32)", "Vd32.b=vnavg(Vu32.ub,Vv32.ub)", +"Vector Negative Average Unsigned Byte", VdV.b[i] = fVNAVGU(8, VuV.ub[i], VvV.ub[i])) + +ITERATOR_INSN_ANY_SLOT(32,vaddcarrysat,"Vd32.w=vadd(Vu32.w,Vv32.w,Qs4):carry:sat","add w/carry and saturate", +VdV.w[i] = fVSATW(VuV.w[i]+VvV.w[i]+fGETQBIT(QsV,i*4))) + +ITERATOR_INSN_ANY_SLOT(32,vaddcarry,"Vd32.w=vadd(Vu32.w,Vv32.w,Qx4):carry","add w/carry", +VdV.w[i] = VuV.w[i]+VvV.w[i]+fGETQBIT(QxV,i*4); +fSETQBITS(QxV,4,0xF,4*i,-fCARRY_FROM_ADD32(VuV.w[i],VvV.w[i],fGETQBIT(QxV,i*4)))) + +ITERATOR_INSN_ANY_SLOT(32,vsubcarry,"Vd32.w=vsub(Vu32.w,Vv32.w,Qx4):carry","add w/carry", +VdV.w[i] = VuV.w[i]+~VvV.w[i]+fGETQBIT(QxV,i*4); +fSETQBITS(QxV,4,0xF,4*i,-fCARRY_FROM_ADD32(VuV.w[i],~VvV.w[i],fGETQBIT(QxV,i*4)))) + +ITERATOR_INSN_ANY_SLOT(32,vaddcarryo,"Vd32.w,Qe4=vadd(Vu32.w,Vv32.w):carry","add w/carry out-only", +VdV.w[i] = VuV.w[i]+VvV.w[i]; +fSETQBITS(QeV,4,0xF,4*i,-fCARRY_FROM_ADD32(VuV.w[i],VvV.w[i],0))) + +ITERATOR_INSN_ANY_SLOT(32,vsubcarryo,"Vd32.w,Qe4=vsub(Vu32.w,Vv32.w):carry","subtract w/carry out-only", +VdV.w[i] = VuV.w[i]+~VvV.w[i]+1; +fSETQBITS(QeV,4,0xF,4*i,-fCARRY_FROM_ADD32(VuV.w[i],~VvV.w[i],1))) + + +ITERATOR_INSN_ANY_SLOT(32,vsatdw,"Vd32.w=vsatdw(Vu32.w,Vv32.w)","Saturate from 64-bits (higher 32-bits come from first vector) to 32-bits",VdV.w[i] = fVSATDW(VuV.w[i],VvV.w[i])) + + +#define MMVEC_ADDSAT_MIX(TAGEND,SATF,WIDTH,DEST,SRC1,SRC2)\ 
+ITERATOR_INSN_ANY_SLOT(WIDTH, vadd##TAGEND,"Vd32."#DEST"=vadd(Vu32."#SRC1",Vv32."#SRC2"):sat", "Vector Add mixed", VdV.DEST[i] = SATF(VuV.SRC1[i] + VvV.SRC2[i]))\ +ITERATOR_INSN_ANY_SLOT(WIDTH, vsub##TAGEND,"Vd32."#DEST"=vsub(Vu32."#SRC1",Vv32."#SRC2"):sat", "Vector Sub mixed", VdV.DEST[i] = SATF(VuV.SRC1[i] - VvV.SRC2[i]))\ + +MMVEC_ADDSAT_MIX(ububb_sat,fVSATUB,8,ub,ub,b) + +/**************************** +* WIDENING +****************************/ + + + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vaddubh,"Vdd32=vaddub(Vu32,Vv32)","Vdd32.h=vadd(Vu32.ub,Vv32.ub)", +"Vector addition with widen into two vectors", + VddV.v[0].h[i] = fZE8_16(fGETUBYTE(0, VuV.uh[i])) + fZE8_16(fGETUBYTE(0, VvV.uh[i])); + VddV.v[1].h[i] = fZE8_16(fGETUBYTE(1, VuV.uh[i])) + fZE8_16(fGETUBYTE(1, VvV.uh[i]))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vsububh,"Vdd32=vsubub(Vu32,Vv32)","Vdd32.h=vsub(Vu32.ub,Vv32.ub)", +"Vector subtraction with widen into two vectors", + VddV.v[0].h[i] = fZE8_16(fGETUBYTE(0, VuV.uh[i])) - fZE8_16(fGETUBYTE(0, VvV.uh[i])); + VddV.v[1].h[i] = fZE8_16(fGETUBYTE(1, VuV.uh[i])) - fZE8_16(fGETUBYTE(1, VvV.uh[i]))) + + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vaddhw,"Vdd32=vaddh(Vu32,Vv32)","Vdd32.w=vadd(Vu32.h,Vv32.h)", +"Vector addition with widen into two vectors", + VddV.v[0].w[i] = fGETHALF(0, VuV.w[i]) + fGETHALF(0, VvV.w[i]); + VddV.v[1].w[i] = fGETHALF(1, VuV.w[i]) + fGETHALF(1, VvV.w[i])) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vsubhw,"Vdd32=vsubh(Vu32,Vv32)","Vdd32.w=vsub(Vu32.h,Vv32.h)", +"Vector subtraction with widen into two vectors", + VddV.v[0].w[i] = fGETHALF(0, VuV.w[i]) - fGETHALF(0, VvV.w[i]); + VddV.v[1].w[i] = fGETHALF(1, VuV.w[i]) - fGETHALF(1, VvV.w[i])) + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vadduhw,"Vdd32=vadduh(Vu32,Vv32)","Vdd32.w=vadd(Vu32.uh,Vv32.uh)", +"Vector addition with widen into two vectors", + VddV.v[0].w[i] = fZE16_32(fGETUHALF(0, VuV.uw[i])) + fZE16_32(fGETUHALF(0, VvV.uw[i])); + VddV.v[1].w[i] = fZE16_32(fGETUHALF(1, VuV.uw[i])) + fZE16_32(fGETUHALF(1, VvV.uw[i]))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vsubuhw,"Vdd32=vsubuh(Vu32,Vv32)","Vdd32.w=vsub(Vu32.uh,Vv32.uh)", +"Vector subtraction with widen into two vectors", + VddV.v[0].w[i] = fZE16_32(fGETUHALF(0, VuV.uw[i])) - fZE16_32(fGETUHALF(0, VvV.uw[i])); + VddV.v[1].w[i] = fZE16_32(fGETUHALF(1, VuV.uw[i])) - fZE16_32(fGETUHALF(1, VvV.uw[i]))) + + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vaddhw_acc,"Vxx32+=vaddh(Vu32,Vv32)","Vxx32.w+=vadd(Vu32.h,Vv32.h)", +"Vector addition with widen into two vectors", + VxxV.v[0].w[i] += fGETHALF(0, VuV.w[i]) + fGETHALF(0, VvV.w[i]); + VxxV.v[1].w[i] += fGETHALF(1, VuV.w[i]) + fGETHALF(1, VvV.w[i])) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vadduhw_acc,"Vxx32+=vadduh(Vu32,Vv32)","Vxx32.w+=vadd(Vu32.uh,Vv32.uh)", +"Vector addition with widen into two vectors", + VxxV.v[0].w[i] += fGETUHALF(0, VuV.w[i]) + fGETUHALF(0, VvV.w[i]); + VxxV.v[1].w[i] += fGETUHALF(1, VuV.w[i]) + fGETUHALF(1, VvV.w[i])) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vaddubh_acc,"Vxx32+=vaddub(Vu32,Vv32)","Vxx32.h+=vadd(Vu32.ub,Vv32.ub)", +"Vector addition with widen into two vectors", + VxxV.v[0].h[i] += fGETUBYTE(0, VuV.h[i]) + fGETUBYTE(0, VvV.h[i]); + VxxV.v[1].h[i] += fGETUBYTE(1, VuV.h[i]) + fGETUBYTE(1, VvV.h[i])) + + +/**************************** +* Conditional +****************************/ + +#define CONDADDSUB(WIDTH,TAGEND,LHSYN,RHSYN,DESCR,LHBEH,RHBEH) \ +ITERATOR_INSN2_ANY_SLOT(WIDTH,vadd##TAGEND##q,"if (Qv4."#TAGEND") "LHSYN"+="RHSYN,"if (Qv4) 
"LHSYN"+="RHSYN,DESCR,LHBEH=fCONDMASK##WIDTH(QvV,i,LHBEH+RHBEH,LHBEH)) \ +ITERATOR_INSN2_ANY_SLOT(WIDTH,vsub##TAGEND##q,"if (Qv4."#TAGEND") "LHSYN"-="RHSYN,"if (Qv4) "LHSYN"-="RHSYN,DESCR,LHBEH=fCONDMASK##WIDTH(QvV,i,LHBEH-RHBEH,LHBEH)) \ +ITERATOR_INSN2_ANY_SLOT(WIDTH,vadd##TAGEND##nq,"if (!Qv4."#TAGEND") "LHSYN"+="RHSYN,"if (!Qv4) "LHSYN"+="RHSYN,DESCR,LHBEH=fCONDMASK##WIDTH(QvV,i,LHBEH,LHBEH+RHBEH)) \ +ITERATOR_INSN2_ANY_SLOT(WIDTH,vsub##TAGEND##nq,"if (!Qv4."#TAGEND") "LHSYN"-="RHSYN,"if (!Qv4) "LHSYN"-="RHSYN,DESCR,LHBEH=fCONDMASK##WIDTH(QvV,i,LHBEH,LHBEH-RHBEH)) \ + +CONDADDSUB(8,b,"Vx32.b","Vu32.b","Conditional add/sub Byte",VxV.ub[i],VuV.ub[i]) +CONDADDSUB(16,h,"Vx32.h","Vu32.h","Conditional add/sub Half",VxV.h[i],VuV.h[i]) +CONDADDSUB(32,w,"Vx32.w","Vu32.w","Conditional add/sub Word",VxV.w[i],VuV.w[i]) + +/***************************************************** + ABSOLUTE VALUES +*****************************************************/ +// V65 +ITERATOR_INSN2_ANY_SLOT_NOV1(8,vabsb, "Vd32=vabsb(Vu32)", "Vd32.b=vabs(Vu32.b)", "Vector absolute value of bytes", VdV.b[i] = fABS(VuV.b[i])) +ITERATOR_INSN2_ANY_SLOT_NOV1(8,vabsb_sat, "Vd32=vabsb(Vu32):sat", "Vd32.b=vabs(Vu32.b):sat", "Vector absolute value of bytes", VdV.b[i] = fVSATB(fABS(fSE8_16(VuV.b[i])))) + + +ITERATOR_INSN2_ANY_SLOT(16,vabsh, "Vd32=vabsh(Vu32)", "Vd32.h=vabs(Vu32.h)", "Vector absolute value of halfwords", VdV.h[i] = fABS(VuV.h[i])) +ITERATOR_INSN2_ANY_SLOT(16,vabsh_sat, "Vd32=vabsh(Vu32):sat", "Vd32.h=vabs(Vu32.h):sat", "Vector absolute value of halfwords", VdV.h[i] = fVSATH(fABS(fSE16_32(VuV.h[i])))) +ITERATOR_INSN2_ANY_SLOT(32,vabsw, "Vd32=vabsw(Vu32)", "Vd32.w=vabs(Vu32.w)", "Vector absolute value of words", VdV.w[i] = fABS(VuV.w[i])) +ITERATOR_INSN2_ANY_SLOT(32,vabsw_sat, "Vd32=vabsw(Vu32):sat", "Vd32.w=vabs(Vu32.w):sat", "Vector absolute value of words", VdV.w[i] = fVSATW(fABS(fSE32_64(VuV.w[i])))) + + +/************************************************************************** + * MMVECTOR MULTIPLICATIONS + * ************************************************************************/ + + +/* Byte by Byte */ +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpybv,"Vdd32=vmpyb(Vu32,Vv32)","Vdd32.h=vmpy(Vu32.b,Vv32.b)", +"Vector absolute value of words", + VddV.v[0].h[i] = fMPY8SS(fGETBYTE(0, VuV.h[i]), fGETBYTE(0, VvV.h[i])); + VddV.v[1].h[i] = fMPY8SS(fGETBYTE(1, VuV.h[i]), fGETBYTE(1, VvV.h[i]))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpybv_acc,"Vxx32+=vmpyb(Vu32,Vv32)","Vxx32.h+=vmpy(Vu32.b,Vv32.b)", +"Vector absolute value of words", + VxxV.v[0].h[i] += fMPY8SS(fGETBYTE(0, VuV.h[i]), fGETBYTE(0, VvV.h[i])); + VxxV.v[1].h[i] += fMPY8SS(fGETBYTE(1, VuV.h[i]), fGETBYTE(1, VvV.h[i]))) + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyubv,"Vdd32=vmpyub(Vu32,Vv32)","Vdd32.uh=vmpy(Vu32.ub,Vv32.ub)", +"Vector absolute value of words", + VddV.v[0].uh[i] = fMPY8UU(fGETUBYTE(0, VuV.uh[i]), fGETUBYTE(0, VvV.uh[i]) ); + VddV.v[1].uh[i] = fMPY8UU(fGETUBYTE(1, VuV.uh[i]), fGETUBYTE(1, VvV.uh[i]) )) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyubv_acc,"Vxx32+=vmpyub(Vu32,Vv32)","Vxx32.uh+=vmpy(Vu32.ub,Vv32.ub)", +"Vector absolute value of words", + VxxV.v[0].uh[i] += fMPY8UU(fGETUBYTE(0, VuV.uh[i]), fGETUBYTE(0, VvV.uh[i]) ); + VxxV.v[1].uh[i] += fMPY8UU(fGETUBYTE(1, VuV.uh[i]), fGETUBYTE(1, VvV.uh[i]) )) + + + + + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpybusv,"Vdd32=vmpybus(Vu32,Vv32)","Vdd32.h=vmpy(Vu32.ub,Vv32.b)", +"Vector absolute value of words", + VddV.v[0].h[i] = fMPY8US(fGETUBYTE(0, VuV.uh[i]), fGETBYTE(0, VvV.h[i])); + 
VddV.v[1].h[i] = fMPY8US(fGETUBYTE(1, VuV.uh[i]), fGETBYTE(1, VvV.h[i]))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpybusv_acc,"Vxx32+=vmpybus(Vu32,Vv32)","Vxx32.h+=vmpy(Vu32.ub,Vv32.b)", +"Vector absolute value of words", + VxxV.v[0].h[i] += fMPY8US(fGETUBYTE(0, VuV.uh[i]), fGETBYTE(0, VvV.h[i])); + VxxV.v[1].h[i] += fMPY8US(fGETUBYTE(1, VuV.uh[i]), fGETBYTE(1, VvV.h[i]))) + + + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpabusv,"Vdd32=vmpabus(Vuu32,Vvv32)","Vdd32.h=vmpa(Vuu32.ub,Vvv32.b)", +"Vertical Byte Multiply", + VddV.v[0].h[i] = fMPY8US(fGETUBYTE(0, VuuV.v[0].uh[i]), fGETBYTE(0, VvvV.v[0].uh[i])) + fMPY8US(fGETUBYTE(0, VuuV.v[1].uh[i]), fGETBYTE(0, VvvV.v[1].uh[i])); + VddV.v[1].h[i] = fMPY8US(fGETUBYTE(1, VuuV.v[0].uh[i]), fGETBYTE(1, VvvV.v[0].uh[i])) + fMPY8US(fGETUBYTE(1, VuuV.v[1].uh[i]), fGETBYTE(1, VvvV.v[1].uh[i]))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpabuuv,"Vdd32=vmpabuu(Vuu32,Vvv32)","Vdd32.h=vmpa(Vuu32.ub,Vvv32.ub)", +"Vertical Byte Multiply", + VddV.v[0].h[i] = fMPY8UU(fGETUBYTE(0, VuuV.v[0].uh[i]), fGETUBYTE(0, VvvV.v[0].uh[i])) + fMPY8UU(fGETUBYTE(0, VuuV.v[1].uh[i]), fGETUBYTE(0, VvvV.v[1].uh[i])); + VddV.v[1].h[i] = fMPY8UU(fGETUBYTE(1, VuuV.v[0].uh[i]), fGETUBYTE(1, VvvV.v[0].uh[i])) + fMPY8UU(fGETUBYTE(1, VuuV.v[1].uh[i]), fGETUBYTE(1, VvvV.v[1].uh[i]))) + + + + + + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhv,"Vdd32=vmpyh(Vu32,Vv32)","Vdd32.w=vmpy(Vu32.h,Vv32.h)", +"Vector by Vector Halfword Multiply", + VddV.v[0].w[i] = fMPY16SS(fGETHALF(0, VuV.w[i]), fGETHALF(0, VvV.w[i])); + VddV.v[1].w[i] = fMPY16SS(fGETHALF(1, VuV.w[i]), fGETHALF(1, VvV.w[i]))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhv_acc,"Vxx32+=vmpyh(Vu32,Vv32)","Vxx32.w+=vmpy(Vu32.h,Vv32.h)", +"Vector by Vector Halfword Multiply", + VxxV.v[0].w[i] += fMPY16SS(fGETHALF(0, VuV.w[i]), fGETHALF(0, VvV.w[i])); + VxxV.v[1].w[i] += fMPY16SS(fGETHALF(1, VuV.w[i]), fGETHALF(1, VvV.w[i]))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyuhv,"Vdd32=vmpyuh(Vu32,Vv32)","Vdd32.uw=vmpy(Vu32.uh,Vv32.uh)", +"Vector by Vector Unsigned Halfword Multiply", + VddV.v[0].uw[i] = fMPY16UU(fGETUHALF(0, VuV.uw[i]), fGETUHALF(0, VvV.uw[i])); + VddV.v[1].uw[i] = fMPY16UU(fGETUHALF(1, VuV.uw[i]), fGETUHALF(1, VvV.uw[i]))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyuhv_acc,"Vxx32+=vmpyuh(Vu32,Vv32)","Vxx32.uw+=vmpy(Vu32.uh,Vv32.uh)", +"Vector by Vector Unsigned Halfword Multiply", + VxxV.v[0].uw[i] += fMPY16UU(fGETUHALF(0, VuV.uw[i]), fGETUHALF(0, VvV.uw[i])); + VxxV.v[1].uw[i] += fMPY16UU(fGETUHALF(1, VuV.uw[i]), fGETUHALF(1, VvV.uw[i]))) + + + +/* Vector by Vector */ +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyhvsrs,"Vd32=vmpyh(Vu32,Vv32):<<1:rnd:sat","Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat", +"Vector halfword multiply with round, shift, and sat16", + VdV.h[i] = fVSATH(fGETHALF(1,fVSAT(fROUND((fMPY16SS(VuV.h[i],VvV.h[i] )<<1)))))) + + + + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhus, "Vdd32=vmpyhus(Vu32,Vv32)","Vdd32.w=vmpy(Vu32.h,Vv32.uh)", +"Vector by Vector Halfword Multiply", + VddV.v[0].w[i] = fMPY16SU(fGETHALF(0, VuV.w[i]), fGETUHALF(0, VvV.uw[i])); + VddV.v[1].w[i] = fMPY16SU(fGETHALF(1, VuV.w[i]), fGETUHALF(1, VvV.uw[i]))) + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhus_acc, "Vxx32+=vmpyhus(Vu32,Vv32)","Vxx32.w+=vmpy(Vu32.h,Vv32.uh)", +"Vector by Vector Halfword Multiply", + VxxV.v[0].w[i] += fMPY16SU(fGETHALF(0, VuV.w[i]), fGETUHALF(0, VvV.uw[i])); + VxxV.v[1].w[i] += fMPY16SU(fGETHALF(1, VuV.w[i]), fGETUHALF(1, VvV.uw[i]))) + + + + 
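The widening multiplies above all follow the same per-lane pattern: DO_FOR_EACH_CODE iterates fVFOREACH over the 32-bit lanes, and the even/odd halfwords of each lane (fGETHALF(0,...) / fGETHALF(1,...)) feed the two halves of the destination register pair. A rough stand-alone C model of that behaviour for vmpyhv follows; it is a sketch only, and the vector size, union layout, little-endian lane order, and names such as vmpyhv_model are assumptions here, not definitions from this file.

    #include <stdint.h>
    #include <stdio.h>

    #define VEC_BYTES 128                     /* assumed HVX vector length */
    #define WORDS     (VEC_BYTES / 4)         /* 32-bit lanes per vector */

    typedef union {
        int16_t h[VEC_BYTES / 2];             /* halfword view of a vector */
        int32_t w[WORDS];                     /* word view of the same bytes */
    } vec_t;

    /* Vdd32.w = vmpy(Vu32.h, Vv32.h): lane i of dst[0] holds the full 32-bit
     * product of the even halfwords of word lane i, dst[1] the odd halfwords,
     * mirroring the fGETHALF(0,...) / fGETHALF(1,...) split above. */
    static void vmpyhv_model(vec_t dst[2], const vec_t *u, const vec_t *v)
    {
        for (int i = 0; i < WORDS; i++) {
            int16_t u_even = (int16_t)(u->w[i] & 0xffff);
            int16_t v_even = (int16_t)(v->w[i] & 0xffff);
            int16_t u_odd  = (int16_t)(u->w[i] >> 16);
            int16_t v_odd  = (int16_t)(v->w[i] >> 16);
            dst[0].w[i] = (int32_t)u_even * v_even;
            dst[1].w[i] = (int32_t)u_odd  * v_odd;
        }
    }

    int main(void)
    {
        vec_t u = {0}, v = {0}, d[2] = {{0}, {0}};
        u.h[0] = -3;   v.h[0] = 1000;          /* even halfword of word lane 0 */
        u.h[1] = 7;    v.h[1] = -2;            /* odd halfword of word lane 0  */
        vmpyhv_model(d, &u, &v);
        printf("%d %d\n", (int)d[0].w[0], (int)d[1].w[0]);   /* -3000 -14 */
        return 0;
    }

The _acc variants in this group differ only in accumulating into the existing destination lanes (+=) instead of overwriting them.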
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyih,"Vd32=vmpyih(Vu32,Vv32)","Vd32.h=vmpyi(Vu32.h,Vv32.h)", +"Vector by Vector Halfword Multiply", + VdV.h[i] = fMPY16SS(VuV.h[i], VvV.h[i])) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyih_acc,"Vx32+=vmpyih(Vu32,Vv32)","Vx32.h+=vmpyi(Vu32.h,Vv32.h)", +"Vector by Vector Halfword Multiply", + VxV.h[i] += fMPY16SS(VuV.h[i], VvV.h[i])) + + + +/* 32x32 high half / frac */ + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyewuh,"Vd32=vmpyewuh(Vu32,Vv32)","Vd32.w=vmpye(Vu32.w,Vv32.uh)", +"Vector by Vector Halfword Multiply", +VdV.w[i] = fMPY3216SU(VuV.w[i], fGETUHALF(0, VvV.w[i])) >> 16) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyowh,"Vd32=vmpyowh(Vu32,Vv32):<<1:sat","Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:sat", +"Vector by Vector Halfword Multiply", +VdV.w[i] = fVSATW((((fMPY3216SS(VuV.w[i], fGETHALF(1, VvV.w[i])) >> 14) + 0) >> 1))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyowh_rnd,"Vd32=vmpyowh(Vu32,Vv32):<<1:rnd:sat","Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat", +"Vector by Vector Halfword Multiply", +VdV.w[i] = fVSATW((((fMPY3216SS(VuV.w[i], fGETHALF(1, VvV.w[i])) >> 14) + 1) >> 1))) + +ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC(32,vmpyewuh_64,"Vdd32=vmpye(Vu32.w,Vv32.uh)", +"Word times Halfword Multiply, 64-bit result", + fHIDE(size8s_t prod;) + prod = fMPY32SU(VuV.w[i],fGETUHALF(0,VvV.w[i])); + VddV.v[1].w[i] = prod >> 16; + VddV.v[0].w[i] = prod << 16) + +ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC(32,vmpyowh_64_acc,"Vxx32+=vmpyo(Vu32.w,Vv32.h)", +"Word times Halfword Multiply, 64-bit result", + fHIDE(size8s_t prod;) + prod = fMPY32SS(VuV.w[i],fGETHALF(1,VvV.w[i])) + fSE32_64(VxxV.v[1].w[i]); + VxxV.v[1].w[i] = prod >> 16; + fSETHALF(0, VxxV.v[0].w[i], VxxV.v[0].w[i] >> 16); + fSETHALF(1, VxxV.v[0].w[i], prod & 0x0000ffff)) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyowh_sacc,"Vx32+=vmpyowh(Vu32,Vv32):<<1:sat:shift","Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:sat:shift", +"Vector by Vector Halfword Multiply", +IV1DEAD() VxV.w[i] = fVSATW(((((VxV.w[i] + fMPY3216SS(VuV.w[i], fGETHALF(1, VvV.w[i]))) >> 14) + 0) >> 1))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyowh_rnd_sacc,"Vx32+=vmpyowh(Vu32,Vv32):<<1:rnd:sat:shift","Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat:shift", +"Vector by Vector Halfword Multiply", +IV1DEAD() VxV.w[i] = fVSATW(((((VxV.w[i] + fMPY3216SS(VuV.w[i], fGETHALF(1, VvV.w[i]))) >> 14) + 1) >> 1))) + +/* For 32x32 integer / low half */ + +ITERATOR_INSN_MPY_SLOT(32,vmpyieoh,"Vd32.w=vmpyieo(Vu32.h,Vv32.h)","Odd/Even multiply for 32x32 low half", + VdV.w[i] = (fGETHALF(0,VuV.w[i])*fGETHALF(1,VvV.w[i])) << 16) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyiewuh,"Vd32=vmpyiewuh(Vu32,Vv32)","Vd32.w=vmpyie(Vu32.w,Vv32.uh)", +"Vector by Vector Word by Halfword Multiply", +IV1DEAD() VdV.w[i] = fMPY3216SU(VuV.w[i], fGETUHALF(0, VvV.w[i])) ) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyiowh,"Vd32=vmpyiowh(Vu32,Vv32)","Vd32.w=vmpyio(Vu32.w,Vv32.h)", +"Vector by Vector Word by Halfword Multiply", +IV1DEAD() VdV.w[i] = fMPY3216SS(VuV.w[i], fGETHALF(1, VvV.w[i])) ) + +/* Add back these... 
*/ + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyiewh_acc,"Vx32+=vmpyiewh(Vu32,Vv32)","Vx32.w+=vmpyie(Vu32.w,Vv32.h)", +"Vector by Vector Word by Halfword Multiply", +VxV.w[i] = VxV.w[i] + fMPY3216SS(VuV.w[i], fGETHALF(0, VvV.w[i])) ) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyiewuh_acc,"Vx32+=vmpyiewuh(Vu32,Vv32)","Vx32.w+=vmpyie(Vu32.w,Vv32.uh)", +"Vector by Vector Word by Halfword Multiply", +VxV.w[i] = VxV.w[i] + fMPY3216SU(VuV.w[i], fGETUHALF(0, VvV.w[i])) ) + + + + + + + +/* Vector by Scalar */ +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyub,"Vdd32=vmpyub(Vu32,Rt32)","Vdd32.uh=vmpy(Vu32.ub,Rt32.ub)", +"Vector absolute value of words", + VddV.v[0].uh[i] = fMPY8UU(fGETUBYTE(0, VuV.uh[i]), fGETUBYTE((2*i+0)%4, RtV)); + VddV.v[1].uh[i] = fMPY8UU(fGETUBYTE(1, VuV.uh[i]), fGETUBYTE((2*i+1)%4, RtV))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyub_acc,"Vxx32+=vmpyub(Vu32,Rt32)","Vxx32.uh+=vmpy(Vu32.ub,Rt32.ub)", +"Vector absolute value of words", + VxxV.v[0].uh[i] += fMPY8UU(fGETUBYTE(0, VuV.uh[i]), fGETUBYTE((2*i+0)%4, RtV)); + VxxV.v[1].uh[i] += fMPY8UU(fGETUBYTE(1, VuV.uh[i]), fGETUBYTE((2*i+1)%4, RtV))) + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpybus,"Vdd32=vmpybus(Vu32,Rt32)","Vdd32.h=vmpy(Vu32.ub,Rt32.b)", +"Vector absolute value of words", + VddV.v[0].h[i] = fMPY8US(fGETUBYTE(0, VuV.uh[i]), fGETBYTE((2*i+0)%4, RtV)); + VddV.v[1].h[i] = fMPY8US(fGETUBYTE(1, VuV.uh[i]), fGETBYTE((2*i+1)%4, RtV))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpybus_acc,"Vxx32+=vmpybus(Vu32,Rt32)","Vxx32.h+=vmpy(Vu32.ub,Rt32.b)", +"Vector absolute value of words", + VxxV.v[0].h[i] += fMPY8US(fGETUBYTE(0, VuV.uh[i]), fGETBYTE((2*i+0)%4, RtV)); + VxxV.v[1].h[i] += fMPY8US(fGETUBYTE(1, VuV.uh[i]), fGETBYTE((2*i+1)%4, RtV))) + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpabus,"Vdd32=vmpabus(Vuu32,Rt32)","Vdd32.h=vmpa(Vuu32.ub,Rt32.b)", +"Vertical Byte Multiply", + VddV.v[0].h[i] = fMPY8US(fGETUBYTE(0, VuuV.v[0].uh[i]), fGETBYTE(0, RtV)) + fMPY16SS(fGETUBYTE(0, VuuV.v[1].uh[i]), fGETBYTE(1, RtV)); + VddV.v[1].h[i] = fMPY8US(fGETUBYTE(1, VuuV.v[0].uh[i]), fGETBYTE(2, RtV)) + fMPY16SS(fGETUBYTE(1, VuuV.v[1].uh[i]), fGETBYTE(3, RtV))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpabus_acc,"Vxx32+=vmpabus(Vuu32,Rt32)","Vxx32.h+=vmpa(Vuu32.ub,Rt32.b)", +"Vertical Byte Multiply", + VxxV.v[0].h[i] += fMPY8US(fGETUBYTE(0, VuuV.v[0].uh[i]), fGETBYTE(0, RtV)) + fMPY16SS(fGETUBYTE(0, VuuV.v[1].uh[i]), fGETBYTE(1, RtV)); + VxxV.v[1].h[i] += fMPY8US(fGETUBYTE(1, VuuV.v[0].uh[i]), fGETBYTE(2, RtV)) + fMPY16SS(fGETUBYTE(1, VuuV.v[1].uh[i]), fGETBYTE(3, RtV))) + +// V65 + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC_NOV1(16,vmpabuu,"Vdd32=vmpabuu(Vuu32,Rt32)","Vdd32.h=vmpa(Vuu32.ub,Rt32.ub)", +"Vertical Byte Multiply", + VddV.v[0].uh[i] = fMPY8UU(fGETUBYTE(0, VuuV.v[0].uh[i]), fGETUBYTE(0, RtV)) + fMPY8UU(fGETUBYTE(0, VuuV.v[1].uh[i]), fGETUBYTE(1, RtV)); + VddV.v[1].uh[i] = fMPY8UU(fGETUBYTE(1, VuuV.v[0].uh[i]), fGETUBYTE(2, RtV)) + fMPY8UU(fGETUBYTE(1, VuuV.v[1].uh[i]), fGETUBYTE(3, RtV))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC_NOV1(16,vmpabuu_acc,"Vxx32+=vmpabuu(Vuu32,Rt32)","Vxx32.h+=vmpa(Vuu32.ub,Rt32.ub)", +"Vertical Byte Multiply", + VxxV.v[0].uh[i] += fMPY8UU(fGETUBYTE(0, VuuV.v[0].uh[i]), fGETUBYTE(0, RtV)) + fMPY8UU(fGETUBYTE(0, VuuV.v[1].uh[i]), fGETUBYTE(1, RtV)); + VxxV.v[1].uh[i] += fMPY8UU(fGETUBYTE(1, VuuV.v[0].uh[i]), fGETUBYTE(2, RtV)) + fMPY8UU(fGETUBYTE(1, VuuV.v[1].uh[i]), fGETUBYTE(3, RtV))) + + + + +/* Half by Byte */ 
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpahb,"Vdd32=vmpahb(Vuu32,Rt32)","Vdd32.w=vmpa(Vuu32.h,Rt32.b)", +"Vertical Byte Multiply", + VddV.v[0].w[i] = fMPY16SS(fGETHALF(0, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(0, RtV))) + fMPY16SS(fGETHALF(0, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(1, RtV))); + VddV.v[1].w[i] = fMPY16SS(fGETHALF(1, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(2, RtV))) + fMPY16SS(fGETHALF(1, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(3, RtV)))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpahb_acc,"Vxx32+=vmpahb(Vuu32,Rt32)","Vxx32.w+=vmpa(Vuu32.h,Rt32.b)", +"Vertical Byte Multiply", + VxxV.v[0].w[i] += fMPY16SS(fGETHALF(0, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(0, RtV))) + fMPY16SS(fGETHALF(0, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(1, RtV))); + VxxV.v[1].w[i] += fMPY16SS(fGETHALF(1, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(2, RtV))) + fMPY16SS(fGETHALF(1, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(3, RtV)))) + +/* Half by Byte */ +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpauhb,"Vdd32=vmpauhb(Vuu32,Rt32)","Vdd32.w=vmpa(Vuu32.uh,Rt32.b)", +"Vertical Byte Multiply", + VddV.v[0].w[i] = fMPY16US(fGETUHALF(0, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(0, RtV))) + fMPY16US(fGETUHALF(0, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(1, RtV))); + VddV.v[1].w[i] = fMPY16US(fGETUHALF(1, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(2, RtV))) + fMPY16US(fGETUHALF(1, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(3, RtV)))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpauhb_acc,"Vxx32+=vmpauhb(Vuu32,Rt32)","Vxx32.w+=vmpa(Vuu32.uh,Rt32.b)", +"Vertical Byte Multiply", + VxxV.v[0].w[i] += fMPY16US(fGETUHALF(0, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(0, RtV))) + fMPY16US(fGETUHALF(0, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(1, RtV))); + VxxV.v[1].w[i] += fMPY16US(fGETUHALF(1, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(2, RtV))) + fMPY16US(fGETUHALF(1, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(3, RtV)))) + + + + + + + +/* Half by Half */ +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyh,"Vdd32=vmpyh(Vu32,Rt32)","Vdd32.w=vmpy(Vu32.h,Rt32.h)", +"Vector absolute value of words", + VddV.v[0].w[i] = fMPY16SS(fGETHALF(0, VuV.w[i]), fGETHALF(0, RtV)); + VddV.v[1].w[i] = fMPY16SS(fGETHALF(1, VuV.w[i]), fGETHALF(1, RtV))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC_NOV1(32,vmpyh_acc,"Vxx32+=vmpyh(Vu32,Rt32)","Vxx32.w+=vmpy(Vu32.h,Rt32.h)", +"Vector even halfwords with scalar lower halfword multiply with shift and sat32", + VxxV.v[0].w[i] = fCAST8s(VxxV.v[0].w[i]) + fMPY16SS(fGETHALF(0, VuV.w[i]), fGETHALF(0, RtV)); + VxxV.v[1].w[i] = fCAST8s(VxxV.v[1].w[i]) + fMPY16SS(fGETHALF(1, VuV.w[i]), fGETHALF(1, RtV))) + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhsat_acc,"Vxx32+=vmpyh(Vu32,Rt32):sat","Vxx32.w+=vmpy(Vu32.h,Rt32.h):sat", +"Vector even halfwords with scalar lower halfword multiply with shift and sat32", + VxxV.v[0].w[i] = fVSATW(fCAST8s(VxxV.v[0].w[i]) + fMPY16SS(fGETHALF(0, VuV.w[i]), fGETHALF(0, RtV))); + VxxV.v[1].w[i] = fVSATW(fCAST8s(VxxV.v[1].w[i]) + fMPY16SS(fGETHALF(1, VuV.w[i]), fGETHALF(1, RtV)))) + + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhss,"Vd32=vmpyh(Vu32,Rt32):<<1:sat","Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat", +"Vector halfword by halfword multiply, shift by 1, and take upper 16 msb", + fSETHALF(0,VdV.w[i],fVSATH(fGETHALF(1,fVSAT((fMPY16SS(fGETHALF(0,VuV.w[i]),fGETHALF(0,RtV))<<1))))); + fSETHALF(1,VdV.w[i],fVSATH(fGETHALF(1,fVSAT((fMPY16SS(fGETHALF(1,VuV.w[i]),fGETHALF(1,RtV))<<1))))); +) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhsrs,"Vd32=vmpyh(Vu32,Rt32):<<1:rnd:sat","Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat", +"Vector halfword with scalar halfword multiply with round, shift, and sat16", + 
fSETHALF(0,VdV.w[i],fVSATH(fGETHALF(1,fVSAT(fROUND((fMPY16SS(fGETHALF(0,VuV.w[i]),fGETHALF(0,RtV))<<1)))))); + fSETHALF(1,VdV.w[i],fVSATH(fGETHALF(1,fVSAT(fROUND((fMPY16SS(fGETHALF(1,VuV.w[i]),fGETHALF(1,RtV))<<1)))))); +) + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyuh,"Vdd32=vmpyuh(Vu32,Rt32)","Vdd32.uw=vmpy(Vu32.uh,Rt32.uh)", +"Vector even halfword unsigned multiply by scalar", + VddV.v[0].uw[i] = fMPY16UU(fGETUHALF(0, VuV.uw[i]),fGETUHALF(0,RtV)); + VddV.v[1].uw[i] = fMPY16UU(fGETUHALF(1, VuV.uw[i]),fGETUHALF(1,RtV))) + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyuh_acc,"Vxx32+=vmpyuh(Vu32,Rt32)","Vxx32.uw+=vmpy(Vu32.uh,Rt32.uh)", +"Vector even halfword unsigned multiply by scalar", + VxxV.v[0].uw[i] += fMPY16UU(fGETUHALF(0, VuV.uw[i]),fGETUHALF(0,RtV)); + VxxV.v[1].uw[i] += fMPY16UU(fGETUHALF(1, VuV.uw[i]),fGETUHALF(1,RtV))) + + + + +/******************************************** +* HALF BY BYTE +********************************************/ +ITERATOR_INSN2_MPY_SLOT(16,vmpyihb,"Vd32=vmpyihb(Vu32,Rt32)","Vd32.h=vmpyi(Vu32.h,Rt32.b)", +"Vector word by byte multiply, keep lower result", +VdV.h[i] = fMPY16SS(VuV.h[i], fGETBYTE(i % 4, RtV) )) + +ITERATOR_INSN2_MPY_SLOT(16,vmpyihb_acc,"Vx32+=vmpyihb(Vu32,Rt32)","Vx32.h+=vmpyi(Vu32.h,Rt32.b)", +"Vector word by byte multiply, keep lower result", +VxV.h[i] += fMPY16SS(VuV.h[i], fGETBYTE(i % 4, RtV) )) + + +/******************************************** +* WORD BY BYTE +********************************************/ +ITERATOR_INSN2_MPY_SLOT(32,vmpyiwb,"Vd32=vmpyiwb(Vu32,Rt32)","Vd32.w=vmpyi(Vu32.w,Rt32.b)", +"Vector word by byte multiply, keep lower result", +VdV.w[i] = fMPY32SS(VuV.w[i], fGETBYTE(i % 4, RtV) )) + +ITERATOR_INSN2_MPY_SLOT(32,vmpyiwb_acc,"Vx32+=vmpyiwb(Vu32,Rt32)","Vx32.w+=vmpyi(Vu32.w,Rt32.b)", +"Vector word by byte multiply, keep lower result", +VxV.w[i] += fMPY32SS(VuV.w[i], fGETBYTE(i % 4, RtV) )) + +ITERATOR_INSN2_MPY_SLOT(32,vmpyiwub,"Vd32=vmpyiwub(Vu32,Rt32)","Vd32.w=vmpyi(Vu32.w,Rt32.ub)", +"Vector word by byte multiply, keep lower result", +VdV.w[i] = fMPY32SS(VuV.w[i], fGETUBYTE(i % 4, RtV) )) + +ITERATOR_INSN2_MPY_SLOT(32,vmpyiwub_acc,"Vx32+=vmpyiwub(Vu32,Rt32)","Vx32.w+=vmpyi(Vu32.w,Rt32.ub)", +"Vector word by byte multiply, keep lower result", +VxV.w[i] += fMPY32SS(VuV.w[i], fGETUBYTE(i % 4, RtV) )) + + +/******************************************** +* WORD BY HALF +********************************************/ +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyiwh,"Vd32=vmpyiwh(Vu32,Rt32)","Vd32.w=vmpyi(Vu32.w,Rt32.h)", +"Vector word by byte multiply, keep lower result", +VdV.w[i] = fMPY32SS(VuV.w[i], fGETHALF(i % 2, RtV))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyiwh_acc,"Vx32+=vmpyiwh(Vu32,Rt32)","Vx32.w+=vmpyi(Vu32.w,Rt32.h)", +"Vector word by byte multiply, keep lower result", +VxV.w[i] += fMPY32SS(VuV.w[i], fGETHALF(i % 2, RtV))) + + + + + + + + + + + + + + + + + + + +/************************************************************************** + * MMVECTOR LOGICAL OPERATIONS + * ************************************************************************/ +ITERATOR_INSN_ANY_SLOT(16,vand,"Vd32=vand(Vu32,Vv32)", "Vector Logical And", VdV.uh[i] = VuV.uh[i] & VvV.h[i]) +ITERATOR_INSN_ANY_SLOT(16,vor, "Vd32=vor(Vu32,Vv32)", "Vector Logical Or", VdV.uh[i] = VuV.uh[i] | VvV.h[i]) +ITERATOR_INSN_ANY_SLOT(16,vxor,"Vd32=vxor(Vu32,Vv32)", "Vector Logical XOR", VdV.uh[i] = VuV.uh[i] ^ VvV.h[i]) +ITERATOR_INSN_ANY_SLOT(16,vnot,"Vd32=vnot(Vu32)", "Vector Logical NOT", VdV.uh[i] = ~VuV.uh[i]) + + + + + 
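The vand(Q,R) and vcmp instructions that follow all rely on one convention: a Q register carries a single predicate bit per byte lane (fGETQBIT/fSETQBIT), and the 32-bit scalar Rt is read byte-wise with index i % 4, so its four bytes repeat across the vector. A minimal stand-alone sketch of that convention and of the vandqrt behaviour, with an assumed vector size and hypothetical helper names (get_qbit/set_qbit stand in for fGETQBIT/fSETQBIT):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define VEC_BYTES 128                           /* assumed HVX vector length */

    typedef struct { uint8_t bits[VEC_BYTES / 8]; } qreg_t;   /* 1 bit per byte lane */

    static int get_qbit(const qreg_t *q, int i)
    {
        return (q->bits[i / 8] >> (i % 8)) & 1;
    }

    static void set_qbit(qreg_t *q, int i, int v)
    {
        if (v) q->bits[i / 8] |=  (uint8_t)(1u << (i % 8));
        else   q->bits[i / 8] &= (uint8_t)~(1u << (i % 8));
    }

    /* Vd32=vand(Qu4,Rt32): byte lane i gets byte (i % 4) of Rt when the
     * predicate bit for that lane is set, and 0 otherwise. */
    static void vandqrt_model(uint8_t vd[VEC_BYTES], const qreg_t *qu, uint32_t rt)
    {
        for (int i = 0; i < VEC_BYTES; i++) {
            vd[i] = get_qbit(qu, i) ? (uint8_t)(rt >> (8 * (i % 4))) : 0;
        }
    }

    int main(void)
    {
        qreg_t q;
        uint8_t vd[VEC_BYTES];

        memset(&q, 0, sizeof(q));
        set_qbit(&q, 0, 1);                         /* enable byte lanes 0 and 5 */
        set_qbit(&q, 5, 1);
        vandqrt_model(vd, &q, 0x04030201u);
        printf("%d %d %d\n", vd[0], vd[1], vd[5]);  /* 1 0 2 */
        return 0;
    }

The compare forms below (vcmp.gt/.eq) write the same kind of per-lane bits, using 1, 2, or 4 predicate bits per element depending on element width, which is what the MASK and WIDTH arguments of the VCMP macro encode.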
+ITERATOR_INSN2_MPY_SLOT_LATE(8, vandqrt, +"Vd32.ub=vand(Qu4.ub,Rt32.ub)", "Vd32=vand(Qu4,Rt32)", "Insert Predicate into Vector", + VdV.ub[i] = fGETQBIT(QuV,i) ? fGETUBYTE(i % 4, RtV) : 0) + +ITERATOR_INSN2_MPY_SLOT_LATE(8, vandqrt_acc, +"Vx32.ub|=vand(Qu4.ub,Rt32.ub)", "Vx32|=vand(Qu4,Rt32)", "Insert Predicate into Vector", + VxV.ub[i] |= (fGETQBIT(QuV,i)) ? fGETUBYTE(i % 4, RtV) : 0) + +ITERATOR_INSN2_MPY_SLOT_LATE(8, vandnqrt, +"Vd32.ub=vand(!Qu4.ub,Rt32.ub)", "Vd32=vand(!Qu4,Rt32)", "Insert Predicate into Vector", + VdV.ub[i] = !fGETQBIT(QuV,i) ? fGETUBYTE(i % 4, RtV) : 0) + +ITERATOR_INSN2_MPY_SLOT_LATE(8, vandnqrt_acc, +"Vx32.ub|=vand(!Qu4.ub,Rt32.ub)", "Vx32|=vand(!Qu4,Rt32)", "Insert Predicate into Vector", + VxV.ub[i] |= !(fGETQBIT(QuV,i)) ? fGETUBYTE(i % 4, RtV) : 0) + + +ITERATOR_INSN2_MPY_SLOT_LATE(8, vandvrt, +"Qd4.ub=vand(Vu32.ub,Rt32.ub)", "Qd4=vand(Vu32,Rt32)", "Insert into Predicate", + fSETQBIT(QdV,i,((VuV.ub[i] & fGETUBYTE(i % 4, RtV)) != 0) ? 1 : 0)) + +ITERATOR_INSN2_MPY_SLOT_LATE(8, vandvrt_acc, +"Qx4.ub|=vand(Vu32.ub,Rt32.ub)", "Qx4|=vand(Vu32,Rt32)", "Insert into Predicate ", + fSETQBIT(QxV,i,fGETQBIT(QxV,i)|(((VuV.ub[i] & fGETUBYTE(i % 4, RtV)) != 0) ? 1 : 0))) + +ITERATOR_INSN_ANY_SLOT(8,vandvqv,"Vd32=vand(Qv4,Vu32)","Mask off bytes", +VdV.b[i] = fGETQBIT(QvV,i) ? VuV.b[i] : 0) +ITERATOR_INSN_ANY_SLOT(8,vandvnqv,"Vd32=vand(!Qv4,Vu32)","Mask off bytes", +VdV.b[i] = !fGETQBIT(QvV,i) ? VuV.b[i] : 0) + + + /*************************************************** + * Compare Vector with Vector + ***************************************************/ +#define VCMP(DEST, ASRC, ASRCOP, CMP, N, SRC, MASK, WIDTH) \ +{ \ + for(fHIDE(int) i = 0; i < fVBYTES(); i += WIDTH) { \ + fSETQBITS(DEST,WIDTH,MASK,i,ASRC ASRCOP ((VuV.SRC[i/WIDTH] CMP VvV.SRC[i/WIDTH]) ? MASK : 0)); \ + } \ + } + + +#define MMVEC_CMPGT(TYPE,TYPE2,TYPE3,DESCR,N,MASK,WIDTH,SRC) \ +EXTINSN(V6_vgt##TYPE, "Qd4=vcmp.gt(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" greater than", \ + VCMP(QdV, , , >, N, SRC, MASK, WIDTH)) \ +EXTINSN(V6_vgt##TYPE##_and, "Qx4&=vcmp.gt(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" greater than with predicate-and", \ + VCMP(QxV, fGETQBITS(QxV,WIDTH,MASK,i), &, >, N, SRC, MASK, WIDTH)) \ +EXTINSN(V6_vgt##TYPE##_or, "Qx4|=vcmp.gt(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" greater than with predicate-or", \ + VCMP(QxV, fGETQBITS(QxV,WIDTH,MASK,i), |, >, N, SRC, MASK, WIDTH)) \ +EXTINSN(V6_vgt##TYPE##_xor, "Qx4^=vcmp.gt(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" greater than with predicate-xor", \ + VCMP(QxV, fGETQBITS(QxV,WIDTH,MASK,i), ^, >, N, SRC, MASK, WIDTH)) + +#define MMVEC_CMP(TYPE,TYPE2,TYPE3,DESCR,N,MASK, WIDTH, SRC)\ +MMVEC_CMPGT(TYPE,TYPE2,TYPE3,DESCR,N,MASK,WIDTH,SRC) \ +EXTINSN(V6_veq##TYPE, "Qd4=vcmp.eq(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" equal to", \ + VCMP(QdV, , , ==, N, SRC, MASK, WIDTH)) \ +EXTINSN(V6_veq##TYPE##_and, "Qx4&=vcmp.eq(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" equalto with predicate-and", \ + VCMP(QxV, fGETQBITS(QxV,WIDTH,MASK,i), &, ==, N, SRC, MASK, WIDTH)) \ +EXTINSN(V6_veq##TYPE##_or, "Qx4|=vcmp.eq(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" equalto with predicate-or", \ + VCMP(QxV, fGETQBITS(QxV,WIDTH,MASK,i), |, ==, N, SRC, MASK, WIDTH)) \ +EXTINSN(V6_veq##TYPE##_xor, "Qx4^=vcmp.eq(Vu32." TYPE2 ",Vv32." 
TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" equalto with predicate-xor", \ + VCMP(QxV, fGETQBITS(QxV,WIDTH,MASK,i), ^, ==, N, SRC, MASK, WIDTH)) + + +MMVEC_CMP(w,"w","","Vector Word Compare ", fVELEM(32), 0xF, 4, w) +MMVEC_CMP(h,"h","","Vector Half Compare ", fVELEM(16), 0x3, 2, h) +MMVEC_CMP(b,"b","","Vector Half Compare ", fVELEM(8), 0x1, 1, b) +MMVEC_CMPGT(uw,"uw","","Vector Unsigned Half Compare ", fVELEM(32), 0xF, 4,uw) +MMVEC_CMPGT(uh,"uh","","Vector Unsigned Half Compare ", fVELEM(16), 0x3, 2,uh) +MMVEC_CMPGT(ub,"ub","","Vector Unsigned Byte Compare ", fVELEM(8), 0x1, 1,ub) + +/*************************************************** +* Predicate Operations +***************************************************/ + +EXTINSN(V6_pred_scalar2, "Qd4=vsetq(Rt32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), "Set Vector Predicate ", +{ + fHIDE(int i;) + for(i = 0; i < fVBYTES(); i++) fSETQBIT(QdV,i,(i < (RtV & (fVBYTES()-1))) ? 1 : 0); +}) + +EXTINSN(V6_pred_scalar2v2, "Qd4=vsetq2(Rt32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), "Set Vector Predicate ", +{ + fHIDE(int i;) + for(i = 0; i < fVBYTES(); i++) fSETQBIT(QdV,i,(i <= ((RtV-1) & (fVBYTES()-1))) ? 1 : 0); +}) + + +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, shuffeqw, "Qd4.h=vshuffe(Qs4.w,Qt4.w)","Shrink Predicate", fSETQBIT(QdV,i, (i & 2) ? fGETQBIT(QsV,i-2) : fGETQBIT(QtV,i) ) ) +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, shuffeqh, "Qd4.b=vshuffe(Qs4.h,Qt4.h)","Shrink Predicate", fSETQBIT(QdV,i, (i & 1) ? fGETQBIT(QsV,i-1) : fGETQBIT(QtV,i) ) ) +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, pred_or, "Qd4=or(Qs4,Qt4)","Vector Predicate Or", fSETQBIT(QdV,i,fGETQBIT(QsV,i) || fGETQBIT(QtV,i) ) ) +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, pred_and, "Qd4=and(Qs4,Qt4)","Vector Predicate And", fSETQBIT(QdV,i,fGETQBIT(QsV,i) && fGETQBIT(QtV,i) ) ) +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, pred_xor, "Qd4=xor(Qs4,Qt4)","Vector Predicate Xor", fSETQBIT(QdV,i,fGETQBIT(QsV,i) ^ fGETQBIT(QtV,i) ) ) +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, pred_or_n, "Qd4=or(Qs4,!Qt4)","Vector Predicate Or with not", fSETQBIT(QdV,i,fGETQBIT(QsV,i) || !fGETQBIT(QtV,i) ) ) +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, pred_and_n, "Qd4=and(Qs4,!Qt4)","Vector Predicate And with not", fSETQBIT(QdV,i,fGETQBIT(QsV,i) && !fGETQBIT(QtV,i) ) ) +ITERATOR_INSN_ANY_SLOT(8, pred_not, "Qd4=not(Qs4)","Vector Predicate Not", fSETQBIT(QdV,i,!fGETQBIT(QsV,i) ) ) + + + +EXTINSN(V6_vcmov, "if (Ps4) Vd32=Vu32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), "Conditional Mov", +{ +if (fLSBOLD(PsV)) { + fHIDE(int i;) + fVFOREACH(8, i) { + VdV.ub[i] = VuV.ub[i]; + } + } else {CANCEL;} +}) + +EXTINSN(V6_vncmov, "if (!Ps4) Vd32=Vu32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), "Conditional Mov", +{ +if (fLSBOLDNOT(PsV)) { + fHIDE(int i;) + fVFOREACH(8, i) { + VdV.ub[i] = VuV.ub[i]; + } + } else {CANCEL;} +}) + +EXTINSN(V6_vccombine, "if (Ps4) Vdd32=vcombine(Vu32,Vv32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA_DV), "Conditional Combine", +{ +if (fLSBOLD(PsV)) { + fHIDE(int i;) + fVFOREACH(8, i) { + VddV.v[0].ub[i] = VvV.ub[i]; + VddV.v[1].ub[i] = VuV.ub[i]; + } + } else {CANCEL;} +}) + +EXTINSN(V6_vnccombine, "if (!Ps4) Vdd32=vcombine(Vu32,Vv32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA_DV), "Conditional Combine", +{ +if (fLSBOLDNOT(PsV)) { + fHIDE(int i;) + fVFOREACH(8, i) { + VddV.v[0].ub[i] = VvV.ub[i]; + VddV.v[1].ub[i] = VuV.ub[i]; + } + } else {CANCEL;} +}) + + + +ITERATOR_INSN_ANY_SLOT(8,vmux,"Vd32=vmux(Qt4,Vu32,Vv32)", +"Vector Select Element 8-bit", + VdV.ub[i] = fGETQBIT(QtV,i) ? 
VuV.ub[i] : VvV.ub[i]) + +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8,vswap,"Vdd32=vswap(Qt4,Vu32,Vv32)", +"Vector Swap Element 8-bit", + VddV.v[0].ub[i] = fGETQBIT(QtV,i) ? VuV.ub[i] : VvV.ub[i]; + VddV.v[1].ub[i] = !fGETQBIT(QtV,i) ? VuV.ub[i] : VvV.ub[i]) + + +/*************************************************************************** +* +* MMVECTOR SORTING +* +****************************************************************************/ + +#define MMVEC_SORT(TYPE,TYPE2,DESCR,ELEMENTSIZE,SRC)\ +ITERATOR_INSN2_ANY_SLOT(ELEMENTSIZE,vmax##TYPE, "Vd32=vmax" TYPE2 "(Vu32,Vv32)", "Vd32."#SRC"=vmax(Vu32."#SRC",Vv32."#SRC")", "Vector " DESCR " max", VdV.SRC[i] = (VuV.SRC[i] > VvV.SRC[i]) ? VuV.SRC[i] : VvV.SRC[i]) \ +ITERATOR_INSN2_ANY_SLOT(ELEMENTSIZE,vmin##TYPE, "Vd32=vmin" TYPE2 "(Vu32,Vv32)", "Vd32."#SRC"=vmin(Vu32."#SRC",Vv32."#SRC")", "Vector " DESCR " min", VdV.SRC[i] = (VuV.SRC[i] < VvV.SRC[i]) ? VuV.SRC[i] : VvV.SRC[i]) + +MMVEC_SORT(b,"b", "signed byte", 8, b) +MMVEC_SORT(ub,"ub", "unsigned byte", 8, ub) +MMVEC_SORT(uh,"uh", "unsigned halfword",16, uh) +MMVEC_SORT(h, "h", "halfword", 16, h) +MMVEC_SORT(w, "w", "word", 32, w) + + + + + + + + + +/************************************************************* +* SHUFFLES +****************************************************************/ + +ITERATOR_INSN2_ANY_SLOT(16,vsathub,"Vd32=vsathub(Vu32,Vv32)","Vd32.ub=vsat(Vu32.h,Vv32.h)", +"Saturate and pack 32 halfwords to 32 unsigned bytes, and interleave them", + fSETBYTE(0, VdV.uh[i], fVSATUB(VvV.h[i])); + fSETBYTE(1, VdV.uh[i], fVSATUB(VuV.h[i]))) + +ITERATOR_INSN2_ANY_SLOT(32,vsatwh,"Vd32=vsatwh(Vu32,Vv32)","Vd32.h=vsat(Vu32.w,Vv32.w)", +"Saturate and pack 16 words to 16 halfwords, and interleave them", + fSETHALF(0, VdV.w[i], fVSATH(VvV.w[i])); + fSETHALF(1, VdV.w[i], fVSATH(VuV.w[i]))) + +ITERATOR_INSN2_ANY_SLOT(32,vsatuwuh,"Vd32=vsatuwuh(Vu32,Vv32)","Vd32.uh=vsat(Vu32.uw,Vv32.uw)", +"Saturate and pack 16 words to 16 halfwords, and interleave them", + fSETHALF(0, VdV.w[i], fVSATUH(VvV.uw[i])); + fSETHALF(1, VdV.w[i], fVSATUH(VuV.uw[i]))) + +ITERATOR_INSN2_ANY_SLOT(16,vshuffeb,"Vd32=vshuffeb(Vu32,Vv32)","Vd32.b=vshuffe(Vu32.b,Vv32.b)", +"Shuffle half words with in a lane", + fSETBYTE(0, VdV.uh[i], fGETUBYTE(0, VvV.uh[i])); + fSETBYTE(1, VdV.uh[i], fGETUBYTE(0, VuV.uh[i]))) + +ITERATOR_INSN2_ANY_SLOT(16,vshuffob,"Vd32=vshuffob(Vu32,Vv32)","Vd32.b=vshuffo(Vu32.b,Vv32.b)", +"Shuffle half words with in a lane", + fSETBYTE(0, VdV.uh[i], fGETUBYTE(1, VvV.uh[i])); + fSETBYTE(1, VdV.uh[i], fGETUBYTE(1, VuV.uh[i]))) + +ITERATOR_INSN2_ANY_SLOT(32,vshufeh,"Vd32=vshuffeh(Vu32,Vv32)","Vd32.h=vshuffe(Vu32.h,Vv32.h)", +"Shuffle half words with in a lane", + fSETHALF(0, VdV.uw[i], fGETUHALF(0, VvV.uw[i])); + fSETHALF(1, VdV.uw[i], fGETUHALF(0, VuV.uw[i]))) + +ITERATOR_INSN2_ANY_SLOT(32,vshufoh,"Vd32=vshuffoh(Vu32,Vv32)","Vd32.h=vshuffo(Vu32.h,Vv32.h)", +"Shuffle half words with in a lane", + fSETHALF(0, VdV.uw[i], fGETUHALF(1, VvV.uw[i])); + fSETHALF(1, VdV.uw[i], fGETUHALF(1, VuV.uw[i]))) + + + + +/************************************************************************** +* Double Vector Shuffles +**************************************************************************/ + +EXTINSN(V6_vshuff, "vshuff(Vy32,Vx32,Rt32)", +ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS), +"2x2->2x2 transpose, for multiple data sizes, inplace", +{ + fHIDE(int offset;) + for (offset=1; offset2x2 transpose for multiple data sizes", +{ + fHIDE(int offset;) + VddV.v[0] = VvV; + VddV.v[1] = VuV; + for (offset=1; offset>1; offset>0; offset>>=1) { 
+ if ( RtV & offset) { + fHIDE(int k;) \ + fVFOREACH(8, k) {\ + if (!( k & offset)) { + fSWAPB(VyV.ub[k], VxV.ub[k+offset]); + } + } + } + } + }) + +EXTINSN(V6_vdealvdd, "Vdd32=vdeal(Vu32,Vv32,Rt8)", +ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS), +" vector - vector deal - or deinterleave, for multiple data sizes", +{ + fHIDE(int offset;) + VddV.v[0] = VvV; + VddV.v[1] = VuV; + for (offset=fVBYTES()>>1; offset>0; offset>>=1) { + if ( RtV & offset) { + fHIDE(int k;) \ + fVFOREACH(8, k) {\ + if (!( k & offset)) { + fSWAPB(VddV.v[1].ub[k], VddV.v[0].ub[k+offset]); + } + } + } + } + }) + +/**************************************************************************/ + + + +ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(32,vshufoeh,"Vdd32=vshuffoeh(Vu32,Vv32)","Vdd32.h=vshuffoe(Vu32.h,Vv32.h)", +"Vector Shuffle half words", + fSETHALF(0, VddV.v[0].uw[i], fGETUHALF(0, VvV.uw[i])); + fSETHALF(1, VddV.v[0].uw[i], fGETUHALF(0, VuV.uw[i])); + fSETHALF(0, VddV.v[1].uw[i], fGETUHALF(1, VvV.uw[i])); + fSETHALF(1, VddV.v[1].uw[i], fGETUHALF(1, VuV.uw[i]))) + +ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(16,vshufoeb,"Vdd32=vshuffoeb(Vu32,Vv32)","Vdd32.b=vshuffoe(Vu32.b,Vv32.b)", +"Vector Shuffle bytes", + fSETBYTE(0, VddV.v[0].uh[i], fGETUBYTE(0, VvV.uh[i])); + fSETBYTE(1, VddV.v[0].uh[i], fGETUBYTE(0, VuV.uh[i])); + fSETBYTE(0, VddV.v[1].uh[i], fGETUBYTE(1, VvV.uh[i])); + fSETBYTE(1, VddV.v[1].uh[i], fGETUBYTE(1, VuV.uh[i]))) + + +/*************************************************************** +* Deal +***************************************************************/ + +ITERATOR_INSN2_PERMUTE_SLOT(32, vdealh, "Vd32=vdealh(Vu32)", "Vd32.h=vdeal(Vu32.h)", +"Deal Halfwords", + VdV.uh[i ] = fGETUHALF(0, VuV.uw[i]); + VdV.uh[i+fVELEM(32)] = fGETUHALF(1, VuV.uw[i])) + +ITERATOR_INSN2_PERMUTE_SLOT(16, vdealb, "Vd32=vdealb(Vu32)", "Vd32.b=vdeal(Vu32.b)", +"Deal Halfwords", + VdV.ub[i ] = fGETUBYTE(0, VuV.uh[i]); + VdV.ub[i+fVELEM(16)] = fGETUBYTE(1, VuV.uh[i])) + +ITERATOR_INSN2_PERMUTE_SLOT(32, vdealb4w, "Vd32=vdealb4w(Vu32,Vv32)", "Vd32.b=vdeale(Vu32.b,Vv32.b)", +"Deal Two Vectors Bytes", + VdV.ub[0+i ] = fGETUBYTE(0, VvV.uw[i]); + VdV.ub[fVELEM(32)+i ] = fGETUBYTE(2, VvV.uw[i]); + VdV.ub[2*fVELEM(32)+i] = fGETUBYTE(0, VuV.uw[i]); + VdV.ub[3*fVELEM(32)+i] = fGETUBYTE(2, VuV.uw[i])) + +/*************************************************************** +* shuffle +***************************************************************/ + +ITERATOR_INSN2_PERMUTE_SLOT(32, vshuffh, "Vd32=vshuffh(Vu32)", "Vd32.h=vshuff(Vu32.h)", +"Deal Halfwords", + fSETHALF(0, VdV.uw[i], VuV.uh[i]); + fSETHALF(1, VdV.uw[i], VuV.uh[i+fVELEM(32)])) + +ITERATOR_INSN2_PERMUTE_SLOT(16, vshuffb, "Vd32=vshuffb(Vu32)", "Vd32.b=vshuff(Vu32.b)", +"Deal Halfwords", + fSETBYTE(0, VdV.uh[i], VuV.ub[i]); + fSETBYTE(1, VdV.uh[i], VuV.ub[i+fVELEM(16)])) + + + + + +/*********************************************************** +* INSERT AND EXTRACT +*********************************************************/ +EXTINSN(V6_extractw, "Rd32=vextract(Vu32,Rs32)", +ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA,A_MEMLIKE,A_RESTRICT_SLOT0ONLY), +"Extract an element from a vector to scalar", +fHIDE(warn("RdN=%d VuN=%d RsN=%d RsV=0x%08x widx=%d",RdN,VuN,RsN,RsV,((RsV & (fVBYTES()-1)) >> 2));) +RdV = VuV.uw[ (RsV & (fVBYTES()-1)) >> 2]; +fHIDE(warn("RdV=0x%08x",RdV);)) + +EXTINSN(V6_vinsertwr, "Vx32.w=vinsert(Rt32)", +ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX), +"Insert Word Scalar into Vector", +VxV.uw[0] = RtV;) + + + + +ITERATOR_INSN_MPY_SLOT_LATE(32,lvsplatw, "Vd32=vsplat(Rt32)", "Replicates scalar accross words in 
vector", VdV.uw[i] = RtV) + +ITERATOR_INSN_MPY_SLOT_LATE(16,lvsplath, "Vd32.h=vsplat(Rt32)", "Replicates scalar accross halves in vector", VdV.uh[i] = RtV) + +ITERATOR_INSN_MPY_SLOT_LATE(8,lvsplatb, "Vd32.b=vsplat(Rt32)", "Replicates scalar accross bytes in vector", VdV.ub[i] = RtV) + + +ITERATOR_INSN_ANY_SLOT(32,vassign,"Vd32=Vu32","Copy a vector",VdV.w[i]=VuV.w[i]) + + +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8,vcombine,"Vdd32=vcombine(Vu32,Vv32)", +"Vector assign, Any two to Vector Pair", + VddV.v[0].ub[i] = VvV.ub[i]; + VddV.v[1].ub[i] = VuV.ub[i]) + + + +/////////////////////////////////////////////////////////////////////////// + + +/********************************************************* +* GENERAL PERMUTE NETWORKS +*********************************************************/ + + +EXTINSN(V6_vdelta, "Vd32=vdelta(Vu32,Vv32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), +"Reverse Benes Butterfly network ", +{ + fHIDE(int offset;) + fHIDE(int k;) + fHIDE(mmvector_t tmp;) + tmp = VuV; + for (offset=fVBYTES(); (offset>>=1)>0; ) { + for (k = 0; k>3; \ + unsigned char element = value & 7; \ + READ_EXT_VREG(regno,tmp,0); \ + tmp.uh[(128/16)*lane+(element)]++; \ + WRITE_EXT_VREG(regno,tmp,EXT_NEW); \ + } \ + } + +#define fHISTQ(INPUTVEC,QVAL) \ + fUARCH_NOTE_PUMP_4X(); \ + fHIDE(int lane;) \ + fHIDE(mmvector_t tmp;) \ + fVFOREACH(128, lane) { \ + for (fHIDE(int )i=0; i<128/8; ++i) { \ + unsigned char value = INPUTVEC.ub[(128/8)*lane+i]; \ + unsigned char regno = value>>3; \ + unsigned char element = value & 7; \ + READ_EXT_VREG(regno,tmp,0); \ + if (fGETQBIT(QVAL,128/8*lane+i)) tmp.uh[(128/16)*lane+(element)]++; \ + WRITE_EXT_VREG(regno,tmp,EXT_NEW); \ + } \ + } + + + +EXTINSN(V6_vhist, "vhist",ATTRIBS(A_EXTENSION,A_CVI,A_CVI_4SLOT), "vhist instruction",{ fHIDE(mmvector_t inputVec;) inputVec=fTMPVDATA(); fHIST(inputVec); }) +EXTINSN(V6_vhistq, "vhist(Qv4)",ATTRIBS(A_EXTENSION,A_CVI,A_CVI_4SLOT), "vhist instruction",{ fHIDE(mmvector_t inputVec;) inputVec=fTMPVDATA(); fHISTQ(inputVec,QvV); }) + +#undef fHIST +#undef fHISTQ + + +/* **** WEIGHTED HISTOGRAM **** */ + + +#if 1 +#define WHIST(EL,MASK,BSHIFT,COND,SATF) \ + fHIDE(unsigned int) bucket = fGETUBYTE(0,input.h[i]); \ + fHIDE(unsigned int) weight = fGETUBYTE(1,input.h[i]); \ + fHIDE(unsigned int) vindex = (bucket >> 3) & 0x1F; \ + fHIDE(unsigned int) elindex = ((i>>BSHIFT) & (~MASK)) | ((bucket>>BSHIFT) & MASK); \ + fHIDE(mmvector_t tmp;) \ + READ_EXT_VREG(vindex,tmp,0); \ + COND tmp.EL[elindex] = SATF(tmp.EL[elindex] + weight); \ + WRITE_EXT_VREG(vindex,tmp,EXT_NEW); \ + fUARCH_NOTE_PUMP_2X(); + +ITERATOR_INSN_VHISTLIKE(16,vwhist256,"vwhist256","vector weighted histogram halfword counters", WHIST(uh,7,0,,)) +ITERATOR_INSN_VHISTLIKE(16,vwhist256q,"vwhist256(Qv4)","vector weighted histogram halfword counters", WHIST(uh,7,0,if (fGETQBIT(QvV,2*i)),)) +ITERATOR_INSN_VHISTLIKE(16,vwhist256_sat,"vwhist256:sat","vector weighted histogram halfword counters", WHIST(uh,7,0,,fVSATUH)) +ITERATOR_INSN_VHISTLIKE(16,vwhist256q_sat,"vwhist256(Qv4):sat","vector weighted histogram halfword counters", WHIST(uh,7,0,if (fGETQBIT(QvV,2*i)),fVSATUH)) +ITERATOR_INSN_VHISTLIKE(16,vwhist128,"vwhist128","vector weighted histogram word counters", WHIST(uw,3,1,,)) +ITERATOR_INSN_VHISTLIKE(16,vwhist128q,"vwhist128(Qv4)","vector weighted histogram word counters", WHIST(uw,3,1,if (fGETQBIT(QvV,2*i)),)) +ITERATOR_INSN_VHISTLIKE(16,vwhist128m,"vwhist128(#u1)","vector weighted histogram word counters", WHIST(uw,3,1,if ((bucket & 1) == uiV),)) 
+ITERATOR_INSN_VHISTLIKE(16,vwhist128qm,"vwhist128(Qv4,#u1)","vector weighted histogram word counters", WHIST(uw,3,1,if (((bucket & 1) == uiV) && fGETQBIT(QvV,2*i)),)) + + +#endif + + + +/* ****** lookup table instructions *********** */ + +/* Use low bits from idx to choose next-bigger elements from vector, then use LSB from idx to choose odd or even element */ + +ITERATOR_INSN_PERMUTE_SLOT(8,vlutvvb,"Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8)","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;) +matchval = RtV & 0x7; +oddhalf = (RtV >> (fVECLOGSIZE()-6)) & 0x1; +idx = VuV.ub[i]; +VdV.b[i] = ((idx & 0xE0) == (matchval << 5)) ? fGETBYTE(oddhalf,VvV.h[idx % fVELEM(16)]) : 0) + + +ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(8,vlutvvb_oracc,"Vx32.b|=vlut32(Vu32.b,Vv32.b,Rt8)","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;) +matchval = RtV & 0x7; +oddhalf = (RtV >> (fVECLOGSIZE()-6)) & 0x1; +idx = VuV.ub[i]; +VxV.b[i] |= ((idx & 0xE0) == (matchval << 5)) ? fGETBYTE(oddhalf,VvV.h[idx % fVELEM(16)]) : 0) + +ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(16,vlutvwh,"Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8)","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;) +matchval = RtV & 0xF; +oddhalf = (RtV >> (fVECLOGSIZE()-6)) & 0x1; +idx = fGETUBYTE(0,VuV.uh[i]); +VddV.v[0].h[i] = ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0; +idx = fGETUBYTE(1,VuV.uh[i]); +VddV.v[1].h[i] = ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0) + +ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(16,vlutvwh_oracc,"Vxx32.h|=vlut16(Vu32.b,Vv32.h,Rt8)","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;) +matchval = fGETUBYTE(0,RtV) & 0xF; +oddhalf = (RtV >> (fVECLOGSIZE()-6)) & 0x1; +idx = fGETUBYTE(0,VuV.uh[i]); +VxxV.v[0].h[i] |= ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0; +idx = fGETUBYTE(1,VuV.uh[i]); +VxxV.v[1].h[i] |= ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0) + +ITERATOR_INSN_PERMUTE_SLOT(8,vlutvvbi,"Vd32.b=vlut32(Vu32.b,Vv32.b,#u3)","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;) +matchval = uiV & 0x7; +oddhalf = (uiV >> (fVECLOGSIZE()-6)) & 0x1; +idx = VuV.ub[i]; +VdV.b[i] = ((idx & 0xE0) == (matchval << 5)) ? fGETBYTE(oddhalf,VvV.h[idx % fVELEM(16)]) : 0) + + +ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(8,vlutvvb_oracci,"Vx32.b|=vlut32(Vu32.b,Vv32.b,#u3)","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;) +matchval = uiV & 0x7; +oddhalf = (uiV >> (fVECLOGSIZE()-6)) & 0x1; +idx = VuV.ub[i]; +VxV.b[i] |= ((idx & 0xE0) == (matchval << 5)) ? fGETBYTE(oddhalf,VvV.h[idx % fVELEM(16)]) : 0) + +ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(16,vlutvwhi,"Vdd32.h=vlut16(Vu32.b,Vv32.h,#u3)","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;) +matchval = uiV & 0xF; +oddhalf = (uiV >> (fVECLOGSIZE()-6)) & 0x1; +idx = fGETUBYTE(0,VuV.uh[i]); +VddV.v[0].h[i] = ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0; +idx = fGETUBYTE(1,VuV.uh[i]); +VddV.v[1].h[i] = ((idx & 0xF0) == (matchval << 4)) ? 
fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0) + +ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(16,vlutvwh_oracci,"Vxx32.h|=vlut16(Vu32.b,Vv32.h,#u3)","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;) +matchval = uiV & 0xF; +oddhalf = (uiV >> (fVECLOGSIZE()-6)) & 0x1; +idx = fGETUBYTE(0,VuV.uh[i]); +VxxV.v[0].h[i] |= ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0; +idx = fGETUBYTE(1,VuV.uh[i]); +VxxV.v[1].h[i] |= ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0) + +ITERATOR_INSN_PERMUTE_SLOT(8,vlutvvb_nm,"Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8):nomatch","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int oddhalf;) fHIDE(int matchval;) + matchval = RtV & 0x7; + oddhalf = (RtV >> (fVECLOGSIZE()-6)) & 0x1; + idx = VuV.ub[i]; + idx = (idx&0x1F) | (matchval<<5); + VdV.b[i] = fGETBYTE(oddhalf,VvV.h[idx % fVELEM(16)])) + +ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(16,vlutvwh_nm,"Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8):nomatch","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int oddhalf;) fHIDE(int matchval;) + matchval = RtV & 0xF; + oddhalf = (RtV >> (fVECLOGSIZE()-6)) & 0x1; + idx = fGETUBYTE(0,VuV.uh[i]); + idx = (idx&0x0F) | (matchval<<4); + VddV.v[0].h[i] = fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]); + idx = fGETUBYTE(1,VuV.uh[i]); + idx = (idx&0x0F) | (matchval<<4); + VddV.v[1].h[i] = fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)])) + + + + +/****************************************************************************** +NON LINEAR - V65 + ******************************************************************************/ + +ITERATOR_INSN_SLOT2_DOUBLE_VEC(16,vmpahhsat,"Vx32.h=vmpa(Vx32.h,Vu32.h,Rtt32.h):sat","piecewise linear approximation", + VxV.h[i]= fVSATH( ( ( fMPY16SS(VxV.h[i],VuV.h[i])<<1) + (fGETHALF(( (VuV.h[i]>>14)&0x3), RttV )<<15))>>16)) + + +ITERATOR_INSN_SLOT2_DOUBLE_VEC(16,vmpauhuhsat,"Vx32.h=vmpa(Vx32.h,Vu32.uh,Rtt32.uh):sat","piecewise linear approximation", + VxV.h[i]= fVSATH( ( fMPY16SU(VxV.h[i],VuV.uh[i]) + (fGETUHALF(((VuV.uh[i]>>14)&0x3), RttV )<<15))>>16)) + +ITERATOR_INSN_SLOT2_DOUBLE_VEC(16,vmpsuhuhsat,"Vx32.h=vmps(Vx32.h,Vu32.uh,Rtt32.uh):sat","piecewise linear approximation", + VxV.h[i]= fVSATH( ( fMPY16SU(VxV.h[i],VuV.uh[i]) - (fGETUHALF(((VuV.uh[i]>>14)&0x3), RttV )<<15))>>16)) + + +ITERATOR_INSN_SLOT2_DOUBLE_VEC(16,vlut4,"Vd32.h=vlut4(Vu32.uh,Rtt32.h)","4 entry lookup table", + VdV.h[i]= fGETHALF( ((VuV.h[i]>>14)&0x3), RttV )) + + + +/****************************************************************************** +V65 + ******************************************************************************/ + +ITERATOR_INSN_MPY_SLOT_NOV1(32,vmpyuhe,"Vd32.uw=vmpye(Vu32.uh,Rt32.uh)", +"Vector even halfword unsigned multiply by scalar", + VdV.uw[i] = fMPY16UU(fGETUHALF(0, VuV.uw[i]),fGETUHALF(0,RtV))) + + +ITERATOR_INSN_MPY_SLOT_NOV1(32,vmpyuhe_acc,"Vx32.uw+=vmpye(Vu32.uh,Rt32.uh)", +"Vector even halfword unsigned multiply by scalar", + VxV.uw[i] += fMPY16UU(fGETUHALF(0, VuV.uw[i]),fGETUHALF(0,RtV))) + + + + +EXTINSN(V6_vgathermw, "vtmp.w=vgather(Rt32,Mu2,Vv32.w).w", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_GATHER,A_CVI_VA,A_CVI_VM,A_CVI_TMP_DST,A_MEMLIKE), "Gather Words", +{ + fHIDE(int i;) + fHIDE(int element_size = 4;) + fHIDE(fGATHER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + EA = RtV+VvV.uw[i]; + fVLOG_VTCM_GATHER_WORD(EA, VvV.uw[i], i,MuV); + } + fGATHER_FINISH() +}) +EXTINSN(V6_vgathermh, 
"vtmp.h=vgather(Rt32,Mu2,Vv32.h).h", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_GATHER,A_CVI_VA,A_CVI_VM,A_CVI_TMP_DST,A_MEMLIKE), "Gather halfwords", +{ + fHIDE(int i;) + fHIDE(int element_size = 2;) + fHIDE(fGATHER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(16, i) { + EA = RtV+VvV.uh[i]; + fVLOG_VTCM_GATHER_HALFWORD(EA, VvV.uh[i], i,MuV); + } + fGATHER_FINISH() +}) + + + +EXTINSN(V6_vgathermhw, "vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_GATHER,A_CVI_VA_DV,A_CVI_VM,A_CVI_TMP_DST,A_MEMLIKE), "Gather halfwords", +{ + fHIDE(int i;) + fHIDE(int j;) + fHIDE(int element_size = 2;) + fHIDE(fGATHER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + for(j = 0; j < 2; j++) { + EA = RtV+VvvV.v[j].uw[i]; + fVLOG_VTCM_GATHER_HALFWORD_DV(EA, VvvV.v[j].uw[i], (2*i+j),i,j,MuV); + } + } + fGATHER_FINISH() +}) + + +EXTINSN(V6_vgathermwq, "if (Qs4) vtmp.w=vgather(Rt32,Mu2,Vv32.w).w", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_GATHER,A_CVI_VA,A_CVI_VM,A_CVI_TMP_DST,A_MEMLIKE), "Gather Words", +{ + fHIDE(int i;) + fHIDE(int element_size = 4;) + fHIDE(fGATHER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + EA = RtV+VvV.uw[i]; + fVLOG_VTCM_GATHER_WORDQ(EA, VvV.uw[i], i,QsV,MuV); + } + fGATHER_FINISH() +}) +EXTINSN(V6_vgathermhq, "if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vv32.h).h", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_GATHER,A_CVI_VA,A_CVI_VM,A_CVI_TMP_DST,A_MEMLIKE), "Gather halfwords", +{ + fHIDE(int i;) + fHIDE(int element_size = 2;) + fHIDE(fGATHER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(16, i) { + EA = RtV+VvV.uh[i]; + fVLOG_VTCM_GATHER_HALFWORDQ(EA, VvV.uh[i], i,QsV,MuV); + } + fGATHER_FINISH() +}) + + + +EXTINSN(V6_vgathermhwq, "if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_GATHER,A_CVI_VA_DV,A_CVI_VM,A_CVI_TMP_DST,A_MEMLIKE), "Gather halfwords", +{ + fHIDE(int i;) + fHIDE(int j;) + fHIDE(int element_size = 2;) + fHIDE(fGATHER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + for(j = 0; j < 2; j++) { + EA = RtV+VvvV.v[j].uw[i]; + fVLOG_VTCM_GATHER_HALFWORDQ_DV(EA, VvvV.v[j].uw[i], (2*i+j),i,j,QsV,MuV); + } + } + fGATHER_FINISH() +}) + + + +EXTINSN(V6_vscattermw , "vscatter(Rt32,Mu2,Vv32.w).w=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA,A_CVI_VM,A_MEMLIKE), "Scatter Words", +{ + fHIDE(int i;) + fHIDE(int element_size = 4;) + fHIDE(fSCATTER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + EA = RtV+VvV.uw[i]; + fVLOG_VTCM_WORD(EA, VvV.uw[i], VwV,i,MuV); + } + fSCATTER_FINISH(0) +}) + + + +EXTINSN(V6_vscattermh , "vscatter(Rt32,Mu2,Vv32.h).h=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA,A_CVI_VM,A_MEMLIKE), "Scatter halfWords", +{ + fHIDE(int i;) + fHIDE(int element_size = 2;) + fHIDE(fSCATTER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(16, i) { + EA = RtV+VvV.uh[i]; + fVLOG_VTCM_HALFWORD(EA,VvV.uh[i],VwV,i,MuV); + } + fSCATTER_FINISH(0) +}) + + +EXTINSN(V6_vscattermw_add, "vscatter(Rt32,Mu2,Vv32.w).w+=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA,A_CVI_VM,A_MEMLIKE), "Scatter Words-Add", +{ + fHIDE(int i;) + fHIDE(int ALIGNMENT=4;) + fHIDE(int element_size = 4;) + fHIDE(fSCATTER_INIT( RtV, MuV, 
element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + EA = (RtV+fVALIGN(VvV.uw[i],ALIGNMENT)); + fVLOG_VTCM_WORD_INCREMENT(EA,VvV.uw[i],VwV,i,ALIGNMENT,MuV); + } + fHIDE(fLOG_SCATTER_OP(4);) + fSCATTER_FINISH(1) +}) + +EXTINSN(V6_vscattermh_add, "vscatter(Rt32,Mu2,Vv32.h).h+=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA,A_CVI_VM,A_MEMLIKE), "Scatter halfword-Add", +{ + fHIDE(int i;) + fHIDE(int ALIGNMENT=2;) + fHIDE(int element_size = 2;) + fHIDE(fSCATTER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(16, i) { + EA = (RtV+fVALIGN(VvV.uh[i],ALIGNMENT)); + fVLOG_VTCM_HALFWORD_INCREMENT(EA,VvV.uh[i],VwV,i,ALIGNMENT,MuV); + } + fHIDE(fLOG_SCATTER_OP(2);) + fSCATTER_FINISH(1) +}) + + +EXTINSN(V6_vscattermwq, "if (Qs4) vscatter(Rt32,Mu2,Vv32.w).w=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA,A_CVI_VM,A_MEMLIKE), "Scatter Words conditional", +{ + fHIDE(int i;) + fHIDE(int element_size = 4;) + fHIDE(fSCATTER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + EA = RtV+VvV.uw[i]; + fVLOG_VTCM_WORDQ(EA,VvV.uw[i], VwV,i,QsV,MuV); + } + fSCATTER_FINISH(0) +}) + +EXTINSN(V6_vscattermhq, "if (Qs4) vscatter(Rt32,Mu2,Vv32.h).h=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA,A_CVI_VM,A_MEMLIKE), "Scatter HalfWords conditional", +{ + fHIDE(int i;) + fHIDE(int element_size = 2;) + fHIDE(fSCATTER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(16, i) { + EA = RtV+VvV.uh[i]; + fVLOG_VTCM_HALFWORDQ(EA,VvV.uh[i],VwV,i,QsV,MuV); + } + fSCATTER_FINISH(0) +}) + + + + +EXTINSN(V6_vscattermhw , "vscatter(Rt32,Mu2,Vvv32.w).h=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA_DV,A_CVI_VM,A_MEMLIKE), "Scatter Words", +{ + fHIDE(int i;) + fHIDE(int j;) + fHIDE(int element_size = 2;) + fHIDE(fSCATTER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + for(j = 0; j < 2; j++) { + EA = RtV+VvvV.v[j].uw[i]; + fVLOG_VTCM_HALFWORD_DV(EA,VvvV.v[j].uw[i],VwV,(2*i+j),i,j,MuV); + } + } + fSCATTER_FINISH(0) +}) + + + +EXTINSN(V6_vscattermhwq, "if (Qs4) vscatter(Rt32,Mu2,Vvv32.w).h=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA_DV,A_CVI_VM,A_MEMLIKE), "Scatter halfwords conditional", +{ + fHIDE(int i;) + fHIDE(int j;) + fHIDE(int element_size = 2;) + fHIDE(fSCATTER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + for(j = 0; j < 2; j++) { + EA = RtV+VvvV.v[j].uw[i]; + fVLOG_VTCM_HALFWORDQ_DV(EA,VvvV.v[j].uw[i],VwV,(2*i+j),QsV,i,j,MuV); + } + } + fSCATTER_FINISH(0) +}) + +EXTINSN(V6_vscattermhw_add, "vscatter(Rt32,Mu2,Vvv32.w).h+=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA_DV,A_CVI_VM,A_MEMLIKE), "Scatter halfwords-add", +{ + fHIDE(int i;) + fHIDE(int j;) + fHIDE(int ALIGNMENT=2;) + fHIDE(int element_size = 2;) + fHIDE(fSCATTER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + for(j = 0; j < 2; j++) { + EA = RtV + fVALIGN(VvvV.v[j].uw[i],ALIGNMENT);; + fVLOG_VTCM_HALFWORD_INCREMENT_DV(EA,VvvV.v[j].uw[i],VwV,(2*i+j),i,j,ALIGNMENT,MuV); + } + } + fHIDE(fLOG_SCATTER_OP(2);) + fSCATTER_FINISH(1) +}) + +EXTINSN(V6_vprefixqb,"Vd32.b=prefixsum(Qv4)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), "parallel prefix sum of Q into byte", +{ + fHIDE(int i;) + fHIDE(size1u_t 
acc = 0;) + fVFOREACH(8, i) { + acc += fGETQBIT(QvV,i); + VdV.ub[i] = acc; + } + } ) +EXTINSN(V6_vprefixqh,"Vd32.h=prefixsum(Qv4)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), "parallel prefix sum of Q into halfwords", +{ + fHIDE(int i;) + fHIDE(size2u_t acc = 0;) + fVFOREACH(16, i) { + acc += fGETQBIT(QvV,i*2+0); + acc += fGETQBIT(QvV,i*2+1); + VdV.uh[i] = acc; + } + } ) +EXTINSN(V6_vprefixqw,"Vd32.w=prefixsum(Qv4)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), "parallel prefix sum of Q into words", +{ + fHIDE(int i;) + fHIDE(size4u_t acc = 0;) + fVFOREACH(32, i) { + acc += fGETQBIT(QvV,i*4+0); + acc += fGETQBIT(QvV,i*4+1); + acc += fGETQBIT(QvV,i*4+2); + acc += fGETQBIT(QvV,i*4+3); + VdV.uw[i] = acc; + } + } ) + + + + + +/****************************************************************************** + DEBUG Vector/Register Printing + ******************************************************************************/ + +#define PRINT_VU(TYPE, TYPE2, COUNT)\ + int i; \ + size4u_t vec_len = fVBYTES();\ + fprintf(stdout,"V%2d: ",VuN); \ + for (i=0;i>COUNT;i++) { \ + fprintf(stdout,TYPE2 " ", VuV.TYPE[i]); \ + }; \ + fprintf(stdout,"\\n"); \ + fflush(stdout);\ + +#undef ATTR_VMEM +#undef ATTR_VMEMU +#undef ATTR_VMEM_NT + +#endif /* NO_MMVEC */ + +#ifdef __SELF_DEF_EXTINSN +#undef EXTINSN +#undef __SELF_DEF_EXTINSN +#endif diff --git a/target/hexagon/imported/mmvec/macros.def b/target/hexagon/imported/mmvec/macros.def new file mode 100755 index 0000000000..7e5438a998 --- /dev/null +++ b/target/hexagon/imported/mmvec/macros.def @@ -0,0 +1,842 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +DEF_MACRO(fDUMPQ, + do { + printf(STR ":" #REG ": 0x%016llx\n",REG.ud[0]); + } while (0), + () +) + +DEF_MACRO(fUSE_LOOKUP_ADDRESS_BY_REV, + PROC->arch_proc_options->mmvec_use_full_va_for_lookup, + () +) + +DEF_MACRO(fUSE_LOOKUP_ADDRESS, + 1, + () +) + +DEF_MACRO(fNOTQ, + ({mmqreg_t _ret = {0}; int _i_; for (_i_ = 0; _i_ < fVECSIZE()/64; _i_++) _ret.ud[_i_] = ~VAL.ud[_i_]; _ret;}), + () +) + +DEF_MACRO(fGETQBITS, + ((MASK) & (REG.w[(BITNO)>>5] >> ((BITNO) & 0x1f))), + () +) + +DEF_MACRO(fGETQBIT, + fGETQBITS(REG,1,1,BITNO), + () +) + +DEF_MACRO(fGENMASKW, + (((fGETQBIT(QREG,(IDX*4+0)) ? 0xFF : 0x0) << 0) + |((fGETQBIT(QREG,(IDX*4+1)) ? 0xFF : 0x0) << 8) + |((fGETQBIT(QREG,(IDX*4+2)) ? 0xFF : 0x0) << 16) + |((fGETQBIT(QREG,(IDX*4+3)) ? 0xFF : 0x0) << 24)), + () +) +DEF_MACRO(fGET10BIT, + { + COE = (((((fGETUBYTE(3,VAL) >> (2 * POS)) & 3) << 8) | fGETUBYTE(POS,VAL)) << 6); + COE >>= 6; + }, + () +) + +DEF_MACRO(fVMAX, + (X>Y) ? X : Y, + () +) + + +DEF_MACRO(fGETNIBBLE, + ( fSXTN(4,8,(SRC >> (4*IDX)) & 0xF) ), + () +) + +DEF_MACRO(fGETCRUMB, + ( fSXTN(2,8,(SRC >> (2*IDX)) & 0x3) ), + () +) + +DEF_MACRO(fGETCRUMB_SYMMETRIC, + ( (fGETCRUMB(IDX,SRC)>=0 ? 
(2-fGETCRUMB(IDX,SRC)) : fGETCRUMB(IDX,SRC) ) ), + () +) + +#define ZERO_OFFSET_2B + + +DEF_MACRO(fGENMASKH, + (((fGETQBIT(QREG,(IDX*2+0)) ? 0xFF : 0x0) << 0) + |((fGETQBIT(QREG,(IDX*2+1)) ? 0xFF : 0x0) << 8)), + () +) + +DEF_MACRO(fGETMASKW, + (VREG.w[IDX] & fGENMASKW((QREG),IDX)), + () +) + +DEF_MACRO(fGETMASKH, + (VREG.h[IDX] & fGENMASKH((QREG),IDX)), + () +) + +DEF_MACRO(fCONDMASK8, + (fGETQBIT(QREG,IDX) ? (YESVAL) : (NOVAL)), + () +) + +DEF_MACRO(fCONDMASK16, + ((fGENMASKH(QREG,IDX) & (YESVAL)) | (fGENMASKH(fNOTQ(QREG),IDX) & (NOVAL))), + () +) + +DEF_MACRO(fCONDMASK32, + ((fGENMASKW(QREG,IDX) & (YESVAL)) | (fGENMASKW(fNOTQ(QREG),IDX) & (NOVAL))), + () +) + + +DEF_MACRO(fSETQBITS, + do { + size4u_t __TMP = (VAL); + REG.w[(BITNO)>>5] &= ~((MASK) << ((BITNO) & 0x1f)); + REG.w[(BITNO)>>5] |= (((__TMP) & (MASK)) << ((BITNO) & 0x1f)); + } while (0), + () +) + +DEF_MACRO(fSETQBIT, + fSETQBITS(REG,1,1,BITNO,VAL), + () +) + +DEF_MACRO(fVBYTES, + (fVECSIZE()), + () +) + +DEF_MACRO(fVHALVES, + (fVECSIZE()/2), + () +) + +DEF_MACRO(fVWORDS, + (fVECSIZE()/4), + () +) + +DEF_MACRO(fVDWORDS, + (fVECSIZE()/8), + () +) + +DEF_MACRO(fVALIGN, + ( ADDR = ADDR & ~(LOG2_ALIGNMENT-1)), + () +) + +DEF_MACRO(fVLASTBYTE, + ( ADDR = ADDR | (LOG2_ALIGNMENT-1)), + () +) + + +DEF_MACRO(fVELEM, + ((fVECSIZE()*8)/WIDTH), + () +) + +DEF_MACRO(fVECLOGSIZE, + (mmvec_current_veclogsize(thread)), + () +) + +DEF_MACRO(fVECSIZE, + (1<VRegs_updated & (((VRegMask)1)<future_VRegs[VNUM] : mmvec_zero_vector()), + (A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY) +) + +DEF_MACRO( + fV_AL_CHECK, + if ((EA) & (MASK)) { + warn("aligning misaligned vector. PC=%08x EA=%08x",thread->Regs[REG_PC],(EA)); + }, + () +) +DEF_MACRO(fSCATTER_INIT, + { + mem_vector_scatter_init(thread, insn, REGION_START, LENGTH, ELEMENT_SIZE); + if (EXCEPTION_DETECTED) return; + }, + (A_STORE,A_MEMLIKE,A_RESTRICT_SLOT0ONLY) +) + +DEF_MACRO(fGATHER_INIT, + { + mem_vector_gather_init(thread, insn, REGION_START, LENGTH, ELEMENT_SIZE); + if (EXCEPTION_DETECTED) return; + }, + (A_LOAD,A_MEMLIKE,A_RESTRICT_SLOT1ONLY) +) + +DEF_MACRO(fSCATTER_FINISH, + { + if (EXCEPTION_DETECTED) return; + mem_vector_scatter_finish(thread, insn, OP); + }, + () +) + +DEF_MACRO(fGATHER_FINISH, + { + if (EXCEPTION_DETECTED) return; + mem_vector_gather_finish(thread, insn); + }, + () +) + + +DEF_MACRO(CHECK_VTCM_PAGE, + { + int slot = insn->slot; + paddr_t pa = thread->mem_access[slot].paddr+OFFSET; + pa = pa & ~(ALIGNMENT-1); + FLAG = (pa < (thread->mem_access[slot].paddr+LENGTH)); + }, + () +) +DEF_MACRO(COUNT_OUT_OF_BOUNDS, + { + if (!FLAG) + { + THREAD2STRUCT->vtcm_log.oob_access += SIZE; + warn("Scatter/Gather out of bounds of region"); + } + }, + () +) + +DEF_MACRO(fLOG_SCATTER_OP, + { + // Log the size and indicate that the extension ext.c file needs to increment right before memory write + THREAD2STRUCT->vtcm_log.op = 1; + THREAD2STRUCT->vtcm_log.op_size = SIZE; + }, + () +) + + + +DEF_MACRO(fVLOG_VTCM_WORD_INCREMENT, + { + int slot = insn->slot; + int log_bank = 0; + int log_byte =0; + paddr_t pa = thread->mem_access[slot].paddr+(OFFSET & ~(ALIGNMENT-1)); + paddr_t pa_high = thread->mem_access[slot].paddr+LEN; + for(int i0 = 0; i0 < 4; i0++) + { + log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high)); + log_bank |= (log_byte<slot; + int log_bank = 0; + int log_byte = 0; + paddr_t pa = thread->mem_access[slot].paddr+(OFFSET & ~(ALIGNMENT-1)); + paddr_t pa_high = thread->mem_access[slot].paddr+LEN; + for(int i0 = 0; i0 < 2; i0++) { + log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high)); + log_bank |= 
(log_byte<slot; + int log_bank = 0; + int log_byte = 0; + paddr_t pa = thread->mem_access[slot].paddr+(OFFSET & ~(ALIGNMENT-1)); + paddr_t pa_high = thread->mem_access[slot].paddr+LEN; + for(int i0 = 0; i0 < 2; i0++) { + log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high)); + log_bank |= (log_byte<slot; + int i0; + paddr_t pa = thread->mem_access[slot].paddr+OFFSET; + paddr_t pa_high = thread->mem_access[slot].paddr+LEN; + int log_bank = 0; + int log_byte = 0; + for(i0 = 0; i0 < ELEMENT_SIZE; i0++) + { + log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high)) && QVAL; + log_bank |= (log_byte<system_ptr, thread->threadId, thread->mem_access[slot].paddr+OFFSET+i0); + THREAD2STRUCT->tmp_VRegs[0].ub[ELEMENT_SIZE*IDX+i0] = B; + LOG_VTCM_BYTE(pa+i0,log_byte,B,ELEMENT_SIZE*IDX+i0); + } + LOG_VTCM_BANK(pa, log_bank,BANK_IDX); +}, +() +) + + + +DEF_MACRO(fVLOG_VTCM_GATHER_WORD, + { + GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 4, IDX, 1); + }, + () +) +DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORD, + { + GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, IDX, 1); + }, + () +) +DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORD_DV, + { + GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, (2*IDX2+IDX_H), 1); + }, + () +) +DEF_MACRO(fVLOG_VTCM_GATHER_WORDQ, + { + GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 4, IDX, fGETQBIT(QsV,4*IDX+i0)); + }, + () +) +DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORDQ, + { + GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, IDX, fGETQBIT(QsV,2*IDX+i0)); + }, + () +) + +DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORDQ_DV, + { + GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, (2*IDX2+IDX_H), fGETQBIT(QsV,2*IDX+i0)); + }, + () +) + + +DEF_MACRO(DEBUG_LOG_ADDR, + { + + if (thread->processor_ptr->arch_proc_options->mmvec_network_addr_log2) + { + + int slot = insn->slot; + paddr_t pa = thread->mem_access[slot].paddr+OFFSET; + } + }, + () +) + + + + + + + +DEF_MACRO(SCATTER_OP_WRITE_TO_MEM, + { + for (int i = 0; i < mmvecx->vtcm_log.size; i+=sizeof(TYPE)) + { + if ( mmvecx->vtcm_log.mask.ub[i] != 0) { + TYPE dst = 0; + TYPE inc = 0; + for(int j = 0; j < sizeof(TYPE); j++) { + dst |= (sim_mem_read1(thread->system_ptr, thread->threadId, mmvecx->vtcm_log.pa[i+j]) << (8*j)); + inc |= mmvecx->vtcm_log.data.ub[j+i] << (8*j); + + mmvecx->vtcm_log.mask.ub[j+i] = 0; + mmvecx->vtcm_log.data.ub[j+i] = 0; + mmvecx->vtcm_log.offsets.ub[j+i] = 0; + } + dst += inc; + for(int j = 0; j < sizeof(TYPE); j++) { + sim_mem_write1(thread->system_ptr,thread->threadId, mmvecx->vtcm_log.pa[i+j], (dst >> (8*j))& 0xFF ); + } + } + + } + }, + () +) + +DEF_MACRO(SCATTER_FUNCTION, +{ + int slot = insn->slot; + int i0; + paddr_t pa = thread->mem_access[slot].paddr+OFFSET; + paddr_t pa_high = thread->mem_access[slot].paddr+LEN; + int log_bank = 0; + int log_byte = 0; + for(i0 = 0; i0 < ELEMENT_SIZE; i0++) { + log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high)) && QVAL; + log_bank |= (log_byte<processor_ptr)); + }, + (A_STORE,A_MEMLIKE) +) + +DEF_MACRO(fVFETCH_AL, + { + fV_AL_CHECK(EA,fVECSIZE()-1); + mem_fetch_vector(thread, insn, EA&~(fVECSIZE()-1), insn->slot, fVECSIZE()); + }, + (A_LOAD,A_MEMLIKE) +) + + +DEF_MACRO(fLOADMMV_AL, + { + fV_AL_CHECK(EA,ALIGNMENT-1); + thread->last_pkt->double_access_vec = 0; + mem_load_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &DST.ub[0], LEN, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr)); + }, + (A_LOAD,A_MEMLIKE) +) + +DEF_MACRO(fLOADMMV, + fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST), + () +) + +DEF_MACRO(fLOADMMVQ, + do { + int __i; + fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST); + fVFOREACH(8,__i) if (!fGETQBIT(QVAL,__i)) DST.b[__i] = 0; + } while (0), + () +) + 
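+
+/*
+ * Editorial sketch, not part of the imported macros: fLOADMMVQ above is
+ * a full aligned vector load followed by zeroing every byte whose Q bit
+ * is clear; fLOADMMVNQ below is the same load with the predicate sense
+ * inverted.  A plain C model, assuming a 128-byte vector and a
+ * hypothetical qmask_bit() helper:
+ *
+ *   #include <stdint.h>
+ *   #include <string.h>
+ *
+ *   #define VLEN 128                        // vector length in bytes
+ *
+ *   // one predicate bit per byte lane, packed eight lanes per mask byte
+ *   static int qmask_bit(const uint8_t q[VLEN / 8], int i)
+ *   {
+ *       return (q[i / 8] >> (i % 8)) & 1;
+ *   }
+ *
+ *   static void load_vector_masked(uint8_t dst[VLEN], const uint8_t *mem,
+ *                                  const uint8_t q[VLEN / 8])
+ *   {
+ *       memcpy(dst, mem, VLEN);             // full load, as fLOADMMV_AL does
+ *       for (int i = 0; i < VLEN; i++) {
+ *           if (!qmask_bit(q, i)) {         // keep bytes whose Q bit is set
+ *               dst[i] = 0;
+ *           }
+ *       }
+ *   }
+ */
+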
+DEF_MACRO(fLOADMMVNQ, + do { + int __i; + fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST); + fVFOREACH(8,__i) if (fGETQBIT(QVAL,__i)) DST.b[__i] = 0; + } while (0), + () +) + +DEF_MACRO(fLOADMMVU_AL, + { + size4u_t size2 = (EA)&(ALIGNMENT-1); + size4u_t size1 = LEN-size2; + thread->last_pkt->double_access_vec = 1; + mem_load_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(), /* slot */ 1, size2, &DST.ub[size1], size2, fUSE_LOOKUP_ADDRESS()); + mem_load_vector_oddva(thread, insn, EA, EA,/* slot */ 0, size1, &DST.ub[0], size1, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr)); + }, + (A_LOAD,A_MEMLIKE) +) + +DEF_MACRO(fLOADMMVU, + { + /* if address happens to be aligned, only do aligned load */ + thread->last_pkt->pkt_has_vtcm_access = 0; + thread->last_pkt->pkt_access_count = 0; + if ( (EA & (fVECSIZE()-1)) == 0) { + thread->last_pkt->pkt_has_vmemu_access = 0; + thread->last_pkt->double_access = 0; + + fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST); + } else { + thread->last_pkt->pkt_has_vmemu_access = 1; + thread->last_pkt->double_access = 1; + + fLOADMMVU_AL(EA,fVECSIZE(),fVECSIZE(),DST); + } + }, + () +) + +DEF_MACRO(fSTOREMMV_AL, + { + fV_AL_CHECK(EA,ALIGNMENT-1); + mem_store_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &SRC.ub[0], 0, 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr)); + }, + (A_STORE,A_MEMLIKE) +) + +DEF_MACRO(fSTOREMMV, + fSTOREMMV_AL(EA,fVECSIZE(),fVECSIZE(),SRC), + () +) + +DEF_MACRO(fSTOREMMVQ_AL, + do { + mmvector_t maskvec; + int i; + for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i); + mem_store_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &SRC.ub[0], &maskvec.ub[0], 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr)); + } while (0), + (A_STORE,A_MEMLIKE) +) + +DEF_MACRO(fSTOREMMVQ, + fSTOREMMVQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK), + () +) + +DEF_MACRO(fSTOREMMVNQ_AL, + { + mmvector_t maskvec; + int i; + for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i); + fV_AL_CHECK(EA,ALIGNMENT-1); + mem_store_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &SRC.ub[0], &maskvec.ub[0], 1, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr)); + }, + (A_STORE,A_MEMLIKE) +) + +DEF_MACRO(fSTOREMMVNQ, + fSTOREMMVNQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK), + () +) + +DEF_MACRO(fSTOREMMVU_AL, + { + size4u_t size1 = ALIGNMENT-((EA)&(ALIGNMENT-1)); + size4u_t size2; + if (size1>LEN) size1 = LEN; + size2 = LEN-size1; + mem_store_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(), /* slot */ 1, size2, &SRC.ub[size1], 0, 0, fUSE_LOOKUP_ADDRESS()); + mem_store_vector_oddva(thread, insn, EA, EA, /* slot */ 0, size1, &SRC.ub[0], 0, 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr)); + }, + (A_STORE,A_MEMLIKE) +) + +DEF_MACRO(fSTOREMMVU, + { + thread->last_pkt->pkt_has_vtcm_access = 0; + thread->last_pkt->pkt_access_count = 0; + if ( (EA & (fVECSIZE()-1)) == 0) { + thread->last_pkt->double_access = 0; + fSTOREMMV_AL(EA,fVECSIZE(),fVECSIZE(),SRC); + } else { + thread->last_pkt->double_access = 1; + thread->last_pkt->pkt_has_vmemu_access = 1; + fSTOREMMVU_AL(EA,fVECSIZE(),fVECSIZE(),SRC); + } + }, + () +) + +DEF_MACRO(fSTOREMMVQU_AL, + { + size4u_t size1 = ALIGNMENT-((EA)&(ALIGNMENT-1)); + size4u_t size2; + mmvector_t maskvec; + int i; + for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i); + if (size1>LEN) size1 = LEN; + size2 = LEN-size1; + mem_store_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(),/* slot */ 1, size2, &SRC.ub[size1], &maskvec.ub[size1], 0, fUSE_LOOKUP_ADDRESS()); + 
mem_store_vector_oddva(thread, insn, EA, /* slot */ 0, size1, &SRC.ub[0], &maskvec.ub[0], 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr)); + }, + (A_STORE,A_MEMLIKE) +) + +DEF_MACRO(fSTOREMMVQU, + { + thread->last_pkt->pkt_has_vtcm_access = 0; + thread->last_pkt->pkt_access_count = 0; + if ( (EA & (fVECSIZE()-1)) == 0) { + thread->last_pkt->double_access = 0; + fSTOREMMVQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK); + } else { + thread->last_pkt->double_access = 1; + thread->last_pkt->pkt_has_vmemu_access = 1; + fSTOREMMVQU_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK); + } + }, + () +) + +DEF_MACRO(fSTOREMMVNQU_AL, + { + size4u_t size1 = ALIGNMENT-((EA)&(ALIGNMENT-1)); + size4u_t size2; + mmvector_t maskvec; + int i; + for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i); + if (size1>LEN) size1 = LEN; + size2 = LEN-size1; + mem_store_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(), /* slot */ 1, size2, &SRC.ub[size1], &maskvec.ub[size1], 1, fUSE_LOOKUP_ADDRESS()); + mem_store_vector_oddva(thread, insn, EA, EA, /* slot */ 0, size1, &SRC.ub[0], &maskvec.ub[0], 1, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr)); + }, + (A_STORE,A_MEMLIKE) +) + +DEF_MACRO(fSTOREMMVNQU, + { + thread->last_pkt->pkt_has_vtcm_access = 0; + thread->last_pkt->pkt_access_count = 0; + if ( (EA & (fVECSIZE()-1)) == 0) { + thread->last_pkt->double_access = 0; + fSTOREMMVNQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK); + } else { + thread->last_pkt->double_access = 1; + thread->last_pkt->pkt_has_vmemu_access = 1; + fSTOREMMVNQU_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK); + } + }, + () +) + + + + +DEF_MACRO(fVFOREACH, + for (VAR = 0; VAR < fVELEM(WIDTH); VAR++), + /* NOTHING */ +) + +DEF_MACRO(fVARRAY_ELEMENT_ACCESS, + ARRAY.v[(INDEX) / (fVECSIZE()/(sizeof(ARRAY.TYPE[0])))].TYPE[(INDEX) % (fVECSIZE()/(sizeof(ARRAY.TYPE[0])))], + () +) + +DEF_MACRO(fVNEWCANCEL, + do { THREAD2STRUCT->VRegs_select &= ~(1<<(REGNUM)); } while (0), + () +) + +DEF_MACRO(fTMPVDATA, + mmvec_vtmp_data(thread), + (A_CVI) +) + +DEF_MACRO(fVSATDW, + fVSATW( ( ( ((long long)U)<<32 ) | fZXTN(32,64,V) ) ), + /* attribs */ +) + +DEF_MACRO(fVASL_SATHI, + fVSATW(((U)<<1) | ((V)>>31)), + /* attribs */ +) + +DEF_MACRO(fVUADDSAT, + fVSATUN( WIDTH, fZXTN(WIDTH, 2*WIDTH, U) + fZXTN(WIDTH, 2*WIDTH, V)), + /* attribs */ +) + +DEF_MACRO(fVSADDSAT, + fVSATN( WIDTH, fSXTN(WIDTH, 2*WIDTH, U) + fSXTN(WIDTH, 2*WIDTH, V)), + /* attribs */ +) + +DEF_MACRO(fVUSUBSAT, + fVSATUN( WIDTH, fZXTN(WIDTH, 2*WIDTH, U) - fZXTN(WIDTH, 2*WIDTH, V)), + /* attribs */ +) + +DEF_MACRO(fVSSUBSAT, + fVSATN( WIDTH, fSXTN(WIDTH, 2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V)), + /* attribs */ +) + +DEF_MACRO(fVAVGU, + ((fZXTN(WIDTH, 2*WIDTH, U) + fZXTN(WIDTH, 2*WIDTH, V))>>1), + /* attribs */ +) + +DEF_MACRO(fVAVGURND, + ((fZXTN(WIDTH, 2*WIDTH, U) + fZXTN(WIDTH, 2*WIDTH, V)+1)>>1), + /* attribs */ +) + +DEF_MACRO(fVNAVGU, + ((fZXTN(WIDTH, 2*WIDTH, U) - fZXTN(WIDTH, 2*WIDTH, V))>>1), + /* attribs */ +) + +DEF_MACRO(fVNAVGURNDSAT, + fVSATUN(WIDTH,((fZXTN(WIDTH, 2*WIDTH, U) - fZXTN(WIDTH, 2*WIDTH, V)+1)>>1)), + /* attribs */ +) + +DEF_MACRO(fVAVGS, + ((fSXTN(WIDTH, 2*WIDTH, U) + fSXTN(WIDTH, 2*WIDTH, V))>>1), + /* attribs */ +) + +DEF_MACRO(fVAVGSRND, + ((fSXTN(WIDTH, 2*WIDTH, U) + fSXTN(WIDTH, 2*WIDTH, V)+1)>>1), + /* attribs */ +) + +DEF_MACRO(fVNAVGS, + ((fSXTN(WIDTH, 2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V))>>1), + /* attribs */ +) + +DEF_MACRO(fVNAVGSRND, + ((fSXTN(WIDTH, 2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V)+1)>>1), + /* attribs */ +) + +DEF_MACRO(fVNAVGSRNDSAT, + fVSATN(WIDTH,((fSXTN(WIDTH, 
2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V)+1)>>1)), + /* attribs */ +) + + +DEF_MACRO(fVNOROUND, + VAL, + /* NOTHING */ +) +DEF_MACRO(fVNOSAT, + VAL, + /* NOTHING */ +) + +DEF_MACRO(fVROUND, + ((VAL) + (((SHAMT)>0)?(1LL<<((SHAMT)-1)):0)), + /* NOTHING */ +) + +DEF_MACRO(fCARRY_FROM_ADD32, + (((fZXTN(32,64,A)+fZXTN(32,64,B)+C) >> 32) & 1), + /* NOTHING */ +) + +DEF_MACRO(fUARCH_NOTE_PUMP_4X, + , + () +) + +DEF_MACRO(fUARCH_NOTE_PUMP_2X, + , + () +) diff --git a/target/hexagon/imported/subinsns.idef b/target/hexagon/imported/subinsns.idef index ec1c74f479..be0ae8779d 100644 --- a/target/hexagon/imported/subinsns.idef +++ b/target/hexagon/imported/subinsns.idef @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -39,18 +39,18 @@ Q6INSN(SA1_clrf, "if (!p0) Rd16=#0", ATTRIBS(A_SUBINSN),"clear if false Q6INSN(SA1_addsp, "Rd16=add(r29,#u6:2)", ATTRIBS(A_SUBINSN),"Add", { RdV=fREAD_SP()+uiV; }) Q6INSN(SA1_inc, "Rd16=add(Rs16,#1)", ATTRIBS(A_SUBINSN),"Inc", { RdV=RsV+1;}) Q6INSN(SA1_dec, "Rd16=add(Rs16,#-1)", ATTRIBS(A_SUBINSN),"Dec", { RdV=RsV-1;}) -Q6INSN(SA1_addrx, "Rx16=add(Rx16,Rs16)", ATTRIBS(A_SUBINSN),"Add", { RxV=RxV+RsV; }) +Q6INSN(SA1_addrx, "Rx16=add(Rx16,Rs16)", ATTRIBS(A_SUBINSN,A_COMMUTES),"Add", { RxV=RxV+RsV; }) Q6INSN(SA1_zxtb, "Rd16=and(Rs16,#255)", ATTRIBS(A_SUBINSN),"Zxtb", { RdV= fZXTN(8,32,RsV);}) Q6INSN(SA1_and1, "Rd16=and(Rs16,#1)", ATTRIBS(A_SUBINSN),"And #1", { RdV= RsV&1;}) Q6INSN(SA1_sxtb, "Rd16=sxtb(Rs16)", ATTRIBS(A_SUBINSN),"Sxtb", { RdV= fSXTN(8,32,RsV);}) Q6INSN(SA1_zxth, "Rd16=zxth(Rs16)", ATTRIBS(A_SUBINSN),"Zxth", { RdV= fZXTN(16,32,RsV);}) Q6INSN(SA1_sxth, "Rd16=sxth(Rs16)", ATTRIBS(A_SUBINSN),"Sxth", { RdV= fSXTN(16,32,RsV);}) -Q6INSN(SA1_combinezr,"Rdd8=combine(#0,Rs16)", ATTRIBS(A_SUBINSN),"Combines", { fSETWORD(0,RddV,RsV); fSETWORD(1,RddV,0); }) -Q6INSN(SA1_combinerz,"Rdd8=combine(Rs16,#0)", ATTRIBS(A_SUBINSN),"Combines", { fSETWORD(0,RddV,0); fSETWORD(1,RddV,RsV); }) -Q6INSN(SA1_combine0i,"Rdd8=combine(#0,#u2)", ATTRIBS(A_SUBINSN),"Combines", { fSETWORD(0,RddV,uiV); fSETWORD(1,RddV,0); }) -Q6INSN(SA1_combine1i,"Rdd8=combine(#1,#u2)", ATTRIBS(A_SUBINSN),"Combines", { fSETWORD(0,RddV,uiV); fSETWORD(1,RddV,1); }) -Q6INSN(SA1_combine2i,"Rdd8=combine(#2,#u2)", ATTRIBS(A_SUBINSN),"Combines", { fSETWORD(0,RddV,uiV); fSETWORD(1,RddV,2); }) -Q6INSN(SA1_combine3i,"Rdd8=combine(#3,#u2)", ATTRIBS(A_SUBINSN),"Combines", { fSETWORD(0,RddV,uiV); fSETWORD(1,RddV,3); }) +Q6INSN(SA1_combinezr,"Rdd8=combine(#0,Rs16)", ATTRIBS(A_SUBINSN,A_ROPS_2),"Combines", { fSETWORD(0,RddV,RsV); fSETWORD(1,RddV,0); }) +Q6INSN(SA1_combinerz,"Rdd8=combine(Rs16,#0)", ATTRIBS(A_SUBINSN,A_ROPS_2),"Combines", { fSETWORD(0,RddV,0); fSETWORD(1,RddV,RsV); }) +Q6INSN(SA1_combine0i,"Rdd8=combine(#0,#u2)", ATTRIBS(A_SUBINSN,A_ROPS_2),"Combines", { fSETWORD(0,RddV,uiV); fSETWORD(1,RddV,0); }) +Q6INSN(SA1_combine1i,"Rdd8=combine(#1,#u2)", ATTRIBS(A_SUBINSN,A_ROPS_2),"Combines", { fSETWORD(0,RddV,uiV); fSETWORD(1,RddV,1); }) +Q6INSN(SA1_combine2i,"Rdd8=combine(#2,#u2)", ATTRIBS(A_SUBINSN,A_ROPS_2),"Combines", { fSETWORD(0,RddV,uiV); fSETWORD(1,RddV,2); }) +Q6INSN(SA1_combine3i,"Rdd8=combine(#3,#u2)", ATTRIBS(A_SUBINSN,A_ROPS_2),"Combines", { fSETWORD(0,RddV,uiV); fSETWORD(1,RddV,3); }) Q6INSN(SA1_cmpeqi, "p0=cmp.eq(Rs16,#u2)", 
ATTRIBS(A_SUBINSN),"CompareImmed",{fWRITE_P0(f8BITSOF(RsV==uiV));}) @@ -62,16 +62,16 @@ Q6INSN(SA1_cmpeqi, "p0=cmp.eq(Rs16,#u2)", ATTRIBS(A_SUBINSN),"CompareImmed", /* */ /*****************************************************************/ -Q6INSN(SL1_loadri_io, "Rd16=memw(Rs16+#u4:2)", ATTRIBS(A_LOAD,A_SUBINSN),"load word", {fEA_RI(RsV,uiV); fLOAD(1,4,u,EA,RdV);}) -Q6INSN(SL1_loadrub_io, "Rd16=memub(Rs16+#u4:0)",ATTRIBS(A_LOAD,A_SUBINSN),"load byte", {fEA_RI(RsV,uiV); fLOAD(1,1,u,EA,RdV);}) +Q6INSN(SL1_loadri_io, "Rd16=memw(Rs16+#u4:2)", ATTRIBS(A_REGWRSIZE_4B,A_MEMSIZE_4B,A_LOAD,A_SUBINSN),"load word", {fEA_RI(RsV,uiV); fLOAD(1,4,u,EA,RdV);}) +Q6INSN(SL1_loadrub_io, "Rd16=memub(Rs16+#u4:0)",ATTRIBS(A_MEMSIZE_1B,A_LOAD,A_SUBINSN,A_REGWRSIZE_1B),"load byte", {fEA_RI(RsV,uiV); fLOAD(1,1,u,EA,RdV);}) -Q6INSN(SL2_loadrh_io, "Rd16=memh(Rs16+#u3:1)", ATTRIBS(A_LOAD,A_SUBINSN),"load half", {fEA_RI(RsV,uiV); fLOAD(1,2,s,EA,RdV);}) -Q6INSN(SL2_loadruh_io, "Rd16=memuh(Rs16+#u3:1)",ATTRIBS(A_LOAD,A_SUBINSN),"load half", {fEA_RI(RsV,uiV); fLOAD(1,2,u,EA,RdV);}) -Q6INSN(SL2_loadrb_io, "Rd16=memb(Rs16+#u3:0)", ATTRIBS(A_LOAD,A_SUBINSN),"load byte", {fEA_RI(RsV,uiV); fLOAD(1,1,s,EA,RdV);}) -Q6INSN(SL2_loadri_sp, "Rd16=memw(r29+#u5:2)", ATTRIBS(A_LOAD,A_SUBINSN),"load word", {fEA_RI(fREAD_SP(),uiV); fLOAD(1,4,u,EA,RdV);}) -Q6INSN(SL2_loadrd_sp, "Rdd8=memd(r29+#u5:3)", ATTRIBS(A_LOAD,A_SUBINSN),"load dword",{fEA_RI(fREAD_SP(),uiV); fLOAD(1,8,u,EA,RddV);}) +Q6INSN(SL2_loadrh_io, "Rd16=memh(Rs16+#u3:1)", ATTRIBS(A_REGWRSIZE_2B,A_MEMSIZE_2B,A_LOAD,A_SUBINSN),"load half", {fEA_RI(RsV,uiV); fLOAD(1,2,s,EA,RdV);}) +Q6INSN(SL2_loadruh_io, "Rd16=memuh(Rs16+#u3:1)",ATTRIBS(A_REGWRSIZE_2B,A_MEMSIZE_2B,A_LOAD,A_SUBINSN),"load half", {fEA_RI(RsV,uiV); fLOAD(1,2,u,EA,RdV);}) +Q6INSN(SL2_loadrb_io, "Rd16=memb(Rs16+#u3:0)", ATTRIBS(A_MEMSIZE_1B,A_LOAD,A_SUBINSN),"load byte", {fEA_RI(RsV,uiV); fLOAD(1,1,s,EA,RdV);}) +Q6INSN(SL2_loadri_sp, "Rd16=memw(r29+#u5:2)", ATTRIBS(A_REGWRSIZE_4B,A_MEMSIZE_4B,A_LOAD,A_SUBINSN),"load word", {fEA_RI(fREAD_SP(),uiV); fLOAD(1,4,u,EA,RdV);}) +Q6INSN(SL2_loadrd_sp, "Rdd8=memd(r29+#u5:3)", ATTRIBS(A_REGWRSIZE_8B,A_MEMSIZE_8B,A_LOAD,A_SUBINSN),"load dword",{fEA_RI(fREAD_SP(),uiV); fLOAD(1,8,u,EA,RddV);}) -Q6INSN(SL2_deallocframe,"deallocframe", ATTRIBS(A_SUBINSN,A_LOAD), "Deallocate stack frame", +Q6INSN(SL2_deallocframe,"deallocframe", ATTRIBS(A_REGWRSIZE_8B,A_SUBINSN,A_MEMSIZE_8B,A_LOAD,A_DEALLOCFRAME), "Deallocate stack frame", { fHIDE(size8u_t tmp;) fEA_REG(fREAD_FP()); fLOAD(1,8,u,EA,tmp); tmp = fFRAME_UNSCRAMBLE(tmp); @@ -79,7 +79,7 @@ Q6INSN(SL2_deallocframe,"deallocframe", ATTRIBS(A_SUBINSN,A_LOAD), "Deallocate s fWRITE_FP(fGETWORD(0,tmp)); fWRITE_SP(EA+8); }) -Q6INSN(SL2_return,"dealloc_return", ATTRIBS(A_JINDIR,A_SUBINSN,A_LOAD,A_RETURN,A_RESTRICT_SLOT0ONLY), "Deallocate stack frame and return", +Q6INSN(SL2_return,"dealloc_return", ATTRIBS(A_REGWRSIZE_8B,A_JINDIR,A_SUBINSN,A_ROPS_2,A_MEMSIZE_8B,A_LOAD,A_RETURN,A_RESTRICT_SLOT0ONLY,A_RET_TYPE,A_DEALLOCRET), "Deallocate stack frame and return", { fHIDE(size8u_t tmp;) fEA_REG(fREAD_FP()); fLOAD(1,8,u,EA,tmp); tmp = fFRAME_UNSCRAMBLE(tmp); @@ -88,40 +88,40 @@ Q6INSN(SL2_return,"dealloc_return", ATTRIBS(A_JINDIR,A_SUBINSN,A_LOAD,A_RETURN,A fWRITE_SP(EA+8); fJUMPR(REG_LR,fGETWORD(1,tmp),COF_TYPE_JUMPR);}) -Q6INSN(SL2_return_t,"if (p0) dealloc_return", ATTRIBS(A_JINDIROLD,A_SUBINSN,A_LOAD,A_RETURN,A_RESTRICT_SLOT0ONLY), "Deallocate stack frame and return", +Q6INSN(SL2_return_t,"if (p0) dealloc_return", 
ATTRIBS(A_REGWRSIZE_8B,A_JINDIROLD,A_SUBINSN,A_ROPS_2,A_MEMSIZE_8B,A_LOAD,A_RETURN,A_RESTRICT_SLOT0ONLY,A_RET_TYPE), "Deallocate stack frame and return", { fHIDE(size8u_t tmp;); fBRANCH_SPECULATE_STALL(fLSBOLD(fREAD_P0()),, SPECULATE_NOT_TAKEN,4,0); fEA_REG(fREAD_FP()); if (fLSBOLD(fREAD_P0())) { fLOAD(1,8,u,EA,tmp); tmp = fFRAME_UNSCRAMBLE(tmp); fWRITE_LR(fGETWORD(1,tmp)); fWRITE_FP(fGETWORD(0,tmp)); fWRITE_SP(EA+8); fJUMPR(REG_LR,fGETWORD(1,tmp),COF_TYPE_JUMPR);} else {LOAD_CANCEL(EA);} }) -Q6INSN(SL2_return_f,"if (!p0) dealloc_return", ATTRIBS(A_JINDIROLD,A_SUBINSN,A_LOAD,A_RETURN,A_RESTRICT_SLOT0ONLY), "Deallocate stack frame and return", +Q6INSN(SL2_return_f,"if (!p0) dealloc_return", ATTRIBS(A_REGWRSIZE_8B,A_JINDIROLD,A_SUBINSN,A_ROPS_2,A_MEMSIZE_8B,A_LOAD,A_RETURN,A_RESTRICT_SLOT0ONLY,A_RET_TYPE), "Deallocate stack frame and return", { fHIDE(size8u_t tmp;);fBRANCH_SPECULATE_STALL(fLSBOLDNOT(fREAD_P0()),, SPECULATE_NOT_TAKEN,4,0); fEA_REG(fREAD_FP()); if (fLSBOLDNOT(fREAD_P0())) { fLOAD(1,8,u,EA,tmp); tmp = fFRAME_UNSCRAMBLE(tmp); fWRITE_LR(fGETWORD(1,tmp)); fWRITE_FP(fGETWORD(0,tmp)); fWRITE_SP(EA+8); fJUMPR(REG_LR,fGETWORD(1,tmp),COF_TYPE_JUMPR);} else {LOAD_CANCEL(EA);} }) -Q6INSN(SL2_return_tnew,"if (p0.new) dealloc_return:nt", ATTRIBS(A_JINDIRNEW,A_SUBINSN,A_LOAD,A_RETURN,A_RESTRICT_SLOT0ONLY), "Deallocate stack frame and return", +Q6INSN(SL2_return_tnew,"if (p0.new) dealloc_return:nt", ATTRIBS(A_REGWRSIZE_8B,A_JINDIRNEW,A_SUBINSN,A_ROPS_2,A_MEMSIZE_8B,A_LOAD,A_RETURN,A_RESTRICT_SLOT0ONLY,A_RET_TYPE), "Deallocate stack frame and return", { fHIDE(size8u_t tmp;) fBRANCH_SPECULATE_STALL(fLSBNEW0,, SPECULATE_NOT_TAKEN , 4,3); fEA_REG(fREAD_FP()); if (fLSBNEW0) { fLOAD(1,8,u,EA,tmp); tmp = fFRAME_UNSCRAMBLE(tmp); fWRITE_LR(fGETWORD(1,tmp)); fWRITE_FP(fGETWORD(0,tmp)); fWRITE_SP(EA+8); fJUMPR(REG_LR,fGETWORD(1,tmp),COF_TYPE_JUMPR);} else {LOAD_CANCEL(EA);} }) -Q6INSN(SL2_return_fnew,"if (!p0.new) dealloc_return:nt", ATTRIBS(A_JINDIRNEW,A_SUBINSN,A_LOAD,A_RETURN,A_RESTRICT_SLOT0ONLY), "Deallocate stack frame and return", +Q6INSN(SL2_return_fnew,"if (!p0.new) dealloc_return:nt", ATTRIBS(A_REGWRSIZE_8B,A_JINDIRNEW,A_SUBINSN,A_ROPS_2,A_MEMSIZE_8B,A_LOAD,A_RETURN,A_RESTRICT_SLOT0ONLY,A_RET_TYPE), "Deallocate stack frame and return", { fHIDE(size8u_t tmp;) fBRANCH_SPECULATE_STALL(fLSBNEW0NOT,, SPECULATE_NOT_TAKEN , 4,3); fEA_REG(fREAD_FP()); if (fLSBNEW0NOT) { fLOAD(1,8,u,EA,tmp); tmp = fFRAME_UNSCRAMBLE(tmp); fWRITE_LR(fGETWORD(1,tmp)); fWRITE_FP(fGETWORD(0,tmp)); fWRITE_SP(EA+8); fJUMPR(REG_LR,fGETWORD(1,tmp),COF_TYPE_JUMPR);} else {LOAD_CANCEL(EA);} }) -Q6INSN(SL2_jumpr31,"jumpr r31",ATTRIBS(A_SUBINSN,A_JINDIR,A_RESTRICT_SLOT0ONLY),"indirect unconditional jump", +Q6INSN(SL2_jumpr31,"jumpr r31",ATTRIBS(A_SUBINSN,A_JINDIR,A_RESTRICT_SLOT0ONLY,A_RET_TYPE),"indirect unconditional jump", { fJUMPR(REG_LR,fREAD_LR(),COF_TYPE_JUMPR);}) -Q6INSN(SL2_jumpr31_t,"if (p0) jumpr r31",ATTRIBS(A_SUBINSN,A_JINDIROLD,A_RESTRICT_SLOT0ONLY),"indirect conditional jump if true", +Q6INSN(SL2_jumpr31_t,"if (p0) jumpr r31",ATTRIBS(A_SUBINSN,A_JINDIROLD,A_NOTE_CONDITIONAL,A_RESTRICT_SLOT0ONLY,A_RET_TYPE),"indirect conditional jump if true", {fBRANCH_SPECULATE_STALL(fLSBOLD(fREAD_P0()),, SPECULATE_TAKEN,4,0); if (fLSBOLD(fREAD_P0())) {fJUMPR(REG_LR,fREAD_LR(),COF_TYPE_JUMPR);}}) -Q6INSN(SL2_jumpr31_f,"if (!p0) jumpr r31",ATTRIBS(A_SUBINSN,A_JINDIROLD,A_RESTRICT_SLOT0ONLY),"indirect conditional jump if false", +Q6INSN(SL2_jumpr31_f,"if (!p0) jumpr 
r31",ATTRIBS(A_SUBINSN,A_JINDIROLD,A_NOTE_CONDITIONAL,A_RESTRICT_SLOT0ONLY,A_RET_TYPE),"indirect conditional jump if false", {fBRANCH_SPECULATE_STALL(fLSBOLDNOT(fREAD_P0()),, SPECULATE_TAKEN,4,0); if (fLSBOLDNOT(fREAD_P0())) {fJUMPR(REG_LR,fREAD_LR(),COF_TYPE_JUMPR);}}) -Q6INSN(SL2_jumpr31_tnew,"if (p0.new) jumpr:nt r31",ATTRIBS(A_SUBINSN,A_JINDIRNEW,A_RESTRICT_SLOT0ONLY),"indirect conditional jump if true", +Q6INSN(SL2_jumpr31_tnew,"if (p0.new) jumpr:nt r31",ATTRIBS(A_SUBINSN,A_JINDIRNEW,A_NOTE_CONDITIONAL,A_RESTRICT_SLOT0ONLY,A_RET_TYPE),"indirect conditional jump if true", {fBRANCH_SPECULATE_STALL(fLSBNEW0,, SPECULATE_NOT_TAKEN , 4,3); if (fLSBNEW0) {fJUMPR(REG_LR,fREAD_LR(),COF_TYPE_JUMPR);}}) -Q6INSN(SL2_jumpr31_fnew,"if (!p0.new) jumpr:nt r31",ATTRIBS(A_SUBINSN,A_JINDIRNEW,A_RESTRICT_SLOT0ONLY),"indirect conditional jump if false", +Q6INSN(SL2_jumpr31_fnew,"if (!p0.new) jumpr:nt r31",ATTRIBS(A_SUBINSN,A_JINDIRNEW,A_NOTE_CONDITIONAL,A_RESTRICT_SLOT0ONLY,A_RET_TYPE),"indirect conditional jump if false", {fBRANCH_SPECULATE_STALL(fLSBNEW0NOT,, SPECULATE_NOT_TAKEN , 4,3); if (fLSBNEW0NOT) {fJUMPR(REG_LR,fREAD_LR(),COF_TYPE_JUMPR);}}) @@ -134,16 +134,16 @@ Q6INSN(SL2_jumpr31_fnew,"if (!p0.new) jumpr:nt r31",ATTRIBS(A_SUBINSN,A_JINDIRNE /* */ /*****************************************************************/ -Q6INSN(SS1_storew_io, "memw(Rs16+#u4:2)=Rt16", ATTRIBS(A_STORE,A_SUBINSN), "store word", {fEA_RI(RsV,uiV); fSTORE(1,4,EA,RtV);}) -Q6INSN(SS1_storeb_io, "memb(Rs16+#u4:0)=Rt16", ATTRIBS(A_STORE,A_SUBINSN), "store byte", {fEA_RI(RsV,uiV); fSTORE(1,1,EA,fGETBYTE(0,RtV));}) -Q6INSN(SS2_storeh_io, "memh(Rs16+#u3:1)=Rt16", ATTRIBS(A_STORE,A_SUBINSN), "store half", {fEA_RI(RsV,uiV); fSTORE(1,2,EA,fGETHALF(0,RtV));}) -Q6INSN(SS2_stored_sp, "memd(r29+#s6:3)=Rtt8", ATTRIBS(A_STORE,A_SUBINSN), "store dword",{fEA_RI(fREAD_SP(),siV); fSTORE(1,8,EA,RttV);}) -Q6INSN(SS2_storew_sp, "memw(r29+#u5:2)=Rt16", ATTRIBS(A_STORE,A_SUBINSN), "store word", {fEA_RI(fREAD_SP(),uiV); fSTORE(1,4,EA,RtV);}) -Q6INSN(SS2_storewi0, "memw(Rs16+#u4:2)=#0", ATTRIBS(A_STORE,A_SUBINSN), "store word", {fEA_RI(RsV,uiV); fSTORE(1,4,EA,0);}) -Q6INSN(SS2_storebi0, "memb(Rs16+#u4:0)=#0", ATTRIBS(A_STORE,A_SUBINSN), "store byte", {fEA_RI(RsV,uiV); fSTORE(1,1,EA,0);}) -Q6INSN(SS2_storewi1, "memw(Rs16+#u4:2)=#1", ATTRIBS(A_STORE,A_SUBINSN), "store word", {fEA_RI(RsV,uiV); fSTORE(1,4,EA,1);}) -Q6INSN(SS2_storebi1, "memb(Rs16+#u4:0)=#1", ATTRIBS(A_STORE,A_SUBINSN), "store byte", {fEA_RI(RsV,uiV); fSTORE(1,1,EA,1);}) +Q6INSN(SS1_storew_io, "memw(Rs16+#u4:2)=Rt16", ATTRIBS(A_REGWRSIZE_4B,A_MEMSIZE_4B,A_STORE,A_SUBINSN), "store word", {fEA_RI(RsV,uiV); fSTORE(1,4,EA,RtV);}) +Q6INSN(SS1_storeb_io, "memb(Rs16+#u4:0)=Rt16", ATTRIBS(A_MEMSIZE_1B,A_STORE,A_SUBINSN), "store byte", {fEA_RI(RsV,uiV); fSTORE(1,1,EA,fGETBYTE(0,RtV));}) +Q6INSN(SS2_storeh_io, "memh(Rs16+#u3:1)=Rt16", ATTRIBS(A_MEMSIZE_2B,A_STORE,A_SUBINSN), "store half", {fEA_RI(RsV,uiV); fSTORE(1,2,EA,fGETHALF(0,RtV));}) +Q6INSN(SS2_stored_sp, "memd(r29+#s6:3)=Rtt8", ATTRIBS(A_REGWRSIZE_8B,A_MEMSIZE_8B,A_STORE,A_SUBINSN), "store dword",{fEA_RI(fREAD_SP(),siV); fSTORE(1,8,EA,RttV);}) +Q6INSN(SS2_storew_sp, "memw(r29+#u5:2)=Rt16", ATTRIBS(A_REGWRSIZE_4B,A_MEMSIZE_4B,A_STORE,A_SUBINSN), "store word", {fEA_RI(fREAD_SP(),uiV); fSTORE(1,4,EA,RtV);}) +Q6INSN(SS2_storewi0, "memw(Rs16+#u4:2)=#0", ATTRIBS(A_REGWRSIZE_4B,A_MEMSIZE_4B,A_STORE,A_SUBINSN,A_ROPS_2), "store word", {fEA_RI(RsV,uiV); fSTORE(1,4,EA,0);}) +Q6INSN(SS2_storebi0, "memb(Rs16+#u4:0)=#0", 
ATTRIBS(A_MEMSIZE_1B,A_STORE,A_SUBINSN,A_ROPS_2), "store byte", {fEA_RI(RsV,uiV); fSTORE(1,1,EA,0);}) +Q6INSN(SS2_storewi1, "memw(Rs16+#u4:2)=#1", ATTRIBS(A_REGWRSIZE_4B,A_MEMSIZE_4B,A_STORE,A_SUBINSN,A_ROPS_2), "store word", {fEA_RI(RsV,uiV); fSTORE(1,4,EA,1);}) +Q6INSN(SS2_storebi1, "memb(Rs16+#u4:0)=#1", ATTRIBS(A_MEMSIZE_1B,A_STORE,A_SUBINSN,A_ROPS_2), "store byte", {fEA_RI(RsV,uiV); fSTORE(1,1,EA,1);}) -Q6INSN(SS2_allocframe,"allocframe(#u5:3)", ATTRIBS(A_SUBINSN,A_STORE,A_RESTRICT_SLOT0ONLY), "Allocate stack frame", +Q6INSN(SS2_allocframe,"allocframe(#u5:3)", ATTRIBS(A_REGWRSIZE_8B,A_SUBINSN,A_MEMSIZE_8B,A_STORE,A_RESTRICT_SLOT0ONLY), "Allocate stack frame", { fEA_RI(fREAD_SP(),-8); fSTORE(1,8,EA,fFRAME_SCRAMBLE((fCAST8_8u(fREAD_LR()) << 32) | fCAST4_4u(fREAD_FP()))); fWRITE_FP(EA); fFRAMECHECK(EA-uiV,EA); fWRITE_SP(EA-uiV); }) diff --git a/target/hexagon/insn.h b/target/hexagon/insn.h index 2e345912a8..aa26389147 100644 --- a/target/hexagon/insn.h +++ b/target/hexagon/insn.h @@ -67,6 +67,9 @@ struct Packet { bool pkt_has_store_s0; bool pkt_has_store_s1; + bool pkt_has_hvx; + Insn *vhist_insn; + Insn insn[INSTRUCTIONS_MAX]; }; diff --git a/target/hexagon/internal.h b/target/hexagon/internal.h index 6b20affdfa..b1bfadc3f5 100644 --- a/target/hexagon/internal.h +++ b/target/hexagon/internal.h @@ -18,6 +18,8 @@ #ifndef HEXAGON_INTERNAL_H #define HEXAGON_INTERNAL_H +#include "qemu/log.h" + /* * Change HEX_DEBUG to 1 to turn on debugging output */ @@ -31,6 +33,9 @@ int hexagon_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int hexagon_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); + +void hexagon_debug_vreg(CPUHexagonState *env, int regnum); +void hexagon_debug_qreg(CPUHexagonState *env, int regnum); void hexagon_debug(CPUHexagonState *env); extern const char * const hexagon_regnames[TOTAL_PER_THREAD_REGS]; diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h index 094b8dabb5..c8805bdaeb 100644 --- a/target/hexagon/macros.h +++ b/target/hexagon/macros.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -62,7 +62,7 @@ reg_field_info[FIELD].offset) #define SET_USR_FIELD(FIELD, VAL) \ - fINSERT_BITS(env->gpr[HEX_REG_USR], reg_field_info[FIELD].width, \ + fINSERT_BITS(env->new_value[HEX_REG_USR], reg_field_info[FIELD].width, \ reg_field_info[FIELD].offset, (VAL)) #endif @@ -87,11 +87,28 @@ * * * For qemu, we look for a load in slot 0 when there is a store in slot 1 - * in the same packet. When we see this, we call a helper that merges the - * bytes from the store buffer with the value loaded from memory. + * in the same packet. When we see this, we call a helper that probes the + * load to make sure it doesn't fault. Then, we process the store ahead of + * the actual load. 
+ */ -#define CHECK_NOSHUF \ +#define CHECK_NOSHUF(VA, SIZE) \ do { \ + if (insn->slot == 0 && pkt->pkt_has_store_s1) { \ + probe_noshuf_load(VA, SIZE, ctx->mem_idx); \ + process_store(ctx, pkt, 1); \ + } \ + } while (0) + +#define CHECK_NOSHUF_PRED(GET_EA, SIZE, PRED) \ + do { \ + TCGLabel *label = gen_new_label(); \ + tcg_gen_brcondi_tl(TCG_COND_EQ, PRED, 0, label); \ + GET_EA; \ + if (insn->slot == 0 && pkt->pkt_has_store_s1) { \ + probe_noshuf_load(EA, SIZE, ctx->mem_idx); \ + } \ + gen_set_label(label); \ if (insn->slot == 0 && pkt->pkt_has_store_s1) { \ process_store(ctx, pkt, 1); \ } \ @@ -99,37 +116,37 @@ #define MEM_LOAD1s(DST, VA) \ do { \ - CHECK_NOSHUF; \ + CHECK_NOSHUF(VA, 1); \ tcg_gen_qemu_ld8s(DST, VA, ctx->mem_idx); \ } while (0) #define MEM_LOAD1u(DST, VA) \ do { \ - CHECK_NOSHUF; \ + CHECK_NOSHUF(VA, 1); \ tcg_gen_qemu_ld8u(DST, VA, ctx->mem_idx); \ } while (0) #define MEM_LOAD2s(DST, VA) \ do { \ - CHECK_NOSHUF; \ + CHECK_NOSHUF(VA, 2); \ tcg_gen_qemu_ld16s(DST, VA, ctx->mem_idx); \ } while (0) #define MEM_LOAD2u(DST, VA) \ do { \ - CHECK_NOSHUF; \ + CHECK_NOSHUF(VA, 2); \ tcg_gen_qemu_ld16u(DST, VA, ctx->mem_idx); \ } while (0) #define MEM_LOAD4s(DST, VA) \ do { \ - CHECK_NOSHUF; \ + CHECK_NOSHUF(VA, 4); \ tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \ } while (0) #define MEM_LOAD4u(DST, VA) \ do { \ - CHECK_NOSHUF; \ + CHECK_NOSHUF(VA, 4); \ tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \ } while (0) #define MEM_LOAD8u(DST, VA) \ do { \ - CHECK_NOSHUF; \ + CHECK_NOSHUF(VA, 8); \ tcg_gen_qemu_ld64(DST, VA, ctx->mem_idx); \ } while (0) @@ -139,7 +156,7 @@ __builtin_choose_expr(TYPE_TCGV(X), \ gen_store1, (void)0)) #define MEM_STORE1(VA, DATA, SLOT) \ - MEM_STORE1_FUNC(DATA)(cpu_env, VA, DATA, ctx, SLOT) + MEM_STORE1_FUNC(DATA)(cpu_env, VA, DATA, SLOT) #define MEM_STORE2_FUNC(X) \ __builtin_choose_expr(TYPE_INT(X), \ @@ -147,7 +164,7 @@ __builtin_choose_expr(TYPE_TCGV(X), \ gen_store2, (void)0)) #define MEM_STORE2(VA, DATA, SLOT) \ - MEM_STORE2_FUNC(DATA)(cpu_env, VA, DATA, ctx, SLOT) + MEM_STORE2_FUNC(DATA)(cpu_env, VA, DATA, SLOT) #define MEM_STORE4_FUNC(X) \ __builtin_choose_expr(TYPE_INT(X), \ @@ -155,7 +172,7 @@ __builtin_choose_expr(TYPE_TCGV(X), \ gen_store4, (void)0)) #define MEM_STORE4(VA, DATA, SLOT) \ - MEM_STORE4_FUNC(DATA)(cpu_env, VA, DATA, ctx, SLOT) + MEM_STORE4_FUNC(DATA)(cpu_env, VA, DATA, SLOT) #define MEM_STORE8_FUNC(X) \ __builtin_choose_expr(TYPE_INT(X), \ @@ -163,7 +180,7 @@ __builtin_choose_expr(TYPE_TCGV_I64(X), \ gen_store8, (void)0)) #define MEM_STORE8(VA, DATA, SLOT) \ - MEM_STORE8_FUNC(DATA)(cpu_env, VA, DATA, ctx, SLOT) + MEM_STORE8_FUNC(DATA)(cpu_env, VA, DATA, SLOT) #else #define MEM_LOAD1s(VA) ((int8_t)mem_load1(env, slot, VA)) #define MEM_LOAD1u(VA) ((uint8_t)mem_load1(env, slot, VA)) @@ -187,18 +204,15 @@ #ifdef QEMU_GENERATE static inline void gen_pred_cancel(TCGv pred, int slot_num) { - TCGv slot_mask = tcg_const_tl(1 << slot_num); + TCGv slot_mask = tcg_temp_new(); TCGv tmp = tcg_temp_new(); - TCGv zero = tcg_const_tl(0); - TCGv one = tcg_const_tl(1); - tcg_gen_or_tl(slot_mask, hex_slot_cancelled, slot_mask); + TCGv zero = tcg_constant_tl(0); + tcg_gen_ori_tl(slot_mask, hex_slot_cancelled, 1 << slot_num); tcg_gen_andi_tl(tmp, pred, 1); tcg_gen_movcond_tl(TCG_COND_EQ, hex_slot_cancelled, tmp, zero, slot_mask, hex_slot_cancelled); tcg_temp_free(slot_mask); tcg_temp_free(tmp); - tcg_temp_free(zero); - tcg_temp_free(one); } #define PRED_LOAD_CANCEL(PRED, EA) \ gen_pred_cancel(PRED, insn->is_endloop ? 
4 : insn->slot) @@ -269,6 +283,10 @@ static inline void gen_pred_cancel(TCGv pred, int slot_num) #define fNEWREG_ST(VAL) (VAL) +#define fVSATUVALN(N, VAL) \ + ({ \ + (((int64_t)(VAL)) < 0) ? 0 : ((1LL << (N)) - 1); \ + }) #define fSATUVALN(N, VAL) \ ({ \ fSET_OVERFLOW(); \ @@ -279,10 +297,16 @@ static inline void gen_pred_cancel(TCGv pred, int slot_num) fSET_OVERFLOW(); \ ((VAL) < 0) ? (-(1LL << ((N) - 1))) : ((1LL << ((N) - 1)) - 1); \ }) +#define fVSATVALN(N, VAL) \ + ({ \ + ((VAL) < 0) ? (-(1LL << ((N) - 1))) : ((1LL << ((N) - 1)) - 1); \ + }) #define fZXTN(N, M, VAL) (((N) != 0) ? extract64((VAL), 0, (N)) : 0LL) #define fSXTN(N, M, VAL) (((N) != 0) ? sextract64((VAL), 0, (N)) : 0LL) #define fSATN(N, VAL) \ ((fSXTN(N, 64, VAL) == (VAL)) ? (VAL) : fSATVALN(N, VAL)) +#define fVSATN(N, VAL) \ + ((fSXTN(N, 64, VAL) == (VAL)) ? (VAL) : fVSATVALN(N, VAL)) #define fADDSAT64(DST, A, B) \ do { \ uint64_t __a = fCAST8u(A); \ @@ -305,12 +329,18 @@ static inline void gen_pred_cancel(TCGv pred, int slot_num) DST = __sum; \ } \ } while (0) +#define fVSATUN(N, VAL) \ + ((fZXTN(N, 64, VAL) == (VAL)) ? (VAL) : fVSATUVALN(N, VAL)) #define fSATUN(N, VAL) \ ((fZXTN(N, 64, VAL) == (VAL)) ? (VAL) : fSATUVALN(N, VAL)) #define fSATH(VAL) (fSATN(16, VAL)) #define fSATUH(VAL) (fSATUN(16, VAL)) +#define fVSATH(VAL) (fVSATN(16, VAL)) +#define fVSATUH(VAL) (fVSATUN(16, VAL)) #define fSATUB(VAL) (fSATUN(8, VAL)) #define fSATB(VAL) (fSATN(8, VAL)) +#define fVSATUB(VAL) (fVSATUN(8, VAL)) +#define fVSATB(VAL) (fVSATN(8, VAL)) #define fIMMEXT(IMM) (IMM = IMM) #define fMUST_IMMEXT(IMM) fIMMEXT(IMM) @@ -417,6 +447,8 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift) #define fCAST4s(A) ((int32_t)(A)) #define fCAST8u(A) ((uint64_t)(A)) #define fCAST8s(A) ((int64_t)(A)) +#define fCAST2_2s(A) ((int16_t)(A)) +#define fCAST2_2u(A) ((uint16_t)(A)) #define fCAST4_4s(A) ((int32_t)(A)) #define fCAST4_4u(A) ((uint32_t)(A)) #define fCAST4_8s(A) ((int64_t)((int32_t)(A))) @@ -501,10 +533,9 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift) #define fPM_M(REG, MVAL) tcg_gen_add_tl(REG, REG, MVAL) #define fPM_CIRI(REG, IMM, MVAL) \ do { \ - TCGv tcgv_siV = tcg_const_tl(siV); \ + TCGv tcgv_siV = tcg_constant_tl(siV); \ gen_helper_fcircadd(REG, REG, tcgv_siV, MuV, \ hex_gpr[HEX_REG_CS0 + MuN]); \ - tcg_temp_free(tcgv_siV); \ } while (0) #else #define fEA_IMM(IMM) do { EA = (IMM); } while (0) @@ -514,7 +545,9 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift) #define fPM_M(REG, MVAL) do { REG = REG + (MVAL); } while (0) #endif #define fSCALE(N, A) (((int64_t)(A)) << N) +#define fVSATW(A) fVSATN(32, ((long long)A)) #define fSATW(A) fSATN(32, ((long long)A)) +#define fVSAT(A) fVSATN(32, (A)) #define fSAT(A) fSATN(32, (A)) #define fSAT_ORIG_SHL(A, ORIG_REG) \ ((((int32_t)((fSAT(A)) ^ ((int32_t)(ORIG_REG)))) < 0) \ @@ -651,12 +684,14 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift) fSETBIT(j, DST, VAL); \ } \ } while (0) +#define fCOUNTONES_2(VAL) ctpop16(VAL) #define fCOUNTONES_4(VAL) ctpop32(VAL) #define fCOUNTONES_8(VAL) ctpop64(VAL) #define fBREV_8(VAL) revbit64(VAL) #define fBREV_4(VAL) revbit32(VAL) #define fCL1_8(VAL) clo64(VAL) #define fCL1_4(VAL) clo32(VAL) +#define fCL1_2(VAL) (clz32(~(uint16_t)(VAL) & 0xffff) - 16) #define fINTERLEAVE(ODD, EVEN) interleave(ODD, EVEN) #define fDEINTERLEAVE(MIXED) deinterleave(MIXED) #define fHIDE(A) A diff --git a/target/hexagon/meson.build b/target/hexagon/meson.build index 6fd9360b74..b61243103f 100644 --- 
a/target/hexagon/meson.build +++ b/target/hexagon/meson.build @@ -20,6 +20,7 @@ hexagon_ss = ss.source_set() hex_common_py = 'hex_common.py' attribs_def = meson.current_source_dir() / 'attribs_def.h.inc' gen_tcg_h = meson.current_source_dir() / 'gen_tcg.h' +gen_tcg_hvx_h = meson.current_source_dir() / 'gen_tcg_hvx.h' # # Step 1 @@ -63,8 +64,8 @@ helper_protos_generated = custom_target( 'helper_protos_generated.h.inc', output: 'helper_protos_generated.h.inc', depends: [semantics_generated], - depend_files: [hex_common_py, attribs_def, gen_tcg_h], - command: [python, files('gen_helper_protos.py'), semantics_generated, attribs_def, gen_tcg_h, '@OUTPUT@'], + depend_files: [hex_common_py, attribs_def, gen_tcg_h, gen_tcg_hvx_h], + command: [python, files('gen_helper_protos.py'), semantics_generated, attribs_def, gen_tcg_h, gen_tcg_hvx_h, '@OUTPUT@'], ) hexagon_ss.add(helper_protos_generated) @@ -72,8 +73,8 @@ tcg_funcs_generated = custom_target( 'tcg_funcs_generated.c.inc', output: 'tcg_funcs_generated.c.inc', depends: [semantics_generated], - depend_files: [hex_common_py, attribs_def, gen_tcg_h], - command: [python, files('gen_tcg_funcs.py'), semantics_generated, attribs_def, gen_tcg_h, '@OUTPUT@'], + depend_files: [hex_common_py, attribs_def, gen_tcg_h, gen_tcg_hvx_h], + command: [python, files('gen_tcg_funcs.py'), semantics_generated, attribs_def, gen_tcg_h, gen_tcg_hvx_h, '@OUTPUT@'], ) hexagon_ss.add(tcg_funcs_generated) @@ -90,8 +91,8 @@ helper_funcs_generated = custom_target( 'helper_funcs_generated.c.inc', output: 'helper_funcs_generated.c.inc', depends: [semantics_generated], - depend_files: [hex_common_py, attribs_def, gen_tcg_h], - command: [python, files('gen_helper_funcs.py'), semantics_generated, attribs_def, gen_tcg_h, '@OUTPUT@'], + depend_files: [hex_common_py, attribs_def, gen_tcg_h, gen_tcg_hvx_h], + command: [python, files('gen_helper_funcs.py'), semantics_generated, attribs_def, gen_tcg_h, gen_tcg_hvx_h, '@OUTPUT@'], ) hexagon_ss.add(helper_funcs_generated) @@ -156,7 +157,8 @@ dectree_generated = custom_target( 'dectree_generated.h.inc', output: 'dectree_generated.h.inc', depends: [iset_py], - command: ['env', 'PYTHONPATH=' + meson.current_build_dir(), files('dectree.py'), '@OUTPUT@'], + env: {'PYTHONPATH': meson.current_build_dir()}, + command: [python, files('dectree.py'), '@OUTPUT@'], ) hexagon_ss.add(dectree_generated) @@ -173,6 +175,8 @@ hexagon_ss.add(files( 'printinsn.c', 'arch.c', 'fma_emu.c', + 'mmvec/decode_ext_mmvec.c', + 'mmvec/system_ext_mmvec.c', )) target_arch += {'hexagon': hexagon_ss} diff --git a/target/hexagon/mmvec/decode_ext_mmvec.c b/target/hexagon/mmvec/decode_ext_mmvec.c new file mode 100644 index 0000000000..061a65ab88 --- /dev/null +++ b/target/hexagon/mmvec/decode_ext_mmvec.c @@ -0,0 +1,236 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "decode.h" +#include "opcodes.h" +#include "insn.h" +#include "iclass.h" +#include "mmvec/mmvec.h" +#include "mmvec/decode_ext_mmvec.h" + +static void +check_new_value(Packet *pkt) +{ + /* .new value for a MMVector store */ + int i, j; + const char *reginfo; + const char *destletters; + const char *dststr = NULL; + uint16_t def_opcode; + char letter; + int def_regnum; + + for (i = 1; i < pkt->num_insns; i++) { + uint16_t use_opcode = pkt->insn[i].opcode; + if (GET_ATTRIB(use_opcode, A_DOTNEWVALUE) && + GET_ATTRIB(use_opcode, A_CVI) && + GET_ATTRIB(use_opcode, A_STORE)) { + int use_regidx = strchr(opcode_reginfo[use_opcode], 's') - + opcode_reginfo[use_opcode]; + /* + * What's encoded at the N-field is the offset to who's producing + * the value. + * Shift off the LSB which indicates odd/even register. + */ + int def_off = ((pkt->insn[i].regno[use_regidx]) >> 1); + int def_oreg = pkt->insn[i].regno[use_regidx] & 1; + int def_idx = -1; + for (j = i - 1; (j >= 0) && (def_off >= 0); j--) { + if (!GET_ATTRIB(pkt->insn[j].opcode, A_CVI)) { + continue; + } + def_off--; + if (def_off == 0) { + def_idx = j; + break; + } + } + /* + * Check for a badly encoded N-field which points to an instruction + * out-of-range + */ + g_assert(!((def_off != 0) || (def_idx < 0) || + (def_idx > (pkt->num_insns - 1)))); + + /* def_idx is the index of the producer */ + def_opcode = pkt->insn[def_idx].opcode; + reginfo = opcode_reginfo[def_opcode]; + destletters = "dexy"; + for (j = 0; (letter = destletters[j]) != 0; j++) { + dststr = strchr(reginfo, letter); + if (dststr != NULL) { + break; + } + } + if ((dststr == NULL) && GET_ATTRIB(def_opcode, A_CVI_GATHER)) { + def_regnum = 0; + pkt->insn[i].regno[use_regidx] = def_oreg; + pkt->insn[i].new_value_producer_slot = pkt->insn[def_idx].slot; + } else { + if (dststr == NULL) { + /* still not there, we have a bad packet */ + g_assert_not_reached(); + } + def_regnum = pkt->insn[def_idx].regno[dststr - reginfo]; + /* Now patch up the consumer with the register number */ + pkt->insn[i].regno[use_regidx] = def_regnum ^ def_oreg; + /* special case for (Vx,Vy) */ + dststr = strchr(reginfo, 'y'); + if (def_oreg && strchr(reginfo, 'x') && dststr) { + def_regnum = pkt->insn[def_idx].regno[dststr - reginfo]; + pkt->insn[i].regno[use_regidx] = def_regnum; + } + /* + * We need to remember who produces this value to later + * check if it was dynamically cancelled + */ + pkt->insn[i].new_value_producer_slot = pkt->insn[def_idx].slot; + } + } + } +} + +/* + * We don't want to reorder slot1/slot0 with respect to each other. + * So in our shuffling, we don't want to move the .cur / .tmp vmem earlier + * Instead, we should move the producing instruction later + * But the producing instruction might feed a .new store! + * So we may need to move that even later. 
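+ *
+ * As a purely hypothetical example, a packet decoded in the order
+ *
+ *     { v1 = vadd(v0,v2) ; v0.tmp = vmem(r0+#0) ; vmem(r1+#0) = v1.new }
+ *
+ * comes out of the helpers below with the vmem load first, the vadd that
+ * consumes the .tmp value after it, and the .new store last (an endloop
+ * instruction, if present, always stays at the very end).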
+ */ + +static void +decode_mmvec_move_cvi_to_end(Packet *pkt, int max) +{ + int i; + for (i = 0; i < max; i++) { + if (GET_ATTRIB(pkt->insn[i].opcode, A_CVI)) { + int last_inst = pkt->num_insns - 1; + uint16_t last_opcode = pkt->insn[last_inst].opcode; + + /* + * If the last instruction is an endloop, move to the one before it + * Keep endloop as the last thing always + */ + if ((last_opcode == J2_endloop0) || + (last_opcode == J2_endloop1) || + (last_opcode == J2_endloop01)) { + last_inst--; + } + + decode_send_insn_to(pkt, i, last_inst); + max--; + i--; /* Retry this index now that packet has rotated */ + } + } +} + +static void +decode_shuffle_for_execution_vops(Packet *pkt) +{ + /* + * Sort for .new + */ + int i; + for (i = 0; i < pkt->num_insns; i++) { + uint16_t opcode = pkt->insn[i].opcode; + if (GET_ATTRIB(opcode, A_LOAD) && + (GET_ATTRIB(opcode, A_CVI_NEW) || + GET_ATTRIB(opcode, A_CVI_TMP))) { + /* + * Find prior consuming vector instructions + * Move to end of packet + */ + decode_mmvec_move_cvi_to_end(pkt, i); + break; + } + } + + /* Move HVX new value stores to the end of the packet */ + for (i = 0; i < pkt->num_insns - 1; i++) { + uint16_t opcode = pkt->insn[i].opcode; + if (GET_ATTRIB(opcode, A_STORE) && + GET_ATTRIB(opcode, A_CVI_NEW) && + !GET_ATTRIB(opcode, A_CVI_SCATTER_RELEASE)) { + int last_inst = pkt->num_insns - 1; + uint16_t last_opcode = pkt->insn[last_inst].opcode; + + /* + * If the last instruction is an endloop, move to the one before it + * Keep endloop as the last thing always + */ + if ((last_opcode == J2_endloop0) || + (last_opcode == J2_endloop1) || + (last_opcode == J2_endloop01)) { + last_inst--; + } + + decode_send_insn_to(pkt, i, last_inst); + break; + } + } +} + +static void +check_for_vhist(Packet *pkt) +{ + pkt->vhist_insn = NULL; + for (int i = 0; i < pkt->num_insns; i++) { + Insn *insn = &pkt->insn[i]; + int opcode = insn->opcode; + if (GET_ATTRIB(opcode, A_CVI) && GET_ATTRIB(opcode, A_CVI_4SLOT)) { + pkt->vhist_insn = insn; + return; + } + } +} + +/* + * Public Functions + */ + +SlotMask mmvec_ext_decode_find_iclass_slots(int opcode) +{ + if (GET_ATTRIB(opcode, A_CVI_VM)) { + /* HVX memory instruction */ + if (GET_ATTRIB(opcode, A_RESTRICT_SLOT0ONLY)) { + return SLOTS_0; + } else if (GET_ATTRIB(opcode, A_RESTRICT_SLOT1ONLY)) { + return SLOTS_1; + } + return SLOTS_01; + } else if (GET_ATTRIB(opcode, A_RESTRICT_SLOT2ONLY)) { + return SLOTS_2; + } else if (GET_ATTRIB(opcode, A_CVI_VX)) { + /* HVX multiply instruction */ + return SLOTS_23; + } else if (GET_ATTRIB(opcode, A_CVI_VS_VX)) { + /* HVX permute/shift instruction */ + return SLOTS_23; + } else { + return SLOTS_0123; + } +} + +void mmvec_ext_decode_checks(Packet *pkt, bool disas_only) +{ + check_new_value(pkt); + if (!disas_only) { + decode_shuffle_for_execution_vops(pkt); + } + check_for_vhist(pkt); +} diff --git a/target/hexagon/mmvec/decode_ext_mmvec.h b/target/hexagon/mmvec/decode_ext_mmvec.h new file mode 100644 index 0000000000..3664b68cf9 --- /dev/null +++ b/target/hexagon/mmvec/decode_ext_mmvec.h @@ -0,0 +1,24 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef HEXAGON_DECODE_EXT_MMVEC_H +#define HEXAGON_DECODE_EXT_MMVEC_H + +void mmvec_ext_decode_checks(Packet *pkt, bool disas_only); +SlotMask mmvec_ext_decode_find_iclass_slots(int opcode); + +#endif diff --git a/target/hexagon/mmvec/macros.h b/target/hexagon/mmvec/macros.h new file mode 100644 index 0000000000..8345753580 --- /dev/null +++ b/target/hexagon/mmvec/macros.h @@ -0,0 +1,350 @@ +/* + * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef HEXAGON_MMVEC_MACROS_H +#define HEXAGON_MMVEC_MACROS_H + +#include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "arch.h" +#include "mmvec/system_ext_mmvec.h" + +#ifndef QEMU_GENERATE +#define VdV (*(MMVector *)(VdV_void)) +#define VsV (*(MMVector *)(VsV_void)) +#define VuV (*(MMVector *)(VuV_void)) +#define VvV (*(MMVector *)(VvV_void)) +#define VwV (*(MMVector *)(VwV_void)) +#define VxV (*(MMVector *)(VxV_void)) +#define VyV (*(MMVector *)(VyV_void)) + +#define VddV (*(MMVectorPair *)(VddV_void)) +#define VuuV (*(MMVectorPair *)(VuuV_void)) +#define VvvV (*(MMVectorPair *)(VvvV_void)) +#define VxxV (*(MMVectorPair *)(VxxV_void)) + +#define QeV (*(MMQReg *)(QeV_void)) +#define QdV (*(MMQReg *)(QdV_void)) +#define QsV (*(MMQReg *)(QsV_void)) +#define QtV (*(MMQReg *)(QtV_void)) +#define QuV (*(MMQReg *)(QuV_void)) +#define QvV (*(MMQReg *)(QvV_void)) +#define QxV (*(MMQReg *)(QxV_void)) +#endif + +#define LOG_VTCM_BYTE(VA, MASK, VAL, IDX) \ + do { \ + env->vtcm_log.data.ub[IDX] = (VAL); \ + if (MASK) { \ + set_bit((IDX), env->vtcm_log.mask); \ + } else { \ + clear_bit((IDX), env->vtcm_log.mask); \ + } \ + env->vtcm_log.va[IDX] = (VA); \ + } while (0) + +#define fNOTQ(VAL) \ + ({ \ + MMQReg _ret; \ + int _i_; \ + for (_i_ = 0; _i_ < fVECSIZE() / 64; _i_++) { \ + _ret.ud[_i_] = ~VAL.ud[_i_]; \ + } \ + _ret;\ + }) +#define fGETQBITS(REG, WIDTH, MASK, BITNO) \ + ((MASK) & (REG.w[(BITNO) >> 5] >> ((BITNO) & 0x1f))) +#define fGETQBIT(REG, BITNO) fGETQBITS(REG, 1, 1, BITNO) +#define fGENMASKW(QREG, IDX) \ + (((fGETQBIT(QREG, (IDX * 4 + 0)) ? 0xFF : 0x0) << 0) | \ + ((fGETQBIT(QREG, (IDX * 4 + 1)) ? 0xFF : 0x0) << 8) | \ + ((fGETQBIT(QREG, (IDX * 4 + 2)) ? 0xFF : 0x0) << 16) | \ + ((fGETQBIT(QREG, (IDX * 4 + 3)) ? 0xFF : 0x0) << 24)) +#define fGETNIBBLE(IDX, SRC) (fSXTN(4, 8, (SRC >> (4 * IDX)) & 0xF)) +#define fGETCRUMB(IDX, SRC) (fSXTN(2, 8, (SRC >> (2 * IDX)) & 0x3)) +#define fGETCRUMB_SYMMETRIC(IDX, SRC) \ + ((fGETCRUMB(IDX, SRC) >= 0 ? 
(2 - fGETCRUMB(IDX, SRC)) \ + : fGETCRUMB(IDX, SRC))) +#define fGENMASKH(QREG, IDX) \ + (((fGETQBIT(QREG, (IDX * 2 + 0)) ? 0xFF : 0x0) << 0) | \ + ((fGETQBIT(QREG, (IDX * 2 + 1)) ? 0xFF : 0x0) << 8)) +#define fGETMASKW(VREG, QREG, IDX) (VREG.w[IDX] & fGENMASKW((QREG), IDX)) +#define fGETMASKH(VREG, QREG, IDX) (VREG.h[IDX] & fGENMASKH((QREG), IDX)) +#define fCONDMASK8(QREG, IDX, YESVAL, NOVAL) \ + (fGETQBIT(QREG, IDX) ? (YESVAL) : (NOVAL)) +#define fCONDMASK16(QREG, IDX, YESVAL, NOVAL) \ + ((fGENMASKH(QREG, IDX) & (YESVAL)) | \ + (fGENMASKH(fNOTQ(QREG), IDX) & (NOVAL))) +#define fCONDMASK32(QREG, IDX, YESVAL, NOVAL) \ + ((fGENMASKW(QREG, IDX) & (YESVAL)) | \ + (fGENMASKW(fNOTQ(QREG), IDX) & (NOVAL))) +#define fSETQBITS(REG, WIDTH, MASK, BITNO, VAL) \ + do { \ + uint32_t __TMP = (VAL); \ + REG.w[(BITNO) >> 5] &= ~((MASK) << ((BITNO) & 0x1f)); \ + REG.w[(BITNO) >> 5] |= (((__TMP) & (MASK)) << ((BITNO) & 0x1f)); \ + } while (0) +#define fSETQBIT(REG, BITNO, VAL) fSETQBITS(REG, 1, 1, BITNO, VAL) +#define fVBYTES() (fVECSIZE()) +#define fVALIGN(ADDR, LOG2_ALIGNMENT) (ADDR = ADDR & ~(LOG2_ALIGNMENT - 1)) +#define fVLASTBYTE(ADDR, LOG2_ALIGNMENT) (ADDR = ADDR | (LOG2_ALIGNMENT - 1)) +#define fVELEM(WIDTH) ((fVECSIZE() * 8) / WIDTH) +#define fVECLOGSIZE() (7) +#define fVECSIZE() (1 << fVECLOGSIZE()) +#define fSWAPB(A, B) do { uint8_t tmp = A; A = B; B = tmp; } while (0) +#define fV_AL_CHECK(EA, MASK) \ + if ((EA) & (MASK)) { \ + warn("aligning misaligned vector. EA=%08x", (EA)); \ + } +#define fSCATTER_INIT(REGION_START, LENGTH, ELEMENT_SIZE) \ + mem_vector_scatter_init(env) +#define fGATHER_INIT(REGION_START, LENGTH, ELEMENT_SIZE) \ + mem_vector_gather_init(env) +#define fSCATTER_FINISH(OP) +#define fGATHER_FINISH() +#define fLOG_SCATTER_OP(SIZE) \ + do { \ + env->vtcm_log.op = true; \ + env->vtcm_log.op_size = SIZE; \ + } while (0) +#define fVLOG_VTCM_WORD_INCREMENT(EA, OFFSET, INC, IDX, ALIGNMENT, LEN) \ + do { \ + int log_byte = 0; \ + target_ulong va = EA; \ + target_ulong va_high = EA + LEN; \ + for (int i0 = 0; i0 < 4; i0++) { \ + log_byte = (va + i0) <= va_high; \ + LOG_VTCM_BYTE(va + i0, log_byte, INC. 
ub[4 * IDX + i0], \ + 4 * IDX + i0); \ + } \ + } while (0) +#define fVLOG_VTCM_HALFWORD_INCREMENT(EA, OFFSET, INC, IDX, ALIGNMENT, LEN) \ + do { \ + int log_byte = 0; \ + target_ulong va = EA; \ + target_ulong va_high = EA + LEN; \ + for (int i0 = 0; i0 < 2; i0++) { \ + log_byte = (va + i0) <= va_high; \ + LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[2 * IDX + i0], \ + 2 * IDX + i0); \ + } \ + } while (0) + +#define fVLOG_VTCM_HALFWORD_INCREMENT_DV(EA, OFFSET, INC, IDX, IDX2, IDX_H, \ + ALIGNMENT, LEN) \ + do { \ + int log_byte = 0; \ + target_ulong va = EA; \ + target_ulong va_high = EA + LEN; \ + for (int i0 = 0; i0 < 2; i0++) { \ + log_byte = (va + i0) <= va_high; \ + LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[2 * IDX + i0], \ + 2 * IDX + i0); \ + } \ + } while (0) + +/* NOTE - Will this always be tmp_VRegs[0]; */ +#define GATHER_FUNCTION(EA, OFFSET, IDX, LEN, ELEMENT_SIZE, BANK_IDX, QVAL) \ + do { \ + int i0; \ + target_ulong va = EA; \ + target_ulong va_high = EA + LEN; \ + uintptr_t ra = GETPC(); \ + int log_byte = 0; \ + for (i0 = 0; i0 < ELEMENT_SIZE; i0++) { \ + log_byte = ((va + i0) <= va_high) && QVAL; \ + uint8_t B; \ + B = cpu_ldub_data_ra(env, EA + i0, ra); \ + env->tmp_VRegs[0].ub[ELEMENT_SIZE * IDX + i0] = B; \ + LOG_VTCM_BYTE(va + i0, log_byte, B, ELEMENT_SIZE * IDX + i0); \ + } \ + } while (0) +#define fVLOG_VTCM_GATHER_WORD(EA, OFFSET, IDX, LEN) \ + do { \ + GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, 1); \ + } while (0) +#define fVLOG_VTCM_GATHER_HALFWORD(EA, OFFSET, IDX, LEN) \ + do { \ + GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, 1); \ + } while (0) +#define fVLOG_VTCM_GATHER_HALFWORD_DV(EA, OFFSET, IDX, IDX2, IDX_H, LEN) \ + do { \ + GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), 1); \ + } while (0) +#define fVLOG_VTCM_GATHER_WORDQ(EA, OFFSET, IDX, Q, LEN) \ + do { \ + GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, \ + fGETQBIT(QsV, 4 * IDX + i0)); \ + } while (0) +#define fVLOG_VTCM_GATHER_HALFWORDQ(EA, OFFSET, IDX, Q, LEN) \ + do { \ + GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, \ + fGETQBIT(QsV, 2 * IDX + i0)); \ + } while (0) +#define fVLOG_VTCM_GATHER_HALFWORDQ_DV(EA, OFFSET, IDX, IDX2, IDX_H, Q, LEN) \ + do { \ + GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), \ + fGETQBIT(QsV, 2 * IDX + i0)); \ + } while (0) +#define SCATTER_OP_WRITE_TO_MEM(TYPE) \ + do { \ + uintptr_t ra = GETPC(); \ + for (int i = 0; i < sizeof(MMVector); i += sizeof(TYPE)) { \ + if (test_bit(i, env->vtcm_log.mask)) { \ + TYPE dst = 0; \ + TYPE inc = 0; \ + for (int j = 0; j < sizeof(TYPE); j++) { \ + uint8_t val; \ + val = cpu_ldub_data_ra(env, env->vtcm_log.va[i + j], ra); \ + dst |= val << (8 * j); \ + inc |= env->vtcm_log.data.ub[j + i] << (8 * j); \ + clear_bit(j + i, env->vtcm_log.mask); \ + env->vtcm_log.data.ub[j + i] = 0; \ + } \ + dst += inc; \ + for (int j = 0; j < sizeof(TYPE); j++) { \ + cpu_stb_data_ra(env, env->vtcm_log.va[i + j], \ + (dst >> (8 * j)) & 0xFF, ra); \ + } \ + } \ + } \ + } while (0) +#define SCATTER_OP_PROBE_MEM(TYPE, MMU_IDX, RETADDR) \ + do { \ + for (int i = 0; i < sizeof(MMVector); i += sizeof(TYPE)) { \ + if (test_bit(i, env->vtcm_log.mask)) { \ + for (int j = 0; j < sizeof(TYPE); j++) { \ + probe_read(env, env->vtcm_log.va[i + j], 1, \ + MMU_IDX, RETADDR); \ + probe_write(env, env->vtcm_log.va[i + j], 1, \ + MMU_IDX, RETADDR); \ + } \ + } \ + } \ + } while (0) +#define SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, ELEM_SIZE, BANK_IDX, QVAL, IN) \ + do { \ + int i0; \ + target_ulong va = EA; \ + target_ulong va_high = EA + LEN; \ + int 
log_byte = 0; \ + for (i0 = 0; i0 < ELEM_SIZE; i0++) { \ + log_byte = ((va + i0) <= va_high) && QVAL; \ + LOG_VTCM_BYTE(va + i0, log_byte, IN.ub[ELEM_SIZE * IDX + i0], \ + ELEM_SIZE * IDX + i0); \ + } \ + } while (0) +#define fVLOG_VTCM_HALFWORD(EA, OFFSET, IN, IDX, LEN) \ + do { \ + SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, 1, IN); \ + } while (0) +#define fVLOG_VTCM_WORD(EA, OFFSET, IN, IDX, LEN) \ + do { \ + SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, 1, IN); \ + } while (0) +#define fVLOG_VTCM_HALFWORDQ(EA, OFFSET, IN, IDX, Q, LEN) \ + do { \ + SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, \ + fGETQBIT(QsV, 2 * IDX + i0), IN); \ + } while (0) +#define fVLOG_VTCM_WORDQ(EA, OFFSET, IN, IDX, Q, LEN) \ + do { \ + SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, \ + fGETQBIT(QsV, 4 * IDX + i0), IN); \ + } while (0) +#define fVLOG_VTCM_HALFWORD_DV(EA, OFFSET, IN, IDX, IDX2, IDX_H, LEN) \ + do { \ + SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, \ + (2 * IDX2 + IDX_H), 1, IN); \ + } while (0) +#define fVLOG_VTCM_HALFWORDQ_DV(EA, OFFSET, IN, IDX, Q, IDX2, IDX_H, LEN) \ + do { \ + SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), \ + fGETQBIT(QsV, 2 * IDX + i0), IN); \ + } while (0) +#define fSTORERELEASE(EA, TYPE) \ + do { \ + fV_AL_CHECK(EA, fVECSIZE() - 1); \ + } while (0) +#ifdef QEMU_GENERATE +#define fLOADMMV(EA, DST) gen_vreg_load(ctx, DST##_off, EA, true) +#endif +#ifdef QEMU_GENERATE +#define fLOADMMVU(EA, DST) gen_vreg_load(ctx, DST##_off, EA, false) +#endif +#ifdef QEMU_GENERATE +#define fSTOREMMV(EA, SRC) \ + gen_vreg_store(ctx, insn, pkt, EA, SRC##_off, insn->slot, true) +#endif +#ifdef QEMU_GENERATE +#define fSTOREMMVQ(EA, SRC, MASK) \ + gen_vreg_masked_store(ctx, EA, SRC##_off, MASK##_off, insn->slot, false) +#endif +#ifdef QEMU_GENERATE +#define fSTOREMMVNQ(EA, SRC, MASK) \ + gen_vreg_masked_store(ctx, EA, SRC##_off, MASK##_off, insn->slot, true) +#endif +#ifdef QEMU_GENERATE +#define fSTOREMMVU(EA, SRC) \ + gen_vreg_store(ctx, insn, pkt, EA, SRC##_off, insn->slot, false) +#endif +#define fVFOREACH(WIDTH, VAR) for (VAR = 0; VAR < fVELEM(WIDTH); VAR++) +#define fVARRAY_ELEMENT_ACCESS(ARRAY, TYPE, INDEX) \ + ARRAY.v[(INDEX) / (fVECSIZE() / (sizeof(ARRAY.TYPE[0])))].TYPE[(INDEX) % \ + (fVECSIZE() / (sizeof(ARRAY.TYPE[0])))] + +#define fVSATDW(U, V) fVSATW(((((long long)U) << 32) | fZXTN(32, 64, V))) +#define fVASL_SATHI(U, V) fVSATW(((U) << 1) | ((V) >> 31)) +#define fVUADDSAT(WIDTH, U, V) \ + fVSATUN(WIDTH, fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V)) +#define fVSADDSAT(WIDTH, U, V) \ + fVSATN(WIDTH, fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V)) +#define fVUSUBSAT(WIDTH, U, V) \ + fVSATUN(WIDTH, fZXTN(WIDTH, 2 * WIDTH, U) - fZXTN(WIDTH, 2 * WIDTH, V)) +#define fVSSUBSAT(WIDTH, U, V) \ + fVSATN(WIDTH, fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V)) +#define fVAVGU(WIDTH, U, V) \ + ((fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V)) >> 1) +#define fVAVGURND(WIDTH, U, V) \ + ((fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1) +#define fVNAVGU(WIDTH, U, V) \ + ((fZXTN(WIDTH, 2 * WIDTH, U) - fZXTN(WIDTH, 2 * WIDTH, V)) >> 1) +#define fVNAVGURNDSAT(WIDTH, U, V) \ + fVSATUN(WIDTH, ((fZXTN(WIDTH, 2 * WIDTH, U) - \ + fZXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)) +#define fVAVGS(WIDTH, U, V) \ + ((fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V)) >> 1) +#define fVAVGSRND(WIDTH, U, V) \ + ((fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1) +#define fVNAVGS(WIDTH, U, V) \ + ((fSXTN(WIDTH, 2 * WIDTH, U) - 
fSXTN(WIDTH, 2 * WIDTH, V)) >> 1) +#define fVNAVGSRND(WIDTH, U, V) \ + ((fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1) +#define fVNAVGSRNDSAT(WIDTH, U, V) \ + fVSATN(WIDTH, ((fSXTN(WIDTH, 2 * WIDTH, U) - \ + fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)) +#define fVNOROUND(VAL, SHAMT) VAL +#define fVNOSAT(VAL) VAL +#define fVROUND(VAL, SHAMT) \ + ((VAL) + (((SHAMT) > 0) ? (1LL << ((SHAMT) - 1)) : 0)) +#define fCARRY_FROM_ADD32(A, B, C) \ + (((fZXTN(32, 64, A) + fZXTN(32, 64, B) + C) >> 32) & 1) +#define fUARCH_NOTE_PUMP_4X() +#define fUARCH_NOTE_PUMP_2X() + +#define IV1DEAD() +#endif diff --git a/target/hexagon/mmvec/mmvec.h b/target/hexagon/mmvec/mmvec.h new file mode 100644 index 0000000000..52d470709c --- /dev/null +++ b/target/hexagon/mmvec/mmvec.h @@ -0,0 +1,82 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef HEXAGON_MMVEC_H +#define HEXAGON_MMVEC_H + +#define MAX_VEC_SIZE_LOGBYTES 7 +#define MAX_VEC_SIZE_BYTES (1 << MAX_VEC_SIZE_LOGBYTES) + +#define NUM_VREGS 32 +#define NUM_QREGS 4 + +typedef uint32_t VRegMask; /* at least NUM_VREGS bits */ +typedef uint32_t QRegMask; /* at least NUM_QREGS bits */ + +#define VECTOR_SIZE_BYTE (fVECSIZE()) + +typedef union { + uint64_t ud[MAX_VEC_SIZE_BYTES / 8]; + int64_t d[MAX_VEC_SIZE_BYTES / 8]; + uint32_t uw[MAX_VEC_SIZE_BYTES / 4]; + int32_t w[MAX_VEC_SIZE_BYTES / 4]; + uint16_t uh[MAX_VEC_SIZE_BYTES / 2]; + int16_t h[MAX_VEC_SIZE_BYTES / 2]; + uint8_t ub[MAX_VEC_SIZE_BYTES / 1]; + int8_t b[MAX_VEC_SIZE_BYTES / 1]; +} MMVector; + +typedef union { + uint64_t ud[2 * MAX_VEC_SIZE_BYTES / 8]; + int64_t d[2 * MAX_VEC_SIZE_BYTES / 8]; + uint32_t uw[2 * MAX_VEC_SIZE_BYTES / 4]; + int32_t w[2 * MAX_VEC_SIZE_BYTES / 4]; + uint16_t uh[2 * MAX_VEC_SIZE_BYTES / 2]; + int16_t h[2 * MAX_VEC_SIZE_BYTES / 2]; + uint8_t ub[2 * MAX_VEC_SIZE_BYTES / 1]; + int8_t b[2 * MAX_VEC_SIZE_BYTES / 1]; + MMVector v[2]; +} MMVectorPair; + +typedef union { + uint64_t ud[MAX_VEC_SIZE_BYTES / 8 / 8]; + int64_t d[MAX_VEC_SIZE_BYTES / 8 / 8]; + uint32_t uw[MAX_VEC_SIZE_BYTES / 4 / 8]; + int32_t w[MAX_VEC_SIZE_BYTES / 4 / 8]; + uint16_t uh[MAX_VEC_SIZE_BYTES / 2 / 8]; + int16_t h[MAX_VEC_SIZE_BYTES / 2 / 8]; + uint8_t ub[MAX_VEC_SIZE_BYTES / 1 / 8]; + int8_t b[MAX_VEC_SIZE_BYTES / 1 / 8]; +} MMQReg; + +typedef struct { + MMVector data; + DECLARE_BITMAP(mask, MAX_VEC_SIZE_BYTES); + target_ulong va[MAX_VEC_SIZE_BYTES]; + bool op; + int op_size; +} VTCMStoreLog; + + +/* Types of vector register assignment */ +typedef enum { + EXT_DFL, /* Default */ + EXT_NEW, /* New - value used in the same packet */ + EXT_TMP /* Temp - value used but not stored to register */ +} VRegWriteType; + +#endif diff --git a/target/hexagon/mmvec/system_ext_mmvec.c b/target/hexagon/mmvec/system_ext_mmvec.c new file mode 100644 index 0000000000..8351f2cc01 --- /dev/null +++ b/target/hexagon/mmvec/system_ext_mmvec.c @@ -0,0 
+1,47 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "mmvec/system_ext_mmvec.h" + +void mem_gather_store(CPUHexagonState *env, target_ulong vaddr, int slot) +{ + size_t size = sizeof(MMVector); + + env->vstore_pending[slot] = 1; + env->vstore[slot].va = vaddr; + env->vstore[slot].size = size; + memcpy(&env->vstore[slot].data.ub[0], &env->tmp_VRegs[0], size); + + /* On a gather store, overwrite the store mask to emulate dropped gathers */ + bitmap_copy(env->vstore[slot].mask, env->vtcm_log.mask, size); +} + +void mem_vector_scatter_init(CPUHexagonState *env) +{ + bitmap_zero(env->vtcm_log.mask, MAX_VEC_SIZE_BYTES); + + env->vtcm_pending = true; + env->vtcm_log.op = false; + env->vtcm_log.op_size = 0; +} + +void mem_vector_gather_init(CPUHexagonState *env) +{ + bitmap_zero(env->vtcm_log.mask, MAX_VEC_SIZE_BYTES); +} diff --git a/target/hexagon/mmvec/system_ext_mmvec.h b/target/hexagon/mmvec/system_ext_mmvec.h new file mode 100644 index 0000000000..bcefbffdf2 --- /dev/null +++ b/target/hexagon/mmvec/system_ext_mmvec.h @@ -0,0 +1,25 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef HEXAGON_SYSTEM_EXT_MMVEC_H +#define HEXAGON_SYSTEM_EXT_MMVEC_H + +void mem_gather_store(CPUHexagonState *env, target_ulong vaddr, int slot); +void mem_vector_scatter_init(CPUHexagonState *env); +void mem_vector_gather_init(CPUHexagonState *env); + +#endif diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c index 61d5cde939..085afc3274 100644 --- a/target/hexagon/op_helper.c +++ b/target/hexagon/op_helper.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,14 +27,17 @@ #include "arch.h" #include "hex_arch_types.h" #include "fma_emu.h" +#include "mmvec/mmvec.h" +#include "mmvec/macros.h" #define SF_BIAS 127 #define SF_MANTBITS 23 /* Exceptions processing helpers */ -static void QEMU_NORETURN do_raise_exception_err(CPUHexagonState *env, - uint32_t exception, - uintptr_t pc) +static G_NORETURN +void do_raise_exception_err(CPUHexagonState *env, + uint32_t exception, + uintptr_t pc) { CPUState *cs = env_cpu(env); qemu_log_mask(CPU_LOG_INT, "%s: %d\n", __func__, exception); @@ -42,7 +45,7 @@ static void QEMU_NORETURN do_raise_exception_err(CPUHexagonState *env, cpu_loop_exit_restore(cs, pc); } -void QEMU_NORETURN HELPER(raise_exception)(CPUHexagonState *env, uint32_t excp) +G_NORETURN void HELPER(raise_exception)(CPUHexagonState *env, uint32_t excp) { do_raise_exception_err(env, excp, 0); } @@ -164,6 +167,57 @@ void HELPER(commit_store)(CPUHexagonState *env, int slot_num) } } +void HELPER(gather_store)(CPUHexagonState *env, uint32_t addr, int slot) +{ + mem_gather_store(env, addr, slot); +} + +void HELPER(commit_hvx_stores)(CPUHexagonState *env) +{ + uintptr_t ra = GETPC(); + int i; + + /* Normal (possibly masked) vector store */ + for (i = 0; i < VSTORES_MAX; i++) { + if (env->vstore_pending[i]) { + env->vstore_pending[i] = 0; + target_ulong va = env->vstore[i].va; + int size = env->vstore[i].size; + for (int j = 0; j < size; j++) { + if (test_bit(j, env->vstore[i].mask)) { + cpu_stb_data_ra(env, va + j, env->vstore[i].data.ub[j], ra); + } + } + } + } + + /* Scatter store */ + if (env->vtcm_pending) { + env->vtcm_pending = false; + if (env->vtcm_log.op) { + /* Need to perform the scatter read/modify/write at commit time */ + if (env->vtcm_log.op_size == 2) { + SCATTER_OP_WRITE_TO_MEM(uint16_t); + } else if (env->vtcm_log.op_size == 4) { + /* Word Scatter += */ + SCATTER_OP_WRITE_TO_MEM(uint32_t); + } else { + g_assert_not_reached(); + } + } else { + for (i = 0; i < sizeof(MMVector); i++) { + if (test_bit(i, env->vtcm_log.mask)) { + cpu_stb_data_ra(env, env->vtcm_log.va[i], + env->vtcm_log.data.ub[i], ra); + clear_bit(i, env->vtcm_log.mask); + env->vtcm_log.data.ub[i] = 0; + } + + } + } + } +} + static void print_store(CPUHexagonState *env, int slot) { if (!(env->slot_cancelled & (1 << slot))) { @@ -242,16 +296,17 @@ void HELPER(debug_commit_end)(CPUHexagonState *env, int has_st0, int has_st1) HEX_DEBUG_LOG("Next PC = " TARGET_FMT_lx "\n", env->next_PC); HEX_DEBUG_LOG("Exec counters: pkt = " TARGET_FMT_lx ", insn = " TARGET_FMT_lx - "\n", + ", hvx = " TARGET_FMT_lx "\n", env->gpr[HEX_REG_QEMU_PKT_CNT], - env->gpr[HEX_REG_QEMU_INSN_CNT]); + env->gpr[HEX_REG_QEMU_INSN_CNT], + env->gpr[HEX_REG_QEMU_HVX_CNT]); } int32_t HELPER(fcircadd)(int32_t RxV, int32_t offset, int32_t M, int32_t CS) { - int32_t K_const = sextract32(M, 24, 4); - int32_t length = sextract32(M, 0, 17); + uint32_t K_const = extract32(M, 24, 4); + uint32_t length = extract32(M, 0, 17); uint32_t new_ptr = RxV + offset; uint32_t start_addr; uint32_t end_addr; @@ -377,6 +432,92 @@ int32_t HELPER(vacsh_pred)(CPUHexagonState *env, return PeV; } +static void probe_store(CPUHexagonState *env, int slot, int mmu_idx) +{ + if (!(env->slot_cancelled & (1 << slot))) { + size1u_t width = env->mem_log_stores[slot].width; + target_ulong va = env->mem_log_stores[slot].va; + uintptr_t ra = GETPC(); + probe_write(env, va, width, mmu_idx, ra); + } +} + 
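+
+/*
+ * How probe_store() fits into a packet with two scalar stores, roughly
+ * (a simplified sketch; the real sequencing is generated by
+ * gen_commit_packet() in translate.c):
+ *
+ *     probe_pkt_scalar_store_s0(env, mmu_idx); // fault on slot 0 raised here
+ *     commit_store(env, 1);                    // slot 1 store goes first
+ *     commit_store(env, 0);                    // slot 0 only if both are OK
+ */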
+/* + * Called from a mem_noshuf packet to make sure the load doesn't + * raise an exception + */ +void HELPER(probe_noshuf_load)(CPUHexagonState *env, target_ulong va, + int size, int mmu_idx) +{ + uintptr_t retaddr = GETPC(); + probe_read(env, va, size, mmu_idx, retaddr); +} + +/* Called during packet commit when there are two scalar stores */ +void HELPER(probe_pkt_scalar_store_s0)(CPUHexagonState *env, int mmu_idx) +{ + probe_store(env, 0, mmu_idx); +} + +void HELPER(probe_hvx_stores)(CPUHexagonState *env, int mmu_idx) +{ + uintptr_t retaddr = GETPC(); + int i; + + /* Normal (possibly masked) vector store */ + for (i = 0; i < VSTORES_MAX; i++) { + if (env->vstore_pending[i]) { + target_ulong va = env->vstore[i].va; + int size = env->vstore[i].size; + for (int j = 0; j < size; j++) { + if (test_bit(j, env->vstore[i].mask)) { + probe_write(env, va + j, 1, mmu_idx, retaddr); + } + } + } + } + + /* Scatter store */ + if (env->vtcm_pending) { + if (env->vtcm_log.op) { + /* Need to perform the scatter read/modify/write at commit time */ + if (env->vtcm_log.op_size == 2) { + SCATTER_OP_PROBE_MEM(size2u_t, mmu_idx, retaddr); + } else if (env->vtcm_log.op_size == 4) { + /* Word Scatter += */ + SCATTER_OP_PROBE_MEM(size4u_t, mmu_idx, retaddr); + } else { + g_assert_not_reached(); + } + } else { + for (int i = 0; i < sizeof(MMVector); i++) { + if (test_bit(i, env->vtcm_log.mask)) { + probe_write(env, env->vtcm_log.va[i], 1, mmu_idx, retaddr); + } + + } + } + } +} + +void HELPER(probe_pkt_scalar_hvx_stores)(CPUHexagonState *env, int mask, + int mmu_idx) +{ + bool has_st0 = (mask >> 0) & 1; + bool has_st1 = (mask >> 1) & 1; + bool has_hvx_stores = (mask >> 2) & 1; + + if (has_st0) { + probe_store(env, 0, mmu_idx); + } + if (has_st1) { + probe_store(env, 1, mmu_idx); + } + if (has_hvx_stores) { + HELPER(probe_hvx_stores)(env, mmu_idx); + } +} + /* * mem_noshuf * Section 5.5 of the Hexagon V67 Programmer's Reference Manual @@ -384,10 +525,12 @@ int32_t HELPER(vacsh_pred)(CPUHexagonState *env, * If the load is in slot 0 and there is a store in slot1 (that * wasn't cancelled), we have to do the store first. 
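 *
 * For example (purely illustrative), given a packet such as
 *     { memw(r1+#0) = r2 ; r3 = memw(r0+#0) }
 * with r0 pointing at an unmapped page, check_noshuf() below now probes
 * the load address first, so the fault is taken before the slot 1 store
 * is committed rather than after it.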
*/ -static void check_noshuf(CPUHexagonState *env, uint32_t slot) +static void check_noshuf(CPUHexagonState *env, uint32_t slot, + target_ulong vaddr, int size) { if (slot == 0 && env->pkt_has_store_s1 && ((env->slot_cancelled & (1 << 1)) == 0)) { + HELPER(probe_noshuf_load)(env, vaddr, size, MMU_USER_IDX); HELPER(commit_store)(env, 1); } } @@ -396,7 +539,7 @@ static uint8_t mem_load1(CPUHexagonState *env, uint32_t slot, target_ulong vaddr) { uintptr_t ra = GETPC(); - check_noshuf(env, slot); + check_noshuf(env, slot, vaddr, 1); return cpu_ldub_data_ra(env, vaddr, ra); } @@ -404,7 +547,7 @@ static uint16_t mem_load2(CPUHexagonState *env, uint32_t slot, target_ulong vaddr) { uintptr_t ra = GETPC(); - check_noshuf(env, slot); + check_noshuf(env, slot, vaddr, 2); return cpu_lduw_data_ra(env, vaddr, ra); } @@ -412,7 +555,7 @@ static uint32_t mem_load4(CPUHexagonState *env, uint32_t slot, target_ulong vaddr) { uintptr_t ra = GETPC(); - check_noshuf(env, slot); + check_noshuf(env, slot, vaddr, 4); return cpu_ldl_data_ra(env, vaddr, ra); } @@ -420,7 +563,7 @@ static uint64_t mem_load8(CPUHexagonState *env, uint32_t slot, target_ulong vaddr) { uintptr_t ra = GETPC(); - check_noshuf(env, slot); + check_noshuf(env, slot, vaddr, 8); return cpu_ldq_data_ra(env, vaddr, ra); } @@ -700,7 +843,7 @@ uint32_t HELPER(conv_df2uw_chop)(CPUHexagonState *env, float64 RssV) uint32_t RdV; arch_fpop_start(env); /* Hexagon checks the sign before rounding */ - if (float64_is_neg(RssV) && !float32_is_any_nan(RssV)) { + if (float64_is_neg(RssV) && !float64_is_any_nan(RssV)) { float_raise(float_flag_invalid, &env->fp_status); RdV = 0; } else { @@ -809,8 +952,7 @@ int32_t HELPER(sfcmpuo)(CPUHexagonState *env, float32 RsV, float32 RtV) { int32_t PdV; arch_fpop_start(env); - PdV = f8BITSOF(float32_is_any_nan(RsV) || - float32_is_any_nan(RtV)); + PdV = f8BITSOF(float32_unordered_quiet(RsV, RtV, &env->fp_status)); arch_fpop_end(env); return PdV; } @@ -819,7 +961,7 @@ float32 HELPER(sfmax)(CPUHexagonState *env, float32 RsV, float32 RtV) { float32 RdV; arch_fpop_start(env); - RdV = float32_maxnum(RsV, RtV, &env->fp_status); + RdV = float32_maximum_number(RsV, RtV, &env->fp_status); arch_fpop_end(env); return RdV; } @@ -828,7 +970,7 @@ float32 HELPER(sfmin)(CPUHexagonState *env, float32 RsV, float32 RtV) { float32 RdV; arch_fpop_start(env); - RdV = float32_minnum(RsV, RtV, &env->fp_status); + RdV = float32_minimum_number(RsV, RtV, &env->fp_status); arch_fpop_end(env); return RdV; } @@ -912,10 +1054,7 @@ float64 HELPER(dfmax)(CPUHexagonState *env, float64 RssV, float64 RttV) { float64 RddV; arch_fpop_start(env); - RddV = float64_maxnum(RssV, RttV, &env->fp_status); - if (float64_is_any_nan(RssV) || float64_is_any_nan(RttV)) { - float_raise(float_flag_invalid, &env->fp_status); - } + RddV = float64_maximum_number(RssV, RttV, &env->fp_status); arch_fpop_end(env); return RddV; } @@ -924,10 +1063,7 @@ float64 HELPER(dfmin)(CPUHexagonState *env, float64 RssV, float64 RttV) { float64 RddV; arch_fpop_start(env); - RddV = float64_minnum(RssV, RttV, &env->fp_status); - if (float64_is_any_nan(RssV) || float64_is_any_nan(RttV)) { - float_raise(float_flag_invalid, &env->fp_status); - } + RddV = float64_minimum_number(RssV, RttV, &env->fp_status); arch_fpop_end(env); return RddV; } @@ -968,8 +1104,7 @@ int32_t HELPER(dfcmpuo)(CPUHexagonState *env, float64 RssV, float64 RttV) { int32_t PdV; arch_fpop_start(env); - PdV = f8BITSOF(float64_is_any_nan(RssV) || - float64_is_any_nan(RttV)); + PdV = f8BITSOF(float64_unordered_quiet(RssV, RttV, 
&env->fp_status)); arch_fpop_end(env); return PdV; } @@ -1165,6 +1300,171 @@ float64 HELPER(dfmpyhh)(CPUHexagonState *env, float64 RxxV, return RxxV; } +/* Histogram instructions */ + +void HELPER(vhist)(CPUHexagonState *env) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int lane = 0; lane < 8; lane++) { + for (int i = 0; i < sizeof(MMVector) / 8; ++i) { + unsigned char value = input->ub[(sizeof(MMVector) / 8) * lane + i]; + unsigned char regno = value >> 3; + unsigned char element = value & 7; + + env->VRegs[regno].uh[(sizeof(MMVector) / 16) * lane + element]++; + } + } +} + +void HELPER(vhistq)(CPUHexagonState *env) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int lane = 0; lane < 8; lane++) { + for (int i = 0; i < sizeof(MMVector) / 8; ++i) { + unsigned char value = input->ub[(sizeof(MMVector) / 8) * lane + i]; + unsigned char regno = value >> 3; + unsigned char element = value & 7; + + if (fGETQBIT(env->qtmp, sizeof(MMVector) / 8 * lane + i)) { + env->VRegs[regno].uh[ + (sizeof(MMVector) / 16) * lane + element]++; + } + } + } +} + +void HELPER(vwhist256)(CPUHexagonState *env) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int i = 0; i < (sizeof(MMVector) / 2); i++) { + unsigned int bucket = fGETUBYTE(0, input->h[i]); + unsigned int weight = fGETUBYTE(1, input->h[i]); + unsigned int vindex = (bucket >> 3) & 0x1F; + unsigned int elindex = ((i >> 0) & (~7)) | ((bucket >> 0) & 7); + + env->VRegs[vindex].uh[elindex] = + env->VRegs[vindex].uh[elindex] + weight; + } +} + +void HELPER(vwhist256q)(CPUHexagonState *env) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int i = 0; i < (sizeof(MMVector) / 2); i++) { + unsigned int bucket = fGETUBYTE(0, input->h[i]); + unsigned int weight = fGETUBYTE(1, input->h[i]); + unsigned int vindex = (bucket >> 3) & 0x1F; + unsigned int elindex = ((i >> 0) & (~7)) | ((bucket >> 0) & 7); + + if (fGETQBIT(env->qtmp, 2 * i)) { + env->VRegs[vindex].uh[elindex] = + env->VRegs[vindex].uh[elindex] + weight; + } + } +} + +void HELPER(vwhist256_sat)(CPUHexagonState *env) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int i = 0; i < (sizeof(MMVector) / 2); i++) { + unsigned int bucket = fGETUBYTE(0, input->h[i]); + unsigned int weight = fGETUBYTE(1, input->h[i]); + unsigned int vindex = (bucket >> 3) & 0x1F; + unsigned int elindex = ((i >> 0) & (~7)) | ((bucket >> 0) & 7); + + env->VRegs[vindex].uh[elindex] = + fVSATUH(env->VRegs[vindex].uh[elindex] + weight); + } +} + +void HELPER(vwhist256q_sat)(CPUHexagonState *env) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int i = 0; i < (sizeof(MMVector) / 2); i++) { + unsigned int bucket = fGETUBYTE(0, input->h[i]); + unsigned int weight = fGETUBYTE(1, input->h[i]); + unsigned int vindex = (bucket >> 3) & 0x1F; + unsigned int elindex = ((i >> 0) & (~7)) | ((bucket >> 0) & 7); + + if (fGETQBIT(env->qtmp, 2 * i)) { + env->VRegs[vindex].uh[elindex] = + fVSATUH(env->VRegs[vindex].uh[elindex] + weight); + } + } +} + +void HELPER(vwhist128)(CPUHexagonState *env) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int i = 0; i < (sizeof(MMVector) / 2); i++) { + unsigned int bucket = fGETUBYTE(0, input->h[i]); + unsigned int weight = fGETUBYTE(1, input->h[i]); + unsigned int vindex = (bucket >> 3) & 0x1F; + unsigned int elindex = ((i >> 1) & (~3)) | ((bucket >> 1) & 3); + + env->VRegs[vindex].uw[elindex] = + env->VRegs[vindex].uw[elindex] + weight; + } +} + +void HELPER(vwhist128q)(CPUHexagonState *env) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int i = 0; i < (sizeof(MMVector) / 2); i++) { + 
unsigned int bucket = fGETUBYTE(0, input->h[i]); + unsigned int weight = fGETUBYTE(1, input->h[i]); + unsigned int vindex = (bucket >> 3) & 0x1F; + unsigned int elindex = ((i >> 1) & (~3)) | ((bucket >> 1) & 3); + + if (fGETQBIT(env->qtmp, 2 * i)) { + env->VRegs[vindex].uw[elindex] = + env->VRegs[vindex].uw[elindex] + weight; + } + } +} + +void HELPER(vwhist128m)(CPUHexagonState *env, int32_t uiV) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int i = 0; i < (sizeof(MMVector) / 2); i++) { + unsigned int bucket = fGETUBYTE(0, input->h[i]); + unsigned int weight = fGETUBYTE(1, input->h[i]); + unsigned int vindex = (bucket >> 3) & 0x1F; + unsigned int elindex = ((i >> 1) & (~3)) | ((bucket >> 1) & 3); + + if ((bucket & 1) == uiV) { + env->VRegs[vindex].uw[elindex] = + env->VRegs[vindex].uw[elindex] + weight; + } + } +} + +void HELPER(vwhist128qm)(CPUHexagonState *env, int32_t uiV) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int i = 0; i < (sizeof(MMVector) / 2); i++) { + unsigned int bucket = fGETUBYTE(0, input->h[i]); + unsigned int weight = fGETUBYTE(1, input->h[i]); + unsigned int vindex = (bucket >> 3) & 0x1F; + unsigned int elindex = ((i >> 1) & (~3)) | ((bucket >> 1) & 3); + + if (((bucket & 1) == uiV) && fGETQBIT(env->qtmp, 2 * i)) { + env->VRegs[vindex].uw[elindex] = + env->VRegs[vindex].uw[elindex] + weight; + } + } +} + static void cancel_slot(CPUHexagonState *env, uint32_t slot) { HEX_DEBUG_LOG("Slot %d cancelled\n", slot); diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c index 54fdcaa5e8..2329177537 100644 --- a/target/hexagon/translate.c +++ b/target/hexagon/translate.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -19,6 +19,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "tcg/tcg-op.h" +#include "tcg/tcg-op-gvec.h" #include "exec/cpu_ldst.h" #include "exec/log.h" #include "internal.h" @@ -47,16 +48,63 @@ TCGv hex_dczero_addr; TCGv hex_llsc_addr; TCGv hex_llsc_val; TCGv_i64 hex_llsc_val_i64; +TCGv hex_VRegs_updated; +TCGv hex_QRegs_updated; +TCGv hex_vstore_addr[VSTORES_MAX]; +TCGv hex_vstore_size[VSTORES_MAX]; +TCGv hex_vstore_pending[VSTORES_MAX]; static const char * const hexagon_prednames[] = { "p0", "p1", "p2", "p3" }; +intptr_t ctx_future_vreg_off(DisasContext *ctx, int regnum, + int num, bool alloc_ok) +{ + intptr_t offset; + + /* See if it is already allocated */ + for (int i = 0; i < ctx->future_vregs_idx; i++) { + if (ctx->future_vregs_num[i] == regnum) { + return offsetof(CPUHexagonState, future_VRegs[i]); + } + } + + g_assert(alloc_ok); + offset = offsetof(CPUHexagonState, future_VRegs[ctx->future_vregs_idx]); + for (int i = 0; i < num; i++) { + ctx->future_vregs_num[ctx->future_vregs_idx + i] = regnum++; + } + ctx->future_vregs_idx += num; + g_assert(ctx->future_vregs_idx <= VECTOR_TEMPS_MAX); + return offset; +} + +intptr_t ctx_tmp_vreg_off(DisasContext *ctx, int regnum, + int num, bool alloc_ok) +{ + intptr_t offset; + + /* See if it is already allocated */ + for (int i = 0; i < ctx->tmp_vregs_idx; i++) { + if (ctx->tmp_vregs_num[i] == regnum) { + return offsetof(CPUHexagonState, tmp_VRegs[i]); + } + } + + g_assert(alloc_ok); + offset = offsetof(CPUHexagonState, tmp_VRegs[ctx->tmp_vregs_idx]); + for (int i = 0; i < num; i++) { + ctx->tmp_vregs_num[ctx->tmp_vregs_idx + i] = regnum++; + } + ctx->tmp_vregs_idx += num; + g_assert(ctx->tmp_vregs_idx <= VECTOR_TEMPS_MAX); + return offset; +} + static void gen_exception_raw(int excp) { - TCGv_i32 helper_tmp = tcg_const_i32(excp); - gen_helper_raise_exception(cpu_env, helper_tmp); - tcg_temp_free_i32(helper_tmp); + gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp)); } static void gen_exec_counters(DisasContext *ctx) @@ -65,17 +113,15 @@ static void gen_exec_counters(DisasContext *ctx) hex_gpr[HEX_REG_QEMU_PKT_CNT], ctx->num_packets); tcg_gen_addi_tl(hex_gpr[HEX_REG_QEMU_INSN_CNT], hex_gpr[HEX_REG_QEMU_INSN_CNT], ctx->num_insns); + tcg_gen_addi_tl(hex_gpr[HEX_REG_QEMU_HVX_CNT], + hex_gpr[HEX_REG_QEMU_HVX_CNT], ctx->num_hvx_insns); } static void gen_end_tb(DisasContext *ctx) { gen_exec_counters(ctx); tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], hex_next_PC); - if (ctx->base.singlestep_enabled) { - gen_exception_raw(EXCP_DEBUG); - } else { - tcg_gen_exit_tb(NULL, 0); - } + tcg_gen_exit_tb(NULL, 0); ctx->base.is_jmp = DISAS_NORETURN; } @@ -112,7 +158,8 @@ static int read_packet_words(CPUHexagonState *env, DisasContext *ctx, memset(words, 0, PACKET_WORDS_MAX * sizeof(uint32_t)); for (nwords = 0; !found_end && nwords < PACKET_WORDS_MAX; nwords++) { words[nwords] = - translator_ldl(env, ctx->base.pc_next + nwords * sizeof(uint32_t)); + translator_ldl(env, &ctx->base, + ctx->base.pc_next + nwords * sizeof(uint32_t)); found_end = is_packet_end(words[nwords]); } if (!found_end) { @@ -172,11 +219,19 @@ static void gen_start_packet(DisasContext *ctx, Packet *pkt) bitmap_zero(ctx->regs_written, TOTAL_PER_THREAD_REGS); ctx->preg_log_idx = 0; bitmap_zero(ctx->pregs_written, NUM_PREGS); + ctx->future_vregs_idx = 0; + ctx->tmp_vregs_idx = 0; + ctx->vreg_log_idx = 0; + bitmap_zero(ctx->vregs_updated_tmp, 
NUM_VREGS); + bitmap_zero(ctx->vregs_updated, NUM_VREGS); + bitmap_zero(ctx->vregs_select, NUM_VREGS); + ctx->qreg_log_idx = 0; for (i = 0; i < STORES_MAX; i++) { ctx->store_width[i] = 0; } tcg_gen_movi_tl(hex_pkt_has_store_s1, pkt->pkt_has_store_s1); ctx->s1_store_processed = false; + ctx->pre_commit = true; if (HEX_DEBUG) { /* Handy place to set a breakpoint before the packet executes */ @@ -198,6 +253,26 @@ static void gen_start_packet(DisasContext *ctx, Packet *pkt) if (need_pred_written(pkt)) { tcg_gen_movi_tl(hex_pred_written, 0); } + + if (pkt->pkt_has_hvx) { + tcg_gen_movi_tl(hex_VRegs_updated, 0); + tcg_gen_movi_tl(hex_QRegs_updated, 0); + } +} + +bool is_gather_store_insn(Insn *insn, Packet *pkt) +{ + if (GET_ATTRIB(insn->opcode, A_CVI_NEW) && + insn->new_value_producer_slot == 1) { + /* Look for gather instruction */ + for (int i = 0; i < pkt->num_insns; i++) { + Insn *in = &pkt->insn[i]; + if (GET_ATTRIB(in->opcode, A_CVI_GATHER) && in->slot == 1) { + return true; + } + } + } + return false; } /* @@ -209,7 +284,12 @@ static void mark_implicit_reg_write(DisasContext *ctx, Insn *insn, int attrib, int rnum) { if (GET_ATTRIB(insn->opcode, attrib)) { - bool is_predicated = GET_ATTRIB(insn->opcode, A_CONDEXEC); + /* + * USR is used to set overflow and FP exceptions, + * so treat it as conditional + */ + bool is_predicated = GET_ATTRIB(insn->opcode, A_CONDEXEC) || + rnum == HEX_REG_USR; if (is_predicated && !is_preloaded(ctx, rnum)) { tcg_gen_mov_tl(hex_new_value[rnum], hex_gpr[rnum]); } @@ -235,6 +315,8 @@ static void mark_implicit_reg_writes(DisasContext *ctx, Insn *insn) mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SA0, HEX_REG_SA0); mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_LC1, HEX_REG_LC1); mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SA1, HEX_REG_SA1); + mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_USR, HEX_REG_USR); + mark_implicit_reg_write(ctx, insn, A_FPOP, HEX_REG_USR); } static void mark_implicit_pred_writes(DisasContext *ctx, Insn *insn) @@ -245,6 +327,30 @@ static void mark_implicit_pred_writes(DisasContext *ctx, Insn *insn) mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P3, 3); } +static void mark_store_width(DisasContext *ctx, Insn *insn) +{ + uint16_t opcode = insn->opcode; + uint32_t slot = insn->slot; + uint8_t width = 0; + + if (GET_ATTRIB(opcode, A_SCALAR_STORE)) { + if (GET_ATTRIB(opcode, A_MEMSIZE_1B)) { + width |= 1; + } + if (GET_ATTRIB(opcode, A_MEMSIZE_2B)) { + width |= 2; + } + if (GET_ATTRIB(opcode, A_MEMSIZE_4B)) { + width |= 4; + } + if (GET_ATTRIB(opcode, A_MEMSIZE_8B)) { + width |= 8; + } + tcg_debug_assert(is_power_of_2(width)); + ctx->store_width[slot] = width; + } +} + static void gen_insn(CPUHexagonState *env, DisasContext *ctx, Insn *insn, Packet *pkt) { @@ -252,6 +358,7 @@ static void gen_insn(CPUHexagonState *env, DisasContext *ctx, mark_implicit_reg_writes(ctx, insn); insn->generate(env, ctx, insn, pkt); mark_implicit_pred_writes(ctx, insn); + mark_store_width(ctx, insn); } else { gen_exception_end_tb(ctx, HEX_EXCP_INVALID_OPCODE); } @@ -287,7 +394,7 @@ static void gen_pred_writes(DisasContext *ctx, Packet *pkt) * write of the predicates. 
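 *
 * In the endloop case below each logged predicate is committed with a
 * conditional move; in effect (a sketch, not literal TCG):
 *
 *     pred[i] = (pred_written & (1 << i)) ? new_pred[i] : pred[i];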
*/ if (pkt->pkt_has_endloop) { - TCGv zero = tcg_const_tl(0); + TCGv zero = tcg_constant_tl(0); TCGv pred_written = tcg_temp_new(); for (i = 0; i < ctx->preg_log_idx; i++) { int pred_num = ctx->preg_log[i]; @@ -298,7 +405,6 @@ static void gen_pred_writes(DisasContext *ctx, Packet *pkt) hex_new_pred_value[pred_num], hex_pred[pred_num]); } - tcg_temp_free(zero); tcg_temp_free(pred_written); } else { for (i = 0; i < ctx->preg_log_idx; i++) { @@ -316,11 +422,9 @@ static void gen_pred_writes(DisasContext *ctx, Packet *pkt) static void gen_check_store_width(DisasContext *ctx, int slot_num) { if (HEX_DEBUG) { - TCGv slot = tcg_const_tl(slot_num); - TCGv check = tcg_const_tl(ctx->store_width[slot_num]); + TCGv slot = tcg_constant_tl(slot_num); + TCGv check = tcg_constant_tl(ctx->store_width[slot_num]); gen_helper_debug_check_store_width(cpu_env, slot, check); - tcg_temp_free(slot); - tcg_temp_free(check); } } @@ -402,9 +506,8 @@ void process_store(DisasContext *ctx, Packet *pkt, int slot_num) * TCG generation time, we'll use a helper to * avoid branching based on the width at runtime. */ - TCGv slot = tcg_const_tl(slot_num); + TCGv slot = tcg_constant_tl(slot_num); gen_helper_commit_store(cpu_env, slot); - tcg_temp_free(slot); } } tcg_temp_free(address); @@ -418,13 +521,15 @@ static void process_store_log(DisasContext *ctx, Packet *pkt) { /* * When a packet has two stores, the hardware processes - * slot 1 and then slot 2. This will be important when + * slot 1 and then slot 0. This will be important when * the memory accesses overlap. */ - if (pkt->pkt_has_store_s1 && !pkt->pkt_has_dczeroa) { + if (pkt->pkt_has_store_s1) { + g_assert(!pkt->pkt_has_dczeroa); process_store(ctx, pkt, 1); } - if (pkt->pkt_has_store_s0 && !pkt->pkt_has_dczeroa) { + if (pkt->pkt_has_store_s0) { + g_assert(!pkt->pkt_has_dczeroa); process_store(ctx, pkt, 0); } } @@ -435,7 +540,7 @@ static void process_dczeroa(DisasContext *ctx, Packet *pkt) if (pkt->pkt_has_dczeroa) { /* Store 32 bytes of zero starting at (addr & ~0x1f) */ TCGv addr = tcg_temp_new(); - TCGv_i64 zero = tcg_const_i64(0); + TCGv_i64 zero = tcg_constant_i64(0); tcg_gen_andi_tl(addr, hex_dczero_addr, ~0x1f); tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); @@ -447,7 +552,93 @@ static void process_dczeroa(DisasContext *ctx, Packet *pkt) tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); tcg_temp_free(addr); - tcg_temp_free_i64(zero); + } +} + +static bool pkt_has_hvx_store(Packet *pkt) +{ + int i; + for (i = 0; i < pkt->num_insns; i++) { + int opcode = pkt->insn[i].opcode; + if (GET_ATTRIB(opcode, A_CVI) && GET_ATTRIB(opcode, A_STORE)) { + return true; + } + } + return false; +} + +static void gen_commit_hvx(DisasContext *ctx, Packet *pkt) +{ + int i; + + /* + * for (i = 0; i < ctx->vreg_log_idx; i++) { + * int rnum = ctx->vreg_log[i]; + * if (ctx->vreg_is_predicated[i]) { + * if (env->VRegs_updated & (1 << rnum)) { + * env->VRegs[rnum] = env->future_VRegs[rnum]; + * } + * } else { + * env->VRegs[rnum] = env->future_VRegs[rnum]; + * } + * } + */ + for (i = 0; i < ctx->vreg_log_idx; i++) { + int rnum = ctx->vreg_log[i]; + bool is_predicated = ctx->vreg_is_predicated[i]; + intptr_t dstoff = offsetof(CPUHexagonState, VRegs[rnum]); + intptr_t srcoff = ctx_future_vreg_off(ctx, rnum, 1, false); + size_t size = sizeof(MMVector); + + if (is_predicated) { + TCGv cmp = tcg_temp_new(); + TCGLabel *label_skip = gen_new_label(); + + tcg_gen_andi_tl(cmp, hex_VRegs_updated, 1 << rnum); + tcg_gen_brcondi_tl(TCG_COND_EQ, cmp, 0, label_skip); + tcg_temp_free(cmp); + 
tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size); + gen_set_label(label_skip); + } else { + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size); + } + } + + /* + * for (i = 0; i < ctx->qreg_log_idx; i++) { + * int rnum = ctx->qreg_log[i]; + * if (ctx->qreg_is_predicated[i]) { + * if (env->QRegs_updated) & (1 << rnum)) { + * env->QRegs[rnum] = env->future_QRegs[rnum]; + * } + * } else { + * env->QRegs[rnum] = env->future_QRegs[rnum]; + * } + * } + */ + for (i = 0; i < ctx->qreg_log_idx; i++) { + int rnum = ctx->qreg_log[i]; + bool is_predicated = ctx->qreg_is_predicated[i]; + intptr_t dstoff = offsetof(CPUHexagonState, QRegs[rnum]); + intptr_t srcoff = offsetof(CPUHexagonState, future_QRegs[rnum]); + size_t size = sizeof(MMQReg); + + if (is_predicated) { + TCGv cmp = tcg_temp_new(); + TCGLabel *label_skip = gen_new_label(); + + tcg_gen_andi_tl(cmp, hex_QRegs_updated, 1 << rnum); + tcg_gen_brcondi_tl(TCG_COND_EQ, cmp, 0, label_skip); + tcg_temp_free(cmp); + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size); + gen_set_label(label_skip); + } else { + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size); + } + } + + if (pkt_has_hvx_store(pkt)) { + gen_helper_commit_hvx_stores(cpu_env); } } @@ -455,6 +646,7 @@ static void update_exec_counters(DisasContext *ctx, Packet *pkt) { int num_insns = pkt->num_insns; int num_real_insns = 0; + int num_hvx_insns = 0; for (int i = 0; i < num_insns; i++) { if (!pkt->insn[i].is_endloop && @@ -462,30 +654,97 @@ static void update_exec_counters(DisasContext *ctx, Packet *pkt) !GET_ATTRIB(pkt->insn[i].opcode, A_IT_NOP)) { num_real_insns++; } + if (GET_ATTRIB(pkt->insn[i].opcode, A_CVI)) { + num_hvx_insns++; + } } ctx->num_packets++; ctx->num_insns += num_real_insns; + ctx->num_hvx_insns += num_hvx_insns; } -static void gen_commit_packet(DisasContext *ctx, Packet *pkt) +static void gen_commit_packet(CPUHexagonState *env, DisasContext *ctx, + Packet *pkt) { + /* + * If there is more than one store in a packet, make sure they are all OK + * before proceeding with the rest of the packet commit. + * + * dczeroa has to be the only store operation in the packet, so we go + * ahead and process that first. + * + * When there is an HVX store, there can also be a scalar store in either + * slot 0 or slot1, so we create a mask for the helper to indicate what + * work to do. + * + * When there are two scalar stores, we probe the one in slot 0. + * + * Note that we don't call the probe helper for packets with only one + * store. Therefore, we call process_store_log before anything else + * involved in committing the packet. + */ + bool has_store_s0 = pkt->pkt_has_store_s0; + bool has_store_s1 = (pkt->pkt_has_store_s1 && !ctx->s1_store_processed); + bool has_hvx_store = pkt_has_hvx_store(pkt); + if (pkt->pkt_has_dczeroa) { + /* + * The dczeroa will be the store in slot 0, check that we don't have + * a store in slot 1 or an HVX store. 
+ */ + g_assert(!has_store_s1 && !has_hvx_store); + process_dczeroa(ctx, pkt); + } else if (has_hvx_store) { + TCGv mem_idx = tcg_constant_tl(ctx->mem_idx); + + if (!has_store_s0 && !has_store_s1) { + gen_helper_probe_hvx_stores(cpu_env, mem_idx); + } else { + int mask = 0; + TCGv mask_tcgv; + + if (has_store_s0) { + mask |= (1 << 0); + } + if (has_store_s1) { + mask |= (1 << 1); + } + if (has_hvx_store) { + mask |= (1 << 2); + } + mask_tcgv = tcg_constant_tl(mask); + gen_helper_probe_pkt_scalar_hvx_stores(cpu_env, mask_tcgv, mem_idx); + } + } else if (has_store_s0 && has_store_s1) { + /* + * process_store_log will execute the slot 1 store first, + * so we only have to probe the store in slot 0 + */ + TCGv mem_idx = tcg_constant_tl(ctx->mem_idx); + gen_helper_probe_pkt_scalar_store_s0(cpu_env, mem_idx); + } + + process_store_log(ctx, pkt); + gen_reg_writes(ctx); gen_pred_writes(ctx, pkt); - process_store_log(ctx, pkt); - process_dczeroa(ctx, pkt); + if (pkt->pkt_has_hvx) { + gen_commit_hvx(ctx, pkt); + } update_exec_counters(ctx, pkt); if (HEX_DEBUG) { TCGv has_st0 = - tcg_const_tl(pkt->pkt_has_store_s0 && !pkt->pkt_has_dczeroa); + tcg_constant_tl(pkt->pkt_has_store_s0 && !pkt->pkt_has_dczeroa); TCGv has_st1 = - tcg_const_tl(pkt->pkt_has_store_s1 && !pkt->pkt_has_dczeroa); + tcg_constant_tl(pkt->pkt_has_store_s1 && !pkt->pkt_has_dczeroa); /* Handy place to set a breakpoint at the end of execution */ gen_helper_debug_commit_end(cpu_env, has_st0, has_st1); + } - tcg_temp_free(has_st0); - tcg_temp_free(has_st1); + if (pkt->vhist_insn != NULL) { + ctx->pre_commit = false; + pkt->vhist_insn->generate(env, ctx, pkt->vhist_insn, pkt); } if (pkt->pkt_has_cof) { @@ -512,7 +771,7 @@ static void decode_and_translate_packet(CPUHexagonState *env, DisasContext *ctx) for (i = 0; i < pkt.num_insns; i++) { gen_insn(env, ctx, &pkt.insn[i], &pkt); } - gen_commit_packet(ctx, &pkt); + gen_commit_packet(env, ctx, &pkt); ctx->base.pc_next += pkt.encod_pkt_size_in_bytes; } else { gen_exception_end_tb(ctx, HEX_EXCP_INVALID_PACKET); @@ -527,6 +786,7 @@ static void hexagon_tr_init_disas_context(DisasContextBase *dcbase, ctx->mem_idx = MMU_USER_IDX; ctx->num_packets = 0; ctx->num_insns = 0; + ctx->num_hvx_insns = 0; } static void hexagon_tr_tb_start(DisasContextBase *db, CPUState *cpu) @@ -591,11 +851,7 @@ static void hexagon_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) case DISAS_TOO_MANY: gen_exec_counters(ctx); tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->base.pc_next); - if (ctx->base.singlestep_enabled) { - gen_exception_raw(EXCP_DEBUG); - } else { - tcg_gen_exit_tb(NULL, 0); - } + tcg_gen_exit_tb(NULL, 0); break; case DISAS_NORETURN: break; @@ -604,10 +860,11 @@ static void hexagon_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) } } -static void hexagon_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu) +static void hexagon_tr_disas_log(const DisasContextBase *dcbase, + CPUState *cpu, FILE *logfile) { - qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first)); - log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first)); + target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size); } @@ -620,11 +877,13 @@ static const TranslatorOps hexagon_tr_ops = { .disas_log = hexagon_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext ctx; - translator_loop(&hexagon_tr_ops, 
&ctx.base, cs, tb, max_insns); + translator_loop(cs, tb, max_insns, pc, host_pc, + &hexagon_tr_ops, &ctx.base); } #define NAME_LEN 64 @@ -635,6 +894,9 @@ static char store_addr_names[STORES_MAX][NAME_LEN]; static char store_width_names[STORES_MAX][NAME_LEN]; static char store_val32_names[STORES_MAX][NAME_LEN]; static char store_val64_names[STORES_MAX][NAME_LEN]; +static char vstore_addr_names[VSTORES_MAX][NAME_LEN]; +static char vstore_size_names[VSTORES_MAX][NAME_LEN]; +static char vstore_pending_names[VSTORES_MAX][NAME_LEN]; void hexagon_translate_init(void) { @@ -642,12 +904,6 @@ void hexagon_translate_init(void) opcode_init(); - if (HEX_DEBUG) { - if (!qemu_logfile) { - qemu_set_log(qemu_loglevel); - } - } - for (i = 0; i < TOTAL_PER_THREAD_REGS; i++) { hex_gpr[i] = tcg_global_mem_new(cpu_env, offsetof(CPUHexagonState, gpr[i]), @@ -697,6 +953,10 @@ void hexagon_translate_init(void) offsetof(CPUHexagonState, llsc_val), "llsc_val"); hex_llsc_val_i64 = tcg_global_mem_new_i64(cpu_env, offsetof(CPUHexagonState, llsc_val_i64), "llsc_val_i64"); + hex_VRegs_updated = tcg_global_mem_new(cpu_env, + offsetof(CPUHexagonState, VRegs_updated), "VRegs_updated"); + hex_QRegs_updated = tcg_global_mem_new(cpu_env, + offsetof(CPUHexagonState, QRegs_updated), "QRegs_updated"); for (i = 0; i < STORES_MAX; i++) { snprintf(store_addr_names[i], NAME_LEN, "store_addr_%d", i); hex_store_addr[i] = tcg_global_mem_new(cpu_env, @@ -718,4 +978,20 @@ void hexagon_translate_init(void) offsetof(CPUHexagonState, mem_log_stores[i].data64), store_val64_names[i]); } + for (int i = 0; i < VSTORES_MAX; i++) { + snprintf(vstore_addr_names[i], NAME_LEN, "vstore_addr_%d", i); + hex_vstore_addr[i] = tcg_global_mem_new(cpu_env, + offsetof(CPUHexagonState, vstore[i].va), + vstore_addr_names[i]); + + snprintf(vstore_size_names[i], NAME_LEN, "vstore_size_%d", i); + hex_vstore_size[i] = tcg_global_mem_new(cpu_env, + offsetof(CPUHexagonState, vstore[i].size), + vstore_size_names[i]); + + snprintf(vstore_pending_names[i], NAME_LEN, "vstore_pending_%d", i); + hex_vstore_pending[i] = tcg_global_mem_new(cpu_env, + offsetof(CPUHexagonState, vstore_pending[i]), + vstore_pending_names[i]); + } } diff --git a/target/hexagon/translate.h b/target/hexagon/translate.h index 703fd1345f..a245172827 100644 --- a/target/hexagon/translate.h +++ b/target/hexagon/translate.h @@ -19,6 +19,7 @@ #define HEXAGON_TRANSLATE_H #include "qemu/bitmap.h" +#include "qemu/log.h" #include "cpu.h" #include "exec/translator.h" #include "tcg/tcg-op.h" @@ -29,6 +30,7 @@ typedef struct DisasContext { uint32_t mem_idx; uint32_t num_packets; uint32_t num_insns; + uint32_t num_hvx_insns; int reg_log[REG_WRITES_MAX]; int reg_log_idx; DECLARE_BITMAP(regs_written, TOTAL_PER_THREAD_REGS); @@ -37,6 +39,20 @@ typedef struct DisasContext { DECLARE_BITMAP(pregs_written, NUM_PREGS); uint8_t store_width[STORES_MAX]; bool s1_store_processed; + int future_vregs_idx; + int future_vregs_num[VECTOR_TEMPS_MAX]; + int tmp_vregs_idx; + int tmp_vregs_num[VECTOR_TEMPS_MAX]; + int vreg_log[NUM_VREGS]; + bool vreg_is_predicated[NUM_VREGS]; + int vreg_log_idx; + DECLARE_BITMAP(vregs_updated_tmp, NUM_VREGS); + DECLARE_BITMAP(vregs_updated, NUM_VREGS); + DECLARE_BITMAP(vregs_select, NUM_VREGS); + int qreg_log[NUM_QREGS]; + bool qreg_is_predicated[NUM_QREGS]; + int qreg_log_idx; + bool pre_commit; } DisasContext; static inline void ctx_log_reg_write(DisasContext *ctx, int rnum) @@ -67,6 +83,46 @@ static inline bool is_preloaded(DisasContext *ctx, int num) return test_bit(num, ctx->regs_written); } 
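[Editor's note: illustrative sketch, not part of the patch.] The HVX write-log machinery works in two halves: an instruction's generate() callback writes its result into env->future_VRegs[] at the offset returned by ctx_future_vreg_off() (defined in translate.c above and declared just below) and records the destination with ctx_log_vreg_write(); gen_commit_hvx() then copies the logged registers from future_VRegs[] into VRegs[] at packet commit, conditionally via hex_VRegs_updated when the write is predicated. A minimal sketch of such a generator, assuming the hexagon translate.c context; EXT_DFL is an assumed name for the default (non-new, non-tmp) VRegWriteType enumerator, which this hunk does not show:

static void gen_example_vadd_w(DisasContext *ctx, int vd, int vu, int vv)
{
    /* Result goes into future_VRegs[]; gen_commit_hvx() copies it later. */
    intptr_t dstoff = ctx_future_vreg_off(ctx, vd, 1, true);
    intptr_t uoff = offsetof(CPUHexagonState, VRegs[vu]);
    intptr_t voff = offsetof(CPUHexagonState, VRegs[vv]);

    /* 32-bit lane-wise add across one full vector register. */
    tcg_gen_gvec_add(MO_32, dstoff, uoff, voff,
                     sizeof(MMVector), sizeof(MMVector));

    /*
     * Log the write so the commit pass sees it.  A predicated write would
     * pass true here and the generated code would also set the matching
     * bit in hex_VRegs_updated, which gen_commit_hvx() tests before copying.
     */
    ctx_log_vreg_write(ctx, vd, EXT_DFL, false);
}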
+intptr_t ctx_future_vreg_off(DisasContext *ctx, int regnum, + int num, bool alloc_ok); +intptr_t ctx_tmp_vreg_off(DisasContext *ctx, int regnum, + int num, bool alloc_ok); + +static inline void ctx_log_vreg_write(DisasContext *ctx, + int rnum, VRegWriteType type, + bool is_predicated) +{ + if (type != EXT_TMP) { + ctx->vreg_log[ctx->vreg_log_idx] = rnum; + ctx->vreg_is_predicated[ctx->vreg_log_idx] = is_predicated; + ctx->vreg_log_idx++; + + set_bit(rnum, ctx->vregs_updated); + } + if (type == EXT_NEW) { + set_bit(rnum, ctx->vregs_select); + } + if (type == EXT_TMP) { + set_bit(rnum, ctx->vregs_updated_tmp); + } +} + +static inline void ctx_log_vreg_write_pair(DisasContext *ctx, + int rnum, VRegWriteType type, + bool is_predicated) +{ + ctx_log_vreg_write(ctx, rnum ^ 0, type, is_predicated); + ctx_log_vreg_write(ctx, rnum ^ 1, type, is_predicated); +} + +static inline void ctx_log_qreg_write(DisasContext *ctx, + int rnum, bool is_predicated) +{ + ctx->qreg_log[ctx->qreg_log_idx] = rnum; + ctx->qreg_is_predicated[ctx->qreg_log_idx] = is_predicated; + ctx->qreg_log_idx++; +} + extern TCGv hex_gpr[TOTAL_PER_THREAD_REGS]; extern TCGv hex_pred[NUM_PREGS]; extern TCGv hex_next_PC; @@ -85,6 +141,12 @@ extern TCGv hex_dczero_addr; extern TCGv hex_llsc_addr; extern TCGv hex_llsc_val; extern TCGv_i64 hex_llsc_val_i64; +extern TCGv hex_VRegs_updated; +extern TCGv hex_QRegs_updated; +extern TCGv hex_vstore_addr[VSTORES_MAX]; +extern TCGv hex_vstore_size[VSTORES_MAX]; +extern TCGv hex_vstore_pending[VSTORES_MAX]; +bool is_gather_store_insn(Insn *insn, Packet *pkt); void process_store(DisasContext *ctx, Packet *pkt, int slot_num); #endif diff --git a/target/hppa/cpu-param.h b/target/hppa/cpu-param.h index a97d1428df..a48a2701ae 100644 --- a/target/hppa/cpu-param.h +++ b/target/hppa/cpu-param.h @@ -6,7 +6,7 @@ */ #ifndef HPPA_CPU_PARAM_H -#define HPPA_CPU_PARAM_H 1 +#define HPPA_CPU_PARAM_H #ifdef TARGET_HPPA64 # define TARGET_LONG_BITS 64 diff --git a/target/hppa/cpu-qom.h b/target/hppa/cpu-qom.h index d424f88370..b96e0318c7 100644 --- a/target/hppa/cpu-qom.h +++ b/target/hppa/cpu-qom.h @@ -25,8 +25,7 @@ #define TYPE_HPPA_CPU "hppa-cpu" -OBJECT_DECLARE_TYPE(HPPACPU, HPPACPUClass, - HPPA_CPU) +OBJECT_DECLARE_CPU_TYPE(HPPACPU, HPPACPUClass, HPPA_CPU) /** * HPPACPUClass: diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c index 2eace4ee12..55c190280e 100644 --- a/target/hppa/cpu.c +++ b/target/hppa/cpu.c @@ -21,6 +21,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/qemu-print.h" +#include "qemu/timer.h" #include "cpu.h" #include "qemu/module.h" #include "exec/exec-all.h" @@ -35,13 +36,20 @@ static void hppa_cpu_set_pc(CPUState *cs, vaddr value) cpu->env.iaoq_b = value + 4; } +static vaddr hppa_cpu_get_pc(CPUState *cs) +{ + HPPACPU *cpu = HPPA_CPU(cs); + + return cpu->env.iaoq_f; +} + static void hppa_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { HPPACPU *cpu = HPPA_CPU(cs); #ifdef CONFIG_USER_ONLY - cpu->env.iaoq_f = tb->pc; + cpu->env.iaoq_f = tb_pc(tb); cpu->env.iaoq_b = tb->cs_base; #else /* Recover the IAOQ values from the GVA + PRIV. 
*/ @@ -51,7 +59,7 @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs, int32_t diff = cs_base; cpu->env.iasq_f = iasq_f; - cpu->env.iaoq_f = (tb->pc & ~iasq_f) + priv; + cpu->env.iaoq_f = (tb_pc(tb) & ~iasq_f) + priv; if (diff) { cpu->env.iaoq_b = cpu->env.iaoq_f + diff; } @@ -60,9 +68,27 @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs, cpu->env.psw_n = (tb->flags & PSW_N) != 0; } +static void hppa_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + HPPACPU *cpu = HPPA_CPU(cs); + + cpu->env.iaoq_f = data[0]; + if (data[1] != (target_ureg)-1) { + cpu->env.iaoq_b = data[1]; + } + /* + * Since we were executing the instruction at IAOQ_F, and took some + * sort of action that provoked the cpu_restore_state, we can infer + * that the instruction was not nullified. + */ + cpu->env.psw_n = 0; +} + static bool hppa_cpu_has_work(CPUState *cs) { - return cs->interrupt_request & CPU_INTERRUPT_HARD; + return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); } static void hppa_cpu_disas_set_info(CPUState *cs, disassemble_info *info) @@ -72,9 +98,10 @@ static void hppa_cpu_disas_set_info(CPUState *cs, disassemble_info *info) } #ifndef CONFIG_USER_ONLY -static void hppa_cpu_do_unaligned_access(CPUState *cs, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr) +static G_NORETURN +void hppa_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr) { HPPACPU *cpu = HPPA_CPU(cs); CPUHPPAState *env = &cpu->env; @@ -144,10 +171,11 @@ static const struct SysemuCPUOps hppa_sysemu_ops = { static const struct TCGCPUOps hppa_tcg_ops = { .initialize = hppa_translate_init, .synchronize_from_tb = hppa_cpu_synchronize_from_tb, - .cpu_exec_interrupt = hppa_cpu_exec_interrupt, - .tlb_fill = hppa_cpu_tlb_fill, + .restore_state_to_opc = hppa_restore_state_to_opc, #ifndef CONFIG_USER_ONLY + .tlb_fill = hppa_cpu_tlb_fill, + .cpu_exec_interrupt = hppa_cpu_exec_interrupt, .do_interrupt = hppa_cpu_do_interrupt, .do_unaligned_access = hppa_cpu_do_unaligned_access, #endif /* !CONFIG_USER_ONLY */ @@ -166,6 +194,7 @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data) cc->has_work = hppa_cpu_has_work; cc->dump_state = hppa_cpu_dump_state; cc->set_pc = hppa_cpu_set_pc; + cc->get_pc = hppa_cpu_get_pc; cc->gdb_read_register = hppa_cpu_gdb_read_register; cc->gdb_write_register = hppa_cpu_gdb_write_register; #ifndef CONFIG_USER_ONLY diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h index 748270bfa3..6f3b6beecf 100644 --- a/target/hppa/cpu.h +++ b/target/hppa/cpu.h @@ -22,6 +22,7 @@ #include "cpu-qom.h" #include "exec/cpu-defs.h" +#include "qemu/cpu-float.h" /* PA-RISC 1.x processors have a strong memory model. */ /* ??? 
While we do not yet implement PA-RISC 2.0, those processors have @@ -69,6 +70,11 @@ #define EXCP_SYSCALL 30 #define EXCP_SYSCALL_LWS 31 +/* Emulated hardware TOC button */ +#define EXCP_TOC 32 /* TOC = Transfer of control (NMI) */ + +#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 /* TOC */ + /* Taken from Linux kernel: arch/parisc/include/asm/psw.h */ #define PSW_I 0x00000001 #define PSW_D 0x00000002 @@ -133,8 +139,6 @@ #define CR_IPSW 22 #define CR_EIRR 23 -typedef struct CPUHPPAState CPUHPPAState; - #if TARGET_REGISTER_BITS == 32 typedef uint32_t target_ureg; typedef int32_t target_sreg; @@ -163,7 +167,7 @@ typedef struct { unsigned access_id : 16; } hppa_tlb_entry; -struct CPUHPPAState { +typedef struct CPUArchState { target_ureg gr[32]; uint64_t fr[32]; uint64_t sr[8]; /* stored shifted into place for gva */ @@ -202,7 +206,7 @@ struct CPUHPPAState { /* ??? We should use a more intelligent data structure. */ hppa_tlb_entry tlb[HPPA_TLB_ENTRIES]; uint32_t tlb_last; -}; +} CPUHPPAState; /** * HPPACPU: @@ -210,7 +214,7 @@ struct CPUHPPAState { * * An HPPA CPU. */ -struct HPPACPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ @@ -220,10 +224,6 @@ struct HPPACPU { QEMUTimer *alarm_timer; }; - -typedef CPUHPPAState CPUArchState; -typedef HPPACPU ArchCPU; - #include "exec/cpu-all.h" static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch) @@ -259,12 +259,14 @@ static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc, return hppa_form_gva_psw(env->psw, spc, off); } -/* Since PSW_{I,CB} will never need to be in tb->flags, reuse them. +/* + * Since PSW_{I,CB} will never need to be in tb->flags, reuse them. * TB_FLAG_SR_SAME indicates that SR4 through SR7 all contain the * same value. */ #define TB_FLAG_SR_SAME PSW_I #define TB_FLAG_PRIV_SHIFT 8 +#define TB_FLAG_UNALIGN 0x400 static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, target_ulong *pc, target_ulong *cs_base, @@ -279,6 +281,7 @@ static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, target_ulong *pc, #ifdef CONFIG_USER_ONLY *pc = env->iaoq_f & -4; *cs_base = env->iaoq_b & -4; + flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus; #else /* ??? E, T, H, L, B, P bits need to be here, when implemented. 
*/ flags |= env->psw & (PSW_W | PSW_C | PSW_D); @@ -319,19 +322,16 @@ static inline void cpu_hppa_change_prot_id(CPUHPPAState *env) { } void cpu_hppa_change_prot_id(CPUHPPAState *env); #endif -#define cpu_signal_handler cpu_hppa_signal_handler - -int cpu_hppa_signal_handler(int host_signum, void *pinfo, void *puc); hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr); int hppa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -void hppa_cpu_do_interrupt(CPUState *cpu); -bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req); void hppa_cpu_dump_state(CPUState *cs, FILE *f, int); +#ifndef CONFIG_USER_ONLY bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); -#ifndef CONFIG_USER_ONLY +void hppa_cpu_do_interrupt(CPUState *cpu); +bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req); int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, int type, hwaddr *pphys, int *pprot); extern const MemoryRegionOps hppa_io_eir_ops; @@ -339,6 +339,6 @@ extern const VMStateDescription vmstate_hppa_cpu; void hppa_cpu_alarm_timer(void *); int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr); #endif -void QEMU_NORETURN hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra); +G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra); #endif /* HPPA_CPU_H */ diff --git a/target/hppa/helper.c b/target/hppa/helper.c index 1ccff5765a..74b8747083 100644 --- a/target/hppa/helper.c +++ b/target/hppa/helper.c @@ -18,7 +18,7 @@ */ #include "qemu/osdep.h" - +#include "qemu/log.h" #include "cpu.h" #include "fpu/softfloat.h" #include "exec/exec-all.h" @@ -85,9 +85,11 @@ void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags) char psw_c[20]; int i; - qemu_fprintf(f, "IA_F " TARGET_FMT_lx " IA_B " TARGET_FMT_lx "\n", + qemu_fprintf(f, "IA_F " TARGET_FMT_lx " IA_B " TARGET_FMT_lx + " IIR " TREG_FMT_lx "\n", hppa_form_gva_psw(psw, env->iasq_f, env->iaoq_f), - hppa_form_gva_psw(psw, env->iasq_b, env->iaoq_b)); + hppa_form_gva_psw(psw, env->iasq_b, env->iaoq_b), + env->cr[CR_IIR]); psw_c[0] = (psw & PSW_W ? 'W' : '-'); psw_c[1] = (psw & PSW_E ? 'E' : '-'); diff --git a/target/hppa/helper.h b/target/hppa/helper.h index 0a629ffa7c..c7e35ce8c7 100644 --- a/target/hppa/helper.h +++ b/target/hppa/helper.h @@ -1,7 +1,9 @@ #if TARGET_REGISTER_BITS == 64 # define dh_alias_tr i64 +# define dh_typecode_tr dh_typecode_i64 #else # define dh_alias_tr i32 +# define dh_typecode_tr dh_typecode_i32 #endif #define dh_ctype_tr target_ureg @@ -80,6 +82,7 @@ DEF_HELPER_FLAGS_0(read_interval_timer, TCG_CALL_NO_RWG, tr) #ifndef CONFIG_USER_ONLY DEF_HELPER_1(halt, noreturn, env) DEF_HELPER_1(reset, noreturn, env) +DEF_HELPER_1(getshadowregs, void, env) DEF_HELPER_1(rfi, void, env) DEF_HELPER_1(rfi_r, void, env) DEF_HELPER_FLAGS_2(write_interval_timer, TCG_CALL_NO_RWG, void, env, tr) diff --git a/target/hppa/insns.decode b/target/hppa/insns.decode index d4eefc0d48..c7a7e997f9 100644 --- a/target/hppa/insns.decode +++ b/target/hppa/insns.decode @@ -111,6 +111,7 @@ rfi_r 000000 ----- ----- --- 01100101 00000 # They are allocated from the unassigned instruction space. 
halt 1111 1111 1111 1101 1110 1010 1101 0000 reset 1111 1111 1111 1101 1110 1010 1101 0001 +getshadowregs 1111 1111 1111 1101 1110 1010 1101 0010 #### # Memory Management diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c index 349495d361..f599dccfff 100644 --- a/target/hppa/int_helper.c +++ b/target/hppa/int_helper.c @@ -23,6 +23,7 @@ #include "cpu.h" #include "exec/helper-proto.h" #include "hw/core/cpu.h" +#include "hw/hppa/hppa_hardware.h" #ifndef CONFIG_USER_ONLY static void eval_interrupt(HPPACPU *cpu) @@ -88,7 +89,6 @@ void HELPER(write_eiem)(CPUHPPAState *env, target_ureg val) eval_interrupt(env_archcpu(env)); qemu_mutex_unlock_iothread(); } -#endif /* !CONFIG_USER_ONLY */ void hppa_cpu_do_interrupt(CPUState *cs) { @@ -100,7 +100,6 @@ void hppa_cpu_do_interrupt(CPUState *cs) uint64_t iasq_f = env->iasq_f; uint64_t iasq_b = env->iasq_b; -#ifndef CONFIG_USER_ONLY target_ureg old_psw; /* As documented in pa2.0 -- interruption handling. */ @@ -183,11 +182,17 @@ void hppa_cpu_do_interrupt(CPUState *cs) } /* step 7 */ - env->iaoq_f = env->cr[CR_IVA] + 32 * i; + if (i == EXCP_TOC) { + env->iaoq_f = FIRMWARE_START; + /* help SeaBIOS and provide iaoq_b and iasq_back in shadow regs */ + env->gr[24] = env->cr_back[0]; + env->gr[25] = env->cr_back[1]; + } else { + env->iaoq_f = env->cr[CR_IVA] + 32 * i; + } env->iaoq_b = env->iaoq_f + 4; env->iasq_f = 0; env->iasq_b = 0; -#endif if (qemu_loglevel_mask(CPU_LOG_INT)) { static const char * const names[] = { @@ -222,6 +227,7 @@ void hppa_cpu_do_interrupt(CPUState *cs) [EXCP_PER_INTERRUPT] = "performance monitor interrupt", [EXCP_SYSCALL] = "syscall", [EXCP_SYSCALL_LWS] = "syscall-lws", + [EXCP_TOC] = "TOC (transfer of control)", }; static int count; const char *name = NULL; @@ -248,16 +254,24 @@ void hppa_cpu_do_interrupt(CPUState *cs) bool hppa_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { -#ifndef CONFIG_USER_ONLY HPPACPU *cpu = HPPA_CPU(cs); CPUHPPAState *env = &cpu->env; + if (interrupt_request & CPU_INTERRUPT_NMI) { + /* Raise TOC (NMI) interrupt */ + cpu_reset_interrupt(cs, CPU_INTERRUPT_NMI); + cs->exception_index = EXCP_TOC; + hppa_cpu_do_interrupt(cs); + return true; + } + /* If interrupts are requested and enabled, raise them. */ if ((env->psw & PSW_I) && (interrupt_request & CPU_INTERRUPT_HARD)) { cs->exception_index = EXCP_EXT_INTERRUPT; hppa_cpu_do_interrupt(cs); return true; } -#endif return false; } + +#endif /* !CONFIG_USER_ONLY */ diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c index afc5b56c3e..5046cc8f9d 100644 --- a/target/hppa/mem_helper.c +++ b/target/hppa/mem_helper.c @@ -18,26 +18,13 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" #include "hw/core/cpu.h" #include "trace.h" -#ifdef CONFIG_USER_ONLY -bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ - HPPACPU *cpu = HPPA_CPU(cs); - - /* ??? Test between data page fault and data memory protection trap, - which would affect si_code. */ - cs->exception_index = EXCP_DMP; - cpu->env.cr[CR_IOR] = address; - cpu_loop_exit_restore(cs, retaddr); -} -#else static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr) { int i; @@ -392,4 +379,3 @@ int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr) hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr); return ent ? 
ent->ar_type : -1; } -#endif /* CONFIG_USER_ONLY */ diff --git a/target/hppa/meson.build b/target/hppa/meson.build index 8a7ff82efc..021e42a2d0 100644 --- a/target/hppa/meson.build +++ b/target/hppa/meson.build @@ -7,13 +7,15 @@ hppa_ss.add(files( 'gdbstub.c', 'helper.c', 'int_helper.c', - 'mem_helper.c', 'op_helper.c', 'translate.c', )) hppa_softmmu_ss = ss.source_set() -hppa_softmmu_ss.add(files('machine.c')) +hppa_softmmu_ss.add(files( + 'machine.c', + 'mem_helper.c', +)) target_arch += {'hppa': hppa_ss} target_softmmu_arch += {'hppa': hppa_softmmu_ss} diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c index 96d9391c39..fbd80e4248 100644 --- a/target/hppa/op_helper.c +++ b/target/hppa/op_helper.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" @@ -27,7 +28,7 @@ #include "fpu/softfloat.h" #include "trace.h" -void QEMU_NORETURN HELPER(excp)(CPUHPPAState *env, int excp) +G_NORETURN void HELPER(excp)(CPUHPPAState *env, int excp) { CPUState *cs = env_cpu(env); @@ -35,7 +36,7 @@ void QEMU_NORETURN HELPER(excp)(CPUHPPAState *env, int excp) cpu_loop_exit(cs); } -void QEMU_NORETURN hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra) +G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra) { CPUState *cs = env_cpu(env); @@ -57,26 +58,29 @@ void HELPER(tcond)(CPUHPPAState *env, target_ureg cond) } } -static void atomic_store_3(CPUHPPAState *env, target_ulong addr, uint32_t val, - uint32_t mask, uintptr_t ra) +static void atomic_store_3(CPUHPPAState *env, target_ulong addr, + uint32_t val, uintptr_t ra) { -#ifdef CONFIG_USER_ONLY - uint32_t old, new, cmp; + int mmu_idx = cpu_mmu_index(env, 0); + uint32_t old, new, cmp, mask, *haddr; + void *vaddr; + + vaddr = probe_access(env, addr, 3, MMU_DATA_STORE, mmu_idx, ra); + if (vaddr == NULL) { + cpu_loop_exit_atomic(env_cpu(env), ra); + } + haddr = (uint32_t *)((uintptr_t)vaddr & -4); + mask = addr & 1 ? 0x00ffffffu : 0xffffff00u; - uint32_t *haddr = g2h(env_cpu(env), addr - 1); old = *haddr; while (1) { - new = (old & ~mask) | (val & mask); + new = be32_to_cpu((cpu_to_be32(old) & ~mask) | (val & mask)); cmp = qatomic_cmpxchg(haddr, old, new); if (cmp == old) { return; } old = cmp; } -#else - /* FIXME -- we can do better. */ - cpu_loop_exit_atomic(env_cpu(env), ra); -#endif } static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ureg val, @@ -92,7 +96,7 @@ static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ureg val, case 1: /* The 3 byte store must appear atomic. */ if (parallel) { - atomic_store_3(env, addr, val, 0x00ffffffu, ra); + atomic_store_3(env, addr, val, ra); } else { cpu_stb_data_ra(env, addr, val >> 16, ra); cpu_stw_data_ra(env, addr + 1, val, ra); @@ -122,7 +126,7 @@ static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ureg val, case 3: /* The 3 byte store must appear atomic. */ if (parallel) { - atomic_store_3(env, addr - 3, val, 0xffffff00u, ra); + atomic_store_3(env, addr - 3, val, ra); } else { cpu_stw_data_ra(env, addr - 3, val >> 16, ra); cpu_stb_data_ra(env, addr - 1, val >> 8, ra); @@ -166,7 +170,7 @@ target_ureg HELPER(probe)(CPUHPPAState *env, target_ulong addr, uint32_t level, uint32_t want) { #ifdef CONFIG_USER_ONLY - return page_check_range(addr, 1, want); + return (page_check_range(addr, 1, want) == 0) ? 
1 : 0; #else int prot, excp; hwaddr phys; @@ -691,7 +695,7 @@ void HELPER(rfi)(CPUHPPAState *env) cpu_hppa_put_psw(env, env->cr[CR_IPSW]); } -void HELPER(rfi_r)(CPUHPPAState *env) +void HELPER(getshadowregs)(CPUHPPAState *env) { env->gr[1] = env->shadow[0]; env->gr[8] = env->shadow[1]; @@ -700,6 +704,11 @@ void HELPER(rfi_r)(CPUHPPAState *env) env->gr[17] = env->shadow[4]; env->gr[24] = env->shadow[5]; env->gr[25] = env->shadow[6]; +} + +void HELPER(rfi_r)(CPUHPPAState *env) +{ + helper_getshadowregs(env); helper_rfi(env); } #endif diff --git a/target/hppa/translate.c b/target/hppa/translate.c index b18150ef8d..1af77473da 100644 --- a/target/hppa/translate.c +++ b/target/hppa/translate.c @@ -34,7 +34,6 @@ #undef TCGv #undef tcg_temp_new -#undef tcg_global_reg_new #undef tcg_global_mem_new #undef tcg_temp_local_new #undef tcg_temp_free @@ -59,7 +58,6 @@ #define TCGv_reg TCGv_i64 #define tcg_temp_new tcg_temp_new_i64 -#define tcg_global_reg_new tcg_global_reg_new_i64 #define tcg_global_mem_new tcg_global_mem_new_i64 #define tcg_temp_local_new tcg_temp_local_new_i64 #define tcg_temp_free tcg_temp_free_i64 @@ -142,6 +140,7 @@ #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64 #define tcg_gen_extract_reg tcg_gen_extract_i64 #define tcg_gen_sextract_reg tcg_gen_sextract_i64 +#define tcg_gen_extract2_reg tcg_gen_extract2_i64 #define tcg_const_reg tcg_const_i64 #define tcg_const_local_reg tcg_const_local_i64 #define tcg_constant_reg tcg_constant_i64 @@ -155,7 +154,6 @@ #else #define TCGv_reg TCGv_i32 #define tcg_temp_new tcg_temp_new_i32 -#define tcg_global_reg_new tcg_global_reg_new_i32 #define tcg_global_mem_new tcg_global_mem_new_i32 #define tcg_temp_local_new tcg_temp_local_new_i32 #define tcg_temp_free tcg_temp_free_i32 @@ -237,6 +235,7 @@ #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32 #define tcg_gen_extract_reg tcg_gen_extract_i32 #define tcg_gen_sextract_reg tcg_gen_sextract_i32 +#define tcg_gen_extract2_reg tcg_gen_extract2_i32 #define tcg_const_reg tcg_const_i32 #define tcg_const_local_reg tcg_const_local_i32 #define tcg_constant_reg tcg_constant_i32 @@ -275,8 +274,18 @@ typedef struct DisasContext { int mmu_idx; int privilege; bool psw_n_nonzero; + +#ifdef CONFIG_USER_ONLY + MemOp unalign; +#endif } DisasContext; +#ifdef CONFIG_USER_ONLY +#define UNALIGN(C) (C)->unalign +#else +#define UNALIGN(C) 0 +#endif + /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. 
*/ static int expand_sm_imm(DisasContext *ctx, int val) { @@ -557,7 +566,7 @@ static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t) } } -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN # define HI_OFS 0 # define LO_OFS 4 #else @@ -817,11 +826,7 @@ static void gen_goto_tb(DisasContext *ctx, int which, } else { copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b); copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var); - if (ctx->base.singlestep_enabled) { - gen_excp_1(EXCP_DEBUG); - } else { - tcg_gen_lookup_and_goto_ptr(); - } + tcg_gen_lookup_and_goto_ptr(); } } @@ -1480,7 +1485,7 @@ static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb, form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, ctx->mmu_idx == MMU_PHYS_IDX); - tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop); + tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); if (modify) { save_gpr(ctx, rb, ofs); } @@ -1498,7 +1503,7 @@ static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb, form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, ctx->mmu_idx == MMU_PHYS_IDX); - tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop); + tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); if (modify) { save_gpr(ctx, rb, ofs); } @@ -1516,7 +1521,7 @@ static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb, form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, ctx->mmu_idx == MMU_PHYS_IDX); - tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop); + tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); if (modify) { save_gpr(ctx, rb, ofs); } @@ -1534,7 +1539,7 @@ static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb, form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, ctx->mmu_idx == MMU_PHYS_IDX); - tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop); + tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); if (modify) { save_gpr(ctx, rb, ofs); } @@ -1604,7 +1609,7 @@ static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb, nullify_over(ctx); tmp = tcg_temp_new_i64(); - do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ); + do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ); save_frd(rt, tmp); tcg_temp_free_i64(tmp); @@ -1660,7 +1665,7 @@ static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb, nullify_over(ctx); tmp = load_frd(rt); - do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ); + do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ); tcg_temp_free_i64(tmp); return nullify_end(ctx); @@ -2349,11 +2354,7 @@ static bool do_rfi(DisasContext *ctx, bool rfi_r) gen_helper_rfi(cpu_env); } /* Exit the TB to recognize new interrupts. 
*/ - if (ctx->base.singlestep_enabled) { - gen_excp_1(EXCP_DEBUG); - } else { - tcg_gen_exit_tb(NULL, 0); - } + tcg_gen_exit_tb(NULL, 0); ctx->base.is_jmp = DISAS_NORETURN; return nullify_end(ctx); @@ -2392,6 +2393,16 @@ static bool trans_reset(DisasContext *ctx, arg_reset *a) #endif } +static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a) +{ + CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); +#ifndef CONFIG_USER_ONLY + nullify_over(ctx); + gen_helper_getshadowregs(cpu_env); + return nullify_end(ctx); +#endif +} + static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a) { if (a->m) { @@ -3215,19 +3226,22 @@ static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a) dest = dest_gpr(ctx, a->t); t2 = load_gpr(ctx, a->r2); - if (a->r1 == a->r2) { + if (a->r1 == 0) { + tcg_gen_extract_reg(dest, t2, sa, 32 - sa); + } else if (TARGET_REGISTER_BITS == 32) { + tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa); + } else if (a->r1 == a->r2) { TCGv_i32 t32 = tcg_temp_new_i32(); tcg_gen_trunc_reg_i32(t32, t2); tcg_gen_rotri_i32(t32, t32, sa); tcg_gen_extu_i32_reg(dest, t32); tcg_temp_free_i32(t32); - } else if (a->r1 == 0) { - tcg_gen_extract_reg(dest, t2, sa, 32 - sa); } else { - TCGv_reg t0 = tcg_temp_new(); - tcg_gen_extract_reg(t0, t2, sa, 32 - sa); - tcg_gen_deposit_reg(dest, t0, cpu_gr[a->r1], 32 - sa, sa); - tcg_temp_free(t0); + TCGv_i64 t64 = tcg_temp_new_i64(); + tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]); + tcg_gen_shri_i64(t64, t64, sa); + tcg_gen_trunc_i64_reg(dest, t64); + tcg_temp_free_i64(t64); } save_gpr(ctx, a->t, dest); @@ -4113,6 +4127,7 @@ static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->mmu_idx = MMU_USER_IDX; ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX; ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX; + ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN); #else ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3; ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX); @@ -4177,7 +4192,7 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) { /* Always fetch the insn, even if nullified, so that we check the page permissions for execute. */ - uint32_t insn = translator_ldl(env, ctx->base.pc_next); + uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next); /* Set up the IA queue for the next insn. This will be overwritten by a branch. 
*/ @@ -4277,10 +4292,9 @@ static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) nullify_save(ctx); /* FALLTHRU */ case DISAS_IAQ_N_UPDATED: - if (ctx->base.singlestep_enabled) { - gen_excp_1(EXCP_DEBUG); - } else if (is_jmp != DISAS_IAQ_N_STALE_EXIT) { + if (is_jmp != DISAS_IAQ_N_STALE_EXIT) { tcg_gen_lookup_and_goto_ptr(); + break; } /* FALLTHRU */ case DISAS_EXIT: @@ -4291,29 +4305,30 @@ static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) } } -static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs) +static void hppa_tr_disas_log(const DisasContextBase *dcbase, + CPUState *cs, FILE *logfile) { target_ulong pc = dcbase->pc_first; #ifdef CONFIG_USER_ONLY switch (pc) { case 0x00: - qemu_log("IN:\n0x00000000: (null)\n"); + fprintf(logfile, "IN:\n0x00000000: (null)\n"); return; case 0xb0: - qemu_log("IN:\n0x000000b0: light-weight-syscall\n"); + fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n"); return; case 0xe0: - qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n"); + fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n"); return; case 0x100: - qemu_log("IN:\n0x00000100: syscall\n"); + fprintf(logfile, "IN:\n0x00000100: syscall\n"); return; } #endif - qemu_log("IN: %s\n", lookup_symbol(pc)); - log_target_disas(cs, pc, dcbase->tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(pc)); + target_disas(logfile, cs, pc, dcbase->tb->size); } static const TranslatorOps hppa_tr_ops = { @@ -4325,21 +4340,9 @@ static const TranslatorOps hppa_tr_ops = { .disas_log = hppa_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext ctx; - translator_loop(&hppa_tr_ops, &ctx.base, cs, tb, max_insns); -} - -void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->iaoq_f = data[0]; - if (data[1] != (target_ureg)-1) { - env->iaoq_b = data[1]; - } - /* Since we were executing the instruction at IAOQ_F, and took some - sort of action that provoked the cpu_restore_state, we can infer - that the instruction was not nullified. 
*/ - env->psw_n = 0; + translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base); } diff --git a/target/i386/arch_dump.c b/target/i386/arch_dump.c index 004141fc04..c290910a04 100644 --- a/target/i386/arch_dump.c +++ b/target/i386/arch_dump.c @@ -42,7 +42,7 @@ typedef struct { static int x86_64_write_elf64_note(WriteCoreDumpFunction f, CPUX86State *env, int id, - void *opaque) + DumpState *s) { x86_64_user_regs_struct regs; Elf64_Nhdr *note; @@ -94,7 +94,7 @@ static int x86_64_write_elf64_note(WriteCoreDumpFunction f, buf += descsz - sizeof(x86_64_user_regs_struct)-sizeof(target_ulong); memcpy(buf, ®s, sizeof(x86_64_user_regs_struct)); - ret = f(note, note_size, opaque); + ret = f(note, note_size, s); g_free(note); if (ret < 0) { return -1; @@ -148,7 +148,7 @@ static void x86_fill_elf_prstatus(x86_elf_prstatus *prstatus, CPUX86State *env, } static int x86_write_elf64_note(WriteCoreDumpFunction f, CPUX86State *env, - int id, void *opaque) + int id, DumpState *s) { x86_elf_prstatus prstatus; Elf64_Nhdr *note; @@ -170,7 +170,7 @@ static int x86_write_elf64_note(WriteCoreDumpFunction f, CPUX86State *env, buf += ROUND_UP(name_size, 4); memcpy(buf, &prstatus, sizeof(prstatus)); - ret = f(note, note_size, opaque); + ret = f(note, note_size, s); g_free(note); if (ret < 0) { return -1; @@ -180,7 +180,7 @@ static int x86_write_elf64_note(WriteCoreDumpFunction f, CPUX86State *env, } int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, - int cpuid, void *opaque) + int cpuid, DumpState *s) { X86CPU *cpu = X86_CPU(cs); int ret; @@ -189,10 +189,10 @@ int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, bool lma = !!(first_x86_cpu->env.hflags & HF_LMA_MASK); if (lma) { - ret = x86_64_write_elf64_note(f, &cpu->env, cpuid, opaque); + ret = x86_64_write_elf64_note(f, &cpu->env, cpuid, s); } else { #endif - ret = x86_write_elf64_note(f, &cpu->env, cpuid, opaque); + ret = x86_write_elf64_note(f, &cpu->env, cpuid, s); #ifdef TARGET_X86_64 } #endif @@ -201,7 +201,7 @@ int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, } int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, - int cpuid, void *opaque) + int cpuid, DumpState *s) { X86CPU *cpu = X86_CPU(cs); x86_elf_prstatus prstatus; @@ -224,7 +224,7 @@ int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, buf += ROUND_UP(name_size, 4); memcpy(buf, &prstatus, sizeof(prstatus)); - ret = f(note, note_size, opaque); + ret = f(note, note_size, s); g_free(note); if (ret < 0) { return -1; @@ -329,7 +329,7 @@ static void qemu_get_cpustate(QEMUCPUState *s, CPUX86State *env) static inline int cpu_write_qemu_note(WriteCoreDumpFunction f, CPUX86State *env, - void *opaque, + DumpState *s, int type) { QEMUCPUState state; @@ -369,7 +369,7 @@ static inline int cpu_write_qemu_note(WriteCoreDumpFunction f, buf += ROUND_UP(name_size, 4); memcpy(buf, &state, sizeof(state)); - ret = f(note, note_size, opaque); + ret = f(note, note_size, s); g_free(note); if (ret < 0) { return -1; @@ -379,19 +379,19 @@ static inline int cpu_write_qemu_note(WriteCoreDumpFunction f, } int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cs, - void *opaque) + DumpState *s) { X86CPU *cpu = X86_CPU(cs); - return cpu_write_qemu_note(f, &cpu->env, opaque, 1); + return cpu_write_qemu_note(f, &cpu->env, s, 1); } int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cs, - void *opaque) + DumpState *s) { X86CPU *cpu = X86_CPU(cs); - return cpu_write_qemu_note(f, &cpu->env, opaque, 0); + return 
cpu_write_qemu_note(f, &cpu->env, s, 0); } int cpu_get_dump_info(ArchDumpInfo *info, diff --git a/target/i386/cpu-dump.c b/target/i386/cpu-dump.c index 02b635a52c..08ac957e99 100644 --- a/target/i386/cpu-dump.c +++ b/target/i386/cpu-dump.c @@ -464,13 +464,13 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags) snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op); #ifdef TARGET_X86_64 if (env->hflags & HF_CS64_MASK) { - qemu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n", + qemu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%s\n", env->cc_src, env->cc_dst, cc_op_name); } else #endif { - qemu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n", + qemu_fprintf(f, "CCS=%08x CCD=%08x CCO=%s\n", (uint32_t)env->cc_src, (uint32_t)env->cc_dst, cc_op_name); } diff --git a/target/i386/cpu-param.h b/target/i386/cpu-param.h index 57abc64c0d..f579b16bd2 100644 --- a/target/i386/cpu-param.h +++ b/target/i386/cpu-param.h @@ -6,7 +6,7 @@ */ #ifndef I386_CPU_PARAM_H -#define I386_CPU_PARAM_H 1 +#define I386_CPU_PARAM_H #ifdef TARGET_X86_64 # define TARGET_LONG_BITS 64 @@ -23,6 +23,10 @@ # define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif #define TARGET_PAGE_BITS 12 -#define NB_MMU_MODES 3 +#define NB_MMU_MODES 5 + +#ifndef CONFIG_USER_ONLY +# define TARGET_TB_PCREL 1 +#endif #endif diff --git a/target/i386/cpu-qom.h b/target/i386/cpu-qom.h index f9923cee04..c557a522e1 100644 --- a/target/i386/cpu-qom.h +++ b/target/i386/cpu-qom.h @@ -30,8 +30,7 @@ #define TYPE_X86_CPU "i386-cpu" #endif -OBJECT_DECLARE_TYPE(X86CPU, X86CPUClass, - X86_CPU) +OBJECT_DECLARE_CPU_TYPE(X86CPU, X86CPUClass, X86_CPU) typedef struct X86CPUModel X86CPUModel; diff --git a/target/i386/cpu-sysemu.c b/target/i386/cpu-sysemu.c index 1078e3d157..a6f47b7d11 100644 --- a/target/i386/cpu-sysemu.c +++ b/target/i386/cpu-sysemu.c @@ -103,7 +103,7 @@ static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) /* Convert CPU model data from X86CPU object to a property dictionary * that can recreate exactly the same CPU model, including every - * writeable QOM property. + * writable QOM property. 
*/ static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) { @@ -313,7 +313,7 @@ GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs) GuestPanicInformation *panic_info = NULL; if (hyperv_feat_enabled(cpu, HYPERV_FEAT_CRASH)) { - panic_info = g_malloc0(sizeof(GuestPanicInformation)); + panic_info = g_new0(GuestPanicInformation, 1); panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V; @@ -335,7 +335,7 @@ void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, GuestPanicInformation *panic_info; if (!cs->crash_occurred) { - error_setg(errp, "No crash occured"); + error_setg(errp, "No crash occurred"); return; } diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 36d6c6c4e6..2330a8d902 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -21,12 +21,14 @@ #include "qemu/units.h" #include "qemu/cutils.h" #include "qemu/qemu-print.h" +#include "qemu/hw-version.h" #include "cpu.h" #include "tcg/helper-tcg.h" #include "sysemu/reset.h" #include "sysemu/hvf.h" #include "kvm/kvm_i386.h" -#include "sev_i386.h" +#include "sev.h" +#include "qapi/error.h" #include "qapi/qapi-visit-machine.h" #include "qapi/qmp/qerror.h" #include "qapi/qapi-commands-machine-target.h" @@ -36,6 +38,7 @@ #ifndef CONFIG_USER_ONLY #include "exec/address-spaces.h" #include "hw/boards.h" +#include "hw/i386/sgx-epc.h" #endif #include "disas/capstone.h" @@ -576,6 +579,18 @@ static CPUCacheInfo legacy_l3_cache = { #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ +/* CPUID Leaf 0x1D constants: */ +#define INTEL_AMX_TILE_MAX_SUBLEAF 0x1 +#define INTEL_AMX_TOTAL_TILE_BYTES 0x2000 +#define INTEL_AMX_BYTES_PER_TILE 0x400 +#define INTEL_AMX_BYTES_PER_ROW 0x40 +#define INTEL_AMX_TILE_MAX_NAMES 0x8 +#define INTEL_AMX_TILE_MAX_ROWS 0x10 + +/* CPUID Leaf 0x1E constants: */ +#define INTEL_AMX_TMUL_MAX_K 0x10 +#define INTEL_AMX_TMUL_MAX_N 0x40 + void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, uint32_t vendor2, uint32_t vendor3) { @@ -614,13 +629,13 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \ CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \ - CPUID_EXT_RDRAND) + CPUID_EXT_RDRAND | CPUID_EXT_AVX | CPUID_EXT_F16C | \ + CPUID_EXT_FMA) /* missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, - CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, + CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, - CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX, - CPUID_EXT_F16C */ + CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER */ #ifdef TARGET_X86_64 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) @@ -635,20 +650,21 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) #define TCG_EXT4_FEATURES 0 -#define TCG_SVM_FEATURES CPUID_SVM_NPT +#define TCG_SVM_FEATURES (CPUID_SVM_NPT | CPUID_SVM_VGIF | \ + CPUID_SVM_SVME_ADDR_CHK) #define TCG_KVM_FEATURES 0 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \ CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \ CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \ - CPUID_7_0_EBX_ERMS) + CPUID_7_0_EBX_ERMS | 
CPUID_7_0_EBX_AVX2) /* missing: - CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, + CPUID_7_0_EBX_HLE CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, CPUID_7_0_EBX_RDSEED */ -#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \ +#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | \ /* CPUID_7_0_ECX_OSPKE is dynamic */ \ - CPUID_7_0_ECX_LA57 | CPUID_7_0_ECX_PKS) + CPUID_7_0_ECX_LA57 | CPUID_7_0_ECX_PKS | CPUID_7_0_ECX_VAES) #define TCG_7_0_EDX_FEATURES 0 #define TCG_7_1_EAX_FEATURES 0 #define TCG_APM_FEATURES 0 @@ -657,6 +673,9 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, /* missing: CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */ #define TCG_14_0_ECX_FEATURES 0 +#define TCG_SGX_12_0_EAX_FEATURES 0 +#define TCG_SGX_12_0_EBX_FEATURES 0 +#define TCG_SGX_12_1_EAX_FEATURES 0 FeatureWordInfo feature_word_info[FEATURE_WORDS] = { [FEAT_1_EDX] = { @@ -798,7 +817,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { [FEAT_7_0_EBX] = { .type = CPUID_FEATURE_WORD, .feat_names = { - "fsgsbase", "tsc-adjust", NULL, "bmi1", + "fsgsbase", "tsc-adjust", "sgx", "bmi1", "hle", "avx2", NULL, "smep", "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL, @@ -824,7 +843,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { "la57", NULL, NULL, NULL, NULL, NULL, "rdpid", NULL, "bus-lock-detect", "cldemote", NULL, "movdiri", - "movdir64b", NULL, NULL, "pks", + "movdir64b", NULL, "sgxlc", "pks", }, .cpuid = { .eax = 7, @@ -840,9 +859,9 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { "fsrm", NULL, NULL, NULL, "avx512-vp2intersect", NULL, "md-clear", NULL, NULL, NULL, "serialize", NULL, - "tsx-ldtrk", NULL, NULL /* pconfig */, NULL, - NULL, NULL, NULL, "avx512-fp16", - NULL, NULL, "spec-ctrl", "stibp", + "tsx-ldtrk", NULL, NULL /* pconfig */, "arch-lbr", + NULL, NULL, "amx-bf16", "avx512-fp16", + "amx-tile", "amx-int8", "spec-ctrl", "stibp", NULL, "arch-capabilities", "core-capability", "ssbd", }, .cpuid = { @@ -907,7 +926,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .type = CPUID_FEATURE_WORD, .feat_names = { "xsaveopt", "xsavec", "xgetbv1", "xsaves", - NULL, NULL, NULL, NULL, + "xfd", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -922,6 +941,34 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { }, .tcg_features = TCG_XSAVE_FEATURES, }, + [FEAT_XSAVE_XSS_LO] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { + .eax = 0xD, + .needs_ecx = true, + .ecx = 1, + .reg = R_ECX, + }, + }, + [FEAT_XSAVE_XSS_HI] = { + .type = CPUID_FEATURE_WORD, + .cpuid = { + .eax = 0xD, + .needs_ecx = true, + .ecx = 1, + .reg = R_EDX + }, + }, [FEAT_6_EAX] = { .type = CPUID_FEATURE_WORD, .feat_names = { @@ -937,7 +984,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .cpuid = { .eax = 6, .reg = R_EAX, }, .tcg_features = TCG_6_EAX_FEATURES, }, - [FEAT_XSAVE_COMP_LO] = { + [FEAT_XSAVE_XCR0_LO] = { .type = CPUID_FEATURE_WORD, .cpuid = { .eax = 0xD, @@ -950,7 +997,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK | XSTATE_PKRU_MASK, }, - [FEAT_XSAVE_COMP_HI] = { + [FEAT_XSAVE_XCR0_HI] = { .type = CPUID_FEATURE_WORD, .cpuid = { .eax = 0xD, @@ -1185,6 +1232,65 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .tcg_features = 
TCG_14_0_ECX_FEATURES, }, + [FEAT_SGX_12_0_EAX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + "sgx1", "sgx2", NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { + .eax = 0x12, + .needs_ecx = true, .ecx = 0, + .reg = R_EAX, + }, + .tcg_features = TCG_SGX_12_0_EAX_FEATURES, + }, + + [FEAT_SGX_12_0_EBX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + "sgx-exinfo" , NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { + .eax = 0x12, + .needs_ecx = true, .ecx = 0, + .reg = R_EBX, + }, + .tcg_features = TCG_SGX_12_0_EBX_FEATURES, + }, + + [FEAT_SGX_12_1_EAX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL, "sgx-debug", "sgx-mode64", NULL, + "sgx-provisionkey", "sgx-tokenkey", NULL, "sgx-kss", + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { + .eax = 0x12, + .needs_ecx = true, .ecx = 1, + .reg = R_EAX, + }, + .tcg_features = TCG_SGX_12_1_EAX_FEATURES, + }, }; typedef struct FeatureMask { @@ -1253,6 +1359,14 @@ static FeatureDep feature_dependencies[] = { .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID }, .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID }, }, + { + .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_MPX }, + .to = { FEAT_VMX_EXIT_CTLS, VMX_VM_EXIT_CLEAR_BNDCFGS }, + }, + { + .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_MPX }, + .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_LOAD_BNDCFGS }, + }, { .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED }, .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING }, @@ -1308,6 +1422,9 @@ static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = { }; #undef REGISTER +/* CPUID feature bits available in XSS */ +#define CPUID_XSTATE_XSS_MASK (XSTATE_ARCH_LBR_MASK) + ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = { [XSTATE_FP_BIT] = { /* x87 FP state component is always enabled if XSAVE is supported */ @@ -1340,19 +1457,34 @@ ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = { [XSTATE_PKRU_BIT] = { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU, .size = sizeof(XSavePKRU) }, + [XSTATE_ARCH_LBR_BIT] = { + .feature = FEAT_7_0_EDX, .bits = CPUID_7_0_EDX_ARCH_LBR, + .offset = 0 /*supervisor mode component, offset = 0 */, + .size = sizeof(XSavesArchLBR) }, + [XSTATE_XTILE_CFG_BIT] = { + .feature = FEAT_7_0_EDX, .bits = CPUID_7_0_EDX_AMX_TILE, + .size = sizeof(XSaveXTILECFG), + }, + [XSTATE_XTILE_DATA_BIT] = { + .feature = FEAT_7_0_EDX, .bits = CPUID_7_0_EDX_AMX_TILE, + .size = sizeof(XSaveXTILEDATA) + }, }; #ifndef XBOX -static uint32_t xsave_area_size(uint64_t mask) +uint32_t xsave_area_size(uint64_t mask, bool compacted) { + uint64_t ret = x86_ext_save_areas[0].size; + const ExtSaveArea *esa; + uint32_t offset = 0; int i; - uint64_t ret = 0; - for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { - const ExtSaveArea *esa = &x86_ext_save_areas[i]; + for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { + esa = &x86_ext_save_areas[i]; if ((mask >> i) & 1) { - ret = MAX(ret, esa->offset + esa->size); + offset = compacted ? 
ret : esa->offset; + ret = MAX(ret, offset + esa->size); } } return ret; @@ -1365,10 +1497,10 @@ static inline bool accel_uses_host_cpuid(void) return kvm_enabled() || hvf_enabled(); } -static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu) +static inline uint64_t x86_cpu_xsave_xcr0_components(X86CPU *cpu) { - return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 | - cpu->env.features[FEAT_XSAVE_COMP_LO]; + return ((uint64_t)cpu->env.features[FEAT_XSAVE_XCR0_HI]) << 32 | + cpu->env.features[FEAT_XSAVE_XCR0_LO]; } /* Return name of 32-bit register, from a R_* constant */ @@ -1380,6 +1512,12 @@ static const char *get_register_name_32(unsigned int reg) return x86_reg_info_32[reg].name; } +static inline uint64_t x86_cpu_xsave_xss_components(X86CPU *cpu) +{ + return ((uint64_t)cpu->env.features[FEAT_XSAVE_XSS_HI]) << 32 | + cpu->env.features[FEAT_XSAVE_XSS_LO]; +} + /* * Returns the set of feature flags that are supported and migratable by * QEMU, for a given FeatureWord. @@ -3122,7 +3260,7 @@ static const X86CPUDefinition builtin_x86_defs[] = { MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO, .features[FEAT_7_1_EAX] = - CPUID_7_1_EAX_AVX_VNNI | CPUID_7_1_EAX_AVX512_BF16, + CPUID_7_1_EAX_AVX512_BF16, /* XSAVES is added in version 2 */ .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | @@ -3192,128 +3330,6 @@ static const X86CPUDefinition builtin_x86_defs[] = { { /* end of list */ } } }, - { - .name = "Icelake-Client", - .level = 0xd, - .vendor = CPUID_VENDOR_INTEL, - .family = 6, - .model = 126, - .stepping = 0, - .features[FEAT_1_EDX] = - CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | - CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | - CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | - CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | - CPUID_DE | CPUID_FP87, - .features[FEAT_1_ECX] = - CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | - CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | - CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | - CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | - CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | - CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, - .features[FEAT_8000_0001_EDX] = - CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | - CPUID_EXT2_SYSCALL, - .features[FEAT_8000_0001_ECX] = - CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, - .features[FEAT_8000_0008_EBX] = - CPUID_8000_0008_EBX_WBNOINVD, - .features[FEAT_7_0_EBX] = - CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | - CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | - CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | - CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | - CPUID_7_0_EBX_SMAP, - .features[FEAT_7_0_ECX] = - CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | - CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | - CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | - CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | - CPUID_7_0_ECX_AVX512_VPOPCNTDQ, - .features[FEAT_7_0_EDX] = - CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, - /* XSAVES is added in version 3 */ - .features[FEAT_XSAVE] = - CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | - CPUID_XSAVE_XGETBV1, - .features[FEAT_6_EAX] = - CPUID_6_EAX_ARAT, - /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ - .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | - 
MSR_VMX_BASIC_TRUE_CTLS, - .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | - VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | - VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, - .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | - MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | - MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | - MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | - MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | - MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | - MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, - .features[FEAT_VMX_EXIT_CTLS] = - VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | - VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | - VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | - VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | - VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, - .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | - MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, - .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | - VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | - VMX_PIN_BASED_VMX_PREEMPTION_TIMER, - .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | - VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | - VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | - VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | - VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | - VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | - VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | - VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | - VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | - VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | - VMX_CPU_BASED_MONITOR_TRAP_FLAG | - VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, - .features[FEAT_VMX_SECONDARY_CTLS] = - VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | - VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | - VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | - VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | - VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | - VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | - VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, - .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, - .xlevel = 0x80000008, - .model_id = "Intel Core Processor (Icelake)", - .versions = (X86CPUVersionDefinition[]) { - { - .version = 1, - .note = "deprecated" - }, - { - .version = 2, - .note = "no TSX, deprecated", - .alias = "Icelake-Client-noTSX", - .props = (PropValue[]) { - { "hle", "off" }, - { "rtm", "off" }, - { /* end of list */ } - }, - }, - { - .version = 3, - .note = "no TSX, XSAVES, deprecated", - .props = (PropValue[]) { - { "xsaves", "on" }, - { "vmx-xsaves", "on" }, - { /* end of list */ } - }, - }, - { /* end of list */ } - }, - .deprecation_note = "use Icelake-Server instead" - }, { .name = "Icelake-Server", .level = 0xd, @@ -3460,6 +3476,14 @@ static const X86CPUDefinition builtin_x86_defs[] = { { /* end of list */ } }, }, + { + .version = 6, + .note = "5-level EPT", + .props = (PropValue[]) { + { "vmx-page-walk-5", "on" }, + { /* end of list */ } + }, + }, { /* end of list */ } } }, @@ -3628,7 +3652,7 @@ static const X86CPUDefinition builtin_x86_defs[] = { 
CPUID_7_0_EDX_CORE_CAPABILITY, .features[FEAT_CORE_CAPABILITY] = MSR_CORE_CAP_SPLIT_LOCK_DETECT, - /* XSAVES is is added in version 3 */ + /* XSAVES is added in version 3 */ .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, @@ -3702,6 +3726,15 @@ static const X86CPUDefinition builtin_x86_defs[] = { { /* end of list */ }, }, }, + { + .version = 4, + .note = "no split lock detect, no core-capability", + .props = (PropValue[]) { + { "split-lock-detect", "off" }, + { "core-capability", "off" }, + { /* end of list */ }, + }, + }, { /* end of list */ }, }, }, @@ -4551,8 +4584,8 @@ static const char *x86_cpu_feature_name(FeatureWord w, int bitnr) /* XSAVE components are automatically enabled by other features, * so return the original feature name instead */ - if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) { - int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr; + if (w == FEAT_XSAVE_XCR0_LO || w == FEAT_XSAVE_XCR0_HI) { + int comp = (w == FEAT_XSAVE_XCR0_HI) ? bitnr + 32 : bitnr; if (comp < ARRAY_SIZE(x86_ext_save_areas) && x86_ext_save_areas[comp].bits) { @@ -4825,7 +4858,12 @@ static void x86_cpu_list_entry(gpointer data, gpointer user_data) desc = g_strdup_printf("%s", model_id); } - qemu_printf("x86 %-20s %-58s\n", name, desc); + if (cc->model && cc->model->cpudef->deprecation_note) { + g_autofree char *olddesc = desc; + desc = g_strdup_printf("%s (deprecated)", olddesc); + } + + qemu_printf("x86 %-20s %s\n", name, desc); } /* list available CPU models and flags */ @@ -4899,8 +4937,8 @@ CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) return cpu_list; } -static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, - bool migratable_only) +uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, + bool migratable_only) { FeatureWordInfo *wi = &feature_word_info[w]; uint64_t r = 0; @@ -4940,6 +4978,61 @@ static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, return r; } +#ifndef XBOX +static void x86_cpu_get_supported_cpuid(uint32_t func, uint32_t index, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + if (kvm_enabled()) { + *eax = kvm_arch_get_supported_cpuid(kvm_state, func, index, R_EAX); + *ebx = kvm_arch_get_supported_cpuid(kvm_state, func, index, R_EBX); + *ecx = kvm_arch_get_supported_cpuid(kvm_state, func, index, R_ECX); + *edx = kvm_arch_get_supported_cpuid(kvm_state, func, index, R_EDX); + } else if (hvf_enabled()) { + *eax = hvf_get_supported_cpuid(func, index, R_EAX); + *ebx = hvf_get_supported_cpuid(func, index, R_EBX); + *ecx = hvf_get_supported_cpuid(func, index, R_ECX); + *edx = hvf_get_supported_cpuid(func, index, R_EDX); + } else { + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + } +} + +static void x86_cpu_get_cache_cpuid(uint32_t func, uint32_t index, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + uint32_t level, unused; + + /* Only return valid host leaves. */ + switch (func) { + case 2: + case 4: + host_cpuid(0, 0, &level, &unused, &unused, &unused); + break; + case 0x80000005: + case 0x80000006: + case 0x8000001d: + host_cpuid(0x80000000, 0, &level, &unused, &unused, &unused); + break; + default: + return; + } + + if (func > level) { + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + } else { + host_cpuid(func, index, eax, ebx, ecx, edx); + } +} +#endif /* XBOX */ + /* * Only for builtin_x86_defs models initialized with x86_register_cpudef_types. 
*/ @@ -5130,6 +5223,15 @@ static void x86_register_cpudef_types(const X86CPUDefinition *def) } +uint32_t cpu_x86_virtual_addr_width(CPUX86State *env) +{ + if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) { + return 57; /* 57 bits virtual */ + } else { + return 48; /* 48 bits virtual */ + } +} + void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) @@ -5204,7 +5306,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, #else /* cache info: needed for Pentium Pro compatibility */ if (cpu->cache_info_passthrough) { - host_cpuid(index, 0, eax, ebx, ecx, edx); + x86_cpu_get_cache_cpuid(index, 0, eax, ebx, ecx, edx); break; } else if (cpu->vendor_cpuid_only && IS_AMD_CPU(env)) { *eax = *ebx = *ecx = *edx = 0; @@ -5226,11 +5328,23 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, case 4: /* cache info: needed for Core compatibility */ if (cpu->cache_info_passthrough) { - host_cpuid(index, count, eax, ebx, ecx, edx); - /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */ - *eax &= ~0xFC000000; - if ((*eax & 31) && cs->nr_cores > 1) { - *eax |= (cs->nr_cores - 1) << 26; + x86_cpu_get_cache_cpuid(index, count, eax, ebx, ecx, edx); + /* + * QEMU has its own number of cores/logical cpus, + * set 24..14, 31..26 bit to configured values + */ + if (*eax & 31) { + int host_vcpus_per_cache = 1 + ((*eax & 0x3FFC000) >> 14); + int vcpus_per_socket = env->nr_dies * cs->nr_cores * + cs->nr_threads; + if (cs->nr_cores > 1) { + *eax &= ~0xFC000000; + *eax |= (pow2ceil(cs->nr_cores) - 1) << 26; + } + if (host_vcpus_per_cache > vcpus_per_socket) { + *eax &= ~0x3FFC000; + *eax |= (pow2ceil(vcpus_per_socket) - 1) << 14; + } } } else if (cpu->vendor_cpuid_only && IS_AMD_CPU(env)) { *eax = *ebx = *ecx = *edx = 0; @@ -5292,6 +5406,25 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *ecx |= CPUID_7_0_ECX_OSPKE; } *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ + + /* + * SGX cannot be emulated in software. If hardware does not + * support enabling SGX and/or SGX flexible launch control, + * then we need to update the VM's CPUID values accordingly. 
+ */ + if ((*ebx & CPUID_7_0_EBX_SGX) && + (!kvm_enabled() || + !(kvm_arch_get_supported_cpuid(cs->kvm_state, 0x7, 0, R_EBX) & + CPUID_7_0_EBX_SGX))) { + *ebx &= ~CPUID_7_0_EBX_SGX; + } + + if ((*ecx & CPUID_7_0_ECX_SGX_LC) && + (!(*ebx & CPUID_7_0_EBX_SGX) || !kvm_enabled() || + !(kvm_arch_get_supported_cpuid(cs->kvm_state, 0x7, 0, R_ECX) & + CPUID_7_0_ECX_SGX_LC))) { + *ecx &= ~CPUID_7_0_ECX_SGX_LC; + } } else if (count == 1) { *eax = env->features[FEAT_7_1_EAX]; *ebx = 0; @@ -5313,18 +5446,8 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, break; case 0xA: /* Architectural Performance Monitoring Leaf */ - if (kvm_enabled() && cpu->enable_pmu) { - KVMState *s = cs->kvm_state; - - *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX); - *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX); - *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX); - *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX); - } else if (hvf_enabled() && cpu->enable_pmu) { - *eax = hvf_get_supported_cpuid(0xA, count, R_EAX); - *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX); - *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX); - *edx = hvf_get_supported_cpuid(0xA, count, R_EDX); + if (accel_uses_host_cpuid() && cpu->enable_pmu) { + x86_cpu_get_supported_cpuid(0xA, count, eax, ebx, ecx, edx); } else { *eax = 0; *ebx = 0; @@ -5362,6 +5485,13 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, assert(!(*eax & ~0x1f)); *ebx &= 0xffff; /* The count doesn't need to be reliable. */ break; + case 0x1C: + if (accel_uses_host_cpuid() && cpu->enable_pmu && + (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) { + x86_cpu_get_supported_cpuid(0x1C, 0, eax, ebx, ecx, edx); + *edx = 0; + } + break; case 0x1F: /* V2 Extended Topology Enumeration Leaf */ if (env->nr_dies < 2) { @@ -5406,27 +5536,108 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } if (count == 0) { - *ecx = xsave_area_size(x86_cpu_xsave_components(cpu)); - *eax = env->features[FEAT_XSAVE_COMP_LO]; - *edx = env->features[FEAT_XSAVE_COMP_HI]; + *ecx = xsave_area_size(x86_cpu_xsave_xcr0_components(cpu), false); + *eax = env->features[FEAT_XSAVE_XCR0_LO]; + *edx = env->features[FEAT_XSAVE_XCR0_HI]; /* * The initial value of xcr0 and ebx == 0, On host without kvm * commit 412a3c41(e.g., CentOS 6), the ebx's value always == 0 * even through guest update xcr0, this will crash some legacy guest * (e.g., CentOS 6), So set ebx == ecx to workaroud it. */ - *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0); + *ebx = kvm_enabled() ? 
*ecx : xsave_area_size(env->xcr0, false); } else if (count == 1) { + uint64_t xstate = x86_cpu_xsave_xcr0_components(cpu) | + x86_cpu_xsave_xss_components(cpu); + *eax = env->features[FEAT_XSAVE]; + *ebx = xsave_area_size(xstate, true); + *ecx = env->features[FEAT_XSAVE_XSS_LO]; + *edx = env->features[FEAT_XSAVE_XSS_HI]; + if (kvm_enabled() && cpu->enable_pmu && + (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR) && + (*eax & CPUID_XSAVE_XSAVES)) { + *ecx |= XSTATE_ARCH_LBR_MASK; + } else { + *ecx &= ~XSTATE_ARCH_LBR_MASK; + } + } else if (count == 0xf && + accel_uses_host_cpuid() && cpu->enable_pmu && + (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) { + x86_cpu_get_supported_cpuid(0xD, count, eax, ebx, ecx, edx); } else if (count < ARRAY_SIZE(x86_ext_save_areas)) { - if ((x86_cpu_xsave_components(cpu) >> count) & 1) { - const ExtSaveArea *esa = &x86_ext_save_areas[count]; + const ExtSaveArea *esa = &x86_ext_save_areas[count]; + + if (x86_cpu_xsave_xcr0_components(cpu) & (1ULL << count)) { *eax = esa->size; *ebx = esa->offset; + *ecx = esa->ecx & + (ESA_FEATURE_ALIGN64_MASK | ESA_FEATURE_XFD_MASK); + } else if (x86_cpu_xsave_xss_components(cpu) & (1ULL << count)) { + *eax = esa->size; + *ebx = 0; + *ecx = 1; } } break; } + case 0x12: +#ifndef CONFIG_USER_ONLY + if (!kvm_enabled() || + !(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SGX)) { + *eax = *ebx = *ecx = *edx = 0; + break; + } + + /* + * SGX sub-leafs CPUID.0x12.{0x2..N} enumerate EPC sections. Retrieve + * the EPC properties, e.g. confidentiality and integrity, from the + * host's first EPC section, i.e. assume there is one EPC section or + * that all EPC sections have the same security properties. + */ + if (count > 1) { + uint64_t epc_addr, epc_size; + + if (sgx_epc_get_section(count - 2, &epc_addr, &epc_size)) { + *eax = *ebx = *ecx = *edx = 0; + break; + } + host_cpuid(index, 2, eax, ebx, ecx, edx); + *eax = (uint32_t)(epc_addr & 0xfffff000) | 0x1; + *ebx = (uint32_t)(epc_addr >> 32); + *ecx = (uint32_t)(epc_size & 0xfffff000) | (*ecx & 0xf); + *edx = (uint32_t)(epc_size >> 32); + break; + } + + /* + * SGX sub-leafs CPUID.0x12.{0x0,0x1} are heavily dependent on hardware + * and KVM, i.e. QEMU cannot emulate features to override what KVM + * supports. Features can be further restricted by userspace, but not + * made more permissive. + */ + x86_cpu_get_supported_cpuid(0x12, count, eax, ebx, ecx, edx); + + if (count == 0) { + *eax &= env->features[FEAT_SGX_12_0_EAX]; + *ebx &= env->features[FEAT_SGX_12_0_EBX]; + } else { + *eax &= env->features[FEAT_SGX_12_1_EAX]; + *ebx &= 0; /* ebx reserve */ + *ecx &= env->features[FEAT_XSAVE_XCR0_LO]; + *edx &= env->features[FEAT_XSAVE_XCR0_HI]; + + /* FP and SSE are always allowed regardless of XSAVE/XCR0. */ + *ecx |= XSTATE_FP_MASK | XSTATE_SSE_MASK; + + /* Access to PROVISIONKEY requires additional credentials. 
*/ + if ((*eax & (1U << 4)) && + !kvm_enable_sgx_provisioning(cs->kvm_state)) { + *eax &= ~(1U << 4); + } + } +#endif + break; case 0x14: { /* Intel Processor Trace Enumeration */ *eax = 0; @@ -5451,6 +5662,43 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } break; } + case 0x1D: { + /* AMX TILE */ + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + if (!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_AMX_TILE)) { + break; + } + + if (count == 0) { + /* Highest numbered palette subleaf */ + *eax = INTEL_AMX_TILE_MAX_SUBLEAF; + } else if (count == 1) { + *eax = INTEL_AMX_TOTAL_TILE_BYTES | + (INTEL_AMX_BYTES_PER_TILE << 16); + *ebx = INTEL_AMX_BYTES_PER_ROW | (INTEL_AMX_TILE_MAX_NAMES << 16); + *ecx = INTEL_AMX_TILE_MAX_ROWS; + } + break; + } + case 0x1E: { + /* AMX TMUL */ + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + if (!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_AMX_TILE)) { + break; + } + + if (count == 0) { + /* Highest numbered palette subleaf */ + *ebx = INTEL_AMX_TMUL_MAX_K | (INTEL_AMX_TMUL_MAX_N << 8); + } + break; + } case 0x40000000: /* * CPUID code in kvm_arch_init_vcpu() ignores stuff @@ -5510,7 +5758,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, case 0x80000005: /* cache info (L1 cache) */ if (cpu->cache_info_passthrough) { - host_cpuid(index, 0, eax, ebx, ecx, edx); + x86_cpu_get_cache_cpuid(index, 0, eax, ebx, ecx, edx); break; } *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | @@ -5523,7 +5771,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, case 0x80000006: /* cache info (L2 cache) */ if (cpu->cache_info_passthrough) { - host_cpuid(index, 0, eax, ebx, ecx, edx); + x86_cpu_get_cache_cpuid(index, 0, eax, ebx, ecx, edx); break; } *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | @@ -5547,16 +5795,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, break; case 0x80000008: /* virtual & phys address size in low 2 bytes. */ + *eax = cpu->phys_bits; if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { /* 64 bit processor */ - *eax = cpu->phys_bits; /* configurable physical bits */ - if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) { - *eax |= 0x00003900; /* 57 bits virtual */ - } else { - *eax |= 0x00003000; /* 48 bits virtual */ - } - } else { - *eax = cpu->phys_bits; + *eax |= (cpu_x86_virtual_addr_width(env) << 8); } *ebx = env->features[FEAT_8000_0008_EBX]; if (cs->nr_cores * cs->nr_threads > 1) { @@ -5589,7 +5831,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, case 0x8000001D: *eax = 0; if (cpu->cache_info_passthrough) { - host_cpuid(index, count, eax, ebx, ecx, edx); + x86_cpu_get_cache_cpuid(index, count, eax, ebx, ecx, edx); break; } switch (count) { @@ -5647,12 +5889,13 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *edx = 0; break; case 0x8000001F: - *eax = sev_enabled() ? 0x2 : 0; - *eax |= sev_es_enabled() ? 0x8 : 0; - *ebx = sev_get_cbit_position(); - *ebx |= sev_get_reduced_phys_bits() << 6; - *ecx = 0; - *edx = 0; + *eax = *ebx = *ecx = *edx = 0; + if (sev_enabled()) { + *eax = 0x2; + *eax |= sev_es_enabled() ? 
0x8 : 0; + *ebx = sev_get_cbit_position(); + *ebx |= sev_get_reduced_phys_bits() << 6; + } break; #endif /* XBOX */ default: @@ -5665,6 +5908,17 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } } +static void x86_cpu_set_sgxlepubkeyhash(CPUX86State *env) +{ +#ifndef CONFIG_USER_ONLY + /* Those default values are defined in Skylake HW */ + env->msr_ia32_sgxlepubkeyhash[0] = 0xa6053e051270b7acULL; + env->msr_ia32_sgxlepubkeyhash[1] = 0x6cfbe8ba8b3b413dULL; + env->msr_ia32_sgxlepubkeyhash[2] = 0xc4916d99f2b3735dULL; + env->msr_ia32_sgxlepubkeyhash[3] = 0xd4f8c05909f9bb3bULL; +#endif +} + static void x86_cpu_reset(DeviceState *dev) { CPUState *s = CPU(dev); @@ -5682,8 +5936,9 @@ static void x86_cpu_reset(DeviceState *dev) env->old_exception = -1; /* init to reset state */ - + env->int_ctl = 0; env->hflags2 |= HF2_GIF_MASK; + env->hflags2 |= HF2_VGIF_MASK; env->hflags &= ~HF_GUEST_MASK; cpu_x86_update_cr0(env, 0x60000010); @@ -5733,6 +5988,19 @@ static void x86_cpu_reset(DeviceState *dev) env->xstate_bv = 0; env->pat = 0x0007040600070406ULL; + + if (kvm_enabled()) { + /* + * KVM handles TSC = 0 specially and thinks we are hot-plugging + * a new CPU, use 1 instead to force a reset. + */ + if (env->tsc != 0) { + env->tsc = 1; + } + } else { + env->tsc = 0; + } + env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT; if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) { env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT; @@ -5754,6 +6022,9 @@ static void x86_cpu_reset(DeviceState *dev) } for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { const ExtSaveArea *esa = &x86_ext_save_areas[i]; + if (!((1 << i) & CPUID_XSTATE_XCR0_MASK)) { + continue; + } if (env->features[esa->feature] & esa->bits) { xcr0 |= 1ull << i; } @@ -5787,6 +6058,7 @@ static void x86_cpu_reset(DeviceState *dev) env->exception_has_payload = false; env->exception_payload = 0; env->nmi_injected = false; + env->triple_fault_pending = false; #if !defined(CONFIG_USER_ONLY) /* We hard-wire the BSP to the first CPU. 
*/ apic_designate_bsp(cpu->apic_state, s->cpu_index == 0); @@ -5796,6 +6068,24 @@ static void x86_cpu_reset(DeviceState *dev) if (kvm_enabled()) { kvm_arch_reset_vcpu(cpu); } + + x86_cpu_set_sgxlepubkeyhash(env); + + env->amd_tsc_scale_msr = MSR_AMD64_TSC_RATIO_DEFAULT; + +#endif +} + +void x86_cpu_after_reset(X86CPU *cpu) +{ +#ifndef CONFIG_USER_ONLY + if (kvm_enabled()) { + kvm_arch_after_reset_vcpu(cpu); + } + + if (cpu->apic_state) { + device_cold_reset(cpu->apic_state); + } #endif } @@ -5860,10 +6150,11 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu) CPUX86State *env = &cpu->env; int i; uint64_t mask; + static bool request_perm; if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { - env->features[FEAT_XSAVE_COMP_LO] = 0; - env->features[FEAT_XSAVE_COMP_HI] = 0; + env->features[FEAT_XSAVE_XCR0_LO] = 0; + env->features[FEAT_XSAVE_XCR0_HI] = 0; return; } @@ -5875,8 +6166,16 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu) } } - env->features[FEAT_XSAVE_COMP_LO] = mask; - env->features[FEAT_XSAVE_COMP_HI] = mask >> 32; + /* Only request permission for first vcpu */ + if (kvm_enabled() && !request_perm) { + kvm_request_xsave_components(cpu, mask); + request_perm = true; + } + + env->features[FEAT_XSAVE_XCR0_LO] = mask & CPUID_XSTATE_XCR0_MASK; + env->features[FEAT_XSAVE_XCR0_HI] = mask >> 32; + env->features[FEAT_XSAVE_XSS_LO] = mask & CPUID_XSTATE_XSS_MASK; + env->features[FEAT_XSAVE_XSS_HI] = mask >> 32; } /***** Steps involved on loading and filtering CPUID data @@ -6025,6 +6324,11 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp) if (sev_enabled()) { x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F); } + + /* SGX requires CPUID[0x12] for EPC enumeration */ + if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SGX) { + x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x12); + } } /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */ @@ -6124,10 +6428,6 @@ static void x86_cpu_hyperv_realize(X86CPU *cpu) cpu->hyperv_interface_id[2] = 0; cpu->hyperv_interface_id[3] = 0; - /* Hypervisor system identity */ - cpu->hyperv_version_id[0] = 0x00001bbc; - cpu->hyperv_version_id[1] = 0x00060001; - /* Hypervisor implementation limits */ cpu->hyperv_limits[0] = 64; cpu->hyperv_limits[1] = 0; @@ -6142,6 +6442,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) CPUX86State *env = &cpu->env; Error *local_err = NULL; static bool ht_warned; + unsigned requested_lbr_fmt; if (cpu->apic_id == UNASSIGNED_APIC_ID) { error_setg(errp, "apic-id property was not initialized properly"); @@ -6159,6 +6460,42 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) goto out; } + /* + * Override env->features[FEAT_PERF_CAPABILITIES].LBR_FMT + * with user-provided setting. + */ + if (cpu->lbr_fmt != ~PERF_CAP_LBR_FMT) { + if ((cpu->lbr_fmt & PERF_CAP_LBR_FMT) != cpu->lbr_fmt) { + error_setg(errp, "invalid lbr-fmt"); + return; + } + env->features[FEAT_PERF_CAPABILITIES] &= ~PERF_CAP_LBR_FMT; + env->features[FEAT_PERF_CAPABILITIES] |= cpu->lbr_fmt; + } + + /* + * vPMU LBR is supported when 1) KVM is enabled 2) Option pmu=on and + * 3)vPMU LBR format matches that of host setting. 
+ */ + requested_lbr_fmt = + env->features[FEAT_PERF_CAPABILITIES] & PERF_CAP_LBR_FMT; + if (requested_lbr_fmt && kvm_enabled()) { + uint64_t host_perf_cap = + x86_cpu_get_supported_feature_word(FEAT_PERF_CAPABILITIES, false); + unsigned host_lbr_fmt = host_perf_cap & PERF_CAP_LBR_FMT; + + if (!cpu->enable_pmu) { + error_setg(errp, "vPMU: LBR is unsupported without pmu=on"); + return; + } + if (requested_lbr_fmt != host_lbr_fmt) { + error_setg(errp, "vPMU: the lbr-fmt value (0x%x) does not match " + "the host value (0x%x).", + requested_lbr_fmt, host_lbr_fmt); + return; + } + } + x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid); if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) { @@ -6178,6 +6515,8 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) & CPUID_EXT2_AMD_ALIASES); } + x86_cpu_set_sgxlepubkeyhash(env); + /* * note: the call to the framework needs to happen after feature expansion, * but before the checks/modifications to ucode_rev, mwait, phys_bits. @@ -6508,6 +6847,10 @@ static void x86_cpu_initfn(Object *obj) object_property_add_alias(obj, "sse4_1", obj, "sse4.1"); object_property_add_alias(obj, "sse4_2", obj, "sse4.2"); + object_property_add_alias(obj, "hv-apicv", obj, "hv-avic"); + cpu->lbr_fmt = ~PERF_CAP_LBR_FMT; + object_property_add_alias(obj, "lbr_fmt", obj, "lbr-fmt"); + if (xcc->model) { x86_cpu_load_model(cpu, xcc->model); } @@ -6536,6 +6879,14 @@ static void x86_cpu_set_pc(CPUState *cs, vaddr value) cpu->env.eip = value; } +static vaddr x86_cpu_get_pc(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + + /* Match cpu_get_tb_cpu_state. */ + return cpu->env.eip + cpu->env.segs[R_CS].base; +} + int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request) { X86CPU *cpu = X86_CPU(cs); @@ -6567,10 +6918,12 @@ int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request) !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { return CPU_INTERRUPT_HARD; #if !defined(CONFIG_USER_ONLY) - } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && + } else if (env->hflags2 & HF2_VGIF_MASK) { + if((interrupt_request & CPU_INTERRUPT_VIRQ) && (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) { - return CPU_INTERRUPT_VIRQ; + return CPU_INTERRUPT_VIRQ; + } #endif } } @@ -6591,7 +6944,6 @@ static void x86_disas_set_info(CPUState *cs, disassemble_info *info) info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386 : bfd_mach_i386_i8086); - info->print_insn = print_insn_i386; info->cap_arch = CS_ARCH_X86; info->cap_mode = (env->hflags & HF_CS64_MASK ? 
CS_MODE_64 @@ -6660,6 +7012,7 @@ static Property x86_cpu_properties[] = { #endif DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID), DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false), + DEFINE_PROP_UINT64_CHECKMASK("lbr-fmt", X86CPU, lbr_fmt, PERF_CAP_LBR_FMT), DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts, HYPERV_SPINLOCK_NEVER_NOTIFY), @@ -6693,9 +7046,33 @@ static Property x86_cpu_properties[] = { HYPERV_FEAT_IPI, 0), DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features, HYPERV_FEAT_STIMER_DIRECT, 0), + DEFINE_PROP_BIT64("hv-avic", X86CPU, hyperv_features, + HYPERV_FEAT_AVIC, 0), + DEFINE_PROP_BIT64("hv-emsr-bitmap", X86CPU, hyperv_features, + HYPERV_FEAT_MSR_BITMAP, 0), + DEFINE_PROP_BIT64("hv-xmm-input", X86CPU, hyperv_features, + HYPERV_FEAT_XMM_INPUT, 0), + DEFINE_PROP_BIT64("hv-tlbflush-ext", X86CPU, hyperv_features, + HYPERV_FEAT_TLBFLUSH_EXT, 0), + DEFINE_PROP_BIT64("hv-tlbflush-direct", X86CPU, hyperv_features, + HYPERV_FEAT_TLBFLUSH_DIRECT, 0), DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU, hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF), + DEFINE_PROP_BIT64("hv-syndbg", X86CPU, hyperv_features, + HYPERV_FEAT_SYNDBG, 0), DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false), + DEFINE_PROP_BOOL("hv-enforce-cpuid", X86CPU, hyperv_enforce_cpuid, false), + + /* WS2008R2 identify by default */ + DEFINE_PROP_UINT32("hv-version-id-build", X86CPU, hyperv_ver_id_build, + 0x3839), + DEFINE_PROP_UINT16("hv-version-id-major", X86CPU, hyperv_ver_id_major, + 0x000A), + DEFINE_PROP_UINT16("hv-version-id-minor", X86CPU, hyperv_ver_id_minor, + 0x0000), + DEFINE_PROP_UINT32("hv-version-id-spack", X86CPU, hyperv_ver_id_sp, 0), + DEFINE_PROP_UINT8("hv-version-id-sbranch", X86CPU, hyperv_ver_id_sb, 0), + DEFINE_PROP_UINT32("hv-version-id-snumber", X86CPU, hyperv_ver_id_sn, 0), DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true), DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false), @@ -6722,6 +7099,8 @@ static Property x86_cpu_properties[] = { DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true), DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration, false), + DEFINE_PROP_BOOL("kvm-pv-enforce-cpuid", X86CPU, kvm_pv_enforce_cpuid, + false), DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true), DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true), DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count, @@ -6790,6 +7169,7 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data) cc->has_work = x86_cpu_has_work; cc->dump_state = x86_cpu_dump_state; cc->set_pc = x86_cpu_set_pc; + cc->get_pc = x86_cpu_get_pc; cc->gdb_read_register = x86_cpu_gdb_read_register; cc->gdb_write_register = x86_cpu_gdb_write_register; cc->get_arch_id = x86_cpu_get_arch_id; @@ -6863,7 +7243,6 @@ static const TypeInfo x86_cpu_type_info = { .class_init = x86_cpu_common_class_init, }; - /* "base" CPU model, used by query-cpu-model-expansion */ static void x86_cpu_base_class_init(ObjectClass *oc, void *data) { diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 687d8924f5..57b275a212 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -25,6 +25,7 @@ #include "kvm/hyperv-proto.h" #include "exec/cpu-defs.h" #include "qapi/qapi-types-common.h" +#include "qemu/cpu-float.h" /* The x86 has a strong memory model with some store-after-load re-ordering */ #define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) @@ -167,7 +168,9 @@ typedef enum X86Seg { #define HF_IOBPT_SHIFT 24 /* an io 
breakpoint enabled */ #define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */ #define HF_MPX_IU_SHIFT 26 /* BND registers in-use */ -#define HF_FPU_PC_SHIFT 27 /* FPU Precision Control */ +#define HF_UMIP_SHIFT 27 /* CR4.UMIP */ +#define HF_AVX_EN_SHIFT 28 /* AVX Enabled (CR4+XCR0) */ +#define HF_FPU_PC_SHIFT 29 /* FPU Precision Control */ #define HF_CPL_MASK (3 << HF_CPL_SHIFT) #define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT) @@ -193,6 +196,8 @@ typedef enum X86Seg { #define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT) #define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT) #define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT) +#define HF_UMIP_MASK (1 << HF_UMIP_SHIFT) +#define HF_AVX_EN_MASK (1 << HF_AVX_EN_SHIFT) #define HF_FPU_PC_MASK (1 << HF_FPU_PC_SHIFT) /* hflags2 */ @@ -205,6 +210,7 @@ typedef enum X86Seg { #define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */ #define HF2_NPT_SHIFT 6 /* Nested Paging enabled */ #define HF2_IGNNE_SHIFT 7 /* Ignore CR0.NE=0 */ +#define HF2_VGIF_SHIFT 8 /* Can take VIRQ*/ #define HF2_GIF_MASK (1 << HF2_GIF_SHIFT) #define HF2_HIF_MASK (1 << HF2_HIF_SHIFT) @@ -214,6 +220,7 @@ typedef enum X86Seg { #define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT) #define HF2_NPT_MASK (1 << HF2_NPT_SHIFT) #define HF2_IGNNE_MASK (1 << HF2_IGNNE_SHIFT) +#define HF2_VGIF_MASK (1 << HF2_VGIF_SHIFT) #define CR0_PE_SHIFT 0 #define CR0_MP_SHIFT 1 @@ -258,7 +265,8 @@ typedef enum X86Seg { (~(target_ulong)(CR4_VME_MASK | CR4_PVI_MASK | CR4_TSD_MASK \ | CR4_DE_MASK | CR4_PSE_MASK | CR4_PAE_MASK \ | CR4_MCE_MASK | CR4_PGE_MASK | CR4_PCE_MASK \ - | CR4_OSFXSR_MASK | CR4_OSXMMEXCPT_MASK |CR4_UMIP_MASK \ + | CR4_OSFXSR_MASK | CR4_OSXMMEXCPT_MASK | CR4_UMIP_MASK \ + | CR4_LA57_MASK \ | CR4_FSGSBASE_MASK | CR4_PCIDE_MASK | CR4_OSXSAVE_MASK \ | CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_PKE_MASK | CR4_PKS_MASK)) @@ -382,15 +390,29 @@ typedef enum X86Seg { #define ARCH_CAP_TSX_CTRL_MSR (1<<7) #define MSR_IA32_PERF_CAPABILITIES 0x345 +#define PERF_CAP_LBR_FMT 0x3f #define MSR_IA32_TSX_CTRL 0x122 #define MSR_IA32_TSCDEADLINE 0x6e0 #define MSR_IA32_PKRS 0x6e1 +#define MSR_ARCH_LBR_CTL 0x000014ce +#define MSR_ARCH_LBR_DEPTH 0x000014cf +#define MSR_ARCH_LBR_FROM_0 0x00001500 +#define MSR_ARCH_LBR_TO_0 0x00001600 +#define MSR_ARCH_LBR_INFO_0 0x00001200 #define FEATURE_CONTROL_LOCKED (1<<0) +#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1ULL << 1) #define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2) +#define FEATURE_CONTROL_SGX_LC (1ULL << 17) +#define FEATURE_CONTROL_SGX (1ULL << 18) #define FEATURE_CONTROL_LMCE (1<<20) +#define MSR_IA32_SGXLEPUBKEYHASH0 0x8c +#define MSR_IA32_SGXLEPUBKEYHASH1 0x8d +#define MSR_IA32_SGXLEPUBKEYHASH2 0x8e +#define MSR_IA32_SGXLEPUBKEYHASH3 0x8f + #define MSR_P6_PERFCTR0 0xc1 #define MSR_IA32_SMBASE 0x9e @@ -490,9 +512,15 @@ typedef enum X86Seg { #define MSR_GSBASE 0xc0000101 #define MSR_KERNELGSBASE 0xc0000102 #define MSR_TSC_AUX 0xc0000103 +#define MSR_AMD64_TSC_RATIO 0xc0000104 + +#define MSR_AMD64_TSC_RATIO_DEFAULT 0x100000000ULL #define MSR_VM_HSAVE_PA 0xc0010117 +#define MSR_IA32_XFD 0x000001c4 +#define MSR_IA32_XFD_ERR 0x000001c5 + #define MSR_IA32_BNDCFGS 0x00000d90 #define MSR_IA32_XSS 0x00000da0 #define MSR_IA32_UMWAIT_CONTROL 0xe1 @@ -525,6 +553,9 @@ typedef enum X86Seg { #define XSTATE_ZMM_Hi256_BIT 6 #define XSTATE_Hi16_ZMM_BIT 7 #define XSTATE_PKRU_BIT 9 +#define XSTATE_ARCH_LBR_BIT 15 +#define XSTATE_XTILE_CFG_BIT 17 +#define XSTATE_XTILE_DATA_BIT 18 #define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT) #define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT) @@ -535,6 
+566,26 @@ typedef enum X86Seg { #define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT) #define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT) #define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT) +#define XSTATE_ARCH_LBR_MASK (1ULL << XSTATE_ARCH_LBR_BIT) +#define XSTATE_XTILE_CFG_MASK (1ULL << XSTATE_XTILE_CFG_BIT) +#define XSTATE_XTILE_DATA_MASK (1ULL << XSTATE_XTILE_DATA_BIT) + +#define XSTATE_DYNAMIC_MASK (XSTATE_XTILE_DATA_MASK) + +#define ESA_FEATURE_ALIGN64_BIT 1 +#define ESA_FEATURE_XFD_BIT 2 + +#define ESA_FEATURE_ALIGN64_MASK (1U << ESA_FEATURE_ALIGN64_BIT) +#define ESA_FEATURE_XFD_MASK (1U << ESA_FEATURE_XFD_BIT) + + +/* CPUID feature bits available in XCR0 */ +#define CPUID_XSTATE_XCR0_MASK (XSTATE_FP_MASK | XSTATE_SSE_MASK | \ + XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | \ + XSTATE_BNDCSR_MASK | XSTATE_OPMASK_MASK | \ + XSTATE_ZMM_Hi256_MASK | \ + XSTATE_Hi16_ZMM_MASK | XSTATE_PKRU_MASK | \ + XSTATE_XTILE_CFG_MASK | XSTATE_XTILE_DATA_MASK) /* CPUID feature words */ typedef enum FeatureWord { @@ -554,8 +605,8 @@ typedef enum FeatureWord { FEAT_SVM, /* CPUID[8000_000A].EDX */ FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */ FEAT_6_EAX, /* CPUID[6].EAX */ - FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */ - FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */ + FEAT_XSAVE_XCR0_LO, /* CPUID[EAX=0xd,ECX=0].EAX */ + FEAT_XSAVE_XCR0_HI, /* CPUID[EAX=0xd,ECX=0].EDX */ FEAT_ARCH_CAPABILITIES, FEAT_CORE_CAPABILITY, FEAT_PERF_CAPABILITIES, @@ -569,10 +620,17 @@ typedef enum FeatureWord { FEAT_VMX_BASIC, FEAT_VMX_VMFUNC, FEAT_14_0_ECX, + FEAT_SGX_12_0_EAX, /* CPUID[EAX=0x12,ECX=0].EAX (SGX) */ + FEAT_SGX_12_0_EBX, /* CPUID[EAX=0x12,ECX=0].EBX (SGX MISCSELECT[31:0]) */ + FEAT_SGX_12_1_EAX, /* CPUID[EAX=0x12,ECX=1].EAX (SGX ATTRIBUTES[31:0]) */ + FEAT_XSAVE_XSS_LO, /* CPUID[EAX=0xd,ECX=1].ECX */ + FEAT_XSAVE_XSS_HI, /* CPUID[EAX=0xd,ECX=1].EDX */ FEATURE_WORDS, } FeatureWord; typedef uint64_t FeatureWordArray[FEATURE_WORDS]; +uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, + bool migratable_only); /* cpuid_features bits */ #define CPUID_FP87 (1U << 0) @@ -717,6 +775,8 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS]; /* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */ #define CPUID_7_0_EBX_FSGSBASE (1U << 0) +/* Support SGX */ +#define CPUID_7_0_EBX_SGX (1U << 2) /* 1st Group of Advanced Bit Manipulation Extensions */ #define CPUID_7_0_EBX_BMI1 (1U << 3) /* Hardware Lock Elision */ @@ -804,6 +864,8 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS]; #define CPUID_7_0_ECX_MOVDIRI (1U << 27) /* Move 64 Bytes as Direct Store Instruction */ #define CPUID_7_0_ECX_MOVDIR64B (1U << 28) +/* Support SGX Launch Control */ +#define CPUID_7_0_ECX_SGX_LC (1U << 30) /* Protection Keys for Supervisor-mode Pages */ #define CPUID_7_0_ECX_PKS (1U << 31) @@ -819,8 +881,12 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS]; #define CPUID_7_0_EDX_SERIALIZE (1U << 14) /* TSX Suspend Load Address Tracking instruction */ #define CPUID_7_0_EDX_TSX_LDTRK (1U << 16) +/* Architectural LBRs */ +#define CPUID_7_0_EDX_ARCH_LBR (1U << 19) /* AVX512_FP16 instruction */ #define CPUID_7_0_EDX_AVX512_FP16 (1U << 23) +/* AMX tile (two-dimensional register) */ +#define CPUID_7_0_EDX_AMX_TILE (1U << 24) /* Speculation Control */ #define CPUID_7_0_EDX_SPEC_CTRL (1U << 26) /* Single Thread Indirect Branch Predictors */ @@ -836,6 +902,8 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS]; #define CPUID_7_1_EAX_AVX_VNNI (1U << 4) /* AVX512 BFloat16 Instruction */ #define CPUID_7_1_EAX_AVX512_BF16 (1U << 5) +/* XFD 
Extend Feature Disabled */ +#define CPUID_D_1_EAX_XFD (1U << 4) /* Packets which contain IP payload have LIP values */ #define CPUID_14_0_ECX_LIP (1U << 31) @@ -1040,6 +1108,12 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS]; #define HYPERV_FEAT_EVMCS 12 #define HYPERV_FEAT_IPI 13 #define HYPERV_FEAT_STIMER_DIRECT 14 +#define HYPERV_FEAT_AVIC 15 +#define HYPERV_FEAT_SYNDBG 16 +#define HYPERV_FEAT_MSR_BITMAP 17 +#define HYPERV_FEAT_XMM_INPUT 18 +#define HYPERV_FEAT_TLBFLUSH_EXT 19 +#define HYPERV_FEAT_TLBFLUSH_DIRECT 20 #ifndef HYPERV_SPINLOCK_NEVER_NOTIFY #define HYPERV_SPINLOCK_NEVER_NOTIFY 0xFFFFFFFF @@ -1163,32 +1237,35 @@ typedef struct SegmentCache { uint32_t flags; } SegmentCache; -#define MMREG_UNION(n, bits) \ - union n { \ - uint8_t _b_##n[(bits)/8]; \ - uint16_t _w_##n[(bits)/16]; \ - uint32_t _l_##n[(bits)/32]; \ - uint64_t _q_##n[(bits)/64]; \ - float32 _s_##n[(bits)/32]; \ - float64 _d_##n[(bits)/64]; \ - } +typedef union MMXReg { + uint8_t _b_MMXReg[64 / 8]; + uint16_t _w_MMXReg[64 / 16]; + uint32_t _l_MMXReg[64 / 32]; + uint64_t _q_MMXReg[64 / 64]; + float32 _s_MMXReg[64 / 32]; + float64 _d_MMXReg[64 / 64]; +} MMXReg; -typedef union { - uint8_t _b[16]; - uint16_t _w[8]; - uint32_t _l[4]; - uint64_t _q[2]; +typedef union XMMReg { + uint64_t _q_XMMReg[128 / 64]; } XMMReg; -typedef union { - uint8_t _b[32]; - uint16_t _w[16]; - uint32_t _l[8]; - uint64_t _q[4]; +typedef union YMMReg { + uint64_t _q_YMMReg[256 / 64]; + XMMReg _x_YMMReg[256 / 128]; } YMMReg; -typedef MMREG_UNION(ZMMReg, 512) ZMMReg; -typedef MMREG_UNION(MMXReg, 64) MMXReg; +typedef union ZMMReg { + uint8_t _b_ZMMReg[512 / 8]; + uint16_t _w_ZMMReg[512 / 16]; + uint32_t _l_ZMMReg[512 / 32]; + uint64_t _q_ZMMReg[512 / 64]; + float16 _h_ZMMReg[512 / 16]; + float32 _s_ZMMReg[512 / 32]; + float64 _d_ZMMReg[512 / 64]; + XMMReg _x_ZMMReg[512 / 128]; + YMMReg _y_ZMMReg[512 / 256]; +} ZMMReg; typedef struct BNDReg { uint64_t lb; @@ -1204,13 +1281,21 @@ typedef struct BNDCSReg { #define BNDCFG_BNDPRESERVE 2ULL #define BNDCFG_BDIR_MASK TARGET_PAGE_MASK -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN #define ZMM_B(n) _b_ZMMReg[63 - (n)] #define ZMM_W(n) _w_ZMMReg[31 - (n)] #define ZMM_L(n) _l_ZMMReg[15 - (n)] +#define ZMM_H(n) _h_ZMMReg[31 - (n)] #define ZMM_S(n) _s_ZMMReg[15 - (n)] #define ZMM_Q(n) _q_ZMMReg[7 - (n)] #define ZMM_D(n) _d_ZMMReg[7 - (n)] +#define ZMM_X(n) _x_ZMMReg[3 - (n)] +#define ZMM_Y(n) _y_ZMMReg[1 - (n)] + +#define XMM_Q(n) _q_XMMReg[1 - (n)] + +#define YMM_Q(n) _q_YMMReg[3 - (n)] +#define YMM_X(n) _x_YMMReg[1 - (n)] #define MMX_B(n) _b_MMXReg[7 - (n)] #define MMX_W(n) _w_MMXReg[3 - (n)] @@ -1220,9 +1305,17 @@ typedef struct BNDCSReg { #define ZMM_B(n) _b_ZMMReg[n] #define ZMM_W(n) _w_ZMMReg[n] #define ZMM_L(n) _l_ZMMReg[n] +#define ZMM_H(n) _h_ZMMReg[n] #define ZMM_S(n) _s_ZMMReg[n] #define ZMM_Q(n) _q_ZMMReg[n] #define ZMM_D(n) _d_ZMMReg[n] +#define ZMM_X(n) _x_ZMMReg[n] +#define ZMM_Y(n) _y_ZMMReg[n] + +#define XMM_Q(n) _q_XMMReg[n] + +#define YMM_Q(n) _q_YMMReg[n] +#define YMM_X(n) _x_YMMReg[n] #define MMX_B(n) _b_MMXReg[n] #define MMX_W(n) _w_MMXReg[n] @@ -1323,6 +1416,34 @@ typedef struct XSavePKRU { uint32_t padding; } XSavePKRU; +/* Ext. save area 17: AMX XTILECFG state */ +typedef struct XSaveXTILECFG { + uint8_t xtilecfg[64]; +} XSaveXTILECFG; + +/* Ext. 
save area 18: AMX XTILEDATA state */ +typedef struct XSaveXTILEDATA { + uint8_t xtiledata[8][1024]; +} XSaveXTILEDATA; + +typedef struct { + uint64_t from; + uint64_t to; + uint64_t info; +} LBREntry; + +#define ARCH_LBR_NR_ENTRIES 32 + +/* Ext. save area 19: Supervisor mode Arch LBR state */ +typedef struct XSavesArchLBR { + uint64_t lbr_ctl; + uint64_t lbr_depth; + uint64_t ler_from; + uint64_t ler_to; + uint64_t ler_info; + LBREntry lbr_records[ARCH_LBR_NR_ENTRIES]; +} XSavesArchLBR; + QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100); QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40); QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40); @@ -1330,13 +1451,17 @@ QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40); QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200); QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400); QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8); +QEMU_BUILD_BUG_ON(sizeof(XSaveXTILECFG) != 0x40); +QEMU_BUILD_BUG_ON(sizeof(XSaveXTILEDATA) != 0x2000); +QEMU_BUILD_BUG_ON(sizeof(XSavesArchLBR) != 0x328); typedef struct ExtSaveArea { uint32_t feature, bits; uint32_t offset, size; + uint32_t ecx; } ExtSaveArea; -#define XSAVE_STATE_AREA_COUNT (XSTATE_PKRU_BIT + 1) +#define XSAVE_STATE_AREA_COUNT (XSTATE_XTILE_DATA_BIT + 1) extern ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT]; @@ -1409,7 +1534,7 @@ typedef struct HVFX86LazyFlags { target_ulong auxbits; } HVFX86LazyFlags; -typedef struct CPUX86State { +typedef struct CPUArchState { /* standard registers */ target_ulong regs[CPU_NB_REGS]; target_ulong eip; @@ -1435,6 +1560,9 @@ typedef struct CPUX86State { SegmentCache idt; /* only base and limit are used */ target_ulong cr[5]; /* NOTE: cr1 is unused */ + + bool pdptrs_valid; + uint64_t pdptrs[4]; int32_t a20_mask; BNDReg bnd_regs[4]; @@ -1465,15 +1593,15 @@ typedef struct CPUX86State { float_status mmx_status; /* for 3DNow! float ops */ float_status sse_status; uint32_t mxcsr; - ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32]; - ZMMReg xmm_t0; + ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32] QEMU_ALIGNED(16); + ZMMReg xmm_t0 QEMU_ALIGNED(16); MMXReg mmx_t0; - XMMReg ymmh_regs[CPU_NB_REGS]; - uint64_t opmask_regs[NB_OPMASK_REGS]; - YMMReg zmmh_regs[CPU_NB_REGS]; - ZMMReg hi16_zmm_regs[CPU_NB_REGS]; +#ifdef TARGET_X86_64 + uint8_t xtilecfg[64]; + uint8_t xtiledata[8192]; +#endif /* sysenter registers */ uint32_t sysenter_cs; @@ -1490,7 +1618,6 @@ typedef struct CPUX86State { target_ulong kernelgsbase; #endif - uint64_t tsc; uint64_t tsc_adjust; uint64_t tsc_deadline; uint64_t tsc_aux; @@ -1500,6 +1627,7 @@ typedef struct CPUX86State { uint64_t mcg_status; uint64_t msr_ia32_misc_enable; uint64_t msr_ia32_feature_control; + uint64_t msr_ia32_sgxlepubkeyhash[4]; uint64_t msr_fixed_ctr_ctrl; uint64_t msr_global_ctrl; @@ -1518,6 +1646,7 @@ typedef struct CPUX86State { uint32_t tsx_ctrl; uint64_t spec_ctrl; + uint64_t amd_tsc_scale_msr; uint64_t virt_ssbd; /* End of state preserved by INIT (dummy marker). 
*/ @@ -1535,6 +1664,12 @@ typedef struct CPUX86State { uint64_t msr_hv_hypercall; uint64_t msr_hv_guest_os_id; uint64_t msr_hv_tsc; + uint64_t msr_hv_syndbg_control; + uint64_t msr_hv_syndbg_status; + uint64_t msr_hv_syndbg_send_page; + uint64_t msr_hv_syndbg_recv_page; + uint64_t msr_hv_syndbg_pending_page; + uint64_t msr_hv_syndbg_options; /* Per-VCPU HV MSRs */ uint64_t msr_hv_vapic; @@ -1557,6 +1692,15 @@ typedef struct CPUX86State { uint64_t msr_rtit_cr3_match; uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS]; + /* Per-VCPU XFD MSRs */ + uint64_t msr_xfd; + uint64_t msr_xfd_err; + + /* Per-VCPU Arch LBR MSRs */ + uint64_t msr_lbr_ctl; + uint64_t msr_lbr_depth; + LBREntry lbr_records[ARCH_LBR_NR_ENTRIES]; + /* exception/interrupt handling */ int error_code; int exception_is_int; @@ -1579,6 +1723,7 @@ typedef struct CPUX86State { uint64_t nested_cr3; uint32_t nested_pg_mode; uint8_t v_tpr; + uint32_t int_ctl; /* KVM states, automatically cleared on reset */ uint8_t nmi_injected; @@ -1631,12 +1776,14 @@ typedef struct CPUX86State { uint8_t has_error_code; uint8_t exception_has_payload; uint64_t exception_payload; + uint8_t triple_fault_pending; uint32_t ins_len; uint32_t sipi_vector; bool tsc_valid; int64_t tsc_khz; int64_t user_tsc_khz; /* for sanity check only */ uint64_t apic_bus_freq; + uint64_t tsc; #if defined(CONFIG_KVM) || defined(CONFIG_HVF) void *xsave_buf; uint32_t xsave_buf_len; @@ -1679,7 +1826,7 @@ struct kvm_msrs; * * An x86 CPU. */ -struct X86CPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ @@ -1698,9 +1845,14 @@ struct X86CPU { OnOffAuto hyperv_no_nonarch_cs; uint32_t hyperv_vendor_id[3]; uint32_t hyperv_interface_id[4]; - uint32_t hyperv_version_id[4]; uint32_t hyperv_limits[3]; - uint32_t hyperv_nested[4]; + bool hyperv_enforce_cpuid; + uint32_t hyperv_ver_id_build; + uint16_t hyperv_ver_id_major; + uint16_t hyperv_ver_id_minor; + uint32_t hyperv_ver_id_sp; + uint8_t hyperv_ver_id_sb; + uint32_t hyperv_ver_id_sn; bool check_cpuid; bool enforce_cpuid; @@ -1743,6 +1895,15 @@ struct X86CPU { */ bool enable_pmu; + /* + * Enable LBR_FMT bits of IA32_PERF_CAPABILITIES MSR. + * This can't be initialized with a default because it doesn't have + * stable ABI support yet. It is only allowed to pass all LBR_FMT bits + * returned by kvm_arch_get_supported_msr_feature()(which depends on both + * host CPU and kernel capabilities) to the guest. + */ + uint64_t lbr_fmt; + /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is * disabled by default to avoid breaking migration between QEMU with * different LMCE configurations. 
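The lbr_fmt field introduced in the hunk above is a user-settable override for the LBR format bits (bits 5:0) of IA32_PERF_CAPABILITIES, with ~PERF_CAP_LBR_FMT acting as the "property not set" sentinel assigned in x86_cpu_initfn. A minimal standalone sketch of how that override is folded into the feature word at realize time; PERF_CAP_LBR_FMT is taken from this patch, while the ExampleCpu wrapper and the function name are illustrative stand-ins, not QEMU code:

#include <stdint.h>
#include <stdbool.h>

#define PERF_CAP_LBR_FMT 0x3f    /* bits 5:0 of IA32_PERF_CAPABILITIES (from this patch) */

/* Illustrative stand-ins for X86CPU and env->features[FEAT_PERF_CAPABILITIES]. */
typedef struct {
    uint64_t lbr_fmt;            /* ~PERF_CAP_LBR_FMT means "property not set" */
    uint64_t perf_capabilities;
} ExampleCpu;

/* Fold a user-requested lbr-fmt value into the PERF_CAPABILITIES word,
 * mirroring the realize-time check added by this patch. Returns false
 * for values with bits outside the LBR_FMT field. */
static bool apply_lbr_fmt_override(ExampleCpu *cpu)
{
    if (cpu->lbr_fmt == (uint64_t)~PERF_CAP_LBR_FMT) {
        return true;             /* default sentinel: keep the host/KVM-derived value */
    }
    if ((cpu->lbr_fmt & PERF_CAP_LBR_FMT) != cpu->lbr_fmt) {
        return false;            /* "invalid lbr-fmt" */
    }
    cpu->perf_capabilities &= ~(uint64_t)PERF_CAP_LBR_FMT;
    cpu->perf_capabilities |= cpu->lbr_fmt;
    return true;
}

Together with the DEFINE_PROP_UINT64_CHECKMASK("lbr-fmt", ...) property added later in this patch, this would correspond to a command line along the lines of -cpu SomeModel,pmu=on,lbr-fmt=0x3f; realize additionally rejects the request unless pmu=on is set and the requested format matches the LBR format KVM reports in its own PERF_CAPABILITIES.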
@@ -1784,6 +1945,9 @@ struct X86CPU { /* Stop SMI delivery for migration compatibility with old machines */ bool kvm_no_smi_migration; + /* Forcefully disable KVM PV features not exposed in guest CPUIDs */ + bool kvm_pv_enforce_cpuid; + /* Number of physical address bits supported */ uint32_t phys_bits; @@ -1812,13 +1976,13 @@ extern const VMStateDescription vmstate_x86_cpu; int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request); int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, - int cpuid, void *opaque); + int cpuid, DumpState *s); int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu, - int cpuid, void *opaque); + int cpuid, DumpState *s); int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu, - void *opaque); + DumpState *s); int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, - void *opaque); + DumpState *s); void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, Error **errp); @@ -1834,12 +1998,15 @@ int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void x86_cpu_list(void); int cpu_x86_support_mca_broadcast(CPUX86State *env); +#ifndef CONFIG_USER_ONLY int cpu_get_pic_interrupt(CPUX86State *s); + /* MSDOS compatibility mode FPU exception support */ void x86_register_ferr_irq(qemu_irq irq); void fpu_check_raise_ferr_irq(CPUX86State *s); void cpu_set_ignne(void); void cpu_clear_ignne(void); +#endif /* mpx_helper.c */ void cpu_sync_bndcs_hflags(CPUX86State *env); @@ -1941,12 +2108,8 @@ void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32); void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32); void cpu_x86_fxsave(CPUX86State *s, target_ulong ptr); void cpu_x86_fxrstor(CPUX86State *s, target_ulong ptr); - -/* you can call this signal handler from your SIGBUS and SIGSEGV - signal handlers to inform the virtual CPU of exceptions. non zero - is returned if the signal was handled by the virtual CPU. */ -int cpu_x86_signal_handler(int host_signum, void *pinfo, - void *puc); +void cpu_x86_xsave(CPUX86State *s, target_ulong ptr); +void cpu_x86_xrstor(CPUX86State *s, target_ulong ptr); /* cpu.c */ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, @@ -1956,6 +2119,10 @@ typedef struct PropValue { } PropValue; void x86_cpu_apply_props(X86CPU *cpu, PropValue *props); +void x86_cpu_after_reset(X86CPU *cpu); + +uint32_t cpu_x86_virtual_addr_width(CPUX86State *env); + /* cpu.c other functions (cpuid) */ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, uint32_t *eax, uint32_t *ebx, @@ -1966,6 +2133,7 @@ void host_cpuid(uint32_t function, uint32_t count, /* helper.c */ void x86_cpu_set_a20(X86CPU *cpu, int a20_state); +void cpu_sync_avx_hflag(CPUX86State *env); #ifndef CONFIG_USER_ONLY static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs) @@ -2013,13 +2181,15 @@ uint64_t cpu_get_tsc(CPUX86State *env); #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32") #endif -#define cpu_signal_handler cpu_x86_signal_handler #define cpu_list x86_cpu_list /* MMU modes definitions */ #define MMU_KSMAP_IDX 0 #define MMU_USER_IDX 1 #define MMU_KNOSMAP_IDX 2 +#define MMU_NESTED_IDX 3 +#define MMU_PHYS_IDX 4 + static inline int cpu_mmu_index(CPUX86State *env, bool ifetch) { return (env->hflags & HF_CPL_MASK) == 3 ? 
MMU_USER_IDX : @@ -2039,9 +2209,6 @@ static inline int cpu_mmu_index_kernel(CPUX86State *env) #define CC_SRC2 (env->cc_src2) #define CC_OP (env->cc_op) -typedef CPUX86State CPUArchState; -typedef X86CPU ArchCPU; - #include "exec/cpu-all.h" #include "svm.h" @@ -2212,6 +2379,7 @@ bool cpu_is_bsp(X86CPU *cpu); void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen); void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen); +uint32_t xsave_area_size(uint64_t mask, bool compacted); void x86_update_hflags(CPUX86State* env); static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat) @@ -2249,6 +2417,21 @@ static inline uint64_t cr4_reserved_bits(CPUX86State *env) return reserved_bits; } +static inline bool ctl_has_irq(CPUX86State *env) +{ + uint32_t int_prio; + uint32_t tpr; + + int_prio = (env->int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT; + tpr = env->int_ctl & V_TPR_MASK; + + if (env->int_ctl & V_IGN_TPR_MASK) { + return (env->int_ctl & V_IRQ_MASK); + } + + return (env->int_ctl & V_IRQ_MASK) && (int_prio >= tpr); +} + #if defined(TARGET_X86_64) && \ defined(CONFIG_USER_ONLY) && \ defined(CONFIG_LINUX) diff --git a/target/i386/gdbstub.c b/target/i386/gdbstub.c index 098a2ad15a..c3a2cf6f28 100644 --- a/target/i386/gdbstub.c +++ b/target/i386/gdbstub.c @@ -129,8 +129,8 @@ int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) n -= IDX_XMM_REGS; if (n < CPU_NB_REGS32 || TARGET_LONG_BITS == 64) { return gdb_get_reg128(mem_buf, - env->xmm_regs[n].ZMM_Q(0), - env->xmm_regs[n].ZMM_Q(1)); + env->xmm_regs[n].ZMM_Q(1), + env->xmm_regs[n].ZMM_Q(0)); } } else { switch (n) { diff --git a/target/i386/hax/hax-accel-ops.c b/target/i386/hax/hax-accel-ops.c index 136630e9b2..18114fe34d 100644 --- a/target/i386/hax/hax-accel-ops.c +++ b/target/i386/hax/hax-accel-ops.c @@ -61,8 +61,8 @@ static void hax_start_vcpu_thread(CPUState *cpu) { char thread_name[VCPU_THREAD_NAME_SIZE]; - cpu->thread = g_malloc0(sizeof(QemuThread)); - cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + cpu->thread = g_new0(QemuThread, 1); + cpu->halt_cond = g_new0(QemuCond, 1); qemu_cond_init(cpu->halt_cond); snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX", diff --git a/target/i386/hax/hax-accel-ops.h b/target/i386/hax/hax-accel-ops.h index c7698519cd..9e357e7b40 100644 --- a/target/i386/hax/hax-accel-ops.h +++ b/target/i386/hax/hax-accel-ops.h @@ -7,8 +7,8 @@ * See the COPYING file in the top-level directory. 
*/ -#ifndef HAX_CPUS_H -#define HAX_CPUS_H +#ifndef TARGET_I386_HAX_ACCEL_OPS_H +#define TARGET_I386_HAX_ACCEL_OPS_H #include "sysemu/cpus.h" @@ -28,4 +28,4 @@ int hax_vcpu_destroy(CPUState *cpu); void hax_raise_event(CPUState *cpu); void hax_reset_vcpu_state(void *opaque); -#endif /* HAX_CPUS_H */ +#endif /* TARGET_I386_HAX_ACCEL_OPS_H */ diff --git a/target/i386/hax/hax-all.c b/target/i386/hax/hax-all.c index bf65ed6fa9..b185ee8de4 100644 --- a/target/i386/hax/hax-all.c +++ b/target/i386/hax/hax-all.c @@ -27,7 +27,6 @@ #include "cpu.h" #include "exec/address-spaces.h" -#include "qemu-common.h" #include "qemu/accel.h" #include "sysemu/reset.h" #include "sysemu/runstate.h" @@ -49,18 +48,13 @@ const uint32_t hax_cur_version = 0x4; /* API v4: unmapping and MMIO moves */ /* Minimum HAX kernel version */ const uint32_t hax_min_version = 0x4; /* API v4: supports unmapping */ -static bool hax_allowed; +bool hax_allowed; struct hax_state hax_global; static void hax_vcpu_sync_state(CPUArchState *env, int modified); static int hax_arch_get_registers(CPUArchState *env); -int hax_enabled(void) -{ - return hax_allowed; -} - int valid_hax_tunnel_size(uint16_t size) { return size >= sizeof(struct hax_tunnel); @@ -227,7 +221,7 @@ int hax_init_vcpu(CPUState *cpu) cpu->hax_vcpu = hax_global.vm->vcpus[cpu->cpu_index]; cpu->vcpu_dirty = true; - qemu_register_reset(hax_reset_vcpu_state, (CPUArchState *) (cpu->env_ptr)); + qemu_register_reset(hax_reset_vcpu_state, cpu->env_ptr); return ret; } @@ -674,7 +668,7 @@ void hax_cpu_synchronize_pre_loadvm(CPUState *cpu) int hax_smp_cpu_exec(CPUState *cpu) { - CPUArchState *env = (CPUArchState *) (cpu->env_ptr); + CPUArchState *env = cpu->env_ptr; int fatal; int ret; diff --git a/target/i386/hax/hax-mem.c b/target/i386/hax/hax-mem.c index 27303ba957..c6ddaa8acd 100644 --- a/target/i386/hax/hax-mem.c +++ b/target/i386/hax/hax-mem.c @@ -188,15 +188,15 @@ static void hax_process_section(MemoryRegionSection *section, uint8_t flags) /* Adjust start_pa and size so that they are page-aligned. (Cf * kvm_set_phys_mem() in kvm-all.c). */ - delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask); - delta &= ~qemu_real_host_page_mask; + delta = qemu_real_host_page_size() - (start_pa & ~qemu_real_host_page_mask()); + delta &= ~qemu_real_host_page_mask(); if (delta > size) { return; } start_pa += delta; size -= delta; - size &= qemu_real_host_page_mask; - if (!size || (start_pa & ~qemu_real_host_page_mask)) { + size &= qemu_real_host_page_mask(); + if (!size || (start_pa & ~qemu_real_host_page_mask())) { return; } @@ -214,7 +214,7 @@ static void hax_process_section(MemoryRegionSection *section, uint8_t flags) * call into the kernel. Instead, we split the mapping into smaller ones, * and call hax_update_mapping() on each. 
*/ - max_mapping_size = UINT32_MAX & qemu_real_host_page_mask; + max_mapping_size = UINT32_MAX & qemu_real_host_page_mask(); while (size > max_mapping_size) { hax_update_mapping(start_pa, max_mapping_size, host_va, flags); start_pa += max_mapping_size; @@ -292,6 +292,7 @@ static void hax_log_sync(MemoryListener *listener, } static MemoryListener hax_memory_listener = { + .name = "hax", .begin = hax_transaction_begin, .commit = hax_transaction_commit, .region_add = hax_region_add, diff --git a/target/i386/helper.c b/target/i386/helper.c index 533b29cb91..0ac2da066d 100644 --- a/target/i386/helper.c +++ b/target/i386/helper.c @@ -27,6 +27,18 @@ #include "sysemu/hw_accel.h" #include "monitor/monitor.h" #endif +#include "qemu/log.h" + +void cpu_sync_avx_hflag(CPUX86State *env) +{ + if ((env->cr[4] & CR4_OSXSAVE_MASK) + && (env->xcr0 & (XSTATE_SSE_MASK | XSTATE_YMM_MASK)) + == (XSTATE_SSE_MASK | XSTATE_YMM_MASK)) { + env->hflags |= HF_AVX_EN_MASK; + } else{ + env->hflags &= ~HF_AVX_EN_MASK; + } +} void cpu_sync_bndcs_hflags(CPUX86State *env) { @@ -174,7 +186,7 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4) } /* Clear bits we're going to recompute. */ - hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK); + hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK | HF_UMIP_MASK); /* SSE handling */ if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) { @@ -190,6 +202,12 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4) if (new_cr4 & CR4_SMAP_MASK) { hflags |= HF_SMAP_MASK; } + if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_UMIP)) { + new_cr4 &= ~CR4_UMIP_MASK; + } + if (new_cr4 & CR4_UMIP_MASK) { + hflags |= HF_UMIP_MASK; + } if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) { new_cr4 &= ~CR4_PKE_MASK; @@ -202,6 +220,7 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4) env->hflags = hflags; cpu_sync_bndcs_hflags(env); + cpu_sync_avx_hflag(env); } #if !defined(CONFIG_USER_ONLY) @@ -408,7 +427,7 @@ static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data) if (need_reset) { emit_guest_memory_failure(MEMORY_FAILURE_ACTION_RESET, ar, recursive); - monitor_printf(params->mon, "%s", msg); + monitor_puts(params->mon, msg); qemu_log_mask(CPU_LOG_RESET, "%s\n", msg); qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); return; @@ -490,6 +509,27 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank, } } +static inline target_ulong get_memio_eip(CPUX86State *env) +{ +#ifdef CONFIG_TCG + uint64_t data[TARGET_INSN_START_WORDS]; + CPUState *cs = env_cpu(env); + + if (!cpu_unwind_state_data(cs, cs->mem_io_pc, data)) { + return env->eip; + } + + /* Per x86_restore_state_to_opc. 
*/ + if (TARGET_TB_PCREL) { + return (env->eip & TARGET_PAGE_MASK) | data[0]; + } else { + return data[0] - env->segs[R_CS].base; + } +#else + qemu_build_not_reached(); +#endif +} + void cpu_report_tpr_access(CPUX86State *env, TPRAccess access) { X86CPU *cpu = env_archcpu(env); @@ -500,9 +540,9 @@ void cpu_report_tpr_access(CPUX86State *env, TPRAccess access) cpu_interrupt(cs, CPU_INTERRUPT_TPR); } else if (tcg_enabled()) { - cpu_restore_state(cs, cs->mem_io_pc, false); + target_ulong eip = get_memio_eip(env); - apic_handle_tpr_access_report(cpu->apic_state, env->eip, access); + apic_handle_tpr_access_report(cpu->apic_state, eip, access); } } #endif /* !CONFIG_USER_ONLY */ diff --git a/target/i386/helper.h b/target/i386/helper.h index ad7ffc612f..2dd9b3ee37 100644 --- a/target/i386/helper.h +++ b/target/i386/helper.h @@ -37,7 +37,7 @@ DEF_HELPER_2(lldt, void, env, int) DEF_HELPER_2(ltr, void, env, int) DEF_HELPER_3(load_seg, void, env, int, int) DEF_HELPER_4(ljmp_protected, void, env, int, tl, tl) -DEF_HELPER_5(lcall_real, void, env, int, tl, int, int) +DEF_HELPER_5(lcall_real, void, env, i32, i32, int, i32) DEF_HELPER_5(lcall_protected, void, env, int, tl, int, tl) DEF_HELPER_2(iret_real, void, env, int) DEF_HELPER_3(iret_protected, void, env, int, int) @@ -56,14 +56,8 @@ DEF_HELPER_2(syscall, void, env, int) DEF_HELPER_2(sysret, void, env, int) #endif DEF_HELPER_FLAGS_2(pause, TCG_CALL_NO_WG, noreturn, env, int) -DEF_HELPER_FLAGS_1(debug, TCG_CALL_NO_WG, noreturn, env) -DEF_HELPER_1(reset_rf, void, env) DEF_HELPER_FLAGS_3(raise_interrupt, TCG_CALL_NO_WG, noreturn, env, int, int) DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, int) -DEF_HELPER_1(cli, void, env) -DEF_HELPER_1(sti, void, env) -DEF_HELPER_1(clac, void, env) -DEF_HELPER_1(stac, void, env) DEF_HELPER_3(boundw, void, env, tl, int) DEF_HELPER_3(boundl, void, env, tl, int) @@ -236,12 +230,13 @@ DEF_HELPER_2(ldmxcsr, void, env, i32) DEF_HELPER_1(update_mxcsr, void, env) DEF_HELPER_1(enter_mmx, void, env) DEF_HELPER_1(emms, void, env) -DEF_HELPER_3(movq, void, env, ptr, ptr) #define SHIFT 0 #include "ops_sse_header.h" #define SHIFT 1 #include "ops_sse_header.h" +#define SHIFT 2 +#include "ops_sse_header.h" DEF_HELPER_3(rclb, tl, env, tl, tl) DEF_HELPER_3(rclw, tl, env, tl, tl) diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c index 79ba4ed93a..8d2248bb3f 100644 --- a/target/i386/hvf/hvf.c +++ b/target/i386/hvf/hvf.c @@ -47,12 +47,13 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/error-report.h" +#include "qemu/memalign.h" #include "sysemu/hvf.h" #include "sysemu/hvf_int.h" #include "sysemu/runstate.h" +#include "sysemu/cpus.h" #include "hvf-i386.h" #include "vmcs.h" #include "vmx.h" @@ -206,10 +207,21 @@ static inline bool apic_bus_freq_is_known(CPUX86State *env) return env->apic_bus_freq != 0; } +void hvf_kick_vcpu_thread(CPUState *cpu) +{ + cpus_kick_thread(cpu); +} + +int hvf_arch_init(void) +{ + return 0; +} + int hvf_arch_init_vcpu(CPUState *cpu) { X86CPU *x86cpu = X86_CPU(cpu); CPUX86State *env = &x86cpu->env; + uint64_t reqCap; init_emu(); init_decoder(); @@ -246,19 +258,26 @@ int hvf_arch_init_vcpu(CPUState *cpu) /* set VMCS control fields */ wvmcs(cpu->hvf->fd, VMCS_PIN_BASED_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased, - VMCS_PIN_BASED_CTLS_EXTINT | - VMCS_PIN_BASED_CTLS_NMI | - VMCS_PIN_BASED_CTLS_VNMI)); + VMCS_PIN_BASED_CTLS_EXTINT | + VMCS_PIN_BASED_CTLS_NMI | + VMCS_PIN_BASED_CTLS_VNMI)); wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, 
cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased, - VMCS_PRI_PROC_BASED_CTLS_HLT | - VMCS_PRI_PROC_BASED_CTLS_MWAIT | - VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET | - VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) | + VMCS_PRI_PROC_BASED_CTLS_HLT | + VMCS_PRI_PROC_BASED_CTLS_MWAIT | + VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET | + VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) | VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL); + + reqCap = VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES; + + /* Is RDTSCP support in CPUID? If so, enable it in the VMCS. */ + if (hvf_get_supported_cpuid(0x80000001, 0, R_EDX) & CPUID_EXT2_RDTSCP) { + reqCap |= VMCS_PRI_PROC_BASED2_CTLS_RDTSCP; + } + wvmcs(cpu->hvf->fd, VMCS_SEC_PROC_BASED_CTLS, - cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2, - VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES)); + cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2, reqCap)); wvmcs(cpu->hvf->fd, VMCS_ENTRY_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry, 0)); diff --git a/target/i386/hvf/vmcs.h b/target/i386/hvf/vmcs.h index 42de7ebc3a..aee6f75dfd 100644 --- a/target/i386/hvf/vmcs.h +++ b/target/i386/hvf/vmcs.h @@ -330,7 +330,7 @@ #define EPT_VIOLATION_DATA_WRITE (1UL << 1) #define EPT_VIOLATION_INST_FETCH (1UL << 2) #define EPT_VIOLATION_GPA_READABLE (1UL << 3) -#define EPT_VIOLATION_GPA_WRITEABLE (1UL << 4) +#define EPT_VIOLATION_GPA_WRITABLE (1UL << 4) #define EPT_VIOLATION_GPA_EXECUTABLE (1UL << 5) #define EPT_VIOLATION_GLA_VALID (1UL << 7) #define EPT_VIOLATION_XLAT_VALID (1UL << 8) @@ -354,7 +354,7 @@ #define VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET (1 << 3) #define VMCS_PRI_PROC_BASED_CTLS_HLT (1 << 7) #define VMCS_PRI_PROC_BASED_CTLS_MWAIT (1 << 10) -#define VMCS_PRI_PROC_BASED_CTLS_TSC (1 << 12) +#define VMCS_PRI_PROC_BASED_CTLS_RDTSC (1 << 12) #define VMCS_PRI_PROC_BASED_CTLS_CR8_LOAD (1 << 19) #define VMCS_PRI_PROC_BASED_CTLS_CR8_STORE (1 << 20) #define VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW (1 << 21) @@ -362,6 +362,7 @@ #define VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL (1 << 31) #define VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES (1 << 0) +#define VMCS_PRI_PROC_BASED2_CTLS_RDTSCP (1 << 3) #define VMCS_PRI_PROC_BASED2_CTLS_X2APIC (1 << 4) enum task_switch_reason { diff --git a/target/i386/hvf/vmx.h b/target/i386/hvf/vmx.h index 6df87116f6..fcd9a95e5b 100644 --- a/target/i386/hvf/vmx.h +++ b/target/i386/hvf/vmx.h @@ -80,7 +80,7 @@ static inline uint64_t cap2ctrl(uint64_t cap, uint64_t ctrl) #define AR_TYPE_ACCESSES_MASK 1 #define AR_TYPE_READABLE_MASK (1 << 1) -#define AR_TYPE_WRITEABLE_MASK (1 << 2) +#define AR_TYPE_WRITABLE_MASK (1 << 2) #define AR_TYPE_CODE_MASK (1 << 3) #define AR_TYPE_MASK 0x0f #define AR_TYPE_BUSY_64_TSS 11 @@ -124,10 +124,11 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0) uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER); uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0); uint64_t changed_cr0 = old_cr0 ^ cr0; - uint64_t mask = CR0_PG | CR0_CD | CR0_NW | CR0_NE | CR0_ET; + uint64_t mask = CR0_PG_MASK | CR0_CD_MASK | CR0_NW_MASK | + CR0_NE_MASK | CR0_ET_MASK; uint64_t entry_ctls; - if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) && + if ((cr0 & CR0_PG_MASK) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE_MASK) && !(efer & MSR_EFER_LME)) { address_space_read(&address_space_memory, rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f, @@ -142,8 +143,8 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0) wvmcs(vcpu, VMCS_CR0_SHADOW, cr0); if (efer & MSR_EFER_LME) { - if (changed_cr0 & CR0_PG) { - if (cr0 & CR0_PG) { + if (changed_cr0 & CR0_PG_MASK) { + if (cr0 & CR0_PG_MASK) { enter_long_mode(vcpu, 
cr0, efer); } else { exit_long_mode(vcpu, cr0, efer); @@ -155,23 +156,21 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0) } /* Filter new CR0 after we are finished examining it above. */ - cr0 = (cr0 & ~(mask & ~CR0_PG)); - wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET); + cr0 = (cr0 & ~(mask & ~CR0_PG_MASK)); + wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE_MASK | CR0_ET_MASK); hv_vcpu_invalidate_tlb(vcpu); - hv_vcpu_flush(vcpu); } static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4) { - uint64_t guest_cr4 = cr4 | CR4_VMXE; + uint64_t guest_cr4 = cr4 | CR4_VMXE_MASK; wvmcs(vcpu, VMCS_GUEST_CR4, guest_cr4); wvmcs(vcpu, VMCS_CR4_SHADOW, cr4); - wvmcs(vcpu, VMCS_CR4_MASK, CR4_VMXE); + wvmcs(vcpu, VMCS_CR4_MASK, CR4_VMXE_MASK); hv_vcpu_invalidate_tlb(vcpu); - hv_vcpu_flush(vcpu); } static inline void macvm_set_rip(CPUState *cpu, uint64_t rip) diff --git a/target/i386/hvf/x86.c b/target/i386/hvf/x86.c index 2898bb70a8..d086584f26 100644 --- a/target/i386/hvf/x86.c +++ b/target/i386/hvf/x86.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "qemu-common.h" #include "x86_decode.h" #include "x86_emu.h" #include "vmcs.h" @@ -119,7 +118,7 @@ bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc, bool x86_is_protected(struct CPUState *cpu) { uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0); - return cr0 & CR0_PE; + return cr0 & CR0_PE_MASK; } bool x86_is_real(struct CPUState *cpu) @@ -150,13 +149,13 @@ bool x86_is_long64_mode(struct CPUState *cpu) bool x86_is_paging_mode(struct CPUState *cpu) { uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0); - return cr0 & CR0_PG; + return cr0 & CR0_PG_MASK; } bool x86_is_pae_enabled(struct CPUState *cpu) { uint64_t cr4 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4); - return cr4 & CR4_PAE; + return cr4 & CR4_PAE_MASK; } target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, X86Seg seg) diff --git a/target/i386/hvf/x86.h b/target/i386/hvf/x86.h index 782664c2ea..947b98da41 100644 --- a/target/i386/hvf/x86.h +++ b/target/i386/hvf/x86.h @@ -42,40 +42,6 @@ typedef struct x86_register { }; } __attribute__ ((__packed__)) x86_register; -typedef enum x86_reg_cr0 { - CR0_PE = (1L << 0), - CR0_MP = (1L << 1), - CR0_EM = (1L << 2), - CR0_TS = (1L << 3), - CR0_ET = (1L << 4), - CR0_NE = (1L << 5), - CR0_WP = (1L << 16), - CR0_AM = (1L << 18), - CR0_NW = (1L << 29), - CR0_CD = (1L << 30), - CR0_PG = (1L << 31), -} x86_reg_cr0; - -typedef enum x86_reg_cr4 { - CR4_VME = (1L << 0), - CR4_PVI = (1L << 1), - CR4_TSD = (1L << 2), - CR4_DE = (1L << 3), - CR4_PSE = (1L << 4), - CR4_PAE = (1L << 5), - CR4_MSE = (1L << 6), - CR4_PGE = (1L << 7), - CR4_PCE = (1L << 8), - CR4_OSFXSR = (1L << 9), - CR4_OSXMMEXCPT = (1L << 10), - CR4_VMXE = (1L << 13), - CR4_SMXE = (1L << 14), - CR4_FSGSBASE = (1L << 16), - CR4_PCIDE = (1L << 17), - CR4_OSXSAVE = (1L << 18), - CR4_SMEP = (1L << 20), -} x86_reg_cr4; - /* 16 bit Task State Segment */ typedef struct x86_tss_segment16 { uint16_t link; diff --git a/target/i386/hvf/x86_cpuid.c b/target/i386/hvf/x86_cpuid.c index 32b0d131df..7323a7a94b 100644 --- a/target/i386/hvf/x86_cpuid.c +++ b/target/i386/hvf/x86_cpuid.c @@ -21,7 +21,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "cpu.h" #include "x86.h" #include "vmx.h" @@ -96,7 +95,8 @@ uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx, ebx &= ~CPUID_7_0_EBX_INVPCID; } - ecx &= CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_AVX512_VPOPCNTDQ; + ecx &= CPUID_7_0_ECX_AVX512_VBMI | 
CPUID_7_0_ECX_AVX512_VPOPCNTDQ | + CPUID_7_0_ECX_RDPID; edx &= CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS; } else { ebx = 0; @@ -133,11 +133,11 @@ uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx, CPUID_FXSR | CPUID_EXT2_FXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX; hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cap); - if (!(cap & CPU_BASED2_RDTSCP)) { + if (!(cap2ctrl(cap, CPU_BASED2_RDTSCP) & CPU_BASED2_RDTSCP)) { edx &= ~CPUID_EXT2_RDTSCP; } hv_vmx_read_capability(HV_VMX_CAP_PROCBASED, &cap); - if (!(cap & CPU_BASED_TSC_OFFSET)) { + if (!(cap2ctrl(cap, CPU_BASED_TSC_OFFSET) & CPU_BASED_TSC_OFFSET)) { edx &= ~CPUID_EXT2_RDTSCP; } ecx &= CPUID_EXT3_LAHF_LM | CPUID_EXT3_CMP_LEG | CPUID_EXT3_CR8LEG | diff --git a/target/i386/hvf/x86_decode.c b/target/i386/hvf/x86_decode.c index 1590b7f0d4..5cc5cb29ad 100644 --- a/target/i386/hvf/x86_decode.c +++ b/target/i386/hvf/x86_decode.c @@ -18,7 +18,6 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "panic.h" #include "x86_decode.h" #include "vmx.h" diff --git a/target/i386/hvf/x86_descr.c b/target/i386/hvf/x86_descr.c index af15c06ac5..a484942cfc 100644 --- a/target/i386/hvf/x86_descr.c +++ b/target/i386/hvf/x86_descr.c @@ -18,7 +18,6 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "vmx.h" #include "x86_descr.h" diff --git a/target/i386/hvf/x86_emu.c b/target/i386/hvf/x86_emu.c index cc49a29f83..f5704f63e8 100644 --- a/target/i386/hvf/x86_emu.c +++ b/target/i386/hvf/x86_emu.c @@ -37,7 +37,6 @@ #include "qemu/osdep.h" #include "panic.h" -#include "qemu-common.h" #include "x86_decode.h" #include "x86.h" #include "x86_emu.h" @@ -128,7 +127,7 @@ void write_reg(CPUX86State *env, int reg, target_ulong val, int size) } } -target_ulong read_val_from_reg(uintptr_t reg_ptr, int size) +target_ulong read_val_from_reg(target_ulong reg_ptr, int size) { target_ulong val; @@ -151,7 +150,7 @@ target_ulong read_val_from_reg(uintptr_t reg_ptr, int size) return val; } -void write_val_to_reg(uintptr_t reg_ptr, target_ulong val, int size) +void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size) { switch (size) { case 1: @@ -171,12 +170,12 @@ void write_val_to_reg(uintptr_t reg_ptr, target_ulong val, int size) } } -static bool is_host_reg(struct CPUX86State *env, uintptr_t ptr) +static bool is_host_reg(CPUX86State *env, target_ulong ptr) { - return (ptr - (uintptr_t)&env->regs[0]) < sizeof(env->regs); + return (ptr - (target_ulong)&env->regs[0]) < sizeof(env->regs); } -void write_val_ext(struct CPUX86State *env, uintptr_t ptr, target_ulong val, int size) +void write_val_ext(CPUX86State *env, target_ulong ptr, target_ulong val, int size) { if (is_host_reg(env, ptr)) { write_val_to_reg(ptr, val, size); @@ -185,14 +184,14 @@ void write_val_ext(struct CPUX86State *env, uintptr_t ptr, target_ulong val, int vmx_write_mem(env_cpu(env), ptr, &val, size); } -uint8_t *read_mmio(struct CPUX86State *env, target_ulong ptr, int bytes) +uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes) { vmx_read_mem(env_cpu(env), env->hvf_mmio_buf, ptr, bytes); return env->hvf_mmio_buf; } -target_ulong read_val_ext(struct CPUX86State *env, uintptr_t ptr, int size) +target_ulong read_val_ext(CPUX86State *env, target_ulong ptr, int size) { target_ulong val; uint8_t *mmio_ptr; @@ -222,7 +221,7 @@ target_ulong read_val_ext(struct CPUX86State *env, uintptr_t ptr, int size) return val; } -static void fetch_operands(struct CPUX86State *env, struct 
x86_decode *decode, +static void fetch_operands(CPUX86State *env, struct x86_decode *decode, int n, bool val_op0, bool val_op1, bool val_op2) { int i; @@ -261,7 +260,7 @@ static void fetch_operands(struct CPUX86State *env, struct x86_decode *decode, } } -static void exec_mov(struct CPUX86State *env, struct x86_decode *decode) +static void exec_mov(CPUX86State *env, struct x86_decode *decode) { fetch_operands(env, decode, 2, false, true, false); write_val_ext(env, decode->op[0].ptr, decode->op[1].val, @@ -270,49 +269,49 @@ static void exec_mov(struct CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -static void exec_add(struct CPUX86State *env, struct x86_decode *decode) +static void exec_add(CPUX86State *env, struct x86_decode *decode) { EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true); env->eip += decode->len; } -static void exec_or(struct CPUX86State *env, struct x86_decode *decode) +static void exec_or(CPUX86State *env, struct x86_decode *decode) { EXEC_2OP_FLAGS_CMD(env, decode, |, SET_FLAGS_OSZAPC_LOGIC, true); env->eip += decode->len; } -static void exec_adc(struct CPUX86State *env, struct x86_decode *decode) +static void exec_adc(CPUX86State *env, struct x86_decode *decode) { EXEC_2OP_FLAGS_CMD(env, decode, +get_CF(env)+, SET_FLAGS_OSZAPC_ADD, true); env->eip += decode->len; } -static void exec_sbb(struct CPUX86State *env, struct x86_decode *decode) +static void exec_sbb(CPUX86State *env, struct x86_decode *decode) { EXEC_2OP_FLAGS_CMD(env, decode, -get_CF(env)-, SET_FLAGS_OSZAPC_SUB, true); env->eip += decode->len; } -static void exec_and(struct CPUX86State *env, struct x86_decode *decode) +static void exec_and(CPUX86State *env, struct x86_decode *decode) { EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, true); env->eip += decode->len; } -static void exec_sub(struct CPUX86State *env, struct x86_decode *decode) +static void exec_sub(CPUX86State *env, struct x86_decode *decode) { EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, true); env->eip += decode->len; } -static void exec_xor(struct CPUX86State *env, struct x86_decode *decode) +static void exec_xor(CPUX86State *env, struct x86_decode *decode) { EXEC_2OP_FLAGS_CMD(env, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true); env->eip += decode->len; } -static void exec_neg(struct CPUX86State *env, struct x86_decode *decode) +static void exec_neg(CPUX86State *env, struct x86_decode *decode) { /*EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);*/ int32_t val; @@ -335,13 +334,13 @@ static void exec_neg(struct CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -static void exec_cmp(struct CPUX86State *env, struct x86_decode *decode) +static void exec_cmp(CPUX86State *env, struct x86_decode *decode) { EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false); env->eip += decode->len; } -static void exec_inc(struct CPUX86State *env, struct x86_decode *decode) +static void exec_inc(CPUX86State *env, struct x86_decode *decode) { decode->op[1].type = X86_VAR_IMMEDIATE; decode->op[1].val = 0; @@ -351,7 +350,7 @@ static void exec_inc(struct CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -static void exec_dec(struct CPUX86State *env, struct x86_decode *decode) +static void exec_dec(CPUX86State *env, struct x86_decode *decode) { decode->op[1].type = X86_VAR_IMMEDIATE; decode->op[1].val = 0; @@ -360,13 +359,13 @@ static void exec_dec(struct CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -static void 
exec_tst(struct CPUX86State *env, struct x86_decode *decode) +static void exec_tst(CPUX86State *env, struct x86_decode *decode) { EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, false); env->eip += decode->len; } -static void exec_not(struct CPUX86State *env, struct x86_decode *decode) +static void exec_not(CPUX86State *env, struct x86_decode *decode) { fetch_operands(env, decode, 1, true, false, false); @@ -375,7 +374,7 @@ static void exec_not(struct CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -void exec_movzx(struct CPUX86State *env, struct x86_decode *decode) +void exec_movzx(CPUX86State *env, struct x86_decode *decode) { int src_op_size; int op_size = decode->operand_size; @@ -395,7 +394,7 @@ void exec_movzx(struct CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -static void exec_out(struct CPUX86State *env, struct x86_decode *decode) +static void exec_out(CPUX86State *env, struct x86_decode *decode) { switch (decode->opcode[0]) { case 0xe6: @@ -419,7 +418,7 @@ static void exec_out(struct CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -static void exec_in(struct CPUX86State *env, struct x86_decode *decode) +static void exec_in(CPUX86State *env, struct x86_decode *decode) { target_ulong val = 0; switch (decode->opcode[0]) { @@ -455,7 +454,7 @@ static void exec_in(struct CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -static inline void string_increment_reg(struct CPUX86State *env, int reg, +static inline void string_increment_reg(CPUX86State *env, int reg, struct x86_decode *decode) { target_ulong val = read_reg(env, reg, decode->addressing_size); @@ -467,8 +466,8 @@ static inline void string_increment_reg(struct CPUX86State *env, int reg, write_reg(env, reg, val, decode->addressing_size); } -static inline void string_rep(struct CPUX86State *env, struct x86_decode *decode, - void (*func)(struct CPUX86State *env, +static inline void string_rep(CPUX86State *env, struct x86_decode *decode, + void (*func)(CPUX86State *env, struct x86_decode *ins), int rep) { target_ulong rcx = read_reg(env, R_ECX, decode->addressing_size); @@ -484,7 +483,7 @@ static inline void string_rep(struct CPUX86State *env, struct x86_decode *decode } } -static void exec_ins_single(struct CPUX86State *env, struct x86_decode *decode) +static void exec_ins_single(CPUX86State *env, struct x86_decode *decode) { target_ulong addr = linear_addr_size(env_cpu(env), RDI(env), decode->addressing_size, R_ES); @@ -497,7 +496,7 @@ static void exec_ins_single(struct CPUX86State *env, struct x86_decode *decode) string_increment_reg(env, R_EDI, decode); } -static void exec_ins(struct CPUX86State *env, struct x86_decode *decode) +static void exec_ins(CPUX86State *env, struct x86_decode *decode) { if (decode->rep) { string_rep(env, decode, exec_ins_single, 0); @@ -508,7 +507,7 @@ static void exec_ins(struct CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -static void exec_outs_single(struct CPUX86State *env, struct x86_decode *decode) +static void exec_outs_single(CPUX86State *env, struct x86_decode *decode) { target_ulong addr = decode_linear_addr(env, decode, RSI(env), R_DS); @@ -520,7 +519,7 @@ static void exec_outs_single(struct CPUX86State *env, struct x86_decode *decode) string_increment_reg(env, R_ESI, decode); } -static void exec_outs(struct CPUX86State *env, struct x86_decode *decode) +static void exec_outs(CPUX86State *env, struct x86_decode *decode) { if (decode->rep) { string_rep(env, 
decode, exec_outs_single, 0); @@ -531,7 +530,7 @@ static void exec_outs(struct CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -static void exec_movs_single(struct CPUX86State *env, struct x86_decode *decode) +static void exec_movs_single(CPUX86State *env, struct x86_decode *decode) { target_ulong src_addr; target_ulong dst_addr; @@ -548,7 +547,7 @@ static void exec_movs_single(struct CPUX86State *env, struct x86_decode *decode) string_increment_reg(env, R_EDI, decode); } -static void exec_movs(struct CPUX86State *env, struct x86_decode *decode) +static void exec_movs(CPUX86State *env, struct x86_decode *decode) { if (decode->rep) { string_rep(env, decode, exec_movs_single, 0); @@ -559,7 +558,7 @@ static void exec_movs(struct CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -static void exec_cmps_single(struct CPUX86State *env, struct x86_decode *decode) +static void exec_cmps_single(CPUX86State *env, struct x86_decode *decode) { target_ulong src_addr; target_ulong dst_addr; @@ -579,7 +578,7 @@ static void exec_cmps_single(struct CPUX86State *env, struct x86_decode *decode) string_increment_reg(env, R_EDI, decode); } -static void exec_cmps(struct CPUX86State *env, struct x86_decode *decode) +static void exec_cmps(CPUX86State *env, struct x86_decode *decode) { if (decode->rep) { string_rep(env, decode, exec_cmps_single, decode->rep); @@ -590,7 +589,7 @@ static void exec_cmps(struct CPUX86State *env, struct x86_decode *decode) } -static void exec_stos_single(struct CPUX86State *env, struct x86_decode *decode) +static void exec_stos_single(CPUX86State *env, struct x86_decode *decode) { target_ulong addr; target_ulong val; @@ -604,7 +603,7 @@ static void exec_stos_single(struct CPUX86State *env, struct x86_decode *decode) } -static void exec_stos(struct CPUX86State *env, struct x86_decode *decode) +static void exec_stos(CPUX86State *env, struct x86_decode *decode) { if (decode->rep) { string_rep(env, decode, exec_stos_single, 0); @@ -615,7 +614,7 @@ static void exec_stos(struct CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -static void exec_scas_single(struct CPUX86State *env, struct x86_decode *decode) +static void exec_scas_single(CPUX86State *env, struct x86_decode *decode) { target_ulong addr; @@ -628,7 +627,7 @@ static void exec_scas_single(struct CPUX86State *env, struct x86_decode *decode) string_increment_reg(env, R_EDI, decode); } -static void exec_scas(struct CPUX86State *env, struct x86_decode *decode) +static void exec_scas(CPUX86State *env, struct x86_decode *decode) { decode->op[0].type = X86_VAR_REG; decode->op[0].reg = R_EAX; @@ -641,7 +640,7 @@ static void exec_scas(struct CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -static void exec_lods_single(struct CPUX86State *env, struct x86_decode *decode) +static void exec_lods_single(CPUX86State *env, struct x86_decode *decode) { target_ulong addr; target_ulong val = 0; @@ -653,7 +652,7 @@ static void exec_lods_single(struct CPUX86State *env, struct x86_decode *decode) string_increment_reg(env, R_ESI, decode); } -static void exec_lods(struct CPUX86State *env, struct x86_decode *decode) +static void exec_lods(CPUX86State *env, struct x86_decode *decode) { if (decode->rep) { string_rep(env, decode, exec_lods_single, 0); @@ -703,9 +702,6 @@ void simulate_rdmsr(struct CPUState *cpu) case MSR_CSTAR: abort(); break; - case MSR_PAT: - val = env->pat; - break; case MSR_IA32_MISC_ENABLE: val = env->msr_ia32_misc_enable; break; @@ -754,7 +750,7 
@@ void simulate_rdmsr(struct CPUState *cpu) val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */ break; default: - fprintf(stderr, "%s: unknown msr 0x%x\n", __func__, msr); + /* fprintf(stderr, "%s: unknown msr 0x%x\n", __func__, msr); */ val = 0; break; } @@ -763,7 +759,7 @@ void simulate_rdmsr(struct CPUState *cpu) RDX(env) = (uint32_t)(val >> 32); } -static void exec_rdmsr(struct CPUX86State *env, struct x86_decode *decode) +static void exec_rdmsr(CPUX86State *env, struct x86_decode *decode) { simulate_rdmsr(env_cpu(env)); env->eip += decode->len; @@ -807,9 +803,6 @@ void simulate_wrmsr(struct CPUState *cpu) hv_vcpu_invalidate_tlb(cpu->hvf->fd); } break; - case MSR_PAT: - env->pat = data; - break; case MSR_MTRRphysBase(0): case MSR_MTRRphysBase(1): case MSR_MTRRphysBase(2): @@ -861,7 +854,7 @@ void simulate_wrmsr(struct CPUState *cpu) printf("write msr %llx\n", RCX(cpu));*/ } -static void exec_wrmsr(struct CPUX86State *env, struct x86_decode *decode) +static void exec_wrmsr(CPUX86State *env, struct x86_decode *decode) { simulate_wrmsr(env_cpu(env)); env->eip += decode->len; @@ -871,7 +864,7 @@ static void exec_wrmsr(struct CPUX86State *env, struct x86_decode *decode) * flag: * 0 - bt, 1 - btc, 2 - bts, 3 - btr */ -static void do_bt(struct CPUX86State *env, struct x86_decode *decode, int flag) +static void do_bt(CPUX86State *env, struct x86_decode *decode, int flag) { int32_t displacement; uint8_t index; @@ -917,31 +910,31 @@ static void do_bt(struct CPUX86State *env, struct x86_decode *decode, int flag) set_CF(env, cf); } -static void exec_bt(struct CPUX86State *env, struct x86_decode *decode) +static void exec_bt(CPUX86State *env, struct x86_decode *decode) { do_bt(env, decode, 0); env->eip += decode->len; } -static void exec_btc(struct CPUX86State *env, struct x86_decode *decode) +static void exec_btc(CPUX86State *env, struct x86_decode *decode) { do_bt(env, decode, 1); env->eip += decode->len; } -static void exec_btr(struct CPUX86State *env, struct x86_decode *decode) +static void exec_btr(CPUX86State *env, struct x86_decode *decode) { do_bt(env, decode, 3); env->eip += decode->len; } -static void exec_bts(struct CPUX86State *env, struct x86_decode *decode) +static void exec_bts(CPUX86State *env, struct x86_decode *decode) { do_bt(env, decode, 2); env->eip += decode->len; } -void exec_shl(struct CPUX86State *env, struct x86_decode *decode) +void exec_shl(CPUX86State *env, struct x86_decode *decode) { uint8_t count; int of = 0, cf = 0; @@ -1028,7 +1021,7 @@ void exec_movsx(CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -void exec_ror(struct CPUX86State *env, struct x86_decode *decode) +void exec_ror(CPUX86State *env, struct x86_decode *decode) { uint8_t count; @@ -1106,7 +1099,7 @@ void exec_ror(struct CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -void exec_rol(struct CPUX86State *env, struct x86_decode *decode) +void exec_rol(CPUX86State *env, struct x86_decode *decode) { uint8_t count; @@ -1188,7 +1181,7 @@ void exec_rol(struct CPUX86State *env, struct x86_decode *decode) } -void exec_rcl(struct CPUX86State *env, struct x86_decode *decode) +void exec_rcl(CPUX86State *env, struct x86_decode *decode) { uint8_t count; int of = 0, cf = 0; @@ -1273,7 +1266,7 @@ void exec_rcl(struct CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -void exec_rcr(struct CPUX86State *env, struct x86_decode *decode) +void exec_rcr(CPUX86State *env, struct x86_decode *decode) { uint8_t count; int of = 0, cf = 0; @@ 
-1348,7 +1341,7 @@ void exec_rcr(struct CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -static void exec_xchg(struct CPUX86State *env, struct x86_decode *decode) +static void exec_xchg(CPUX86State *env, struct x86_decode *decode) { fetch_operands(env, decode, 2, true, true, false); @@ -1360,7 +1353,7 @@ static void exec_xchg(struct CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -static void exec_xadd(struct CPUX86State *env, struct x86_decode *decode) +static void exec_xadd(CPUX86State *env, struct x86_decode *decode) { EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true); write_val_ext(env, decode->op[1].ptr, decode->op[0].val, @@ -1371,7 +1364,7 @@ static void exec_xadd(struct CPUX86State *env, struct x86_decode *decode) static struct cmd_handler { enum x86_decode_cmd cmd; - void (*handler)(struct CPUX86State *env, struct x86_decode *ins); + void (*handler)(CPUX86State *env, struct x86_decode *ins); } handlers[] = { {X86_DECODE_CMD_INVL, NULL,}, {X86_DECODE_CMD_MOV, exec_mov}, @@ -1471,7 +1464,7 @@ void store_regs(struct CPUState *cpu) macvm_set_rip(cpu, env->eip); } -bool exec_instruction(struct CPUX86State *env, struct x86_decode *ins) +bool exec_instruction(CPUX86State *env, struct x86_decode *ins) { /*if (hvf_vcpu_id(cpu)) printf("%d, %llx: exec_instruction %s\n", hvf_vcpu_id(cpu), env->eip, diff --git a/target/i386/hvf/x86_emu.h b/target/i386/hvf/x86_emu.h index bbb608a1d0..640da90b30 100644 --- a/target/i386/hvf/x86_emu.h +++ b/target/i386/hvf/x86_emu.h @@ -24,7 +24,7 @@ #include "cpu.h" void init_emu(void); -bool exec_instruction(struct CPUX86State *env, struct x86_decode *ins); +bool exec_instruction(CPUX86State *env, struct x86_decode *ins); void load_regs(struct CPUState *cpu); void store_regs(struct CPUState *cpu); @@ -34,17 +34,17 @@ void simulate_wrmsr(struct CPUState *cpu); target_ulong read_reg(CPUX86State *env, int reg, int size); void write_reg(CPUX86State *env, int reg, target_ulong val, int size); -target_ulong read_val_from_reg(uintptr_t reg_ptr, int size); -void write_val_to_reg(uintptr_t reg_ptr, target_ulong val, int size); -void write_val_ext(struct CPUX86State *env, uintptr_t ptr, target_ulong val, int size); -uint8_t *read_mmio(struct CPUX86State *env, target_ulong ptr, int bytes); -target_ulong read_val_ext(struct CPUX86State *env, uintptr_t ptr, int size); +target_ulong read_val_from_reg(target_ulong reg_ptr, int size); +void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size); +void write_val_ext(CPUX86State *env, target_ulong ptr, target_ulong val, int size); +uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes); +target_ulong read_val_ext(CPUX86State *env, target_ulong ptr, int size); -void exec_movzx(struct CPUX86State *env, struct x86_decode *decode); -void exec_shl(struct CPUX86State *env, struct x86_decode *decode); -void exec_movsx(struct CPUX86State *env, struct x86_decode *decode); -void exec_ror(struct CPUX86State *env, struct x86_decode *decode); -void exec_rol(struct CPUX86State *env, struct x86_decode *decode); -void exec_rcl(struct CPUX86State *env, struct x86_decode *decode); -void exec_rcr(struct CPUX86State *env, struct x86_decode *decode); +void exec_movzx(CPUX86State *env, struct x86_decode *decode); +void exec_shl(CPUX86State *env, struct x86_decode *decode); +void exec_movsx(CPUX86State *env, struct x86_decode *decode); +void exec_ror(CPUX86State *env, struct x86_decode *decode); +void exec_rol(CPUX86State *env, struct x86_decode *decode); +void 
exec_rcl(CPUX86State *env, struct x86_decode *decode); +void exec_rcr(CPUX86State *env, struct x86_decode *decode); #endif diff --git a/target/i386/hvf/x86_flags.c b/target/i386/hvf/x86_flags.c index fecbca7517..03d6de5efc 100644 --- a/target/i386/hvf/x86_flags.c +++ b/target/i386/hvf/x86_flags.c @@ -23,7 +23,6 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "panic.h" #include "cpu.h" #include "x86_flags.h" diff --git a/target/i386/hvf/x86_mmu.c b/target/i386/hvf/x86_mmu.c index 59a1a36fcf..06f4bcaed8 100644 --- a/target/i386/hvf/x86_mmu.c +++ b/target/i386/hvf/x86_mmu.c @@ -18,7 +18,6 @@ #include "qemu/osdep.h" #include "panic.h" -#include "qemu-common.h" #include "cpu.h" #include "x86.h" #include "x86_mmu.h" @@ -129,7 +128,7 @@ static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt, uint32_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0); /* check protection */ - if (cr0 & CR0_WP) { + if (cr0 & CR0_WP_MASK) { if (pt->write_access && !pte_write_access(pte)) { return false; } diff --git a/target/i386/hvf/x86_task.c b/target/i386/hvf/x86_task.c index 422156128b..beaeec0687 100644 --- a/target/i386/hvf/x86_task.c +++ b/target/i386/hvf/x86_task.c @@ -8,7 +8,6 @@ // GNU General Public License for more details. #include "qemu/osdep.h" #include "panic.h" -#include "qemu-common.h" #include "qemu/error-report.h" #include "sysemu/hvf.h" @@ -174,12 +173,12 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc); VM_PANIC("task_switch_16"); - macvm_set_cr0(cpu->hvf->fd, rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0) | CR0_TS); + macvm_set_cr0(cpu->hvf->fd, rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0) | + CR0_TS_MASK); x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg); vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR); store_regs(cpu); hv_vcpu_invalidate_tlb(cpu->hvf->fd); - hv_vcpu_flush(cpu->hvf->fd); } diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c index 05ec1bddc4..69d4fb8cf5 100644 --- a/target/i386/hvf/x86hvf.c +++ b/target/i386/hvf/x86hvf.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "x86hvf.h" #include "vmx.h" #include "vmcs.h" @@ -83,7 +82,7 @@ void hvf_put_xsave(CPUState *cpu_state) } } -void hvf_put_segments(CPUState *cpu_state) +static void hvf_put_segments(CPUState *cpu_state) { CPUX86State *env = &X86_CPU(cpu_state)->env; struct vmx_segment seg; @@ -125,8 +124,6 @@ void hvf_put_segments(CPUState *cpu_state) hvf_set_segment(cpu_state, &seg, &env->ldt, false); vmx_write_segment_descriptor(cpu_state, &seg, R_LDTR); - - hv_vcpu_flush(cpu_state->hvf->fd); } void hvf_put_msrs(CPUState *cpu_state) @@ -166,7 +163,7 @@ void hvf_get_xsave(CPUState *cpu_state) x86_cpu_xrstor_all_areas(X86_CPU(cpu_state), xsave, xsave_len); } -void hvf_get_segments(CPUState *cpu_state) +static void hvf_get_segments(CPUState *cpu_state) { CPUX86State *env = &X86_CPU(cpu_state)->env; diff --git a/target/i386/hvf/x86hvf.h b/target/i386/hvf/x86hvf.h index 99ed8d608d..db6003d6bd 100644 --- a/target/i386/hvf/x86hvf.h +++ b/target/i386/hvf/x86hvf.h @@ -26,11 +26,9 @@ void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg, SegmentCache *qseg, bool is_tr); void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg); void hvf_put_xsave(CPUState *cpu_state); -void hvf_put_segments(CPUState *cpu_state); void hvf_put_msrs(CPUState *cpu_state); void hvf_get_xsave(CPUState *cpu_state); void hvf_get_msrs(CPUState 
*cpu_state); void vmx_clear_int_window_exiting(CPUState *cpu); -void hvf_get_segments(CPUState *cpu_state); void vmx_update_tpr(CPUState *cpu); #endif diff --git a/target/i386/kvm/hyperv-proto.h b/target/i386/kvm/hyperv-proto.h index 5fbb385cc1..464fbf09e3 100644 --- a/target/i386/kvm/hyperv-proto.h +++ b/target/i386/kvm/hyperv-proto.h @@ -19,6 +19,9 @@ #define HV_CPUID_ENLIGHTMENT_INFO 0x40000004 #define HV_CPUID_IMPLEMENT_LIMITS 0x40000005 #define HV_CPUID_NESTED_FEATURES 0x4000000A +#define HV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS 0x40000080 +#define HV_CPUID_SYNDBG_INTERFACE 0x40000081 +#define HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES 0x40000082 #define HV_CPUID_MIN 0x40000005 #define HV_CPUID_MAX 0x4000ffff #define HV_HYPERVISOR_PRESENT_BIT 0x80000000 @@ -51,12 +54,19 @@ #define HV_GUEST_DEBUGGING_AVAILABLE (1u << 1) #define HV_PERF_MONITOR_AVAILABLE (1u << 2) #define HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE (1u << 3) -#define HV_HYPERCALL_PARAMS_XMM_AVAILABLE (1u << 4) +#define HV_HYPERCALL_XMM_INPUT_AVAILABLE (1u << 4) #define HV_GUEST_IDLE_STATE_AVAILABLE (1u << 5) #define HV_FREQUENCY_MSRS_AVAILABLE (1u << 8) #define HV_GUEST_CRASH_MSR_AVAILABLE (1u << 10) +#define HV_FEATURE_DEBUG_MSRS_AVAILABLE (1u << 11) +#define HV_EXT_GVA_RANGES_FLUSH_AVAILABLE (1u << 14) #define HV_STIMER_DIRECT_MODE_AVAILABLE (1u << 19) +/* + * HV_CPUID_FEATURES.EBX bits + */ +#define HV_PARTITION_DEBUGGING_ALLOWED (1u << 12) + /* * HV_CPUID_ENLIGHTMENT_INFO.EAX bits */ @@ -66,11 +76,23 @@ #define HV_APIC_ACCESS_RECOMMENDED (1u << 3) #define HV_SYSTEM_RESET_RECOMMENDED (1u << 4) #define HV_RELAXED_TIMING_RECOMMENDED (1u << 5) +#define HV_DEPRECATING_AEOI_RECOMMENDED (1u << 9) #define HV_CLUSTER_IPI_RECOMMENDED (1u << 10) #define HV_EX_PROCESSOR_MASKS_RECOMMENDED (1u << 11) #define HV_ENLIGHTENED_VMCS_RECOMMENDED (1u << 14) #define HV_NO_NONARCH_CORESHARING (1u << 18) +/* + * HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX bits + */ +#define HV_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING (1u << 1) + +/* + * HV_CPUID_NESTED_FEATURES.EAX bits + */ +#define HV_NESTED_DIRECT_FLUSH (1u << 17) +#define HV_NESTED_MSR_BITMAP (1u << 19) + /* * Basic virtualized MSRs */ @@ -129,6 +151,18 @@ #define HV_X64_MSR_STIMER3_CONFIG 0x400000B6 #define HV_X64_MSR_STIMER3_COUNT 0x400000B7 +/* + * Hyper-V Synthetic debug options MSR + */ +#define HV_X64_MSR_SYNDBG_CONTROL 0x400000F1 +#define HV_X64_MSR_SYNDBG_STATUS 0x400000F2 +#define HV_X64_MSR_SYNDBG_SEND_BUFFER 0x400000F3 +#define HV_X64_MSR_SYNDBG_RECV_BUFFER 0x400000F4 +#define HV_X64_MSR_SYNDBG_PENDING_BUFFER 0x400000F5 +#define HV_X64_MSR_SYNDBG_OPTIONS 0x400000FF + +#define HV_X64_SYNDBG_OPTION_USE_HCALLS BIT(2) + /* * Guest crash notification MSRs */ @@ -167,5 +201,16 @@ #define HV_STIMER_COUNT 4 +/* + * Synthetic debugger control definitions + */ +#define HV_SYNDBG_CONTROL_SEND (1u << 0) +#define HV_SYNDBG_CONTROL_RECV (1u << 1) +#define HV_SYNDBG_CONTROL_SEND_SIZE(ctl) ((ctl >> 16) & 0xffff) +#define HV_SYNDBG_STATUS_INVALID (0) +#define HV_SYNDBG_STATUS_SEND_SUCCESS (1u << 0) +#define HV_SYNDBG_STATUS_RECV_SUCCESS (1u << 2) +#define HV_SYNDBG_STATUS_RESET (1u << 3) +#define HV_SYNDBG_STATUS_SET_SIZE(st, sz) (st | (sz << 16)) #endif diff --git a/target/i386/kvm/hyperv-stub.c b/target/i386/kvm/hyperv-stub.c index 0028527e79..778ed782e6 100644 --- a/target/i386/kvm/hyperv-stub.c +++ b/target/i386/kvm/hyperv-stub.c @@ -27,6 +27,12 @@ int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit) return 0; case KVM_EXIT_HYPERV_HCALL: exit->u.hcall.result = HV_STATUS_INVALID_HYPERCALL_CODE; 
+ return 0; + case KVM_EXIT_HYPERV_SYNDBG: + if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) { + return -1; + } + return 0; default: return -1; diff --git a/target/i386/kvm/hyperv.c b/target/i386/kvm/hyperv.c index 26efc1e0e6..e3ac978648 100644 --- a/target/i386/kvm/hyperv.c +++ b/target/i386/kvm/hyperv.c @@ -23,6 +23,10 @@ int hyperv_x86_synic_add(X86CPU *cpu) return 0; } +/* + * All devices possibly using SynIC have to be reset before calling this to let + * them remove their SINT routes first. + */ void hyperv_x86_synic_reset(X86CPU *cpu) { hyperv_synic_reset(CPU(cpu)); @@ -81,20 +85,66 @@ int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit) case KVM_EXIT_HYPERV_HCALL: { uint16_t code = exit->u.hcall.input & 0xffff; bool fast = exit->u.hcall.input & HV_HYPERCALL_FAST; - uint64_t param = exit->u.hcall.params[0]; + uint64_t in_param = exit->u.hcall.params[0]; + uint64_t out_param = exit->u.hcall.params[1]; switch (code) { case HV_POST_MESSAGE: - exit->u.hcall.result = hyperv_hcall_post_message(param, fast); + exit->u.hcall.result = hyperv_hcall_post_message(in_param, fast); break; case HV_SIGNAL_EVENT: - exit->u.hcall.result = hyperv_hcall_signal_event(param, fast); + exit->u.hcall.result = hyperv_hcall_signal_event(in_param, fast); + break; + case HV_POST_DEBUG_DATA: + exit->u.hcall.result = + hyperv_hcall_post_dbg_data(in_param, out_param, fast); + break; + case HV_RETRIEVE_DEBUG_DATA: + exit->u.hcall.result = + hyperv_hcall_retreive_dbg_data(in_param, out_param, fast); + break; + case HV_RESET_DEBUG_SESSION: + exit->u.hcall.result = + hyperv_hcall_reset_dbg_session(out_param); break; default: exit->u.hcall.result = HV_STATUS_INVALID_HYPERCALL_CODE; } return 0; } + + case KVM_EXIT_HYPERV_SYNDBG: + if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) { + return -1; + } + + switch (exit->u.syndbg.msr) { + case HV_X64_MSR_SYNDBG_CONTROL: { + uint64_t control = exit->u.syndbg.control; + env->msr_hv_syndbg_control = control; + env->msr_hv_syndbg_send_page = exit->u.syndbg.send_page; + env->msr_hv_syndbg_recv_page = exit->u.syndbg.recv_page; + exit->u.syndbg.status = HV_STATUS_SUCCESS; + if (control & HV_SYNDBG_CONTROL_SEND) { + exit->u.syndbg.status = + hyperv_syndbg_send(env->msr_hv_syndbg_send_page, + HV_SYNDBG_CONTROL_SEND_SIZE(control)); + } else if (control & HV_SYNDBG_CONTROL_RECV) { + exit->u.syndbg.status = + hyperv_syndbg_recv(env->msr_hv_syndbg_recv_page, + TARGET_PAGE_SIZE); + } + break; + } + case HV_X64_MSR_SYNDBG_PENDING_BUFFER: + env->msr_hv_syndbg_pending_page = exit->u.syndbg.pending_page; + hyperv_syndbg_set_pending_page(env->msr_hv_syndbg_pending_page); + break; + default: + return -1; + } + + return 0; default: return -1; } diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c index d95028018e..7237378a7d 100644 --- a/target/i386/kvm/kvm-cpu.c +++ b/target/i386/kvm/kvm-cpu.c @@ -84,7 +84,7 @@ static void kvm_cpu_max_instance_init(X86CPU *cpu) static void kvm_cpu_xsave_init(void) { static bool first = true; - KVMState *s = kvm_state; + uint32_t eax, ebx, ecx, edx; int i; if (!first) { @@ -99,12 +99,18 @@ static void kvm_cpu_xsave_init(void) for (i = XSTATE_SSE_BIT + 1; i < XSAVE_STATE_AREA_COUNT; i++) { ExtSaveArea *esa = &x86_ext_save_areas[i]; - if (esa->size) { - int sz = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EAX); - if (sz != 0) { - assert(esa->size == sz); - esa->offset = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EBX); - } + if (!esa->size) { + continue; + } + if ((x86_cpu_get_supported_feature_word(esa->feature, false) & esa->bits) + != 
esa->bits) { + continue; + } + host_cpuid(0xd, i, &eax, &ebx, &ecx, &edx); + if (eax != 0) { + assert(esa->size == eax); + esa->offset = ebx; + esa->ecx = ecx; } } } @@ -165,7 +171,7 @@ static void kvm_cpu_instance_init(CPUState *cs) /* only applies to builtin_x86_defs cpus */ if (!kvm_irqchip_in_kernel()) { x86_cpu_change_kvm_default("x2apic", "off"); - } else if (kvm_irqchip_is_split() && kvm_enable_x2apic()) { + } else if (kvm_irqchip_is_split()) { x86_cpu_change_kvm_default("kvm-msi-ext-dest-id", "on"); } diff --git a/target/i386/kvm/kvm-stub.c b/target/i386/kvm/kvm-stub.c index f6e7e4466e..e052f1c7b0 100644 --- a/target/i386/kvm/kvm-stub.c +++ b/target/i386/kvm/kvm-stub.c @@ -44,3 +44,8 @@ bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp) { abort(); } + +void kvm_set_max_apic_id(uint32_t max_apic_id) +{ + return; +} diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index e69abe48e3..a213209379 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -15,8 +15,10 @@ #include "qemu/osdep.h" #include "qapi/qapi-events-run-state.h" #include "qapi/error.h" +#include "qapi/visitor.h" #include #include +#include #include #include "standard-headers/asm-x86/kvm_para.h" @@ -28,7 +30,7 @@ #include "sysemu/kvm_int.h" #include "sysemu/runstate.h" #include "kvm_i386.h" -#include "sev_i386.h" +#include "sev.h" #include "hyperv.h" #include "hyperv-proto.h" @@ -37,6 +39,7 @@ #include "qemu/main-loop.h" #include "qemu/config-file.h" #include "qemu/error-report.h" +#include "qemu/memalign.h" #include "hw/i386/x86.h" #include "hw/i386/apic.h" #include "hw/i386/apic_internal.h" @@ -44,7 +47,6 @@ #include "hw/i386/intel_iommu.h" #include "hw/i386/x86-iommu.h" #include "hw/i386/e820_memory_layout.h" -#include "sysemu/sev.h" #include "hw/pci/pci.h" #include "hw/pci/msi.h" @@ -53,6 +55,8 @@ #include "exec/memattrs.h" #include "trace.h" +#include CONFIG_DEVICES + //#define DEBUG_KVM #ifdef DEBUG_KVM @@ -103,9 +107,11 @@ static bool has_msr_hv_synic; static bool has_msr_hv_stimer; static bool has_msr_hv_frequencies; static bool has_msr_hv_reenlightenment; +static bool has_msr_hv_syndbg_options; static bool has_msr_xss; static bool has_msr_umwait; static bool has_msr_spec_ctrl; +static bool has_tsc_scale_msr; static bool has_msr_tsx_ctrl; static bool has_msr_virt_ssbd; static bool has_msr_smi_count; @@ -122,9 +128,12 @@ static uint32_t num_architectural_pmu_gp_counters; static uint32_t num_architectural_pmu_fixed_counters; static int has_xsave; +static int has_xsave2; static int has_xcrs; static int has_pit_state2; +static int has_sregs2; static int has_exception_payload; +static int has_triple_fault_event; static bool has_msr_mcg_ext_ctl; @@ -132,8 +141,11 @@ static struct kvm_cpuid2 *cpuid_cache; static struct kvm_cpuid2 *hv_cpuid_cache; static struct kvm_msr_list *kvm_feature_msrs; +static KVMMSRHandlers msr_handlers[KVM_MSR_FILTER_MAX_RANGES]; + #define BUS_LOCK_SLICE_TIME 1000000000ULL /* ns */ static RateLimit bus_lock_ratelimit_ctrl; +static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value); int kvm_has_pit_state2(void) { @@ -149,7 +161,7 @@ bool kvm_has_adjust_clock_stable(void) { int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK); - return (ret == KVM_CLOCK_TSC_STABLE); + return (ret & KVM_CLOCK_TSC_STABLE); } bool kvm_has_adjust_clock(void) @@ -204,28 +216,21 @@ static int kvm_get_tsc(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; - struct { - struct kvm_msrs info; - struct kvm_msr_entry entries[1]; - } msr_data = {}; + 
uint64_t value; int ret; if (env->tsc_valid) { return 0; } - memset(&msr_data, 0, sizeof(msr_data)); - msr_data.info.nmsrs = 1; - msr_data.entries[0].index = MSR_IA32_TSC; env->tsc_valid = !runstate_is_running(); - ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data); + ret = kvm_get_one_msr(cpu, MSR_IA32_TSC, &value); if (ret < 0) { return ret; } - assert(ret == 1); - env->tsc = msr_data.entries[0].data; + env->tsc = value; return 0; } @@ -347,6 +352,7 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function, struct kvm_cpuid2 *cpuid; uint32_t ret = 0; uint32_t cpuid_1_edx; + uint64_t bitmask; cpuid = get_supported_cpuid(s); @@ -404,6 +410,34 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function, if (!has_msr_arch_capabs) { ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES; } + } else if (function == 0xd && index == 0 && + (reg == R_EAX || reg == R_EDX)) { + /* + * The value returned by KVM_GET_SUPPORTED_CPUID does not include + * features that still have to be enabled with the arch_prctl + * system call. QEMU needs the full value, which is retrieved + * with KVM_GET_DEVICE_ATTR. + */ + struct kvm_device_attr attr = { + .group = 0, + .attr = KVM_X86_XCOMP_GUEST_SUPP, + .addr = (unsigned long) &bitmask + }; + + bool sys_attr = kvm_check_extension(s, KVM_CAP_SYS_ATTRIBUTES); + if (!sys_attr) { + return ret; + } + + int rc = kvm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr); + if (rc < 0) { + if (rc != -ENXIO) { + warn_report("KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) " + "error: %d", rc); + } + return ret; + } + ret = (reg == R_EAX) ? bitmask : bitmask >> 32; } else if (function == 0x80000001 && reg == R_ECX) { /* * It's safe to enable TOPOEXT even if it's not returned by @@ -536,7 +570,7 @@ static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code) if (code == BUS_MCEERR_AR) { status |= MCI_STATUS_AR | 0x134; - mcg_status |= MCG_STATUS_EIPV; + mcg_status |= MCG_STATUS_RIPV | MCG_STATUS_EIPV; } else { status |= 0xc0; mcg_status |= MCG_STATUS_RIPV; @@ -801,6 +835,8 @@ static bool tsc_is_stable_and_known(CPUX86State *env) || env->user_tsc_khz; } +#define DEFAULT_EVMCS_VERSION ((1 << 8) | 1) + static struct { const char *desc; struct { @@ -821,9 +857,7 @@ static struct { .desc = "virtual APIC (hv-vapic)", .flags = { {.func = HV_CPUID_FEATURES, .reg = R_EAX, - .bits = HV_APIC_ACCESS_AVAILABLE}, - {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX, - .bits = HV_APIC_ACCESS_RECOMMENDED} + .bits = HV_APIC_ACCESS_AVAILABLE} } }, [HYPERV_FEAT_TIME] = { @@ -926,6 +960,53 @@ static struct { }, .dependencies = BIT(HYPERV_FEAT_STIMER) }, + [HYPERV_FEAT_AVIC] = { + .desc = "AVIC/APICv support (hv-avic/hv-apicv)", + .flags = { + {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX, + .bits = HV_DEPRECATING_AEOI_RECOMMENDED} + } + }, +#ifdef CONFIG_SYNDBG + [HYPERV_FEAT_SYNDBG] = { + .desc = "Enable synthetic kernel debugger channel (hv-syndbg)", + .flags = { + {.func = HV_CPUID_FEATURES, .reg = R_EDX, + .bits = HV_FEATURE_DEBUG_MSRS_AVAILABLE} + }, + .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_RELAXED) + }, +#endif + [HYPERV_FEAT_MSR_BITMAP] = { + .desc = "enlightened MSR-Bitmap (hv-emsr-bitmap)", + .flags = { + {.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX, + .bits = HV_NESTED_MSR_BITMAP} + } + }, + [HYPERV_FEAT_XMM_INPUT] = { + .desc = "XMM fast hypercall input (hv-xmm-input)", + .flags = { + {.func = HV_CPUID_FEATURES, .reg = R_EDX, + .bits = HV_HYPERCALL_XMM_INPUT_AVAILABLE} + } + }, + [HYPERV_FEAT_TLBFLUSH_EXT] = { + .desc = "Extended gva ranges for TLB flush 
hypercalls (hv-tlbflush-ext)", + .flags = { + {.func = HV_CPUID_FEATURES, .reg = R_EDX, + .bits = HV_EXT_GVA_RANGES_FLUSH_AVAILABLE} + }, + .dependencies = BIT(HYPERV_FEAT_TLBFLUSH) + }, + [HYPERV_FEAT_TLBFLUSH_DIRECT] = { + .desc = "direct TLB flush (hv-tlbflush-direct)", + .flags = { + {.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX, + .bits = HV_NESTED_DIRECT_FLUSH} + }, + .dependencies = BIT(HYPERV_FEAT_VAPIC) + }, }; static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max, @@ -966,8 +1047,8 @@ static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max, static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs) { struct kvm_cpuid2 *cpuid; - /* 0x40000000..0x40000005, 0x4000000A, 0x40000080..0x40000080 leaves */ - int max = 10; + /* 0x40000000..0x40000005, 0x4000000A, 0x40000080..0x40000082 leaves */ + int max = 11; int i; bool do_sys_ioctl; @@ -1080,6 +1161,12 @@ static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs) entry_feat->eax |= HV_SYNTIMERS_AVAILABLE; } + if (has_msr_hv_syndbg_options) { + entry_feat->edx |= HV_GUEST_DEBUGGING_AVAILABLE; + entry_feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE; + entry_feat->ebx |= HV_PARTITION_DEBUGGING_ALLOWED; + } + if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TLBFLUSH) > 0) { entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED; @@ -1203,6 +1290,13 @@ static uint32_t hv_build_cpuid_leaf(CPUState *cs, uint32_t func, int reg) } } + /* HV_CPUID_NESTED_FEATURES.EAX also encodes the supported eVMCS range */ + if (func == HV_CPUID_NESTED_FEATURES && reg == R_EAX) { + if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) { + r |= DEFAULT_EVMCS_VERSION; + } + } + return r; } @@ -1253,14 +1347,18 @@ bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp) cpu->hyperv_interface_id[3] = hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EDX); - cpu->hyperv_version_id[0] = + cpu->hyperv_ver_id_build = hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EAX); - cpu->hyperv_version_id[1] = - hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX); - cpu->hyperv_version_id[2] = + cpu->hyperv_ver_id_major = + hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) >> 16; + cpu->hyperv_ver_id_minor = + hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) & 0xffff; + cpu->hyperv_ver_id_sp = hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_ECX); - cpu->hyperv_version_id[3] = - hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX); + cpu->hyperv_ver_id_sb = + hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) >> 24; + cpu->hyperv_ver_id_sn = + hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) & 0xffffff; cpu->hv_max_vps = hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EAX); @@ -1327,12 +1425,22 @@ static int hyperv_fill_cpuids(CPUState *cs, { X86CPU *cpu = X86_CPU(cs); struct kvm_cpuid_entry2 *c; - uint32_t cpuid_i = 0; + uint32_t signature[3]; + uint32_t cpuid_i = 0, max_cpuid_leaf = 0; + uint32_t nested_eax = + hv_build_cpuid_leaf(cs, HV_CPUID_NESTED_FEATURES, R_EAX); + + max_cpuid_leaf = nested_eax ? HV_CPUID_NESTED_FEATURES : + HV_CPUID_IMPLEMENT_LIMITS; + + if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) { + max_cpuid_leaf = + MAX(max_cpuid_leaf, HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES); + } c = &cpuid_ent[cpuid_i++]; c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS; - c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ? 
- HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS; + c->eax = max_cpuid_leaf; c->ebx = cpu->hyperv_vendor_id[0]; c->ecx = cpu->hyperv_vendor_id[1]; c->edx = cpu->hyperv_vendor_id[2]; @@ -1346,10 +1454,12 @@ static int hyperv_fill_cpuids(CPUState *cs, c = &cpuid_ent[cpuid_i++]; c->function = HV_CPUID_VERSION; - c->eax = cpu->hyperv_version_id[0]; - c->ebx = cpu->hyperv_version_id[1]; - c->ecx = cpu->hyperv_version_id[2]; - c->edx = cpu->hyperv_version_id[3]; + c->eax = cpu->hyperv_ver_id_build; + c->ebx = (uint32_t)cpu->hyperv_ver_id_major << 16 | + cpu->hyperv_ver_id_minor; + c->ecx = cpu->hyperv_ver_id_sp; + c->edx = (uint32_t)cpu->hyperv_ver_id_sb << 24 | + (cpu->hyperv_ver_id_sn & 0xffffff); c = &cpuid_ent[cpuid_i++]; c->function = HV_CPUID_FEATURES; @@ -1366,6 +1476,7 @@ static int hyperv_fill_cpuids(CPUState *cs, c->ebx |= HV_POST_MESSAGES | HV_SIGNAL_EVENTS; } + /* Not exposed by KVM but needed to make CPU hotplug in Windows work */ c->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE; @@ -1374,6 +1485,11 @@ static int hyperv_fill_cpuids(CPUState *cs, c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX); c->ebx = cpu->hyperv_spinlock_attempts; + if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC) && + !hyperv_feat_enabled(cpu, HYPERV_FEAT_AVIC)) { + c->eax |= HV_APIC_ACCESS_RECOMMENDED; + } + if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) { c->eax |= HV_NO_NONARCH_CORESHARING; } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) { @@ -1388,8 +1504,8 @@ static int hyperv_fill_cpuids(CPUState *cs, c->ecx = cpu->hyperv_limits[1]; c->edx = cpu->hyperv_limits[2]; - if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) { - __u32 function; + if (nested_eax) { + uint32_t function; /* Create zeroed 0x40000006..0x40000009 leaves */ for (function = HV_CPUID_IMPLEMENT_LIMITS + 1; @@ -1400,7 +1516,34 @@ static int hyperv_fill_cpuids(CPUState *cs, c = &cpuid_ent[cpuid_i++]; c->function = HV_CPUID_NESTED_FEATURES; - c->eax = cpu->hyperv_nested[0]; + c->eax = nested_eax; + } + + if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) { + c = &cpuid_ent[cpuid_i++]; + c->function = HV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS; + c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ? 
+ HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS; + memcpy(signature, "Microsoft VS", 12); + c->eax = 0; + c->ebx = signature[0]; + c->ecx = signature[1]; + c->edx = signature[2]; + + c = &cpuid_ent[cpuid_i++]; + c->function = HV_CPUID_SYNDBG_INTERFACE; + memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12); + c->eax = signature[0]; + c->ebx = 0; + c->ecx = 0; + c->edx = 0; + + c = &cpuid_ent[cpuid_i++]; + c->function = HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES; + c->eax = HV_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING; + c->ebx = 0; + c->ecx = 0; + c->edx = 0; } return cpuid_i; @@ -1422,8 +1565,6 @@ static bool evmcs_version_supported(uint16_t evmcs_version, (max_version <= max_supported_version); } -#define DEFAULT_EVMCS_VERSION ((1 << 8) | 1) - static int hyperv_init_vcpu(X86CPU *cpu) { CPUState *cs = CPU(cpu); @@ -1435,9 +1576,8 @@ static int hyperv_init_vcpu(X86CPU *cpu) "'hv-passthrough' CPU flag prevents migration, use explicit" " set of hv-* flags instead"); ret = migrate_add_blocker(hv_passthrough_mig_blocker, &local_err); - if (local_err) { + if (ret < 0) { error_report_err(local_err); - error_free(hv_passthrough_mig_blocker); return ret; } } @@ -1450,9 +1590,8 @@ static int hyperv_init_vcpu(X86CPU *cpu) " make sure SMT is disabled and/or that vCPUs are properly" " pinned)"); ret = migrate_add_blocker(hv_no_nonarch_cs_mig_blocker, &local_err); - if (local_err) { + if (ret < 0) { error_report_err(local_err); - error_free(hv_no_nonarch_cs_mig_blocker); return ret; } } @@ -1462,21 +1601,14 @@ static int hyperv_init_vcpu(X86CPU *cpu) * the kernel doesn't support setting vp_index; assert that its value * is in sync */ - struct { - struct kvm_msrs info; - struct kvm_msr_entry entries[1]; - } msr_data = { - .info.nmsrs = 1, - .entries[0].index = HV_X64_MSR_VP_INDEX, - }; + uint64_t value; - ret = kvm_vcpu_ioctl(cs, KVM_GET_MSRS, &msr_data); + ret = kvm_get_one_msr(cpu, HV_X64_MSR_VP_INDEX, &value); if (ret < 0) { return ret; } - assert(ret == 1); - if (msr_data.entries[0].data != hyperv_vp_index(CPU(cpu))) { + if (value != hyperv_vp_index(CPU(cpu))) { error_report("kernel's vp_index != QEMU's vp_index"); return -ENXIO; } @@ -1529,8 +1661,15 @@ static int hyperv_init_vcpu(X86CPU *cpu) supported_evmcs_version >> 8); return -ENOTSUP; } + } - cpu->hyperv_nested[0] = evmcs_version; + if (cpu->hyperv_enforce_cpuid) { + ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENFORCE_CPUID, 0, 1); + if (ret < 0) { + error_report("failed to enable KVM_CAP_HYPERV_ENFORCE_CPUID: %s", + strerror(-ret)); + return ret; + } } return 0; @@ -1540,6 +1679,50 @@ static Error *invtsc_mig_blocker; #define KVM_MAX_CPUID_ENTRIES 100 +static void kvm_init_xsave(CPUX86State *env) +{ + if (has_xsave2) { + env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096); + } else if (has_xsave) { + env->xsave_buf_len = sizeof(struct kvm_xsave); + } else { + return; + } + + env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len); + memset(env->xsave_buf, 0, env->xsave_buf_len); + /* + * The allocated storage must be large enough for all of the + * possible XSAVE state components. 
+ */ + assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX) <= + env->xsave_buf_len); +} + +static void kvm_init_nested_state(CPUX86State *env) +{ + struct kvm_vmx_nested_state_hdr *vmx_hdr; + uint32_t size; + + if (!env->nested_state) { + return; + } + + size = env->nested_state->size; + + memset(env->nested_state, 0, size); + env->nested_state->size = size; + + if (cpu_has_vmx(env)) { + env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX; + vmx_hdr = &env->nested_state->hdr.vmx; + vmx_hdr->vmxon_pa = -1ull; + vmx_hdr->vmcs12_pa = -1ull; + } else if (cpu_has_svm(env)) { + env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM; + } +} + int kvm_arch_init_vcpu(CPUState *cs) { struct { @@ -1569,6 +1752,8 @@ int kvm_arch_init_vcpu(CPUState *cs) cpuid_i = 0; + has_xsave2 = kvm_check_extension(cs->kvm_state, KVM_CAP_XSAVE2); + r = kvm_arch_set_tsc_khz(cs); if (r < 0) { return r; @@ -1631,6 +1816,16 @@ int kvm_arch_init_vcpu(CPUState *cs) cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused); + if (cpu->kvm_pv_enforce_cpuid) { + r = kvm_vcpu_enable_cap(cs, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 0, 1); + if (r < 0) { + fprintf(stderr, + "failed to enable KVM_CAP_ENFORCE_PV_FEATURE_CPUID: %s", + strerror(-r)); + abort(); + } + } + for (i = 0; i <= limit; i++) { if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { fprintf(stderr, "unsupported level value: 0x%x\n", limit); @@ -1705,7 +1900,28 @@ int kvm_arch_init_vcpu(CPUState *cs) } break; case 0x7: - case 0x14: { + case 0x12: + for (j = 0; ; j++) { + c->function = i; + c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX; + c->index = j; + cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx); + + if (j > 1 && (c->eax & 0xf) != 1) { + break; + } + + if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { + fprintf(stderr, "cpuid_data is full, no space for " + "cpuid(eax:0x12,ecx:0x%x)\n", j); + abort(); + } + c = &cpuid_data.entries[cpuid_i++]; + } + break; + case 0x14: + case 0x1d: + case 0x1e: { uint32_t times; c->function = i; @@ -1879,6 +2095,11 @@ int kvm_arch_init_vcpu(CPUState *cs) !!(c->ecx & CPUID_EXT_SMX); } + c = cpuid_find_entry(&cpuid_data.cpuid, 7, 0); + if (c && (c->ebx & CPUID_7_0_EBX_SGX)) { + has_msr_feature_control = true; + } + if (env->mcg_cap & MCG_LMCE_P) { has_msr_mcg_ext_ctl = has_msr_feature_control = true; } @@ -1890,9 +2111,8 @@ int kvm_arch_init_vcpu(CPUState *cs) "State blocked by non-migratable CPU device" " (invtsc flag)"); r = migrate_add_blocker(invtsc_mig_blocker, &local_err); - if (local_err) { + if (r < 0) { error_report_err(local_err); - error_free(invtsc_mig_blocker); return r; } } @@ -1923,38 +2143,17 @@ int kvm_arch_init_vcpu(CPUState *cs) if (r) { goto fail; } - - if (has_xsave) { - env->xsave_buf_len = sizeof(struct kvm_xsave); - env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len); - memset(env->xsave_buf, 0, env->xsave_buf_len); - - /* - * The allocated storage must be large enough for all of the - * possible XSAVE state components. 
- */ - assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX) - <= env->xsave_buf_len); - } + kvm_init_xsave(env); max_nested_state_len = kvm_max_nested_state_length(); if (max_nested_state_len > 0) { assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data)); if (cpu_has_vmx(env) || cpu_has_svm(env)) { - struct kvm_vmx_nested_state_hdr *vmx_hdr; - env->nested_state = g_malloc0(max_nested_state_len); env->nested_state->size = max_nested_state_len; - if (cpu_has_vmx(env)) { - env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX; - vmx_hdr = &env->nested_state->hdr.vmx; - vmx_hdr->vmxon_pa = -1ull; - vmx_hdr->vmcs12_pa = -1ull; - } else { - env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM; - } + kvm_init_nested_state(env); } } @@ -1979,15 +2178,13 @@ int kvm_arch_destroy_vcpu(CPUState *cs) X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; - if (cpu->kvm_msr_buf) { - g_free(cpu->kvm_msr_buf); - cpu->kvm_msr_buf = NULL; - } + g_free(env->xsave_buf); - if (env->nested_state) { - g_free(env->nested_state); - env->nested_state = NULL; - } + g_free(cpu->kvm_msr_buf); + cpu->kvm_msr_buf = NULL; + + g_free(env->nested_state); + env->nested_state = NULL; qemu_del_vm_change_state_handler(cpu->vmsentry); @@ -2006,18 +2203,30 @@ void kvm_arch_reset_vcpu(X86CPU *cpu) env->mp_state = KVM_MP_STATE_RUNNABLE; } + /* enabled by default */ + env->poll_control_msr = 1; + + kvm_init_nested_state(env); + + sev_es_set_reset_vector(CPU(cpu)); +} + +void kvm_arch_after_reset_vcpu(X86CPU *cpu) +{ + CPUX86State *env = &cpu->env; + int i; + + /* + * Reset SynIC after all other devices have been reset to let them remove + * their SINT routes first. + */ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) { - int i; for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) { env->msr_hv_synic_sint[i] = HV_SINT_MASKED; } hyperv_x86_synic_reset(cpu); } - /* enabled by default */ - env->poll_control_msr = 1; - - sev_es_set_reset_vector(CPU(cpu)); } void kvm_arch_do_init_vcpu(X86CPU *cpu) @@ -2053,8 +2262,7 @@ static int kvm_get_supported_feature_msrs(KVMState *s) } assert(msr_list.nmsrs > 0); - kvm_feature_msrs = (struct kvm_msr_list *) \ - g_malloc0(sizeof(msr_list) + + kvm_feature_msrs = g_malloc0(sizeof(msr_list) + msr_list.nmsrs * sizeof(msr_list.indices[0])); kvm_feature_msrs->nmsrs = msr_list.nmsrs; @@ -2157,9 +2365,15 @@ static int kvm_get_supported_msrs(KVMState *s) case HV_X64_MSR_REENLIGHTENMENT_CONTROL: has_msr_hv_reenlightenment = true; break; + case HV_X64_MSR_SYNDBG_OPTIONS: + has_msr_hv_syndbg_options = true; + break; case MSR_IA32_SPEC_CTRL: has_msr_spec_ctrl = true; break; + case MSR_AMD64_TSC_RATIO: + has_tsc_scale_msr = true; + break; case MSR_IA32_TSX_CTRL: has_msr_tsx_ctrl = true; break; @@ -2196,6 +2410,17 @@ static int kvm_get_supported_msrs(KVMState *s) return ret; } +static bool kvm_rdmsr_core_thread_count(X86CPU *cpu, uint32_t msr, + uint64_t *val) +{ + CPUState *cs = CPU(cpu); + + *val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */ + *val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */ + + return true; +} + static Notifier smram_machine_done; static KVMMemoryListener smram_listener; static AddressSpace smram_address_space; @@ -2227,7 +2452,7 @@ static void register_smram_listener(Notifier *n, void *unused) address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM"); kvm_memory_listener_register(kvm_state, &smram_listener, - &smram_address_space, 1); + &smram_address_space, 1, "kvm-smram"); } int kvm_arch_init(MachineState 
*ms, KVMState *s) @@ -2264,6 +2489,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s) has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE); has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS); has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2); + has_sregs2 = kvm_check_extension(s, KVM_CAP_SREGS2) > 0; hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX); @@ -2277,6 +2503,16 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } } + has_triple_fault_event = kvm_check_extension(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT); + if (has_triple_fault_event) { + ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true); + if (ret < 0) { + error_report("kvm: Failed to enable triple fault event cap: %s", + strerror(-ret)); + return ret; + } + } + ret = kvm_get_supported_msrs(s); if (ret < 0) { return ret; @@ -2382,6 +2618,40 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } } + if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE && + kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) { + uint64_t notify_window_flags = + ((uint64_t)s->notify_window << 32) | + KVM_X86_NOTIFY_VMEXIT_ENABLED | + KVM_X86_NOTIFY_VMEXIT_USER; + ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0, + notify_window_flags); + if (ret < 0) { + error_report("kvm: Failed to enable notify vmexit cap: %s", + strerror(-ret)); + return ret; + } + } + if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) { + bool r; + + ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0, + KVM_MSR_EXIT_REASON_FILTER); + if (ret) { + error_report("Could not enable user space MSRs: %s", + strerror(-ret)); + exit(1); + } + + r = kvm_filter_msr(s, MSR_CORE_THREAD_COUNT, + kvm_rdmsr_core_thread_count, NULL); + if (!r) { + error_report("Could not install MSR_CORE_THREAD_COUNT handler: %s", + strerror(-ret)); + exit(1); + } + } + return 0; } @@ -2545,11 +2815,11 @@ static int kvm_put_sregs(X86CPU *cpu) CPUX86State *env = &cpu->env; struct kvm_sregs sregs; + /* + * The interrupt_bitmap is ignored because KVM_SET_SREGS is + * always followed by KVM_SET_VCPU_EVENTS. 
+ */ memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap)); - if (env->interrupt_injected >= 0) { - sregs.interrupt_bitmap[env->interrupt_injected / 64] |= - (uint64_t)1 << (env->interrupt_injected % 64); - } if ((env->eflags & VM_MASK)) { set_v8086_seg(&sregs.cs, &env->segs[R_CS]); @@ -2590,6 +2860,61 @@ static int kvm_put_sregs(X86CPU *cpu) return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs); } +static int kvm_put_sregs2(X86CPU *cpu) +{ + CPUX86State *env = &cpu->env; + struct kvm_sregs2 sregs; + int i; + + sregs.flags = 0; + + if ((env->eflags & VM_MASK)) { + set_v8086_seg(&sregs.cs, &env->segs[R_CS]); + set_v8086_seg(&sregs.ds, &env->segs[R_DS]); + set_v8086_seg(&sregs.es, &env->segs[R_ES]); + set_v8086_seg(&sregs.fs, &env->segs[R_FS]); + set_v8086_seg(&sregs.gs, &env->segs[R_GS]); + set_v8086_seg(&sregs.ss, &env->segs[R_SS]); + } else { + set_seg(&sregs.cs, &env->segs[R_CS]); + set_seg(&sregs.ds, &env->segs[R_DS]); + set_seg(&sregs.es, &env->segs[R_ES]); + set_seg(&sregs.fs, &env->segs[R_FS]); + set_seg(&sregs.gs, &env->segs[R_GS]); + set_seg(&sregs.ss, &env->segs[R_SS]); + } + + set_seg(&sregs.tr, &env->tr); + set_seg(&sregs.ldt, &env->ldt); + + sregs.idt.limit = env->idt.limit; + sregs.idt.base = env->idt.base; + memset(sregs.idt.padding, 0, sizeof sregs.idt.padding); + sregs.gdt.limit = env->gdt.limit; + sregs.gdt.base = env->gdt.base; + memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding); + + sregs.cr0 = env->cr[0]; + sregs.cr2 = env->cr[2]; + sregs.cr3 = env->cr[3]; + sregs.cr4 = env->cr[4]; + + sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state); + sregs.apic_base = cpu_get_apic_base(cpu->apic_state); + + sregs.efer = env->efer; + + if (env->pdptrs_valid) { + for (i = 0; i < 4; i++) { + sregs.pdptrs[i] = env->pdptrs[i]; + } + sregs.flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID; + } + + return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS2, &sregs); +} + + static void kvm_msr_buf_reset(X86CPU *cpu) { memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE); @@ -2617,6 +2942,25 @@ static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value) return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); } +static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value) +{ + int ret; + struct { + struct kvm_msrs info; + struct kvm_msr_entry entries[1]; + } msr_data = { + .info.nmsrs = 1, + .entries[0].index = index, + }; + + ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data); + if (ret < 0) { + return ret; + } + assert(ret == 1); + *value = msr_data.entries[0].data; + return ret; +} void kvm_put_apicbase(X86CPU *cpu, uint64_t value) { int ret; @@ -2916,6 +3260,10 @@ static int kvm_put_msrs(X86CPU *cpu, int level) if (has_msr_spec_ctrl) { kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl); } + if (has_tsc_scale_msr) { + kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, env->amd_tsc_scale_msr); + } + if (has_msr_tsx_ctrl) { kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl); } @@ -3011,6 +3359,13 @@ static int kvm_put_msrs(X86CPU *cpu, int level) kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, env->msr_hv_tsc_emulation_status); } +#ifdef CONFIG_SYNDBG + if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG) && + has_msr_hv_syndbg_options) { + kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS, + hyperv_syndbg_query_options()); + } +#endif } if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) { kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, @@ -3110,6 +3465,55 @@ static int kvm_put_msrs(X86CPU *cpu, int level) } } + if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) { + 
kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0, + env->msr_ia32_sgxlepubkeyhash[0]); + kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1, + env->msr_ia32_sgxlepubkeyhash[1]); + kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2, + env->msr_ia32_sgxlepubkeyhash[2]); + kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, + env->msr_ia32_sgxlepubkeyhash[3]); + } + + if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) { + kvm_msr_entry_add(cpu, MSR_IA32_XFD, + env->msr_xfd); + kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, + env->msr_xfd_err); + } + + if (kvm_enabled() && cpu->enable_pmu && + (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) { + uint64_t depth; + int i, ret; + + /* + * Only migrate Arch LBR states when the host Arch LBR depth + * equals that of source guest's, this is to avoid mismatch + * of guest/host config for the msr hence avoid unexpected + * misbehavior. + */ + ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth); + + if (ret == 1 && !!depth && depth == env->msr_lbr_depth) { + kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, env->msr_lbr_ctl); + kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, env->msr_lbr_depth); + + for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) { + if (!env->lbr_records[i].from) { + continue; + } + kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i, + env->lbr_records[i].from); + kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i, + env->lbr_records[i].to); + kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i, + env->lbr_records[i].info); + } + } + } + /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see * kvm_put_msr_feature_control. */ } @@ -3165,13 +3569,14 @@ static int kvm_get_xsave(X86CPU *cpu) { CPUX86State *env = &cpu->env; void *xsave = env->xsave_buf; - int ret; + int type, ret; if (!has_xsave) { return kvm_get_fpu(cpu); } - ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave); + type = has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE; + ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave); if (ret < 0) { return ret; } @@ -3209,22 +3614,55 @@ static int kvm_get_sregs(X86CPU *cpu) { CPUX86State *env = &cpu->env; struct kvm_sregs sregs; - int bit, i, ret; + int ret; ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs); if (ret < 0) { return ret; } - /* There can only be one pending IRQ set in the bitmap at a time, so try - to find it and save its number instead (-1 for none). */ - env->interrupt_injected = -1; - for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) { - if (sregs.interrupt_bitmap[i]) { - bit = ctz64(sregs.interrupt_bitmap[i]); - env->interrupt_injected = i * 64 + bit; - break; - } + /* + * The interrupt_bitmap is ignored because KVM_GET_SREGS is + * always preceded by KVM_GET_VCPU_EVENTS. 
+ */ + + get_seg(&env->segs[R_CS], &sregs.cs); + get_seg(&env->segs[R_DS], &sregs.ds); + get_seg(&env->segs[R_ES], &sregs.es); + get_seg(&env->segs[R_FS], &sregs.fs); + get_seg(&env->segs[R_GS], &sregs.gs); + get_seg(&env->segs[R_SS], &sregs.ss); + + get_seg(&env->tr, &sregs.tr); + get_seg(&env->ldt, &sregs.ldt); + + env->idt.limit = sregs.idt.limit; + env->idt.base = sregs.idt.base; + env->gdt.limit = sregs.gdt.limit; + env->gdt.base = sregs.gdt.base; + + env->cr[0] = sregs.cr0; + env->cr[2] = sregs.cr2; + env->cr[3] = sregs.cr3; + env->cr[4] = sregs.cr4; + + env->efer = sregs.efer; + + /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */ + x86_update_hflags(env); + + return 0; +} + +static int kvm_get_sregs2(X86CPU *cpu) +{ + CPUX86State *env = &cpu->env; + struct kvm_sregs2 sregs; + int i, ret; + + ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS2, &sregs); + if (ret < 0) { + return ret; } get_seg(&env->segs[R_CS], &sregs.cs); @@ -3249,6 +3687,14 @@ static int kvm_get_sregs(X86CPU *cpu) env->efer = sregs.efer; + env->pdptrs_valid = sregs.flags & KVM_SREGS2_FLAGS_PDPTRS_VALID; + + if (env->pdptrs_valid) { + for (i = 0; i < 4; i++) { + env->pdptrs[i] = sregs.pdptrs[i]; + } + } + /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */ x86_update_hflags(env); @@ -3310,6 +3756,10 @@ static int kvm_get_msrs(X86CPU *cpu) if (has_msr_spec_ctrl) { kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0); } + if (has_tsc_scale_msr) { + kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, 0); + } + if (has_msr_tsx_ctrl) { kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0); } @@ -3388,6 +3838,9 @@ static int kvm_get_msrs(X86CPU *cpu) kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0); kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0); } + if (has_msr_hv_syndbg_options) { + kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS, 0); + } if (has_msr_hv_crash) { int j; @@ -3449,6 +3902,36 @@ static int kvm_get_msrs(X86CPU *cpu) } } + if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) { + kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0, 0); + kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1, 0); + kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2, 0); + kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 0); + } + + if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) { + kvm_msr_entry_add(cpu, MSR_IA32_XFD, 0); + kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0); + } + + if (kvm_enabled() && cpu->enable_pmu && + (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) { + uint64_t depth; + int i, ret; + + ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth); + if (ret == 1 && depth == ARCH_LBR_NR_ENTRIES) { + kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, 0); + kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, 0); + + for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) { + kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i, 0); + kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i, 0); + kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i, 0); + } + } + } + ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf); if (ret < 0) { return ret; @@ -3667,6 +4150,9 @@ static int kvm_get_msrs(X86CPU *cpu) case HV_X64_MSR_TSC_EMULATION_STATUS: env->msr_hv_tsc_emulation_status = msrs[i].data; break; + case HV_X64_MSR_SYNDBG_OPTIONS: + env->msr_hv_syndbg_options = msrs[i].data; + break; case MSR_MTRRdefType: env->mtrr_deftype = msrs[i].data; break; @@ -3714,6 +4200,9 @@ static int kvm_get_msrs(X86CPU *cpu) case MSR_IA32_SPEC_CTRL: env->spec_ctrl = msrs[i].data; break; + case MSR_AMD64_TSC_RATIO: + env->amd_tsc_scale_msr = 
msrs[i].data; + break; case MSR_IA32_TSX_CTRL: env->tsx_ctrl = msrs[i].data; break; @@ -3738,6 +4227,31 @@ static int kvm_get_msrs(X86CPU *cpu) case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data; break; + case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3: + env->msr_ia32_sgxlepubkeyhash[index - MSR_IA32_SGXLEPUBKEYHASH0] = + msrs[i].data; + break; + case MSR_IA32_XFD: + env->msr_xfd = msrs[i].data; + break; + case MSR_IA32_XFD_ERR: + env->msr_xfd_err = msrs[i].data; + break; + case MSR_ARCH_LBR_CTL: + env->msr_lbr_ctl = msrs[i].data; + break; + case MSR_ARCH_LBR_DEPTH: + env->msr_lbr_depth = msrs[i].data; + break; + case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31: + env->lbr_records[index - MSR_ARCH_LBR_FROM_0].from = msrs[i].data; + break; + case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31: + env->lbr_records[index - MSR_ARCH_LBR_TO_0].to = msrs[i].data; + break; + case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31: + env->lbr_records[index - MSR_ARCH_LBR_INFO_0].info = msrs[i].data; + break; } } @@ -3849,6 +4363,11 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level) } } + if (has_triple_fault_event) { + events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT; + events.triple_fault.pending = env->triple_fault_pending; + } + return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events); } @@ -3918,6 +4437,10 @@ static int kvm_get_vcpu_events(X86CPU *cpu) } } + if (events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) { + env->triple_fault_pending = events.triple_fault.pending; + } + env->sipi_vector = events.sipi_vector; return 0; @@ -4079,8 +4602,20 @@ int kvm_arch_put_registers(CPUState *cpu, int level) assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu)); + /* + * Put MSR_IA32_FEATURE_CONTROL first, this ensures the VM gets out of VMX + * root operation upon vCPU reset. kvm_put_msr_feature_control() should also + * preceed kvm_put_nested_state() when 'real' nested state is set. + */ + if (level >= KVM_PUT_RESET_STATE) { + ret = kvm_put_msr_feature_control(x86_cpu); + if (ret < 0) { + return ret; + } + } + /* must be before kvm_put_nested_state so that EFER.SVME is set */ - ret = kvm_put_sregs(x86_cpu); + ret = has_sregs2 ? kvm_put_sregs2(x86_cpu) : kvm_put_sregs(x86_cpu); if (ret < 0) { return ret; } @@ -4090,11 +4625,6 @@ int kvm_arch_put_registers(CPUState *cpu, int level) if (ret < 0) { return ret; } - - ret = kvm_put_msr_feature_control(x86_cpu); - if (ret < 0) { - return ret; - } } if (level == KVM_PUT_FULL_STATE) { @@ -4185,7 +4715,7 @@ int kvm_arch_get_registers(CPUState *cs) if (ret < 0) { goto out; } - ret = kvm_get_sregs(cpu); + ret = has_sregs2 ? 
kvm_get_sregs2(cpu) : kvm_get_sregs(cpu); if (ret < 0) { goto out; } @@ -4620,6 +5150,137 @@ void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg) } } +static bool kvm_install_msr_filters(KVMState *s) +{ + uint64_t zero = 0; + struct kvm_msr_filter filter = { + .flags = KVM_MSR_FILTER_DEFAULT_ALLOW, + }; + int r, i, j = 0; + + for (i = 0; i < KVM_MSR_FILTER_MAX_RANGES; i++) { + KVMMSRHandlers *handler = &msr_handlers[i]; + if (handler->msr) { + struct kvm_msr_filter_range *range = &filter.ranges[j++]; + + *range = (struct kvm_msr_filter_range) { + .flags = 0, + .nmsrs = 1, + .base = handler->msr, + .bitmap = (__u8 *)&zero, + }; + + if (handler->rdmsr) { + range->flags |= KVM_MSR_FILTER_READ; + } + + if (handler->wrmsr) { + range->flags |= KVM_MSR_FILTER_WRITE; + } + } + } + + r = kvm_vm_ioctl(s, KVM_X86_SET_MSR_FILTER, &filter); + if (r) { + return false; + } + + return true; +} + +bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr, + QEMUWRMSRHandler *wrmsr) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) { + if (!msr_handlers[i].msr) { + msr_handlers[i] = (KVMMSRHandlers) { + .msr = msr, + .rdmsr = rdmsr, + .wrmsr = wrmsr, + }; + + if (!kvm_install_msr_filters(s)) { + msr_handlers[i] = (KVMMSRHandlers) { }; + return false; + } + + return true; + } + } + + return false; +} + +static int kvm_handle_rdmsr(X86CPU *cpu, struct kvm_run *run) +{ + int i; + bool r; + + for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) { + KVMMSRHandlers *handler = &msr_handlers[i]; + if (run->msr.index == handler->msr) { + if (handler->rdmsr) { + r = handler->rdmsr(cpu, handler->msr, + (uint64_t *)&run->msr.data); + run->msr.error = r ? 0 : 1; + return 0; + } + } + } + + assert(false); +} + +static int kvm_handle_wrmsr(X86CPU *cpu, struct kvm_run *run) +{ + int i; + bool r; + + for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) { + KVMMSRHandlers *handler = &msr_handlers[i]; + if (run->msr.index == handler->msr) { + if (handler->wrmsr) { + r = handler->wrmsr(cpu, handler->msr, run->msr.data); + run->msr.error = r ? 0 : 1; + return 0; + } + } + } + + assert(false); +} + +static bool has_sgx_provisioning; + +static bool __kvm_enable_sgx_provisioning(KVMState *s) +{ + int fd, ret; + + if (!kvm_vm_check_extension(s, KVM_CAP_SGX_ATTRIBUTE)) { + return false; + } + + fd = qemu_open_old("/dev/sgx_provision", O_RDONLY); + if (fd < 0) { + return false; + } + + ret = kvm_vm_enable_cap(s, KVM_CAP_SGX_ATTRIBUTE, 0, fd); + if (ret) { + error_report("Could not enable SGX PROVISIONKEY: %s", strerror(-ret)); + exit(1); + } + close(fd); + return true; +} + +bool kvm_enable_sgx_provisioning(KVMState *s) +{ + return MEMORIZE(__kvm_enable_sgx_provisioning(s), has_sgx_provisioning); +} + static bool host_supports_vmx(void) { uint32_t ecx, unused; @@ -4635,6 +5296,9 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) X86CPU *cpu = X86_CPU(cs); uint64_t code; int ret; + bool ctx_invalid; + char str[256]; + KVMState *state; switch (run->exit_reason) { case KVM_EXIT_HLT: @@ -4690,6 +5354,31 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) /* already handled in kvm_arch_post_run */ ret = 0; break; + case KVM_EXIT_NOTIFY: + ctx_invalid = !!(run->notify.flags & KVM_NOTIFY_CONTEXT_INVALID); + state = KVM_STATE(current_accel()); + sprintf(str, "Encounter a notify exit with %svalid context in" + " guest. There can be possible misbehaves in guest." + " Please have a look.", ctx_invalid ? 
"in" : ""); + if (ctx_invalid || + state->notify_vmexit == NOTIFY_VMEXIT_OPTION_INTERNAL_ERROR) { + warn_report("KVM internal error: %s", str); + ret = -1; + } else { + warn_report_once("KVM: %s", str); + ret = 0; + } + break; + case KVM_EXIT_X86_RDMSR: + /* We only enable MSR filtering, any other exit is bogus */ + assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER); + ret = kvm_handle_rdmsr(cpu, run); + break; + case KVM_EXIT_X86_WRMSR: + /* We only enable MSR filtering, any other exit is bogus */ + assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER); + ret = kvm_handle_wrmsr(cpu, run); + break; default: fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason); ret = -1; @@ -4719,16 +5408,18 @@ void kvm_arch_init_irq_routing(KVMState *s) kvm_gsi_routing_allowed = true; if (kvm_irqchip_is_split()) { + KVMRouteChange c = kvm_irqchip_begin_route_changes(s); int i; /* If the ioapic is in QEMU and the lapics are in KVM, reserve MSI routes for signaling interrupts to the local apics. */ for (i = 0; i < IOAPIC_NUM_PINS; i++) { - if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) { + if (kvm_irqchip_add_msi_route(&c, 0, NULL) < 0) { error_report("Could not enable split IRQ mode."); exit(1); } } + kvm_irqchip_commit_route_changes(&c); } } @@ -4928,3 +5619,112 @@ bool kvm_arch_cpu_check_are_resettable(void) { return !sev_es_enabled(); } + +#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025 + +void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask) +{ + KVMState *s = kvm_state; + uint64_t supported; + + mask &= XSTATE_DYNAMIC_MASK; + if (!mask) { + return; + } + /* + * Just ignore bits that are not in CPUID[EAX=0xD,ECX=0]. + * ARCH_REQ_XCOMP_GUEST_PERM would fail, and QEMU has warned + * about them already because they are not supported features. + */ + supported = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX); + supported |= (uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32; + mask &= supported; + + while (mask) { + int bit = ctz64(mask); + int rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit); + if (rc) { + /* + * Older kernel version (<5.17) do not support + * ARCH_REQ_XCOMP_GUEST_PERM, but also do not return + * any dynamic feature from kvm_arch_get_supported_cpuid. 
+ */ + warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure " + "for feature bit %d", bit); + } + mask &= ~BIT_ULL(bit); + } +} + +static int kvm_arch_get_notify_vmexit(Object *obj, Error **errp) +{ + KVMState *s = KVM_STATE(obj); + return s->notify_vmexit; +} + +static void kvm_arch_set_notify_vmexit(Object *obj, int value, Error **errp) +{ + KVMState *s = KVM_STATE(obj); + + if (s->fd != -1) { + error_setg(errp, "Cannot set properties after the accelerator has been initialized"); + return; + } + + s->notify_vmexit = value; +} + +static void kvm_arch_get_notify_window(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + uint32_t value = s->notify_window; + + visit_type_uint32(v, name, &value, errp); +} + +static void kvm_arch_set_notify_window(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + Error *error = NULL; + uint32_t value; + + if (s->fd != -1) { + error_setg(errp, "Cannot set properties after the accelerator has been initialized"); + return; + } + + visit_type_uint32(v, name, &value, &error); + if (error) { + error_propagate(errp, error); + return; + } + + s->notify_window = value; +} + +void kvm_arch_accel_class_init(ObjectClass *oc) +{ + object_class_property_add_enum(oc, "notify-vmexit", "NotifyVMexitOption", + &NotifyVmexitOption_lookup, + kvm_arch_get_notify_vmexit, + kvm_arch_set_notify_vmexit); + object_class_property_set_description(oc, "notify-vmexit", + "Enable notify VM exit"); + + object_class_property_add(oc, "notify-window", "uint32", + kvm_arch_get_notify_window, + kvm_arch_set_notify_window, + NULL, NULL); + object_class_property_set_description(oc, "notify-window", + "Clock cycles without an event window " + "after which a notification VM exit occurs"); +} + +void kvm_set_max_apic_id(uint32_t max_apic_id) +{ + kvm_vm_enable_cap(kvm_state, KVM_CAP_MAX_VCPU_ID, 0, max_apic_id); +} diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h index 54667b35f0..6a5c24e3dc 100644 --- a/target/i386/kvm/kvm_i386.h +++ b/target/i386/kvm/kvm_i386.h @@ -38,6 +38,7 @@ bool kvm_has_adjust_clock_stable(void); bool kvm_has_exception_payload(void); void kvm_synchronize_all_tsc(void); void kvm_arch_reset_vcpu(X86CPU *cs); +void kvm_arch_after_reset_vcpu(X86CPU *cpu); void kvm_arch_do_init_vcpu(X86CPU *cs); void kvm_put_apicbase(X86CPU *cpu, uint64_t value); @@ -51,4 +52,20 @@ bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp); uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address); +bool kvm_enable_sgx_provisioning(KVMState *s); +void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask); + +typedef bool QEMURDMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t *val); +typedef bool QEMUWRMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t val); +typedef struct kvm_msr_handlers { + uint32_t msr; + QEMURDMSRHandler *rdmsr; + QEMUWRMSRHandler *wrmsr; +} KVMMSRHandlers; + +bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr, + QEMUWRMSRHandler *wrmsr); + +void kvm_set_max_apic_id(uint32_t max_apic_id); + #endif diff --git a/target/i386/kvm/meson.build b/target/i386/kvm/meson.build index 0a533411ca..736df8b72e 100644 --- a/target/i386/kvm/meson.build +++ b/target/i386/kvm/meson.build @@ -1,8 +1,14 @@ i386_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c')) -i386_softmmu_ss.add(when: 'CONFIG_KVM', if_true: files( +i386_softmmu_kvm_ss = ss.source_set() + +i386_softmmu_kvm_ss.add(files( 'kvm.c', 'kvm-cpu.c', )) 
+i386_softmmu_kvm_ss.add(when: 'CONFIG_SEV', if_false: files('sev-stub.c')) + i386_softmmu_ss.add(when: 'CONFIG_HYPERV', if_true: files('hyperv.c'), if_false: files('hyperv-stub.c')) + +i386_softmmu_ss.add_all(when: 'CONFIG_KVM', if_true: i386_softmmu_kvm_ss) diff --git a/accel/kvm/sev-stub.c b/target/i386/kvm/sev-stub.c similarity index 89% rename from accel/kvm/sev-stub.c rename to target/i386/kvm/sev-stub.c index 9587d1b2a3..1be5341e8a 100644 --- a/accel/kvm/sev-stub.c +++ b/target/i386/kvm/sev-stub.c @@ -12,8 +12,7 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" -#include "sysemu/sev.h" +#include "sev.h" int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) { diff --git a/target/i386/machine.c b/target/i386/machine.c index f6f094f1c9..310b125235 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -136,6 +136,22 @@ static const VMStateDescription vmstate_mtrr_var = { #define VMSTATE_MTRR_VARS(_field, _state, _n, _v) \ VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar) +static const VMStateDescription vmstate_lbr_records_var = { + .name = "lbr_records_var", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(from, LBREntry), + VMSTATE_UINT64(to, LBREntry), + VMSTATE_UINT64(info, LBREntry), + VMSTATE_END_OF_LIST() + } +}; + +#define VMSTATE_LBR_VARS(_field, _state, _n, _v) \ + VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_lbr_records_var, \ + LBREntry) + typedef struct x86_FPReg_tmp { FPReg *parent; uint64_t tmp_mant; @@ -203,7 +219,7 @@ static int cpu_pre_save(void *opaque) X86CPU *cpu = opaque; CPUX86State *env = &cpu->env; int i; - + env->v_tpr = env->int_ctl & V_TPR_MASK; /* FPU */ env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; env->fptag_vmstate = 0; @@ -1280,6 +1296,27 @@ static const VMStateDescription vmstate_spec_ctrl = { } }; + +static bool amd_tsc_scale_msr_needed(void *opaque) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + + return (env->features[FEAT_SVM] & CPUID_SVM_TSCSCALE); +} + +static const VMStateDescription amd_tsc_scale_msr_ctrl = { + .name = "cpu/amd_tsc_scale_msr", + .version_id = 1, + .minimum_version_id = 1, + .needed = amd_tsc_scale_msr_needed, + .fields = (VMStateField[]){ + VMSTATE_UINT64(env.amd_tsc_scale_msr, X86CPU), + VMSTATE_END_OF_LIST() + } +}; + + static bool intel_pt_enable_needed(void *opaque) { X86CPU *cpu = opaque; @@ -1356,6 +1393,25 @@ static const VMStateDescription vmstate_svm_npt = { } }; +static bool svm_guest_needed(void *opaque) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + + return tcg_enabled() && env->int_ctl; +} + +static const VMStateDescription vmstate_svm_guest = { + .name = "cpu/svm_guest", + .version_id = 1, + .minimum_version_id = 1, + .needed = svm_guest_needed, + .fields = (VMStateField[]){ + VMSTATE_UINT32(env.int_ctl, X86CPU), + VMSTATE_END_OF_LIST() + } +}; + #ifndef TARGET_X86_64 static bool intel_efer32_needed(void *opaque) { @@ -1396,6 +1452,135 @@ static const VMStateDescription vmstate_msr_tsx_ctrl = { } }; +static bool intel_sgx_msrs_needed(void *opaque) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + + return !!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC); +} + +static const VMStateDescription vmstate_msr_intel_sgx = { + .name = "cpu/intel_sgx", + .version_id = 1, + .minimum_version_id = 1, + .needed = intel_sgx_msrs_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64_ARRAY(env.msr_ia32_sgxlepubkeyhash, X86CPU, 4), + VMSTATE_END_OF_LIST() + } + 
}; + +static bool pdptrs_needed(void *opaque) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + return env->pdptrs_valid; +} + +static int pdptrs_post_load(void *opaque, int version_id) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + env->pdptrs_valid = true; + return 0; +} + + +static const VMStateDescription vmstate_pdptrs = { + .name = "cpu/pdptrs", + .version_id = 1, + .minimum_version_id = 1, + .needed = pdptrs_needed, + .post_load = pdptrs_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT64_ARRAY(env.pdptrs, X86CPU, 4), + VMSTATE_END_OF_LIST() + } +}; + +static bool xfd_msrs_needed(void *opaque) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + + return !!(env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD); +} + +static const VMStateDescription vmstate_msr_xfd = { + .name = "cpu/msr_xfd", + .version_id = 1, + .minimum_version_id = 1, + .needed = xfd_msrs_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(env.msr_xfd, X86CPU), + VMSTATE_UINT64(env.msr_xfd_err, X86CPU), + VMSTATE_END_OF_LIST() + } +}; + +#ifdef TARGET_X86_64 +static bool amx_xtile_needed(void *opaque) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + + return !!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_AMX_TILE); +} + +static const VMStateDescription vmstate_amx_xtile = { + .name = "cpu/intel_amx_xtile", + .version_id = 1, + .minimum_version_id = 1, + .needed = amx_xtile_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(env.xtilecfg, X86CPU, 64), + VMSTATE_UINT8_ARRAY(env.xtiledata, X86CPU, 8192), + VMSTATE_END_OF_LIST() + } +}; +#endif + +static bool arch_lbr_needed(void *opaque) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + + return !!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR); +} + +static const VMStateDescription vmstate_arch_lbr = { + .name = "cpu/arch_lbr", + .version_id = 1, + .minimum_version_id = 1, + .needed = arch_lbr_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(env.msr_lbr_ctl, X86CPU), + VMSTATE_UINT64(env.msr_lbr_depth, X86CPU), + VMSTATE_LBR_VARS(env.lbr_records, X86CPU, ARCH_LBR_NR_ENTRIES, 1), + VMSTATE_END_OF_LIST() + } +}; + +static bool triple_fault_needed(void *opaque) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + + return env->triple_fault_pending; +} + +static const VMStateDescription vmstate_triple_fault = { + .name = "cpu/triple_fault", + .version_id = 1, + .minimum_version_id = 1, + .needed = triple_fault_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(env.triple_fault_pending, X86CPU), + VMSTATE_END_OF_LIST() + } +}; + const VMStateDescription vmstate_x86_cpu = { .name = "cpu", .version_id = 12, @@ -1520,10 +1705,12 @@ const VMStateDescription vmstate_x86_cpu = { &vmstate_pkru, &vmstate_pkrs, &vmstate_spec_ctrl, + &amd_tsc_scale_msr_ctrl, &vmstate_mcg_ext_ctl, &vmstate_msr_intel_pt, &vmstate_msr_virt_ssbd, &vmstate_svm_npt, + &vmstate_svm_guest, #ifndef TARGET_X86_64 &vmstate_efer32, #endif @@ -1531,6 +1718,14 @@ const VMStateDescription vmstate_x86_cpu = { &vmstate_nested_state, #endif &vmstate_msr_tsx_ctrl, + &vmstate_msr_intel_sgx, + &vmstate_pdptrs, + &vmstate_msr_xfd, +#ifdef TARGET_X86_64 + &vmstate_amx_xtile, +#endif + &vmstate_arch_lbr, + &vmstate_triple_fault, NULL } }; diff --git a/target/i386/meson.build b/target/i386/meson.build index dac19ec00d..ae38dc9563 100644 --- a/target/i386/meson.build +++ b/target/i386/meson.build @@ -6,7 +6,7 @@ i386_ss.add(files( 'xsave_helper.c', 'cpu-dump.c', )) -i386_ss.add(when: 'CONFIG_SEV', if_true: files('host-cpu.c', 
'sev.c'), if_false: files('sev-stub.c')) +i386_ss.add(when: 'CONFIG_SEV', if_true: files('host-cpu.c')) # x86 cpu type i386_ss.add(when: 'CONFIG_KVM', if_true: files('host-cpu.c')) @@ -20,6 +20,8 @@ i386_softmmu_ss.add(files( 'monitor.c', 'cpu-sysemu.c', )) +i386_softmmu_ss.add(when: 'CONFIG_SEV', if_true: files('sev.c'), if_false: files('sev-sysemu-stub.c')) + i386_user_ss = ss.source_set() subdir('kvm') diff --git a/target/i386/monitor.c b/target/i386/monitor.c index 119211f0b0..8e4b4d600c 100644 --- a/target/i386/monitor.c +++ b/target/i386/monitor.c @@ -28,10 +28,9 @@ #include "monitor/hmp-target.h" #include "monitor/hmp.h" #include "qapi/qmp/qdict.h" +#include "qapi/qmp/qerror.h" #include "sysemu/kvm.h" -#include "sysemu/sev.h" #include "qapi/error.h" -#include "sev_i386.h" #include "qapi/qapi-commands-misc-target.h" #include "qapi/qapi-commands-misc.h" #include "hw/i386/pc.h" @@ -668,98 +667,3 @@ void hmp_info_local_apic(Monitor *mon, const QDict *qdict) } x86_cpu_dump_local_apic_state(cs, CPU_DUMP_FPU); } - -void hmp_info_io_apic(Monitor *mon, const QDict *qdict) -{ - monitor_printf(mon, "This command is obsolete and will be " - "removed soon. Please use 'info pic' instead.\n"); -} - -SevInfo *qmp_query_sev(Error **errp) -{ - SevInfo *info; - - info = sev_get_info(); - if (!info) { - error_setg(errp, "SEV feature is not available"); - return NULL; - } - - return info; -} - -void hmp_info_sev(Monitor *mon, const QDict *qdict) -{ - SevInfo *info = sev_get_info(); - - if (info && info->enabled) { - monitor_printf(mon, "handle: %d\n", info->handle); - monitor_printf(mon, "state: %s\n", SevState_str(info->state)); - monitor_printf(mon, "build: %d\n", info->build_id); - monitor_printf(mon, "api version: %d.%d\n", - info->api_major, info->api_minor); - monitor_printf(mon, "debug: %s\n", - info->policy & SEV_POLICY_NODBG ? "off" : "on"); - monitor_printf(mon, "key-sharing: %s\n", - info->policy & SEV_POLICY_NOKS ? 
"off" : "on"); - } else { - monitor_printf(mon, "SEV is not enabled\n"); - } - - qapi_free_SevInfo(info); -} - -SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp) -{ - char *data; - SevLaunchMeasureInfo *info; - - data = sev_get_launch_measurement(); - if (!data) { - error_setg(errp, "Measurement is not available"); - return NULL; - } - - info = g_malloc0(sizeof(*info)); - info->data = data; - - return info; -} - -SevCapability *qmp_query_sev_capabilities(Error **errp) -{ - return sev_get_capabilities(errp); -} - -#define SEV_SECRET_GUID "4c2eb361-7d9b-4cc3-8081-127c90d3d294" -struct sev_secret_area { - uint32_t base; - uint32_t size; -}; - -void qmp_sev_inject_launch_secret(const char *packet_hdr, - const char *secret, - bool has_gpa, uint64_t gpa, - Error **errp) -{ - if (!has_gpa) { - uint8_t *data; - struct sev_secret_area *area; - - if (!pc_system_ovmf_table_find(SEV_SECRET_GUID, &data, NULL)) { - error_setg(errp, "SEV: no secret area found in OVMF," - " gpa must be specified."); - return; - } - area = (struct sev_secret_area *)data; - gpa = area->base; - } - - sev_inject_launch_secret(packet_hdr, secret, gpa, errp); -} - -SevAttestationReport * -qmp_query_sev_attestation_report(const char *mnonce, Error **errp) -{ - return sev_get_attestation_report(mnonce, errp); -} diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c index f788f75289..6c46101ac1 100644 --- a/target/i386/nvmm/nvmm-accel-ops.c +++ b/target/i386/nvmm/nvmm-accel-ops.c @@ -64,8 +64,8 @@ static void nvmm_start_vcpu_thread(CPUState *cpu) { char thread_name[VCPU_THREAD_NAME_SIZE]; - cpu->thread = g_malloc0(sizeof(QemuThread)); - cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + cpu->thread = g_new0(QemuThread, 1); + cpu->halt_cond = g_new0(QemuCond, 1); qemu_cond_init(cpu->halt_cond); snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/NVMM", cpu->cpu_index); diff --git a/target/i386/nvmm/nvmm-accel-ops.h b/target/i386/nvmm/nvmm-accel-ops.h index 43e24adcaf..7c5461bd75 100644 --- a/target/i386/nvmm/nvmm-accel-ops.h +++ b/target/i386/nvmm/nvmm-accel-ops.h @@ -7,8 +7,8 @@ * See the COPYING file in the top-level directory. 
*/ -#ifndef NVMM_CPUS_H -#define NVMM_CPUS_H +#ifndef TARGET_I386_NVMM_ACCEL_OPS_H +#define TARGET_I386_NVMM_ACCEL_OPS_H #include "sysemu/cpus.h" @@ -21,4 +21,4 @@ void nvmm_cpu_synchronize_post_reset(CPUState *cpu); void nvmm_cpu_synchronize_post_init(CPUState *cpu); void nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu); -#endif /* NVMM_CPUS_H */ +#endif /* TARGET_I386_NVMM_ACCEL_OPS_H */ diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c index dfa690d65d..b75738ee9c 100644 --- a/target/i386/nvmm/nvmm-all.c +++ b/target/i386/nvmm/nvmm-all.c @@ -11,7 +11,6 @@ #include "cpu.h" #include "exec/address-spaces.h" #include "exec/ioport.h" -#include "qemu-common.h" #include "qemu/accel.h" #include "sysemu/nvmm.h" #include "sysemu/cpus.h" @@ -85,7 +84,7 @@ nvmm_set_segment(struct nvmm_x64_state_seg *nseg, const SegmentCache *qseg) static void nvmm_set_registers(CPUState *cpu) { - struct CPUX86State *env = (CPUArchState *)cpu->env_ptr; + CPUX86State *env = cpu->env_ptr; struct nvmm_machine *mach = get_nvmm_mach(); struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); struct nvmm_vcpu *vcpu = &qcpu->vcpu; @@ -222,7 +221,7 @@ nvmm_get_segment(SegmentCache *qseg, const struct nvmm_x64_state_seg *nseg) static void nvmm_get_registers(CPUState *cpu) { - struct CPUX86State *env = (CPUArchState *)cpu->env_ptr; + CPUX86State *env = cpu->env_ptr; struct nvmm_machine *mach = get_nvmm_mach(); struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); struct nvmm_vcpu *vcpu = &qcpu->vcpu; @@ -347,7 +346,7 @@ nvmm_get_registers(CPUState *cpu) static bool nvmm_can_take_int(CPUState *cpu) { - struct CPUX86State *env = (CPUArchState *)cpu->env_ptr; + CPUX86State *env = cpu->env_ptr; struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); struct nvmm_vcpu *vcpu = &qcpu->vcpu; struct nvmm_machine *mach = get_nvmm_mach(); @@ -394,7 +393,7 @@ nvmm_can_take_nmi(CPUState *cpu) static void nvmm_vcpu_pre_run(CPUState *cpu) { - struct CPUX86State *env = (CPUArchState *)cpu->env_ptr; + CPUX86State *env = cpu->env_ptr; struct nvmm_machine *mach = get_nvmm_mach(); struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); struct nvmm_vcpu *vcpu = &qcpu->vcpu; @@ -480,7 +479,7 @@ static void nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_vcpu_exit *exit) { struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); - struct CPUX86State *env = (CPUArchState *)cpu->env_ptr; + CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); uint64_t tpr; @@ -652,7 +651,7 @@ static int nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu, struct nvmm_vcpu_exit *exit) { - struct CPUX86State *env = (CPUArchState *)cpu->env_ptr; + CPUX86State *env = cpu->env_ptr; int ret = 0; qemu_mutex_lock_iothread(); @@ -685,7 +684,7 @@ nvmm_inject_ud(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu) static int nvmm_vcpu_loop(CPUState *cpu) { - struct CPUX86State *env = (CPUArchState *)cpu->env_ptr; + CPUX86State *env = cpu->env_ptr; struct nvmm_machine *mach = get_nvmm_mach(); struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); struct nvmm_vcpu *vcpu = &qcpu->vcpu; @@ -750,7 +749,11 @@ nvmm_vcpu_loop(CPUState *cpu) nvmm_vcpu_pre_run(cpu); if (qatomic_read(&cpu->exit_request)) { +#if NVMM_USER_VERSION >= 2 nvmm_vcpu_stop(vcpu); +#else + qemu_cpu_kick_self(); +#endif } /* Read exit_request before the kernel reads the immediate exit flag */ @@ -767,6 +770,7 @@ nvmm_vcpu_loop(CPUState *cpu) switch (exit->reason) { case NVMM_VCPU_EXIT_NONE: break; +#if NVMM_USER_VERSION >= 2 case NVMM_VCPU_EXIT_STOPPED: /* * The kernel cleared the immediate exit flag; cpu->exit_request @@ -775,6 +779,7 @@ 
nvmm_vcpu_loop(CPUState *cpu) smp_wmb(); qcpu->stop = true; break; +#endif case NVMM_VCPU_EXIT_MEMORY: ret = nvmm_handle_mem(mach, vcpu); break; @@ -888,8 +893,12 @@ nvmm_ipi_signal(int sigcpu) { if (current_cpu) { struct qemu_vcpu *qcpu = get_qemu_vcpu(current_cpu); +#if NVMM_USER_VERSION >= 2 struct nvmm_vcpu *vcpu = &qcpu->vcpu; nvmm_vcpu_stop(vcpu); +#else + qcpu->stop = true; +#endif } } @@ -926,10 +935,8 @@ nvmm_init_vcpu(CPUState *cpu) error_setg(&nvmm_migration_blocker, "NVMM: Migration not supported"); - (void)migrate_add_blocker(nvmm_migration_blocker, &local_error); - if (local_error) { + if (migrate_add_blocker(nvmm_migration_blocker, &local_error) < 0) { error_report_err(local_error); - migrate_del_blocker(nvmm_migration_blocker); error_free(nvmm_migration_blocker); return -EINVAL; } @@ -1067,15 +1074,15 @@ nvmm_process_section(MemoryRegionSection *section, int add) } /* Adjust start_pa and size so that they are page-aligned. */ - delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask); - delta &= ~qemu_real_host_page_mask; + delta = qemu_real_host_page_size() - (start_pa & ~qemu_real_host_page_mask()); + delta &= ~qemu_real_host_page_mask(); if (delta > size) { return; } start_pa += delta; size -= delta; - size &= qemu_real_host_page_mask; - if (!size || (start_pa & ~qemu_real_host_page_mask)) { + size &= qemu_real_host_page_mask(); + if (!size || (start_pa & ~qemu_real_host_page_mask())) { return; } @@ -1125,6 +1132,7 @@ nvmm_log_sync(MemoryListener *listener, MemoryRegionSection *section) } static MemoryListener nvmm_memory_listener = { + .name = "nvmm", .begin = nvmm_transaction_begin, .commit = nvmm_transaction_commit, .region_add = nvmm_region_add, @@ -1134,13 +1142,14 @@ static MemoryListener nvmm_memory_listener = { }; static void -nvmm_ram_block_added(RAMBlockNotifier *n, void *host, size_t size) +nvmm_ram_block_added(RAMBlockNotifier *n, void *host, size_t size, + size_t max_size) { struct nvmm_machine *mach = get_nvmm_mach(); uintptr_t hva = (uintptr_t)host; int ret; - ret = nvmm_hva_map(mach, hva, size); + ret = nvmm_hva_map(mach, hva, max_size); if (ret == -1) { error_report("NVMM: Failed to map HVA, HostVA:%p " diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h index 6f1fc174b3..44c1e70093 100644 --- a/target/i386/ops_sse.h +++ b/target/i386/ops_sse.h @@ -35,262 +35,206 @@ #define W(n) ZMM_W(n) #define L(n) ZMM_L(n) #define Q(n) ZMM_Q(n) +#if SHIFT == 1 #define SUFFIX _xmm +#else +#define SUFFIX _ymm +#endif #endif -void glue(helper_psrlw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +#define LANE_WIDTH (SHIFT ? 
16 : 8) +#define PACK_WIDTH (LANE_WIDTH / 2) + +#if SHIFT == 0 +#define FPSRL(x, c) ((x) >> shift) +#define FPSRAW(x, c) ((int16_t)(x) >> shift) +#define FPSRAL(x, c) ((int32_t)(x) >> shift) +#define FPSLL(x, c) ((x) << shift) +#endif + +void glue(helper_psrlw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) { int shift; - - if (s->Q(0) > 15) { - d->Q(0) = 0; -#if SHIFT == 1 - d->Q(1) = 0; -#endif + if (c->Q(0) > 15) { + for (int i = 0; i < 1 << SHIFT; i++) { + d->Q(i) = 0; + } } else { - shift = s->B(0); - d->W(0) >>= shift; - d->W(1) >>= shift; - d->W(2) >>= shift; - d->W(3) >>= shift; -#if SHIFT == 1 - d->W(4) >>= shift; - d->W(5) >>= shift; - d->W(6) >>= shift; - d->W(7) >>= shift; -#endif + shift = c->B(0); + for (int i = 0; i < 4 << SHIFT; i++) { + d->W(i) = FPSRL(s->W(i), shift); + } } } -void glue(helper_psraw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_psllw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) { int shift; + if (c->Q(0) > 15) { + for (int i = 0; i < 1 << SHIFT; i++) { + d->Q(i) = 0; + } + } else { + shift = c->B(0); + for (int i = 0; i < 4 << SHIFT; i++) { + d->W(i) = FPSLL(s->W(i), shift); + } + } +} - if (s->Q(0) > 15) { +void glue(helper_psraw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) +{ + int shift; + if (c->Q(0) > 15) { shift = 15; } else { - shift = s->B(0); + shift = c->B(0); + } + for (int i = 0; i < 4 << SHIFT; i++) { + d->W(i) = FPSRAW(s->W(i), shift); } - d->W(0) = (int16_t)d->W(0) >> shift; - d->W(1) = (int16_t)d->W(1) >> shift; - d->W(2) = (int16_t)d->W(2) >> shift; - d->W(3) = (int16_t)d->W(3) >> shift; -#if SHIFT == 1 - d->W(4) = (int16_t)d->W(4) >> shift; - d->W(5) = (int16_t)d->W(5) >> shift; - d->W(6) = (int16_t)d->W(6) >> shift; - d->W(7) = (int16_t)d->W(7) >> shift; -#endif } -void glue(helper_psllw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_psrld, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) { int shift; - - if (s->Q(0) > 15) { - d->Q(0) = 0; -#if SHIFT == 1 - d->Q(1) = 0; -#endif + if (c->Q(0) > 31) { + for (int i = 0; i < 1 << SHIFT; i++) { + d->Q(i) = 0; + } } else { - shift = s->B(0); - d->W(0) <<= shift; - d->W(1) <<= shift; - d->W(2) <<= shift; - d->W(3) <<= shift; -#if SHIFT == 1 - d->W(4) <<= shift; - d->W(5) <<= shift; - d->W(6) <<= shift; - d->W(7) <<= shift; -#endif + shift = c->B(0); + for (int i = 0; i < 2 << SHIFT; i++) { + d->L(i) = FPSRL(s->L(i), shift); + } } } -void glue(helper_psrld, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_pslld, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) { int shift; - - if (s->Q(0) > 31) { - d->Q(0) = 0; -#if SHIFT == 1 - d->Q(1) = 0; -#endif + if (c->Q(0) > 31) { + for (int i = 0; i < 1 << SHIFT; i++) { + d->Q(i) = 0; + } } else { - shift = s->B(0); - d->L(0) >>= shift; - d->L(1) >>= shift; -#if SHIFT == 1 - d->L(2) >>= shift; - d->L(3) >>= shift; -#endif + shift = c->B(0); + for (int i = 0; i < 2 << SHIFT; i++) { + d->L(i) = FPSLL(s->L(i), shift); + } } } -void glue(helper_psrad, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_psrad, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) { int shift; - - if (s->Q(0) > 31) { + if (c->Q(0) > 31) { shift = 31; } else { - shift = s->B(0); + shift = c->B(0); + } + for (int i = 0; i < 2 << SHIFT; i++) { + d->L(i) = FPSRAL(s->L(i), shift); } - d->L(0) = (int32_t)d->L(0) >> shift; - d->L(1) = (int32_t)d->L(1) >> shift; -#if SHIFT == 1 - d->L(2) = (int32_t)d->L(2) >> shift; - d->L(3) = (int32_t)d->L(3) >> shift; -#endif } -void glue(helper_pslld, SUFFIX)(CPUX86State *env, Reg *d, 
Reg *s) +void glue(helper_psrlq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) { int shift; - - if (s->Q(0) > 31) { - d->Q(0) = 0; -#if SHIFT == 1 - d->Q(1) = 0; -#endif + if (c->Q(0) > 63) { + for (int i = 0; i < 1 << SHIFT; i++) { + d->Q(i) = 0; + } } else { - shift = s->B(0); - d->L(0) <<= shift; - d->L(1) <<= shift; -#if SHIFT == 1 - d->L(2) <<= shift; - d->L(3) <<= shift; -#endif + shift = c->B(0); + for (int i = 0; i < 1 << SHIFT; i++) { + d->Q(i) = FPSRL(s->Q(i), shift); + } } } -void glue(helper_psrlq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_psllq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) { int shift; - - if (s->Q(0) > 63) { - d->Q(0) = 0; -#if SHIFT == 1 - d->Q(1) = 0; -#endif + if (c->Q(0) > 63) { + for (int i = 0; i < 1 << SHIFT; i++) { + d->Q(i) = 0; + } } else { - shift = s->B(0); - d->Q(0) >>= shift; -#if SHIFT == 1 - d->Q(1) >>= shift; -#endif + shift = c->B(0); + for (int i = 0; i < 1 << SHIFT; i++) { + d->Q(i) = FPSLL(s->Q(i), shift); + } } } -void glue(helper_psllq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +#if SHIFT >= 1 +void glue(helper_psrldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) { - int shift; + int shift, i, j; - if (s->Q(0) > 63) { - d->Q(0) = 0; -#if SHIFT == 1 - d->Q(1) = 0; -#endif - } else { - shift = s->B(0); - d->Q(0) <<= shift; -#if SHIFT == 1 - d->Q(1) <<= shift; -#endif - } -} - -#if SHIFT == 1 -void glue(helper_psrldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) -{ - int shift, i; - - shift = s->L(0); + shift = c->L(0); if (shift > 16) { shift = 16; } - for (i = 0; i < 16 - shift; i++) { - d->B(i) = d->B(i + shift); - } - for (i = 16 - shift; i < 16; i++) { - d->B(i) = 0; + for (j = 0; j < 8 << SHIFT; j += LANE_WIDTH) { + for (i = 0; i < 16 - shift; i++) { + d->B(j + i) = s->B(j + i + shift); + } + for (i = 16 - shift; i < 16; i++) { + d->B(j + i) = 0; + } } } -void glue(helper_pslldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_pslldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) { - int shift, i; + int shift, i, j; - shift = s->L(0); + shift = c->L(0); if (shift > 16) { shift = 16; } - for (i = 15; i >= shift; i--) { - d->B(i) = d->B(i - shift); - } - for (i = 0; i < shift; i++) { - d->B(i) = 0; + for (j = 0; j < 8 << SHIFT; j += LANE_WIDTH) { + for (i = 15; i >= shift; i--) { + d->B(j + i) = s->B(j + i - shift); + } + for (i = 0; i < shift; i++) { + d->B(j + i) = 0; + } } } #endif +#define SSE_HELPER_1(name, elem, num, F) \ + void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ + { \ + int n = num; \ + for (int i = 0; i < n; i++) { \ + d->elem(i) = F(s->elem(i)); \ + } \ + } + +#define SSE_HELPER_2(name, elem, num, F) \ + void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \ + { \ + int n = num; \ + for (int i = 0; i < n; i++) { \ + d->elem(i) = F(v->elem(i), s->elem(i)); \ + } \ + } + #define SSE_HELPER_B(name, F) \ - void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ - { \ - d->B(0) = F(d->B(0), s->B(0)); \ - d->B(1) = F(d->B(1), s->B(1)); \ - d->B(2) = F(d->B(2), s->B(2)); \ - d->B(3) = F(d->B(3), s->B(3)); \ - d->B(4) = F(d->B(4), s->B(4)); \ - d->B(5) = F(d->B(5), s->B(5)); \ - d->B(6) = F(d->B(6), s->B(6)); \ - d->B(7) = F(d->B(7), s->B(7)); \ - XMM_ONLY( \ - d->B(8) = F(d->B(8), s->B(8)); \ - d->B(9) = F(d->B(9), s->B(9)); \ - d->B(10) = F(d->B(10), s->B(10)); \ - d->B(11) = F(d->B(11), s->B(11)); \ - d->B(12) = F(d->B(12), s->B(12)); \ - d->B(13) = F(d->B(13), s->B(13)); \ - d->B(14) = F(d->B(14), s->B(14)); \ - d->B(15) = F(d->B(15), s->B(15)); \ - 
) \ - } + SSE_HELPER_2(name, B, 8 << SHIFT, F) #define SSE_HELPER_W(name, F) \ - void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ - { \ - d->W(0) = F(d->W(0), s->W(0)); \ - d->W(1) = F(d->W(1), s->W(1)); \ - d->W(2) = F(d->W(2), s->W(2)); \ - d->W(3) = F(d->W(3), s->W(3)); \ - XMM_ONLY( \ - d->W(4) = F(d->W(4), s->W(4)); \ - d->W(5) = F(d->W(5), s->W(5)); \ - d->W(6) = F(d->W(6), s->W(6)); \ - d->W(7) = F(d->W(7), s->W(7)); \ - ) \ - } + SSE_HELPER_2(name, W, 4 << SHIFT, F) #define SSE_HELPER_L(name, F) \ - void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ - { \ - d->L(0) = F(d->L(0), s->L(0)); \ - d->L(1) = F(d->L(1), s->L(1)); \ - XMM_ONLY( \ - d->L(2) = F(d->L(2), s->L(2)); \ - d->L(3) = F(d->L(3), s->L(3)); \ - ) \ - } + SSE_HELPER_2(name, L, 2 << SHIFT, F) #define SSE_HELPER_Q(name, F) \ - void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ - { \ - d->Q(0) = F(d->Q(0), s->Q(0)); \ - XMM_ONLY( \ - d->Q(1) = F(d->Q(1), s->Q(1)); \ - ) \ - } + SSE_HELPER_2(name, Q, 1 << SHIFT, F) #if SHIFT == 0 static inline int satub(int x) @@ -353,17 +297,6 @@ static inline int satsw(int x) #define FMAXUB(a, b) ((a) > (b)) ? (a) : (b) #define FMAXSW(a, b) ((int16_t)(a) > (int16_t)(b)) ? (a) : (b) -#define FAND(a, b) ((a) & (b)) -#define FANDN(a, b) ((~(a)) & (b)) -#define FOR(a, b) ((a) | (b)) -#define FXOR(a, b) ((a) ^ (b)) - -#define FCMPGTB(a, b) ((int8_t)(a) > (int8_t)(b) ? -1 : 0) -#define FCMPGTW(a, b) ((int16_t)(a) > (int16_t)(b) ? -1 : 0) -#define FCMPGTL(a, b) ((int32_t)(a) > (int32_t)(b) ? -1 : 0) -#define FCMPEQ(a, b) ((a) == (b) ? -1 : 0) - -#define FMULLW(a, b) ((a) * (b)) #define FMULHRW(a, b) (((int16_t)(a) * (int16_t)(b) + 0x8000) >> 16) #define FMULHUW(a, b) ((a) * (b) >> 16) #define FMULHW(a, b) ((int16_t)(a) * (int16_t)(b) >> 16) @@ -371,70 +304,38 @@ static inline int satsw(int x) #define FAVG(a, b) (((a) + (b) + 1) >> 1) #endif -SSE_HELPER_B(helper_paddb, FADD) -SSE_HELPER_W(helper_paddw, FADD) -SSE_HELPER_L(helper_paddl, FADD) -SSE_HELPER_Q(helper_paddq, FADD) - -SSE_HELPER_B(helper_psubb, FSUB) -SSE_HELPER_W(helper_psubw, FSUB) -SSE_HELPER_L(helper_psubl, FSUB) -SSE_HELPER_Q(helper_psubq, FSUB) - -SSE_HELPER_B(helper_paddusb, FADDUB) -SSE_HELPER_B(helper_paddsb, FADDSB) -SSE_HELPER_B(helper_psubusb, FSUBUB) -SSE_HELPER_B(helper_psubsb, FSUBSB) - -SSE_HELPER_W(helper_paddusw, FADDUW) -SSE_HELPER_W(helper_paddsw, FADDSW) -SSE_HELPER_W(helper_psubusw, FSUBUW) -SSE_HELPER_W(helper_psubsw, FSUBSW) - -SSE_HELPER_B(helper_pminub, FMINUB) -SSE_HELPER_B(helper_pmaxub, FMAXUB) - -SSE_HELPER_W(helper_pminsw, FMINSW) -SSE_HELPER_W(helper_pmaxsw, FMAXSW) - -SSE_HELPER_Q(helper_pand, FAND) -SSE_HELPER_Q(helper_pandn, FANDN) -SSE_HELPER_Q(helper_por, FOR) -SSE_HELPER_Q(helper_pxor, FXOR) - -SSE_HELPER_B(helper_pcmpgtb, FCMPGTB) -SSE_HELPER_W(helper_pcmpgtw, FCMPGTW) -SSE_HELPER_L(helper_pcmpgtl, FCMPGTL) - -SSE_HELPER_B(helper_pcmpeqb, FCMPEQ) -SSE_HELPER_W(helper_pcmpeqw, FCMPEQ) -SSE_HELPER_L(helper_pcmpeql, FCMPEQ) - -SSE_HELPER_W(helper_pmullw, FMULLW) -#if SHIFT == 0 -SSE_HELPER_W(helper_pmulhrw, FMULHRW) -#endif SSE_HELPER_W(helper_pmulhuw, FMULHUW) SSE_HELPER_W(helper_pmulhw, FMULHW) +#if SHIFT == 0 +void glue(helper_pmulhrw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + d->W(0) = FMULHRW(d->W(0), s->W(0)); + d->W(1) = FMULHRW(d->W(1), s->W(1)); + d->W(2) = FMULHRW(d->W(2), s->W(2)); + d->W(3) = FMULHRW(d->W(3), s->W(3)); +} +#endif + SSE_HELPER_B(helper_pavgb, FAVG) SSE_HELPER_W(helper_pavgw, FAVG) -void glue(helper_pmuludq, SUFFIX)(CPUX86State *env, Reg 
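Review annotation, not part of the patch: SSE_HELPER_B/W/L/Q now all expand through the generic SSE_HELPER_2, so only the element count differs (8 << SHIFT bytes, 4 << SHIFT words, 2 << SHIFT dwords, 1 << SHIFT quads) and the first source is v rather than the destination. A sketch of what one such expansion boils down to, using pavgb's FAVG over plain arrays instead of the Reg accessors:

/* Rough equivalent of SSE_HELPER_B(helper_pavgb, FAVG) after macro
 * expansion, with arrays standing in for the register union. */
#include <stdint.h>
#include <stdio.h>

#define FAVG(a, b) (((a) + (b) + 1) >> 1)   /* same rounding average as the file */

static void pavgb_sketch(uint8_t *d, const uint8_t *v, const uint8_t *s, int num)
{
    for (int i = 0; i < num; i++) {
        d[i] = FAVG(v[i], s[i]);            /* independent per-element op */
    }
}

int main(void)
{
    uint8_t v[4] = {1, 2, 250, 255}, s[4] = {2, 2, 251, 255}, d[4];
    pavgb_sketch(d, v, s, 4);               /* num would be 8 << SHIFT in the helper */
    printf("%d %d %d %d\n", d[0], d[1], d[2], d[3]);   /* 2 2 251 255 */
    return 0;
}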
*d, Reg *s) +void glue(helper_pmuludq, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) { - d->Q(0) = (uint64_t)s->L(0) * (uint64_t)d->L(0); -#if SHIFT == 1 - d->Q(1) = (uint64_t)s->L(2) * (uint64_t)d->L(2); -#endif + int i; + + for (i = 0; i < (1 << SHIFT); i++) { + d->Q(i) = (uint64_t)s->L(i * 2) * (uint64_t)v->L(i * 2); + } } -void glue(helper_pmaddwd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_pmaddwd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) { int i; for (i = 0; i < (2 << SHIFT); i++) { - d->L(i) = (int16_t)s->W(2 * i) * (int16_t)d->W(2 * i) + - (int16_t)s->W(2 * i + 1) * (int16_t)d->W(2 * i + 1); + d->L(i) = (int16_t)s->W(2 * i) * (int16_t)v->W(2 * i) + + (int16_t)s->W(2 * i + 1) * (int16_t)v->W(2 * i + 1); } } @@ -448,34 +349,25 @@ static inline int abs1(int a) } } #endif -void glue(helper_psadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_psadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) { - unsigned int val; + int i; - val = 0; - val += abs1(d->B(0) - s->B(0)); - val += abs1(d->B(1) - s->B(1)); - val += abs1(d->B(2) - s->B(2)); - val += abs1(d->B(3) - s->B(3)); - val += abs1(d->B(4) - s->B(4)); - val += abs1(d->B(5) - s->B(5)); - val += abs1(d->B(6) - s->B(6)); - val += abs1(d->B(7) - s->B(7)); - d->Q(0) = val; -#if SHIFT == 1 - val = 0; - val += abs1(d->B(8) - s->B(8)); - val += abs1(d->B(9) - s->B(9)); - val += abs1(d->B(10) - s->B(10)); - val += abs1(d->B(11) - s->B(11)); - val += abs1(d->B(12) - s->B(12)); - val += abs1(d->B(13) - s->B(13)); - val += abs1(d->B(14) - s->B(14)); - val += abs1(d->B(15) - s->B(15)); - d->Q(1) = val; -#endif + for (i = 0; i < (1 << SHIFT); i++) { + unsigned int val = 0; + val += abs1(v->B(8 * i + 0) - s->B(8 * i + 0)); + val += abs1(v->B(8 * i + 1) - s->B(8 * i + 1)); + val += abs1(v->B(8 * i + 2) - s->B(8 * i + 2)); + val += abs1(v->B(8 * i + 3) - s->B(8 * i + 3)); + val += abs1(v->B(8 * i + 4) - s->B(8 * i + 4)); + val += abs1(v->B(8 * i + 5) - s->B(8 * i + 5)); + val += abs1(v->B(8 * i + 6) - s->B(8 * i + 6)); + val += abs1(v->B(8 * i + 7) - s->B(8 * i + 7)); + d->Q(i) = val; + } } +#if SHIFT < 2 void glue(helper_maskmov, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, target_ulong a0) { @@ -487,128 +379,140 @@ void glue(helper_maskmov, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, } } } +#endif -void glue(helper_movl_mm_T0, SUFFIX)(Reg *d, uint32_t val) -{ - d->L(0) = val; - d->L(1) = 0; -#if SHIFT == 1 - d->Q(1) = 0; -#endif -} - -#ifdef TARGET_X86_64 -void glue(helper_movq_mm_T0, SUFFIX)(Reg *d, uint64_t val) -{ - d->Q(0) = val; -#if SHIFT == 1 - d->Q(1) = 0; -#endif -} -#endif +#define SHUFFLE4(F, a, b, offset) do { \ + r0 = a->F((order & 3) + offset); \ + r1 = a->F(((order >> 2) & 3) + offset); \ + r2 = b->F(((order >> 4) & 3) + offset); \ + r3 = b->F(((order >> 6) & 3) + offset); \ + d->F(offset) = r0; \ + d->F(offset + 1) = r1; \ + d->F(offset + 2) = r2; \ + d->F(offset + 3) = r3; \ + } while (0) #if SHIFT == 0 void glue(helper_pshufw, SUFFIX)(Reg *d, Reg *s, int order) { - Reg r; + uint16_t r0, r1, r2, r3; - r.W(0) = s->W(order & 3); - r.W(1) = s->W((order >> 2) & 3); - r.W(2) = s->W((order >> 4) & 3); - r.W(3) = s->W((order >> 6) & 3); - *d = r; + SHUFFLE4(W, s, s, 0); } #else -void helper_shufps(Reg *d, Reg *s, int order) +void glue(helper_shufps, SUFFIX)(Reg *d, Reg *v, Reg *s, int order) { - Reg r; + uint32_t r0, r1, r2, r3; + int i; - r.L(0) = d->L(order & 3); - r.L(1) = d->L((order >> 2) & 3); - r.L(2) = s->L((order >> 4) & 3); - r.L(3) = s->L((order >> 6) & 3); - *d = r; + for (i = 0; i < 
2 << SHIFT; i += 4) { + SHUFFLE4(L, v, s, i); + } } -void helper_shufpd(Reg *d, Reg *s, int order) +void glue(helper_shufpd, SUFFIX)(Reg *d, Reg *v, Reg *s, int order) { - Reg r; + uint64_t r0, r1; + int i; - r.Q(0) = d->Q(order & 1); - r.Q(1) = s->Q((order >> 1) & 1); - *d = r; + for (i = 0; i < 1 << SHIFT; i += 2) { + r0 = v->Q(((order & 1) & 1) + i); + r1 = s->Q(((order >> 1) & 1) + i); + d->Q(i) = r0; + d->Q(i + 1) = r1; + order >>= 2; + } } void glue(helper_pshufd, SUFFIX)(Reg *d, Reg *s, int order) { - Reg r; + uint32_t r0, r1, r2, r3; + int i; - r.L(0) = s->L(order & 3); - r.L(1) = s->L((order >> 2) & 3); - r.L(2) = s->L((order >> 4) & 3); - r.L(3) = s->L((order >> 6) & 3); - *d = r; + for (i = 0; i < 2 << SHIFT; i += 4) { + SHUFFLE4(L, s, s, i); + } } void glue(helper_pshuflw, SUFFIX)(Reg *d, Reg *s, int order) { - Reg r; + uint16_t r0, r1, r2, r3; + int i, j; - r.W(0) = s->W(order & 3); - r.W(1) = s->W((order >> 2) & 3); - r.W(2) = s->W((order >> 4) & 3); - r.W(3) = s->W((order >> 6) & 3); - r.Q(1) = s->Q(1); - *d = r; + for (i = 0, j = 1; j < 1 << SHIFT; i += 8, j += 2) { + SHUFFLE4(W, s, s, i); + d->Q(j) = s->Q(j); + } } void glue(helper_pshufhw, SUFFIX)(Reg *d, Reg *s, int order) { - Reg r; + uint16_t r0, r1, r2, r3; + int i, j; - r.Q(0) = s->Q(0); - r.W(4) = s->W(4 + (order & 3)); - r.W(5) = s->W(4 + ((order >> 2) & 3)); - r.W(6) = s->W(4 + ((order >> 4) & 3)); - r.W(7) = s->W(4 + ((order >> 6) & 3)); - *d = r; + for (i = 4, j = 0; j < 1 << SHIFT; i += 8, j += 2) { + d->Q(j) = s->Q(j); + SHUFFLE4(W, s, s, i); + } } #endif -#if SHIFT == 1 +#if SHIFT >= 1 /* FPU ops */ /* XXX: not accurate */ -#define SSE_HELPER_S(name, F) \ - void helper_ ## name ## ps(CPUX86State *env, Reg *d, Reg *s) \ +#define SSE_HELPER_P(name, F) \ + void glue(helper_ ## name ## ps, SUFFIX)(CPUX86State *env, \ + Reg *d, Reg *v, Reg *s) \ { \ - d->ZMM_S(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \ - d->ZMM_S(1) = F(32, d->ZMM_S(1), s->ZMM_S(1)); \ - d->ZMM_S(2) = F(32, d->ZMM_S(2), s->ZMM_S(2)); \ - d->ZMM_S(3) = F(32, d->ZMM_S(3), s->ZMM_S(3)); \ + int i; \ + for (i = 0; i < 2 << SHIFT; i++) { \ + d->ZMM_S(i) = F(32, v->ZMM_S(i), s->ZMM_S(i)); \ + } \ } \ \ - void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *s) \ + void glue(helper_ ## name ## pd, SUFFIX)(CPUX86State *env, \ + Reg *d, Reg *v, Reg *s) \ { \ - d->ZMM_S(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \ - } \ - \ - void helper_ ## name ## pd(CPUX86State *env, Reg *d, Reg *s) \ - { \ - d->ZMM_D(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \ - d->ZMM_D(1) = F(64, d->ZMM_D(1), s->ZMM_D(1)); \ - } \ - \ - void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *s) \ - { \ - d->ZMM_D(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \ + int i; \ + for (i = 0; i < 1 << SHIFT; i++) { \ + d->ZMM_D(i) = F(64, v->ZMM_D(i), s->ZMM_D(i)); \ + } \ } +#if SHIFT == 1 + +#define SSE_HELPER_S(name, F) \ + SSE_HELPER_P(name, F) \ + \ + void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *v, Reg *s)\ + { \ + int i; \ + d->ZMM_S(0) = F(32, v->ZMM_S(0), s->ZMM_S(0)); \ + for (i = 1; i < 2 << SHIFT; i++) { \ + d->ZMM_L(i) = v->ZMM_L(i); \ + } \ + } \ + \ + void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *v, Reg *s)\ + { \ + int i; \ + d->ZMM_D(0) = F(64, v->ZMM_D(0), s->ZMM_D(0)); \ + for (i = 1; i < 1 << SHIFT; i++) { \ + d->ZMM_Q(i) = v->ZMM_Q(i); \ + } \ + } + +#else + +#define SSE_HELPER_S(name, F) SSE_HELPER_P(name, F) + +#endif + #define FPU_ADD(size, a, b) float ## size ## _add(a, b, &env->sse_status) #define FPU_SUB(size, a, b) float ## size ## _sub(a, b, 
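Reviewer note (not part of the patch): the new three-operand SSE_HELPER_S shape computes only the low element for the ss/sd scalar forms and copies the remaining lanes from the first source v, which is what the non-destructive VEX encodings require. A standalone sketch of that merge, with native float arithmetic standing in for the softfloat calls and env->sse_status:

/* Scalar-merge pattern of the new helper_addss and friends; native
 * floats only, so rounding and exception behaviour is not modelled. */
#include <stdio.h>

#define NELEM 4   /* 2 << SHIFT with SHIFT == 1 */

static void addss_sketch(float *d, const float *v, const float *s)
{
    d[0] = v[0] + s[0];              /* scalar op on the low element */
    for (int i = 1; i < NELEM; i++) {
        d[i] = v[i];                 /* upper lanes pass through from v */
    }
}

int main(void)
{
    float v[NELEM] = {1.0f, 10.0f, 20.0f, 30.0f};
    float s[NELEM] = {2.0f, -1.0f, -1.0f, -1.0f};
    float d[NELEM];
    addss_sketch(d, v, s);
    printf("%g %g %g %g\n", d[0], d[1], d[2], d[3]);   /* 3 10 20 30 */
    return 0;
}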
&env->sse_status) #define FPU_MUL(size, a, b) float ## size ## _mul(a, b, &env->sse_status) #define FPU_DIV(size, a, b) float ## size ## _div(a, b, &env->sse_status) -#define FPU_SQRT(size, a, b) float ## size ## _sqrt(b, &env->sse_status) /* Note that the choice of comparison op here is important to get the * special cases right: for min and max Intel specifies that (-0,0), @@ -625,56 +529,131 @@ SSE_HELPER_S(mul, FPU_MUL) SSE_HELPER_S(div, FPU_DIV) SSE_HELPER_S(min, FPU_MIN) SSE_HELPER_S(max, FPU_MAX) -SSE_HELPER_S(sqrt, FPU_SQRT) +void glue(helper_sqrtps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int i; + for (i = 0; i < 2 << SHIFT; i++) { + d->ZMM_S(i) = float32_sqrt(s->ZMM_S(i), &env->sse_status); + } +} + +void glue(helper_sqrtpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int i; + for (i = 0; i < 1 << SHIFT; i++) { + d->ZMM_D(i) = float64_sqrt(s->ZMM_D(i), &env->sse_status); + } +} + +#if SHIFT == 1 +void helper_sqrtss(CPUX86State *env, Reg *d, Reg *v, Reg *s) +{ + int i; + d->ZMM_S(0) = float32_sqrt(s->ZMM_S(0), &env->sse_status); + for (i = 1; i < 2 << SHIFT; i++) { + d->ZMM_L(i) = v->ZMM_L(i); + } +} + +void helper_sqrtsd(CPUX86State *env, Reg *d, Reg *v, Reg *s) +{ + int i; + d->ZMM_D(0) = float64_sqrt(s->ZMM_D(0), &env->sse_status); + for (i = 1; i < 1 << SHIFT; i++) { + d->ZMM_Q(i) = v->ZMM_Q(i); + } +} +#endif /* float to float conversions */ -void helper_cvtps2pd(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_cvtps2pd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { - float32 s0, s1; - - s0 = s->ZMM_S(0); - s1 = s->ZMM_S(1); - d->ZMM_D(0) = float32_to_float64(s0, &env->sse_status); - d->ZMM_D(1) = float32_to_float64(s1, &env->sse_status); + int i; + for (i = 1 << SHIFT; --i >= 0; ) { + d->ZMM_D(i) = float32_to_float64(s->ZMM_S(i), &env->sse_status); + } } -void helper_cvtpd2ps(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_cvtpd2ps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { - d->ZMM_S(0) = float64_to_float32(s->ZMM_D(0), &env->sse_status); - d->ZMM_S(1) = float64_to_float32(s->ZMM_D(1), &env->sse_status); - d->Q(1) = 0; + int i; + for (i = 0; i < 1 << SHIFT; i++) { + d->ZMM_S(i) = float64_to_float32(s->ZMM_D(i), &env->sse_status); + } + for (i >>= 1; i < 1 << SHIFT; i++) { + d->Q(i) = 0; + } } -void helper_cvtss2sd(CPUX86State *env, Reg *d, Reg *s) +#if SHIFT >= 1 +void glue(helper_cvtph2ps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { + int i; + + for (i = 2 << SHIFT; --i >= 0; ) { + d->ZMM_S(i) = float16_to_float32(s->ZMM_H(i), true, &env->sse_status); + } +} + +void glue(helper_cvtps2ph, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, int mode) +{ + int i; + FloatRoundMode prev_rounding_mode = env->sse_status.float_rounding_mode; + if (!(mode & (1 << 2))) { + set_x86_rounding_mode(mode & 3, &env->sse_status); + } + + for (i = 0; i < 2 << SHIFT; i++) { + d->ZMM_H(i) = float32_to_float16(s->ZMM_S(i), true, &env->sse_status); + } + for (i >>= 2; i < 1 << SHIFT; i++) { + d->Q(i) = 0; + } + + env->sse_status.float_rounding_mode = prev_rounding_mode; +} +#endif + +#if SHIFT == 1 +void helper_cvtss2sd(CPUX86State *env, Reg *d, Reg *v, Reg *s) +{ + int i; d->ZMM_D(0) = float32_to_float64(s->ZMM_S(0), &env->sse_status); + for (i = 1; i < 1 << SHIFT; i++) { + d->ZMM_Q(i) = v->ZMM_Q(i); + } } -void helper_cvtsd2ss(CPUX86State *env, Reg *d, Reg *s) +void helper_cvtsd2ss(CPUX86State *env, Reg *d, Reg *v, Reg *s) { + int i; d->ZMM_S(0) = float64_to_float32(s->ZMM_D(0), &env->sse_status); + for (i = 1; i < 2 << SHIFT; i++) { + d->ZMM_L(i) = v->ZMM_L(i); + } } +#endif /* 
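Aside for review, not part of the patch: in the cvtpd2ps hunk above, 1 << SHIFT doubles narrow into the low half of the destination and the loop starting at i >>= 1 clears the remaining quadwords, since two float32 results fill one 64-bit slot. A sketch assuming SHIFT == 1, a toy union in place of ZMMReg, native conversion in place of softfloat, and distinct d and s:

/* Index arithmetic of the new cvtpd2ps, modelled on a small union. */
#include <stdint.h>
#include <stdio.h>

#define SHIFT 1

union reg {                      /* toy stand-in for QEMU's ZMMReg */
    float    s[2 << SHIFT];
    double   d[1 << SHIFT];
    uint64_t q[1 << SHIFT];
};

static void cvtpd2ps_sketch(union reg *d, const union reg *s)
{
    int i;
    for (i = 0; i < 1 << SHIFT; i++) {
        d->s[i] = (float)s->d[i];   /* narrow each double into the low half */
    }
    for (i >>= 1; i < 1 << SHIFT; i++) {
        d->q[i] = 0;                /* clear the untouched upper quads */
    }
}

int main(void)
{
    union reg src = { .d = { 1.5, -2.0 } }, dst;
    cvtpd2ps_sketch(&dst, &src);
    printf("%g %g %llx\n", dst.s[0], dst.s[1],
           (unsigned long long)dst.q[1]);   /* 1.5 -2 0 */
    return 0;
}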
integer to float */ -void helper_cvtdq2ps(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_cvtdq2ps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { - d->ZMM_S(0) = int32_to_float32(s->ZMM_L(0), &env->sse_status); - d->ZMM_S(1) = int32_to_float32(s->ZMM_L(1), &env->sse_status); - d->ZMM_S(2) = int32_to_float32(s->ZMM_L(2), &env->sse_status); - d->ZMM_S(3) = int32_to_float32(s->ZMM_L(3), &env->sse_status); + int i; + for (i = 0; i < 2 << SHIFT; i++) { + d->ZMM_S(i) = int32_to_float32(s->ZMM_L(i), &env->sse_status); + } } -void helper_cvtdq2pd(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_cvtdq2pd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { - int32_t l0, l1; - - l0 = (int32_t)s->ZMM_L(0); - l1 = (int32_t)s->ZMM_L(1); - d->ZMM_D(0) = int32_to_float64(l0, &env->sse_status); - d->ZMM_D(1) = int32_to_float64(l1, &env->sse_status); + int i; + for (i = 1 << SHIFT; --i >= 0; ) { + int32_t l = s->ZMM_L(i); + d->ZMM_D(i) = int32_to_float64(l, &env->sse_status); + } } +#if SHIFT == 1 void helper_cvtpi2ps(CPUX86State *env, ZMMReg *d, MMXReg *s) { d->ZMM_S(0) = int32_to_float32(s->MMX_L(0), &env->sse_status); @@ -709,8 +688,11 @@ void helper_cvtsq2sd(CPUX86State *env, ZMMReg *d, uint64_t val) } #endif +#endif + /* float to integer */ +#if SHIFT == 1 /* * x86 mandates that we return the indefinite integer value for the result * of any float-to-integer conversion that raises the 'invalid' exception. @@ -741,22 +723,28 @@ WRAP_FLOATCONV(int64_t, float32_to_int64, float32, INT64_MIN) WRAP_FLOATCONV(int64_t, float32_to_int64_round_to_zero, float32, INT64_MIN) WRAP_FLOATCONV(int64_t, float64_to_int64, float64, INT64_MIN) WRAP_FLOATCONV(int64_t, float64_to_int64_round_to_zero, float64, INT64_MIN) +#endif -void helper_cvtps2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s) +void glue(helper_cvtps2dq, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->ZMM_L(0) = x86_float32_to_int32(s->ZMM_S(0), &env->sse_status); - d->ZMM_L(1) = x86_float32_to_int32(s->ZMM_S(1), &env->sse_status); - d->ZMM_L(2) = x86_float32_to_int32(s->ZMM_S(2), &env->sse_status); - d->ZMM_L(3) = x86_float32_to_int32(s->ZMM_S(3), &env->sse_status); + int i; + for (i = 0; i < 2 << SHIFT; i++) { + d->ZMM_L(i) = x86_float32_to_int32(s->ZMM_S(i), &env->sse_status); + } } -void helper_cvtpd2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s) +void glue(helper_cvtpd2dq, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->ZMM_L(0) = x86_float64_to_int32(s->ZMM_D(0), &env->sse_status); - d->ZMM_L(1) = x86_float64_to_int32(s->ZMM_D(1), &env->sse_status); - d->ZMM_Q(1) = 0; + int i; + for (i = 0; i < 1 << SHIFT; i++) { + d->ZMM_L(i) = x86_float64_to_int32(s->ZMM_D(i), &env->sse_status); + } + for (i >>= 1; i < 1 << SHIFT; i++) { + d->Q(i) = 0; + } } +#if SHIFT == 1 void helper_cvtps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s) { d->MMX_L(0) = x86_float32_to_int32(s->ZMM_S(0), &env->sse_status); @@ -790,23 +778,31 @@ int64_t helper_cvtsd2sq(CPUX86State *env, ZMMReg *s) return x86_float64_to_int64(s->ZMM_D(0), &env->sse_status); } #endif +#endif /* float to integer truncated */ -void helper_cvttps2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s) +void glue(helper_cvttps2dq, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->ZMM_L(0) = x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status); - d->ZMM_L(1) = x86_float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status); - d->ZMM_L(2) = x86_float32_to_int32_round_to_zero(s->ZMM_S(2), &env->sse_status); - d->ZMM_L(3) = x86_float32_to_int32_round_to_zero(s->ZMM_S(3), &env->sse_status); + int i; + for (i 
= 0; i < 2 << SHIFT; i++) { + d->ZMM_L(i) = x86_float32_to_int32_round_to_zero(s->ZMM_S(i), + &env->sse_status); + } } -void helper_cvttpd2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s) +void glue(helper_cvttpd2dq, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->ZMM_L(0) = x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status); - d->ZMM_L(1) = x86_float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status); - d->ZMM_Q(1) = 0; + int i; + for (i = 0; i < 1 << SHIFT; i++) { + d->ZMM_L(i) = x86_float64_to_int32_round_to_zero(s->ZMM_D(i), + &env->sse_status); + } + for (i >>= 1; i < 1 << SHIFT; i++) { + d->Q(i) = 0; + } } +#if SHIFT == 1 void helper_cvttps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s) { d->MMX_L(0) = x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status); @@ -840,51 +836,59 @@ int64_t helper_cvttsd2sq(CPUX86State *env, ZMMReg *s) return x86_float64_to_int64_round_to_zero(s->ZMM_D(0), &env->sse_status); } #endif +#endif -void helper_rsqrtps(CPUX86State *env, ZMMReg *d, ZMMReg *s) +void glue(helper_rsqrtps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s) { uint8_t old_flags = get_float_exception_flags(&env->sse_status); - d->ZMM_S(0) = float32_div(float32_one, - float32_sqrt(s->ZMM_S(0), &env->sse_status), - &env->sse_status); - d->ZMM_S(1) = float32_div(float32_one, - float32_sqrt(s->ZMM_S(1), &env->sse_status), - &env->sse_status); - d->ZMM_S(2) = float32_div(float32_one, - float32_sqrt(s->ZMM_S(2), &env->sse_status), - &env->sse_status); - d->ZMM_S(3) = float32_div(float32_one, - float32_sqrt(s->ZMM_S(3), &env->sse_status), - &env->sse_status); + int i; + for (i = 0; i < 2 << SHIFT; i++) { + d->ZMM_S(i) = float32_div(float32_one, + float32_sqrt(s->ZMM_S(i), &env->sse_status), + &env->sse_status); + } set_float_exception_flags(old_flags, &env->sse_status); } -void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *s) +#if SHIFT == 1 +void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *v, ZMMReg *s) { uint8_t old_flags = get_float_exception_flags(&env->sse_status); + int i; d->ZMM_S(0) = float32_div(float32_one, float32_sqrt(s->ZMM_S(0), &env->sse_status), &env->sse_status); set_float_exception_flags(old_flags, &env->sse_status); + for (i = 1; i < 2 << SHIFT; i++) { + d->ZMM_L(i) = v->ZMM_L(i); + } } +#endif -void helper_rcpps(CPUX86State *env, ZMMReg *d, ZMMReg *s) +void glue(helper_rcpps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s) { uint8_t old_flags = get_float_exception_flags(&env->sse_status); - d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status); - d->ZMM_S(1) = float32_div(float32_one, s->ZMM_S(1), &env->sse_status); - d->ZMM_S(2) = float32_div(float32_one, s->ZMM_S(2), &env->sse_status); - d->ZMM_S(3) = float32_div(float32_one, s->ZMM_S(3), &env->sse_status); + int i; + for (i = 0; i < 2 << SHIFT; i++) { + d->ZMM_S(i) = float32_div(float32_one, s->ZMM_S(i), &env->sse_status); + } set_float_exception_flags(old_flags, &env->sse_status); } -void helper_rcpss(CPUX86State *env, ZMMReg *d, ZMMReg *s) +#if SHIFT == 1 +void helper_rcpss(CPUX86State *env, ZMMReg *d, ZMMReg *v, ZMMReg *s) { uint8_t old_flags = get_float_exception_flags(&env->sse_status); + int i; d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status); + for (i = 1; i < 2 << SHIFT; i++) { + d->ZMM_L(i) = v->ZMM_L(i); + } set_float_exception_flags(old_flags, &env->sse_status); } +#endif +#if SHIFT == 1 static inline uint64_t helper_extrq(uint64_t src, int shift, int len) { uint64_t mask; @@ -899,7 +903,7 @@ static inline uint64_t 
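Review annotation, not part of the patch: the cvtt*2dq hunks above go through the wrapped conversions that must return the x86 "indefinite integer" (INT32_MIN or INT64_MIN) whenever the conversion is invalid, per the comment earlier in this file; the real wrappers presumably key off softfloat's invalid flag, so the explicit range checks in this sketch are only an approximation of the truncating int32 case:

/* Approximate model of the indefinite-integer rule for float -> int32
 * truncation; not QEMU code. */
#include <stdint.h>
#include <math.h>
#include <stdio.h>

static int32_t f32_to_i32_trunc_sketch(float x)
{
    if (isnan(x) || x >= 2147483648.0f || x < -2147483648.0f) {
        return INT32_MIN;           /* 0x80000000, the indefinite value */
    }
    return (int32_t)x;              /* C truncation, like _round_to_zero */
}

int main(void)
{
    printf("%d %d %d\n",
           f32_to_i32_trunc_sketch(3.7f),     /* 3 */
           f32_to_i32_trunc_sketch(NAN),      /* -2147483648 */
           f32_to_i32_trunc_sketch(1e30f));   /* -2147483648 */
    return 0;
}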
helper_extrq(uint64_t src, int shift, int len) void helper_extrq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), s->ZMM_B(1), s->ZMM_B(0)); + d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), s->ZMM_B(1) & 63, s->ZMM_B(0) & 63); } void helper_extrq_i(CPUX86State *env, ZMMReg *d, int index, int length) @@ -907,7 +911,7 @@ void helper_extrq_i(CPUX86State *env, ZMMReg *d, int index, int length) d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), index, length); } -static inline uint64_t helper_insertq(uint64_t src, int shift, int len) +static inline uint64_t helper_insertq(uint64_t dest, uint64_t src, int shift, int len) { uint64_t mask; @@ -916,125 +920,184 @@ static inline uint64_t helper_insertq(uint64_t src, int shift, int len) } else { mask = (1ULL << len) - 1; } - return (src & ~(mask << shift)) | ((src & mask) << shift); + return (dest & ~(mask << shift)) | ((src & mask) << shift); } void helper_insertq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->ZMM_Q(0) = helper_insertq(s->ZMM_Q(0), s->ZMM_B(9), s->ZMM_B(8)); + d->ZMM_Q(0) = helper_insertq(d->ZMM_Q(0), s->ZMM_Q(0), s->ZMM_B(9) & 63, s->ZMM_B(8) & 63); } -void helper_insertq_i(CPUX86State *env, ZMMReg *d, int index, int length) +void helper_insertq_i(CPUX86State *env, ZMMReg *d, ZMMReg *s, int index, int length) { - d->ZMM_Q(0) = helper_insertq(d->ZMM_Q(0), index, length); + d->ZMM_Q(0) = helper_insertq(d->ZMM_Q(0), s->ZMM_Q(0), index, length); +} +#endif + +#define SSE_HELPER_HPS(name, F) \ +void glue(helper_ ## name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \ +{ \ + float32 r[2 << SHIFT]; \ + int i, j, k; \ + for (k = 0; k < 2 << SHIFT; k += LANE_WIDTH / 4) { \ + for (i = j = 0; j < 4; i++, j += 2) { \ + r[i + k] = F(v->ZMM_S(j + k), v->ZMM_S(j + k + 1), &env->sse_status); \ + } \ + for (j = 0; j < 4; i++, j += 2) { \ + r[i + k] = F(s->ZMM_S(j + k), s->ZMM_S(j + k + 1), &env->sse_status); \ + } \ + } \ + for (i = 0; i < 2 << SHIFT; i++) { \ + d->ZMM_S(i) = r[i]; \ + } \ } -void helper_haddps(CPUX86State *env, ZMMReg *d, ZMMReg *s) +SSE_HELPER_HPS(haddps, float32_add) +SSE_HELPER_HPS(hsubps, float32_sub) + +#define SSE_HELPER_HPD(name, F) \ +void glue(helper_ ## name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \ +{ \ + float64 r[1 << SHIFT]; \ + int i, j, k; \ + for (k = 0; k < 1 << SHIFT; k += LANE_WIDTH / 8) { \ + for (i = j = 0; j < 2; i++, j += 2) { \ + r[i + k] = F(v->ZMM_D(j + k), v->ZMM_D(j + k + 1), &env->sse_status); \ + } \ + for (j = 0; j < 2; i++, j += 2) { \ + r[i + k] = F(s->ZMM_D(j + k), s->ZMM_D(j + k + 1), &env->sse_status); \ + } \ + } \ + for (i = 0; i < 1 << SHIFT; i++) { \ + d->ZMM_D(i) = r[i]; \ + } \ +} + +SSE_HELPER_HPD(haddpd, float64_add) +SSE_HELPER_HPD(hsubpd, float64_sub) + +void glue(helper_addsubps, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) { - ZMMReg r; - - r.ZMM_S(0) = float32_add(d->ZMM_S(0), d->ZMM_S(1), &env->sse_status); - r.ZMM_S(1) = float32_add(d->ZMM_S(2), d->ZMM_S(3), &env->sse_status); - r.ZMM_S(2) = float32_add(s->ZMM_S(0), s->ZMM_S(1), &env->sse_status); - r.ZMM_S(3) = float32_add(s->ZMM_S(2), s->ZMM_S(3), &env->sse_status); - *d = r; + int i; + for (i = 0; i < 2 << SHIFT; i += 2) { + d->ZMM_S(i) = float32_sub(v->ZMM_S(i), s->ZMM_S(i), &env->sse_status); + d->ZMM_S(i+1) = float32_add(v->ZMM_S(i+1), s->ZMM_S(i+1), &env->sse_status); + } } -void helper_haddpd(CPUX86State *env, ZMMReg *d, ZMMReg *s) +void glue(helper_addsubpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) { - ZMMReg r; - - r.ZMM_D(0) = float64_add(d->ZMM_D(0), d->ZMM_D(1), 
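Reviewer note (not part of the patch): the insertq change is a genuine fix; the inserted field now comes from the low bits of src and is merged into dest, where the old code used src for both roles, and the register forms mask shift/len to 6 bits. A standalone sketch of the merge itself:

/* Bit-field merge performed by helper_insertq after the fix. */
#include <stdint.h>
#include <stdio.h>

static uint64_t insertq_sketch(uint64_t dest, uint64_t src, int shift, int len)
{
    uint64_t mask;

    if (len == 0) {
        mask = ~0ULL;               /* length 0 means all 64 bits */
    } else {
        mask = (1ULL << len) - 1;
    }
    return (dest & ~(mask << shift)) | ((src & mask) << shift);
}

int main(void)
{
    /* insert the low 8 bits of src at bit 16 of dest */
    uint64_t r = insertq_sketch(0x1111222233334444ULL, 0xabULL, 16, 8);
    printf("%016llx\n", (unsigned long long)r);   /* 1111222233ab4444 */
    return 0;
}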
&env->sse_status); - r.ZMM_D(1) = float64_add(s->ZMM_D(0), s->ZMM_D(1), &env->sse_status); - *d = r; + int i; + for (i = 0; i < 1 << SHIFT; i += 2) { + d->ZMM_D(i) = float64_sub(v->ZMM_D(i), s->ZMM_D(i), &env->sse_status); + d->ZMM_D(i+1) = float64_add(v->ZMM_D(i+1), s->ZMM_D(i+1), &env->sse_status); + } } -void helper_hsubps(CPUX86State *env, ZMMReg *d, ZMMReg *s) -{ - ZMMReg r; - - r.ZMM_S(0) = float32_sub(d->ZMM_S(0), d->ZMM_S(1), &env->sse_status); - r.ZMM_S(1) = float32_sub(d->ZMM_S(2), d->ZMM_S(3), &env->sse_status); - r.ZMM_S(2) = float32_sub(s->ZMM_S(0), s->ZMM_S(1), &env->sse_status); - r.ZMM_S(3) = float32_sub(s->ZMM_S(2), s->ZMM_S(3), &env->sse_status); - *d = r; -} - -void helper_hsubpd(CPUX86State *env, ZMMReg *d, ZMMReg *s) -{ - ZMMReg r; - - r.ZMM_D(0) = float64_sub(d->ZMM_D(0), d->ZMM_D(1), &env->sse_status); - r.ZMM_D(1) = float64_sub(s->ZMM_D(0), s->ZMM_D(1), &env->sse_status); - *d = r; -} - -void helper_addsubps(CPUX86State *env, ZMMReg *d, ZMMReg *s) -{ - d->ZMM_S(0) = float32_sub(d->ZMM_S(0), s->ZMM_S(0), &env->sse_status); - d->ZMM_S(1) = float32_add(d->ZMM_S(1), s->ZMM_S(1), &env->sse_status); - d->ZMM_S(2) = float32_sub(d->ZMM_S(2), s->ZMM_S(2), &env->sse_status); - d->ZMM_S(3) = float32_add(d->ZMM_S(3), s->ZMM_S(3), &env->sse_status); -} - -void helper_addsubpd(CPUX86State *env, ZMMReg *d, ZMMReg *s) -{ - d->ZMM_D(0) = float64_sub(d->ZMM_D(0), s->ZMM_D(0), &env->sse_status); - d->ZMM_D(1) = float64_add(d->ZMM_D(1), s->ZMM_D(1), &env->sse_status); -} - -/* XXX: unordered */ -#define SSE_HELPER_CMP(name, F) \ - void helper_ ## name ## ps(CPUX86State *env, Reg *d, Reg *s) \ +#define SSE_HELPER_CMP_P(name, F, C) \ + void glue(helper_ ## name ## ps, SUFFIX)(CPUX86State *env, \ + Reg *d, Reg *v, Reg *s) \ { \ - d->ZMM_L(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \ - d->ZMM_L(1) = F(32, d->ZMM_S(1), s->ZMM_S(1)); \ - d->ZMM_L(2) = F(32, d->ZMM_S(2), s->ZMM_S(2)); \ - d->ZMM_L(3) = F(32, d->ZMM_S(3), s->ZMM_S(3)); \ + int i; \ + for (i = 0; i < 2 << SHIFT; i++) { \ + d->ZMM_L(i) = C(F(32, v->ZMM_S(i), s->ZMM_S(i))) ? -1 : 0; \ + } \ } \ \ - void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *s) \ + void glue(helper_ ## name ## pd, SUFFIX)(CPUX86State *env, \ + Reg *d, Reg *v, Reg *s) \ { \ - d->ZMM_L(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \ - } \ - \ - void helper_ ## name ## pd(CPUX86State *env, Reg *d, Reg *s) \ - { \ - d->ZMM_Q(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \ - d->ZMM_Q(1) = F(64, d->ZMM_D(1), s->ZMM_D(1)); \ - } \ - \ - void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *s) \ - { \ - d->ZMM_Q(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \ + int i; \ + for (i = 0; i < 1 << SHIFT; i++) { \ + d->ZMM_Q(i) = C(F(64, v->ZMM_D(i), s->ZMM_D(i))) ? -1 : 0; \ + } \ } -#define FPU_CMPEQ(size, a, b) \ - (float ## size ## _eq_quiet(a, b, &env->sse_status) ? -1 : 0) -#define FPU_CMPLT(size, a, b) \ - (float ## size ## _lt(a, b, &env->sse_status) ? -1 : 0) -#define FPU_CMPLE(size, a, b) \ - (float ## size ## _le(a, b, &env->sse_status) ? -1 : 0) -#define FPU_CMPUNORD(size, a, b) \ - (float ## size ## _unordered_quiet(a, b, &env->sse_status) ? -1 : 0) -#define FPU_CMPNEQ(size, a, b) \ - (float ## size ## _eq_quiet(a, b, &env->sse_status) ? 0 : -1) -#define FPU_CMPNLT(size, a, b) \ - (float ## size ## _lt(a, b, &env->sse_status) ? 0 : -1) -#define FPU_CMPNLE(size, a, b) \ - (float ## size ## _le(a, b, &env->sse_status) ? 0 : -1) -#define FPU_CMPORD(size, a, b) \ - (float ## size ## _unordered_quiet(a, b, &env->sse_status) ? 
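Aside (not part of the patch): addsubps/addsubpd keep their even-subtract, odd-add lane pattern, just generalized to 2 << SHIFT (or 1 << SHIFT) elements and reading from v instead of the destination. A sketch with native floats in place of softfloat, assuming SHIFT == 1:

/* Lane pattern of the new addsubps; rounding state not modelled. */
#include <stdio.h>

#define NELEM 4   /* 2 << SHIFT with SHIFT == 1 */

static void addsubps_sketch(float *d, const float *v, const float *s)
{
    for (int i = 0; i < NELEM; i += 2) {
        d[i]     = v[i]     - s[i];      /* even lane: subtract */
        d[i + 1] = v[i + 1] + s[i + 1];  /* odd lane: add */
    }
}

int main(void)
{
    float v[NELEM] = {10, 10, 10, 10}, s[NELEM] = {1, 2, 3, 4}, d[NELEM];
    addsubps_sketch(d, v, s);
    printf("%g %g %g %g\n", d[0], d[1], d[2], d[3]);   /* 9 12 7 14 */
    return 0;
}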
0 : -1) +#if SHIFT == 1 +#define SSE_HELPER_CMP(name, F, C) \ + SSE_HELPER_CMP_P(name, F, C) \ + void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *v, Reg *s) \ + { \ + int i; \ + d->ZMM_L(0) = C(F(32, v->ZMM_S(0), s->ZMM_S(0))) ? -1 : 0; \ + for (i = 1; i < 2 << SHIFT; i++) { \ + d->ZMM_L(i) = v->ZMM_L(i); \ + } \ + } \ + \ + void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *v, Reg *s) \ + { \ + int i; \ + d->ZMM_Q(0) = C(F(64, v->ZMM_D(0), s->ZMM_D(0))) ? -1 : 0; \ + for (i = 1; i < 1 << SHIFT; i++) { \ + d->ZMM_Q(i) = v->ZMM_Q(i); \ + } \ + } -SSE_HELPER_CMP(cmpeq, FPU_CMPEQ) -SSE_HELPER_CMP(cmplt, FPU_CMPLT) -SSE_HELPER_CMP(cmple, FPU_CMPLE) -SSE_HELPER_CMP(cmpunord, FPU_CMPUNORD) -SSE_HELPER_CMP(cmpneq, FPU_CMPNEQ) -SSE_HELPER_CMP(cmpnlt, FPU_CMPNLT) -SSE_HELPER_CMP(cmpnle, FPU_CMPNLE) -SSE_HELPER_CMP(cmpord, FPU_CMPORD) +static inline bool FPU_EQU(FloatRelation x) +{ + return (x == float_relation_equal || x == float_relation_unordered); +} +static inline bool FPU_GE(FloatRelation x) +{ + return (x == float_relation_equal || x == float_relation_greater); +} +#define FPU_EQ(x) (x == float_relation_equal) +#define FPU_LT(x) (x == float_relation_less) +#define FPU_LE(x) (x <= float_relation_equal) +#define FPU_GT(x) (x == float_relation_greater) +#define FPU_UNORD(x) (x == float_relation_unordered) +/* We must make sure we evaluate the argument in case it is a signalling NAN */ +#define FPU_FALSE(x) (x == float_relation_equal && 0) +#define FPU_CMPQ(size, a, b) \ + float ## size ## _compare_quiet(a, b, &env->sse_status) +#define FPU_CMPS(size, a, b) \ + float ## size ## _compare(a, b, &env->sse_status) + +#else +#define SSE_HELPER_CMP(name, F, C) SSE_HELPER_CMP_P(name, F, C) +#endif + +SSE_HELPER_CMP(cmpeq, FPU_CMPQ, FPU_EQ) +SSE_HELPER_CMP(cmplt, FPU_CMPS, FPU_LT) +SSE_HELPER_CMP(cmple, FPU_CMPS, FPU_LE) +SSE_HELPER_CMP(cmpunord, FPU_CMPQ, FPU_UNORD) +SSE_HELPER_CMP(cmpneq, FPU_CMPQ, !FPU_EQ) +SSE_HELPER_CMP(cmpnlt, FPU_CMPS, !FPU_LT) +SSE_HELPER_CMP(cmpnle, FPU_CMPS, !FPU_LE) +SSE_HELPER_CMP(cmpord, FPU_CMPQ, !FPU_UNORD) + +SSE_HELPER_CMP(cmpequ, FPU_CMPQ, FPU_EQU) +SSE_HELPER_CMP(cmpnge, FPU_CMPS, !FPU_GE) +SSE_HELPER_CMP(cmpngt, FPU_CMPS, !FPU_GT) +SSE_HELPER_CMP(cmpfalse, FPU_CMPQ, FPU_FALSE) +SSE_HELPER_CMP(cmpnequ, FPU_CMPQ, !FPU_EQU) +SSE_HELPER_CMP(cmpge, FPU_CMPS, FPU_GE) +SSE_HELPER_CMP(cmpgt, FPU_CMPS, FPU_GT) +SSE_HELPER_CMP(cmptrue, FPU_CMPQ, !FPU_FALSE) + +SSE_HELPER_CMP(cmpeqs, FPU_CMPS, FPU_EQ) +SSE_HELPER_CMP(cmpltq, FPU_CMPQ, FPU_LT) +SSE_HELPER_CMP(cmpleq, FPU_CMPQ, FPU_LE) +SSE_HELPER_CMP(cmpunords, FPU_CMPS, FPU_UNORD) +SSE_HELPER_CMP(cmpneqq, FPU_CMPS, !FPU_EQ) +SSE_HELPER_CMP(cmpnltq, FPU_CMPQ, !FPU_LT) +SSE_HELPER_CMP(cmpnleq, FPU_CMPQ, !FPU_LE) +SSE_HELPER_CMP(cmpords, FPU_CMPS, !FPU_UNORD) + +SSE_HELPER_CMP(cmpequs, FPU_CMPS, FPU_EQU) +SSE_HELPER_CMP(cmpngeq, FPU_CMPQ, !FPU_GE) +SSE_HELPER_CMP(cmpngtq, FPU_CMPQ, !FPU_GT) +SSE_HELPER_CMP(cmpfalses, FPU_CMPS, FPU_FALSE) +SSE_HELPER_CMP(cmpnequs, FPU_CMPS, !FPU_EQU) +SSE_HELPER_CMP(cmpgeq, FPU_CMPQ, FPU_GE) +SSE_HELPER_CMP(cmpgtq, FPU_CMPQ, FPU_GT) +SSE_HELPER_CMP(cmptrues, FPU_CMPS, !FPU_FALSE) + +#undef SSE_HELPER_CMP + +#if SHIFT == 1 static const int comis_eflags[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C}; void helper_ucomiss(CPUX86State *env, Reg *d, Reg *s) @@ -1080,205 +1143,154 @@ void helper_comisd(CPUX86State *env, Reg *d, Reg *s) ret = float64_compare(d0, d1, &env->sse_status); CC_SRC = comis_eflags[ret + 1]; } +#endif -uint32_t helper_movmskps(CPUX86State *env, Reg *s) +uint32_t 
glue(helper_movmskps, SUFFIX)(CPUX86State *env, Reg *s) { - int b0, b1, b2, b3; + uint32_t mask; + int i; - b0 = s->ZMM_L(0) >> 31; - b1 = s->ZMM_L(1) >> 31; - b2 = s->ZMM_L(2) >> 31; - b3 = s->ZMM_L(3) >> 31; - return b0 | (b1 << 1) | (b2 << 2) | (b3 << 3); + mask = 0; + for (i = 0; i < 2 << SHIFT; i++) { + mask |= (s->ZMM_L(i) >> (31 - i)) & (1 << i); + } + return mask; } -uint32_t helper_movmskpd(CPUX86State *env, Reg *s) +uint32_t glue(helper_movmskpd, SUFFIX)(CPUX86State *env, Reg *s) { - int b0, b1; + uint32_t mask; + int i; - b0 = s->ZMM_L(1) >> 31; - b1 = s->ZMM_L(3) >> 31; - return b0 | (b1 << 1); + mask = 0; + for (i = 0; i < 1 << SHIFT; i++) { + mask |= (s->ZMM_Q(i) >> (63 - i)) & (1 << i); + } + return mask; } #endif -uint32_t glue(helper_pmovmskb, SUFFIX)(CPUX86State *env, Reg *s) -{ - uint32_t val; - - val = 0; - val |= (s->B(0) >> 7); - val |= (s->B(1) >> 6) & 0x02; - val |= (s->B(2) >> 5) & 0x04; - val |= (s->B(3) >> 4) & 0x08; - val |= (s->B(4) >> 3) & 0x10; - val |= (s->B(5) >> 2) & 0x20; - val |= (s->B(6) >> 1) & 0x40; - val |= (s->B(7)) & 0x80; -#if SHIFT == 1 - val |= (s->B(8) << 1) & 0x0100; - val |= (s->B(9) << 2) & 0x0200; - val |= (s->B(10) << 3) & 0x0400; - val |= (s->B(11) << 4) & 0x0800; - val |= (s->B(12) << 5) & 0x1000; - val |= (s->B(13) << 6) & 0x2000; - val |= (s->B(14) << 7) & 0x4000; - val |= (s->B(15) << 8) & 0x8000; -#endif - return val; +#define PACK_HELPER_B(name, F) \ +void glue(helper_pack ## name, SUFFIX)(CPUX86State *env, \ + Reg *d, Reg *v, Reg *s) \ +{ \ + uint8_t r[PACK_WIDTH * 2]; \ + int j, k; \ + for (j = 0; j < 4 << SHIFT; j += PACK_WIDTH) { \ + for (k = 0; k < PACK_WIDTH; k++) { \ + r[k] = F((int16_t)v->W(j + k)); \ + } \ + for (k = 0; k < PACK_WIDTH; k++) { \ + r[PACK_WIDTH + k] = F((int16_t)s->W(j + k)); \ + } \ + for (k = 0; k < PACK_WIDTH * 2; k++) { \ + d->B(2 * j + k) = r[k]; \ + } \ + } \ } -void glue(helper_packsswb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +PACK_HELPER_B(sswb, satsb) +PACK_HELPER_B(uswb, satub) + +void glue(helper_packssdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) { - Reg r; + uint16_t r[PACK_WIDTH]; + int j, k; - r.B(0) = satsb((int16_t)d->W(0)); - r.B(1) = satsb((int16_t)d->W(1)); - r.B(2) = satsb((int16_t)d->W(2)); - r.B(3) = satsb((int16_t)d->W(3)); -#if SHIFT == 1 - r.B(4) = satsb((int16_t)d->W(4)); - r.B(5) = satsb((int16_t)d->W(5)); - r.B(6) = satsb((int16_t)d->W(6)); - r.B(7) = satsb((int16_t)d->W(7)); -#endif - r.B((4 << SHIFT) + 0) = satsb((int16_t)s->W(0)); - r.B((4 << SHIFT) + 1) = satsb((int16_t)s->W(1)); - r.B((4 << SHIFT) + 2) = satsb((int16_t)s->W(2)); - r.B((4 << SHIFT) + 3) = satsb((int16_t)s->W(3)); -#if SHIFT == 1 - r.B(12) = satsb((int16_t)s->W(4)); - r.B(13) = satsb((int16_t)s->W(5)); - r.B(14) = satsb((int16_t)s->W(6)); - r.B(15) = satsb((int16_t)s->W(7)); -#endif - *d = r; -} - -void glue(helper_packuswb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) -{ - Reg r; - - r.B(0) = satub((int16_t)d->W(0)); - r.B(1) = satub((int16_t)d->W(1)); - r.B(2) = satub((int16_t)d->W(2)); - r.B(3) = satub((int16_t)d->W(3)); -#if SHIFT == 1 - r.B(4) = satub((int16_t)d->W(4)); - r.B(5) = satub((int16_t)d->W(5)); - r.B(6) = satub((int16_t)d->W(6)); - r.B(7) = satub((int16_t)d->W(7)); -#endif - r.B((4 << SHIFT) + 0) = satub((int16_t)s->W(0)); - r.B((4 << SHIFT) + 1) = satub((int16_t)s->W(1)); - r.B((4 << SHIFT) + 2) = satub((int16_t)s->W(2)); - r.B((4 << SHIFT) + 3) = satub((int16_t)s->W(3)); -#if SHIFT == 1 - r.B(12) = satub((int16_t)s->W(4)); - r.B(13) = satub((int16_t)s->W(5)); - r.B(14) = 
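Reviewer note (not part of the patch): the movmskps rewrite gathers the sign bit of element i into bit i of the result with (L(i) >> (31 - i)) & (1 << i), and movmskpd does the same from bit 63 of each quad. A standalone sketch of the 32-bit case:

/* Sign-bit gathering as done by the new helper_movmskps loop. */
#include <stdint.h>
#include <stdio.h>

static uint32_t movmskps_sketch(const uint32_t *l, int nelem)
{
    uint32_t mask = 0;
    for (int i = 0; i < nelem; i++) {
        mask |= (l[i] >> (31 - i)) & (1u << i);   /* sign bit of lane i -> bit i */
    }
    return mask;
}

int main(void)
{
    /* lanes 0 and 2 have the sign bit set, lanes 1 and 3 do not */
    uint32_t l[4] = { 0x80000000u, 0x7fffffffu, 0xffffffffu, 0x00000001u };
    printf("%x\n", movmskps_sketch(l, 4));        /* 5 */
    return 0;
}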
satub((int16_t)s->W(6)); - r.B(15) = satub((int16_t)s->W(7)); -#endif - *d = r; -} - -void glue(helper_packssdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) -{ - Reg r; - - r.W(0) = satsw(d->L(0)); - r.W(1) = satsw(d->L(1)); -#if SHIFT == 1 - r.W(2) = satsw(d->L(2)); - r.W(3) = satsw(d->L(3)); -#endif - r.W((2 << SHIFT) + 0) = satsw(s->L(0)); - r.W((2 << SHIFT) + 1) = satsw(s->L(1)); -#if SHIFT == 1 - r.W(6) = satsw(s->L(2)); - r.W(7) = satsw(s->L(3)); -#endif - *d = r; + for (j = 0; j < 2 << SHIFT; j += PACK_WIDTH / 2) { + for (k = 0; k < PACK_WIDTH / 2; k++) { + r[k] = satsw(v->L(j + k)); + } + for (k = 0; k < PACK_WIDTH / 2; k++) { + r[PACK_WIDTH / 2 + k] = satsw(s->L(j + k)); + } + for (k = 0; k < PACK_WIDTH; k++) { + d->W(2 * j + k) = r[k]; + } + } } #define UNPCK_OP(base_name, base) \ \ void glue(helper_punpck ## base_name ## bw, SUFFIX)(CPUX86State *env,\ - Reg *d, Reg *s) \ + Reg *d, Reg *v, Reg *s) \ { \ - Reg r; \ + uint8_t r[PACK_WIDTH * 2]; \ + int j, i; \ \ - r.B(0) = d->B((base << (SHIFT + 2)) + 0); \ - r.B(1) = s->B((base << (SHIFT + 2)) + 0); \ - r.B(2) = d->B((base << (SHIFT + 2)) + 1); \ - r.B(3) = s->B((base << (SHIFT + 2)) + 1); \ - r.B(4) = d->B((base << (SHIFT + 2)) + 2); \ - r.B(5) = s->B((base << (SHIFT + 2)) + 2); \ - r.B(6) = d->B((base << (SHIFT + 2)) + 3); \ - r.B(7) = s->B((base << (SHIFT + 2)) + 3); \ - XMM_ONLY( \ - r.B(8) = d->B((base << (SHIFT + 2)) + 4); \ - r.B(9) = s->B((base << (SHIFT + 2)) + 4); \ - r.B(10) = d->B((base << (SHIFT + 2)) + 5); \ - r.B(11) = s->B((base << (SHIFT + 2)) + 5); \ - r.B(12) = d->B((base << (SHIFT + 2)) + 6); \ - r.B(13) = s->B((base << (SHIFT + 2)) + 6); \ - r.B(14) = d->B((base << (SHIFT + 2)) + 7); \ - r.B(15) = s->B((base << (SHIFT + 2)) + 7); \ - ) \ - *d = r; \ + for (j = 0; j < 8 << SHIFT; ) { \ + int k = j + base * PACK_WIDTH; \ + for (i = 0; i < PACK_WIDTH; i++) { \ + r[2 * i] = v->B(k + i); \ + r[2 * i + 1] = s->B(k + i); \ + } \ + for (i = 0; i < PACK_WIDTH * 2; i++, j++) { \ + d->B(j) = r[i]; \ + } \ + } \ } \ \ void glue(helper_punpck ## base_name ## wd, SUFFIX)(CPUX86State *env,\ - Reg *d, Reg *s) \ + Reg *d, Reg *v, Reg *s) \ { \ - Reg r; \ + uint16_t r[PACK_WIDTH]; \ + int j, i; \ \ - r.W(0) = d->W((base << (SHIFT + 1)) + 0); \ - r.W(1) = s->W((base << (SHIFT + 1)) + 0); \ - r.W(2) = d->W((base << (SHIFT + 1)) + 1); \ - r.W(3) = s->W((base << (SHIFT + 1)) + 1); \ - XMM_ONLY( \ - r.W(4) = d->W((base << (SHIFT + 1)) + 2); \ - r.W(5) = s->W((base << (SHIFT + 1)) + 2); \ - r.W(6) = d->W((base << (SHIFT + 1)) + 3); \ - r.W(7) = s->W((base << (SHIFT + 1)) + 3); \ - ) \ - *d = r; \ + for (j = 0; j < 4 << SHIFT; ) { \ + int k = j + base * PACK_WIDTH / 2; \ + for (i = 0; i < PACK_WIDTH / 2; i++) { \ + r[2 * i] = v->W(k + i); \ + r[2 * i + 1] = s->W(k + i); \ + } \ + for (i = 0; i < PACK_WIDTH; i++, j++) { \ + d->W(j) = r[i]; \ + } \ + } \ } \ \ void glue(helper_punpck ## base_name ## dq, SUFFIX)(CPUX86State *env,\ - Reg *d, Reg *s) \ + Reg *d, Reg *v, Reg *s) \ { \ - Reg r; \ + uint32_t r[PACK_WIDTH / 2]; \ + int j, i; \ \ - r.L(0) = d->L((base << SHIFT) + 0); \ - r.L(1) = s->L((base << SHIFT) + 0); \ - XMM_ONLY( \ - r.L(2) = d->L((base << SHIFT) + 1); \ - r.L(3) = s->L((base << SHIFT) + 1); \ - ) \ - *d = r; \ + for (j = 0; j < 2 << SHIFT; ) { \ + int k = j + base * PACK_WIDTH / 4; \ + for (i = 0; i < PACK_WIDTH / 4; i++) { \ + r[2 * i] = v->L(k + i); \ + r[2 * i + 1] = s->L(k + i); \ + } \ + for (i = 0; i < PACK_WIDTH / 2; i++, j++) { \ + d->L(j) = r[i]; \ + } \ + } \ } \ \ XMM_ONLY( \ - void glue(helper_punpck ## 
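Review annotation, not part of the patch: the UNPCK_OP loops interleave elements from v and s pairwise within each lane and buffer the result in a local array, so the destination may alias either source. A sketch of the low byte unpack, assuming PACK_WIDTH is 8 (its definition sits outside this hunk):

/* Pairwise interleave performed by the punpcklbw case of UNPCK_OP. */
#include <stdint.h>
#include <stdio.h>

#define PACK_WIDTH 8   /* assumed value; defined elsewhere in the file */

static void punpcklbw_sketch(uint8_t *d, const uint8_t *v, const uint8_t *s)
{
    uint8_t r[PACK_WIDTH * 2];
    for (int i = 0; i < PACK_WIDTH; i++) {
        r[2 * i]     = v[i];   /* even result bytes come from v */
        r[2 * i + 1] = s[i];   /* odd result bytes come from s  */
    }
    for (int i = 0; i < PACK_WIDTH * 2; i++) {
        d[i] = r[i];           /* buffered copy keeps aliasing safe */
    }
}

int main(void)
{
    uint8_t v[PACK_WIDTH] = {0, 1, 2, 3, 4, 5, 6, 7};
    uint8_t s[PACK_WIDTH] = {100, 101, 102, 103, 104, 105, 106, 107};
    uint8_t d[PACK_WIDTH * 2];
    punpcklbw_sketch(d, v, s);
    printf("%d %d %d %d\n", d[0], d[1], d[2], d[3]);   /* 0 100 1 101 */
    return 0;
}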
base_name ## qdq, SUFFIX)(CPUX86State \ - *env, \ - Reg *d, \ - Reg *s) \ + void glue(helper_punpck ## base_name ## qdq, SUFFIX)( \ + CPUX86State *env, Reg *d, Reg *v, Reg *s) \ { \ - Reg r; \ + uint64_t r[2]; \ + int i; \ \ - r.Q(0) = d->Q(base); \ - r.Q(1) = s->Q(base); \ - *d = r; \ + for (i = 0; i < 1 << SHIFT; i += 2) { \ + r[0] = v->Q(base + i); \ + r[1] = s->Q(base + i); \ + d->Q(i) = r[0]; \ + d->Q(i + 1) = r[1]; \ + } \ } \ ) UNPCK_OP(l, 0) UNPCK_OP(h, 1) +#undef PACK_WIDTH +#undef PACK_HELPER_B +#undef UNPCK_OP + + /* 3DNow! float ops */ #if SHIFT == 0 void helper_pi2fd(CPUX86State *env, MMXReg *d, MMXReg *s) @@ -1309,11 +1321,11 @@ void helper_pf2iw(CPUX86State *env, MMXReg *d, MMXReg *s) void helper_pfacc(CPUX86State *env, MMXReg *d, MMXReg *s) { - MMXReg r; + float32 r; - r.MMX_S(0) = float32_add(d->MMX_S(0), d->MMX_S(1), &env->mmx_status); - r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status); - *d = r; + r = float32_add(d->MMX_S(0), d->MMX_S(1), &env->mmx_status); + d->MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status); + d->MMX_S(0) = r; } void helper_pfadd(CPUX86State *env, MMXReg *d, MMXReg *s) @@ -1374,20 +1386,20 @@ void helper_pfmul(CPUX86State *env, MMXReg *d, MMXReg *s) void helper_pfnacc(CPUX86State *env, MMXReg *d, MMXReg *s) { - MMXReg r; + float32 r; - r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status); - r.MMX_S(1) = float32_sub(s->MMX_S(0), s->MMX_S(1), &env->mmx_status); - *d = r; + r = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status); + d->MMX_S(1) = float32_sub(s->MMX_S(0), s->MMX_S(1), &env->mmx_status); + d->MMX_S(0) = r; } void helper_pfpnacc(CPUX86State *env, MMXReg *d, MMXReg *s) { - MMXReg r; + float32 r; - r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status); - r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status); - *d = r; + r = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status); + d->MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status); + d->MMX_S(0) = r; } void helper_pfrcp(CPUX86State *env, MMXReg *d, MMXReg *s) @@ -1420,132 +1432,94 @@ void helper_pfsubr(CPUX86State *env, MMXReg *d, MMXReg *s) void helper_pswapd(CPUX86State *env, MMXReg *d, MMXReg *s) { - MMXReg r; + uint32_t r; - r.MMX_L(0) = s->MMX_L(1); - r.MMX_L(1) = s->MMX_L(0); - *d = r; + r = s->MMX_L(0); + d->MMX_L(0) = s->MMX_L(1); + d->MMX_L(1) = r; } #endif /* SSSE3 op helpers */ -void glue(helper_pshufb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_pshufb, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) { int i; - Reg r; +#if SHIFT == 0 + uint8_t r[8]; - for (i = 0; i < (8 << SHIFT); i++) { - r.B(i) = (s->B(i) & 0x80) ? 0 : (d->B(s->B(i) & ((8 << SHIFT) - 1))); + for (i = 0; i < 8; i++) { + r[i] = (s->B(i) & 0x80) ? 
0 : (v->B(s->B(i) & 7)); } + for (i = 0; i < 8; i++) { + d->B(i) = r[i]; + } +#else + uint8_t r[8 << SHIFT]; - *d = r; -} - -void glue(helper_phaddw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) -{ - - Reg r; - - r.W(0) = (int16_t)d->W(0) + (int16_t)d->W(1); - r.W(1) = (int16_t)d->W(2) + (int16_t)d->W(3); - XMM_ONLY(r.W(2) = (int16_t)d->W(4) + (int16_t)d->W(5)); - XMM_ONLY(r.W(3) = (int16_t)d->W(6) + (int16_t)d->W(7)); - r.W((2 << SHIFT) + 0) = (int16_t)s->W(0) + (int16_t)s->W(1); - r.W((2 << SHIFT) + 1) = (int16_t)s->W(2) + (int16_t)s->W(3); - XMM_ONLY(r.W(6) = (int16_t)s->W(4) + (int16_t)s->W(5)); - XMM_ONLY(r.W(7) = (int16_t)s->W(6) + (int16_t)s->W(7)); - - *d = r; -} - -void glue(helper_phaddd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) -{ - Reg r; - - r.L(0) = (int32_t)d->L(0) + (int32_t)d->L(1); - XMM_ONLY(r.L(1) = (int32_t)d->L(2) + (int32_t)d->L(3)); - r.L((1 << SHIFT) + 0) = (int32_t)s->L(0) + (int32_t)s->L(1); - XMM_ONLY(r.L(3) = (int32_t)s->L(2) + (int32_t)s->L(3)); - - *d = r; -} - -void glue(helper_phaddsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) -{ - Reg r; - - r.W(0) = satsw((int16_t)d->W(0) + (int16_t)d->W(1)); - r.W(1) = satsw((int16_t)d->W(2) + (int16_t)d->W(3)); - XMM_ONLY(r.W(2) = satsw((int16_t)d->W(4) + (int16_t)d->W(5))); - XMM_ONLY(r.W(3) = satsw((int16_t)d->W(6) + (int16_t)d->W(7))); - r.W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) + (int16_t)s->W(1)); - r.W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) + (int16_t)s->W(3)); - XMM_ONLY(r.W(6) = satsw((int16_t)s->W(4) + (int16_t)s->W(5))); - XMM_ONLY(r.W(7) = satsw((int16_t)s->W(6) + (int16_t)s->W(7))); - - *d = r; -} - -void glue(helper_pmaddubsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) -{ - d->W(0) = satsw((int8_t)s->B(0) * (uint8_t)d->B(0) + - (int8_t)s->B(1) * (uint8_t)d->B(1)); - d->W(1) = satsw((int8_t)s->B(2) * (uint8_t)d->B(2) + - (int8_t)s->B(3) * (uint8_t)d->B(3)); - d->W(2) = satsw((int8_t)s->B(4) * (uint8_t)d->B(4) + - (int8_t)s->B(5) * (uint8_t)d->B(5)); - d->W(3) = satsw((int8_t)s->B(6) * (uint8_t)d->B(6) + - (int8_t)s->B(7) * (uint8_t)d->B(7)); -#if SHIFT == 1 - d->W(4) = satsw((int8_t)s->B(8) * (uint8_t)d->B(8) + - (int8_t)s->B(9) * (uint8_t)d->B(9)); - d->W(5) = satsw((int8_t)s->B(10) * (uint8_t)d->B(10) + - (int8_t)s->B(11) * (uint8_t)d->B(11)); - d->W(6) = satsw((int8_t)s->B(12) * (uint8_t)d->B(12) + - (int8_t)s->B(13) * (uint8_t)d->B(13)); - d->W(7) = satsw((int8_t)s->B(14) * (uint8_t)d->B(14) + - (int8_t)s->B(15) * (uint8_t)d->B(15)); + for (i = 0; i < 8 << SHIFT; i++) { + int j = i & ~0xf; + r[i] = (s->B(i) & 0x80) ? 
0 : v->B(j | (s->B(i) & 0xf)); + } + for (i = 0; i < 8 << SHIFT; i++) { + d->B(i) = r[i]; + } #endif } -void glue(helper_phsubw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) -{ - d->W(0) = (int16_t)d->W(0) - (int16_t)d->W(1); - d->W(1) = (int16_t)d->W(2) - (int16_t)d->W(3); - XMM_ONLY(d->W(2) = (int16_t)d->W(4) - (int16_t)d->W(5)); - XMM_ONLY(d->W(3) = (int16_t)d->W(6) - (int16_t)d->W(7)); - d->W((2 << SHIFT) + 0) = (int16_t)s->W(0) - (int16_t)s->W(1); - d->W((2 << SHIFT) + 1) = (int16_t)s->W(2) - (int16_t)s->W(3); - XMM_ONLY(d->W(6) = (int16_t)s->W(4) - (int16_t)s->W(5)); - XMM_ONLY(d->W(7) = (int16_t)s->W(6) - (int16_t)s->W(7)); +#define SSE_HELPER_HW(name, F) \ +void glue(helper_ ## name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \ +{ \ + uint16_t r[4 << SHIFT]; \ + int i, j, k; \ + for (k = 0; k < 4 << SHIFT; k += LANE_WIDTH / 2) { \ + for (i = j = 0; j < LANE_WIDTH / 2; i++, j += 2) { \ + r[i + k] = F(v->W(j + k), v->W(j + k + 1)); \ + } \ + for (j = 0; j < LANE_WIDTH / 2; i++, j += 2) { \ + r[i + k] = F(s->W(j + k), s->W(j + k + 1)); \ + } \ + } \ + for (i = 0; i < 4 << SHIFT; i++) { \ + d->W(i) = r[i]; \ + } \ } -void glue(helper_phsubd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) -{ - d->L(0) = (int32_t)d->L(0) - (int32_t)d->L(1); - XMM_ONLY(d->L(1) = (int32_t)d->L(2) - (int32_t)d->L(3)); - d->L((1 << SHIFT) + 0) = (int32_t)s->L(0) - (int32_t)s->L(1); - XMM_ONLY(d->L(3) = (int32_t)s->L(2) - (int32_t)s->L(3)); +#define SSE_HELPER_HL(name, F) \ +void glue(helper_ ## name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \ +{ \ + uint32_t r[2 << SHIFT]; \ + int i, j, k; \ + for (k = 0; k < 2 << SHIFT; k += LANE_WIDTH / 4) { \ + for (i = j = 0; j < LANE_WIDTH / 4; i++, j += 2) { \ + r[i + k] = F(v->L(j + k), v->L(j + k + 1)); \ + } \ + for (j = 0; j < LANE_WIDTH / 4; i++, j += 2) { \ + r[i + k] = F(s->L(j + k), s->L(j + k + 1)); \ + } \ + } \ + for (i = 0; i < 2 << SHIFT; i++) { \ + d->L(i) = r[i]; \ + } \ } -void glue(helper_phsubsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) -{ - d->W(0) = satsw((int16_t)d->W(0) - (int16_t)d->W(1)); - d->W(1) = satsw((int16_t)d->W(2) - (int16_t)d->W(3)); - XMM_ONLY(d->W(2) = satsw((int16_t)d->W(4) - (int16_t)d->W(5))); - XMM_ONLY(d->W(3) = satsw((int16_t)d->W(6) - (int16_t)d->W(7))); - d->W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) - (int16_t)s->W(1)); - d->W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) - (int16_t)s->W(3)); - XMM_ONLY(d->W(6) = satsw((int16_t)s->W(4) - (int16_t)s->W(5))); - XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) - (int16_t)s->W(7))); -} +SSE_HELPER_HW(phaddw, FADD) +SSE_HELPER_HW(phsubw, FSUB) +SSE_HELPER_HW(phaddsw, FADDSW) +SSE_HELPER_HW(phsubsw, FSUBSW) +SSE_HELPER_HL(phaddd, FADD) +SSE_HELPER_HL(phsubd, FSUB) -#define FABSB(_, x) (x > INT8_MAX ? -(int8_t)x : x) -#define FABSW(_, x) (x > INT16_MAX ? -(int16_t)x : x) -#define FABSL(_, x) (x > INT32_MAX ? 
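Reviewer note (not part of the patch): pmaddubsw pairs bytes 2i and 2i+1, multiplying the unsigned byte from v by the signed byte from s, then sums the two products and saturates to a signed word. A standalone sketch with a local saturation helper standing in for QEMU's satsw:

/* Element pairing and saturation of the new helper_pmaddubsw. */
#include <stdint.h>
#include <stdio.h>

static int16_t satsw_sk(int x)
{
    return x < -32768 ? -32768 : x > 32767 ? 32767 : x;
}

static void pmaddubsw_sketch(int16_t *d, const uint8_t *v,
                             const int8_t *s, int nwords)
{
    for (int i = 0; i < nwords; i++) {
        d[i] = satsw_sk((int)s[i * 2]     * v[i * 2] +      /* signed * unsigned */
                        (int)s[i * 2 + 1] * v[i * 2 + 1]);
    }
}

int main(void)
{
    uint8_t v[4] = {200, 200, 10, 10};
    int8_t  s[4] = {100, 100, -2, 3};
    int16_t d[2];
    pmaddubsw_sketch(d, v, s, 2);           /* nwords is 4 << SHIFT in the helper */
    printf("%d %d\n", d[0], d[1]);          /* 32767 (saturated) 10 */
    return 0;
}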
-(int32_t)x : x) -SSE_HELPER_B(helper_pabsb, FABSB) -SSE_HELPER_W(helper_pabsw, FABSW) -SSE_HELPER_L(helper_pabsd, FABSL) +#undef SSE_HELPER_HW +#undef SSE_HELPER_HL + +void glue(helper_pmaddubsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) +{ + int i; + for (i = 0; i < 4 << SHIFT; i++) { + d->W(i) = satsw((int8_t)s->B(i * 2) * (uint8_t)v->B(i * 2) + + (int8_t)s->B(i * 2 + 1) * (uint8_t)v->B(i * 2 + 1)); + } +} #define FMULHRSW(d, s) (((int16_t) d * (int16_t)s + 0x4000) >> 15) SSE_HELPER_W(helper_pmulhrsw, FMULHRSW) @@ -1557,186 +1531,146 @@ SSE_HELPER_B(helper_psignb, FSIGNB) SSE_HELPER_W(helper_psignw, FSIGNW) SSE_HELPER_L(helper_psignd, FSIGNL) -void glue(helper_palignr, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, - int32_t shift) +void glue(helper_palignr, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, + uint32_t imm) { - Reg r; + int i; /* XXX could be checked during translation */ - if (shift >= (16 << SHIFT)) { - r.Q(0) = 0; - XMM_ONLY(r.Q(1) = 0); + if (imm >= (SHIFT ? 32 : 16)) { + for (i = 0; i < (1 << SHIFT); i++) { + d->Q(i) = 0; + } } else { - shift <<= 3; + int shift = imm * 8; #define SHR(v, i) (i < 64 && i > -64 ? i > 0 ? v >> (i) : (v << -(i)) : 0) #if SHIFT == 0 - r.Q(0) = SHR(s->Q(0), shift - 0) | - SHR(d->Q(0), shift - 64); + d->Q(0) = SHR(s->Q(0), shift - 0) | + SHR(v->Q(0), shift - 64); #else - r.Q(0) = SHR(s->Q(0), shift - 0) | - SHR(s->Q(1), shift - 64) | - SHR(d->Q(0), shift - 128) | - SHR(d->Q(1), shift - 192); - r.Q(1) = SHR(s->Q(0), shift + 64) | - SHR(s->Q(1), shift - 0) | - SHR(d->Q(0), shift - 64) | - SHR(d->Q(1), shift - 128); + for (i = 0; i < (1 << SHIFT); i += 2) { + uint64_t r0, r1; + + r0 = SHR(s->Q(i), shift - 0) | + SHR(s->Q(i + 1), shift - 64) | + SHR(v->Q(i), shift - 128) | + SHR(v->Q(i + 1), shift - 192); + r1 = SHR(s->Q(i), shift + 64) | + SHR(s->Q(i + 1), shift - 0) | + SHR(v->Q(i), shift - 64) | + SHR(v->Q(i + 1), shift - 128); + d->Q(i) = r0; + d->Q(i + 1) = r1; + } #endif #undef SHR } - - *d = r; } -#define XMM0 (env->xmm_regs[0]) +#if SHIFT >= 1 -#if SHIFT == 1 #define SSE_HELPER_V(name, elem, num, F) \ - void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ + void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, \ + Reg *m) \ { \ - d->elem(0) = F(d->elem(0), s->elem(0), XMM0.elem(0)); \ - d->elem(1) = F(d->elem(1), s->elem(1), XMM0.elem(1)); \ - if (num > 2) { \ - d->elem(2) = F(d->elem(2), s->elem(2), XMM0.elem(2)); \ - d->elem(3) = F(d->elem(3), s->elem(3), XMM0.elem(3)); \ - if (num > 4) { \ - d->elem(4) = F(d->elem(4), s->elem(4), XMM0.elem(4)); \ - d->elem(5) = F(d->elem(5), s->elem(5), XMM0.elem(5)); \ - d->elem(6) = F(d->elem(6), s->elem(6), XMM0.elem(6)); \ - d->elem(7) = F(d->elem(7), s->elem(7), XMM0.elem(7)); \ - if (num > 8) { \ - d->elem(8) = F(d->elem(8), s->elem(8), XMM0.elem(8)); \ - d->elem(9) = F(d->elem(9), s->elem(9), XMM0.elem(9)); \ - d->elem(10) = F(d->elem(10), s->elem(10), XMM0.elem(10)); \ - d->elem(11) = F(d->elem(11), s->elem(11), XMM0.elem(11)); \ - d->elem(12) = F(d->elem(12), s->elem(12), XMM0.elem(12)); \ - d->elem(13) = F(d->elem(13), s->elem(13), XMM0.elem(13)); \ - d->elem(14) = F(d->elem(14), s->elem(14), XMM0.elem(14)); \ - d->elem(15) = F(d->elem(15), s->elem(15), XMM0.elem(15)); \ - } \ - } \ + int i; \ + for (i = 0; i < num; i++) { \ + d->elem(i) = F(v->elem(i), s->elem(i), m->elem(i)); \ } \ } #define SSE_HELPER_I(name, elem, num, F) \ - void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t imm) \ + void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, 
\ + uint32_t imm) \ { \ - d->elem(0) = F(d->elem(0), s->elem(0), ((imm >> 0) & 1)); \ - d->elem(1) = F(d->elem(1), s->elem(1), ((imm >> 1) & 1)); \ - if (num > 2) { \ - d->elem(2) = F(d->elem(2), s->elem(2), ((imm >> 2) & 1)); \ - d->elem(3) = F(d->elem(3), s->elem(3), ((imm >> 3) & 1)); \ - if (num > 4) { \ - d->elem(4) = F(d->elem(4), s->elem(4), ((imm >> 4) & 1)); \ - d->elem(5) = F(d->elem(5), s->elem(5), ((imm >> 5) & 1)); \ - d->elem(6) = F(d->elem(6), s->elem(6), ((imm >> 6) & 1)); \ - d->elem(7) = F(d->elem(7), s->elem(7), ((imm >> 7) & 1)); \ - if (num > 8) { \ - d->elem(8) = F(d->elem(8), s->elem(8), ((imm >> 8) & 1)); \ - d->elem(9) = F(d->elem(9), s->elem(9), ((imm >> 9) & 1)); \ - d->elem(10) = F(d->elem(10), s->elem(10), \ - ((imm >> 10) & 1)); \ - d->elem(11) = F(d->elem(11), s->elem(11), \ - ((imm >> 11) & 1)); \ - d->elem(12) = F(d->elem(12), s->elem(12), \ - ((imm >> 12) & 1)); \ - d->elem(13) = F(d->elem(13), s->elem(13), \ - ((imm >> 13) & 1)); \ - d->elem(14) = F(d->elem(14), s->elem(14), \ - ((imm >> 14) & 1)); \ - d->elem(15) = F(d->elem(15), s->elem(15), \ - ((imm >> 15) & 1)); \ - } \ - } \ + int i; \ + for (i = 0; i < num; i++) { \ + int j = i & 7; \ + d->elem(i) = F(v->elem(i), s->elem(i), (imm >> j) & 1); \ } \ } /* SSE4.1 op helpers */ -#define FBLENDVB(d, s, m) ((m & 0x80) ? s : d) -#define FBLENDVPS(d, s, m) ((m & 0x80000000) ? s : d) -#define FBLENDVPD(d, s, m) ((m & 0x8000000000000000LL) ? s : d) -SSE_HELPER_V(helper_pblendvb, B, 16, FBLENDVB) -SSE_HELPER_V(helper_blendvps, L, 4, FBLENDVPS) -SSE_HELPER_V(helper_blendvpd, Q, 2, FBLENDVPD) +#define FBLENDVB(v, s, m) ((m & 0x80) ? s : v) +#define FBLENDVPS(v, s, m) ((m & 0x80000000) ? s : v) +#define FBLENDVPD(v, s, m) ((m & 0x8000000000000000LL) ? s : v) +SSE_HELPER_V(helper_pblendvb, B, 8 << SHIFT, FBLENDVB) +SSE_HELPER_V(helper_blendvps, L, 2 << SHIFT, FBLENDVPS) +SSE_HELPER_V(helper_blendvpd, Q, 1 << SHIFT, FBLENDVPD) void glue(helper_ptest, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { - uint64_t zf = (s->Q(0) & d->Q(0)) | (s->Q(1) & d->Q(1)); - uint64_t cf = (s->Q(0) & ~d->Q(0)) | (s->Q(1) & ~d->Q(1)); + uint64_t zf = 0, cf = 0; + int i; + for (i = 0; i < 1 << SHIFT; i++) { + zf |= (s->Q(i) & d->Q(i)); + cf |= (s->Q(i) & ~d->Q(i)); + } CC_SRC = (zf ? 0 : CC_Z) | (cf ? 
0 : CC_C); } -#define SSE_HELPER_F(name, elem, num, F) \ - void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ - { \ - if (num > 2) { \ - if (num > 4) { \ - d->elem(7) = F(7); \ - d->elem(6) = F(6); \ - d->elem(5) = F(5); \ - d->elem(4) = F(4); \ - } \ - d->elem(3) = F(3); \ - d->elem(2) = F(2); \ - } \ - d->elem(1) = F(1); \ - d->elem(0) = F(0); \ +#define FMOVSLDUP(i) s->L((i) & ~1) +#define FMOVSHDUP(i) s->L((i) | 1) +#define FMOVDLDUP(i) s->Q((i) & ~1) + +#define SSE_HELPER_F(name, elem, num, F) \ + void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ + { \ + int n = num; \ + for (int i = n; --i >= 0; ) { \ + d->elem(i) = F(i); \ + } \ } -SSE_HELPER_F(helper_pmovsxbw, W, 8, (int8_t) s->B) -SSE_HELPER_F(helper_pmovsxbd, L, 4, (int8_t) s->B) -SSE_HELPER_F(helper_pmovsxbq, Q, 2, (int8_t) s->B) -SSE_HELPER_F(helper_pmovsxwd, L, 4, (int16_t) s->W) -SSE_HELPER_F(helper_pmovsxwq, Q, 2, (int16_t) s->W) -SSE_HELPER_F(helper_pmovsxdq, Q, 2, (int32_t) s->L) -SSE_HELPER_F(helper_pmovzxbw, W, 8, s->B) -SSE_HELPER_F(helper_pmovzxbd, L, 4, s->B) -SSE_HELPER_F(helper_pmovzxbq, Q, 2, s->B) -SSE_HELPER_F(helper_pmovzxwd, L, 4, s->W) -SSE_HELPER_F(helper_pmovzxwq, Q, 2, s->W) -SSE_HELPER_F(helper_pmovzxdq, Q, 2, s->L) +#if SHIFT > 0 +SSE_HELPER_F(helper_pmovsxbw, W, 4 << SHIFT, (int8_t) s->B) +SSE_HELPER_F(helper_pmovsxbd, L, 2 << SHIFT, (int8_t) s->B) +SSE_HELPER_F(helper_pmovsxbq, Q, 1 << SHIFT, (int8_t) s->B) +SSE_HELPER_F(helper_pmovsxwd, L, 2 << SHIFT, (int16_t) s->W) +SSE_HELPER_F(helper_pmovsxwq, Q, 1 << SHIFT, (int16_t) s->W) +SSE_HELPER_F(helper_pmovsxdq, Q, 1 << SHIFT, (int32_t) s->L) +SSE_HELPER_F(helper_pmovzxbw, W, 4 << SHIFT, s->B) +SSE_HELPER_F(helper_pmovzxbd, L, 2 << SHIFT, s->B) +SSE_HELPER_F(helper_pmovzxbq, Q, 1 << SHIFT, s->B) +SSE_HELPER_F(helper_pmovzxwd, L, 2 << SHIFT, s->W) +SSE_HELPER_F(helper_pmovzxwq, Q, 1 << SHIFT, s->W) +SSE_HELPER_F(helper_pmovzxdq, Q, 1 << SHIFT, s->L) +SSE_HELPER_F(helper_pmovsldup, L, 2 << SHIFT, FMOVSLDUP) +SSE_HELPER_F(helper_pmovshdup, L, 2 << SHIFT, FMOVSHDUP) +SSE_HELPER_F(helper_pmovdldup, Q, 1 << SHIFT, FMOVDLDUP) +#endif -void glue(helper_pmuldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_pmuldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) { - d->Q(0) = (int64_t)(int32_t) d->L(0) * (int32_t) s->L(0); - d->Q(1) = (int64_t)(int32_t) d->L(2) * (int32_t) s->L(2); + int i; + + for (i = 0; i < 1 << SHIFT; i++) { + d->Q(i) = (int64_t)(int32_t) v->L(2 * i) * (int32_t) s->L(2 * i); + } } -#define FCMPEQQ(d, s) (d == s ? 
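Aside for review, not part of the patch: the generalized helper_ptest sets ZF when s AND d is all zero and CF when s AND NOT d is all zero, accumulated over every quadword. A sketch using the architectural EFLAGS bit positions for the CC_Z and CC_C constants:

/* Flag computation of helper_ptest over an array of quadwords. */
#include <stdint.h>
#include <stdio.h>

#define CC_C 0x0001   /* EFLAGS.CF */
#define CC_Z 0x0040   /* EFLAGS.ZF */

static unsigned ptest_flags_sketch(const uint64_t *d, const uint64_t *s, int nquads)
{
    uint64_t zf = 0, cf = 0;
    for (int i = 0; i < nquads; i++) {
        zf |= s[i] & d[i];      /* any common bit means ZF stays clear */
        cf |= s[i] & ~d[i];     /* any bit of s outside d means CF stays clear */
    }
    return (zf ? 0 : CC_Z) | (cf ? 0 : CC_C);
}

int main(void)
{
    uint64_t d[2] = {0xff00ff00ff00ff00ULL, 0};
    uint64_t s[2] = {0x00ff00ff00ff00ffULL, 0};
    printf("%x\n", ptest_flags_sketch(d, s, 2));   /* 40 (ZF set, CF clear) */
    return 0;
}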
-1 : 0) -SSE_HELPER_Q(helper_pcmpeqq, FCMPEQQ) - -void glue(helper_packusdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_packusdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) { - Reg r; + uint16_t r[8]; + int i, j, k; - r.W(0) = satuw((int32_t) d->L(0)); - r.W(1) = satuw((int32_t) d->L(1)); - r.W(2) = satuw((int32_t) d->L(2)); - r.W(3) = satuw((int32_t) d->L(3)); - r.W(4) = satuw((int32_t) s->L(0)); - r.W(5) = satuw((int32_t) s->L(1)); - r.W(6) = satuw((int32_t) s->L(2)); - r.W(7) = satuw((int32_t) s->L(3)); - *d = r; + for (i = 0, j = 0; i <= 2 << SHIFT; i += 8, j += 4) { + r[0] = satuw(v->L(j)); + r[1] = satuw(v->L(j + 1)); + r[2] = satuw(v->L(j + 2)); + r[3] = satuw(v->L(j + 3)); + r[4] = satuw(s->L(j)); + r[5] = satuw(s->L(j + 1)); + r[6] = satuw(s->L(j + 2)); + r[7] = satuw(s->L(j + 3)); + for (k = 0; k < 8; k++) { + d->W(i + k) = r[k]; + } + } } -#define FMINSB(d, s) MIN((int8_t)d, (int8_t)s) -#define FMINSD(d, s) MIN((int32_t)d, (int32_t)s) -#define FMAXSB(d, s) MAX((int8_t)d, (int8_t)s) -#define FMAXSD(d, s) MAX((int32_t)d, (int32_t)s) -SSE_HELPER_B(helper_pminsb, FMINSB) -SSE_HELPER_L(helper_pminsd, FMINSD) -SSE_HELPER_W(helper_pminuw, MIN) -SSE_HELPER_L(helper_pminud, MIN) -SSE_HELPER_B(helper_pmaxsb, FMAXSB) -SSE_HELPER_L(helper_pmaxsd, FMAXSD) -SSE_HELPER_W(helper_pmaxuw, MAX) -SSE_HELPER_L(helper_pmaxud, MAX) - -#define FMULLD(d, s) ((int32_t)d * (int32_t)s) -SSE_HELPER_L(helper_pmulld, FMULLD) - +#if SHIFT == 1 void glue(helper_phminposuw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int idx = 0; @@ -1768,35 +1702,23 @@ void glue(helper_phminposuw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) d->L(1) = 0; d->Q(1) = 0; } +#endif void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mode) { uint8_t old_flags = get_float_exception_flags(&env->sse_status); signed char prev_rounding_mode; + int i; prev_rounding_mode = env->sse_status.float_rounding_mode; if (!(mode & (1 << 2))) { - switch (mode & 3) { - case 0: - set_float_rounding_mode(float_round_nearest_even, &env->sse_status); - break; - case 1: - set_float_rounding_mode(float_round_down, &env->sse_status); - break; - case 2: - set_float_rounding_mode(float_round_up, &env->sse_status); - break; - case 3: - set_float_rounding_mode(float_round_to_zero, &env->sse_status); - break; - } + set_x86_rounding_mode(mode & 3, &env->sse_status); } - d->ZMM_S(0) = float32_round_to_int(s->ZMM_S(0), &env->sse_status); - d->ZMM_S(1) = float32_round_to_int(s->ZMM_S(1), &env->sse_status); - d->ZMM_S(2) = float32_round_to_int(s->ZMM_S(2), &env->sse_status); - d->ZMM_S(3) = float32_round_to_int(s->ZMM_S(3), &env->sse_status); + for (i = 0; i < 2 << SHIFT; i++) { + d->ZMM_S(i) = float32_round_to_int(s->ZMM_S(i), &env->sse_status); + } if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) { set_float_exception_flags(get_float_exception_flags(&env->sse_status) & @@ -1811,27 +1733,16 @@ void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, { uint8_t old_flags = get_float_exception_flags(&env->sse_status); signed char prev_rounding_mode; + int i; prev_rounding_mode = env->sse_status.float_rounding_mode; if (!(mode & (1 << 2))) { - switch (mode & 3) { - case 0: - set_float_rounding_mode(float_round_nearest_even, &env->sse_status); - break; - case 1: - set_float_rounding_mode(float_round_down, &env->sse_status); - break; - case 2: - set_float_rounding_mode(float_round_up, &env->sse_status); - break; - case 3: - set_float_rounding_mode(float_round_to_zero, &env->sse_status); - break; - } + 
set_x86_rounding_mode(mode & 3, &env->sse_status); } - d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status); - d->ZMM_D(1) = float64_round_to_int(s->ZMM_D(1), &env->sse_status); + for (i = 0; i < 1 << SHIFT; i++) { + d->ZMM_D(i) = float64_round_to_int(s->ZMM_D(i), &env->sse_status); + } if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) { set_float_exception_flags(get_float_exception_flags(&env->sse_status) & @@ -1841,31 +1752,23 @@ void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, env->sse_status.float_rounding_mode = prev_rounding_mode; } -void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, +#if SHIFT == 1 +void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, uint32_t mode) { uint8_t old_flags = get_float_exception_flags(&env->sse_status); signed char prev_rounding_mode; + int i; prev_rounding_mode = env->sse_status.float_rounding_mode; if (!(mode & (1 << 2))) { - switch (mode & 3) { - case 0: - set_float_rounding_mode(float_round_nearest_even, &env->sse_status); - break; - case 1: - set_float_rounding_mode(float_round_down, &env->sse_status); - break; - case 2: - set_float_rounding_mode(float_round_up, &env->sse_status); - break; - case 3: - set_float_rounding_mode(float_round_to_zero, &env->sse_status); - break; - } + set_x86_rounding_mode(mode & 3, &env->sse_status); } d->ZMM_S(0) = float32_round_to_int(s->ZMM_S(0), &env->sse_status); + for (i = 1; i < 2 << SHIFT; i++) { + d->ZMM_L(i) = v->ZMM_L(i); + } if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) { set_float_exception_flags(get_float_exception_flags(&env->sse_status) & @@ -1875,31 +1778,22 @@ void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, env->sse_status.float_rounding_mode = prev_rounding_mode; } -void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, +void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, uint32_t mode) { uint8_t old_flags = get_float_exception_flags(&env->sse_status); signed char prev_rounding_mode; + int i; prev_rounding_mode = env->sse_status.float_rounding_mode; if (!(mode & (1 << 2))) { - switch (mode & 3) { - case 0: - set_float_rounding_mode(float_round_nearest_even, &env->sse_status); - break; - case 1: - set_float_rounding_mode(float_round_down, &env->sse_status); - break; - case 2: - set_float_rounding_mode(float_round_up, &env->sse_status); - break; - case 3: - set_float_rounding_mode(float_round_to_zero, &env->sse_status); - break; - } + set_x86_rounding_mode(mode & 3, &env->sse_status); } d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status); + for (i = 1; i < 1 << SHIFT; i++) { + d->ZMM_Q(i) = v->ZMM_Q(i); + } if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) { set_float_exception_flags(get_float_exception_flags(&env->sse_status) & @@ -1908,110 +1802,122 @@ void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, } env->sse_status.float_rounding_mode = prev_rounding_mode; } +#endif -#define FBLENDP(d, s, m) (m ? s : d) -SSE_HELPER_I(helper_blendps, L, 4, FBLENDP) -SSE_HELPER_I(helper_blendpd, Q, 2, FBLENDP) -SSE_HELPER_I(helper_pblendw, W, 8, FBLENDP) +#define FBLENDP(v, s, m) (m ? 
s : v) +SSE_HELPER_I(helper_blendps, L, 2 << SHIFT, FBLENDP) +SSE_HELPER_I(helper_blendpd, Q, 1 << SHIFT, FBLENDP) +SSE_HELPER_I(helper_pblendw, W, 4 << SHIFT, FBLENDP) -void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask) +void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, + uint32_t mask) { - float32 iresult = float32_zero; + float32 prod1, prod2, temp2, temp3, temp4; + int i; - if (mask & (1 << 4)) { - iresult = float32_add(iresult, - float32_mul(d->ZMM_S(0), s->ZMM_S(0), - &env->sse_status), - &env->sse_status); + for (i = 0; i < 2 << SHIFT; i += 4) { + /* + * We must evaluate (A+B)+(C+D), not ((A+B)+C)+D + * to correctly round the intermediate results + */ + if (mask & (1 << 4)) { + prod1 = float32_mul(v->ZMM_S(i), s->ZMM_S(i), &env->sse_status); + } else { + prod1 = float32_zero; + } + if (mask & (1 << 5)) { + prod2 = float32_mul(v->ZMM_S(i+1), s->ZMM_S(i+1), &env->sse_status); + } else { + prod2 = float32_zero; + } + temp2 = float32_add(prod1, prod2, &env->sse_status); + if (mask & (1 << 6)) { + prod1 = float32_mul(v->ZMM_S(i+2), s->ZMM_S(i+2), &env->sse_status); + } else { + prod1 = float32_zero; + } + if (mask & (1 << 7)) { + prod2 = float32_mul(v->ZMM_S(i+3), s->ZMM_S(i+3), &env->sse_status); + } else { + prod2 = float32_zero; + } + temp3 = float32_add(prod1, prod2, &env->sse_status); + temp4 = float32_add(temp2, temp3, &env->sse_status); + + d->ZMM_S(i) = (mask & (1 << 0)) ? temp4 : float32_zero; + d->ZMM_S(i+1) = (mask & (1 << 1)) ? temp4 : float32_zero; + d->ZMM_S(i+2) = (mask & (1 << 2)) ? temp4 : float32_zero; + d->ZMM_S(i+3) = (mask & (1 << 3)) ? temp4 : float32_zero; } - if (mask & (1 << 5)) { - iresult = float32_add(iresult, - float32_mul(d->ZMM_S(1), s->ZMM_S(1), - &env->sse_status), - &env->sse_status); - } - if (mask & (1 << 6)) { - iresult = float32_add(iresult, - float32_mul(d->ZMM_S(2), s->ZMM_S(2), - &env->sse_status), - &env->sse_status); - } - if (mask & (1 << 7)) { - iresult = float32_add(iresult, - float32_mul(d->ZMM_S(3), s->ZMM_S(3), - &env->sse_status), - &env->sse_status); - } - d->ZMM_S(0) = (mask & (1 << 0)) ? iresult : float32_zero; - d->ZMM_S(1) = (mask & (1 << 1)) ? iresult : float32_zero; - d->ZMM_S(2) = (mask & (1 << 2)) ? iresult : float32_zero; - d->ZMM_S(3) = (mask & (1 << 3)) ? iresult : float32_zero; } -void glue(helper_dppd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask) +#if SHIFT == 1 +/* Oddly, there is no ymm version of dppd */ +void glue(helper_dppd, SUFFIX)(CPUX86State *env, + Reg *d, Reg *v, Reg *s, uint32_t mask) { - float64 iresult = float64_zero; + float64 prod1, prod2, temp2; if (mask & (1 << 4)) { - iresult = float64_add(iresult, - float64_mul(d->ZMM_D(0), s->ZMM_D(0), - &env->sse_status), - &env->sse_status); + prod1 = float64_mul(v->ZMM_D(0), s->ZMM_D(0), &env->sse_status); + } else { + prod1 = float64_zero; } if (mask & (1 << 5)) { - iresult = float64_add(iresult, - float64_mul(d->ZMM_D(1), s->ZMM_D(1), - &env->sse_status), - &env->sse_status); + prod2 = float64_mul(v->ZMM_D(1), s->ZMM_D(1), &env->sse_status); + } else { + prod2 = float64_zero; } - d->ZMM_D(0) = (mask & (1 << 0)) ? iresult : float64_zero; - d->ZMM_D(1) = (mask & (1 << 1)) ? iresult : float64_zero; + temp2 = float64_add(prod1, prod2, &env->sse_status); + d->ZMM_D(0) = (mask & (1 << 0)) ? temp2 : float64_zero; + d->ZMM_D(1) = (mask & (1 << 1)) ? 
temp2 : float64_zero; } +#endif -void glue(helper_mpsadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, +void glue(helper_mpsadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, uint32_t offset) { - int s0 = (offset & 3) << 2; - int d0 = (offset & 4) << 0; - int i; - Reg r; + int i, j; + uint16_t r[8]; - for (i = 0; i < 8; i++, d0++) { - r.W(i) = 0; - r.W(i) += abs1(d->B(d0 + 0) - s->B(s0 + 0)); - r.W(i) += abs1(d->B(d0 + 1) - s->B(s0 + 1)); - r.W(i) += abs1(d->B(d0 + 2) - s->B(s0 + 2)); - r.W(i) += abs1(d->B(d0 + 3) - s->B(s0 + 3)); + for (j = 0; j < 4 << SHIFT; ) { + int s0 = (j * 2) + ((offset & 3) << 2); + int d0 = (j * 2) + ((offset & 4) << 0); + for (i = 0; i < LANE_WIDTH / 2; i++, d0++) { + r[i] = 0; + r[i] += abs1(v->B(d0 + 0) - s->B(s0 + 0)); + r[i] += abs1(v->B(d0 + 1) - s->B(s0 + 1)); + r[i] += abs1(v->B(d0 + 2) - s->B(s0 + 2)); + r[i] += abs1(v->B(d0 + 3) - s->B(s0 + 3)); + } + for (i = 0; i < LANE_WIDTH / 2; i++, j++) { + d->W(j) = r[i]; + } + offset >>= 3; } - - *d = r; } /* SSE4.2 op helpers */ -#define FCMPGTQ(d, s) ((int64_t)d > (int64_t)s ? -1 : 0) -SSE_HELPER_Q(helper_pcmpgtq, FCMPGTQ) - +#if SHIFT == 1 static inline int pcmp_elen(CPUX86State *env, int reg, uint32_t ctrl) { - int val; + target_long val, limit; /* Presence of REX.W is indicated by a bit higher than 7 set */ if (ctrl >> 8) { - val = abs1((int64_t)env->regs[reg]); + val = (target_long)env->regs[reg]; } else { - val = abs1((int32_t)env->regs[reg]); + val = (int32_t)env->regs[reg]; } - if (ctrl & 1) { - if (val > 8) { - return 8; - } + limit = 8; } else { - if (val > 16) { - return 16; - } + limit = 16; } - return val; + if ((val > limit) || (val < -limit)) { + return limit; + } + return abs1(val); } static inline int pcmp_ilen(Reg *r, uint8_t ctrl) @@ -2047,7 +1953,7 @@ static inline int pcmp_val(Reg *r, uint8_t ctrl, int i) } static inline unsigned pcmpxstrx(CPUX86State *env, Reg *d, Reg *s, - int8_t ctrl, int valids, int validd) + uint8_t ctrl, int valids, int validd) { unsigned int res = 0; int v; @@ -2213,14 +2119,16 @@ target_ulong helper_crc32(uint32_t crc1, target_ulong msg, uint32_t len) return crc; } -void glue(helper_pclmulqdq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, - uint32_t ctrl) +#endif + +#if SHIFT == 1 +static void clmulq(uint64_t *dest_l, uint64_t *dest_h, + uint64_t a, uint64_t b) { - uint64_t ah, al, b, resh, resl; + uint64_t al, ah, resh, resl; ah = 0; - al = d->Q((ctrl & 1) != 0); - b = s->Q((ctrl & 16) != 0); + al = a; resh = resl = 0; while (b) { @@ -2233,71 +2141,87 @@ void glue(helper_pclmulqdq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, b >>= 1; } - d->Q(0) = resl; - d->Q(1) = resh; + *dest_l = resl; + *dest_h = resh; } +#endif -void glue(helper_aesdec, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_pclmulqdq, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, + uint32_t ctrl) { + uint64_t a, b; int i; - Reg st = *d; - Reg rk = *s; - for (i = 0 ; i < 4 ; i++) { - d->L(i) = rk.L(i) ^ bswap32(AES_Td0[st.B(AES_ishifts[4*i+0])] ^ - AES_Td1[st.B(AES_ishifts[4*i+1])] ^ - AES_Td2[st.B(AES_ishifts[4*i+2])] ^ - AES_Td3[st.B(AES_ishifts[4*i+3])]); + for (i = 0; i < 1 << SHIFT; i += 2) { + a = v->Q(((ctrl & 1) != 0) + i); + b = s->Q(((ctrl & 16) != 0) + i); + clmulq(&d->Q(i), &d->Q(i + 1), a, b); } } -void glue(helper_aesdeclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_aesdec, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) { int i; - Reg st = *d; + Reg st = *v; Reg rk = *s; - for (i = 0; i < 16; i++) { - d->B(i) = rk.B(i) ^ 
(AES_isbox[st.B(AES_ishifts[i])]); + for (i = 0 ; i < 2 << SHIFT ; i++) { + int j = i & 3; + d->L(i) = rk.L(i) ^ bswap32(AES_Td0[st.B(AES_ishifts[4 * j + 0])] ^ + AES_Td1[st.B(AES_ishifts[4 * j + 1])] ^ + AES_Td2[st.B(AES_ishifts[4 * j + 2])] ^ + AES_Td3[st.B(AES_ishifts[4 * j + 3])]); } } -void glue(helper_aesenc, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_aesdeclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) { int i; - Reg st = *d; + Reg st = *v; Reg rk = *s; - for (i = 0 ; i < 4 ; i++) { - d->L(i) = rk.L(i) ^ bswap32(AES_Te0[st.B(AES_shifts[4*i+0])] ^ - AES_Te1[st.B(AES_shifts[4*i+1])] ^ - AES_Te2[st.B(AES_shifts[4*i+2])] ^ - AES_Te3[st.B(AES_shifts[4*i+3])]); + for (i = 0; i < 8 << SHIFT; i++) { + d->B(i) = rk.B(i) ^ (AES_isbox[st.B(AES_ishifts[i & 15] + (i & ~15))]); } } -void glue(helper_aesenclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +void glue(helper_aesenc, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) { int i; - Reg st = *d; + Reg st = *v; Reg rk = *s; - for (i = 0; i < 16; i++) { - d->B(i) = rk.B(i) ^ (AES_sbox[st.B(AES_shifts[i])]); + for (i = 0 ; i < 2 << SHIFT ; i++) { + int j = i & 3; + d->L(i) = rk.L(i) ^ bswap32(AES_Te0[st.B(AES_shifts[4 * j + 0])] ^ + AES_Te1[st.B(AES_shifts[4 * j + 1])] ^ + AES_Te2[st.B(AES_shifts[4 * j + 2])] ^ + AES_Te3[st.B(AES_shifts[4 * j + 3])]); } - } +void glue(helper_aesenclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) +{ + int i; + Reg st = *v; + Reg rk = *s; + + for (i = 0; i < 8 << SHIFT; i++) { + d->B(i) = rk.B(i) ^ (AES_sbox[st.B(AES_shifts[i & 15] + (i & ~15))]); + } +} + +#if SHIFT == 1 void glue(helper_aesimc, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int i; Reg tmp = *s; for (i = 0 ; i < 4 ; i++) { - d->L(i) = bswap32(AES_imc[tmp.B(4*i+0)][0] ^ - AES_imc[tmp.B(4*i+1)][1] ^ - AES_imc[tmp.B(4*i+2)][2] ^ - AES_imc[tmp.B(4*i+3)][3]); + d->L(i) = bswap32(AES_imc[tmp.B(4 * i + 0)][0] ^ + AES_imc[tmp.B(4 * i + 1)][1] ^ + AES_imc[tmp.B(4 * i + 2)][2] ^ + AES_imc[tmp.B(4 * i + 3)][3]); } } @@ -2315,7 +2239,327 @@ void glue(helper_aeskeygenassist, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, d->L(3) = (d->L(2) << 24 | d->L(2) >> 8) ^ ctrl; } #endif +#endif +#if SHIFT >= 1 +void glue(helper_vpermilpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) +{ + uint64_t r0, r1; + int i; + + for (i = 0; i < 1 << SHIFT; i += 2) { + r0 = v->Q(i + ((s->Q(i) >> 1) & 1)); + r1 = v->Q(i + ((s->Q(i+1) >> 1) & 1)); + d->Q(i) = r0; + d->Q(i+1) = r1; + } +} + +void glue(helper_vpermilps, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) +{ + uint32_t r0, r1, r2, r3; + int i; + + for (i = 0; i < 2 << SHIFT; i += 4) { + r0 = v->L(i + (s->L(i) & 3)); + r1 = v->L(i + (s->L(i+1) & 3)); + r2 = v->L(i + (s->L(i+2) & 3)); + r3 = v->L(i + (s->L(i+3) & 3)); + d->L(i) = r0; + d->L(i+1) = r1; + d->L(i+2) = r2; + d->L(i+3) = r3; + } +} + +void glue(helper_vpermilpd_imm, SUFFIX)(Reg *d, Reg *s, uint32_t order) +{ + uint64_t r0, r1; + int i; + + for (i = 0; i < 1 << SHIFT; i += 2) { + r0 = s->Q(i + ((order >> 0) & 1)); + r1 = s->Q(i + ((order >> 1) & 1)); + d->Q(i) = r0; + d->Q(i+1) = r1; + + order >>= 2; + } +} + +void glue(helper_vpermilps_imm, SUFFIX)(Reg *d, Reg *s, uint32_t order) +{ + uint32_t r0, r1, r2, r3; + int i; + + for (i = 0; i < 2 << SHIFT; i += 4) { + r0 = s->L(i + ((order >> 0) & 3)); + r1 = s->L(i + ((order >> 2) & 3)); + r2 = s->L(i + ((order >> 4) & 3)); + r3 = s->L(i + ((order >> 6) & 3)); + d->L(i) = r0; + d->L(i+1) = r1; + d->L(i+2) = r2; + d->L(i+3) = r3; + } +} + +#if SHIFT == 1 +#define FPSRLVD(x, c) (c < 32 ? 
((x) >> c) : 0) +#define FPSRLVQ(x, c) (c < 64 ? ((x) >> c) : 0) +#define FPSRAVD(x, c) ((int32_t)(x) >> (c < 32 ? c : 31)) +#define FPSRAVQ(x, c) ((int64_t)(x) >> (c < 64 ? c : 63)) +#define FPSLLVD(x, c) (c < 32 ? ((x) << c) : 0) +#define FPSLLVQ(x, c) (c < 64 ? ((x) << c) : 0) +#endif + +SSE_HELPER_L(helper_vpsrlvd, FPSRLVD) +SSE_HELPER_L(helper_vpsravd, FPSRAVD) +SSE_HELPER_L(helper_vpsllvd, FPSLLVD) + +SSE_HELPER_Q(helper_vpsrlvq, FPSRLVQ) +SSE_HELPER_Q(helper_vpsravq, FPSRAVQ) +SSE_HELPER_Q(helper_vpsllvq, FPSLLVQ) + +void glue(helper_vtestps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + uint32_t zf = 0, cf = 0; + int i; + + for (i = 0; i < 2 << SHIFT; i++) { + zf |= (s->L(i) & d->L(i)); + cf |= (s->L(i) & ~d->L(i)); + } + CC_SRC = ((zf >> 31) ? 0 : CC_Z) | ((cf >> 31) ? 0 : CC_C); +} + +void glue(helper_vtestpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + uint64_t zf = 0, cf = 0; + int i; + + for (i = 0; i < 1 << SHIFT; i++) { + zf |= (s->Q(i) & d->Q(i)); + cf |= (s->Q(i) & ~d->Q(i)); + } + CC_SRC = ((zf >> 63) ? 0 : CC_Z) | ((cf >> 63) ? 0 : CC_C); +} + +void glue(helper_vpmaskmovd_st, SUFFIX)(CPUX86State *env, + Reg *v, Reg *s, target_ulong a0) +{ + int i; + + for (i = 0; i < (2 << SHIFT); i++) { + if (v->L(i) >> 31) { + cpu_stl_data_ra(env, a0 + i * 4, s->L(i), GETPC()); + } + } +} + +void glue(helper_vpmaskmovq_st, SUFFIX)(CPUX86State *env, + Reg *v, Reg *s, target_ulong a0) +{ + int i; + + for (i = 0; i < (1 << SHIFT); i++) { + if (v->Q(i) >> 63) { + cpu_stq_data_ra(env, a0 + i * 8, s->Q(i), GETPC()); + } + } +} + +void glue(helper_vpmaskmovd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) +{ + int i; + + for (i = 0; i < (2 << SHIFT); i++) { + d->L(i) = (v->L(i) >> 31) ? s->L(i) : 0; + } +} + +void glue(helper_vpmaskmovq, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) +{ + int i; + + for (i = 0; i < (1 << SHIFT); i++) { + d->Q(i) = (v->Q(i) >> 63) ? 
s->Q(i) : 0; + } +} + +void glue(helper_vpgatherdd, SUFFIX)(CPUX86State *env, + Reg *d, Reg *v, Reg *s, target_ulong a0, unsigned scale) +{ + int i; + for (i = 0; i < (2 << SHIFT); i++) { + if (v->L(i) >> 31) { + target_ulong addr = a0 + + ((target_ulong)(int32_t)s->L(i) << scale); + d->L(i) = cpu_ldl_data_ra(env, addr, GETPC()); + } + v->L(i) = 0; + } +} + +void glue(helper_vpgatherdq, SUFFIX)(CPUX86State *env, + Reg *d, Reg *v, Reg *s, target_ulong a0, unsigned scale) +{ + int i; + for (i = 0; i < (1 << SHIFT); i++) { + if (v->Q(i) >> 63) { + target_ulong addr = a0 + + ((target_ulong)(int32_t)s->L(i) << scale); + d->Q(i) = cpu_ldq_data_ra(env, addr, GETPC()); + } + v->Q(i) = 0; + } +} + +void glue(helper_vpgatherqd, SUFFIX)(CPUX86State *env, + Reg *d, Reg *v, Reg *s, target_ulong a0, unsigned scale) +{ + int i; + for (i = 0; i < (1 << SHIFT); i++) { + if (v->L(i) >> 31) { + target_ulong addr = a0 + + ((target_ulong)(int64_t)s->Q(i) << scale); + d->L(i) = cpu_ldl_data_ra(env, addr, GETPC()); + } + v->L(i) = 0; + } + for (i /= 2; i < 1 << SHIFT; i++) { + d->Q(i) = 0; + v->Q(i) = 0; + } +} + +void glue(helper_vpgatherqq, SUFFIX)(CPUX86State *env, + Reg *d, Reg *v, Reg *s, target_ulong a0, unsigned scale) +{ + int i; + for (i = 0; i < (1 << SHIFT); i++) { + if (v->Q(i) >> 63) { + target_ulong addr = a0 + + ((target_ulong)(int64_t)s->Q(i) << scale); + d->Q(i) = cpu_ldq_data_ra(env, addr, GETPC()); + } + v->Q(i) = 0; + } +} +#endif + +#if SHIFT >= 2 +void helper_vpermdq_ymm(Reg *d, Reg *v, Reg *s, uint32_t order) +{ + uint64_t r0, r1, r2, r3; + + switch (order & 3) { + case 0: + r0 = v->Q(0); + r1 = v->Q(1); + break; + case 1: + r0 = v->Q(2); + r1 = v->Q(3); + break; + case 2: + r0 = s->Q(0); + r1 = s->Q(1); + break; + case 3: + r0 = s->Q(2); + r1 = s->Q(3); + break; + } + switch ((order >> 4) & 3) { + case 0: + r2 = v->Q(0); + r3 = v->Q(1); + break; + case 1: + r2 = v->Q(2); + r3 = v->Q(3); + break; + case 2: + r2 = s->Q(0); + r3 = s->Q(1); + break; + case 3: + r2 = s->Q(2); + r3 = s->Q(3); + break; + } + d->Q(0) = r0; + d->Q(1) = r1; + d->Q(2) = r2; + d->Q(3) = r3; + if (order & 0x8) { + d->Q(0) = 0; + d->Q(1) = 0; + } + if (order & 0x80) { + d->Q(2) = 0; + d->Q(3) = 0; + } +} + +void helper_vpermq_ymm(Reg *d, Reg *s, uint32_t order) +{ + uint64_t r0, r1, r2, r3; + r0 = s->Q(order & 3); + r1 = s->Q((order >> 2) & 3); + r2 = s->Q((order >> 4) & 3); + r3 = s->Q((order >> 6) & 3); + d->Q(0) = r0; + d->Q(1) = r1; + d->Q(2) = r2; + d->Q(3) = r3; +} + +void helper_vpermd_ymm(Reg *d, Reg *v, Reg *s) +{ + uint32_t r[8]; + int i; + + for (i = 0; i < 8; i++) { + r[i] = s->L(v->L(i) & 7); + } + for (i = 0; i < 8; i++) { + d->L(i) = r[i]; + } +} +#endif + +/* FMA3 op helpers */ +#if SHIFT == 1 +#define SSE_HELPER_FMAS(name, elem, F) \ + void name(CPUX86State *env, Reg *d, Reg *a, Reg *b, Reg *c, int flags) \ + { \ + d->elem(0) = F(a->elem(0), b->elem(0), c->elem(0), flags, &env->sse_status); \ + } +#define SSE_HELPER_FMAP(name, elem, num, F) \ + void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *a, Reg *b, Reg *c, \ + int flags, int flip) \ + { \ + int i; \ + for (i = 0; i < num; i++) { \ + d->elem(i) = F(a->elem(i), b->elem(i), c->elem(i), flags, &env->sse_status); \ + flags ^= flip; \ + } \ + } + +SSE_HELPER_FMAS(helper_fma4ss, ZMM_S, float32_muladd) +SSE_HELPER_FMAS(helper_fma4sd, ZMM_D, float64_muladd) +#endif + +#if SHIFT >= 1 +SSE_HELPER_FMAP(helper_fma4ps, ZMM_S, 2 << SHIFT, float32_muladd) +SSE_HELPER_FMAP(helper_fma4pd, ZMM_D, 1 << SHIFT, float64_muladd) +#endif + +#undef SSE_HELPER_S + 
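The helpers rewritten above all derive their element counts from the per-include SHIFT constant (8 << SHIFT bytes per register: MMX, XMM, YMM) and reindex lane-local tables with expressions such as (i & 15) + (i & ~15). The following standalone sketch is not part of the patch; vec_t and pmovzxbw_sketch are invented names used only to illustrate that counting pattern and the "count down from N << SHIFT" loop shape used by SSE_HELPER_F.

#include <stdint.h>
#include <stdio.h>

#define SHIFT 2                      /* pretend this is the _ymm expansion */
#define NBYTES (8 << SHIFT)          /* 8, 16 or 32 bytes per register */

typedef union {
    uint8_t  B[NBYTES];
    uint16_t W[NBYTES / 2];
    uint64_t Q[NBYTES / 8];
} vec_t;

/* zero-extend bytes to words; "4 << SHIFT" destination lanes, as in SSE_HELPER_F */
static void pmovzxbw_sketch(vec_t *d, const vec_t *s)
{
    for (int i = 4 << SHIFT; --i >= 0; ) {
        d->W[i] = s->B[i];
    }
}

int main(void)
{
    vec_t s = { .B = { 1, 2, 3, 250 } };
    vec_t d;

    pmovzxbw_sketch(&d, &s);
    printf("%u %u %u %u\n", (unsigned)d.W[0], (unsigned)d.W[1],
           (unsigned)d.W[2], (unsigned)d.W[3]);   /* 1 2 3 250 */
    return 0;
}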
+#undef LANE_WIDTH #undef SHIFT #undef XMM_ONLY #undef Reg diff --git a/target/i386/ops_sse_header.h b/target/i386/ops_sse_header.h index e68af5c403..8a7b2f4e2f 100644 --- a/target/i386/ops_sse_header.h +++ b/target/i386/ops_sse_header.h @@ -21,7 +21,11 @@ #define SUFFIX _mmx #else #define Reg ZMMReg +#if SHIFT == 1 #define SUFFIX _xmm +#else +#define SUFFIX _ymm +#endif #endif #define dh_alias_Reg ptr @@ -30,75 +34,38 @@ #define dh_ctype_Reg Reg * #define dh_ctype_ZMMReg ZMMReg * #define dh_ctype_MMXReg MMXReg * +#define dh_typecode_Reg dh_typecode_ptr +#define dh_typecode_ZMMReg dh_typecode_ptr +#define dh_typecode_MMXReg dh_typecode_ptr -DEF_HELPER_3(glue(psrlw, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(psraw, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(psllw, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(psrld, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(psrad, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pslld, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(psrlq, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(psllq, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_4(glue(psrlw, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(psraw, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(psllw, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(psrld, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(psrad, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(pslld, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(psrlq, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(psllq, SUFFIX), void, env, Reg, Reg, Reg) -#if SHIFT == 1 -DEF_HELPER_3(glue(psrldq, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pslldq, SUFFIX), void, env, Reg, Reg) +#if SHIFT >= 1 +DEF_HELPER_4(glue(psrldq, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(pslldq, SUFFIX), void, env, Reg, Reg, Reg) #endif #define SSE_HELPER_B(name, F)\ - DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg) + DEF_HELPER_4(glue(name, SUFFIX), void, env, Reg, Reg, Reg) #define SSE_HELPER_W(name, F)\ - DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg) + DEF_HELPER_4(glue(name, SUFFIX), void, env, Reg, Reg, Reg) #define SSE_HELPER_L(name, F)\ - DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg) + DEF_HELPER_4(glue(name, SUFFIX), void, env, Reg, Reg, Reg) #define SSE_HELPER_Q(name, F)\ - DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg) + DEF_HELPER_4(glue(name, SUFFIX), void, env, Reg, Reg, Reg) -SSE_HELPER_B(paddb, FADD) -SSE_HELPER_W(paddw, FADD) -SSE_HELPER_L(paddl, FADD) -SSE_HELPER_Q(paddq, FADD) - -SSE_HELPER_B(psubb, FSUB) -SSE_HELPER_W(psubw, FSUB) -SSE_HELPER_L(psubl, FSUB) -SSE_HELPER_Q(psubq, FSUB) - -SSE_HELPER_B(paddusb, FADDUB) -SSE_HELPER_B(paddsb, FADDSB) -SSE_HELPER_B(psubusb, FSUBUB) -SSE_HELPER_B(psubsb, FSUBSB) - -SSE_HELPER_W(paddusw, FADDUW) -SSE_HELPER_W(paddsw, FADDSW) -SSE_HELPER_W(psubusw, FSUBUW) -SSE_HELPER_W(psubsw, FSUBSW) - -SSE_HELPER_B(pminub, FMINUB) -SSE_HELPER_B(pmaxub, FMAXUB) - -SSE_HELPER_W(pminsw, FMINSW) -SSE_HELPER_W(pmaxsw, FMAXSW) - -SSE_HELPER_Q(pand, FAND) -SSE_HELPER_Q(pandn, FANDN) -SSE_HELPER_Q(por, FOR) -SSE_HELPER_Q(pxor, FXOR) - -SSE_HELPER_B(pcmpgtb, FCMPGTB) -SSE_HELPER_W(pcmpgtw, FCMPGTW) -SSE_HELPER_L(pcmpgtl, FCMPGTL) - -SSE_HELPER_B(pcmpeqb, FCMPEQ) -SSE_HELPER_W(pcmpeqw, FCMPEQ) -SSE_HELPER_L(pcmpeql, FCMPEQ) - -SSE_HELPER_W(pmullw, FMULLW) #if SHIFT == 0 -SSE_HELPER_W(pmulhrw, FMULHRW) +DEF_HELPER_3(glue(pmulhrw, SUFFIX), void, env, Reg, Reg) #endif SSE_HELPER_W(pmulhuw, FMULHUW) SSE_HELPER_W(pmulhw, FMULHW) @@ 
-106,51 +73,74 @@ SSE_HELPER_W(pmulhw, FMULHW) SSE_HELPER_B(pavgb, FAVG) SSE_HELPER_W(pavgw, FAVG) -DEF_HELPER_3(glue(pmuludq, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pmaddwd, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_4(glue(pmuludq, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(pmaddwd, SUFFIX), void, env, Reg, Reg, Reg) -DEF_HELPER_3(glue(psadbw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_4(glue(psadbw, SUFFIX), void, env, Reg, Reg, Reg) +#if SHIFT < 2 DEF_HELPER_4(glue(maskmov, SUFFIX), void, env, Reg, Reg, tl) -DEF_HELPER_2(glue(movl_mm_T0, SUFFIX), void, Reg, i32) -#ifdef TARGET_X86_64 -DEF_HELPER_2(glue(movq_mm_T0, SUFFIX), void, Reg, i64) #endif #if SHIFT == 0 DEF_HELPER_3(glue(pshufw, SUFFIX), void, Reg, Reg, int) #else -DEF_HELPER_3(shufps, void, Reg, Reg, int) -DEF_HELPER_3(shufpd, void, Reg, Reg, int) DEF_HELPER_3(glue(pshufd, SUFFIX), void, Reg, Reg, int) DEF_HELPER_3(glue(pshuflw, SUFFIX), void, Reg, Reg, int) DEF_HELPER_3(glue(pshufhw, SUFFIX), void, Reg, Reg, int) #endif -#if SHIFT == 1 +#if SHIFT >= 1 /* FPU ops */ /* XXX: not accurate */ -#define SSE_HELPER_S(name, F) \ - DEF_HELPER_3(name ## ps, void, env, Reg, Reg) \ - DEF_HELPER_3(name ## ss, void, env, Reg, Reg) \ - DEF_HELPER_3(name ## pd, void, env, Reg, Reg) \ - DEF_HELPER_3(name ## sd, void, env, Reg, Reg) +#define SSE_HELPER_P4(name) \ + DEF_HELPER_4(glue(name ## ps, SUFFIX), void, env, Reg, Reg, Reg) \ + DEF_HELPER_4(glue(name ## pd, SUFFIX), void, env, Reg, Reg, Reg) -SSE_HELPER_S(add, FPU_ADD) -SSE_HELPER_S(sub, FPU_SUB) -SSE_HELPER_S(mul, FPU_MUL) -SSE_HELPER_S(div, FPU_DIV) -SSE_HELPER_S(min, FPU_MIN) -SSE_HELPER_S(max, FPU_MAX) -SSE_HELPER_S(sqrt, FPU_SQRT) +#define SSE_HELPER_P3(name, ...) \ + DEF_HELPER_3(glue(name ## ps, SUFFIX), void, env, Reg, Reg) \ + DEF_HELPER_3(glue(name ## pd, SUFFIX), void, env, Reg, Reg) +#if SHIFT == 1 +#define SSE_HELPER_S4(name) \ + SSE_HELPER_P4(name) \ + DEF_HELPER_4(name ## ss, void, env, Reg, Reg, Reg) \ + DEF_HELPER_4(name ## sd, void, env, Reg, Reg, Reg) +#define SSE_HELPER_S3(name) \ + SSE_HELPER_P3(name) \ + DEF_HELPER_4(name ## ss, void, env, Reg, Reg, Reg) \ + DEF_HELPER_4(name ## sd, void, env, Reg, Reg, Reg) +#else +#define SSE_HELPER_S4(name, ...) SSE_HELPER_P4(name) +#define SSE_HELPER_S3(name, ...) 
SSE_HELPER_P3(name) +#endif -DEF_HELPER_3(cvtps2pd, void, env, Reg, Reg) -DEF_HELPER_3(cvtpd2ps, void, env, Reg, Reg) -DEF_HELPER_3(cvtss2sd, void, env, Reg, Reg) -DEF_HELPER_3(cvtsd2ss, void, env, Reg, Reg) -DEF_HELPER_3(cvtdq2ps, void, env, Reg, Reg) -DEF_HELPER_3(cvtdq2pd, void, env, Reg, Reg) +DEF_HELPER_4(glue(shufps, SUFFIX), void, Reg, Reg, Reg, int) +DEF_HELPER_4(glue(shufpd, SUFFIX), void, Reg, Reg, Reg, int) + +SSE_HELPER_S4(add) +SSE_HELPER_S4(sub) +SSE_HELPER_S4(mul) +SSE_HELPER_S4(div) +SSE_HELPER_S4(min) +SSE_HELPER_S4(max) + +SSE_HELPER_S3(sqrt) + +DEF_HELPER_3(glue(cvtps2pd, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(cvtpd2ps, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(cvtdq2ps, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(cvtdq2pd, SUFFIX), void, env, Reg, Reg) + +DEF_HELPER_3(glue(cvtps2dq, SUFFIX), void, env, ZMMReg, ZMMReg) +DEF_HELPER_3(glue(cvtpd2dq, SUFFIX), void, env, ZMMReg, ZMMReg) + +DEF_HELPER_3(glue(cvttps2dq, SUFFIX), void, env, ZMMReg, ZMMReg) +DEF_HELPER_3(glue(cvttpd2dq, SUFFIX), void, env, ZMMReg, ZMMReg) + +#if SHIFT == 1 +DEF_HELPER_4(cvtss2sd, void, env, Reg, Reg, Reg) +DEF_HELPER_4(cvtsd2ss, void, env, Reg, Reg, Reg) DEF_HELPER_3(cvtpi2ps, void, env, ZMMReg, MMXReg) DEF_HELPER_3(cvtpi2pd, void, env, ZMMReg, MMXReg) DEF_HELPER_3(cvtsi2ss, void, env, ZMMReg, i32) @@ -161,8 +151,6 @@ DEF_HELPER_3(cvtsq2ss, void, env, ZMMReg, i64) DEF_HELPER_3(cvtsq2sd, void, env, ZMMReg, i64) #endif -DEF_HELPER_3(cvtps2dq, void, env, ZMMReg, ZMMReg) -DEF_HELPER_3(cvtpd2dq, void, env, ZMMReg, ZMMReg) DEF_HELPER_3(cvtps2pi, void, env, MMXReg, ZMMReg) DEF_HELPER_3(cvtpd2pi, void, env, MMXReg, ZMMReg) DEF_HELPER_2(cvtss2si, s32, env, ZMMReg) @@ -172,8 +160,6 @@ DEF_HELPER_2(cvtss2sq, s64, env, ZMMReg) DEF_HELPER_2(cvtsd2sq, s64, env, ZMMReg) #endif -DEF_HELPER_3(cvttps2dq, void, env, ZMMReg, ZMMReg) -DEF_HELPER_3(cvttpd2dq, void, env, ZMMReg, ZMMReg) DEF_HELPER_3(cvttps2pi, void, env, MMXReg, ZMMReg) DEF_HELPER_3(cvttpd2pi, void, env, MMXReg, ZMMReg) DEF_HELPER_2(cvttss2si, s32, env, ZMMReg) @@ -182,60 +168,87 @@ DEF_HELPER_2(cvttsd2si, s32, env, ZMMReg) DEF_HELPER_2(cvttss2sq, s64, env, ZMMReg) DEF_HELPER_2(cvttsd2sq, s64, env, ZMMReg) #endif +#endif -DEF_HELPER_3(rsqrtps, void, env, ZMMReg, ZMMReg) -DEF_HELPER_3(rsqrtss, void, env, ZMMReg, ZMMReg) -DEF_HELPER_3(rcpps, void, env, ZMMReg, ZMMReg) -DEF_HELPER_3(rcpss, void, env, ZMMReg, ZMMReg) +DEF_HELPER_3(glue(rsqrtps, SUFFIX), void, env, ZMMReg, ZMMReg) +DEF_HELPER_3(glue(rcpps, SUFFIX), void, env, ZMMReg, ZMMReg) + +#if SHIFT == 1 +DEF_HELPER_4(rsqrtss, void, env, ZMMReg, ZMMReg, ZMMReg) +DEF_HELPER_4(rcpss, void, env, ZMMReg, ZMMReg, ZMMReg) DEF_HELPER_3(extrq_r, void, env, ZMMReg, ZMMReg) DEF_HELPER_4(extrq_i, void, env, ZMMReg, int, int) DEF_HELPER_3(insertq_r, void, env, ZMMReg, ZMMReg) -DEF_HELPER_4(insertq_i, void, env, ZMMReg, int, int) -DEF_HELPER_3(haddps, void, env, ZMMReg, ZMMReg) -DEF_HELPER_3(haddpd, void, env, ZMMReg, ZMMReg) -DEF_HELPER_3(hsubps, void, env, ZMMReg, ZMMReg) -DEF_HELPER_3(hsubpd, void, env, ZMMReg, ZMMReg) -DEF_HELPER_3(addsubps, void, env, ZMMReg, ZMMReg) -DEF_HELPER_3(addsubpd, void, env, ZMMReg, ZMMReg) +DEF_HELPER_5(insertq_i, void, env, ZMMReg, ZMMReg, int, int) +#endif -#define SSE_HELPER_CMP(name, F) \ - DEF_HELPER_3(name ## ps, void, env, Reg, Reg) \ - DEF_HELPER_3(name ## ss, void, env, Reg, Reg) \ - DEF_HELPER_3(name ## pd, void, env, Reg, Reg) \ - DEF_HELPER_3(name ## sd, void, env, Reg, Reg) +SSE_HELPER_P4(hadd) +SSE_HELPER_P4(hsub) +SSE_HELPER_P4(addsub) 
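Every declaration in this header goes through glue(name, SUFFIX), and the new SHIFT-dependent SUFFIX selection near the top of the file is what turns a single DEF_HELPER_* line into separate _xmm and _ymm symbols. A minimal sketch of that token-pasting mechanism follows; it is not part of the patch, glue/glue2 only mirror QEMU's macros, and demo_shufps is a made-up name.

#include <stdio.h>

#define glue2(a, b) a##b
#define glue(a, b)  glue2(a, b)

#define SUFFIX _ymm    /* the header picks _xmm when SHIFT == 1, _ymm otherwise */

static void glue(demo_shufps, SUFFIX)(void)
{
    puts("expands to an ordinary C identifier: demo_shufps_ymm");
}

int main(void)
{
    demo_shufps_ymm();   /* the glued name can be called like any other function */
    return 0;
}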
-SSE_HELPER_CMP(cmpeq, FPU_CMPEQ) -SSE_HELPER_CMP(cmplt, FPU_CMPLT) -SSE_HELPER_CMP(cmple, FPU_CMPLE) -SSE_HELPER_CMP(cmpunord, FPU_CMPUNORD) -SSE_HELPER_CMP(cmpneq, FPU_CMPNEQ) -SSE_HELPER_CMP(cmpnlt, FPU_CMPNLT) -SSE_HELPER_CMP(cmpnle, FPU_CMPNLE) -SSE_HELPER_CMP(cmpord, FPU_CMPORD) +#define SSE_HELPER_CMP(name, F, C) SSE_HELPER_S4(name) +SSE_HELPER_CMP(cmpeq, FPU_CMPQ, FPU_EQ) +SSE_HELPER_CMP(cmplt, FPU_CMPS, FPU_LT) +SSE_HELPER_CMP(cmple, FPU_CMPS, FPU_LE) +SSE_HELPER_CMP(cmpunord, FPU_CMPQ, FPU_UNORD) +SSE_HELPER_CMP(cmpneq, FPU_CMPQ, !FPU_EQ) +SSE_HELPER_CMP(cmpnlt, FPU_CMPS, !FPU_LT) +SSE_HELPER_CMP(cmpnle, FPU_CMPS, !FPU_LE) +SSE_HELPER_CMP(cmpord, FPU_CMPQ, !FPU_UNORD) + +SSE_HELPER_CMP(cmpequ, FPU_CMPQ, FPU_EQU) +SSE_HELPER_CMP(cmpnge, FPU_CMPS, !FPU_GE) +SSE_HELPER_CMP(cmpngt, FPU_CMPS, !FPU_GT) +SSE_HELPER_CMP(cmpfalse, FPU_CMPQ, FPU_FALSE) +SSE_HELPER_CMP(cmpnequ, FPU_CMPQ, !FPU_EQU) +SSE_HELPER_CMP(cmpge, FPU_CMPS, FPU_GE) +SSE_HELPER_CMP(cmpgt, FPU_CMPS, FPU_GT) +SSE_HELPER_CMP(cmptrue, FPU_CMPQ, !FPU_FALSE) + +SSE_HELPER_CMP(cmpeqs, FPU_CMPS, FPU_EQ) +SSE_HELPER_CMP(cmpltq, FPU_CMPQ, FPU_LT) +SSE_HELPER_CMP(cmpleq, FPU_CMPQ, FPU_LE) +SSE_HELPER_CMP(cmpunords, FPU_CMPS, FPU_UNORD) +SSE_HELPER_CMP(cmpneqq, FPU_CMPS, !FPU_EQ) +SSE_HELPER_CMP(cmpnltq, FPU_CMPQ, !FPU_LT) +SSE_HELPER_CMP(cmpnleq, FPU_CMPQ, !FPU_LE) +SSE_HELPER_CMP(cmpords, FPU_CMPS, !FPU_UNORD) + +SSE_HELPER_CMP(cmpequs, FPU_CMPS, FPU_EQU) +SSE_HELPER_CMP(cmpngeq, FPU_CMPQ, !FPU_GE) +SSE_HELPER_CMP(cmpngtq, FPU_CMPQ, !FPU_GT) +SSE_HELPER_CMP(cmpfalses, FPU_CMPS, FPU_FALSE) +SSE_HELPER_CMP(cmpnequs, FPU_CMPS, !FPU_EQU) +SSE_HELPER_CMP(cmpgeq, FPU_CMPQ, FPU_GE) +SSE_HELPER_CMP(cmpgtq, FPU_CMPQ, FPU_GT) +SSE_HELPER_CMP(cmptrues, FPU_CMPS, !FPU_FALSE) + +#if SHIFT == 1 DEF_HELPER_3(ucomiss, void, env, Reg, Reg) DEF_HELPER_3(comiss, void, env, Reg, Reg) DEF_HELPER_3(ucomisd, void, env, Reg, Reg) DEF_HELPER_3(comisd, void, env, Reg, Reg) -DEF_HELPER_2(movmskps, i32, env, Reg) -DEF_HELPER_2(movmskpd, i32, env, Reg) #endif -DEF_HELPER_2(glue(pmovmskb, SUFFIX), i32, env, Reg) -DEF_HELPER_3(glue(packsswb, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(packuswb, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(packssdw, SUFFIX), void, env, Reg, Reg) -#define UNPCK_OP(base_name, base) \ - DEF_HELPER_3(glue(punpck ## base_name ## bw, SUFFIX), void, env, Reg, Reg) \ - DEF_HELPER_3(glue(punpck ## base_name ## wd, SUFFIX), void, env, Reg, Reg) \ - DEF_HELPER_3(glue(punpck ## base_name ## dq, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_2(glue(movmskps, SUFFIX), i32, env, Reg) +DEF_HELPER_2(glue(movmskpd, SUFFIX), i32, env, Reg) +#endif + +DEF_HELPER_4(glue(packsswb, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(packuswb, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(packssdw, SUFFIX), void, env, Reg, Reg, Reg) +#define UNPCK_OP(name, base) \ + DEF_HELPER_4(glue(punpck ## name ## bw, SUFFIX), void, env, Reg, Reg, Reg) \ + DEF_HELPER_4(glue(punpck ## name ## wd, SUFFIX), void, env, Reg, Reg, Reg) \ + DEF_HELPER_4(glue(punpck ## name ## dq, SUFFIX), void, env, Reg, Reg, Reg) UNPCK_OP(l, 0) UNPCK_OP(h, 1) -#if SHIFT == 1 -DEF_HELPER_3(glue(punpcklqdq, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(punpckhqdq, SUFFIX), void, env, Reg, Reg) +#if SHIFT >= 1 +DEF_HELPER_4(glue(punpcklqdq, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(punpckhqdq, SUFFIX), void, env, Reg, Reg, Reg) #endif /* 3DNow! 
float ops */ @@ -262,28 +275,25 @@ DEF_HELPER_3(pswapd, void, env, MMXReg, MMXReg) #endif /* SSSE3 op helpers */ -DEF_HELPER_3(glue(phaddw, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(phaddd, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(phaddsw, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(phsubw, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(phsubd, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(phsubsw, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pabsb, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pabsw, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pabsd, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pmaddubsw, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pmulhrsw, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pshufb, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(psignb, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(psignw, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(psignd, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_4(glue(palignr, SUFFIX), void, env, Reg, Reg, s32) +DEF_HELPER_4(glue(phaddw, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(phaddd, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(phaddsw, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(phsubw, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(phsubd, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(phsubsw, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(pmaddubsw, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(pmulhrsw, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(pshufb, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(psignb, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(psignw, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(psignd, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_5(glue(palignr, SUFFIX), void, env, Reg, Reg, Reg, i32) /* SSE4.1 op helpers */ -#if SHIFT == 1 -DEF_HELPER_3(glue(pblendvb, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(blendvps, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(blendvpd, SUFFIX), void, env, Reg, Reg) +#if SHIFT >= 1 +DEF_HELPER_5(glue(pblendvb, SUFFIX), void, env, Reg, Reg, Reg, Reg) +DEF_HELPER_5(glue(blendvps, SUFFIX), void, env, Reg, Reg, Reg, Reg) +DEF_HELPER_5(glue(blendvpd, SUFFIX), void, env, Reg, Reg, Reg, Reg) DEF_HELPER_3(glue(ptest, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmovsxbw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmovsxbd, SUFFIX), void, env, Reg, Reg) @@ -297,34 +307,32 @@ DEF_HELPER_3(glue(pmovzxbq, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmovzxwd, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmovzxwq, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmovzxdq, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pmuldq, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pcmpeqq, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(packusdw, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pminsb, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pminsd, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pminuw, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pminud, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pmaxsb, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pmaxsd, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pmaxuw, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pmaxud, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(pmulld, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmovsldup, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmovshdup, SUFFIX), void, env, Reg, Reg) 
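The jump from DEF_HELPER_3 to DEF_HELPER_4 (and to DEF_HELPER_5 for the immediate forms) in the blocks above adds a separate first-source register, matching the non-destructive three-operand style where the result is d = op(v, s) rather than d = op(d, s). The toy model below is not part of the patch; reg_t, paddw_sketch and the 4-lane width are invented purely to show the calling convention.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint16_t W[4]; } reg_t;

/* d = v + s; the legacy two-operand form is recovered by passing d as v */
static void paddw_sketch(reg_t *d, const reg_t *v, const reg_t *s)
{
    for (int i = 0; i < 4; i++) {
        d->W[i] = v->W[i] + s->W[i];
    }
}

int main(void)
{
    reg_t a = { { 1, 2, 3, 4 } }, b = { { 10, 20, 30, 40 } }, out;

    paddw_sketch(&out, &a, &b);    /* three-operand style: a and b are preserved */
    paddw_sketch(&a, &a, &b);      /* two-operand style: destination doubles as source */
    printf("%u %u\n", (unsigned)out.W[0], (unsigned)a.W[0]);   /* 11 11 */
    return 0;
}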
+DEF_HELPER_3(glue(pmovdldup, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_4(glue(pmuldq, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(packusdw, SUFFIX), void, env, Reg, Reg, Reg) +#if SHIFT == 1 DEF_HELPER_3(glue(phminposuw, SUFFIX), void, env, Reg, Reg) +#endif DEF_HELPER_4(glue(roundps, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_4(glue(roundpd, SUFFIX), void, env, Reg, Reg, i32) -DEF_HELPER_4(glue(roundss, SUFFIX), void, env, Reg, Reg, i32) -DEF_HELPER_4(glue(roundsd, SUFFIX), void, env, Reg, Reg, i32) -DEF_HELPER_4(glue(blendps, SUFFIX), void, env, Reg, Reg, i32) -DEF_HELPER_4(glue(blendpd, SUFFIX), void, env, Reg, Reg, i32) -DEF_HELPER_4(glue(pblendw, SUFFIX), void, env, Reg, Reg, i32) -DEF_HELPER_4(glue(dpps, SUFFIX), void, env, Reg, Reg, i32) -DEF_HELPER_4(glue(dppd, SUFFIX), void, env, Reg, Reg, i32) -DEF_HELPER_4(glue(mpsadbw, SUFFIX), void, env, Reg, Reg, i32) +#if SHIFT == 1 +DEF_HELPER_5(roundss_xmm, void, env, Reg, Reg, Reg, i32) +DEF_HELPER_5(roundsd_xmm, void, env, Reg, Reg, Reg, i32) +#endif +DEF_HELPER_5(glue(blendps, SUFFIX), void, env, Reg, Reg, Reg, i32) +DEF_HELPER_5(glue(blendpd, SUFFIX), void, env, Reg, Reg, Reg, i32) +DEF_HELPER_5(glue(pblendw, SUFFIX), void, env, Reg, Reg, Reg, i32) +DEF_HELPER_5(glue(dpps, SUFFIX), void, env, Reg, Reg, Reg, i32) +#if SHIFT == 1 +DEF_HELPER_5(glue(dppd, SUFFIX), void, env, Reg, Reg, Reg, i32) +#endif +DEF_HELPER_5(glue(mpsadbw, SUFFIX), void, env, Reg, Reg, Reg, i32) #endif /* SSE4.2 op helpers */ #if SHIFT == 1 -DEF_HELPER_3(glue(pcmpgtq, SUFFIX), void, env, Reg, Reg) DEF_HELPER_4(glue(pcmpestri, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_4(glue(pcmpestrm, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_4(glue(pcmpistri, SUFFIX), void, env, Reg, Reg, i32) @@ -333,14 +341,62 @@ DEF_HELPER_3(crc32, tl, i32, tl, i32) #endif /* AES-NI op helpers */ +#if SHIFT >= 1 +DEF_HELPER_4(glue(aesdec, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(aesdeclast, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(aesenc, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(aesenclast, SUFFIX), void, env, Reg, Reg, Reg) #if SHIFT == 1 -DEF_HELPER_3(glue(aesdec, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(aesdeclast, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(aesenc, SUFFIX), void, env, Reg, Reg) -DEF_HELPER_3(glue(aesenclast, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(aesimc, SUFFIX), void, env, Reg, Reg) DEF_HELPER_4(glue(aeskeygenassist, SUFFIX), void, env, Reg, Reg, i32) -DEF_HELPER_4(glue(pclmulqdq, SUFFIX), void, env, Reg, Reg, i32) +#endif +DEF_HELPER_5(glue(pclmulqdq, SUFFIX), void, env, Reg, Reg, Reg, i32) +#endif + +/* F16C helpers */ +#if SHIFT >= 1 +DEF_HELPER_3(glue(cvtph2ps, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_4(glue(cvtps2ph, SUFFIX), void, env, Reg, Reg, int) +#endif + +/* FMA3 helpers */ +#if SHIFT == 1 +DEF_HELPER_6(fma4ss, void, env, Reg, Reg, Reg, Reg, int) +DEF_HELPER_6(fma4sd, void, env, Reg, Reg, Reg, Reg, int) +#endif + +#if SHIFT >= 1 +DEF_HELPER_7(glue(fma4ps, SUFFIX), void, env, Reg, Reg, Reg, Reg, int, int) +DEF_HELPER_7(glue(fma4pd, SUFFIX), void, env, Reg, Reg, Reg, Reg, int, int) +#endif + +/* AVX helpers */ +#if SHIFT >= 1 +DEF_HELPER_4(glue(vpermilpd, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(vpermilps, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_3(glue(vpermilpd_imm, SUFFIX), void, Reg, Reg, i32) +DEF_HELPER_3(glue(vpermilps_imm, SUFFIX), void, Reg, Reg, i32) +DEF_HELPER_4(glue(vpsrlvd, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(vpsravd, SUFFIX), 
void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(vpsllvd, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(vpsrlvq, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(vpsravq, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(vpsllvq, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_3(glue(vtestps, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(vtestpd, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_4(glue(vpmaskmovd_st, SUFFIX), void, env, Reg, Reg, tl) +DEF_HELPER_4(glue(vpmaskmovq_st, SUFFIX), void, env, Reg, Reg, tl) +DEF_HELPER_4(glue(vpmaskmovd, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_4(glue(vpmaskmovq, SUFFIX), void, env, Reg, Reg, Reg) +DEF_HELPER_6(glue(vpgatherdd, SUFFIX), void, env, Reg, Reg, Reg, tl, i32) +DEF_HELPER_6(glue(vpgatherdq, SUFFIX), void, env, Reg, Reg, Reg, tl, i32) +DEF_HELPER_6(glue(vpgatherqd, SUFFIX), void, env, Reg, Reg, Reg, tl, i32) +DEF_HELPER_6(glue(vpgatherqq, SUFFIX), void, env, Reg, Reg, Reg, tl, i32) +#if SHIFT == 2 +DEF_HELPER_3(vpermd_ymm, void, Reg, Reg, Reg) +DEF_HELPER_4(vpermdq_ymm, void, Reg, Reg, Reg, i32) +DEF_HELPER_3(vpermq_ymm, void, Reg, Reg, i32) +#endif #endif #undef SHIFT @@ -351,6 +407,9 @@ DEF_HELPER_4(glue(pclmulqdq, SUFFIX), void, env, Reg, Reg, i32) #undef SSE_HELPER_W #undef SSE_HELPER_L #undef SSE_HELPER_Q -#undef SSE_HELPER_S +#undef SSE_HELPER_S3 +#undef SSE_HELPER_S4 +#undef SSE_HELPER_P3 +#undef SSE_HELPER_P4 #undef SSE_HELPER_CMP #undef UNPCK_OP diff --git a/target/i386/sev-stub.c b/target/i386/sev-stub.c deleted file mode 100644 index 0227cb5177..0000000000 --- a/target/i386/sev-stub.c +++ /dev/null @@ -1,83 +0,0 @@ -/* - * QEMU SEV stub - * - * Copyright Advanced Micro Devices 2018 - * - * Authors: - * Brijesh Singh - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -#include "qemu/osdep.h" -#include "sev_i386.h" - -SevInfo *sev_get_info(void) -{ - return NULL; -} - -bool sev_enabled(void) -{ - return false; -} - -uint64_t sev_get_me_mask(void) -{ - return ~0; -} - -uint32_t sev_get_cbit_position(void) -{ - return 0; -} - -uint32_t sev_get_reduced_phys_bits(void) -{ - return 0; -} - -char *sev_get_launch_measurement(void) -{ - return NULL; -} - -SevCapability *sev_get_capabilities(Error **errp) -{ - error_setg(errp, "SEV is not available in this QEMU"); - return NULL; -} - -int sev_inject_launch_secret(const char *hdr, const char *secret, - uint64_t gpa, Error **errp) -{ - return 1; -} - -int sev_encrypt_flash(uint8_t *ptr, uint64_t len, Error **errp) -{ - return 0; -} - -bool sev_es_enabled(void) -{ - return false; -} - -void sev_es_set_reset_vector(CPUState *cpu) -{ -} - -int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size) -{ - abort(); -} - -SevAttestationReport * -sev_get_attestation_report(const char *mnonce, Error **errp) -{ - error_setg(errp, "SEV is not available in this QEMU"); - return NULL; -} diff --git a/target/i386/sev-sysemu-stub.c b/target/i386/sev-sysemu-stub.c new file mode 100644 index 0000000000..7a29295d1e --- /dev/null +++ b/target/i386/sev-sysemu-stub.c @@ -0,0 +1,70 @@ +/* + * QEMU SEV system stub + * + * Copyright Advanced Micro Devices 2018 + * + * Authors: + * Brijesh Singh + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "monitor/monitor.h" +#include "monitor/hmp-target.h" +#include "qapi/qapi-commands-misc-target.h" +#include "qapi/qmp/qerror.h" +#include "qapi/error.h" +#include "sev.h" + +SevInfo *qmp_query_sev(Error **errp) +{ + error_setg(errp, "SEV is not available in this QEMU"); + return NULL; +} + +SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp) +{ + error_setg(errp, "SEV is not available in this QEMU"); + return NULL; +} + +SevCapability *qmp_query_sev_capabilities(Error **errp) +{ + error_setg(errp, "SEV is not available in this QEMU"); + return NULL; +} + +void qmp_sev_inject_launch_secret(const char *packet_header, const char *secret, + bool has_gpa, uint64_t gpa, Error **errp) +{ + error_setg(errp, "SEV is not available in this QEMU"); +} + +int sev_encrypt_flash(uint8_t *ptr, uint64_t len, Error **errp) +{ + g_assert_not_reached(); +} + +void sev_es_set_reset_vector(CPUState *cpu) +{ +} + +int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size) +{ + g_assert_not_reached(); +} + +SevAttestationReport *qmp_query_sev_attestation_report(const char *mnonce, + Error **errp) +{ + error_setg(errp, "SEV is not available in this QEMU"); + return NULL; +} + +void hmp_info_sev(Monitor *mon, const QDict *qdict) +{ + monitor_printf(mon, "SEV is not available in this QEMU\n"); +} diff --git a/target/i386/sev.c b/target/i386/sev.c index 83df8c09f6..32f7dbac4e 100644 --- a/target/i386/sev.c +++ b/target/i386/sev.c @@ -23,16 +23,21 @@ #include "qemu/base64.h" #include "qemu/module.h" #include "qemu/uuid.h" +#include "crypto/hash.h" #include "sysemu/kvm.h" -#include "sev_i386.h" +#include "sev.h" #include "sysemu/sysemu.h" #include "sysemu/runstate.h" #include "trace.h" #include "migration/blocker.h" #include "qom/object.h" #include "monitor/monitor.h" +#include "monitor/hmp-target.h" +#include "qapi/qapi-commands-misc-target.h" +#include "qapi/qmp/qerror.h" #include "exec/confidential-guest-support.h" #include "hw/i386/pc.h" +#include "exec/address-spaces.h" #define TYPE_SEV_GUEST "sev-guest" OBJECT_DECLARE_SIMPLE_TYPE(SevGuestState, SEV_GUEST) @@ -58,13 +63,13 @@ struct SevGuestState { char *session_file; uint32_t cbitpos; uint32_t reduced_phys_bits; + bool kernel_hashes; /* runtime state */ uint32_t handle; uint8_t api_major; uint8_t api_minor; uint8_t build_id; - uint64_t me_mask; int sev_fd; SevState state; gchar *measurement; @@ -83,6 +88,42 @@ typedef struct __attribute__((__packed__)) SevInfoBlock { uint32_t reset_addr; } SevInfoBlock; +#define SEV_HASH_TABLE_RV_GUID "7255371f-3a3b-4b04-927b-1da6efa8d454" +typedef struct QEMU_PACKED SevHashTableDescriptor { + /* SEV hash table area guest address */ + uint32_t base; + /* SEV hash table area size (in bytes) */ + uint32_t size; +} SevHashTableDescriptor; + +/* hard code sha256 digest size */ +#define HASH_SIZE 32 + +typedef struct QEMU_PACKED SevHashTableEntry { + QemuUUID guid; + uint16_t len; + uint8_t hash[HASH_SIZE]; +} SevHashTableEntry; + +typedef struct QEMU_PACKED SevHashTable { + QemuUUID guid; + uint16_t len; + SevHashTableEntry cmdline; + SevHashTableEntry initrd; + SevHashTableEntry kernel; +} SevHashTable; + +/* + * Data encrypted by sev_encrypt_flash() must be padded to a multiple of + * 16 bytes. 
+ */ +typedef struct QEMU_PACKED PaddedSevHashTable { + SevHashTable ht; + uint8_t padding[ROUND_UP(sizeof(SevHashTable), 16) - sizeof(SevHashTable)]; +} PaddedSevHashTable; + +QEMU_BUILD_BUG_ON(sizeof(PaddedSevHashTable) % 16 != 0); + static SevGuestState *sev_guest; static Error *sev_mig_blocker; @@ -298,6 +339,20 @@ sev_guest_set_sev_device(Object *obj, const char *value, Error **errp) sev->sev_device = g_strdup(value); } +static bool sev_guest_get_kernel_hashes(Object *obj, Error **errp) +{ + SevGuestState *sev = SEV_GUEST(obj); + + return sev->kernel_hashes; +} + +static void sev_guest_set_kernel_hashes(Object *obj, bool value, Error **errp) +{ + SevGuestState *sev = SEV_GUEST(obj); + + sev->kernel_hashes = value; +} + static void sev_guest_class_init(ObjectClass *oc, void *data) { @@ -316,6 +371,11 @@ sev_guest_class_init(ObjectClass *oc, void *data) sev_guest_set_session_file); object_class_property_set_description(oc, "session-file", "guest owners session parameters (encoded with base64)"); + object_class_property_add_bool(oc, "kernel-hashes", + sev_guest_get_kernel_hashes, + sev_guest_set_kernel_hashes); + object_class_property_set_description(oc, "kernel-hashes", + "add kernel hashes to guest firmware for measured Linux boot"); } static void @@ -362,12 +422,6 @@ sev_es_enabled(void) return sev_enabled() && (sev_guest->policy & SEV_POLICY_ES); } -uint64_t -sev_get_me_mask(void) -{ - return sev_guest ? sev_guest->me_mask : ~0; -} - uint32_t sev_get_cbit_position(void) { @@ -380,8 +434,7 @@ sev_get_reduced_phys_bits(void) return sev_guest ? sev_guest->reduced_phys_bits : 0; } -SevInfo * -sev_get_info(void) +static SevInfo *sev_get_info(void) { SevInfo *info; @@ -400,6 +453,40 @@ sev_get_info(void) return info; } +SevInfo *qmp_query_sev(Error **errp) +{ + SevInfo *info; + + info = sev_get_info(); + if (!info) { + error_setg(errp, "SEV feature is not available"); + return NULL; + } + + return info; +} + +void hmp_info_sev(Monitor *mon, const QDict *qdict) +{ + SevInfo *info = sev_get_info(); + + if (info && info->enabled) { + monitor_printf(mon, "handle: %d\n", info->handle); + monitor_printf(mon, "state: %s\n", SevState_str(info->state)); + monitor_printf(mon, "build: %d\n", info->build_id); + monitor_printf(mon, "api version: %d.%d\n", + info->api_major, info->api_minor); + monitor_printf(mon, "debug: %s\n", + info->policy & SEV_POLICY_NODBG ? "off" : "on"); + monitor_printf(mon, "key-sharing: %s\n", + info->policy & SEV_POLICY_NOKS ? 
"off" : "on"); + } else { + monitor_printf(mon, "SEV is not enabled\n"); + } + + qapi_free_SevInfo(info); +} + static int sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain, size_t *cert_chain_len, Error **errp) @@ -413,7 +500,8 @@ sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain, r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err); if (r < 0) { if (err != SEV_RET_INVALID_LEN) { - error_setg(errp, "failed to export PDH cert ret=%d fw_err=%d (%s)", + error_setg(errp, "SEV: Failed to export PDH cert" + " ret=%d fw_err=%d (%s)", r, err, fw_error_to_str(err)); return 1; } @@ -426,7 +514,7 @@ sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain, r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err); if (r < 0) { - error_setg(errp, "failed to export PDH cert ret=%d fw_err=%d (%s)", + error_setg(errp, "SEV: Failed to export PDH cert ret=%d fw_err=%d (%s)", r, err, fw_error_to_str(err)); goto e_free; } @@ -443,13 +531,46 @@ e_free: return 1; } -SevCapability * -sev_get_capabilities(Error **errp) +static int sev_get_cpu0_id(int fd, guchar **id, size_t *id_len, Error **errp) +{ + guchar *id_data; + struct sev_user_data_get_id2 get_id2 = {}; + int err, r; + + /* query the ID length */ + r = sev_platform_ioctl(fd, SEV_GET_ID2, &get_id2, &err); + if (r < 0 && err != SEV_RET_INVALID_LEN) { + error_setg(errp, "SEV: Failed to get ID ret=%d fw_err=%d (%s)", + r, err, fw_error_to_str(err)); + return 1; + } + + id_data = g_new(guchar, get_id2.length); + get_id2.address = (unsigned long)id_data; + + r = sev_platform_ioctl(fd, SEV_GET_ID2, &get_id2, &err); + if (r < 0) { + error_setg(errp, "SEV: Failed to get ID ret=%d fw_err=%d (%s)", + r, err, fw_error_to_str(err)); + goto err; + } + + *id = id_data; + *id_len = get_id2.length; + return 0; + +err: + g_free(id_data); + return 1; +} + +static SevCapability *sev_get_capabilities(Error **errp) { SevCapability *cap = NULL; guchar *pdh_data = NULL; guchar *cert_chain_data = NULL; - size_t pdh_len = 0, cert_chain_len = 0; + guchar *cpu0_id_data = NULL; + size_t pdh_len = 0, cert_chain_len = 0, cpu0_id_len = 0; uint32_t ebx; int fd; @@ -464,7 +585,7 @@ sev_get_capabilities(Error **errp) fd = open(DEFAULT_SEV_DEVICE, O_RDWR); if (fd < 0) { - error_setg_errno(errp, errno, "Failed to open %s", + error_setg_errno(errp, errno, "SEV: Failed to open %s", DEFAULT_SEV_DEVICE); return NULL; } @@ -474,9 +595,14 @@ sev_get_capabilities(Error **errp) goto out; } + if (sev_get_cpu0_id(fd, &cpu0_id_data, &cpu0_id_len, errp)) { + goto out; + } + cap = g_new0(SevCapability, 1); cap->pdh = g_base64_encode(pdh_data, pdh_len); cap->cert_chain = g_base64_encode(cert_chain_data, cert_chain_len); + cap->cpu0_id = g_base64_encode(cpu0_id_data, cpu0_id_len); host_cpuid(0x8000001F, 0, NULL, &ebx, NULL, NULL); cap->cbitpos = ebx & 0x3f; @@ -488,20 +614,26 @@ sev_get_capabilities(Error **errp) cap->reduced_phys_bits = 1; out: + g_free(cpu0_id_data); g_free(pdh_data); g_free(cert_chain_data); close(fd); return cap; } -SevAttestationReport * -sev_get_attestation_report(const char *mnonce, Error **errp) +SevCapability *qmp_query_sev_capabilities(Error **errp) +{ + return sev_get_capabilities(errp); +} + +static SevAttestationReport *sev_get_attestation_report(const char *mnonce, + Error **errp) { struct kvm_sev_attestation_report input = {}; SevAttestationReport *report = NULL; SevGuestState *sev = sev_guest; - guchar *data; - guchar *buf; + g_autofree guchar *data = NULL; + g_autofree guchar *buf = 
NULL; gsize len; int err = 0, ret; @@ -521,7 +653,6 @@ sev_get_attestation_report(const char *mnonce, Error **errp) if (len != sizeof(input.mnonce)) { error_setg(errp, "SEV: mnonce must be %zu bytes (got %" G_GSIZE_FORMAT ")", sizeof(input.mnonce), len); - g_free(buf); return NULL; } @@ -530,9 +661,9 @@ sev_get_attestation_report(const char *mnonce, Error **errp) &input, &err); if (ret < 0) { if (err != SEV_RET_INVALID_LEN) { - error_setg(errp, "failed to query the attestation report length " - "ret=%d fw_err=%d (%s)", ret, err, fw_error_to_str(err)); - g_free(buf); + error_setg(errp, "SEV: Failed to query the attestation report" + " length ret=%d fw_err=%d (%s)", + ret, err, fw_error_to_str(err)); return NULL; } } @@ -545,9 +676,9 @@ sev_get_attestation_report(const char *mnonce, Error **errp) ret = sev_ioctl(sev->sev_fd, KVM_SEV_GET_ATTESTATION_REPORT, &input, &err); if (ret) { - error_setg_errno(errp, errno, "Failed to get attestation report" + error_setg_errno(errp, errno, "SEV: Failed to get attestation report" " ret=%d fw_err=%d (%s)", ret, err, fw_error_to_str(err)); - goto e_free_data; + return NULL; } report = g_new0(SevAttestationReport, 1); @@ -555,21 +686,24 @@ sev_get_attestation_report(const char *mnonce, Error **errp) trace_kvm_sev_attestation_report(mnonce, report->data); -e_free_data: - g_free(data); - g_free(buf); return report; } +SevAttestationReport *qmp_query_sev_attestation_report(const char *mnonce, + Error **errp) +{ + return sev_get_attestation_report(mnonce, errp); +} + static int sev_read_file_base64(const char *filename, guchar **data, gsize *len) { gsize sz; - gchar *base64; + g_autofree gchar *base64 = NULL; GError *error = NULL; if (!g_file_get_contents(filename, &base64, &sz, &error)) { - error_report("failed to read '%s' (%s)", filename, error->message); + error_report("SEV: Failed to read '%s' (%s)", filename, error->message); g_error_free(error); return -1; } @@ -584,31 +718,29 @@ sev_launch_start(SevGuestState *sev) gsize sz; int ret = 1; int fw_error, rc; - struct kvm_sev_launch_start *start; + struct kvm_sev_launch_start start = { + .handle = sev->handle, .policy = sev->policy + }; guchar *session = NULL, *dh_cert = NULL; - start = g_new0(struct kvm_sev_launch_start, 1); - - start->handle = sev->handle; - start->policy = sev->policy; if (sev->session_file) { if (sev_read_file_base64(sev->session_file, &session, &sz) < 0) { goto out; } - start->session_uaddr = (unsigned long)session; - start->session_len = sz; + start.session_uaddr = (unsigned long)session; + start.session_len = sz; } if (sev->dh_cert_file) { if (sev_read_file_base64(sev->dh_cert_file, &dh_cert, &sz) < 0) { goto out; } - start->dh_uaddr = (unsigned long)dh_cert; - start->dh_len = sz; + start.dh_uaddr = (unsigned long)dh_cert; + start.dh_len = sz; } - trace_kvm_sev_launch_start(start->policy, session, dh_cert); - rc = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_START, start, &fw_error); + trace_kvm_sev_launch_start(start.policy, session, dh_cert); + rc = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_START, &start, &fw_error); if (rc < 0) { error_report("%s: LAUNCH_START ret=%d fw_error=%d '%s'", __func__, ret, fw_error, fw_error_to_str(fw_error)); @@ -616,11 +748,10 @@ sev_launch_start(SevGuestState *sev) } sev_set_guest_state(sev, SEV_STATE_LAUNCH_UPDATE); - sev->handle = start->handle; + sev->handle = start.handle; ret = 0; out: - g_free(start); g_free(session); g_free(dh_cert); return ret; @@ -668,8 +799,8 @@ sev_launch_get_measure(Notifier *notifier, void *unused) { SevGuestState *sev = 
sev_guest; int ret, error; - guchar *data; - struct kvm_sev_launch_measure *measurement; + g_autofree guchar *data = NULL; + struct kvm_sev_launch_measure measurement = {}; if (!sev_check_state(sev, SEV_STATE_LAUNCH_UPDATE)) { return; @@ -683,43 +814,35 @@ sev_launch_get_measure(Notifier *notifier, void *unused) } } - measurement = g_new0(struct kvm_sev_launch_measure, 1); - /* query the measurement blob length */ ret = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_MEASURE, - measurement, &error); - if (!measurement->len) { + &measurement, &error); + if (!measurement.len) { error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'", __func__, ret, error, fw_error_to_str(errno)); - goto free_measurement; + return; } - data = g_new0(guchar, measurement->len); - measurement->uaddr = (unsigned long)data; + data = g_new0(guchar, measurement.len); + measurement.uaddr = (unsigned long)data; /* get the measurement blob */ ret = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_MEASURE, - measurement, &error); + &measurement, &error); if (ret) { error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'", __func__, ret, error, fw_error_to_str(errno)); - goto free_data; + return; } sev_set_guest_state(sev, SEV_STATE_LAUNCH_SECRET); /* encode the measurement value and emit the event */ - sev->measurement = g_base64_encode(data, measurement->len); + sev->measurement = g_base64_encode(data, measurement.len); trace_kvm_sev_launch_measurement(sev->measurement); - -free_data: - g_free(data); -free_measurement: - g_free(measurement); } -char * -sev_get_launch_measurement(void) +static char *sev_get_launch_measurement(void) { if (sev_guest && sev_guest->state >= SEV_STATE_LAUNCH_SECRET) { @@ -729,6 +852,23 @@ sev_get_launch_measurement(void) return NULL; } +SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp) +{ + char *data; + SevLaunchMeasureInfo *info; + + data = sev_get_launch_measurement(); + if (!data) { + error_setg(errp, "SEV launch measurement is not available"); + return NULL; + } + + info = g_malloc0(sizeof(*info)); + info->data = data; + + return info; +} + static Notifier sev_machine_done_notify = { .notify = sev_launch_get_measure, }; @@ -737,7 +877,6 @@ static void sev_launch_finish(SevGuestState *sev) { int ret, error; - Error *local_err = NULL; trace_kvm_sev_launch_finish(); ret = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_FINISH, 0, &error); @@ -752,12 +891,7 @@ sev_launch_finish(SevGuestState *sev) /* add migration blocker */ error_setg(&sev_mig_blocker, "SEV: Migration is not implemented"); - ret = migrate_add_blocker(sev_mig_blocker, &local_err); - if (local_err) { - error_report_err(local_err); - error_free(sev_mig_blocker); - exit(1); - } + migrate_add_blocker(sev_mig_blocker, &error_fatal); } static void @@ -810,8 +944,6 @@ int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) goto err; } - sev->me_mask = ~(1UL << sev->cbitpos); - devname = object_property_get_str(OBJECT(sev), "sev-device", NULL); sev->sev_fd = open(devname, O_RDWR); if (sev->sev_fd < 0) { @@ -890,7 +1022,7 @@ sev_encrypt_flash(uint8_t *ptr, uint64_t len, Error **errp) if (sev_check_state(sev_guest, SEV_STATE_LAUNCH_UPDATE)) { int ret = sev_launch_update_data(sev_guest, ptr, len); if (ret < 0) { - error_setg(errp, "failed to encrypt pflash rom"); + error_setg(errp, "SEV: Failed to encrypt pflash rom"); return ret; } } @@ -909,7 +1041,7 @@ int sev_inject_launch_secret(const char *packet_hdr, const char *secret, MemoryRegion *mr = NULL; if (!sev_guest) { - error_setg(errp, "SEV: SEV not enabled."); + error_setg(errp, "SEV not 
enabled for guest"); return 1; } @@ -961,6 +1093,37 @@ int sev_inject_launch_secret(const char *packet_hdr, const char *secret, return 0; } +#define SEV_SECRET_GUID "4c2eb361-7d9b-4cc3-8081-127c90d3d294" +struct sev_secret_area { + uint32_t base; + uint32_t size; +}; + +void qmp_sev_inject_launch_secret(const char *packet_hdr, + const char *secret, + bool has_gpa, uint64_t gpa, + Error **errp) +{ + if (!sev_enabled()) { + error_setg(errp, "SEV not enabled for guest"); + return; + } + if (!has_gpa) { + uint8_t *data; + struct sev_secret_area *area; + + if (!pc_system_ovmf_table_find(SEV_SECRET_GUID, &data, NULL)) { + error_setg(errp, "SEV: no secret area found in OVMF," + " gpa must be specified."); + return; + } + area = (struct sev_secret_area *)data; + gpa = area->base; + } + + sev_inject_launch_secret(packet_hdr, secret, gpa, errp); +} + static int sev_es_parse_reset_block(SevInfoBlock *info, uint32_t *addr) { @@ -1077,6 +1240,138 @@ int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size) return 0; } +static const QemuUUID sev_hash_table_header_guid = { + .data = UUID_LE(0x9438d606, 0x4f22, 0x4cc9, 0xb4, 0x79, 0xa7, 0x93, + 0xd4, 0x11, 0xfd, 0x21) +}; + +static const QemuUUID sev_kernel_entry_guid = { + .data = UUID_LE(0x4de79437, 0xabd2, 0x427f, 0xb8, 0x35, 0xd5, 0xb1, + 0x72, 0xd2, 0x04, 0x5b) +}; +static const QemuUUID sev_initrd_entry_guid = { + .data = UUID_LE(0x44baf731, 0x3a2f, 0x4bd7, 0x9a, 0xf1, 0x41, 0xe2, + 0x91, 0x69, 0x78, 0x1d) +}; +static const QemuUUID sev_cmdline_entry_guid = { + .data = UUID_LE(0x97d02dd8, 0xbd20, 0x4c94, 0xaa, 0x78, 0xe7, 0x71, + 0x4d, 0x36, 0xab, 0x2a) +}; + +/* + * Add the hashes of the linux kernel/initrd/cmdline to an encrypted guest page + * which is included in SEV's initial memory measurement. + */ +bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **errp) +{ + uint8_t *data; + SevHashTableDescriptor *area; + SevHashTable *ht; + PaddedSevHashTable *padded_ht; + uint8_t cmdline_hash[HASH_SIZE]; + uint8_t initrd_hash[HASH_SIZE]; + uint8_t kernel_hash[HASH_SIZE]; + uint8_t *hashp; + size_t hash_len = HASH_SIZE; + hwaddr mapped_len = sizeof(*padded_ht); + MemTxAttrs attrs = { 0 }; + bool ret = true; + + /* + * Only add the kernel hashes if the sev-guest configuration explicitly + * stated kernel-hashes=on. + */ + if (!sev_guest->kernel_hashes) { + return false; + } + + if (!pc_system_ovmf_table_find(SEV_HASH_TABLE_RV_GUID, &data, NULL)) { + error_setg(errp, "SEV: kernel specified but guest firmware " + "has no hashes table GUID"); + return false; + } + area = (SevHashTableDescriptor *)data; + if (!area->base || area->size < sizeof(PaddedSevHashTable)) { + error_setg(errp, "SEV: guest firmware hashes table area is invalid " + "(base=0x%x size=0x%x)", area->base, area->size); + return false; + } + + /* + * Calculate hash of kernel command-line with the terminating null byte. If + * the user doesn't supply a command-line via -append, the 1-byte "\0" will + * be used. + */ + hashp = cmdline_hash; + if (qcrypto_hash_bytes(QCRYPTO_HASH_ALG_SHA256, ctx->cmdline_data, + ctx->cmdline_size, &hashp, &hash_len, errp) < 0) { + return false; + } + assert(hash_len == HASH_SIZE); + + /* + * Calculate hash of initrd. If the user doesn't supply an initrd via + * -initrd, an empty buffer will be used (ctx->initrd_size == 0). 
+ */ + hashp = initrd_hash; + if (qcrypto_hash_bytes(QCRYPTO_HASH_ALG_SHA256, ctx->initrd_data, + ctx->initrd_size, &hashp, &hash_len, errp) < 0) { + return false; + } + assert(hash_len == HASH_SIZE); + + /* Calculate hash of the kernel */ + hashp = kernel_hash; + struct iovec iov[2] = { + { .iov_base = ctx->setup_data, .iov_len = ctx->setup_size }, + { .iov_base = ctx->kernel_data, .iov_len = ctx->kernel_size } + }; + if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALG_SHA256, iov, ARRAY_SIZE(iov), + &hashp, &hash_len, errp) < 0) { + return false; + } + assert(hash_len == HASH_SIZE); + + /* + * Populate the hashes table in the guest's memory at the OVMF-designated + * area for the SEV hashes table + */ + padded_ht = address_space_map(&address_space_memory, area->base, + &mapped_len, true, attrs); + if (!padded_ht || mapped_len != sizeof(*padded_ht)) { + error_setg(errp, "SEV: cannot map hashes table guest memory area"); + return false; + } + ht = &padded_ht->ht; + + ht->guid = sev_hash_table_header_guid; + ht->len = sizeof(*ht); + + ht->cmdline.guid = sev_cmdline_entry_guid; + ht->cmdline.len = sizeof(ht->cmdline); + memcpy(ht->cmdline.hash, cmdline_hash, sizeof(ht->cmdline.hash)); + + ht->initrd.guid = sev_initrd_entry_guid; + ht->initrd.len = sizeof(ht->initrd); + memcpy(ht->initrd.hash, initrd_hash, sizeof(ht->initrd.hash)); + + ht->kernel.guid = sev_kernel_entry_guid; + ht->kernel.len = sizeof(ht->kernel); + memcpy(ht->kernel.hash, kernel_hash, sizeof(ht->kernel.hash)); + + /* zero the excess data so the measurement can be reliably calculated */ + memset(padded_ht->padding, 0, sizeof(padded_ht->padding)); + + if (sev_encrypt_flash((uint8_t *)padded_ht, sizeof(*padded_ht), errp) < 0) { + ret = false; + } + + address_space_unmap(&address_space_memory, padded_ht, + mapped_len, true, mapped_len); + + return ret; +} + static void sev_register_types(void) { diff --git a/target/i386/sev.h b/target/i386/sev.h new file mode 100644 index 0000000000..7b1528248a --- /dev/null +++ b/target/i386/sev.h @@ -0,0 +1,62 @@ +/* + * QEMU Secure Encrypted Virutualization (SEV) support + * + * Copyright: Advanced Micro Devices, 2016-2018 + * + * Authors: + * Brijesh Singh + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#ifndef I386_SEV_H +#define I386_SEV_H + +#ifndef CONFIG_USER_ONLY +#include CONFIG_DEVICES /* CONFIG_SEV */ +#endif + +#include "exec/confidential-guest-support.h" + +#define SEV_POLICY_NODBG 0x1 +#define SEV_POLICY_NOKS 0x2 +#define SEV_POLICY_ES 0x4 +#define SEV_POLICY_NOSEND 0x8 +#define SEV_POLICY_DOMAIN 0x10 +#define SEV_POLICY_SEV 0x20 + +typedef struct SevKernelLoaderContext { + char *setup_data; + size_t setup_size; + char *kernel_data; + size_t kernel_size; + char *initrd_data; + size_t initrd_size; + char *cmdline_data; + size_t cmdline_size; +} SevKernelLoaderContext; + +#ifdef CONFIG_SEV +bool sev_enabled(void); +bool sev_es_enabled(void); +#else +#define sev_enabled() 0 +#define sev_es_enabled() 0 +#endif + +extern uint32_t sev_get_cbit_position(void); +extern uint32_t sev_get_reduced_phys_bits(void); +extern bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **errp); + +int sev_encrypt_flash(uint8_t *ptr, uint64_t len, Error **errp); +int sev_inject_launch_secret(const char *hdr, const char *secret, + uint64_t gpa, Error **errp); + +int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size); +void sev_es_set_reset_vector(CPUState *cpu); + +int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp); + +#endif diff --git a/target/i386/sev_i386.h b/target/i386/sev_i386.h deleted file mode 100644 index ae6d840478..0000000000 --- a/target/i386/sev_i386.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * QEMU Secure Encrypted Virutualization (SEV) support - * - * Copyright: Advanced Micro Devices, 2016-2018 - * - * Authors: - * Brijesh Singh - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -#ifndef QEMU_SEV_I386_H -#define QEMU_SEV_I386_H - -#include "qom/object.h" -#include "qapi/error.h" -#include "sysemu/kvm.h" -#include "sysemu/sev.h" -#include "qemu/error-report.h" -#include "qapi/qapi-types-misc-target.h" - -#define SEV_POLICY_NODBG 0x1 -#define SEV_POLICY_NOKS 0x2 -#define SEV_POLICY_ES 0x4 -#define SEV_POLICY_NOSEND 0x8 -#define SEV_POLICY_DOMAIN 0x10 -#define SEV_POLICY_SEV 0x20 - -extern bool sev_es_enabled(void); -extern uint64_t sev_get_me_mask(void); -extern SevInfo *sev_get_info(void); -extern uint32_t sev_get_cbit_position(void); -extern uint32_t sev_get_reduced_phys_bits(void); -extern char *sev_get_launch_measurement(void); -extern SevCapability *sev_get_capabilities(Error **errp); -extern SevAttestationReport * -sev_get_attestation_report(const char *mnonce, Error **errp); - -#endif diff --git a/target/i386/svm.h b/target/i386/svm.h index adc058dc76..f9a785489d 100644 --- a/target/i386/svm.h +++ b/target/i386/svm.h @@ -9,6 +9,12 @@ #define V_IRQ_SHIFT 8 #define V_IRQ_MASK (1 << V_IRQ_SHIFT) +#define V_GIF_ENABLED_SHIFT 25 +#define V_GIF_ENABLED_MASK (1 << V_GIF_ENABLED_SHIFT) + +#define V_GIF_SHIFT 9 +#define V_GIF_MASK (1 << V_GIF_SHIFT) + #define V_INTR_PRIO_SHIFT 16 #define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT) @@ -18,6 +24,8 @@ #define V_INTR_MASKING_SHIFT 24 #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT) +#define V_VMLOAD_VMSAVE_ENABLED_MASK (1 << 1) + #define SVM_INTERRUPT_SHADOW_MASK 1 #define SVM_IOIO_STR_SHIFT 2 diff --git a/target/i386/tcg/bpt_helper.c b/target/i386/tcg/bpt_helper.c index b6c1fff16e..bc34ac27fe 100644 --- a/target/i386/tcg/bpt_helper.c +++ b/target/i386/tcg/bpt_helper.c @@ -22,7 +22,7 @@ #include "exec/helper-proto.h" #include "helper-tcg.h" -void QEMU_NORETURN helper_single_step(CPUX86State 
*env) +G_NORETURN void helper_single_step(CPUX86State *env) { #ifndef CONFIG_USER_ONLY check_hw_breakpoints(env, true); diff --git a/target/i386/tcg/cc_helper.c b/target/i386/tcg/cc_helper.c index cc7ea9e8b9..6227dbb30b 100644 --- a/target/i386/tcg/cc_helper.c +++ b/target/i386/tcg/cc_helper.c @@ -346,44 +346,3 @@ void helper_clts(CPUX86State *env) env->cr[0] &= ~CR0_TS_MASK; env->hflags &= ~HF_TS_MASK; } - -void helper_reset_rf(CPUX86State *env) -{ - env->eflags &= ~RF_MASK; -} - -void helper_cli(CPUX86State *env) -{ - env->eflags &= ~IF_MASK; -} - -void helper_sti(CPUX86State *env) -{ - env->eflags |= IF_MASK; -} - -void helper_clac(CPUX86State *env) -{ - env->eflags &= ~AC_MASK; -} - -void helper_stac(CPUX86State *env) -{ - env->eflags |= AC_MASK; -} - -#if 0 -/* vm86plus instructions */ -void helper_cli_vm(CPUX86State *env) -{ - env->eflags &= ~VIF_MASK; -} - -void helper_sti_vm(CPUX86State *env) -{ - env->eflags |= VIF_MASK; - if (env->eflags & VIP_MASK) { - raise_exception_ra(env, EXCP0D_GPF, GETPC()); - } -} -#endif diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc new file mode 100644 index 0000000000..6099919bc5 --- /dev/null +++ b/target/i386/tcg/decode-new.c.inc @@ -0,0 +1,1858 @@ +/* + * New-style decoder for i386 instructions + * + * Copyright (c) 2022 Red Hat, Inc. + * + * Author: Paolo Bonzini + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +/* + * The decoder is mostly based on tables copied from the Intel SDM. As + * a result, most operand load and writeback is done entirely in common + * table-driven code using the same operand type (X86_TYPE_*) and + * size (X86_SIZE_*) codes used in the manual. + * + * The main difference is that the V, U and W types are extended to + * cover MMX as well; if an instruction is like + * + * por Pq, Qq + * 66 por Vx, Hx, Wx + * + * only the second row is included and the instruction is marked as a + * valid MMX instruction. The MMX flag directs the decoder to rewrite + * the V/U/H/W types to P/N/P/Q if there is no prefix, as well as changing + * "x" to "q" if there is no prefix. + * + * In addition, the ss/ps/sd/pd types are sometimes mushed together as "x" + * if the difference is expressed via prefixes. Individual instructions + * are separated by prefix in the generator functions. + * + * There are a couple cases in which instructions (e.g. MOVD) write the + * whole XMM or MM register but are established incorrectly in the manual + * as "d" or "q". These have to be fixed for the decoder to work correctly. + */ + +#define X86_OP_NONE { 0 }, + +#define X86_OP_GROUP3(op, op0_, s0_, op1_, s1_, op2_, s2_, ...) 
{ \ + .decode = glue(decode_, op), \ + .op0 = glue(X86_TYPE_, op0_), \ + .s0 = glue(X86_SIZE_, s0_), \ + .op1 = glue(X86_TYPE_, op1_), \ + .s1 = glue(X86_SIZE_, s1_), \ + .op2 = glue(X86_TYPE_, op2_), \ + .s2 = glue(X86_SIZE_, s2_), \ + .is_decode = true, \ + ## __VA_ARGS__ \ +} + +#define X86_OP_GROUP2(op, op0, s0, op1, s1, ...) \ + X86_OP_GROUP3(op, op0, s0, 2op, s0, op1, s1, ## __VA_ARGS__) +#define X86_OP_GROUP0(op, ...) \ + X86_OP_GROUP3(op, None, None, None, None, None, None, ## __VA_ARGS__) + +#define X86_OP_ENTRY3(op, op0_, s0_, op1_, s1_, op2_, s2_, ...) { \ + .gen = glue(gen_, op), \ + .op0 = glue(X86_TYPE_, op0_), \ + .s0 = glue(X86_SIZE_, s0_), \ + .op1 = glue(X86_TYPE_, op1_), \ + .s1 = glue(X86_SIZE_, s1_), \ + .op2 = glue(X86_TYPE_, op2_), \ + .s2 = glue(X86_SIZE_, s2_), \ + ## __VA_ARGS__ \ +} + +#define X86_OP_ENTRY4(op, op0_, s0_, op1_, s1_, op2_, s2_, ...) \ + X86_OP_ENTRY3(op, op0_, s0_, op1_, s1_, op2_, s2_, \ + .op3 = X86_TYPE_I, .s3 = X86_SIZE_b, \ + ## __VA_ARGS__) + +#define X86_OP_ENTRY2(op, op0, s0, op1, s1, ...) \ + X86_OP_ENTRY3(op, op0, s0, 2op, s0, op1, s1, ## __VA_ARGS__) +#define X86_OP_ENTRYw(op, op0, s0, ...) \ + X86_OP_ENTRY3(op, op0, s0, None, None, None, None, ## __VA_ARGS__) +#define X86_OP_ENTRYr(op, op0, s0, ...) \ + X86_OP_ENTRY3(op, None, None, None, None, op0, s0, ## __VA_ARGS__) +#define X86_OP_ENTRY0(op, ...) \ + X86_OP_ENTRY3(op, None, None, None, None, None, None, ## __VA_ARGS__) + +#define cpuid(feat) .cpuid = X86_FEAT_##feat, +#define i64 .special = X86_SPECIAL_i64, +#define o64 .special = X86_SPECIAL_o64, +#define xchg .special = X86_SPECIAL_Locked, +#define mmx .special = X86_SPECIAL_MMX, +#define zext0 .special = X86_SPECIAL_ZExtOp0, +#define zext2 .special = X86_SPECIAL_ZExtOp2, +#define avx_movx .special = X86_SPECIAL_AVXExtMov, + +#define vex1 .vex_class = 1, +#define vex1_rep3 .vex_class = 1, .vex_special = X86_VEX_REPScalar, +#define vex2 .vex_class = 2, +#define vex2_rep3 .vex_class = 2, .vex_special = X86_VEX_REPScalar, +#define vex3 .vex_class = 3, +#define vex4 .vex_class = 4, +#define vex4_unal .vex_class = 4, .vex_special = X86_VEX_SSEUnaligned, +#define vex5 .vex_class = 5, +#define vex6 .vex_class = 6, +#define vex7 .vex_class = 7, +#define vex8 .vex_class = 8, +#define vex11 .vex_class = 11, +#define vex12 .vex_class = 12, +#define vex13 .vex_class = 13, + +#define avx2_256 .vex_special = X86_VEX_AVX2_256, + +#define P_00 1 +#define P_66 (1 << PREFIX_DATA) +#define P_F3 (1 << PREFIX_REPZ) +#define P_F2 (1 << PREFIX_REPNZ) + +#define p_00 .valid_prefix = P_00, +#define p_66 .valid_prefix = P_66, +#define p_f3 .valid_prefix = P_F3, +#define p_f2 .valid_prefix = P_F2, +#define p_00_66 .valid_prefix = P_00 | P_66, +#define p_00_f3 .valid_prefix = P_00 | P_F3, +#define p_66_f2 .valid_prefix = P_66 | P_F2, +#define p_00_66_f3 .valid_prefix = P_00 | P_66 | P_F3, +#define p_66_f3_f2 .valid_prefix = P_66 | P_F3 | P_F2, +#define p_00_66_f3_f2 .valid_prefix = P_00 | P_66 | P_F3 | P_F2, + +static uint8_t get_modrm(DisasContext *s, CPUX86State *env) +{ + if (!s->has_modrm) { + s->modrm = x86_ldub_code(env, s); + s->has_modrm = true; + } + return s->modrm; +} + +static inline const X86OpEntry *decode_by_prefix(DisasContext *s, const X86OpEntry entries[4]) +{ + if (s->prefix & PREFIX_REPNZ) { + return &entries[3]; + } else if (s->prefix & PREFIX_REPZ) { + return &entries[2]; + } else if (s->prefix & PREFIX_DATA) { + return &entries[1]; + } else { + return &entries[0]; + } +} + +static void decode_group15(DisasContext *s, CPUX86State *env, 
X86OpEntry *entry, uint8_t *b) +{ + /* only includes ldmxcsr and stmxcsr, because they have AVX variants. */ + static const X86OpEntry group15_reg[8] = { + }; + + static const X86OpEntry group15_mem[8] = { + [2] = X86_OP_ENTRYr(LDMXCSR, E,d, vex5), + [3] = X86_OP_ENTRYw(STMXCSR, E,d, vex5), + }; + + uint8_t modrm = get_modrm(s, env); + if ((modrm >> 6) == 3) { + *entry = group15_reg[(modrm >> 3) & 7]; + } else { + *entry = group15_mem[(modrm >> 3) & 7]; + } +} + +static void decode_group17(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86GenFunc group17_gen[8] = { + NULL, gen_BLSR, gen_BLSMSK, gen_BLSI, + }; + int op = (get_modrm(s, env) >> 3) & 7; + entry->gen = group17_gen[op]; +} + +static void decode_group12(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry opcodes_group12[8] = { + {}, + {}, + X86_OP_ENTRY3(PSRLW_i, H,x, U,x, I,b, vex7 mmx avx2_256 p_00_66), + {}, + X86_OP_ENTRY3(PSRAW_i, H,x, U,x, I,b, vex7 mmx avx2_256 p_00_66), + {}, + X86_OP_ENTRY3(PSLLW_i, H,x, U,x, I,b, vex7 mmx avx2_256 p_00_66), + {}, + }; + + int op = (get_modrm(s, env) >> 3) & 7; + *entry = opcodes_group12[op]; +} + +static void decode_group13(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry opcodes_group13[8] = { + {}, + {}, + X86_OP_ENTRY3(PSRLD_i, H,x, U,x, I,b, vex7 mmx avx2_256 p_00_66), + {}, + X86_OP_ENTRY3(PSRAD_i, H,x, U,x, I,b, vex7 mmx avx2_256 p_00_66), + {}, + X86_OP_ENTRY3(PSLLD_i, H,x, U,x, I,b, vex7 mmx avx2_256 p_00_66), + {}, + }; + + int op = (get_modrm(s, env) >> 3) & 7; + *entry = opcodes_group13[op]; +} + +static void decode_group14(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry opcodes_group14[8] = { + /* grp14 */ + {}, + {}, + X86_OP_ENTRY3(PSRLQ_i, H,x, U,x, I,b, vex7 mmx avx2_256 p_00_66), + X86_OP_ENTRY3(PSRLDQ_i, H,x, U,x, I,b, vex7 avx2_256 p_66), + {}, + {}, + X86_OP_ENTRY3(PSLLQ_i, H,x, U,x, I,b, vex7 mmx avx2_256 p_00_66), + X86_OP_ENTRY3(PSLLDQ_i, H,x, U,x, I,b, vex7 avx2_256 p_66), + }; + + int op = (get_modrm(s, env) >> 3) & 7; + *entry = opcodes_group14[op]; +} + +static void decode_0F6F(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry opcodes_0F6F[4] = { + X86_OP_ENTRY3(MOVDQ, P,q, None,None, Q,q, vex1 mmx), /* movq */ + X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex1), /* movdqa */ + X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* movdqu */ + {}, + }; + *entry = *decode_by_prefix(s, opcodes_0F6F); +} + +static void decode_0F70(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry pshufw[4] = { + X86_OP_ENTRY3(PSHUFW, P,q, Q,q, I,b, vex4 mmx), + X86_OP_ENTRY3(PSHUFD, V,x, W,x, I,b, vex4 avx2_256), + X86_OP_ENTRY3(PSHUFHW, V,x, W,x, I,b, vex4 avx2_256), + X86_OP_ENTRY3(PSHUFLW, V,x, W,x, I,b, vex4 avx2_256), + }; + + *entry = *decode_by_prefix(s, pshufw); +} + +static void decode_0F77(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + if (!(s->prefix & PREFIX_VEX)) { + entry->gen = gen_EMMS; + } else if (!s->vex_l) { + entry->gen = gen_VZEROUPPER; + entry->vex_class = 8; + } else { + entry->gen = gen_VZEROALL; + entry->vex_class = 8; + } +} + +static void decode_0F78(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry opcodes_0F78[4] = { + {}, + X86_OP_ENTRY3(EXTRQ_i, V,x, None,None, I,w, cpuid(SSE4A)), + {}, + X86_OP_ENTRY3(INSERTQ_i, V,x, U,x, 
I,w, cpuid(SSE4A)), + }; + *entry = *decode_by_prefix(s, opcodes_0F78); +} + +static void decode_0F79(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + if (s->prefix & PREFIX_REPNZ) { + entry->gen = gen_INSERTQ_r; + } else if (s->prefix & PREFIX_DATA) { + entry->gen = gen_EXTRQ_r; + } else { + entry->gen = NULL; + }; +} + +static void decode_0F7E(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry opcodes_0F7E[4] = { + X86_OP_ENTRY3(MOVD_from, E,y, None,None, P,y, vex5 mmx), + X86_OP_ENTRY3(MOVD_from, E,y, None,None, V,y, vex5), + X86_OP_ENTRY3(MOVQ, V,x, None,None, W,q, vex5), /* wrong dest Vy on SDM! */ + {}, + }; + *entry = *decode_by_prefix(s, opcodes_0F7E); +} + +static void decode_0F7F(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry opcodes_0F7F[4] = { + X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex1 mmx), /* movq */ + X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex1), /* movdqa */ + X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4_unal), /* movdqu */ + {}, + }; + *entry = *decode_by_prefix(s, opcodes_0F7F); +} + +static void decode_0FD6(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry movq[4] = { + {}, + X86_OP_ENTRY3(MOVQ, W,x, None, None, V,q, vex5), + X86_OP_ENTRY3(MOVq_dq, V,dq, None, None, N,q), + X86_OP_ENTRY3(MOVq_dq, P,q, None, None, U,q), + }; + + *entry = *decode_by_prefix(s, movq); +} + +static const X86OpEntry opcodes_0F38_00toEF[240] = { + [0x00] = X86_OP_ENTRY3(PSHUFB, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), + [0x01] = X86_OP_ENTRY3(PHADDW, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), + [0x02] = X86_OP_ENTRY3(PHADDD, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), + [0x03] = X86_OP_ENTRY3(PHADDSW, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), + [0x04] = X86_OP_ENTRY3(PMADDUBSW, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), + [0x05] = X86_OP_ENTRY3(PHSUBW, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), + [0x06] = X86_OP_ENTRY3(PHSUBD, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), + [0x07] = X86_OP_ENTRY3(PHSUBSW, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), + + [0x10] = X86_OP_ENTRY2(PBLENDVB, V,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), + [0x13] = X86_OP_ENTRY2(VCVTPH2PS, V,x, W,ph, vex11 cpuid(F16C) p_66), + [0x14] = X86_OP_ENTRY2(BLENDVPS, V,x, W,x, vex4 cpuid(SSE41) p_66), + [0x15] = X86_OP_ENTRY2(BLENDVPD, V,x, W,x, vex4 cpuid(SSE41) p_66), + /* Listed incorrectly as type 4 */ + [0x16] = X86_OP_ENTRY3(VPERMD, V,qq, H,qq, W,qq, vex6 cpuid(AVX2) p_66), + [0x17] = X86_OP_ENTRY3(VPTEST, None,None, V,x, W,x, vex4 cpuid(SSE41) p_66), + + /* + * Source operand listed as Mq/Ux and similar in the manual; incorrectly listed + * as 128-bit only in 2-17. + */ + [0x20] = X86_OP_ENTRY3(VPMOVSXBW, V,x, None,None, W,q, vex5 cpuid(SSE41) avx_movx avx2_256 p_66), + [0x21] = X86_OP_ENTRY3(VPMOVSXBD, V,x, None,None, W,d, vex5 cpuid(SSE41) avx_movx avx2_256 p_66), + [0x22] = X86_OP_ENTRY3(VPMOVSXBQ, V,x, None,None, W,w, vex5 cpuid(SSE41) avx_movx avx2_256 p_66), + [0x23] = X86_OP_ENTRY3(VPMOVSXWD, V,x, None,None, W,q, vex5 cpuid(SSE41) avx_movx avx2_256 p_66), + [0x24] = X86_OP_ENTRY3(VPMOVSXWQ, V,x, None,None, W,d, vex5 cpuid(SSE41) avx_movx avx2_256 p_66), + [0x25] = X86_OP_ENTRY3(VPMOVSXDQ, V,x, None,None, W,q, vex5 cpuid(SSE41) avx_movx avx2_256 p_66), + + /* Same as PMOVSX. 
*/ + [0x30] = X86_OP_ENTRY3(VPMOVZXBW, V,x, None,None, W,q, vex5 cpuid(SSE41) avx_movx avx2_256 p_66), + [0x31] = X86_OP_ENTRY3(VPMOVZXBD, V,x, None,None, W,d, vex5 cpuid(SSE41) avx_movx avx2_256 p_66), + [0x32] = X86_OP_ENTRY3(VPMOVZXBQ, V,x, None,None, W,w, vex5 cpuid(SSE41) avx_movx avx2_256 p_66), + [0x33] = X86_OP_ENTRY3(VPMOVZXWD, V,x, None,None, W,q, vex5 cpuid(SSE41) avx_movx avx2_256 p_66), + [0x34] = X86_OP_ENTRY3(VPMOVZXWQ, V,x, None,None, W,d, vex5 cpuid(SSE41) avx_movx avx2_256 p_66), + [0x35] = X86_OP_ENTRY3(VPMOVZXDQ, V,x, None,None, W,q, vex5 cpuid(SSE41) avx_movx avx2_256 p_66), + [0x36] = X86_OP_ENTRY3(VPERMD, V,qq, H,qq, W,qq, vex6 cpuid(AVX2) p_66), + [0x37] = X86_OP_ENTRY3(PCMPGTQ, V,x, H,x, W,x, vex4 cpuid(SSE42) avx2_256 p_66), + + [0x40] = X86_OP_ENTRY3(PMULLD, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), + [0x41] = X86_OP_ENTRY3(VPHMINPOSUW, V,dq, None,None, W,dq, vex4 cpuid(SSE41) p_66), + /* Listed incorrectly as type 4 */ + [0x45] = X86_OP_ENTRY3(VPSRLV, V,x, H,x, W,x, vex6 cpuid(AVX2) p_66), + [0x46] = X86_OP_ENTRY3(VPSRAV, V,x, H,x, W,x, vex6 cpuid(AVX2) p_66), + [0x47] = X86_OP_ENTRY3(VPSLLV, V,x, H,x, W,x, vex6 cpuid(AVX2) p_66), + + [0x90] = X86_OP_ENTRY3(VPGATHERD, V,x, H,x, M,d, vex12 cpuid(AVX2) p_66), /* vpgatherdd/q */ + [0x91] = X86_OP_ENTRY3(VPGATHERQ, V,x, H,x, M,q, vex12 cpuid(AVX2) p_66), /* vpgatherqd/q */ + [0x92] = X86_OP_ENTRY3(VPGATHERD, V,x, H,x, M,d, vex12 cpuid(AVX2) p_66), /* vgatherdps/d */ + [0x93] = X86_OP_ENTRY3(VPGATHERQ, V,x, H,x, M,q, vex12 cpuid(AVX2) p_66), /* vgatherqps/d */ + + /* Should be exception type 2 but they do not have legacy SSE equivalents? */ + [0x96] = X86_OP_ENTRY3(VFMADDSUB132Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0x97] = X86_OP_ENTRY3(VFMSUBADD132Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + + [0xa6] = X86_OP_ENTRY3(VFMADDSUB213Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0xa7] = X86_OP_ENTRY3(VFMSUBADD213Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + + [0xb6] = X86_OP_ENTRY3(VFMADDSUB231Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0xb7] = X86_OP_ENTRY3(VFMSUBADD231Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + + [0x08] = X86_OP_ENTRY3(PSIGNB, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), + [0x09] = X86_OP_ENTRY3(PSIGNW, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), + [0x0a] = X86_OP_ENTRY3(PSIGND, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), + [0x0b] = X86_OP_ENTRY3(PMULHRSW, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), + [0x0c] = X86_OP_ENTRY3(VPERMILPS, V,x, H,x, W,x, vex4 cpuid(AVX) p_00_66), + [0x0d] = X86_OP_ENTRY3(VPERMILPD, V,x, H,x, W,x, vex4 cpuid(AVX) p_66), + [0x0e] = X86_OP_ENTRY3(VTESTPS, None,None, V,x, W,x, vex4 cpuid(AVX) p_66), + [0x0f] = X86_OP_ENTRY3(VTESTPD, None,None, V,x, W,x, vex4 cpuid(AVX) p_66), + + [0x18] = X86_OP_ENTRY3(VPBROADCASTD, V,x, None,None, W,d, vex6 cpuid(AVX) p_66), /* vbroadcastss */ + [0x19] = X86_OP_ENTRY3(VPBROADCASTQ, V,qq, None,None, W,q, vex6 cpuid(AVX) p_66), /* vbroadcastsd */ + [0x1a] = X86_OP_ENTRY3(VBROADCASTx128, V,qq, None,None, WM,dq,vex6 cpuid(AVX) p_66), + [0x1c] = X86_OP_ENTRY3(PABSB, V,x, None,None, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), + [0x1d] = X86_OP_ENTRY3(PABSW, V,x, None,None, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), + [0x1e] = X86_OP_ENTRY3(PABSD, V,x, None,None, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), + + [0x28] = X86_OP_ENTRY3(PMULDQ, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), + [0x29] = X86_OP_ENTRY3(PCMPEQQ, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), + [0x2a] = 
X86_OP_ENTRY3(MOVDQ, V,x, None,None, WM,x, vex1 cpuid(SSE41) avx2_256 p_66), /* movntdqa */ + [0x2b] = X86_OP_ENTRY3(VPACKUSDW, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), + [0x2c] = X86_OP_ENTRY3(VMASKMOVPS, V,x, H,x, WM,x, vex6 cpuid(AVX) p_66), + [0x2d] = X86_OP_ENTRY3(VMASKMOVPD, V,x, H,x, WM,x, vex6 cpuid(AVX) p_66), + /* Incorrectly listed as Mx,Hx,Vx in the manual */ + [0x2e] = X86_OP_ENTRY3(VMASKMOVPS_st, M,x, V,x, H,x, vex6 cpuid(AVX) p_66), + [0x2f] = X86_OP_ENTRY3(VMASKMOVPD_st, M,x, V,x, H,x, vex6 cpuid(AVX) p_66), + + [0x38] = X86_OP_ENTRY3(PMINSB, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), + [0x39] = X86_OP_ENTRY3(PMINSD, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), + [0x3a] = X86_OP_ENTRY3(PMINUW, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), + [0x3b] = X86_OP_ENTRY3(PMINUD, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), + [0x3c] = X86_OP_ENTRY3(PMAXSB, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), + [0x3d] = X86_OP_ENTRY3(PMAXSD, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), + [0x3e] = X86_OP_ENTRY3(PMAXUW, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), + [0x3f] = X86_OP_ENTRY3(PMAXUD, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), + + [0x58] = X86_OP_ENTRY3(VPBROADCASTD, V,x, None,None, W,d, vex6 cpuid(AVX2) p_66), + [0x59] = X86_OP_ENTRY3(VPBROADCASTQ, V,x, None,None, W,q, vex6 cpuid(AVX2) p_66), + [0x5a] = X86_OP_ENTRY3(VBROADCASTx128, V,qq, None,None, WM,dq,vex6 cpuid(AVX2) p_66), + + [0x78] = X86_OP_ENTRY3(VPBROADCASTB, V,x, None,None, W,b, vex6 cpuid(AVX2) p_66), + [0x79] = X86_OP_ENTRY3(VPBROADCASTW, V,x, None,None, W,w, vex6 cpuid(AVX2) p_66), + + [0x8c] = X86_OP_ENTRY3(VPMASKMOV, V,x, H,x, WM,x, vex6 cpuid(AVX2) p_66), + [0x8e] = X86_OP_ENTRY3(VPMASKMOV_st, M,x, V,x, H,x, vex6 cpuid(AVX2) p_66), + + /* Should be exception type 2 or 3 but they do not have legacy SSE equivalents? 
*/ + [0x98] = X86_OP_ENTRY3(VFMADD132Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0x99] = X86_OP_ENTRY3(VFMADD132Sx, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0x9a] = X86_OP_ENTRY3(VFMSUB132Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0x9b] = X86_OP_ENTRY3(VFMSUB132Sx, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0x9c] = X86_OP_ENTRY3(VFNMADD132Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0x9d] = X86_OP_ENTRY3(VFNMADD132Sx, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0x9e] = X86_OP_ENTRY3(VFNMSUB132Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0x9f] = X86_OP_ENTRY3(VFNMSUB132Sx, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + + [0xa8] = X86_OP_ENTRY3(VFMADD213Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0xa9] = X86_OP_ENTRY3(VFMADD213Sx, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0xaa] = X86_OP_ENTRY3(VFMSUB213Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0xab] = X86_OP_ENTRY3(VFMSUB213Sx, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0xac] = X86_OP_ENTRY3(VFNMADD213Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0xad] = X86_OP_ENTRY3(VFNMADD213Sx, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0xae] = X86_OP_ENTRY3(VFNMSUB213Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0xaf] = X86_OP_ENTRY3(VFNMSUB213Sx, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + + [0xb8] = X86_OP_ENTRY3(VFMADD231Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0xb9] = X86_OP_ENTRY3(VFMADD231Sx, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0xba] = X86_OP_ENTRY3(VFMSUB231Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0xbb] = X86_OP_ENTRY3(VFMSUB231Sx, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0xbc] = X86_OP_ENTRY3(VFNMADD231Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0xbd] = X86_OP_ENTRY3(VFNMADD231Sx, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0xbe] = X86_OP_ENTRY3(VFNMSUB231Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0xbf] = X86_OP_ENTRY3(VFNMSUB231Sx, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + + [0xdb] = X86_OP_ENTRY3(VAESIMC, V,dq, None,None, W,dq, vex4 cpuid(AES) p_66), + [0xdc] = X86_OP_ENTRY3(VAESENC, V,x, H,x, W,x, vex4 cpuid(AES) p_66), + [0xdd] = X86_OP_ENTRY3(VAESENCLAST, V,x, H,x, W,x, vex4 cpuid(AES) p_66), + [0xde] = X86_OP_ENTRY3(VAESDEC, V,x, H,x, W,x, vex4 cpuid(AES) p_66), + [0xdf] = X86_OP_ENTRY3(VAESDECLAST, V,x, H,x, W,x, vex4 cpuid(AES) p_66), +}; + +/* five rows for no prefix, 66, F3, F2, 66+F2 */ +static const X86OpEntry opcodes_0F38_F0toFF[16][5] = { + [0] = { + X86_OP_ENTRY3(MOVBE, G,y, M,y, None,None, cpuid(MOVBE)), + X86_OP_ENTRY3(MOVBE, G,w, M,w, None,None, cpuid(MOVBE)), + {}, + X86_OP_ENTRY2(CRC32, G,d, E,b, cpuid(SSE42)), + X86_OP_ENTRY2(CRC32, G,d, E,b, cpuid(SSE42)), + }, + [1] = { + X86_OP_ENTRY3(MOVBE, M,y, G,y, None,None, cpuid(MOVBE)), + X86_OP_ENTRY3(MOVBE, M,w, G,w, None,None, cpuid(MOVBE)), + {}, + X86_OP_ENTRY2(CRC32, G,d, E,y, cpuid(SSE42)), + X86_OP_ENTRY2(CRC32, G,d, E,w, cpuid(SSE42)), + }, + [2] = { + X86_OP_ENTRY3(ANDN, G,y, B,y, E,y, vex13 cpuid(BMI1)), + {}, + {}, + {}, + {}, + }, + [3] = { + X86_OP_GROUP3(group17, B,y, E,y, None,None, vex13 cpuid(BMI1)), + {}, + {}, + {}, + {}, + }, + [5] = { + X86_OP_ENTRY3(BZHI, G,y, E,y, B,y, vex13 cpuid(BMI1)), + {}, + X86_OP_ENTRY3(PEXT, G,y, B,y, E,y, vex13 cpuid(BMI2)), + X86_OP_ENTRY3(PDEP, G,y, B,y, E,y, vex13 cpuid(BMI2)), + {}, + }, + [6] = { + {}, + X86_OP_ENTRY2(ADCX, G,y, E,y, cpuid(ADX)), + X86_OP_ENTRY2(ADOX, G,y, E,y, cpuid(ADX)), + X86_OP_ENTRY3(MULX, /* B,y, */ G,y, E,y, 2,y, vex13 cpuid(BMI2)), + {}, + }, + [7] = { + X86_OP_ENTRY3(BEXTR, G,y, E,y, B,y, vex13 cpuid(BMI1)), + X86_OP_ENTRY3(SHLX, G,y, E,y, B,y, vex13 cpuid(BMI1)), + X86_OP_ENTRY3(SARX, G,y, E,y, B,y, vex13 
cpuid(BMI1)), + X86_OP_ENTRY3(SHRX, G,y, E,y, B,y, vex13 cpuid(BMI1)), + {}, + }, +}; + +static void decode_0F38(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + *b = x86_ldub_code(env, s); + if (*b < 0xf0) { + *entry = opcodes_0F38_00toEF[*b]; + } else { + int row = 0; + if (s->prefix & PREFIX_REPZ) { + /* The REPZ (F3) prefix has priority over 66 */ + row = 2; + } else { + row += s->prefix & PREFIX_REPNZ ? 3 : 0; + row += s->prefix & PREFIX_DATA ? 1 : 0; + } + *entry = opcodes_0F38_F0toFF[*b & 15][row]; + } +} + +static void decode_VINSERTPS(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry + vinsertps_reg = X86_OP_ENTRY4(VINSERTPS_r, V,dq, H,dq, U,dq, vex5 cpuid(SSE41) p_66), + vinsertps_mem = X86_OP_ENTRY4(VINSERTPS_m, V,dq, H,dq, M,d, vex5 cpuid(SSE41) p_66); + + int modrm = get_modrm(s, env); + *entry = (modrm >> 6) == 3 ? vinsertps_reg : vinsertps_mem; +} + +static const X86OpEntry opcodes_0F3A[256] = { + /* + * These are VEX-only, but incorrectly listed in the manual as exception type 4. + * Also the "qq" instructions are sometimes omitted by Table 2-17, but are VEX256 + * only. + */ + [0x00] = X86_OP_ENTRY3(VPERMQ, V,qq, W,qq, I,b, vex6 cpuid(AVX2) p_66), + [0x01] = X86_OP_ENTRY3(VPERMQ, V,qq, W,qq, I,b, vex6 cpuid(AVX2) p_66), /* VPERMPD */ + [0x02] = X86_OP_ENTRY4(VBLENDPS, V,x, H,x, W,x, vex6 cpuid(AVX2) p_66), /* VPBLENDD */ + [0x04] = X86_OP_ENTRY3(VPERMILPS_i, V,x, W,x, I,b, vex6 cpuid(AVX) p_66), + [0x05] = X86_OP_ENTRY3(VPERMILPD_i, V,x, W,x, I,b, vex6 cpuid(AVX) p_66), + [0x06] = X86_OP_ENTRY4(VPERM2x128, V,qq, H,qq, W,qq, vex6 cpuid(AVX) p_66), + + [0x14] = X86_OP_ENTRY3(PEXTRB, E,b, V,dq, I,b, vex5 cpuid(SSE41) zext0 p_66), + [0x15] = X86_OP_ENTRY3(PEXTRW, E,w, V,dq, I,b, vex5 cpuid(SSE41) zext0 p_66), + [0x16] = X86_OP_ENTRY3(PEXTR, E,y, V,dq, I,b, vex5 cpuid(SSE41) p_66), + [0x17] = X86_OP_ENTRY3(VEXTRACTPS, E,d, V,dq, I,b, vex5 cpuid(SSE41) p_66), + [0x1d] = X86_OP_ENTRY3(VCVTPS2PH, W,ph, V,x, I,b, vex11 cpuid(F16C) p_66), + + [0x20] = X86_OP_ENTRY4(PINSRB, V,dq, H,dq, E,b, vex5 cpuid(SSE41) zext2 p_66), + [0x21] = X86_OP_GROUP0(VINSERTPS), + [0x22] = X86_OP_ENTRY4(PINSR, V,dq, H,dq, E,y, vex5 cpuid(SSE41) p_66), + + [0x40] = X86_OP_ENTRY4(VDDPS, V,x, H,x, W,x, vex2 cpuid(SSE41) p_66), + [0x41] = X86_OP_ENTRY4(VDDPD, V,dq, H,dq, W,dq, vex2 cpuid(SSE41) p_66), + [0x42] = X86_OP_ENTRY4(VMPSADBW, V,x, H,x, W,x, vex2 cpuid(SSE41) avx2_256 p_66), + [0x44] = X86_OP_ENTRY4(PCLMULQDQ, V,dq, H,dq, W,dq, vex4 cpuid(PCLMULQDQ) p_66), + [0x46] = X86_OP_ENTRY4(VPERM2x128, V,qq, H,qq, W,qq, vex6 cpuid(AVX2) p_66), + + [0x60] = X86_OP_ENTRY4(PCMPESTRM, None,None, V,dq, W,dq, vex4_unal cpuid(SSE42) p_66), + [0x61] = X86_OP_ENTRY4(PCMPESTRI, None,None, V,dq, W,dq, vex4_unal cpuid(SSE42) p_66), + [0x62] = X86_OP_ENTRY4(PCMPISTRM, None,None, V,dq, W,dq, vex4_unal cpuid(SSE42) p_66), + [0x63] = X86_OP_ENTRY4(PCMPISTRI, None,None, V,dq, W,dq, vex4_unal cpuid(SSE42) p_66), + + [0x08] = X86_OP_ENTRY3(VROUNDPS, V,x, W,x, I,b, vex2 cpuid(SSE41) p_66), + [0x09] = X86_OP_ENTRY3(VROUNDPD, V,x, W,x, I,b, vex2 cpuid(SSE41) p_66), + /* + * Not listed as four operand in the manual. Also writes and reads 128-bits + * from the first two operands due to the V operand picking higher entries of + * the H operand; the "Vss,Hss,Wss" description from the manual is incorrect. + * For other unary operations such as VSQRTSx this is hidden by the "REPScalar" + * value of vex_special, because the table lists the operand types of VSQRTPx. 
+ */ + [0x0a] = X86_OP_ENTRY4(VROUNDSS, V,x, H,x, W,ss, vex3 cpuid(SSE41) p_66), + [0x0b] = X86_OP_ENTRY4(VROUNDSD, V,x, H,x, W,sd, vex3 cpuid(SSE41) p_66), + [0x0c] = X86_OP_ENTRY4(VBLENDPS, V,x, H,x, W,x, vex4 cpuid(SSE41) p_66), + [0x0d] = X86_OP_ENTRY4(VBLENDPD, V,x, H,x, W,x, vex4 cpuid(SSE41) p_66), + [0x0e] = X86_OP_ENTRY4(VPBLENDW, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), + [0x0f] = X86_OP_ENTRY4(PALIGNR, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), + + [0x18] = X86_OP_ENTRY4(VINSERTx128, V,qq, H,qq, W,qq, vex6 cpuid(AVX) p_66), + [0x19] = X86_OP_ENTRY3(VEXTRACTx128, W,dq, V,qq, I,b, vex6 cpuid(AVX) p_66), + + [0x38] = X86_OP_ENTRY4(VINSERTx128, V,qq, H,qq, W,qq, vex6 cpuid(AVX2) p_66), + [0x39] = X86_OP_ENTRY3(VEXTRACTx128, W,dq, V,qq, I,b, vex6 cpuid(AVX2) p_66), + + /* Listed incorrectly as type 4 */ + [0x4a] = X86_OP_ENTRY4(VBLENDVPS, V,x, H,x, W,x, vex6 cpuid(AVX) p_66), + [0x4b] = X86_OP_ENTRY4(VBLENDVPD, V,x, H,x, W,x, vex6 cpuid(AVX) p_66), + [0x4c] = X86_OP_ENTRY4(VPBLENDVB, V,x, H,x, W,x, vex6 cpuid(AVX) p_66 avx2_256), + + [0xdf] = X86_OP_ENTRY3(VAESKEYGEN, V,dq, W,dq, I,b, vex4 cpuid(AES) p_66), + + [0xF0] = X86_OP_ENTRY3(RORX, G,y, E,y, I,b, vex13 cpuid(BMI2) p_f2), +}; + +static void decode_0F3A(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + *b = x86_ldub_code(env, s); + *entry = opcodes_0F3A[*b]; +} + +/* + * There are some mistakes in the operands in the manual, and the load/store/register + * cases are easiest to keep separate, so the entries for 10-17 follow simplicity and + * efficiency of implementation rather than copying what the manual says. + * + * In particular: + * + * 1) "VMOVSS m32, xmm1" and "VMOVSD m64, xmm1" do not support VEX.vvvv != 1111b, + * but this is not mentioned in the tables. + * + * 2) MOVHLPS, MOVHPS, MOVHPD, MOVLPD, MOVLPS read the high quadword of one of their + * operands, which must therefore be dq; MOVLPD and MOVLPS also write the high + * quadword of the V operand. 
+ */ +static void decode_0F10(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry opcodes_0F10_reg[4] = { + X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* MOVUPS */ + X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* MOVUPD */ + X86_OP_ENTRY3(VMOVSS, V,x, H,x, W,x, vex4), + X86_OP_ENTRY3(VMOVLPx, V,x, H,x, W,x, vex4), /* MOVSD */ + }; + + static const X86OpEntry opcodes_0F10_mem[4] = { + X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* MOVUPS */ + X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* MOVUPD */ + X86_OP_ENTRY3(VMOVSS_ld, V,x, H,x, M,ss, vex4), + X86_OP_ENTRY3(VMOVSD_ld, V,x, H,x, M,sd, vex4), + }; + + if ((get_modrm(s, env) >> 6) == 3) { + *entry = *decode_by_prefix(s, opcodes_0F10_reg); + } else { + *entry = *decode_by_prefix(s, opcodes_0F10_mem); + } +} + +static void decode_0F11(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry opcodes_0F11_reg[4] = { + X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVPS */ + X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVPD */ + X86_OP_ENTRY3(VMOVSS, W,x, H,x, V,x, vex4), + X86_OP_ENTRY3(VMOVLPx, W,x, H,x, V,q, vex4), /* MOVSD */ + }; + + static const X86OpEntry opcodes_0F11_mem[4] = { + X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVPS */ + X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVPD */ + X86_OP_ENTRY3(VMOVSS_st, M,ss, None,None, V,x, vex4), + X86_OP_ENTRY3(VMOVLPx_st, M,sd, None,None, V,x, vex4), /* MOVSD */ + }; + + if ((get_modrm(s, env) >> 6) == 3) { + *entry = *decode_by_prefix(s, opcodes_0F11_reg); + } else { + *entry = *decode_by_prefix(s, opcodes_0F11_mem); + } +} + +static void decode_0F12(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry opcodes_0F12_mem[4] = { + /* + * Use dq for operand for compatibility with gen_MOVSD and + * to allow VEX128 only. + */ + X86_OP_ENTRY3(VMOVLPx_ld, V,dq, H,dq, M,q, vex4), /* MOVLPS */ + X86_OP_ENTRY3(VMOVLPx_ld, V,dq, H,dq, M,q, vex4), /* MOVLPD */ + X86_OP_ENTRY3(VMOVSLDUP, V,x, None,None, W,x, vex4 cpuid(SSE3)), + X86_OP_ENTRY3(VMOVDDUP, V,x, None,None, WM,q, vex4 cpuid(SSE3)), /* qq if VEX.256 */ + }; + static const X86OpEntry opcodes_0F12_reg[4] = { + X86_OP_ENTRY3(VMOVHLPS, V,dq, H,dq, U,dq, vex4), + X86_OP_ENTRY3(VMOVLPx, W,x, H,x, U,q, vex4), /* MOVLPD */ + X86_OP_ENTRY3(VMOVSLDUP, V,x, None,None, U,x, vex4 cpuid(SSE3)), + X86_OP_ENTRY3(VMOVDDUP, V,x, None,None, U,x, vex4 cpuid(SSE3)), + }; + + if ((get_modrm(s, env) >> 6) == 3) { + *entry = *decode_by_prefix(s, opcodes_0F12_reg); + } else { + *entry = *decode_by_prefix(s, opcodes_0F12_mem); + if ((s->prefix & PREFIX_REPNZ) && s->vex_l) { + entry->s2 = X86_SIZE_qq; + } + } +} + +static void decode_0F16(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry opcodes_0F16_mem[4] = { + /* + * Operand 1 technically only reads the low 64 bits, but uses dq so that + * it is easier to check for op0 == op1 in an endianness-neutral manner. + */ + X86_OP_ENTRY3(VMOVHPx_ld, V,dq, H,dq, M,q, vex4), /* MOVHPS */ + X86_OP_ENTRY3(VMOVHPx_ld, V,dq, H,dq, M,q, vex4), /* MOVHPD */ + X86_OP_ENTRY3(VMOVSHDUP, V,x, None,None, W,x, vex4 cpuid(SSE3)), + {}, + }; + static const X86OpEntry opcodes_0F16_reg[4] = { + /* Same as above, operand 1 could be Hq if it wasn't for big-endian. 
*/ + X86_OP_ENTRY3(VMOVLHPS, V,dq, H,dq, U,q, vex4), + X86_OP_ENTRY3(VMOVHPx, V,x, H,x, U,x, vex4), /* MOVHPD */ + X86_OP_ENTRY3(VMOVSHDUP, V,x, None,None, U,x, vex4 cpuid(SSE3)), + {}, + }; + + if ((get_modrm(s, env) >> 6) == 3) { + *entry = *decode_by_prefix(s, opcodes_0F16_reg); + } else { + *entry = *decode_by_prefix(s, opcodes_0F16_mem); + } +} + +static void decode_0F2A(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry opcodes_0F2A[4] = { + X86_OP_ENTRY3(CVTPI2Px, V,x, None,None, Q,q), + X86_OP_ENTRY3(CVTPI2Px, V,x, None,None, Q,q), + X86_OP_ENTRY3(VCVTSI2Sx, V,x, H,x, E,y, vex3), + X86_OP_ENTRY3(VCVTSI2Sx, V,x, H,x, E,y, vex3), + }; + *entry = *decode_by_prefix(s, opcodes_0F2A); +} + +static void decode_0F2B(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry opcodes_0F2B[4] = { + X86_OP_ENTRY3(MOVDQ, M,x, None,None, V,x, vex4), /* MOVNTPS */ + X86_OP_ENTRY3(MOVDQ, M,x, None,None, V,x, vex4), /* MOVNTPD */ + X86_OP_ENTRY3(VMOVSS_st, M,ss, None,None, V,x, vex4 cpuid(SSE4A)), /* MOVNTSS */ + X86_OP_ENTRY3(VMOVLPx_st, M,sd, None,None, V,x, vex4 cpuid(SSE4A)), /* MOVNTSD */ + }; + + *entry = *decode_by_prefix(s, opcodes_0F2B); +} + +static void decode_0F2C(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry opcodes_0F2C[4] = { + /* Listed as ps/pd in the manual, but CVTTPS2PI only reads 64-bit. */ + X86_OP_ENTRY3(CVTTPx2PI, P,q, None,None, W,q), + X86_OP_ENTRY3(CVTTPx2PI, P,q, None,None, W,dq), + X86_OP_ENTRY3(VCVTTSx2SI, G,y, None,None, W,ss, vex3), + X86_OP_ENTRY3(VCVTTSx2SI, G,y, None,None, W,sd, vex3), + }; + *entry = *decode_by_prefix(s, opcodes_0F2C); +} + +static void decode_0F2D(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry opcodes_0F2D[4] = { + /* Listed as ps/pd in the manual, but CVTPS2PI only reads 64-bit. */ + X86_OP_ENTRY3(CVTPx2PI, P,q, None,None, W,q), + X86_OP_ENTRY3(CVTPx2PI, P,q, None,None, W,dq), + X86_OP_ENTRY3(VCVTSx2SI, G,y, None,None, W,ss, vex3), + X86_OP_ENTRY3(VCVTSx2SI, G,y, None,None, W,sd, vex3), + }; + *entry = *decode_by_prefix(s, opcodes_0F2D); +} + +static void decode_VxCOMISx(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + /* + * VUCOMISx and VCOMISx are different and use no-prefix and 0x66 for SS and SD + * respectively. Scalar values usually are associated with 0xF2 and 0xF3, for + * which X86_VEX_REPScalar exists, but here it has to be decoded by hand. + */ + entry->s1 = entry->s2 = (s->prefix & PREFIX_DATA ? X86_SIZE_sd : X86_SIZE_ss); + entry->gen = (*b == 0x2E ? 
gen_VUCOMI : gen_VCOMI); +} + +static void decode_sse_unary(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + if (!(s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))) { + entry->op1 = X86_TYPE_None; + entry->s1 = X86_SIZE_None; + } + switch (*b) { + case 0x51: entry->gen = gen_VSQRT; break; + case 0x52: entry->gen = gen_VRSQRT; break; + case 0x53: entry->gen = gen_VRCP; break; + case 0x5A: entry->gen = gen_VCVTfp2fp; break; + } +} + +static void decode_0F5B(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry opcodes_0F5B[4] = { + X86_OP_ENTRY2(VCVTDQ2PS, V,x, W,x, vex2), + X86_OP_ENTRY2(VCVTPS2DQ, V,x, W,x, vex2), + X86_OP_ENTRY2(VCVTTPS2DQ, V,x, W,x, vex2), + {}, + }; + *entry = *decode_by_prefix(s, opcodes_0F5B); +} + +static void decode_0FE6(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry opcodes_0FE6[4] = { + {}, + X86_OP_ENTRY2(VCVTTPD2DQ, V,x, W,x, vex2), + X86_OP_ENTRY2(VCVTDQ2PD, V,x, W,x, vex2), + X86_OP_ENTRY2(VCVTPD2DQ, V,x, W,x, vex2), + }; + *entry = *decode_by_prefix(s, opcodes_0FE6); +} + +static const X86OpEntry opcodes_0F[256] = { + [0x0E] = X86_OP_ENTRY0(EMMS, cpuid(3DNOW)), /* femms */ + /* + * 3DNow!'s opcode byte comes *after* modrm and displacements, making it + * more like an Ib operand. Dispatch to the right helper in a single gen_* + * function. + */ + [0x0F] = X86_OP_ENTRY3(3dnow, P,q, Q,q, I,b, cpuid(3DNOW)), + + [0x10] = X86_OP_GROUP0(0F10), + [0x11] = X86_OP_GROUP0(0F11), + [0x12] = X86_OP_GROUP0(0F12), + [0x13] = X86_OP_ENTRY3(VMOVLPx_st, M,q, None,None, V,q, vex4 p_00_66), + [0x14] = X86_OP_ENTRY3(VUNPCKLPx, V,x, H,x, W,x, vex4 p_00_66), + [0x15] = X86_OP_ENTRY3(VUNPCKHPx, V,x, H,x, W,x, vex4 p_00_66), + [0x16] = X86_OP_GROUP0(0F16), + /* Incorrectly listed as Mq,Vq in the manual */ + [0x17] = X86_OP_ENTRY3(VMOVHPx_st, M,q, None,None, V,dq, vex4 p_00_66), + + [0x50] = X86_OP_ENTRY3(MOVMSK, G,y, None,None, U,x, vex7 p_00_66), + [0x51] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2), + [0x52] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex5 p_00_f3), + [0x53] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex5 p_00_f3), + [0x54] = X86_OP_ENTRY3(PAND, V,x, H,x, W,x, vex4 p_00_66), /* vand */ + [0x55] = X86_OP_ENTRY3(PANDN, V,x, H,x, W,x, vex4 p_00_66), /* vandn */ + [0x56] = X86_OP_ENTRY3(POR, V,x, H,x, W,x, vex4 p_00_66), /* vor */ + [0x57] = X86_OP_ENTRY3(PXOR, V,x, H,x, W,x, vex4 p_00_66), /* vxor */ + + [0x60] = X86_OP_ENTRY3(PUNPCKLBW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0x61] = X86_OP_ENTRY3(PUNPCKLWD, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0x62] = X86_OP_ENTRY3(PUNPCKLDQ, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0x63] = X86_OP_ENTRY3(PACKSSWB, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0x64] = X86_OP_ENTRY3(PCMPGTB, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0x65] = X86_OP_ENTRY3(PCMPGTW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0x66] = X86_OP_ENTRY3(PCMPGTD, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0x67] = X86_OP_ENTRY3(PACKUSWB, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + + [0x70] = X86_OP_GROUP0(0F70), + [0x71] = X86_OP_GROUP0(group12), + [0x72] = X86_OP_GROUP0(group13), + [0x73] = X86_OP_GROUP0(group14), + [0x74] = X86_OP_ENTRY3(PCMPEQB, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0x75] = X86_OP_ENTRY3(PCMPEQW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0x76] = X86_OP_ENTRY3(PCMPEQD, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0x77] = X86_OP_GROUP0(0F77), + + [0x28] = X86_OP_ENTRY3(MOVDQ, V,x, 
None,None, W,x, vex1 p_00_66), /* MOVAPS */ + [0x29] = X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex1 p_00_66), /* MOVAPS */ + [0x2A] = X86_OP_GROUP0(0F2A), + [0x2B] = X86_OP_GROUP0(0F2B), + [0x2C] = X86_OP_GROUP0(0F2C), + [0x2D] = X86_OP_GROUP0(0F2D), + [0x2E] = X86_OP_GROUP3(VxCOMISx, None,None, V,x, W,x, vex3 p_00_66), /* VUCOMISS/SD */ + [0x2F] = X86_OP_GROUP3(VxCOMISx, None,None, V,x, W,x, vex3 p_00_66), /* VCOMISS/SD */ + + [0x38] = X86_OP_GROUP0(0F38), + [0x3a] = X86_OP_GROUP0(0F3A), + + [0x58] = X86_OP_ENTRY3(VADD, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2), + [0x59] = X86_OP_ENTRY3(VMUL, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2), + [0x5a] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex3 p_00_66_f3_f2), + [0x5b] = X86_OP_GROUP0(0F5B), + [0x5c] = X86_OP_ENTRY3(VSUB, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2), + [0x5d] = X86_OP_ENTRY3(VMIN, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2), + [0x5e] = X86_OP_ENTRY3(VDIV, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2), + [0x5f] = X86_OP_ENTRY3(VMAX, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2), + + [0x68] = X86_OP_ENTRY3(PUNPCKHBW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0x69] = X86_OP_ENTRY3(PUNPCKHWD, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0x6a] = X86_OP_ENTRY3(PUNPCKHDQ, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0x6b] = X86_OP_ENTRY3(PACKSSDW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0x6c] = X86_OP_ENTRY3(PUNPCKLQDQ, V,x, H,x, W,x, vex4 p_66 avx2_256), + [0x6d] = X86_OP_ENTRY3(PUNPCKHQDQ, V,x, H,x, W,x, vex4 p_66 avx2_256), + [0x6e] = X86_OP_ENTRY3(MOVD_to, V,x, None,None, E,y, vex5 mmx p_00_66), /* wrong dest Vy on SDM! */ + [0x6f] = X86_OP_GROUP0(0F6F), + + [0x78] = X86_OP_GROUP0(0F78), + [0x79] = X86_OP_GROUP2(0F79, V,x, U,x, cpuid(SSE4A)), + [0x7c] = X86_OP_ENTRY3(VHADD, V,x, H,x, W,x, vex2 cpuid(SSE3) p_66_f2), + [0x7d] = X86_OP_ENTRY3(VHSUB, V,x, H,x, W,x, vex2 cpuid(SSE3) p_66_f2), + [0x7e] = X86_OP_GROUP0(0F7E), + [0x7f] = X86_OP_GROUP0(0F7F), + + [0xae] = X86_OP_GROUP0(group15), + + [0xc2] = X86_OP_ENTRY4(VCMP, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2), + [0xc4] = X86_OP_ENTRY4(PINSRW, V,dq,H,dq,E,w, vex5 mmx p_00_66), + [0xc5] = X86_OP_ENTRY3(PEXTRW, G,d, U,dq,I,b, vex5 mmx p_00_66), + [0xc6] = X86_OP_ENTRY4(VSHUF, V,x, H,x, W,x, vex4 p_00_66), + + [0xd0] = X86_OP_ENTRY3(VADDSUB, V,x, H,x, W,x, vex2 cpuid(SSE3) p_66_f2), + [0xd1] = X86_OP_ENTRY3(PSRLW_r, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xd2] = X86_OP_ENTRY3(PSRLD_r, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xd3] = X86_OP_ENTRY3(PSRLQ_r, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xd4] = X86_OP_ENTRY3(PADDQ, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xd5] = X86_OP_ENTRY3(PMULLW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xd6] = X86_OP_GROUP0(0FD6), + [0xd7] = X86_OP_ENTRY3(PMOVMSKB, G,d, None,None, U,x, vex7 mmx avx2_256 p_00_66), + + [0xe0] = X86_OP_ENTRY3(PAVGB, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xe1] = X86_OP_ENTRY3(PSRAW_r, V,x, H,x, W,x, vex7 mmx avx2_256 p_00_66), + [0xe2] = X86_OP_ENTRY3(PSRAD_r, V,x, H,x, W,x, vex7 mmx avx2_256 p_00_66), + [0xe3] = X86_OP_ENTRY3(PAVGW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xe4] = X86_OP_ENTRY3(PMULHUW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xe5] = X86_OP_ENTRY3(PMULHW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xe6] = X86_OP_GROUP0(0FE6), + [0xe7] = X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex1 mmx p_00_66), /* MOVNTQ/MOVNTDQ */ + + [0xf0] = X86_OP_ENTRY3(MOVDQ, V,x, None,None, WM,x, vex4_unal cpuid(SSE3) p_f2), /* LDDQU */ + [0xf1] = X86_OP_ENTRY3(PSLLW_r, V,x, H,x, W,x, vex7 mmx 
avx2_256 p_00_66), + [0xf2] = X86_OP_ENTRY3(PSLLD_r, V,x, H,x, W,x, vex7 mmx avx2_256 p_00_66), + [0xf3] = X86_OP_ENTRY3(PSLLQ_r, V,x, H,x, W,x, vex7 mmx avx2_256 p_00_66), + [0xf4] = X86_OP_ENTRY3(PMULUDQ, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xf5] = X86_OP_ENTRY3(PMADDWD, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xf6] = X86_OP_ENTRY3(PSADBW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xf7] = X86_OP_ENTRY3(MASKMOV, None,None, V,dq, U,dq, vex4_unal avx2_256 mmx p_00_66), + + /* Incorrectly missing from 2-17 */ + [0xd8] = X86_OP_ENTRY3(PSUBUSB, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xd9] = X86_OP_ENTRY3(PSUBUSW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xda] = X86_OP_ENTRY3(PMINUB, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xdb] = X86_OP_ENTRY3(PAND, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xdc] = X86_OP_ENTRY3(PADDUSB, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xdd] = X86_OP_ENTRY3(PADDUSW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xde] = X86_OP_ENTRY3(PMAXUB, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xdf] = X86_OP_ENTRY3(PANDN, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + + [0xe8] = X86_OP_ENTRY3(PSUBSB, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xe9] = X86_OP_ENTRY3(PSUBSW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xea] = X86_OP_ENTRY3(PMINSW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xeb] = X86_OP_ENTRY3(POR, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xec] = X86_OP_ENTRY3(PADDSB, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xed] = X86_OP_ENTRY3(PADDSW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xee] = X86_OP_ENTRY3(PMAXSW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xef] = X86_OP_ENTRY3(PXOR, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + + [0xf8] = X86_OP_ENTRY3(PSUBB, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xf9] = X86_OP_ENTRY3(PSUBW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xfa] = X86_OP_ENTRY3(PSUBD, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xfb] = X86_OP_ENTRY3(PSUBQ, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xfc] = X86_OP_ENTRY3(PADDB, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xfd] = X86_OP_ENTRY3(PADDW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + [0xfe] = X86_OP_ENTRY3(PADDD, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), + /* 0xff = UD0 */ +}; + +static void do_decode_0F(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + *entry = opcodes_0F[*b]; +} + +static void decode_0F(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + *b = x86_ldub_code(env, s); + do_decode_0F(s, env, entry, b); +} + +static const X86OpEntry opcodes_root[256] = { + [0x0F] = X86_OP_GROUP0(0F), +}; + +#undef mmx +#undef vex1 +#undef vex2 +#undef vex3 +#undef vex4 +#undef vex4_unal +#undef vex5 +#undef vex6 +#undef vex7 +#undef vex8 +#undef vex11 +#undef vex12 +#undef vex13 + +/* + * Decode the fixed part of the opcode and place the last + * in b. 
+ * byte in b.
+ */ +static void decode_root(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + *entry = opcodes_root[*b]; +} + + +static int decode_modrm(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, + X86DecodedOp *op, X86OpType type) +{ + int modrm = get_modrm(s, env); + if ((modrm >> 6) == 3) { + if (s->prefix & PREFIX_LOCK) { + decode->e.gen = gen_illegal; + return 0xff; + } + op->n = (modrm & 7); + if (type != X86_TYPE_Q && type != X86_TYPE_N) { + op->n |= REX_B(s); + } + } else { + op->has_ea = true; + op->n = -1; + decode->mem = gen_lea_modrm_0(env, s, get_modrm(s, env)); + } + return modrm; +} + +static bool decode_op_size(DisasContext *s, X86OpEntry *e, X86OpSize size, MemOp *ot) +{ + switch (size) { + case X86_SIZE_b: /* byte */ + *ot = MO_8; + return true; + + case X86_SIZE_d: /* 32-bit */ + case X86_SIZE_ss: /* SSE/AVX scalar single precision */ + *ot = MO_32; + return true; + + case X86_SIZE_p: /* Far pointer, return offset size */ + case X86_SIZE_s: /* Descriptor, return offset size */ + case X86_SIZE_v: /* 16/32/64-bit, based on operand size */ + *ot = s->dflag; + return true; + + case X86_SIZE_pi: /* MMX */ + case X86_SIZE_q: /* 64-bit */ + case X86_SIZE_sd: /* SSE/AVX scalar double precision */ + *ot = MO_64; + return true; + + case X86_SIZE_w: /* 16-bit */ + *ot = MO_16; + return true; + + case X86_SIZE_y: /* 32/64-bit, based on operand size */ + *ot = s->dflag == MO_16 ? MO_32 : s->dflag; + return true; + + case X86_SIZE_z: /* 16-bit for 16-bit operand size, else 32-bit */ + *ot = s->dflag == MO_16 ? MO_16 : MO_32; + return true; + + case X86_SIZE_dq: /* SSE/AVX 128-bit */ + if (e->special == X86_SPECIAL_MMX && + !(s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { + *ot = MO_64; + return true; + } + if (s->vex_l && e->s0 != X86_SIZE_qq && e->s1 != X86_SIZE_qq) { + return false; + } + *ot = MO_128; + return true; + + case X86_SIZE_qq: /* AVX 256-bit */ + if (!s->vex_l) { + return false; + } + *ot = MO_256; + return true; + + case X86_SIZE_x: /* 128/256-bit, based on operand size */ + if (e->special == X86_SPECIAL_MMX && + !(s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { + *ot = MO_64; + return true; + } + /* fall through */ + case X86_SIZE_ps: /* SSE/AVX packed single precision */ + case X86_SIZE_pd: /* SSE/AVX packed double precision */ + *ot = s->vex_l ? MO_256 : MO_128; + return true; + + case X86_SIZE_ph: /* SSE/AVX packed half precision */ + *ot = s->vex_l ? MO_128 : MO_64; + return true; + + case X86_SIZE_d64: /* Default to 64-bit in 64-bit mode */ + *ot = CODE64(s) && s->dflag == MO_32 ? MO_64 : s->dflag; + return true; + + case X86_SIZE_f64: /* Ignore size override prefix in 64-bit mode */ + *ot = CODE64(s) ? 
MO_64 : s->dflag; + return true; + + default: + *ot = -1; + return true; + } +} + +static bool decode_op(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, + X86DecodedOp *op, X86OpType type, int b) +{ + int modrm; + + switch (type) { + case X86_TYPE_None: /* Implicit or absent */ + case X86_TYPE_A: /* Implicit */ + case X86_TYPE_F: /* EFLAGS/RFLAGS */ + break; + + case X86_TYPE_B: /* VEX.vvvv selects a GPR */ + op->unit = X86_OP_INT; + op->n = s->vex_v; + break; + + case X86_TYPE_C: /* REG in the modrm byte selects a control register */ + op->unit = X86_OP_CR; + goto get_reg; + + case X86_TYPE_D: /* REG in the modrm byte selects a debug register */ + op->unit = X86_OP_DR; + goto get_reg; + + case X86_TYPE_G: /* REG in the modrm byte selects a GPR */ + op->unit = X86_OP_INT; + goto get_reg; + + case X86_TYPE_S: /* reg selects a segment register */ + op->unit = X86_OP_SEG; + goto get_reg; + + case X86_TYPE_P: + op->unit = X86_OP_MMX; + goto get_reg; + + case X86_TYPE_V: /* reg in the modrm byte selects an XMM/YMM register */ + if (decode->e.special == X86_SPECIAL_MMX && + !(s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { + op->unit = X86_OP_MMX; + } else { + op->unit = X86_OP_SSE; + } + get_reg: + op->n = ((get_modrm(s, env) >> 3) & 7) | REX_R(s); + break; + + case X86_TYPE_E: /* ALU modrm operand */ + op->unit = X86_OP_INT; + goto get_modrm; + + case X86_TYPE_Q: /* MMX modrm operand */ + op->unit = X86_OP_MMX; + goto get_modrm; + + case X86_TYPE_W: /* XMM/YMM modrm operand */ + if (decode->e.special == X86_SPECIAL_MMX && + !(s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { + op->unit = X86_OP_MMX; + } else { + op->unit = X86_OP_SSE; + } + goto get_modrm; + + case X86_TYPE_N: /* R/M in the modrm byte selects an MMX register */ + op->unit = X86_OP_MMX; + goto get_modrm_reg; + + case X86_TYPE_U: /* R/M in the modrm byte selects an XMM/YMM register */ + if (decode->e.special == X86_SPECIAL_MMX && + !(s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { + op->unit = X86_OP_MMX; + } else { + op->unit = X86_OP_SSE; + } + goto get_modrm_reg; + + case X86_TYPE_R: /* R/M in the modrm byte selects a register */ + op->unit = X86_OP_INT; + get_modrm_reg: + modrm = get_modrm(s, env); + if ((modrm >> 6) != 3) { + return false; + } + goto get_modrm; + + case X86_TYPE_WM: /* modrm byte selects an XMM/YMM memory operand */ + op->unit = X86_OP_SSE; + /* fall through */ + case X86_TYPE_M: /* modrm byte selects a memory operand */ + modrm = get_modrm(s, env); + if ((modrm >> 6) == 3) { + return false; + } + get_modrm: + decode_modrm(s, env, decode, op, type); + break; + + case X86_TYPE_O: /* Absolute address encoded in the instruction */ + op->unit = X86_OP_INT; + op->has_ea = true; + op->n = -1; + decode->mem = (AddressParts) { + .def_seg = R_DS, + .base = -1, + .index = -1, + .disp = insn_get_addr(env, s, s->aflag) + }; + break; + + case X86_TYPE_H: /* For AVX, VEX.vvvv selects an XMM/YMM register */ + if ((s->prefix & PREFIX_VEX)) { + op->unit = X86_OP_SSE; + op->n = s->vex_v; + break; + } + if (op == &decode->op[0]) { + /* shifts place the destination in VEX.vvvv, use modrm */ + return decode_op(s, env, decode, op, decode->e.op1, b); + } else { + return decode_op(s, env, decode, op, decode->e.op0, b); + } + + case X86_TYPE_I: /* Immediate */ + op->unit = X86_OP_IMM; + decode->immediate = insn_get_signed(env, s, op->ot); + break; + + case X86_TYPE_J: /* Relative offset for a jump */ + op->unit = X86_OP_IMM; + decode->immediate = insn_get_signed(env, s, op->ot); + 
decode->immediate += s->pc - s->cs_base; + if (s->dflag == MO_16) { + decode->immediate &= 0xffff; + } else if (!CODE64(s)) { + decode->immediate &= 0xffffffffu; + } + break; + + case X86_TYPE_L: /* The upper 4 bits of the immediate select a 128-bit register */ + op->n = insn_get(env, s, op->ot) >> 4; + break; + + case X86_TYPE_X: /* string source */ + op->n = -1; + decode->mem = (AddressParts) { + .def_seg = R_DS, + .base = R_ESI, + .index = -1, + }; + break; + + case X86_TYPE_Y: /* string destination */ + op->n = -1; + decode->mem = (AddressParts) { + .def_seg = R_ES, + .base = R_EDI, + .index = -1, + }; + break; + + case X86_TYPE_2op: + *op = decode->op[0]; + break; + + case X86_TYPE_LoBits: + op->n = (b & 7) | REX_B(s); + op->unit = X86_OP_INT; + break; + + case X86_TYPE_0 ... X86_TYPE_7: + op->n = type - X86_TYPE_0; + op->unit = X86_OP_INT; + break; + + case X86_TYPE_ES ... X86_TYPE_GS: + op->n = type - X86_TYPE_ES; + op->unit = X86_OP_SEG; + break; + } + + return true; +} + +static bool validate_sse_prefix(DisasContext *s, X86OpEntry *e) +{ + uint16_t sse_prefixes; + + if (!e->valid_prefix) { + return true; + } + if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) { + /* In SSE instructions, 0xF3 and 0xF2 cancel 0x66. */ + s->prefix &= ~PREFIX_DATA; + } + + /* Now, either zero or one bit is set in sse_prefixes. */ + sse_prefixes = s->prefix & (PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA); + return e->valid_prefix & (1 << sse_prefixes); +} + +static bool decode_insn(DisasContext *s, CPUX86State *env, X86DecodeFunc decode_func, + X86DecodedInsn *decode) +{ + X86OpEntry *e = &decode->e; + + decode_func(s, env, e, &decode->b); + while (e->is_decode) { + e->is_decode = false; + e->decode(s, env, e, &decode->b); + } + + if (!validate_sse_prefix(s, e)) { + return false; + } + + /* First compute size of operands in order to initialize s->rip_offset. */ + if (e->op0 != X86_TYPE_None) { + if (!decode_op_size(s, e, e->s0, &decode->op[0].ot)) { + return false; + } + if (e->op0 == X86_TYPE_I) { + s->rip_offset += 1 << decode->op[0].ot; + } + } + if (e->op1 != X86_TYPE_None) { + if (!decode_op_size(s, e, e->s1, &decode->op[1].ot)) { + return false; + } + if (e->op1 == X86_TYPE_I) { + s->rip_offset += 1 << decode->op[1].ot; + } + } + if (e->op2 != X86_TYPE_None) { + if (!decode_op_size(s, e, e->s2, &decode->op[2].ot)) { + return false; + } + if (e->op2 == X86_TYPE_I) { + s->rip_offset += 1 << decode->op[2].ot; + } + } + if (e->op3 != X86_TYPE_None) { + /* + * A couple instructions actually use the extra immediate byte for an Lx + * register operand; those are handled in the gen_* functions as one off. 
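+         * At this point only the byte count matters: adding it to s->rip_offset
+         * ensures that a RIP-relative memory operand decoded later is still
+         * computed relative to the end of the whole instruction.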
+ */ + assert(e->op3 == X86_TYPE_I && e->s3 == X86_SIZE_b); + s->rip_offset += 1; + } + + if (e->op0 != X86_TYPE_None && + !decode_op(s, env, decode, &decode->op[0], e->op0, decode->b)) { + return false; + } + + if (e->op1 != X86_TYPE_None && + !decode_op(s, env, decode, &decode->op[1], e->op1, decode->b)) { + return false; + } + + if (e->op2 != X86_TYPE_None && + !decode_op(s, env, decode, &decode->op[2], e->op2, decode->b)) { + return false; + } + + if (e->op3 != X86_TYPE_None) { + decode->immediate = insn_get_signed(env, s, MO_8); + } + + return true; +} + +static bool has_cpuid_feature(DisasContext *s, X86CPUIDFeature cpuid) +{ + switch (cpuid) { + case X86_FEAT_None: + return true; + case X86_FEAT_F16C: + return (s->cpuid_ext_features & CPUID_EXT_F16C); + case X86_FEAT_FMA: + return (s->cpuid_ext_features & CPUID_EXT_FMA); + case X86_FEAT_MOVBE: + return (s->cpuid_ext_features & CPUID_EXT_MOVBE); + case X86_FEAT_PCLMULQDQ: + return (s->cpuid_ext_features & CPUID_EXT_PCLMULQDQ); + case X86_FEAT_SSE: + return (s->cpuid_ext_features & CPUID_SSE); + case X86_FEAT_SSE2: + return (s->cpuid_ext_features & CPUID_SSE2); + case X86_FEAT_SSE3: + return (s->cpuid_ext_features & CPUID_EXT_SSE3); + case X86_FEAT_SSSE3: + return (s->cpuid_ext_features & CPUID_EXT_SSSE3); + case X86_FEAT_SSE41: + return (s->cpuid_ext_features & CPUID_EXT_SSE41); + case X86_FEAT_SSE42: + return (s->cpuid_ext_features & CPUID_EXT_SSE42); + case X86_FEAT_AES: + if (!(s->cpuid_ext_features & CPUID_EXT_AES)) { + return false; + } else if (!(s->prefix & PREFIX_VEX)) { + return true; + } else if (!(s->cpuid_ext_features & CPUID_EXT_AVX)) { + return false; + } else { + return !s->vex_l || (s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_VAES); + } + + case X86_FEAT_AVX: + return (s->cpuid_ext_features & CPUID_EXT_AVX); + + case X86_FEAT_3DNOW: + return (s->cpuid_ext2_features & CPUID_EXT2_3DNOW); + case X86_FEAT_SSE4A: + return (s->cpuid_ext3_features & CPUID_EXT3_SSE4A); + + case X86_FEAT_ADX: + return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX); + case X86_FEAT_BMI1: + return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1); + case X86_FEAT_BMI2: + return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2); + case X86_FEAT_AVX2: + return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_AVX2); + } + g_assert_not_reached(); +} + +static bool validate_vex(DisasContext *s, X86DecodedInsn *decode) +{ + X86OpEntry *e = &decode->e; + + switch (e->vex_special) { + case X86_VEX_REPScalar: + /* + * Instructions which differ between 00/66 and F2/F3 in the + * exception classification and the size of the memory operand. + */ + assert(e->vex_class == 1 || e->vex_class == 2); + if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) { + e->vex_class = 3; + if (s->vex_l) { + goto illegal; + } + assert(decode->e.s2 == X86_SIZE_x); + if (decode->op[2].has_ea) { + decode->op[2].ot = s->prefix & PREFIX_REPZ ? MO_32 : MO_64; + } + } + break; + + case X86_VEX_SSEUnaligned: + /* handled in sse_needs_alignment. 
*/ + break; + + case X86_VEX_AVX2_256: + if ((s->prefix & PREFIX_VEX) && s->vex_l && !has_cpuid_feature(s, X86_FEAT_AVX2)) { + goto illegal; + } + } + + /* TODO: instructions that require VEX.W=0 (Table 2-16) */ + + switch (e->vex_class) { + case 0: + if (s->prefix & PREFIX_VEX) { + goto illegal; + } + return true; + case 1: + case 2: + case 3: + case 4: + case 5: + case 7: + if (s->prefix & PREFIX_VEX) { + if (!(s->flags & HF_AVX_EN_MASK)) { + goto illegal; + } + } else if (e->special != X86_SPECIAL_MMX || + (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA))) { + if (!(s->flags & HF_OSFXSR_MASK)) { + goto illegal; + } + } + break; + case 12: + /* Must have a VSIB byte and no address prefix. */ + assert(s->has_modrm); + if ((s->modrm & 7) != 4 || s->aflag == MO_16) { + goto illegal; + } + + /* Check no overlap between registers. */ + if (!decode->op[0].has_ea && + (decode->op[0].n == decode->mem.index || decode->op[0].n == decode->op[1].n)) { + goto illegal; + } + assert(!decode->op[1].has_ea); + if (decode->op[1].n == decode->mem.index) { + goto illegal; + } + if (!decode->op[2].has_ea && + (decode->op[2].n == decode->mem.index || decode->op[2].n == decode->op[1].n)) { + goto illegal; + } + /* fall through */ + case 6: + case 11: + if (!(s->prefix & PREFIX_VEX)) { + goto illegal; + } + if (!(s->flags & HF_AVX_EN_MASK)) { + goto illegal; + } + break; + case 8: + /* Non-VEX case handled in decode_0F77. */ + assert(s->prefix & PREFIX_VEX); + if (!(s->flags & HF_AVX_EN_MASK)) { + goto illegal; + } + break; + case 13: + if (!(s->prefix & PREFIX_VEX)) { + goto illegal; + } + if (s->vex_l) { + goto illegal; + } + /* All integer instructions use VEX.vvvv, so exit. */ + return true; + } + + if (s->vex_v != 0 && + e->op0 != X86_TYPE_H && e->op0 != X86_TYPE_B && + e->op1 != X86_TYPE_H && e->op1 != X86_TYPE_B && + e->op2 != X86_TYPE_H && e->op2 != X86_TYPE_B) { + goto illegal; + } + + if (s->flags & HF_TS_MASK) { + goto nm_exception; + } + if (s->flags & HF_EM_MASK) { + goto illegal; + } + return true; + +nm_exception: + gen_NM_exception(s); + return false; +illegal: + gen_illegal_opcode(s); + return false; +} + +static void decode_temp_free(X86DecodedOp *op) +{ + if (op->v_ptr) { + tcg_temp_free_ptr(op->v_ptr); + } +} + +static void decode_temps_free(X86DecodedInsn *decode) +{ + decode_temp_free(&decode->op[0]); + decode_temp_free(&decode->op[1]); + decode_temp_free(&decode->op[2]); +} + +/* + * Convert one instruction. s->base.is_jmp is set if the translation must + * be stopped. + */ +static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) +{ + CPUX86State *env = cpu->env_ptr; + bool first = true; + X86DecodedInsn decode; + X86DecodeFunc decode_func = decode_root; + + s->has_modrm = false; + + next_byte: + if (first) { + first = false; + } else { + b = x86_ldub_code(env, s); + } + /* Collect prefixes. 
*/ + switch (b) { + case 0xf3: + s->prefix |= PREFIX_REPZ; + s->prefix &= ~PREFIX_REPNZ; + goto next_byte; + case 0xf2: + s->prefix |= PREFIX_REPNZ; + s->prefix &= ~PREFIX_REPZ; + goto next_byte; + case 0xf0: + s->prefix |= PREFIX_LOCK; + goto next_byte; + case 0x2e: + s->override = R_CS; + goto next_byte; + case 0x36: + s->override = R_SS; + goto next_byte; + case 0x3e: + s->override = R_DS; + goto next_byte; + case 0x26: + s->override = R_ES; + goto next_byte; + case 0x64: + s->override = R_FS; + goto next_byte; + case 0x65: + s->override = R_GS; + goto next_byte; + case 0x66: + s->prefix |= PREFIX_DATA; + goto next_byte; + case 0x67: + s->prefix |= PREFIX_ADR; + goto next_byte; +#ifdef TARGET_X86_64 + case 0x40 ... 0x4f: + if (CODE64(s)) { + /* REX prefix */ + s->prefix |= PREFIX_REX; + s->vex_w = (b >> 3) & 1; + s->rex_r = (b & 0x4) << 1; + s->rex_x = (b & 0x2) << 2; + s->rex_b = (b & 0x1) << 3; + goto next_byte; + } + break; +#endif + case 0xc5: /* 2-byte VEX */ + case 0xc4: /* 3-byte VEX */ + /* + * VEX prefixes cannot be used except in 32-bit mode. + * Otherwise the instruction is LES or LDS. + */ + if (CODE32(s) && !VM86(s)) { + static const int pp_prefix[4] = { + 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ + }; + int vex3, vex2 = x86_ldub_code(env, s); + + if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) { + /* + * 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b, + * otherwise the instruction is LES or LDS. + */ + s->pc--; /* rewind the advance_pc() x86_ldub_code() did */ + break; + } + + /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */ + if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ + | PREFIX_LOCK | PREFIX_DATA | PREFIX_REX)) { + goto illegal_op; + } +#ifdef TARGET_X86_64 + s->rex_r = (~vex2 >> 4) & 8; +#endif + if (b == 0xc5) { + /* 2-byte VEX prefix: RVVVVlpp, implied 0f leading opcode byte */ + vex3 = vex2; + decode_func = decode_0F; + } else { + /* 3-byte VEX prefix: RXBmmmmm wVVVVlpp */ + vex3 = x86_ldub_code(env, s); +#ifdef TARGET_X86_64 + s->rex_x = (~vex2 >> 3) & 8; + s->rex_b = (~vex2 >> 2) & 8; +#endif + s->vex_w = (vex3 >> 7) & 1; + switch (vex2 & 0x1f) { + case 0x01: /* Implied 0f leading opcode bytes. */ + decode_func = decode_0F; + break; + case 0x02: /* Implied 0f 38 leading opcode bytes. */ + decode_func = decode_0F38; + break; + case 0x03: /* Implied 0f 3a leading opcode bytes. */ + decode_func = decode_0F3A; + break; + default: /* Reserved for future use. */ + goto unknown_op; + } + } + s->vex_v = (~vex3 >> 3) & 0xf; + s->vex_l = (vex3 >> 2) & 1; + s->prefix |= pp_prefix[vex3 & 3] | PREFIX_VEX; + } + break; + default: + if (b >= 0x100) { + b -= 0x100; + decode_func = do_decode_0F; + } + break; + } + + /* Post-process prefixes. */ + if (CODE64(s)) { + /* + * In 64-bit mode, the default data size is 32-bit. Select 64-bit + * data with rex_w, and 16-bit data with 0x66; rex_w takes precedence + * over 0x66 if both are present. + */ + s->dflag = (REX_W(s) ? MO_64 : s->prefix & PREFIX_DATA ? MO_16 : MO_32); + /* In 64-bit mode, 0x67 selects 32-bit addressing. */ + s->aflag = (s->prefix & PREFIX_ADR ? MO_32 : MO_64); + } else { + /* In 16/32-bit mode, 0x66 selects the opposite data size. */ + if (CODE32(s) ^ ((s->prefix & PREFIX_DATA) != 0)) { + s->dflag = MO_32; + } else { + s->dflag = MO_16; + } + /* In 16/32-bit mode, 0x67 selects the opposite addressing. 
*/ + if (CODE32(s) ^ ((s->prefix & PREFIX_ADR) != 0)) { + s->aflag = MO_32; + } else { + s->aflag = MO_16; + } + } + + memset(&decode, 0, sizeof(decode)); + decode.b = b; + if (!decode_insn(s, env, decode_func, &decode)) { + goto illegal_op; + } + if (!decode.e.gen) { + goto unknown_op; + } + + if (!has_cpuid_feature(s, decode.e.cpuid)) { + goto illegal_op; + } + + switch (decode.e.special) { + case X86_SPECIAL_None: + break; + + case X86_SPECIAL_Locked: + if (decode.op[0].has_ea) { + s->prefix |= PREFIX_LOCK; + } + break; + + case X86_SPECIAL_ProtMode: + if (!PE(s) || VM86(s)) { + goto illegal_op; + } + break; + + case X86_SPECIAL_i64: + if (CODE64(s)) { + goto illegal_op; + } + break; + case X86_SPECIAL_o64: + if (!CODE64(s)) { + goto illegal_op; + } + break; + + case X86_SPECIAL_ZExtOp0: + assert(decode.op[0].unit == X86_OP_INT); + if (!decode.op[0].has_ea) { + decode.op[0].ot = MO_32; + } + break; + + case X86_SPECIAL_ZExtOp2: + assert(decode.op[2].unit == X86_OP_INT); + if (!decode.op[2].has_ea) { + decode.op[2].ot = MO_32; + } + break; + + case X86_SPECIAL_AVXExtMov: + if (!decode.op[2].has_ea) { + decode.op[2].ot = s->vex_l ? MO_256 : MO_128; + } else if (s->vex_l) { + decode.op[2].ot++; + } + break; + + default: + break; + } + + if (!validate_vex(s, &decode)) { + return; + } + + if (decode.e.special == X86_SPECIAL_MMX && + !(s->prefix & (PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA))) { + gen_enter_mmx(s); + } + + if (decode.op[0].has_ea || decode.op[1].has_ea || decode.op[2].has_ea) { + gen_load_ea(s, &decode.mem, decode.e.vex_class == 12); + } + if (s->prefix & PREFIX_LOCK) { + if (decode.op[0].unit != X86_OP_INT || !decode.op[0].has_ea) { + goto illegal_op; + } + gen_load(s, &decode, 2, s->T1); + decode.e.gen(s, env, &decode); + } else { + if (decode.op[0].unit == X86_OP_MMX) { + compute_mmx_offset(&decode.op[0]); + } else if (decode.op[0].unit == X86_OP_SSE) { + compute_xmm_offset(&decode.op[0]); + } + gen_load(s, &decode, 1, s->T0); + gen_load(s, &decode, 2, s->T1); + decode.e.gen(s, env, &decode); + gen_writeback(s, &decode, 0, s->T0); + } + decode_temps_free(&decode); + return; + illegal_op: + gen_illegal_opcode(s); + return; + unknown_op: + gen_unknown_opcode(env, s); +} diff --git a/target/i386/tcg/decode-new.h b/target/i386/tcg/decode-new.h new file mode 100644 index 0000000000..cb6b8bcf67 --- /dev/null +++ b/target/i386/tcg/decode-new.h @@ -0,0 +1,252 @@ +/* + * Decode table flags, mostly based on Intel SDM. + * + * Copyright (c) 2022 Red Hat, Inc. + * + * Author: Paolo Bonzini + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +typedef enum X86OpType { + X86_TYPE_None, + + X86_TYPE_A, /* Implicit */ + X86_TYPE_B, /* VEX.vvvv selects a GPR */ + X86_TYPE_C, /* REG in the modrm byte selects a control register */ + X86_TYPE_D, /* REG in the modrm byte selects a debug register */ + X86_TYPE_E, /* ALU modrm operand */ + X86_TYPE_F, /* EFLAGS/RFLAGS */ + X86_TYPE_G, /* REG in the modrm byte selects a GPR */ + X86_TYPE_H, /* For AVX, VEX.vvvv selects an XMM/YMM register */ + X86_TYPE_I, /* Immediate */ + X86_TYPE_J, /* Relative offset for a jump */ + X86_TYPE_L, /* The upper 4 bits of the immediate select a 128-bit register */ + X86_TYPE_M, /* modrm byte selects a memory operand */ + X86_TYPE_N, /* R/M in the modrm byte selects an MMX register */ + X86_TYPE_O, /* Absolute address encoded in the instruction */ + X86_TYPE_P, /* reg in the modrm byte selects an MMX register */ + X86_TYPE_Q, /* MMX modrm operand */ + X86_TYPE_R, /* R/M in the modrm byte selects a register */ + X86_TYPE_S, /* reg selects a segment register */ + X86_TYPE_U, /* R/M in the modrm byte selects an XMM/YMM register */ + X86_TYPE_V, /* reg in the modrm byte selects an XMM/YMM register */ + X86_TYPE_W, /* XMM/YMM modrm operand */ + X86_TYPE_X, /* string source */ + X86_TYPE_Y, /* string destination */ + + /* Custom */ + X86_TYPE_WM, /* modrm byte selects an XMM/YMM memory operand */ + X86_TYPE_2op, /* 2-operand RMW instruction */ + X86_TYPE_LoBits, /* encoded in bits 0-2 of the operand + REX.B */ + X86_TYPE_0, /* Hard-coded GPRs (RAX..RDI) */ + X86_TYPE_1, + X86_TYPE_2, + X86_TYPE_3, + X86_TYPE_4, + X86_TYPE_5, + X86_TYPE_6, + X86_TYPE_7, + X86_TYPE_ES, /* Hard-coded segment registers */ + X86_TYPE_CS, + X86_TYPE_SS, + X86_TYPE_DS, + X86_TYPE_FS, + X86_TYPE_GS, +} X86OpType; + +typedef enum X86OpSize { + X86_SIZE_None, + + X86_SIZE_a, /* BOUND operand */ + X86_SIZE_b, /* byte */ + X86_SIZE_d, /* 32-bit */ + X86_SIZE_dq, /* SSE/AVX 128-bit */ + X86_SIZE_p, /* Far pointer */ + X86_SIZE_pd, /* SSE/AVX packed double precision */ + X86_SIZE_pi, /* MMX */ + X86_SIZE_ps, /* SSE/AVX packed single precision */ + X86_SIZE_q, /* 64-bit */ + X86_SIZE_qq, /* AVX 256-bit */ + X86_SIZE_s, /* Descriptor */ + X86_SIZE_sd, /* SSE/AVX scalar double precision */ + X86_SIZE_ss, /* SSE/AVX scalar single precision */ + X86_SIZE_si, /* 32-bit GPR */ + X86_SIZE_v, /* 16/32/64-bit, based on operand size */ + X86_SIZE_w, /* 16-bit */ + X86_SIZE_x, /* 128/256-bit, based on operand size */ + X86_SIZE_y, /* 32/64-bit, based on operand size */ + X86_SIZE_z, /* 16-bit for 16-bit operand size, else 32-bit */ + + /* Custom */ + X86_SIZE_d64, + X86_SIZE_f64, + X86_SIZE_ph, /* SSE/AVX packed half precision */ +} X86OpSize; + +typedef enum X86CPUIDFeature { + X86_FEAT_None, + X86_FEAT_3DNOW, + X86_FEAT_ADX, + X86_FEAT_AES, + X86_FEAT_AVX, + X86_FEAT_AVX2, + X86_FEAT_BMI1, + X86_FEAT_BMI2, + X86_FEAT_F16C, + X86_FEAT_FMA, + X86_FEAT_MOVBE, + X86_FEAT_PCLMULQDQ, + X86_FEAT_SSE, + X86_FEAT_SSE2, + X86_FEAT_SSE3, + X86_FEAT_SSSE3, + X86_FEAT_SSE41, + X86_FEAT_SSE42, + X86_FEAT_SSE4A, +} X86CPUIDFeature; + +/* Execution flags */ + +typedef enum X86OpUnit { + X86_OP_SKIP, /* not valid or managed by emission function */ + X86_OP_SEG, /* segment selector */ + X86_OP_CR, /* control register */ + X86_OP_DR, /* debug register */ + X86_OP_INT, /* loaded into/stored from s->T0/T1 */ + X86_OP_IMM, /* immediate */ + X86_OP_SSE, /* address in either s->ptrX or s->A0 depending on has_ea */ + X86_OP_MMX, /* address in either s->ptrX or s->A0 depending on has_ea */ +} X86OpUnit; + +typedef enum 
X86InsnSpecial { + X86_SPECIAL_None, + + /* Always locked if it has a memory operand (XCHG) */ + X86_SPECIAL_Locked, + + /* Fault outside protected mode */ + X86_SPECIAL_ProtMode, + + /* + * Register operand 0/2 is zero extended to 32 bits. Rd/Mb or Rd/Mw + * in the manual. + */ + X86_SPECIAL_ZExtOp0, + X86_SPECIAL_ZExtOp2, + + /* + * Register operand 2 is extended to full width, while a memory operand + * is doubled in size if VEX.L=1. + */ + X86_SPECIAL_AVXExtMov, + + /* + * MMX instruction exists with no prefix; if there is no prefix, V/H/W/U operands + * become P/P/Q/N, and size "x" becomes "q". + */ + X86_SPECIAL_MMX, + + /* Illegal or exclusive to 64-bit mode */ + X86_SPECIAL_i64, + X86_SPECIAL_o64, +} X86InsnSpecial; + +/* + * Special cases for instructions that operate on XMM/YMM registers. Intel + * retconned all of them to have VEX exception classes other than 0 and 13, so + * all these only matter for instructions that have a VEX exception class. + * Based on tables in the "AVX and SSE Instruction Exception Specification" + * section of the manual. + */ +typedef enum X86VEXSpecial { + /* Legacy SSE instructions that allow unaligned operands */ + X86_VEX_SSEUnaligned, + + /* + * Used for instructions that distinguish the XMM operand type with an + * instruction prefix; legacy SSE encodings will allow unaligned operands + * for scalar operands only (identified by a REP prefix). In this case, + * the decoding table uses "x" for the vector operands instead of specifying + * pd/ps/sd/ss individually. + */ + X86_VEX_REPScalar, + + /* + * VEX instructions that only support 256-bit operands with AVX2 (Table 2-17 + * column 3). Columns 2 and 4 (instructions limited to 256- and 127-bit + * operands respectively) are implicit in the presence of dq and qq + * operands, and thus handled by decode_op_size. + */ + X86_VEX_AVX2_256, +} X86VEXSpecial; + + +typedef struct X86OpEntry X86OpEntry; +typedef struct X86DecodedInsn X86DecodedInsn; + +/* Decode function for multibyte opcodes. */ +typedef void (*X86DecodeFunc)(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b); + +/* Code generation function. */ +typedef void (*X86GenFunc)(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode); + +struct X86OpEntry { + /* Based on the is_decode flags. */ + union { + X86GenFunc gen; + X86DecodeFunc decode; + }; + /* op0 is always written, op1 and op2 are always read. */ + X86OpType op0:8; + X86OpSize s0:8; + X86OpType op1:8; + X86OpSize s1:8; + X86OpType op2:8; + X86OpSize s2:8; + /* Must be I and b respectively if present. */ + X86OpType op3:8; + X86OpSize s3:8; + + X86InsnSpecial special:8; + X86CPUIDFeature cpuid:8; + unsigned vex_class:8; + X86VEXSpecial vex_special:8; + uint16_t valid_prefix:16; + bool is_decode:1; +}; + +typedef struct X86DecodedOp { + int8_t n; + MemOp ot; /* For b/c/d/p/s/q/v/w/y/z */ + X86OpUnit unit; + bool has_ea; + int offset; /* For MMX and SSE */ + + /* + * This field is used internally by macros OP0_PTR/OP1_PTR/OP2_PTR, + * do not access directly! + */ + TCGv_ptr v_ptr; +} X86DecodedOp; + +struct X86DecodedInsn { + X86OpEntry e; + X86DecodedOp op[3]; + target_ulong immediate; + AddressParts mem; + + uint8_t b; +}; + diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc new file mode 100644 index 0000000000..b1f9494aaa --- /dev/null +++ b/target/i386/tcg/emit.c.inc @@ -0,0 +1,2305 @@ +/* + * New-style TCG opcode generator for i386 instructions + * + * Copyright (c) 2022 Red Hat, Inc. 
+ * + * Author: Paolo Bonzini + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#define ZMM_OFFSET(reg) offsetof(CPUX86State, xmm_regs[reg]) + +typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg); +typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg); +typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b); +typedef void (*SSEFunc_0_eppp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, + TCGv_ptr reg_c); +typedef void (*SSEFunc_0_epppp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, + TCGv_ptr reg_c, TCGv_ptr reg_d); +typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, + TCGv_i32 val); +typedef void (*SSEFunc_0_epppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, + TCGv_ptr reg_c, TCGv_i32 val); +typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val); +typedef void (*SSEFunc_0_pppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_ptr reg_c, + TCGv_i32 val); +typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, + TCGv val); +typedef void (*SSEFunc_0_epppti)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, + TCGv_ptr reg_c, TCGv a0, TCGv_i32 scale); +typedef void (*SSEFunc_0_eppppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, + TCGv_ptr reg_c, TCGv_ptr reg_d, TCGv_i32 flags); +typedef void (*SSEFunc_0_eppppii)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, + TCGv_ptr reg_c, TCGv_ptr reg_d, TCGv_i32 even, + TCGv_i32 odd); + +static inline TCGv_i32 tcg_constant8u_i32(uint8_t val) +{ + return tcg_constant_i32(val); +} + +static void gen_NM_exception(DisasContext *s) +{ + gen_exception(s, EXCP07_PREX); +} + +static void gen_illegal(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_illegal_opcode(s); +} + +static void gen_load_ea(DisasContext *s, AddressParts *mem, bool is_vsib) +{ + TCGv ea = gen_lea_modrm_1(s, *mem, is_vsib); + gen_lea_v_seg(s, s->aflag, ea, mem->def_seg, s->override); +} + +static inline int mmx_offset(MemOp ot) +{ + switch (ot) { + case MO_8: + return offsetof(MMXReg, MMX_B(0)); + case MO_16: + return offsetof(MMXReg, MMX_W(0)); + case MO_32: + return offsetof(MMXReg, MMX_L(0)); + case MO_64: + return offsetof(MMXReg, MMX_Q(0)); + default: + g_assert_not_reached(); + } +} + +static inline int xmm_offset(MemOp ot) +{ + switch (ot) { + case MO_8: + return offsetof(ZMMReg, ZMM_B(0)); + case MO_16: + return offsetof(ZMMReg, ZMM_W(0)); + case MO_32: + return offsetof(ZMMReg, ZMM_L(0)); + case MO_64: + return offsetof(ZMMReg, ZMM_Q(0)); + case MO_128: + return offsetof(ZMMReg, ZMM_X(0)); + case MO_256: + return offsetof(ZMMReg, ZMM_Y(0)); + default: + g_assert_not_reached(); + } +} + +static int vector_reg_offset(X86DecodedOp *op) +{ + assert(op->unit == X86_OP_MMX || op->unit == X86_OP_SSE); + + if (op->unit == X86_OP_MMX) { + return op->offset - mmx_offset(op->ot); + } else { + return op->offset - xmm_offset(op->ot); + } +} + +static int 
vector_elem_offset(X86DecodedOp *op, MemOp ot, int n) +{ + int base_ofs = vector_reg_offset(op); + switch(ot) { + case MO_8: + if (op->unit == X86_OP_MMX) { + return base_ofs + offsetof(MMXReg, MMX_B(n)); + } else { + return base_ofs + offsetof(ZMMReg, ZMM_B(n)); + } + case MO_16: + if (op->unit == X86_OP_MMX) { + return base_ofs + offsetof(MMXReg, MMX_W(n)); + } else { + return base_ofs + offsetof(ZMMReg, ZMM_W(n)); + } + case MO_32: + if (op->unit == X86_OP_MMX) { + return base_ofs + offsetof(MMXReg, MMX_L(n)); + } else { + return base_ofs + offsetof(ZMMReg, ZMM_L(n)); + } + case MO_64: + if (op->unit == X86_OP_MMX) { + return base_ofs; + } else { + return base_ofs + offsetof(ZMMReg, ZMM_Q(n)); + } + case MO_128: + assert(op->unit == X86_OP_SSE); + return base_ofs + offsetof(ZMMReg, ZMM_X(n)); + case MO_256: + assert(op->unit == X86_OP_SSE); + return base_ofs + offsetof(ZMMReg, ZMM_Y(n)); + default: + g_assert_not_reached(); + } +} + +static void compute_mmx_offset(X86DecodedOp *op) +{ + if (!op->has_ea) { + op->offset = offsetof(CPUX86State, fpregs[op->n].mmx) + mmx_offset(op->ot); + } else { + op->offset = offsetof(CPUX86State, mmx_t0) + mmx_offset(op->ot); + } +} + +static void compute_xmm_offset(X86DecodedOp *op) +{ + if (!op->has_ea) { + op->offset = ZMM_OFFSET(op->n) + xmm_offset(op->ot); + } else { + op->offset = offsetof(CPUX86State, xmm_t0) + xmm_offset(op->ot); + } +} + +static void gen_load_sse(DisasContext *s, TCGv temp, MemOp ot, int dest_ofs, bool aligned) +{ + switch(ot) { + case MO_8: + gen_op_ld_v(s, MO_8, temp, s->A0); + tcg_gen_st8_tl(temp, cpu_env, dest_ofs); + break; + case MO_16: + gen_op_ld_v(s, MO_16, temp, s->A0); + tcg_gen_st16_tl(temp, cpu_env, dest_ofs); + break; + case MO_32: + gen_op_ld_v(s, MO_32, temp, s->A0); + tcg_gen_st32_tl(temp, cpu_env, dest_ofs); + break; + case MO_64: + gen_ldq_env_A0(s, dest_ofs); + break; + case MO_128: + gen_ldo_env_A0(s, dest_ofs, aligned); + break; + case MO_256: + gen_ldy_env_A0(s, dest_ofs, aligned); + break; + default: + g_assert_not_reached(); + } +} + +static bool sse_needs_alignment(DisasContext *s, X86DecodedInsn *decode, MemOp ot) +{ + switch (decode->e.vex_class) { + case 2: + case 4: + if ((s->prefix & PREFIX_VEX) || + decode->e.vex_special == X86_VEX_SSEUnaligned) { + /* MOST legacy SSE instructions require aligned memory operands, but not all. 
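+             * The exceptions are the VEX-encoded forms and the entries marked
+             * X86_VEX_SSEUnaligned, which is exactly what the condition above checks.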
*/ + return false; + } + /* fall through */ + case 1: + return ot >= MO_128; + + default: + return false; + } +} + +static void gen_load(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v) +{ + X86DecodedOp *op = &decode->op[opn]; + + switch (op->unit) { + case X86_OP_SKIP: + return; + case X86_OP_SEG: + tcg_gen_ld32u_tl(v, cpu_env, + offsetof(CPUX86State,segs[op->n].selector)); + break; + case X86_OP_CR: + tcg_gen_ld_tl(v, cpu_env, offsetof(CPUX86State, cr[op->n])); + break; + case X86_OP_DR: + tcg_gen_ld_tl(v, cpu_env, offsetof(CPUX86State, dr[op->n])); + break; + case X86_OP_INT: + if (op->has_ea) { + gen_op_ld_v(s, op->ot, v, s->A0); + } else { + gen_op_mov_v_reg(s, op->ot, v, op->n); + } + break; + case X86_OP_IMM: + tcg_gen_movi_tl(v, decode->immediate); + break; + + case X86_OP_MMX: + compute_mmx_offset(op); + goto load_vector; + + case X86_OP_SSE: + compute_xmm_offset(op); + load_vector: + if (op->has_ea) { + bool aligned = sse_needs_alignment(s, decode, op->ot); + gen_load_sse(s, v, op->ot, op->offset, aligned); + } + break; + + default: + g_assert_not_reached(); + } +} + +static TCGv_ptr op_ptr(X86DecodedInsn *decode, int opn) +{ + X86DecodedOp *op = &decode->op[opn]; + if (op->v_ptr) { + return op->v_ptr; + } + op->v_ptr = tcg_temp_new_ptr(); + + /* The temporary points to the MMXReg or ZMMReg. */ + tcg_gen_addi_ptr(op->v_ptr, cpu_env, vector_reg_offset(op)); + return op->v_ptr; +} + +#define OP_PTR0 op_ptr(decode, 0) +#define OP_PTR1 op_ptr(decode, 1) +#define OP_PTR2 op_ptr(decode, 2) + +static void gen_writeback(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v) +{ + X86DecodedOp *op = &decode->op[opn]; + switch (op->unit) { + case X86_OP_SKIP: + break; + case X86_OP_SEG: + /* Note that gen_movl_seg_T0 takes care of interrupt shadow and TF. */ + gen_movl_seg_T0(s, op->n); + break; + case X86_OP_INT: + if (op->has_ea) { + gen_op_st_v(s, op->ot, v, s->A0); + } else { + gen_op_mov_reg_v(s, op->ot, op->n, v); + } + break; + case X86_OP_MMX: + break; + case X86_OP_SSE: + if (!op->has_ea && (s->prefix & PREFIX_VEX) && op->ot <= MO_128) { + tcg_gen_gvec_dup_imm(MO_64, + offsetof(CPUX86State, xmm_regs[op->n].ZMM_X(1)), + 16, 16, 0); + } + break; + case X86_OP_CR: + case X86_OP_DR: + default: + g_assert_not_reached(); + } +} + +static inline int vector_len(DisasContext *s, X86DecodedInsn *decode) +{ + if (decode->e.special == X86_SPECIAL_MMX && + !(s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { + return 8; + } + return s->vex_l ? 
32 : 16; +} + +static void gen_store_sse(DisasContext *s, X86DecodedInsn *decode, int src_ofs) +{ + MemOp ot = decode->op[0].ot; + int vec_len = vector_len(s, decode); + bool aligned = sse_needs_alignment(s, decode, ot); + + if (!decode->op[0].has_ea) { + tcg_gen_gvec_mov(MO_64, decode->op[0].offset, src_ofs, vec_len, vec_len); + return; + } + + switch (ot) { + case MO_64: + gen_stq_env_A0(s, src_ofs); + break; + case MO_128: + gen_sto_env_A0(s, src_ofs, aligned); + break; + case MO_256: + gen_sty_env_A0(s, src_ofs, aligned); + break; + default: + g_assert_not_reached(); + } +} + +static void gen_helper_pavgusb(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b) +{ + gen_helper_pavgb_mmx(env, reg_a, reg_a, reg_b); +} + +#define FN_3DNOW_MOVE ((SSEFunc_0_epp) (uintptr_t) 1) +static const SSEFunc_0_epp fns_3dnow[] = { + [0x0c] = gen_helper_pi2fw, + [0x0d] = gen_helper_pi2fd, + [0x1c] = gen_helper_pf2iw, + [0x1d] = gen_helper_pf2id, + [0x8a] = gen_helper_pfnacc, + [0x8e] = gen_helper_pfpnacc, + [0x90] = gen_helper_pfcmpge, + [0x94] = gen_helper_pfmin, + [0x96] = gen_helper_pfrcp, + [0x97] = gen_helper_pfrsqrt, + [0x9a] = gen_helper_pfsub, + [0x9e] = gen_helper_pfadd, + [0xa0] = gen_helper_pfcmpgt, + [0xa4] = gen_helper_pfmax, + [0xa6] = FN_3DNOW_MOVE, /* PFRCPIT1; no need to actually increase precision */ + [0xa7] = FN_3DNOW_MOVE, /* PFRSQIT1 */ + [0xb6] = FN_3DNOW_MOVE, /* PFRCPIT2 */ + [0xaa] = gen_helper_pfsubr, + [0xae] = gen_helper_pfacc, + [0xb0] = gen_helper_pfcmpeq, + [0xb4] = gen_helper_pfmul, + [0xb7] = gen_helper_pmulhrw_mmx, + [0xbb] = gen_helper_pswapd, + [0xbf] = gen_helper_pavgusb, +}; + +static void gen_3dnow(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + uint8_t b = decode->immediate; + SSEFunc_0_epp fn = b < ARRAY_SIZE(fns_3dnow) ? fns_3dnow[b] : NULL; + + if (!fn) { + gen_illegal_opcode(s); + return; + } + if (s->flags & HF_TS_MASK) { + gen_NM_exception(s); + return; + } + if (s->flags & HF_EM_MASK) { + gen_illegal_opcode(s); + return; + } + + gen_enter_mmx(s); + if (fn == FN_3DNOW_MOVE) { + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[1].offset); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, decode->op[0].offset); + } else { + fn(cpu_env, OP_PTR0, OP_PTR1); + } +} + +/* + * 00 = v*ps Vps, Hps, Wpd + * 66 = v*pd Vpd, Hpd, Wps + * f3 = v*ss Vss, Hss, Wps + * f2 = v*sd Vsd, Hsd, Wps + */ +static inline void gen_unary_fp_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, + SSEFunc_0_epp pd_xmm, SSEFunc_0_epp ps_xmm, + SSEFunc_0_epp pd_ymm, SSEFunc_0_epp ps_ymm, + SSEFunc_0_eppp sd, SSEFunc_0_eppp ss) +{ + if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) != 0) { + SSEFunc_0_eppp fn = s->prefix & PREFIX_REPZ ? ss : sd; + if (!fn) { + gen_illegal_opcode(s); + return; + } + fn(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2); + } else { + SSEFunc_0_epp ps, pd, fn; + ps = s->vex_l ? ps_ymm : ps_xmm; + pd = s->vex_l ? pd_ymm : pd_xmm; + fn = s->prefix & PREFIX_DATA ? 
pd : ps; + if (!fn) { + gen_illegal_opcode(s); + return; + } + fn(cpu_env, OP_PTR0, OP_PTR2); + } +} +#define UNARY_FP_SSE(uname, lname) \ +static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + gen_unary_fp_sse(s, env, decode, \ + gen_helper_##lname##pd_xmm, \ + gen_helper_##lname##ps_xmm, \ + gen_helper_##lname##pd_ymm, \ + gen_helper_##lname##ps_ymm, \ + gen_helper_##lname##sd, \ + gen_helper_##lname##ss); \ +} +UNARY_FP_SSE(VSQRT, sqrt) + +/* + * 00 = v*ps Vps, Hps, Wpd + * 66 = v*pd Vpd, Hpd, Wps + * f3 = v*ss Vss, Hss, Wps + * f2 = v*sd Vsd, Hsd, Wps + */ +static inline void gen_fp_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, + SSEFunc_0_eppp pd_xmm, SSEFunc_0_eppp ps_xmm, + SSEFunc_0_eppp pd_ymm, SSEFunc_0_eppp ps_ymm, + SSEFunc_0_eppp sd, SSEFunc_0_eppp ss) +{ + SSEFunc_0_eppp ps, pd, fn; + if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) != 0) { + fn = s->prefix & PREFIX_REPZ ? ss : sd; + } else { + ps = s->vex_l ? ps_ymm : ps_xmm; + pd = s->vex_l ? pd_ymm : pd_xmm; + fn = s->prefix & PREFIX_DATA ? pd : ps; + } + if (fn) { + fn(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2); + } else { + gen_illegal_opcode(s); + } +} + +#define FP_SSE(uname, lname) \ +static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + gen_fp_sse(s, env, decode, \ + gen_helper_##lname##pd_xmm, \ + gen_helper_##lname##ps_xmm, \ + gen_helper_##lname##pd_ymm, \ + gen_helper_##lname##ps_ymm, \ + gen_helper_##lname##sd, \ + gen_helper_##lname##ss); \ +} +FP_SSE(VADD, add) +FP_SSE(VMUL, mul) +FP_SSE(VSUB, sub) +FP_SSE(VMIN, min) +FP_SSE(VDIV, div) +FP_SSE(VMAX, max) + +#define FMA_SSE_PACKED(uname, ptr0, ptr1, ptr2, even, odd) \ +static void gen_##uname##Px(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + SSEFunc_0_eppppii xmm = s->vex_w ? gen_helper_fma4pd_xmm : gen_helper_fma4ps_xmm; \ + SSEFunc_0_eppppii ymm = s->vex_w ? gen_helper_fma4pd_ymm : gen_helper_fma4ps_ymm; \ + SSEFunc_0_eppppii fn = s->vex_l ? ymm : xmm; \ + \ + fn(cpu_env, OP_PTR0, ptr0, ptr1, ptr2, \ + tcg_constant_i32(even), \ + tcg_constant_i32((even) ^ (odd))); \ +} + +#define FMA_SSE(uname, ptr0, ptr1, ptr2, flags) \ +FMA_SSE_PACKED(uname, ptr0, ptr1, ptr2, flags, flags) \ +static void gen_##uname##Sx(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + SSEFunc_0_eppppi fn = s->vex_w ? 
gen_helper_fma4sd : gen_helper_fma4ss; \ + \ + fn(cpu_env, OP_PTR0, ptr0, ptr1, ptr2, \ + tcg_constant_i32(flags)); \ +} \ + +FMA_SSE(VFMADD231, OP_PTR1, OP_PTR2, OP_PTR0, 0) +FMA_SSE(VFMADD213, OP_PTR1, OP_PTR0, OP_PTR2, 0) +FMA_SSE(VFMADD132, OP_PTR0, OP_PTR2, OP_PTR1, 0) + +FMA_SSE(VFNMADD231, OP_PTR1, OP_PTR2, OP_PTR0, float_muladd_negate_product) +FMA_SSE(VFNMADD213, OP_PTR1, OP_PTR0, OP_PTR2, float_muladd_negate_product) +FMA_SSE(VFNMADD132, OP_PTR0, OP_PTR2, OP_PTR1, float_muladd_negate_product) + +FMA_SSE(VFMSUB231, OP_PTR1, OP_PTR2, OP_PTR0, float_muladd_negate_c) +FMA_SSE(VFMSUB213, OP_PTR1, OP_PTR0, OP_PTR2, float_muladd_negate_c) +FMA_SSE(VFMSUB132, OP_PTR0, OP_PTR2, OP_PTR1, float_muladd_negate_c) + +FMA_SSE(VFNMSUB231, OP_PTR1, OP_PTR2, OP_PTR0, float_muladd_negate_c|float_muladd_negate_product) +FMA_SSE(VFNMSUB213, OP_PTR1, OP_PTR0, OP_PTR2, float_muladd_negate_c|float_muladd_negate_product) +FMA_SSE(VFNMSUB132, OP_PTR0, OP_PTR2, OP_PTR1, float_muladd_negate_c|float_muladd_negate_product) + +FMA_SSE_PACKED(VFMADDSUB231, OP_PTR1, OP_PTR2, OP_PTR0, float_muladd_negate_c, 0) +FMA_SSE_PACKED(VFMADDSUB213, OP_PTR1, OP_PTR0, OP_PTR2, float_muladd_negate_c, 0) +FMA_SSE_PACKED(VFMADDSUB132, OP_PTR0, OP_PTR2, OP_PTR1, float_muladd_negate_c, 0) + +FMA_SSE_PACKED(VFMSUBADD231, OP_PTR1, OP_PTR2, OP_PTR0, 0, float_muladd_negate_c) +FMA_SSE_PACKED(VFMSUBADD213, OP_PTR1, OP_PTR0, OP_PTR2, 0, float_muladd_negate_c) +FMA_SSE_PACKED(VFMSUBADD132, OP_PTR0, OP_PTR2, OP_PTR1, 0, float_muladd_negate_c) + +#define FP_UNPACK_SSE(uname, lname) \ +static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + /* PS maps to the DQ integer instruction, PD maps to QDQ. */ \ + gen_fp_sse(s, env, decode, \ + gen_helper_##lname##qdq_xmm, \ + gen_helper_##lname##dq_xmm, \ + gen_helper_##lname##qdq_ymm, \ + gen_helper_##lname##dq_ymm, \ + NULL, NULL); \ +} +FP_UNPACK_SSE(VUNPCKLPx, punpckl) +FP_UNPACK_SSE(VUNPCKHPx, punpckh) + +/* + * 00 = v*ps Vps, Wpd + * f3 = v*ss Vss, Wps + */ +static inline void gen_unary_fp32_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, + SSEFunc_0_epp ps_xmm, + SSEFunc_0_epp ps_ymm, + SSEFunc_0_eppp ss) +{ + if ((s->prefix & (PREFIX_DATA | PREFIX_REPNZ)) != 0) { + goto illegal_op; + } else if (s->prefix & PREFIX_REPZ) { + if (!ss) { + goto illegal_op; + } + ss(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2); + } else { + SSEFunc_0_epp fn = s->vex_l ? ps_ymm : ps_xmm; + if (!fn) { + goto illegal_op; + } + fn(cpu_env, OP_PTR0, OP_PTR2); + } + return; + +illegal_op: + gen_illegal_opcode(s); +} +#define UNARY_FP32_SSE(uname, lname) \ +static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + gen_unary_fp32_sse(s, env, decode, \ + gen_helper_##lname##ps_xmm, \ + gen_helper_##lname##ps_ymm, \ + gen_helper_##lname##ss); \ +} +UNARY_FP32_SSE(VRSQRT, rsqrt) +UNARY_FP32_SSE(VRCP, rcp) + +/* + * 66 = v*pd Vpd, Hpd, Wpd + * f2 = v*ps Vps, Hps, Wps + */ +static inline void gen_horizontal_fp_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, + SSEFunc_0_eppp pd_xmm, SSEFunc_0_eppp ps_xmm, + SSEFunc_0_eppp pd_ymm, SSEFunc_0_eppp ps_ymm) +{ + SSEFunc_0_eppp ps, pd, fn; + ps = s->vex_l ? ps_ymm : ps_xmm; + pd = s->vex_l ? pd_ymm : pd_xmm; + fn = s->prefix & PREFIX_DATA ? 
pd : ps; + fn(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2); +} +#define HORIZONTAL_FP_SSE(uname, lname) \ +static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + gen_horizontal_fp_sse(s, env, decode, \ + gen_helper_##lname##pd_xmm, gen_helper_##lname##ps_xmm, \ + gen_helper_##lname##pd_ymm, gen_helper_##lname##ps_ymm); \ +} +HORIZONTAL_FP_SSE(VHADD, hadd) +HORIZONTAL_FP_SSE(VHSUB, hsub) +HORIZONTAL_FP_SSE(VADDSUB, addsub) + +static inline void gen_ternary_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, + int op3, SSEFunc_0_epppp xmm, SSEFunc_0_epppp ymm) +{ + SSEFunc_0_epppp fn = s->vex_l ? ymm : xmm; + TCGv_ptr ptr3 = tcg_temp_new_ptr(); + + /* The format of the fourth input is Lx */ + tcg_gen_addi_ptr(ptr3, cpu_env, ZMM_OFFSET(op3)); + fn(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2, ptr3); + tcg_temp_free_ptr(ptr3); +} +#define TERNARY_SSE(uname, uvname, lname) \ +static void gen_##uvname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + gen_ternary_sse(s, env, decode, (uint8_t)decode->immediate >> 4, \ + gen_helper_##lname##_xmm, gen_helper_##lname##_ymm); \ +} \ +static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + gen_ternary_sse(s, env, decode, 0, \ + gen_helper_##lname##_xmm, gen_helper_##lname##_ymm); \ +} +TERNARY_SSE(BLENDVPS, VBLENDVPS, blendvps) +TERNARY_SSE(BLENDVPD, VBLENDVPD, blendvpd) +TERNARY_SSE(PBLENDVB, VPBLENDVB, pblendvb) + +static inline void gen_binary_imm_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, + SSEFunc_0_epppi xmm, SSEFunc_0_epppi ymm) +{ + TCGv_i32 imm = tcg_constant8u_i32(decode->immediate); + if (!s->vex_l) { + xmm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2, imm); + } else { + ymm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2, imm); + } +} + +#define BINARY_IMM_SSE(uname, lname) \ +static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + gen_binary_imm_sse(s, env, decode, \ + gen_helper_##lname##_xmm, \ + gen_helper_##lname##_ymm); \ +} + +BINARY_IMM_SSE(VBLENDPD, blendpd) +BINARY_IMM_SSE(VBLENDPS, blendps) +BINARY_IMM_SSE(VPBLENDW, pblendw) +BINARY_IMM_SSE(VDDPS, dpps) +#define gen_helper_dppd_ymm NULL +BINARY_IMM_SSE(VDDPD, dppd) +BINARY_IMM_SSE(VMPSADBW, mpsadbw) +BINARY_IMM_SSE(PCLMULQDQ, pclmulqdq) + + +#define UNARY_INT_GVEC(uname, func, ...) \ +static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + int vec_len = vector_len(s, decode); \ + \ + func(__VA_ARGS__, decode->op[0].offset, \ + decode->op[2].offset, vec_len, vec_len); \ +} +UNARY_INT_GVEC(PABSB, tcg_gen_gvec_abs, MO_8) +UNARY_INT_GVEC(PABSW, tcg_gen_gvec_abs, MO_16) +UNARY_INT_GVEC(PABSD, tcg_gen_gvec_abs, MO_32) +UNARY_INT_GVEC(VBROADCASTx128, tcg_gen_gvec_dup_mem, MO_128) +UNARY_INT_GVEC(VPBROADCASTB, tcg_gen_gvec_dup_mem, MO_8) +UNARY_INT_GVEC(VPBROADCASTW, tcg_gen_gvec_dup_mem, MO_16) +UNARY_INT_GVEC(VPBROADCASTD, tcg_gen_gvec_dup_mem, MO_32) +UNARY_INT_GVEC(VPBROADCASTQ, tcg_gen_gvec_dup_mem, MO_64) + + +#define BINARY_INT_GVEC(uname, func, ...) 
\ +static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + int vec_len = vector_len(s, decode); \ + \ + func(__VA_ARGS__, \ + decode->op[0].offset, decode->op[1].offset, \ + decode->op[2].offset, vec_len, vec_len); \ +} + +BINARY_INT_GVEC(PADDB, tcg_gen_gvec_add, MO_8) +BINARY_INT_GVEC(PADDW, tcg_gen_gvec_add, MO_16) +BINARY_INT_GVEC(PADDD, tcg_gen_gvec_add, MO_32) +BINARY_INT_GVEC(PADDQ, tcg_gen_gvec_add, MO_64) +BINARY_INT_GVEC(PADDSB, tcg_gen_gvec_ssadd, MO_8) +BINARY_INT_GVEC(PADDSW, tcg_gen_gvec_ssadd, MO_16) +BINARY_INT_GVEC(PADDUSB, tcg_gen_gvec_usadd, MO_8) +BINARY_INT_GVEC(PADDUSW, tcg_gen_gvec_usadd, MO_16) +BINARY_INT_GVEC(PAND, tcg_gen_gvec_and, MO_64) +BINARY_INT_GVEC(PCMPEQB, tcg_gen_gvec_cmp, TCG_COND_EQ, MO_8) +BINARY_INT_GVEC(PCMPEQD, tcg_gen_gvec_cmp, TCG_COND_EQ, MO_32) +BINARY_INT_GVEC(PCMPEQW, tcg_gen_gvec_cmp, TCG_COND_EQ, MO_16) +BINARY_INT_GVEC(PCMPEQQ, tcg_gen_gvec_cmp, TCG_COND_EQ, MO_64) +BINARY_INT_GVEC(PCMPGTB, tcg_gen_gvec_cmp, TCG_COND_GT, MO_8) +BINARY_INT_GVEC(PCMPGTW, tcg_gen_gvec_cmp, TCG_COND_GT, MO_16) +BINARY_INT_GVEC(PCMPGTD, tcg_gen_gvec_cmp, TCG_COND_GT, MO_32) +BINARY_INT_GVEC(PCMPGTQ, tcg_gen_gvec_cmp, TCG_COND_GT, MO_64) +BINARY_INT_GVEC(PMAXSB, tcg_gen_gvec_smax, MO_8) +BINARY_INT_GVEC(PMAXSW, tcg_gen_gvec_smax, MO_16) +BINARY_INT_GVEC(PMAXSD, tcg_gen_gvec_smax, MO_32) +BINARY_INT_GVEC(PMAXUB, tcg_gen_gvec_umax, MO_8) +BINARY_INT_GVEC(PMAXUW, tcg_gen_gvec_umax, MO_16) +BINARY_INT_GVEC(PMAXUD, tcg_gen_gvec_umax, MO_32) +BINARY_INT_GVEC(PMINSB, tcg_gen_gvec_smin, MO_8) +BINARY_INT_GVEC(PMINSW, tcg_gen_gvec_smin, MO_16) +BINARY_INT_GVEC(PMINSD, tcg_gen_gvec_smin, MO_32) +BINARY_INT_GVEC(PMINUB, tcg_gen_gvec_umin, MO_8) +BINARY_INT_GVEC(PMINUW, tcg_gen_gvec_umin, MO_16) +BINARY_INT_GVEC(PMINUD, tcg_gen_gvec_umin, MO_32) +BINARY_INT_GVEC(PMULLW, tcg_gen_gvec_mul, MO_16) +BINARY_INT_GVEC(PMULLD, tcg_gen_gvec_mul, MO_32) +BINARY_INT_GVEC(POR, tcg_gen_gvec_or, MO_64) +BINARY_INT_GVEC(PSUBB, tcg_gen_gvec_sub, MO_8) +BINARY_INT_GVEC(PSUBW, tcg_gen_gvec_sub, MO_16) +BINARY_INT_GVEC(PSUBD, tcg_gen_gvec_sub, MO_32) +BINARY_INT_GVEC(PSUBQ, tcg_gen_gvec_sub, MO_64) +BINARY_INT_GVEC(PSUBSB, tcg_gen_gvec_sssub, MO_8) +BINARY_INT_GVEC(PSUBSW, tcg_gen_gvec_sssub, MO_16) +BINARY_INT_GVEC(PSUBUSB, tcg_gen_gvec_ussub, MO_8) +BINARY_INT_GVEC(PSUBUSW, tcg_gen_gvec_ussub, MO_16) +BINARY_INT_GVEC(PXOR, tcg_gen_gvec_xor, MO_64) + + +/* + * 00 = p* Pq, Qq (if mmx not NULL; no VEX) + * 66 = vp* Vx, Hx, Wx + * + * These are really the same encoding, because 1) V is the same as P when VEX.V + * is not present 2) P and Q are the same as H and W apart from MM/XMM + */ +static inline void gen_binary_int_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, + SSEFunc_0_eppp mmx, SSEFunc_0_eppp xmm, SSEFunc_0_eppp ymm) +{ + assert(!!mmx == !!(decode->e.special == X86_SPECIAL_MMX)); + + if (mmx && (s->prefix & PREFIX_VEX) && !(s->prefix & PREFIX_DATA)) { + /* VEX encoding is not applicable to MMX instructions. 
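+         * A VEX-encoded form without the 0x66 prefix would otherwise select the
+         * MMX variant, and VEX never operates on the MMX register file.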
*/ + gen_illegal_opcode(s); + return; + } + if (!(s->prefix & PREFIX_DATA)) { + mmx(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2); + } else if (!s->vex_l) { + xmm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2); + } else { + ymm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2); + } +} + + +#define BINARY_INT_MMX(uname, lname) \ +static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + gen_binary_int_sse(s, env, decode, \ + gen_helper_##lname##_mmx, \ + gen_helper_##lname##_xmm, \ + gen_helper_##lname##_ymm); \ +} +BINARY_INT_MMX(PUNPCKLBW, punpcklbw) +BINARY_INT_MMX(PUNPCKLWD, punpcklwd) +BINARY_INT_MMX(PUNPCKLDQ, punpckldq) +BINARY_INT_MMX(PACKSSWB, packsswb) +BINARY_INT_MMX(PACKUSWB, packuswb) +BINARY_INT_MMX(PUNPCKHBW, punpckhbw) +BINARY_INT_MMX(PUNPCKHWD, punpckhwd) +BINARY_INT_MMX(PUNPCKHDQ, punpckhdq) +BINARY_INT_MMX(PACKSSDW, packssdw) + +BINARY_INT_MMX(PAVGB, pavgb) +BINARY_INT_MMX(PAVGW, pavgw) +BINARY_INT_MMX(PMADDWD, pmaddwd) +BINARY_INT_MMX(PMULHUW, pmulhuw) +BINARY_INT_MMX(PMULHW, pmulhw) +BINARY_INT_MMX(PMULUDQ, pmuludq) +BINARY_INT_MMX(PSADBW, psadbw) + +BINARY_INT_MMX(PSLLW_r, psllw) +BINARY_INT_MMX(PSLLD_r, pslld) +BINARY_INT_MMX(PSLLQ_r, psllq) +BINARY_INT_MMX(PSRLW_r, psrlw) +BINARY_INT_MMX(PSRLD_r, psrld) +BINARY_INT_MMX(PSRLQ_r, psrlq) +BINARY_INT_MMX(PSRAW_r, psraw) +BINARY_INT_MMX(PSRAD_r, psrad) + +BINARY_INT_MMX(PHADDW, phaddw) +BINARY_INT_MMX(PHADDSW, phaddsw) +BINARY_INT_MMX(PHADDD, phaddd) +BINARY_INT_MMX(PHSUBW, phsubw) +BINARY_INT_MMX(PHSUBSW, phsubsw) +BINARY_INT_MMX(PHSUBD, phsubd) +BINARY_INT_MMX(PMADDUBSW, pmaddubsw) +BINARY_INT_MMX(PSHUFB, pshufb) +BINARY_INT_MMX(PSIGNB, psignb) +BINARY_INT_MMX(PSIGNW, psignw) +BINARY_INT_MMX(PSIGND, psignd) +BINARY_INT_MMX(PMULHRSW, pmulhrsw) + +/* Instructions with no MMX equivalent. */ +#define BINARY_INT_SSE(uname, lname) \ +static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + gen_binary_int_sse(s, env, decode, \ + NULL, \ + gen_helper_##lname##_xmm, \ + gen_helper_##lname##_ymm); \ +} + +/* Instructions with no MMX equivalent. 
*/ +BINARY_INT_SSE(PUNPCKLQDQ, punpcklqdq) +BINARY_INT_SSE(PUNPCKHQDQ, punpckhqdq) +BINARY_INT_SSE(VPACKUSDW, packusdw) +BINARY_INT_SSE(VPERMILPS, vpermilps) +BINARY_INT_SSE(VPERMILPD, vpermilpd) +BINARY_INT_SSE(VMASKMOVPS, vpmaskmovd) +BINARY_INT_SSE(VMASKMOVPD, vpmaskmovq) + +BINARY_INT_SSE(PMULDQ, pmuldq) + +BINARY_INT_SSE(VAESDEC, aesdec) +BINARY_INT_SSE(VAESDECLAST, aesdeclast) +BINARY_INT_SSE(VAESENC, aesenc) +BINARY_INT_SSE(VAESENCLAST, aesenclast) + +#define UNARY_CMP_SSE(uname, lname) \ +static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + if (!s->vex_l) { \ + gen_helper_##lname##_xmm(cpu_env, OP_PTR1, OP_PTR2); \ + } else { \ + gen_helper_##lname##_ymm(cpu_env, OP_PTR1, OP_PTR2); \ + } \ + set_cc_op(s, CC_OP_EFLAGS); \ +} +UNARY_CMP_SSE(VPTEST, ptest) +UNARY_CMP_SSE(VTESTPS, vtestps) +UNARY_CMP_SSE(VTESTPD, vtestpd) + +static inline void gen_unary_int_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, + SSEFunc_0_epp xmm, SSEFunc_0_epp ymm) +{ + if (!s->vex_l) { + xmm(cpu_env, OP_PTR0, OP_PTR2); + } else { + ymm(cpu_env, OP_PTR0, OP_PTR2); + } +} + +#define UNARY_INT_SSE(uname, lname) \ +static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + gen_unary_int_sse(s, env, decode, \ + gen_helper_##lname##_xmm, \ + gen_helper_##lname##_ymm); \ +} + +UNARY_INT_SSE(VPMOVSXBW, pmovsxbw) +UNARY_INT_SSE(VPMOVSXBD, pmovsxbd) +UNARY_INT_SSE(VPMOVSXBQ, pmovsxbq) +UNARY_INT_SSE(VPMOVSXWD, pmovsxwd) +UNARY_INT_SSE(VPMOVSXWQ, pmovsxwq) +UNARY_INT_SSE(VPMOVSXDQ, pmovsxdq) + +UNARY_INT_SSE(VPMOVZXBW, pmovzxbw) +UNARY_INT_SSE(VPMOVZXBD, pmovzxbd) +UNARY_INT_SSE(VPMOVZXBQ, pmovzxbq) +UNARY_INT_SSE(VPMOVZXWD, pmovzxwd) +UNARY_INT_SSE(VPMOVZXWQ, pmovzxwq) +UNARY_INT_SSE(VPMOVZXDQ, pmovzxdq) + +UNARY_INT_SSE(VMOVSLDUP, pmovsldup) +UNARY_INT_SSE(VMOVSHDUP, pmovshdup) +UNARY_INT_SSE(VMOVDDUP, pmovdldup) + +UNARY_INT_SSE(VCVTDQ2PD, cvtdq2pd) +UNARY_INT_SSE(VCVTPD2DQ, cvtpd2dq) +UNARY_INT_SSE(VCVTTPD2DQ, cvttpd2dq) +UNARY_INT_SSE(VCVTDQ2PS, cvtdq2ps) +UNARY_INT_SSE(VCVTPS2DQ, cvtps2dq) +UNARY_INT_SSE(VCVTTPS2DQ, cvttps2dq) +UNARY_INT_SSE(VCVTPH2PS, cvtph2ps) + + +static inline void gen_unary_imm_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, + SSEFunc_0_ppi xmm, SSEFunc_0_ppi ymm) +{ + TCGv_i32 imm = tcg_constant8u_i32(decode->immediate); + if (!s->vex_l) { + xmm(OP_PTR0, OP_PTR1, imm); + } else { + ymm(OP_PTR0, OP_PTR1, imm); + } +} + +#define UNARY_IMM_SSE(uname, lname) \ +static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + gen_unary_imm_sse(s, env, decode, \ + gen_helper_##lname##_xmm, \ + gen_helper_##lname##_ymm); \ +} + +UNARY_IMM_SSE(PSHUFD, pshufd) +UNARY_IMM_SSE(PSHUFHW, pshufhw) +UNARY_IMM_SSE(PSHUFLW, pshuflw) +#define gen_helper_vpermq_xmm NULL +UNARY_IMM_SSE(VPERMQ, vpermq) +UNARY_IMM_SSE(VPERMILPS_i, vpermilps_imm) +UNARY_IMM_SSE(VPERMILPD_i, vpermilpd_imm) + +static inline void gen_unary_imm_fp_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, + SSEFunc_0_eppi xmm, SSEFunc_0_eppi ymm) +{ + TCGv_i32 imm = tcg_constant8u_i32(decode->immediate); + if (!s->vex_l) { + xmm(cpu_env, OP_PTR0, OP_PTR1, imm); + } else { + ymm(cpu_env, OP_PTR0, OP_PTR1, imm); + } +} + +#define UNARY_IMM_FP_SSE(uname, lname) \ +static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + gen_unary_imm_fp_sse(s, env, decode, \ + gen_helper_##lname##_xmm, \ + gen_helper_##lname##_ymm); \ +} + +UNARY_IMM_FP_SSE(VROUNDPS, roundps) 
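+/*
+ * For illustration only, a sketch of what UNARY_IMM_FP_SSE expands to for
+ * VROUNDPS (the real definition is produced by the macro above):
+ *
+ *   static void gen_VROUNDPS(DisasContext *s, CPUX86State *env,
+ *                            X86DecodedInsn *decode)
+ *   {
+ *       gen_unary_imm_fp_sse(s, env, decode,
+ *                            gen_helper_roundps_xmm,
+ *                            gen_helper_roundps_ymm);
+ *   }
+ */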
+UNARY_IMM_FP_SSE(VROUNDPD, roundpd) + +static inline void gen_vexw_avx(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, + SSEFunc_0_eppp d_xmm, SSEFunc_0_eppp q_xmm, + SSEFunc_0_eppp d_ymm, SSEFunc_0_eppp q_ymm) +{ + SSEFunc_0_eppp d = s->vex_l ? d_ymm : d_xmm; + SSEFunc_0_eppp q = s->vex_l ? q_ymm : q_xmm; + SSEFunc_0_eppp fn = s->vex_w ? q : d; + fn(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2); +} + +/* VEX.W affects whether to operate on 32- or 64-bit elements. */ +#define VEXW_AVX(uname, lname) \ +static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + gen_vexw_avx(s, env, decode, \ + gen_helper_##lname##d_xmm, gen_helper_##lname##q_xmm, \ + gen_helper_##lname##d_ymm, gen_helper_##lname##q_ymm); \ +} +VEXW_AVX(VPSLLV, vpsllv) +VEXW_AVX(VPSRLV, vpsrlv) +VEXW_AVX(VPSRAV, vpsrav) +VEXW_AVX(VPMASKMOV, vpmaskmov) + +/* Same as above, but with extra arguments to the helper. */ +static inline void gen_vsib_avx(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, + SSEFunc_0_epppti d_xmm, SSEFunc_0_epppti q_xmm, + SSEFunc_0_epppti d_ymm, SSEFunc_0_epppti q_ymm) +{ + SSEFunc_0_epppti d = s->vex_l ? d_ymm : d_xmm; + SSEFunc_0_epppti q = s->vex_l ? q_ymm : q_xmm; + SSEFunc_0_epppti fn = s->vex_w ? q : d; + TCGv_i32 scale = tcg_constant_i32(decode->mem.scale); + TCGv_ptr index = tcg_temp_new_ptr(); + + /* Pass third input as (index, base, scale) */ + tcg_gen_addi_ptr(index, cpu_env, ZMM_OFFSET(decode->mem.index)); + fn(cpu_env, OP_PTR0, OP_PTR1, index, s->A0, scale); + + /* + * There are two output operands, so zero OP1's high 128 bits + * in the VEX.128 case. + */ + if (!s->vex_l) { + int ymmh_ofs = vector_elem_offset(&decode->op[1], MO_128, 1); + tcg_gen_gvec_dup_imm(MO_64, ymmh_ofs, 16, 16, 0); + } + tcg_temp_free_ptr(index); +} +#define VSIB_AVX(uname, lname) \ +static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ +{ \ + gen_vsib_avx(s, env, decode, \ + gen_helper_##lname##d_xmm, gen_helper_##lname##q_xmm, \ + gen_helper_##lname##d_ymm, gen_helper_##lname##q_ymm); \ +} +VSIB_AVX(VPGATHERD, vpgatherd) +VSIB_AVX(VPGATHERQ, vpgatherq) + +static void gen_ADCOX(DisasContext *s, CPUX86State *env, MemOp ot, int cc_op) +{ + int opposite_cc_op; + TCGv carry_in = NULL; + TCGv carry_out = (cc_op == CC_OP_ADCX ? cpu_cc_dst : cpu_cc_src2); + TCGv zero; + + if (cc_op == s->cc_op || s->cc_op == CC_OP_ADCOX) { + /* Re-use the carry-out from a previous round. */ + carry_in = carry_out; + } else { + /* We don't have a carry-in, get it out of EFLAGS. */ + if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) { + gen_compute_eflags(s); + } + carry_in = s->tmp0; + tcg_gen_extract_tl(carry_in, cpu_cc_src, + ctz32(cc_op == CC_OP_ADCX ? CC_C : CC_O), 1); + } + + switch (ot) { +#ifdef TARGET_X86_64 + case MO_32: + /* If TL is 64-bit just do everything in 64-bit arithmetic. */ + tcg_gen_add_i64(s->T0, s->T0, s->T1); + tcg_gen_add_i64(s->T0, s->T0, carry_in); + tcg_gen_shri_i64(carry_out, s->T0, 32); + break; +#endif + default: + zero = tcg_constant_tl(0); + tcg_gen_add2_tl(s->T0, carry_out, s->T0, zero, carry_in, zero); + tcg_gen_add2_tl(s->T0, carry_out, s->T0, carry_out, s->T1, zero); + break; + } + + opposite_cc_op = cc_op == CC_OP_ADCX ? CC_OP_ADOX : CC_OP_ADCX; + if (s->cc_op == CC_OP_ADCOX || s->cc_op == opposite_cc_op) { + /* Merge with the carry-out from the opposite instruction. 
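+         * CC_OP_ADCOX keeps both flags live: the carry_out selection above
+         * stores CF in CC_DST for ADCX and OF in CC_SRC2 for ADOX.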
*/ + set_cc_op(s, CC_OP_ADCOX); + } else { + set_cc_op(s, cc_op); + } +} + +static void gen_ADCX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_ADCOX(s, env, decode->op[0].ot, CC_OP_ADCX); +} + +static void gen_ADOX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_ADCOX(s, env, decode->op[0].ot, CC_OP_ADOX); +} + +static void gen_ANDN(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[0].ot; + + tcg_gen_andc_tl(s->T0, s->T1, s->T0); + gen_op_update1_cc(s); + set_cc_op(s, CC_OP_LOGICB + ot); +} + +static void gen_BEXTR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[0].ot; + TCGv bound = tcg_constant_tl(ot == MO_64 ? 63 : 31); + TCGv zero = tcg_constant_tl(0); + TCGv mone = tcg_constant_tl(-1); + + /* + * Extract START, and shift the operand. + * Shifts larger than operand size get zeros. + */ + tcg_gen_ext8u_tl(s->A0, s->T1); + if (TARGET_LONG_BITS == 64 && ot == MO_32) { + tcg_gen_ext32u_tl(s->T0, s->T0); + } + tcg_gen_shr_tl(s->T0, s->T0, s->A0); + + tcg_gen_movcond_tl(TCG_COND_LEU, s->T0, s->A0, bound, s->T0, zero); + + /* + * Extract the LEN into an inverse mask. Lengths larger than + * operand size get all zeros, length 0 gets all ones. + */ + tcg_gen_extract_tl(s->A0, s->T1, 8, 8); + tcg_gen_shl_tl(s->T1, mone, s->A0); + tcg_gen_movcond_tl(TCG_COND_LEU, s->T1, s->A0, bound, s->T1, zero); + tcg_gen_andc_tl(s->T0, s->T0, s->T1); + + gen_op_update1_cc(s); + set_cc_op(s, CC_OP_LOGICB + ot); +} + +static void gen_BLSI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[0].ot; + + tcg_gen_mov_tl(cpu_cc_src, s->T0); + tcg_gen_neg_tl(s->T1, s->T0); + tcg_gen_and_tl(s->T0, s->T0, s->T1); + tcg_gen_mov_tl(cpu_cc_dst, s->T0); + set_cc_op(s, CC_OP_BMILGB + ot); +} + +static void gen_BLSMSK(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[0].ot; + + tcg_gen_mov_tl(cpu_cc_src, s->T0); + tcg_gen_subi_tl(s->T1, s->T0, 1); + tcg_gen_xor_tl(s->T0, s->T0, s->T1); + tcg_gen_mov_tl(cpu_cc_dst, s->T0); + set_cc_op(s, CC_OP_BMILGB + ot); +} + +static void gen_BLSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[0].ot; + + tcg_gen_mov_tl(cpu_cc_src, s->T0); + tcg_gen_subi_tl(s->T1, s->T0, 1); + tcg_gen_and_tl(s->T0, s->T0, s->T1); + tcg_gen_mov_tl(cpu_cc_dst, s->T0); + set_cc_op(s, CC_OP_BMILGB + ot); +} + +static void gen_BZHI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[0].ot; + TCGv bound = tcg_constant_tl(ot == MO_64 ? 63 : 31); + TCGv zero = tcg_constant_tl(0); + TCGv mone = tcg_constant_tl(-1); + + tcg_gen_ext8u_tl(s->T1, s->T1); + + /* + * Note that since we're using BMILG (in order to get O + * cleared) we need to store the inverse into C. 
+ */ + tcg_gen_setcond_tl(TCG_COND_LEU, cpu_cc_src, s->T1, bound); + + tcg_gen_shl_tl(s->A0, mone, s->T1); + tcg_gen_movcond_tl(TCG_COND_LEU, s->A0, s->T1, bound, s->A0, zero); + tcg_gen_andc_tl(s->T0, s->T0, s->A0); + + gen_op_update1_cc(s); + set_cc_op(s, CC_OP_BMILGB + ot); +} + +static void gen_CRC32(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[2].ot; + + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); + gen_helper_crc32(s->T0, s->tmp2_i32, s->T1, tcg_constant_i32(8 << ot)); +} + +static void gen_CVTPI2Px(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_enter_mmx(s); + if (s->prefix & PREFIX_DATA) { + gen_helper_cvtpi2pd(cpu_env, OP_PTR0, OP_PTR2); + } else { + gen_helper_cvtpi2ps(cpu_env, OP_PTR0, OP_PTR2); + } +} + +static void gen_CVTPx2PI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_enter_mmx(s); + if (s->prefix & PREFIX_DATA) { + gen_helper_cvtpd2pi(cpu_env, OP_PTR0, OP_PTR2); + } else { + gen_helper_cvtps2pi(cpu_env, OP_PTR0, OP_PTR2); + } +} + +static void gen_CVTTPx2PI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_enter_mmx(s); + if (s->prefix & PREFIX_DATA) { + gen_helper_cvttpd2pi(cpu_env, OP_PTR0, OP_PTR2); + } else { + gen_helper_cvttps2pi(cpu_env, OP_PTR0, OP_PTR2); + } +} + +static void gen_EMMS(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_helper_emms(cpu_env); +} + +static void gen_EXTRQ_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + TCGv_i32 length = tcg_constant_i32(decode->immediate & 63); + TCGv_i32 index = tcg_constant_i32((decode->immediate >> 8) & 63); + + gen_helper_extrq_i(cpu_env, OP_PTR0, index, length); +} + +static void gen_EXTRQ_r(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_helper_extrq_r(cpu_env, OP_PTR0, OP_PTR2); +} + +static void gen_INSERTQ_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + TCGv_i32 length = tcg_constant_i32(decode->immediate & 63); + TCGv_i32 index = tcg_constant_i32((decode->immediate >> 8) & 63); + + gen_helper_insertq_i(cpu_env, OP_PTR0, OP_PTR1, index, length); +} + +static void gen_INSERTQ_r(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_helper_insertq_r(cpu_env, OP_PTR0, OP_PTR2); +} + +static void gen_LDMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + if (s->vex_l) { + gen_illegal_opcode(s); + return; + } + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T1); + gen_helper_ldmxcsr(cpu_env, s->tmp2_i32); +} + +static void gen_MASKMOV(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + tcg_gen_mov_tl(s->A0, cpu_regs[R_EDI]); + gen_extu(s->aflag, s->A0); + gen_add_A0_ds_seg(s); + + if (s->prefix & PREFIX_DATA) { + gen_helper_maskmov_xmm(cpu_env, OP_PTR1, OP_PTR2, s->A0); + } else { + gen_helper_maskmov_mmx(cpu_env, OP_PTR1, OP_PTR2, s->A0); + } +} + +static void gen_MOVBE(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[0].ot; + + /* M operand type does not load/store */ + if (decode->e.op0 == X86_TYPE_M) { + tcg_gen_qemu_st_tl(s->T0, s->A0, s->mem_index, ot | MO_BE); + } else { + tcg_gen_qemu_ld_tl(s->T0, s->A0, s->mem_index, ot | MO_BE); + } +} + +static void gen_MOVD_from(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[2].ot; + + switch (ot) { + case MO_32: +#ifdef TARGET_X86_64 + tcg_gen_ld32u_tl(s->T0, cpu_env, decode->op[2].offset); + break; + case MO_64: +#endif + tcg_gen_ld_tl(s->T0, cpu_env, decode->op[2].offset); + break; + 
default: + abort(); + } +} + +static void gen_MOVD_to(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[2].ot; + int vec_len = vector_len(s, decode); + int lo_ofs = vector_elem_offset(&decode->op[0], ot, 0); + + tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0); + + switch (ot) { + case MO_32: +#ifdef TARGET_X86_64 + tcg_gen_st32_tl(s->T1, cpu_env, lo_ofs); + break; + case MO_64: +#endif + tcg_gen_st_tl(s->T1, cpu_env, lo_ofs); + break; + default: + g_assert_not_reached(); + } +} + +static void gen_MOVDQ(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_store_sse(s, decode, decode->op[2].offset); +} + +static void gen_MOVMSK(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + typeof(gen_helper_movmskps_ymm) *ps, *pd, *fn; + ps = s->vex_l ? gen_helper_movmskps_ymm : gen_helper_movmskps_xmm; + pd = s->vex_l ? gen_helper_movmskpd_ymm : gen_helper_movmskpd_xmm; + fn = s->prefix & PREFIX_DATA ? pd : ps; + fn(s->tmp2_i32, cpu_env, OP_PTR2); + tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); +} + +static void gen_MOVQ(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int vec_len = vector_len(s, decode); + int lo_ofs = vector_elem_offset(&decode->op[0], MO_64, 0); + + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[2].offset); + if (decode->op[0].has_ea) { + tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ); + } else { + /* + * tcg_gen_gvec_dup_i64(MO_64, op0.offset, 8, vec_len, s->tmp1_64) would + * seem to work, but it does not on big-endian platforms; the cleared parts + * are always at higher addresses, but cross-endian emulation inverts the + * byte order so that the cleared parts need to be at *lower* addresses. + * Because oprsz is 8, we see this here even for SSE; but more in general, + * it disqualifies using oprsz < maxsz to emulate VEX128. + */ + tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, lo_ofs); + } +} + +static void gen_MOVq_dq(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_enter_mmx(s); + /* Otherwise the same as any other movq. */ + return gen_MOVQ(s, env, decode); +} + +static void gen_MULX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[0].ot; + + /* low part of result in VEX.vvvv, high in MODRM */ + switch (ot) { + default: + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); + tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); + tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32, + s->tmp2_i32, s->tmp3_i32); + tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], s->tmp2_i32); + tcg_gen_extu_i32_tl(s->T0, s->tmp3_i32); + break; +#ifdef TARGET_X86_64 + case MO_64: + tcg_gen_mulu2_i64(cpu_regs[s->vex_v], s->T0, s->T0, s->T1); + break; +#endif + } + +} + +static void gen_PALIGNR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + TCGv_i32 imm = tcg_constant8u_i32(decode->immediate); + if (!(s->prefix & PREFIX_DATA)) { + gen_helper_palignr_mmx(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2, imm); + } else if (!s->vex_l) { + gen_helper_palignr_xmm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2, imm); + } else { + gen_helper_palignr_ymm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2, imm); + } +} + +static void gen_PANDN(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int vec_len = vector_len(s, decode); + + /* Careful, operand order is reversed! 
*/ + tcg_gen_gvec_andc(MO_64, + decode->op[0].offset, decode->op[2].offset, + decode->op[1].offset, vec_len, vec_len); +} + +static void gen_PCMPESTRI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + TCGv_i32 imm = tcg_constant8u_i32(decode->immediate); + gen_helper_pcmpestri_xmm(cpu_env, OP_PTR1, OP_PTR2, imm); + set_cc_op(s, CC_OP_EFLAGS); +} + +static void gen_PCMPESTRM(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + TCGv_i32 imm = tcg_constant8u_i32(decode->immediate); + gen_helper_pcmpestrm_xmm(cpu_env, OP_PTR1, OP_PTR2, imm); + set_cc_op(s, CC_OP_EFLAGS); + if ((s->prefix & PREFIX_VEX) && !s->vex_l) { + tcg_gen_gvec_dup_imm(MO_64, offsetof(CPUX86State, xmm_regs[0].ZMM_X(1)), + 16, 16, 0); + } +} + +static void gen_PCMPISTRI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + TCGv_i32 imm = tcg_constant8u_i32(decode->immediate); + gen_helper_pcmpistri_xmm(cpu_env, OP_PTR1, OP_PTR2, imm); + set_cc_op(s, CC_OP_EFLAGS); +} + +static void gen_PCMPISTRM(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + TCGv_i32 imm = tcg_constant8u_i32(decode->immediate); + gen_helper_pcmpistrm_xmm(cpu_env, OP_PTR1, OP_PTR2, imm); + set_cc_op(s, CC_OP_EFLAGS); + if ((s->prefix & PREFIX_VEX) && !s->vex_l) { + tcg_gen_gvec_dup_imm(MO_64, offsetof(CPUX86State, xmm_regs[0].ZMM_X(1)), + 16, 16, 0); + } +} + +static void gen_PDEP(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[1].ot; + if (ot < MO_64) { + tcg_gen_ext32u_tl(s->T0, s->T0); + } + gen_helper_pdep(s->T0, s->T0, s->T1); +} + +static void gen_PEXT(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[1].ot; + if (ot < MO_64) { + tcg_gen_ext32u_tl(s->T0, s->T0); + } + gen_helper_pext(s->T0, s->T0, s->T1); +} + +static inline void gen_pextr(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, MemOp ot) +{ + int vec_len = vector_len(s, decode); + int mask = (vec_len >> ot) - 1; + int val = decode->immediate & mask; + + switch (ot) { + case MO_8: + tcg_gen_ld8u_tl(s->T0, cpu_env, vector_elem_offset(&decode->op[1], ot, val)); + break; + case MO_16: + tcg_gen_ld16u_tl(s->T0, cpu_env, vector_elem_offset(&decode->op[1], ot, val)); + break; + case MO_32: +#ifdef TARGET_X86_64 + tcg_gen_ld32u_tl(s->T0, cpu_env, vector_elem_offset(&decode->op[1], ot, val)); + break; + case MO_64: +#endif + tcg_gen_ld_tl(s->T0, cpu_env, vector_elem_offset(&decode->op[1], ot, val)); + break; + default: + abort(); + } +} + +static void gen_PEXTRB(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_pextr(s, env, decode, MO_8); +} + +static void gen_PEXTRW(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_pextr(s, env, decode, MO_16); +} + +static void gen_PEXTR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[0].ot; + gen_pextr(s, env, decode, ot); +} + +static inline void gen_pinsr(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, MemOp ot) +{ + int vec_len = vector_len(s, decode); + int mask = (vec_len >> ot) - 1; + int val = decode->immediate & mask; + + if (decode->op[1].offset != decode->op[0].offset) { + assert(vec_len == 16); + gen_store_sse(s, decode, decode->op[1].offset); + } + + switch (ot) { + case MO_8: + tcg_gen_st8_tl(s->T1, cpu_env, vector_elem_offset(&decode->op[0], ot, val)); + break; + case MO_16: + tcg_gen_st16_tl(s->T1, cpu_env, vector_elem_offset(&decode->op[0], ot, val)); + break; + case MO_32: +#ifdef TARGET_X86_64 + tcg_gen_st32_tl(s->T1, cpu_env, 
vector_elem_offset(&decode->op[0], ot, val)); + break; + case MO_64: +#endif + tcg_gen_st_tl(s->T1, cpu_env, vector_elem_offset(&decode->op[0], ot, val)); + break; + default: + abort(); + } +} + +static void gen_PINSRB(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_pinsr(s, env, decode, MO_8); +} + +static void gen_PINSRW(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_pinsr(s, env, decode, MO_16); +} + +static void gen_PINSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_pinsr(s, env, decode, decode->op[2].ot); +} + +static void gen_pmovmskb_i64(TCGv_i64 d, TCGv_i64 s) +{ + TCGv_i64 t = tcg_temp_new_i64(); + + tcg_gen_andi_i64(d, s, 0x8080808080808080ull); + + /* + * After each shift+or pair: + * 0: a.......b.......c.......d.......e.......f.......g.......h....... + * 7: ab......bc......cd......de......ef......fg......gh......h....... + * 14: abcd....bcde....cdef....defg....efgh....fgh.....gh......h....... + * 28: abcdefghbcdefgh.cdefgh..defgh...efgh....fgh.....gh......h....... + * The result is left in the high bits of the word. + */ + tcg_gen_shli_i64(t, d, 7); + tcg_gen_or_i64(d, d, t); + tcg_gen_shli_i64(t, d, 14); + tcg_gen_or_i64(d, d, t); + tcg_gen_shli_i64(t, d, 28); + tcg_gen_or_i64(d, d, t); +} + +static void gen_pmovmskb_vec(unsigned vece, TCGv_vec d, TCGv_vec s) +{ + TCGv_vec t = tcg_temp_new_vec_matching(d); + TCGv_vec m = tcg_constant_vec_matching(d, MO_8, 0x80); + + /* See above */ + tcg_gen_and_vec(vece, d, s, m); + tcg_gen_shli_vec(vece, t, d, 7); + tcg_gen_or_vec(vece, d, d, t); + tcg_gen_shli_vec(vece, t, d, 14); + tcg_gen_or_vec(vece, d, d, t); + tcg_gen_shli_vec(vece, t, d, 28); + tcg_gen_or_vec(vece, d, d, t); +} + +#ifdef TARGET_X86_64 +#define TCG_TARGET_HAS_extract2_tl TCG_TARGET_HAS_extract2_i64 +#else +#define TCG_TARGET_HAS_extract2_tl TCG_TARGET_HAS_extract2_i32 +#endif + +static void gen_PMOVMSKB(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 }; + static const GVecGen2 g = { + .fni8 = gen_pmovmskb_i64, + .fniv = gen_pmovmskb_vec, + .opt_opc = vecop_list, + .vece = MO_64, + .prefer_i64 = TCG_TARGET_REG_BITS == 64 + }; + MemOp ot = decode->op[2].ot; + int vec_len = vector_len(s, decode); + TCGv t = tcg_temp_new(); + + tcg_gen_gvec_2(offsetof(CPUX86State, xmm_t0) + xmm_offset(ot), decode->op[2].offset, + vec_len, vec_len, &g); + tcg_gen_ld8u_tl(s->T0, cpu_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len - 1))); + while (vec_len > 8) { + vec_len -= 8; + if (TCG_TARGET_HAS_extract2_tl) { + /* + * Load the next byte of the result into the high byte of T. + * TCG does a similar expansion of deposit to shl+extract2; by + * loading the whole word, the shift left is avoided. + */ +#ifdef TARGET_X86_64 + tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, xmm_t0.ZMM_Q((vec_len - 1) / 8))); +#else + tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, xmm_t0.ZMM_L((vec_len - 1) / 4))); +#endif + + tcg_gen_extract2_tl(s->T0, t, s->T0, TARGET_LONG_BITS - 8); + } else { + /* + * The _previous_ value is deposited into bits 8 and higher of t. Because + * those bits are known to be zero after ld8u, this becomes a shift+or + * if deposit is not available. 
+ */ + tcg_gen_ld8u_tl(t, cpu_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len - 1))); + tcg_gen_deposit_tl(s->T0, t, s->T0, 8, TARGET_LONG_BITS - 8); + } + } + tcg_temp_free(t); +} + +static void gen_PSHUFW(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + TCGv_i32 imm = tcg_constant8u_i32(decode->immediate); + gen_helper_pshufw_mmx(OP_PTR0, OP_PTR1, imm); +} + +static void gen_PSRLW_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int vec_len = vector_len(s, decode); + + if (decode->immediate >= 16) { + tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0); + } else { + tcg_gen_gvec_shri(MO_16, + decode->op[0].offset, decode->op[1].offset, + decode->immediate, vec_len, vec_len); + } +} + +static void gen_PSLLW_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int vec_len = vector_len(s, decode); + + if (decode->immediate >= 16) { + tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0); + } else { + tcg_gen_gvec_shli(MO_16, + decode->op[0].offset, decode->op[1].offset, + decode->immediate, vec_len, vec_len); + } +} + +static void gen_PSRAW_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int vec_len = vector_len(s, decode); + + if (decode->immediate >= 16) { + decode->immediate = 15; + } + tcg_gen_gvec_sari(MO_16, + decode->op[0].offset, decode->op[1].offset, + decode->immediate, vec_len, vec_len); +} + +static void gen_PSRLD_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int vec_len = vector_len(s, decode); + + if (decode->immediate >= 32) { + tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0); + } else { + tcg_gen_gvec_shri(MO_32, + decode->op[0].offset, decode->op[1].offset, + decode->immediate, vec_len, vec_len); + } +} + +static void gen_PSLLD_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int vec_len = vector_len(s, decode); + + if (decode->immediate >= 32) { + tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0); + } else { + tcg_gen_gvec_shli(MO_32, + decode->op[0].offset, decode->op[1].offset, + decode->immediate, vec_len, vec_len); + } +} + +static void gen_PSRAD_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int vec_len = vector_len(s, decode); + + if (decode->immediate >= 32) { + decode->immediate = 31; + } + tcg_gen_gvec_sari(MO_32, + decode->op[0].offset, decode->op[1].offset, + decode->immediate, vec_len, vec_len); +} + +static void gen_PSRLQ_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int vec_len = vector_len(s, decode); + + if (decode->immediate >= 64) { + tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0); + } else { + tcg_gen_gvec_shri(MO_64, + decode->op[0].offset, decode->op[1].offset, + decode->immediate, vec_len, vec_len); + } +} + +static void gen_PSLLQ_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int vec_len = vector_len(s, decode); + + if (decode->immediate >= 64) { + tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0); + } else { + tcg_gen_gvec_shli(MO_64, + decode->op[0].offset, decode->op[1].offset, + decode->immediate, vec_len, vec_len); + } +} + +static TCGv_ptr make_imm8u_xmm_vec(uint8_t imm, int vec_len) +{ + MemOp ot = vec_len == 16 ? 
MO_128 : MO_256; + TCGv_i32 imm_v = tcg_constant8u_i32(imm); + TCGv_ptr ptr = tcg_temp_new_ptr(); + + tcg_gen_gvec_dup_imm(MO_64, offsetof(CPUX86State, xmm_t0) + xmm_offset(ot), + vec_len, vec_len, 0); + + tcg_gen_addi_ptr(ptr, cpu_env, offsetof(CPUX86State, xmm_t0)); + tcg_gen_st_i32(imm_v, cpu_env, offsetof(CPUX86State, xmm_t0.ZMM_L(0))); + return ptr; +} + +static void gen_PSRLDQ_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int vec_len = vector_len(s, decode); + TCGv_ptr imm_vec = make_imm8u_xmm_vec(decode->immediate, vec_len); + + if (s->vex_l) { + gen_helper_psrldq_ymm(cpu_env, OP_PTR0, OP_PTR1, imm_vec); + } else { + gen_helper_psrldq_xmm(cpu_env, OP_PTR0, OP_PTR1, imm_vec); + } + tcg_temp_free_ptr(imm_vec); +} + +static void gen_PSLLDQ_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int vec_len = vector_len(s, decode); + TCGv_ptr imm_vec = make_imm8u_xmm_vec(decode->immediate, vec_len); + + if (s->vex_l) { + gen_helper_pslldq_ymm(cpu_env, OP_PTR0, OP_PTR1, imm_vec); + } else { + gen_helper_pslldq_xmm(cpu_env, OP_PTR0, OP_PTR1, imm_vec); + } + tcg_temp_free_ptr(imm_vec); +} + +static void gen_RORX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[0].ot; + int b = decode->immediate; + + if (ot == MO_64) { + tcg_gen_rotri_tl(s->T0, s->T0, b & 63); + } else { + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); + tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, b & 31); + tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); + } +} + +static void gen_SARX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[0].ot; + int mask; + + mask = ot == MO_64 ? 63 : 31; + tcg_gen_andi_tl(s->T1, s->T1, mask); + if (ot != MO_64) { + tcg_gen_ext32s_tl(s->T0, s->T0); + } + tcg_gen_sar_tl(s->T0, s->T0, s->T1); +} + +static void gen_SHLX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[0].ot; + int mask; + + mask = ot == MO_64 ? 63 : 31; + tcg_gen_andi_tl(s->T1, s->T1, mask); + tcg_gen_shl_tl(s->T0, s->T0, s->T1); +} + +static void gen_SHRX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[0].ot; + int mask; + + mask = ot == MO_64 ? 
63 : 31; + tcg_gen_andi_tl(s->T1, s->T1, mask); + if (ot != MO_64) { + tcg_gen_ext32u_tl(s->T0, s->T0); + } + tcg_gen_shr_tl(s->T0, s->T0, s->T1); +} + +static void gen_VAESKEYGEN(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + TCGv_i32 imm = tcg_constant8u_i32(decode->immediate); + assert(!s->vex_l); + gen_helper_aeskeygenassist_xmm(cpu_env, OP_PTR0, OP_PTR1, imm); +} + +static void gen_STMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + if (s->vex_l) { + gen_illegal_opcode(s); + return; + } + gen_helper_update_mxcsr(cpu_env); + tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, mxcsr)); +} + +static void gen_VAESIMC(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + assert(!s->vex_l); + gen_helper_aesimc_xmm(cpu_env, OP_PTR0, OP_PTR2); +} + +/* + * 00 = v*ps Vps, Hps, Wpd + * 66 = v*pd Vpd, Hpd, Wps + * f3 = v*ss Vss, Hss, Wps + * f2 = v*sd Vsd, Hsd, Wps + */ +#define SSE_CMP(x) { \ + gen_helper_ ## x ## ps ## _xmm, gen_helper_ ## x ## pd ## _xmm, \ + gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, \ + gen_helper_ ## x ## ps ## _ymm, gen_helper_ ## x ## pd ## _ymm} +static const SSEFunc_0_eppp gen_helper_cmp_funcs[32][6] = { + SSE_CMP(cmpeq), + SSE_CMP(cmplt), + SSE_CMP(cmple), + SSE_CMP(cmpunord), + SSE_CMP(cmpneq), + SSE_CMP(cmpnlt), + SSE_CMP(cmpnle), + SSE_CMP(cmpord), + + SSE_CMP(cmpequ), + SSE_CMP(cmpnge), + SSE_CMP(cmpngt), + SSE_CMP(cmpfalse), + SSE_CMP(cmpnequ), + SSE_CMP(cmpge), + SSE_CMP(cmpgt), + SSE_CMP(cmptrue), + + SSE_CMP(cmpeqs), + SSE_CMP(cmpltq), + SSE_CMP(cmpleq), + SSE_CMP(cmpunords), + SSE_CMP(cmpneqq), + SSE_CMP(cmpnltq), + SSE_CMP(cmpnleq), + SSE_CMP(cmpords), + + SSE_CMP(cmpequs), + SSE_CMP(cmpngeq), + SSE_CMP(cmpngtq), + SSE_CMP(cmpfalses), + SSE_CMP(cmpnequs), + SSE_CMP(cmpgeq), + SSE_CMP(cmpgtq), + SSE_CMP(cmptrues), +}; +#undef SSE_CMP + +static void gen_VCMP(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int index = decode->immediate & (s->prefix & PREFIX_VEX ? 31 : 7); + int b = + s->prefix & PREFIX_REPZ ? 2 /* ss */ : + s->prefix & PREFIX_REPNZ ? 3 /* sd */ : + !!(s->prefix & PREFIX_DATA) /* pd */ + (s->vex_l << 2); + + gen_helper_cmp_funcs[index][b](cpu_env, OP_PTR0, OP_PTR1, OP_PTR2); +} + +static void gen_VCOMI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + SSEFunc_0_epp fn; + fn = s->prefix & PREFIX_DATA ? gen_helper_comisd : gen_helper_comiss; + fn(cpu_env, OP_PTR1, OP_PTR2); + set_cc_op(s, CC_OP_EFLAGS); +} + +static void gen_VCVTfp2fp(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_unary_fp_sse(s, env, decode, + gen_helper_cvtpd2ps_xmm, gen_helper_cvtps2pd_xmm, + gen_helper_cvtpd2ps_ymm, gen_helper_cvtps2pd_ymm, + gen_helper_cvtsd2ss, gen_helper_cvtss2sd); +} + +static void gen_VCVTPS2PH(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_unary_imm_fp_sse(s, env, decode, + gen_helper_cvtps2ph_xmm, + gen_helper_cvtps2ph_ymm); + /* + * VCVTPS2PH is the only instruction that performs an operation on a + * register source and then *stores* into memory. 
+ */ + if (decode->op[0].has_ea) { + gen_store_sse(s, decode, decode->op[0].offset); + } +} + +static void gen_VCVTSI2Sx(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int vec_len = vector_len(s, decode); + TCGv_i32 in; + + tcg_gen_gvec_mov(MO_64, decode->op[0].offset, decode->op[1].offset, vec_len, vec_len); + +#ifdef TARGET_X86_64 + MemOp ot = decode->op[2].ot; + if (ot == MO_64) { + if (s->prefix & PREFIX_REPNZ) { + gen_helper_cvtsq2sd(cpu_env, OP_PTR0, s->T1); + } else { + gen_helper_cvtsq2ss(cpu_env, OP_PTR0, s->T1); + } + return; + } + in = s->tmp2_i32; + tcg_gen_trunc_tl_i32(in, s->T1); +#else + in = s->T1; +#endif + + if (s->prefix & PREFIX_REPNZ) { + gen_helper_cvtsi2sd(cpu_env, OP_PTR0, in); + } else { + gen_helper_cvtsi2ss(cpu_env, OP_PTR0, in); + } +} + +static inline void gen_VCVTtSx2SI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, + SSEFunc_i_ep ss2si, SSEFunc_l_ep ss2sq, + SSEFunc_i_ep sd2si, SSEFunc_l_ep sd2sq) +{ + TCGv_i32 out; + +#ifdef TARGET_X86_64 + MemOp ot = decode->op[0].ot; + if (ot == MO_64) { + if (s->prefix & PREFIX_REPNZ) { + sd2sq(s->T0, cpu_env, OP_PTR2); + } else { + ss2sq(s->T0, cpu_env, OP_PTR2); + } + return; + } + + out = s->tmp2_i32; +#else + out = s->T0; +#endif + if (s->prefix & PREFIX_REPNZ) { + sd2si(out, cpu_env, OP_PTR2); + } else { + ss2si(out, cpu_env, OP_PTR2); + } +#ifdef TARGET_X86_64 + tcg_gen_extu_i32_tl(s->T0, out); +#endif +} + +#ifndef TARGET_X86_64 +#define gen_helper_cvtss2sq NULL +#define gen_helper_cvtsd2sq NULL +#define gen_helper_cvttss2sq NULL +#define gen_helper_cvttsd2sq NULL +#endif + +static void gen_VCVTSx2SI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_VCVTtSx2SI(s, env, decode, + gen_helper_cvtss2si, gen_helper_cvtss2sq, + gen_helper_cvtsd2si, gen_helper_cvtsd2sq); +} + +static void gen_VCVTTSx2SI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_VCVTtSx2SI(s, env, decode, + gen_helper_cvttss2si, gen_helper_cvttss2sq, + gen_helper_cvttsd2si, gen_helper_cvttsd2sq); +} + +static void gen_VEXTRACTx128(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int mask = decode->immediate & 1; + int src_ofs = vector_elem_offset(&decode->op[1], MO_128, mask); + if (decode->op[0].has_ea) { + /* VEX-only instruction, no alignment requirements. 
*/ + gen_sto_env_A0(s, src_ofs, false); + } else { + tcg_gen_gvec_mov(MO_64, decode->op[0].offset, src_ofs, 16, 16); + } +} + +static void gen_VEXTRACTPS(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_pextr(s, env, decode, MO_32); +} + +static void gen_vinsertps(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int val = decode->immediate; + int dest_word = (val >> 4) & 3; + int new_mask = (val & 15) | (1 << dest_word); + int vec_len = 16; + + assert(!s->vex_l); + + if (new_mask == 15) { + /* All zeroes except possibly for the inserted element */ + tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0); + } else if (decode->op[1].offset != decode->op[0].offset) { + gen_store_sse(s, decode, decode->op[1].offset); + } + + if (new_mask != (val & 15)) { + tcg_gen_st_i32(s->tmp2_i32, cpu_env, + vector_elem_offset(&decode->op[0], MO_32, dest_word)); + } + + if (new_mask != 15) { + TCGv_i32 zero = tcg_constant_i32(0); /* float32_zero */ + int i; + for (i = 0; i < 4; i++) { + if ((val >> i) & 1) { + tcg_gen_st_i32(zero, cpu_env, + vector_elem_offset(&decode->op[0], MO_32, i)); + } + } + } +} + +static void gen_VINSERTPS_r(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int val = decode->immediate; + tcg_gen_ld_i32(s->tmp2_i32, cpu_env, + vector_elem_offset(&decode->op[2], MO_32, (val >> 6) & 3)); + gen_vinsertps(s, env, decode); +} + +static void gen_VINSERTPS_m(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); + gen_vinsertps(s, env, decode); +} + +static void gen_VINSERTx128(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int mask = decode->immediate & 1; + tcg_gen_gvec_mov(MO_64, + decode->op[0].offset + offsetof(YMMReg, YMM_X(mask)), + decode->op[2].offset + offsetof(YMMReg, YMM_X(0)), 16, 16); + tcg_gen_gvec_mov(MO_64, + decode->op[0].offset + offsetof(YMMReg, YMM_X(!mask)), + decode->op[1].offset + offsetof(YMMReg, YMM_X(!mask)), 16, 16); +} + +static inline void gen_maskmov(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, + SSEFunc_0_eppt xmm, SSEFunc_0_eppt ymm) +{ + if (!s->vex_l) { + xmm(cpu_env, OP_PTR2, OP_PTR1, s->A0); + } else { + ymm(cpu_env, OP_PTR2, OP_PTR1, s->A0); + } +} + +static void gen_VMASKMOVPD_st(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_maskmov(s, env, decode, gen_helper_vpmaskmovq_st_xmm, gen_helper_vpmaskmovq_st_ymm); +} + +static void gen_VMASKMOVPS_st(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_maskmov(s, env, decode, gen_helper_vpmaskmovd_st_xmm, gen_helper_vpmaskmovd_st_ymm); +} + +static void gen_VMOVHPx_ld(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_ldq_env_A0(s, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1))); + if (decode->op[0].offset != decode->op[1].offset) { + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0))); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0))); + } +} + +static void gen_VMOVHPx_st(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_stq_env_A0(s, decode->op[2].offset + offsetof(XMMReg, XMM_Q(1))); +} + +static void gen_VMOVHPx(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + if (decode->op[0].offset != decode->op[2].offset) { + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(1))); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, decode->op[0].offset + offsetof(XMMReg, 
XMM_Q(1))); + } + if (decode->op[0].offset != decode->op[1].offset) { + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0))); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0))); + } +} + +static void gen_VMOVHLPS(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(1))); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0))); + if (decode->op[0].offset != decode->op[1].offset) { + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(1))); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1))); + } +} + +static void gen_VMOVLHPS(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[2].offset); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1))); + if (decode->op[0].offset != decode->op[1].offset) { + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0))); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0))); + } +} + +/* + * Note that MOVLPx supports 256-bit operation unlike MOVHLPx, MOVLHPx, MOXHPx. + * Use a gvec move to move everything above the bottom 64 bits. + */ + +static void gen_VMOVLPx(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int vec_len = vector_len(s, decode); + + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(0))); + tcg_gen_gvec_mov(MO_64, decode->op[0].offset, decode->op[1].offset, vec_len, vec_len); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0))); +} + +static void gen_VMOVLPx_ld(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int vec_len = vector_len(s, decode); + + tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ); + tcg_gen_gvec_mov(MO_64, decode->op[0].offset, decode->op[1].offset, vec_len, vec_len); + tcg_gen_st_i64(s->tmp1_i64, OP_PTR0, offsetof(ZMMReg, ZMM_Q(0))); +} + +static void gen_VMOVLPx_st(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + tcg_gen_ld_i64(s->tmp1_i64, OP_PTR2, offsetof(ZMMReg, ZMM_Q(0))); + tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ); +} + +static void gen_VMOVSD_ld(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + TCGv_i64 zero = tcg_constant_i64(0); + + tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ); + tcg_gen_st_i64(zero, OP_PTR0, offsetof(ZMMReg, ZMM_Q(1))); + tcg_gen_st_i64(s->tmp1_i64, OP_PTR0, offsetof(ZMMReg, ZMM_Q(0))); +} + +static void gen_VMOVSS(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int vec_len = vector_len(s, decode); + + tcg_gen_ld_i32(s->tmp2_i32, OP_PTR2, offsetof(ZMMReg, ZMM_L(0))); + tcg_gen_gvec_mov(MO_64, decode->op[0].offset, decode->op[1].offset, vec_len, vec_len); + tcg_gen_st_i32(s->tmp2_i32, OP_PTR0, offsetof(ZMMReg, ZMM_L(0))); +} + +static void gen_VMOVSS_ld(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int vec_len = vector_len(s, decode); + + tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); + tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0); + tcg_gen_st_i32(s->tmp2_i32, OP_PTR0, offsetof(ZMMReg, ZMM_L(0))); +} + +static void gen_VMOVSS_st(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + 
tcg_gen_ld_i32(s->tmp2_i32, OP_PTR2, offsetof(ZMMReg, ZMM_L(0))); + tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); +} + +static void gen_VPMASKMOV_st(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + if (s->vex_w) { + gen_VMASKMOVPD_st(s, env, decode); + } else { + gen_VMASKMOVPS_st(s, env, decode); + } +} + +static void gen_VPERMD(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + assert(s->vex_l); + gen_helper_vpermd_ymm(OP_PTR0, OP_PTR1, OP_PTR2); +} + +static void gen_VPERM2x128(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + TCGv_i32 imm = tcg_constant8u_i32(decode->immediate); + assert(s->vex_l); + gen_helper_vpermdq_ymm(OP_PTR0, OP_PTR1, OP_PTR2, imm); +} + +static void gen_VPHMINPOSUW(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + assert(!s->vex_l); + gen_helper_phminposuw_xmm(cpu_env, OP_PTR0, OP_PTR2); +} + +static void gen_VROUNDSD(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + TCGv_i32 imm = tcg_constant8u_i32(decode->immediate); + assert(!s->vex_l); + gen_helper_roundsd_xmm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2, imm); +} + +static void gen_VROUNDSS(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + TCGv_i32 imm = tcg_constant8u_i32(decode->immediate); + assert(!s->vex_l); + gen_helper_roundss_xmm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2, imm); +} + +static void gen_VSHUF(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + TCGv_i32 imm = tcg_constant_i32(decode->immediate); + SSEFunc_0_pppi ps, pd, fn; + ps = s->vex_l ? gen_helper_shufps_ymm : gen_helper_shufps_xmm; + pd = s->vex_l ? gen_helper_shufpd_ymm : gen_helper_shufpd_xmm; + fn = s->prefix & PREFIX_DATA ? pd : ps; + fn(OP_PTR0, OP_PTR1, OP_PTR2, imm); +} + +static void gen_VUCOMI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + SSEFunc_0_epp fn; + fn = s->prefix & PREFIX_DATA ? gen_helper_ucomisd : gen_helper_ucomiss; + fn(cpu_env, OP_PTR1, OP_PTR2); + set_cc_op(s, CC_OP_EFLAGS); +} + +static void gen_VZEROALL(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + TCGv_ptr ptr = tcg_temp_new_ptr(); + + tcg_gen_addi_ptr(ptr, cpu_env, offsetof(CPUX86State, xmm_regs)); + gen_helper_memset(ptr, ptr, tcg_constant_i32(0), + tcg_constant_ptr(CPU_NB_REGS * sizeof(ZMMReg))); + tcg_temp_free_ptr(ptr); +} + +static void gen_VZEROUPPER(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + int i; + + for (i = 0; i < CPU_NB_REGS; i++) { + int offset = offsetof(CPUX86State, xmm_regs[i].ZMM_X(1)); + tcg_gen_gvec_dup_imm(MO_64, offset, 16, 16, 0); + } +} diff --git a/target/i386/tcg/excp_helper.c b/target/i386/tcg/excp_helper.c index bdae887d0a..7c3c8dc7fe 100644 --- a/target/i386/tcg/excp_helper.c +++ b/target/i386/tcg/excp_helper.c @@ -25,13 +25,13 @@ #include "exec/helper-proto.h" #include "helper-tcg.h" -void QEMU_NORETURN helper_raise_interrupt(CPUX86State *env, int intno, +G_NORETURN void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend) { raise_interrupt(env, intno, 1, 0, next_eip_addend); } -void QEMU_NORETURN helper_raise_exception(CPUX86State *env, int exception_index) +G_NORETURN void helper_raise_exception(CPUX86State *env, int exception_index) { raise_exception(env, exception_index); } @@ -87,10 +87,11 @@ static int check_exception(CPUX86State *env, int intno, int *error_code, * env->eip value AFTER the interrupt instruction. It is only relevant if * is_int is TRUE. 
*/ -static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno, - int is_int, int error_code, - int next_eip_addend, - uintptr_t retaddr) +static G_NORETURN +void raise_interrupt2(CPUX86State *env, int intno, + int is_int, int error_code, + int next_eip_addend, + uintptr_t retaddr) { CPUState *cs = env_cpu(env); @@ -111,31 +112,44 @@ static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno, /* shortcuts to generate exceptions */ -void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int, - int error_code, int next_eip_addend) +G_NORETURN void raise_interrupt(CPUX86State *env, int intno, int is_int, + int error_code, int next_eip_addend) { raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0); } -void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index, - int error_code) +G_NORETURN void raise_exception_err(CPUX86State *env, int exception_index, + int error_code) { raise_interrupt2(env, exception_index, 0, error_code, 0, 0); } -void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index, - int error_code, uintptr_t retaddr) +G_NORETURN void raise_exception_err_ra(CPUX86State *env, int exception_index, + int error_code, uintptr_t retaddr) { raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr); } -void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index) +G_NORETURN void raise_exception(CPUX86State *env, int exception_index) { raise_interrupt2(env, exception_index, 0, 0, 0, 0); } -void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index, - uintptr_t retaddr) +G_NORETURN void raise_exception_ra(CPUX86State *env, int exception_index, + uintptr_t retaddr) { raise_interrupt2(env, exception_index, 0, 0, 0, retaddr); } + +G_NORETURN void handle_unaligned_access(CPUX86State *env, vaddr vaddr, + MMUAccessType access_type, + uintptr_t retaddr) +{ + /* + * Unaligned accesses are currently only triggered by SSE/AVX + * instructions that impose alignment requirements on memory + * operands. These instructions raise #GP(0) upon accessing an + * unaligned address. 
+ */ + raise_exception_ra(env, EXCP0D_GPF, retaddr); +} diff --git a/target/i386/tcg/fpu_helper.c b/target/i386/tcg/fpu_helper.c index d6bfac5554..64dca26840 100644 --- a/target/i386/tcg/fpu_helper.c +++ b/target/i386/tcg/fpu_helper.c @@ -32,7 +32,8 @@ #define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d) #define ST1 ST(1) -#define FPU_RC_MASK 0xc00 +#define FPU_RC_SHIFT 10 +#define FPU_RC_MASK (3 << FPU_RC_SHIFT) #define FPU_RC_NEAR 0x000 #define FPU_RC_DOWN 0x400 #define FPU_RC_UP 0x800 @@ -434,24 +435,37 @@ void helper_fldl_ST0(CPUX86State *env, uint64_t val) merge_exception_flags(env, old_flags); } +static FloatX80RoundPrec tmp_maximise_precision(float_status *st) +{ + FloatX80RoundPrec old = get_floatx80_rounding_precision(st); + set_floatx80_rounding_precision(floatx80_precision_x, st); + return old; +} + void helper_fildl_ST0(CPUX86State *env, int32_t val) { int new_fpstt; + FloatX80RoundPrec old = tmp_maximise_precision(&env->fp_status); new_fpstt = (env->fpstt - 1) & 7; env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status); env->fpstt = new_fpstt; env->fptags[new_fpstt] = 0; /* validate stack entry */ + + set_floatx80_rounding_precision(old, &env->fp_status); } void helper_fildll_ST0(CPUX86State *env, int64_t val) { int new_fpstt; + FloatX80RoundPrec old = tmp_maximise_precision(&env->fp_status); new_fpstt = (env->fpstt - 1) & 7; env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status); env->fpstt = new_fpstt; env->fptags[new_fpstt] = 0; /* validate stack entry */ + + set_floatx80_rounding_precision(old, &env->fp_status); } uint32_t helper_fsts_ST0(CPUX86State *env) @@ -870,28 +884,26 @@ uint32_t helper_fnstcw(CPUX86State *env) } #ifndef USE_HARD_FPU +static void set_x86_rounding_mode(unsigned mode, float_status *status) +{ + static FloatRoundMode x86_round_mode[4] = { + float_round_nearest_even, + float_round_down, + float_round_up, + float_round_to_zero + }; + assert(mode < ARRAY_SIZE(x86_round_mode)); + set_float_rounding_mode(x86_round_mode[mode], status); +} + void update_fp_status(CPUX86State *env) { - FloatRoundMode rnd_mode; + int rnd_mode; FloatX80RoundPrec rnd_prec; /* set rounding mode */ - switch (env->fpuc & FPU_RC_MASK) { - default: - case FPU_RC_NEAR: - rnd_mode = float_round_nearest_even; - break; - case FPU_RC_DOWN: - rnd_mode = float_round_down; - break; - case FPU_RC_UP: - rnd_mode = float_round_up; - break; - case FPU_RC_CHOP: - rnd_mode = float_round_to_zero; - break; - } - set_float_rounding_mode(rnd_mode, &env->fp_status); + rnd_mode = (env->fpuc & FPU_RC_MASK) >> FPU_RC_SHIFT; + set_x86_rounding_mode(rnd_mode, &env->fp_status); switch ((env->fpuc >> 8) & 3) { case 0: @@ -2652,7 +2664,7 @@ static void do_fsave(CPUX86State *env, target_ulong ptr, int data32, do_fstenv(env, ptr, data32, retaddr); - ptr += (14 << data32); + ptr += (target_ulong)14 << data32; for (i = 0; i < 8; i++) { tmp = ST(i); do_fstt(env, tmp, ptr, retaddr); @@ -2674,7 +2686,7 @@ static void do_frstor(CPUX86State *env, target_ulong ptr, int data32, int i; do_fldenv(env, ptr, data32, retaddr); - ptr += (14 << data32); + ptr += (target_ulong)14 << data32; for (i = 0; i < 8; i++) { tmp = do_fldt(env, ptr, retaddr); @@ -2688,22 +2700,8 @@ void helper_frstor(CPUX86State *env, target_ulong ptr, int data32) do_frstor(env, ptr, data32, GETPC()); } - #ifndef USE_HARD_FPU - -#if defined(CONFIG_USER_ONLY) -void cpu_x86_fsave(CPUX86State *env, target_ulong ptr, int data32) -{ - do_fsave(env, ptr, data32, 0); -} - -void cpu_x86_frstor(CPUX86State *env, target_ulong ptr, int 
data32) -{ - do_frstor(env, ptr, data32, 0); -} -#endif - #define XO(X) offsetof(X86XSaveArea, X) static void do_xsave_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra) @@ -2761,6 +2759,22 @@ static void do_xsave_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra) } } +static void do_xsave_ymmh(CPUX86State *env, target_ulong ptr, uintptr_t ra) +{ + int i, nb_xmm_regs; + + if (env->hflags & HF_CS64_MASK) { + nb_xmm_regs = 16; + } else { + nb_xmm_regs = 8; + } + + for (i = 0; i < nb_xmm_regs; i++, ptr += 16) { + cpu_stq_data_ra(env, ptr, env->xmm_regs[i].ZMM_Q(2), ra); + cpu_stq_data_ra(env, ptr + 8, env->xmm_regs[i].ZMM_Q(3), ra); + } +} + static void do_xsave_bndregs(CPUX86State *env, target_ulong ptr, uintptr_t ra) { target_ulong addr = ptr + offsetof(XSaveBNDREG, bnd_regs); @@ -2853,6 +2867,9 @@ static void do_xsave(CPUX86State *env, target_ulong ptr, uint64_t rfbm, if (opt & XSTATE_SSE_MASK) { do_xsave_sse(env, ptr, ra); } + if (opt & XSTATE_YMM_MASK) { + do_xsave_ymmh(env, ptr + XO(avx_state), ra); + } if (opt & XSTATE_BNDREGS_MASK) { do_xsave_bndregs(env, ptr + XO(bndreg_state), ra); } @@ -2927,6 +2944,54 @@ static void do_xrstor_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra) } } +static void do_clear_sse(CPUX86State *env) +{ + int i, nb_xmm_regs; + + if (env->hflags & HF_CS64_MASK) { + nb_xmm_regs = 16; + } else { + nb_xmm_regs = 8; + } + + for (i = 0; i < nb_xmm_regs; i++) { + env->xmm_regs[i].ZMM_Q(0) = 0; + env->xmm_regs[i].ZMM_Q(1) = 0; + } +} + +static void do_xrstor_ymmh(CPUX86State *env, target_ulong ptr, uintptr_t ra) +{ + int i, nb_xmm_regs; + + if (env->hflags & HF_CS64_MASK) { + nb_xmm_regs = 16; + } else { + nb_xmm_regs = 8; + } + + for (i = 0; i < nb_xmm_regs; i++, ptr += 16) { + env->xmm_regs[i].ZMM_Q(2) = cpu_ldq_data_ra(env, ptr, ra); + env->xmm_regs[i].ZMM_Q(3) = cpu_ldq_data_ra(env, ptr + 8, ra); + } +} + +static void do_clear_ymmh(CPUX86State *env) +{ + int i, nb_xmm_regs; + + if (env->hflags & HF_CS64_MASK) { + nb_xmm_regs = 16; + } else { + nb_xmm_regs = 8; + } + + for (i = 0; i < nb_xmm_regs; i++) { + env->xmm_regs[i].ZMM_Q(2) = 0; + env->xmm_regs[i].ZMM_Q(3) = 0; + } +} + static void do_xrstor_bndregs(CPUX86State *env, target_ulong ptr, uintptr_t ra) { target_ulong addr = ptr + offsetof(XSaveBNDREG, bnd_regs); @@ -2977,21 +3042,8 @@ void helper_fxrstor(CPUX86State *env, target_ulong ptr) do_fxrstor(env, ptr, GETPC()); } -#if defined(CONFIG_USER_ONLY) -void cpu_x86_fxsave(CPUX86State *env, target_ulong ptr) +static void do_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm, uintptr_t ra) { - do_fxsave(env, ptr, 0); -} - -void cpu_x86_fxrstor(CPUX86State *env, target_ulong ptr) -{ - do_fxrstor(env, ptr, 0); -} -#endif - -void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm) -{ - uintptr_t ra = GETPC(); uint64_t xstate_bv, xcomp_bv, reserve0; rfbm &= env->xcr0; @@ -3046,9 +3098,14 @@ void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm) if (xstate_bv & XSTATE_SSE_MASK) { do_xrstor_sse(env, ptr, ra); } else { - /* ??? When AVX is implemented, we may have to be more - selective in the clearing. 
*/ - memset(env->xmm_regs, 0, sizeof(env->xmm_regs)); + do_clear_sse(env); + } + } + if (rfbm & XSTATE_YMM_MASK) { + if (xstate_bv & XSTATE_YMM_MASK) { + do_xrstor_ymmh(env, ptr + XO(avx_state), ra); + } else { + do_clear_ymmh(env); } } if (rfbm & XSTATE_BNDREGS_MASK) { @@ -3084,6 +3141,43 @@ void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm) #undef XO +void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm) +{ + do_xrstor(env, ptr, rfbm, GETPC()); +} + +#if defined(CONFIG_USER_ONLY) +void cpu_x86_fsave(CPUX86State *env, target_ulong ptr, int data32) +{ + do_fsave(env, ptr, data32, 0); +} + +void cpu_x86_frstor(CPUX86State *env, target_ulong ptr, int data32) +{ + do_frstor(env, ptr, data32, 0); +} + +void cpu_x86_fxsave(CPUX86State *env, target_ulong ptr) +{ + do_fxsave(env, ptr, 0); +} + +void cpu_x86_fxrstor(CPUX86State *env, target_ulong ptr) +{ + do_fxrstor(env, ptr, 0); +} + +void cpu_x86_xsave(CPUX86State *env, target_ulong ptr) +{ + do_xsave(env, ptr, -1, get_xinuse(env), -1, 0); +} + +void cpu_x86_xrstor(CPUX86State *env, target_ulong ptr) +{ + do_xrstor(env, ptr, -1, 0); +} +#endif + uint64_t helper_xgetbv(CPUX86State *env, uint32_t ecx) { /* The OS must have enabled XSAVE. */ @@ -3133,6 +3227,7 @@ void helper_xsetbv(CPUX86State *env, uint32_t ecx, uint64_t mask) env->xcr0 = mask; cpu_sync_bndcs_hflags(env); + cpu_sync_avx_hflag(env); return; do_gpf: @@ -3143,11 +3238,8 @@ void helper_xsetbv(CPUX86State *env, uint32_t ecx, uint64_t mask) /* XXX: optimize by storing fptt and fptags in the static cpu state */ #define SSE_DAZ 0x0040 -#define SSE_RC_MASK 0x6000 -#define SSE_RC_NEAR 0x0000 -#define SSE_RC_DOWN 0x2000 -#define SSE_RC_UP 0x4000 -#define SSE_RC_CHOP 0x6000 +#define SSE_RC_SHIFT 13 +#define SSE_RC_MASK (3 << SSE_RC_SHIFT) #define SSE_FZ 0x8000 void update_mxcsr_status(CPUX86State *env) @@ -3156,22 +3248,8 @@ void update_mxcsr_status(CPUX86State *env) int rnd_type; /* set rounding mode */ - switch (mxcsr & SSE_RC_MASK) { - default: - case SSE_RC_NEAR: - rnd_type = float_round_nearest_even; - break; - case SSE_RC_DOWN: - rnd_type = float_round_down; - break; - case SSE_RC_UP: - rnd_type = float_round_up; - break; - case SSE_RC_CHOP: - rnd_type = float_round_to_zero; - break; - } - set_float_rounding_mode(rnd_type, &env->sse_status); + rnd_type = (mxcsr & SSE_RC_MASK) >> SSE_RC_SHIFT; + set_x86_rounding_mode(rnd_type, &env->sse_status); /* Set exception flags. */ set_float_exception_flags((mxcsr & FPUS_IE ? float_flag_invalid : 0) | @@ -3231,15 +3309,13 @@ void helper_emms(CPUX86State *env) *(uint32_t *)(env->fptags + 4) = 0x01010101; } -/* XXX: suppress */ -void helper_movq(CPUX86State *env, void *d, void *s) -{ - *(uint64_t *)d = *(uint64_t *)s; -} - #define SHIFT 0 #include "ops_sse.h" #define SHIFT 1 #include "ops_sse.h" + +#define SHIFT 2 +#include "ops_sse.h" + #endif diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h index 2510cc244e..cd1723389a 100644 --- a/target/i386/tcg/helper-tcg.h +++ b/target/i386/tcg/helper-tcg.h @@ -38,12 +38,9 @@ QEMU_BUILD_BUG_ON(TCG_PHYS_ADDR_BITS > TARGET_PHYS_ADDR_SPACE_BITS); * @cpu: vCPU the interrupt is to be handled by. 
*/ void x86_cpu_do_interrupt(CPUState *cpu); +#ifndef CONFIG_USER_ONLY bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req); - -/* helper.c */ -bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); +#endif void breakpoint_handler(CPUState *cs); @@ -61,27 +58,44 @@ static inline target_long lshift(target_long x, int n) void tcg_x86_init(void); /* excp_helper.c */ -void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index); -void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index, - uintptr_t retaddr); -void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index, - int error_code); -void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index, - int error_code, uintptr_t retaddr); -void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int, - int error_code, int next_eip_addend); +G_NORETURN void raise_exception(CPUX86State *env, int exception_index); +G_NORETURN void raise_exception_ra(CPUX86State *env, int exception_index, + uintptr_t retaddr); +G_NORETURN void raise_exception_err(CPUX86State *env, int exception_index, + int error_code); +G_NORETURN void raise_exception_err_ra(CPUX86State *env, int exception_index, + int error_code, uintptr_t retaddr); +G_NORETURN void raise_interrupt(CPUX86State *nenv, int intno, int is_int, + int error_code, int next_eip_addend); +G_NORETURN void handle_unaligned_access(CPUX86State *env, vaddr vaddr, + MMUAccessType access_type, + uintptr_t retaddr); +#ifdef CONFIG_USER_ONLY +void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr, + MMUAccessType access_type, + bool maperr, uintptr_t ra); +void x86_cpu_record_sigbus(CPUState *cs, vaddr addr, + MMUAccessType access_type, uintptr_t ra); +#else +bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); +G_NORETURN void x86_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr); +#endif /* cc_helper.c */ extern const uint8_t parity_table[256]; /* misc_helper.c */ void cpu_load_eflags(CPUX86State *env, int eflags, int update_mask); -void do_pause(CPUX86State *env) QEMU_NORETURN; +G_NORETURN void do_pause(CPUX86State *env); /* sysemu/svm_helper.c */ #ifndef CONFIG_USER_ONLY -void QEMU_NORETURN cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, - uint64_t exit_info_1, uintptr_t retaddr); +G_NORETURN void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, + uint64_t exit_info_1, uintptr_t retaddr); void do_vmexit(CPUX86State *env); #endif diff --git a/target/i386/tcg/int_helper.c b/target/i386/tcg/int_helper.c index 87fa7280ee..599ac968b0 100644 --- a/target/i386/tcg/int_helper.c +++ b/target/i386/tcg/int_helper.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "exec/exec-all.h" #include "qemu/host-utils.h" diff --git a/target/i386/tcg/mem_helper.c b/target/i386/tcg/mem_helper.c index 2da3cd14b6..e3cdafd2d4 100644 --- a/target/i386/tcg/mem_helper.c +++ b/target/i386/tcg/mem_helper.c @@ -67,7 +67,7 @@ void helper_cmpxchg8b(CPUX86State *env, target_ulong a0) { uintptr_t ra = GETPC(); int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx); + MemOpIdx oi = make_memop_idx(MO_TEUQ, mem_idx); oldv = cpu_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra); } @@ -136,7 +136,7 @@ void helper_cmpxchg16b(CPUX86State *env, target_ulong a0) 
Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]); int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx); Int128 oldv = cpu_atomic_cmpxchgo_le_mmu(env, a0, cmpv, newv, oi, ra); if (int128_eq(oldv, cmpv)) { diff --git a/target/i386/tcg/misc_helper.c b/target/i386/tcg/misc_helper.c index baffa5d7ba..5f7a3061ca 100644 --- a/target/i386/tcg/misc_helper.c +++ b/target/i386/tcg/misc_helper.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" @@ -80,7 +81,7 @@ void helper_rdtscp(CPUX86State *env) env->regs[R_ECX] = (uint32_t)(env->tsc_aux); } -void QEMU_NORETURN helper_rdpmc(CPUX86State *env) +G_NORETURN void helper_rdpmc(CPUX86State *env) { if (((env->cr[4] & CR4_PCE_MASK) == 0 ) && ((env->hflags & HF_CPL_MASK) != 0)) { @@ -93,7 +94,7 @@ void QEMU_NORETURN helper_rdpmc(CPUX86State *env) raise_exception_err(env, EXCP06_ILLOP, 0); } -void QEMU_NORETURN do_pause(CPUX86State *env) +G_NORETURN void do_pause(CPUX86State *env) { CPUState *cs = env_cpu(env); @@ -102,7 +103,7 @@ void QEMU_NORETURN do_pause(CPUX86State *env) cpu_loop_exit(cs); } -void QEMU_NORETURN helper_pause(CPUX86State *env, int next_eip_addend) +G_NORETURN void helper_pause(CPUX86State *env, int next_eip_addend) { cpu_svm_check_intercept_param(env, SVM_EXIT_PAUSE, 0, GETPC()); env->eip += next_eip_addend; @@ -110,14 +111,6 @@ void QEMU_NORETURN helper_pause(CPUX86State *env, int next_eip_addend) do_pause(env); } -void QEMU_NORETURN helper_debug(CPUX86State *env) -{ - CPUState *cs = env_cpu(env); - - cs->exception_index = EXCP_DEBUG; - cpu_loop_exit(cs); -} - uint64_t helper_rdpkru(CPUX86State *env, uint32_t ecx) { if ((env->cr[4] & CR4_PKE_MASK) == 0) { diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c index 3ed20ca31d..539189b4d1 100644 --- a/target/i386/tcg/seg_helper.c +++ b/target/i386/tcg/seg_helper.c @@ -28,6 +28,42 @@ #include "helper-tcg.h" #include "seg_helper.h" +int get_pg_mode(CPUX86State *env) +{ + int pg_mode = 0; + if (!(env->cr[0] & CR0_PG_MASK)) { + return 0; + } + if (env->cr[0] & CR0_WP_MASK) { + pg_mode |= PG_MODE_WP; + } + if (env->cr[4] & CR4_PAE_MASK) { + pg_mode |= PG_MODE_PAE; + if (env->efer & MSR_EFER_NXE) { + pg_mode |= PG_MODE_NXE; + } + } + if (env->cr[4] & CR4_PSE_MASK) { + pg_mode |= PG_MODE_PSE; + } + if (env->cr[4] & CR4_SMEP_MASK) { + pg_mode |= PG_MODE_SMEP; + } + if (env->hflags & HF_LMA_MASK) { + pg_mode |= PG_MODE_LMA; + if (env->cr[4] & CR4_PKE_MASK) { + pg_mode |= PG_MODE_PKE; + } + if (env->cr[4] & CR4_PKS_MASK) { + pg_mode |= PG_MODE_PKS; + } + if (env->cr[4] & CR4_LA57_MASK) { + pg_mode |= PG_MODE_LA57; + } + } + return pg_mode; +} + /* return non zero if error */ static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr, uint32_t *e2_ptr, int selector, @@ -794,7 +830,9 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level) { X86CPU *cpu = env_archcpu(env); - int index; + int index, pg_mode; + target_ulong rsp; + int32_t sext; #if 0 printf("TR: base=" TARGET_FMT_lx " limit=%x\n", @@ -808,7 +846,17 @@ static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level) if ((index + 7) > env->tr.limit) { raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc); } - return cpu_ldq_kernel(env, env->tr.base + index); + + rsp = 
cpu_ldq_kernel(env, env->tr.base + index); + + /* test virtual address sign extension */ + pg_mode = get_pg_mode(env); + sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47); + if (sext != 0 && sext != -1) { + raise_exception_err(env, EXCP0C_STACK, 0); + } + + return rsp; } /* 64 bit interrupt */ @@ -929,9 +977,7 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int, e2); env->eip = offset; } -#endif -#ifdef TARGET_X86_64 void helper_sysret(CPUX86State *env, int dflag) { int cpl, selector; @@ -984,7 +1030,7 @@ void helper_sysret(CPUX86State *env, int dflag) DESC_W_MASK | DESC_A_MASK); } } -#endif +#endif /* TARGET_X86_64 */ /* real mode interrupt */ static void do_interrupt_real(CPUX86State *env, int intno, int is_int, @@ -1112,76 +1158,6 @@ void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw) do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw); } -bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request) -{ - X86CPU *cpu = X86_CPU(cs); - CPUX86State *env = &cpu->env; - int intno; - - interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request); - if (!interrupt_request) { - return false; - } - - /* Don't process multiple interrupt requests in a single call. - * This is required to make icount-driven execution deterministic. - */ - switch (interrupt_request) { -#if !defined(CONFIG_USER_ONLY) - case CPU_INTERRUPT_POLL: - cs->interrupt_request &= ~CPU_INTERRUPT_POLL; - apic_poll_irq(cpu->apic_state); - break; -#endif - case CPU_INTERRUPT_SIPI: - do_cpu_sipi(cpu); - break; - case CPU_INTERRUPT_SMI: - cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0); - cs->interrupt_request &= ~CPU_INTERRUPT_SMI; -#ifdef CONFIG_USER_ONLY - cpu_abort(CPU(cpu), "SMI interrupt: cannot enter SMM in user-mode"); -#else - do_smm_enter(cpu); -#endif /* CONFIG_USER_ONLY */ - break; - case CPU_INTERRUPT_NMI: - cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0); - cs->interrupt_request &= ~CPU_INTERRUPT_NMI; - env->hflags2 |= HF2_NMI_MASK; - do_interrupt_x86_hardirq(env, EXCP02_NMI, 1); - break; - case CPU_INTERRUPT_MCE: - cs->interrupt_request &= ~CPU_INTERRUPT_MCE; - do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0); - break; - case CPU_INTERRUPT_HARD: - cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0); - cs->interrupt_request &= ~(CPU_INTERRUPT_HARD | - CPU_INTERRUPT_VIRQ); - intno = cpu_get_pic_interrupt(env); - qemu_log_mask(CPU_LOG_TB_IN_ASM, - "Servicing hardware INT=0x%02x\n", intno); - do_interrupt_x86_hardirq(env, intno, 1); - break; -#if !defined(CONFIG_USER_ONLY) - case CPU_INTERRUPT_VIRQ: - /* FIXME: this should respect TPR */ - cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0); - intno = x86_ldl_phys(cs, env->vm_vmcb - + offsetof(struct vmcb, control.int_vector)); - qemu_log_mask(CPU_LOG_TB_IN_ASM, - "Servicing virtual hardware INT=0x%02x\n", intno); - do_interrupt_x86_hardirq(env, intno, 1); - cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; - break; -#endif - } - - /* Ensure that no TB jump will be modified as the program flow was changed. 
*/ - return true; -} - void helper_lldt(CPUX86State *env, int selector) { SegmentCache *dt; @@ -1528,14 +1504,12 @@ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip, } /* real mode call */ -void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1, - int shift, int next_eip) +void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip, + int shift, uint32_t next_eip) { - int new_eip; uint32_t esp, esp_mask; target_ulong ssp; - new_eip = new_eip1; esp = env->regs[R_ESP]; esp_mask = get_sp_mask(env->segs[R_SS].flags); ssp = env->segs[R_SS].base; diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c index b6d940e04e..55bd1194d3 100644 --- a/target/i386/tcg/sysemu/excp_helper.c +++ b/target/i386/tcg/sysemu/excp_helper.c @@ -19,194 +19,278 @@ #include "qemu/osdep.h" #include "cpu.h" +#include "exec/exec-all.h" #include "tcg/helper-tcg.h" -int get_pg_mode(CPUX86State *env) +typedef struct TranslateParams { + target_ulong addr; + target_ulong cr3; + int pg_mode; + int mmu_idx; + int ptw_idx; + MMUAccessType access_type; +} TranslateParams; + +typedef struct TranslateResult { + hwaddr paddr; + int prot; + int page_size; +} TranslateResult; + +typedef enum TranslateFaultStage2 { + S2_NONE, + S2_GPA, + S2_GPT, +} TranslateFaultStage2; + +typedef struct TranslateFault { + int exception_index; + int error_code; + target_ulong cr2; + TranslateFaultStage2 stage2; +} TranslateFault; + +typedef struct PTETranslate { + CPUX86State *env; + TranslateFault *err; + int ptw_idx; + void *haddr; + hwaddr gaddr; +} PTETranslate; + +static bool ptw_translate(PTETranslate *inout, hwaddr addr) { - int pg_mode = 0; - if (env->cr[0] & CR0_WP_MASK) { - pg_mode |= PG_MODE_WP; + CPUTLBEntryFull *full; + int flags; + + inout->gaddr = addr; + flags = probe_access_full(inout->env, addr, MMU_DATA_STORE, + inout->ptw_idx, true, &inout->haddr, &full, 0); + + if (unlikely(flags & TLB_INVALID_MASK)) { + TranslateFault *err = inout->err; + + assert(inout->ptw_idx == MMU_NESTED_IDX); + *err = (TranslateFault){ + .error_code = inout->env->error_code, + .cr2 = addr, + .stage2 = S2_GPT, + }; + return false; } - if (env->cr[4] & CR4_PAE_MASK) { - pg_mode |= PG_MODE_PAE; - } - if (env->cr[4] & CR4_PSE_MASK) { - pg_mode |= PG_MODE_PSE; - } - if (env->cr[4] & CR4_PKE_MASK) { - pg_mode |= PG_MODE_PKE; - } - if (env->cr[4] & CR4_PKS_MASK) { - pg_mode |= PG_MODE_PKS; - } - if (env->cr[4] & CR4_SMEP_MASK) { - pg_mode |= PG_MODE_SMEP; - } - if (env->cr[4] & CR4_LA57_MASK) { - pg_mode |= PG_MODE_LA57; - } - if (env->hflags & HF_LMA_MASK) { - pg_mode |= PG_MODE_LMA; - } - if (env->efer & MSR_EFER_NXE) { - pg_mode |= PG_MODE_NXE; - } - return pg_mode; + return true; } -#define PG_ERROR_OK (-1) - -typedef hwaddr (*MMUTranslateFunc)(CPUState *cs, hwaddr gphys, MMUAccessType access_type, - int *prot); - -#define GET_HPHYS(cs, gpa, access_type, prot) \ - (get_hphys_func ? 
get_hphys_func(cs, gpa, access_type, prot) : gpa) - -static int mmu_translate(CPUState *cs, hwaddr addr, MMUTranslateFunc get_hphys_func, - uint64_t cr3, int is_write1, int mmu_idx, int pg_mode, - hwaddr *xlat, int *page_size, int *prot) +static inline uint32_t ptw_ldl(const PTETranslate *in) { - X86CPU *cpu = X86_CPU(cs); - CPUX86State *env = &cpu->env; - uint64_t ptep, pte; - int32_t a20_mask; - target_ulong pde_addr, pte_addr; - int error_code = 0; - int is_dirty, is_write, is_user; - uint64_t rsvd_mask = PG_ADDRESS_MASK & ~MAKE_64BIT_MASK(0, cpu->phys_bits); - uint32_t page_offset; + if (likely(in->haddr)) { + return ldl_p(in->haddr); + } + return cpu_ldl_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, 0); +} + +static inline uint64_t ptw_ldq(const PTETranslate *in) +{ + if (likely(in->haddr)) { + return ldq_p(in->haddr); + } + return cpu_ldq_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, 0); +} + +/* + * Note that we can use a 32-bit cmpxchg for all page table entries, + * even 64-bit ones, because PG_PRESENT_MASK, PG_ACCESSED_MASK and + * PG_DIRTY_MASK are all in the low 32 bits. + */ +static bool ptw_setl_slow(const PTETranslate *in, uint32_t old, uint32_t new) +{ + uint32_t cmp; + + /* Does x86 really perform a rmw cycle on mmio for ptw? */ + start_exclusive(); + cmp = cpu_ldl_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, 0); + if (cmp == old) { + cpu_stl_mmuidx_ra(in->env, in->gaddr, new, in->ptw_idx, 0); + } + end_exclusive(); + return cmp == old; +} + +static inline bool ptw_setl(const PTETranslate *in, uint32_t old, uint32_t set) +{ + if (set & ~old) { + uint32_t new = old | set; + if (likely(in->haddr)) { + old = cpu_to_le32(old); + new = cpu_to_le32(new); + return qatomic_cmpxchg((uint32_t *)in->haddr, old, new) == old; + } + return ptw_setl_slow(in, old, new); + } + return true; +} + +static bool mmu_translate(CPUX86State *env, const TranslateParams *in, + TranslateResult *out, TranslateFault *err) +{ + const int32_t a20_mask = x86_get_a20_mask(env); + const target_ulong addr = in->addr; + const int pg_mode = in->pg_mode; + const bool is_user = (in->mmu_idx == MMU_USER_IDX); + const MMUAccessType access_type = in->access_type; + uint64_t ptep, pte, rsvd_mask; + PTETranslate pte_trans = { + .env = env, + .err = err, + .ptw_idx = in->ptw_idx, + }; + hwaddr pte_addr, paddr; uint32_t pkr; + int page_size; - is_user = (mmu_idx == MMU_USER_IDX); - is_write = is_write1 & 1; - a20_mask = x86_get_a20_mask(env); - + restart_all: + rsvd_mask = ~MAKE_64BIT_MASK(0, env_archcpu(env)->phys_bits); + rsvd_mask &= PG_ADDRESS_MASK; if (!(pg_mode & PG_MODE_NXE)) { rsvd_mask |= PG_NX_MASK; } if (pg_mode & PG_MODE_PAE) { - uint64_t pde, pdpe; - target_ulong pdpe_addr; - #ifdef TARGET_X86_64 - if (env->hflags & HF_LMA_MASK) { - bool la57 = pg_mode & PG_MODE_LA57; - uint64_t pml5e_addr, pml5e; - uint64_t pml4e_addr, pml4e; - int32_t sext; - - /* test virtual address sign extension */ - sext = la57 ? 
(int64_t)addr >> 56 : (int64_t)addr >> 47; - if (get_hphys_func && sext != 0 && sext != -1) { - env->error_code = 0; - cs->exception_index = EXCP0D_GPF; - return 1; - } - - if (la57) { - pml5e_addr = ((cr3 & ~0xfff) + - (((addr >> 48) & 0x1ff) << 3)) & a20_mask; - pml5e_addr = GET_HPHYS(cs, pml5e_addr, MMU_DATA_STORE, NULL); - pml5e = x86_ldq_phys(cs, pml5e_addr); - if (!(pml5e & PG_PRESENT_MASK)) { + if (pg_mode & PG_MODE_LMA) { + if (pg_mode & PG_MODE_LA57) { + /* + * Page table level 5 + */ + pte_addr = ((in->cr3 & ~0xfff) + + (((addr >> 48) & 0x1ff) << 3)) & a20_mask; + if (!ptw_translate(&pte_trans, pte_addr)) { + return false; + } + restart_5: + pte = ptw_ldq(&pte_trans); + if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } - if (pml5e & (rsvd_mask | PG_PSE_MASK)) { + if (pte & (rsvd_mask | PG_PSE_MASK)) { goto do_fault_rsvd; } - if (!(pml5e & PG_ACCESSED_MASK)) { - pml5e |= PG_ACCESSED_MASK; - x86_stl_phys_notdirty(cs, pml5e_addr, pml5e); + if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) { + goto restart_5; } - ptep = pml5e ^ PG_NX_MASK; + ptep = pte ^ PG_NX_MASK; } else { - pml5e = cr3; + pte = in->cr3; ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; } - pml4e_addr = ((pml5e & PG_ADDRESS_MASK) + - (((addr >> 39) & 0x1ff) << 3)) & a20_mask; - pml4e_addr = GET_HPHYS(cs, pml4e_addr, MMU_DATA_STORE, NULL); - pml4e = x86_ldq_phys(cs, pml4e_addr); - if (!(pml4e & PG_PRESENT_MASK)) { + /* + * Page table level 4 + */ + pte_addr = ((pte & PG_ADDRESS_MASK) + + (((addr >> 39) & 0x1ff) << 3)) & a20_mask; + if (!ptw_translate(&pte_trans, pte_addr)) { + return false; + } + restart_4: + pte = ptw_ldq(&pte_trans); + if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } - if (pml4e & (rsvd_mask | PG_PSE_MASK)) { + if (pte & (rsvd_mask | PG_PSE_MASK)) { goto do_fault_rsvd; } - if (!(pml4e & PG_ACCESSED_MASK)) { - pml4e |= PG_ACCESSED_MASK; - x86_stl_phys_notdirty(cs, pml4e_addr, pml4e); + if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) { + goto restart_4; } - ptep &= pml4e ^ PG_NX_MASK; - pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) & - a20_mask; - pdpe_addr = GET_HPHYS(cs, pdpe_addr, MMU_DATA_STORE, NULL); - pdpe = x86_ldq_phys(cs, pdpe_addr); - if (!(pdpe & PG_PRESENT_MASK)) { + ptep &= pte ^ PG_NX_MASK; + + /* + * Page table level 3 + */ + pte_addr = ((pte & PG_ADDRESS_MASK) + + (((addr >> 30) & 0x1ff) << 3)) & a20_mask; + if (!ptw_translate(&pte_trans, pte_addr)) { + return false; + } + restart_3_lma: + pte = ptw_ldq(&pte_trans); + if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } - if (pdpe & rsvd_mask) { + if (pte & rsvd_mask) { goto do_fault_rsvd; } - ptep &= pdpe ^ PG_NX_MASK; - if (!(pdpe & PG_ACCESSED_MASK)) { - pdpe |= PG_ACCESSED_MASK; - x86_stl_phys_notdirty(cs, pdpe_addr, pdpe); + if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) { + goto restart_3_lma; } - if (pdpe & PG_PSE_MASK) { + ptep &= pte ^ PG_NX_MASK; + if (pte & PG_PSE_MASK) { /* 1 GB page */ - *page_size = 1024 * 1024 * 1024; - pte_addr = pdpe_addr; - pte = pdpe; + page_size = 1024 * 1024 * 1024; goto do_check_protect; } } else #endif { - /* XXX: load them when cr3 is loaded ? 
*/ - pdpe_addr = ((cr3 & ~0x1f) + ((addr >> 27) & 0x18)) & - a20_mask; - pdpe_addr = GET_HPHYS(cs, pdpe_addr, MMU_DATA_STORE, NULL); - pdpe = x86_ldq_phys(cs, pdpe_addr); - if (!(pdpe & PG_PRESENT_MASK)) { - goto do_fault; + /* + * Page table level 3 + */ + pte_addr = ((in->cr3 & ~0x1f) + ((addr >> 27) & 0x18)) & a20_mask; + if (!ptw_translate(&pte_trans, pte_addr)) { + return false; } rsvd_mask |= PG_HI_USER_MASK; - if (pdpe & (rsvd_mask | PG_NX_MASK)) { + restart_3_nolma: + pte = ptw_ldq(&pte_trans); + if (!(pte & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pte & (rsvd_mask | PG_NX_MASK)) { goto do_fault_rsvd; } + if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) { + goto restart_3_nolma; + } ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; } - pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) & - a20_mask; - pde_addr = GET_HPHYS(cs, pde_addr, MMU_DATA_STORE, NULL); - pde = x86_ldq_phys(cs, pde_addr); - if (!(pde & PG_PRESENT_MASK)) { + /* + * Page table level 2 + */ + pte_addr = ((pte & PG_ADDRESS_MASK) + + (((addr >> 21) & 0x1ff) << 3)) & a20_mask; + if (!ptw_translate(&pte_trans, pte_addr)) { + return false; + } + restart_2_pae: + pte = ptw_ldq(&pte_trans); + if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } - if (pde & rsvd_mask) { + if (pte & rsvd_mask) { goto do_fault_rsvd; } - ptep &= pde ^ PG_NX_MASK; - if (pde & PG_PSE_MASK) { + if (pte & PG_PSE_MASK) { /* 2 MB page */ - *page_size = 2048 * 1024; - pte_addr = pde_addr; - pte = pde; + page_size = 2048 * 1024; + ptep &= pte ^ PG_NX_MASK; goto do_check_protect; } - /* 4 KB page */ - if (!(pde & PG_ACCESSED_MASK)) { - pde |= PG_ACCESSED_MASK; - x86_stl_phys_notdirty(cs, pde_addr, pde); + if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) { + goto restart_2_pae; } - pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) & - a20_mask; - pte_addr = GET_HPHYS(cs, pte_addr, MMU_DATA_STORE, NULL); - pte = x86_ldq_phys(cs, pte_addr); + ptep &= pte ^ PG_NX_MASK; + + /* + * Page table level 1 + */ + pte_addr = ((pte & PG_ADDRESS_MASK) + + (((addr >> 12) & 0x1ff) << 3)) & a20_mask; + if (!ptw_translate(&pte_trans, pte_addr)) { + return false; + } + pte = ptw_ldq(&pte_trans); if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } @@ -215,54 +299,56 @@ static int mmu_translate(CPUState *cs, hwaddr addr, MMUTranslateFunc get_hphys_f } /* combine pde and pte nx, user and rw protections */ ptep &= pte ^ PG_NX_MASK; - *page_size = 4096; + page_size = 4096; } else { - uint32_t pde; - - /* page directory entry */ - pde_addr = ((cr3 & ~0xfff) + ((addr >> 20) & 0xffc)) & - a20_mask; - pde_addr = GET_HPHYS(cs, pde_addr, MMU_DATA_STORE, NULL); - pde = x86_ldl_phys(cs, pde_addr); - if (!(pde & PG_PRESENT_MASK)) { + /* + * Page table level 2 + */ + pte_addr = ((in->cr3 & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask; + if (!ptw_translate(&pte_trans, pte_addr)) { + return false; + } + restart_2_nopae: + pte = ptw_ldl(&pte_trans); + if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } - ptep = pde | PG_NX_MASK; + ptep = pte | PG_NX_MASK; /* if PSE bit is set, then we use a 4MB page */ - if ((pde & PG_PSE_MASK) && (pg_mode & PG_MODE_PSE)) { - *page_size = 4096 * 1024; - pte_addr = pde_addr; - - /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved. + if ((pte & PG_PSE_MASK) && (pg_mode & PG_MODE_PSE)) { + page_size = 4096 * 1024; + /* + * Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved. * Leave bits 20-13 in place for setting accessed/dirty bits below. 
*/ - pte = pde | ((pde & 0x1fe000LL) << (32 - 13)); + pte = (uint32_t)pte | ((pte & 0x1fe000LL) << (32 - 13)); rsvd_mask = 0x200000; goto do_check_protect_pse36; } - - if (!(pde & PG_ACCESSED_MASK)) { - pde |= PG_ACCESSED_MASK; - x86_stl_phys_notdirty(cs, pde_addr, pde); + if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) { + goto restart_2_nopae; } - /* page directory entry */ - pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & - a20_mask; - pte_addr = GET_HPHYS(cs, pte_addr, MMU_DATA_STORE, NULL); - pte = x86_ldl_phys(cs, pte_addr); + /* + * Page table level 1 + */ + pte_addr = ((pte & ~0xfffu) + ((addr >> 10) & 0xffc)) & a20_mask; + if (!ptw_translate(&pte_trans, pte_addr)) { + return false; + } + pte = ptw_ldl(&pte_trans); if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } /* combine pde and pte user and rw protections */ ptep &= pte | PG_NX_MASK; - *page_size = 4096; + page_size = 4096; rsvd_mask = 0; } do_check_protect: - rsvd_mask |= (*page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK; + rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK; do_check_protect_pse36: if (pte & rsvd_mask) { goto do_fault_rsvd; @@ -274,22 +360,20 @@ do_check_protect_pse36: goto do_fault_protect; } - *prot = 0; - if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) { - *prot |= PAGE_READ; + int prot = 0; + if (in->mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) { + prot |= PAGE_READ; if ((ptep & PG_RW_MASK) || !(is_user || (pg_mode & PG_MODE_WP))) { - *prot |= PAGE_WRITE; + prot |= PAGE_WRITE; } } if (!(ptep & PG_NX_MASK) && - (mmu_idx == MMU_USER_IDX || + (is_user || !((pg_mode & PG_MODE_SMEP) && (ptep & PG_USER_MASK)))) { - *prot |= PAGE_EXEC; + prot |= PAGE_EXEC; } - if (!(env->hflags & HF_LMA_MASK)) { - pkr = 0; - } else if (ptep & PG_USER_MASK) { + if (ptep & PG_USER_MASK) { pkr = pg_mode & PG_MODE_PKE ? env->pkru : 0; } else { pkr = pg_mode & PG_MODE_PKS ? env->pkrs : 0; @@ -305,167 +389,255 @@ do_check_protect_pse36: } else if (pkr_wd && (is_user || (pg_mode & PG_MODE_WP))) { pkr_prot &= ~PAGE_WRITE; } - - *prot &= pkr_prot; - if ((pkr_prot & (1 << is_write1)) == 0) { - assert(is_write1 != 2); - error_code |= PG_ERROR_PK_MASK; - goto do_fault_protect; + if ((pkr_prot & (1 << access_type)) == 0) { + goto do_fault_pk_protect; } + prot &= pkr_prot; } - if ((*prot & (1 << is_write1)) == 0) { + if ((prot & (1 << access_type)) == 0) { goto do_fault_protect; } /* yes, it can! */ - is_dirty = is_write && !(pte & PG_DIRTY_MASK); - if (!(pte & PG_ACCESSED_MASK) || is_dirty) { - pte |= PG_ACCESSED_MASK; - if (is_dirty) { - pte |= PG_DIRTY_MASK; + { + uint32_t set = PG_ACCESSED_MASK; + if (access_type == MMU_DATA_STORE) { + set |= PG_DIRTY_MASK; + } else if (!(pte & PG_DIRTY_MASK)) { + /* + * Only set write access if already dirty... + * otherwise wait for dirty access. + */ + prot &= ~PAGE_WRITE; + } + if (!ptw_setl(&pte_trans, pte, set)) { + /* + * We can arrive here from any of 3 levels and 2 formats. + * The only safe thing is to restart the entire lookup. + */ + goto restart_all; } - x86_stl_phys_notdirty(cs, pte_addr, pte); } - if (!(pte & PG_DIRTY_MASK)) { - /* only set write access if already dirty... 
otherwise wait - for dirty access */ - assert(!is_write); - *prot &= ~PAGE_WRITE; - } - - pte = pte & a20_mask; - /* align to page_size */ - pte &= PG_ADDRESS_MASK & ~(*page_size - 1); - page_offset = addr & (*page_size - 1); - *xlat = GET_HPHYS(cs, pte + page_offset, is_write1, prot); - return PG_ERROR_OK; + paddr = (pte & a20_mask & PG_ADDRESS_MASK & ~(page_size - 1)) + | (addr & (page_size - 1)); + if (in->ptw_idx == MMU_NESTED_IDX) { + CPUTLBEntryFull *full; + int flags, nested_page_size; + + flags = probe_access_full(env, paddr, access_type, + MMU_NESTED_IDX, true, + &pte_trans.haddr, &full, 0); + if (unlikely(flags & TLB_INVALID_MASK)) { + *err = (TranslateFault){ + .error_code = env->error_code, + .cr2 = paddr, + .stage2 = S2_GPA, + }; + return false; + } + + /* Merge stage1 & stage2 protection bits. */ + prot &= full->prot; + + /* Re-verify resulting protection. */ + if ((prot & (1 << access_type)) == 0) { + goto do_fault_protect; + } + + /* Merge stage1 & stage2 addresses to final physical address. */ + nested_page_size = 1 << full->lg_page_size; + paddr = (full->phys_addr & ~(nested_page_size - 1)) + | (paddr & (nested_page_size - 1)); + + /* + * Use the larger of stage1 & stage2 page sizes, so that + * invalidation works. + */ + if (nested_page_size > page_size) { + page_size = nested_page_size; + } + } + + out->paddr = paddr; + out->prot = prot; + out->page_size = page_size; + return true; + + int error_code; do_fault_rsvd: - error_code |= PG_ERROR_RSVD_MASK; + error_code = PG_ERROR_RSVD_MASK; + goto do_fault_cont; do_fault_protect: - error_code |= PG_ERROR_P_MASK; + error_code = PG_ERROR_P_MASK; + goto do_fault_cont; + do_fault_pk_protect: + assert(access_type != MMU_INST_FETCH); + error_code = PG_ERROR_PK_MASK | PG_ERROR_P_MASK; + goto do_fault_cont; do_fault: - error_code |= (is_write << PG_ERROR_W_BIT); - if (is_user) + error_code = 0; + do_fault_cont: + if (is_user) { error_code |= PG_ERROR_U_MASK; - if (is_write1 == 2 && - (((pg_mode & PG_MODE_NXE) && (pg_mode & PG_MODE_PAE)) || - (pg_mode & PG_MODE_SMEP))) - error_code |= PG_ERROR_I_D_MASK; - return error_code; + } + switch (access_type) { + case MMU_DATA_LOAD: + break; + case MMU_DATA_STORE: + error_code |= PG_ERROR_W_MASK; + break; + case MMU_INST_FETCH: + if (pg_mode & (PG_MODE_NXE | PG_MODE_SMEP)) { + error_code |= PG_ERROR_I_D_MASK; + } + break; + } + *err = (TranslateFault){ + .exception_index = EXCP0E_PAGE, + .error_code = error_code, + .cr2 = addr, + }; + return false; } -static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type, - int *prot) +static G_NORETURN void raise_stage2(CPUX86State *env, TranslateFault *err, + uintptr_t retaddr) { - CPUX86State *env = &X86_CPU(cs)->env; - uint64_t exit_info_1; - int page_size; - int next_prot; - hwaddr hphys; + uint64_t exit_info_1 = err->error_code; - if (likely(!(env->hflags2 & HF2_NPT_MASK))) { - return gphys; - } - - exit_info_1 = mmu_translate(cs, gphys, NULL, env->nested_cr3, - access_type, MMU_USER_IDX, env->nested_pg_mode, - &hphys, &page_size, &next_prot); - if (exit_info_1 == PG_ERROR_OK) { - if (prot) { - *prot &= next_prot; - } - return hphys; - } - - x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), - gphys); - if (prot) { - exit_info_1 |= SVM_NPTEXIT_GPA; - } else { /* page table access */ + switch (err->stage2) { + case S2_GPT: exit_info_1 |= SVM_NPTEXIT_GPT; + break; + case S2_GPA: + exit_info_1 |= SVM_NPTEXIT_GPA; + break; + default: + g_assert_not_reached(); } - cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, 
env->retaddr); + + x86_stq_phys(env_cpu(env), + env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), + err->cr2); + cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, retaddr); } -/* return value: - * -1 = cannot handle fault - * 0 = nothing more to do - * 1 = generate PF fault - */ -static int handle_mmu_fault(CPUState *cs, vaddr addr, int size, - int is_write1, int mmu_idx) +static bool get_physical_address(CPUX86State *env, vaddr addr, + MMUAccessType access_type, int mmu_idx, + TranslateResult *out, TranslateFault *err) { - X86CPU *cpu = X86_CPU(cs); - CPUX86State *env = &cpu->env; - int error_code = PG_ERROR_OK; - int pg_mode, prot, page_size; - hwaddr paddr; - hwaddr vaddr; + TranslateParams in; + bool use_stage2 = env->hflags2 & HF2_NPT_MASK; -#if defined(DEBUG_MMU) - printf("MMU fault: addr=%" VADDR_PRIx " w=%d mmu=%d eip=" TARGET_FMT_lx "\n", - addr, is_write1, mmu_idx, env->eip); -#endif + in.addr = addr; + in.access_type = access_type; - if (!(env->cr[0] & CR0_PG_MASK)) { - paddr = addr; + switch (mmu_idx) { + case MMU_PHYS_IDX: + break; + + case MMU_NESTED_IDX: + if (likely(use_stage2)) { + in.cr3 = env->nested_cr3; + in.pg_mode = env->nested_pg_mode; + in.mmu_idx = MMU_USER_IDX; + in.ptw_idx = MMU_PHYS_IDX; + + if (!mmu_translate(env, &in, out, err)) { + err->stage2 = S2_GPA; + return false; + } + return true; + } + break; + + default: + if (likely(env->cr[0] & CR0_PG_MASK)) { + in.cr3 = env->cr[3]; + in.mmu_idx = mmu_idx; + in.ptw_idx = use_stage2 ? MMU_NESTED_IDX : MMU_PHYS_IDX; + in.pg_mode = get_pg_mode(env); + + if (in.pg_mode & PG_MODE_LMA) { + /* test virtual address sign extension */ + int shift = in.pg_mode & PG_MODE_LA57 ? 56 : 47; + int64_t sext = (int64_t)addr >> shift; + if (sext != 0 && sext != -1) { + *err = (TranslateFault){ + .exception_index = EXCP0D_GPF, + .cr2 = addr, + }; + return false; + } + } + return mmu_translate(env, &in, out, err); + } + break; + } + + /* Translation disabled. 
*/ + out->paddr = addr & x86_get_a20_mask(env); #ifdef TARGET_X86_64 - if (!(env->hflags & HF_LMA_MASK)) { - /* Without long mode we can only address 32bits in real mode */ - paddr = (uint32_t)paddr; - } + if (!(env->hflags & HF_LMA_MASK)) { + /* Without long mode we can only address 32bits in real mode */ + out->paddr = (uint32_t)out->paddr; + } #endif - prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - page_size = 4096; - } else { - pg_mode = get_pg_mode(env); - error_code = mmu_translate(cs, addr, get_hphys, env->cr[3], is_write1, - mmu_idx, pg_mode, - &paddr, &page_size, &prot); - } - - if (error_code == PG_ERROR_OK) { - /* Even if 4MB pages, we map only one 4KB page in the cache to - avoid filling it too fast */ - vaddr = addr & TARGET_PAGE_MASK; - paddr &= TARGET_PAGE_MASK; - - assert(prot & (1 << is_write1)); - tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env), - prot, mmu_idx, page_size); - return 0; - } else { - if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) { - /* cr2 is not modified in case of exceptions */ - x86_stq_phys(cs, - env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), - addr); - } else { - env->cr[2] = addr; - } - env->error_code = error_code; - cs->exception_index = EXCP0E_PAGE; - return 1; - } + out->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + out->page_size = TARGET_PAGE_SIZE; + return true; } bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { - X86CPU *cpu = X86_CPU(cs); - CPUX86State *env = &cpu->env; + CPUX86State *env = cs->env_ptr; + TranslateResult out; + TranslateFault err; - env->retaddr = retaddr; - if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) { - /* FIXME: On error in get_hphys we have already jumped out. */ - g_assert(!probe); - raise_exception_err_ra(env, cs->exception_index, - env->error_code, retaddr); + if (get_physical_address(env, addr, access_type, mmu_idx, &out, &err)) { + /* + * Even if 4MB pages, we map only one 4KB page in the cache to + * avoid filling it too fast. + */ + assert(out.prot & (1 << access_type)); + tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK, + out.paddr & TARGET_PAGE_MASK, + cpu_get_mem_attrs(env), + out.prot, mmu_idx, out.page_size); + return true; } - return true; + + if (probe) { + /* This will be used if recursing for stage2 translation. 
*/ + env->error_code = err.error_code; + return false; + } + + if (err.stage2 != S2_NONE) { + raise_stage2(env, &err, retaddr); + } + + if (env->intercept_exceptions & (1 << err.exception_index)) { + /* cr2 is not modified in case of exceptions */ + x86_stq_phys(cs, env->vm_vmcb + + offsetof(struct vmcb, control.exit_info_2), + err.cr2); + } else { + env->cr[2] = err.cr2; + } + raise_exception_err_ra(env, err.exception_index, err.error_code, retaddr); +} + +G_NORETURN void x86_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr) +{ + X86CPU *cpu = X86_CPU(cs); + handle_unaligned_access(&cpu->env, vaddr, access_type, retaddr); } diff --git a/target/i386/tcg/sysemu/misc_helper.c b/target/i386/tcg/sysemu/misc_helper.c index e7a2ebde81..e1528b7f80 100644 --- a/target/i386/tcg/sysemu/misc_helper.c +++ b/target/i386/tcg/sysemu/misc_helper.c @@ -23,6 +23,7 @@ #include "exec/helper-proto.h" #include "exec/cpu_ldst.h" #include "exec/address-spaces.h" +#include "exec/exec-all.h" #include "tcg/helper-tcg.h" void helper_outb(CPUX86State *env, uint32_t port, uint32_t data) @@ -73,7 +74,7 @@ target_ulong helper_read_crN(CPUX86State *env, int reg) if (!(env->hflags2 & HF2_VINTR_MASK)) { val = cpu_get_apic_tpr(env_archcpu(env)->apic_state); } else { - val = env->v_tpr; + val = env->int_ctl & V_TPR_MASK; } break; } @@ -121,7 +122,14 @@ void helper_write_crN(CPUX86State *env, int reg, target_ulong t0) cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0); qemu_mutex_unlock_iothread(); } - env->v_tpr = t0 & 0x0f; + env->int_ctl = (env->int_ctl & ~V_TPR_MASK) | (t0 & V_TPR_MASK); + + CPUState *cs = env_cpu(env); + if (ctl_has_irq(env)) { + cpu_interrupt(cs, CPU_INTERRUPT_VIRQ); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ); + } break; default: env->cr[reg] = t0; @@ -442,6 +450,11 @@ void helper_rdmsr(CPUX86State *env) case MSR_IA32_UCODE_REV: val = x86_cpu->ucode_rev; break; + case MSR_CORE_THREAD_COUNT: { + CPUState *cs = CPU(x86_cpu); + val = (cs->nr_threads * cs->nr_cores) | (cs->nr_cores << 16); + break; + } default: if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL + @@ -463,7 +476,8 @@ void helper_flush_page(CPUX86State *env, target_ulong addr) tlb_flush_page(env_cpu(env), addr); } -static void QEMU_NORETURN do_hlt(CPUX86State *env) +static G_NORETURN +void do_hlt(CPUX86State *env) { CPUState *cs = env_cpu(env); @@ -473,7 +487,7 @@ static void QEMU_NORETURN do_hlt(CPUX86State *env) cpu_loop_exit(cs); } -void QEMU_NORETURN helper_hlt(CPUX86State *env, int next_eip_addend) +G_NORETURN void helper_hlt(CPUX86State *env, int next_eip_addend) { cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0, GETPC()); env->eip += next_eip_addend; @@ -490,7 +504,7 @@ void helper_monitor(CPUX86State *env, target_ulong ptr) cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC()); } -void QEMU_NORETURN helper_mwait(CPUX86State *env, int next_eip_addend) +G_NORETURN void helper_mwait(CPUX86State *env, int next_eip_addend) { CPUState *cs = env_cpu(env); diff --git a/target/i386/tcg/sysemu/seg_helper.c b/target/i386/tcg/sysemu/seg_helper.c index 82c0856c41..2c9bd007ad 100644 --- a/target/i386/tcg/sysemu/seg_helper.c +++ b/target/i386/tcg/sysemu/seg_helper.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "exec/helper-proto.h" #include "exec/cpu_ldst.h" @@ -125,6 +126,68 @@ void x86_cpu_do_interrupt(CPUState *cs) } } +bool x86_cpu_exec_interrupt(CPUState *cs, int 
interrupt_request) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + int intno; + + interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request); + if (!interrupt_request) { + return false; + } + + /* Don't process multiple interrupt requests in a single call. + * This is required to make icount-driven execution deterministic. + */ + switch (interrupt_request) { + case CPU_INTERRUPT_POLL: + cs->interrupt_request &= ~CPU_INTERRUPT_POLL; + apic_poll_irq(cpu->apic_state); + break; + case CPU_INTERRUPT_SIPI: + do_cpu_sipi(cpu); + break; + case CPU_INTERRUPT_SMI: + cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0); + cs->interrupt_request &= ~CPU_INTERRUPT_SMI; + do_smm_enter(cpu); + break; + case CPU_INTERRUPT_NMI: + cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0); + cs->interrupt_request &= ~CPU_INTERRUPT_NMI; + env->hflags2 |= HF2_NMI_MASK; + do_interrupt_x86_hardirq(env, EXCP02_NMI, 1); + break; + case CPU_INTERRUPT_MCE: + cs->interrupt_request &= ~CPU_INTERRUPT_MCE; + do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0); + break; + case CPU_INTERRUPT_HARD: + cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0); + cs->interrupt_request &= ~(CPU_INTERRUPT_HARD | + CPU_INTERRUPT_VIRQ); + intno = cpu_get_pic_interrupt(env); + qemu_log_mask(CPU_LOG_INT, + "Servicing hardware INT=0x%02x\n", intno); + do_interrupt_x86_hardirq(env, intno, 1); + break; + case CPU_INTERRUPT_VIRQ: + cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0); + intno = x86_ldl_phys(cs, env->vm_vmcb + + offsetof(struct vmcb, control.int_vector)); + qemu_log_mask(CPU_LOG_INT, + "Servicing virtual hardware INT=0x%02x\n", intno); + do_interrupt_x86_hardirq(env, intno, 1); + cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; + env->int_ctl &= ~V_IRQ_MASK; + break; + } + + /* Ensure that no TB jump will be modified as the program flow was changed. 
*/ + return true; +} + /* check if Port I/O is allowed in TSS */ void helper_check_io(CPUX86State *env, uint32_t addr, uint32_t size) { diff --git a/target/i386/tcg/sysemu/svm_helper.c b/target/i386/tcg/sysemu/svm_helper.c index 0d549b3d6c..2d27731b60 100644 --- a/target/i386/tcg/sysemu/svm_helper.c +++ b/target/i386/tcg/sysemu/svm_helper.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" @@ -26,53 +27,61 @@ /* Secure Virtual Machine helpers */ -static inline void svm_save_seg(CPUX86State *env, hwaddr addr, - const SegmentCache *sc) +static void svm_save_seg(CPUX86State *env, int mmu_idx, hwaddr addr, + const SegmentCache *sc) { - CPUState *cs = env_cpu(env); - - x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector), - sc->selector); - x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base), - sc->base); - x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit), - sc->limit); - x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib), - ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00)); + cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector), + sc->selector, mmu_idx, 0); + cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base), + sc->base, mmu_idx, 0); + cpu_stl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit), + sc->limit, mmu_idx, 0); + cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib), + ((sc->flags >> 8) & 0xff) + | ((sc->flags >> 12) & 0x0f00), + mmu_idx, 0); } -static inline void svm_load_seg(CPUX86State *env, hwaddr addr, - SegmentCache *sc) +/* + * VMRUN and VMLOAD canonicalizes (i.e., sign-extend to bit 63) all base + * addresses in the segment registers that have been loaded. + */ +static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base) +{ + uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env); + *seg_base = ((((long) *seg_base) << shift_amt) >> shift_amt); +} + +static void svm_load_seg(CPUX86State *env, int mmu_idx, hwaddr addr, + SegmentCache *sc) { - CPUState *cs = env_cpu(env); unsigned int flags; - sc->selector = x86_lduw_phys(cs, - addr + offsetof(struct vmcb_seg, selector)); - sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base)); - sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit)); - flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib)); + sc->selector = + cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector), + mmu_idx, 0); + sc->base = + cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base), + mmu_idx, 0); + sc->limit = + cpu_ldl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit), + mmu_idx, 0); + flags = + cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib), + mmu_idx, 0); sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12); + + svm_canonicalization(env, &sc->base); } -static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr, - int seg_reg) +static void svm_load_seg_cache(CPUX86State *env, int mmu_idx, + hwaddr addr, int seg_reg) { - SegmentCache sc1, *sc = &sc1; + SegmentCache sc; - svm_load_seg(env, addr, sc); - cpu_x86_load_seg_cache(env, seg_reg, sc->selector, - sc->base, sc->limit, sc->flags); -} - -static inline bool ctl_has_irq(uint32_t int_ctl) -{ - uint32_t int_prio; - uint32_t tpr; - - int_prio = (int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT; - tpr = int_ctl & V_TPR_MASK; - return (int_ctl & V_IRQ_MASK) && (int_prio >= tpr); + svm_load_seg(env, mmu_idx, addr, &sc); + 
cpu_x86_load_seg_cache(env, seg_reg, sc.selector, + sc.base, sc.limit, sc.flags); } static inline bool is_efer_invalid_state (CPUX86State *env) @@ -110,6 +119,39 @@ static inline bool is_efer_invalid_state (CPUX86State *env) return false; } +static inline bool virtual_gif_enabled(CPUX86State *env) +{ + if (likely(env->hflags & HF_GUEST_MASK)) { + return (env->features[FEAT_SVM] & CPUID_SVM_VGIF) + && (env->int_ctl & V_GIF_ENABLED_MASK); + } + return false; +} + +static inline bool virtual_vm_load_save_enabled(CPUX86State *env, uint32_t exit_code, uintptr_t retaddr) +{ + uint64_t lbr_ctl; + + if (likely(env->hflags & HF_GUEST_MASK)) { + if (likely(!(env->hflags2 & HF2_NPT_MASK)) || !(env->efer & MSR_EFER_LMA)) { + cpu_vmexit(env, exit_code, 0, retaddr); + } + + lbr_ctl = x86_ldl_phys(env_cpu(env), env->vm_vmcb + offsetof(struct vmcb, + control.lbr_ctl)); + return (env->features[FEAT_SVM] & CPUID_SVM_V_VMSAVE_VMLOAD) + && (lbr_ctl & V_VMLOAD_VMSAVE_ENABLED_MASK); + + } + + return false; +} + +static inline bool virtual_gif_set(CPUX86State *env) +{ + return !virtual_gif_enabled(env) || (env->int_ctl & V_GIF_MASK); +} + void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) { CPUState *cs = env_cpu(env); @@ -117,7 +159,6 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) target_ulong addr; uint64_t nested_ctl; uint32_t event_inj; - uint32_t int_ctl; uint32_t asid; uint64_t new_cr0; uint64_t new_cr3; @@ -165,13 +206,17 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) env->vm_hsave + offsetof(struct vmcb, save.rflags), cpu_compute_eflags(env)); - svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es), + svm_save_seg(env, MMU_PHYS_IDX, + env->vm_hsave + offsetof(struct vmcb, save.es), &env->segs[R_ES]); - svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs), + svm_save_seg(env, MMU_PHYS_IDX, + env->vm_hsave + offsetof(struct vmcb, save.cs), &env->segs[R_CS]); - svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss), + svm_save_seg(env, MMU_PHYS_IDX, + env->vm_hsave + offsetof(struct vmcb, save.ss), &env->segs[R_SS]); - svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds), + svm_save_seg(env, MMU_PHYS_IDX, + env->vm_hsave + offsetof(struct vmcb, save.ds), &env->segs[R_DS]); x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip), @@ -237,6 +282,8 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) env->hflags2 |= HF2_NPT_MASK; env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK; + + tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX); } /* enable intercepts */ @@ -245,16 +292,6 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset)); - env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, - save.gdtr.base)); - env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, - save.gdtr.limit)); - - env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, - save.idtr.base)); - env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, - save.idtr.limit)); - new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0)); if (new_cr0 & SVM_CR0_RESERVED_MASK) { cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); @@ -280,11 +317,10 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) cpu_x86_update_cr3(env, new_cr3); env->cr[2] = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr2)); - int_ctl = 
x86_ldl_phys(cs, + env->int_ctl = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); - if (int_ctl & V_INTR_MASKING_MASK) { - env->v_tpr = int_ctl & V_TPR_MASK; + if (env->int_ctl & V_INTR_MASKING_MASK) { env->hflags2 |= HF2_VINTR_MASK; if (env->eflags & IF_MASK) { env->hflags2 |= HF2_HIF_MASK; @@ -300,14 +336,18 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) save.rflags)), ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); - svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es), - R_ES); - svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs), - R_CS); - svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss), - R_SS); - svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds), - R_DS); + svm_load_seg_cache(env, MMU_PHYS_IDX, + env->vm_vmcb + offsetof(struct vmcb, save.es), R_ES); + svm_load_seg_cache(env, MMU_PHYS_IDX, + env->vm_vmcb + offsetof(struct vmcb, save.cs), R_CS); + svm_load_seg_cache(env, MMU_PHYS_IDX, + env->vm_vmcb + offsetof(struct vmcb, save.ss), R_SS); + svm_load_seg_cache(env, MMU_PHYS_IDX, + env->vm_vmcb + offsetof(struct vmcb, save.ds), R_DS); + svm_load_seg(env, MMU_PHYS_IDX, + env->vm_vmcb + offsetof(struct vmcb, save.idtr), &env->idt); + svm_load_seg(env, MMU_PHYS_IDX, + env->vm_vmcb + offsetof(struct vmcb, save.gdtr), &env->gdt); env->eip = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip)); @@ -346,12 +386,16 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) env->hflags2 |= HF2_GIF_MASK; - if (ctl_has_irq(int_ctl)) { + if (ctl_has_irq(env)) { CPUState *cs = env_cpu(env); cs->interrupt_request |= CPU_INTERRUPT_VIRQ; } + if (virtual_gif_set(env)) { + env->hflags2 |= HF2_VGIF_MASK; + } + /* maybe we need to inject an event */ event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj)); @@ -418,7 +462,7 @@ void helper_vmmcall(CPUX86State *env) void helper_vmload(CPUX86State *env, int aflag) { - CPUState *cs = env_cpu(env); + int mmu_idx = MMU_PHYS_IDX; target_ulong addr; cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC()); @@ -429,36 +473,52 @@ void helper_vmload(CPUX86State *env, int aflag) addr = (uint32_t)env->regs[R_EAX]; } - qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! 
" TARGET_FMT_lx - "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", - addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb, - save.fs.base)), - env->segs[R_FS].base); + if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) { + mmu_idx = MMU_NESTED_IDX; + } - svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS); - svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS); - svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr); - svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt); + svm_load_seg_cache(env, mmu_idx, + addr + offsetof(struct vmcb, save.fs), R_FS); + svm_load_seg_cache(env, mmu_idx, + addr + offsetof(struct vmcb, save.gs), R_GS); + svm_load_seg(env, mmu_idx, + addr + offsetof(struct vmcb, save.tr), &env->tr); + svm_load_seg(env, mmu_idx, + addr + offsetof(struct vmcb, save.ldtr), &env->ldt); #ifdef TARGET_X86_64 - env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb, - save.kernel_gs_base)); - env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar)); - env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar)); - env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask)); + env->kernelgsbase = + cpu_ldq_mmuidx_ra(env, + addr + offsetof(struct vmcb, save.kernel_gs_base), + mmu_idx, 0); + env->lstar = + cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar), + mmu_idx, 0); + env->cstar = + cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar), + mmu_idx, 0); + env->fmask = + cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask), + mmu_idx, 0); + svm_canonicalization(env, &env->kernelgsbase); #endif - env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star)); - env->sysenter_cs = x86_ldq_phys(cs, - addr + offsetof(struct vmcb, save.sysenter_cs)); - env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb, - save.sysenter_esp)); - env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb, - save.sysenter_eip)); + env->star = + cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star), + mmu_idx, 0); + env->sysenter_cs = + cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs), + mmu_idx, 0); + env->sysenter_esp = + cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp), + mmu_idx, 0); + env->sysenter_eip = + cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip), + mmu_idx, 0); } void helper_vmsave(CPUX86State *env, int aflag) { - CPUState *cs = env_cpu(env); + int mmu_idx = MMU_PHYS_IDX; target_ulong addr; cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC()); @@ -469,47 +529,61 @@ void helper_vmsave(CPUX86State *env, int aflag) addr = (uint32_t)env->regs[R_EAX]; } - qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! 
" TARGET_FMT_lx - "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", - addr, x86_ldq_phys(cs, - addr + offsetof(struct vmcb, save.fs.base)), - env->segs[R_FS].base); + if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMSAVE, GETPC())) { + mmu_idx = MMU_NESTED_IDX; + } - svm_save_seg(env, addr + offsetof(struct vmcb, save.fs), + svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.fs), &env->segs[R_FS]); - svm_save_seg(env, addr + offsetof(struct vmcb, save.gs), + svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.gs), &env->segs[R_GS]); - svm_save_seg(env, addr + offsetof(struct vmcb, save.tr), + svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.tr), &env->tr); - svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr), + svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.ldtr), &env->ldt); #ifdef TARGET_X86_64 - x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base), - env->kernelgsbase); - x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar); - x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar); - x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask); + cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.kernel_gs_base), + env->kernelgsbase, mmu_idx, 0); + cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar), + env->lstar, mmu_idx, 0); + cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar), + env->cstar, mmu_idx, 0); + cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask), + env->fmask, mmu_idx, 0); #endif - x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star); - x86_stq_phys(cs, - addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs); - x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp), - env->sysenter_esp); - x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip), - env->sysenter_eip); + cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star), + env->star, mmu_idx, 0); + cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs), + env->sysenter_cs, mmu_idx, 0); + cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp), + env->sysenter_esp, mmu_idx, 0); + cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip), + env->sysenter_eip, mmu_idx, 0); } void helper_stgi(CPUX86State *env) { cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC()); - env->hflags2 |= HF2_GIF_MASK; + + if (virtual_gif_enabled(env)) { + env->int_ctl |= V_GIF_MASK; + env->hflags2 |= HF2_VGIF_MASK; + } else { + env->hflags2 |= HF2_GIF_MASK; + } } void helper_clgi(CPUX86State *env) { cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC()); - env->hflags2 &= ~HF2_GIF_MASK; + + if (virtual_gif_enabled(env)) { + env->int_ctl &= ~V_GIF_MASK; + env->hflags2 &= ~HF2_VGIF_MASK; + } else { + env->hflags2 &= ~HF2_GIF_MASK; + } } bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type) @@ -630,7 +704,7 @@ void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1, { CPUState *cs = env_cpu(env); - cpu_restore_state(cs, retaddr, true); + cpu_restore_state(cs, retaddr); qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n", @@ -654,7 +728,6 @@ void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1, void do_vmexit(CPUX86State *env) { CPUState *cs = env_cpu(env); - uint32_t int_ctl; if (env->hflags & HF_INHIBIT_IRQ_MASK) { x86_stl_phys(cs, @@ -666,15 +739,20 @@ void do_vmexit(CPUX86State *env) 
env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0); } env->hflags2 &= ~HF2_NPT_MASK; + tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX); /* Save the VM state in the vmcb */ - svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es), + svm_save_seg(env, MMU_PHYS_IDX, + env->vm_vmcb + offsetof(struct vmcb, save.es), &env->segs[R_ES]); - svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs), + svm_save_seg(env, MMU_PHYS_IDX, + env->vm_vmcb + offsetof(struct vmcb, save.cs), &env->segs[R_CS]); - svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss), + svm_save_seg(env, MMU_PHYS_IDX, + env->vm_vmcb + offsetof(struct vmcb, save.ss), &env->segs[R_SS]); - svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds), + svm_save_seg(env, MMU_PHYS_IDX, + env->vm_vmcb + offsetof(struct vmcb, save.ds), &env->segs[R_DS]); x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), @@ -697,16 +775,8 @@ void do_vmexit(CPUX86State *env) env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]); x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]); - - int_ctl = x86_ldl_phys(cs, - env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); - int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK); - int_ctl |= env->v_tpr & V_TPR_MASK; - if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { - int_ctl |= V_IRQ_MASK; - } x86_stl_phys(cs, - env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl); + env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), env->int_ctl); x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags), cpu_compute_eflags(env)); @@ -729,6 +799,7 @@ void do_vmexit(CPUX86State *env) env->intercept = 0; env->intercept_exceptions = 0; cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; + env->int_ctl = 0; env->tsc_offset = 0; env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, @@ -762,14 +833,14 @@ void do_vmexit(CPUX86State *env) ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK | VM_MASK)); - svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es), - R_ES); - svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs), - R_CS); - svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss), - R_SS); - svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds), - R_DS); + svm_load_seg_cache(env, MMU_PHYS_IDX, + env->vm_hsave + offsetof(struct vmcb, save.es), R_ES); + svm_load_seg_cache(env, MMU_PHYS_IDX, + env->vm_hsave + offsetof(struct vmcb, save.cs), R_CS); + svm_load_seg_cache(env, MMU_PHYS_IDX, + env->vm_hsave + offsetof(struct vmcb, save.ss), R_SS); + svm_load_seg_cache(env, MMU_PHYS_IDX, + env->vm_hsave + offsetof(struct vmcb, save.ds), R_DS); env->eip = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip)); @@ -796,6 +867,7 @@ void do_vmexit(CPUX86State *env) env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0); env->hflags2 &= ~HF2_GIF_MASK; + env->hflags2 &= ~HF2_VGIF_MASK; /* FIXME: Resets the current ASID register to zero (host ASID). */ /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */ diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c index 93a79a5741..79ac5908f7 100644 --- a/target/i386/tcg/tcg-cpu.c +++ b/target/i386/tcg/tcg-cpu.c @@ -49,9 +49,29 @@ static void x86_cpu_exec_exit(CPUState *cs) static void x86_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { - X86CPU *cpu = X86_CPU(cs); + /* The instruction pointer is always up to date with TARGET_TB_PCREL. 
*/ + if (!TARGET_TB_PCREL) { + CPUX86State *env = cs->env_ptr; + env->eip = tb_pc(tb) - tb->cs_base; + } +} - cpu->env.eip = tb->pc - tb->cs_base; +static void x86_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + int cc_op = data[1]; + + if (TARGET_TB_PCREL) { + env->eip = (env->eip & TARGET_PAGE_MASK) | data[0]; + } else { + env->eip = data[0] - tb->cs_base; + } + if (cc_op != CC_OP_DYNAMIC) { + env->cc_op = cc_op; + } } #ifndef CONFIG_USER_ONLY @@ -70,12 +90,18 @@ static bool x86_debug_check_breakpoint(CPUState *cs) static const struct TCGCPUOps x86_tcg_ops = { .initialize = tcg_x86_init, .synchronize_from_tb = x86_cpu_synchronize_from_tb, + .restore_state_to_opc = x86_restore_state_to_opc, .cpu_exec_enter = x86_cpu_exec_enter, .cpu_exec_exit = x86_cpu_exec_exit, - .cpu_exec_interrupt = x86_cpu_exec_interrupt, - .do_interrupt = x86_cpu_do_interrupt, +#ifdef CONFIG_USER_ONLY + .fake_user_interrupt = x86_cpu_do_interrupt, + .record_sigsegv = x86_cpu_record_sigsegv, + .record_sigbus = x86_cpu_record_sigbus, +#else .tlb_fill = x86_cpu_tlb_fill, -#ifndef CONFIG_USER_ONLY + .do_interrupt = x86_cpu_do_interrupt, + .cpu_exec_interrupt = x86_cpu_exec_interrupt, + .do_unaligned_access = x86_cpu_do_unaligned_access, .debug_excp_handler = breakpoint_handler, .debug_check_breakpoint = x86_debug_check_breakpoint, #endif /* !CONFIG_USER_ONLY */ diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c index 2359936e74..a9dbd5cf85 100644 --- a/target/i386/tcg/translate.c +++ b/target/i386/tcg/translate.c @@ -23,8 +23,10 @@ #include "disas/disas.h" #include "exec/exec-all.h" #include "tcg/tcg-op.h" +#include "tcg/tcg-op-gvec.h" #include "exec/cpu_ldst.h" #include "exec/translator.h" +#include "fpu/softfloat.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" @@ -152,6 +154,7 @@ static int g_use_hard_fpu; /* global register indexes */ static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2; +static TCGv cpu_eip; static TCGv_i32 cpu_cc_op; static TCGv cpu_regs[CPU_NB_REGS]; static TCGv cpu_seg_base[6]; @@ -168,8 +171,8 @@ typedef struct DisasContext { DisasContextBase base; target_ulong pc; /* pc = eip + cs_base */ - target_ulong pc_start; /* pc at TB entry */ target_ulong cs_base; /* base of CS segment */ + target_ulong pc_save; MemOp aflag; MemOp dflag; @@ -177,6 +180,9 @@ typedef struct DisasContext { int8_t override; /* -1 if no override, else R_CS, R_DS, etc */ uint8_t prefix; + bool has_modrm; + uint8_t modrm; + #ifndef CONFIG_USER_ONLY uint8_t cpl; /* code priv level */ uint8_t iopl; /* i/o priv level */ @@ -190,8 +196,8 @@ typedef struct DisasContext { uint8_t rex_r; uint8_t rex_x; uint8_t rex_b; - bool rex_w; #endif + bool vex_w; /* used by AVX even on 32-bit processors */ bool jmp_opt; /* use direct block chaining for direct jumps */ bool repz_opt; /* optimize jumps within repz instructions */ bool cc_op_dirty; @@ -204,6 +210,7 @@ typedef struct DisasContext { int cpuid_ext2_features; int cpuid_ext3_features; int cpuid_7_0_ebx_features; + int cpuid_7_0_ecx_features; int cpuid_xsave_features; /* TCG local temps */ @@ -215,13 +222,12 @@ typedef struct DisasContext { /* TCG local register indexes (only used inside old micro ops) */ TCGv tmp0; TCGv tmp4; - TCGv_ptr ptr0; - TCGv_ptr ptr1; TCGv_i32 tmp2_i32; TCGv_i32 tmp3_i32; TCGv_i64 tmp1_i64; sigjmp_buf jmpbuf; + TCGOp *prev_insn_end; /* Floating point */ bool flcr_set; @@ -230,6 +236,11 @@ typedef struct DisasContext { TCGv_fp 
ft0; } DisasContext; +#define DISAS_EOB_ONLY DISAS_TARGET_0 +#define DISAS_EOB_NEXT DISAS_TARGET_1 +#define DISAS_EOB_INHIBIT_IRQ DISAS_TARGET_2 +#define DISAS_JUMP DISAS_TARGET_3 + /* The environment in which user-only runs is constrained. */ #ifdef CONFIG_USER_ONLY #define PE(S) true @@ -268,7 +279,7 @@ typedef struct DisasContext { #ifdef TARGET_X86_64 #define REX_PREFIX(S) (((S)->prefix & PREFIX_REX) != 0) -#define REX_W(S) ((S)->rex_w) +#define REX_W(S) ((S)->vex_w) #define REX_R(S) ((S)->rex_r + 0) #define REX_X(S) ((S)->rex_x + 0) #define REX_B(S) ((S)->rex_b + 0) @@ -316,9 +327,9 @@ STUB_HELPER(wrmsr, TCGv_env env) #endif static void gen_eob(DisasContext *s); -static void gen_jr(DisasContext *s, TCGv dest); -static void gen_jmp(DisasContext *s, target_ulong eip); -static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num); +static void gen_jr(DisasContext *s); +static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num); +static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num); static void gen_op(DisasContext *s1, int op, MemOp ot, int d); static void gen_exception_gpf(DisasContext *s); @@ -457,7 +468,7 @@ static void gen_update_cc_op(DisasContext *s) #endif /* !TARGET_X86_64 */ -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN #define REG_B_OFFSET (sizeof(target_ulong) - 1) #define REG_H_OFFSET (sizeof(target_ulong) - 2) #define REG_W_OFFSET (sizeof(target_ulong) - 2) @@ -526,32 +537,51 @@ static inline MemOp mo_b_d32(int b, MemOp ot) return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8; } -static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0) +/* Compute the result of writing t0 to the OT-sized register REG. + * + * If DEST is NULL, store the result into the register and return the + * register's TCGv. + * + * If DEST is not NULL, store the result into DEST and return the + * register's TCGv. + */ +static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0) { switch(ot) { case MO_8: - if (!byte_reg_is_xH(s, reg)) { - tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8); - } else { - tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8); + if (byte_reg_is_xH(s, reg)) { + dest = dest ? dest : cpu_regs[reg - 4]; + tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8); + return cpu_regs[reg - 4]; } + dest = dest ? dest : cpu_regs[reg]; + tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8); break; case MO_16: - tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16); + dest = dest ? dest : cpu_regs[reg]; + tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16); break; case MO_32: /* For x86_64, this sets the higher half of register to zero. For i386, this is equivalent to a mov. */ - tcg_gen_ext32u_tl(cpu_regs[reg], t0); + dest = dest ? dest : cpu_regs[reg]; + tcg_gen_ext32u_tl(dest, t0); break; #ifdef TARGET_X86_64 case MO_64: - tcg_gen_mov_tl(cpu_regs[reg], t0); + dest = dest ? 
dest : cpu_regs[reg]; + tcg_gen_mov_tl(dest, t0); break; #endif default: tcg_abort(); } + return cpu_regs[reg]; +} + +static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0) +{ + gen_op_deposit_reg_v(s, ot, reg, NULL, t0); } static inline @@ -572,9 +602,10 @@ static void gen_add_A0_im(DisasContext *s, int val) } } -static inline void gen_op_jmp_v(TCGv dest) +static inline void gen_op_jmp_v(DisasContext *s, TCGv dest) { - tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip)); + tcg_gen_mov_tl(cpu_eip, dest); + s->pc_save = -1; } static inline @@ -609,10 +640,84 @@ static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d) } } -static inline void gen_jmp_im(DisasContext *s, target_ulong pc) +static void gen_update_eip_cur(DisasContext *s) { - tcg_gen_movi_tl(s->tmp0, pc); - gen_op_jmp_v(s->tmp0); + assert(s->pc_save != -1); + if (TARGET_TB_PCREL) { + tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save); + } else { + tcg_gen_movi_tl(cpu_eip, s->base.pc_next - s->cs_base); + } + s->pc_save = s->base.pc_next; +} + +static void gen_update_eip_next(DisasContext *s) +{ + assert(s->pc_save != -1); + if (TARGET_TB_PCREL) { + tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save); + } else { + tcg_gen_movi_tl(cpu_eip, s->pc - s->cs_base); + } + s->pc_save = s->pc; +} + +static int cur_insn_len(DisasContext *s) +{ + return s->pc - s->base.pc_next; +} + +static TCGv_i32 cur_insn_len_i32(DisasContext *s) +{ + return tcg_constant_i32(cur_insn_len(s)); +} + +static TCGv_i32 eip_next_i32(DisasContext *s) +{ + assert(s->pc_save != -1); + /* + * This function has two users: lcall_real (always 16-bit mode), and + * iret_protected (16, 32, or 64-bit mode). IRET only uses the value + * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is + * why passing a 32-bit value isn't broken. To avoid using this where + * we shouldn't, return -1 in 64-bit mode so that execution goes into + * the weeds quickly. + */ + if (CODE64(s)) { + return tcg_constant_i32(-1); + } + if (TARGET_TB_PCREL) { + TCGv_i32 ret = tcg_temp_new_i32(); + tcg_gen_trunc_tl_i32(ret, cpu_eip); + tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save); + return ret; + } else { + return tcg_constant_i32(s->pc - s->cs_base); + } +} + +static TCGv eip_next_tl(DisasContext *s) +{ + assert(s->pc_save != -1); + if (TARGET_TB_PCREL) { + TCGv ret = tcg_temp_new(); + tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save); + return ret; + } else { + return tcg_constant_tl(s->pc - s->cs_base); + } +} + +static TCGv eip_cur_tl(DisasContext *s) +{ + assert(s->pc_save != -1); + if (TARGET_TB_PCREL) { + TCGv ret = tcg_temp_new(); + tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save); + return ret; + } else { + return tcg_constant_tl(s->base.pc_next - s->cs_base); + } } /* Compute SEG:REG into A0. 
SEG is selected from the override segment @@ -728,20 +833,21 @@ static void gen_exts(MemOp ot, TCGv reg) gen_ext_tl(reg, reg, ot, true); } -static inline -void gen_op_jnz_ecx(DisasContext *s, MemOp size, TCGLabel *label1) +static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1) { tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]); - gen_extu(size, s->tmp0); - tcg_gen_brcondi_tl(TCG_COND_NE, s->tmp0, 0, label1); + gen_extu(s->aflag, s->tmp0); + tcg_gen_brcondi_tl(cond, s->tmp0, 0, label1); } -static inline -void gen_op_jz_ecx(DisasContext *s, MemOp size, TCGLabel *label1) +static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1) { - tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]); - gen_extu(size, s->tmp0); - tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1); + gen_op_j_ecx(s, TCG_COND_EQ, label1); +} + +static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1) +{ + gen_op_j_ecx(s, TCG_COND_NE, label1); } static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n) @@ -797,24 +903,21 @@ static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port, gen_helper_check_io(cpu_env, port, tcg_constant_i32(1 << ot)); } if (GUEST(s)) { - target_ulong cur_eip = s->base.pc_next - s->cs_base; - target_ulong next_eip = s->pc - s->cs_base; - gen_update_cc_op(s); - gen_jmp_im(s, cur_eip); + gen_update_eip_cur(s); if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) { svm_flags |= SVM_IOIO_REP_MASK; } svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot); gen_helper_svm_check_io(cpu_env, port, tcg_constant_i32(svm_flags), - tcg_constant_i32(next_eip - cur_eip)); + cur_insn_len_i32(s)); } return true; #endif } -static inline void gen_movs(DisasContext *s, MemOp ot) +static void gen_movs(DisasContext *s, MemOp ot) { gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); @@ -1234,18 +1337,18 @@ static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1) /* XXX: does not work with gdbstub "ice" single step - not a serious problem */ -static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip) +static TCGLabel *gen_jz_ecx_string(DisasContext *s) { TCGLabel *l1 = gen_new_label(); TCGLabel *l2 = gen_new_label(); - gen_op_jnz_ecx(s, s->aflag, l1); + gen_op_jnz_ecx(s, l1); gen_set_label(l2); - gen_jmp_tb(s, next_eip, 1); + gen_jmp_rel_csize(s, 0, 1); gen_set_label(l1); return l2; } -static inline void gen_stos(DisasContext *s, MemOp ot) +static void gen_stos(DisasContext *s, MemOp ot) { gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); gen_string_movl_A0_EDI(s); @@ -1254,7 +1357,7 @@ static inline void gen_stos(DisasContext *s, MemOp ot) gen_op_add_reg_T0(s, s->aflag, R_EDI); } -static inline void gen_lods(DisasContext *s, MemOp ot) +static void gen_lods(DisasContext *s, MemOp ot) { gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); @@ -1263,7 +1366,7 @@ static inline void gen_lods(DisasContext *s, MemOp ot) gen_op_add_reg_T0(s, s->aflag, R_ESI); } -static inline void gen_scas(DisasContext *s, MemOp ot) +static void gen_scas(DisasContext *s, MemOp ot) { gen_string_movl_A0_EDI(s); gen_op_ld_v(s, ot, s->T1, s->A0); @@ -1272,7 +1375,7 @@ static inline void gen_scas(DisasContext *s, MemOp ot) gen_op_add_reg_T0(s, s->aflag, R_EDI); } -static inline void gen_cmps(DisasContext *s, MemOp ot) +static void gen_cmps(DisasContext *s, MemOp ot) { gen_string_movl_A0_EDI(s); gen_op_ld_v(s, ot, s->T1, s->A0); @@ -1290,17 +1393,14 @@ static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot) /* user-mode cpu should not be in IOBPT mode */ g_assert_not_reached(); #else - TCGv_i32 
t_size = tcg_const_i32(1 << ot); - TCGv t_next = tcg_const_tl(s->pc - s->cs_base); - + TCGv_i32 t_size = tcg_constant_i32(1 << ot); + TCGv t_next = eip_next_tl(s); gen_helper_bpt_io(cpu_env, t_port, t_size, t_next); - tcg_temp_free_i32(t_size); - tcg_temp_free(t_next); #endif /* CONFIG_USER_ONLY */ } } -static inline void gen_ins(DisasContext *s, MemOp ot) +static void gen_ins(DisasContext *s, MemOp ot) { gen_string_movl_A0_EDI(s); /* Note: we must do this dummy write first to be restartable in @@ -1316,7 +1416,7 @@ static inline void gen_ins(DisasContext *s, MemOp ot) gen_bpt_io(s, s->tmp2_i32, ot); } -static inline void gen_outs(DisasContext *s, MemOp ot) +static void gen_outs(DisasContext *s, MemOp ot) { gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); @@ -1330,42 +1430,49 @@ static inline void gen_outs(DisasContext *s, MemOp ot) gen_bpt_io(s, s->tmp2_i32, ot); } -/* same method as Valgrind : we generate jumps to current or next - instruction */ -#define GEN_REPZ(op) \ -static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \ - target_ulong cur_eip, target_ulong next_eip) \ -{ \ - TCGLabel *l2; \ - gen_update_cc_op(s); \ - l2 = gen_jz_ecx_string(s, next_eip); \ - gen_ ## op(s, ot); \ - gen_op_add_reg_im(s, s->aflag, R_ECX, -1); \ - /* a loop would cause two single step exceptions if ECX = 1 \ - before rep string_insn */ \ - if (s->repz_opt) \ - gen_op_jz_ecx(s, s->aflag, l2); \ - gen_jmp(s, cur_eip); \ +/* Generate jumps to current or next instruction */ +static void gen_repz(DisasContext *s, MemOp ot, + void (*fn)(DisasContext *s, MemOp ot)) +{ + TCGLabel *l2; + gen_update_cc_op(s); + l2 = gen_jz_ecx_string(s); + fn(s, ot); + gen_op_add_reg_im(s, s->aflag, R_ECX, -1); + /* + * A loop would cause two single step exceptions if ECX = 1 + * before rep string_insn + */ + if (s->repz_opt) { + gen_op_jz_ecx(s, l2); + } + gen_jmp_rel_csize(s, -cur_insn_len(s), 0); } -#define GEN_REPZ2(op) \ -static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \ - target_ulong cur_eip, \ - target_ulong next_eip, \ - int nz) \ -{ \ - TCGLabel *l2; \ - gen_update_cc_op(s); \ - l2 = gen_jz_ecx_string(s, next_eip); \ - gen_ ## op(s, ot); \ - gen_op_add_reg_im(s, s->aflag, R_ECX, -1); \ - gen_update_cc_op(s); \ - gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \ - if (s->repz_opt) \ - gen_op_jz_ecx(s, s->aflag, l2); \ - gen_jmp(s, cur_eip); \ +#define GEN_REPZ(op) \ + static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \ + { gen_repz(s, ot, gen_##op); } + +static void gen_repz2(DisasContext *s, MemOp ot, int nz, + void (*fn)(DisasContext *s, MemOp ot)) +{ + TCGLabel *l2; + gen_update_cc_op(s); + l2 = gen_jz_ecx_string(s); + fn(s, ot); + gen_op_add_reg_im(s, s->aflag, R_ECX, -1); + gen_update_cc_op(s); + gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); + if (s->repz_opt) { + gen_op_jz_ecx(s, l2); + } + gen_jmp_rel_csize(s, -cur_insn_len(s), 0); } +#define GEN_REPZ2(op) \ + static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \ + { gen_repz2(s, ot, nz, gen_##op); } + GEN_REPZ(movs) GEN_REPZ(stos) GEN_REPZ(lods) @@ -1818,10 +1925,10 @@ static void gen_fldz_FT0(DisasContext *s) fp_pc_wrapper(gen_fldz_FT0)(s); } -static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip) +static void gen_exception(DisasContext *s, int trapno) { gen_update_cc_op(s); - gen_jmp_im(s, cur_eip); + gen_update_eip_cur(s); gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno)); s->base.is_jmp = DISAS_NORETURN; } @@ -1830,13 +1937,13 @@ static void gen_exception(DisasContext *s, int 
trapno, target_ulong cur_eip) the instruction is known, but it isn't allowed in the current cpu mode. */ static void gen_illegal_opcode(DisasContext *s) { - gen_exception(s, EXCP06_ILLOP, s->pc_start - s->cs_base); + gen_exception(s, EXCP06_ILLOP); } /* Generate #GP for the current instruction. */ static void gen_exception_gpf(DisasContext *s) { - gen_exception(s, EXCP0D_GPF, s->pc_start - s->cs_base); + gen_exception(s, EXCP0D_GPF); } /* Check for cpl == 0; if not, raise #GP and return false. */ @@ -2494,8 +2601,14 @@ static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes) { uint64_t pc = s->pc; + /* This is a subsequent insn that crosses a page boundary. */ + if (s->base.num_insns > 1 && + !is_same_page(&s->base, s->pc + num_bytes - 1)) { + siglongjmp(s->jmpbuf, 2); + } + s->pc += num_bytes; - if (unlikely(s->pc - s->pc_start > X86_MAX_INSN_LENGTH)) { + if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) { /* If the instruction's 16th byte is on a different page than the 1st, a * page fault on the second page wins over the general protection fault * caused by the instruction being too long. @@ -2514,28 +2627,28 @@ static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes) static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s) { - return translator_ldub(env, advance_pc(env, s, 1)); + return translator_ldub(env, &s->base, advance_pc(env, s, 1)); } static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s) { - return translator_ldsw(env, advance_pc(env, s, 2)); + return translator_lduw(env, &s->base, advance_pc(env, s, 2)); } static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s) { - return translator_lduw(env, advance_pc(env, s, 2)); + return translator_lduw(env, &s->base, advance_pc(env, s, 2)); } static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s) { - return translator_ldl(env, advance_pc(env, s, 4)); + return translator_ldl(env, &s->base, advance_pc(env, s, 4)); } #ifdef TARGET_X86_64 static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s) { - return translator_ldq(env, advance_pc(env, s, 8)); + return translator_ldq(env, &s->base, advance_pc(env, s, 8)); } #endif @@ -2673,11 +2786,11 @@ static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s, } /* Compute the address, with a minimum number of TCG ops. */ -static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a) +static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib) { TCGv ea = NULL; - if (a.index >= 0) { + if (a.index >= 0 && !is_vsib) { if (a.scale == 0) { ea = cpu_regs[a.index]; } else { @@ -2692,7 +2805,12 @@ static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a) ea = cpu_regs[a.base]; } if (!ea) { - tcg_gen_movi_tl(s->A0, a.disp); + if (TARGET_TB_PCREL && a.base == -2) { + /* With cpu_eip ~= pc_save, the expression is pc-relative. 
*/ + tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save); + } else { + tcg_gen_movi_tl(s->A0, a.disp); + } ea = s->A0; } else if (a.disp != 0) { tcg_gen_addi_tl(s->A0, ea, a.disp); @@ -2705,7 +2823,7 @@ static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a) static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm) { AddressParts a = gen_lea_modrm_0(env, s, modrm); - TCGv ea = gen_lea_modrm_1(s, a); + TCGv ea = gen_lea_modrm_1(s, a, false); gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override); } @@ -2718,7 +2836,8 @@ static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm) static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm, TCGCond cond, TCGv_i64 bndv) { - TCGv ea = gen_lea_modrm_1(s, gen_lea_modrm_0(env, s, modrm)); + AddressParts a = gen_lea_modrm_0(env, s, modrm); + TCGv ea = gen_lea_modrm_1(s, a, false); tcg_gen_extu_tl_i64(s->tmp1_i64, ea); if (!CODE64(s)) { @@ -2768,6 +2887,31 @@ static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, } } +static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot) +{ + target_ulong ret; + + switch (ot) { + case MO_8: + ret = x86_ldub_code(env, s); + break; + case MO_16: + ret = x86_lduw_code(env, s); + break; + case MO_32: + ret = x86_ldl_code(env, s); + break; +#ifdef TARGET_X86_64 + case MO_64: + ret = x86_ldq_code(env, s); + break; +#endif + default: + g_assert_not_reached(); + } + return ret; +} + static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot) { uint32_t ret; @@ -2791,6 +2935,31 @@ static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot) return ret; } +static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot) +{ + target_long ret; + + switch (ot) { + case MO_8: + ret = (int8_t) x86_ldub_code(env, s); + break; + case MO_16: + ret = (int16_t) x86_lduw_code(env, s); + break; + case MO_32: + ret = (int32_t) x86_ldl_code(env, s); + break; +#ifdef TARGET_X86_64 + case MO_64: + ret = x86_ldq_code(env, s); + break; +#endif + default: + g_assert_not_reached(); + } + return ret; +} + static inline int insn_const_size(MemOp ot) { if (ot <= MO_32) { @@ -2800,49 +2969,14 @@ static inline int insn_const_size(MemOp ot) } } -static void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip) +static void gen_jcc(DisasContext *s, int b, int diff) { - target_ulong pc = s->cs_base + eip; + TCGLabel *l1 = gen_new_label(); - if (translator_use_goto_tb(&s->base, pc)) { - /* jump to same page: we can use a direct jump */ - tcg_gen_goto_tb(tb_num); - gen_jmp_im(s, eip); - tcg_gen_exit_tb(s->base.tb, tb_num); - s->base.is_jmp = DISAS_NORETURN; - } else { - /* jump to another page */ - gen_jmp_im(s, eip); - gen_jr(s, s->tmp0); - } -} - -static inline void gen_jcc(DisasContext *s, int b, - target_ulong val, target_ulong next_eip) -{ - TCGLabel *l1, *l2; - - if (s->jmp_opt) { - l1 = gen_new_label(); - gen_jcc1(s, b, l1); - - gen_goto_tb(s, 0, next_eip); - - gen_set_label(l1); - gen_goto_tb(s, 1, val); - } else { - l1 = gen_new_label(); - l2 = gen_new_label(); - gen_jcc1(s, b, l1); - - gen_jmp_im(s, next_eip); - tcg_gen_br(l2); - - gen_set_label(l1); - gen_jmp_im(s, val); - gen_set_label(l2); - gen_eob(s); - } + gen_jcc1(s, b, l1); + gen_jmp_rel_csize(s, 0, 1); + gen_set_label(l1); + gen_jmp_rel(s, s->dflag, diff, 0); } static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b, @@ -2899,13 +3033,15 @@ static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg) because ss32 may change. 
For R_SS, translation must always stop as a special handling must be done to disable hardware interrupts for the next instruction */ - if (seg_reg == R_SS || (CODE32(s) && seg_reg < R_FS)) { - s->base.is_jmp = DISAS_TOO_MANY; + if (seg_reg == R_SS) { + s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ; + } else if (CODE32(s) && seg_reg < R_FS) { + s->base.is_jmp = DISAS_EOB_NEXT; } } else { gen_op_movl_seg_T0_vm(s, seg_reg); if (seg_reg == R_SS) { - s->base.is_jmp = DISAS_TOO_MANY; + s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ; } } } @@ -3066,27 +3202,28 @@ static void gen_unknown_opcode(CPUX86State *env, DisasContext *s) gen_illegal_opcode(s); if (qemu_loglevel_mask(LOG_UNIMP)) { - FILE *logfile = qemu_log_lock(); - target_ulong pc = s->pc_start, end = s->pc; + FILE *logfile = qemu_log_trylock(); + if (logfile) { + target_ulong pc = s->base.pc_next, end = s->pc; - qemu_log("ILLOPC: " TARGET_FMT_lx ":", pc); - for (; pc < end; ++pc) { - qemu_log(" %02x", cpu_ldub_code(env, pc)); + fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc); + for (; pc < end; ++pc) { + fprintf(logfile, " %02x", cpu_ldub_code(env, pc)); + } + fprintf(logfile, "\n"); + qemu_log_unlock(logfile); } - qemu_log("\n"); - qemu_log_unlock(logfile); } } /* an interrupt is different from an exception because of the privilege checks */ -static void gen_interrupt(DisasContext *s, int intno, - target_ulong cur_eip, target_ulong next_eip) +static void gen_interrupt(DisasContext *s, int intno) { gen_update_cc_op(s); - gen_jmp_im(s, cur_eip); - gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno), - tcg_const_i32(next_eip - cur_eip)); + gen_update_eip_cur(s); + gen_helper_raise_interrupt(cpu_env, tcg_constant_i32(intno), + cur_insn_len_i32(s)); s->base.is_jmp = DISAS_NORETURN; } @@ -3114,6 +3251,26 @@ static void gen_reset_hflag(DisasContext *s, uint32_t mask) } } +static void gen_set_eflags(DisasContext *s, target_ulong mask) +{ + TCGv t = tcg_temp_new(); + + tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags)); + tcg_gen_ori_tl(t, t, mask); + tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags)); + tcg_temp_free(t); +} + +static void gen_reset_eflags(DisasContext *s, target_ulong mask) +{ + TCGv t = tcg_temp_new(); + + tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags)); + tcg_gen_andi_tl(t, t, ~mask); + tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags)); + tcg_temp_free(t); +} + /* Clear BND registers during legacy branches. */ static void gen_bnd_jmp(DisasContext *s) { @@ -3144,11 +3301,9 @@ do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr) } if (s->base.tb->flags & HF_RF_MASK) { - gen_helper_reset_rf(cpu_env); + gen_reset_eflags(s, RF_MASK); } - if (s->base.singlestep_enabled) { - gen_helper_debug(cpu_env); - } else if (recheck_tf) { + if (recheck_tf) { gen_helper_rechecking_single_step(cpu_env); tcg_gen_exit_tb(NULL, 0); } else if (s->flags & HF_TF_MASK) { @@ -3181,1880 +3336,165 @@ static void gen_eob(DisasContext *s) } /* Jump to register */ -static void gen_jr(DisasContext *s, TCGv dest) +static void gen_jr(DisasContext *s) { do_gen_eob_worker(s, false, false, true); } -/* generate a jump to eip. No segment change must happen before as a - direct call to the next block may occur */ -static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num) +/* Jump to eip+diff, truncating the result to OT. 
*/ +static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num) { + bool use_goto_tb = s->jmp_opt; + target_ulong mask = -1; + target_ulong new_pc = s->pc + diff; + target_ulong new_eip = new_pc - s->cs_base; + + /* In 64-bit mode, operand size is fixed at 64 bits. */ + if (!CODE64(s)) { + if (ot == MO_16) { + mask = 0xffff; + if (TARGET_TB_PCREL && CODE32(s)) { + use_goto_tb = false; + } + } else { + mask = 0xffffffff; + } + } + new_eip &= mask; + gen_update_cc_op(s); set_cc_op(s, CC_OP_DYNAMIC); - if (s->jmp_opt) { - gen_goto_tb(s, tb_num, eip); + + if (TARGET_TB_PCREL) { + tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save); + /* + * If we can prove the branch does not leave the page and we have + * no extra masking to apply (data16 branch in code32, see above), + * then we have also proven that the addition does not wrap. + */ + if (!use_goto_tb || !is_same_page(&s->base, new_pc)) { + tcg_gen_andi_tl(cpu_eip, cpu_eip, mask); + use_goto_tb = false; + } + } + + if (use_goto_tb && + translator_use_goto_tb(&s->base, new_eip + s->cs_base)) { + /* jump to same page: we can use a direct jump */ + tcg_gen_goto_tb(tb_num); + if (!TARGET_TB_PCREL) { + tcg_gen_movi_tl(cpu_eip, new_eip); + } + tcg_gen_exit_tb(s->base.tb, tb_num); + s->base.is_jmp = DISAS_NORETURN; } else { - gen_jmp_im(s, eip); - gen_eob(s); + if (!TARGET_TB_PCREL) { + tcg_gen_movi_tl(cpu_eip, new_eip); + } + if (s->jmp_opt) { + gen_jr(s); /* jump to another page */ + } else { + gen_eob(s); /* exit to main loop */ + } } } -static void gen_jmp(DisasContext *s, target_ulong eip) +/* Jump to eip+diff, truncating to the current code size. */ +static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num) { - gen_jmp_tb(s, eip, 0); + /* CODE64 ignores the OT argument, so we need not consider it. */ + gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num); } static inline void gen_ldq_env_A0(DisasContext *s, int offset) { - tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); + tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ); tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset); } static inline void gen_stq_env_A0(DisasContext *s, int offset) { tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset); - tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); + tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ); } -static inline void gen_ldo_env_A0(DisasContext *s, int offset) +static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align) { int mem_index = s->mem_index; - tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, MO_LEQ); - tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0))); + tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, + MO_LEUQ | (align ? 
MO_ALIGN_16 : 0)); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0))); tcg_gen_addi_tl(s->tmp0, s->A0, 8); - tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEQ); - tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1))); + tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1))); } -static inline void gen_sto_env_A0(DisasContext *s, int offset) +static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align) { int mem_index = s->mem_index; - tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0))); - tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, MO_LEQ); + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0))); + tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, + MO_LEUQ | (align ? MO_ALIGN_16 : 0)); tcg_gen_addi_tl(s->tmp0, s->A0, 8); - tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1))); - tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEQ); + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1))); + tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); } -static inline void gen_op_movo(DisasContext *s, int d_offset, int s_offset) +static void gen_ldy_env_A0(DisasContext *s, int offset, bool align) { - tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0))); - tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0))); - tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1))); - tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1))); + int mem_index = s->mem_index; + tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, + MO_LEUQ | (align ? MO_ALIGN_32 : 0)); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0))); + tcg_gen_addi_tl(s->tmp0, s->A0, 8); + tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1))); + + tcg_gen_addi_tl(s->tmp0, s->A0, 16); + tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2))); + tcg_gen_addi_tl(s->tmp0, s->A0, 24); + tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3))); } -static inline void gen_op_movq(DisasContext *s, int d_offset, int s_offset) +static void gen_sty_env_A0(DisasContext *s, int offset, bool align) { - tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset); - tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset); + int mem_index = s->mem_index; + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0))); + tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, + MO_LEUQ | (align ? 
MO_ALIGN_32 : 0)); + tcg_gen_addi_tl(s->tmp0, s->A0, 8); + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1))); + tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); + tcg_gen_addi_tl(s->tmp0, s->A0, 16); + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2))); + tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); + tcg_gen_addi_tl(s->tmp0, s->A0, 24); + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3))); + tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); } -static inline void gen_op_movl(DisasContext *s, int d_offset, int s_offset) -{ - tcg_gen_ld_i32(s->tmp2_i32, cpu_env, s_offset); - tcg_gen_st_i32(s->tmp2_i32, cpu_env, d_offset); -} - -static inline void gen_op_movq_env_0(DisasContext *s, int d_offset) -{ - tcg_gen_movi_i64(s->tmp1_i64, 0); - tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset); -} - -typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg); -typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg); -typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val); -typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val); -typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b); -typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, - TCGv_i32 val); -typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val); -typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, - TCGv val); - -#define SSE_SPECIAL ((void *)1) -#define SSE_DUMMY ((void *)2) - -#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm } -#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \ - gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, } - -static const SSEFunc_0_epp sse_op_table1[256][4] = { - /* 3DNow! extensions */ - [0x0e] = { SSE_DUMMY }, /* femms */ - [0x0f] = { SSE_DUMMY }, /* pf... 
*/ - /* pure SSE operations */ - [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */ - [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */ - [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */ - [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */ - [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm }, - [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm }, - [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */ - [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */ - - [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */ - [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */ - [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */ - [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */ - [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */ - [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */ - [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd }, - [0x2f] = { gen_helper_comiss, gen_helper_comisd }, - [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */ - [0x51] = SSE_FOP(sqrt), - [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL }, - [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL }, - [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */ - [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */ - [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */ - [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */ - [0x58] = SSE_FOP(add), - [0x59] = SSE_FOP(mul), - [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps, - gen_helper_cvtss2sd, gen_helper_cvtsd2ss }, - [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq }, - [0x5c] = SSE_FOP(sub), - [0x5d] = SSE_FOP(min), - [0x5e] = SSE_FOP(div), - [0x5f] = SSE_FOP(max), - - [0xc2] = SSE_FOP(cmpeq), - [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps, - (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */ - - /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. 
*/ - [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, - [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, - - /* MMX ops and their SSE extensions */ - [0x60] = MMX_OP2(punpcklbw), - [0x61] = MMX_OP2(punpcklwd), - [0x62] = MMX_OP2(punpckldq), - [0x63] = MMX_OP2(packsswb), - [0x64] = MMX_OP2(pcmpgtb), - [0x65] = MMX_OP2(pcmpgtw), - [0x66] = MMX_OP2(pcmpgtl), - [0x67] = MMX_OP2(packuswb), - [0x68] = MMX_OP2(punpckhbw), - [0x69] = MMX_OP2(punpckhwd), - [0x6a] = MMX_OP2(punpckhdq), - [0x6b] = MMX_OP2(packssdw), - [0x6c] = { NULL, gen_helper_punpcklqdq_xmm }, - [0x6d] = { NULL, gen_helper_punpckhqdq_xmm }, - [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */ - [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movqdu */ - [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx, - (SSEFunc_0_epp)gen_helper_pshufd_xmm, - (SSEFunc_0_epp)gen_helper_pshufhw_xmm, - (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */ - [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */ - [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */ - [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */ - [0x74] = MMX_OP2(pcmpeqb), - [0x75] = MMX_OP2(pcmpeqw), - [0x76] = MMX_OP2(pcmpeql), - [0x77] = { SSE_DUMMY }, /* emms */ - [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */ - [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r }, - [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps }, - [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps }, - [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */ - [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */ - [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */ - [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */ - [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps }, - [0xd1] = MMX_OP2(psrlw), - [0xd2] = MMX_OP2(psrld), - [0xd3] = MMX_OP2(psrlq), - [0xd4] = MMX_OP2(paddq), - [0xd5] = MMX_OP2(pmullw), - [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, - [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */ - [0xd8] = MMX_OP2(psubusb), - [0xd9] = MMX_OP2(psubusw), - [0xda] = MMX_OP2(pminub), - [0xdb] = MMX_OP2(pand), - [0xdc] = MMX_OP2(paddusb), - [0xdd] = MMX_OP2(paddusw), - [0xde] = MMX_OP2(pmaxub), - [0xdf] = MMX_OP2(pandn), - [0xe0] = MMX_OP2(pavgb), - [0xe1] = MMX_OP2(psraw), - [0xe2] = MMX_OP2(psrad), - [0xe3] = MMX_OP2(pavgw), - [0xe4] = MMX_OP2(pmulhuw), - [0xe5] = MMX_OP2(pmulhw), - [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq }, - [0xe7] = { SSE_SPECIAL , SSE_SPECIAL }, /* movntq, movntq */ - [0xe8] = MMX_OP2(psubsb), - [0xe9] = MMX_OP2(psubsw), - [0xea] = MMX_OP2(pminsw), - [0xeb] = MMX_OP2(por), - [0xec] = MMX_OP2(paddsb), - [0xed] = MMX_OP2(paddsw), - [0xee] = MMX_OP2(pmaxsw), - [0xef] = MMX_OP2(pxor), - [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */ - [0xf1] = MMX_OP2(psllw), - [0xf2] = MMX_OP2(pslld), - [0xf3] = MMX_OP2(psllq), - [0xf4] = MMX_OP2(pmuludq), - [0xf5] = MMX_OP2(pmaddwd), - [0xf6] = MMX_OP2(psadbw), - [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx, - (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */ - [0xf8] = MMX_OP2(psubb), - [0xf9] = MMX_OP2(psubw), - [0xfa] = MMX_OP2(psubl), - [0xfb] = MMX_OP2(psubq), - [0xfc] = MMX_OP2(paddb), - [0xfd] = MMX_OP2(paddw), - [0xfe] = MMX_OP2(paddl), -}; - -static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = { - [0 + 2] = MMX_OP2(psrlw), - [0 + 4] = MMX_OP2(psraw), - [0 + 6] = MMX_OP2(psllw), 
- [8 + 2] = MMX_OP2(psrld), - [8 + 4] = MMX_OP2(psrad), - [8 + 6] = MMX_OP2(pslld), - [16 + 2] = MMX_OP2(psrlq), - [16 + 3] = { NULL, gen_helper_psrldq_xmm }, - [16 + 6] = MMX_OP2(psllq), - [16 + 7] = { NULL, gen_helper_pslldq_xmm }, -}; - -static const SSEFunc_0_epi sse_op_table3ai[] = { - gen_helper_cvtsi2ss, - gen_helper_cvtsi2sd -}; - -#ifdef TARGET_X86_64 -static const SSEFunc_0_epl sse_op_table3aq[] = { - gen_helper_cvtsq2ss, - gen_helper_cvtsq2sd -}; -#endif - -static const SSEFunc_i_ep sse_op_table3bi[] = { - gen_helper_cvttss2si, - gen_helper_cvtss2si, - gen_helper_cvttsd2si, - gen_helper_cvtsd2si -}; - -#ifdef TARGET_X86_64 -static const SSEFunc_l_ep sse_op_table3bq[] = { - gen_helper_cvttss2sq, - gen_helper_cvtss2sq, - gen_helper_cvttsd2sq, - gen_helper_cvtsd2sq -}; -#endif - -static const SSEFunc_0_epp sse_op_table4[8][4] = { - SSE_FOP(cmpeq), - SSE_FOP(cmplt), - SSE_FOP(cmple), - SSE_FOP(cmpunord), - SSE_FOP(cmpneq), - SSE_FOP(cmpnlt), - SSE_FOP(cmpnle), - SSE_FOP(cmpord), -}; - -static const SSEFunc_0_epp sse_op_table5[256] = { - [0x0c] = gen_helper_pi2fw, - [0x0d] = gen_helper_pi2fd, - [0x1c] = gen_helper_pf2iw, - [0x1d] = gen_helper_pf2id, - [0x8a] = gen_helper_pfnacc, - [0x8e] = gen_helper_pfpnacc, - [0x90] = gen_helper_pfcmpge, - [0x94] = gen_helper_pfmin, - [0x96] = gen_helper_pfrcp, - [0x97] = gen_helper_pfrsqrt, - [0x9a] = gen_helper_pfsub, - [0x9e] = gen_helper_pfadd, - [0xa0] = gen_helper_pfcmpgt, - [0xa4] = gen_helper_pfmax, - [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */ - [0xa7] = gen_helper_movq, /* pfrsqit1 */ - [0xaa] = gen_helper_pfsubr, - [0xae] = gen_helper_pfacc, - [0xb0] = gen_helper_pfcmpeq, - [0xb4] = gen_helper_pfmul, - [0xb6] = gen_helper_movq, /* pfrcpit2 */ - [0xb7] = gen_helper_pmulhrw_mmx, - [0xbb] = gen_helper_pswapd, - [0xbf] = gen_helper_pavgb_mmx /* pavgusb */ -}; - -struct SSEOpHelper_epp { - SSEFunc_0_epp op[2]; - uint32_t ext_mask; -}; - -struct SSEOpHelper_eppi { - SSEFunc_0_eppi op[2]; - uint32_t ext_mask; -}; - -#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 } -#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 } -#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 } -#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 } -#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \ - CPUID_EXT_PCLMULQDQ } -#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES } - -static const struct SSEOpHelper_epp sse_op_table6[256] = { - [0x00] = SSSE3_OP(pshufb), - [0x01] = SSSE3_OP(phaddw), - [0x02] = SSSE3_OP(phaddd), - [0x03] = SSSE3_OP(phaddsw), - [0x04] = SSSE3_OP(pmaddubsw), - [0x05] = SSSE3_OP(phsubw), - [0x06] = SSSE3_OP(phsubd), - [0x07] = SSSE3_OP(phsubsw), - [0x08] = SSSE3_OP(psignb), - [0x09] = SSSE3_OP(psignw), - [0x0a] = SSSE3_OP(psignd), - [0x0b] = SSSE3_OP(pmulhrsw), - [0x10] = SSE41_OP(pblendvb), - [0x14] = SSE41_OP(blendvps), - [0x15] = SSE41_OP(blendvpd), - [0x17] = SSE41_OP(ptest), - [0x1c] = SSSE3_OP(pabsb), - [0x1d] = SSSE3_OP(pabsw), - [0x1e] = SSSE3_OP(pabsd), - [0x20] = SSE41_OP(pmovsxbw), - [0x21] = SSE41_OP(pmovsxbd), - [0x22] = SSE41_OP(pmovsxbq), - [0x23] = SSE41_OP(pmovsxwd), - [0x24] = SSE41_OP(pmovsxwq), - [0x25] = SSE41_OP(pmovsxdq), - [0x28] = SSE41_OP(pmuldq), - [0x29] = SSE41_OP(pcmpeqq), - [0x2a] = SSE41_SPECIAL, /* movntqda */ - [0x2b] = SSE41_OP(packusdw), - [0x30] = SSE41_OP(pmovzxbw), - [0x31] = SSE41_OP(pmovzxbd), - [0x32] = SSE41_OP(pmovzxbq), - [0x33] = SSE41_OP(pmovzxwd), - [0x34] = 
SSE41_OP(pmovzxwq), - [0x35] = SSE41_OP(pmovzxdq), - [0x37] = SSE42_OP(pcmpgtq), - [0x38] = SSE41_OP(pminsb), - [0x39] = SSE41_OP(pminsd), - [0x3a] = SSE41_OP(pminuw), - [0x3b] = SSE41_OP(pminud), - [0x3c] = SSE41_OP(pmaxsb), - [0x3d] = SSE41_OP(pmaxsd), - [0x3e] = SSE41_OP(pmaxuw), - [0x3f] = SSE41_OP(pmaxud), - [0x40] = SSE41_OP(pmulld), - [0x41] = SSE41_OP(phminposuw), - [0xdb] = AESNI_OP(aesimc), - [0xdc] = AESNI_OP(aesenc), - [0xdd] = AESNI_OP(aesenclast), - [0xde] = AESNI_OP(aesdec), - [0xdf] = AESNI_OP(aesdeclast), -}; - -static const struct SSEOpHelper_eppi sse_op_table7[256] = { - [0x08] = SSE41_OP(roundps), - [0x09] = SSE41_OP(roundpd), - [0x0a] = SSE41_OP(roundss), - [0x0b] = SSE41_OP(roundsd), - [0x0c] = SSE41_OP(blendps), - [0x0d] = SSE41_OP(blendpd), - [0x0e] = SSE41_OP(pblendw), - [0x0f] = SSSE3_OP(palignr), - [0x14] = SSE41_SPECIAL, /* pextrb */ - [0x15] = SSE41_SPECIAL, /* pextrw */ - [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */ - [0x17] = SSE41_SPECIAL, /* extractps */ - [0x20] = SSE41_SPECIAL, /* pinsrb */ - [0x21] = SSE41_SPECIAL, /* insertps */ - [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */ - [0x40] = SSE41_OP(dpps), - [0x41] = SSE41_OP(dppd), - [0x42] = SSE41_OP(mpsadbw), - [0x44] = PCLMULQDQ_OP(pclmulqdq), - [0x60] = SSE42_OP(pcmpestrm), - [0x61] = SSE42_OP(pcmpestri), - [0x62] = SSE42_OP(pcmpistrm), - [0x63] = SSE42_OP(pcmpistri), - [0xdf] = AESNI_OP(aeskeygenassist), -}; - -static void gen_sse(CPUX86State *env, DisasContext *s, int b, - target_ulong pc_start) -{ - int b1, op1_offset, op2_offset, is_xmm, val; - int modrm, mod, rm, reg; - SSEFunc_0_epp sse_fn_epp; - SSEFunc_0_eppi sse_fn_eppi; - SSEFunc_0_ppi sse_fn_ppi; - SSEFunc_0_eppt sse_fn_eppt; - MemOp ot; - - b &= 0xff; - if (s->prefix & PREFIX_DATA) - b1 = 1; - else if (s->prefix & PREFIX_REPZ) - b1 = 2; - else if (s->prefix & PREFIX_REPNZ) - b1 = 3; - else - b1 = 0; - sse_fn_epp = sse_op_table1[b][b1]; - if (!sse_fn_epp) { - goto unknown_op; - } - if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) { - is_xmm = 1; - } else { - if (b1 == 0) { - /* MMX case */ - is_xmm = 0; - } else { - is_xmm = 1; - } - } - /* simple MMX/SSE operation */ - if (s->flags & HF_TS_MASK) { - gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); - return; - } - if (s->flags & HF_EM_MASK) { - illegal_op: - gen_illegal_opcode(s); - return; - } - if (is_xmm - && !(s->flags & HF_OSFXSR_MASK) - && (b != 0x38 && b != 0x3a)) { - goto unknown_op; - } - if (b == 0x0e) { - if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) { - /* If we were fully decoding this we might use illegal_op. 
*/ - goto unknown_op; - } - /* femms */ - gen_helper_emms(cpu_env); - return; - } - if (b == 0x77) { - /* emms */ - gen_helper_emms(cpu_env); - return; - } - /* prepare MMX state (XXX: optimize by storing fptt and fptags in - the static cpu state) */ - if (!is_xmm) { - gen_enter_mmx(s); - } - - modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7); - if (is_xmm) { - reg |= REX_R(s); - } - mod = (modrm >> 6) & 3; - if (sse_fn_epp == SSE_SPECIAL) { - b |= (b1 << 8); - switch(b) { - case 0x0e7: /* movntq */ - if (mod == 3) { - goto illegal_op; - } - gen_lea_modrm(env, s, modrm); - gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); - break; - case 0x1e7: /* movntdq */ - case 0x02b: /* movntps */ - case 0x12b: /* movntps */ - if (mod == 3) - goto illegal_op; - gen_lea_modrm(env, s, modrm); - gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); - break; - case 0x3f0: /* lddqu */ - if (mod == 3) - goto illegal_op; - gen_lea_modrm(env, s, modrm); - gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); - break; - case 0x22b: /* movntss */ - case 0x32b: /* movntsd */ - if (mod == 3) - goto illegal_op; - gen_lea_modrm(env, s, modrm); - if (b1 & 1) { - gen_stq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].ZMM_Q(0))); - } else { - tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, - xmm_regs[reg].ZMM_L(0))); - gen_op_st_v(s, MO_32, s->T0, s->A0); - } - break; - case 0x6e: /* movd mm, ea */ -#ifdef TARGET_X86_64 - if (s->dflag == MO_64) { - gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0); - tcg_gen_st_tl(s->T0, cpu_env, - offsetof(CPUX86State, fpregs[reg].mmx)); - } else -#endif - { - gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); - tcg_gen_addi_ptr(s->ptr0, cpu_env, - offsetof(CPUX86State,fpregs[reg].mmx)); - tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - gen_helper_movl_mm_T0_mmx(s->ptr0, s->tmp2_i32); - } - break; - case 0x16e: /* movd xmm, ea */ -#ifdef TARGET_X86_64 - if (s->dflag == MO_64) { - gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0); - tcg_gen_addi_ptr(s->ptr0, cpu_env, - offsetof(CPUX86State,xmm_regs[reg])); - gen_helper_movq_mm_T0_xmm(s->ptr0, s->T0); - } else -#endif - { - gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); - tcg_gen_addi_ptr(s->ptr0, cpu_env, - offsetof(CPUX86State,xmm_regs[reg])); - tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - gen_helper_movl_mm_T0_xmm(s->ptr0, s->tmp2_i32); - } - break; - case 0x6f: /* movq mm, ea */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); - } else { - rm = (modrm & 7); - tcg_gen_ld_i64(s->tmp1_i64, cpu_env, - offsetof(CPUX86State,fpregs[rm].mmx)); - tcg_gen_st_i64(s->tmp1_i64, cpu_env, - offsetof(CPUX86State,fpregs[reg].mmx)); - } - break; - case 0x010: /* movups */ - case 0x110: /* movupd */ - case 0x028: /* movaps */ - case 0x128: /* movapd */ - case 0x16f: /* movdqa xmm, ea */ - case 0x26f: /* movdqu xmm, ea */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movo(s, offsetof(CPUX86State, xmm_regs[reg]), - offsetof(CPUX86State,xmm_regs[rm])); - } - break; - case 0x210: /* movss xmm, ea */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, MO_32, s->T0, s->A0); - tcg_gen_st32_tl(s->T0, cpu_env, - offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0))); - tcg_gen_movi_tl(s->T0, 0); - tcg_gen_st32_tl(s->T0, cpu_env, - offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1))); - tcg_gen_st32_tl(s->T0, cpu_env, - offsetof(CPUX86State, 
xmm_regs[reg].ZMM_L(2))); - tcg_gen_st32_tl(s->T0, cpu_env, - offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3))); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)), - offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0))); - } - break; - case 0x310: /* movsd xmm, ea */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_ldq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].ZMM_Q(0))); - tcg_gen_movi_tl(s->T0, 0); - tcg_gen_st32_tl(s->T0, cpu_env, - offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2))); - tcg_gen_st32_tl(s->T0, cpu_env, - offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3))); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), - offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); - } - break; - case 0x012: /* movlps */ - case 0x112: /* movlpd */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_ldq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].ZMM_Q(0))); - } else { - /* movhlps */ - rm = (modrm & 7) | REX_B(s); - gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), - offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1))); - } - break; - case 0x212: /* movsldup */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)), - offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0))); - gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2)), - offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2))); - } - gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1)), - offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0))); - gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3)), - offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2))); - break; - case 0x312: /* movddup */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_ldq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].ZMM_Q(0))); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), - offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); - } - gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1)), - offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0))); - break; - case 0x016: /* movhps */ - case 0x116: /* movhpd */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_ldq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].ZMM_Q(1))); - } else { - /* movlhps */ - rm = (modrm & 7) | REX_B(s); - gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1)), - offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); - } - break; - case 0x216: /* movshdup */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1)), - offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1))); - gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3)), - offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3))); - } - gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)), - offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1))); - gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2)), - offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3))); - break; - case 0x178: - case 0x378: - { - int bit_index, field_length; - - if (b1 == 1 && reg != 0) - goto illegal_op; - field_length = x86_ldub_code(env, s) & 0x3F; - bit_index = x86_ldub_code(env, s) & 0x3F; - tcg_gen_addi_ptr(s->ptr0, cpu_env, - offsetof(CPUX86State,xmm_regs[reg])); - if (b1 == 1) - gen_helper_extrq_i(cpu_env, 
s->ptr0, - tcg_const_i32(bit_index), - tcg_const_i32(field_length)); - else - gen_helper_insertq_i(cpu_env, s->ptr0, - tcg_const_i32(bit_index), - tcg_const_i32(field_length)); - } - break; - case 0x7e: /* movd ea, mm */ -#ifdef TARGET_X86_64 - if (s->dflag == MO_64) { - tcg_gen_ld_i64(s->T0, cpu_env, - offsetof(CPUX86State,fpregs[reg].mmx)); - gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1); - } else -#endif - { - tcg_gen_ld32u_tl(s->T0, cpu_env, - offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0))); - gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1); - } - break; - case 0x17e: /* movd ea, xmm */ -#ifdef TARGET_X86_64 - if (s->dflag == MO_64) { - tcg_gen_ld_i64(s->T0, cpu_env, - offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0))); - gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1); - } else -#endif - { - tcg_gen_ld32u_tl(s->T0, cpu_env, - offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0))); - gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1); - } - break; - case 0x27e: /* movq xmm, ea */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_ldq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].ZMM_Q(0))); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), - offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); - } - gen_op_movq_env_0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1))); - break; - case 0x7f: /* movq ea, mm */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); - } else { - rm = (modrm & 7); - gen_op_movq(s, offsetof(CPUX86State, fpregs[rm].mmx), - offsetof(CPUX86State,fpregs[reg].mmx)); - } - break; - case 0x011: /* movups */ - case 0x111: /* movupd */ - case 0x029: /* movaps */ - case 0x129: /* movapd */ - case 0x17f: /* movdqa ea, xmm */ - case 0x27f: /* movdqu ea, xmm */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movo(s, offsetof(CPUX86State, xmm_regs[rm]), - offsetof(CPUX86State,xmm_regs[reg])); - } - break; - case 0x211: /* movss ea, xmm */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - tcg_gen_ld32u_tl(s->T0, cpu_env, - offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0))); - gen_op_st_v(s, MO_32, s->T0, s->A0); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movl(s, offsetof(CPUX86State, xmm_regs[rm].ZMM_L(0)), - offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0))); - } - break; - case 0x311: /* movsd ea, xmm */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_stq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].ZMM_Q(0))); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movq(s, offsetof(CPUX86State, xmm_regs[rm].ZMM_Q(0)), - offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0))); - } - break; - case 0x013: /* movlps */ - case 0x113: /* movlpd */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_stq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].ZMM_Q(0))); - } else { - goto illegal_op; - } - break; - case 0x017: /* movhps */ - case 0x117: /* movhpd */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_stq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].ZMM_Q(1))); - } else { - goto illegal_op; - } - break; - case 0x71: /* shift mm, im */ - case 0x72: - case 0x73: - case 0x171: /* shift xmm, im */ - case 0x172: - case 0x173: - if (b1 >= 2) { - goto unknown_op; - } - val = x86_ldub_code(env, s); - if (is_xmm) { - tcg_gen_movi_tl(s->T0, val); - tcg_gen_st32_tl(s->T0, cpu_env, - offsetof(CPUX86State, xmm_t0.ZMM_L(0))); - tcg_gen_movi_tl(s->T0, 0); - 
tcg_gen_st32_tl(s->T0, cpu_env, - offsetof(CPUX86State, xmm_t0.ZMM_L(1))); - op1_offset = offsetof(CPUX86State,xmm_t0); - } else { - tcg_gen_movi_tl(s->T0, val); - tcg_gen_st32_tl(s->T0, cpu_env, - offsetof(CPUX86State, mmx_t0.MMX_L(0))); - tcg_gen_movi_tl(s->T0, 0); - tcg_gen_st32_tl(s->T0, cpu_env, - offsetof(CPUX86State, mmx_t0.MMX_L(1))); - op1_offset = offsetof(CPUX86State,mmx_t0); - } - sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 + - (((modrm >> 3)) & 7)][b1]; - if (!sse_fn_epp) { - goto unknown_op; - } - if (is_xmm) { - rm = (modrm & 7) | REX_B(s); - op2_offset = offsetof(CPUX86State,xmm_regs[rm]); - } else { - rm = (modrm & 7); - op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); - } - tcg_gen_addi_ptr(s->ptr0, cpu_env, op2_offset); - tcg_gen_addi_ptr(s->ptr1, cpu_env, op1_offset); - sse_fn_epp(cpu_env, s->ptr0, s->ptr1); - break; - case 0x050: /* movmskps */ - rm = (modrm & 7) | REX_B(s); - tcg_gen_addi_ptr(s->ptr0, cpu_env, - offsetof(CPUX86State,xmm_regs[rm])); - gen_helper_movmskps(s->tmp2_i32, cpu_env, s->ptr0); - tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32); - break; - case 0x150: /* movmskpd */ - rm = (modrm & 7) | REX_B(s); - tcg_gen_addi_ptr(s->ptr0, cpu_env, - offsetof(CPUX86State,xmm_regs[rm])); - gen_helper_movmskpd(s->tmp2_i32, cpu_env, s->ptr0); - tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32); - break; - case 0x02a: /* cvtpi2ps */ - case 0x12a: /* cvtpi2pd */ - gen_enter_mmx(s); - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - op2_offset = offsetof(CPUX86State,mmx_t0); - gen_ldq_env_A0(s, op2_offset); - } else { - rm = (modrm & 7); - op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); - } - op1_offset = offsetof(CPUX86State,xmm_regs[reg]); - tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset); - switch(b >> 8) { - case 0x0: - gen_helper_cvtpi2ps(cpu_env, s->ptr0, s->ptr1); - break; - default: - case 0x1: - gen_helper_cvtpi2pd(cpu_env, s->ptr0, s->ptr1); - break; - } - break; - case 0x22a: /* cvtsi2ss */ - case 0x32a: /* cvtsi2sd */ - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - op1_offset = offsetof(CPUX86State,xmm_regs[reg]); - tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - if (ot == MO_32) { - SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1]; - tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - sse_fn_epi(cpu_env, s->ptr0, s->tmp2_i32); - } else { -#ifdef TARGET_X86_64 - SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1]; - sse_fn_epl(cpu_env, s->ptr0, s->T0); -#else - goto illegal_op; -#endif - } - break; - case 0x02c: /* cvttps2pi */ - case 0x12c: /* cvttpd2pi */ - case 0x02d: /* cvtps2pi */ - case 0x12d: /* cvtpd2pi */ - gen_enter_mmx(s); - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - op2_offset = offsetof(CPUX86State,xmm_t0); - gen_ldo_env_A0(s, op2_offset); - } else { - rm = (modrm & 7) | REX_B(s); - op2_offset = offsetof(CPUX86State,xmm_regs[rm]); - } - op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx); - tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset); - switch(b) { - case 0x02c: - gen_helper_cvttps2pi(cpu_env, s->ptr0, s->ptr1); - break; - case 0x12c: - gen_helper_cvttpd2pi(cpu_env, s->ptr0, s->ptr1); - break; - case 0x02d: - gen_helper_cvtps2pi(cpu_env, s->ptr0, s->ptr1); - break; - case 0x12d: - gen_helper_cvtpd2pi(cpu_env, s->ptr0, s->ptr1); - break; - } - break; - case 0x22c: /* cvttss2si */ - case 0x32c: /* cvttsd2si */ - case 0x22d: /* cvtss2si */ - case 0x32d: /* cvtsd2si */ - ot = mo_64_32(s->dflag); 
- if (mod != 3) { - gen_lea_modrm(env, s, modrm); - if ((b >> 8) & 1) { - gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0))); - } else { - gen_op_ld_v(s, MO_32, s->T0, s->A0); - tcg_gen_st32_tl(s->T0, cpu_env, - offsetof(CPUX86State, xmm_t0.ZMM_L(0))); - } - op2_offset = offsetof(CPUX86State,xmm_t0); - } else { - rm = (modrm & 7) | REX_B(s); - op2_offset = offsetof(CPUX86State,xmm_regs[rm]); - } - tcg_gen_addi_ptr(s->ptr0, cpu_env, op2_offset); - if (ot == MO_32) { - SSEFunc_i_ep sse_fn_i_ep = - sse_op_table3bi[((b >> 7) & 2) | (b & 1)]; - sse_fn_i_ep(s->tmp2_i32, cpu_env, s->ptr0); - tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); - } else { -#ifdef TARGET_X86_64 - SSEFunc_l_ep sse_fn_l_ep = - sse_op_table3bq[((b >> 7) & 2) | (b & 1)]; - sse_fn_l_ep(s->T0, cpu_env, s->ptr0); -#else - goto illegal_op; -#endif - } - gen_op_mov_reg_v(s, ot, reg, s->T0); - break; - case 0xc4: /* pinsrw */ - case 0x1c4: - s->rip_offset = 1; - gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); - val = x86_ldub_code(env, s); - if (b1) { - val &= 7; - tcg_gen_st16_tl(s->T0, cpu_env, - offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val))); - } else { - val &= 3; - tcg_gen_st16_tl(s->T0, cpu_env, - offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val))); - } - break; - case 0xc5: /* pextrw */ - case 0x1c5: - if (mod != 3) - goto illegal_op; - ot = mo_64_32(s->dflag); - val = x86_ldub_code(env, s); - if (b1) { - val &= 7; - rm = (modrm & 7) | REX_B(s); - tcg_gen_ld16u_tl(s->T0, cpu_env, - offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val))); - } else { - val &= 3; - rm = (modrm & 7); - tcg_gen_ld16u_tl(s->T0, cpu_env, - offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val))); - } - reg = ((modrm >> 3) & 7) | REX_R(s); - gen_op_mov_reg_v(s, ot, reg, s->T0); - break; - case 0x1d6: /* movq ea, xmm */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_stq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].ZMM_Q(0))); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movq(s, offsetof(CPUX86State, xmm_regs[rm].ZMM_Q(0)), - offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0))); - gen_op_movq_env_0(s, - offsetof(CPUX86State, xmm_regs[rm].ZMM_Q(1))); - } - break; - case 0x2d6: /* movq2dq */ - gen_enter_mmx(s); - rm = (modrm & 7); - gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), - offsetof(CPUX86State,fpregs[rm].mmx)); - gen_op_movq_env_0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1))); - break; - case 0x3d6: /* movdq2q */ - gen_enter_mmx(s); - rm = (modrm & 7) | REX_B(s); - gen_op_movq(s, offsetof(CPUX86State, fpregs[reg & 7].mmx), - offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); - break; - case 0xd7: /* pmovmskb */ - case 0x1d7: - if (mod != 3) - goto illegal_op; - if (b1) { - rm = (modrm & 7) | REX_B(s); - tcg_gen_addi_ptr(s->ptr0, cpu_env, - offsetof(CPUX86State, xmm_regs[rm])); - gen_helper_pmovmskb_xmm(s->tmp2_i32, cpu_env, s->ptr0); - } else { - rm = (modrm & 7); - tcg_gen_addi_ptr(s->ptr0, cpu_env, - offsetof(CPUX86State, fpregs[rm].mmx)); - gen_helper_pmovmskb_mmx(s->tmp2_i32, cpu_env, s->ptr0); - } - reg = ((modrm >> 3) & 7) | REX_R(s); - tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32); - break; - - case 0x138: - case 0x038: - b = modrm; - if ((b & 0xf0) == 0xf0) { - goto do_0f_38_fx; - } - modrm = x86_ldub_code(env, s); - rm = modrm & 7; - reg = ((modrm >> 3) & 7) | REX_R(s); - mod = (modrm >> 6) & 3; - if (b1 >= 2) { - goto unknown_op; - } - - sse_fn_epp = sse_op_table6[b].op[b1]; - if (!sse_fn_epp) { - goto unknown_op; - } - if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask)) - goto illegal_op; - - if (b1) { - op1_offset 
= offsetof(CPUX86State,xmm_regs[reg]); - if (mod == 3) { - op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]); - } else { - op2_offset = offsetof(CPUX86State,xmm_t0); - gen_lea_modrm(env, s, modrm); - switch (b) { - case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */ - case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */ - case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */ - gen_ldq_env_A0(s, op2_offset + - offsetof(ZMMReg, ZMM_Q(0))); - break; - case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */ - case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */ - tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, - s->mem_index, MO_LEUL); - tcg_gen_st_i32(s->tmp2_i32, cpu_env, op2_offset + - offsetof(ZMMReg, ZMM_L(0))); - break; - case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */ - tcg_gen_qemu_ld_tl(s->tmp0, s->A0, - s->mem_index, MO_LEUW); - tcg_gen_st16_tl(s->tmp0, cpu_env, op2_offset + - offsetof(ZMMReg, ZMM_W(0))); - break; - case 0x2a: /* movntqda */ - gen_ldo_env_A0(s, op1_offset); - return; - default: - gen_ldo_env_A0(s, op2_offset); - } - } - } else { - op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); - if (mod == 3) { - op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); - } else { - op2_offset = offsetof(CPUX86State,mmx_t0); - gen_lea_modrm(env, s, modrm); - gen_ldq_env_A0(s, op2_offset); - } - } - if (sse_fn_epp == SSE_SPECIAL) { - goto unknown_op; - } - - tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset); - sse_fn_epp(cpu_env, s->ptr0, s->ptr1); - - if (b == 0x17) { - set_cc_op(s, CC_OP_EFLAGS); - } - break; - - case 0x238: - case 0x338: - do_0f_38_fx: - /* Various integer extensions at 0f 38 f[0-f]. */ - b = modrm | (b1 << 8); - modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | REX_R(s); - - switch (b) { - case 0x3f0: /* crc32 Gd,Eb */ - case 0x3f1: /* crc32 Gd,Ey */ - do_crc32: - if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) { - goto illegal_op; - } - if ((b & 0xff) == 0xf0) { - ot = MO_8; - } else if (s->dflag != MO_64) { - ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32); - } else { - ot = MO_64; - } - - tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[reg]); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - gen_helper_crc32(s->T0, s->tmp2_i32, - s->T0, tcg_const_i32(8 << ot)); - - ot = mo_64_32(s->dflag); - gen_op_mov_reg_v(s, ot, reg, s->T0); - break; - - case 0x1f0: /* crc32 or movbe */ - case 0x1f1: - /* For these insns, the f3 prefix is supposed to have priority - over the 66 prefix, but that's not what we implement above - setting b1. */ - if (s->prefix & PREFIX_REPNZ) { - goto do_crc32; - } - /* FALLTHRU */ - case 0x0f0: /* movbe Gy,My */ - case 0x0f1: /* movbe My,Gy */ - if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) { - goto illegal_op; - } - if (s->dflag != MO_64) { - ot = (s->prefix & PREFIX_DATA ? 
MO_16 : MO_32); - } else { - ot = MO_64; - } - - gen_lea_modrm(env, s, modrm); - if ((b & 1) == 0) { - tcg_gen_qemu_ld_tl(s->T0, s->A0, - s->mem_index, ot | MO_BE); - gen_op_mov_reg_v(s, ot, reg, s->T0); - } else { - tcg_gen_qemu_st_tl(cpu_regs[reg], s->A0, - s->mem_index, ot | MO_BE); - } - break; - - case 0x0f2: /* andn Gy, By, Ey */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1) - || !(s->prefix & PREFIX_VEX) - || s->vex_l != 0) { - goto illegal_op; - } - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - tcg_gen_andc_tl(s->T0, s->T0, cpu_regs[s->vex_v]); - gen_op_mov_reg_v(s, ot, reg, s->T0); - gen_op_update1_cc(s); - set_cc_op(s, CC_OP_LOGICB + ot); - break; - - case 0x0f7: /* bextr Gy, Ey, By */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1) - || !(s->prefix & PREFIX_VEX) - || s->vex_l != 0) { - goto illegal_op; - } - ot = mo_64_32(s->dflag); - { - TCGv bound, zero; - - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - /* Extract START, and shift the operand. - Shifts larger than operand size get zeros. */ - tcg_gen_ext8u_tl(s->A0, cpu_regs[s->vex_v]); - tcg_gen_shr_tl(s->T0, s->T0, s->A0); - - bound = tcg_const_tl(ot == MO_64 ? 63 : 31); - zero = tcg_const_tl(0); - tcg_gen_movcond_tl(TCG_COND_LEU, s->T0, s->A0, bound, - s->T0, zero); - tcg_temp_free(zero); - - /* Extract the LEN into a mask. Lengths larger than - operand size get all ones. */ - tcg_gen_extract_tl(s->A0, cpu_regs[s->vex_v], 8, 8); - tcg_gen_movcond_tl(TCG_COND_LEU, s->A0, s->A0, bound, - s->A0, bound); - tcg_temp_free(bound); - tcg_gen_movi_tl(s->T1, 1); - tcg_gen_shl_tl(s->T1, s->T1, s->A0); - tcg_gen_subi_tl(s->T1, s->T1, 1); - tcg_gen_and_tl(s->T0, s->T0, s->T1); - - gen_op_mov_reg_v(s, ot, reg, s->T0); - gen_op_update1_cc(s); - set_cc_op(s, CC_OP_LOGICB + ot); - } - break; - - case 0x0f5: /* bzhi Gy, Ey, By */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) - || !(s->prefix & PREFIX_VEX) - || s->vex_l != 0) { - goto illegal_op; - } - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - tcg_gen_ext8u_tl(s->T1, cpu_regs[s->vex_v]); - { - TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31); - /* Note that since we're using BMILG (in order to get O - cleared) we need to store the inverse into C. 
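For reference, a minimal scalar sketch of the two BMI1 operations handled just above (ANDN and BEXTR), assuming only their usual definitions. The function names are invented for illustration, and out-of-range START/LEN values are simplified to a clamp at the operand size.

#include <stdint.h>

/* Illustrative only, not part of the patch.  andn_ref(): ~first source
 * ANDed with the second source.  bextr_ref(): extract LEN bits starting
 * at bit START, where START is byte 0 and LEN is byte 1 of the control
 * operand; values past bit 63 are clamped here. */
static uint64_t andn_ref(uint64_t src1, uint64_t src2)
{
    return ~src1 & src2;
}

static uint64_t bextr_ref(uint64_t src, uint64_t ctl)
{
    unsigned start = ctl & 0xff;
    unsigned len = (ctl >> 8) & 0xff;
    uint64_t field = start > 63 ? 0 : src >> start;
    uint64_t mask = len > 63 ? UINT64_MAX : ((uint64_t)1 << len) - 1;
    return field & mask;
}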
*/ - tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src, - s->T1, bound); - tcg_gen_movcond_tl(TCG_COND_GT, s->T1, s->T1, - bound, bound, s->T1); - tcg_temp_free(bound); - } - tcg_gen_movi_tl(s->A0, -1); - tcg_gen_shl_tl(s->A0, s->A0, s->T1); - tcg_gen_andc_tl(s->T0, s->T0, s->A0); - gen_op_mov_reg_v(s, ot, reg, s->T0); - gen_op_update1_cc(s); - set_cc_op(s, CC_OP_BMILGB + ot); - break; - - case 0x3f6: /* mulx By, Gy, rdx, Ey */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) - || !(s->prefix & PREFIX_VEX) - || s->vex_l != 0) { - goto illegal_op; - } - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - switch (ot) { - default: - tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EDX]); - tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32, - s->tmp2_i32, s->tmp3_i32); - tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], s->tmp2_i32); - tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp3_i32); - break; -#ifdef TARGET_X86_64 - case MO_64: - tcg_gen_mulu2_i64(s->T0, s->T1, - s->T0, cpu_regs[R_EDX]); - tcg_gen_mov_i64(cpu_regs[s->vex_v], s->T0); - tcg_gen_mov_i64(cpu_regs[reg], s->T1); - break; -#endif - } - break; - - case 0x3f5: /* pdep Gy, By, Ey */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) - || !(s->prefix & PREFIX_VEX) - || s->vex_l != 0) { - goto illegal_op; - } - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - /* Note that by zero-extending the source operand, we - automatically handle zero-extending the result. */ - if (ot == MO_64) { - tcg_gen_mov_tl(s->T1, cpu_regs[s->vex_v]); - } else { - tcg_gen_ext32u_tl(s->T1, cpu_regs[s->vex_v]); - } - gen_helper_pdep(cpu_regs[reg], s->T1, s->T0); - break; - - case 0x2f5: /* pext Gy, By, Ey */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) - || !(s->prefix & PREFIX_VEX) - || s->vex_l != 0) { - goto illegal_op; - } - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - /* Note that by zero-extending the source operand, we - automatically handle zero-extending the result. */ - if (ot == MO_64) { - tcg_gen_mov_tl(s->T1, cpu_regs[s->vex_v]); - } else { - tcg_gen_ext32u_tl(s->T1, cpu_regs[s->vex_v]); - } - gen_helper_pext(cpu_regs[reg], s->T1, s->T0); - break; - - case 0x1f6: /* adcx Gy, Ey */ - case 0x2f6: /* adox Gy, Ey */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) { - goto illegal_op; - } else { - TCGv carry_in, carry_out, zero; - int end_op; - - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - - /* Re-use the carry-out from a previous round. */ - carry_in = NULL; - carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2); - switch (s->cc_op) { - case CC_OP_ADCX: - if (b == 0x1f6) { - carry_in = cpu_cc_dst; - end_op = CC_OP_ADCX; - } else { - end_op = CC_OP_ADCOX; - } - break; - case CC_OP_ADOX: - if (b == 0x1f6) { - end_op = CC_OP_ADCOX; - } else { - carry_in = cpu_cc_src2; - end_op = CC_OP_ADOX; - } - break; - case CC_OP_ADCOX: - end_op = CC_OP_ADCOX; - carry_in = carry_out; - break; - default: - end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX); - break; - } - /* If we can't reuse carry-out, get it out of EFLAGS. */ - if (!carry_in) { - if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) { - gen_compute_eflags(s); - } - carry_in = s->tmp0; - tcg_gen_extract_tl(carry_in, cpu_cc_src, - ctz32(b == 0x1f6 ? CC_C : CC_O), 1); - } - - switch (ot) { -#ifdef TARGET_X86_64 - case MO_32: - /* If we know TL is 64-bit, and we want a 32-bit - result, just do everything in 64-bit arithmetic. 
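As a rough model of what gen_helper_pdep() and gen_helper_pext() are expected to compute for the PDEP/PEXT cases above, here is the customary bit scatter/gather loop; the _ref names are illustrative, not QEMU helpers.

#include <stdint.h>

/* Illustrative only.  pdep_ref(): deposit the low-order bits of src into
 * the positions of the set bits of mask.  pext_ref(): gather the bits of
 * src selected by mask into the low-order bits of the result. */
static uint64_t pdep_ref(uint64_t src, uint64_t mask)
{
    uint64_t res = 0;
    for (uint64_t bit = 1; mask; bit <<= 1) {
        uint64_t lowest = mask & -mask;   /* next set position in mask */
        if (src & bit) {
            res |= lowest;
        }
        mask &= mask - 1;                 /* clear that mask bit */
    }
    return res;
}

static uint64_t pext_ref(uint64_t src, uint64_t mask)
{
    uint64_t res = 0;
    for (uint64_t bit = 1; mask; bit <<= 1) {
        uint64_t lowest = mask & -mask;
        if (src & lowest) {
            res |= bit;
        }
        mask &= mask - 1;
    }
    return res;
}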
*/ - tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]); - tcg_gen_ext32u_i64(s->T0, s->T0); - tcg_gen_add_i64(s->T0, s->T0, cpu_regs[reg]); - tcg_gen_add_i64(s->T0, s->T0, carry_in); - tcg_gen_ext32u_i64(cpu_regs[reg], s->T0); - tcg_gen_shri_i64(carry_out, s->T0, 32); - break; -#endif - default: - /* Otherwise compute the carry-out in two steps. */ - zero = tcg_const_tl(0); - tcg_gen_add2_tl(s->T0, carry_out, - s->T0, zero, - carry_in, zero); - tcg_gen_add2_tl(cpu_regs[reg], carry_out, - cpu_regs[reg], carry_out, - s->T0, zero); - tcg_temp_free(zero); - break; - } - set_cc_op(s, end_op); - } - break; - - case 0x1f7: /* shlx Gy, Ey, By */ - case 0x2f7: /* sarx Gy, Ey, By */ - case 0x3f7: /* shrx Gy, Ey, By */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) - || !(s->prefix & PREFIX_VEX) - || s->vex_l != 0) { - goto illegal_op; - } - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - if (ot == MO_64) { - tcg_gen_andi_tl(s->T1, cpu_regs[s->vex_v], 63); - } else { - tcg_gen_andi_tl(s->T1, cpu_regs[s->vex_v], 31); - } - if (b == 0x1f7) { - tcg_gen_shl_tl(s->T0, s->T0, s->T1); - } else if (b == 0x2f7) { - if (ot != MO_64) { - tcg_gen_ext32s_tl(s->T0, s->T0); - } - tcg_gen_sar_tl(s->T0, s->T0, s->T1); - } else { - if (ot != MO_64) { - tcg_gen_ext32u_tl(s->T0, s->T0); - } - tcg_gen_shr_tl(s->T0, s->T0, s->T1); - } - gen_op_mov_reg_v(s, ot, reg, s->T0); - break; - - case 0x0f3: - case 0x1f3: - case 0x2f3: - case 0x3f3: /* Group 17 */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1) - || !(s->prefix & PREFIX_VEX) - || s->vex_l != 0) { - goto illegal_op; - } - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - - tcg_gen_mov_tl(cpu_cc_src, s->T0); - switch (reg & 7) { - case 1: /* blsr By,Ey */ - tcg_gen_subi_tl(s->T1, s->T0, 1); - tcg_gen_and_tl(s->T0, s->T0, s->T1); - break; - case 2: /* blsmsk By,Ey */ - tcg_gen_subi_tl(s->T1, s->T0, 1); - tcg_gen_xor_tl(s->T0, s->T0, s->T1); - break; - case 3: /* blsi By, Ey */ - tcg_gen_neg_tl(s->T1, s->T0); - tcg_gen_and_tl(s->T0, s->T0, s->T1); - break; - default: - goto unknown_op; - } - tcg_gen_mov_tl(cpu_cc_dst, s->T0); - gen_op_mov_reg_v(s, ot, s->vex_v, s->T0); - set_cc_op(s, CC_OP_BMILGB + ot); - break; - - default: - goto unknown_op; - } - break; - - case 0x03a: - case 0x13a: - b = modrm; - modrm = x86_ldub_code(env, s); - rm = modrm & 7; - reg = ((modrm >> 3) & 7) | REX_R(s); - mod = (modrm >> 6) & 3; - if (b1 >= 2) { - goto unknown_op; - } - - sse_fn_eppi = sse_op_table7[b].op[b1]; - if (!sse_fn_eppi) { - goto unknown_op; - } - if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask)) - goto illegal_op; - - s->rip_offset = 1; - - if (sse_fn_eppi == SSE_SPECIAL) { - ot = mo_64_32(s->dflag); - rm = (modrm & 7) | REX_B(s); - if (mod != 3) - gen_lea_modrm(env, s, modrm); - reg = ((modrm >> 3) & 7) | REX_R(s); - val = x86_ldub_code(env, s); - switch (b) { - case 0x14: /* pextrb */ - tcg_gen_ld8u_tl(s->T0, cpu_env, offsetof(CPUX86State, - xmm_regs[reg].ZMM_B(val & 15))); - if (mod == 3) { - gen_op_mov_reg_v(s, ot, rm, s->T0); - } else { - tcg_gen_qemu_st_tl(s->T0, s->A0, - s->mem_index, MO_UB); - } - break; - case 0x15: /* pextrw */ - tcg_gen_ld16u_tl(s->T0, cpu_env, offsetof(CPUX86State, - xmm_regs[reg].ZMM_W(val & 7))); - if (mod == 3) { - gen_op_mov_reg_v(s, ot, rm, s->T0); - } else { - tcg_gen_qemu_st_tl(s->T0, s->A0, - s->mem_index, MO_LEUW); - } - break; - case 0x16: - if (ot == MO_32) { /* pextrd */ - tcg_gen_ld_i32(s->tmp2_i32, cpu_env, - offsetof(CPUX86State, - xmm_regs[reg].ZMM_L(val & 
3))); - if (mod == 3) { - tcg_gen_extu_i32_tl(cpu_regs[rm], s->tmp2_i32); - } else { - tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, - s->mem_index, MO_LEUL); - } - } else { /* pextrq */ -#ifdef TARGET_X86_64 - tcg_gen_ld_i64(s->tmp1_i64, cpu_env, - offsetof(CPUX86State, - xmm_regs[reg].ZMM_Q(val & 1))); - if (mod == 3) { - tcg_gen_mov_i64(cpu_regs[rm], s->tmp1_i64); - } else { - tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, - s->mem_index, MO_LEQ); - } -#else - goto illegal_op; -#endif - } - break; - case 0x17: /* extractps */ - tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, - xmm_regs[reg].ZMM_L(val & 3))); - if (mod == 3) { - gen_op_mov_reg_v(s, ot, rm, s->T0); - } else { - tcg_gen_qemu_st_tl(s->T0, s->A0, - s->mem_index, MO_LEUL); - } - break; - case 0x20: /* pinsrb */ - if (mod == 3) { - gen_op_mov_v_reg(s, MO_32, s->T0, rm); - } else { - tcg_gen_qemu_ld_tl(s->T0, s->A0, - s->mem_index, MO_UB); - } - tcg_gen_st8_tl(s->T0, cpu_env, offsetof(CPUX86State, - xmm_regs[reg].ZMM_B(val & 15))); - break; - case 0x21: /* insertps */ - if (mod == 3) { - tcg_gen_ld_i32(s->tmp2_i32, cpu_env, - offsetof(CPUX86State,xmm_regs[rm] - .ZMM_L((val >> 6) & 3))); - } else { - tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, - s->mem_index, MO_LEUL); - } - tcg_gen_st_i32(s->tmp2_i32, cpu_env, - offsetof(CPUX86State,xmm_regs[reg] - .ZMM_L((val >> 4) & 3))); - if ((val >> 0) & 1) - tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/), - cpu_env, offsetof(CPUX86State, - xmm_regs[reg].ZMM_L(0))); - if ((val >> 1) & 1) - tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/), - cpu_env, offsetof(CPUX86State, - xmm_regs[reg].ZMM_L(1))); - if ((val >> 2) & 1) - tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/), - cpu_env, offsetof(CPUX86State, - xmm_regs[reg].ZMM_L(2))); - if ((val >> 3) & 1) - tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/), - cpu_env, offsetof(CPUX86State, - xmm_regs[reg].ZMM_L(3))); - break; - case 0x22: - if (ot == MO_32) { /* pinsrd */ - if (mod == 3) { - tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[rm]); - } else { - tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, - s->mem_index, MO_LEUL); - } - tcg_gen_st_i32(s->tmp2_i32, cpu_env, - offsetof(CPUX86State, - xmm_regs[reg].ZMM_L(val & 3))); - } else { /* pinsrq */ -#ifdef TARGET_X86_64 - if (mod == 3) { - gen_op_mov_v_reg(s, ot, s->tmp1_i64, rm); - } else { - tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, - s->mem_index, MO_LEQ); - } - tcg_gen_st_i64(s->tmp1_i64, cpu_env, - offsetof(CPUX86State, - xmm_regs[reg].ZMM_Q(val & 1))); -#else - goto illegal_op; -#endif - } - break; - } - return; - } - - if (b1) { - op1_offset = offsetof(CPUX86State,xmm_regs[reg]); - if (mod == 3) { - op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]); - } else { - op2_offset = offsetof(CPUX86State,xmm_t0); - gen_lea_modrm(env, s, modrm); - gen_ldo_env_A0(s, op2_offset); - } - } else { - op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); - if (mod == 3) { - op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); - } else { - op2_offset = offsetof(CPUX86State,mmx_t0); - gen_lea_modrm(env, s, modrm); - gen_ldq_env_A0(s, op2_offset); - } - } - val = x86_ldub_code(env, s); - - if ((b & 0xfc) == 0x60) { /* pcmpXstrX */ - set_cc_op(s, CC_OP_EFLAGS); - - if (s->dflag == MO_64) { - /* The helper must use entire 64-bit gp registers */ - val |= 1 << 8; - } - } - - tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset); - sse_fn_eppi(cpu_env, s->ptr0, s->ptr1, tcg_const_i32(val)); - break; - - case 0x33a: - /* Various integer extensions at 0f 3a f[0-f]. 
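A small sketch of how the INSERTPS immediate used above is commonly interpreted: bits 7:6 select the source lane (register form only), bits 5:4 the destination lane, and bits 3:0 form a zero mask. The struct and function below are stand-ins for illustration, not QEMU types.

#include <stdint.h>

/* Illustrative only: 32-bit lanes stand in for the low 128 bits of an
 * XMM register; a memory source supplies a single lane instead of
 * honouring src_lane. */
typedef struct {
    uint32_t lane[4];
} XmmSketch;

static void insertps_ref(XmmSketch *dst, const XmmSketch *src, uint8_t imm8)
{
    unsigned src_lane = (imm8 >> 6) & 3;
    unsigned dst_lane = (imm8 >> 4) & 3;
    unsigned zmask = imm8 & 0xf;

    dst->lane[dst_lane] = src->lane[src_lane];
    for (unsigned i = 0; i < 4; i++) {
        if (zmask & (1u << i)) {
            dst->lane[i] = 0;
        }
    }
}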
*/ - b = modrm | (b1 << 8); - modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | REX_R(s); - - switch (b) { - case 0x3f0: /* rorx Gy,Ey, Ib */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) - || !(s->prefix & PREFIX_VEX) - || s->vex_l != 0) { - goto illegal_op; - } - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - b = x86_ldub_code(env, s); - if (ot == MO_64) { - tcg_gen_rotri_tl(s->T0, s->T0, b & 63); - } else { - tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, b & 31); - tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); - } - gen_op_mov_reg_v(s, ot, reg, s->T0); - break; - - default: - goto unknown_op; - } - break; - - default: - unknown_op: - gen_unknown_opcode(env, s); - return; - } - } else { - /* generic MMX or SSE operation */ - switch(b) { - case 0x70: /* pshufx insn */ - case 0xc6: /* pshufx insn */ - case 0xc2: /* compare insns */ - s->rip_offset = 1; - break; - default: - break; - } - if (is_xmm) { - op1_offset = offsetof(CPUX86State,xmm_regs[reg]); - if (mod != 3) { - int sz = 4; - - gen_lea_modrm(env, s, modrm); - op2_offset = offsetof(CPUX86State,xmm_t0); - - switch (b) { - case 0x50 ... 0x5a: - case 0x5c ... 0x5f: - case 0xc2: - /* Most sse scalar operations. */ - if (b1 == 2) { - sz = 2; - } else if (b1 == 3) { - sz = 3; - } - break; - - case 0x2e: /* ucomis[sd] */ - case 0x2f: /* comis[sd] */ - if (b1 == 0) { - sz = 2; - } else { - sz = 3; - } - break; - } - - switch (sz) { - case 2: - /* 32 bit access */ - gen_op_ld_v(s, MO_32, s->T0, s->A0); - tcg_gen_st32_tl(s->T0, cpu_env, - offsetof(CPUX86State,xmm_t0.ZMM_L(0))); - break; - case 3: - /* 64 bit access */ - gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0))); - break; - default: - /* 128 bit access */ - gen_ldo_env_A0(s, op2_offset); - break; - } - } else { - rm = (modrm & 7) | REX_B(s); - op2_offset = offsetof(CPUX86State,xmm_regs[rm]); - } - } else { - op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - op2_offset = offsetof(CPUX86State,mmx_t0); - gen_ldq_env_A0(s, op2_offset); - } else { - rm = (modrm & 7); - op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); - } - } - switch(b) { - case 0x0f: /* 3DNow! data insns */ - val = x86_ldub_code(env, s); - sse_fn_epp = sse_op_table5[val]; - if (!sse_fn_epp) { - goto unknown_op; - } - if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) { - goto illegal_op; - } - tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset); - sse_fn_epp(cpu_env, s->ptr0, s->ptr1); - break; - case 0x70: /* pshufx insn */ - case 0xc6: /* pshufx insn */ - val = x86_ldub_code(env, s); - tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset); - /* XXX: introduce a new table? */ - sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp; - sse_fn_ppi(s->ptr0, s->ptr1, tcg_const_i32(val)); - break; - case 0xc2: - /* compare insns */ - val = x86_ldub_code(env, s); - if (val >= 8) - goto unknown_op; - sse_fn_epp = sse_op_table4[val][b1]; - - tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset); - sse_fn_epp(cpu_env, s->ptr0, s->ptr1); - break; - case 0xf7: - /* maskmov : we must prepare A0 */ - if (mod != 3) - goto illegal_op; - tcg_gen_mov_tl(s->A0, cpu_regs[R_EDI]); - gen_extu(s->aflag, s->A0); - gen_add_A0_ds_seg(s); - - tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset); - /* XXX: introduce a new table? 
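For the RORX case above, a plain C equivalent of the rotate that the tcg_gen_rotri_tl/tcg_gen_rotri_i32 calls generate: RORX rotates right by an immediate and, unlike ROR, does not touch EFLAGS. The helper names are made up for the sketch.

#include <stdint.h>

/* Illustrative only: the rotate count is masked to the operand size,
 * and a zero count is special-cased to avoid an undefined shift. */
static uint32_t rorx32_ref(uint32_t src, unsigned imm8)
{
    unsigned r = imm8 & 31;
    return r ? (src >> r) | (src << (32 - r)) : src;
}

static uint64_t rorx64_ref(uint64_t src, unsigned imm8)
{
    unsigned r = imm8 & 63;
    return r ? (src >> r) | (src << (64 - r)) : src;
}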
*/ - sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp; - sse_fn_eppt(cpu_env, s->ptr0, s->ptr1, s->A0); - break; - default: - tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset); - sse_fn_epp(cpu_env, s->ptr0, s->ptr1); - break; - } - if (b == 0x2e || b == 0x2f) { - set_cc_op(s, CC_OP_EFLAGS); - } - } -} +#include "decode-new.h" +#include "emit.c.inc" +#include "decode-new.c.inc" /* convert one instruction. s->base.is_jmp is set if the translation must be stopped. Return the next pc value */ -static target_ulong disas_insn(DisasContext *s, CPUState *cpu) +static bool disas_insn(DisasContext *s, CPUState *cpu) { CPUX86State *env = cpu->env_ptr; int b, prefixes; int shift; MemOp ot, aflag, dflag; int modrm, reg, rm, mod, op, opreg, val; - target_ulong next_eip, tval; - target_ulong pc_start = s->base.pc_next; + bool orig_cc_op_dirty = s->cc_op_dirty; + CCOp orig_cc_op = s->cc_op; + target_ulong orig_pc_save = s->pc_save; - s->pc_start = s->pc = pc_start; + s->pc = s->base.pc_next; s->override = -1; #ifdef TARGET_X86_64 - s->rex_w = false; s->rex_r = 0; s->rex_x = 0; s->rex_b = 0; @@ -5062,22 +3502,52 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) s->rip_offset = 0; /* for relative ip address */ s->vex_l = 0; s->vex_v = 0; - if (sigsetjmp(s->jmpbuf, 0) != 0) { + s->vex_w = false; + switch (sigsetjmp(s->jmpbuf, 0)) { + case 0: + break; + case 1: gen_exception_gpf(s); - return s->pc; + return true; + case 2: + /* Restore state that may affect the next instruction. */ + s->pc = s->base.pc_next; + /* + * TODO: These save/restore can be removed after the table-based + * decoder is complete; we will be decoding the insn completely + * before any code generation that might affect these variables. + */ + s->cc_op_dirty = orig_cc_op_dirty; + s->cc_op = orig_cc_op; + s->pc_save = orig_pc_save; + /* END TODO */ + s->base.num_insns--; + tcg_remove_ops_after(s->prev_insn_end); + s->base.is_jmp = DISAS_TOO_MANY; + return false; + default: + g_assert_not_reached(); } prefixes = 0; next_byte: + s->prefix = prefixes; b = x86_ldub_code(env, s); /* Collect prefixes. */ switch (b) { + default: + break; + case 0x0f: + b = x86_ldub_code(env, s) + 0x100; + break; case 0xf3: prefixes |= PREFIX_REPZ; + prefixes &= ~PREFIX_REPNZ; goto next_byte; case 0xf2: prefixes |= PREFIX_REPNZ; + prefixes &= ~PREFIX_REPZ; goto next_byte; case 0xf0: prefixes |= PREFIX_LOCK; @@ -5111,7 +3581,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (CODE64(s)) { /* REX prefix */ prefixes |= PREFIX_REX; - s->rex_w = (b >> 3) & 1; + s->vex_w = (b >> 3) & 1; s->rex_r = (b & 0x4) << 1; s->rex_x = (b & 0x2) << 2; s->rex_b = (b & 0x1) << 3; @@ -5121,58 +3591,17 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) #endif case 0xc5: /* 2-byte VEX */ case 0xc4: /* 3-byte VEX */ - /* VEX prefixes cannot be used except in 32-bit mode. - Otherwise the instruction is LES or LDS. */ if (CODE32(s) && !VM86(s)) { - static const int pp_prefix[4] = { - 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ - }; - int vex3, vex2 = x86_ldub_code(env, s); + int vex2 = x86_ldub_code(env, s); + s->pc--; /* rewind the advance_pc() x86_ldub_code() did */ if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) { /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b, otherwise the instruction is LES or LDS. */ - s->pc--; /* rewind the advance_pc() x86_ldub_code() did */ break; } - - /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. 
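The open-coded VEX handling being removed around here pulls the inverted R/X/B and vvvv fields, plus W, L, pp and the opcode-map selector, out of the C5/C4 prefix bytes. The sketch below restates that layout with invented struct and function names; it makes no claim about how the new table-based decoder represents these fields internally.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only. */
typedef struct {
    bool r, x, b, w;    /* REX-like bits; r/x/b are stored inverted */
    unsigned vvvv;      /* extra source register, stored inverted */
    unsigned l;         /* vector length: 0 = 128-bit, 1 = 256-bit */
    unsigned pp;        /* implied prefix: 0=none, 1=66, 2=F3, 3=F2 */
    unsigned map;       /* 1 = 0F, 2 = 0F 38, 3 = 0F 3A */
} VexSketch;

static VexSketch decode_vex2(uint8_t byte1)          /* after a C5 byte */
{
    VexSketch v = {
        .r = !(byte1 & 0x80),
        .vvvv = ((byte1 >> 3) & 0xf) ^ 0xf,
        .l = (byte1 >> 2) & 1,
        .pp = byte1 & 3,
        .map = 1,                                    /* implied 0F map */
    };
    return v;
}

static VexSketch decode_vex3(uint8_t byte1, uint8_t byte2) /* after a C4 byte */
{
    VexSketch v = {
        .r = !(byte1 & 0x80),
        .x = !(byte1 & 0x40),
        .b = !(byte1 & 0x20),
        .map = byte1 & 0x1f,
        .w = (byte2 >> 7) & 1,
        .vvvv = ((byte2 >> 3) & 0xf) ^ 0xf,
        .l = (byte2 >> 2) & 1,
        .pp = byte2 & 3,
    };
    return v;
}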
*/ - if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ - | PREFIX_LOCK | PREFIX_DATA | PREFIX_REX)) { - goto illegal_op; - } -#ifdef TARGET_X86_64 - s->rex_r = (~vex2 >> 4) & 8; -#endif - if (b == 0xc5) { - /* 2-byte VEX prefix: RVVVVlpp, implied 0f leading opcode byte */ - vex3 = vex2; - b = x86_ldub_code(env, s) | 0x100; - } else { - /* 3-byte VEX prefix: RXBmmmmm wVVVVlpp */ - vex3 = x86_ldub_code(env, s); -#ifdef TARGET_X86_64 - s->rex_x = (~vex2 >> 3) & 8; - s->rex_b = (~vex2 >> 2) & 8; - s->rex_w = (vex3 >> 7) & 1; -#endif - switch (vex2 & 0x1f) { - case 0x01: /* Implied 0f leading opcode bytes. */ - b = x86_ldub_code(env, s) | 0x100; - break; - case 0x02: /* Implied 0f 38 leading opcode bytes. */ - b = 0x138; - break; - case 0x03: /* Implied 0f 3a leading opcode bytes. */ - b = 0x13a; - break; - default: /* Reserved for future use. */ - goto unknown_op; - } - } - s->vex_v = (~vex3 >> 3) & 0xf; - s->vex_l = (vex3 >> 2) & 1; - prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX; + disas_insn_new(s, cpu, b); + return s->pc; } break; } @@ -5205,14 +3634,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) s->dflag = dflag; /* now check op code */ - reswitch: - switch(b) { - case 0x0f: - /**************************/ - /* extended op code */ - b = x86_ldub_code(env, s) | 0x100; - goto reswitch; - + switch (b) { /**************************/ /* arith & logic */ case 0x00 ... 0x05: @@ -5403,7 +3825,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_temp_free(t2); tcg_temp_free(a0); - tcg_gen_mov_tl(s->T0, t0); + tcg_gen_neg_tl(s->T0, t0); tcg_temp_free(t0); } else { tcg_gen_neg_tl(s->T0, s->T0); @@ -5612,12 +4034,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (dflag == MO_16) { tcg_gen_ext16u_tl(s->T0, s->T0); } - next_eip = s->pc - s->cs_base; - tcg_gen_movi_tl(s->T1, next_eip); - gen_push_v(s, s->T1); - gen_op_jmp_v(s->T0); + gen_push_v(s, eip_next_tl(s)); + gen_op_jmp_v(s, s->T0); gen_bnd_jmp(s); - gen_jr(s, s->T0); + s->base.is_jmp = DISAS_JUMP; break; case 3: /* lcall Ev */ if (mod == 3) { @@ -5630,24 +4050,24 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (PE(s) && !VM86(s)) { tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); gen_helper_lcall_protected(cpu_env, s->tmp2_i32, s->T1, - tcg_const_i32(dflag - 1), - tcg_const_tl(s->pc - s->cs_base)); + tcg_constant_i32(dflag - 1), + eip_next_tl(s)); } else { tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - gen_helper_lcall_real(cpu_env, s->tmp2_i32, s->T1, - tcg_const_i32(dflag - 1), - tcg_const_i32(s->pc - s->cs_base)); + tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); + gen_helper_lcall_real(cpu_env, s->tmp2_i32, s->tmp3_i32, + tcg_constant_i32(dflag - 1), + eip_next_i32(s)); } - tcg_gen_ld_tl(s->tmp4, cpu_env, offsetof(CPUX86State, eip)); - gen_jr(s, s->tmp4); + s->base.is_jmp = DISAS_JUMP; break; case 4: /* jmp Ev */ if (dflag == MO_16) { tcg_gen_ext16u_tl(s->T0, s->T0); } - gen_op_jmp_v(s->T0); + gen_op_jmp_v(s, s->T0); gen_bnd_jmp(s); - gen_jr(s, s->T0); + s->base.is_jmp = DISAS_JUMP; break; case 5: /* ljmp Ev */ if (mod == 3) { @@ -5660,13 +4080,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (PE(s) && !VM86(s)) { tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); gen_helper_ljmp_protected(cpu_env, s->tmp2_i32, s->T1, - tcg_const_tl(s->pc - s->cs_base)); + eip_next_tl(s)); } else { gen_op_movl_seg_T0_vm(s, R_CS); - gen_op_jmp_v(s->T1); + gen_op_jmp_v(s, s->T1); } - tcg_gen_ld_tl(s->tmp4, cpu_env, offsetof(CPUX86State, eip)); - gen_jr(s, s->tmp4); + s->base.is_jmp = 
DISAS_JUMP; break; case 6: /* push Ev */ gen_push_v(s, s->T0); @@ -5833,7 +4252,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0x1b0: case 0x1b1: /* cmpxchg Ev, Gv */ { - TCGv oldv, newv, cmpv; + TCGv oldv, newv, cmpv, dest; ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); @@ -5844,7 +4263,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) cmpv = tcg_temp_new(); gen_op_mov_v_reg(s, ot, newv, reg); tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]); - + gen_extu(ot, cmpv); if (s->prefix & PREFIX_LOCK) { if (mod == 3) { goto illegal_op; @@ -5852,32 +4271,43 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_lea_modrm(env, s, modrm); tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv, s->mem_index, ot | MO_LE); - gen_op_mov_reg_v(s, ot, R_EAX, oldv); } else { if (mod == 3) { rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(s, ot, oldv, rm); + gen_extu(ot, oldv); + + /* + * Unlike the memory case, where "the destination operand receives + * a write cycle without regard to the result of the comparison", + * rm must not be touched altogether if the write fails, including + * not zero-extending it on 64-bit processors. So, precompute + * the result of a successful writeback and perform the movcond + * directly on cpu_regs. Also need to write accumulator first, in + * case rm is part of RAX too. + */ + dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv); + tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest); } else { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, oldv, s->A0); - rm = 0; /* avoid warning */ - } - gen_extu(ot, oldv); - gen_extu(ot, cmpv); - /* store value = (old == cmp ? new : old); */ - tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv); - if (mod == 3) { - gen_op_mov_reg_v(s, ot, R_EAX, oldv); - gen_op_mov_reg_v(s, ot, rm, newv); - } else { - /* Perform an unconditional store cycle like physical cpu; - must be before changing accumulator to ensure - idempotency if the store faults and the instruction - is restarted */ + + /* + * Perform an unconditional store cycle like physical cpu; + * must be before changing accumulator to ensure + * idempotency if the store faults and the instruction + * is restarted + */ + tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv); gen_op_st_v(s, ot, newv, s->A0); - gen_op_mov_reg_v(s, ot, R_EAX, oldv); } } + /* + * Write EAX only if the cmpxchg fails; reuse newv as the destination, + * since it's dead here. + */ + dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv); + tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv); tcg_gen_mov_tl(cpu_cc_src, oldv); tcg_gen_mov_tl(s->cc_srcT, cmpv); tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv); @@ -5933,14 +4363,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); + s->base.is_jmp = DISAS_TOO_MANY; } gen_helper_rdrand(s->T0, cpu_env); rm = (modrm & 7) | REX_B(s); gen_op_mov_reg_v(s, dflag, rm, s->T0); set_cc_op(s, CC_OP_EFLAGS); - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_jmp(s, s->pc - s->cs_base); - } break; default: @@ -6031,26 +4459,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) ot = gen_pop_T0(s); gen_movl_seg_T0(s, reg); gen_pop_update(s, ot); - /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. 
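A reference model of the register-destination CMPXCHG behaviour the movcond sequence above arranges for: the destination is written only when the compare succeeds, and the accumulator is written only when it fails, so a failed compare must not even zero-extend the destination register. Operand-size truncation is omitted and the function name is illustrative.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only; returns the resulting ZF value. */
static bool cmpxchg_reg_ref(uint64_t *accum, uint64_t *dest, uint64_t src)
{
    if (*dest == *accum) {
        *dest = src;        /* success: destination takes the new value */
        return true;
    }
    *accum = *dest;         /* failure: accumulator takes the old value */
    return false;
}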
*/ - if (s->base.is_jmp) { - gen_jmp_im(s, s->pc - s->cs_base); - if (reg == R_SS) { - s->flags &= ~HF_TF_MASK; - gen_eob_inhibit_irq(s, true); - } else { - gen_eob(s); - } - } break; case 0x1a1: /* pop fs */ case 0x1a9: /* pop gs */ ot = gen_pop_T0(s); gen_movl_seg_T0(s, (b >> 3) & 7); gen_pop_update(s, ot); - if (s->base.is_jmp) { - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); - } break; /**************************/ @@ -6097,16 +4511,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); gen_movl_seg_T0(s, reg); - /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */ - if (s->base.is_jmp) { - gen_jmp_im(s, s->pc - s->cs_base); - if (reg == R_SS) { - s->flags &= ~HF_TF_MASK; - gen_eob_inhibit_irq(s, true); - } else { - gen_eob(s); - } - } break; case 0x8c: /* mov Gv, seg */ modrm = x86_ldub_code(env, s); @@ -6177,7 +4581,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) reg = ((modrm >> 3) & 7) | REX_R(s); { AddressParts a = gen_lea_modrm_0(env, s, modrm); - TCGv ea = gen_lea_modrm_1(s, a); + TCGv ea = gen_lea_modrm_1(s, a, false); gen_lea_v_seg(s, s->aflag, ea, -1, -1); gen_op_mov_reg_v(s, dflag, reg, s->A0); } @@ -6191,16 +4595,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) target_ulong offset_addr; ot = mo_b_d(b, dflag); - switch (s->aflag) { -#ifdef TARGET_X86_64 - case MO_64: - offset_addr = x86_ldq_code(env, s); - break; -#endif - default: - offset_addr = insn_get(env, s, s->aflag); - break; - } + offset_addr = insn_get_addr(env, s, s->aflag); tcg_gen_movi_tl(s->A0, offset_addr); gen_add_A0_ds_seg(s); if ((b & 2) == 0) { @@ -6305,10 +4700,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_movl_seg_T0(s, op); /* then put the data */ gen_op_mov_reg_v(s, ot, reg, s->T1); - if (s->base.is_jmp) { - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); - } break; /************************/ @@ -6403,7 +4794,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (s->flags & (HF_EM_MASK | HF_TS_MASK)) { /* if CR0.EM or CR0.TS are set, generate an FPU exception */ /* XXX: what to do if illegal op ? 
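The moffs forms (opcodes A0-A3) handled above now read their displacement through insn_get_addr(): an address-size-wide little-endian immediate of 2, 4 or 8 bytes. A hypothetical standalone reader, for illustration only:

#include <stdint.h>

/* Illustrative only: assemble an addr_bytes-wide little-endian
 * immediate; addr_bytes would be 2, 4 or 8 depending on s->aflag. */
static uint64_t moffs_ref(const uint8_t *p, int addr_bytes)
{
    uint64_t addr = 0;
    for (int i = 0; i < addr_bytes; i++) {
        addr |= (uint64_t)p[i] << (8 * i);
    }
    return addr;
}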
*/ - gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + gen_exception(s, EXCP07_PREX); break; } modrm = x86_ldub_code(env, s); @@ -6413,7 +4804,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (mod != 3) { /* memory op */ AddressParts a = gen_lea_modrm_0(env, s, modrm); - TCGv ea = gen_lea_modrm_1(s, a); + TCGv ea = gen_lea_modrm_1(s, a, false); TCGv last_addr = tcg_temp_new(); bool update_fdp = true; @@ -6442,7 +4833,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 2: tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, - s->mem_index, MO_LEQ); + s->mem_index, MO_LEUQ); gen_fldl_FT0(s, s->tmp1_i64); break; case 3: @@ -6481,7 +4872,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 2: tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, - s->mem_index, MO_LEQ); + s->mem_index, MO_LEUQ); gen_fldl_ST0(s, s->tmp1_i64); break; case 3: @@ -6503,7 +4894,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 2: gen_helper_fisttll_ST0(s->tmp1_i64, cpu_env); tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, - s->mem_index, MO_LEQ); + s->mem_index, MO_LEUQ); break; case 3: default: @@ -6529,7 +4920,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 2: gen_fstl_ST0(s, s->tmp1_i64); tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, - s->mem_index, MO_LEQ); + s->mem_index, MO_LEUQ); break; case 3: default: @@ -6548,7 +4939,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_helper_fldenv(cpu_env, s->A0, tcg_const_i32(dflag - 1)); update_fip = update_fdp = false; - gen_jmp_im(s, s->pc - s->cs_base); + gen_update_eip_next(s); gen_eob(s); break; case 0x0d: /* fldcw mem */ @@ -6556,7 +4947,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) s->mem_index, MO_LEUW); gen_helper_fldcw(cpu_env, s->tmp2_i32); update_fip = update_fdp = false; - gen_jmp_im(s, s->pc - s->cs_base); + gen_update_eip_next(s); gen_eob(s); break; case 0x0e: /* fnstenv mem */ @@ -6602,13 +4993,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 0x3d: /* fildll */ tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, - s->mem_index, MO_LEQ); + s->mem_index, MO_LEUQ); gen_fildll_ST0(s, s->tmp1_i64); break; case 0x3f: /* fistpll */ gen_fistll_ST0(s, s->tmp1_i64); tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, - s->mem_index, MO_LEQ); + s->mem_index, MO_LEUQ); gen_fpop(s); break; default: @@ -6820,7 +5211,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 3: /* fninit */ gen_helper_fninit(cpu_env); update_fip = false; - gen_jmp_im(s, s->pc - s->cs_base); + gen_update_eip_next(s); gen_eob(s); break; case 4: /* fsetpm (287 only, just do nop here) */ @@ -6948,7 +5339,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) offsetof(CPUX86State, segs[R_CS].selector)); tcg_gen_st16_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, fpcs)); - tcg_gen_st_tl(tcg_constant_tl(pc_start - s->cs_base), + tcg_gen_st_tl(eip_cur_tl(s), cpu_env, offsetof(CPUX86State, fpip)); } } @@ -6960,7 +5351,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0xa5: ot = mo_b_d(b, dflag); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { - gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); + gen_repz_movs(s, ot); } else { gen_movs(s, ot); } @@ -6970,7 +5361,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0xab: ot = mo_b_d(b, dflag); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { - gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); + gen_repz_stos(s, 
ot); } else { gen_stos(s, ot); } @@ -6979,7 +5370,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0xad: ot = mo_b_d(b, dflag); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { - gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); + gen_repz_lods(s, ot); } else { gen_lods(s, ot); } @@ -6988,9 +5379,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0xaf: ot = mo_b_d(b, dflag); if (prefixes & PREFIX_REPNZ) { - gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); + gen_repz_scas(s, ot, 1); } else if (prefixes & PREFIX_REPZ) { - gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); + gen_repz_scas(s, ot, 0); } else { gen_scas(s, ot); } @@ -7000,9 +5391,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0xa7: ot = mo_b_d(b, dflag); if (prefixes & PREFIX_REPNZ) { - gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); + gen_repz_cmps(s, ot, 1); } else if (prefixes & PREFIX_REPZ) { - gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); + gen_repz_cmps(s, ot, 0); } else { gen_cmps(s, ot); } @@ -7018,15 +5409,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); + s->base.is_jmp = DISAS_TOO_MANY; } if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { - gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); - /* jump generated by gen_repz_ins */ + gen_repz_ins(s, ot); } else { gen_ins(s, ot); - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_jmp(s, s->pc - s->cs_base); - } } break; case 0x6e: /* outsS */ @@ -7039,15 +5427,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); + s->base.is_jmp = DISAS_TOO_MANY; } if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { - gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); - /* jump generated by gen_repz_outs */ + gen_repz_outs(s, ot); } else { gen_outs(s, ot); - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_jmp(s, s->pc - s->cs_base); - } } break; @@ -7064,13 +5449,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); + s->base.is_jmp = DISAS_TOO_MANY; } gen_helper_in_func(ot, s->T1, s->tmp2_i32); gen_op_mov_reg_v(s, ot, R_EAX, s->T1); gen_bpt_io(s, s->tmp2_i32, ot); - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_jmp(s, s->pc - s->cs_base); - } break; case 0xe6: case 0xe7: @@ -7082,14 +5465,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); + s->base.is_jmp = DISAS_TOO_MANY; } gen_op_mov_v_reg(s, ot, s->T1, R_EAX); tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32); gen_bpt_io(s, s->tmp2_i32, ot); - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_jmp(s, s->pc - s->cs_base); - } break; case 0xec: case 0xed: @@ -7101,13 +5482,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); + s->base.is_jmp = DISAS_TOO_MANY; } gen_helper_in_func(ot, s->T1, s->tmp2_i32); gen_op_mov_reg_v(s, ot, R_EAX, s->T1); gen_bpt_io(s, s->tmp2_i32, ot); - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_jmp(s, s->pc - s->cs_base); - } break; case 0xee: case 0xef: @@ -7119,14 +5498,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) 
{ gen_io_start(); + s->base.is_jmp = DISAS_TOO_MANY; } gen_op_mov_v_reg(s, ot, s->T1, R_EAX); tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32); gen_bpt_io(s, s->tmp2_i32, ot); - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_jmp(s, s->pc - s->cs_base); - } break; /************************/ @@ -7136,24 +5513,24 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) ot = gen_pop_T0(s); gen_stack_update(s, val + (1 << ot)); /* Note that gen_pop_T0 uses a zero-extending load. */ - gen_op_jmp_v(s->T0); + gen_op_jmp_v(s, s->T0); gen_bnd_jmp(s); - gen_jr(s, s->T0); + s->base.is_jmp = DISAS_JUMP; break; case 0xc3: /* ret */ ot = gen_pop_T0(s); gen_pop_update(s, ot); /* Note that gen_pop_T0 uses a zero-extending load. */ - gen_op_jmp_v(s->T0); + gen_op_jmp_v(s, s->T0); gen_bnd_jmp(s); - gen_jr(s, s->T0); + s->base.is_jmp = DISAS_JUMP; break; case 0xca: /* lret im */ val = x86_ldsw_code(env, s); do_lret: if (PE(s) && !VM86(s)) { gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); + gen_update_eip_cur(s); gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1), tcg_const_i32(val)); } else { @@ -7162,7 +5539,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_ld_v(s, dflag, s->T0, s->A0); /* NOTE: keeping EIP updated is not a problem in case of exception */ - gen_op_jmp_v(s->T0); + gen_op_jmp_v(s, s->T0); /* pop selector */ gen_add_A0_im(s, 1 << dflag); gen_op_ld_v(s, dflag, s->T0, s->A0); @@ -7170,7 +5547,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) /* add stack offset */ gen_stack_update(s, val + (2 << dflag)); } - gen_eob(s); + s->base.is_jmp = DISAS_EOB_ONLY; break; case 0xcb: /* lret */ val = 0; @@ -7184,30 +5561,20 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1)); } else { - gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1), - tcg_const_i32(s->pc - s->cs_base)); + gen_helper_iret_protected(cpu_env, tcg_constant_i32(dflag - 1), + eip_next_i32(s)); } set_cc_op(s, CC_OP_EFLAGS); - gen_eob(s); + s->base.is_jmp = DISAS_EOB_ONLY; break; case 0xe8: /* call im */ { - if (dflag != MO_16) { - tval = (int32_t)insn_get(env, s, MO_32); - } else { - tval = (int16_t)insn_get(env, s, MO_16); - } - next_eip = s->pc - s->cs_base; - tval += next_eip; - if (dflag == MO_16) { - tval &= 0xffff; - } else if (!CODE64(s)) { - tval &= 0xffffffff; - } - tcg_gen_movi_tl(s->T0, next_eip); - gen_push_v(s, s->T0); + int diff = (dflag != MO_16 + ? (int32_t)insn_get(env, s, MO_32) + : (int16_t)insn_get(env, s, MO_16)); + gen_push_v(s, eip_next_tl(s)); gen_bnd_jmp(s); - gen_jmp(s, tval); + gen_jmp_rel(s, dflag, diff, 0); } break; case 0x9a: /* lcall im */ @@ -7225,19 +5592,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } goto do_lcall; case 0xe9: /* jmp im */ - if (dflag != MO_16) { - tval = (int32_t)insn_get(env, s, MO_32); - } else { - tval = (int16_t)insn_get(env, s, MO_16); + { + int diff = (dflag != MO_16 + ? 
(int32_t)insn_get(env, s, MO_32) + : (int16_t)insn_get(env, s, MO_16)); + gen_bnd_jmp(s); + gen_jmp_rel(s, dflag, diff, 0); } - tval += s->pc - s->cs_base; - if (dflag == MO_16) { - tval &= 0xffff; - } else if (!CODE64(s)) { - tval &= 0xffffffff; - } - gen_bnd_jmp(s); - gen_jmp(s, tval); break; case 0xea: /* ljmp im */ { @@ -7254,30 +5615,26 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } goto do_ljmp; case 0xeb: /* jmp Jb */ - tval = (int8_t)insn_get(env, s, MO_8); - tval += s->pc - s->cs_base; - if (dflag == MO_16) { - tval &= 0xffff; + { + int diff = (int8_t)insn_get(env, s, MO_8); + gen_jmp_rel(s, dflag, diff, 0); } - gen_jmp(s, tval); break; case 0x70 ... 0x7f: /* jcc Jb */ - tval = (int8_t)insn_get(env, s, MO_8); - goto do_jcc; + { + int diff = (int8_t)insn_get(env, s, MO_8); + gen_bnd_jmp(s); + gen_jcc(s, b, diff); + } + break; case 0x180 ... 0x18f: /* jcc Jv */ - if (dflag != MO_16) { - tval = (int32_t)insn_get(env, s, MO_32); - } else { - tval = (int16_t)insn_get(env, s, MO_16); + { + int diff = (dflag != MO_16 + ? (int32_t)insn_get(env, s, MO_32) + : (int16_t)insn_get(env, s, MO_16)); + gen_bnd_jmp(s); + gen_jcc(s, b, diff); } - do_jcc: - next_eip = s->pc - s->cs_base; - tval += next_eip; - if (dflag == MO_16) { - tval &= 0xffff; - } - gen_bnd_jmp(s); - gen_jcc(s, b, tval, next_eip); break; case 0x190 ... 0x19f: /* setcc Gv */ @@ -7357,14 +5714,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_pop_update(s, ot); set_cc_op(s, CC_OP_EFLAGS); /* abort translation because TF/AC flag may change */ - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); + s->base.is_jmp = DISAS_EOB_NEXT; } break; case 0x9e: /* sahf */ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) goto illegal_op; - gen_op_mov_v_reg(s, MO_8, s->T0, R_AH); + tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8); gen_compute_eflags(s); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O); tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C); @@ -7376,7 +5732,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_compute_eflags(s); /* Note: gen_compute_eflags() only gives the condition codes */ tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02); - gen_op_mov_reg_v(s, MO_8, R_AH, s->T0); + tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8); break; case 0xf5: /* cmc */ gen_compute_eflags(s); @@ -7447,7 +5803,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_exts(ot, s->T1); tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot); tcg_gen_shli_tl(s->tmp0, s->tmp0, ot); - tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a), s->tmp0); + tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0); gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override); if (!(s->prefix & PREFIX_LOCK)) { gen_op_ld_v(s, ot, s->T0, s->A0); @@ -7623,7 +5979,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; val = x86_ldub_code(env, s); if (val == 0) { - gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base); + gen_exception(s, EXCP00_DIVZ); } else { gen_helper_aam(cpu_env, tcg_const_i32(val)); set_cc_op(s, CC_OP_LOGICB); @@ -7649,34 +6005,34 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } if (prefixes & PREFIX_REPZ) { gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start)); + gen_update_eip_cur(s); + gen_helper_pause(cpu_env, cur_insn_len_i32(s)); s->base.is_jmp = DISAS_NORETURN; } break; case 0x9b: /* fwait */ if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) == 
(HF_MP_MASK | HF_TS_MASK)) { - gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + gen_exception(s, EXCP07_PREX); } else { gen_helper_fwait(cpu_env); } break; case 0xcc: /* int3 */ - gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base); + gen_interrupt(s, EXCP03_INT3); break; case 0xcd: /* int N */ val = x86_ldub_code(env, s); if (check_vm86_iopl(s)) { - gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base); + gen_interrupt(s, val); } break; case 0xce: /* into */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start)); + gen_update_eip_cur(s); + gen_helper_into(cpu_env, cur_insn_len_i32(s)); break; #ifdef WANT_ICEBP case 0xf1: /* icebp (undocumented, exits to external debugger) */ @@ -7686,14 +6042,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) #endif case 0xfa: /* cli */ if (check_iopl(s)) { - gen_helper_cli(cpu_env); + gen_reset_eflags(s, IF_MASK); } break; case 0xfb: /* sti */ if (check_iopl(s)) { - gen_helper_sti(cpu_env); + gen_set_eflags(s, IF_MASK); /* interruptions are enabled only the first insn after sti */ - gen_jmp_im(s, s->pc - s->cs_base); + gen_update_eip_next(s); gen_eob_inhibit_irq(s, true); } break; @@ -7737,75 +6093,62 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0xe2: /* loop */ case 0xe3: /* jecxz */ { - TCGLabel *l1, *l2, *l3; - - tval = (int8_t)insn_get(env, s, MO_8); - next_eip = s->pc - s->cs_base; - tval += next_eip; - if (dflag == MO_16) { - tval &= 0xffff; - } + TCGLabel *l1, *l2; + int diff = (int8_t)insn_get(env, s, MO_8); l1 = gen_new_label(); l2 = gen_new_label(); - l3 = gen_new_label(); gen_update_cc_op(s); b &= 3; switch(b) { case 0: /* loopnz */ case 1: /* loopz */ gen_op_add_reg_im(s, s->aflag, R_ECX, -1); - gen_op_jz_ecx(s, s->aflag, l3); + gen_op_jz_ecx(s, l2); gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1); break; case 2: /* loop */ gen_op_add_reg_im(s, s->aflag, R_ECX, -1); - gen_op_jnz_ecx(s, s->aflag, l1); + gen_op_jnz_ecx(s, l1); break; default: case 3: /* jcxz */ - gen_op_jz_ecx(s, s->aflag, l1); + gen_op_jz_ecx(s, l1); break; } - gen_set_label(l3); - gen_jmp_im(s, next_eip); - tcg_gen_br(l2); + gen_set_label(l2); + gen_jmp_rel_csize(s, 0, 1); gen_set_label(l1); - gen_jmp_im(s, tval); - gen_set_label(l2); - gen_eob(s); + gen_jmp_rel(s, dflag, diff, 0); } break; case 0x130: /* wrmsr */ case 0x132: /* rdmsr */ if (check_cpl0(s)) { gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); + gen_update_eip_cur(s); if (b & 2) { gen_helper_rdmsr(cpu_env); } else { gen_helper_wrmsr(cpu_env); - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); + s->base.is_jmp = DISAS_EOB_NEXT; } } break; case 0x131: /* rdtsc */ gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); + gen_update_eip_cur(s); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); + s->base.is_jmp = DISAS_TOO_MANY; } gen_helper_rdtsc(cpu_env); - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_jmp(s, s->pc - s->cs_base); - } break; case 0x133: /* rdpmc */ gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); + gen_update_eip_cur(s); gen_helper_rdpmc(cpu_env); s->base.is_jmp = DISAS_NORETURN; break; @@ -7817,7 +6160,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_exception_gpf(s); } else { gen_helper_sysenter(cpu_env); - gen_eob(s); + s->base.is_jmp = DISAS_EOB_ONLY; } break; case 0x135: /* sysexit */ @@ -7828,15 +6171,15 @@ static target_ulong disas_insn(DisasContext *s, 
CPUState *cpu) gen_exception_gpf(s); } else { gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1)); - gen_eob(s); + s->base.is_jmp = DISAS_EOB_ONLY; } break; #ifdef TARGET_X86_64 case 0x105: /* syscall */ /* XXX: is it usable in real mode ? */ gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start)); + gen_update_eip_cur(s); + gen_helper_syscall(cpu_env, cur_insn_len_i32(s)); /* TF handling for the syscall insn is different. The TF bit is checked after the syscall insn completes. This allows #DB to not be generated after one has entered CPL0 if TF is set in FMASK. */ @@ -7861,14 +6204,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) #endif case 0x1a2: /* cpuid */ gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); + gen_update_eip_cur(s); gen_helper_cpuid(cpu_env); break; case 0xf4: /* hlt */ if (check_cpl0(s)) { gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start)); + gen_update_eip_cur(s); + gen_helper_hlt(cpu_env, cur_insn_len_i32(s)); s->base.is_jmp = DISAS_NORETURN; } break; @@ -7880,6 +6223,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0: /* sldt */ if (!PE(s) || VM86(s)) goto illegal_op; + if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { + break; + } gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ); tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, ldt.selector)); @@ -7899,6 +6245,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 1: /* str */ if (!PE(s) || VM86(s)) goto illegal_op; + if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { + break; + } gen_svm_check_intercept(s, SVM_EXIT_TR_READ); tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, tr.selector)); @@ -7937,6 +6286,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) modrm = x86_ldub_code(env, s); switch (modrm) { CASE_MODRM_MEM_OP(0): /* sgdt */ + if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { + break; + } gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ); gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(s->T0, @@ -7955,7 +6307,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); + gen_update_eip_cur(s); tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]); gen_extu(s->aflag, s->A0); gen_add_A0_ds_seg(s); @@ -7967,8 +6319,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start)); + gen_update_eip_cur(s); + gen_helper_mwait(cpu_env, cur_insn_len_i32(s)); s->base.is_jmp = DISAS_NORETURN; break; @@ -7977,9 +6329,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) || CPL(s) != 0) { goto illegal_op; } - gen_helper_clac(cpu_env); - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); + gen_reset_eflags(s, AC_MASK); + s->base.is_jmp = DISAS_EOB_NEXT; break; case 0xcb: /* stac */ @@ -7987,12 +6338,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) || CPL(s) != 0) { goto illegal_op; } - gen_helper_stac(cpu_env); - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); + gen_set_eflags(s, AC_MASK); + s->base.is_jmp = DISAS_EOB_NEXT; break; CASE_MODRM_MEM_OP(1): /* sidt */ + if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { + break; + } gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ); gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(s->T0, cpu_env, 
offsetof(CPUX86State, idt.limit)); @@ -8030,8 +6383,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); gen_helper_xsetbv(cpu_env, s->tmp2_i32, s->tmp1_i64); /* End TB because translation flags may change. */ - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); + s->base.is_jmp = DISAS_EOB_NEXT; break; case 0xd8: /* VMRUN */ @@ -8042,9 +6394,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; } gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); + gen_update_eip_cur(s); gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1), - tcg_const_i32(s->pc - pc_start)); + cur_insn_len_i32(s)); tcg_gen_exit_tb(NULL, 0); s->base.is_jmp = DISAS_NORETURN; break; @@ -8054,7 +6406,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); + gen_update_eip_cur(s); gen_helper_vmmcall(cpu_env); break; @@ -8066,7 +6418,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; } gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); + gen_update_eip_cur(s); gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1)); break; @@ -8078,7 +6430,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; } gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); + gen_update_eip_cur(s); gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1)); break; @@ -8092,8 +6444,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } gen_update_cc_op(s); gen_helper_stgi(cpu_env); - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); + s->base.is_jmp = DISAS_EOB_NEXT; break; case 0xdd: /* CLGI */ @@ -8104,7 +6455,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; } gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); + gen_update_eip_cur(s); gen_helper_clgi(cpu_env); break; @@ -8131,8 +6482,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]); } gen_helper_flush_page(cpu_env, s->A0); - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); + s->base.is_jmp = DISAS_EOB_NEXT; break; CASE_MODRM_MEM_OP(2): /* lgdt */ @@ -8168,6 +6518,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; CASE_MODRM_OP(4): /* smsw */ + if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { + break; + } gen_svm_check_intercept(s, SVM_EXIT_READ_CR0); tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, cr[0])); /* @@ -8212,8 +6565,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_andi_tl(s->T1, s->T1, ~0xe); tcg_gen_or_tl(s->T0, s->T0, s->T1); gen_helper_write_crN(cpu_env, tcg_constant_i32(0), s->T0); - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); + s->base.is_jmp = DISAS_EOB_NEXT; break; CASE_MODRM_MEM_OP(7): /* invlpg */ @@ -8223,8 +6575,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_svm_check_intercept(s, SVM_EXIT_INVLPG); gen_lea_modrm(env, s, modrm); gen_helper_flush_page(cpu_env, s->A0); - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); + s->base.is_jmp = DISAS_EOB_NEXT; break; case 0xf8: /* swapgs */ @@ -8247,14 +6598,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); + gen_update_eip_cur(s); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); + s->base.is_jmp = DISAS_TOO_MANY; } gen_helper_rdtscp(cpu_env); - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - 
gen_jmp(s, s->pc - s->cs_base); - } break; default: @@ -8430,10 +6779,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_lea_modrm(env, s, modrm); if (CODE64(s)) { tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0, - s->mem_index, MO_LEQ); + s->mem_index, MO_LEUQ); tcg_gen_addi_tl(s->A0, s->A0, 8); tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0, - s->mem_index, MO_LEQ); + s->mem_index, MO_LEUQ); } else { tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0, s->mem_index, MO_LEUL); @@ -8503,7 +6852,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) /* rip-relative generates #ud */ goto illegal_op; } - tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a)); + tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false)); if (!CODE64(s)) { tcg_gen_ext32u_tl(s->A0, s->A0); } @@ -8537,10 +6886,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_lea_modrm(env, s, modrm); if (CODE64(s)) { tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0, - s->mem_index, MO_LEQ); + s->mem_index, MO_LEUQ); tcg_gen_addi_tl(s->A0, s->A0, 8); tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0, - s->mem_index, MO_LEQ); + s->mem_index, MO_LEUQ); } else { tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0, s->mem_index, MO_LEUL); @@ -8618,20 +6967,17 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); + s->base.is_jmp = DISAS_TOO_MANY; } if (b & 2) { gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg); gen_op_mov_v_reg(s, ot, s->T0, rm); gen_helper_write_crN(cpu_env, tcg_constant_i32(reg), s->T0); - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); + s->base.is_jmp = DISAS_EOB_NEXT; } else { gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg); gen_helper_read_crN(s->T0, cpu_env, tcg_constant_i32(reg)); gen_op_mov_reg_v(s, ot, rm, s->T0); - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_jmp(s, s->pc - s->cs_base); - } } break; @@ -8658,8 +7004,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_v_reg(s, ot, s->T0, rm); tcg_gen_movi_i32(s->tmp2_i32, reg); gen_helper_set_dr(cpu_env, s->tmp2_i32, s->T0); - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); + s->base.is_jmp = DISAS_EOB_NEXT; } else { gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg); tcg_gen_movi_i32(s->tmp2_i32, reg); @@ -8673,8 +7018,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0); gen_helper_clts(cpu_env); /* abort block because static cpu state changed */ - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); + s->base.is_jmp = DISAS_EOB_NEXT; } break; /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */ @@ -8699,7 +7043,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { - gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + gen_exception(s, EXCP07_PREX); break; } gen_lea_modrm(env, s, modrm); @@ -8712,12 +7056,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { - gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + gen_exception(s, EXCP07_PREX); break; } gen_lea_modrm(env, s, modrm); gen_helper_fxrstor(cpu_env, s->A0); - gen_jmp_im(s, s->pc - s->cs_base); + gen_update_eip_next(s); gen_eob(s); break; @@ -8726,7 +7070,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } if (s->flags & HF_TS_MASK) { - gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + gen_exception(s, 
EXCP07_PREX); break; } gen_lea_modrm(env, s, modrm); @@ -8739,7 +7083,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } if (s->flags & HF_TS_MASK) { - gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + gen_exception(s, EXCP07_PREX); break; } gen_helper_update_mxcsr(cpu_env); @@ -8772,9 +7116,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_helper_xrstor(cpu_env, s->A0, s->tmp1_i64); /* XRSTOR is how MPX is enabled, which changes how we translate. Thus we need to end the TB. */ - gen_update_cc_op(s); - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); + s->base.is_jmp = DISAS_EOB_NEXT; break; CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */ @@ -8907,10 +7249,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) g_assert_not_reached(); #else gen_update_cc_op(s); - gen_jmp_im(s, s->pc - s->cs_base); + gen_update_eip_next(s); gen_helper_rsm(cpu_env); #endif /* CONFIG_USER_ONLY */ - gen_eob(s); + s->base.is_jmp = DISAS_EOB_ONLY; break; case 0x1b8: /* SSE4.2 popcnt */ if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) != @@ -8936,11 +7278,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) set_cc_op(s, CC_OP_POPCNT); break; - case 0x10e ... 0x10f: - /* 3DNow! instructions, ignore prefixes */ - s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA); - /* fall through */ - case 0x110 ... 0x117: + case 0x10e ... 0x117: case 0x128 ... 0x12f: case 0x138 ... 0x13a: case 0x150 ... 0x179: @@ -8948,18 +7286,18 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0x1c2: case 0x1c4 ... 0x1c6: case 0x1d0 ... 0x1fe: - gen_sse(env, s, b, pc_start); + disas_insn_new(s, cpu, b); break; default: goto unknown_op; } - return s->pc; + return true; illegal_op: gen_illegal_opcode(s); - return s->pc; + return true; unknown_op: gen_unknown_opcode(env, s); - return s->pc; + return true; } void tcg_x86_init(void) @@ -8991,6 +7329,13 @@ void tcg_x86_init(void) [R_EDI] = "edi", [R_EBP] = "ebp", [R_ESP] = "esp", +#endif + }; + static const char eip_name[] = { +#ifdef TARGET_X86_64 + "rip" +#else + "eip" #endif }; static const char seg_base_names[6][8] = { @@ -9017,6 +7362,7 @@ void tcg_x86_init(void) "cc_src"); cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2), "cc_src2"); + cpu_eip = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, eip), eip_name); for (i = 0; i < CPU_NB_REGS; ++i) { cpu_regs[i] = tcg_global_mem_new(cpu_env, @@ -9055,10 +7401,12 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) DisasContext *dc = container_of(dcbase, DisasContext, base); CPUX86State *env = cpu->env_ptr; uint32_t flags = dc->base.tb->flags; + uint32_t cflags = tb_cflags(dc->base.tb); int cpl = (flags >> HF_CPL_SHIFT) & 3; int iopl = (flags >> IOPL_SHIFT) & 3; dc->cs_base = dc->base.tb->cs_base; + dc->pc_save = dc->base.pc_next; dc->flags = flags; #ifndef CONFIG_USER_ONLY dc->cpl = cpl; @@ -9091,15 +7439,16 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX]; dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX]; dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX]; + dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX]; dc->cpuid_xsave_features = env->features[FEAT_XSAVE]; - dc->jmp_opt = !(dc->base.singlestep_enabled || + dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) || (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))); /* * If jmp_opt, we want to handle each string 
instruction individually. * For icount also disable repz optimization so that each iteration * is accounted separately. */ - dc->repz_opt = !dc->jmp_opt && !(tb_cflags(dc->base.tb) & CF_USE_ICOUNT); + dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT); dc->T0 = tcg_temp_new(); dc->T1 = tcg_temp_new(); @@ -9110,8 +7459,6 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->tmp2_i32 = tcg_temp_new_i32(); dc->tmp3_i32 = tcg_temp_new_i32(); dc->tmp4 = tcg_temp_new(); - dc->ptr0 = tcg_temp_new_ptr(); - dc->ptr1 = tcg_temp_new_ptr(); dc->cc_srcT = tcg_temp_local_new(); for (int i = 0; i < 8; i++) { @@ -9129,52 +7476,50 @@ static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu) static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); + target_ulong pc_arg = dc->base.pc_next; - tcg_gen_insn_start(dc->base.pc_next, dc->cc_op); + dc->prev_insn_end = tcg_last_op(); + if (TARGET_TB_PCREL) { + pc_arg -= dc->cs_base; + pc_arg &= ~TARGET_PAGE_MASK; + } + tcg_gen_insn_start(pc_arg, dc->cc_op); } static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); - target_ulong pc_next; #ifdef TARGET_VSYSCALL_PAGE /* * Detect entry into the vsyscall page and invoke the syscall. */ if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) { - gen_exception(dc, EXCP_VSYSCALL, dc->base.pc_next); + gen_exception(dc, EXCP_VSYSCALL); dc->base.pc_next = dc->pc + 1; return; } #endif - pc_next = disas_insn(dc, cpu); + if (disas_insn(dc, cpu)) { + target_ulong pc_next = dc->pc; + dc->base.pc_next = pc_next; - if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) { - /* if single step mode, we generate only one instruction and - generate an exception */ - /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear - the flag and abort the translation to give the irqs a - chance to happen */ - dc->base.is_jmp = DISAS_TOO_MANY; - } else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT) - && ((pc_next & TARGET_PAGE_MASK) - != ((pc_next + TARGET_MAX_INSN_SIZE - 1) - & TARGET_PAGE_MASK) - || (pc_next & ~TARGET_PAGE_MASK) == 0)) { - /* Do not cross the boundary of the pages in icount mode, - it can cause an exception. Do it only when boundary is - crossed by the first instruction in the block. - If current instruction already crossed the bound - it's ok, - because an exception hasn't stopped this code. - */ - dc->base.is_jmp = DISAS_TOO_MANY; - } else if ((pc_next - dc->base.pc_first) >= (TARGET_PAGE_SIZE - 32)) { - dc->base.is_jmp = DISAS_TOO_MANY; + if (dc->base.is_jmp == DISAS_NEXT) { + if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) { + /* + * If single step mode, we generate only one instruction and + * generate an exception. + * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear + * the flag and abort the translation to give the irqs a + * chance to happen. 
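The reworked translate hook now ends the TB in just two situations: pending single-step / IRQ-inhibit state, or a next instruction that would land on a different guest page (the is_same_page() check right below). A standalone sketch of that page comparison, using a placeholder PAGE_BITS rather than the target's real constant:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_BITS 12   /* placeholder; the real test uses the target's TARGET_PAGE_MASK */
#define PAGE_MASK  (~(((uint64_t)1 << PAGE_BITS) - 1))

/* Two addresses sit on the same guest page when no bit above the page
 * offset differs. */
static bool same_page(uint64_t pc_first, uint64_t pc_next)
{
    return ((pc_first ^ pc_next) & PAGE_MASK) == 0;
}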
+ */ + dc->base.is_jmp = DISAS_EOB_NEXT; + } else if (!is_same_page(&dc->base, pc_next)) { + dc->base.is_jmp = DISAS_TOO_MANY; + } + } } - - dc->base.pc_next = pc_next; } static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) @@ -9183,19 +7528,40 @@ static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) gen_flush_fp(dc); - if (dc->base.is_jmp == DISAS_TOO_MANY) { - gen_jmp_im(dc, dc->base.pc_next - dc->cs_base); + switch (dc->base.is_jmp) { + case DISAS_NORETURN: + break; + case DISAS_TOO_MANY: + gen_update_cc_op(dc); + gen_jmp_rel_csize(dc, 0, 0); + break; + case DISAS_EOB_NEXT: + gen_update_cc_op(dc); + gen_update_eip_cur(dc); + /* fall through */ + case DISAS_EOB_ONLY: gen_eob(dc); + break; + case DISAS_EOB_INHIBIT_IRQ: + gen_update_cc_op(dc); + gen_update_eip_cur(dc); + gen_eob_inhibit_irq(dc, true); + break; + case DISAS_JUMP: + gen_jr(dc); + break; + default: + g_assert_not_reached(); } } static void i386_tr_disas_log(const DisasContextBase *dcbase, - CPUState *cpu) + CPUState *cpu, FILE *logfile) { DisasContext *dc = container_of(dcbase, DisasContext, base); - qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first)); - log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first)); + target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size); } static const TranslatorOps i386_tr_ops = { @@ -9208,25 +7574,16 @@ static const TranslatorOps i386_tr_ops = { }; /* generate intermediate code for basic block 'tb'. */ -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext dc; tcg_ctx->disas_ctx = &dc; - translator_loop(&i386_tr_ops, &dc.base, cpu, tb, max_insns); + translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base); tcg_ctx->disas_ctx = NULL; } -void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, - target_ulong *data) -{ - int cc_op = data[1]; - env->eip = data[0] - tb->cs_base; - if (cc_op != CC_OP_DYNAMIC) { - env->cc_op = cc_op; - } -} - void gen_bb_epilogue(void) { gen_flush_fp((DisasContext *)tcg_ctx->disas_ctx); diff --git a/target/i386/tcg/user/excp_helper.c b/target/i386/tcg/user/excp_helper.c index a89b5228fd..b3bdb7831a 100644 --- a/target/i386/tcg/user/excp_helper.c +++ b/target/i386/tcg/user/excp_helper.c @@ -22,18 +22,36 @@ #include "exec/exec-all.h" #include "tcg/helper-tcg.h" -bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) +void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr, + MMUAccessType access_type, + bool maperr, uintptr_t ra) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; + /* + * The error_code that hw reports as part of the exception frame + * is copied to linux sigcontext.err. The exception_index is + * copied to linux sigcontext.trapno. Short of inventing a new + * place to store the trapno, we cannot let our caller raise the + * signal and set exception_index to EXCP_INTERRUPT. + */ env->cr[2] = addr; - env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT; - env->error_code |= PG_ERROR_U_MASK; + env->error_code = ((access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT) + | (maperr ? 0 : PG_ERROR_P_MASK) + | PG_ERROR_U_MASK; cs->exception_index = EXCP0E_PAGE; + + /* Disable do_interrupt_user. 
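The new record_sigsegv handler above builds the hardware-style #PF error code by hand so that linux-user can copy it into sigcontext.err. Written out as a tiny illustrative helper, the bit layout it composes (the standard x86 page-fault error code, matching QEMU's PG_ERROR_* constants):

#include <stdbool.h>
#include <stdint.h>

#define PG_ERROR_P_MASK 0x01u   /* protection violation (page was present)  */
#define PG_ERROR_W_BIT  1       /* fault was caused by a write              */
#define PG_ERROR_U_MASK 0x04u   /* fault happened while in user mode (CPL3) */

static uint32_t pf_error_code(bool is_write, bool maperr)
{
    return ((uint32_t)is_write << PG_ERROR_W_BIT)
           | (maperr ? 0 : PG_ERROR_P_MASK)
           | PG_ERROR_U_MASK;
}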
*/ env->exception_is_int = 0; env->exception_next_eip = -1; - cpu_loop_exit_restore(cs, retaddr); + + cpu_loop_exit_restore(cs, ra); +} + +void x86_cpu_record_sigbus(CPUState *cs, vaddr addr, + MMUAccessType access_type, uintptr_t ra) +{ + X86CPU *cpu = X86_CPU(cs); + handle_unaligned_access(&cpu->env, addr, access_type, ra); } diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c index 6bc47c5309..e8dc4b3a47 100644 --- a/target/i386/whpx/whpx-accel-ops.c +++ b/target/i386/whpx/whpx-accel-ops.c @@ -64,8 +64,8 @@ static void whpx_start_vcpu_thread(CPUState *cpu) { char thread_name[VCPU_THREAD_NAME_SIZE]; - cpu->thread = g_malloc0(sizeof(QemuThread)); - cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + cpu->thread = g_new0(QemuThread, 1); + cpu->halt_cond = g_new0(QemuCond, 1); qemu_cond_init(cpu->halt_cond); snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX", cpu->cpu_index); @@ -83,17 +83,24 @@ static void whpx_kick_vcpu_thread(CPUState *cpu) } } +static bool whpx_vcpu_thread_is_idle(CPUState *cpu) +{ + return !whpx_apic_in_platform(); +} + static void whpx_accel_ops_class_init(ObjectClass *oc, void *data) { AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); ops->create_vcpu_thread = whpx_start_vcpu_thread; ops->kick_vcpu_thread = whpx_kick_vcpu_thread; + ops->cpu_thread_is_idle = whpx_vcpu_thread_is_idle; ops->synchronize_post_reset = whpx_cpu_synchronize_post_reset; ops->synchronize_post_init = whpx_cpu_synchronize_post_init; ops->synchronize_state = whpx_cpu_synchronize_state; ops->synchronize_pre_loadvm = whpx_cpu_synchronize_pre_loadvm; + ops->synchronize_pre_resume = whpx_cpu_synchronize_pre_resume; } static const TypeInfo whpx_accel_ops_type = { diff --git a/target/i386/whpx/whpx-accel-ops.h b/target/i386/whpx/whpx-accel-ops.h index 2dee6d61ea..7a1bb1ab57 100644 --- a/target/i386/whpx/whpx-accel-ops.h +++ b/target/i386/whpx/whpx-accel-ops.h @@ -7,8 +7,8 @@ * See the COPYING file in the top-level directory. */ -#ifndef WHPX_CPUS_H -#define WHPX_CPUS_H +#ifndef TARGET_I386_WHPX_ACCEL_OPS_H +#define TARGET_I386_WHPX_ACCEL_OPS_H #include "sysemu/cpus.h" @@ -21,6 +21,7 @@ void whpx_cpu_synchronize_state(CPUState *cpu); void whpx_cpu_synchronize_post_reset(CPUState *cpu); void whpx_cpu_synchronize_post_init(CPUState *cpu); void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu); +void whpx_cpu_synchronize_pre_resume(bool step_pending); /* state subset only touched by the VCPU itself during runtime */ #define WHPX_SET_RUNTIME_STATE 1 @@ -29,4 +30,4 @@ void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu); /* full state set, modified during initialization or on vmload */ #define WHPX_SET_FULL_STATE 3 -#endif /* WHPX_CPUS_H */ +#endif /* TARGET_I386_WHPX_ACCEL_OPS_H */ diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index f832f286ac..e738d83e81 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -12,7 +12,7 @@ #include "cpu.h" #include "exec/address-spaces.h" #include "exec/ioport.h" -#include "qemu-common.h" +#include "exec/gdbstub.h" #include "qemu/accel.h" #include "sysemu/whpx.h" #include "sysemu/cpus.h" @@ -148,6 +148,87 @@ struct whpx_register_set { WHV_REGISTER_VALUE values[RTL_NUMBER_OF(whpx_register_names)]; }; +/* + * The current implementation of instruction stepping sets the TF flag + * in RFLAGS, causing the CPU to raise an INT1 after each instruction. + * This corresponds to the WHvX64ExceptionTypeDebugTrapOrFault exception. + * + * This approach has a few limitations: + * 1. 
Stepping over a PUSHF/SAHF instruction will save the TF flag + * along with the other flags, possibly restoring it later. It would + * result in another INT1 when the flags are restored, triggering + * a stop in gdb that could be cleared by doing another step. + * + * Stepping over a POPF/LAHF instruction will let it overwrite the + * TF flags, ending the stepping mode. + * + * 2. Stepping over an instruction raising an exception (e.g. INT, DIV, + * or anything that could result in a page fault) will save the flags + * to the stack, clear the TF flag, and let the guest execute the + * handler. Normally, the guest will restore the original flags, + * that will continue single-stepping. + * + * 3. Debuggers running on the guest may wish to set TF to do instruction + * stepping. INT1 events generated by it would be intercepted by us, + * as long as the gdb is connected to QEMU. + * + * In practice this means that: + * 1. Stepping through flags-modifying instructions may cause gdb to + * continue or stop in unexpected places. This will be fully recoverable + * and will not crash the target. + * + * 2. Stepping over an instruction that triggers an exception will step + * over the exception handler, not into it. + * + * 3. Debugging the guest via gdb, while running debugger on the guest + * at the same time may lead to unexpected effects. Removing all + * breakpoints set via QEMU will prevent any further interference + * with the guest-level debuggers. + * + * The limitations can be addressed as shown below: + * 1. PUSHF/SAHF/POPF/LAHF/IRET instructions can be emulated instead of + * stepping through them. The exact semantics of the instructions is + * defined in the "Combined Volume Set of Intel 64 and IA-32 + * Architectures Software Developer's Manuals", however it involves a + * fair amount of corner cases due to compatibility with real mode, + * virtual 8086 mode, and differences between 64-bit and 32-bit modes. + * + * 2. We could step into the guest's exception handlers using the following + * sequence: + * a. Temporarily enable catching of all exception types via + * whpx_set_exception_exit_bitmap(). + * b. Once an exception is intercepted, read the IDT/GDT and locate + * the original handler. + * c. Patch the original handler, injecting an INT3 at the beginning. + * d. Update the exception exit bitmap to only catch the + * WHvX64ExceptionTypeBreakpointTrap exception. + * e. Let the affected CPU run in the exclusive mode. + * f. Restore the original handler and the exception exit bitmap. + * Note that handling all corner cases related to IDT/GDT is harder + * than it may seem. See x86_cpu_get_phys_page_attrs_debug() for a + * rough idea. + * + * 3. In order to properly support guest-level debugging in parallel with + * the QEMU-level debugging, we would need to be able to pass some INT1 + * events to the guest. This could be done via the following methods: + * a. Using the WHvRegisterPendingEvent register. As of Windows 21H1, + * it seems to only work for interrupts and not software + * exceptions. + * b. Locating and patching the original handler by parsing IDT/GDT. + * This involves relatively complex logic outlined in the previous + * paragraph. + * c. Emulating the exception invocation (i.e. manually updating RIP, + * RFLAGS, and pushing the old values to stack). This is even more + * complicated than the previous option, since it involves checking + * CPL, gate attributes, and doing various adjustments depending + * on the current CPU mode, whether the CPL is changing, etc. 
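Everything in the note above rests on toggling the trap flag in the guest's RFLAGS image: set TF to get an INT1 after one instruction, clear it to stop. A minimal illustration of that toggle (TF is architecturally bit 8, which is what QEMU's TF_MASK expands to):

#include <stdbool.h>
#include <stdint.h>

#define TF_MASK (1u << 8)   /* trap flag, RFLAGS bit 8 */

static uint64_t set_single_step(uint64_t rflags, bool enable)
{
    return enable ? (rflags | TF_MASK) : (rflags & ~(uint64_t)TF_MASK);
}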
+ */ +typedef enum WhpxStepMode { + WHPX_STEP_NONE = 0, + /* Halt other VCPUs */ + WHPX_STEP_EXCLUSIVE, +} WhpxStepMode; + struct whpx_vcpu { WHV_EMULATOR_HANDLE emulator; bool window_registered; @@ -165,9 +246,15 @@ static bool whpx_allowed; static bool whp_dispatch_initialized; static HMODULE hWinHvPlatform, hWinHvEmulation; static uint32_t max_vcpu_index; +static WHV_PROCESSOR_XSAVE_FEATURES whpx_xsave_cap; + struct whpx_state whpx_global; struct WHPDispatch whp_dispatch; +static bool whpx_has_xsave(void) +{ + return whpx_xsave_cap.XsaveSupport; +} /* * VP support @@ -219,9 +306,31 @@ static SegmentCache whpx_seg_h2q(const WHV_X64_SEGMENT_REGISTER *hs) return qs; } +/* X64 Extended Control Registers */ +static void whpx_set_xcrs(CPUState *cpu) +{ + CPUX86State *env = cpu->env_ptr; + HRESULT hr; + struct whpx_state *whpx = &whpx_global; + WHV_REGISTER_VALUE xcr0; + WHV_REGISTER_NAME xcr0_name = WHvX64RegisterXCr0; + + if (!whpx_has_xsave()) { + return; + } + + /* Only xcr0 is supported by the hypervisor currently */ + xcr0.Reg64 = env->xcr0; + hr = whp_dispatch.WHvSetVirtualProcessorRegisters( + whpx->partition, cpu->cpu_index, &xcr0_name, 1, &xcr0); + if (FAILED(hr)) { + error_report("WHPX: Failed to set register xcr0, hr=%08lx", hr); + } +} + static int whpx_set_tsc(CPUState *cpu) { - struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); + CPUX86State *env = cpu->env_ptr; WHV_REGISTER_NAME tsc_reg = WHvX64RegisterTsc; WHV_REGISTER_VALUE tsc_val; HRESULT hr; @@ -256,11 +365,33 @@ static int whpx_set_tsc(CPUState *cpu) return 0; } +/* + * The CR8 register in the CPU is mapped to the TPR register of the APIC, + * however, they use a slightly different encoding. Specifically: + * + * APIC.TPR[bits 7:4] = CR8[bits 3:0] + * + * This mechanism is described in section 10.8.6.1 of Volume 3 of Intel 64 + * and IA-32 Architectures Software Developer's Manual. + * + * The functions below translate the value of CR8 to TPR and vice versa. + */ + +static uint64_t whpx_apic_tpr_to_cr8(uint64_t tpr) +{ + return tpr >> 4; +} + +static uint64_t whpx_cr8_to_apic_tpr(uint64_t cr8) +{ + return cr8 << 4; +} + static void whpx_set_registers(CPUState *cpu, int level) { struct whpx_state *whpx = &whpx_global; struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); - struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); + CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); struct whpx_register_set vcxt; HRESULT hr; @@ -284,7 +415,7 @@ static void whpx_set_registers(CPUState *cpu, int level) v86 = (env->eflags & VM_MASK); r86 = !(env->cr[0] & CR0_PE_MASK); - vcpu->tpr = cpu_get_apic_tpr(x86_cpu->apic_state); + vcpu->tpr = whpx_apic_tpr_to_cr8(cpu_get_apic_tpr(x86_cpu->apic_state)); vcpu->apic_base = cpu_get_apic_base(x86_cpu->apic_state); idx = 0; @@ -339,6 +470,12 @@ static void whpx_set_registers(CPUState *cpu, int level) /* 8 Debug Registers - Skipped */ + /* + * Extended control registers needs to be handled separately depending + * on whether xsave is supported/enabled or not. 
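Earlier in this hunk the patch documents the CR8-to-TPR encoding (APIC.TPR[7:4] == CR8[3:0]) and adds the two shift helpers. A small self-check of that mapping, showing that only the priority class round-trips while the TPR sub-class is dropped:

#include <assert.h>
#include <stdint.h>

static uint64_t apic_tpr_to_cr8(uint64_t tpr) { return tpr >> 4; }
static uint64_t cr8_to_apic_tpr(uint64_t cr8) { return cr8 << 4; }

int main(void)
{
    assert(apic_tpr_to_cr8(0xA0) == 0x0A);                   /* priority class 10 */
    assert(cr8_to_apic_tpr(0x0A) == 0xA0);
    /* The TPR sub-class in bits 3:0 is dropped by design. */
    assert(cr8_to_apic_tpr(apic_tpr_to_cr8(0xA5)) == 0xA0);
    return 0;
}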
+ */ + whpx_set_xcrs(cpu); + /* 16 XMM registers */ assert(whpx_register_names[idx] == WHvX64RegisterXmm0); idx_next = idx + 16; @@ -428,7 +565,7 @@ static void whpx_set_registers(CPUState *cpu, int level) static int whpx_get_tsc(CPUState *cpu) { - struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); + CPUX86State *env = cpu->env_ptr; WHV_REGISTER_NAME tsc_reg = WHvX64RegisterTsc; WHV_REGISTER_VALUE tsc_val; HRESULT hr; @@ -445,11 +582,35 @@ static int whpx_get_tsc(CPUState *cpu) return 0; } +/* X64 Extended Control Registers */ +static void whpx_get_xcrs(CPUState *cpu) +{ + CPUX86State *env = cpu->env_ptr; + HRESULT hr; + struct whpx_state *whpx = &whpx_global; + WHV_REGISTER_VALUE xcr0; + WHV_REGISTER_NAME xcr0_name = WHvX64RegisterXCr0; + + if (!whpx_has_xsave()) { + return; + } + + /* Only xcr0 is supported by the hypervisor currently */ + hr = whp_dispatch.WHvGetVirtualProcessorRegisters( + whpx->partition, cpu->cpu_index, &xcr0_name, 1, &xcr0); + if (FAILED(hr)) { + error_report("WHPX: Failed to get register xcr0, hr=%08lx", hr); + return; + } + + env->xcr0 = xcr0.Reg64; +} + static void whpx_get_registers(CPUState *cpu) { struct whpx_state *whpx = &whpx_global; struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); - struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); + CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); struct whpx_register_set vcxt; uint64_t tpr, apic_base; @@ -475,6 +636,17 @@ static void whpx_get_registers(CPUState *cpu) hr); } + if (whpx_apic_in_platform()) { + /* + * Fetch the TPR value from the emulated APIC. It may get overwritten + * below with the value from CR8 returned by + * WHvGetVirtualProcessorRegisters(). + */ + whpx_apic_get(x86_cpu->apic_state); + vcpu->tpr = whpx_apic_tpr_to_cr8( + cpu_get_apic_tpr(x86_cpu->apic_state)); + } + idx = 0; /* Indexes for first 16 registers match between HV and QEMU definitions */ @@ -522,11 +694,17 @@ static void whpx_get_registers(CPUState *cpu) tpr = vcxt.values[idx++].Reg64; if (tpr != vcpu->tpr) { vcpu->tpr = tpr; - cpu_set_apic_tpr(x86_cpu->apic_state, tpr); + cpu_set_apic_tpr(x86_cpu->apic_state, whpx_cr8_to_apic_tpr(tpr)); } /* 8 Debug Registers - Skipped */ + /* + * Extended control registers needs to be handled separately depending + * on whether xsave is supported/enabled or not. + */ + whpx_get_xcrs(cpu); + /* 16 XMM registers */ assert(whpx_register_names[idx] == WHvX64RegisterXmm0); idx_next = idx + 16; @@ -604,6 +782,8 @@ static void whpx_get_registers(CPUState *cpu) whpx_apic_get(x86_cpu->apic_state); } + x86_update_hflags(env); + return; } @@ -758,9 +938,517 @@ static int whpx_handle_portio(CPUState *cpu, return 0; } +/* + * Controls whether we should intercept various exceptions on the guest, + * namely breakpoint/single-step events. + * + * The 'exceptions' argument accepts a bitmask, e.g: + * (1 << WHvX64ExceptionTypeDebugTrapOrFault) | (...) + */ +static HRESULT whpx_set_exception_exit_bitmap(UINT64 exceptions) +{ + struct whpx_state *whpx = &whpx_global; + WHV_PARTITION_PROPERTY prop = { 0, }; + HRESULT hr; + + if (exceptions == whpx->exception_exit_bitmap) { + return S_OK; + } + + prop.ExceptionExitBitmap = exceptions; + + hr = whp_dispatch.WHvSetPartitionProperty( + whpx->partition, + WHvPartitionPropertyCodeExceptionExitBitmap, + &prop, + sizeof(WHV_PARTITION_PROPERTY)); + + if (SUCCEEDED(hr)) { + whpx->exception_exit_bitmap = exceptions; + } + + return hr; +} + + +/* + * This function is called before/after stepping over a single instruction. 
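whpx_set_exception_exit_bitmap() above writes the partition property only when the requested mask differs from the cached value, and the init path later seeds the cache with -1 so the very first update is never skipped. The shape of that caching, reduced to a sketch with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

static uint64_t cached_mask = UINT64_MAX;   /* seed with an impossible value */

static bool set_exit_mask(uint64_t mask, int (*commit)(uint64_t))
{
    if (mask == cached_mask) {
        return true;                        /* same mask: skip the hypercall */
    }
    if (commit(mask) == 0) {
        cached_mask = mask;                 /* remember the value only on success */
        return true;
    }
    return false;
}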
+ * It will update the CPU registers to arm/disarm the instruction stepping + * accordingly. + */ +static HRESULT whpx_vcpu_configure_single_stepping(CPUState *cpu, + bool set, + uint64_t *exit_context_rflags) +{ + WHV_REGISTER_NAME reg_name; + WHV_REGISTER_VALUE reg_value; + HRESULT hr; + struct whpx_state *whpx = &whpx_global; + + /* + * If we are trying to step over a single instruction, we need to set the + * TF bit in rflags. Otherwise, clear it. + */ + reg_name = WHvX64RegisterRflags; + hr = whp_dispatch.WHvGetVirtualProcessorRegisters( + whpx->partition, + cpu->cpu_index, + ®_name, + 1, + ®_value); + + if (FAILED(hr)) { + error_report("WHPX: Failed to get rflags, hr=%08lx", hr); + return hr; + } + + if (exit_context_rflags) { + assert(*exit_context_rflags == reg_value.Reg64); + } + + if (set) { + /* Raise WHvX64ExceptionTypeDebugTrapOrFault after each instruction */ + reg_value.Reg64 |= TF_MASK; + } else { + reg_value.Reg64 &= ~TF_MASK; + } + + if (exit_context_rflags) { + *exit_context_rflags = reg_value.Reg64; + } + + hr = whp_dispatch.WHvSetVirtualProcessorRegisters( + whpx->partition, + cpu->cpu_index, + ®_name, + 1, + ®_value); + + if (FAILED(hr)) { + error_report("WHPX: Failed to set rflags," + " hr=%08lx", + hr); + return hr; + } + + reg_name = WHvRegisterInterruptState; + reg_value.Reg64 = 0; + + /* Suspend delivery of hardware interrupts during single-stepping. */ + reg_value.InterruptState.InterruptShadow = set != 0; + + hr = whp_dispatch.WHvSetVirtualProcessorRegisters( + whpx->partition, + cpu->cpu_index, + ®_name, + 1, + ®_value); + + if (FAILED(hr)) { + error_report("WHPX: Failed to set InterruptState," + " hr=%08lx", + hr); + return hr; + } + + if (!set) { + /* + * We have just finished stepping over a single instruction, + * and intercepted the INT1 generated by it. + * We need to now hide the INT1 from the guest, + * as it would not be expecting it. + */ + + reg_name = WHvX64RegisterPendingDebugException; + hr = whp_dispatch.WHvGetVirtualProcessorRegisters( + whpx->partition, + cpu->cpu_index, + ®_name, + 1, + ®_value); + + if (FAILED(hr)) { + error_report("WHPX: Failed to get pending debug exceptions," + "hr=%08lx", hr); + return hr; + } + + if (reg_value.PendingDebugException.SingleStep) { + reg_value.PendingDebugException.SingleStep = 0; + + hr = whp_dispatch.WHvSetVirtualProcessorRegisters( + whpx->partition, + cpu->cpu_index, + ®_name, + 1, + ®_value); + + if (FAILED(hr)) { + error_report("WHPX: Failed to clear pending debug exceptions," + "hr=%08lx", hr); + return hr; + } + } + + } + + return S_OK; +} + +/* Tries to find a breakpoint at the specified address. */ +static struct whpx_breakpoint *whpx_lookup_breakpoint_by_addr(uint64_t address) +{ + struct whpx_state *whpx = &whpx_global; + int i; + + if (whpx->breakpoints.breakpoints) { + for (i = 0; i < whpx->breakpoints.breakpoints->used; i++) { + if (address == whpx->breakpoints.breakpoints->data[i].address) { + return &whpx->breakpoints.breakpoints->data[i]; + } + } + } + + return NULL; +} + +/* + * Linux uses int3 (0xCC) during startup (see int3_selftest()) and for + * debugging user-mode applications. Since the WHPX API does not offer + * an easy way to pass the intercepted exception back to the guest, we + * resort to using INT1 instead, and let the guest always handle INT3. + */ +static const uint8_t whpx_breakpoint_instruction = 0xF1; + +/* + * The WHPX QEMU backend implements breakpoints by writing the INT1 + * instruction into memory (ignoring the DRx registers). 
This raises a few + * issues that need to be carefully handled: + * + * 1. Although unlikely, other parts of QEMU may set multiple breakpoints + * at the same location, and later remove them in arbitrary order. + * This should not cause memory corruption, and should only remove the + * physical breakpoint instruction when the last QEMU breakpoint is gone. + * + * 2. Writing arbitrary virtual memory may fail if it's not mapped to a valid + * physical location. Hence, physically adding/removing a breakpoint can + * theoretically fail at any time. We need to keep track of it. + * + * The function below rebuilds a list of low-level breakpoints (one per + * address, tracking the original instruction and any errors) from the list of + * high-level breakpoints (set via cpu_breakpoint_insert()). + * + * In order to optimize performance, this function stores the list of + * high-level breakpoints (a.k.a. CPU breakpoints) used to compute the + * low-level ones, so that it won't be re-invoked until these breakpoints + * change. + * + * Note that this function decides which breakpoints should be inserted into, + * memory, but doesn't actually do it. The memory accessing is done in + * whpx_apply_breakpoints(). + */ +static void whpx_translate_cpu_breakpoints( + struct whpx_breakpoints *breakpoints, + CPUState *cpu, + int cpu_breakpoint_count) +{ + CPUBreakpoint *bp; + int cpu_bp_index = 0; + + breakpoints->original_addresses = + g_renew(vaddr, breakpoints->original_addresses, cpu_breakpoint_count); + + breakpoints->original_address_count = cpu_breakpoint_count; + + int max_breakpoints = cpu_breakpoint_count + + (breakpoints->breakpoints ? breakpoints->breakpoints->used : 0); + + struct whpx_breakpoint_collection *new_breakpoints = + g_malloc0(sizeof(struct whpx_breakpoint_collection) + + max_breakpoints * sizeof(struct whpx_breakpoint)); + + new_breakpoints->allocated = max_breakpoints; + new_breakpoints->used = 0; + + /* + * 1. Preserve all old breakpoints that could not be automatically + * cleared when the CPU got stopped. + */ + if (breakpoints->breakpoints) { + int i; + for (i = 0; i < breakpoints->breakpoints->used; i++) { + if (breakpoints->breakpoints->data[i].state != WHPX_BP_CLEARED) { + new_breakpoints->data[new_breakpoints->used++] = + breakpoints->breakpoints->data[i]; + } + } + } + + /* 2. Map all CPU breakpoints to WHPX breakpoints */ + QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { + int i; + bool found = false; + + /* This will be used to detect changed CPU breakpoints later. */ + breakpoints->original_addresses[cpu_bp_index++] = bp->pc; + + for (i = 0; i < new_breakpoints->used; i++) { + /* + * WARNING: This loop has O(N^2) complexity, where N is the + * number of breakpoints. It should not be a bottleneck in + * real-world scenarios, since it only needs to run once after + * the breakpoints have been modified. + * If this ever becomes a concern, it can be optimized by storing + * high-level breakpoint objects in a tree or hash map. + */ + + if (new_breakpoints->data[i].address == bp->pc) { + /* There was already a breakpoint at this address. */ + if (new_breakpoints->data[i].state == WHPX_BP_CLEAR_PENDING) { + new_breakpoints->data[i].state = WHPX_BP_SET; + } else if (new_breakpoints->data[i].state == WHPX_BP_SET) { + new_breakpoints->data[i].state = WHPX_BP_SET_PENDING; + } + + found = true; + break; + } + } + + if (!found && new_breakpoints->used < new_breakpoints->allocated) { + /* No WHPX breakpoint at this address. Create one. 
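The bookkeeping in this area drives a four-state lifecycle per breakpoint address. The pre-write transition that whpx_apply_breakpoints() applies further down can be summarised as a pure function (names shortened here; the real enum is WhpxBreakpointState):

typedef enum {
    BP_CLEARED = 0,      /* no breakpoint byte in guest memory            */
    BP_SET_PENDING,      /* requested, 0xF1 not yet written               */
    BP_SET,              /* 0xF1 currently patched into guest memory      */
    BP_CLEAR_PENDING,    /* removal requested, original byte not restored */
} BpState;

/* Transition taken before any memory access; the write/restore step then
 * moves SET_PENDING to SET and CLEAR_PENDING to CLEARED on success. */
static BpState bp_next_state(BpState s, int resuming)
{
    switch (s) {
    case BP_CLEARED:       return resuming ? BP_SET_PENDING : BP_CLEARED;
    case BP_SET_PENDING:   return resuming ? BP_SET_PENDING : BP_CLEARED;
    case BP_SET:           return resuming ? BP_SET         : BP_CLEAR_PENDING;
    case BP_CLEAR_PENDING: return resuming ? BP_SET         : BP_CLEAR_PENDING;
    }
    return s;
}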
*/ + new_breakpoints->data[new_breakpoints->used].address = bp->pc; + new_breakpoints->data[new_breakpoints->used].state = + WHPX_BP_SET_PENDING; + new_breakpoints->used++; + } + } + + /* + * Free the previous breakpoint list. This can be optimized by keeping + * it as shadow buffer for the next computation instead of freeing + * it immediately. + */ + g_free(breakpoints->breakpoints); + + breakpoints->breakpoints = new_breakpoints; +} + +/* + * Physically inserts/removes the breakpoints by reading and writing the + * physical memory, keeping a track of the failed attempts. + * + * Passing resuming=true will try to set all previously unset breakpoints. + * Passing resuming=false will remove all inserted ones. + */ +static void whpx_apply_breakpoints( + struct whpx_breakpoint_collection *breakpoints, + CPUState *cpu, + bool resuming) +{ + int i, rc; + if (!breakpoints) { + return; + } + + for (i = 0; i < breakpoints->used; i++) { + /* Decide what to do right now based on the last known state. */ + WhpxBreakpointState state = breakpoints->data[i].state; + switch (state) { + case WHPX_BP_CLEARED: + if (resuming) { + state = WHPX_BP_SET_PENDING; + } + break; + case WHPX_BP_SET_PENDING: + if (!resuming) { + state = WHPX_BP_CLEARED; + } + break; + case WHPX_BP_SET: + if (!resuming) { + state = WHPX_BP_CLEAR_PENDING; + } + break; + case WHPX_BP_CLEAR_PENDING: + if (resuming) { + state = WHPX_BP_SET; + } + break; + } + + if (state == WHPX_BP_SET_PENDING) { + /* Remember the original instruction. */ + rc = cpu_memory_rw_debug(cpu, + breakpoints->data[i].address, + &breakpoints->data[i].original_instruction, + 1, + false); + + if (!rc) { + /* Write the breakpoint instruction. */ + rc = cpu_memory_rw_debug(cpu, + breakpoints->data[i].address, + (void *)&whpx_breakpoint_instruction, + 1, + true); + } + + if (!rc) { + state = WHPX_BP_SET; + } + + } + + if (state == WHPX_BP_CLEAR_PENDING) { + /* Restore the original instruction. */ + rc = cpu_memory_rw_debug(cpu, + breakpoints->data[i].address, + &breakpoints->data[i].original_instruction, + 1, + true); + + if (!rc) { + state = WHPX_BP_CLEARED; + } + } + + breakpoints->data[i].state = state; + } +} + +/* + * This function is called when the a VCPU is about to start and no other + * VCPUs have been started so far. Since the VCPU start order could be + * arbitrary, it doesn't have to be VCPU#0. + * + * It is used to commit the breakpoints into memory, and configure WHPX + * to intercept debug exceptions. + * + * Note that whpx_set_exception_exit_bitmap() cannot be called if one or + * more VCPUs are already running, so this is the best place to do it. + */ +static int whpx_first_vcpu_starting(CPUState *cpu) +{ + struct whpx_state *whpx = &whpx_global; + HRESULT hr; + + g_assert(qemu_mutex_iothread_locked()); + + if (!QTAILQ_EMPTY(&cpu->breakpoints) || + (whpx->breakpoints.breakpoints && + whpx->breakpoints.breakpoints->used)) { + CPUBreakpoint *bp; + int i = 0; + bool update_pending = false; + + QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { + if (i >= whpx->breakpoints.original_address_count || + bp->pc != whpx->breakpoints.original_addresses[i]) { + update_pending = true; + } + + i++; + } + + if (i != whpx->breakpoints.original_address_count) { + update_pending = true; + } + + if (update_pending) { + /* + * The CPU breakpoints have changed since the last call to + * whpx_translate_cpu_breakpoints(). WHPX breakpoints must + * now be recomputed. 
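Once the breakpoint list is current, whpx_first_vcpu_starting() decides whether the partition should intercept debug traps at all: only while QEMU-level single-stepping or software breakpoints are active. That policy reduces to a one-liner; the numeric exception type below is an assumption, the real code uses the SDK's WHvX64ExceptionTypeDebugTrapOrFault enumerator:

#include <stdbool.h>
#include <stdint.h>

#define DEBUG_TRAP_OR_FAULT 1   /* assumed #DB exception-type value */

static uint64_t debug_exit_mask(bool step_pending, int active_breakpoints)
{
    return (step_pending || active_breakpoints > 0)
           ? (1ULL << DEBUG_TRAP_OR_FAULT) : 0;
}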
+ */ + whpx_translate_cpu_breakpoints(&whpx->breakpoints, cpu, i); + } + + /* Actually insert the breakpoints into the memory. */ + whpx_apply_breakpoints(whpx->breakpoints.breakpoints, cpu, true); + } + + uint64_t exception_mask; + if (whpx->step_pending || + (whpx->breakpoints.breakpoints && + whpx->breakpoints.breakpoints->used)) { + /* + * We are either attempting to single-step one or more CPUs, or + * have one or more breakpoints enabled. Both require intercepting + * the WHvX64ExceptionTypeBreakpointTrap exception. + */ + + exception_mask = 1UL << WHvX64ExceptionTypeDebugTrapOrFault; + } else { + /* Let the guest handle all exceptions. */ + exception_mask = 0; + } + + hr = whpx_set_exception_exit_bitmap(exception_mask); + if (!SUCCEEDED(hr)) { + error_report("WHPX: Failed to update exception exit mask," + "hr=%08lx.", hr); + return 1; + } + + return 0; +} + +/* + * This function is called when the last VCPU has finished running. + * It is used to remove any previously set breakpoints from memory. + */ +static int whpx_last_vcpu_stopping(CPUState *cpu) +{ + whpx_apply_breakpoints(whpx_global.breakpoints.breakpoints, cpu, false); + return 0; +} + +/* Returns the address of the next instruction that is about to be executed. */ +static vaddr whpx_vcpu_get_pc(CPUState *cpu, bool exit_context_valid) +{ + if (cpu->vcpu_dirty) { + /* The CPU registers have been modified by other parts of QEMU. */ + CPUArchState *env = (CPUArchState *)(cpu->env_ptr); + return env->eip; + } else if (exit_context_valid) { + /* + * The CPU registers have not been modified by neither other parts + * of QEMU, nor this port by calling WHvSetVirtualProcessorRegisters(). + * This is the most common case. + */ + struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + return vcpu->exit_ctx.VpContext.Rip; + } else { + /* + * The CPU registers have been modified by a call to + * WHvSetVirtualProcessorRegisters() and must be re-queried from + * the target. 
+ */ + WHV_REGISTER_VALUE reg_value; + WHV_REGISTER_NAME reg_name = WHvX64RegisterRip; + HRESULT hr; + struct whpx_state *whpx = &whpx_global; + + hr = whp_dispatch.WHvGetVirtualProcessorRegisters( + whpx->partition, + cpu->cpu_index, + ®_name, + 1, + ®_value); + + if (FAILED(hr)) { + error_report("WHPX: Failed to get PC, hr=%08lx", hr); + return 0; + } + + return reg_value.Reg64; + } +} + static int whpx_handle_halt(CPUState *cpu) { - struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); + CPUX86State *env = cpu->env_ptr; int ret = 0; qemu_mutex_lock_iothread(); @@ -781,7 +1469,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu) HRESULT hr; struct whpx_state *whpx = &whpx_global; struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); - struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); + CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); int irq; uint8_t tpr; @@ -863,7 +1551,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu) } /* Sync the TPR to the CR8 if was modified during the intercept */ - tpr = cpu_get_apic_tpr(x86_cpu->apic_state); + tpr = whpx_apic_tpr_to_cr8(cpu_get_apic_tpr(x86_cpu->apic_state)); if (tpr != vcpu->tpr) { vcpu->tpr = tpr; reg_values[reg_count].Reg64 = tpr; @@ -903,7 +1591,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu) static void whpx_vcpu_post_run(CPUState *cpu) { struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); - struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); + CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); env->eflags = vcpu->exit_ctx.VpContext.Rflags; @@ -912,7 +1600,7 @@ static void whpx_vcpu_post_run(CPUState *cpu) if (vcpu->tpr != tpr) { vcpu->tpr = tpr; qemu_mutex_lock_iothread(); - cpu_set_apic_tpr(x86_cpu->apic_state, vcpu->tpr); + cpu_set_apic_tpr(x86_cpu->apic_state, whpx_cr8_to_apic_tpr(vcpu->tpr)); qemu_mutex_unlock_iothread(); } @@ -927,7 +1615,7 @@ static void whpx_vcpu_post_run(CPUState *cpu) static void whpx_vcpu_process_async_events(CPUState *cpu) { - struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); + CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); @@ -969,17 +1657,75 @@ static int whpx_vcpu_run(CPUState *cpu) HRESULT hr; struct whpx_state *whpx = &whpx_global; struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + struct whpx_breakpoint *stepped_over_bp = NULL; + WhpxStepMode exclusive_step_mode = WHPX_STEP_NONE; int ret; - whpx_vcpu_process_async_events(cpu); - if (cpu->halted && !whpx_apic_in_platform()) { - cpu->exception_index = EXCP_HLT; - qatomic_set(&cpu->exit_request, false); - return 0; + g_assert(qemu_mutex_iothread_locked()); + + if (whpx->running_cpus++ == 0) { + /* Insert breakpoints into memory, update exception exit bitmap. */ + ret = whpx_first_vcpu_starting(cpu); + if (ret != 0) { + return ret; + } + } + + if (whpx->breakpoints.breakpoints && + whpx->breakpoints.breakpoints->used > 0) + { + uint64_t pc = whpx_vcpu_get_pc(cpu, true); + stepped_over_bp = whpx_lookup_breakpoint_by_addr(pc); + if (stepped_over_bp && stepped_over_bp->state != WHPX_BP_SET) { + stepped_over_bp = NULL; + } + + if (stepped_over_bp) { + /* + * We are trying to run the instruction overwritten by an active + * breakpoint. We will temporarily disable the breakpoint, suspend + * other CPUs, and step over the instruction. 
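whpx_vcpu_run() also gains first-in/last-out bookkeeping: the first VCPU to start commits the breakpoints and programs the exit bitmap, the last one to stop restores the original guest bytes, all keyed off the running_cpus counter. A sketch of that pattern:

#include <stdio.h>

static int running_cpus;

/* Called with the iothread lock held in the real code, so a plain counter
 * is enough and no atomics are needed. */
static void vcpu_enter(void)
{
    if (running_cpus++ == 0) {
        printf("first VCPU starting: commit breakpoints, set exit bitmap\n");
    }
}

static void vcpu_leave(void)
{
    if (--running_cpus == 0) {
        printf("last VCPU stopping: restore original guest bytes\n");
    }
}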
+ */ + exclusive_step_mode = WHPX_STEP_EXCLUSIVE; + } + } + + if (exclusive_step_mode == WHPX_STEP_NONE) { + whpx_vcpu_process_async_events(cpu); + if (cpu->halted && !whpx_apic_in_platform()) { + cpu->exception_index = EXCP_HLT; + qatomic_set(&cpu->exit_request, false); + return 0; + } } qemu_mutex_unlock_iothread(); - cpu_exec_start(cpu); + + if (exclusive_step_mode != WHPX_STEP_NONE) { + start_exclusive(); + g_assert(cpu == current_cpu); + g_assert(!cpu->running); + cpu->running = true; + + hr = whpx_set_exception_exit_bitmap( + 1UL << WHvX64ExceptionTypeDebugTrapOrFault); + if (!SUCCEEDED(hr)) { + error_report("WHPX: Failed to update exception exit mask, " + "hr=%08lx.", hr); + return 1; + } + + if (stepped_over_bp) { + /* Temporarily disable the triggered breakpoint. */ + cpu_memory_rw_debug(cpu, + stepped_over_bp->address, + &stepped_over_bp->original_instruction, + 1, + true); + } + } else { + cpu_exec_start(cpu); + } do { if (cpu->vcpu_dirty) { @@ -987,10 +1733,16 @@ static int whpx_vcpu_run(CPUState *cpu) cpu->vcpu_dirty = false; } - whpx_vcpu_pre_run(cpu); + if (exclusive_step_mode == WHPX_STEP_NONE) { + whpx_vcpu_pre_run(cpu); - if (qatomic_read(&cpu->exit_request)) { - whpx_vcpu_kick(cpu); + if (qatomic_read(&cpu->exit_request)) { + whpx_vcpu_kick(cpu); + } + } + + if (exclusive_step_mode != WHPX_STEP_NONE || cpu->singlestep_enabled) { + whpx_vcpu_configure_single_stepping(cpu, true, NULL); } hr = whp_dispatch.WHvRunVirtualProcessor( @@ -1004,6 +1756,12 @@ static int whpx_vcpu_run(CPUState *cpu) break; } + if (exclusive_step_mode != WHPX_STEP_NONE || cpu->singlestep_enabled) { + whpx_vcpu_configure_single_stepping(cpu, + false, + &vcpu->exit_ctx.VpContext.Rflags); + } + whpx_vcpu_post_run(cpu); switch (vcpu->exit_ctx.ExitReason) { @@ -1027,6 +1785,10 @@ static int whpx_vcpu_run(CPUState *cpu) break; case WHvRunVpExitReasonX64Halt: + /* + * WARNING: as of build 19043.1526 (21H1), this exit reason is no + * longer used. + */ ret = whpx_handle_halt(cpu); break; @@ -1125,10 +1887,19 @@ static int whpx_vcpu_run(CPUState *cpu) } case WHvRunVpExitReasonCanceled: - cpu->exception_index = EXCP_INTERRUPT; - ret = 1; + if (exclusive_step_mode != WHPX_STEP_NONE) { + /* + * We are trying to step over a single instruction, and + * likely got a request to stop from another thread. + * Delay it until we are done stepping + * over. + */ + ret = 0; + } else { + cpu->exception_index = EXCP_INTERRUPT; + ret = 1; + } break; - case WHvRunVpExitReasonX64MsrAccess: { WHV_REGISTER_VALUE reg_values[3] = {0}; WHV_REGISTER_NAME reg_names[3]; @@ -1232,11 +2003,36 @@ static int whpx_vcpu_run(CPUState *cpu) ret = 0; break; } + case WHvRunVpExitReasonException: + whpx_get_registers(cpu); + + if ((vcpu->exit_ctx.VpException.ExceptionType == + WHvX64ExceptionTypeDebugTrapOrFault) && + (vcpu->exit_ctx.VpException.InstructionByteCount >= 1) && + (vcpu->exit_ctx.VpException.InstructionBytes[0] == + whpx_breakpoint_instruction)) { + /* Stopped at a software breakpoint. */ + cpu->exception_index = EXCP_DEBUG; + } else if ((vcpu->exit_ctx.VpException.ExceptionType == + WHvX64ExceptionTypeDebugTrapOrFault) && + !cpu->singlestep_enabled) { + /* + * Just finished stepping over a breakpoint, but the + * gdb does not expect us to do single-stepping. + * Don't do anything special. + */ + cpu->exception_index = EXCP_INTERRUPT; + } else { + /* Another exception or debug event. Report it to GDB. 
*/ + cpu->exception_index = EXCP_DEBUG; + } + + ret = 1; + break; case WHvRunVpExitReasonNone: case WHvRunVpExitReasonUnrecoverableException: case WHvRunVpExitReasonInvalidVpRegisterValue: case WHvRunVpExitReasonUnsupportedFeature: - case WHvRunVpExitReasonException: default: error_report("WHPX: Unexpected VP exit code %d", vcpu->exit_ctx.ExitReason); @@ -1249,10 +2045,32 @@ static int whpx_vcpu_run(CPUState *cpu) } while (!ret); - cpu_exec_end(cpu); + if (stepped_over_bp) { + /* Restore the breakpoint we stepped over */ + cpu_memory_rw_debug(cpu, + stepped_over_bp->address, + (void *)&whpx_breakpoint_instruction, + 1, + true); + } + + if (exclusive_step_mode != WHPX_STEP_NONE) { + g_assert(cpu_in_exclusive_context(cpu)); + cpu->running = false; + end_exclusive(); + + exclusive_step_mode = WHPX_STEP_NONE; + } else { + cpu_exec_end(cpu); + } + qemu_mutex_lock_iothread(); current_cpu = cpu; + if (--whpx->running_cpus == 0) { + whpx_last_vcpu_stopping(cpu); + } + qatomic_set(&cpu->exit_request, false); return ret < 0; @@ -1312,6 +2130,11 @@ void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu) run_on_cpu(cpu, do_whpx_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL); } +void whpx_cpu_synchronize_pre_resume(bool step_pending) +{ + whpx_global.step_pending = step_pending; +} + /* * Vcpu support. */ @@ -1333,7 +2156,7 @@ int whpx_init_vcpu(CPUState *cpu) struct whpx_state *whpx = &whpx_global; struct whpx_vcpu *vcpu = NULL; Error *local_error = NULL; - struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); + CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); UINT64 freq = 0; int ret; @@ -1346,17 +2169,15 @@ int whpx_init_vcpu(CPUState *cpu) "State blocked due to non-migratable CPUID feature support," "dirty memory tracking support, and XSAVE/XRSTOR support"); - (void)migrate_add_blocker(whpx_migration_blocker, &local_error); - if (local_error) { + if (migrate_add_blocker(whpx_migration_blocker, &local_error) < 0) { error_report_err(local_error); - migrate_del_blocker(whpx_migration_blocker); error_free(whpx_migration_blocker); ret = -EINVAL; goto error; } } - vcpu = g_malloc0(sizeof(struct whpx_vcpu)); + vcpu = g_new0(struct whpx_vcpu, 1); if (!vcpu) { error_report("WHPX: Failed to allocte VCPU context."); @@ -1546,15 +2367,15 @@ static void whpx_process_section(MemoryRegionSection *section, int add) return; } - delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask); - delta &= ~qemu_real_host_page_mask; + delta = qemu_real_host_page_size() - (start_pa & ~qemu_real_host_page_mask()); + delta &= ~qemu_real_host_page_mask(); if (delta > size) { return; } start_pa += delta; size -= delta; - size &= qemu_real_host_page_mask; - if (!size || (start_pa & ~qemu_real_host_page_mask)) { + size &= qemu_real_host_page_mask(); + if (!size || (start_pa & ~qemu_real_host_page_mask())) { return; } @@ -1600,6 +2421,7 @@ static void whpx_log_sync(MemoryListener *listener, } static MemoryListener whpx_memory_listener = { + .name = "whpx", .begin = whpx_transaction_begin, .commit = whpx_transaction_commit, .region_add = whpx_region_add, @@ -1759,6 +2581,29 @@ static int whpx_accel_init(MachineState *ms) goto error; } + /* + * Query the XSAVE capability of the partition. Any error here is not + * considered fatal. 
+ */ + hr = whp_dispatch.WHvGetPartitionProperty( + whpx->partition, + WHvPartitionPropertyCodeProcessorXsaveFeatures, + &whpx_xsave_cap, + sizeof(whpx_xsave_cap), + &whpx_cap_size); + + /* + * Windows version which don't support this property will return with the + * specific error code. + */ + if (FAILED(hr) && hr != WHV_E_UNKNOWN_PROPERTY) { + error_report("WHPX: Failed to query XSAVE capability, hr=%08lx", hr); + } + + if (!whpx_has_xsave()) { + printf("WHPX: Partition is not XSAVE capable\n"); + } + memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY)); prop.ProcessorCount = ms->smp.cpus; hr = whp_dispatch.WHvSetPartitionProperty( @@ -1812,6 +2657,7 @@ static int whpx_accel_init(MachineState *ms) memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY)); prop.ExtendedVmExits.X64MsrExit = 1; prop.ExtendedVmExits.X64CpuidExit = 1; + prop.ExtendedVmExits.ExceptionExit = 1; if (whpx_apic_in_platform()) { prop.ExtendedVmExits.X64ApicInitSipiExitTrap = 1; } @@ -1840,6 +2686,19 @@ static int whpx_accel_init(MachineState *ms) goto error; } + /* + * We do not want to intercept any exceptions from the guest, + * until we actually start debugging with gdb. + */ + whpx->exception_exit_bitmap = -1; + hr = whpx_set_exception_exit_bitmap(0); + + if (FAILED(hr)) { + error_report("WHPX: Failed to set exception exit bitmap, hr=%08lx", hr); + ret = -EINVAL; + goto error; + } + hr = whp_dispatch.WHvSetupPartition(whpx->partition); if (FAILED(hr)) { error_report("WHPX: Failed to setup partition, hr=%08lx", hr); diff --git a/target/i386/whpx/whpx-apic.c b/target/i386/whpx/whpx-apic.c index bba36f3ec9..c15df35ad6 100644 --- a/target/i386/whpx/whpx-apic.c +++ b/target/i386/whpx/whpx-apic.c @@ -11,7 +11,6 @@ * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "cpu.h" #include "hw/i386/apic_internal.h" #include "hw/i386/apic-msidef.h" diff --git a/target/i386/whpx/whpx-internal.h b/target/i386/whpx/whpx-internal.h index 908ababf6d..06429d8ccd 100644 --- a/target/i386/whpx/whpx-internal.h +++ b/target/i386/whpx/whpx-internal.h @@ -1,13 +1,43 @@ -#ifndef WHP_INTERNAL_H -#define WHP_INTERNAL_H +#ifndef TARGET_I386_WHPX_INTERNAL_H +#define TARGET_I386_WHPX_INTERNAL_H #include #include #include +typedef enum WhpxBreakpointState { + WHPX_BP_CLEARED = 0, + WHPX_BP_SET_PENDING, + WHPX_BP_SET, + WHPX_BP_CLEAR_PENDING, +} WhpxBreakpointState; + +struct whpx_breakpoint { + vaddr address; + WhpxBreakpointState state; + uint8_t original_instruction; +}; + +struct whpx_breakpoint_collection { + int allocated, used; + struct whpx_breakpoint data[0]; +}; + +struct whpx_breakpoints { + int original_address_count; + vaddr *original_addresses; + + struct whpx_breakpoint_collection *breakpoints; +}; + struct whpx_state { uint64_t mem_quota; WHV_PARTITION_HANDLE partition; + uint64_t exception_exit_bitmap; + int32_t running_cpus; + struct whpx_breakpoints breakpoints; + bool step_pending; + bool kernel_irqchip_allowed; bool kernel_irqchip_required; bool apic_in_platform; @@ -18,6 +48,9 @@ void whpx_apic_get(DeviceState *s); #define WHV_E_UNKNOWN_CAPABILITY 0x80370300L +/* This should eventually come from the Windows SDK */ +#define WHV_E_UNKNOWN_PROPERTY 0x80370302 + #define LIST_WINHVPLATFORM_FUNCTIONS(X) \ X(HRESULT, WHvGetCapability, (WHV_CAPABILITY_CODE CapabilityCode, VOID* CapabilityBuffer, UINT32 CapabilityBufferSizeInBytes, UINT32* WrittenSizeInBytes)) \ X(HRESULT, WHvCreatePartition, (WHV_PARTITION_HANDLE* Partition)) \ @@ -83,4 +116,4 @@ typedef enum WHPFunctionList { 
WINHV_PLATFORM_FNS_SUPPLEMENTAL } WHPFunctionList; -#endif /* WHP_INTERNAL_H */ +#endif /* TARGET_I386_WHPX_INTERNAL_H */ diff --git a/target/i386/xsave_helper.c b/target/i386/xsave_helper.c index ac61a96344..996e9f3bfe 100644 --- a/target/i386/xsave_helper.c +++ b/target/i386/xsave_helper.c @@ -126,6 +126,20 @@ void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen) memcpy(pkru, &env->pkru, sizeof(env->pkru)); } + + e = &x86_ext_save_areas[XSTATE_XTILE_CFG_BIT]; + if (e->size && e->offset) { + XSaveXTILECFG *tilecfg = buf + e->offset; + + memcpy(tilecfg, &env->xtilecfg, sizeof(env->xtilecfg)); + } + + e = &x86_ext_save_areas[XSTATE_XTILE_DATA_BIT]; + if (e->size && e->offset && buflen >= e->size + e->offset) { + XSaveXTILEDATA *tiledata = buf + e->offset; + + memcpy(tiledata, &env->xtiledata, sizeof(env->xtiledata)); + } #endif } @@ -247,5 +261,19 @@ void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen) pkru = buf + e->offset; memcpy(&env->pkru, pkru, sizeof(env->pkru)); } + + e = &x86_ext_save_areas[XSTATE_XTILE_CFG_BIT]; + if (e->size && e->offset) { + const XSaveXTILECFG *tilecfg = buf + e->offset; + + memcpy(&env->xtilecfg, tilecfg, sizeof(env->xtilecfg)); + } + + e = &x86_ext_save_areas[XSTATE_XTILE_DATA_BIT]; + if (e->size && e->offset && buflen >= e->size + e->offset) { + const XSaveXTILEDATA *tiledata = buf + e->offset; + + memcpy(&env->xtiledata, tiledata, sizeof(env->xtiledata)); + } #endif } diff --git a/target/loongarch/Kconfig b/target/loongarch/Kconfig new file mode 100644 index 0000000000..46b26b1a85 --- /dev/null +++ b/target/loongarch/Kconfig @@ -0,0 +1,2 @@ +config LOONGARCH64 + bool diff --git a/target/loongarch/README b/target/loongarch/README new file mode 100644 index 0000000000..0b9dc0d40a --- /dev/null +++ b/target/loongarch/README @@ -0,0 +1,52 @@ +- Introduction + + LoongArch is the general processor architecture of Loongson. + + The following versions of the LoongArch core are supported + core: 3A5000 + https://github.com/loongson/LoongArch-Documentation/releases/download/2021.08.17/LoongArch-Vol1-v1.00-EN.pdf + + We can get the latest loongarch documents at https://github.com/loongson/LoongArch-Documentation/tags. + + +- System emulation + + You can reference docs/system/loongarch/loongson3.rst to get the information about system emulation of LoongArch. + +- Linux-user emulation + + We already support Linux user emulation. We can use LoongArch cross-tools to build LoongArch executables on X86 machines, + and We can also use qemu-loongarch64 to run LoongArch executables. + + 1. Config cross-tools env. + + see System emulation. + + 2. Test tests/tcg/multiarch. + + ./configure --static --prefix=/usr --disable-werror --target-list="loongarch64-linux-user" --enable-debug + + cd build + + make && make check-tcg + + 3. Run LoongArch system basic command with loongarch-clfs-system. + + - Config clfs env. + + wget https://github.com/loongson/build-tools/releases/download/2022.05.29/loongarch64-clfs-system-5.0.tar.bz2 + + tar -vxf loongarch64-clfs-system-5.0.tar.bz2 -C /opt/clfs + + cp /opt/clfs/lib64/ld-linux-loongarch-lp64d.so.1 /lib64 + + export LD_LIBRARY_PATH="/opt/clfs/lib64" + + - Run LoongArch system basic command. + + ./qemu-loongarch64 /opt/clfs/usr/bin/bash + ./qemu-loongarch64 /opt/clfs/usr/bin/ls + ./qemu-loongarch64 /opt/clfs/usr/bin/pwd + +- Note. 
+ We can get the latest LoongArch documents or LoongArch tools at https://github.com/loongson/ diff --git a/target/loongarch/constant_timer.c b/target/loongarch/constant_timer.c new file mode 100644 index 0000000000..1851f53fd6 --- /dev/null +++ b/target/loongarch/constant_timer.c @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch constant timer support + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "qemu/timer.h" +#include "cpu.h" +#include "internals.h" +#include "cpu-csr.h" + +#define TIMER_PERIOD 10 /* 10 ns period for 100 MHz frequency */ +#define CONSTANT_TIMER_TICK_MASK 0xfffffffffffcUL +#define CONSTANT_TIMER_ENABLE 0x1UL + +uint64_t cpu_loongarch_get_constant_timer_counter(LoongArchCPU *cpu) +{ + return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / TIMER_PERIOD; +} + +uint64_t cpu_loongarch_get_constant_timer_ticks(LoongArchCPU *cpu) +{ + uint64_t now, expire; + + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + expire = timer_expire_time_ns(&cpu->timer); + + return (expire - now) / TIMER_PERIOD; +} + +void cpu_loongarch_store_constant_timer_config(LoongArchCPU *cpu, + uint64_t value) +{ + CPULoongArchState *env = &cpu->env; + uint64_t now, next; + + env->CSR_TCFG = value; + if (value & CONSTANT_TIMER_ENABLE) { + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + next = now + (value & CONSTANT_TIMER_TICK_MASK) * TIMER_PERIOD; + timer_mod(&cpu->timer, next); + } else { + timer_del(&cpu->timer); + } +} + +void loongarch_constant_timer_cb(void *opaque) +{ + LoongArchCPU *cpu = opaque; + CPULoongArchState *env = &cpu->env; + uint64_t now, next; + + if (FIELD_EX64(env->CSR_TCFG, CSR_TCFG, PERIODIC)) { + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + next = now + (env->CSR_TCFG & CONSTANT_TIMER_TICK_MASK) * TIMER_PERIOD; + timer_mod(&cpu->timer, next); + } else { + env->CSR_TCFG = FIELD_DP64(env->CSR_TCFG, CSR_TCFG, EN, 0); + } + + loongarch_cpu_set_irq(opaque, IRQ_TIMER, 1); +} diff --git a/target/loongarch/cpu-csr.h b/target/loongarch/cpu-csr.h new file mode 100644 index 0000000000..4c8ce7fed5 --- /dev/null +++ b/target/loongarch/cpu-csr.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch CSRs + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_CPU_CSR_H +#define LOONGARCH_CPU_CSR_H + +#include "hw/registerfields.h" + +/* Base on kernal definitions: arch/loongarch/include/asm/loongarch.h */ + +/* Basic CSRs */ +#define LOONGARCH_CSR_CRMD 0x0 /* Current mode info */ + +#define LOONGARCH_CSR_PRMD 0x1 /* Prev-exception mode info */ +FIELD(CSR_PRMD, PPLV, 0, 2) +FIELD(CSR_PRMD, PIE, 2, 1) +FIELD(CSR_PRMD, PWE, 3, 1) + +#define LOONGARCH_CSR_EUEN 0x2 /* Extended unit enable */ +FIELD(CSR_EUEN, FPE, 0, 1) +FIELD(CSR_EUEN, SXE, 1, 1) +FIELD(CSR_EUEN, ASXE, 2, 1) +FIELD(CSR_EUEN, BTE, 3, 1) + +#define LOONGARCH_CSR_MISC 0x3 /* Misc config */ +FIELD(CSR_MISC, VA32, 0, 4) +FIELD(CSR_MISC, DRDTL, 4, 4) +FIELD(CSR_MISC, RPCNTL, 8, 4) +FIELD(CSR_MISC, ALCL, 12, 4) +FIELD(CSR_MISC, DWPL, 16, 3) + +#define LOONGARCH_CSR_ECFG 0x4 /* Exception config */ +FIELD(CSR_ECFG, LIE, 0, 13) +FIELD(CSR_ECFG, VS, 16, 3) + +#define LOONGARCH_CSR_ESTAT 0x5 /* Exception status */ +FIELD(CSR_ESTAT, IS, 0, 13) +FIELD(CSR_ESTAT, ECODE, 16, 6) +FIELD(CSR_ESTAT, ESUBCODE, 22, 9) + +#define LOONGARCH_CSR_ERA 0x6 /* Exception return address */ + +#define LOONGARCH_CSR_BADV 0x7 /* Bad virtual address */ + +#define LOONGARCH_CSR_BADI 0x8 /* Bad instruction 
*/ + +#define LOONGARCH_CSR_EENTRY 0xc /* Exception entry address */ + +/* TLB related CSRs */ +#define LOONGARCH_CSR_TLBIDX 0x10 /* TLB Index, EHINV, PageSize, NP */ +FIELD(CSR_TLBIDX, INDEX, 0, 12) +FIELD(CSR_TLBIDX, PS, 24, 6) +FIELD(CSR_TLBIDX, NE, 31, 1) + +#define LOONGARCH_CSR_TLBEHI 0x11 /* TLB EntryHi */ +FIELD(CSR_TLBEHI, VPPN, 13, 35) + +#define LOONGARCH_CSR_TLBELO0 0x12 /* TLB EntryLo0 */ +#define LOONGARCH_CSR_TLBELO1 0x13 /* TLB EntryLo1 */ +FIELD(TLBENTRY, V, 0, 1) +FIELD(TLBENTRY, D, 1, 1) +FIELD(TLBENTRY, PLV, 2, 2) +FIELD(TLBENTRY, MAT, 4, 2) +FIELD(TLBENTRY, G, 6, 1) +FIELD(TLBENTRY, PPN, 12, 36) +FIELD(TLBENTRY, NR, 61, 1) +FIELD(TLBENTRY, NX, 62, 1) +FIELD(TLBENTRY, RPLV, 63, 1) + +#define LOONGARCH_CSR_ASID 0x18 /* Address space identifier */ +FIELD(CSR_ASID, ASID, 0, 10) +FIELD(CSR_ASID, ASIDBITS, 16, 8) + +/* Page table base address when badv[47] = 0 */ +#define LOONGARCH_CSR_PGDL 0x19 +/* Page table base address when badv[47] = 1 */ +#define LOONGARCH_CSR_PGDH 0x1a + +#define LOONGARCH_CSR_PGD 0x1b /* Page table base address */ + +/* Page walk controller's low addr */ +#define LOONGARCH_CSR_PWCL 0x1c +FIELD(CSR_PWCL, PTBASE, 0, 5) +FIELD(CSR_PWCL, PTWIDTH, 5, 5) +FIELD(CSR_PWCL, DIR1_BASE, 10, 5) +FIELD(CSR_PWCL, DIR1_WIDTH, 15, 5) +FIELD(CSR_PWCL, DIR2_BASE, 20, 5) +FIELD(CSR_PWCL, DIR2_WIDTH, 25, 5) +FIELD(CSR_PWCL, PTEWIDTH, 30, 2) + +/* Page walk controller's high addr */ +#define LOONGARCH_CSR_PWCH 0x1d +FIELD(CSR_PWCH, DIR3_BASE, 0, 6) +FIELD(CSR_PWCH, DIR3_WIDTH, 6, 6) +FIELD(CSR_PWCH, DIR4_BASE, 12, 6) +FIELD(CSR_PWCH, DIR4_WIDTH, 18, 6) + +#define LOONGARCH_CSR_STLBPS 0x1e /* Stlb page size */ +FIELD(CSR_STLBPS, PS, 0, 5) + +#define LOONGARCH_CSR_RVACFG 0x1f /* Reduced virtual address config */ +FIELD(CSR_RVACFG, RBITS, 0, 4) + +/* Config CSRs */ +#define LOONGARCH_CSR_CPUID 0x20 /* CPU core id */ + +#define LOONGARCH_CSR_PRCFG1 0x21 /* Config1 */ +FIELD(CSR_PRCFG1, SAVE_NUM, 0, 4) +FIELD(CSR_PRCFG1, TIMER_BITS, 4, 8) +FIELD(CSR_PRCFG1, VSMAX, 12, 3) + +#define LOONGARCH_CSR_PRCFG2 0x22 /* Config2 */ + +#define LOONGARCH_CSR_PRCFG3 0x23 /* Config3 */ +FIELD(CSR_PRCFG3, TLB_TYPE, 0, 4) +FIELD(CSR_PRCFG3, MTLB_ENTRY, 4, 8) +FIELD(CSR_PRCFG3, STLB_WAYS, 12, 8) +FIELD(CSR_PRCFG3, STLB_SETS, 20, 8) + +/* + * Save registers count can read from PRCFG1.SAVE_NUM + * The Min count is 1. Max count is 15. 
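The timer-configuration CSR declared just below packs three fields that constant_timer.c earlier in this patch reads with FIELD_EX64()/FIELD_DP64(). Hand-rolled equivalents of those accessors make the layout explicit; note the real timer code keeps the initial value unshifted by masking with CONSTANT_TIMER_TICK_MASK instead of extracting the field:

#include <stdbool.h>
#include <stdint.h>

/* CSR_TCFG layout: EN at bit 0, PERIODIC at bit 1, INIT_VAL in bits 2..47. */
static bool     tcfg_en(uint64_t tcfg)       { return tcfg & 0x1; }
static bool     tcfg_periodic(uint64_t tcfg) { return (tcfg >> 1) & 0x1; }
static uint64_t tcfg_init_val(uint64_t tcfg) { return (tcfg >> 2) & ((1ULL << 46) - 1); }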
+ */ +#define LOONGARCH_CSR_SAVE(N) (0x30 + N) + +/* Timer CSRs */ +#define LOONGARCH_CSR_TID 0x40 /* Timer ID */ + +#define LOONGARCH_CSR_TCFG 0x41 /* Timer config */ +FIELD(CSR_TCFG, EN, 0, 1) +FIELD(CSR_TCFG, PERIODIC, 1, 1) +FIELD(CSR_TCFG, INIT_VAL, 2, 46) + +#define LOONGARCH_CSR_TVAL 0x42 /* Timer ticks remain */ + +#define LOONGARCH_CSR_CNTC 0x43 /* Timer offset */ + +#define LOONGARCH_CSR_TICLR 0x44 /* Timer interrupt clear */ + +/* LLBCTL CSRs */ +#define LOONGARCH_CSR_LLBCTL 0x60 /* LLBit control */ +FIELD(CSR_LLBCTL, ROLLB, 0, 1) +FIELD(CSR_LLBCTL, WCLLB, 1, 1) +FIELD(CSR_LLBCTL, KLO, 2, 1) + +/* Implement dependent */ +#define LOONGARCH_CSR_IMPCTL1 0x80 /* LoongArch config1 */ + +#define LOONGARCH_CSR_IMPCTL2 0x81 /* LoongArch config2*/ + +/* TLB Refill CSRs */ +#define LOONGARCH_CSR_TLBRENTRY 0x88 /* TLB refill exception address */ +#define LOONGARCH_CSR_TLBRBADV 0x89 /* TLB refill badvaddr */ +#define LOONGARCH_CSR_TLBRERA 0x8a /* TLB refill ERA */ +#define LOONGARCH_CSR_TLBRSAVE 0x8b /* KScratch for TLB refill */ +FIELD(CSR_TLBRERA, ISTLBR, 0, 1) +FIELD(CSR_TLBRERA, PC, 2, 62) +#define LOONGARCH_CSR_TLBRELO0 0x8c /* TLB refill entrylo0 */ +#define LOONGARCH_CSR_TLBRELO1 0x8d /* TLB refill entrylo1 */ +#define LOONGARCH_CSR_TLBREHI 0x8e /* TLB refill entryhi */ +FIELD(CSR_TLBREHI, PS, 0, 6) +FIELD(CSR_TLBREHI, VPPN, 13, 35) +#define LOONGARCH_CSR_TLBRPRMD 0x8f /* TLB refill mode info */ +FIELD(CSR_TLBRPRMD, PPLV, 0, 2) +FIELD(CSR_TLBRPRMD, PIE, 2, 1) +FIELD(CSR_TLBRPRMD, PWE, 4, 1) + +/* Machine Error CSRs */ +#define LOONGARCH_CSR_MERRCTL 0x90 /* ERRCTL */ +FIELD(CSR_MERRCTL, ISMERR, 0, 1) +#define LOONGARCH_CSR_MERRINFO1 0x91 +#define LOONGARCH_CSR_MERRINFO2 0x92 +#define LOONGARCH_CSR_MERRENTRY 0x93 /* MError exception base */ +#define LOONGARCH_CSR_MERRERA 0x94 /* MError exception PC */ +#define LOONGARCH_CSR_MERRSAVE 0x95 /* KScratch for error exception */ + +#define LOONGARCH_CSR_CTAG 0x98 /* TagLo + TagHi */ + +/* Direct map windows CSRs*/ +#define LOONGARCH_CSR_DMW(N) (0x180 + N) +FIELD(CSR_DMW, PLV0, 0, 1) +FIELD(CSR_DMW, PLV1, 1, 1) +FIELD(CSR_DMW, PLV2, 2, 1) +FIELD(CSR_DMW, PLV3, 3, 1) +FIELD(CSR_DMW, MAT, 4, 2) +FIELD(CSR_DMW, VSEG, 60, 4) + +#define dmw_va2pa(va) \ + (va & MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS)) + +/* Debug CSRs */ +#define LOONGARCH_CSR_DBG 0x500 /* debug config */ +FIELD(CSR_DBG, DST, 0, 1) +FIELD(CSR_DBG, DREV, 1, 7) +FIELD(CSR_DBG, DEI, 8, 1) +FIELD(CSR_DBG, DCL, 9, 1) +FIELD(CSR_DBG, DFW, 10, 1) +FIELD(CSR_DBG, DMW, 11, 1) +FIELD(CSR_DBG, ECODE, 16, 6) + +#define LOONGARCH_CSR_DERA 0x501 /* Debug era */ +#define LOONGARCH_CSR_DSAVE 0x502 /* Debug save */ + +#endif /* LOONGARCH_CPU_CSR_H */ diff --git a/target/loongarch/cpu-param.h b/target/loongarch/cpu-param.h new file mode 100644 index 0000000000..414d8fff46 --- /dev/null +++ b/target/loongarch/cpu-param.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch CPU parameters for QEMU. 
+ * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_CPU_PARAM_H +#define LOONGARCH_CPU_PARAM_H + +#define TARGET_LONG_BITS 64 +#define TARGET_PHYS_ADDR_SPACE_BITS 48 +#define TARGET_VIRT_ADDR_SPACE_BITS 48 + +#define TARGET_PAGE_BITS 14 +#define NB_MMU_MODES 5 + +#endif diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c new file mode 100644 index 0000000000..46b04cbdad --- /dev/null +++ b/target/loongarch/cpu.c @@ -0,0 +1,774 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch CPU + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/qemu-print.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "sysemu/qtest.h" +#include "exec/exec-all.h" +#include "qapi/qapi-commands-machine-target.h" +#include "cpu.h" +#include "internals.h" +#include "fpu/softfloat-helpers.h" +#include "cpu-csr.h" +#include "sysemu/reset.h" + +const char * const regnames[32] = { + "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", + "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", + "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", +}; + +const char * const fregnames[32] = { + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", + "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", + "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", + "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", +}; + +static const char * const excp_names[] = { + [EXCCODE_INT] = "Interrupt", + [EXCCODE_PIL] = "Page invalid exception for load", + [EXCCODE_PIS] = "Page invalid exception for store", + [EXCCODE_PIF] = "Page invalid exception for fetch", + [EXCCODE_PME] = "Page modified exception", + [EXCCODE_PNR] = "Page Not Readable exception", + [EXCCODE_PNX] = "Page Not Executable exception", + [EXCCODE_PPI] = "Page Privilege error", + [EXCCODE_ADEF] = "Address error for instruction fetch", + [EXCCODE_ADEM] = "Address error for Memory access", + [EXCCODE_SYS] = "Syscall", + [EXCCODE_BRK] = "Break", + [EXCCODE_INE] = "Instruction Non-Existent", + [EXCCODE_IPE] = "Instruction privilege error", + [EXCCODE_FPD] = "Floating Point Disabled", + [EXCCODE_FPE] = "Floating Point Exception", + [EXCCODE_DBP] = "Debug breakpoint", + [EXCCODE_BCE] = "Bound Check Exception", +}; + +const char *loongarch_exception_name(int32_t exception) +{ + assert(excp_names[exception]); + return excp_names[exception]; +} + +void G_NORETURN do_raise_exception(CPULoongArchState *env, + uint32_t exception, + uintptr_t pc) +{ + CPUState *cs = env_cpu(env); + + qemu_log_mask(CPU_LOG_INT, "%s: %d (%s)\n", + __func__, + exception, + loongarch_exception_name(exception)); + cs->exception_index = exception; + + cpu_loop_exit_restore(cs, pc); +} + +static void loongarch_cpu_set_pc(CPUState *cs, vaddr value) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + env->pc = value; +} + +static vaddr loongarch_cpu_get_pc(CPUState *cs) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + return env->pc; +} + +#ifndef CONFIG_USER_ONLY +#include "hw/loongarch/virt.h" + +void loongarch_cpu_set_irq(void *opaque, int irq, int level) +{ + LoongArchCPU *cpu = opaque; + CPULoongArchState *env = &cpu->env; + CPUState *cs = CPU(cpu); + + if (irq < 0 || irq >= N_IRQS) { + return; + } + + env->CSR_ESTAT = deposit64(env->CSR_ESTAT, irq, 1, level != 0); + + if (FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS)) 
{ + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } +} + +static inline bool cpu_loongarch_hw_interrupts_enabled(CPULoongArchState *env) +{ + bool ret = 0; + + ret = (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE) && + !(FIELD_EX64(env->CSR_DBG, CSR_DBG, DST))); + + return ret; +} + +/* Check if there is pending and not masked out interrupt */ +static inline bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env) +{ + uint32_t pending; + uint32_t status; + bool r; + + pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS); + status = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE); + + r = (pending & status) != 0; + return r; +} + +static void loongarch_cpu_do_interrupt(CPUState *cs) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + bool update_badinstr = 1; + int cause = -1; + const char *name; + bool tlbfill = FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR); + uint32_t vec_size = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, VS); + + if (cs->exception_index != EXCCODE_INT) { + if (cs->exception_index < 0 || + cs->exception_index >= ARRAY_SIZE(excp_names)) { + name = "unknown"; + } else { + name = excp_names[cs->exception_index]; + } + + qemu_log_mask(CPU_LOG_INT, + "%s enter: pc " TARGET_FMT_lx " ERA " TARGET_FMT_lx + " TLBRERA " TARGET_FMT_lx " %s exception\n", __func__, + env->pc, env->CSR_ERA, env->CSR_TLBRERA, name); + } + + switch (cs->exception_index) { + case EXCCODE_DBP: + env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DCL, 1); + env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, ECODE, 0xC); + goto set_DERA; + set_DERA: + env->CSR_DERA = env->pc; + env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DST, 1); + env->pc = env->CSR_EENTRY + 0x480; + break; + case EXCCODE_INT: + if (FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) { + env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DEI, 1); + goto set_DERA; + } + QEMU_FALLTHROUGH; + case EXCCODE_PIF: + case EXCCODE_ADEF: + cause = cs->exception_index; + update_badinstr = 0; + break; + case EXCCODE_SYS: + case EXCCODE_BRK: + case EXCCODE_INE: + case EXCCODE_IPE: + case EXCCODE_FPD: + case EXCCODE_FPE: + case EXCCODE_BCE: + env->CSR_BADV = env->pc; + QEMU_FALLTHROUGH; + case EXCCODE_ADEM: + case EXCCODE_PIL: + case EXCCODE_PIS: + case EXCCODE_PME: + case EXCCODE_PNR: + case EXCCODE_PNX: + case EXCCODE_PPI: + cause = cs->exception_index; + break; + default: + qemu_log("Error: exception(%d) has not been supported\n", + cs->exception_index); + abort(); + } + + if (update_badinstr) { + env->CSR_BADI = cpu_ldl_code(env, env->pc); + } + + /* Save PLV and IE */ + if (tlbfill) { + env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PPLV, + FIELD_EX64(env->CSR_CRMD, + CSR_CRMD, PLV)); + env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PIE, + FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE)); + /* set the DA mode */ + env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1); + env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0); + env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, + PC, (env->pc >> 2)); + } else { + env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ECODE, + EXCODE_MCODE(cause)); + env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ESUBCODE, + EXCODE_SUBCODE(cause)); + env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PPLV, + FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV)); + env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PIE, + FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE)); + env->CSR_ERA = env->pc; + } + + env->CSR_CRMD = 
FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0); + env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0); + + if (vec_size) { + vec_size = (1 << vec_size) * 4; + } + + if (cs->exception_index == EXCCODE_INT) { + /* Interrupt */ + uint32_t vector = 0; + uint32_t pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS); + pending &= FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE); + + /* Find the highest-priority interrupt. */ + vector = 31 - clz32(pending); + env->pc = env->CSR_EENTRY + (EXCCODE_EXTERNAL_INT + vector) * vec_size; + qemu_log_mask(CPU_LOG_INT, + "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx + " cause %d\n" " A " TARGET_FMT_lx " D " + TARGET_FMT_lx " vector = %d ExC " TARGET_FMT_lx "ExS" + TARGET_FMT_lx "\n", + __func__, env->pc, env->CSR_ERA, + cause, env->CSR_BADV, env->CSR_DERA, vector, + env->CSR_ECFG, env->CSR_ESTAT); + } else { + if (tlbfill) { + env->pc = env->CSR_TLBRENTRY; + } else { + env->pc = env->CSR_EENTRY; + env->pc += EXCODE_MCODE(cause) * vec_size; + } + qemu_log_mask(CPU_LOG_INT, + "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx + " cause %d%s\n, ESTAT " TARGET_FMT_lx + " EXCFG " TARGET_FMT_lx " BADVA " TARGET_FMT_lx + "BADI " TARGET_FMT_lx " SYS_NUM " TARGET_FMT_lu + " cpu %d asid " TARGET_FMT_lx "\n", __func__, env->pc, + tlbfill ? env->CSR_TLBRERA : env->CSR_ERA, + cause, tlbfill ? "(refill)" : "", env->CSR_ESTAT, + env->CSR_ECFG, + tlbfill ? env->CSR_TLBRBADV : env->CSR_BADV, + env->CSR_BADI, env->gpr[11], cs->cpu_index, + env->CSR_ASID); + } + cs->exception_index = -1; +} + +static void loongarch_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, + vaddr addr, unsigned size, + MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, + uintptr_t retaddr) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + if (access_type == MMU_INST_FETCH) { + do_raise_exception(env, EXCCODE_ADEF, retaddr); + } else { + do_raise_exception(env, EXCCODE_ADEM, retaddr); + } +} + +static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ + if (interrupt_request & CPU_INTERRUPT_HARD) { + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + if (cpu_loongarch_hw_interrupts_enabled(env) && + cpu_loongarch_hw_interrupts_pending(env)) { + /* Raise it */ + cs->exception_index = EXCCODE_INT; + loongarch_cpu_do_interrupt(cs); + return true; + } + } + return false; +} +#endif + +#ifdef CONFIG_TCG +static void loongarch_cpu_synchronize_from_tb(CPUState *cs, + const TranslationBlock *tb) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + env->pc = tb_pc(tb); +} + +static void loongarch_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + env->pc = data[0]; +} +#endif /* CONFIG_TCG */ + +static bool loongarch_cpu_has_work(CPUState *cs) +{ +#ifdef CONFIG_USER_ONLY + return true; +#else + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + bool has_work = false; + + if ((cs->interrupt_request & CPU_INTERRUPT_HARD) && + cpu_loongarch_hw_interrupts_pending(env)) { + has_work = true; + } + + return has_work; +#endif +} + +static void loongarch_la464_initfn(Object *obj) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(obj); + CPULoongArchState *env = &cpu->env; + int i; + + for (i = 0; i < 21; i++) { + env->cpucfg[i] = 0x0; + } + + cpu->dtb_compatible = "loongarch,Loongson-3A5000"; + env->cpucfg[0] = 0x14c010; /* PRID */ 
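+    /* CPUCFG1: LA64 (ARCH=2), paged MMU and IOCSR present; PALEN/VALEN hold the address width minus 1, so 0x2f advertises 48-bit physical/virtual addresses. */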
+ + uint32_t data = 0; + data = FIELD_DP32(data, CPUCFG1, ARCH, 2); + data = FIELD_DP32(data, CPUCFG1, PGMMU, 1); + data = FIELD_DP32(data, CPUCFG1, IOCSR, 1); + data = FIELD_DP32(data, CPUCFG1, PALEN, 0x2f); + data = FIELD_DP32(data, CPUCFG1, VALEN, 0x2f); + data = FIELD_DP32(data, CPUCFG1, UAL, 1); + data = FIELD_DP32(data, CPUCFG1, RI, 1); + data = FIELD_DP32(data, CPUCFG1, EP, 1); + data = FIELD_DP32(data, CPUCFG1, RPLV, 1); + data = FIELD_DP32(data, CPUCFG1, HP, 1); + data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1); + env->cpucfg[1] = data; + + data = 0; + data = FIELD_DP32(data, CPUCFG2, FP, 1); + data = FIELD_DP32(data, CPUCFG2, FP_SP, 1); + data = FIELD_DP32(data, CPUCFG2, FP_DP, 1); + data = FIELD_DP32(data, CPUCFG2, FP_VER, 1); + data = FIELD_DP32(data, CPUCFG2, LLFTP, 1); + data = FIELD_DP32(data, CPUCFG2, LLFTP_VER, 1); + data = FIELD_DP32(data, CPUCFG2, LAM, 1); + env->cpucfg[2] = data; + + env->cpucfg[4] = 100 * 1000 * 1000; /* Crystal frequency */ + + data = 0; + data = FIELD_DP32(data, CPUCFG5, CC_MUL, 1); + data = FIELD_DP32(data, CPUCFG5, CC_DIV, 1); + env->cpucfg[5] = data; + + data = 0; + data = FIELD_DP32(data, CPUCFG16, L1_IUPRE, 1); + data = FIELD_DP32(data, CPUCFG16, L1_DPRE, 1); + data = FIELD_DP32(data, CPUCFG16, L2_IUPRE, 1); + data = FIELD_DP32(data, CPUCFG16, L2_IUUNIFY, 1); + data = FIELD_DP32(data, CPUCFG16, L2_IUPRIV, 1); + data = FIELD_DP32(data, CPUCFG16, L3_IUPRE, 1); + data = FIELD_DP32(data, CPUCFG16, L3_IUUNIFY, 1); + data = FIELD_DP32(data, CPUCFG16, L3_IUINCL, 1); + env->cpucfg[16] = data; + + data = 0; + data = FIELD_DP32(data, CPUCFG17, L1IU_WAYS, 3); + data = FIELD_DP32(data, CPUCFG17, L1IU_SETS, 8); + data = FIELD_DP32(data, CPUCFG17, L1IU_SIZE, 6); + env->cpucfg[17] = data; + + data = 0; + data = FIELD_DP32(data, CPUCFG18, L1D_WAYS, 3); + data = FIELD_DP32(data, CPUCFG18, L1D_SETS, 8); + data = FIELD_DP32(data, CPUCFG18, L1D_SIZE, 6); + env->cpucfg[18] = data; + + data = 0; + data = FIELD_DP32(data, CPUCFG19, L2IU_WAYS, 15); + data = FIELD_DP32(data, CPUCFG19, L2IU_SETS, 8); + data = FIELD_DP32(data, CPUCFG19, L2IU_SIZE, 6); + env->cpucfg[19] = data; + + data = 0; + data = FIELD_DP32(data, CPUCFG20, L3IU_WAYS, 15); + data = FIELD_DP32(data, CPUCFG20, L3IU_SETS, 14); + data = FIELD_DP32(data, CPUCFG20, L3IU_SIZE, 6); + env->cpucfg[20] = data; + + env->CSR_ASID = FIELD_DP64(0, CSR_ASID, ASIDBITS, 0xa); +} + +static void loongarch_cpu_list_entry(gpointer data, gpointer user_data) +{ + const char *typename = object_class_get_name(OBJECT_CLASS(data)); + + qemu_printf("%s\n", typename); +} + +void loongarch_cpu_list(void) +{ + GSList *list; + list = object_class_get_list_sorted(TYPE_LOONGARCH_CPU, false); + g_slist_foreach(list, loongarch_cpu_list_entry, NULL); + g_slist_free(list); +} + +static void loongarch_cpu_reset(DeviceState *dev) +{ + CPUState *cs = CPU(dev); + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(cpu); + CPULoongArchState *env = &cpu->env; + + lacc->parent_reset(dev); + + env->fcsr0_mask = FCSR0_M1 | FCSR0_M2 | FCSR0_M3; + env->fcsr0 = 0x0; + + int n; + /* Set csr registers value after reset */ + env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0); + env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0); + env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1); + env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0); + env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATF, 1); + env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATM, 1); + + env->CSR_EUEN = 
FIELD_DP64(env->CSR_EUEN, CSR_EUEN, FPE, 0); + env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, SXE, 0); + env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, ASXE, 0); + env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, BTE, 0); + + env->CSR_MISC = 0; + + env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, VS, 0); + env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, LIE, 0); + + env->CSR_ESTAT = env->CSR_ESTAT & (~MAKE_64BIT_MASK(0, 2)); + env->CSR_RVACFG = FIELD_DP64(env->CSR_RVACFG, CSR_RVACFG, RBITS, 0); + env->CSR_TCFG = FIELD_DP64(env->CSR_TCFG, CSR_TCFG, EN, 0); + env->CSR_LLBCTL = FIELD_DP64(env->CSR_LLBCTL, CSR_LLBCTL, KLO, 0); + env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 0); + env->CSR_MERRCTL = FIELD_DP64(env->CSR_MERRCTL, CSR_MERRCTL, ISMERR, 0); + + env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, TLB_TYPE, 2); + env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, MTLB_ENTRY, 63); + env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_WAYS, 7); + env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_SETS, 8); + + for (n = 0; n < 4; n++) { + env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV0, 0); + env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV1, 0); + env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV2, 0); + env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV3, 0); + } + +#ifndef CONFIG_USER_ONLY + env->pc = 0x1c000000; + memset(env->tlb, 0, sizeof(env->tlb)); +#endif + + restore_fp_status(env); + cs->exception_index = -1; +} + +static void loongarch_cpu_disas_set_info(CPUState *s, disassemble_info *info) +{ + info->print_insn = print_insn_loongarch; +} + +static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp) +{ + CPUState *cs = CPU(dev); + LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(dev); + Error *local_err = NULL; + + cpu_exec_realizefn(cs, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; + } + + loongarch_cpu_register_gdb_regs_for_features(cs); + + cpu_reset(cs); + qemu_init_vcpu(cs); + + lacc->parent_realize(dev, errp); +} + +#ifndef CONFIG_USER_ONLY +static void loongarch_qemu_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ +} + +static uint64_t loongarch_qemu_read(void *opaque, hwaddr addr, unsigned size) +{ + switch (addr) { + case FEATURE_REG: + return 1ULL << IOCSRF_MSI | 1ULL << IOCSRF_EXTIOI | + 1ULL << IOCSRF_CSRIPI; + case VENDOR_REG: + return 0x6e6f73676e6f6f4cULL; /* "Loongson" */ + case CPUNAME_REG: + return 0x303030354133ULL; /* "3A5000" */ + case MISC_FUNC_REG: + return 1ULL << IOCSRM_EXTIOI_EN; + } + return 0ULL; +} + +static const MemoryRegionOps loongarch_qemu_ops = { + .read = loongarch_qemu_read, + .write = loongarch_qemu_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; +#endif + +static void loongarch_cpu_init(Object *obj) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(obj); + + cpu_set_cpustate_pointers(cpu); + +#ifndef CONFIG_USER_ONLY + CPULoongArchState *env = &cpu->env; + qdev_init_gpio_in(DEVICE(cpu), loongarch_cpu_set_irq, N_IRQS); + timer_init_ns(&cpu->timer, QEMU_CLOCK_VIRTUAL, + &loongarch_constant_timer_cb, cpu); + memory_region_init_io(&env->system_iocsr, OBJECT(cpu), NULL, + env, "iocsr", UINT64_MAX); + address_space_init(&env->address_space_iocsr, &env->system_iocsr, "IOCSR"); + memory_region_init_io(&env->iocsr_mem, OBJECT(cpu), 
&loongarch_qemu_ops, + NULL, "iocsr_misc", 0x428); + memory_region_add_subregion(&env->system_iocsr, 0, &env->iocsr_mem); +#endif +} + +static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model) +{ + ObjectClass *oc; + + oc = object_class_by_name(cpu_model); + if (!oc) { + g_autofree char *typename + = g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model); + oc = object_class_by_name(typename); + if (!oc) { + return NULL; + } + } + + if (object_class_dynamic_cast(oc, TYPE_LOONGARCH_CPU) + && !object_class_is_abstract(oc)) { + return oc; + } + return NULL; +} + +void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + int i; + + qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc); + qemu_fprintf(f, " FCSR0 0x%08x fp_status 0x%02x\n", env->fcsr0, + get_float_exception_flags(&env->fp_status)); + + /* gpr */ + for (i = 0; i < 32; i++) { + if ((i & 3) == 0) { + qemu_fprintf(f, " GPR%02d:", i); + } + qemu_fprintf(f, " %s %016" PRIx64, regnames[i], env->gpr[i]); + if ((i & 3) == 3) { + qemu_fprintf(f, "\n"); + } + } + + qemu_fprintf(f, "CRMD=%016" PRIx64 "\n", env->CSR_CRMD); + qemu_fprintf(f, "PRMD=%016" PRIx64 "\n", env->CSR_PRMD); + qemu_fprintf(f, "EUEN=%016" PRIx64 "\n", env->CSR_EUEN); + qemu_fprintf(f, "ESTAT=%016" PRIx64 "\n", env->CSR_ESTAT); + qemu_fprintf(f, "ERA=%016" PRIx64 "\n", env->CSR_ERA); + qemu_fprintf(f, "BADV=%016" PRIx64 "\n", env->CSR_BADV); + qemu_fprintf(f, "BADI=%016" PRIx64 "\n", env->CSR_BADI); + qemu_fprintf(f, "EENTRY=%016" PRIx64 "\n", env->CSR_EENTRY); + qemu_fprintf(f, "PRCFG1=%016" PRIx64 ", PRCFG2=%016" PRIx64 "," + " PRCFG3=%016" PRIx64 "\n", + env->CSR_PRCFG1, env->CSR_PRCFG3, env->CSR_PRCFG3); + qemu_fprintf(f, "TLBRENTRY=%016" PRIx64 "\n", env->CSR_TLBRENTRY); + qemu_fprintf(f, "TLBRBADV=%016" PRIx64 "\n", env->CSR_TLBRBADV); + qemu_fprintf(f, "TLBRERA=%016" PRIx64 "\n", env->CSR_TLBRERA); + + /* fpr */ + if (flags & CPU_DUMP_FPU) { + for (i = 0; i < 32; i++) { + qemu_fprintf(f, " %s %016" PRIx64, fregnames[i], env->fpr[i]); + if ((i & 3) == 3) { + qemu_fprintf(f, "\n"); + } + } + } +} + +#ifdef CONFIG_TCG +#include "hw/core/tcg-cpu-ops.h" + +static struct TCGCPUOps loongarch_tcg_ops = { + .initialize = loongarch_translate_init, + .synchronize_from_tb = loongarch_cpu_synchronize_from_tb, + .restore_state_to_opc = loongarch_restore_state_to_opc, + +#ifndef CONFIG_USER_ONLY + .tlb_fill = loongarch_cpu_tlb_fill, + .cpu_exec_interrupt = loongarch_cpu_exec_interrupt, + .do_interrupt = loongarch_cpu_do_interrupt, + .do_transaction_failed = loongarch_cpu_do_transaction_failed, +#endif +}; +#endif /* CONFIG_TCG */ + +#ifndef CONFIG_USER_ONLY +#include "hw/core/sysemu-cpu-ops.h" + +static const struct SysemuCPUOps loongarch_sysemu_ops = { + .get_phys_page_debug = loongarch_cpu_get_phys_page_debug, +}; +#endif + +static gchar *loongarch_gdb_arch_name(CPUState *cs) +{ + return g_strdup("loongarch64"); +} + +static void loongarch_cpu_class_init(ObjectClass *c, void *data) +{ + LoongArchCPUClass *lacc = LOONGARCH_CPU_CLASS(c); + CPUClass *cc = CPU_CLASS(c); + DeviceClass *dc = DEVICE_CLASS(c); + + device_class_set_parent_realize(dc, loongarch_cpu_realizefn, + &lacc->parent_realize); + device_class_set_parent_reset(dc, loongarch_cpu_reset, &lacc->parent_reset); + + cc->class_by_name = loongarch_cpu_class_by_name; + cc->has_work = loongarch_cpu_has_work; + cc->dump_state = loongarch_cpu_dump_state; + cc->set_pc = loongarch_cpu_set_pc; + cc->get_pc = loongarch_cpu_get_pc; 
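+    /* System-emulation only: hook up VM migration state and the sysemu CPU ops. */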
+#ifndef CONFIG_USER_ONLY + dc->vmsd = &vmstate_loongarch_cpu; + cc->sysemu_ops = &loongarch_sysemu_ops; +#endif + cc->disas_set_info = loongarch_cpu_disas_set_info; + cc->gdb_read_register = loongarch_cpu_gdb_read_register; + cc->gdb_write_register = loongarch_cpu_gdb_write_register; + cc->disas_set_info = loongarch_cpu_disas_set_info; + cc->gdb_num_core_regs = 35; + cc->gdb_core_xml_file = "loongarch-base64.xml"; + cc->gdb_stop_before_watchpoint = true; + cc->gdb_arch_name = loongarch_gdb_arch_name; + +#ifdef CONFIG_TCG + cc->tcg_ops = &loongarch_tcg_ops; +#endif +} + +#define DEFINE_LOONGARCH_CPU_TYPE(model, initfn) \ + { \ + .parent = TYPE_LOONGARCH_CPU, \ + .instance_init = initfn, \ + .name = LOONGARCH_CPU_TYPE_NAME(model), \ + } + +static const TypeInfo loongarch_cpu_type_infos[] = { + { + .name = TYPE_LOONGARCH_CPU, + .parent = TYPE_CPU, + .instance_size = sizeof(LoongArchCPU), + .instance_init = loongarch_cpu_init, + + .abstract = true, + .class_size = sizeof(LoongArchCPUClass), + .class_init = loongarch_cpu_class_init, + }, + DEFINE_LOONGARCH_CPU_TYPE("la464", loongarch_la464_initfn), +}; + +DEFINE_TYPES(loongarch_cpu_type_infos) + +static void loongarch_cpu_add_definition(gpointer data, gpointer user_data) +{ + ObjectClass *oc = data; + CpuDefinitionInfoList **cpu_list = user_data; + CpuDefinitionInfo *info = g_new0(CpuDefinitionInfo, 1); + const char *typename = object_class_get_name(oc); + + info->name = g_strndup(typename, + strlen(typename) - strlen("-" TYPE_LOONGARCH_CPU)); + info->q_typename = g_strdup(typename); + + QAPI_LIST_PREPEND(*cpu_list, info); +} + +CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) +{ + CpuDefinitionInfoList *cpu_list = NULL; + GSList *list; + + list = object_class_get_list(TYPE_LOONGARCH_CPU, false); + g_slist_foreach(list, loongarch_cpu_add_definition, &cpu_list); + g_slist_free(list); + + return cpu_list; +} diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h new file mode 100644 index 0000000000..e15c633b0b --- /dev/null +++ b/target/loongarch/cpu.h @@ -0,0 +1,423 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch CPU + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_CPU_H +#define LOONGARCH_CPU_H + +#include "exec/cpu-defs.h" +#include "fpu/softfloat-types.h" +#include "hw/registerfields.h" +#include "qemu/timer.h" +#include "exec/memory.h" +#include "hw/sysbus.h" +#include "cpu-csr.h" + +#define IOCSRF_TEMP 0 +#define IOCSRF_NODECNT 1 +#define IOCSRF_MSI 2 +#define IOCSRF_EXTIOI 3 +#define IOCSRF_CSRIPI 4 +#define IOCSRF_FREQCSR 5 +#define IOCSRF_FREQSCALE 6 +#define IOCSRF_DVFSV1 7 +#define IOCSRF_GMOD 9 +#define IOCSRF_VM 11 + +#define FEATURE_REG 0x8 +#define VENDOR_REG 0x10 +#define CPUNAME_REG 0x20 +#define MISC_FUNC_REG 0x420 +#define IOCSRM_EXTIOI_EN 48 + +#define IOCSR_MEM_SIZE 0x428 + +#define TCG_GUEST_DEFAULT_MO (0) + +#define FCSR0_M1 0x1f /* FCSR1 mask, Enables */ +#define FCSR0_M2 0x1f1f0000 /* FCSR2 mask, Cause and Flags */ +#define FCSR0_M3 0x300 /* FCSR3 mask, Round Mode */ +#define FCSR0_RM 8 /* Round Mode bit num on fcsr0 */ + +FIELD(FCSR0, ENABLES, 0, 5) +FIELD(FCSR0, RM, 8, 2) +FIELD(FCSR0, FLAGS, 16, 5) +FIELD(FCSR0, CAUSE, 24, 5) + +#define GET_FP_CAUSE(REG) FIELD_EX32(REG, FCSR0, CAUSE) +#define SET_FP_CAUSE(REG, V) \ + do { \ + (REG) = FIELD_DP32(REG, FCSR0, CAUSE, V); \ + } while (0) + +#define GET_FP_ENABLES(REG) FIELD_EX32(REG, FCSR0, ENABLES) +#define SET_FP_ENABLES(REG, V) \ + do { \ + (REG) = FIELD_DP32(REG, FCSR0, ENABLES, 
V); \ + } while (0) + +#define GET_FP_FLAGS(REG) FIELD_EX32(REG, FCSR0, FLAGS) +#define SET_FP_FLAGS(REG, V) \ + do { \ + (REG) = FIELD_DP32(REG, FCSR0, FLAGS, V); \ + } while (0) + +#define UPDATE_FP_FLAGS(REG, V) \ + do { \ + (REG) |= FIELD_DP32(0, FCSR0, FLAGS, V); \ + } while (0) + +#define FP_INEXACT 1 +#define FP_UNDERFLOW 2 +#define FP_OVERFLOW 4 +#define FP_DIV0 8 +#define FP_INVALID 16 + +#define EXCODE(code, subcode) ( ((subcode) << 6) | (code) ) +#define EXCODE_MCODE(code) ( (code) & 0x3f ) +#define EXCODE_SUBCODE(code) ( (code) >> 6 ) + +#define EXCCODE_EXTERNAL_INT 64 /* plus external interrupt number */ +#define EXCCODE_INT EXCODE(0, 0) +#define EXCCODE_PIL EXCODE(1, 0) +#define EXCCODE_PIS EXCODE(2, 0) +#define EXCCODE_PIF EXCODE(3, 0) +#define EXCCODE_PME EXCODE(4, 0) +#define EXCCODE_PNR EXCODE(5, 0) +#define EXCCODE_PNX EXCODE(6, 0) +#define EXCCODE_PPI EXCODE(7, 0) +#define EXCCODE_ADEF EXCODE(8, 0) /* Different exception subcode */ +#define EXCCODE_ADEM EXCODE(8, 1) +#define EXCCODE_ALE EXCODE(9, 0) +#define EXCCODE_BCE EXCODE(10, 0) +#define EXCCODE_SYS EXCODE(11, 0) +#define EXCCODE_BRK EXCODE(12, 0) +#define EXCCODE_INE EXCODE(13, 0) +#define EXCCODE_IPE EXCODE(14, 0) +#define EXCCODE_FPD EXCODE(15, 0) +#define EXCCODE_SXD EXCODE(16, 0) +#define EXCCODE_ASXD EXCODE(17, 0) +#define EXCCODE_FPE EXCODE(18, 0) /* Different exception subcode */ +#define EXCCODE_VFPE EXCODE(18, 1) +#define EXCCODE_WPEF EXCODE(19, 0) /* Different exception subcode */ +#define EXCCODE_WPEM EXCODE(19, 1) +#define EXCCODE_BTD EXCODE(20, 0) +#define EXCCODE_BTE EXCODE(21, 0) +#define EXCCODE_DBP EXCODE(26, 0) /* Reserved subcode used for debug */ + +/* cpucfg[0] bits */ +FIELD(CPUCFG0, PRID, 0, 32) + +/* cpucfg[1] bits */ +FIELD(CPUCFG1, ARCH, 0, 2) +FIELD(CPUCFG1, PGMMU, 2, 1) +FIELD(CPUCFG1, IOCSR, 3, 1) +FIELD(CPUCFG1, PALEN, 4, 8) +FIELD(CPUCFG1, VALEN, 12, 8) +FIELD(CPUCFG1, UAL, 20, 1) +FIELD(CPUCFG1, RI, 21, 1) +FIELD(CPUCFG1, EP, 22, 1) +FIELD(CPUCFG1, RPLV, 23, 1) +FIELD(CPUCFG1, HP, 24, 1) +FIELD(CPUCFG1, IOCSR_BRD, 25, 1) +FIELD(CPUCFG1, MSG_INT, 26, 1) + +/* cpucfg[2] bits */ +FIELD(CPUCFG2, FP, 0, 1) +FIELD(CPUCFG2, FP_SP, 1, 1) +FIELD(CPUCFG2, FP_DP, 2, 1) +FIELD(CPUCFG2, FP_VER, 3, 3) +FIELD(CPUCFG2, LSX, 6, 1) +FIELD(CPUCFG2, LASX, 7, 1) +FIELD(CPUCFG2, COMPLEX, 8, 1) +FIELD(CPUCFG2, CRYPTO, 9, 1) +FIELD(CPUCFG2, LVZ, 10, 1) +FIELD(CPUCFG2, LVZ_VER, 11, 3) +FIELD(CPUCFG2, LLFTP, 14, 1) +FIELD(CPUCFG2, LLFTP_VER, 15, 3) +FIELD(CPUCFG2, LBT_X86, 18, 1) +FIELD(CPUCFG2, LBT_ARM, 19, 1) +FIELD(CPUCFG2, LBT_MIPS, 20, 1) +FIELD(CPUCFG2, LSPW, 21, 1) +FIELD(CPUCFG2, LAM, 22, 1) + +/* cpucfg[3] bits */ +FIELD(CPUCFG3, CCDMA, 0, 1) +FIELD(CPUCFG3, SFB, 1, 1) +FIELD(CPUCFG3, UCACC, 2, 1) +FIELD(CPUCFG3, LLEXC, 3, 1) +FIELD(CPUCFG3, SCDLY, 4, 1) +FIELD(CPUCFG3, LLDBAR, 5, 1) +FIELD(CPUCFG3, ITLBHMC, 6, 1) +FIELD(CPUCFG3, ICHMC, 7, 1) +FIELD(CPUCFG3, SPW_LVL, 8, 3) +FIELD(CPUCFG3, SPW_HP_HF, 11, 1) +FIELD(CPUCFG3, RVA, 12, 1) +FIELD(CPUCFG3, RVAMAX, 13, 4) + +/* cpucfg[4] bits */ +FIELD(CPUCFG4, CC_FREQ, 0, 32) + +/* cpucfg[5] bits */ +FIELD(CPUCFG5, CC_MUL, 0, 16) +FIELD(CPUCFG5, CC_DIV, 16, 16) + +/* cpucfg[6] bits */ +FIELD(CPUCFG6, PMP, 0, 1) +FIELD(CPUCFG6, PMVER, 1, 3) +FIELD(CPUCFG6, PMNUM, 4, 4) +FIELD(CPUCFG6, PMBITS, 8, 6) +FIELD(CPUCFG6, UPM, 14, 1) + +/* cpucfg[16] bits */ +FIELD(CPUCFG16, L1_IUPRE, 0, 1) +FIELD(CPUCFG16, L1_IUUNIFY, 1, 1) +FIELD(CPUCFG16, L1_DPRE, 2, 1) +FIELD(CPUCFG16, L2_IUPRE, 3, 1) +FIELD(CPUCFG16, L2_IUUNIFY, 4, 1) +FIELD(CPUCFG16, L2_IUPRIV, 5, 1) 
+FIELD(CPUCFG16, L2_IUINCL, 6, 1) +FIELD(CPUCFG16, L2_DPRE, 7, 1) +FIELD(CPUCFG16, L2_DPRIV, 8, 1) +FIELD(CPUCFG16, L2_DINCL, 9, 1) +FIELD(CPUCFG16, L3_IUPRE, 10, 1) +FIELD(CPUCFG16, L3_IUUNIFY, 11, 1) +FIELD(CPUCFG16, L3_IUPRIV, 12, 1) +FIELD(CPUCFG16, L3_IUINCL, 13, 1) +FIELD(CPUCFG16, L3_DPRE, 14, 1) +FIELD(CPUCFG16, L3_DPRIV, 15, 1) +FIELD(CPUCFG16, L3_DINCL, 16, 1) + +/* cpucfg[17] bits */ +FIELD(CPUCFG17, L1IU_WAYS, 0, 16) +FIELD(CPUCFG17, L1IU_SETS, 16, 8) +FIELD(CPUCFG17, L1IU_SIZE, 24, 7) + +/* cpucfg[18] bits */ +FIELD(CPUCFG18, L1D_WAYS, 0, 16) +FIELD(CPUCFG18, L1D_SETS, 16, 8) +FIELD(CPUCFG18, L1D_SIZE, 24, 7) + +/* cpucfg[19] bits */ +FIELD(CPUCFG19, L2IU_WAYS, 0, 16) +FIELD(CPUCFG19, L2IU_SETS, 16, 8) +FIELD(CPUCFG19, L2IU_SIZE, 24, 7) + +/* cpucfg[20] bits */ +FIELD(CPUCFG20, L3IU_WAYS, 0, 16) +FIELD(CPUCFG20, L3IU_SETS, 16, 8) +FIELD(CPUCFG20, L3IU_SIZE, 24, 7) + +/*CSR_CRMD */ +FIELD(CSR_CRMD, PLV, 0, 2) +FIELD(CSR_CRMD, IE, 2, 1) +FIELD(CSR_CRMD, DA, 3, 1) +FIELD(CSR_CRMD, PG, 4, 1) +FIELD(CSR_CRMD, DATF, 5, 2) +FIELD(CSR_CRMD, DATM, 7, 2) +FIELD(CSR_CRMD, WE, 9, 1) + +extern const char * const regnames[32]; +extern const char * const fregnames[32]; + +#define N_IRQS 13 +#define IRQ_TIMER 11 +#define IRQ_IPI 12 + +#define LOONGARCH_STLB 2048 /* 2048 STLB */ +#define LOONGARCH_MTLB 64 /* 64 MTLB */ +#define LOONGARCH_TLB_MAX (LOONGARCH_STLB + LOONGARCH_MTLB) + +/* + * define the ASID PS E VPPN field of TLB + */ +FIELD(TLB_MISC, E, 0, 1) +FIELD(TLB_MISC, ASID, 1, 10) +FIELD(TLB_MISC, VPPN, 13, 35) +FIELD(TLB_MISC, PS, 48, 6) + +struct LoongArchTLB { + uint64_t tlb_misc; + /* Fields corresponding to CSR_TLBELO0/1 */ + uint64_t tlb_entry0; + uint64_t tlb_entry1; +}; +typedef struct LoongArchTLB LoongArchTLB; + +typedef struct CPUArchState { + uint64_t gpr[32]; + uint64_t pc; + + uint64_t fpr[32]; + float_status fp_status; + bool cf[8]; + + uint32_t fcsr0; + uint32_t fcsr0_mask; + + uint32_t cpucfg[21]; + + uint64_t lladdr; /* LL virtual address compared against SC */ + uint64_t llval; + + /* LoongArch CSRs */ + uint64_t CSR_CRMD; + uint64_t CSR_PRMD; + uint64_t CSR_EUEN; + uint64_t CSR_MISC; + uint64_t CSR_ECFG; + uint64_t CSR_ESTAT; + uint64_t CSR_ERA; + uint64_t CSR_BADV; + uint64_t CSR_BADI; + uint64_t CSR_EENTRY; + uint64_t CSR_TLBIDX; + uint64_t CSR_TLBEHI; + uint64_t CSR_TLBELO0; + uint64_t CSR_TLBELO1; + uint64_t CSR_ASID; + uint64_t CSR_PGDL; + uint64_t CSR_PGDH; + uint64_t CSR_PGD; + uint64_t CSR_PWCL; + uint64_t CSR_PWCH; + uint64_t CSR_STLBPS; + uint64_t CSR_RVACFG; + uint64_t CSR_PRCFG1; + uint64_t CSR_PRCFG2; + uint64_t CSR_PRCFG3; + uint64_t CSR_SAVE[16]; + uint64_t CSR_TID; + uint64_t CSR_TCFG; + uint64_t CSR_TVAL; + uint64_t CSR_CNTC; + uint64_t CSR_TICLR; + uint64_t CSR_LLBCTL; + uint64_t CSR_IMPCTL1; + uint64_t CSR_IMPCTL2; + uint64_t CSR_TLBRENTRY; + uint64_t CSR_TLBRBADV; + uint64_t CSR_TLBRERA; + uint64_t CSR_TLBRSAVE; + uint64_t CSR_TLBRELO0; + uint64_t CSR_TLBRELO1; + uint64_t CSR_TLBREHI; + uint64_t CSR_TLBRPRMD; + uint64_t CSR_MERRCTL; + uint64_t CSR_MERRINFO1; + uint64_t CSR_MERRINFO2; + uint64_t CSR_MERRENTRY; + uint64_t CSR_MERRERA; + uint64_t CSR_MERRSAVE; + uint64_t CSR_CTAG; + uint64_t CSR_DMW[4]; + uint64_t CSR_DBG; + uint64_t CSR_DERA; + uint64_t CSR_DSAVE; + +#ifndef CONFIG_USER_ONLY + LoongArchTLB tlb[LOONGARCH_TLB_MAX]; + + AddressSpace address_space_iocsr; + MemoryRegion system_iocsr; + MemoryRegion iocsr_mem; + bool load_elf; + uint64_t elf_address; +#endif +} CPULoongArchState; + +/** + * LoongArchCPU: + * @env: #CPULoongArchState + * + 
* A LoongArch CPU. + */ +struct ArchCPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + + CPUNegativeOffsetState neg; + CPULoongArchState env; + QEMUTimer timer; + + /* 'compatible' string for this CPU for Linux device trees */ + const char *dtb_compatible; +}; + +#define TYPE_LOONGARCH_CPU "loongarch-cpu" + +OBJECT_DECLARE_CPU_TYPE(LoongArchCPU, LoongArchCPUClass, + LOONGARCH_CPU) + +/** + * LoongArchCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_reset: The parent class' reset handler. + * + * A LoongArch CPU model. + */ +struct LoongArchCPUClass { + /*< private >*/ + CPUClass parent_class; + /*< public >*/ + + DeviceRealize parent_realize; + DeviceReset parent_reset; +}; + +/* + * LoongArch CPUs has 4 privilege levels. + * 0 for kernel mode, 3 for user mode. + * Define an extra index for DA(direct addressing) mode. + */ +#define MMU_PLV_KERNEL 0 +#define MMU_PLV_USER 3 +#define MMU_IDX_KERNEL MMU_PLV_KERNEL +#define MMU_IDX_USER MMU_PLV_USER +#define MMU_IDX_DA 4 + +static inline int cpu_mmu_index(CPULoongArchState *env, bool ifetch) +{ +#ifdef CONFIG_USER_ONLY + return MMU_IDX_USER; +#else + if (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG)) { + return FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV); + } + return MMU_IDX_DA; +#endif +} + +/* + * LoongArch CPUs hardware flags. + */ +#define HW_FLAGS_PLV_MASK R_CSR_CRMD_PLV_MASK /* 0x03 */ +#define HW_FLAGS_CRMD_PG R_CSR_CRMD_PG_MASK /* 0x10 */ +#define HW_FLAGS_EUEN_FPE 0x04 + +static inline void cpu_get_tb_cpu_state(CPULoongArchState *env, + target_ulong *pc, + target_ulong *cs_base, + uint32_t *flags) +{ + *pc = env->pc; + *cs_base = 0; + *flags = env->CSR_CRMD & (R_CSR_CRMD_PLV_MASK | R_CSR_CRMD_PG_MASK); + *flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, FPE) * HW_FLAGS_EUEN_FPE; +} + +void loongarch_cpu_list(void); + +#define cpu_list loongarch_cpu_list + +#include "exec/cpu-all.h" + +#define LOONGARCH_CPU_TYPE_SUFFIX "-" TYPE_LOONGARCH_CPU +#define LOONGARCH_CPU_TYPE_NAME(model) model LOONGARCH_CPU_TYPE_SUFFIX +#define CPU_RESOLVING_TYPE TYPE_LOONGARCH_CPU + +#endif /* LOONGARCH_CPU_H */ diff --git a/target/loongarch/csr_helper.c b/target/loongarch/csr_helper.c new file mode 100644 index 0000000000..7e02787895 --- /dev/null +++ b/target/loongarch/csr_helper.c @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch emulation helpers for CSRs + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "cpu.h" +#include "internals.h" +#include "qemu/host-utils.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "hw/irq.h" +#include "cpu-csr.h" +#include "tcg/tcg-ldst.h" + +target_ulong helper_csrrd_pgd(CPULoongArchState *env) +{ + int64_t v; + + if (env->CSR_TLBRERA & 0x1) { + v = env->CSR_TLBRBADV; + } else { + v = env->CSR_BADV; + } + + if ((v >> 63) & 0x1) { + v = env->CSR_PGDH; + } else { + v = env->CSR_PGDL; + } + + return v; +} + +target_ulong helper_csrrd_tval(CPULoongArchState *env) +{ + LoongArchCPU *cpu = env_archcpu(env); + + return cpu_loongarch_get_constant_timer_ticks(cpu); +} + +target_ulong helper_csrwr_estat(CPULoongArchState *env, target_ulong val) +{ + int64_t old_v = env->CSR_ESTAT; + + /* Only IS[1:0] can be written */ + env->CSR_ESTAT = deposit64(env->CSR_ESTAT, 0, 2, val); + + return old_v; +} + +target_ulong helper_csrwr_asid(CPULoongArchState *env, target_ulong val) +{ + int64_t old_v = env->CSR_ASID; + + /* Only ASID filed of 
CSR_ASID can be written */ + env->CSR_ASID = deposit64(env->CSR_ASID, 0, 10, val); + if (old_v != env->CSR_ASID) { + tlb_flush(env_cpu(env)); + } + return old_v; +} + +target_ulong helper_csrwr_tcfg(CPULoongArchState *env, target_ulong val) +{ + LoongArchCPU *cpu = env_archcpu(env); + int64_t old_v = env->CSR_TCFG; + + cpu_loongarch_store_constant_timer_config(cpu, val); + + return old_v; +} + +target_ulong helper_csrwr_ticlr(CPULoongArchState *env, target_ulong val) +{ + LoongArchCPU *cpu = env_archcpu(env); + int64_t old_v = 0; + + if (val & 0x1) { + qemu_mutex_lock_iothread(); + loongarch_cpu_set_irq(cpu, IRQ_TIMER, 0); + qemu_mutex_unlock_iothread(); + } + return old_v; +} diff --git a/target/loongarch/disas.c b/target/loongarch/disas.c new file mode 100644 index 0000000000..858dfcc53a --- /dev/null +++ b/target/loongarch/disas.c @@ -0,0 +1,757 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch Disassembler + * + * Copyright (c) 2021 Loongson Technology Corporation Limited. + */ + +#include "qemu/osdep.h" +#include "disas/dis-asm.h" +#include "qemu/bitops.h" +#include "cpu-csr.h" + +typedef struct { + disassemble_info *info; + uint64_t pc; + uint32_t insn; +} DisasContext; + +static inline int plus_1(DisasContext *ctx, int x) +{ + return x + 1; +} + +static inline int shl_2(DisasContext *ctx, int x) +{ + return x << 2; +} + +#define CSR_NAME(REG) \ + [LOONGARCH_CSR_##REG] = (#REG) + +static const char * const csr_names[] = { + CSR_NAME(CRMD), + CSR_NAME(PRMD), + CSR_NAME(EUEN), + CSR_NAME(MISC), + CSR_NAME(ECFG), + CSR_NAME(ESTAT), + CSR_NAME(ERA), + CSR_NAME(BADV), + CSR_NAME(BADI), + CSR_NAME(EENTRY), + CSR_NAME(TLBIDX), + CSR_NAME(TLBEHI), + CSR_NAME(TLBELO0), + CSR_NAME(TLBELO1), + CSR_NAME(ASID), + CSR_NAME(PGDL), + CSR_NAME(PGDH), + CSR_NAME(PGD), + CSR_NAME(PWCL), + CSR_NAME(PWCH), + CSR_NAME(STLBPS), + CSR_NAME(RVACFG), + CSR_NAME(CPUID), + CSR_NAME(PRCFG1), + CSR_NAME(PRCFG2), + CSR_NAME(PRCFG3), + CSR_NAME(SAVE(0)), + CSR_NAME(SAVE(1)), + CSR_NAME(SAVE(2)), + CSR_NAME(SAVE(3)), + CSR_NAME(SAVE(4)), + CSR_NAME(SAVE(5)), + CSR_NAME(SAVE(6)), + CSR_NAME(SAVE(7)), + CSR_NAME(SAVE(8)), + CSR_NAME(SAVE(9)), + CSR_NAME(SAVE(10)), + CSR_NAME(SAVE(11)), + CSR_NAME(SAVE(12)), + CSR_NAME(SAVE(13)), + CSR_NAME(SAVE(14)), + CSR_NAME(SAVE(15)), + CSR_NAME(TID), + CSR_NAME(TCFG), + CSR_NAME(TVAL), + CSR_NAME(CNTC), + CSR_NAME(TICLR), + CSR_NAME(LLBCTL), + CSR_NAME(IMPCTL1), + CSR_NAME(IMPCTL2), + CSR_NAME(TLBRENTRY), + CSR_NAME(TLBRBADV), + CSR_NAME(TLBRERA), + CSR_NAME(TLBRSAVE), + CSR_NAME(TLBRELO0), + CSR_NAME(TLBRELO1), + CSR_NAME(TLBREHI), + CSR_NAME(TLBRPRMD), + CSR_NAME(MERRCTL), + CSR_NAME(MERRINFO1), + CSR_NAME(MERRINFO2), + CSR_NAME(MERRENTRY), + CSR_NAME(MERRERA), + CSR_NAME(MERRSAVE), + CSR_NAME(CTAG), + CSR_NAME(DMW(0)), + CSR_NAME(DMW(1)), + CSR_NAME(DMW(2)), + CSR_NAME(DMW(3)), + CSR_NAME(DBG), + CSR_NAME(DERA), + CSR_NAME(DSAVE), +}; + +static const char *get_csr_name(unsigned num) +{ + return ((num < ARRAY_SIZE(csr_names)) && (csr_names[num] != NULL)) ? + csr_names[num] : "Undefined CSR"; +} + +#define output(C, INSN, FMT, ...) 
\ +{ \ + (C)->info->fprintf_func((C)->info->stream, "%08x %-9s\t" FMT, \ + (C)->insn, INSN, ##__VA_ARGS__); \ +} + +#include "decode-insns.c.inc" + +int print_insn_loongarch(bfd_vma memaddr, struct disassemble_info *info) +{ + bfd_byte buffer[4]; + uint32_t insn; + int status; + + status = (*info->read_memory_func)(memaddr, buffer, 4, info); + if (status != 0) { + (*info->memory_error_func)(status, memaddr, info); + return -1; + } + insn = bfd_getl32(buffer); + DisasContext ctx = { + .info = info, + .pc = memaddr, + .insn = insn + }; + + if (!decode(&ctx, insn)) { + output(&ctx, "illegal", ""); + } + return 4; +} + +static void output_r_i(DisasContext *ctx, arg_r_i *a, const char *mnemonic) +{ + output(ctx, mnemonic, "r%d, %d", a->rd, a->imm); +} + +static void output_rrr(DisasContext *ctx, arg_rrr *a, const char *mnemonic) +{ + output(ctx, mnemonic, "r%d, r%d, r%d", a->rd, a->rj, a->rk); +} + +static void output_rr_i(DisasContext *ctx, arg_rr_i *a, const char *mnemonic) +{ + output(ctx, mnemonic, "r%d, r%d, %d", a->rd, a->rj, a->imm); +} + +static void output_rrr_sa(DisasContext *ctx, arg_rrr_sa *a, + const char *mnemonic) +{ + output(ctx, mnemonic, "r%d, r%d, r%d, %d", a->rd, a->rj, a->rk, a->sa); +} + +static void output_rr(DisasContext *ctx, arg_rr *a, const char *mnemonic) +{ + output(ctx, mnemonic, "r%d, r%d", a->rd, a->rj); +} + +static void output_rr_ms_ls(DisasContext *ctx, arg_rr_ms_ls *a, + const char *mnemonic) +{ + output(ctx, mnemonic, "r%d, r%d, %d, %d", a->rd, a->rj, a->ms, a->ls); +} + +static void output_hint_r_i(DisasContext *ctx, arg_hint_r_i *a, + const char *mnemonic) +{ + output(ctx, mnemonic, "%d, r%d, %d", a->hint, a->rj, a->imm); +} + +static void output_i(DisasContext *ctx, arg_i *a, const char *mnemonic) +{ + output(ctx, mnemonic, "%d", a->imm); +} + +static void output_rr_jk(DisasContext *ctx, arg_rr_jk *a, + const char *mnemonic) +{ + output(ctx, mnemonic, "r%d, r%d", a->rj, a->rk); +} + +static void output_ff(DisasContext *ctx, arg_ff *a, const char *mnemonic) +{ + output(ctx, mnemonic, "f%d, f%d", a->fd, a->fj); +} + +static void output_fff(DisasContext *ctx, arg_fff *a, const char *mnemonic) +{ + output(ctx, mnemonic, "f%d, f%d, f%d", a->fd, a->fj, a->fk); +} + +static void output_ffff(DisasContext *ctx, arg_ffff *a, const char *mnemonic) +{ + output(ctx, mnemonic, "f%d, f%d, f%d, f%d", a->fd, a->fj, a->fk, a->fa); +} + +static void output_fffc(DisasContext *ctx, arg_fffc *a, const char *mnemonic) +{ + output(ctx, mnemonic, "f%d, f%d, f%d, %d", a->fd, a->fj, a->fk, a->ca); +} + +static void output_fr(DisasContext *ctx, arg_fr *a, const char *mnemonic) +{ + output(ctx, mnemonic, "f%d, r%d", a->fd, a->rj); +} + +static void output_rf(DisasContext *ctx, arg_rf *a, const char *mnemonic) +{ + output(ctx, mnemonic, "r%d, f%d", a->rd, a->fj); +} + +static void output_fcsrd_r(DisasContext *ctx, arg_fcsrd_r *a, + const char *mnemonic) +{ + output(ctx, mnemonic, "fcsr%d, r%d", a->fcsrd, a->rj); +} + +static void output_r_fcsrs(DisasContext *ctx, arg_r_fcsrs *a, + const char *mnemonic) +{ + output(ctx, mnemonic, "r%d, fcsr%d", a->rd, a->fcsrs); +} + +static void output_cf(DisasContext *ctx, arg_cf *a, const char *mnemonic) +{ + output(ctx, mnemonic, "fcc%d, f%d", a->cd, a->fj); +} + +static void output_fc(DisasContext *ctx, arg_fc *a, const char *mnemonic) +{ + output(ctx, mnemonic, "f%d, fcc%d", a->fd, a->cj); +} + +static void output_cr(DisasContext *ctx, arg_cr *a, const char *mnemonic) +{ + output(ctx, mnemonic, "fcc%d, r%d", a->cd, a->rj); +} + +static void 
output_rc(DisasContext *ctx, arg_rc *a, const char *mnemonic) +{ + output(ctx, mnemonic, "r%d, fcc%d", a->rd, a->cj); +} + +static void output_frr(DisasContext *ctx, arg_frr *a, const char *mnemonic) +{ + output(ctx, mnemonic, "f%d, r%d, r%d", a->fd, a->rj, a->rk); +} + +static void output_fr_i(DisasContext *ctx, arg_fr_i *a, const char *mnemonic) +{ + output(ctx, mnemonic, "f%d, r%d, %d", a->fd, a->rj, a->imm); +} + +static void output_r_offs(DisasContext *ctx, arg_r_offs *a, + const char *mnemonic) +{ + output(ctx, mnemonic, "r%d, %d # 0x%" PRIx64, a->rj, a->offs, + ctx->pc + a->offs); +} + +static void output_c_offs(DisasContext *ctx, arg_c_offs *a, + const char *mnemonic) +{ + output(ctx, mnemonic, "fcc%d, %d # 0x%" PRIx64, a->cj, a->offs, + ctx->pc + a->offs); +} + +static void output_offs(DisasContext *ctx, arg_offs *a, + const char *mnemonic) +{ + output(ctx, mnemonic, "%d # 0x%" PRIx64, a->offs, ctx->pc + a->offs); +} + +static void output_rr_offs(DisasContext *ctx, arg_rr_offs *a, + const char *mnemonic) +{ + output(ctx, mnemonic, "r%d, r%d, %d # 0x%" PRIx64, a->rj, + a->rd, a->offs, ctx->pc + a->offs); +} + +static void output_r_csr(DisasContext *ctx, arg_r_csr *a, + const char *mnemonic) +{ + output(ctx, mnemonic, "r%d, %d # %s", a->rd, a->csr, get_csr_name(a->csr)); +} + +static void output_rr_csr(DisasContext *ctx, arg_rr_csr *a, + const char *mnemonic) +{ + output(ctx, mnemonic, "r%d, r%d, %d # %s", + a->rd, a->rj, a->csr, get_csr_name(a->csr)); +} + +static void output_empty(DisasContext *ctx, arg_empty *a, + const char *mnemonic) +{ + output(ctx, mnemonic, ""); +} + +static void output_i_rr(DisasContext *ctx, arg_i_rr *a, const char *mnemonic) +{ + output(ctx, mnemonic, "%d, r%d, r%d", a->imm, a->rj, a->rk); +} + +static void output_cop_r_i(DisasContext *ctx, arg_cop_r_i *a, + const char *mnemonic) +{ + output(ctx, mnemonic, "%d, r%d, %d", a->cop, a->rj, a->imm); +} + +static void output_j_i(DisasContext *ctx, arg_j_i *a, const char *mnemonic) +{ + output(ctx, mnemonic, "r%d, %d", a->rj, a->imm); +} + +#define INSN(insn, type) \ +static bool trans_##insn(DisasContext *ctx, arg_##type * a) \ +{ \ + output_##type(ctx, a, #insn); \ + return true; \ +} + +INSN(clo_w, rr) +INSN(clz_w, rr) +INSN(cto_w, rr) +INSN(ctz_w, rr) +INSN(clo_d, rr) +INSN(clz_d, rr) +INSN(cto_d, rr) +INSN(ctz_d, rr) +INSN(revb_2h, rr) +INSN(revb_4h, rr) +INSN(revb_2w, rr) +INSN(revb_d, rr) +INSN(revh_2w, rr) +INSN(revh_d, rr) +INSN(bitrev_4b, rr) +INSN(bitrev_8b, rr) +INSN(bitrev_w, rr) +INSN(bitrev_d, rr) +INSN(ext_w_h, rr) +INSN(ext_w_b, rr) +INSN(rdtimel_w, rr) +INSN(rdtimeh_w, rr) +INSN(rdtime_d, rr) +INSN(cpucfg, rr) +INSN(asrtle_d, rr_jk) +INSN(asrtgt_d, rr_jk) +INSN(alsl_w, rrr_sa) +INSN(alsl_wu, rrr_sa) +INSN(bytepick_w, rrr_sa) +INSN(bytepick_d, rrr_sa) +INSN(add_w, rrr) +INSN(add_d, rrr) +INSN(sub_w, rrr) +INSN(sub_d, rrr) +INSN(slt, rrr) +INSN(sltu, rrr) +INSN(maskeqz, rrr) +INSN(masknez, rrr) +INSN(nor, rrr) +INSN(and, rrr) +INSN(or, rrr) +INSN(xor, rrr) +INSN(orn, rrr) +INSN(andn, rrr) +INSN(sll_w, rrr) +INSN(srl_w, rrr) +INSN(sra_w, rrr) +INSN(sll_d, rrr) +INSN(srl_d, rrr) +INSN(sra_d, rrr) +INSN(rotr_w, rrr) +INSN(rotr_d, rrr) +INSN(mul_w, rrr) +INSN(mulh_w, rrr) +INSN(mulh_wu, rrr) +INSN(mul_d, rrr) +INSN(mulh_d, rrr) +INSN(mulh_du, rrr) +INSN(mulw_d_w, rrr) +INSN(mulw_d_wu, rrr) +INSN(div_w, rrr) +INSN(mod_w, rrr) +INSN(div_wu, rrr) +INSN(mod_wu, rrr) +INSN(div_d, rrr) +INSN(mod_d, rrr) +INSN(div_du, rrr) +INSN(mod_du, rrr) +INSN(crc_w_b_w, rrr) +INSN(crc_w_h_w, rrr) +INSN(crc_w_w_w, rrr) 
+INSN(crc_w_d_w, rrr) +INSN(crcc_w_b_w, rrr) +INSN(crcc_w_h_w, rrr) +INSN(crcc_w_w_w, rrr) +INSN(crcc_w_d_w, rrr) +INSN(break, i) +INSN(syscall, i) +INSN(alsl_d, rrr_sa) +INSN(slli_w, rr_i) +INSN(slli_d, rr_i) +INSN(srli_w, rr_i) +INSN(srli_d, rr_i) +INSN(srai_w, rr_i) +INSN(srai_d, rr_i) +INSN(rotri_w, rr_i) +INSN(rotri_d, rr_i) +INSN(bstrins_w, rr_ms_ls) +INSN(bstrpick_w, rr_ms_ls) +INSN(bstrins_d, rr_ms_ls) +INSN(bstrpick_d, rr_ms_ls) +INSN(fadd_s, fff) +INSN(fadd_d, fff) +INSN(fsub_s, fff) +INSN(fsub_d, fff) +INSN(fmul_s, fff) +INSN(fmul_d, fff) +INSN(fdiv_s, fff) +INSN(fdiv_d, fff) +INSN(fmax_s, fff) +INSN(fmax_d, fff) +INSN(fmin_s, fff) +INSN(fmin_d, fff) +INSN(fmaxa_s, fff) +INSN(fmaxa_d, fff) +INSN(fmina_s, fff) +INSN(fmina_d, fff) +INSN(fscaleb_s, fff) +INSN(fscaleb_d, fff) +INSN(fcopysign_s, fff) +INSN(fcopysign_d, fff) +INSN(fabs_s, ff) +INSN(fabs_d, ff) +INSN(fneg_s, ff) +INSN(fneg_d, ff) +INSN(flogb_s, ff) +INSN(flogb_d, ff) +INSN(fclass_s, ff) +INSN(fclass_d, ff) +INSN(fsqrt_s, ff) +INSN(fsqrt_d, ff) +INSN(frecip_s, ff) +INSN(frecip_d, ff) +INSN(frsqrt_s, ff) +INSN(frsqrt_d, ff) +INSN(fmov_s, ff) +INSN(fmov_d, ff) +INSN(movgr2fr_w, fr) +INSN(movgr2fr_d, fr) +INSN(movgr2frh_w, fr) +INSN(movfr2gr_s, rf) +INSN(movfr2gr_d, rf) +INSN(movfrh2gr_s, rf) +INSN(movgr2fcsr, fcsrd_r) +INSN(movfcsr2gr, r_fcsrs) +INSN(movfr2cf, cf) +INSN(movcf2fr, fc) +INSN(movgr2cf, cr) +INSN(movcf2gr, rc) +INSN(fcvt_s_d, ff) +INSN(fcvt_d_s, ff) +INSN(ftintrm_w_s, ff) +INSN(ftintrm_w_d, ff) +INSN(ftintrm_l_s, ff) +INSN(ftintrm_l_d, ff) +INSN(ftintrp_w_s, ff) +INSN(ftintrp_w_d, ff) +INSN(ftintrp_l_s, ff) +INSN(ftintrp_l_d, ff) +INSN(ftintrz_w_s, ff) +INSN(ftintrz_w_d, ff) +INSN(ftintrz_l_s, ff) +INSN(ftintrz_l_d, ff) +INSN(ftintrne_w_s, ff) +INSN(ftintrne_w_d, ff) +INSN(ftintrne_l_s, ff) +INSN(ftintrne_l_d, ff) +INSN(ftint_w_s, ff) +INSN(ftint_w_d, ff) +INSN(ftint_l_s, ff) +INSN(ftint_l_d, ff) +INSN(ffint_s_w, ff) +INSN(ffint_s_l, ff) +INSN(ffint_d_w, ff) +INSN(ffint_d_l, ff) +INSN(frint_s, ff) +INSN(frint_d, ff) +INSN(slti, rr_i) +INSN(sltui, rr_i) +INSN(addi_w, rr_i) +INSN(addi_d, rr_i) +INSN(lu52i_d, rr_i) +INSN(andi, rr_i) +INSN(ori, rr_i) +INSN(xori, rr_i) +INSN(fmadd_s, ffff) +INSN(fmadd_d, ffff) +INSN(fmsub_s, ffff) +INSN(fmsub_d, ffff) +INSN(fnmadd_s, ffff) +INSN(fnmadd_d, ffff) +INSN(fnmsub_s, ffff) +INSN(fnmsub_d, ffff) +INSN(fsel, fffc) +INSN(addu16i_d, rr_i) +INSN(lu12i_w, r_i) +INSN(lu32i_d, r_i) +INSN(pcaddi, r_i) +INSN(pcalau12i, r_i) +INSN(pcaddu12i, r_i) +INSN(pcaddu18i, r_i) +INSN(ll_w, rr_i) +INSN(sc_w, rr_i) +INSN(ll_d, rr_i) +INSN(sc_d, rr_i) +INSN(ldptr_w, rr_i) +INSN(stptr_w, rr_i) +INSN(ldptr_d, rr_i) +INSN(stptr_d, rr_i) +INSN(ld_b, rr_i) +INSN(ld_h, rr_i) +INSN(ld_w, rr_i) +INSN(ld_d, rr_i) +INSN(st_b, rr_i) +INSN(st_h, rr_i) +INSN(st_w, rr_i) +INSN(st_d, rr_i) +INSN(ld_bu, rr_i) +INSN(ld_hu, rr_i) +INSN(ld_wu, rr_i) +INSN(preld, hint_r_i) +INSN(fld_s, fr_i) +INSN(fst_s, fr_i) +INSN(fld_d, fr_i) +INSN(fst_d, fr_i) +INSN(ldx_b, rrr) +INSN(ldx_h, rrr) +INSN(ldx_w, rrr) +INSN(ldx_d, rrr) +INSN(stx_b, rrr) +INSN(stx_h, rrr) +INSN(stx_w, rrr) +INSN(stx_d, rrr) +INSN(ldx_bu, rrr) +INSN(ldx_hu, rrr) +INSN(ldx_wu, rrr) +INSN(fldx_s, frr) +INSN(fldx_d, frr) +INSN(fstx_s, frr) +INSN(fstx_d, frr) +INSN(amswap_w, rrr) +INSN(amswap_d, rrr) +INSN(amadd_w, rrr) +INSN(amadd_d, rrr) +INSN(amand_w, rrr) +INSN(amand_d, rrr) +INSN(amor_w, rrr) +INSN(amor_d, rrr) +INSN(amxor_w, rrr) +INSN(amxor_d, rrr) +INSN(ammax_w, rrr) +INSN(ammax_d, rrr) +INSN(ammin_w, rrr) +INSN(ammin_d, rrr) +INSN(ammax_wu, rrr) 
+INSN(ammax_du, rrr) +INSN(ammin_wu, rrr) +INSN(ammin_du, rrr) +INSN(amswap_db_w, rrr) +INSN(amswap_db_d, rrr) +INSN(amadd_db_w, rrr) +INSN(amadd_db_d, rrr) +INSN(amand_db_w, rrr) +INSN(amand_db_d, rrr) +INSN(amor_db_w, rrr) +INSN(amor_db_d, rrr) +INSN(amxor_db_w, rrr) +INSN(amxor_db_d, rrr) +INSN(ammax_db_w, rrr) +INSN(ammax_db_d, rrr) +INSN(ammin_db_w, rrr) +INSN(ammin_db_d, rrr) +INSN(ammax_db_wu, rrr) +INSN(ammax_db_du, rrr) +INSN(ammin_db_wu, rrr) +INSN(ammin_db_du, rrr) +INSN(dbar, i) +INSN(ibar, i) +INSN(fldgt_s, frr) +INSN(fldgt_d, frr) +INSN(fldle_s, frr) +INSN(fldle_d, frr) +INSN(fstgt_s, frr) +INSN(fstgt_d, frr) +INSN(fstle_s, frr) +INSN(fstle_d, frr) +INSN(ldgt_b, rrr) +INSN(ldgt_h, rrr) +INSN(ldgt_w, rrr) +INSN(ldgt_d, rrr) +INSN(ldle_b, rrr) +INSN(ldle_h, rrr) +INSN(ldle_w, rrr) +INSN(ldle_d, rrr) +INSN(stgt_b, rrr) +INSN(stgt_h, rrr) +INSN(stgt_w, rrr) +INSN(stgt_d, rrr) +INSN(stle_b, rrr) +INSN(stle_h, rrr) +INSN(stle_w, rrr) +INSN(stle_d, rrr) +INSN(beqz, r_offs) +INSN(bnez, r_offs) +INSN(bceqz, c_offs) +INSN(bcnez, c_offs) +INSN(jirl, rr_offs) +INSN(b, offs) +INSN(bl, offs) +INSN(beq, rr_offs) +INSN(bne, rr_offs) +INSN(blt, rr_offs) +INSN(bge, rr_offs) +INSN(bltu, rr_offs) +INSN(bgeu, rr_offs) +INSN(csrrd, r_csr) +INSN(csrwr, r_csr) +INSN(csrxchg, rr_csr) +INSN(iocsrrd_b, rr) +INSN(iocsrrd_h, rr) +INSN(iocsrrd_w, rr) +INSN(iocsrrd_d, rr) +INSN(iocsrwr_b, rr) +INSN(iocsrwr_h, rr) +INSN(iocsrwr_w, rr) +INSN(iocsrwr_d, rr) +INSN(tlbsrch, empty) +INSN(tlbrd, empty) +INSN(tlbwr, empty) +INSN(tlbfill, empty) +INSN(tlbclr, empty) +INSN(tlbflush, empty) +INSN(invtlb, i_rr) +INSN(cacop, cop_r_i) +INSN(lddir, rr_i) +INSN(ldpte, j_i) +INSN(ertn, empty) +INSN(idle, i) +INSN(dbcl, i) + +#define output_fcmp(C, PREFIX, SUFFIX) \ +{ \ + (C)->info->fprintf_func((C)->info->stream, "%08x %s%s\tfcc%d, f%d, f%d", \ + (C)->insn, PREFIX, SUFFIX, a->cd, \ + a->fj, a->fk); \ +} + +static bool output_cff_fcond(DisasContext *ctx, arg_cff_fcond * a, + const char *suffix) +{ + bool ret = true; + switch (a->fcond) { + case 0x0: + output_fcmp(ctx, "fcmp_caf_", suffix); + break; + case 0x1: + output_fcmp(ctx, "fcmp_saf_", suffix); + break; + case 0x2: + output_fcmp(ctx, "fcmp_clt_", suffix); + break; + case 0x3: + output_fcmp(ctx, "fcmp_slt_", suffix); + break; + case 0x4: + output_fcmp(ctx, "fcmp_ceq_", suffix); + break; + case 0x5: + output_fcmp(ctx, "fcmp_seq_", suffix); + break; + case 0x6: + output_fcmp(ctx, "fcmp_cle_", suffix); + break; + case 0x7: + output_fcmp(ctx, "fcmp_sle_", suffix); + break; + case 0x8: + output_fcmp(ctx, "fcmp_cun_", suffix); + break; + case 0x9: + output_fcmp(ctx, "fcmp_sun_", suffix); + break; + case 0xA: + output_fcmp(ctx, "fcmp_cult_", suffix); + break; + case 0xB: + output_fcmp(ctx, "fcmp_sult_", suffix); + break; + case 0xC: + output_fcmp(ctx, "fcmp_cueq_", suffix); + break; + case 0xD: + output_fcmp(ctx, "fcmp_sueq_", suffix); + break; + case 0xE: + output_fcmp(ctx, "fcmp_cule_", suffix); + break; + case 0xF: + output_fcmp(ctx, "fcmp_sule_", suffix); + break; + case 0x10: + output_fcmp(ctx, "fcmp_cne_", suffix); + break; + case 0x11: + output_fcmp(ctx, "fcmp_sne_", suffix); + break; + case 0x14: + output_fcmp(ctx, "fcmp_cor_", suffix); + break; + case 0x15: + output_fcmp(ctx, "fcmp_sor_", suffix); + break; + case 0x18: + output_fcmp(ctx, "fcmp_cune_", suffix); + break; + case 0x19: + output_fcmp(ctx, "fcmp_sune_", suffix); + break; + default: + ret = false; + } + return ret; +} + +#define FCMP_INSN(suffix) \ +static bool trans_fcmp_cond_##suffix(DisasContext *ctx, \ 
+ arg_cff_fcond * a) \ +{ \ + return output_cff_fcond(ctx, a, #suffix); \ +} + +FCMP_INSN(s) +FCMP_INSN(d) diff --git a/target/loongarch/fpu_helper.c b/target/loongarch/fpu_helper.c new file mode 100644 index 0000000000..4b9637210a --- /dev/null +++ b/target/loongarch/fpu_helper.c @@ -0,0 +1,879 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch float point emulation helpers for QEMU + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "fpu/softfloat.h" +#include "internals.h" + +static inline uint64_t nanbox_s(float32 fp) +{ + return fp | MAKE_64BIT_MASK(32, 32); +} + +/* Convert loongarch rounding mode in fcsr0 to IEEE library */ +static const FloatRoundMode ieee_rm[4] = { + float_round_nearest_even, + float_round_to_zero, + float_round_up, + float_round_down +}; + +void restore_fp_status(CPULoongArchState *env) +{ + set_float_rounding_mode(ieee_rm[(env->fcsr0 >> FCSR0_RM) & 0x3], + &env->fp_status); + set_flush_to_zero(0, &env->fp_status); +} + +static int ieee_ex_to_loongarch(int xcpt) +{ + int ret = 0; + if (xcpt & float_flag_invalid) { + ret |= FP_INVALID; + } + if (xcpt & float_flag_overflow) { + ret |= FP_OVERFLOW; + } + if (xcpt & float_flag_underflow) { + ret |= FP_UNDERFLOW; + } + if (xcpt & float_flag_divbyzero) { + ret |= FP_DIV0; + } + if (xcpt & float_flag_inexact) { + ret |= FP_INEXACT; + } + return ret; +} + +static void update_fcsr0_mask(CPULoongArchState *env, uintptr_t pc, int mask) +{ + int flags = get_float_exception_flags(&env->fp_status); + + set_float_exception_flags(0, &env->fp_status); + + flags &= ~mask; + + if (!flags) { + SET_FP_CAUSE(env->fcsr0, flags); + return; + } else { + flags = ieee_ex_to_loongarch(flags); + SET_FP_CAUSE(env->fcsr0, flags); + } + + if (GET_FP_ENABLES(env->fcsr0) & flags) { + do_raise_exception(env, EXCCODE_FPE, pc); + } else { + UPDATE_FP_FLAGS(env->fcsr0, flags); + } +} + +static void update_fcsr0(CPULoongArchState *env, uintptr_t pc) +{ + update_fcsr0_mask(env, pc, 0); +} + +uint64_t helper_fadd_s(CPULoongArchState *env, uint64_t fj, uint64_t fk) +{ + uint64_t fd; + + fd = nanbox_s(float32_add((uint32_t)fj, (uint32_t)fk, &env->fp_status)); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fadd_d(CPULoongArchState *env, uint64_t fj, uint64_t fk) +{ + uint64_t fd; + + fd = float64_add(fj, fk, &env->fp_status); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fsub_s(CPULoongArchState *env, uint64_t fj, uint64_t fk) +{ + uint64_t fd; + + fd = nanbox_s(float32_sub((uint32_t)fj, (uint32_t)fk, &env->fp_status)); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fsub_d(CPULoongArchState *env, uint64_t fj, uint64_t fk) +{ + uint64_t fd; + + fd = float64_sub(fj, fk, &env->fp_status); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fmul_s(CPULoongArchState *env, uint64_t fj, uint64_t fk) +{ + uint64_t fd; + + fd = nanbox_s(float32_mul((uint32_t)fj, (uint32_t)fk, &env->fp_status)); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fmul_d(CPULoongArchState *env, uint64_t fj, uint64_t fk) +{ + uint64_t fd; + + fd = float64_mul(fj, fk, &env->fp_status); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fdiv_s(CPULoongArchState *env, uint64_t fj, uint64_t fk) +{ + uint64_t fd; + + fd = nanbox_s(float32_div((uint32_t)fj, (uint32_t)fk, &env->fp_status)); + update_fcsr0(env, GETPC()); 
+ return fd; +} + +uint64_t helper_fdiv_d(CPULoongArchState *env, uint64_t fj, uint64_t fk) +{ + uint64_t fd; + + fd = float64_div(fj, fk, &env->fp_status); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fmax_s(CPULoongArchState *env, uint64_t fj, uint64_t fk) +{ + uint64_t fd; + + fd = nanbox_s(float32_maxnum((uint32_t)fj, (uint32_t)fk, &env->fp_status)); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fmax_d(CPULoongArchState *env, uint64_t fj, uint64_t fk) +{ + uint64_t fd; + + fd = float64_maxnum(fj, fk, &env->fp_status); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fmin_s(CPULoongArchState *env, uint64_t fj, uint64_t fk) +{ + uint64_t fd; + + fd = nanbox_s(float32_minnum((uint32_t)fj, (uint32_t)fk, &env->fp_status)); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fmin_d(CPULoongArchState *env, uint64_t fj, uint64_t fk) +{ + uint64_t fd; + + fd = float64_minnum(fj, fk, &env->fp_status); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fmaxa_s(CPULoongArchState *env, uint64_t fj, uint64_t fk) +{ + uint64_t fd; + + fd = nanbox_s(float32_maxnummag((uint32_t)fj, + (uint32_t)fk, &env->fp_status)); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fmaxa_d(CPULoongArchState *env, uint64_t fj, uint64_t fk) +{ + uint64_t fd; + + fd = float64_maxnummag(fj, fk, &env->fp_status); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fmina_s(CPULoongArchState *env, uint64_t fj, uint64_t fk) +{ + uint64_t fd; + + fd = nanbox_s(float32_minnummag((uint32_t)fj, + (uint32_t)fk, &env->fp_status)); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fmina_d(CPULoongArchState *env, uint64_t fj, uint64_t fk) +{ + uint64_t fd; + + fd = float64_minnummag(fj, fk, &env->fp_status); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fscaleb_s(CPULoongArchState *env, uint64_t fj, uint64_t fk) +{ + uint64_t fd; + int32_t n = (int32_t)fk; + + fd = nanbox_s(float32_scalbn((uint32_t)fj, + n > 0x200 ? 0x200 : + n < -0x200 ? -0x200 : n, + &env->fp_status)); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fscaleb_d(CPULoongArchState *env, uint64_t fj, uint64_t fk) +{ + uint64_t fd; + int64_t n = (int64_t)fk; + + fd = float64_scalbn(fj, + n > 0x1000 ? 0x1000 : + n < -0x1000 ? 
-0x1000 : n, + &env->fp_status); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fsqrt_s(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + + fd = nanbox_s(float32_sqrt((uint32_t)fj, &env->fp_status)); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fsqrt_d(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + + fd = float64_sqrt(fj, &env->fp_status); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_frecip_s(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + + fd = nanbox_s(float32_div(float32_one, (uint32_t)fj, &env->fp_status)); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_frecip_d(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + + fd = float64_div(float64_one, fj, &env->fp_status); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_frsqrt_s(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + uint32_t fp; + + fp = float32_sqrt((uint32_t)fj, &env->fp_status); + fd = nanbox_s(float32_div(float32_one, fp, &env->fp_status)); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_frsqrt_d(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fp, fd; + + fp = float64_sqrt(fj, &env->fp_status); + fd = float64_div(float64_one, fp, &env->fp_status); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_flogb_s(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + uint32_t fp; + float_status *status = &env->fp_status; + FloatRoundMode old_mode = get_float_rounding_mode(status); + + set_float_rounding_mode(float_round_down, status); + fp = float32_log2((uint32_t)fj, status); + fd = nanbox_s(float32_round_to_int(fp, status)); + set_float_rounding_mode(old_mode, status); + update_fcsr0_mask(env, GETPC(), float_flag_inexact); + return fd; +} + +uint64_t helper_flogb_d(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + float_status *status = &env->fp_status; + FloatRoundMode old_mode = get_float_rounding_mode(status); + + set_float_rounding_mode(float_round_down, status); + fd = float64_log2(fj, status); + fd = float64_round_to_int(fd, status); + set_float_rounding_mode(old_mode, status); + update_fcsr0_mask(env, GETPC(), float_flag_inexact); + return fd; +} + +uint64_t helper_fclass_s(CPULoongArchState *env, uint64_t fj) +{ + float32 f = fj; + bool sign = float32_is_neg(f); + + if (float32_is_infinity(f)) { + return sign ? 1 << 2 : 1 << 6; + } else if (float32_is_zero(f)) { + return sign ? 1 << 5 : 1 << 9; + } else if (float32_is_zero_or_denormal(f)) { + return sign ? 1 << 4 : 1 << 8; + } else if (float32_is_any_nan(f)) { + float_status s = { }; /* for snan_bit_is_one */ + return float32_is_quiet_nan(f, &s) ? 1 << 1 : 1 << 0; + } else { + return sign ? 1 << 3 : 1 << 7; + } +} + +uint64_t helper_fclass_d(CPULoongArchState *env, uint64_t fj) +{ + float64 f = fj; + bool sign = float64_is_neg(f); + + if (float64_is_infinity(f)) { + return sign ? 1 << 2 : 1 << 6; + } else if (float64_is_zero(f)) { + return sign ? 1 << 5 : 1 << 9; + } else if (float64_is_zero_or_denormal(f)) { + return sign ? 1 << 4 : 1 << 8; + } else if (float64_is_any_nan(f)) { + float_status s = { }; /* for snan_bit_is_one */ + return float64_is_quiet_nan(f, &s) ? 1 << 1 : 1 << 0; + } else { + return sign ? 
1 << 3 : 1 << 7; + } +} + +uint64_t helper_fmuladd_s(CPULoongArchState *env, uint64_t fj, + uint64_t fk, uint64_t fa, uint32_t flag) +{ + uint64_t fd; + + fd = nanbox_s(float32_muladd((uint32_t)fj, (uint32_t)fk, + (uint32_t)fa, flag, &env->fp_status)); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fmuladd_d(CPULoongArchState *env, uint64_t fj, + uint64_t fk, uint64_t fa, uint32_t flag) +{ + uint64_t fd; + + fd = float64_muladd(fj, fk, fa, flag, &env->fp_status); + update_fcsr0(env, GETPC()); + return fd; +} + +static uint64_t fcmp_common(CPULoongArchState *env, FloatRelation cmp, + uint32_t flags) +{ + bool ret; + + switch (cmp) { + case float_relation_less: + ret = (flags & FCMP_LT); + break; + case float_relation_equal: + ret = (flags & FCMP_EQ); + break; + case float_relation_greater: + ret = (flags & FCMP_GT); + break; + case float_relation_unordered: + ret = (flags & FCMP_UN); + break; + default: + g_assert_not_reached(); + } + update_fcsr0(env, GETPC()); + + return ret; +} + +/* fcmp_cXXX_s */ +uint64_t helper_fcmp_c_s(CPULoongArchState *env, uint64_t fj, + uint64_t fk, uint32_t flags) +{ + FloatRelation cmp = float32_compare_quiet((uint32_t)fj, + (uint32_t)fk, &env->fp_status); + return fcmp_common(env, cmp, flags); +} + +/* fcmp_sXXX_s */ +uint64_t helper_fcmp_s_s(CPULoongArchState *env, uint64_t fj, + uint64_t fk, uint32_t flags) +{ + FloatRelation cmp = float32_compare((uint32_t)fj, + (uint32_t)fk, &env->fp_status); + return fcmp_common(env, cmp, flags); +} + +/* fcmp_cXXX_d */ +uint64_t helper_fcmp_c_d(CPULoongArchState *env, uint64_t fj, + uint64_t fk, uint32_t flags) +{ + FloatRelation cmp = float64_compare_quiet(fj, fk, &env->fp_status); + return fcmp_common(env, cmp, flags); +} + +/* fcmp_sXXX_d */ +uint64_t helper_fcmp_s_d(CPULoongArchState *env, uint64_t fj, + uint64_t fk, uint32_t flags) +{ + FloatRelation cmp = float64_compare(fj, fk, &env->fp_status); + return fcmp_common(env, cmp, flags); +} + +/* floating point conversion */ +uint64_t helper_fcvt_s_d(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + + fd = nanbox_s(float64_to_float32(fj, &env->fp_status)); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_fcvt_d_s(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + + fd = float32_to_float64((uint32_t)fj, &env->fp_status); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ffint_s_w(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + + fd = nanbox_s(int32_to_float32((int32_t)fj, &env->fp_status)); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ffint_s_l(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + + fd = nanbox_s(int64_to_float32(fj, &env->fp_status)); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ffint_d_w(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + + fd = int32_to_float64((int32_t)fj, &env->fp_status); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ffint_d_l(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + + fd = int64_to_float64(fj, &env->fp_status); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_frint_s(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + + fd = (uint64_t)(float32_round_to_int((uint32_t)fj, &env->fp_status)); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_frint_d(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + + fd = float64_round_to_int(fj, &env->fp_status); + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t 
helper_ftintrm_l_d(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); + + set_float_rounding_mode(float_round_down, &env->fp_status); + fd = float64_to_int64(fj, &env->fp_status); + set_float_rounding_mode(old_mode, &env->fp_status); + + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ftintrm_l_s(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); + + set_float_rounding_mode(float_round_down, &env->fp_status); + fd = float32_to_int64((uint32_t)fj, &env->fp_status); + set_float_rounding_mode(old_mode, &env->fp_status); + + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ftintrm_w_d(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); + + set_float_rounding_mode(float_round_down, &env->fp_status); + fd = (uint64_t)float64_to_int32(fj, &env->fp_status); + set_float_rounding_mode(old_mode, &env->fp_status); + + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ftintrm_w_s(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); + + set_float_rounding_mode(float_round_down, &env->fp_status); + fd = (uint64_t)float32_to_int32((uint32_t)fj, &env->fp_status); + set_float_rounding_mode(old_mode, &env->fp_status); + + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ftintrp_l_d(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); + + set_float_rounding_mode(float_round_up, &env->fp_status); + fd = float64_to_int64(fj, &env->fp_status); + set_float_rounding_mode(old_mode, &env->fp_status); + + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ftintrp_l_s(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); + + set_float_rounding_mode(float_round_up, &env->fp_status); + fd = float32_to_int64((uint32_t)fj, &env->fp_status); + set_float_rounding_mode(old_mode, &env->fp_status); + + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ftintrp_w_d(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); + + set_float_rounding_mode(float_round_up, &env->fp_status); + fd = (uint64_t)float64_to_int32(fj, &env->fp_status); + set_float_rounding_mode(old_mode, &env->fp_status); + + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t 
helper_ftintrp_w_s(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); + + set_float_rounding_mode(float_round_up, &env->fp_status); + fd = (uint64_t)float32_to_int32((uint32_t)fj, &env->fp_status); + set_float_rounding_mode(old_mode, &env->fp_status); + + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ftintrz_l_d(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); + + fd = float64_to_int64_round_to_zero(fj, &env->fp_status); + set_float_rounding_mode(old_mode, &env->fp_status); + + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ftintrz_l_s(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); + + fd = float32_to_int64_round_to_zero((uint32_t)fj, &env->fp_status); + set_float_rounding_mode(old_mode, &env->fp_status); + + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ftintrz_w_d(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); + + fd = (uint64_t)float64_to_int32_round_to_zero(fj, &env->fp_status); + set_float_rounding_mode(old_mode, &env->fp_status); + + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ftintrz_w_s(CPULoongArchState *env, uint64_t fj) +{ + uint32_t fd; + FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); + + fd = float32_to_int32_round_to_zero((uint32_t)fj, &env->fp_status); + set_float_rounding_mode(old_mode, &env->fp_status); + + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return (uint64_t)fd; +} + +uint64_t helper_ftintrne_l_d(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); + + set_float_rounding_mode(float_round_nearest_even, &env->fp_status); + fd = float64_to_int64(fj, &env->fp_status); + set_float_rounding_mode(old_mode, &env->fp_status); + + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ftintrne_l_s(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); + + set_float_rounding_mode(float_round_nearest_even, &env->fp_status); + fd = float32_to_int64((uint32_t)fj, &env->fp_status); + set_float_rounding_mode(old_mode, &env->fp_status); + + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ftintrne_w_d(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); + + 
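/*
 * Condensed sketch, not part of the patch: the ftintr{m,p,z,ne}_{w,l}_{s,d}
 * helpers in this file all follow the same shape: temporarily switch the
 * softfloat rounding mode, convert, restore the old mode, and force the
 * result to 0 when the source was a NaN and the conversion raised
 * float_flag_invalid, so a NaN reads back as 0 regardless of what the
 * conversion returned. The function name below is made up for the example;
 * the real helpers also call update_fcsr0(env, GETPC()) before returning.
 */
static uint64_t ftint_rounded_sketch(CPULoongArchState *env, uint64_t fj,
                                     FloatRoundMode rm)
{
    FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status);
    uint64_t fd;

    set_float_rounding_mode(rm, &env->fp_status);
    fd = float64_to_int64(fj, &env->fp_status);
    set_float_rounding_mode(old_mode, &env->fp_status);

    if ((get_float_exception_flags(&env->fp_status) & float_flag_invalid) &&
        float64_is_any_nan(fj)) {
        fd = 0;
    }
    return fd;
}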
set_float_rounding_mode(float_round_nearest_even, &env->fp_status); + fd = (uint64_t)float64_to_int32(fj, &env->fp_status); + set_float_rounding_mode(old_mode, &env->fp_status); + + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ftintrne_w_s(CPULoongArchState *env, uint64_t fj) +{ + uint32_t fd; + FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); + + set_float_rounding_mode(float_round_nearest_even, &env->fp_status); + fd = float32_to_int32((uint32_t)fj, &env->fp_status); + set_float_rounding_mode(old_mode, &env->fp_status); + + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return (uint64_t)fd; +} + +uint64_t helper_ftint_l_d(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + + fd = float64_to_int64(fj, &env->fp_status); + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ftint_l_s(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + + fd = float32_to_int64((uint32_t)fj, &env->fp_status); + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ftint_w_s(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + + fd = (uint64_t)float32_to_int32((uint32_t)fj, &env->fp_status); + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return fd; +} + +uint64_t helper_ftint_w_d(CPULoongArchState *env, uint64_t fj) +{ + uint64_t fd; + + fd = (uint64_t)float64_to_int32(fj, &env->fp_status); + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } + } + update_fcsr0(env, GETPC()); + return fd; +} + +void helper_set_rounding_mode(CPULoongArchState *env) +{ + set_float_rounding_mode(ieee_rm[(env->fcsr0 >> FCSR0_RM) & 0x3], + &env->fp_status); +} diff --git a/target/loongarch/gdbstub.c b/target/loongarch/gdbstub.c new file mode 100644 index 0000000000..a4d1e28e36 --- /dev/null +++ b/target/loongarch/gdbstub.c @@ -0,0 +1,104 @@ +/* + * LOONGARCH gdb server stub + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + * + * SPDX-License-Identifier: LGPL-2.1+ + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internals.h" +#include "exec/gdbstub.h" + +uint64_t read_fcc(CPULoongArchState *env) +{ + uint64_t ret = 0; + + for (int i = 0; i < 8; ++i) { + ret |= (uint64_t)env->cf[i] << (i * 8); + } + + return ret; +} + +void write_fcc(CPULoongArchState *env, uint64_t val) +{ + for (int i = 0; i < 8; ++i) { + env->cf[i] = (val >> (i * 8)) & 1; + } +} + +int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + if (0 <= n && n < 32) { + return gdb_get_regl(mem_buf, env->gpr[n]); + } else if (n == 32) { + /* orig_a0 */ + return gdb_get_regl(mem_buf, 0); + } else if (n == 33) { + return gdb_get_regl(mem_buf, env->pc); + } else if (n == 34) { + return gdb_get_regl(mem_buf, env->CSR_BADV); + } + return 0; +} + +int loongarch_cpu_gdb_write_register(CPUState 
*cs, uint8_t *mem_buf, int n) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + target_ulong tmp = ldtul_p(mem_buf); + int length = 0; + + if (0 <= n && n < 32) { + env->gpr[n] = tmp; + length = sizeof(target_ulong); + } else if (n == 33) { + env->pc = tmp; + length = sizeof(target_ulong); + } + return length; +} + +static int loongarch_gdb_get_fpu(CPULoongArchState *env, + GByteArray *mem_buf, int n) +{ + if (0 <= n && n < 32) { + return gdb_get_reg64(mem_buf, env->fpr[n]); + } else if (n == 32) { + uint64_t val = read_fcc(env); + return gdb_get_reg64(mem_buf, val); + } else if (n == 33) { + return gdb_get_reg32(mem_buf, env->fcsr0); + } + return 0; +} + +static int loongarch_gdb_set_fpu(CPULoongArchState *env, + uint8_t *mem_buf, int n) +{ + int length = 0; + + if (0 <= n && n < 32) { + env->fpr[n] = ldq_p(mem_buf); + length = 8; + } else if (n == 32) { + uint64_t val = ldq_p(mem_buf); + write_fcc(env, val); + length = 8; + } else if (n == 33) { + env->fcsr0 = ldl_p(mem_buf); + length = 4; + } + return length; +} + +void loongarch_cpu_register_gdb_regs_for_features(CPUState *cs) +{ + gdb_register_coprocessor(cs, loongarch_gdb_get_fpu, loongarch_gdb_set_fpu, + 41, "loongarch-fpu.xml", 0); +} diff --git a/target/loongarch/helper.h b/target/loongarch/helper.h new file mode 100644 index 0000000000..9c01823a26 --- /dev/null +++ b/target/loongarch/helper.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +DEF_HELPER_2(raise_exception, noreturn, env, i32) + +DEF_HELPER_FLAGS_1(bitrev_w, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(bitrev_d, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(bitswap, TCG_CALL_NO_RWG_SE, tl, tl) + +DEF_HELPER_FLAGS_3(asrtle_d, TCG_CALL_NO_WG, void, env, tl, tl) +DEF_HELPER_FLAGS_3(asrtgt_d, TCG_CALL_NO_WG, void, env, tl, tl) + +DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl) +DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl) +DEF_HELPER_FLAGS_2(cpucfg, TCG_CALL_NO_RWG_SE, tl, env, tl) + +/* Floating-point helper */ +DEF_HELPER_FLAGS_3(fadd_s, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fadd_d, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fsub_s, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fsub_d, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmul_s, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmul_d, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fdiv_s, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fdiv_d, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmax_s, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmax_d, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmin_s, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmin_d, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmaxa_s, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmaxa_d, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmina_s, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmina_d, TCG_CALL_NO_WG, i64, env, i64, i64) + +DEF_HELPER_FLAGS_5(fmuladd_s, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i32) +DEF_HELPER_FLAGS_5(fmuladd_d, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i32) + +DEF_HELPER_FLAGS_3(fscaleb_s, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fscaleb_d, TCG_CALL_NO_WG, i64, env, i64, i64) + +DEF_HELPER_FLAGS_2(flogb_s, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_2(flogb_d, 
TCG_CALL_NO_WG, i64, env, i64) + +DEF_HELPER_FLAGS_2(fsqrt_s, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_2(fsqrt_d, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_2(frsqrt_s, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_2(frsqrt_d, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_2(frecip_s, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_2(frecip_d, TCG_CALL_NO_WG, i64, env, i64) + +DEF_HELPER_FLAGS_2(fclass_s, TCG_CALL_NO_RWG_SE, i64, env, i64) +DEF_HELPER_FLAGS_2(fclass_d, TCG_CALL_NO_RWG_SE, i64, env, i64) + +/* fcmp.cXXX.s */ +DEF_HELPER_4(fcmp_c_s, i64, env, i64, i64, i32) +/* fcmp.sXXX.s */ +DEF_HELPER_4(fcmp_s_s, i64, env, i64, i64, i32) +/* fcmp.cXXX.d */ +DEF_HELPER_4(fcmp_c_d, i64, env, i64, i64, i32) +/* fcmp.sXXX.d */ +DEF_HELPER_4(fcmp_s_d, i64, env, i64, i64, i32) + +DEF_HELPER_2(fcvt_d_s, i64, env, i64) +DEF_HELPER_2(fcvt_s_d, i64, env, i64) +DEF_HELPER_2(ffint_d_w, i64, env, i64) +DEF_HELPER_2(ffint_d_l, i64, env, i64) +DEF_HELPER_2(ffint_s_w, i64, env, i64) +DEF_HELPER_2(ffint_s_l, i64, env, i64) +DEF_HELPER_2(ftintrm_l_s, i64, env, i64) +DEF_HELPER_2(ftintrm_l_d, i64, env, i64) +DEF_HELPER_2(ftintrm_w_s, i64, env, i64) +DEF_HELPER_2(ftintrm_w_d, i64, env, i64) +DEF_HELPER_2(ftintrp_l_s, i64, env, i64) +DEF_HELPER_2(ftintrp_l_d, i64, env, i64) +DEF_HELPER_2(ftintrp_w_s, i64, env, i64) +DEF_HELPER_2(ftintrp_w_d, i64, env, i64) +DEF_HELPER_2(ftintrz_l_s, i64, env, i64) +DEF_HELPER_2(ftintrz_l_d, i64, env, i64) +DEF_HELPER_2(ftintrz_w_s, i64, env, i64) +DEF_HELPER_2(ftintrz_w_d, i64, env, i64) +DEF_HELPER_2(ftintrne_l_s, i64, env, i64) +DEF_HELPER_2(ftintrne_l_d, i64, env, i64) +DEF_HELPER_2(ftintrne_w_s, i64, env, i64) +DEF_HELPER_2(ftintrne_w_d, i64, env, i64) +DEF_HELPER_2(ftint_l_s, i64, env, i64) +DEF_HELPER_2(ftint_l_d, i64, env, i64) +DEF_HELPER_2(ftint_w_s, i64, env, i64) +DEF_HELPER_2(ftint_w_d, i64, env, i64) +DEF_HELPER_2(frint_s, i64, env, i64) +DEF_HELPER_2(frint_d, i64, env, i64) + +DEF_HELPER_FLAGS_1(set_rounding_mode, TCG_CALL_NO_RWG, void, env) + +DEF_HELPER_1(rdtime_d, i64, env) + +#ifndef CONFIG_USER_ONLY +/* CSRs helper */ +DEF_HELPER_1(csrrd_pgd, i64, env) +DEF_HELPER_1(csrrd_tval, i64, env) +DEF_HELPER_2(csrwr_estat, i64, env, tl) +DEF_HELPER_2(csrwr_asid, i64, env, tl) +DEF_HELPER_2(csrwr_tcfg, i64, env, tl) +DEF_HELPER_2(csrwr_ticlr, i64, env, tl) +DEF_HELPER_2(iocsrrd_b, i64, env, tl) +DEF_HELPER_2(iocsrrd_h, i64, env, tl) +DEF_HELPER_2(iocsrrd_w, i64, env, tl) +DEF_HELPER_2(iocsrrd_d, i64, env, tl) +DEF_HELPER_3(iocsrwr_b, void, env, tl, tl) +DEF_HELPER_3(iocsrwr_h, void, env, tl, tl) +DEF_HELPER_3(iocsrwr_w, void, env, tl, tl) +DEF_HELPER_3(iocsrwr_d, void, env, tl, tl) + +/* TLB helper */ +DEF_HELPER_1(tlbwr, void, env) +DEF_HELPER_1(tlbfill, void, env) +DEF_HELPER_1(tlbsrch, void, env) +DEF_HELPER_1(tlbrd, void, env) +DEF_HELPER_1(tlbclr, void, env) +DEF_HELPER_1(tlbflush, void, env) +DEF_HELPER_1(invtlb_all, void, env) +DEF_HELPER_2(invtlb_all_g, void, env, i32) +DEF_HELPER_2(invtlb_all_asid, void, env, tl) +DEF_HELPER_3(invtlb_page_asid, void, env, tl, tl) +DEF_HELPER_3(invtlb_page_asid_or_g, void, env, tl, tl) + +DEF_HELPER_4(lddir, tl, env, tl, tl, i32) +DEF_HELPER_4(ldpte, void, env, tl, tl, i32) +DEF_HELPER_1(ertn, void, env) +DEF_HELPER_1(idle, void, env) +#endif diff --git a/target/loongarch/insn_trans/trans_arith.c.inc b/target/loongarch/insn_trans/trans_arith.c.inc new file mode 100644 index 0000000000..8e45eadbc8 --- /dev/null +++ b/target/loongarch/insn_trans/trans_arith.c.inc @@ -0,0 +1,304 @@ +/* 
SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +static bool gen_rrr(DisasContext *ctx, arg_rrr *a, + DisasExtend src1_ext, DisasExtend src2_ext, + DisasExtend dst_ext, void (*func)(TCGv, TCGv, TCGv)) +{ + TCGv dest = gpr_dst(ctx, a->rd, dst_ext); + TCGv src1 = gpr_src(ctx, a->rj, src1_ext); + TCGv src2 = gpr_src(ctx, a->rk, src2_ext); + + func(dest, src1, src2); + gen_set_gpr(a->rd, dest, dst_ext); + + return true; +} + +static bool gen_rri_v(DisasContext *ctx, arg_rr_i *a, + DisasExtend src_ext, DisasExtend dst_ext, + void (*func)(TCGv, TCGv, TCGv)) +{ + TCGv dest = gpr_dst(ctx, a->rd, dst_ext); + TCGv src1 = gpr_src(ctx, a->rj, src_ext); + TCGv src2 = tcg_constant_tl(a->imm); + + func(dest, src1, src2); + gen_set_gpr(a->rd, dest, dst_ext); + + return true; +} + +static bool gen_rri_c(DisasContext *ctx, arg_rr_i *a, + DisasExtend src_ext, DisasExtend dst_ext, + void (*func)(TCGv, TCGv, target_long)) +{ + TCGv dest = gpr_dst(ctx, a->rd, dst_ext); + TCGv src1 = gpr_src(ctx, a->rj, src_ext); + + func(dest, src1, a->imm); + gen_set_gpr(a->rd, dest, dst_ext); + + return true; +} + +static bool gen_rrr_sa(DisasContext *ctx, arg_rrr_sa *a, + DisasExtend src_ext, DisasExtend dst_ext, + void (*func)(TCGv, TCGv, TCGv, target_long)) +{ + TCGv dest = gpr_dst(ctx, a->rd, dst_ext); + TCGv src1 = gpr_src(ctx, a->rj, src_ext); + TCGv src2 = gpr_src(ctx, a->rk, src_ext); + + func(dest, src1, src2, a->sa); + gen_set_gpr(a->rd, dest, dst_ext); + + return true; +} + +static bool trans_lu12i_w(DisasContext *ctx, arg_lu12i_w *a) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + + tcg_gen_movi_tl(dest, a->imm << 12); + gen_set_gpr(a->rd, dest, EXT_NONE); + + return true; +} + +static bool gen_pc(DisasContext *ctx, arg_r_i *a, + target_ulong (*func)(target_ulong, int)) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + target_ulong addr = func(ctx->base.pc_next, a->imm); + + tcg_gen_movi_tl(dest, addr); + gen_set_gpr(a->rd, dest, EXT_NONE); + + return true; +} + +static void gen_slt(TCGv dest, TCGv src1, TCGv src2) +{ + tcg_gen_setcond_tl(TCG_COND_LT, dest, src1, src2); +} + +static void gen_sltu(TCGv dest, TCGv src1, TCGv src2) +{ + tcg_gen_setcond_tl(TCG_COND_LTU, dest, src1, src2); +} + +static void gen_mulh_w(TCGv dest, TCGv src1, TCGv src2) +{ + tcg_gen_mul_i64(dest, src1, src2); + tcg_gen_sari_i64(dest, dest, 32); +} + +static void gen_mulh_d(TCGv dest, TCGv src1, TCGv src2) +{ + TCGv discard = tcg_temp_new(); + tcg_gen_muls2_tl(discard, dest, src1, src2); + tcg_temp_free(discard); +} + +static void gen_mulh_du(TCGv dest, TCGv src1, TCGv src2) +{ + TCGv discard = tcg_temp_new(); + tcg_gen_mulu2_tl(discard, dest, src1, src2); + tcg_temp_free(discard); +} + +static void prep_divisor_d(TCGv ret, TCGv src1, TCGv src2) +{ + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + TCGv zero = tcg_constant_tl(0); + + /* + * If min / -1, set the divisor to 1. + * This avoids potential host overflow trap and produces min. + * If x / 0, set the divisor to 1. + * This avoids potential host overflow trap; + * the required result is undefined. 
+ */ + tcg_gen_setcondi_tl(TCG_COND_EQ, ret, src1, INT64_MIN); + tcg_gen_setcondi_tl(TCG_COND_EQ, t0, src2, -1); + tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src2, 0); + tcg_gen_and_tl(ret, ret, t0); + tcg_gen_or_tl(ret, ret, t1); + tcg_gen_movcond_tl(TCG_COND_NE, ret, ret, zero, ret, src2); + + tcg_temp_free(t0); + tcg_temp_free(t1); +} + +static void prep_divisor_du(TCGv ret, TCGv src2) +{ + TCGv zero = tcg_constant_tl(0); + TCGv one = tcg_constant_tl(1); + + /* + * If x / 0, set the divisor to 1. + * This avoids potential host overflow trap; + * the required result is undefined. + */ + tcg_gen_movcond_tl(TCG_COND_EQ, ret, src2, zero, one, src2); +} + +static void gen_div_d(TCGv dest, TCGv src1, TCGv src2) +{ + TCGv t0 = tcg_temp_new(); + prep_divisor_d(t0, src1, src2); + tcg_gen_div_tl(dest, src1, t0); + tcg_temp_free(t0); +} + +static void gen_rem_d(TCGv dest, TCGv src1, TCGv src2) +{ + TCGv t0 = tcg_temp_new(); + prep_divisor_d(t0, src1, src2); + tcg_gen_rem_tl(dest, src1, t0); + tcg_temp_free(t0); +} + +static void gen_div_du(TCGv dest, TCGv src1, TCGv src2) +{ + TCGv t0 = tcg_temp_new(); + prep_divisor_du(t0, src2); + tcg_gen_divu_tl(dest, src1, t0); + tcg_temp_free(t0); +} + +static void gen_rem_du(TCGv dest, TCGv src1, TCGv src2) +{ + TCGv t0 = tcg_temp_new(); + prep_divisor_du(t0, src2); + tcg_gen_remu_tl(dest, src1, t0); + tcg_temp_free(t0); +} + +static void gen_div_w(TCGv dest, TCGv src1, TCGv src2) +{ + TCGv t0 = tcg_temp_new(); + /* We need not check for integer overflow for div_w. */ + prep_divisor_du(t0, src2); + tcg_gen_div_tl(dest, src1, t0); + tcg_temp_free(t0); +} + +static void gen_rem_w(TCGv dest, TCGv src1, TCGv src2) +{ + TCGv t0 = tcg_temp_new(); + /* We need not check for integer overflow for rem_w. */ + prep_divisor_du(t0, src2); + tcg_gen_rem_tl(dest, src1, t0); + tcg_temp_free(t0); +} + +static void gen_alsl(TCGv dest, TCGv src1, TCGv src2, target_long sa) +{ + TCGv t0 = tcg_temp_new(); + tcg_gen_shli_tl(t0, src1, sa); + tcg_gen_add_tl(dest, t0, src2); + tcg_temp_free(t0); +} + +static bool trans_lu32i_d(DisasContext *ctx, arg_lu32i_d *a) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv src1 = gpr_src(ctx, a->rd, EXT_NONE); + TCGv src2 = tcg_constant_tl(a->imm); + + tcg_gen_deposit_tl(dest, src1, src2, 32, 32); + gen_set_gpr(a->rd, dest, EXT_NONE); + + return true; +} + +static bool trans_lu52i_d(DisasContext *ctx, arg_lu52i_d *a) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = tcg_constant_tl(a->imm); + + tcg_gen_deposit_tl(dest, src1, src2, 52, 12); + gen_set_gpr(a->rd, dest, EXT_NONE); + + return true; +} + +static target_ulong gen_pcaddi(target_ulong pc, int imm) +{ + return pc + (imm << 2); +} + +static target_ulong gen_pcalau12i(target_ulong pc, int imm) +{ + return (pc + (imm << 12)) & ~0xfff; +} + +static target_ulong gen_pcaddu12i(target_ulong pc, int imm) +{ + return pc + (imm << 12); +} + +static target_ulong gen_pcaddu18i(target_ulong pc, int imm) +{ + return pc + ((target_ulong)(imm) << 18); +} + +static bool trans_addu16i_d(DisasContext *ctx, arg_addu16i_d *a) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + + tcg_gen_addi_tl(dest, src1, a->imm << 16); + gen_set_gpr(a->rd, dest, EXT_NONE); + + return true; +} + +TRANS(add_w, gen_rrr, EXT_NONE, EXT_NONE, EXT_SIGN, tcg_gen_add_tl) +TRANS(add_d, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, tcg_gen_add_tl) +TRANS(sub_w, gen_rrr, EXT_NONE, EXT_NONE, EXT_SIGN, tcg_gen_sub_tl) +TRANS(sub_d, gen_rrr, 
EXT_NONE, EXT_NONE, EXT_NONE, tcg_gen_sub_tl) +TRANS(and, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, tcg_gen_and_tl) +TRANS(or, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, tcg_gen_or_tl) +TRANS(xor, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, tcg_gen_xor_tl) +TRANS(nor, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, tcg_gen_nor_tl) +TRANS(andn, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, tcg_gen_andc_tl) +TRANS(orn, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, tcg_gen_orc_tl) +TRANS(slt, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, gen_slt) +TRANS(sltu, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, gen_sltu) +TRANS(mul_w, gen_rrr, EXT_SIGN, EXT_SIGN, EXT_SIGN, tcg_gen_mul_tl) +TRANS(mul_d, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, tcg_gen_mul_tl) +TRANS(mulh_w, gen_rrr, EXT_SIGN, EXT_SIGN, EXT_NONE, gen_mulh_w) +TRANS(mulh_wu, gen_rrr, EXT_ZERO, EXT_ZERO, EXT_NONE, gen_mulh_w) +TRANS(mulh_d, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, gen_mulh_d) +TRANS(mulh_du, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, gen_mulh_du) +TRANS(mulw_d_w, gen_rrr, EXT_SIGN, EXT_SIGN, EXT_NONE, tcg_gen_mul_tl) +TRANS(mulw_d_wu, gen_rrr, EXT_ZERO, EXT_ZERO, EXT_NONE, tcg_gen_mul_tl) +TRANS(div_w, gen_rrr, EXT_SIGN, EXT_SIGN, EXT_SIGN, gen_div_w) +TRANS(mod_w, gen_rrr, EXT_SIGN, EXT_SIGN, EXT_SIGN, gen_rem_w) +TRANS(div_wu, gen_rrr, EXT_ZERO, EXT_ZERO, EXT_SIGN, gen_div_du) +TRANS(mod_wu, gen_rrr, EXT_ZERO, EXT_ZERO, EXT_SIGN, gen_rem_du) +TRANS(div_d, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, gen_div_d) +TRANS(mod_d, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, gen_rem_d) +TRANS(div_du, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, gen_div_du) +TRANS(mod_du, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, gen_rem_du) +TRANS(slti, gen_rri_v, EXT_NONE, EXT_NONE, gen_slt) +TRANS(sltui, gen_rri_v, EXT_NONE, EXT_NONE, gen_sltu) +TRANS(addi_w, gen_rri_c, EXT_NONE, EXT_SIGN, tcg_gen_addi_tl) +TRANS(addi_d, gen_rri_c, EXT_NONE, EXT_NONE, tcg_gen_addi_tl) +TRANS(alsl_w, gen_rrr_sa, EXT_NONE, EXT_SIGN, gen_alsl) +TRANS(alsl_wu, gen_rrr_sa, EXT_NONE, EXT_ZERO, gen_alsl) +TRANS(alsl_d, gen_rrr_sa, EXT_NONE, EXT_NONE, gen_alsl) +TRANS(pcaddi, gen_pc, gen_pcaddi) +TRANS(pcalau12i, gen_pc, gen_pcalau12i) +TRANS(pcaddu12i, gen_pc, gen_pcaddu12i) +TRANS(pcaddu18i, gen_pc, gen_pcaddu18i) +TRANS(andi, gen_rri_c, EXT_NONE, EXT_NONE, tcg_gen_andi_tl) +TRANS(ori, gen_rri_c, EXT_NONE, EXT_NONE, tcg_gen_ori_tl) +TRANS(xori, gen_rri_c, EXT_NONE, EXT_NONE, tcg_gen_xori_tl) diff --git a/target/loongarch/insn_trans/trans_atomic.c.inc b/target/loongarch/insn_trans/trans_atomic.c.inc new file mode 100644 index 0000000000..6763c1c301 --- /dev/null +++ b/target/loongarch/insn_trans/trans_atomic.c.inc @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +static bool gen_ll(DisasContext *ctx, arg_rr_i *a, MemOp mop) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv t0 = tcg_temp_new(); + + tcg_gen_addi_tl(t0, src1, a->imm); + tcg_gen_qemu_ld_i64(dest, t0, ctx->mem_idx, mop); + tcg_gen_st_tl(t0, cpu_env, offsetof(CPULoongArchState, lladdr)); + tcg_gen_st_tl(dest, cpu_env, offsetof(CPULoongArchState, llval)); + gen_set_gpr(a->rd, dest, EXT_NONE); + tcg_temp_free(t0); + + return true; +} + +static bool gen_sc(DisasContext *ctx, arg_rr_i *a, MemOp mop) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = gpr_src(ctx, a->rd, EXT_NONE); + TCGv t0 = tcg_temp_new(); + TCGv val = tcg_temp_new(); + + TCGLabel *l1 = gen_new_label(); + TCGLabel *done 
= gen_new_label(); + + tcg_gen_addi_tl(t0, src1, a->imm); + tcg_gen_brcond_tl(TCG_COND_EQ, t0, cpu_lladdr, l1); + tcg_gen_movi_tl(dest, 0); + tcg_gen_br(done); + + gen_set_label(l1); + tcg_gen_mov_tl(val, src2); + /* generate cmpxchg */ + tcg_gen_atomic_cmpxchg_tl(t0, cpu_lladdr, cpu_llval, + val, ctx->mem_idx, mop); + tcg_gen_setcond_tl(TCG_COND_EQ, dest, t0, cpu_llval); + gen_set_label(done); + gen_set_gpr(a->rd, dest, EXT_NONE); + tcg_temp_free(t0); + tcg_temp_free(val); + + return true; +} + +static bool gen_am(DisasContext *ctx, arg_rrr *a, + void (*func)(TCGv, TCGv, TCGv, TCGArg, MemOp), + MemOp mop) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); + TCGv val = gpr_src(ctx, a->rk, EXT_NONE); + + if (a->rd != 0 && (a->rj == a->rd || a->rk == a->rd)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Warning: source register overlaps destination register" + "in atomic insn at pc=0x" TARGET_FMT_lx "\n", + ctx->base.pc_next - 4); + return false; + } + + func(dest, addr, val, ctx->mem_idx, mop); + gen_set_gpr(a->rd, dest, EXT_NONE); + + return true; +} + +TRANS(ll_w, gen_ll, MO_TESL) +TRANS(sc_w, gen_sc, MO_TESL) +TRANS(ll_d, gen_ll, MO_TEUQ) +TRANS(sc_d, gen_sc, MO_TEUQ) +TRANS(amswap_w, gen_am, tcg_gen_atomic_xchg_tl, MO_TESL) +TRANS(amswap_d, gen_am, tcg_gen_atomic_xchg_tl, MO_TEUQ) +TRANS(amadd_w, gen_am, tcg_gen_atomic_fetch_add_tl, MO_TESL) +TRANS(amadd_d, gen_am, tcg_gen_atomic_fetch_add_tl, MO_TEUQ) +TRANS(amand_w, gen_am, tcg_gen_atomic_fetch_and_tl, MO_TESL) +TRANS(amand_d, gen_am, tcg_gen_atomic_fetch_and_tl, MO_TEUQ) +TRANS(amor_w, gen_am, tcg_gen_atomic_fetch_or_tl, MO_TESL) +TRANS(amor_d, gen_am, tcg_gen_atomic_fetch_or_tl, MO_TEUQ) +TRANS(amxor_w, gen_am, tcg_gen_atomic_fetch_xor_tl, MO_TESL) +TRANS(amxor_d, gen_am, tcg_gen_atomic_fetch_xor_tl, MO_TEUQ) +TRANS(ammax_w, gen_am, tcg_gen_atomic_fetch_smax_tl, MO_TESL) +TRANS(ammax_d, gen_am, tcg_gen_atomic_fetch_smax_tl, MO_TEUQ) +TRANS(ammin_w, gen_am, tcg_gen_atomic_fetch_smin_tl, MO_TESL) +TRANS(ammin_d, gen_am, tcg_gen_atomic_fetch_smin_tl, MO_TEUQ) +TRANS(ammax_wu, gen_am, tcg_gen_atomic_fetch_umax_tl, MO_TESL) +TRANS(ammax_du, gen_am, tcg_gen_atomic_fetch_umax_tl, MO_TEUQ) +TRANS(ammin_wu, gen_am, tcg_gen_atomic_fetch_umin_tl, MO_TESL) +TRANS(ammin_du, gen_am, tcg_gen_atomic_fetch_umin_tl, MO_TEUQ) +TRANS(amswap_db_w, gen_am, tcg_gen_atomic_xchg_tl, MO_TESL) +TRANS(amswap_db_d, gen_am, tcg_gen_atomic_xchg_tl, MO_TEUQ) +TRANS(amadd_db_w, gen_am, tcg_gen_atomic_fetch_add_tl, MO_TESL) +TRANS(amadd_db_d, gen_am, tcg_gen_atomic_fetch_add_tl, MO_TEUQ) +TRANS(amand_db_w, gen_am, tcg_gen_atomic_fetch_and_tl, MO_TESL) +TRANS(amand_db_d, gen_am, tcg_gen_atomic_fetch_and_tl, MO_TEUQ) +TRANS(amor_db_w, gen_am, tcg_gen_atomic_fetch_or_tl, MO_TESL) +TRANS(amor_db_d, gen_am, tcg_gen_atomic_fetch_or_tl, MO_TEUQ) +TRANS(amxor_db_w, gen_am, tcg_gen_atomic_fetch_xor_tl, MO_TESL) +TRANS(amxor_db_d, gen_am, tcg_gen_atomic_fetch_xor_tl, MO_TEUQ) +TRANS(ammax_db_w, gen_am, tcg_gen_atomic_fetch_smax_tl, MO_TESL) +TRANS(ammax_db_d, gen_am, tcg_gen_atomic_fetch_smax_tl, MO_TEUQ) +TRANS(ammin_db_w, gen_am, tcg_gen_atomic_fetch_smin_tl, MO_TESL) +TRANS(ammin_db_d, gen_am, tcg_gen_atomic_fetch_smin_tl, MO_TEUQ) +TRANS(ammax_db_wu, gen_am, tcg_gen_atomic_fetch_umax_tl, MO_TESL) +TRANS(ammax_db_du, gen_am, tcg_gen_atomic_fetch_umax_tl, MO_TEUQ) +TRANS(ammin_db_wu, gen_am, tcg_gen_atomic_fetch_umin_tl, MO_TESL) +TRANS(ammin_db_du, gen_am, tcg_gen_atomic_fetch_umin_tl, MO_TEUQ) diff --git 
a/target/loongarch/insn_trans/trans_bit.c.inc b/target/loongarch/insn_trans/trans_bit.c.inc new file mode 100644 index 0000000000..b01e4aeb23 --- /dev/null +++ b/target/loongarch/insn_trans/trans_bit.c.inc @@ -0,0 +1,220 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +static bool gen_rr(DisasContext *ctx, arg_rr *a, + DisasExtend src_ext, DisasExtend dst_ext, + void (*func)(TCGv, TCGv)) +{ + TCGv dest = gpr_dst(ctx, a->rd, dst_ext); + TCGv src1 = gpr_src(ctx, a->rj, src_ext); + + func(dest, src1); + gen_set_gpr(a->rd, dest, dst_ext); + + return true; +} + +static void gen_bytepick_w(TCGv dest, TCGv src1, TCGv src2, target_long sa) +{ + tcg_gen_concat_tl_i64(dest, src1, src2); + tcg_gen_sextract_i64(dest, dest, (32 - sa * 8), 32); +} + +static void gen_bytepick_d(TCGv dest, TCGv src1, TCGv src2, target_long sa) +{ + tcg_gen_extract2_i64(dest, src1, src2, (64 - sa * 8)); +} + +static bool gen_bstrins(DisasContext *ctx, arg_rr_ms_ls *a, + DisasExtend dst_ext) +{ + TCGv src1 = gpr_src(ctx, a->rd, EXT_NONE); + TCGv src2 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + + if (a->ls > a->ms) { + return false; + } + + tcg_gen_deposit_tl(dest, src1, src2, a->ls, a->ms - a->ls + 1); + gen_set_gpr(a->rd, dest, dst_ext); + return true; +} + +static bool gen_bstrpick(DisasContext *ctx, arg_rr_ms_ls *a, + DisasExtend dst_ext) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + + if (a->ls > a->ms) { + return false; + } + + tcg_gen_extract_tl(dest, src1, a->ls, a->ms - a->ls + 1); + gen_set_gpr(a->rd, dest, dst_ext); + return true; +} + +static void gen_clz_w(TCGv dest, TCGv src1) +{ + tcg_gen_clzi_tl(dest, src1, TARGET_LONG_BITS); + tcg_gen_subi_tl(dest, dest, TARGET_LONG_BITS - 32); +} + +static void gen_clo_w(TCGv dest, TCGv src1) +{ + tcg_gen_not_tl(dest, src1); + tcg_gen_ext32u_tl(dest, dest); + gen_clz_w(dest, dest); +} + +static void gen_ctz_w(TCGv dest, TCGv src1) +{ + tcg_gen_ori_tl(dest, src1, (target_ulong)MAKE_64BIT_MASK(32, 32)); + tcg_gen_ctzi_tl(dest, dest, TARGET_LONG_BITS); +} + +static void gen_cto_w(TCGv dest, TCGv src1) +{ + tcg_gen_not_tl(dest, src1); + gen_ctz_w(dest, dest); +} + +static void gen_clz_d(TCGv dest, TCGv src1) +{ + tcg_gen_clzi_i64(dest, src1, TARGET_LONG_BITS); +} + +static void gen_clo_d(TCGv dest, TCGv src1) +{ + tcg_gen_not_tl(dest, src1); + gen_clz_d(dest, dest); +} + +static void gen_ctz_d(TCGv dest, TCGv src1) +{ + tcg_gen_ctzi_tl(dest, src1, TARGET_LONG_BITS); +} + +static void gen_cto_d(TCGv dest, TCGv src1) +{ + tcg_gen_not_tl(dest, src1); + gen_ctz_d(dest, dest); +} + +static void gen_revb_2w(TCGv dest, TCGv src1) +{ + tcg_gen_bswap64_i64(dest, src1); + tcg_gen_rotri_i64(dest, dest, 32); +} + +static void gen_revb_2h(TCGv dest, TCGv src1) +{ + TCGv mask = tcg_constant_tl(0x00FF00FF); + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + + tcg_gen_shri_tl(t0, src1, 8); + tcg_gen_and_tl(t0, t0, mask); + tcg_gen_and_tl(t1, src1, mask); + tcg_gen_shli_tl(t1, t1, 8); + tcg_gen_or_tl(dest, t0, t1); + + tcg_temp_free(t0); + tcg_temp_free(t1); +} + +static void gen_revb_4h(TCGv dest, TCGv src1) +{ + TCGv mask = tcg_constant_tl(0x00FF00FF00FF00FFULL); + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + + tcg_gen_shri_tl(t0, src1, 8); + tcg_gen_and_tl(t0, t0, mask); + tcg_gen_and_tl(t1, src1, mask); + tcg_gen_shli_tl(t1, t1, 8); + tcg_gen_or_tl(dest, t0, t1); + + tcg_temp_free(t0); + tcg_temp_free(t1); +} + 
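/*
 * Worked example, not part of the patch: gen_revb_2h/gen_revb_4h above swap
 * the bytes inside each 16-bit halfword by masking with 0x00ff..00ff and
 * recombining the two shifted halves, e.g. for gen_revb_4h:
 *
 *   src1               = 0x1122334455667788
 *   (src1 >> 8) & mask = 0x0011003300550077
 *   (src1 & mask) << 8 = 0x2200440066008800
 *   bitwise OR         = 0x2211443366558877
 *
 * Similarly, gen_clz_w counts leading zeros over the zero-extended 64-bit
 * value and then subtracts 32; for src1 = 0x00000001 that is 63 - 32 = 31,
 * which matches a 32-bit clz.
 */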
+static void gen_revh_2w(TCGv dest, TCGv src1) +{ + TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 mask = tcg_constant_i64(0x0000ffff0000ffffull); + + tcg_gen_shri_i64(t0, src1, 16); + tcg_gen_and_i64(t1, src1, mask); + tcg_gen_and_i64(t0, t0, mask); + tcg_gen_shli_i64(t1, t1, 16); + tcg_gen_or_i64(dest, t1, t0); + + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); +} + +static void gen_revh_d(TCGv dest, TCGv src1) +{ + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + TCGv mask = tcg_constant_tl(0x0000FFFF0000FFFFULL); + + tcg_gen_shri_tl(t1, src1, 16); + tcg_gen_and_tl(t1, t1, mask); + tcg_gen_and_tl(t0, src1, mask); + tcg_gen_shli_tl(t0, t0, 16); + tcg_gen_or_tl(t0, t0, t1); + tcg_gen_rotri_tl(dest, t0, 32); + + tcg_temp_free(t0); + tcg_temp_free(t1); +} + +static void gen_maskeqz(TCGv dest, TCGv src1, TCGv src2) +{ + TCGv zero = tcg_constant_tl(0); + + tcg_gen_movcond_tl(TCG_COND_EQ, dest, src2, zero, zero, src1); +} + +static void gen_masknez(TCGv dest, TCGv src1, TCGv src2) +{ + TCGv zero = tcg_constant_tl(0); + + tcg_gen_movcond_tl(TCG_COND_NE, dest, src2, zero, zero, src1); +} + +TRANS(ext_w_h, gen_rr, EXT_NONE, EXT_NONE, tcg_gen_ext16s_tl) +TRANS(ext_w_b, gen_rr, EXT_NONE, EXT_NONE, tcg_gen_ext8s_tl) +TRANS(clo_w, gen_rr, EXT_NONE, EXT_NONE, gen_clo_w) +TRANS(clz_w, gen_rr, EXT_ZERO, EXT_NONE, gen_clz_w) +TRANS(cto_w, gen_rr, EXT_NONE, EXT_NONE, gen_cto_w) +TRANS(ctz_w, gen_rr, EXT_NONE, EXT_NONE, gen_ctz_w) +TRANS(clo_d, gen_rr, EXT_NONE, EXT_NONE, gen_clo_d) +TRANS(clz_d, gen_rr, EXT_NONE, EXT_NONE, gen_clz_d) +TRANS(cto_d, gen_rr, EXT_NONE, EXT_NONE, gen_cto_d) +TRANS(ctz_d, gen_rr, EXT_NONE, EXT_NONE, gen_ctz_d) +TRANS(revb_2h, gen_rr, EXT_NONE, EXT_SIGN, gen_revb_2h) +TRANS(revb_4h, gen_rr, EXT_NONE, EXT_NONE, gen_revb_4h) +TRANS(revb_2w, gen_rr, EXT_NONE, EXT_NONE, gen_revb_2w) +TRANS(revb_d, gen_rr, EXT_NONE, EXT_NONE, tcg_gen_bswap64_i64) +TRANS(revh_2w, gen_rr, EXT_NONE, EXT_NONE, gen_revh_2w) +TRANS(revh_d, gen_rr, EXT_NONE, EXT_NONE, gen_revh_d) +TRANS(bitrev_4b, gen_rr, EXT_ZERO, EXT_SIGN, gen_helper_bitswap) +TRANS(bitrev_8b, gen_rr, EXT_NONE, EXT_NONE, gen_helper_bitswap) +TRANS(bitrev_w, gen_rr, EXT_NONE, EXT_SIGN, gen_helper_bitrev_w) +TRANS(bitrev_d, gen_rr, EXT_NONE, EXT_NONE, gen_helper_bitrev_d) +TRANS(maskeqz, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, gen_maskeqz) +TRANS(masknez, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, gen_masknez) +TRANS(bytepick_w, gen_rrr_sa, EXT_NONE, EXT_NONE, gen_bytepick_w) +TRANS(bytepick_d, gen_rrr_sa, EXT_NONE, EXT_NONE, gen_bytepick_d) +TRANS(bstrins_w, gen_bstrins, EXT_SIGN) +TRANS(bstrins_d, gen_bstrins, EXT_NONE) +TRANS(bstrpick_w, gen_bstrpick, EXT_SIGN) +TRANS(bstrpick_d, gen_bstrpick, EXT_NONE) diff --git a/target/loongarch/insn_trans/trans_branch.c.inc b/target/loongarch/insn_trans/trans_branch.c.inc new file mode 100644 index 0000000000..65dbdff41e --- /dev/null +++ b/target/loongarch/insn_trans/trans_branch.c.inc @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +static bool trans_b(DisasContext *ctx, arg_b *a) +{ + gen_goto_tb(ctx, 0, ctx->base.pc_next + a->offs); + ctx->base.is_jmp = DISAS_NORETURN; + return true; +} + +static bool trans_bl(DisasContext *ctx, arg_bl *a) +{ + tcg_gen_movi_tl(cpu_gpr[1], ctx->base.pc_next + 4); + gen_goto_tb(ctx, 0, ctx->base.pc_next + a->offs); + ctx->base.is_jmp = DISAS_NORETURN; + return true; +} + +static bool trans_jirl(DisasContext *ctx, arg_jirl *a) +{ + TCGv 
dest = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + + tcg_gen_addi_tl(cpu_pc, src1, a->offs); + tcg_gen_movi_tl(dest, ctx->base.pc_next + 4); + gen_set_gpr(a->rd, dest, EXT_NONE); + tcg_gen_lookup_and_goto_ptr(); + ctx->base.is_jmp = DISAS_NORETURN; + return true; +} + +static void gen_bc(DisasContext *ctx, TCGv src1, TCGv src2, + target_long offs, TCGCond cond) +{ + TCGLabel *l = gen_new_label(); + tcg_gen_brcond_tl(cond, src1, src2, l); + gen_goto_tb(ctx, 1, ctx->base.pc_next + 4); + gen_set_label(l); + gen_goto_tb(ctx, 0, ctx->base.pc_next + offs); + ctx->base.is_jmp = DISAS_NORETURN; +} + +static bool gen_rr_bc(DisasContext *ctx, arg_rr_offs *a, TCGCond cond) +{ + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = gpr_src(ctx, a->rd, EXT_NONE); + + gen_bc(ctx, src1, src2, a->offs, cond); + return true; +} + +static bool gen_rz_bc(DisasContext *ctx, arg_r_offs *a, TCGCond cond) +{ + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = tcg_constant_tl(0); + + gen_bc(ctx, src1, src2, a->offs, cond); + return true; +} + +static bool gen_cz_bc(DisasContext *ctx, arg_c_offs *a, TCGCond cond) +{ + TCGv src1 = tcg_temp_new(); + TCGv src2 = tcg_constant_tl(0); + + tcg_gen_ld8u_tl(src1, cpu_env, + offsetof(CPULoongArchState, cf[a->cj])); + gen_bc(ctx, src1, src2, a->offs, cond); + return true; +} + +TRANS(beq, gen_rr_bc, TCG_COND_EQ) +TRANS(bne, gen_rr_bc, TCG_COND_NE) +TRANS(blt, gen_rr_bc, TCG_COND_LT) +TRANS(bge, gen_rr_bc, TCG_COND_GE) +TRANS(bltu, gen_rr_bc, TCG_COND_LTU) +TRANS(bgeu, gen_rr_bc, TCG_COND_GEU) +TRANS(beqz, gen_rz_bc, TCG_COND_EQ) +TRANS(bnez, gen_rz_bc, TCG_COND_NE) +TRANS(bceqz, gen_cz_bc, TCG_COND_EQ) +TRANS(bcnez, gen_cz_bc, TCG_COND_NE) diff --git a/target/loongarch/insn_trans/trans_extra.c.inc b/target/loongarch/insn_trans/trans_extra.c.inc new file mode 100644 index 0000000000..ad713cd61e --- /dev/null +++ b/target/loongarch/insn_trans/trans_extra.c.inc @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +static bool trans_break(DisasContext *ctx, arg_break *a) +{ + generate_exception(ctx, EXCCODE_BRK); + return true; +} + +static bool trans_syscall(DisasContext *ctx, arg_syscall *a) +{ + generate_exception(ctx, EXCCODE_SYS); + return true; +} + +static bool trans_asrtle_d(DisasContext *ctx, arg_asrtle_d * a) +{ + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + + gen_helper_asrtle_d(cpu_env, src1, src2); + return true; +} + +static bool trans_asrtgt_d(DisasContext *ctx, arg_asrtgt_d * a) +{ + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + + gen_helper_asrtgt_d(cpu_env, src1, src2); + return true; +} + +static bool gen_rdtime(DisasContext *ctx, arg_rr *a, + bool word, bool high) +{ + TCGv dst1 = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv dst2 = gpr_dst(ctx, a->rj, EXT_NONE); + + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } + gen_helper_rdtime_d(dst1, cpu_env); + if (word) { + tcg_gen_sextract_tl(dst1, dst1, high ? 
32 : 0, 32); + } + tcg_gen_ld_i64(dst2, cpu_env, offsetof(CPULoongArchState, CSR_TID)); + + return true; +} + +static bool trans_rdtimel_w(DisasContext *ctx, arg_rdtimel_w *a) +{ + return gen_rdtime(ctx, a, 1, 0); +} + +static bool trans_rdtimeh_w(DisasContext *ctx, arg_rdtimeh_w *a) +{ + return gen_rdtime(ctx, a, 1, 1); +} + +static bool trans_rdtime_d(DisasContext *ctx, arg_rdtime_d *a) +{ + return gen_rdtime(ctx, a, 0, 0); +} + +static bool trans_cpucfg(DisasContext *ctx, arg_cpucfg *a) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + + gen_helper_cpucfg(dest, cpu_env, src1); + gen_set_gpr(a->rd, dest, EXT_NONE); + + return true; +} + +static bool gen_crc(DisasContext *ctx, arg_rrr *a, + void (*func)(TCGv, TCGv, TCGv, TCGv), + TCGv tsz) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_SIGN); + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + + func(dest, src2, src1, tsz); + gen_set_gpr(a->rd, dest, EXT_SIGN); + + return true; +} + +TRANS(crc_w_b_w, gen_crc, gen_helper_crc32, tcg_constant_tl(1)) +TRANS(crc_w_h_w, gen_crc, gen_helper_crc32, tcg_constant_tl(2)) +TRANS(crc_w_w_w, gen_crc, gen_helper_crc32, tcg_constant_tl(4)) +TRANS(crc_w_d_w, gen_crc, gen_helper_crc32, tcg_constant_tl(8)) +TRANS(crcc_w_b_w, gen_crc, gen_helper_crc32c, tcg_constant_tl(1)) +TRANS(crcc_w_h_w, gen_crc, gen_helper_crc32c, tcg_constant_tl(2)) +TRANS(crcc_w_w_w, gen_crc, gen_helper_crc32c, tcg_constant_tl(4)) +TRANS(crcc_w_d_w, gen_crc, gen_helper_crc32c, tcg_constant_tl(8)) diff --git a/target/loongarch/insn_trans/trans_farith.c.inc b/target/loongarch/insn_trans/trans_farith.c.inc new file mode 100644 index 0000000000..7081fbb89b --- /dev/null +++ b/target/loongarch/insn_trans/trans_farith.c.inc @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#ifndef CONFIG_USER_ONLY +#define CHECK_FPE do { \ + if ((ctx->base.tb->flags & HW_FLAGS_EUEN_FPE) == 0) { \ + generate_exception(ctx, EXCCODE_FPD); \ + return true; \ + } \ +} while (0) +#else +#define CHECK_FPE +#endif + +static bool gen_fff(DisasContext *ctx, arg_fff *a, + void (*func)(TCGv, TCGv_env, TCGv, TCGv)) +{ + CHECK_FPE; + + func(cpu_fpr[a->fd], cpu_env, cpu_fpr[a->fj], cpu_fpr[a->fk]); + return true; +} + +static bool gen_ff(DisasContext *ctx, arg_ff *a, + void (*func)(TCGv, TCGv_env, TCGv)) +{ + CHECK_FPE; + + func(cpu_fpr[a->fd], cpu_env, cpu_fpr[a->fj]); + return true; +} + +static bool gen_muladd(DisasContext *ctx, arg_ffff *a, + void (*func)(TCGv, TCGv_env, TCGv, TCGv, TCGv, TCGv_i32), + int flag) +{ + TCGv_i32 tflag = tcg_constant_i32(flag); + + CHECK_FPE; + + func(cpu_fpr[a->fd], cpu_env, cpu_fpr[a->fj], + cpu_fpr[a->fk], cpu_fpr[a->fa], tflag); + return true; +} + +static bool trans_fcopysign_s(DisasContext *ctx, arg_fcopysign_s *a) +{ + CHECK_FPE; + + tcg_gen_deposit_i64(cpu_fpr[a->fd], cpu_fpr[a->fk], cpu_fpr[a->fj], 0, 31); + return true; +} + +static bool trans_fcopysign_d(DisasContext *ctx, arg_fcopysign_d *a) +{ + CHECK_FPE; + + tcg_gen_deposit_i64(cpu_fpr[a->fd], cpu_fpr[a->fk], cpu_fpr[a->fj], 0, 63); + return true; +} + +static bool trans_fabs_s(DisasContext *ctx, arg_fabs_s *a) +{ + CHECK_FPE; + + tcg_gen_andi_i64(cpu_fpr[a->fd], cpu_fpr[a->fj], MAKE_64BIT_MASK(0, 31)); + gen_nanbox_s(cpu_fpr[a->fd], cpu_fpr[a->fd]); + return true; +} + +static bool trans_fabs_d(DisasContext *ctx, arg_fabs_d *a) +{ + CHECK_FPE; + + tcg_gen_andi_i64(cpu_fpr[a->fd], cpu_fpr[a->fj], 
MAKE_64BIT_MASK(0, 63)); + return true; +} + +static bool trans_fneg_s(DisasContext *ctx, arg_fneg_s *a) +{ + CHECK_FPE; + + tcg_gen_xori_i64(cpu_fpr[a->fd], cpu_fpr[a->fj], 0x80000000); + gen_nanbox_s(cpu_fpr[a->fd], cpu_fpr[a->fd]); + return true; +} + +static bool trans_fneg_d(DisasContext *ctx, arg_fneg_d *a) +{ + CHECK_FPE; + + tcg_gen_xori_i64(cpu_fpr[a->fd], cpu_fpr[a->fj], 0x8000000000000000LL); + return true; +} + +TRANS(fadd_s, gen_fff, gen_helper_fadd_s) +TRANS(fadd_d, gen_fff, gen_helper_fadd_d) +TRANS(fsub_s, gen_fff, gen_helper_fsub_s) +TRANS(fsub_d, gen_fff, gen_helper_fsub_d) +TRANS(fmul_s, gen_fff, gen_helper_fmul_s) +TRANS(fmul_d, gen_fff, gen_helper_fmul_d) +TRANS(fdiv_s, gen_fff, gen_helper_fdiv_s) +TRANS(fdiv_d, gen_fff, gen_helper_fdiv_d) +TRANS(fmax_s, gen_fff, gen_helper_fmax_s) +TRANS(fmax_d, gen_fff, gen_helper_fmax_d) +TRANS(fmin_s, gen_fff, gen_helper_fmin_s) +TRANS(fmin_d, gen_fff, gen_helper_fmin_d) +TRANS(fmaxa_s, gen_fff, gen_helper_fmaxa_s) +TRANS(fmaxa_d, gen_fff, gen_helper_fmaxa_d) +TRANS(fmina_s, gen_fff, gen_helper_fmina_s) +TRANS(fmina_d, gen_fff, gen_helper_fmina_d) +TRANS(fscaleb_s, gen_fff, gen_helper_fscaleb_s) +TRANS(fscaleb_d, gen_fff, gen_helper_fscaleb_d) +TRANS(fsqrt_s, gen_ff, gen_helper_fsqrt_s) +TRANS(fsqrt_d, gen_ff, gen_helper_fsqrt_d) +TRANS(frecip_s, gen_ff, gen_helper_frecip_s) +TRANS(frecip_d, gen_ff, gen_helper_frecip_d) +TRANS(frsqrt_s, gen_ff, gen_helper_frsqrt_s) +TRANS(frsqrt_d, gen_ff, gen_helper_frsqrt_d) +TRANS(flogb_s, gen_ff, gen_helper_flogb_s) +TRANS(flogb_d, gen_ff, gen_helper_flogb_d) +TRANS(fclass_s, gen_ff, gen_helper_fclass_s) +TRANS(fclass_d, gen_ff, gen_helper_fclass_d) +TRANS(fmadd_s, gen_muladd, gen_helper_fmuladd_s, 0) +TRANS(fmadd_d, gen_muladd, gen_helper_fmuladd_d, 0) +TRANS(fmsub_s, gen_muladd, gen_helper_fmuladd_s, float_muladd_negate_c) +TRANS(fmsub_d, gen_muladd, gen_helper_fmuladd_d, float_muladd_negate_c) +TRANS(fnmadd_s, gen_muladd, gen_helper_fmuladd_s, float_muladd_negate_result) +TRANS(fnmadd_d, gen_muladd, gen_helper_fmuladd_d, float_muladd_negate_result) +TRANS(fnmsub_s, gen_muladd, gen_helper_fmuladd_s, + float_muladd_negate_c | float_muladd_negate_result) +TRANS(fnmsub_d, gen_muladd, gen_helper_fmuladd_d, + float_muladd_negate_c | float_muladd_negate_result) diff --git a/target/loongarch/insn_trans/trans_fcmp.c.inc b/target/loongarch/insn_trans/trans_fcmp.c.inc new file mode 100644 index 0000000000..2ccf646ccb --- /dev/null +++ b/target/loongarch/insn_trans/trans_fcmp.c.inc @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +/* bit0(signaling/quiet) bit1(lt) bit2(eq) bit3(un) bit4(neq) */ +static uint32_t get_fcmp_flags(int cond) +{ + uint32_t flags = 0; + + if (cond & 0x1) { + flags |= FCMP_LT; + } + if (cond & 0x2) { + flags |= FCMP_EQ; + } + if (cond & 0x4) { + flags |= FCMP_UN; + } + if (cond & 0x8) { + flags |= FCMP_GT | FCMP_LT; + } + return flags; +} + +static bool trans_fcmp_cond_s(DisasContext *ctx, arg_fcmp_cond_s *a) +{ + TCGv var; + uint32_t flags; + void (*fn)(TCGv, TCGv_env, TCGv, TCGv, TCGv_i32); + + CHECK_FPE; + + var = tcg_temp_new(); + fn = (a->fcond & 1 ? 
gen_helper_fcmp_s_s : gen_helper_fcmp_c_s); + flags = get_fcmp_flags(a->fcond >> 1); + + fn(var, cpu_env, cpu_fpr[a->fj], cpu_fpr[a->fk], tcg_constant_i32(flags)); + + tcg_gen_st8_tl(var, cpu_env, offsetof(CPULoongArchState, cf[a->cd])); + tcg_temp_free(var); + return true; +} + +static bool trans_fcmp_cond_d(DisasContext *ctx, arg_fcmp_cond_d *a) +{ + TCGv var; + uint32_t flags; + void (*fn)(TCGv, TCGv_env, TCGv, TCGv, TCGv_i32); + + CHECK_FPE; + + var = tcg_temp_new(); + fn = (a->fcond & 1 ? gen_helper_fcmp_s_d : gen_helper_fcmp_c_d); + flags = get_fcmp_flags(a->fcond >> 1); + + fn(var, cpu_env, cpu_fpr[a->fj], cpu_fpr[a->fk], tcg_constant_i32(flags)); + + tcg_gen_st8_tl(var, cpu_env, offsetof(CPULoongArchState, cf[a->cd])); + + tcg_temp_free(var); + return true; +} diff --git a/target/loongarch/insn_trans/trans_fcnv.c.inc b/target/loongarch/insn_trans/trans_fcnv.c.inc new file mode 100644 index 0000000000..c1c6918ad1 --- /dev/null +++ b/target/loongarch/insn_trans/trans_fcnv.c.inc @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +TRANS(fcvt_s_d, gen_ff, gen_helper_fcvt_s_d) +TRANS(fcvt_d_s, gen_ff, gen_helper_fcvt_d_s) +TRANS(ftintrm_w_s, gen_ff, gen_helper_ftintrm_w_s) +TRANS(ftintrm_w_d, gen_ff, gen_helper_ftintrm_w_d) +TRANS(ftintrm_l_s, gen_ff, gen_helper_ftintrm_l_s) +TRANS(ftintrm_l_d, gen_ff, gen_helper_ftintrm_l_d) +TRANS(ftintrp_w_s, gen_ff, gen_helper_ftintrp_w_s) +TRANS(ftintrp_w_d, gen_ff, gen_helper_ftintrp_w_d) +TRANS(ftintrp_l_s, gen_ff, gen_helper_ftintrp_l_s) +TRANS(ftintrp_l_d, gen_ff, gen_helper_ftintrp_l_d) +TRANS(ftintrz_w_s, gen_ff, gen_helper_ftintrz_w_s) +TRANS(ftintrz_w_d, gen_ff, gen_helper_ftintrz_w_d) +TRANS(ftintrz_l_s, gen_ff, gen_helper_ftintrz_l_s) +TRANS(ftintrz_l_d, gen_ff, gen_helper_ftintrz_l_d) +TRANS(ftintrne_w_s, gen_ff, gen_helper_ftintrne_w_s) +TRANS(ftintrne_w_d, gen_ff, gen_helper_ftintrne_w_d) +TRANS(ftintrne_l_s, gen_ff, gen_helper_ftintrne_l_s) +TRANS(ftintrne_l_d, gen_ff, gen_helper_ftintrne_l_d) +TRANS(ftint_w_s, gen_ff, gen_helper_ftint_w_s) +TRANS(ftint_w_d, gen_ff, gen_helper_ftint_w_d) +TRANS(ftint_l_s, gen_ff, gen_helper_ftint_l_s) +TRANS(ftint_l_d, gen_ff, gen_helper_ftint_l_d) +TRANS(ffint_s_w, gen_ff, gen_helper_ffint_s_w) +TRANS(ffint_s_l, gen_ff, gen_helper_ffint_s_l) +TRANS(ffint_d_w, gen_ff, gen_helper_ffint_d_w) +TRANS(ffint_d_l, gen_ff, gen_helper_ffint_d_l) +TRANS(frint_s, gen_ff, gen_helper_frint_s) +TRANS(frint_d, gen_ff, gen_helper_frint_d) diff --git a/target/loongarch/insn_trans/trans_fmemory.c.inc b/target/loongarch/insn_trans/trans_fmemory.c.inc new file mode 100644 index 0000000000..3025a1d3e9 --- /dev/null +++ b/target/loongarch/insn_trans/trans_fmemory.c.inc @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +static void maybe_nanbox_load(TCGv freg, MemOp mop) +{ + if ((mop & MO_SIZE) == MO_32) { + gen_nanbox_s(freg, freg); + } +} + +static bool gen_fload_i(DisasContext *ctx, arg_fr_i *a, MemOp mop) +{ + TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); + TCGv temp = NULL; + + CHECK_FPE; + + if (a->imm) { + temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, addr, a->imm); + addr = temp; + } + + tcg_gen_qemu_ld_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); + maybe_nanbox_load(cpu_fpr[a->fd], mop); + + if (temp) { + tcg_temp_free(temp); + } + + return true; +} + +static bool gen_fstore_i(DisasContext *ctx, arg_fr_i *a, MemOp mop) +{ + TCGv addr = 
gpr_src(ctx, a->rj, EXT_NONE); + TCGv temp = NULL; + + CHECK_FPE; + + if (a->imm) { + temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, addr, a->imm); + addr = temp; + } + + tcg_gen_qemu_st_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); + + if (temp) { + tcg_temp_free(temp); + } + return true; +} + +static bool gen_floadx(DisasContext *ctx, arg_frr *a, MemOp mop) +{ + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + TCGv addr; + + CHECK_FPE; + + addr = tcg_temp_new(); + tcg_gen_add_tl(addr, src1, src2); + tcg_gen_qemu_ld_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); + maybe_nanbox_load(cpu_fpr[a->fd], mop); + tcg_temp_free(addr); + + return true; +} + +static bool gen_fstorex(DisasContext *ctx, arg_frr *a, MemOp mop) +{ + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + TCGv addr; + + CHECK_FPE; + + addr = tcg_temp_new(); + tcg_gen_add_tl(addr, src1, src2); + tcg_gen_qemu_st_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); + tcg_temp_free(addr); + + return true; +} + +static bool gen_fload_gt(DisasContext *ctx, arg_frr *a, MemOp mop) +{ + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + TCGv addr; + + CHECK_FPE; + + addr = tcg_temp_new(); + gen_helper_asrtgt_d(cpu_env, src1, src2); + tcg_gen_add_tl(addr, src1, src2); + tcg_gen_qemu_ld_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); + maybe_nanbox_load(cpu_fpr[a->fd], mop); + tcg_temp_free(addr); + + return true; +} + +static bool gen_fstore_gt(DisasContext *ctx, arg_frr *a, MemOp mop) +{ + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + TCGv addr; + + CHECK_FPE; + + addr = tcg_temp_new(); + gen_helper_asrtgt_d(cpu_env, src1, src2); + tcg_gen_add_tl(addr, src1, src2); + tcg_gen_qemu_st_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); + tcg_temp_free(addr); + + return true; +} + +static bool gen_fload_le(DisasContext *ctx, arg_frr *a, MemOp mop) +{ + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + TCGv addr; + + CHECK_FPE; + + addr = tcg_temp_new(); + gen_helper_asrtle_d(cpu_env, src1, src2); + tcg_gen_add_tl(addr, src1, src2); + tcg_gen_qemu_ld_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); + maybe_nanbox_load(cpu_fpr[a->fd], mop); + tcg_temp_free(addr); + + return true; +} + +static bool gen_fstore_le(DisasContext *ctx, arg_frr *a, MemOp mop) +{ + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + TCGv addr; + + CHECK_FPE; + + addr = tcg_temp_new(); + gen_helper_asrtle_d(cpu_env, src1, src2); + tcg_gen_add_tl(addr, src1, src2); + tcg_gen_qemu_st_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); + tcg_temp_free(addr); + + return true; +} + +TRANS(fld_s, gen_fload_i, MO_TEUL) +TRANS(fst_s, gen_fstore_i, MO_TEUL) +TRANS(fld_d, gen_fload_i, MO_TEUQ) +TRANS(fst_d, gen_fstore_i, MO_TEUQ) +TRANS(fldx_s, gen_floadx, MO_TEUL) +TRANS(fldx_d, gen_floadx, MO_TEUQ) +TRANS(fstx_s, gen_fstorex, MO_TEUL) +TRANS(fstx_d, gen_fstorex, MO_TEUQ) +TRANS(fldgt_s, gen_fload_gt, MO_TEUL) +TRANS(fldgt_d, gen_fload_gt, MO_TEUQ) +TRANS(fldle_s, gen_fload_le, MO_TEUL) +TRANS(fldle_d, gen_fload_le, MO_TEUQ) +TRANS(fstgt_s, gen_fstore_gt, MO_TEUL) +TRANS(fstgt_d, gen_fstore_gt, MO_TEUQ) +TRANS(fstle_s, gen_fstore_le, MO_TEUL) +TRANS(fstle_d, gen_fstore_le, MO_TEUQ) diff --git a/target/loongarch/insn_trans/trans_fmov.c.inc b/target/loongarch/insn_trans/trans_fmov.c.inc new file mode 100644 index 0000000000..8e5106db4e --- /dev/null +++ 
b/target/loongarch/insn_trans/trans_fmov.c.inc @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +static const uint32_t fcsr_mask[4] = { + UINT32_MAX, FCSR0_M1, FCSR0_M2, FCSR0_M3 +}; + +static bool trans_fsel(DisasContext *ctx, arg_fsel *a) +{ + TCGv zero = tcg_constant_tl(0); + TCGv cond; + + CHECK_FPE; + + cond = tcg_temp_new(); + tcg_gen_ld8u_tl(cond, cpu_env, offsetof(CPULoongArchState, cf[a->ca])); + tcg_gen_movcond_tl(TCG_COND_EQ, cpu_fpr[a->fd], cond, zero, + cpu_fpr[a->fj], cpu_fpr[a->fk]); + tcg_temp_free(cond); + + return true; +} + +static bool gen_f2f(DisasContext *ctx, arg_ff *a, + void (*func)(TCGv, TCGv), bool nanbox) +{ + TCGv dest = cpu_fpr[a->fd]; + TCGv src = cpu_fpr[a->fj]; + + CHECK_FPE; + + func(dest, src); + if (nanbox) { + gen_nanbox_s(cpu_fpr[a->fd], cpu_fpr[a->fd]); + } + + return true; +} + +static bool gen_r2f(DisasContext *ctx, arg_fr *a, + void (*func)(TCGv, TCGv)) +{ + TCGv src = gpr_src(ctx, a->rj, EXT_NONE); + + CHECK_FPE; + + func(cpu_fpr[a->fd], src); + return true; +} + +static bool gen_f2r(DisasContext *ctx, arg_rf *a, + void (*func)(TCGv, TCGv)) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + + CHECK_FPE; + + func(dest, cpu_fpr[a->fj]); + gen_set_gpr(a->rd, dest, EXT_NONE); + + return true; +} + +static bool trans_movgr2fcsr(DisasContext *ctx, arg_movgr2fcsr *a) +{ + uint32_t mask = fcsr_mask[a->fcsrd]; + TCGv Rj = gpr_src(ctx, a->rj, EXT_NONE); + + CHECK_FPE; + + if (mask == UINT32_MAX) { + tcg_gen_st32_i64(Rj, cpu_env, offsetof(CPULoongArchState, fcsr0)); + } else { + TCGv_i32 fcsr0 = tcg_temp_new_i32(); + TCGv_i32 temp = tcg_temp_new_i32(); + + tcg_gen_ld_i32(fcsr0, cpu_env, offsetof(CPULoongArchState, fcsr0)); + tcg_gen_extrl_i64_i32(temp, Rj); + tcg_gen_andi_i32(temp, temp, mask); + tcg_gen_andi_i32(fcsr0, fcsr0, ~mask); + tcg_gen_or_i32(fcsr0, fcsr0, temp); + tcg_gen_st_i32(fcsr0, cpu_env, offsetof(CPULoongArchState, fcsr0)); + + tcg_temp_free_i32(temp); + tcg_temp_free_i32(fcsr0); + } + + /* + * Install the new rounding mode to fpu_status, if changed. + * Note that FCSR3 is exactly the rounding mode field. 
+ */ + if (mask & FCSR0_M3) { + gen_helper_set_rounding_mode(cpu_env); + } + return true; +} + +static bool trans_movfcsr2gr(DisasContext *ctx, arg_movfcsr2gr *a) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + + CHECK_FPE; + + tcg_gen_ld32u_i64(dest, cpu_env, offsetof(CPULoongArchState, fcsr0)); + tcg_gen_andi_i64(dest, dest, fcsr_mask[a->fcsrs]); + gen_set_gpr(a->rd, dest, EXT_NONE); + + return true; +} + +static void gen_movgr2fr_w(TCGv dest, TCGv src) +{ + tcg_gen_deposit_i64(dest, dest, src, 0, 32); +} + +static void gen_movgr2frh_w(TCGv dest, TCGv src) +{ + tcg_gen_deposit_i64(dest, dest, src, 32, 32); +} + +static void gen_movfrh2gr_s(TCGv dest, TCGv src) +{ + tcg_gen_sextract_tl(dest, src, 32, 32); +} + +static bool trans_movfr2cf(DisasContext *ctx, arg_movfr2cf *a) +{ + TCGv t0; + + CHECK_FPE; + + t0 = tcg_temp_new(); + tcg_gen_andi_tl(t0, cpu_fpr[a->fj], 0x1); + tcg_gen_st8_tl(t0, cpu_env, offsetof(CPULoongArchState, cf[a->cd & 0x7])); + tcg_temp_free(t0); + + return true; +} + +static bool trans_movcf2fr(DisasContext *ctx, arg_movcf2fr *a) +{ + CHECK_FPE; + + tcg_gen_ld8u_tl(cpu_fpr[a->fd], cpu_env, + offsetof(CPULoongArchState, cf[a->cj & 0x7])); + return true; +} + +static bool trans_movgr2cf(DisasContext *ctx, arg_movgr2cf *a) +{ + TCGv t0; + + CHECK_FPE; + + t0 = tcg_temp_new(); + tcg_gen_andi_tl(t0, gpr_src(ctx, a->rj, EXT_NONE), 0x1); + tcg_gen_st8_tl(t0, cpu_env, offsetof(CPULoongArchState, cf[a->cd & 0x7])); + tcg_temp_free(t0); + + return true; +} + +static bool trans_movcf2gr(DisasContext *ctx, arg_movcf2gr *a) +{ + CHECK_FPE; + + tcg_gen_ld8u_tl(gpr_dst(ctx, a->rd, EXT_NONE), cpu_env, + offsetof(CPULoongArchState, cf[a->cj & 0x7])); + return true; +} + +TRANS(fmov_s, gen_f2f, tcg_gen_mov_tl, true) +TRANS(fmov_d, gen_f2f, tcg_gen_mov_tl, false) +TRANS(movgr2fr_w, gen_r2f, gen_movgr2fr_w) +TRANS(movgr2fr_d, gen_r2f, tcg_gen_mov_tl) +TRANS(movgr2frh_w, gen_r2f, gen_movgr2frh_w) +TRANS(movfr2gr_s, gen_f2r, tcg_gen_ext32s_tl) +TRANS(movfr2gr_d, gen_f2r, tcg_gen_mov_tl) +TRANS(movfrh2gr_s, gen_f2r, gen_movfrh2gr_s) diff --git a/target/loongarch/insn_trans/trans_memory.c.inc b/target/loongarch/insn_trans/trans_memory.c.inc new file mode 100644 index 0000000000..d5eb31147c --- /dev/null +++ b/target/loongarch/insn_trans/trans_memory.c.inc @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +static bool gen_load(DisasContext *ctx, arg_rr_i *a, MemOp mop) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); + TCGv temp = NULL; + + if (a->imm) { + temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, addr, a->imm); + addr = temp; + } + + tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop); + gen_set_gpr(a->rd, dest, EXT_NONE); + + if (temp) { + tcg_temp_free(temp); + } + + return true; +} + +static bool gen_store(DisasContext *ctx, arg_rr_i *a, MemOp mop) +{ + TCGv data = gpr_src(ctx, a->rd, EXT_NONE); + TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); + TCGv temp = NULL; + + if (a->imm) { + temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, addr, a->imm); + addr = temp; + } + + tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, mop); + + if (temp) { + tcg_temp_free(temp); + } + + return true; +} + +static bool gen_loadx(DisasContext *ctx, arg_rrr *a, MemOp mop) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + TCGv addr = tcg_temp_new(); + + tcg_gen_add_tl(addr, src1, src2); + 
tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop); + gen_set_gpr(a->rd, dest, EXT_NONE); + tcg_temp_free(addr); + + return true; +} + +static bool gen_storex(DisasContext *ctx, arg_rrr *a, MemOp mop) +{ + TCGv data = gpr_src(ctx, a->rd, EXT_NONE); + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + TCGv addr = tcg_temp_new(); + + tcg_gen_add_tl(addr, src1, src2); + tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, mop); + tcg_temp_free(addr); + + return true; +} + +static bool gen_load_gt(DisasContext *ctx, arg_rrr *a, MemOp mop) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + + gen_helper_asrtgt_d(cpu_env, src1, src2); + tcg_gen_qemu_ld_tl(dest, src1, ctx->mem_idx, mop); + gen_set_gpr(a->rd, dest, EXT_NONE); + + return true; +} + +static bool gen_load_le(DisasContext *ctx, arg_rrr *a, MemOp mop) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + + gen_helper_asrtle_d(cpu_env, src1, src2); + tcg_gen_qemu_ld_tl(dest, src1, ctx->mem_idx, mop); + gen_set_gpr(a->rd, dest, EXT_NONE); + + return true; +} + +static bool gen_store_gt(DisasContext *ctx, arg_rrr *a, MemOp mop) +{ + TCGv data = gpr_src(ctx, a->rd, EXT_NONE); + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + + gen_helper_asrtgt_d(cpu_env, src1, src2); + tcg_gen_qemu_st_tl(data, src1, ctx->mem_idx, mop); + + return true; +} + +static bool gen_store_le(DisasContext *ctx, arg_rrr *a, MemOp mop) +{ + TCGv data = gpr_src(ctx, a->rd, EXT_NONE); + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + + gen_helper_asrtle_d(cpu_env, src1, src2); + tcg_gen_qemu_st_tl(data, src1, ctx->mem_idx, mop); + + return true; +} + +static bool trans_preld(DisasContext *ctx, arg_preld *a) +{ + return true; +} + +static bool trans_dbar(DisasContext *ctx, arg_dbar * a) +{ + tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL); + return true; +} + +static bool trans_ibar(DisasContext *ctx, arg_ibar *a) +{ + ctx->base.is_jmp = DISAS_STOP; + return true; +} + +static bool gen_ldptr(DisasContext *ctx, arg_rr_i *a, MemOp mop) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); + TCGv temp = NULL; + + if (a->imm) { + temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, addr, a->imm); + addr = temp; + } + + tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop); + gen_set_gpr(a->rd, dest, EXT_NONE); + + if (temp) { + tcg_temp_free(temp); + } + + return true; +} + +static bool gen_stptr(DisasContext *ctx, arg_rr_i *a, MemOp mop) +{ + TCGv data = gpr_src(ctx, a->rd, EXT_NONE); + TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); + TCGv temp = NULL; + + if (a->imm) { + temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, addr, a->imm); + addr = temp; + } + + tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, mop); + + if (temp) { + tcg_temp_free(temp); + } + + return true; +} + +TRANS(ld_b, gen_load, MO_SB) +TRANS(ld_h, gen_load, MO_TESW) +TRANS(ld_w, gen_load, MO_TESL) +TRANS(ld_d, gen_load, MO_TEUQ) +TRANS(st_b, gen_store, MO_UB) +TRANS(st_h, gen_store, MO_TEUW) +TRANS(st_w, gen_store, MO_TEUL) +TRANS(st_d, gen_store, MO_TEUQ) +TRANS(ld_bu, gen_load, MO_UB) +TRANS(ld_hu, gen_load, MO_TEUW) +TRANS(ld_wu, gen_load, MO_TEUL) +TRANS(ldx_b, gen_loadx, MO_SB) +TRANS(ldx_h, gen_loadx, MO_TESW) +TRANS(ldx_w, gen_loadx, MO_TESL) +TRANS(ldx_d, gen_loadx, MO_TEUQ) +TRANS(stx_b, gen_storex, 
MO_UB) +TRANS(stx_h, gen_storex, MO_TEUW) +TRANS(stx_w, gen_storex, MO_TEUL) +TRANS(stx_d, gen_storex, MO_TEUQ) +TRANS(ldx_bu, gen_loadx, MO_UB) +TRANS(ldx_hu, gen_loadx, MO_TEUW) +TRANS(ldx_wu, gen_loadx, MO_TEUL) +TRANS(ldptr_w, gen_ldptr, MO_TESL) +TRANS(stptr_w, gen_stptr, MO_TEUL) +TRANS(ldptr_d, gen_ldptr, MO_TEUQ) +TRANS(stptr_d, gen_stptr, MO_TEUQ) +TRANS(ldgt_b, gen_load_gt, MO_SB) +TRANS(ldgt_h, gen_load_gt, MO_TESW) +TRANS(ldgt_w, gen_load_gt, MO_TESL) +TRANS(ldgt_d, gen_load_gt, MO_TEUQ) +TRANS(ldle_b, gen_load_le, MO_SB) +TRANS(ldle_h, gen_load_le, MO_TESW) +TRANS(ldle_w, gen_load_le, MO_TESL) +TRANS(ldle_d, gen_load_le, MO_TEUQ) +TRANS(stgt_b, gen_store_gt, MO_UB) +TRANS(stgt_h, gen_store_gt, MO_TEUW) +TRANS(stgt_w, gen_store_gt, MO_TEUL) +TRANS(stgt_d, gen_store_gt, MO_TEUQ) +TRANS(stle_b, gen_store_le, MO_UB) +TRANS(stle_h, gen_store_le, MO_TEUW) +TRANS(stle_w, gen_store_le, MO_TEUL) +TRANS(stle_d, gen_store_le, MO_TEUQ) diff --git a/target/loongarch/insn_trans/trans_privileged.c.inc b/target/loongarch/insn_trans/trans_privileged.c.inc new file mode 100644 index 0000000000..40f82becb0 --- /dev/null +++ b/target/loongarch/insn_trans/trans_privileged.c.inc @@ -0,0 +1,502 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + * + * LoongArch translation routines for the privileged instructions. + */ + +#include "cpu-csr.h" + +#ifdef CONFIG_USER_ONLY + +#define GEN_FALSE_TRANS(name) \ +static bool trans_##name(DisasContext *ctx, arg_##name * a) \ +{ \ + return false; \ +} + +GEN_FALSE_TRANS(csrrd) +GEN_FALSE_TRANS(csrwr) +GEN_FALSE_TRANS(csrxchg) +GEN_FALSE_TRANS(iocsrrd_b) +GEN_FALSE_TRANS(iocsrrd_h) +GEN_FALSE_TRANS(iocsrrd_w) +GEN_FALSE_TRANS(iocsrrd_d) +GEN_FALSE_TRANS(iocsrwr_b) +GEN_FALSE_TRANS(iocsrwr_h) +GEN_FALSE_TRANS(iocsrwr_w) +GEN_FALSE_TRANS(iocsrwr_d) +GEN_FALSE_TRANS(tlbsrch) +GEN_FALSE_TRANS(tlbrd) +GEN_FALSE_TRANS(tlbwr) +GEN_FALSE_TRANS(tlbfill) +GEN_FALSE_TRANS(tlbclr) +GEN_FALSE_TRANS(tlbflush) +GEN_FALSE_TRANS(invtlb) +GEN_FALSE_TRANS(cacop) +GEN_FALSE_TRANS(ldpte) +GEN_FALSE_TRANS(lddir) +GEN_FALSE_TRANS(ertn) +GEN_FALSE_TRANS(dbcl) +GEN_FALSE_TRANS(idle) + +#else + +typedef void (*GenCSRRead)(TCGv dest, TCGv_ptr env); +typedef void (*GenCSRWrite)(TCGv dest, TCGv_ptr env, TCGv src); + +typedef struct { + int offset; + int flags; + GenCSRRead readfn; + GenCSRWrite writefn; +} CSRInfo; + +enum { + CSRFL_READONLY = (1 << 0), + CSRFL_EXITTB = (1 << 1), + CSRFL_IO = (1 << 2), +}; + +#define CSR_OFF_FUNCS(NAME, FL, RD, WR) \ + [LOONGARCH_CSR_##NAME] = { \ + .offset = offsetof(CPULoongArchState, CSR_##NAME), \ + .flags = FL, .readfn = RD, .writefn = WR \ + } + +#define CSR_OFF_ARRAY(NAME, N) \ + [LOONGARCH_CSR_##NAME(N)] = { \ + .offset = offsetof(CPULoongArchState, CSR_##NAME[N]), \ + .flags = 0, .readfn = NULL, .writefn = NULL \ + } + +#define CSR_OFF_FLAGS(NAME, FL) \ + CSR_OFF_FUNCS(NAME, FL, NULL, NULL) + +#define CSR_OFF(NAME) \ + CSR_OFF_FLAGS(NAME, 0) + +static const CSRInfo csr_info[] = { + CSR_OFF_FLAGS(CRMD, CSRFL_EXITTB), + CSR_OFF(PRMD), + CSR_OFF_FLAGS(EUEN, CSRFL_EXITTB), + CSR_OFF_FLAGS(MISC, CSRFL_READONLY), + CSR_OFF(ECFG), + CSR_OFF_FUNCS(ESTAT, CSRFL_EXITTB, NULL, gen_helper_csrwr_estat), + CSR_OFF(ERA), + CSR_OFF(BADV), + CSR_OFF_FLAGS(BADI, CSRFL_READONLY), + CSR_OFF(EENTRY), + CSR_OFF(TLBIDX), + CSR_OFF(TLBEHI), + CSR_OFF(TLBELO0), + CSR_OFF(TLBELO1), + CSR_OFF_FUNCS(ASID, CSRFL_EXITTB, NULL, gen_helper_csrwr_asid), + CSR_OFF(PGDL), + CSR_OFF(PGDH), + CSR_OFF_FUNCS(PGD, 
CSRFL_READONLY, gen_helper_csrrd_pgd, NULL), + CSR_OFF(PWCL), + CSR_OFF(PWCH), + CSR_OFF(STLBPS), + CSR_OFF(RVACFG), + [LOONGARCH_CSR_CPUID] = { + .offset = (int)offsetof(CPUState, cpu_index) + - (int)offsetof(LoongArchCPU, env), + .flags = CSRFL_READONLY, + .readfn = NULL, + .writefn = NULL + }, + CSR_OFF_FLAGS(PRCFG1, CSRFL_READONLY), + CSR_OFF_FLAGS(PRCFG2, CSRFL_READONLY), + CSR_OFF_FLAGS(PRCFG3, CSRFL_READONLY), + CSR_OFF_ARRAY(SAVE, 0), + CSR_OFF_ARRAY(SAVE, 1), + CSR_OFF_ARRAY(SAVE, 2), + CSR_OFF_ARRAY(SAVE, 3), + CSR_OFF_ARRAY(SAVE, 4), + CSR_OFF_ARRAY(SAVE, 5), + CSR_OFF_ARRAY(SAVE, 6), + CSR_OFF_ARRAY(SAVE, 7), + CSR_OFF_ARRAY(SAVE, 8), + CSR_OFF_ARRAY(SAVE, 9), + CSR_OFF_ARRAY(SAVE, 10), + CSR_OFF_ARRAY(SAVE, 11), + CSR_OFF_ARRAY(SAVE, 12), + CSR_OFF_ARRAY(SAVE, 13), + CSR_OFF_ARRAY(SAVE, 14), + CSR_OFF_ARRAY(SAVE, 15), + CSR_OFF(TID), + CSR_OFF_FUNCS(TCFG, CSRFL_IO, NULL, gen_helper_csrwr_tcfg), + CSR_OFF_FUNCS(TVAL, CSRFL_READONLY | CSRFL_IO, gen_helper_csrrd_tval, NULL), + CSR_OFF(CNTC), + CSR_OFF_FUNCS(TICLR, CSRFL_IO, NULL, gen_helper_csrwr_ticlr), + CSR_OFF(LLBCTL), + CSR_OFF(IMPCTL1), + CSR_OFF(IMPCTL2), + CSR_OFF(TLBRENTRY), + CSR_OFF(TLBRBADV), + CSR_OFF(TLBRERA), + CSR_OFF(TLBRSAVE), + CSR_OFF(TLBRELO0), + CSR_OFF(TLBRELO1), + CSR_OFF(TLBREHI), + CSR_OFF(TLBRPRMD), + CSR_OFF(MERRCTL), + CSR_OFF(MERRINFO1), + CSR_OFF(MERRINFO2), + CSR_OFF(MERRENTRY), + CSR_OFF(MERRERA), + CSR_OFF(MERRSAVE), + CSR_OFF(CTAG), + CSR_OFF_ARRAY(DMW, 0), + CSR_OFF_ARRAY(DMW, 1), + CSR_OFF_ARRAY(DMW, 2), + CSR_OFF_ARRAY(DMW, 3), + CSR_OFF(DBG), + CSR_OFF(DERA), + CSR_OFF(DSAVE), +}; + +static bool check_plv(DisasContext *ctx) +{ + if (ctx->plv == MMU_PLV_USER) { + generate_exception(ctx, EXCCODE_IPE); + return true; + } + return false; +} + +static const CSRInfo *get_csr(unsigned csr_num) +{ + const CSRInfo *csr; + + if (csr_num >= ARRAY_SIZE(csr_info)) { + return NULL; + } + csr = &csr_info[csr_num]; + if (csr->offset == 0) { + return NULL; + } + return csr; +} + +static bool check_csr_flags(DisasContext *ctx, const CSRInfo *csr, bool write) +{ + if ((csr->flags & CSRFL_READONLY) && write) { + return false; + } + if ((csr->flags & CSRFL_IO) && + (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT)) { + gen_io_start(); + ctx->base.is_jmp = DISAS_EXIT_UPDATE; + } else if ((csr->flags & CSRFL_EXITTB) && write) { + ctx->base.is_jmp = DISAS_EXIT_UPDATE; + } + return true; +} + +static bool trans_csrrd(DisasContext *ctx, arg_csrrd *a) +{ + TCGv dest; + const CSRInfo *csr; + + if (check_plv(ctx)) { + return false; + } + csr = get_csr(a->csr); + if (csr == NULL) { + /* CSR is undefined: read as 0. */ + dest = tcg_constant_tl(0); + } else { + check_csr_flags(ctx, csr, false); + dest = gpr_dst(ctx, a->rd, EXT_NONE); + if (csr->readfn) { + csr->readfn(dest, cpu_env); + } else { + tcg_gen_ld_tl(dest, cpu_env, csr->offset); + } + } + gen_set_gpr(a->rd, dest, EXT_NONE); + return true; +} + +static bool trans_csrwr(DisasContext *ctx, arg_csrwr *a) +{ + TCGv dest, src1; + const CSRInfo *csr; + + if (check_plv(ctx)) { + return false; + } + csr = get_csr(a->csr); + if (csr == NULL) { + /* CSR is undefined: write ignored, read old_value as 0. */ + gen_set_gpr(a->rd, tcg_constant_tl(0), EXT_NONE); + return true; + } + if (!check_csr_flags(ctx, csr, true)) { + /* CSR is readonly: trap. 
*/ + return false; + } + src1 = gpr_src(ctx, a->rd, EXT_NONE); + if (csr->writefn) { + dest = gpr_dst(ctx, a->rd, EXT_NONE); + csr->writefn(dest, cpu_env, src1); + } else { + dest = temp_new(ctx); + tcg_gen_ld_tl(dest, cpu_env, csr->offset); + tcg_gen_st_tl(src1, cpu_env, csr->offset); + } + gen_set_gpr(a->rd, dest, EXT_NONE); + return true; +} + +static bool trans_csrxchg(DisasContext *ctx, arg_csrxchg *a) +{ + TCGv src1, mask, oldv, newv, temp; + const CSRInfo *csr; + + if (check_plv(ctx)) { + return false; + } + csr = get_csr(a->csr); + if (csr == NULL) { + /* CSR is undefined: write ignored, read old_value as 0. */ + gen_set_gpr(a->rd, tcg_constant_tl(0), EXT_NONE); + return true; + } + + if (!check_csr_flags(ctx, csr, true)) { + /* CSR is readonly: trap. */ + return false; + } + + /* So far only readonly csrs have readfn. */ + assert(csr->readfn == NULL); + + src1 = gpr_src(ctx, a->rd, EXT_NONE); + mask = gpr_src(ctx, a->rj, EXT_NONE); + oldv = tcg_temp_new(); + newv = tcg_temp_new(); + temp = tcg_temp_new(); + + tcg_gen_ld_tl(oldv, cpu_env, csr->offset); + tcg_gen_and_tl(newv, src1, mask); + tcg_gen_andc_tl(temp, oldv, mask); + tcg_gen_or_tl(newv, newv, temp); + + if (csr->writefn) { + csr->writefn(oldv, cpu_env, newv); + } else { + tcg_gen_st_tl(newv, cpu_env, csr->offset); + } + gen_set_gpr(a->rd, oldv, EXT_NONE); + + tcg_temp_free(temp); + tcg_temp_free(newv); + tcg_temp_free(oldv); + return true; +} + +static bool gen_iocsrrd(DisasContext *ctx, arg_rr *a, + void (*func)(TCGv, TCGv_ptr, TCGv)) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + + if (check_plv(ctx)) { + return false; + } + func(dest, cpu_env, src1); + return true; +} + +static bool gen_iocsrwr(DisasContext *ctx, arg_rr *a, + void (*func)(TCGv_ptr, TCGv, TCGv)) +{ + TCGv val = gpr_src(ctx, a->rd, EXT_NONE); + TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); + + if (check_plv(ctx)) { + return false; + } + func(cpu_env, addr, val); + return true; +} + +TRANS(iocsrrd_b, gen_iocsrrd, gen_helper_iocsrrd_b) +TRANS(iocsrrd_h, gen_iocsrrd, gen_helper_iocsrrd_h) +TRANS(iocsrrd_w, gen_iocsrrd, gen_helper_iocsrrd_w) +TRANS(iocsrrd_d, gen_iocsrrd, gen_helper_iocsrrd_d) +TRANS(iocsrwr_b, gen_iocsrwr, gen_helper_iocsrwr_b) +TRANS(iocsrwr_h, gen_iocsrwr, gen_helper_iocsrwr_h) +TRANS(iocsrwr_w, gen_iocsrwr, gen_helper_iocsrwr_w) +TRANS(iocsrwr_d, gen_iocsrwr, gen_helper_iocsrwr_d) + +static void check_mmu_idx(DisasContext *ctx) +{ + if (ctx->mem_idx != MMU_IDX_DA) { + tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next + 4); + ctx->base.is_jmp = DISAS_EXIT; + } +} + +static bool trans_tlbsrch(DisasContext *ctx, arg_tlbsrch *a) +{ + if (check_plv(ctx)) { + return false; + } + gen_helper_tlbsrch(cpu_env); + return true; +} + +static bool trans_tlbrd(DisasContext *ctx, arg_tlbrd *a) +{ + if (check_plv(ctx)) { + return false; + } + gen_helper_tlbrd(cpu_env); + return true; +} + +static bool trans_tlbwr(DisasContext *ctx, arg_tlbwr *a) +{ + if (check_plv(ctx)) { + return false; + } + gen_helper_tlbwr(cpu_env); + check_mmu_idx(ctx); + return true; +} + +static bool trans_tlbfill(DisasContext *ctx, arg_tlbfill *a) +{ + if (check_plv(ctx)) { + return false; + } + gen_helper_tlbfill(cpu_env); + check_mmu_idx(ctx); + return true; +} + +static bool trans_tlbclr(DisasContext *ctx, arg_tlbclr *a) +{ + if (check_plv(ctx)) { + return false; + } + gen_helper_tlbclr(cpu_env); + check_mmu_idx(ctx); + return true; +} + +static bool trans_tlbflush(DisasContext *ctx, arg_tlbflush *a) +{ + if (check_plv(ctx)) { + return 
false; + } + gen_helper_tlbflush(cpu_env); + check_mmu_idx(ctx); + return true; +} + +static bool trans_invtlb(DisasContext *ctx, arg_invtlb *a) +{ + TCGv rj = gpr_src(ctx, a->rj, EXT_NONE); + TCGv rk = gpr_src(ctx, a->rk, EXT_NONE); + + if (check_plv(ctx)) { + return false; + } + + switch (a->imm) { + case 0: + case 1: + gen_helper_invtlb_all(cpu_env); + break; + case 2: + gen_helper_invtlb_all_g(cpu_env, tcg_constant_i32(1)); + break; + case 3: + gen_helper_invtlb_all_g(cpu_env, tcg_constant_i32(0)); + break; + case 4: + gen_helper_invtlb_all_asid(cpu_env, rj); + break; + case 5: + gen_helper_invtlb_page_asid(cpu_env, rj, rk); + break; + case 6: + gen_helper_invtlb_page_asid_or_g(cpu_env, rj, rk); + break; + default: + return false; + } + ctx->base.is_jmp = DISAS_STOP; + return true; +} + +static bool trans_cacop(DisasContext *ctx, arg_cacop *a) +{ + /* Treat the cacop as a nop */ + if (check_plv(ctx)) { + return false; + } + return true; +} + +static bool trans_ldpte(DisasContext *ctx, arg_ldpte *a) +{ + TCGv_i32 mem_idx = tcg_constant_i32(ctx->mem_idx); + TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); + + if (check_plv(ctx)) { + return false; + } + gen_helper_ldpte(cpu_env, src1, tcg_constant_tl(a->imm), mem_idx); + return true; +} + +static bool trans_lddir(DisasContext *ctx, arg_lddir *a) +{ + TCGv_i32 mem_idx = tcg_constant_i32(ctx->mem_idx); + TCGv src = gpr_src(ctx, a->rj, EXT_NONE); + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + + if (check_plv(ctx)) { + return false; + } + gen_helper_lddir(dest, cpu_env, src, tcg_constant_tl(a->imm), mem_idx); + return true; +} + +static bool trans_ertn(DisasContext *ctx, arg_ertn *a) +{ + if (check_plv(ctx)) { + return false; + } + gen_helper_ertn(cpu_env); + ctx->base.is_jmp = DISAS_EXIT; + return true; +} + +static bool trans_dbcl(DisasContext *ctx, arg_dbcl *a) +{ + if (check_plv(ctx)) { + return false; + } + generate_exception(ctx, EXCCODE_DBP); + return true; +} + +static bool trans_idle(DisasContext *ctx, arg_idle *a) +{ + if (check_plv(ctx)) { + return false; + } + + tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next + 4); + gen_helper_idle(cpu_env); + ctx->base.is_jmp = DISAS_NORETURN; + return true; +} +#endif diff --git a/target/loongarch/insn_trans/trans_shift.c.inc b/target/loongarch/insn_trans/trans_shift.c.inc new file mode 100644 index 0000000000..5260af2337 --- /dev/null +++ b/target/loongarch/insn_trans/trans_shift.c.inc @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +static void gen_sll_w(TCGv dest, TCGv src1, TCGv src2) +{ + TCGv t0 = tcg_temp_new(); + tcg_gen_andi_tl(t0, src2, 0x1f); + tcg_gen_shl_tl(dest, src1, t0); + tcg_temp_free(t0); +} + +static void gen_srl_w(TCGv dest, TCGv src1, TCGv src2) +{ + TCGv t0 = tcg_temp_new(); + tcg_gen_andi_tl(t0, src2, 0x1f); + tcg_gen_shr_tl(dest, src1, t0); + tcg_temp_free(t0); +} + +static void gen_sra_w(TCGv dest, TCGv src1, TCGv src2) +{ + TCGv t0 = tcg_temp_new(); + tcg_gen_andi_tl(t0, src2, 0x1f); + tcg_gen_sar_tl(dest, src1, t0); + tcg_temp_free(t0); +} + +static void gen_sll_d(TCGv dest, TCGv src1, TCGv src2) +{ + TCGv t0 = tcg_temp_new(); + tcg_gen_andi_tl(t0, src2, 0x3f); + tcg_gen_shl_tl(dest, src1, t0); + tcg_temp_free(t0); +} + +static void gen_srl_d(TCGv dest, TCGv src1, TCGv src2) +{ + TCGv t0 = tcg_temp_new(); + tcg_gen_andi_tl(t0, src2, 0x3f); + tcg_gen_shr_tl(dest, src1, t0); + tcg_temp_free(t0); +} + +static void gen_sra_d(TCGv dest, TCGv src1, TCGv src2) +{ + TCGv t0 = tcg_temp_new(); + 
tcg_gen_andi_tl(t0, src2, 0x3f); + tcg_gen_sar_tl(dest, src1, t0); + tcg_temp_free(t0); +} + +static void gen_rotr_w(TCGv dest, TCGv src1, TCGv src2) +{ + TCGv_i32 t1 = tcg_temp_new_i32(); + TCGv_i32 t2 = tcg_temp_new_i32(); + TCGv t0 = tcg_temp_new(); + + tcg_gen_andi_tl(t0, src2, 0x1f); + + tcg_gen_trunc_tl_i32(t1, src1); + tcg_gen_trunc_tl_i32(t2, t0); + + tcg_gen_rotr_i32(t1, t1, t2); + tcg_gen_ext_i32_tl(dest, t1); + + tcg_temp_free_i32(t1); + tcg_temp_free_i32(t2); + tcg_temp_free(t0); +} + +static void gen_rotr_d(TCGv dest, TCGv src1, TCGv src2) +{ + TCGv t0 = tcg_temp_new(); + tcg_gen_andi_tl(t0, src2, 0x3f); + tcg_gen_rotr_tl(dest, src1, t0); + tcg_temp_free(t0); +} + +static bool trans_srai_w(DisasContext *ctx, arg_srai_w *a) +{ + TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv src1 = gpr_src(ctx, a->rj, EXT_ZERO); + + tcg_gen_sextract_tl(dest, src1, a->imm, 32 - a->imm); + gen_set_gpr(a->rd, dest, EXT_NONE); + + return true; +} + +TRANS(sll_w, gen_rrr, EXT_ZERO, EXT_NONE, EXT_SIGN, gen_sll_w) +TRANS(srl_w, gen_rrr, EXT_ZERO, EXT_NONE, EXT_SIGN, gen_srl_w) +TRANS(sra_w, gen_rrr, EXT_SIGN, EXT_NONE, EXT_SIGN, gen_sra_w) +TRANS(sll_d, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, gen_sll_d) +TRANS(srl_d, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, gen_srl_d) +TRANS(sra_d, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, gen_sra_d) +TRANS(rotr_w, gen_rrr, EXT_ZERO, EXT_NONE, EXT_SIGN, gen_rotr_w) +TRANS(rotr_d, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, gen_rotr_d) +TRANS(slli_w, gen_rri_c, EXT_NONE, EXT_SIGN, tcg_gen_shli_tl) +TRANS(slli_d, gen_rri_c, EXT_NONE, EXT_NONE, tcg_gen_shli_tl) +TRANS(srli_w, gen_rri_c, EXT_ZERO, EXT_SIGN, tcg_gen_shri_tl) +TRANS(srli_d, gen_rri_c, EXT_NONE, EXT_NONE, tcg_gen_shri_tl) +TRANS(srai_d, gen_rri_c, EXT_NONE, EXT_NONE, tcg_gen_sari_tl) +TRANS(rotri_w, gen_rri_v, EXT_NONE, EXT_NONE, gen_rotr_w) +TRANS(rotri_d, gen_rri_c, EXT_NONE, EXT_NONE, tcg_gen_rotri_tl) diff --git a/target/loongarch/insns.decode b/target/loongarch/insns.decode new file mode 100644 index 0000000000..3fdc6e148c --- /dev/null +++ b/target/loongarch/insns.decode @@ -0,0 +1,486 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# LoongArch instruction decode definitions. +# +# Copyright (c) 2021 Loongson Technology Corporation Limited +# + +# +# Fields +# +%i14s2 10:s14 !function=shl_2 +%sa2p1 15:2 !function=plus_1 +%offs21 0:s5 10:16 !function=shl_2 +%offs16 10:s16 !function=shl_2 +%offs26 0:s10 10:16 !function=shl_2 + +# +# Argument sets +# +&i imm +&r_i rd imm +&rr rd rj +&rr_jk rj rk +&rrr rd rj rk +&rr_i rd rj imm +&hint_r_i hint rj imm +&rrr_sa rd rj rk sa +&rr_ms_ls rd rj ms ls +&ff fd fj +&fff fd fj fk +&ffff fd fj fk fa +&cff_fcond cd fj fk fcond +&fffc fd fj fk ca +&fr fd rj +&rf rd fj +&fcsrd_r fcsrd rj +&r_fcsrs rd fcsrs +&cf cd fj +&fc fd cj +&cr cd rj +&rc rd cj +&frr fd rj rk +&fr_i fd rj imm +&r_offs rj offs +&c_offs cj offs +&offs offs +&rr_offs rj rd offs +&r_csr rd csr +&rr_csr rd rj csr +&empty +&i_rr imm rj rk +&cop_r_i cop rj imm +&j_i rj imm + +# +# Formats +# +@i15 .... ........ ..... imm:15 &i +@rr .... ........ ..... ..... rj:5 rd:5 &rr +@rr_jk .... ........ ..... rk:5 rj:5 ..... &rr_jk +@rrr .... ........ ..... rk:5 rj:5 rd:5 &rrr +@r_i20 .... ... imm:s20 rd:5 &r_i +@rr_ui5 .... ........ ..... imm:5 rj:5 rd:5 &rr_i +@rr_ui6 .... ........ .... imm:6 rj:5 rd:5 &rr_i +@rr_ui8 .. ........ .... imm:8 rj:5 rd:5 &rr_i +@rr_i12 .... ...... imm:s12 rj:5 rd:5 &rr_i +@rr_ui12 .... ...... imm:12 rj:5 rd:5 &rr_i +@rr_i14s2 .... .... .............. rj:5 rd:5 &rr_i imm=%i14s2 +@rr_i16 .... 
.. imm:s16 rj:5 rd:5 &rr_i +@hint_r_i12 .... ...... imm:s12 rj:5 hint:5 &hint_r_i +@rrr_sa2p1 .... ........ ... .. rk:5 rj:5 rd:5 &rrr_sa sa=%sa2p1 +@rrr_sa2 .... ........ ... sa:2 rk:5 rj:5 rd:5 &rrr_sa +@rrr_sa3 .... ........ .. sa:3 rk:5 rj:5 rd:5 &rrr_sa +@rr_2bw .... ....... ms:5 . ls:5 rj:5 rd:5 &rr_ms_ls +@rr_2bd .... ...... ms:6 ls:6 rj:5 rd:5 &rr_ms_ls +@ff .... ........ ..... ..... fj:5 fd:5 &ff +@fff .... ........ ..... fk:5 fj:5 fd:5 &fff +@ffff .... ........ fa:5 fk:5 fj:5 fd:5 &ffff +@cff_fcond .... ........ fcond:5 fk:5 fj:5 .. cd:3 &cff_fcond +@fffc .... ........ .. ca:3 fk:5 fj:5 fd:5 &fffc +@fr .... ........ ..... ..... rj:5 fd:5 &fr +@rf .... ........ ..... ..... fj:5 rd:5 &rf +@fcsrd_r .... ........ ..... ..... rj:5 fcsrd:5 &fcsrd_r +@r_fcsrs .... ........ ..... ..... fcsrs:5 rd:5 &r_fcsrs +@cf .... ........ ..... ..... fj:5 .. cd:3 &cf +@fc .... ........ ..... ..... .. cj:3 fd:5 &fc +@cr .... ........ ..... ..... rj:5 .. cd:3 &cr +@rc .... ........ ..... ..... .. cj:3 rd:5 &rc +@frr .... ........ ..... rk:5 rj:5 fd:5 &frr +@fr_i12 .... ...... imm:s12 rj:5 fd:5 &fr_i +@r_offs21 .... .. ................ rj:5 ..... &r_offs offs=%offs21 +@c_offs21 .... .. ................ .. cj:3 ..... &c_offs offs=%offs21 +@offs26 .... .. .......................... &offs offs=%offs26 +@rr_offs16 .... .. ................ rj:5 rd:5 &rr_offs offs=%offs16 +@r_csr .... .... csr:14 ..... rd:5 &r_csr +@rr_csr .... .... csr:14 rj:5 rd:5 &rr_csr +@empty .... ........ ..... ..... ..... ..... &empty +@i_rr ...... ...... ..... rk:5 rj:5 imm:5 &i_rr +@cop_r_i .... ...... imm:s12 rj:5 cop:5 &cop_r_i +@j_i .... ........ .. imm:8 rj:5 ..... &j_i + +# +# Fixed point arithmetic operation instruction +# +add_w 0000 00000001 00000 ..... ..... ..... @rrr +add_d 0000 00000001 00001 ..... ..... ..... @rrr +sub_w 0000 00000001 00010 ..... ..... ..... @rrr +sub_d 0000 00000001 00011 ..... ..... ..... @rrr +slt 0000 00000001 00100 ..... ..... ..... @rrr +sltu 0000 00000001 00101 ..... ..... ..... @rrr +slti 0000 001000 ............ ..... ..... @rr_i12 +sltui 0000 001001 ............ ..... ..... @rr_i12 +nor 0000 00000001 01000 ..... ..... ..... @rrr +and 0000 00000001 01001 ..... ..... ..... @rrr +or 0000 00000001 01010 ..... ..... ..... @rrr +xor 0000 00000001 01011 ..... ..... ..... @rrr +orn 0000 00000001 01100 ..... ..... ..... @rrr +andn 0000 00000001 01101 ..... ..... ..... @rrr +mul_w 0000 00000001 11000 ..... ..... ..... @rrr +mulh_w 0000 00000001 11001 ..... ..... ..... @rrr +mulh_wu 0000 00000001 11010 ..... ..... ..... @rrr +mul_d 0000 00000001 11011 ..... ..... ..... @rrr +mulh_d 0000 00000001 11100 ..... ..... ..... @rrr +mulh_du 0000 00000001 11101 ..... ..... ..... @rrr +mulw_d_w 0000 00000001 11110 ..... ..... ..... @rrr +mulw_d_wu 0000 00000001 11111 ..... ..... ..... @rrr +div_w 0000 00000010 00000 ..... ..... ..... @rrr +mod_w 0000 00000010 00001 ..... ..... ..... @rrr +div_wu 0000 00000010 00010 ..... ..... ..... @rrr +mod_wu 0000 00000010 00011 ..... ..... ..... @rrr +div_d 0000 00000010 00100 ..... ..... ..... @rrr +mod_d 0000 00000010 00101 ..... ..... ..... @rrr +div_du 0000 00000010 00110 ..... ..... ..... @rrr +mod_du 0000 00000010 00111 ..... ..... ..... @rrr +alsl_w 0000 00000000 010 .. ..... ..... ..... @rrr_sa2p1 +alsl_wu 0000 00000000 011 .. ..... ..... ..... @rrr_sa2p1 +alsl_d 0000 00000010 110 .. ..... ..... ..... @rrr_sa2p1 +lu12i_w 0001 010 .................... ..... @r_i20 +lu32i_d 0001 011 .................... ..... @r_i20 +lu52i_d 0000 001100 ............ ..... ..... 
@rr_i12 +pcaddi 0001 100 .................... ..... @r_i20 +pcalau12i 0001 101 .................... ..... @r_i20 +pcaddu12i 0001 110 .................... ..... @r_i20 +pcaddu18i 0001 111 .................... ..... @r_i20 +addi_w 0000 001010 ............ ..... ..... @rr_i12 +addi_d 0000 001011 ............ ..... ..... @rr_i12 +addu16i_d 0001 00 ................ ..... ..... @rr_i16 +andi 0000 001101 ............ ..... ..... @rr_ui12 +ori 0000 001110 ............ ..... ..... @rr_ui12 +xori 0000 001111 ............ ..... ..... @rr_ui12 + +# +# Fixed point shift operation instruction +# +sll_w 0000 00000001 01110 ..... ..... ..... @rrr +srl_w 0000 00000001 01111 ..... ..... ..... @rrr +sra_w 0000 00000001 10000 ..... ..... ..... @rrr +sll_d 0000 00000001 10001 ..... ..... ..... @rrr +srl_d 0000 00000001 10010 ..... ..... ..... @rrr +sra_d 0000 00000001 10011 ..... ..... ..... @rrr +rotr_w 0000 00000001 10110 ..... ..... ..... @rrr +rotr_d 0000 00000001 10111 ..... ..... ..... @rrr +slli_w 0000 00000100 00001 ..... ..... ..... @rr_ui5 +slli_d 0000 00000100 0001 ...... ..... ..... @rr_ui6 +srli_w 0000 00000100 01001 ..... ..... ..... @rr_ui5 +srli_d 0000 00000100 0101 ...... ..... ..... @rr_ui6 +srai_w 0000 00000100 10001 ..... ..... ..... @rr_ui5 +srai_d 0000 00000100 1001 ...... ..... ..... @rr_ui6 +rotri_w 0000 00000100 11001 ..... ..... ..... @rr_ui5 +rotri_d 0000 00000100 1101 ...... ..... ..... @rr_ui6 + +# +# Fixed point bit operation instruction +# +ext_w_h 0000 00000000 00000 10110 ..... ..... @rr +ext_w_b 0000 00000000 00000 10111 ..... ..... @rr +clo_w 0000 00000000 00000 00100 ..... ..... @rr +clz_w 0000 00000000 00000 00101 ..... ..... @rr +cto_w 0000 00000000 00000 00110 ..... ..... @rr +ctz_w 0000 00000000 00000 00111 ..... ..... @rr +clo_d 0000 00000000 00000 01000 ..... ..... @rr +clz_d 0000 00000000 00000 01001 ..... ..... @rr +cto_d 0000 00000000 00000 01010 ..... ..... @rr +ctz_d 0000 00000000 00000 01011 ..... ..... @rr +revb_2h 0000 00000000 00000 01100 ..... ..... @rr +revb_4h 0000 00000000 00000 01101 ..... ..... @rr +revb_2w 0000 00000000 00000 01110 ..... ..... @rr +revb_d 0000 00000000 00000 01111 ..... ..... @rr +revh_2w 0000 00000000 00000 10000 ..... ..... @rr +revh_d 0000 00000000 00000 10001 ..... ..... @rr +bitrev_4b 0000 00000000 00000 10010 ..... ..... @rr +bitrev_8b 0000 00000000 00000 10011 ..... ..... @rr +bitrev_w 0000 00000000 00000 10100 ..... ..... @rr +bitrev_d 0000 00000000 00000 10101 ..... ..... @rr +bytepick_w 0000 00000000 100 .. ..... ..... ..... @rrr_sa2 +bytepick_d 0000 00000000 11 ... ..... ..... ..... @rrr_sa3 +maskeqz 0000 00000001 00110 ..... ..... ..... @rrr +masknez 0000 00000001 00111 ..... ..... ..... @rrr +bstrins_w 0000 0000011 ..... 0 ..... ..... ..... @rr_2bw +bstrpick_w 0000 0000011 ..... 1 ..... ..... ..... @rr_2bw +bstrins_d 0000 000010 ...... ...... ..... ..... @rr_2bd +bstrpick_d 0000 000011 ...... ...... ..... ..... @rr_2bd + +# +# Fixed point load/store instruction +# +ld_b 0010 100000 ............ ..... ..... @rr_i12 +ld_h 0010 100001 ............ ..... ..... @rr_i12 +ld_w 0010 100010 ............ ..... ..... @rr_i12 +ld_d 0010 100011 ............ ..... ..... @rr_i12 +st_b 0010 100100 ............ ..... ..... @rr_i12 +st_h 0010 100101 ............ ..... ..... @rr_i12 +st_w 0010 100110 ............ ..... ..... @rr_i12 +st_d 0010 100111 ............ ..... ..... @rr_i12 +ld_bu 0010 101000 ............ ..... ..... @rr_i12 +ld_hu 0010 101001 ............ ..... ..... @rr_i12 +ld_wu 0010 101010 ............ ..... ..... 
@rr_i12 +ldx_b 0011 10000000 00000 ..... ..... ..... @rrr +ldx_h 0011 10000000 01000 ..... ..... ..... @rrr +ldx_w 0011 10000000 10000 ..... ..... ..... @rrr +ldx_d 0011 10000000 11000 ..... ..... ..... @rrr +stx_b 0011 10000001 00000 ..... ..... ..... @rrr +stx_h 0011 10000001 01000 ..... ..... ..... @rrr +stx_w 0011 10000001 10000 ..... ..... ..... @rrr +stx_d 0011 10000001 11000 ..... ..... ..... @rrr +ldx_bu 0011 10000010 00000 ..... ..... ..... @rrr +ldx_hu 0011 10000010 01000 ..... ..... ..... @rrr +ldx_wu 0011 10000010 10000 ..... ..... ..... @rrr +preld 0010 101011 ............ ..... ..... @hint_r_i12 +dbar 0011 10000111 00100 ............... @i15 +ibar 0011 10000111 00101 ............... @i15 +ldptr_w 0010 0100 .............. ..... ..... @rr_i14s2 +stptr_w 0010 0101 .............. ..... ..... @rr_i14s2 +ldptr_d 0010 0110 .............. ..... ..... @rr_i14s2 +stptr_d 0010 0111 .............. ..... ..... @rr_i14s2 +ldgt_b 0011 10000111 10000 ..... ..... ..... @rrr +ldgt_h 0011 10000111 10001 ..... ..... ..... @rrr +ldgt_w 0011 10000111 10010 ..... ..... ..... @rrr +ldgt_d 0011 10000111 10011 ..... ..... ..... @rrr +ldle_b 0011 10000111 10100 ..... ..... ..... @rrr +ldle_h 0011 10000111 10101 ..... ..... ..... @rrr +ldle_w 0011 10000111 10110 ..... ..... ..... @rrr +ldle_d 0011 10000111 10111 ..... ..... ..... @rrr +stgt_b 0011 10000111 11000 ..... ..... ..... @rrr +stgt_h 0011 10000111 11001 ..... ..... ..... @rrr +stgt_w 0011 10000111 11010 ..... ..... ..... @rrr +stgt_d 0011 10000111 11011 ..... ..... ..... @rrr +stle_b 0011 10000111 11100 ..... ..... ..... @rrr +stle_h 0011 10000111 11101 ..... ..... ..... @rrr +stle_w 0011 10000111 11110 ..... ..... ..... @rrr +stle_d 0011 10000111 11111 ..... ..... ..... @rrr + +# +# Fixed point atomic instruction +# +ll_w 0010 0000 .............. ..... ..... @rr_i14s2 +sc_w 0010 0001 .............. ..... ..... @rr_i14s2 +ll_d 0010 0010 .............. ..... ..... @rr_i14s2 +sc_d 0010 0011 .............. ..... ..... @rr_i14s2 +amswap_w 0011 10000110 00000 ..... ..... ..... @rrr +amswap_d 0011 10000110 00001 ..... ..... ..... @rrr +amadd_w 0011 10000110 00010 ..... ..... ..... @rrr +amadd_d 0011 10000110 00011 ..... ..... ..... @rrr +amand_w 0011 10000110 00100 ..... ..... ..... @rrr +amand_d 0011 10000110 00101 ..... ..... ..... @rrr +amor_w 0011 10000110 00110 ..... ..... ..... @rrr +amor_d 0011 10000110 00111 ..... ..... ..... @rrr +amxor_w 0011 10000110 01000 ..... ..... ..... @rrr +amxor_d 0011 10000110 01001 ..... ..... ..... @rrr +ammax_w 0011 10000110 01010 ..... ..... ..... @rrr +ammax_d 0011 10000110 01011 ..... ..... ..... @rrr +ammin_w 0011 10000110 01100 ..... ..... ..... @rrr +ammin_d 0011 10000110 01101 ..... ..... ..... @rrr +ammax_wu 0011 10000110 01110 ..... ..... ..... @rrr +ammax_du 0011 10000110 01111 ..... ..... ..... @rrr +ammin_wu 0011 10000110 10000 ..... ..... ..... @rrr +ammin_du 0011 10000110 10001 ..... ..... ..... @rrr +amswap_db_w 0011 10000110 10010 ..... ..... ..... @rrr +amswap_db_d 0011 10000110 10011 ..... ..... ..... @rrr +amadd_db_w 0011 10000110 10100 ..... ..... ..... @rrr +amadd_db_d 0011 10000110 10101 ..... ..... ..... @rrr +amand_db_w 0011 10000110 10110 ..... ..... ..... @rrr +amand_db_d 0011 10000110 10111 ..... ..... ..... @rrr +amor_db_w 0011 10000110 11000 ..... ..... ..... @rrr +amor_db_d 0011 10000110 11001 ..... ..... ..... @rrr +amxor_db_w 0011 10000110 11010 ..... ..... ..... @rrr +amxor_db_d 0011 10000110 11011 ..... ..... ..... @rrr +ammax_db_w 0011 10000110 11100 ..... ..... ..... 
@rrr +ammax_db_d 0011 10000110 11101 ..... ..... ..... @rrr +ammin_db_w 0011 10000110 11110 ..... ..... ..... @rrr +ammin_db_d 0011 10000110 11111 ..... ..... ..... @rrr +ammax_db_wu 0011 10000111 00000 ..... ..... ..... @rrr +ammax_db_du 0011 10000111 00001 ..... ..... ..... @rrr +ammin_db_wu 0011 10000111 00010 ..... ..... ..... @rrr +ammin_db_du 0011 10000111 00011 ..... ..... ..... @rrr + +# +# Fixed point extra instruction +# +crc_w_b_w 0000 00000010 01000 ..... ..... ..... @rrr +crc_w_h_w 0000 00000010 01001 ..... ..... ..... @rrr +crc_w_w_w 0000 00000010 01010 ..... ..... ..... @rrr +crc_w_d_w 0000 00000010 01011 ..... ..... ..... @rrr +crcc_w_b_w 0000 00000010 01100 ..... ..... ..... @rrr +crcc_w_h_w 0000 00000010 01101 ..... ..... ..... @rrr +crcc_w_w_w 0000 00000010 01110 ..... ..... ..... @rrr +crcc_w_d_w 0000 00000010 01111 ..... ..... ..... @rrr +break 0000 00000010 10100 ............... @i15 +syscall 0000 00000010 10110 ............... @i15 +asrtle_d 0000 00000000 00010 ..... ..... 00000 @rr_jk +asrtgt_d 0000 00000000 00011 ..... ..... 00000 @rr_jk +rdtimel_w 0000 00000000 00000 11000 ..... ..... @rr +rdtimeh_w 0000 00000000 00000 11001 ..... ..... @rr +rdtime_d 0000 00000000 00000 11010 ..... ..... @rr +cpucfg 0000 00000000 00000 11011 ..... ..... @rr + +# +# Floating point arithmetic operation instruction +# +fadd_s 0000 00010000 00001 ..... ..... ..... @fff +fadd_d 0000 00010000 00010 ..... ..... ..... @fff +fsub_s 0000 00010000 00101 ..... ..... ..... @fff +fsub_d 0000 00010000 00110 ..... ..... ..... @fff +fmul_s 0000 00010000 01001 ..... ..... ..... @fff +fmul_d 0000 00010000 01010 ..... ..... ..... @fff +fdiv_s 0000 00010000 01101 ..... ..... ..... @fff +fdiv_d 0000 00010000 01110 ..... ..... ..... @fff +fmadd_s 0000 10000001 ..... ..... ..... ..... @ffff +fmadd_d 0000 10000010 ..... ..... ..... ..... @ffff +fmsub_s 0000 10000101 ..... ..... ..... ..... @ffff +fmsub_d 0000 10000110 ..... ..... ..... ..... @ffff +fnmadd_s 0000 10001001 ..... ..... ..... ..... @ffff +fnmadd_d 0000 10001010 ..... ..... ..... ..... @ffff +fnmsub_s 0000 10001101 ..... ..... ..... ..... @ffff +fnmsub_d 0000 10001110 ..... ..... ..... ..... @ffff +fmax_s 0000 00010000 10001 ..... ..... ..... @fff +fmax_d 0000 00010000 10010 ..... ..... ..... @fff +fmin_s 0000 00010000 10101 ..... ..... ..... @fff +fmin_d 0000 00010000 10110 ..... ..... ..... @fff +fmaxa_s 0000 00010000 11001 ..... ..... ..... @fff +fmaxa_d 0000 00010000 11010 ..... ..... ..... @fff +fmina_s 0000 00010000 11101 ..... ..... ..... @fff +fmina_d 0000 00010000 11110 ..... ..... ..... @fff +fabs_s 0000 00010001 01000 00001 ..... ..... @ff +fabs_d 0000 00010001 01000 00010 ..... ..... @ff +fneg_s 0000 00010001 01000 00101 ..... ..... @ff +fneg_d 0000 00010001 01000 00110 ..... ..... @ff +fsqrt_s 0000 00010001 01000 10001 ..... ..... @ff +fsqrt_d 0000 00010001 01000 10010 ..... ..... @ff +frecip_s 0000 00010001 01000 10101 ..... ..... @ff +frecip_d 0000 00010001 01000 10110 ..... ..... @ff +frsqrt_s 0000 00010001 01000 11001 ..... ..... @ff +frsqrt_d 0000 00010001 01000 11010 ..... ..... @ff +fscaleb_s 0000 00010001 00001 ..... ..... ..... @fff +fscaleb_d 0000 00010001 00010 ..... ..... ..... @fff +flogb_s 0000 00010001 01000 01001 ..... ..... @ff +flogb_d 0000 00010001 01000 01010 ..... ..... @ff +fcopysign_s 0000 00010001 00101 ..... ..... ..... @fff +fcopysign_d 0000 00010001 00110 ..... ..... ..... @fff +fclass_s 0000 00010001 01000 01101 ..... ..... @ff +fclass_d 0000 00010001 01000 01110 ..... ..... 
@ff + +# +# Floating point compare instruction +# +fcmp_cond_s 0000 11000001 ..... ..... ..... 00 ... @cff_fcond +fcmp_cond_d 0000 11000010 ..... ..... ..... 00 ... @cff_fcond + +# +# Floating point conversion instruction +# +fcvt_s_d 0000 00010001 10010 00110 ..... ..... @ff +fcvt_d_s 0000 00010001 10010 01001 ..... ..... @ff +ftintrm_w_s 0000 00010001 10100 00001 ..... ..... @ff +ftintrm_w_d 0000 00010001 10100 00010 ..... ..... @ff +ftintrm_l_s 0000 00010001 10100 01001 ..... ..... @ff +ftintrm_l_d 0000 00010001 10100 01010 ..... ..... @ff +ftintrp_w_s 0000 00010001 10100 10001 ..... ..... @ff +ftintrp_w_d 0000 00010001 10100 10010 ..... ..... @ff +ftintrp_l_s 0000 00010001 10100 11001 ..... ..... @ff +ftintrp_l_d 0000 00010001 10100 11010 ..... ..... @ff +ftintrz_w_s 0000 00010001 10101 00001 ..... ..... @ff +ftintrz_w_d 0000 00010001 10101 00010 ..... ..... @ff +ftintrz_l_s 0000 00010001 10101 01001 ..... ..... @ff +ftintrz_l_d 0000 00010001 10101 01010 ..... ..... @ff +ftintrne_w_s 0000 00010001 10101 10001 ..... ..... @ff +ftintrne_w_d 0000 00010001 10101 10010 ..... ..... @ff +ftintrne_l_s 0000 00010001 10101 11001 ..... ..... @ff +ftintrne_l_d 0000 00010001 10101 11010 ..... ..... @ff +ftint_w_s 0000 00010001 10110 00001 ..... ..... @ff +ftint_w_d 0000 00010001 10110 00010 ..... ..... @ff +ftint_l_s 0000 00010001 10110 01001 ..... ..... @ff +ftint_l_d 0000 00010001 10110 01010 ..... ..... @ff +ffint_s_w 0000 00010001 11010 00100 ..... ..... @ff +ffint_s_l 0000 00010001 11010 00110 ..... ..... @ff +ffint_d_w 0000 00010001 11010 01000 ..... ..... @ff +ffint_d_l 0000 00010001 11010 01010 ..... ..... @ff +frint_s 0000 00010001 11100 10001 ..... ..... @ff +frint_d 0000 00010001 11100 10010 ..... ..... @ff + +# +# Floating point move instruction +# +fmov_s 0000 00010001 01001 00101 ..... ..... @ff +fmov_d 0000 00010001 01001 00110 ..... ..... @ff +fsel 0000 11010000 00 ... ..... ..... ..... @fffc +movgr2fr_w 0000 00010001 01001 01001 ..... ..... @fr +movgr2fr_d 0000 00010001 01001 01010 ..... ..... @fr +movgr2frh_w 0000 00010001 01001 01011 ..... ..... @fr +movfr2gr_s 0000 00010001 01001 01101 ..... ..... @rf +movfr2gr_d 0000 00010001 01001 01110 ..... ..... @rf +movfrh2gr_s 0000 00010001 01001 01111 ..... ..... @rf +movgr2fcsr 0000 00010001 01001 10000 ..... ..... @fcsrd_r +movfcsr2gr 0000 00010001 01001 10010 ..... ..... @r_fcsrs +movfr2cf 0000 00010001 01001 10100 ..... 00 ... @cf +movcf2fr 0000 00010001 01001 10101 00 ... ..... @fc +movgr2cf 0000 00010001 01001 10110 ..... 00 ... @cr +movcf2gr 0000 00010001 01001 10111 00 ... ..... @rc + +# +# Floating point load/store instruction +# +fld_s 0010 101100 ............ ..... ..... @fr_i12 +fst_s 0010 101101 ............ ..... ..... @fr_i12 +fld_d 0010 101110 ............ ..... ..... @fr_i12 +fst_d 0010 101111 ............ ..... ..... @fr_i12 +fldx_s 0011 10000011 00000 ..... ..... ..... @frr +fldx_d 0011 10000011 01000 ..... ..... ..... @frr +fstx_s 0011 10000011 10000 ..... ..... ..... @frr +fstx_d 0011 10000011 11000 ..... ..... ..... @frr +fldgt_s 0011 10000111 01000 ..... ..... ..... @frr +fldgt_d 0011 10000111 01001 ..... ..... ..... @frr +fldle_s 0011 10000111 01010 ..... ..... ..... @frr +fldle_d 0011 10000111 01011 ..... ..... ..... @frr +fstgt_s 0011 10000111 01100 ..... ..... ..... @frr +fstgt_d 0011 10000111 01101 ..... ..... ..... @frr +fstle_s 0011 10000111 01110 ..... ..... ..... @frr +fstle_d 0011 10000111 01111 ..... ..... ..... @frr + +# +# Branch instructions +# +beqz 0100 00 ................ ..... ..... 
@r_offs21 +bnez 0100 01 ................ ..... ..... @r_offs21 +bceqz 0100 10 ................ 00 ... ..... @c_offs21 +bcnez 0100 10 ................ 01 ... ..... @c_offs21 +jirl 0100 11 ................ ..... ..... @rr_offs16 +b 0101 00 .......................... @offs26 +bl 0101 01 .......................... @offs26 +beq 0101 10 ................ ..... ..... @rr_offs16 +bne 0101 11 ................ ..... ..... @rr_offs16 +blt 0110 00 ................ ..... ..... @rr_offs16 +bge 0110 01 ................ ..... ..... @rr_offs16 +bltu 0110 10 ................ ..... ..... @rr_offs16 +bgeu 0110 11 ................ ..... ..... @rr_offs16 + +# +# Core instructions +# +{ + csrrd 0000 0100 .............. 00000 ..... @r_csr + csrwr 0000 0100 .............. 00001 ..... @r_csr + csrxchg 0000 0100 .............. ..... ..... @rr_csr +} + +iocsrrd_b 0000 01100100 10000 00000 ..... ..... @rr +iocsrrd_h 0000 01100100 10000 00001 ..... ..... @rr +iocsrrd_w 0000 01100100 10000 00010 ..... ..... @rr +iocsrrd_d 0000 01100100 10000 00011 ..... ..... @rr +iocsrwr_b 0000 01100100 10000 00100 ..... ..... @rr +iocsrwr_h 0000 01100100 10000 00101 ..... ..... @rr +iocsrwr_w 0000 01100100 10000 00110 ..... ..... @rr +iocsrwr_d 0000 01100100 10000 00111 ..... ..... @rr +tlbsrch 0000 01100100 10000 01010 00000 00000 @empty +tlbrd 0000 01100100 10000 01011 00000 00000 @empty +tlbwr 0000 01100100 10000 01100 00000 00000 @empty +tlbfill 0000 01100100 10000 01101 00000 00000 @empty +tlbclr 0000 01100100 10000 01000 00000 00000 @empty +tlbflush 0000 01100100 10000 01001 00000 00000 @empty +invtlb 0000 01100100 10011 ..... ..... ..... @i_rr +cacop 0000 011000 ............ ..... ..... @cop_r_i +lddir 0000 01100100 00 ........ ..... ..... @rr_ui8 +ldpte 0000 01100100 01 ........ ..... 00000 @j_i +ertn 0000 01100100 10000 01110 00000 00000 @empty +idle 0000 01100100 10001 ............... @i15 +dbcl 0000 00000010 10101 ............... 
@i15 diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h new file mode 100644 index 0000000000..f01635aed6 --- /dev/null +++ b/target/loongarch/internals.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch CPU -- internal functions and types + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_INTERNALS_H +#define LOONGARCH_INTERNALS_H + +#define FCMP_LT 0b0001 /* fp0 < fp1 */ +#define FCMP_EQ 0b0010 /* fp0 = fp1 */ +#define FCMP_UN 0b0100 /* unordered */ +#define FCMP_GT 0b1000 /* fp0 > fp1 */ + +#define TARGET_PHYS_MASK MAKE_64BIT_MASK(0, TARGET_PHYS_ADDR_SPACE_BITS) +#define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS) + +/* Global bit used for lddir/ldpte */ +#define LOONGARCH_PAGE_HUGE_SHIFT 6 +/* Global bit for huge page */ +#define LOONGARCH_HGLOBAL_SHIFT 12 + +void loongarch_translate_init(void); + +void loongarch_cpu_dump_state(CPUState *cpu, FILE *f, int flags); + +void G_NORETURN do_raise_exception(CPULoongArchState *env, + uint32_t exception, + uintptr_t pc); + +const char *loongarch_exception_name(int32_t exception); + +void restore_fp_status(CPULoongArchState *env); + +#ifndef CONFIG_USER_ONLY +extern const VMStateDescription vmstate_loongarch_cpu; + +void loongarch_cpu_set_irq(void *opaque, int irq, int level); + +void loongarch_constant_timer_cb(void *opaque); +uint64_t cpu_loongarch_get_constant_timer_counter(LoongArchCPU *cpu); +uint64_t cpu_loongarch_get_constant_timer_ticks(LoongArchCPU *cpu); +void cpu_loongarch_store_constant_timer_config(LoongArchCPU *cpu, + uint64_t value); + +bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); + +hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +#endif /* !CONFIG_USER_ONLY */ + +uint64_t read_fcc(CPULoongArchState *env); +void write_fcc(CPULoongArchState *env, uint64_t val); + +int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n); +int loongarch_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n); +void loongarch_cpu_register_gdb_regs_for_features(CPUState *cs); + +#endif diff --git a/target/loongarch/iocsr_helper.c b/target/loongarch/iocsr_helper.c new file mode 100644 index 0000000000..505853e17b --- /dev/null +++ b/target/loongarch/iocsr_helper.c @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + * + * Helpers for IOCSR reads/writes + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "cpu.h" +#include "qemu/host-utils.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "tcg/tcg-ldst.h" + +#define GET_MEMTXATTRS(cas) \ + ((MemTxAttrs){.requester_id = env_cpu(cas)->cpu_index}) + +uint64_t helper_iocsrrd_b(CPULoongArchState *env, target_ulong r_addr) +{ + return address_space_ldub(&env->address_space_iocsr, r_addr, + GET_MEMTXATTRS(env), NULL); +} + +uint64_t helper_iocsrrd_h(CPULoongArchState *env, target_ulong r_addr) +{ + return address_space_lduw(&env->address_space_iocsr, r_addr, + GET_MEMTXATTRS(env), NULL); +} + +uint64_t helper_iocsrrd_w(CPULoongArchState *env, target_ulong r_addr) +{ + return address_space_ldl(&env->address_space_iocsr, r_addr, + GET_MEMTXATTRS(env), NULL); +} + +uint64_t helper_iocsrrd_d(CPULoongArchState *env, target_ulong r_addr) +{ + return address_space_ldq(&env->address_space_iocsr, r_addr, 
+ GET_MEMTXATTRS(env), NULL); +} + +void helper_iocsrwr_b(CPULoongArchState *env, target_ulong w_addr, + target_ulong val) +{ + address_space_stb(&env->address_space_iocsr, w_addr, + val, GET_MEMTXATTRS(env), NULL); +} + +void helper_iocsrwr_h(CPULoongArchState *env, target_ulong w_addr, + target_ulong val) +{ + address_space_stw(&env->address_space_iocsr, w_addr, + val, GET_MEMTXATTRS(env), NULL); +} + +void helper_iocsrwr_w(CPULoongArchState *env, target_ulong w_addr, + target_ulong val) +{ + address_space_stl(&env->address_space_iocsr, w_addr, + val, GET_MEMTXATTRS(env), NULL); +} + +void helper_iocsrwr_d(CPULoongArchState *env, target_ulong w_addr, + target_ulong val) +{ + address_space_stq(&env->address_space_iocsr, w_addr, + val, GET_MEMTXATTRS(env), NULL); +} diff --git a/target/loongarch/machine.c b/target/loongarch/machine.c new file mode 100644 index 0000000000..b1e523ea72 --- /dev/null +++ b/target/loongarch/machine.c @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch Machine State + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "migration/cpu.h" +#include "internals.h" + +/* TLB state */ +const VMStateDescription vmstate_tlb = { + .name = "cpu/tlb", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT64(tlb_misc, LoongArchTLB), + VMSTATE_UINT64(tlb_entry0, LoongArchTLB), + VMSTATE_UINT64(tlb_entry1, LoongArchTLB), + VMSTATE_END_OF_LIST() + } +}; + +/* LoongArch CPU state */ + +const VMStateDescription vmstate_loongarch_cpu = { + .name = "cpu", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + + VMSTATE_UINTTL_ARRAY(env.gpr, LoongArchCPU, 32), + VMSTATE_UINTTL(env.pc, LoongArchCPU), + VMSTATE_UINT64_ARRAY(env.fpr, LoongArchCPU, 32), + VMSTATE_UINT32(env.fcsr0, LoongArchCPU), + VMSTATE_BOOL_ARRAY(env.cf, LoongArchCPU, 8), + + /* Remaining CSRs */ + VMSTATE_UINT64(env.CSR_CRMD, LoongArchCPU), + VMSTATE_UINT64(env.CSR_PRMD, LoongArchCPU), + VMSTATE_UINT64(env.CSR_EUEN, LoongArchCPU), + VMSTATE_UINT64(env.CSR_MISC, LoongArchCPU), + VMSTATE_UINT64(env.CSR_ECFG, LoongArchCPU), + VMSTATE_UINT64(env.CSR_ESTAT, LoongArchCPU), + VMSTATE_UINT64(env.CSR_ERA, LoongArchCPU), + VMSTATE_UINT64(env.CSR_BADV, LoongArchCPU), + VMSTATE_UINT64(env.CSR_BADI, LoongArchCPU), + VMSTATE_UINT64(env.CSR_EENTRY, LoongArchCPU), + VMSTATE_UINT64(env.CSR_TLBIDX, LoongArchCPU), + VMSTATE_UINT64(env.CSR_TLBEHI, LoongArchCPU), + VMSTATE_UINT64(env.CSR_TLBELO0, LoongArchCPU), + VMSTATE_UINT64(env.CSR_TLBELO1, LoongArchCPU), + VMSTATE_UINT64(env.CSR_ASID, LoongArchCPU), + VMSTATE_UINT64(env.CSR_PGDL, LoongArchCPU), + VMSTATE_UINT64(env.CSR_PGDH, LoongArchCPU), + VMSTATE_UINT64(env.CSR_PGD, LoongArchCPU), + VMSTATE_UINT64(env.CSR_PWCL, LoongArchCPU), + VMSTATE_UINT64(env.CSR_PWCH, LoongArchCPU), + VMSTATE_UINT64(env.CSR_STLBPS, LoongArchCPU), + VMSTATE_UINT64(env.CSR_RVACFG, LoongArchCPU), + VMSTATE_UINT64(env.CSR_PRCFG1, LoongArchCPU), + VMSTATE_UINT64(env.CSR_PRCFG2, LoongArchCPU), + VMSTATE_UINT64(env.CSR_PRCFG3, LoongArchCPU), + VMSTATE_UINT64_ARRAY(env.CSR_SAVE, LoongArchCPU, 16), + VMSTATE_UINT64(env.CSR_TID, LoongArchCPU), + VMSTATE_UINT64(env.CSR_TCFG, LoongArchCPU), + VMSTATE_UINT64(env.CSR_TVAL, LoongArchCPU), + VMSTATE_UINT64(env.CSR_CNTC, LoongArchCPU), + VMSTATE_UINT64(env.CSR_TICLR, LoongArchCPU), + VMSTATE_UINT64(env.CSR_LLBCTL, LoongArchCPU), + VMSTATE_UINT64(env.CSR_IMPCTL1, LoongArchCPU), + 
VMSTATE_UINT64(env.CSR_IMPCTL2, LoongArchCPU), + VMSTATE_UINT64(env.CSR_TLBRENTRY, LoongArchCPU), + VMSTATE_UINT64(env.CSR_TLBRBADV, LoongArchCPU), + VMSTATE_UINT64(env.CSR_TLBRERA, LoongArchCPU), + VMSTATE_UINT64(env.CSR_TLBRSAVE, LoongArchCPU), + VMSTATE_UINT64(env.CSR_TLBRELO0, LoongArchCPU), + VMSTATE_UINT64(env.CSR_TLBRELO1, LoongArchCPU), + VMSTATE_UINT64(env.CSR_TLBREHI, LoongArchCPU), + VMSTATE_UINT64(env.CSR_TLBRPRMD, LoongArchCPU), + VMSTATE_UINT64(env.CSR_MERRCTL, LoongArchCPU), + VMSTATE_UINT64(env.CSR_MERRINFO1, LoongArchCPU), + VMSTATE_UINT64(env.CSR_MERRINFO2, LoongArchCPU), + VMSTATE_UINT64(env.CSR_MERRENTRY, LoongArchCPU), + VMSTATE_UINT64(env.CSR_MERRERA, LoongArchCPU), + VMSTATE_UINT64(env.CSR_MERRSAVE, LoongArchCPU), + VMSTATE_UINT64(env.CSR_CTAG, LoongArchCPU), + VMSTATE_UINT64_ARRAY(env.CSR_DMW, LoongArchCPU, 4), + + /* Debug CSRs */ + VMSTATE_UINT64(env.CSR_DBG, LoongArchCPU), + VMSTATE_UINT64(env.CSR_DERA, LoongArchCPU), + VMSTATE_UINT64(env.CSR_DSAVE, LoongArchCPU), + /* TLB */ + VMSTATE_STRUCT_ARRAY(env.tlb, LoongArchCPU, LOONGARCH_TLB_MAX, + 0, vmstate_tlb, LoongArchTLB), + + VMSTATE_END_OF_LIST() + }, +}; diff --git a/target/loongarch/meson.build b/target/loongarch/meson.build new file mode 100644 index 0000000000..6376f9e84b --- /dev/null +++ b/target/loongarch/meson.build @@ -0,0 +1,30 @@ +gen = decodetree.process('insns.decode') + +loongarch_ss = ss.source_set() +loongarch_ss.add(files( + 'cpu.c', + 'disas.c', +)) +loongarch_tcg_ss = ss.source_set() +loongarch_tcg_ss.add(gen) +loongarch_tcg_ss.add(files( + 'fpu_helper.c', + 'op_helper.c', + 'translate.c', + 'gdbstub.c', +)) +loongarch_tcg_ss.add(zlib) + +loongarch_softmmu_ss = ss.source_set() +loongarch_softmmu_ss.add(files( + 'machine.c', + 'tlb_helper.c', + 'constant_timer.c', + 'csr_helper.c', + 'iocsr_helper.c', +)) + +loongarch_ss.add_all(when: 'CONFIG_TCG', if_true: [loongarch_tcg_ss]) + +target_arch += {'loongarch': loongarch_ss} +target_softmmu_arch += {'loongarch': loongarch_softmmu_ss} diff --git a/target/loongarch/op_helper.c b/target/loongarch/op_helper.c new file mode 100644 index 0000000000..568c071601 --- /dev/null +++ b/target/loongarch/op_helper.c @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch emulation helpers for QEMU. 
+ *
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "internals.h"
+#include "qemu/crc32c.h"
+#include <zlib.h>
+#include "cpu-csr.h"
+
+/* Exceptions helpers */
+void helper_raise_exception(CPULoongArchState *env, uint32_t exception)
+{
+    do_raise_exception(env, exception, GETPC());
+}
+
+target_ulong helper_bitrev_w(target_ulong rj)
+{
+    return (int32_t)revbit32(rj);
+}
+
+target_ulong helper_bitrev_d(target_ulong rj)
+{
+    return revbit64(rj);
+}
+
+target_ulong helper_bitswap(target_ulong v)
+{
+    v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) |
+        ((v & (target_ulong)0x5555555555555555ULL) << 1);
+    v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) |
+        ((v & (target_ulong)0x3333333333333333ULL) << 2);
+    v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) |
+        ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4);
+    return v;
+}
+
+/* loongarch assert op */
+void helper_asrtle_d(CPULoongArchState *env, target_ulong rj, target_ulong rk)
+{
+    if (rj > rk) {
+        do_raise_exception(env, EXCCODE_BCE, 0);
+    }
+}
+
+void helper_asrtgt_d(CPULoongArchState *env, target_ulong rj, target_ulong rk)
+{
+    if (rj <= rk) {
+        do_raise_exception(env, EXCCODE_BCE, 0);
+    }
+}
+
+target_ulong helper_crc32(target_ulong val, target_ulong m, uint64_t sz)
+{
+    uint8_t buf[8];
+    target_ulong mask = ((sz * 8) == 64) ? -1ULL : ((1ULL << (sz * 8)) - 1);
+
+    m &= mask;
+    stq_le_p(buf, m);
+    return (int32_t) (crc32(val ^ 0xffffffff, buf, sz) ^ 0xffffffff);
+}
+
+target_ulong helper_crc32c(target_ulong val, target_ulong m, uint64_t sz)
+{
+    uint8_t buf[8];
+    target_ulong mask = ((sz * 8) == 64) ? -1ULL : ((1ULL << (sz * 8)) - 1);
+    m &= mask;
+    stq_le_p(buf, m);
+    return (int32_t) (crc32c(val, buf, sz) ^ 0xffffffff);
+}
+
+target_ulong helper_cpucfg(CPULoongArchState *env, target_ulong rj)
+{
+    return rj >= ARRAY_SIZE(env->cpucfg) ?
0 : env->cpucfg[rj]; +} + +uint64_t helper_rdtime_d(CPULoongArchState *env) +{ +#ifdef CONFIG_USER_ONLY + return cpu_get_host_ticks(); +#else + uint64_t plv; + LoongArchCPU *cpu = env_archcpu(env); + + plv = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV); + if (extract64(env->CSR_MISC, R_CSR_MISC_DRDTL_SHIFT + plv, 1)) { + do_raise_exception(env, EXCCODE_IPE, GETPC()); + } + + return cpu_loongarch_get_constant_timer_counter(cpu); +#endif +} + +#ifndef CONFIG_USER_ONLY +void helper_ertn(CPULoongArchState *env) +{ + uint64_t csr_pplv, csr_pie; + if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) { + csr_pplv = FIELD_EX64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PPLV); + csr_pie = FIELD_EX64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PIE); + + env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 0); + env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 0); + env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 1); + env->pc = env->CSR_TLBRERA; + qemu_log_mask(CPU_LOG_INT, "%s: TLBRERA " TARGET_FMT_lx "\n", + __func__, env->CSR_TLBRERA); + } else { + csr_pplv = FIELD_EX64(env->CSR_PRMD, CSR_PRMD, PPLV); + csr_pie = FIELD_EX64(env->CSR_PRMD, CSR_PRMD, PIE); + + env->pc = env->CSR_ERA; + qemu_log_mask(CPU_LOG_INT, "%s: ERA " TARGET_FMT_lx "\n", + __func__, env->CSR_ERA); + } + env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, csr_pplv); + env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, csr_pie); + + env->lladdr = 1; +} + +void helper_idle(CPULoongArchState *env) +{ + CPUState *cs = env_cpu(env); + + cs->halted = 1; + do_raise_exception(env, EXCP_HLT, 0); +} +#endif diff --git a/target/loongarch/tlb_helper.c b/target/loongarch/tlb_helper.c new file mode 100644 index 0000000000..c6d1de50fe --- /dev/null +++ b/target/loongarch/tlb_helper.c @@ -0,0 +1,764 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch TLB helpers + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + * + */ + +#include "qemu/osdep.h" +#include "qemu/guest-random.h" + +#include "cpu.h" +#include "internals.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "exec/log.h" +#include "cpu-csr.h" + +enum { + TLBRET_MATCH = 0, + TLBRET_BADADDR = 1, + TLBRET_NOMATCH = 2, + TLBRET_INVALID = 3, + TLBRET_DIRTY = 4, + TLBRET_RI = 5, + TLBRET_XI = 6, + TLBRET_PE = 7, +}; + +static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical, + int *prot, target_ulong address, + int access_type, int index, int mmu_idx) +{ + LoongArchTLB *tlb = &env->tlb[index]; + uint64_t plv = mmu_idx; + uint64_t tlb_entry, tlb_ppn; + uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv; + + if (index >= LOONGARCH_STLB) { + tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); + } else { + tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); + } + n = (address >> tlb_ps) & 0x1;/* Odd or even */ + + tlb_entry = n ? 
tlb->tlb_entry1 : tlb->tlb_entry0; + tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V); + tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D); + tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV); + tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY, PPN); + tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY, NX); + tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY, NR); + tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY, RPLV); + + /* Check access rights */ + if (!tlb_v) { + return TLBRET_INVALID; + } + + if (access_type == MMU_INST_FETCH && tlb_nx) { + return TLBRET_XI; + } + + if (access_type == MMU_DATA_LOAD && tlb_nr) { + return TLBRET_RI; + } + + if (((tlb_rplv == 0) && (plv > tlb_plv)) || + ((tlb_rplv == 1) && (plv != tlb_plv))) { + return TLBRET_PE; + } + + if ((access_type == MMU_DATA_STORE) && !tlb_d) { + return TLBRET_DIRTY; + } + + /* + * tlb_entry contains ppn[47:12] while 16KiB ppn is [47:15] + * need adjust. + */ + *physical = (tlb_ppn << R_TLBENTRY_PPN_SHIFT) | + (address & MAKE_64BIT_MASK(0, tlb_ps)); + *prot = PAGE_READ; + if (tlb_d) { + *prot |= PAGE_WRITE; + } + if (!tlb_nx) { + *prot |= PAGE_EXEC; + } + return TLBRET_MATCH; +} + +/* + * One tlb entry holds an adjacent odd/even pair, the vpn is the + * content of the virtual page number divided by 2. So the + * compare vpn is bit[47:15] for 16KiB page. while the vppn + * field in tlb entry contains bit[47:13], so need adjust. + * virt_vpn = vaddr[47:13] + */ +static bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr, + int *index) +{ + LoongArchTLB *tlb; + uint16_t csr_asid, tlb_asid, stlb_idx; + uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps; + int i, compare_shift; + uint64_t vpn, tlb_vppn; + + csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); + stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); + vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1); + stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */ + compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; + + /* Search STLB */ + for (i = 0; i < 8; ++i) { + tlb = &env->tlb[i * 256 + stlb_idx]; + tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); + if (tlb_e) { + tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); + tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); + tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + + if ((tlb_g == 1 || tlb_asid == csr_asid) && + (vpn == (tlb_vppn >> compare_shift))) { + *index = i * 256 + stlb_idx; + return true; + } + } + } + + /* Search MTLB */ + for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) { + tlb = &env->tlb[i]; + tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); + if (tlb_e) { + tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); + tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); + tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); + tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; + vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1); + if ((tlb_g == 1 || tlb_asid == csr_asid) && + (vpn == (tlb_vppn >> compare_shift))) { + *index = i; + return true; + } + } + } + return false; +} + +static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical, + int *prot, target_ulong address, + MMUAccessType access_type, int mmu_idx) +{ + int index, match; + + match = loongarch_tlb_search(env, address, &index); + if (match) { + return loongarch_map_tlb_entry(env, physical, prot, + address, access_type, index, mmu_idx); + } + + return TLBRET_NOMATCH; +} + +static int get_physical_address(CPULoongArchState *env, hwaddr *physical, + int *prot, target_ulong address, + MMUAccessType 
access_type, int mmu_idx) +{ + int user_mode = mmu_idx == MMU_IDX_USER; + int kernel_mode = mmu_idx == MMU_IDX_KERNEL; + uint32_t plv, base_c, base_v; + int64_t addr_high; + uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA); + uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG); + + /* Check PG and DA */ + if (da & !pg) { + *physical = address & TARGET_PHYS_MASK; + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return TLBRET_MATCH; + } + + plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT); + base_v = address >> TARGET_VIRT_ADDR_SPACE_BITS; + /* Check direct map window */ + for (int i = 0; i < 4; i++) { + base_c = env->CSR_DMW[i] >> TARGET_VIRT_ADDR_SPACE_BITS; + if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) { + *physical = dmw_va2pa(address); + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return TLBRET_MATCH; + } + } + + /* Check valid extension */ + addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16); + if (!(addr_high == 0 || addr_high == -1)) { + return TLBRET_BADADDR; + } + + /* Mapped address */ + return loongarch_map_address(env, physical, prot, address, + access_type, mmu_idx); +} + +hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + hwaddr phys_addr; + int prot; + + if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD, + cpu_mmu_index(env, false)) != 0) { + return -1; + } + return phys_addr; +} + +static void raise_mmu_exception(CPULoongArchState *env, target_ulong address, + MMUAccessType access_type, int tlb_error) +{ + CPUState *cs = env_cpu(env); + + switch (tlb_error) { + default: + case TLBRET_BADADDR: + cs->exception_index = access_type == MMU_INST_FETCH + ? EXCCODE_ADEF : EXCCODE_ADEM; + break; + case TLBRET_NOMATCH: + /* No TLB match for a mapped address */ + if (access_type == MMU_DATA_LOAD) { + cs->exception_index = EXCCODE_PIL; + } else if (access_type == MMU_DATA_STORE) { + cs->exception_index = EXCCODE_PIS; + } else if (access_type == MMU_INST_FETCH) { + cs->exception_index = EXCCODE_PIF; + } + env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 1); + break; + case TLBRET_INVALID: + /* TLB match with no valid bit */ + if (access_type == MMU_DATA_LOAD) { + cs->exception_index = EXCCODE_PIL; + } else if (access_type == MMU_DATA_STORE) { + cs->exception_index = EXCCODE_PIS; + } else if (access_type == MMU_INST_FETCH) { + cs->exception_index = EXCCODE_PIF; + } + break; + case TLBRET_DIRTY: + /* TLB match but 'D' bit is cleared */ + cs->exception_index = EXCCODE_PME; + break; + case TLBRET_XI: + /* Execute-Inhibit Exception */ + cs->exception_index = EXCCODE_PNX; + break; + case TLBRET_RI: + /* Read-Inhibit Exception */ + cs->exception_index = EXCCODE_PNR; + break; + case TLBRET_PE: + /* Privileged Exception */ + cs->exception_index = EXCCODE_PPI; + break; + } + + if (tlb_error == TLBRET_NOMATCH) { + env->CSR_TLBRBADV = address; + env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, VPPN, + extract64(address, 13, 35)); + } else { + if (!FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) { + env->CSR_BADV = address; + } + env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1); + } +} + +static void invalidate_tlb_entry(CPULoongArchState *env, int index) +{ + target_ulong addr, mask, pagesize; + uint8_t tlb_ps; + LoongArchTLB *tlb = &env->tlb[index]; + + int mmu_idx = cpu_mmu_index(env, false); + uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V); + uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V); + 
uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); + + if (index >= LOONGARCH_STLB) { + tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); + } else { + tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); + } + pagesize = MAKE_64BIT_MASK(tlb_ps, 1); + mask = MAKE_64BIT_MASK(0, tlb_ps + 1); + + if (tlb_v0) { + addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask; /* even */ + tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize, + mmu_idx, TARGET_LONG_BITS); + } + + if (tlb_v1) { + addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & pagesize; /* odd */ + tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize, + mmu_idx, TARGET_LONG_BITS); + } +} + +static void invalidate_tlb(CPULoongArchState *env, int index) +{ + LoongArchTLB *tlb; + uint16_t csr_asid, tlb_asid, tlb_g; + + csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); + tlb = &env->tlb[index]; + tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); + tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + if (tlb_g == 0 && tlb_asid != csr_asid) { + return; + } + invalidate_tlb_entry(env, index); +} + +static void fill_tlb_entry(CPULoongArchState *env, int index) +{ + LoongArchTLB *tlb = &env->tlb[index]; + uint64_t lo0, lo1, csr_vppn; + uint16_t csr_asid; + uint8_t csr_ps; + + if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) { + csr_ps = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS); + csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, VPPN); + lo0 = env->CSR_TLBRELO0; + lo1 = env->CSR_TLBRELO1; + } else { + csr_ps = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS); + csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI, VPPN); + lo0 = env->CSR_TLBELO0; + lo1 = env->CSR_TLBELO1; + } + + if (csr_ps == 0) { + qemu_log_mask(CPU_LOG_MMU, "page size is 0\n"); + } + + /* Only MTLB has the ps fields */ + if (index >= LOONGARCH_STLB) { + tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps); + } + + tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, csr_vppn); + tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 1); + csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); + tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, ASID, csr_asid); + + tlb->tlb_entry0 = lo0; + tlb->tlb_entry1 = lo1; +} + +/* Return an random value between low and high */ +static uint32_t get_random_tlb(uint32_t low, uint32_t high) +{ + uint32_t val; + + qemu_guest_getrandom_nofail(&val, sizeof(val)); + return val % (high - low + 1) + low; +} + +void helper_tlbsrch(CPULoongArchState *env) +{ + int index, match; + + if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) { + match = loongarch_tlb_search(env, env->CSR_TLBREHI, &index); + } else { + match = loongarch_tlb_search(env, env->CSR_TLBEHI, &index); + } + + if (match) { + env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX, index); + env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0); + return; + } + + env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1); +} + +void helper_tlbrd(CPULoongArchState *env) +{ + LoongArchTLB *tlb; + int index; + uint8_t tlb_ps, tlb_e; + + index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX); + tlb = &env->tlb[index]; + + if (index >= LOONGARCH_STLB) { + tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); + } else { + tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); + } + tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); + + if (!tlb_e) { + /* Invalid TLB entry */ + env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1); + env->CSR_ASID = FIELD_DP64(env->CSR_ASID, CSR_ASID, ASID, 0); + env->CSR_TLBEHI = 0; + 
env->CSR_TLBELO0 = 0; + env->CSR_TLBELO1 = 0; + env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, PS, 0); + } else { + /* Valid TLB entry */ + env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0); + env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, + PS, (tlb_ps & 0x3f)); + env->CSR_TLBEHI = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN) << + R_TLB_MISC_VPPN_SHIFT; + env->CSR_TLBELO0 = tlb->tlb_entry0; + env->CSR_TLBELO1 = tlb->tlb_entry1; + } +} + +void helper_tlbwr(CPULoongArchState *env) +{ + int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX); + + invalidate_tlb(env, index); + + if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) { + env->tlb[index].tlb_misc = FIELD_DP64(env->tlb[index].tlb_misc, + TLB_MISC, E, 0); + return; + } + + fill_tlb_entry(env, index); +} + +void helper_tlbfill(CPULoongArchState *env) +{ + uint64_t address, entryhi; + int index, set, stlb_idx; + uint16_t pagesize, stlb_ps; + + if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) { + entryhi = env->CSR_TLBREHI; + pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS); + } else { + entryhi = env->CSR_TLBEHI; + pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS); + } + + stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); + + if (pagesize == stlb_ps) { + /* Only write into STLB bits [47:13] */ + address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_VPPN_SHIFT); + + /* Choose one set ramdomly */ + set = get_random_tlb(0, 7); + + /* Index in one set */ + stlb_idx = (address >> (stlb_ps + 1)) & 0xff; /* [0,255] */ + + index = set * 256 + stlb_idx; + } else { + /* Only write into MTLB */ + index = get_random_tlb(LOONGARCH_STLB, LOONGARCH_TLB_MAX - 1); + } + + invalidate_tlb(env, index); + fill_tlb_entry(env, index); +} + +void helper_tlbclr(CPULoongArchState *env) +{ + LoongArchTLB *tlb; + int i, index; + uint16_t csr_asid, tlb_asid, tlb_g; + + csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); + index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX); + + if (index < LOONGARCH_STLB) { + /* STLB. One line per operation */ + for (i = 0; i < 8; i++) { + tlb = &env->tlb[i * 256 + (index % 256)]; + tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); + tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + if (!tlb_g && tlb_asid == csr_asid) { + tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0); + } + } + } else if (index < LOONGARCH_TLB_MAX) { + /* All MTLB entries */ + for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) { + tlb = &env->tlb[i]; + tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); + tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + if (!tlb_g && tlb_asid == csr_asid) { + tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0); + } + } + } + + tlb_flush(env_cpu(env)); +} + +void helper_tlbflush(CPULoongArchState *env) +{ + int i, index; + + index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX); + + if (index < LOONGARCH_STLB) { + /* STLB. 
One line per operation */ + for (i = 0; i < 8; i++) { + int s_idx = i * 256 + (index % 256); + env->tlb[s_idx].tlb_misc = FIELD_DP64(env->tlb[s_idx].tlb_misc, + TLB_MISC, E, 0); + } + } else if (index < LOONGARCH_TLB_MAX) { + /* All MTLB entries */ + for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) { + env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc, + TLB_MISC, E, 0); + } + } + + tlb_flush(env_cpu(env)); +} + +void helper_invtlb_all(CPULoongArchState *env) +{ + for (int i = 0; i < LOONGARCH_TLB_MAX; i++) { + env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc, + TLB_MISC, E, 0); + } + tlb_flush(env_cpu(env)); +} + +void helper_invtlb_all_g(CPULoongArchState *env, uint32_t g) +{ + for (int i = 0; i < LOONGARCH_TLB_MAX; i++) { + LoongArchTLB *tlb = &env->tlb[i]; + uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + + if (tlb_g == g) { + tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0); + } + } + tlb_flush(env_cpu(env)); +} + +void helper_invtlb_all_asid(CPULoongArchState *env, target_ulong info) +{ + uint16_t asid = info & R_CSR_ASID_ASID_MASK; + + for (int i = 0; i < LOONGARCH_TLB_MAX; i++) { + LoongArchTLB *tlb = &env->tlb[i]; + uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); + + if (!tlb_g && (tlb_asid == asid)) { + tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0); + } + } + tlb_flush(env_cpu(env)); +} + +void helper_invtlb_page_asid(CPULoongArchState *env, target_ulong info, + target_ulong addr) +{ + uint16_t asid = info & 0x3ff; + + for (int i = 0; i < LOONGARCH_TLB_MAX; i++) { + LoongArchTLB *tlb = &env->tlb[i]; + uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); + uint64_t vpn, tlb_vppn; + uint8_t tlb_ps, compare_shift; + + if (i >= LOONGARCH_STLB) { + tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); + } else { + tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); + } + tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); + vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1); + compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; + + if (!tlb_g && (tlb_asid == asid) && + (vpn == (tlb_vppn >> compare_shift))) { + tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0); + } + } + tlb_flush(env_cpu(env)); +} + +void helper_invtlb_page_asid_or_g(CPULoongArchState *env, + target_ulong info, target_ulong addr) +{ + uint16_t asid = info & 0x3ff; + + for (int i = 0; i < LOONGARCH_TLB_MAX; i++) { + LoongArchTLB *tlb = &env->tlb[i]; + uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); + uint64_t vpn, tlb_vppn; + uint8_t tlb_ps, compare_shift; + + if (i >= LOONGARCH_STLB) { + tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); + } else { + tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); + } + tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); + vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1); + compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; + + if ((tlb_g || (tlb_asid == asid)) && + (vpn == (tlb_vppn >> compare_shift))) { + tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0); + } + } + tlb_flush(env_cpu(env)); +} + +bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + hwaddr physical; + int prot; + int ret; + + /* Data access */ + ret = 
get_physical_address(env, &physical, &prot, address, + access_type, mmu_idx); + + if (ret == TLBRET_MATCH) { + tlb_set_page(cs, address & TARGET_PAGE_MASK, + physical & TARGET_PAGE_MASK, prot, + mmu_idx, TARGET_PAGE_SIZE); + qemu_log_mask(CPU_LOG_MMU, + "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx + " prot %d\n", __func__, address, physical, prot); + return true; + } else { + qemu_log_mask(CPU_LOG_MMU, + "%s address=%" VADDR_PRIx " ret %d\n", __func__, address, + ret); + } + if (probe) { + return false; + } + raise_mmu_exception(env, address, access_type, ret); + cpu_loop_exit_restore(cs, retaddr); +} + +target_ulong helper_lddir(CPULoongArchState *env, target_ulong base, + target_ulong level, uint32_t mem_idx) +{ + CPUState *cs = env_cpu(env); + target_ulong badvaddr, index, phys, ret; + int shift; + uint64_t dir_base, dir_width; + bool huge = (base >> LOONGARCH_PAGE_HUGE_SHIFT) & 0x1; + + badvaddr = env->CSR_TLBRBADV; + base = base & TARGET_PHYS_MASK; + + /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */ + shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH); + shift = (shift + 1) * 3; + + if (huge) { + return base; + } + switch (level) { + case 1: + dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE); + dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH); + break; + case 2: + dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE); + dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH); + break; + case 3: + dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE); + dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH); + break; + case 4: + dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE); + dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH); + break; + default: + do_raise_exception(env, EXCCODE_INE, GETPC()); + return 0; + } + index = (badvaddr >> dir_base) & ((1 << dir_width) - 1); + phys = base | index << shift; + ret = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK; + return ret; +} + +void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd, + uint32_t mem_idx) +{ + CPUState *cs = env_cpu(env); + target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, ps, badv; + int shift; + bool huge = (base >> LOONGARCH_PAGE_HUGE_SHIFT) & 0x1; + uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE); + uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH); + + base = base & TARGET_PHYS_MASK; + + if (huge) { + /* Huge Page. base is paddr */ + tmp0 = base ^ (1 << LOONGARCH_PAGE_HUGE_SHIFT); + /* Move Global bit */ + tmp0 = ((tmp0 & (1 << LOONGARCH_HGLOBAL_SHIFT)) >> + LOONGARCH_HGLOBAL_SHIFT) << R_TLBENTRY_G_SHIFT | + (tmp0 & (~(1 << R_TLBENTRY_G_SHIFT))); + ps = ptbase + ptwidth - 1; + if (odd) { + tmp0 += MAKE_64BIT_MASK(ps, 1); + } + } else { + /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */ + shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH); + shift = (shift + 1) * 3; + badv = env->CSR_TLBRBADV; + + ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1); + ptindex = ptindex & ~0x1; /* clear bit 0 */ + ptoffset0 = ptindex << shift; + ptoffset1 = (ptindex + 1) << shift; + + phys = base | (odd ? 
ptoffset1 : ptoffset0); + tmp0 = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK; + ps = ptbase; + } + + if (odd) { + env->CSR_TLBRELO1 = tmp0; + } else { + env->CSR_TLBRELO0 = tmp0; + } + env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, PS, ps); +} diff --git a/target/loongarch/translate.c b/target/loongarch/translate.c new file mode 100644 index 0000000000..38ced69803 --- /dev/null +++ b/target/loongarch/translate.c @@ -0,0 +1,279 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch emulation for QEMU - main translation routines. + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "tcg/tcg-op.h" +#include "exec/translator.h" +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" + +#include "exec/translator.h" +#include "exec/log.h" +#include "qemu/qemu-print.h" +#include "fpu/softfloat.h" +#include "translate.h" +#include "internals.h" + +/* Global register indices */ +TCGv cpu_gpr[32], cpu_pc; +static TCGv cpu_lladdr, cpu_llval; +TCGv_i64 cpu_fpr[32]; + +#include "exec/gen-icount.h" + +#define DISAS_STOP DISAS_TARGET_0 +#define DISAS_EXIT DISAS_TARGET_1 +#define DISAS_EXIT_UPDATE DISAS_TARGET_2 + +static inline int plus_1(DisasContext *ctx, int x) +{ + return x + 1; +} + +static inline int shl_2(DisasContext *ctx, int x) +{ + return x << 2; +} + +/* + * LoongArch the upper 32 bits are undefined ("can be any value"). + * QEMU chooses to nanbox, because it is most likely to show guest bugs early. + */ +static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in) +{ + tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32)); +} + +void generate_exception(DisasContext *ctx, int excp) +{ + tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next); + gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp)); + ctx->base.is_jmp = DISAS_NORETURN; +} + +static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) +{ + if (translator_use_goto_tb(&ctx->base, dest)) { + tcg_gen_goto_tb(n); + tcg_gen_movi_tl(cpu_pc, dest); + tcg_gen_exit_tb(ctx->base.tb, n); + } else { + tcg_gen_movi_tl(cpu_pc, dest); + tcg_gen_lookup_and_goto_ptr(); + } +} + +static void loongarch_tr_init_disas_context(DisasContextBase *dcbase, + CPUState *cs) +{ + int64_t bound; + DisasContext *ctx = container_of(dcbase, DisasContext, base); + + ctx->page_start = ctx->base.pc_first & TARGET_PAGE_MASK; + ctx->plv = ctx->base.tb->flags & HW_FLAGS_PLV_MASK; + if (ctx->base.tb->flags & HW_FLAGS_CRMD_PG) { + ctx->mem_idx = ctx->plv; + } else { + ctx->mem_idx = MMU_IDX_DA; + } + + /* Bound the number of insns to execute to those left on the page. */ + bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4; + ctx->base.max_insns = MIN(ctx->base.max_insns, bound); + + ctx->ntemp = 0; + memset(ctx->temp, 0, sizeof(ctx->temp)); + + ctx->zero = tcg_constant_tl(0); +} + +static void loongarch_tr_tb_start(DisasContextBase *dcbase, CPUState *cs) +{ +} + +static void loongarch_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + + tcg_gen_insn_start(ctx->base.pc_next); +} + +/* + * Wrappers for getting reg values. + * + * The $zero register does not have cpu_gpr[0] allocated -- we supply the + * constant zero as a source, and an uninitialized sink as destination. + * + * Further, we may provide an extension for word operations. 
+ */ +static TCGv temp_new(DisasContext *ctx) +{ + assert(ctx->ntemp < ARRAY_SIZE(ctx->temp)); + return ctx->temp[ctx->ntemp++] = tcg_temp_new(); +} + +static TCGv gpr_src(DisasContext *ctx, int reg_num, DisasExtend src_ext) +{ + TCGv t; + + if (reg_num == 0) { + return ctx->zero; + } + + switch (src_ext) { + case EXT_NONE: + return cpu_gpr[reg_num]; + case EXT_SIGN: + t = temp_new(ctx); + tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]); + return t; + case EXT_ZERO: + t = temp_new(ctx); + tcg_gen_ext32u_tl(t, cpu_gpr[reg_num]); + return t; + } + g_assert_not_reached(); +} + +static TCGv gpr_dst(DisasContext *ctx, int reg_num, DisasExtend dst_ext) +{ + if (reg_num == 0 || dst_ext) { + return temp_new(ctx); + } + return cpu_gpr[reg_num]; +} + +static void gen_set_gpr(int reg_num, TCGv t, DisasExtend dst_ext) +{ + if (reg_num != 0) { + switch (dst_ext) { + case EXT_NONE: + tcg_gen_mov_tl(cpu_gpr[reg_num], t); + break; + case EXT_SIGN: + tcg_gen_ext32s_tl(cpu_gpr[reg_num], t); + break; + case EXT_ZERO: + tcg_gen_ext32u_tl(cpu_gpr[reg_num], t); + break; + default: + g_assert_not_reached(); + } + } +} + +#include "decode-insns.c.inc" +#include "insn_trans/trans_arith.c.inc" +#include "insn_trans/trans_shift.c.inc" +#include "insn_trans/trans_bit.c.inc" +#include "insn_trans/trans_memory.c.inc" +#include "insn_trans/trans_atomic.c.inc" +#include "insn_trans/trans_extra.c.inc" +#include "insn_trans/trans_farith.c.inc" +#include "insn_trans/trans_fcmp.c.inc" +#include "insn_trans/trans_fcnv.c.inc" +#include "insn_trans/trans_fmov.c.inc" +#include "insn_trans/trans_fmemory.c.inc" +#include "insn_trans/trans_branch.c.inc" +#include "insn_trans/trans_privileged.c.inc" + +static void loongarch_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) +{ + CPULoongArchState *env = cs->env_ptr; + DisasContext *ctx = container_of(dcbase, DisasContext, base); + + ctx->opcode = cpu_ldl_code(env, ctx->base.pc_next); + + if (!decode(ctx, ctx->opcode)) { + qemu_log_mask(LOG_UNIMP, "Error: unknown opcode. 
" + TARGET_FMT_lx ": 0x%x\n", + ctx->base.pc_next, ctx->opcode); + generate_exception(ctx, EXCCODE_INE); + } + + for (int i = ctx->ntemp - 1; i >= 0; --i) { + tcg_temp_free(ctx->temp[i]); + ctx->temp[i] = NULL; + } + ctx->ntemp = 0; + + ctx->base.pc_next += 4; +} + +static void loongarch_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + + switch (ctx->base.is_jmp) { + case DISAS_STOP: + tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next); + tcg_gen_lookup_and_goto_ptr(); + break; + case DISAS_TOO_MANY: + gen_goto_tb(ctx, 0, ctx->base.pc_next); + break; + case DISAS_NORETURN: + break; + case DISAS_EXIT_UPDATE: + tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next); + QEMU_FALLTHROUGH; + case DISAS_EXIT: + tcg_gen_exit_tb(NULL, 0); + break; + default: + g_assert_not_reached(); + } +} + +static void loongarch_tr_disas_log(const DisasContextBase *dcbase, + CPUState *cpu, FILE *logfile) +{ + qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first)); + target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size); +} + +static const TranslatorOps loongarch_tr_ops = { + .init_disas_context = loongarch_tr_init_disas_context, + .tb_start = loongarch_tr_tb_start, + .insn_start = loongarch_tr_insn_start, + .translate_insn = loongarch_tr_translate_insn, + .tb_stop = loongarch_tr_tb_stop, + .disas_log = loongarch_tr_disas_log, +}; + +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) +{ + DisasContext ctx; + + translator_loop(cs, tb, max_insns, pc, host_pc, + &loongarch_tr_ops, &ctx.base); +} + +void loongarch_translate_init(void) +{ + int i; + + cpu_gpr[0] = NULL; + for (i = 1; i < 32; i++) { + cpu_gpr[i] = tcg_global_mem_new(cpu_env, + offsetof(CPULoongArchState, gpr[i]), + regnames[i]); + } + + for (i = 0; i < 32; i++) { + int off = offsetof(CPULoongArchState, fpr[i]); + cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env, off, fregnames[i]); + } + + cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPULoongArchState, pc), "pc"); + cpu_lladdr = tcg_global_mem_new(cpu_env, + offsetof(CPULoongArchState, lladdr), "lladdr"); + cpu_llval = tcg_global_mem_new(cpu_env, + offsetof(CPULoongArchState, llval), "llval"); +} diff --git a/target/loongarch/translate.h b/target/loongarch/translate.h new file mode 100644 index 0000000000..6d2e382e8b --- /dev/null +++ b/target/loongarch/translate.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch translation routines. + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#ifndef TARGET_LOONGARCH_TRANSLATE_H +#define TARGET_LOONGARCH_TRANSLATE_H + +#include "exec/translator.h" + +#define TRANS(NAME, FUNC, ...) \ + static bool trans_##NAME(DisasContext *ctx, arg_##NAME * a) \ + { return FUNC(ctx, a, __VA_ARGS__); } + +/* + * If an operation is being performed on less than TARGET_LONG_BITS, + * it may require the inputs to be sign- or zero-extended; which will + * depend on the exact operation being performed. + */ +typedef enum { + EXT_NONE, + EXT_SIGN, + EXT_ZERO, +} DisasExtend; + +typedef struct DisasContext { + DisasContextBase base; + target_ulong page_start; + uint32_t opcode; + uint16_t mem_idx; + uint16_t plv; + TCGv zero; + /* Space for 3 operands plus 1 extra for address computation. 
*/ + TCGv temp[4]; + uint8_t ntemp; +} DisasContext; + +void generate_exception(DisasContext *ctx, int excp); + +extern TCGv cpu_gpr[32], cpu_pc; +extern TCGv_i32 cpu_fscr0; +extern TCGv_i64 cpu_fpr[32]; + +#endif diff --git a/target/m68k/cpu-param.h b/target/m68k/cpu-param.h index 06556dfbf3..44a8d193f0 100644 --- a/target/m68k/cpu-param.h +++ b/target/m68k/cpu-param.h @@ -6,7 +6,7 @@ */ #ifndef M68K_CPU_PARAM_H -#define M68K_CPU_PARAM_H 1 +#define M68K_CPU_PARAM_H #define TARGET_LONG_BITS 32 /* diff --git a/target/m68k/cpu-qom.h b/target/m68k/cpu-qom.h index 1ceb160ecb..cd9687192c 100644 --- a/target/m68k/cpu-qom.h +++ b/target/m68k/cpu-qom.h @@ -25,8 +25,7 @@ #define TYPE_M68K_CPU "m68k-cpu" -OBJECT_DECLARE_TYPE(M68kCPU, M68kCPUClass, - M68K_CPU) +OBJECT_DECLARE_CPU_TYPE(M68kCPU, M68kCPUClass, M68K_CPU) /* * M68kCPUClass: diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c index 72de6e9726..b67ddea2ae 100644 --- a/target/m68k/cpu.c +++ b/target/m68k/cpu.c @@ -31,6 +31,26 @@ static void m68k_cpu_set_pc(CPUState *cs, vaddr value) cpu->env.pc = value; } +static vaddr m68k_cpu_get_pc(CPUState *cs) +{ + M68kCPU *cpu = M68K_CPU(cs); + + return cpu->env.pc; +} + +static void m68k_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + M68kCPU *cpu = M68K_CPU(cs); + int cc_op = data[1]; + + cpu->env.pc = data[0]; + if (cc_op != CC_OP_DYNAMIC) { + cpu->env.cc_op = cc_op; + } +} + static bool m68k_cpu_has_work(CPUState *cs) { return cs->interrupt_request & CPU_INTERRUPT_HARD; @@ -38,12 +58,12 @@ static bool m68k_cpu_has_work(CPUState *cs) static void m68k_set_feature(CPUM68KState *env, int feature) { - env->features |= (1u << feature); + env->features |= BIT_ULL(feature); } static void m68k_unset_feature(CPUM68KState *env, int feature) { - env->features &= (-1u - (1u << feature)); + env->features &= ~BIT_ULL(feature); } static void m68k_cpu_reset(DeviceState *dev) @@ -75,12 +95,8 @@ static void m68k_cpu_reset(DeviceState *dev) static void m68k_cpu_disas_set_info(CPUState *s, disassemble_info *info) { - M68kCPU *cpu = M68K_CPU(s); - CPUM68KState *env = &cpu->env; info->print_insn = print_insn_m68k; - if (m68k_feature(env, M68K_FEATURE_M68000)) { - info->mach = bfd_mach_m68040; - } + info->mach = 0; } /* CPU models */ @@ -106,6 +122,7 @@ static void m5206_cpu_initfn(Object *obj) CPUM68KState *env = &cpu->env; m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); + m68k_set_feature(env, M68K_FEATURE_MOVEFROMSR_PRIV); } /* Base feature set, including isns. 
for m68k family */ @@ -114,7 +131,7 @@ static void m68000_cpu_initfn(Object *obj) M68kCPU *cpu = M68K_CPU(obj); CPUM68KState *env = &cpu->env; - m68k_set_feature(env, M68K_FEATURE_M68000); + m68k_set_feature(env, M68K_FEATURE_M68K); m68k_set_feature(env, M68K_FEATURE_USP); m68k_set_feature(env, M68K_FEATURE_WORD_INDEX); m68k_set_feature(env, M68K_FEATURE_MOVEP); @@ -133,6 +150,7 @@ static void m68010_cpu_initfn(Object *obj) m68k_set_feature(env, M68K_FEATURE_RTD); m68k_set_feature(env, M68K_FEATURE_BKPT); m68k_set_feature(env, M68K_FEATURE_MOVEC); + m68k_set_feature(env, M68K_FEATURE_MOVEFROMSR_PRIV); } /* @@ -162,6 +180,7 @@ static void m68020_cpu_initfn(Object *obj) m68k_set_feature(env, M68K_FEATURE_CHK2); m68k_set_feature(env, M68K_FEATURE_MSP); m68k_set_feature(env, M68K_FEATURE_UNALIGNED_DATA); + m68k_set_feature(env, M68K_FEATURE_TRAPCC); } /* @@ -244,6 +263,7 @@ static void m5208_cpu_initfn(Object *obj) m68k_set_feature(env, M68K_FEATURE_BRAL); m68k_set_feature(env, M68K_FEATURE_CF_EMAC); m68k_set_feature(env, M68K_FEATURE_USP); + m68k_set_feature(env, M68K_FEATURE_MOVEFROMSR_PRIV); } static void cfv4e_cpu_initfn(Object *obj) @@ -257,6 +277,7 @@ static void cfv4e_cpu_initfn(Object *obj) m68k_set_feature(env, M68K_FEATURE_CF_FPU); m68k_set_feature(env, M68K_FEATURE_CF_EMAC); m68k_set_feature(env, M68K_FEATURE_USP); + m68k_set_feature(env, M68K_FEATURE_MOVEFROMSR_PRIV); } static void any_cpu_initfn(Object *obj) @@ -278,6 +299,7 @@ static void any_cpu_initfn(Object *obj) m68k_set_feature(env, M68K_FEATURE_USP); m68k_set_feature(env, M68K_FEATURE_EXT_FULL); m68k_set_feature(env, M68K_FEATURE_WORD_INDEX); + m68k_set_feature(env, M68K_FEATURE_MOVEFROMSR_PRIV); } static void m68k_cpu_realizefn(DeviceState *dev, Error **errp) @@ -515,10 +537,11 @@ static const struct SysemuCPUOps m68k_sysemu_ops = { static const struct TCGCPUOps m68k_tcg_ops = { .initialize = m68k_tcg_init, - .cpu_exec_interrupt = m68k_cpu_exec_interrupt, - .tlb_fill = m68k_cpu_tlb_fill, + .restore_state_to_opc = m68k_restore_state_to_opc, #ifndef CONFIG_USER_ONLY + .tlb_fill = m68k_cpu_tlb_fill, + .cpu_exec_interrupt = m68k_cpu_exec_interrupt, .do_interrupt = m68k_cpu_do_interrupt, .do_transaction_failed = m68k_cpu_transaction_failed, #endif /* !CONFIG_USER_ONLY */ @@ -538,6 +561,7 @@ static void m68k_cpu_class_init(ObjectClass *c, void *data) cc->has_work = m68k_cpu_has_work; cc->dump_state = m68k_cpu_dump_state; cc->set_pc = m68k_cpu_set_pc; + cc->get_pc = m68k_cpu_get_pc; cc->gdb_read_register = m68k_cpu_gdb_read_register; cc->gdb_write_register = m68k_cpu_gdb_write_register; #if defined(CONFIG_SOFTMMU) diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h index 997d588911..3a9cfe2f33 100644 --- a/target/m68k/cpu.h +++ b/target/m68k/cpu.h @@ -22,6 +22,7 @@ #define M68K_CPU_H #include "exec/cpu-defs.h" +#include "qemu/cpu-float.h" #include "cpu-qom.h" #define OS_BYTE 0 @@ -79,7 +80,7 @@ typedef CPU_LDoubleU FPReg; -typedef struct CPUM68KState { +typedef struct CPUArchState { uint32_t dregs[8]; uint32_t aregs[8]; uint32_t pc; @@ -121,6 +122,12 @@ typedef struct CPUM68KState { /* MMU status. */ struct { + /* + * Holds the "address" value in between raising an exception + * and creation of the exception stack frame. + * Used for both Format 7 exceptions (Access, i.e. mmu) + * and Format 2 exceptions (chk, div0, trapcc, etc). + */ uint32_t ar; uint32_t ssw; /* 68040 */ @@ -147,7 +154,7 @@ typedef struct CPUM68KState { struct {} end_reset_fields; /* Fields from here on are preserved across CPU reset. 
*/ - uint32_t features; + uint64_t features; } CPUM68KState; /* @@ -156,7 +163,7 @@ typedef struct CPUM68KState { * * A Motorola 68k CPU. */ -struct M68kCPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ @@ -166,8 +173,10 @@ struct M68kCPU { }; +#ifndef CONFIG_USER_ONLY void m68k_cpu_do_interrupt(CPUState *cpu); bool m68k_cpu_exec_interrupt(CPUState *cpu, int int_req); +#endif /* !CONFIG_USER_ONLY */ void m68k_cpu_dump_state(CPUState *cpu, FILE *f, int flags); hwaddr m68k_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); @@ -175,13 +184,6 @@ int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void m68k_tcg_init(void); void m68k_cpu_init_gdb(M68kCPU *cpu); -/* - * you can call this signal handler from your SIGBUS and SIGSEGV - * signal handlers to inform the virtual CPU of exceptions. non zero - * is returned if the signal was handled by the virtual CPU. - */ -int cpu_m68k_signal_handler(int host_signum, void *pinfo, - void *puc); uint32_t cpu_m68k_get_ccr(CPUM68KState *env); void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t); void cpu_m68k_set_sr(CPUM68KState *env, uint32_t); @@ -478,8 +480,9 @@ void do_m68k_semihosting(CPUM68KState *env, int nr); */ enum m68k_features { - /* Base m68k instruction set */ - M68K_FEATURE_M68000, + /* Base Motorola CPU set (not set for Coldfire CPUs) */ + M68K_FEATURE_M68K, + /* Motorola CPU feature sets */ M68K_FEATURE_M68010, M68K_FEATURE_M68020, M68K_FEATURE_M68030, @@ -532,11 +535,15 @@ enum m68k_features { M68K_FEATURE_MOVEC, /* Unaligned data accesses (680[2346]0) */ M68K_FEATURE_UNALIGNED_DATA, + /* TRAPcc insn. (680[2346]0, and CPU32) */ + M68K_FEATURE_TRAPCC, + /* MOVE from SR privileged (from 68010) */ + M68K_FEATURE_MOVEFROMSR_PRIV, }; -static inline int m68k_feature(CPUM68KState *env, int feature) +static inline bool m68k_feature(CPUM68KState *env, int feature) { - return (env->features & (1u << feature)) != 0; + return (env->features & BIT_ULL(feature)) != 0; } void m68k_cpu_list(void); @@ -561,7 +568,6 @@ enum { #define M68K_CPU_TYPE_NAME(model) model M68K_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_M68K_CPU -#define cpu_signal_handler cpu_m68k_signal_handler #define cpu_list m68k_cpu_list /* MMU modes definitions */ @@ -580,9 +586,6 @@ void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr); -typedef CPUM68KState CPUArchState; -typedef M68kCPU ArchCPU; - #include "exec/cpu-all.h" /* TB flags */ diff --git a/target/m68k/helper.c b/target/m68k/helper.c index 137a3e1a3d..4621cf2402 100644 --- a/target/m68k/helper.c +++ b/target/m68k/helper.c @@ -460,7 +460,7 @@ void m68k_switch_sp(CPUM68KState *env) int new_sp; env->sp[env->current_sp] = env->aregs[7]; - if (m68k_feature(env, M68K_FEATURE_M68000)) { + if (m68k_feature(env, M68K_FEATURE_M68K)) { if (env->sr & SR_S) { /* SR:Master-Mode bit unimplemented then ISP is not available */ if (!m68k_feature(env, M68K_FEATURE_MSP) || env->sr & SR_M) { @@ -978,16 +978,12 @@ void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector) } } -#endif - bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType qemu_access_type, int mmu_idx, bool probe, uintptr_t retaddr) { M68kCPU *cpu = M68K_CPU(cs); CPUM68KState *env = &cpu->env; - -#ifndef CONFIG_USER_ONLY hwaddr physical; int prot; int access_type; @@ -1051,12 +1047,12 @@ bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int 
size, if (!(access_type & ACCESS_STORE)) { env->mmu.ssw |= M68K_RW_040; } -#endif cs->exception_index = EXCP_ACCESS; env->mmu.ar = address; cpu_loop_exit_restore(cs, retaddr); } +#endif /* !CONFIG_USER_ONLY */ uint32_t HELPER(bitrev)(uint32_t x) { diff --git a/target/m68k/helper.h b/target/m68k/helper.h index 9842eeaa95..c9bed2b884 100644 --- a/target/m68k/helper.h +++ b/target/m68k/helper.h @@ -1,12 +1,12 @@ DEF_HELPER_1(bitrev, i32, i32) DEF_HELPER_1(ff1, i32, i32) DEF_HELPER_FLAGS_2(sats, TCG_CALL_NO_RWG_SE, i32, i32, i32) -DEF_HELPER_3(divuw, void, env, int, i32) -DEF_HELPER_3(divsw, void, env, int, s32) -DEF_HELPER_4(divul, void, env, int, int, i32) -DEF_HELPER_4(divsl, void, env, int, int, s32) -DEF_HELPER_4(divull, void, env, int, int, i32) -DEF_HELPER_4(divsll, void, env, int, int, s32) +DEF_HELPER_4(divuw, void, env, int, i32, int) +DEF_HELPER_4(divsw, void, env, int, s32, int) +DEF_HELPER_5(divul, void, env, int, int, i32, int) +DEF_HELPER_5(divsl, void, env, int, int, s32, int) +DEF_HELPER_5(divull, void, env, int, int, i32, int) +DEF_HELPER_5(divsll, void, env, int, int, s32, int) DEF_HELPER_2(set_sr, void, env, i32) DEF_HELPER_3(cf_movec_to, void, env, i32, i32) DEF_HELPER_3(m68k_movec_to, void, env, i32, i32) @@ -17,6 +17,7 @@ DEF_HELPER_4(cas2l_parallel, void, env, i32, i32, i32) #define dh_alias_fp ptr #define dh_ctype_fp FPReg * +#define dh_typecode_fp dh_typecode_ptr DEF_HELPER_3(exts32, void, env, fp, s32) DEF_HELPER_3(extf32, void, env, fp, f32) @@ -108,7 +109,7 @@ DEF_HELPER_3(set_mac_extu, void, env, i32, i32) DEF_HELPER_2(flush_flags, void, env, i32) DEF_HELPER_2(set_ccr, void, env, i32) DEF_HELPER_FLAGS_1(get_ccr, TCG_CALL_NO_WG_SE, i32, env) -DEF_HELPER_2(raise_exception, void, env, i32) +DEF_HELPER_2(raise_exception, noreturn, env, i32) DEF_HELPER_FLAGS_3(bfffo_reg, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32) diff --git a/target/m68k/m68k-semi.c b/target/m68k/m68k-semi.c index d919245e4f..87b1314925 100644 --- a/target/m68k/m68k-semi.c +++ b/target/m68k/m68k-semi.c @@ -20,14 +20,10 @@ #include "qemu/osdep.h" #include "cpu.h" -#if defined(CONFIG_USER_ONLY) -#include "qemu.h" -#define SEMIHOSTING_HEAP_SIZE (128 * 1024 * 1024) -#else #include "exec/gdbstub.h" -#include "exec/softmmu-semi.h" +#include "semihosting/syscalls.h" +#include "semihosting/softmmu-uaccess.h" #include "hw/boards.h" -#endif #include "qemu/log.h" #define HOSTED_EXIT 0 @@ -45,91 +41,43 @@ #define HOSTED_ISATTY 12 #define HOSTED_SYSTEM 13 -typedef uint32_t gdb_mode_t; -typedef uint32_t gdb_time_t; - -struct m68k_gdb_stat { - uint32_t gdb_st_dev; /* device */ - uint32_t gdb_st_ino; /* inode */ - gdb_mode_t gdb_st_mode; /* protection */ - uint32_t gdb_st_nlink; /* number of hard links */ - uint32_t gdb_st_uid; /* user ID of owner */ - uint32_t gdb_st_gid; /* group ID of owner */ - uint32_t gdb_st_rdev; /* device type (if inode device) */ - uint64_t gdb_st_size; /* total size, in bytes */ - uint64_t gdb_st_blksize; /* blocksize for filesystem I/O */ - uint64_t gdb_st_blocks; /* number of blocks allocated */ - gdb_time_t gdb_st_atime; /* time of last access */ - gdb_time_t gdb_st_mtime; /* time of last modification */ - gdb_time_t gdb_st_ctime; /* time of last change */ -} QEMU_PACKED; - -struct gdb_timeval { - gdb_time_t tv_sec; /* second */ - uint64_t tv_usec; /* microsecond */ -} QEMU_PACKED; - -#define GDB_O_RDONLY 0x0 -#define GDB_O_WRONLY 0x1 -#define GDB_O_RDWR 0x2 -#define GDB_O_APPEND 0x8 -#define GDB_O_CREAT 0x200 -#define GDB_O_TRUNC 0x400 -#define GDB_O_EXCL 0x800 - -static int 
translate_openflags(int flags) +static int host_to_gdb_errno(int err) { - int hf; - - if (flags & GDB_O_WRONLY) - hf = O_WRONLY; - else if (flags & GDB_O_RDWR) - hf = O_RDWR; - else - hf = O_RDONLY; - - if (flags & GDB_O_APPEND) hf |= O_APPEND; - if (flags & GDB_O_CREAT) hf |= O_CREAT; - if (flags & GDB_O_TRUNC) hf |= O_TRUNC; - if (flags & GDB_O_EXCL) hf |= O_EXCL; - - return hf; +#define E(X) case E##X: return GDB_E##X + switch (err) { + E(PERM); + E(NOENT); + E(INTR); + E(BADF); + E(ACCES); + E(FAULT); + E(BUSY); + E(EXIST); + E(NODEV); + E(NOTDIR); + E(ISDIR); + E(INVAL); + E(NFILE); + E(MFILE); + E(FBIG); + E(NOSPC); + E(SPIPE); + E(ROFS); + E(NAMETOOLONG); + default: + return GDB_EUNKNOWN; + } +#undef E } -static void translate_stat(CPUM68KState *env, target_ulong addr, struct stat *s) +static void m68k_semi_u32_cb(CPUState *cs, uint64_t ret, int err) { - struct m68k_gdb_stat *p; + M68kCPU *cpu = M68K_CPU(cs); + CPUM68KState *env = &cpu->env; - if (!(p = lock_user(VERIFY_WRITE, addr, sizeof(struct m68k_gdb_stat), 0))) - /* FIXME - should this return an error code? */ - return; - p->gdb_st_dev = cpu_to_be32(s->st_dev); - p->gdb_st_ino = cpu_to_be32(s->st_ino); - p->gdb_st_mode = cpu_to_be32(s->st_mode); - p->gdb_st_nlink = cpu_to_be32(s->st_nlink); - p->gdb_st_uid = cpu_to_be32(s->st_uid); - p->gdb_st_gid = cpu_to_be32(s->st_gid); - p->gdb_st_rdev = cpu_to_be32(s->st_rdev); - p->gdb_st_size = cpu_to_be64(s->st_size); -#ifdef _WIN32 - /* Windows stat is missing some fields. */ - p->gdb_st_blksize = 0; - p->gdb_st_blocks = 0; -#else - p->gdb_st_blksize = cpu_to_be64(s->st_blksize); - p->gdb_st_blocks = cpu_to_be64(s->st_blocks); -#endif - p->gdb_st_atime = cpu_to_be32(s->st_atime); - p->gdb_st_mtime = cpu_to_be32(s->st_mtime); - p->gdb_st_ctime = cpu_to_be32(s->st_ctime); - unlock_user(p, addr, sizeof(struct m68k_gdb_stat)); -} - -static void m68k_semi_return_u32(CPUM68KState *env, uint32_t ret, uint32_t err) -{ target_ulong args = env->dregs[1]; if (put_user_u32(ret, args) || - put_user_u32(err, args + 4)) { + put_user_u32(host_to_gdb_errno(err), args + 4)) { /* * The m68k semihosting ABI does not provide any way to report this * error to the guest, so the best we can do is log it in qemu. @@ -140,34 +88,18 @@ static void m68k_semi_return_u32(CPUM68KState *env, uint32_t ret, uint32_t err) } } -static void m68k_semi_return_u64(CPUM68KState *env, uint64_t ret, uint32_t err) -{ - target_ulong args = env->dregs[1]; - if (put_user_u32(ret >> 32, args) || - put_user_u32(ret, args + 4) || - put_user_u32(err, args + 8)) { - /* No way to report this via m68k semihosting ABI; just log it */ - qemu_log_mask(LOG_GUEST_ERROR, "m68k-semihosting: return value " - "discarded because argument block not writable\n"); - } -} - -static int m68k_semi_is_fseek; - -static void m68k_semi_cb(CPUState *cs, target_ulong ret, target_ulong err) +static void m68k_semi_u64_cb(CPUState *cs, uint64_t ret, int err) { M68kCPU *cpu = M68K_CPU(cs); CPUM68KState *env = &cpu->env; - if (m68k_semi_is_fseek) { - /* - * FIXME: We've already lost the high bits of the fseek - * return value. 
- */ - m68k_semi_return_u64(env, ret, err); - m68k_semi_is_fseek = 0; - } else { - m68k_semi_return_u32(env, ret, err); + target_ulong args = env->dregs[1]; + if (put_user_u32(ret >> 32, args) || + put_user_u32(ret, args + 4) || + put_user_u32(host_to_gdb_errno(err), args + 8)) { + /* No way to report this via m68k semihosting ABI; just log it */ + qemu_log_mask(LOG_GUEST_ERROR, "m68k-semihosting: return value " + "discarded because argument block not writable\n"); } } @@ -177,293 +109,126 @@ static void m68k_semi_cb(CPUState *cs, target_ulong ret, target_ulong err) */ #define GET_ARG(n) do { \ if (get_user_ual(arg ## n, args + (n) * 4)) { \ - result = -1; \ - errno = EFAULT; \ goto failed; \ } \ } while (0) +#define GET_ARG64(n) do { \ + if (get_user_ual(arg ## n, args + (n) * 4)) { \ + goto failed64; \ + } \ +} while (0) + + void do_m68k_semihosting(CPUM68KState *env, int nr) { + CPUState *cs = env_cpu(env); uint32_t args; target_ulong arg0, arg1, arg2, arg3; - void *p; - void *q; - uint32_t len; - uint32_t result; args = env->dregs[1]; switch (nr) { case HOSTED_EXIT: gdb_exit(env->dregs[0]); exit(env->dregs[0]); + case HOSTED_OPEN: GET_ARG(0); GET_ARG(1); GET_ARG(2); GET_ARG(3); - if (use_gdb_syscalls()) { - gdb_do_syscall(m68k_semi_cb, "open,%s,%x,%x", arg0, (int)arg1, - arg2, arg3); - return; - } else { - p = lock_user_string(arg0); - if (!p) { - /* FIXME - check error code? */ - result = -1; - } else { - result = open(p, translate_openflags(arg2), arg3); - unlock_user(p, arg0, 0); - } - } + semihost_sys_open(cs, m68k_semi_u32_cb, arg0, arg1, arg2, arg3); break; + case HOSTED_CLOSE: - { - /* Ignore attempts to close stdin/out/err. */ - GET_ARG(0); - int fd = arg0; - if (fd > 2) { - if (use_gdb_syscalls()) { - gdb_do_syscall(m68k_semi_cb, "close,%x", arg0); - return; - } else { - result = close(fd); - } - } else { - result = 0; - } - break; - } + GET_ARG(0); + semihost_sys_close(cs, m68k_semi_u32_cb, arg0); + break; + case HOSTED_READ: GET_ARG(0); GET_ARG(1); GET_ARG(2); - len = arg2; - if (use_gdb_syscalls()) { - gdb_do_syscall(m68k_semi_cb, "read,%x,%x,%x", - arg0, arg1, len); - return; - } else { - p = lock_user(VERIFY_WRITE, arg1, len, 0); - if (!p) { - /* FIXME - check error code? */ - result = -1; - } else { - result = read(arg0, p, len); - unlock_user(p, arg1, len); - } - } + semihost_sys_read(cs, m68k_semi_u32_cb, arg0, arg1, arg2); break; + case HOSTED_WRITE: GET_ARG(0); GET_ARG(1); GET_ARG(2); - len = arg2; - if (use_gdb_syscalls()) { - gdb_do_syscall(m68k_semi_cb, "write,%x,%x,%x", - arg0, arg1, len); - return; - } else { - p = lock_user(VERIFY_READ, arg1, len, 1); - if (!p) { - /* FIXME - check error code? 
*/ - result = -1; - } else { - result = write(arg0, p, len); - unlock_user(p, arg0, 0); - } - } + semihost_sys_write(cs, m68k_semi_u32_cb, arg0, arg1, arg2); break; + case HOSTED_LSEEK: - { - uint64_t off; - GET_ARG(0); - GET_ARG(1); - GET_ARG(2); - GET_ARG(3); - off = (uint32_t)arg2 | ((uint64_t)arg1 << 32); - if (use_gdb_syscalls()) { - m68k_semi_is_fseek = 1; - gdb_do_syscall(m68k_semi_cb, "fseek,%x,%lx,%x", - arg0, off, arg3); - } else { - off = lseek(arg0, off, arg3); - m68k_semi_return_u64(env, off, errno); - } - return; - } + GET_ARG64(0); + GET_ARG64(1); + GET_ARG64(2); + GET_ARG64(3); + semihost_sys_lseek(cs, m68k_semi_u64_cb, arg0, + deposit64(arg2, arg1, 32, 32), arg3); + break; + case HOSTED_RENAME: GET_ARG(0); GET_ARG(1); GET_ARG(2); GET_ARG(3); - if (use_gdb_syscalls()) { - gdb_do_syscall(m68k_semi_cb, "rename,%s,%s", - arg0, (int)arg1, arg2, (int)arg3); - return; - } else { - p = lock_user_string(arg0); - q = lock_user_string(arg2); - if (!p || !q) { - /* FIXME - check error code? */ - result = -1; - } else { - result = rename(p, q); - } - unlock_user(p, arg0, 0); - unlock_user(q, arg2, 0); - } + semihost_sys_rename(cs, m68k_semi_u32_cb, arg0, arg1, arg2, arg3); break; + case HOSTED_UNLINK: GET_ARG(0); GET_ARG(1); - if (use_gdb_syscalls()) { - gdb_do_syscall(m68k_semi_cb, "unlink,%s", - arg0, (int)arg1); - return; - } else { - p = lock_user_string(arg0); - if (!p) { - /* FIXME - check error code? */ - result = -1; - } else { - result = unlink(p); - unlock_user(p, arg0, 0); - } - } + semihost_sys_remove(cs, m68k_semi_u32_cb, arg0, arg1); break; + case HOSTED_STAT: GET_ARG(0); GET_ARG(1); GET_ARG(2); - if (use_gdb_syscalls()) { - gdb_do_syscall(m68k_semi_cb, "stat,%s,%x", - arg0, (int)arg1, arg2); - return; - } else { - struct stat s; - p = lock_user_string(arg0); - if (!p) { - /* FIXME - check error code? */ - result = -1; - } else { - result = stat(p, &s); - unlock_user(p, arg0, 0); - } - if (result == 0) { - translate_stat(env, arg2, &s); - } - } + semihost_sys_stat(cs, m68k_semi_u32_cb, arg0, arg1, arg2); break; + case HOSTED_FSTAT: GET_ARG(0); GET_ARG(1); - if (use_gdb_syscalls()) { - gdb_do_syscall(m68k_semi_cb, "fstat,%x,%x", - arg0, arg1); - return; - } else { - struct stat s; - result = fstat(arg0, &s); - if (result == 0) { - translate_stat(env, arg1, &s); - } - } + semihost_sys_fstat(cs, m68k_semi_u32_cb, arg0, arg1); break; + case HOSTED_GETTIMEOFDAY: GET_ARG(0); GET_ARG(1); - if (use_gdb_syscalls()) { - gdb_do_syscall(m68k_semi_cb, "gettimeofday,%x,%x", - arg0, arg1); - return; - } else { - qemu_timeval tv; - struct gdb_timeval *p; - result = qemu_gettimeofday(&tv); - if (result != 0) { - if (!(p = lock_user(VERIFY_WRITE, - arg0, sizeof(struct gdb_timeval), 0))) { - /* FIXME - check error code? */ - result = -1; - } else { - p->tv_sec = cpu_to_be32(tv.tv_sec); - p->tv_usec = cpu_to_be64(tv.tv_usec); - unlock_user(p, arg0, sizeof(struct gdb_timeval)); - } - } - } + semihost_sys_gettimeofday(cs, m68k_semi_u32_cb, arg0, arg1); break; + case HOSTED_ISATTY: GET_ARG(0); - if (use_gdb_syscalls()) { - gdb_do_syscall(m68k_semi_cb, "isatty,%x", arg0); - return; - } else { - result = isatty(arg0); - } + semihost_sys_isatty(cs, m68k_semi_u32_cb, arg0); break; + case HOSTED_SYSTEM: GET_ARG(0); GET_ARG(1); - if (use_gdb_syscalls()) { - gdb_do_syscall(m68k_semi_cb, "system,%s", - arg0, (int)arg1); - return; - } else { - p = lock_user_string(arg0); - if (!p) { - /* FIXME - check error code? 
*/ - result = -1; - } else { - result = system(p); - unlock_user(p, arg0, 0); - } - } + semihost_sys_system(cs, m68k_semi_u32_cb, arg0, arg1); break; - case HOSTED_INIT_SIM: -#if defined(CONFIG_USER_ONLY) - { - CPUState *cs = env_cpu(env); - TaskState *ts = cs->opaque; - /* Allocate the heap using sbrk. */ - if (!ts->heap_limit) { - abi_ulong ret; - uint32_t size; - uint32_t base; - base = do_brk(0); - size = SEMIHOSTING_HEAP_SIZE; - /* Try a big heap, and reduce the size if that fails. */ - for (;;) { - ret = do_brk(base + size); - if (ret >= (base + size)) { - break; - } - size >>= 1; - } - ts->heap_limit = base + size; - } - /* - * This call may happen before we have writable memory, so return - * values directly in registers. - */ - env->dregs[1] = ts->heap_limit; - env->aregs[7] = ts->stack_base; - } -#else + case HOSTED_INIT_SIM: /* * FIXME: This is wrong for boards where RAM does not start at * address zero. */ env->dregs[1] = current_machine->ram_size; env->aregs[7] = current_machine->ram_size; -#endif return; + default: cpu_abort(env_cpu(env), "Unsupported semihosting syscall %d\n", nr); - result = 0; + + failed: + m68k_semi_u32_cb(cs, -1, EFAULT); + break; + failed64: + m68k_semi_u64_cb(cs, -1, EFAULT); + break; } -failed: - m68k_semi_return_u32(env, result, errno); } diff --git a/target/m68k/meson.build b/target/m68k/meson.build index 05cd9fbd1e..27d2d7ba87 100644 --- a/target/m68k/meson.build +++ b/target/m68k/meson.build @@ -4,14 +4,16 @@ m68k_ss.add(files( 'fpu_helper.c', 'gdbstub.c', 'helper.c', - 'm68k-semi.c', 'op_helper.c', 'softfloat.c', 'translate.c', )) m68k_softmmu_ss = ss.source_set() -m68k_softmmu_ss.add(files('monitor.c')) +m68k_softmmu_ss.add(files( + 'm68k-semi.c', + 'monitor.c' +)) target_arch += {'m68k': m68k_ss} target_softmmu_arch += {'m68k': m68k_softmmu_ss} diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c index d006d1cb3e..1ce850bbc5 100644 --- a/target/m68k/op_helper.c +++ b/target/m68k/op_helper.c @@ -17,25 +17,14 @@ * License along with this library; if not, see . */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "semihosting/semihost.h" -#include "tcg/tcg.h" -#if defined(CONFIG_USER_ONLY) - -void m68k_cpu_do_interrupt(CPUState *cs) -{ - cs->exception_index = -1; -} - -static inline void do_interrupt_m68k_hardirq(CPUM68KState *env) -{ -} - -#else +#if !defined(CONFIG_USER_ONLY) static void cf_rte(CPUM68KState *env) { @@ -214,8 +203,7 @@ static void cf_interrupt_all(CPUM68KState *env, int is_hw) cf_rte(env); return; case EXCP_HALT_INSN: - if (semihosting_enabled() - && (env->sr & SR_S) != 0 + if (semihosting_enabled((env->sr & SR_S) == 0) && (env->pc & 3) == 0 && cpu_lduw_code(env, env->pc - 4) == 0x4e71 && cpu_ldl_code(env, env->pc) == 0x4e7bf000) { @@ -228,11 +216,6 @@ static void cf_interrupt_all(CPUM68KState *env, int is_hw) cpu_loop_exit(cs); return; } - if (cs->exception_index >= EXCP_TRAP0 - && cs->exception_index <= EXCP_TRAP15) { - /* Move the PC after the trap instruction. */ - retaddr += 2; - } } vector = cs->exception_index << 2; @@ -303,22 +286,15 @@ static void m68k_interrupt_all(CPUM68KState *env, int is_hw) { CPUState *cs = env_cpu(env); uint32_t sp; - uint32_t retaddr; uint32_t vector; uint16_t sr, oldsr; - retaddr = env->pc; - if (!is_hw) { switch (cs->exception_index) { case EXCP_RTE: /* Return from an exception. */ m68k_rte(env); return; - case EXCP_TRAP0 ... 
EXCP_TRAP15: - /* Move the PC after the trap instruction. */ - retaddr += 2; - break; } } @@ -353,7 +329,8 @@ static void m68k_interrupt_all(CPUM68KState *env, int is_hw) sp &= ~1; } - if (cs->exception_index == EXCP_ACCESS) { + switch (cs->exception_index) { + case EXCP_ACCESS: if (env->mmu.fault) { cpu_abort(cs, "DOUBLE MMU FAULT\n"); } @@ -404,33 +381,48 @@ static void m68k_interrupt_all(CPUM68KState *env, int is_hw) sp -= 4; cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0); - do_stack_frame(env, &sp, 7, oldsr, 0, retaddr); + do_stack_frame(env, &sp, 7, oldsr, 0, env->pc); env->mmu.fault = false; if (qemu_loglevel_mask(CPU_LOG_INT)) { qemu_log(" " "ssw: %08x ea: %08x sfc: %d dfc: %d\n", env->mmu.ssw, env->mmu.ar, env->sfc, env->dfc); } - } else if (cs->exception_index == EXCP_ADDRESS) { - do_stack_frame(env, &sp, 2, oldsr, 0, retaddr); - } else if (cs->exception_index == EXCP_ILLEGAL || - cs->exception_index == EXCP_DIV0 || - cs->exception_index == EXCP_CHK || - cs->exception_index == EXCP_TRAPCC || - cs->exception_index == EXCP_TRACE) { - /* FIXME: addr is not only env->pc */ - do_stack_frame(env, &sp, 2, oldsr, env->pc, retaddr); - } else if (is_hw && oldsr & SR_M && - cs->exception_index >= EXCP_SPURIOUS && - cs->exception_index <= EXCP_INT_LEVEL_7) { - do_stack_frame(env, &sp, 0, oldsr, 0, retaddr); - oldsr = sr; - env->aregs[7] = sp; - cpu_m68k_set_sr(env, sr &= ~SR_M); - sp = env->aregs[7] & ~1; - do_stack_frame(env, &sp, 1, oldsr, 0, retaddr); - } else { - do_stack_frame(env, &sp, 0, oldsr, 0, retaddr); + break; + + case EXCP_ILLEGAL: + do_stack_frame(env, &sp, 0, oldsr, 0, env->pc); + break; + + case EXCP_ADDRESS: + do_stack_frame(env, &sp, 2, oldsr, 0, env->pc); + break; + + case EXCP_CHK: + case EXCP_DIV0: + case EXCP_TRACE: + case EXCP_TRAPCC: + do_stack_frame(env, &sp, 2, oldsr, env->mmu.ar, env->pc); + break; + + case EXCP_SPURIOUS ... 
EXCP_INT_LEVEL_7: + if (is_hw && (oldsr & SR_M)) { + do_stack_frame(env, &sp, 0, oldsr, 0, env->pc); + oldsr = sr; + env->aregs[7] = sp; + cpu_m68k_set_sr(env, sr & ~SR_M); + sp = env->aregs[7]; + if (!m68k_feature(env, M68K_FEATURE_UNALIGNED_DATA)) { + sp &= ~1; + } + do_stack_frame(env, &sp, 1, oldsr, 0, env->pc); + break; + } + /* fall through */ + + default: + do_stack_frame(env, &sp, 0, oldsr, 0, env->pc); + break; } env->aregs[7] = sp; @@ -440,7 +432,7 @@ static void m68k_interrupt_all(CPUM68KState *env, int is_hw) static void do_interrupt_all(CPUM68KState *env, int is_hw) { - if (m68k_feature(env, M68K_FEATURE_M68000)) { + if (m68k_feature(env, M68K_FEATURE_M68K)) { m68k_interrupt_all(env, is_hw); return; } @@ -468,7 +460,7 @@ void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, M68kCPU *cpu = M68K_CPU(cs); CPUM68KState *env = &cpu->env; - cpu_restore_state(cs, retaddr, true); + cpu_restore_state(cs, retaddr); if (m68k_feature(env, M68K_FEATURE_M68040)) { env->mmu.mmusr = 0; @@ -516,7 +508,6 @@ void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, cpu_loop_exit(cs); } } -#endif bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { @@ -538,7 +529,10 @@ bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request) return false; } -static void raise_exception_ra(CPUM68KState *env, int tt, uintptr_t raddr) +#endif /* !CONFIG_USER_ONLY */ + +G_NORETURN static void +raise_exception_ra(CPUM68KState *env, int tt, uintptr_t raddr) { CPUState *cs = env_cpu(env); @@ -546,7 +540,7 @@ static void raise_exception_ra(CPUM68KState *env, int tt, uintptr_t raddr) cpu_loop_exit_restore(cs, raddr); } -static void raise_exception(CPUM68KState *env, int tt) +G_NORETURN static void raise_exception(CPUM68KState *env, int tt) { raise_exception_ra(env, tt, 0); } @@ -556,18 +550,42 @@ void HELPER(raise_exception)(CPUM68KState *env, uint32_t tt) raise_exception(env, tt); } -void HELPER(divuw)(CPUM68KState *env, int destr, uint32_t den) +G_NORETURN static void +raise_exception_format2(CPUM68KState *env, int tt, int ilen, uintptr_t raddr) +{ + CPUState *cs = env_cpu(env); + + cs->exception_index = tt; + + /* Recover PC and CC_OP for the beginning of the insn. */ + cpu_restore_state(cs, raddr); + + /* Flags are current in env->cc_*, or are undefined. */ + env->cc_op = CC_OP_FLAGS; + + /* + * Remember original pc in mmu.ar, for the Format 2 stack frame. + * Adjust PC to end of the insn. 
+ */ + env->mmu.ar = env->pc; + env->pc += ilen; + + cpu_loop_exit(cs); +} + +void HELPER(divuw)(CPUM68KState *env, int destr, uint32_t den, int ilen) { uint32_t num = env->dregs[destr]; uint32_t quot, rem; + env->cc_c = 0; /* always cleared, even if div0 */ + if (den == 0) { - raise_exception_ra(env, EXCP_DIV0, GETPC()); + raise_exception_format2(env, EXCP_DIV0, ilen, GETPC()); } quot = num / den; rem = num % den; - env->cc_c = 0; /* always cleared, even if overflow */ if (quot > 0xffff) { env->cc_v = -1; /* @@ -583,18 +601,19 @@ void HELPER(divuw)(CPUM68KState *env, int destr, uint32_t den) env->cc_v = 0; } -void HELPER(divsw)(CPUM68KState *env, int destr, int32_t den) +void HELPER(divsw)(CPUM68KState *env, int destr, int32_t den, int ilen) { int32_t num = env->dregs[destr]; uint32_t quot, rem; + env->cc_c = 0; /* always cleared, even if overflow/div0 */ + if (den == 0) { - raise_exception_ra(env, EXCP_DIV0, GETPC()); + raise_exception_format2(env, EXCP_DIV0, ilen, GETPC()); } quot = num / den; rem = num % den; - env->cc_c = 0; /* always cleared, even if overflow */ if (quot != (int16_t)quot) { env->cc_v = -1; /* nothing else is modified */ @@ -611,18 +630,20 @@ void HELPER(divsw)(CPUM68KState *env, int destr, int32_t den) env->cc_v = 0; } -void HELPER(divul)(CPUM68KState *env, int numr, int regr, uint32_t den) +void HELPER(divul)(CPUM68KState *env, int numr, int regr, + uint32_t den, int ilen) { uint32_t num = env->dregs[numr]; uint32_t quot, rem; + env->cc_c = 0; /* always cleared, even if div0 */ + if (den == 0) { - raise_exception_ra(env, EXCP_DIV0, GETPC()); + raise_exception_format2(env, EXCP_DIV0, ilen, GETPC()); } quot = num / den; rem = num % den; - env->cc_c = 0; env->cc_z = quot; env->cc_n = quot; env->cc_v = 0; @@ -639,18 +660,20 @@ void HELPER(divul)(CPUM68KState *env, int numr, int regr, uint32_t den) } } -void HELPER(divsl)(CPUM68KState *env, int numr, int regr, int32_t den) +void HELPER(divsl)(CPUM68KState *env, int numr, int regr, + int32_t den, int ilen) { int32_t num = env->dregs[numr]; int32_t quot, rem; + env->cc_c = 0; /* always cleared, even if overflow/div0 */ + if (den == 0) { - raise_exception_ra(env, EXCP_DIV0, GETPC()); + raise_exception_format2(env, EXCP_DIV0, ilen, GETPC()); } quot = num / den; rem = num % den; - env->cc_c = 0; env->cc_z = quot; env->cc_n = quot; env->cc_v = 0; @@ -667,19 +690,21 @@ void HELPER(divsl)(CPUM68KState *env, int numr, int regr, int32_t den) } } -void HELPER(divull)(CPUM68KState *env, int numr, int regr, uint32_t den) +void HELPER(divull)(CPUM68KState *env, int numr, int regr, + uint32_t den, int ilen) { uint64_t num = deposit64(env->dregs[numr], 32, 32, env->dregs[regr]); uint64_t quot; uint32_t rem; + env->cc_c = 0; /* always cleared, even if overflow/div0 */ + if (den == 0) { - raise_exception_ra(env, EXCP_DIV0, GETPC()); + raise_exception_format2(env, EXCP_DIV0, ilen, GETPC()); } quot = num / den; rem = num % den; - env->cc_c = 0; /* always cleared, even if overflow */ if (quot > 0xffffffffULL) { env->cc_v = -1; /* @@ -702,19 +727,21 @@ void HELPER(divull)(CPUM68KState *env, int numr, int regr, uint32_t den) env->dregs[numr] = quot; } -void HELPER(divsll)(CPUM68KState *env, int numr, int regr, int32_t den) +void HELPER(divsll)(CPUM68KState *env, int numr, int regr, + int32_t den, int ilen) { int64_t num = deposit64(env->dregs[numr], 32, 32, env->dregs[regr]); int64_t quot; int32_t rem; + env->cc_c = 0; /* always cleared, even if overflow/div0 */ + if (den == 0) { - raise_exception_ra(env, EXCP_DIV0, GETPC()); + 
raise_exception_format2(env, EXCP_DIV0, ilen, GETPC()); } quot = num / den; rem = num % den; - env->cc_c = 0; /* always cleared, even if overflow */ if (quot != (int32_t)quot) { env->cc_v = -1; /* @@ -785,7 +812,7 @@ static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2, uintptr_t ra = GETPC(); #if defined(CONFIG_ATOMIC64) int mmu_idx = cpu_mmu_index(env, 0); - TCGMemOpIdx oi = make_memop_idx(MO_BEQ, mmu_idx); + MemOpIdx oi = make_memop_idx(MO_BEUQ, mmu_idx); #endif if (parallel) { @@ -1073,18 +1100,7 @@ void HELPER(chk)(CPUM68KState *env, int32_t val, int32_t ub) env->cc_c = 0 <= ub ? val < 0 || val > ub : val > ub && val < 0; if (val < 0 || val > ub) { - CPUState *cs = env_cpu(env); - - /* Recover PC and CC_OP for the beginning of the insn. */ - cpu_restore_state(cs, GETPC(), true); - - /* flags have been modified by gen_flush_flags() */ - env->cc_op = CC_OP_FLAGS; - /* Adjust PC to end of the insn. */ - env->pc += 2; - - cs->exception_index = EXCP_CHK; - cpu_loop_exit(cs); + raise_exception_format2(env, EXCP_CHK, 2, GETPC()); } } @@ -1105,17 +1121,6 @@ void HELPER(chk2)(CPUM68KState *env, int32_t val, int32_t lb, int32_t ub) env->cc_c = lb <= ub ? val < lb || val > ub : val > ub && val < lb; if (env->cc_c) { - CPUState *cs = env_cpu(env); - - /* Recover PC and CC_OP for the beginning of the insn. */ - cpu_restore_state(cs, GETPC(), true); - - /* flags have been modified by gen_flush_flags() */ - env->cc_op = CC_OP_FLAGS; - /* Adjust PC to end of the insn. */ - env->pc += 4; - - cs->exception_index = EXCP_CHK; - cpu_loop_exit(cs); + raise_exception_format2(env, EXCP_CHK, 4, GETPC()); } } diff --git a/target/m68k/qregs.def b/target/m68k/qregs.h.inc similarity index 100% rename from target/m68k/qregs.def rename to target/m68k/qregs.h.inc diff --git a/target/m68k/translate.c b/target/m68k/translate.c index c34d9aed61..18418312b1 100644 --- a/target/m68k/translate.c +++ b/target/m68k/translate.c @@ -39,7 +39,7 @@ #define DEFO32(name, offset) static TCGv QREG_##name; #define DEFO64(name, offset) static TCGv_i64 QREG_##name; -#include "qregs.def" +#include "qregs.h.inc" #undef DEFO32 #undef DEFO64 @@ -75,7 +75,7 @@ void m68k_tcg_init(void) #define DEFO64(name, offset) \ QREG_##name = tcg_global_mem_new_i64(cpu_env, \ offsetof(CPUM68KState, offset), #name); -#include "qregs.def" +#include "qregs.h.inc" #undef DEFO32 #undef DEFO64 @@ -114,6 +114,7 @@ typedef struct DisasContext { DisasContextBase base; CPUM68KState *env; target_ulong pc; + target_ulong pc_prev; CCOp cc_op; /* Current CC operation */ int cc_op_synced; TCGv_i64 mactmp; @@ -194,18 +195,6 @@ static void do_writebacks(DisasContext *s) } } -static bool is_singlestepping(DisasContext *s) -{ - /* - * Return true if we are singlestepping either because of - * architectural singlestep or QEMU gdbstub singlestep. This does - * not include the command line '-singlestep' mode which is rather - * misnamed as it only means "one instruction per TB" and doesn't - * affect the code we generate. 
- */ - return s->base.singlestep_enabled || s->ss_active; -} - /* is_jmp field values */ #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */ #define DISAS_EXIT DISAS_TARGET_1 /* cpu state was modified dynamically */ @@ -310,6 +299,21 @@ static void gen_raise_exception(int nr) tcg_temp_free_i32(tmp); } +static void gen_raise_exception_format2(DisasContext *s, int nr, + target_ulong this_pc) +{ + /* + * Pass the address of the insn to the exception handler, + * for recording in the Format $2 (6-word) stack frame. + * Re-use mmu.ar for the purpose, since that's only valid + * after tlb_fill. + */ + tcg_gen_st_i32(tcg_constant_i32(this_pc), cpu_env, + offsetof(CPUM68KState, mmu.ar)); + gen_raise_exception(nr); + s->base.is_jmp = DISAS_NORETURN; +} + static void gen_exception(DisasContext *s, uint32_t dest, int nr) { update_cc_op(s); @@ -320,20 +324,6 @@ static void gen_exception(DisasContext *s, uint32_t dest, int nr) s->base.is_jmp = DISAS_NORETURN; } -static void gen_singlestep_exception(DisasContext *s) -{ - /* - * Generate the right kind of exception for singlestep, which is - * either the architectural singlestep or EXCP_DEBUG for QEMU's - * gdb singlestepping. - */ - if (s->ss_active) { - gen_raise_exception(EXCP_TRACE); - } else { - gen_raise_exception(EXCP_DEBUG); - } -} - static inline void gen_addr_fault(DisasContext *s) { gen_exception(s, s->base.pc_next, EXCP_ADDRESS); @@ -415,7 +405,7 @@ static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val, static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s) { uint16_t im; - im = translator_lduw(env, s->pc); + im = translator_lduw(env, &s->base, s->pc); s->pc += 2; return im; } @@ -481,7 +471,7 @@ static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base) if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX)) return NULL_QREG; - if (m68k_feature(s->env, M68K_FEATURE_M68000) && + if (m68k_feature(s->env, M68K_FEATURE_M68K) && !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) { ext &= ~(3 << 9); } @@ -814,7 +804,7 @@ static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s, reg = get_areg(s, reg0); tmp = mark_to_release(s, tcg_temp_new()); if (reg0 == 7 && opsize == OS_BYTE && - m68k_feature(s->env, M68K_FEATURE_M68000)) { + m68k_feature(s->env, M68K_FEATURE_M68K)) { tcg_gen_subi_i32(tmp, reg, 2); } else { tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize)); @@ -898,7 +888,7 @@ static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0, if (what == EA_STORE || !addrp) { TCGv tmp = tcg_temp_new(); if (reg0 == 7 && opsize == OS_BYTE && - m68k_feature(s->env, M68K_FEATURE_M68000)) { + m68k_feature(s->env, M68K_FEATURE_M68K)) { tcg_gen_addi_i32(tmp, reg, 2); } else { tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize)); @@ -1520,12 +1510,13 @@ static void gen_exit_tb(DisasContext *s) } while (0) /* Generate a jump to an immediate address. 
*/ -static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest) +static void gen_jmp_tb(DisasContext *s, int n, target_ulong dest, + target_ulong src) { - if (unlikely(is_singlestepping(s))) { + if (unlikely(s->ss_active)) { update_cc_op(s); tcg_gen_movi_i32(QREG_PC, dest); - gen_singlestep_exception(s); + gen_raise_exception_format2(s, EXCP_TRACE, src); } else if (translator_use_goto_tb(&s->base, dest)) { tcg_gen_goto_tb(n); tcg_gen_movi_i32(QREG_PC, dest); @@ -1574,9 +1565,9 @@ DISAS_INSN(dbcc) tcg_gen_addi_i32(tmp, tmp, -1); gen_partset_reg(OS_WORD, reg, tmp); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1); - gen_jmp_tb(s, 1, base + offset); + gen_jmp_tb(s, 1, base + offset, s->base.pc_next); gen_set_label(l1); - gen_jmp_tb(s, 0, s->pc); + gen_jmp_tb(s, 0, s->pc, s->base.pc_next); } DISAS_INSN(undef_mac) @@ -1627,6 +1618,7 @@ DISAS_INSN(divw) int sign; TCGv src; TCGv destr; + TCGv ilen; /* divX.w ,Dn 32/16 -> 16r:16q */ @@ -1635,20 +1627,20 @@ DISAS_INSN(divw) /* dest.l / src.w */ SRC_EA(env, src, OS_WORD, sign, NULL); - destr = tcg_const_i32(REG(insn, 9)); + destr = tcg_constant_i32(REG(insn, 9)); + ilen = tcg_constant_i32(s->pc - s->base.pc_next); if (sign) { - gen_helper_divsw(cpu_env, destr, src); + gen_helper_divsw(cpu_env, destr, src, ilen); } else { - gen_helper_divuw(cpu_env, destr, src); + gen_helper_divuw(cpu_env, destr, src, ilen); } - tcg_temp_free(destr); set_cc_op(s, CC_OP_FLAGS); } DISAS_INSN(divl) { - TCGv num, reg, den; + TCGv num, reg, den, ilen; int sign; uint16_t ext; @@ -1665,15 +1657,14 @@ DISAS_INSN(divl) /* divX.l , Dr:Dq 64/32 -> 32r:32q */ SRC_EA(env, den, OS_LONG, 0, NULL); - num = tcg_const_i32(REG(ext, 12)); - reg = tcg_const_i32(REG(ext, 0)); + num = tcg_constant_i32(REG(ext, 12)); + reg = tcg_constant_i32(REG(ext, 0)); + ilen = tcg_constant_i32(s->pc - s->base.pc_next); if (sign) { - gen_helper_divsll(cpu_env, num, reg, den); + gen_helper_divsll(cpu_env, num, reg, den, ilen); } else { - gen_helper_divull(cpu_env, num, reg, den); + gen_helper_divull(cpu_env, num, reg, den, ilen); } - tcg_temp_free(reg); - tcg_temp_free(num); set_cc_op(s, CC_OP_FLAGS); return; } @@ -1682,15 +1673,14 @@ DISAS_INSN(divl) /* divXl.l , Dr:Dq 32/32 -> 32r:32q */ SRC_EA(env, den, OS_LONG, 0, NULL); - num = tcg_const_i32(REG(ext, 12)); - reg = tcg_const_i32(REG(ext, 0)); + num = tcg_constant_i32(REG(ext, 12)); + reg = tcg_constant_i32(REG(ext, 0)); + ilen = tcg_constant_i32(s->pc - s->base.pc_next); if (sign) { - gen_helper_divsl(cpu_env, num, reg, den); + gen_helper_divsl(cpu_env, num, reg, den, ilen); } else { - gen_helper_divul(cpu_env, num, reg, den); + gen_helper_divul(cpu_env, num, reg, den, ilen); } - tcg_temp_free(reg); - tcg_temp_free(num); set_cc_op(s, CC_OP_FLAGS); } @@ -2220,7 +2210,7 @@ DISAS_INSN(bitop_im) op = (insn >> 6) & 3; bitnum = read_im16(env, s); - if (m68k_feature(s->env, M68K_FEATURE_M68000)) { + if (m68k_feature(s->env, M68K_FEATURE_M68K)) { if (bitnum & 0xfe00) { disas_undef(env, s, insn); return; @@ -2295,9 +2285,9 @@ static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only) tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0); tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0); } else { - TCGv sr = tcg_const_i32(val); - gen_helper_set_sr(cpu_env, sr); - tcg_temp_free(sr); + /* Must writeback before changing security state. 
*/ + do_writebacks(s); + gen_helper_set_sr(cpu_env, tcg_constant_i32(val)); } set_cc_op(s, CC_OP_FLAGS); } @@ -2307,6 +2297,8 @@ static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only) if (ccr_only) { gen_helper_set_ccr(cpu_env, val); } else { + /* Must writeback before changing security state. */ + do_writebacks(s); gen_helper_set_sr(cpu_env, val); } set_cc_op(s, CC_OP_FLAGS); @@ -2383,6 +2375,7 @@ DISAS_INSN(arith_im) tcg_gen_or_i32(dest, src1, im); if (with_SR) { gen_set_sr(s, dest, opsize == OS_BYTE); + gen_exit_tb(s); } else { DEST_EA(env, insn, opsize, dest, &addr); gen_logic_cc(s, dest, opsize); @@ -2392,6 +2385,7 @@ DISAS_INSN(arith_im) tcg_gen_and_i32(dest, src1, im); if (with_SR) { gen_set_sr(s, dest, opsize == OS_BYTE); + gen_exit_tb(s); } else { DEST_EA(env, insn, opsize, dest, &addr); gen_logic_cc(s, dest, opsize); @@ -2415,6 +2409,7 @@ DISAS_INSN(arith_im) tcg_gen_xor_i32(dest, src1, im); if (with_SR) { gen_set_sr(s, dest, opsize == OS_BYTE); + gen_exit_tb(s); } else { DEST_EA(env, insn, opsize, dest, &addr); gen_logic_cc(s, dest, opsize); @@ -2835,19 +2830,39 @@ DISAS_INSN(illegal) gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); } -/* ??? This should be atomic. */ DISAS_INSN(tas) { - TCGv dest; - TCGv src1; - TCGv addr; + int mode = extract32(insn, 3, 3); + int reg0 = REG(insn, 0); - dest = tcg_temp_new(); - SRC_EA(env, src1, OS_BYTE, 1, &addr); - gen_logic_cc(s, src1, OS_BYTE); - tcg_gen_ori_i32(dest, src1, 0x80); - DEST_EA(env, insn, OS_BYTE, dest, &addr); - tcg_temp_free(dest); + if (mode == 0) { + /* data register direct */ + TCGv dest = cpu_dregs[reg0]; + gen_logic_cc(s, dest, OS_BYTE); + tcg_gen_ori_tl(dest, dest, 0x80); + } else { + TCGv src1, addr; + + addr = gen_lea_mode(env, s, mode, reg0, OS_BYTE); + if (IS_NULL_QREG(addr)) { + gen_addr_fault(s); + return; + } + src1 = tcg_temp_new(); + tcg_gen_atomic_fetch_or_tl(src1, addr, tcg_constant_tl(0x80), + IS_USER(s), MO_SB); + gen_logic_cc(s, src1, OS_BYTE); + tcg_temp_free(src1); + + switch (mode) { + case 3: /* Indirect postincrement. */ + tcg_gen_addi_i32(AREG(insn, 0), addr, 1); + break; + case 4: /* Indirect predecrememnt. */ + tcg_gen_mov_i32(AREG(insn, 0), addr); + break; + } + } } DISAS_INSN(mull) @@ -2885,7 +2900,7 @@ DISAS_INSN(mull) return; } SRC_EA(env, src1, OS_LONG, 0, NULL); - if (m68k_feature(s->env, M68K_FEATURE_M68000)) { + if (m68k_feature(s->env, M68K_FEATURE_M68K)) { tcg_gen_movi_i32(QREG_CC_C, 0); if (sign) { tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12)); @@ -3085,22 +3100,6 @@ DISAS_INSN(addsubq) tcg_temp_free(dest); } -DISAS_INSN(tpf) -{ - switch (insn & 7) { - case 2: /* One extension word. */ - s->pc += 2; - break; - case 3: /* Two extension words. */ - s->pc += 4; - break; - case 4: /* No extension words. */ - break; - default: - disas_undef(env, s, insn); - } -} - DISAS_INSN(branch) { int32_t offset; @@ -3123,13 +3122,13 @@ DISAS_INSN(branch) /* Bcc */ TCGLabel *l1 = gen_new_label(); gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1); - gen_jmp_tb(s, 1, base + offset); + gen_jmp_tb(s, 1, base + offset, s->base.pc_next); gen_set_label(l1); - gen_jmp_tb(s, 0, s->pc); + gen_jmp_tb(s, 0, s->pc, s->base.pc_next); } else { /* Unconditional branch. */ update_cc_op(s); - gen_jmp_tb(s, 0, base + offset); + gen_jmp_tb(s, 0, base + offset, s->base.pc_next); } } @@ -3496,7 +3495,7 @@ static inline void shift_im(DisasContext *s, uint16_t insn, int opsize) * while M68000 sets if the most significant bit is changed at * any time during the shift operation. 
*/ - if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) { + if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) { /* if shift count >= bits, V is (reg != 0) */ if (count >= bits) { tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V); @@ -3580,7 +3579,7 @@ static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize) * int64_t t = (int64_t)(intN_t)reg << count; * V = ((s ^ t) & (-1 << (bits - 1))) != 0 */ - if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) { + if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) { TCGv_i64 tt = tcg_const_i64(32); /* if shift is greater than 32, use 32 */ tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64); @@ -3673,7 +3672,7 @@ DISAS_INSN(shift_mem) * while M68000 sets if the most significant bit is changed at * any time during the shift operation */ - if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) { + if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) { src = gen_extend(s, src, OS_WORD, 1); tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src); } @@ -4618,13 +4617,14 @@ DISAS_INSN(strldsr) } gen_push(s, gen_get_sr(s)); gen_set_sr_im(s, ext, 0); + gen_exit_tb(s); } DISAS_INSN(move_from_sr) { TCGv sr; - if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) { + if (IS_USER(s) && m68k_feature(env, M68K_FEATURE_MOVEFROMSR_PRIV)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } @@ -4886,7 +4886,62 @@ DISAS_INSN(wdebug) DISAS_INSN(trap) { - gen_exception(s, s->base.pc_next, EXCP_TRAP0 + (insn & 0xf)); + gen_exception(s, s->pc, EXCP_TRAP0 + (insn & 0xf)); +} + +static void do_trapcc(DisasContext *s, DisasCompare *c) +{ + if (c->tcond != TCG_COND_NEVER) { + TCGLabel *over = NULL; + + update_cc_op(s); + + if (c->tcond != TCG_COND_ALWAYS) { + /* Jump over if !c. */ + over = gen_new_label(); + tcg_gen_brcond_i32(tcg_invert_cond(c->tcond), c->v1, c->v2, over); + } + + tcg_gen_movi_i32(QREG_PC, s->pc); + gen_raise_exception_format2(s, EXCP_TRAPCC, s->base.pc_next); + + if (over != NULL) { + gen_set_label(over); + s->base.is_jmp = DISAS_NEXT; + } + } + free_cond(c); +} + +DISAS_INSN(trapcc) +{ + DisasCompare c; + + /* Consume and discard the immediate operand. */ + switch (extract32(insn, 0, 3)) { + case 2: /* trapcc.w */ + (void)read_im16(env, s); + break; + case 3: /* trapcc.l */ + (void)read_im32(env, s); + break; + case 4: /* trapcc (no operand) */ + break; + default: + /* trapcc registered with only valid opmodes */ + g_assert_not_reached(); + } + + gen_cc_cond(&c, s, extract32(insn, 8, 4)); + do_trapcc(s, &c); +} + +DISAS_INSN(trapv) +{ + DisasCompare c; + + gen_cc_cond(&c, s, 9); /* V set */ + do_trapcc(s, &c); } static void gen_load_fcr(DisasContext *s, TCGv res, int reg) @@ -5512,9 +5567,9 @@ DISAS_INSN(fbcc) l1 = gen_new_label(); update_cc_op(s); gen_fjmpcc(s, insn & 0x3f, l1); - gen_jmp_tb(s, 0, s->pc); + gen_jmp_tb(s, 0, s->pc, s->base.pc_next); gen_set_label(l1); - gen_jmp_tb(s, 1, base + offset); + gen_jmp_tb(s, 1, base + offset, s->base.pc_next); } DISAS_INSN(fscc) @@ -5537,6 +5592,34 @@ DISAS_INSN(fscc) tcg_temp_free(tmp); } +DISAS_INSN(ftrapcc) +{ + DisasCompare c; + uint16_t ext; + int cond; + + ext = read_im16(env, s); + cond = ext & 0x3f; + + /* Consume and discard the immediate operand. 
*/ + switch (extract32(insn, 0, 3)) { + case 2: /* ftrapcc.w */ + (void)read_im16(env, s); + break; + case 3: /* ftrapcc.l */ + (void)read_im32(env, s); + break; + case 4: /* ftrapcc (no operand) */ + break; + default: + /* ftrapcc registered with only valid opmodes */ + g_assert_not_reached(); + } + + gen_fcc_cond(&c, s, cond); + do_trapcc(s, &c); +} + #if defined(CONFIG_SOFTMMU) DISAS_INSN(frestore) { @@ -5835,8 +5918,10 @@ DISAS_INSN(from_mext) DISAS_INSN(macsr_to_ccr) { TCGv tmp = tcg_temp_new(); - tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf); - gen_helper_set_sr(cpu_env, tmp); + + /* Note that X and C are always cleared. */ + tcg_gen_andi_i32(tmp, QREG_MACSR, CCF_N | CCF_Z | CCF_V); + gen_helper_set_ccr(cpu_env, tmp); tcg_temp_free(tmp); set_cc_op(s, CC_OP_FLAGS); } @@ -5954,7 +6039,7 @@ void register_m68k_insns (CPUM68KState *env) } while(0) BASE(undef, 0000, 0000); INSN(arith_im, 0080, fff8, CF_ISA_A); - INSN(arith_im, 0000, ff00, M68000); + INSN(arith_im, 0000, ff00, M68K); INSN(chk2, 00c0, f9c0, CHK2); INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC); BASE(bitop_reg, 0100, f1c0); @@ -5963,26 +6048,26 @@ void register_m68k_insns (CPUM68KState *env) BASE(bitop_reg, 01c0, f1c0); INSN(movep, 0108, f138, MOVEP); INSN(arith_im, 0280, fff8, CF_ISA_A); - INSN(arith_im, 0200, ff00, M68000); - INSN(undef, 02c0, ffc0, M68000); + INSN(arith_im, 0200, ff00, M68K); + INSN(undef, 02c0, ffc0, M68K); INSN(byterev, 02c0, fff8, CF_ISA_APLUSC); INSN(arith_im, 0480, fff8, CF_ISA_A); - INSN(arith_im, 0400, ff00, M68000); - INSN(undef, 04c0, ffc0, M68000); - INSN(arith_im, 0600, ff00, M68000); - INSN(undef, 06c0, ffc0, M68000); + INSN(arith_im, 0400, ff00, M68K); + INSN(undef, 04c0, ffc0, M68K); + INSN(arith_im, 0600, ff00, M68K); + INSN(undef, 06c0, ffc0, M68K); INSN(ff1, 04c0, fff8, CF_ISA_APLUSC); INSN(arith_im, 0680, fff8, CF_ISA_A); INSN(arith_im, 0c00, ff38, CF_ISA_A); - INSN(arith_im, 0c00, ff00, M68000); + INSN(arith_im, 0c00, ff00, M68K); BASE(bitop_im, 0800, ffc0); BASE(bitop_im, 0840, ffc0); BASE(bitop_im, 0880, ffc0); BASE(bitop_im, 08c0, ffc0); INSN(arith_im, 0a80, fff8, CF_ISA_A); - INSN(arith_im, 0a00, ff00, M68000); + INSN(arith_im, 0a00, ff00, M68K); #if defined(CONFIG_SOFTMMU) - INSN(moves, 0e00, ff00, M68000); + INSN(moves, 0e00, ff00, M68K); #endif INSN(cas, 0ac0, ffc0, CAS); INSN(cas, 0cc0, ffc0, CAS); @@ -5992,43 +6077,44 @@ void register_m68k_insns (CPUM68KState *env) BASE(move, 1000, f000); BASE(move, 2000, f000); BASE(move, 3000, f000); - INSN(chk, 4000, f040, M68000); + INSN(chk, 4000, f040, M68K); INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC); INSN(negx, 4080, fff8, CF_ISA_A); - INSN(negx, 4000, ff00, M68000); - INSN(undef, 40c0, ffc0, M68000); + INSN(negx, 4000, ff00, M68K); + INSN(undef, 40c0, ffc0, M68K); INSN(move_from_sr, 40c0, fff8, CF_ISA_A); - INSN(move_from_sr, 40c0, ffc0, M68000); + INSN(move_from_sr, 40c0, ffc0, M68K); BASE(lea, 41c0, f1c0); BASE(clr, 4200, ff00); BASE(undef, 42c0, ffc0); INSN(move_from_ccr, 42c0, fff8, CF_ISA_A); - INSN(move_from_ccr, 42c0, ffc0, M68000); + INSN(move_from_ccr, 42c0, ffc0, M68K); INSN(neg, 4480, fff8, CF_ISA_A); - INSN(neg, 4400, ff00, M68000); - INSN(undef, 44c0, ffc0, M68000); + INSN(neg, 4400, ff00, M68K); + INSN(undef, 44c0, ffc0, M68K); BASE(move_to_ccr, 44c0, ffc0); INSN(not, 4680, fff8, CF_ISA_A); - INSN(not, 4600, ff00, M68000); + INSN(not, 4600, ff00, M68K); #if defined(CONFIG_SOFTMMU) BASE(move_to_sr, 46c0, ffc0); #endif - INSN(nbcd, 4800, ffc0, M68000); - INSN(linkl, 4808, fff8, M68000); + INSN(nbcd, 4800, ffc0, M68K); + INSN(linkl, 4808, 
fff8, M68K); BASE(pea, 4840, ffc0); BASE(swap, 4840, fff8); INSN(bkpt, 4848, fff8, BKPT); INSN(movem, 48d0, fbf8, CF_ISA_A); INSN(movem, 48e8, fbf8, CF_ISA_A); - INSN(movem, 4880, fb80, M68000); + INSN(movem, 4880, fb80, M68K); BASE(ext, 4880, fff8); BASE(ext, 48c0, fff8); BASE(ext, 49c0, fff8); BASE(tst, 4a00, ff00); INSN(tas, 4ac0, ffc0, CF_ISA_B); - INSN(tas, 4ac0, ffc0, M68000); + INSN(tas, 4ac0, ffc0, M68K); #if defined(CONFIG_SOFTMMU) INSN(halt, 4ac8, ffff, CF_ISA_A); + INSN(halt, 4ac8, ffff, M68K); #endif INSN(pulse, 4acc, ffff, CF_ISA_A); BASE(illegal, 4afc, ffff); @@ -6043,7 +6129,7 @@ void register_m68k_insns (CPUM68KState *env) #if defined(CONFIG_SOFTMMU) INSN(move_to_usp, 4e60, fff8, USP); INSN(move_from_usp, 4e68, fff8, USP); - INSN(reset, 4e70, ffff, M68000); + INSN(reset, 4e70, ffff, M68K); BASE(stop, 4e72, ffff); BASE(rte, 4e73, ffff); INSN(cf_movec, 4e7b, ffff, CF_ISA_A); @@ -6052,15 +6138,19 @@ void register_m68k_insns (CPUM68KState *env) BASE(nop, 4e71, ffff); INSN(rtd, 4e74, ffff, RTD); BASE(rts, 4e75, ffff); - INSN(rtr, 4e77, ffff, M68000); + INSN(trapv, 4e76, ffff, M68K); + INSN(rtr, 4e77, ffff, M68K); BASE(jump, 4e80, ffc0); BASE(jump, 4ec0, ffc0); - INSN(addsubq, 5000, f080, M68000); + INSN(addsubq, 5000, f080, M68K); BASE(addsubq, 5080, f0c0); INSN(scc, 50c0, f0f8, CF_ISA_A); /* Scc.B Dx */ - INSN(scc, 50c0, f0c0, M68000); /* Scc.B */ - INSN(dbcc, 50c8, f0f8, M68000); - INSN(tpf, 51f8, fff8, CF_ISA_A); + INSN(scc, 50c0, f0c0, M68K); /* Scc.B */ + INSN(dbcc, 50c8, f0f8, M68K); + INSN(trapcc, 50fa, f0fe, TRAPCC); /* opmode 010, 011 */ + INSN(trapcc, 50fc, f0ff, TRAPCC); /* opmode 100 */ + INSN(trapcc, 51fa, fffe, CF_ISA_A); /* TPF (trapf) opmode 010, 011 */ + INSN(trapcc, 51fc, ffff, CF_ISA_A); /* TPF (trapf) opmode 100 */ /* Branch instructions. 
*/ BASE(branch, 6000, f000); @@ -6075,15 +6165,15 @@ void register_m68k_insns (CPUM68KState *env) INSN(mvzs, 7100, f100, CF_ISA_B); BASE(or, 8000, f000); BASE(divw, 80c0, f0c0); - INSN(sbcd_reg, 8100, f1f8, M68000); - INSN(sbcd_mem, 8108, f1f8, M68000); + INSN(sbcd_reg, 8100, f1f8, M68K); + INSN(sbcd_mem, 8108, f1f8, M68K); BASE(addsub, 9000, f000); INSN(undef, 90c0, f0c0, CF_ISA_A); INSN(subx_reg, 9180, f1f8, CF_ISA_A); - INSN(subx_reg, 9100, f138, M68000); - INSN(subx_mem, 9108, f138, M68000); + INSN(subx_reg, 9100, f138, M68K); + INSN(subx_mem, 9108, f138, M68K); INSN(suba, 91c0, f1c0, CF_ISA_A); - INSN(suba, 90c0, f0c0, M68000); + INSN(suba, 90c0, f0c0, M68K); BASE(undef_mac, a000, f000); INSN(mac, a000, f100, CF_EMAC); @@ -6104,41 +6194,41 @@ void register_m68k_insns (CPUM68KState *env) INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */ INSN(cmp, b080, f1c0, CF_ISA_A); INSN(cmpa, b1c0, f1c0, CF_ISA_A); - INSN(cmp, b000, f100, M68000); - INSN(eor, b100, f100, M68000); - INSN(cmpm, b108, f138, M68000); - INSN(cmpa, b0c0, f0c0, M68000); + INSN(cmp, b000, f100, M68K); + INSN(eor, b100, f100, M68K); + INSN(cmpm, b108, f138, M68K); + INSN(cmpa, b0c0, f0c0, M68K); INSN(eor, b180, f1c0, CF_ISA_A); BASE(and, c000, f000); - INSN(exg_dd, c140, f1f8, M68000); - INSN(exg_aa, c148, f1f8, M68000); - INSN(exg_da, c188, f1f8, M68000); + INSN(exg_dd, c140, f1f8, M68K); + INSN(exg_aa, c148, f1f8, M68K); + INSN(exg_da, c188, f1f8, M68K); BASE(mulw, c0c0, f0c0); - INSN(abcd_reg, c100, f1f8, M68000); - INSN(abcd_mem, c108, f1f8, M68000); + INSN(abcd_reg, c100, f1f8, M68K); + INSN(abcd_mem, c108, f1f8, M68K); BASE(addsub, d000, f000); INSN(undef, d0c0, f0c0, CF_ISA_A); INSN(addx_reg, d180, f1f8, CF_ISA_A); - INSN(addx_reg, d100, f138, M68000); - INSN(addx_mem, d108, f138, M68000); + INSN(addx_reg, d100, f138, M68K); + INSN(addx_mem, d108, f138, M68K); INSN(adda, d1c0, f1c0, CF_ISA_A); - INSN(adda, d0c0, f0c0, M68000); + INSN(adda, d0c0, f0c0, M68K); INSN(shift_im, e080, f0f0, CF_ISA_A); INSN(shift_reg, e0a0, f0f0, CF_ISA_A); - INSN(shift8_im, e000, f0f0, M68000); - INSN(shift16_im, e040, f0f0, M68000); - INSN(shift_im, e080, f0f0, M68000); - INSN(shift8_reg, e020, f0f0, M68000); - INSN(shift16_reg, e060, f0f0, M68000); - INSN(shift_reg, e0a0, f0f0, M68000); - INSN(shift_mem, e0c0, fcc0, M68000); - INSN(rotate_im, e090, f0f0, M68000); - INSN(rotate8_im, e010, f0f0, M68000); - INSN(rotate16_im, e050, f0f0, M68000); - INSN(rotate_reg, e0b0, f0f0, M68000); - INSN(rotate8_reg, e030, f0f0, M68000); - INSN(rotate16_reg, e070, f0f0, M68000); - INSN(rotate_mem, e4c0, fcc0, M68000); + INSN(shift8_im, e000, f0f0, M68K); + INSN(shift16_im, e040, f0f0, M68K); + INSN(shift_im, e080, f0f0, M68K); + INSN(shift8_reg, e020, f0f0, M68K); + INSN(shift16_reg, e060, f0f0, M68K); + INSN(shift_reg, e0a0, f0f0, M68K); + INSN(shift_mem, e0c0, fcc0, M68K); + INSN(rotate_im, e090, f0f0, M68K); + INSN(rotate8_im, e010, f0f0, M68K); + INSN(rotate16_im, e050, f0f0, M68K); + INSN(rotate_reg, e0b0, f0f0, M68K); + INSN(rotate8_reg, e030, f0f0, M68K); + INSN(rotate16_reg, e070, f0f0, M68K); + INSN(rotate_mem, e4c0, fcc0, M68K); INSN(bfext_mem, e9c0, fdc0, BITFIELD); /* bfextu & bfexts */ INSN(bfext_reg, e9c0, fdf8, BITFIELD); INSN(bfins_mem, efc0, ffc0, BITFIELD); @@ -6158,6 +6248,8 @@ void register_m68k_insns (CPUM68KState *env) INSN(fbcc, f280, ffc0, CF_FPU); INSN(fpu, f200, ffc0, FPU); INSN(fscc, f240, ffc0, FPU); + INSN(ftrapcc, f27a, fffe, FPU); /* opmode 010, 011 */ + INSN(ftrapcc, f27c, ffff, FPU); /* opmode 100 */ INSN(fbcc, f280, ff80, 
FPU); #if defined(CONFIG_SOFTMMU) INSN(frestore, f340, ffc0, CF_FPU); @@ -6185,6 +6277,8 @@ static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->env = env; dc->pc = dc->base.pc_first; + /* This value will always be filled in properly before m68k_tr_tb_stop. */ + dc->pc_prev = 0xdeadbeef; dc->cc_op = CC_OP_DYNAMIC; dc->cc_op_synced = 1; dc->done_mac = 0; @@ -6193,7 +6287,7 @@ static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS); /* If architectural single step active, limit to 1 */ - if (is_singlestepping(dc)) { + if (dc->ss_active) { dc->base.max_insns = 1; } } @@ -6218,6 +6312,7 @@ static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) do_writebacks(dc); do_release(dc); + dc->pc_prev = dc->base.pc_next; dc->base.pc_next = dc->pc; if (dc->base.is_jmp == DISAS_NEXT) { @@ -6252,17 +6347,12 @@ static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) break; case DISAS_TOO_MANY: update_cc_op(dc); - if (is_singlestepping(dc)) { - tcg_gen_movi_i32(QREG_PC, dc->pc); - gen_singlestep_exception(dc); - } else { - gen_jmp_tb(dc, 0, dc->pc); - } + gen_jmp_tb(dc, 0, dc->pc, dc->pc_prev); break; case DISAS_JUMP: /* We updated CC_OP and PC in gen_jmp/gen_jmp_im. */ - if (is_singlestepping(dc)) { - gen_singlestep_exception(dc); + if (dc->ss_active) { + gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev); } else { tcg_gen_lookup_and_goto_ptr(); } @@ -6272,8 +6362,8 @@ static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) * We updated CC_OP and PC in gen_exit_tb, but also modified * other state that may require returning to the main loop. */ - if (is_singlestepping(dc)) { - gen_singlestep_exception(dc); + if (dc->ss_active) { + gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev); } else { tcg_gen_exit_tb(NULL, 0); } @@ -6283,10 +6373,11 @@ static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) } } -static void m68k_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu) +static void m68k_tr_disas_log(const DisasContextBase *dcbase, + CPUState *cpu, FILE *logfile) { - qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first)); - log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first)); + target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size); } static const TranslatorOps m68k_tr_ops = { @@ -6298,10 +6389,11 @@ static const TranslatorOps m68k_tr_ops = { .disas_log = m68k_tr_disas_log, }; -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext dc; - translator_loop(&m68k_tr_ops, &dc.base, cpu, tb, max_insns); + translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base); } static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low) @@ -6387,13 +6479,3 @@ void m68k_cpu_dump_state(CPUState *cs, FILE *f, int flags) env->mmu.mmusr, env->mmu.ar); #endif } - -void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb, - target_ulong *data) -{ - int cc_op = data[1]; - env->pc = data[0]; - if (cc_op != CC_OP_DYNAMIC) { - env->cc_op = cc_op; - } -} diff --git a/target/meson.build b/target/meson.build index 2f6940255e..a53a60486f 100644 --- a/target/meson.build +++ b/target/meson.build @@ -5,6 +5,7 @@ subdir('cris') subdir('hexagon') subdir('hppa') subdir('i386') 
+subdir('loongarch') subdir('m68k') subdir('microblaze') subdir('mips') diff --git a/target/microblaze/cpu-param.h b/target/microblaze/cpu-param.h index 4d8297fa94..5e54ea0108 100644 --- a/target/microblaze/cpu-param.h +++ b/target/microblaze/cpu-param.h @@ -6,7 +6,7 @@ */ #ifndef MICROBLAZE_CPU_PARAM_H -#define MICROBLAZE_CPU_PARAM_H 1 +#define MICROBLAZE_CPU_PARAM_H /* * While system mode can address up to 64 bits of address space, diff --git a/target/microblaze/cpu-qom.h b/target/microblaze/cpu-qom.h index e520eefb12..255b39a45d 100644 --- a/target/microblaze/cpu-qom.h +++ b/target/microblaze/cpu-qom.h @@ -25,8 +25,7 @@ #define TYPE_MICROBLAZE_CPU "microblaze-cpu" -OBJECT_DECLARE_TYPE(MicroBlazeCPU, MicroBlazeCPUClass, - MICROBLAZE_CPU) +OBJECT_DECLARE_CPU_TYPE(MicroBlazeCPU, MicroBlazeCPUClass, MICROBLAZE_CPU) /** * MicroBlazeCPUClass: diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c index 72d8f2a0da..89e493f3ff 100644 --- a/target/microblaze/cpu.c +++ b/target/microblaze/cpu.c @@ -22,6 +22,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "qapi/error.h" #include "cpu.h" #include "qemu/module.h" @@ -83,15 +84,32 @@ static void mb_cpu_set_pc(CPUState *cs, vaddr value) cpu->env.iflags = 0; } +static vaddr mb_cpu_get_pc(CPUState *cs) +{ + MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); + + return cpu->env.pc; +} + static void mb_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); - cpu->env.pc = tb->pc; + cpu->env.pc = tb_pc(tb); cpu->env.iflags = tb->flags & IFLAGS_TB_MASK; } +static void mb_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); + + cpu->env.pc = data[0]; + cpu->env.iflags = data[1]; +} + static bool mb_cpu_has_work(CPUState *cs) { return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); @@ -365,10 +383,11 @@ static const struct SysemuCPUOps mb_sysemu_ops = { static const struct TCGCPUOps mb_tcg_ops = { .initialize = mb_tcg_init, .synchronize_from_tb = mb_cpu_synchronize_from_tb, - .cpu_exec_interrupt = mb_cpu_exec_interrupt, - .tlb_fill = mb_cpu_tlb_fill, + .restore_state_to_opc = mb_restore_state_to_opc, #ifndef CONFIG_USER_ONLY + .tlb_fill = mb_cpu_tlb_fill, + .cpu_exec_interrupt = mb_cpu_exec_interrupt, .do_interrupt = mb_cpu_do_interrupt, .do_transaction_failed = mb_cpu_transaction_failed, .do_unaligned_access = mb_cpu_do_unaligned_access, @@ -390,6 +409,7 @@ static void mb_cpu_class_init(ObjectClass *oc, void *data) cc->dump_state = mb_cpu_dump_state; cc->set_pc = mb_cpu_set_pc; + cc->get_pc = mb_cpu_get_pc; cc->gdb_read_register = mb_cpu_gdb_read_register; cc->gdb_write_register = mb_cpu_gdb_write_register; diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h index e4bba8a755..1e84dd8f47 100644 --- a/target/microblaze/cpu.h +++ b/target/microblaze/cpu.h @@ -22,9 +22,9 @@ #include "cpu-qom.h" #include "exec/cpu-defs.h" -#include "fpu/softfloat-types.h" +#include "qemu/cpu-float.h" -typedef struct CPUMBState CPUMBState; +typedef struct CPUArchState CPUMBState; #if !defined(CONFIG_USER_ONLY) #include "mmu.h" #endif @@ -239,7 +239,7 @@ typedef struct CPUMBState CPUMBState; #define USE_NON_SECURE_M_AXI_DC_MASK 0x4 #define USE_NON_SECURE_M_AXI_IC_MASK 0x8 -struct CPUMBState { +struct CPUArchState { uint32_t bvalue; /* TCG temporary, only valid during a TB */ uint32_t btarget; /* Full resolved branch destination */ @@ -339,7 +339,7 @@ typedef struct { * * A MicroBlaze CPU. 
*/ -struct MicroBlazeCPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; @@ -355,11 +355,13 @@ struct MicroBlazeCPU { }; +#ifndef CONFIG_USER_ONLY void mb_cpu_do_interrupt(CPUState *cs); bool mb_cpu_exec_interrupt(CPUState *cs, int int_req); -void mb_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); +#endif /* !CONFIG_USER_ONLY */ +G_NORETURN void mb_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr); void mb_cpu_dump_state(CPUState *cpu, FILE *f, int flags); hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, MemTxAttrs *attrs); @@ -383,29 +385,15 @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val) } void mb_tcg_init(void); -/* you can call this signal handler from your SIGBUS and SIGSEGV - signal handlers to inform the virtual CPU of exceptions. non zero - is returned if the signal was handled by the virtual CPU. */ -int cpu_mb_signal_handler(int host_signum, void *pinfo, - void *puc); #define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU -#define cpu_signal_handler cpu_mb_signal_handler - /* MMU modes definitions */ #define MMU_NOMMU_IDX 0 #define MMU_KERNEL_IDX 1 #define MMU_USER_IDX 2 /* See NB_MMU_MODES further up the file. */ -bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); - -typedef CPUMBState CPUArchState; -typedef MicroBlazeCPU ArchCPU; - #include "exec/cpu-all.h" /* Ensure there is no overlap between the two masks. */ @@ -420,6 +408,10 @@ static inline void cpu_get_tb_cpu_state(CPUMBState *env, target_ulong *pc, } #if !defined(CONFIG_USER_ONLY) +bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); + void mb_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, int mmu_idx, MemTxAttrs attrs, diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c index 20dbd67313..98bdb82de8 100644 --- a/target/microblaze/helper.c +++ b/target/microblaze/helper.c @@ -24,28 +24,7 @@ #include "qemu/host-utils.h" #include "exec/log.h" -#if defined(CONFIG_USER_ONLY) - -void mb_cpu_do_interrupt(CPUState *cs) -{ - MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); - CPUMBState *env = &cpu->env; - - cs->exception_index = -1; - env->res_addr = RES_ADDR_NONE; - env->regs[14] = env->pc; -} - -bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ - cs->exception_index = 0xaa; - cpu_loop_exit_restore(cs, retaddr); -} - -#else /* !CONFIG_USER_ONLY */ - +#ifndef CONFIG_USER_ONLY static bool mb_cpu_access_is_secure(MicroBlazeCPU *cpu, MMUAccessType access_type) { @@ -271,7 +250,6 @@ hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, return paddr; } -#endif bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { @@ -289,6 +267,8 @@ bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request) return false; } +#endif /* !CONFIG_USER_ONLY */ + void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) @@ -297,7 +277,7 @@ void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr, uint32_t esr, iflags; /* Recover the pc and iflags from the corresponding insn_start. 
*/ - cpu_restore_state(cs, retaddr, true); + cpu_restore_state(cs, retaddr); iflags = cpu->env.iflags; qemu_log_mask(CPU_LOG_INT, diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c index cc40f275ea..75651979a9 100644 --- a/target/microblaze/mmu.c +++ b/target/microblaze/mmu.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "exec/exec-all.h" diff --git a/target/microblaze/mmu.h b/target/microblaze/mmu.h index b6b4b9ad60..1068bd2d52 100644 --- a/target/microblaze/mmu.h +++ b/target/microblaze/mmu.h @@ -20,6 +20,8 @@ #ifndef TARGET_MICROBLAZE_MMU_H #define TARGET_MICROBLAZE_MMU_H +#include "cpu.h" + #define MMU_R_PID 0 #define MMU_R_ZPR 1 #define MMU_R_TLBX 2 diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c index 58d633584d..5b745d0928 100644 --- a/target/microblaze/op_helper.c +++ b/target/microblaze/op_helper.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "exec/helper-proto.h" #include "qemu/host-utils.h" diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c index a14ffed784..974f21eb31 100644 --- a/target/microblaze/translate.c +++ b/target/microblaze/translate.c @@ -126,12 +126,7 @@ static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec) static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest) { - if (dc->base.singlestep_enabled) { - TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG); - tcg_gen_movi_i32(cpu_pc, dest); - gen_helper_raise_exception(cpu_env, tmp); - tcg_temp_free_i32(tmp); - } else if (translator_use_goto_tb(&dc->base, dest)) { + if (translator_use_goto_tb(&dc->base, dest)) { tcg_gen_goto_tb(n); tcg_gen_movi_i32(cpu_pc, dest); tcg_gen_exit_tb(dc->base.tb, n); @@ -727,6 +722,7 @@ static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb) } #endif +#ifndef CONFIG_USER_ONLY static void record_unaligned_ess(DisasContext *dc, int rd, MemOp size, bool store) { @@ -739,6 +735,7 @@ static void record_unaligned_ess(DisasContext *dc, int rd, tcg_set_insn_start_param(dc->insn_start, 1, iflags); } +#endif static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop, int mem_index, bool rev) @@ -760,12 +757,19 @@ static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop, } } + /* + * For system mode, enforce alignment if the cpu configuration + * requires it. For user-mode, the Linux kernel will have fixed up + * any unaligned access, so emulate that by *not* setting MO_ALIGN. + */ +#ifndef CONFIG_USER_ONLY if (size > MO_8 && (dc->tb_flags & MSR_EE) && dc->cfg->unaligned_exceptions) { record_unaligned_ess(dc, rd, size, false); mop |= MO_ALIGN; } +#endif tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop); @@ -906,12 +910,19 @@ static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop, } } + /* + * For system mode, enforce alignment if the cpu configuration + * requires it. For user-mode, the Linux kernel will have fixed up + * any unaligned access, so emulate that by *not* setting MO_ALIGN. + */ +#ifndef CONFIG_USER_ONLY if (size > MO_8 && (dc->tb_flags & MSR_EE) && dc->cfg->unaligned_exceptions) { record_unaligned_ess(dc, rd, size, true); mop |= MO_ALIGN; } +#endif tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop); @@ -1779,7 +1790,7 @@ static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs) break; case DISAS_JUMP: - if (dc->jmp_dest != -1 && !cs->singlestep_enabled) { + if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) { /* Direct jump. 
*/ tcg_gen_discard_i32(cpu_btarget); @@ -1804,15 +1815,10 @@ static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs) return; } - /* Indirect jump (or direct jump w/ singlestep) */ + /* Indirect jump (or direct jump w/ goto_tb disabled) */ tcg_gen_mov_i32(cpu_pc, cpu_btarget); tcg_gen_discard_i32(cpu_btarget); - - if (unlikely(cs->singlestep_enabled)) { - gen_raise_exception(dc, EXCP_DEBUG); - } else { - tcg_gen_lookup_and_goto_ptr(); - } + tcg_gen_lookup_and_goto_ptr(); return; default: @@ -1827,10 +1833,11 @@ static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs) } } -static void mb_tr_disas_log(const DisasContextBase *dcb, CPUState *cs) +static void mb_tr_disas_log(const DisasContextBase *dcb, + CPUState *cs, FILE *logfile) { - qemu_log("IN: %s\n", lookup_symbol(dcb->pc_first)); - log_target_disas(cs, dcb->pc_first, dcb->tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(dcb->pc_first)); + target_disas(logfile, cs, dcb->pc_first, dcb->tb->size); } static const TranslatorOps mb_tr_ops = { @@ -1842,10 +1849,11 @@ static const TranslatorOps mb_tr_ops = { .disas_log = mb_tr_disas_log, }; -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext dc; - translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns); + translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base); } void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags) @@ -1938,10 +1946,3 @@ void mb_tcg_init(void) cpu_res_addr = tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr"); } - -void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc = data[0]; - env->iflags = data[1]; -} diff --git a/target/mips/TODO b/target/mips/TODO deleted file mode 100644 index 1d782d8027..0000000000 --- a/target/mips/TODO +++ /dev/null @@ -1,51 +0,0 @@ -Unsolved issues/bugs in the mips/mipsel backend ------------------------------------------------ - -General -------- -- Unimplemented ASEs: - - MDMX - - SmartMIPS - - microMIPS DSP r1 & r2 encodings -- MT ASE only partially implemented and not functional -- Shadow register support only partially implemented, - lacks set switching on interrupt/exception. -- 34K ITC not implemented. -- A general lack of documentation, especially for technical internals. - Existing documentation is x86-centric. -- Reverse endianness bit not implemented -- The TLB emulation is very inefficient: - QEMU's softmmu implements a x86-style MMU, with separate entries - for read/write/execute, a TLB index which is just a modulo of the - virtual address, and a set of TLBs for each user/kernel/supervisor - MMU mode. - MIPS has a single entry for read/write/execute and only one MMU mode. - But it is fully associative with randomized entry indices, and uses - up to 256 ASID tags as additional matching criterion (which roughly - equates to 256 MMU modes). It also has a global flag which causes - entries to match regardless of ASID. - To cope with these differences, QEMU currently flushes the TLB at - each ASID change. Using the MMU modes to implement ASIDs hinges on - implementing the global bit efficiently. -- save/restore of the CPU state is not implemented (see machine.c). - -MIPS64 ------- -- Userland emulation (both n32 and n64) not functional. - -"Generic" 4Kc system emulation ------------------------------- -- Doesn't correspond to any real hardware. 
Should be removed some day, - U-Boot is the last remaining user. - -PICA 61 system emulation ------------------------- -- No framebuffer support yet. - -MALTA system emulation ----------------------- -- We fake firmware support instead of doing the real thing -- Real firmware (YAMON) falls over when trying to init RAM, presumably - due to lacking system controller emulation. -- Bonito system controller not implemented -- MSC1 system controller not implemented diff --git a/target/mips/cpu-defs.c.inc b/target/mips/cpu-defs.c.inc index e03b2a998c..480e60aeec 100644 --- a/target/mips/cpu-defs.c.inc +++ b/target/mips/cpu-defs.c.inc @@ -369,7 +369,6 @@ const mips_def_t mips_defs[] = * Config3: VZ, CTXTC, CDMM, TL * Config4: MMUExtDef * Config5: MRP - * FIR(FCR0): Has2008 * */ .name = "P5600", .CP0_PRid = 0x0001A800, @@ -805,7 +804,7 @@ const mips_def_t mips_defs[] = .mmu_type = MMU_TYPE_R4000, }, { - .name = "Loongson-3A1000", + .name = "Loongson-3A1000", /* Loongson-3A R1, GS464-based */ .CP0_PRid = 0x6305, /* 64KB I-cache and d-cache. 4 way with 32 bit cache line size. */ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) | @@ -828,14 +827,14 @@ const mips_def_t mips_defs[] = (0x1 << FCR0_D) | (0x1 << FCR0_S), .CP1_fcr31 = 0, .CP1_fcr31_rw_bitmask = 0xFF83FFFF, - .SEGBITS = 42, + .SEGBITS = 48, .PABITS = 48, .insn_flags = CPU_MIPS64R2 | INSN_LOONGSON3A | ASE_LMMI | ASE_LEXT, .mmu_type = MMU_TYPE_R4000, }, { - .name = "Loongson-3A4000", /* GS464V-based */ + .name = "Loongson-3A4000", /* Loongson-3A R4, GS464V-based */ .CP0_PRid = 0x14C000, /* 64KB I-cache and d-cache. 4 way with 32 bit cache line size. */ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) | @@ -886,6 +885,7 @@ const mips_def_t mips_defs[] = (0x1 << FCR0_D) | (0x1 << FCR0_S), .CP1_fcr31 = 0, .CP1_fcr31_rw_bitmask = 0xFF83FFFF, + .MSAIR = (0x01 << MSAIR_ProcID) | (0x40 << MSAIR_Rev), .SEGBITS = 48, .PABITS = 48, .insn_flags = CPU_MIPS64R2 | INSN_LOONGSON3A | @@ -921,6 +921,34 @@ const mips_def_t mips_defs[] = .insn_flags = CPU_MIPS64R2 | ASE_DSP | ASE_DSP_R2, .mmu_type = MMU_TYPE_R4000, }, + { + /* + * Octeon 68xx with MIPS64 Cavium Octeon features. 
+ */ + .name = "Octeon68XX", + .CP0_PRid = 0x000D9100, + .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (0x3F << CP0C1_MMU) | + (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) | + (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_LPA), + .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | + (0x3c << CP0C4_KScrExist) | (1U << CP0C4_MMUExtDef) | + (3U << CP0C4_MMUSizeExt), + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 4, + .CP0_PageGrain = (1 << CP0PG_ELPA), + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x12F8FFFF, + .SEGBITS = 42, + .PABITS = 49, + .insn_flags = CPU_MIPS64R2 | INSN_OCTEON, + .mmu_type = MMU_TYPE_R4000, + }, #endif }; diff --git a/target/mips/cpu-param.h b/target/mips/cpu-param.h index 9c4a6ea45e..f4c76994ea 100644 --- a/target/mips/cpu-param.h +++ b/target/mips/cpu-param.h @@ -5,14 +5,14 @@ */ #ifndef MIPS_CPU_PARAM_H -#define MIPS_CPU_PARAM_H 1 +#define MIPS_CPU_PARAM_H #ifdef TARGET_MIPS64 # define TARGET_LONG_BITS 64 #else # define TARGET_LONG_BITS 32 #endif -#ifdef TARGET_MIPS64 +#ifdef TARGET_ABI_MIPSN64 #define TARGET_PHYS_ADDR_SPACE_BITS 48 #define TARGET_VIRT_ADDR_SPACE_BITS 48 #else diff --git a/target/mips/cpu-qom.h b/target/mips/cpu-qom.h index dda0c911fa..e28b529607 100644 --- a/target/mips/cpu-qom.h +++ b/target/mips/cpu-qom.h @@ -29,8 +29,7 @@ #define TYPE_MIPS_CPU "mips-cpu" #endif -OBJECT_DECLARE_TYPE(MIPSCPU, MIPSCPUClass, - MIPS_CPU) +OBJECT_DECLARE_CPU_TYPE(MIPSCPU, MIPSCPUClass, MIPS_CPU) /** * MIPSCPUClass: diff --git a/target/mips/cpu.c b/target/mips/cpu.c index d426918291..7a565466cb 100644 --- a/target/mips/cpu.c +++ b/target/mips/cpu.c @@ -128,6 +128,13 @@ static void mips_cpu_set_pc(CPUState *cs, vaddr value) mips_env_set_pc(&cpu->env, value); } +static vaddr mips_cpu_get_pc(CPUState *cs) +{ + MIPSCPU *cpu = MIPS_CPU(cs); + + return cpu->env.active_tc.PC; +} + static bool mips_cpu_has_work(CPUState *cs) { MIPSCPU *cpu = MIPS_CPU(cs); @@ -189,7 +196,7 @@ static void mips_cpu_reset(DeviceState *dev) /* Reset registers to their default values */ env->CP0_PRid = env->cpu_model->CP0_PRid; env->CP0_Config0 = env->cpu_model->CP0_Config0; -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN env->CP0_Config0 |= (1 << CP0C0_BE); #endif env->CP0_Config1 = env->cpu_model->CP0_Config1; @@ -295,6 +302,12 @@ static void mips_cpu_reset(DeviceState *dev) env->CP0_EntryHi_ASID_mask = (env->CP0_Config5 & (1 << CP0C5_MI)) ? 0x0 : (env->CP0_Config4 & (1 << CP0C4_AE)) ? 0x3ff : 0xff; env->CP0_Status = (1 << CP0St_BEV) | (1 << CP0St_ERL); + if (env->insn_flags & INSN_LOONGSON2F) { + /* Loongson-2F has those bits hardcoded to 1 */ + env->CP0_Status |= (1 << CP0St_KX) | (1 << CP0St_SX) | + (1 << CP0St_UX); + } + /* * Vectored interrupts not implemented, timer on int 7, * no performance counters. 
@@ -305,7 +318,7 @@ static void mips_cpu_reset(DeviceState *dev) for (i = 0; i < 7; i++) { env->CP0_WatchLo[i] = 0; - env->CP0_WatchHi[i] = 0x80000000; + env->CP0_WatchHi[i] = 1 << CP0WH_M; } env->CP0_WatchLo[7] = 0; env->CP0_WatchHi[7] = 0; @@ -418,7 +431,7 @@ static void mips_cpu_disas_set_info(CPUState *s, disassemble_info *info) CPUMIPSState *env = &cpu->env; if (!(env->insn_flags & ISA_NANOMIPS32)) { -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN info->print_insn = print_insn_big_mips; #else info->print_insn = print_insn_little_mips; @@ -434,14 +447,13 @@ static void mips_cpu_disas_set_info(CPUState *s, disassemble_info *info) * Since commit 6af0bf9c7c3 this model assumes a CPU clocked at 200MHz. */ #define CPU_FREQ_HZ_DEFAULT 200000000 -#define CP0_COUNT_RATE_DEFAULT 2 static void mips_cp0_period_set(MIPSCPU *cpu) { CPUMIPSState *env = &cpu->env; env->cp0_count_ns = clock_ticks_to_ns(MIPS_CPU(cpu)->clock, - cpu->cp0_count_rate); + env->cpu_model->CCRes); assert(env->cp0_count_ns); } @@ -514,13 +526,6 @@ static ObjectClass *mips_cpu_class_by_name(const char *cpu_model) return oc; } -static Property mips_cpu_properties[] = { - /* CP0 timer running at half the clock of the CPU */ - DEFINE_PROP_UINT32("cp0-count-rate", MIPSCPU, cp0_count_rate, - CP0_COUNT_RATE_DEFAULT), - DEFINE_PROP_END_OF_LIST() -}; - #ifndef CONFIG_USER_ONLY #include "hw/core/sysemu-cpu-ops.h" @@ -539,10 +544,11 @@ static const struct SysemuCPUOps mips_sysemu_ops = { static const struct TCGCPUOps mips_tcg_ops = { .initialize = mips_tcg_init, .synchronize_from_tb = mips_cpu_synchronize_from_tb, - .cpu_exec_interrupt = mips_cpu_exec_interrupt, - .tlb_fill = mips_cpu_tlb_fill, + .restore_state_to_opc = mips_restore_state_to_opc, #if !defined(CONFIG_USER_ONLY) + .tlb_fill = mips_cpu_tlb_fill, + .cpu_exec_interrupt = mips_cpu_exec_interrupt, .do_interrupt = mips_cpu_do_interrupt, .do_transaction_failed = mips_cpu_do_transaction_failed, .do_unaligned_access = mips_cpu_do_unaligned_access, @@ -560,12 +566,12 @@ static void mips_cpu_class_init(ObjectClass *c, void *data) device_class_set_parent_realize(dc, mips_cpu_realizefn, &mcc->parent_realize); device_class_set_parent_reset(dc, mips_cpu_reset, &mcc->parent_reset); - device_class_set_props(dc, mips_cpu_properties); cc->class_by_name = mips_cpu_class_by_name; cc->has_work = mips_cpu_has_work; cc->dump_state = mips_cpu_dump_state; cc->set_pc = mips_cpu_set_pc; + cc->get_pc = mips_cpu_get_pc; cc->gdb_read_register = mips_cpu_gdb_read_register; cc->gdb_write_register = mips_cpu_gdb_write_register; #ifndef CONFIG_USER_ONLY diff --git a/target/mips/cpu.h b/target/mips/cpu.h index 1dfe69c6c0..0a085643a3 100644 --- a/target/mips/cpu.h +++ b/target/mips/cpu.h @@ -35,7 +35,7 @@ union fpr_t { *define FP_ENDIAN_IDX to access the same location * in the fpr_t union regardless of the host endianness */ -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN # define FP_ENDIAN_IDX 1 #else # define FP_ENDIAN_IDX 0 @@ -524,8 +524,7 @@ struct TCState { }; struct MIPSITUState; -typedef struct CPUMIPSState CPUMIPSState; -struct CPUMIPSState { +typedef struct CPUArchState { TCState active_tc; CPUMIPSFPUContext active_fpu; @@ -1006,6 +1005,7 @@ struct CPUMIPSState { */ uint64_t CP0_WatchHi[8]; #define CP0WH_ASID 16 +#define CP0WH_M 31 /* * CP0 Register 20 */ @@ -1077,7 +1077,7 @@ struct CPUMIPSState { #define EXCP_INST_NOTAVAIL 0x2 /* No valid instruction word for BadInstr */ uint32_t hflags; /* CPU State */ /* TMASK defines different execution modes */ -#define MIPS_HFLAG_TMASK 
0x1F5807FF +#define MIPS_HFLAG_TMASK 0x3F5807FF #define MIPS_HFLAG_MODE 0x00007 /* execution modes */ /* * The KSU flags must be the lowest bits in hflags. The flag order @@ -1161,18 +1161,17 @@ struct CPUMIPSState { QEMUTimer *timer; /* Internal timer */ target_ulong exception_base; /* ExceptionBase input to the core */ uint64_t cp0_count_ns; /* CP0_Count clock period (in nanoseconds) */ -}; +} CPUMIPSState; /** * MIPSCPU: * @env: #CPUMIPSState * @clock: this CPU input clock (may be connected * to an output clock from another device). - * @cp0_count_rate: rate at which the coprocessor 0 counter increments * * A MIPS CPU. */ -struct MIPSCPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ @@ -1180,20 +1179,11 @@ struct MIPSCPU { Clock *clock; CPUNegativeOffsetState neg; CPUMIPSState env; - /* - * The Count register acts as a timer, incrementing at a constant rate, - * whether or not an instruction is executed, retired, or any forward - * progress is made through the pipeline. The rate at which the counter - * increments is implementation dependent, and is a function of the - * pipeline clock of the processor, not the issue width of the processor. - */ - unsigned cp0_count_rate; }; void mips_cpu_list(void); -#define cpu_signal_handler cpu_mips_signal_handler #define cpu_list mips_cpu_list extern void cpu_wrdsp(uint32_t rs, uint32_t mask_num, CPUMIPSState *env); @@ -1219,9 +1209,6 @@ static inline int cpu_mmu_index(CPUMIPSState *env, bool ifetch) return hflags_mmu_index(env->hflags); } -typedef CPUMIPSState CPUArchState; -typedef MIPSCPU ArchCPU; - #include "exec/cpu-all.h" /* Exceptions */ @@ -1265,8 +1252,9 @@ enum { EXCP_MSAFPE, EXCP_TLBXI, EXCP_TLBRI, + EXCP_SEMIHOST, - EXCP_LAST = EXCP_TLBRI, + EXCP_LAST = EXCP_SEMIHOST, }; /* @@ -1277,8 +1265,6 @@ enum { */ #define CPU_INTERRUPT_WAKE CPU_INTERRUPT_TGT_INT_0 -int cpu_mips_signal_handler(int host_signum, void *pinfo, void *puc); - #define MIPS_CPU_TYPE_SUFFIX "-" TYPE_MIPS_CPU #define MIPS_CPU_TYPE_NAME(model) model MIPS_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_MIPS_CPU diff --git a/target/mips/helper.h b/target/mips/helper.h index a9c6c7d1a3..de32d82e98 100644 --- a/target/mips/helper.h +++ b/target/mips/helper.h @@ -16,21 +16,6 @@ DEF_HELPER_3(lld, tl, env, tl, int) #endif #endif -DEF_HELPER_3(muls, tl, env, tl, tl) -DEF_HELPER_3(mulsu, tl, env, tl, tl) -DEF_HELPER_3(macc, tl, env, tl, tl) -DEF_HELPER_3(maccu, tl, env, tl, tl) -DEF_HELPER_3(msac, tl, env, tl, tl) -DEF_HELPER_3(msacu, tl, env, tl, tl) -DEF_HELPER_3(mulhi, tl, env, tl, tl) -DEF_HELPER_3(mulhiu, tl, env, tl, tl) -DEF_HELPER_3(mulshi, tl, env, tl, tl) -DEF_HELPER_3(mulshiu, tl, env, tl, tl) -DEF_HELPER_3(macchi, tl, env, tl, tl) -DEF_HELPER_3(macchiu, tl, env, tl, tl) -DEF_HELPER_3(msachi, tl, env, tl, tl) -DEF_HELPER_3(msachiu, tl, env, tl, tl) - DEF_HELPER_FLAGS_1(bitswap, TCG_CALL_NO_RWG_SE, tl, tl) #ifdef TARGET_MIPS64 DEF_HELPER_FLAGS_1(dbitswap, TCG_CALL_NO_RWG_SE, tl, tl) @@ -609,3 +594,6 @@ DEF_HELPER_FLAGS_2(rddsp, 0, tl, tl, env) #endif /* !CONFIG_USER_ONLY */ #include "tcg/msa_helper.h.inc" + +/* Vendor extensions */ +#include "tcg/vr54xx_helper.h.inc" diff --git a/target/mips/internal.h b/target/mips/internal.h index eecdd10116..57b312689a 100644 --- a/target/mips/internal.h +++ b/target/mips/internal.h @@ -12,6 +12,7 @@ #ifdef CONFIG_TCG #include "tcg/tcg-internal.h" #endif +#include "cpu.h" /* * MMU types, the first four entries have the same layout as the @@ -46,6 +47,15 @@ struct mips_def_t { target_ulong CP0_LLAddr_rw_bitmask; 
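Two of the hunks above are pure bit bookkeeping, and the arithmetic is easy to check in isolation: the WatchHi reset value 1 << CP0WH_M (with CP0WH_M defined as 31 above) equals the 0x80000000 that was previously hard-coded, and the widened MIPS_HFLAG_TMASK differs from the old mask only in bit 29. A standalone check, not part of the patch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* 1 << CP0WH_M with CP0WH_M == 31 is the old hard-coded reset value. */
    assert((UINT32_C(1) << 31) == UINT32_C(0x80000000));
    /* The new TMASK value adds exactly one flag bit (bit 29) over the old one. */
    assert((UINT32_C(0x3F5807FF) ^ UINT32_C(0x1F5807FF)) == (UINT32_C(1) << 29));
    return 0;
}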
int CP0_LLAddr_shift; int32_t SYNCI_Step; + /* + * @CCRes: rate at which the coprocessor 0 counter increments + * + * The Count register acts as a timer, incrementing at a constant rate, + * whether or not an instruction is executed, retired, or any forward + * progress is made through the pipeline. The rate at which the counter + * increments is implementation dependent, and is a function of the + * pipeline clock of the processor, not the issue width of the processor. + */ int32_t CCRes; int32_t CP0_Status_rw_bitmask; int32_t CP0_TCStatus_rw_bitmask; @@ -133,14 +143,14 @@ struct r4k_tlb_t { struct CPUMIPSTLBContext { uint32_t nb_tlb; uint32_t tlb_in_use; - int (*map_address)(struct CPUMIPSState *env, hwaddr *physical, int *prot, + int (*map_address)(CPUMIPSState *env, hwaddr *physical, int *prot, target_ulong address, MMUAccessType access_type); - void (*helper_tlbwi)(struct CPUMIPSState *env); - void (*helper_tlbwr)(struct CPUMIPSState *env); - void (*helper_tlbp)(struct CPUMIPSState *env); - void (*helper_tlbr)(struct CPUMIPSState *env); - void (*helper_tlbinv)(struct CPUMIPSState *env); - void (*helper_tlbinvf)(struct CPUMIPSState *env); + void (*helper_tlbwi)(CPUMIPSState *env); + void (*helper_tlbwr)(CPUMIPSState *env); + void (*helper_tlbp)(CPUMIPSState *env); + void (*helper_tlbr)(CPUMIPSState *env); + void (*helper_tlbinv)(CPUMIPSState *env); + void (*helper_tlbinvf)(CPUMIPSState *env); union { struct { r4k_tlb_t tlb[MIPS_TLB_MAX]; @@ -156,8 +166,6 @@ extern const VMStateDescription vmstate_mips_cpu; #endif /* !CONFIG_USER_ONLY */ -#define cpu_signal_handler cpu_mips_signal_handler - static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env) { return (env->CP0_Status & (1 << CP0St_IE)) && diff --git a/target/mips/kvm.c b/target/mips/kvm.c index 086debd9f0..bcb8e06b2c 100644 --- a/target/mips/kvm.c +++ b/target/mips/kvm.c @@ -14,7 +14,6 @@ #include -#include "qemu-common.h" #include "cpu.h" #include "internal.h" #include "qemu/error-report.h" @@ -1295,3 +1294,7 @@ bool kvm_arch_cpu_check_are_resettable(void) { return true; } + +void kvm_arch_accel_class_init(ObjectClass *oc) +{ +} diff --git a/target/mips/mips-defs.h b/target/mips/mips-defs.h index 0a12d982a7..a6cebe0265 100644 --- a/target/mips/mips-defs.h +++ b/target/mips/mips-defs.h @@ -42,6 +42,7 @@ #define INSN_LOONGSON2E 0x0000040000000000ULL #define INSN_LOONGSON2F 0x0000080000000000ULL #define INSN_LOONGSON3A 0x0000100000000000ULL +#define INSN_OCTEON 0x0000200000000000ULL /* * bits 52-63: vendor-specific ASEs */ diff --git a/target/mips/tcg/exception.c b/target/mips/tcg/exception.c index 4fb8b00711..96e61170e6 100644 --- a/target/mips/tcg/exception.c +++ b/target/mips/tcg/exception.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "internal.h" #include "exec/helper-proto.h" @@ -81,29 +82,11 @@ void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; - env->active_tc.PC = tb->pc; + env->active_tc.PC = tb_pc(tb); env->hflags &= ~MIPS_HFLAG_BMASK; env->hflags |= tb->flags & MIPS_HFLAG_BMASK; } -bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request) -{ - if (interrupt_request & CPU_INTERRUPT_HARD) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; - - if (cpu_mips_hw_interrupts_enabled(env) && - cpu_mips_hw_interrupts_pending(env)) { - /* Raise it */ - cs->exception_index = EXCP_EXT_INTERRUPT; - env->error_code = 0; - mips_cpu_do_interrupt(cs); - return true; - } - } 
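With the cp0-count-rate property removed, the CP0_Count period now comes from the per-model CCRes field documented above. Assuming the default 200 MHz model clock from cpu.c and a CCRes of 2 (the value the removed property defaulted to), the period works out to 10 ns. A standalone sketch of that arithmetic, not part of the patch and without the QEMU Clock API:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t freq_hz = 200000000;   /* CPU_FREQ_HZ_DEFAULT in cpu.c above */
    uint64_t ccres   = 2;           /* former CP0_COUNT_RATE_DEFAULT */
    /* clock_ticks_to_ns(clock, ccres) amounts to ccres ticks of the input clock. */
    uint64_t count_period_ns = ccres * (UINT64_C(1000000000) / freq_hz);
    assert(count_period_ns == 10);
    return 0;
}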
- return false; -} - static const char * const excp_names[EXCP_LAST + 1] = { [EXCP_RESET] = "reset", [EXCP_SRESET] = "soft reset", @@ -142,6 +125,7 @@ static const char * const excp_names[EXCP_LAST + 1] = { [EXCP_TLBRI] = "TLB read-inhibit", [EXCP_MSADIS] = "MSA disabled", [EXCP_MSAFPE] = "MSA floating point", + [EXCP_SEMIHOST] = "Semihosting", }; const char *mips_exception_name(int32_t exception) diff --git a/target/mips/tcg/ldst_helper.c b/target/mips/tcg/ldst_helper.c index d42812b8a6..d0bd0267b2 100644 --- a/target/mips/tcg/ldst_helper.c +++ b/target/mips/tcg/ldst_helper.c @@ -52,31 +52,45 @@ HELPER_LD_ATOMIC(lld, ldq, 0x7, (target_ulong)) #endif /* !CONFIG_USER_ONLY */ -#ifdef TARGET_WORDS_BIGENDIAN -#define GET_LMASK(v) ((v) & 3) -#define GET_OFFSET(addr, offset) (addr + (offset)) -#else -#define GET_LMASK(v) (((v) & 3) ^ 3) -#define GET_OFFSET(addr, offset) (addr - (offset)) -#endif +static inline bool cpu_is_bigendian(CPUMIPSState *env) +{ + return extract32(env->CP0_Config0, CP0C0_BE, 1); +} + +static inline target_ulong get_lmask(CPUMIPSState *env, + target_ulong value, unsigned bits) +{ + unsigned mask = (bits / BITS_PER_BYTE) - 1; + + value &= mask; + + if (!cpu_is_bigendian(env)) { + value ^= mask; + } + + return value; +} void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, int mem_idx) { + target_ulong lmask = get_lmask(env, arg2, 32); + int dir = cpu_is_bigendian(env) ? 1 : -1; + cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC()); - if (GET_LMASK(arg2) <= 2) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), + if (lmask <= 2) { + cpu_stb_mmuidx_ra(env, arg2 + 1 * dir, (uint8_t)(arg1 >> 16), mem_idx, GETPC()); } - if (GET_LMASK(arg2) <= 1) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), + if (lmask <= 1) { + cpu_stb_mmuidx_ra(env, arg2 + 2 * dir, (uint8_t)(arg1 >> 8), mem_idx, GETPC()); } - if (GET_LMASK(arg2) == 0) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, + if (lmask == 0) { + cpu_stb_mmuidx_ra(env, arg2 + 3 * dir, (uint8_t)arg1, mem_idx, GETPC()); } } @@ -84,20 +98,23 @@ void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, int mem_idx) { + target_ulong lmask = get_lmask(env, arg2, 32); + int dir = cpu_is_bigendian(env) ? 1 : -1; + cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC()); - if (GET_LMASK(arg2) >= 1) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), + if (lmask >= 1) { + cpu_stb_mmuidx_ra(env, arg2 - 1 * dir, (uint8_t)(arg1 >> 8), mem_idx, GETPC()); } - if (GET_LMASK(arg2) >= 2) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), + if (lmask >= 2) { + cpu_stb_mmuidx_ra(env, arg2 - 2 * dir, (uint8_t)(arg1 >> 16), mem_idx, GETPC()); } - if (GET_LMASK(arg2) == 3) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), + if (lmask == 3) { + cpu_stb_mmuidx_ra(env, arg2 - 3 * dir, (uint8_t)(arg1 >> 24), mem_idx, GETPC()); } } @@ -107,49 +124,47 @@ void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, * "half" load and stores. We must do the memory access inline, * or fault handling won't work. */ -#ifdef TARGET_WORDS_BIGENDIAN -#define GET_LMASK64(v) ((v) & 7) -#else -#define GET_LMASK64(v) (((v) & 7) ^ 7) -#endif void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, int mem_idx) { + target_ulong lmask = get_lmask(env, arg2, 64); + int dir = cpu_is_bigendian(env) ? 
1 : -1; + cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC()); - if (GET_LMASK64(arg2) <= 6) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), + if (lmask <= 6) { + cpu_stb_mmuidx_ra(env, arg2 + 1 * dir, (uint8_t)(arg1 >> 48), mem_idx, GETPC()); } - if (GET_LMASK64(arg2) <= 5) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), + if (lmask <= 5) { + cpu_stb_mmuidx_ra(env, arg2 + 2 * dir, (uint8_t)(arg1 >> 40), mem_idx, GETPC()); } - if (GET_LMASK64(arg2) <= 4) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), + if (lmask <= 4) { + cpu_stb_mmuidx_ra(env, arg2 + 3 * dir, (uint8_t)(arg1 >> 32), mem_idx, GETPC()); } - if (GET_LMASK64(arg2) <= 3) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), + if (lmask <= 3) { + cpu_stb_mmuidx_ra(env, arg2 + 4 * dir, (uint8_t)(arg1 >> 24), mem_idx, GETPC()); } - if (GET_LMASK64(arg2) <= 2) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), + if (lmask <= 2) { + cpu_stb_mmuidx_ra(env, arg2 + 5 * dir, (uint8_t)(arg1 >> 16), mem_idx, GETPC()); } - if (GET_LMASK64(arg2) <= 1) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), + if (lmask <= 1) { + cpu_stb_mmuidx_ra(env, arg2 + 6 * dir, (uint8_t)(arg1 >> 8), mem_idx, GETPC()); } - if (GET_LMASK64(arg2) <= 0) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, + if (lmask <= 0) { + cpu_stb_mmuidx_ra(env, arg2 + 7 * dir, (uint8_t)arg1, mem_idx, GETPC()); } } @@ -157,40 +172,43 @@ void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, int mem_idx) { + target_ulong lmask = get_lmask(env, arg2, 64); + int dir = cpu_is_bigendian(env) ? 1 : -1; + cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC()); - if (GET_LMASK64(arg2) >= 1) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), + if (lmask >= 1) { + cpu_stb_mmuidx_ra(env, arg2 - 1 * dir, (uint8_t)(arg1 >> 8), mem_idx, GETPC()); } - if (GET_LMASK64(arg2) >= 2) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), + if (lmask >= 2) { + cpu_stb_mmuidx_ra(env, arg2 - 2 * dir, (uint8_t)(arg1 >> 16), mem_idx, GETPC()); } - if (GET_LMASK64(arg2) >= 3) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), + if (lmask >= 3) { + cpu_stb_mmuidx_ra(env, arg2 - 3 * dir, (uint8_t)(arg1 >> 24), mem_idx, GETPC()); } - if (GET_LMASK64(arg2) >= 4) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), + if (lmask >= 4) { + cpu_stb_mmuidx_ra(env, arg2 - 4 * dir, (uint8_t)(arg1 >> 32), mem_idx, GETPC()); } - if (GET_LMASK64(arg2) >= 5) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), + if (lmask >= 5) { + cpu_stb_mmuidx_ra(env, arg2 - 5 * dir, (uint8_t)(arg1 >> 40), mem_idx, GETPC()); } - if (GET_LMASK64(arg2) >= 6) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), + if (lmask >= 6) { + cpu_stb_mmuidx_ra(env, arg2 - 6 * dir, (uint8_t)(arg1 >> 48), mem_idx, GETPC()); } - if (GET_LMASK64(arg2) == 7) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), + if (lmask == 7) { + cpu_stb_mmuidx_ra(env, arg2 - 7 * dir, (uint8_t)(arg1 >> 56), mem_idx, GETPC()); } } diff --git a/target/mips/tcg/lmmi_helper.c b/target/mips/tcg/lmmi_helper.c index abeb7736ae..2c8732525c 100644 --- a/target/mips/tcg/lmmi_helper.c +++ b/target/mips/tcg/lmmi_helper.c @@ -37,7 +37,7 @@ typedef union { } LMIValue; /* Some byte ordering issues 
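The new get_lmask() above folds the old compile-time GET_LMASK/GET_LMASK64 macros into one helper keyed off the CPU's runtime endianness. A minimal standalone model of the same arithmetic (plain host C, with cpu_is_bigendian() replaced by a flag), not part of the patch, checked against the former macros:

#include <assert.h>
#include <stdbool.h>

static unsigned lane_mask(unsigned long addr, unsigned bits, bool big_endian)
{
    unsigned mask = (bits / 8) - 1;   /* 3 for swl/swr, 7 for sdl/sdr */
    unsigned v = addr & mask;
    return big_endian ? v : v ^ mask;
}

int main(void)
{
    /* Old GET_LMASK(v): (v & 3) big-endian, ((v & 3) ^ 3) little-endian. */
    assert(lane_mask(0x1001, 32, true)  == 1);
    assert(lane_mask(0x1001, 32, false) == 2);
    /* Old GET_LMASK64(v): same idea with a 7 mask. */
    assert(lane_mask(0x1005, 64, true)  == 5);
    assert(lane_mask(0x1005, 64, false) == 2);
    return 0;
}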
can be mitigated by XORing in the following. */ -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN # define BYTE_ORDER_XOR(N) N #else # define BYTE_ORDER_XOR(N) 0 diff --git a/target/mips/tcg/meson.build b/target/mips/tcg/meson.build index bf4001e574..7ee969ec8f 100644 --- a/target/mips/tcg/meson.build +++ b/target/mips/tcg/meson.build @@ -1,8 +1,9 @@ gen = [ - decodetree.process('mips32r6.decode', extra_args: '--static-decode=decode_mips32r6'), - decodetree.process('mips64r6.decode', extra_args: '--static-decode=decode_mips64r6'), + decodetree.process('rel6.decode', extra_args: ['--decode=decode_isa_rel6']), decodetree.process('msa.decode', extra_args: '--decode=decode_ase_msa'), decodetree.process('tx79.decode', extra_args: '--static-decode=decode_tx79'), + decodetree.process('vr54xx.decode', extra_args: '--decode=decode_ext_vr54xx'), + decodetree.process('octeon.decode', extra_args: '--decode=decode_ext_octeon'), ] mips_ss.add(gen) @@ -19,16 +20,16 @@ mips_ss.add(files( 'translate.c', 'translate_addr_const.c', 'txx9_translate.c', + 'vr54xx_helper.c', + 'vr54xx_translate.c', )) mips_ss.add(when: 'TARGET_MIPS64', if_true: files( 'tx79_translate.c', + 'octeon_translate.c', ), if_false: files( 'mxu_translate.c', )) -if have_user - subdir('user') -endif if have_system subdir('sysemu') endif diff --git a/target/mips/tcg/micromips_translate.c.inc b/target/mips/tcg/micromips_translate.c.inc index 5e95f47854..632895cc9e 100644 --- a/target/mips/tcg/micromips_translate.c.inc +++ b/target/mips/tcg/micromips_translate.c.inc @@ -822,11 +822,11 @@ static void gen_pool16c_insn(DisasContext *ctx) gen_HILO(ctx, OPC_MFLO, 0, uMIPS_RS5(ctx->opcode)); break; case BREAK16: - generate_exception_end(ctx, EXCP_BREAK); + generate_exception_break(ctx, extract32(ctx->opcode, 0, 4)); break; case SDBBP16: - if (is_uhi(extract32(ctx->opcode, 0, 4))) { - gen_helper_do_semihosting(cpu_env); + if (is_uhi(ctx, extract32(ctx->opcode, 0, 4))) { + ctx->base.is_jmp = DISAS_SEMIHOST; } else { /* * XXX: not clear which exception should be raised @@ -937,12 +937,12 @@ static void gen_pool16c_r6_insn(DisasContext *ctx) break; case R6_BREAK16: /* BREAK16 */ - generate_exception(ctx, EXCP_BREAK); + generate_exception_break(ctx, extract32(ctx->opcode, 6, 4)); break; case R6_SDBBP16: /* SDBBP16 */ - if (is_uhi(extract32(ctx->opcode, 6, 4))) { - gen_helper_do_semihosting(cpu_env); + if (is_uhi(ctx, extract32(ctx->opcode, 6, 4))) { + ctx->base.is_jmp = DISAS_SEMIHOST; } else { if (ctx->hflags & MIPS_HFLAG_SBRI) { generate_exception(ctx, EXCP_RI); @@ -1001,20 +1001,20 @@ static void gen_ldst_pair(DisasContext *ctx, uint32_t opc, int rd, gen_reserved_instruction(ctx); return; } - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ); + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ); gen_store_gpr(t1, rd); tcg_gen_movi_tl(t1, 8); gen_op_addr_add(ctx, t0, t0, t1); - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ); + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ); gen_store_gpr(t1, rd + 1); break; case SDP: gen_load_gpr(t1, rd); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ); + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ); tcg_gen_movi_tl(t1, 8); gen_op_addr_add(ctx, t0, t0, t1); gen_load_gpr(t1, rd + 1); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ); + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ); break; #endif } @@ -1047,7 +1047,7 @@ static void gen_pool32axf(CPUMIPSState *env, DisasContext *ctx, int rt, int rs) case TNE: mips32_op = OPC_TNE; do_trap: - gen_trap(ctx, mips32_op, rs, rt, -1); + gen_trap(ctx, mips32_op, 
rs, rt, -1, extract32(ctx->opcode, 12, 4)); break; #ifndef CONFIG_USER_ONLY case MFC0: @@ -1310,8 +1310,8 @@ static void gen_pool32axf(CPUMIPSState *env, DisasContext *ctx, int rt, int rs) generate_exception_end(ctx, EXCP_SYSCALL); break; case SDBBP: - if (is_uhi(extract32(ctx->opcode, 16, 10))) { - gen_helper_do_semihosting(cpu_env); + if (is_uhi(ctx, extract32(ctx->opcode, 16, 10))) { + ctx->base.is_jmp = DISAS_SEMIHOST; } else { check_insn(ctx, ISA_MIPS_R1); if (ctx->hflags & MIPS_HFLAG_SBRI) { @@ -1627,7 +1627,7 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx) uint32_t op, minor, minor2, mips32_op; uint32_t cond, fmt, cc; - insn = translator_lduw(env, ctx->base.pc_next + 2); + insn = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2); ctx->opcode = (ctx->opcode << 16) | insn; rt = (ctx->opcode >> 21) & 0x1f; @@ -1812,7 +1812,7 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx) gen_pool32axf(env, ctx, rt, rs); break; case BREAK32: - generate_exception_end(ctx, EXCP_BREAK); + generate_exception_break(ctx, extract32(ctx->opcode, 6, 20)); break; case SIGRIE: check_insn(ctx, ISA_MIPS_R6); @@ -2439,7 +2439,7 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx) check_insn_opc_removed(ctx, ISA_MIPS_R6); mips32_op = OPC_TEQI; do_trapi: - gen_trap(ctx, mips32_op, rs, -1, imm); + gen_trap(ctx, mips32_op, rs, -1, imm, 0); break; case BNEZC: @@ -2578,7 +2578,7 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx) case SCD: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); - gen_st_cond(ctx, rt, rs, offset, MO_TEQ, false); + gen_st_cond(ctx, rt, rs, offset, MO_TEUQ, false); break; #endif case LD_EVA: diff --git a/target/mips/tcg/mips16e_translate.c.inc b/target/mips/tcg/mips16e_translate.c.inc index 54071813f1..918b15d55c 100644 --- a/target/mips/tcg/mips16e_translate.c.inc +++ b/target/mips/tcg/mips16e_translate.c.inc @@ -455,7 +455,7 @@ static void decode_i64_mips16(DisasContext *ctx, static int decode_extended_mips16_opc(CPUMIPSState *env, DisasContext *ctx) { - int extend = translator_lduw(env, ctx->base.pc_next + 2); + int extend = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2); int op, rx, ry, funct, sa; int16_t imm, offset; @@ -688,7 +688,7 @@ static int decode_ase_mips16e(CPUMIPSState *env, DisasContext *ctx) /* No delay slot, so just process as a normal instruction */ break; case M16_OPC_JAL: - offset = translator_lduw(env, ctx->base.pc_next + 2); + offset = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2); offset = (((ctx->opcode & 0x1f) << 21) | ((ctx->opcode >> 5) & 0x1f) << 16 | offset) << 2; @@ -951,8 +951,8 @@ static int decode_ase_mips16e(CPUMIPSState *env, DisasContext *ctx) } break; case RR_SDBBP: - if (is_uhi(extract32(ctx->opcode, 5, 6))) { - gen_helper_do_semihosting(cpu_env); + if (is_uhi(ctx, extract32(ctx->opcode, 5, 6))) { + ctx->base.is_jmp = DISAS_SEMIHOST; } else { /* * XXX: not clear which exception should be raised @@ -969,7 +969,7 @@ static int decode_ase_mips16e(CPUMIPSState *env, DisasContext *ctx) gen_slt(ctx, OPC_SLTU, 24, rx, ry); break; case RR_BREAK: - generate_exception_end(ctx, EXCP_BREAK); + generate_exception_break(ctx, extract32(ctx->opcode, 5, 6)); break; case RR_SLLV: gen_shift(ctx, OPC_SLLV, ry, rx, ry); diff --git a/target/mips/tcg/mips64r6.decode b/target/mips/tcg/mips64r6.decode deleted file mode 100644 index b58d8009cc..0000000000 --- a/target/mips/tcg/mips64r6.decode +++ /dev/null @@ -1,27 +0,0 @@ -# MIPS64 Release 6 instruction set -# 
-# Copyright (C) 2020 Philippe Mathieu-Daudé -# -# SPDX-License-Identifier: LGPL-2.1-or-later -# -# Reference: -# MIPS Architecture for Programmers Volume II-A -# The MIPS64 Instruction Set Reference Manual, Revision 6.06 -# (Document Number: MD00087-2B-MIPS64BIS-AFP-6.06) -# - -&rtype rs rt rd sa !extern - -&REMOVED !extern - -@lsa ...... rs:5 rt:5 rd:5 ... sa:2 ...... &rtype - -DLSA 000000 ..... ..... ..... 000 .. 010101 @lsa - -REMOVED 011010 ----- ----- ---------------- # LDL -REMOVED 011011 ----- ----- ---------------- # LDR -REMOVED 101100 ----- ----- ---------------- # SDL -REMOVED 101101 ----- ----- ---------------- # SDR - -REMOVED 110100 ----- ----- ---------------- # LLD -REMOVED 111100 ----- ----- ---------------- # SCD diff --git a/target/mips/tcg/msa.decode b/target/mips/tcg/msa.decode index bf132e36b9..9575289195 100644 --- a/target/mips/tcg/msa.decode +++ b/target/mips/tcg/msa.decode @@ -11,21 +11,248 @@ # - The MIPS64 SIMD Architecture Module, Revision 1.12 # (Document Number: MD00868-1D-MSA64-AFP-01.12) -&rtype rs rt rd sa +&r rs rt rd sa -&msa_bz df wt s16 +&msa_r df wd ws wt +&msa_bz df wt sa +&msa_ldi df wd sa +&msa_i df wd ws sa +&msa_bit df wd ws m +&msa_elm_df df wd ws n +&msa_elm wd ws -@lsa ...... rs:5 rt:5 rd:5 ... sa:2 ...... &rtype -@bz ...... ... .. wt:5 s16:16 &msa_bz df=3 -@bz_df ...... ... df:2 wt:5 s16:16 &msa_bz +%elm_df 16:6 !function=elm_df +%elm_n 16:6 !function=elm_n +%bit_df 16:7 !function=bit_df +%bit_m 16:7 !function=bit_m +%2r_df_w 16:1 !function=plus_2 +%3r_df_h 21:1 !function=plus_1 +%3r_df_w 21:1 !function=plus_2 + +@lsa ...... rs:5 rt:5 rd:5 ... sa:2 ...... &r +@ldst ...... sa:s10 ws:5 wd:5 .... df:2 &msa_i +@bz_v ...... ... .. wt:5 sa:16 &msa_bz df=3 +@bz ...... ... df:2 wt:5 sa:16 &msa_bz +@elm_df ...... .... ...... ws:5 wd:5 ...... &msa_elm_df df=%elm_df n=%elm_n +@elm ...... .......... ws:5 wd:5 ...... &msa_elm +@vec ...... ..... wt:5 ws:5 wd:5 ...... &msa_r df=0 +@2r ...... ........ df:2 ws:5 wd:5 ...... &msa_r wt=0 +@2rf ...... ......... . ws:5 wd:5 ...... &msa_r wt=0 df=%2r_df_w +@3r ...... ... df:2 wt:5 ws:5 wd:5 ...... &msa_r +@3rf_h ...... .... . wt:5 ws:5 wd:5 ...... &msa_r df=%3r_df_h +@3rf_w ...... .... . wt:5 ws:5 wd:5 ...... &msa_r df=%3r_df_w +@u5 ...... ... df:2 sa:5 ws:5 wd:5 ...... &msa_i +@s5 ...... ... df:2 sa:s5 ws:5 wd:5 ...... &msa_i +@i8_df ...... df:2 sa:s8 ws:5 wd:5 ...... &msa_i +@i8 ...... .. sa:s8 ws:5 wd:5 ...... &msa_i df=0 +@ldi ...... ... df:2 sa:s10 wd:5 ...... &msa_ldi +@bit ...... ... ....... ws:5 wd:5 ...... &msa_bit df=%bit_df m=%bit_m LSA 000000 ..... ..... ..... 000 .. 000101 @lsa DLSA 000000 ..... ..... ..... 000 .. 010101 @lsa -BZ_V 010001 01011 ..... ................ @bz -BNZ_V 010001 01111 ..... ................ @bz +BZ_V 010001 01011 ..... ................ @bz_v +BNZ_V 010001 01111 ..... ................ @bz_v +BZ 010001 110 .. ..... ................ @bz +BNZ 010001 111 .. ..... ................ @bz -BZ_x 010001 110 .. ..... ................ @bz_df -BNZ_x 010001 111 .. ..... ................ @bz_df +ANDI 011110 00 ........ ..... ..... 000000 @i8 +ORI 011110 01 ........ ..... ..... 000000 @i8 +NORI 011110 10 ........ ..... ..... 000000 @i8 +XORI 011110 11 ........ ..... ..... 000000 @i8 +BMNZI 011110 00 ........ ..... ..... 000001 @i8 +BMZI 011110 01 ........ ..... ..... 000001 @i8 +BSELI 011110 10 ........ ..... ..... 000001 @i8 +SHF 011110 .. ........ ..... ..... 000010 @i8_df -MSA 011110 -------------------------- +ADDVI 011110 000 .. ..... ..... ..... 000110 @u5 +SUBVI 011110 001 .. ..... ..... 
..... 000110 @u5 +MAXI_S 011110 010 .. ..... ..... ..... 000110 @s5 +MAXI_U 011110 011 .. ..... ..... ..... 000110 @u5 +MINI_S 011110 100 .. ..... ..... ..... 000110 @s5 +MINI_U 011110 101 .. ..... ..... ..... 000110 @u5 + +CEQI 011110 000 .. ..... ..... ..... 000111 @s5 +CLTI_S 011110 010 .. ..... ..... ..... 000111 @s5 +CLTI_U 011110 011 .. ..... ..... ..... 000111 @u5 +CLEI_S 011110 100 .. ..... ..... ..... 000111 @s5 +CLEI_U 011110 101 .. ..... ..... ..... 000111 @u5 + +LDI 011110 110 .. .......... ..... 000111 @ldi + +SLLI 011110 000 ....... ..... ..... 001001 @bit +SRAI 011110 001 ....... ..... ..... 001001 @bit +SRLI 011110 010 ....... ..... ..... 001001 @bit +BCLRI 011110 011 ....... ..... ..... 001001 @bit +BSETI 011110 100 ....... ..... ..... 001001 @bit +BNEGI 011110 101 ....... ..... ..... 001001 @bit +BINSLI 011110 110 ....... ..... ..... 001001 @bit +BINSRI 011110 111 ....... ..... ..... 001001 @bit + +SAT_S 011110 000 ....... ..... ..... 001010 @bit +SAT_U 011110 001 ....... ..... ..... 001010 @bit +SRARI 011110 010 ....... ..... ..... 001010 @bit +SRLRI 011110 011 ....... ..... ..... 001010 @bit + +SLL 011110 000.. ..... ..... ..... 001101 @3r +SRA 011110 001.. ..... ..... ..... 001101 @3r +SRL 011110 010.. ..... ..... ..... 001101 @3r +BCLR 011110 011.. ..... ..... ..... 001101 @3r +BSET 011110 100.. ..... ..... ..... 001101 @3r +BNEG 011110 101.. ..... ..... ..... 001101 @3r +BINSL 011110 110.. ..... ..... ..... 001101 @3r +BINSR 011110 111.. ..... ..... ..... 001101 @3r + +ADDV 011110 000.. ..... ..... ..... 001110 @3r +SUBV 011110 001.. ..... ..... ..... 001110 @3r +MAX_S 011110 010.. ..... ..... ..... 001110 @3r +MAX_U 011110 011.. ..... ..... ..... 001110 @3r +MIN_S 011110 100.. ..... ..... ..... 001110 @3r +MIN_U 011110 101.. ..... ..... ..... 001110 @3r +MAX_A 011110 110.. ..... ..... ..... 001110 @3r +MIN_A 011110 111.. ..... ..... ..... 001110 @3r + +CEQ 011110 000.. ..... ..... ..... 001111 @3r +CLT_S 011110 010.. ..... ..... ..... 001111 @3r +CLT_U 011110 011.. ..... ..... ..... 001111 @3r +CLE_S 011110 100.. ..... ..... ..... 001111 @3r +CLE_U 011110 101.. ..... ..... ..... 001111 @3r + +ADD_A 011110 000.. ..... ..... ..... 010000 @3r +ADDS_A 011110 001.. ..... ..... ..... 010000 @3r +ADDS_S 011110 010.. ..... ..... ..... 010000 @3r +ADDS_U 011110 011.. ..... ..... ..... 010000 @3r +AVE_S 011110 100.. ..... ..... ..... 010000 @3r +AVE_U 011110 101.. ..... ..... ..... 010000 @3r +AVER_S 011110 110.. ..... ..... ..... 010000 @3r +AVER_U 011110 111.. ..... ..... ..... 010000 @3r + +SUBS_S 011110 000.. ..... ..... ..... 010001 @3r +SUBS_U 011110 001.. ..... ..... ..... 010001 @3r +SUBSUS_U 011110 010.. ..... ..... ..... 010001 @3r +SUBSUU_S 011110 011.. ..... ..... ..... 010001 @3r +ASUB_S 011110 100.. ..... ..... ..... 010001 @3r +ASUB_U 011110 101.. ..... ..... ..... 010001 @3r + +MULV 011110 000.. ..... ..... ..... 010010 @3r +MADDV 011110 001.. ..... ..... ..... 010010 @3r +MSUBV 011110 010.. ..... ..... ..... 010010 @3r +DIV_S 011110 100.. ..... ..... ..... 010010 @3r +DIV_U 011110 101.. ..... ..... ..... 010010 @3r +MOD_S 011110 110.. ..... ..... ..... 010010 @3r +MOD_U 011110 111.. ..... ..... ..... 010010 @3r + +DOTP_S 011110 000.. ..... ..... ..... 010011 @3r +DOTP_U 011110 001.. ..... ..... ..... 010011 @3r +DPADD_S 011110 010.. ..... ..... ..... 010011 @3r +DPADD_U 011110 011.. ..... ..... ..... 010011 @3r +DPSUB_S 011110 100.. ..... ..... ..... 010011 @3r +DPSUB_U 011110 101.. ..... ..... ..... 010011 @3r + +SLD 011110 000 .. ..... ..... ..... 
010100 @3r +SPLAT 011110 001 .. ..... ..... ..... 010100 @3r +PCKEV 011110 010 .. ..... ..... ..... 010100 @3r +PCKOD 011110 011 .. ..... ..... ..... 010100 @3r +ILVL 011110 100 .. ..... ..... ..... 010100 @3r +ILVR 011110 101 .. ..... ..... ..... 010100 @3r +ILVEV 011110 110 .. ..... ..... ..... 010100 @3r +ILVOD 011110 111 .. ..... ..... ..... 010100 @3r + +VSHF 011110 000 .. ..... ..... ..... 010101 @3r +SRAR 011110 001 .. ..... ..... ..... 010101 @3r +SRLR 011110 010 .. ..... ..... ..... 010101 @3r +HADD_S 011110 100.. ..... ..... ..... 010101 @3r +HADD_U 011110 101.. ..... ..... ..... 010101 @3r +HSUB_S 011110 110.. ..... ..... ..... 010101 @3r +HSUB_U 011110 111.. ..... ..... ..... 010101 @3r + +{ + CTCMSA 011110 0000111110 ..... ..... 011001 @elm + SLDI 011110 0000 ...... ..... ..... 011001 @elm_df +} +{ + CFCMSA 011110 0001111110 ..... ..... 011001 @elm + SPLATI 011110 0001 ...... ..... ..... 011001 @elm_df +} +{ + MOVE_V 011110 0010111110 ..... ..... 011001 @elm + COPY_S 011110 0010 ...... ..... ..... 011001 @elm_df +} +COPY_U 011110 0011 ...... ..... ..... 011001 @elm_df +INSERT 011110 0100 ...... ..... ..... 011001 @elm_df +INSVE 011110 0101 ...... ..... ..... 011001 @elm_df + +FCAF 011110 0000 . ..... ..... ..... 011010 @3rf_w +FCUN 011110 0001 . ..... ..... ..... 011010 @3rf_w +FCEQ 011110 0010 . ..... ..... ..... 011010 @3rf_w +FCUEQ 011110 0011 . ..... ..... ..... 011010 @3rf_w +FCLT 011110 0100 . ..... ..... ..... 011010 @3rf_w +FCULT 011110 0101 . ..... ..... ..... 011010 @3rf_w +FCLE 011110 0110 . ..... ..... ..... 011010 @3rf_w +FCULE 011110 0111 . ..... ..... ..... 011010 @3rf_w +FSAF 011110 1000 . ..... ..... ..... 011010 @3rf_w +FSUN 011110 1001 . ..... ..... ..... 011010 @3rf_w +FSEQ 011110 1010 . ..... ..... ..... 011010 @3rf_w +FSUEQ 011110 1011 . ..... ..... ..... 011010 @3rf_w +FSLT 011110 1100 . ..... ..... ..... 011010 @3rf_w +FSULT 011110 1101 . ..... ..... ..... 011010 @3rf_w +FSLE 011110 1110 . ..... ..... ..... 011010 @3rf_w +FSULE 011110 1111 . ..... ..... ..... 011010 @3rf_w + +FADD 011110 0000 . ..... ..... ..... 011011 @3rf_w +FSUB 011110 0001 . ..... ..... ..... 011011 @3rf_w +FMUL 011110 0010 . ..... ..... ..... 011011 @3rf_w +FDIV 011110 0011 . ..... ..... ..... 011011 @3rf_w +FMADD 011110 0100 . ..... ..... ..... 011011 @3rf_w +FMSUB 011110 0101 . ..... ..... ..... 011011 @3rf_w +FEXP2 011110 0111 . ..... ..... ..... 011011 @3rf_w +FEXDO 011110 1000 . ..... ..... ..... 011011 @3rf_w +FTQ 011110 1010 . ..... ..... ..... 011011 @3rf_w +FMIN 011110 1100 . ..... ..... ..... 011011 @3rf_w +FMIN_A 011110 1101 . ..... ..... ..... 011011 @3rf_w +FMAX 011110 1110 . ..... ..... ..... 011011 @3rf_w +FMAX_A 011110 1111 . ..... ..... ..... 011011 @3rf_w + +FCOR 011110 0001 . ..... ..... ..... 011100 @3rf_w +FCUNE 011110 0010 . ..... ..... ..... 011100 @3rf_w +FCNE 011110 0011 . ..... ..... ..... 011100 @3rf_w +MUL_Q 011110 0100 . ..... ..... ..... 011100 @3rf_h +MADD_Q 011110 0101 . ..... ..... ..... 011100 @3rf_h +MSUB_Q 011110 0110 . ..... ..... ..... 011100 @3rf_h +FSOR 011110 1001 . ..... ..... ..... 011100 @3rf_w +FSUNE 011110 1010 . ..... ..... ..... 011100 @3rf_w +FSNE 011110 1011 . ..... ..... ..... 011100 @3rf_w +MULR_Q 011110 1100 . ..... ..... ..... 011100 @3rf_h +MADDR_Q 011110 1101 . ..... ..... ..... 011100 @3rf_h +MSUBR_Q 011110 1110 . ..... ..... ..... 011100 @3rf_h + +AND_V 011110 00000 ..... ..... ..... 011110 @vec +OR_V 011110 00001 ..... ..... ..... 011110 @vec +NOR_V 011110 00010 ..... ..... ..... 011110 @vec +XOR_V 011110 00011 ..... ..... 
..... 011110 @vec +BMNZ_V 011110 00100 ..... ..... ..... 011110 @vec +BMZ_V 011110 00101 ..... ..... ..... 011110 @vec +BSEL_V 011110 00110 ..... ..... ..... 011110 @vec +FILL 011110 11000000 .. ..... ..... 011110 @2r +PCNT 011110 11000001 .. ..... ..... 011110 @2r +NLOC 011110 11000010 .. ..... ..... 011110 @2r +NLZC 011110 11000011 .. ..... ..... 011110 @2r +FCLASS 011110 110010000 . ..... ..... 011110 @2rf +FTRUNC_S 011110 110010001 . ..... ..... 011110 @2rf +FTRUNC_U 011110 110010010 . ..... ..... 011110 @2rf +FSQRT 011110 110010011 . ..... ..... 011110 @2rf +FRSQRT 011110 110010100 . ..... ..... 011110 @2rf +FRCP 011110 110010101 . ..... ..... 011110 @2rf +FRINT 011110 110010110 . ..... ..... 011110 @2rf +FLOG2 011110 110010111 . ..... ..... 011110 @2rf +FEXUPL 011110 110011000 . ..... ..... 011110 @2rf +FEXUPR 011110 110011001 . ..... ..... 011110 @2rf +FFQL 011110 110011010 . ..... ..... 011110 @2rf +FFQR 011110 110011011 . ..... ..... 011110 @2rf +FTINT_S 011110 110011100 . ..... ..... 011110 @2rf +FTINT_U 011110 110011101 . ..... ..... 011110 @2rf +FFINT_S 011110 110011110 . ..... ..... 011110 @2rf +FFINT_U 011110 110011111 . ..... ..... 011110 @2rf + +LD 011110 .......... ..... ..... 1000 .. @ldst +ST 011110 .......... ..... ..... 1001 .. @ldst diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c index 04af54f66d..736283e2af 100644 --- a/target/mips/tcg/msa_helper.c +++ b/target/mips/tcg/msa_helper.c @@ -3231,22 +3231,22 @@ void helper_msa_maddv_b(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - pwd->b[0] = msa_maddv_df(DF_BYTE, pwt->b[0], pws->b[0], pwt->b[0]); - pwd->b[1] = msa_maddv_df(DF_BYTE, pwt->b[1], pws->b[1], pwt->b[1]); - pwd->b[2] = msa_maddv_df(DF_BYTE, pwt->b[2], pws->b[2], pwt->b[2]); - pwd->b[3] = msa_maddv_df(DF_BYTE, pwt->b[3], pws->b[3], pwt->b[3]); - pwd->b[4] = msa_maddv_df(DF_BYTE, pwt->b[4], pws->b[4], pwt->b[4]); - pwd->b[5] = msa_maddv_df(DF_BYTE, pwt->b[5], pws->b[5], pwt->b[5]); - pwd->b[6] = msa_maddv_df(DF_BYTE, pwt->b[6], pws->b[6], pwt->b[6]); - pwd->b[7] = msa_maddv_df(DF_BYTE, pwt->b[7], pws->b[7], pwt->b[7]); - pwd->b[8] = msa_maddv_df(DF_BYTE, pwt->b[8], pws->b[8], pwt->b[8]); - pwd->b[9] = msa_maddv_df(DF_BYTE, pwt->b[9], pws->b[9], pwt->b[9]); - pwd->b[10] = msa_maddv_df(DF_BYTE, pwt->b[10], pws->b[10], pwt->b[10]); - pwd->b[11] = msa_maddv_df(DF_BYTE, pwt->b[11], pws->b[11], pwt->b[11]); - pwd->b[12] = msa_maddv_df(DF_BYTE, pwt->b[12], pws->b[12], pwt->b[12]); - pwd->b[13] = msa_maddv_df(DF_BYTE, pwt->b[13], pws->b[13], pwt->b[13]); - pwd->b[14] = msa_maddv_df(DF_BYTE, pwt->b[14], pws->b[14], pwt->b[14]); - pwd->b[15] = msa_maddv_df(DF_BYTE, pwt->b[15], pws->b[15], pwt->b[15]); + pwd->b[0] = msa_maddv_df(DF_BYTE, pwd->b[0], pws->b[0], pwt->b[0]); + pwd->b[1] = msa_maddv_df(DF_BYTE, pwd->b[1], pws->b[1], pwt->b[1]); + pwd->b[2] = msa_maddv_df(DF_BYTE, pwd->b[2], pws->b[2], pwt->b[2]); + pwd->b[3] = msa_maddv_df(DF_BYTE, pwd->b[3], pws->b[3], pwt->b[3]); + pwd->b[4] = msa_maddv_df(DF_BYTE, pwd->b[4], pws->b[4], pwt->b[4]); + pwd->b[5] = msa_maddv_df(DF_BYTE, pwd->b[5], pws->b[5], pwt->b[5]); + pwd->b[6] = msa_maddv_df(DF_BYTE, pwd->b[6], pws->b[6], pwt->b[6]); + pwd->b[7] = msa_maddv_df(DF_BYTE, pwd->b[7], pws->b[7], pwt->b[7]); + pwd->b[8] = msa_maddv_df(DF_BYTE, pwd->b[8], pws->b[8], pwt->b[8]); + pwd->b[9] = msa_maddv_df(DF_BYTE, pwd->b[9], pws->b[9], pwt->b[9]); + pwd->b[10] = msa_maddv_df(DF_BYTE, pwd->b[10], pws->b[10], pwt->b[10]); + pwd->b[11] = 
msa_maddv_df(DF_BYTE, pwd->b[11], pws->b[11], pwt->b[11]); + pwd->b[12] = msa_maddv_df(DF_BYTE, pwd->b[12], pws->b[12], pwt->b[12]); + pwd->b[13] = msa_maddv_df(DF_BYTE, pwd->b[13], pws->b[13], pwt->b[13]); + pwd->b[14] = msa_maddv_df(DF_BYTE, pwd->b[14], pws->b[14], pwt->b[14]); + pwd->b[15] = msa_maddv_df(DF_BYTE, pwd->b[15], pws->b[15], pwt->b[15]); } void helper_msa_maddv_h(CPUMIPSState *env, @@ -3303,22 +3303,22 @@ void helper_msa_msubv_b(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - pwd->b[0] = msa_msubv_df(DF_BYTE, pwt->b[0], pws->b[0], pwt->b[0]); - pwd->b[1] = msa_msubv_df(DF_BYTE, pwt->b[1], pws->b[1], pwt->b[1]); - pwd->b[2] = msa_msubv_df(DF_BYTE, pwt->b[2], pws->b[2], pwt->b[2]); - pwd->b[3] = msa_msubv_df(DF_BYTE, pwt->b[3], pws->b[3], pwt->b[3]); - pwd->b[4] = msa_msubv_df(DF_BYTE, pwt->b[4], pws->b[4], pwt->b[4]); - pwd->b[5] = msa_msubv_df(DF_BYTE, pwt->b[5], pws->b[5], pwt->b[5]); - pwd->b[6] = msa_msubv_df(DF_BYTE, pwt->b[6], pws->b[6], pwt->b[6]); - pwd->b[7] = msa_msubv_df(DF_BYTE, pwt->b[7], pws->b[7], pwt->b[7]); - pwd->b[8] = msa_msubv_df(DF_BYTE, pwt->b[8], pws->b[8], pwt->b[8]); - pwd->b[9] = msa_msubv_df(DF_BYTE, pwt->b[9], pws->b[9], pwt->b[9]); - pwd->b[10] = msa_msubv_df(DF_BYTE, pwt->b[10], pws->b[10], pwt->b[10]); - pwd->b[11] = msa_msubv_df(DF_BYTE, pwt->b[11], pws->b[11], pwt->b[11]); - pwd->b[12] = msa_msubv_df(DF_BYTE, pwt->b[12], pws->b[12], pwt->b[12]); - pwd->b[13] = msa_msubv_df(DF_BYTE, pwt->b[13], pws->b[13], pwt->b[13]); - pwd->b[14] = msa_msubv_df(DF_BYTE, pwt->b[14], pws->b[14], pwt->b[14]); - pwd->b[15] = msa_msubv_df(DF_BYTE, pwt->b[15], pws->b[15], pwt->b[15]); + pwd->b[0] = msa_msubv_df(DF_BYTE, pwd->b[0], pws->b[0], pwt->b[0]); + pwd->b[1] = msa_msubv_df(DF_BYTE, pwd->b[1], pws->b[1], pwt->b[1]); + pwd->b[2] = msa_msubv_df(DF_BYTE, pwd->b[2], pws->b[2], pwt->b[2]); + pwd->b[3] = msa_msubv_df(DF_BYTE, pwd->b[3], pws->b[3], pwt->b[3]); + pwd->b[4] = msa_msubv_df(DF_BYTE, pwd->b[4], pws->b[4], pwt->b[4]); + pwd->b[5] = msa_msubv_df(DF_BYTE, pwd->b[5], pws->b[5], pwt->b[5]); + pwd->b[6] = msa_msubv_df(DF_BYTE, pwd->b[6], pws->b[6], pwt->b[6]); + pwd->b[7] = msa_msubv_df(DF_BYTE, pwd->b[7], pws->b[7], pwt->b[7]); + pwd->b[8] = msa_msubv_df(DF_BYTE, pwd->b[8], pws->b[8], pwt->b[8]); + pwd->b[9] = msa_msubv_df(DF_BYTE, pwd->b[9], pws->b[9], pwt->b[9]); + pwd->b[10] = msa_msubv_df(DF_BYTE, pwd->b[10], pws->b[10], pwt->b[10]); + pwd->b[11] = msa_msubv_df(DF_BYTE, pwd->b[11], pws->b[11], pwt->b[11]); + pwd->b[12] = msa_msubv_df(DF_BYTE, pwd->b[12], pws->b[12], pwt->b[12]); + pwd->b[13] = msa_msubv_df(DF_BYTE, pwd->b[13], pws->b[13], pwt->b[13]); + pwd->b[14] = msa_msubv_df(DF_BYTE, pwd->b[14], pws->b[14], pwt->b[14]); + pwd->b[15] = msa_msubv_df(DF_BYTE, pwd->b[15], pws->b[15], pwt->b[15]); } void helper_msa_msubv_h(CPUMIPSState *env, @@ -4146,7 +4146,7 @@ void helper_msa_ilvev_b(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN pwd->b[8] = pws->b[9]; pwd->b[9] = pwt->b[9]; pwd->b[10] = pws->b[11]; @@ -4190,7 +4190,7 @@ void helper_msa_ilvev_h(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN pwd->h[4] = pws->h[5]; pwd->h[5] = pwt->h[5]; pwd->h[6] = pws->h[7]; @@ -4218,7 +4218,7 @@ void helper_msa_ilvev_w(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t 
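The two hunks above fix the accumulator operand: MADDV/MSUBV accumulate into wd (wd plus or minus ws*wt per element), while the old code passed pwt as the first argument and so read the accumulator from wt. A scalar model of the assumed per-element semantics, not part of the patch:

#include <assert.h>
#include <stdint.h>

static int8_t maddv_b(int8_t wd, int8_t ws, int8_t wt) { return wd + ws * wt; }
static int8_t msubv_b(int8_t wd, int8_t ws, int8_t wt) { return wd - ws * wt; }

int main(void)
{
    int8_t wd = 10, ws = 3, wt = 4;
    assert(maddv_b(wd, ws, wt) == 22);   /* fixed code: accumulate into wd */
    assert(maddv_b(wt, ws, wt) == 16);   /* what the old pwt argument computed */
    assert(msubv_b(wd, ws, wt) == -2);
    return 0;
}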
*pwt = &(env->active_fpu.fpr[wt].wr); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN pwd->w[2] = pws->w[3]; pwd->w[3] = pwt->w[3]; pwd->w[0] = pws->w[1]; @@ -4250,7 +4250,7 @@ void helper_msa_ilvod_b(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN pwd->b[7] = pwt->b[6]; pwd->b[6] = pws->b[6]; pwd->b[5] = pwt->b[4]; @@ -4294,7 +4294,7 @@ void helper_msa_ilvod_h(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN pwd->h[3] = pwt->h[2]; pwd->h[2] = pws->h[2]; pwd->h[1] = pwt->h[0]; @@ -4322,7 +4322,7 @@ void helper_msa_ilvod_w(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN pwd->w[1] = pwt->w[0]; pwd->w[0] = pws->w[0]; pwd->w[3] = pwt->w[2]; @@ -4354,7 +4354,7 @@ void helper_msa_ilvl_b(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN pwd->b[7] = pwt->b[15]; pwd->b[6] = pws->b[15]; pwd->b[5] = pwt->b[14]; @@ -4398,7 +4398,7 @@ void helper_msa_ilvl_h(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN pwd->h[3] = pwt->h[7]; pwd->h[2] = pws->h[7]; pwd->h[1] = pwt->h[6]; @@ -4426,7 +4426,7 @@ void helper_msa_ilvl_w(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN pwd->w[1] = pwt->w[3]; pwd->w[0] = pws->w[3]; pwd->w[3] = pwt->w[2]; @@ -4458,7 +4458,7 @@ void helper_msa_ilvr_b(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN pwd->b[8] = pws->b[0]; pwd->b[9] = pwt->b[0]; pwd->b[10] = pws->b[1]; @@ -4502,7 +4502,7 @@ void helper_msa_ilvr_h(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN pwd->h[4] = pws->h[0]; pwd->h[5] = pwt->h[0]; pwd->h[6] = pws->h[1]; @@ -4530,7 +4530,7 @@ void helper_msa_ilvr_w(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN pwd->w[2] = pws->w[0]; pwd->w[3] = pwt->w[0]; pwd->w[0] = pws->w[1]; @@ -4661,7 +4661,7 @@ void helper_msa_pckev_b(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN pwd->b[8] = pws->b[9]; pwd->b[10] = pws->b[13]; pwd->b[12] = pws->b[1]; @@ -4705,7 +4705,7 @@ void helper_msa_pckev_h(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN pwd->h[4] = pws->h[5]; pwd->h[6] = pws->h[1]; pwd->h[0] = pwt->h[5]; @@ -4733,7 +4733,7 @@ void helper_msa_pckev_w(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN pwd->w[2] = pws->w[3]; pwd->w[0] = pwt->w[3]; pwd->w[3] = pws->w[1]; @@ -4765,7 +4765,7 @@ void helper_msa_pckod_b(CPUMIPSState *env, wr_t *pws = 
&(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN pwd->b[7] = pwt->b[6]; pwd->b[5] = pwt->b[2]; pwd->b[3] = pwt->b[14]; @@ -4810,7 +4810,7 @@ void helper_msa_pckod_h(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN pwd->h[3] = pwt->h[2]; pwd->h[1] = pwt->h[6]; pwd->h[7] = pws->h[2]; @@ -4838,7 +4838,7 @@ void helper_msa_pckod_w(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN pwd->w[1] = pwt->w[0]; pwd->w[3] = pws->w[0]; pwd->w[0] = pwt->w[2]; @@ -5926,7 +5926,7 @@ void helper_msa_copy_s_b(CPUMIPSState *env, uint32_t rd, uint32_t ws, uint32_t n) { n %= 16; -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN if (n < 8) { n = 8 - n - 1; } else { @@ -5940,7 +5940,7 @@ void helper_msa_copy_s_h(CPUMIPSState *env, uint32_t rd, uint32_t ws, uint32_t n) { n %= 8; -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN if (n < 4) { n = 4 - n - 1; } else { @@ -5954,7 +5954,7 @@ void helper_msa_copy_s_w(CPUMIPSState *env, uint32_t rd, uint32_t ws, uint32_t n) { n %= 4; -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN if (n < 2) { n = 2 - n - 1; } else { @@ -5975,7 +5975,7 @@ void helper_msa_copy_u_b(CPUMIPSState *env, uint32_t rd, uint32_t ws, uint32_t n) { n %= 16; -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN if (n < 8) { n = 8 - n - 1; } else { @@ -5989,7 +5989,7 @@ void helper_msa_copy_u_h(CPUMIPSState *env, uint32_t rd, uint32_t ws, uint32_t n) { n %= 8; -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN if (n < 4) { n = 4 - n - 1; } else { @@ -6003,7 +6003,7 @@ void helper_msa_copy_u_w(CPUMIPSState *env, uint32_t rd, uint32_t ws, uint32_t n) { n %= 4; -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN if (n < 2) { n = 2 - n - 1; } else { @@ -6019,7 +6019,7 @@ void helper_msa_insert_b(CPUMIPSState *env, uint32_t wd, wr_t *pwd = &(env->active_fpu.fpr[wd].wr); target_ulong rs = env->active_tc.gpr[rs_num]; n %= 16; -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN if (n < 8) { n = 8 - n - 1; } else { @@ -6035,7 +6035,7 @@ void helper_msa_insert_h(CPUMIPSState *env, uint32_t wd, wr_t *pwd = &(env->active_fpu.fpr[wd].wr); target_ulong rs = env->active_tc.gpr[rs_num]; n %= 8; -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN if (n < 4) { n = 4 - n - 1; } else { @@ -6051,7 +6051,7 @@ void helper_msa_insert_w(CPUMIPSState *env, uint32_t wd, wr_t *pwd = &(env->active_fpu.fpr[wd].wr); target_ulong rs = env->active_tc.gpr[rs_num]; n %= 4; -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN if (n < 2) { n = 2 - n - 1; } else { @@ -8211,185 +8211,93 @@ void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, #define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df)) #if !defined(CONFIG_USER_ONLY) -#define MEMOP_IDX(DF) \ - TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \ - cpu_mmu_index(env, false)); +#define MEMOP_IDX(DF) \ + MemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \ + cpu_mmu_index(env, false)); #else #define MEMOP_IDX(DF) #endif +#if TARGET_BIG_ENDIAN +static inline uint64_t bswap16x4(uint64_t x) +{ + uint64_t m = 0x00ff00ff00ff00ffull; + return ((x & m) << 8) | ((x >> 8) & m); +} + +static inline uint64_t bswap32x2(uint64_t x) +{ + return ror64(bswap64(x), 32); +} +#endif + void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd, 
target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - MEMOP_IDX(DF_BYTE) -#if !defined(CONFIG_USER_ONLY) -#if !defined(HOST_WORDS_BIGENDIAN) - pwd->b[0] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC()); - pwd->b[1] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC()); - pwd->b[2] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC()); - pwd->b[3] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC()); - pwd->b[4] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC()); - pwd->b[5] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC()); - pwd->b[6] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC()); - pwd->b[7] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC()); - pwd->b[8] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC()); - pwd->b[9] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC()); - pwd->b[10] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC()); - pwd->b[11] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC()); - pwd->b[12] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC()); - pwd->b[13] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC()); - pwd->b[14] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC()); - pwd->b[15] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC()); -#else - pwd->b[0] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC()); - pwd->b[1] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC()); - pwd->b[2] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC()); - pwd->b[3] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC()); - pwd->b[4] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC()); - pwd->b[5] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC()); - pwd->b[6] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC()); - pwd->b[7] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC()); - pwd->b[8] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC()); - pwd->b[9] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC()); - pwd->b[10] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC()); - pwd->b[11] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC()); - pwd->b[12] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC()); - pwd->b[13] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC()); - pwd->b[14] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC()); - pwd->b[15] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC()); -#endif -#else -#if !defined(HOST_WORDS_BIGENDIAN) - pwd->b[0] = cpu_ldub_data(env, addr + (0 << DF_BYTE)); - pwd->b[1] = cpu_ldub_data(env, addr + (1 << DF_BYTE)); - pwd->b[2] = cpu_ldub_data(env, addr + (2 << DF_BYTE)); - pwd->b[3] = cpu_ldub_data(env, addr + (3 << DF_BYTE)); - pwd->b[4] = cpu_ldub_data(env, addr + (4 << DF_BYTE)); - pwd->b[5] = cpu_ldub_data(env, addr + (5 << DF_BYTE)); - pwd->b[6] = cpu_ldub_data(env, addr + (6 << DF_BYTE)); - pwd->b[7] = cpu_ldub_data(env, addr + (7 << DF_BYTE)); - pwd->b[8] = cpu_ldub_data(env, addr + (8 << DF_BYTE)); - pwd->b[9] = cpu_ldub_data(env, addr + (9 << DF_BYTE)); - pwd->b[10] = cpu_ldub_data(env, addr + (10 << DF_BYTE)); - pwd->b[11] = cpu_ldub_data(env, addr + (11 << DF_BYTE)); - pwd->b[12] = cpu_ldub_data(env, addr + (12 << DF_BYTE)); - pwd->b[13] = cpu_ldub_data(env, addr + (13 << DF_BYTE)); - pwd->b[14] = cpu_ldub_data(env, addr + (14 << DF_BYTE)); - pwd->b[15] = 
cpu_ldub_data(env, addr + (15 << DF_BYTE)); -#else - pwd->b[0] = cpu_ldub_data(env, addr + (7 << DF_BYTE)); - pwd->b[1] = cpu_ldub_data(env, addr + (6 << DF_BYTE)); - pwd->b[2] = cpu_ldub_data(env, addr + (5 << DF_BYTE)); - pwd->b[3] = cpu_ldub_data(env, addr + (4 << DF_BYTE)); - pwd->b[4] = cpu_ldub_data(env, addr + (3 << DF_BYTE)); - pwd->b[5] = cpu_ldub_data(env, addr + (2 << DF_BYTE)); - pwd->b[6] = cpu_ldub_data(env, addr + (1 << DF_BYTE)); - pwd->b[7] = cpu_ldub_data(env, addr + (0 << DF_BYTE)); - pwd->b[8] = cpu_ldub_data(env, addr + (15 << DF_BYTE)); - pwd->b[9] = cpu_ldub_data(env, addr + (14 << DF_BYTE)); - pwd->b[10] = cpu_ldub_data(env, addr + (13 << DF_BYTE)); - pwd->b[11] = cpu_ldub_data(env, addr + (12 << DF_BYTE)); - pwd->b[12] = cpu_ldub_data(env, addr + (11 << DF_BYTE)); - pwd->b[13] = cpu_ldub_data(env, addr + (10 << DF_BYTE)); - pwd->b[14] = cpu_ldub_data(env, addr + (9 << DF_BYTE)); - pwd->b[15] = cpu_ldub_data(env, addr + (8 << DF_BYTE)); -#endif -#endif + uintptr_t ra = GETPC(); + uint64_t d0, d1; + + /* Load 8 bytes at a time. Vector element ordering makes this LE. */ + d0 = cpu_ldq_le_data_ra(env, addr + 0, ra); + d1 = cpu_ldq_le_data_ra(env, addr + 8, ra); + pwd->d[0] = d0; + pwd->d[1] = d1; } void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - MEMOP_IDX(DF_HALF) -#if !defined(CONFIG_USER_ONLY) -#if !defined(HOST_WORDS_BIGENDIAN) - pwd->h[0] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC()); - pwd->h[1] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC()); - pwd->h[2] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC()); - pwd->h[3] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC()); - pwd->h[4] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC()); - pwd->h[5] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC()); - pwd->h[6] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC()); - pwd->h[7] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC()); -#else - pwd->h[0] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC()); - pwd->h[1] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC()); - pwd->h[2] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC()); - pwd->h[3] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC()); - pwd->h[4] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC()); - pwd->h[5] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC()); - pwd->h[6] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC()); - pwd->h[7] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC()); -#endif -#else -#if !defined(HOST_WORDS_BIGENDIAN) - pwd->h[0] = cpu_lduw_data(env, addr + (0 << DF_HALF)); - pwd->h[1] = cpu_lduw_data(env, addr + (1 << DF_HALF)); - pwd->h[2] = cpu_lduw_data(env, addr + (2 << DF_HALF)); - pwd->h[3] = cpu_lduw_data(env, addr + (3 << DF_HALF)); - pwd->h[4] = cpu_lduw_data(env, addr + (4 << DF_HALF)); - pwd->h[5] = cpu_lduw_data(env, addr + (5 << DF_HALF)); - pwd->h[6] = cpu_lduw_data(env, addr + (6 << DF_HALF)); - pwd->h[7] = cpu_lduw_data(env, addr + (7 << DF_HALF)); -#else - pwd->h[0] = cpu_lduw_data(env, addr + (3 << DF_HALF)); - pwd->h[1] = cpu_lduw_data(env, addr + (2 << DF_HALF)); - pwd->h[2] = cpu_lduw_data(env, addr + (1 << DF_HALF)); - pwd->h[3] = cpu_lduw_data(env, addr + (0 << DF_HALF)); - pwd->h[4] = cpu_lduw_data(env, addr + (7 << DF_HALF)); - pwd->h[5] = cpu_lduw_data(env, addr + (6 << DF_HALF)); - pwd->h[6] = 
cpu_lduw_data(env, addr + (5 << DF_HALF)); - pwd->h[7] = cpu_lduw_data(env, addr + (4 << DF_HALF)); -#endif + uintptr_t ra = GETPC(); + uint64_t d0, d1; + + /* + * Load 8 bytes at a time. Use little-endian load, then for + * big-endian target, we must then swap the four halfwords. + */ + d0 = cpu_ldq_le_data_ra(env, addr + 0, ra); + d1 = cpu_ldq_le_data_ra(env, addr + 8, ra); +#if TARGET_BIG_ENDIAN + d0 = bswap16x4(d0); + d1 = bswap16x4(d1); #endif + pwd->d[0] = d0; + pwd->d[1] = d1; } void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - MEMOP_IDX(DF_WORD) -#if !defined(CONFIG_USER_ONLY) -#if !defined(HOST_WORDS_BIGENDIAN) - pwd->w[0] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC()); - pwd->w[1] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC()); - pwd->w[2] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC()); - pwd->w[3] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC()); -#else - pwd->w[0] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC()); - pwd->w[1] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC()); - pwd->w[2] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC()); - pwd->w[3] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC()); -#endif -#else -#if !defined(HOST_WORDS_BIGENDIAN) - pwd->w[0] = cpu_ldl_data(env, addr + (0 << DF_WORD)); - pwd->w[1] = cpu_ldl_data(env, addr + (1 << DF_WORD)); - pwd->w[2] = cpu_ldl_data(env, addr + (2 << DF_WORD)); - pwd->w[3] = cpu_ldl_data(env, addr + (3 << DF_WORD)); -#else - pwd->w[0] = cpu_ldl_data(env, addr + (1 << DF_WORD)); - pwd->w[1] = cpu_ldl_data(env, addr + (0 << DF_WORD)); - pwd->w[2] = cpu_ldl_data(env, addr + (3 << DF_WORD)); - pwd->w[3] = cpu_ldl_data(env, addr + (2 << DF_WORD)); -#endif + uintptr_t ra = GETPC(); + uint64_t d0, d1; + + /* + * Load 8 bytes at a time. Use little-endian load, then for + * big-endian target, we must then bswap the two words. 
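A quick illustration of what the two new swap helpers do may help here: bswap16x4() exchanges the two bytes inside each of the four 16-bit lanes of a 64-bit value, and bswap32x2() reverses the bytes inside each 32-bit lane while leaving the lanes in place. The sketch below is host-side test code only, not part of the patch; it re-implements ror64() and bswap64() locally (the patch takes them from QEMU's bit-manipulation helpers, approximated here with a GCC/Clang builtin) so it can be compiled stand-alone.

/* Stand-alone sketch: the lane-wise byte swaps used by the MSA ld/st helpers. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t ror64_local(uint64_t x, unsigned r)
{
    return (x >> r) | (x << (64 - r));
}

static uint64_t bswap64_local(uint64_t x)
{
    return __builtin_bswap64(x);   /* GCC/Clang builtin, for the sketch only */
}

/* Same shape as the helpers added above. */
static uint64_t bswap16x4(uint64_t x)
{
    uint64_t m = 0x00ff00ff00ff00ffull;
    return ((x & m) << 8) | ((x >> 8) & m);
}

static uint64_t bswap32x2(uint64_t x)
{
    return ror64_local(bswap64_local(x), 32);
}

int main(void)
{
    uint64_t v = 0x1122334455667788ull;

    /* Each 16-bit lane has its two bytes exchanged. */
    assert(bswap16x4(v) == 0x2211443366558877ull);

    /* Each 32-bit lane has its four bytes reversed; the lanes stay in place. */
    assert(bswap32x2(v) == 0x4433221188776655ull);

    printf("lane swaps behave as expected\n");
    return 0;
}

With those permutations in hand, the conditional bswap16x4()/bswap32x2() calls in the load helpers are all that is needed to turn a little-endian 8-byte load into the element order a big-endian target expects.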
+ */ + d0 = cpu_ldq_le_data_ra(env, addr + 0, ra); + d1 = cpu_ldq_le_data_ra(env, addr + 8, ra); +#if TARGET_BIG_ENDIAN + d0 = bswap32x2(d0); + d1 = bswap32x2(d1); #endif + pwd->d[0] = d0; + pwd->d[1] = d1; } void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - MEMOP_IDX(DF_DOUBLE) -#if !defined(CONFIG_USER_ONLY) - pwd->d[0] = helper_ret_ldq_mmu(env, addr + (0 << DF_DOUBLE), oi, GETPC()); - pwd->d[1] = helper_ret_ldq_mmu(env, addr + (1 << DF_DOUBLE), oi, GETPC()); -#else - pwd->d[0] = cpu_ldq_data(env, addr + (0 << DF_DOUBLE)); - pwd->d[1] = cpu_ldq_data(env, addr + (1 << DF_DOUBLE)); -#endif + uintptr_t ra = GETPC(); + uint64_t d0, d1; + + d0 = cpu_ldq_data_ra(env, addr + 0, ra); + d1 = cpu_ldq_data_ra(env, addr + 8, ra); + pwd->d[0] = d0; + pwd->d[1] = d1; } #define MSA_PAGESPAN(x) \ @@ -8415,82 +8323,13 @@ void helper_msa_st_b(CPUMIPSState *env, uint32_t wd, { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); int mmu_idx = cpu_mmu_index(env, false); + uintptr_t ra = GETPC(); - MEMOP_IDX(DF_BYTE) - ensure_writable_pages(env, addr, mmu_idx, GETPC()); -#if !defined(CONFIG_USER_ONLY) -#if !defined(HOST_WORDS_BIGENDIAN) - helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[0], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[1], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[2], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[3], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[4], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[5], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[6], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[7], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[8], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[9], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[10], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[11], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[12], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[13], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[14], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[15], oi, GETPC()); -#else - helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[0], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[1], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[2], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[3], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[4], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[5], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[6], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[7], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[8], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[9], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[10], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[11], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[12], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[13], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[14], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (8 
<< DF_BYTE), pwd->b[15], oi, GETPC()); -#endif -#else -#if !defined(HOST_WORDS_BIGENDIAN) - cpu_stb_data(env, addr + (0 << DF_BYTE), pwd->b[0]); - cpu_stb_data(env, addr + (1 << DF_BYTE), pwd->b[1]); - cpu_stb_data(env, addr + (2 << DF_BYTE), pwd->b[2]); - cpu_stb_data(env, addr + (3 << DF_BYTE), pwd->b[3]); - cpu_stb_data(env, addr + (4 << DF_BYTE), pwd->b[4]); - cpu_stb_data(env, addr + (5 << DF_BYTE), pwd->b[5]); - cpu_stb_data(env, addr + (6 << DF_BYTE), pwd->b[6]); - cpu_stb_data(env, addr + (7 << DF_BYTE), pwd->b[7]); - cpu_stb_data(env, addr + (8 << DF_BYTE), pwd->b[8]); - cpu_stb_data(env, addr + (9 << DF_BYTE), pwd->b[9]); - cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[10]); - cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[11]); - cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[12]); - cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[13]); - cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[14]); - cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[15]); -#else - cpu_stb_data(env, addr + (7 << DF_BYTE), pwd->b[0]); - cpu_stb_data(env, addr + (6 << DF_BYTE), pwd->b[1]); - cpu_stb_data(env, addr + (5 << DF_BYTE), pwd->b[2]); - cpu_stb_data(env, addr + (4 << DF_BYTE), pwd->b[3]); - cpu_stb_data(env, addr + (3 << DF_BYTE), pwd->b[4]); - cpu_stb_data(env, addr + (2 << DF_BYTE), pwd->b[5]); - cpu_stb_data(env, addr + (1 << DF_BYTE), pwd->b[6]); - cpu_stb_data(env, addr + (0 << DF_BYTE), pwd->b[7]); - cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[8]); - cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[9]); - cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[10]); - cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[11]); - cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[12]); - cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[13]); - cpu_stb_data(env, addr + (9 << DF_BYTE), pwd->b[14]); - cpu_stb_data(env, addr + (8 << DF_BYTE), pwd->b[15]); -#endif -#endif + ensure_writable_pages(env, addr, mmu_idx, ra); + + /* Store 8 bytes at a time. Vector element ordering makes this LE. 
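The "makes this LE" remark can be checked with plain integer arithmetic: packing b[0]..b[7] into a 64-bit value least-significant byte first and writing that value out little-endian puts element i back at byte offset i, exactly what the sixteen single-byte stores being deleted here did. A small stand-alone sketch of that identity follows; it models the vector as a plain byte array instead of QEMU's wr_t union, so host endianness never enters the picture, and it is illustration code rather than part of the patch.

/* Sketch: one little-endian 8-byte store per doubleword reproduces the
 * byte-by-byte store order of the old helper_msa_st_b implementation. */
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void stq_le(uint8_t *mem, uint64_t v)
{
    for (int i = 0; i < 8; i++) {
        mem[i] = v >> (8 * i);      /* little-endian byte order */
    }
}

int main(void)
{
    uint8_t b[16], old_way[16], new_way[16];
    uint64_t d0 = 0, d1 = 0;

    for (int i = 0; i < 16; i++) {
        b[i] = 0xA0 + i;            /* arbitrary element values */
    }

    /* Old style: sixteen single-byte stores, element i at offset i. */
    for (int i = 0; i < 16; i++) {
        old_way[i] = b[i];
    }

    /* New style: view b[0..7] and b[8..15] as LE-packed 64-bit doublewords... */
    for (int i = 0; i < 8; i++) {
        d0 |= (uint64_t)b[i] << (8 * i);
        d1 |= (uint64_t)b[i + 8] << (8 * i);
    }
    /* ...and emit them with two little-endian 8-byte stores. */
    stq_le(new_way + 0, d0);
    stq_le(new_way + 8, d1);

    assert(memcmp(old_way, new_way, 16) == 0);
    printf("grouped LE stores match the per-byte stores\n");
    return 0;
}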
*/ + cpu_stq_le_data_ra(env, addr + 0, pwd->d[0], ra); + cpu_stq_le_data_ra(env, addr + 8, pwd->d[1], ra); } void helper_msa_st_h(CPUMIPSState *env, uint32_t wd, @@ -8498,50 +8337,20 @@ void helper_msa_st_h(CPUMIPSState *env, uint32_t wd, { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); int mmu_idx = cpu_mmu_index(env, false); + uintptr_t ra = GETPC(); + uint64_t d0, d1; - MEMOP_IDX(DF_HALF) - ensure_writable_pages(env, addr, mmu_idx, GETPC()); -#if !defined(CONFIG_USER_ONLY) -#if !defined(HOST_WORDS_BIGENDIAN) - helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[0], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[1], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[2], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[3], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[4], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[5], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[6], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[7], oi, GETPC()); -#else - helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[0], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[1], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[2], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[3], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[4], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[5], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[6], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[7], oi, GETPC()); -#endif -#else -#if !defined(HOST_WORDS_BIGENDIAN) - cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[0]); - cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[1]); - cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[2]); - cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[3]); - cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[4]); - cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[5]); - cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[6]); - cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[7]); -#else - cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[0]); - cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[1]); - cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[2]); - cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[3]); - cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[4]); - cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[5]); - cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[6]); - cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[7]); -#endif + ensure_writable_pages(env, addr, mmu_idx, ra); + + /* Store 8 bytes at a time. See helper_msa_ld_h. 
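For the halfword variants the extra bswap16x4() step is what turns the little-endian 8-byte access into big-endian 16-bit elements: swapping the bytes inside each lane and then storing the doubleword little-endian writes every h[j] high byte first at offset 2*j. The sketch below checks that identity on the host; it assumes only the premise of the helper_msa_ld_h comment, namely that element h[0] occupies the low 16 bits of the packed doubleword, and it is illustration code, not part of the patch.

/* Sketch: bswap16x4 + little-endian 64-bit store == four big-endian
 * 16-bit element stores, which is what a big-endian target expects. */
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint64_t bswap16x4(uint64_t x)
{
    uint64_t m = 0x00ff00ff00ff00ffull;
    return ((x & m) << 8) | ((x >> 8) & m);
}

static void stq_le(uint8_t *mem, uint64_t v)
{
    for (int i = 0; i < 8; i++) {
        mem[i] = v >> (8 * i);
    }
}

int main(void)
{
    uint16_t h[4] = { 0x1122, 0x3344, 0x5566, 0x7788 };
    uint8_t be_elems[8], grouped[8];
    uint64_t d = 0;

    /* Reference: store each element big-endian at offset 2*j. */
    for (int j = 0; j < 4; j++) {
        be_elems[2 * j]     = h[j] >> 8;
        be_elems[2 * j + 1] = h[j] & 0xff;
    }

    /* Grouped path: pack h[0] into the low lane up to h[3] in the high
     * lane, swap the bytes of each lane, then do one LE 64-bit store. */
    for (int j = 0; j < 4; j++) {
        d |= (uint64_t)h[j] << (16 * j);
    }
    stq_le(grouped, bswap16x4(d));

    assert(memcmp(be_elems, grouped, 8) == 0);
    printf("bswap16x4 + LE store produces big-endian elements\n");
    return 0;
}

The store path is the exact mirror of the load path, so a store followed by a load of the same address round-trips the register contents on both target endiannesses.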
*/ + d0 = pwd->d[0]; + d1 = pwd->d[1]; +#if TARGET_BIG_ENDIAN + d0 = bswap16x4(d0); + d1 = bswap16x4(d1); #endif + cpu_stq_le_data_ra(env, addr + 0, d0, ra); + cpu_stq_le_data_ra(env, addr + 8, d1, ra); } void helper_msa_st_w(CPUMIPSState *env, uint32_t wd, @@ -8549,34 +8358,20 @@ void helper_msa_st_w(CPUMIPSState *env, uint32_t wd, { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); int mmu_idx = cpu_mmu_index(env, false); + uintptr_t ra = GETPC(); + uint64_t d0, d1; - MEMOP_IDX(DF_WORD) - ensure_writable_pages(env, addr, mmu_idx, GETPC()); -#if !defined(CONFIG_USER_ONLY) -#if !defined(HOST_WORDS_BIGENDIAN) - helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[0], oi, GETPC()); - helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[1], oi, GETPC()); - helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[2], oi, GETPC()); - helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[3], oi, GETPC()); -#else - helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[0], oi, GETPC()); - helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[1], oi, GETPC()); - helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[2], oi, GETPC()); - helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[3], oi, GETPC()); -#endif -#else -#if !defined(HOST_WORDS_BIGENDIAN) - cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[0]); - cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[1]); - cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[2]); - cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[3]); -#else - cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[0]); - cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[1]); - cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[2]); - cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[3]); -#endif + ensure_writable_pages(env, addr, mmu_idx, ra); + + /* Store 8 bytes at a time. See helper_msa_ld_w. */ + d0 = pwd->d[0]; + d1 = pwd->d[1]; +#if TARGET_BIG_ENDIAN + d0 = bswap32x2(d0); + d1 = bswap32x2(d1); #endif + cpu_stq_le_data_ra(env, addr + 0, d0, ra); + cpu_stq_le_data_ra(env, addr + 8, d1, ra); } void helper_msa_st_d(CPUMIPSState *env, uint32_t wd, @@ -8584,14 +8379,10 @@ void helper_msa_st_d(CPUMIPSState *env, uint32_t wd, { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); int mmu_idx = cpu_mmu_index(env, false); + uintptr_t ra = GETPC(); - MEMOP_IDX(DF_DOUBLE) ensure_writable_pages(env, addr, mmu_idx, GETPC()); -#if !defined(CONFIG_USER_ONLY) - helper_ret_stq_mmu(env, addr + (0 << DF_DOUBLE), pwd->d[0], oi, GETPC()); - helper_ret_stq_mmu(env, addr + (1 << DF_DOUBLE), pwd->d[1], oi, GETPC()); -#else - cpu_stq_data(env, addr + (0 << DF_DOUBLE), pwd->d[0]); - cpu_stq_data(env, addr + (1 << DF_DOUBLE), pwd->d[1]); -#endif + + cpu_stq_data_ra(env, addr + 0, pwd->d[0], ra); + cpu_stq_data_ra(env, addr + 8, pwd->d[1], ra); } diff --git a/target/mips/tcg/msa_translate.c b/target/mips/tcg/msa_translate.c index eed2eca6c9..1bcdbb1121 100644 --- a/target/mips/tcg/msa_translate.c +++ b/target/mips/tcg/msa_translate.c @@ -17,243 +17,24 @@ #include "fpu_helper.h" #include "internal.h" +static int elm_n(DisasContext *ctx, int x); +static int elm_df(DisasContext *ctx, int x); +static int bit_m(DisasContext *ctx, int x); +static int bit_df(DisasContext *ctx, int x); + +static inline int plus_1(DisasContext *s, int x) +{ + return x + 1; +} + +static inline int plus_2(DisasContext *s, int x) +{ + return x + 2; +} + /* Include the auto-generated decoder. 
*/ #include "decode-msa.c.inc" -#define OPC_MSA (0x1E << 26) - -#define MASK_MSA_MINOR(op) (MASK_OP_MAJOR(op) | (op & 0x3F)) -enum { - OPC_MSA_I8_00 = 0x00 | OPC_MSA, - OPC_MSA_I8_01 = 0x01 | OPC_MSA, - OPC_MSA_I8_02 = 0x02 | OPC_MSA, - OPC_MSA_I5_06 = 0x06 | OPC_MSA, - OPC_MSA_I5_07 = 0x07 | OPC_MSA, - OPC_MSA_BIT_09 = 0x09 | OPC_MSA, - OPC_MSA_BIT_0A = 0x0A | OPC_MSA, - OPC_MSA_3R_0D = 0x0D | OPC_MSA, - OPC_MSA_3R_0E = 0x0E | OPC_MSA, - OPC_MSA_3R_0F = 0x0F | OPC_MSA, - OPC_MSA_3R_10 = 0x10 | OPC_MSA, - OPC_MSA_3R_11 = 0x11 | OPC_MSA, - OPC_MSA_3R_12 = 0x12 | OPC_MSA, - OPC_MSA_3R_13 = 0x13 | OPC_MSA, - OPC_MSA_3R_14 = 0x14 | OPC_MSA, - OPC_MSA_3R_15 = 0x15 | OPC_MSA, - OPC_MSA_ELM = 0x19 | OPC_MSA, - OPC_MSA_3RF_1A = 0x1A | OPC_MSA, - OPC_MSA_3RF_1B = 0x1B | OPC_MSA, - OPC_MSA_3RF_1C = 0x1C | OPC_MSA, - OPC_MSA_VEC = 0x1E | OPC_MSA, - - /* MI10 instruction */ - OPC_LD_B = (0x20) | OPC_MSA, - OPC_LD_H = (0x21) | OPC_MSA, - OPC_LD_W = (0x22) | OPC_MSA, - OPC_LD_D = (0x23) | OPC_MSA, - OPC_ST_B = (0x24) | OPC_MSA, - OPC_ST_H = (0x25) | OPC_MSA, - OPC_ST_W = (0x26) | OPC_MSA, - OPC_ST_D = (0x27) | OPC_MSA, -}; - -enum { - /* I5 instruction df(bits 22..21) = _b, _h, _w, _d */ - OPC_ADDVI_df = (0x0 << 23) | OPC_MSA_I5_06, - OPC_CEQI_df = (0x0 << 23) | OPC_MSA_I5_07, - OPC_SUBVI_df = (0x1 << 23) | OPC_MSA_I5_06, - OPC_MAXI_S_df = (0x2 << 23) | OPC_MSA_I5_06, - OPC_CLTI_S_df = (0x2 << 23) | OPC_MSA_I5_07, - OPC_MAXI_U_df = (0x3 << 23) | OPC_MSA_I5_06, - OPC_CLTI_U_df = (0x3 << 23) | OPC_MSA_I5_07, - OPC_MINI_S_df = (0x4 << 23) | OPC_MSA_I5_06, - OPC_CLEI_S_df = (0x4 << 23) | OPC_MSA_I5_07, - OPC_MINI_U_df = (0x5 << 23) | OPC_MSA_I5_06, - OPC_CLEI_U_df = (0x5 << 23) | OPC_MSA_I5_07, - OPC_LDI_df = (0x6 << 23) | OPC_MSA_I5_07, - - /* I8 instruction */ - OPC_ANDI_B = (0x0 << 24) | OPC_MSA_I8_00, - OPC_BMNZI_B = (0x0 << 24) | OPC_MSA_I8_01, - OPC_SHF_B = (0x0 << 24) | OPC_MSA_I8_02, - OPC_ORI_B = (0x1 << 24) | OPC_MSA_I8_00, - OPC_BMZI_B = (0x1 << 24) | OPC_MSA_I8_01, - OPC_SHF_H = (0x1 << 24) | OPC_MSA_I8_02, - OPC_NORI_B = (0x2 << 24) | OPC_MSA_I8_00, - OPC_BSELI_B = (0x2 << 24) | OPC_MSA_I8_01, - OPC_SHF_W = (0x2 << 24) | OPC_MSA_I8_02, - OPC_XORI_B = (0x3 << 24) | OPC_MSA_I8_00, - - /* VEC/2R/2RF instruction */ - OPC_AND_V = (0x00 << 21) | OPC_MSA_VEC, - OPC_OR_V = (0x01 << 21) | OPC_MSA_VEC, - OPC_NOR_V = (0x02 << 21) | OPC_MSA_VEC, - OPC_XOR_V = (0x03 << 21) | OPC_MSA_VEC, - OPC_BMNZ_V = (0x04 << 21) | OPC_MSA_VEC, - OPC_BMZ_V = (0x05 << 21) | OPC_MSA_VEC, - OPC_BSEL_V = (0x06 << 21) | OPC_MSA_VEC, - - OPC_MSA_2R = (0x18 << 21) | OPC_MSA_VEC, - OPC_MSA_2RF = (0x19 << 21) | OPC_MSA_VEC, - - /* 2R instruction df(bits 17..16) = _b, _h, _w, _d */ - OPC_FILL_df = (0x00 << 18) | OPC_MSA_2R, - OPC_PCNT_df = (0x01 << 18) | OPC_MSA_2R, - OPC_NLOC_df = (0x02 << 18) | OPC_MSA_2R, - OPC_NLZC_df = (0x03 << 18) | OPC_MSA_2R, - - /* 2RF instruction df(bit 16) = _w, _d */ - OPC_FCLASS_df = (0x00 << 17) | OPC_MSA_2RF, - OPC_FTRUNC_S_df = (0x01 << 17) | OPC_MSA_2RF, - OPC_FTRUNC_U_df = (0x02 << 17) | OPC_MSA_2RF, - OPC_FSQRT_df = (0x03 << 17) | OPC_MSA_2RF, - OPC_FRSQRT_df = (0x04 << 17) | OPC_MSA_2RF, - OPC_FRCP_df = (0x05 << 17) | OPC_MSA_2RF, - OPC_FRINT_df = (0x06 << 17) | OPC_MSA_2RF, - OPC_FLOG2_df = (0x07 << 17) | OPC_MSA_2RF, - OPC_FEXUPL_df = (0x08 << 17) | OPC_MSA_2RF, - OPC_FEXUPR_df = (0x09 << 17) | OPC_MSA_2RF, - OPC_FFQL_df = (0x0A << 17) | OPC_MSA_2RF, - OPC_FFQR_df = (0x0B << 17) | OPC_MSA_2RF, - OPC_FTINT_S_df = (0x0C << 17) | OPC_MSA_2RF, - OPC_FTINT_U_df = (0x0D << 17) | OPC_MSA_2RF, - 
OPC_FFINT_S_df = (0x0E << 17) | OPC_MSA_2RF, - OPC_FFINT_U_df = (0x0F << 17) | OPC_MSA_2RF, - - /* 3R instruction df(bits 22..21) = _b, _h, _w, d */ - OPC_SLL_df = (0x0 << 23) | OPC_MSA_3R_0D, - OPC_ADDV_df = (0x0 << 23) | OPC_MSA_3R_0E, - OPC_CEQ_df = (0x0 << 23) | OPC_MSA_3R_0F, - OPC_ADD_A_df = (0x0 << 23) | OPC_MSA_3R_10, - OPC_SUBS_S_df = (0x0 << 23) | OPC_MSA_3R_11, - OPC_MULV_df = (0x0 << 23) | OPC_MSA_3R_12, - OPC_DOTP_S_df = (0x0 << 23) | OPC_MSA_3R_13, - OPC_SLD_df = (0x0 << 23) | OPC_MSA_3R_14, - OPC_VSHF_df = (0x0 << 23) | OPC_MSA_3R_15, - OPC_SRA_df = (0x1 << 23) | OPC_MSA_3R_0D, - OPC_SUBV_df = (0x1 << 23) | OPC_MSA_3R_0E, - OPC_ADDS_A_df = (0x1 << 23) | OPC_MSA_3R_10, - OPC_SUBS_U_df = (0x1 << 23) | OPC_MSA_3R_11, - OPC_MADDV_df = (0x1 << 23) | OPC_MSA_3R_12, - OPC_DOTP_U_df = (0x1 << 23) | OPC_MSA_3R_13, - OPC_SPLAT_df = (0x1 << 23) | OPC_MSA_3R_14, - OPC_SRAR_df = (0x1 << 23) | OPC_MSA_3R_15, - OPC_SRL_df = (0x2 << 23) | OPC_MSA_3R_0D, - OPC_MAX_S_df = (0x2 << 23) | OPC_MSA_3R_0E, - OPC_CLT_S_df = (0x2 << 23) | OPC_MSA_3R_0F, - OPC_ADDS_S_df = (0x2 << 23) | OPC_MSA_3R_10, - OPC_SUBSUS_U_df = (0x2 << 23) | OPC_MSA_3R_11, - OPC_MSUBV_df = (0x2 << 23) | OPC_MSA_3R_12, - OPC_DPADD_S_df = (0x2 << 23) | OPC_MSA_3R_13, - OPC_PCKEV_df = (0x2 << 23) | OPC_MSA_3R_14, - OPC_SRLR_df = (0x2 << 23) | OPC_MSA_3R_15, - OPC_BCLR_df = (0x3 << 23) | OPC_MSA_3R_0D, - OPC_MAX_U_df = (0x3 << 23) | OPC_MSA_3R_0E, - OPC_CLT_U_df = (0x3 << 23) | OPC_MSA_3R_0F, - OPC_ADDS_U_df = (0x3 << 23) | OPC_MSA_3R_10, - OPC_SUBSUU_S_df = (0x3 << 23) | OPC_MSA_3R_11, - OPC_DPADD_U_df = (0x3 << 23) | OPC_MSA_3R_13, - OPC_PCKOD_df = (0x3 << 23) | OPC_MSA_3R_14, - OPC_BSET_df = (0x4 << 23) | OPC_MSA_3R_0D, - OPC_MIN_S_df = (0x4 << 23) | OPC_MSA_3R_0E, - OPC_CLE_S_df = (0x4 << 23) | OPC_MSA_3R_0F, - OPC_AVE_S_df = (0x4 << 23) | OPC_MSA_3R_10, - OPC_ASUB_S_df = (0x4 << 23) | OPC_MSA_3R_11, - OPC_DIV_S_df = (0x4 << 23) | OPC_MSA_3R_12, - OPC_DPSUB_S_df = (0x4 << 23) | OPC_MSA_3R_13, - OPC_ILVL_df = (0x4 << 23) | OPC_MSA_3R_14, - OPC_HADD_S_df = (0x4 << 23) | OPC_MSA_3R_15, - OPC_BNEG_df = (0x5 << 23) | OPC_MSA_3R_0D, - OPC_MIN_U_df = (0x5 << 23) | OPC_MSA_3R_0E, - OPC_CLE_U_df = (0x5 << 23) | OPC_MSA_3R_0F, - OPC_AVE_U_df = (0x5 << 23) | OPC_MSA_3R_10, - OPC_ASUB_U_df = (0x5 << 23) | OPC_MSA_3R_11, - OPC_DIV_U_df = (0x5 << 23) | OPC_MSA_3R_12, - OPC_DPSUB_U_df = (0x5 << 23) | OPC_MSA_3R_13, - OPC_ILVR_df = (0x5 << 23) | OPC_MSA_3R_14, - OPC_HADD_U_df = (0x5 << 23) | OPC_MSA_3R_15, - OPC_BINSL_df = (0x6 << 23) | OPC_MSA_3R_0D, - OPC_MAX_A_df = (0x6 << 23) | OPC_MSA_3R_0E, - OPC_AVER_S_df = (0x6 << 23) | OPC_MSA_3R_10, - OPC_MOD_S_df = (0x6 << 23) | OPC_MSA_3R_12, - OPC_ILVEV_df = (0x6 << 23) | OPC_MSA_3R_14, - OPC_HSUB_S_df = (0x6 << 23) | OPC_MSA_3R_15, - OPC_BINSR_df = (0x7 << 23) | OPC_MSA_3R_0D, - OPC_MIN_A_df = (0x7 << 23) | OPC_MSA_3R_0E, - OPC_AVER_U_df = (0x7 << 23) | OPC_MSA_3R_10, - OPC_MOD_U_df = (0x7 << 23) | OPC_MSA_3R_12, - OPC_ILVOD_df = (0x7 << 23) | OPC_MSA_3R_14, - OPC_HSUB_U_df = (0x7 << 23) | OPC_MSA_3R_15, - - /* ELM instructions df(bits 21..16) = _b, _h, _w, _d */ - OPC_SLDI_df = (0x0 << 22) | (0x00 << 16) | OPC_MSA_ELM, - OPC_CTCMSA = (0x0 << 22) | (0x3E << 16) | OPC_MSA_ELM, - OPC_SPLATI_df = (0x1 << 22) | (0x00 << 16) | OPC_MSA_ELM, - OPC_CFCMSA = (0x1 << 22) | (0x3E << 16) | OPC_MSA_ELM, - OPC_COPY_S_df = (0x2 << 22) | (0x00 << 16) | OPC_MSA_ELM, - OPC_MOVE_V = (0x2 << 22) | (0x3E << 16) | OPC_MSA_ELM, - OPC_COPY_U_df = (0x3 << 22) | (0x00 << 16) | OPC_MSA_ELM, - OPC_INSERT_df = (0x4 << 22) 
| (0x00 << 16) | OPC_MSA_ELM, - OPC_INSVE_df = (0x5 << 22) | (0x00 << 16) | OPC_MSA_ELM, - - /* 3RF instruction _df(bit 21) = _w, _d */ - OPC_FCAF_df = (0x0 << 22) | OPC_MSA_3RF_1A, - OPC_FADD_df = (0x0 << 22) | OPC_MSA_3RF_1B, - OPC_FCUN_df = (0x1 << 22) | OPC_MSA_3RF_1A, - OPC_FSUB_df = (0x1 << 22) | OPC_MSA_3RF_1B, - OPC_FCOR_df = (0x1 << 22) | OPC_MSA_3RF_1C, - OPC_FCEQ_df = (0x2 << 22) | OPC_MSA_3RF_1A, - OPC_FMUL_df = (0x2 << 22) | OPC_MSA_3RF_1B, - OPC_FCUNE_df = (0x2 << 22) | OPC_MSA_3RF_1C, - OPC_FCUEQ_df = (0x3 << 22) | OPC_MSA_3RF_1A, - OPC_FDIV_df = (0x3 << 22) | OPC_MSA_3RF_1B, - OPC_FCNE_df = (0x3 << 22) | OPC_MSA_3RF_1C, - OPC_FCLT_df = (0x4 << 22) | OPC_MSA_3RF_1A, - OPC_FMADD_df = (0x4 << 22) | OPC_MSA_3RF_1B, - OPC_MUL_Q_df = (0x4 << 22) | OPC_MSA_3RF_1C, - OPC_FCULT_df = (0x5 << 22) | OPC_MSA_3RF_1A, - OPC_FMSUB_df = (0x5 << 22) | OPC_MSA_3RF_1B, - OPC_MADD_Q_df = (0x5 << 22) | OPC_MSA_3RF_1C, - OPC_FCLE_df = (0x6 << 22) | OPC_MSA_3RF_1A, - OPC_MSUB_Q_df = (0x6 << 22) | OPC_MSA_3RF_1C, - OPC_FCULE_df = (0x7 << 22) | OPC_MSA_3RF_1A, - OPC_FEXP2_df = (0x7 << 22) | OPC_MSA_3RF_1B, - OPC_FSAF_df = (0x8 << 22) | OPC_MSA_3RF_1A, - OPC_FEXDO_df = (0x8 << 22) | OPC_MSA_3RF_1B, - OPC_FSUN_df = (0x9 << 22) | OPC_MSA_3RF_1A, - OPC_FSOR_df = (0x9 << 22) | OPC_MSA_3RF_1C, - OPC_FSEQ_df = (0xA << 22) | OPC_MSA_3RF_1A, - OPC_FTQ_df = (0xA << 22) | OPC_MSA_3RF_1B, - OPC_FSUNE_df = (0xA << 22) | OPC_MSA_3RF_1C, - OPC_FSUEQ_df = (0xB << 22) | OPC_MSA_3RF_1A, - OPC_FSNE_df = (0xB << 22) | OPC_MSA_3RF_1C, - OPC_FSLT_df = (0xC << 22) | OPC_MSA_3RF_1A, - OPC_FMIN_df = (0xC << 22) | OPC_MSA_3RF_1B, - OPC_MULR_Q_df = (0xC << 22) | OPC_MSA_3RF_1C, - OPC_FSULT_df = (0xD << 22) | OPC_MSA_3RF_1A, - OPC_FMIN_A_df = (0xD << 22) | OPC_MSA_3RF_1B, - OPC_MADDR_Q_df = (0xD << 22) | OPC_MSA_3RF_1C, - OPC_FSLE_df = (0xE << 22) | OPC_MSA_3RF_1A, - OPC_FMAX_df = (0xE << 22) | OPC_MSA_3RF_1B, - OPC_MSUBR_Q_df = (0xE << 22) | OPC_MSA_3RF_1C, - OPC_FSULE_df = (0xF << 22) | OPC_MSA_3RF_1A, - OPC_FMAX_A_df = (0xF << 22) | OPC_MSA_3RF_1B, - - /* BIT instruction df(bits 22..16) = _B _H _W _D */ - OPC_SLLI_df = (0x0 << 23) | OPC_MSA_BIT_09, - OPC_SAT_S_df = (0x0 << 23) | OPC_MSA_BIT_0A, - OPC_SRAI_df = (0x1 << 23) | OPC_MSA_BIT_09, - OPC_SAT_U_df = (0x1 << 23) | OPC_MSA_BIT_0A, - OPC_SRLI_df = (0x2 << 23) | OPC_MSA_BIT_09, - OPC_SRARI_df = (0x2 << 23) | OPC_MSA_BIT_0A, - OPC_BCLRI_df = (0x3 << 23) | OPC_MSA_BIT_09, - OPC_SRLRI_df = (0x3 << 23) | OPC_MSA_BIT_0A, - OPC_BSETI_df = (0x4 << 23) | OPC_MSA_BIT_09, - OPC_BNEGI_df = (0x5 << 23) | OPC_MSA_BIT_09, - OPC_BINSLI_df = (0x6 << 23) | OPC_MSA_BIT_09, - OPC_BINSRI_df = (0x7 << 23) | OPC_MSA_BIT_09, -}; - static const char msaregnames[][6] = { "w0.d0", "w0.d1", "w1.d0", "w1.d1", "w2.d0", "w2.d1", "w3.d0", "w3.d1", @@ -273,6 +54,77 @@ static const char msaregnames[][6] = { "w30.d0", "w30.d1", "w31.d0", "w31.d1", }; +/* Encoding of Operation Field (must be indexed by CPUMIPSMSADataFormat) */ +struct dfe { + int start; + int length; + uint32_t mask; +}; + +/* + * Extract immediate from df/{m,n} format (used by ELM & BIT instructions). + * Returns the immediate value, or -1 if the format does not match. + */ +static int df_extract_val(DisasContext *ctx, int x, const struct dfe *s) +{ + for (unsigned i = 0; i < 4; i++) { + if (extract32(x, s[i].start, s[i].length) == s[i].mask) { + return extract32(x, 0, s[i].start); + } + } + return -1; +} + +/* + * Extract DataField from df/{m,n} format (used by ELM & BIT instructions). 
+ * Returns the DataField, or -1 if the format does not match. + */ +static int df_extract_df(DisasContext *ctx, int x, const struct dfe *s) +{ + for (unsigned i = 0; i < 4; i++) { + if (extract32(x, s[i].start, s[i].length) == s[i].mask) { + return i; + } + } + return -1; +} + +static const struct dfe df_elm[] = { + /* Table 3.26 ELM Instruction Format */ + [DF_BYTE] = {4, 2, 0b00}, + [DF_HALF] = {3, 3, 0b100}, + [DF_WORD] = {2, 4, 0b1100}, + [DF_DOUBLE] = {1, 5, 0b11100} +}; + +static int elm_n(DisasContext *ctx, int x) +{ + return df_extract_val(ctx, x, df_elm); +} + +static int elm_df(DisasContext *ctx, int x) +{ + return df_extract_df(ctx, x, df_elm); +} + +static const struct dfe df_bit[] = { + /* Table 3.28 BIT Instruction Format */ + [DF_BYTE] = {3, 4, 0b1110}, + [DF_HALF] = {4, 3, 0b110}, + [DF_WORD] = {5, 2, 0b10}, + [DF_DOUBLE] = {6, 1, 0b0} +}; + +static int bit_m(DisasContext *ctx, int x) +{ + return df_extract_val(ctx, x, df_bit); +} + +static int bit_df(DisasContext *ctx, int x) +{ + return df_extract_df(ctx, x, df_bit); +} + static TCGv_i64 msa_wr_d[64]; void msa_translate_init(void) @@ -280,61 +132,80 @@ void msa_translate_init(void) int i; for (i = 0; i < 32; i++) { - int off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[0]); + int off; /* * The MSA vector registers are mapped on the * scalar floating-point unit (FPU) registers. */ + off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[0]); msa_wr_d[i * 2] = fpu_f64[i]; + off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[1]); msa_wr_d[i * 2 + 1] = tcg_global_mem_new_i64(cpu_env, off, msaregnames[i * 2 + 1]); } } -static inline int check_msa_access(DisasContext *ctx) +/* + * Check if MSA is enabled. + * This function is always called with MSA available. + * If MSA is disabled, raise an exception. 
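The df_elm/df_bit tables and df_extract_val()/df_extract_df() above are a table-driven form of the open-coded dfm mask checks that this patch deletes from gen_msa_bit() further down (the same pattern covers the ELM df/n field via df_elm). The stand-alone sketch below cross-checks that claim for every 7-bit df/m value; it drops the unused DisasContext parameter and substitutes a minimal local extract32() for the one in qemu/bitops.h, so treat it as an illustration of the decode, not the decoder itself.

/* Sketch: the table-driven df/m decode for BIT-format instructions,
 * cross-checked against the mask tests used by the removed gen_msa_bit(). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum { DF_BYTE, DF_HALF, DF_WORD, DF_DOUBLE };

/* Local stand-in for QEMU's extract32() from qemu/bitops.h. */
static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

struct dfe {
    int start;
    int length;
    uint32_t mask;
};

static const struct dfe df_bit[4] = {   /* Table 3.28 BIT Instruction Format */
    [DF_BYTE]   = {3, 4, 0b1110},
    [DF_HALF]   = {4, 3, 0b110},
    [DF_WORD]   = {5, 2, 0b10},
    [DF_DOUBLE] = {6, 1, 0b0},
};

/* Same logic as df_extract_val()/df_extract_df(), minus the unused ctx. */
static int bit_m(int x)
{
    for (int i = 0; i < 4; i++) {
        if (extract32(x, df_bit[i].start, df_bit[i].length) == df_bit[i].mask) {
            return extract32(x, 0, df_bit[i].start);
        }
    }
    return -1;
}

static int bit_df(int x)
{
    for (int i = 0; i < 4; i++) {
        if (extract32(x, df_bit[i].start, df_bit[i].length) == df_bit[i].mask) {
            return i;
        }
    }
    return -1;
}

int main(void)
{
    /* Walk the 7-bit df/m field and compare with the old open-coded checks. */
    for (int dfm = 0; dfm < 0x80; dfm++) {
        int df, m;

        if ((dfm & 0x40) == 0x00) {
            df = DF_DOUBLE; m = dfm & 0x3f;
        } else if ((dfm & 0x60) == 0x40) {
            df = DF_WORD;   m = dfm & 0x1f;
        } else if ((dfm & 0x70) == 0x60) {
            df = DF_HALF;   m = dfm & 0x0f;
        } else if ((dfm & 0x78) == 0x70) {
            df = DF_BYTE;   m = dfm & 0x07;
        } else {
            df = -1;        m = -1;      /* reserved encoding */
        }

        assert(bit_df(dfm) == df);
        assert(bit_m(dfm) == m);
    }
    printf("table-driven decode matches the old mask checks\n");
    return 0;
}

The same loop run over df_elm with a 6-bit field recovers the ELM df/n split listed in Table 3.26.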
+ */ +static inline bool check_msa_enabled(DisasContext *ctx) { if (unlikely((ctx->hflags & MIPS_HFLAG_FPU) && !(ctx->hflags & MIPS_HFLAG_F64))) { gen_reserved_instruction(ctx); - return 0; + return false; } if (unlikely(!(ctx->hflags & MIPS_HFLAG_MSA))) { generate_exception_end(ctx, EXCP_MSADIS); - return 0; + return false; } - return 1; + return true; } +typedef void gen_helper_piv(TCGv_ptr, TCGv_i32, TCGv); +typedef void gen_helper_pii(TCGv_ptr, TCGv_i32, TCGv_i32); +typedef void gen_helper_piii(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32); +typedef void gen_helper_piiii(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32); + +#define TRANS_DF_x(TYPE, NAME, trans_func, gen_func) \ + static gen_helper_p##TYPE * const NAME##_tab[4] = { \ + gen_func##_b, gen_func##_h, gen_func##_w, gen_func##_d \ + }; \ + TRANS(NAME, trans_func, NAME##_tab[a->df]) + +#define TRANS_DF_iv(NAME, trans_func, gen_func) \ + TRANS_DF_x(iv, NAME, trans_func, gen_func) + +#define TRANS_DF_ii(NAME, trans_func, gen_func) \ + TRANS_DF_x(ii, NAME, trans_func, gen_func) + +#define TRANS_DF_iii(NAME, trans_func, gen_func) \ + TRANS_DF_x(iii, NAME, trans_func, gen_func) + +#define TRANS_DF_iii_b(NAME, trans_func, gen_func) \ + static gen_helper_piii * const NAME##_tab[4] = { \ + NULL, gen_func##_h, gen_func##_w, gen_func##_d \ + }; \ + static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ + { \ + return trans_func(ctx, a, NAME##_tab[a->df]); \ + } + static void gen_check_zero_element(TCGv tresult, uint8_t df, uint8_t wt, TCGCond cond) { /* generates tcg ops to check if any element is 0 */ /* Note this function only works with MSA_WRLEN = 128 */ - uint64_t eval_zero_or_big = 0; - uint64_t eval_big = 0; + uint64_t eval_zero_or_big = dup_const(df, 1); + uint64_t eval_big = eval_zero_or_big << ((8 << df) - 1); TCGv_i64 t0 = tcg_temp_new_i64(); TCGv_i64 t1 = tcg_temp_new_i64(); - switch (df) { - case DF_BYTE: - eval_zero_or_big = 0x0101010101010101ULL; - eval_big = 0x8080808080808080ULL; - break; - case DF_HALF: - eval_zero_or_big = 0x0001000100010001ULL; - eval_big = 0x8000800080008000ULL; - break; - case DF_WORD: - eval_zero_or_big = 0x0000000100000001ULL; - eval_big = 0x8000000080000000ULL; - break; - case DF_DOUBLE: - eval_zero_or_big = 0x0000000000000001ULL; - eval_big = 0x8000000000000000ULL; - break; - } + tcg_gen_subi_i64(t0, msa_wr_d[wt << 1], eval_zero_or_big); tcg_gen_andc_i64(t0, t0, msa_wr_d[wt << 1]); tcg_gen_andi_i64(t0, t0, eval_big); @@ -350,11 +221,13 @@ static void gen_check_zero_element(TCGv tresult, uint8_t df, uint8_t wt, tcg_temp_free_i64(t1); } -static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int s16, TCGCond cond) +static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int sa, TCGCond cond) { TCGv_i64 t0; - check_msa_access(ctx); + if (!check_msa_enabled(ctx)) { + return true; + } if (ctx->hflags & MIPS_HFLAG_BMASK) { gen_reserved_instruction(ctx); @@ -366,7 +239,7 @@ static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int s16, TCGCond cond) tcg_gen_trunc_i64_tl(bcond, t0); tcg_temp_free_i64(t0); - ctx->btarget = ctx->base.pc_next + (s16 << 2) + 4; + ctx->btarget = ctx->base.pc_next + (sa << 2) + 4; ctx->hflags |= MIPS_HFLAG_BC; ctx->hflags |= MIPS_HFLAG_BDS32; @@ -376,17 +249,19 @@ static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int s16, TCGCond cond) static bool trans_BZ_V(DisasContext *ctx, arg_msa_bz *a) { - return gen_msa_BxZ_V(ctx, a->wt, a->s16, TCG_COND_EQ); + return gen_msa_BxZ_V(ctx, a->wt, a->sa, TCG_COND_EQ); } static bool trans_BNZ_V(DisasContext *ctx, arg_msa_bz *a) { - return 
gen_msa_BxZ_V(ctx, a->wt, a->s16, TCG_COND_NE); + return gen_msa_BxZ_V(ctx, a->wt, a->sa, TCG_COND_NE); } -static bool gen_msa_BxZ(DisasContext *ctx, int df, int wt, int s16, bool if_not) +static bool gen_msa_BxZ(DisasContext *ctx, int df, int wt, int sa, bool if_not) { - check_msa_access(ctx); + if (!check_msa_enabled(ctx)) { + return true; + } if (ctx->hflags & MIPS_HFLAG_BMASK) { gen_reserved_instruction(ctx); @@ -395,1878 +270,532 @@ static bool gen_msa_BxZ(DisasContext *ctx, int df, int wt, int s16, bool if_not) gen_check_zero_element(bcond, df, wt, if_not ? TCG_COND_EQ : TCG_COND_NE); - ctx->btarget = ctx->base.pc_next + (s16 << 2) + 4; + ctx->btarget = ctx->base.pc_next + (sa << 2) + 4; ctx->hflags |= MIPS_HFLAG_BC; ctx->hflags |= MIPS_HFLAG_BDS32; return true; } -static bool trans_BZ_x(DisasContext *ctx, arg_msa_bz *a) +static bool trans_BZ(DisasContext *ctx, arg_msa_bz *a) { - return gen_msa_BxZ(ctx, a->df, a->wt, a->s16, false); + return gen_msa_BxZ(ctx, a->df, a->wt, a->sa, false); } -static bool trans_BNZ_x(DisasContext *ctx, arg_msa_bz *a) +static bool trans_BNZ(DisasContext *ctx, arg_msa_bz *a) { - return gen_msa_BxZ(ctx, a->df, a->wt, a->s16, true); + return gen_msa_BxZ(ctx, a->df, a->wt, a->sa, true); } -static void gen_msa_i8(DisasContext *ctx) +static bool trans_msa_i8(DisasContext *ctx, arg_msa_i *a, + gen_helper_piii *gen_msa_i8) { -#define MASK_MSA_I8(op) (MASK_MSA_MINOR(op) | (op & (0x03 << 24))) - uint8_t i8 = (ctx->opcode >> 16) & 0xff; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - - TCGv_i32 twd = tcg_const_i32(wd); - TCGv_i32 tws = tcg_const_i32(ws); - TCGv_i32 ti8 = tcg_const_i32(i8); - - switch (MASK_MSA_I8(ctx->opcode)) { - case OPC_ANDI_B: - gen_helper_msa_andi_b(cpu_env, twd, tws, ti8); - break; - case OPC_ORI_B: - gen_helper_msa_ori_b(cpu_env, twd, tws, ti8); - break; - case OPC_NORI_B: - gen_helper_msa_nori_b(cpu_env, twd, tws, ti8); - break; - case OPC_XORI_B: - gen_helper_msa_xori_b(cpu_env, twd, tws, ti8); - break; - case OPC_BMNZI_B: - gen_helper_msa_bmnzi_b(cpu_env, twd, tws, ti8); - break; - case OPC_BMZI_B: - gen_helper_msa_bmzi_b(cpu_env, twd, tws, ti8); - break; - case OPC_BSELI_B: - gen_helper_msa_bseli_b(cpu_env, twd, tws, ti8); - break; - case OPC_SHF_B: - case OPC_SHF_H: - case OPC_SHF_W: - { - uint8_t df = (ctx->opcode >> 24) & 0x3; - if (df == DF_DOUBLE) { - gen_reserved_instruction(ctx); - } else { - TCGv_i32 tdf = tcg_const_i32(df); - gen_helper_msa_shf_df(cpu_env, tdf, twd, tws, ti8); - tcg_temp_free_i32(tdf); - } - } - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - break; + if (!check_msa_enabled(ctx)) { + return true; } - tcg_temp_free_i32(twd); - tcg_temp_free_i32(tws); - tcg_temp_free_i32(ti8); -} - -static void gen_msa_i5(DisasContext *ctx) -{ -#define MASK_MSA_I5(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23))) - uint8_t df = (ctx->opcode >> 21) & 0x3; - int8_t s5 = (int8_t) sextract32(ctx->opcode, 16, 5); - uint8_t u5 = (ctx->opcode >> 16) & 0x1f; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - - TCGv_i32 tdf = tcg_const_i32(df); - TCGv_i32 twd = tcg_const_i32(wd); - TCGv_i32 tws = tcg_const_i32(ws); - TCGv_i32 timm = tcg_temp_new_i32(); - tcg_gen_movi_i32(timm, u5); - - switch (MASK_MSA_I5(ctx->opcode)) { - case OPC_ADDVI_df: - gen_helper_msa_addvi_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_SUBVI_df: - gen_helper_msa_subvi_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_MAXI_S_df: - 
tcg_gen_movi_i32(timm, s5); - gen_helper_msa_maxi_s_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_MAXI_U_df: - gen_helper_msa_maxi_u_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_MINI_S_df: - tcg_gen_movi_i32(timm, s5); - gen_helper_msa_mini_s_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_MINI_U_df: - gen_helper_msa_mini_u_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_CEQI_df: - tcg_gen_movi_i32(timm, s5); - gen_helper_msa_ceqi_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_CLTI_S_df: - tcg_gen_movi_i32(timm, s5); - gen_helper_msa_clti_s_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_CLTI_U_df: - gen_helper_msa_clti_u_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_CLEI_S_df: - tcg_gen_movi_i32(timm, s5); - gen_helper_msa_clei_s_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_CLEI_U_df: - gen_helper_msa_clei_u_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_LDI_df: - { - int32_t s10 = sextract32(ctx->opcode, 11, 10); - tcg_gen_movi_i32(timm, s10); - gen_helper_msa_ldi_df(cpu_env, tdf, twd, timm); - } - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - break; - } - - tcg_temp_free_i32(tdf); - tcg_temp_free_i32(twd); - tcg_temp_free_i32(tws); - tcg_temp_free_i32(timm); -} - -static void gen_msa_bit(DisasContext *ctx) -{ -#define MASK_MSA_BIT(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23))) - uint8_t dfm = (ctx->opcode >> 16) & 0x7f; - uint32_t df = 0, m = 0; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - - TCGv_i32 tdf; - TCGv_i32 tm; - TCGv_i32 twd; - TCGv_i32 tws; - - if ((dfm & 0x40) == 0x00) { - m = dfm & 0x3f; - df = DF_DOUBLE; - } else if ((dfm & 0x60) == 0x40) { - m = dfm & 0x1f; - df = DF_WORD; - } else if ((dfm & 0x70) == 0x60) { - m = dfm & 0x0f; - df = DF_HALF; - } else if ((dfm & 0x78) == 0x70) { - m = dfm & 0x7; - df = DF_BYTE; - } else { - gen_reserved_instruction(ctx); - return; - } - - tdf = tcg_const_i32(df); - tm = tcg_const_i32(m); - twd = tcg_const_i32(wd); - tws = tcg_const_i32(ws); - - switch (MASK_MSA_BIT(ctx->opcode)) { - case OPC_SLLI_df: - gen_helper_msa_slli_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_SRAI_df: - gen_helper_msa_srai_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_SRLI_df: - gen_helper_msa_srli_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_BCLRI_df: - gen_helper_msa_bclri_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_BSETI_df: - gen_helper_msa_bseti_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_BNEGI_df: - gen_helper_msa_bnegi_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_BINSLI_df: - gen_helper_msa_binsli_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_BINSRI_df: - gen_helper_msa_binsri_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_SAT_S_df: - gen_helper_msa_sat_s_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_SAT_U_df: - gen_helper_msa_sat_u_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_SRARI_df: - gen_helper_msa_srari_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_SRLRI_df: - gen_helper_msa_srlri_df(cpu_env, tdf, twd, tws, tm); - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - break; - } - - tcg_temp_free_i32(tdf); - tcg_temp_free_i32(tm); - tcg_temp_free_i32(twd); - tcg_temp_free_i32(tws); -} - -static void gen_msa_3r(DisasContext *ctx) -{ -#define MASK_MSA_3R(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23))) - uint8_t df = (ctx->opcode >> 21) & 0x3; - uint8_t wt = (ctx->opcode >> 16) & 0x1f; - uint8_t ws = 
(ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - - TCGv_i32 tdf = tcg_const_i32(df); - TCGv_i32 twd = tcg_const_i32(wd); - TCGv_i32 tws = tcg_const_i32(ws); - TCGv_i32 twt = tcg_const_i32(wt); - - switch (MASK_MSA_3R(ctx->opcode)) { - case OPC_BINSL_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_binsl_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_binsl_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_binsl_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_binsl_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_BINSR_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_binsr_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_binsr_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_binsr_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_binsr_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_BCLR_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_bclr_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_bclr_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_bclr_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_bclr_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_BNEG_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_bneg_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_bneg_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_bneg_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_bneg_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_BSET_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_bset_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_bset_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_bset_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_bset_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ADD_A_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_add_a_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_add_a_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_add_a_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_add_a_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ADDS_A_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_adds_a_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_adds_a_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_adds_a_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_adds_a_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ADDS_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_adds_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_adds_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_adds_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_adds_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ADDS_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_adds_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_adds_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_adds_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_adds_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ADDV_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_addv_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - 
gen_helper_msa_addv_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_addv_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_addv_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_AVE_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_ave_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_ave_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_ave_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_ave_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_AVE_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_ave_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_ave_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_ave_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_ave_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_AVER_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_aver_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_aver_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_aver_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_aver_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_AVER_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_aver_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_aver_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_aver_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_aver_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_CEQ_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_ceq_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_ceq_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_ceq_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_ceq_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_CLE_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_cle_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_cle_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_cle_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_cle_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_CLE_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_cle_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_cle_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_cle_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_cle_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_CLT_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_clt_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_clt_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_clt_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_clt_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_CLT_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_clt_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_clt_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_clt_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_clt_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_DIV_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_div_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_div_s_h(cpu_env, twd, 
tws, twt); - break; - case DF_WORD: - gen_helper_msa_div_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_div_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_DIV_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_div_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_div_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_div_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_div_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MAX_A_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_max_a_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_max_a_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_max_a_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_max_a_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MAX_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_max_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_max_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_max_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_max_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MAX_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_max_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_max_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_max_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_max_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MIN_A_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_min_a_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_min_a_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_min_a_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_min_a_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MIN_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_min_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_min_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_min_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_min_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MIN_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_min_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_min_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_min_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_min_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MOD_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_mod_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_mod_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_mod_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_mod_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MOD_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_mod_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_mod_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_mod_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_mod_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MADDV_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_maddv_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_maddv_h(cpu_env, twd, tws, twt); - break; - case 
DF_WORD: - gen_helper_msa_maddv_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_maddv_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MSUBV_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_msubv_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_msubv_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_msubv_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_msubv_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ASUB_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_asub_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_asub_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_asub_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_asub_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ASUB_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_asub_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_asub_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_asub_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_asub_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ILVEV_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_ilvev_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_ilvev_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_ilvev_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_ilvev_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ILVOD_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_ilvod_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_ilvod_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_ilvod_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_ilvod_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ILVL_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_ilvl_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_ilvl_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_ilvl_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_ilvl_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ILVR_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_ilvr_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_ilvr_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_ilvr_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_ilvr_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_PCKEV_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_pckev_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_pckev_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_pckev_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_pckev_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_PCKOD_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_pckod_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_pckod_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_pckod_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_pckod_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SLL_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_sll_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_sll_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_sll_w(cpu_env, 
twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_sll_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SRA_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_sra_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_sra_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_sra_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_sra_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SRAR_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_srar_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_srar_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_srar_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_srar_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SRL_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_srl_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_srl_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_srl_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_srl_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SRLR_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_srlr_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_srlr_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_srlr_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_srlr_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SUBS_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_subs_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_subs_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_subs_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_subs_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MULV_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_mulv_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_mulv_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_mulv_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_mulv_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SLD_df: - gen_helper_msa_sld_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_VSHF_df: - gen_helper_msa_vshf_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_SUBV_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_subv_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_subv_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_subv_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_subv_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SUBS_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_subs_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_subs_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_subs_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_subs_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SPLAT_df: - gen_helper_msa_splat_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_SUBSUS_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_subsus_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_subsus_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_subsus_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_subsus_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SUBSUU_S_df: - switch 
(df) { - case DF_BYTE: - gen_helper_msa_subsuu_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_subsuu_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_subsuu_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_subsuu_s_d(cpu_env, twd, tws, twt); - break; - } - break; - - case OPC_DOTP_S_df: - case OPC_DOTP_U_df: - case OPC_DPADD_S_df: - case OPC_DPADD_U_df: - case OPC_DPSUB_S_df: - case OPC_HADD_S_df: - case OPC_DPSUB_U_df: - case OPC_HADD_U_df: - case OPC_HSUB_S_df: - case OPC_HSUB_U_df: - if (df == DF_BYTE) { - gen_reserved_instruction(ctx); - break; - } - switch (MASK_MSA_3R(ctx->opcode)) { - case OPC_HADD_S_df: - switch (df) { - case DF_HALF: - gen_helper_msa_hadd_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_hadd_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_hadd_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_HADD_U_df: - switch (df) { - case DF_HALF: - gen_helper_msa_hadd_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_hadd_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_hadd_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_HSUB_S_df: - switch (df) { - case DF_HALF: - gen_helper_msa_hsub_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_hsub_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_hsub_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_HSUB_U_df: - switch (df) { - case DF_HALF: - gen_helper_msa_hsub_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_hsub_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_hsub_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_DOTP_S_df: - switch (df) { - case DF_HALF: - gen_helper_msa_dotp_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_dotp_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_dotp_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_DOTP_U_df: - switch (df) { - case DF_HALF: - gen_helper_msa_dotp_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_dotp_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_dotp_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_DPADD_S_df: - switch (df) { - case DF_HALF: - gen_helper_msa_dpadd_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_dpadd_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_dpadd_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_DPADD_U_df: - switch (df) { - case DF_HALF: - gen_helper_msa_dpadd_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_dpadd_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_dpadd_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_DPSUB_S_df: - switch (df) { - case DF_HALF: - gen_helper_msa_dpsub_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_dpsub_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_dpsub_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_DPSUB_U_df: - switch (df) { - case DF_HALF: - gen_helper_msa_dpsub_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_dpsub_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_dpsub_u_d(cpu_env, twd, tws, twt); - break; - } - break; - } - break; - default: - MIPS_INVAL("MSA instruction"); - 
gen_reserved_instruction(ctx); - break; - } - tcg_temp_free_i32(twd); - tcg_temp_free_i32(tws); - tcg_temp_free_i32(twt); - tcg_temp_free_i32(tdf); -} - -static void gen_msa_elm_3e(DisasContext *ctx) -{ -#define MASK_MSA_ELM_DF3E(op) (MASK_MSA_MINOR(op) | (op & (0x3FF << 16))) - uint8_t source = (ctx->opcode >> 11) & 0x1f; - uint8_t dest = (ctx->opcode >> 6) & 0x1f; - TCGv telm = tcg_temp_new(); - TCGv_i32 tsr = tcg_const_i32(source); - TCGv_i32 tdt = tcg_const_i32(dest); - - switch (MASK_MSA_ELM_DF3E(ctx->opcode)) { - case OPC_CTCMSA: - gen_load_gpr(telm, source); - gen_helper_msa_ctcmsa(cpu_env, telm, tdt); - break; - case OPC_CFCMSA: - gen_helper_msa_cfcmsa(telm, cpu_env, tsr); - gen_store_gpr(telm, dest); - break; - case OPC_MOVE_V: - gen_helper_msa_move_v(cpu_env, tdt, tsr); - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - break; - } - - tcg_temp_free(telm); - tcg_temp_free_i32(tdt); - tcg_temp_free_i32(tsr); -} - -static void gen_msa_elm_df(DisasContext *ctx, uint32_t df, uint32_t n) -{ -#define MASK_MSA_ELM(op) (MASK_MSA_MINOR(op) | (op & (0xf << 22))) - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - - TCGv_i32 tws = tcg_const_i32(ws); - TCGv_i32 twd = tcg_const_i32(wd); - TCGv_i32 tn = tcg_const_i32(n); - TCGv_i32 tdf = tcg_const_i32(df); - - switch (MASK_MSA_ELM(ctx->opcode)) { - case OPC_SLDI_df: - gen_helper_msa_sldi_df(cpu_env, tdf, twd, tws, tn); - break; - case OPC_SPLATI_df: - gen_helper_msa_splati_df(cpu_env, tdf, twd, tws, tn); - break; - case OPC_INSVE_df: - gen_helper_msa_insve_df(cpu_env, tdf, twd, tws, tn); - break; - case OPC_COPY_S_df: - case OPC_COPY_U_df: - case OPC_INSERT_df: -#if !defined(TARGET_MIPS64) - /* Double format valid only for MIPS64 */ - if (df == DF_DOUBLE) { - gen_reserved_instruction(ctx); - break; - } - if ((MASK_MSA_ELM(ctx->opcode) == OPC_COPY_U_df) && - (df == DF_WORD)) { - gen_reserved_instruction(ctx); - break; - } -#endif - switch (MASK_MSA_ELM(ctx->opcode)) { - case OPC_COPY_S_df: - if (likely(wd != 0)) { - switch (df) { - case DF_BYTE: - gen_helper_msa_copy_s_b(cpu_env, twd, tws, tn); - break; - case DF_HALF: - gen_helper_msa_copy_s_h(cpu_env, twd, tws, tn); - break; - case DF_WORD: - gen_helper_msa_copy_s_w(cpu_env, twd, tws, tn); - break; -#if defined(TARGET_MIPS64) - case DF_DOUBLE: - gen_helper_msa_copy_s_d(cpu_env, twd, tws, tn); - break; -#endif - default: - assert(0); - } - } - break; - case OPC_COPY_U_df: - if (likely(wd != 0)) { - switch (df) { - case DF_BYTE: - gen_helper_msa_copy_u_b(cpu_env, twd, tws, tn); - break; - case DF_HALF: - gen_helper_msa_copy_u_h(cpu_env, twd, tws, tn); - break; -#if defined(TARGET_MIPS64) - case DF_WORD: - gen_helper_msa_copy_u_w(cpu_env, twd, tws, tn); - break; -#endif - default: - assert(0); - } - } - break; - case OPC_INSERT_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_insert_b(cpu_env, twd, tws, tn); - break; - case DF_HALF: - gen_helper_msa_insert_h(cpu_env, twd, tws, tn); - break; - case DF_WORD: - gen_helper_msa_insert_w(cpu_env, twd, tws, tn); - break; -#if defined(TARGET_MIPS64) - case DF_DOUBLE: - gen_helper_msa_insert_d(cpu_env, twd, tws, tn); - break; -#endif - default: - assert(0); - } - break; - } - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - } - tcg_temp_free_i32(twd); - tcg_temp_free_i32(tws); - tcg_temp_free_i32(tn); - tcg_temp_free_i32(tdf); -} - -static void gen_msa_elm(DisasContext *ctx) -{ - uint8_t dfn = (ctx->opcode >> 16) & 0x3f; - uint32_t df = 0, n = 
0; - - if ((dfn & 0x30) == 0x00) { - n = dfn & 0x0f; - df = DF_BYTE; - } else if ((dfn & 0x38) == 0x20) { - n = dfn & 0x07; - df = DF_HALF; - } else if ((dfn & 0x3c) == 0x30) { - n = dfn & 0x03; - df = DF_WORD; - } else if ((dfn & 0x3e) == 0x38) { - n = dfn & 0x01; - df = DF_DOUBLE; - } else if (dfn == 0x3E) { - /* CTCMSA, CFCMSA, MOVE.V */ - gen_msa_elm_3e(ctx); - return; - } else { - gen_reserved_instruction(ctx); - return; - } - - gen_msa_elm_df(ctx, df, n); -} - -static void gen_msa_3rf(DisasContext *ctx) -{ -#define MASK_MSA_3RF(op) (MASK_MSA_MINOR(op) | (op & (0xf << 22))) - uint8_t df = (ctx->opcode >> 21) & 0x1; - uint8_t wt = (ctx->opcode >> 16) & 0x1f; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - - TCGv_i32 twd = tcg_const_i32(wd); - TCGv_i32 tws = tcg_const_i32(ws); - TCGv_i32 twt = tcg_const_i32(wt); - TCGv_i32 tdf = tcg_temp_new_i32(); - - /* adjust df value for floating-point instruction */ - tcg_gen_movi_i32(tdf, df + 2); - - switch (MASK_MSA_3RF(ctx->opcode)) { - case OPC_FCAF_df: - gen_helper_msa_fcaf_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FADD_df: - gen_helper_msa_fadd_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCUN_df: - gen_helper_msa_fcun_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSUB_df: - gen_helper_msa_fsub_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCOR_df: - gen_helper_msa_fcor_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCEQ_df: - gen_helper_msa_fceq_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMUL_df: - gen_helper_msa_fmul_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCUNE_df: - gen_helper_msa_fcune_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCUEQ_df: - gen_helper_msa_fcueq_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FDIV_df: - gen_helper_msa_fdiv_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCNE_df: - gen_helper_msa_fcne_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCLT_df: - gen_helper_msa_fclt_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMADD_df: - gen_helper_msa_fmadd_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_MUL_Q_df: - tcg_gen_movi_i32(tdf, df + 1); - gen_helper_msa_mul_q_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCULT_df: - gen_helper_msa_fcult_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMSUB_df: - gen_helper_msa_fmsub_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_MADD_Q_df: - tcg_gen_movi_i32(tdf, df + 1); - gen_helper_msa_madd_q_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCLE_df: - gen_helper_msa_fcle_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_MSUB_Q_df: - tcg_gen_movi_i32(tdf, df + 1); - gen_helper_msa_msub_q_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCULE_df: - gen_helper_msa_fcule_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FEXP2_df: - gen_helper_msa_fexp2_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSAF_df: - gen_helper_msa_fsaf_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FEXDO_df: - gen_helper_msa_fexdo_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSUN_df: - gen_helper_msa_fsun_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSOR_df: - gen_helper_msa_fsor_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSEQ_df: - gen_helper_msa_fseq_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FTQ_df: - gen_helper_msa_ftq_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSUNE_df: - gen_helper_msa_fsune_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSUEQ_df: - 
gen_helper_msa_fsueq_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSNE_df: - gen_helper_msa_fsne_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSLT_df: - gen_helper_msa_fslt_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMIN_df: - gen_helper_msa_fmin_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_MULR_Q_df: - tcg_gen_movi_i32(tdf, df + 1); - gen_helper_msa_mulr_q_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSULT_df: - gen_helper_msa_fsult_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMIN_A_df: - gen_helper_msa_fmin_a_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_MADDR_Q_df: - tcg_gen_movi_i32(tdf, df + 1); - gen_helper_msa_maddr_q_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSLE_df: - gen_helper_msa_fsle_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMAX_df: - gen_helper_msa_fmax_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_MSUBR_Q_df: - tcg_gen_movi_i32(tdf, df + 1); - gen_helper_msa_msubr_q_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSULE_df: - gen_helper_msa_fsule_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMAX_A_df: - gen_helper_msa_fmax_a_df(cpu_env, tdf, twd, tws, twt); - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - break; - } - - tcg_temp_free_i32(twd); - tcg_temp_free_i32(tws); - tcg_temp_free_i32(twt); - tcg_temp_free_i32(tdf); -} - -static void gen_msa_2r(DisasContext *ctx) -{ -#define MASK_MSA_2R(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21)) | \ - (op & (0x7 << 18))) - uint8_t wt = (ctx->opcode >> 16) & 0x1f; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - uint8_t df = (ctx->opcode >> 16) & 0x3; - TCGv_i32 twd = tcg_const_i32(wd); - TCGv_i32 tws = tcg_const_i32(ws); - TCGv_i32 twt = tcg_const_i32(wt); - TCGv_i32 tdf = tcg_const_i32(df); - - switch (MASK_MSA_2R(ctx->opcode)) { - case OPC_FILL_df: -#if !defined(TARGET_MIPS64) - /* Double format valid only for MIPS64 */ - if (df == DF_DOUBLE) { - gen_reserved_instruction(ctx); - break; - } -#endif - gen_helper_msa_fill_df(cpu_env, tdf, twd, tws); /* trs */ - break; - case OPC_NLOC_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_nloc_b(cpu_env, twd, tws); - break; - case DF_HALF: - gen_helper_msa_nloc_h(cpu_env, twd, tws); - break; - case DF_WORD: - gen_helper_msa_nloc_w(cpu_env, twd, tws); - break; - case DF_DOUBLE: - gen_helper_msa_nloc_d(cpu_env, twd, tws); - break; - } - break; - case OPC_NLZC_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_nlzc_b(cpu_env, twd, tws); - break; - case DF_HALF: - gen_helper_msa_nlzc_h(cpu_env, twd, tws); - break; - case DF_WORD: - gen_helper_msa_nlzc_w(cpu_env, twd, tws); - break; - case DF_DOUBLE: - gen_helper_msa_nlzc_d(cpu_env, twd, tws); - break; - } - break; - case OPC_PCNT_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_pcnt_b(cpu_env, twd, tws); - break; - case DF_HALF: - gen_helper_msa_pcnt_h(cpu_env, twd, tws); - break; - case DF_WORD: - gen_helper_msa_pcnt_w(cpu_env, twd, tws); - break; - case DF_DOUBLE: - gen_helper_msa_pcnt_d(cpu_env, twd, tws); - break; - } - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - break; - } - - tcg_temp_free_i32(twd); - tcg_temp_free_i32(tws); - tcg_temp_free_i32(twt); - tcg_temp_free_i32(tdf); -} - -static void gen_msa_2rf(DisasContext *ctx) -{ -#define MASK_MSA_2RF(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21)) | \ - (op & (0xf << 17))) - uint8_t wt = (ctx->opcode >> 16) & 0x1f; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd 
= (ctx->opcode >> 6) & 0x1f; - uint8_t df = (ctx->opcode >> 16) & 0x1; - TCGv_i32 twd = tcg_const_i32(wd); - TCGv_i32 tws = tcg_const_i32(ws); - TCGv_i32 twt = tcg_const_i32(wt); - /* adjust df value for floating-point instruction */ - TCGv_i32 tdf = tcg_const_i32(df + 2); - - switch (MASK_MSA_2RF(ctx->opcode)) { - case OPC_FCLASS_df: - gen_helper_msa_fclass_df(cpu_env, tdf, twd, tws); - break; - case OPC_FTRUNC_S_df: - gen_helper_msa_ftrunc_s_df(cpu_env, tdf, twd, tws); - break; - case OPC_FTRUNC_U_df: - gen_helper_msa_ftrunc_u_df(cpu_env, tdf, twd, tws); - break; - case OPC_FSQRT_df: - gen_helper_msa_fsqrt_df(cpu_env, tdf, twd, tws); - break; - case OPC_FRSQRT_df: - gen_helper_msa_frsqrt_df(cpu_env, tdf, twd, tws); - break; - case OPC_FRCP_df: - gen_helper_msa_frcp_df(cpu_env, tdf, twd, tws); - break; - case OPC_FRINT_df: - gen_helper_msa_frint_df(cpu_env, tdf, twd, tws); - break; - case OPC_FLOG2_df: - gen_helper_msa_flog2_df(cpu_env, tdf, twd, tws); - break; - case OPC_FEXUPL_df: - gen_helper_msa_fexupl_df(cpu_env, tdf, twd, tws); - break; - case OPC_FEXUPR_df: - gen_helper_msa_fexupr_df(cpu_env, tdf, twd, tws); - break; - case OPC_FFQL_df: - gen_helper_msa_ffql_df(cpu_env, tdf, twd, tws); - break; - case OPC_FFQR_df: - gen_helper_msa_ffqr_df(cpu_env, tdf, twd, tws); - break; - case OPC_FTINT_S_df: - gen_helper_msa_ftint_s_df(cpu_env, tdf, twd, tws); - break; - case OPC_FTINT_U_df: - gen_helper_msa_ftint_u_df(cpu_env, tdf, twd, tws); - break; - case OPC_FFINT_S_df: - gen_helper_msa_ffint_s_df(cpu_env, tdf, twd, tws); - break; - case OPC_FFINT_U_df: - gen_helper_msa_ffint_u_df(cpu_env, tdf, twd, tws); - break; - } - - tcg_temp_free_i32(twd); - tcg_temp_free_i32(tws); - tcg_temp_free_i32(twt); - tcg_temp_free_i32(tdf); -} - -static void gen_msa_vec_v(DisasContext *ctx) -{ -#define MASK_MSA_VEC(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21))) - uint8_t wt = (ctx->opcode >> 16) & 0x1f; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - TCGv_i32 twd = tcg_const_i32(wd); - TCGv_i32 tws = tcg_const_i32(ws); - TCGv_i32 twt = tcg_const_i32(wt); - - switch (MASK_MSA_VEC(ctx->opcode)) { - case OPC_AND_V: - gen_helper_msa_and_v(cpu_env, twd, tws, twt); - break; - case OPC_OR_V: - gen_helper_msa_or_v(cpu_env, twd, tws, twt); - break; - case OPC_NOR_V: - gen_helper_msa_nor_v(cpu_env, twd, tws, twt); - break; - case OPC_XOR_V: - gen_helper_msa_xor_v(cpu_env, twd, tws, twt); - break; - case OPC_BMNZ_V: - gen_helper_msa_bmnz_v(cpu_env, twd, tws, twt); - break; - case OPC_BMZ_V: - gen_helper_msa_bmz_v(cpu_env, twd, tws, twt); - break; - case OPC_BSEL_V: - gen_helper_msa_bsel_v(cpu_env, twd, tws, twt); - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - break; - } - - tcg_temp_free_i32(twd); - tcg_temp_free_i32(tws); - tcg_temp_free_i32(twt); -} - -static void gen_msa_vec(DisasContext *ctx) -{ - switch (MASK_MSA_VEC(ctx->opcode)) { - case OPC_AND_V: - case OPC_OR_V: - case OPC_NOR_V: - case OPC_XOR_V: - case OPC_BMNZ_V: - case OPC_BMZ_V: - case OPC_BSEL_V: - gen_msa_vec_v(ctx); - break; - case OPC_MSA_2R: - gen_msa_2r(ctx); - break; - case OPC_MSA_2RF: - gen_msa_2rf(ctx); - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - break; - } -} - -static bool trans_MSA(DisasContext *ctx, arg_MSA *a) -{ - uint32_t opcode = ctx->opcode; - - check_msa_access(ctx); - - switch (MASK_MSA_MINOR(opcode)) { - case OPC_MSA_I8_00: - case OPC_MSA_I8_01: - case OPC_MSA_I8_02: - gen_msa_i8(ctx); - break; - case 
OPC_MSA_I5_06: - case OPC_MSA_I5_07: - gen_msa_i5(ctx); - break; - case OPC_MSA_BIT_09: - case OPC_MSA_BIT_0A: - gen_msa_bit(ctx); - break; - case OPC_MSA_3R_0D: - case OPC_MSA_3R_0E: - case OPC_MSA_3R_0F: - case OPC_MSA_3R_10: - case OPC_MSA_3R_11: - case OPC_MSA_3R_12: - case OPC_MSA_3R_13: - case OPC_MSA_3R_14: - case OPC_MSA_3R_15: - gen_msa_3r(ctx); - break; - case OPC_MSA_ELM: - gen_msa_elm(ctx); - break; - case OPC_MSA_3RF_1A: - case OPC_MSA_3RF_1B: - case OPC_MSA_3RF_1C: - gen_msa_3rf(ctx); - break; - case OPC_MSA_VEC: - gen_msa_vec(ctx); - break; - case OPC_LD_B: - case OPC_LD_H: - case OPC_LD_W: - case OPC_LD_D: - case OPC_ST_B: - case OPC_ST_H: - case OPC_ST_W: - case OPC_ST_D: - { - int32_t s10 = sextract32(ctx->opcode, 16, 10); - uint8_t rs = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - uint8_t df = (ctx->opcode >> 0) & 0x3; - - TCGv_i32 twd = tcg_const_i32(wd); - TCGv taddr = tcg_temp_new(); - gen_base_offset_addr(ctx, taddr, rs, s10 << df); - - switch (MASK_MSA_MINOR(opcode)) { - case OPC_LD_B: - gen_helper_msa_ld_b(cpu_env, twd, taddr); - break; - case OPC_LD_H: - gen_helper_msa_ld_h(cpu_env, twd, taddr); - break; - case OPC_LD_W: - gen_helper_msa_ld_w(cpu_env, twd, taddr); - break; - case OPC_LD_D: - gen_helper_msa_ld_d(cpu_env, twd, taddr); - break; - case OPC_ST_B: - gen_helper_msa_st_b(cpu_env, twd, taddr); - break; - case OPC_ST_H: - gen_helper_msa_st_h(cpu_env, twd, taddr); - break; - case OPC_ST_W: - gen_helper_msa_st_w(cpu_env, twd, taddr); - break; - case OPC_ST_D: - gen_helper_msa_st_d(cpu_env, twd, taddr); - break; - } - - tcg_temp_free_i32(twd); - tcg_temp_free(taddr); - } - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - break; - } + gen_msa_i8(cpu_env, + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws), + tcg_constant_i32(a->sa)); return true; } -static bool trans_LSA(DisasContext *ctx, arg_rtype *a) +TRANS(ANDI, trans_msa_i8, gen_helper_msa_andi_b); +TRANS(ORI, trans_msa_i8, gen_helper_msa_ori_b); +TRANS(NORI, trans_msa_i8, gen_helper_msa_nori_b); +TRANS(XORI, trans_msa_i8, gen_helper_msa_xori_b); +TRANS(BMNZI, trans_msa_i8, gen_helper_msa_bmnzi_b); +TRANS(BMZI, trans_msa_i8, gen_helper_msa_bmzi_b); +TRANS(BSELI, trans_msa_i8, gen_helper_msa_bseli_b); + +static bool trans_SHF(DisasContext *ctx, arg_msa_i *a) +{ + if (a->df == DF_DOUBLE) { + return false; + } + + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_helper_msa_shf_df(cpu_env, + tcg_constant_i32(a->df), + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws), + tcg_constant_i32(a->sa)); + + return true; +} + +static bool trans_msa_i5(DisasContext *ctx, arg_msa_i *a, + gen_helper_piiii *gen_msa_i5) +{ + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_msa_i5(cpu_env, + tcg_constant_i32(a->df), + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws), + tcg_constant_i32(a->sa)); + + return true; +} + +TRANS(ADDVI, trans_msa_i5, gen_helper_msa_addvi_df); +TRANS(SUBVI, trans_msa_i5, gen_helper_msa_subvi_df); +TRANS(MAXI_S, trans_msa_i5, gen_helper_msa_maxi_s_df); +TRANS(MAXI_U, trans_msa_i5, gen_helper_msa_maxi_u_df); +TRANS(MINI_S, trans_msa_i5, gen_helper_msa_mini_s_df); +TRANS(MINI_U, trans_msa_i5, gen_helper_msa_mini_u_df); +TRANS(CLTI_S, trans_msa_i5, gen_helper_msa_clti_s_df); +TRANS(CLTI_U, trans_msa_i5, gen_helper_msa_clti_u_df); +TRANS(CLEI_S, trans_msa_i5, gen_helper_msa_clei_s_df); +TRANS(CLEI_U, trans_msa_i5, gen_helper_msa_clei_u_df); +TRANS(CEQI, trans_msa_i5, gen_helper_msa_ceqi_df); + +static bool 
trans_LDI(DisasContext *ctx, arg_msa_ldi *a) +{ + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_helper_msa_ldi_df(cpu_env, + tcg_constant_i32(a->df), + tcg_constant_i32(a->wd), + tcg_constant_i32(a->sa)); + + return true; +} + +static bool trans_msa_bit(DisasContext *ctx, arg_msa_bit *a, + gen_helper_piiii *gen_msa_bit) +{ + if (a->df < 0) { + return false; + } + + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_msa_bit(cpu_env, + tcg_constant_i32(a->df), + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws), + tcg_constant_i32(a->m)); + + return true; +} + +TRANS(SLLI, trans_msa_bit, gen_helper_msa_slli_df); +TRANS(SRAI, trans_msa_bit, gen_helper_msa_srai_df); +TRANS(SRLI, trans_msa_bit, gen_helper_msa_srli_df); +TRANS(BCLRI, trans_msa_bit, gen_helper_msa_bclri_df); +TRANS(BSETI, trans_msa_bit, gen_helper_msa_bseti_df); +TRANS(BNEGI, trans_msa_bit, gen_helper_msa_bnegi_df); +TRANS(BINSLI, trans_msa_bit, gen_helper_msa_binsli_df); +TRANS(BINSRI, trans_msa_bit, gen_helper_msa_binsri_df); +TRANS(SAT_S, trans_msa_bit, gen_helper_msa_sat_s_df); +TRANS(SAT_U, trans_msa_bit, gen_helper_msa_sat_u_df); +TRANS(SRARI, trans_msa_bit, gen_helper_msa_srari_df); +TRANS(SRLRI, trans_msa_bit, gen_helper_msa_srlri_df); + +static bool trans_msa_3rf(DisasContext *ctx, arg_msa_r *a, + gen_helper_piiii *gen_msa_3rf) +{ + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_msa_3rf(cpu_env, + tcg_constant_i32(a->df), + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws), + tcg_constant_i32(a->wt)); + + return true; +} + +static bool trans_msa_3r(DisasContext *ctx, arg_msa_r *a, + gen_helper_piii *gen_msa_3r) +{ + if (!gen_msa_3r) { + return false; + } + + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_msa_3r(cpu_env, + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws), + tcg_constant_i32(a->wt)); + + return true; +} + +TRANS(AND_V, trans_msa_3r, gen_helper_msa_and_v); +TRANS(OR_V, trans_msa_3r, gen_helper_msa_or_v); +TRANS(NOR_V, trans_msa_3r, gen_helper_msa_nor_v); +TRANS(XOR_V, trans_msa_3r, gen_helper_msa_xor_v); +TRANS(BMNZ_V, trans_msa_3r, gen_helper_msa_bmnz_v); +TRANS(BMZ_V, trans_msa_3r, gen_helper_msa_bmz_v); +TRANS(BSEL_V, trans_msa_3r, gen_helper_msa_bsel_v); + +TRANS_DF_iii(SLL, trans_msa_3r, gen_helper_msa_sll); +TRANS_DF_iii(SRA, trans_msa_3r, gen_helper_msa_sra); +TRANS_DF_iii(SRL, trans_msa_3r, gen_helper_msa_srl); +TRANS_DF_iii(BCLR, trans_msa_3r, gen_helper_msa_bclr); +TRANS_DF_iii(BSET, trans_msa_3r, gen_helper_msa_bset); +TRANS_DF_iii(BNEG, trans_msa_3r, gen_helper_msa_bneg); +TRANS_DF_iii(BINSL, trans_msa_3r, gen_helper_msa_binsl); +TRANS_DF_iii(BINSR, trans_msa_3r, gen_helper_msa_binsr); + +TRANS_DF_iii(ADDV, trans_msa_3r, gen_helper_msa_addv); +TRANS_DF_iii(SUBV, trans_msa_3r, gen_helper_msa_subv); +TRANS_DF_iii(MAX_S, trans_msa_3r, gen_helper_msa_max_s); +TRANS_DF_iii(MAX_U, trans_msa_3r, gen_helper_msa_max_u); +TRANS_DF_iii(MIN_S, trans_msa_3r, gen_helper_msa_min_s); +TRANS_DF_iii(MIN_U, trans_msa_3r, gen_helper_msa_min_u); +TRANS_DF_iii(MAX_A, trans_msa_3r, gen_helper_msa_max_a); +TRANS_DF_iii(MIN_A, trans_msa_3r, gen_helper_msa_min_a); + +TRANS_DF_iii(CEQ, trans_msa_3r, gen_helper_msa_ceq); +TRANS_DF_iii(CLT_S, trans_msa_3r, gen_helper_msa_clt_s); +TRANS_DF_iii(CLT_U, trans_msa_3r, gen_helper_msa_clt_u); +TRANS_DF_iii(CLE_S, trans_msa_3r, gen_helper_msa_cle_s); +TRANS_DF_iii(CLE_U, trans_msa_3r, gen_helper_msa_cle_u); + +TRANS_DF_iii(ADD_A, trans_msa_3r, gen_helper_msa_add_a); +TRANS_DF_iii(ADDS_A, trans_msa_3r, gen_helper_msa_adds_a); 
+TRANS_DF_iii(ADDS_S, trans_msa_3r, gen_helper_msa_adds_s); +TRANS_DF_iii(ADDS_U, trans_msa_3r, gen_helper_msa_adds_u); +TRANS_DF_iii(AVE_S, trans_msa_3r, gen_helper_msa_ave_s); +TRANS_DF_iii(AVE_U, trans_msa_3r, gen_helper_msa_ave_u); +TRANS_DF_iii(AVER_S, trans_msa_3r, gen_helper_msa_aver_s); +TRANS_DF_iii(AVER_U, trans_msa_3r, gen_helper_msa_aver_u); + +TRANS_DF_iii(SUBS_S, trans_msa_3r, gen_helper_msa_subs_s); +TRANS_DF_iii(SUBS_U, trans_msa_3r, gen_helper_msa_subs_u); +TRANS_DF_iii(SUBSUS_U, trans_msa_3r, gen_helper_msa_subsus_u); +TRANS_DF_iii(SUBSUU_S, trans_msa_3r, gen_helper_msa_subsuu_s); +TRANS_DF_iii(ASUB_S, trans_msa_3r, gen_helper_msa_asub_s); +TRANS_DF_iii(ASUB_U, trans_msa_3r, gen_helper_msa_asub_u); + +TRANS_DF_iii(MULV, trans_msa_3r, gen_helper_msa_mulv); +TRANS_DF_iii(MADDV, trans_msa_3r, gen_helper_msa_maddv); +TRANS_DF_iii(MSUBV, trans_msa_3r, gen_helper_msa_msubv); +TRANS_DF_iii(DIV_S, trans_msa_3r, gen_helper_msa_div_s); +TRANS_DF_iii(DIV_U, trans_msa_3r, gen_helper_msa_div_u); +TRANS_DF_iii(MOD_S, trans_msa_3r, gen_helper_msa_mod_s); +TRANS_DF_iii(MOD_U, trans_msa_3r, gen_helper_msa_mod_u); + +TRANS_DF_iii_b(DOTP_S, trans_msa_3r, gen_helper_msa_dotp_s); +TRANS_DF_iii_b(DOTP_U, trans_msa_3r, gen_helper_msa_dotp_u); +TRANS_DF_iii_b(DPADD_S, trans_msa_3r, gen_helper_msa_dpadd_s); +TRANS_DF_iii_b(DPADD_U, trans_msa_3r, gen_helper_msa_dpadd_u); +TRANS_DF_iii_b(DPSUB_S, trans_msa_3r, gen_helper_msa_dpsub_s); +TRANS_DF_iii_b(DPSUB_U, trans_msa_3r, gen_helper_msa_dpsub_u); + +TRANS(SLD, trans_msa_3rf, gen_helper_msa_sld_df); +TRANS(SPLAT, trans_msa_3rf, gen_helper_msa_splat_df); +TRANS_DF_iii(PCKEV, trans_msa_3r, gen_helper_msa_pckev); +TRANS_DF_iii(PCKOD, trans_msa_3r, gen_helper_msa_pckod); +TRANS_DF_iii(ILVL, trans_msa_3r, gen_helper_msa_ilvl); +TRANS_DF_iii(ILVR, trans_msa_3r, gen_helper_msa_ilvr); +TRANS_DF_iii(ILVEV, trans_msa_3r, gen_helper_msa_ilvev); +TRANS_DF_iii(ILVOD, trans_msa_3r, gen_helper_msa_ilvod); + +TRANS(VSHF, trans_msa_3rf, gen_helper_msa_vshf_df); +TRANS_DF_iii(SRAR, trans_msa_3r, gen_helper_msa_srar); +TRANS_DF_iii(SRLR, trans_msa_3r, gen_helper_msa_srlr); +TRANS_DF_iii_b(HADD_S, trans_msa_3r, gen_helper_msa_hadd_s); +TRANS_DF_iii_b(HADD_U, trans_msa_3r, gen_helper_msa_hadd_u); +TRANS_DF_iii_b(HSUB_S, trans_msa_3r, gen_helper_msa_hsub_s); +TRANS_DF_iii_b(HSUB_U, trans_msa_3r, gen_helper_msa_hsub_u); + +static bool trans_MOVE_V(DisasContext *ctx, arg_msa_elm *a) +{ + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_helper_msa_move_v(cpu_env, + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws)); + + return true; +} + +static bool trans_CTCMSA(DisasContext *ctx, arg_msa_elm *a) +{ + TCGv telm; + + if (!check_msa_enabled(ctx)) { + return true; + } + + telm = tcg_temp_new(); + + gen_load_gpr(telm, a->ws); + gen_helper_msa_ctcmsa(cpu_env, telm, tcg_constant_i32(a->wd)); + + tcg_temp_free(telm); + + return true; +} + +static bool trans_CFCMSA(DisasContext *ctx, arg_msa_elm *a) +{ + TCGv telm; + + if (!check_msa_enabled(ctx)) { + return true; + } + + telm = tcg_temp_new(); + + gen_helper_msa_cfcmsa(telm, cpu_env, tcg_constant_i32(a->ws)); + gen_store_gpr(telm, a->wd); + + tcg_temp_free(telm); + + return true; +} + +static bool trans_msa_elm(DisasContext *ctx, arg_msa_elm_df *a, + gen_helper_piiii *gen_msa_elm_df) +{ + if (a->df < 0) { + return false; + } + + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_msa_elm_df(cpu_env, + tcg_constant_i32(a->df), + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws), + tcg_constant_i32(a->n)); + + 
return true; +} + +TRANS(SLDI, trans_msa_elm, gen_helper_msa_sldi_df); +TRANS(SPLATI, trans_msa_elm, gen_helper_msa_splati_df); +TRANS(INSVE, trans_msa_elm, gen_helper_msa_insve_df); + +static bool trans_msa_elm_fn(DisasContext *ctx, arg_msa_elm_df *a, + gen_helper_piii * const gen_msa_elm[4]) +{ + if (a->df < 0 || !gen_msa_elm[a->df]) { + return false; + } + + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_msa_elm[a->df](cpu_env, + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws), + tcg_constant_i32(a->n)); + + return true; +} + +#if defined(TARGET_MIPS64) +#define NULL_IF_MIPS32(function) function +#else +#define NULL_IF_MIPS32(function) NULL +#endif + +static bool trans_COPY_U(DisasContext *ctx, arg_msa_elm_df *a) +{ + if (a->wd == 0) { + /* Treat as NOP. */ + return true; + } + + static gen_helper_piii * const gen_msa_copy_u[4] = { + gen_helper_msa_copy_u_b, gen_helper_msa_copy_u_h, + NULL_IF_MIPS32(gen_helper_msa_copy_u_w), NULL + }; + + return trans_msa_elm_fn(ctx, a, gen_msa_copy_u); +} + +static bool trans_COPY_S(DisasContext *ctx, arg_msa_elm_df *a) +{ + if (a->wd == 0) { + /* Treat as NOP. */ + return true; + } + + static gen_helper_piii * const gen_msa_copy_s[4] = { + gen_helper_msa_copy_s_b, gen_helper_msa_copy_s_h, + gen_helper_msa_copy_s_w, NULL_IF_MIPS32(gen_helper_msa_copy_s_d) + }; + + return trans_msa_elm_fn(ctx, a, gen_msa_copy_s); +} + +static bool trans_INSERT(DisasContext *ctx, arg_msa_elm_df *a) +{ + static gen_helper_piii * const gen_msa_insert[4] = { + gen_helper_msa_insert_b, gen_helper_msa_insert_h, + gen_helper_msa_insert_w, NULL_IF_MIPS32(gen_helper_msa_insert_d) + }; + + return trans_msa_elm_fn(ctx, a, gen_msa_insert); +} + +TRANS(FCAF, trans_msa_3rf, gen_helper_msa_fcaf_df); +TRANS(FCUN, trans_msa_3rf, gen_helper_msa_fcun_df); +TRANS(FCEQ, trans_msa_3rf, gen_helper_msa_fceq_df); +TRANS(FCUEQ, trans_msa_3rf, gen_helper_msa_fcueq_df); +TRANS(FCLT, trans_msa_3rf, gen_helper_msa_fclt_df); +TRANS(FCULT, trans_msa_3rf, gen_helper_msa_fcult_df); +TRANS(FCLE, trans_msa_3rf, gen_helper_msa_fcle_df); +TRANS(FCULE, trans_msa_3rf, gen_helper_msa_fcule_df); +TRANS(FSAF, trans_msa_3rf, gen_helper_msa_fsaf_df); +TRANS(FSUN, trans_msa_3rf, gen_helper_msa_fsun_df); +TRANS(FSEQ, trans_msa_3rf, gen_helper_msa_fseq_df); +TRANS(FSUEQ, trans_msa_3rf, gen_helper_msa_fsueq_df); +TRANS(FSLT, trans_msa_3rf, gen_helper_msa_fslt_df); +TRANS(FSULT, trans_msa_3rf, gen_helper_msa_fsult_df); +TRANS(FSLE, trans_msa_3rf, gen_helper_msa_fsle_df); +TRANS(FSULE, trans_msa_3rf, gen_helper_msa_fsule_df); + +TRANS(FADD, trans_msa_3rf, gen_helper_msa_fadd_df); +TRANS(FSUB, trans_msa_3rf, gen_helper_msa_fsub_df); +TRANS(FMUL, trans_msa_3rf, gen_helper_msa_fmul_df); +TRANS(FDIV, trans_msa_3rf, gen_helper_msa_fdiv_df); +TRANS(FMADD, trans_msa_3rf, gen_helper_msa_fmadd_df); +TRANS(FMSUB, trans_msa_3rf, gen_helper_msa_fmsub_df); +TRANS(FEXP2, trans_msa_3rf, gen_helper_msa_fexp2_df); +TRANS(FEXDO, trans_msa_3rf, gen_helper_msa_fexdo_df); +TRANS(FTQ, trans_msa_3rf, gen_helper_msa_ftq_df); +TRANS(FMIN, trans_msa_3rf, gen_helper_msa_fmin_df); +TRANS(FMIN_A, trans_msa_3rf, gen_helper_msa_fmin_a_df); +TRANS(FMAX, trans_msa_3rf, gen_helper_msa_fmax_df); +TRANS(FMAX_A, trans_msa_3rf, gen_helper_msa_fmax_a_df); + +TRANS(FCOR, trans_msa_3rf, gen_helper_msa_fcor_df); +TRANS(FCUNE, trans_msa_3rf, gen_helper_msa_fcune_df); +TRANS(FCNE, trans_msa_3rf, gen_helper_msa_fcne_df); +TRANS(MUL_Q, trans_msa_3rf, gen_helper_msa_mul_q_df); +TRANS(MADD_Q, trans_msa_3rf, gen_helper_msa_madd_q_df); +TRANS(MSUB_Q, 
trans_msa_3rf, gen_helper_msa_msub_q_df); +TRANS(FSOR, trans_msa_3rf, gen_helper_msa_fsor_df); +TRANS(FSUNE, trans_msa_3rf, gen_helper_msa_fsune_df); +TRANS(FSNE, trans_msa_3rf, gen_helper_msa_fsne_df); +TRANS(MULR_Q, trans_msa_3rf, gen_helper_msa_mulr_q_df); +TRANS(MADDR_Q, trans_msa_3rf, gen_helper_msa_maddr_q_df); +TRANS(MSUBR_Q, trans_msa_3rf, gen_helper_msa_msubr_q_df); + +static bool trans_msa_2r(DisasContext *ctx, arg_msa_r *a, + gen_helper_pii *gen_msa_2r) +{ + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_msa_2r(cpu_env, tcg_constant_i32(a->wd), tcg_constant_i32(a->ws)); + + return true; +} + +TRANS_DF_ii(PCNT, trans_msa_2r, gen_helper_msa_pcnt); +TRANS_DF_ii(NLOC, trans_msa_2r, gen_helper_msa_nloc); +TRANS_DF_ii(NLZC, trans_msa_2r, gen_helper_msa_nlzc); + +static bool trans_FILL(DisasContext *ctx, arg_msa_r *a) +{ + if (TARGET_LONG_BITS != 64 && a->df == DF_DOUBLE) { + /* Double format valid only for MIPS64 */ + return false; + } + + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_helper_msa_fill_df(cpu_env, + tcg_constant_i32(a->df), + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws)); + + return true; +} + +static bool trans_msa_2rf(DisasContext *ctx, arg_msa_r *a, + gen_helper_piii *gen_msa_2rf) +{ + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_msa_2rf(cpu_env, + tcg_constant_i32(a->df), + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws)); + + return true; +} + +TRANS(FCLASS, trans_msa_2rf, gen_helper_msa_fclass_df); +TRANS(FTRUNC_S, trans_msa_2rf, gen_helper_msa_ftrunc_s_df); +TRANS(FTRUNC_U, trans_msa_2rf, gen_helper_msa_ftrunc_u_df); +TRANS(FSQRT, trans_msa_2rf, gen_helper_msa_fsqrt_df); +TRANS(FRSQRT, trans_msa_2rf, gen_helper_msa_frsqrt_df); +TRANS(FRCP, trans_msa_2rf, gen_helper_msa_frcp_df); +TRANS(FRINT, trans_msa_2rf, gen_helper_msa_frint_df); +TRANS(FLOG2, trans_msa_2rf, gen_helper_msa_flog2_df); +TRANS(FEXUPL, trans_msa_2rf, gen_helper_msa_fexupl_df); +TRANS(FEXUPR, trans_msa_2rf, gen_helper_msa_fexupr_df); +TRANS(FFQL, trans_msa_2rf, gen_helper_msa_ffql_df); +TRANS(FFQR, trans_msa_2rf, gen_helper_msa_ffqr_df); +TRANS(FTINT_S, trans_msa_2rf, gen_helper_msa_ftint_s_df); +TRANS(FTINT_U, trans_msa_2rf, gen_helper_msa_ftint_u_df); +TRANS(FFINT_S, trans_msa_2rf, gen_helper_msa_ffint_s_df); +TRANS(FFINT_U, trans_msa_2rf, gen_helper_msa_ffint_u_df); + +static bool trans_msa_ldst(DisasContext *ctx, arg_msa_i *a, + gen_helper_piv *gen_msa_ldst) +{ + TCGv taddr; + + if (!check_msa_enabled(ctx)) { + return true; + } + + taddr = tcg_temp_new(); + + gen_base_offset_addr(ctx, taddr, a->ws, a->sa << a->df); + gen_msa_ldst(cpu_env, tcg_constant_i32(a->wd), taddr); + + tcg_temp_free(taddr); + + return true; +} + +TRANS_DF_iv(LD, trans_msa_ldst, gen_helper_msa_ld); +TRANS_DF_iv(ST, trans_msa_ldst, gen_helper_msa_st); + +static bool trans_LSA(DisasContext *ctx, arg_r *a) { return gen_lsa(ctx, a->rd, a->rt, a->rs, a->sa); } -static bool trans_DLSA(DisasContext *ctx, arg_rtype *a) +static bool trans_DLSA(DisasContext *ctx, arg_r *a) { if (TARGET_LONG_BITS != 64) { return false; diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc index 09e64a6948..812c111e3c 100644 --- a/target/mips/tcg/nanomips_translate.c.inc +++ b/target/mips/tcg/nanomips_translate.c.inc @@ -999,11 +999,11 @@ static void gen_llwp(DisasContext *ctx, uint32_t base, int16_t offset, gen_base_offset_addr(ctx, taddr, base, offset); tcg_gen_qemu_ld64(tval, taddr, ctx->mem_idx); -#ifdef TARGET_WORDS_BIGENDIAN - tcg_gen_extr_i64_tl(tmp2, tmp1, 
tval); -#else - tcg_gen_extr_i64_tl(tmp1, tmp2, tval); -#endif + if (cpu_is_bigendian(ctx)) { + tcg_gen_extr_i64_tl(tmp2, tmp1, tval); + } else { + tcg_gen_extr_i64_tl(tmp1, tmp2, tval); + } gen_store_gpr(tmp1, reg1); tcg_temp_free(tmp1); gen_store_gpr(tmp2, reg2); @@ -1035,11 +1035,11 @@ static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset, gen_load_gpr(tmp1, reg1); gen_load_gpr(tmp2, reg2); -#ifdef TARGET_WORDS_BIGENDIAN - tcg_gen_concat_tl_i64(tval, tmp2, tmp1); -#else - tcg_gen_concat_tl_i64(tval, tmp1, tmp2); -#endif + if (cpu_is_bigendian(ctx)) { + tcg_gen_concat_tl_i64(tval, tmp2, tmp1); + } else { + tcg_gen_concat_tl_i64(tval, tmp1, tmp2); + } tcg_gen_ld_i64(llval, cpu_env, offsetof(CPUMIPSState, llval_wp)); tcg_gen_atomic_cmpxchg_i64(val, taddr, llval, tval, @@ -1268,11 +1268,11 @@ static void gen_pool32a0_nanomips_insn(CPUMIPSState *env, DisasContext *ctx) switch (extract32(ctx->opcode, 10, 1)) { case NM_TEQ: check_nms(ctx); - gen_trap(ctx, OPC_TEQ, rs, rt, -1); + gen_trap(ctx, OPC_TEQ, rs, rt, -1, rd); break; case NM_TNE: check_nms(ctx); - gen_trap(ctx, OPC_TNE, rs, rt, -1); + gen_trap(ctx, OPC_TNE, rs, rt, -1, rd); break; } break; @@ -1597,7 +1597,7 @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc, check_dsp(ctx); switch (extract32(ctx->opcode, 12, 2)) { case NM_MTHLIP: - tcg_gen_movi_tl(t0, v2); + tcg_gen_movi_tl(t0, v2 >> 3); gen_helper_mthlip(t0, v0_t, cpu_env); break; case NM_SHILOV: @@ -1868,6 +1868,9 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, TCGv_i32 t2 = tcg_temp_new_i32(); TCGv_i32 t3 = tcg_temp_new_i32(); + if (acc || ctx->insn_flags & ISA_MIPS_R6) { + check_dsp_r2(ctx); + } gen_load_gpr(t0, rs); gen_load_gpr(t1, rt); tcg_gen_trunc_tl_i32(t2, t0); @@ -1925,6 +1928,9 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, TCGv_i32 t2 = tcg_temp_new_i32(); TCGv_i32 t3 = tcg_temp_new_i32(); + if (acc || ctx->insn_flags & ISA_MIPS_R6) { + check_dsp_r2(ctx); + } gen_load_gpr(t0, rs); gen_load_gpr(t1, rt); tcg_gen_trunc_tl_i32(t2, t0); @@ -2030,7 +2036,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, case NM_EXTRV_S_H: check_dsp(ctx); tcg_gen_movi_tl(t0, rd >> 3); - gen_helper_extr_s_h(t0, t0, v0_t, cpu_env); + gen_helper_extr_s_h(t0, t0, v1_t, cpu_env); gen_store_gpr(t0, ret); break; } @@ -2701,6 +2707,9 @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt) case NM_SDC1XS: tcg_gen_shli_tl(t0, t0, 3); break; + default: + gen_reserved_instruction(ctx); + goto out; } } gen_op_addr_add(ctx, t0, t0, t1); @@ -2791,6 +2800,7 @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt) break; } +out: tcg_temp_free(t0); tcg_temp_free(t1); } @@ -3656,7 +3666,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) int offset; int imm; - insn = translator_lduw(env, ctx->base.pc_next + 2); + insn = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2); ctx->opcode = (ctx->opcode << 16) | insn; rt = extract32(ctx->opcode, 21, 5); @@ -3684,8 +3694,8 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) generate_exception_end(ctx, EXCP_BREAK); break; case NM_SDBBP: - if (is_uhi(extract32(ctx->opcode, 0, 19))) { - gen_helper_do_semihosting(cpu_env); + if (is_uhi(ctx, extract32(ctx->opcode, 0, 19))) { + ctx->base.is_jmp = DISAS_SEMIHOST; } else { if (ctx->hflags & MIPS_HFLAG_SBRI) { gen_reserved_instruction(ctx); @@ -3775,7 +3785,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, 
DisasContext *ctx) break; case NM_P48I: { - insn = translator_lduw(env, ctx->base.pc_next + 4); + insn = translator_lduw(env, &ctx->base, ctx->base.pc_next + 4); target_long addr_off = extract32(ctx->opcode, 0, 16) | insn << 16; switch (extract32(ctx->opcode, 16, 5)) { case NM_LI48: @@ -3938,6 +3948,9 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) gen_shift_imm(ctx, OPC_ROTR, rt, rs, extract32(ctx->opcode, 0, 5)); break; + default: + gen_reserved_instruction(ctx); + break; } } break; @@ -4239,6 +4252,9 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) check_xnp(ctx); gen_llwp(ctx, rs, 0, rt, extract32(ctx->opcode, 3, 5)); break; + default: + gen_reserved_instruction(ctx); + break; } break; case NM_P_SC: @@ -4251,6 +4267,9 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) gen_scwp(ctx, rs, 0, rt, extract32(ctx->opcode, 3, 5), false); break; + default: + gen_reserved_instruction(ctx); + break; } break; case NM_CACHE: @@ -4259,6 +4278,9 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) gen_cache_operation(ctx, rt, rs, s); } break; + default: + gen_reserved_instruction(ctx); + break; } break; case NM_P_LS_E0: @@ -4365,6 +4387,9 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) break; } break; + default: + gen_reserved_instruction(ctx); + break; } break; case NM_P_LS_WM: @@ -4472,12 +4497,13 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) case NM_P_BR3A: s = sextract32(ctx->opcode, 0, 1) << 14 | extract32(ctx->opcode, 1, 13) << 1; - check_cp1_enabled(ctx); switch (extract32(ctx->opcode, 16, 5)) { case NM_BC1EQZC: + check_cp1_enabled(ctx); gen_compute_branch_cp1_nm(ctx, OPC_BC1EQZ, rt, s); break; case NM_BC1NEZC: + check_cp1_enabled(ctx); gen_compute_branch_cp1_nm(ctx, OPC_BC1NEZ, rt, s); break; case NM_BPOSGE32C: @@ -4521,7 +4547,12 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) switch (extract32(ctx->opcode, 14, 2)) { case NM_BNEC: check_nms(ctx); - gen_compute_branch_nm(ctx, OPC_BNE, 4, rs, rt, s); + if (rs == rt) { + /* NOP */ + ctx->hflags |= MIPS_HFLAG_FBNSLOT; + } else { + gen_compute_branch_nm(ctx, OPC_BNE, 4, rs, rt, s); + } break; case NM_BLTC: if (rs != 0 && rt != 0 && rs == rt) { @@ -4602,8 +4633,8 @@ static int decode_isa_nanomips(CPUMIPSState *env, DisasContext *ctx) generate_exception_end(ctx, EXCP_BREAK); break; case NM_SDBBP16: - if (is_uhi(extract32(ctx->opcode, 0, 3))) { - gen_helper_do_semihosting(cpu_env); + if (is_uhi(ctx, extract32(ctx->opcode, 0, 3))) { + ctx->base.is_jmp = DISAS_SEMIHOST; } else { if (ctx->hflags & MIPS_HFLAG_SBRI) { gen_reserved_instruction(ctx); diff --git a/target/mips/tcg/octeon.decode b/target/mips/tcg/octeon.decode new file mode 100644 index 0000000000..0c787cb498 --- /dev/null +++ b/target/mips/tcg/octeon.decode @@ -0,0 +1,41 @@ +# Octeon Architecture Module instruction set +# +# Copyright (C) 2022 Pavel Dovgalyuk +# +# SPDX-License-Identifier: LGPL-2.1-or-later +# + +# Branch on bit set or clear +# BBIT0 110010 ..... ..... ................ +# BBIT032 110110 ..... ..... ................ +# BBIT1 111010 ..... ..... ................ +# BBIT132 111110 ..... ..... ................ + +%bbit_p 28:1 16:5 +BBIT 11 set:1 . 10 rs:5 ..... 
offset:s16 p=%bbit_p + +# Arithmetic +# BADDU rd, rs, rt +# DMUL rd, rs, rt +# EXTS rt, rs, p, lenm1 +# EXTS32 rt, rs, p, lenm1 +# CINS rt, rs, p, lenm1 +# CINS32 rt, rs, p, lenm1 +# DPOP rd, rs +# POP rd, rs +# SEQ rd, rs, rt +# SEQI rt, rs, immediate +# SNE rd, rs, rt +# SNEI rt, rs, immediate + +@r3 ...... rs:5 rt:5 rd:5 ..... ...... +%bitfield_p 0:1 6:5 +@bitfield ...... rs:5 rt:5 lenm1:5 ..... ..... . p=%bitfield_p + +BADDU 011100 ..... ..... ..... 00000 101000 @r3 +DMUL 011100 ..... ..... ..... 00000 000011 @r3 +EXTS 011100 ..... ..... ..... ..... 11101 . @bitfield +CINS 011100 ..... ..... ..... ..... 11001 . @bitfield +POP 011100 rs:5 00000 rd:5 00000 10110 dw:1 +SEQNE 011100 rs:5 rt:5 rd:5 00000 10101 ne:1 +SEQNEI 011100 rs:5 rt:5 imm:s10 10111 ne:1 diff --git a/target/mips/tcg/octeon_translate.c b/target/mips/tcg/octeon_translate.c new file mode 100644 index 0000000000..6a207d2e7e --- /dev/null +++ b/target/mips/tcg/octeon_translate.c @@ -0,0 +1,201 @@ +/* + * Octeon-specific instructions translation routines + * + * Copyright (c) 2022 Pavel Dovgalyuk + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "tcg/tcg-op.h" +#include "tcg/tcg-op-gvec.h" +#include "exec/helper-gen.h" +#include "translate.h" + +/* Include the auto-generated decoder. */ +#include "decode-octeon.c.inc" + +static bool trans_BBIT(DisasContext *ctx, arg_BBIT *a) +{ + TCGv p; + + if (ctx->hflags & MIPS_HFLAG_BMASK) { + LOG_DISAS("Branch in delay / forbidden slot at PC 0x" + TARGET_FMT_lx "\n", ctx->base.pc_next); + generate_exception_end(ctx, EXCP_RI); + return true; + } + + /* Load needed operands */ + TCGv t0 = tcg_temp_new(); + gen_load_gpr(t0, a->rs); + + p = tcg_constant_tl(1ULL << a->p); + if (a->set) { + tcg_gen_and_tl(bcond, p, t0); + } else { + tcg_gen_andc_tl(bcond, p, t0); + } + + ctx->hflags |= MIPS_HFLAG_BC; + ctx->btarget = ctx->base.pc_next + 4 + a->offset * 4; + ctx->hflags |= MIPS_HFLAG_BDS32; + + tcg_temp_free(t0); + return true; +} + +static bool trans_BADDU(DisasContext *ctx, arg_BADDU *a) +{ + TCGv t0, t1; + + if (a->rt == 0) { + /* nop */ + return true; + } + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + gen_load_gpr(t0, a->rs); + gen_load_gpr(t1, a->rt); + + tcg_gen_add_tl(t0, t0, t1); + tcg_gen_andi_i64(cpu_gpr[a->rd], t0, 0xff); + + tcg_temp_free(t0); + tcg_temp_free(t1); + + return true; +} + +static bool trans_DMUL(DisasContext *ctx, arg_DMUL *a) +{ + TCGv t0, t1; + + if (a->rt == 0) { + /* nop */ + return true; + } + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + gen_load_gpr(t0, a->rs); + gen_load_gpr(t1, a->rt); + + tcg_gen_mul_i64(cpu_gpr[a->rd], t0, t1); + + tcg_temp_free(t0); + tcg_temp_free(t1); + + return true; +} + +static bool trans_EXTS(DisasContext *ctx, arg_EXTS *a) +{ + TCGv t0; + + if (a->rt == 0) { + /* nop */ + return true; + } + + t0 = tcg_temp_new(); + gen_load_gpr(t0, a->rs); + tcg_gen_sextract_tl(t0, t0, a->p, a->lenm1 + 1); + gen_store_gpr(t0, a->rt); + tcg_temp_free(t0); + + return true; +} + +static bool trans_CINS(DisasContext *ctx, arg_CINS *a) +{ + TCGv t0; + + if (a->rt == 0) { + /* nop */ + return true; + } + + t0 = tcg_temp_new(); + gen_load_gpr(t0, a->rs); + tcg_gen_deposit_z_tl(t0, t0, a->p, a->lenm1 + 1); + gen_store_gpr(t0, a->rt); + tcg_temp_free(t0); + + return true; +} + +static bool trans_POP(DisasContext *ctx, arg_POP *a) +{ + TCGv t0; + + if (a->rd == 0) { + /* nop */ + return true; + } + + t0 = tcg_temp_new(); + gen_load_gpr(t0, a->rs); + if (!a->dw) { + tcg_gen_andi_i64(t0, t0, 0xffffffff); + } + 
tcg_gen_ctpop_tl(t0, t0); + gen_store_gpr(t0, a->rd); + tcg_temp_free(t0); + + return true; +} + +static bool trans_SEQNE(DisasContext *ctx, arg_SEQNE *a) +{ + TCGv t0, t1; + + if (a->rd == 0) { + /* nop */ + return true; + } + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + + gen_load_gpr(t0, a->rs); + gen_load_gpr(t1, a->rt); + + if (a->ne) { + tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr[a->rd], t1, t0); + } else { + tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr[a->rd], t1, t0); + } + + tcg_temp_free(t0); + tcg_temp_free(t1); + + return true; +} + +static bool trans_SEQNEI(DisasContext *ctx, arg_SEQNEI *a) +{ + TCGv t0; + + if (a->rt == 0) { + /* nop */ + return true; + } + + t0 = tcg_temp_new(); + + gen_load_gpr(t0, a->rs); + + /* Sign-extend to 64 bit value */ + target_ulong imm = a->imm; + if (a->ne) { + tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr[a->rt], t0, imm); + } else { + tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr[a->rt], t0, imm); + } + + tcg_temp_free(t0); + + return true; +} diff --git a/target/mips/tcg/op_helper.c b/target/mips/tcg/op_helper.c index fafbf1faca..ef3dafcbb3 100644 --- a/target/mips/tcg/op_helper.c +++ b/target/mips/tcg/op_helper.c @@ -26,124 +26,6 @@ #include "exec/memop.h" #include "fpu_helper.h" -/* 64 bits arithmetic for 32 bits hosts */ -static inline uint64_t get_HILO(CPUMIPSState *env) -{ - return ((uint64_t)(env->active_tc.HI[0]) << 32) | - (uint32_t)env->active_tc.LO[0]; -} - -static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO) -{ - env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF); - return env->active_tc.HI[0] = (int32_t)(HILO >> 32); -} - -static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO) -{ - target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF); - env->active_tc.HI[0] = (int32_t)(HILO >> 32); - return tmp; -} - -/* Multiplication variants of the vr54xx. 
*/ -target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 * - (int64_t)(int32_t)arg2)); -} - -target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 * - (uint64_t)(uint32_t)arg2); -} - -target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 * - (int64_t)(int32_t)arg2); -} - -target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 * - (int64_t)(int32_t)arg2); -} - -target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HI_LOT0(env, (uint64_t)get_HILO(env) + - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); -} - -target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, (uint64_t)get_HILO(env) + - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); -} - -target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 * - (int64_t)(int32_t)arg2); -} - -target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 * - (int64_t)(int32_t)arg2); -} - -target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HI_LOT0(env, (uint64_t)get_HILO(env) - - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); -} - -target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, (uint64_t)get_HILO(env) - - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); -} - -target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2); -} - -target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 * - (uint64_t)(uint32_t)arg2); -} - -target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 * - (int64_t)(int32_t)arg2); -} - -target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 * - (uint64_t)(uint32_t)arg2); -} - static inline target_ulong bitswap(target_ulong v) { v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) | diff --git a/target/mips/tcg/mips32r6.decode b/target/mips/tcg/rel6.decode similarity index 64% rename from target/mips/tcg/mips32r6.decode rename to target/mips/tcg/rel6.decode index 837c991edc..d6989cf56e 100644 --- a/target/mips/tcg/mips32r6.decode +++ b/target/mips/tcg/rel6.decode @@ -5,21 +5,29 @@ # SPDX-License-Identifier: LGPL-2.1-or-later # # Reference: +# # MIPS Architecture for Programmers Volume II-A # The MIPS32 Instruction Set Reference Manual, Revision 6.06 # (Document Number: MD00086-2B-MIPS32BIS-AFP-06.06) # +# MIPS Architecture for Programmers Volume II-A +# The MIPS64 Instruction Set Reference Manual, Revision 6.06 +# (Document Number: MD00087-2B-MIPS64BIS-AFP-6.06) -&rtype rs rt rd sa +&r rs rt rd sa -@lsa ...... rs:5 rt:5 rd:5 ... sa:2 ...... 
&rtype +@lsa ...... rs:5 rt:5 rd:5 ... sa:2 ...... &r LSA 000000 ..... ..... ..... 000 .. 000101 @lsa +DLSA 000000 ..... ..... ..... 000 .. 010101 @lsa REMOVED 010011 ----- ----- ----- ----- ------ # COP1X (COP3) REMOVED 011100 ----- ----- ----- ----- ------ # SPECIAL2 +REMOVED 011010 ----- ----- ---------------- # LDL +REMOVED 011011 ----- ----- ---------------- # LDR + REMOVED 011111 ----- ----- ---------- 011001 # LWLE REMOVED 011111 ----- ----- ---------- 011010 # LWRE REMOVED 011111 ----- ----- ---------- 100001 # SWLE @@ -28,9 +36,14 @@ REMOVED 011111 ----- ----- ---------- 100010 # SWRE REMOVED 100010 ----- ----- ---------------- # LWL REMOVED 100110 ----- ----- ---------------- # LWR REMOVED 101010 ----- ----- ---------------- # SWL +REMOVED 101100 ----- ----- ---------------- # SDL +REMOVED 101101 ----- ----- ---------------- # SDR REMOVED 101110 ----- ----- ---------------- # SWR REMOVED 101111 ----- ----- ---------------- # CACHE + REMOVED 110000 ----- ----- ---------------- # LL REMOVED 110011 ----- ----- ---------------- # PREF +REMOVED 110100 ----- ----- ---------------- # LLD REMOVED 111000 ----- ----- ---------------- # SC +REMOVED 111100 ----- ----- ---------------- # SCD diff --git a/target/mips/tcg/rel6_translate.c b/target/mips/tcg/rel6_translate.c index 0354370927..d631851258 100644 --- a/target/mips/tcg/rel6_translate.c +++ b/target/mips/tcg/rel6_translate.c @@ -13,9 +13,8 @@ #include "exec/helper-gen.h" #include "translate.h" -/* Include the auto-generated decoder. */ -#include "decode-mips32r6.c.inc" -#include "decode-mips64r6.c.inc" +/* Include the auto-generated decoders. */ +#include "decode-rel6.c.inc" bool trans_REMOVED(DisasContext *ctx, arg_REMOVED *a) { @@ -24,20 +23,15 @@ bool trans_REMOVED(DisasContext *ctx, arg_REMOVED *a) return true; } -static bool trans_LSA(DisasContext *ctx, arg_rtype *a) +static bool trans_LSA(DisasContext *ctx, arg_r *a) { return gen_lsa(ctx, a->rd, a->rt, a->rs, a->sa); } -static bool trans_DLSA(DisasContext *ctx, arg_rtype *a) +static bool trans_DLSA(DisasContext *ctx, arg_r *a) { + if (TARGET_LONG_BITS != 64) { + return false; + } return gen_dlsa(ctx, a->rd, a->rt, a->rs, a->sa); } - -bool decode_isa_rel6(DisasContext *ctx, uint32_t insn) -{ - if (TARGET_LONG_BITS == 64 && decode_mips64r6(ctx, insn)) { - return true; - } - return decode_mips32r6(ctx, insn); -} diff --git a/target/mips/tcg/sysemu/cp0_helper.c b/target/mips/tcg/sysemu/cp0_helper.c index aae2af6ecc..5da1124589 100644 --- a/target/mips/tcg/sysemu/cp0_helper.c +++ b/target/mips/tcg/sysemu/cp0_helper.c @@ -1396,10 +1396,11 @@ void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel) void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel) { uint64_t mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID); + uint64_t m_bit = env->CP0_WatchHi[sel] & (1 << CP0WH_M); /* read-only */ if ((env->CP0_Config5 >> CP0C5_MI) & 1) { mask |= 0xFFFFFFFF00000000ULL; /* MMID */ } - env->CP0_WatchHi[sel] = arg1 & mask; + env->CP0_WatchHi[sel] = m_bit | (arg1 & mask); env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7); } diff --git a/target/mips/tcg/sysemu/mips-semi.c b/target/mips/tcg/sysemu/mips-semi.c index b4a383ae90..85f0567a7f 100644 --- a/target/mips/tcg/sysemu/mips-semi.c +++ b/target/mips/tcg/sysemu/mips-semi.c @@ -20,10 +20,12 @@ #include "qemu/osdep.h" #include "cpu.h" #include "qemu/log.h" -#include "exec/helper-proto.h" -#include "exec/softmmu-semi.h" +#include "exec/gdbstub.h" +#include 
"semihosting/softmmu-uaccess.h" #include "semihosting/semihost.h" #include "semihosting/console.h" +#include "semihosting/syscalls.h" +#include "internal.h" typedef enum UHIOp { UHI_exit = 1, @@ -74,293 +76,284 @@ enum UHIOpenFlags { UHIOpen_EXCL = 0x800 }; -static int errno_mips(int host_errno) +enum UHIErrno { + UHI_EACCESS = 13, + UHI_EAGAIN = 11, + UHI_EBADF = 9, + UHI_EBADMSG = 77, + UHI_EBUSY = 16, + UHI_ECONNRESET = 104, + UHI_EEXIST = 17, + UHI_EFBIG = 27, + UHI_EINTR = 4, + UHI_EINVAL = 22, + UHI_EIO = 5, + UHI_EISDIR = 21, + UHI_ELOOP = 92, + UHI_EMFILE = 24, + UHI_EMLINK = 31, + UHI_ENAMETOOLONG = 91, + UHI_ENETDOWN = 115, + UHI_ENETUNREACH = 114, + UHI_ENFILE = 23, + UHI_ENOBUFS = 105, + UHI_ENOENT = 2, + UHI_ENOMEM = 12, + UHI_ENOSPC = 28, + UHI_ENOSR = 63, + UHI_ENOTCONN = 128, + UHI_ENOTDIR = 20, + UHI_ENXIO = 6, + UHI_EOVERFLOW = 139, + UHI_EPERM = 1, + UHI_EPIPE = 32, + UHI_ERANGE = 34, + UHI_EROFS = 30, + UHI_ESPIPE = 29, + UHI_ETIMEDOUT = 116, + UHI_ETXTBSY = 26, + UHI_EWOULDBLOCK = 11, + UHI_EXDEV = 18, +}; + +static void report_fault(CPUMIPSState *env) { - /* Errno values taken from asm-mips/errno.h */ - switch (host_errno) { - case 0: return 0; - case ENAMETOOLONG: return 78; -#ifdef EOVERFLOW - case EOVERFLOW: return 79; -#endif -#ifdef ELOOP - case ELOOP: return 90; -#endif - default: return EINVAL; - } + int op = env->active_tc.gpr[25]; + error_report("Fault during UHI operation %d", op); + abort(); } -static int copy_stat_to_target(CPUMIPSState *env, const struct stat *src, - target_ulong vaddr) +static void uhi_cb(CPUState *cs, uint64_t ret, int err) { - hwaddr len = sizeof(struct UHIStat); - UHIStat *dst = lock_user(VERIFY_WRITE, vaddr, len, 0); - if (!dst) { - errno = EFAULT; - return -1; + CPUMIPSState *env = cs->env_ptr; + +#define E(N) case E##N: err = UHI_E##N; break + + switch (err) { + case 0: + break; + E(PERM); + E(NOENT); + E(INTR); + E(BADF); + E(BUSY); + E(EXIST); + E(NOTDIR); + E(ISDIR); + E(INVAL); + E(NFILE); + E(MFILE); + E(FBIG); + E(NOSPC); + E(SPIPE); + E(ROFS); + E(NAMETOOLONG); + default: + err = UHI_EINVAL; + break; + case EFAULT: + report_fault(env); } - dst->uhi_st_dev = tswap16(src->st_dev); - dst->uhi_st_ino = tswap16(src->st_ino); - dst->uhi_st_mode = tswap32(src->st_mode); - dst->uhi_st_nlink = tswap16(src->st_nlink); - dst->uhi_st_uid = tswap16(src->st_uid); - dst->uhi_st_gid = tswap16(src->st_gid); - dst->uhi_st_rdev = tswap16(src->st_rdev); - dst->uhi_st_size = tswap64(src->st_size); - dst->uhi_st_atime = tswap64(src->st_atime); - dst->uhi_st_mtime = tswap64(src->st_mtime); - dst->uhi_st_ctime = tswap64(src->st_ctime); -#ifdef _WIN32 - dst->uhi_st_blksize = 0; - dst->uhi_st_blocks = 0; -#else - dst->uhi_st_blksize = tswap64(src->st_blksize); - dst->uhi_st_blocks = tswap64(src->st_blocks); -#endif - unlock_user(dst, vaddr, len); - return 0; +#undef E + + env->active_tc.gpr[2] = ret; + env->active_tc.gpr[3] = err; } -static int get_open_flags(target_ulong target_flags) +static void uhi_fstat_cb(CPUState *cs, uint64_t ret, int err) { - int open_flags = 0; + QEMU_BUILD_BUG_ON(sizeof(UHIStat) < sizeof(struct gdb_stat)); - if (target_flags & UHIOpen_RDWR) { - open_flags |= O_RDWR; - } else if (target_flags & UHIOpen_WRONLY) { - open_flags |= O_WRONLY; - } else { - open_flags |= O_RDONLY; + if (!err) { + CPUMIPSState *env = cs->env_ptr; + target_ulong addr = env->active_tc.gpr[5]; + UHIStat *dst = lock_user(VERIFY_WRITE, addr, sizeof(UHIStat), 1); + struct gdb_stat s; + + if (!dst) { + report_fault(env); + } + + memcpy(&s, dst, 
sizeof(struct gdb_stat)); + memset(dst, 0, sizeof(UHIStat)); + + dst->uhi_st_dev = tswap16(be32_to_cpu(s.gdb_st_dev)); + dst->uhi_st_ino = tswap16(be32_to_cpu(s.gdb_st_ino)); + dst->uhi_st_mode = tswap32(be32_to_cpu(s.gdb_st_mode)); + dst->uhi_st_nlink = tswap16(be32_to_cpu(s.gdb_st_nlink)); + dst->uhi_st_uid = tswap16(be32_to_cpu(s.gdb_st_uid)); + dst->uhi_st_gid = tswap16(be32_to_cpu(s.gdb_st_gid)); + dst->uhi_st_rdev = tswap16(be32_to_cpu(s.gdb_st_rdev)); + dst->uhi_st_size = tswap64(be64_to_cpu(s.gdb_st_size)); + dst->uhi_st_atime = tswap64(be32_to_cpu(s.gdb_st_atime)); + dst->uhi_st_mtime = tswap64(be32_to_cpu(s.gdb_st_mtime)); + dst->uhi_st_ctime = tswap64(be32_to_cpu(s.gdb_st_ctime)); + dst->uhi_st_blksize = tswap64(be64_to_cpu(s.gdb_st_blksize)); + dst->uhi_st_blocks = tswap64(be64_to_cpu(s.gdb_st_blocks)); + + unlock_user(dst, addr, sizeof(UHIStat)); } - open_flags |= (target_flags & UHIOpen_APPEND) ? O_APPEND : 0; - open_flags |= (target_flags & UHIOpen_CREAT) ? O_CREAT : 0; - open_flags |= (target_flags & UHIOpen_TRUNC) ? O_TRUNC : 0; - open_flags |= (target_flags & UHIOpen_EXCL) ? O_EXCL : 0; - - return open_flags; + uhi_cb(cs, ret, err); } -static int write_to_file(CPUMIPSState *env, target_ulong fd, target_ulong vaddr, - target_ulong len, target_ulong offset) -{ - int num_of_bytes; - void *dst = lock_user(VERIFY_READ, vaddr, len, 1); - if (!dst) { - errno = EFAULT; - return -1; - } - - if (offset) { -#ifdef _WIN32 - num_of_bytes = 0; -#else - num_of_bytes = pwrite(fd, dst, len, offset); -#endif - } else { - num_of_bytes = write(fd, dst, len); - } - - unlock_user(dst, vaddr, 0); - return num_of_bytes; -} - -static int read_from_file(CPUMIPSState *env, target_ulong fd, - target_ulong vaddr, target_ulong len, - target_ulong offset) -{ - int num_of_bytes; - void *dst = lock_user(VERIFY_WRITE, vaddr, len, 0); - if (!dst) { - errno = EFAULT; - return -1; - } - - if (offset) { -#ifdef _WIN32 - num_of_bytes = 0; -#else - num_of_bytes = pread(fd, dst, len, offset); -#endif - } else { - num_of_bytes = read(fd, dst, len); - } - - unlock_user(dst, vaddr, len); - return num_of_bytes; -} - -static int copy_argn_to_target(CPUMIPSState *env, int arg_num, - target_ulong vaddr) -{ - int strsize = strlen(semihosting_get_arg(arg_num)) + 1; - char *dst = lock_user(VERIFY_WRITE, vaddr, strsize, 0); - if (!dst) { - return -1; - } - - strcpy(dst, semihosting_get_arg(arg_num)); - - unlock_user(dst, vaddr, strsize); - return 0; -} - -#define GET_TARGET_STRING(p, addr) \ - do { \ - p = lock_user_string(addr); \ - if (!p) { \ - gpr[2] = -1; \ - gpr[3] = EFAULT; \ - return; \ - } \ - } while (0) - -#define GET_TARGET_STRINGS_2(p, addr, p2, addr2) \ - do { \ - p = lock_user_string(addr); \ - if (!p) { \ - gpr[2] = -1; \ - gpr[3] = EFAULT; \ - return; \ - } \ - p2 = lock_user_string(addr2); \ - if (!p2) { \ - unlock_user(p, addr, 0); \ - gpr[2] = -1; \ - gpr[3] = EFAULT; \ - return; \ - } \ - } while (0) - -#define FREE_TARGET_STRING(p, gpr) \ - do { \ - unlock_user(p, gpr, 0); \ - } while (0) - -void helper_do_semihosting(CPUMIPSState *env) +void mips_semihosting(CPUMIPSState *env) { + CPUState *cs = env_cpu(env); target_ulong *gpr = env->active_tc.gpr; const UHIOp op = gpr[25]; - char *p, *p2; + char *p; switch (op) { case UHI_exit: - qemu_log("UHI(%d): exit(%d)\n", op, (int)gpr[4]); + gdb_exit(gpr[4]); exit(gpr[4]); + case UHI_open: - GET_TARGET_STRING(p, gpr[4]); - if (!strcmp("/dev/stdin", p)) { - gpr[2] = 0; - } else if (!strcmp("/dev/stdout", p)) { - gpr[2] = 1; - } else if (!strcmp("/dev/stderr", 
p)) { - gpr[2] = 2; - } else { - gpr[2] = open(p, get_open_flags(gpr[5]), gpr[6]); - gpr[3] = errno_mips(errno); + { + target_ulong fname = gpr[4]; + int ret = -1; + + p = lock_user_string(fname); + if (!p) { + report_fault(env); + } + if (!strcmp("/dev/stdin", p)) { + ret = 0; + } else if (!strcmp("/dev/stdout", p)) { + ret = 1; + } else if (!strcmp("/dev/stderr", p)) { + ret = 2; + } + unlock_user(p, fname, 0); + + /* FIXME: reusing a guest fd doesn't seem correct. */ + if (ret >= 0) { + gpr[2] = ret; + break; + } + + semihost_sys_open(cs, uhi_cb, fname, 0, gpr[5], gpr[6]); } - FREE_TARGET_STRING(p, gpr[4]); break; + case UHI_close: - if (gpr[4] < 3) { - /* ignore closing stdin/stdout/stderr */ - gpr[2] = 0; - return; - } - gpr[2] = close(gpr[4]); - gpr[3] = errno_mips(errno); + semihost_sys_close(cs, uhi_cb, gpr[4]); break; case UHI_read: - gpr[2] = read_from_file(env, gpr[4], gpr[5], gpr[6], 0); - gpr[3] = errno_mips(errno); + semihost_sys_read(cs, uhi_cb, gpr[4], gpr[5], gpr[6]); break; case UHI_write: - gpr[2] = write_to_file(env, gpr[4], gpr[5], gpr[6], 0); - gpr[3] = errno_mips(errno); + semihost_sys_write(cs, uhi_cb, gpr[4], gpr[5], gpr[6]); break; case UHI_lseek: - gpr[2] = lseek(gpr[4], gpr[5], gpr[6]); - gpr[3] = errno_mips(errno); + semihost_sys_lseek(cs, uhi_cb, gpr[4], gpr[5], gpr[6]); break; case UHI_unlink: - GET_TARGET_STRING(p, gpr[4]); - gpr[2] = remove(p); - gpr[3] = errno_mips(errno); - FREE_TARGET_STRING(p, gpr[4]); + semihost_sys_remove(cs, uhi_cb, gpr[4], 0); break; case UHI_fstat: - { - struct stat sbuf; - memset(&sbuf, 0, sizeof(sbuf)); - gpr[2] = fstat(gpr[4], &sbuf); - gpr[3] = errno_mips(errno); - if (gpr[2]) { - return; - } - gpr[2] = copy_stat_to_target(env, &sbuf, gpr[5]); - gpr[3] = errno_mips(errno); - } + semihost_sys_fstat(cs, uhi_fstat_cb, gpr[4], gpr[5]); break; + case UHI_argc: gpr[2] = semihosting_get_argc(); break; case UHI_argnlen: - if (gpr[4] >= semihosting_get_argc()) { - gpr[2] = -1; - return; + { + const char *s = semihosting_get_arg(gpr[4]); + gpr[2] = s ? strlen(s) : -1; } - gpr[2] = strlen(semihosting_get_arg(gpr[4])); break; case UHI_argn: - if (gpr[4] >= semihosting_get_argc()) { - gpr[2] = -1; - return; + { + const char *s = semihosting_get_arg(gpr[4]); + target_ulong addr; + size_t len; + + if (!s) { + gpr[2] = -1; + break; + } + len = strlen(s) + 1; + addr = gpr[5]; + p = lock_user(VERIFY_WRITE, addr, len, 0); + if (!p) { + report_fault(env); + } + memcpy(p, s, len); + unlock_user(p, addr, len); + gpr[2] = 0; } - gpr[2] = copy_argn_to_target(env, gpr[4], gpr[5]); break; + case UHI_plog: - GET_TARGET_STRING(p, gpr[4]); - p2 = strstr(p, "%d"); - if (p2) { - int char_num = p2 - p; - GString *s = g_string_new_len(p, char_num); - g_string_append_printf(s, "%d%s", (int)gpr[5], p2 + 2); - gpr[2] = qemu_semihosting_log_out(s->str, s->len); - g_string_free(s, true); - } else { - gpr[2] = qemu_semihosting_log_out(p, strlen(p)); + { + target_ulong addr = gpr[4]; + ssize_t len = target_strlen(addr); + GString *str; + char *pct_d; + + if (len < 0) { + report_fault(env); + } + p = lock_user(VERIFY_READ, addr, len, 1); + if (!p) { + report_fault(env); + } + + pct_d = strstr(p, "%d"); + if (!pct_d) { + unlock_user(p, addr, 0); + semihost_sys_write(cs, uhi_cb, 2, addr, len); + break; + } + + str = g_string_new_len(p, pct_d - p); + g_string_append_printf(str, "%d%s", (int)gpr[5], pct_d + 2); + unlock_user(p, addr, 0); + + /* + * When we're using gdb, we need a guest address, so + * drop the string onto the stack below the stack pointer. 
+ */ + if (use_gdb_syscalls()) { + addr = gpr[29] - str->len; + p = lock_user(VERIFY_WRITE, addr, str->len, 0); + if (!p) { + report_fault(env); + } + memcpy(p, str->str, str->len); + unlock_user(p, addr, str->len); + semihost_sys_write(cs, uhi_cb, 2, addr, str->len); + } else { + gpr[2] = qemu_semihosting_console_write(str->str, str->len); + } + g_string_free(str, true); } - FREE_TARGET_STRING(p, gpr[4]); break; + case UHI_assert: - GET_TARGET_STRINGS_2(p, gpr[4], p2, gpr[5]); - printf("assertion '"); - printf("\"%s\"", p); - printf("': file \"%s\", line %d\n", p2, (int)gpr[6]); - FREE_TARGET_STRING(p2, gpr[5]); - FREE_TARGET_STRING(p, gpr[4]); - abort(); - break; - case UHI_pread: - gpr[2] = read_from_file(env, gpr[4], gpr[5], gpr[6], gpr[7]); - gpr[3] = errno_mips(errno); - break; - case UHI_pwrite: - gpr[2] = write_to_file(env, gpr[4], gpr[5], gpr[6], gpr[7]); - gpr[3] = errno_mips(errno); - break; -#ifndef _WIN32 - case UHI_link: - GET_TARGET_STRINGS_2(p, gpr[4], p2, gpr[5]); - gpr[2] = link(p, p2); - gpr[3] = errno_mips(errno); - FREE_TARGET_STRING(p2, gpr[5]); - FREE_TARGET_STRING(p, gpr[4]); - break; -#endif + { + const char *msg, *file; + + msg = lock_user_string(gpr[4]); + if (!msg) { + msg = ""; + } + file = lock_user_string(gpr[5]); + if (!file) { + file = ""; + } + + error_report("UHI assertion \"%s\": file \"%s\", line %d", + msg, file, (int)gpr[6]); + abort(); + } + default: - fprintf(stderr, "Unknown UHI operation %d\n", op); + error_report("Unknown UHI operation %d", op); abort(); } return; diff --git a/target/mips/tcg/sysemu/special_helper.c b/target/mips/tcg/sysemu/special_helper.c index 2a2afb49e8..3c5f35c759 100644 --- a/target/mips/tcg/sysemu/special_helper.c +++ b/target/mips/tcg/sysemu/special_helper.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" @@ -93,7 +94,7 @@ bool mips_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb) CPUMIPSState *env = &cpu->env; if ((env->hflags & MIPS_HFLAG_BMASK) != 0 - && env->active_tc.PC != tb->pc) { + && env->active_tc.PC != tb_pc(tb)) { env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 
2 : 4); env->hflags &= ~MIPS_HFLAG_BMASK; return true; diff --git a/target/mips/tcg/sysemu/tlb_helper.c b/target/mips/tcg/sysemu/tlb_helper.c index a150a014ec..9d16859c0a 100644 --- a/target/mips/tcg/sysemu/tlb_helper.c +++ b/target/mips/tcg/sysemu/tlb_helper.c @@ -1053,6 +1053,11 @@ void mips_cpu_do_interrupt(CPUState *cs) } offset = 0x180; switch (cs->exception_index) { + case EXCP_SEMIHOST: + cs->exception_index = EXCP_NONE; + mips_semihosting(env); + env->active_tc.PC += env->error_code; + return; case EXCP_DSS: env->CP0_Debug |= 1 << CP0DB_DSS; /* @@ -1339,6 +1344,24 @@ void mips_cpu_do_interrupt(CPUState *cs) cs->exception_index = EXCP_NONE; } +bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ + if (interrupt_request & CPU_INTERRUPT_HARD) { + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; + + if (cpu_mips_hw_interrupts_enabled(env) && + cpu_mips_hw_interrupts_pending(env)) { + /* Raise it */ + cs->exception_index = EXCP_EXT_INTERRUPT; + env->error_code = 0; + mips_cpu_do_interrupt(cs); + return true; + } + } + return false; +} + void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra) { CPUState *cs = env_cpu(env); diff --git a/target/mips/tcg/sysemu_helper.h.inc b/target/mips/tcg/sysemu_helper.h.inc index 4353a966f9..af585b5d9c 100644 --- a/target/mips/tcg/sysemu_helper.h.inc +++ b/target/mips/tcg/sysemu_helper.h.inc @@ -9,8 +9,6 @@ * SPDX-License-Identifier: LGPL-2.1-or-later */ -DEF_HELPER_1(do_semihosting, void, env) - /* CP0 helpers */ DEF_HELPER_1(mfc0_mvpcontrol, tl, env) DEF_HELPER_1(mfc0_mvpconf0, tl, env) diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h index 81b14eb219..aef032c48d 100644 --- a/target/mips/tcg/tcg-internal.h +++ b/target/mips/tcg/tcg-internal.h @@ -18,29 +18,31 @@ void mips_tcg_init(void); void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb); -void mips_cpu_do_interrupt(CPUState *cpu); -bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req); -bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); -void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); +G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr); +void mips_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data); const char *mips_exception_name(int32_t exception); -void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, uint32_t exception, - int error_code, uintptr_t pc); +G_NORETURN void do_raise_exception_err(CPUMIPSState *env, uint32_t exception, + int error_code, uintptr_t pc); -static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env, - uint32_t exception, - uintptr_t pc) +static inline G_NORETURN +void do_raise_exception(CPUMIPSState *env, + uint32_t exception, + uintptr_t pc) { do_raise_exception_err(env, exception, 0, pc); } #if !defined(CONFIG_USER_ONLY) +void mips_cpu_do_interrupt(CPUState *cpu); +bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req); + void mmu_init(CPUMIPSState *env, const mips_def_t *def); void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask); @@ -59,6 +61,12 @@ void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, MemTxResult response, uintptr_t retaddr); void cpu_mips_tlb_flush(CPUMIPSState *env); +bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, 
int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); + +void mips_semihosting(CPUMIPSState *env); + #endif /* !CONFIG_USER_ONLY */ #endif diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c index 5b03545f09..624e6b7786 100644 --- a/target/mips/tcg/translate.c +++ b/target/mips/tcg/translate.c @@ -294,26 +294,6 @@ enum { R6_OPC_SDBBP = 0x0e | OPC_SPECIAL, }; -/* Multiplication variants of the vr54xx. */ -#define MASK_MUL_VR54XX(op) (MASK_SPECIAL(op) | (op & (0x1F << 6))) - -enum { - OPC_VR54XX_MULS = (0x03 << 6) | OPC_MULT, - OPC_VR54XX_MULSU = (0x03 << 6) | OPC_MULTU, - OPC_VR54XX_MACC = (0x05 << 6) | OPC_MULT, - OPC_VR54XX_MACCU = (0x05 << 6) | OPC_MULTU, - OPC_VR54XX_MSAC = (0x07 << 6) | OPC_MULT, - OPC_VR54XX_MSACU = (0x07 << 6) | OPC_MULTU, - OPC_VR54XX_MULHI = (0x09 << 6) | OPC_MULT, - OPC_VR54XX_MULHIU = (0x09 << 6) | OPC_MULTU, - OPC_VR54XX_MULSHI = (0x0B << 6) | OPC_MULT, - OPC_VR54XX_MULSHIU = (0x0B << 6) | OPC_MULTU, - OPC_VR54XX_MACCHI = (0x0D << 6) | OPC_MULT, - OPC_VR54XX_MACCHIU = (0x0D << 6) | OPC_MULTU, - OPC_VR54XX_MSACHI = (0x0F << 6) | OPC_MULT, - OPC_VR54XX_MSACHIU = (0x0F << 6) | OPC_MULTU, -}; - /* REGIMM (rt field) opcodes */ #define MASK_REGIMM(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 16))) @@ -1233,51 +1213,6 @@ TCGv_i64 fpu_f64[32]; #include "exec/gen-icount.h" -#define gen_helper_0e0i(name, arg) do { \ - TCGv_i32 helper_tmp = tcg_const_i32(arg); \ - gen_helper_##name(cpu_env, helper_tmp); \ - tcg_temp_free_i32(helper_tmp); \ - } while (0) - -#define gen_helper_0e1i(name, arg1, arg2) do { \ - TCGv_i32 helper_tmp = tcg_const_i32(arg2); \ - gen_helper_##name(cpu_env, arg1, helper_tmp); \ - tcg_temp_free_i32(helper_tmp); \ - } while (0) - -#define gen_helper_1e0i(name, ret, arg1) do { \ - TCGv_i32 helper_tmp = tcg_const_i32(arg1); \ - gen_helper_##name(ret, cpu_env, helper_tmp); \ - tcg_temp_free_i32(helper_tmp); \ - } while (0) - -#define gen_helper_1e1i(name, ret, arg1, arg2) do { \ - TCGv_i32 helper_tmp = tcg_const_i32(arg2); \ - gen_helper_##name(ret, cpu_env, arg1, helper_tmp); \ - tcg_temp_free_i32(helper_tmp); \ - } while (0) - -#define gen_helper_0e2i(name, arg1, arg2, arg3) do { \ - TCGv_i32 helper_tmp = tcg_const_i32(arg3); \ - gen_helper_##name(cpu_env, arg1, arg2, helper_tmp); \ - tcg_temp_free_i32(helper_tmp); \ - } while (0) - -#define gen_helper_1e2i(name, ret, arg1, arg2, arg3) do { \ - TCGv_i32 helper_tmp = tcg_const_i32(arg3); \ - gen_helper_##name(ret, cpu_env, arg1, arg2, helper_tmp); \ - tcg_temp_free_i32(helper_tmp); \ - } while (0) - -#define gen_helper_0e3i(name, arg1, arg2, arg3, arg4) do { \ - TCGv_i32 helper_tmp = tcg_const_i32(arg4); \ - gen_helper_##name(cpu_env, arg1, arg2, arg3, helper_tmp); \ - tcg_temp_free_i32(helper_tmp); \ - } while (0) - -#define DISAS_STOP DISAS_TARGET_0 -#define DISAS_EXIT DISAS_TARGET_1 - static const char regnames_HI[][4] = { "HI0", "HI1", "HI2", "HI3", }; @@ -1413,18 +1348,15 @@ static inline void restore_cpu_state(CPUMIPSState *env, DisasContext *ctx) void generate_exception_err(DisasContext *ctx, int excp, int err) { - TCGv_i32 texcp = tcg_const_i32(excp); - TCGv_i32 terr = tcg_const_i32(err); save_cpu_state(ctx, 1); - gen_helper_raise_exception_err(cpu_env, texcp, terr); - tcg_temp_free_i32(terr); - tcg_temp_free_i32(texcp); + gen_helper_raise_exception_err(cpu_env, tcg_constant_i32(excp), + tcg_constant_i32(err)); ctx->base.is_jmp = DISAS_NORETURN; } void generate_exception(DisasContext *ctx, int excp) { - gen_helper_0e0i(raise_exception, excp); + 
gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp)); } void generate_exception_end(DisasContext *ctx, int excp) @@ -1432,6 +1364,16 @@ void generate_exception_end(DisasContext *ctx, int excp) generate_exception_err(ctx, excp, 0); } +void generate_exception_break(DisasContext *ctx, int code) +{ +#ifdef CONFIG_USER_ONLY + /* Pass the break code along to cpu_loop. */ + tcg_gen_st_i32(tcg_constant_i32(code), cpu_env, + offsetof(CPUMIPSState, error_code)); +#endif + generate_exception_end(ctx, EXCP_BREAK); +} + void gen_reserved_instruction(DisasContext *ctx) { generate_exception_end(ctx, EXCP_RI); @@ -1603,7 +1545,7 @@ void check_cop1x(DisasContext *ctx) */ void check_cp1_64bitmode(DisasContext *ctx) { - if (unlikely(~ctx->hflags & (MIPS_HFLAG_F64 | MIPS_HFLAG_COP1X))) { + if (unlikely(~ctx->hflags & MIPS_HFLAG_F64)) { gen_reserved_instruction(ctx); } } @@ -2033,7 +1975,7 @@ static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \ static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \ DisasContext *ctx) \ { \ - gen_helper_1e1i(insn, ret, arg1, mem_idx); \ + gen_helper_##insn(ret, cpu_env, arg1, tcg_constant_i32(mem_idx)); \ } #endif OP_LD_ATOMIC(ll, ld32s); @@ -2096,7 +2038,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, gen_store_gpr(t0, rt); break; case OPC_LD: - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ | + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rt); break; @@ -2113,12 +2055,12 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, */ tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB); tcg_gen_andi_tl(t1, t0, 7); -#ifndef TARGET_WORDS_BIGENDIAN - tcg_gen_xori_tl(t1, t1, 7); -#endif + if (!cpu_is_bigendian(ctx)) { + tcg_gen_xori_tl(t1, t1, 7); + } tcg_gen_shli_tl(t1, t1, 3); tcg_gen_andi_tl(t0, t0, ~7); - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ); + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ); tcg_gen_shl_tl(t0, t0, t1); t2 = tcg_const_tl(-1); tcg_gen_shl_tl(t2, t2, t1); @@ -2137,12 +2079,12 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, */ tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB); tcg_gen_andi_tl(t1, t0, 7); -#ifdef TARGET_WORDS_BIGENDIAN - tcg_gen_xori_tl(t1, t1, 7); -#endif + if (cpu_is_bigendian(ctx)) { + tcg_gen_xori_tl(t1, t1, 7); + } tcg_gen_shli_tl(t1, t1, 3); tcg_gen_andi_tl(t0, t0, ~7); - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ); + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ); tcg_gen_shr_tl(t0, t0, t1); tcg_gen_xori_tl(t1, t1, 63); t2 = tcg_const_tl(0xfffffffffffffffeull); @@ -2158,7 +2100,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, t1 = tcg_const_tl(pc_relative_pc(ctx)); gen_op_addr_add(ctx, t0, t0, t1); tcg_temp_free(t1); - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ); + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ); gen_store_gpr(t0, rt); break; #endif @@ -2218,9 +2160,9 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, */ tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB); tcg_gen_andi_tl(t1, t0, 3); -#ifndef TARGET_WORDS_BIGENDIAN - tcg_gen_xori_tl(t1, t1, 3); -#endif + if (!cpu_is_bigendian(ctx)) { + tcg_gen_xori_tl(t1, t1, 3); + } tcg_gen_shli_tl(t1, t1, 3); tcg_gen_andi_tl(t0, t0, ~3); tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL); @@ -2246,9 +2188,9 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, */ tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB); tcg_gen_andi_tl(t1, t0, 3); -#ifdef TARGET_WORDS_BIGENDIAN - tcg_gen_xori_tl(t1, t1, 3); -#endif + if (cpu_is_bigendian(ctx)) { + tcg_gen_xori_tl(t1, t1, 3); + } tcg_gen_shli_tl(t1, t1, 3); tcg_gen_andi_tl(t0, t0, 
~3); tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL); @@ -2289,7 +2231,7 @@ static void gen_st(DisasContext *ctx, uint32_t opc, int rt, switch (opc) { #if defined(TARGET_MIPS64) case OPC_SD: - tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEQ | + tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); break; case OPC_SDL: @@ -2399,7 +2341,7 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, case OPC_LDC1: { TCGv_i64 fp0 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ | + tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); gen_store_fpr64(ctx, fp0, ft); tcg_temp_free_i64(fp0); @@ -2409,7 +2351,7 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, { TCGv_i64 fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, ft); - tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ | + tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); tcg_temp_free_i64(fp0); } @@ -3157,7 +3099,7 @@ static inline void gen_pcrel(DisasContext *ctx, int opc, target_ulong pc, check_mips_64(ctx); offset = sextract32(ctx->opcode << 3, 0, 21); addr = addr_add(ctx, (pc & ~0x7), offset); - gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEQ); + gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEUQ); break; #endif default: @@ -3764,70 +3706,6 @@ static void gen_mul_txx9(DisasContext *ctx, uint32_t opc, tcg_temp_free(t1); } -static void gen_mul_vr54xx(DisasContext *ctx, uint32_t opc, - int rd, int rs, int rt) -{ - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - - gen_load_gpr(t0, rs); - gen_load_gpr(t1, rt); - - switch (opc) { - case OPC_VR54XX_MULS: - gen_helper_muls(t0, cpu_env, t0, t1); - break; - case OPC_VR54XX_MULSU: - gen_helper_mulsu(t0, cpu_env, t0, t1); - break; - case OPC_VR54XX_MACC: - gen_helper_macc(t0, cpu_env, t0, t1); - break; - case OPC_VR54XX_MACCU: - gen_helper_maccu(t0, cpu_env, t0, t1); - break; - case OPC_VR54XX_MSAC: - gen_helper_msac(t0, cpu_env, t0, t1); - break; - case OPC_VR54XX_MSACU: - gen_helper_msacu(t0, cpu_env, t0, t1); - break; - case OPC_VR54XX_MULHI: - gen_helper_mulhi(t0, cpu_env, t0, t1); - break; - case OPC_VR54XX_MULHIU: - gen_helper_mulhiu(t0, cpu_env, t0, t1); - break; - case OPC_VR54XX_MULSHI: - gen_helper_mulshi(t0, cpu_env, t0, t1); - break; - case OPC_VR54XX_MULSHIU: - gen_helper_mulshiu(t0, cpu_env, t0, t1); - break; - case OPC_VR54XX_MACCHI: - gen_helper_macchi(t0, cpu_env, t0, t1); - break; - case OPC_VR54XX_MACCHIU: - gen_helper_macchiu(t0, cpu_env, t0, t1); - break; - case OPC_VR54XX_MSACHI: - gen_helper_msachi(t0, cpu_env, t0, t1); - break; - case OPC_VR54XX_MSACHIU: - gen_helper_msachiu(t0, cpu_env, t0, t1); - break; - default: - MIPS_INVAL("mul vr54xx"); - gen_reserved_instruction(ctx); - goto out; - } - gen_store_gpr(t0, rd); - - out: - tcg_temp_free(t0); - tcg_temp_free(t1); -} - static void gen_cl(DisasContext *ctx, uint32_t opc, int rd, int rs) { @@ -4473,10 +4351,10 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, case OPC_GSLQ: t1 = tcg_temp_new(); gen_base_offset_addr(ctx, t0, rs, lsq_offset); - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ | + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8); - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ | + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); gen_store_gpr(t1, rt); gen_store_gpr(t0, lsq_rt1); @@ -4486,10 +4364,10 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, 
check_cp1_enabled(ctx); t1 = tcg_temp_new(); gen_base_offset_addr(ctx, t0, rs, lsq_offset); - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ | + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8); - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ | + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); gen_store_fpr64(ctx, t1, rt); gen_store_fpr64(ctx, t0, lsq_rt1); @@ -4499,11 +4377,11 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, t1 = tcg_temp_new(); gen_base_offset_addr(ctx, t0, rs, lsq_offset); gen_load_gpr(t1, rt); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8); gen_load_gpr(t1, lsq_rt1); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); tcg_temp_free(t1); break; @@ -4512,11 +4390,11 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, t1 = tcg_temp_new(); gen_base_offset_addr(ctx, t0, rs, lsq_offset); gen_load_fpr64(ctx, t1, rt); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8); gen_load_fpr64(ctx, t1, lsq_rt1); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); tcg_temp_free(t1); break; @@ -4529,9 +4407,9 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, t1 = tcg_temp_new(); tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB); tcg_gen_andi_tl(t1, t0, 3); -#ifndef TARGET_WORDS_BIGENDIAN - tcg_gen_xori_tl(t1, t1, 3); -#endif + if (!cpu_is_bigendian(ctx)) { + tcg_gen_xori_tl(t1, t1, 3); + } tcg_gen_shli_tl(t1, t1, 3); tcg_gen_andi_tl(t0, t0, ~3); tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL); @@ -4559,9 +4437,9 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, t1 = tcg_temp_new(); tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB); tcg_gen_andi_tl(t1, t0, 3); -#ifdef TARGET_WORDS_BIGENDIAN - tcg_gen_xori_tl(t1, t1, 3); -#endif + if (cpu_is_bigendian(ctx)) { + tcg_gen_xori_tl(t1, t1, 3); + } tcg_gen_shli_tl(t1, t1, 3); tcg_gen_andi_tl(t0, t0, ~3); tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL); @@ -4591,12 +4469,12 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, t1 = tcg_temp_new(); tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB); tcg_gen_andi_tl(t1, t0, 7); -#ifndef TARGET_WORDS_BIGENDIAN - tcg_gen_xori_tl(t1, t1, 7); -#endif + if (!cpu_is_bigendian(ctx)) { + tcg_gen_xori_tl(t1, t1, 7); + } tcg_gen_shli_tl(t1, t1, 3); tcg_gen_andi_tl(t0, t0, ~7); - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ); + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ); tcg_gen_shl_tl(t0, t0, t1); t2 = tcg_const_tl(-1); tcg_gen_shl_tl(t2, t2, t1); @@ -4613,12 +4491,12 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, t1 = tcg_temp_new(); tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB); tcg_gen_andi_tl(t1, t0, 7); -#ifdef TARGET_WORDS_BIGENDIAN - tcg_gen_xori_tl(t1, t1, 7); -#endif + if (cpu_is_bigendian(ctx)) { + tcg_gen_xori_tl(t1, t1, 7); + } tcg_gen_shli_tl(t1, t1, 3); tcg_gen_andi_tl(t0, t0, ~7); - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ); + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ); tcg_gen_shr_tl(t0, t0, t1); tcg_gen_xori_tl(t1, t1, 63); t2 = 
tcg_const_tl(0xfffffffffffffffeull); @@ -4771,13 +4649,12 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, if (rd) { gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0); } - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ | + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rt); break; #endif case OPC_GSLWXC1: - check_cp1_enabled(ctx); gen_base_offset_addr(ctx, t0, rs, offset); if (rd) { gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0); @@ -4790,12 +4667,11 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, break; #if defined(TARGET_MIPS64) case OPC_GSLDXC1: - check_cp1_enabled(ctx); gen_base_offset_addr(ctx, t0, rs, offset); if (rd) { gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0); } - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ | + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); gen_store_fpr64(ctx, t0, rt); break; @@ -4824,7 +4700,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, case OPC_GSSDX: t1 = tcg_temp_new(); gen_load_gpr(t1, rt); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); tcg_temp_free(t1); break; @@ -4840,7 +4716,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, case OPC_GSSDXC1: t1 = tcg_temp_new(); gen_load_fpr64(ctx, t1, rt); - tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, MO_TEQ | + tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); tcg_temp_free(t1); break; @@ -4854,7 +4730,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, /* Traps */ static void gen_trap(DisasContext *ctx, uint32_t opc, - int rs, int rt, int16_t imm) + int rs, int rt, int16_t imm, int code) { int cond; TCGv t0 = tcg_temp_new(); @@ -4899,6 +4775,11 @@ static void gen_trap(DisasContext *ctx, uint32_t opc, case OPC_TGEU: /* rs >= rs unsigned */ case OPC_TGEIU: /* r0 >= 0 unsigned */ /* Always trap */ +#ifdef CONFIG_USER_ONLY + /* Pass the break code along to cpu_loop. */ + tcg_gen_st_i32(tcg_constant_i32(code), cpu_env, + offsetof(CPUMIPSState, error_code)); +#endif generate_exception_end(ctx, EXCP_TRAP); break; case OPC_TLT: /* rs < rs */ @@ -4939,6 +4820,18 @@ static void gen_trap(DisasContext *ctx, uint32_t opc, tcg_gen_brcond_tl(TCG_COND_EQ, t0, t1, l1); break; } +#ifdef CONFIG_USER_ONLY + /* Pass the break code along to cpu_loop. */ + tcg_gen_st_i32(tcg_constant_i32(code), cpu_env, + offsetof(CPUMIPSState, error_code)); +#endif + /* Like save_cpu_state, only don't update saved values. */ + if (ctx->base.pc_next != ctx->saved_pc) { + gen_save_pc(ctx->base.pc_next); + } + if (ctx->hflags != ctx->saved_hflags) { + tcg_gen_movi_i32(hflags, ctx->hflags); + } generate_exception(ctx, EXCP_TRAP); gen_set_label(l1); } @@ -4954,12 +4847,7 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) tcg_gen_exit_tb(ctx->base.tb, n); } else { gen_save_pc(dest); - if (ctx->base.singlestep_enabled) { - save_cpu_state(ctx, 0); - gen_helper_raise_exception_debug(cpu_env); - } else { - tcg_gen_lookup_and_goto_ptr(); - } + tcg_gen_lookup_and_goto_ptr(); } } @@ -9170,12 +9058,7 @@ static void gen_mttr(CPUMIPSState *env, DisasContext *ctx, int rd, int rt, break; case 3: /* XXX: For now we support only a single FPU context. 
*/ - { - TCGv_i32 fs_tmp = tcg_const_i32(rd); - - gen_helper_0e2i(ctc1, t0, fs_tmp, rt); - tcg_temp_free_i32(fs_tmp); - } + gen_helper_0e2i(ctc1, t0, tcg_constant_i32(rd), rt); /* Stop translation as we may have changed hflags */ ctx->base.is_jmp = DISAS_STOP; break; @@ -9792,12 +9675,7 @@ static void gen_cp1(DisasContext *ctx, uint32_t opc, int rt, int fs) case OPC_CTC1: gen_load_gpr(t0, rt); save_cpu_state(ctx, 0); - { - TCGv_i32 fs_tmp = tcg_const_i32(fs); - - gen_helper_0e2i(ctc1, t0, fs_tmp, rt); - tcg_temp_free_i32(fs_tmp); - } + gen_helper_0e2i(ctc1, t0, tcg_constant_i32(fs), rt); /* Stop translation as we may have changed hflags */ ctx->base.is_jmp = DISAS_STOP; break; @@ -11476,7 +11354,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, check_cp1_registers(ctx, fd); { TCGv_i64 fp0 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ); + tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(fp0); } @@ -11487,7 +11365,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, { TCGv_i64 fp0 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ); + tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(fp0); } @@ -11507,7 +11385,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, { TCGv_i64 fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); - tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ); + tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ); tcg_temp_free_i64(fp0); } break; @@ -11517,7 +11395,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, { TCGv_i64 fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); - tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ); + tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ); tcg_temp_free_i64(fp0); } break; @@ -11550,17 +11428,17 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, gen_set_label(l1); tcg_gen_brcondi_tl(TCG_COND_NE, t0, 4, l2); tcg_temp_free(t0); -#ifdef TARGET_WORDS_BIGENDIAN - gen_load_fpr32(ctx, fp, fs); - gen_load_fpr32h(ctx, fph, ft); - gen_store_fpr32h(ctx, fp, fd); - gen_store_fpr32(ctx, fph, fd); -#else - gen_load_fpr32h(ctx, fph, fs); - gen_load_fpr32(ctx, fp, ft); - gen_store_fpr32(ctx, fph, fd); - gen_store_fpr32h(ctx, fp, fd); -#endif + if (cpu_is_bigendian(ctx)) { + gen_load_fpr32(ctx, fp, fs); + gen_load_fpr32h(ctx, fph, ft); + gen_store_fpr32h(ctx, fp, fd); + gen_store_fpr32(ctx, fph, fd); + } else { + gen_load_fpr32h(ctx, fph, fs); + gen_load_fpr32(ctx, fp, ft); + gen_store_fpr32(ctx, fph, fd); + gen_store_fpr32h(ctx, fp, fd); + } gen_set_label(l2); tcg_temp_free_i32(fp); tcg_temp_free_i32(fph); @@ -11929,10 +11807,6 @@ static void gen_branch(DisasContext *ctx, int insn_bytes) } else { tcg_gen_mov_tl(cpu_PC, btarget); } - if (ctx->base.singlestep_enabled) { - save_cpu_state(ctx, 0); - gen_helper_raise_exception_debug(cpu_env); - } tcg_gen_lookup_and_goto_ptr(); break; default: @@ -12007,13 +11881,11 @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, } else { /* OPC_JIC, OPC_JIALC */ TCGv tbase = tcg_temp_new(); - TCGv toffset = tcg_temp_new(); + TCGv toffset = tcg_constant_tl(offset); gen_load_gpr(tbase, rt); - tcg_gen_movi_tl(toffset, offset); gen_op_addr_add(ctx, btarget, tbase, toffset); tcg_temp_free(tbase); - tcg_temp_free(toffset); } break; default: @@ -12210,23 +12082,16 @@ static void gen_cache_operation(DisasContext *ctx, uint32_t op, int base, tcg_temp_free_i32(t0); } -static inline 
bool is_uhi(int sdbbp_code) +static inline bool is_uhi(DisasContext *ctx, int sdbbp_code) { #ifdef CONFIG_USER_ONLY return false; #else - return semihosting_enabled() && sdbbp_code == 1; + bool is_user = (ctx->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM; + return semihosting_enabled(is_user) && sdbbp_code == 1; #endif } -#ifdef CONFIG_USER_ONLY -/* The above should dead-code away any calls to this..*/ -static inline void gen_helper_do_semihosting(void *env) -{ - g_assert_not_reached(); -} -#endif - void gen_ldxs(DisasContext *ctx, int base, int index, int rd) { TCGv t0 = tcg_temp_new(); @@ -12308,12 +12173,16 @@ enum { #include "nanomips_translate.c.inc" /* MIPSDSP functions. */ -static void gen_mipsdsp_ld(DisasContext *ctx, uint32_t opc, - int rd, int base, int offset) + +/* Indexed load is not for DSP only */ +static void gen_mips_lx(DisasContext *ctx, uint32_t opc, + int rd, int base, int offset) { TCGv t0; - check_dsp(ctx); + if (!(ctx->insn_flags & INSN_OCTEON)) { + check_dsp(ctx); + } t0 = tcg_temp_new(); if (base == 0) { @@ -12339,7 +12208,7 @@ static void gen_mipsdsp_ld(DisasContext *ctx, uint32_t opc, break; #if defined(TARGET_MIPS64) case OPC_LDX: - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ); + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ); gen_store_gpr(t0, rd); break; #endif @@ -13768,7 +13637,6 @@ static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, TCGv t0; TCGv t1; TCGv v1_t; - TCGv v2_t; int16_t imm; if ((ret == 0) && (check_ret == 1)) { @@ -13779,10 +13647,8 @@ static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, t0 = tcg_temp_new(); t1 = tcg_temp_new(); v1_t = tcg_temp_new(); - v2_t = tcg_temp_new(); gen_load_gpr(v1_t, v1); - gen_load_gpr(v2_t, v2); switch (op1) { case OPC_EXTR_W_DSP: @@ -13948,8 +13814,7 @@ static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, break; case OPC_DEXTRV_S_H: tcg_gen_movi_tl(t0, v2); - tcg_gen_movi_tl(t1, v1); - gen_helper_dextr_s_h(cpu_gpr[ret], t0, t1, cpu_env); + gen_helper_dextr_s_h(cpu_gpr[ret], t0, v1_t, cpu_env); break; case OPC_DEXTRV_L: tcg_gen_movi_tl(t0, v2); @@ -13983,7 +13848,6 @@ static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, tcg_temp_free(t0); tcg_temp_free(t1); tcg_temp_free(v1_t); - tcg_temp_free(v2_t); } /* End MIPSDSP functions. 
*/ @@ -14039,8 +13903,8 @@ static void decode_opc_special_r6(CPUMIPSState *env, DisasContext *ctx) } break; case R6_OPC_SDBBP: - if (is_uhi(extract32(ctx->opcode, 6, 20))) { - gen_helper_do_semihosting(cpu_env); + if (is_uhi(ctx, extract32(ctx->opcode, 6, 20))) { + ctx->base.is_jmp = DISAS_SEMIHOST; } else { if (ctx->hflags & MIPS_HFLAG_SBRI) { gen_reserved_instruction(ctx); @@ -14144,13 +14008,12 @@ static void decode_opc_special_tx79(CPUMIPSState *env, DisasContext *ctx) static void decode_opc_special_legacy(CPUMIPSState *env, DisasContext *ctx) { - int rs, rt, rd, sa; + int rs, rt, rd; uint32_t op1; rs = (ctx->opcode >> 21) & 0x1f; rt = (ctx->opcode >> 16) & 0x1f; rd = (ctx->opcode >> 11) & 0x1f; - sa = (ctx->opcode >> 6) & 0x1f; op1 = MASK_SPECIAL(ctx->opcode); switch (op1) { @@ -14180,13 +14043,7 @@ static void decode_opc_special_legacy(CPUMIPSState *env, DisasContext *ctx) break; case OPC_MULT: case OPC_MULTU: - if (sa) { - check_insn(ctx, INSN_VR54XX); - op1 = MASK_MUL_VR54XX(ctx->opcode); - gen_mul_vr54xx(ctx, op1, rd, rs, rt); - } else { - gen_muldiv(ctx, op1, rd & 3, rs, rt); - } + gen_muldiv(ctx, op1, rd & 3, rs, rt); break; case OPC_DIV: case OPC_DIVU: @@ -14203,7 +14060,7 @@ static void decode_opc_special_legacy(CPUMIPSState *env, DisasContext *ctx) break; #endif case OPC_JR: - gen_compute_branch(ctx, op1, 4, rs, rd, sa, 4); + gen_compute_branch(ctx, op1, 4, rs, 0, 0, 4); break; case OPC_SPIM: #ifdef MIPS_STRICT_STANDARD @@ -14309,7 +14166,7 @@ static void decode_opc_special(CPUMIPSState *env, DisasContext *ctx) case OPC_TEQ: case OPC_TNE: check_insn(ctx, ISA_MIPS2); - gen_trap(ctx, op1, rs, rt, -1); + gen_trap(ctx, op1, rs, rt, -1, extract32(ctx->opcode, 6, 10)); break; case OPC_PMON: /* Pmon entry point, also R4010 selsl */ @@ -14317,14 +14174,14 @@ static void decode_opc_special(CPUMIPSState *env, DisasContext *ctx) MIPS_INVAL("PMON / selsl"); gen_reserved_instruction(ctx); #else - gen_helper_0e0i(pmon, sa); + gen_helper_pmon(cpu_env, tcg_constant_i32(sa)); #endif break; case OPC_SYSCALL: generate_exception_end(ctx, EXCP_SYSCALL); break; case OPC_BREAK: - generate_exception_end(ctx, EXCP_BREAK); + generate_exception_break(ctx, extract32(ctx->opcode, 6, 20)); break; case OPC_SYNC: check_insn(ctx, ISA_MIPS2); @@ -14458,8 +14315,8 @@ static void decode_opc_special2_legacy(CPUMIPSState *env, DisasContext *ctx) gen_cl(ctx, op1, rd, rs); break; case OPC_SDBBP: - if (is_uhi(extract32(ctx->opcode, 6, 20))) { - gen_helper_do_semihosting(cpu_env); + if (is_uhi(ctx, extract32(ctx->opcode, 6, 20))) { + ctx->base.is_jmp = DISAS_SEMIHOST; } else { /* * XXX: not clear which exception should be raised @@ -14567,7 +14424,7 @@ static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx) #endif #if defined(TARGET_MIPS64) case R6_OPC_SCD: - gen_st_cond(ctx, rt, rs, imm, MO_TEQ, false); + gen_st_cond(ctx, rt, rs, imm, MO_TEUQ, false); break; case R6_OPC_LLD: gen_ld(ctx, op1, rt, rs, imm); @@ -14670,7 +14527,7 @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx) case OPC_LBUX: case OPC_LHX: case OPC_LWX: - gen_mipsdsp_ld(ctx, op2, rd, rs, rt); + gen_mips_lx(ctx, op2, rd, rs, rt); break; default: /* Invalid */ MIPS_INVAL("MASK LX"); @@ -15443,11 +15300,10 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx) case OPC_TLTI: case OPC_TLTIU: case OPC_TEQI: - case OPC_TNEI: check_insn(ctx, ISA_MIPS2); check_insn_opc_removed(ctx, ISA_MIPS_R6); - gen_trap(ctx, op1, rs, -1, imm); + gen_trap(ctx, op1, rs, -1, imm, 0); break; case OPC_SIGRIE: 
check_insn(ctx, ISA_MIPS_R6); @@ -15739,12 +15595,8 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx) /* Treat as NOP. */ break; case OPC_PREF: - if (ctx->insn_flags & INSN_R5900) { - /* Treat as NOP. */ - } else { - check_insn(ctx, ISA_MIPS4 | ISA_MIPS_R1); - /* Treat as NOP. */ - } + check_insn(ctx, ISA_MIPS4 | ISA_MIPS_R1 | INSN_R5900); + /* Treat as NOP. */ break; /* Floating point (COP1). */ @@ -16011,7 +15863,7 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx) check_insn_opc_user_only(ctx, INSN_R5900); } check_mips_64(ctx); - gen_st_cond(ctx, rt, rs, imm, MO_TEQ, false); + gen_st_cond(ctx, rt, rs, imm, MO_TEUQ, false); break; case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC, OPC_DADDI */ if (ctx->insn_flags & ISA_MIPS_R6) { @@ -16098,6 +15950,19 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx) /* Transition to the auto-generated decoder. */ + /* Vendor specific extensions */ + if (cpu_supports_isa(env, INSN_R5900) && decode_ext_txx9(ctx, ctx->opcode)) { + return; + } + if (cpu_supports_isa(env, INSN_VR54XX) && decode_ext_vr54xx(ctx, ctx->opcode)) { + return; + } +#if defined(TARGET_MIPS64) + if (cpu_supports_isa(env, INSN_OCTEON) && decode_ext_octeon(ctx, ctx->opcode)) { + return; + } +#endif + /* ISA extensions */ if (ase_msa_available(env) && decode_ase_msa(ctx, ctx->opcode)) { return; @@ -16107,9 +15972,6 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx) if (cpu_supports_isa(env, ISA_MIPS_R6) && decode_isa_rel6(ctx, ctx->opcode)) { return; } - if (cpu_supports_isa(env, INSN_R5900) && decode_ext_txx9(ctx, ctx->opcode)) { - return; - } if (decode_opc_legacy(env, ctx)) { return; @@ -16126,6 +15988,7 @@ static void mips_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->page_start = ctx->base.pc_first & TARGET_PAGE_MASK; ctx->saved_pc = -1; ctx->insn_flags = env->insn_flags; + ctx->CP0_Config0 = env->CP0_Config0; ctx->CP0_Config1 = env->CP0_Config1; ctx->CP0_Config2 = env->CP0_Config2; ctx->CP0_Config3 = env->CP0_Config3; @@ -16159,8 +16022,19 @@ static void mips_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) #else ctx->mem_idx = hflags_mmu_index(ctx->hflags); #endif - ctx->default_tcg_memop_mask = (ctx->insn_flags & (ISA_MIPS_R6 | - INSN_LOONGSON3A)) ? MO_UNALN : MO_ALIGN; + ctx->default_tcg_memop_mask = (!(ctx->insn_flags & ISA_NANOMIPS32) && + (ctx->insn_flags & (ISA_MIPS_R6 | + INSN_LOONGSON3A))) ? MO_UNALN : MO_ALIGN; + + /* + * Execute a branch and its delay slot as a single instruction. + * This is what GDB expects and is consistent with what the + * hardware does (e.g. if a delay slot instruction faults, the + * reported PC is the PC of the branch). 
+ */ + if (ctx->base.singlestep_enabled && (ctx->hflags & MIPS_HFLAG_BMASK)) { + ctx->base.max_insns = 2; + } LOG_DISAS("\ntb %p idx %d hflags %04x\n", ctx->base.tb, ctx->mem_idx, ctx->hflags); @@ -16187,17 +16061,17 @@ static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) is_slot = ctx->hflags & MIPS_HFLAG_BMASK; if (ctx->insn_flags & ISA_NANOMIPS32) { - ctx->opcode = translator_lduw(env, ctx->base.pc_next); + ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next); insn_bytes = decode_isa_nanomips(env, ctx); } else if (!(ctx->hflags & MIPS_HFLAG_M16)) { - ctx->opcode = translator_ldl(env, ctx->base.pc_next); + ctx->opcode = translator_ldl(env, &ctx->base, ctx->base.pc_next); insn_bytes = 4; decode_opc(env, ctx); } else if (ctx->insn_flags & ASE_MICROMIPS) { - ctx->opcode = translator_lduw(env, ctx->base.pc_next); + ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next); insn_bytes = decode_isa_micromips(env, ctx); } else if (ctx->insn_flags & ASE_MIPS16) { - ctx->opcode = translator_lduw(env, ctx->base.pc_next); + ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next); insn_bytes = decode_ase_mips16e(env, ctx); } else { gen_reserved_instruction(ctx); @@ -16226,22 +16100,22 @@ static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) if (is_slot) { gen_branch(ctx, insn_bytes); } + if (ctx->base.is_jmp == DISAS_SEMIHOST) { + generate_exception_err(ctx, EXCP_SEMIHOST, insn_bytes); + } ctx->base.pc_next += insn_bytes; if (ctx->base.is_jmp != DISAS_NEXT) { return; } + /* - * Execute a branch and its delay slot as a single instruction. - * This is what GDB expects and is consistent with what the - * hardware does (e.g. if a delay slot instruction faults, the - * reported PC is the PC of the branch). + * End the TB on (most) page crossings. + * See mips_tr_init_disas_context about single-stepping a branch + * together with its delay slot. 
*/ - if (ctx->base.singlestep_enabled && - (ctx->hflags & MIPS_HFLAG_BMASK) == 0) { - ctx->base.is_jmp = DISAS_TOO_MANY; - } - if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE) { + if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE + && !ctx->base.singlestep_enabled) { ctx->base.is_jmp = DISAS_TOO_MANY; } } @@ -16250,35 +16124,31 @@ static void mips_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); - if (ctx->base.singlestep_enabled && ctx->base.is_jmp != DISAS_NORETURN) { - save_cpu_state(ctx, ctx->base.is_jmp != DISAS_EXIT); - gen_helper_raise_exception_debug(cpu_env); - } else { - switch (ctx->base.is_jmp) { - case DISAS_STOP: - gen_save_pc(ctx->base.pc_next); - tcg_gen_lookup_and_goto_ptr(); - break; - case DISAS_NEXT: - case DISAS_TOO_MANY: - save_cpu_state(ctx, 0); - gen_goto_tb(ctx, 0, ctx->base.pc_next); - break; - case DISAS_EXIT: - tcg_gen_exit_tb(NULL, 0); - break; - case DISAS_NORETURN: - break; - default: - g_assert_not_reached(); - } + switch (ctx->base.is_jmp) { + case DISAS_STOP: + gen_save_pc(ctx->base.pc_next); + tcg_gen_lookup_and_goto_ptr(); + break; + case DISAS_NEXT: + case DISAS_TOO_MANY: + save_cpu_state(ctx, 0); + gen_goto_tb(ctx, 0, ctx->base.pc_next); + break; + case DISAS_EXIT: + tcg_gen_exit_tb(NULL, 0); + break; + case DISAS_NORETURN: + break; + default: + g_assert_not_reached(); } } -static void mips_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs) +static void mips_tr_disas_log(const DisasContextBase *dcbase, + CPUState *cs, FILE *logfile) { - qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first)); - log_target_disas(cs, dcbase->pc_first, dcbase->tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first)); + target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size); } static const TranslatorOps mips_tr_ops = { @@ -16290,11 +16160,12 @@ static const TranslatorOps mips_tr_ops = { .disas_log = mips_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext ctx; - translator_loop(&mips_tr_ops, &ctx.base, cs, tb, max_insns); + translator_loop(cs, tb, max_insns, pc, host_pc, &mips_tr_ops, &ctx.base); } void mips_tcg_init(void) @@ -16362,9 +16233,13 @@ void mips_tcg_init(void) } } -void restore_state_to_opc(CPUMIPSState *env, TranslationBlock *tb, - target_ulong *data) +void mips_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) { + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; + env->active_tc.PC = data[0]; env->hflags &= ~MIPS_HFLAG_BMASK; env->hflags |= data[1]; diff --git a/target/mips/tcg/translate.h b/target/mips/tcg/translate.h index c25fad597d..69f85841d2 100644 --- a/target/mips/tcg/translate.h +++ b/target/mips/tcg/translate.h @@ -8,6 +8,7 @@ #ifndef TARGET_MIPS_TRANSLATE_H #define TARGET_MIPS_TRANSLATE_H +#include "qemu/log.h" #include "exec/translator.h" #define MIPS_DEBUG_DISAS 0 @@ -18,6 +19,7 @@ typedef struct DisasContext { target_ulong page_start; uint32_t opcode; uint64_t insn_flags; + int32_t CP0_Config0; int32_t CP0_Config1; int32_t CP0_Config2; int32_t CP0_Config3; @@ -49,6 +51,10 @@ typedef struct DisasContext { int gi; } DisasContext; +#define DISAS_STOP DISAS_TARGET_0 +#define DISAS_EXIT DISAS_TARGET_1 +#define DISAS_SEMIHOST DISAS_TARGET_2 + /* MIPS major opcodes */ #define MASK_OP_MAJOR(op) (op & (0x3F << 26)) @@ 
-113,9 +119,22 @@ enum { OPC_BC1TANY4 = (0x01 << 16) | OPC_BC1ANY4, }; +#define gen_helper_0e1i(name, arg1, arg2) do { \ + gen_helper_##name(cpu_env, arg1, tcg_constant_i32(arg2)); \ + } while (0) + +#define gen_helper_1e0i(name, ret, arg1) do { \ + gen_helper_##name(ret, cpu_env, tcg_constant_i32(arg1)); \ + } while (0) + +#define gen_helper_0e2i(name, arg1, arg2, arg3) do { \ + gen_helper_##name(cpu_env, arg1, arg2, tcg_constant_i32(arg3));\ + } while (0) + void generate_exception(DisasContext *ctx, int excp); void generate_exception_err(DisasContext *ctx, int excp, int err); void generate_exception_end(DisasContext *ctx, int excp); +void generate_exception_break(DisasContext *ctx, int code); void gen_reserved_instruction(DisasContext *ctx); void check_insn(DisasContext *ctx, uint64_t flags); @@ -200,6 +219,21 @@ bool decode_ase_msa(DisasContext *ctx, uint32_t insn); bool decode_ext_txx9(DisasContext *ctx, uint32_t insn); #if defined(TARGET_MIPS64) bool decode_ext_tx79(DisasContext *ctx, uint32_t insn); +bool decode_ext_octeon(DisasContext *ctx, uint32_t insn); #endif +bool decode_ext_vr54xx(DisasContext *ctx, uint32_t insn); + +/* + * Helpers for implementing sets of trans_* functions. + * Defer the implementation of NAME to FUNC, with optional extra arguments. + */ +#define TRANS(NAME, FUNC, ...) \ + static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ + { return FUNC(ctx, a, __VA_ARGS__); } + +static inline bool cpu_is_bigendian(DisasContext *ctx) +{ + return extract32(ctx->CP0_Config0, CP0C0_BE, 1); +} #endif diff --git a/target/mips/tcg/tx79.decode b/target/mips/tcg/tx79.decode index 03a25a5096..57d87a2076 100644 --- a/target/mips/tcg/tx79.decode +++ b/target/mips/tcg/tx79.decode @@ -11,20 +11,20 @@ # when creating helpers common to those for the individual # instruction patterns. -&rtype rs rt rd sa +&r rs rt rd sa -&itype base rt offset +&i base rt offset ########################################################################### # Named instruction formats. These are generally used to # reduce the amount of duplication between instruction patterns. -@rs_rt_rd ...... rs:5 rt:5 rd:5 ..... ...... &rtype sa=0 -@rt_rd ...... ..... rt:5 rd:5 ..... ...... &rtype rs=0 sa=0 -@rs ...... rs:5 ..... .......... ...... &rtype rt=0 rd=0 sa=0 -@rd ...... .......... rd:5 ..... ...... &rtype rs=0 rt=0 sa=0 +@rs_rt_rd ...... rs:5 rt:5 rd:5 ..... ...... &r sa=0 +@rt_rd ...... ..... rt:5 rd:5 ..... ...... &r sa=0 rs=0 +@rs ...... rs:5 ..... .......... ...... &r sa=0 rt=0 rd=0 +@rd ...... .......... rd:5 ..... ...... &r sa=0 rs=0 rt=0 -@ldst ...... base:5 rt:5 offset:16 &itype +@ldst ...... 
base:5 rt:5 offset:16 &i ########################################################################### diff --git a/target/mips/tcg/tx79_translate.c b/target/mips/tcg/tx79_translate.c index 395d6afa1f..4e479c2d10 100644 --- a/target/mips/tcg/tx79_translate.c +++ b/target/mips/tcg/tx79_translate.c @@ -64,28 +64,28 @@ bool decode_ext_tx79(DisasContext *ctx, uint32_t insn) * MTLO1 rs Move To LO1 Register */ -static bool trans_MFHI1(DisasContext *ctx, arg_rtype *a) +static bool trans_MFHI1(DisasContext *ctx, arg_r *a) { gen_store_gpr(cpu_HI[1], a->rd); return true; } -static bool trans_MFLO1(DisasContext *ctx, arg_rtype *a) +static bool trans_MFLO1(DisasContext *ctx, arg_r *a) { gen_store_gpr(cpu_LO[1], a->rd); return true; } -static bool trans_MTHI1(DisasContext *ctx, arg_rtype *a) +static bool trans_MTHI1(DisasContext *ctx, arg_r *a) { gen_load_gpr(cpu_HI[1], a->rs); return true; } -static bool trans_MTLO1(DisasContext *ctx, arg_rtype *a) +static bool trans_MTLO1(DisasContext *ctx, arg_r *a) { gen_load_gpr(cpu_LO[1], a->rs); @@ -116,7 +116,7 @@ static bool trans_MTLO1(DisasContext *ctx, arg_rtype *a) * PSUBUW rd, rs, rt Parallel Subtract with Unsigned saturation Word */ -static bool trans_parallel_arith(DisasContext *ctx, arg_rtype *a, +static bool trans_parallel_arith(DisasContext *ctx, arg_r *a, void (*gen_logic_i64)(TCGv_i64, TCGv_i64, TCGv_i64)) { TCGv_i64 ax, bx; @@ -146,19 +146,19 @@ static bool trans_parallel_arith(DisasContext *ctx, arg_rtype *a, } /* Parallel Subtract Byte */ -static bool trans_PSUBB(DisasContext *ctx, arg_rtype *a) +static bool trans_PSUBB(DisasContext *ctx, arg_r *a) { return trans_parallel_arith(ctx, a, tcg_gen_vec_sub8_i64); } /* Parallel Subtract Halfword */ -static bool trans_PSUBH(DisasContext *ctx, arg_rtype *a) +static bool trans_PSUBH(DisasContext *ctx, arg_r *a) { return trans_parallel_arith(ctx, a, tcg_gen_vec_sub16_i64); } /* Parallel Subtract Word */ -static bool trans_PSUBW(DisasContext *ctx, arg_rtype *a) +static bool trans_PSUBW(DisasContext *ctx, arg_r *a) { return trans_parallel_arith(ctx, a, tcg_gen_vec_sub32_i64); } @@ -189,25 +189,25 @@ static bool trans_PSUBW(DisasContext *ctx, arg_rtype *a) */ /* Parallel And */ -static bool trans_PAND(DisasContext *ctx, arg_rtype *a) +static bool trans_PAND(DisasContext *ctx, arg_r *a) { return trans_parallel_arith(ctx, a, tcg_gen_and_i64); } /* Parallel Or */ -static bool trans_POR(DisasContext *ctx, arg_rtype *a) +static bool trans_POR(DisasContext *ctx, arg_r *a) { return trans_parallel_arith(ctx, a, tcg_gen_or_i64); } /* Parallel Exclusive Or */ -static bool trans_PXOR(DisasContext *ctx, arg_rtype *a) +static bool trans_PXOR(DisasContext *ctx, arg_r *a) { return trans_parallel_arith(ctx, a, tcg_gen_xor_i64); } /* Parallel Not Or */ -static bool trans_PNOR(DisasContext *ctx, arg_rtype *a) +static bool trans_PNOR(DisasContext *ctx, arg_r *a) { return trans_parallel_arith(ctx, a, tcg_gen_nor_i64); } @@ -237,7 +237,7 @@ static bool trans_PNOR(DisasContext *ctx, arg_rtype *a) * PCEQW rd, rs, rt Parallel Compare for Equal Word */ -static bool trans_parallel_compare(DisasContext *ctx, arg_rtype *a, +static bool trans_parallel_compare(DisasContext *ctx, arg_r *a, TCGCond cond, unsigned wlen) { TCGv_i64 c0, c1, ax, bx, t0, t1, t2; @@ -286,37 +286,37 @@ static bool trans_parallel_compare(DisasContext *ctx, arg_rtype *a, } /* Parallel Compare for Greater Than Byte */ -static bool trans_PCGTB(DisasContext *ctx, arg_rtype *a) +static bool trans_PCGTB(DisasContext *ctx, arg_r *a) { return trans_parallel_compare(ctx, a, 
TCG_COND_GE, 8); } /* Parallel Compare for Equal Byte */ -static bool trans_PCEQB(DisasContext *ctx, arg_rtype *a) +static bool trans_PCEQB(DisasContext *ctx, arg_r *a) { return trans_parallel_compare(ctx, a, TCG_COND_EQ, 8); } /* Parallel Compare for Greater Than Halfword */ -static bool trans_PCGTH(DisasContext *ctx, arg_rtype *a) +static bool trans_PCGTH(DisasContext *ctx, arg_r *a) { return trans_parallel_compare(ctx, a, TCG_COND_GE, 16); } /* Parallel Compare for Equal Halfword */ -static bool trans_PCEQH(DisasContext *ctx, arg_rtype *a) +static bool trans_PCEQH(DisasContext *ctx, arg_r *a) { return trans_parallel_compare(ctx, a, TCG_COND_EQ, 16); } /* Parallel Compare for Greater Than Word */ -static bool trans_PCGTW(DisasContext *ctx, arg_rtype *a) +static bool trans_PCGTW(DisasContext *ctx, arg_r *a) { return trans_parallel_compare(ctx, a, TCG_COND_GE, 32); } /* Parallel Compare for Equal Word */ -static bool trans_PCEQW(DisasContext *ctx, arg_rtype *a) +static bool trans_PCEQW(DisasContext *ctx, arg_r *a) { return trans_parallel_compare(ctx, a, TCG_COND_EQ, 32); } @@ -334,7 +334,7 @@ static bool trans_PCEQW(DisasContext *ctx, arg_rtype *a) * SQ rt, offset(base) Store Quadword */ -static bool trans_LQ(DisasContext *ctx, arg_itype *a) +static bool trans_LQ(DisasContext *ctx, arg_i *a) { TCGv_i64 t0; TCGv addr; @@ -355,12 +355,12 @@ static bool trans_LQ(DisasContext *ctx, arg_itype *a) tcg_gen_andi_tl(addr, addr, ~0xf); /* Lower half */ - tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEQ); + tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEUQ); gen_store_gpr(t0, a->rt); /* Upper half */ tcg_gen_addi_i64(addr, addr, 8); - tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEQ); + tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEUQ); gen_store_gpr_hi(t0, a->rt); tcg_temp_free(t0); @@ -369,7 +369,7 @@ static bool trans_LQ(DisasContext *ctx, arg_itype *a) return true; } -static bool trans_SQ(DisasContext *ctx, arg_itype *a) +static bool trans_SQ(DisasContext *ctx, arg_i *a) { TCGv_i64 t0 = tcg_temp_new_i64(); TCGv addr = tcg_temp_new(); @@ -383,12 +383,12 @@ static bool trans_SQ(DisasContext *ctx, arg_itype *a) /* Lower half */ gen_load_gpr(t0, a->rt); - tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEQ); + tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEUQ); /* Upper half */ tcg_gen_addi_i64(addr, addr, 8); gen_load_gpr_hi(t0, a->rt); - tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEQ); + tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEUQ); tcg_temp_free(addr); tcg_temp_free(t0); @@ -437,7 +437,7 @@ static bool trans_SQ(DisasContext *ctx, arg_itype *a) */ /* Parallel Pack to Word */ -static bool trans_PPACW(DisasContext *ctx, arg_rtype *a) +static bool trans_PPACW(DisasContext *ctx, arg_r *a) { TCGv_i64 a0, b0, t0; @@ -473,7 +473,7 @@ static void gen_pextw(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 a, TCGv_i64 b) tcg_gen_deposit_i64(dh, a, b, 0, 32); } -static bool trans_PEXTLx(DisasContext *ctx, arg_rtype *a, unsigned wlen) +static bool trans_PEXTLx(DisasContext *ctx, arg_r *a, unsigned wlen) { TCGv_i64 ax, bx; @@ -514,19 +514,19 @@ static bool trans_PEXTLx(DisasContext *ctx, arg_rtype *a, unsigned wlen) } /* Parallel Extend Lower from Byte */ -static bool trans_PEXTLB(DisasContext *ctx, arg_rtype *a) +static bool trans_PEXTLB(DisasContext *ctx, arg_r *a) { return trans_PEXTLx(ctx, a, 8); } /* Parallel Extend Lower from Halfword */ -static bool trans_PEXTLH(DisasContext *ctx, arg_rtype *a) +static bool trans_PEXTLH(DisasContext *ctx, arg_r *a) { return trans_PEXTLx(ctx, a, 16); } /* Parallel 
Extend Lower from Word */ -static bool trans_PEXTLW(DisasContext *ctx, arg_rtype *a) +static bool trans_PEXTLW(DisasContext *ctx, arg_r *a) { TCGv_i64 ax, bx; @@ -549,7 +549,7 @@ static bool trans_PEXTLW(DisasContext *ctx, arg_rtype *a) } /* Parallel Extend Upper from Word */ -static bool trans_PEXTUW(DisasContext *ctx, arg_rtype *a) +static bool trans_PEXTUW(DisasContext *ctx, arg_r *a) { TCGv_i64 ax, bx; @@ -593,7 +593,7 @@ static bool trans_PEXTUW(DisasContext *ctx, arg_rtype *a) */ /* Parallel Copy Halfword */ -static bool trans_PCPYH(DisasContext *s, arg_rtype *a) +static bool trans_PCPYH(DisasContext *s, arg_r *a) { if (a->rd == 0) { /* nop */ @@ -615,7 +615,7 @@ static bool trans_PCPYH(DisasContext *s, arg_rtype *a) } /* Parallel Copy Lower Doubleword */ -static bool trans_PCPYLD(DisasContext *s, arg_rtype *a) +static bool trans_PCPYLD(DisasContext *s, arg_r *a) { if (a->rd == 0) { /* nop */ @@ -638,7 +638,7 @@ static bool trans_PCPYLD(DisasContext *s, arg_rtype *a) } /* Parallel Copy Upper Doubleword */ -static bool trans_PCPYUD(DisasContext *s, arg_rtype *a) +static bool trans_PCPYUD(DisasContext *s, arg_r *a) { if (a->rd == 0) { /* nop */ @@ -657,7 +657,7 @@ static bool trans_PCPYUD(DisasContext *s, arg_rtype *a) } /* Parallel Rotate 3 Words Left */ -static bool trans_PROT3W(DisasContext *ctx, arg_rtype *a) +static bool trans_PROT3W(DisasContext *ctx, arg_r *a) { TCGv_i64 ax; diff --git a/target/mips/tcg/user/meson.build b/target/mips/tcg/user/meson.build deleted file mode 100644 index 79badcd321..0000000000 --- a/target/mips/tcg/user/meson.build +++ /dev/null @@ -1,3 +0,0 @@ -mips_user_ss.add(files( - 'tlb_helper.c', -)) diff --git a/target/mips/tcg/user/tlb_helper.c b/target/mips/tcg/user/tlb_helper.c deleted file mode 100644 index b835144b82..0000000000 --- a/target/mips/tcg/user/tlb_helper.c +++ /dev/null @@ -1,64 +0,0 @@ -/* - * MIPS TLB (Translation lookaside buffer) helpers. - * - * Copyright (c) 2004-2005 Jocelyn Mayer - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . 
- */ -#include "qemu/osdep.h" - -#include "cpu.h" -#include "exec/exec-all.h" -#include "internal.h" - -static void raise_mmu_exception(CPUMIPSState *env, target_ulong address, - MMUAccessType access_type) -{ - CPUState *cs = env_cpu(env); - - env->error_code = 0; - if (access_type == MMU_INST_FETCH) { - env->error_code |= EXCP_INST_NOTAVAIL; - } - - /* Reference to kernel address from user mode or supervisor mode */ - /* Reference to supervisor address from user mode */ - if (access_type == MMU_DATA_STORE) { - cs->exception_index = EXCP_AdES; - } else { - cs->exception_index = EXCP_AdEL; - } - - /* Raise exception */ - if (!(env->hflags & MIPS_HFLAG_DM)) { - env->CP0_BadVAddr = address; - } -} - -bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; - - /* data access */ - raise_mmu_exception(env, address, access_type); - do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr); -} - -void mips_cpu_do_interrupt(CPUState *cs) -{ - cs->exception_index = EXCP_NONE; -} diff --git a/target/mips/tcg/vr54xx.decode b/target/mips/tcg/vr54xx.decode new file mode 100644 index 0000000000..4fc708d80a --- /dev/null +++ b/target/mips/tcg/vr54xx.decode @@ -0,0 +1,27 @@ +# MIPS VR5432 instruction set extensions +# +# Copyright (C) 2021 Philippe Mathieu-Daudé +# +# SPDX-License-Identifier: LGPL-2.1-or-later +# +# Reference: VR5432 Microprocessor User’s Manual +# (Document Number U13751EU5V0UM00) + +&r rs rt rd + +@rs_rt_rd ...... rs:5 rt:5 rd:5 ..... ...... &r + +MULS 000000 ..... ..... ..... 00011011000 @rs_rt_rd +MULSU 000000 ..... ..... ..... 00011011001 @rs_rt_rd +MACC 000000 ..... ..... ..... 00101011000 @rs_rt_rd +MACCU 000000 ..... ..... ..... 00101011001 @rs_rt_rd +MSAC 000000 ..... ..... ..... 00111011000 @rs_rt_rd +MSACU 000000 ..... ..... ..... 00111011001 @rs_rt_rd +MULHI 000000 ..... ..... ..... 01001011000 @rs_rt_rd +MULHIU 000000 ..... ..... ..... 01001011001 @rs_rt_rd +MULSHI 000000 ..... ..... ..... 01011011000 @rs_rt_rd +MULSHIU 000000 ..... ..... ..... 01011011001 @rs_rt_rd +MACCHI 000000 ..... ..... ..... 01101011000 @rs_rt_rd +MACCHIU 000000 ..... ..... ..... 01101011001 @rs_rt_rd +MSACHI 000000 ..... ..... ..... 01111011000 @rs_rt_rd +MSACHIU 000000 ..... ..... ..... 01111011001 @rs_rt_rd diff --git a/target/mips/tcg/vr54xx_helper.c b/target/mips/tcg/vr54xx_helper.c new file mode 100644 index 0000000000..2255bd1116 --- /dev/null +++ b/target/mips/tcg/vr54xx_helper.c @@ -0,0 +1,142 @@ +/* + * MIPS VR5432 emulation helpers + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/helper-proto.h" + +/* 64 bits arithmetic for 32 bits hosts */ +static inline uint64_t get_HILO(CPUMIPSState *env) +{ + return ((uint64_t)(env->active_tc.HI[0]) << 32) | + (uint32_t)env->active_tc.LO[0]; +} + +static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO) +{ + env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF); + return env->active_tc.HI[0] = (int32_t)(HILO >> 32); +} + +static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO) +{ + target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF); + env->active_tc.HI[0] = (int32_t)(HILO >> 32); + return tmp; +} + +/* Multiplication variants of the vr54xx. */ +target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2)); +} + +target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 * + (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2); +} + +target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2); +} + +target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HI_LOT0(env, (uint64_t)get_HILO(env) + (uint64_t)(uint32_t)arg1 * + (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (uint64_t)get_HILO(env) + (uint64_t)(uint32_t)arg1 * + (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2); +} + +target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2); +} + +target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HI_LOT0(env, (uint64_t)get_HILO(env) - (uint64_t)(uint32_t)arg1 * + (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (uint64_t)get_HILO(env) - (uint64_t)(uint32_t)arg1 * + (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2); +} + +target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 * + (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2); +} + +target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 * + (uint64_t)(uint32_t)arg2); +} diff --git a/target/mips/tcg/vr54xx_helper.h.inc b/target/mips/tcg/vr54xx_helper.h.inc new file mode 100644 
index 0000000000..50b1f5b818 --- /dev/null +++ b/target/mips/tcg/vr54xx_helper.h.inc @@ -0,0 +1,24 @@ +/* + * MIPS NEC Vr54xx instruction emulation helpers for QEMU. + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * Copyright (c) 2006 Marius Groeger (FPU operations) + * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support) + * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +DEF_HELPER_3(muls, tl, env, tl, tl) +DEF_HELPER_3(mulsu, tl, env, tl, tl) +DEF_HELPER_3(macc, tl, env, tl, tl) +DEF_HELPER_3(maccu, tl, env, tl, tl) +DEF_HELPER_3(msac, tl, env, tl, tl) +DEF_HELPER_3(msacu, tl, env, tl, tl) +DEF_HELPER_3(mulhi, tl, env, tl, tl) +DEF_HELPER_3(mulhiu, tl, env, tl, tl) +DEF_HELPER_3(mulshi, tl, env, tl, tl) +DEF_HELPER_3(mulshiu, tl, env, tl, tl) +DEF_HELPER_3(macchi, tl, env, tl, tl) +DEF_HELPER_3(macchiu, tl, env, tl, tl) +DEF_HELPER_3(msachi, tl, env, tl, tl) +DEF_HELPER_3(msachiu, tl, env, tl, tl) diff --git a/target/mips/tcg/vr54xx_translate.c b/target/mips/tcg/vr54xx_translate.c new file mode 100644 index 0000000000..3e2c98f2c6 --- /dev/null +++ b/target/mips/tcg/vr54xx_translate.c @@ -0,0 +1,72 @@ +/* + * VR5432 extensions translation routines + * + * Reference: VR5432 Microprocessor User’s Manual + * (Document Number U13751EU5V0UM00) + * + * Copyright (c) 2021 Philippe Mathieu-Daudé + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "tcg/tcg-op.h" +#include "exec/helper-gen.h" +#include "translate.h" +#include "internal.h" + +/* Include the auto-generated decoder. */ +#include "decode-vr54xx.c.inc" + +/* + * Integer Multiply-Accumulate Instructions + * + * MACC Multiply, accumulate, and move LO + * MACCHI Multiply, accumulate, and move HI + * MACCHIU Unsigned multiply, accumulate, and move HI + * MACCU Unsigned multiply, accumulate, and move LO + * MSAC Multiply, negate, accumulate, and move LO + * MSACHI Multiply, negate, accumulate, and move HI + * MSACHIU Unsigned multiply, negate, accumulate, and move HI + * MSACU Unsigned multiply, negate, accumulate, and move LO + * MULHI Multiply and move HI + * MULHIU Unsigned multiply and move HI + * MULS Multiply, negate, and move LO + * MULSHI Multiply, negate, and move HI + * MULSHIU Unsigned multiply, negate, and move HI + * MULSU Unsigned multiply, negate, and move LO + */ + +static bool trans_mult_acc(DisasContext *ctx, arg_r *a, + void (*gen_helper_mult_acc)(TCGv, TCGv_ptr, TCGv, TCGv)) +{ + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + + gen_load_gpr(t0, a->rs); + gen_load_gpr(t1, a->rt); + + gen_helper_mult_acc(t0, cpu_env, t0, t1); + + gen_store_gpr(t0, a->rd); + + tcg_temp_free(t0); + tcg_temp_free(t1); + + return false; +} + +TRANS(MACC, trans_mult_acc, gen_helper_macc); +TRANS(MACCHI, trans_mult_acc, gen_helper_macchi); +TRANS(MACCHIU, trans_mult_acc, gen_helper_macchiu); +TRANS(MACCU, trans_mult_acc, gen_helper_maccu); +TRANS(MSAC, trans_mult_acc, gen_helper_msac); +TRANS(MSACHI, trans_mult_acc, gen_helper_msachi); +TRANS(MSACHIU, trans_mult_acc, gen_helper_msachiu); +TRANS(MSACU, trans_mult_acc, gen_helper_msacu); +TRANS(MULHI, trans_mult_acc, gen_helper_mulhi); +TRANS(MULHIU, trans_mult_acc, gen_helper_mulhiu); +TRANS(MULS, trans_mult_acc, gen_helper_muls); +TRANS(MULSHI, trans_mult_acc, gen_helper_mulshi); +TRANS(MULSHIU, trans_mult_acc, gen_helper_mulshiu); +TRANS(MULSU, trans_mult_acc, gen_helper_mulsu); diff --git a/target/nios2/cpu-param.h b/target/nios2/cpu-param.h index 38bedbfd61..177d720864 100--- a/target/nios2/cpu-param.h +++ b/target/nios2/cpu-param.h 
@@ -6,7 +6,7 @@ */ #ifndef NIOS2_CPU_PARAM_H -#define NIOS2_CPU_PARAM_H 1 +#define NIOS2_CPU_PARAM_H #define TARGET_LONG_BITS 32 #define TARGET_PAGE_BITS 12 diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c index 5e37defef8..9a5351bc81 100644 --- a/target/nios2/cpu.c +++ b/target/nios2/cpu.c @@ -31,12 +31,30 @@ static void nios2_cpu_set_pc(CPUState *cs, vaddr value) Nios2CPU *cpu = NIOS2_CPU(cs); CPUNios2State *env = &cpu->env; - env->regs[R_PC] = value; + env->pc = value; +} + +static vaddr nios2_cpu_get_pc(CPUState *cs) +{ + Nios2CPU *cpu = NIOS2_CPU(cs); + CPUNios2State *env = &cpu->env; + + return env->pc; +} + +static void nios2_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + Nios2CPU *cpu = NIOS2_CPU(cs); + CPUNios2State *env = &cpu->env; + + env->pc = data[0]; } static bool nios2_cpu_has_work(CPUState *cs) { - return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); + return cs->interrupt_request & CPU_INTERRUPT_HARD; } static void nios2_cpu_reset(DeviceState *dev) @@ -46,39 +64,46 @@ static void nios2_cpu_reset(DeviceState *dev) Nios2CPUClass *ncc = NIOS2_CPU_GET_CLASS(cpu); CPUNios2State *env = &cpu->env; - if (qemu_loglevel_mask(CPU_LOG_RESET)) { - qemu_log("CPU Reset (CPU %d)\n", cs->cpu_index); - log_cpu_state(cs, 0); - } - ncc->parent_reset(dev); - memset(env->regs, 0, sizeof(uint32_t) * NUM_CORE_REGS); - env->regs[R_PC] = cpu->reset_addr; + memset(env->ctrl, 0, sizeof(env->ctrl)); + env->pc = cpu->reset_addr; #if defined(CONFIG_USER_ONLY) /* Start in user mode with interrupts enabled. */ - env->regs[CR_STATUS] = CR_STATUS_U | CR_STATUS_PIE; + env->ctrl[CR_STATUS] = CR_STATUS_RSIE | CR_STATUS_U | CR_STATUS_PIE; + memset(env->regs, 0, sizeof(env->regs)); #else - env->regs[CR_STATUS] = 0; + env->ctrl[CR_STATUS] = CR_STATUS_RSIE; + nios2_update_crs(env); + memset(env->shadow_regs, 0, sizeof(env->shadow_regs)); #endif } #ifndef CONFIG_USER_ONLY -static void nios2_cpu_set_irq(void *opaque, int irq, int level) +static void eic_set_irq(void *opaque, int irq, int level) +{ + Nios2CPU *cpu = opaque; + CPUState *cs = CPU(cpu); + + if (level) { + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } +} + +static void iic_set_irq(void *opaque, int irq, int level) { Nios2CPU *cpu = opaque; CPUNios2State *env = &cpu->env; CPUState *cs = CPU(cpu); - env->regs[CR_IPENDING] = deposit32(env->regs[CR_IPENDING], irq, 1, !!level); + env->ctrl[CR_IPENDING] = deposit32(env->ctrl[CR_IPENDING], irq, 1, !!level); - env->irq_pending = env->regs[CR_IPENDING] & env->regs[CR_IENABLE]; - - if (env->irq_pending && (env->regs[CR_STATUS] & CR_STATUS_PIE)) { - env->irq_pending = 0; + if (env->ctrl[CR_IPENDING]) { cpu_interrupt(cs, CPU_INTERRUPT_HARD); - } else if (!env->irq_pending) { + } else { cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); } } @@ -92,15 +117,6 @@ static void nios2_cpu_initfn(Object *obj) #if !defined(CONFIG_USER_ONLY) mmu_init(&cpu->env); - - /* - * These interrupt lines model the IIC (internal interrupt - * controller). QEMU does not currently support the EIC - * (external interrupt controller) -- if we did it would be - * a separate device in hw/intc with a custom interface to - * the CPU, and boards using it would not wire up these IRQ lines. 
- */ - qdev_init_gpio_in_named(DEVICE(cpu), nios2_cpu_set_irq, "IRQ", 32); #endif } @@ -109,70 +125,182 @@ static ObjectClass *nios2_cpu_class_by_name(const char *cpu_model) return object_class_by_name(TYPE_NIOS2_CPU); } +static void realize_cr_status(CPUState *cs) +{ + Nios2CPU *cpu = NIOS2_CPU(cs); + + /* Begin with all fields of all registers are reserved. */ + memset(cpu->cr_state, 0, sizeof(cpu->cr_state)); + + /* + * The combination of writable and readonly is the set of all + * non-reserved fields. We apply writable as a mask to bits, + * and merge in existing readonly bits, before storing. + */ +#define WR_REG(C) cpu->cr_state[C].writable = -1 +#define RO_REG(C) cpu->cr_state[C].readonly = -1 +#define WR_FIELD(C, F) cpu->cr_state[C].writable |= R_##C##_##F##_MASK +#define RO_FIELD(C, F) cpu->cr_state[C].readonly |= R_##C##_##F##_MASK + + WR_FIELD(CR_STATUS, PIE); + WR_REG(CR_ESTATUS); + WR_REG(CR_BSTATUS); + RO_REG(CR_CPUID); + RO_REG(CR_EXCEPTION); + WR_REG(CR_BADADDR); + + if (cpu->eic_present) { + WR_FIELD(CR_STATUS, RSIE); + RO_FIELD(CR_STATUS, NMI); + WR_FIELD(CR_STATUS, PRS); + RO_FIELD(CR_STATUS, CRS); + WR_FIELD(CR_STATUS, IL); + WR_FIELD(CR_STATUS, IH); + } else { + RO_FIELD(CR_STATUS, RSIE); + WR_REG(CR_IENABLE); + RO_REG(CR_IPENDING); + } + + if (cpu->mmu_present) { + WR_FIELD(CR_STATUS, U); + WR_FIELD(CR_STATUS, EH); + + WR_FIELD(CR_PTEADDR, VPN); + WR_FIELD(CR_PTEADDR, PTBASE); + + RO_FIELD(CR_TLBMISC, D); + RO_FIELD(CR_TLBMISC, PERM); + RO_FIELD(CR_TLBMISC, BAD); + RO_FIELD(CR_TLBMISC, DBL); + WR_FIELD(CR_TLBMISC, PID); + WR_FIELD(CR_TLBMISC, WE); + WR_FIELD(CR_TLBMISC, RD); + WR_FIELD(CR_TLBMISC, WAY); + + WR_REG(CR_TLBACC); + } + + /* + * TODO: ECC (config, eccinj) and MPU (config, mpubase, mpuacc) are + * unimplemented, so their corresponding control regs remain reserved. + */ + +#undef WR_REG +#undef RO_REG +#undef WR_FIELD +#undef RO_FIELD +} + static void nios2_cpu_realizefn(DeviceState *dev, Error **errp) { CPUState *cs = CPU(dev); + Nios2CPU *cpu = NIOS2_CPU(cs); Nios2CPUClass *ncc = NIOS2_CPU_GET_CLASS(dev); Error *local_err = NULL; +#ifndef CONFIG_USER_ONLY + if (cpu->eic_present) { + qdev_init_gpio_in_named(DEVICE(cpu), eic_set_irq, "EIC", 1); + } else { + qdev_init_gpio_in_named(DEVICE(cpu), iic_set_irq, "IRQ", 32); + } +#endif + cpu_exec_realizefn(cs, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); return; } + realize_cr_status(cs); qemu_init_vcpu(cs); cpu_reset(cs); + /* We have reserved storage for cpuid; might as well use it. 
*/ + cpu->env.ctrl[CR_CPUID] = cs->cpu_index; + ncc->parent_realize(dev, errp); } +#ifndef CONFIG_USER_ONLY +static bool eic_take_interrupt(Nios2CPU *cpu) +{ + CPUNios2State *env = &cpu->env; + const uint32_t status = env->ctrl[CR_STATUS]; + + if (cpu->rnmi) { + return !(status & CR_STATUS_NMI); + } + if (!(status & CR_STATUS_PIE)) { + return false; + } + if (cpu->ril <= FIELD_EX32(status, CR_STATUS, IL)) { + return false; + } + if (cpu->rrs != FIELD_EX32(status, CR_STATUS, CRS)) { + return true; + } + return status & CR_STATUS_RSIE; +} + +static bool iic_take_interrupt(Nios2CPU *cpu) +{ + CPUNios2State *env = &cpu->env; + + if (!(env->ctrl[CR_STATUS] & CR_STATUS_PIE)) { + return false; + } + return env->ctrl[CR_IPENDING] & env->ctrl[CR_IENABLE]; +} + static bool nios2_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { Nios2CPU *cpu = NIOS2_CPU(cs); - CPUNios2State *env = &cpu->env; - if ((interrupt_request & CPU_INTERRUPT_HARD) && - (env->regs[CR_STATUS] & CR_STATUS_PIE)) { - cs->exception_index = EXCP_IRQ; - nios2_cpu_do_interrupt(cs); - return true; + if (interrupt_request & CPU_INTERRUPT_HARD) { + if (cpu->eic_present + ? eic_take_interrupt(cpu) + : iic_take_interrupt(cpu)) { + cs->exception_index = EXCP_IRQ; + nios2_cpu_do_interrupt(cs); + return true; + } } return false; } - +#endif /* !CONFIG_USER_ONLY */ static void nios2_cpu_disas_set_info(CPUState *cpu, disassemble_info *info) { /* NOTE: NiosII R2 is not supported yet. */ info->mach = bfd_arch_nios2; -#ifdef TARGET_WORDS_BIGENDIAN - info->print_insn = print_insn_big_nios2; -#else - info->print_insn = print_insn_little_nios2; -#endif + info->print_insn = print_insn_nios2; } static int nios2_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { Nios2CPU *cpu = NIOS2_CPU(cs); - CPUClass *cc = CPU_GET_CLASS(cs); CPUNios2State *env = &cpu->env; + uint32_t val; - if (n > cc->gdb_num_core_regs) { + if (n < 32) { /* GP regs */ + val = env->regs[n]; + } else if (n == 32) { /* PC */ + val = env->pc; + } else if (n < 49) { /* Status regs */ + unsigned cr = n - 33; + if (nios2_cr_reserved(&cpu->cr_state[cr])) { + val = 0; + } else { + val = env->ctrl[n - 33]; + } + } else { + /* Invalid regs */ return 0; } - if (n < 32) { /* GP regs */ - return gdb_get_reg32(mem_buf, env->regs[n]); - } else if (n == 32) { /* PC */ - return gdb_get_reg32(mem_buf, env->regs[R_PC]); - } else if (n < 49) { /* Status regs */ - return gdb_get_reg32(mem_buf, env->regs[n - 1]); - } - - /* Invalid regs */ - return 0; + return gdb_get_reg32(mem_buf, val); } static int nios2_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) @@ -180,23 +308,32 @@ static int nios2_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) Nios2CPU *cpu = NIOS2_CPU(cs); CPUClass *cc = CPU_GET_CLASS(cs); CPUNios2State *env = &cpu->env; + uint32_t val; if (n > cc->gdb_num_core_regs) { return 0; } + val = ldl_p(mem_buf); if (n < 32) { /* GP regs */ - env->regs[n] = ldl_p(mem_buf); + env->regs[n] = val; } else if (n == 32) { /* PC */ - env->regs[R_PC] = ldl_p(mem_buf); + env->pc = val; } else if (n < 49) { /* Status regs */ - env->regs[n - 1] = ldl_p(mem_buf); + unsigned cr = n - 33; + /* ??? Maybe allow the debugger to write to readonly fields. 
*/ + val &= cpu->cr_state[cr].writable; + val |= cpu->cr_state[cr].readonly & env->ctrl[cr]; + env->ctrl[cr] = val; + } else { + g_assert_not_reached(); } return 4; } static Property nios2_properties[] = { + DEFINE_PROP_BOOL("diverr_present", Nios2CPU, diverr_present, true), DEFINE_PROP_BOOL("mmu_present", Nios2CPU, mmu_present, true), /* ALTR,pid-num-bits */ DEFINE_PROP_UINT32("mmu_pid_num_bits", Nios2CPU, pid_num_bits, 8), @@ -219,10 +356,11 @@ static const struct SysemuCPUOps nios2_sysemu_ops = { static const struct TCGCPUOps nios2_tcg_ops = { .initialize = nios2_tcg_init, - .cpu_exec_interrupt = nios2_cpu_exec_interrupt, - .tlb_fill = nios2_cpu_tlb_fill, + .restore_state_to_opc = nios2_restore_state_to_opc, #ifndef CONFIG_USER_ONLY + .tlb_fill = nios2_cpu_tlb_fill, + .cpu_exec_interrupt = nios2_cpu_exec_interrupt, .do_interrupt = nios2_cpu_do_interrupt, .do_unaligned_access = nios2_cpu_do_unaligned_access, #endif /* !CONFIG_USER_ONLY */ @@ -243,6 +381,7 @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data) cc->has_work = nios2_cpu_has_work; cc->dump_state = nios2_cpu_dump_state; cc->set_pc = nios2_cpu_set_pc; + cc->get_pc = nios2_cpu_get_pc; cc->disas_set_info = nios2_cpu_disas_set_info; #ifndef CONFIG_USER_ONLY cc->sysemu_ops = &nios2_sysemu_ops; diff --git a/target/nios2/cpu.h b/target/nios2/cpu.h index 2ab82fdc71..f85581ee56 100644 --- a/target/nios2/cpu.h +++ b/target/nios2/cpu.h @@ -23,17 +23,17 @@ #include "exec/cpu-defs.h" #include "hw/core/cpu.h" +#include "hw/registerfields.h" #include "qom/object.h" -typedef struct CPUNios2State CPUNios2State; +typedef struct CPUArchState CPUNios2State; #if !defined(CONFIG_USER_ONLY) #include "mmu.h" #endif #define TYPE_NIOS2_CPU "nios2-cpu" -OBJECT_DECLARE_TYPE(Nios2CPU, Nios2CPUClass, - NIOS2_CPU) +OBJECT_DECLARE_CPU_TYPE(Nios2CPU, Nios2CPUClass, NIOS2_CPU) /** * Nios2CPUClass: @@ -57,83 +57,114 @@ struct Nios2CPUClass { #define EXCEPTION_ADDRESS 0x00000004 #define FAST_TLB_MISS_ADDRESS 0x00000008 +#define NUM_GP_REGS 32 +#define NUM_CR_REGS 32 -/* GP regs + CR regs + PC */ -#define NUM_CORE_REGS (32 + 32 + 1) +#ifndef CONFIG_USER_ONLY +/* 63 shadow register sets; index 0 is the primary register set. 
*/ +#define NUM_REG_SETS 64 +#endif /* General purpose register aliases */ -#define R_ZERO 0 -#define R_AT 1 -#define R_RET0 2 -#define R_RET1 3 -#define R_ARG0 4 -#define R_ARG1 5 -#define R_ARG2 6 -#define R_ARG3 7 -#define R_ET 24 -#define R_BT 25 -#define R_GP 26 -#define R_SP 27 -#define R_FP 28 -#define R_EA 29 -#define R_BA 30 -#define R_RA 31 +enum { + R_ZERO = 0, + R_AT = 1, + R_RET0 = 2, + R_RET1 = 3, + R_ARG0 = 4, + R_ARG1 = 5, + R_ARG2 = 6, + R_ARG3 = 7, + R_ET = 24, + R_BT = 25, + R_GP = 26, + R_SP = 27, + R_FP = 28, + R_EA = 29, + R_BA = 30, + R_SSTATUS = 30, + R_RA = 31, +}; /* Control register aliases */ -#define CR_BASE 32 -#define CR_STATUS (CR_BASE + 0) -#define CR_STATUS_PIE (1 << 0) -#define CR_STATUS_U (1 << 1) -#define CR_STATUS_EH (1 << 2) -#define CR_STATUS_IH (1 << 3) -#define CR_STATUS_IL (63 << 4) -#define CR_STATUS_CRS (63 << 10) -#define CR_STATUS_PRS (63 << 16) -#define CR_STATUS_NMI (1 << 22) -#define CR_STATUS_RSIE (1 << 23) -#define CR_ESTATUS (CR_BASE + 1) -#define CR_BSTATUS (CR_BASE + 2) -#define CR_IENABLE (CR_BASE + 3) -#define CR_IPENDING (CR_BASE + 4) -#define CR_CPUID (CR_BASE + 5) -#define CR_CTL6 (CR_BASE + 6) -#define CR_EXCEPTION (CR_BASE + 7) -#define CR_PTEADDR (CR_BASE + 8) -#define CR_PTEADDR_PTBASE_SHIFT 22 -#define CR_PTEADDR_PTBASE_MASK (0x3FF << CR_PTEADDR_PTBASE_SHIFT) -#define CR_PTEADDR_VPN_SHIFT 2 -#define CR_PTEADDR_VPN_MASK (0xFFFFF << CR_PTEADDR_VPN_SHIFT) -#define CR_TLBACC (CR_BASE + 9) -#define CR_TLBACC_IGN_SHIFT 25 -#define CR_TLBACC_IGN_MASK (0x7F << CR_TLBACC_IGN_SHIFT) -#define CR_TLBACC_C (1 << 24) -#define CR_TLBACC_R (1 << 23) -#define CR_TLBACC_W (1 << 22) -#define CR_TLBACC_X (1 << 21) -#define CR_TLBACC_G (1 << 20) -#define CR_TLBACC_PFN_MASK 0x000FFFFF -#define CR_TLBMISC (CR_BASE + 10) -#define CR_TLBMISC_WAY_SHIFT 20 -#define CR_TLBMISC_WAY_MASK (0xF << CR_TLBMISC_WAY_SHIFT) -#define CR_TLBMISC_RD (1 << 19) -#define CR_TLBMISC_WR (1 << 18) -#define CR_TLBMISC_PID_SHIFT 4 -#define CR_TLBMISC_PID_MASK (0x3FFF << CR_TLBMISC_PID_SHIFT) -#define CR_TLBMISC_DBL (1 << 3) -#define CR_TLBMISC_BAD (1 << 2) -#define CR_TLBMISC_PERM (1 << 1) -#define CR_TLBMISC_D (1 << 0) -#define CR_ENCINJ (CR_BASE + 11) -#define CR_BADADDR (CR_BASE + 12) -#define CR_CONFIG (CR_BASE + 13) -#define CR_MPUBASE (CR_BASE + 14) -#define CR_MPUACC (CR_BASE + 15) +enum { + CR_STATUS = 0, + CR_ESTATUS = 1, + CR_BSTATUS = 2, + CR_IENABLE = 3, + CR_IPENDING = 4, + CR_CPUID = 5, + CR_EXCEPTION = 7, + CR_PTEADDR = 8, + CR_TLBACC = 9, + CR_TLBMISC = 10, + CR_ENCINJ = 11, + CR_BADADDR = 12, + CR_CONFIG = 13, + CR_MPUBASE = 14, + CR_MPUACC = 15, +}; -/* Other registers */ -#define R_PC 64 +FIELD(CR_STATUS, PIE, 0, 1) +FIELD(CR_STATUS, U, 1, 1) +FIELD(CR_STATUS, EH, 2, 1) +FIELD(CR_STATUS, IH, 3, 1) +FIELD(CR_STATUS, IL, 4, 6) +FIELD(CR_STATUS, CRS, 10, 6) +FIELD(CR_STATUS, PRS, 16, 6) +FIELD(CR_STATUS, NMI, 22, 1) +FIELD(CR_STATUS, RSIE, 23, 1) +FIELD(CR_STATUS, SRS, 31, 1) /* only in sstatus */ + +#define CR_STATUS_PIE R_CR_STATUS_PIE_MASK +#define CR_STATUS_U R_CR_STATUS_U_MASK +#define CR_STATUS_EH R_CR_STATUS_EH_MASK +#define CR_STATUS_IH R_CR_STATUS_IH_MASK +#define CR_STATUS_NMI R_CR_STATUS_NMI_MASK +#define CR_STATUS_RSIE R_CR_STATUS_RSIE_MASK +#define CR_STATUS_SRS R_CR_STATUS_SRS_MASK + +FIELD(CR_EXCEPTION, CAUSE, 2, 5) +FIELD(CR_EXCEPTION, ECCFTL, 31, 1) + +FIELD(CR_PTEADDR, VPN, 2, 20) +FIELD(CR_PTEADDR, PTBASE, 22, 10) + +FIELD(CR_TLBACC, PFN, 0, 20) +FIELD(CR_TLBACC, G, 20, 1) +FIELD(CR_TLBACC, X, 21, 1) +FIELD(CR_TLBACC, W, 22, 1) 
+FIELD(CR_TLBACC, R, 23, 1) +FIELD(CR_TLBACC, C, 24, 1) +FIELD(CR_TLBACC, IG, 25, 7) + +#define CR_TLBACC_C R_CR_TLBACC_C_MASK +#define CR_TLBACC_R R_CR_TLBACC_R_MASK +#define CR_TLBACC_W R_CR_TLBACC_W_MASK +#define CR_TLBACC_X R_CR_TLBACC_X_MASK +#define CR_TLBACC_G R_CR_TLBACC_G_MASK + +FIELD(CR_TLBMISC, D, 0, 1) +FIELD(CR_TLBMISC, PERM, 1, 1) +FIELD(CR_TLBMISC, BAD, 2, 1) +FIELD(CR_TLBMISC, DBL, 3, 1) +FIELD(CR_TLBMISC, PID, 4, 14) +FIELD(CR_TLBMISC, WE, 18, 1) +FIELD(CR_TLBMISC, RD, 19, 1) +FIELD(CR_TLBMISC, WAY, 20, 4) +FIELD(CR_TLBMISC, EE, 24, 1) + +#define CR_TLBMISC_EE R_CR_TLBMISC_EE_MASK +#define CR_TLBMISC_RD R_CR_TLBMISC_RD_MASK +#define CR_TLBMISC_WE R_CR_TLBMISC_WE_MASK +#define CR_TLBMISC_DBL R_CR_TLBMISC_DBL_MASK +#define CR_TLBMISC_BAD R_CR_TLBMISC_BAD_MASK +#define CR_TLBMISC_PERM R_CR_TLBMISC_PERM_MASK +#define CR_TLBMISC_D R_CR_TLBMISC_D_MASK /* Exceptions */ #define EXCP_BREAK 0x1000 +#define EXCP_SEMIHOST 0x1001 #define EXCP_RESET 0 #define EXCP_PRESET 1 #define EXCP_IRQ 2 @@ -143,35 +174,46 @@ struct Nios2CPUClass { #define EXCP_UNALIGN 6 #define EXCP_UNALIGND 7 #define EXCP_DIV 8 -#define EXCP_SUPERA 9 +#define EXCP_SUPERA_X 9 #define EXCP_SUPERI 10 -#define EXCP_SUPERD 11 -#define EXCP_TLBD 12 -#define EXCP_TLBX 13 -#define EXCP_TLBR 14 -#define EXCP_TLBW 15 +#define EXCP_SUPERA_D 11 +#define EXCP_TLB_X 12 +#define EXCP_TLB_D (0x1000 | EXCP_TLB_X) +#define EXCP_PERM_X 13 +#define EXCP_PERM_R 14 +#define EXCP_PERM_W 15 #define EXCP_MPUI 16 #define EXCP_MPUD 17 -#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 - -struct CPUNios2State { - uint32_t regs[NUM_CORE_REGS]; +struct CPUArchState { +#ifdef CONFIG_USER_ONLY + uint32_t regs[NUM_GP_REGS]; +#else + uint32_t shadow_regs[NUM_REG_SETS][NUM_GP_REGS]; + /* Pointer into shadow_regs for the current register set. */ + uint32_t *regs; +#endif + uint32_t ctrl[NUM_CR_REGS]; + uint32_t pc; #if !defined(CONFIG_USER_ONLY) Nios2MMU mmu; - - uint32_t irq_pending; #endif + int error_code; }; +typedef struct { + uint32_t writable; + uint32_t readonly; +} ControlRegState; + /** * Nios2CPU: * @env: #CPUNios2State * * A Nios2 CPU. */ -struct Nios2CPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ @@ -179,7 +221,10 @@ struct Nios2CPU { CPUNegativeOffsetState neg; CPUNios2State env; + bool diverr_present; bool mmu_present; + bool eic_present; + uint32_t pid_num_bits; uint32_t tlb_num_ways; uint32_t tlb_num_entries; @@ -188,25 +233,47 @@ struct Nios2CPU { uint32_t reset_addr; uint32_t exception_addr; uint32_t fast_tlb_miss_addr; + + /* Bits within each control register which are reserved or readonly. 
*/ + ControlRegState cr_state[NUM_CR_REGS]; + + /* External Interrupt Controller Interface */ + uint32_t rha; /* Requested handler address */ + uint32_t ril; /* Requested interrupt level */ + uint32_t rrs; /* Requested register set */ + bool rnmi; /* Requested nonmaskable interrupt */ }; +static inline bool nios2_cr_reserved(const ControlRegState *s) +{ + return (s->writable | s->readonly) == 0; +} + +static inline void nios2_update_crs(CPUNios2State *env) +{ +#ifndef CONFIG_USER_ONLY + unsigned crs = FIELD_EX32(env->ctrl[CR_STATUS], CR_STATUS, CRS); + env->regs = env->shadow_regs[crs]; +#endif +} + void nios2_tcg_init(void); void nios2_cpu_do_interrupt(CPUState *cs); -int cpu_nios2_signal_handler(int host_signum, void *pinfo, void *puc); void dump_mmu(CPUNios2State *env); void nios2_cpu_dump_state(CPUState *cpu, FILE *f, int flags); hwaddr nios2_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); -void nios2_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); +G_NORETURN void nios2_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr); +G_NORETURN void nios2_cpu_loop_exit_advance(CPUNios2State *env, + uintptr_t retaddr); void do_nios2_semihosting(CPUNios2State *env); #define CPU_RESOLVING_TYPE TYPE_NIOS2_CPU #define cpu_gen_code cpu_nios2_gen_code -#define cpu_signal_handler cpu_nios2_signal_handler #define CPU_SAVE_VERSION 1 @@ -216,30 +283,35 @@ void do_nios2_semihosting(CPUNios2State *env); static inline int cpu_mmu_index(CPUNios2State *env, bool ifetch) { - return (env->regs[CR_STATUS] & CR_STATUS_U) ? MMU_USER_IDX : + return (env->ctrl[CR_STATUS] & CR_STATUS_U) ? MMU_USER_IDX : MMU_SUPERVISOR_IDX; } +#ifndef CONFIG_USER_ONLY bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); - -static inline int cpu_interrupts_enabled(CPUNios2State *env) -{ - return env->regs[CR_STATUS] & CR_STATUS_PIE; -} +#endif typedef CPUNios2State CPUArchState; typedef Nios2CPU ArchCPU; #include "exec/cpu-all.h" +FIELD(TBFLAGS, CRS0, 0, 1) /* Set if CRS == 0. */ +FIELD(TBFLAGS, U, 1, 1) /* Overlaps CR_STATUS_U */ +FIELD(TBFLAGS, R0_0, 2, 1) /* Set if R0 == 0. */ + static inline void cpu_get_tb_cpu_state(CPUNios2State *env, target_ulong *pc, target_ulong *cs_base, uint32_t *flags) { - *pc = env->regs[R_PC]; + unsigned crs = FIELD_EX32(env->ctrl[CR_STATUS], CR_STATUS, CRS); + + *pc = env->pc; *cs_base = 0; - *flags = (env->regs[CR_STATUS] & (CR_STATUS_EH | CR_STATUS_U)); + *flags = (env->ctrl[CR_STATUS] & CR_STATUS_U) + | (crs ? 0 : R_TBFLAGS_CRS0_MASK) + | (env->regs[0] ? 0 : R_TBFLAGS_R0_0_MASK); } #endif /* NIOS2_CPU_H */ diff --git a/target/nios2/helper.c b/target/nios2/helper.c index 53be8398e9..bb3b09e5a7 100644 --- a/target/nios2/helper.c +++ b/target/nios2/helper.c @@ -28,175 +28,234 @@ #include "exec/helper-proto.h" #include "semihosting/semihost.h" -#if defined(CONFIG_USER_ONLY) + +static void do_exception(Nios2CPU *cpu, uint32_t exception_addr, + uint32_t tlbmisc_set, bool is_break) +{ + CPUNios2State *env = &cpu->env; + CPUState *cs = CPU(cpu); + uint32_t old_status = env->ctrl[CR_STATUS]; + uint32_t new_status = old_status; + + /* With shadow regs, exceptions are always taken into CRS 0. 
*/ + new_status &= ~R_CR_STATUS_CRS_MASK; + env->regs = env->shadow_regs[0]; + + if ((old_status & CR_STATUS_EH) == 0) { + int r_ea = R_EA, cr_es = CR_ESTATUS; + + if (is_break) { + r_ea = R_BA; + cr_es = CR_BSTATUS; + } + env->ctrl[cr_es] = old_status; + env->regs[r_ea] = env->pc; + + if (cpu->mmu_present) { + new_status |= CR_STATUS_EH; + + /* + * There are 4 bits that are always written. + * Explicitly clear them, to be set via the argument. + */ + env->ctrl[CR_TLBMISC] &= ~(CR_TLBMISC_D | + CR_TLBMISC_PERM | + CR_TLBMISC_BAD | + CR_TLBMISC_DBL); + env->ctrl[CR_TLBMISC] |= tlbmisc_set; + } + + /* + * With shadow regs, and EH == 0, PRS is set from CRS. + * At least, so says Table 3-9, and some other text, + * though Table 3-38 says otherwise. + */ + new_status = FIELD_DP32(new_status, CR_STATUS, PRS, + FIELD_EX32(old_status, CR_STATUS, CRS)); + } + + new_status &= ~(CR_STATUS_PIE | CR_STATUS_U); + + env->ctrl[CR_STATUS] = new_status; + if (!is_break) { + env->ctrl[CR_EXCEPTION] = FIELD_DP32(0, CR_EXCEPTION, CAUSE, + cs->exception_index); + } + env->pc = exception_addr; +} + +static void do_iic_irq(Nios2CPU *cpu) +{ + do_exception(cpu, cpu->exception_addr, 0, false); +} + +static void do_eic_irq(Nios2CPU *cpu) +{ + CPUNios2State *env = &cpu->env; + uint32_t old_status = env->ctrl[CR_STATUS]; + uint32_t new_status = old_status; + uint32_t old_rs = FIELD_EX32(old_status, CR_STATUS, CRS); + uint32_t new_rs = cpu->rrs; + + new_status = FIELD_DP32(new_status, CR_STATUS, CRS, new_rs); + new_status = FIELD_DP32(new_status, CR_STATUS, IL, cpu->ril); + new_status = FIELD_DP32(new_status, CR_STATUS, NMI, cpu->rnmi); + new_status &= ~(CR_STATUS_RSIE | CR_STATUS_U); + new_status |= CR_STATUS_IH; + + if (!(new_status & CR_STATUS_EH)) { + new_status = FIELD_DP32(new_status, CR_STATUS, PRS, old_rs); + if (new_rs == 0) { + env->ctrl[CR_ESTATUS] = old_status; + } else { + if (new_rs != old_rs) { + old_status |= CR_STATUS_SRS; + } + env->shadow_regs[new_rs][R_SSTATUS] = old_status; + } + env->shadow_regs[new_rs][R_EA] = env->pc; + } + + env->ctrl[CR_STATUS] = new_status; + nios2_update_crs(env); + + env->pc = cpu->rha; +} void nios2_cpu_do_interrupt(CPUState *cs) { Nios2CPU *cpu = NIOS2_CPU(cs); CPUNios2State *env = &cpu->env; - cs->exception_index = -1; - env->regs[R_EA] = env->regs[R_PC] + 4; -} + uint32_t tlbmisc_set = 0; -bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ - cs->exception_index = 0xaa; - cpu_loop_exit_restore(cs, retaddr); -} + if (qemu_loglevel_mask(CPU_LOG_INT)) { + const char *name = NULL; -#else /* !CONFIG_USER_ONLY */ - -void nios2_cpu_do_interrupt(CPUState *cs) -{ - Nios2CPU *cpu = NIOS2_CPU(cs); - CPUNios2State *env = &cpu->env; + switch (cs->exception_index) { + case EXCP_IRQ: + name = "interrupt"; + break; + case EXCP_TLB_X: + case EXCP_TLB_D: + if (env->ctrl[CR_STATUS] & CR_STATUS_EH) { + name = "TLB MISS (double)"; + } else { + name = "TLB MISS (fast)"; + } + break; + case EXCP_PERM_R: + case EXCP_PERM_W: + case EXCP_PERM_X: + name = "TLB PERM"; + break; + case EXCP_SUPERA_X: + case EXCP_SUPERA_D: + name = "SUPERVISOR (address)"; + break; + case EXCP_SUPERI: + name = "SUPERVISOR (insn)"; + break; + case EXCP_ILLEGAL: + name = "ILLEGAL insn"; + break; + case EXCP_UNALIGN: + name = "Misaligned (data)"; + break; + case EXCP_UNALIGND: + name = "Misaligned (destination)"; + break; + case EXCP_DIV: + name = "DIV error"; + break; + case EXCP_TRAP: + name = "TRAP insn"; + break; + case 
EXCP_BREAK: + name = "BREAK insn"; + break; + case EXCP_SEMIHOST: + name = "SEMIHOST insn"; + break; + } + if (name) { + qemu_log("%s at pc=0x%08x\n", name, env->pc); + } else { + qemu_log("Unknown exception %d at pc=0x%08x\n", + cs->exception_index, env->pc); + } + } switch (cs->exception_index) { case EXCP_IRQ: - assert(env->regs[CR_STATUS] & CR_STATUS_PIE); - - qemu_log_mask(CPU_LOG_INT, "interrupt at pc=%x\n", env->regs[R_PC]); - - env->regs[CR_ESTATUS] = env->regs[CR_STATUS]; - env->regs[CR_STATUS] |= CR_STATUS_IH; - env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U); - - env->regs[CR_EXCEPTION] &= ~(0x1F << 2); - env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2; - - env->regs[R_EA] = env->regs[R_PC] + 4; - env->regs[R_PC] = cpu->exception_addr; - break; - - case EXCP_TLBD: - if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) { - qemu_log_mask(CPU_LOG_INT, "TLB MISS (fast) at pc=%x\n", - env->regs[R_PC]); - - /* Fast TLB miss */ - /* Variation from the spec. Table 3-35 of the cpu reference shows - * estatus not being changed for TLB miss but this appears to - * be incorrect. */ - env->regs[CR_ESTATUS] = env->regs[CR_STATUS]; - env->regs[CR_STATUS] |= CR_STATUS_EH; - env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U); - - env->regs[CR_EXCEPTION] &= ~(0x1F << 2); - env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2; - - env->regs[CR_TLBMISC] &= ~CR_TLBMISC_DBL; - env->regs[CR_TLBMISC] |= CR_TLBMISC_WR; - - env->regs[R_EA] = env->regs[R_PC] + 4; - env->regs[R_PC] = cpu->fast_tlb_miss_addr; + /* Note that PC is advanced for interrupts as well. */ + env->pc += 4; + if (cpu->eic_present) { + do_eic_irq(cpu); } else { - qemu_log_mask(CPU_LOG_INT, "TLB MISS (double) at pc=%x\n", - env->regs[R_PC]); - - /* Double TLB miss */ - env->regs[CR_STATUS] |= CR_STATUS_EH; - env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U); - - env->regs[CR_EXCEPTION] &= ~(0x1F << 2); - env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2; - - env->regs[CR_TLBMISC] |= CR_TLBMISC_DBL; - - env->regs[R_PC] = cpu->exception_addr; + do_iic_irq(cpu); } break; - case EXCP_TLBR: - case EXCP_TLBW: - case EXCP_TLBX: - qemu_log_mask(CPU_LOG_INT, "TLB PERM at pc=%x\n", env->regs[R_PC]); - - env->regs[CR_ESTATUS] = env->regs[CR_STATUS]; - env->regs[CR_STATUS] |= CR_STATUS_EH; - env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U); - - env->regs[CR_EXCEPTION] &= ~(0x1F << 2); - env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2; - - if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) { - env->regs[CR_TLBMISC] |= CR_TLBMISC_WR; + case EXCP_TLB_D: + tlbmisc_set = CR_TLBMISC_D; + /* fall through */ + case EXCP_TLB_X: + if (env->ctrl[CR_STATUS] & CR_STATUS_EH) { + tlbmisc_set |= CR_TLBMISC_DBL; + /* + * Normally, we don't write to tlbmisc unless !EH, + * so do it manually for the double-tlb miss exception. 
+ */ + env->ctrl[CR_TLBMISC] &= ~(CR_TLBMISC_D | + CR_TLBMISC_PERM | + CR_TLBMISC_BAD); + env->ctrl[CR_TLBMISC] |= tlbmisc_set; + do_exception(cpu, cpu->exception_addr, 0, false); + } else { + tlbmisc_set |= CR_TLBMISC_WE; + do_exception(cpu, cpu->fast_tlb_miss_addr, tlbmisc_set, false); } - - env->regs[R_EA] = env->regs[R_PC] + 4; - env->regs[R_PC] = cpu->exception_addr; break; - case EXCP_SUPERA: + case EXCP_PERM_R: + case EXCP_PERM_W: + tlbmisc_set = CR_TLBMISC_D; + /* fall through */ + case EXCP_PERM_X: + tlbmisc_set |= CR_TLBMISC_PERM; + if (!(env->ctrl[CR_STATUS] & CR_STATUS_EH)) { + tlbmisc_set |= CR_TLBMISC_WE; + } + do_exception(cpu, cpu->exception_addr, tlbmisc_set, false); + break; + + case EXCP_SUPERA_D: + case EXCP_UNALIGN: + tlbmisc_set = CR_TLBMISC_D; + /* fall through */ + case EXCP_SUPERA_X: + case EXCP_UNALIGND: + tlbmisc_set |= CR_TLBMISC_BAD; + do_exception(cpu, cpu->exception_addr, tlbmisc_set, false); + break; + case EXCP_SUPERI: - case EXCP_SUPERD: - qemu_log_mask(CPU_LOG_INT, "SUPERVISOR exception at pc=%x\n", - env->regs[R_PC]); - - if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) { - env->regs[CR_ESTATUS] = env->regs[CR_STATUS]; - env->regs[R_EA] = env->regs[R_PC] + 4; - } - - env->regs[CR_STATUS] |= CR_STATUS_EH; - env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U); - - env->regs[CR_EXCEPTION] &= ~(0x1F << 2); - env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2; - - env->regs[R_PC] = cpu->exception_addr; - break; - case EXCP_ILLEGAL: + case EXCP_DIV: case EXCP_TRAP: - qemu_log_mask(CPU_LOG_INT, "TRAP exception at pc=%x\n", - env->regs[R_PC]); - - if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) { - env->regs[CR_ESTATUS] = env->regs[CR_STATUS]; - env->regs[R_EA] = env->regs[R_PC] + 4; - } - - env->regs[CR_STATUS] |= CR_STATUS_EH; - env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U); - - env->regs[CR_EXCEPTION] &= ~(0x1F << 2); - env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2; - - env->regs[R_PC] = cpu->exception_addr; + do_exception(cpu, cpu->exception_addr, 0, false); break; case EXCP_BREAK: - qemu_log_mask(CPU_LOG_INT, "BREAK exception at pc=%x\n", - env->regs[R_PC]); - /* The semihosting instruction is "break 1". 
*/ - if (semihosting_enabled() && - cpu_ldl_code(env, env->regs[R_PC]) == 0x003da07a) { - qemu_log_mask(CPU_LOG_INT, "Entering semihosting\n"); - env->regs[R_PC] += 4; - do_nios2_semihosting(env); - break; - } + do_exception(cpu, cpu->exception_addr, 0, true); + break; - if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) { - env->regs[CR_BSTATUS] = env->regs[CR_STATUS]; - env->regs[R_BA] = env->regs[R_PC] + 4; - } - - env->regs[CR_STATUS] |= CR_STATUS_EH; - env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U); - - env->regs[CR_EXCEPTION] &= ~(0x1F << 2); - env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2; - - env->regs[R_PC] = cpu->exception_addr; + case EXCP_SEMIHOST: + do_nios2_semihosting(env); break; default: - cpu_abort(cs, "unhandled exception type=%d\n", - cs->exception_index); - break; + cpu_abort(cs, "unhandled exception type=%d\n", cs->exception_index); } } @@ -231,9 +290,9 @@ void nios2_cpu_do_unaligned_access(CPUState *cs, vaddr addr, Nios2CPU *cpu = NIOS2_CPU(cs); CPUNios2State *env = &cpu->env; - env->regs[CR_BADADDR] = addr; - env->regs[CR_EXCEPTION] = EXCP_UNALIGN << 2; - helper_raise_exception(env, EXCP_UNALIGN); + env->ctrl[CR_BADADDR] = addr; + cs->exception_index = EXCP_UNALIGN; + nios2_cpu_loop_exit_advance(env, retaddr); } bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size, @@ -242,7 +301,7 @@ bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size, { Nios2CPU *cpu = NIOS2_CPU(cs); CPUNios2State *env = &cpu->env; - unsigned int excp = EXCP_TLBD; + unsigned int excp; target_ulong vaddr, paddr; Nios2MMULookup lu; unsigned int hit; @@ -269,9 +328,10 @@ bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size, if (probe) { return false; } - cs->exception_index = EXCP_SUPERA; - env->regs[CR_BADADDR] = address; - cpu_loop_exit_restore(cs, retaddr); + cs->exception_index = (access_type == MMU_INST_FETCH + ? EXCP_SUPERA_X : EXCP_SUPERA_D); + env->ctrl[CR_BADADDR] = address; + nios2_cpu_loop_exit_advance(env, retaddr); } } @@ -290,25 +350,23 @@ bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size, } /* Permission violation */ - excp = (access_type == MMU_DATA_LOAD ? EXCP_TLBR : - access_type == MMU_DATA_STORE ? EXCP_TLBW : EXCP_TLBX); + excp = (access_type == MMU_DATA_LOAD ? EXCP_PERM_R : + access_type == MMU_DATA_STORE ? EXCP_PERM_W : EXCP_PERM_X); + } else { + excp = (access_type == MMU_INST_FETCH ? 
EXCP_TLB_X: EXCP_TLB_D); } if (probe) { return false; } - if (access_type == MMU_INST_FETCH) { - env->regs[CR_TLBMISC] &= ~CR_TLBMISC_D; - } else { - env->regs[CR_TLBMISC] |= CR_TLBMISC_D; - } - env->regs[CR_PTEADDR] &= CR_PTEADDR_PTBASE_MASK; - env->regs[CR_PTEADDR] |= (address >> 10) & CR_PTEADDR_VPN_MASK; - env->mmu.pteaddr_wr = env->regs[CR_PTEADDR]; + env->ctrl[CR_TLBMISC] = FIELD_DP32(env->ctrl[CR_TLBMISC], CR_TLBMISC, D, + access_type != MMU_INST_FETCH); + env->ctrl[CR_PTEADDR] = FIELD_DP32(env->ctrl[CR_PTEADDR], CR_PTEADDR, VPN, + address >> TARGET_PAGE_BITS); + env->mmu.pteaddr_wr = env->ctrl[CR_PTEADDR]; cs->exception_index = excp; - env->regs[CR_BADADDR] = address; - cpu_loop_exit_restore(cs, retaddr); + env->ctrl[CR_BADADDR] = address; + nios2_cpu_loop_exit_advance(env, retaddr); } -#endif /* !CONFIG_USER_ONLY */ diff --git a/target/nios2/helper.h b/target/nios2/helper.h index 6c8f0b5b35..1648d76ade 100644 --- a/target/nios2/helper.h +++ b/target/nios2/helper.h @@ -19,9 +19,14 @@ */ DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, i32) +DEF_HELPER_FLAGS_3(divs, TCG_CALL_NO_WG, s32, env, s32, s32) +DEF_HELPER_FLAGS_3(divu, TCG_CALL_NO_WG, i32, env, i32, i32) #if !defined(CONFIG_USER_ONLY) -DEF_HELPER_2(mmu_read_debug, void, env, i32) -DEF_HELPER_3(mmu_write, void, env, i32, i32) -DEF_HELPER_1(check_interrupts, void, env) +DEF_HELPER_3(eret, noreturn, env, i32, i32) +DEF_HELPER_FLAGS_2(rdprs, TCG_CALL_NO_WG, i32, env, i32) +DEF_HELPER_3(wrprs, void, env, i32, i32) +DEF_HELPER_2(mmu_write_tlbacc, void, env, i32) +DEF_HELPER_2(mmu_write_tlbmisc, void, env, i32) +DEF_HELPER_2(mmu_write_pteaddr, void, env, i32) #endif diff --git a/target/nios2/meson.build b/target/nios2/meson.build index e643917db1..c6e2243cc3 100644 --- a/target/nios2/meson.build +++ b/target/nios2/meson.build @@ -1,15 +1,17 @@ nios2_ss = ss.source_set() nios2_ss.add(files( 'cpu.c', - 'helper.c', - 'mmu.c', - 'nios2-semi.c', 'op_helper.c', 'translate.c', )) nios2_softmmu_ss = ss.source_set() -nios2_softmmu_ss.add(files('monitor.c')) +nios2_softmmu_ss.add(files( + 'helper.c', + 'monitor.c', + 'mmu.c', + 'nios2-semi.c', +)) target_arch += {'nios2': nios2_ss} target_softmmu_arch += {'nios2': nios2_softmmu_ss} diff --git a/target/nios2/mmu.c b/target/nios2/mmu.c index 2545c06761..d9b690b78e 100644 --- a/target/nios2/mmu.c +++ b/target/nios2/mmu.c @@ -23,37 +23,9 @@ #include "cpu.h" #include "exec/exec-all.h" #include "mmu.h" +#include "exec/helper-proto.h" +#include "trace/trace-target_nios2.h" -#if !defined(CONFIG_USER_ONLY) - -/* Define this to enable MMU debug messages */ -/* #define DEBUG_MMU */ - -#ifdef DEBUG_MMU -#define MMU_LOG(x) x -#else -#define MMU_LOG(x) -#endif - -void mmu_read_debug(CPUNios2State *env, uint32_t rn) -{ - switch (rn) { - case CR_TLBACC: - MMU_LOG(qemu_log("TLBACC READ %08X\n", env->regs[rn])); - break; - - case CR_TLBMISC: - MMU_LOG(qemu_log("TLBMISC READ %08X\n", env->regs[rn])); - break; - - case CR_PTEADDR: - MMU_LOG(qemu_log("PTEADDR READ %08X\n", env->regs[rn])); - break; - - default: - break; - } -} /* rw - 0 = read, 1 = write, 2 = fetch. 
*/ unsigned int mmu_translate(CPUNios2State *env, @@ -61,39 +33,28 @@ unsigned int mmu_translate(CPUNios2State *env, target_ulong vaddr, int rw, int mmu_idx) { Nios2CPU *cpu = env_archcpu(env); - int pid = (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK) >> 4; + int pid = FIELD_EX32(env->mmu.tlbmisc_wr, CR_TLBMISC, PID); int vpn = vaddr >> 12; + int way, n_ways = cpu->tlb_num_ways; - MMU_LOG(qemu_log("mmu_translate vaddr %08X, pid %08X, vpn %08X\n", - vaddr, pid, vpn)); - - int way; - for (way = 0; way < cpu->tlb_num_ways; way++) { - - Nios2TLBEntry *entry = - &env->mmu.tlb[(way * cpu->tlb_num_ways) + - (vpn & env->mmu.tlb_entry_mask)]; - - MMU_LOG(qemu_log("TLB[%d] TAG %08X, VPN %08X\n", - (way * cpu->tlb_num_ways) + - (vpn & env->mmu.tlb_entry_mask), - entry->tag, (entry->tag >> 12))); + for (way = 0; way < n_ways; way++) { + uint32_t index = (way * n_ways) + (vpn & env->mmu.tlb_entry_mask); + Nios2TLBEntry *entry = &env->mmu.tlb[index]; if (((entry->tag >> 12) != vpn) || (((entry->tag & (1 << 11)) == 0) && ((entry->tag & ((1 << cpu->pid_num_bits) - 1)) != pid))) { + trace_nios2_mmu_translate_miss(vaddr, pid, index, entry->tag); continue; } + lu->vaddr = vaddr & TARGET_PAGE_MASK; - lu->paddr = (entry->data & CR_TLBACC_PFN_MASK) << TARGET_PAGE_BITS; + lu->paddr = FIELD_EX32(entry->data, CR_TLBACC, PFN) << TARGET_PAGE_BITS; lu->prot = ((entry->data & CR_TLBACC_R) ? PAGE_READ : 0) | ((entry->data & CR_TLBACC_W) ? PAGE_WRITE : 0) | ((entry->data & CR_TLBACC_X) ? PAGE_EXEC : 0); - MMU_LOG(qemu_log("HIT TLB[%d] %08X %08X %08X\n", - (way * cpu->tlb_num_ways) + - (vpn & env->mmu.tlb_entry_mask), - lu->vaddr, lu->paddr, lu->prot)); + trace_nios2_mmu_translate_hit(vaddr, pid, index, lu->paddr, lu->prot); return 1; } return 0; @@ -104,141 +65,119 @@ static void mmu_flush_pid(CPUNios2State *env, uint32_t pid) CPUState *cs = env_cpu(env); Nios2CPU *cpu = env_archcpu(env); int idx; - MMU_LOG(qemu_log("TLB Flush PID %d\n", pid)); for (idx = 0; idx < cpu->tlb_num_entries; idx++) { Nios2TLBEntry *entry = &env->mmu.tlb[idx]; - MMU_LOG(qemu_log("TLB[%d] => %08X %08X\n", - idx, entry->tag, entry->data)); - if ((entry->tag & (1 << 10)) && (!(entry->tag & (1 << 11))) && ((entry->tag & ((1 << cpu->pid_num_bits) - 1)) == pid)) { uint32_t vaddr = entry->tag & TARGET_PAGE_MASK; - MMU_LOG(qemu_log("TLB Flush Page %08X\n", vaddr)); - + trace_nios2_mmu_flush_pid_hit(pid, idx, vaddr); tlb_flush_page(cs, vaddr); + } else { + trace_nios2_mmu_flush_pid_miss(pid, idx, entry->tag); } } } -void mmu_write(CPUNios2State *env, uint32_t rn, uint32_t v) +void helper_mmu_write_tlbacc(CPUNios2State *env, uint32_t v) { CPUState *cs = env_cpu(env); Nios2CPU *cpu = env_archcpu(env); - MMU_LOG(qemu_log("mmu_write %08X = %08X\n", rn, v)); + trace_nios2_mmu_write_tlbacc(FIELD_EX32(v, CR_TLBACC, IG), + (v & CR_TLBACC_C) ? 'C' : '.', + (v & CR_TLBACC_R) ? 'R' : '.', + (v & CR_TLBACC_W) ? 'W' : '.', + (v & CR_TLBACC_X) ? 'X' : '.', + (v & CR_TLBACC_G) ? 'G' : '.', + FIELD_EX32(v, CR_TLBACC, PFN)); - switch (rn) { - case CR_TLBACC: - MMU_LOG(qemu_log("TLBACC: IG %02X, FLAGS %c%c%c%c%c, PFN %05X\n", - v >> CR_TLBACC_IGN_SHIFT, - (v & CR_TLBACC_C) ? 'C' : '.', - (v & CR_TLBACC_R) ? 'R' : '.', - (v & CR_TLBACC_W) ? 'W' : '.', - (v & CR_TLBACC_X) ? 'X' : '.', - (v & CR_TLBACC_G) ? 
'G' : '.', - v & CR_TLBACC_PFN_MASK)); + /* if tlbmisc.WE == 1 then trigger a TLB write on writes to TLBACC */ + if (env->ctrl[CR_TLBMISC] & CR_TLBMISC_WE) { + int way = FIELD_EX32(env->ctrl[CR_TLBMISC], CR_TLBMISC, WAY); + int vpn = FIELD_EX32(env->mmu.pteaddr_wr, CR_PTEADDR, VPN); + int pid = FIELD_EX32(env->mmu.tlbmisc_wr, CR_TLBMISC, PID); + int g = FIELD_EX32(v, CR_TLBACC, G); + int valid = FIELD_EX32(vpn, CR_TLBACC, PFN) < 0xC0000; + Nios2TLBEntry *entry = + &env->mmu.tlb[(way * cpu->tlb_num_ways) + + (vpn & env->mmu.tlb_entry_mask)]; + uint32_t newTag = (vpn << 12) | (g << 11) | (valid << 10) | pid; + uint32_t newData = v & (CR_TLBACC_C | CR_TLBACC_R | CR_TLBACC_W | + CR_TLBACC_X | R_CR_TLBACC_PFN_MASK); - /* if tlbmisc.WE == 1 then trigger a TLB write on writes to TLBACC */ - if (env->regs[CR_TLBMISC] & CR_TLBMISC_WR) { - int way = (env->regs[CR_TLBMISC] >> CR_TLBMISC_WAY_SHIFT); - int vpn = (env->mmu.pteaddr_wr & CR_PTEADDR_VPN_MASK) >> 2; - int pid = (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK) >> 4; - int g = (v & CR_TLBACC_G) ? 1 : 0; - int valid = ((vpn & CR_TLBACC_PFN_MASK) < 0xC0000) ? 1 : 0; - Nios2TLBEntry *entry = - &env->mmu.tlb[(way * cpu->tlb_num_ways) + - (vpn & env->mmu.tlb_entry_mask)]; - uint32_t newTag = (vpn << 12) | (g << 11) | (valid << 10) | pid; - uint32_t newData = v & (CR_TLBACC_C | CR_TLBACC_R | CR_TLBACC_W | - CR_TLBACC_X | CR_TLBACC_PFN_MASK); - - if ((entry->tag != newTag) || (entry->data != newData)) { - if (entry->tag & (1 << 10)) { - /* Flush existing entry */ - MMU_LOG(qemu_log("TLB Flush Page (OLD) %08X\n", - entry->tag & TARGET_PAGE_MASK)); - tlb_flush_page(cs, entry->tag & TARGET_PAGE_MASK); - } - entry->tag = newTag; - entry->data = newData; - MMU_LOG(qemu_log("TLB[%d] = %08X %08X\n", - (way * cpu->tlb_num_ways) + - (vpn & env->mmu.tlb_entry_mask), - entry->tag, entry->data)); + if ((entry->tag != newTag) || (entry->data != newData)) { + if (entry->tag & (1 << 10)) { + /* Flush existing entry */ + tlb_flush_page(cs, entry->tag & TARGET_PAGE_MASK); } - /* Auto-increment tlbmisc.WAY */ - env->regs[CR_TLBMISC] = - (env->regs[CR_TLBMISC] & ~CR_TLBMISC_WAY_MASK) | - (((way + 1) & (cpu->tlb_num_ways - 1)) << - CR_TLBMISC_WAY_SHIFT); + entry->tag = newTag; + entry->data = newData; } - - /* Writes to TLBACC don't change the read-back value */ - env->mmu.tlbacc_wr = v; - break; - - case CR_TLBMISC: - MMU_LOG(qemu_log("TLBMISC: WAY %X, FLAGS %c%c%c%c%c%c, PID %04X\n", - v >> CR_TLBMISC_WAY_SHIFT, - (v & CR_TLBMISC_RD) ? 'R' : '.', - (v & CR_TLBMISC_WR) ? 'W' : '.', - (v & CR_TLBMISC_DBL) ? '2' : '.', - (v & CR_TLBMISC_BAD) ? 'B' : '.', - (v & CR_TLBMISC_PERM) ? 'P' : '.', - (v & CR_TLBMISC_D) ? 'D' : '.', - (v & CR_TLBMISC_PID_MASK) >> 4)); - - if ((v & CR_TLBMISC_PID_MASK) != - (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK)) { - mmu_flush_pid(env, (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK) >> - CR_TLBMISC_PID_SHIFT); - } - /* if tlbmisc.RD == 1 then trigger a TLB read on writes to TLBMISC */ - if (v & CR_TLBMISC_RD) { - int way = (v >> CR_TLBMISC_WAY_SHIFT); - int vpn = (env->mmu.pteaddr_wr & CR_PTEADDR_VPN_MASK) >> 2; - Nios2TLBEntry *entry = - &env->mmu.tlb[(way * cpu->tlb_num_ways) + - (vpn & env->mmu.tlb_entry_mask)]; - - env->regs[CR_TLBACC] &= CR_TLBACC_IGN_MASK; - env->regs[CR_TLBACC] |= entry->data; - env->regs[CR_TLBACC] |= (entry->tag & (1 << 11)) ? 
CR_TLBACC_G : 0; - env->regs[CR_TLBMISC] = - (v & ~CR_TLBMISC_PID_MASK) | - ((entry->tag & ((1 << cpu->pid_num_bits) - 1)) << - CR_TLBMISC_PID_SHIFT); - env->regs[CR_PTEADDR] &= ~CR_PTEADDR_VPN_MASK; - env->regs[CR_PTEADDR] |= (entry->tag >> 12) << CR_PTEADDR_VPN_SHIFT; - MMU_LOG(qemu_log("TLB READ way %d, vpn %05X, tag %08X, data %08X, " - "tlbacc %08X, tlbmisc %08X, pteaddr %08X\n", - way, vpn, entry->tag, entry->data, - env->regs[CR_TLBACC], env->regs[CR_TLBMISC], - env->regs[CR_PTEADDR])); - } else { - env->regs[CR_TLBMISC] = v; - } - - env->mmu.tlbmisc_wr = v; - break; - - case CR_PTEADDR: - MMU_LOG(qemu_log("PTEADDR: PTBASE %03X, VPN %05X\n", - v >> CR_PTEADDR_PTBASE_SHIFT, - (v & CR_PTEADDR_VPN_MASK) >> CR_PTEADDR_VPN_SHIFT)); - - /* Writes to PTEADDR don't change the read-back VPN value */ - env->regs[CR_PTEADDR] = (v & ~CR_PTEADDR_VPN_MASK) | - (env->regs[CR_PTEADDR] & CR_PTEADDR_VPN_MASK); - env->mmu.pteaddr_wr = v; - break; - - default: - break; + /* Auto-increment tlbmisc.WAY */ + env->ctrl[CR_TLBMISC] = FIELD_DP32(env->ctrl[CR_TLBMISC], + CR_TLBMISC, WAY, + (way + 1) & (cpu->tlb_num_ways - 1)); } + + /* Writes to TLBACC don't change the read-back value */ + env->mmu.tlbacc_wr = v; +} + +void helper_mmu_write_tlbmisc(CPUNios2State *env, uint32_t v) +{ + Nios2CPU *cpu = env_archcpu(env); + uint32_t new_pid = FIELD_EX32(v, CR_TLBMISC, PID); + uint32_t old_pid = FIELD_EX32(env->mmu.tlbmisc_wr, CR_TLBMISC, PID); + uint32_t way = FIELD_EX32(v, CR_TLBMISC, WAY); + + trace_nios2_mmu_write_tlbmisc(way, + (v & CR_TLBMISC_RD) ? 'R' : '.', + (v & CR_TLBMISC_WE) ? 'W' : '.', + (v & CR_TLBMISC_DBL) ? '2' : '.', + (v & CR_TLBMISC_BAD) ? 'B' : '.', + (v & CR_TLBMISC_PERM) ? 'P' : '.', + (v & CR_TLBMISC_D) ? 'D' : '.', + new_pid); + + if (new_pid != old_pid) { + mmu_flush_pid(env, old_pid); + } + + /* if tlbmisc.RD == 1 then trigger a TLB read on writes to TLBMISC */ + if (v & CR_TLBMISC_RD) { + int vpn = FIELD_EX32(env->mmu.pteaddr_wr, CR_PTEADDR, VPN); + Nios2TLBEntry *entry = + &env->mmu.tlb[(way * cpu->tlb_num_ways) + + (vpn & env->mmu.tlb_entry_mask)]; + + env->ctrl[CR_TLBACC] &= R_CR_TLBACC_IG_MASK; + env->ctrl[CR_TLBACC] |= entry->data; + env->ctrl[CR_TLBACC] |= (entry->tag & (1 << 11)) ? CR_TLBACC_G : 0; + env->ctrl[CR_TLBMISC] = FIELD_DP32(v, CR_TLBMISC, PID, + entry->tag & + ((1 << cpu->pid_num_bits) - 1)); + env->ctrl[CR_PTEADDR] = FIELD_DP32(env->ctrl[CR_PTEADDR], + CR_PTEADDR, VPN, + entry->tag >> TARGET_PAGE_BITS); + } else { + env->ctrl[CR_TLBMISC] = v; + } + + env->mmu.tlbmisc_wr = v; +} + +void helper_mmu_write_pteaddr(CPUNios2State *env, uint32_t v) +{ + trace_nios2_mmu_write_pteaddr(FIELD_EX32(v, CR_PTEADDR, PTBASE), + FIELD_EX32(v, CR_PTEADDR, VPN)); + + /* Writes to PTEADDR don't change the read-back VPN value */ + env->ctrl[CR_PTEADDR] = ((v & ~R_CR_PTEADDR_VPN_MASK) | + (env->ctrl[CR_PTEADDR] & R_CR_PTEADDR_VPN_MASK)); + env->mmu.pteaddr_wr = v; } void mmu_init(CPUNios2State *env) @@ -246,8 +185,6 @@ void mmu_init(CPUNios2State *env) Nios2CPU *cpu = env_archcpu(env); Nios2MMU *mmu = &env->mmu; - MMU_LOG(qemu_log("mmu_init\n")); - mmu->tlb_entry_mask = (cpu->tlb_num_entries / cpu->tlb_num_ways) - 1; mmu->tlb = g_new0(Nios2TLBEntry, cpu->tlb_num_entries); } @@ -270,12 +207,10 @@ void dump_mmu(CPUNios2State *env) entry->tag >> 12, entry->tag & ((1 << cpu->pid_num_bits) - 1), (entry->tag & (1 << 11)) ? 'G' : '-', - entry->data & CR_TLBACC_PFN_MASK, + FIELD_EX32(entry->data, CR_TLBACC, PFN), (entry->data & CR_TLBACC_C) ? 'C' : '-', (entry->data & CR_TLBACC_R) ? 
'R' : '-', (entry->data & CR_TLBACC_W) ? 'W' : '-', (entry->data & CR_TLBACC_X) ? 'X' : '-'); } } - -#endif /* !CONFIG_USER_ONLY */ diff --git a/target/nios2/mmu.h b/target/nios2/mmu.h index 4f46fbb82e..5b085900fb 100644 --- a/target/nios2/mmu.h +++ b/target/nios2/mmu.h @@ -21,6 +21,8 @@ #ifndef NIOS2_MMU_H #define NIOS2_MMU_H +#include "cpu.h" + typedef struct Nios2TLBEntry { target_ulong tag; target_ulong data; @@ -44,7 +46,6 @@ void mmu_flip_um(CPUNios2State *env, unsigned int um); unsigned int mmu_translate(CPUNios2State *env, Nios2MMULookup *lu, target_ulong vaddr, int rw, int mmu_idx); -void mmu_read_debug(CPUNios2State *env, uint32_t rn); void mmu_write(CPUNios2State *env, uint32_t rn, uint32_t v); void mmu_init(CPUNios2State *env); diff --git a/target/nios2/nios2-semi.c b/target/nios2/nios2-semi.c index e508b2fafc..f76e8588c5 100644 --- a/target/nios2/nios2-semi.c +++ b/target/nios2/nios2-semi.c @@ -22,15 +22,10 @@ */ #include "qemu/osdep.h" - #include "cpu.h" -#if defined(CONFIG_USER_ONLY) -#include "qemu.h" -#else -#include "qemu-common.h" #include "exec/gdbstub.h" -#include "exec/softmmu-semi.h" -#endif +#include "semihosting/syscalls.h" +#include "semihosting/softmmu-uaccess.h" #include "qemu/log.h" #define HOSTED_EXIT 0 @@ -48,105 +43,43 @@ #define HOSTED_ISATTY 12 #define HOSTED_SYSTEM 13 -typedef uint32_t gdb_mode_t; -typedef uint32_t gdb_time_t; - -struct nios2_gdb_stat { - uint32_t gdb_st_dev; /* device */ - uint32_t gdb_st_ino; /* inode */ - gdb_mode_t gdb_st_mode; /* protection */ - uint32_t gdb_st_nlink; /* number of hard links */ - uint32_t gdb_st_uid; /* user ID of owner */ - uint32_t gdb_st_gid; /* group ID of owner */ - uint32_t gdb_st_rdev; /* device type (if inode device) */ - uint64_t gdb_st_size; /* total size, in bytes */ - uint64_t gdb_st_blksize; /* blocksize for filesystem I/O */ - uint64_t gdb_st_blocks; /* number of blocks allocated */ - gdb_time_t gdb_st_atime; /* time of last access */ - gdb_time_t gdb_st_mtime; /* time of last modification */ - gdb_time_t gdb_st_ctime; /* time of last change */ -} QEMU_PACKED; - -struct gdb_timeval { - gdb_time_t tv_sec; /* second */ - uint64_t tv_usec; /* microsecond */ -} QEMU_PACKED; - -#define GDB_O_RDONLY 0x0 -#define GDB_O_WRONLY 0x1 -#define GDB_O_RDWR 0x2 -#define GDB_O_APPEND 0x8 -#define GDB_O_CREAT 0x200 -#define GDB_O_TRUNC 0x400 -#define GDB_O_EXCL 0x800 - -static int translate_openflags(int flags) +static int host_to_gdb_errno(int err) { - int hf; - - if (flags & GDB_O_WRONLY) { - hf = O_WRONLY; - } else if (flags & GDB_O_RDWR) { - hf = O_RDWR; - } else { - hf = O_RDONLY; +#define E(X) case E##X: return GDB_E##X + switch (err) { + E(PERM); + E(NOENT); + E(INTR); + E(BADF); + E(ACCES); + E(FAULT); + E(BUSY); + E(EXIST); + E(NODEV); + E(NOTDIR); + E(ISDIR); + E(INVAL); + E(NFILE); + E(MFILE); + E(FBIG); + E(NOSPC); + E(SPIPE); + E(ROFS); + E(NAMETOOLONG); + default: + return GDB_EUNKNOWN; } - - if (flags & GDB_O_APPEND) { - hf |= O_APPEND; - } - if (flags & GDB_O_CREAT) { - hf |= O_CREAT; - } - if (flags & GDB_O_TRUNC) { - hf |= O_TRUNC; - } - if (flags & GDB_O_EXCL) { - hf |= O_EXCL; - } - - return hf; +#undef E } -static bool translate_stat(CPUNios2State *env, target_ulong addr, - struct stat *s) -{ - struct nios2_gdb_stat *p; - - p = lock_user(VERIFY_WRITE, addr, sizeof(struct nios2_gdb_stat), 0); - - if (!p) { - return false; - } - p->gdb_st_dev = cpu_to_be32(s->st_dev); - p->gdb_st_ino = cpu_to_be32(s->st_ino); - p->gdb_st_mode = cpu_to_be32(s->st_mode); - p->gdb_st_nlink = cpu_to_be32(s->st_nlink); - 
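/*
 * Standalone illustration, not from the patch, of the token-pasting trick
 * host_to_gdb_errno() uses above: one macro expands to a case label for the
 * host errno constant and returns the matching GDB protocol value.  The
 * GDB_E* values here are placeholders; the real ones come from the gdbstub
 * headers.
 */
#include <errno.h>

enum { GDB_EPERM = 1, GDB_ENOENT = 2, GDB_EUNKNOWN = 9999 };

static int demo_host_to_gdb_errno(int err)
{
#define E(X) case E##X: return GDB_E##X
    switch (err) {
    E(PERM);
    E(NOENT);
    default:
        return GDB_EUNKNOWN;
    }
#undef E
}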
p->gdb_st_uid = cpu_to_be32(s->st_uid); - p->gdb_st_gid = cpu_to_be32(s->st_gid); - p->gdb_st_rdev = cpu_to_be32(s->st_rdev); - p->gdb_st_size = cpu_to_be64(s->st_size); -#ifdef _WIN32 - /* Windows stat is missing some fields. */ - p->gdb_st_blksize = 0; - p->gdb_st_blocks = 0; -#else - p->gdb_st_blksize = cpu_to_be64(s->st_blksize); - p->gdb_st_blocks = cpu_to_be64(s->st_blocks); -#endif - p->gdb_st_atime = cpu_to_be32(s->st_atime); - p->gdb_st_mtime = cpu_to_be32(s->st_mtime); - p->gdb_st_ctime = cpu_to_be32(s->st_ctime); - unlock_user(p, addr, sizeof(struct nios2_gdb_stat)); - return true; -} - -static void nios2_semi_return_u32(CPUNios2State *env, uint32_t ret, - uint32_t err) +static void nios2_semi_u32_cb(CPUState *cs, uint64_t ret, int err) { + Nios2CPU *cpu = NIOS2_CPU(cs); + CPUNios2State *env = &cpu->env; target_ulong args = env->regs[R_ARG1]; + if (put_user_u32(ret, args) || - put_user_u32(err, args + 4)) { + put_user_u32(host_to_gdb_errno(err), args + 4)) { /* * The nios2 semihosting ABI does not provide any way to report this * error to the guest, so the best we can do is log it in qemu. @@ -157,35 +90,18 @@ static void nios2_semi_return_u32(CPUNios2State *env, uint32_t ret, } } -static void nios2_semi_return_u64(CPUNios2State *env, uint64_t ret, - uint32_t err) -{ - target_ulong args = env->regs[R_ARG1]; - if (put_user_u32(ret >> 32, args) || - put_user_u32(ret, args + 4) || - put_user_u32(err, args + 8)) { - /* No way to report this via nios2 semihosting ABI; just log it */ - qemu_log_mask(LOG_GUEST_ERROR, "nios2-semihosting: return value " - "discarded because argument block not writable\n"); - } -} - -static int nios2_semi_is_lseek; - -static void nios2_semi_cb(CPUState *cs, target_ulong ret, target_ulong err) +static void nios2_semi_u64_cb(CPUState *cs, uint64_t ret, int err) { Nios2CPU *cpu = NIOS2_CPU(cs); CPUNios2State *env = &cpu->env; + target_ulong args = env->regs[R_ARG1]; - if (nios2_semi_is_lseek) { - /* - * FIXME: We've already lost the high bits of the lseek - * return value. 
- */ - nios2_semi_return_u64(env, ret, err); - nios2_semi_is_lseek = 0; - } else { - nios2_semi_return_u32(env, ret, err); + if (put_user_u32(ret >> 32, args) || + put_user_u32(ret, args + 4) || + put_user_u32(host_to_gdb_errno(err), args + 8)) { + /* No way to report this via nios2 semihosting ABI; just log it */ + qemu_log_mask(LOG_GUEST_ERROR, "nios2-semihosting: return value " + "discarded because argument block not writable\n"); } } @@ -195,21 +111,22 @@ static void nios2_semi_cb(CPUState *cs, target_ulong ret, target_ulong err) */ #define GET_ARG(n) do { \ if (get_user_ual(arg ## n, args + (n) * 4)) { \ - result = -1; \ - errno = EFAULT; \ goto failed; \ } \ } while (0) +#define GET_ARG64(n) do { \ + if (get_user_ual(arg ## n, args + (n) * 4)) { \ + goto failed64; \ + } \ +} while (0) + void do_nios2_semihosting(CPUNios2State *env) { + CPUState *cs = env_cpu(env); int nr; uint32_t args; target_ulong arg0, arg1, arg2, arg3; - void *p; - void *q; - uint32_t len; - uint32_t result; nr = env->regs[R_ARG0]; args = env->regs[R_ARG1]; @@ -217,238 +134,98 @@ void do_nios2_semihosting(CPUNios2State *env) case HOSTED_EXIT: gdb_exit(env->regs[R_ARG0]); exit(env->regs[R_ARG0]); + case HOSTED_OPEN: GET_ARG(0); GET_ARG(1); GET_ARG(2); GET_ARG(3); - if (use_gdb_syscalls()) { - gdb_do_syscall(nios2_semi_cb, "open,%s,%x,%x", arg0, (int)arg1, - arg2, arg3); - return; - } else { - p = lock_user_string(arg0); - if (!p) { - result = -1; - errno = EFAULT; - } else { - result = open(p, translate_openflags(arg2), arg3); - unlock_user(p, arg0, 0); - } - } + semihost_sys_open(cs, nios2_semi_u32_cb, arg0, arg1, arg2, arg3); break; + case HOSTED_CLOSE: - { - /* Ignore attempts to close stdin/out/err. */ - GET_ARG(0); - int fd = arg0; - if (fd > 2) { - if (use_gdb_syscalls()) { - gdb_do_syscall(nios2_semi_cb, "close,%x", arg0); - return; - } else { - result = close(fd); - } - } else { - result = 0; - } - break; - } + GET_ARG(0); + semihost_sys_close(cs, nios2_semi_u32_cb, arg0); + break; + case HOSTED_READ: GET_ARG(0); GET_ARG(1); GET_ARG(2); - len = arg2; - if (use_gdb_syscalls()) { - gdb_do_syscall(nios2_semi_cb, "read,%x,%x,%x", - arg0, arg1, len); - return; - } else { - p = lock_user(VERIFY_WRITE, arg1, len, 0); - if (!p) { - result = -1; - errno = EFAULT; - } else { - result = read(arg0, p, len); - unlock_user(p, arg1, len); - } - } + semihost_sys_read(cs, nios2_semi_u32_cb, arg0, arg1, arg2); break; + case HOSTED_WRITE: GET_ARG(0); GET_ARG(1); GET_ARG(2); - len = arg2; - if (use_gdb_syscalls()) { - gdb_do_syscall(nios2_semi_cb, "write,%x,%x,%x", - arg0, arg1, len); - return; - } else { - p = lock_user(VERIFY_READ, arg1, len, 1); - if (!p) { - result = -1; - errno = EFAULT; - } else { - result = write(arg0, p, len); - unlock_user(p, arg0, 0); - } - } + semihost_sys_write(cs, nios2_semi_u32_cb, arg0, arg1, arg2); break; + case HOSTED_LSEEK: - { - uint64_t off; - GET_ARG(0); - GET_ARG(1); - GET_ARG(2); - GET_ARG(3); - off = (uint32_t)arg2 | ((uint64_t)arg1 << 32); - if (use_gdb_syscalls()) { - nios2_semi_is_lseek = 1; - gdb_do_syscall(nios2_semi_cb, "lseek,%x,%lx,%x", - arg0, off, arg3); - } else { - off = lseek(arg0, off, arg3); - nios2_semi_return_u64(env, off, errno); - } - return; - } + GET_ARG64(0); + GET_ARG64(1); + GET_ARG64(2); + GET_ARG64(3); + semihost_sys_lseek(cs, nios2_semi_u64_cb, arg0, + deposit64(arg2, arg1, 32, 32), arg3); + break; + case HOSTED_RENAME: GET_ARG(0); GET_ARG(1); GET_ARG(2); GET_ARG(3); - if (use_gdb_syscalls()) { - gdb_do_syscall(nios2_semi_cb, "rename,%s,%s", - arg0, 
(int)arg1, arg2, (int)arg3); - return; - } else { - p = lock_user_string(arg0); - q = lock_user_string(arg2); - if (!p || !q) { - result = -1; - errno = EFAULT; - } else { - result = rename(p, q); - } - unlock_user(p, arg0, 0); - unlock_user(q, arg2, 0); - } + semihost_sys_rename(cs, nios2_semi_u32_cb, arg0, arg1, arg2, arg3); break; + case HOSTED_UNLINK: GET_ARG(0); GET_ARG(1); - if (use_gdb_syscalls()) { - gdb_do_syscall(nios2_semi_cb, "unlink,%s", - arg0, (int)arg1); - return; - } else { - p = lock_user_string(arg0); - if (!p) { - result = -1; - errno = EFAULT; - } else { - result = unlink(p); - unlock_user(p, arg0, 0); - } - } + semihost_sys_remove(cs, nios2_semi_u32_cb, arg0, arg1); break; + case HOSTED_STAT: GET_ARG(0); GET_ARG(1); GET_ARG(2); - if (use_gdb_syscalls()) { - gdb_do_syscall(nios2_semi_cb, "stat,%s,%x", - arg0, (int)arg1, arg2); - return; - } else { - struct stat s; - p = lock_user_string(arg0); - if (!p) { - result = -1; - errno = EFAULT; - } else { - result = stat(p, &s); - unlock_user(p, arg0, 0); - } - if (result == 0 && !translate_stat(env, arg2, &s)) { - result = -1; - errno = EFAULT; - } - } + semihost_sys_stat(cs, nios2_semi_u32_cb, arg0, arg1, arg2); break; + case HOSTED_FSTAT: GET_ARG(0); GET_ARG(1); - if (use_gdb_syscalls()) { - gdb_do_syscall(nios2_semi_cb, "fstat,%x,%x", - arg0, arg1); - return; - } else { - struct stat s; - result = fstat(arg0, &s); - if (result == 0 && !translate_stat(env, arg1, &s)) { - result = -1; - errno = EFAULT; - } - } + semihost_sys_fstat(cs, nios2_semi_u32_cb, arg0, arg1); break; + case HOSTED_GETTIMEOFDAY: - /* Only the tv parameter is used. tz is assumed NULL. */ GET_ARG(0); - if (use_gdb_syscalls()) { - gdb_do_syscall(nios2_semi_cb, "gettimeofday,%x,%x", - arg0, 0); - return; - } else { - qemu_timeval tv; - struct gdb_timeval *p; - result = qemu_gettimeofday(&tv); - if (result != 0) { - p = lock_user(VERIFY_WRITE, arg0, sizeof(struct gdb_timeval), - 0); - if (!p) { - result = -1; - errno = EFAULT; - } else { - p->tv_sec = cpu_to_be32(tv.tv_sec); - p->tv_usec = cpu_to_be64(tv.tv_usec); - unlock_user(p, arg0, sizeof(struct gdb_timeval)); - } - } - } + GET_ARG(1); + semihost_sys_gettimeofday(cs, nios2_semi_u32_cb, arg0, arg1); break; + case HOSTED_ISATTY: GET_ARG(0); - if (use_gdb_syscalls()) { - gdb_do_syscall(nios2_semi_cb, "isatty,%x", arg0); - return; - } else { - result = isatty(arg0); - } + semihost_sys_isatty(cs, nios2_semi_u32_cb, arg0); break; + case HOSTED_SYSTEM: GET_ARG(0); GET_ARG(1); - if (use_gdb_syscalls()) { - gdb_do_syscall(nios2_semi_cb, "system,%s", - arg0, (int)arg1); - return; - } else { - p = lock_user_string(arg0); - if (!p) { - result = -1; - errno = EFAULT; - } else { - result = system(p); - unlock_user(p, arg0, 0); - } - } + semihost_sys_system(cs, nios2_semi_u32_cb, arg0, arg1); break; + default: qemu_log_mask(LOG_GUEST_ERROR, "nios2-semihosting: unsupported " "semihosting syscall %d\n", nr); - result = 0; + nios2_semi_u32_cb(cs, -1, ENOSYS); + break; + + failed: + nios2_semi_u32_cb(cs, -1, EFAULT); + break; + failed64: + nios2_semi_u64_cb(cs, -1, EFAULT); + break; } -failed: - nios2_semi_return_u32(env, result, errno); } diff --git a/target/nios2/op_helper.c b/target/nios2/op_helper.c index a59003855a..0aaf33ffc2 100644 --- a/target/nios2/op_helper.c +++ b/target/nios2/op_helper.c @@ -21,41 +21,100 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" #include "exec/exec-all.h" #include "qemu/main-loop.h" -#if !defined(CONFIG_USER_ONLY) -void 
helper_mmu_read_debug(CPUNios2State *env, uint32_t rn) -{ - mmu_read_debug(env, rn); -} - -void helper_mmu_write(CPUNios2State *env, uint32_t rn, uint32_t v) -{ - mmu_write(env, rn, v); -} - -static void nios2_check_interrupts(CPUNios2State *env) -{ - if (env->irq_pending && - (env->regs[CR_STATUS] & CR_STATUS_PIE)) { - env->irq_pending = 0; - cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HARD); - } -} - -void helper_check_interrupts(CPUNios2State *env) -{ - qemu_mutex_lock_iothread(); - nios2_check_interrupts(env); - qemu_mutex_unlock_iothread(); -} -#endif /* !CONFIG_USER_ONLY */ - void helper_raise_exception(CPUNios2State *env, uint32_t index) { CPUState *cs = env_cpu(env); cs->exception_index = index; cpu_loop_exit(cs); } + +void nios2_cpu_loop_exit_advance(CPUNios2State *env, uintptr_t retaddr) +{ + CPUState *cs = env_cpu(env); + + /* + * Note that PC is advanced for all hardware exceptions. + * Do this here, rather than in restore_state_to_opc(), + * lest we affect QEMU internal exceptions, like EXCP_DEBUG. + */ + cpu_restore_state(cs, retaddr); + env->pc += 4; + cpu_loop_exit(cs); +} + +static void maybe_raise_div(CPUNios2State *env, uintptr_t ra) +{ + Nios2CPU *cpu = env_archcpu(env); + CPUState *cs = env_cpu(env); + + if (cpu->diverr_present) { + cs->exception_index = EXCP_DIV; + nios2_cpu_loop_exit_advance(env, ra); + } +} + +int32_t helper_divs(CPUNios2State *env, int32_t num, int32_t den) +{ + if (unlikely(den == 0) || unlikely(den == -1 && num == INT32_MIN)) { + maybe_raise_div(env, GETPC()); + return num; /* undefined */ + } + return num / den; +} + +uint32_t helper_divu(CPUNios2State *env, uint32_t num, uint32_t den) +{ + if (unlikely(den == 0)) { + maybe_raise_div(env, GETPC()); + return num; /* undefined */ + } + return num / den; +} + +#ifndef CONFIG_USER_ONLY +void helper_eret(CPUNios2State *env, uint32_t new_status, uint32_t new_pc) +{ + Nios2CPU *cpu = env_archcpu(env); + CPUState *cs = env_cpu(env); + + if (unlikely(new_pc & 3)) { + env->ctrl[CR_BADADDR] = new_pc; + cs->exception_index = EXCP_UNALIGND; + nios2_cpu_loop_exit_advance(env, GETPC()); + } + + /* + * None of estatus, bstatus, or sstatus have constraints on write; + * do not allow reserved fields in status to be set. + * When shadow registers are enabled, eret *does* restore CRS. + * Rather than testing eic_present to decide, mask CRS out of + * the set of readonly fields. + */ + new_status &= cpu->cr_state[CR_STATUS].writable | + (cpu->cr_state[CR_STATUS].readonly & R_CR_STATUS_CRS_MASK); + + env->ctrl[CR_STATUS] = new_status; + env->pc = new_pc; + nios2_update_crs(env); + cpu_loop_exit(cs); +} + +/* + * RDPRS and WRPRS are implemented out of line so that if PRS == CRS, + * all of the tcg global temporaries are synced back to ENV. 
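/*
 * Sketch, not from the patch: helper_divs()/helper_divu() above guard the
 * only 32-bit divisions that can trap on the host -- division by zero, and
 * for the signed case INT32_MIN / -1, whose quotient does not fit in 32
 * bits.  The guest result is architecturally undefined, so the numerator is
 * returned; raising EXCP_DIV is left to maybe_raise_div().  Model functions
 * below are illustrative only.
 */
#include <stdint.h>
#include <stdbool.h>

static int32_t divs_model(int32_t num, int32_t den, bool *trapped)
{
    *trapped = (den == 0) || (den == -1 && num == INT32_MIN);
    return *trapped ? num : num / den;   /* division only reached when safe */
}

static uint32_t divu_model(uint32_t num, uint32_t den, bool *trapped)
{
    *trapped = (den == 0);
    return *trapped ? num : num / den;
}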
+ */ +uint32_t helper_rdprs(CPUNios2State *env, uint32_t regno) +{ + unsigned prs = FIELD_EX32(env->ctrl[CR_STATUS], CR_STATUS, PRS); + return env->shadow_regs[prs][regno]; +} + +void helper_wrprs(CPUNios2State *env, uint32_t regno, uint32_t val) +{ + unsigned prs = FIELD_EX32(env->ctrl[CR_STATUS], CR_STATUS, PRS); + env->shadow_regs[prs][regno] = val; +} +#endif /* !CONFIG_USER_ONLY */ diff --git a/target/nios2/trace-events b/target/nios2/trace-events new file mode 100644 index 0000000000..07f1f0a5e7 --- /dev/null +++ b/target/nios2/trace-events @@ -0,0 +1,10 @@ +# mmu.c +nios2_mmu_translate_miss(uint32_t vaddr, uint32_t pid, uint32_t index, uint32_t tag) "mmu_translate: MISS vaddr=0x%08x pid=%u TLB[%u] tag=0x%08x" +nios2_mmu_translate_hit(uint32_t vaddr, uint32_t pid, uint32_t index, uint32_t paddr, uint32_t prot) "mmu_translate: HIT vaddr=0x%08x pid=%u TLB[%u] paddr=0x%08x prot=0x%x" + +nios2_mmu_flush_pid_miss(uint32_t pid, uint32_t index, uint32_t vaddr) "mmu_flush: MISS pid=%u TLB[%u] tag=0x%08x" +nios2_mmu_flush_pid_hit(uint32_t pid, uint32_t index, uint32_t vaddr) "mmu_flush: HIT pid=%u TLB[%u] vaddr=0x%08x" + +nios2_mmu_write_tlbacc(uint32_t ig, char c, char r, char w, char x, char g, uint32_t pfn) "mmu_write_tlbacc: ig=0x%02x flags=%c%c%c%c%c pfn=0x%08x" +nios2_mmu_write_tlbmisc(uint32_t way, char r, char w, char t, char b, char p, char d, uint32_t pid) "mmu_write_tlbmisc: way=0x%x flags=%c%c%c%c%c%c pid=%u" +nios2_mmu_write_pteaddr(uint32_t ptb, uint32_t vpn) "mmu_write_pteaddr: ptbase=0x%03x vpn=0x%05x" diff --git a/target/nios2/translate.c b/target/nios2/translate.c index 08d7ac5398..4db8b47744 100644 --- a/target/nios2/translate.c +++ b/target/nios2/translate.c @@ -33,9 +33,9 @@ #include "exec/translator.h" #include "qemu/qemu-print.h" #include "exec/gen-icount.h" +#include "semihosting/semihost.h" /* is_jmp field values */ -#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */ #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */ #define INSTRUCTION_FLG(func, flags) { (func), (flags) } @@ -52,32 +52,53 @@ #define INSN_R_TYPE 0x3A /* I-Type instruction parsing */ +typedef struct { + uint8_t op; + union { + uint16_t u; + int16_t s; + } imm16; + uint8_t b; + uint8_t a; +} InstrIType; + #define I_TYPE(instr, code) \ - struct { \ - uint8_t op; \ - union { \ - uint16_t u; \ - int16_t s; \ - } imm16; \ - uint8_t b; \ - uint8_t a; \ - } (instr) = { \ + InstrIType (instr) = { \ .op = extract32((code), 0, 6), \ .imm16.u = extract32((code), 6, 16), \ .b = extract32((code), 22, 5), \ .a = extract32((code), 27, 5), \ } +typedef target_ulong ImmFromIType(const InstrIType *); + +static target_ulong imm_unsigned(const InstrIType *i) +{ + return i->imm16.u; +} + +static target_ulong imm_signed(const InstrIType *i) +{ + return i->imm16.s; +} + +static target_ulong imm_shifted(const InstrIType *i) +{ + return i->imm16.u << 16; +} + /* R-Type instruction parsing */ +typedef struct { + uint8_t op; + uint8_t imm5; + uint8_t opx; + uint8_t c; + uint8_t b; + uint8_t a; +} InstrRType; + #define R_TYPE(instr, code) \ - struct { \ - uint8_t op; \ - uint8_t imm5; \ - uint8_t opx; \ - uint8_t c; \ - uint8_t b; \ - uint8_t a; \ - } (instr) = { \ + InstrRType (instr) = { \ .op = extract32((code), 0, 6), \ .imm5 = extract32((code), 6, 5), \ .opx = extract32((code), 11, 6), \ @@ -87,23 +108,36 @@ } /* J-Type instruction parsing */ +typedef struct { + uint8_t op; + uint32_t imm26; +} InstrJType; + #define J_TYPE(instr, code) \ - struct { \ - uint8_t op; \ - uint32_t 
imm26; \ - } (instr) = { \ + InstrJType (instr) = { \ .op = extract32((code), 0, 6), \ .imm26 = extract32((code), 6, 26), \ } +typedef void GenFn2i(TCGv, TCGv, target_long); +typedef void GenFn3(TCGv, TCGv, TCGv); +typedef void GenFn4(TCGv, TCGv, TCGv, TCGv); + typedef struct DisasContext { DisasContextBase base; - TCGv_i32 zero; target_ulong pc; int mem_idx; + uint32_t tb_flags; + TCGv sink; + const ControlRegState *cr_state; + bool eic_present; } DisasContext; -static TCGv cpu_R[NUM_CORE_REGS]; +static TCGv cpu_R[NUM_GP_REGS]; +static TCGv cpu_pc; +#ifndef CONFIG_USER_ONLY +static TCGv cpu_crs_R[NUM_GP_REGS]; +#endif typedef struct Nios2Instruction { void (*handler)(DisasContext *dc, uint32_t code, uint32_t flags); @@ -122,31 +156,57 @@ static uint8_t get_opxcode(uint32_t code) return instr.opx; } -static TCGv load_zero(DisasContext *dc) +static TCGv load_gpr(DisasContext *dc, unsigned reg) { - if (!dc->zero) { - dc->zero = tcg_const_i32(0); - } - return dc->zero; -} + assert(reg < NUM_GP_REGS); -static TCGv load_gpr(DisasContext *dc, uint8_t reg) -{ - if (likely(reg != R_ZERO)) { + /* + * With shadow register sets, register r0 does not necessarily contain 0, + * but it is overwhelmingly likely that it does -- software is supposed + * to have set r0 to 0 in every shadow register set before use. + */ + if (unlikely(reg == R_ZERO) && FIELD_EX32(dc->tb_flags, TBFLAGS, R0_0)) { + return tcg_constant_tl(0); + } + if (FIELD_EX32(dc->tb_flags, TBFLAGS, CRS0)) { return cpu_R[reg]; - } else { - return load_zero(dc); } +#ifdef CONFIG_USER_ONLY + g_assert_not_reached(); +#else + return cpu_crs_R[reg]; +#endif } -static void t_gen_helper_raise_exception(DisasContext *dc, - uint32_t index) +static TCGv dest_gpr(DisasContext *dc, unsigned reg) { - TCGv_i32 tmp = tcg_const_i32(index); + assert(reg < NUM_GP_REGS); - tcg_gen_movi_tl(cpu_R[R_PC], dc->pc); - gen_helper_raise_exception(cpu_env, tmp); - tcg_temp_free_i32(tmp); + /* + * The spec for shadow register sets isn't clear, but we assume that + * writes to r0 are discarded regardless of CRS. + */ + if (unlikely(reg == R_ZERO)) { + if (dc->sink == NULL) { + dc->sink = tcg_temp_new(); + } + return dc->sink; + } + if (FIELD_EX32(dc->tb_flags, TBFLAGS, CRS0)) { + return cpu_R[reg]; + } +#ifdef CONFIG_USER_ONLY + g_assert_not_reached(); +#else + return cpu_crs_R[reg]; +#endif +} + +static void t_gen_helper_raise_exception(DisasContext *dc, uint32_t index) +{ + /* Note that PC is advanced for all hardware exceptions. 
*/ + tcg_gen_movi_tl(cpu_pc, dc->base.pc_next); + gen_helper_raise_exception(cpu_env, tcg_constant_i32(index)); dc->base.is_jmp = DISAS_NORETURN; } @@ -156,12 +216,36 @@ static void gen_goto_tb(DisasContext *dc, int n, uint32_t dest) if (translator_use_goto_tb(&dc->base, dest)) { tcg_gen_goto_tb(n); - tcg_gen_movi_tl(cpu_R[R_PC], dest); + tcg_gen_movi_tl(cpu_pc, dest); tcg_gen_exit_tb(tb, n); } else { - tcg_gen_movi_tl(cpu_R[R_PC], dest); - tcg_gen_exit_tb(NULL, 0); + tcg_gen_movi_tl(cpu_pc, dest); + tcg_gen_lookup_and_goto_ptr(); } + dc->base.is_jmp = DISAS_NORETURN; +} + +static void gen_jumpr(DisasContext *dc, int regno, bool is_call) +{ + TCGLabel *l = gen_new_label(); + TCGv test = tcg_temp_new(); + TCGv dest = load_gpr(dc, regno); + + tcg_gen_andi_tl(test, dest, 3); + tcg_gen_brcondi_tl(TCG_COND_NE, test, 0, l); + tcg_temp_free(test); + + tcg_gen_mov_tl(cpu_pc, dest); + if (is_call) { + tcg_gen_movi_tl(dest_gpr(dc, R_RA), dc->base.pc_next); + } + tcg_gen_lookup_and_goto_ptr(); + + gen_set_label(l); + tcg_gen_st_tl(dest, cpu_env, offsetof(CPUNios2State, ctrl[CR_BADADDR])); + t_gen_helper_raise_exception(dc, EXCP_UNALIGND); + + dc->base.is_jmp = DISAS_NORETURN; } static void gen_excp(DisasContext *dc, uint32_t code, uint32_t flags) @@ -169,12 +253,14 @@ static void gen_excp(DisasContext *dc, uint32_t code, uint32_t flags) t_gen_helper_raise_exception(dc, flags); } -static void gen_check_supervisor(DisasContext *dc) +static bool gen_check_supervisor(DisasContext *dc) { - if (dc->base.tb->flags & CR_STATUS_U) { + if (FIELD_EX32(dc->tb_flags, TBFLAGS, U)) { /* CPU in user mode, privileged instruction called, stop. */ t_gen_helper_raise_exception(dc, EXCP_SUPERI); + return false; } + return true; } /* @@ -193,12 +279,11 @@ static void jmpi(DisasContext *dc, uint32_t code, uint32_t flags) { J_TYPE(instr, code); gen_goto_tb(dc, 0, (dc->pc & 0xF0000000) | (instr.imm26 << 2)); - dc->base.is_jmp = DISAS_NORETURN; } static void call(DisasContext *dc, uint32_t code, uint32_t flags) { - tcg_gen_movi_tl(cpu_R[R_RA], dc->base.pc_next); + tcg_gen_movi_tl(dest_gpr(dc, R_RA), dc->base.pc_next); jmpi(dc, code, flags); } @@ -211,27 +296,10 @@ static void gen_ldx(DisasContext *dc, uint32_t code, uint32_t flags) I_TYPE(instr, code); TCGv addr = tcg_temp_new(); - TCGv data; - - /* - * WARNING: Loads into R_ZERO are ignored, but we must generate the - * memory access itself to emulate the CPU precisely. Load - * from a protected page to R_ZERO will cause SIGSEGV on - * the Nios2 CPU. 
- */ - if (likely(instr.b != R_ZERO)) { - data = cpu_R[instr.b]; - } else { - data = tcg_temp_new(); - } + TCGv data = dest_gpr(dc, instr.b); tcg_gen_addi_tl(addr, load_gpr(dc, instr.a), instr.imm16.s); tcg_gen_qemu_ld_tl(data, addr, dc->mem_idx, flags); - - if (unlikely(instr.b == R_ZERO)) { - tcg_temp_free(data); - } - tcg_temp_free(addr); } @@ -253,7 +321,6 @@ static void br(DisasContext *dc, uint32_t code, uint32_t flags) I_TYPE(instr, code); gen_goto_tb(dc, 0, dc->base.pc_next + (instr.imm16.s & -4)); - dc->base.is_jmp = DISAS_NORETURN; } static void gen_bxx(DisasContext *dc, uint32_t code, uint32_t flags) @@ -261,48 +328,86 @@ static void gen_bxx(DisasContext *dc, uint32_t code, uint32_t flags) I_TYPE(instr, code); TCGLabel *l1 = gen_new_label(); - tcg_gen_brcond_tl(flags, cpu_R[instr.a], cpu_R[instr.b], l1); + tcg_gen_brcond_tl(flags, load_gpr(dc, instr.a), load_gpr(dc, instr.b), l1); gen_goto_tb(dc, 0, dc->base.pc_next); gen_set_label(l1); gen_goto_tb(dc, 1, dc->base.pc_next + (instr.imm16.s & -4)); - dc->base.is_jmp = DISAS_NORETURN; } /* Comparison instructions */ -#define gen_i_cmpxx(fname, op3) \ -static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \ -{ \ - I_TYPE(instr, (code)); \ - tcg_gen_setcondi_tl(flags, cpu_R[instr.b], cpu_R[instr.a], (op3)); \ +static void do_i_cmpxx(DisasContext *dc, uint32_t insn, + TCGCond cond, ImmFromIType *imm) +{ + I_TYPE(instr, insn); + tcg_gen_setcondi_tl(cond, dest_gpr(dc, instr.b), + load_gpr(dc, instr.a), imm(&instr)); } -gen_i_cmpxx(gen_cmpxxsi, instr.imm16.s) -gen_i_cmpxx(gen_cmpxxui, instr.imm16.u) +#define gen_i_cmpxx(fname, imm) \ + static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \ + { do_i_cmpxx(dc, code, flags, imm); } + +gen_i_cmpxx(gen_cmpxxsi, imm_signed) +gen_i_cmpxx(gen_cmpxxui, imm_unsigned) /* Math/logic instructions */ -#define gen_i_math_logic(fname, insn, resimm, op3) \ -static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \ -{ \ - I_TYPE(instr, (code)); \ - if (unlikely(instr.b == R_ZERO)) { /* Store to R_ZERO is ignored */ \ - return; \ - } else if (instr.a == R_ZERO) { /* MOVxI optimizations */ \ - tcg_gen_movi_tl(cpu_R[instr.b], (resimm) ? (op3) : 0); \ - } else { \ - tcg_gen_##insn##_tl(cpu_R[instr.b], cpu_R[instr.a], (op3)); \ - } \ +static void do_i_math_logic(DisasContext *dc, uint32_t insn, + GenFn2i *fn, ImmFromIType *imm, + bool x_op_0_eq_x) +{ + I_TYPE(instr, insn); + target_ulong val; + + if (unlikely(instr.b == R_ZERO)) { + /* Store to R_ZERO is ignored -- this catches the canonical NOP. */ + return; + } + + val = imm(&instr); + + if (instr.a == R_ZERO && FIELD_EX32(dc->tb_flags, TBFLAGS, R0_0)) { + /* This catches the canonical expansions of movi and movhi. */ + tcg_gen_movi_tl(dest_gpr(dc, instr.b), x_op_0_eq_x ? 
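/*
 * Sketch: the three immediate flavours fed to do_i_math_logic() above,
 * derived from the I_TYPE split (op in bits 5..0, imm16 in bits 21..6).
 * Names echo imm_signed/imm_unsigned/imm_shifted; these standalone decoders
 * are illustrative only.
 */
#include <stdint.h>

static int32_t i_imm_signed(uint32_t insn)
{
    uint32_t u = (insn >> 6) & 0xffff;
    return (int32_t)(u ^ 0x8000) - 0x8000;      /* sign-extend 16 -> 32 */
}

static uint32_t i_imm_unsigned(uint32_t insn) { return (insn >> 6) & 0xffff; }
static uint32_t i_imm_shifted(uint32_t insn)  { return ((insn >> 6) & 0xffff) << 16; }
/* e.g. movhi rB, K expands to orhi rB, r0, K, i.e. rB = 0 | (K << 16). */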
val : 0); + } else { + fn(dest_gpr(dc, instr.b), load_gpr(dc, instr.a), val); + } } -gen_i_math_logic(addi, addi, 1, instr.imm16.s) -gen_i_math_logic(muli, muli, 0, instr.imm16.s) +#define gen_i_math_logic(fname, insn, x_op_0, imm) \ + static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \ + { do_i_math_logic(dc, code, tcg_gen_##insn##_tl, imm, x_op_0); } -gen_i_math_logic(andi, andi, 0, instr.imm16.u) -gen_i_math_logic(ori, ori, 1, instr.imm16.u) -gen_i_math_logic(xori, xori, 1, instr.imm16.u) +gen_i_math_logic(addi, addi, 1, imm_signed) +gen_i_math_logic(muli, muli, 0, imm_signed) -gen_i_math_logic(andhi, andi, 0, instr.imm16.u << 16) -gen_i_math_logic(orhi , ori, 1, instr.imm16.u << 16) -gen_i_math_logic(xorhi, xori, 1, instr.imm16.u << 16) +gen_i_math_logic(andi, andi, 0, imm_unsigned) +gen_i_math_logic(ori, ori, 1, imm_unsigned) +gen_i_math_logic(xori, xori, 1, imm_unsigned) + +gen_i_math_logic(andhi, andi, 0, imm_shifted) +gen_i_math_logic(orhi , ori, 1, imm_shifted) +gen_i_math_logic(xorhi, xori, 1, imm_shifted) + +/* rB <- prs.rA + sigma(IMM16) */ +static void rdprs(DisasContext *dc, uint32_t code, uint32_t flags) +{ + if (!dc->eic_present) { + t_gen_helper_raise_exception(dc, EXCP_ILLEGAL); + return; + } + if (!gen_check_supervisor(dc)) { + return; + } + +#ifdef CONFIG_USER_ONLY + g_assert_not_reached(); +#else + I_TYPE(instr, code); + TCGv dest = dest_gpr(dc, instr.b); + gen_helper_rdprs(dest, cpu_env, tcg_constant_i32(instr.a)); + tcg_gen_addi_tl(dest, dest, instr.imm16.s); +#endif +} /* Prototype only, defined below */ static void handle_r_type_instr(DisasContext *dc, uint32_t code, @@ -365,7 +470,7 @@ static const Nios2Instruction i_type_instructions[] = { INSTRUCTION_FLG(gen_stx, MO_SL), /* stwio */ INSTRUCTION_FLG(gen_bxx, TCG_COND_LTU), /* bltu */ INSTRUCTION_FLG(gen_ldx, MO_UL), /* ldwio */ - INSTRUCTION_UNIMPLEMENTED(), /* rdprs */ + INSTRUCTION(rdprs), /* rdprs */ INSTRUCTION_ILLEGAL(), INSTRUCTION_FLG(handle_r_type_instr, 0), /* R-Type */ INSTRUCTION_NOP(), /* flushd */ @@ -384,26 +489,51 @@ static const Nios2Instruction i_type_instructions[] = { */ static void eret(DisasContext *dc, uint32_t code, uint32_t flags) { - tcg_gen_mov_tl(cpu_R[CR_STATUS], cpu_R[CR_ESTATUS]); - tcg_gen_mov_tl(cpu_R[R_PC], cpu_R[R_EA]); + if (!gen_check_supervisor(dc)) { + return; + } - dc->base.is_jmp = DISAS_JUMP; +#ifdef CONFIG_USER_ONLY + g_assert_not_reached(); +#else + if (FIELD_EX32(dc->tb_flags, TBFLAGS, CRS0)) { + TCGv tmp = tcg_temp_new(); + tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPUNios2State, ctrl[CR_ESTATUS])); + gen_helper_eret(cpu_env, tmp, load_gpr(dc, R_EA)); + tcg_temp_free(tmp); + } else { + gen_helper_eret(cpu_env, load_gpr(dc, R_SSTATUS), load_gpr(dc, R_EA)); + } + dc->base.is_jmp = DISAS_NORETURN; +#endif } /* PC <- ra */ static void ret(DisasContext *dc, uint32_t code, uint32_t flags) { - tcg_gen_mov_tl(cpu_R[R_PC], cpu_R[R_RA]); - - dc->base.is_jmp = DISAS_JUMP; + gen_jumpr(dc, R_RA, false); } -/* PC <- ba */ +/* + * status <- bstatus + * PC <- ba + */ static void bret(DisasContext *dc, uint32_t code, uint32_t flags) { - tcg_gen_mov_tl(cpu_R[R_PC], cpu_R[R_BA]); + if (!gen_check_supervisor(dc)) { + return; + } - dc->base.is_jmp = DISAS_JUMP; +#ifdef CONFIG_USER_ONLY + g_assert_not_reached(); +#else + TCGv tmp = tcg_temp_new(); + tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPUNios2State, ctrl[CR_BSTATUS])); + gen_helper_eret(cpu_env, tmp, load_gpr(dc, R_BA)); + tcg_temp_free(tmp); + + dc->base.is_jmp = DISAS_NORETURN; +#endif } /* PC <- rA */ @@ -411,9 +541,7 @@ 
static void jmp(DisasContext *dc, uint32_t code, uint32_t flags) { R_TYPE(instr, code); - tcg_gen_mov_tl(cpu_R[R_PC], load_gpr(dc, instr.a)); - - dc->base.is_jmp = DISAS_JUMP; + gen_jumpr(dc, instr.a, false); } /* rC <- PC + 4 */ @@ -421,9 +549,7 @@ static void nextpc(DisasContext *dc, uint32_t code, uint32_t flags) { R_TYPE(instr, code); - if (likely(instr.c != R_ZERO)) { - tcg_gen_movi_tl(cpu_R[instr.c], dc->base.pc_next); - } + tcg_gen_movi_tl(dest_gpr(dc, instr.c), dc->base.pc_next); } /* @@ -434,77 +560,144 @@ static void callr(DisasContext *dc, uint32_t code, uint32_t flags) { R_TYPE(instr, code); - tcg_gen_mov_tl(cpu_R[R_PC], load_gpr(dc, instr.a)); - tcg_gen_movi_tl(cpu_R[R_RA], dc->base.pc_next); - - dc->base.is_jmp = DISAS_JUMP; + gen_jumpr(dc, instr.a, true); } /* rC <- ctlN */ static void rdctl(DisasContext *dc, uint32_t code, uint32_t flags) { + if (!gen_check_supervisor(dc)) { + return; + } + +#ifdef CONFIG_USER_ONLY + g_assert_not_reached(); +#else R_TYPE(instr, code); + TCGv t1, t2, dest = dest_gpr(dc, instr.c); - gen_check_supervisor(dc); - - switch (instr.imm5 + CR_BASE) { - case CR_PTEADDR: - case CR_TLBACC: - case CR_TLBMISC: - { -#if !defined(CONFIG_USER_ONLY) - if (likely(instr.c != R_ZERO)) { - tcg_gen_mov_tl(cpu_R[instr.c], cpu_R[instr.imm5 + CR_BASE]); -#ifdef DEBUG_MMU - TCGv_i32 tmp = tcg_const_i32(instr.imm5 + CR_BASE); - gen_helper_mmu_read_debug(cpu_R[instr.c], cpu_env, tmp); - tcg_temp_free_i32(tmp); -#endif - } -#endif - break; + /* Reserved registers read as zero. */ + if (nios2_cr_reserved(&dc->cr_state[instr.imm5])) { + tcg_gen_movi_tl(dest, 0); + return; } + switch (instr.imm5) { + case CR_IPENDING: + /* + * The value of the ipending register is synthetic. + * In hw, this is the AND of a set of hardware irq lines + * with the ienable register. In qemu, we re-use the space + * of CR_IPENDING to store the set of irq lines, and so we + * must perform the AND here, and anywhere else we need the + * guest value of ipending. + */ + t1 = tcg_temp_new(); + t2 = tcg_temp_new(); + tcg_gen_ld_tl(t1, cpu_env, offsetof(CPUNios2State, ctrl[CR_IPENDING])); + tcg_gen_ld_tl(t2, cpu_env, offsetof(CPUNios2State, ctrl[CR_IENABLE])); + tcg_gen_and_tl(dest, t1, t2); + tcg_temp_free(t1); + tcg_temp_free(t2); + break; default: - if (likely(instr.c != R_ZERO)) { - tcg_gen_mov_tl(cpu_R[instr.c], cpu_R[instr.imm5 + CR_BASE]); - } + tcg_gen_ld_tl(dest, cpu_env, + offsetof(CPUNios2State, ctrl[instr.imm5])); break; } +#endif } /* ctlN <- rA */ static void wrctl(DisasContext *dc, uint32_t code, uint32_t flags) { + if (!gen_check_supervisor(dc)) { + return; + } + +#ifdef CONFIG_USER_ONLY + g_assert_not_reached(); +#else R_TYPE(instr, code); + TCGv v = load_gpr(dc, instr.a); + uint32_t ofs = offsetof(CPUNios2State, ctrl[instr.imm5]); + uint32_t wr = dc->cr_state[instr.imm5].writable; + uint32_t ro = dc->cr_state[instr.imm5].readonly; - gen_check_supervisor(dc); + /* Skip reserved or readonly registers. */ + if (wr == 0) { + return; + } - switch (instr.imm5 + CR_BASE) { + switch (instr.imm5) { case CR_PTEADDR: + gen_helper_mmu_write_pteaddr(cpu_env, v); + break; case CR_TLBACC: + gen_helper_mmu_write_tlbacc(cpu_env, v); + break; case CR_TLBMISC: - { -#if !defined(CONFIG_USER_ONLY) - TCGv_i32 tmp = tcg_const_i32(instr.imm5 + CR_BASE); - gen_helper_mmu_write(cpu_env, tmp, load_gpr(dc, instr.a)); - tcg_temp_free_i32(tmp); -#endif + gen_helper_mmu_write_tlbmisc(cpu_env, v); break; - } - + case CR_STATUS: + case CR_IENABLE: + /* If interrupts were enabled using WRCTL, trigger them. 
*/ + dc->base.is_jmp = DISAS_UPDATE; + /* fall through */ default: - tcg_gen_mov_tl(cpu_R[instr.imm5 + CR_BASE], load_gpr(dc, instr.a)); + if (wr == -1) { + /* The register is entirely writable. */ + tcg_gen_st_tl(v, cpu_env, ofs); + } else { + /* + * The register is partially read-only or reserved: + * merge the value. + */ + TCGv n = tcg_temp_new(); + + tcg_gen_andi_tl(n, v, wr); + + if (ro != 0) { + TCGv o = tcg_temp_new(); + tcg_gen_ld_tl(o, cpu_env, ofs); + tcg_gen_andi_tl(o, o, ro); + tcg_gen_or_tl(n, n, o); + tcg_temp_free(o); + } + + tcg_gen_st_tl(n, cpu_env, ofs); + tcg_temp_free(n); + } break; } +#endif +} - /* If interrupts were enabled using WRCTL, trigger them. */ -#if !defined(CONFIG_USER_ONLY) - if ((instr.imm5 + CR_BASE) == CR_STATUS) { - if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_check_interrupts(cpu_env); +/* prs.rC <- rA */ +static void wrprs(DisasContext *dc, uint32_t code, uint32_t flags) +{ + if (!dc->eic_present) { + t_gen_helper_raise_exception(dc, EXCP_ILLEGAL); + return; + } + if (!gen_check_supervisor(dc)) { + return; + } + +#ifdef CONFIG_USER_ONLY + g_assert_not_reached(); +#else + R_TYPE(instr, code); + gen_helper_wrprs(cpu_env, tcg_constant_i32(instr.c), + load_gpr(dc, instr.a)); + /* + * The expected write to PRS[r0] is 0, from CRS[r0]. + * If not, and CRS == PRS (which we cannot tell from here), + * we may now have a non-zero value in our current r0. + * By ending the TB, we re-evaluate tb_flags and find out. + */ + if (instr.c == 0 + && (instr.a != 0 || !FIELD_EX32(dc->tb_flags, TBFLAGS, R0_0))) { dc->base.is_jmp = DISAS_UPDATE; } #endif @@ -514,126 +707,125 @@ static void wrctl(DisasContext *dc, uint32_t code, uint32_t flags) static void gen_cmpxx(DisasContext *dc, uint32_t code, uint32_t flags) { R_TYPE(instr, code); - if (likely(instr.c != R_ZERO)) { - tcg_gen_setcond_tl(flags, cpu_R[instr.c], cpu_R[instr.a], - cpu_R[instr.b]); - } + tcg_gen_setcond_tl(flags, dest_gpr(dc, instr.c), + load_gpr(dc, instr.a), load_gpr(dc, instr.b)); } /* Math/logic instructions */ -#define gen_r_math_logic(fname, insn, op3) \ -static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \ -{ \ - R_TYPE(instr, (code)); \ - if (likely(instr.c != R_ZERO)) { \ - tcg_gen_##insn(cpu_R[instr.c], load_gpr((dc), instr.a), (op3)); \ - } \ +static void do_ri_math_logic(DisasContext *dc, uint32_t insn, GenFn2i *fn) +{ + R_TYPE(instr, insn); + fn(dest_gpr(dc, instr.c), load_gpr(dc, instr.a), instr.imm5); } -gen_r_math_logic(add, add_tl, load_gpr(dc, instr.b)) -gen_r_math_logic(sub, sub_tl, load_gpr(dc, instr.b)) -gen_r_math_logic(mul, mul_tl, load_gpr(dc, instr.b)) - -gen_r_math_logic(and, and_tl, load_gpr(dc, instr.b)) -gen_r_math_logic(or, or_tl, load_gpr(dc, instr.b)) -gen_r_math_logic(xor, xor_tl, load_gpr(dc, instr.b)) -gen_r_math_logic(nor, nor_tl, load_gpr(dc, instr.b)) - -gen_r_math_logic(srai, sari_tl, instr.imm5) -gen_r_math_logic(srli, shri_tl, instr.imm5) -gen_r_math_logic(slli, shli_tl, instr.imm5) -gen_r_math_logic(roli, rotli_tl, instr.imm5) - -#define gen_r_mul(fname, insn) \ -static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \ -{ \ - R_TYPE(instr, (code)); \ - if (likely(instr.c != R_ZERO)) { \ - TCGv t0 = tcg_temp_new(); \ - tcg_gen_##insn(t0, cpu_R[instr.c], \ - load_gpr(dc, instr.a), load_gpr(dc, instr.b)); \ - tcg_temp_free(t0); \ - } \ +static void do_rr_math_logic(DisasContext *dc, uint32_t insn, GenFn3 *fn) +{ + R_TYPE(instr, insn); + fn(dest_gpr(dc, instr.c), load_gpr(dc, instr.a), load_gpr(dc, 
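/*
 * Sketch of the partial-write rule wrctl implements above: bits in the
 * writable mask come from the new value, bits in the readonly mask are kept
 * from the current register, and bits in neither mask (reserved) stay zero.
 * ctl_merge is a made-up name for illustration.
 */
#include <stdint.h>

static uint32_t ctl_merge(uint32_t old, uint32_t v,
                          uint32_t writable, uint32_t readonly)
{
    return (v & writable) | (old & readonly);
}
/* wr == -1 (every bit writable) is the fast path that simply stores v. */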
instr.b)); } -gen_r_mul(mulxss, muls2_tl) -gen_r_mul(mulxuu, mulu2_tl) -gen_r_mul(mulxsu, mulsu2_tl) +#define gen_ri_math_logic(fname, insn) \ + static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \ + { do_ri_math_logic(dc, code, tcg_gen_##insn##_tl); } -#define gen_r_shift_s(fname, insn) \ -static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \ -{ \ - R_TYPE(instr, (code)); \ - if (likely(instr.c != R_ZERO)) { \ - TCGv t0 = tcg_temp_new(); \ - tcg_gen_andi_tl(t0, load_gpr((dc), instr.b), 31); \ - tcg_gen_##insn(cpu_R[instr.c], load_gpr((dc), instr.a), t0); \ - tcg_temp_free(t0); \ - } \ +#define gen_rr_math_logic(fname, insn) \ + static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \ + { do_rr_math_logic(dc, code, tcg_gen_##insn##_tl); } + +gen_rr_math_logic(add, add) +gen_rr_math_logic(sub, sub) +gen_rr_math_logic(mul, mul) + +gen_rr_math_logic(and, and) +gen_rr_math_logic(or, or) +gen_rr_math_logic(xor, xor) +gen_rr_math_logic(nor, nor) + +gen_ri_math_logic(srai, sari) +gen_ri_math_logic(srli, shri) +gen_ri_math_logic(slli, shli) +gen_ri_math_logic(roli, rotli) + +static void do_rr_mul_high(DisasContext *dc, uint32_t insn, GenFn4 *fn) +{ + R_TYPE(instr, insn); + TCGv discard = tcg_temp_new(); + + fn(discard, dest_gpr(dc, instr.c), + load_gpr(dc, instr.a), load_gpr(dc, instr.b)); + tcg_temp_free(discard); } -gen_r_shift_s(sra, sar_tl) -gen_r_shift_s(srl, shr_tl) -gen_r_shift_s(sll, shl_tl) -gen_r_shift_s(rol, rotl_tl) -gen_r_shift_s(ror, rotr_tl) +#define gen_rr_mul_high(fname, insn) \ + static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \ + { do_rr_mul_high(dc, code, tcg_gen_##insn##_tl); } + +gen_rr_mul_high(mulxss, muls2) +gen_rr_mul_high(mulxuu, mulu2) +gen_rr_mul_high(mulxsu, mulsu2) + +static void do_rr_shift(DisasContext *dc, uint32_t insn, GenFn3 *fn) +{ + R_TYPE(instr, insn); + TCGv sh = tcg_temp_new(); + + tcg_gen_andi_tl(sh, load_gpr(dc, instr.b), 31); + fn(dest_gpr(dc, instr.c), load_gpr(dc, instr.a), sh); + tcg_temp_free(sh); +} + +#define gen_rr_shift(fname, insn) \ + static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \ + { do_rr_shift(dc, code, tcg_gen_##insn##_tl); } + +gen_rr_shift(sra, sar) +gen_rr_shift(srl, shr) +gen_rr_shift(sll, shl) +gen_rr_shift(rol, rotl) +gen_rr_shift(ror, rotr) static void divs(DisasContext *dc, uint32_t code, uint32_t flags) { R_TYPE(instr, (code)); - - /* Stores into R_ZERO are ignored */ - if (unlikely(instr.c == R_ZERO)) { - return; - } - - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_temp_new(); - TCGv t3 = tcg_temp_new(); - - tcg_gen_ext32s_tl(t0, load_gpr(dc, instr.a)); - tcg_gen_ext32s_tl(t1, load_gpr(dc, instr.b)); - tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, INT_MIN); - tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1); - tcg_gen_and_tl(t2, t2, t3); - tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); - tcg_gen_or_tl(t2, t2, t3); - tcg_gen_movi_tl(t3, 0); - tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); - tcg_gen_div_tl(cpu_R[instr.c], t0, t1); - tcg_gen_ext32s_tl(cpu_R[instr.c], cpu_R[instr.c]); - - tcg_temp_free(t3); - tcg_temp_free(t2); - tcg_temp_free(t1); - tcg_temp_free(t0); + gen_helper_divs(dest_gpr(dc, instr.c), cpu_env, + load_gpr(dc, instr.a), load_gpr(dc, instr.b)); } static void divu(DisasContext *dc, uint32_t code, uint32_t flags) { R_TYPE(instr, (code)); + gen_helper_divu(dest_gpr(dc, instr.c), cpu_env, + load_gpr(dc, instr.a), load_gpr(dc, instr.b)); +} - /* Stores into R_ZERO are ignored */ - if (unlikely(instr.c == 
R_ZERO)) { +static void trap(DisasContext *dc, uint32_t code, uint32_t flags) +{ +#ifdef CONFIG_USER_ONLY + /* + * The imm5 field is not stored anywhere on real hw; the kernel + * has to load the insn and extract the field. But we can make + * things easier for cpu_loop if we pop this into env->error_code. + */ + R_TYPE(instr, code); + tcg_gen_st_i32(tcg_constant_i32(instr.imm5), cpu_env, + offsetof(CPUNios2State, error_code)); +#endif + t_gen_helper_raise_exception(dc, EXCP_TRAP); +} + +static void gen_break(DisasContext *dc, uint32_t code, uint32_t flags) +{ +#ifndef CONFIG_USER_ONLY + /* The semihosting instruction is "break 1". */ + bool is_user = FIELD_EX32(dc->tb_flags, TBFLAGS, U); + R_TYPE(instr, code); + if (semihosting_enabled(is_user) && instr.imm5 == 1) { + t_gen_helper_raise_exception(dc, EXCP_SEMIHOST); return; } +#endif - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_const_tl(0); - TCGv t3 = tcg_const_tl(1); - - tcg_gen_ext32u_tl(t0, load_gpr(dc, instr.a)); - tcg_gen_ext32u_tl(t1, load_gpr(dc, instr.b)); - tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); - tcg_gen_divu_tl(cpu_R[instr.c], t0, t1); - tcg_gen_ext32s_tl(cpu_R[instr.c], cpu_R[instr.c]); - - tcg_temp_free(t3); - tcg_temp_free(t2); - tcg_temp_free(t1); - tcg_temp_free(t0); + t_gen_helper_raise_exception(dc, EXCP_BREAK); } static const Nios2Instruction r_type_instructions[] = { @@ -657,7 +849,7 @@ static const Nios2Instruction r_type_instructions[] = { INSTRUCTION_ILLEGAL(), INSTRUCTION(slli), /* slli */ INSTRUCTION(sll), /* sll */ - INSTRUCTION_UNIMPLEMENTED(), /* wrprs */ + INSTRUCTION(wrprs), /* wrprs */ INSTRUCTION_ILLEGAL(), INSTRUCTION(or), /* or */ INSTRUCTION(mulxsu), /* mulxsu */ @@ -682,14 +874,14 @@ static const Nios2Instruction r_type_instructions[] = { INSTRUCTION_ILLEGAL(), INSTRUCTION_ILLEGAL(), INSTRUCTION_ILLEGAL(), - INSTRUCTION_FLG(gen_excp, EXCP_TRAP), /* trap */ + INSTRUCTION(trap), /* trap */ INSTRUCTION(wrctl), /* wrctl */ INSTRUCTION_ILLEGAL(), INSTRUCTION_FLG(gen_cmpxx, TCG_COND_LTU), /* cmpltu */ INSTRUCTION(add), /* add */ INSTRUCTION_ILLEGAL(), INSTRUCTION_ILLEGAL(), - INSTRUCTION_FLG(gen_excp, EXCP_BREAK), /* break */ + INSTRUCTION(gen_break), /* break */ INSTRUCTION_ILLEGAL(), INSTRUCTION(nop), /* nop */ INSTRUCTION_ILLEGAL(), @@ -722,7 +914,7 @@ illegal_op: t_gen_helper_raise_exception(dc, EXCP_ILLEGAL); } -static const char * const regnames[] = { +static const char * const gr_regnames[NUM_GP_REGS] = { "zero", "at", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", @@ -731,16 +923,20 @@ static const char * const regnames[] = { "r20", "r21", "r22", "r23", "et", "bt", "gp", "sp", "fp", "ea", "ba", "ra", +}; + +#ifndef CONFIG_USER_ONLY +static const char * const cr_regnames[NUM_CR_REGS] = { "status", "estatus", "bstatus", "ienable", - "ipending", "cpuid", "reserved0", "exception", + "ipending", "cpuid", "res6", "exception", "pteaddr", "tlbacc", "tlbmisc", "reserved1", "badaddr", "config", "mpubase", "mpuacc", - "reserved2", "reserved3", "reserved4", "reserved5", - "reserved6", "reserved7", "reserved8", "reserved9", - "reserved10", "reserved11", "reserved12", "reserved13", - "reserved14", "reserved15", "reserved16", "reserved17", - "rpc" + "res16", "res17", "res18", "res19", + "res20", "res21", "res22", "res23", + "res24", "res25", "res26", "res27", + "res28", "res29", "res30", "res31", }; +#endif #include "exec/gen-icount.h" @@ -749,9 +945,13 @@ static void nios2_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { DisasContext *dc = 
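/*
 * Sketch: the 5-bit immediate checked by trap/gen_break above sits in bits
 * 10..6 of the R-type encoding (see R_TYPE), and "break 1" selects
 * semihosting when it is enabled.  The decoders below are illustrative only
 * and assume the instruction has already been identified as break.
 */
#include <stdint.h>
#include <stdbool.h>

static unsigned r_type_imm5(uint32_t insn)
{
    return (insn >> 6) & 0x1f;
}

static bool is_semihosting_break(uint32_t break_insn, bool semihosting_on)
{
    return semihosting_on && r_type_imm5(break_insn) == 1;
}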
container_of(dcbase, DisasContext, base); CPUNios2State *env = cs->env_ptr; + Nios2CPU *cpu = env_archcpu(env); int page_insns; dc->mem_idx = cpu_mmu_index(env, false); + dc->cr_state = cpu->cr_state; + dc->tb_flags = dc->base.tb->flags; + dc->eic_present = cpu->eic_present; /* Bound the number of insns to execute to those left on the page. */ page_insns = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4; @@ -780,15 +980,6 @@ static void nios2_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) dc->base.pc_next = pc + 4; /* Decode an instruction */ - -#if defined(CONFIG_USER_ONLY) - /* FIXME: Is this needed ? */ - if (pc >= 0x1000 && pc < 0x2000) { - t_gen_helper_raise_exception(dc, 0xaa); - return; - } -#endif - code = cpu_ldl_code(env, pc); op = get_opcode(code); @@ -797,13 +988,13 @@ static void nios2_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) return; } - dc->zero = NULL; + dc->sink = NULL; instr = &i_type_instructions[op]; instr->handler(dc, code, instr->flags); - if (dc->zero) { - tcg_temp_free(dc->zero); + if (dc->sink) { + tcg_temp_free(dc->sink); } } @@ -814,14 +1005,12 @@ static void nios2_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) /* Indicate where the next block should start */ switch (dc->base.is_jmp) { case DISAS_TOO_MANY: - case DISAS_UPDATE: - /* Save the current PC back into the CPU register */ - tcg_gen_movi_tl(cpu_R[R_PC], dc->base.pc_next); - tcg_gen_exit_tb(NULL, 0); + gen_goto_tb(dc, 0, dc->base.pc_next); break; - case DISAS_JUMP: - /* The jump will already have updated the PC register */ + case DISAS_UPDATE: + /* Save the current PC, and return to the main loop. */ + tcg_gen_movi_tl(cpu_pc, dc->base.pc_next); tcg_gen_exit_tb(NULL, 0); break; @@ -834,10 +1023,11 @@ static void nios2_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) } } -static void nios2_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu) +static void nios2_tr_disas_log(const DisasContextBase *dcbase, + CPUState *cpu, FILE *logfile) { - qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first)); - log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first)); + target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size); } static const TranslatorOps nios2_tr_ops = { @@ -849,10 +1039,11 @@ static const TranslatorOps nios2_tr_ops = { .disas_log = nios2_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext dc; - translator_loop(&nios2_tr_ops, &dc.base, cs, tb, max_insns); + translator_loop(cs, tb, max_insns, pc, host_pc, &nios2_tr_ops, &dc.base); } void nios2_cpu_dump_state(CPUState *cs, FILE *f, int flags) @@ -861,41 +1052,61 @@ void nios2_cpu_dump_state(CPUState *cs, FILE *f, int flags) CPUNios2State *env = &cpu->env; int i; - if (!env) { - return; - } + qemu_fprintf(f, "IN: PC=%x %s\n", env->pc, lookup_symbol(env->pc)); - qemu_fprintf(f, "IN: PC=%x %s\n", - env->regs[R_PC], lookup_symbol(env->regs[R_PC])); - - for (i = 0; i < NUM_CORE_REGS; i++) { - qemu_fprintf(f, "%9s=%8.8x ", regnames[i], env->regs[i]); + for (i = 0; i < NUM_GP_REGS; i++) { + qemu_fprintf(f, "%9s=%8.8x ", gr_regnames[i], env->regs[i]); if ((i + 1) % 4 == 0) { qemu_fprintf(f, "\n"); } } + #if !defined(CONFIG_USER_ONLY) - qemu_fprintf(f, " mmu write: VPN=%05X PID %02X TLBACC %08X\n", - env->mmu.pteaddr_wr & CR_PTEADDR_VPN_MASK, - (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK) >> 
4, - env->mmu.tlbacc_wr); + int j; + + for (i = j = 0; i < NUM_CR_REGS; i++) { + if (!nios2_cr_reserved(&cpu->cr_state[i])) { + qemu_fprintf(f, "%9s=%8.8x ", cr_regnames[i], env->ctrl[i]); + if (++j % 4 == 0) { + qemu_fprintf(f, "\n"); + } + } + } + if (j % 4 != 0) { + qemu_fprintf(f, "\n"); + } + if (cpu->mmu_present) { + qemu_fprintf(f, " mmu write: VPN=%05X PID %02X TLBACC %08X\n", + env->mmu.pteaddr_wr & R_CR_PTEADDR_VPN_MASK, + FIELD_EX32(env->mmu.tlbmisc_wr, CR_TLBMISC, PID), + env->mmu.tlbacc_wr); + } #endif qemu_fprintf(f, "\n\n"); } void nios2_tcg_init(void) { - int i; +#ifndef CONFIG_USER_ONLY + TCGv_ptr crs = tcg_global_mem_new_ptr(cpu_env, + offsetof(CPUNios2State, regs), "crs"); - for (i = 0; i < NUM_CORE_REGS; i++) { - cpu_R[i] = tcg_global_mem_new(cpu_env, - offsetof(CPUNios2State, regs[i]), - regnames[i]); + for (int i = 0; i < NUM_GP_REGS; i++) { + cpu_crs_R[i] = tcg_global_mem_new(crs, 4 * i, gr_regnames[i]); } -} -void restore_state_to_opc(CPUNios2State *env, TranslationBlock *tb, - target_ulong *data) -{ - env->regs[R_PC] = data[0]; +#define offsetof_regs0(N) offsetof(CPUNios2State, shadow_regs[0][N]) +#else +#define offsetof_regs0(N) offsetof(CPUNios2State, regs[N]) +#endif + + for (int i = 0; i < NUM_GP_REGS; i++) { + cpu_R[i] = tcg_global_mem_new(cpu_env, offsetof_regs0(i), + gr_regnames[i]); + } + +#undef offsetof_regs0 + + cpu_pc = tcg_global_mem_new(cpu_env, + offsetof(CPUNios2State, pc), "pc"); } diff --git a/target/openrisc/cpu-param.h b/target/openrisc/cpu-param.h index 06ee64d171..73be699f36 100644 --- a/target/openrisc/cpu-param.h +++ b/target/openrisc/cpu-param.h @@ -6,7 +6,7 @@ */ #ifndef OPENRISC_CPU_PARAM_H -#define OPENRISC_CPU_PARAM_H 1 +#define OPENRISC_CPU_PARAM_H #define TARGET_LONG_BITS 32 #define TARGET_PAGE_BITS 13 diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c index bd34e429ec..de0176cd20 100644 --- a/target/openrisc/cpu.c +++ b/target/openrisc/cpu.c @@ -21,6 +21,7 @@ #include "qapi/error.h" #include "qemu/qemu-print.h" #include "cpu.h" +#include "exec/exec-all.h" static void openrisc_cpu_set_pc(CPUState *cs, vaddr value) { @@ -30,6 +31,34 @@ static void openrisc_cpu_set_pc(CPUState *cs, vaddr value) cpu->env.dflag = 0; } +static vaddr openrisc_cpu_get_pc(CPUState *cs) +{ + OpenRISCCPU *cpu = OPENRISC_CPU(cs); + + return cpu->env.pc; +} + +static void openrisc_cpu_synchronize_from_tb(CPUState *cs, + const TranslationBlock *tb) +{ + OpenRISCCPU *cpu = OPENRISC_CPU(cs); + + cpu->env.pc = tb_pc(tb); +} + +static void openrisc_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + OpenRISCCPU *cpu = OPENRISC_CPU(cs); + + cpu->env.pc = data[0]; + cpu->env.dflag = data[1] & 1; + if (data[1] & 2) { + cpu->env.ppc = cpu->env.pc - 4; + } +} + static bool openrisc_cpu_has_work(CPUState *cs) { return cs->interrupt_request & (CPU_INTERRUPT_HARD | @@ -88,7 +117,6 @@ static void openrisc_cpu_set_irq(void *opaque, int irq, int level) cpu_interrupt(cs, CPU_INTERRUPT_HARD); } else { cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); - cpu->env.picsr = 0; } } #endif @@ -186,10 +214,12 @@ static const struct SysemuCPUOps openrisc_sysemu_ops = { static const struct TCGCPUOps openrisc_tcg_ops = { .initialize = openrisc_translate_init, - .cpu_exec_interrupt = openrisc_cpu_exec_interrupt, - .tlb_fill = openrisc_cpu_tlb_fill, + .synchronize_from_tb = openrisc_cpu_synchronize_from_tb, + .restore_state_to_opc = openrisc_restore_state_to_opc, #ifndef CONFIG_USER_ONLY + .tlb_fill = openrisc_cpu_tlb_fill, + .cpu_exec_interrupt = 
openrisc_cpu_exec_interrupt, .do_interrupt = openrisc_cpu_do_interrupt, #endif /* !CONFIG_USER_ONLY */ }; @@ -208,6 +238,7 @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data) cc->has_work = openrisc_cpu_has_work; cc->dump_state = openrisc_cpu_dump_state; cc->set_pc = openrisc_cpu_set_pc; + cc->get_pc = openrisc_cpu_get_pc; cc->gdb_read_register = openrisc_cpu_gdb_read_register; cc->gdb_write_register = openrisc_cpu_gdb_write_register; #ifndef CONFIG_USER_ONLY diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h index 82cbaeb4f8..1d5efa5ca2 100644 --- a/target/openrisc/cpu.h +++ b/target/openrisc/cpu.h @@ -21,16 +21,15 @@ #define OPENRISC_CPU_H #include "exec/cpu-defs.h" +#include "fpu/softfloat-types.h" #include "hw/core/cpu.h" #include "qom/object.h" -/* cpu_openrisc_map_address_* in CPUOpenRISCTLBContext need this decl. */ -struct OpenRISCCPU; +#define TCG_GUEST_DEFAULT_MO (0) #define TYPE_OPENRISC_CPU "or1k-cpu" -OBJECT_DECLARE_TYPE(OpenRISCCPU, OpenRISCCPUClass, - OPENRISC_CPU) +OBJECT_DECLARE_CPU_TYPE(OpenRISCCPU, OpenRISCCPUClass, OPENRISC_CPU) /** * OpenRISCCPUClass: @@ -231,18 +230,18 @@ typedef struct CPUOpenRISCTLBContext { OpenRISCTLBEntry itlb[TLB_SIZE]; OpenRISCTLBEntry dtlb[TLB_SIZE]; - int (*cpu_openrisc_map_address_code)(struct OpenRISCCPU *cpu, + int (*cpu_openrisc_map_address_code)(OpenRISCCPU *cpu, hwaddr *physical, int *prot, target_ulong address, int rw); - int (*cpu_openrisc_map_address_data)(struct OpenRISCCPU *cpu, + int (*cpu_openrisc_map_address_data)(OpenRISCCPU *cpu, hwaddr *physical, int *prot, target_ulong address, int rw); } CPUOpenRISCTLBContext; #endif -typedef struct CPUOpenRISCState { +typedef struct CPUArchState { target_ulong shadow_gpr[16][32]; /* Shadow registers */ target_ulong pc; /* Program counter */ @@ -301,7 +300,7 @@ typedef struct CPUOpenRISCState { * * A OpenRISC CPU. 
*/ -struct OpenRISCCPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ @@ -312,25 +311,25 @@ struct OpenRISCCPU { void cpu_openrisc_list(void); -void openrisc_cpu_do_interrupt(CPUState *cpu); -bool openrisc_cpu_exec_interrupt(CPUState *cpu, int int_req); void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags); hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void openrisc_translate_init(void); -bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); -int cpu_openrisc_signal_handler(int host_signum, void *pinfo, void *puc); int print_insn_or1k(bfd_vma addr, disassemble_info *info); #define cpu_list cpu_openrisc_list -#define cpu_signal_handler cpu_openrisc_signal_handler #ifndef CONFIG_USER_ONLY +bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); + extern const VMStateDescription vmstate_openrisc_cpu; +void openrisc_cpu_do_interrupt(CPUState *cpu); +bool openrisc_cpu_exec_interrupt(CPUState *cpu, int int_req); + /* hw/openrisc_pic.c */ void cpu_openrisc_pic_init(OpenRISCCPU *cpu); @@ -348,9 +347,6 @@ void cpu_openrisc_count_stop(OpenRISCCPU *cpu); #define OPENRISC_CPU_TYPE_NAME(model) model OPENRISC_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_OPENRISC_CPU -typedef CPUOpenRISCState CPUArchState; -typedef OpenRISCCPU ArchCPU; - #include "exec/cpu-all.h" #define TB_FLAGS_SM SR_SM diff --git a/target/openrisc/exception.c b/target/openrisc/exception.c index 28c1fce523..8699c3dcea 100644 --- a/target/openrisc/exception.c +++ b/target/openrisc/exception.c @@ -22,7 +22,7 @@ #include "exec/exec-all.h" #include "exception.h" -void QEMU_NORETURN raise_exception(OpenRISCCPU *cpu, uint32_t excp) +G_NORETURN void raise_exception(OpenRISCCPU *cpu, uint32_t excp) { CPUState *cs = CPU(cpu); diff --git a/target/openrisc/exception.h b/target/openrisc/exception.h index 333bf84638..f62fc314c1 100644 --- a/target/openrisc/exception.h +++ b/target/openrisc/exception.h @@ -22,6 +22,6 @@ #include "cpu.h" -void QEMU_NORETURN raise_exception(OpenRISCCPU *cpu, uint32_t excp); +G_NORETURN void raise_exception(OpenRISCCPU *cpu, uint32_t excp); #endif /* TARGET_OPENRISC_EXCEPTION_H */ diff --git a/target/openrisc/exception_helper.c b/target/openrisc/exception_helper.c index d02a1cf0aa..1f5be4bed9 100644 --- a/target/openrisc/exception_helper.c +++ b/target/openrisc/exception_helper.c @@ -30,7 +30,8 @@ void HELPER(exception)(CPUOpenRISCState *env, uint32_t excp) raise_exception(cpu, excp); } -static void QEMU_NORETURN do_range(CPUOpenRISCState *env, uintptr_t pc) +static G_NORETURN +void do_range(CPUOpenRISCState *env, uintptr_t pc) { CPUState *cs = env_cpu(env); diff --git a/target/openrisc/interrupt.c b/target/openrisc/interrupt.c index 3eab771dcd..c31c6f12c4 100644 --- a/target/openrisc/interrupt.c +++ b/target/openrisc/interrupt.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "exec/exec-all.h" #include "exec/gdbstub.h" @@ -28,7 +29,6 @@ void openrisc_cpu_do_interrupt(CPUState *cs) { -#ifndef CONFIG_USER_ONLY OpenRISCCPU *cpu = OPENRISC_CPU(cs); CPUOpenRISCState *env = &cpu->env; int exception = cs->exception_index; @@ -83,7 +83,9 @@ void openrisc_cpu_do_interrupt(CPUState *cs) [EXCP_TRAP] = 
"TRAP", }; - qemu_log_mask(CPU_LOG_INT, "INT: %s\n", int_name[exception]); + qemu_log_mask(CPU_LOG_INT, "CPU: %d INT: %s\n", + cs->cpu_index, + int_name[exception]); hwaddr vect_pc = exception << 8; if (env->cpucfgr & CPUCFGR_EVBARP) { @@ -96,7 +98,6 @@ void openrisc_cpu_do_interrupt(CPUState *cs) } else { cpu_abort(cs, "Unhandled exception 0x%x\n", exception); } -#endif cs->exception_index = -1; } diff --git a/target/openrisc/machine.c b/target/openrisc/machine.c index 6239725c4f..b7d7388640 100644 --- a/target/openrisc/machine.c +++ b/target/openrisc/machine.c @@ -25,7 +25,6 @@ static const VMStateDescription vmstate_tlb_entry = { .name = "tlb_entry", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_UINTTL(mr, OpenRISCTLBEntry), VMSTATE_UINTTL(tr, OpenRISCTLBEntry), diff --git a/target/openrisc/meson.build b/target/openrisc/meson.build index 9774a58306..84322086ec 100644 --- a/target/openrisc/meson.build +++ b/target/openrisc/meson.build @@ -9,15 +9,17 @@ openrisc_ss.add(files( 'exception_helper.c', 'fpu_helper.c', 'gdbstub.c', - 'interrupt.c', 'interrupt_helper.c', - 'mmu.c', 'sys_helper.c', 'translate.c', )) openrisc_softmmu_ss = ss.source_set() -openrisc_softmmu_ss.add(files('machine.c')) +openrisc_softmmu_ss.add(files( + 'interrupt.c', + 'machine.c', + 'mmu.c', +)) target_arch += {'openrisc': openrisc_ss} target_softmmu_arch += {'openrisc': openrisc_softmmu_ss} diff --git a/target/openrisc/mmu.c b/target/openrisc/mmu.c index 94df8c7bef..0b8afdbacf 100644 --- a/target/openrisc/mmu.c +++ b/target/openrisc/mmu.c @@ -19,15 +19,13 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "exec/exec-all.h" #include "exec/gdbstub.h" #include "qemu/host-utils.h" -#ifndef CONFIG_USER_ONLY #include "hw/loader.h" -#endif -#ifndef CONFIG_USER_ONLY static inline void get_phys_nommu(hwaddr *phys_addr, int *prot, target_ulong address) { @@ -94,7 +92,6 @@ static int get_phys_mmu(OpenRISCCPU *cpu, hwaddr *phys_addr, int *prot, return need & PAGE_EXEC ? EXCP_ITLBMISS : EXCP_DTLBMISS; } } -#endif static void raise_mmu_exception(OpenRISCCPU *cpu, target_ulong address, int exception) @@ -112,8 +109,6 @@ bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, { OpenRISCCPU *cpu = OPENRISC_CPU(cs); int excp = EXCP_DPF; - -#ifndef CONFIG_USER_ONLY int prot; hwaddr phys_addr; @@ -138,13 +133,11 @@ bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, if (probe) { return false; } -#endif raise_mmu_exception(cpu, addr, excp); cpu_loop_exit_restore(cs, retaddr); } -#ifndef CONFIG_USER_ONLY hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { OpenRISCCPU *cpu = OPENRISC_CPU(cs); @@ -155,7 +148,13 @@ hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) case SR_DME | SR_IME: /* The mmu is definitely enabled. */ excp = get_phys_mmu(cpu, &phys_addr, &prot, addr, - PAGE_EXEC | PAGE_READ | PAGE_WRITE, + PAGE_READ, + (sr & SR_SM) != 0); + if (!excp) { + return phys_addr; + } + excp = get_phys_mmu(cpu, &phys_addr, &prot, addr, + PAGE_EXEC, (sr & SR_SM) != 0); return excp ? 
-1 : phys_addr; @@ -177,4 +176,3 @@ hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) return phys_addr; } } -#endif diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c index 48674231e7..ec145960e3 100644 --- a/target/openrisc/sys_helper.c +++ b/target/openrisc/sys_helper.c @@ -45,14 +45,14 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb) break; case TO_SPR(0, 16): /* NPC */ - cpu_restore_state(cs, GETPC(), true); + cpu_restore_state(cs, GETPC()); /* ??? Mirror or1ksim in not trashing delayed branch state when "jumping" to the current instruction. */ if (env->pc != rb) { env->pc = rb; env->dflag = 0; - cpu_loop_exit(cs); } + cpu_loop_exit(cs); break; case TO_SPR(0, 17): /* SR */ @@ -131,7 +131,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb) case TO_SPR(8, 0): /* PMR */ env->pmr = rb; if (env->pmr & PMR_DME || env->pmr & PMR_SME) { - cpu_restore_state(cs, GETPC(), true); + cpu_restore_state(cs, GETPC()); env->pc += 4; cs->halted = 1; raise_exception(cpu, EXCP_HALTED); @@ -139,12 +139,20 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb) break; case TO_SPR(9, 0): /* PICMR */ env->picmr = rb; + qemu_mutex_lock_iothread(); + if (env->picsr & env->picmr) { + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } + qemu_mutex_unlock_iothread(); break; case TO_SPR(9, 2): /* PICSR */ env->picsr &= ~rb; break; case TO_SPR(10, 0): /* TTMR */ { + qemu_mutex_lock_iothread(); if ((env->ttmr & TTMR_M) ^ (rb & TTMR_M)) { switch (rb & TTMR_M) { case TIMER_NONE: @@ -168,14 +176,16 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb) env->ttmr = rb & ~TTMR_IP; cs->interrupt_request &= ~CPU_INTERRUPT_TIMER; } - cpu_openrisc_timer_update(cpu); + qemu_mutex_unlock_iothread(); } break; case TO_SPR(10, 1): /* TTCR */ + qemu_mutex_lock_iothread(); cpu_openrisc_count_set(cpu, rb); cpu_openrisc_timer_update(cpu); + qemu_mutex_unlock_iothread(); break; #endif @@ -189,6 +199,7 @@ target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd, target_ulong spr) { #ifndef CONFIG_USER_ONLY + uint64_t data[TARGET_INSN_START_WORDS]; MachineState *ms = MACHINE(qdev_get_machine()); OpenRISCCPU *cpu = env_archcpu(env); CPUState *cs = env_cpu(env); @@ -222,14 +233,20 @@ target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd, return env->evbar; case TO_SPR(0, 16): /* NPC (equals PC) */ - cpu_restore_state(cs, GETPC(), false); + if (cpu_unwind_state_data(cs, GETPC(), data)) { + return data[0]; + } return env->pc; case TO_SPR(0, 17): /* SR */ return cpu_get_sr(env); case TO_SPR(0, 18): /* PPC */ - cpu_restore_state(cs, GETPC(), false); + if (cpu_unwind_state_data(cs, GETPC(), data)) { + if (data[1] & 2) { + return data[0] - 4; + } + } return env->ppc; case TO_SPR(0, 32): /* EPCR */ @@ -303,7 +320,9 @@ target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd, return env->ttmr; case TO_SPR(10, 1): /* TTCR */ + qemu_mutex_lock_iothread(); cpu_openrisc_count_update(cpu); + qemu_mutex_unlock_iothread(); return cpu_openrisc_count_get(cpu); #endif diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c index d6ea536744..2f3d7c5fd1 100644 --- a/target/openrisc/translate.c +++ b/target/openrisc/translate.c @@ -1613,7 +1613,7 @@ static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) { DisasContext *dc = container_of(dcbase, DisasContext, base); OpenRISCCPU *cpu = 
OPENRISC_CPU(cs); - uint32_t insn = translator_ldl(&cpu->env, dc->base.pc_next); + uint32_t insn = translator_ldl(&cpu->env, &dc->base, dc->base.pc_next); if (!decode(dc, insn)) { gen_illegal_exception(dc); @@ -1659,11 +1659,7 @@ static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) /* The jump destination is indirect/computed; use jmp_pc. */ tcg_gen_mov_tl(cpu_pc, jmp_pc); tcg_gen_discard_tl(jmp_pc); - if (unlikely(dc->base.singlestep_enabled)) { - gen_exception(dc, EXCP_DEBUG); - } else { - tcg_gen_lookup_and_goto_ptr(); - } + tcg_gen_lookup_and_goto_ptr(); break; } /* The jump destination is direct; use jmp_pc_imm. @@ -1680,31 +1676,24 @@ static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) break; } tcg_gen_movi_tl(cpu_pc, jmp_dest); - if (unlikely(dc->base.singlestep_enabled)) { - gen_exception(dc, EXCP_DEBUG); - } else { - tcg_gen_lookup_and_goto_ptr(); - } + tcg_gen_lookup_and_goto_ptr(); break; case DISAS_EXIT: - if (unlikely(dc->base.singlestep_enabled)) { - gen_exception(dc, EXCP_DEBUG); - } else { - tcg_gen_exit_tb(NULL, 0); - } + tcg_gen_exit_tb(NULL, 0); break; default: g_assert_not_reached(); } } -static void openrisc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs) +static void openrisc_tr_disas_log(const DisasContextBase *dcbase, + CPUState *cs, FILE *logfile) { DisasContext *s = container_of(dcbase, DisasContext, base); - qemu_log("IN: %s\n", lookup_symbol(s->base.pc_first)); - log_target_disas(cs, s->base.pc_first, s->base.tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(s->base.pc_first)); + target_disas(logfile, cs, s->base.pc_first, s->base.tb->size); } static const TranslatorOps openrisc_tr_ops = { @@ -1716,11 +1705,13 @@ static const TranslatorOps openrisc_tr_ops = { .disas_log = openrisc_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext ctx; - translator_loop(&openrisc_tr_ops, &ctx.base, cs, tb, max_insns); + translator_loop(cs, tb, max_insns, pc, host_pc, + &openrisc_tr_ops, &ctx.base); } void openrisc_cpu_dump_state(CPUState *cs, FILE *f, int flags) @@ -1735,13 +1726,3 @@ void openrisc_cpu_dump_state(CPUState *cs, FILE *f, int flags) (i % 4) == 3 ? 
'\n' : ' '); } } - -void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc = data[0]; - env->dflag = data[1] & 1; - if (data[1] & 2) { - env->ppc = env->pc - 4; - } -} diff --git a/target/ppc/arch_dump.c b/target/ppc/arch_dump.c index bb392f6d88..f58e6359d5 100644 --- a/target/ppc/arch_dump.c +++ b/target/ppc/arch_dump.c @@ -161,7 +161,7 @@ static void ppc_write_elf_vmxregset(NoteFuncArg *arg, PowerPCCPU *cpu) bool needs_byteswap; ppc_avr_t *avr = cpu_avr_ptr(&cpu->env, i); -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN needs_byteswap = s->dump_info.d_endian == ELFDATA2LSB; #else needs_byteswap = s->dump_info.d_endian == ELFDATA2MSB; @@ -237,7 +237,7 @@ int cpu_get_dump_info(ArchDumpInfo *info, info->d_machine = PPC_ELF_MACHINE; info->d_class = ELFCLASS; - if (ppc_interrupts_little_endian(cpu)) { + if (ppc_interrupts_little_endian(cpu, cpu->env.has_hv_mode)) { info->d_endian = ELFDATA2LSB; } else { info->d_endian = ELFDATA2MSB; @@ -270,23 +270,23 @@ ssize_t cpu_get_note_size(int class, int machine, int nr_cpus) static int ppc_write_all_elf_notes(const char *note_name, WriteCoreDumpFunction f, PowerPCCPU *cpu, int id, - void *opaque) + DumpState *s) { - NoteFuncArg arg = { .state = opaque }; + NoteFuncArg arg = { .state = s }; int ret = -1; int note_size; const NoteFuncDesc *nf; for (nf = note_func; nf->note_contents_func; nf++) { - arg.note.hdr.n_namesz = cpu_to_dump32(opaque, sizeof(arg.note.name)); - arg.note.hdr.n_descsz = cpu_to_dump32(opaque, nf->contents_size); + arg.note.hdr.n_namesz = cpu_to_dump32(s, sizeof(arg.note.name)); + arg.note.hdr.n_descsz = cpu_to_dump32(s, nf->contents_size); strncpy(arg.note.name, note_name, sizeof(arg.note.name)); (*nf->note_contents_func)(&arg, cpu); note_size = sizeof(arg.note) - sizeof(arg.note.contents) + nf->contents_size; - ret = f(&arg.note, note_size, opaque); + ret = f(&arg.note, note_size, s); if (ret < 0) { return -1; } @@ -295,15 +295,15 @@ static int ppc_write_all_elf_notes(const char *note_name, } int ppc64_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, - int cpuid, void *opaque) + int cpuid, DumpState *s) { PowerPCCPU *cpu = POWERPC_CPU(cs); - return ppc_write_all_elf_notes("CORE", f, cpu, cpuid, opaque); + return ppc_write_all_elf_notes("CORE", f, cpu, cpuid, s); } int ppc32_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, - int cpuid, void *opaque) + int cpuid, DumpState *s) { PowerPCCPU *cpu = POWERPC_CPU(cs); - return ppc_write_all_elf_notes("CORE", f, cpu, cpuid, opaque); + return ppc_write_all_elf_notes("CORE", f, cpu, cpuid, s); } diff --git a/target/ppc/cpu-models.c b/target/ppc/cpu-models.c index 87e4228614..912b037c63 100644 --- a/target/ppc/cpu-models.c +++ b/target/ppc/cpu-models.c @@ -67,40 +67,6 @@ POWERPC_DEF_SVR(_name, _desc, _pvr, POWERPC_SVR_NONE, _type) /* Embedded PowerPC */ - /* PowerPC 401 family */ - POWERPC_DEF("401", CPU_POWERPC_401, 401, - "Generic PowerPC 401") - /* PowerPC 401 cores */ - POWERPC_DEF("401a1", CPU_POWERPC_401A1, 401, - "PowerPC 401A1") - POWERPC_DEF("401b2", CPU_POWERPC_401B2, 401x2, - "PowerPC 401B2") - POWERPC_DEF("401c2", CPU_POWERPC_401C2, 401x2, - "PowerPC 401C2") - POWERPC_DEF("401d2", CPU_POWERPC_401D2, 401x2, - "PowerPC 401D2") - POWERPC_DEF("401e2", CPU_POWERPC_401E2, 401x2, - "PowerPC 401E2") - POWERPC_DEF("401f2", CPU_POWERPC_401F2, 401x2, - "PowerPC 401F2") - /* XXX: to be checked */ - POWERPC_DEF("401g2", CPU_POWERPC_401G2, 401x2, - "PowerPC 401G2") - /* PowerPC 401 microcontrollers */ - 
POWERPC_DEF("iop480", CPU_POWERPC_IOP480, IOP480, - "IOP480 (401 microcontroller)") - POWERPC_DEF("cobra", CPU_POWERPC_COBRA, 401, - "IBM Processor for Network Resources") - /* PowerPC 403 family */ - /* PowerPC 403 microcontrollers */ - POWERPC_DEF("403ga", CPU_POWERPC_403GA, 403, - "PowerPC 403 GA") - POWERPC_DEF("403gb", CPU_POWERPC_403GB, 403, - "PowerPC 403 GB") - POWERPC_DEF("403gc", CPU_POWERPC_403GC, 403, - "PowerPC 403 GC") - POWERPC_DEF("403gcx", CPU_POWERPC_403GCX, 403GCX, - "PowerPC 403 GCX") /* PowerPC 405 family */ /* PowerPC 405 cores */ POWERPC_DEF("405d2", CPU_POWERPC_405D2, 405, @@ -419,19 +385,19 @@ POWERPC_DEF_SVR("mpc8548e_v21", "MPC8548E v2.1", CPU_POWERPC_MPC8548E_v21, POWERPC_SVR_8548E_v21, e500v2) POWERPC_DEF_SVR("mpc8555_v10", "MPC8555 v1.0", - CPU_POWERPC_MPC8555_v10, POWERPC_SVR_8555_v10, e500v2) + CPU_POWERPC_MPC8555_v10, POWERPC_SVR_8555_v10, e500v1) POWERPC_DEF_SVR("mpc8555_v11", "MPC8555 v1.1", - CPU_POWERPC_MPC8555_v11, POWERPC_SVR_8555_v11, e500v2) + CPU_POWERPC_MPC8555_v11, POWERPC_SVR_8555_v11, e500v1) POWERPC_DEF_SVR("mpc8555e_v10", "MPC8555E v1.0", - CPU_POWERPC_MPC8555E_v10, POWERPC_SVR_8555E_v10, e500v2) + CPU_POWERPC_MPC8555E_v10, POWERPC_SVR_8555E_v10, e500v1) POWERPC_DEF_SVR("mpc8555e_v11", "MPC8555E v1.1", - CPU_POWERPC_MPC8555E_v11, POWERPC_SVR_8555E_v11, e500v2) + CPU_POWERPC_MPC8555E_v11, POWERPC_SVR_8555E_v11, e500v1) POWERPC_DEF_SVR("mpc8560_v10", "MPC8560 v1.0", - CPU_POWERPC_MPC8560_v10, POWERPC_SVR_8560_v10, e500v2) + CPU_POWERPC_MPC8560_v10, POWERPC_SVR_8560_v10, e500v1) POWERPC_DEF_SVR("mpc8560_v20", "MPC8560 v2.0", - CPU_POWERPC_MPC8560_v20, POWERPC_SVR_8560_v20, e500v2) + CPU_POWERPC_MPC8560_v20, POWERPC_SVR_8560_v20, e500v1) POWERPC_DEF_SVR("mpc8560_v21", "MPC8560 v2.1", - CPU_POWERPC_MPC8560_v21, POWERPC_SVR_8560_v21, e500v2) + CPU_POWERPC_MPC8560_v21, POWERPC_SVR_8560_v21, e500v1) POWERPC_DEF_SVR("mpc8567", "MPC8567", CPU_POWERPC_MPC8567, POWERPC_SVR_8567, e500v2) POWERPC_DEF_SVR("mpc8567e", "MPC8567E", @@ -456,14 +422,6 @@ CPU_POWERPC_MPC8641D, POWERPC_SVR_8641D, e600) /* 32 bits "classic" PowerPC */ /* PowerPC 6xx family */ - POWERPC_DEF("601_v0", CPU_POWERPC_601_v0, 601, - "PowerPC 601v0") - POWERPC_DEF("601_v1", CPU_POWERPC_601_v1, 601, - "PowerPC 601v1") - POWERPC_DEF("601_v2", CPU_POWERPC_601_v2, 601v, - "PowerPC 601v2") - POWERPC_DEF("602", CPU_POWERPC_602, 602, - "PowerPC 602") POWERPC_DEF("603", CPU_POWERPC_603, 603, "PowerPC 603") POWERPC_DEF("603e_v1.1", CPU_POWERPC_603E_v11, 603E, @@ -670,13 +628,13 @@ "PowerPC 7410 v1.3 (G4)") POWERPC_DEF("7410_v1.4", CPU_POWERPC_7410_v14, 7410, "PowerPC 7410 v1.4 (G4)") - POWERPC_DEF("7448_v1.0", CPU_POWERPC_7448_v10, 7400, + POWERPC_DEF("7448_v1.0", CPU_POWERPC_7448_v10, 7445, "PowerPC 7448 v1.0 (G4)") - POWERPC_DEF("7448_v1.1", CPU_POWERPC_7448_v11, 7400, + POWERPC_DEF("7448_v1.1", CPU_POWERPC_7448_v11, 7445, "PowerPC 7448 v1.1 (G4)") - POWERPC_DEF("7448_v2.0", CPU_POWERPC_7448_v20, 7400, + POWERPC_DEF("7448_v2.0", CPU_POWERPC_7448_v20, 7445, "PowerPC 7448 v2.0 (G4)") - POWERPC_DEF("7448_v2.1", CPU_POWERPC_7448_v21, 7400, + POWERPC_DEF("7448_v2.1", CPU_POWERPC_7448_v21, 7445, "PowerPC 7448 v2.1 (G4)") POWERPC_DEF("7450_v1.0", CPU_POWERPC_7450_v10, 7450, "PowerPC 7450 v1.0 (G4)") @@ -776,13 +734,14 @@ "POWER9 v2.0") POWERPC_DEF("power10_v1.0", CPU_POWERPC_POWER10_DD1, POWER10, "POWER10 v1.0") + POWERPC_DEF("power10_v2.0", CPU_POWERPC_POWER10_DD20, POWER10, + "POWER10 v2.0") #endif /* defined (TARGET_PPC64) */ 
/***************************************************************************/ /* PowerPC CPU aliases */ PowerPCCPUAlias ppc_cpu_aliases[] = { - { "403", "403gc" }, { "405", "405d4" }, { "405cr", "405crc" }, { "405gp", "405gpd" }, @@ -894,8 +853,6 @@ PowerPCCPUAlias ppc_cpu_aliases[] = { { "mpc8555", "mpc8555_v11" }, { "mpc8555e", "mpc8555e_v11" }, { "mpc8560", "mpc8560_v21" }, - { "601", "601_v2" }, - { "601v", "601_v2" }, { "vanilla", "603" }, { "603e", "603e_v4.1" }, { "stretch", "603e_v4.1" }, @@ -922,7 +879,6 @@ PowerPCCPUAlias ppc_cpu_aliases[] = { { "755", "755_v2.8" }, { "goldfinger", "755_v2.8" }, { "7400", "7400_v2.9" }, - { "max", "7400_v2.9" }, { "g4", "7400_v2.9" }, { "7410", "7410_v1.4" }, { "nitro", "7410_v1.4" }, @@ -952,7 +908,7 @@ PowerPCCPUAlias ppc_cpu_aliases[] = { { "power8", "power8_v2.0" }, { "power8nvl", "power8nvl_v1.0" }, { "power9", "power9_v2.0" }, - { "power10", "power10_v1.0" }, + { "power10", "power10_v2.0" }, #endif /* Generic PowerPCs */ @@ -961,6 +917,6 @@ PowerPCCPUAlias ppc_cpu_aliases[] = { #endif { "ppc32", "604" }, { "ppc", "604" }, - { "default", "604" }, + { NULL, NULL } }; diff --git a/target/ppc/cpu-models.h b/target/ppc/cpu-models.h index fc5e21728d..1326493a9a 100644 --- a/target/ppc/cpu-models.h +++ b/target/ppc/cpu-models.h @@ -38,27 +38,8 @@ extern PowerPCCPUAlias ppc_cpu_aliases[]; /*****************************************************************************/ /* PVR definitions for most known PowerPC */ enum { - /* PowerPC 401 family */ - /* Generic PowerPC 401 */ -#define CPU_POWERPC_401 CPU_POWERPC_401G2 - /* PowerPC 401 cores */ - CPU_POWERPC_401A1 = 0x00210000, - CPU_POWERPC_401B2 = 0x00220000, - CPU_POWERPC_401C2 = 0x00230000, - CPU_POWERPC_401D2 = 0x00240000, - CPU_POWERPC_401E2 = 0x00250000, - CPU_POWERPC_401F2 = 0x00260000, - CPU_POWERPC_401G2 = 0x00270000, - /* PowerPC 401 microcontrolers */ -#define CPU_POWERPC_IOP480 CPU_POWERPC_401B2 /* IBM Processor for Network Resources */ CPU_POWERPC_COBRA = 0x10100000, /* XXX: 405 ? 
*/ - /* PowerPC 403 family */ - /* PowerPC 403 microcontrollers */ - CPU_POWERPC_403GA = 0x00200011, - CPU_POWERPC_403GB = 0x00200100, - CPU_POWERPC_403GC = 0x00200200, - CPU_POWERPC_403GCX = 0x00201400, /* PowerPC 405 family */ /* PowerPC 405 cores */ CPU_POWERPC_405D2 = 0x20010000, @@ -203,13 +184,13 @@ enum { #define CPU_POWERPC_MPC8548E_v11 CPU_POWERPC_e500v2_v11 #define CPU_POWERPC_MPC8548E_v20 CPU_POWERPC_e500v2_v20 #define CPU_POWERPC_MPC8548E_v21 CPU_POWERPC_e500v2_v21 -#define CPU_POWERPC_MPC8555_v10 CPU_POWERPC_e500v2_v10 -#define CPU_POWERPC_MPC8555_v11 CPU_POWERPC_e500v2_v11 -#define CPU_POWERPC_MPC8555E_v10 CPU_POWERPC_e500v2_v10 -#define CPU_POWERPC_MPC8555E_v11 CPU_POWERPC_e500v2_v11 -#define CPU_POWERPC_MPC8560_v10 CPU_POWERPC_e500v2_v10 -#define CPU_POWERPC_MPC8560_v20 CPU_POWERPC_e500v2_v20 -#define CPU_POWERPC_MPC8560_v21 CPU_POWERPC_e500v2_v21 +#define CPU_POWERPC_MPC8555_v10 CPU_POWERPC_e500v1_v20 +#define CPU_POWERPC_MPC8555_v11 CPU_POWERPC_e500v1_v20 +#define CPU_POWERPC_MPC8555E_v10 CPU_POWERPC_e500v1_v20 +#define CPU_POWERPC_MPC8555E_v11 CPU_POWERPC_e500v1_v20 +#define CPU_POWERPC_MPC8560_v10 CPU_POWERPC_e500v1_v10 +#define CPU_POWERPC_MPC8560_v20 CPU_POWERPC_e500v1_v20 +#define CPU_POWERPC_MPC8560_v21 CPU_POWERPC_e500v1_v20 #define CPU_POWERPC_MPC8567 CPU_POWERPC_e500v2_v22 #define CPU_POWERPC_MPC8567E CPU_POWERPC_e500v2_v22 #define CPU_POWERPC_MPC8568 CPU_POWERPC_e500v2_v22 @@ -224,10 +205,6 @@ enum { #define CPU_POWERPC_MPC8641 CPU_POWERPC_e600 #define CPU_POWERPC_MPC8641D CPU_POWERPC_e600 /* PowerPC 6xx cores */ - CPU_POWERPC_601_v0 = 0x00010001, - CPU_POWERPC_601_v1 = 0x00010001, - CPU_POWERPC_601_v2 = 0x00010002, - CPU_POWERPC_602 = 0x00050100, CPU_POWERPC_603 = 0x00030100, CPU_POWERPC_603E_v11 = 0x00060101, CPU_POWERPC_603E_v12 = 0x00060102, @@ -375,6 +352,7 @@ enum { CPU_POWERPC_POWER9_DD20 = 0x004E1200, CPU_POWERPC_POWER10_BASE = 0x00800000, CPU_POWERPC_POWER10_DD1 = 0x00800100, + CPU_POWERPC_POWER10_DD20 = 0x00800200, CPU_POWERPC_970_v22 = 0x00390202, CPU_POWERPC_970FX_v10 = 0x00391100, CPU_POWERPC_970FX_v20 = 0x003C0200, diff --git a/target/ppc/cpu-param.h b/target/ppc/cpu-param.h index 37b458d33d..ea377b7d06 100644 --- a/target/ppc/cpu-param.h +++ b/target/ppc/cpu-param.h @@ -6,7 +6,7 @@ */ #ifndef PPC_CPU_PARAM_H -#define PPC_CPU_PARAM_H 1 +#define PPC_CPU_PARAM_H #ifdef TARGET_PPC64 # define TARGET_LONG_BITS 64 diff --git a/target/ppc/cpu-qom.h b/target/ppc/cpu-qom.h index 5800fa324e..89ff88f28c 100644 --- a/target/ppc/cpu-qom.h +++ b/target/ppc/cpu-qom.h @@ -29,10 +29,9 @@ #define TYPE_POWERPC_CPU "powerpc-cpu" #endif -OBJECT_DECLARE_TYPE(PowerPCCPU, PowerPCCPUClass, - POWERPC_CPU) +OBJECT_DECLARE_CPU_TYPE(PowerPCCPU, PowerPCCPUClass, POWERPC_CPU) -typedef struct CPUPPCState CPUPPCState; +typedef struct CPUArchState CPUPPCState; typedef struct ppc_tb_t ppc_tb_t; typedef struct ppc_dcr_t ppc_dcr_t; @@ -45,12 +44,14 @@ enum powerpc_mmu_t { POWERPC_MMU_32B = 0x00000001, /* PowerPC 6xx MMU with software TLB */ POWERPC_MMU_SOFT_6xx = 0x00000002, - /* PowerPC 74xx MMU with software TLB */ + /* + * PowerPC 74xx MMU with software TLB (this has been + * disabled, see git history for more information. 
+ * keywords: tlbld tlbli TLBMISS PTEHI PTELO) + */ POWERPC_MMU_SOFT_74xx = 0x00000003, /* PowerPC 4xx MMU with software TLB */ POWERPC_MMU_SOFT_4xx = 0x00000004, - /* PowerPC 4xx MMU with software TLB and zones protections */ - POWERPC_MMU_SOFT_4xx_Z = 0x00000005, /* PowerPC MMU in real mode only */ POWERPC_MMU_REAL = 0x00000006, /* Freescale MPC8xx MMU model */ @@ -59,8 +60,6 @@ enum powerpc_mmu_t { POWERPC_MMU_BOOKE = 0x00000008, /* BookE 2.06 MMU model */ POWERPC_MMU_BOOKE206 = 0x00000009, - /* PowerPC 601 MMU model (specific BATs format) */ - POWERPC_MMU_601 = 0x0000000A, #define POWERPC_MMU_64 0x00010000 /* 64 bits PowerPC MMU */ POWERPC_MMU_64B = POWERPC_MMU_64 | 0x00000001, @@ -88,22 +87,10 @@ enum powerpc_excp_t { POWERPC_EXCP_STD, /* PowerPC 40x exception model */ POWERPC_EXCP_40x, - /* PowerPC 601 exception model */ - POWERPC_EXCP_601, - /* PowerPC 602 exception model */ - POWERPC_EXCP_602, - /* PowerPC 603 exception model */ - POWERPC_EXCP_603, - /* PowerPC 603e exception model */ - POWERPC_EXCP_603E, - /* PowerPC G2 exception model */ - POWERPC_EXCP_G2, - /* PowerPC 604 exception model */ - POWERPC_EXCP_604, - /* PowerPC 7x0 exception model */ - POWERPC_EXCP_7x0, - /* PowerPC 7x5 exception model */ - POWERPC_EXCP_7x5, + /* PowerPC 603/604/G2 exception model */ + POWERPC_EXCP_6xx, + /* PowerPC 7xx exception model */ + POWERPC_EXCP_7xx, /* PowerPC 74xx exception model */ POWERPC_EXCP_74xx, /* BookE exception model */ @@ -147,8 +134,6 @@ enum powerpc_input_t { PPC_FLAGS_INPUT_POWER7, /* PowerPC POWER9 bus */ PPC_FLAGS_INPUT_POWER9, - /* PowerPC 401 bus */ - PPC_FLAGS_INPUT_401, /* Freescale RCPU bus */ PPC_FLAGS_INPUT_RCPU, }; @@ -173,7 +158,11 @@ struct PowerPCCPUClass { void (*parent_parse_features)(const char *type, char *str, Error **errp); uint32_t pvr; - bool (*pvr_match)(struct PowerPCCPUClass *pcc, uint32_t pvr); + /* + * If @best is false, match if pcc is in the family of pvr + * Else match only if pcc is the best match for pvr in this family. 
+ */ + bool (*pvr_match)(struct PowerPCCPUClass *pcc, uint32_t pvr, bool best); uint64_t pcr_mask; /* Available bits in PCR register */ uint64_t pcr_supported; /* Bits for supported PowerISA versions */ uint32_t svr; diff --git a/target/ppc/cpu.c b/target/ppc/cpu.c index a29299882a..1a97b41c6b 100644 --- a/target/ppc/cpu.c +++ b/target/ppc/cpu.c @@ -27,7 +27,7 @@ #include "helper_regs.h" #include "sysemu/tcg.h" -target_ulong cpu_read_xer(CPUPPCState *env) +target_ulong cpu_read_xer(const CPUPPCState *env) { if (is_isa300(env)) { return env->xer | (env->so << XER_SO) | @@ -67,40 +67,13 @@ uint32_t ppc_get_vscr(CPUPPCState *env) return env->vscr | (sat << VSCR_SAT); } -#ifdef CONFIG_SOFTMMU -void ppc_store_sdr1(CPUPPCState *env, target_ulong value) -{ - PowerPCCPU *cpu = env_archcpu(env); - qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value); - assert(!cpu->env.has_hv_mode || !cpu->vhyp); -#if defined(TARGET_PPC64) - if (mmu_is_64bit(env->mmu_model)) { - target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE; - target_ulong htabsize = value & SDR_64_HTABSIZE; - - if (value & ~sdr_mask) { - qemu_log_mask(LOG_GUEST_ERROR, "Invalid bits 0x"TARGET_FMT_lx - " set in SDR1", value & ~sdr_mask); - value &= sdr_mask; - } - if (htabsize > 28) { - qemu_log_mask(LOG_GUEST_ERROR, "Invalid HTABSIZE 0x" TARGET_FMT_lx - " stored in SDR1", htabsize); - return; - } - } -#endif /* defined(TARGET_PPC64) */ - /* FIXME: Should check for valid HTABMASK values in 32-bit case */ - env->spr[SPR_SDR1] = value; -} -#endif /* CONFIG_SOFTMMU */ - /* GDBstub can read and write MSR... */ void ppc_store_msr(CPUPPCState *env, target_ulong value) { hreg_store_msr(env, value, 0); } +#if !defined(CONFIG_USER_ONLY) void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val) { PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); @@ -109,14 +82,17 @@ void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val) env->spr[SPR_LPCR] = val & pcc->lpcr_mask; /* The gtse bit affects hflags */ hreg_compute_hflags(env); + + ppc_maybe_interrupt(env); } +#endif static inline void fpscr_set_rounding_mode(CPUPPCState *env) { int rnd_type; /* Set rounding mode */ - switch (fpscr_rn) { + switch (env->fpscr & FP_RN) { case 0: /* Best approximation (round to nearest) */ rnd_type = float_round_nearest_even; @@ -140,7 +116,7 @@ static inline void fpscr_set_rounding_mode(CPUPPCState *env) void ppc_store_fpscr(CPUPPCState *env, target_ulong val) { - val &= ~(FP_VX | FP_FEX); + val &= FPSCR_MTFS_MASK; if (val & FPSCR_IX) { val |= FP_VX; } @@ -148,6 +124,8 @@ void ppc_store_fpscr(CPUPPCState *env, target_ulong val) val |= FP_FEX; } env->fpscr = val; + env->fp_status.rebias_overflow = (FP_OE & env->fpscr) ? true : false; + env->fp_status.rebias_underflow = (FP_UE & env->fpscr) ? 
true : false; if (tcg_enabled()) { fpscr_set_rounding_mode(env); } diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index 93d308ac8f..81d4263a07 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -21,9 +21,11 @@ #define PPC_CPU_H #include "qemu/int128.h" +#include "qemu/cpu-float.h" #include "exec/cpu-defs.h" #include "cpu-qom.h" #include "qom/object.h" +#include "hw/registerfields.h" #define TCG_GUEST_DEFAULT_MO 0 @@ -36,6 +38,7 @@ #define PPC_ELF_MACHINE EM_PPC #endif +#define PPC_BIT_NR(bit) (63 - (bit)) #define PPC_BIT(bit) (0x8000000000000000ULL >> (bit)) #define PPC_BIT32(bit) (0x80000000 >> (bit)) #define PPC_BIT8(bit) (0x80 >> (bit)) @@ -44,6 +47,18 @@ PPC_BIT32(bs)) #define PPC_BITMASK8(bs, be) ((PPC_BIT8(bs) - PPC_BIT8(be)) | PPC_BIT8(bs)) +/* + * QEMU version of the GETFIELD/SETFIELD macros from skiboot + * + * It might be better to use the existing extract64() and + * deposit64() but this means that all the register definitions will + * change and become incompatible with the ones found in skiboot. + */ +#define MASK_TO_LSH(m) (__builtin_ffsll(m) - 1) +#define GETFIELD(m, v) (((v) & (m)) >> MASK_TO_LSH(m)) +#define SETFIELD(m, v, val) \ + (((v) & ~(m)) | ((((typeof(v))(val)) << MASK_TO_LSH(m)) & (m))) + /*****************************************************************************/ /* Exception vectors definitions */ enum { @@ -89,11 +104,9 @@ enum { POWERPC_EXCP_VPU = 73, /* Vector unavailable exception */ /* 40x specific exceptions */ POWERPC_EXCP_PIT = 74, /* Programmable interval timer interrupt */ - /* 601 specific exceptions */ - POWERPC_EXCP_IO = 75, /* IO error exception */ - POWERPC_EXCP_RUNM = 76, /* Run mode exception */ + /* Vectors 75-76 are 601 specific exceptions */ /* 602 specific exceptions */ - POWERPC_EXCP_EMUL = 77, /* Emulation trap exception */ + POWERPC_EXCP_EMUL = 77, /* Emulation trap exception */ /* 602/603 specific exceptions */ POWERPC_EXCP_IFTLB = 78, /* Instruction fetch TLB miss */ POWERPC_EXCP_DLTLB = 79, /* Data load TLB miss */ @@ -129,8 +142,10 @@ enum { /* ISA 3.00 additions */ POWERPC_EXCP_HVIRT = 101, POWERPC_EXCP_SYSCALL_VECTORED = 102, /* scv exception */ + POWERPC_EXCP_PERFM_EBB = 103, /* Performance Monitor EBB Exception */ + POWERPC_EXCP_EXTERNAL_EBB = 104, /* External EBB Exception */ /* EOL */ - POWERPC_EXCP_NB = 103, + POWERPC_EXCP_NB = 105, /* QEMU exceptions: special cases we want to stop translation */ POWERPC_EXCP_SYSCALL_USER = 0x203, /* System call in user mode only */ }; @@ -224,17 +239,19 @@ typedef union _ppc_vsr_t { int16_t s16[8]; int32_t s32[4]; int64_t s64[2]; + float16 f16[8]; float32 f32[4]; float64 f64[2]; float128 f128; #ifdef CONFIG_INT128 __uint128_t u128; #endif - Int128 s128; + Int128 s128; } ppc_vsr_t; typedef ppc_vsr_t ppc_avr_t; typedef ppc_vsr_t ppc_fprp_t; +typedef ppc_vsr_t ppc_acc_t; #if !defined(CONFIG_USER_ONLY) /* Software TLB cache */ @@ -296,51 +313,170 @@ typedef struct ppc_v3_pate_t { uint64_t dw1; } ppc_v3_pate_t; +/* PMU related structs and defines */ +#define PMU_COUNTERS_NUM 6 +typedef enum { + PMU_EVENT_INVALID = 0, + PMU_EVENT_INACTIVE, + PMU_EVENT_CYCLES, + PMU_EVENT_INSTRUCTIONS, + PMU_EVENT_INSN_RUN_LATCH, +} PMUEventType; + /*****************************************************************************/ /* Machine state register bits definition */ -#define MSR_SF 63 /* Sixty-four-bit mode hflags */ -#define MSR_TAG 62 /* Tag-active mode (POWERx ?) 
*/ -#define MSR_ISF 61 /* Sixty-four-bit interrupt mode on 630 */ -#define MSR_HV 60 /* hypervisor state hflags */ -#define MSR_TS0 34 /* Transactional state, 2 bits (Book3s) */ -#define MSR_TS1 33 -#define MSR_TM 32 /* Transactional Memory Available (Book3s) */ -#define MSR_CM 31 /* Computation mode for BookE hflags */ -#define MSR_ICM 30 /* Interrupt computation mode for BookE */ -#define MSR_GS 28 /* guest state for BookE */ -#define MSR_UCLE 26 /* User-mode cache lock enable for BookE */ -#define MSR_VR 25 /* altivec available x hflags */ -#define MSR_SPE 25 /* SPE enable for BookE x hflags */ -#define MSR_AP 23 /* Access privilege state on 602 hflags */ -#define MSR_VSX 23 /* Vector Scalar Extension (ISA 2.06 and later) x hflags */ -#define MSR_SA 22 /* Supervisor access mode on 602 hflags */ -#define MSR_KEY 19 /* key bit on 603e */ -#define MSR_POW 18 /* Power management */ -#define MSR_TGPR 17 /* TGPR usage on 602/603 x */ -#define MSR_CE 17 /* Critical interrupt enable on embedded PowerPC x */ -#define MSR_ILE 16 /* Interrupt little-endian mode */ -#define MSR_EE 15 /* External interrupt enable */ -#define MSR_PR 14 /* Problem state hflags */ -#define MSR_FP 13 /* Floating point available hflags */ -#define MSR_ME 12 /* Machine check interrupt enable */ -#define MSR_FE0 11 /* Floating point exception mode 0 */ -#define MSR_SE 10 /* Single-step trace enable x hflags */ -#define MSR_DWE 10 /* Debug wait enable on 405 x */ -#define MSR_UBLE 10 /* User BTB lock enable on e500 x */ -#define MSR_BE 9 /* Branch trace enable x hflags */ -#define MSR_DE 9 /* Debug interrupts enable on embedded PowerPC x */ -#define MSR_FE1 8 /* Floating point exception mode 1 */ -#define MSR_AL 7 /* AL bit on POWER */ -#define MSR_EP 6 /* Exception prefix on 601 */ -#define MSR_IR 5 /* Instruction relocate */ -#define MSR_DR 4 /* Data relocate */ -#define MSR_IS 5 /* Instruction address space (BookE) */ -#define MSR_DS 4 /* Data address space (BookE) */ -#define MSR_PE 3 /* Protection enable on 403 */ -#define MSR_PX 2 /* Protection exclusive on 403 x */ -#define MSR_PMM 2 /* Performance monitor mark on POWER x */ -#define MSR_RI 1 /* Recoverable interrupt 1 */ -#define MSR_LE 0 /* Little-endian mode 1 hflags */ +#define MSR_SF PPC_BIT_NR(0) /* Sixty-four-bit mode hflags */ +#define MSR_TAG PPC_BIT_NR(1) /* Tag-active mode (POWERx ?) */ +#define MSR_ISF PPC_BIT_NR(2) /* Sixty-four-bit interrupt mode on 630 */ +#define MSR_HV PPC_BIT_NR(3) /* hypervisor state hflags */ +#define MSR_TS0 PPC_BIT_NR(29) /* Transactional state, 2 bits (Book3s) */ +#define MSR_TS1 PPC_BIT_NR(30) +#define MSR_TM PPC_BIT_NR(31) /* Transactional Memory Available (Book3s) */ +#define MSR_CM PPC_BIT_NR(32) /* Computation mode for BookE hflags */ +#define MSR_ICM PPC_BIT_NR(33) /* Interrupt computation mode for BookE */ +#define MSR_GS PPC_BIT_NR(35) /* guest state for BookE */ +#define MSR_UCLE PPC_BIT_NR(37) /* User-mode cache lock enable for BookE */ +#define MSR_VR PPC_BIT_NR(38) /* altivec available x hflags */ +#define MSR_SPE PPC_BIT_NR(38) /* SPE enable for BookE x hflags */ +#define MSR_VSX PPC_BIT_NR(40) /* Vector Scalar Extension (>= 2.06)x hflags */ +#define MSR_S PPC_BIT_NR(41) /* Secure state */ +#define MSR_KEY PPC_BIT_NR(44) /* key bit on 603e */ +#define MSR_POW PPC_BIT_NR(45) /* Power management */ +#define MSR_WE PPC_BIT_NR(45) /* Wait State Enable on 405 */ +#define MSR_TGPR PPC_BIT_NR(46) /* TGPR usage on 602/603 x */ +#define MSR_CE PPC_BIT_NR(46) /* Critical int. 
enable on embedded PPC x */ +#define MSR_ILE PPC_BIT_NR(47) /* Interrupt little-endian mode */ +#define MSR_EE PPC_BIT_NR(48) /* External interrupt enable */ +#define MSR_PR PPC_BIT_NR(49) /* Problem state hflags */ +#define MSR_FP PPC_BIT_NR(50) /* Floating point available hflags */ +#define MSR_ME PPC_BIT_NR(51) /* Machine check interrupt enable */ +#define MSR_FE0 PPC_BIT_NR(52) /* Floating point exception mode 0 */ +#define MSR_SE PPC_BIT_NR(53) /* Single-step trace enable x hflags */ +#define MSR_DWE PPC_BIT_NR(53) /* Debug wait enable on 405 x */ +#define MSR_UBLE PPC_BIT_NR(53) /* User BTB lock enable on e500 x */ +#define MSR_BE PPC_BIT_NR(54) /* Branch trace enable x hflags */ +#define MSR_DE PPC_BIT_NR(54) /* Debug int. enable on embedded PPC x */ +#define MSR_FE1 PPC_BIT_NR(55) /* Floating point exception mode 1 */ +#define MSR_AL PPC_BIT_NR(56) /* AL bit on POWER */ +#define MSR_EP PPC_BIT_NR(57) /* Exception prefix on 601 */ +#define MSR_IR PPC_BIT_NR(58) /* Instruction relocate */ +#define MSR_IS PPC_BIT_NR(58) /* Instruction address space (BookE) */ +#define MSR_DR PPC_BIT_NR(59) /* Data relocate */ +#define MSR_DS PPC_BIT_NR(59) /* Data address space (BookE) */ +#define MSR_PE PPC_BIT_NR(60) /* Protection enable on 403 */ +#define MSR_PX PPC_BIT_NR(61) /* Protection exclusive on 403 x */ +#define MSR_PMM PPC_BIT_NR(61) /* Performance monitor mark on POWER x */ +#define MSR_RI PPC_BIT_NR(62) /* Recoverable interrupt 1 */ +#define MSR_LE PPC_BIT_NR(63) /* Little-endian mode 1 hflags */ + +FIELD(MSR, SF, MSR_SF, 1) +FIELD(MSR, TAG, MSR_TAG, 1) +FIELD(MSR, ISF, MSR_ISF, 1) +#if defined(TARGET_PPC64) +FIELD(MSR, HV, MSR_HV, 1) +#define FIELD_EX64_HV(storage) FIELD_EX64(storage, MSR, HV) +#else +#define FIELD_EX64_HV(storage) 0 +#endif +FIELD(MSR, TS0, MSR_TS0, 1) +FIELD(MSR, TS1, MSR_TS1, 1) +FIELD(MSR, TS, MSR_TS0, 2) +FIELD(MSR, TM, MSR_TM, 1) +FIELD(MSR, CM, MSR_CM, 1) +FIELD(MSR, ICM, MSR_ICM, 1) +FIELD(MSR, GS, MSR_GS, 1) +FIELD(MSR, UCLE, MSR_UCLE, 1) +FIELD(MSR, VR, MSR_VR, 1) +FIELD(MSR, SPE, MSR_SPE, 1) +FIELD(MSR, VSX, MSR_VSX, 1) +FIELD(MSR, S, MSR_S, 1) +FIELD(MSR, KEY, MSR_KEY, 1) +FIELD(MSR, POW, MSR_POW, 1) +FIELD(MSR, WE, MSR_WE, 1) +FIELD(MSR, TGPR, MSR_TGPR, 1) +FIELD(MSR, CE, MSR_CE, 1) +FIELD(MSR, ILE, MSR_ILE, 1) +FIELD(MSR, EE, MSR_EE, 1) +FIELD(MSR, PR, MSR_PR, 1) +FIELD(MSR, FP, MSR_FP, 1) +FIELD(MSR, ME, MSR_ME, 1) +FIELD(MSR, FE0, MSR_FE0, 1) +FIELD(MSR, SE, MSR_SE, 1) +FIELD(MSR, DWE, MSR_DWE, 1) +FIELD(MSR, UBLE, MSR_UBLE, 1) +FIELD(MSR, BE, MSR_BE, 1) +FIELD(MSR, DE, MSR_DE, 1) +FIELD(MSR, FE1, MSR_FE1, 1) +FIELD(MSR, AL, MSR_AL, 1) +FIELD(MSR, EP, MSR_EP, 1) +FIELD(MSR, IR, MSR_IR, 1) +FIELD(MSR, DR, MSR_DR, 1) +FIELD(MSR, IS, MSR_IS, 1) +FIELD(MSR, DS, MSR_DS, 1) +FIELD(MSR, PE, MSR_PE, 1) +FIELD(MSR, PX, MSR_PX, 1) +FIELD(MSR, PMM, MSR_PMM, 1) +FIELD(MSR, RI, MSR_RI, 1) +FIELD(MSR, LE, MSR_LE, 1) + +/* + * FE0 and FE1 bits are not side-by-side + * so we can't combine them using FIELD() + */ +#define FIELD_EX64_FE(msr) \ + ((FIELD_EX64(msr, MSR, FE0) << 1) | FIELD_EX64(msr, MSR, FE1)) + +/* PMU bits */ +#define MMCR0_FC PPC_BIT(32) /* Freeze Counters */ +#define MMCR0_PMAO PPC_BIT(56) /* Perf Monitor Alert Ocurred */ +#define MMCR0_PMAE PPC_BIT(37) /* Perf Monitor Alert Enable */ +#define MMCR0_EBE PPC_BIT(43) /* Perf Monitor EBB Enable */ +#define MMCR0_FCECE PPC_BIT(38) /* FC on Enabled Cond or Event */ +#define MMCR0_PMCC0 PPC_BIT(44) /* PMC Control bit 0 */ +#define MMCR0_PMCC1 PPC_BIT(45) /* PMC Control bit 1 */ +#define MMCR0_PMCC 
PPC_BITMASK(44, 45) /* PMC Control */ +#define MMCR0_FC14 PPC_BIT(58) /* PMC Freeze Counters 1-4 bit */ +#define MMCR0_FC56 PPC_BIT(59) /* PMC Freeze Counters 5-6 bit */ +#define MMCR0_PMC1CE PPC_BIT(48) /* MMCR0 PMC1 Condition Enabled */ +#define MMCR0_PMCjCE PPC_BIT(49) /* MMCR0 PMCj Condition Enabled */ +/* MMCR0 userspace r/w mask */ +#define MMCR0_UREG_MASK (MMCR0_FC | MMCR0_PMAO | MMCR0_PMAE) +/* MMCR2 userspace r/w mask */ +#define MMCR2_FC1P0 PPC_BIT(1) /* MMCR2 FCnP0 for PMC1 */ +#define MMCR2_FC2P0 PPC_BIT(10) /* MMCR2 FCnP0 for PMC2 */ +#define MMCR2_FC3P0 PPC_BIT(19) /* MMCR2 FCnP0 for PMC3 */ +#define MMCR2_FC4P0 PPC_BIT(28) /* MMCR2 FCnP0 for PMC4 */ +#define MMCR2_FC5P0 PPC_BIT(37) /* MMCR2 FCnP0 for PMC5 */ +#define MMCR2_FC6P0 PPC_BIT(46) /* MMCR2 FCnP0 for PMC6 */ +#define MMCR2_UREG_MASK (MMCR2_FC1P0 | MMCR2_FC2P0 | MMCR2_FC3P0 | \ + MMCR2_FC4P0 | MMCR2_FC5P0 | MMCR2_FC6P0) + +#define MMCR1_EVT_SIZE 8 +/* extract64() does a right shift before extracting */ +#define MMCR1_PMC1SEL_START 32 +#define MMCR1_PMC1EVT_EXTR (64 - MMCR1_PMC1SEL_START - MMCR1_EVT_SIZE) +#define MMCR1_PMC2SEL_START 40 +#define MMCR1_PMC2EVT_EXTR (64 - MMCR1_PMC2SEL_START - MMCR1_EVT_SIZE) +#define MMCR1_PMC3SEL_START 48 +#define MMCR1_PMC3EVT_EXTR (64 - MMCR1_PMC3SEL_START - MMCR1_EVT_SIZE) +#define MMCR1_PMC4SEL_START 56 +#define MMCR1_PMC4EVT_EXTR (64 - MMCR1_PMC4SEL_START - MMCR1_EVT_SIZE) + +/* PMU uses CTRL_RUN to sample PM_RUN_INST_CMPL */ +#define CTRL_RUN PPC_BIT(63) + +/* EBB/BESCR bits */ +/* Global Enable */ +#define BESCR_GE PPC_BIT(0) +/* External Event-based Exception Enable */ +#define BESCR_EE PPC_BIT(30) +/* Performance Monitor Event-based Exception Enable */ +#define BESCR_PME PPC_BIT(31) +/* External Event-based Exception Occurred */ +#define BESCR_EEO PPC_BIT(62) +/* Performance Monitor Event-based Exception Occurred */ +#define BESCR_PMEO PPC_BIT(63) +#define BESCR_INVALID PPC_BITMASK(32, 33) /* LPCR bits */ #define LPCR_VPM0 PPC_BIT(0) @@ -400,52 +536,6 @@ typedef struct ppc_v3_pate_t { #define HFSCR_MSGP PPC_BIT(53) /* Privileged Message Send Facilities */ #define HFSCR_IC_MSGP 0xA -#define msr_sf ((env->msr >> MSR_SF) & 1) -#define msr_isf ((env->msr >> MSR_ISF) & 1) -#if defined(TARGET_PPC64) -#define msr_hv ((env->msr >> MSR_HV) & 1) -#else -#define msr_hv (0) -#endif -#define msr_cm ((env->msr >> MSR_CM) & 1) -#define msr_icm ((env->msr >> MSR_ICM) & 1) -#define msr_gs ((env->msr >> MSR_GS) & 1) -#define msr_ucle ((env->msr >> MSR_UCLE) & 1) -#define msr_vr ((env->msr >> MSR_VR) & 1) -#define msr_spe ((env->msr >> MSR_SPE) & 1) -#define msr_ap ((env->msr >> MSR_AP) & 1) -#define msr_vsx ((env->msr >> MSR_VSX) & 1) -#define msr_sa ((env->msr >> MSR_SA) & 1) -#define msr_key ((env->msr >> MSR_KEY) & 1) -#define msr_pow ((env->msr >> MSR_POW) & 1) -#define msr_tgpr ((env->msr >> MSR_TGPR) & 1) -#define msr_ce ((env->msr >> MSR_CE) & 1) -#define msr_ile ((env->msr >> MSR_ILE) & 1) -#define msr_ee ((env->msr >> MSR_EE) & 1) -#define msr_pr ((env->msr >> MSR_PR) & 1) -#define msr_fp ((env->msr >> MSR_FP) & 1) -#define msr_me ((env->msr >> MSR_ME) & 1) -#define msr_fe0 ((env->msr >> MSR_FE0) & 1) -#define msr_se ((env->msr >> MSR_SE) & 1) -#define msr_dwe ((env->msr >> MSR_DWE) & 1) -#define msr_uble ((env->msr >> MSR_UBLE) & 1) -#define msr_be ((env->msr >> MSR_BE) & 1) -#define msr_de ((env->msr >> MSR_DE) & 1) -#define msr_fe1 ((env->msr >> MSR_FE1) & 1) -#define msr_al ((env->msr >> MSR_AL) & 1) -#define msr_ep ((env->msr >> MSR_EP) & 1) -#define msr_ir ((env->msr >> MSR_IR) 
& 1) -#define msr_dr ((env->msr >> MSR_DR) & 1) -#define msr_is ((env->msr >> MSR_IS) & 1) -#define msr_ds ((env->msr >> MSR_DS) & 1) -#define msr_pe ((env->msr >> MSR_PE) & 1) -#define msr_px ((env->msr >> MSR_PX) & 1) -#define msr_pmm ((env->msr >> MSR_PMM) & 1) -#define msr_ri ((env->msr >> MSR_RI) & 1) -#define msr_le ((env->msr >> MSR_LE) & 1) -#define msr_ts ((env->msr >> MSR_TS1) & 3) -#define msr_tm ((env->msr >> MSR_TM) & 1) - #define DBCR0_ICMP (1 << 27) #define DBCR0_BRT (1 << 26) #define DBSR_ICMP (1 << 27) @@ -572,8 +662,7 @@ enum { POWERPC_FLAG_PX = 0x00000200, POWERPC_FLAG_PMM = 0x00000400, /* Flag for special features */ - /* Decrementer clock: RTC clock (POWER, 601) or bus clock */ - POWERPC_FLAG_RTC_CLK = 0x00010000, + /* Decrementer clock */ POWERPC_FLAG_BUS_CLK = 0x00020000, /* Has CFAR */ POWERPC_FLAG_CFAR = 0x00040000, @@ -583,8 +672,6 @@ enum { POWERPC_FLAG_TM = 0x00100000, /* Has SCV (ISA 3.00) */ POWERPC_FLAG_SCV = 0x00200000, - /* Has HID0 for LE bit (601) */ - POWERPC_FLAG_HID0_LE = 0x00400000, }; /* @@ -595,17 +682,23 @@ enum { * the MSR are validated in hreg_compute_hflags. */ enum { - HFLAGS_LE = 0, /* MSR_LE -- comes from elsewhere on 601 */ + HFLAGS_LE = 0, /* MSR_LE */ HFLAGS_HV = 1, /* computed from MSR_HV and other state */ HFLAGS_64 = 2, /* computed from MSR_CE and MSR_SF */ HFLAGS_GTSE = 3, /* computed from SPR_LPCR[GTSE] */ HFLAGS_DR = 4, /* MSR_DR */ + HFLAGS_HR = 5, /* computed from SPR_LPCR[HR] */ HFLAGS_SPE = 6, /* from MSR_SPE if cpu has SPE; avoid overlap w/ MSR_VR */ HFLAGS_TM = 8, /* computed from MSR_TM */ HFLAGS_BE = 9, /* MSR_BE -- from elsewhere on embedded ppc */ HFLAGS_SE = 10, /* MSR_SE -- from elsewhere on embedded ppc */ HFLAGS_FP = 13, /* MSR_FP */ HFLAGS_PR = 14, /* MSR_PR */ + HFLAGS_PMCC0 = 15, /* MMCR0 PMCC bit 0 */ + HFLAGS_PMCC1 = 16, /* MMCR0 PMCC bit 1 */ + HFLAGS_PMCJCE = 17, /* MMCR0 PMCjCE bit */ + HFLAGS_PMC_OTHER = 18, /* PMC other than PMC5-6 is enabled */ + HFLAGS_INSN_CNT = 19, /* PMU instruction count enabled */ HFLAGS_VSX = 23, /* MSR_VSX if cpu has VSX */ HFLAGS_VR = 25, /* MSR_VR if cpu has VRE */ @@ -615,77 +708,50 @@ enum { /*****************************************************************************/ /* Floating point status and control register */ -#define FPSCR_DRN2 34 /* Decimal Floating-Point rounding control */ -#define FPSCR_DRN1 33 /* Decimal Floating-Point rounding control */ -#define FPSCR_DRN0 32 /* Decimal Floating-Point rounding control */ -#define FPSCR_FX 31 /* Floating-point exception summary */ -#define FPSCR_FEX 30 /* Floating-point enabled exception summary */ -#define FPSCR_VX 29 /* Floating-point invalid operation exception summ. 
*/ -#define FPSCR_OX 28 /* Floating-point overflow exception */ -#define FPSCR_UX 27 /* Floating-point underflow exception */ -#define FPSCR_ZX 26 /* Floating-point zero divide exception */ -#define FPSCR_XX 25 /* Floating-point inexact exception */ -#define FPSCR_VXSNAN 24 /* Floating-point invalid operation exception (sNan) */ -#define FPSCR_VXISI 23 /* Floating-point invalid operation exception (inf) */ -#define FPSCR_VXIDI 22 /* Floating-point invalid operation exception (inf) */ -#define FPSCR_VXZDZ 21 /* Floating-point invalid operation exception (zero) */ -#define FPSCR_VXIMZ 20 /* Floating-point invalid operation exception (inf) */ -#define FPSCR_VXVC 19 /* Floating-point invalid operation exception (comp) */ -#define FPSCR_FR 18 /* Floating-point fraction rounded */ -#define FPSCR_FI 17 /* Floating-point fraction inexact */ -#define FPSCR_C 16 /* Floating-point result class descriptor */ -#define FPSCR_FL 15 /* Floating-point less than or negative */ -#define FPSCR_FG 14 /* Floating-point greater than or negative */ -#define FPSCR_FE 13 /* Floating-point equal or zero */ -#define FPSCR_FU 12 /* Floating-point unordered or NaN */ -#define FPSCR_FPCC 12 /* Floating-point condition code */ -#define FPSCR_FPRF 12 /* Floating-point result flags */ -#define FPSCR_VXSOFT 10 /* Floating-point invalid operation exception (soft) */ -#define FPSCR_VXSQRT 9 /* Floating-point invalid operation exception (sqrt) */ -#define FPSCR_VXCVI 8 /* Floating-point invalid operation exception (int) */ -#define FPSCR_VE 7 /* Floating-point invalid operation exception enable */ -#define FPSCR_OE 6 /* Floating-point overflow exception enable */ -#define FPSCR_UE 5 /* Floating-point underflow exception enable */ -#define FPSCR_ZE 4 /* Floating-point zero divide exception enable */ -#define FPSCR_XE 3 /* Floating-point inexact exception enable */ -#define FPSCR_NI 2 /* Floating-point non-IEEE mode */ -#define FPSCR_RN1 1 -#define FPSCR_RN0 0 /* Floating-point rounding control */ -#define fpscr_drn (((env->fpscr) & FP_DRN) >> FPSCR_DRN0) -#define fpscr_fex (((env->fpscr) >> FPSCR_FEX) & 0x1) -#define fpscr_vx (((env->fpscr) >> FPSCR_VX) & 0x1) -#define fpscr_ox (((env->fpscr) >> FPSCR_OX) & 0x1) -#define fpscr_ux (((env->fpscr) >> FPSCR_UX) & 0x1) -#define fpscr_zx (((env->fpscr) >> FPSCR_ZX) & 0x1) -#define fpscr_xx (((env->fpscr) >> FPSCR_XX) & 0x1) -#define fpscr_vxsnan (((env->fpscr) >> FPSCR_VXSNAN) & 0x1) -#define fpscr_vxisi (((env->fpscr) >> FPSCR_VXISI) & 0x1) -#define fpscr_vxidi (((env->fpscr) >> FPSCR_VXIDI) & 0x1) -#define fpscr_vxzdz (((env->fpscr) >> FPSCR_VXZDZ) & 0x1) -#define fpscr_vximz (((env->fpscr) >> FPSCR_VXIMZ) & 0x1) -#define fpscr_vxvc (((env->fpscr) >> FPSCR_VXVC) & 0x1) -#define fpscr_fpcc (((env->fpscr) >> FPSCR_FPCC) & 0xF) -#define fpscr_vxsoft (((env->fpscr) >> FPSCR_VXSOFT) & 0x1) -#define fpscr_vxsqrt (((env->fpscr) >> FPSCR_VXSQRT) & 0x1) -#define fpscr_vxcvi (((env->fpscr) >> FPSCR_VXCVI) & 0x1) -#define fpscr_ve (((env->fpscr) >> FPSCR_VE) & 0x1) -#define fpscr_oe (((env->fpscr) >> FPSCR_OE) & 0x1) -#define fpscr_ue (((env->fpscr) >> FPSCR_UE) & 0x1) -#define fpscr_ze (((env->fpscr) >> FPSCR_ZE) & 0x1) -#define fpscr_xe (((env->fpscr) >> FPSCR_XE) & 0x1) -#define fpscr_ni (((env->fpscr) >> FPSCR_NI) & 0x1) -#define fpscr_rn (((env->fpscr) >> FPSCR_RN0) & 0x3) +#define FPSCR_DRN2 PPC_BIT_NR(29) /* Decimal Floating-Point rounding ctrl. */ +#define FPSCR_DRN1 PPC_BIT_NR(30) /* Decimal Floating-Point rounding ctrl. 
*/ +#define FPSCR_DRN0 PPC_BIT_NR(31) /* Decimal Floating-Point rounding ctrl. */ +#define FPSCR_FX PPC_BIT_NR(32) /* Floating-point exception summary */ +#define FPSCR_FEX PPC_BIT_NR(33) /* Floating-point enabled exception summ.*/ +#define FPSCR_VX PPC_BIT_NR(34) /* Floating-point invalid op. excp. summ.*/ +#define FPSCR_OX PPC_BIT_NR(35) /* Floating-point overflow exception */ +#define FPSCR_UX PPC_BIT_NR(36) /* Floating-point underflow exception */ +#define FPSCR_ZX PPC_BIT_NR(37) /* Floating-point zero divide exception */ +#define FPSCR_XX PPC_BIT_NR(38) /* Floating-point inexact exception */ +#define FPSCR_VXSNAN PPC_BIT_NR(39) /* Floating-point invalid op. excp (sNan)*/ +#define FPSCR_VXISI PPC_BIT_NR(40) /* Floating-point invalid op. excp (inf) */ +#define FPSCR_VXIDI PPC_BIT_NR(41) /* Floating-point invalid op. excp (inf) */ +#define FPSCR_VXZDZ PPC_BIT_NR(42) /* Floating-point invalid op. excp (zero)*/ +#define FPSCR_VXIMZ PPC_BIT_NR(43) /* Floating-point invalid op. excp (inf) */ +#define FPSCR_VXVC PPC_BIT_NR(44) /* Floating-point invalid op. excp (comp)*/ +#define FPSCR_FR PPC_BIT_NR(45) /* Floating-point fraction rounded */ +#define FPSCR_FI PPC_BIT_NR(46) /* Floating-point fraction inexact */ +#define FPSCR_C PPC_BIT_NR(47) /* Floating-point result class descriptor*/ +#define FPSCR_FL PPC_BIT_NR(48) /* Floating-point less than or negative */ +#define FPSCR_FG PPC_BIT_NR(49) /* Floating-point greater than or neg. */ +#define FPSCR_FE PPC_BIT_NR(50) /* Floating-point equal or zero */ +#define FPSCR_FU PPC_BIT_NR(51) /* Floating-point unordered or NaN */ +#define FPSCR_FPCC PPC_BIT_NR(51) /* Floating-point condition code */ +#define FPSCR_FPRF PPC_BIT_NR(51) /* Floating-point result flags */ +#define FPSCR_VXSOFT PPC_BIT_NR(53) /* Floating-point invalid op. excp (soft)*/ +#define FPSCR_VXSQRT PPC_BIT_NR(54) /* Floating-point invalid op. excp (sqrt)*/ +#define FPSCR_VXCVI PPC_BIT_NR(55) /* Floating-point invalid op. excp (int) */ +#define FPSCR_VE PPC_BIT_NR(56) /* Floating-point invalid op. excp enable*/ +#define FPSCR_OE PPC_BIT_NR(57) /* Floating-point overflow excp. enable */ +#define FPSCR_UE PPC_BIT_NR(58) /* Floating-point underflow excp. enable */ +#define FPSCR_ZE PPC_BIT_NR(59) /* Floating-point zero divide excp enable*/ +#define FPSCR_XE PPC_BIT_NR(60) /* Floating-point inexact excp. 
enable */ +#define FPSCR_NI PPC_BIT_NR(61) /* Floating-point non-IEEE mode */ +#define FPSCR_RN1 PPC_BIT_NR(62) +#define FPSCR_RN0 PPC_BIT_NR(63) /* Floating-point rounding control */ /* Invalid operation exception summary */ #define FPSCR_IX ((1 << FPSCR_VXSNAN) | (1 << FPSCR_VXISI) | \ (1 << FPSCR_VXIDI) | (1 << FPSCR_VXZDZ) | \ (1 << FPSCR_VXIMZ) | (1 << FPSCR_VXVC) | \ (1 << FPSCR_VXSOFT) | (1 << FPSCR_VXSQRT) | \ (1 << FPSCR_VXCVI)) -/* exception summary */ -#define fpscr_ex (((env->fpscr) >> FPSCR_XX) & 0x1F) -/* enabled exception summary */ -#define fpscr_eex (((env->fpscr) >> FPSCR_XX) & ((env->fpscr) >> FPSCR_XE) & \ - 0x1F) + +FIELD(FPSCR, FI, FPSCR_FI, 1) #define FP_DRN2 (1ull << FPSCR_DRN2) #define FP_DRN1 (1ull << FPSCR_DRN1) @@ -735,6 +801,10 @@ enum { FP_VXZDZ | FP_VXIMZ | FP_VXVC | FP_VXSOFT | \ FP_VXSQRT | FP_VXCVI) +/* FPSCR bits that can be set by mtfsf, mtfsfi and mtfsb1 */ +#define FPSCR_MTFS_MASK (~(MAKE_64BIT_MASK(36, 28) | PPC_BIT(28) | \ + FP_FEX | FP_VX | PPC_BIT(52))) + /*****************************************************************************/ /* Vector status and control register */ #define VSCR_NJ 16 /* Vector non-java */ @@ -1012,7 +1082,7 @@ struct ppc_radix_page_info { #define PPC_CPU_OPCODES_LEN 0x40 #define PPC_CPU_INDIRECT_OPCODES_LEN 0x20 -struct CPUPPCState { +struct CPUArchState { /* Most commonly used resources during translated code execution first */ target_ulong gpr[32]; /* general purpose registers */ target_ulong gprh[32]; /* storage for GPR MSB, used by the SPE extension */ @@ -1062,7 +1132,6 @@ struct CPUPPCState { int nb_pids; /* Number of available PID registers */ int tlb_type; /* Type of TLB we're dealing with */ ppc_tlb_t tlb; /* TLB is optional. Allocate them only if needed */ - target_ulong pb[4]; /* 403 dedicated access protection registers */ bool tlb_dirty; /* Set to non-zero when modifying TLB */ bool kvm_sw_tlb; /* non-zero if KVM SW TLB API is active */ uint32_t tlb_need_flush; /* Delayed flush needed */ @@ -1073,6 +1142,9 @@ struct CPUPPCState { /* Other registers */ target_ulong spr[1024]; /* special purpose registers */ ppc_spr_t spr_cb[1024]; + /* Composite status for PMC[1-6] enabled and counting insns or cycles. */ + uint8_t pmc_ins_cnt; + uint8_t pmc_cyc_cnt; /* Vector status and control register, minus VSCR_SAT */ uint32_t vscr; /* VSX registers (including FP and AVR) */ @@ -1114,7 +1186,6 @@ struct CPUPPCState { * by recent Book3s compatible CPUs (POWER7 and newer). */ uint32_t irq_input_state; - void **irq_inputs; target_ulong excp_vectors[POWERPC_EXCP_NB]; /* Exception vectors */ target_ulong excp_prefix; @@ -1167,6 +1238,18 @@ struct CPUPPCState { uint32_t tm_vscr; uint64_t tm_dscr; uint64_t tm_tar; + + /* + * Timers used to fire performance monitor alerts + * when counting cycles. + */ + QEMUTimer *pmu_cyc_overflow_timers[PMU_COUNTERS_NUM]; + + /* + * PMU base time value used by the PMU to calculate + * running cycles. + */ + uint64_t pmu_base_time; }; #define SET_FIT_PERIOD(a_, b_, c_, d_) \ @@ -1196,7 +1279,7 @@ typedef struct PPCVirtualHypervisorClass PPCVirtualHypervisorClass; * * A PowerPC CPU. 
*/ -struct PowerPCCPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ @@ -1234,6 +1317,8 @@ PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc); #ifndef CONFIG_USER_ONLY struct PPCVirtualHypervisorClass { InterfaceClass parent; + bool (*cpu_in_nested)(PowerPCCPU *cpu); + void (*deliver_hv_excp)(PowerPCCPU *cpu, int excp); void (*hypercall)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu); hwaddr (*hpt_mask)(PPCVirtualHypervisor *vhyp); const ppc_hash_pte64_t *(*map_hptes)(PPCVirtualHypervisor *vhyp, @@ -1243,7 +1328,8 @@ struct PPCVirtualHypervisorClass { hwaddr ptex, int n); void (*hpte_set_c)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1); void (*hpte_set_r)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1); - void (*get_pate)(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry); + bool (*get_pate)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu, + target_ulong lpid, ppc_v3_pate_t *entry); target_ulong (*encode_hpt_for_kvm_pr)(PPCVirtualHypervisor *vhyp); void (*cpu_exec_enter)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu); void (*cpu_exec_exit)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu); @@ -1252,10 +1338,13 @@ struct PPCVirtualHypervisorClass { #define TYPE_PPC_VIRTUAL_HYPERVISOR "ppc-virtual-hypervisor" DECLARE_OBJ_CHECKERS(PPCVirtualHypervisor, PPCVirtualHypervisorClass, PPC_VIRTUAL_HYPERVISOR, TYPE_PPC_VIRTUAL_HYPERVISOR) + +static inline bool vhyp_cpu_in_nested(PowerPCCPU *cpu) +{ + return PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp)->cpu_in_nested(cpu); +} #endif /* CONFIG_USER_ONLY */ -void ppc_cpu_do_interrupt(CPUState *cpu); -bool ppc_cpu_exec_interrupt(CPUState *cpu, int int_req); void ppc_cpu_dump_state(CPUState *cpu, FILE *f, int flags); hwaddr ppc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); int ppc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); @@ -1267,10 +1356,13 @@ void ppc_gdb_gen_spr_xml(PowerPCCPU *cpu); const char *ppc_gdb_get_dynamic_xml(CPUState *cs, const char *xml_name); #endif int ppc64_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, - int cpuid, void *opaque); + int cpuid, DumpState *s); int ppc32_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, - int cpuid, void *opaque); + int cpuid, DumpState *s); #ifndef CONFIG_USER_ONLY +void ppc_maybe_interrupt(CPUPPCState *env); +void ppc_cpu_do_interrupt(CPUState *cpu); +bool ppc_cpu_exec_interrupt(CPUState *cpu, int int_req); void ppc_cpu_do_system_reset(CPUState *cs); void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector); extern const VMStateDescription vmstate_ppc_cpu; @@ -1278,21 +1370,12 @@ extern const VMStateDescription vmstate_ppc_cpu; /*****************************************************************************/ void ppc_translate_init(void); -/* - * you can call this signal handler from your SIGBUS and SIGSEGV - * signal handlers to inform the virtual CPU of exceptions. non zero - * is returned if the signal was handled by the virtual CPU. 
- */ -int cpu_ppc_signal_handler(int host_signum, void *pinfo, void *puc); -bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); #if !defined(CONFIG_USER_ONLY) void ppc_store_sdr1(CPUPPCState *env, target_ulong value); +void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val); #endif /* !defined(CONFIG_USER_ONLY) */ void ppc_store_msr(CPUPPCState *env, target_ulong value); -void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val); void ppc_cpu_list(void); @@ -1316,20 +1399,27 @@ void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value); void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value); uint64_t cpu_ppc_load_purr(CPUPPCState *env); void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value); -uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env); -uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env); #if !defined(CONFIG_USER_ONLY) -void cpu_ppc601_store_rtcl(CPUPPCState *env, uint32_t value); -void cpu_ppc601_store_rtcu(CPUPPCState *env, uint32_t value); target_ulong load_40x_pit(CPUPPCState *env); void store_40x_pit(CPUPPCState *env, target_ulong val); void store_40x_dbcr0(CPUPPCState *env, uint32_t val); void store_40x_sler(CPUPPCState *env, uint32_t val); +void store_40x_tcr(CPUPPCState *env, target_ulong val); +void store_40x_tsr(CPUPPCState *env, target_ulong val); void store_booke_tcr(CPUPPCState *env, target_ulong val); void store_booke_tsr(CPUPPCState *env, target_ulong val); void ppc_tlb_invalidate_all(CPUPPCState *env); void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr); void cpu_ppc_set_vhyp(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp); +int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, + hwaddr *raddrp, target_ulong address, + uint32_t pid); +int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb, + hwaddr *raddrp, + target_ulong address, uint32_t pid, int ext, + int i); +hwaddr booke206_tlb_to_page_size(CPUPPCState *env, + ppcmas_tlb_t *tlb); #endif #endif @@ -1362,7 +1452,6 @@ int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val); #define POWERPC_CPU_TYPE_NAME(model) model POWERPC_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_POWERPC_CPU -#define cpu_signal_handler cpu_ppc_signal_handler #define cpu_list ppc_cpu_list /* MMU modes definitions */ @@ -1393,9 +1482,6 @@ void ppc_compat_add_property(Object *obj, const char *name, uint32_t *compat_pvr, const char *basedesc); #endif /* defined(TARGET_PPC64) */ -typedef CPUPPCState CPUArchState; -typedef PowerPCCPU ArchCPU; - #include "exec/cpu-all.h" /*****************************************************************************/ @@ -1423,27 +1509,18 @@ typedef PowerPCCPU ArchCPU; #define XER_CMP 8 #define XER_BC 0 #define xer_so (env->so) -#define xer_ov (env->ov) -#define xer_ca (env->ca) -#define xer_ov32 (env->ov) -#define xer_ca32 (env->ca) #define xer_cmp ((env->xer >> XER_CMP) & 0xFF) #define xer_bc ((env->xer >> XER_BC) & 0x7F) /* SPR definitions */ #define SPR_MQ (0x000) #define SPR_XER (0x001) -#define SPR_601_VRTCU (0x004) -#define SPR_601_VRTCL (0x005) -#define SPR_601_UDECR (0x006) #define SPR_LR (0x008) #define SPR_CTR (0x009) #define SPR_UAMR (0x00D) #define SPR_DSCR (0x011) #define SPR_DSISR (0x012) -#define SPR_DAR (0x013) /* DAE for PowerPC 601 */ -#define SPR_601_RTCU (0x014) -#define SPR_601_RTCL (0x015) +#define SPR_DAR (0x013) #define SPR_DECR (0x016) #define SPR_SDR1 (0x019) #define SPR_SRR0 (0x01A) @@ -1598,6 +1675,8 @@ typedef PowerPCCPU ArchCPU; #define SPR_BOOKE_GIVOR14 (0x1BD) #define SPR_TIR 
(0x1BE) #define SPR_PTCR (0x1D0) +#define SPR_HASHKEYR (0x1D4) +#define SPR_HASHPKEYR (0x1D5) #define SPR_BOOKE_SPEFSCR (0x200) #define SPR_Exxx_BBEAR (0x201) #define SPR_Exxx_BBTAR (0x202) @@ -1920,7 +1999,6 @@ typedef PowerPCCPU ArchCPU; #define SPR_HID1 (0x3F1) #define SPR_IABR (0x3F2) #define SPR_40x_DBCR0 (0x3F2) -#define SPR_601_HID2 (0x3F2) #define SPR_Exxx_L1CSR0 (0x3F2) #define SPR_ICTRL (0x3F3) #define SPR_HID2 (0x3F3) @@ -1936,7 +2014,6 @@ typedef PowerPCCPU ArchCPU; #define DABR_MASK (~(target_ulong)0x7) #define SPR_Exxx_BUCSR (0x3F5) #define SPR_40x_IAC2 (0x3F5) -#define SPR_601_HID5 (0x3F5) #define SPR_40x_DAC1 (0x3F6) #define SPR_MSSCR0 (0x3F6) #define SPR_970_HID5 (0x3F6) @@ -1969,7 +2046,6 @@ typedef PowerPCCPU ArchCPU; #define SPR_403_PBL2 (0x3FE) #define SPR_PIR (0x3FF) #define SPR_403_PBU2 (0x3FF) -#define SPR_601_HID15 (0x3FF) #define SPR_604_HID15 (0x3FF) #define SPR_E500_SVR (0x3FF) @@ -2034,15 +2110,6 @@ enum { #define PPC_RES PPC_INSNS_BASE /* spr/msr access instructions */ #define PPC_MISC PPC_INSNS_BASE - /* Deprecated instruction sets */ - /* Original POWER instruction set */ - PPC_POWER = 0x0000000000000002ULL, - /* POWER2 instruction set extension */ - PPC_POWER2 = 0x0000000000000004ULL, - /* Power RTC support */ - PPC_POWER_RTC = 0x0000000000000008ULL, - /* Power-to-PowerPC bridge (601) */ - PPC_POWER_BR = 0x0000000000000010ULL, /* 64 bits PowerPC instruction set */ PPC_64B = 0x0000000000000020ULL, /* New 64 bits extensions (PowerPC 2.0x) */ @@ -2055,8 +2122,6 @@ enum { PPC_MFTB = 0x0000000000000200ULL, /* Fixed-point unit extensions */ - /* PowerPC 602 specific */ - PPC_602_SPEC = 0x0000000000000400ULL, /* isel instruction */ PPC_ISEL = 0x0000000000000800ULL, /* popcntb instruction */ @@ -2115,8 +2180,6 @@ enum { PPC_SEGMENT = 0x0000020000000000ULL, /* PowerPC 6xx TLB management instructions */ PPC_6xx_TLB = 0x0000040000000000ULL, - /* PowerPC 74xx TLB management instructions */ - PPC_74xx_TLB = 0x0000080000000000ULL, /* PowerPC 40x TLB management instructions */ PPC_40x_TLB = 0x0000100000000000ULL, /* segment register access instructions for PowerPC 64 "bridge" */ @@ -2152,15 +2215,12 @@ enum { PPC_DCR = 0x1000000000000000ULL, /* DCR extended accesse */ PPC_DCRX = 0x2000000000000000ULL, - /* user-mode DCR access, implemented in PowerPC 460 */ - PPC_DCRUX = 0x4000000000000000ULL, /* popcntw and popcntd instructions */ PPC_POPCNTWD = 0x8000000000000000ULL, -#define PPC_TCG_INSNS (PPC_INSNS_BASE | PPC_POWER | PPC_POWER2 \ - | PPC_POWER_RTC | PPC_POWER_BR | PPC_64B \ +#define PPC_TCG_INSNS (PPC_INSNS_BASE | PPC_64B \ | PPC_64BX | PPC_64H | PPC_WAIT | PPC_MFTB \ - | PPC_602_SPEC | PPC_ISEL | PPC_POPCNTB \ + | PPC_ISEL | PPC_POPCNTB \ | PPC_STRING | PPC_FLOAT | PPC_FLOAT_EXT \ | PPC_FLOAT_FSQRT | PPC_FLOAT_FRES \ | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES \ @@ -2173,13 +2233,13 @@ enum { | PPC_CACHE_DCBZ \ | PPC_CACHE_DCBA | PPC_CACHE_LOCK \ | PPC_EXTERN | PPC_SEGMENT | PPC_6xx_TLB \ - | PPC_74xx_TLB | PPC_40x_TLB | PPC_SEGMENT_64B \ + | PPC_40x_TLB | PPC_SEGMENT_64B \ | PPC_SLBI | PPC_WRTEE | PPC_40x_EXCP \ | PPC_405_MAC | PPC_440_SPEC | PPC_BOOKE \ | PPC_MFAPIDI | PPC_TLBIVA | PPC_TLBIVAX \ | PPC_4xx_COMMON | PPC_40x_ICBT | PPC_RFMCI \ - | PPC_RFDI | PPC_DCR | PPC_DCRX | PPC_DCRUX \ - | PPC_POPCNTWD | PPC_CILDST) + | PPC_RFDI | PPC_DCR | PPC_DCRX | PPC_POPCNTWD \ + | PPC_CILDST) /* extended type values */ @@ -2225,6 +2285,10 @@ enum { PPC2_ISA300 = 0x0000000000080000ULL, /* POWER ISA 3.1 */ PPC2_ISA310 = 0x0000000000100000ULL, + /* lwsync instruction */ + 
PPC2_MEM_LWSYNC = 0x0000000000200000ULL, + /* ISA 2.06 BCD assist instructions */ + PPC2_BCDA_ISA206 = 0x0000000000400000ULL, #define PPC_TCG_INSNS2 (PPC2_BOOKE206 | PPC2_VSX | PPC2_PRCNTL | PPC2_DBRX | \ PPC2_ISA205 | PPC2_VSX207 | PPC2_PERM_ISA206 | \ @@ -2233,7 +2297,8 @@ enum { PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | \ PPC2_ALTIVEC_207 | PPC2_ISA207S | PPC2_DFP | \ PPC2_FP_CVT_S64 | PPC2_TM | PPC2_PM_ISA206 | \ - PPC2_ISA300 | PPC2_ISA310) + PPC2_ISA300 | PPC2_ISA310 | PPC2_MEM_LWSYNC | \ + PPC2_BCDA_ISA206) }; /*****************************************************************************/ @@ -2354,26 +2419,27 @@ enum { /* Hardware exceptions definitions */ enum { /* External hardware exception sources */ - PPC_INTERRUPT_RESET = 0, /* Reset exception */ - PPC_INTERRUPT_WAKEUP, /* Wakeup exception */ - PPC_INTERRUPT_MCK, /* Machine check exception */ - PPC_INTERRUPT_EXT, /* External interrupt */ - PPC_INTERRUPT_SMI, /* System management interrupt */ - PPC_INTERRUPT_CEXT, /* Critical external interrupt */ - PPC_INTERRUPT_DEBUG, /* External debug exception */ - PPC_INTERRUPT_THERM, /* Thermal exception */ + PPC_INTERRUPT_RESET = 0x00001, /* Reset exception */ + PPC_INTERRUPT_WAKEUP = 0x00002, /* Wakeup exception */ + PPC_INTERRUPT_MCK = 0x00004, /* Machine check exception */ + PPC_INTERRUPT_EXT = 0x00008, /* External interrupt */ + PPC_INTERRUPT_SMI = 0x00010, /* System management interrupt */ + PPC_INTERRUPT_CEXT = 0x00020, /* Critical external interrupt */ + PPC_INTERRUPT_DEBUG = 0x00040, /* External debug exception */ + PPC_INTERRUPT_THERM = 0x00080, /* Thermal exception */ /* Internal hardware exception sources */ - PPC_INTERRUPT_DECR, /* Decrementer exception */ - PPC_INTERRUPT_HDECR, /* Hypervisor decrementer exception */ - PPC_INTERRUPT_PIT, /* Programmable interval timer interrupt */ - PPC_INTERRUPT_FIT, /* Fixed interval timer interrupt */ - PPC_INTERRUPT_WDT, /* Watchdog timer interrupt */ - PPC_INTERRUPT_CDOORBELL, /* Critical doorbell interrupt */ - PPC_INTERRUPT_DOORBELL, /* Doorbell interrupt */ - PPC_INTERRUPT_PERFM, /* Performance monitor interrupt */ - PPC_INTERRUPT_HMI, /* Hypervisor Maintenance interrupt */ - PPC_INTERRUPT_HDOORBELL, /* Hypervisor Doorbell interrupt */ - PPC_INTERRUPT_HVIRT, /* Hypervisor virtualization interrupt */ + PPC_INTERRUPT_DECR = 0x00100, /* Decrementer exception */ + PPC_INTERRUPT_HDECR = 0x00200, /* Hypervisor decrementer exception */ + PPC_INTERRUPT_PIT = 0x00400, /* Programmable interval timer int. 
*/ + PPC_INTERRUPT_FIT = 0x00800, /* Fixed interval timer interrupt */ + PPC_INTERRUPT_WDT = 0x01000, /* Watchdog timer interrupt */ + PPC_INTERRUPT_CDOORBELL = 0x02000, /* Critical doorbell interrupt */ + PPC_INTERRUPT_DOORBELL = 0x04000, /* Doorbell interrupt */ + PPC_INTERRUPT_PERFM = 0x08000, /* Performance monitor interrupt */ + PPC_INTERRUPT_HMI = 0x10000, /* Hypervisor Maintenance interrupt */ + PPC_INTERRUPT_HDOORBELL = 0x20000, /* Hypervisor Doorbell interrupt */ + PPC_INTERRUPT_HVIRT = 0x40000, /* Hypervisor virtualization interrupt */ + PPC_INTERRUPT_EBB = 0x80000, /* Event-based Branch exception */ }; /* Processor Compatibility mask (PCR) */ @@ -2409,7 +2475,7 @@ enum { /*****************************************************************************/ #define is_isa300(ctx) (!!(ctx->insns_flags2 & PPC2_ISA300)) -target_ulong cpu_read_xer(CPUPPCState *env); +target_ulong cpu_read_xer(const CPUPPCState *env); void cpu_write_xer(CPUPPCState *env, target_ulong xer); /* @@ -2431,13 +2497,18 @@ static inline void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc, } #endif -void QEMU_NORETURN raise_exception(CPUPPCState *env, uint32_t exception); -void QEMU_NORETURN raise_exception_ra(CPUPPCState *env, uint32_t exception, - uintptr_t raddr); -void QEMU_NORETURN raise_exception_err(CPUPPCState *env, uint32_t exception, - uint32_t error_code); -void QEMU_NORETURN raise_exception_err_ra(CPUPPCState *env, uint32_t exception, - uint32_t error_code, uintptr_t raddr); +G_NORETURN void raise_exception(CPUPPCState *env, uint32_t exception); +G_NORETURN void raise_exception_ra(CPUPPCState *env, uint32_t exception, + uintptr_t raddr); +G_NORETURN void raise_exception_err(CPUPPCState *env, uint32_t exception, + uint32_t error_code); +G_NORETURN void raise_exception_err_ra(CPUPPCState *env, uint32_t exception, + uint32_t error_code, uintptr_t raddr); + +/* PERFM EBB helper*/ +#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) +void raise_ebb_perfm_exception(CPUPPCState *env); +#endif #if !defined(CONFIG_USER_ONLY) static inline int booke206_tlbm_id(CPUPPCState *env, ppcmas_tlb_t *tlbm) @@ -2577,7 +2648,7 @@ static inline bool lsw_reg_in_range(int start, int nregs, int rx) } /* Accessors for FP, VMX and VSX registers */ -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN #define VsrB(i) u8[i] #define VsrSB(i) s8[i] #define VsrH(i) u16[i] @@ -2586,6 +2657,9 @@ static inline bool lsw_reg_in_range(int start, int nregs, int rx) #define VsrSW(i) s32[i] #define VsrD(i) u64[i] #define VsrSD(i) s64[i] +#define VsrHF(i) f16[i] +#define VsrSF(i) f32[i] +#define VsrDF(i) f64[i] #else #define VsrB(i) u8[15 - (i)] #define VsrSB(i) s8[15 - (i)] @@ -2595,6 +2669,9 @@ static inline bool lsw_reg_in_range(int start, int nregs, int rx) #define VsrSW(i) s32[3 - (i)] #define VsrD(i) u64[1 - (i)] #define VsrSD(i) s64[1 - (i)] +#define VsrHF(i) f16[7 - (i)] +#define VsrSF(i) f32[3 - (i)] +#define VsrDF(i) f64[1 - (i)] #endif static inline int vsr64_offset(int i, bool high) @@ -2607,6 +2684,11 @@ static inline int vsr_full_offset(int i) return offsetof(CPUPPCState, vsr[i].u64[0]); } +static inline int acc_full_offset(int i) +{ + return vsr_full_offset(i * 4); +} + static inline int fpr_offset(int i) { return vsr64_offset(i, true); @@ -2643,24 +2725,72 @@ static inline bool ppc_has_spr(PowerPCCPU *cpu, int spr) return cpu->env.spr_cb[spr].name != NULL; } -static inline bool ppc_interrupts_little_endian(PowerPCCPU *cpu) +#if !defined(CONFIG_USER_ONLY) +static inline bool ppc_interrupts_little_endian(PowerPCCPU 
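Note that the PPC_INTERRUPT_* values in the hunk above are now explicit single-bit masks rather than sequential bit positions, so a pending-interrupt word can be combined and tested with plain bitwise operators instead of 1 << n shifts. A minimal sketch of that usage, assuming the surrounding cpu.h declarations (CPUPPCState and its pending_interrupts field); the helper names themselves are illustrative, not part of the tree:

/* Illustrative helpers only: set, test and clear a pending interrupt
 * using the flag-style PPC_INTERRUPT_* constants defined above. */
static inline void ppc_irq_set_pending(CPUPPCState *env, uint32_t irq_mask)
{
    env->pending_interrupts |= irq_mask;           /* e.g. PPC_INTERRUPT_DECR */
}

static inline bool ppc_irq_is_pending(CPUPPCState *env, uint32_t irq_mask)
{
    return (env->pending_interrupts & irq_mask) != 0;
}

static inline void ppc_irq_clear_pending(CPUPPCState *env, uint32_t irq_mask)
{
    env->pending_interrupts &= ~irq_mask;
}

Under the previous sequential enumeration the same operations would have needed shifts of the form 1 << PPC_INTERRUPT_DECR; with the new values the constant is the mask.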
*cpu, bool hv) { PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + CPUPPCState *env = &cpu->env; + bool ile; - /* - * Only models that have an LPCR and know about LPCR_ILE can do little - * endian. - */ - if (pcc->lpcr_mask & LPCR_ILE) { - return !!(cpu->env.spr[SPR_LPCR] & LPCR_ILE); + if (hv && env->has_hv_mode) { + if (is_isa300(pcc)) { + ile = !!(env->spr[SPR_HID0] & HID0_POWER9_HILE); + } else { + ile = !!(env->spr[SPR_HID0] & HID0_HILE); + } + + } else if (pcc->lpcr_mask & LPCR_ILE) { + ile = !!(env->spr[SPR_LPCR] & LPCR_ILE); + } else { + ile = FIELD_EX64(env->msr, MSR, ILE); } - return false; + return ile; } +#endif void dump_mmu(CPUPPCState *env); void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len); void ppc_store_vscr(CPUPPCState *env, uint32_t vscr); uint32_t ppc_get_vscr(CPUPPCState *env); + +/*****************************************************************************/ +/* Power management enable checks */ +static inline int check_pow_none(CPUPPCState *env) +{ + return 0; +} + +static inline int check_pow_nocheck(CPUPPCState *env) +{ + return 1; +} + +/*****************************************************************************/ +/* PowerPC implementations definitions */ + +#define POWERPC_FAMILY(_name) \ + static void \ + glue(glue(ppc_, _name), _cpu_family_class_init)(ObjectClass *, void *); \ + \ + static const TypeInfo \ + glue(glue(ppc_, _name), _cpu_family_type_info) = { \ + .name = stringify(_name) "-family-" TYPE_POWERPC_CPU, \ + .parent = TYPE_POWERPC_CPU, \ + .abstract = true, \ + .class_init = glue(glue(ppc_, _name), _cpu_family_class_init), \ + }; \ + \ + static void glue(glue(ppc_, _name), _cpu_family_register_types)(void) \ + { \ + type_register_static( \ + &glue(glue(ppc_, _name), _cpu_family_type_info)); \ + } \ + \ + type_init(glue(glue(ppc_, _name), _cpu_family_register_types)) \ + \ + static void glue(glue(ppc_, _name), _cpu_family_class_init) + + #endif /* PPC_CPU_H */ diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c index 505a0ed6ac..294a18a5b7 100644 --- a/target/ppc/cpu_init.c +++ b/target/ppc/cpu_init.c @@ -22,7 +22,6 @@ #include "disas/dis-asm.h" #include "exec/gdbstub.h" #include "kvm_ppc.h" -#include "sysemu/arch_init.h" #include "sysemu/cpus.h" #include "sysemu/hw_accel.h" #include "sysemu/tcg.h" @@ -45,7 +44,12 @@ #include "helper_regs.h" #include "internal.h" -#include "spr_tcg.h" +#include "spr_common.h" +#include "power8-pmu.h" + +#ifndef CONFIG_USER_ONLY +#include "hw/boards.h" +#endif /* #define PPC_DEBUG_SPR */ /* #define USE_APPLE_GDB */ @@ -57,391 +61,7 @@ static inline void vscr_init(CPUPPCState *env, uint32_t val) ppc_store_vscr(env, val); } -/** - * _spr_register - * - * Register an SPR with all the callbacks required for tcg, - * and the ID number for KVM. - * - * The reason for the conditional compilation is that the tcg functions - * may be compiled out, and the system kvm header may not be available - * for supplying the ID numbers. This is ugly, but the best we can do. 
- */ - -#ifdef CONFIG_TCG -# define USR_ARG(X) X, -# ifdef CONFIG_USER_ONLY -# define SYS_ARG(X) -# else -# define SYS_ARG(X) X, -# endif -#else -# define USR_ARG(X) -# define SYS_ARG(X) -#endif -#ifdef CONFIG_KVM -# define KVM_ARG(X) X, -#else -# define KVM_ARG(X) -#endif - -typedef void spr_callback(DisasContext *, int, int); - -static void _spr_register(CPUPPCState *env, int num, const char *name, - USR_ARG(spr_callback *uea_read) - USR_ARG(spr_callback *uea_write) - SYS_ARG(spr_callback *oea_read) - SYS_ARG(spr_callback *oea_write) - SYS_ARG(spr_callback *hea_read) - SYS_ARG(spr_callback *hea_write) - KVM_ARG(uint64_t one_reg_id) - target_ulong initial_value) -{ - ppc_spr_t *spr = &env->spr_cb[num]; - - /* No SPR should be registered twice. */ - assert(spr->name == NULL); - assert(name != NULL); - - spr->name = name; - spr->default_value = initial_value; - env->spr[num] = initial_value; - -#ifdef CONFIG_TCG - spr->uea_read = uea_read; - spr->uea_write = uea_write; -# ifndef CONFIG_USER_ONLY - spr->oea_read = oea_read; - spr->oea_write = oea_write; - spr->hea_read = hea_read; - spr->hea_write = hea_write; -# endif -#endif -#ifdef CONFIG_KVM - spr->one_reg_id = one_reg_id; -#endif -} - -/* spr_register_kvm_hv passes all required arguments. */ -#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, hea_read, hea_write, \ - one_reg_id, initial_value) \ - _spr_register(env, num, name, \ - USR_ARG(uea_read) USR_ARG(uea_write) \ - SYS_ARG(oea_read) SYS_ARG(oea_write) \ - SYS_ARG(hea_read) SYS_ARG(hea_write) \ - KVM_ARG(one_reg_id) initial_value) - -/* spr_register_kvm duplicates the oea callbacks to the hea callbacks. */ -#define spr_register_kvm(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, one_reg_id, ival) \ - spr_register_kvm_hv(env, num, name, uea_read, uea_write, oea_read, \ - oea_write, oea_read, oea_write, one_reg_id, ival) - -/* spr_register_hv and spr_register are similar, except there is no kvm id. 
*/ -#define spr_register_hv(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, hea_read, hea_write, ival) \ - spr_register_kvm_hv(env, num, name, uea_read, uea_write, oea_read, \ - oea_write, hea_read, hea_write, 0, ival) - -#define spr_register(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, ival) \ - spr_register_kvm(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, 0, ival) - -/* Generic PowerPC SPRs */ -static void register_generic_sprs(CPUPPCState *env) -{ - /* Integer processing */ - spr_register(env, SPR_XER, "XER", - &spr_read_xer, &spr_write_xer, - &spr_read_xer, &spr_write_xer, - 0x00000000); - /* Branch control */ - spr_register(env, SPR_LR, "LR", - &spr_read_lr, &spr_write_lr, - &spr_read_lr, &spr_write_lr, - 0x00000000); - spr_register(env, SPR_CTR, "CTR", - &spr_read_ctr, &spr_write_ctr, - &spr_read_ctr, &spr_write_ctr, - 0x00000000); - /* Interrupt processing */ - spr_register(env, SPR_SRR0, "SRR0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - spr_register(env, SPR_SRR1, "SRR1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* Processor control */ - spr_register(env, SPR_SPRG0, "SPRG0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - spr_register(env, SPR_SPRG1, "SPRG1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - spr_register(env, SPR_SPRG2, "SPRG2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - spr_register(env, SPR_SPRG3, "SPRG3", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); -} - -/* SPR common to all non-embedded PowerPC, including 601 */ -static void register_ne_601_sprs(CPUPPCState *env) -{ - /* Exception processing */ - spr_register_kvm(env, SPR_DSISR, "DSISR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - KVM_REG_PPC_DSISR, 0x00000000); - spr_register_kvm(env, SPR_DAR, "DAR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - KVM_REG_PPC_DAR, 0x00000000); - /* Timer */ - spr_register(env, SPR_DECR, "DECR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_decr, &spr_write_decr, - 0x00000000); -} - -/* Storage Description Register 1 */ -static void register_sdr1_sprs(CPUPPCState *env) -{ -#ifndef CONFIG_USER_ONLY - if (env->has_hv_mode) { - /* - * SDR1 is a hypervisor resource on CPUs which have a - * hypervisor mode - */ - spr_register_hv(env, SPR_SDR1, "SDR1", - SPR_NOACCESS, SPR_NOACCESS, - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_sdr1, - 0x00000000); - } else { - spr_register(env, SPR_SDR1, "SDR1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_sdr1, - 0x00000000); - } -#endif -} - -/* BATs 0-3 */ -static void register_low_BATs(CPUPPCState *env) -{ -#if !defined(CONFIG_USER_ONLY) - spr_register(env, SPR_IBAT0U, "IBAT0U", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_ibat, &spr_write_ibatu, - 0x00000000); - spr_register(env, SPR_IBAT0L, "IBAT0L", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_ibat, &spr_write_ibatl, - 0x00000000); - spr_register(env, SPR_IBAT1U, "IBAT1U", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_ibat, &spr_write_ibatu, - 0x00000000); - spr_register(env, SPR_IBAT1L, "IBAT1L", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_ibat, &spr_write_ibatl, - 0x00000000); - spr_register(env, SPR_IBAT2U, "IBAT2U", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_ibat, &spr_write_ibatu, - 0x00000000); - spr_register(env, 
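The comment block above spells out why _spr_register() has to vary its parameter list with the build configuration: the USR_ARG/SYS_ARG/KVM_ARG wrappers either forward an argument followed by a comma or expand to nothing, so one call site matches several signatures. A minimal, self-contained sketch of that preprocessor technique, using generic names (TCG_ARG/KVM_ARG/register_reg are examples, not the actual QEMU macros):

/* Sketch of the argument-dropping macro pattern. */
#include <stdio.h>

#define HAVE_TCG 1
/* #define HAVE_KVM 1 */

#ifdef HAVE_TCG
# define TCG_ARG(X) X,
#else
# define TCG_ARG(X)
#endif

#ifdef HAVE_KVM
# define KVM_ARG(X) X,
#else
# define KVM_ARG(X)
#endif

/* With HAVE_TCG defined and HAVE_KVM undefined this declares
 * register_reg(const char *name, void (*read_cb)(void), int initial_value). */
static void register_reg(const char *name,
                         TCG_ARG(void (*read_cb)(void))
                         KVM_ARG(unsigned long one_reg_id)
                         int initial_value)
{
#ifdef HAVE_TCG
    read_cb();
#endif
    printf("%s reset value: %d\n", name, initial_value);
}

static void dummy_read(void)
{
    printf("read callback invoked\n");
}

int main(void)
{
    /* Expands to register_reg("XER", dummy_read, 0); in this configuration. */
    register_reg("XER", TCG_ARG(dummy_read) KVM_ARG(0x123UL) 0);
    return 0;
}

Because the caller and the callee use the same wrappers, the expansion stays consistent whichever of the configuration switches is enabled, which is exactly how the spr_register_kvm_hv/spr_register_kvm/spr_register_hv/spr_register layering above stays in sync with _spr_register().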
SPR_IBAT2L, "IBAT2L", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_ibat, &spr_write_ibatl, - 0x00000000); - spr_register(env, SPR_IBAT3U, "IBAT3U", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_ibat, &spr_write_ibatu, - 0x00000000); - spr_register(env, SPR_IBAT3L, "IBAT3L", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_ibat, &spr_write_ibatl, - 0x00000000); - spr_register(env, SPR_DBAT0U, "DBAT0U", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_dbat, &spr_write_dbatu, - 0x00000000); - spr_register(env, SPR_DBAT0L, "DBAT0L", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_dbat, &spr_write_dbatl, - 0x00000000); - spr_register(env, SPR_DBAT1U, "DBAT1U", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_dbat, &spr_write_dbatu, - 0x00000000); - spr_register(env, SPR_DBAT1L, "DBAT1L", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_dbat, &spr_write_dbatl, - 0x00000000); - spr_register(env, SPR_DBAT2U, "DBAT2U", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_dbat, &spr_write_dbatu, - 0x00000000); - spr_register(env, SPR_DBAT2L, "DBAT2L", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_dbat, &spr_write_dbatl, - 0x00000000); - spr_register(env, SPR_DBAT3U, "DBAT3U", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_dbat, &spr_write_dbatu, - 0x00000000); - spr_register(env, SPR_DBAT3L, "DBAT3L", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_dbat, &spr_write_dbatl, - 0x00000000); - env->nb_BATs += 4; -#endif -} - -/* BATs 4-7 */ -static void register_high_BATs(CPUPPCState *env) -{ -#if !defined(CONFIG_USER_ONLY) - spr_register(env, SPR_IBAT4U, "IBAT4U", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_ibat_h, &spr_write_ibatu_h, - 0x00000000); - spr_register(env, SPR_IBAT4L, "IBAT4L", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_ibat_h, &spr_write_ibatl_h, - 0x00000000); - spr_register(env, SPR_IBAT5U, "IBAT5U", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_ibat_h, &spr_write_ibatu_h, - 0x00000000); - spr_register(env, SPR_IBAT5L, "IBAT5L", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_ibat_h, &spr_write_ibatl_h, - 0x00000000); - spr_register(env, SPR_IBAT6U, "IBAT6U", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_ibat_h, &spr_write_ibatu_h, - 0x00000000); - spr_register(env, SPR_IBAT6L, "IBAT6L", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_ibat_h, &spr_write_ibatl_h, - 0x00000000); - spr_register(env, SPR_IBAT7U, "IBAT7U", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_ibat_h, &spr_write_ibatu_h, - 0x00000000); - spr_register(env, SPR_IBAT7L, "IBAT7L", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_ibat_h, &spr_write_ibatl_h, - 0x00000000); - spr_register(env, SPR_DBAT4U, "DBAT4U", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_dbat_h, &spr_write_dbatu_h, - 0x00000000); - spr_register(env, SPR_DBAT4L, "DBAT4L", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_dbat_h, &spr_write_dbatl_h, - 0x00000000); - spr_register(env, SPR_DBAT5U, "DBAT5U", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_dbat_h, &spr_write_dbatu_h, - 0x00000000); - spr_register(env, SPR_DBAT5L, "DBAT5L", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_dbat_h, &spr_write_dbatl_h, - 0x00000000); - spr_register(env, SPR_DBAT6U, "DBAT6U", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_dbat_h, &spr_write_dbatu_h, - 0x00000000); - spr_register(env, SPR_DBAT6L, "DBAT6L", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_dbat_h, &spr_write_dbatl_h, - 0x00000000); - spr_register(env, SPR_DBAT7U, "DBAT7U", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_dbat_h, &spr_write_dbatu_h, - 0x00000000); - spr_register(env, SPR_DBAT7L, "DBAT7L", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_dbat_h, &spr_write_dbatl_h, - 0x00000000); - env->nb_BATs += 4; -#endif -} - -/* Generic PowerPC 
time base */ -static void register_tbl(CPUPPCState *env) -{ - spr_register(env, SPR_VTBL, "TBL", - &spr_read_tbl, SPR_NOACCESS, - &spr_read_tbl, SPR_NOACCESS, - 0x00000000); - spr_register(env, SPR_TBL, "TBL", - &spr_read_tbl, SPR_NOACCESS, - &spr_read_tbl, &spr_write_tbl, - 0x00000000); - spr_register(env, SPR_VTBU, "TBU", - &spr_read_tbu, SPR_NOACCESS, - &spr_read_tbu, SPR_NOACCESS, - 0x00000000); - spr_register(env, SPR_TBU, "TBU", - &spr_read_tbu, SPR_NOACCESS, - &spr_read_tbu, &spr_write_tbu, - 0x00000000); -} - -/* Softare table search registers */ -static void register_6xx_7xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways) -{ -#if !defined(CONFIG_USER_ONLY) - env->nb_tlb = nb_tlbs; - env->nb_ways = nb_ways; - env->id_tlbs = 1; - env->tlb_type = TLB_6XX; - spr_register(env, SPR_DMISS, "DMISS", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, SPR_NOACCESS, - 0x00000000); - spr_register(env, SPR_DCMP, "DCMP", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, SPR_NOACCESS, - 0x00000000); - spr_register(env, SPR_HASH1, "HASH1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, SPR_NOACCESS, - 0x00000000); - spr_register(env, SPR_HASH2, "HASH2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, SPR_NOACCESS, - 0x00000000); - spr_register(env, SPR_IMISS, "IMISS", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, SPR_NOACCESS, - 0x00000000); - spr_register(env, SPR_ICMP, "ICMP", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, SPR_NOACCESS, - 0x00000000); - spr_register(env, SPR_RPA, "RPA", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); -#endif -} - -/* SPR common to MPC755 and G2 */ -static void register_G2_755_sprs(CPUPPCState *env) +static void register_745_sprs(CPUPPCState *env) { /* SGPRs */ spr_register(env, SPR_SPRG4, "SPRG4", @@ -460,105 +80,142 @@ static void register_G2_755_sprs(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); + + /* Hardware implementation registers */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_HID2, "HID2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +static void register_755_sprs(CPUPPCState *env) +{ + /* L2 cache control */ + spr_register(env, SPR_L2CR, "L2CR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, spr_access_nop, + 0x00000000); + + spr_register(env, SPR_L2PMCR, "L2PMCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); } /* SPR common to all 7xx PowerPC implementations */ static void register_7xx_sprs(CPUPPCState *env) { /* Breakpoints */ - /* XXX : not implemented */ spr_register_kvm(env, SPR_DABR, "DABR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_DABR, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_IABR, "IABR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Cache management */ - /* XXX : not implemented */ spr_register(env, SPR_ICTC, "ICTC", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Performance monitors */ - /* XXX : not implemented */ spr_register(env, SPR_7XX_MMCR0, "MMCR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + 
spr_register(env, SPR_7XX_MMCR1, "MMCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC1, "PMC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC2, "PMC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC3, "PMC3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC4, "PMC4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_SIAR, "SIAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_UMMCR0, "UMMCR0", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_UMMCR1, "UMMCR1", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC1, "UPMC1", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC2, "UPMC2", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC3, "UPMC3", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC4, "UPMC4", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_USIAR, "USIAR", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* External access control */ - /* XXX : not implemented */ spr_register(env, SPR_EAR, "EAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); + + /* Hardware implementation registers */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); } #ifdef TARGET_PPC64 @@ -606,26 +263,6 @@ static void register_iamr_sprs(CPUPPCState *env) } #endif /* TARGET_PPC64 */ -static void register_thrm_sprs(CPUPPCState *env) -{ - /* Thermal management */ - /* XXX : not implemented */ - spr_register(env, SPR_THRM1, "THRM1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_thrm, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_THRM2, "THRM2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_thrm, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_THRM3, "THRM3", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_thrm, &spr_write_generic, - 0x00000000); -} - /* SPR specific to PowerPC 604 implementation */ static void register_604_sprs(CPUPPCState *env) { @@ -635,66 +272,133 @@ static void register_604_sprs(CPUPPCState *env) &spr_read_generic, &spr_write_pir, 0x00000000); /* Breakpoints */ - /* XXX : not implemented */ spr_register(env, SPR_IABR, "IABR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register_kvm(env, SPR_DABR, "DABR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, 
&spr_write_generic, KVM_REG_PPC_DABR, 0x00000000); /* Performance counters */ - /* XXX : not implemented */ spr_register(env, SPR_7XX_MMCR0, "MMCR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC1, "PMC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC2, "PMC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_SIAR, "SIAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_SDA, "SDA", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); /* External access control */ - /* XXX : not implemented */ spr_register(env, SPR_EAR, "EAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); + + /* Hardware implementation registers */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +static void register_604e_sprs(CPUPPCState *env) +{ + spr_register(env, SPR_7XX_MMCR1, "MMCR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_7XX_PMC3, "PMC3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_7XX_PMC4, "PMC4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Hardware implementation registers */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); } /* SPR specific to PowerPC 603 implementation */ static void register_603_sprs(CPUPPCState *env) { /* External access control */ - /* XXX : not implemented */ spr_register(env, SPR_EAR, "EAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Breakpoints */ - /* XXX : not implemented */ spr_register(env, SPR_IABR, "IABR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +static void register_e300_sprs(CPUPPCState *env) +{ + /* hardware implementation registers */ + spr_register(env, SPR_HID2, "HID2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Breakpoints */ + spr_register(env, SPR_DABR, "DABR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_DABR2, "DABR2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_IABR2, "IABR2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_IBCR, "IBCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_DBCR, "DBCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); } /* SPR specific to PowerPC G2 implementation */ @@ -702,7 +406,6 @@ static void register_G2_sprs(CPUPPCState *env) { /* Memory base address */ /* MBAR */ - /* XXX : not implemented */ spr_register(env, SPR_MBAR, 
"MBAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -717,197 +420,200 @@ static void register_G2_sprs(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Breakpoints */ - /* XXX : not implemented */ spr_register(env, SPR_DABR, "DABR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_DABR2, "DABR2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_IABR, "IABR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_IABR2, "IABR2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_IBCR, "IBCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_DBCR, "DBCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); -} -/* SPR specific to PowerPC 602 implementation */ -static void register_602_sprs(CPUPPCState *env) -{ - /* ESA registers */ - /* XXX : not implemented */ - spr_register(env, SPR_SER, "SER", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_SEBR, "SEBR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_ESASRR, "ESASRR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* Floating point status */ - /* XXX : not implemented */ - spr_register(env, SPR_SP, "SP", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_LT, "LT", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* Watchdog timer */ - /* XXX : not implemented */ - spr_register(env, SPR_TCR, "TCR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* Interrupt base */ - spr_register(env, SPR_IBR, "IBR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_IABR, "IABR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); -} - -/* SPR specific to PowerPC 601 implementation */ -static void register_601_sprs(CPUPPCState *env) -{ - /* Multiplication/division register */ - /* MQ */ - spr_register(env, SPR_MQ, "MQ", - &spr_read_generic, &spr_write_generic, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* RTC registers */ - spr_register(env, SPR_601_RTCU, "RTCU", - SPR_NOACCESS, SPR_NOACCESS, - SPR_NOACCESS, &spr_write_601_rtcu, - 0x00000000); - spr_register(env, SPR_601_VRTCU, "RTCU", - &spr_read_601_rtcu, SPR_NOACCESS, - &spr_read_601_rtcu, SPR_NOACCESS, - 0x00000000); - spr_register(env, SPR_601_RTCL, "RTCL", - SPR_NOACCESS, SPR_NOACCESS, - SPR_NOACCESS, &spr_write_601_rtcl, - 0x00000000); - spr_register(env, SPR_601_VRTCL, "RTCL", - &spr_read_601_rtcl, SPR_NOACCESS, - &spr_read_601_rtcl, SPR_NOACCESS, - 0x00000000); - /* Timer */ -#if 0 /* ? 
*/ - spr_register(env, SPR_601_UDECR, "UDECR", - &spr_read_decr, SPR_NOACCESS, - &spr_read_decr, SPR_NOACCESS, - 0x00000000); -#endif /* External access control */ - /* XXX : not implemented */ spr_register(env, SPR_EAR, "EAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* Memory management */ -#if !defined(CONFIG_USER_ONLY) - spr_register(env, SPR_IBAT0U, "IBAT0U", + /* Hardware implementation register */ + spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_601_ubat, &spr_write_601_ubatu, + &spr_read_generic, &spr_write_generic, 0x00000000); - spr_register(env, SPR_IBAT0L, "IBAT0L", + + spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_601_ubat, &spr_write_601_ubatl, + &spr_read_generic, &spr_write_generic, 0x00000000); - spr_register(env, SPR_IBAT1U, "IBAT1U", + + spr_register(env, SPR_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_601_ubat, &spr_write_601_ubatu, + &spr_read_generic, &spr_write_generic, 0x00000000); - spr_register(env, SPR_IBAT1L, "IBAT1L", + + /* SGPRs */ + spr_register(env, SPR_SPRG4, "SPRG4", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_601_ubat, &spr_write_601_ubatl, + &spr_read_generic, &spr_write_generic, 0x00000000); - spr_register(env, SPR_IBAT2U, "IBAT2U", + spr_register(env, SPR_SPRG5, "SPRG5", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_601_ubat, &spr_write_601_ubatu, + &spr_read_generic, &spr_write_generic, 0x00000000); - spr_register(env, SPR_IBAT2L, "IBAT2L", + spr_register(env, SPR_SPRG6, "SPRG6", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_601_ubat, &spr_write_601_ubatl, + &spr_read_generic, &spr_write_generic, 0x00000000); - spr_register(env, SPR_IBAT3U, "IBAT3U", + spr_register(env, SPR_SPRG7, "SPRG7", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_601_ubat, &spr_write_601_ubatu, + &spr_read_generic, &spr_write_generic, 0x00000000); - spr_register(env, SPR_IBAT3L, "IBAT3L", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_601_ubat, &spr_write_601_ubatl, - 0x00000000); - env->nb_BATs = 4; -#endif } static void register_74xx_sprs(CPUPPCState *env) { + /* Breakpoints */ + spr_register_kvm(env, SPR_DABR, "DABR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_DABR, 0x00000000); + + spr_register(env, SPR_IABR, "IABR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Cache management */ + spr_register(env, SPR_ICTC, "ICTC", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Performance monitors */ + spr_register(env, SPR_7XX_MMCR0, "MMCR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_7XX_MMCR1, "MMCR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_7XX_PMC1, "PMC1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_7XX_PMC2, "PMC2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_7XX_PMC3, "PMC3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_7XX_PMC4, "PMC4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_7XX_SIAR, "SIAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + + spr_register(env, SPR_7XX_UMMCR0, "UMMCR0", + &spr_read_ureg, SPR_NOACCESS, + 
&spr_read_ureg, SPR_NOACCESS, + 0x00000000); + + spr_register(env, SPR_7XX_UMMCR1, "UMMCR1", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + + spr_register(env, SPR_7XX_UPMC1, "UPMC1", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + + spr_register(env, SPR_7XX_UPMC2, "UPMC2", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + + spr_register(env, SPR_7XX_UPMC3, "UPMC3", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + + spr_register(env, SPR_7XX_UPMC4, "UPMC4", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + + spr_register(env, SPR_7XX_USIAR, "USIAR", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* External access control */ + spr_register(env, SPR_EAR, "EAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Processor identification */ spr_register(env, SPR_PIR, "PIR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_pir, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_74XX_MMCR2, "MMCR2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_74XX_UMMCR2, "UMMCR2", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); - /* XXX: not implemented */ + spr_register(env, SPR_BAMR, "BAMR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MSSCR0, "MSSCR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Hardware implementation registers */ - /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -917,7 +623,7 @@ static void register_74xx_sprs(CPUPPCState *env) &spr_read_generic, &spr_write_generic, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, spr_access_nop, @@ -927,78 +633,22 @@ static void register_74xx_sprs(CPUPPCState *env) static void register_l3_ctrl(CPUPPCState *env) { /* L3CR */ - /* XXX : not implemented */ spr_register(env, SPR_L3CR, "L3CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* L3ITCR0 */ - /* XXX : not implemented */ spr_register(env, SPR_L3ITCR0, "L3ITCR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* L3PM */ - /* XXX : not implemented */ spr_register(env, SPR_L3PM, "L3PM", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } -static void register_74xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways) -{ -#if !defined(CONFIG_USER_ONLY) - env->nb_tlb = nb_tlbs; - env->nb_ways = nb_ways; - env->id_tlbs = 1; - env->tlb_type = TLB_6XX; - /* XXX : not implemented */ - spr_register(env, SPR_PTEHI, "PTEHI", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_PTELO, "PTELO", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_TLBMISS, "TLBMISS", - SPR_NOACCESS, SPR_NOACCESS, - 
&spr_read_generic, &spr_write_generic, - 0x00000000); -#endif -} - -static void register_usprg3_sprs(CPUPPCState *env) -{ - spr_register(env, SPR_USPRG3, "USPRG3", - &spr_read_ureg, SPR_NOACCESS, - &spr_read_ureg, SPR_NOACCESS, - 0x00000000); -} - -static void register_usprgh_sprs(CPUPPCState *env) -{ - spr_register(env, SPR_USPRG4, "USPRG4", - &spr_read_ureg, SPR_NOACCESS, - &spr_read_ureg, SPR_NOACCESS, - 0x00000000); - spr_register(env, SPR_USPRG5, "USPRG5", - &spr_read_ureg, SPR_NOACCESS, - &spr_read_ureg, SPR_NOACCESS, - 0x00000000); - spr_register(env, SPR_USPRG6, "USPRG6", - &spr_read_ureg, SPR_NOACCESS, - &spr_read_ureg, SPR_NOACCESS, - 0x00000000); - spr_register(env, SPR_USPRG7, "USPRG7", - &spr_read_ureg, SPR_NOACCESS, - &spr_read_ureg, SPR_NOACCESS, - 0x00000000); -} - /* PowerPC BookE SPR */ static void register_BookE_sprs(CPUPPCState *env, uint64_t ivor_mask) { @@ -1051,37 +701,36 @@ static void register_BookE_sprs(CPUPPCState *env, uint64_t ivor_mask) &spr_read_generic, &spr_write_generic, 0x00000000); /* Debug */ - /* XXX : not implemented */ spr_register(env, SPR_BOOKE_IAC1, "IAC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_IAC2, "IAC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DAC1, "DAC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DAC2, "DAC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DBCR0, "DBCR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_40x_dbcr0, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DBCR1, "DBCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DBCR2, "DBCR2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -1094,7 +743,7 @@ static void register_BookE_sprs(CPUPPCState *env, uint64_t ivor_mask) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DBSR, "DBSR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_clear, @@ -1203,7 +852,6 @@ static void register_BookE206_sprs(CPUPPCState *env, uint32_t mas_mask, int i; /* TLB assist registers */ - /* XXX : not implemented */ for (i = 0; i < 8; i++) { if (mas_mask & (1 << i)) { spr_register(env, mas_sprn[i], mas_names[i], @@ -1215,14 +863,12 @@ static void register_BookE206_sprs(CPUPPCState *env, uint32_t mas_mask, } } if (env->nb_pids > 1) { - /* XXX : not implemented */ spr_register(env, SPR_BOOKE_PID1, "PID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_booke_pid, 0x00000000); } if (env->nb_pids > 2) { - /* XXX : not implemented */ spr_register(env, SPR_BOOKE_PID2, "PID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_booke_pid, @@ -1238,7 +884,6 @@ static void register_BookE206_sprs(CPUPPCState *env, uint32_t mas_mask, &spr_read_generic, &spr_write_epsc, 0x00000000); - /* XXX : not implemented */ spr_register(env, SPR_MMUCFG, "MMUCFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, @@ -1273,131 +918,127 @@ static void register_BookE206_sprs(CPUPPCState *env, uint32_t mas_mask, break; } #endif - - register_usprgh_sprs(env); } /* SPR 
specific to PowerPC 440 implementation */ static void register_440_sprs(CPUPPCState *env) { /* Cache control */ - /* XXX : not implemented */ spr_register(env, SPR_440_DNV0, "DNV0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_DNV1, "DNV1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_DNV2, "DNV2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_DNV3, "DNV3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_DTV0, "DTV0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_DTV1, "DTV1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_DTV2, "DTV2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_DTV3, "DTV3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_DVLIM, "DVLIM", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_INV0, "INV0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_INV1, "INV1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_INV2, "INV2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_INV3, "INV3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_ITV0, "ITV0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_ITV1, "ITV1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_ITV2, "ITV2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_ITV3, "ITV3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_IVLIM, "IVLIM", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Cache debug */ - /* XXX : not implemented */ spr_register(env, SPR_BOOKE_DCDBTRH, "DCDBTRH", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DCDBTRL, "DCDBTRL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_ICDBDR, "ICDBDR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_ICDBTRH, "ICDBTRH", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_ICDBTRL, 
"ICDBTRL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_DBDR, "DBDR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -1416,6 +1057,32 @@ static void register_440_sprs(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); + + /* Processor identification */ + spr_register(env, SPR_BOOKE_PIR, "PIR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_pir, + 0x00000000); + + spr_register(env, SPR_BOOKE_IAC3, "IAC3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_BOOKE_IAC4, "IAC4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_BOOKE_DVC1, "DVC1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_BOOKE_DVC2, "DVC2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); } /* SPR shared between PowerPC 40x implementations */ @@ -1465,11 +1132,11 @@ static void register_40x_sprs(CPUPPCState *env) 0x00000000); spr_register(env, SPR_40x_TCR, "TCR", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_booke_tcr, + &spr_read_generic, &spr_write_40x_tcr, 0x00000000); spr_register(env, SPR_40x_TSR, "TSR", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_booke_tsr, + &spr_read_generic, &spr_write_40x_tsr, 0x00000000); } @@ -1479,30 +1146,29 @@ static void register_405_sprs(CPUPPCState *env) /* MMU */ spr_register(env, SPR_40x_PID, "PID", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_40x_pid, 0x00000000); spr_register(env, SPR_4xx_CCR0, "CCR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00700000); /* Debug interface */ - /* XXX : not implemented */ spr_register(env, SPR_40x_DBCR0, "DBCR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_40x_dbcr0, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_405_DBCR1, "DBCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_40x_DBSR, "DBSR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_clear, /* Last reset was system reset */ 0x00000300); - /* XXX : not implemented */ + spr_register(env, SPR_40x_DAC1, "DAC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -1511,17 +1177,17 @@ static void register_405_sprs(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_405_DVC1, "DVC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_405_DVC2, "DVC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_40x_IAC1, "IAC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -1530,18 +1196,17 @@ static void register_405_sprs(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_405_IAC3, "IAC3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_405_IAC4, "IAC4", SPR_NOACCESS, SPR_NOACCESS, 
&spr_read_generic, &spr_write_generic, 0x00000000); /* Storage control */ - /* XXX: TODO: not implemented */ spr_register(env, SPR_405_SLER, "SLER", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_40x_sler, @@ -1550,7 +1215,7 @@ static void register_405_sprs(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_405_SU0R, "SU0R", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -1576,68 +1241,8 @@ static void register_405_sprs(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, spr_read_generic, &spr_write_generic, 0x00000000); - register_usprgh_sprs(env); -} -/* SPR shared between PowerPC 401 & 403 implementations */ -static void register_401_403_sprs(CPUPPCState *env) -{ - /* Time base */ - spr_register(env, SPR_403_VTBL, "TBL", - &spr_read_tbl, SPR_NOACCESS, - &spr_read_tbl, SPR_NOACCESS, - 0x00000000); - spr_register(env, SPR_403_TBL, "TBL", - SPR_NOACCESS, SPR_NOACCESS, - SPR_NOACCESS, &spr_write_tbl, - 0x00000000); - spr_register(env, SPR_403_VTBU, "TBU", - &spr_read_tbu, SPR_NOACCESS, - &spr_read_tbu, SPR_NOACCESS, - 0x00000000); - spr_register(env, SPR_403_TBU, "TBU", - SPR_NOACCESS, SPR_NOACCESS, - SPR_NOACCESS, &spr_write_tbu, - 0x00000000); - /* Debug */ - /* not emulated, as QEMU do not emulate caches */ - spr_register(env, SPR_403_CDBCR, "CDBCR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); -} - -/* SPR specific to PowerPC 401 implementation */ -static void register_401_sprs(CPUPPCState *env) -{ - /* Debug interface */ - /* XXX : not implemented */ - spr_register(env, SPR_40x_DBCR0, "DBCR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_40x_dbcr0, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_40x_DBSR, "DBSR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_clear, - /* Last reset was system reset */ - 0x00000300); - /* XXX : not implemented */ - spr_register(env, SPR_40x_DAC1, "DAC", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_40x_IAC1, "IAC", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* Storage control */ - /* XXX: TODO: not implemented */ - spr_register(env, SPR_405_SLER, "SLER", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_40x_sler, - 0x00000000); + /* Bus access control */ /* not emulated, as QEMU never does speculative access */ spr_register(env, SPR_40x_SGR, "SGR", SPR_NOACCESS, SPR_NOACCESS, @@ -1650,98 +1255,6 @@ static void register_401_sprs(CPUPPCState *env) 0x00000000); } -static void register_401x2_sprs(CPUPPCState *env) -{ - register_401_sprs(env); - spr_register(env, SPR_40x_PID, "PID", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - spr_register(env, SPR_40x_ZPR, "ZPR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); -} - -/* SPR specific to PowerPC 403 implementation */ -static void register_403_sprs(CPUPPCState *env) -{ - /* Debug interface */ - /* XXX : not implemented */ - spr_register(env, SPR_40x_DBCR0, "DBCR0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_40x_dbcr0, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_40x_DBSR, "DBSR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_clear, - /* Last reset was system reset */ - 0x00000300); - /* XXX : not implemented 
*/ - spr_register(env, SPR_40x_DAC1, "DAC1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_40x_DAC2, "DAC2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_40x_IAC1, "IAC1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_40x_IAC2, "IAC2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); -} - -static void register_403_real_sprs(CPUPPCState *env) -{ - spr_register(env, SPR_403_PBL1, "PBL1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_403_pbr, &spr_write_403_pbr, - 0x00000000); - spr_register(env, SPR_403_PBU1, "PBU1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_403_pbr, &spr_write_403_pbr, - 0x00000000); - spr_register(env, SPR_403_PBL2, "PBL2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_403_pbr, &spr_write_403_pbr, - 0x00000000); - spr_register(env, SPR_403_PBU2, "PBU2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_403_pbr, &spr_write_403_pbr, - 0x00000000); -} - -static void register_403_mmu_sprs(CPUPPCState *env) -{ - /* MMU */ - spr_register(env, SPR_40x_PID, "PID", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - spr_register(env, SPR_40x_ZPR, "ZPR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); -} - -/* SPR specific to PowerPC compression coprocessor extension */ -static void register_compress_sprs(CPUPPCState *env) -{ - /* XXX : not implemented */ - spr_register(env, SPR_401_SKR, "SKR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); -} static void register_5xx_8xx_sprs(CPUPPCState *env) { @@ -1759,102 +1272,102 @@ static void register_5xx_8xx_sprs(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_decr, &spr_write_decr, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_EIE, "EIE", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_EID, "EID", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_NRI, "NRI", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_CMPA, "CMPA", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_CMPB, "CMPB", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_CMPC, "CMPC", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_CMPD, "CMPD", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_ECR, "ECR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_DER, "DER", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_COUNTA, "COUNTA", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + 
spr_register(env, SPR_MPC_COUNTB, "COUNTB", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_CMPE, "CMPE", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_CMPF, "CMPF", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_CMPG, "CMPG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_CMPH, "CMPH", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_LCTRL1, "LCTRL1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_LCTRL2, "LCTRL2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_BAR, "BAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_DPDR, "DPDR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_IMMR, "IMMR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -1863,107 +1376,106 @@ static void register_5xx_8xx_sprs(CPUPPCState *env) static void register_5xx_sprs(CPUPPCState *env) { - /* XXX : not implemented */ spr_register(env, SPR_RCPU_MI_GRA, "MI_GRA", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_GRA, "L2U_GRA", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RPCU_BBCMCR, "L2U_BBCMCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_MCR, "L2U_MCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RCPU_MI_RBA0, "MI_RBA0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RCPU_MI_RBA1, "MI_RBA1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RCPU_MI_RBA2, "MI_RBA2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RCPU_MI_RBA3, "MI_RBA3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_RBA0, "L2U_RBA0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_RBA1, "L2U_RBA1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_RBA2, "L2U_RBA2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_RBA3, "L2U_RBA3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented 
*/ + spr_register(env, SPR_RCPU_MI_RA0, "MI_RA0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RCPU_MI_RA1, "MI_RA1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RCPU_MI_RA2, "MI_RA2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RCPU_MI_RA3, "MI_RA3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_RA0, "L2U_RA0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_RA1, "L2U_RA1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_RA2, "L2U_RA2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_RA3, "L2U_RA3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_RCPU_FPECR, "FPECR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -1972,127 +1484,127 @@ static void register_5xx_sprs(CPUPPCState *env) static void register_8xx_sprs(CPUPPCState *env) { - /* XXX : not implemented */ + spr_register(env, SPR_MPC_IC_CST, "IC_CST", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_IC_ADR, "IC_ADR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_IC_DAT, "IC_DAT", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_DC_CST, "DC_CST", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_DC_ADR, "DC_ADR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_DC_DAT, "DC_DAT", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_MI_CTR, "MI_CTR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_MI_AP, "MI_AP", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_MI_EPN, "MI_EPN", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_MI_TWC, "MI_TWC", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_MI_RPN, "MI_RPN", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_MI_DBCAM, "MI_DBCAM", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_MI_DBRAM0, "MI_DBRAM0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not 
implemented */ + spr_register(env, SPR_MPC_MI_DBRAM1, "MI_DBRAM1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_CTR, "MD_CTR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_CASID, "MD_CASID", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_AP, "MD_AP", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_EPN, "MD_EPN", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_TWB, "MD_TWB", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_TWC, "MD_TWC", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_RPN, "MD_RPN", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_TW, "MD_TW", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_DBCAM, "MD_DBCAM", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_DBRAM0, "MD_DBRAM0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_DBRAM1, "MD_DBRAM1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -2129,26 +1641,6 @@ static void register_8xx_sprs(CPUPPCState *env) /*****************************************************************************/ /* Exception vectors models */ -static void init_excp_4xx_real(CPUPPCState *env) -{ -#if !defined(CONFIG_USER_ONLY) - env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000100; - env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; - env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; - env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; - env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; - env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; - env->excp_vectors[POWERPC_EXCP_PIT] = 0x00001000; - env->excp_vectors[POWERPC_EXCP_FIT] = 0x00001010; - env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001020; - env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00002000; - env->ivor_mask = 0x0000FFF0UL; - env->ivpr_mask = 0xFFFF0000UL; - /* Hardware reset vector */ - env->hreset_vector = 0xFFFFFFFCUL; -#endif -} - static void init_excp_4xx_softmmu(CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) @@ -2181,7 +1673,7 @@ static void init_excp_MPC5xx(CPUPPCState *env) env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; - env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; @@ -2208,7 +1700,7 @@ static void init_excp_MPC8xx(CPUPPCState *env) env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; 
env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; - env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; @@ -2274,8 +1766,14 @@ static void init_excp_e200(CPUPPCState *env, target_ulong ivpr_mask) env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00000000; env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00000000; env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00000000; + /* + * These two are the same IVOR as POWERPC_EXCP_VPU and + * POWERPC_EXCP_VPUA. We deal with that when dispatching at + * powerpc_excp(). + */ env->excp_vectors[POWERPC_EXCP_SPEU] = 0x00000000; env->excp_vectors[POWERPC_EXCP_EFPDI] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_EFPRI] = 0x00000000; env->ivor_mask = 0x0000FFF7UL; env->ivpr_mask = ivpr_mask; @@ -2310,53 +1808,6 @@ static void init_excp_BookE(CPUPPCState *env) #endif } -static void init_excp_601(CPUPPCState *env) -{ -#if !defined(CONFIG_USER_ONLY) - env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; - env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; - env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; - env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; - env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; - env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; - env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; - env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; - env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; - env->excp_vectors[POWERPC_EXCP_IO] = 0x00000A00; - env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; - env->excp_vectors[POWERPC_EXCP_RUNM] = 0x00002000; - /* Hardware reset vector */ - env->hreset_vector = 0x00000100UL; -#endif -} - -static void init_excp_602(CPUPPCState *env) -{ -#if !defined(CONFIG_USER_ONLY) - /* XXX: exception prefix has a special behavior on 602 */ - env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; - env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; - env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; - env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; - env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; - env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; - env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; - env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; - env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; - env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; - env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; - env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000; - env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100; - env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200; - env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; - env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; - env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001500; - env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001600; - /* Hardware reset vector */ - env->hreset_vector = 0x00000100UL; -#endif -} - static void init_excp_603(CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) @@ -2538,9 +1989,6 @@ static void init_excp_7450(CPUPPCState *env) env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20; - env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000; - env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100; - env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200; env->excp_vectors[POWERPC_EXCP_IABR] = 
0x00001300; env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001600; @@ -2616,6 +2064,10 @@ static void init_excp_POWER8(CPUPPCState *env) env->excp_vectors[POWERPC_EXCP_FU] = 0x00000F60; env->excp_vectors[POWERPC_EXCP_HV_FU] = 0x00000F80; env->excp_vectors[POWERPC_EXCP_SDOOR_HV] = 0x00000E80; + + /* Userland exceptions without vector value in PowerISA v3.1 */ + env->excp_vectors[POWERPC_EXCP_PERFM_EBB] = 0x0; + env->excp_vectors[POWERPC_EXCP_EXTERNAL_EBB] = 0x0; #endif } @@ -2636,18 +2088,6 @@ static void init_excp_POWER10(CPUPPCState *env) #endif -/*****************************************************************************/ -/* Power management enable checks */ -static int check_pow_none(CPUPPCState *env) -{ - return 0; -} - -static int check_pow_nocheck(CPUPPCState *env) -{ - return 1; -} - static int check_pow_hid0(CPUPPCState *env) { if (env->spr[SPR_HID0] & 0x00E00000) { @@ -2666,377 +2106,12 @@ static int check_pow_hid0_74xx(CPUPPCState *env) return 0; } -/*****************************************************************************/ -/* PowerPC implementations definitions */ - -#define POWERPC_FAMILY(_name) \ - static void \ - glue(glue(ppc_, _name), _cpu_family_class_init)(ObjectClass *, void *); \ - \ - static const TypeInfo \ - glue(glue(ppc_, _name), _cpu_family_type_info) = { \ - .name = stringify(_name) "-family-" TYPE_POWERPC_CPU, \ - .parent = TYPE_POWERPC_CPU, \ - .abstract = true, \ - .class_init = glue(glue(ppc_, _name), _cpu_family_class_init), \ - }; \ - \ - static void glue(glue(ppc_, _name), _cpu_family_register_types)(void) \ - { \ - type_register_static( \ - &glue(glue(ppc_, _name), _cpu_family_type_info)); \ - } \ - \ - type_init(glue(glue(ppc_, _name), _cpu_family_register_types)) \ - \ - static void glue(glue(ppc_, _name), _cpu_family_class_init) - -static void init_proc_401(CPUPPCState *env) -{ - register_40x_sprs(env); - register_401_403_sprs(env); - register_401_sprs(env); - init_excp_4xx_real(env); - env->dcache_line_size = 32; - env->icache_line_size = 32; - /* Allocate hardware IRQ controller */ - ppc40x_irq_init(env_archcpu(env)); - - SET_FIT_PERIOD(12, 16, 20, 24); - SET_WDT_PERIOD(16, 20, 24, 28); -} - -POWERPC_FAMILY(401)(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); - - dc->desc = "PowerPC 401"; - pcc->init_proc = init_proc_401; - pcc->check_pow = check_pow_nocheck; - pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | - PPC_WRTEE | PPC_DCR | - PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | - PPC_CACHE_DCBZ | - PPC_MEM_SYNC | PPC_MEM_EIEIO | - PPC_4xx_COMMON | PPC_40x_EXCP; - pcc->msr_mask = (1ull << MSR_KEY) | - (1ull << MSR_POW) | - (1ull << MSR_CE) | - (1ull << MSR_ILE) | - (1ull << MSR_EE) | - (1ull << MSR_PR) | - (1ull << MSR_ME) | - (1ull << MSR_DE) | - (1ull << MSR_LE); - pcc->mmu_model = POWERPC_MMU_REAL; - pcc->excp_model = POWERPC_EXCP_40x; - pcc->bus_model = PPC_FLAGS_INPUT_401; - pcc->bfd_mach = bfd_mach_ppc_403; - pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE | - POWERPC_FLAG_BUS_CLK; -} - -static void init_proc_401x2(CPUPPCState *env) -{ - register_40x_sprs(env); - register_401_403_sprs(env); - register_401x2_sprs(env); - register_compress_sprs(env); - /* Memory management */ -#if !defined(CONFIG_USER_ONLY) - env->nb_tlb = 64; - env->nb_ways = 1; - env->id_tlbs = 0; - env->tlb_type = TLB_EMB; -#endif - init_excp_4xx_softmmu(env); - env->dcache_line_size = 32; - env->icache_line_size = 32; - /* Allocate hardware IRQ controller 
*/ - ppc40x_irq_init(env_archcpu(env)); - - SET_FIT_PERIOD(12, 16, 20, 24); - SET_WDT_PERIOD(16, 20, 24, 28); -} - -POWERPC_FAMILY(401x2)(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); - - dc->desc = "PowerPC 401x2"; - pcc->init_proc = init_proc_401x2; - pcc->check_pow = check_pow_nocheck; - pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | - PPC_DCR | PPC_WRTEE | - PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | - PPC_CACHE_DCBZ | PPC_CACHE_DCBA | - PPC_MEM_SYNC | PPC_MEM_EIEIO | - PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | - PPC_4xx_COMMON | PPC_40x_EXCP; - pcc->msr_mask = (1ull << 20) | - (1ull << MSR_KEY) | - (1ull << MSR_POW) | - (1ull << MSR_CE) | - (1ull << MSR_ILE) | - (1ull << MSR_EE) | - (1ull << MSR_PR) | - (1ull << MSR_ME) | - (1ull << MSR_DE) | - (1ull << MSR_IR) | - (1ull << MSR_DR) | - (1ull << MSR_LE); - pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z; - pcc->excp_model = POWERPC_EXCP_40x; - pcc->bus_model = PPC_FLAGS_INPUT_401; - pcc->bfd_mach = bfd_mach_ppc_403; - pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE | - POWERPC_FLAG_BUS_CLK; -} - -static void init_proc_401x3(CPUPPCState *env) -{ - register_40x_sprs(env); - register_401_403_sprs(env); - register_401_sprs(env); - register_401x2_sprs(env); - register_compress_sprs(env); - init_excp_4xx_softmmu(env); - env->dcache_line_size = 32; - env->icache_line_size = 32; - /* Allocate hardware IRQ controller */ - ppc40x_irq_init(env_archcpu(env)); - - SET_FIT_PERIOD(12, 16, 20, 24); - SET_WDT_PERIOD(16, 20, 24, 28); -} - -POWERPC_FAMILY(401x3)(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); - - dc->desc = "PowerPC 401x3"; - pcc->init_proc = init_proc_401x3; - pcc->check_pow = check_pow_nocheck; - pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | - PPC_DCR | PPC_WRTEE | - PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | - PPC_CACHE_DCBZ | PPC_CACHE_DCBA | - PPC_MEM_SYNC | PPC_MEM_EIEIO | - PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | - PPC_4xx_COMMON | PPC_40x_EXCP; - pcc->msr_mask = (1ull << 20) | - (1ull << MSR_KEY) | - (1ull << MSR_POW) | - (1ull << MSR_CE) | - (1ull << MSR_ILE) | - (1ull << MSR_EE) | - (1ull << MSR_PR) | - (1ull << MSR_ME) | - (1ull << MSR_DWE) | - (1ull << MSR_DE) | - (1ull << MSR_IR) | - (1ull << MSR_DR) | - (1ull << MSR_LE); - pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z; - pcc->excp_model = POWERPC_EXCP_40x; - pcc->bus_model = PPC_FLAGS_INPUT_401; - pcc->bfd_mach = bfd_mach_ppc_403; - pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE | - POWERPC_FLAG_BUS_CLK; -} - -static void init_proc_IOP480(CPUPPCState *env) -{ - register_40x_sprs(env); - register_401_403_sprs(env); - register_401x2_sprs(env); - register_compress_sprs(env); - /* Memory management */ -#if !defined(CONFIG_USER_ONLY) - env->nb_tlb = 64; - env->nb_ways = 1; - env->id_tlbs = 0; - env->tlb_type = TLB_EMB; -#endif - init_excp_4xx_softmmu(env); - env->dcache_line_size = 32; - env->icache_line_size = 32; - /* Allocate hardware IRQ controller */ - ppc40x_irq_init(env_archcpu(env)); - - SET_FIT_PERIOD(8, 12, 16, 20); - SET_WDT_PERIOD(16, 20, 24, 28); -} - -POWERPC_FAMILY(IOP480)(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); - - dc->desc = "IOP480"; - pcc->init_proc = init_proc_IOP480; - pcc->check_pow = check_pow_nocheck; - pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | - PPC_DCR | PPC_WRTEE | - PPC_CACHE | PPC_CACHE_ICBI | 
PPC_40x_ICBT | - PPC_CACHE_DCBZ | PPC_CACHE_DCBA | - PPC_MEM_SYNC | PPC_MEM_EIEIO | - PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | - PPC_4xx_COMMON | PPC_40x_EXCP; - pcc->msr_mask = (1ull << 20) | - (1ull << MSR_KEY) | - (1ull << MSR_POW) | - (1ull << MSR_CE) | - (1ull << MSR_ILE) | - (1ull << MSR_EE) | - (1ull << MSR_PR) | - (1ull << MSR_ME) | - (1ull << MSR_DE) | - (1ull << MSR_IR) | - (1ull << MSR_DR) | - (1ull << MSR_LE); - pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z; - pcc->excp_model = POWERPC_EXCP_40x; - pcc->bus_model = PPC_FLAGS_INPUT_401; - pcc->bfd_mach = bfd_mach_ppc_403; - pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE | - POWERPC_FLAG_BUS_CLK; -} - -static void init_proc_403(CPUPPCState *env) -{ - register_40x_sprs(env); - register_401_403_sprs(env); - register_403_sprs(env); - register_403_real_sprs(env); - init_excp_4xx_real(env); - env->dcache_line_size = 32; - env->icache_line_size = 32; - /* Allocate hardware IRQ controller */ - ppc40x_irq_init(env_archcpu(env)); - - SET_FIT_PERIOD(8, 12, 16, 20); - SET_WDT_PERIOD(16, 20, 24, 28); -} - -POWERPC_FAMILY(403)(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); - - dc->desc = "PowerPC 403"; - pcc->init_proc = init_proc_403; - pcc->check_pow = check_pow_nocheck; - pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | - PPC_DCR | PPC_WRTEE | - PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | - PPC_CACHE_DCBZ | - PPC_MEM_SYNC | PPC_MEM_EIEIO | - PPC_4xx_COMMON | PPC_40x_EXCP; - pcc->msr_mask = (1ull << MSR_POW) | - (1ull << MSR_CE) | - (1ull << MSR_ILE) | - (1ull << MSR_EE) | - (1ull << MSR_PR) | - (1ull << MSR_ME) | - (1ull << MSR_PE) | - (1ull << MSR_PX) | - (1ull << MSR_LE); - pcc->mmu_model = POWERPC_MMU_REAL; - pcc->excp_model = POWERPC_EXCP_40x; - pcc->bus_model = PPC_FLAGS_INPUT_401; - pcc->bfd_mach = bfd_mach_ppc_403; - pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_PX | - POWERPC_FLAG_BUS_CLK; -} - -static void init_proc_403GCX(CPUPPCState *env) -{ - register_40x_sprs(env); - register_401_403_sprs(env); - register_403_sprs(env); - register_403_real_sprs(env); - register_403_mmu_sprs(env); - /* Bus access control */ - /* not emulated, as QEMU never does speculative access */ - spr_register(env, SPR_40x_SGR, "SGR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0xFFFFFFFF); - /* not emulated, as QEMU do not emulate caches */ - spr_register(env, SPR_40x_DCWR, "DCWR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* Memory management */ -#if !defined(CONFIG_USER_ONLY) - env->nb_tlb = 64; - env->nb_ways = 1; - env->id_tlbs = 0; - env->tlb_type = TLB_EMB; -#endif - init_excp_4xx_softmmu(env); - env->dcache_line_size = 32; - env->icache_line_size = 32; - /* Allocate hardware IRQ controller */ - ppc40x_irq_init(env_archcpu(env)); - - SET_FIT_PERIOD(8, 12, 16, 20); - SET_WDT_PERIOD(16, 20, 24, 28); -} - -POWERPC_FAMILY(403GCX)(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); - - dc->desc = "PowerPC 403 GCX"; - pcc->init_proc = init_proc_403GCX; - pcc->check_pow = check_pow_nocheck; - pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | - PPC_DCR | PPC_WRTEE | - PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | - PPC_CACHE_DCBZ | - PPC_MEM_SYNC | PPC_MEM_EIEIO | - PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | - PPC_4xx_COMMON | PPC_40x_EXCP; - pcc->msr_mask = (1ull << MSR_POW) | - (1ull << MSR_CE) | - (1ull << MSR_ILE) | - (1ull << MSR_EE) | - (1ull 
<< MSR_PR) | - (1ull << MSR_ME) | - (1ull << MSR_PE) | - (1ull << MSR_PX) | - (1ull << MSR_LE); - pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z; - pcc->excp_model = POWERPC_EXCP_40x; - pcc->bus_model = PPC_FLAGS_INPUT_401; - pcc->bfd_mach = bfd_mach_ppc_403; - pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_PX | - POWERPC_FLAG_BUS_CLK; -} - static void init_proc_405(CPUPPCState *env) { - /* Time base */ - register_tbl(env); register_40x_sprs(env); register_405_sprs(env); - /* Bus access control */ - /* not emulated, as QEMU never does speculative access */ - spr_register(env, SPR_40x_SGR, "SGR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0xFFFFFFFF); - /* not emulated, as QEMU do not emulate caches */ - spr_register(env, SPR_40x_DCWR, "DCWR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); + register_usprgh_sprs(env); + /* Memory management */ #if !defined(CONFIG_USER_ONLY) env->nb_tlb = 64; @@ -3069,11 +2144,12 @@ POWERPC_FAMILY(405)(ObjectClass *oc, void *data) PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | PPC_4xx_COMMON | PPC_405_MAC | PPC_40x_EXCP; - pcc->msr_mask = (1ull << MSR_POW) | + pcc->msr_mask = (1ull << MSR_WE) | (1ull << MSR_CE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | + (1ull << MSR_ME) | (1ull << MSR_DWE) | (1ull << MSR_DE) | (1ull << MSR_IR) | @@ -3088,37 +2164,10 @@ POWERPC_FAMILY(405)(ObjectClass *oc, void *data) static void init_proc_440EP(CPUPPCState *env) { - /* Time base */ - register_tbl(env); register_BookE_sprs(env, 0x000000000000FFFFULL); register_440_sprs(env); register_usprgh_sprs(env); - /* Processor identification */ - spr_register(env, SPR_BOOKE_PIR, "PIR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_pir, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_BOOKE_IAC3, "IAC3", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_BOOKE_IAC4, "IAC4", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_BOOKE_DVC1, "DVC1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_BOOKE_DVC2, "DVC2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_MCSR, "MCSR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -3131,7 +2180,7 @@ static void init_proc_440EP(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_CCR1, "CCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -3230,36 +2279,10 @@ POWERPC_FAMILY(460EX)(ObjectClass *oc, void *data) static void init_proc_440GP(CPUPPCState *env) { - /* Time base */ - register_tbl(env); register_BookE_sprs(env, 0x000000000000FFFFULL); register_440_sprs(env); register_usprgh_sprs(env); - /* Processor identification */ - spr_register(env, SPR_BOOKE_PIR, "PIR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_pir, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_BOOKE_IAC3, "IAC3", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_BOOKE_IAC4, "IAC4", - SPR_NOACCESS, 
SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_BOOKE_DVC1, "DVC1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_BOOKE_DVC2, "DVC2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); + /* Memory management */ #if !defined(CONFIG_USER_ONLY) env->nb_tlb = 64; @@ -3311,122 +2334,12 @@ POWERPC_FAMILY(440GP)(ObjectClass *oc, void *data) POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; } -static void init_proc_440x4(CPUPPCState *env) -{ - /* Time base */ - register_tbl(env); - register_BookE_sprs(env, 0x000000000000FFFFULL); - register_440_sprs(env); - register_usprgh_sprs(env); - /* Processor identification */ - spr_register(env, SPR_BOOKE_PIR, "PIR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_pir, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_BOOKE_IAC3, "IAC3", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_BOOKE_IAC4, "IAC4", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_BOOKE_DVC1, "DVC1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_BOOKE_DVC2, "DVC2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* Memory management */ -#if !defined(CONFIG_USER_ONLY) - env->nb_tlb = 64; - env->nb_ways = 1; - env->id_tlbs = 0; - env->tlb_type = TLB_EMB; -#endif - init_excp_BookE(env); - env->dcache_line_size = 32; - env->icache_line_size = 32; - /* XXX: TODO: allocate internal IRQ controller */ - - SET_FIT_PERIOD(12, 16, 20, 24); - SET_WDT_PERIOD(20, 24, 28, 32); -} - -POWERPC_FAMILY(440x4)(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); - - dc->desc = "PowerPC 440x4"; - pcc->init_proc = init_proc_440x4; - pcc->check_pow = check_pow_nocheck; - pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | - PPC_DCR | PPC_WRTEE | - PPC_CACHE | PPC_CACHE_ICBI | - PPC_CACHE_DCBZ | PPC_CACHE_DCBA | - PPC_MEM_TLBSYNC | PPC_MFTB | - PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | - PPC_440_SPEC; - pcc->msr_mask = (1ull << MSR_POW) | - (1ull << MSR_CE) | - (1ull << MSR_EE) | - (1ull << MSR_PR) | - (1ull << MSR_FP) | - (1ull << MSR_ME) | - (1ull << MSR_FE0) | - (1ull << MSR_DWE) | - (1ull << MSR_DE) | - (1ull << MSR_FE1) | - (1ull << MSR_IR) | - (1ull << MSR_DR); - pcc->mmu_model = POWERPC_MMU_BOOKE; - pcc->excp_model = POWERPC_EXCP_BOOKE; - pcc->bus_model = PPC_FLAGS_INPUT_BookE; - pcc->bfd_mach = bfd_mach_ppc_403; - pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE | - POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; -} - static void init_proc_440x5(CPUPPCState *env) { - /* Time base */ - register_tbl(env); register_BookE_sprs(env, 0x000000000000FFFFULL); register_440_sprs(env); register_usprgh_sprs(env); - /* Processor identification */ - spr_register(env, SPR_BOOKE_PIR, "PIR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_pir, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_BOOKE_IAC3, "IAC3", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_BOOKE_IAC4, "IAC4", - SPR_NOACCESS, SPR_NOACCESS, 
- &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_BOOKE_DVC1, "DVC1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_BOOKE_DVC2, "DVC2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_MCSR, "MCSR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -3439,7 +2352,7 @@ static void init_proc_440x5(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_440_CCR1, "CCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -3535,8 +2448,6 @@ POWERPC_FAMILY(440x5wDFPU)(ObjectClass *oc, void *data) static void init_proc_MPC5xx(CPUPPCState *env) { - /* Time base */ - register_tbl(env); register_5xx_8xx_sprs(env); register_5xx_sprs(env); init_excp_MPC5xx(env); @@ -3570,7 +2481,7 @@ POWERPC_FAMILY(MPC5xx)(ObjectClass *oc, void *data) (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_REAL; - pcc->excp_model = POWERPC_EXCP_603; + pcc->excp_model = POWERPC_EXCP_6xx; pcc->bus_model = PPC_FLAGS_INPUT_RCPU; pcc->bfd_mach = bfd_mach_ppc_505; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | @@ -3579,8 +2490,6 @@ POWERPC_FAMILY(MPC5xx)(ObjectClass *oc, void *data) static void init_proc_MPC8xx(CPUPPCState *env) { - /* Time base */ - register_tbl(env); register_5xx_8xx_sprs(env); register_8xx_sprs(env); init_excp_MPC8xx(env); @@ -3613,7 +2522,7 @@ POWERPC_FAMILY(MPC8xx)(ObjectClass *oc, void *data) (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_MPC8xx; - pcc->excp_model = POWERPC_EXCP_603; + pcc->excp_model = POWERPC_EXCP_6xx; pcc->bus_model = PPC_FLAGS_INPUT_RCPU; pcc->bfd_mach = bfd_mach_ppc_860; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | @@ -3624,34 +2533,10 @@ POWERPC_FAMILY(MPC8xx)(ObjectClass *oc, void *data) static void init_proc_G2(CPUPPCState *env) { - register_ne_601_sprs(env); + register_non_embedded_sprs(env); register_sdr1_sprs(env); - register_G2_755_sprs(env); register_G2_sprs(env); - /* Time base */ - register_tbl(env); - /* External access control */ - /* XXX : not implemented */ - spr_register(env, SPR_EAR, "EAR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* Hardware implementation register */ - /* XXX : not implemented */ - spr_register(env, SPR_HID0, "HID0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID1, "HID1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID2, "HID2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); + /* Memory management */ register_low_BATs(env); register_high_BATs(env); @@ -3694,62 +2579,20 @@ POWERPC_FAMILY(G2)(ObjectClass *oc, void *data) (1ull << MSR_DR) | (1ull << MSR_RI); pcc->mmu_model = POWERPC_MMU_SOFT_6xx; - pcc->excp_model = POWERPC_EXCP_G2; + pcc->excp_model = POWERPC_EXCP_6xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_ec603e; pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; } -static void init_proc_G2LE(CPUPPCState *env) -{ - register_ne_601_sprs(env); - register_sdr1_sprs(env); - 
register_G2_755_sprs(env); - register_G2_sprs(env); - /* Time base */ - register_tbl(env); - /* External access control */ - /* XXX : not implemented */ - spr_register(env, SPR_EAR, "EAR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* Hardware implementation register */ - /* XXX : not implemented */ - spr_register(env, SPR_HID0, "HID0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID1, "HID1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID2, "HID2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - - /* Memory management */ - register_low_BATs(env); - register_high_BATs(env); - register_6xx_7xx_soft_tlb(env, 64, 2); - init_excp_G2(env); - env->dcache_line_size = 32; - env->icache_line_size = 32; - /* Allocate hardware IRQ controller */ - ppc6xx_irq_init(env_archcpu(env)); -} - POWERPC_FAMILY(G2LE)(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); dc->desc = "PowerPC G2LE"; - pcc->init_proc = init_proc_G2LE; + pcc->init_proc = init_proc_G2; pcc->check_pow = check_pow_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | @@ -3776,7 +2619,7 @@ POWERPC_FAMILY(G2LE)(ObjectClass *oc, void *data) (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_SOFT_6xx; - pcc->excp_model = POWERPC_EXCP_G2; + pcc->excp_model = POWERPC_EXCP_6xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_ec603e; pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | @@ -3785,87 +2628,86 @@ POWERPC_FAMILY(G2LE)(ObjectClass *oc, void *data) static void init_proc_e200(CPUPPCState *env) { - /* Time base */ - register_tbl(env); register_BookE_sprs(env, 0x000000070000FFFFULL); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_SPEFSCR, "SPEFSCR", &spr_read_spefscr, &spr_write_spefscr, &spr_read_spefscr, &spr_write_spefscr, 0x00000000); /* Memory management */ register_BookE206_sprs(env, 0x0000005D, NULL, 0); - /* XXX : not implemented */ + register_usprgh_sprs(env); + spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_Exxx_ALTCTXCR, "ALTCTXCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_Exxx_BUCSR, "BUCSR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_Exxx_CTXCR, "CTXCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_Exxx_DBCNT, "DBCNT", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_Exxx_DBCR3, "DBCR3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_Exxx_L1CFG0, "L1CFG0", &spr_read_generic, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, 
SPR_Exxx_L1CSR0, "L1CSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_Exxx_L1FINV0, "L1FINV0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_TLB0CFG, "TLB0CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_TLB1CFG, "TLB1CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_IAC3, "IAC3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_IAC4, "IAC4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MMUCSR0, "MMUCSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -3938,106 +2780,6 @@ POWERPC_FAMILY(e200)(ObjectClass *oc, void *data) POWERPC_FLAG_BUS_CLK; } -static void init_proc_e300(CPUPPCState *env) -{ - register_ne_601_sprs(env); - register_sdr1_sprs(env); - register_603_sprs(env); - /* Time base */ - register_tbl(env); - /* hardware implementation registers */ - /* XXX : not implemented */ - spr_register(env, SPR_HID0, "HID0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID1, "HID1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID2, "HID2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* Breakpoints */ - /* XXX : not implemented */ - spr_register(env, SPR_DABR, "DABR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_DABR2, "DABR2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_IABR2, "IABR2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_IBCR, "IBCR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_DBCR, "DBCR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* Memory management */ - register_low_BATs(env); - register_high_BATs(env); - register_6xx_7xx_soft_tlb(env, 64, 2); - init_excp_603(env); - env->dcache_line_size = 32; - env->icache_line_size = 32; - /* Allocate hardware IRQ controller */ - ppc6xx_irq_init(env_archcpu(env)); -} - -POWERPC_FAMILY(e300)(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); - - dc->desc = "e300 core"; - pcc->init_proc = init_proc_e300; - pcc->check_pow = check_pow_hid0; - pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | - PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | - PPC_FLOAT_STFIWX | - PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | - PPC_MEM_SYNC | PPC_MEM_EIEIO | - PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | - PPC_SEGMENT | PPC_EXTERN; - pcc->msr_mask = (1ull << MSR_POW) | - (1ull << MSR_TGPR) | - (1ull << MSR_ILE) | - (1ull << MSR_EE) | - (1ull << MSR_PR) | - (1ull << 
MSR_FP) | - (1ull << MSR_ME) | - (1ull << MSR_FE0) | - (1ull << MSR_SE) | - (1ull << MSR_DE) | - (1ull << MSR_FE1) | - (1ull << MSR_AL) | - (1ull << MSR_EP) | - (1ull << MSR_IR) | - (1ull << MSR_DR) | - (1ull << MSR_RI) | - (1ull << MSR_LE); - pcc->mmu_model = POWERPC_MMU_SOFT_6xx; - pcc->excp_model = POWERPC_EXCP_603; - pcc->bus_model = PPC_FLAGS_INPUT_6xx; - pcc->bfd_mach = bfd_mach_ppc_603; - pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | - POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; -} - enum fsl_e500_version { fsl_e500v1, fsl_e500v2, @@ -4060,8 +2802,6 @@ static void init_proc_e500(CPUPPCState *env, int version) int i; #endif - /* Time base */ - register_tbl(env); /* * XXX The e500 doesn't implement IVOR7 and IVOR9, but doesn't * complain when accessing them. @@ -4082,13 +2822,18 @@ static void init_proc_e500(CPUPPCState *env, int version) break; } register_BookE_sprs(env, ivor_mask); - register_usprg3_sprs(env); + + spr_register(env, SPR_USPRG3, "USPRG3", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* Processor identification */ spr_register(env, SPR_BOOKE_PIR, "PIR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_pir, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_SPEFSCR, "SPEFSCR", &spr_read_spefscr, &spr_write_spefscr, &spr_read_spefscr, &spr_write_spefscr, @@ -4148,47 +2893,48 @@ static void init_proc_e500(CPUPPCState *env, int version) env->spr[SPR_PVR]); } register_BookE206_sprs(env, 0x000000DF, tlbncfg, mmucfg); - /* XXX : not implemented */ + register_usprgh_sprs(env); + spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_Exxx_BBEAR, "BBEAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_Exxx_BBTAR, "BBTAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_Exxx_MCAR, "MCAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_MCSR, "MCSR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_Exxx_NPIDR, "NPIDR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_Exxx_BUCSR, "BUCSR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_Exxx_L1CFG0, "L1CFG0", &spr_read_generic, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, @@ -4500,207 +3246,12 @@ POWERPC_FAMILY(e6500)(ObjectClass *oc, void *data) #endif /* Non-embedded PowerPC */ - -#define POWERPC_MSRR_601 (0x0000000000001040ULL) - -static void init_proc_601(CPUPPCState *env) -{ - register_ne_601_sprs(env); - register_sdr1_sprs(env); - register_601_sprs(env); - /* Hardware implementation registers */ - /* XXX : not implemented */ - spr_register(env, SPR_HID0, "HID0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_hid0_601, - 0x80010080); - /* XXX : not implemented */ - spr_register(env, SPR_HID1, "HID1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 
0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_601_HID2, "HID2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_601_HID5, "HID5", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* Memory management */ - init_excp_601(env); - /* - * XXX: beware that dcache line size is 64 - * but dcbz uses 32 bytes "sectors" - * XXX: this breaks clcs instruction ! - */ - env->dcache_line_size = 32; - env->icache_line_size = 64; - /* Allocate hardware IRQ controller */ - ppc6xx_irq_init(env_archcpu(env)); -} - -POWERPC_FAMILY(601)(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); - - dc->desc = "PowerPC 601"; - pcc->init_proc = init_proc_601; - pcc->check_pow = check_pow_none; - pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_POWER_BR | - PPC_FLOAT | - PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | - PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | - PPC_SEGMENT | PPC_EXTERN; - pcc->msr_mask = (1ull << MSR_EE) | - (1ull << MSR_PR) | - (1ull << MSR_FP) | - (1ull << MSR_ME) | - (1ull << MSR_FE0) | - (1ull << MSR_SE) | - (1ull << MSR_FE1) | - (1ull << MSR_EP) | - (1ull << MSR_IR) | - (1ull << MSR_DR); - pcc->mmu_model = POWERPC_MMU_601; - pcc->excp_model = POWERPC_EXCP_601; - pcc->bus_model = PPC_FLAGS_INPUT_6xx; - pcc->bfd_mach = bfd_mach_ppc_601; - pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK | POWERPC_FLAG_HID0_LE; -} - -#define POWERPC_MSRR_601v (0x0000000000001040ULL) - -static void init_proc_601v(CPUPPCState *env) -{ - init_proc_601(env); - /* XXX : not implemented */ - spr_register(env, SPR_601_HID15, "HID15", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); -} - -POWERPC_FAMILY(601v)(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); - - dc->desc = "PowerPC 601v"; - pcc->init_proc = init_proc_601v; - pcc->check_pow = check_pow_none; - pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_POWER_BR | - PPC_FLOAT | - PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | - PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | - PPC_SEGMENT | PPC_EXTERN; - pcc->msr_mask = (1ull << MSR_EE) | - (1ull << MSR_PR) | - (1ull << MSR_FP) | - (1ull << MSR_ME) | - (1ull << MSR_FE0) | - (1ull << MSR_SE) | - (1ull << MSR_FE1) | - (1ull << MSR_EP) | - (1ull << MSR_IR) | - (1ull << MSR_DR); - pcc->mmu_model = POWERPC_MMU_601; - pcc->bus_model = PPC_FLAGS_INPUT_6xx; - pcc->bfd_mach = bfd_mach_ppc_601; - pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK | POWERPC_FLAG_HID0_LE; -} - -static void init_proc_602(CPUPPCState *env) -{ - register_ne_601_sprs(env); - register_sdr1_sprs(env); - register_602_sprs(env); - /* Time base */ - register_tbl(env); - /* hardware implementation registers */ - /* XXX : not implemented */ - spr_register(env, SPR_HID0, "HID0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID1, "HID1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* Memory management */ - register_low_BATs(env); - register_6xx_7xx_soft_tlb(env, 64, 2); - init_excp_602(env); - env->dcache_line_size = 32; - env->icache_line_size = 32; - /* Allocate hardware IRQ controller */ - ppc6xx_irq_init(env_archcpu(env)); -} - -POWERPC_FAMILY(602)(ObjectClass *oc, void *data) -{ - DeviceClass 
*dc = DEVICE_CLASS(oc); - PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); - - dc->desc = "PowerPC 602"; - pcc->init_proc = init_proc_602; - pcc->check_pow = check_pow_hid0; - pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | - PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | - PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | - PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | - PPC_MEM_SYNC | PPC_MEM_EIEIO | - PPC_MEM_TLBIE | PPC_6xx_TLB | PPC_MEM_TLBSYNC | - PPC_SEGMENT | PPC_602_SPEC; - pcc->msr_mask = (1ull << MSR_VSX) | - (1ull << MSR_SA) | - (1ull << MSR_POW) | - (1ull << MSR_TGPR) | - (1ull << MSR_ILE) | - (1ull << MSR_EE) | - (1ull << MSR_PR) | - (1ull << MSR_FP) | - (1ull << MSR_ME) | - (1ull << MSR_FE0) | - (1ull << MSR_SE) | - (1ull << MSR_DE) | - (1ull << MSR_FE1) | - (1ull << MSR_EP) | - (1ull << MSR_IR) | - (1ull << MSR_DR) | - (1ull << MSR_RI) | - (1ull << MSR_LE); - /* XXX: 602 MMU is quite specific. Should add a special case */ - pcc->mmu_model = POWERPC_MMU_SOFT_6xx; - pcc->excp_model = POWERPC_EXCP_602; - pcc->bus_model = PPC_FLAGS_INPUT_6xx; - pcc->bfd_mach = bfd_mach_ppc_602; - pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | - POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; -} - static void init_proc_603(CPUPPCState *env) { - register_ne_601_sprs(env); + register_non_embedded_sprs(env); register_sdr1_sprs(env); register_603_sprs(env); - /* Time base */ - register_tbl(env); - /* hardware implementation registers */ - /* XXX : not implemented */ - spr_register(env, SPR_HID0, "HID0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID1, "HID1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); + /* Memory management */ register_low_BATs(env); register_6xx_7xx_soft_tlb(env, 64, 2); @@ -4743,48 +3294,20 @@ POWERPC_FAMILY(603)(ObjectClass *oc, void *data) (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_SOFT_6xx; - pcc->excp_model = POWERPC_EXCP_603; + pcc->excp_model = POWERPC_EXCP_6xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_603; pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; } -static void init_proc_603E(CPUPPCState *env) -{ - register_ne_601_sprs(env); - register_sdr1_sprs(env); - register_603_sprs(env); - /* Time base */ - register_tbl(env); - /* hardware implementation registers */ - /* XXX : not implemented */ - spr_register(env, SPR_HID0, "HID0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID1, "HID1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* Memory management */ - register_low_BATs(env); - register_6xx_7xx_soft_tlb(env, 64, 2); - init_excp_603(env); - env->dcache_line_size = 32; - env->icache_line_size = 32; - /* Allocate hardware IRQ controller */ - ppc6xx_irq_init(env_archcpu(env)); -} - POWERPC_FAMILY(603E)(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); dc->desc = "PowerPC 603e"; - pcc->init_proc = init_proc_603E; + pcc->init_proc = init_proc_603; pcc->check_pow = check_pow_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | @@ -4810,26 +3333,65 @@ POWERPC_FAMILY(603E)(ObjectClass *oc, void *data) (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_SOFT_6xx; - pcc->excp_model = 
POWERPC_EXCP_603E; + pcc->excp_model = POWERPC_EXCP_6xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_ec603e; pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; } +static void init_proc_e300(CPUPPCState *env) +{ + init_proc_603(env); + register_e300_sprs(env); +} + +POWERPC_FAMILY(e300)(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + + dc->desc = "e300 core"; + pcc->init_proc = init_proc_e300; + pcc->check_pow = check_pow_hid0; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | + PPC_SEGMENT | PPC_EXTERN; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_TGPR) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_AL) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_SOFT_6xx; + pcc->excp_model = POWERPC_EXCP_6xx; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_603; + pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; +} + static void init_proc_604(CPUPPCState *env) { - register_ne_601_sprs(env); + register_non_embedded_sprs(env); register_sdr1_sprs(env); register_604_sprs(env); - /* Time base */ - register_tbl(env); - /* Hardware implementation registers */ - /* XXX : not implemented */ - spr_register(env, SPR_HID0, "HID0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); + /* Memory management */ register_low_BATs(env); init_excp_604(env); @@ -4871,7 +3433,7 @@ POWERPC_FAMILY(604)(ObjectClass *oc, void *data) (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_32B; - pcc->excp_model = POWERPC_EXCP_604; + pcc->excp_model = POWERPC_EXCP_6xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_604; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | @@ -4880,44 +3442,8 @@ POWERPC_FAMILY(604)(ObjectClass *oc, void *data) static void init_proc_604E(CPUPPCState *env) { - register_ne_601_sprs(env); - register_sdr1_sprs(env); - register_604_sprs(env); - /* XXX : not implemented */ - spr_register(env, SPR_7XX_MMCR1, "MMCR1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_7XX_PMC3, "PMC3", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_7XX_PMC4, "PMC4", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* Time base */ - register_tbl(env); - /* Hardware implementation registers */ - /* XXX : not implemented */ - spr_register(env, SPR_HID0, "HID0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID1, "HID1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* Memory management */ - register_low_BATs(env); - init_excp_604(env); - env->dcache_line_size = 32; - env->icache_line_size = 32; - /* Allocate hardware IRQ controller */ - 
ppc6xx_irq_init(env_archcpu(env)); + init_proc_604(env); + register_604e_sprs(env); } POWERPC_FAMILY(604E)(ObjectClass *oc, void *data) @@ -4952,7 +3478,7 @@ POWERPC_FAMILY(604E)(ObjectClass *oc, void *data) (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_32B; - pcc->excp_model = POWERPC_EXCP_604; + pcc->excp_model = POWERPC_EXCP_6xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_604; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | @@ -4961,24 +3487,12 @@ POWERPC_FAMILY(604E)(ObjectClass *oc, void *data) static void init_proc_740(CPUPPCState *env) { - register_ne_601_sprs(env); + register_non_embedded_sprs(env); register_sdr1_sprs(env); register_7xx_sprs(env); - /* Time base */ - register_tbl(env); /* Thermal management */ register_thrm_sprs(env); - /* Hardware implementation registers */ - /* XXX : not implemented */ - spr_register(env, SPR_HID0, "HID0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID1, "HID1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); + /* Memory management */ register_low_BATs(env); init_excp_7x0(env); @@ -5020,7 +3534,7 @@ POWERPC_FAMILY(740)(ObjectClass *oc, void *data) (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_32B; - pcc->excp_model = POWERPC_EXCP_7x0; + pcc->excp_model = POWERPC_EXCP_7xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_750; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | @@ -5029,29 +3543,17 @@ POWERPC_FAMILY(740)(ObjectClass *oc, void *data) static void init_proc_750(CPUPPCState *env) { - register_ne_601_sprs(env); + register_non_embedded_sprs(env); register_sdr1_sprs(env); register_7xx_sprs(env); - /* XXX : not implemented */ + spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, spr_access_nop, 0x00000000); - /* Time base */ - register_tbl(env); /* Thermal management */ register_thrm_sprs(env); - /* Hardware implementation registers */ - /* XXX : not implemented */ - spr_register(env, SPR_HID0, "HID0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID1, "HID1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); + /* Memory management */ register_low_BATs(env); /* @@ -5097,7 +3599,7 @@ POWERPC_FAMILY(750)(ObjectClass *oc, void *data) (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_32B; - pcc->excp_model = POWERPC_EXCP_7x0; + pcc->excp_model = POWERPC_EXCP_7xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_750; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | @@ -5106,16 +3608,14 @@ POWERPC_FAMILY(750)(ObjectClass *oc, void *data) static void init_proc_750cl(CPUPPCState *env) { - register_ne_601_sprs(env); + register_non_embedded_sprs(env); register_sdr1_sprs(env); register_7xx_sprs(env); - /* XXX : not implemented */ + spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, spr_access_nop, 0x00000000); - /* Time base */ - register_tbl(env); /* Thermal management */ /* Those registers are fake on 750CL */ spr_register(env, SPR_THRM1, "THRM1", @@ -5130,7 +3630,7 @@ static void init_proc_750cl(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX: not implemented */ + spr_register(env, SPR_750_TDCL, "TDCL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, 
&spr_write_generic, @@ -5140,7 +3640,6 @@ static void init_proc_750cl(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* DMA */ - /* XXX : not implemented */ spr_register(env, SPR_750_WPAR, "WPAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -5154,63 +3653,51 @@ static void init_proc_750cl(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Hardware implementation registers */ - /* XXX : not implemented */ - spr_register(env, SPR_HID0, "HID0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID1, "HID1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ spr_register(env, SPR_750CL_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_750CL_HID4, "HID4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Quantization registers */ - /* XXX : not implemented */ spr_register(env, SPR_750_GQR0, "GQR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_750_GQR1, "GQR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_750_GQR2, "GQR2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_750_GQR3, "GQR3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_750_GQR4, "GQR4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_750_GQR5, "GQR5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_750_GQR6, "GQR6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_750_GQR7, "GQR7", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -5297,7 +3784,7 @@ POWERPC_FAMILY(750cl)(ObjectClass *oc, void *data) (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_32B; - pcc->excp_model = POWERPC_EXCP_7x0; + pcc->excp_model = POWERPC_EXCP_7xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_750; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | @@ -5306,34 +3793,22 @@ POWERPC_FAMILY(750cl)(ObjectClass *oc, void *data) static void init_proc_750cx(CPUPPCState *env) { - register_ne_601_sprs(env); + register_non_embedded_sprs(env); register_sdr1_sprs(env); register_7xx_sprs(env); - /* XXX : not implemented */ + spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, spr_access_nop, 0x00000000); - /* Time base */ - register_tbl(env); /* Thermal management */ register_thrm_sprs(env); - /* This register is not implemented but is present for compatibility */ + spr_register(env, SPR_SDA, "SDA", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* Hardware implementation registers */ - /* XXX : not implemented */ - spr_register(env, SPR_HID0, "HID0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID1, 
"HID1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); + /* Memory management */ register_low_BATs(env); /* PowerPC 750cx has 8 DBATs and 8 IBATs */ @@ -5377,7 +3852,7 @@ POWERPC_FAMILY(750cx)(ObjectClass *oc, void *data) (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_32B; - pcc->excp_model = POWERPC_EXCP_7x0; + pcc->excp_model = POWERPC_EXCP_7xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_750; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | @@ -5386,35 +3861,22 @@ POWERPC_FAMILY(750cx)(ObjectClass *oc, void *data) static void init_proc_750fx(CPUPPCState *env) { - register_ne_601_sprs(env); + register_non_embedded_sprs(env); register_sdr1_sprs(env); register_7xx_sprs(env); - /* XXX : not implemented */ + spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, spr_access_nop, 0x00000000); - /* Time base */ - register_tbl(env); /* Thermal management */ register_thrm_sprs(env); - /* XXX : not implemented */ + spr_register(env, SPR_750_THRM4, "THRM4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Hardware implementation registers */ - /* XXX : not implemented */ - spr_register(env, SPR_HID0, "HID0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID1, "HID1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ spr_register(env, SPR_750FX_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -5462,7 +3924,7 @@ POWERPC_FAMILY(750fx)(ObjectClass *oc, void *data) (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_32B; - pcc->excp_model = POWERPC_EXCP_7x0; + pcc->excp_model = POWERPC_EXCP_7xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_750; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | @@ -5471,35 +3933,22 @@ POWERPC_FAMILY(750fx)(ObjectClass *oc, void *data) static void init_proc_750gx(CPUPPCState *env) { - register_ne_601_sprs(env); + register_non_embedded_sprs(env); register_sdr1_sprs(env); register_7xx_sprs(env); - /* XXX : not implemented (XXX: different from 750fx) */ + spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, spr_access_nop, 0x00000000); - /* Time base */ - register_tbl(env); /* Thermal management */ register_thrm_sprs(env); - /* XXX : not implemented */ + spr_register(env, SPR_750_THRM4, "THRM4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Hardware implementation registers */ - /* XXX : not implemented (XXX: different from 750fx) */ - spr_register(env, SPR_HID0, "HID0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID1, "HID1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented (XXX: different from 750fx) */ spr_register(env, SPR_750FX_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -5547,7 +3996,7 @@ POWERPC_FAMILY(750gx)(ObjectClass *oc, void *data) (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_32B; - pcc->excp_model = POWERPC_EXCP_7x0; + pcc->excp_model = POWERPC_EXCP_7xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_750; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | @@ -5556,30 +4005,13 @@ 
POWERPC_FAMILY(750gx)(ObjectClass *oc, void *data) static void init_proc_745(CPUPPCState *env) { - register_ne_601_sprs(env); + register_non_embedded_sprs(env); register_sdr1_sprs(env); register_7xx_sprs(env); - register_G2_755_sprs(env); - /* Time base */ - register_tbl(env); + register_745_sprs(env); /* Thermal management */ register_thrm_sprs(env); - /* Hardware implementation registers */ - /* XXX : not implemented */ - spr_register(env, SPR_HID0, "HID0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID1, "HID1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID2, "HID2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); + /* Memory management */ register_low_BATs(env); register_high_BATs(env); @@ -5623,7 +4055,7 @@ POWERPC_FAMILY(745)(ObjectClass *oc, void *data) (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_SOFT_6xx; - pcc->excp_model = POWERPC_EXCP_7x5; + pcc->excp_model = POWERPC_EXCP_7xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_750; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | @@ -5632,50 +4064,8 @@ POWERPC_FAMILY(745)(ObjectClass *oc, void *data) static void init_proc_755(CPUPPCState *env) { - register_ne_601_sprs(env); - register_sdr1_sprs(env); - register_7xx_sprs(env); - register_G2_755_sprs(env); - /* Time base */ - register_tbl(env); - /* L2 cache control */ - /* XXX : not implemented */ - spr_register(env, SPR_L2CR, "L2CR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, spr_access_nop, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_L2PMCR, "L2PMCR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* Thermal management */ - register_thrm_sprs(env); - /* Hardware implementation registers */ - /* XXX : not implemented */ - spr_register(env, SPR_HID0, "HID0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID1, "HID1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_HID2, "HID2", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* Memory management */ - register_low_BATs(env); - register_high_BATs(env); - register_6xx_7xx_soft_tlb(env, 64, 2); - init_excp_7x5(env); - env->dcache_line_size = 32; - env->icache_line_size = 32; - /* Allocate hardware IRQ controller */ - ppc6xx_irq_init(env_archcpu(env)); + init_proc_745(env); + register_755_sprs(env); } POWERPC_FAMILY(755)(ObjectClass *oc, void *data) @@ -5710,7 +4100,7 @@ POWERPC_FAMILY(755)(ObjectClass *oc, void *data) (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_SOFT_6xx; - pcc->excp_model = POWERPC_EXCP_7x5; + pcc->excp_model = POWERPC_EXCP_7xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_750; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | @@ -5719,21 +4109,16 @@ POWERPC_FAMILY(755)(ObjectClass *oc, void *data) static void init_proc_7400(CPUPPCState *env) { - register_ne_601_sprs(env); + register_non_embedded_sprs(env); register_sdr1_sprs(env); - register_7xx_sprs(env); - /* Time base */ - register_tbl(env); - /* 74xx specific SPR */ register_74xx_sprs(env); vscr_init(env, 0x00010000); - /* XXX : not implemented */ + 
spr_register(env, SPR_UBAMR, "UBAMR", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); - /* XXX: this seems not implemented on all revisions. */ - /* XXX : not implemented */ + spr_register(env, SPR_MSSCR1, "MSSCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -5796,15 +4181,11 @@ POWERPC_FAMILY(7400)(ObjectClass *oc, void *data) static void init_proc_7410(CPUPPCState *env) { - register_ne_601_sprs(env); + register_non_embedded_sprs(env); register_sdr1_sprs(env); - register_7xx_sprs(env); - /* Time base */ - register_tbl(env); - /* 74xx specific SPR */ register_74xx_sprs(env); vscr_init(env, 0x00010000); - /* XXX : not implemented */ + spr_register(env, SPR_UBAMR, "UBAMR", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, @@ -5812,13 +4193,13 @@ static void init_proc_7410(CPUPPCState *env) /* Thermal management */ register_thrm_sprs(env); /* L2PMCR */ - /* XXX : not implemented */ + spr_register(env, SPR_L2PMCR, "L2PMCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* LDSTDB */ - /* XXX : not implemented */ + spr_register(env, SPR_LDSTDB, "LDSTDB", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -5879,61 +4260,52 @@ POWERPC_FAMILY(7410)(ObjectClass *oc, void *data) static void init_proc_7440(CPUPPCState *env) { - register_ne_601_sprs(env); + register_non_embedded_sprs(env); register_sdr1_sprs(env); - register_7xx_sprs(env); - /* Time base */ - register_tbl(env); - /* 74xx specific SPR */ register_74xx_sprs(env); vscr_init(env, 0x00010000); - /* XXX : not implemented */ + spr_register(env, SPR_UBAMR, "UBAMR", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* LDSTCR */ - /* XXX : not implemented */ spr_register(env, SPR_LDSTCR, "LDSTCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* ICTRL */ - /* XXX : not implemented */ spr_register(env, SPR_ICTRL, "ICTRL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* MSSSR0 */ - /* XXX : not implemented */ spr_register(env, SPR_MSSSR0, "MSSSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* PMC */ - /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC5, "PMC5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC5, "UPMC5", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC6, "PMC6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC6, "UPMC6", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* Memory management */ register_low_BATs(env); - register_74xx_soft_tlb(env, 128, 2); init_excp_7450(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -5957,7 +4329,7 @@ POWERPC_FAMILY(7440)(ObjectClass *oc, void *data) PPC_CACHE_DCBA | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | - PPC_MEM_TLBIA | PPC_74xx_TLB | + PPC_MEM_TLBIA | PPC_SEGMENT | PPC_EXTERN | PPC_ALTIVEC; pcc->msr_mask = (1ull << MSR_VR) | @@ -5977,7 +4349,7 @@ POWERPC_FAMILY(7440)(ObjectClass *oc, void *data) (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); - pcc->mmu_model = POWERPC_MMU_SOFT_74xx; + pcc->mmu_model = POWERPC_MMU_32B; pcc->excp_model = POWERPC_EXCP_74xx; pcc->bus_model = 
PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_7400; @@ -5988,87 +4360,74 @@ POWERPC_FAMILY(7440)(ObjectClass *oc, void *data) static void init_proc_7450(CPUPPCState *env) { - register_ne_601_sprs(env); + register_non_embedded_sprs(env); register_sdr1_sprs(env); - register_7xx_sprs(env); - /* Time base */ - register_tbl(env); - /* 74xx specific SPR */ register_74xx_sprs(env); vscr_init(env, 0x00010000); /* Level 3 cache control */ register_l3_ctrl(env); /* L3ITCR1 */ - /* XXX : not implemented */ spr_register(env, SPR_L3ITCR1, "L3ITCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* L3ITCR2 */ - /* XXX : not implemented */ spr_register(env, SPR_L3ITCR2, "L3ITCR2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* L3ITCR3 */ - /* XXX : not implemented */ spr_register(env, SPR_L3ITCR3, "L3ITCR3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* L3OHCR */ - /* XXX : not implemented */ spr_register(env, SPR_L3OHCR, "L3OHCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_UBAMR, "UBAMR", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* LDSTCR */ - /* XXX : not implemented */ spr_register(env, SPR_LDSTCR, "LDSTCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* ICTRL */ - /* XXX : not implemented */ spr_register(env, SPR_ICTRL, "ICTRL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* MSSSR0 */ - /* XXX : not implemented */ spr_register(env, SPR_MSSSR0, "MSSSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* PMC */ - /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC5, "PMC5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC5, "UPMC5", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC6, "PMC6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC6, "UPMC6", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* Memory management */ register_low_BATs(env); - register_74xx_soft_tlb(env, 128, 2); init_excp_7450(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -6092,7 +4451,7 @@ POWERPC_FAMILY(7450)(ObjectClass *oc, void *data) PPC_CACHE_DCBA | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | - PPC_MEM_TLBIA | PPC_74xx_TLB | + PPC_MEM_TLBIA | PPC_SEGMENT | PPC_EXTERN | PPC_ALTIVEC; pcc->msr_mask = (1ull << MSR_VR) | @@ -6112,7 +4471,7 @@ POWERPC_FAMILY(7450)(ObjectClass *oc, void *data) (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); - pcc->mmu_model = POWERPC_MMU_SOFT_74xx; + pcc->mmu_model = POWERPC_MMU_32B; pcc->excp_model = POWERPC_EXCP_74xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_7400; @@ -6123,49 +4482,41 @@ POWERPC_FAMILY(7450)(ObjectClass *oc, void *data) static void init_proc_7445(CPUPPCState *env) { - register_ne_601_sprs(env); + register_non_embedded_sprs(env); register_sdr1_sprs(env); - register_7xx_sprs(env); - /* Time base */ - register_tbl(env); - /* 74xx specific SPR */ register_74xx_sprs(env); vscr_init(env, 0x00010000); /* LDSTCR */ - /* XXX : not implemented */ spr_register(env, 
SPR_LDSTCR, "LDSTCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* ICTRL */ - /* XXX : not implemented */ spr_register(env, SPR_ICTRL, "ICTRL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* MSSSR0 */ - /* XXX : not implemented */ spr_register(env, SPR_MSSSR0, "MSSSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* PMC */ - /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC5, "PMC5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC5, "UPMC5", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC6, "PMC6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC6, "UPMC6", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, @@ -6206,7 +4557,6 @@ static void init_proc_7445(CPUPPCState *env) /* Memory management */ register_low_BATs(env); register_high_BATs(env); - register_74xx_soft_tlb(env, 128, 2); init_excp_7450(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -6230,7 +4580,7 @@ POWERPC_FAMILY(7445)(ObjectClass *oc, void *data) PPC_CACHE_DCBA | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | - PPC_MEM_TLBIA | PPC_74xx_TLB | + PPC_MEM_TLBIA | PPC_SEGMENT | PPC_EXTERN | PPC_ALTIVEC; pcc->msr_mask = (1ull << MSR_VR) | @@ -6250,7 +4600,7 @@ POWERPC_FAMILY(7445)(ObjectClass *oc, void *data) (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); - pcc->mmu_model = POWERPC_MMU_SOFT_74xx; + pcc->mmu_model = POWERPC_MMU_32B; pcc->excp_model = POWERPC_EXCP_74xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_7400; @@ -6261,51 +4611,43 @@ POWERPC_FAMILY(7445)(ObjectClass *oc, void *data) static void init_proc_7455(CPUPPCState *env) { - register_ne_601_sprs(env); + register_non_embedded_sprs(env); register_sdr1_sprs(env); - register_7xx_sprs(env); - /* Time base */ - register_tbl(env); - /* 74xx specific SPR */ register_74xx_sprs(env); vscr_init(env, 0x00010000); /* Level 3 cache control */ register_l3_ctrl(env); /* LDSTCR */ - /* XXX : not implemented */ spr_register(env, SPR_LDSTCR, "LDSTCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* ICTRL */ - /* XXX : not implemented */ spr_register(env, SPR_ICTRL, "ICTRL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* MSSSR0 */ - /* XXX : not implemented */ spr_register(env, SPR_MSSSR0, "MSSSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* PMC */ - /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC5, "PMC5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC5, "UPMC5", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC6, "PMC6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC6, "UPMC6", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, @@ -6346,7 +4688,6 @@ static void init_proc_7455(CPUPPCState *env) /* Memory management */ register_low_BATs(env); register_high_BATs(env); - register_74xx_soft_tlb(env, 128, 2); 
init_excp_7450(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -6370,7 +4711,7 @@ POWERPC_FAMILY(7455)(ObjectClass *oc, void *data) PPC_CACHE_DCBA | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | - PPC_MEM_TLBIA | PPC_74xx_TLB | + PPC_MEM_TLBIA | PPC_SEGMENT | PPC_EXTERN | PPC_ALTIVEC; pcc->msr_mask = (1ull << MSR_VR) | @@ -6390,7 +4731,7 @@ POWERPC_FAMILY(7455)(ObjectClass *oc, void *data) (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); - pcc->mmu_model = POWERPC_MMU_SOFT_74xx; + pcc->mmu_model = POWERPC_MMU_32B; pcc->excp_model = POWERPC_EXCP_74xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_7400; @@ -6401,75 +4742,63 @@ POWERPC_FAMILY(7455)(ObjectClass *oc, void *data) static void init_proc_7457(CPUPPCState *env) { - register_ne_601_sprs(env); + register_non_embedded_sprs(env); register_sdr1_sprs(env); - register_7xx_sprs(env); - /* Time base */ - register_tbl(env); - /* 74xx specific SPR */ register_74xx_sprs(env); vscr_init(env, 0x00010000); /* Level 3 cache control */ register_l3_ctrl(env); /* L3ITCR1 */ - /* XXX : not implemented */ spr_register(env, SPR_L3ITCR1, "L3ITCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* L3ITCR2 */ - /* XXX : not implemented */ spr_register(env, SPR_L3ITCR2, "L3ITCR2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* L3ITCR3 */ - /* XXX : not implemented */ spr_register(env, SPR_L3ITCR3, "L3ITCR3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* L3OHCR */ - /* XXX : not implemented */ spr_register(env, SPR_L3OHCR, "L3OHCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* LDSTCR */ - /* XXX : not implemented */ spr_register(env, SPR_LDSTCR, "LDSTCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* ICTRL */ - /* XXX : not implemented */ spr_register(env, SPR_ICTRL, "ICTRL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* MSSSR0 */ - /* XXX : not implemented */ spr_register(env, SPR_MSSSR0, "MSSSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* PMC */ - /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC5, "PMC5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC5, "UPMC5", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC6, "PMC6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC6, "UPMC6", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, @@ -6510,7 +4839,6 @@ static void init_proc_7457(CPUPPCState *env) /* Memory management */ register_low_BATs(env); register_high_BATs(env); - register_74xx_soft_tlb(env, 128, 2); init_excp_7450(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -6534,7 +4862,7 @@ POWERPC_FAMILY(7457)(ObjectClass *oc, void *data) PPC_CACHE_DCBA | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | - PPC_MEM_TLBIA | PPC_74xx_TLB | + PPC_MEM_TLBIA | PPC_SEGMENT | PPC_EXTERN | PPC_ALTIVEC; pcc->msr_mask = (1ull << MSR_VR) | @@ -6554,7 +4882,7 @@ POWERPC_FAMILY(7457)(ObjectClass *oc, void *data) (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); - pcc->mmu_model = 
POWERPC_MMU_SOFT_74xx; + pcc->mmu_model = POWERPC_MMU_32B; pcc->excp_model = POWERPC_EXCP_74xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_7400; @@ -6565,50 +4893,46 @@ POWERPC_FAMILY(7457)(ObjectClass *oc, void *data) static void init_proc_e600(CPUPPCState *env) { - register_ne_601_sprs(env); + register_non_embedded_sprs(env); register_sdr1_sprs(env); - register_7xx_sprs(env); - /* Time base */ - register_tbl(env); - /* 74xx specific SPR */ register_74xx_sprs(env); vscr_init(env, 0x00010000); - /* XXX : not implemented */ + spr_register(env, SPR_UBAMR, "UBAMR", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_LDSTCR, "LDSTCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_ICTRL, "ICTRL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_MSSSR0, "MSSSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC5, "PMC5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC5, "UPMC5", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC6, "PMC6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC6, "UPMC6", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, @@ -6649,7 +4973,6 @@ static void init_proc_e600(CPUPPCState *env) /* Memory management */ register_low_BATs(env); register_high_BATs(env); - register_74xx_soft_tlb(env, 128, 2); init_excp_7450(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -6673,7 +4996,7 @@ POWERPC_FAMILY(e600)(ObjectClass *oc, void *data) PPC_CACHE_DCBA | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | - PPC_MEM_TLBIA | PPC_74xx_TLB | + PPC_MEM_TLBIA | PPC_SEGMENT | PPC_EXTERN | PPC_ALTIVEC; pcc->insns_flags2 = PPC_NONE; @@ -6722,7 +5045,6 @@ static int check_pow_970(CPUPPCState *env) static void register_970_hid_sprs(CPUPPCState *env) { /* Hardware implementation registers */ - /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_clear, @@ -6749,7 +5071,7 @@ static void register_book3s_ctrl_sprs(CPUPPCState *env) { spr_register(env, SPR_CTRL, "SPR_CTRL", SPR_NOACCESS, SPR_NOACCESS, - SPR_NOACCESS, &spr_write_generic, + SPR_NOACCESS, &spr_write_CTRL, 0x00000000); spr_register(env, SPR_UCTRL, "SPR_UCTRL", &spr_read_ureg, SPR_NOACCESS, @@ -6821,11 +5143,11 @@ static void register_book3s_pmu_sup_sprs(CPUPPCState *env) { spr_register_kvm(env, SPR_POWER_MMCR0, "MMCR0", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - KVM_REG_PPC_MMCR0, 0x00000000); + &spr_read_generic, &spr_write_MMCR0, + KVM_REG_PPC_MMCR0, 0x80000000); spr_register_kvm(env, SPR_POWER_MMCR1, "MMCR1", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_MMCR1, KVM_REG_PPC_MMCR1, 0x00000000); spr_register_kvm(env, SPR_POWER_MMCRA, "MMCRA", SPR_NOACCESS, SPR_NOACCESS, @@ -6833,27 +5155,27 @@ static void register_book3s_pmu_sup_sprs(CPUPPCState *env) KVM_REG_PPC_MMCRA, 0x00000000); spr_register_kvm(env, 
SPR_POWER_PMC1, "PMC1", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_PMC, &spr_write_PMC, KVM_REG_PPC_PMC1, 0x00000000); spr_register_kvm(env, SPR_POWER_PMC2, "PMC2", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_PMC, &spr_write_PMC, KVM_REG_PPC_PMC2, 0x00000000); spr_register_kvm(env, SPR_POWER_PMC3, "PMC3", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_PMC, &spr_write_PMC, KVM_REG_PPC_PMC3, 0x00000000); spr_register_kvm(env, SPR_POWER_PMC4, "PMC4", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_PMC, &spr_write_PMC, KVM_REG_PPC_PMC4, 0x00000000); spr_register_kvm(env, SPR_POWER_PMC5, "PMC5", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_PMC, &spr_write_PMC, KVM_REG_PPC_PMC5, 0x00000000); spr_register_kvm(env, SPR_POWER_PMC6, "PMC6", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_PMC, &spr_write_PMC, KVM_REG_PPC_PMC6, 0x00000000); spr_register_kvm(env, SPR_POWER_SIAR, "SIAR", SPR_NOACCESS, SPR_NOACCESS, @@ -6868,9 +5190,9 @@ static void register_book3s_pmu_sup_sprs(CPUPPCState *env) static void register_book3s_pmu_user_sprs(CPUPPCState *env) { spr_register(env, SPR_POWER_UMMCR0, "UMMCR0", - &spr_read_ureg, SPR_NOACCESS, + &spr_read_MMCR0_ureg, &spr_write_MMCR0_ureg, &spr_read_ureg, &spr_write_ureg, - 0x00000000); + 0x80000000); spr_register(env, SPR_POWER_UMMCR1, "UMMCR1", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, &spr_write_ureg, @@ -6880,27 +5202,27 @@ static void register_book3s_pmu_user_sprs(CPUPPCState *env) &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UPMC1, "UPMC1", - &spr_read_ureg, SPR_NOACCESS, + &spr_read_PMC14_ureg, &spr_write_PMC14_ureg, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UPMC2, "UPMC2", - &spr_read_ureg, SPR_NOACCESS, + &spr_read_PMC14_ureg, &spr_write_PMC14_ureg, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UPMC3, "UPMC3", - &spr_read_ureg, SPR_NOACCESS, + &spr_read_PMC14_ureg, &spr_write_PMC14_ureg, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UPMC4, "UPMC4", - &spr_read_ureg, SPR_NOACCESS, + &spr_read_PMC14_ureg, &spr_write_PMC14_ureg, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UPMC5, "UPMC5", - &spr_read_ureg, SPR_NOACCESS, + &spr_read_PMC56_ureg, &spr_write_PMC56_ureg, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UPMC6, "UPMC6", - &spr_read_ureg, SPR_NOACCESS, + &spr_read_PMC56_ureg, &spr_write_PMC56_ureg, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_USIAR, "USIAR", @@ -6976,7 +5298,7 @@ static void register_power8_pmu_sup_sprs(CPUPPCState *env) static void register_power8_pmu_user_sprs(CPUPPCState *env) { spr_register(env, SPR_POWER_UMMCR2, "UMMCR2", - &spr_read_ureg, SPR_NOACCESS, + &spr_read_MMCR2_ureg, &spr_write_MMCR2_ureg, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_USIER, "USIER", @@ -7378,11 +5700,48 @@ static void register_power9_mmu_sprs(CPUPPCState *env) #endif } +static void register_power10_hash_sprs(CPUPPCState *env) +{ + /* + * it's the OS responsability to generate a random value for the registers + * in each process' context. So, initialize it with 0 here. 
+ */ + uint64_t hashkeyr_initial_value = 0, hashpkeyr_initial_value = 0; +#if defined(CONFIG_USER_ONLY) + /* in linux-user, setup the hash register with a random value */ + GRand *rand = g_rand_new(); + hashkeyr_initial_value = + ((uint64_t)g_rand_int(rand) << 32) | (uint64_t)g_rand_int(rand); + hashpkeyr_initial_value = + ((uint64_t)g_rand_int(rand) << 32) | (uint64_t)g_rand_int(rand); + g_rand_free(rand); +#endif + spr_register(env, SPR_HASHKEYR, "HASHKEYR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + hashkeyr_initial_value); + spr_register_hv(env, SPR_HASHPKEYR, "HASHPKEYR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + hashpkeyr_initial_value); +} + +/* + * Initialize PMU counter overflow timers for Power8 and + * newer Power chips when using TCG. + */ +static void init_tcg_pmu_power8(CPUPPCState *env) +{ + /* Init PMU overflow timers */ + if (tcg_enabled()) { + cpu_ppc_pmu_init(env); + } +} + static void init_proc_book3s_common(CPUPPCState *env) { - register_ne_601_sprs(env); - register_tbl(env); - register_usprg3_sprs(env); + register_non_embedded_sprs(env); register_book3s_altivec_sprs(env); register_book3s_pmu_sup_sprs(env); register_book3s_pmu_user_sprs(env); @@ -7392,6 +5751,11 @@ static void init_proc_book3s_common(CPUPPCState *env) * value is the one used by 74xx processors. */ vscr_init(env, 0x00010000); + + spr_register(env, SPR_USPRG3, "USPRG3", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); } static void init_proc_970(CPUPPCState *env) @@ -7436,7 +5800,7 @@ POWERPC_FAMILY(970)(ObjectClass *oc, void *data) PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_64B | PPC_ALTIVEC | PPC_SEGMENT_64B | PPC_SLBI; - pcc->insns_flags2 = PPC2_FP_CVT_S64; + pcc->insns_flags2 = PPC2_FP_CVT_S64 | PPC2_MEM_LWSYNC; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_VR) | (1ull << MSR_POW) | @@ -7506,12 +5870,14 @@ POWERPC_FAMILY(POWER5P)(ObjectClass *oc, void *data) PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | + PPC_FLOAT_EXT | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_64B | + PPC_POPCNTB | PPC_SEGMENT_64B | PPC_SLBI; - pcc->insns_flags2 = PPC2_FP_CVT_S64; + pcc->insns_flags2 = PPC2_FP_CVT_S64 | PPC2_MEM_LWSYNC; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_VR) | (1ull << MSR_POW) | @@ -7573,56 +5939,31 @@ static void init_proc_POWER7(CPUPPCState *env) ppcPOWER7_irq_init(env_archcpu(env)); } -static bool ppc_pvr_match_power7(PowerPCCPUClass *pcc, uint32_t pvr) +static bool ppc_pvr_match_power7(PowerPCCPUClass *pcc, uint32_t pvr, bool best) { - if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER7P_BASE) { - return true; - } - if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER7_BASE) { - return true; - } - return false; -} + uint32_t base = pvr & CPU_POWERPC_POWER_SERVER_MASK; + uint32_t pcc_base = pcc->pvr & CPU_POWERPC_POWER_SERVER_MASK; -static bool cpu_has_work_POWER7(CPUState *cs) -{ - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; + if (!best) { + if (base == CPU_POWERPC_POWER7_BASE) { + return true; + } + if (base == CPU_POWERPC_POWER7P_BASE) { + return true; + } + } - if (cs->halted) { - if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) { - return false; - } - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) && - (env->spr[SPR_LPCR] & LPCR_P7_PECE0)) { - return true; - } - if ((env->pending_interrupts & 
(1u << PPC_INTERRUPT_DECR)) && - (env->spr[SPR_LPCR] & LPCR_P7_PECE1)) { - return true; - } - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK)) && - (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) { - return true; - } - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HMI)) && - (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) { - return true; - } - if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) { - return true; - } + if (base != pcc_base) { return false; - } else { - return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD); } + + return true; } POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); - CPUClass *cc = CPU_CLASS(oc); dc->fw_name = "PowerPC,POWER7"; dc->desc = "POWER7"; @@ -7631,7 +5972,6 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data) pcc->pcr_supported = PCR_COMPAT_2_06 | PCR_COMPAT_2_05; pcc->init_proc = init_proc_POWER7; pcc->check_pow = check_pow_nocheck; - cc->has_work = cpu_has_work_POWER7; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -7649,7 +5989,7 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data) PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | PPC2_FP_CVT_S64 | - PPC2_PM_ISA206; + PPC2_PM_ISA206 | PPC2_MEM_LWSYNC | PPC2_BCDA_ISA206; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_VR) | (1ull << MSR_VSX) | @@ -7695,6 +6035,9 @@ static void init_proc_POWER8(CPUPPCState *env) register_sdr1_sprs(env); register_book3s_207_dbg_sprs(env); + /* Common TCG PMU */ + init_tcg_pmu_power8(env); + /* POWER8 Specific Registers */ register_book3s_ids_sprs(env); register_rmor_sprs(env); @@ -7730,67 +6073,33 @@ static void init_proc_POWER8(CPUPPCState *env) ppcPOWER7_irq_init(env_archcpu(env)); } -static bool ppc_pvr_match_power8(PowerPCCPUClass *pcc, uint32_t pvr) +static bool ppc_pvr_match_power8(PowerPCCPUClass *pcc, uint32_t pvr, bool best) { - if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8NVL_BASE) { - return true; - } - if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8E_BASE) { - return true; - } - if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8_BASE) { - return true; - } - return false; -} + uint32_t base = pvr & CPU_POWERPC_POWER_SERVER_MASK; + uint32_t pcc_base = pcc->pvr & CPU_POWERPC_POWER_SERVER_MASK; -static bool cpu_has_work_POWER8(CPUState *cs) -{ - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; - - if (cs->halted) { - if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) { - return false; - } - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) && - (env->spr[SPR_LPCR] & LPCR_P8_PECE2)) { + if (!best) { + if (base == CPU_POWERPC_POWER8_BASE) { return true; } - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) && - (env->spr[SPR_LPCR] & LPCR_P8_PECE3)) { + if (base == CPU_POWERPC_POWER8E_BASE) { return true; } - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK)) && - (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) { - return true; - } - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HMI)) && - (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) { - return true; - } - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DOORBELL)) && - (env->spr[SPR_LPCR] & LPCR_P8_PECE0)) { - return true; - } - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HDOORBELL)) && - (env->spr[SPR_LPCR] & LPCR_P8_PECE1)) { - return true; - } - if 
(env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) { + if (base == CPU_POWERPC_POWER8NVL_BASE) { return true; } + } + if (base != pcc_base) { return false; - } else { - return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD); } + + return true; } POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); - CPUClass *cc = CPU_CLASS(oc); dc->fw_name = "PowerPC,POWER8"; dc->desc = "POWER8"; @@ -7799,7 +6108,6 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data) pcc->pcr_supported = PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05; pcc->init_proc = init_proc_POWER8; pcc->check_pow = check_pow_nocheck; - cc->has_work = cpu_has_work_POWER8; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -7819,7 +6127,8 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data) PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | - PPC2_TM | PPC2_PM_ISA206; + PPC2_TM | PPC2_PM_ISA206 | PPC2_MEM_LWSYNC | + PPC2_BCDA_ISA206; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_HV) | (1ull << MSR_TM) | @@ -7889,6 +6198,9 @@ static void init_proc_POWER9(CPUPPCState *env) init_proc_book3s_common(env); register_book3s_207_dbg_sprs(env); + /* Common TCG PMU */ + init_tcg_pmu_power8(env); + /* POWER8 Specific Registers */ register_book3s_ids_sprs(env); register_amr_sprs(env); @@ -7934,77 +6246,33 @@ static void init_proc_POWER9(CPUPPCState *env) ppcPOWER9_irq_init(env_archcpu(env)); } -static bool ppc_pvr_match_power9(PowerPCCPUClass *pcc, uint32_t pvr) +static bool ppc_pvr_match_power9(PowerPCCPUClass *pcc, uint32_t pvr, bool best) { - if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER9_BASE) { + uint32_t base = pvr & CPU_POWERPC_POWER_SERVER_MASK; + uint32_t pcc_base = pcc->pvr & CPU_POWERPC_POWER_SERVER_MASK; + + if (!best) { + if (base == CPU_POWERPC_POWER9_BASE) { + return true; + } + } + + if (base != pcc_base) { + return false; + } + + if ((pvr & 0x0f00) == (pcc->pvr & 0x0f00)) { + /* Major DD version matches to power9_v1.0 and power9_v2.0 */ return true; } + return false; } -static bool cpu_has_work_POWER9(CPUState *cs) -{ - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; - - if (cs->halted) { - uint64_t psscr = env->spr[SPR_PSSCR]; - - if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) { - return false; - } - - /* If EC is clear, just return true on any pending interrupt */ - if (!(psscr & PSSCR_EC)) { - return true; - } - /* External Exception */ - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) && - (env->spr[SPR_LPCR] & LPCR_EEE)) { - bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); - if (heic == 0 || !msr_hv || msr_pr) { - return true; - } - } - /* Decrementer Exception */ - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) && - (env->spr[SPR_LPCR] & LPCR_DEE)) { - return true; - } - /* Machine Check or Hypervisor Maintenance Exception */ - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK | - 1u << PPC_INTERRUPT_HMI)) && (env->spr[SPR_LPCR] & LPCR_OEE)) { - return true; - } - /* Privileged Doorbell Exception */ - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DOORBELL)) && - (env->spr[SPR_LPCR] & LPCR_PDEE)) { - return true; - } - /* Hypervisor Doorbell Exception */ - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HDOORBELL)) && - (env->spr[SPR_LPCR] & LPCR_HDEE)) { - return true; 
- } - /* Hypervisor virtualization exception */ - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HVIRT)) && - (env->spr[SPR_LPCR] & LPCR_HVEE)) { - return true; - } - if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) { - return true; - } - return false; - } else { - return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD); - } -} - POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); - CPUClass *cc = CPU_CLASS(oc); dc->fw_name = "PowerPC,POWER9"; dc->desc = "POWER9"; @@ -8014,7 +6282,6 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data) PCR_COMPAT_2_05; pcc->init_proc = init_proc_POWER9; pcc->check_pow = check_pow_nocheck; - cc->has_work = cpu_has_work_POWER9; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -8023,7 +6290,7 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data) PPC_FLOAT_EXT | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | - PPC_MEM_TLBSYNC | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD | @@ -8034,7 +6301,8 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data) PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | - PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL; + PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL | PPC2_MEM_LWSYNC | + PPC2_BCDA_ISA206; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_HV) | (1ull << MSR_TM) | @@ -8105,6 +6373,9 @@ static void init_proc_POWER10(CPUPPCState *env) init_proc_book3s_common(env); register_book3s_207_dbg_sprs(env); + /* Common TCG PMU */ + init_tcg_pmu_power8(env); + /* POWER8 Specific Registers */ register_book3s_ids_sprs(env); register_amr_sprs(env); @@ -8113,6 +6384,7 @@ static void init_proc_POWER10(CPUPPCState *env) register_power5p_common_sprs(env); register_power5p_lpar_sprs(env); register_power5p_ear_sprs(env); + register_power5p_tb_sprs(env); register_power6_common_sprs(env); register_power6_dbg_sprs(env); register_power8_tce_address_control_sprs(env); @@ -8123,11 +6395,13 @@ static void init_proc_POWER10(CPUPPCState *env) register_power8_pmu_user_sprs(env); register_power8_tm_sprs(env); register_power8_pspb_sprs(env); + register_power8_dpdes_sprs(env); register_vtb_sprs(env); register_power8_ic_sprs(env); register_power8_book4_sprs(env); register_power8_rpr_sprs(env); register_power9_mmu_sprs(env); + register_power10_hash_sprs(env); /* FIXME: Filter fields properly based on privilege level */ spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL, @@ -8143,77 +6417,33 @@ static void init_proc_POWER10(CPUPPCState *env) ppcPOWER9_irq_init(env_archcpu(env)); } -static bool ppc_pvr_match_power10(PowerPCCPUClass *pcc, uint32_t pvr) +static bool ppc_pvr_match_power10(PowerPCCPUClass *pcc, uint32_t pvr, bool best) { - if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER10_BASE) { + uint32_t base = pvr & CPU_POWERPC_POWER_SERVER_MASK; + uint32_t pcc_base = pcc->pvr & CPU_POWERPC_POWER_SERVER_MASK; + + if (!best) { + if (base == CPU_POWERPC_POWER10_BASE) { + return true; + } + } + + if (base != pcc_base) { + return false; + } + + if ((pvr & 0x0f00) == (pcc->pvr & 0x0f00)) { + /* Major DD version matches to power10_v1.0 and power10_v2.0 */ return true; } + return false; } -static bool cpu_has_work_POWER10(CPUState *cs) -{ - 
PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; - - if (cs->halted) { - uint64_t psscr = env->spr[SPR_PSSCR]; - - if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) { - return false; - } - - /* If EC is clear, just return true on any pending interrupt */ - if (!(psscr & PSSCR_EC)) { - return true; - } - /* External Exception */ - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) && - (env->spr[SPR_LPCR] & LPCR_EEE)) { - bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); - if (heic == 0 || !msr_hv || msr_pr) { - return true; - } - } - /* Decrementer Exception */ - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) && - (env->spr[SPR_LPCR] & LPCR_DEE)) { - return true; - } - /* Machine Check or Hypervisor Maintenance Exception */ - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK | - 1u << PPC_INTERRUPT_HMI)) && (env->spr[SPR_LPCR] & LPCR_OEE)) { - return true; - } - /* Privileged Doorbell Exception */ - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DOORBELL)) && - (env->spr[SPR_LPCR] & LPCR_PDEE)) { - return true; - } - /* Hypervisor Doorbell Exception */ - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HDOORBELL)) && - (env->spr[SPR_LPCR] & LPCR_HDEE)) { - return true; - } - /* Hypervisor virtualization exception */ - if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HVIRT)) && - (env->spr[SPR_LPCR] & LPCR_HVEE)) { - return true; - } - if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) { - return true; - } - return false; - } else { - return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD); - } -} - POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); - CPUClass *cc = CPU_CLASS(oc); dc->fw_name = "PowerPC,POWER10"; dc->desc = "POWER10"; @@ -8224,7 +6454,6 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data) PCR_COMPAT_2_06 | PCR_COMPAT_2_05; pcc->init_proc = init_proc_POWER10; pcc->check_pow = check_pow_nocheck; - cc->has_work = cpu_has_work_POWER10; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -8233,7 +6462,7 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data) PPC_FLOAT_EXT | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | - PPC_MEM_TLBSYNC | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD | @@ -8244,7 +6473,8 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data) PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | - PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL | PPC2_ISA310; + PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL | PPC2_ISA310 | + PPC2_MEM_LWSYNC | PPC2_BCDA_ISA206; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_HV) | (1ull << MSR_TM) | @@ -8270,6 +6500,9 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data) LPCR_DEE | LPCR_OEE)) | LPCR_MER | LPCR_GTSE | LPCR_TC | LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE; + /* DD2 adds an extra HAIL bit */ + pcc->lpcr_mask |= LPCR_HAIL; + pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE; pcc->mmu_model = POWERPC_MMU_3_00; #if defined(CONFIG_SOFTMMU) @@ -8316,7 +6549,6 @@ static void init_ppc_proc(PowerPCCPU *cpu) #if !defined(CONFIG_USER_ONLY) int i; - env->irq_inputs = NULL; /* Set all exception vectors to an invalid address */ for (i = 0; i < POWERPC_EXCP_NB; i++) { 
         env->excp_vectors[i] = (target_ulong)(-1ULL);
@@ -8330,31 +6562,8 @@ static void init_ppc_proc(PowerPCCPU *cpu)
     env->tlb_type = TLB_NONE;
 #endif
     /* Register SPR common to all PowerPC implementations */
-    register_generic_sprs(env);
-    spr_register(env, SPR_PVR, "PVR",
-                 /* Linux permits userspace to read PVR */
-#if defined(CONFIG_LINUX_USER)
-                 &spr_read_generic,
-#else
-                 SPR_NOACCESS,
-#endif
-                 SPR_NOACCESS,
-                 &spr_read_generic, SPR_NOACCESS,
-                 pcc->pvr);
-    /* Register SVR if it's defined to anything else than POWERPC_SVR_NONE */
-    if (pcc->svr != POWERPC_SVR_NONE) {
-        if (pcc->svr & POWERPC_SVR_E500) {
-            spr_register(env, SPR_E500_SVR, "SVR",
-                         SPR_NOACCESS, SPR_NOACCESS,
-                         &spr_read_generic, SPR_NOACCESS,
-                         pcc->svr & ~POWERPC_SVR_E500);
-        } else {
-            spr_register(env, SPR_SVR, "SVR",
-                         SPR_NOACCESS, SPR_NOACCESS,
-                         &spr_read_generic, SPR_NOACCESS,
-                         pcc->svr);
-        }
-    }
+    register_generic_sprs(cpu);
+
     /* PowerPC implementation specific initialisations (SPRs, timers, ...) */
     (*pcc->init_proc)(env);

@@ -8443,7 +6652,7 @@ static void init_ppc_proc(PowerPCCPU *cpu)
                 "Should not define POWERPC_FLAG_PX nor POWERPC_FLAG_PMM\n");
         exit(1);
     }
-    if ((env->flags & (POWERPC_FLAG_RTC_CLK | POWERPC_FLAG_BUS_CLK)) == 0) {
+    if ((env->flags & POWERPC_FLAG_BUS_CLK) == 0) {
         fprintf(stderr, "PowerPC flags inconsistency\n"
                 "Should define the time-base and decrementer clock source\n");
         exit(1);
@@ -8469,10 +6678,6 @@ static void init_ppc_proc(PowerPCCPU *cpu)
         /* Pre-compute some useful values */
         env->tlb_per_way = env->nb_tlb / env->nb_ways;
     }
-    if (env->irq_inputs == NULL) {
-        warn_report("no internal IRQ controller registered."
-                    " Attempt QEMU to crash very soon !");
-    }
 #endif
     if (env->check_pow == NULL) {
         warn_report("no power management check handler registered."
@@ -8576,7 +6781,7 @@ static gint ppc_cpu_compare_class_pvr_mask(gconstpointer a, gconstpointer b)
         return -1;
     }

-    if (pcc->pvr_match(pcc, pvr)) {
+    if (pcc->pvr_match(pcc, pvr, true)) {
         return 0;
     }

@@ -8630,6 +6835,21 @@ static ObjectClass *ppc_cpu_class_by_name(const char *name)
         }
     }

+    /*
+     * All ppc CPUs represent hardware that exists in the real world, i.e.: we
+     * do not have a "max" CPU with all possible emulated features enabled.
+     * Return the default CPU type for the machine because that has greater
+     * chance of being useful as the "max" CPU.
+ */ +#if !defined(CONFIG_USER_ONLY) + if (strcmp(name, "max") == 0) { + MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); + if (mc) { + return object_class_by_name(mc->default_cpu_type); + } + } +#endif + cpu_model = g_ascii_strdown(name, -1); p = ppc_cpu_lookup_alias(cpu_model); if (p) { @@ -8732,7 +6952,7 @@ void ppc_cpu_list(void) #ifdef CONFIG_KVM qemu_printf("\n"); - qemu_printf("PowerPC %-16s\n", "host"); + qemu_printf("PowerPC %s\n", "host"); #endif } @@ -8788,12 +7008,27 @@ static void ppc_cpu_set_pc(CPUState *cs, vaddr value) cpu->env.nip = value; } -static bool ppc_cpu_has_work(CPUState *cs) +static vaddr ppc_cpu_get_pc(CPUState *cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; - return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD); + return cpu->env.nip; +} + +#ifdef CONFIG_TCG +static void ppc_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + + cpu->env.nip = data[0]; +} +#endif /* CONFIG_TCG */ + +static bool ppc_cpu_has_work(CPUState *cs) +{ + return cs->interrupt_request & CPU_INTERRUPT_HARD; } static void ppc_cpu_reset(DeviceState *dev) @@ -8809,8 +7044,6 @@ static void ppc_cpu_reset(DeviceState *dev) msr = (target_ulong)0; msr |= (target_ulong)MSR_HVB; - msr |= (target_ulong)0 << MSR_AP; /* TO BE CHECKED */ - msr |= (target_ulong)0 << MSR_SA; /* TO BE CHECKED */ msr |= (target_ulong)1 << MSR_EP; #if defined(DO_SINGLE_STEP) && 0 /* Single step trace mode */ @@ -8828,7 +7061,7 @@ static void ppc_cpu_reset(DeviceState *dev) #if defined(TARGET_PPC64) msr |= (target_ulong)1 << MSR_TM; /* Transactional memory */ #endif -#if !defined(TARGET_WORDS_BIGENDIAN) +#if !TARGET_BIG_ENDIAN msr |= (target_ulong)1 << MSR_LE; /* Little-endian user mode */ if (!((env->msr_mask >> MSR_LE) & 1)) { fprintf(stderr, "Selected CPU does not support little-endian.\n"); @@ -8847,13 +7080,17 @@ static void ppc_cpu_reset(DeviceState *dev) #if !defined(CONFIG_USER_ONLY) env->nip = env->hreset_vector | env->excp_prefix; -#if defined(CONFIG_TCG) - if (env->mmu_model != POWERPC_MMU_REAL) { - ppc_tlb_invalidate_all(env); - } -#endif /* CONFIG_TCG */ -#endif + if (tcg_enabled()) { + if (env->mmu_model != POWERPC_MMU_REAL) { + ppc_tlb_invalidate_all(env); + } + pmu_mmcr01_updated(env); + } + + /* clean any pending stop state */ + env->resume_as_sreset = 0; +#endif hreg_compute_hflags(env); env->reserve_addr = (target_ulong)-1ULL; /* Be sure no exception or interrupt is pending */ @@ -8885,7 +7122,7 @@ static bool ppc_cpu_is_big_endian(CPUState *cs) cpu_synchronize_state(cs); - return !msr_le; + return !FIELD_EX64(env->msr, MSR, LE); } #ifdef CONFIG_TCG @@ -8956,7 +7193,7 @@ static void ppc_cpu_instance_finalize(Object *obj) ppc_hash64_finalize(cpu); } -static bool ppc_pvr_match_default(PowerPCCPUClass *pcc, uint32_t pvr) +static bool ppc_pvr_match_default(PowerPCCPUClass *pcc, uint32_t pvr, bool best) { return pcc->pvr == pvr; } @@ -8977,8 +7214,6 @@ static void ppc_disas_set_info(CPUState *cs, disassemble_info *info) info->mach = bfd_mach_ppc; #endif } - info->disassembler_options = (char *)"any"; - info->print_insn = print_insn_ppc; info->cap_arch = CS_ARCH_PPC; #ifdef TARGET_PPC64 @@ -9012,10 +7247,13 @@ static const struct SysemuCPUOps ppc_sysemu_ops = { static const struct TCGCPUOps ppc_tcg_ops = { .initialize = ppc_translate_init, - .cpu_exec_interrupt = ppc_cpu_exec_interrupt, - .tlb_fill = ppc_cpu_tlb_fill, + .restore_state_to_opc = ppc_restore_state_to_opc, -#ifndef 
CONFIG_USER_ONLY +#ifdef CONFIG_USER_ONLY + .record_sigsegv = ppc_cpu_record_sigsegv, +#else + .tlb_fill = ppc_cpu_tlb_fill, + .cpu_exec_interrupt = ppc_cpu_exec_interrupt, .do_interrupt = ppc_cpu_do_interrupt, .cpu_exec_enter = ppc_cpu_exec_enter, .cpu_exec_exit = ppc_cpu_exec_exit, @@ -9043,6 +7281,7 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data) cc->has_work = ppc_cpu_has_work; cc->dump_state = ppc_cpu_dump_state; cc->set_pc = ppc_cpu_set_pc; + cc->get_pc = ppc_cpu_get_pc; cc->gdb_read_register = ppc_cpu_gdb_read_register; cc->gdb_write_register = ppc_cpu_gdb_write_register; #ifndef CONFIG_USER_ONLY @@ -9119,17 +7358,15 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags) "%08x iidx %d didx %d\n", env->msr, env->spr[SPR_HID0], env->hflags, cpu_mmu_index(env, true), cpu_mmu_index(env, false)); -#if !defined(NO_TIMER_DUMP) - qemu_fprintf(f, "TB %08" PRIu32 " %08" PRIu64 #if !defined(CONFIG_USER_ONLY) - " DECR " TARGET_FMT_lu -#endif - "\n", - cpu_ppc_load_tbu(env), cpu_ppc_load_tbl(env) -#if !defined(CONFIG_USER_ONLY) - , cpu_ppc_load_decr(env) -#endif - ); + if (env->tb_env) { + qemu_fprintf(f, "TB %08" PRIu32 " %08" PRIu64 + " DECR " TARGET_FMT_lu "\n", cpu_ppc_load_tbu(env), + cpu_ppc_load_tbl(env), cpu_ppc_load_decr(env)); + } +#else + qemu_fprintf(f, "TB %08" PRIu32 " %08" PRIu64 "\n", cpu_ppc_load_tbu(env), + cpu_ppc_load_tbl(env)); #endif for (i = 0; i < 32; i++) { if ((i & (RGPL - 1)) == 0) { @@ -9187,16 +7424,17 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags) env->spr[SPR_SPRG4], env->spr[SPR_SPRG5], env->spr[SPR_SPRG6], env->spr[SPR_SPRG7]); + switch (env->excp_model) { #if defined(TARGET_PPC64) - if (env->excp_model == POWERPC_EXCP_POWER7 || - env->excp_model == POWERPC_EXCP_POWER8 || - env->excp_model == POWERPC_EXCP_POWER9 || - env->excp_model == POWERPC_EXCP_POWER10) { + case POWERPC_EXCP_POWER7: + case POWERPC_EXCP_POWER8: + case POWERPC_EXCP_POWER9: + case POWERPC_EXCP_POWER10: qemu_fprintf(f, "HSRR0 " TARGET_FMT_lx " HSRR1 " TARGET_FMT_lx "\n", env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]); - } + break; #endif - if (env->excp_model == POWERPC_EXCP_BOOKE) { + case POWERPC_EXCP_BOOKE: qemu_fprintf(f, "CSRR0 " TARGET_FMT_lx " CSRR1 " TARGET_FMT_lx " MCSRR0 " TARGET_FMT_lx " MCSRR1 " TARGET_FMT_lx "\n", env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1], @@ -9227,6 +7465,20 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags) * IVORs are left out as they are large and do not change often -- * they can be read with "p $ivor0", "p $ivor1", etc. 
*/ + break; + case POWERPC_EXCP_40x: + qemu_fprintf(f, " TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx + " ESR " TARGET_FMT_lx " DEAR " TARGET_FMT_lx "\n", + env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR], + env->spr[SPR_40x_ESR], env->spr[SPR_40x_DEAR]); + + qemu_fprintf(f, " EVPR " TARGET_FMT_lx " SRR2 " TARGET_FMT_lx + " SRR3 " TARGET_FMT_lx " PID " TARGET_FMT_lx "\n", + env->spr[SPR_40x_EVPR], env->spr[SPR_40x_SRR2], + env->spr[SPR_40x_SRR3], env->spr[SPR_40x_PID]); + break; + default: + break; } #if defined(TARGET_PPC64) @@ -9241,9 +7493,7 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags) switch (env->mmu_model) { case POWERPC_MMU_32B: - case POWERPC_MMU_601: case POWERPC_MMU_SOFT_6xx: - case POWERPC_MMU_SOFT_74xx: #if defined(TARGET_PPC64) case POWERPC_MMU_64B: case POWERPC_MMU_2_03: diff --git a/target/ppc/dfp_helper.c b/target/ppc/dfp_helper.c index 07341a69f5..cc024316d5 100644 --- a/target/ppc/dfp_helper.c +++ b/target/ppc/dfp_helper.c @@ -42,13 +42,21 @@ static void get_dfp128(ppc_vsr_t *dst, ppc_fprp_t *dfp) static void set_dfp64(ppc_fprp_t *dfp, ppc_vsr_t *src) { - dfp->VsrD(0) = src->VsrD(1); + dfp[0].VsrD(0) = src->VsrD(1); + dfp[0].VsrD(1) = 0ULL; } static void set_dfp128(ppc_fprp_t *dfp, ppc_vsr_t *src) { dfp[0].VsrD(0) = src->VsrD(0); dfp[1].VsrD(0) = src->VsrD(1); + dfp[0].VsrD(1) = 0ULL; + dfp[1].VsrD(1) = 0ULL; +} + +static void set_dfp128_to_avr(ppc_avr_t *dst, ppc_vsr_t *src) +{ + *dst = *src; } struct PPC_DFP { @@ -440,8 +448,8 @@ static void ADD_PPs(struct PPC_DFP *dfp) dfp_check_for_VXISI_add(dfp); } -DFP_HELPER_TAB(dadd, decNumberAdd, ADD_PPs, 64) -DFP_HELPER_TAB(daddq, decNumberAdd, ADD_PPs, 128) +DFP_HELPER_TAB(DADD, decNumberAdd, ADD_PPs, 64) +DFP_HELPER_TAB(DADDQ, decNumberAdd, ADD_PPs, 128) static void SUB_PPs(struct PPC_DFP *dfp) { @@ -453,8 +461,8 @@ static void SUB_PPs(struct PPC_DFP *dfp) dfp_check_for_VXISI_subtract(dfp); } -DFP_HELPER_TAB(dsub, decNumberSubtract, SUB_PPs, 64) -DFP_HELPER_TAB(dsubq, decNumberSubtract, SUB_PPs, 128) +DFP_HELPER_TAB(DSUB, decNumberSubtract, SUB_PPs, 64) +DFP_HELPER_TAB(DSUBQ, decNumberSubtract, SUB_PPs, 128) static void MUL_PPs(struct PPC_DFP *dfp) { @@ -466,8 +474,8 @@ static void MUL_PPs(struct PPC_DFP *dfp) dfp_check_for_VXIMZ(dfp); } -DFP_HELPER_TAB(dmul, decNumberMultiply, MUL_PPs, 64) -DFP_HELPER_TAB(dmulq, decNumberMultiply, MUL_PPs, 128) +DFP_HELPER_TAB(DMUL, decNumberMultiply, MUL_PPs, 64) +DFP_HELPER_TAB(DMULQ, decNumberMultiply, MUL_PPs, 128) static void DIV_PPs(struct PPC_DFP *dfp) { @@ -481,8 +489,8 @@ static void DIV_PPs(struct PPC_DFP *dfp) dfp_check_for_VXIDI(dfp); } -DFP_HELPER_TAB(ddiv, decNumberDivide, DIV_PPs, 64) -DFP_HELPER_TAB(ddivq, decNumberDivide, DIV_PPs, 128) +DFP_HELPER_TAB(DDIV, decNumberDivide, DIV_PPs, 64) +DFP_HELPER_TAB(DDIVQ, decNumberDivide, DIV_PPs, 128) #define DFP_HELPER_BF_AB(op, dnop, postprocs, size) \ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ @@ -502,8 +510,8 @@ static void CMPU_PPs(struct PPC_DFP *dfp) dfp_check_for_VXSNAN(dfp); } -DFP_HELPER_BF_AB(dcmpu, decNumberCompare, CMPU_PPs, 64) -DFP_HELPER_BF_AB(dcmpuq, decNumberCompare, CMPU_PPs, 128) +DFP_HELPER_BF_AB(DCMPU, decNumberCompare, CMPU_PPs, 64) +DFP_HELPER_BF_AB(DCMPUQ, decNumberCompare, CMPU_PPs, 128) static void CMPO_PPs(struct PPC_DFP *dfp) { @@ -513,8 +521,8 @@ static void CMPO_PPs(struct PPC_DFP *dfp) dfp_check_for_VXVC(dfp); } -DFP_HELPER_BF_AB(dcmpo, decNumberCompare, CMPO_PPs, 64) -DFP_HELPER_BF_AB(dcmpoq, decNumberCompare, CMPO_PPs, 128) +DFP_HELPER_BF_AB(DCMPO, decNumberCompare, 
CMPO_PPs, 64) +DFP_HELPER_BF_AB(DCMPOQ, decNumberCompare, CMPO_PPs, 128) #define DFP_HELPER_TSTDC(op, size) \ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \ @@ -541,8 +549,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \ return dfp.crbf; \ } -DFP_HELPER_TSTDC(dtstdc, 64) -DFP_HELPER_TSTDC(dtstdcq, 128) +DFP_HELPER_TSTDC(DTSTDC, 64) +DFP_HELPER_TSTDC(DTSTDCQ, 128) #define DFP_HELPER_TSTDG(op, size) \ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \ @@ -596,8 +604,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \ return dfp.crbf; \ } -DFP_HELPER_TSTDG(dtstdg, 64) -DFP_HELPER_TSTDG(dtstdgq, 128) +DFP_HELPER_TSTDG(DTSTDG, 64) +DFP_HELPER_TSTDG(DTSTDGQ, 128) #define DFP_HELPER_TSTEX(op, size) \ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ @@ -628,8 +636,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ return dfp.crbf; \ } -DFP_HELPER_TSTEX(dtstex, 64) -DFP_HELPER_TSTEX(dtstexq, 128) +DFP_HELPER_TSTEX(DTSTEX, 64) +DFP_HELPER_TSTEX(DTSTEXQ, 128) #define DFP_HELPER_TSTSF(op, size) \ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ @@ -665,8 +673,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ return dfp.crbf; \ } -DFP_HELPER_TSTSF(dtstsf, 64) -DFP_HELPER_TSTSF(dtstsfq, 128) +DFP_HELPER_TSTSF(DTSTSF, 64) +DFP_HELPER_TSTSF(DTSTSFQ, 128) #define DFP_HELPER_TSTSFI(op, size) \ uint32_t helper_##op(CPUPPCState *env, uint32_t a, ppc_fprp_t *b) \ @@ -700,8 +708,8 @@ uint32_t helper_##op(CPUPPCState *env, uint32_t a, ppc_fprp_t *b) \ return dfp.crbf; \ } -DFP_HELPER_TSTSFI(dtstsfi, 64) -DFP_HELPER_TSTSFI(dtstsfiq, 128) +DFP_HELPER_TSTSFI(DTSTSFI, 64) +DFP_HELPER_TSTSFI(DTSTSFIQ, 128) static void QUA_PPs(struct PPC_DFP *dfp) { @@ -746,8 +754,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_QUAI(dquai, 64) -DFP_HELPER_QUAI(dquaiq, 128) +DFP_HELPER_QUAI(DQUAI, 64) +DFP_HELPER_QUAI(DQUAIQ, 128) #define DFP_HELPER_QUA(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ @@ -764,8 +772,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_QUA(dqua, 64) -DFP_HELPER_QUA(dquaq, 128) +DFP_HELPER_QUA(DQUA, 64) +DFP_HELPER_QUA(DQUAQ, 128) static void _dfp_reround(uint8_t rmc, int32_t ref_sig, int32_t xmax, struct PPC_DFP *dfp) @@ -842,8 +850,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_RRND(drrnd, 64) -DFP_HELPER_RRND(drrndq, 128) +DFP_HELPER_RRND(DRRND, 64) +DFP_HELPER_RRND(DRRNDQ, 128) #define DFP_HELPER_RINT(op, postprocs, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ @@ -868,8 +876,8 @@ static void RINTX_PPs(struct PPC_DFP *dfp) dfp_check_for_VXSNAN(dfp); } -DFP_HELPER_RINT(drintx, RINTX_PPs, 64) -DFP_HELPER_RINT(drintxq, RINTX_PPs, 128) +DFP_HELPER_RINT(DRINTX, RINTX_PPs, 64) +DFP_HELPER_RINT(DRINTXQ, RINTX_PPs, 128) static void RINTN_PPs(struct PPC_DFP *dfp) { @@ -877,10 +885,10 @@ static void RINTN_PPs(struct PPC_DFP *dfp) dfp_check_for_VXSNAN(dfp); } -DFP_HELPER_RINT(drintn, RINTN_PPs, 64) -DFP_HELPER_RINT(drintnq, RINTN_PPs, 128) +DFP_HELPER_RINT(DRINTN, RINTN_PPs, 64) +DFP_HELPER_RINT(DRINTNQ, RINTN_PPs, 128) -void helper_dctdp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) +void helper_DCTDP(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) { struct PPC_DFP dfp; 
ppc_vsr_t vb; @@ -896,7 +904,7 @@ void helper_dctdp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) dfp_set_FPRF_from_FRT(&dfp); } -void helper_dctqpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) +void helper_DCTQPQ(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) { struct PPC_DFP dfp; ppc_vsr_t vb; @@ -911,7 +919,7 @@ void helper_dctqpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) set_dfp128(t, &dfp.vt); } -void helper_drsp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) +void helper_DRSP(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) { struct PPC_DFP dfp; uint32_t t_short = 0; @@ -929,7 +937,7 @@ void helper_drsp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) set_dfp64(t, &vt); } -void helper_drdpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) +void helper_DRDPQ(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) { struct PPC_DFP dfp; dfp_prepare_decimal128(&dfp, 0, b, env); @@ -967,8 +975,20 @@ static void CFFIX_PPs(struct PPC_DFP *dfp) dfp_check_for_XX(dfp); } -DFP_HELPER_CFFIX(dcffix, 64) -DFP_HELPER_CFFIX(dcffixq, 128) +DFP_HELPER_CFFIX(DCFFIX, 64) +DFP_HELPER_CFFIX(DCFFIXQ, 128) + +void helper_DCFFIXQQ(CPUPPCState *env, ppc_fprp_t *t, ppc_avr_t *b) +{ + struct PPC_DFP dfp; + + dfp_prepare_decimal128(&dfp, NULL, NULL, env); + decNumberFromInt128(&dfp.t, (uint64_t)b->VsrD(1), (int64_t)b->VsrD(0)); + dfp_finalize_decimal128(&dfp); + CFFIX_PPs(&dfp); + + set_dfp128(t, &dfp.vt); +} #define DFP_HELPER_CTFIX(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ @@ -1005,8 +1025,55 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ set_dfp64(t, &dfp.vt); \ } -DFP_HELPER_CTFIX(dctfix, 64) -DFP_HELPER_CTFIX(dctfixq, 128) +DFP_HELPER_CTFIX(DCTFIX, 64) +DFP_HELPER_CTFIX(DCTFIXQ, 128) + +void helper_DCTFIXQQ(CPUPPCState *env, ppc_avr_t *t, ppc_fprp_t *b) +{ + struct PPC_DFP dfp; + dfp_prepare_decimal128(&dfp, 0, b, env); + + if (unlikely(decNumberIsSpecial(&dfp.b))) { + uint64_t invalid_flags = FP_VX | FP_VXCVI; + if (decNumberIsInfinite(&dfp.b)) { + if (decNumberIsNegative(&dfp.b)) { + dfp.vt.VsrD(0) = INT64_MIN; + dfp.vt.VsrD(1) = 0; + } else { + dfp.vt.VsrD(0) = INT64_MAX; + dfp.vt.VsrD(1) = UINT64_MAX; + } + } else { /* NaN */ + dfp.vt.VsrD(0) = INT64_MIN; + dfp.vt.VsrD(1) = 0; + if (decNumberIsSNaN(&dfp.b)) { + invalid_flags |= FP_VXSNAN; + } + } + dfp_set_FPSCR_flag(&dfp, invalid_flags, FP_VE); + } else if (unlikely(decNumberIsZero(&dfp.b))) { + dfp.vt.VsrD(0) = 0; + dfp.vt.VsrD(1) = 0; + } else { + decNumberToIntegralExact(&dfp.b, &dfp.b, &dfp.context); + decNumberIntegralToInt128(&dfp.b, &dfp.context, + &dfp.vt.VsrD(1), &dfp.vt.VsrD(0)); + if (decContextTestStatus(&dfp.context, DEC_Invalid_operation)) { + if (decNumberIsNegative(&dfp.b)) { + dfp.vt.VsrD(0) = INT64_MIN; + dfp.vt.VsrD(1) = 0; + } else { + dfp.vt.VsrD(0) = INT64_MAX; + dfp.vt.VsrD(1) = UINT64_MAX; + } + dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FP_VE); + } else { + dfp_check_for_XX(&dfp); + } + } + + set_dfp128_to_avr(t, &dfp.vt); +} static inline void dfp_set_bcd_digit_64(ppc_vsr_t *t, uint8_t digit, unsigned n) @@ -1067,8 +1134,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_DEDPD(ddedpd, 64) -DFP_HELPER_DEDPD(ddedpdq, 128) +DFP_HELPER_DEDPD(DDEDPD, 64) +DFP_HELPER_DEDPD(DDEDPDQ, 128) static inline uint8_t dfp_get_bcd_digit_64(ppc_vsr_t *t, unsigned n) { @@ -1080,6 +1147,26 @@ static inline uint8_t dfp_get_bcd_digit_128(ppc_vsr_t *t, unsigned n) return t->VsrD((n & 0x10) ? 
0 : 1) >> ((n << 2) & 63) & 15; } +static inline void dfp_invalid_op_vxcvi_64(struct PPC_DFP *dfp) +{ + /* TODO: fpscr is incorrectly not being saved to env */ + dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FPSCR_VE); + if ((dfp->env->fpscr & FP_VE) == 0) { + dfp->vt.VsrD(1) = 0x7c00000000000000; /* QNaN */ + } +} + + +static inline void dfp_invalid_op_vxcvi_128(struct PPC_DFP *dfp) +{ + /* TODO: fpscr is incorrectly not being saved to env */ + dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FPSCR_VE); + if ((dfp->env->fpscr & FP_VE) == 0) { + dfp->vt.VsrD(0) = 0x7c00000000000000; /* QNaN */ + dfp->vt.VsrD(1) = 0x0; + } +} + #define DFP_HELPER_ENBCD(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ uint32_t s) \ @@ -1106,7 +1193,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ sgn = 0; \ break; \ default: \ - dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \ + dfp_invalid_op_vxcvi_##size(&dfp); \ + set_dfp##size(t, &dfp.vt); \ return; \ } \ } \ @@ -1116,7 +1204,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ digits[(size) / 4 - n] = dfp_get_bcd_digit_##size(&dfp.vb, \ offset++); \ if (digits[(size) / 4 - n] > 10) { \ - dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \ + dfp_invalid_op_vxcvi_##size(&dfp); \ + set_dfp##size(t, &dfp.vt); \ return; \ } else { \ nonzero |= (digits[(size) / 4 - n] > 0); \ @@ -1135,8 +1224,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_ENBCD(denbcd, 64) -DFP_HELPER_ENBCD(denbcdq, 128) +DFP_HELPER_ENBCD(DENBCD, 64) +DFP_HELPER_ENBCD(DENBCDQ, 128) #define DFP_HELPER_XEX(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ @@ -1169,8 +1258,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ } \ } -DFP_HELPER_XEX(dxex, 64) -DFP_HELPER_XEX(dxexq, 128) +DFP_HELPER_XEX(DXEX, 64) +DFP_HELPER_XEX(DXEXQ, 128) static void dfp_set_raw_exp_64(ppc_vsr_t *t, uint64_t raw) { @@ -1235,8 +1324,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_IEX(diex, 64) -DFP_HELPER_IEX(diexq, 128) +DFP_HELPER_IEX(DIEX, 64) +DFP_HELPER_IEX(DIEXQ, 128) static void dfp_clear_lmd_from_g5msb(uint64_t *t) { @@ -1323,7 +1412,72 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_SHIFT(dscli, 64, 1) -DFP_HELPER_SHIFT(dscliq, 128, 1) -DFP_HELPER_SHIFT(dscri, 64, 0) -DFP_HELPER_SHIFT(dscriq, 128, 0) +DFP_HELPER_SHIFT(DSCLI, 64, 1) +DFP_HELPER_SHIFT(DSCLIQ, 128, 1) +DFP_HELPER_SHIFT(DSCRI, 64, 0) +DFP_HELPER_SHIFT(DSCRIQ, 128, 0) + +target_ulong helper_CDTBCD(target_ulong s) +{ + uint64_t res = 0; + uint32_t dec32, declets; + uint8_t bcd[6]; + int i, w, sh; + decNumber a; + + for (w = 1; w >= 0; w--) { + res <<= 32; + declets = extract64(s, 32 * w, 20); + if (declets) { + /* decimal32 with zero exponent and word "w" declets */ + dec32 = (0x225ULL << 20) | declets; + decimal32ToNumber((decimal32 *)&dec32, &a); + decNumberGetBCD(&a, bcd); + for (i = 0; i < a.digits; i++) { + sh = 4 * (a.digits - 1 - i); + res |= (uint64_t)bcd[i] << sh; + } + } + } + + return res; +} + +target_ulong helper_CBCDTD(target_ulong s) +{ + uint64_t res = 0; + uint32_t dec32; + uint8_t bcd[6]; + int w, i, offs; + decNumber a; + decContext context; + + decContextDefault(&context, DEC_INIT_DECIMAL32); + + for (w = 1; w >= 0; w--) { + res <<= 32; + decNumberZero(&a); + /* Extract each BCD field of word "w" */ + for (i = 
5; i >= 0; i--) { + offs = 4 * (5 - i) + 32 * w; + bcd[i] = extract64(s, offs, 4); + if (bcd[i] > 9) { + /* + * If the field value is greater than 9, the results are + * undefined. We could use a fixed value like 0 or 9, but + * an and with 9 seems to better match the hardware behavior. + */ + bcd[i] &= 9; + } + } + + /* Create a decNumber with the BCD values and convert to decimal32 */ + decNumberSetBCD(&a, bcd, 6); + decimal32FromNumber((decimal32 *)&dec32, &a, &context); + + /* Extract the two declets from the decimal32 value */ + res |= dec32 & 0xfffff; + } + + return res; +} diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index a79a0ed465..6cf88f635a 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -18,47 +18,95 @@ */ #include "qemu/osdep.h" #include "qemu/main-loop.h" +#include "qemu/log.h" #include "cpu.h" #include "exec/exec-all.h" #include "internal.h" #include "helper_regs.h" +#include "hw/ppc/ppc.h" + +#include "trace.h" #ifdef CONFIG_TCG #include "exec/helper-proto.h" #include "exec/cpu_ldst.h" #endif -/* #define DEBUG_OP */ -/* #define DEBUG_SOFTWARE_TLB */ -/* #define DEBUG_EXCEPTIONS */ - -#ifdef DEBUG_EXCEPTIONS -# define LOG_EXCP(...) qemu_log(__VA_ARGS__) -#else -# define LOG_EXCP(...) do { } while (0) -#endif - /*****************************************************************************/ /* Exception processing */ -#if defined(CONFIG_USER_ONLY) -void ppc_cpu_do_interrupt(CPUState *cs) -{ - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; +#if !defined(CONFIG_USER_ONLY) - cs->exception_index = POWERPC_EXCP_NONE; - env->error_code = 0; +static const char *powerpc_excp_name(int excp) +{ + switch (excp) { + case POWERPC_EXCP_CRITICAL: return "CRITICAL"; + case POWERPC_EXCP_MCHECK: return "MCHECK"; + case POWERPC_EXCP_DSI: return "DSI"; + case POWERPC_EXCP_ISI: return "ISI"; + case POWERPC_EXCP_EXTERNAL: return "EXTERNAL"; + case POWERPC_EXCP_ALIGN: return "ALIGN"; + case POWERPC_EXCP_PROGRAM: return "PROGRAM"; + case POWERPC_EXCP_FPU: return "FPU"; + case POWERPC_EXCP_SYSCALL: return "SYSCALL"; + case POWERPC_EXCP_APU: return "APU"; + case POWERPC_EXCP_DECR: return "DECR"; + case POWERPC_EXCP_FIT: return "FIT"; + case POWERPC_EXCP_WDT: return "WDT"; + case POWERPC_EXCP_DTLB: return "DTLB"; + case POWERPC_EXCP_ITLB: return "ITLB"; + case POWERPC_EXCP_DEBUG: return "DEBUG"; + case POWERPC_EXCP_SPEU: return "SPEU"; + case POWERPC_EXCP_EFPDI: return "EFPDI"; + case POWERPC_EXCP_EFPRI: return "EFPRI"; + case POWERPC_EXCP_EPERFM: return "EPERFM"; + case POWERPC_EXCP_DOORI: return "DOORI"; + case POWERPC_EXCP_DOORCI: return "DOORCI"; + case POWERPC_EXCP_GDOORI: return "GDOORI"; + case POWERPC_EXCP_GDOORCI: return "GDOORCI"; + case POWERPC_EXCP_HYPPRIV: return "HYPPRIV"; + case POWERPC_EXCP_RESET: return "RESET"; + case POWERPC_EXCP_DSEG: return "DSEG"; + case POWERPC_EXCP_ISEG: return "ISEG"; + case POWERPC_EXCP_HDECR: return "HDECR"; + case POWERPC_EXCP_TRACE: return "TRACE"; + case POWERPC_EXCP_HDSI: return "HDSI"; + case POWERPC_EXCP_HISI: return "HISI"; + case POWERPC_EXCP_HDSEG: return "HDSEG"; + case POWERPC_EXCP_HISEG: return "HISEG"; + case POWERPC_EXCP_VPU: return "VPU"; + case POWERPC_EXCP_PIT: return "PIT"; + case POWERPC_EXCP_EMUL: return "EMUL"; + case POWERPC_EXCP_IFTLB: return "IFTLB"; + case POWERPC_EXCP_DLTLB: return "DLTLB"; + case POWERPC_EXCP_DSTLB: return "DSTLB"; + case POWERPC_EXCP_FPA: return "FPA"; + case POWERPC_EXCP_DABR: return "DABR"; + case POWERPC_EXCP_IABR: return "IABR"; + case 
POWERPC_EXCP_SMI: return "SMI"; + case POWERPC_EXCP_PERFM: return "PERFM"; + case POWERPC_EXCP_THERM: return "THERM"; + case POWERPC_EXCP_VPUA: return "VPUA"; + case POWERPC_EXCP_SOFTP: return "SOFTP"; + case POWERPC_EXCP_MAINT: return "MAINT"; + case POWERPC_EXCP_MEXTBR: return "MEXTBR"; + case POWERPC_EXCP_NMEXTBR: return "NMEXTBR"; + case POWERPC_EXCP_ITLBE: return "ITLBE"; + case POWERPC_EXCP_DTLBE: return "DTLBE"; + case POWERPC_EXCP_VSXU: return "VSXU"; + case POWERPC_EXCP_FU: return "FU"; + case POWERPC_EXCP_HV_EMU: return "HV_EMU"; + case POWERPC_EXCP_HV_MAINT: return "HV_MAINT"; + case POWERPC_EXCP_HV_FU: return "HV_FU"; + case POWERPC_EXCP_SDOOR: return "SDOOR"; + case POWERPC_EXCP_SDOOR_HV: return "SDOOR_HV"; + case POWERPC_EXCP_HVIRT: return "HVIRT"; + case POWERPC_EXCP_SYSCALL_VECTORED: return "SYSCALL_VECTORED"; + default: + g_assert_not_reached(); + } } -static void ppc_hw_interrupt(CPUPPCState *env) -{ - CPUState *cs = env_cpu(env); - - cs->exception_index = POWERPC_EXCP_NONE; - env->error_code = 0; -} -#else /* defined(CONFIG_USER_ONLY) */ -static inline void dump_syscall(CPUPPCState *env) +static void dump_syscall(CPUPPCState *env) { qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64 @@ -70,7 +118,7 @@ static inline void dump_syscall(CPUPPCState *env) ppc_dump_gpr(env, 8), env->nip); } -static inline void dump_hcall(CPUPPCState *env) +static void dump_hcall(CPUPPCState *env) { qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64 @@ -85,6 +133,39 @@ static inline void dump_hcall(CPUPPCState *env) env->nip); } +static void ppc_excp_debug_sw_tlb(CPUPPCState *env, int excp) +{ + const char *es; + target_ulong *miss, *cmp; + int en; + + if (!qemu_loglevel_mask(CPU_LOG_MMU)) { + return; + } + + if (excp == POWERPC_EXCP_IFTLB) { + es = "I"; + en = 'I'; + miss = &env->spr[SPR_IMISS]; + cmp = &env->spr[SPR_ICMP]; + } else { + if (excp == POWERPC_EXCP_DLTLB) { + es = "DL"; + } else { + es = "DS"; + } + en = 'D'; + miss = &env->spr[SPR_DMISS]; + cmp = &env->spr[SPR_DCMP]; + } + qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC " + TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 " + TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp, + env->spr[SPR_HASH1], env->spr[SPR_HASH2], + env->error_code); +} + +#if defined(TARGET_PPC64) static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp, target_ulong *msr) { @@ -183,12 +264,10 @@ static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp, * | a | h | 11 | 1 | 1 | h | * +--------------------------------------------------------------------+ */ -static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp, - target_ulong msr, - target_ulong *new_msr, - target_ulong *vector) +static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp, target_ulong msr, + target_ulong *new_msr, target_ulong *vector) { -#if defined(TARGET_PPC64) + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); CPUPPCState *env = &cpu->env; bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1); bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB); @@ -201,8 +280,13 @@ static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp, return; } - if (excp_model == POWERPC_EXCP_POWER8 || - excp_model == POWERPC_EXCP_POWER9) { + if (!(pcc->lpcr_mask & LPCR_AIL)) { + /* This CPU does not have AIL */ + return; + } + + /* P8 & P9 */ + if (!(pcc->lpcr_mask & LPCR_HAIL)) { if (!mmu_all_on) { /* AIL 
only works if MSR[IR] and MSR[DR] are both enabled. */ return; @@ -225,7 +309,8 @@ static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp, return; } - } else if (excp_model == POWERPC_EXCP_POWER10) { + /* P10 and up */ + } else { if (!mmu_all_on && !hv_escalation) { /* * AIL works for HV interrupts even with guest MSR[IR/DR] disabled. @@ -250,9 +335,6 @@ static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp, /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */ return; } - } else { - /* Other processors do not support AIL */ - return; } /* @@ -277,15 +359,26 @@ static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp, *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */ } } -#endif } +#endif -static inline void powerpc_set_excp_state(PowerPCCPU *cpu, - target_ulong vector, target_ulong msr) +static void powerpc_reset_excp_state(PowerPCCPU *cpu) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; + /* Reset exception state */ + cs->exception_index = POWERPC_EXCP_NONE; + env->error_code = 0; +} + +static void powerpc_set_excp_state(PowerPCCPU *cpu, target_ulong vector, + target_ulong msr) +{ + CPUPPCState *env = &cpu->env; + + assert((msr & env->msr_mask) == msr); + /* * We don't use hreg_store_msr here as already have treated any * special case that could occur. Just store MSR and update hflags @@ -294,44 +387,957 @@ static inline void powerpc_set_excp_state(PowerPCCPU *cpu, * will prevent setting of the HV bit which some exceptions might need * to do. */ - env->msr = msr & env->msr_mask; - hreg_compute_hflags(env); env->nip = vector; - /* Reset exception state */ - cs->exception_index = POWERPC_EXCP_NONE; - env->error_code = 0; + env->msr = msr; + hreg_compute_hflags(env); + ppc_maybe_interrupt(env); - /* Reset the reservation */ - env->reserve_addr = -1; + powerpc_reset_excp_state(cpu); /* * Any interrupt is context synchronizing, check if TCG TLB needs * a delayed flush on ppc64 */ check_tlb_flush(env, false); + + /* Reset the reservation */ + env->reserve_addr = -1; } -/* - * Note that this function should be greatly optimized when called - * with a constant excp, from ppc_hw_interrupt - */ -static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) +static void powerpc_excp_40x(PowerPCCPU *cpu, int excp) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; target_ulong msr, new_msr, vector; - int srr0, srr1, asrr0, asrr1, lev = -1; - - qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx - " => %08x (%02x)\n", env->nip, excp, env->error_code); + int srr0, srr1; /* new srr1 value excluding must-be-zero bits */ - if (excp_model == POWERPC_EXCP_BOOKE) { - msr = env->msr; - } else { - msr = env->msr & ~0x783f0000ULL; + msr = env->msr & ~0x783f0000ULL; + + /* + * new interrupt handler msr preserves existing ME unless + * explicitly overriden. + */ + new_msr = env->msr & (((target_ulong)1 << MSR_ME)); + + /* target registers */ + srr0 = SPR_SRR0; + srr1 = SPR_SRR1; + + /* + * Hypervisor emulation assistance interrupt only exists on server + * arch 2.05 server or later. 
+ */ + if (excp == POWERPC_EXCP_HV_EMU) { + excp = POWERPC_EXCP_PROGRAM; } + vector = env->excp_vectors[excp]; + if (vector == (target_ulong)-1ULL) { + cpu_abort(cs, "Raised an exception without defined vector %d\n", + excp); + } + + vector |= env->excp_prefix; + + switch (excp) { + case POWERPC_EXCP_CRITICAL: /* Critical input */ + srr0 = SPR_40x_SRR2; + srr1 = SPR_40x_SRR3; + break; + case POWERPC_EXCP_MCHECK: /* Machine check exception */ + if (!FIELD_EX64(env->msr, MSR, ME)) { + /* + * Machine check exception is not enabled. Enter + * checkstop state. + */ + fprintf(stderr, "Machine check while not allowed. " + "Entering checkstop state\n"); + if (qemu_log_separate()) { + qemu_log("Machine check while not allowed. " + "Entering checkstop state\n"); + } + cs->halted = 1; + cpu_interrupt_exittb(cs); + } + + /* machine check exceptions don't have ME set */ + new_msr &= ~((target_ulong)1 << MSR_ME); + + srr0 = SPR_40x_SRR2; + srr1 = SPR_40x_SRR3; + break; + case POWERPC_EXCP_DSI: /* Data storage exception */ + trace_ppc_excp_dsi(env->spr[SPR_40x_ESR], env->spr[SPR_40x_DEAR]); + break; + case POWERPC_EXCP_ISI: /* Instruction storage exception */ + trace_ppc_excp_isi(msr, env->nip); + break; + case POWERPC_EXCP_EXTERNAL: /* External input */ + break; + case POWERPC_EXCP_ALIGN: /* Alignment exception */ + break; + case POWERPC_EXCP_PROGRAM: /* Program exception */ + switch (env->error_code & ~0xF) { + case POWERPC_EXCP_FP: + if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) { + trace_ppc_excp_fp_ignore(); + powerpc_reset_excp_state(cpu); + return; + } + env->spr[SPR_40x_ESR] = ESR_FP; + break; + case POWERPC_EXCP_INVAL: + trace_ppc_excp_inval(env->nip); + env->spr[SPR_40x_ESR] = ESR_PIL; + break; + case POWERPC_EXCP_PRIV: + env->spr[SPR_40x_ESR] = ESR_PPR; + break; + case POWERPC_EXCP_TRAP: + env->spr[SPR_40x_ESR] = ESR_PTR; + break; + default: + cpu_abort(cs, "Invalid program exception %d. Aborting\n", + env->error_code); + break; + } + break; + case POWERPC_EXCP_SYSCALL: /* System call exception */ + dump_syscall(env); + + /* + * We need to correct the NIP which in this case is supposed + * to point to the next instruction + */ + env->nip += 4; + break; + case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ + trace_ppc_excp_print("FIT"); + break; + case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ + trace_ppc_excp_print("WDT"); + break; + case POWERPC_EXCP_DTLB: /* Data TLB error */ + case POWERPC_EXCP_ITLB: /* Instruction TLB error */ + break; + case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */ + trace_ppc_excp_print("PIT"); + break; + case POWERPC_EXCP_DEBUG: /* Debug interrupt */ + cpu_abort(cs, "%s exception not implemented\n", + powerpc_excp_name(excp)); + break; + default: + cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); + break; + } + + /* Save PC */ + env->spr[srr0] = env->nip; + + /* Save MSR */ + env->spr[srr1] = msr; + + powerpc_set_excp_state(cpu, vector, new_msr); +} + +static void powerpc_excp_6xx(PowerPCCPU *cpu, int excp) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + target_ulong msr, new_msr, vector; + + /* new srr1 value excluding must-be-zero bits */ + msr = env->msr & ~0x783f0000ULL; + + /* + * new interrupt handler msr preserves existing ME unless + * explicitly overriden + */ + new_msr = env->msr & ((target_ulong)1 << MSR_ME); + + /* + * Hypervisor emulation assistance interrupt only exists on server + * arch 2.05 server or later. 
+ */ + if (excp == POWERPC_EXCP_HV_EMU) { + excp = POWERPC_EXCP_PROGRAM; + } + + vector = env->excp_vectors[excp]; + if (vector == (target_ulong)-1ULL) { + cpu_abort(cs, "Raised an exception without defined vector %d\n", + excp); + } + + vector |= env->excp_prefix; + + switch (excp) { + case POWERPC_EXCP_CRITICAL: /* Critical input */ + break; + case POWERPC_EXCP_MCHECK: /* Machine check exception */ + if (!FIELD_EX64(env->msr, MSR, ME)) { + /* + * Machine check exception is not enabled. Enter + * checkstop state. + */ + fprintf(stderr, "Machine check while not allowed. " + "Entering checkstop state\n"); + if (qemu_log_separate()) { + qemu_log("Machine check while not allowed. " + "Entering checkstop state\n"); + } + cs->halted = 1; + cpu_interrupt_exittb(cs); + } + + /* machine check exceptions don't have ME set */ + new_msr &= ~((target_ulong)1 << MSR_ME); + + break; + case POWERPC_EXCP_DSI: /* Data storage exception */ + trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]); + break; + case POWERPC_EXCP_ISI: /* Instruction storage exception */ + trace_ppc_excp_isi(msr, env->nip); + msr |= env->error_code; + break; + case POWERPC_EXCP_EXTERNAL: /* External input */ + break; + case POWERPC_EXCP_ALIGN: /* Alignment exception */ + /* Get rS/rD and rA from faulting opcode */ + /* + * Note: the opcode fields will not be set properly for a + * direct store load/store, but nobody cares as nobody + * actually uses direct store segments. + */ + env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16; + break; + case POWERPC_EXCP_PROGRAM: /* Program exception */ + switch (env->error_code & ~0xF) { + case POWERPC_EXCP_FP: + if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) { + trace_ppc_excp_fp_ignore(); + powerpc_reset_excp_state(cpu); + return; + } + + /* + * FP exceptions always have NIP pointing to the faulting + * instruction, so always use store_next and claim we are + * precise in the MSR. + */ + msr |= 0x00100000; + break; + case POWERPC_EXCP_INVAL: + trace_ppc_excp_inval(env->nip); + msr |= 0x00080000; + break; + case POWERPC_EXCP_PRIV: + msr |= 0x00040000; + break; + case POWERPC_EXCP_TRAP: + msr |= 0x00020000; + break; + default: + /* Should never occur */ + cpu_abort(cs, "Invalid program exception %d. 
Aborting\n", + env->error_code); + break; + } + break; + case POWERPC_EXCP_SYSCALL: /* System call exception */ + dump_syscall(env); + + /* + * We need to correct the NIP which in this case is supposed + * to point to the next instruction + */ + env->nip += 4; + break; + case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ + case POWERPC_EXCP_DECR: /* Decrementer exception */ + break; + case POWERPC_EXCP_DTLB: /* Data TLB error */ + case POWERPC_EXCP_ITLB: /* Instruction TLB error */ + break; + case POWERPC_EXCP_RESET: /* System reset exception */ + if (FIELD_EX64(env->msr, MSR, POW)) { + cpu_abort(cs, "Trying to deliver power-saving system reset " + "exception %d with no HV support\n", excp); + } + break; + case POWERPC_EXCP_TRACE: /* Trace exception */ + break; + case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ + case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ + case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ + /* Swap temporary saved registers with GPRs */ + if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) { + new_msr |= (target_ulong)1 << MSR_TGPR; + hreg_swap_gpr_tgpr(env); + } + + ppc_excp_debug_sw_tlb(env, excp); + + msr |= env->crf[0] << 28; + msr |= env->error_code; /* key, D/I, S/L bits */ + /* Set way using a LRU mechanism */ + msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17; + break; + case POWERPC_EXCP_FPA: /* Floating-point assist exception */ + case POWERPC_EXCP_DABR: /* Data address breakpoint */ + case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ + case POWERPC_EXCP_SMI: /* System management interrupt */ + case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */ + case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */ + cpu_abort(cs, "%s exception not implemented\n", + powerpc_excp_name(excp)); + break; + default: + cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); + break; + } + + /* + * Sort out endianness of interrupt, this differs depending on the + * CPU, the HV mode, etc... + */ + if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) { + new_msr |= (target_ulong)1 << MSR_LE; + } + + /* Save PC */ + env->spr[SPR_SRR0] = env->nip; + + /* Save MSR */ + env->spr[SPR_SRR1] = msr; + + powerpc_set_excp_state(cpu, vector, new_msr); +} + +static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + target_ulong msr, new_msr, vector; + + /* new srr1 value excluding must-be-zero bits */ + msr = env->msr & ~0x783f0000ULL; + + /* + * new interrupt handler msr preserves existing ME unless + * explicitly overriden + */ + new_msr = env->msr & ((target_ulong)1 << MSR_ME); + + /* + * Hypervisor emulation assistance interrupt only exists on server + * arch 2.05 server or later. + */ + if (excp == POWERPC_EXCP_HV_EMU) { + excp = POWERPC_EXCP_PROGRAM; + } + + vector = env->excp_vectors[excp]; + if (vector == (target_ulong)-1ULL) { + cpu_abort(cs, "Raised an exception without defined vector %d\n", + excp); + } + + vector |= env->excp_prefix; + + switch (excp) { + case POWERPC_EXCP_MCHECK: /* Machine check exception */ + if (!FIELD_EX64(env->msr, MSR, ME)) { + /* + * Machine check exception is not enabled. Enter + * checkstop state. + */ + fprintf(stderr, "Machine check while not allowed. " + "Entering checkstop state\n"); + if (qemu_log_separate()) { + qemu_log("Machine check while not allowed. 
" + "Entering checkstop state\n"); + } + cs->halted = 1; + cpu_interrupt_exittb(cs); + } + + /* machine check exceptions don't have ME set */ + new_msr &= ~((target_ulong)1 << MSR_ME); + + break; + case POWERPC_EXCP_DSI: /* Data storage exception */ + trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]); + break; + case POWERPC_EXCP_ISI: /* Instruction storage exception */ + trace_ppc_excp_isi(msr, env->nip); + msr |= env->error_code; + break; + case POWERPC_EXCP_EXTERNAL: /* External input */ + break; + case POWERPC_EXCP_ALIGN: /* Alignment exception */ + /* Get rS/rD and rA from faulting opcode */ + /* + * Note: the opcode fields will not be set properly for a + * direct store load/store, but nobody cares as nobody + * actually uses direct store segments. + */ + env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16; + break; + case POWERPC_EXCP_PROGRAM: /* Program exception */ + switch (env->error_code & ~0xF) { + case POWERPC_EXCP_FP: + if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) { + trace_ppc_excp_fp_ignore(); + powerpc_reset_excp_state(cpu); + return; + } + + /* + * FP exceptions always have NIP pointing to the faulting + * instruction, so always use store_next and claim we are + * precise in the MSR. + */ + msr |= 0x00100000; + break; + case POWERPC_EXCP_INVAL: + trace_ppc_excp_inval(env->nip); + msr |= 0x00080000; + break; + case POWERPC_EXCP_PRIV: + msr |= 0x00040000; + break; + case POWERPC_EXCP_TRAP: + msr |= 0x00020000; + break; + default: + /* Should never occur */ + cpu_abort(cs, "Invalid program exception %d. Aborting\n", + env->error_code); + break; + } + break; + case POWERPC_EXCP_SYSCALL: /* System call exception */ + { + int lev = env->error_code; + + if (lev == 1 && cpu->vhyp) { + dump_hcall(env); + } else { + dump_syscall(env); + } + + /* + * We need to correct the NIP which in this case is supposed + * to point to the next instruction + */ + env->nip += 4; + + /* + * The Virtual Open Firmware (VOF) relies on the 'sc 1' + * instruction to communicate with QEMU. The pegasos2 machine + * uses VOF and the 7xx CPUs, so although the 7xx don't have + * HV mode, we need to keep hypercall support. + */ + if (lev == 1 && cpu->vhyp) { + PPCVirtualHypervisorClass *vhc = + PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); + vhc->hypercall(cpu->vhyp, cpu); + return; + } + + break; + } + case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ + case POWERPC_EXCP_DECR: /* Decrementer exception */ + break; + case POWERPC_EXCP_RESET: /* System reset exception */ + if (FIELD_EX64(env->msr, MSR, POW)) { + cpu_abort(cs, "Trying to deliver power-saving system reset " + "exception %d with no HV support\n", excp); + } + break; + case POWERPC_EXCP_TRACE: /* Trace exception */ + break; + case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ + case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ + case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ + ppc_excp_debug_sw_tlb(env, excp); + + msr |= env->crf[0] << 28; + msr |= env->error_code; /* key, D/I, S/L bits */ + /* Set way using a LRU mechanism */ + msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17; + + break; + case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ + case POWERPC_EXCP_SMI: /* System management interrupt */ + case POWERPC_EXCP_THERM: /* Thermal interrupt */ + case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */ + cpu_abort(cs, "%s exception not implemented\n", + powerpc_excp_name(excp)); + break; + default: + cpu_abort(cs, "Invalid PowerPC exception %d. 
Aborting\n", excp); + break; + } + + /* + * Sort out endianness of interrupt, this differs depending on the + * CPU, the HV mode, etc... + */ + if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) { + new_msr |= (target_ulong)1 << MSR_LE; + } + + /* Save PC */ + env->spr[SPR_SRR0] = env->nip; + + /* Save MSR */ + env->spr[SPR_SRR1] = msr; + + powerpc_set_excp_state(cpu, vector, new_msr); +} + +static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + target_ulong msr, new_msr, vector; + + /* new srr1 value excluding must-be-zero bits */ + msr = env->msr & ~0x783f0000ULL; + + /* + * new interrupt handler msr preserves existing ME unless + * explicitly overriden + */ + new_msr = env->msr & ((target_ulong)1 << MSR_ME); + + /* + * Hypervisor emulation assistance interrupt only exists on server + * arch 2.05 server or later. + */ + if (excp == POWERPC_EXCP_HV_EMU) { + excp = POWERPC_EXCP_PROGRAM; + } + + vector = env->excp_vectors[excp]; + if (vector == (target_ulong)-1ULL) { + cpu_abort(cs, "Raised an exception without defined vector %d\n", + excp); + } + + vector |= env->excp_prefix; + + switch (excp) { + case POWERPC_EXCP_MCHECK: /* Machine check exception */ + if (!FIELD_EX64(env->msr, MSR, ME)) { + /* + * Machine check exception is not enabled. Enter + * checkstop state. + */ + fprintf(stderr, "Machine check while not allowed. " + "Entering checkstop state\n"); + if (qemu_log_separate()) { + qemu_log("Machine check while not allowed. " + "Entering checkstop state\n"); + } + cs->halted = 1; + cpu_interrupt_exittb(cs); + } + + /* machine check exceptions don't have ME set */ + new_msr &= ~((target_ulong)1 << MSR_ME); + + break; + case POWERPC_EXCP_DSI: /* Data storage exception */ + trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]); + break; + case POWERPC_EXCP_ISI: /* Instruction storage exception */ + trace_ppc_excp_isi(msr, env->nip); + msr |= env->error_code; + break; + case POWERPC_EXCP_EXTERNAL: /* External input */ + break; + case POWERPC_EXCP_ALIGN: /* Alignment exception */ + /* Get rS/rD and rA from faulting opcode */ + /* + * Note: the opcode fields will not be set properly for a + * direct store load/store, but nobody cares as nobody + * actually uses direct store segments. + */ + env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16; + break; + case POWERPC_EXCP_PROGRAM: /* Program exception */ + switch (env->error_code & ~0xF) { + case POWERPC_EXCP_FP: + if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) { + trace_ppc_excp_fp_ignore(); + powerpc_reset_excp_state(cpu); + return; + } + + /* + * FP exceptions always have NIP pointing to the faulting + * instruction, so always use store_next and claim we are + * precise in the MSR. + */ + msr |= 0x00100000; + break; + case POWERPC_EXCP_INVAL: + trace_ppc_excp_inval(env->nip); + msr |= 0x00080000; + break; + case POWERPC_EXCP_PRIV: + msr |= 0x00040000; + break; + case POWERPC_EXCP_TRAP: + msr |= 0x00020000; + break; + default: + /* Should never occur */ + cpu_abort(cs, "Invalid program exception %d. 
Aborting\n", + env->error_code); + break; + } + break; + case POWERPC_EXCP_SYSCALL: /* System call exception */ + { + int lev = env->error_code; + + if ((lev == 1) && cpu->vhyp) { + dump_hcall(env); + } else { + dump_syscall(env); + } + + /* + * We need to correct the NIP which in this case is supposed + * to point to the next instruction + */ + env->nip += 4; + + /* + * The Virtual Open Firmware (VOF) relies on the 'sc 1' + * instruction to communicate with QEMU. The pegasos2 machine + * uses VOF and the 74xx CPUs, so although the 74xx don't have + * HV mode, we need to keep hypercall support. + */ + if ((lev == 1) && cpu->vhyp) { + PPCVirtualHypervisorClass *vhc = + PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); + vhc->hypercall(cpu->vhyp, cpu); + return; + } + + break; + } + case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ + case POWERPC_EXCP_DECR: /* Decrementer exception */ + break; + case POWERPC_EXCP_RESET: /* System reset exception */ + if (FIELD_EX64(env->msr, MSR, POW)) { + cpu_abort(cs, "Trying to deliver power-saving system reset " + "exception %d with no HV support\n", excp); + } + break; + case POWERPC_EXCP_TRACE: /* Trace exception */ + break; + case POWERPC_EXCP_VPU: /* Vector unavailable exception */ + break; + case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ + case POWERPC_EXCP_SMI: /* System management interrupt */ + case POWERPC_EXCP_THERM: /* Thermal interrupt */ + case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */ + case POWERPC_EXCP_VPUA: /* Vector assist exception */ + cpu_abort(cs, "%s exception not implemented\n", + powerpc_excp_name(excp)); + break; + default: + cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); + break; + } + + /* + * Sort out endianness of interrupt, this differs depending on the + * CPU, the HV mode, etc... + */ + if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) { + new_msr |= (target_ulong)1 << MSR_LE; + } + + /* Save PC */ + env->spr[SPR_SRR0] = env->nip; + + /* Save MSR */ + env->spr[SPR_SRR1] = msr; + + powerpc_set_excp_state(cpu, vector, new_msr); +} + +static void powerpc_excp_booke(PowerPCCPU *cpu, int excp) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + target_ulong msr, new_msr, vector; + int srr0, srr1; + + msr = env->msr; + + /* + * new interrupt handler msr preserves existing ME unless + * explicitly overriden + */ + new_msr = env->msr & ((target_ulong)1 << MSR_ME); + + /* target registers */ + srr0 = SPR_SRR0; + srr1 = SPR_SRR1; + + /* + * Hypervisor emulation assistance interrupt only exists on server + * arch 2.05 server or later. + */ + if (excp == POWERPC_EXCP_HV_EMU) { + excp = POWERPC_EXCP_PROGRAM; + } + +#ifdef TARGET_PPC64 + /* + * SPEU and VPU share the same IVOR but they exist in different + * processors. SPEU is e500v1/2 only and VPU is e6500 only. + */ + if (excp == POWERPC_EXCP_VPU) { + excp = POWERPC_EXCP_SPEU; + } +#endif + + vector = env->excp_vectors[excp]; + if (vector == (target_ulong)-1ULL) { + cpu_abort(cs, "Raised an exception without defined vector %d\n", + excp); + } + + vector |= env->excp_prefix; + + switch (excp) { + case POWERPC_EXCP_CRITICAL: /* Critical input */ + srr0 = SPR_BOOKE_CSRR0; + srr1 = SPR_BOOKE_CSRR1; + break; + case POWERPC_EXCP_MCHECK: /* Machine check exception */ + if (!FIELD_EX64(env->msr, MSR, ME)) { + /* + * Machine check exception is not enabled. Enter + * checkstop state. + */ + fprintf(stderr, "Machine check while not allowed. 
" + "Entering checkstop state\n"); + if (qemu_log_separate()) { + qemu_log("Machine check while not allowed. " + "Entering checkstop state\n"); + } + cs->halted = 1; + cpu_interrupt_exittb(cs); + } + + /* machine check exceptions don't have ME set */ + new_msr &= ~((target_ulong)1 << MSR_ME); + + /* FIXME: choose one or the other based on CPU type */ + srr0 = SPR_BOOKE_MCSRR0; + srr1 = SPR_BOOKE_MCSRR1; + + env->spr[SPR_BOOKE_CSRR0] = env->nip; + env->spr[SPR_BOOKE_CSRR1] = msr; + + break; + case POWERPC_EXCP_DSI: /* Data storage exception */ + trace_ppc_excp_dsi(env->spr[SPR_BOOKE_ESR], env->spr[SPR_BOOKE_DEAR]); + break; + case POWERPC_EXCP_ISI: /* Instruction storage exception */ + trace_ppc_excp_isi(msr, env->nip); + break; + case POWERPC_EXCP_EXTERNAL: /* External input */ + if (env->mpic_proxy) { + /* IACK the IRQ on delivery */ + env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack); + } + break; + case POWERPC_EXCP_ALIGN: /* Alignment exception */ + break; + case POWERPC_EXCP_PROGRAM: /* Program exception */ + switch (env->error_code & ~0xF) { + case POWERPC_EXCP_FP: + if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) { + trace_ppc_excp_fp_ignore(); + powerpc_reset_excp_state(cpu); + return; + } + + /* + * FP exceptions always have NIP pointing to the faulting + * instruction, so always use store_next and claim we are + * precise in the MSR. + */ + msr |= 0x00100000; + env->spr[SPR_BOOKE_ESR] = ESR_FP; + break; + case POWERPC_EXCP_INVAL: + trace_ppc_excp_inval(env->nip); + msr |= 0x00080000; + env->spr[SPR_BOOKE_ESR] = ESR_PIL; + break; + case POWERPC_EXCP_PRIV: + msr |= 0x00040000; + env->spr[SPR_BOOKE_ESR] = ESR_PPR; + break; + case POWERPC_EXCP_TRAP: + msr |= 0x00020000; + env->spr[SPR_BOOKE_ESR] = ESR_PTR; + break; + default: + /* Should never occur */ + cpu_abort(cs, "Invalid program exception %d. 
Aborting\n", + env->error_code); + break; + } + break; + case POWERPC_EXCP_SYSCALL: /* System call exception */ + dump_syscall(env); + + /* + * We need to correct the NIP which in this case is supposed + * to point to the next instruction + */ + env->nip += 4; + break; + case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ + case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ + case POWERPC_EXCP_DECR: /* Decrementer exception */ + break; + case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ + /* FIT on 4xx */ + trace_ppc_excp_print("FIT"); + break; + case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ + trace_ppc_excp_print("WDT"); + srr0 = SPR_BOOKE_CSRR0; + srr1 = SPR_BOOKE_CSRR1; + break; + case POWERPC_EXCP_DTLB: /* Data TLB error */ + case POWERPC_EXCP_ITLB: /* Instruction TLB error */ + break; + case POWERPC_EXCP_DEBUG: /* Debug interrupt */ + if (env->flags & POWERPC_FLAG_DE) { + /* FIXME: choose one or the other based on CPU type */ + srr0 = SPR_BOOKE_DSRR0; + srr1 = SPR_BOOKE_DSRR1; + + env->spr[SPR_BOOKE_CSRR0] = env->nip; + env->spr[SPR_BOOKE_CSRR1] = msr; + + /* DBSR already modified by caller */ + } else { + cpu_abort(cs, "Debug exception triggered on unsupported model\n"); + } + break; + case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable/VPU */ + env->spr[SPR_BOOKE_ESR] = ESR_SPV; + break; + case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */ + break; + case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */ + srr0 = SPR_BOOKE_CSRR0; + srr1 = SPR_BOOKE_CSRR1; + break; + case POWERPC_EXCP_RESET: /* System reset exception */ + if (FIELD_EX64(env->msr, MSR, POW)) { + cpu_abort(cs, "Trying to deliver power-saving system reset " + "exception %d with no HV support\n", excp); + } + break; + case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */ + case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */ + cpu_abort(cs, "%s exception not implemented\n", + powerpc_excp_name(excp)); + break; + default: + cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); + break; + } + +#if defined(TARGET_PPC64) + if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) { + /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */ + new_msr |= (target_ulong)1 << MSR_CM; + } else { + vector = (uint32_t)vector; + } +#endif + + /* Save PC */ + env->spr[srr0] = env->nip; + + /* Save MSR */ + env->spr[srr1] = msr; + + powerpc_set_excp_state(cpu, vector, new_msr); +} + +/* + * When running a nested HV guest under vhyp, external interrupts are + * delivered as HVIRT. + */ +static bool books_vhyp_promotes_external_to_hvirt(PowerPCCPU *cpu) +{ + if (cpu->vhyp) { + return vhyp_cpu_in_nested(cpu); + } + return false; +} + +#ifdef TARGET_PPC64 +/* + * When running under vhyp, hcalls are always intercepted and sent to the + * vhc->hypercall handler. + */ +static bool books_vhyp_handles_hcall(PowerPCCPU *cpu) +{ + if (cpu->vhyp) { + return !vhyp_cpu_in_nested(cpu); + } + return false; +} + +/* + * When running a nested KVM HV guest under vhyp, HV exceptions are not + * delivered to the guest (because there is no concept of HV support), but + * rather they are sent tothe vhyp to exit from the L2 back to the L1 and + * return from the H_ENTER_NESTED hypercall. 
+ */ +static bool books_vhyp_handles_hv_excp(PowerPCCPU *cpu) +{ + if (cpu->vhyp) { + return vhyp_cpu_in_nested(cpu); + } + return false; +} + +static void powerpc_excp_books(PowerPCCPU *cpu, int excp) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + target_ulong msr, new_msr, vector; + int srr0, srr1, lev = -1; + + /* new srr1 value excluding must-be-zero bits */ + msr = env->msr & ~0x783f0000ULL; + /* * new interrupt handler msr preserves existing HV and ME unless * explicitly overriden @@ -341,8 +1347,6 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) /* target registers */ srr0 = SPR_SRR0; srr1 = SPR_SRR1; - asrr0 = -1; - asrr1 = -1; /* * check for special resume at 0x100 from doze/nap/sleep/winkle on @@ -353,41 +1357,27 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) } /* - * Hypervisor emulation assistance interrupt only exists on server - * arch 2.05 server or later. We also don't want to generate it if - * we don't have HVB in msr_mask (PAPR mode). + * We don't want to generate a Hypervisor Emulation Assistance + * Interrupt if we don't have HVB in msr_mask (PAPR mode), + * unless running a nested-hv guest, in which case the L1 + * kernel wants the interrupt. */ - if (excp == POWERPC_EXCP_HV_EMU -#if defined(TARGET_PPC64) - && !(mmu_is_64bit(env->mmu_model) && (env->msr_mask & MSR_HVB)) -#endif /* defined(TARGET_PPC64) */ - - ) { + if (excp == POWERPC_EXCP_HV_EMU && !(env->msr_mask & MSR_HVB) && + !books_vhyp_handles_hv_excp(cpu)) { excp = POWERPC_EXCP_PROGRAM; } + vector = env->excp_vectors[excp]; + if (vector == (target_ulong)-1ULL) { + cpu_abort(cs, "Raised an exception without defined vector %d\n", + excp); + } + + vector |= env->excp_prefix; + switch (excp) { - case POWERPC_EXCP_NONE: - /* Should never happen */ - return; - case POWERPC_EXCP_CRITICAL: /* Critical input */ - switch (excp_model) { - case POWERPC_EXCP_40x: - srr0 = SPR_40x_SRR2; - srr1 = SPR_40x_SRR3; - break; - case POWERPC_EXCP_BOOKE: - srr0 = SPR_BOOKE_CSRR0; - srr1 = SPR_BOOKE_CSRR1; - break; - case POWERPC_EXCP_G2: - break; - default: - goto excp_invalid; - } - break; case POWERPC_EXCP_MCHECK: /* Machine check exception */ - if (msr_me == 0) { + if (!FIELD_EX64(env->msr, MSR, ME)) { /* * Machine check exception is not enabled. Enter * checkstop state. 
@@ -412,69 +1402,35 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) /* machine check exceptions don't have ME set */ new_msr &= ~((target_ulong)1 << MSR_ME); - /* XXX: should also have something loaded in DAR / DSISR */ - switch (excp_model) { - case POWERPC_EXCP_40x: - srr0 = SPR_40x_SRR2; - srr1 = SPR_40x_SRR3; - break; - case POWERPC_EXCP_BOOKE: - /* FIXME: choose one or the other based on CPU type */ - srr0 = SPR_BOOKE_MCSRR0; - srr1 = SPR_BOOKE_MCSRR1; - asrr0 = SPR_BOOKE_CSRR0; - asrr1 = SPR_BOOKE_CSRR1; - break; - default: - break; - } break; case POWERPC_EXCP_DSI: /* Data storage exception */ - LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx - "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]); + trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]); break; case POWERPC_EXCP_ISI: /* Instruction storage exception */ - LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx - "\n", msr, env->nip); + trace_ppc_excp_isi(msr, env->nip); msr |= env->error_code; break; case POWERPC_EXCP_EXTERNAL: /* External input */ { bool lpes0; - cs = CPU(cpu); - /* - * Exception targeting modifiers - * - * LPES0 is supported on POWER7/8/9 - * LPES1 is not supported (old iSeries mode) - * - * On anything else, we behave as if LPES0 is 1 - * (externals don't alter MSR:HV) + * LPES0 is only taken into consideration if we support HV + * mode for this CPU. */ -#if defined(TARGET_PPC64) - if (excp_model == POWERPC_EXCP_POWER7 || - excp_model == POWERPC_EXCP_POWER8 || - excp_model == POWERPC_EXCP_POWER9 || - excp_model == POWERPC_EXCP_POWER10) { - lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); - } else -#endif /* defined(TARGET_PPC64) */ - { - lpes0 = true; + if (!env->has_hv_mode) { + break; } + lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); + if (!lpes0) { new_msr |= (target_ulong)MSR_HVB; new_msr |= env->msr & ((target_ulong)1 << MSR_RI); srr0 = SPR_HSRR0; srr1 = SPR_HSRR1; } - if (env->mpic_proxy) { - /* IACK the IRQ on delivery */ - env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack); - } + break; } case POWERPC_EXCP_ALIGN: /* Alignment exception */ @@ -489,10 +1445,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) case POWERPC_EXCP_PROGRAM: /* Program exception */ switch (env->error_code & ~0xF) { case POWERPC_EXCP_FP: - if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) { - LOG_EXCP("Ignore floating point exception\n"); - cs->exception_index = POWERPC_EXCP_NONE; - env->error_code = 0; + if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) { + trace_ppc_excp_fp_ignore(); + powerpc_reset_excp_state(cpu); return; } @@ -502,20 +1457,16 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) * precise in the MSR. 
*/ msr |= 0x00100000; - env->spr[SPR_BOOKE_ESR] = ESR_FP; break; case POWERPC_EXCP_INVAL: - LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip); + trace_ppc_excp_inval(env->nip); msr |= 0x00080000; - env->spr[SPR_BOOKE_ESR] = ESR_PIL; break; case POWERPC_EXCP_PRIV: msr |= 0x00040000; - env->spr[SPR_BOOKE_ESR] = ESR_PPR; break; case POWERPC_EXCP_TRAP: msr |= 0x00020000; - env->spr[SPR_BOOKE_ESR] = ESR_PTR; break; default: /* Should never occur */ @@ -540,7 +1491,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) env->nip += 4; /* "PAPR mode" built-in hypercall emulation */ - if ((lev == 1) && cpu->vhyp) { + if ((lev == 1) && books_vhyp_handles_hcall(cpu)) { PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); vhc->hypercall(cpu->vhyp, cpu); @@ -556,70 +1507,18 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) env->nip += 4; new_msr |= env->msr & ((target_ulong)1 << MSR_EE); new_msr |= env->msr & ((target_ulong)1 << MSR_RI); + + vector += lev * 0x20; + + env->lr = env->nip; + env->ctr = msr; break; case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ - case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ case POWERPC_EXCP_DECR: /* Decrementer exception */ break; - case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ - /* FIT on 4xx */ - LOG_EXCP("FIT exception\n"); - break; - case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ - LOG_EXCP("WDT exception\n"); - switch (excp_model) { - case POWERPC_EXCP_BOOKE: - srr0 = SPR_BOOKE_CSRR0; - srr1 = SPR_BOOKE_CSRR1; - break; - default: - break; - } - break; - case POWERPC_EXCP_DTLB: /* Data TLB error */ - case POWERPC_EXCP_ITLB: /* Instruction TLB error */ - break; - case POWERPC_EXCP_DEBUG: /* Debug interrupt */ - if (env->flags & POWERPC_FLAG_DE) { - /* FIXME: choose one or the other based on CPU type */ - srr0 = SPR_BOOKE_DSRR0; - srr1 = SPR_BOOKE_DSRR1; - asrr0 = SPR_BOOKE_CSRR0; - asrr1 = SPR_BOOKE_CSRR1; - /* DBSR already modified by caller */ - } else { - cpu_abort(cs, "Debug exception triggered on unsupported model\n"); - } - break; - case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */ - env->spr[SPR_BOOKE_ESR] = ESR_SPV; - break; - case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */ - /* XXX: TODO */ - cpu_abort(cs, "Embedded floating point data exception " - "is not implemented yet !\n"); - env->spr[SPR_BOOKE_ESR] = ESR_SPV; - break; - case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */ - /* XXX: TODO */ - cpu_abort(cs, "Embedded floating point round exception " - "is not implemented yet !\n"); - env->spr[SPR_BOOKE_ESR] = ESR_SPV; - break; - case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */ - /* XXX: TODO */ - cpu_abort(cs, - "Performance counter exception is not implemented yet !\n"); - break; - case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */ - break; - case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */ - srr0 = SPR_BOOKE_CSRR0; - srr1 = SPR_BOOKE_CSRR1; - break; case POWERPC_EXCP_RESET: /* System reset exception */ /* A power-saving exception sets ME, otherwise it is unchanged */ - if (msr_pow) { + if (FIELD_EX64(env->msr, MSR, POW)) { /* indicate that we resumed from power save mode */ msr |= 0x10000; new_msr |= ((target_ulong)1 << MSR_ME); @@ -631,7 +1530,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) */ new_msr |= (target_ulong)MSR_HVB; } else { - if (msr_pow) { + if 
(FIELD_EX64(env->msr, MSR, POW)) { cpu_abort(cs, "Trying to deliver power-saving system reset " "exception %d with no HV support\n", excp); } @@ -646,8 +1545,6 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) /* fall through */ case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */ case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */ - case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */ - case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */ case POWERPC_EXCP_SDOOR_HV: /* Hypervisor Doorbell interrupt */ case POWERPC_EXCP_HV_EMU: case POWERPC_EXCP_HVIRT: /* Hypervisor virtualization */ @@ -659,253 +1556,53 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) case POWERPC_EXCP_VPU: /* Vector unavailable exception */ case POWERPC_EXCP_VSXU: /* VSX unavailable exception */ case POWERPC_EXCP_FU: /* Facility unavailable exception */ -#ifdef TARGET_PPC64 env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56); -#endif break; case POWERPC_EXCP_HV_FU: /* Hypervisor Facility Unavailable Exception */ -#ifdef TARGET_PPC64 env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS); srr0 = SPR_HSRR0; srr1 = SPR_HSRR1; new_msr |= (target_ulong)MSR_HVB; new_msr |= env->msr & ((target_ulong)1 << MSR_RI); -#endif break; - case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */ - LOG_EXCP("PIT exception\n"); - break; - case POWERPC_EXCP_IO: /* IO error exception */ - /* XXX: TODO */ - cpu_abort(cs, "601 IO error exception is not implemented yet !\n"); - break; - case POWERPC_EXCP_RUNM: /* Run mode exception */ - /* XXX: TODO */ - cpu_abort(cs, "601 run mode exception is not implemented yet !\n"); - break; - case POWERPC_EXCP_EMUL: /* Emulation trap exception */ - /* XXX: TODO */ - cpu_abort(cs, "602 emulation trap exception " - "is not implemented yet !\n"); - break; - case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ - case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ - case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ - switch (excp_model) { - case POWERPC_EXCP_602: - case POWERPC_EXCP_603: - case POWERPC_EXCP_603E: - case POWERPC_EXCP_G2: - /* Swap temporary saved registers with GPRs */ - if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) { - new_msr |= (target_ulong)1 << MSR_TGPR; - hreg_swap_gpr_tgpr(env); - } - /* fall through */ - case POWERPC_EXCP_7x5: -#if defined(DEBUG_SOFTWARE_TLB) - if (qemu_log_enabled()) { - const char *es; - target_ulong *miss, *cmp; - int en; + case POWERPC_EXCP_PERFM_EBB: /* Performance Monitor EBB Exception */ + case POWERPC_EXCP_EXTERNAL_EBB: /* External EBB Exception */ + env->spr[SPR_BESCR] &= ~BESCR_GE; - if (excp == POWERPC_EXCP_IFTLB) { - es = "I"; - en = 'I'; - miss = &env->spr[SPR_IMISS]; - cmp = &env->spr[SPR_ICMP]; - } else { - if (excp == POWERPC_EXCP_DLTLB) { - es = "DL"; - } else { - es = "DS"; - } - en = 'D'; - miss = &env->spr[SPR_DMISS]; - cmp = &env->spr[SPR_DCMP]; - } - qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC " - TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 " - TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp, - env->spr[SPR_HASH1], env->spr[SPR_HASH2], - env->error_code); - } -#endif - msr |= env->crf[0] << 28; - msr |= env->error_code; /* key, D/I, S/L bits */ - /* Set way using a LRU mechanism */ - msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17; - break; - case POWERPC_EXCP_74xx: -#if defined(DEBUG_SOFTWARE_TLB) - if (qemu_log_enabled()) { - const char *es; - target_ulong *miss, *cmp; - int en; + /* + 
* Save NIP for rfebb insn in SPR_EBBRR. Next nip is + * stored in the EBB Handler SPR_EBBHR. + */ + env->spr[SPR_EBBRR] = env->nip; + powerpc_set_excp_state(cpu, env->spr[SPR_EBBHR], env->msr); - if (excp == POWERPC_EXCP_IFTLB) { - es = "I"; - en = 'I'; - miss = &env->spr[SPR_TLBMISS]; - cmp = &env->spr[SPR_PTEHI]; - } else { - if (excp == POWERPC_EXCP_DLTLB) { - es = "DL"; - } else { - es = "DS"; - } - en = 'D'; - miss = &env->spr[SPR_TLBMISS]; - cmp = &env->spr[SPR_PTEHI]; - } - qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC " - TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp, - env->error_code); - } -#endif - msr |= env->error_code; /* key bit */ - break; - default: - cpu_abort(cs, "Invalid TLB miss exception\n"); - break; - } - break; - case POWERPC_EXCP_FPA: /* Floating-point assist exception */ - /* XXX: TODO */ - cpu_abort(cs, "Floating point assist exception " - "is not implemented yet !\n"); - break; - case POWERPC_EXCP_DABR: /* Data address breakpoint */ - /* XXX: TODO */ - cpu_abort(cs, "DABR exception is not implemented yet !\n"); - break; - case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ - /* XXX: TODO */ - cpu_abort(cs, "IABR exception is not implemented yet !\n"); - break; - case POWERPC_EXCP_SMI: /* System management interrupt */ - /* XXX: TODO */ - cpu_abort(cs, "SMI exception is not implemented yet !\n"); - break; + /* + * This exception is handled in userspace. No need to proceed. + */ + return; case POWERPC_EXCP_THERM: /* Thermal interrupt */ - /* XXX: TODO */ - cpu_abort(cs, "Thermal management exception " - "is not implemented yet !\n"); - break; case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */ - /* XXX: TODO */ - cpu_abort(cs, - "Performance counter exception is not implemented yet !\n"); - break; case POWERPC_EXCP_VPUA: /* Vector assist exception */ - /* XXX: TODO */ - cpu_abort(cs, "VPU assist exception is not implemented yet !\n"); - break; - case POWERPC_EXCP_SOFTP: /* Soft patch exception */ - /* XXX: TODO */ - cpu_abort(cs, - "970 soft-patch exception is not implemented yet !\n"); - break; case POWERPC_EXCP_MAINT: /* Maintenance exception */ - /* XXX: TODO */ - cpu_abort(cs, - "970 maintenance exception is not implemented yet !\n"); - break; - case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */ - /* XXX: TODO */ - cpu_abort(cs, "Maskable external exception " - "is not implemented yet !\n"); - break; - case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */ - /* XXX: TODO */ - cpu_abort(cs, "Non maskable external exception " - "is not implemented yet !\n"); + case POWERPC_EXCP_SDOOR: /* Doorbell interrupt */ + case POWERPC_EXCP_HV_MAINT: /* Hypervisor Maintenance exception */ + cpu_abort(cs, "%s exception not implemented\n", + powerpc_excp_name(excp)); break; default: - excp_invalid: cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); break; } - /* Sanity check */ - if (!(env->msr_mask & MSR_HVB)) { - if (new_msr & MSR_HVB) { - cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with " - "no HV support\n", excp); - } - if (srr0 == SPR_HSRR0) { - cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with " - "no HV support\n", excp); - } - } - /* * Sort out endianness of interrupt, this differs depending on the * CPU, the HV mode, etc... 
*/ -#ifdef TARGET_PPC64 - if (excp_model == POWERPC_EXCP_POWER7) { - if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) { - new_msr |= (target_ulong)1 << MSR_LE; - } - } else if (excp_model == POWERPC_EXCP_POWER8) { - if (new_msr & MSR_HVB) { - if (env->spr[SPR_HID0] & HID0_HILE) { - new_msr |= (target_ulong)1 << MSR_LE; - } - } else if (env->spr[SPR_LPCR] & LPCR_ILE) { - new_msr |= (target_ulong)1 << MSR_LE; - } - } else if (excp_model == POWERPC_EXCP_POWER9 || - excp_model == POWERPC_EXCP_POWER10) { - if (new_msr & MSR_HVB) { - if (env->spr[SPR_HID0] & HID0_POWER9_HILE) { - new_msr |= (target_ulong)1 << MSR_LE; - } - } else if (env->spr[SPR_LPCR] & LPCR_ILE) { - new_msr |= (target_ulong)1 << MSR_LE; - } - } else if (msr_ile) { + if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) { new_msr |= (target_ulong)1 << MSR_LE; } -#else - if (msr_ile) { - new_msr |= (target_ulong)1 << MSR_LE; - } -#endif - vector = env->excp_vectors[excp]; - if (vector == (target_ulong)-1ULL) { - cpu_abort(cs, "Raised an exception without defined vector %d\n", - excp); - } - - vector |= env->excp_prefix; - - /* If any alternate SRR register are defined, duplicate saved values */ - if (asrr0 != -1) { - env->spr[asrr0] = env->nip; - } - if (asrr1 != -1) { - env->spr[asrr1] = msr; - } - -#if defined(TARGET_PPC64) - if (excp_model == POWERPC_EXCP_BOOKE) { - if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) { - /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */ - new_msr |= (target_ulong)1 << MSR_CM; - } else { - vector = (uint32_t)vector; - } - } else { - if (!msr_isf && !mmu_is_64bit(env->mmu_model)) { - vector = (uint32_t)vector; - } else { - new_msr |= (target_ulong)1 << MSR_SF; - } - } -#endif + new_msr |= (target_ulong)1 << MSR_SF; if (excp != POWERPC_EXCP_SYSCALL_VECTORED) { /* Save PC */ @@ -913,53 +1610,433 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) /* Save MSR */ env->spr[srr1] = msr; - -#if defined(TARGET_PPC64) - } else { - vector += lev * 0x20; - - env->lr = env->nip; - env->ctr = msr; -#endif } - /* This can update new_msr and vector if AIL applies */ - ppc_excp_apply_ail(cpu, excp_model, excp, msr, &new_msr, &vector); + if ((new_msr & MSR_HVB) && books_vhyp_handles_hv_excp(cpu)) { + PPCVirtualHypervisorClass *vhc = + PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); + /* Deliver interrupt to L1 by returning from the H_ENTER_NESTED call */ + vhc->deliver_hv_excp(cpu, excp); - powerpc_set_excp_state(cpu, vector, new_msr); + powerpc_reset_excp_state(cpu); + + } else { + /* Sanity check */ + if (!(env->msr_mask & MSR_HVB) && srr0 == SPR_HSRR0) { + cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with " + "no HV support\n", excp); + } + + /* This can update new_msr and vector if AIL applies */ + ppc_excp_apply_ail(cpu, excp, msr, &new_msr, &vector); + + powerpc_set_excp_state(cpu, vector, new_msr); + } +} +#else +static inline void powerpc_excp_books(PowerPCCPU *cpu, int excp) +{ + g_assert_not_reached(); +} +#endif + +static void powerpc_excp(PowerPCCPU *cpu, int excp) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + + if (excp <= POWERPC_EXCP_NONE || excp >= POWERPC_EXCP_NB) { + cpu_abort(cs, "Invalid PowerPC exception %d. 
Aborting\n", excp); + } + + qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx + " => %s (%d) error=%02x\n", env->nip, powerpc_excp_name(excp), + excp, env->error_code); + + switch (env->excp_model) { + case POWERPC_EXCP_40x: + powerpc_excp_40x(cpu, excp); + break; + case POWERPC_EXCP_6xx: + powerpc_excp_6xx(cpu, excp); + break; + case POWERPC_EXCP_7xx: + powerpc_excp_7xx(cpu, excp); + break; + case POWERPC_EXCP_74xx: + powerpc_excp_74xx(cpu, excp); + break; + case POWERPC_EXCP_BOOKE: + powerpc_excp_booke(cpu, excp); + break; + case POWERPC_EXCP_970: + case POWERPC_EXCP_POWER7: + case POWERPC_EXCP_POWER8: + case POWERPC_EXCP_POWER9: + case POWERPC_EXCP_POWER10: + powerpc_excp_books(cpu, excp); + break; + default: + g_assert_not_reached(); + } } void ppc_cpu_do_interrupt(CPUState *cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; - powerpc_excp(cpu, env->excp_model, cs->exception_index); + powerpc_excp(cpu, cs->exception_index); } -static void ppc_hw_interrupt(CPUPPCState *env) +#if defined(TARGET_PPC64) +#define P7_UNUSED_INTERRUPTS \ + (PPC_INTERRUPT_RESET | PPC_INTERRUPT_HVIRT | PPC_INTERRUPT_CEXT | \ + PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \ + PPC_INTERRUPT_PIT | PPC_INTERRUPT_DOORBELL | PPC_INTERRUPT_HDOORBELL | \ + PPC_INTERRUPT_THERM | PPC_INTERRUPT_EBB) + +static int p7_interrupt_powersave(CPUPPCState *env) +{ + if ((env->pending_interrupts & PPC_INTERRUPT_EXT) && + (env->spr[SPR_LPCR] & LPCR_P7_PECE0)) { + return PPC_INTERRUPT_EXT; + } + if ((env->pending_interrupts & PPC_INTERRUPT_DECR) && + (env->spr[SPR_LPCR] & LPCR_P7_PECE1)) { + return PPC_INTERRUPT_DECR; + } + if ((env->pending_interrupts & PPC_INTERRUPT_MCK) && + (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) { + return PPC_INTERRUPT_MCK; + } + if ((env->pending_interrupts & PPC_INTERRUPT_HMI) && + (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) { + return PPC_INTERRUPT_HMI; + } + if (env->pending_interrupts & PPC_INTERRUPT_RESET) { + return PPC_INTERRUPT_RESET; + } + return 0; +} + +static int p7_next_unmasked_interrupt(CPUPPCState *env) { PowerPCCPU *cpu = env_archcpu(env); + CPUState *cs = CPU(cpu); + /* Ignore MSR[EE] when coming out of some power management states */ + bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; + + assert((env->pending_interrupts & P7_UNUSED_INTERRUPTS) == 0); + + if (cs->halted) { + /* LPCR[PECE] controls which interrupts can exit power-saving mode */ + return p7_interrupt_powersave(env); + } + + /* Machine check exception */ + if (env->pending_interrupts & PPC_INTERRUPT_MCK) { + return PPC_INTERRUPT_MCK; + } + + /* Hypervisor decrementer exception */ + if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { + /* LPCR will be clear when not supported so this will work */ + bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); + if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) { + /* HDEC clears on delivery */ + return PPC_INTERRUPT_HDECR; + } + } + + /* External interrupt can ignore MSR:EE under some circumstances */ + if (env->pending_interrupts & PPC_INTERRUPT_EXT) { + bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); + bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); + /* HEIC blocks delivery to the hypervisor */ + if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) && + !FIELD_EX64(env->msr, MSR, PR))) || + (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) { + return PPC_INTERRUPT_EXT; + } + } + if (msr_ee != 0) { + /* Decrementer exception */ + if (env->pending_interrupts & PPC_INTERRUPT_DECR) { + return PPC_INTERRUPT_DECR; + } 
+ if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { + return PPC_INTERRUPT_PERFM; + } + } + + return 0; +} + +#define P8_UNUSED_INTERRUPTS \ + (PPC_INTERRUPT_RESET | PPC_INTERRUPT_DEBUG | PPC_INTERRUPT_HVIRT | \ + PPC_INTERRUPT_CEXT | PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | \ + PPC_INTERRUPT_FIT | PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM) + +static int p8_interrupt_powersave(CPUPPCState *env) +{ + if ((env->pending_interrupts & PPC_INTERRUPT_EXT) && + (env->spr[SPR_LPCR] & LPCR_P8_PECE2)) { + return PPC_INTERRUPT_EXT; + } + if ((env->pending_interrupts & PPC_INTERRUPT_DECR) && + (env->spr[SPR_LPCR] & LPCR_P8_PECE3)) { + return PPC_INTERRUPT_DECR; + } + if ((env->pending_interrupts & PPC_INTERRUPT_MCK) && + (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) { + return PPC_INTERRUPT_MCK; + } + if ((env->pending_interrupts & PPC_INTERRUPT_HMI) && + (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) { + return PPC_INTERRUPT_HMI; + } + if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) && + (env->spr[SPR_LPCR] & LPCR_P8_PECE0)) { + return PPC_INTERRUPT_DOORBELL; + } + if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) && + (env->spr[SPR_LPCR] & LPCR_P8_PECE1)) { + return PPC_INTERRUPT_HDOORBELL; + } + if (env->pending_interrupts & PPC_INTERRUPT_RESET) { + return PPC_INTERRUPT_RESET; + } + return 0; +} + +static int p8_next_unmasked_interrupt(CPUPPCState *env) +{ + PowerPCCPU *cpu = env_archcpu(env); + CPUState *cs = CPU(cpu); + /* Ignore MSR[EE] when coming out of some power management states */ + bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; + + assert((env->pending_interrupts & P8_UNUSED_INTERRUPTS) == 0); + + if (cs->halted) { + /* LPCR[PECE] controls which interrupts can exit power-saving mode */ + return p8_interrupt_powersave(env); + } + + /* Machine check exception */ + if (env->pending_interrupts & PPC_INTERRUPT_MCK) { + return PPC_INTERRUPT_MCK; + } + + /* Hypervisor decrementer exception */ + if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { + /* LPCR will be clear when not supported so this will work */ + bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); + if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) { + /* HDEC clears on delivery */ + return PPC_INTERRUPT_HDECR; + } + } + + /* External interrupt can ignore MSR:EE under some circumstances */ + if (env->pending_interrupts & PPC_INTERRUPT_EXT) { + bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); + bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); + /* HEIC blocks delivery to the hypervisor */ + if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) && + !FIELD_EX64(env->msr, MSR, PR))) || + (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) { + return PPC_INTERRUPT_EXT; + } + } + if (msr_ee != 0) { + /* Decrementer exception */ + if (env->pending_interrupts & PPC_INTERRUPT_DECR) { + return PPC_INTERRUPT_DECR; + } + if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { + return PPC_INTERRUPT_DOORBELL; + } + if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) { + return PPC_INTERRUPT_HDOORBELL; + } + if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { + return PPC_INTERRUPT_PERFM; + } + /* EBB exception */ + if (env->pending_interrupts & PPC_INTERRUPT_EBB) { + /* + * EBB exception must be taken in problem state and + * with BESCR_GE set. 
+ */ + if (FIELD_EX64(env->msr, MSR, PR) && + (env->spr[SPR_BESCR] & BESCR_GE)) { + return PPC_INTERRUPT_EBB; + } + } + } + + return 0; +} + +#define P9_UNUSED_INTERRUPTS \ + (PPC_INTERRUPT_RESET | PPC_INTERRUPT_DEBUG | PPC_INTERRUPT_CEXT | \ + PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \ + PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM) + +static int p9_interrupt_powersave(CPUPPCState *env) +{ + /* External Exception */ + if ((env->pending_interrupts & PPC_INTERRUPT_EXT) && + (env->spr[SPR_LPCR] & LPCR_EEE)) { + bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); + if (!heic || !FIELD_EX64_HV(env->msr) || + FIELD_EX64(env->msr, MSR, PR)) { + return PPC_INTERRUPT_EXT; + } + } + /* Decrementer Exception */ + if ((env->pending_interrupts & PPC_INTERRUPT_DECR) && + (env->spr[SPR_LPCR] & LPCR_DEE)) { + return PPC_INTERRUPT_DECR; + } + /* Machine Check or Hypervisor Maintenance Exception */ + if (env->spr[SPR_LPCR] & LPCR_OEE) { + if (env->pending_interrupts & PPC_INTERRUPT_MCK) { + return PPC_INTERRUPT_MCK; + } + if (env->pending_interrupts & PPC_INTERRUPT_HMI) { + return PPC_INTERRUPT_HMI; + } + } + /* Privileged Doorbell Exception */ + if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) && + (env->spr[SPR_LPCR] & LPCR_PDEE)) { + return PPC_INTERRUPT_DOORBELL; + } + /* Hypervisor Doorbell Exception */ + if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) && + (env->spr[SPR_LPCR] & LPCR_HDEE)) { + return PPC_INTERRUPT_HDOORBELL; + } + /* Hypervisor virtualization exception */ + if ((env->pending_interrupts & PPC_INTERRUPT_HVIRT) && + (env->spr[SPR_LPCR] & LPCR_HVEE)) { + return PPC_INTERRUPT_HVIRT; + } + if (env->pending_interrupts & PPC_INTERRUPT_RESET) { + return PPC_INTERRUPT_RESET; + } + return 0; +} + +static int p9_next_unmasked_interrupt(CPUPPCState *env) +{ + PowerPCCPU *cpu = env_archcpu(env); + CPUState *cs = CPU(cpu); + /* Ignore MSR[EE] when coming out of some power management states */ + bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; + + assert((env->pending_interrupts & P9_UNUSED_INTERRUPTS) == 0); + + if (cs->halted) { + if (env->spr[SPR_PSSCR] & PSSCR_EC) { + /* + * When PSSCR[EC] is set, LPCR[PECE] controls which interrupts can + * wakeup the processor + */ + return p9_interrupt_powersave(env); + } else { + /* + * When it's clear, any system-caused exception exits power-saving + * mode, even the ones that gate on MSR[EE]. 
+ */ + msr_ee = true; + } + } + + /* Machine check exception */ + if (env->pending_interrupts & PPC_INTERRUPT_MCK) { + return PPC_INTERRUPT_MCK; + } + + /* Hypervisor decrementer exception */ + if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { + /* LPCR will be clear when not supported so this will work */ + bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); + if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) { + /* HDEC clears on delivery */ + return PPC_INTERRUPT_HDECR; + } + } + + /* Hypervisor virtualization interrupt */ + if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) { + /* LPCR will be clear when not supported so this will work */ + bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE); + if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hvice) { + return PPC_INTERRUPT_HVIRT; + } + } + + /* External interrupt can ignore MSR:EE under some circumstances */ + if (env->pending_interrupts & PPC_INTERRUPT_EXT) { + bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); + bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); + /* HEIC blocks delivery to the hypervisor */ + if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) && + !FIELD_EX64(env->msr, MSR, PR))) || + (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) { + return PPC_INTERRUPT_EXT; + } + } + if (msr_ee != 0) { + /* Decrementer exception */ + if (env->pending_interrupts & PPC_INTERRUPT_DECR) { + return PPC_INTERRUPT_DECR; + } + if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { + return PPC_INTERRUPT_DOORBELL; + } + if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) { + return PPC_INTERRUPT_HDOORBELL; + } + if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { + return PPC_INTERRUPT_PERFM; + } + /* EBB exception */ + if (env->pending_interrupts & PPC_INTERRUPT_EBB) { + /* + * EBB exception must be taken in problem state and + * with BESCR_GE set. + */ + if (FIELD_EX64(env->msr, MSR, PR) && + (env->spr[SPR_BESCR] & BESCR_GE)) { + return PPC_INTERRUPT_EBB; + } + } + } + + return 0; +} +#endif + +static int ppc_next_unmasked_interrupt_generic(CPUPPCState *env) +{ bool async_deliver; /* External reset */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET); - powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET); - return; + if (env->pending_interrupts & PPC_INTERRUPT_RESET) { + return PPC_INTERRUPT_RESET; } /* Machine check exception */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK); - powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK); - return; + if (env->pending_interrupts & PPC_INTERRUPT_MCK) { + return PPC_INTERRUPT_MCK; } #if 0 /* TODO */ /* External debug exception */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG); - powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG); - return; + if (env->pending_interrupts & PPC_INTERRUPT_DEBUG) { + return PPC_INTERRUPT_DEBUG; } #endif @@ -969,108 +2046,178 @@ static void ppc_hw_interrupt(CPUPPCState *env) * clear when coming out of some power management states (in order * for them to become a 0x100). 
*/ - async_deliver = (msr_ee != 0) || env->resume_as_sreset; + async_deliver = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; /* Hypervisor decrementer exception */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) { + if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { /* LPCR will be clear when not supported so this will work */ bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); - if ((async_deliver || msr_hv == 0) && hdice) { + if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hdice) { /* HDEC clears on delivery */ - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR); - powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR); - return; + return PPC_INTERRUPT_HDECR; } } /* Hypervisor virtualization interrupt */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) { + if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) { /* LPCR will be clear when not supported so this will work */ bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE); - if ((async_deliver || msr_hv == 0) && hvice) { - powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT); - return; + if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hvice) { + return PPC_INTERRUPT_HVIRT; } } /* External interrupt can ignore MSR:EE under some circumstances */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) { + if (env->pending_interrupts & PPC_INTERRUPT_EXT) { bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); /* HEIC blocks delivery to the hypervisor */ - if ((async_deliver && !(heic && msr_hv && !msr_pr)) || - (env->has_hv_mode && msr_hv == 0 && !lpes0)) { - powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL); - return; + if ((async_deliver && !(heic && FIELD_EX64_HV(env->msr) && + !FIELD_EX64(env->msr, MSR, PR))) || + (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) { + return PPC_INTERRUPT_EXT; } } - if (msr_ce != 0) { + if (FIELD_EX64(env->msr, MSR, CE)) { /* External critical interrupt */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) { - powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL); - return; + if (env->pending_interrupts & PPC_INTERRUPT_CEXT) { + return PPC_INTERRUPT_CEXT; } } if (async_deliver != 0) { /* Watchdog timer on embedded PowerPC */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT); - powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT); - return; + if (env->pending_interrupts & PPC_INTERRUPT_WDT) { + return PPC_INTERRUPT_WDT; } - if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL); - powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI); - return; + if (env->pending_interrupts & PPC_INTERRUPT_CDOORBELL) { + return PPC_INTERRUPT_CDOORBELL; } /* Fixed interval timer on embedded PowerPC */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT); - powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT); - return; + if (env->pending_interrupts & PPC_INTERRUPT_FIT) { + return PPC_INTERRUPT_FIT; } /* Programmable interval timer on embedded PowerPC */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT); - powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT); - return; + if (env->pending_interrupts & PPC_INTERRUPT_PIT) { + return PPC_INTERRUPT_PIT; } /* Decrementer exception */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) { - if 
(ppc_decr_clear_on_delivery(env)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR); - } - powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR); - return; + if (env->pending_interrupts & PPC_INTERRUPT_DECR) { + return PPC_INTERRUPT_DECR; } - if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL); - if (is_book3s_arch2x(env)) { - powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR); - } else { - powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI); - } - return; + if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { + return PPC_INTERRUPT_DOORBELL; } - if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL); - powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV); - return; + if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) { + return PPC_INTERRUPT_HDOORBELL; } - if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM); - powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM); - return; + if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { + return PPC_INTERRUPT_PERFM; } /* Thermal interrupt */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM); - powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM); - return; + if (env->pending_interrupts & PPC_INTERRUPT_THERM) { + return PPC_INTERRUPT_THERM; + } + /* EBB exception */ + if (env->pending_interrupts & PPC_INTERRUPT_EBB) { + /* + * EBB exception must be taken in problem state and + * with BESCR_GE set. + */ + if (FIELD_EX64(env->msr, MSR, PR) && + (env->spr[SPR_BESCR] & BESCR_GE)) { + return PPC_INTERRUPT_EBB; + } } } - if (env->resume_as_sreset) { + return 0; +} + +static int ppc_next_unmasked_interrupt(CPUPPCState *env) +{ + switch (env->excp_model) { +#if defined(TARGET_PPC64) + case POWERPC_EXCP_POWER7: + return p7_next_unmasked_interrupt(env); + case POWERPC_EXCP_POWER8: + return p8_next_unmasked_interrupt(env); + case POWERPC_EXCP_POWER9: + case POWERPC_EXCP_POWER10: + return p9_next_unmasked_interrupt(env); +#endif + default: + return ppc_next_unmasked_interrupt_generic(env); + } +} + +/* + * Sets CPU_INTERRUPT_HARD if there is at least one unmasked interrupt to be + * delivered and clears CPU_INTERRUPT_HARD otherwise. 
+ * + * This method is called by ppc_set_interrupt when an interrupt is raised or + * lowered, and should also be called whenever an interrupt masking condition + * is changed, e.g.: + * - When relevant bits of MSR are altered, like EE, HV, PR, etc.; + * - When relevant bits of LPCR are altered, like PECE, HDICE, HVICE, etc.; + * - When PSSCR[EC] or env->resume_as_sreset are changed; + * - When cs->halted is changed and the CPU has a different interrupt masking + * logic in power-saving mode (e.g., POWER7/8/9/10); + */ +void ppc_maybe_interrupt(CPUPPCState *env) +{ + CPUState *cs = env_cpu(env); + bool locked = false; + + if (!qemu_mutex_iothread_locked()) { + locked = true; + qemu_mutex_lock_iothread(); + } + + if (ppc_next_unmasked_interrupt(env)) { + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } + + if (locked) { + qemu_mutex_unlock_iothread(); + } +} + +#if defined(TARGET_PPC64) +static void p7_deliver_interrupt(CPUPPCState *env, int interrupt) +{ + PowerPCCPU *cpu = env_archcpu(env); + CPUState *cs = env_cpu(env); + + switch (interrupt) { + case PPC_INTERRUPT_MCK: /* Machine check exception */ + env->pending_interrupts &= ~PPC_INTERRUPT_MCK; + powerpc_excp(cpu, POWERPC_EXCP_MCHECK); + break; + + case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */ + /* HDEC clears on delivery */ + env->pending_interrupts &= ~PPC_INTERRUPT_HDECR; + powerpc_excp(cpu, POWERPC_EXCP_HDECR); + break; + + case PPC_INTERRUPT_EXT: + if (books_vhyp_promotes_external_to_hvirt(cpu)) { + powerpc_excp(cpu, POWERPC_EXCP_HVIRT); + } else { + powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL); + } + break; + + case PPC_INTERRUPT_DECR: /* Decrementer exception */ + powerpc_excp(cpu, POWERPC_EXCP_DECR); + break; + case PPC_INTERRUPT_PERFM: + env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; + powerpc_excp(cpu, POWERPC_EXCP_PERFM); + break; + case 0: /* * This is a bug ! It means that has_work took us out of halt without * anything to deliver while in a PM state that requires getting @@ -1082,17 +2229,293 @@ static void ppc_hw_interrupt(CPUPPCState *env) * It generally means a discrepancy between the wakeup conditions in the * processor has_work implementation and the logic in this function. */ - cpu_abort(env_cpu(env), - "Wakeup from PM state but interrupt Undelivered"); + assert(!env->resume_as_sreset); + break; + default: + cpu_abort(cs, "Invalid PowerPC interrupt %d. 
Aborting\n", interrupt); + } +} + +static void p8_deliver_interrupt(CPUPPCState *env, int interrupt) +{ + PowerPCCPU *cpu = env_archcpu(env); + CPUState *cs = env_cpu(env); + + switch (interrupt) { + case PPC_INTERRUPT_MCK: /* Machine check exception */ + env->pending_interrupts &= ~PPC_INTERRUPT_MCK; + powerpc_excp(cpu, POWERPC_EXCP_MCHECK); + break; + + case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */ + /* HDEC clears on delivery */ + env->pending_interrupts &= ~PPC_INTERRUPT_HDECR; + powerpc_excp(cpu, POWERPC_EXCP_HDECR); + break; + + case PPC_INTERRUPT_EXT: + if (books_vhyp_promotes_external_to_hvirt(cpu)) { + powerpc_excp(cpu, POWERPC_EXCP_HVIRT); + } else { + powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL); + } + break; + + case PPC_INTERRUPT_DECR: /* Decrementer exception */ + powerpc_excp(cpu, POWERPC_EXCP_DECR); + break; + case PPC_INTERRUPT_DOORBELL: + env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; + if (is_book3s_arch2x(env)) { + powerpc_excp(cpu, POWERPC_EXCP_SDOOR); + } else { + powerpc_excp(cpu, POWERPC_EXCP_DOORI); + } + break; + case PPC_INTERRUPT_HDOORBELL: + env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; + powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV); + break; + case PPC_INTERRUPT_PERFM: + env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; + powerpc_excp(cpu, POWERPC_EXCP_PERFM); + break; + case PPC_INTERRUPT_EBB: /* EBB exception */ + env->pending_interrupts &= ~PPC_INTERRUPT_EBB; + if (env->spr[SPR_BESCR] & BESCR_PMEO) { + powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB); + } else if (env->spr[SPR_BESCR] & BESCR_EEO) { + powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB); + } + break; + case 0: + /* + * This is a bug ! It means that has_work took us out of halt without + * anything to deliver while in a PM state that requires getting + * out via a 0x100 + * + * This means we will incorrectly execute past the power management + * instruction instead of triggering a reset. + * + * It generally means a discrepancy between the wakeup conditions in the + * processor has_work implementation and the logic in this function. + */ + assert(!env->resume_as_sreset); + break; + default: + cpu_abort(cs, "Invalid PowerPC interrupt %d. Aborting\n", interrupt); + } +} + +static void p9_deliver_interrupt(CPUPPCState *env, int interrupt) +{ + PowerPCCPU *cpu = env_archcpu(env); + CPUState *cs = env_cpu(env); + + if (cs->halted && !(env->spr[SPR_PSSCR] & PSSCR_EC) && + !FIELD_EX64(env->msr, MSR, EE)) { + /* + * A pending interrupt took us out of power-saving, but MSR[EE] says + * that we should return to NIP+4 instead of delivering it. 
+ */ + return; + } + + switch (interrupt) { + case PPC_INTERRUPT_MCK: /* Machine check exception */ + env->pending_interrupts &= ~PPC_INTERRUPT_MCK; + powerpc_excp(cpu, POWERPC_EXCP_MCHECK); + break; + + case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */ + /* HDEC clears on delivery */ + env->pending_interrupts &= ~PPC_INTERRUPT_HDECR; + powerpc_excp(cpu, POWERPC_EXCP_HDECR); + break; + case PPC_INTERRUPT_HVIRT: /* Hypervisor virtualization interrupt */ + powerpc_excp(cpu, POWERPC_EXCP_HVIRT); + break; + + case PPC_INTERRUPT_EXT: + if (books_vhyp_promotes_external_to_hvirt(cpu)) { + powerpc_excp(cpu, POWERPC_EXCP_HVIRT); + } else { + powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL); + } + break; + + case PPC_INTERRUPT_DECR: /* Decrementer exception */ + powerpc_excp(cpu, POWERPC_EXCP_DECR); + break; + case PPC_INTERRUPT_DOORBELL: + env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; + powerpc_excp(cpu, POWERPC_EXCP_SDOOR); + break; + case PPC_INTERRUPT_HDOORBELL: + env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; + powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV); + break; + case PPC_INTERRUPT_PERFM: + env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; + powerpc_excp(cpu, POWERPC_EXCP_PERFM); + break; + case PPC_INTERRUPT_EBB: /* EBB exception */ + env->pending_interrupts &= ~PPC_INTERRUPT_EBB; + if (env->spr[SPR_BESCR] & BESCR_PMEO) { + powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB); + } else if (env->spr[SPR_BESCR] & BESCR_EEO) { + powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB); + } + break; + case 0: + /* + * This is a bug ! It means that has_work took us out of halt without + * anything to deliver while in a PM state that requires getting + * out via a 0x100 + * + * This means we will incorrectly execute past the power management + * instruction instead of triggering a reset. + * + * It generally means a discrepancy between the wakeup conditions in the + * processor has_work implementation and the logic in this function. + */ + assert(!env->resume_as_sreset); + break; + default: + cpu_abort(cs, "Invalid PowerPC interrupt %d. 
Aborting\n", interrupt); + } +} +#endif + +static void ppc_deliver_interrupt_generic(CPUPPCState *env, int interrupt) +{ + PowerPCCPU *cpu = env_archcpu(env); + CPUState *cs = env_cpu(env); + + switch (interrupt) { + case PPC_INTERRUPT_RESET: /* External reset */ + env->pending_interrupts &= ~PPC_INTERRUPT_RESET; + powerpc_excp(cpu, POWERPC_EXCP_RESET); + break; + case PPC_INTERRUPT_MCK: /* Machine check exception */ + env->pending_interrupts &= ~PPC_INTERRUPT_MCK; + powerpc_excp(cpu, POWERPC_EXCP_MCHECK); + break; + + case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */ + /* HDEC clears on delivery */ + env->pending_interrupts &= ~PPC_INTERRUPT_HDECR; + powerpc_excp(cpu, POWERPC_EXCP_HDECR); + break; + case PPC_INTERRUPT_HVIRT: /* Hypervisor virtualization interrupt */ + powerpc_excp(cpu, POWERPC_EXCP_HVIRT); + break; + + case PPC_INTERRUPT_EXT: + if (books_vhyp_promotes_external_to_hvirt(cpu)) { + powerpc_excp(cpu, POWERPC_EXCP_HVIRT); + } else { + powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL); + } + break; + case PPC_INTERRUPT_CEXT: /* External critical interrupt */ + powerpc_excp(cpu, POWERPC_EXCP_CRITICAL); + break; + + case PPC_INTERRUPT_WDT: /* Watchdog timer on embedded PowerPC */ + env->pending_interrupts &= ~PPC_INTERRUPT_WDT; + powerpc_excp(cpu, POWERPC_EXCP_WDT); + break; + case PPC_INTERRUPT_CDOORBELL: + env->pending_interrupts &= ~PPC_INTERRUPT_CDOORBELL; + powerpc_excp(cpu, POWERPC_EXCP_DOORCI); + break; + case PPC_INTERRUPT_FIT: /* Fixed interval timer on embedded PowerPC */ + env->pending_interrupts &= ~PPC_INTERRUPT_FIT; + powerpc_excp(cpu, POWERPC_EXCP_FIT); + break; + case PPC_INTERRUPT_PIT: /* Programmable interval timer on embedded ppc */ + env->pending_interrupts &= ~PPC_INTERRUPT_PIT; + powerpc_excp(cpu, POWERPC_EXCP_PIT); + break; + case PPC_INTERRUPT_DECR: /* Decrementer exception */ + if (ppc_decr_clear_on_delivery(env)) { + env->pending_interrupts &= ~PPC_INTERRUPT_DECR; + } + powerpc_excp(cpu, POWERPC_EXCP_DECR); + break; + case PPC_INTERRUPT_DOORBELL: + env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; + if (is_book3s_arch2x(env)) { + powerpc_excp(cpu, POWERPC_EXCP_SDOOR); + } else { + powerpc_excp(cpu, POWERPC_EXCP_DOORI); + } + break; + case PPC_INTERRUPT_HDOORBELL: + env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; + powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV); + break; + case PPC_INTERRUPT_PERFM: + env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; + powerpc_excp(cpu, POWERPC_EXCP_PERFM); + break; + case PPC_INTERRUPT_THERM: /* Thermal interrupt */ + env->pending_interrupts &= ~PPC_INTERRUPT_THERM; + powerpc_excp(cpu, POWERPC_EXCP_THERM); + break; + case PPC_INTERRUPT_EBB: /* EBB exception */ + env->pending_interrupts &= ~PPC_INTERRUPT_EBB; + if (env->spr[SPR_BESCR] & BESCR_PMEO) { + powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB); + } else if (env->spr[SPR_BESCR] & BESCR_EEO) { + powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB); + } + break; + case 0: + /* + * This is a bug ! It means that has_work took us out of halt without + * anything to deliver while in a PM state that requires getting + * out via a 0x100 + * + * This means we will incorrectly execute past the power management + * instruction instead of triggering a reset. + * + * It generally means a discrepancy between the wakeup conditions in the + * processor has_work implementation and the logic in this function. + */ + assert(!env->resume_as_sreset); + break; + default: + cpu_abort(cs, "Invalid PowerPC interrupt %d. 
Aborting\n", interrupt); + } +} + +static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt) +{ + switch (env->excp_model) { +#if defined(TARGET_PPC64) + case POWERPC_EXCP_POWER7: + p7_deliver_interrupt(env, interrupt); + break; + case POWERPC_EXCP_POWER8: + p8_deliver_interrupt(env, interrupt); + break; + case POWERPC_EXCP_POWER9: + case POWERPC_EXCP_POWER10: + p9_deliver_interrupt(env, interrupt); + break; +#endif + default: + ppc_deliver_interrupt_generic(env, interrupt); } } void ppc_cpu_do_system_reset(CPUState *cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; - powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET); + powerpc_excp(cpu, POWERPC_EXCP_RESET); } void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector) @@ -1107,36 +2530,38 @@ void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector) */ msr = (1ULL << MSR_ME); msr |= env->msr & (1ULL << MSR_SF); - if (ppc_interrupts_little_endian(cpu)) { + if (ppc_interrupts_little_endian(cpu, false)) { msr |= (1ULL << MSR_LE); } + /* Anything for nested required here? MSR[HV] bit? */ + powerpc_set_excp_state(cpu, vector, msr); } -#endif /* !CONFIG_USER_ONLY */ bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; + int interrupt; - if (interrupt_request & CPU_INTERRUPT_HARD) { - ppc_hw_interrupt(env); - if (env->pending_interrupts == 0) { - cs->interrupt_request &= ~CPU_INTERRUPT_HARD; - } - return true; + if ((interrupt_request & CPU_INTERRUPT_HARD) == 0) { + return false; } - return false; + + interrupt = ppc_next_unmasked_interrupt(env); + if (interrupt == 0) { + return false; + } + + ppc_deliver_interrupt(env, interrupt); + if (env->pending_interrupts == 0) { + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } + return true; } -#if defined(DEBUG_OP) -static void cpu_dump_rfi(target_ulong RA, target_ulong msr) -{ - qemu_log("Return from exception at " TARGET_FMT_lx " with flags " - TARGET_FMT_lx "\n", RA, msr); -} -#endif +#endif /* !CONFIG_USER_ONLY */ /*****************************************************************************/ /* Exceptions processing helpers */ @@ -1194,6 +2619,11 @@ void helper_store_msr(CPUPPCState *env, target_ulong val) } } +void helper_ppc_maybe_interrupt(CPUPPCState *env) +{ + ppc_maybe_interrupt(env); +} + #if defined(TARGET_PPC64) void helper_scv(CPUPPCState *env, uint32_t lev) { @@ -1204,33 +2634,32 @@ void helper_scv(CPUPPCState *env, uint32_t lev) } } -void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn) +void helper_pminsn(CPUPPCState *env, uint32_t insn) { CPUState *cs; cs = env_cpu(env); cs->halted = 1; - /* - * The architecture specifies that HDEC interrupts are discarded - * in PM states - */ - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR); - /* Condition for waking up at 0x100 */ env->resume_as_sreset = (insn != PPC_PM_STOP) || (env->spr[SPR_PSSCR] & PSSCR_EC); + + ppc_maybe_interrupt(env); } #endif /* defined(TARGET_PPC64) */ -#endif /* CONFIG_TCG */ -static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr) +static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr) { CPUState *cs = env_cpu(env); /* MSR:POW cannot be set by any form of rfi */ msr &= ~(1ULL << MSR_POW); + /* MSR:TGPR cannot be set by any form of rfi */ + if (env->flags & POWERPC_FLAG_TGPR) + msr &= ~(1ULL << MSR_TGPR); + #if defined(TARGET_PPC64) /* Switching to 32-bit ? 
Crop the nip */ if (!msr_is_64bit(env, msr)) { @@ -1242,9 +2671,7 @@ static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr) /* XXX: beware: this is false if VLE is supported */ env->nip = nip & ~((target_ulong)0x00000003); hreg_store_msr(env, msr, 1); -#if defined(DEBUG_OP) - cpu_dump_rfi(env->nip, env->msr); -#endif + trace_ppc_excp_rfi(env->nip, env->msr); /* * No need to raise an exception here, as rfi is always the last * insn of a TB @@ -1257,13 +2684,11 @@ static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr) check_tlb_flush(env, false); } -#ifdef CONFIG_TCG void helper_rfi(CPUPPCState *env) { do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful); } -#define MSR_BOOK3S_MASK #if defined(TARGET_PPC64) void helper_rfid(CPUPPCState *env) { @@ -1287,6 +2712,83 @@ void helper_hrfid(CPUPPCState *env) } #endif +#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) +void helper_rfebb(CPUPPCState *env, target_ulong s) +{ + target_ulong msr = env->msr; + + /* + * Handling of BESCR bits 32:33 according to PowerISA v3.1: + * + * "If BESCR 32:33 != 0b00 the instruction is treated as if + * the instruction form were invalid." + */ + if (env->spr[SPR_BESCR] & BESCR_INVALID) { + raise_exception_err(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL); + } + + env->nip = env->spr[SPR_EBBRR]; + + /* Switching to 32-bit ? Crop the nip */ + if (!msr_is_64bit(env, msr)) { + env->nip = (uint32_t)env->spr[SPR_EBBRR]; + } + + if (s) { + env->spr[SPR_BESCR] |= BESCR_GE; + } else { + env->spr[SPR_BESCR] &= ~BESCR_GE; + } +} + +/* + * Triggers or queues an 'ebb_excp' EBB exception. All checks + * but FSCR, HFSCR and msr_pr must be done beforehand. + * + * PowerISA v3.1 isn't clear about whether an EBB should be + * postponed or cancelled if the EBB facility is unavailable. + * Our assumption here is that the EBB is cancelled if both + * FSCR and HFSCR EBB facilities aren't available. + */ +static void do_ebb(CPUPPCState *env, int ebb_excp) +{ + PowerPCCPU *cpu = env_archcpu(env); + + /* + * FSCR_EBB and FSCR_IC_EBB are the same bits used with + * HFSCR. 
+ */ + helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB); + helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB); + + if (ebb_excp == POWERPC_EXCP_PERFM_EBB) { + env->spr[SPR_BESCR] |= BESCR_PMEO; + } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) { + env->spr[SPR_BESCR] |= BESCR_EEO; + } + + if (FIELD_EX64(env->msr, MSR, PR)) { + powerpc_excp(cpu, ebb_excp); + } else { + ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1); + } +} + +void raise_ebb_perfm_exception(CPUPPCState *env) +{ + bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE && + env->spr[SPR_BESCR] & BESCR_PME && + env->spr[SPR_BESCR] & BESCR_GE; + + if (!perfm_ebb_enabled) { + return; + } + + do_ebb(env, POWERPC_EXCP_PERFM_EBB); +} +#endif + /*****************************************************************************/ /* Embedded PowerPC specific helpers */ void helper_40x_rfci(CPUPPCState *env) @@ -1343,15 +2845,94 @@ void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2, #endif #endif +#ifdef CONFIG_TCG +static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane) +{ + const uint16_t c = 0xfffc; + const uint64_t z0 = 0xfa2561cdf44ac398ULL; + uint16_t z = 0, temp; + uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32]; + + for (int i = 3; i >= 0; i--) { + k[i] = key & 0xffff; + key >>= 16; + } + xleft[0] = x & 0xffff; + xright[0] = (x >> 16) & 0xffff; + + for (int i = 0; i < 28; i++) { + z = (z0 >> (63 - i)) & 1; + temp = ror16(k[i + 3], 3) ^ k[i + 1]; + k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1); + } + + for (int i = 0; i < 8; i++) { + eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)]; + eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)]; + eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)]; + eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)]; + } + + for (int i = 0; i < 32; i++) { + fxleft[i] = (rol16(xleft[i], 1) & + rol16(xleft[i], 8)) ^ rol16(xleft[i], 2); + xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i]; + xright[i + 1] = xleft[i]; + } + + return (((uint32_t)xright[32]) << 16) | xleft[32]; +} + +static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key) +{ + uint64_t stage0_h = 0ULL, stage0_l = 0ULL; + uint64_t stage1_h, stage1_l; + + for (int i = 0; i < 4; i++) { + stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1)); + stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i); + stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1)); + stage0_l |= (ra & 0xff) << (8 * 2 * i); + rb >>= 8; + ra >>= 8; + } + + stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32; + stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1); + stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32; + stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3); + + return stage1_h ^ stage1_l; +} + +#include "qemu/guest-random.h" + +#define HELPER_HASH(op, key, store) \ +void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \ + target_ulong rb) \ +{ \ + uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash; \ + \ + if (store) { \ + cpu_stq_data_ra(env, ea, calculated_hash, GETPC()); \ + } else { \ + loaded_hash = cpu_ldq_data_ra(env, ea, GETPC()); \ + if (loaded_hash != calculated_hash) { \ + raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, \ + POWERPC_EXCP_TRAP, GETPC()); \ + } \ + } \ +} + +HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true) +HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false) +HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true) +HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false) +#endif /* CONFIG_TCG */ + #if 
!defined(CONFIG_USER_ONLY) -/*****************************************************************************/ -/* PowerPC 601 specific instructions (POWER bridge) */ #ifdef CONFIG_TCG -void helper_rfsvc(CPUPPCState *env) -{ - do_rfi(env, env->lr, env->ctr & 0x0000FFFF); -} /* Embedded.Processor Control */ static int dbell2irq(target_ulong rb) @@ -1385,7 +2966,7 @@ void helper_msgclr(CPUPPCState *env, target_ulong rb) return; } - env->pending_interrupts &= ~(1 << irq); + ppc_set_irq(env_archcpu(env), irq, 0); } void helper_msgsnd(target_ulong rb) @@ -1404,8 +2985,7 @@ void helper_msgsnd(target_ulong rb) CPUPPCState *cenv = &cpu->env; if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) { - cenv->pending_interrupts |= 1 << irq; - cpu_interrupt(cs, CPU_INTERRUPT_HARD); + ppc_set_irq(cpu, irq, 1); } } qemu_mutex_unlock_iothread(); @@ -1429,7 +3009,7 @@ void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb) return; } - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL); + ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0); } static void book3s_msgsnd_common(int pir, int irq) @@ -1443,8 +3023,7 @@ static void book3s_msgsnd_common(int pir, int irq) /* TODO: broadcast message to all threads of the same processor */ if (cenv->spr_cb[SPR_PIR].default_value == pir) { - cenv->pending_interrupts |= 1 << irq; - cpu_interrupt(cs, CPU_INTERRUPT_HARD); + ppc_set_irq(cpu, irq, 1); } } qemu_mutex_unlock_iothread(); @@ -1470,7 +3049,7 @@ void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb) return; } - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL); + ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0); } /* @@ -1491,11 +3070,8 @@ void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb) book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL); } -#endif -#endif /* CONFIG_TCG */ -#endif +#endif /* TARGET_PPC64 */ -#ifdef CONFIG_TCG void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) @@ -1504,11 +3080,25 @@ void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, uint32_t insn; /* Restore state and reload the insn we executed, for filling in DSISR. 
*/ - cpu_restore_state(cs, retaddr, true); + cpu_restore_state(cs, retaddr); insn = cpu_ldl_code(env, env->nip); + switch (env->mmu_model) { + case POWERPC_MMU_SOFT_4xx: + env->spr[SPR_40x_DEAR] = vaddr; + break; + case POWERPC_MMU_BOOKE: + case POWERPC_MMU_BOOKE206: + env->spr[SPR_BOOKE_DEAR] = vaddr; + break; + default: + env->spr[SPR_DAR] = vaddr; + break; + } + cs->exception_index = POWERPC_EXCP_ALIGN; env->error_code = insn & 0x03FF0000; cpu_loop_exit(cs); } -#endif +#endif /* CONFIG_TCG */ +#endif /* !CONFIG_USER_ONLY */ diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c index c4896cecc8..a66e16c212 100644 --- a/target/ppc/fpu_helper.c +++ b/target/ppc/fpu_helper.c @@ -36,6 +36,15 @@ static inline float128 float128_snan_to_qnan(float128 x) #define float32_snan_to_qnan(x) ((x) | 0x00400000) #define float16_snan_to_qnan(x) ((x) | 0x0200) +static inline float32 bfp32_neg(float32 a) +{ + if (unlikely(float32_is_any_nan(a))) { + return a; + } else { + return float32_chs(a); + } +} + static inline bool fp_exceptions_enabled(CPUPPCState *env) { #ifdef CONFIG_USER_ONLY @@ -202,7 +211,7 @@ static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr) env->fpscr |= FP_VX; /* Update the floating-point exception summary */ env->fpscr |= FP_FX; - if (fpscr_ve != 0) { + if (env->fpscr & FP_VE) { /* Update the floating-point enabled exception summary */ env->fpscr |= FP_FEX; if (fp_exceptions_enabled(env)) { @@ -216,7 +225,7 @@ static void finish_invalid_op_arith(CPUPPCState *env, int op, bool set_fpcc, uintptr_t retaddr) { env->fpscr &= ~(FP_FR | FP_FI); - if (fpscr_ve == 0) { + if (!(env->fpscr & FP_VE)) { if (set_fpcc) { env->fpscr &= ~FP_FPCC; env->fpscr |= (FP_C | FP_FU); @@ -286,7 +295,7 @@ static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc, /* Update the floating-point exception summary */ env->fpscr |= FP_FX; /* We must update the target FPR before raising the exception */ - if (fpscr_ve != 0) { + if (env->fpscr & FP_VE) { CPUState *cs = env_cpu(env); cs->exception_index = POWERPC_EXCP_PROGRAM; @@ -303,7 +312,7 @@ static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc, { env->fpscr |= FP_VXCVI; env->fpscr &= ~(FP_FR | FP_FI); - if (fpscr_ve == 0) { + if (!(env->fpscr & FP_VE)) { if (set_fpcc) { env->fpscr &= ~FP_FPCC; env->fpscr |= (FP_C | FP_FU); @@ -318,7 +327,7 @@ static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr) env->fpscr &= ~(FP_FR | FP_FI); /* Update the floating-point exception summary */ env->fpscr |= FP_FX; - if (fpscr_ze != 0) { + if (env->fpscr & FP_ZE) { /* Update the floating-point enabled exception summary */ env->fpscr |= FP_FEX; if (fp_exceptions_enabled(env)) { @@ -329,24 +338,24 @@ static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr) } } -static inline void float_overflow_excp(CPUPPCState *env) +static inline int float_overflow_excp(CPUPPCState *env) { CPUState *cs = env_cpu(env); env->fpscr |= FP_OX; /* Update the floating-point exception summary */ env->fpscr |= FP_FX; - if (fpscr_oe != 0) { - /* XXX: should adjust the result */ + + bool overflow_enabled = !!(env->fpscr & FP_OE); + if (overflow_enabled) { /* Update the floating-point enabled exception summary */ env->fpscr |= FP_FEX; /* We must update the target FPR before raising the exception */ cs->exception_index = POWERPC_EXCP_PROGRAM; env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX; - } else { - env->fpscr |= FP_XX; - env->fpscr |= FP_FI; } + + return overflow_enabled ? 
0 : float_flag_inexact; } static inline void float_underflow_excp(CPUPPCState *env) @@ -356,8 +365,7 @@ static inline void float_underflow_excp(CPUPPCState *env) env->fpscr |= FP_UX; /* Update the floating-point exception summary */ env->fpscr |= FP_FX; - if (fpscr_ue != 0) { - /* XXX: should adjust the result */ + if (env->fpscr & FP_UE) { /* Update the floating-point enabled exception summary */ env->fpscr |= FP_FEX; /* We must update the target FPR before raising the exception */ @@ -370,11 +378,10 @@ static inline void float_inexact_excp(CPUPPCState *env) { CPUState *cs = env_cpu(env); - env->fpscr |= FP_FI; env->fpscr |= FP_XX; /* Update the floating-point exception summary */ env->fpscr |= FP_FX; - if (fpscr_xe != 0) { + if (env->fpscr & FP_XE) { /* Update the floating-point enabled exception summary */ env->fpscr |= FP_FEX; /* We must update the target FPR before raising the exception */ @@ -414,20 +421,77 @@ void helper_store_fpscr(CPUPPCState *env, uint64_t val, uint32_t nibbles) ppc_store_fpscr(env, val); } -static void do_float_check_status(CPUPPCState *env, uintptr_t raddr) +static void do_fpscr_check_status(CPUPPCState *env, uintptr_t raddr) +{ + CPUState *cs = env_cpu(env); + target_ulong fpscr = env->fpscr; + int error = 0; + + if ((fpscr & FP_OX) && (fpscr & FP_OE)) { + error = POWERPC_EXCP_FP_OX; + } else if ((fpscr & FP_UX) && (fpscr & FP_UE)) { + error = POWERPC_EXCP_FP_UX; + } else if ((fpscr & FP_XX) && (fpscr & FP_XE)) { + error = POWERPC_EXCP_FP_XX; + } else if ((fpscr & FP_ZX) && (fpscr & FP_ZE)) { + error = POWERPC_EXCP_FP_ZX; + } else if (fpscr & FP_VE) { + if (fpscr & FP_VXSOFT) { + error = POWERPC_EXCP_FP_VXSOFT; + } else if (fpscr & FP_VXSNAN) { + error = POWERPC_EXCP_FP_VXSNAN; + } else if (fpscr & FP_VXISI) { + error = POWERPC_EXCP_FP_VXISI; + } else if (fpscr & FP_VXIDI) { + error = POWERPC_EXCP_FP_VXIDI; + } else if (fpscr & FP_VXZDZ) { + error = POWERPC_EXCP_FP_VXZDZ; + } else if (fpscr & FP_VXIMZ) { + error = POWERPC_EXCP_FP_VXIMZ; + } else if (fpscr & FP_VXVC) { + error = POWERPC_EXCP_FP_VXVC; + } else if (fpscr & FP_VXSQRT) { + error = POWERPC_EXCP_FP_VXSQRT; + } else if (fpscr & FP_VXCVI) { + error = POWERPC_EXCP_FP_VXCVI; + } else { + return; + } + } else { + return; + } + cs->exception_index = POWERPC_EXCP_PROGRAM; + env->error_code = error | POWERPC_EXCP_FP; + env->fpscr |= FP_FEX; + /* Deferred floating-point exception after target FPSCR update */ + if (fp_exceptions_enabled(env)) { + raise_exception_err_ra(env, cs->exception_index, + env->error_code, raddr); + } +} + +void helper_fpscr_check_status(CPUPPCState *env) +{ + do_fpscr_check_status(env, GETPC()); +} + +static void do_float_check_status(CPUPPCState *env, bool change_fi, + uintptr_t raddr) { CPUState *cs = env_cpu(env); int status = get_float_exception_flags(&env->fp_status); if (status & float_flag_overflow) { - float_overflow_excp(env); + status |= float_overflow_excp(env); } else if (status & float_flag_underflow) { float_underflow_excp(env); } if (status & float_flag_inexact) { float_inexact_excp(env); - } else { - env->fpscr &= ~FP_FI; /* clear the FPSCR[FI] bit */ + } + if (change_fi) { + env->fpscr = FIELD_DP64(env->fpscr, FPSCR, FI, + !!(status & float_flag_inexact)); } if (cs->exception_index == POWERPC_EXCP_PROGRAM && @@ -442,7 +506,7 @@ static void do_float_check_status(CPUPPCState *env, uintptr_t raddr) void helper_float_check_status(CPUPPCState *env) { - do_float_check_status(env, GETPC()); + do_float_check_status(env, true, GETPC()); } void helper_reset_fpstatus(CPUPPCState 
*env) @@ -450,13 +514,12 @@ void helper_reset_fpstatus(CPUPPCState *env) set_float_exception_flags(0, &env->fp_status); } -static void float_invalid_op_addsub(CPUPPCState *env, bool set_fpcc, - uintptr_t retaddr, int classes) +static void float_invalid_op_addsub(CPUPPCState *env, int flags, + bool set_fpcc, uintptr_t retaddr) { - if ((classes & ~is_neg) == is_inf) { - /* Magnitude subtraction of infinities */ + if (flags & float_flag_invalid_isi) { float_invalid_op_vxisi(env, set_fpcc, retaddr); - } else if (classes & is_snan) { + } else if (flags & float_flag_invalid_snan) { float_invalid_op_vxsnan(env, retaddr); } } @@ -465,39 +528,58 @@ static void float_invalid_op_addsub(CPUPPCState *env, bool set_fpcc, float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2) { float64 ret = float64_add(arg1, arg2, &env->fp_status); - int status = get_float_exception_flags(&env->fp_status); + int flags = get_float_exception_flags(&env->fp_status); - if (unlikely(status & float_flag_invalid)) { - float_invalid_op_addsub(env, 1, GETPC(), - float64_classify(arg1) | - float64_classify(arg2)); + if (unlikely(flags & float_flag_invalid)) { + float_invalid_op_addsub(env, flags, 1, GETPC()); } return ret; } +/* fadds - fadds. */ +float64 helper_fadds(CPUPPCState *env, float64 arg1, float64 arg2) +{ + float64 ret = float64r32_add(arg1, arg2, &env->fp_status); + int flags = get_float_exception_flags(&env->fp_status); + + if (unlikely(flags & float_flag_invalid)) { + float_invalid_op_addsub(env, flags, 1, GETPC()); + } + return ret; +} + /* fsub - fsub. */ float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2) { float64 ret = float64_sub(arg1, arg2, &env->fp_status); - int status = get_float_exception_flags(&env->fp_status); + int flags = get_float_exception_flags(&env->fp_status); - if (unlikely(status & float_flag_invalid)) { - float_invalid_op_addsub(env, 1, GETPC(), - float64_classify(arg1) | - float64_classify(arg2)); + if (unlikely(flags & float_flag_invalid)) { + float_invalid_op_addsub(env, flags, 1, GETPC()); } return ret; } -static void float_invalid_op_mul(CPUPPCState *env, bool set_fprc, - uintptr_t retaddr, int classes) +/* fsubs - fsubs. 
*/ +float64 helper_fsubs(CPUPPCState *env, float64 arg1, float64 arg2) { - if ((classes & (is_zero | is_inf)) == (is_zero | is_inf)) { - /* Multiplication of zero by infinity */ + float64 ret = float64r32_sub(arg1, arg2, &env->fp_status); + int flags = get_float_exception_flags(&env->fp_status); + + if (unlikely(flags & float_flag_invalid)) { + float_invalid_op_addsub(env, flags, 1, GETPC()); + } + return ret; +} + +static void float_invalid_op_mul(CPUPPCState *env, int flags, + bool set_fprc, uintptr_t retaddr) +{ + if (flags & float_flag_invalid_imz) { float_invalid_op_vximz(env, set_fprc, retaddr); - } else if (classes & is_snan) { + } else if (flags & float_flag_invalid_snan) { float_invalid_op_vxsnan(env, retaddr); } } @@ -506,28 +588,35 @@ static void float_invalid_op_mul(CPUPPCState *env, bool set_fprc, float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2) { float64 ret = float64_mul(arg1, arg2, &env->fp_status); - int status = get_float_exception_flags(&env->fp_status); + int flags = get_float_exception_flags(&env->fp_status); - if (unlikely(status & float_flag_invalid)) { - float_invalid_op_mul(env, 1, GETPC(), - float64_classify(arg1) | - float64_classify(arg2)); + if (unlikely(flags & float_flag_invalid)) { + float_invalid_op_mul(env, flags, 1, GETPC()); } return ret; } -static void float_invalid_op_div(CPUPPCState *env, bool set_fprc, - uintptr_t retaddr, int classes) +/* fmuls - fmuls. */ +float64 helper_fmuls(CPUPPCState *env, float64 arg1, float64 arg2) { - classes &= ~is_neg; - if (classes == is_inf) { - /* Division of infinity by infinity */ + float64 ret = float64r32_mul(arg1, arg2, &env->fp_status); + int flags = get_float_exception_flags(&env->fp_status); + + if (unlikely(flags & float_flag_invalid)) { + float_invalid_op_mul(env, flags, 1, GETPC()); + } + return ret; +} + +static void float_invalid_op_div(CPUPPCState *env, int flags, + bool set_fprc, uintptr_t retaddr) +{ + if (flags & float_flag_invalid_idi) { float_invalid_op_vxidi(env, set_fprc, retaddr); - } else if (classes == is_zero) { - /* Division of zero by zero */ + } else if (flags & float_flag_invalid_zdz) { float_invalid_op_vxzdz(env, set_fprc, retaddr); - } else if (classes & is_snan) { + } else if (flags & float_flag_invalid_snan) { float_invalid_op_vxsnan(env, retaddr); } } @@ -536,43 +625,57 @@ static void float_invalid_op_div(CPUPPCState *env, bool set_fprc, float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2) { float64 ret = float64_div(arg1, arg2, &env->fp_status); - int status = get_float_exception_flags(&env->fp_status); + int flags = get_float_exception_flags(&env->fp_status); - if (unlikely(status)) { - if (status & float_flag_invalid) { - float_invalid_op_div(env, 1, GETPC(), - float64_classify(arg1) | - float64_classify(arg2)); - } - if (status & float_flag_divbyzero) { - float_zero_divide_excp(env, GETPC()); - } + if (unlikely(flags & float_flag_invalid)) { + float_invalid_op_div(env, flags, 1, GETPC()); + } + if (unlikely(flags & float_flag_divbyzero)) { + float_zero_divide_excp(env, GETPC()); } return ret; } -static void float_invalid_cvt(CPUPPCState *env, bool set_fprc, - uintptr_t retaddr, int class1) +/* fdivs - fdivs. 
*/ +float64 helper_fdivs(CPUPPCState *env, float64 arg1, float64 arg2) { - float_invalid_op_vxcvi(env, set_fprc, retaddr); - if (class1 & is_snan) { - float_invalid_op_vxsnan(env, retaddr); + float64 ret = float64r32_div(arg1, arg2, &env->fp_status); + int flags = get_float_exception_flags(&env->fp_status); + + if (unlikely(flags & float_flag_invalid)) { + float_invalid_op_div(env, flags, 1, GETPC()); } + if (unlikely(flags & float_flag_divbyzero)) { + float_zero_divide_excp(env, GETPC()); + } + + return ret; +} + +static uint64_t float_invalid_cvt(CPUPPCState *env, int flags, + uint64_t ret, uint64_t ret_nan, + bool set_fprc, uintptr_t retaddr) +{ + /* + * VXCVI is different from most in that it sets two exception bits, + * VXCVI and VXSNAN for an SNaN input. + */ + if (flags & float_flag_invalid_snan) { + env->fpscr |= FP_VXSNAN; + } + float_invalid_op_vxcvi(env, set_fprc, retaddr); + + return flags & float_flag_invalid_cvti ? ret : ret_nan; } #define FPU_FCTI(op, cvt, nanval) \ uint64_t helper_##op(CPUPPCState *env, float64 arg) \ { \ uint64_t ret = float64_to_##cvt(arg, &env->fp_status); \ - int status = get_float_exception_flags(&env->fp_status); \ - \ - if (unlikely(status)) { \ - if (status & float_flag_invalid) { \ - float_invalid_cvt(env, 1, GETPC(), float64_classify(arg)); \ - ret = nanval; \ - } \ - do_float_check_status(env, GETPC()); \ + int flags = get_float_exception_flags(&env->fp_status); \ + if (unlikely(flags & float_flag_invalid)) { \ + ret = float_invalid_cvt(env, flags, ret, nanval, 1, GETPC()); \ } \ return ret; \ } @@ -597,7 +700,7 @@ uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \ } else { \ farg.d = cvtr(arg, &env->fp_status); \ } \ - do_float_check_status(env, GETPC()); \ + do_float_check_status(env, true, GETPC()); \ return farg.ll; \ } @@ -606,32 +709,26 @@ FPU_FCFI(fcfids, int64_to_float32, 1) FPU_FCFI(fcfidu, uint64_to_float64, 0) FPU_FCFI(fcfidus, uint64_to_float32, 1) -static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg, - int rounding_mode) +static uint64_t do_fri(CPUPPCState *env, uint64_t arg, + FloatRoundMode rounding_mode) { - CPU_DoubleU farg; FloatRoundMode old_rounding_mode = get_float_rounding_mode(&env->fp_status); + int flags; - farg.ll = arg; + set_float_rounding_mode(rounding_mode, &env->fp_status); + arg = float64_round_to_int(arg, &env->fp_status); + set_float_rounding_mode(old_rounding_mode, &env->fp_status); - if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) { - /* sNaN round */ + flags = get_float_exception_flags(&env->fp_status); + if (flags & float_flag_invalid_snan) { float_invalid_op_vxsnan(env, GETPC()); - farg.ll = arg | 0x0008000000000000ULL; - } else { - int inexact = get_float_exception_flags(&env->fp_status) & - float_flag_inexact; - set_float_rounding_mode(rounding_mode, &env->fp_status); - farg.ll = float64_round_to_int(farg.d, &env->fp_status); - set_float_rounding_mode(old_rounding_mode, &env->fp_status); - - /* fri* does not set FPSCR[XX] */ - if (!inexact) { - env->fp_status.float_exception_flags &= ~float_flag_inexact; - } } - do_float_check_status(env, GETPC()); - return farg.ll; + + /* fri* does not set FPSCR[XX] */ + set_float_exception_flags(flags & ~float_flag_inexact, &env->fp_status); + do_float_check_status(env, true, GETPC()); + + return arg; } uint64_t helper_frin(CPUPPCState *env, uint64_t arg) @@ -654,57 +751,48 @@ uint64_t helper_frim(CPUPPCState *env, uint64_t arg) return do_fri(env, arg, float_round_down); } -#define FPU_MADDSUB_UPDATE(NAME, TP) \ -static void NAME(CPUPPCState 
*env, TP arg1, TP arg2, TP arg3, \ - unsigned int madd_flags, uintptr_t retaddr) \ -{ \ - if (TP##_is_signaling_nan(arg1, &env->fp_status) || \ - TP##_is_signaling_nan(arg2, &env->fp_status) || \ - TP##_is_signaling_nan(arg3, &env->fp_status)) { \ - /* sNaN operation */ \ - float_invalid_op_vxsnan(env, retaddr); \ - } \ - if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) || \ - (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) { \ - /* Multiplication of zero by infinity */ \ - float_invalid_op_vximz(env, 1, retaddr); \ - } \ - if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) && \ - TP##_is_infinity(arg3)) { \ - uint8_t aSign, bSign, cSign; \ - \ - aSign = TP##_is_neg(arg1); \ - bSign = TP##_is_neg(arg2); \ - cSign = TP##_is_neg(arg3); \ - if (madd_flags & float_muladd_negate_c) { \ - cSign ^= 1; \ - } \ - if (aSign ^ bSign ^ cSign) { \ - float_invalid_op_vxisi(env, 1, retaddr); \ - } \ - } \ +static void float_invalid_op_madd(CPUPPCState *env, int flags, + bool set_fpcc, uintptr_t retaddr) +{ + if (flags & float_flag_invalid_imz) { + float_invalid_op_vximz(env, set_fpcc, retaddr); + } else { + float_invalid_op_addsub(env, flags, set_fpcc, retaddr); + } } -FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32) -FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64) -#define FPU_FMADD(op, madd_flags) \ -uint64_t helper_##op(CPUPPCState *env, uint64_t arg1, \ - uint64_t arg2, uint64_t arg3) \ -{ \ - uint32_t flags; \ - float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags, \ - &env->fp_status); \ - flags = get_float_exception_flags(&env->fp_status); \ - if (flags) { \ - if (flags & float_flag_invalid) { \ - float64_maddsub_update_excp(env, arg1, arg2, arg3, \ - madd_flags, GETPC()); \ - } \ - do_float_check_status(env, GETPC()); \ - } \ - return ret; \ +static float64 do_fmadd(CPUPPCState *env, float64 a, float64 b, + float64 c, int madd_flags, uintptr_t retaddr) +{ + float64 ret = float64_muladd(a, b, c, madd_flags, &env->fp_status); + int flags = get_float_exception_flags(&env->fp_status); + + if (unlikely(flags & float_flag_invalid)) { + float_invalid_op_madd(env, flags, 1, retaddr); + } + return ret; } +static uint64_t do_fmadds(CPUPPCState *env, float64 a, float64 b, + float64 c, int madd_flags, uintptr_t retaddr) +{ + float64 ret = float64r32_muladd(a, b, c, madd_flags, &env->fp_status); + int flags = get_float_exception_flags(&env->fp_status); + + if (unlikely(flags & float_flag_invalid)) { + float_invalid_op_madd(env, flags, 1, retaddr); + } + return ret; +} + +#define FPU_FMADD(op, madd_flags) \ + uint64_t helper_##op(CPUPPCState *env, uint64_t arg1, \ + uint64_t arg2, uint64_t arg3) \ + { return do_fmadd(env, arg1, arg2, arg3, madd_flags, GETPC()); } \ + uint64_t helper_##op##s(CPUPPCState *env, uint64_t arg1, \ + uint64_t arg2, uint64_t arg3) \ + { return do_fmadds(env, arg1, arg2, arg3, madd_flags, GETPC()); } + #define MADD_FLGS 0 #define MSUB_FLGS float_muladd_negate_c #define NMADD_FLGS float_muladd_negate_result @@ -716,62 +804,62 @@ FPU_FMADD(fmsub, MSUB_FLGS) FPU_FMADD(fnmsub, NMSUB_FLGS) /* frsp - frsp. 
*/ +static uint64_t do_frsp(CPUPPCState *env, uint64_t arg, uintptr_t retaddr) +{ + float32 f32 = float64_to_float32(arg, &env->fp_status); + int flags = get_float_exception_flags(&env->fp_status); + + if (unlikely(flags & float_flag_invalid_snan)) { + float_invalid_op_vxsnan(env, retaddr); + } + return helper_todouble(f32); +} + uint64_t helper_frsp(CPUPPCState *env, uint64_t arg) { - CPU_DoubleU farg; - float32 f32; - - farg.ll = arg; - - if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) { - float_invalid_op_vxsnan(env, GETPC()); - } - f32 = float64_to_float32(farg.d, &env->fp_status); - farg.d = float32_to_float64(f32, &env->fp_status); - - return farg.ll; + return do_frsp(env, arg, GETPC()); } -/* fsqrt - fsqrt. */ -float64 helper_fsqrt(CPUPPCState *env, float64 arg) +static void float_invalid_op_sqrt(CPUPPCState *env, int flags, + bool set_fpcc, uintptr_t retaddr) { - float64 ret = float64_sqrt(arg, &env->fp_status); - int status = get_float_exception_flags(&env->fp_status); - - if (unlikely(status & float_flag_invalid)) { - if (unlikely(float64_is_any_nan(arg))) { - if (unlikely(float64_is_signaling_nan(arg, &env->fp_status))) { - /* sNaN square root */ - float_invalid_op_vxsnan(env, GETPC()); - } - } else { - /* Square root of a negative nonzero number */ - float_invalid_op_vxsqrt(env, 1, GETPC()); - } + if (unlikely(flags & float_flag_invalid_sqrt)) { + float_invalid_op_vxsqrt(env, set_fpcc, retaddr); + } else if (unlikely(flags & float_flag_invalid_snan)) { + float_invalid_op_vxsnan(env, retaddr); } - - return ret; } +#define FPU_FSQRT(name, op) \ +float64 helper_##name(CPUPPCState *env, float64 arg) \ +{ \ + float64 ret = op(arg, &env->fp_status); \ + int flags = get_float_exception_flags(&env->fp_status); \ + \ + if (unlikely(flags & float_flag_invalid)) { \ + float_invalid_op_sqrt(env, flags, 1, GETPC()); \ + } \ + \ + return ret; \ +} + +FPU_FSQRT(FSQRT, float64_sqrt) +FPU_FSQRT(FSQRTS, float64r32_sqrt) + /* fre - fre. */ float64 helper_fre(CPUPPCState *env, float64 arg) { /* "Estimate" the reciprocal with actual division. */ float64 ret = float64_div(float64_one, arg, &env->fp_status); - int status = get_float_exception_flags(&env->fp_status); + int flags = get_float_exception_flags(&env->fp_status); - if (unlikely(status)) { - if (status & float_flag_invalid) { - if (float64_is_signaling_nan(arg, &env->fp_status)) { - /* sNaN reciprocal */ - float_invalid_op_vxsnan(env, GETPC()); - } - } - if (status & float_flag_divbyzero) { - float_zero_divide_excp(env, GETPC()); - /* For FPSCR.ZE == 0, the result is 1/2. */ - ret = float64_set_sign(float64_half, float64_is_neg(arg)); - } + if (unlikely(flags & float_flag_invalid_snan)) { + float_invalid_op_vxsnan(env, GETPC()); + } + if (unlikely(flags & float_flag_divbyzero)) { + float_zero_divide_excp(env, GETPC()); + /* For FPSCR.ZE == 0, the result is 1/2. */ + ret = float64_set_sign(float64_half, float64_is_neg(arg)); } return ret; @@ -780,20 +868,20 @@ float64 helper_fre(CPUPPCState *env, float64 arg) /* fres - fres. */ uint64_t helper_fres(CPUPPCState *env, uint64_t arg) { - CPU_DoubleU farg; - float32 f32; + /* "Estimate" the reciprocal with actual division. 
*/ + float64 ret = float64r32_div(float64_one, arg, &env->fp_status); + int flags = get_float_exception_flags(&env->fp_status); - farg.ll = arg; - - if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) { - /* sNaN reciprocal */ + if (unlikely(flags & float_flag_invalid_snan)) { float_invalid_op_vxsnan(env, GETPC()); } - farg.d = float64_div(float64_one, farg.d, &env->fp_status); - f32 = float64_to_float32(farg.d, &env->fp_status); - farg.d = float32_to_float64(f32, &env->fp_status); + if (unlikely(flags & float_flag_divbyzero)) { + float_zero_divide_excp(env, GETPC()); + /* For FPSCR.ZE == 0, the result is 1/2. */ + ret = float64_set_sign(float64_half, float64_is_neg(arg)); + } - return farg.ll; + return ret; } /* frsqrte - frsqrte. */ @@ -802,40 +890,50 @@ float64 helper_frsqrte(CPUPPCState *env, float64 arg) /* "Estimate" the reciprocal with actual division. */ float64 rets = float64_sqrt(arg, &env->fp_status); float64 retd = float64_div(float64_one, rets, &env->fp_status); - int status = get_float_exception_flags(&env->fp_status); + int flags = get_float_exception_flags(&env->fp_status); - if (unlikely(status)) { - if (status & float_flag_invalid) { - if (float64_is_signaling_nan(arg, &env->fp_status)) { - /* sNaN reciprocal */ - float_invalid_op_vxsnan(env, GETPC()); - } else { - /* Square root of a negative nonzero number */ - float_invalid_op_vxsqrt(env, 1, GETPC()); - } - } - if (status & float_flag_divbyzero) { - /* Reciprocal of (square root of) zero. */ - float_zero_divide_excp(env, GETPC()); - } + if (unlikely(flags & float_flag_invalid)) { + float_invalid_op_sqrt(env, flags, 1, GETPC()); + } + if (unlikely(flags & float_flag_divbyzero)) { + /* Reciprocal of (square root of) zero. */ + float_zero_divide_excp(env, GETPC()); + } + + return retd; +} + +/* frsqrtes - frsqrtes. */ +float64 helper_frsqrtes(CPUPPCState *env, float64 arg) +{ + /* "Estimate" the reciprocal with actual division. */ + float64 rets = float64_sqrt(arg, &env->fp_status); + float64 retd = float64r32_div(float64_one, rets, &env->fp_status); + int flags = get_float_exception_flags(&env->fp_status); + + if (unlikely(flags & float_flag_invalid)) { + float_invalid_op_sqrt(env, flags, 1, GETPC()); + } + if (unlikely(flags & float_flag_divbyzero)) { + /* Reciprocal of (square root of) zero. */ + float_zero_divide_excp(env, GETPC()); } return retd; } /* fsel - fsel. 
*/ -uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2, - uint64_t arg3) +uint64_t helper_FSEL(uint64_t a, uint64_t b, uint64_t c) { - CPU_DoubleU farg1; + CPU_DoubleU fa; - farg1.ll = arg1; + fa.ll = a; - if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && - !float64_is_any_nan(farg1.d)) { - return arg2; + if ((!float64_is_neg(fa.d) || float64_is_zero(fa.d)) && + !float64_is_any_nan(fa.d)) { + return c; } else { - return arg3; + return b; } } @@ -1598,13 +1696,13 @@ uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2) * nels - number of elements (1, 2 or 4) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) - * sfprf - set FPRF + * sfifprf - set FI and FPRF */ -#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp) \ +#define VSX_ADD_SUB(name, op, nels, tp, fld, sfifprf, r2sp) \ void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \ ppc_vsr_t *xa, ppc_vsr_t *xb) \ { \ - ppc_vsr_t t = *xt; \ + ppc_vsr_t t = { }; \ int i; \ \ helper_reset_fpstatus(env); \ @@ -1616,21 +1714,20 @@ void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ \ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ - float_invalid_op_addsub(env, sfprf, GETPC(), \ - tp##_classify(xa->fld) | \ - tp##_classify(xb->fld)); \ + float_invalid_op_addsub(env, tstat.float_exception_flags, \ + sfifprf, GETPC()); \ } \ \ if (r2sp) { \ - t.fld = helper_frsp(env, t.fld); \ + t.fld = do_frsp(env, t.fld, GETPC()); \ } \ \ - if (sfprf) { \ + if (sfifprf) { \ helper_compute_fprf_float64(env, t.fld); \ } \ } \ *xt = t; \ - do_float_check_status(env, GETPC()); \ + do_float_check_status(env, sfifprf, GETPC()); \ } VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0) @@ -1660,15 +1757,13 @@ void helper_xsaddqp(CPUPPCState *env, uint32_t opcode, env->fp_status.float_exception_flags |= tstat.float_exception_flags; if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { - float_invalid_op_addsub(env, 1, GETPC(), - float128_classify(xa->f128) | - float128_classify(xb->f128)); + float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC()); } helper_compute_fprf_float128(env, t.f128); *xt = t; - do_float_check_status(env, GETPC()); + do_float_check_status(env, true, GETPC()); } /* @@ -1677,13 +1772,13 @@ void helper_xsaddqp(CPUPPCState *env, uint32_t opcode, * nels - number of elements (1, 2 or 4) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) - * sfprf - set FPRF + * sfifprf - set FI and FPRF */ -#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp) \ +#define VSX_MUL(op, nels, tp, fld, sfifprf, r2sp) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ ppc_vsr_t *xa, ppc_vsr_t *xb) \ { \ - ppc_vsr_t t = *xt; \ + ppc_vsr_t t = { }; \ int i; \ \ helper_reset_fpstatus(env); \ @@ -1695,22 +1790,21 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ \ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ - float_invalid_op_mul(env, sfprf, GETPC(), \ - tp##_classify(xa->fld) | \ - tp##_classify(xb->fld)); \ + float_invalid_op_mul(env, tstat.float_exception_flags, \ + sfifprf, GETPC()); \ } \ \ if (r2sp) { \ - t.fld = helper_frsp(env, t.fld); \ + t.fld = do_frsp(env, t.fld, GETPC()); \ } \ \ - if (sfprf) { \ + if (sfifprf) { \ helper_compute_fprf_float64(env, t.fld); \ } \ } \ \ *xt = t; \ - do_float_check_status(env, GETPC()); \ + do_float_check_status(env, sfifprf, 
GETPC()); \ } VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0) @@ -1735,14 +1829,12 @@ void helper_xsmulqp(CPUPPCState *env, uint32_t opcode, env->fp_status.float_exception_flags |= tstat.float_exception_flags; if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { - float_invalid_op_mul(env, 1, GETPC(), - float128_classify(xa->f128) | - float128_classify(xb->f128)); + float_invalid_op_mul(env, tstat.float_exception_flags, 1, GETPC()); } helper_compute_fprf_float128(env, t.f128); *xt = t; - do_float_check_status(env, GETPC()); + do_float_check_status(env, true, GETPC()); } /* @@ -1751,13 +1843,13 @@ void helper_xsmulqp(CPUPPCState *env, uint32_t opcode, * nels - number of elements (1, 2 or 4) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) - * sfprf - set FPRF + * sfifprf - set FI and FPRF */ -#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp) \ +#define VSX_DIV(op, nels, tp, fld, sfifprf, r2sp) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ ppc_vsr_t *xa, ppc_vsr_t *xb) \ { \ - ppc_vsr_t t = *xt; \ + ppc_vsr_t t = { }; \ int i; \ \ helper_reset_fpstatus(env); \ @@ -1769,25 +1861,24 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ \ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ - float_invalid_op_div(env, sfprf, GETPC(), \ - tp##_classify(xa->fld) | \ - tp##_classify(xb->fld)); \ + float_invalid_op_div(env, tstat.float_exception_flags, \ + sfifprf, GETPC()); \ } \ if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) { \ float_zero_divide_excp(env, GETPC()); \ } \ \ if (r2sp) { \ - t.fld = helper_frsp(env, t.fld); \ + t.fld = do_frsp(env, t.fld, GETPC()); \ } \ \ - if (sfprf) { \ + if (sfifprf) { \ helper_compute_fprf_float64(env, t.fld); \ } \ } \ \ *xt = t; \ - do_float_check_status(env, GETPC()); \ + do_float_check_status(env, sfifprf, GETPC()); \ } VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0) @@ -1812,9 +1903,7 @@ void helper_xsdivqp(CPUPPCState *env, uint32_t opcode, env->fp_status.float_exception_flags |= tstat.float_exception_flags; if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { - float_invalid_op_div(env, 1, GETPC(), - float128_classify(xa->f128) | - float128_classify(xb->f128)); + float_invalid_op_div(env, tstat.float_exception_flags, 1, GETPC()); } if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) { float_zero_divide_excp(env, GETPC()); @@ -1822,7 +1911,7 @@ void helper_xsdivqp(CPUPPCState *env, uint32_t opcode, helper_compute_fprf_float128(env, t.f128); *xt = t; - do_float_check_status(env, GETPC()); + do_float_check_status(env, true, GETPC()); } /* @@ -1831,12 +1920,12 @@ void helper_xsdivqp(CPUPPCState *env, uint32_t opcode, * nels - number of elements (1, 2 or 4) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) - * sfprf - set FPRF + * sfifprf - set FI and FPRF */ -#define VSX_RE(op, nels, tp, fld, sfprf, r2sp) \ +#define VSX_RE(op, nels, tp, fld, sfifprf, r2sp) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ - ppc_vsr_t t = *xt; \ + ppc_vsr_t t = { }; \ int i; \ \ helper_reset_fpstatus(env); \ @@ -1848,16 +1937,16 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status); \ \ if (r2sp) { \ - t.fld = helper_frsp(env, t.fld); \ + t.fld = do_frsp(env, t.fld, GETPC()); \ } \ \ - if (sfprf) { \ + if (sfifprf) { \ helper_compute_fprf_float64(env, t.fld); \ } \ } \ \ *xt = t; \ - 
do_float_check_status(env, GETPC()); \ + do_float_check_status(env, sfifprf, GETPC()); \ } VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0) @@ -1871,12 +1960,12 @@ VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0) * nels - number of elements (1, 2 or 4) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) - * sfprf - set FPRF + * sfifprf - set FI and FPRF */ -#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp) \ +#define VSX_SQRT(op, nels, tp, fld, sfifprf, r2sp) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ - ppc_vsr_t t = *xt; \ + ppc_vsr_t t = { }; \ int i; \ \ helper_reset_fpstatus(env); \ @@ -1888,24 +1977,21 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ \ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ - if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) { \ - float_invalid_op_vxsqrt(env, sfprf, GETPC()); \ - } else if (tp##_is_signaling_nan(xb->fld, &tstat)) { \ - float_invalid_op_vxsnan(env, GETPC()); \ - } \ + float_invalid_op_sqrt(env, tstat.float_exception_flags, \ + sfifprf, GETPC()); \ } \ \ if (r2sp) { \ - t.fld = helper_frsp(env, t.fld); \ + t.fld = do_frsp(env, t.fld, GETPC()); \ } \ \ - if (sfprf) { \ + if (sfifprf) { \ helper_compute_fprf_float64(env, t.fld); \ } \ } \ \ *xt = t; \ - do_float_check_status(env, GETPC()); \ + do_float_check_status(env, sfifprf, GETPC()); \ } VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0) @@ -1919,12 +2005,12 @@ VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0) * nels - number of elements (1, 2 or 4) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) - * sfprf - set FPRF + * sfifprf - set FI and FPRF */ -#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp) \ +#define VSX_RSQRTE(op, nels, tp, fld, sfifprf, r2sp) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ - ppc_vsr_t t = *xt; \ + ppc_vsr_t t = { }; \ int i; \ \ helper_reset_fpstatus(env); \ @@ -1935,26 +2021,21 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ t.fld = tp##_sqrt(xb->fld, &tstat); \ t.fld = tp##_div(tp##_one, t.fld, &tstat); \ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ - \ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ - if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) { \ - float_invalid_op_vxsqrt(env, sfprf, GETPC()); \ - } else if (tp##_is_signaling_nan(xb->fld, &tstat)) { \ - float_invalid_op_vxsnan(env, GETPC()); \ - } \ + float_invalid_op_sqrt(env, tstat.float_exception_flags, \ + sfifprf, GETPC()); \ } \ - \ if (r2sp) { \ - t.fld = helper_frsp(env, t.fld); \ + t.fld = do_frsp(env, t.fld, GETPC()); \ } \ \ - if (sfprf) { \ + if (sfifprf) { \ helper_compute_fprf_float64(env, t.fld); \ } \ } \ \ *xt = t; \ - do_float_check_status(env, GETPC()); \ + do_float_check_status(env, sfifprf, GETPC()); \ } VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0) @@ -2080,13 +2161,13 @@ VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23) * fld - vsr_t field (VsrD(*) or VsrW(*)) * maddflgs - flags for the float*muladd routine that control the * various forms (madd, msub, nmadd, nmsub) - * sfprf - set FPRF + * sfifprf - set FI and FPRF */ -#define VSX_MADD(op, nels, tp, fld, maddflgs, sfprf, r2sp) \ +#define VSX_MADD(op, nels, tp, fld, maddflgs, sfifprf) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ - ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c) \ + ppc_vsr_t *s1, ppc_vsr_t *s2, ppc_vsr_t *s3) \ { \ - ppc_vsr_t t = *xt; \ + 
ppc_vsr_t t = { }; \ int i; \ \ helper_reset_fpstatus(env); \ @@ -2094,108 +2175,129 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ for (i = 0; i < nels; i++) { \ float_status tstat = env->fp_status; \ set_float_exception_flags(0, &tstat); \ - if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\ - /* \ - * Avoid double rounding errors by rounding the intermediate \ - * result to odd. \ - */ \ - set_float_rounding_mode(float_round_to_zero, &tstat); \ - t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \ - maddflgs, &tstat); \ - t.fld |= (get_float_exception_flags(&tstat) & \ - float_flag_inexact) != 0; \ - } else { \ - t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \ - maddflgs, &tstat); \ - } \ + t.fld = tp##_muladd(s1->fld, s3->fld, s2->fld, maddflgs, &tstat); \ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ \ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ - tp##_maddsub_update_excp(env, xa->fld, b->fld, \ - c->fld, maddflgs, GETPC()); \ + float_invalid_op_madd(env, tstat.float_exception_flags, \ + sfifprf, GETPC()); \ } \ \ - if (r2sp) { \ - t.fld = helper_frsp(env, t.fld); \ - } \ - \ - if (sfprf) { \ + if (sfifprf) { \ helper_compute_fprf_float64(env, t.fld); \ } \ } \ *xt = t; \ - do_float_check_status(env, GETPC()); \ + do_float_check_status(env, sfifprf, GETPC()); \ } -VSX_MADD(xsmadddp, 1, float64, VsrD(0), MADD_FLGS, 1, 0) -VSX_MADD(xsmsubdp, 1, float64, VsrD(0), MSUB_FLGS, 1, 0) -VSX_MADD(xsnmadddp, 1, float64, VsrD(0), NMADD_FLGS, 1, 0) -VSX_MADD(xsnmsubdp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0) -VSX_MADD(xsmaddsp, 1, float64, VsrD(0), MADD_FLGS, 1, 1) -VSX_MADD(xsmsubsp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1) -VSX_MADD(xsnmaddsp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1) -VSX_MADD(xsnmsubsp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1) +VSX_MADD(XSMADDDP, 1, float64, VsrD(0), MADD_FLGS, 1) +VSX_MADD(XSMSUBDP, 1, float64, VsrD(0), MSUB_FLGS, 1) +VSX_MADD(XSNMADDDP, 1, float64, VsrD(0), NMADD_FLGS, 1) +VSX_MADD(XSNMSUBDP, 1, float64, VsrD(0), NMSUB_FLGS, 1) +VSX_MADD(XSMADDSP, 1, float64r32, VsrD(0), MADD_FLGS, 1) +VSX_MADD(XSMSUBSP, 1, float64r32, VsrD(0), MSUB_FLGS, 1) +VSX_MADD(XSNMADDSP, 1, float64r32, VsrD(0), NMADD_FLGS, 1) +VSX_MADD(XSNMSUBSP, 1, float64r32, VsrD(0), NMSUB_FLGS, 1) -VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0, 0) -VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0) -VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0) -VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0) +VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0) +VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0) +VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0) +VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0) -VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0) -VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0) -VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0) -VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0) +VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0) +VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0) +VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0) +VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0) /* - * VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision + * VSX_MADDQ - VSX floating point quad-precision muliply/add * op - instruction mnemonic - * cmp - comparison operation - * exp - expected result of comparison - * svxvc - set VXVC bit + * maddflgs - flags for the float*muladd routine that control the + * 
various forms (madd, msub, nmadd, nmsub) + * ro - round to odd */ -#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc) \ -void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ - ppc_vsr_t *xa, ppc_vsr_t *xb) \ -{ \ - ppc_vsr_t t = *xt; \ - bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false; \ - \ - if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \ - float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \ - vxsnan_flag = true; \ - if (fpscr_ve == 0 && svxvc) { \ - vxvc_flag = true; \ - } \ - } else if (svxvc) { \ - vxvc_flag = float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || \ - float64_is_quiet_nan(xb->VsrD(0), &env->fp_status); \ - } \ - if (vxsnan_flag) { \ - float_invalid_op_vxsnan(env, GETPC()); \ - } \ - if (vxvc_flag) { \ - float_invalid_op_vxvc(env, 0, GETPC()); \ - } \ - vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag); \ - \ - if (!vex_flag) { \ - if (float64_##cmp(xb->VsrD(0), xa->VsrD(0), \ - &env->fp_status) == exp) { \ - t.VsrD(0) = -1; \ - t.VsrD(1) = 0; \ - } else { \ - t.VsrD(0) = 0; \ - t.VsrD(1) = 0; \ - } \ - } \ - *xt = t; \ - do_float_check_status(env, GETPC()); \ +#define VSX_MADDQ(op, maddflgs, ro) \ +void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *s1, ppc_vsr_t *s2,\ + ppc_vsr_t *s3) \ +{ \ + ppc_vsr_t t = *xt; \ + \ + helper_reset_fpstatus(env); \ + \ + float_status tstat = env->fp_status; \ + set_float_exception_flags(0, &tstat); \ + if (ro) { \ + tstat.float_rounding_mode = float_round_to_odd; \ + } \ + t.f128 = float128_muladd(s1->f128, s3->f128, s2->f128, maddflgs, &tstat); \ + env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ + \ + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ + float_invalid_op_madd(env, tstat.float_exception_flags, \ + false, GETPC()); \ + } \ + \ + helper_compute_fprf_float128(env, t.f128); \ + *xt = t; \ + do_float_check_status(env, true, GETPC()); \ } -VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0) -VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1) -VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1) -VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0) +VSX_MADDQ(XSMADDQP, MADD_FLGS, 0) +VSX_MADDQ(XSMADDQPO, MADD_FLGS, 1) +VSX_MADDQ(XSMSUBQP, MSUB_FLGS, 0) +VSX_MADDQ(XSMSUBQPO, MSUB_FLGS, 1) +VSX_MADDQ(XSNMADDQP, NMADD_FLGS, 0) +VSX_MADDQ(XSNMADDQPO, NMADD_FLGS, 1) +VSX_MADDQ(XSNMSUBQP, NMSUB_FLGS, 0) +VSX_MADDQ(XSNMSUBQPO, NMSUB_FLGS, 0) + +/* + * VSX_SCALAR_CMP - VSX scalar floating point compare + * op - instruction mnemonic + * tp - type + * cmp - comparison operation + * fld - vsr_t field + * svxvc - set VXVC bit + */ +#define VSX_SCALAR_CMP(op, tp, cmp, fld, svxvc) \ + void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ + ppc_vsr_t *xa, ppc_vsr_t *xb) \ +{ \ + int flags; \ + bool r, vxvc; \ + \ + helper_reset_fpstatus(env); \ + \ + if (svxvc) { \ + r = tp##_##cmp(xb->fld, xa->fld, &env->fp_status); \ + } else { \ + r = tp##_##cmp##_quiet(xb->fld, xa->fld, &env->fp_status); \ + } \ + \ + flags = get_float_exception_flags(&env->fp_status); \ + if (unlikely(flags & float_flag_invalid)) { \ + vxvc = svxvc; \ + if (flags & float_flag_invalid_snan) { \ + float_invalid_op_vxsnan(env, GETPC()); \ + vxvc &= !(env->fpscr & FP_VE); \ + } \ + if (vxvc) { \ + float_invalid_op_vxvc(env, 0, GETPC()); \ + } \ + } \ + \ + memset(xt, 0, sizeof(*xt)); \ + memset(&xt->fld, -r, sizeof(xt->fld)); \ + do_float_check_status(env, false, GETPC()); \ +} + +VSX_SCALAR_CMP(XSCMPEQDP, float64, eq, VsrD(0), 0) +VSX_SCALAR_CMP(XSCMPGEDP, float64, le, VsrD(0), 1) +VSX_SCALAR_CMP(XSCMPGTDP, float64, lt, VsrD(0), 1) 
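/*
 * A minimal sketch (illustrative only, not part of the patch and not the
 * literal preprocessor output) of what the new VSX_SCALAR_CMP macro above
 * generates for the svxvc = 0 / float64 / eq / VsrD(0) case, i.e. XSCMPEQDP.
 * The _sketch suffix is a hypothetical name used to keep it distinct from
 * the real generated helper; it assumes the surrounding fpu_helper.c
 * context (ppc_vsr_t, helper_reset_fpstatus, softfloat, GETPC()).
 */
void helper_XSCMPEQDP_sketch(CPUPPCState *env, ppc_vsr_t *xt,
                             ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    int flags;
    bool r;

    helper_reset_fpstatus(env);

    /* svxvc == 0 selects the quiet compare; SNaN inputs still raise invalid. */
    r = float64_eq_quiet(xb->VsrD(0), xa->VsrD(0), &env->fp_status);

    flags = get_float_exception_flags(&env->fp_status);
    if (unlikely(flags & float_flag_invalid)) {
        /* With svxvc == 0 only VXSNAN can be set here, never VXVC. */
        if (flags & float_flag_invalid_snan) {
            float_invalid_op_vxsnan(env, GETPC());
        }
    }

    /* All-ones doubleword if the compare held, all-zeroes otherwise. */
    memset(xt, 0, sizeof(*xt));
    memset(&xt->VsrD(0), -r, sizeof(xt->VsrD(0)));
    do_float_check_status(env, false, GETPC());
}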
+VSX_SCALAR_CMP(XSCMPEQQP, float128, eq, f128, 0) +VSX_SCALAR_CMP(XSCMPGEQP, float128, le, f128, 1) +VSX_SCALAR_CMP(XSCMPGTQP, float128, lt, f128, 1) void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa, ppc_vsr_t *xb) @@ -2223,7 +2325,7 @@ void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode, env->fpscr |= cc << FPSCR_FPCC; env->crf[BF(opcode)] = cc; - do_float_check_status(env, GETPC()); + do_float_check_status(env, false, GETPC()); } void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode, @@ -2252,7 +2354,7 @@ void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode, env->fpscr |= cc << FPSCR_FPCC; env->crf[BF(opcode)] = cc; - do_float_check_status(env, GETPC()); + do_float_check_status(env, false, GETPC()); } static inline void do_scalar_cmp(CPUPPCState *env, ppc_vsr_t *xa, ppc_vsr_t *xb, @@ -2279,7 +2381,7 @@ static inline void do_scalar_cmp(CPUPPCState *env, ppc_vsr_t *xa, ppc_vsr_t *xb, if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { vxsnan_flag = true; - if (fpscr_ve == 0 && ordered) { + if (!(env->fpscr & FP_VE) && ordered) { vxvc_flag = true; } } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || @@ -2305,7 +2407,7 @@ static inline void do_scalar_cmp(CPUPPCState *env, ppc_vsr_t *xa, ppc_vsr_t *xb, float_invalid_op_vxvc(env, 0, GETPC()); } - do_float_check_status(env, GETPC()); + do_float_check_status(env, false, GETPC()); } void helper_xscmpodp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa, @@ -2344,7 +2446,7 @@ static inline void do_scalar_cmpq(CPUPPCState *env, ppc_vsr_t *xa, if (float128_is_signaling_nan(xa->f128, &env->fp_status) || float128_is_signaling_nan(xb->f128, &env->fp_status)) { vxsnan_flag = true; - if (fpscr_ve == 0 && ordered) { + if (!(env->fpscr & FP_VE) && ordered) { vxvc_flag = true; } } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) || @@ -2370,7 +2472,7 @@ static inline void do_scalar_cmpq(CPUPPCState *env, ppc_vsr_t *xa, float_invalid_op_vxvc(env, 0, GETPC()); } - do_float_check_status(env, GETPC()); + do_float_check_status(env, false, GETPC()); } void helper_xscmpoqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa, @@ -2397,7 +2499,7 @@ void helper_xscmpuqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa, void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \ ppc_vsr_t *xa, ppc_vsr_t *xb) \ { \ - ppc_vsr_t t = *xt; \ + ppc_vsr_t t = { }; \ int i; \ \ for (i = 0; i < nels; i++) { \ @@ -2409,7 +2511,7 @@ void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \ } \ \ *xt = t; \ - do_float_check_status(env, GETPC()); \ + do_float_check_status(env, false, GETPC()); \ } VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0)) @@ -2419,46 +2521,43 @@ VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0)) VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i)) VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i)) -#define VSX_MAX_MINC(name, max) \ -void helper_##name(CPUPPCState *env, uint32_t opcode, \ +#define VSX_MAX_MINC(name, max, tp, fld) \ +void helper_##name(CPUPPCState *env, \ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \ { \ - ppc_vsr_t t = *xt; \ - bool vxsnan_flag = false, vex_flag = false; \ + ppc_vsr_t t = { }; \ + bool first; \ \ - if (unlikely(float64_is_any_nan(xa->VsrD(0)) || \ - float64_is_any_nan(xb->VsrD(0)))) { \ - if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \ - float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \ - vxsnan_flag = true; \ - } \ - t.VsrD(0) = xb->VsrD(0); \ - } else if ((max && \ - 
!float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \ - (!max && \ - float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \ - t.VsrD(0) = xa->VsrD(0); \ + helper_reset_fpstatus(env); \ + \ + if (max) { \ + first = tp##_le_quiet(xb->fld, xa->fld, &env->fp_status); \ } else { \ - t.VsrD(0) = xb->VsrD(0); \ + first = tp##_lt_quiet(xa->fld, xb->fld, &env->fp_status); \ } \ \ - vex_flag = fpscr_ve & vxsnan_flag; \ - if (vxsnan_flag) { \ - float_invalid_op_vxsnan(env, GETPC()); \ + if (first) { \ + t.fld = xa->fld; \ + } else { \ + t.fld = xb->fld; \ + if (env->fp_status.float_exception_flags & float_flag_invalid_snan) { \ + float_invalid_op_vxsnan(env, GETPC()); \ + } \ } \ - if (!vex_flag) { \ - *xt = t; \ - } \ -} \ + \ + *xt = t; \ +} -VSX_MAX_MINC(xsmaxcdp, 1); -VSX_MAX_MINC(xsmincdp, 0); +VSX_MAX_MINC(XSMAXCDP, true, float64, VsrD(0)); +VSX_MAX_MINC(XSMINCDP, false, float64, VsrD(0)); +VSX_MAX_MINC(XSMAXCQP, true, float128, f128); +VSX_MAX_MINC(XSMINCQP, false, float128, f128); #define VSX_MAX_MINJ(name, max) \ -void helper_##name(CPUPPCState *env, uint32_t opcode, \ +void helper_##name(CPUPPCState *env, \ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \ { \ - ppc_vsr_t t = *xt; \ + ppc_vsr_t t = { }; \ bool vxsnan_flag = false, vex_flag = false; \ \ if (unlikely(float64_is_any_nan(xa->VsrD(0)))) { \ @@ -2497,7 +2596,7 @@ void helper_##name(CPUPPCState *env, uint32_t opcode, \ t.VsrD(0) = xb->VsrD(0); \ } \ \ - vex_flag = fpscr_ve & vxsnan_flag; \ + vex_flag = (env->fpscr & FP_VE) && vxsnan_flag; \ if (vxsnan_flag) { \ float_invalid_op_vxsnan(env, GETPC()); \ } \ @@ -2506,8 +2605,8 @@ void helper_##name(CPUPPCState *env, uint32_t opcode, \ } \ } \ -VSX_MAX_MINJ(xsmaxjdp, 1); -VSX_MAX_MINJ(xsminjdp, 0); +VSX_MAX_MINJ(XSMAXJDP, 1); +VSX_MAX_MINJ(XSMINJDP, 0); /* * VSX_CMP - VSX floating point compare @@ -2529,6 +2628,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ int all_true = 1; \ int all_false = 1; \ \ + helper_reset_fpstatus(env); \ + \ for (i = 0; i < nels; i++) { \ if (unlikely(tp##_is_any_nan(xa->fld) || \ tp##_is_any_nan(xb->fld))) { \ @@ -2574,14 +2675,16 @@ VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0) * ttp - target type (float32 or float64) * sfld - source vsr_t field * tfld - target vsr_t field (f32 or f64) - * sfprf - set FPRF + * sfifprf - set FI and FPRF */ -#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf) \ +#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfifprf) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ - ppc_vsr_t t = *xt; \ + ppc_vsr_t t = { }; \ int i; \ \ + helper_reset_fpstatus(env); \ + \ for (i = 0; i < nels; i++) { \ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \ if (unlikely(stp##_is_signaling_nan(xb->sfld, \ @@ -2589,20 +2692,46 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ float_invalid_op_vxsnan(env, GETPC()); \ t.tfld = ttp##_snan_to_qnan(t.tfld); \ } \ - if (sfprf) { \ + if (sfifprf) { \ helper_compute_fprf_##ttp(env, t.tfld); \ } \ } \ \ *xt = t; \ - do_float_check_status(env, GETPC()); \ + do_float_check_status(env, sfifprf, GETPC()); \ } -VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1) VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1) -VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0) VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0) +#define VSX_CVT_FP_TO_FP2(op, nels, stp, ttp, sfifprf) \ +void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ +{ 
\ + ppc_vsr_t t = { }; \ + int i; \ + \ + helper_reset_fpstatus(env); \ + \ + for (i = 0; i < nels; i++) { \ + t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \ + if (unlikely(stp##_is_signaling_nan(xb->VsrD(i), \ + &env->fp_status))) { \ + float_invalid_op_vxsnan(env, GETPC()); \ + t.VsrW(2 * i) = ttp##_snan_to_qnan(t.VsrW(2 * i)); \ + } \ + if (sfifprf) { \ + helper_compute_fprf_##ttp(env, t.VsrW(2 * i)); \ + } \ + t.VsrW(2 * i + 1) = t.VsrW(2 * i); \ + } \ + \ + *xt = t; \ + do_float_check_status(env, sfifprf, GETPC()); \ +} + +VSX_CVT_FP_TO_FP2(xvcvdpsp, 2, float64, float32, 0) +VSX_CVT_FP_TO_FP2(xscvdpsp, 1, float64, float32, 1) + /* * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion * op - instruction mnemonic @@ -2613,13 +2742,15 @@ VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0) * tfld - target vsr_t field (f32 or f64) * sfprf - set FPRF */ -#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf) \ -void helper_##op(CPUPPCState *env, uint32_t opcode, \ - ppc_vsr_t *xt, ppc_vsr_t *xb) \ +#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf) \ +void helper_##op(CPUPPCState *env, uint32_t opcode, \ + ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = *xt; \ int i; \ \ + helper_reset_fpstatus(env); \ + \ for (i = 0; i < nels; i++) { \ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \ if (unlikely(stp##_is_signaling_nan(xb->sfld, \ @@ -2633,7 +2764,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode, \ } \ \ *xt = t; \ - do_float_check_status(env, GETPC()); \ + do_float_check_status(env, true, GETPC()); \ } VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1) @@ -2647,14 +2778,16 @@ VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1) * ttp - target type * sfld - source vsr_t field * tfld - target vsr_t field - * sfprf - set FPRF + * sfifprf - set FI and FPRF */ -#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \ +#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfifprf) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = { }; \ int i; \ \ + helper_reset_fpstatus(env); \ + \ for (i = 0; i < nels; i++) { \ t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status); \ if (unlikely(stp##_is_signaling_nan(xb->sfld, \ @@ -2662,13 +2795,13 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ float_invalid_op_vxsnan(env, GETPC()); \ t.tfld = ttp##_snan_to_qnan(t.tfld); \ } \ - if (sfprf) { \ + if (sfifprf) { \ helper_compute_fprf_##ttp(env, t.tfld); \ } \ } \ \ *xt = t; \ - do_float_check_status(env, GETPC()); \ + do_float_check_status(env, sfifprf, GETPC()); \ } VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1) @@ -2676,18 +2809,36 @@ VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1) VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0) VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0) -/* - * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be - * added to this later. 
- */ -void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode, - ppc_vsr_t *xt, ppc_vsr_t *xb) +void helper_XVCVSPBF16(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) +{ + ppc_vsr_t t = { }; + int i, status; + + helper_reset_fpstatus(env); + + for (i = 0; i < 4; i++) { + t.VsrH(2 * i + 1) = float32_to_bfloat16(xb->VsrW(i), &env->fp_status); + } + + status = get_float_exception_flags(&env->fp_status); + if (unlikely(status & float_flag_invalid_snan)) { + float_invalid_op_vxsnan(env, GETPC()); + } + + *xt = t; + do_float_check_status(env, false, GETPC()); +} + +void helper_XSCVQPDP(CPUPPCState *env, uint32_t ro, ppc_vsr_t *xt, + ppc_vsr_t *xb) { ppc_vsr_t t = { }; float_status tstat; + helper_reset_fpstatus(env); + tstat = env->fp_status; - if (unlikely(Rc(opcode) != 0)) { + if (ro != 0) { tstat.float_rounding_mode = float_round_to_odd; } @@ -2700,13 +2851,14 @@ void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode, helper_compute_fprf_float64(env, t.VsrD(0)); *xt = t; - do_float_check_status(env, GETPC()); + do_float_check_status(env, true, GETPC()); } uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb) { uint64_t result, sign, exp, frac; + helper_reset_fpstatus(env); float_status tstat = env->fp_status; set_float_exception_flags(0, &tstat); @@ -2743,12 +2895,9 @@ uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb) return (result << 32) | result; } -uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb) +uint64_t helper_XSCVSPDPN(uint64_t xb) { - float_status tstat = env->fp_status; - set_float_exception_flags(0, &tstat); - - return float32_to_float64(xb >> 32, &tstat); + return helper_todouble(xb >> 32); } /* @@ -2759,48 +2908,101 @@ uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb) * ttp - target type (int32, uint32, int64 or uint64) * sfld - source vsr_t field * tfld - target vsr_t field + * sfi - set FI * rnan - resulting NaN */ -#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan) \ +#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, sfi, rnan) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ - int all_flags = env->fp_status.float_exception_flags, flags; \ - ppc_vsr_t t = *xt; \ - int i; \ + ppc_vsr_t t = { }; \ + int i, flags; \ + \ + helper_reset_fpstatus(env); \ \ for (i = 0; i < nels; i++) { \ - env->fp_status.float_exception_flags = 0; \ t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \ flags = env->fp_status.float_exception_flags; \ if (unlikely(flags & float_flag_invalid)) { \ - float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld)); \ - t.tfld = rnan; \ + t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC());\ } \ - all_flags |= flags; \ } \ \ *xt = t; \ - env->fp_status.float_exception_flags = all_flags; \ - do_float_check_status(env, GETPC()); \ + do_float_check_status(env, sfi, GETPC()); \ } -VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \ +VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), true, \ 0x8000000000000000ULL) -VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \ - 0x80000000U) -VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL) -VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U) -VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \ +VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), true, 0ULL) +VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), false, \ 0x8000000000000000ULL) -VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, 
int32, VsrD(i), VsrW(2 * i), \ - 0x80000000U) -VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL) -VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 * i), 0U) -VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), \ +VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), false, \ + 0ULL) +VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), false, \ 0x8000000000000000ULL) -VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U) -VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL) -VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U) +VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), false, \ + 0x80000000ULL) +VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), \ + false, 0ULL) +VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), false, 0U) + +#define VSX_CVT_FP_TO_INT128(op, tp, rnan) \ +void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t; \ + int flags; \ + \ + helper_reset_fpstatus(env); \ + t.s128 = float128_to_##tp##_round_to_zero(xb->f128, &env->fp_status); \ + flags = get_float_exception_flags(&env->fp_status); \ + if (unlikely(flags & float_flag_invalid)) { \ + t.VsrD(0) = float_invalid_cvt(env, flags, t.VsrD(0), rnan, 0, GETPC());\ + t.VsrD(1) = -(t.VsrD(0) & 1); \ + } \ + \ + *xt = t; \ + do_float_check_status(env, true, GETPC()); \ +} + +VSX_CVT_FP_TO_INT128(XSCVQPUQZ, uint128, 0) +VSX_CVT_FP_TO_INT128(XSCVQPSQZ, int128, 0x8000000000000000ULL); + +/* + * Likewise, except that the result is duplicated into both subwords. + * Power ISA v3.1 has Programming Notes for these insns: + * Previous versions of the architecture allowed the contents of + * word 0 of the result register to be undefined. However, all + * processors that support this instruction write the result into + * words 0 and 1 (and words 2 and 3) of the result register, as + * is required by this version of the architecture. 
+ */ +#define VSX_CVT_FP_TO_INT2(op, nels, stp, ttp, sfi, rnan) \ +void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t = { }; \ + int i, flags; \ + \ + helper_reset_fpstatus(env); \ + \ + for (i = 0; i < nels; i++) { \ + t.VsrW(2 * i) = stp##_to_##ttp##_round_to_zero(xb->VsrD(i), \ + &env->fp_status); \ + flags = env->fp_status.float_exception_flags; \ + if (unlikely(flags & float_flag_invalid)) { \ + t.VsrW(2 * i) = float_invalid_cvt(env, flags, t.VsrW(2 * i), \ + rnan, 0, GETPC()); \ + } \ + t.VsrW(2 * i + 1) = t.VsrW(2 * i); \ + } \ + \ + *xt = t; \ + do_float_check_status(env, sfi, GETPC()); \ +} + +VSX_CVT_FP_TO_INT2(xscvdpsxws, 1, float64, int32, true, 0x80000000U) +VSX_CVT_FP_TO_INT2(xscvdpuxws, 1, float64, uint32, true, 0U) +VSX_CVT_FP_TO_INT2(xvcvdpsxws, 2, float64, int32, false, 0x80000000U) +VSX_CVT_FP_TO_INT2(xvcvdpuxws, 2, float64, uint32, false, 0U) /* * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion @@ -2816,20 +3018,22 @@ void helper_##op(CPUPPCState *env, uint32_t opcode, \ ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = { }; \ + int flags; \ + \ + helper_reset_fpstatus(env); \ \ t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \ - if (env->fp_status.float_exception_flags & float_flag_invalid) { \ - float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld)); \ - t.tfld = rnan; \ + flags = get_float_exception_flags(&env->fp_status); \ + if (flags & float_flag_invalid) { \ + t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC()); \ } \ \ *xt = t; \ - do_float_check_status(env, GETPC()); \ + do_float_check_status(env, true, GETPC()); \ } VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \ 0x8000000000000000ULL) - VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \ 0xffffffff80000000ULL) VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL) @@ -2844,26 +3048,28 @@ VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL) * sfld - source vsr_t field * tfld - target vsr_t field * jdef - definition of the j index (i or 2*i) - * sfprf - set FPRF + * sfifprf - set FI and FPRF */ -#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp) \ +#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfifprf, r2sp)\ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ - ppc_vsr_t t = *xt; \ + ppc_vsr_t t = { }; \ int i; \ \ + helper_reset_fpstatus(env); \ + \ for (i = 0; i < nels; i++) { \ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \ if (r2sp) { \ - t.tfld = helper_frsp(env, t.tfld); \ + t.tfld = do_frsp(env, t.tfld, GETPC()); \ } \ - if (sfprf) { \ + if (sfifprf) { \ helper_compute_fprf_float64(env, t.tfld); \ } \ } \ \ *xt = t; \ - do_float_check_status(env, GETPC()); \ + do_float_check_status(env, sfifprf, GETPC()); \ } VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0) @@ -2874,11 +3080,39 @@ VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0) VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0) VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0) VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0) -VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0) -VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0) VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0) VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, 
VsrW(i), VsrW(i), 0, 0) +#define VSX_CVT_INT_TO_FP2(op, stp, ttp) \ +void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t = { }; \ + int i; \ + \ + for (i = 0; i < 2; i++) { \ + t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \ + t.VsrW(2 * i + 1) = t.VsrW(2 * i); \ + } \ + \ + *xt = t; \ + do_float_check_status(env, false, GETPC()); \ +} + +VSX_CVT_INT_TO_FP2(xvcvsxdsp, int64, float32) +VSX_CVT_INT_TO_FP2(xvcvuxdsp, uint64, float32) + +#define VSX_CVT_INT128_TO_FP(op, tp) \ +void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)\ +{ \ + helper_reset_fpstatus(env); \ + xt->f128 = tp##_to_float128(xb->s128, &env->fp_status); \ + helper_compute_fprf_float128(env, xt->f128); \ + do_float_check_status(env, true, GETPC()); \ +} + +VSX_CVT_INT128_TO_FP(XSCVUQQP, uint128); +VSX_CVT_INT128_TO_FP(XSCVSQQP, int128); + /* * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion * op - instruction mnemonic @@ -2893,11 +3127,12 @@ void helper_##op(CPUPPCState *env, uint32_t opcode, \ { \ ppc_vsr_t t = *xt; \ \ + helper_reset_fpstatus(env); \ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \ helper_compute_fprf_##ttp(env, t.tfld); \ \ *xt = t; \ - do_float_check_status(env, GETPC()); \ + do_float_check_status(env, true, GETPC()); \ } VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128) @@ -2917,15 +3152,17 @@ VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) * rmode - rounding mode - * sfprf - set FPRF + * sfifprf - set FI and FPRF */ -#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf) \ +#define VSX_ROUND(op, nels, tp, fld, rmode, sfifprf) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ - ppc_vsr_t t = *xt; \ + ppc_vsr_t t = { }; \ int i; \ FloatRoundMode curr_rounding_mode; \ \ + helper_reset_fpstatus(env); \ + \ if (rmode != FLOAT_ROUND_CURRENT) { \ curr_rounding_mode = get_float_rounding_mode(&env->fp_status); \ set_float_rounding_mode(rmode, &env->fp_status); \ @@ -2939,7 +3176,7 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ } else { \ t.fld = tp##_round_to_int(xb->fld, &env->fp_status); \ } \ - if (sfprf) { \ + if (sfifprf) { \ helper_compute_fprf_float64(env, t.fld); \ } \ } \ @@ -2955,7 +3192,7 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ } \ \ *xt = t; \ - do_float_check_status(env, GETPC()); \ + do_float_check_status(env, sfifprf, GETPC()); \ } VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1) @@ -2980,35 +3217,14 @@ uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb) { helper_reset_fpstatus(env); - uint64_t xt = helper_frsp(env, xb); + uint64_t xt = do_frsp(env, xb, GETPC()); helper_compute_fprf_float64(env, xt); - do_float_check_status(env, GETPC()); + do_float_check_status(env, true, GETPC()); return xt; } -#define VSX_XXPERM(op, indexed) \ -void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ - ppc_vsr_t *xa, ppc_vsr_t *pcv) \ -{ \ - ppc_vsr_t t = *xt; \ - int i, idx; \ - \ - for (i = 0; i < 16; i++) { \ - idx = pcv->VsrB(i) & 0x1F; \ - if (indexed) { \ - idx = 31 - idx; \ - } \ - t.VsrB(i) = (idx <= 15) ? 
xa->VsrB(idx) \ - : xt->VsrB(idx - 16); \ - } \ - *xt = t; \ -} - -VSX_XXPERM(xxperm, 0) -VSX_XXPERM(xxpermr, 1) - -void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) +void helper_XVXSIGSP(ppc_vsr_t *xt, ppc_vsr_t *xb) { ppc_vsr_t t = { }; uint32_t exp, i, fraction; @@ -3025,94 +3241,82 @@ void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) *xt = t; } -/* - * VSX_TEST_DC - VSX floating point test data class - * op - instruction mnemonic - * nels - number of elements (1, 2 or 4) - * xbn - VSR register number - * tp - type (float32 or float64) - * fld - vsr_t field (VsrD(*) or VsrW(*)) - * tfld - target vsr_t field (VsrD(*) or VsrW(*)) - * fld_max - target field max - * scrf - set result in CR and FPCC - */ -#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf) \ -void helper_##op(CPUPPCState *env, uint32_t opcode) \ +#define VSX_TSTDC(tp) \ +static int32_t tp##_tstdc(tp b, uint32_t dcmx) \ { \ - ppc_vsr_t *xt = &env->vsr[xT(opcode)]; \ - ppc_vsr_t *xb = &env->vsr[xbn]; \ - ppc_vsr_t t = { }; \ - uint32_t i, sign, dcmx; \ - uint32_t cc, match = 0; \ - \ - if (!scrf) { \ - dcmx = DCMX_XV(opcode); \ - } else { \ - t = *xt; \ - dcmx = DCMX(opcode); \ - } \ - \ - for (i = 0; i < nels; i++) { \ - sign = tp##_is_neg(xb->fld); \ - if (tp##_is_any_nan(xb->fld)) { \ - match = extract32(dcmx, 6, 1); \ - } else if (tp##_is_infinity(xb->fld)) { \ - match = extract32(dcmx, 4 + !sign, 1); \ - } else if (tp##_is_zero(xb->fld)) { \ - match = extract32(dcmx, 2 + !sign, 1); \ - } else if (tp##_is_zero_or_denormal(xb->fld)) { \ - match = extract32(dcmx, 0 + !sign, 1); \ - } \ - \ - if (scrf) { \ - cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT; \ - env->fpscr &= ~FP_FPCC; \ - env->fpscr |= cc << FPSCR_FPCC; \ - env->crf[BF(opcode)] = cc; \ - } else { \ - t.tfld = match ? 
fld_max : 0; \ - } \ - match = 0; \ - } \ - if (!scrf) { \ - *xt = t; \ + uint32_t match = 0; \ + uint32_t sign = tp##_is_neg(b); \ + if (tp##_is_any_nan(b)) { \ + match = extract32(dcmx, 6, 1); \ + } else if (tp##_is_infinity(b)) { \ + match = extract32(dcmx, 4 + !sign, 1); \ + } else if (tp##_is_zero(b)) { \ + match = extract32(dcmx, 2 + !sign, 1); \ + } else if (tp##_is_zero_or_denormal(b)) { \ + match = extract32(dcmx, 0 + !sign, 1); \ } \ + return (match != 0); \ } -VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0) -VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0) -VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1) -VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1) +VSX_TSTDC(float32) +VSX_TSTDC(float64) +VSX_TSTDC(float128) +#undef VSX_TSTDC -void helper_xststdcsp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb) +void helper_XVTSTDCDP(ppc_vsr_t *t, ppc_vsr_t *b, uint64_t dcmx, uint32_t v) { - uint32_t dcmx, sign, exp; - uint32_t cc, match = 0, not_sp = 0; + int i; + for (i = 0; i < 2; i++) { + t->s64[i] = (int64_t)-float64_tstdc(b->f64[i], dcmx); + } +} - dcmx = DCMX(opcode); - exp = (xb->VsrD(0) >> 52) & 0x7FF; +void helper_XVTSTDCSP(ppc_vsr_t *t, ppc_vsr_t *b, uint64_t dcmx, uint32_t v) +{ + int i; + for (i = 0; i < 4; i++) { + t->s32[i] = (int32_t)-float32_tstdc(b->f32[i], dcmx); + } +} - sign = float64_is_neg(xb->VsrD(0)); - if (float64_is_any_nan(xb->VsrD(0))) { - match = extract32(dcmx, 6, 1); - } else if (float64_is_infinity(xb->VsrD(0))) { - match = extract32(dcmx, 4 + !sign, 1); - } else if (float64_is_zero(xb->VsrD(0))) { - match = extract32(dcmx, 2 + !sign, 1); - } else if (float64_is_zero_or_denormal(xb->VsrD(0)) || - (exp > 0 && exp < 0x381)) { - match = extract32(dcmx, 0 + !sign, 1); +static bool not_SP_value(float64 val) +{ + return val != helper_todouble(helper_tosingle(val)); +} + +/* + * VSX_XS_TSTDC - VSX Scalar Test Data Class + * NAME - instruction name + * FLD - vsr_t field (VsrD(0) or f128) + * TP - type (float64 or float128) + */ +#define VSX_XS_TSTDC(NAME, FLD, TP) \ + void helper_##NAME(CPUPPCState *env, uint32_t bf, \ + uint32_t dcmx, ppc_vsr_t *b) \ + { \ + uint32_t cc, match, sign = TP##_is_neg(b->FLD); \ + match = TP##_tstdc(b->FLD, dcmx); \ + cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT; \ + env->fpscr &= ~FP_FPCC; \ + env->fpscr |= cc << FPSCR_FPCC; \ + env->crf[bf] = cc; \ } - not_sp = !float64_eq(xb->VsrD(0), - float32_to_float64( - float64_to_float32(xb->VsrD(0), &env->fp_status), - &env->fp_status), &env->fp_status); +VSX_XS_TSTDC(XSTSTDCDP, VsrD(0), float64) +VSX_XS_TSTDC(XSTSTDCQP, f128, float128) +#undef VSX_XS_TSTDC +void helper_XSTSTDCSP(CPUPPCState *env, uint32_t bf, + uint32_t dcmx, ppc_vsr_t *b) +{ + uint32_t cc, match, sign = float64_is_neg(b->VsrD(0)); + uint32_t exp = (b->VsrD(0) >> 52) & 0x7FF; + int not_sp = (int)not_SP_value(b->VsrD(0)); + match = float64_tstdc(b->VsrD(0), dcmx) || (exp > 0 && exp < 0x381); cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT; env->fpscr &= ~FP_FPCC; env->fpscr |= cc << FPSCR_FPCC; - env->crf[BF(opcode)] = cc; + env->crf[bf] = cc; } void helper_xsrqpi(CPUPPCState *env, uint32_t opcode, @@ -3130,7 +3334,7 @@ void helper_xsrqpi(CPUPPCState *env, uint32_t opcode, if (r == 0 && rmc == 0) { rmode = float_round_ties_away; } else if (r == 0 && rmc == 0x3) { - rmode = fpscr_rn; + rmode = env->fpscr & FP_RN; } else if (r == 1) { switch (rmc) { case 0: @@ -3156,11 +3360,8 @@ void 
helper_xsrqpi(CPUPPCState *env, uint32_t opcode, t.f128 = float128_round_to_int(xb->f128, &tstat); env->fp_status.float_exception_flags |= tstat.float_exception_flags; - if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { - if (float128_is_signaling_nan(xb->f128, &tstat)) { - float_invalid_op_vxsnan(env, GETPC()); - t.f128 = float128_snan_to_qnan(t.f128); - } + if (unlikely(tstat.float_exception_flags & float_flag_invalid_snan)) { + float_invalid_op_vxsnan(env, GETPC()); } if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) { @@ -3168,7 +3369,7 @@ void helper_xsrqpi(CPUPPCState *env, uint32_t opcode, } helper_compute_fprf_float128(env, t.f128); - do_float_check_status(env, GETPC()); + do_float_check_status(env, true, GETPC()); *xt = t; } @@ -3187,7 +3388,7 @@ void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode, if (r == 0 && rmc == 0) { rmode = float_round_ties_away; } else if (r == 0 && rmc == 0x3) { - rmode = fpscr_rn; + rmode = env->fpscr & FP_RN; } else if (r == 1) { switch (rmc) { case 0: @@ -3214,16 +3415,14 @@ void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode, t.f128 = floatx80_to_float128(round_res, &tstat); env->fp_status.float_exception_flags |= tstat.float_exception_flags; - if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { - if (float128_is_signaling_nan(xb->f128, &tstat)) { - float_invalid_op_vxsnan(env, GETPC()); - t.f128 = float128_snan_to_qnan(t.f128); - } + if (unlikely(tstat.float_exception_flags & float_flag_invalid_snan)) { + float_invalid_op_vxsnan(env, GETPC()); + t.f128 = float128_snan_to_qnan(t.f128); } helper_compute_fprf_float128(env, t.f128); *xt = t; - do_float_check_status(env, GETPC()); + do_float_check_status(env, true, GETPC()); } void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode, @@ -3244,20 +3443,12 @@ void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode, env->fp_status.float_exception_flags |= tstat.float_exception_flags; if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { - if (float128_is_signaling_nan(xb->f128, &tstat)) { - float_invalid_op_vxsnan(env, GETPC()); - t.f128 = float128_snan_to_qnan(xb->f128); - } else if (float128_is_quiet_nan(xb->f128, &tstat)) { - t.f128 = xb->f128; - } else if (float128_is_neg(xb->f128) && !float128_is_zero(xb->f128)) { - float_invalid_op_vxsqrt(env, 1, GETPC()); - t.f128 = float128_default_nan(&env->fp_status); - } + float_invalid_op_sqrt(env, tstat.float_exception_flags, 1, GETPC()); } helper_compute_fprf_float128(env, t.f128); *xt = t; - do_float_check_status(env, GETPC()); + do_float_check_status(env, true, GETPC()); } void helper_xssubqp(CPUPPCState *env, uint32_t opcode, @@ -3278,12 +3469,320 @@ void helper_xssubqp(CPUPPCState *env, uint32_t opcode, env->fp_status.float_exception_flags |= tstat.float_exception_flags; if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { - float_invalid_op_addsub(env, 1, GETPC(), - float128_classify(xa->f128) | - float128_classify(xb->f128)); + float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC()); } helper_compute_fprf_float128(env, t.f128); *xt = t; - do_float_check_status(env, GETPC()); + do_float_check_status(env, true, GETPC()); +} + +static inline void vsxger_excp(CPUPPCState *env, uintptr_t retaddr) +{ + /* + * XV*GER instructions execute and set the FPSCR as if exceptions + * are disabled and only at the end throw an exception + */ + target_ulong enable; + enable = env->fpscr & (FP_ENABLES | FP_FI | FP_FR); + env->fpscr &= ~(FP_ENABLES | FP_FI | FP_FR); + int status = 
get_float_exception_flags(&env->fp_status); + if (unlikely(status & float_flag_invalid)) { + if (status & float_flag_invalid_snan) { + float_invalid_op_vxsnan(env, 0); + } + if (status & float_flag_invalid_imz) { + float_invalid_op_vximz(env, false, 0); + } + if (status & float_flag_invalid_isi) { + float_invalid_op_vxisi(env, false, 0); + } + } + do_float_check_status(env, false, retaddr); + env->fpscr |= enable; + do_fpscr_check_status(env, retaddr); +} + +typedef float64 extract_f16(float16, float_status *); + +static float64 extract_hf16(float16 in, float_status *fp_status) +{ + return float16_to_float64(in, true, fp_status); +} + +static float64 extract_bf16(bfloat16 in, float_status *fp_status) +{ + return bfloat16_to_float64(in, fp_status); +} + +static void vsxger16(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask, bool acc, + bool neg_mul, bool neg_acc, extract_f16 extract) +{ + float32 r, aux_acc; + float64 psum, va, vb, vc, vd; + int i, j, xmsk_bit, ymsk_bit; + uint8_t pmsk = FIELD_EX32(mask, GER_MSK, PMSK), + xmsk = FIELD_EX32(mask, GER_MSK, XMSK), + ymsk = FIELD_EX32(mask, GER_MSK, YMSK); + float_status *excp_ptr = &env->fp_status; + for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) { + for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) { + if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) { + va = !(pmsk & 2) ? float64_zero : + extract(a->VsrHF(2 * i), excp_ptr); + vb = !(pmsk & 2) ? float64_zero : + extract(b->VsrHF(2 * j), excp_ptr); + vc = !(pmsk & 1) ? float64_zero : + extract(a->VsrHF(2 * i + 1), excp_ptr); + vd = !(pmsk & 1) ? float64_zero : + extract(b->VsrHF(2 * j + 1), excp_ptr); + psum = float64_mul(va, vb, excp_ptr); + psum = float64r32_muladd(vc, vd, psum, 0, excp_ptr); + r = float64_to_float32(psum, excp_ptr); + if (acc) { + aux_acc = at[i].VsrSF(j); + if (neg_mul) { + r = bfp32_neg(r); + } + if (neg_acc) { + aux_acc = bfp32_neg(aux_acc); + } + r = float32_add(r, aux_acc, excp_ptr); + } + at[i].VsrSF(j) = r; + } else { + at[i].VsrSF(j) = float32_zero; + } + } + } + vsxger_excp(env, GETPC()); +} + +typedef void vsxger_zero(ppc_vsr_t *at, int, int); + +typedef void vsxger_muladd_f(ppc_vsr_t *, ppc_vsr_t *, ppc_vsr_t *, int, int, + int flags, float_status *s); + +static void vsxger_muladd32(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i, + int j, int flags, float_status *s) +{ + at[i].VsrSF(j) = float32_muladd(a->VsrSF(i), b->VsrSF(j), + at[i].VsrSF(j), flags, s); +} + +static void vsxger_mul32(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i, + int j, int flags, float_status *s) +{ + at[i].VsrSF(j) = float32_mul(a->VsrSF(i), b->VsrSF(j), s); +} + +static void vsxger_zero32(ppc_vsr_t *at, int i, int j) +{ + at[i].VsrSF(j) = float32_zero; +} + +static void vsxger_muladd64(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i, + int j, int flags, float_status *s) +{ + if (j >= 2) { + j -= 2; + at[i].VsrDF(j) = float64_muladd(a[i / 2].VsrDF(i % 2), b->VsrDF(j), + at[i].VsrDF(j), flags, s); + } +} + +static void vsxger_mul64(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i, + int j, int flags, float_status *s) +{ + if (j >= 2) { + j -= 2; + at[i].VsrDF(j) = float64_mul(a[i / 2].VsrDF(i % 2), b->VsrDF(j), s); + } +} + +static void vsxger_zero64(ppc_vsr_t *at, int i, int j) +{ + if (j >= 2) { + j -= 2; + at[i].VsrDF(j) = float64_zero; + } +} + +static void vsxger(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask, bool acc, bool neg_mul, + bool neg_acc, vsxger_muladd_f mul, vsxger_muladd_f muladd, + vsxger_zero 
zero) +{ + int i, j, xmsk_bit, ymsk_bit, op_flags; + uint8_t xmsk = mask & 0x0F; + uint8_t ymsk = (mask >> 4) & 0x0F; + float_status *excp_ptr = &env->fp_status; + op_flags = (neg_acc ^ neg_mul) ? float_muladd_negate_c : 0; + op_flags |= (neg_mul) ? float_muladd_negate_result : 0; + helper_reset_fpstatus(env); + for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) { + for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) { + if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) { + if (acc) { + muladd(at, a, b, i, j, op_flags, excp_ptr); + } else { + mul(at, a, b, i, j, op_flags, excp_ptr); + } + } else { + zero(at, i, j); + } + } + } + vsxger_excp(env, GETPC()); +} + +QEMU_FLATTEN +void helper_XVBF16GER2(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger16(env, a, b, at, mask, false, false, false, extract_bf16); +} + +QEMU_FLATTEN +void helper_XVBF16GER2PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger16(env, a, b, at, mask, true, false, false, extract_bf16); +} + +QEMU_FLATTEN +void helper_XVBF16GER2PN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger16(env, a, b, at, mask, true, false, true, extract_bf16); +} + +QEMU_FLATTEN +void helper_XVBF16GER2NP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger16(env, a, b, at, mask, true, true, false, extract_bf16); +} + +QEMU_FLATTEN +void helper_XVBF16GER2NN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger16(env, a, b, at, mask, true, true, true, extract_bf16); +} + +QEMU_FLATTEN +void helper_XVF16GER2(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger16(env, a, b, at, mask, false, false, false, extract_hf16); +} + +QEMU_FLATTEN +void helper_XVF16GER2PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger16(env, a, b, at, mask, true, false, false, extract_hf16); +} + +QEMU_FLATTEN +void helper_XVF16GER2PN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger16(env, a, b, at, mask, true, false, true, extract_hf16); +} + +QEMU_FLATTEN +void helper_XVF16GER2NP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger16(env, a, b, at, mask, true, true, false, extract_hf16); +} + +QEMU_FLATTEN +void helper_XVF16GER2NN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger16(env, a, b, at, mask, true, true, true, extract_hf16); +} + +QEMU_FLATTEN +void helper_XVF32GER(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger(env, a, b, at, mask, false, false, false, vsxger_mul32, + vsxger_muladd32, vsxger_zero32); +} + +QEMU_FLATTEN +void helper_XVF32GERPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger(env, a, b, at, mask, true, false, false, vsxger_mul32, + vsxger_muladd32, vsxger_zero32); +} + +QEMU_FLATTEN +void helper_XVF32GERPN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger(env, a, b, at, mask, true, false, true, vsxger_mul32, + vsxger_muladd32, vsxger_zero32); +} + +QEMU_FLATTEN +void helper_XVF32GERNP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger(env, a, b, at, mask, true, true, false, vsxger_mul32, + vsxger_muladd32, vsxger_zero32); +} + +QEMU_FLATTEN +void helper_XVF32GERNN(CPUPPCState *env, ppc_vsr_t *a, 
ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger(env, a, b, at, mask, true, true, true, vsxger_mul32, + vsxger_muladd32, vsxger_zero32); +} + +QEMU_FLATTEN +void helper_XVF64GER(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger(env, a, b, at, mask, false, false, false, vsxger_mul64, + vsxger_muladd64, vsxger_zero64); +} + +QEMU_FLATTEN +void helper_XVF64GERPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger(env, a, b, at, mask, true, false, false, vsxger_mul64, + vsxger_muladd64, vsxger_zero64); +} + +QEMU_FLATTEN +void helper_XVF64GERPN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger(env, a, b, at, mask, true, false, true, vsxger_mul64, + vsxger_muladd64, vsxger_zero64); +} + +QEMU_FLATTEN +void helper_XVF64GERNP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger(env, a, b, at, mask, true, true, false, vsxger_mul64, + vsxger_muladd64, vsxger_zero64); +} + +QEMU_FLATTEN +void helper_XVF64GERNN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + vsxger(env, a, b, at, mask, true, true, true, vsxger_mul64, + vsxger_muladd64, vsxger_zero64); } diff --git a/target/ppc/gdbstub.c b/target/ppc/gdbstub.c index 09ff1328d4..1a0b9ca82c 100644 --- a/target/ppc/gdbstub.c +++ b/target/ppc/gdbstub.c @@ -87,20 +87,22 @@ static int ppc_gdb_register_len(int n) /* * We need to present the registers to gdb in the "current" memory * ordering. For user-only mode we get this for free; - * TARGET_WORDS_BIGENDIAN is set to the proper ordering for the + * TARGET_BIG_ENDIAN is set to the proper ordering for the * binary, and cannot be changed. For system mode, - * TARGET_WORDS_BIGENDIAN is always set, and we must check the current + * TARGET_BIG_ENDIAN is always set, and we must check the current * mode of the chip to see if we're running in little-endian. 
*/ void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len) { #ifndef CONFIG_USER_ONLY - if (!msr_le) { + if (!FIELD_EX64(env->msr, MSR, LE)) { /* do nothing */ } else if (len == 4) { bswap32s((uint32_t *)mem_buf); } else if (len == 8) { bswap64s((uint64_t *)mem_buf); + } else if (len == 16) { + bswap128s((Int128 *)mem_buf); } else { g_assert_not_reached(); } @@ -157,7 +159,7 @@ int ppc_cpu_gdb_read_register(CPUState *cs, GByteArray *buf, int n) gdb_get_regl(buf, env->ctr); break; case 69: - gdb_get_reg32(buf, env->xer); + gdb_get_reg32(buf, cpu_read_xer(env)); break; case 70: gdb_get_reg32(buf, env->fpscr); @@ -215,7 +217,7 @@ int ppc_cpu_gdb_read_register_apple(CPUState *cs, GByteArray *buf, int n) gdb_get_reg64(buf, env->ctr); break; case 69 + 32: - gdb_get_reg32(buf, env->xer); + gdb_get_reg32(buf, cpu_read_xer(env)); break; case 70 + 32: gdb_get_reg64(buf, env->fpscr); @@ -267,7 +269,7 @@ int ppc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) env->ctr = ldtul_p(mem_buf); break; case 69: - env->xer = ldl_p(mem_buf); + cpu_write_xer(env, ldl_p(mem_buf)); break; case 70: /* fpscr */ @@ -317,7 +319,7 @@ int ppc_cpu_gdb_write_register_apple(CPUState *cs, uint8_t *mem_buf, int n) env->ctr = ldq_p(mem_buf); break; case 69 + 32: - env->xer = ldl_p(mem_buf); + cpu_write_xer(env, ldl_p(mem_buf)); break; case 70 + 32: /* fpscr */ @@ -389,15 +391,6 @@ const char *ppc_gdb_get_dynamic_xml(CPUState *cs, const char *xml_name) } #endif -static bool avr_need_swap(CPUPPCState *env) -{ -#ifdef HOST_WORDS_BIGENDIAN - return msr_le; -#else - return !msr_le; -#endif -} - #if !defined(CONFIG_USER_ONLY) static int gdb_find_spr_idx(CPUPPCState *env, int n) { @@ -486,14 +479,9 @@ static int gdb_get_avr_reg(CPUPPCState *env, GByteArray *buf, int n) if (n < 32) { ppc_avr_t *avr = cpu_avr_ptr(env, n); - if (!avr_need_swap(env)) { - gdb_get_reg128(buf, avr->u64[0] , avr->u64[1]); - } else { - gdb_get_reg128(buf, avr->u64[1] , avr->u64[0]); - } + gdb_get_reg128(buf, avr->VsrD(0), avr->VsrD(1)); mem_buf = gdb_get_reg_ptr(buf, 16); - ppc_maybe_bswap_register(env, mem_buf, 8); - ppc_maybe_bswap_register(env, mem_buf + 8, 8); + ppc_maybe_bswap_register(env, mem_buf, 16); return 16; } if (n == 32) { @@ -515,15 +503,9 @@ static int gdb_set_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n) { if (n < 32) { ppc_avr_t *avr = cpu_avr_ptr(env, n); - ppc_maybe_bswap_register(env, mem_buf, 8); - ppc_maybe_bswap_register(env, mem_buf + 8, 8); - if (!avr_need_swap(env)) { - avr->u64[0] = ldq_p(mem_buf); - avr->u64[1] = ldq_p(mem_buf + 8); - } else { - avr->u64[1] = ldq_p(mem_buf); - avr->u64[0] = ldq_p(mem_buf + 8); - } + ppc_maybe_bswap_register(env, mem_buf, 16); + avr->VsrD(0) = ldq_p(mem_buf); + avr->VsrD(1) = ldq_p(mem_buf + 8); return 16; } if (n == 32) { diff --git a/target/ppc/helper.h b/target/ppc/helper.h index 4076aa281e..8dd22a35e4 100644 --- a/target/ppc/helper.h +++ b/target/ppc/helper.h @@ -4,10 +4,14 @@ DEF_HELPER_FLAGS_4(tw, TCG_CALL_NO_WG, void, env, tl, tl, i32) #if defined(TARGET_PPC64) DEF_HELPER_FLAGS_4(td, TCG_CALL_NO_WG, void, env, tl, tl, i32) #endif +DEF_HELPER_4(HASHST, void, env, tl, tl, tl) +DEF_HELPER_4(HASHCHK, void, env, tl, tl, tl) +DEF_HELPER_4(HASHSTP, void, env, tl, tl, tl) +DEF_HELPER_4(HASHCHKP, void, env, tl, tl, tl) #if !defined(CONFIG_USER_ONLY) DEF_HELPER_2(store_msr, void, env, tl) +DEF_HELPER_1(ppc_maybe_interrupt, void, env) DEF_HELPER_1(rfi, void, env) -DEF_HELPER_1(rfsvc, void, env) DEF_HELPER_1(40x_rfci, void, env) DEF_HELPER_1(rfci, void, env) 
DEF_HELPER_1(rfdi, void, env) @@ -18,8 +22,15 @@ DEF_HELPER_2(pminsn, void, env, i32) DEF_HELPER_1(rfid, void, env) DEF_HELPER_1(rfscv, void, env) DEF_HELPER_1(hrfid, void, env) +DEF_HELPER_2(rfebb, void, env, tl) DEF_HELPER_2(store_lpcr, void, env, tl) DEF_HELPER_2(store_pcr, void, env, tl) +DEF_HELPER_2(store_mmcr0, void, env, tl) +DEF_HELPER_2(store_mmcr1, void, env, tl) +DEF_HELPER_3(store_pmc, void, env, i32, i64) +DEF_HELPER_2(read_pmc, tl, env, i32) +DEF_HELPER_2(insns_inc, void, env, i32) +DEF_HELPER_1(handle_pmc5_overflow, void, env) #endif DEF_HELPER_1(check_tlb_flush_local, void, env) DEF_HELPER_1(check_tlb_flush_global, void, env) @@ -46,14 +57,18 @@ DEF_HELPER_4(divwe, tl, env, tl, tl, i32) DEF_HELPER_FLAGS_1(popcntb, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_2(cmpb, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_3(sraw, tl, env, tl, tl) -DEF_HELPER_FLAGS_2(cfuged, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(CFUGED, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(PDEPD, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(PEXTD, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_1(CDTBCD, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(CBCDTD, TCG_CALL_NO_RWG_SE, tl, tl) #if defined(TARGET_PPC64) DEF_HELPER_FLAGS_2(cmpeqb, TCG_CALL_NO_RWG_SE, i32, tl, tl) DEF_HELPER_FLAGS_1(popcntw, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_2(bpermd, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_3(srad, tl, env, tl, tl) -DEF_HELPER_0(darn32, tl) -DEF_HELPER_0(darn64, tl) +DEF_HELPER_FLAGS_0(darn32, TCG_CALL_NO_RWG, tl) +DEF_HELPER_FLAGS_0(darn64, TCG_CALL_NO_RWG, tl) #endif DEF_HELPER_FLAGS_1(cntlsw32, TCG_CALL_NO_RWG_SE, i32, i32) @@ -61,6 +76,7 @@ DEF_HELPER_FLAGS_1(cntlzw32, TCG_CALL_NO_RWG_SE, i32, i32) DEF_HELPER_FLAGS_2(brinc, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_1(float_check_status, void, env) +DEF_HELPER_1(fpscr_check_status, void, env) DEF_HELPER_1(reset_fpstatus, void, env) DEF_HELPER_2(compute_fprf_float64, void, env, i64) DEF_HELPER_3(store_fpscr, void, env, i64, i32) @@ -91,112 +107,95 @@ DEF_HELPER_2(frip, i64, env, i64) DEF_HELPER_2(frim, i64, env, i64) DEF_HELPER_3(fadd, f64, env, f64, f64) +DEF_HELPER_3(fadds, f64, env, f64, f64) DEF_HELPER_3(fsub, f64, env, f64, f64) +DEF_HELPER_3(fsubs, f64, env, f64, f64) DEF_HELPER_3(fmul, f64, env, f64, f64) +DEF_HELPER_3(fmuls, f64, env, f64, f64) DEF_HELPER_3(fdiv, f64, env, f64, f64) +DEF_HELPER_3(fdivs, f64, env, f64, f64) DEF_HELPER_4(fmadd, i64, env, i64, i64, i64) DEF_HELPER_4(fmsub, i64, env, i64, i64, i64) DEF_HELPER_4(fnmadd, i64, env, i64, i64, i64) DEF_HELPER_4(fnmsub, i64, env, i64, i64, i64) -DEF_HELPER_2(fsqrt, f64, env, f64) +DEF_HELPER_4(fmadds, i64, env, i64, i64, i64) +DEF_HELPER_4(fmsubs, i64, env, i64, i64, i64) +DEF_HELPER_4(fnmadds, i64, env, i64, i64, i64) +DEF_HELPER_4(fnmsubs, i64, env, i64, i64, i64) +DEF_HELPER_2(FSQRT, f64, env, f64) +DEF_HELPER_2(FSQRTS, f64, env, f64) DEF_HELPER_2(fre, i64, env, i64) DEF_HELPER_2(fres, i64, env, i64) DEF_HELPER_2(frsqrte, i64, env, i64) -DEF_HELPER_4(fsel, i64, env, i64, i64, i64) +DEF_HELPER_2(frsqrtes, i64, env, i64) +DEF_HELPER_FLAGS_3(FSEL, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) DEF_HELPER_FLAGS_2(ftdiv, TCG_CALL_NO_RWG_SE, i32, i64, i64) DEF_HELPER_FLAGS_1(ftsqrt, TCG_CALL_NO_RWG_SE, i32, i64) #define dh_alias_avr ptr #define dh_ctype_avr ppc_avr_t * +#define dh_typecode_avr dh_typecode_ptr #define dh_alias_vsr ptr #define dh_ctype_vsr ppc_vsr_t * +#define dh_typecode_vsr dh_typecode_ptr -DEF_HELPER_3(vavgub, void, avr, avr, avr) 
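
A recurring pattern in this hunk is the switch from plain DEF_HELPER_N to DEF_HELPER_FLAGS_N with TCG_CALL_NO_RWG. As a rough, illustrative sketch (VEXAMPLE and its body are invented for this note, not part of the patch), the flag promises that the helper neither reads nor writes TCG globals, so the code generator does not have to spill and reload global state around the call:

/* Hypothetical declaration in helper.h; only the pointer arguments are
 * touched, so TCG_CALL_NO_RWG is safe to assert. */
DEF_HELPER_FLAGS_3(VEXAMPLE, TCG_CALL_NO_RWG, void, avr, avr, avr)

/* Matching (invented) definition, e.g. in int_helper.c: element-wise work
 * only, no CPUPPCState access, hence no env parameter and no global use. */
void helper_VEXAMPLE(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
{
    for (int i = 0; i < ARRAY_SIZE(t->u32); i++) {
        t->u32[i] = a->u32[i] ^ b->u32[i];
    }
}
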
-DEF_HELPER_3(vavguh, void, avr, avr, avr) -DEF_HELPER_3(vavguw, void, avr, avr, avr) -DEF_HELPER_3(vabsdub, void, avr, avr, avr) -DEF_HELPER_3(vabsduh, void, avr, avr, avr) -DEF_HELPER_3(vabsduw, void, avr, avr, avr) -DEF_HELPER_3(vavgsb, void, avr, avr, avr) -DEF_HELPER_3(vavgsh, void, avr, avr, avr) -DEF_HELPER_3(vavgsw, void, avr, avr, avr) -DEF_HELPER_4(vcmpequb, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpequh, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpequw, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpequd, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpneb, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpneh, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpnew, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpnezb, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpnezh, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpnezw, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpgtub, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpgtuh, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpgtuw, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpgtud, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpgtsb, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpgtsh, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpgtsw, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpgtsd, void, env, avr, avr, avr) +#define dh_alias_acc ptr +#define dh_ctype_acc ppc_acc_t * +#define dh_typecode_acc dh_typecode_ptr + +DEF_HELPER_FLAGS_4(VAVGUB, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(VAVGUH, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(VAVGUW, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(VABSDUB, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(VABSDUH, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(VABSDUW, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(VAVGSB, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(VAVGSH, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(VAVGSW, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) DEF_HELPER_4(vcmpeqfp, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgefp, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtfp, void, env, avr, avr, avr) DEF_HELPER_4(vcmpbfp, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpequb_dot, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpequh_dot, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpequw_dot, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpequd_dot, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpneb_dot, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpneh_dot, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpnew_dot, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpnezb_dot, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpnezh_dot, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpnezw_dot, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpgtub_dot, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpgtuh_dot, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpgtuw_dot, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpgtud_dot, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpgtsb_dot, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpgtsh_dot, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpgtsw_dot, void, env, avr, avr, avr) -DEF_HELPER_4(vcmpgtsd_dot, void, env, avr, avr, avr) +DEF_HELPER_FLAGS_4(VCMPNEZB, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(VCMPNEZH, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(VCMPNEZW, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) DEF_HELPER_4(vcmpeqfp_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgefp_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtfp_dot, void, env, avr, avr, avr) 
DEF_HELPER_4(vcmpbfp_dot, void, env, avr, avr, avr) -DEF_HELPER_3(vmrglb, void, avr, avr, avr) -DEF_HELPER_3(vmrglh, void, avr, avr, avr) -DEF_HELPER_3(vmrglw, void, avr, avr, avr) -DEF_HELPER_3(vmrghb, void, avr, avr, avr) -DEF_HELPER_3(vmrghh, void, avr, avr, avr) -DEF_HELPER_3(vmrghw, void, avr, avr, avr) -DEF_HELPER_3(vmulesb, void, avr, avr, avr) -DEF_HELPER_3(vmulesh, void, avr, avr, avr) -DEF_HELPER_3(vmulesw, void, avr, avr, avr) -DEF_HELPER_3(vmuleub, void, avr, avr, avr) -DEF_HELPER_3(vmuleuh, void, avr, avr, avr) -DEF_HELPER_3(vmuleuw, void, avr, avr, avr) -DEF_HELPER_3(vmulosb, void, avr, avr, avr) -DEF_HELPER_3(vmulosh, void, avr, avr, avr) -DEF_HELPER_3(vmulosw, void, avr, avr, avr) -DEF_HELPER_3(vmuloub, void, avr, avr, avr) -DEF_HELPER_3(vmulouh, void, avr, avr, avr) -DEF_HELPER_3(vmulouw, void, avr, avr, avr) -DEF_HELPER_3(vmulhsw, void, avr, avr, avr) -DEF_HELPER_3(vmulhuw, void, avr, avr, avr) -DEF_HELPER_3(vmulhsd, void, avr, avr, avr) -DEF_HELPER_3(vmulhud, void, avr, avr, avr) -DEF_HELPER_3(vslo, void, avr, avr, avr) -DEF_HELPER_3(vsro, void, avr, avr, avr) -DEF_HELPER_3(vsrv, void, avr, avr, avr) -DEF_HELPER_3(vslv, void, avr, avr, avr) -DEF_HELPER_3(vaddcuw, void, avr, avr, avr) -DEF_HELPER_2(vprtybw, void, avr, avr) -DEF_HELPER_2(vprtybd, void, avr, avr) -DEF_HELPER_2(vprtybq, void, avr, avr) -DEF_HELPER_3(vsubcuw, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(vmrglb, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(vmrglh, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(vmrglw, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(vmrghb, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(vmrghh, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(vmrghw, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VMULESB, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VMULESH, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VMULESW, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VMULEUB, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VMULEUH, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VMULEUW, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VMULOSB, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VMULOSH, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VMULOSW, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VMULOUB, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VMULOUH, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VMULOUW, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VDIVSQ, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VDIVUQ, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VDIVESD, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VDIVEUD, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VDIVESQ, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VDIVEUQ, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VMODSQ, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VMODUQ, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(vslo, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(vsro, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(vsrv, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(vslv, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VPRTYBQ, TCG_CALL_NO_RWG, void, avr, avr, i32) DEF_HELPER_FLAGS_5(vaddsbs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vaddshs, 
TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vaddsws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) @@ -209,43 +208,39 @@ DEF_HELPER_FLAGS_5(vadduws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vsububs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vsubuhs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vsubuws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) -DEF_HELPER_3(vadduqm, void, avr, avr, avr) -DEF_HELPER_4(vaddecuq, void, avr, avr, avr, avr) -DEF_HELPER_4(vaddeuqm, void, avr, avr, avr, avr) -DEF_HELPER_3(vaddcuq, void, avr, avr, avr) -DEF_HELPER_3(vsubuqm, void, avr, avr, avr) -DEF_HELPER_4(vsubecuq, void, avr, avr, avr, avr) -DEF_HELPER_4(vsubeuqm, void, avr, avr, avr, avr) -DEF_HELPER_3(vsubcuq, void, avr, avr, avr) -DEF_HELPER_4(vsldoi, void, avr, avr, avr, i32) -DEF_HELPER_3(vextractub, void, avr, avr, i32) -DEF_HELPER_3(vextractuh, void, avr, avr, i32) -DEF_HELPER_3(vextractuw, void, avr, avr, i32) -DEF_HELPER_3(vextractd, void, avr, avr, i32) -DEF_HELPER_3(vinsertb, void, avr, avr, i32) -DEF_HELPER_3(vinserth, void, avr, avr, i32) -DEF_HELPER_3(vinsertw, void, avr, avr, i32) -DEF_HELPER_3(vinsertd, void, avr, avr, i32) -DEF_HELPER_2(vextsb2w, void, avr, avr) -DEF_HELPER_2(vextsh2w, void, avr, avr) -DEF_HELPER_2(vextsb2d, void, avr, avr) -DEF_HELPER_2(vextsh2d, void, avr, avr) -DEF_HELPER_2(vextsw2d, void, avr, avr) -DEF_HELPER_2(vnegw, void, avr, avr) -DEF_HELPER_2(vnegd, void, avr, avr) -DEF_HELPER_2(vupkhpx, void, avr, avr) -DEF_HELPER_2(vupklpx, void, avr, avr) -DEF_HELPER_2(vupkhsb, void, avr, avr) -DEF_HELPER_2(vupkhsh, void, avr, avr) -DEF_HELPER_2(vupkhsw, void, avr, avr) -DEF_HELPER_2(vupklsb, void, avr, avr) -DEF_HELPER_2(vupklsh, void, avr, avr) -DEF_HELPER_2(vupklsw, void, avr, avr) -DEF_HELPER_5(vmsumubm, void, env, avr, avr, avr, avr) -DEF_HELPER_5(vmsummbm, void, env, avr, avr, avr, avr) -DEF_HELPER_5(vsel, void, env, avr, avr, avr, avr) -DEF_HELPER_5(vperm, void, env, avr, avr, avr, avr) -DEF_HELPER_5(vpermr, void, env, avr, avr, avr, avr) +DEF_HELPER_FLAGS_3(VADDUQM, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_4(VADDECUQ, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) +DEF_HELPER_FLAGS_4(VADDEUQM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) +DEF_HELPER_FLAGS_3(VADDCUQ, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VSUBUQM, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_4(VSUBECUQ, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) +DEF_HELPER_FLAGS_4(VSUBEUQM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) +DEF_HELPER_FLAGS_3(VSUBCUQ, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_4(vsldoi, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) +DEF_HELPER_FLAGS_3(vextractub, TCG_CALL_NO_RWG, void, avr, avr, i32) +DEF_HELPER_FLAGS_3(vextractuh, TCG_CALL_NO_RWG, void, avr, avr, i32) +DEF_HELPER_FLAGS_3(vextractuw, TCG_CALL_NO_RWG, void, avr, avr, i32) +DEF_HELPER_FLAGS_3(vextractd, TCG_CALL_NO_RWG, void, avr, avr, i32) +DEF_HELPER_4(VINSBLX, void, env, avr, i64, tl) +DEF_HELPER_4(VINSHLX, void, env, avr, i64, tl) +DEF_HELPER_4(VINSWLX, void, env, avr, i64, tl) +DEF_HELPER_4(VINSDLX, void, env, avr, i64, tl) +DEF_HELPER_FLAGS_2(VSTRIBL, TCG_CALL_NO_RWG, i32, avr, avr) +DEF_HELPER_FLAGS_2(VSTRIBR, TCG_CALL_NO_RWG, i32, avr, avr) +DEF_HELPER_FLAGS_2(VSTRIHL, TCG_CALL_NO_RWG, i32, avr, avr) +DEF_HELPER_FLAGS_2(VSTRIHR, TCG_CALL_NO_RWG, i32, avr, avr) +DEF_HELPER_FLAGS_2(vupkhpx, TCG_CALL_NO_RWG, void, avr, avr) +DEF_HELPER_FLAGS_2(vupklpx, TCG_CALL_NO_RWG, void, 
avr, avr) +DEF_HELPER_FLAGS_2(vupkhsb, TCG_CALL_NO_RWG, void, avr, avr) +DEF_HELPER_FLAGS_2(vupkhsh, TCG_CALL_NO_RWG, void, avr, avr) +DEF_HELPER_FLAGS_2(vupkhsw, TCG_CALL_NO_RWG, void, avr, avr) +DEF_HELPER_FLAGS_2(vupklsb, TCG_CALL_NO_RWG, void, avr, avr) +DEF_HELPER_FLAGS_2(vupklsh, TCG_CALL_NO_RWG, void, avr, avr) +DEF_HELPER_FLAGS_2(vupklsw, TCG_CALL_NO_RWG, void, avr, avr) +DEF_HELPER_FLAGS_4(VMSUMUBM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) +DEF_HELPER_FLAGS_4(VMSUMMBM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) +DEF_HELPER_FLAGS_4(VPERM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) +DEF_HELPER_FLAGS_4(VPERMR, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) DEF_HELPER_4(vpkshss, void, env, avr, avr, avr) DEF_HELPER_4(vpkshus, void, env, avr, avr, avr) DEF_HELPER_4(vpkswss, void, env, avr, avr, avr) @@ -258,14 +253,14 @@ DEF_HELPER_4(vpkudus, void, env, avr, avr, avr) DEF_HELPER_4(vpkuhum, void, env, avr, avr, avr) DEF_HELPER_4(vpkuwum, void, env, avr, avr, avr) DEF_HELPER_4(vpkudum, void, env, avr, avr, avr) -DEF_HELPER_3(vpkpx, void, avr, avr, avr) -DEF_HELPER_5(vmhaddshs, void, env, avr, avr, avr, avr) -DEF_HELPER_5(vmhraddshs, void, env, avr, avr, avr, avr) -DEF_HELPER_5(vmsumuhm, void, env, avr, avr, avr, avr) -DEF_HELPER_5(vmsumuhs, void, env, avr, avr, avr, avr) -DEF_HELPER_5(vmsumshm, void, env, avr, avr, avr, avr) -DEF_HELPER_5(vmsumshs, void, env, avr, avr, avr, avr) -DEF_HELPER_4(vmladduhm, void, avr, avr, avr, avr) +DEF_HELPER_FLAGS_3(vpkpx, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_5(VMHADDSHS, void, env, avr, avr, avr, avr) +DEF_HELPER_5(VMHRADDSHS, void, env, avr, avr, avr, avr) +DEF_HELPER_FLAGS_4(VMSUMUHM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) +DEF_HELPER_5(VMSUMUHS, void, env, avr, avr, avr, avr) +DEF_HELPER_FLAGS_4(VMSUMSHM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) +DEF_HELPER_5(VMSUMSHS, void, env, avr, avr, avr, avr) +DEF_HELPER_FLAGS_5(VMLADDUHM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_2(mtvscr, TCG_CALL_NO_RWG, void, env, i32) DEF_HELPER_FLAGS_1(mfvscr, TCG_CALL_NO_RWG, i32, env) DEF_HELPER_3(lvebx, void, env, avr, tl) @@ -291,10 +286,10 @@ DEF_HELPER_4(vmaxfp, void, env, avr, avr, avr) DEF_HELPER_4(vminfp, void, env, avr, avr, avr) DEF_HELPER_3(vrefp, void, env, avr, avr) DEF_HELPER_3(vrsqrtefp, void, env, avr, avr) -DEF_HELPER_3(vrlwmi, void, avr, avr, avr) -DEF_HELPER_3(vrldmi, void, avr, avr, avr) -DEF_HELPER_3(vrldnm, void, avr, avr, avr) -DEF_HELPER_3(vrlwnm, void, avr, avr, avr) +DEF_HELPER_FLAGS_4(VRLWMI, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(VRLDMI, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(VRLDNM, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(VRLWNM, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) DEF_HELPER_5(vmaddfp, void, env, avr, avr, avr, avr) DEF_HELPER_5(vnmsubfp, void, env, avr, avr, avr, avr) DEF_HELPER_3(vexptefp, void, env, avr, avr) @@ -308,55 +303,59 @@ DEF_HELPER_4(vcfsx, void, env, avr, avr, i32) DEF_HELPER_4(vctuxs, void, env, avr, avr, i32) DEF_HELPER_4(vctsxs, void, env, avr, avr, i32) -DEF_HELPER_2(vclzb, void, avr, avr) -DEF_HELPER_2(vclzh, void, avr, avr) -DEF_HELPER_2(vctzb, void, avr, avr) -DEF_HELPER_2(vctzh, void, avr, avr) -DEF_HELPER_2(vctzw, void, avr, avr) -DEF_HELPER_2(vctzd, void, avr, avr) -DEF_HELPER_2(vpopcntb, void, avr, avr) -DEF_HELPER_2(vpopcnth, void, avr, avr) -DEF_HELPER_2(vpopcntw, void, avr, avr) -DEF_HELPER_2(vpopcntd, void, avr, avr) -DEF_HELPER_1(vclzlsbb, tl, avr) -DEF_HELPER_1(vctzlsbb, tl, avr) 
-DEF_HELPER_3(vbpermd, void, avr, avr, avr) -DEF_HELPER_3(vbpermq, void, avr, avr, avr) -DEF_HELPER_3(vpmsumb, void, avr, avr, avr) -DEF_HELPER_3(vpmsumh, void, avr, avr, avr) -DEF_HELPER_3(vpmsumw, void, avr, avr, avr) -DEF_HELPER_3(vpmsumd, void, avr, avr, avr) -DEF_HELPER_2(vextublx, tl, tl, avr) -DEF_HELPER_2(vextuhlx, tl, tl, avr) -DEF_HELPER_2(vextuwlx, tl, tl, avr) -DEF_HELPER_2(vextubrx, tl, tl, avr) -DEF_HELPER_2(vextuhrx, tl, tl, avr) -DEF_HELPER_2(vextuwrx, tl, tl, avr) +DEF_HELPER_FLAGS_2(vclzb, TCG_CALL_NO_RWG, void, avr, avr) +DEF_HELPER_FLAGS_2(vclzh, TCG_CALL_NO_RWG, void, avr, avr) +DEF_HELPER_FLAGS_2(vctzb, TCG_CALL_NO_RWG, void, avr, avr) +DEF_HELPER_FLAGS_2(vctzh, TCG_CALL_NO_RWG, void, avr, avr) +DEF_HELPER_FLAGS_2(vctzw, TCG_CALL_NO_RWG, void, avr, avr) +DEF_HELPER_FLAGS_2(vctzd, TCG_CALL_NO_RWG, void, avr, avr) +DEF_HELPER_FLAGS_2(vpopcntb, TCG_CALL_NO_RWG, void, avr, avr) +DEF_HELPER_FLAGS_2(vpopcnth, TCG_CALL_NO_RWG, void, avr, avr) +DEF_HELPER_FLAGS_2(vpopcntw, TCG_CALL_NO_RWG, void, avr, avr) +DEF_HELPER_FLAGS_2(vpopcntd, TCG_CALL_NO_RWG, void, avr, avr) +DEF_HELPER_FLAGS_1(vclzlsbb, TCG_CALL_NO_RWG, tl, avr) +DEF_HELPER_FLAGS_1(vctzlsbb, TCG_CALL_NO_RWG, tl, avr) +DEF_HELPER_FLAGS_3(vbpermd, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(vbpermq, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(vpmsumb, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(vpmsumh, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(vpmsumw, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VPMSUMD, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_2(vextublx, TCG_CALL_NO_RWG, tl, tl, avr) +DEF_HELPER_FLAGS_2(vextuhlx, TCG_CALL_NO_RWG, tl, tl, avr) +DEF_HELPER_FLAGS_2(vextuwlx, TCG_CALL_NO_RWG, tl, tl, avr) +DEF_HELPER_FLAGS_2(vextubrx, TCG_CALL_NO_RWG, tl, tl, avr) +DEF_HELPER_FLAGS_2(vextuhrx, TCG_CALL_NO_RWG, tl, tl, avr) +DEF_HELPER_FLAGS_2(vextuwrx, TCG_CALL_NO_RWG, tl, tl, avr) +DEF_HELPER_5(VEXTDUBVLX, void, env, avr, avr, avr, tl) +DEF_HELPER_5(VEXTDUHVLX, void, env, avr, avr, avr, tl) +DEF_HELPER_5(VEXTDUWVLX, void, env, avr, avr, avr, tl) +DEF_HELPER_5(VEXTDDVLX, void, env, avr, avr, avr, tl) -DEF_HELPER_2(vsbox, void, avr, avr) -DEF_HELPER_3(vcipher, void, avr, avr, avr) -DEF_HELPER_3(vcipherlast, void, avr, avr, avr) -DEF_HELPER_3(vncipher, void, avr, avr, avr) -DEF_HELPER_3(vncipherlast, void, avr, avr, avr) -DEF_HELPER_3(vshasigmaw, void, avr, avr, i32) -DEF_HELPER_3(vshasigmad, void, avr, avr, i32) -DEF_HELPER_4(vpermxor, void, avr, avr, avr, avr) +DEF_HELPER_FLAGS_2(vsbox, TCG_CALL_NO_RWG, void, avr, avr) +DEF_HELPER_FLAGS_3(vcipher, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(vcipherlast, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(vncipher, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(vncipherlast, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(vshasigmaw, TCG_CALL_NO_RWG, void, avr, avr, i32) +DEF_HELPER_FLAGS_3(vshasigmad, TCG_CALL_NO_RWG, void, avr, avr, i32) +DEF_HELPER_FLAGS_4(vpermxor, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) -DEF_HELPER_4(bcdadd, i32, avr, avr, avr, i32) -DEF_HELPER_4(bcdsub, i32, avr, avr, avr, i32) -DEF_HELPER_3(bcdcfn, i32, avr, avr, i32) -DEF_HELPER_3(bcdctn, i32, avr, avr, i32) -DEF_HELPER_3(bcdcfz, i32, avr, avr, i32) -DEF_HELPER_3(bcdctz, i32, avr, avr, i32) -DEF_HELPER_3(bcdcfsq, i32, avr, avr, i32) -DEF_HELPER_3(bcdctsq, i32, avr, avr, i32) -DEF_HELPER_4(bcdcpsgn, i32, avr, avr, avr, i32) -DEF_HELPER_3(bcdsetsgn, i32, avr, avr, i32) 
-DEF_HELPER_4(bcds, i32, avr, avr, avr, i32) -DEF_HELPER_4(bcdus, i32, avr, avr, avr, i32) -DEF_HELPER_4(bcdsr, i32, avr, avr, avr, i32) -DEF_HELPER_4(bcdtrunc, i32, avr, avr, avr, i32) -DEF_HELPER_4(bcdutrunc, i32, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(bcdadd, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(bcdsub, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32) +DEF_HELPER_FLAGS_3(bcdcfn, TCG_CALL_NO_RWG, i32, avr, avr, i32) +DEF_HELPER_FLAGS_3(bcdctn, TCG_CALL_NO_RWG, i32, avr, avr, i32) +DEF_HELPER_FLAGS_3(bcdcfz, TCG_CALL_NO_RWG, i32, avr, avr, i32) +DEF_HELPER_FLAGS_3(bcdctz, TCG_CALL_NO_RWG, i32, avr, avr, i32) +DEF_HELPER_FLAGS_3(bcdcfsq, TCG_CALL_NO_RWG, i32, avr, avr, i32) +DEF_HELPER_FLAGS_3(bcdctsq, TCG_CALL_NO_RWG, i32, avr, avr, i32) +DEF_HELPER_FLAGS_4(bcdcpsgn, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32) +DEF_HELPER_FLAGS_3(bcdsetsgn, TCG_CALL_NO_RWG, i32, avr, avr, i32) +DEF_HELPER_FLAGS_4(bcds, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(bcdus, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(bcdsr, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(bcdtrunc, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32) +DEF_HELPER_FLAGS_4(bcdutrunc, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32) DEF_HELPER_4(xsadddp, void, env, vsr, vsr, vsr) DEF_HELPER_5(xsaddqp, void, env, i32, vsr, vsr, vsr) @@ -370,14 +369,16 @@ DEF_HELPER_3(xssqrtdp, void, env, vsr, vsr) DEF_HELPER_3(xsrsqrtedp, void, env, vsr, vsr) DEF_HELPER_4(xstdivdp, void, env, i32, vsr, vsr) DEF_HELPER_3(xstsqrtdp, void, env, i32, vsr) -DEF_HELPER_5(xsmadddp, void, env, vsr, vsr, vsr, vsr) -DEF_HELPER_5(xsmsubdp, void, env, vsr, vsr, vsr, vsr) -DEF_HELPER_5(xsnmadddp, void, env, vsr, vsr, vsr, vsr) -DEF_HELPER_5(xsnmsubdp, void, env, vsr, vsr, vsr, vsr) -DEF_HELPER_4(xscmpeqdp, void, env, vsr, vsr, vsr) -DEF_HELPER_4(xscmpgtdp, void, env, vsr, vsr, vsr) -DEF_HELPER_4(xscmpgedp, void, env, vsr, vsr, vsr) -DEF_HELPER_4(xscmpnedp, void, env, vsr, vsr, vsr) +DEF_HELPER_5(XSMADDDP, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(XSMSUBDP, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(XSNMADDDP, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(XSNMSUBDP, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_4(XSCMPEQDP, void, env, vsr, vsr, vsr) +DEF_HELPER_4(XSCMPGTDP, void, env, vsr, vsr, vsr) +DEF_HELPER_4(XSCMPGEDP, void, env, vsr, vsr, vsr) +DEF_HELPER_4(XSCMPEQQP, void, env, vsr, vsr, vsr) +DEF_HELPER_4(XSCMPGTQP, void, env, vsr, vsr, vsr) +DEF_HELPER_4(XSCMPGEQP, void, env, vsr, vsr, vsr) DEF_HELPER_4(xscmpexpdp, void, env, i32, vsr, vsr) DEF_HELPER_4(xscmpexpqp, void, env, i32, vsr, vsr) DEF_HELPER_4(xscmpodp, void, env, i32, vsr, vsr) @@ -386,23 +387,29 @@ DEF_HELPER_4(xscmpoqp, void, env, i32, vsr, vsr) DEF_HELPER_4(xscmpuqp, void, env, i32, vsr, vsr) DEF_HELPER_4(xsmaxdp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xsmindp, void, env, vsr, vsr, vsr) -DEF_HELPER_5(xsmaxcdp, void, env, i32, vsr, vsr, vsr) -DEF_HELPER_5(xsmincdp, void, env, i32, vsr, vsr, vsr) -DEF_HELPER_5(xsmaxjdp, void, env, i32, vsr, vsr, vsr) -DEF_HELPER_5(xsminjdp, void, env, i32, vsr, vsr, vsr) +DEF_HELPER_4(XSMAXCDP, void, env, vsr, vsr, vsr) +DEF_HELPER_4(XSMINCDP, void, env, vsr, vsr, vsr) +DEF_HELPER_4(XSMAXJDP, void, env, vsr, vsr, vsr) +DEF_HELPER_4(XSMINJDP, void, env, vsr, vsr, vsr) +DEF_HELPER_4(XSMAXCQP, void, env, vsr, vsr, vsr) +DEF_HELPER_4(XSMINCQP, void, env, vsr, vsr, vsr) DEF_HELPER_3(xscvdphp, void, env, vsr, vsr) DEF_HELPER_4(xscvdpqp, void, env, i32, vsr, vsr) DEF_HELPER_3(xscvdpsp, void, env, vsr, vsr) 
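
The XSTSTDC{SP,DP,QP} helpers declared just below now receive the DCMX mask directly as an i32 instead of re-decoding the opcode. For reference, the bit layout implied by the extract32() calls in the *_tstdc() helpers earlier in this patch is restated here as a standalone sketch (dcmx_selects is not a function from the patch):

/* DCMX bits, as consumed by float64_tstdc()/float128_tstdc() above:
 *   bit 6: any NaN
 *   bit 5: +Infinity   bit 4: -Infinity
 *   bit 3: +Zero       bit 2: -Zero
 *   bit 1: +Denormal   bit 0: -Denormal
 * e.g. dcmx = 0x40 matches only NaNs, dcmx = 0x03 matches denormals of
 * either sign. */
static bool dcmx_selects(uint32_t dcmx, float64 b)
{
    bool sign = float64_is_neg(b);

    if (float64_is_any_nan(b)) {
        return extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(b)) {
        return extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(b)) {
        return extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(b)) {
        return extract32(dcmx, 0 + !sign, 1);
    }
    return false;
}
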
DEF_HELPER_2(xscvdpspn, i64, env, i64) -DEF_HELPER_4(xscvqpdp, void, env, i32, vsr, vsr) +DEF_HELPER_4(XSCVQPDP, void, env, i32, vsr, vsr) DEF_HELPER_4(xscvqpsdz, void, env, i32, vsr, vsr) DEF_HELPER_4(xscvqpswz, void, env, i32, vsr, vsr) DEF_HELPER_4(xscvqpudz, void, env, i32, vsr, vsr) DEF_HELPER_4(xscvqpuwz, void, env, i32, vsr, vsr) +DEF_HELPER_3(XSCVQPUQZ, void, env, vsr, vsr) +DEF_HELPER_3(XSCVQPSQZ, void, env, vsr, vsr) +DEF_HELPER_3(XSCVUQQP, void, env, vsr, vsr) +DEF_HELPER_3(XSCVSQQP, void, env, vsr, vsr) DEF_HELPER_3(xscvhpdp, void, env, vsr, vsr) DEF_HELPER_4(xscvsdqp, void, env, i32, vsr, vsr) DEF_HELPER_3(xscvspdp, void, env, vsr, vsr) -DEF_HELPER_2(xscvspdpn, i64, env, i64) +DEF_HELPER_FLAGS_1(XSCVSPDPN, TCG_CALL_NO_RWG_SE, i64, i64) DEF_HELPER_3(xscvdpsxds, void, env, vsr, vsr) DEF_HELPER_3(xscvdpsxws, void, env, vsr, vsr) DEF_HELPER_3(xscvdpuxds, void, env, vsr, vsr) @@ -412,9 +419,9 @@ DEF_HELPER_3(xscvuxdsp, void, env, vsr, vsr) DEF_HELPER_3(xscvsxdsp, void, env, vsr, vsr) DEF_HELPER_4(xscvudqp, void, env, i32, vsr, vsr) DEF_HELPER_3(xscvuxddp, void, env, vsr, vsr) -DEF_HELPER_3(xststdcsp, void, env, i32, vsr) -DEF_HELPER_2(xststdcdp, void, env, i32) -DEF_HELPER_2(xststdcqp, void, env, i32) +DEF_HELPER_4(XSTSTDCSP, void, env, i32, i32, vsr) +DEF_HELPER_4(XSTSTDCDP, void, env, i32, i32, vsr) +DEF_HELPER_4(XSTSTDCQP, void, env, i32, i32, vsr) DEF_HELPER_3(xsrdpi, void, env, vsr, vsr) DEF_HELPER_3(xsrdpic, void, env, vsr, vsr) DEF_HELPER_3(xsrdpim, void, env, vsr, vsr) @@ -433,10 +440,19 @@ DEF_HELPER_3(xsresp, void, env, vsr, vsr) DEF_HELPER_2(xsrsp, i64, env, i64) DEF_HELPER_3(xssqrtsp, void, env, vsr, vsr) DEF_HELPER_3(xsrsqrtesp, void, env, vsr, vsr) -DEF_HELPER_5(xsmaddsp, void, env, vsr, vsr, vsr, vsr) -DEF_HELPER_5(xsmsubsp, void, env, vsr, vsr, vsr, vsr) -DEF_HELPER_5(xsnmaddsp, void, env, vsr, vsr, vsr, vsr) -DEF_HELPER_5(xsnmsubsp, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(XSMADDSP, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(XSMSUBSP, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(XSNMADDSP, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(XSNMSUBSP, void, env, vsr, vsr, vsr, vsr) + +DEF_HELPER_5(XSMADDQP, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(XSMADDQPO, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(XSMSUBQP, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(XSMSUBQPO, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(XSNMADDQP, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(XSNMADDQPO, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(XSNMSUBQP, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(XSNMSUBQPO, void, env, vsr, vsr, vsr, vsr) DEF_HELPER_4(xvadddp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xvsubdp, void, env, vsr, vsr, vsr) @@ -494,6 +510,7 @@ DEF_HELPER_FLAGS_4(xvcmpnesp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr) DEF_HELPER_3(xvcvspdp, void, env, vsr, vsr) DEF_HELPER_3(xvcvsphp, void, env, vsr, vsr) DEF_HELPER_3(xvcvhpsp, void, env, vsr, vsr) +DEF_HELPER_3(XVCVSPBF16, void, env, vsr, vsr) DEF_HELPER_3(xvcvspsxds, void, env, vsr, vsr) DEF_HELPER_3(xvcvspsxws, void, env, vsr, vsr) DEF_HELPER_3(xvcvspuxds, void, env, vsr, vsr) @@ -502,18 +519,67 @@ DEF_HELPER_3(xvcvsxdsp, void, env, vsr, vsr) DEF_HELPER_3(xvcvuxdsp, void, env, vsr, vsr) DEF_HELPER_3(xvcvsxwsp, void, env, vsr, vsr) DEF_HELPER_3(xvcvuxwsp, void, env, vsr, vsr) -DEF_HELPER_2(xvtstdcsp, void, env, i32) -DEF_HELPER_2(xvtstdcdp, void, env, i32) +DEF_HELPER_FLAGS_4(XVTSTDCSP, TCG_CALL_NO_RWG, void, vsr, vsr, i64, i32) +DEF_HELPER_FLAGS_4(XVTSTDCDP, TCG_CALL_NO_RWG, void, vsr, vsr, i64, i32) DEF_HELPER_3(xvrspi, 
void, env, vsr, vsr) DEF_HELPER_3(xvrspic, void, env, vsr, vsr) DEF_HELPER_3(xvrspim, void, env, vsr, vsr) DEF_HELPER_3(xvrspip, void, env, vsr, vsr) DEF_HELPER_3(xvrspiz, void, env, vsr, vsr) -DEF_HELPER_4(xxperm, void, env, vsr, vsr, vsr) -DEF_HELPER_4(xxpermr, void, env, vsr, vsr, vsr) -DEF_HELPER_4(xxextractuw, void, env, vsr, vsr, i32) -DEF_HELPER_4(xxinsertw, void, env, vsr, vsr, i32) -DEF_HELPER_3(xvxsigsp, void, env, vsr, vsr) +DEF_HELPER_FLAGS_2(XXGENPCVBM_be_exp, TCG_CALL_NO_RWG, void, vsr, avr) +DEF_HELPER_FLAGS_2(XXGENPCVBM_be_comp, TCG_CALL_NO_RWG, void, vsr, avr) +DEF_HELPER_FLAGS_2(XXGENPCVBM_le_exp, TCG_CALL_NO_RWG, void, vsr, avr) +DEF_HELPER_FLAGS_2(XXGENPCVBM_le_comp, TCG_CALL_NO_RWG, void, vsr, avr) +DEF_HELPER_FLAGS_2(XXGENPCVHM_be_exp, TCG_CALL_NO_RWG, void, vsr, avr) +DEF_HELPER_FLAGS_2(XXGENPCVHM_be_comp, TCG_CALL_NO_RWG, void, vsr, avr) +DEF_HELPER_FLAGS_2(XXGENPCVHM_le_exp, TCG_CALL_NO_RWG, void, vsr, avr) +DEF_HELPER_FLAGS_2(XXGENPCVHM_le_comp, TCG_CALL_NO_RWG, void, vsr, avr) +DEF_HELPER_FLAGS_2(XXGENPCVWM_be_exp, TCG_CALL_NO_RWG, void, vsr, avr) +DEF_HELPER_FLAGS_2(XXGENPCVWM_be_comp, TCG_CALL_NO_RWG, void, vsr, avr) +DEF_HELPER_FLAGS_2(XXGENPCVWM_le_exp, TCG_CALL_NO_RWG, void, vsr, avr) +DEF_HELPER_FLAGS_2(XXGENPCVWM_le_comp, TCG_CALL_NO_RWG, void, vsr, avr) +DEF_HELPER_FLAGS_2(XXGENPCVDM_be_exp, TCG_CALL_NO_RWG, void, vsr, avr) +DEF_HELPER_FLAGS_2(XXGENPCVDM_be_comp, TCG_CALL_NO_RWG, void, vsr, avr) +DEF_HELPER_FLAGS_2(XXGENPCVDM_le_exp, TCG_CALL_NO_RWG, void, vsr, avr) +DEF_HELPER_FLAGS_2(XXGENPCVDM_le_comp, TCG_CALL_NO_RWG, void, vsr, avr) +DEF_HELPER_FLAGS_3(XXEXTRACTUW, TCG_CALL_NO_RWG, void, vsr, vsr, i32) +DEF_HELPER_FLAGS_5(XXPERMX, TCG_CALL_NO_RWG, void, vsr, vsr, vsr, vsr, tl) +DEF_HELPER_FLAGS_3(XXINSERTW, TCG_CALL_NO_RWG, void, vsr, vsr, i32) +DEF_HELPER_FLAGS_2(XVXSIGSP, TCG_CALL_NO_RWG, void, vsr, vsr) +DEF_HELPER_FLAGS_5(XXEVAL, TCG_CALL_NO_RWG, void, vsr, vsr, vsr, vsr, i32) +DEF_HELPER_FLAGS_5(XXBLENDVB, TCG_CALL_NO_RWG, void, vsr, vsr, vsr, vsr, i32) +DEF_HELPER_FLAGS_5(XXBLENDVH, TCG_CALL_NO_RWG, void, vsr, vsr, vsr, vsr, i32) +DEF_HELPER_FLAGS_5(XXBLENDVW, TCG_CALL_NO_RWG, void, vsr, vsr, vsr, vsr, i32) +DEF_HELPER_FLAGS_5(XXBLENDVD, TCG_CALL_NO_RWG, void, vsr, vsr, vsr, vsr, i32) +DEF_HELPER_5(XVI4GER8, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVI4GER8PP, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVI8GER4, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVI8GER4PP, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVI8GER4SPP, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVI16GER2, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVI16GER2S, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVI16GER2PP, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVI16GER2SPP, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVF16GER2, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVF16GER2PP, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVF16GER2PN, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVF16GER2NP, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVF16GER2NN, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVBF16GER2, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVBF16GER2PP, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVBF16GER2PN, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVBF16GER2NP, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVBF16GER2NN, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVF32GER, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVF32GERPP, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVF32GERPN, void, env, vsr, vsr, acc, i32) 
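
The XVF32GER*/XVF64GER* helpers declared around this point pass the packed mask in their final i32 argument; vsxger() earlier in this patch unpacks it as xmsk = mask & 0x0F (rows) and ymsk = (mask >> 4) & 0x0F (columns) and walks both from bit 3 downwards. The selection rule, restated as an illustrative sketch (not code from the patch):

/* Accumulator element (i, j) of the 4x4 tile is computed only when both its
 * row and column bits are set; vsxger() zeroes every other element. */
static bool xvger_element_enabled(uint32_t mask, int i, int j)
{
    uint8_t xmsk = mask & 0x0F;          /* rows:    bit 3 selects row 0    */
    uint8_t ymsk = (mask >> 4) & 0x0F;   /* columns: bit 3 selects column 0 */

    return (xmsk & (1 << (3 - i))) && (ymsk & (1 << (3 - j)));
}
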
+DEF_HELPER_5(XVF32GERNP, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVF32GERNN, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVF64GER, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVF64GERPP, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVF64GERPN, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVF64GERNP, void, env, vsr, vsr, acc, i32) +DEF_HELPER_5(XVF64GERNN, void, env, vsr, vsr, acc, i32) DEF_HELPER_2(efscfsi, i32, env, i32) DEF_HELPER_2(efscfui, i32, env, i32) @@ -604,24 +670,24 @@ DEF_HELPER_2(booke_set_eplc, void, env, tl) DEF_HELPER_2(booke_set_epsc, void, env, tl) DEF_HELPER_2(6xx_tlbd, void, env, tl) DEF_HELPER_2(6xx_tlbi, void, env, tl) -DEF_HELPER_2(74xx_tlbd, void, env, tl) -DEF_HELPER_2(74xx_tlbi, void, env, tl) DEF_HELPER_FLAGS_1(tlbia, TCG_CALL_NO_RWG, void, env) DEF_HELPER_FLAGS_2(tlbie, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(tlbiva, TCG_CALL_NO_RWG, void, env, tl) #if defined(TARGET_PPC64) -DEF_HELPER_FLAGS_3(store_slb, TCG_CALL_NO_RWG, void, env, tl, tl) -DEF_HELPER_2(load_slb_esid, tl, env, tl) -DEF_HELPER_2(load_slb_vsid, tl, env, tl) -DEF_HELPER_2(find_slb_vsid, tl, env, tl) -DEF_HELPER_FLAGS_2(slbia, TCG_CALL_NO_RWG, void, env, i32) -DEF_HELPER_FLAGS_2(slbie, TCG_CALL_NO_RWG, void, env, tl) -DEF_HELPER_FLAGS_2(slbieg, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_4(tlbie_isa300, TCG_CALL_NO_WG, void, \ + env, tl, tl, i32) +DEF_HELPER_FLAGS_3(SLBMTE, TCG_CALL_NO_RWG, void, env, tl, tl) +DEF_HELPER_2(SLBMFEE, tl, env, tl) +DEF_HELPER_2(SLBMFEV, tl, env, tl) +DEF_HELPER_2(SLBFEE, tl, env, tl) +DEF_HELPER_FLAGS_2(SLBIA, TCG_CALL_NO_RWG, void, env, i32) +DEF_HELPER_FLAGS_3(SLBIAG, TCG_CALL_NO_RWG, void, env, tl, i32) +DEF_HELPER_FLAGS_2(SLBIE, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(SLBIEG, TCG_CALL_NO_RWG, void, env, tl) #endif DEF_HELPER_FLAGS_2(load_sr, TCG_CALL_NO_RWG, tl, env, tl) DEF_HELPER_FLAGS_3(store_sr, TCG_CALL_NO_RWG, void, env, tl, tl) -DEF_HELPER_FLAGS_1(602_mfrom, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_1(msgsnd, void, tl) DEF_HELPER_2(msgclr, void, env, tl) DEF_HELPER_1(book3s_msgsnd, void, tl) @@ -629,17 +695,12 @@ DEF_HELPER_2(book3s_msgclr, void, env, tl) #endif DEF_HELPER_4(dlmzb, tl, env, tl, tl, i32) -DEF_HELPER_FLAGS_2(clcs, TCG_CALL_NO_RWG_SE, tl, env, i32) #if !defined(CONFIG_USER_ONLY) DEF_HELPER_2(rac, tl, env, tl) -#endif -DEF_HELPER_3(div, tl, env, tl, tl) -DEF_HELPER_3(divo, tl, env, tl, tl) -DEF_HELPER_3(divs, tl, env, tl, tl) -DEF_HELPER_3(divso, tl, env, tl, tl) DEF_HELPER_2(load_dcr, tl, env, tl) DEF_HELPER_3(store_dcr, void, env, tl, tl) +#endif DEF_HELPER_2(load_dump_spr, void, env, i32) DEF_HELPER_2(store_dump_spr, void, env, i32) @@ -650,8 +711,6 @@ DEF_HELPER_FLAGS_1(load_tbu, TCG_CALL_NO_RWG, tl, env) DEF_HELPER_FLAGS_1(load_atbl, TCG_CALL_NO_RWG, tl, env) DEF_HELPER_FLAGS_1(load_atbu, TCG_CALL_NO_RWG, tl, env) DEF_HELPER_FLAGS_1(load_vtb, TCG_CALL_NO_RWG, tl, env) -DEF_HELPER_FLAGS_1(load_601_rtcl, TCG_CALL_NO_RWG, tl, env) -DEF_HELPER_FLAGS_1(load_601_rtcu, TCG_CALL_NO_RWG, tl, env) #if !defined(CONFIG_USER_ONLY) #if defined(TARGET_PPC64) DEF_HELPER_FLAGS_1(load_purr, TCG_CALL_NO_RWG, tl, env) @@ -669,18 +728,17 @@ DEF_HELPER_FLAGS_2(store_tbl, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(store_tbu, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(store_atbl, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(store_atbu, TCG_CALL_NO_RWG, void, env, tl) -DEF_HELPER_FLAGS_2(store_601_rtcl, TCG_CALL_NO_RWG, void, env, tl) -DEF_HELPER_FLAGS_2(store_601_rtcu, TCG_CALL_NO_RWG, void, env, tl) 
DEF_HELPER_FLAGS_1(load_decr, TCG_CALL_NO_RWG, tl, env) DEF_HELPER_FLAGS_2(store_decr, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_1(load_hdecr, TCG_CALL_NO_RWG, tl, env) DEF_HELPER_FLAGS_2(store_hdecr, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(store_vtb, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(store_tbu40, TCG_CALL_NO_RWG, void, env, tl) -DEF_HELPER_2(store_hid0_601, void, env, tl) -DEF_HELPER_3(store_403_pbr, void, env, i32, tl) DEF_HELPER_FLAGS_1(load_40x_pit, TCG_CALL_NO_RWG, tl, env) DEF_HELPER_FLAGS_2(store_40x_pit, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(store_40x_tcr, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(store_40x_tsr, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_2(store_40x_pid, void, env, tl) DEF_HELPER_2(store_40x_dbcr0, void, env, tl) DEF_HELPER_2(store_40x_sler, void, env, tl) DEF_HELPER_FLAGS_2(store_booke_tcr, TCG_CALL_NO_RWG, void, env, tl) @@ -689,65 +747,66 @@ DEF_HELPER_3(store_ibatl, void, env, i32, tl) DEF_HELPER_3(store_ibatu, void, env, i32, tl) DEF_HELPER_3(store_dbatl, void, env, i32, tl) DEF_HELPER_3(store_dbatu, void, env, i32, tl) -DEF_HELPER_3(store_601_batl, void, env, i32, tl) -DEF_HELPER_3(store_601_batu, void, env, i32, tl) #endif #define dh_alias_fprp ptr #define dh_ctype_fprp ppc_fprp_t * +#define dh_typecode_fprp dh_typecode_ptr -DEF_HELPER_4(dadd, void, env, fprp, fprp, fprp) -DEF_HELPER_4(daddq, void, env, fprp, fprp, fprp) -DEF_HELPER_4(dsub, void, env, fprp, fprp, fprp) -DEF_HELPER_4(dsubq, void, env, fprp, fprp, fprp) -DEF_HELPER_4(dmul, void, env, fprp, fprp, fprp) -DEF_HELPER_4(dmulq, void, env, fprp, fprp, fprp) -DEF_HELPER_4(ddiv, void, env, fprp, fprp, fprp) -DEF_HELPER_4(ddivq, void, env, fprp, fprp, fprp) -DEF_HELPER_3(dcmpo, i32, env, fprp, fprp) -DEF_HELPER_3(dcmpoq, i32, env, fprp, fprp) -DEF_HELPER_3(dcmpu, i32, env, fprp, fprp) -DEF_HELPER_3(dcmpuq, i32, env, fprp, fprp) -DEF_HELPER_3(dtstdc, i32, env, fprp, i32) -DEF_HELPER_3(dtstdcq, i32, env, fprp, i32) -DEF_HELPER_3(dtstdg, i32, env, fprp, i32) -DEF_HELPER_3(dtstdgq, i32, env, fprp, i32) -DEF_HELPER_3(dtstex, i32, env, fprp, fprp) -DEF_HELPER_3(dtstexq, i32, env, fprp, fprp) -DEF_HELPER_3(dtstsf, i32, env, fprp, fprp) -DEF_HELPER_3(dtstsfq, i32, env, fprp, fprp) -DEF_HELPER_3(dtstsfi, i32, env, i32, fprp) -DEF_HELPER_3(dtstsfiq, i32, env, i32, fprp) -DEF_HELPER_5(dquai, void, env, fprp, fprp, i32, i32) -DEF_HELPER_5(dquaiq, void, env, fprp, fprp, i32, i32) -DEF_HELPER_5(dqua, void, env, fprp, fprp, fprp, i32) -DEF_HELPER_5(dquaq, void, env, fprp, fprp, fprp, i32) -DEF_HELPER_5(drrnd, void, env, fprp, fprp, fprp, i32) -DEF_HELPER_5(drrndq, void, env, fprp, fprp, fprp, i32) -DEF_HELPER_5(drintx, void, env, fprp, fprp, i32, i32) -DEF_HELPER_5(drintxq, void, env, fprp, fprp, i32, i32) -DEF_HELPER_5(drintn, void, env, fprp, fprp, i32, i32) -DEF_HELPER_5(drintnq, void, env, fprp, fprp, i32, i32) -DEF_HELPER_3(dctdp, void, env, fprp, fprp) -DEF_HELPER_3(dctqpq, void, env, fprp, fprp) -DEF_HELPER_3(drsp, void, env, fprp, fprp) -DEF_HELPER_3(drdpq, void, env, fprp, fprp) -DEF_HELPER_3(dcffix, void, env, fprp, fprp) -DEF_HELPER_3(dcffixq, void, env, fprp, fprp) -DEF_HELPER_3(dctfix, void, env, fprp, fprp) -DEF_HELPER_3(dctfixq, void, env, fprp, fprp) -DEF_HELPER_4(ddedpd, void, env, fprp, fprp, i32) -DEF_HELPER_4(ddedpdq, void, env, fprp, fprp, i32) -DEF_HELPER_4(denbcd, void, env, fprp, fprp, i32) -DEF_HELPER_4(denbcdq, void, env, fprp, fprp, i32) -DEF_HELPER_3(dxex, void, env, fprp, fprp) -DEF_HELPER_3(dxexq, void, env, fprp, fprp) 
-DEF_HELPER_4(diex, void, env, fprp, fprp, fprp)
-DEF_HELPER_4(diexq, void, env, fprp, fprp, fprp)
-DEF_HELPER_4(dscri, void, env, fprp, fprp, i32)
-DEF_HELPER_4(dscriq, void, env, fprp, fprp, i32)
-DEF_HELPER_4(dscli, void, env, fprp, fprp, i32)
-DEF_HELPER_4(dscliq, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DADD, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DADDQ, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DSUB, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DSUBQ, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DMUL, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DMULQ, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DDIV, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DDIVQ, void, env, fprp, fprp, fprp)
+DEF_HELPER_3(DCMPO, i32, env, fprp, fprp)
+DEF_HELPER_3(DCMPOQ, i32, env, fprp, fprp)
+DEF_HELPER_3(DCMPU, i32, env, fprp, fprp)
+DEF_HELPER_3(DCMPUQ, i32, env, fprp, fprp)
+DEF_HELPER_3(DTSTDC, i32, env, fprp, i32)
+DEF_HELPER_3(DTSTDCQ, i32, env, fprp, i32)
+DEF_HELPER_3(DTSTDG, i32, env, fprp, i32)
+DEF_HELPER_3(DTSTDGQ, i32, env, fprp, i32)
+DEF_HELPER_3(DTSTEX, i32, env, fprp, fprp)
+DEF_HELPER_3(DTSTEXQ, i32, env, fprp, fprp)
+DEF_HELPER_3(DTSTSF, i32, env, fprp, fprp)
+DEF_HELPER_3(DTSTSFQ, i32, env, fprp, fprp)
+DEF_HELPER_3(DTSTSFI, i32, env, i32, fprp)
+DEF_HELPER_3(DTSTSFIQ, i32, env, i32, fprp)
+DEF_HELPER_5(DQUAI, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(DQUAIQ, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(DQUA, void, env, fprp, fprp, fprp, i32)
+DEF_HELPER_5(DQUAQ, void, env, fprp, fprp, fprp, i32)
+DEF_HELPER_5(DRRND, void, env, fprp, fprp, fprp, i32)
+DEF_HELPER_5(DRRNDQ, void, env, fprp, fprp, fprp, i32)
+DEF_HELPER_5(DRINTX, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(DRINTXQ, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(DRINTN, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(DRINTNQ, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_3(DCTDP, void, env, fprp, fprp)
+DEF_HELPER_3(DCTQPQ, void, env, fprp, fprp)
+DEF_HELPER_3(DRSP, void, env, fprp, fprp)
+DEF_HELPER_3(DRDPQ, void, env, fprp, fprp)
+DEF_HELPER_3(DCFFIX, void, env, fprp, fprp)
+DEF_HELPER_3(DCFFIXQ, void, env, fprp, fprp)
+DEF_HELPER_3(DCFFIXQQ, void, env, fprp, avr)
+DEF_HELPER_3(DCTFIX, void, env, fprp, fprp)
+DEF_HELPER_3(DCTFIXQ, void, env, fprp, fprp)
+DEF_HELPER_3(DCTFIXQQ, void, env, avr, fprp)
+DEF_HELPER_4(DDEDPD, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DDEDPDQ, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DENBCD, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DENBCDQ, void, env, fprp, fprp, i32)
+DEF_HELPER_3(DXEX, void, env, fprp, fprp)
+DEF_HELPER_3(DXEXQ, void, env, fprp, fprp)
+DEF_HELPER_4(DIEX, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DIEXQ, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DSCRI, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DSCRIQ, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DSCLI, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DSCLIQ, void, env, fprp, fprp, i32)
 
 DEF_HELPER_1(tbegin, void, env)
 DEF_HELPER_FLAGS_1(fixup_thrm, TCG_CALL_NO_RWG, void, env)
diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c
index 3723872aa6..e200091a23 100644
--- a/target/ppc/helper_regs.c
+++ b/target/ppc/helper_regs.c
@@ -23,6 +23,9 @@
 #include "exec/exec-all.h"
 #include "sysemu/kvm.h"
 #include "helper_regs.h"
+#include "power8-pmu.h"
+#include "cpu-models.h"
+#include "spr_common.h"
 
 /* Swap temporary saved registers with GPRs */
 void hreg_swap_gpr_tgpr(CPUPPCState *env)
@@ -43,6 +46,48 @@ void hreg_swap_gpr_tgpr(CPUPPCState *env)
     env->tgpr[3] = tmp;
 }
 
+static uint32_t hreg_compute_pmu_hflags_value(CPUPPCState *env)
+{
+    uint32_t hflags = 0;
+
+#if defined(TARGET_PPC64)
+    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC0) {
+        hflags |= 1 << HFLAGS_PMCC0;
+    }
+    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC1) {
+        hflags |= 1 << HFLAGS_PMCC1;
+    }
+    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE) {
+        hflags |= 1 << HFLAGS_PMCJCE;
+    }
+
+#ifndef CONFIG_USER_ONLY
+    if (env->pmc_ins_cnt) {
+        hflags |= 1 << HFLAGS_INSN_CNT;
+    }
+    if (env->pmc_ins_cnt & 0x1e) {
+        hflags |= 1 << HFLAGS_PMC_OTHER;
+    }
+#endif
+#endif
+
+    return hflags;
+}
+
+/* Mask of all PMU hflags */
+static uint32_t hreg_compute_pmu_hflags_mask(CPUPPCState *env)
+{
+    uint32_t hflags_mask = 0;
+#if defined(TARGET_PPC64)
+    hflags_mask |= 1 << HFLAGS_PMCC0;
+    hflags_mask |= 1 << HFLAGS_PMCC1;
+    hflags_mask |= 1 << HFLAGS_PMCJCE;
+    hflags_mask |= 1 << HFLAGS_INSN_CNT;
+    hflags_mask |= 1 << HFLAGS_PMC_OTHER;
+#endif
+    return hflags_mask;
+}
+
 static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
 {
     target_ulong msr = env->msr;
@@ -58,21 +103,12 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
     msr_mask = ((1 << MSR_LE) | (1 << MSR_PR) |
                 (1 << MSR_DR) | (1 << MSR_FP));
 
-    if (ppc_flags & POWERPC_FLAG_HID0_LE) {
-        /*
-         * Note that MSR_LE is not set in env->msr_mask for this cpu,
-         * and so will never be set in msr.
-         */
-        uint32_t le = extract32(env->spr[SPR_HID0], 3, 1);
-        hflags |= le << MSR_LE;
-    }
-
     if (ppc_flags & POWERPC_FLAG_DE) {
         target_ulong dbcr0 = env->spr[SPR_BOOKE_DBCR0];
-        if (dbcr0 & DBCR0_ICMP) {
+        if ((dbcr0 & DBCR0_ICMP) && FIELD_EX64(env->msr, MSR, DE)) {
             hflags |= 1 << HFLAGS_SE;
         }
-        if (dbcr0 & DBCR0_BRT) {
+        if ((dbcr0 & DBCR0_BRT) && FIELD_EX64(env->msr, MSR, DE)) {
             hflags |= 1 << HFLAGS_BE;
         }
     } else {
@@ -106,6 +142,9 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
     if (env->spr[SPR_LPCR] & LPCR_GTSE) {
         hflags |= 1 << HFLAGS_GTSE;
     }
+    if (env->spr[SPR_LPCR] & LPCR_HR) {
+        hflags |= 1 << HFLAGS_HR;
+    }
 
 #ifndef CONFIG_USER_ONLY
     if (!env->has_hv_mode || (msr & (1ull << MSR_HV))) {
@@ -140,7 +179,8 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
      */
     unsigned immu_idx, dmmu_idx;
     dmmu_idx = msr & (1 << MSR_PR) ? 0 : 1;
-    if (env->mmu_model & POWERPC_MMU_BOOKE) {
+    if (env->mmu_model == POWERPC_MMU_BOOKE ||
+        env->mmu_model == POWERPC_MMU_BOOKE206) {
         dmmu_idx |= msr & (1 << MSR_GS) ? 4 : 0;
         immu_idx = dmmu_idx;
         immu_idx |= msr & (1 << MSR_IS) ? 2 : 0;
@@ -155,6 +195,8 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
     hflags |= dmmu_idx << HFLAGS_DMMU_IDX;
 #endif
 
+    hflags |= hreg_compute_pmu_hflags_value(env);
+
     return hflags | (msr & msr_mask);
 }
 
@@ -163,6 +205,17 @@ void hreg_compute_hflags(CPUPPCState *env)
     env->hflags = hreg_compute_hflags_value(env);
 }
 
+/*
+ * This can be used as a lighter-weight alternative to hreg_compute_hflags
+ * when PMU MMCR0 or pmc_ins_cnt changes. pmc_ins_cnt is changed by
+ * pmu_update_summaries.
+ */
+void hreg_update_pmu_hflags(CPUPPCState *env)
+{
+    env->hflags &= ~hreg_compute_pmu_hflags_mask(env);
+    env->hflags |= hreg_compute_pmu_hflags_value(env);
+}
+
 #ifdef CONFIG_DEBUG_TCG
 void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
                           target_ulong *cs_base, uint32_t *flags)
@@ -185,7 +238,11 @@ void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
 
 void cpu_interrupt_exittb(CPUState *cs)
 {
-    if (!kvm_enabled()) {
+    /*
+     * We don't need to worry about translation blocks
+     * when running with KVM.
+     */
+    if (kvm_enabled()) {
         return;
     }
 
@@ -213,12 +270,12 @@ int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv)
         value &= ~MSR_HVB;
         value |= env->msr & MSR_HVB;
     }
-    if (((value >> MSR_IR) & 1) != msr_ir ||
-        ((value >> MSR_DR) & 1) != msr_dr) {
+    if ((value ^ env->msr) & (R_MSR_IR_MASK | R_MSR_DR_MASK)) {
         cpu_interrupt_exittb(cs);
     }
-    if ((env->mmu_model & POWERPC_MMU_BOOKE) &&
-        ((value >> MSR_GS) & 1) != msr_gs) {
+    if ((env->mmu_model == POWERPC_MMU_BOOKE ||
+         env->mmu_model == POWERPC_MMU_BOOKE206) &&
+        ((value ^ env->msr) & R_MSR_GS_MASK)) {
         cpu_interrupt_exittb(cs);
     }
     if (unlikely((env->flags & POWERPC_FLAG_TGPR) &&
@@ -226,9 +283,8 @@ int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv)
         /* Swap temporary saved registers with GPRs */
         hreg_swap_gpr_tgpr(env);
     }
-    if (unlikely((value >> MSR_EP) & 1) != msr_ep) {
-        /* Change the exception prefix on PowerPC 601 */
-        env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000;
+    if (unlikely((value ^ env->msr) & R_MSR_EP_MASK)) {
+        env->excp_prefix = FIELD_EX64(value, MSR, EP) * 0xFFF00000;
     }
     /*
      * If PR=1 then EE, IR and DR must be 1
@@ -247,7 +303,9 @@ int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv)
     env->msr = value;
     hreg_compute_hflags(env);
 #if !defined(CONFIG_USER_ONLY)
-    if (unlikely(msr_pow == 1)) {
+    ppc_maybe_interrupt(env);
+
+    if (unlikely(FIELD_EX64(env->msr, MSR, POW))) {
         if (!env->pending_interrupts && (*env->check_pow)(env)) {
             cs->halted = 1;
             excp = EXCP_HALTED;
@@ -258,6 +316,18 @@ int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv)
     return excp;
 }
 
+#ifdef CONFIG_SOFTMMU
+void store_40x_sler(CPUPPCState *env, uint32_t val)
+{
+    /* XXX: TO BE FIXED */
+    if (val != 0x00000000) {
+        cpu_abort(env_cpu(env),
+                  "Little-endian regions are not supported by now\n");
+    }
+    env->spr[SPR_405_SLER] = val;
+}
+#endif /* CONFIG_SOFTMMU */
+
 #ifndef CONFIG_USER_ONLY
 void check_tlb_flush(CPUPPCState *env, bool global)
 {
@@ -267,7 +337,7 @@ void check_tlb_flush(CPUPPCState *env, bool global)
     if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
         env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
         env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
-        tlb_flush_all_cpus_synced(cs);
+        tlb_flush_all_cpus(cs);
         return;
     }
 
@@ -278,3 +348,403 @@ void check_tlb_flush(CPUPPCState *env, bool global)
     }
 }
 #endif
+
+/**
+ * _spr_register
+ *
+ * Register an SPR with all the callbacks required for tcg,
+ * and the ID number for KVM.
+ *
+ * The reason for the conditional compilation is that the tcg functions
+ * may be compiled out, and the system kvm header may not be available
+ * for supplying the ID numbers. This is ugly, but the best we can do.
+ */
+void _spr_register(CPUPPCState *env, int num, const char *name,
+                   USR_ARG(spr_callback *uea_read)
+                   USR_ARG(spr_callback *uea_write)
+                   SYS_ARG(spr_callback *oea_read)
+                   SYS_ARG(spr_callback *oea_write)
+                   SYS_ARG(spr_callback *hea_read)
+                   SYS_ARG(spr_callback *hea_write)
+                   KVM_ARG(uint64_t one_reg_id)
+                   target_ulong initial_value)
+{
+    ppc_spr_t *spr = &env->spr_cb[num];
+
+    /* No SPR should be registered twice.
*/ + assert(spr->name == NULL); + assert(name != NULL); + + spr->name = name; + spr->default_value = initial_value; + env->spr[num] = initial_value; + +#ifdef CONFIG_TCG + spr->uea_read = uea_read; + spr->uea_write = uea_write; +# ifndef CONFIG_USER_ONLY + spr->oea_read = oea_read; + spr->oea_write = oea_write; + spr->hea_read = hea_read; + spr->hea_write = hea_write; +# endif +#endif +#ifdef CONFIG_KVM + spr->one_reg_id = one_reg_id; +#endif +} + +/* Generic PowerPC SPRs */ +void register_generic_sprs(PowerPCCPU *cpu) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + CPUPPCState *env = &cpu->env; + + /* Integer processing */ + spr_register(env, SPR_XER, "XER", + &spr_read_xer, &spr_write_xer, + &spr_read_xer, &spr_write_xer, + 0x00000000); + /* Branch control */ + spr_register(env, SPR_LR, "LR", + &spr_read_lr, &spr_write_lr, + &spr_read_lr, &spr_write_lr, + 0x00000000); + spr_register(env, SPR_CTR, "CTR", + &spr_read_ctr, &spr_write_ctr, + &spr_read_ctr, &spr_write_ctr, + 0x00000000); + /* Interrupt processing */ + spr_register(env, SPR_SRR0, "SRR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_SRR1, "SRR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Processor control */ + spr_register(env, SPR_SPRG0, "SPRG0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_SPRG1, "SPRG1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_SPRG2, "SPRG2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_SPRG3, "SPRG3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_PVR, "PVR", + /* Linux permits userspace to read PVR */ +#if defined(CONFIG_LINUX_USER) + &spr_read_generic, +#else + SPR_NOACCESS, +#endif + SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + pcc->pvr); + + /* Register SVR if it's defined to anything else than POWERPC_SVR_NONE */ + if (pcc->svr != POWERPC_SVR_NONE) { + if (pcc->svr & POWERPC_SVR_E500) { + spr_register(env, SPR_E500_SVR, "SVR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + pcc->svr & ~POWERPC_SVR_E500); + } else { + spr_register(env, SPR_SVR, "SVR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + pcc->svr); + } + } + + /* Time base */ + spr_register(env, SPR_VTBL, "TBL", + &spr_read_tbl, SPR_NOACCESS, + &spr_read_tbl, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_TBL, "TBL", + &spr_read_tbl, SPR_NOACCESS, + &spr_read_tbl, &spr_write_tbl, + 0x00000000); + spr_register(env, SPR_VTBU, "TBU", + &spr_read_tbu, SPR_NOACCESS, + &spr_read_tbu, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_TBU, "TBU", + &spr_read_tbu, SPR_NOACCESS, + &spr_read_tbu, &spr_write_tbu, + 0x00000000); +} + +void register_non_embedded_sprs(CPUPPCState *env) +{ + /* Exception processing */ + spr_register_kvm(env, SPR_DSISR, "DSISR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_DSISR, 0x00000000); + spr_register_kvm(env, SPR_DAR, "DAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_DAR, 0x00000000); + /* Timer */ + spr_register(env, SPR_DECR, "DECR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_decr, &spr_write_decr, + 0x00000000); +} + +/* Storage Description Register 1 */ +void register_sdr1_sprs(CPUPPCState *env) +{ 
+#ifndef CONFIG_USER_ONLY + if (env->has_hv_mode) { + /* + * SDR1 is a hypervisor resource on CPUs which have a + * hypervisor mode + */ + spr_register_hv(env, SPR_SDR1, "SDR1", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_sdr1, + 0x00000000); + } else { + spr_register(env, SPR_SDR1, "SDR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_sdr1, + 0x00000000); + } +#endif +} + +/* BATs 0-3 */ +void register_low_BATs(CPUPPCState *env) +{ +#if !defined(CONFIG_USER_ONLY) + spr_register(env, SPR_IBAT0U, "IBAT0U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat, &spr_write_ibatu, + 0x00000000); + spr_register(env, SPR_IBAT0L, "IBAT0L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat, &spr_write_ibatl, + 0x00000000); + spr_register(env, SPR_IBAT1U, "IBAT1U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat, &spr_write_ibatu, + 0x00000000); + spr_register(env, SPR_IBAT1L, "IBAT1L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat, &spr_write_ibatl, + 0x00000000); + spr_register(env, SPR_IBAT2U, "IBAT2U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat, &spr_write_ibatu, + 0x00000000); + spr_register(env, SPR_IBAT2L, "IBAT2L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat, &spr_write_ibatl, + 0x00000000); + spr_register(env, SPR_IBAT3U, "IBAT3U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat, &spr_write_ibatu, + 0x00000000); + spr_register(env, SPR_IBAT3L, "IBAT3L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat, &spr_write_ibatl, + 0x00000000); + spr_register(env, SPR_DBAT0U, "DBAT0U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat, &spr_write_dbatu, + 0x00000000); + spr_register(env, SPR_DBAT0L, "DBAT0L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat, &spr_write_dbatl, + 0x00000000); + spr_register(env, SPR_DBAT1U, "DBAT1U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat, &spr_write_dbatu, + 0x00000000); + spr_register(env, SPR_DBAT1L, "DBAT1L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat, &spr_write_dbatl, + 0x00000000); + spr_register(env, SPR_DBAT2U, "DBAT2U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat, &spr_write_dbatu, + 0x00000000); + spr_register(env, SPR_DBAT2L, "DBAT2L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat, &spr_write_dbatl, + 0x00000000); + spr_register(env, SPR_DBAT3U, "DBAT3U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat, &spr_write_dbatu, + 0x00000000); + spr_register(env, SPR_DBAT3L, "DBAT3L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat, &spr_write_dbatl, + 0x00000000); + env->nb_BATs += 4; +#endif +} + +/* BATs 4-7 */ +void register_high_BATs(CPUPPCState *env) +{ +#if !defined(CONFIG_USER_ONLY) + spr_register(env, SPR_IBAT4U, "IBAT4U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat_h, &spr_write_ibatu_h, + 0x00000000); + spr_register(env, SPR_IBAT4L, "IBAT4L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat_h, &spr_write_ibatl_h, + 0x00000000); + spr_register(env, SPR_IBAT5U, "IBAT5U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat_h, &spr_write_ibatu_h, + 0x00000000); + spr_register(env, SPR_IBAT5L, "IBAT5L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat_h, &spr_write_ibatl_h, + 0x00000000); + spr_register(env, SPR_IBAT6U, "IBAT6U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat_h, &spr_write_ibatu_h, + 0x00000000); + spr_register(env, SPR_IBAT6L, "IBAT6L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat_h, &spr_write_ibatl_h, + 0x00000000); + spr_register(env, SPR_IBAT7U, "IBAT7U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat_h, &spr_write_ibatu_h, + 0x00000000); + spr_register(env, SPR_IBAT7L, 
"IBAT7L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat_h, &spr_write_ibatl_h, + 0x00000000); + spr_register(env, SPR_DBAT4U, "DBAT4U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat_h, &spr_write_dbatu_h, + 0x00000000); + spr_register(env, SPR_DBAT4L, "DBAT4L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat_h, &spr_write_dbatl_h, + 0x00000000); + spr_register(env, SPR_DBAT5U, "DBAT5U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat_h, &spr_write_dbatu_h, + 0x00000000); + spr_register(env, SPR_DBAT5L, "DBAT5L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat_h, &spr_write_dbatl_h, + 0x00000000); + spr_register(env, SPR_DBAT6U, "DBAT6U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat_h, &spr_write_dbatu_h, + 0x00000000); + spr_register(env, SPR_DBAT6L, "DBAT6L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat_h, &spr_write_dbatl_h, + 0x00000000); + spr_register(env, SPR_DBAT7U, "DBAT7U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat_h, &spr_write_dbatu_h, + 0x00000000); + spr_register(env, SPR_DBAT7L, "DBAT7L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat_h, &spr_write_dbatl_h, + 0x00000000); + env->nb_BATs += 4; +#endif +} + +/* Softare table search registers */ +void register_6xx_7xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways) +{ +#if !defined(CONFIG_USER_ONLY) + env->nb_tlb = nb_tlbs; + env->nb_ways = nb_ways; + env->id_tlbs = 1; + env->tlb_type = TLB_6XX; + spr_register(env, SPR_DMISS, "DMISS", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_DCMP, "DCMP", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_HASH1, "HASH1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_HASH2, "HASH2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_IMISS, "IMISS", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_ICMP, "ICMP", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_RPA, "RPA", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +#endif +} + +void register_thrm_sprs(CPUPPCState *env) +{ + /* Thermal management */ + spr_register(env, SPR_THRM1, "THRM1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_thrm, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_THRM2, "THRM2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_thrm, &spr_write_generic, + 0x00000000); + + spr_register(env, SPR_THRM3, "THRM3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_thrm, &spr_write_generic, + 0x00000000); +} + +void register_usprgh_sprs(CPUPPCState *env) +{ + spr_register(env, SPR_USPRG4, "USPRG4", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_USPRG5, "USPRG5", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_USPRG6, "USPRG6", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_USPRG7, "USPRG7", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); +} diff --git a/target/ppc/helper_regs.h b/target/ppc/helper_regs.h index 42f26870b9..8196c1346d 100644 --- a/target/ppc/helper_regs.h +++ b/target/ppc/helper_regs.h @@ -22,6 +22,7 @@ void hreg_swap_gpr_tgpr(CPUPPCState *env); void hreg_compute_hflags(CPUPPCState *env); +void hreg_update_pmu_hflags(CPUPPCState *env); void 
cpu_interrupt_exittb(CPUState *cs); int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv); diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode index 9fd8d6b817..f8f589e9fd 100644 --- a/target/ppc/insn32.decode +++ b/target/ppc/insn32.decode @@ -17,31 +17,244 @@ # License along with this library; if not, see . # +&A frt fra frb frc rc:bool +@A ...... frt:5 fra:5 frb:5 frc:5 ..... rc:1 &A + +&A_tb frt frb rc:bool +@A_tb ...... frt:5 ..... frb:5 ..... ..... rc:1 &A_tb + &D rt ra si:int64_t -@D ...... rt:5 ra:5 si:s16 &D +@D ...... rt:5 ra:5 si:s16 &D &D_bf bf l:bool ra imm -@D_bfs ...... bf:3 - l:1 ra:5 imm:s16 &D_bf -@D_bfu ...... bf:3 - l:1 ra:5 imm:16 &D_bf +@D_bfs ...... bf:3 . l:1 ra:5 imm:s16 &D_bf +@D_bfu ...... bf:3 . l:1 ra:5 imm:16 &D_bf + +%dq_si 4:s12 !function=times_16 +%dq_rtp 22:4 !function=times_2 +@DQ_rtp ...... ....0 ra:5 ............ .... &D rt=%dq_rtp si=%dq_si + +%dq_rt_tsx 3:1 21:5 +@DQ_TSX ...... ..... ra:5 ............ .... &D si=%dq_si rt=%dq_rt_tsx + +%rt_tsxp 21:1 22:4 !function=times_2 +@DQ_TSXP ...... ..... ra:5 ............ .... &D si=%dq_si rt=%rt_tsxp %ds_si 2:s14 !function=times_4 -@DS ...... rt:5 ra:5 .............. .. &D si=%ds_si +@DS ...... rt:5 ra:5 .............. .. &D si=%ds_si + +%ds_rtp 22:4 !function=times_2 +@DS_rtp ...... ....0 ra:5 .............. .. &D rt=%ds_rtp si=%ds_si + +&DX_b vrt b +%dx_b 6:10 16:5 0:1 +@DX_b ...... vrt:5 ..... .......... ..... . &DX_b b=%dx_b &DX rt d %dx_d 6:s10 16:5 0:1 -@DX ...... rt:5 ..... .......... ..... . &DX d=%dx_d +@DX ...... rt:5 ..... .......... ..... . &DX d=%dx_d + +&VA vrt vra vrb rc +@VA ...... vrt:5 vra:5 vrb:5 rc:5 ...... &VA + +&VC vrt vra vrb rc:bool +@VC ...... vrt:5 vra:5 vrb:5 rc:1 .......... &VC + +&VN vrt vra vrb sh +@VN ...... vrt:5 vra:5 vrb:5 .. sh:3 ...... &VN &VX vrt vra vrb -@VX ...... vrt:5 vra:5 vrb:5 .......... . &VX +@VX ...... vrt:5 vra:5 vrb:5 .......... . &VX + +&VX_bf bf vra vrb +@VX_bf ...... bf:3 .. vra:5 vrb:5 ........... &VX_bf + +&VX_mp rt mp:bool vrb +@VX_mp ...... rt:5 .... mp:1 vrb:5 ........... &VX_mp + +&VX_n rt vrb n +@VX_n ...... rt:5 .. n:3 vrb:5 ........... &VX_n + +&VX_tb_rc vrt vrb rc:bool +@VX_tb_rc ...... vrt:5 ..... vrb:5 rc:1 .......... &VX_tb_rc + +&VX_uim4 vrt uim vrb +@VX_uim4 ...... vrt:5 . uim:4 vrb:5 ........... &VX_uim4 + +&VX_tb vrt vrb +@VX_tb ...... vrt:5 ..... vrb:5 ........... &VX_tb &X rt ra rb -@X ...... rt:5 ra:5 rb:5 .......... . &X +@X ...... rt:5 ra:5 rb:5 .......... . &X + +&X_rc rt ra rb rc:bool +@X_rc ...... rt:5 ra:5 rb:5 .......... rc:1 &X_rc + +&X_sa rs ra +@X_sa ...... rs:5 ra:5 ..... .......... . &X_sa + +%x_frtp 22:4 !function=times_2 +%x_frap 17:4 !function=times_2 +%x_frbp 12:4 !function=times_2 +@X_tp_ap_bp_rc ...... ....0 ....0 ....0 .......... rc:1 &X_rc rt=%x_frtp ra=%x_frap rb=%x_frbp + +@X_tp_a_bp_rc ...... ....0 ra:5 ....0 .......... rc:1 &X_rc rt=%x_frtp rb=%x_frbp + +&X_t rt +@X_t ...... rt:5 ..... ..... .......... . &X_t + +&X_tb rt rb +@X_tb ...... rt:5 ..... rb:5 .......... . &X_tb + +&X_t_rc rt rc:bool +@X_t_rc ...... rt:5 ..... ..... .......... rc:1 &X_t_rc + +&X_tb_rc rt rb rc:bool +@X_tb_rc ...... rt:5 ..... rb:5 .......... rc:1 &X_tb_rc + +@X_tbp_rc ...... ....0 ..... ....0 .......... rc:1 &X_tb_rc rt=%x_frtp rb=%x_frbp + +@X_tp_b_rc ...... ....0 ..... rb:5 .......... rc:1 &X_tb_rc rt=%x_frtp + +@X_t_bp_rc ...... rt:5 ..... ....0 .......... rc:1 &X_tb_rc rb=%x_frbp &X_bi rt bi -@X_bi ...... rt:5 bi:5 ----- .......... - &X_bi +@X_bi ...... rt:5 bi:5 ..... .......... . 
&X_bi + +&X_bf bf ra rb +@X_bf ...... bf:3 .. ra:5 rb:5 .......... . &X_bf + +@X_bf_ap_bp ...... bf:3 .. ....0 ....0 .......... . &X_bf ra=%x_frap rb=%x_frbp + +@X_bf_a_bp ...... bf:3 .. ra:5 ....0 .......... . &X_bf rb=%x_frbp + +&X_bf_uim bf uim rb +@X_bf_uim ...... bf:3 . uim:6 rb:5 .......... . &X_bf_uim + +@X_bf_uim_bp ...... bf:3 . uim:6 ....0 .......... . &X_bf_uim rb=%x_frbp &X_bfl bf l:bool ra rb -@X_bfl ...... bf:3 - l:1 ra:5 rb:5 ..........- &X_bfl +@X_bfl ...... bf:3 . l:1 ra:5 rb:5 .......... . &X_bfl + +&X_imm2 rt imm +@X_imm2 ...... rt:5 ..... ... imm:2 .......... . &X_imm2 + +&X_imm3 rt imm +@X_imm3 ...... rt:5 ..... .. imm:3 .......... . &X_imm3 + +%x_xt 0:1 21:5 +&X_imm5 xt imm:uint8_t vrb +@X_imm5 ...... ..... imm:5 vrb:5 .......... . &X_imm5 xt=%x_xt + +&X_imm8 xt imm:uint8_t +@X_imm8 ...... ..... .. imm:8 .......... . &X_imm8 xt=%x_xt + +&X_ih ih:uint8_t +@X_ih ...... .. ih:3 ..... ..... .......... . &X_ih + +&X_rb rb +@X_rb ...... ..... ..... rb:5 .......... . &X_rb + +&X_rs_l rs l:bool +@X_rs_l ...... rs:5 .... l:1 ..... .......... . &X_rs_l + +&X_uim5 xt uim:uint8_t +@X_uim5 ...... ..... ..... uim:5 .......... . &X_uim5 xt=%x_xt + +&X_tb_sp_rc rt rb sp rc:bool +@X_tb_sp_rc ...... rt:5 sp:2 ... rb:5 .......... rc:1 &X_tb_sp_rc + +@X_tbp_sp_rc ...... ....0 sp:2 ... ....0 .......... rc:1 &X_tb_sp_rc rt=%x_frtp rb=%x_frbp + +&X_tb_s_rc rt rb s:bool rc:bool +@X_tb_s_rc ...... rt:5 s:1 .... rb:5 .......... rc:1 &X_tb_s_rc + +@X_tbp_s_rc ...... ....0 s:1 .... ....0 .......... rc:1 &X_tb_s_rc rt=%x_frtp rb=%x_frbp + +%x_rt_tsx 0:1 21:5 +@X_TSX ...... ..... ra:5 rb:5 .......... . &X rt=%x_rt_tsx +@X_TSXP ...... ..... ra:5 rb:5 .......... . &X rt=%rt_tsxp + +%x_dw 0:1 21:5 !function=dw_compose_ea +@X_DW ...... ..... ra:5 rb:5 .......... . &X rt=%x_dw + +&X_frtp_vrb frtp vrb +@X_frtp_vrb ...... ....0 ..... vrb:5 .......... . &X_frtp_vrb frtp=%x_frtp + +&X_vrt_frbp vrt frbp +@X_vrt_frbp ...... vrt:5 ..... ....0 .......... . &X_vrt_frbp frbp=%x_frbp + +&X_a ra +@X_a ...... ra:3 .. ..... ..... .......... . &X_a + +%xx_xt 0:1 21:5 +%xx_xb 1:1 11:5 +%xx_xa 2:1 16:5 +%xx_xc 3:1 6:5 +&XX2 xt xb +@XX2 ...... ..... ..... ..... ......... .. &XX2 xt=%xx_xt xb=%xx_xb + +&XX2_uim xt xb uim:uint8_t +@XX2_uim2 ...... ..... ... uim:2 ..... ......... .. &XX2_uim xt=%xx_xt xb=%xx_xb + +@XX2_uim4 ...... ..... . uim:4 ..... ......... .. &XX2_uim xt=%xx_xt xb=%xx_xb + +%xx_uim7 6:1 2:1 16:5 +@XX2_uim7 ...... ..... ..... ..... .... . ... . .. &XX2_uim xt=%xx_xt xb=%xx_xb uim=%xx_uim7 + +&XX2_bf_uim bf xb uim +@XX2_bf_uim ...... bf:3 uim:7 ..... ......... . . &XX2_bf_uim + +&XX2_bf_xb bf xb +@XX2_bf_xb ...... bf:3 .. ..... ..... ......... . . &XX2_bf_xb xb=%xx_xb + +&XX3 xt xa xb +@XX3 ...... ..... ..... ..... ........ ... &XX3 xt=%xx_xt xa=%xx_xa xb=%xx_xb + +# 32 bit GER instructions have all mask bits considered 1 +&MMIRR_XX3 xa xb xt pmsk xmsk ymsk +%xx_at 23:3 +%xx_xa_pair 2:1 17:4 !function=times_2 +@XX3_at ...... ... .. ..... ..... ........ ... &MMIRR_XX3 xt=%xx_at xb=%xx_xb \ + pmsk=255 xmsk=15 ymsk=15 + +&XX3_dm xt xa xb dm +@XX3_dm ...... ..... ..... ..... . dm:2 ..... ... &XX3_dm xt=%xx_xt xa=%xx_xa xb=%xx_xb + +&XX4 xt xa xb xc +@XX4 ...... ..... ..... ..... ..... .. .... &XX4 xt=%xx_xt xa=%xx_xa xb=%xx_xb xc=%xx_xc + +&Z22_bf_fra bf fra dm +@Z22_bf_fra ...... bf:3 .. fra:5 dm:6 ......... . &Z22_bf_fra + +%z22_frap 17:4 !function=times_2 +@Z22_bf_frap ...... bf:3 .. ....0 dm:6 ......... . &Z22_bf_fra fra=%z22_frap + +&Z22_ta_sh_rc rt ra sh rc:bool +@Z22_ta_sh_rc ...... 
rt:5 ra:5 sh:6 ......... rc:1 &Z22_ta_sh_rc + +%z22_frtp 22:4 !function=times_2 +@Z22_tap_sh_rc ...... ....0 ....0 sh:6 ......... rc:1 &Z22_ta_sh_rc rt=%z22_frtp ra=%z22_frap + +&Z23_tab frt fra frb rmc rc:bool +@Z23_tab ...... frt:5 fra:5 frb:5 rmc:2 ........ rc:1 &Z23_tab + +%z23_frtp 22:4 !function=times_2 +%z23_frap 17:4 !function=times_2 +%z23_frbp 12:4 !function=times_2 +@Z23_tabp ...... ....0 ....0 ....0 rmc:2 ........ rc:1 &Z23_tab frt=%z23_frtp fra=%z23_frap frb=%z23_frbp + +@Z23_tp_a_bp ...... ....0 fra:5 ....0 rmc:2 ........ rc:1 &Z23_tab frt=%z23_frtp frb=%z23_frbp + +&Z23_tb frt frb r:bool rmc rc:bool +@Z23_tb ...... frt:5 .... r:1 frb:5 rmc:2 ........ rc:1 &Z23_tb + +@Z23_tbp ...... ....0 .... r:1 ....0 rmc:2 ........ rc:1 &Z23_tb frt=%z23_frtp frb=%z23_frbp + +&Z23_te_tb te frt frb rmc rc:bool +@Z23_te_tb ...... frt:5 te:5 frb:5 rmc:2 ........ rc:1 &Z23_te_tb + +@Z23_te_tbp ...... ....0 te:5 ....0 rmc:2 ........ rc:1 &Z23_te_tb frt=%z23_frtp frb=%z23_frbp ### Fixed-Point Load Instructions @@ -74,6 +287,8 @@ LDU 111010 ..... ..... ..............01 @DS LDX 011111 ..... ..... ..... 0000010101 - @X LDUX 011111 ..... ..... ..... 0000110101 - @X +LQ 111000 ..... ..... ............ ---- @DQ_rtp + ### Fixed-Point Store Instructions STB 100110 ..... ..... ................ @D @@ -96,6 +311,8 @@ STDU 111110 ..... ..... ..............01 @DS STDX 011111 ..... ..... ..... 0010010101 - @X STDUX 011111 ..... ..... ..... 0010110101 - @X +STQ 111110 ..... ..... ..............10 @DS_rtp + ### Fixed-Point Compare Instructions CMP 011111 ... - . ..... ..... 0000000000 - @X_bfl @@ -113,6 +330,56 @@ ADDPCIS 010011 ..... ..... .......... 00010 . @DX ## Fixed-Point Logical Instructions CFUGED 011111 ..... ..... ..... 0011011100 - @X +CNTLZDM 011111 ..... ..... ..... 0000111011 - @X +CNTTZDM 011111 ..... ..... ..... 1000111011 - @X +PDEPD 011111 ..... ..... ..... 0010011100 - @X +PEXTD 011111 ..... ..... ..... 0010111100 - @X + +# Fixed-Point Hash Instructions + +HASHST 011111 ..... ..... ..... 1011010010 . @X_DW +HASHCHK 011111 ..... ..... ..... 1011110010 . @X_DW +HASHSTP 011111 ..... ..... ..... 1010010010 . @X_DW +HASHCHKP 011111 ..... ..... ..... 1010110010 . @X_DW + +## BCD Assist + +ADDG6S 011111 ..... ..... ..... - 001001010 - @X +CDTBCD 011111 ..... ..... ----- 0100011010 - @X_sa +CBCDTD 011111 ..... ..... ----- 0100111010 - @X_sa + +### Float-Point Load Instructions + +LFS 110000 ..... ..... ................ @D +LFSU 110001 ..... ..... ................ @D +LFSX 011111 ..... ..... ..... 1000010111 - @X +LFSUX 011111 ..... ..... ..... 1000110111 - @X + +LFD 110010 ..... ..... ................ @D +LFDU 110011 ..... ..... ................ @D +LFDX 011111 ..... ..... ..... 1001010111 - @X +LFDUX 011111 ..... ..... ..... 1001110111 - @X + +### Float-Point Store Instructions + +STFS 110100 ..... ...... ............... @D +STFSU 110101 ..... ...... ............... @D +STFSX 011111 ..... ...... .... 1010010111 - @X +STFSUX 011111 ..... ...... .... 1010110111 - @X + +STFD 110110 ..... ...... ............... @D +STFDU 110111 ..... ...... ............... @D +STFDX 011111 ..... ...... .... 1011010111 - @X +STFDUX 011111 ..... ...... .... 1011110111 - @X + +### Floating-Point Arithmetic Instructions + +FSQRT 111111 ..... ----- ..... ----- 10110 . @A_tb +FSQRTS 111011 ..... ----- ..... ----- 10110 . @A_tb + +### Floating-Point Select Instruction + +FSEL 111111 ..... ..... ..... ..... 10111 . @A ### Move To/From System Register Instructions @@ -121,6 +388,581 @@ SETBCR 011111 ..... ..... 
----- 0110100000 - @X_bi SETNBC 011111 ..... ..... ----- 0111000000 - @X_bi SETNBCR 011111 ..... ..... ----- 0111100000 - @X_bi +### Move To/From FPSCR + +MFFS 111111 ..... 00000 ----- 1001000111 . @X_t_rc +MFFSCE 111111 ..... 00001 ----- 1001000111 - @X_t +MFFSCRN 111111 ..... 10110 ..... 1001000111 - @X_tb +MFFSCDRN 111111 ..... 10100 ..... 1001000111 - @X_tb +MFFSCRNI 111111 ..... 10111 ---.. 1001000111 - @X_imm2 +MFFSCDRNI 111111 ..... 10101 --... 1001000111 - @X_imm3 +MFFSL 111111 ..... 11000 ----- 1001000111 - @X_t + +### Decimal Floating-Point Arithmetic Instructions + +DADD 111011 ..... ..... ..... 0000000010 . @X_rc +DADDQ 111111 ..... ..... ..... 0000000010 . @X_tp_ap_bp_rc + +DSUB 111011 ..... ..... ..... 1000000010 . @X_rc +DSUBQ 111111 ..... ..... ..... 1000000010 . @X_tp_ap_bp_rc + +DMUL 111011 ..... ..... ..... 0000100010 . @X_rc +DMULQ 111111 ..... ..... ..... 0000100010 . @X_tp_ap_bp_rc + +DDIV 111011 ..... ..... ..... 1000100010 . @X_rc +DDIVQ 111111 ..... ..... ..... 1000100010 . @X_tp_ap_bp_rc + +### Decimal Floating-Point Compare Instructions + +DCMPU 111011 ... -- ..... ..... 1010000010 - @X_bf +DCMPUQ 111111 ... -- ..... ..... 1010000010 - @X_bf_ap_bp + +DCMPO 111011 ... -- ..... ..... 0010000010 - @X_bf +DCMPOQ 111111 ... -- ..... ..... 0010000010 - @X_bf_ap_bp + +### Decimal Floating-Point Test Instructions + +DTSTDC 111011 ... -- ..... ...... 011000010 - @Z22_bf_fra +DTSTDCQ 111111 ... -- ..... ...... 011000010 - @Z22_bf_frap + +DTSTDG 111011 ... -- ..... ...... 011100010 - @Z22_bf_fra +DTSTDGQ 111111 ... -- ..... ...... 011100010 - @Z22_bf_frap + +DTSTEX 111011 ... -- ..... ..... 0010100010 - @X_bf +DTSTEXQ 111111 ... -- ..... ..... 0010100010 - @X_bf_ap_bp + +DTSTSF 111011 ... -- ..... ..... 1010100010 - @X_bf +DTSTSFQ 111111 ... -- ..... ..... 1010100010 - @X_bf_a_bp + +DTSTSFI 111011 ... - ...... ..... 1010100011 - @X_bf_uim +DTSTSFIQ 111111 ... - ...... ..... 1010100011 - @X_bf_uim_bp + +### Decimal Floating-Point Quantum Adjustment Instructions + +DQUAI 111011 ..... ..... ..... .. 01000011 . @Z23_te_tb +DQUAIQ 111111 ..... ..... ..... .. 01000011 . @Z23_te_tbp + +DQUA 111011 ..... ..... ..... .. 00000011 . @Z23_tab +DQUAQ 111111 ..... ..... ..... .. 00000011 . @Z23_tabp + +DRRND 111011 ..... ..... ..... .. 00100011 . @Z23_tab +DRRNDQ 111111 ..... ..... ..... .. 00100011 . @Z23_tp_a_bp + +DRINTX 111011 ..... ---- . ..... .. 01100011 . @Z23_tb +DRINTXQ 111111 ..... ---- . ..... .. 01100011 . @Z23_tbp + +DRINTN 111011 ..... ---- . ..... .. 11100011 . @Z23_tb +DRINTNQ 111111 ..... ---- . ..... .. 11100011 . @Z23_tbp + +### Decimal Floating-Point Conversion Instructions + +DCTDP 111011 ..... ----- ..... 0100000010 . @X_tb_rc +DCTQPQ 111111 ..... ----- ..... 0100000010 . @X_tp_b_rc + +DRSP 111011 ..... ----- ..... 1100000010 . @X_tb_rc +DRDPQ 111111 ..... ----- ..... 1100000010 . @X_tbp_rc + +DCFFIX 111011 ..... ----- ..... 1100100010 . @X_tb_rc +DCFFIXQ 111111 ..... ----- ..... 1100100010 . @X_tp_b_rc +DCFFIXQQ 111111 ..... 00000 ..... 1111100010 - @X_frtp_vrb + +DCTFIX 111011 ..... ----- ..... 0100100010 . @X_tb_rc +DCTFIXQ 111111 ..... ----- ..... 0100100010 . @X_t_bp_rc +DCTFIXQQ 111111 ..... 00001 ..... 1111100010 - @X_vrt_frbp + +### Decimal Floating-Point Format Instructions + +DDEDPD 111011 ..... .. --- ..... 0101000010 . @X_tb_sp_rc +DDEDPDQ 111111 ..... .. --- ..... 0101000010 . @X_tbp_sp_rc + +DENBCD 111011 ..... . ---- ..... 1101000010 . @X_tb_s_rc +DENBCDQ 111111 ..... . ---- ..... 1101000010 . @X_tbp_s_rc + +DXEX 111011 ..... ----- ..... 0101100010 . 
@X_tb_rc +DXEXQ 111111 ..... ----- ..... 0101100010 . @X_t_bp_rc + +DIEX 111011 ..... ..... ..... 1101100010 . @X_rc +DIEXQ 111111 ..... ..... ..... 1101100010 . @X_tp_a_bp_rc + +DSCLI 111011 ..... ..... ...... 001000010 . @Z22_ta_sh_rc +DSCLIQ 111111 ..... ..... ...... 001000010 . @Z22_tap_sh_rc + +DSCRI 111011 ..... ..... ...... 001100010 . @Z22_ta_sh_rc +DSCRIQ 111111 ..... ..... ...... 001100010 . @Z22_tap_sh_rc + +## Vector Exclusive-OR-based Instructions + +VPMSUMD 000100 ..... ..... ..... 10011001000 @VX + +## Vector Integer Instructions + +VCMPEQUB 000100 ..... ..... ..... . 0000000110 @VC +VCMPEQUH 000100 ..... ..... ..... . 0001000110 @VC +VCMPEQUW 000100 ..... ..... ..... . 0010000110 @VC +VCMPEQUD 000100 ..... ..... ..... . 0011000111 @VC +VCMPEQUQ 000100 ..... ..... ..... . 0111000111 @VC + +VCMPGTSB 000100 ..... ..... ..... . 1100000110 @VC +VCMPGTSH 000100 ..... ..... ..... . 1101000110 @VC +VCMPGTSW 000100 ..... ..... ..... . 1110000110 @VC +VCMPGTSD 000100 ..... ..... ..... . 1111000111 @VC +VCMPGTSQ 000100 ..... ..... ..... . 1110000111 @VC + +VCMPGTUB 000100 ..... ..... ..... . 1000000110 @VC +VCMPGTUH 000100 ..... ..... ..... . 1001000110 @VC +VCMPGTUW 000100 ..... ..... ..... . 1010000110 @VC +VCMPGTUD 000100 ..... ..... ..... . 1011000111 @VC +VCMPGTUQ 000100 ..... ..... ..... . 1010000111 @VC + +VCMPNEB 000100 ..... ..... ..... . 0000000111 @VC +VCMPNEH 000100 ..... ..... ..... . 0001000111 @VC +VCMPNEW 000100 ..... ..... ..... . 0010000111 @VC + +VCMPNEZB 000100 ..... ..... ..... . 0100000111 @VC +VCMPNEZH 000100 ..... ..... ..... . 0101000111 @VC +VCMPNEZW 000100 ..... ..... ..... . 0110000111 @VC + +VCMPSQ 000100 ... -- ..... ..... 00101000001 @VX_bf +VCMPUQ 000100 ... -- ..... ..... 00100000001 @VX_bf + +## Vector Integer Average Instructions + +VAVGSB 000100 ..... ..... ..... 10100000010 @VX +VAVGSH 000100 ..... ..... ..... 10101000010 @VX +VAVGSW 000100 ..... ..... ..... 10110000010 @VX +VAVGUB 000100 ..... ..... ..... 10000000010 @VX +VAVGUH 000100 ..... ..... ..... 10001000010 @VX +VAVGUW 000100 ..... ..... ..... 10010000010 @VX + +## Vector Integer Absolute Difference Instructions + +VABSDUB 000100 ..... ..... ..... 10000000011 @VX +VABSDUH 000100 ..... ..... ..... 10001000011 @VX +VABSDUW 000100 ..... ..... ..... 10010000011 @VX + ## Vector Bit Manipulation Instruction +VGNB 000100 ..... -- ... ..... 10011001100 @VX_n + VCFUGED 000100 ..... ..... ..... 10101001101 @VX +VCLZDM 000100 ..... ..... ..... 11110000100 @VX +VCTZDM 000100 ..... ..... ..... 11111000100 @VX +VPDEPD 000100 ..... ..... ..... 10111001101 @VX +VPEXTD 000100 ..... ..... ..... 10110001101 @VX + +VPRTYBD 000100 ..... 01001 ..... 11000000010 @VX_tb +VPRTYBQ 000100 ..... 01010 ..... 11000000010 @VX_tb +VPRTYBW 000100 ..... 01000 ..... 11000000010 @VX_tb + +## Vector Permute and Formatting Instruction + +VEXTDUBVLX 000100 ..... ..... ..... ..... 011000 @VA +VEXTDUBVRX 000100 ..... ..... ..... ..... 011001 @VA +VEXTDUHVLX 000100 ..... ..... ..... ..... 011010 @VA +VEXTDUHVRX 000100 ..... ..... ..... ..... 011011 @VA +VEXTDUWVLX 000100 ..... ..... ..... ..... 011100 @VA +VEXTDUWVRX 000100 ..... ..... ..... ..... 011101 @VA +VEXTDDVLX 000100 ..... ..... ..... ..... 011110 @VA +VEXTDDVRX 000100 ..... ..... ..... ..... 011111 @VA + +VINSERTB 000100 ..... - .... ..... 01100001101 @VX_uim4 +VINSERTH 000100 ..... - .... ..... 01101001101 @VX_uim4 +VINSERTW 000100 ..... - .... ..... 01110001101 @VX_uim4 +VINSERTD 000100 ..... - .... ..... 01111001101 @VX_uim4 + +VINSBLX 000100 ..... ..... ..... 
01000001111 @VX +VINSBRX 000100 ..... ..... ..... 01100001111 @VX +VINSHLX 000100 ..... ..... ..... 01001001111 @VX +VINSHRX 000100 ..... ..... ..... 01101001111 @VX +VINSWLX 000100 ..... ..... ..... 01010001111 @VX +VINSWRX 000100 ..... ..... ..... 01110001111 @VX +VINSDLX 000100 ..... ..... ..... 01011001111 @VX +VINSDRX 000100 ..... ..... ..... 01111001111 @VX + +VINSW 000100 ..... - .... ..... 00011001111 @VX_uim4 +VINSD 000100 ..... - .... ..... 00111001111 @VX_uim4 + +VINSBVLX 000100 ..... ..... ..... 00000001111 @VX +VINSBVRX 000100 ..... ..... ..... 00100001111 @VX +VINSHVLX 000100 ..... ..... ..... 00001001111 @VX +VINSHVRX 000100 ..... ..... ..... 00101001111 @VX +VINSWVLX 000100 ..... ..... ..... 00010001111 @VX +VINSWVRX 000100 ..... ..... ..... 00110001111 @VX + +VSLDBI 000100 ..... ..... ..... 00 ... 010110 @VN +VSRDBI 000100 ..... ..... ..... 01 ... 010110 @VN + +VPERM 000100 ..... ..... ..... ..... 101011 @VA +VPERMR 000100 ..... ..... ..... ..... 111011 @VA + +VSEL 000100 ..... ..... ..... ..... 101010 @VA + +## Vector Integer Shift Instruction + +VSLB 000100 ..... ..... ..... 00100000100 @VX +VSLH 000100 ..... ..... ..... 00101000100 @VX +VSLW 000100 ..... ..... ..... 00110000100 @VX +VSLD 000100 ..... ..... ..... 10111000100 @VX +VSLQ 000100 ..... ..... ..... 00100000101 @VX + +VSRB 000100 ..... ..... ..... 01000000100 @VX +VSRH 000100 ..... ..... ..... 01001000100 @VX +VSRW 000100 ..... ..... ..... 01010000100 @VX +VSRD 000100 ..... ..... ..... 11011000100 @VX +VSRQ 000100 ..... ..... ..... 01000000101 @VX + +VSRAB 000100 ..... ..... ..... 01100000100 @VX +VSRAH 000100 ..... ..... ..... 01101000100 @VX +VSRAW 000100 ..... ..... ..... 01110000100 @VX +VSRAD 000100 ..... ..... ..... 01111000100 @VX +VSRAQ 000100 ..... ..... ..... 01100000101 @VX + +VRLB 000100 ..... ..... ..... 00000000100 @VX +VRLH 000100 ..... ..... ..... 00001000100 @VX +VRLW 000100 ..... ..... ..... 00010000100 @VX +VRLD 000100 ..... ..... ..... 00011000100 @VX +VRLQ 000100 ..... ..... ..... 00000000101 @VX + +VRLWMI 000100 ..... ..... ..... 00010000101 @VX +VRLDMI 000100 ..... ..... ..... 00011000101 @VX +VRLQMI 000100 ..... ..... ..... 00001000101 @VX + +VRLWNM 000100 ..... ..... ..... 00110000101 @VX +VRLDNM 000100 ..... ..... ..... 00111000101 @VX +VRLQNM 000100 ..... ..... ..... 00101000101 @VX + +## Vector Integer Arithmetic Instructions + +VADDCUW 000100 ..... ..... ..... 00110000000 @VX +VADDCUQ 000100 ..... ..... ..... 00101000000 @VX +VADDUQM 000100 ..... ..... ..... 00100000000 @VX + +VADDEUQM 000100 ..... ..... ..... ..... 111100 @VA +VADDECUQ 000100 ..... ..... ..... ..... 111101 @VA + +VSUBCUW 000100 ..... ..... ..... 10110000000 @VX +VSUBCUQ 000100 ..... ..... ..... 10101000000 @VX +VSUBUQM 000100 ..... ..... ..... 10100000000 @VX + +VSUBECUQ 000100 ..... ..... ..... ..... 111111 @VA +VSUBEUQM 000100 ..... ..... ..... ..... 111110 @VA + +VEXTSB2W 000100 ..... 10000 ..... 11000000010 @VX_tb +VEXTSH2W 000100 ..... 10001 ..... 11000000010 @VX_tb +VEXTSB2D 000100 ..... 11000 ..... 11000000010 @VX_tb +VEXTSH2D 000100 ..... 11001 ..... 11000000010 @VX_tb +VEXTSW2D 000100 ..... 11010 ..... 11000000010 @VX_tb +VEXTSD2Q 000100 ..... 11011 ..... 11000000010 @VX_tb + +VNEGD 000100 ..... 00111 ..... 11000000010 @VX_tb +VNEGW 000100 ..... 00110 ..... 11000000010 @VX_tb + +## Vector Mask Manipulation Instructions + +MTVSRBM 000100 ..... 10000 ..... 11001000010 @VX_tb +MTVSRHM 000100 ..... 10001 ..... 11001000010 @VX_tb +MTVSRWM 000100 ..... 10010 ..... 11001000010 @VX_tb +MTVSRDM 000100 ..... 
10011 ..... 11001000010 @VX_tb +MTVSRQM 000100 ..... 10100 ..... 11001000010 @VX_tb +MTVSRBMI 000100 ..... ..... .......... 01010 . @DX_b + +VEXPANDBM 000100 ..... 00000 ..... 11001000010 @VX_tb +VEXPANDHM 000100 ..... 00001 ..... 11001000010 @VX_tb +VEXPANDWM 000100 ..... 00010 ..... 11001000010 @VX_tb +VEXPANDDM 000100 ..... 00011 ..... 11001000010 @VX_tb +VEXPANDQM 000100 ..... 00100 ..... 11001000010 @VX_tb + +VEXTRACTBM 000100 ..... 01000 ..... 11001000010 @VX_tb +VEXTRACTHM 000100 ..... 01001 ..... 11001000010 @VX_tb +VEXTRACTWM 000100 ..... 01010 ..... 11001000010 @VX_tb +VEXTRACTDM 000100 ..... 01011 ..... 11001000010 @VX_tb +VEXTRACTQM 000100 ..... 01100 ..... 11001000010 @VX_tb + +VCNTMBB 000100 ..... 1100 . ..... 11001000010 @VX_mp +VCNTMBH 000100 ..... 1101 . ..... 11001000010 @VX_mp +VCNTMBW 000100 ..... 1110 . ..... 11001000010 @VX_mp +VCNTMBD 000100 ..... 1111 . ..... 11001000010 @VX_mp + +## Vector Multiply Instruction + +VMULESB 000100 ..... ..... ..... 01100001000 @VX +VMULOSB 000100 ..... ..... ..... 00100001000 @VX +VMULEUB 000100 ..... ..... ..... 01000001000 @VX +VMULOUB 000100 ..... ..... ..... 00000001000 @VX + +VMULESH 000100 ..... ..... ..... 01101001000 @VX +VMULOSH 000100 ..... ..... ..... 00101001000 @VX +VMULEUH 000100 ..... ..... ..... 01001001000 @VX +VMULOUH 000100 ..... ..... ..... 00001001000 @VX + +VMULESW 000100 ..... ..... ..... 01110001000 @VX +VMULOSW 000100 ..... ..... ..... 00110001000 @VX +VMULEUW 000100 ..... ..... ..... 01010001000 @VX +VMULOUW 000100 ..... ..... ..... 00010001000 @VX + +VMULESD 000100 ..... ..... ..... 01111001000 @VX +VMULOSD 000100 ..... ..... ..... 00111001000 @VX +VMULEUD 000100 ..... ..... ..... 01011001000 @VX +VMULOUD 000100 ..... ..... ..... 00011001000 @VX + +VMULHSW 000100 ..... ..... ..... 01110001001 @VX +VMULHUW 000100 ..... ..... ..... 01010001001 @VX +VMULHSD 000100 ..... ..... ..... 01111001001 @VX +VMULHUD 000100 ..... ..... ..... 01011001001 @VX +VMULLD 000100 ..... ..... ..... 00111001001 @VX + +## Vector Multiply-Sum Instructions + +VMSUMUBM 000100 ..... ..... ..... ..... 100100 @VA +VMSUMMBM 000100 ..... ..... ..... ..... 100101 @VA +VMSUMSHM 000100 ..... ..... ..... ..... 101000 @VA +VMSUMSHS 000100 ..... ..... ..... ..... 101001 @VA +VMSUMUHM 000100 ..... ..... ..... ..... 100110 @VA +VMSUMUHS 000100 ..... ..... ..... ..... 100111 @VA + +VMSUMCUD 000100 ..... ..... ..... ..... 010111 @VA +VMSUMUDM 000100 ..... ..... ..... ..... 100011 @VA + +VMLADDUHM 000100 ..... ..... ..... ..... 100010 @VA +VMHADDSHS 000100 ..... ..... ..... ..... 100000 @VA +VMHRADDSHS 000100 ..... ..... ..... ..... 100001 @VA + +## Vector String Instructions + +VSTRIBL 000100 ..... 00000 ..... . 0000001101 @VX_tb_rc +VSTRIBR 000100 ..... 00001 ..... . 0000001101 @VX_tb_rc +VSTRIHL 000100 ..... 00010 ..... . 0000001101 @VX_tb_rc +VSTRIHR 000100 ..... 00011 ..... . 0000001101 @VX_tb_rc + +VCLRLB 000100 ..... ..... ..... 00110001101 @VX +VCLRRB 000100 ..... ..... ..... 00111001101 @VX + +# VSX Load/Store Instructions + +LXSD 111001 ..... ..... .............. 10 @DS +STXSD 111101 ..... ..... .............. 10 @DS +LXSSP 111001 ..... ..... .............. 11 @DS +STXSSP 111101 ..... ..... .............. 11 @DS +LXV 111101 ..... ..... ............ . 001 @DQ_TSX +STXV 111101 ..... ..... ............ . 101 @DQ_TSX +LXVP 000110 ..... ..... ............ 0000 @DQ_TSXP +STXVP 000110 ..... ..... ............ 0001 @DQ_TSXP +LXVX 011111 ..... ..... ..... 0100 - 01100 . @X_TSX +STXVX 011111 ..... ..... ..... 0110001100 . @X_TSX +LXVPX 011111 ..... 
..... ..... 0101001101 - @X_TSXP +STXVPX 011111 ..... ..... ..... 0111001101 - @X_TSXP +LXVRBX 011111 ..... ..... ..... 0000001101 . @X_TSX +LXVRHX 011111 ..... ..... ..... 0000101101 . @X_TSX +LXVRWX 011111 ..... ..... ..... 0001001101 . @X_TSX +LXVRDX 011111 ..... ..... ..... 0001101101 . @X_TSX +STXVRBX 011111 ..... ..... ..... 0010001101 . @X_TSX +STXVRHX 011111 ..... ..... ..... 0010101101 . @X_TSX +STXVRWX 011111 ..... ..... ..... 0011001101 . @X_TSX +STXVRDX 011111 ..... ..... ..... 0011101101 . @X_TSX + +## VSX Vector Binary Floating-Point Sign Manipulation Instructions + +XVABSDP 111100 ..... 00000 ..... 111011001 .. @XX2 +XVABSSP 111100 ..... 00000 ..... 110011001 .. @XX2 +XVNABSDP 111100 ..... 00000 ..... 111101001 .. @XX2 +XVNABSSP 111100 ..... 00000 ..... 110101001 .. @XX2 +XVNEGDP 111100 ..... 00000 ..... 111111001 .. @XX2 +XVNEGSP 111100 ..... 00000 ..... 110111001 .. @XX2 +XVCPSGNDP 111100 ..... ..... ..... 11110000 ... @XX3 +XVCPSGNSP 111100 ..... ..... ..... 11010000 ... @XX3 + +## VSX Scalar Multiply-Add Instructions + +XSMADDADP 111100 ..... ..... ..... 00100001 . . . @XX3 +XSMADDMDP 111100 ..... ..... ..... 00101001 . . . @XX3 +XSMADDASP 111100 ..... ..... ..... 00000001 . . . @XX3 +XSMADDMSP 111100 ..... ..... ..... 00001001 . . . @XX3 +XSMADDQP 111111 ..... ..... ..... 0110000100 . @X_rc + +XSMSUBADP 111100 ..... ..... ..... 00110001 . . . @XX3 +XSMSUBMDP 111100 ..... ..... ..... 00111001 . . . @XX3 +XSMSUBASP 111100 ..... ..... ..... 00010001 . . . @XX3 +XSMSUBMSP 111100 ..... ..... ..... 00011001 . . . @XX3 +XSMSUBQP 111111 ..... ..... ..... 0110100100 . @X_rc + +XSNMADDASP 111100 ..... ..... ..... 10000001 . . . @XX3 +XSNMADDMSP 111100 ..... ..... ..... 10001001 . . . @XX3 +XSNMADDADP 111100 ..... ..... ..... 10100001 . . . @XX3 +XSNMADDMDP 111100 ..... ..... ..... 10101001 . . . @XX3 +XSNMADDQP 111111 ..... ..... ..... 0111000100 . @X_rc + +XSNMSUBASP 111100 ..... ..... ..... 10010001 . . . @XX3 +XSNMSUBMSP 111100 ..... ..... ..... 10011001 . . . @XX3 +XSNMSUBADP 111100 ..... ..... ..... 10110001 . . . @XX3 +XSNMSUBMDP 111100 ..... ..... ..... 10111001 . . . @XX3 +XSNMSUBQP 111111 ..... ..... ..... 0111100100 . @X_rc + +## VSX splat instruction + +XXSPLTIB 111100 ..... 00 ........ 0101101000 . @X_imm8 +XXSPLTW 111100 ..... ---.. ..... 010100100 . . @XX2_uim2 + +## VSX Permute Instructions + +XXEXTRACTUW 111100 ..... - .... ..... 010100101 .. @XX2_uim4 +XXINSERTW 111100 ..... - .... ..... 010110101 .. @XX2_uim4 + +XXPERM 111100 ..... ..... ..... 00011010 ... @XX3 +XXPERMR 111100 ..... ..... ..... 00111010 ... @XX3 +XXPERMDI 111100 ..... ..... ..... 0 .. 01010 ... @XX3_dm + +XXSEL 111100 ..... ..... ..... ..... 11 .... @XX4 + +## VSX Vector Generate PCV + +XXGENPCVBM 111100 ..... ..... ..... 1110010100 . @X_imm5 +XXGENPCVHM 111100 ..... ..... ..... 1110010101 . @X_imm5 +XXGENPCVWM 111100 ..... ..... ..... 1110110100 . @X_imm5 +XXGENPCVDM 111100 ..... ..... ..... 1110110101 . @X_imm5 + +## VSX Vector Load Special Value Instruction + +LXVKQ 111100 ..... 11111 ..... 0101101000 . @X_uim5 + +## VSX Comparison Instructions + +XSMAXCDP 111100 ..... ..... ..... 10000000 ... @XX3 +XSMINCDP 111100 ..... ..... ..... 10001000 ... @XX3 +XSMAXJDP 111100 ..... ..... ..... 10010000 ... @XX3 +XSMINJDP 111100 ..... ..... ..... 10011000 ... @XX3 +XSMAXCQP 111111 ..... ..... ..... 1010100100 - @X +XSMINCQP 111111 ..... ..... ..... 1011100100 - @X + +XSCMPEQDP 111100 ..... ..... ..... 00000011 ... @XX3 +XSCMPGEDP 111100 ..... ..... ..... 00010011 ... @XX3 +XSCMPGTDP 111100 ..... ..... 
..... 00001011 ... @XX3 +XSCMPEQQP 111111 ..... ..... ..... 0001000100 - @X +XSCMPGEQP 111111 ..... ..... ..... 0011000100 - @X +XSCMPGTQP 111111 ..... ..... ..... 0011100100 - @X + +## VSX Binary Floating-Point Convert Instructions + +XSCVQPDP 111111 ..... 10100 ..... 1101000100 . @X_tb_rc +XSCVQPUQZ 111111 ..... 00000 ..... 1101000100 - @X_tb +XSCVQPSQZ 111111 ..... 01000 ..... 1101000100 - @X_tb +XSCVUQQP 111111 ..... 00011 ..... 1101000100 - @X_tb +XSCVSQQP 111111 ..... 01011 ..... 1101000100 - @X_tb +XVCVBF16SPN 111100 ..... 10000 ..... 111011011 .. @XX2 +XVCVSPBF16 111100 ..... 10001 ..... 111011011 .. @XX2 +XSCVSPDPN 111100 ..... ----- ..... 101001011 .. @XX2 + +## VSX Binary Floating-Point Math Support Instructions + +XVXSIGSP 111100 ..... 01001 ..... 111011011 .. @XX2 +XVTSTDCDP 111100 ..... ..... ..... 1111 . 101 ... @XX2_uim7 +XVTSTDCSP 111100 ..... ..... ..... 1101 . 101 ... @XX2_uim7 +XSTSTDCSP 111100 ... ....... ..... 100101010 . - @XX2_bf_uim xb=%xx_xb +XSTSTDCDP 111100 ... ....... ..... 101101010 . - @XX2_bf_uim xb=%xx_xb +XSTSTDCQP 111111 ... ....... xb:5 1011000100 - @XX2_bf_uim + +## VSX Vector Test Least-Significant Bit by Byte Instruction + +XVTLSBB 111100 ... -- 00010 ..... 111011011 . - @XX2_bf_xb + +### rfebb +&XL_s s:uint8_t +@XL_s ......-------------- s:1 .......... - &XL_s +RFEBB 010011-------------- . 0010010010 - @XL_s + +## Accumulator Instructions + +XXMFACC 011111 ... -- 00000 ----- 0010110001 - @X_a +XXMTACC 011111 ... -- 00001 ----- 0010110001 - @X_a +XXSETACCZ 011111 ... -- 00011 ----- 0010110001 - @X_a + +## VSX GER instruction + +XVI4GER8 111011 ... -- ..... ..... 00100011 ..- @XX3_at xa=%xx_xa +XVI4GER8PP 111011 ... -- ..... ..... 00100010 ..- @XX3_at xa=%xx_xa +XVI8GER4 111011 ... -- ..... ..... 00000011 ..- @XX3_at xa=%xx_xa +XVI8GER4PP 111011 ... -- ..... ..... 00000010 ..- @XX3_at xa=%xx_xa +XVI16GER2 111011 ... -- ..... ..... 01001011 ..- @XX3_at xa=%xx_xa +XVI16GER2PP 111011 ... -- ..... ..... 01101011 ..- @XX3_at xa=%xx_xa +XVI8GER4SPP 111011 ... -- ..... ..... 01100011 ..- @XX3_at xa=%xx_xa +XVI16GER2S 111011 ... -- ..... ..... 00101011 ..- @XX3_at xa=%xx_xa +XVI16GER2SPP 111011 ... -- ..... ..... 00101010 ..- @XX3_at xa=%xx_xa + +XVBF16GER2 111011 ... -- ..... ..... 00110011 ..- @XX3_at xa=%xx_xa +XVBF16GER2PP 111011 ... -- ..... ..... 00110010 ..- @XX3_at xa=%xx_xa +XVBF16GER2PN 111011 ... -- ..... ..... 10110010 ..- @XX3_at xa=%xx_xa +XVBF16GER2NP 111011 ... -- ..... ..... 01110010 ..- @XX3_at xa=%xx_xa +XVBF16GER2NN 111011 ... -- ..... ..... 11110010 ..- @XX3_at xa=%xx_xa + +XVF16GER2 111011 ... -- ..... ..... 00010011 ..- @XX3_at xa=%xx_xa +XVF16GER2PP 111011 ... -- ..... ..... 00010010 ..- @XX3_at xa=%xx_xa +XVF16GER2PN 111011 ... -- ..... ..... 10010010 ..- @XX3_at xa=%xx_xa +XVF16GER2NP 111011 ... -- ..... ..... 01010010 ..- @XX3_at xa=%xx_xa +XVF16GER2NN 111011 ... -- ..... ..... 11010010 ..- @XX3_at xa=%xx_xa + +XVF32GER 111011 ... -- ..... ..... 00011011 ..- @XX3_at xa=%xx_xa +XVF32GERPP 111011 ... -- ..... ..... 00011010 ..- @XX3_at xa=%xx_xa +XVF32GERPN 111011 ... -- ..... ..... 10011010 ..- @XX3_at xa=%xx_xa +XVF32GERNP 111011 ... -- ..... ..... 01011010 ..- @XX3_at xa=%xx_xa +XVF32GERNN 111011 ... -- ..... ..... 11011010 ..- @XX3_at xa=%xx_xa + +XVF64GER 111011 ... -- .... 0 ..... 00111011 ..- @XX3_at xa=%xx_xa_pair +XVF64GERPP 111011 ... -- .... 0 ..... 00111010 ..- @XX3_at xa=%xx_xa_pair +XVF64GERPN 111011 ... -- .... 0 ..... 10111010 ..- @XX3_at xa=%xx_xa_pair +XVF64GERNP 111011 ... -- .... 0 ..... 
01111010 ..- @XX3_at xa=%xx_xa_pair +XVF64GERNN 111011 ... -- .... 0 ..... 11111010 ..- @XX3_at xa=%xx_xa_pair + +## Vector Division Instructions + +VDIVSW 000100 ..... ..... ..... 00110001011 @VX +VDIVUW 000100 ..... ..... ..... 00010001011 @VX +VDIVSD 000100 ..... ..... ..... 00111001011 @VX +VDIVUD 000100 ..... ..... ..... 00011001011 @VX +VDIVSQ 000100 ..... ..... ..... 00100001011 @VX +VDIVUQ 000100 ..... ..... ..... 00000001011 @VX + +VDIVESW 000100 ..... ..... ..... 01110001011 @VX +VDIVEUW 000100 ..... ..... ..... 01010001011 @VX +VDIVESD 000100 ..... ..... ..... 01111001011 @VX +VDIVEUD 000100 ..... ..... ..... 01011001011 @VX +VDIVESQ 000100 ..... ..... ..... 01100001011 @VX +VDIVEUQ 000100 ..... ..... ..... 01000001011 @VX + +VMODSW 000100 ..... ..... ..... 11110001011 @VX +VMODUW 000100 ..... ..... ..... 11010001011 @VX +VMODSD 000100 ..... ..... ..... 11111001011 @VX +VMODUD 000100 ..... ..... ..... 11011001011 @VX +VMODSQ 000100 ..... ..... ..... 11100001011 @VX +VMODUQ 000100 ..... ..... ..... 11000001011 @VX + +## SLB Management Instructions + +SLBIE 011111 ----- ----- ..... 0110110010 - @X_rb +SLBIEG 011111 ..... ----- ..... 0111010010 - @X_tb + +SLBIA 011111 --... ----- ----- 0111110010 - @X_ih +SLBIAG 011111 ..... ----. ----- 1101010010 - @X_rs_l + +SLBMTE 011111 ..... ----- ..... 0110010010 - @X_tb + +SLBMFEV 011111 ..... ----- ..... 1101010011 - @X_tb +SLBMFEE 011111 ..... ----- ..... 1110010011 - @X_tb + +SLBFEE 011111 ..... ----- ..... 1111010011 1 @X_tb + +SLBSYNC 011111 ----- ----- ----- 0101010010 - + +## TLB Management Instructions + +&X_tlbie rb rs ric prs:bool r:bool +@X_tlbie ...... rs:5 - ric:2 prs:1 r:1 rb:5 .......... - &X_tlbie + +TLBIE 011111 ..... - .. . . ..... 0100110010 - @X_tlbie +TLBIEL 011111 ..... - .. . . ..... 0100010010 - @X_tlbie + +# Processor Control Instructions + +MSGCLR 011111 ----- ----- ..... 0011101110 - @X_rb +MSGSND 011111 ----- ----- ..... 0011001110 - @X_rb +MSGCLRP 011111 ----- ----- ..... 0010101110 - @X_rb +MSGSNDP 011111 ----- ----- ..... 0010001110 - @X_rb +MSGSYNC 011111 ----- ----- ----- 1101110110 - diff --git a/target/ppc/insn64.decode b/target/ppc/insn64.decode index 72c5944a53..de115c1943 100644 --- a/target/ppc/insn64.decode +++ b/target/ppc/insn64.decode @@ -23,6 +23,64 @@ @PLS_D ...... .. ... r:1 .. .................. \ ...... rt:5 ra:5 ................ \ &PLS_D si=%pls_si +@8LS_D_TSX ...... .. . .. r:1 .. .................. \ + ..... rt:6 ra:5 ................ \ + &PLS_D si=%pls_si + +%rt_tsxp 21:1 22:4 !function=times_2 +@8LS_D_TSXP ...... .. . .. r:1 .. .................. \ + ...... ..... ra:5 ................ \ + &PLS_D si=%pls_si rt=%rt_tsxp + +@8LS_D ...... .. . .. r:1 .. .................. \ + ...... rt:5 ra:5 ................ \ + &PLS_D si=%pls_si + +# Format 8RR:D +%8rr_si 32:s16 0:16 +%8rr_xt 16:1 21:5 +&8RR_D_IX xt ix si +@8RR_D_IX ...... .. .... .. .. ................ \ + ...... ..... ... ix:1 . ................ \ + &8RR_D_IX si=%8rr_si xt=%8rr_xt +&8RR_D xt si:int32_t +@8RR_D ...... .. .... .. .. ................ \ + ...... ..... .... . ................ \ + &8RR_D si=%8rr_si xt=%8rr_xt + +# Format 8RR:XX4 +%8rr_xx_xt 0:1 21:5 +%8rr_xx_xa 2:1 16:5 +%8rr_xx_xb 1:1 11:5 +%8rr_xx_xc 3:1 6:5 +&8RR_XX4 xt xa xb xc +@8RR_XX4 ........ ........ ........ ........ \ + ...... ..... ..... ..... ..... .. .... \ + &8RR_XX4 xt=%8rr_xx_xt xa=%8rr_xx_xa xb=%8rr_xx_xb xc=%8rr_xx_xc + +&8RR_XX4_imm xt xa xb xc imm +@8RR_XX4_imm ........ ........ ........ imm:8 \ + ...... ..... ..... ..... ..... .. .... 
\ + &8RR_XX4_imm xt=%8rr_xx_xt xa=%8rr_xx_xa xb=%8rr_xx_xb xc=%8rr_xx_xc + +&8RR_XX4_uim3 xt xa xb xc uim3 +@8RR_XX4_uim3 ...... .. .... .. ............... uim3:3 \ + ...... ..... ..... ..... ..... .. .... \ + &8RR_XX4_uim3 xt=%8rr_xx_xt xa=%8rr_xx_xa xb=%8rr_xx_xb xc=%8rr_xx_xc + +# Format MMIRR:XX3 +&MMIRR_XX3 !extern xa xb xt pmsk xmsk ymsk +%xx3_xa 2:1 16:5 +%xx3_xb 1:1 11:5 +%xx3_at 23:3 +%xx3_xa_pair 2:1 17:4 !function=times_2 +@MMIRR_XX3 ...... .. .... .. . . ........ xmsk:4 ymsk:4 \ + ...... ... .. ..... ..... ........ ... \ + &MMIRR_XX3 xa=%xx3_xa xb=%xx3_xb xt=%xx3_at + +@MMIRR_XX3_NO_P ...... .. .... .. . . ........ xmsk:4 .... \ + ...... ... .. ..... ..... ........ ... \ + &MMIRR_XX3 xb=%xx3_xb xt=%xx3_at pmsk=1 ### Fixed-Point Load Instructions @@ -38,6 +96,8 @@ PLWA 000001 00 0--.-- .................. \ 101001 ..... ..... ................ @PLS_D PLD 000001 00 0--.-- .................. \ 111001 ..... ..... ................ @PLS_D +PLQ 000001 00 0--.-- .................. \ + 111000 ..... ..... ................ @PLS_D ### Fixed-Point Store Instructions @@ -50,12 +110,90 @@ PSTH 000001 10 0--.-- .................. \ PSTD 000001 00 0--.-- .................. \ 111101 ..... ..... ................ @PLS_D +PSTQ 000001 00 0--.-- .................. \ + 111100 ..... ..... ................ @PLS_D ### Fixed-Point Arithmetic Instructions PADDI 000001 10 0--.-- .................. \ 001110 ..... ..... ................ @PLS_D +### Float-Point Load and Store Instructions + +PLFS 000001 10 0--.-- .................. \ + 110000 ..... ..... ................ @PLS_D +PLFD 000001 10 0--.-- .................. \ + 110010 ..... ..... ................ @PLS_D +PSTFS 000001 10 0--.-- .................. \ + 110100 ..... ..... ................ @PLS_D +PSTFD 000001 10 0--.-- .................. \ + 110110 ..... ..... ................ @PLS_D + +## VSX GER instruction + +PMXVI4GER8 000001 11 1001 -- - - pmsk:8 ........ \ + 111011 ... -- ..... ..... 00100011 ..- @MMIRR_XX3 +PMXVI4GER8PP 000001 11 1001 -- - - pmsk:8 ........ \ + 111011 ... -- ..... ..... 00100010 ..- @MMIRR_XX3 +PMXVI8GER4 000001 11 1001 -- - - pmsk:4 ---- ........ \ + 111011 ... -- ..... ..... 00000011 ..- @MMIRR_XX3 +PMXVI8GER4PP 000001 11 1001 -- - - pmsk:4 ---- ........ \ + 111011 ... -- ..... ..... 00000010 ..- @MMIRR_XX3 +PMXVI16GER2 000001 11 1001 -- - - pmsk:2 ------ ........ \ + 111011 ... -- ..... ..... 01001011 ..- @MMIRR_XX3 +PMXVI16GER2PP 000001 11 1001 -- - - pmsk:2 ------ ........ \ + 111011 ... -- ..... ..... 01101011 ..- @MMIRR_XX3 +PMXVI8GER4SPP 000001 11 1001 -- - - pmsk:4 ---- ........ \ + 111011 ... -- ..... ..... 01100011 ..- @MMIRR_XX3 +PMXVI16GER2S 000001 11 1001 -- - - pmsk:2 ------ ........ \ + 111011 ... -- ..... ..... 00101011 ..- @MMIRR_XX3 +PMXVI16GER2SPP 000001 11 1001 -- - - pmsk:2 ------ ........ \ + 111011 ... -- ..... ..... 00101010 ..- @MMIRR_XX3 + +PMXVBF16GER2 000001 11 1001 -- - - pmsk:2 ------ ........ \ + 111011 ... -- ..... ..... 00110011 ..- @MMIRR_XX3 +PMXVBF16GER2PP 000001 11 1001 -- - - pmsk:2 ------ ........ \ + 111011 ... -- ..... ..... 00110010 ..- @MMIRR_XX3 +PMXVBF16GER2PN 000001 11 1001 -- - - pmsk:2 ------ ........ \ + 111011 ... -- ..... ..... 10110010 ..- @MMIRR_XX3 +PMXVBF16GER2NP 000001 11 1001 -- - - pmsk:2 ------ ........ \ + 111011 ... -- ..... ..... 01110010 ..- @MMIRR_XX3 +PMXVBF16GER2NN 000001 11 1001 -- - - pmsk:2 ------ ........ \ + 111011 ... -- ..... ..... 11110010 ..- @MMIRR_XX3 + +PMXVF16GER2 000001 11 1001 -- - - pmsk:2 ------ ........ \ + 111011 ... -- ..... ..... 
00010011 ..- @MMIRR_XX3 +PMXVF16GER2PP 000001 11 1001 -- - - pmsk:2 ------ ........ \ + 111011 ... -- ..... ..... 00010010 ..- @MMIRR_XX3 +PMXVF16GER2PN 000001 11 1001 -- - - pmsk:2 ------ ........ \ + 111011 ... -- ..... ..... 10010010 ..- @MMIRR_XX3 +PMXVF16GER2NP 000001 11 1001 -- - - pmsk:2 ------ ........ \ + 111011 ... -- ..... ..... 01010010 ..- @MMIRR_XX3 +PMXVF16GER2NN 000001 11 1001 -- - - pmsk:2 ------ ........ \ + 111011 ... -- ..... ..... 11010010 ..- @MMIRR_XX3 + +PMXVF32GER 000001 11 1001 -- - - -------- .... ymsk:4 \ + 111011 ... -- ..... ..... 00011011 ..- @MMIRR_XX3_NO_P xa=%xx3_xa +PMXVF32GERPP 000001 11 1001 -- - - -------- .... ymsk:4 \ + 111011 ... -- ..... ..... 00011010 ..- @MMIRR_XX3_NO_P xa=%xx3_xa +PMXVF32GERPN 000001 11 1001 -- - - -------- .... ymsk:4 \ + 111011 ... -- ..... ..... 10011010 ..- @MMIRR_XX3_NO_P xa=%xx3_xa +PMXVF32GERNP 000001 11 1001 -- - - -------- .... ymsk:4 \ + 111011 ... -- ..... ..... 01011010 ..- @MMIRR_XX3_NO_P xa=%xx3_xa +PMXVF32GERNN 000001 11 1001 -- - - -------- .... ymsk:4 \ + 111011 ... -- ..... ..... 11011010 ..- @MMIRR_XX3_NO_P xa=%xx3_xa + +PMXVF64GER 000001 11 1001 -- - - -------- .... ymsk:2 -- \ + 111011 ... -- ....0 ..... 00111011 ..- @MMIRR_XX3_NO_P xa=%xx3_xa_pair +PMXVF64GERPP 000001 11 1001 -- - - -------- .... ymsk:2 -- \ + 111011 ... -- ....0 ..... 00111010 ..- @MMIRR_XX3_NO_P xa=%xx3_xa_pair +PMXVF64GERPN 000001 11 1001 -- - - -------- .... ymsk:2 -- \ + 111011 ... -- ....0 ..... 10111010 ..- @MMIRR_XX3_NO_P xa=%xx3_xa_pair +PMXVF64GERNP 000001 11 1001 -- - - -------- .... ymsk:2 -- \ + 111011 ... -- ....0 ..... 01111010 ..- @MMIRR_XX3_NO_P xa=%xx3_xa_pair +PMXVF64GERNN 000001 11 1001 -- - - -------- .... ymsk:2 -- \ + 111011 ... -- ....0 ..... 11111010 ..- @MMIRR_XX3_NO_P xa=%xx3_xa_pair + ### Prefixed No-operation Instruction @PNOP 000001 11 0000-- 000000000000000000 \ @@ -122,3 +260,48 @@ PADDI 000001 10 0--.-- .................. \ PNOP ................................ \ -------------------------------- @PNOP } + +### VSX instructions + +PLXSD 000001 00 0--.-- .................. \ + 101010 ..... ..... ................ @8LS_D + +PSTXSD 000001 00 0--.-- .................. \ + 101110 ..... ..... ................ @8LS_D + +PLXSSP 000001 00 0--.-- .................. \ + 101011 ..... ..... ................ @8LS_D + +PSTXSSP 000001 00 0--.-- .................. \ + 101111 ..... ..... ................ @8LS_D + +PLXV 000001 00 0--.-- .................. \ + 11001 ...... ..... ................ @8LS_D_TSX +PSTXV 000001 00 0--.-- .................. \ + 11011 ...... ..... ................ @8LS_D_TSX +PLXVP 000001 00 0--.-- .................. \ + 111010 ..... ..... ................ @8LS_D_TSXP +PSTXVP 000001 00 0--.-- .................. \ + 111110 ..... ..... ................ @8LS_D_TSXP + +XXEVAL 000001 01 0000 -- ---------- ........ \ + 100010 ..... ..... ..... ..... 01 .... @8RR_XX4_imm + +XXSPLTIDP 000001 01 0000 -- -- ................ \ + 100000 ..... 0010 . ................ @8RR_D +XXSPLTIW 000001 01 0000 -- -- ................ \ + 100000 ..... 0011 . ................ @8RR_D +XXSPLTI32DX 000001 01 0000 -- -- ................ \ + 100000 ..... 000 .. ................ @8RR_D_IX + +XXBLENDVD 000001 01 0000 -- ------------------ \ + 100001 ..... ..... ..... ..... 11 .... @8RR_XX4 +XXBLENDVW 000001 01 0000 -- ------------------ \ + 100001 ..... ..... ..... ..... 10 .... @8RR_XX4 +XXBLENDVH 000001 01 0000 -- ------------------ \ + 100001 ..... ..... ..... ..... 01 .... 
@8RR_XX4 +XXBLENDVB 000001 01 0000 -- ------------------ \ + 100001 ..... ..... ..... ..... 00 .... @8RR_XX4 + +XXPERMX 000001 01 0000 -- --------------- ... \ + 100010 ..... ..... ..... ..... 00 .... @8RR_XX4_uim3 diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c index efa833ef64..d97a7f1f28 100644 --- a/target/ppc/int_helper.c +++ b/target/ppc/int_helper.c @@ -28,6 +28,7 @@ #include "fpu/softfloat.h" #include "qapi/error.h" #include "qemu/guest-random.h" +#include "tcg/tcg-gvec-desc.h" #include "helper_regs.h" /*****************************************************************************/ @@ -36,9 +37,9 @@ static inline void helper_update_ov_legacy(CPUPPCState *env, int ov) { if (unlikely(ov)) { - env->so = env->ov = 1; + env->so = env->ov = env->ov32 = 1; } else { - env->ov = 0; + env->ov = env->ov32 = 0; } } @@ -104,10 +105,11 @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe) uint64_t rt = 0; int overflow = 0; - overflow = divu128(&rt, &ra, rb); - - if (unlikely(overflow)) { + if (unlikely(rb == 0 || ra >= rb)) { + overflow = 1; rt = 0; /* Undefined */ + } else { + divu128(&rt, &ra, rb); } if (oe) { @@ -119,13 +121,16 @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe) uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe) { - int64_t rt = 0; + uint64_t rt = 0; int64_t ra = (int64_t)rau; int64_t rb = (int64_t)rbu; - int overflow = divs128(&rt, &ra, rb); + int overflow = 0; - if (unlikely(overflow)) { + if (unlikely(rb == 0 || uabs64(ra) >= uabs64(rb))) { + overflow = 1; rt = 0; /* Undefined */ + } else { + divs128(&rt, &ra, rb); } if (oe) { @@ -320,7 +325,7 @@ target_ulong helper_popcntb(target_ulong val) } #endif -uint64_t helper_cfuged(uint64_t src, uint64_t mask) +uint64_t helper_CFUGED(uint64_t src, uint64_t mask) { /* * Instead of processing the mask bit-by-bit from the most significant to @@ -382,96 +387,45 @@ uint64_t helper_cfuged(uint64_t src, uint64_t mask) return left | (right >> n); } -/*****************************************************************************/ -/* PowerPC 601 specific instructions (POWER bridge) */ -target_ulong helper_div(CPUPPCState *env, target_ulong arg1, target_ulong arg2) +uint64_t helper_PDEPD(uint64_t src, uint64_t mask) { - uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ]; + int i, o; + uint64_t result = 0; - if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || - (int32_t)arg2 == 0) { - env->spr[SPR_MQ] = 0; - return INT32_MIN; - } else { - env->spr[SPR_MQ] = tmp % arg2; - return tmp / (int32_t)arg2; + if (mask == -1) { + return src; } + + for (i = 0; mask != 0; i++) { + o = ctz64(mask); + mask &= mask - 1; + result |= ((src >> i) & 1) << o; + } + + return result; } -target_ulong helper_divo(CPUPPCState *env, target_ulong arg1, - target_ulong arg2) +uint64_t helper_PEXTD(uint64_t src, uint64_t mask) { - uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ]; + int i, o; + uint64_t result = 0; - if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || - (int32_t)arg2 == 0) { - env->so = env->ov = 1; - env->spr[SPR_MQ] = 0; - return INT32_MIN; - } else { - env->spr[SPR_MQ] = tmp % arg2; - tmp /= (int32_t)arg2; - if ((int32_t)tmp != tmp) { - env->so = env->ov = 1; - } else { - env->ov = 0; - } - return tmp; + if (mask == -1) { + return src; } -} -target_ulong helper_divs(CPUPPCState *env, target_ulong arg1, - target_ulong arg2) -{ - if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || - (int32_t)arg2 
== 0) { - env->spr[SPR_MQ] = 0; - return INT32_MIN; - } else { - env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2; - return (int32_t)arg1 / (int32_t)arg2; + for (o = 0; mask != 0; o++) { + i = ctz64(mask); + mask &= mask - 1; + result |= ((src >> i) & 1) << o; } -} -target_ulong helper_divso(CPUPPCState *env, target_ulong arg1, - target_ulong arg2) -{ - if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || - (int32_t)arg2 == 0) { - env->so = env->ov = 1; - env->spr[SPR_MQ] = 0; - return INT32_MIN; - } else { - env->ov = 0; - env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2; - return (int32_t)arg1 / (int32_t)arg2; - } + return result; } -/*****************************************************************************/ -/* 602 specific instructions */ -/* mfrom is the most crazy instruction ever seen, imho ! */ -/* Real implementation uses a ROM table. Do the same */ -/* - * Extremely decomposed: - * -arg / 256 - * return 256 * log10(10 + 1.0) + 0.5 - */ -#if !defined(CONFIG_USER_ONLY) -target_ulong helper_602_mfrom(target_ulong arg) -{ - if (likely(arg < 602)) { -#include "mfrom_table.c.inc" - return mfrom_ROM_table[arg]; - } else { - return 0; - } -} -#endif - /*****************************************************************************/ /* Altivec extension helpers */ -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN #define VECTOR_FOR_INORDER_I(index, element) \ for (index = 0; index < ARRAY_SIZE(r->element); index++) #else @@ -538,40 +492,8 @@ static inline void set_vscr_sat(CPUPPCState *env) env->vscr_sat.u32[0] = 1; } -void helper_vaddcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(r->u32); i++) { - r->u32[i] = ~a->u32[i] < b->u32[i]; - } -} - -/* vprtybw */ -void helper_vprtybw(ppc_avr_t *r, ppc_avr_t *b) -{ - int i; - for (i = 0; i < ARRAY_SIZE(r->u32); i++) { - uint64_t res = b->u32[i] ^ (b->u32[i] >> 16); - res ^= res >> 8; - r->u32[i] = res & 1; - } -} - -/* vprtybd */ -void helper_vprtybd(ppc_avr_t *r, ppc_avr_t *b) -{ - int i; - for (i = 0; i < ARRAY_SIZE(r->u64); i++) { - uint64_t res = b->u64[i] ^ (b->u64[i] >> 32); - res ^= res >> 16; - res ^= res >> 8; - r->u64[i] = res & 1; - } -} - /* vprtybq */ -void helper_vprtybq(ppc_avr_t *r, ppc_avr_t *b) +void helper_VPRTYBQ(ppc_avr_t *r, ppc_avr_t *b, uint32_t v) { uint64_t res = b->u64[0] ^ b->u64[1]; res ^= res >> 32; @@ -648,29 +570,27 @@ VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw) #undef VARITHSAT_SIGNED #undef VARITHSAT_UNSIGNED -#define VAVG_DO(name, element, etype) \ - void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ - { \ - int i; \ - \ - for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ - etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \ - r->element[i] = x >> 1; \ - } \ +#define VAVG(name, element, etype) \ + void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t v)\ + { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \ + r->element[i] = x >> 1; \ + } \ } -#define VAVG(type, signed_element, signed_type, unsigned_element, \ - unsigned_type) \ - VAVG_DO(avgs##type, signed_element, signed_type) \ - VAVG_DO(avgu##type, unsigned_element, unsigned_type) -VAVG(b, s8, int16_t, u8, uint16_t) -VAVG(h, s16, int32_t, u16, uint32_t) -VAVG(w, s32, int64_t, u32, uint64_t) -#undef VAVG_DO +VAVG(VAVGSB, s8, int16_t) +VAVG(VAVGUB, u8, uint16_t) +VAVG(VAVGSH, s16, int32_t) +VAVG(VAVGUH, u16, uint32_t) +VAVG(VAVGSW, s32, int64_t) +VAVG(VAVGUW, u32, uint64_t) 
#undef VAVG -#define VABSDU_DO(name, element) \ -void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ +#define VABSDU(name, element) \ +void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t v)\ { \ int i; \ \ @@ -686,12 +606,9 @@ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ * name - instruction mnemonic suffix (b: byte, h: halfword, w: word) * element - element type to access from vector */ -#define VABSDU(type, element) \ - VABSDU_DO(absdu##type, element) -VABSDU(b, u8) -VABSDU(h, u16) -VABSDU(w, u32) -#undef VABSDU_DO +VABSDU(VABSDUB, u8) +VABSDU(VABSDUH, u16) +VABSDU(VABSDUW, u32) #undef VABSDU #define VCF(suffix, cvt, element) \ @@ -709,100 +626,18 @@ VCF(ux, uint32_to_float32, u32) VCF(sx, int32_to_float32, s32) #undef VCF -#define VCMP_DO(suffix, compare, element, record) \ - void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \ - ppc_avr_t *a, ppc_avr_t *b) \ - { \ - uint64_t ones = (uint64_t)-1; \ - uint64_t all = ones; \ - uint64_t none = 0; \ - int i; \ - \ - for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ - uint64_t result = (a->element[i] compare b->element[i] ? \ - ones : 0x0); \ - switch (sizeof(a->element[0])) { \ - case 8: \ - r->u64[i] = result; \ - break; \ - case 4: \ - r->u32[i] = result; \ - break; \ - case 2: \ - r->u16[i] = result; \ - break; \ - case 1: \ - r->u8[i] = result; \ - break; \ - } \ - all &= result; \ - none |= result; \ - } \ - if (record) { \ - env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \ - } \ - } -#define VCMP(suffix, compare, element) \ - VCMP_DO(suffix, compare, element, 0) \ - VCMP_DO(suffix##_dot, compare, element, 1) -VCMP(equb, ==, u8) -VCMP(equh, ==, u16) -VCMP(equw, ==, u32) -VCMP(equd, ==, u64) -VCMP(gtub, >, u8) -VCMP(gtuh, >, u16) -VCMP(gtuw, >, u32) -VCMP(gtud, >, u64) -VCMP(gtsb, >, s8) -VCMP(gtsh, >, s16) -VCMP(gtsw, >, s32) -VCMP(gtsd, >, s64) -#undef VCMP_DO -#undef VCMP - -#define VCMPNE_DO(suffix, element, etype, cmpzero, record) \ -void helper_vcmpne##suffix(CPUPPCState *env, ppc_avr_t *r, \ - ppc_avr_t *a, ppc_avr_t *b) \ -{ \ - etype ones = (etype)-1; \ - etype all = ones; \ - etype result, none = 0; \ - int i; \ - \ - for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ - if (cmpzero) { \ - result = ((a->element[i] == 0) \ - || (b->element[i] == 0) \ - || (a->element[i] != b->element[i]) ? \ - ones : 0x0); \ - } else { \ - result = (a->element[i] != b->element[i]) ? ones : 0x0; \ - } \ - r->element[i] = result; \ - all &= result; \ - none |= result; \ - } \ - if (record) { \ - env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \ - } \ +#define VCMPNEZ(NAME, ELEM) \ +void helper_##NAME(ppc_vsr_t *t, ppc_vsr_t *a, ppc_vsr_t *b, uint32_t desc) \ +{ \ + for (int i = 0; i < ARRAY_SIZE(t->ELEM); i++) { \ + t->ELEM[i] = ((a->ELEM[i] == 0) || (b->ELEM[i] == 0) || \ + (a->ELEM[i] != b->ELEM[i])) ? 
-1 : 0; \ + } \ } - -/* - * VCMPNEZ - Vector compare not equal to zero - * suffix - instruction mnemonic suffix (b: byte, h: halfword, w: word) - * element - element type to access from vector - */ -#define VCMPNE(suffix, element, etype, cmpzero) \ - VCMPNE_DO(suffix, element, etype, cmpzero, 0) \ - VCMPNE_DO(suffix##_dot, element, etype, cmpzero, 1) -VCMPNE(zb, u8, uint8_t, 1) -VCMPNE(zh, u16, uint16_t, 1) -VCMPNE(zw, u32, uint32_t, 1) -VCMPNE(b, u8, uint8_t, 0) -VCMPNE(h, u16, uint16_t, 0) -VCMPNE(w, u32, uint32_t, 0) -#undef VCMPNE_DO -#undef VCMPNE +VCMPNEZ(VCMPNEZB, u8) +VCMPNEZ(VCMPNEZH, u16) +VCMPNEZ(VCMPNEZW, u32) +#undef VCMPNEZ #define VCMPFP_DO(suffix, compare, order, record) \ void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \ @@ -910,6 +745,137 @@ VCT(uxs, cvtsduw, u32) VCT(sxs, cvtsdsw, s32) #undef VCT +typedef int64_t do_ger(uint32_t, uint32_t, uint32_t); + +static int64_t ger_rank8(uint32_t a, uint32_t b, uint32_t mask) +{ + int64_t psum = 0; + for (int i = 0; i < 8; i++, mask >>= 1) { + if (mask & 1) { + psum += (int64_t)sextract32(a, 4 * i, 4) * sextract32(b, 4 * i, 4); + } + } + return psum; +} + +static int64_t ger_rank4(uint32_t a, uint32_t b, uint32_t mask) +{ + int64_t psum = 0; + for (int i = 0; i < 4; i++, mask >>= 1) { + if (mask & 1) { + psum += sextract32(a, 8 * i, 8) * (int64_t)extract32(b, 8 * i, 8); + } + } + return psum; +} + +static int64_t ger_rank2(uint32_t a, uint32_t b, uint32_t mask) +{ + int64_t psum = 0; + for (int i = 0; i < 2; i++, mask >>= 1) { + if (mask & 1) { + psum += (int64_t)sextract32(a, 16 * i, 16) * + sextract32(b, 16 * i, 16); + } + } + return psum; +} + +static void xviger(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, ppc_acc_t *at, + uint32_t mask, bool sat, bool acc, do_ger ger) +{ + uint8_t pmsk = FIELD_EX32(mask, GER_MSK, PMSK), + xmsk = FIELD_EX32(mask, GER_MSK, XMSK), + ymsk = FIELD_EX32(mask, GER_MSK, YMSK); + uint8_t xmsk_bit, ymsk_bit; + int64_t psum; + int i, j; + for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) { + for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) { + if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) { + psum = ger(a->VsrW(i), b->VsrW(j), pmsk); + if (acc) { + psum += at[i].VsrSW(j); + } + if (sat && psum > INT32_MAX) { + set_vscr_sat(env); + at[i].VsrSW(j) = INT32_MAX; + } else if (sat && psum < INT32_MIN) { + set_vscr_sat(env); + at[i].VsrSW(j) = INT32_MIN; + } else { + at[i].VsrSW(j) = (int32_t) psum; + } + } else { + at[i].VsrSW(j) = 0; + } + } + } +} + +QEMU_FLATTEN +void helper_XVI4GER8(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + xviger(env, a, b, at, mask, false, false, ger_rank8); +} + +QEMU_FLATTEN +void helper_XVI4GER8PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + xviger(env, a, b, at, mask, false, true, ger_rank8); +} + +QEMU_FLATTEN +void helper_XVI8GER4(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + xviger(env, a, b, at, mask, false, false, ger_rank4); +} + +QEMU_FLATTEN +void helper_XVI8GER4PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + xviger(env, a, b, at, mask, false, true, ger_rank4); +} + +QEMU_FLATTEN +void helper_XVI8GER4SPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + xviger(env, a, b, at, mask, true, true, ger_rank4); +} + +QEMU_FLATTEN +void helper_XVI16GER2(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + xviger(env, a, b, at, mask, false, 
false, ger_rank2); +} + +QEMU_FLATTEN +void helper_XVI16GER2S(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + xviger(env, a, b, at, mask, true, false, ger_rank2); +} + +QEMU_FLATTEN +void helper_XVI16GER2PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + xviger(env, a, b, at, mask, false, true, ger_rank2); +} + +QEMU_FLATTEN +void helper_XVI16GER2SPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, + ppc_acc_t *at, uint32_t mask) +{ + xviger(env, a, b, at, mask, true, true, ger_rank2); +} + target_ulong helper_vclzlsbb(ppc_avr_t *r) { target_ulong count = 0; @@ -936,7 +902,7 @@ target_ulong helper_vctzlsbb(ppc_avr_t *r) return count; } -void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, +void helper_VMHADDSHS(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { int sat = 0; @@ -954,7 +920,7 @@ void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, } } -void helper_vmhraddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, +void helper_VMHRADDSHS(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { int sat = 0; @@ -971,7 +937,8 @@ void helper_vmhraddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, } } -void helper_vmladduhm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) +void helper_VMLADDUHM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c, + uint32_t v) { int i; @@ -1003,8 +970,7 @@ VMRG(w, u32, VsrW) #undef VMRG_DO #undef VMRG -void helper_vmsummbm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, - ppc_avr_t *b, ppc_avr_t *c) +void helper_VMSUMMBM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { int32_t prod[16]; int i; @@ -1019,8 +985,7 @@ void helper_vmsummbm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, } } -void helper_vmsumshm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, - ppc_avr_t *b, ppc_avr_t *c) +void helper_VMSUMSHM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { int32_t prod[8]; int i; @@ -1034,7 +999,7 @@ void helper_vmsumshm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, } } -void helper_vmsumshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, +void helper_VMSUMSHS(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { int32_t prod[8]; @@ -1056,8 +1021,7 @@ void helper_vmsumshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, } } -void helper_vmsumubm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, - ppc_avr_t *b, ppc_avr_t *c) +void helper_VMSUMUBM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { uint16_t prod[16]; int i; @@ -1072,8 +1036,7 @@ void helper_vmsumubm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, } } -void helper_vmsumuhm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, - ppc_avr_t *b, ppc_avr_t *c) +void helper_VMSUMUHM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { uint32_t prod[8]; int i; @@ -1087,7 +1050,7 @@ void helper_vmsumuhm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, } } -void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, +void helper_VMSUMUHS(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { uint32_t prod[8]; @@ -1110,7 +1073,7 @@ void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, } #define VMUL_DO_EVN(name, mul_element, mul_access, prod_access, cast) \ - void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ + void helper_V##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ { \ int i; \ \ @@ -1121,7 +1084,7 @@ void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, } 
#define VMUL_DO_ODD(name, mul_element, mul_access, prod_access, cast) \ - void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ + void helper_V##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ { \ int i; \ \ @@ -1132,55 +1095,145 @@ void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, } #define VMUL(suffix, mul_element, mul_access, prod_access, cast) \ - VMUL_DO_EVN(mule##suffix, mul_element, mul_access, prod_access, cast) \ - VMUL_DO_ODD(mulo##suffix, mul_element, mul_access, prod_access, cast) -VMUL(sb, s8, VsrSB, VsrSH, int16_t) -VMUL(sh, s16, VsrSH, VsrSW, int32_t) -VMUL(sw, s32, VsrSW, VsrSD, int64_t) -VMUL(ub, u8, VsrB, VsrH, uint16_t) -VMUL(uh, u16, VsrH, VsrW, uint32_t) -VMUL(uw, u32, VsrW, VsrD, uint64_t) + VMUL_DO_EVN(MULE##suffix, mul_element, mul_access, prod_access, cast) \ + VMUL_DO_ODD(MULO##suffix, mul_element, mul_access, prod_access, cast) +VMUL(SB, s8, VsrSB, VsrSH, int16_t) +VMUL(SH, s16, VsrSH, VsrSW, int32_t) +VMUL(SW, s32, VsrSW, VsrSD, int64_t) +VMUL(UB, u8, VsrB, VsrH, uint16_t) +VMUL(UH, u16, VsrH, VsrW, uint32_t) +VMUL(UW, u32, VsrW, VsrD, uint64_t) #undef VMUL_DO_EVN #undef VMUL_DO_ODD #undef VMUL -void helper_vmulhsw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +void helper_XXPERMX(ppc_vsr_t *t, ppc_vsr_t *s0, ppc_vsr_t *s1, ppc_vsr_t *pcv, + target_ulong uim) { - int i; + int i, idx; + ppc_vsr_t tmp = { .u64 = {0, 0} }; - for (i = 0; i < 4; i++) { - r->s32[i] = (int32_t)(((int64_t)a->s32[i] * (int64_t)b->s32[i]) >> 32); + for (i = 0; i < ARRAY_SIZE(t->u8); i++) { + if ((pcv->VsrB(i) >> 5) == uim) { + idx = pcv->VsrB(i) & 0x1f; + if (idx < ARRAY_SIZE(t->u8)) { + tmp.VsrB(i) = s0->VsrB(idx); + } else { + tmp.VsrB(i) = s1->VsrB(idx - ARRAY_SIZE(t->u8)); + } + } + } + + *t = tmp; +} + +void helper_VDIVSQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b) +{ + Int128 neg1 = int128_makes64(-1); + Int128 int128_min = int128_make128(0, INT64_MIN); + if (likely(int128_nz(b->s128) && + (int128_ne(a->s128, int128_min) || int128_ne(b->s128, neg1)))) { + t->s128 = int128_divs(a->s128, b->s128); + } else { + t->s128 = a->s128; /* Undefined behavior */ } } -void helper_vmulhuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +void helper_VDIVUQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b) { - int i; - - for (i = 0; i < 4; i++) { - r->u32[i] = (uint32_t)(((uint64_t)a->u32[i] * - (uint64_t)b->u32[i]) >> 32); + if (int128_nz(b->s128)) { + t->s128 = int128_divu(a->s128, b->s128); + } else { + t->s128 = a->s128; /* Undefined behavior */ } } -void helper_vmulhsd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +void helper_VDIVESD(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b) { - uint64_t discard; - - muls64(&discard, &r->u64[0], a->s64[0], b->s64[0]); - muls64(&discard, &r->u64[1], a->s64[1], b->s64[1]); + int i; + int64_t high; + uint64_t low; + for (i = 0; i < 2; i++) { + high = a->s64[i]; + low = 0; + if (unlikely((high == INT64_MIN && b->s64[i] == -1) || !b->s64[i])) { + t->s64[i] = a->s64[i]; /* Undefined behavior */ + } else { + divs128(&low, &high, b->s64[i]); + t->s64[i] = low; + } + } } -void helper_vmulhud(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +void helper_VDIVEUD(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b) { - uint64_t discard; - - mulu64(&discard, &r->u64[0], a->u64[0], b->u64[0]); - mulu64(&discard, &r->u64[1], a->u64[1], b->u64[1]); + int i; + uint64_t high, low; + for (i = 0; i < 2; i++) { + high = a->u64[i]; + low = 0; + if (unlikely(!b->u64[i])) { + t->u64[i] = a->u64[i]; /* Undefined behavior */ + } else { + divu128(&low, &high, b->u64[i]); + t->u64[i] = low; + } + } 
} -void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, - ppc_avr_t *c) +void helper_VDIVESQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b) +{ + Int128 high, low; + Int128 int128_min = int128_make128(0, INT64_MIN); + Int128 neg1 = int128_makes64(-1); + + high = a->s128; + low = int128_zero(); + if (unlikely(!int128_nz(b->s128) || + (int128_eq(b->s128, neg1) && int128_eq(high, int128_min)))) { + t->s128 = a->s128; /* Undefined behavior */ + } else { + divs256(&low, &high, b->s128); + t->s128 = low; + } +} + +void helper_VDIVEUQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b) +{ + Int128 high, low; + + high = a->s128; + low = int128_zero(); + if (unlikely(!int128_nz(b->s128))) { + t->s128 = a->s128; /* Undefined behavior */ + } else { + divu256(&low, &high, b->s128); + t->s128 = low; + } +} + +void helper_VMODSQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b) +{ + Int128 neg1 = int128_makes64(-1); + Int128 int128_min = int128_make128(0, INT64_MIN); + if (likely(int128_nz(b->s128) && + (int128_ne(a->s128, int128_min) || int128_ne(b->s128, neg1)))) { + t->s128 = int128_rems(a->s128, b->s128); + } else { + t->s128 = int128_zero(); /* Undefined behavior */ + } +} + +void helper_VMODUQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b) +{ + if (likely(int128_nz(b->s128))) { + t->s128 = int128_remu(a->s128, b->s128); + } else { + t->s128 = int128_zero(); /* Undefined behavior */ + } +} + +void helper_VPERM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { ppc_avr_t result; int i; @@ -1198,8 +1251,7 @@ void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, *r = result; } -void helper_vpermr(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, - ppc_avr_t *c) +void helper_VPERMR(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { ppc_avr_t result; int i; @@ -1217,18 +1269,122 @@ void helper_vpermr(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, *r = result; } -#if defined(HOST_WORDS_BIGENDIAN) +#define XXGENPCV_BE_EXP(NAME, SZ) \ +void glue(helper_, glue(NAME, _be_exp))(ppc_vsr_t *t, ppc_vsr_t *b) \ +{ \ + ppc_vsr_t tmp; \ + \ + /* Initialize tmp with the result of an all-zeros mask */ \ + tmp.VsrD(0) = 0x1011121314151617; \ + tmp.VsrD(1) = 0x18191A1B1C1D1E1F; \ + \ + /* Iterate over the most significant byte of each element */ \ + for (int i = 0, j = 0; i < ARRAY_SIZE(b->u8); i += SZ) { \ + if (b->VsrB(i) & 0x80) { \ + /* Update each byte of the element */ \ + for (int k = 0; k < SZ; k++) { \ + tmp.VsrB(i + k) = j + k; \ + } \ + j += SZ; \ + } \ + } \ + \ + *t = tmp; \ +} + +#define XXGENPCV_BE_COMP(NAME, SZ) \ +void glue(helper_, glue(NAME, _be_comp))(ppc_vsr_t *t, ppc_vsr_t *b)\ +{ \ + ppc_vsr_t tmp = { .u64 = { 0, 0 } }; \ + \ + /* Iterate over the most significant byte of each element */ \ + for (int i = 0, j = 0; i < ARRAY_SIZE(b->u8); i += SZ) { \ + if (b->VsrB(i) & 0x80) { \ + /* Update each byte of the element */ \ + for (int k = 0; k < SZ; k++) { \ + tmp.VsrB(j + k) = i + k; \ + } \ + j += SZ; \ + } \ + } \ + \ + *t = tmp; \ +} + +#define XXGENPCV_LE_EXP(NAME, SZ) \ +void glue(helper_, glue(NAME, _le_exp))(ppc_vsr_t *t, ppc_vsr_t *b) \ +{ \ + ppc_vsr_t tmp; \ + \ + /* Initialize tmp with the result of an all-zeros mask */ \ + tmp.VsrD(0) = 0x1F1E1D1C1B1A1918; \ + tmp.VsrD(1) = 0x1716151413121110; \ + \ + /* Iterate over the most significant byte of each element */ \ + for (int i = 0, j = 0; i < ARRAY_SIZE(b->u8); i += SZ) { \ + /* Reverse indexing of "i" */ \ + const int idx = ARRAY_SIZE(b->u8) - i - SZ; \ + if (b->VsrB(idx) & 0x80) { 
\ + /* Update each byte of the element */ \ + for (int k = 0, rk = SZ - 1; k < SZ; k++, rk--) { \ + tmp.VsrB(idx + rk) = j + k; \ + } \ + j += SZ; \ + } \ + } \ + \ + *t = tmp; \ +} + +#define XXGENPCV_LE_COMP(NAME, SZ) \ +void glue(helper_, glue(NAME, _le_comp))(ppc_vsr_t *t, ppc_vsr_t *b)\ +{ \ + ppc_vsr_t tmp = { .u64 = { 0, 0 } }; \ + \ + /* Iterate over the most significant byte of each element */ \ + for (int i = 0, j = 0; i < ARRAY_SIZE(b->u8); i += SZ) { \ + if (b->VsrB(ARRAY_SIZE(b->u8) - i - SZ) & 0x80) { \ + /* Update each byte of the element */ \ + for (int k = 0, rk = SZ - 1; k < SZ; k++, rk--) { \ + /* Reverse indexing of "j" */ \ + const int idx = ARRAY_SIZE(b->u8) - j - SZ; \ + tmp.VsrB(idx + rk) = i + k; \ + } \ + j += SZ; \ + } \ + } \ + \ + *t = tmp; \ +} + +#define XXGENPCV(NAME, SZ) \ + XXGENPCV_BE_EXP(NAME, SZ) \ + XXGENPCV_BE_COMP(NAME, SZ) \ + XXGENPCV_LE_EXP(NAME, SZ) \ + XXGENPCV_LE_COMP(NAME, SZ) \ + +XXGENPCV(XXGENPCVBM, 1) +XXGENPCV(XXGENPCVHM, 2) +XXGENPCV(XXGENPCVWM, 4) +XXGENPCV(XXGENPCVDM, 8) + +#undef XXGENPCV_BE_EXP +#undef XXGENPCV_BE_COMP +#undef XXGENPCV_LE_EXP +#undef XXGENPCV_LE_COMP +#undef XXGENPCV + +#if HOST_BIG_ENDIAN #define VBPERMQ_INDEX(avr, i) ((avr)->u8[(i)]) #define VBPERMD_INDEX(i) (i) #define VBPERMQ_DW(index) (((index) & 0x40) != 0) -#define EXTRACT_BIT(avr, i, index) (extract64((avr)->u64[i], index, 1)) #else #define VBPERMQ_INDEX(avr, i) ((avr)->u8[15 - (i)]) #define VBPERMD_INDEX(i) (1 - i) #define VBPERMQ_DW(index) (((index) & 0x40) == 0) -#define EXTRACT_BIT(avr, i, index) \ - (extract64((avr)->u64[1 - i], 63 - index, 1)) #endif +#define EXTRACT_BIT(avr, i, index) \ + (extract64((avr)->VsrD(i), 63 - index, 1)) void helper_vbpermd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { @@ -1292,53 +1448,25 @@ PMSUM(vpmsumb, u8, u16, uint16_t) PMSUM(vpmsumh, u16, u32, uint32_t) PMSUM(vpmsumw, u32, u64, uint64_t) -void helper_vpmsumd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +void helper_VPMSUMD(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { - -#ifdef CONFIG_INT128 int i, j; - __uint128_t prod[2]; + Int128 tmp, prod[2] = {int128_zero(), int128_zero()}; - VECTOR_FOR_INORDER_I(i, u64) { - prod[i] = 0; - for (j = 0; j < 64; j++) { - if (a->u64[i] & (1ull << j)) { - prod[i] ^= (((__uint128_t)b->u64[i]) << j); + for (j = 0; j < 64; j++) { + for (i = 0; i < ARRAY_SIZE(r->u64); i++) { + if (a->VsrD(i) & (1ull << j)) { + tmp = int128_make64(b->VsrD(i)); + tmp = int128_lshift(tmp, j); + prod[i] = int128_xor(prod[i], tmp); } } } - r->u128 = prod[0] ^ prod[1]; - -#else - int i, j; - ppc_avr_t prod[2]; - - VECTOR_FOR_INORDER_I(i, u64) { - prod[i].VsrD(1) = prod[i].VsrD(0) = 0; - for (j = 0; j < 64; j++) { - if (a->u64[i] & (1ull << j)) { - ppc_avr_t bshift; - if (j == 0) { - bshift.VsrD(0) = 0; - bshift.VsrD(1) = b->u64[i]; - } else { - bshift.VsrD(0) = b->u64[i] >> (64 - j); - bshift.VsrD(1) = b->u64[i] << j; - } - prod[i].VsrD(1) ^= bshift.VsrD(1); - prod[i].VsrD(0) ^= bshift.VsrD(0); - } - } - } - - r->VsrD(1) = prod[0].VsrD(1) ^ prod[1].VsrD(1); - r->VsrD(0) = prod[0].VsrD(0) ^ prod[1].VsrD(0); -#endif + r->s128 = int128_xor(prod[0], prod[1]); } - -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN #define PKBIG 1 #else #define PKBIG 0 @@ -1347,7 +1475,7 @@ void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { int i, j; ppc_avr_t result; -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN const ppc_avr_t *x[2] = { a, b }; #else const ppc_avr_t *x[2] = { b, a }; @@ -1439,40 +1567,33 @@ void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, 
ppc_avr_t *b) } } -#define VRLMI(name, size, element, insert) \ -void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ -{ \ - int i; \ - for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ - uint##size##_t src1 = a->element[i]; \ - uint##size##_t src2 = b->element[i]; \ - uint##size##_t src3 = r->element[i]; \ - uint##size##_t begin, end, shift, mask, rot_val; \ - \ - shift = extract##size(src2, 0, 6); \ - end = extract##size(src2, 8, 6); \ - begin = extract##size(src2, 16, 6); \ - rot_val = rol##size(src1, shift); \ - mask = mask_u##size(begin, end); \ - if (insert) { \ - r->element[i] = (rot_val & mask) | (src3 & ~mask); \ - } else { \ - r->element[i] = (rot_val & mask); \ - } \ - } \ +#define VRLMI(name, size, element, insert) \ +void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t desc) \ +{ \ + int i; \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + uint##size##_t src1 = a->element[i]; \ + uint##size##_t src2 = b->element[i]; \ + uint##size##_t src3 = r->element[i]; \ + uint##size##_t begin, end, shift, mask, rot_val; \ + \ + shift = extract##size(src2, 0, 6); \ + end = extract##size(src2, 8, 6); \ + begin = extract##size(src2, 16, 6); \ + rot_val = rol##size(src1, shift); \ + mask = mask_u##size(begin, end); \ + if (insert) { \ + r->element[i] = (rot_val & mask) | (src3 & ~mask); \ + } else { \ + r->element[i] = (rot_val & mask); \ + } \ + } \ } -VRLMI(vrldmi, 64, u64, 1); -VRLMI(vrlwmi, 32, u32, 1); -VRLMI(vrldnm, 64, u64, 0); -VRLMI(vrlwnm, 32, u32, 0); - -void helper_vsel(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, - ppc_avr_t *c) -{ - r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]); - r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]); -} +VRLMI(VRLDMI, 64, u64, 1); +VRLMI(VRLWMI, 32, u32, 1); +VRLMI(VRLDNM, 64, u64, 0); +VRLMI(VRLWNM, 32, u32, 0); void helper_vexptefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b) { @@ -1492,34 +1613,16 @@ void helper_vlogefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b) } } -#if defined(HOST_WORDS_BIGENDIAN) -#define VEXTU_X_DO(name, size, left) \ - target_ulong glue(helper_, name)(target_ulong a, ppc_avr_t *b) \ - { \ - int index; \ - if (left) { \ - index = (a & 0xf) * 8; \ - } else { \ - index = ((15 - (a & 0xf) + 1) * 8) - size; \ - } \ - return int128_getlo(int128_rshift(b->s128, index)) & \ - MAKE_64BIT_MASK(0, size); \ - } -#else -#define VEXTU_X_DO(name, size, left) \ - target_ulong glue(helper_, name)(target_ulong a, ppc_avr_t *b) \ - { \ - int index; \ - if (left) { \ - index = ((15 - (a & 0xf) + 1) * 8) - size; \ - } else { \ - index = (a & 0xf) * 8; \ - } \ - return int128_getlo(int128_rshift(b->s128, index)) & \ - MAKE_64BIT_MASK(0, size); \ - } -#endif - +#define VEXTU_X_DO(name, size, left) \ +target_ulong glue(helper_, name)(target_ulong a, ppc_avr_t *b) \ +{ \ + int index = (a & 0xf) * 8; \ + if (left) { \ + index = 128 - index - size; \ + } \ + return int128_getlo(int128_rshift(b->s128, index)) & \ + MAKE_64BIT_MASK(0, size); \ +} VEXTU_X_DO(vextublx, 8, 1) VEXTU_X_DO(vextuhlx, 16, 1) VEXTU_X_DO(vextuwlx, 32, 1) @@ -1581,7 +1684,7 @@ void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { int sh = (b->VsrB(0xf) >> 3) & 0xf; -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN memmove(&r->u8[0], &a->u8[sh], 16 - sh); memset(&r->u8[16 - sh], 0, sh); #else @@ -1590,27 +1693,75 @@ void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) #endif } -#if defined(HOST_WORDS_BIGENDIAN) -#define VINSERT(suffix, element) \ - void 
helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \ - { \ - memmove(&r->u8[index], &b->u8[8 - sizeof(r->element[0])], \ - sizeof(r->element[0])); \ - } +#if HOST_BIG_ENDIAN +#define ELEM_ADDR(VEC, IDX, SIZE) (&(VEC)->u8[IDX]) #else -#define VINSERT(suffix, element) \ - void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \ - { \ - uint32_t d = (16 - index) - sizeof(r->element[0]); \ - memmove(&r->u8[d], &b->u8[8], sizeof(r->element[0])); \ - } +#define ELEM_ADDR(VEC, IDX, SIZE) (&(VEC)->u8[15 - (IDX)] - (SIZE) + 1) #endif -VINSERT(b, u8) -VINSERT(h, u16) -VINSERT(w, u32) -VINSERT(d, u64) -#undef VINSERT -#if defined(HOST_WORDS_BIGENDIAN) + +#define VINSX(SUFFIX, TYPE) \ +void glue(glue(helper_VINS, SUFFIX), LX)(CPUPPCState *env, ppc_avr_t *t, \ + uint64_t val, target_ulong index) \ +{ \ + const int maxidx = ARRAY_SIZE(t->u8) - sizeof(TYPE); \ + target_long idx = index; \ + \ + if (idx < 0 || idx > maxidx) { \ + idx = idx < 0 ? sizeof(TYPE) - idx : idx; \ + qemu_log_mask(LOG_GUEST_ERROR, \ + "Invalid index for Vector Insert Element after 0x" TARGET_FMT_lx \ + ", RA = " TARGET_FMT_ld " > %d\n", env->nip, idx, maxidx); \ + } else { \ + TYPE src = val; \ + memcpy(ELEM_ADDR(t, idx, sizeof(TYPE)), &src, sizeof(TYPE)); \ + } \ +} +VINSX(B, uint8_t) +VINSX(H, uint16_t) +VINSX(W, uint32_t) +VINSX(D, uint64_t) +#undef ELEM_ADDR +#undef VINSX +#if HOST_BIG_ENDIAN +#define VEXTDVLX(NAME, SIZE) \ +void helper_##NAME(CPUPPCState *env, ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \ + target_ulong index) \ +{ \ + const target_long idx = index; \ + ppc_avr_t tmp[2] = { *a, *b }; \ + memset(t, 0, sizeof(*t)); \ + if (idx >= 0 && idx + SIZE <= sizeof(tmp)) { \ + memcpy(&t->u8[ARRAY_SIZE(t->u8) / 2 - SIZE], (void *)tmp + idx, SIZE); \ + } else { \ + qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for " #NAME " after 0x" \ + TARGET_FMT_lx ", RC = " TARGET_FMT_ld " > %d\n", \ + env->nip, idx < 0 ? SIZE - idx : idx, 32 - SIZE); \ + } \ +} +#else +#define VEXTDVLX(NAME, SIZE) \ +void helper_##NAME(CPUPPCState *env, ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \ + target_ulong index) \ +{ \ + const target_long idx = index; \ + ppc_avr_t tmp[2] = { *b, *a }; \ + memset(t, 0, sizeof(*t)); \ + if (idx >= 0 && idx + SIZE <= sizeof(tmp)) { \ + memcpy(&t->u8[ARRAY_SIZE(t->u8) / 2], \ + (void *)tmp + sizeof(tmp) - SIZE - idx, SIZE); \ + } else { \ + qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for " #NAME " after 0x" \ + TARGET_FMT_lx ", RC = " TARGET_FMT_ld " > %d\n", \ + env->nip, idx < 0 ? SIZE - idx : idx, 32 - SIZE); \ + } \ +} +#endif +VEXTDVLX(VEXTDUBVLX, 1) +VEXTDVLX(VEXTDUHVLX, 2) +VEXTDVLX(VEXTDUWVLX, 4) +VEXTDVLX(VEXTDDVLX, 8) +#undef VEXTDVLX +#if HOST_BIG_ENDIAN #define VEXTRACT(suffix, element) \ void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \ { \ @@ -1636,8 +1787,35 @@ VEXTRACT(uw, u32) VEXTRACT(d, u64) #undef VEXTRACT -void helper_xxextractuw(CPUPPCState *env, ppc_vsr_t *xt, - ppc_vsr_t *xb, uint32_t index) +#define VSTRI(NAME, ELEM, NUM_ELEMS, LEFT) \ +uint32_t helper_##NAME(ppc_avr_t *t, ppc_avr_t *b) \ +{ \ + int i, idx, crf = 0; \ + \ + for (i = 0; i < NUM_ELEMS; i++) { \ + idx = LEFT ? i : NUM_ELEMS - i - 1; \ + if (b->Vsr##ELEM(idx)) { \ + t->Vsr##ELEM(idx) = b->Vsr##ELEM(idx); \ + } else { \ + crf = 0b0010; \ + break; \ + } \ + } \ + \ + for (; i < NUM_ELEMS; i++) { \ + idx = LEFT ? 
i : NUM_ELEMS - i - 1; \ + t->Vsr##ELEM(idx) = 0; \ + } \ + \ + return crf; \ +} +VSTRI(VSTRIBL, B, 16, true) +VSTRI(VSTRIBR, B, 16, false) +VSTRI(VSTRIHL, H, 8, true) +VSTRI(VSTRIHR, H, 8, false) +#undef VSTRI + +void helper_XXEXTRACTUW(ppc_vsr_t *xt, ppc_vsr_t *xb, uint32_t index) { ppc_vsr_t t = { }; size_t es = sizeof(uint32_t); @@ -1652,8 +1830,7 @@ void helper_xxextractuw(CPUPPCState *env, ppc_vsr_t *xt, *xt = t; } -void helper_xxinsertw(CPUPPCState *env, ppc_vsr_t *xt, - ppc_vsr_t *xb, uint32_t index) +void helper_XXINSERTW(ppc_vsr_t *xt, ppc_vsr_t *xb, uint32_t index) { ppc_vsr_t t = *xt; size_t es = sizeof(uint32_t); @@ -1667,38 +1844,67 @@ void helper_xxinsertw(CPUPPCState *env, ppc_vsr_t *xt, *xt = t; } -#define VEXT_SIGNED(name, element, cast) \ -void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \ -{ \ - int i; \ - for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ - r->element[i] = (cast)b->element[i]; \ - } \ -} -VEXT_SIGNED(vextsb2w, s32, int8_t) -VEXT_SIGNED(vextsb2d, s64, int8_t) -VEXT_SIGNED(vextsh2w, s32, int16_t) -VEXT_SIGNED(vextsh2d, s64, int16_t) -VEXT_SIGNED(vextsw2d, s64, int32_t) -#undef VEXT_SIGNED +void helper_XXEVAL(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c, + uint32_t desc) +{ + /* + * Instead of processing imm bit-by-bit, we'll skip the computation of + * conjunctions whose corresponding bit is unset. + */ + int bit, imm = simd_data(desc); + Int128 conj, disj = int128_zero(); -#define VNEG(name, element) \ -void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \ -{ \ - int i; \ - for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ - r->element[i] = -b->element[i]; \ - } \ + /* Iterate over set bits from the least to the most significant bit */ + while (imm) { + /* + * Get the next bit to be processed with ctz64. Invert the result of + * ctz64 to match the indexing used by PowerISA. + */ + bit = 7 - ctzl(imm); + if (bit & 0x4) { + conj = a->s128; + } else { + conj = int128_not(a->s128); + } + if (bit & 0x2) { + conj = int128_and(conj, b->s128); + } else { + conj = int128_and(conj, int128_not(b->s128)); + } + if (bit & 0x1) { + conj = int128_and(conj, c->s128); + } else { + conj = int128_and(conj, int128_not(c->s128)); + } + disj = int128_or(disj, conj); + + /* Unset the least significant bit that is set */ + imm &= imm - 1; + } + + t->s128 = disj; } -VNEG(vnegw, s32) -VNEG(vnegd, s64) -#undef VNEG + +#define XXBLEND(name, sz) \ +void glue(helper_XXBLENDV, name)(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \ + ppc_avr_t *c, uint32_t desc) \ +{ \ + for (int i = 0; i < ARRAY_SIZE(t->glue(u, sz)); i++) { \ + t->glue(u, sz)[i] = (c->glue(s, sz)[i] >> (sz - 1)) ? 
\ + b->glue(u, sz)[i] : a->glue(u, sz)[i]; \ + } \ +} +XXBLEND(B, 8) +XXBLEND(H, 16) +XXBLEND(W, 32) +XXBLEND(D, 64) +#undef XXBLEND void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { int sh = (b->VsrB(0xf) >> 3) & 0xf; -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN memmove(&r->u8[sh], &a->u8[0], 16 - sh); memset(&r->u8[0], 0, sh); #else @@ -1707,15 +1913,6 @@ void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) #endif } -void helper_vsubcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(r->u32); i++) { - r->u32[i] = a->u32[i] >= b->u32[i]; - } -} - void helper_vsumsws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { int64_t t; @@ -1815,7 +2012,7 @@ void helper_vsum4ubs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) } } -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN #define UPKHI 1 #define UPKLO 0 #else @@ -1922,189 +2119,66 @@ VGENERIC_DO(popcntd, u64) #undef VGENERIC_DO -#if defined(HOST_WORDS_BIGENDIAN) -#define QW_ONE { .u64 = { 0, 1 } } -#else -#define QW_ONE { .u64 = { 1, 0 } } -#endif - -#ifndef CONFIG_INT128 - -static inline void avr_qw_not(ppc_avr_t *t, ppc_avr_t a) +void helper_VADDUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { - t->u64[0] = ~a.u64[0]; - t->u64[1] = ~a.u64[1]; + r->s128 = int128_add(a->s128, b->s128); } -static int avr_qw_cmpu(ppc_avr_t a, ppc_avr_t b) +void helper_VADDEUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { - if (a.VsrD(0) < b.VsrD(0)) { - return -1; - } else if (a.VsrD(0) > b.VsrD(0)) { - return 1; - } else if (a.VsrD(1) < b.VsrD(1)) { - return -1; - } else if (a.VsrD(1) > b.VsrD(1)) { - return 1; - } else { - return 0; - } + r->s128 = int128_add(int128_add(a->s128, b->s128), + int128_make64(int128_getlo(c->s128) & 1)); } -static void avr_qw_add(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b) +void helper_VADDCUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { - t->VsrD(1) = a.VsrD(1) + b.VsrD(1); - t->VsrD(0) = a.VsrD(0) + b.VsrD(0) + - (~a.VsrD(1) < b.VsrD(1)); -} - -static int avr_qw_addc(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b) -{ - ppc_avr_t not_a; - t->VsrD(1) = a.VsrD(1) + b.VsrD(1); - t->VsrD(0) = a.VsrD(0) + b.VsrD(0) + - (~a.VsrD(1) < b.VsrD(1)); - avr_qw_not(¬_a, a); - return avr_qw_cmpu(not_a, b) < 0; -} - -#endif - -void helper_vadduqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) -{ -#ifdef CONFIG_INT128 - r->u128 = a->u128 + b->u128; -#else - avr_qw_add(r, *a, *b); -#endif -} - -void helper_vaddeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) -{ -#ifdef CONFIG_INT128 - r->u128 = a->u128 + b->u128 + (c->u128 & 1); -#else - - if (c->VsrD(1) & 1) { - ppc_avr_t tmp; - - tmp.VsrD(0) = 0; - tmp.VsrD(1) = c->VsrD(1) & 1; - avr_qw_add(&tmp, *a, tmp); - avr_qw_add(r, tmp, *b); - } else { - avr_qw_add(r, *a, *b); - } -#endif -} - -void helper_vaddcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) -{ -#ifdef CONFIG_INT128 - r->u128 = (~a->u128 < b->u128); -#else - ppc_avr_t not_a; - - avr_qw_not(¬_a, *a); - + r->VsrD(1) = int128_ult(int128_not(a->s128), b->s128); r->VsrD(0) = 0; - r->VsrD(1) = (avr_qw_cmpu(not_a, *b) < 0); -#endif } -void helper_vaddecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) +void helper_VADDECUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { -#ifdef CONFIG_INT128 - int carry_out = (~a->u128 < b->u128); - if (!carry_out && (c->u128 & 1)) { - carry_out = ((a->u128 + b->u128 + 1) == 0) && - ((a->u128 != 0) || (b->u128 != 0)); - } - r->u128 = carry_out; -#else - - int carry_in = c->VsrD(1) & 1; - int 
carry_out = 0; - ppc_avr_t tmp; - - carry_out = avr_qw_addc(&tmp, *a, *b); + bool carry_out = int128_ult(int128_not(a->s128), b->s128), + carry_in = int128_getlo(c->s128) & 1; if (!carry_out && carry_in) { - ppc_avr_t one = QW_ONE; - carry_out = avr_qw_addc(&tmp, tmp, one); - } - r->VsrD(0) = 0; - r->VsrD(1) = carry_out; -#endif -} - -void helper_vsubuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) -{ -#ifdef CONFIG_INT128 - r->u128 = a->u128 - b->u128; -#else - ppc_avr_t tmp; - ppc_avr_t one = QW_ONE; - - avr_qw_not(&tmp, *b); - avr_qw_add(&tmp, *a, tmp); - avr_qw_add(r, tmp, one); -#endif -} - -void helper_vsubeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) -{ -#ifdef CONFIG_INT128 - r->u128 = a->u128 + ~b->u128 + (c->u128 & 1); -#else - ppc_avr_t tmp, sum; - - avr_qw_not(&tmp, *b); - avr_qw_add(&sum, *a, tmp); - - tmp.VsrD(0) = 0; - tmp.VsrD(1) = c->VsrD(1) & 1; - avr_qw_add(r, sum, tmp); -#endif -} - -void helper_vsubcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) -{ -#ifdef CONFIG_INT128 - r->u128 = (~a->u128 < ~b->u128) || - (a->u128 + ~b->u128 == (__uint128_t)-1); -#else - int carry = (avr_qw_cmpu(*a, *b) > 0); - if (!carry) { - ppc_avr_t tmp; - avr_qw_not(&tmp, *b); - avr_qw_add(&tmp, *a, tmp); - carry = ((tmp.VsrSD(0) == -1ull) && (tmp.VsrSD(1) == -1ull)); - } - r->VsrD(0) = 0; - r->VsrD(1) = carry; -#endif -} - -void helper_vsubecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) -{ -#ifdef CONFIG_INT128 - r->u128 = - (~a->u128 < ~b->u128) || - ((c->u128 & 1) && (a->u128 + ~b->u128 == (__uint128_t)-1)); -#else - int carry_in = c->VsrD(1) & 1; - int carry_out = (avr_qw_cmpu(*a, *b) > 0); - if (!carry_out && carry_in) { - ppc_avr_t tmp; - avr_qw_not(&tmp, *b); - avr_qw_add(&tmp, *a, tmp); - carry_out = ((tmp.VsrD(0) == -1ull) && (tmp.VsrD(1) == -1ull)); + carry_out = (int128_nz(a->s128) || int128_nz(b->s128)) && + int128_eq(int128_add(a->s128, b->s128), int128_makes64(-1)); } r->VsrD(0) = 0; r->VsrD(1) = carry_out; -#endif +} + +void helper_VSUBUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + r->s128 = int128_sub(a->s128, b->s128); +} + +void helper_VSUBEUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) +{ + r->s128 = int128_add(int128_add(a->s128, int128_not(b->s128)), + int128_make64(int128_getlo(c->s128) & 1)); +} + +void helper_VSUBCUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + Int128 tmp = int128_not(b->s128); + + r->VsrD(1) = int128_ult(int128_not(a->s128), tmp) || + int128_eq(int128_add(a->s128, tmp), int128_makes64(-1)); + r->VsrD(0) = 0; +} + +void helper_VSUBECUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) +{ + Int128 tmp = int128_not(b->s128); + bool carry_out = int128_ult(int128_not(a->s128), tmp), + carry_in = int128_getlo(c->s128) & 1; + + r->VsrD(1) = carry_out || (carry_in && int128_eq(int128_add(a->s128, tmp), + int128_makes64(-1))); + r->VsrD(0) = 0; } #define BCD_PLUS_PREF_1 0xC @@ -2498,41 +2572,77 @@ uint32_t helper_bcdctz(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) return cr; } +/** + * Compare 2 128-bit unsigned integers, passed in as unsigned 64-bit pairs + * + * Returns: + * > 0 if ahi|alo > bhi|blo, + * 0 if ahi|alo == bhi|blo, + * < 0 if ahi|alo < bhi|blo + */ +static inline int ucmp128(uint64_t alo, uint64_t ahi, + uint64_t blo, uint64_t bhi) +{ + return (ahi == bhi) ? + (alo > blo ? 1 : (alo == blo ? 0 : -1)) : + (ahi > bhi ? 
1 : -1); +} + uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) { int i; - int cr = 0; + int cr; uint64_t lo_value; uint64_t hi_value; + uint64_t rem; ppc_avr_t ret = { .u64 = { 0, 0 } }; if (b->VsrSD(0) < 0) { lo_value = -b->VsrSD(1); hi_value = ~b->VsrD(0) + !lo_value; bcd_put_digit(&ret, 0xD, 0); + + cr = CRF_LT; } else { lo_value = b->VsrD(1); hi_value = b->VsrD(0); bcd_put_digit(&ret, bcd_preferred_sgn(0, ps), 0); + + if (hi_value == 0 && lo_value == 0) { + cr = CRF_EQ; + } else { + cr = CRF_GT; + } } - if (divu128(&lo_value, &hi_value, 1000000000000000ULL) || - lo_value > 9999999999999999ULL) { - cr = CRF_SO; + /* + * Check src limits: abs(src) <= 10^31 - 1 + * + * 10^31 - 1 = 0x0000007e37be2022 c0914b267fffffff + */ + if (ucmp128(lo_value, hi_value, + 0xc0914b267fffffffULL, 0x7e37be2022ULL) > 0) { + cr |= CRF_SO; + + /* + * According to the ISA, if src wouldn't fit in the destination + * register, the result is undefined. + * In that case, we leave r unchanged. + */ + } else { + rem = divu128(&lo_value, &hi_value, 1000000000000000ULL); + + for (i = 1; i < 16; rem /= 10, i++) { + bcd_put_digit(&ret, rem % 10, i); + } + + for (; i < 32; lo_value /= 10, i++) { + bcd_put_digit(&ret, lo_value % 10, i); + } + + *r = ret; } - for (i = 1; i < 16; hi_value /= 10, i++) { - bcd_put_digit(&ret, hi_value % 10, i); - } - - for (; i < 32; lo_value /= 10, i++) { - bcd_put_digit(&ret, lo_value % 10, i); - } - - cr |= bcd_cmp_zero(&ret); - - *r = ret; - return cr; } diff --git a/target/ppc/internal.h b/target/ppc/internal.h index f1fd3c8d04..337a362205 100644 --- a/target/ppc/internal.h +++ b/target/ppc/internal.h @@ -18,6 +18,8 @@ #ifndef PPC_INTERNAL_H #define PPC_INTERNAL_H +#include "hw/registerfields.h" + #define FUNC_MASK(name, ret_type, size, max_val) \ static inline ret_type name(uint##size##_t start, \ uint##size##_t end) \ @@ -157,15 +159,15 @@ EXTRACT_HELPER(FPL, 25, 1); EXTRACT_HELPER(FPFLM, 17, 8); EXTRACT_HELPER(FPW, 16, 1); -/* mffscrni */ -EXTRACT_HELPER(RM, 11, 2); - /* addpcis */ EXTRACT_HELPER_SPLIT_3(DX, 10, 6, 6, 5, 16, 1, 1, 0, 0) #if defined(TARGET_PPC64) /* darn */ EXTRACT_HELPER(L, 16, 2); #endif +/* wait */ +EXTRACT_HELPER(WC, 21, 2); +EXTRACT_HELPER(PL, 16, 2); /*** Jump target decoding ***/ /* Immediate address */ @@ -211,11 +213,6 @@ void helper_compute_fprf_float16(CPUPPCState *env, float16 arg); void helper_compute_fprf_float32(CPUPPCState *env, float32 arg); void helper_compute_fprf_float128(CPUPPCState *env, float128 arg); -/* Raise a data fault alignment exception for the specified virtual address */ -void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); - /* translate.c */ int ppc_fixup_cpu(PowerPCCPU *cpu); @@ -245,4 +242,68 @@ static inline int prot_for_access_type(MMUAccessType access_type) g_assert_not_reached(); } +/* PowerPC MMU emulation */ + +typedef struct mmu_ctx_t mmu_ctx_t; +bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, + hwaddr *raddrp, int *psizep, int *protp, + int mmu_idx, bool guest_visible); +int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, + MMUAccessType access_type, int type, + int mmu_idx); +/* Software driven TLB helpers */ +int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr, + int way, int is_code); +/* Context used internally during MMU translations */ +struct mmu_ctx_t { + hwaddr raddr; /* Real address */ + hwaddr eaddr; /* Effective address */ + int prot; /* Protection bits */ + hwaddr 
hash[2]; /* Pagetable hash values */ + target_ulong ptem; /* Virtual segment ID | API */ + int key; /* Access key */ + int nx; /* Non-execute area */ +}; + +/* Common routines used by software and hardware TLBs emulation */ +static inline int pte_is_valid(target_ulong pte0) +{ + return pte0 & 0x80000000 ? 1 : 0; +} + +static inline void pte_invalidate(target_ulong *pte0) +{ + *pte0 &= ~0x80000000; +} + +#define PTE_PTEM_MASK 0x7FFFFFBF +#define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B) + +#ifdef CONFIG_USER_ONLY +void ppc_cpu_record_sigsegv(CPUState *cs, vaddr addr, + MMUAccessType access_type, + bool maperr, uintptr_t ra); +#else +bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); +G_NORETURN void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr); +#endif + +FIELD(GER_MSK, XMSK, 0, 4) +FIELD(GER_MSK, YMSK, 4, 4) +FIELD(GER_MSK, PMSK, 8, 8) + +static inline int ger_pack_masks(int pmsk, int ymsk, int xmsk) +{ + int msk = 0; + msk = FIELD_DP32(msk, GER_MSK, XMSK, xmsk); + msk = FIELD_DP32(msk, GER_MSK, YMSK, ymsk); + msk = FIELD_DP32(msk, GER_MSK, PMSK, pmsk); + return msk; +} + #endif /* PPC_INTERNAL_H */ diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c index dc93b99189..7c25348b7b 100644 --- a/target/ppc/kvm.c +++ b/target/ppc/kvm.c @@ -21,7 +21,6 @@ #include -#include "qemu-common.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "cpu.h" @@ -267,7 +266,7 @@ struct ppc_radix_page_info *kvm_get_radix_page_info(void) { KVMState *s = KVM_STATE(current_accel()); struct ppc_radix_page_info *radix_page_info; - struct kvm_ppc_rmmu_info rmmu_info; + struct kvm_ppc_rmmu_info rmmu_info = { }; int i; if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) { @@ -418,7 +417,7 @@ void kvm_check_mmu(PowerPCCPU *cpu, Error **errp) * will be a normal mapping, not a special hugepage one used * for RAM. 
*/ - if (qemu_real_host_page_size < 0x10000) { + if (qemu_real_host_page_size() < 0x10000) { error_setg(errp, "KVM can't supply 64kiB CI pages, which guest expects"); } @@ -543,10 +542,11 @@ static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; + /* Init 'val' to avoid "uninitialised value" Valgrind warnings */ union { uint32_t u32; uint64_t u64; - } val; + } val = { }; struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t) &val, @@ -632,7 +632,7 @@ static int kvm_put_fp(CPUState *cs) uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i); uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i); -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN vsr[0] = float64_val(*fpr); vsr[1] = *vsrl; #else @@ -710,7 +710,7 @@ static int kvm_get_fp(CPUState *cs) strerror(errno)); return ret; } else { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN *fpr = vsr[0]; if (vsx) { *vsrl = vsr[1]; @@ -850,7 +850,7 @@ static int kvm_put_vpa(CPUState *cs) int kvmppc_put_books_sregs(PowerPCCPU *cpu) { CPUPPCState *env = &cpu->env; - struct kvm_sregs sregs; + struct kvm_sregs sregs = { }; int i; sregs.pvr = env->spr[SPR_PVR]; @@ -974,7 +974,7 @@ int kvm_arch_put_registers(CPUState *cs, int level) } #ifdef TARGET_PPC64 - if (msr_ts) { + if (FIELD_EX64(env->msr, MSR, TS)) { for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) { kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]); } @@ -1282,7 +1282,7 @@ int kvm_arch_get_registers(CPUState *cs) } #ifdef TARGET_PPC64 - if (msr_ts) { + if (FIELD_EX64(env->msr, MSR, TS)) { for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) { kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]); } @@ -1352,7 +1352,8 @@ static int kvmppc_handle_halt(PowerPCCPU *cpu) CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; - if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) { + if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && + FIELD_EX64(env->msr, MSR, EE)) { cs->halted = 1; cs->exception_index = EXCP_HLT; } @@ -1681,7 +1682,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) break; #if defined(TARGET_PPC64) case KVM_EXIT_PAPR_HCALL: - trace_kvm_handle_papr_hcall(); + trace_kvm_handle_papr_hcall(run->papr_hcall.nr); run->papr_hcall.ret = spapr_hypercall(cpu, run->papr_hcall.nr, run->papr_hcall.args); @@ -1876,6 +1877,12 @@ static int kvmppc_find_cpu_dt(char *buf, int buf_len) buf[0] = '\0'; while ((dirp = readdir(dp)) != NULL) { FILE *f; + + /* Don't accidentally read from the current and parent directories */ + if (strcmp(dirp->d_name, ".") == 0 || strcmp(dirp->d_name, "..") == 0) { + continue; + } + snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU, dirp->d_name); f = fopen(buf, "r"); @@ -2537,7 +2544,7 @@ int kvmppc_get_cap_large_decr(void) int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable) { CPUState *cs = CPU(cpu); - uint64_t lpcr; + uint64_t lpcr = 0; kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr); /* Do we need to modify the LPCR? 
*/ @@ -2959,3 +2966,7 @@ bool kvm_arch_cpu_check_are_resettable(void) { return true; } + +void kvm_arch_accel_class_init(ObjectClass *oc) +{ +} diff --git a/target/ppc/machine.c b/target/ppc/machine.c index 93972df58e..134b16c625 100644 --- a/target/ppc/machine.c +++ b/target/ppc/machine.c @@ -2,12 +2,14 @@ #include "cpu.h" #include "exec/exec-all.h" #include "sysemu/kvm.h" +#include "sysemu/tcg.h" #include "helper_regs.h" #include "mmu-hash64.h" #include "migration/cpu.h" #include "qapi/error.h" #include "qemu/main-loop.h" #include "kvm_ppc.h" +#include "power8-pmu.h" static void post_load_update_msr(CPUPPCState *env) { @@ -21,117 +23,6 @@ static void post_load_update_msr(CPUPPCState *env) ppc_store_msr(env, msr); } -static int cpu_load_old(QEMUFile *f, void *opaque, int version_id) -{ - PowerPCCPU *cpu = opaque; - CPUPPCState *env = &cpu->env; - unsigned int i, j; - target_ulong sdr1; - uint32_t fpscr, vscr; -#if defined(TARGET_PPC64) - int32_t slb_nr; -#endif - target_ulong xer; - - for (i = 0; i < 32; i++) { - qemu_get_betls(f, &env->gpr[i]); - } -#if !defined(TARGET_PPC64) - for (i = 0; i < 32; i++) { - qemu_get_betls(f, &env->gprh[i]); - } -#endif - qemu_get_betls(f, &env->lr); - qemu_get_betls(f, &env->ctr); - for (i = 0; i < 8; i++) { - qemu_get_be32s(f, &env->crf[i]); - } - qemu_get_betls(f, &xer); - cpu_write_xer(env, xer); - qemu_get_betls(f, &env->reserve_addr); - qemu_get_betls(f, &env->msr); - for (i = 0; i < 4; i++) { - qemu_get_betls(f, &env->tgpr[i]); - } - for (i = 0; i < 32; i++) { - union { - float64 d; - uint64_t l; - } u; - u.l = qemu_get_be64(f); - *cpu_fpr_ptr(env, i) = u.d; - } - qemu_get_be32s(f, &fpscr); - env->fpscr = fpscr; - qemu_get_sbe32s(f, &env->access_type); -#if defined(TARGET_PPC64) - qemu_get_betls(f, &env->spr[SPR_ASR]); - qemu_get_sbe32s(f, &slb_nr); -#endif - qemu_get_betls(f, &sdr1); - for (i = 0; i < 32; i++) { - qemu_get_betls(f, &env->sr[i]); - } - for (i = 0; i < 2; i++) { - for (j = 0; j < 8; j++) { - qemu_get_betls(f, &env->DBAT[i][j]); - } - } - for (i = 0; i < 2; i++) { - for (j = 0; j < 8; j++) { - qemu_get_betls(f, &env->IBAT[i][j]); - } - } - qemu_get_sbe32s(f, &env->nb_tlb); - qemu_get_sbe32s(f, &env->tlb_per_way); - qemu_get_sbe32s(f, &env->nb_ways); - qemu_get_sbe32s(f, &env->last_way); - qemu_get_sbe32s(f, &env->id_tlbs); - qemu_get_sbe32s(f, &env->nb_pids); - if (env->tlb.tlb6) { - /* XXX assumes 6xx */ - for (i = 0; i < env->nb_tlb; i++) { - qemu_get_betls(f, &env->tlb.tlb6[i].pte0); - qemu_get_betls(f, &env->tlb.tlb6[i].pte1); - qemu_get_betls(f, &env->tlb.tlb6[i].EPN); - } - } - for (i = 0; i < 4; i++) { - qemu_get_betls(f, &env->pb[i]); - } - for (i = 0; i < 1024; i++) { - qemu_get_betls(f, &env->spr[i]); - } - if (!cpu->vhyp) { - ppc_store_sdr1(env, sdr1); - } - qemu_get_be32s(f, &vscr); - ppc_store_vscr(env, vscr); - qemu_get_be64s(f, &env->spe_acc); - qemu_get_be32s(f, &env->spe_fscr); - qemu_get_betls(f, &env->msr_mask); - qemu_get_be32s(f, &env->flags); - qemu_get_sbe32s(f, &env->error_code); - qemu_get_be32s(f, &env->pending_interrupts); - qemu_get_be32s(f, &env->irq_input_state); - for (i = 0; i < POWERPC_EXCP_NB; i++) { - qemu_get_betls(f, &env->excp_vectors[i]); - } - qemu_get_betls(f, &env->excp_prefix); - qemu_get_betls(f, &env->ivor_mask); - qemu_get_betls(f, &env->ivpr_mask); - qemu_get_betls(f, &env->hreset_vector); - qemu_get_betls(f, &env->nip); - qemu_get_sbetl(f); /* Discard unused hflags */ - qemu_get_sbetl(f); /* Discard unused hflags_nmsr */ - qemu_get_sbe32(f); /* Discard unused mmu_idx */ - 
qemu_get_sbe32(f); /* Discard unused power_mode */ - - post_load_update_msr(env); - - return 0; -} - static int get_avr(QEMUFile *f, void *pv, size_t size, const VMStateField *field) { @@ -262,7 +153,8 @@ static int cpu_pre_save(void *opaque) | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 - | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM; + | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM + | PPC2_MEM_LWSYNC; env->spr[SPR_LR] = env->lr; env->spr[SPR_CTR] = env->ctr; @@ -314,9 +206,8 @@ static int cpu_pre_save(void *opaque) } } - /* Retain migration compatibility for pre 6.0 for 601 machines. */ - env->hflags_compat_nmsr = (env->flags & POWERPC_FLAG_HID0_LE - ? env->hflags & MSR_LE : 0); + /* Used to retain migration compatibility for pre 6.0 for 601 machines. */ + env->hflags_compat_nmsr = 0; return 0; } @@ -339,7 +230,7 @@ static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr) if (pvr == pcc->pvr) { return true; } - return pcc->pvr_match(pcc, pvr); + return pcc->pvr_match(pcc, pvr, true); } static int cpu_post_load(void *opaque, int version_id) @@ -422,6 +313,10 @@ static int cpu_post_load(void *opaque, int version_id) post_load_update_msr(env); + if (tcg_enabled()) { + pmu_mmcr01_updated(env); + } + return 0; } @@ -523,14 +418,13 @@ static bool tm_needed(void *opaque) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; - return msr_ts; + return FIELD_EX64(env->msr, MSR, TS); } static const VMStateDescription vmstate_tm = { .name = "cpu/tm", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .needed = tm_needed, .fields = (VMStateField []) { VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32), @@ -707,25 +601,6 @@ static bool tlbemb_needed(void *opaque) return env->nb_tlb && (env->tlb_type == TLB_EMB); } -static bool pbr403_needed(void *opaque) -{ - PowerPCCPU *cpu = opaque; - uint32_t pvr = cpu->env.spr[SPR_PVR]; - - return (pvr & 0xffff0000) == 0x00200000; -} - -static const VMStateDescription vmstate_pbr403 = { - .name = "cpu/pbr403", - .version_id = 1, - .minimum_version_id = 1, - .needed = pbr403_needed, - .fields = (VMStateField[]) { - VMSTATE_UINTTL_ARRAY(env.pb, PowerPCCPU, 4), - VMSTATE_END_OF_LIST() - }, -}; - static const VMStateDescription vmstate_tlbemb = { .name = "cpu/tlb6xx", .version_id = 1, @@ -737,13 +612,8 @@ static const VMStateDescription vmstate_tlbemb = { env.nb_tlb, vmstate_tlbemb_entry, ppcemb_tlb_t), - /* 403 protection registers */ VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { - &vmstate_pbr403, - NULL - } }; static const VMStateDescription vmstate_tlbmas_entry = { @@ -805,8 +675,6 @@ const VMStateDescription vmstate_ppc_cpu = { .name = "cpu", .version_id = 5, .minimum_version_id = 5, - .minimum_version_id_old = 4, - .load_state_old = cpu_load_old, .pre_save = cpu_pre_save, .post_load = cpu_post_load, .fields = (VMStateField[]) { diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c index e2282baa8d..d1163f316c 100644 --- a/target/ppc/mem_helper.c +++ b/target/ppc/mem_helper.c @@ -25,7 +25,6 @@ #include "exec/helper-proto.h" #include "helper_regs.h" #include "exec/cpu_ldst.h" -#include "tcg/tcg.h" #include "internal.h" #include "qemu/atomic128.h" @@ -33,10 +32,10 @@ static inline bool needs_byteswap(const CPUPPCState *env) { -#if defined(TARGET_WORDS_BIGENDIAN) - return msr_le; +#if TARGET_BIG_ENDIAN + return FIELD_EX64(env->msr, MSR, LE); #else - return !msr_le; + return !FIELD_EX64(env->msr, MSR, LE); #endif } 
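The needs_byteswap() hunk above shows the conversion pattern that runs through the rest of this patch: the old msr_le/msr_pr/msr_ts accessor macros become explicit FIELD_EX64(env->msr, MSR, ...) extractions. Roughly, such an extraction is a shift-and-mask using the shift and length from the MSR FIELD() definitions in cpu.h; a hedged sketch, with an invented helper name:

#include <stdint.h>

/*
 * Sketch only: FIELD_EX64(env->msr, MSR, LE) is conceptually an
 * extract64() of the LE field, i.e. roughly the following, with the
 * shift/length taken from the corresponding FIELD() definition.
 */
static inline uint64_t msr_field_sketch(uint64_t msr, unsigned shift,
                                        unsigned length)
{
    return (msr >> shift) & ((1ULL << length) - 1);   /* length < 64 */
}
/* The old msr_le macro corresponds to extracting the 1-bit LE field. */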
@@ -462,7 +461,7 @@ uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr, /*****************************************************************************/ /* Altivec extension helpers */ -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN #define HI_IDX 0 #define LO_IDX 1 #else @@ -471,8 +470,8 @@ uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr, #endif /* - * We use msr_le to determine index ordering in a vector. However, - * byteswapping is not simply controlled by msr_le. We also need to + * We use MSR_LE to determine index ordering in a vector. However, + * byteswapping is not simply controlled by MSR_LE. We also need to * take into account endianness of the target. This is done for the * little-endian PPC64 user-mode target. */ @@ -485,7 +484,7 @@ uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr, int adjust = HI_IDX * (n_elems - 1); \ int sh = sizeof(r->element[0]) >> 1; \ int index = (addr & 0xf) >> sh; \ - if (msr_le) { \ + if (FIELD_EX64(env->msr, MSR, LE)) { \ index = n_elems - index - 1; \ } \ \ @@ -512,7 +511,7 @@ LVE(lvewx, cpu_ldl_data_ra, bswap32, u32) int adjust = HI_IDX * (n_elems - 1); \ int sh = sizeof(r->element[0]) >> 1; \ int index = (addr & 0xf) >> sh; \ - if (msr_le) { \ + if (FIELD_EX64(env->msr, MSR, LE)) { \ index = n_elems - index - 1; \ } \ \ @@ -546,7 +545,7 @@ void helper_##name(CPUPPCState *env, target_ulong addr, \ t.s128 = int128_zero(); \ if (nb) { \ nb = (nb >= 16) ? 16 : nb; \ - if (msr_le && !lj) { \ + if (FIELD_EX64(env->msr, MSR, LE) && !lj) { \ for (i = 16; i > 16 - nb; i--) { \ t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC()); \ addr = addr_add(env, addr, 1); \ @@ -577,7 +576,7 @@ void helper_##name(CPUPPCState *env, target_ulong addr, \ } \ \ nb = (nb >= 16) ? 
16 : nb; \ - if (msr_le && !lj) { \ + if (FIELD_EX64(env->msr, MSR, LE) && !lj) { \ for (i = 16; i > 16 - nb; i--) { \ cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC()); \ addr = addr_add(env, addr, 1); \ @@ -613,11 +612,12 @@ void helper_tbegin(CPUPPCState *env) env->spr[SPR_TEXASR] = (1ULL << TEXASR_FAILURE_PERSISTENT) | (1ULL << TEXASR_NESTING_OVERFLOW) | - (msr_hv << TEXASR_PRIVILEGE_HV) | - (msr_pr << TEXASR_PRIVILEGE_PR) | + (FIELD_EX64_HV(env->msr) << TEXASR_PRIVILEGE_HV) | + (FIELD_EX64(env->msr, MSR, PR) << TEXASR_PRIVILEGE_PR) | (1ULL << TEXASR_FAILURE_SUMMARY) | (1ULL << TEXASR_TFIAR_EXACT); - env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr; + env->spr[SPR_TFIAR] = env->nip | (FIELD_EX64_HV(env->msr) << 1) | + FIELD_EX64(env->msr, MSR, PR); env->spr[SPR_TFHAR] = env->nip + 4; env->crf[0] = 0xB; /* 0b1010 = transaction failure */ } diff --git a/target/ppc/meson.build b/target/ppc/meson.build index a4f18ff414..79beaff147 100644 --- a/target/ppc/meson.build +++ b/target/ppc/meson.build @@ -16,6 +16,7 @@ ppc_ss.add(when: 'CONFIG_TCG', if_true: files( 'misc_helper.c', 'timebase_helper.c', 'translate.c', + 'power8-pmu.c', )) ppc_ss.add(libdecnumber) @@ -37,11 +38,13 @@ ppc_softmmu_ss.add(files( 'arch_dump.c', 'machine.c', 'mmu-hash32.c', - 'mmu_helper.c', + 'mmu_common.c', 'monitor.c', )) -ppc_softmmu_ss.add(when: 'CONFIG_TCG', if_false: files( - 'tcg-stub.c' +ppc_softmmu_ss.add(when: 'CONFIG_TCG', if_true: files( + 'mmu_helper.c', +), if_false: files( + 'tcg-stub.c', )) ppc_softmmu_ss.add(when: 'TARGET_PPC64', if_true: files( diff --git a/target/ppc/mfrom_table.c.inc b/target/ppc/mfrom_table.c.inc deleted file mode 100644 index 1653b974a4..0000000000 --- a/target/ppc/mfrom_table.c.inc +++ /dev/null @@ -1,78 +0,0 @@ -static const uint8_t mfrom_ROM_table[602] = { - 77, 77, 76, 76, 75, 75, 74, 74, - 73, 73, 72, 72, 71, 71, 70, 70, - 69, 69, 68, 68, 68, 67, 67, 66, - 66, 65, 65, 64, 64, 64, 63, 63, - 62, 62, 61, 61, 61, 60, 60, 59, - 59, 58, 58, 58, 57, 57, 56, 56, - 56, 55, 55, 54, 54, 54, 53, 53, - 53, 52, 52, 51, 51, 51, 50, 50, - 50, 49, 49, 49, 48, 48, 47, 47, - 47, 46, 46, 46, 45, 45, 45, 44, - 44, 44, 43, 43, 43, 42, 42, 42, - 42, 41, 41, 41, 40, 40, 40, 39, - 39, 39, 39, 38, 38, 38, 37, 37, - 37, 37, 36, 36, 36, 35, 35, 35, - 35, 34, 34, 34, 34, 33, 33, 33, - 33, 32, 32, 32, 32, 31, 31, 31, - 31, 30, 30, 30, 30, 29, 29, 29, - 29, 28, 28, 28, 28, 28, 27, 27, - 27, 27, 26, 26, 26, 26, 26, 25, - 25, 25, 25, 25, 24, 24, 24, 24, - 24, 23, 23, 23, 23, 23, 23, 22, - 22, 22, 22, 22, 21, 21, 21, 21, - 21, 21, 20, 20, 20, 20, 20, 20, - 19, 19, 19, 19, 19, 19, 19, 18, - 18, 18, 18, 18, 18, 17, 17, 17, - 17, 17, 17, 17, 16, 16, 16, 16, - 16, 16, 16, 16, 15, 15, 15, 15, - 15, 15, 15, 15, 14, 14, 14, 14, - 14, 14, 14, 14, 13, 13, 13, 13, - 13, 13, 13, 13, 13, 12, 12, 12, - 12, 12, 12, 12, 12, 12, 12, 11, - 11, 11, 11, 11, 11, 11, 11, 11, - 11, 11, 10, 10, 10, 10, 10, 10, - 10, 10, 10, 10, 10, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, - 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, - 7, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 
2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 0, -}; diff --git a/target/ppc/mfrom_table_gen.c b/target/ppc/mfrom_table_gen.c deleted file mode 100644 index f96c4268ba..0000000000 --- a/target/ppc/mfrom_table_gen.c +++ /dev/null @@ -1,34 +0,0 @@ -#define _GNU_SOURCE -#include "qemu/osdep.h" -#include - -int main(void) -{ - double d; - uint8_t n; - int i; - - printf("static const uint8_t mfrom_ROM_table[602] =\n{\n "); - for (i = 0; i < 602; i++) { - /* - * Extremely decomposed: - * -T0 / 256 - * T0 = 256 * log10(10 + 1.0) + 0.5 - */ - d = -i; - d /= 256.0; - d = exp10(d); - d += 1.0; - d = log10(d); - d *= 256; - d += 0.5; - n = d; - printf("%3d, ", n); - if ((i & 7) == 7) { - printf("\n "); - } - } - printf("\n};\n"); - - return 0; -} diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c index c33f5f39b9..a9bc1522e2 100644 --- a/target/ppc/misc_helper.c +++ b/target/ppc/misc_helper.c @@ -18,12 +18,14 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "mmu-book3s-v3.h" +#include "hw/ppc/ppc.h" #include "helper_regs.h" @@ -72,7 +74,7 @@ void helper_hfscr_facility_check(CPUPPCState *env, uint32_t bit, const char *caller, uint32_t cause) { #ifdef TARGET_PPC64 - if ((env->msr_mask & MSR_HVB) && !msr_hv && + if ((env->msr_mask & MSR_HVB) && !FIELD_EX64(env->msr, MSR, HV) && !(env->spr[SPR_HFSCR] & (1UL << bit))) { raise_hv_fu_exception(env, bit, caller, cause, GETPC()); } @@ -162,7 +164,7 @@ target_ulong helper_load_dpdes(CPUPPCState *env) helper_hfscr_facility_check(env, HFSCR_MSGP, "load DPDES", HFSCR_IC_MSGP); /* TODO: TCG supports only one thread */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) { + if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { dpdes = 1; } @@ -172,7 +174,6 @@ target_ulong helper_load_dpdes(CPUPPCState *env) void helper_store_dpdes(CPUPPCState *env, target_ulong val) { PowerPCCPU *cpu = env_archcpu(env); - CPUState *cs = CPU(cpu); helper_hfscr_facility_check(env, HFSCR_MSGP, "store DPDES", HFSCR_IC_MSGP); @@ -183,12 +184,7 @@ void helper_store_dpdes(CPUPPCState *env, target_ulong val) return; } - if (val & 0x1) { - env->pending_interrupts |= 1 << PPC_INTERRUPT_DOORBELL; - cpu_interrupt(cs, CPU_INTERRUPT_HARD); - } else { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL); - } + ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, val & 0x1); } #endif /* defined(TARGET_PPC64) */ @@ -211,30 +207,6 @@ void helper_store_lpidr(CPUPPCState *env, target_ulong val) tlb_flush(env_cpu(env)); } -void helper_store_hid0_601(CPUPPCState *env, target_ulong val) -{ - target_ulong hid0; - - hid0 = env->spr[SPR_HID0]; - env->spr[SPR_HID0] = (uint32_t)val; - - if ((val ^ hid0) & 0x00000008) { - /* Change current endianness */ - hreg_compute_hflags(env); - qemu_log("%s: set endianness to %c => %08x\n", __func__, - val & 0x8 ? 
'l' : 'b', env->hflags); - } -} - -void helper_store_403_pbr(CPUPPCState *env, uint32_t num, target_ulong value) -{ - if (likely(env->pb[num] != value)) { - env->pb[num] = value; - /* Should be optimized */ - tlb_flush(env_cpu(env)); - } -} - void helper_store_40x_dbcr0(CPUPPCState *env, target_ulong val) { /* Bits 26 & 27 affect single-stepping. */ @@ -248,31 +220,6 @@ void helper_store_40x_sler(CPUPPCState *env, target_ulong val) store_40x_sler(env, val); } #endif -/*****************************************************************************/ -/* PowerPC 601 specific instructions (POWER bridge) */ - -target_ulong helper_clcs(CPUPPCState *env, uint32_t arg) -{ - switch (arg) { - case 0x0CUL: - /* Instruction cache line size */ - return env->icache_line_size; - case 0x0DUL: - /* Data cache line size */ - return env->dcache_line_size; - case 0x0EUL: - /* Minimum cache line size */ - return (env->icache_line_size < env->dcache_line_size) ? - env->icache_line_size : env->dcache_line_size; - case 0x0FUL: - /* Maximum cache line size */ - return (env->icache_line_size > env->dcache_line_size) ? - env->icache_line_size : env->dcache_line_size; - default: - /* Undefined */ - return 0; - } -} /*****************************************************************************/ /* Special registers manipulation */ diff --git a/target/ppc/mmu-book3s-v3.c b/target/ppc/mmu-book3s-v3.c index f4985bae78..c8f69b3df9 100644 --- a/target/ppc/mmu-book3s-v3.c +++ b/target/ppc/mmu-book3s-v3.c @@ -28,6 +28,11 @@ bool ppc64_v3_get_pate(PowerPCCPU *cpu, target_ulong lpid, ppc_v3_pate_t *entry) uint64_t patb = cpu->env.spr[SPR_PTCR] & PTCR_PATB; uint64_t pats = cpu->env.spr[SPR_PTCR] & PTCR_PATS; + /* Check if partition table is properly aligned */ + if (patb & MAKE_64BIT_MASK(0, pats + 12)) { + return false; + } + /* Calculate number of entries */ pats = 1ull << (pats + 12 - 4); if (pats <= lpid) { diff --git a/target/ppc/mmu-book3s-v3.h b/target/ppc/mmu-book3s-v3.h index d6d5ed8f8e..674377a19e 100644 --- a/target/ppc/mmu-book3s-v3.h +++ b/target/ppc/mmu-book3s-v3.h @@ -50,6 +50,21 @@ struct prtb_entry { #ifdef TARGET_PPC64 +/* + * tlbie[l] helper flags + * + * RIC, PRS, R and local are passed as flags in the last argument. 
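A note on the new alignment check in ppc64_v3_get_pate() above: MAKE_64BIT_MASK(0, pats + 12) sets the low pats + 12 bits, so the test fires whenever the partition-table base is not aligned to its 2^(pats + 12)-byte size. A self-contained example with made-up values:

#include <assert.h>
#include <stdint.h>

/*
 * Illustration only: alignment test equivalent to
 * "patb & MAKE_64BIT_MASK(0, pats + 12)", with hypothetical values.
 */
static inline void pate_alignment_example(void)
{
    uint64_t pats = 4;                          /* table size 2^(4+12) = 64 KiB */
    uint64_t mask = (1ULL << (pats + 12)) - 1;  /* low 16 bits set */

    assert((0x20000ULL & mask) == 0);   /* base at 128 KiB: aligned  */
    assert((0x21000ULL & mask) != 0);   /* base at 132 KiB: rejected */
}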
+ */ +#define TLBIE_F_RIC_SHIFT 0 +#define TLBIE_F_PRS_SHIFT 2 +#define TLBIE_F_R_SHIFT 3 +#define TLBIE_F_LOCAL_SHIFT 4 + +#define TLBIE_F_RIC_MASK (3 << TLBIE_F_RIC_SHIFT) +#define TLBIE_F_PRS (1 << TLBIE_F_PRS_SHIFT) +#define TLBIE_F_R (1 << TLBIE_F_R_SHIFT) +#define TLBIE_F_LOCAL (1 << TLBIE_F_LOCAL_SHIFT) + static inline bool ppc64_use_proc_tbl(PowerPCCPU *cpu) { return !!(cpu->env.spr[SPR_LPCR] & LPCR_UPRT); diff --git a/target/ppc/mmu-hash32.c b/target/ppc/mmu-hash32.c index 3957aab2dc..cc091c3e62 100644 --- a/target/ppc/mmu-hash32.c +++ b/target/ppc/mmu-hash32.c @@ -125,30 +125,6 @@ static int hash32_bat_prot(PowerPCCPU *cpu, return prot; } -static target_ulong hash32_bat_601_size(PowerPCCPU *cpu, - target_ulong batu, target_ulong batl) -{ - if (!(batl & BATL32_601_V)) { - return 0; - } - - return BATU32_BEPI & ~((batl & BATL32_601_BL) << 17); -} - -static int hash32_bat_601_prot(int mmu_idx, - target_ulong batu, target_ulong batl) -{ - int key, pp; - - pp = batu & BATU32_601_PP; - if (mmuidx_pr(mmu_idx) == 0) { - key = !!(batu & BATU32_601_KS); - } else { - key = !!(batu & BATU32_601_KP); - } - return ppc_hash32_pp_prot(key, pp, 0); -} - static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, MMUAccessType access_type, int *prot, int mmu_idx) @@ -172,11 +148,7 @@ static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, target_ulong batl = BATlt[i]; target_ulong mask; - if (unlikely(env->mmu_model == POWERPC_MMU_601)) { - mask = hash32_bat_601_size(cpu, batu, batl); - } else { - mask = hash32_bat_size(mmu_idx, batu, batl); - } + mask = hash32_bat_size(mmu_idx, batu, batl); LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__, ifetch ? 'I' : 'D', i, ea, batu, batl); @@ -184,11 +156,7 @@ static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, if (mask && ((ea & mask) == (batu & BATU32_BEPI))) { hwaddr raddr = (batl & mask) | (ea & ~mask); - if (unlikely(env->mmu_model == POWERPC_MMU_601)) { - *prot = hash32_bat_601_prot(mmu_idx, batu, batl); - } else { - *prot = hash32_bat_prot(cpu, batu, batl); - } + *prot = hash32_bat_prot(cpu, batu, batl); return raddr & TARGET_PAGE_MASK; } @@ -231,18 +199,6 @@ static bool ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr, qemu_log_mask(CPU_LOG_MMU, "direct store...\n"); - if ((sr & 0x1FF00000) >> 20 == 0x07f) { - /* - * Memory-forced I/O controller interface access - * - * If T=1 and BUID=x'07F', the 601 performs a memory access - * to SR[28-31] LA[4-31], bypassing all protection mechanisms. 
- */ - *raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF); - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - return true; - } - if (access_type == MMU_INST_FETCH) { /* No code fetch is allowed in direct-store areas */ if (guest_visible) { diff --git a/target/ppc/mmu-hash32.h b/target/ppc/mmu-hash32.h index 3892b693d6..7119a63d97 100644 --- a/target/ppc/mmu-hash32.h +++ b/target/ppc/mmu-hash32.h @@ -34,15 +34,6 @@ bool ppc_hash32_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, #define BATL32_WIMG 0x00000078 #define BATL32_PP 0x00000003 -/* PowerPC 601 has slightly different BAT registers */ - -#define BATU32_601_KS 0x00000008 -#define BATU32_601_KP 0x00000004 -#define BATU32_601_PP 0x00000003 - -#define BATL32_601_V 0x00000040 -#define BATL32_601_BL 0x0000003f - /* * Hash page table definitions */ diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c index 19832c4b46..b9b31fd276 100644 --- a/target/ppc/mmu-hash64.c +++ b/target/ppc/mmu-hash64.c @@ -101,7 +101,7 @@ void dump_slb(PowerPCCPU *cpu) } #ifdef CONFIG_TCG -void helper_slbia(CPUPPCState *env, uint32_t ih) +void helper_SLBIA(CPUPPCState *env, uint32_t ih) { PowerPCCPU *cpu = env_archcpu(env); int starting_entry; @@ -173,6 +173,33 @@ void helper_slbia(CPUPPCState *env, uint32_t ih) } } +#if defined(TARGET_PPC64) +void helper_SLBIAG(CPUPPCState *env, target_ulong rs, uint32_t l) +{ + PowerPCCPU *cpu = env_archcpu(env); + int n; + + /* + * slbiag must always flush all TLB (which is equivalent to ERAT in ppc + * architecture). Matching on SLB_ESID_V is not good enough, because slbmte + * can overwrite a valid SLB without flushing its lookaside information. + * + * It would be possible to keep the TLB in synch with the SLB by flushing + * when a valid entry is overwritten by slbmte, and therefore slbiag would + * not have to flush unless it evicts a valid SLB entry. However it is + * expected that slbmte is more common than slbiag, and slbiag is usually + * going to evict valid SLB entries, so that tradeoff is unlikely to be a + * good one. 
+ */ + env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; + + for (n = 0; n < cpu->hash64_opts->slb_size; n++) { + ppc_slb_t *slb = &env->slb[n]; + slb->esid &= ~SLB_ESID_V; + } +} +#endif + static void __helper_slbie(CPUPPCState *env, target_ulong addr, target_ulong global) { @@ -197,12 +224,12 @@ static void __helper_slbie(CPUPPCState *env, target_ulong addr, } } -void helper_slbie(CPUPPCState *env, target_ulong addr) +void helper_SLBIE(CPUPPCState *env, target_ulong addr) { __helper_slbie(env, addr, false); } -void helper_slbieg(CPUPPCState *env, target_ulong addr) +void helper_SLBIEG(CPUPPCState *env, target_ulong addr) { __helper_slbie(env, addr, true); } @@ -309,7 +336,7 @@ static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb, return 0; } -void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs) +void helper_SLBMTE(CPUPPCState *env, target_ulong rb, target_ulong rs) { PowerPCCPU *cpu = env_archcpu(env); @@ -319,7 +346,7 @@ void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs) } } -target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb) +target_ulong helper_SLBMFEE(CPUPPCState *env, target_ulong rb) { PowerPCCPU *cpu = env_archcpu(env); target_ulong rt = 0; @@ -331,7 +358,7 @@ target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb) return rt; } -target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb) +target_ulong helper_SLBFEE(CPUPPCState *env, target_ulong rb) { PowerPCCPU *cpu = env_archcpu(env); target_ulong rt = 0; @@ -343,7 +370,7 @@ target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb) return rt; } -target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb) +target_ulong helper_SLBMFEV(CPUPPCState *env, target_ulong rb) { PowerPCCPU *cpu = env_archcpu(env); target_ulong rt = 0; @@ -786,7 +813,7 @@ static void ppc_hash64_set_dsi(CPUState *cs, int mmu_idx, uint64_t dar, uint64_t static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1) { - hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 16; + hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R; if (cpu->vhyp) { PPCVirtualHypervisorClass *vhc = @@ -803,7 +830,7 @@ static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1) static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1) { - hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 15; + hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C; if (cpu->vhyp) { PPCVirtualHypervisorClass *vhc = diff --git a/target/ppc/mmu-hash64.h b/target/ppc/mmu-hash64.h index c5b2f97ff7..1496955d38 100644 --- a/target/ppc/mmu-hash64.h +++ b/target/ppc/mmu-hash64.h @@ -97,6 +97,11 @@ void ppc_hash64_finalize(PowerPCCPU *cpu); #define HPTE64_V_1TB_SEG 0x4000000000000000ULL #define HPTE64_V_VRMA_MASK 0x4001ffffff000000ULL +/* PTE offsets */ +#define HPTE64_DW1 (HASH_PTE_SIZE_64 / 2) +#define HPTE64_DW1_R (HPTE64_DW1 + 6) +#define HPTE64_DW1_C (HPTE64_DW1 + 7) + /* Format changes for ARCH v3 */ #define HPTE64_V_COMMON_BITS 0x000fffffffffffffULL #define HPTE64_R_3_0_SSIZE_SHIFT 58 diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c index 5b0e62e676..031efda0df 100644 --- a/target/ppc/mmu-radix64.c +++ b/target/ppc/mmu-radix64.c @@ -32,7 +32,12 @@ static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env, vaddr eaddr, uint64_t *lpid, uint64_t *pid) { - if (msr_hv) { /* MSR[HV] -> Hypervisor/bare metal */ + /* When EA(2:11) are nonzero, raise a segment interrupt */ + if (eaddr & ~R_EADDR_VALID_MASK) { + return false; 
+ } + + if (FIELD_EX64(env->msr, MSR, HV)) { /* MSR[HV] -> Hypervisor/bare metal */ switch (eaddr & R_EADDR_QUADRANT) { case R_EADDR_QUADRANT0: *lpid = 0; @@ -97,12 +102,22 @@ static void ppc_radix64_raise_segi(PowerPCCPU *cpu, MMUAccessType access_type, env->error_code = 0; } +static inline const char *access_str(MMUAccessType access_type) +{ + return access_type == MMU_DATA_LOAD ? "reading" : + (access_type == MMU_DATA_STORE ? "writing" : "execute"); +} + static void ppc_radix64_raise_si(PowerPCCPU *cpu, MMUAccessType access_type, vaddr eaddr, uint32_t cause) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; + qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" cause %08x\n", + __func__, access_str(access_type), + eaddr, cause); + switch (access_type) { case MMU_INST_FETCH: /* Instruction Storage Interrupt */ @@ -130,6 +145,11 @@ static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, MMUAccessType access_type, CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; + qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" 0x%" + HWADDR_PRIx" cause %08x\n", + __func__, access_str(access_type), + eaddr, g_raddr, cause); + switch (access_type) { case MMU_INST_FETCH: /* H Instruction Storage Interrupt */ @@ -171,12 +191,13 @@ static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type, } /* Determine permissions allowed by Encoded Access Authority */ - if (!partition_scoped && (pte & R_PTE_EAA_PRIV) && msr_pr) { + if (!partition_scoped && (pte & R_PTE_EAA_PRIV) && + FIELD_EX64(env->msr, MSR, PR)) { *prot = 0; } else if (mmuidx_pr(mmu_idx) || (pte & R_PTE_EAA_PRIV) || partition_scoped) { *prot = ppc_radix64_get_prot_eaa(pte); - } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */ + } else { /* !MSR_PR && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */ *prot = ppc_radix64_get_prot_eaa(pte); *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */ } @@ -184,7 +205,8 @@ static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type, /* Check if requested access type is allowed */ need_prot = prot_for_access_type(access_type); if (need_prot & ~*prot) { /* Page Protected for that Access */ - *fault_cause |= DSISR_PROTFAULT; + *fault_cause |= access_type == MMU_INST_FETCH ? SRR1_NOEXEC_GUARD : + DSISR_PROTFAULT; return true; } @@ -214,16 +236,47 @@ static void ppc_radix64_set_rc(PowerPCCPU *cpu, MMUAccessType access_type, } } +static bool ppc_radix64_is_valid_level(int level, int psize, uint64_t nls) +{ + bool ret; + + /* + * Check if this is a valid level, according to POWER9 and POWER10 + * Processor User's Manuals, sections 4.10.4.1 and 5.10.6.1, respectively: + * Supported Radix Tree Configurations and Resulting Page Sizes. + * + * Note: these checks are specific to POWER9 and POWER10 CPUs. Any future + * CPUs that supports a different Radix MMU configuration will need their + * own implementation. 
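To make the configurations accepted by ppc_radix64_is_valid_level() (the switch just below) more concrete, the index bits resolve a 52-bit effective address roughly as follows; the page sizes are illustrative arithmetic derived from those level checks, not something the walker computes here:

/*
 * Worked numbers (illustrative):
 *
 *   52 - 13 - 9 - 9 - 9 = 12 offset bits ->   4 KiB pages
 *   52 - 13 - 9 - 9 - 5 = 16 offset bits ->  64 KiB pages
 *   52 - 13 - 9 - 9     = 21 offset bits ->   2 MiB pages (leaf at level 2)
 *   52 - 13 - 9         = 30 offset bits ->   1 GiB pages (leaf at level 1)
 */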
+ */ + switch (level) { + case 0: /* Root Page Dir */ + ret = psize == 52 && nls == 13; + break; + case 1: + case 2: + ret = nls == 9; + break; + case 3: + ret = nls == 9 || nls == 5; + break; + default: + ret = false; + } + + if (unlikely(!ret)) { + qemu_log_mask(LOG_GUEST_ERROR, "invalid radix configuration: " + "level %d size %d nls %"PRIu64"\n", + level, psize, nls); + } + return ret; +} + static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr, uint64_t *pte_addr, uint64_t *nls, int *psize, uint64_t *pte, int *fault_cause) { - uint64_t index, pde; - - if (*nls < 5) { /* Directory maps less than 2**5 entries */ - *fault_cause |= DSISR_R_BADCONFIG; - return 1; - } + uint64_t index, mask, nlb, pde; /* Read page entry from guest address space */ pde = ldq_phys(as, *pte_addr); @@ -238,7 +291,17 @@ static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr, *nls = pde & R_PDE_NLS; index = eaddr >> (*psize - *nls); /* Shift */ index &= ((1UL << *nls) - 1); /* Mask */ - *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde)); + nlb = pde & R_PDE_NLB; + mask = MAKE_64BIT_MASK(0, *nls + 3); + + if (nlb & mask) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: misaligned page dir/table base: 0x"TARGET_FMT_lx + " page dir size: 0x"TARGET_FMT_lx"\n", + __func__, nlb, mask + 1); + nlb &= ~mask; + } + *pte_addr = nlb + index * sizeof(pde); } return 0; } @@ -248,19 +311,30 @@ static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr, hwaddr *raddr, int *psize, uint64_t *pte, int *fault_cause, hwaddr *pte_addr) { - uint64_t index, pde, rpn , mask; - - if (nls < 5) { /* Directory maps less than 2**5 entries */ - *fault_cause |= DSISR_R_BADCONFIG; - return 1; - } + uint64_t index, pde, rpn, mask; + int level = 0; index = eaddr >> (*psize - nls); /* Shift */ - index &= ((1UL << nls) - 1); /* Mask */ - *pte_addr = base_addr + (index * sizeof(pde)); + index &= ((1UL << nls) - 1); /* Mask */ + mask = MAKE_64BIT_MASK(0, nls + 3); + + if (base_addr & mask) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: misaligned page dir base: 0x"TARGET_FMT_lx + " page dir size: 0x"TARGET_FMT_lx"\n", + __func__, base_addr, mask + 1); + base_addr &= ~mask; + } + *pte_addr = base_addr + index * sizeof(pde); + do { int ret; + if (!ppc_radix64_is_valid_level(level++, *psize, nls)) { + *fault_cause |= DSISR_R_BADCONFIG; + return 1; + } + ret = ppc_radix64_next_level(as, eaddr, pte_addr, &nls, psize, &pde, fault_cause); if (ret) { @@ -284,7 +358,7 @@ static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate) if (!(pate->dw0 & PATE0_HR)) { return false; } - if (lpid == 0 && !msr_hv) { + if (lpid == 0 && !FIELD_EX64(env->msr, MSR, HV)) { return false; } if ((pate->dw0 & PATE1_R_PRTS) < 5) { @@ -306,6 +380,11 @@ static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu, hwaddr pte_addr; uint64_t pte; + qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx + " mmu_idx %u 0x%"HWADDR_PRIx"\n", + __func__, access_str(access_type), + eaddr, mmu_idx, g_raddr); + *h_page_size = PRTBE_R_GET_RTS(pate.dw0); /* No valid pte or access denied due to protection */ if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB, @@ -329,6 +408,24 @@ static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu, return 0; } +/* + * The spapr vhc has a flat partition scope provided by qemu memory when + * not nested. + * + * When running a nested guest, the addressing is 2-level radix on top of the + * vhc memory, so it works practically identically to the bare metal 2-level + * radix. 
So that code is selected directly. A cleaner and more flexible nested + * hypervisor implementation would allow the vhc to provide a ->nested_xlate() + * function but that is not required for the moment. + */ +static bool vhyp_flat_addressing(PowerPCCPU *cpu) +{ + if (cpu->vhyp) { + return !vhyp_cpu_in_nested(cpu); + } + return false; +} + static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, MMUAccessType access_type, vaddr eaddr, uint64_t pid, @@ -338,14 +435,28 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; - uint64_t offset, size, prtbe_addr, prtbe0, base_addr, nls, index, pte; + uint64_t offset, size, prtb, prtbe_addr, prtbe0, base_addr, nls, index, pte; int fault_cause = 0, h_page_size, h_prot; hwaddr h_raddr, pte_addr; int ret; + qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx + " mmu_idx %u pid %"PRIu64"\n", + __func__, access_str(access_type), + eaddr, mmu_idx, pid); + + prtb = (pate.dw1 & PATE1_R_PRTB); + size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12); + if (prtb & (size - 1)) { + /* Process Table not properly aligned */ + if (guest_visible) { + ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG); + } + return 1; + } + /* Index Process Table by PID to Find Corresponding Process Table Entry */ offset = pid * sizeof(struct prtb_entry); - size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12); if (offset >= size) { /* offset exceeds size of the process table */ if (guest_visible) { @@ -353,9 +464,9 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, } return 1; } - prtbe_addr = (pate.dw1 & PATE1_R_PRTB) + offset; + prtbe_addr = prtb + offset; - if (cpu->vhyp) { + if (vhyp_flat_addressing(cpu)) { prtbe0 = ldq_phys(cs->as, prtbe_addr); } else { /* @@ -381,7 +492,7 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, *g_page_size = PRTBE_R_GET_RTS(prtbe0); base_addr = prtbe0 & PRTBE_R_RPDB; nls = prtbe0 & PRTBE_R_RPDS; - if (msr_hv || cpu->vhyp) { + if (FIELD_EX64(env->msr, MSR, HV) || vhyp_flat_addressing(cpu)) { /* * Can treat process table addresses as real addresses */ @@ -397,6 +508,7 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, } } else { uint64_t rpn, mask; + int level = 0; index = (eaddr & R_EADDR_MASK) >> (*g_page_size - nls); /* Shift */ index &= ((1UL << nls) - 1); /* Mask */ @@ -416,8 +528,15 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, return ret; } - ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK, &h_raddr, - &nls, g_page_size, &pte, &fault_cause); + if (!ppc_radix64_is_valid_level(level++, *g_page_size, nls)) { + fault_cause |= DSISR_R_BADCONFIG; + ret = 1; + } else { + ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK, + &h_raddr, &nls, g_page_size, + &pte, &fault_cause); + } + if (ret) { /* No valid pte */ if (guest_visible) { @@ -468,9 +587,10 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, * | = On | Process Scoped | Scoped | * +-------------+----------------+---------------+ */ -bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, - hwaddr *raddr, int *psizep, int *protp, int mmu_idx, - bool guest_visible) +static bool ppc_radix64_xlate_impl(PowerPCCPU *cpu, vaddr eaddr, + MMUAccessType access_type, hwaddr *raddr, + int *psizep, int *protp, int mmu_idx, + bool guest_visible) { CPUPPCState *env = &cpu->env; uint64_t lpid, pid; @@ -484,7 +604,7 @@ bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, relocation = 
!mmuidx_real(mmu_idx); /* HV or virtual hypervisor Real Mode Access */ - if (!relocation && (mmuidx_hv(mmu_idx) || cpu->vhyp)) { + if (!relocation && (mmuidx_hv(mmu_idx) || vhyp_flat_addressing(cpu))) { /* In real mode top 4 effective addr bits (mostly) ignored */ *raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL; @@ -517,21 +637,29 @@ bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, return false; } - /* Get Process Table */ + /* Get Partition Table */ if (cpu->vhyp) { PPCVirtualHypervisorClass *vhc; vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); - vhc->get_pate(cpu->vhyp, &pate); + if (!vhc->get_pate(cpu->vhyp, cpu, lpid, &pate)) { + if (guest_visible) { + ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr, + DSISR_R_BADCONFIG); + } + return false; + } } else { if (!ppc64_v3_get_pate(cpu, lpid, &pate)) { if (guest_visible) { - ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE); + ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr, + DSISR_R_BADCONFIG); } return false; } if (!validate_pate(cpu, lpid, &pate)) { if (guest_visible) { - ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG); + ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr, + DSISR_R_BADCONFIG); } return false; } @@ -561,7 +689,7 @@ bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, g_raddr = eaddr & R_EADDR_MASK; } - if (cpu->vhyp) { + if (vhyp_flat_addressing(cpu)) { *raddr = g_raddr; } else { /* @@ -588,3 +716,22 @@ bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, return true; } + +bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, + hwaddr *raddrp, int *psizep, int *protp, int mmu_idx, + bool guest_visible) +{ + bool ret = ppc_radix64_xlate_impl(cpu, eaddr, access_type, raddrp, + psizep, protp, mmu_idx, guest_visible); + + qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx + " mmu_idx %u (prot %c%c%c) -> 0x%"HWADDR_PRIx"\n", + __func__, access_str(access_type), + eaddr, mmu_idx, + *protp & PAGE_READ ? 'r' : '-', + *protp & PAGE_WRITE ? 'w' : '-', + *protp & PAGE_EXEC ? 'x' : '-', + *raddrp); + + return ret; +} diff --git a/target/ppc/mmu-radix64.h b/target/ppc/mmu-radix64.h index b70357cf34..4c768aa5cc 100644 --- a/target/ppc/mmu-radix64.h +++ b/target/ppc/mmu-radix64.h @@ -5,6 +5,7 @@ /* Radix Quadrants */ #define R_EADDR_MASK 0x3FFFFFFFFFFFFFFF +#define R_EADDR_VALID_MASK 0xC00FFFFFFFFFFFFF #define R_EADDR_QUADRANT 0xC000000000000000 #define R_EADDR_QUADRANT0 0x0000000000000000 #define R_EADDR_QUADRANT1 0x4000000000000000 diff --git a/target/ppc/mmu_common.c b/target/ppc/mmu_common.c new file mode 100644 index 0000000000..89107a6af2 --- /dev/null +++ b/target/ppc/mmu_common.c @@ -0,0 +1,1553 @@ +/* + * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "cpu.h" +#include "sysemu/kvm.h" +#include "kvm_ppc.h" +#include "mmu-hash64.h" +#include "mmu-hash32.h" +#include "exec/exec-all.h" +#include "exec/log.h" +#include "helper_regs.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "qemu/qemu-print.h" +#include "internal.h" +#include "mmu-book3s-v3.h" +#include "mmu-radix64.h" + +/* #define DUMP_PAGE_TABLES */ + +void ppc_store_sdr1(CPUPPCState *env, target_ulong value) +{ + PowerPCCPU *cpu = env_archcpu(env); + qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value); + assert(!cpu->env.has_hv_mode || !cpu->vhyp); +#if defined(TARGET_PPC64) + if (mmu_is_64bit(env->mmu_model)) { + target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE; + target_ulong htabsize = value & SDR_64_HTABSIZE; + + if (value & ~sdr_mask) { + qemu_log_mask(LOG_GUEST_ERROR, "Invalid bits 0x"TARGET_FMT_lx + " set in SDR1", value & ~sdr_mask); + value &= sdr_mask; + } + if (htabsize > 28) { + qemu_log_mask(LOG_GUEST_ERROR, "Invalid HTABSIZE 0x" TARGET_FMT_lx + " stored in SDR1", htabsize); + return; + } + } +#endif /* defined(TARGET_PPC64) */ + /* FIXME: Should check for valid HTABMASK values in 32-bit case */ + env->spr[SPR_SDR1] = value; +} + +/*****************************************************************************/ +/* PowerPC MMU emulation */ + +static int pp_check(int key, int pp, int nx) +{ + int access; + + /* Compute access rights */ + access = 0; + if (key == 0) { + switch (pp) { + case 0x0: + case 0x1: + case 0x2: + access |= PAGE_WRITE; + /* fall through */ + case 0x3: + access |= PAGE_READ; + break; + } + } else { + switch (pp) { + case 0x0: + access = 0; + break; + case 0x1: + case 0x3: + access = PAGE_READ; + break; + case 0x2: + access = PAGE_READ | PAGE_WRITE; + break; + } + } + if (nx == 0) { + access |= PAGE_EXEC; + } + + return access; +} + +static int check_prot(int prot, MMUAccessType access_type) +{ + return prot & prot_for_access_type(access_type) ? 
0 : -2; +} + +int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr, + int way, int is_code) +{ + int nr; + + /* Select TLB num in a way from address */ + nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1); + /* Select TLB way */ + nr += env->tlb_per_way * way; + /* 6xx have separate TLBs for instructions and data */ + if (is_code && env->id_tlbs == 1) { + nr += env->nb_tlb; + } + + return nr; +} + +static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0, + target_ulong pte1, int h, + MMUAccessType access_type) +{ + target_ulong ptem, mmask; + int access, ret, pteh, ptev, pp; + + ret = -1; + /* Check validity and table match */ + ptev = pte_is_valid(pte0); + pteh = (pte0 >> 6) & 1; + if (ptev && h == pteh) { + /* Check vsid & api */ + ptem = pte0 & PTE_PTEM_MASK; + mmask = PTE_CHECK_MASK; + pp = pte1 & 0x00000003; + if (ptem == ctx->ptem) { + if (ctx->raddr != (hwaddr)-1ULL) { + /* all matches should have equal RPN, WIMG & PP */ + if ((ctx->raddr & mmask) != (pte1 & mmask)) { + qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n"); + return -3; + } + } + /* Compute access rights */ + access = pp_check(ctx->key, pp, ctx->nx); + /* Keep the matching PTE information */ + ctx->raddr = pte1; + ctx->prot = access; + ret = check_prot(ctx->prot, access_type); + if (ret == 0) { + /* Access granted */ + qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n"); + } else { + /* Access right violation */ + qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); + } + } + } + + return ret; +} + +static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p, + int ret, MMUAccessType access_type) +{ + int store = 0; + + /* Update page flags */ + if (!(*pte1p & 0x00000100)) { + /* Update accessed flag */ + *pte1p |= 0x00000100; + store = 1; + } + if (!(*pte1p & 0x00000080)) { + if (access_type == MMU_DATA_STORE && ret == 0) { + /* Update changed flag */ + *pte1p |= 0x00000080; + store = 1; + } else { + /* Force page fault for first write access */ + ctx->prot &= ~PAGE_WRITE; + } + } + + return store; +} + +/* Software driven TLB helpers */ + +static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, MMUAccessType access_type) +{ + ppc6xx_tlb_t *tlb; + int nr, best, way; + int ret; + + best = -1; + ret = -1; /* No TLB found */ + for (way = 0; way < env->nb_ways; way++) { + nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH); + tlb = &env->tlb.tlb6[nr]; + /* This test "emulates" the PTE index match for hardware TLBs */ + if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) { + qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s [" TARGET_FMT_lx + " " TARGET_FMT_lx "] <> " TARGET_FMT_lx "\n", + nr, env->nb_tlb, + pte_is_valid(tlb->pte0) ? "valid" : "inval", + tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr); + continue; + } + qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s " TARGET_FMT_lx " <> " + TARGET_FMT_lx " " TARGET_FMT_lx " %c %c\n", + nr, env->nb_tlb, + pte_is_valid(tlb->pte0) ? "valid" : "inval", + tlb->EPN, eaddr, tlb->pte1, + access_type == MMU_DATA_STORE ? 'S' : 'L', + access_type == MMU_INST_FETCH ? 'I' : 'D'); + switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1, + 0, access_type)) { + case -3: + /* TLB inconsistency */ + return -1; + case -2: + /* Access violation */ + ret = -2; + best = nr; + break; + case -1: + default: + /* No match */ + break; + case 0: + /* access granted */ + /* + * XXX: we should go on looping to check all TLBs + * consistency but we can speed-up the whole thing as + * the result would be undefined if TLBs are not + * consistent. 
+ */ + ret = 0; + best = nr; + goto done; + } + } + if (best != -1) { + done: + qemu_log_mask(CPU_LOG_MMU, "found TLB at addr " TARGET_FMT_plx + " prot=%01x ret=%d\n", + ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret); + /* Update page flags */ + pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type); + } + + return ret; +} + +/* Perform BAT hit & translation */ +static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp, + int *validp, int *protp, target_ulong *BATu, + target_ulong *BATl) +{ + target_ulong bl; + int pp, valid, prot; + + bl = (*BATu & 0x00001FFC) << 15; + valid = 0; + prot = 0; + if ((!FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000002)) || + (FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000001))) { + valid = 1; + pp = *BATl & 0x00000003; + if (pp != 0) { + prot = PAGE_READ | PAGE_EXEC; + if (pp == 0x2) { + prot |= PAGE_WRITE; + } + } + } + *blp = bl; + *validp = valid; + *protp = prot; +} + +static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong virtual, MMUAccessType access_type) +{ + target_ulong *BATlt, *BATut, *BATu, *BATl; + target_ulong BEPIl, BEPIu, bl; + int i, valid, prot; + int ret = -1; + bool ifetch = access_type == MMU_INST_FETCH; + + qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT v " TARGET_FMT_lx "\n", __func__, + ifetch ? 'I' : 'D', virtual); + if (ifetch) { + BATlt = env->IBAT[1]; + BATut = env->IBAT[0]; + } else { + BATlt = env->DBAT[1]; + BATut = env->DBAT[0]; + } + for (i = 0; i < env->nb_BATs; i++) { + BATu = &BATut[i]; + BATl = &BATlt[i]; + BEPIu = *BATu & 0xF0000000; + BEPIl = *BATu & 0x0FFE0000; + bat_size_prot(env, &bl, &valid, &prot, BATu, BATl); + qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx " BATu " + TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__, + ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl); + if ((virtual & 0xF0000000) == BEPIu && + ((virtual & 0x0FFE0000) & ~bl) == BEPIl) { + /* BAT matches */ + if (valid != 0) { + /* Get physical address */ + ctx->raddr = (*BATl & 0xF0000000) | + ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) | + (virtual & 0x0001F000); + /* Compute access rights */ + ctx->prot = prot; + ret = check_prot(ctx->prot, access_type); + if (ret == 0) { + qemu_log_mask(CPU_LOG_MMU, "BAT %d match: r " TARGET_FMT_plx + " prot=%c%c\n", i, ctx->raddr, + ctx->prot & PAGE_READ ? 'R' : '-', + ctx->prot & PAGE_WRITE ? 'W' : '-'); + } + break; + } + } + } + if (ret < 0) { + if (qemu_log_enabled()) { + qemu_log_mask(CPU_LOG_MMU, "no BAT match for " + TARGET_FMT_lx ":\n", virtual); + for (i = 0; i < 4; i++) { + BATu = &BATut[i]; + BATl = &BATlt[i]; + BEPIu = *BATu & 0xF0000000; + BEPIl = *BATu & 0x0FFE0000; + bl = (*BATu & 0x00001FFC) << 15; + qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " + TARGET_FMT_lx " BATu " TARGET_FMT_lx + " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " + TARGET_FMT_lx " " TARGET_FMT_lx "\n", + __func__, ifetch ? 'I' : 'D', i, virtual, + *BATu, *BATl, BEPIu, BEPIl, bl); + } + } + } + /* No hit */ + return ret; +} + +/* Perform segment based translation */ +static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, MMUAccessType access_type, + int type) +{ + PowerPCCPU *cpu = env_archcpu(env); + hwaddr hash; + target_ulong vsid; + int ds, target_page_bits; + bool pr; + int ret; + target_ulong sr, pgidx; + + pr = FIELD_EX64(env->msr, MSR, PR); + ctx->eaddr = eaddr; + + sr = env->sr[eaddr >> 28]; + ctx->key = (((sr & 0x20000000) && pr) || + ((sr & 0x40000000) && !pr)) ? 1 : 0; + ds = sr & 0x80000000 ? 
1 : 0; + ctx->nx = sr & 0x10000000 ? 1 : 0; + vsid = sr & 0x00FFFFFF; + target_page_bits = TARGET_PAGE_BITS; + qemu_log_mask(CPU_LOG_MMU, + "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx + " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx + " ir=%d dr=%d pr=%d %d t=%d\n", + eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, + (int)FIELD_EX64(env->msr, MSR, IR), + (int)FIELD_EX64(env->msr, MSR, DR), pr ? 1 : 0, + access_type == MMU_DATA_STORE, type); + pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits; + hash = vsid ^ pgidx; + ctx->ptem = (vsid << 7) | (pgidx >> 10); + + qemu_log_mask(CPU_LOG_MMU, + "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n", + ctx->key, ds, ctx->nx, vsid); + ret = -1; + if (!ds) { + /* Check if instruction fetch is allowed, if needed */ + if (type != ACCESS_CODE || ctx->nx == 0) { + /* Page address translation */ + qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx + " htab_mask " TARGET_FMT_plx + " hash " TARGET_FMT_plx "\n", + ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash); + ctx->hash[0] = hash; + ctx->hash[1] = ~hash; + + /* Initialize real address with an invalid value */ + ctx->raddr = (hwaddr)-1ULL; + /* Software TLB search */ + ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type); +#if defined(DUMP_PAGE_TABLES) + if (qemu_loglevel_mask(CPU_LOG_MMU)) { + CPUState *cs = env_cpu(env); + hwaddr curaddr; + uint32_t a0, a1, a2, a3; + + qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx + "\n", ppc_hash32_hpt_base(cpu), + ppc_hash32_hpt_mask(cpu) + 0x80); + for (curaddr = ppc_hash32_hpt_base(cpu); + curaddr < (ppc_hash32_hpt_base(cpu) + + ppc_hash32_hpt_mask(cpu) + 0x80); + curaddr += 16) { + a0 = ldl_phys(cs->as, curaddr); + a1 = ldl_phys(cs->as, curaddr + 4); + a2 = ldl_phys(cs->as, curaddr + 8); + a3 = ldl_phys(cs->as, curaddr + 12); + if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { + qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n", + curaddr, a0, a1, a2, a3); + } + } + } +#endif + } else { + qemu_log_mask(CPU_LOG_MMU, "No access allowed\n"); + ret = -3; + } + } else { + qemu_log_mask(CPU_LOG_MMU, "direct store...\n"); + /* Direct-store segment : absolutely *BUGGY* for now */ + + switch (type) { + case ACCESS_INT: + /* Integer load/store : only access allowed */ + break; + case ACCESS_CODE: + /* No code fetch is allowed in direct-store areas */ + return -4; + case ACCESS_FLOAT: + /* Floating point load/store */ + return -4; + case ACCESS_RES: + /* lwarx, ldarx or srwcx. */ + return -4; + case ACCESS_CACHE: + /* + * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi + * + * Should make the instruction do no-op. 
As it already do + * no-op, it's quite easy :-) + */ + ctx->raddr = eaddr; + return 0; + case ACCESS_EXT: + /* eciwx or ecowx */ + return -4; + default: + qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need " + "address translation\n"); + return -4; + } + if ((access_type == MMU_DATA_STORE || ctx->key != 1) && + (access_type == MMU_DATA_LOAD || ctx->key != 0)) { + ctx->raddr = eaddr; + ret = 2; + } else { + ret = -2; + } + } + + return ret; +} + +/* Generic TLB check function for embedded PowerPC implementations */ +int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb, + hwaddr *raddrp, + target_ulong address, uint32_t pid, int ext, + int i) +{ + target_ulong mask; + + /* Check valid flag */ + if (!(tlb->prot & PAGE_VALID)) { + return -1; + } + mask = ~(tlb->size - 1); + qemu_log_mask(CPU_LOG_MMU, "%s: TLB %d address " TARGET_FMT_lx + " PID %u <=> " TARGET_FMT_lx " " TARGET_FMT_lx " %u %x\n", + __func__, i, address, pid, tlb->EPN, + mask, (uint32_t)tlb->PID, tlb->prot); + /* Check PID */ + if (tlb->PID != 0 && tlb->PID != pid) { + return -1; + } + /* Check effective address */ + if ((address & mask) != tlb->EPN) { + return -1; + } + *raddrp = (tlb->RPN & mask) | (address & ~mask); + if (ext) { + /* Extend the physical address to 36 bits */ + *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32; + } + + return 0; +} + +static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong address, + MMUAccessType access_type) +{ + ppcemb_tlb_t *tlb; + hwaddr raddr; + int i, ret, zsel, zpr, pr; + + ret = -1; + raddr = (hwaddr)-1ULL; + pr = FIELD_EX64(env->msr, MSR, PR); + for (i = 0; i < env->nb_tlb; i++) { + tlb = &env->tlb.tlbe[i]; + if (ppcemb_tlb_check(env, tlb, &raddr, address, + env->spr[SPR_40x_PID], 0, i) < 0) { + continue; + } + zsel = (tlb->attr >> 4) & 0xF; + zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3; + qemu_log_mask(CPU_LOG_MMU, + "%s: TLB %d zsel %d zpr %d ty %d attr %08x\n", + __func__, i, zsel, zpr, access_type, tlb->attr); + /* Check execute enable bit */ + switch (zpr) { + case 0x2: + if (pr != 0) { + goto check_perms; + } + /* fall through */ + case 0x3: + /* All accesses granted */ + ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + ret = 0; + break; + case 0x0: + if (pr != 0) { + /* Raise Zone protection fault. 
*/ + env->spr[SPR_40x_ESR] = 1 << 22; + ctx->prot = 0; + ret = -2; + break; + } + /* fall through */ + case 0x1: + check_perms: + /* Check from TLB entry */ + ctx->prot = tlb->prot; + ret = check_prot(ctx->prot, access_type); + if (ret == -2) { + env->spr[SPR_40x_ESR] = 0; + } + break; + } + if (ret >= 0) { + ctx->raddr = raddr; + qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx + " => " TARGET_FMT_plx + " %d %d\n", __func__, address, ctx->raddr, ctx->prot, + ret); + return 0; + } + } + qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx + " => " TARGET_FMT_plx + " %d %d\n", __func__, address, raddr, ctx->prot, ret); + + return ret; +} + +static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb, + hwaddr *raddr, int *prot, target_ulong address, + MMUAccessType access_type, int i) +{ + int prot2; + + if (ppcemb_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID], + !env->nb_pids, i) >= 0) { + goto found_tlb; + } + + if (env->spr[SPR_BOOKE_PID1] && + ppcemb_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID1], 0, i) >= 0) { + goto found_tlb; + } + + if (env->spr[SPR_BOOKE_PID2] && + ppcemb_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID2], 0, i) >= 0) { + goto found_tlb; + } + + qemu_log_mask(CPU_LOG_MMU, "%s: TLB entry not found\n", __func__); + return -1; + +found_tlb: + + if (FIELD_EX64(env->msr, MSR, PR)) { + prot2 = tlb->prot & 0xF; + } else { + prot2 = (tlb->prot >> 4) & 0xF; + } + + /* Check the address space */ + if ((access_type == MMU_INST_FETCH ? + FIELD_EX64(env->msr, MSR, IR) : + FIELD_EX64(env->msr, MSR, DR)) != (tlb->attr & 1)) { + qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__); + return -1; + } + + *prot = prot2; + if (prot2 & prot_for_access_type(access_type)) { + qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__); + return 0; + } + + qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2); + return access_type == MMU_INST_FETCH ? 
-3 : -2; +} + +static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong address, + MMUAccessType access_type) +{ + ppcemb_tlb_t *tlb; + hwaddr raddr; + int i, ret; + + ret = -1; + raddr = (hwaddr)-1ULL; + for (i = 0; i < env->nb_tlb; i++) { + tlb = &env->tlb.tlbe[i]; + ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, + access_type, i); + if (ret != -1) { + break; + } + } + + if (ret >= 0) { + ctx->raddr = raddr; + qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx + " => " TARGET_FMT_plx " %d %d\n", __func__, + address, ctx->raddr, ctx->prot, ret); + } else { + qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx + " => " TARGET_FMT_plx " %d %d\n", __func__, + address, raddr, ctx->prot, ret); + } + + return ret; +} + +hwaddr booke206_tlb_to_page_size(CPUPPCState *env, + ppcmas_tlb_t *tlb) +{ + int tlbm_size; + + tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; + + return 1024ULL << tlbm_size; +} + +/* TLB check function for MAS based SoftTLBs */ +int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, + hwaddr *raddrp, target_ulong address, + uint32_t pid) +{ + hwaddr mask; + uint32_t tlb_pid; + + if (!FIELD_EX64(env->msr, MSR, CM)) { + /* In 32bit mode we can only address 32bit EAs */ + address = (uint32_t)address; + } + + /* Check valid flag */ + if (!(tlb->mas1 & MAS1_VALID)) { + return -1; + } + + mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); + qemu_log_mask(CPU_LOG_MMU, "%s: TLB ADDR=0x" TARGET_FMT_lx + " PID=0x%x MAS1=0x%x MAS2=0x%" PRIx64 " mask=0x%" + HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" PRIx32 "\n", + __func__, address, pid, tlb->mas1, tlb->mas2, mask, + tlb->mas7_3, tlb->mas8); + + /* Check PID */ + tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT; + if (tlb_pid != 0 && tlb_pid != pid) { + return -1; + } + + /* Check effective address */ + if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) { + return -1; + } + + if (raddrp) { + *raddrp = (tlb->mas7_3 & mask) | (address & ~mask); + } + + return 0; +} + +static bool is_epid_mmu(int mmu_idx) +{ + return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD; +} + +static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type) +{ + uint32_t esr = 0; + if (access_type == MMU_DATA_STORE) { + esr |= ESR_ST; + } + if (is_epid_mmu(mmu_idx)) { + esr |= ESR_EPID; + } + return esr; +} + +/* + * Get EPID register given the mmu_idx. If this is regular load, + * construct the EPID access bits from current processor state + * + * Get the effective AS and PR bits and the PID. The PID is returned + * only if EPID load is requested, otherwise the caller must detect + * the correct EPID. Return true if valid EPID is returned. 
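For the MAS-based TLB check above, booke206_tlb_to_page_size() turns MAS1[TSIZE] into a power-of-two size starting at 1 KiB, and ppcmas_tlb_check() derives its comparison mask from that size. A small sketch with an example value; the function name is invented for illustration:

#include <stdint.h>

/*
 * Illustration only: MAS1[TSIZE] -> page size and comparison mask,
 * mirroring booke206_tlb_to_page_size() and ppcmas_tlb_check().
 */
static inline uint64_t booke206_page_mask_example(unsigned tsize)
{
    uint64_t page_size = 1024ULL << tsize;   /* e.g. tsize 5 -> 32 KiB */
    uint64_t mask = ~(page_size - 1);        /* EPN/RPN comparison mask */
    return mask;
}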
+ */ +static bool mmubooke206_get_as(CPUPPCState *env, + int mmu_idx, uint32_t *epid_out, + bool *as_out, bool *pr_out) +{ + if (is_epid_mmu(mmu_idx)) { + uint32_t epidr; + if (mmu_idx == PPC_TLB_EPID_STORE) { + epidr = env->spr[SPR_BOOKE_EPSC]; + } else { + epidr = env->spr[SPR_BOOKE_EPLC]; + } + *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT; + *as_out = !!(epidr & EPID_EAS); + *pr_out = !!(epidr & EPID_EPR); + return true; + } else { + *as_out = FIELD_EX64(env->msr, MSR, DS); + *pr_out = FIELD_EX64(env->msr, MSR, PR); + return false; + } +} + +/* Check if the tlb found by hashing really matches */ +static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb, + hwaddr *raddr, int *prot, + target_ulong address, + MMUAccessType access_type, int mmu_idx) +{ + int prot2 = 0; + uint32_t epid; + bool as, pr; + bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); + + if (!use_epid) { + if (ppcmas_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID]) >= 0) { + goto found_tlb; + } + + if (env->spr[SPR_BOOKE_PID1] && + ppcmas_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID1]) >= 0) { + goto found_tlb; + } + + if (env->spr[SPR_BOOKE_PID2] && + ppcmas_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID2]) >= 0) { + goto found_tlb; + } + } else { + if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) { + goto found_tlb; + } + } + + qemu_log_mask(CPU_LOG_MMU, "%s: TLB entry not found\n", __func__); + return -1; + +found_tlb: + + if (pr) { + if (tlb->mas7_3 & MAS3_UR) { + prot2 |= PAGE_READ; + } + if (tlb->mas7_3 & MAS3_UW) { + prot2 |= PAGE_WRITE; + } + if (tlb->mas7_3 & MAS3_UX) { + prot2 |= PAGE_EXEC; + } + } else { + if (tlb->mas7_3 & MAS3_SR) { + prot2 |= PAGE_READ; + } + if (tlb->mas7_3 & MAS3_SW) { + prot2 |= PAGE_WRITE; + } + if (tlb->mas7_3 & MAS3_SX) { + prot2 |= PAGE_EXEC; + } + } + + /* Check the address space and permissions */ + if (access_type == MMU_INST_FETCH) { + /* There is no way to fetch code using epid load */ + assert(!use_epid); + as = FIELD_EX64(env->msr, MSR, IR); + } + + if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { + qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__); + return -1; + } + + *prot = prot2; + if (prot2 & prot_for_access_type(access_type)) { + qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__); + return 0; + } + + qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2); + return access_type == MMU_INST_FETCH ? 
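mmubooke206_get_as() above switches the translation context to the external-PID registers for EPID accesses: stores take PID/AS/PR from EPSC, loads from EPLC, and everything else falls back to the MSR. A minimal sketch of just the register selection (hypothetical helper reusing the patch's identifiers, so it is not standalone):

    /* EPID stores are translated via SPR_BOOKE_EPSC, EPID loads via SPR_BOOKE_EPLC. */
    static int epid_context_spr(int mmu_idx)
    {
        return mmu_idx == PPC_TLB_EPID_STORE ? SPR_BOOKE_EPSC : SPR_BOOKE_EPLC;
    }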
-3 : -2; +} + +static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong address, + MMUAccessType access_type, + int mmu_idx) +{ + ppcmas_tlb_t *tlb; + hwaddr raddr; + int i, j, ret; + + ret = -1; + raddr = (hwaddr)-1ULL; + + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + int ways = booke206_tlb_ways(env, i); + + for (j = 0; j < ways; j++) { + tlb = booke206_get_tlbm(env, i, address, j); + if (!tlb) { + continue; + } + ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address, + access_type, mmu_idx); + if (ret != -1) { + goto found_tlb; + } + } + } + +found_tlb: + + if (ret >= 0) { + ctx->raddr = raddr; + qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx + " => " TARGET_FMT_plx " %d %d\n", __func__, address, + ctx->raddr, ctx->prot, ret); + } else { + qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx + " => " TARGET_FMT_plx " %d %d\n", __func__, address, + raddr, ctx->prot, ret); + } + + return ret; +} + +static const char *book3e_tsize_to_str[32] = { + "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", + "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", + "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G", + "1T", "2T" +}; + +static void mmubooke_dump_mmu(CPUPPCState *env) +{ + ppcemb_tlb_t *entry; + int i; + + if (kvm_enabled() && !env->kvm_sw_tlb) { + qemu_printf("Cannot access KVM TLB\n"); + return; + } + + qemu_printf("\nTLB:\n"); + qemu_printf("Effective Physical Size PID Prot " + "Attr\n"); + + entry = &env->tlb.tlbe[0]; + for (i = 0; i < env->nb_tlb; i++, entry++) { + hwaddr ea, pa; + target_ulong mask; + uint64_t size = (uint64_t)entry->size; + char size_buf[20]; + + /* Check valid flag */ + if (!(entry->prot & PAGE_VALID)) { + continue; + } + + mask = ~(entry->size - 1); + ea = entry->EPN & mask; + pa = entry->RPN & mask; + /* Extend the physical address to 36 bits */ + pa |= (hwaddr)(entry->RPN & 0xF) << 32; + if (size >= 1 * MiB) { + snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB); + } else { + snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB); + } + qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n", + (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID, + entry->prot, entry->attr); + } + +} + +static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset, + int tlbsize) +{ + ppcmas_tlb_t *entry; + int i; + + qemu_printf("\nTLB%d:\n", tlbn); + qemu_printf("Effective Physical Size TID TS SRWX" + " URWX WIMGE U0123\n"); + + entry = &env->tlb.tlbm[offset]; + for (i = 0; i < tlbsize; i++, entry++) { + hwaddr ea, pa, size; + int tsize; + + if (!(entry->mas1 & MAS1_VALID)) { + continue; + } + + tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; + size = 1024ULL << tsize; + ea = entry->mas2 & ~(size - 1); + pa = entry->mas7_3 & ~(size - 1); + + qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c" + "U%c%c%c %c%c%c%c%c U%c%c%c%c\n", + (uint64_t)ea, (uint64_t)pa, + book3e_tsize_to_str[tsize], + (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT, + (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT, + entry->mas7_3 & MAS3_SR ? 'R' : '-', + entry->mas7_3 & MAS3_SW ? 'W' : '-', + entry->mas7_3 & MAS3_SX ? 'X' : '-', + entry->mas7_3 & MAS3_UR ? 'R' : '-', + entry->mas7_3 & MAS3_UW ? 'W' : '-', + entry->mas7_3 & MAS3_UX ? 'X' : '-', + entry->mas2 & MAS2_W ? 'W' : '-', + entry->mas2 & MAS2_I ? 'I' : '-', + entry->mas2 & MAS2_M ? 'M' : '-', + entry->mas2 & MAS2_G ? 
'G' : '-', + entry->mas2 & MAS2_E ? 'E' : '-', + entry->mas7_3 & MAS3_U0 ? '0' : '-', + entry->mas7_3 & MAS3_U1 ? '1' : '-', + entry->mas7_3 & MAS3_U2 ? '2' : '-', + entry->mas7_3 & MAS3_U3 ? '3' : '-'); + } +} + +static void mmubooke206_dump_mmu(CPUPPCState *env) +{ + int offset = 0; + int i; + + if (kvm_enabled() && !env->kvm_sw_tlb) { + qemu_printf("Cannot access KVM TLB\n"); + return; + } + + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + int size = booke206_tlb_size(env, i); + + if (size == 0) { + continue; + } + + mmubooke206_dump_one_tlb(env, i, offset, size); + offset += size; + } +} + +static void mmu6xx_dump_BATs(CPUPPCState *env, int type) +{ + target_ulong *BATlt, *BATut, *BATu, *BATl; + target_ulong BEPIl, BEPIu, bl; + int i; + + switch (type) { + case ACCESS_CODE: + BATlt = env->IBAT[1]; + BATut = env->IBAT[0]; + break; + default: + BATlt = env->DBAT[1]; + BATut = env->DBAT[0]; + break; + } + + for (i = 0; i < env->nb_BATs; i++) { + BATu = &BATut[i]; + BATl = &BATlt[i]; + BEPIu = *BATu & 0xF0000000; + BEPIl = *BATu & 0x0FFE0000; + bl = (*BATu & 0x00001FFC) << 15; + qemu_printf("%s BAT%d BATu " TARGET_FMT_lx + " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " + TARGET_FMT_lx " " TARGET_FMT_lx "\n", + type == ACCESS_CODE ? "code" : "data", i, + *BATu, *BATl, BEPIu, BEPIl, bl); + } +} + +static void mmu6xx_dump_mmu(CPUPPCState *env) +{ + PowerPCCPU *cpu = env_archcpu(env); + ppc6xx_tlb_t *tlb; + target_ulong sr; + int type, way, entry, i; + + qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu)); + qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu)); + + qemu_printf("\nSegment registers:\n"); + for (i = 0; i < 32; i++) { + sr = env->sr[i]; + if (sr & 0x80000000) { + qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x " + "CNTLR_SPEC=0x%05x\n", i, + sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0, + sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF), + (uint32_t)(sr & 0xFFFFF)); + } else { + qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i, + sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0, + sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0, + (uint32_t)(sr & 0x00FFFFFF)); + } + } + + qemu_printf("\nBATs:\n"); + mmu6xx_dump_BATs(env, ACCESS_INT); + mmu6xx_dump_BATs(env, ACCESS_CODE); + + if (env->id_tlbs != 1) { + qemu_printf("ERROR: 6xx MMU should have separated TLB" + " for code and data\n"); + } + + qemu_printf("\nTLBs [EPN EPN + SIZE]\n"); + + for (type = 0; type < 2; type++) { + for (way = 0; way < env->nb_ways; way++) { + for (entry = env->nb_tlb * type + env->tlb_per_way * way; + entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1)); + entry++) { + + tlb = &env->tlb.tlb6[entry]; + qemu_printf("%s TLB %02d/%02d way:%d %s [" + TARGET_FMT_lx " " TARGET_FMT_lx "]\n", + type ? "code" : "data", entry % env->nb_tlb, + env->nb_tlb, way, + pte_is_valid(tlb->pte0) ? 
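mmu6xx_dump_BATs() above splits each upper BAT register into the block effective page index (BEPI) and the block length mask. A small illustrative decode, using simplified fixed-width types and a hypothetical helper name:

    #include <stdint.h>

    /* BATu layout as used above: BEPI in bits 31..17, BL in bits 12..2,
     * Vs/Vp valid bits in bits 1..0 (checked in the BAT match path, not here). */
    static void decode_batu(uint32_t batu, uint32_t *bepi_u, uint32_t *bepi_l, uint32_t *bl)
    {
        *bepi_u = batu & 0xF0000000;          /* top nibble of the EA to match */
        *bepi_l = batu & 0x0FFE0000;          /* remaining BEPI bits           */
        *bl     = (batu & 0x00001FFC) << 15;  /* mask applied to the BEPIl comparison */
    }

A larger BL clears more of the 0x0FFE0000 comparison, which is how block sizes from 128 KiB up to 256 MiB are expressed.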
"valid" : "inval", + tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE); + } + } + } +} + +void dump_mmu(CPUPPCState *env) +{ + switch (env->mmu_model) { + case POWERPC_MMU_BOOKE: + mmubooke_dump_mmu(env); + break; + case POWERPC_MMU_BOOKE206: + mmubooke206_dump_mmu(env); + break; + case POWERPC_MMU_SOFT_6xx: + mmu6xx_dump_mmu(env); + break; +#if defined(TARGET_PPC64) + case POWERPC_MMU_64B: + case POWERPC_MMU_2_03: + case POWERPC_MMU_2_06: + case POWERPC_MMU_2_07: + dump_slb(env_archcpu(env)); + break; + case POWERPC_MMU_3_00: + if (ppc64_v3_radix(env_archcpu(env))) { + qemu_log_mask(LOG_UNIMP, "%s: the PPC64 MMU is unsupported\n", + __func__); + } else { + dump_slb(env_archcpu(env)); + } + break; +#endif + default: + qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__); + } +} + +static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, + MMUAccessType access_type) +{ + ctx->raddr = eaddr; + ctx->prot = PAGE_READ | PAGE_EXEC; + + switch (env->mmu_model) { + case POWERPC_MMU_SOFT_6xx: + case POWERPC_MMU_SOFT_4xx: + case POWERPC_MMU_REAL: + case POWERPC_MMU_BOOKE: + ctx->prot |= PAGE_WRITE; + break; + + default: + /* Caller's checks mean we should never get here for other models */ + g_assert_not_reached(); + } + + return 0; +} + +int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, + MMUAccessType access_type, int type, + int mmu_idx) +{ + int ret = -1; + bool real_mode = (type == ACCESS_CODE && !FIELD_EX64(env->msr, MSR, IR)) || + (type != ACCESS_CODE && !FIELD_EX64(env->msr, MSR, DR)); + + switch (env->mmu_model) { + case POWERPC_MMU_SOFT_6xx: + if (real_mode) { + ret = check_physical(env, ctx, eaddr, access_type); + } else { + /* Try to find a BAT */ + if (env->nb_BATs != 0) { + ret = get_bat_6xx_tlb(env, ctx, eaddr, access_type); + } + if (ret < 0) { + /* We didn't match any BAT entry or don't have BATs */ + ret = get_segment_6xx_tlb(env, ctx, eaddr, access_type, type); + } + } + break; + + case POWERPC_MMU_SOFT_4xx: + if (real_mode) { + ret = check_physical(env, ctx, eaddr, access_type); + } else { + ret = mmu40x_get_physical_address(env, ctx, eaddr, access_type); + } + break; + case POWERPC_MMU_BOOKE: + ret = mmubooke_get_physical_address(env, ctx, eaddr, access_type); + break; + case POWERPC_MMU_BOOKE206: + ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type, + mmu_idx); + break; + case POWERPC_MMU_MPC8xx: + /* XXX: TODO */ + cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n"); + break; + case POWERPC_MMU_REAL: + if (real_mode) { + ret = check_physical(env, ctx, eaddr, access_type); + } else { + cpu_abort(env_cpu(env), + "PowerPC in real mode do not do any translation\n"); + } + return -1; + default: + cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n"); + return -1; + } + + return ret; +} + +static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address, + MMUAccessType access_type, int mmu_idx) +{ + uint32_t epid; + bool as, pr; + uint32_t missed_tid = 0; + bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); + + if (access_type == MMU_INST_FETCH) { + as = FIELD_EX64(env->msr, MSR, IR); + } + env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; + env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; + env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; + env->spr[SPR_BOOKE_MAS3] = 0; + env->spr[SPR_BOOKE_MAS6] = 0; + env->spr[SPR_BOOKE_MAS7] = 0; + + /* AS */ + if (as) { + env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; + 
env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS; + } + + env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID; + env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK; + + if (!use_epid) { + switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) { + case MAS4_TIDSELD_PID0: + missed_tid = env->spr[SPR_BOOKE_PID]; + break; + case MAS4_TIDSELD_PID1: + missed_tid = env->spr[SPR_BOOKE_PID1]; + break; + case MAS4_TIDSELD_PID2: + missed_tid = env->spr[SPR_BOOKE_PID2]; + break; + } + env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16; + } else { + missed_tid = epid; + env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16; + } + env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT); + + + /* next victim logic */ + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; + env->last_way++; + env->last_way &= booke206_tlb_ways(env, 0) - 1; + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; +} + +/* Perform address translation */ +/* TODO: Split this by mmu_model. */ +static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr, + MMUAccessType access_type, + hwaddr *raddrp, int *psizep, int *protp, + int mmu_idx, bool guest_visible) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + mmu_ctx_t ctx; + int type; + int ret; + + if (access_type == MMU_INST_FETCH) { + /* code access */ + type = ACCESS_CODE; + } else if (guest_visible) { + /* data access */ + type = env->access_type; + } else { + type = ACCESS_INT; + } + + ret = get_physical_address_wtlb(env, &ctx, eaddr, access_type, + type, mmu_idx); + if (ret == 0) { + *raddrp = ctx.raddr; + *protp = ctx.prot; + *psizep = TARGET_PAGE_BITS; + return true; + } + + if (guest_visible) { + log_cpu_state_mask(CPU_LOG_MMU, cs, 0); + if (type == ACCESS_CODE) { + switch (ret) { + case -1: + /* No matches in page tables or TLB */ + switch (env->mmu_model) { + case POWERPC_MMU_SOFT_6xx: + cs->exception_index = POWERPC_EXCP_IFTLB; + env->error_code = 1 << 18; + env->spr[SPR_IMISS] = eaddr; + env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; + goto tlb_miss; + case POWERPC_MMU_SOFT_4xx: + cs->exception_index = POWERPC_EXCP_ITLB; + env->error_code = 0; + env->spr[SPR_40x_DEAR] = eaddr; + env->spr[SPR_40x_ESR] = 0x00000000; + break; + case POWERPC_MMU_BOOKE206: + booke206_update_mas_tlb_miss(env, eaddr, 2, mmu_idx); + /* fall through */ + case POWERPC_MMU_BOOKE: + cs->exception_index = POWERPC_EXCP_ITLB; + env->error_code = 0; + env->spr[SPR_BOOKE_DEAR] = eaddr; + env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, MMU_DATA_LOAD); + break; + case POWERPC_MMU_MPC8xx: + cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); + case POWERPC_MMU_REAL: + cpu_abort(cs, "PowerPC in real mode should never raise " + "any MMU exceptions\n"); + default: + cpu_abort(cs, "Unknown or invalid MMU model\n"); + } + break; + case -2: + /* Access rights violation */ + cs->exception_index = POWERPC_EXCP_ISI; + if ((env->mmu_model == POWERPC_MMU_BOOKE) || + (env->mmu_model == POWERPC_MMU_BOOKE206)) { + env->error_code = 0; + } else { + env->error_code = 0x08000000; + } + break; + case -3: + /* No execute protection violation */ + if ((env->mmu_model == POWERPC_MMU_BOOKE) || + (env->mmu_model == POWERPC_MMU_BOOKE206)) { + env->spr[SPR_BOOKE_ESR] = 0x00000000; + env->error_code = 0; + } else { + env->error_code = 0x10000000; + } + cs->exception_index = POWERPC_EXCP_ISI; + break; + case -4: + /* Direct store exception */ + /* No code fetch is allowed in direct-store areas */ + cs->exception_index = POWERPC_EXCP_ISI; + if ((env->mmu_model == POWERPC_MMU_BOOKE) || + (env->mmu_model == 
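The "next victim logic" in booke206_update_mas_tlb_miss() above rotates the ESEL/NV fields of MAS0 through the ways of TLB0. A tiny sketch of the rotation; the bit masking assumes the way count is a power of two, which is what the patch's own masking relies on:

    /* Round-robin victim selection: advance last_way and wrap within nr_ways. */
    static int next_victim_way(int last_way, int nr_ways)
    {
        return (last_way + 1) & (nr_ways - 1);
    }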
POWERPC_MMU_BOOKE206)) { + env->error_code = 0; + } else { + env->error_code = 0x10000000; + } + break; + } + } else { + switch (ret) { + case -1: + /* No matches in page tables or TLB */ + switch (env->mmu_model) { + case POWERPC_MMU_SOFT_6xx: + if (access_type == MMU_DATA_STORE) { + cs->exception_index = POWERPC_EXCP_DSTLB; + env->error_code = 1 << 16; + } else { + cs->exception_index = POWERPC_EXCP_DLTLB; + env->error_code = 0; + } + env->spr[SPR_DMISS] = eaddr; + env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; + tlb_miss: + env->error_code |= ctx.key << 19; + env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) + + get_pteg_offset32(cpu, ctx.hash[0]); + env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) + + get_pteg_offset32(cpu, ctx.hash[1]); + break; + case POWERPC_MMU_SOFT_4xx: + cs->exception_index = POWERPC_EXCP_DTLB; + env->error_code = 0; + env->spr[SPR_40x_DEAR] = eaddr; + if (access_type == MMU_DATA_STORE) { + env->spr[SPR_40x_ESR] = 0x00800000; + } else { + env->spr[SPR_40x_ESR] = 0x00000000; + } + break; + case POWERPC_MMU_MPC8xx: + /* XXX: TODO */ + cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); + case POWERPC_MMU_BOOKE206: + booke206_update_mas_tlb_miss(env, eaddr, access_type, mmu_idx); + /* fall through */ + case POWERPC_MMU_BOOKE: + cs->exception_index = POWERPC_EXCP_DTLB; + env->error_code = 0; + env->spr[SPR_BOOKE_DEAR] = eaddr; + env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); + break; + case POWERPC_MMU_REAL: + cpu_abort(cs, "PowerPC in real mode should never raise " + "any MMU exceptions\n"); + default: + cpu_abort(cs, "Unknown or invalid MMU model\n"); + } + break; + case -2: + /* Access rights violation */ + cs->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + if (env->mmu_model == POWERPC_MMU_SOFT_4xx) { + env->spr[SPR_40x_DEAR] = eaddr; + if (access_type == MMU_DATA_STORE) { + env->spr[SPR_40x_ESR] |= 0x00800000; + } + } else if ((env->mmu_model == POWERPC_MMU_BOOKE) || + (env->mmu_model == POWERPC_MMU_BOOKE206)) { + env->spr[SPR_BOOKE_DEAR] = eaddr; + env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); + } else { + env->spr[SPR_DAR] = eaddr; + if (access_type == MMU_DATA_STORE) { + env->spr[SPR_DSISR] = 0x0A000000; + } else { + env->spr[SPR_DSISR] = 0x08000000; + } + } + break; + case -4: + /* Direct store exception */ + switch (type) { + case ACCESS_FLOAT: + /* Floating point load/store */ + cs->exception_index = POWERPC_EXCP_ALIGN; + env->error_code = POWERPC_EXCP_ALIGN_FP; + env->spr[SPR_DAR] = eaddr; + break; + case ACCESS_RES: + /* lwarx, ldarx or stwcx. 
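In the data-side fault handling above, the DSISR written for an access-rights violation differs between loads and stores by a single bit. A short illustrative helper (not from the patch) that reproduces the two constants used there:

    #include <stdbool.h>
    #include <stdint.h>

    /* 0x08000000 marks the protection fault; 0x02000000 is OR'ed in for stores. */
    static uint32_t prot_fault_dsisr(bool is_store)
    {
        return UINT32_C(0x08000000) | (is_store ? UINT32_C(0x02000000) : 0);
    }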
*/ + cs->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + env->spr[SPR_DAR] = eaddr; + if (access_type == MMU_DATA_STORE) { + env->spr[SPR_DSISR] = 0x06000000; + } else { + env->spr[SPR_DSISR] = 0x04000000; + } + break; + case ACCESS_EXT: + /* eciwx or ecowx */ + cs->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + env->spr[SPR_DAR] = eaddr; + if (access_type == MMU_DATA_STORE) { + env->spr[SPR_DSISR] = 0x06100000; + } else { + env->spr[SPR_DSISR] = 0x04100000; + } + break; + default: + printf("DSI: invalid exception (%d)\n", ret); + cs->exception_index = POWERPC_EXCP_PROGRAM; + env->error_code = + POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL; + env->spr[SPR_DAR] = eaddr; + break; + } + break; + } + } + } + return false; +} + +/*****************************************************************************/ + +bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, + hwaddr *raddrp, int *psizep, int *protp, + int mmu_idx, bool guest_visible) +{ + switch (cpu->env.mmu_model) { +#if defined(TARGET_PPC64) + case POWERPC_MMU_3_00: + if (ppc64_v3_radix(cpu)) { + return ppc_radix64_xlate(cpu, eaddr, access_type, raddrp, + psizep, protp, mmu_idx, guest_visible); + } + /* fall through */ + case POWERPC_MMU_64B: + case POWERPC_MMU_2_03: + case POWERPC_MMU_2_06: + case POWERPC_MMU_2_07: + return ppc_hash64_xlate(cpu, eaddr, access_type, + raddrp, psizep, protp, mmu_idx, guest_visible); +#endif + + case POWERPC_MMU_32B: + return ppc_hash32_xlate(cpu, eaddr, access_type, raddrp, + psizep, protp, mmu_idx, guest_visible); + + default: + return ppc_jumbo_xlate(cpu, eaddr, access_type, raddrp, + psizep, protp, mmu_idx, guest_visible); + } +} + +hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + hwaddr raddr; + int s, p; + + /* + * Some MMUs have separate TLBs for code and data. If we only + * try an MMU_DATA_LOAD, we may not be able to read instructions + * mapped by code TLBs, so we also try a MMU_INST_FETCH. + */ + if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p, + cpu_mmu_index(&cpu->env, false), false) || + ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p, + cpu_mmu_index(&cpu->env, true), false)) { + return raddr & TARGET_PAGE_MASK; + } + return -1; +} diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c index 869d24d301..2a91f3f46a 100644 --- a/target/ppc/mmu_helper.c +++ b/target/ppc/mmu_helper.c @@ -33,193 +33,15 @@ #include "internal.h" #include "mmu-book3s-v3.h" #include "mmu-radix64.h" - -#ifdef CONFIG_TCG #include "exec/helper-proto.h" #include "exec/cpu_ldst.h" -#endif -/* #define DEBUG_MMU */ -/* #define DEBUG_BATS */ -/* #define DEBUG_SOFTWARE_TLB */ -/* #define DUMP_PAGE_TABLES */ + /* #define FLUSH_ALL_TLBS */ -#ifdef DEBUG_MMU -# define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0) -#else -# define LOG_MMU_STATE(cpu) do { } while (0) -#endif - -#ifdef DEBUG_SOFTWARE_TLB -# define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) -#else -# define LOG_SWTLB(...) do { } while (0) -#endif - -#ifdef DEBUG_BATS -# define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) -#else -# define LOG_BATS(...) 
do { } while (0) -#endif - /*****************************************************************************/ /* PowerPC MMU emulation */ -/* Context used internally during MMU translations */ -typedef struct mmu_ctx_t mmu_ctx_t; -struct mmu_ctx_t { - hwaddr raddr; /* Real address */ - hwaddr eaddr; /* Effective address */ - int prot; /* Protection bits */ - hwaddr hash[2]; /* Pagetable hash values */ - target_ulong ptem; /* Virtual segment ID | API */ - int key; /* Access key */ - int nx; /* Non-execute area */ -}; - -/* Common routines used by software and hardware TLBs emulation */ -static inline int pte_is_valid(target_ulong pte0) -{ - return pte0 & 0x80000000 ? 1 : 0; -} - -static inline void pte_invalidate(target_ulong *pte0) -{ - *pte0 &= ~0x80000000; -} - -#define PTE_PTEM_MASK 0x7FFFFFBF -#define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B) - -static int pp_check(int key, int pp, int nx) -{ - int access; - - /* Compute access rights */ - access = 0; - if (key == 0) { - switch (pp) { - case 0x0: - case 0x1: - case 0x2: - access |= PAGE_WRITE; - /* fall through */ - case 0x3: - access |= PAGE_READ; - break; - } - } else { - switch (pp) { - case 0x0: - access = 0; - break; - case 0x1: - case 0x3: - access = PAGE_READ; - break; - case 0x2: - access = PAGE_READ | PAGE_WRITE; - break; - } - } - if (nx == 0) { - access |= PAGE_EXEC; - } - - return access; -} - -static int check_prot(int prot, MMUAccessType access_type) -{ - return prot & prot_for_access_type(access_type) ? 0 : -2; -} - -static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0, - target_ulong pte1, int h, - MMUAccessType access_type) -{ - target_ulong ptem, mmask; - int access, ret, pteh, ptev, pp; - - ret = -1; - /* Check validity and table match */ - ptev = pte_is_valid(pte0); - pteh = (pte0 >> 6) & 1; - if (ptev && h == pteh) { - /* Check vsid & api */ - ptem = pte0 & PTE_PTEM_MASK; - mmask = PTE_CHECK_MASK; - pp = pte1 & 0x00000003; - if (ptem == ctx->ptem) { - if (ctx->raddr != (hwaddr)-1ULL) { - /* all matches should have equal RPN, WIMG & PP */ - if ((ctx->raddr & mmask) != (pte1 & mmask)) { - qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n"); - return -3; - } - } - /* Compute access rights */ - access = pp_check(ctx->key, pp, ctx->nx); - /* Keep the matching PTE information */ - ctx->raddr = pte1; - ctx->prot = access; - ret = check_prot(ctx->prot, access_type); - if (ret == 0) { - /* Access granted */ - qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n"); - } else { - /* Access right violation */ - qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); - } - } - } - - return ret; -} - -static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p, - int ret, MMUAccessType access_type) -{ - int store = 0; - - /* Update page flags */ - if (!(*pte1p & 0x00000100)) { - /* Update accessed flag */ - *pte1p |= 0x00000100; - store = 1; - } - if (!(*pte1p & 0x00000080)) { - if (access_type == MMU_DATA_STORE && ret == 0) { - /* Update changed flag */ - *pte1p |= 0x00000080; - store = 1; - } else { - /* Force page fault for first write access */ - ctx->prot &= ~PAGE_WRITE; - } - } - - return store; -} - /* Software driven TLB helpers */ -static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr, - int way, int is_code) -{ - int nr; - - /* Select TLB num in a way from address */ - nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1); - /* Select TLB way */ - nr += env->tlb_per_way * way; - /* 6xx have separate TLBs for instructions and data */ - if (is_code && env->id_tlbs == 1) { - nr += env->nb_tlb; - 
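The pp_check() helper removed above maps the hash-PTE PP bits plus the segment protection key to access rights. The resulting table, kept here as a quick reference derived from its two switch statements; PAGE_EXEC is OR'ed in separately whenever the segment is not marked no-execute:

    /*
     *   key = 0 :  pp 0, 1, 2 -> read/write    pp 3 -> read-only
     *   key = 1 :  pp 0 -> no access           pp 1, 3 -> read-only    pp 2 -> read/write
     */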
} - - return nr; -} - static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env) { ppc6xx_tlb_t *tlb; @@ -252,8 +74,8 @@ static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env, nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code); tlb = &env->tlb.tlb6[nr]; if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) { - LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr, - env->nb_tlb, eaddr); + qemu_log_mask(CPU_LOG_MMU, "TLB invalidate %d/%d " + TARGET_FMT_lx "\n", nr, env->nb_tlb, eaddr); pte_invalidate(&tlb->pte0); tlb_flush_page(cs, tlb->EPN); } @@ -270,7 +92,6 @@ static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env, ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0); } -#ifdef CONFIG_TCG static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way, int is_code, target_ulong pte0, target_ulong pte1) { @@ -279,8 +100,9 @@ static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way, nr = ppc6xx_tlb_getnum(env, EPN, way, is_code); tlb = &env->tlb.tlb6[nr]; - LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx - " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1); + qemu_log_mask(CPU_LOG_MMU, "Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " + TARGET_FMT_lx " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, + EPN, pte0, pte1); /* Invalidate any pending reference in QEMU for this virtual address */ ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1); tlb->pte0 = pte0; @@ -289,348 +111,7 @@ static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way, /* Store last way for LRU mechanism */ env->last_way = way; } -#endif -static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong eaddr, MMUAccessType access_type) -{ - ppc6xx_tlb_t *tlb; - int nr, best, way; - int ret; - - best = -1; - ret = -1; /* No TLB found */ - for (way = 0; way < env->nb_ways; way++) { - nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH); - tlb = &env->tlb.tlb6[nr]; - /* This test "emulates" the PTE index match for hardware TLBs */ - if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) { - LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx - "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb, - pte_is_valid(tlb->pte0) ? "valid" : "inval", - tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr); - continue; - } - LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " " - TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb, - pte_is_valid(tlb->pte0) ? "valid" : "inval", - tlb->EPN, eaddr, tlb->pte1, - access_type == MMU_DATA_STORE ? 'S' : 'L', - access_type == MMU_INST_FETCH ? 'I' : 'D'); - switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1, - 0, access_type)) { - case -3: - /* TLB inconsistency */ - return -1; - case -2: - /* Access violation */ - ret = -2; - best = nr; - break; - case -1: - default: - /* No match */ - break; - case 0: - /* access granted */ - /* - * XXX: we should go on looping to check all TLBs - * consistency but we can speed-up the whole thing as - * the result would be undefined if TLBs are not - * consistent. 
- */ - ret = 0; - best = nr; - goto done; - } - } - if (best != -1) { - done: - LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n", - ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret); - /* Update page flags */ - pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type); - } - - return ret; -} - -/* Perform BAT hit & translation */ -static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp, - int *validp, int *protp, target_ulong *BATu, - target_ulong *BATl) -{ - target_ulong bl; - int pp, valid, prot; - - bl = (*BATu & 0x00001FFC) << 15; - valid = 0; - prot = 0; - if (((msr_pr == 0) && (*BATu & 0x00000002)) || - ((msr_pr != 0) && (*BATu & 0x00000001))) { - valid = 1; - pp = *BATl & 0x00000003; - if (pp != 0) { - prot = PAGE_READ | PAGE_EXEC; - if (pp == 0x2) { - prot |= PAGE_WRITE; - } - } - } - *blp = bl; - *validp = valid; - *protp = prot; -} - -static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong virtual, MMUAccessType access_type) -{ - target_ulong *BATlt, *BATut, *BATu, *BATl; - target_ulong BEPIl, BEPIu, bl; - int i, valid, prot; - int ret = -1; - bool ifetch = access_type == MMU_INST_FETCH; - - LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__, - ifetch ? 'I' : 'D', virtual); - if (ifetch) { - BATlt = env->IBAT[1]; - BATut = env->IBAT[0]; - } else { - BATlt = env->DBAT[1]; - BATut = env->DBAT[0]; - } - for (i = 0; i < env->nb_BATs; i++) { - BATu = &BATut[i]; - BATl = &BATlt[i]; - BEPIu = *BATu & 0xF0000000; - BEPIl = *BATu & 0x0FFE0000; - bat_size_prot(env, &bl, &valid, &prot, BATu, BATl); - LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx - " BATl " TARGET_FMT_lx "\n", __func__, - ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl); - if ((virtual & 0xF0000000) == BEPIu && - ((virtual & 0x0FFE0000) & ~bl) == BEPIl) { - /* BAT matches */ - if (valid != 0) { - /* Get physical address */ - ctx->raddr = (*BATl & 0xF0000000) | - ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) | - (virtual & 0x0001F000); - /* Compute access rights */ - ctx->prot = prot; - ret = check_prot(ctx->prot, access_type); - if (ret == 0) { - LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n", - i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-', - ctx->prot & PAGE_WRITE ? 'W' : '-'); - } - break; - } - } - } - if (ret < 0) { -#if defined(DEBUG_BATS) - if (qemu_log_enabled()) { - LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual); - for (i = 0; i < 4; i++) { - BATu = &BATut[i]; - BATl = &BATlt[i]; - BEPIu = *BATu & 0xF0000000; - BEPIl = *BATu & 0x0FFE0000; - bl = (*BATu & 0x00001FFC) << 15; - LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx - " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " - TARGET_FMT_lx " " TARGET_FMT_lx "\n", - __func__, ifetch ? 'I' : 'D', i, virtual, - *BATu, *BATl, BEPIu, BEPIl, bl); - } - } -#endif - } - /* No hit */ - return ret; -} - -/* Perform segment based translation */ -static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong eaddr, MMUAccessType access_type, - int type) -{ - PowerPCCPU *cpu = env_archcpu(env); - hwaddr hash; - target_ulong vsid; - int ds, pr, target_page_bits; - int ret; - target_ulong sr, pgidx; - - pr = msr_pr; - ctx->eaddr = eaddr; - - sr = env->sr[eaddr >> 28]; - ctx->key = (((sr & 0x20000000) && (pr != 0)) || - ((sr & 0x40000000) && (pr == 0))) ? 1 : 0; - ds = sr & 0x80000000 ? 1 : 0; - ctx->nx = sr & 0x10000000 ? 
1 : 0; - vsid = sr & 0x00FFFFFF; - target_page_bits = TARGET_PAGE_BITS; - qemu_log_mask(CPU_LOG_MMU, - "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx - " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx - " ir=%d dr=%d pr=%d %d t=%d\n", - eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir, - (int)msr_dr, pr != 0 ? 1 : 0, access_type == MMU_DATA_STORE, type); - pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits; - hash = vsid ^ pgidx; - ctx->ptem = (vsid << 7) | (pgidx >> 10); - - qemu_log_mask(CPU_LOG_MMU, - "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n", - ctx->key, ds, ctx->nx, vsid); - ret = -1; - if (!ds) { - /* Check if instruction fetch is allowed, if needed */ - if (type != ACCESS_CODE || ctx->nx == 0) { - /* Page address translation */ - qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx - " htab_mask " TARGET_FMT_plx - " hash " TARGET_FMT_plx "\n", - ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash); - ctx->hash[0] = hash; - ctx->hash[1] = ~hash; - - /* Initialize real address with an invalid value */ - ctx->raddr = (hwaddr)-1ULL; - /* Software TLB search */ - ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type); -#if defined(DUMP_PAGE_TABLES) - if (qemu_loglevel_mask(CPU_LOG_MMU)) { - CPUState *cs = env_cpu(env); - hwaddr curaddr; - uint32_t a0, a1, a2, a3; - - qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx - "\n", ppc_hash32_hpt_base(cpu), - ppc_hash32_hpt_mask(cpu) + 0x80); - for (curaddr = ppc_hash32_hpt_base(cpu); - curaddr < (ppc_hash32_hpt_base(cpu) - + ppc_hash32_hpt_mask(cpu) + 0x80); - curaddr += 16) { - a0 = ldl_phys(cs->as, curaddr); - a1 = ldl_phys(cs->as, curaddr + 4); - a2 = ldl_phys(cs->as, curaddr + 8); - a3 = ldl_phys(cs->as, curaddr + 12); - if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { - qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n", - curaddr, a0, a1, a2, a3); - } - } - } -#endif - } else { - qemu_log_mask(CPU_LOG_MMU, "No access allowed\n"); - ret = -3; - } - } else { - target_ulong sr; - - qemu_log_mask(CPU_LOG_MMU, "direct store...\n"); - /* Direct-store segment : absolutely *BUGGY* for now */ - - /* - * Direct-store implies a 32-bit MMU. - * Check the Segment Register's bus unit ID (BUID). - */ - sr = env->sr[eaddr >> 28]; - if ((sr & 0x1FF00000) >> 20 == 0x07f) { - /* - * Memory-forced I/O controller interface access - * - * If T=1 and BUID=x'07F', the 601 performs a memory - * access to SR[28-31] LA[4-31], bypassing all protection - * mechanisms. - */ - ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF); - ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - return 0; - } - - switch (type) { - case ACCESS_INT: - /* Integer load/store : only access allowed */ - break; - case ACCESS_CODE: - /* No code fetch is allowed in direct-store areas */ - return -4; - case ACCESS_FLOAT: - /* Floating point load/store */ - return -4; - case ACCESS_RES: - /* lwarx, ldarx or srwcx. */ - return -4; - case ACCESS_CACHE: - /* - * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi - * - * Should make the instruction do no-op. 
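The removed get_segment_6xx_tlb() (still invoked from the new get_physical_address_wtlb() earlier in this patch) derives the two candidate PTEG hashes from the segment VSID and the page index. A compact sketch with simplified 32-bit types (illustrative only):

    #include <stdint.h>

    /* Primary hash is VSID xor page index; the secondary PTEG uses its complement. */
    static void hash32_pteg_hashes(uint32_t vsid, uint32_t pgidx,
                                   uint32_t *hash0, uint32_t *hash1)
    {
        *hash0 = vsid ^ pgidx;
        *hash1 = ~(*hash0);
    }

The tag compared against pte0, ctx->ptem, is built alongside as (vsid << 7) | (pgidx >> 10).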
As it already do - * no-op, it's quite easy :-) - */ - ctx->raddr = eaddr; - return 0; - case ACCESS_EXT: - /* eciwx or ecowx */ - return -4; - default: - qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need " - "address translation\n"); - return -4; - } - if ((access_type == MMU_DATA_STORE || ctx->key != 1) && - (access_type == MMU_DATA_LOAD || ctx->key != 0)) { - ctx->raddr = eaddr; - ret = 2; - } else { - ret = -2; - } - } - - return ret; -} - -/* Generic TLB check function for embedded PowerPC implementations */ -static int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb, - hwaddr *raddrp, - target_ulong address, uint32_t pid, int ext, - int i) -{ - target_ulong mask; - - /* Check valid flag */ - if (!(tlb->prot & PAGE_VALID)) { - return -1; - } - mask = ~(tlb->size - 1); - LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx - " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN, - mask, (uint32_t)tlb->PID, tlb->prot); - /* Check PID */ - if (tlb->PID != 0 && tlb->PID != pid) { - return -1; - } - /* Check effective address */ - if ((address & mask) != tlb->EPN) { - return -1; - } - *raddrp = (tlb->RPN & mask) | (address & ~mask); - if (ext) { - /* Extend the physical address to 36 bits */ - *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32; - } - - return 0; -} - -#ifdef CONFIG_TCG /* Generic TLB search function for PowerPC embedded implementations */ static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, uint32_t pid) @@ -651,7 +132,6 @@ static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, return ret; } -#endif /* Helpers specific to PowerPC 40x implementations */ static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env) @@ -666,166 +146,6 @@ static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env) tlb_flush(env_cpu(env)); } -static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong address, - MMUAccessType access_type) -{ - ppcemb_tlb_t *tlb; - hwaddr raddr; - int i, ret, zsel, zpr, pr; - - ret = -1; - raddr = (hwaddr)-1ULL; - pr = msr_pr; - for (i = 0; i < env->nb_tlb; i++) { - tlb = &env->tlb.tlbe[i]; - if (ppcemb_tlb_check(env, tlb, &raddr, address, - env->spr[SPR_40x_PID], 0, i) < 0) { - continue; - } - zsel = (tlb->attr >> 4) & 0xF; - zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3; - LOG_SWTLB("%s: TLB %d zsel %d zpr %d ty %d attr %08x\n", - __func__, i, zsel, zpr, access_type, tlb->attr); - /* Check execute enable bit */ - switch (zpr) { - case 0x2: - if (pr != 0) { - goto check_perms; - } - /* fall through */ - case 0x3: - /* All accesses granted */ - ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - ret = 0; - break; - case 0x0: - if (pr != 0) { - /* Raise Zone protection fault. 
*/ - env->spr[SPR_40x_ESR] = 1 << 22; - ctx->prot = 0; - ret = -2; - break; - } - /* fall through */ - case 0x1: - check_perms: - /* Check from TLB entry */ - ctx->prot = tlb->prot; - ret = check_prot(ctx->prot, access_type); - if (ret == -2) { - env->spr[SPR_40x_ESR] = 0; - } - break; - } - if (ret >= 0) { - ctx->raddr = raddr; - LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx - " %d %d\n", __func__, address, ctx->raddr, ctx->prot, - ret); - return 0; - } - } - LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx - " %d %d\n", __func__, address, raddr, ctx->prot, ret); - - return ret; -} - -void store_40x_sler(CPUPPCState *env, uint32_t val) -{ - /* XXX: TO BE FIXED */ - if (val != 0x00000000) { - cpu_abort(env_cpu(env), - "Little-endian regions are not supported by now\n"); - } - env->spr[SPR_405_SLER] = val; -} - -static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb, - hwaddr *raddr, int *prot, target_ulong address, - MMUAccessType access_type, int i) -{ - int prot2; - - if (ppcemb_tlb_check(env, tlb, raddr, address, - env->spr[SPR_BOOKE_PID], - !env->nb_pids, i) >= 0) { - goto found_tlb; - } - - if (env->spr[SPR_BOOKE_PID1] && - ppcemb_tlb_check(env, tlb, raddr, address, - env->spr[SPR_BOOKE_PID1], 0, i) >= 0) { - goto found_tlb; - } - - if (env->spr[SPR_BOOKE_PID2] && - ppcemb_tlb_check(env, tlb, raddr, address, - env->spr[SPR_BOOKE_PID2], 0, i) >= 0) { - goto found_tlb; - } - - LOG_SWTLB("%s: TLB entry not found\n", __func__); - return -1; - -found_tlb: - - if (msr_pr != 0) { - prot2 = tlb->prot & 0xF; - } else { - prot2 = (tlb->prot >> 4) & 0xF; - } - - /* Check the address space */ - if ((access_type == MMU_INST_FETCH ? msr_ir : msr_dr) != (tlb->attr & 1)) { - LOG_SWTLB("%s: AS doesn't match\n", __func__); - return -1; - } - - *prot = prot2; - if (prot2 & prot_for_access_type(access_type)) { - LOG_SWTLB("%s: good TLB!\n", __func__); - return 0; - } - - LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2); - return access_type == MMU_INST_FETCH ? 
-3 : -2; -} - -static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong address, - MMUAccessType access_type) -{ - ppcemb_tlb_t *tlb; - hwaddr raddr; - int i, ret; - - ret = -1; - raddr = (hwaddr)-1ULL; - for (i = 0; i < env->nb_tlb; i++) { - tlb = &env->tlb.tlbe[i]; - ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, - access_type, i); - if (ret != -1) { - break; - } - } - - if (ret >= 0) { - ctx->raddr = raddr; - LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx - " %d %d\n", __func__, address, ctx->raddr, ctx->prot, - ret); - } else { - LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx - " %d %d\n", __func__, address, raddr, ctx->prot, ret); - } - - return ret; -} - -#ifdef CONFIG_TCG static void booke206_flush_tlb(CPUPPCState *env, int flags, const int check_iprot) { @@ -847,879 +167,16 @@ static void booke206_flush_tlb(CPUPPCState *env, int flags, tlb_flush(env_cpu(env)); } -#endif -static hwaddr booke206_tlb_to_page_size(CPUPPCState *env, - ppcmas_tlb_t *tlb) -{ - int tlbm_size; - - tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; - - return 1024ULL << tlbm_size; -} - -/* TLB check function for MAS based SoftTLBs */ -static int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, - hwaddr *raddrp, target_ulong address, - uint32_t pid) -{ - hwaddr mask; - uint32_t tlb_pid; - - if (!msr_cm) { - /* In 32bit mode we can only address 32bit EAs */ - address = (uint32_t)address; - } - - /* Check valid flag */ - if (!(tlb->mas1 & MAS1_VALID)) { - return -1; - } - - mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); - LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%" - PRIx64 " mask=0x%" HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" - PRIx32 "\n", __func__, address, pid, tlb->mas1, tlb->mas2, mask, - tlb->mas7_3, tlb->mas8); - - /* Check PID */ - tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT; - if (tlb_pid != 0 && tlb_pid != pid) { - return -1; - } - - /* Check effective address */ - if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) { - return -1; - } - - if (raddrp) { - *raddrp = (tlb->mas7_3 & mask) | (address & ~mask); - } - - return 0; -} - -static bool is_epid_mmu(int mmu_idx) -{ - return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD; -} - -static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type) -{ - uint32_t esr = 0; - if (access_type == MMU_DATA_STORE) { - esr |= ESR_ST; - } - if (is_epid_mmu(mmu_idx)) { - esr |= ESR_EPID; - } - return esr; -} - -/* - * Get EPID register given the mmu_idx. If this is regular load, - * construct the EPID access bits from current processor state - * - * Get the effective AS and PR bits and the PID. The PID is returned - * only if EPID load is requested, otherwise the caller must detect - * the correct EPID. Return true if valid EPID is returned. 
- */ -static bool mmubooke206_get_as(CPUPPCState *env, - int mmu_idx, uint32_t *epid_out, - bool *as_out, bool *pr_out) -{ - if (is_epid_mmu(mmu_idx)) { - uint32_t epidr; - if (mmu_idx == PPC_TLB_EPID_STORE) { - epidr = env->spr[SPR_BOOKE_EPSC]; - } else { - epidr = env->spr[SPR_BOOKE_EPLC]; - } - *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT; - *as_out = !!(epidr & EPID_EAS); - *pr_out = !!(epidr & EPID_EPR); - return true; - } else { - *as_out = msr_ds; - *pr_out = msr_pr; - return false; - } -} - -/* Check if the tlb found by hashing really matches */ -static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb, - hwaddr *raddr, int *prot, - target_ulong address, - MMUAccessType access_type, int mmu_idx) -{ - int prot2 = 0; - uint32_t epid; - bool as, pr; - bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); - - if (!use_epid) { - if (ppcmas_tlb_check(env, tlb, raddr, address, - env->spr[SPR_BOOKE_PID]) >= 0) { - goto found_tlb; - } - - if (env->spr[SPR_BOOKE_PID1] && - ppcmas_tlb_check(env, tlb, raddr, address, - env->spr[SPR_BOOKE_PID1]) >= 0) { - goto found_tlb; - } - - if (env->spr[SPR_BOOKE_PID2] && - ppcmas_tlb_check(env, tlb, raddr, address, - env->spr[SPR_BOOKE_PID2]) >= 0) { - goto found_tlb; - } - } else { - if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) { - goto found_tlb; - } - } - - LOG_SWTLB("%s: TLB entry not found\n", __func__); - return -1; - -found_tlb: - - if (pr) { - if (tlb->mas7_3 & MAS3_UR) { - prot2 |= PAGE_READ; - } - if (tlb->mas7_3 & MAS3_UW) { - prot2 |= PAGE_WRITE; - } - if (tlb->mas7_3 & MAS3_UX) { - prot2 |= PAGE_EXEC; - } - } else { - if (tlb->mas7_3 & MAS3_SR) { - prot2 |= PAGE_READ; - } - if (tlb->mas7_3 & MAS3_SW) { - prot2 |= PAGE_WRITE; - } - if (tlb->mas7_3 & MAS3_SX) { - prot2 |= PAGE_EXEC; - } - } - - /* Check the address space and permissions */ - if (access_type == MMU_INST_FETCH) { - /* There is no way to fetch code using epid load */ - assert(!use_epid); - as = msr_ir; - } - - if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { - LOG_SWTLB("%s: AS doesn't match\n", __func__); - return -1; - } - - *prot = prot2; - if (prot2 & prot_for_access_type(access_type)) { - LOG_SWTLB("%s: good TLB!\n", __func__); - return 0; - } - - LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2); - return access_type == MMU_INST_FETCH ? 
-3 : -2; -} - -static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong address, - MMUAccessType access_type, - int mmu_idx) -{ - ppcmas_tlb_t *tlb; - hwaddr raddr; - int i, j, ret; - - ret = -1; - raddr = (hwaddr)-1ULL; - - for (i = 0; i < BOOKE206_MAX_TLBN; i++) { - int ways = booke206_tlb_ways(env, i); - - for (j = 0; j < ways; j++) { - tlb = booke206_get_tlbm(env, i, address, j); - if (!tlb) { - continue; - } - ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address, - access_type, mmu_idx); - if (ret != -1) { - goto found_tlb; - } - } - } - -found_tlb: - - if (ret >= 0) { - ctx->raddr = raddr; - LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx - " %d %d\n", __func__, address, ctx->raddr, ctx->prot, - ret); - } else { - LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx - " %d %d\n", __func__, address, raddr, ctx->prot, ret); - } - - return ret; -} - -static const char *book3e_tsize_to_str[32] = { - "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", - "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", - "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G", - "1T", "2T" -}; - -static void mmubooke_dump_mmu(CPUPPCState *env) -{ - ppcemb_tlb_t *entry; - int i; - - if (kvm_enabled() && !env->kvm_sw_tlb) { - qemu_printf("Cannot access KVM TLB\n"); - return; - } - - qemu_printf("\nTLB:\n"); - qemu_printf("Effective Physical Size PID Prot " - "Attr\n"); - - entry = &env->tlb.tlbe[0]; - for (i = 0; i < env->nb_tlb; i++, entry++) { - hwaddr ea, pa; - target_ulong mask; - uint64_t size = (uint64_t)entry->size; - char size_buf[20]; - - /* Check valid flag */ - if (!(entry->prot & PAGE_VALID)) { - continue; - } - - mask = ~(entry->size - 1); - ea = entry->EPN & mask; - pa = entry->RPN & mask; - /* Extend the physical address to 36 bits */ - pa |= (hwaddr)(entry->RPN & 0xF) << 32; - if (size >= 1 * MiB) { - snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB); - } else { - snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB); - } - qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n", - (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID, - entry->prot, entry->attr); - } - -} - -static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset, - int tlbsize) -{ - ppcmas_tlb_t *entry; - int i; - - qemu_printf("\nTLB%d:\n", tlbn); - qemu_printf("Effective Physical Size TID TS SRWX" - " URWX WIMGE U0123\n"); - - entry = &env->tlb.tlbm[offset]; - for (i = 0; i < tlbsize; i++, entry++) { - hwaddr ea, pa, size; - int tsize; - - if (!(entry->mas1 & MAS1_VALID)) { - continue; - } - - tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; - size = 1024ULL << tsize; - ea = entry->mas2 & ~(size - 1); - pa = entry->mas7_3 & ~(size - 1); - - qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c" - "U%c%c%c %c%c%c%c%c U%c%c%c%c\n", - (uint64_t)ea, (uint64_t)pa, - book3e_tsize_to_str[tsize], - (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT, - (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT, - entry->mas7_3 & MAS3_SR ? 'R' : '-', - entry->mas7_3 & MAS3_SW ? 'W' : '-', - entry->mas7_3 & MAS3_SX ? 'X' : '-', - entry->mas7_3 & MAS3_UR ? 'R' : '-', - entry->mas7_3 & MAS3_UW ? 'W' : '-', - entry->mas7_3 & MAS3_UX ? 'X' : '-', - entry->mas2 & MAS2_W ? 'W' : '-', - entry->mas2 & MAS2_I ? 'I' : '-', - entry->mas2 & MAS2_M ? 'M' : '-', - entry->mas2 & MAS2_G ? 'G' : '-', - entry->mas2 & MAS2_E ? 
'E' : '-', - entry->mas7_3 & MAS3_U0 ? '0' : '-', - entry->mas7_3 & MAS3_U1 ? '1' : '-', - entry->mas7_3 & MAS3_U2 ? '2' : '-', - entry->mas7_3 & MAS3_U3 ? '3' : '-'); - } -} - -static void mmubooke206_dump_mmu(CPUPPCState *env) -{ - int offset = 0; - int i; - - if (kvm_enabled() && !env->kvm_sw_tlb) { - qemu_printf("Cannot access KVM TLB\n"); - return; - } - - for (i = 0; i < BOOKE206_MAX_TLBN; i++) { - int size = booke206_tlb_size(env, i); - - if (size == 0) { - continue; - } - - mmubooke206_dump_one_tlb(env, i, offset, size); - offset += size; - } -} - -static void mmu6xx_dump_BATs(CPUPPCState *env, int type) -{ - target_ulong *BATlt, *BATut, *BATu, *BATl; - target_ulong BEPIl, BEPIu, bl; - int i; - - switch (type) { - case ACCESS_CODE: - BATlt = env->IBAT[1]; - BATut = env->IBAT[0]; - break; - default: - BATlt = env->DBAT[1]; - BATut = env->DBAT[0]; - break; - } - - for (i = 0; i < env->nb_BATs; i++) { - BATu = &BATut[i]; - BATl = &BATlt[i]; - BEPIu = *BATu & 0xF0000000; - BEPIl = *BATu & 0x0FFE0000; - bl = (*BATu & 0x00001FFC) << 15; - qemu_printf("%s BAT%d BATu " TARGET_FMT_lx - " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " - TARGET_FMT_lx " " TARGET_FMT_lx "\n", - type == ACCESS_CODE ? "code" : "data", i, - *BATu, *BATl, BEPIu, BEPIl, bl); - } -} - -static void mmu6xx_dump_mmu(CPUPPCState *env) -{ - PowerPCCPU *cpu = env_archcpu(env); - ppc6xx_tlb_t *tlb; - target_ulong sr; - int type, way, entry, i; - - qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu)); - qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu)); - - qemu_printf("\nSegment registers:\n"); - for (i = 0; i < 32; i++) { - sr = env->sr[i]; - if (sr & 0x80000000) { - qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x " - "CNTLR_SPEC=0x%05x\n", i, - sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0, - sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF), - (uint32_t)(sr & 0xFFFFF)); - } else { - qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i, - sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0, - sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0, - (uint32_t)(sr & 0x00FFFFFF)); - } - } - - qemu_printf("\nBATs:\n"); - mmu6xx_dump_BATs(env, ACCESS_INT); - mmu6xx_dump_BATs(env, ACCESS_CODE); - - if (env->id_tlbs != 1) { - qemu_printf("ERROR: 6xx MMU should have separated TLB" - " for code and data\n"); - } - - qemu_printf("\nTLBs [EPN EPN + SIZE]\n"); - - for (type = 0; type < 2; type++) { - for (way = 0; way < env->nb_ways; way++) { - for (entry = env->nb_tlb * type + env->tlb_per_way * way; - entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1)); - entry++) { - - tlb = &env->tlb.tlb6[entry]; - qemu_printf("%s TLB %02d/%02d way:%d %s [" - TARGET_FMT_lx " " TARGET_FMT_lx "]\n", - type ? "code" : "data", entry % env->nb_tlb, - env->nb_tlb, way, - pte_is_valid(tlb->pte0) ? 
"valid" : "inval", - tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE); - } - } - } -} - -void dump_mmu(CPUPPCState *env) -{ - switch (env->mmu_model) { - case POWERPC_MMU_BOOKE: - mmubooke_dump_mmu(env); - break; - case POWERPC_MMU_BOOKE206: - mmubooke206_dump_mmu(env); - break; - case POWERPC_MMU_SOFT_6xx: - case POWERPC_MMU_SOFT_74xx: - mmu6xx_dump_mmu(env); - break; -#if defined(TARGET_PPC64) - case POWERPC_MMU_64B: - case POWERPC_MMU_2_03: - case POWERPC_MMU_2_06: - case POWERPC_MMU_2_07: - dump_slb(env_archcpu(env)); - break; - case POWERPC_MMU_3_00: - if (ppc64_v3_radix(env_archcpu(env))) { - qemu_log_mask(LOG_UNIMP, "%s: the PPC64 MMU is unsupported\n", - __func__); - } else { - dump_slb(env_archcpu(env)); - } - break; -#endif - default: - qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__); - } -} - -static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, - MMUAccessType access_type) -{ - int in_plb, ret; - - ctx->raddr = eaddr; - ctx->prot = PAGE_READ | PAGE_EXEC; - ret = 0; - switch (env->mmu_model) { - case POWERPC_MMU_SOFT_6xx: - case POWERPC_MMU_SOFT_74xx: - case POWERPC_MMU_SOFT_4xx: - case POWERPC_MMU_REAL: - case POWERPC_MMU_BOOKE: - ctx->prot |= PAGE_WRITE; - break; - - case POWERPC_MMU_SOFT_4xx_Z: - if (unlikely(msr_pe != 0)) { - /* - * 403 family add some particular protections, using - * PBL/PBU registers for accesses with no translation. - */ - in_plb = - /* Check PLB validity */ - (env->pb[0] < env->pb[1] && - /* and address in plb area */ - eaddr >= env->pb[0] && eaddr < env->pb[1]) || - (env->pb[2] < env->pb[3] && - eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0; - if (in_plb ^ msr_px) { - /* Access in protected area */ - if (access_type == MMU_DATA_STORE) { - /* Access is not allowed */ - ret = -2; - } - } else { - /* Read-write access is allowed */ - ctx->prot |= PAGE_WRITE; - } - } - break; - - default: - /* Caller's checks mean we should never get here for other models */ - abort(); - return -1; - } - - return ret; -} - -static int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong eaddr, - MMUAccessType access_type, int type, - int mmu_idx) -{ - int ret = -1; - bool real_mode = (type == ACCESS_CODE && msr_ir == 0) - || (type != ACCESS_CODE && msr_dr == 0); - - switch (env->mmu_model) { - case POWERPC_MMU_SOFT_6xx: - case POWERPC_MMU_SOFT_74xx: - if (real_mode) { - ret = check_physical(env, ctx, eaddr, access_type); - } else { - /* Try to find a BAT */ - if (env->nb_BATs != 0) { - ret = get_bat_6xx_tlb(env, ctx, eaddr, access_type); - } - if (ret < 0) { - /* We didn't match any BAT entry or don't have BATs */ - ret = get_segment_6xx_tlb(env, ctx, eaddr, access_type, type); - } - } - break; - - case POWERPC_MMU_SOFT_4xx: - case POWERPC_MMU_SOFT_4xx_Z: - if (real_mode) { - ret = check_physical(env, ctx, eaddr, access_type); - } else { - ret = mmu40x_get_physical_address(env, ctx, eaddr, access_type); - } - break; - case POWERPC_MMU_BOOKE: - ret = mmubooke_get_physical_address(env, ctx, eaddr, access_type); - break; - case POWERPC_MMU_BOOKE206: - ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type, - mmu_idx); - break; - case POWERPC_MMU_MPC8xx: - /* XXX: TODO */ - cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n"); - break; - case POWERPC_MMU_REAL: - if (real_mode) { - ret = check_physical(env, ctx, eaddr, access_type); - } else { - cpu_abort(env_cpu(env), - "PowerPC in real mode do not do any translation\n"); - } - return -1; - default: - cpu_abort(env_cpu(env), "Unknown or invalid MMU 
model\n"); - return -1; - } - - return ret; -} - -#ifdef CONFIG_TCG static int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, MMUAccessType access_type, int type) { return get_physical_address_wtlb(env, ctx, eaddr, access_type, type, 0); } -#endif - -static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address, - MMUAccessType access_type, int mmu_idx) -{ - uint32_t epid; - bool as, pr; - uint32_t missed_tid = 0; - bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); - - if (access_type == MMU_INST_FETCH) { - as = msr_ir; - } - env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; - env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; - env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; - env->spr[SPR_BOOKE_MAS3] = 0; - env->spr[SPR_BOOKE_MAS6] = 0; - env->spr[SPR_BOOKE_MAS7] = 0; - - /* AS */ - if (as) { - env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; - env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS; - } - - env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID; - env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK; - - if (!use_epid) { - switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) { - case MAS4_TIDSELD_PID0: - missed_tid = env->spr[SPR_BOOKE_PID]; - break; - case MAS4_TIDSELD_PID1: - missed_tid = env->spr[SPR_BOOKE_PID1]; - break; - case MAS4_TIDSELD_PID2: - missed_tid = env->spr[SPR_BOOKE_PID2]; - break; - } - env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16; - } else { - missed_tid = epid; - env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16; - } - env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT); - /* next victim logic */ - env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; - env->last_way++; - env->last_way &= booke206_tlb_ways(env, 0) - 1; - env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; -} -/* Perform address translation */ -/* TODO: Split this by mmu_model. 
*/ -static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr, - MMUAccessType access_type, - hwaddr *raddrp, int *psizep, int *protp, - int mmu_idx, bool guest_visible) -{ - CPUState *cs = CPU(cpu); - CPUPPCState *env = &cpu->env; - mmu_ctx_t ctx; - int type; - int ret; - - if (access_type == MMU_INST_FETCH) { - /* code access */ - type = ACCESS_CODE; - } else if (guest_visible) { - /* data access */ - type = env->access_type; - } else { - type = ACCESS_INT; - } - - ret = get_physical_address_wtlb(env, &ctx, eaddr, access_type, - type, mmu_idx); - if (ret == 0) { - *raddrp = ctx.raddr; - *protp = ctx.prot; - *psizep = TARGET_PAGE_BITS; - return true; - } - - if (guest_visible) { - LOG_MMU_STATE(cs); - if (type == ACCESS_CODE) { - switch (ret) { - case -1: - /* No matches in page tables or TLB */ - switch (env->mmu_model) { - case POWERPC_MMU_SOFT_6xx: - cs->exception_index = POWERPC_EXCP_IFTLB; - env->error_code = 1 << 18; - env->spr[SPR_IMISS] = eaddr; - env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; - goto tlb_miss; - case POWERPC_MMU_SOFT_74xx: - cs->exception_index = POWERPC_EXCP_IFTLB; - goto tlb_miss_74xx; - case POWERPC_MMU_SOFT_4xx: - case POWERPC_MMU_SOFT_4xx_Z: - cs->exception_index = POWERPC_EXCP_ITLB; - env->error_code = 0; - env->spr[SPR_40x_DEAR] = eaddr; - env->spr[SPR_40x_ESR] = 0x00000000; - break; - case POWERPC_MMU_BOOKE206: - booke206_update_mas_tlb_miss(env, eaddr, 2, mmu_idx); - /* fall through */ - case POWERPC_MMU_BOOKE: - cs->exception_index = POWERPC_EXCP_ITLB; - env->error_code = 0; - env->spr[SPR_BOOKE_DEAR] = eaddr; - env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, MMU_DATA_LOAD); - break; - case POWERPC_MMU_MPC8xx: - cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); - case POWERPC_MMU_REAL: - cpu_abort(cs, "PowerPC in real mode should never raise " - "any MMU exceptions\n"); - default: - cpu_abort(cs, "Unknown or invalid MMU model\n"); - } - break; - case -2: - /* Access rights violation */ - cs->exception_index = POWERPC_EXCP_ISI; - env->error_code = 0x08000000; - break; - case -3: - /* No execute protection violation */ - if ((env->mmu_model == POWERPC_MMU_BOOKE) || - (env->mmu_model == POWERPC_MMU_BOOKE206)) { - env->spr[SPR_BOOKE_ESR] = 0x00000000; - } - cs->exception_index = POWERPC_EXCP_ISI; - env->error_code = 0x10000000; - break; - case -4: - /* Direct store exception */ - /* No code fetch is allowed in direct-store areas */ - cs->exception_index = POWERPC_EXCP_ISI; - env->error_code = 0x10000000; - break; - } - } else { - switch (ret) { - case -1: - /* No matches in page tables or TLB */ - switch (env->mmu_model) { - case POWERPC_MMU_SOFT_6xx: - if (access_type == MMU_DATA_STORE) { - cs->exception_index = POWERPC_EXCP_DSTLB; - env->error_code = 1 << 16; - } else { - cs->exception_index = POWERPC_EXCP_DLTLB; - env->error_code = 0; - } - env->spr[SPR_DMISS] = eaddr; - env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; - tlb_miss: - env->error_code |= ctx.key << 19; - env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) + - get_pteg_offset32(cpu, ctx.hash[0]); - env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) + - get_pteg_offset32(cpu, ctx.hash[1]); - break; - case POWERPC_MMU_SOFT_74xx: - if (access_type == MMU_DATA_STORE) { - cs->exception_index = POWERPC_EXCP_DSTLB; - } else { - cs->exception_index = POWERPC_EXCP_DLTLB; - } - tlb_miss_74xx: - /* Implement LRU algorithm */ - env->error_code = ctx.key << 19; - env->spr[SPR_TLBMISS] = (eaddr & ~((target_ulong)0x3)) | - ((env->last_way + 1) & (env->nb_ways - 1)); - env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem; - 
break; - case POWERPC_MMU_SOFT_4xx: - case POWERPC_MMU_SOFT_4xx_Z: - cs->exception_index = POWERPC_EXCP_DTLB; - env->error_code = 0; - env->spr[SPR_40x_DEAR] = eaddr; - if (access_type == MMU_DATA_STORE) { - env->spr[SPR_40x_ESR] = 0x00800000; - } else { - env->spr[SPR_40x_ESR] = 0x00000000; - } - break; - case POWERPC_MMU_MPC8xx: - /* XXX: TODO */ - cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); - case POWERPC_MMU_BOOKE206: - booke206_update_mas_tlb_miss(env, eaddr, access_type, mmu_idx); - /* fall through */ - case POWERPC_MMU_BOOKE: - cs->exception_index = POWERPC_EXCP_DTLB; - env->error_code = 0; - env->spr[SPR_BOOKE_DEAR] = eaddr; - env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); - break; - case POWERPC_MMU_REAL: - cpu_abort(cs, "PowerPC in real mode should never raise " - "any MMU exceptions\n"); - default: - cpu_abort(cs, "Unknown or invalid MMU model\n"); - } - break; - case -2: - /* Access rights violation */ - cs->exception_index = POWERPC_EXCP_DSI; - env->error_code = 0; - if (env->mmu_model == POWERPC_MMU_SOFT_4xx - || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) { - env->spr[SPR_40x_DEAR] = eaddr; - if (access_type == MMU_DATA_STORE) { - env->spr[SPR_40x_ESR] |= 0x00800000; - } - } else if ((env->mmu_model == POWERPC_MMU_BOOKE) || - (env->mmu_model == POWERPC_MMU_BOOKE206)) { - env->spr[SPR_BOOKE_DEAR] = eaddr; - env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); - } else { - env->spr[SPR_DAR] = eaddr; - if (access_type == MMU_DATA_STORE) { - env->spr[SPR_DSISR] = 0x0A000000; - } else { - env->spr[SPR_DSISR] = 0x08000000; - } - } - break; - case -4: - /* Direct store exception */ - switch (type) { - case ACCESS_FLOAT: - /* Floating point load/store */ - cs->exception_index = POWERPC_EXCP_ALIGN; - env->error_code = POWERPC_EXCP_ALIGN_FP; - env->spr[SPR_DAR] = eaddr; - break; - case ACCESS_RES: - /* lwarx, ldarx or stwcx. 
*/ - cs->exception_index = POWERPC_EXCP_DSI; - env->error_code = 0; - env->spr[SPR_DAR] = eaddr; - if (access_type == MMU_DATA_STORE) { - env->spr[SPR_DSISR] = 0x06000000; - } else { - env->spr[SPR_DSISR] = 0x04000000; - } - break; - case ACCESS_EXT: - /* eciwx or ecowx */ - cs->exception_index = POWERPC_EXCP_DSI; - env->error_code = 0; - env->spr[SPR_DAR] = eaddr; - if (access_type == MMU_DATA_STORE) { - env->spr[SPR_DSISR] = 0x06100000; - } else { - env->spr[SPR_DSISR] = 0x04100000; - } - break; - default: - printf("DSI: invalid exception (%d)\n", ret); - cs->exception_index = POWERPC_EXCP_PROGRAM; - env->error_code = - POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL; - env->spr[SPR_DAR] = eaddr; - break; - } - break; - } - } - } - return false; -} - -#ifdef CONFIG_TCG /*****************************************************************************/ /* BATs management */ #if !defined(FLUSH_ALL_TLBS) @@ -1733,25 +190,27 @@ static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu, end = base + mask + 0x00020000; if (((end - base) >> TARGET_PAGE_BITS) > 1024) { /* Flushing 1024 4K pages is slower than a complete flush */ - LOG_BATS("Flush all BATs\n"); + qemu_log_mask(CPU_LOG_MMU, "Flush all BATs\n"); tlb_flush(cs); - LOG_BATS("Flush done\n"); + qemu_log_mask(CPU_LOG_MMU, "Flush done\n"); return; } - LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " (" - TARGET_FMT_lx ")\n", base, end, mask); + qemu_log_mask(CPU_LOG_MMU, "Flush BAT from " TARGET_FMT_lx + " to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", + base, end, mask); for (page = base; page != end; page += TARGET_PAGE_SIZE) { tlb_flush_page(cs, page); } - LOG_BATS("Flush done\n"); + qemu_log_mask(CPU_LOG_MMU, "Flush done\n"); } #endif static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr, target_ulong value) { - LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID, - nr, ul == 0 ? 'u' : 'l', value, env->nip); + qemu_log_mask(CPU_LOG_MMU, "Set %cBAT%d%c to " TARGET_FMT_lx " (" + TARGET_FMT_lx ")\n", ID, nr, ul == 0 ? 
'u' : 'l', + value, env->nip); } void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value) @@ -1820,90 +279,6 @@ void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value) env->DBAT[1][nr] = value; } -void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value) -{ - target_ulong mask; -#if defined(FLUSH_ALL_TLBS) - int do_inval; -#endif - - dump_store_bat(env, 'I', 0, nr, value); - if (env->IBAT[0][nr] != value) { -#if defined(FLUSH_ALL_TLBS) - do_inval = 0; -#endif - mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; - if (env->IBAT[1][nr] & 0x40) { - /* Invalidate BAT only if it is valid */ -#if !defined(FLUSH_ALL_TLBS) - do_invalidate_BAT(env, env->IBAT[0][nr], mask); -#else - do_inval = 1; -#endif - } - /* - * When storing valid upper BAT, mask BEPI and BRPN and - * invalidate all TLBs covered by this BAT - */ - env->IBAT[0][nr] = (value & 0x00001FFFUL) | - (value & ~0x0001FFFFUL & ~mask); - env->DBAT[0][nr] = env->IBAT[0][nr]; - if (env->IBAT[1][nr] & 0x40) { -#if !defined(FLUSH_ALL_TLBS) - do_invalidate_BAT(env, env->IBAT[0][nr], mask); -#else - do_inval = 1; -#endif - } -#if defined(FLUSH_ALL_TLBS) - if (do_inval) { - tlb_flush(env_cpu(env)); - } -#endif - } -} - -void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value) -{ -#if !defined(FLUSH_ALL_TLBS) - target_ulong mask; -#else - int do_inval; -#endif - - dump_store_bat(env, 'I', 1, nr, value); - if (env->IBAT[1][nr] != value) { -#if defined(FLUSH_ALL_TLBS) - do_inval = 0; -#endif - if (env->IBAT[1][nr] & 0x40) { -#if !defined(FLUSH_ALL_TLBS) - mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; - do_invalidate_BAT(env, env->IBAT[0][nr], mask); -#else - do_inval = 1; -#endif - } - if (value & 0x40) { -#if !defined(FLUSH_ALL_TLBS) - mask = (value << 17) & 0x0FFE0000UL; - do_invalidate_BAT(env, env->IBAT[0][nr], mask); -#else - do_inval = 1; -#endif - } - env->IBAT[1][nr] = value; - env->DBAT[1][nr] = value; -#if defined(FLUSH_ALL_TLBS) - if (do_inval) { - tlb_flush(env_cpu(env)); - } -#endif - } -} -#endif - -#ifdef CONFIG_TCG /*****************************************************************************/ /* TLB management */ void ppc_tlb_invalidate_all(CPUPPCState *env) @@ -1916,11 +291,9 @@ void ppc_tlb_invalidate_all(CPUPPCState *env) #endif /* defined(TARGET_PPC64) */ switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: - case POWERPC_MMU_SOFT_74xx: ppc6xx_tlb_invalidate_all(env); break; case POWERPC_MMU_SOFT_4xx: - case POWERPC_MMU_SOFT_4xx_Z: ppc4xx_tlb_invalidate_all(env); break; case POWERPC_MMU_REAL: @@ -1937,7 +310,6 @@ void ppc_tlb_invalidate_all(CPUPPCState *env) booke206_flush_tlb(env, -1, 0); break; case POWERPC_MMU_32B: - case POWERPC_MMU_601: env->tlb_need_flush = 0; tlb_flush(env_cpu(env)); break; @@ -1947,9 +319,7 @@ void ppc_tlb_invalidate_all(CPUPPCState *env) break; } } -#endif -#ifdef CONFIG_TCG void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr) { #if !defined(FLUSH_ALL_TLBS) @@ -1967,14 +337,12 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr) #endif /* defined(TARGET_PPC64) */ switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: - case POWERPC_MMU_SOFT_74xx: ppc6xx_tlb_invalidate_virt(env, addr, 0); if (env->id_tlbs == 1) { ppc6xx_tlb_invalidate_virt(env, addr, 1); } break; case POWERPC_MMU_32B: - case POWERPC_MMU_601: /* * Actual CPUs invalidate entire congruence classes based on * the geometry of their TLBs and some OSes take that into @@ -2061,6 +429,160 @@ void helper_tlbie(CPUPPCState *env, target_ulong addr) 
ppc_tlb_invalidate_one(env, addr); } +#if defined(TARGET_PPC64) + +/* Invalidation Selector */ +#define TLBIE_IS_VA 0 +#define TLBIE_IS_PID 1 +#define TLBIE_IS_LPID 2 +#define TLBIE_IS_ALL 3 + +/* Radix Invalidation Control */ +#define TLBIE_RIC_TLB 0 +#define TLBIE_RIC_PWC 1 +#define TLBIE_RIC_ALL 2 +#define TLBIE_RIC_GRP 3 + +/* Radix Actual Page sizes */ +#define TLBIE_R_AP_4K 0 +#define TLBIE_R_AP_64K 5 +#define TLBIE_R_AP_2M 1 +#define TLBIE_R_AP_1G 2 + +/* RB field masks */ +#define TLBIE_RB_EPN_MASK PPC_BITMASK(0, 51) +#define TLBIE_RB_IS_MASK PPC_BITMASK(52, 53) +#define TLBIE_RB_AP_MASK PPC_BITMASK(56, 58) + +void helper_tlbie_isa300(CPUPPCState *env, target_ulong rb, target_ulong rs, + uint32_t flags) +{ + unsigned ric = (flags & TLBIE_F_RIC_MASK) >> TLBIE_F_RIC_SHIFT; + /* + * With the exception of the checks for invalid instruction forms, + * PRS is currently ignored, because we don't know if a given TLB entry + * is process or partition scoped. + */ + bool prs = flags & TLBIE_F_PRS; + bool r = flags & TLBIE_F_R; + bool local = flags & TLBIE_F_LOCAL; + bool effR; + unsigned is = extract64(rb, PPC_BIT_NR(53), 2); + unsigned ap; /* actual page size */ + target_ulong addr, pgoffs_mask; + + qemu_log_mask(CPU_LOG_MMU, + "%s: local=%d addr=" TARGET_FMT_lx " ric=%u prs=%d r=%d is=%u\n", + __func__, local, rb & TARGET_PAGE_MASK, ric, prs, r, is); + + effR = FIELD_EX64(env->msr, MSR, HV) ? r : env->spr[SPR_LPCR] & LPCR_HR; + + /* Partial TLB invalidation is supported for Radix only for now. */ + if (!effR) { + goto inval_all; + } + + /* Check for invalid instruction forms (effR=1). */ + if (unlikely(ric == TLBIE_RIC_GRP || + ((ric == TLBIE_RIC_PWC || ric == TLBIE_RIC_ALL) && + is == TLBIE_IS_VA) || + (!prs && is == TLBIE_IS_PID))) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid instruction form: ric=%u prs=%d r=%d is=%u\n", + __func__, ric, prs, r, is); + goto invalid; + } + + /* We don't cache Page Walks. */ + if (ric == TLBIE_RIC_PWC) { + if (local) { + unsigned set = extract64(rb, PPC_BIT_NR(51), 12); + if (set != 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid set: %d\n", + __func__, set); + goto invalid; + } + } + return; + } + + /* + * Invalidation by LPID or PID is not supported, so fallback + * to full TLB flush in these cases. + */ + if (is != TLBIE_IS_VA) { + goto inval_all; + } + + /* + * The results of an attempt to invalidate a translation outside of + * quadrant 0 for Radix Tree translation (effR=1, RIC=0, PRS=1, IS=0, + * and EA 0:1 != 0b00) are boundedly undefined. + */ + if (unlikely(ric == TLBIE_RIC_TLB && prs && is == TLBIE_IS_VA && + (rb & R_EADDR_QUADRANT) != R_EADDR_QUADRANT0)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: attempt to invalidate a translation outside of quadrant 0\n", + __func__); + goto inval_all; + } + + assert(is == TLBIE_IS_VA); + assert(ric == TLBIE_RIC_TLB || ric == TLBIE_RIC_ALL); + + ap = extract64(rb, PPC_BIT_NR(58), 3); + switch (ap) { + case TLBIE_R_AP_4K: + pgoffs_mask = 0xfffull; + break; + + case TLBIE_R_AP_64K: + pgoffs_mask = 0xffffull; + break; + + case TLBIE_R_AP_2M: + pgoffs_mask = 0x1fffffull; + break; + + case TLBIE_R_AP_1G: + pgoffs_mask = 0x3fffffffull; + break; + + default: + /* + * If the value specified in RS 0:31, RS 32:63, RB 54:55, RB 56:58, + * RB 44:51, or RB 56:63, when it is needed to perform the specified + * operation, is not supported by the implementation, the instruction + * is treated as if the instruction form were invalid. 
+ */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid AP: %d\n", __func__, ap); + goto invalid; + } + + addr = rb & TLBIE_RB_EPN_MASK & ~pgoffs_mask; + + if (local) { + tlb_flush_page(env_cpu(env), addr); + } else { + tlb_flush_page_all_cpus(env_cpu(env), addr); + } + return; + +inval_all: + env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; + if (!local) { + env->tlb_need_flush |= TLB_NEED_GLOBAL_FLUSH; + } + return; + +invalid: + raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL | + POWERPC_EXCP_INVAL_INVAL, GETPC()); +} + +#endif + void helper_tlbiva(CPUPPCState *env, target_ulong addr) { /* tlbiva instruction only exists on BookE */ @@ -2086,9 +608,9 @@ static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code) } way = (env->spr[SPR_SRR1] >> 17) & 1; (void)EPN; /* avoid a compiler warning */ - LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx - " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP, - RPN, way); + qemu_log_mask(CPU_LOG_MMU, "%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx + " PTE0 " TARGET_FMT_lx " PTE1 " TARGET_FMT_lx " way %d\n", + __func__, new_EPN, EPN, CMP, RPN, way); /* Store this TLB */ ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK), way, is_code, CMP, RPN); @@ -2104,35 +626,6 @@ void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN) do_6xx_tlb(env, EPN, 1); } -/* PowerPC 74xx software TLB load instructions helpers */ -static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code) -{ - target_ulong RPN, CMP, EPN; - int way; - - RPN = env->spr[SPR_PTELO]; - CMP = env->spr[SPR_PTEHI]; - EPN = env->spr[SPR_TLBMISS] & ~0x3; - way = env->spr[SPR_TLBMISS] & 0x3; - (void)EPN; /* avoid a compiler warning */ - LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx - " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP, - RPN, way); - /* Store this TLB */ - ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK), - way, is_code, CMP, RPN); -} - -void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN) -{ - do_74xx_tlb(env, EPN, 0); -} - -void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN) -{ - do_74xx_tlb(env, EPN, 1); -} - /*****************************************************************************/ /* PowerPC 601 specific instructions (POWER bridge) */ @@ -2241,6 +734,14 @@ static inline int booke_page_size_to_tlb(target_ulong page_size) #define PPC4XX_TLBLO_ATTR_MASK 0x000000FF #define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00 +void helper_store_40x_pid(CPUPPCState *env, target_ulong val) +{ + if (env->spr[SPR_40x_PID] != val) { + env->spr[SPR_40x_PID] = val; + env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; + } +} + target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry) { ppcemb_tlb_t *tlb; @@ -2258,7 +759,7 @@ target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry) size = PPC4XX_TLBHI_SIZE_DEFAULT; } ret |= size << PPC4XX_TLBHI_SIZE_SHIFT; - env->spr[SPR_40x_PID] = tlb->PID; + helper_store_40x_pid(env, tlb->PID); return ret; } @@ -2286,15 +787,17 @@ void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry, ppcemb_tlb_t *tlb; target_ulong page, end; - LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry, + qemu_log_mask(CPU_LOG_MMU, "%s entry %d val " TARGET_FMT_lx "\n", + __func__, (int)entry, val); entry &= PPC4XX_TLB_ENTRY_MASK; tlb = &env->tlb.tlbe[entry]; /* Invalidate previous TLB (if it's valid) */ if (tlb->prot & PAGE_VALID) { end = tlb->EPN + tlb->size; - LOG_SWTLB("%s: invalidate old 
TLB %d start " TARGET_FMT_lx " end " - TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end); + qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start " + TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__, + (int)entry, tlb->EPN, end); for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) { tlb_flush_page(cs, page); } @@ -2323,18 +826,20 @@ void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry, tlb->prot &= ~PAGE_VALID; } tlb->PID = env->spr[SPR_40x_PID]; /* PID */ - LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx - " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__, - (int)entry, tlb->RPN, tlb->EPN, tlb->size, - tlb->prot & PAGE_READ ? 'r' : '-', - tlb->prot & PAGE_WRITE ? 'w' : '-', - tlb->prot & PAGE_EXEC ? 'x' : '-', - tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID); + qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " TARGET_FMT_plx + " EPN " TARGET_FMT_lx " size " TARGET_FMT_lx + " prot %c%c%c%c PID %d\n", __func__, + (int)entry, tlb->RPN, tlb->EPN, tlb->size, + tlb->prot & PAGE_READ ? 'r' : '-', + tlb->prot & PAGE_WRITE ? 'w' : '-', + tlb->prot & PAGE_EXEC ? 'x' : '-', + tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID); /* Invalidate new TLB (if valid) */ if (tlb->prot & PAGE_VALID) { end = tlb->EPN + tlb->size; - LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end " - TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end); + qemu_log_mask(CPU_LOG_MMU, "%s: invalidate TLB %d start " + TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__, + (int)entry, tlb->EPN, end); for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) { tlb_flush_page(cs, page); } @@ -2346,8 +851,8 @@ void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry, { ppcemb_tlb_t *tlb; - LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry, - val); + qemu_log_mask(CPU_LOG_MMU, "%s entry %i val " TARGET_FMT_lx "\n", + __func__, (int)entry, val); entry &= PPC4XX_TLB_ENTRY_MASK; tlb = &env->tlb.tlbe[entry]; tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK; @@ -2359,13 +864,16 @@ void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry, if (val & PPC4XX_TLBLO_WR) { tlb->prot |= PAGE_WRITE; } - LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx - " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__, - (int)entry, tlb->RPN, tlb->EPN, tlb->size, - tlb->prot & PAGE_READ ? 'r' : '-', - tlb->prot & PAGE_WRITE ? 'w' : '-', - tlb->prot & PAGE_EXEC ? 'x' : '-', - tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID); + qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " TARGET_FMT_plx + " EPN " TARGET_FMT_lx + " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__, + (int)entry, tlb->RPN, tlb->EPN, tlb->size, + tlb->prot & PAGE_READ ? 'r' : '-', + tlb->prot & PAGE_WRITE ? 'w' : '-', + tlb->prot & PAGE_EXEC ? 'x' : '-', + tlb->prot & PAGE_VALID ? 
'v' : '-', (int)tlb->PID); + + env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; } target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address) @@ -2381,8 +889,8 @@ void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry, target_ulong EPN, RPN, size; int do_flush_tlbs; - LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n", - __func__, word, (int)entry, value); + qemu_log_mask(CPU_LOG_MMU, "%s word %d entry %d value " TARGET_FMT_lx "\n", + __func__, word, (int)entry, value); do_flush_tlbs = 0; entry &= 0x3F; tlb = &env->tlb.tlbe[entry]; @@ -2581,7 +1089,7 @@ void helper_booke206_tlbwe(CPUPPCState *env) } if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) && - !msr_gs) { + !FIELD_EX64(env->msr, MSR, GS)) { /* XXX we don't support direct LRAT setting yet */ fprintf(stderr, "cpu: don't support LRAT setting yet\n"); return; @@ -2608,7 +1116,7 @@ void helper_booke206_tlbwe(CPUPPCState *env) POWERPC_EXCP_INVAL_INVAL, GETPC()); } - if (msr_gs) { + if (FIELD_EX64(env->msr, MSR, GS)) { cpu_abort(env_cpu(env), "missing HV implementation\n"); } @@ -2649,7 +1157,7 @@ void helper_booke206_tlbwe(CPUPPCState *env) /* Add a mask for page attributes */ mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E; - if (!msr_cm) { + if (!FIELD_EX64(env->msr, MSR, CM)) { /* * Executing a tlbwe instruction in 32-bit mode will set bits * 0:31 of the TLB EPN field to zero. @@ -2749,7 +1257,7 @@ void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address) } static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn, - uint32_t ea) + vaddr ea) { int i; int ways = booke206_tlb_ways(env, tlbn); @@ -2891,62 +1399,8 @@ void helper_check_tlb_flush_global(CPUPPCState *env) { check_tlb_flush(env, true); } -#endif /* CONFIG_TCG */ -/*****************************************************************************/ -static bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, - hwaddr *raddrp, int *psizep, int *protp, - int mmu_idx, bool guest_visible) -{ - switch (cpu->env.mmu_model) { -#if defined(TARGET_PPC64) - case POWERPC_MMU_3_00: - if (ppc64_v3_radix(cpu)) { - return ppc_radix64_xlate(cpu, eaddr, access_type, - raddrp, psizep, protp, mmu_idx, guest_visible); - } - /* fall through */ - case POWERPC_MMU_64B: - case POWERPC_MMU_2_03: - case POWERPC_MMU_2_06: - case POWERPC_MMU_2_07: - return ppc_hash64_xlate(cpu, eaddr, access_type, - raddrp, psizep, protp, mmu_idx, guest_visible); -#endif - - case POWERPC_MMU_32B: - case POWERPC_MMU_601: - return ppc_hash32_xlate(cpu, eaddr, access_type, - raddrp, psizep, protp, mmu_idx, guest_visible); - - default: - return ppc_jumbo_xlate(cpu, eaddr, access_type, raddrp, - psizep, protp, mmu_idx, guest_visible); - } -} - -hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) -{ - PowerPCCPU *cpu = POWERPC_CPU(cs); - hwaddr raddr; - int s, p; - - /* - * Some MMUs have separate TLBs for code and data. If we only - * try an MMU_DATA_LOAD, we may not be able to read instructions - * mapped by code TLBs, so we also try a MMU_INST_FETCH. 
- */ - if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p, - cpu_mmu_index(&cpu->env, false), false) || - ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p, - cpu_mmu_index(&cpu->env, true), false)) { - return raddr & TARGET_PAGE_MASK; - } - return -1; -} - -#ifdef CONFIG_TCG bool ppc_cpu_tlb_fill(CPUState *cs, vaddr eaddr, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) @@ -2967,4 +1421,3 @@ bool ppc_cpu_tlb_fill(CPUState *cs, vaddr eaddr, int size, raise_exception_err_ra(&cpu->env, cs->exception_index, cpu->env.error_code, retaddr); } -#endif diff --git a/target/ppc/monitor.c b/target/ppc/monitor.c index a475108b2d..8250b1304e 100644 --- a/target/ppc/monitor.c +++ b/target/ppc/monitor.c @@ -44,10 +44,20 @@ static target_long monitor_get_ccr(Monitor *mon, const struct MonitorDef *md, return u; } +static target_long monitor_get_xer(Monitor *mon, const struct MonitorDef *md, + int val) +{ + CPUArchState *env = mon_get_cpu_env(mon); + return cpu_read_xer(env); +} + static target_long monitor_get_decr(Monitor *mon, const struct MonitorDef *md, int val) { CPUArchState *env = mon_get_cpu_env(mon); + if (!env->tb_env) { + return 0; + } return cpu_ppc_load_decr(env); } @@ -55,6 +65,9 @@ static target_long monitor_get_tbu(Monitor *mon, const struct MonitorDef *md, int val) { CPUArchState *env = mon_get_cpu_env(mon); + if (!env->tb_env) { + return 0; + } return cpu_ppc_load_tbu(env); } @@ -62,6 +75,9 @@ static target_long monitor_get_tbl(Monitor *mon, const struct MonitorDef *md, int val) { CPUArchState *env = mon_get_cpu_env(mon); + if (!env->tb_env) { + return 0; + } return cpu_ppc_load_tbl(env); } @@ -85,7 +101,7 @@ const MonitorDef monitor_defs[] = { { "decr", 0, &monitor_get_decr, }, { "ccr|cr", 0, &monitor_get_ccr, }, /* Machine state register */ - { "xer", offsetof(CPUPPCState, xer) }, + { "xer", 0, &monitor_get_xer }, { "msr", offsetof(CPUPPCState, msr) }, { "tbu", 0, &monitor_get_tbu, }, { "tbl", 0, &monitor_get_tbl, }, diff --git a/target/ppc/power8-pmu-regs.c.inc b/target/ppc/power8-pmu-regs.c.inc new file mode 100644 index 0000000000..c3cc919ee4 --- /dev/null +++ b/target/ppc/power8-pmu-regs.c.inc @@ -0,0 +1,325 @@ +/* + * PMU register read/write functions for TCG IBM POWER chips + * + * Copyright IBM Corp. 2021 + * + * Authors: + * Daniel Henrique Barboza + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) + +/* + * Checks whether the Group A SPR (MMCR0, MMCR2, MMCRA, and the + * PMCs) has problem state read access. + * + * Read acccess is granted for all PMCC values but 0b01, where a + * Facility Unavailable Interrupt will occur. + */ +static bool spr_groupA_read_allowed(DisasContext *ctx) +{ + if (!ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) { + gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_PMU); + return false; + } + + return true; +} + +/* + * Checks whether the Group A SPR (MMCR0, MMCR2, MMCRA, and the + * PMCs) has problem state write access. + * + * Write acccess is granted for PMCC values 0b10 and 0b11. Userspace + * writing with PMCC 0b00 will generate a Hypervisor Emulation + * Assistance Interrupt. Userspace writing with PMCC 0b01 will + * generate a Facility Unavailable Interrupt. 
+ */ +static bool spr_groupA_write_allowed(DisasContext *ctx) +{ + if (ctx->mmcr0_pmcc0) { + return true; + } + + if (ctx->mmcr0_pmcc1) { + /* PMCC = 0b01 */ + gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_PMU); + } else { + /* PMCC = 0b00 */ + gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG); + } + + return false; +} + +/* + * Helper function to avoid code repetition between MMCR0 and + * MMCR2 problem state write functions. + * + * 'ret' must be tcg_temp_freed() by the caller. + */ +static TCGv masked_gprn_for_spr_write(int gprn, int sprn, + uint64_t spr_mask) +{ + TCGv ret = tcg_temp_new(); + TCGv t0 = tcg_temp_new(); + + /* 'ret' starts with all mask bits cleared */ + gen_load_spr(ret, sprn); + tcg_gen_andi_tl(ret, ret, ~(spr_mask)); + + /* Apply the mask into 'gprn' in a temp var */ + tcg_gen_andi_tl(t0, cpu_gpr[gprn], spr_mask); + + /* Add the masked gprn bits into 'ret' */ + tcg_gen_or_tl(ret, ret, t0); + + tcg_temp_free(t0); + + return ret; +} + +void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn) +{ + TCGv t0; + + if (!spr_groupA_read_allowed(ctx)) { + return; + } + + t0 = tcg_temp_new(); + + /* + * Filter out all bits but FC, PMAO, and PMAE, according + * to ISA v3.1, in 10.4.4 Monitor Mode Control Register 0, + * fourth paragraph. + */ + gen_load_spr(t0, SPR_POWER_MMCR0); + tcg_gen_andi_tl(t0, t0, MMCR0_UREG_MASK); + tcg_gen_mov_tl(cpu_gpr[gprn], t0); + + tcg_temp_free(t0); +} + +static void write_MMCR0_common(DisasContext *ctx, TCGv val) +{ + /* + * helper_store_mmcr0 will make clock based operations that + * will cause 'bad icount read' errors if we do not execute + * gen_icount_io_start() beforehand. + */ + gen_icount_io_start(ctx); + gen_helper_store_mmcr0(cpu_env, val); + + /* + * End the translation block because MMCR0 writes can change + * ctx->pmu_insn_cnt. + */ + ctx->base.is_jmp = DISAS_EXIT_UPDATE; +} + +void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn) +{ + TCGv masked_gprn; + + if (!spr_groupA_write_allowed(ctx)) { + return; + } + + /* + * Filter out all bits but FC, PMAO, and PMAE, according + * to ISA v3.1, in 10.4.4 Monitor Mode Control Register 0, + * fourth paragraph. + */ + masked_gprn = masked_gprn_for_spr_write(gprn, SPR_POWER_MMCR0, + MMCR0_UREG_MASK); + write_MMCR0_common(ctx, masked_gprn); + + tcg_temp_free(masked_gprn); +} + +void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn) +{ + TCGv t0; + + if (!spr_groupA_read_allowed(ctx)) { + return; + } + + t0 = tcg_temp_new(); + + /* + * On read, filter out all bits that are not FCnP0 bits. + * When MMCR0[PMCC] is set to 0b10 or 0b11, providing + * problem state programs read/write access to MMCR2, + * only the FCnP0 bits can be accessed. All other bits are + * not changed when mtspr is executed in problem state, and + * all other bits return 0s when mfspr is executed in problem + * state, according to ISA v3.1, section 10.4.6 Monitor Mode + * Control Register 2, p. 1316, third paragraph. + */ + gen_load_spr(t0, SPR_POWER_MMCR2); + tcg_gen_andi_tl(t0, t0, MMCR2_UREG_MASK); + tcg_gen_mov_tl(cpu_gpr[gprn], t0); + + tcg_temp_free(t0); +} + +void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn) +{ + TCGv masked_gprn; + + if (!spr_groupA_write_allowed(ctx)) { + return; + } + + /* + * Filter the bits that can be written using MMCR2_UREG_MASK, + * similar to what is done in spr_write_MMCR0_ureg(). 
+ */ + masked_gprn = masked_gprn_for_spr_write(gprn, SPR_POWER_MMCR2, + MMCR2_UREG_MASK); + gen_store_spr(SPR_POWER_MMCR2, masked_gprn); + + tcg_temp_free(masked_gprn); +} + +void spr_read_PMC(DisasContext *ctx, int gprn, int sprn) +{ + TCGv_i32 t_sprn = tcg_const_i32(sprn); + + gen_icount_io_start(ctx); + gen_helper_read_pmc(cpu_gpr[gprn], cpu_env, t_sprn); + + tcg_temp_free_i32(t_sprn); +} + +void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn) +{ + if (!spr_groupA_read_allowed(ctx)) { + return; + } + + spr_read_PMC(ctx, gprn, sprn + 0x10); +} + +void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn) +{ + /* + * If PMCC = 0b11, PMC5 and PMC6 aren't included in the Performance + * Monitor, and a read attempt results in a Facility Unavailable + * Interrupt. + */ + if (ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) { + gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_PMU); + return; + } + + /* The remaining steps are similar to PMCs 1-4 userspace read */ + spr_read_PMC14_ureg(ctx, gprn, sprn); +} + +void spr_write_PMC(DisasContext *ctx, int sprn, int gprn) +{ + TCGv_i32 t_sprn = tcg_const_i32(sprn); + + gen_icount_io_start(ctx); + gen_helper_store_pmc(cpu_env, t_sprn, cpu_gpr[gprn]); + + tcg_temp_free_i32(t_sprn); +} + +void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn) +{ + if (!spr_groupA_write_allowed(ctx)) { + return; + } + + spr_write_PMC(ctx, sprn + 0x10, gprn); +} + +void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn) +{ + /* + * If PMCC = 0b11, PMC5 and PMC6 aren't included in the Performance + * Monitor, and a write attempt results in a Facility Unavailable + * Interrupt. + */ + if (ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) { + gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_PMU); + return; + } + + /* The remaining steps are similar to PMCs 1-4 userspace write */ + spr_write_PMC14_ureg(ctx, sprn, gprn); +} + +void spr_write_MMCR0(DisasContext *ctx, int sprn, int gprn) +{ + write_MMCR0_common(ctx, cpu_gpr[gprn]); +} + +void spr_write_MMCR1(DisasContext *ctx, int sprn, int gprn) +{ + gen_icount_io_start(ctx); + gen_helper_store_mmcr1(cpu_env, cpu_gpr[gprn]); +} +#else +void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn) +{ + spr_read_ureg(ctx, gprn, sprn); +} + +void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn) +{ + spr_noaccess(ctx, gprn, sprn); +} + +void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn) +{ + spr_read_ureg(ctx, gprn, sprn); +} + +void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn) +{ + spr_noaccess(ctx, gprn, sprn); +} + +void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn) +{ + spr_read_ureg(ctx, gprn, sprn); +} + +void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn) +{ + spr_read_ureg(ctx, gprn, sprn); +} + +void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn) +{ + spr_noaccess(ctx, gprn, sprn); +} + +void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn) +{ + spr_noaccess(ctx, gprn, sprn); +} + +void spr_write_MMCR0(DisasContext *ctx, int sprn, int gprn) +{ + spr_write_generic(ctx, sprn, gprn); +} + +void spr_write_MMCR1(DisasContext *ctx, int sprn, int gprn) +{ + spr_write_generic(ctx, sprn, gprn); +} + +void spr_write_PMC(DisasContext *ctx, int sprn, int gprn) +{ + spr_write_generic(ctx, sprn, gprn); +} +#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */ diff --git a/target/ppc/power8-pmu.c b/target/ppc/power8-pmu.c new file mode 100644 index 0000000000..fccd011088 --- /dev/null +++ b/target/ppc/power8-pmu.c @@ -0,0 +1,359 
@@ +/* + * PMU emulation helpers for TCG IBM POWER chips + * + * Copyright IBM Corp. 2021 + * + * Authors: + * Daniel Henrique Barboza + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "helper_regs.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "hw/ppc/ppc.h" +#include "power8-pmu.h" + +#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) + +static bool pmc_has_overflow_enabled(CPUPPCState *env, int sprn) +{ + if (sprn == SPR_POWER_PMC1) { + return env->spr[SPR_POWER_MMCR0] & MMCR0_PMC1CE; + } + + return env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE; +} + +/* + * Called after MMCR0 or MMCR1 changes to update pmc_ins_cnt and pmc_cyc_cnt. + * hflags must subsequently be updated. + */ +static void pmu_update_summaries(CPUPPCState *env) +{ + target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0]; + target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1]; + int ins_cnt = 0; + int cyc_cnt = 0; + + if (mmcr0 & MMCR0_FC) { + goto out; + } + + if (!(mmcr0 & MMCR0_FC14) && mmcr1 != 0) { + target_ulong sel; + + sel = extract64(mmcr1, MMCR1_PMC1EVT_EXTR, MMCR1_EVT_SIZE); + switch (sel) { + case 0x02: + case 0xfe: + ins_cnt |= 1 << 1; + break; + case 0x1e: + case 0xf0: + cyc_cnt |= 1 << 1; + break; + } + + sel = extract64(mmcr1, MMCR1_PMC2EVT_EXTR, MMCR1_EVT_SIZE); + ins_cnt |= (sel == 0x02) << 2; + cyc_cnt |= (sel == 0x1e) << 2; + + sel = extract64(mmcr1, MMCR1_PMC3EVT_EXTR, MMCR1_EVT_SIZE); + ins_cnt |= (sel == 0x02) << 3; + cyc_cnt |= (sel == 0x1e) << 3; + + sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE); + ins_cnt |= ((sel == 0xfa) || (sel == 0x2)) << 4; + cyc_cnt |= (sel == 0x1e) << 4; + } + + ins_cnt |= !(mmcr0 & MMCR0_FC56) << 5; + cyc_cnt |= !(mmcr0 & MMCR0_FC56) << 6; + + out: + env->pmc_ins_cnt = ins_cnt; + env->pmc_cyc_cnt = cyc_cnt; +} + +void pmu_mmcr01_updated(CPUPPCState *env) +{ + pmu_update_summaries(env); + hreg_update_pmu_hflags(env); + /* + * Should this update overflow timers (if mmcr0 is updated) so they + * get set in cpu_post_load? 
+ */ +} + +static bool pmu_increment_insns(CPUPPCState *env, uint32_t num_insns) +{ + target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0]; + unsigned ins_cnt = env->pmc_ins_cnt; + bool overflow_triggered = false; + target_ulong tmp; + + if (ins_cnt & (1 << 1)) { + tmp = env->spr[SPR_POWER_PMC1]; + tmp += num_insns; + if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMC1CE)) { + tmp = PMC_COUNTER_NEGATIVE_VAL; + overflow_triggered = true; + } + env->spr[SPR_POWER_PMC1] = tmp; + } + + if (ins_cnt & (1 << 2)) { + tmp = env->spr[SPR_POWER_PMC2]; + tmp += num_insns; + if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) { + tmp = PMC_COUNTER_NEGATIVE_VAL; + overflow_triggered = true; + } + env->spr[SPR_POWER_PMC2] = tmp; + } + + if (ins_cnt & (1 << 3)) { + tmp = env->spr[SPR_POWER_PMC3]; + tmp += num_insns; + if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) { + tmp = PMC_COUNTER_NEGATIVE_VAL; + overflow_triggered = true; + } + env->spr[SPR_POWER_PMC3] = tmp; + } + + if (ins_cnt & (1 << 4)) { + target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1]; + int sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE); + if (sel == 0x02 || (env->spr[SPR_CTRL] & CTRL_RUN)) { + tmp = env->spr[SPR_POWER_PMC4]; + tmp += num_insns; + if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) { + tmp = PMC_COUNTER_NEGATIVE_VAL; + overflow_triggered = true; + } + env->spr[SPR_POWER_PMC4] = tmp; + } + } + + if (ins_cnt & (1 << 5)) { + tmp = env->spr[SPR_POWER_PMC5]; + tmp += num_insns; + if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) { + tmp = PMC_COUNTER_NEGATIVE_VAL; + overflow_triggered = true; + } + env->spr[SPR_POWER_PMC5] = tmp; + } + + return overflow_triggered; +} + +static void pmu_update_cycles(CPUPPCState *env) +{ + uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + uint64_t time_delta = now - env->pmu_base_time; + int sprn, cyc_cnt = env->pmc_cyc_cnt; + + for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) { + if (cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) { + /* + * The pseries and powernv clock runs at 1Ghz, meaning + * that 1 nanosec equals 1 cycle. + */ + env->spr[sprn] += time_delta; + } + } + + /* Update base_time for future calculations */ + env->pmu_base_time = now; +} + +/* + * Helper function to retrieve the cycle overflow timer of the + * 'sprn' counter. + */ +static QEMUTimer *get_cyc_overflow_timer(CPUPPCState *env, int sprn) +{ + return env->pmu_cyc_overflow_timers[sprn - SPR_POWER_PMC1]; +} + +static void pmc_update_overflow_timer(CPUPPCState *env, int sprn) +{ + QEMUTimer *pmc_overflow_timer = get_cyc_overflow_timer(env, sprn); + int64_t timeout; + + /* + * PMC5 does not have an overflow timer and this pointer + * will be NULL. + */ + if (!pmc_overflow_timer) { + return; + } + + if (!(env->pmc_cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) || + !pmc_has_overflow_enabled(env, sprn)) { + /* Overflow timer is not needed for this counter */ + timer_del(pmc_overflow_timer); + return; + } + + if (env->spr[sprn] >= PMC_COUNTER_NEGATIVE_VAL) { + timeout = 0; + } else { + timeout = PMC_COUNTER_NEGATIVE_VAL - env->spr[sprn]; + } + + /* + * Use timer_mod_anticipate() because an overflow timer might + * be already running for this PMC. + */ + timer_mod_anticipate(pmc_overflow_timer, env->pmu_base_time + timeout); +} + +static void pmu_update_overflow_timers(CPUPPCState *env) +{ + int sprn; + + /* + * Scroll through all PMCs and start counter overflow timers for + * PM_CYC events, if needed. 
+ */ + for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) { + pmc_update_overflow_timer(env, sprn); + } +} + +static void pmu_delete_timers(CPUPPCState *env) +{ + QEMUTimer *pmc_overflow_timer; + int sprn; + + for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) { + pmc_overflow_timer = get_cyc_overflow_timer(env, sprn); + + if (pmc_overflow_timer) { + timer_del(pmc_overflow_timer); + } + } +} + +void helper_store_mmcr0(CPUPPCState *env, target_ulong value) +{ + pmu_update_cycles(env); + + env->spr[SPR_POWER_MMCR0] = value; + + pmu_mmcr01_updated(env); + + /* Update cycle overflow timers with the current MMCR0 state */ + pmu_update_overflow_timers(env); +} + +void helper_store_mmcr1(CPUPPCState *env, uint64_t value) +{ + pmu_update_cycles(env); + + env->spr[SPR_POWER_MMCR1] = value; + + pmu_mmcr01_updated(env); +} + +target_ulong helper_read_pmc(CPUPPCState *env, uint32_t sprn) +{ + pmu_update_cycles(env); + + return env->spr[sprn]; +} + +void helper_store_pmc(CPUPPCState *env, uint32_t sprn, uint64_t value) +{ + pmu_update_cycles(env); + + env->spr[sprn] = value; + + pmc_update_overflow_timer(env, sprn); +} + +static void fire_PMC_interrupt(PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + + pmu_update_cycles(env); + + if (env->spr[SPR_POWER_MMCR0] & MMCR0_FCECE) { + env->spr[SPR_POWER_MMCR0] &= ~MMCR0_FCECE; + env->spr[SPR_POWER_MMCR0] |= MMCR0_FC; + + /* Changing MMCR0_FC requires summaries and hflags update */ + pmu_mmcr01_updated(env); + + /* + * Delete all pending timers if we need to freeze + * the PMC. We'll restart them when the PMC starts + * running again. + */ + pmu_delete_timers(env); + } + + if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE) { + /* These MMCR0 bits do not require summaries or hflags update. */ + env->spr[SPR_POWER_MMCR0] &= ~MMCR0_PMAE; + env->spr[SPR_POWER_MMCR0] |= MMCR0_PMAO; + } + + raise_ebb_perfm_exception(env); +} + +void helper_handle_pmc5_overflow(CPUPPCState *env) +{ + env->spr[SPR_POWER_PMC5] = PMC_COUNTER_NEGATIVE_VAL; + fire_PMC_interrupt(env_archcpu(env)); +} + +/* This helper assumes that the PMC is running. */ +void helper_insns_inc(CPUPPCState *env, uint32_t num_insns) +{ + bool overflow_triggered; + PowerPCCPU *cpu; + + overflow_triggered = pmu_increment_insns(env, num_insns); + + if (overflow_triggered) { + cpu = env_archcpu(env); + fire_PMC_interrupt(cpu); + } +} + +static void cpu_ppc_pmu_timer_cb(void *opaque) +{ + PowerPCCPU *cpu = opaque; + + fire_PMC_interrupt(cpu); +} + +void cpu_ppc_pmu_init(CPUPPCState *env) +{ + PowerPCCPU *cpu = env_archcpu(env); + int i, sprn; + + for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) { + if (sprn == SPR_POWER_PMC5) { + continue; + } + + i = sprn - SPR_POWER_PMC1; + + env->pmu_cyc_overflow_timers[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL, + &cpu_ppc_pmu_timer_cb, + cpu); + } +} +#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */ diff --git a/target/ppc/power8-pmu.h b/target/ppc/power8-pmu.h new file mode 100644 index 0000000000..775e640053 --- /dev/null +++ b/target/ppc/power8-pmu.h @@ -0,0 +1,27 @@ +/* + * PMU emulation helpers for TCG IBM POWER chips + * + * Copyright IBM Corp. 2021 + * + * Authors: + * Daniel Henrique Barboza + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef POWER8_PMU_H +#define POWER8_PMU_H + +#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) + +#define PMC_COUNTER_NEGATIVE_VAL 0x80000000UL + +void cpu_ppc_pmu_init(CPUPPCState *env); +void pmu_mmcr01_updated(CPUPPCState *env); +#else +static inline void cpu_ppc_pmu_init(CPUPPCState *env) { } +static inline void pmu_mmcr01_updated(CPUPPCState *env) { } +#endif + +#endif diff --git a/target/ppc/spr_tcg.h b/target/ppc/spr_common.h similarity index 63% rename from target/ppc/spr_tcg.h rename to target/ppc/spr_common.h index 0be5f347d5..b5a5bc6895 100644 --- a/target/ppc/spr_tcg.h +++ b/target/ppc/spr_common.h @@ -16,15 +16,75 @@ * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ -#ifndef SPR_TCG_H -#define SPR_TCG_H +#ifndef SPR_COMMON_H +#define SPR_COMMON_H #define SPR_NOACCESS (&spr_noaccess) +#ifdef CONFIG_TCG +# define USR_ARG(X) X, +# ifdef CONFIG_USER_ONLY +# define SYS_ARG(X) +# else +# define SYS_ARG(X) X, +# endif +#else +# define USR_ARG(X) +# define SYS_ARG(X) +#endif +#ifdef CONFIG_KVM +# define KVM_ARG(X) X, +#else +# define KVM_ARG(X) +#endif + +typedef void spr_callback(DisasContext *, int, int); + +void _spr_register(CPUPPCState *env, int num, const char *name, + USR_ARG(spr_callback *uea_read) + USR_ARG(spr_callback *uea_write) + SYS_ARG(spr_callback *oea_read) + SYS_ARG(spr_callback *oea_write) + SYS_ARG(spr_callback *hea_read) + SYS_ARG(spr_callback *hea_write) + KVM_ARG(uint64_t one_reg_id) + target_ulong initial_value); + +/* spr_register_kvm_hv passes all required arguments. */ +#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \ + oea_read, oea_write, hea_read, hea_write, \ + one_reg_id, initial_value) \ + _spr_register(env, num, name, \ + USR_ARG(uea_read) USR_ARG(uea_write) \ + SYS_ARG(oea_read) SYS_ARG(oea_write) \ + SYS_ARG(hea_read) SYS_ARG(hea_write) \ + KVM_ARG(one_reg_id) initial_value) + +/* spr_register_kvm duplicates the oea callbacks to the hea callbacks. */ +#define spr_register_kvm(env, num, name, uea_read, uea_write, \ + oea_read, oea_write, one_reg_id, ival) \ + spr_register_kvm_hv(env, num, name, uea_read, uea_write, oea_read, \ + oea_write, oea_read, oea_write, one_reg_id, ival) + +/* spr_register_hv and spr_register are similar, except there is no kvm id. 
*/ +#define spr_register_hv(env, num, name, uea_read, uea_write, \ + oea_read, oea_write, hea_read, hea_write, ival) \ + spr_register_kvm_hv(env, num, name, uea_read, uea_write, oea_read, \ + oea_write, hea_read, hea_write, 0, ival) + +#define spr_register(env, num, name, uea_read, uea_write, \ + oea_read, oea_write, ival) \ + spr_register_kvm(env, num, name, uea_read, uea_write, \ + oea_read, oea_write, 0, ival) + /* prototypes for readers and writers for SPRs */ void spr_noaccess(DisasContext *ctx, int gprn, int sprn); void spr_read_generic(DisasContext *ctx, int gprn, int sprn); void spr_write_generic(DisasContext *ctx, int sprn, int gprn); +void spr_write_MMCR0(DisasContext *ctx, int sprn, int gprn); +void spr_write_MMCR1(DisasContext *ctx, int sprn, int gprn); +void spr_write_PMC(DisasContext *ctx, int sprn, int gprn); +void spr_write_CTRL(DisasContext *ctx, int sprn, int gprn); void spr_read_xer(DisasContext *ctx, int gprn, int sprn); void spr_write_xer(DisasContext *ctx, int sprn, int gprn); void spr_read_lr(DisasContext *ctx, int gprn, int sprn); @@ -32,14 +92,21 @@ void spr_write_lr(DisasContext *ctx, int sprn, int gprn); void spr_read_ctr(DisasContext *ctx, int gprn, int sprn); void spr_write_ctr(DisasContext *ctx, int sprn, int gprn); void spr_read_ureg(DisasContext *ctx, int gprn, int sprn); +void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn); +void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn); +void spr_read_PMC(DisasContext *ctx, int gprn, int sprn); +void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn); +void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn); void spr_read_tbl(DisasContext *ctx, int gprn, int sprn); void spr_read_tbu(DisasContext *ctx, int gprn, int sprn); void spr_read_atbl(DisasContext *ctx, int gprn, int sprn); void spr_read_atbu(DisasContext *ctx, int gprn, int sprn); -void spr_read_601_rtcl(DisasContext *ctx, int gprn, int sprn); -void spr_read_601_rtcu(DisasContext *ctx, int gprn, int sprn); void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn); void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn); +void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn); +void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn); +void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn); +void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn); #ifndef CONFIG_USER_ONLY void spr_write_generic32(DisasContext *ctx, int sprn, int gprn); @@ -64,16 +131,13 @@ void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn); void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn); void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn); void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn); -void spr_write_601_rtcu(DisasContext *ctx, int sprn, int gprn); -void spr_write_601_rtcl(DisasContext *ctx, int sprn, int gprn); -void spr_write_hid0_601(DisasContext *ctx, int sprn, int gprn); -void spr_read_601_ubat(DisasContext *ctx, int gprn, int sprn); -void spr_write_601_ubatu(DisasContext *ctx, int sprn, int gprn); -void spr_write_601_ubatl(DisasContext *ctx, int sprn, int gprn); void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn); void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn); void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn); void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn); +void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn); +void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn); +void 
spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn); void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn); void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn); void spr_read_403_pbr(DisasContext *ctx, int gprn, int sprn); @@ -133,4 +197,13 @@ void spr_write_hmer(DisasContext *ctx, int sprn, int gprn); void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn); #endif +void register_low_BATs(CPUPPCState *env); +void register_high_BATs(CPUPPCState *env); +void register_sdr1_sprs(CPUPPCState *env); +void register_thrm_sprs(CPUPPCState *env); +void register_usprgh_sprs(CPUPPCState *env); +void register_non_embedded_sprs(CPUPPCState *env); +void register_6xx_7xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways); +void register_generic_sprs(PowerPCCPU *cpu); + #endif diff --git a/target/ppc/timebase_helper.c b/target/ppc/timebase_helper.c index 8ff4080eb9..b80f56af7e 100644 --- a/target/ppc/timebase_helper.c +++ b/target/ppc/timebase_helper.c @@ -63,16 +63,6 @@ void helper_store_purr(CPUPPCState *env, target_ulong val) } #endif -target_ulong helper_load_601_rtcl(CPUPPCState *env) -{ - return cpu_ppc601_load_rtcl(env); -} - -target_ulong helper_load_601_rtcu(CPUPPCState *env) -{ - return cpu_ppc601_load_rtcu(env); -} - #if !defined(CONFIG_USER_ONLY) void helper_store_tbl(CPUPPCState *env, target_ulong val) { @@ -94,16 +84,6 @@ void helper_store_atbu(CPUPPCState *env, target_ulong val) cpu_ppc_store_atbu(env, val); } -void helper_store_601_rtcl(CPUPPCState *env, target_ulong val) -{ - cpu_ppc601_store_rtcl(env, val); -} - -void helper_store_601_rtcu(CPUPPCState *env, target_ulong val) -{ - cpu_ppc601_store_rtcu(env, val); -} - target_ulong helper_load_decr(CPUPPCState *env) { return cpu_ppc_load_decr(env); @@ -144,6 +124,16 @@ void helper_store_40x_pit(CPUPPCState *env, target_ulong val) store_40x_pit(env, val); } +void helper_store_40x_tcr(CPUPPCState *env, target_ulong val) +{ + store_40x_tcr(env, val); +} + +void helper_store_40x_tsr(CPUPPCState *env, target_ulong val) +{ + store_40x_tsr(env, val); +} + void helper_store_booke_tcr(CPUPPCState *env, target_ulong val) { store_booke_tcr(env, val); @@ -153,7 +143,6 @@ void helper_store_booke_tsr(CPUPPCState *env, target_ulong val) { store_booke_tsr(env, val); } -#endif /*****************************************************************************/ /* Embedded PowerPC specific helpers */ @@ -179,7 +168,7 @@ target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn) (uint32_t)dcrn, (uint32_t)dcrn); raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | - POWERPC_EXCP_PRIV_REG, GETPC()); + POWERPC_EXCP_INVAL_INVAL, GETPC()); } } return val; @@ -202,7 +191,8 @@ void helper_store_dcr(CPUPPCState *env, target_ulong dcrn, target_ulong val) (uint32_t)dcrn, (uint32_t)dcrn); raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | - POWERPC_EXCP_PRIV_REG, GETPC()); + POWERPC_EXCP_INVAL_INVAL, GETPC()); } } } +#endif diff --git a/target/ppc/trace-events b/target/ppc/trace-events index c88cfccf8d..a79f1b4370 100644 --- a/target/ppc/trace-events +++ b/target/ppc/trace-events @@ -23,8 +23,16 @@ kvm_failed_get_vpa(void) "Warning: Unable to get VPA information from KVM" kvm_handle_dcr_write(void) "handle dcr write" kvm_handle_dcr_read(void) "handle dcr read" kvm_handle_halt(void) "handle halt" -kvm_handle_papr_hcall(void) "handle PAPR hypercall" +kvm_handle_papr_hcall(uint64_t hcall) "0x%" PRIx64 kvm_handle_epr(void) "handle epr" kvm_handle_watchdog_expiry(void) "handle watchdog expiry" 
kvm_handle_debug_exception(void) "handle debug exception" kvm_handle_nmi_exception(void) "handle NMI exception" + +# excp_helper.c +ppc_excp_rfi(uint64_t nip, uint64_t msr) "Return from exception at 0x%" PRIx64 " with flags 0x%016" PRIx64 +ppc_excp_dsi(uint64_t dsisr, uint64_t dar) "DSI exception: DSISR=0x%" PRIx64 " DAR=0x%" PRIx64 +ppc_excp_isi(uint64_t msr, uint64_t nip) "ISI exception: msr=0x%016" PRIx64 " nip=0x%" PRIx64 +ppc_excp_fp_ignore(void) "Ignore floating point exception" +ppc_excp_inval(uint64_t nip) "Invalid instruction at 0x%" PRIx64 +ppc_excp_print(const char *excp) "%s exception" diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 171b216e17..1de7eca9c4 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -35,14 +35,14 @@ #include "exec/translator.h" #include "exec/log.h" #include "qemu/atomic128.h" -#include "spr_tcg.h" +#include "spr_common.h" +#include "power8-pmu.h" #include "qemu/qemu-print.h" #include "qapi/error.h" #define CPU_SINGLE_STEP 0x1 #define CPU_BRANCH_STEP 0x2 -#define GDBSTUB_SINGLE_STEP 0x4 /* Include definitions for instructions classes and implementations flags */ /* #define PPC_DEBUG_DISAS */ @@ -175,6 +175,12 @@ struct DisasContext { bool spe_enabled; bool tm_enabled; bool gtse; + bool hr; + bool mmcr0_pmcc0; + bool mmcr0_pmcc1; + bool mmcr0_pmcjce; + bool pmc_other; + bool pmu_insn_cnt; ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */ int singlestep_enabled; uint32_t flags; @@ -190,7 +196,7 @@ struct DisasContext { /* Return true iff byteswap is needed in a scalar memop */ static inline bool need_byteswap(const DisasContext *ctx) { -#if defined(TARGET_WORDS_BIGENDIAN) +#if TARGET_BIG_ENDIAN return ctx->le_mode; #else return !ctx->le_mode; @@ -302,6 +308,14 @@ static void gen_icount_io_start(DisasContext *ctx) } } +#if !defined(CONFIG_USER_ONLY) +static void gen_ppc_maybe_interrupt(DisasContext *ctx) +{ + gen_icount_io_start(ctx); + gen_helper_ppc_maybe_interrupt(cpu_env); +} +#endif + /* * Tells the caller what is the appropriate exception to generate and prepares * SPR registers for this exception. @@ -332,7 +346,7 @@ static uint32_t gen_prep_dbgex(DisasContext *ctx) static void gen_debug_exception(DisasContext *ctx) { - gen_helper_raise_exception(cpu_env, tcg_constant_i32(EXCP_DEBUG)); + gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx))); ctx->base.is_jmp = DISAS_NORETURN; } @@ -400,6 +414,18 @@ void spr_write_generic(DisasContext *ctx, int sprn, int gprn) spr_store_dump_spr(sprn); } +void spr_write_CTRL(DisasContext *ctx, int sprn, int gprn) +{ + spr_write_generic(ctx, sprn, gprn); + + /* + * SPR_CTRL writes must force a new translation block, + * allowing the PMU to calculate the run latch events with + * more accuracy. 
+ */ + ctx->base.is_jmp = DISAS_EXIT_UPDATE; +} + #if !defined(CONFIG_USER_ONLY) void spr_write_generic32(DisasContext *ctx, int sprn, int gprn) { @@ -779,61 +805,6 @@ void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn) #endif #endif -/* PowerPC 601 specific registers */ -/* RTC */ -void spr_read_601_rtcl(DisasContext *ctx, int gprn, int sprn) -{ - gen_helper_load_601_rtcl(cpu_gpr[gprn], cpu_env); -} - -void spr_read_601_rtcu(DisasContext *ctx, int gprn, int sprn) -{ - gen_helper_load_601_rtcu(cpu_gpr[gprn], cpu_env); -} - -#if !defined(CONFIG_USER_ONLY) -void spr_write_601_rtcu(DisasContext *ctx, int sprn, int gprn) -{ - gen_helper_store_601_rtcu(cpu_env, cpu_gpr[gprn]); -} - -void spr_write_601_rtcl(DisasContext *ctx, int sprn, int gprn) -{ - gen_helper_store_601_rtcl(cpu_env, cpu_gpr[gprn]); -} - -void spr_write_hid0_601(DisasContext *ctx, int sprn, int gprn) -{ - gen_helper_store_hid0_601(cpu_env, cpu_gpr[gprn]); - /* Must stop the translation as endianness may have changed */ - ctx->base.is_jmp = DISAS_EXIT_UPDATE; -} -#endif - -/* Unified bats */ -#if !defined(CONFIG_USER_ONLY) -void spr_read_601_ubat(DisasContext *ctx, int gprn, int sprn) -{ - tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, - offsetof(CPUPPCState, - IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2])); -} - -void spr_write_601_ubatu(DisasContext *ctx, int sprn, int gprn) -{ - TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2); - gen_helper_store_601_batl(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); -} - -void spr_write_601_ubatl(DisasContext *ctx, int sprn, int gprn) -{ - TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2); - gen_helper_store_601_batu(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); -} -#endif - /* PowerPC 40x specific registers */ #if !defined(CONFIG_USER_ONLY) void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn) @@ -863,6 +834,26 @@ void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn) gen_helper_store_40x_sler(cpu_env, cpu_gpr[gprn]); } +void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn) +{ + gen_icount_io_start(ctx); + gen_helper_store_40x_tcr(cpu_env, cpu_gpr[gprn]); +} + +void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn) +{ + gen_icount_io_start(ctx); + gen_helper_store_40x_tsr(cpu_env, cpu_gpr[gprn]); +} + +void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn) +{ + TCGv t0 = tcg_temp_new(); + tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xFF); + gen_helper_store_40x_pid(cpu_env, t0); + tcg_temp_free(t0); +} + void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn) { gen_icount_io_start(ctx); @@ -876,22 +867,8 @@ void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn) } #endif -/* PowerPC 403 specific registers */ -/* PBL1 / PBU1 / PBL2 / PBU2 */ +/* PIR */ #if !defined(CONFIG_USER_ONLY) -void spr_read_403_pbr(DisasContext *ctx, int gprn, int sprn) -{ - tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, - offsetof(CPUPPCState, pb[sprn - SPR_403_PBL1])); -} - -void spr_write_403_pbr(DisasContext *ctx, int sprn, int gprn) -{ - TCGv_i32 t0 = tcg_const_i32(sprn - SPR_403_PBL1); - gen_helper_store_403_pbr(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); -} - void spr_write_pir(DisasContext *ctx, int sprn, int gprn) { TCGv t0 = tcg_temp_new(); @@ -941,9 +918,9 @@ void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn) } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) { sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38; } else { - printf("Trying to write an unknown exception vector %d %03x\n", - sprn, sprn); - gen_inval_exception(ctx, 
POWERPC_EXCP_PRIV_REG); + qemu_log_mask(LOG_GUEST_ERROR, "Trying to write an unknown exception" + " vector 0x%03x\n", sprn); + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); return; } @@ -1301,38 +1278,43 @@ typedef struct opcode_t { const char *oname; } opcode_t; +static void gen_priv_opc(DisasContext *ctx) +{ + gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); +} + /* Helpers for priv. check */ -#define GEN_PRIV \ - do { \ - gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; \ +#define GEN_PRIV(CTX) \ + do { \ + gen_priv_opc(CTX); return; \ } while (0) #if defined(CONFIG_USER_ONLY) -#define CHK_HV GEN_PRIV -#define CHK_SV GEN_PRIV -#define CHK_HVRM GEN_PRIV +#define CHK_HV(CTX) GEN_PRIV(CTX) +#define CHK_SV(CTX) GEN_PRIV(CTX) +#define CHK_HVRM(CTX) GEN_PRIV(CTX) #else -#define CHK_HV \ - do { \ - if (unlikely(ctx->pr || !ctx->hv)) { \ - GEN_PRIV; \ - } \ +#define CHK_HV(CTX) \ + do { \ + if (unlikely(ctx->pr || !ctx->hv)) {\ + GEN_PRIV(CTX); \ + } \ } while (0) -#define CHK_SV \ +#define CHK_SV(CTX) \ do { \ if (unlikely(ctx->pr)) { \ - GEN_PRIV; \ + GEN_PRIV(CTX); \ } \ } while (0) -#define CHK_HVRM \ - do { \ - if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \ - GEN_PRIV; \ - } \ +#define CHK_HVRM(CTX) \ + do { \ + if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \ + GEN_PRIV(CTX); \ + } \ } while (0) #endif -#define CHK_NONE +#define CHK_NONE(CTX) /*****************************************************************************/ /* PowerPC instructions table */ @@ -3195,6 +3177,20 @@ static inline void gen_align_no_le(DisasContext *ctx) (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE); } +static TCGv do_ea_calc(DisasContext *ctx, int ra, TCGv displ) +{ + TCGv ea = tcg_temp_new(); + if (ra) { + tcg_gen_add_tl(ea, cpu_gpr[ra], displ); + } else { + tcg_gen_mov_tl(ea, displ); + } + if (NARROW_MODE(ctx)) { + tcg_gen_ext32u_tl(ea, ea); + } + return ea; +} + /*** Integer load ***/ #define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask) #define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP)) @@ -3228,10 +3224,10 @@ GEN_QEMU_LOAD_64(ld8u, DEF_MEMOP(MO_UB)) GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW)) GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL)) GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL)) -GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_Q)) +GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_UQ)) #if defined(TARGET_PPC64) -GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_Q)) +GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_UQ)) #endif #define GEN_QEMU_STORE_TL(stop, op) \ @@ -3262,17 +3258,17 @@ static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx, \ GEN_QEMU_STORE_64(st8, DEF_MEMOP(MO_UB)) GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW)) GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL)) -GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q)) +GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_UQ)) #if defined(TARGET_PPC64) -GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_Q)) +GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_UQ)) #endif #define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \ static void glue(gen_, name##x)(DisasContext *ctx) \ { \ TCGv EA; \ - chk; \ + chk(ctx); \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ @@ -3290,7 +3286,7 @@ static void glue(gen_, name##x)(DisasContext *ctx) \ static void glue(gen_, name##epx)(DisasContext *ctx) \ { \ TCGv EA; \ - CHK_SV; \ + CHK_SV(ctx); \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ @@ -3302,7 +3298,7 @@ GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02) GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08) GEN_LDEPX(lw, 
DEF_MEMOP(MO_UL), 0x1F, 0x00) #if defined(TARGET_PPC64) -GEN_LDEPX(ld, DEF_MEMOP(MO_Q), 0x1D, 0x00) +GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00) #endif #if defined(TARGET_PPC64) @@ -3311,69 +3307,6 @@ GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST) GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST) GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST) GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST) - -/* lq */ -static void gen_lq(DisasContext *ctx) -{ - int ra, rd; - TCGv EA, hi, lo; - - /* lq is a legal user mode instruction starting in ISA 2.07 */ - bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; - bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; - - if (!legal_in_user_mode && ctx->pr) { - gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); - return; - } - - if (!le_is_supported && ctx->le_mode) { - gen_align_no_le(ctx); - return; - } - ra = rA(ctx->opcode); - rd = rD(ctx->opcode); - if (unlikely((rd & 1) || rd == ra)) { - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); - return; - } - - gen_set_access_type(ctx, ACCESS_INT); - EA = tcg_temp_new(); - gen_addr_imm_index(ctx, EA, 0x0F); - - /* Note that the low part is always in RD+1, even in LE mode. */ - lo = cpu_gpr[rd + 1]; - hi = cpu_gpr[rd]; - - if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { - if (HAVE_ATOMIC128) { - TCGv_i32 oi = tcg_temp_new_i32(); - if (ctx->le_mode) { - tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx)); - gen_helper_lq_le_parallel(lo, cpu_env, EA, oi); - } else { - tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx)); - gen_helper_lq_be_parallel(lo, cpu_env, EA, oi); - } - tcg_temp_free_i32(oi); - tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh)); - } else { - /* Restart with exclusive lock. */ - gen_helper_exit_atomic(cpu_env); - ctx->base.is_jmp = DISAS_NORETURN; - } - } else if (ctx->le_mode) { - tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ); - gen_addr_add(ctx, EA, EA, 8); - tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ); - } else { - tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ); - gen_addr_add(ctx, EA, EA, 8); - tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ); - } - tcg_temp_free(EA); -} #endif /*** Integer store ***/ @@ -3381,7 +3314,7 @@ static void gen_lq(DisasContext *ctx) static void glue(gen_, name##x)(DisasContext *ctx) \ { \ TCGv EA; \ - chk; \ + chk(ctx); \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ @@ -3398,7 +3331,7 @@ static void glue(gen_, name##x)(DisasContext *ctx) \ static void glue(gen_, name##epx)(DisasContext *ctx) \ { \ TCGv EA; \ - CHK_SV; \ + CHK_SV(ctx); \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ @@ -3411,7 +3344,7 @@ GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06) GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C) GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04) #if defined(TARGET_PPC64) -GEN_STEPX(std, DEF_MEMOP(MO_Q), 0x1d, 0x04) +GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1d, 0x04) #endif #if defined(TARGET_PPC64) @@ -3419,88 +3352,6 @@ GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST) GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST) GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST) GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST) - -static void gen_std(DisasContext *ctx) -{ - int rs; - TCGv EA; - - rs = rS(ctx->opcode); - if ((ctx->opcode & 0x3) == 0x2) { /* stq */ - bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; - bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; - TCGv 
hi, lo; - - if (!(ctx->insns_flags & PPC_64BX)) { - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); - } - - if (!legal_in_user_mode && ctx->pr) { - gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); - return; - } - - if (!le_is_supported && ctx->le_mode) { - gen_align_no_le(ctx); - return; - } - - if (unlikely(rs & 1)) { - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); - return; - } - gen_set_access_type(ctx, ACCESS_INT); - EA = tcg_temp_new(); - gen_addr_imm_index(ctx, EA, 0x03); - - /* Note that the low part is always in RS+1, even in LE mode. */ - lo = cpu_gpr[rs + 1]; - hi = cpu_gpr[rs]; - - if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { - if (HAVE_ATOMIC128) { - TCGv_i32 oi = tcg_temp_new_i32(); - if (ctx->le_mode) { - tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx)); - gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi); - } else { - tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx)); - gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi); - } - tcg_temp_free_i32(oi); - } else { - /* Restart with exclusive lock. */ - gen_helper_exit_atomic(cpu_env); - ctx->base.is_jmp = DISAS_NORETURN; - } - } else if (ctx->le_mode) { - tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_LEQ); - gen_addr_add(ctx, EA, EA, 8); - tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_LEQ); - } else { - tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_BEQ); - gen_addr_add(ctx, EA, EA, 8); - tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_BEQ); - } - tcg_temp_free(EA); - } else { - /* std / stdu */ - if (Rc(ctx->opcode)) { - if (unlikely(rA(ctx->opcode) == 0)) { - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); - return; - } - } - gen_set_access_type(ctx, ACCESS_INT); - EA = tcg_temp_new(); - gen_addr_imm_index(ctx, EA, 0x03); - gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA); - if (Rc(ctx->opcode)) { - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); - } - tcg_temp_free(EA); - } -} #endif /*** Integer load and store with byte reverse ***/ @@ -3678,7 +3529,32 @@ static void gen_stswx(DisasContext *ctx) /* eieio */ static void gen_eieio(DisasContext *ctx) { - TCGBar bar = TCG_MO_LD_ST; + TCGBar bar = TCG_MO_ALL; + + /* + * eieio has complex semanitcs. It provides memory ordering between + * operations in the set: + * - loads from CI memory. + * - stores to CI memory. + * - stores to WT memory. + * + * It separately also orders memory for operations in the set: + * - stores to cacheble memory. + * + * It also serializes instructions: + * - dcbt and dcbst. + * + * It separately serializes: + * - tlbie and tlbsync. + * + * And separately serializes: + * - slbieg, slbiag, and slbsync. + * + * The end result is that CI memory ordering requires TCG_MO_ALL + * and it is not possible to special-case more relaxed ordering for + * cacheable accesses. TCG_BAR_SC is required to provide this + * serialization. + */ /* * POWER9 has a eieio instruction variant using bit 6 as a hint to @@ -3903,7 +3779,7 @@ static void gen_lwat(DisasContext *ctx) #ifdef TARGET_PPC64 static void gen_ldat(DisasContext *ctx) { - gen_ld_atomic(ctx, DEF_MEMOP(MO_Q)); + gen_ld_atomic(ctx, DEF_MEMOP(MO_UQ)); } #endif @@ -3986,7 +3862,7 @@ static void gen_stwat(DisasContext *ctx) #ifdef TARGET_PPC64 static void gen_stdat(DisasContext *ctx) { - gen_st_atomic(ctx, DEF_MEMOP(MO_Q)); + gen_st_atomic(ctx, DEF_MEMOP(MO_UQ)); } #endif @@ -4038,9 +3914,9 @@ STCX(stwcx_, DEF_MEMOP(MO_UL)) #if defined(TARGET_PPC64) /* ldarx */ -LARX(ldarx, DEF_MEMOP(MO_Q)) +LARX(ldarx, DEF_MEMOP(MO_UQ)) /* stdcx. 
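*/

As a rough standalone illustration (not part of the patch, all names below are made up): the ldarx/stdcx. pair defined here is load-reserve / store-conditional, and under CF_PARALLEL QEMU emulates the conditional store with a compare-and-swap against the value observed at reservation time, the same idea the 16-byte stqcx. helper uses further down. A minimal C11 sketch of that emulation strategy on a 64-bit cell:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* "Reservation": remember the value observed by the load-reserve. */
static uint64_t reserve_val;

static uint64_t load_reserve(_Atomic uint64_t *addr)
{
    reserve_val = atomic_load(addr);
    return reserve_val;
}

/* Store-conditional: succeeds only if the cell still holds the reserved value. */
static bool store_conditional(_Atomic uint64_t *addr, uint64_t new_val)
{
    uint64_t expected = reserve_val;
    return atomic_compare_exchange_strong(addr, &expected, new_val);
}

int main(void)
{
    _Atomic uint64_t cell = 41;
    uint64_t v = load_reserve(&cell);
    printf("store-conditional %s\n",
           store_conditional(&cell, v + 1) ? "succeeded" : "failed");
    return 0;
}

/* stdcx.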
*/ -STCX(stdcx_, DEF_MEMOP(MO_Q)) +STCX(stdcx_, DEF_MEMOP(MO_UQ)) /* lqarx */ static void gen_lqarx(DisasContext *ctx) @@ -4066,11 +3942,11 @@ static void gen_lqarx(DisasContext *ctx) if (HAVE_ATOMIC128) { TCGv_i32 oi = tcg_temp_new_i32(); if (ctx->le_mode) { - tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ | MO_ALIGN_16, + tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128 | MO_ALIGN, ctx->mem_idx)); gen_helper_lq_le_parallel(lo, cpu_env, EA, oi); } else { - tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ | MO_ALIGN_16, + tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128 | MO_ALIGN, ctx->mem_idx)); gen_helper_lq_be_parallel(lo, cpu_env, EA, oi); } @@ -4084,18 +3960,19 @@ static void gen_lqarx(DisasContext *ctx) return; } } else if (ctx->le_mode) { - tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ | MO_ALIGN_16); + tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEUQ | MO_ALIGN_16); tcg_gen_mov_tl(cpu_reserve, EA); gen_addr_add(ctx, EA, EA, 8); - tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ); + tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEUQ); } else { - tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ | MO_ALIGN_16); + tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEUQ | MO_ALIGN_16); tcg_gen_mov_tl(cpu_reserve, EA); gen_addr_add(ctx, EA, EA, 8); - tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEUQ); } tcg_temp_free(EA); + tcg_gen_mov_tl(cpu_reserve, EA); tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val)); tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2)); } @@ -4121,7 +3998,7 @@ static void gen_stqcx_(DisasContext *ctx) if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { if (HAVE_CMPXCHG128) { - TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_Q) | MO_ALIGN_16); + TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_128) | MO_ALIGN); if (ctx->le_mode) { gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env, EA, lo, hi, oi); @@ -4181,8 +4058,13 @@ static void gen_stqcx_(DisasContext *ctx) /* sync */ static void gen_sync(DisasContext *ctx) { + TCGBar bar = TCG_MO_ALL; uint32_t l = (ctx->opcode >> 21) & 3; + if ((l == 1) && (ctx->insns_flags2 & PPC2_MEM_LWSYNC)) { + bar = TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST; + } + /* * We may need to check for a pending TLB flush. * @@ -4194,29 +4076,109 @@ static void gen_sync(DisasContext *ctx) if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) { gen_check_tlb_flush(ctx, true); } - tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); + + tcg_gen_mb(bar | TCG_BAR_SC); } /* wait */ static void gen_wait(DisasContext *ctx) { - TCGv_i32 t0 = tcg_const_i32(1); - tcg_gen_st_i32(t0, cpu_env, - -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); - tcg_temp_free_i32(t0); - /* Stop translation, as the CPU is supposed to sleep from now */ - gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); + uint32_t wc; + + if (ctx->insns_flags & PPC_WAIT) { + /* v2.03-v2.07 define an older incompatible 'wait' encoding. */ + + if (ctx->insns_flags2 & PPC2_PM_ISA206) { + /* v2.06 introduced the WC field. WC > 0 may be treated as no-op. */ + wc = WC(ctx->opcode); + } else { + wc = 0; + } + + } else if (ctx->insns_flags2 & PPC2_ISA300) { + /* v3.0 defines a new 'wait' encoding. */ + wc = WC(ctx->opcode); + if (ctx->insns_flags2 & PPC2_ISA310) { + uint32_t pl = PL(ctx->opcode); + + /* WC 1,2 may be treated as no-op. WC 3 is reserved. */ + if (wc == 3) { + gen_invalid(ctx); + return; + } + + /* PL 1-3 are reserved. If WC=2 then the insn is treated as noop. 
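*/

A standalone condensation of the ISA v3.1 'wait' decode rules implemented by gen_wait() here, assuming WC and PL have already been extracted from the opcode (illustrative only, not part of the patch; the helper name is invented):

#include <stdbool.h>
#include <stdio.h>

/*
 * v3.1 'wait': WC=3 is reserved, and a non-zero PL is only accepted
 * together with WC=2 (which may be treated as a no-op).
 */
static bool wait_v31_encoding_valid(unsigned wc, unsigned pl)
{
    if (wc == 3) {
        return false;
    }
    if (pl > 0 && wc != 2) {
        return false;
    }
    return true;
}

int main(void)
{
    printf("wc=0 pl=0: %d\n", wait_v31_encoding_valid(0, 0)); /* 1: plain wait */
    printf("wc=3 pl=0: %d\n", wait_v31_encoding_valid(3, 0)); /* 0: reserved WC */
    printf("wc=1 pl=2: %d\n", wait_v31_encoding_valid(1, 2)); /* 0: reserved PL */
    return 0;
}

+                /* PL 1-3 are reserved; if WC=2 the insn is treated as a no-op.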
*/ + if (pl > 0 && wc != 2) { + gen_invalid(ctx); + return; + } + + } else { /* ISA300 */ + /* WC 1-3 are reserved */ + if (wc > 0) { + gen_invalid(ctx); + return; + } + } + + } else { + warn_report("wait instruction decoded with wrong ISA flags."); + gen_invalid(ctx); + return; + } + + /* + * wait without WC field or with WC=0 waits for an exception / interrupt + * to occur. + */ + if (wc == 0) { + TCGv_i32 t0 = tcg_const_i32(1); + tcg_gen_st_i32(t0, cpu_env, + -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); + tcg_temp_free_i32(t0); + /* Stop translation, as the CPU is supposed to sleep from now */ + gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); + } + + /* + * Other wait types must not just wait until an exception occurs because + * ignoring their other wake-up conditions could cause a hang. + * + * For v2.06 and 2.07, wc=1,2,3 are architected but may be implemented as + * no-ops. + * + * wc=1 and wc=3 explicitly allow the instruction to be treated as a no-op. + * + * wc=2 waits for an implementation-specific condition, such could be + * always true, so it can be implemented as a no-op. + * + * For v3.1, wc=1,2 are architected but may be implemented as no-ops. + * + * wc=1 (waitrsv) waits for an exception or a reservation to be lost. + * Reservation-loss may have implementation-specific conditions, so it + * can be implemented as a no-op. + * + * wc=2 waits for an exception or an amount of time to pass. This + * amount is implementation-specific so it can be implemented as a + * no-op. + * + * ISA v3.1 allows for execution to resume "in the rare case of + * an implementation-dependent event", so in any case software must + * not depend on the architected resumption condition to become + * true, so no-op implementations should be architecturally correct + * (if suboptimal). + */ } #if defined(TARGET_PPC64) static void gen_doze(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv_i32 t; - CHK_HV; + CHK_HV(ctx); t = tcg_const_i32(PPC_PM_DOZE); gen_helper_pminsn(cpu_env, t); tcg_temp_free_i32(t); @@ -4228,11 +4190,11 @@ static void gen_doze(DisasContext *ctx) static void gen_nap(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv_i32 t; - CHK_HV; + CHK_HV(ctx); t = tcg_const_i32(PPC_PM_NAP); gen_helper_pminsn(cpu_env, t); tcg_temp_free_i32(t); @@ -4244,11 +4206,11 @@ static void gen_nap(DisasContext *ctx) static void gen_stop(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv_i32 t; - CHK_HV; + CHK_HV(ctx); t = tcg_const_i32(PPC_PM_STOP); gen_helper_pminsn(cpu_env, t); tcg_temp_free_i32(t); @@ -4260,11 +4222,11 @@ static void gen_stop(DisasContext *ctx) static void gen_sleep(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv_i32 t; - CHK_HV; + CHK_HV(ctx); t = tcg_const_i32(PPC_PM_SLEEP); gen_helper_pminsn(cpu_env, t); tcg_temp_free_i32(t); @@ -4276,11 +4238,11 @@ static void gen_sleep(DisasContext *ctx) static void gen_rvwinkle(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv_i32 t; - CHK_HV; + CHK_HV(ctx); t = tcg_const_i32(PPC_PM_RVWINKLE); gen_helper_pminsn(cpu_env, t); tcg_temp_free_i32(t); @@ -4299,6 +4261,70 @@ static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip) #endif } +#if defined(TARGET_PPC64) +static void pmu_count_insns(DisasContext *ctx) +{ + /* + * Do not bother calling the helper if the PMU isn't counting + * instructions. 
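+     */

A host-side sketch of what pmu_count_insns() does at TB exit: add the translated instruction count to PMC5 and, when PMCjCE overflow interrupts are enabled, fire the overflow path once the counter turns "negative". Not part of the patch; the threshold value mirrors the usual PMC_COUNTER_NEGATIVE_VAL definition but is an assumption here, and the function names are invented.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed threshold: a PMC is "negative" once bit 31 is set. */
#define PMC_NEGATIVE_THRESHOLD 0x80000000u

/* Add num_insns to PMC5; return nonzero if an enabled overflow should be raised. */
static int pmc5_add_and_check(uint32_t *pmc5, uint32_t num_insns, int pmcjce)
{
    *pmc5 += num_insns;
    return pmcjce && *pmc5 >= PMC_NEGATIVE_THRESHOLD;
}

int main(void)
{
    uint32_t pmc5 = 0x7ffffff0u;
    if (pmc5_add_and_check(&pmc5, 32, 1)) {
        printf("PMC5 went negative (0x%08" PRIx32 "): overflow path taken\n", pmc5);
    }
    return 0;
}

+    /* Skip the update entirely when the PMU is not counting instructions: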
+ */ + if (!ctx->pmu_insn_cnt) { + return; + } + + #if !defined(CONFIG_USER_ONLY) + TCGLabel *l; + TCGv t0; + + /* + * The PMU insns_inc() helper stops the internal PMU timer if a + * counter overflows happens. In that case, if the guest is + * running with icount and we do not handle it beforehand, + * the helper can trigger a 'bad icount read'. + */ + gen_icount_io_start(ctx); + + /* Avoid helper calls when only PMC5-6 are enabled. */ + if (!ctx->pmc_other) { + l = gen_new_label(); + t0 = tcg_temp_new(); + + gen_load_spr(t0, SPR_POWER_PMC5); + tcg_gen_addi_tl(t0, t0, ctx->base.num_insns); + gen_store_spr(SPR_POWER_PMC5, t0); + /* Check for overflow, if it's enabled */ + if (ctx->mmcr0_pmcjce) { + tcg_gen_brcondi_tl(TCG_COND_LT, t0, PMC_COUNTER_NEGATIVE_VAL, l); + gen_helper_handle_pmc5_overflow(cpu_env); + } + + gen_set_label(l); + tcg_temp_free(t0); + } else { + gen_helper_insns_inc(cpu_env, tcg_constant_i32(ctx->base.num_insns)); + } + #else + /* + * User mode can read (but not write) PMC5 and start/stop + * the PMU via MMCR0_FC. In this case just increment + * PMC5 with base.num_insns. + */ + TCGv t0 = tcg_temp_new(); + + gen_load_spr(t0, SPR_POWER_PMC5); + tcg_gen_addi_tl(t0, t0, ctx->base.num_insns); + gen_store_spr(SPR_POWER_PMC5, t0); + + tcg_temp_free(t0); + #endif /* #if !defined(CONFIG_USER_ONLY) */ +} +#else +static void pmu_count_insns(DisasContext *ctx) +{ + return; +} +#endif /* #if defined(TARGET_PPC64) */ + static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) { return translator_use_goto_tb(&ctx->base, dest); @@ -4306,16 +4332,17 @@ static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) static void gen_lookup_and_goto_ptr(DisasContext *ctx) { - int sse = ctx->singlestep_enabled; - if (unlikely(sse)) { - if (sse & GDBSTUB_SINGLE_STEP) { - gen_debug_exception(ctx); - } else if (sse & (CPU_SINGLE_STEP | CPU_BRANCH_STEP)) { - gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx))); - } else { - tcg_gen_exit_tb(NULL, 0); - } + if (unlikely(ctx->singlestep_enabled)) { + gen_debug_exception(ctx); } else { + /* + * tcg_gen_lookup_and_goto_ptr will exit the TB if + * CF_NO_GOTO_PTR is set. Count insns now. 
+ */ + if (ctx->base.tb->flags & CF_NO_GOTO_PTR) { + pmu_count_insns(ctx); + } + tcg_gen_lookup_and_goto_ptr(); } } @@ -4327,6 +4354,7 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) dest = (uint32_t) dest; } if (use_goto_tb(ctx, dest)) { + pmu_count_insns(ctx); tcg_gen_goto_tb(n); tcg_gen_movi_tl(cpu_nip, dest & ~3); tcg_gen_exit_tb(ctx->base.tb, n); @@ -4565,7 +4593,7 @@ static void gen_mcrf(DisasContext *ctx) static void gen_rfi(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else /* * This instruction doesn't exist anymore on 64-bit server @@ -4576,7 +4604,7 @@ static void gen_rfi(DisasContext *ctx) return; } /* Restore CPU state */ - CHK_SV; + CHK_SV(ctx); gen_icount_io_start(ctx); gen_update_cfar(ctx, ctx->cia); gen_helper_rfi(cpu_env); @@ -4588,10 +4616,10 @@ static void gen_rfi(DisasContext *ctx) static void gen_rfid(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else /* Restore CPU state */ - CHK_SV; + CHK_SV(ctx); gen_icount_io_start(ctx); gen_update_cfar(ctx, ctx->cia); gen_helper_rfid(cpu_env); @@ -4603,10 +4631,10 @@ static void gen_rfid(DisasContext *ctx) static void gen_rfscv(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else /* Restore CPU state */ - CHK_SV; + CHK_SV(ctx); gen_icount_io_start(ctx); gen_update_cfar(ctx, ctx->cia); gen_helper_rfscv(cpu_env); @@ -4618,10 +4646,10 @@ static void gen_rfscv(DisasContext *ctx) static void gen_hrfid(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else /* Restore CPU state */ - CHK_HV; + CHK_HV(ctx); gen_helper_hrfid(cpu_env); ctx->base.is_jmp = DISAS_EXIT; #endif @@ -4822,7 +4850,7 @@ static void gen_mfcr(DisasContext *ctx) /* mfmsr */ static void gen_mfmsr(DisasContext *ctx) { - CHK_SV; + CHK_SV(ctx); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr); } @@ -4878,11 +4906,11 @@ static inline void gen_op_mfspr(DisasContext *ctx) */ if (sprn & 0x10) { if (ctx->pr) { - gen_priv_exception(ctx, POWERPC_EXCP_INVAL_SPR); + gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG); } } else { if (ctx->pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) { - gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR); + gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG); } } } @@ -4936,61 +4964,65 @@ static void gen_mtmsrd(DisasContext *ctx) return; } - CHK_SV; + CHK_SV(ctx); #if !defined(CONFIG_USER_ONLY) + TCGv t0, t1; + target_ulong mask; + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + gen_icount_io_start(ctx); + if (ctx->opcode & 0x00010000) { /* L=1 form only updates EE and RI */ - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], - (1 << MSR_RI) | (1 << MSR_EE)); - tcg_gen_andi_tl(t1, cpu_msr, - ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE))); - tcg_gen_or_tl(t1, t1, t0); - - gen_helper_store_msr(cpu_env, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); - + mask = (1ULL << MSR_RI) | (1ULL << MSR_EE); } else { + /* mtmsrd does not alter HV, S, ME, or LE */ + mask = ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S) | + (1ULL << MSR_HV)); /* * XXX: we need to update nip before the store if we enter * power saving mode, we will exit the loop directly from * ppc_store_msr */ gen_update_nip(ctx, ctx->base.pc_next); - gen_helper_store_msr(cpu_env, cpu_gpr[rS(ctx->opcode)]); } + + tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask); + tcg_gen_andi_tl(t1, cpu_msr, ~mask); + tcg_gen_or_tl(t0, t0, t1); + + gen_helper_store_msr(cpu_env, t0); + /* Must 
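stop the translation as machine state (may have) changed */

The rewritten gen_mtmsrd()/gen_mtmsr() above reduce both forms to one masked merge: L=1 may touch only EE and RI, L=0 may touch everything except the bits the instruction never alters, and the new MSR is (RS & mask) | (MSR & ~mask). A minimal standalone sketch of that merge, not part of the patch; the bit positions are assumed here for illustration (LSB0 numbering):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed LSB0 bit positions for the demo. */
#define MSR_EE 15
#define MSR_RI 1

/* Merge only the bits selected by 'mask' from the source register into MSR. */
static uint64_t msr_masked_update(uint64_t old_msr, uint64_t rs, uint64_t mask)
{
    return (rs & mask) | (old_msr & ~mask);
}

int main(void)
{
    /* mtmsrd with L=1 may only change EE and RI. */
    uint64_t mask = (1ULL << MSR_EE) | (1ULL << MSR_RI);
    uint64_t msr = msr_masked_update(0x8000000000001032ULL, 0, mask);
    printf("MSR after mtmsrd L=1 clearing EE/RI: 0x%016" PRIx64 "\n", msr);
    return 0;
}

+    /* Must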
stop the translation as machine state (may have) changed */ ctx->base.is_jmp = DISAS_EXIT_UPDATE; + + tcg_temp_free(t0); + tcg_temp_free(t1); #endif /* !defined(CONFIG_USER_ONLY) */ } #endif /* defined(TARGET_PPC64) */ static void gen_mtmsr(DisasContext *ctx) { - CHK_SV; + CHK_SV(ctx); #if !defined(CONFIG_USER_ONLY) + TCGv t0, t1; + target_ulong mask = 0xFFFFFFFF; + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + gen_icount_io_start(ctx); if (ctx->opcode & 0x00010000) { /* L=1 form only updates EE and RI */ - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], - (1 << MSR_RI) | (1 << MSR_EE)); - tcg_gen_andi_tl(t1, cpu_msr, - ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE))); - tcg_gen_or_tl(t1, t1, t0); - - gen_helper_store_msr(cpu_env, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); - + mask &= (1ULL << MSR_RI) | (1ULL << MSR_EE); } else { - TCGv msr = tcg_temp_new(); + /* mtmsr does not alter S, ME, or LE */ + mask &= ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S)); /* * XXX: we need to update nip before the store if we enter @@ -4998,16 +5030,19 @@ static void gen_mtmsr(DisasContext *ctx) * ppc_store_msr */ gen_update_nip(ctx, ctx->base.pc_next); -#if defined(TARGET_PPC64) - tcg_gen_deposit_tl(msr, cpu_msr, cpu_gpr[rS(ctx->opcode)], 0, 32); -#else - tcg_gen_mov_tl(msr, cpu_gpr[rS(ctx->opcode)]); -#endif - gen_helper_store_msr(cpu_env, msr); - tcg_temp_free(msr); } + + tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask); + tcg_gen_andi_tl(t1, cpu_msr, ~mask); + tcg_gen_or_tl(t0, t0, t1); + + gen_helper_store_msr(cpu_env, t0); + /* Must stop the translation as machine state (may have) changed */ ctx->base.is_jmp = DISAS_EXIT_UPDATE; + + tcg_temp_free(t0); + tcg_temp_free(t1); #endif } @@ -5058,11 +5093,11 @@ static void gen_mtspr(DisasContext *ctx) */ if (sprn & 0x10) { if (ctx->pr) { - gen_priv_exception(ctx, POWERPC_EXCP_INVAL_SPR); + gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG); } } else { if (ctx->pr || sprn == 0) { - gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR); + gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG); } } } @@ -5073,19 +5108,15 @@ static void gen_mtspr(DisasContext *ctx) static void gen_setb(DisasContext *ctx) { TCGv_i32 t0 = tcg_temp_new_i32(); - TCGv_i32 t8 = tcg_temp_new_i32(); - TCGv_i32 tm1 = tcg_temp_new_i32(); + TCGv_i32 t8 = tcg_constant_i32(8); + TCGv_i32 tm1 = tcg_constant_i32(-1); int crf = crfS(ctx->opcode); tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4); - tcg_gen_movi_i32(t8, 8); - tcg_gen_movi_i32(tm1, -1); tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0); tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); tcg_temp_free_i32(t0); - tcg_temp_free_i32(t8); - tcg_temp_free_i32(tm1); } #endif @@ -5108,7 +5139,7 @@ static void gen_dcbfep(DisasContext *ctx) { /* XXX: specification says this is treated as a load by the MMU */ TCGv t0; - CHK_SV; + CHK_SV(ctx); gen_set_access_type(ctx, ACCESS_CACHE); t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); @@ -5120,11 +5151,11 @@ static void gen_dcbfep(DisasContext *ctx) static void gen_dcbi(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv EA, val; - CHK_SV; + CHK_SV(ctx); EA = tcg_temp_new(); gen_set_access_type(ctx, ACCESS_CACHE); gen_addr_reg_index(ctx, EA); @@ -5309,11 +5340,11 @@ static void gen_dcba(DisasContext *ctx) static void gen_mfsr(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = 
tcg_const_tl(SR(ctx->opcode)); gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); tcg_temp_free(t0); @@ -5324,11 +5355,11 @@ static void gen_mfsr(DisasContext *ctx) static void gen_mfsrin(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4); gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); @@ -5340,11 +5371,11 @@ static void gen_mfsrin(DisasContext *ctx) static void gen_mtsr(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_const_tl(SR(ctx->opcode)); gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); tcg_temp_free(t0); @@ -5355,10 +5386,10 @@ static void gen_mtsr(DisasContext *ctx) static void gen_mtsrin(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4); @@ -5374,11 +5405,11 @@ static void gen_mtsrin(DisasContext *ctx) static void gen_mfsr_64b(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_const_tl(SR(ctx->opcode)); gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); tcg_temp_free(t0); @@ -5389,11 +5420,11 @@ static void gen_mfsr_64b(DisasContext *ctx) static void gen_mfsrin_64b(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4); gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); @@ -5405,11 +5436,11 @@ static void gen_mfsrin_64b(DisasContext *ctx) static void gen_mtsr_64b(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_const_tl(SR(ctx->opcode)); gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); tcg_temp_free(t0); @@ -5420,11 +5451,11 @@ static void gen_mtsr_64b(DisasContext *ctx) static void gen_mtsrin_64b(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4); gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); @@ -5432,67 +5463,6 @@ static void gen_mtsrin_64b(DisasContext *ctx) #endif /* defined(CONFIG_USER_ONLY) */ } -/* slbmte */ -static void gen_slbmte(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - - gen_helper_store_slb(cpu_env, cpu_gpr[rB(ctx->opcode)], - cpu_gpr[rS(ctx->opcode)]); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -static void gen_slbmfee(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - - gen_helper_load_slb_esid(cpu_gpr[rS(ctx->opcode)], cpu_env, - cpu_gpr[rB(ctx->opcode)]); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -static void gen_slbmfev(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - - gen_helper_load_slb_vsid(cpu_gpr[rS(ctx->opcode)], cpu_env, - cpu_gpr[rB(ctx->opcode)]); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -static void gen_slbfee_(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); -#else - TCGLabel *l1, *l2; - - if (unlikely(ctx->pr)) { - gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); - return; - } - 
gen_helper_find_slb_vsid(cpu_gpr[rS(ctx->opcode)], cpu_env, - cpu_gpr[rB(ctx->opcode)]); - l1 = gen_new_label(); - l2 = gen_new_label(); - tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so); - tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rS(ctx->opcode)], -1, l1); - tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ); - tcg_gen_br(l2); - gen_set_label(l1); - tcg_gen_movi_tl(cpu_gpr[rS(ctx->opcode)], 0); - gen_set_label(l2); -#endif -} #endif /* defined(TARGET_PPC64) */ /*** Lookaside buffer management ***/ @@ -5502,67 +5472,25 @@ static void gen_slbfee_(DisasContext *ctx) static void gen_tlbia(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_HV; + CHK_HV(ctx); gen_helper_tlbia(cpu_env); #endif /* defined(CONFIG_USER_ONLY) */ } -/* tlbiel */ -static void gen_tlbiel(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - - gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -/* tlbie */ -static void gen_tlbie(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - TCGv_i32 t1; - - if (ctx->gtse) { - CHK_SV; /* If gtse is set then tlbie is supervisor privileged */ - } else { - CHK_HV; /* Else hypervisor privileged */ - } - - if (NARROW_MODE(ctx)) { - TCGv t0 = tcg_temp_new(); - tcg_gen_ext32u_tl(t0, cpu_gpr[rB(ctx->opcode)]); - gen_helper_tlbie(cpu_env, t0); - tcg_temp_free(t0); - } else { - gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]); - } - t1 = tcg_temp_new_i32(); - tcg_gen_ld_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush)); - tcg_gen_ori_i32(t1, t1, TLB_NEED_GLOBAL_FLUSH); - tcg_gen_st_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush)); - tcg_temp_free_i32(t1); -#endif /* defined(CONFIG_USER_ONLY) */ -} - /* tlbsync */ static void gen_tlbsync(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else if (ctx->gtse) { - CHK_SV; /* If gtse is set then tlbsync is supervisor privileged */ + CHK_SV(ctx); /* If gtse is set then tlbsync is supervisor privileged */ } else { - CHK_HV; /* Else hypervisor privileged */ + CHK_HV(ctx); /* Else hypervisor privileged */ } /* BookS does both ptesync and tlbsync make tlbsync a nop for server */ @@ -5572,60 +5500,6 @@ static void gen_tlbsync(DisasContext *ctx) #endif /* defined(CONFIG_USER_ONLY) */ } -#if defined(TARGET_PPC64) -/* slbia */ -static void gen_slbia(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - uint32_t ih = (ctx->opcode >> 21) & 0x7; - TCGv_i32 t0 = tcg_const_i32(ih); - - CHK_SV; - - gen_helper_slbia(cpu_env, t0); - tcg_temp_free_i32(t0); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -/* slbie */ -static void gen_slbie(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - - gen_helper_slbie(cpu_env, cpu_gpr[rB(ctx->opcode)]); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -/* slbieg */ -static void gen_slbieg(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - - gen_helper_slbieg(cpu_env, cpu_gpr[rB(ctx->opcode)]); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -/* slbsync */ -static void gen_slbsync(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - gen_check_tlb_flush(ctx, true); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -#endif /* defined(TARGET_PPC64) */ - /*** External control ***/ /* Optional: */ @@ -5655,705 +5529,15 @@ static void gen_ecowx(DisasContext *ctx) tcg_temp_free(t0); } -/* PowerPC 601 specific instructions */ - -/* abs - abs. 
*/ -static void gen_abs(DisasContext *ctx) -{ - TCGv d = cpu_gpr[rD(ctx->opcode)]; - TCGv a = cpu_gpr[rA(ctx->opcode)]; - - tcg_gen_abs_tl(d, a); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, d); - } -} - -/* abso - abso. */ -static void gen_abso(DisasContext *ctx) -{ - TCGv d = cpu_gpr[rD(ctx->opcode)]; - TCGv a = cpu_gpr[rA(ctx->opcode)]; - - tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_ov, a, 0x80000000); - tcg_gen_abs_tl(d, a); - tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, d); - } -} - -/* clcs */ -static void gen_clcs(DisasContext *ctx) -{ - TCGv_i32 t0 = tcg_const_i32(rA(ctx->opcode)); - gen_helper_clcs(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); - tcg_temp_free_i32(t0); - /* Rc=1 sets CR0 to an undefined state */ -} - -/* div - div. */ -static void gen_div(DisasContext *ctx) -{ - gen_helper_div(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)], - cpu_gpr[rB(ctx->opcode)]); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); - } -} - -/* divo - divo. */ -static void gen_divo(DisasContext *ctx) -{ - gen_helper_divo(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)], - cpu_gpr[rB(ctx->opcode)]); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); - } -} - -/* divs - divs. */ -static void gen_divs(DisasContext *ctx) -{ - gen_helper_divs(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)], - cpu_gpr[rB(ctx->opcode)]); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); - } -} - -/* divso - divso. */ -static void gen_divso(DisasContext *ctx) -{ - gen_helper_divso(cpu_gpr[rD(ctx->opcode)], cpu_env, - cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); - } -} - -/* doz - doz. */ -static void gen_doz(DisasContext *ctx) -{ - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], - cpu_gpr[rA(ctx->opcode)], l1); - tcg_gen_sub_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], - cpu_gpr[rA(ctx->opcode)]); - tcg_gen_br(l2); - gen_set_label(l1); - tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0); - gen_set_label(l2); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); - } -} - -/* dozo - dozo. 
*/ -static void gen_dozo(DisasContext *ctx) -{ - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_temp_new(); - /* Start with XER OV disabled, the most likely case */ - tcg_gen_movi_tl(cpu_ov, 0); - tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], - cpu_gpr[rA(ctx->opcode)], l1); - tcg_gen_sub_tl(t0, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); - tcg_gen_xor_tl(t1, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); - tcg_gen_xor_tl(t2, cpu_gpr[rA(ctx->opcode)], t0); - tcg_gen_andc_tl(t1, t1, t2); - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0); - tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l2); - tcg_gen_movi_tl(cpu_ov, 1); - tcg_gen_movi_tl(cpu_so, 1); - tcg_gen_br(l2); - gen_set_label(l1); - tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0); - gen_set_label(l2); - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); - } -} - -/* dozi */ -static void gen_dozi(DisasContext *ctx) -{ - target_long simm = SIMM(ctx->opcode); - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_LT, cpu_gpr[rA(ctx->opcode)], simm, l1); - tcg_gen_subfi_tl(cpu_gpr[rD(ctx->opcode)], simm, cpu_gpr[rA(ctx->opcode)]); - tcg_gen_br(l2); - gen_set_label(l1); - tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0); - gen_set_label(l2); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); - } -} - -/* lscbx - lscbx. */ -static void gen_lscbx(DisasContext *ctx) -{ - TCGv t0 = tcg_temp_new(); - TCGv_i32 t1 = tcg_const_i32(rD(ctx->opcode)); - TCGv_i32 t2 = tcg_const_i32(rA(ctx->opcode)); - TCGv_i32 t3 = tcg_const_i32(rB(ctx->opcode)); - - gen_addr_reg_index(ctx, t0); - gen_helper_lscbx(t0, cpu_env, t0, t1, t2, t3); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); - tcg_gen_andi_tl(cpu_xer, cpu_xer, ~0x7F); - tcg_gen_or_tl(cpu_xer, cpu_xer, t0); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, t0); - } - tcg_temp_free(t0); -} - -/* maskg - maskg. */ -static void gen_maskg(DisasContext *ctx) -{ - TCGLabel *l1 = gen_new_label(); - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_temp_new(); - TCGv t3 = tcg_temp_new(); - tcg_gen_movi_tl(t3, 0xFFFFFFFF); - tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F); - tcg_gen_andi_tl(t1, cpu_gpr[rS(ctx->opcode)], 0x1F); - tcg_gen_addi_tl(t2, t0, 1); - tcg_gen_shr_tl(t2, t3, t2); - tcg_gen_shr_tl(t3, t3, t1); - tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], t2, t3); - tcg_gen_brcond_tl(TCG_COND_GE, t0, t1, l1); - tcg_gen_neg_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); - gen_set_label(l1); - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); - tcg_temp_free(t3); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* maskir - maskir. */ -static void gen_maskir(DisasContext *ctx) -{ - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - tcg_gen_and_tl(t0, cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); - tcg_gen_andc_tl(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); - tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* mul - mul. 
*/ -static void gen_mul(DisasContext *ctx) -{ - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); - TCGv t2 = tcg_temp_new(); - tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]); - tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]); - tcg_gen_mul_i64(t0, t0, t1); - tcg_gen_trunc_i64_tl(t2, t0); - gen_store_spr(SPR_MQ, t2); - tcg_gen_shri_i64(t1, t0, 32); - tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t1); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free(t2); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); - } -} - -/* mulo - mulo. */ -static void gen_mulo(DisasContext *ctx) -{ - TCGLabel *l1 = gen_new_label(); - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); - TCGv t2 = tcg_temp_new(); - /* Start with XER OV disabled, the most likely case */ - tcg_gen_movi_tl(cpu_ov, 0); - tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]); - tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]); - tcg_gen_mul_i64(t0, t0, t1); - tcg_gen_trunc_i64_tl(t2, t0); - gen_store_spr(SPR_MQ, t2); - tcg_gen_shri_i64(t1, t0, 32); - tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t1); - tcg_gen_ext32s_i64(t1, t0); - tcg_gen_brcond_i64(TCG_COND_EQ, t0, t1, l1); - tcg_gen_movi_tl(cpu_ov, 1); - tcg_gen_movi_tl(cpu_so, 1); - gen_set_label(l1); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free(t2); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); - } -} - -/* nabs - nabs. */ -static void gen_nabs(DisasContext *ctx) -{ - TCGv d = cpu_gpr[rD(ctx->opcode)]; - TCGv a = cpu_gpr[rA(ctx->opcode)]; - - tcg_gen_abs_tl(d, a); - tcg_gen_neg_tl(d, d); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, d); - } -} - -/* nabso - nabso. */ -static void gen_nabso(DisasContext *ctx) -{ - TCGv d = cpu_gpr[rD(ctx->opcode)]; - TCGv a = cpu_gpr[rA(ctx->opcode)]; - - tcg_gen_abs_tl(d, a); - tcg_gen_neg_tl(d, d); - /* nabs never overflows */ - tcg_gen_movi_tl(cpu_ov, 0); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, d); - } -} - -/* rlmi - rlmi. */ -static void gen_rlmi(DisasContext *ctx) -{ - uint32_t mb = MB(ctx->opcode); - uint32_t me = ME(ctx->opcode); - TCGv t0 = tcg_temp_new(); - tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F); - tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0); - tcg_gen_andi_tl(t0, t0, MASK(mb, me)); - tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], - ~MASK(mb, me)); - tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], t0); - tcg_temp_free(t0); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* rrib - rrib. */ -static void gen_rrib(DisasContext *ctx) -{ - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F); - tcg_gen_movi_tl(t1, 0x80000000); - tcg_gen_shr_tl(t1, t1, t0); - tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t0); - tcg_gen_and_tl(t0, t0, t1); - tcg_gen_andc_tl(t1, cpu_gpr[rA(ctx->opcode)], t1); - tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* sle - sle. 
*/ -static void gen_sle(DisasContext *ctx) -{ - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F); - tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t1); - tcg_gen_subfi_tl(t1, 32, t1); - tcg_gen_shr_tl(t1, cpu_gpr[rS(ctx->opcode)], t1); - tcg_gen_or_tl(t1, t0, t1); - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0); - gen_store_spr(SPR_MQ, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* sleq - sleq. */ -static void gen_sleq(DisasContext *ctx) -{ - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_temp_new(); - tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F); - tcg_gen_movi_tl(t2, 0xFFFFFFFF); - tcg_gen_shl_tl(t2, t2, t0); - tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0); - gen_load_spr(t1, SPR_MQ); - gen_store_spr(SPR_MQ, t0); - tcg_gen_and_tl(t0, t0, t2); - tcg_gen_andc_tl(t1, t1, t2); - tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* sliq - sliq. */ -static void gen_sliq(DisasContext *ctx) -{ - int sh = SH(ctx->opcode); - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - tcg_gen_shli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh); - tcg_gen_shri_tl(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh); - tcg_gen_or_tl(t1, t0, t1); - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0); - gen_store_spr(SPR_MQ, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* slliq - slliq. */ -static void gen_slliq(DisasContext *ctx) -{ - int sh = SH(ctx->opcode); - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh); - gen_load_spr(t1, SPR_MQ); - gen_store_spr(SPR_MQ, t0); - tcg_gen_andi_tl(t0, t0, (0xFFFFFFFFU << sh)); - tcg_gen_andi_tl(t1, t1, ~(0xFFFFFFFFU << sh)); - tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* sllq - sllq. */ -static void gen_sllq(DisasContext *ctx) -{ - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - TCGv t0 = tcg_temp_local_new(); - TCGv t1 = tcg_temp_local_new(); - TCGv t2 = tcg_temp_local_new(); - tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F); - tcg_gen_movi_tl(t1, 0xFFFFFFFF); - tcg_gen_shl_tl(t1, t1, t2); - tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20); - tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1); - gen_load_spr(t0, SPR_MQ); - tcg_gen_and_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); - tcg_gen_br(l2); - gen_set_label(l1); - tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t2); - gen_load_spr(t2, SPR_MQ); - tcg_gen_andc_tl(t1, t2, t1); - tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); - gen_set_label(l2); - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* slq - slq. 
*/ -static void gen_slq(DisasContext *ctx) -{ - TCGLabel *l1 = gen_new_label(); - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F); - tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t1); - tcg_gen_subfi_tl(t1, 32, t1); - tcg_gen_shr_tl(t1, cpu_gpr[rS(ctx->opcode)], t1); - tcg_gen_or_tl(t1, t0, t1); - gen_store_spr(SPR_MQ, t1); - tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x20); - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0); - tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1); - tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0); - gen_set_label(l1); - tcg_temp_free(t0); - tcg_temp_free(t1); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* sraiq - sraiq. */ -static void gen_sraiq(DisasContext *ctx) -{ - int sh = SH(ctx->opcode); - TCGLabel *l1 = gen_new_label(); - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], sh); - tcg_gen_shli_tl(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh); - tcg_gen_or_tl(t0, t0, t1); - gen_store_spr(SPR_MQ, t0); - tcg_gen_movi_tl(cpu_ca, 0); - tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1); - tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rS(ctx->opcode)], 0, l1); - tcg_gen_movi_tl(cpu_ca, 1); - gen_set_label(l1); - tcg_gen_sari_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh); - tcg_temp_free(t0); - tcg_temp_free(t1); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* sraq - sraq. */ -static void gen_sraq(DisasContext *ctx) -{ - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_local_new(); - TCGv t2 = tcg_temp_local_new(); - tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F); - tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t2); - tcg_gen_sar_tl(t1, cpu_gpr[rS(ctx->opcode)], t2); - tcg_gen_subfi_tl(t2, 32, t2); - tcg_gen_shl_tl(t2, cpu_gpr[rS(ctx->opcode)], t2); - tcg_gen_or_tl(t0, t0, t2); - gen_store_spr(SPR_MQ, t0); - tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20); - tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, l1); - tcg_gen_mov_tl(t2, cpu_gpr[rS(ctx->opcode)]); - tcg_gen_sari_tl(t1, cpu_gpr[rS(ctx->opcode)], 31); - gen_set_label(l1); - tcg_temp_free(t0); - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t1); - tcg_gen_movi_tl(cpu_ca, 0); - tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l2); - tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, l2); - tcg_gen_movi_tl(cpu_ca, 1); - gen_set_label(l2); - tcg_temp_free(t1); - tcg_temp_free(t2); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* sre - sre. */ -static void gen_sre(DisasContext *ctx) -{ - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F); - tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1); - tcg_gen_subfi_tl(t1, 32, t1); - tcg_gen_shl_tl(t1, cpu_gpr[rS(ctx->opcode)], t1); - tcg_gen_or_tl(t1, t0, t1); - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0); - gen_store_spr(SPR_MQ, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* srea - srea. 
*/ -static void gen_srea(DisasContext *ctx) -{ - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F); - tcg_gen_rotr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1); - gen_store_spr(SPR_MQ, t0); - tcg_gen_sar_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], t1); - tcg_temp_free(t0); - tcg_temp_free(t1); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* sreq */ -static void gen_sreq(DisasContext *ctx) -{ - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_temp_new(); - tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F); - tcg_gen_movi_tl(t1, 0xFFFFFFFF); - tcg_gen_shr_tl(t1, t1, t0); - tcg_gen_rotr_tl(t0, cpu_gpr[rS(ctx->opcode)], t0); - gen_load_spr(t2, SPR_MQ); - gen_store_spr(SPR_MQ, t0); - tcg_gen_and_tl(t0, t0, t1); - tcg_gen_andc_tl(t2, t2, t1); - tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t2); - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* sriq */ -static void gen_sriq(DisasContext *ctx) -{ - int sh = SH(ctx->opcode); - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], sh); - tcg_gen_shli_tl(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh); - tcg_gen_or_tl(t1, t0, t1); - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0); - gen_store_spr(SPR_MQ, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* srliq */ -static void gen_srliq(DisasContext *ctx) -{ - int sh = SH(ctx->opcode); - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - tcg_gen_rotri_tl(t0, cpu_gpr[rS(ctx->opcode)], sh); - gen_load_spr(t1, SPR_MQ); - gen_store_spr(SPR_MQ, t0); - tcg_gen_andi_tl(t0, t0, (0xFFFFFFFFU >> sh)); - tcg_gen_andi_tl(t1, t1, ~(0xFFFFFFFFU >> sh)); - tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* srlq */ -static void gen_srlq(DisasContext *ctx) -{ - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - TCGv t0 = tcg_temp_local_new(); - TCGv t1 = tcg_temp_local_new(); - TCGv t2 = tcg_temp_local_new(); - tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F); - tcg_gen_movi_tl(t1, 0xFFFFFFFF); - tcg_gen_shr_tl(t2, t1, t2); - tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20); - tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1); - gen_load_spr(t0, SPR_MQ); - tcg_gen_and_tl(cpu_gpr[rA(ctx->opcode)], t0, t2); - tcg_gen_br(l2); - gen_set_label(l1); - tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t2); - tcg_gen_and_tl(t0, t0, t2); - gen_load_spr(t1, SPR_MQ); - tcg_gen_andc_tl(t1, t1, t2); - tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); - gen_set_label(l2); - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* srq */ -static void gen_srq(DisasContext *ctx) -{ - TCGLabel *l1 = gen_new_label(); - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F); - tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1); - tcg_gen_subfi_tl(t1, 32, t1); - tcg_gen_shl_tl(t1, cpu_gpr[rS(ctx->opcode)], t1); - tcg_gen_or_tl(t1, t0, t1); - gen_store_spr(SPR_MQ, t1); - tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x20); - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], 
t0); - tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1); - tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0); - gen_set_label(l1); - tcg_temp_free(t0); - tcg_temp_free(t1); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* PowerPC 602 specific instructions */ - -/* dsa */ -static void gen_dsa(DisasContext *ctx) -{ - /* XXX: TODO */ - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); -} - -/* esa */ -static void gen_esa(DisasContext *ctx) -{ - /* XXX: TODO */ - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); -} - -/* mfrom */ -static void gen_mfrom(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - gen_helper_602_mfrom(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); -#endif /* defined(CONFIG_USER_ONLY) */ -} - /* 602 - 603 - G2 TLB management */ /* tlbld */ static void gen_tlbld_6xx(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); gen_helper_6xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -6362,112 +5546,13 @@ static void gen_tlbld_6xx(DisasContext *ctx) static void gen_tlbli_6xx(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); gen_helper_6xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif /* defined(CONFIG_USER_ONLY) */ } -/* 74xx TLB management */ - -/* tlbld */ -static void gen_tlbld_74xx(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - gen_helper_74xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -/* tlbli */ -static void gen_tlbli_74xx(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - gen_helper_74xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -/* POWER instructions not in PowerPC 601 */ - -/* clf */ -static void gen_clf(DisasContext *ctx) -{ - /* Cache line flush: implemented as no-op */ -} - -/* cli */ -static void gen_cli(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - /* Cache line invalidate: privileged and treated as no-op */ - CHK_SV; -#endif /* defined(CONFIG_USER_ONLY) */ -} - -/* dclst */ -static void gen_dclst(DisasContext *ctx) -{ - /* Data cache line store: treated as no-op */ -} - -static void gen_mfsri(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - int ra = rA(ctx->opcode); - int rd = rD(ctx->opcode); - TCGv t0; - - CHK_SV; - t0 = tcg_temp_new(); - gen_addr_reg_index(ctx, t0); - tcg_gen_extract_tl(t0, t0, 28, 4); - gen_helper_load_sr(cpu_gpr[rd], cpu_env, t0); - tcg_temp_free(t0); - if (ra != 0 && ra != rd) { - tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rd]); - } -#endif /* defined(CONFIG_USER_ONLY) */ -} - -static void gen_rac(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - TCGv t0; - - CHK_SV; - t0 = tcg_temp_new(); - gen_addr_reg_index(ctx, t0); - gen_helper_rac(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); - tcg_temp_free(t0); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -static void gen_rfsvc(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - - gen_helper_rfsvc(cpu_env); - ctx->base.is_jmp = DISAS_EXIT; -#endif /* defined(CONFIG_USER_ONLY) */ -} - -/* svc is not implemented for now */ - /* BookE specific instructions */ /* XXX: not implemented on 440 ? 
*/ @@ -6481,11 +5566,11 @@ static void gen_mfapidi(DisasContext *ctx) static void gen_tlbiva(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_tlbiva(cpu_env, cpu_gpr[rB(ctx->opcode)]); @@ -6712,11 +5797,11 @@ GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C); static void gen_mfdcr(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv dcrn; - CHK_SV; + CHK_SV(ctx); dcrn = tcg_const_tl(SPR(ctx->opcode)); gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn); tcg_temp_free(dcrn); @@ -6727,11 +5812,11 @@ static void gen_mfdcr(DisasContext *ctx) static void gen_mtdcr(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv dcrn; - CHK_SV; + CHK_SV(ctx); dcrn = tcg_const_tl(SPR(ctx->opcode)); gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]); tcg_temp_free(dcrn); @@ -6743,9 +5828,9 @@ static void gen_mtdcr(DisasContext *ctx) static void gen_mfdcrx(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)]); /* Note: Rc update flag set leads to undefined state of Rc0 */ @@ -6757,35 +5842,19 @@ static void gen_mfdcrx(DisasContext *ctx) static void gen_mtdcrx(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); /* Note: Rc update flag set leads to undefined state of Rc0 */ #endif /* defined(CONFIG_USER_ONLY) */ } -/* mfdcrux (PPC 460) : user-mode access to DCR */ -static void gen_mfdcrux(DisasContext *ctx) -{ - gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, - cpu_gpr[rA(ctx->opcode)]); - /* Note: Rc update flag set leads to undefined state of Rc0 */ -} - -/* mtdcrux (PPC 460) : user-mode access to DCR */ -static void gen_mtdcrux(DisasContext *ctx) -{ - gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)], - cpu_gpr[rS(ctx->opcode)]); - /* Note: Rc update flag set leads to undefined state of Rc0 */ -} - /* dccci */ static void gen_dccci(DisasContext *ctx) { - CHK_SV; + CHK_SV(ctx); /* interpreted as no-op */ } @@ -6793,11 +5862,11 @@ static void gen_dccci(DisasContext *ctx) static void gen_dcread(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv EA, val; - CHK_SV; + CHK_SV(ctx); gen_set_access_type(ctx, ACCESS_CACHE); EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); @@ -6822,14 +5891,14 @@ static void gen_icbt_40x(DisasContext *ctx) /* iccci */ static void gen_iccci(DisasContext *ctx) { - CHK_SV; + CHK_SV(ctx); /* interpreted as no-op */ } /* icread */ static void gen_icread(DisasContext *ctx) { - CHK_SV; + CHK_SV(ctx); /* interpreted as no-op */ } @@ -6837,9 +5906,9 @@ static void gen_icread(DisasContext *ctx) static void gen_rfci_40x(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); /* Restore CPU state */ gen_helper_40x_rfci(cpu_env); ctx->base.is_jmp = DISAS_EXIT; @@ -6849,9 +5918,9 @@ static void gen_rfci_40x(DisasContext *ctx) static void gen_rfci(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); /* Restore CPU state */ gen_helper_rfci(cpu_env); ctx->base.is_jmp = DISAS_EXIT; @@ -6864,9 +5933,9 @@ static void gen_rfci(DisasContext *ctx) static void 
gen_rfdi(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); /* Restore CPU state */ gen_helper_rfdi(cpu_env); ctx->base.is_jmp = DISAS_EXIT; @@ -6877,9 +5946,9 @@ static void gen_rfdi(DisasContext *ctx) static void gen_rfmci(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); /* Restore CPU state */ gen_helper_rfmci(cpu_env); ctx->base.is_jmp = DISAS_EXIT; @@ -6892,9 +5961,9 @@ static void gen_rfmci(DisasContext *ctx) static void gen_tlbre_40x(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); switch (rB(ctx->opcode)) { case 0: gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], cpu_env, @@ -6915,11 +5984,11 @@ static void gen_tlbre_40x(DisasContext *ctx) static void gen_tlbsx_40x(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); @@ -6938,9 +6007,9 @@ static void gen_tlbsx_40x(DisasContext *ctx) static void gen_tlbwe_40x(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); switch (rB(ctx->opcode)) { case 0: @@ -6964,9 +6033,9 @@ static void gen_tlbwe_40x(DisasContext *ctx) static void gen_tlbre_440(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); switch (rB(ctx->opcode)) { case 0: @@ -6990,11 +6059,11 @@ static void gen_tlbre_440(DisasContext *ctx) static void gen_tlbsx_440(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); @@ -7013,9 +6082,9 @@ static void gen_tlbsx_440(DisasContext *ctx) static void gen_tlbwe_440(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); switch (rB(ctx->opcode)) { case 0: case 1: @@ -7040,9 +6109,9 @@ static void gen_tlbwe_440(DisasContext *ctx) static void gen_tlbre_booke206(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); gen_helper_booke206_tlbre(cpu_env); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -7051,11 +6120,11 @@ static void gen_tlbre_booke206(DisasContext *ctx) static void gen_tlbsx_booke206(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); if (rA(ctx->opcode)) { t0 = tcg_temp_new(); tcg_gen_mov_tl(t0, cpu_gpr[rD(ctx->opcode)]); @@ -7073,9 +6142,9 @@ static void gen_tlbsx_booke206(DisasContext *ctx) static void gen_tlbwe_booke206(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); gen_helper_booke206_tlbwe(cpu_env); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -7083,11 +6152,11 @@ static void gen_tlbwe_booke206(DisasContext *ctx) static void gen_tlbivax_booke206(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_booke206_tlbivax(cpu_env, t0); @@ -7098,11 +6167,11 @@ static void gen_tlbivax_booke206(DisasContext *ctx) static void gen_tlbilx_booke206(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else 
TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); @@ -7125,20 +6194,20 @@ static void gen_tlbilx_booke206(DisasContext *ctx) #endif /* defined(CONFIG_USER_ONLY) */ } - /* wrtee */ static void gen_wrtee(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE)); tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE)); tcg_gen_or_tl(cpu_msr, cpu_msr, t0); + gen_ppc_maybe_interrupt(ctx); tcg_temp_free(t0); /* * Stop translation to have a chance to raise an exception if we @@ -7152,11 +6221,12 @@ static void gen_wrtee(DisasContext *ctx) static void gen_wrteei(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); if (ctx->opcode & 0x00008000) { tcg_gen_ori_tl(cpu_msr, cpu_msr, (1 << MSR_EE)); + gen_ppc_maybe_interrupt(ctx); /* Stop translation to have a chance to raise an exception */ ctx->base.is_jmp = DISAS_EXIT_UPDATE; } else { @@ -7203,68 +6273,6 @@ static void gen_icbt_440(DisasContext *ctx) */ } -/* Embedded.Processor Control */ - -static void gen_msgclr(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_HV; - if (is_book3s_arch2x(ctx)) { - gen_helper_book3s_msgclr(cpu_env, cpu_gpr[rB(ctx->opcode)]); - } else { - gen_helper_msgclr(cpu_env, cpu_gpr[rB(ctx->opcode)]); - } -#endif /* defined(CONFIG_USER_ONLY) */ -} - -static void gen_msgsnd(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_HV; - if (is_book3s_arch2x(ctx)) { - gen_helper_book3s_msgsnd(cpu_gpr[rB(ctx->opcode)]); - } else { - gen_helper_msgsnd(cpu_gpr[rB(ctx->opcode)]); - } -#endif /* defined(CONFIG_USER_ONLY) */ -} - -#if defined(TARGET_PPC64) -static void gen_msgclrp(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - gen_helper_book3s_msgclrp(cpu_env, cpu_gpr[rB(ctx->opcode)]); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -static void gen_msgsndp(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - gen_helper_book3s_msgsndp(cpu_env, cpu_gpr[rB(ctx->opcode)]); -#endif /* defined(CONFIG_USER_ONLY) */ -} -#endif - -static void gen_msgsync(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_HV; -#endif /* defined(CONFIG_USER_ONLY) */ - /* interpreted as no-op */ -} - #if defined(TARGET_PPC64) static void gen_maddld(DisasContext *ctx) { @@ -7371,7 +6379,7 @@ static void gen_tcheck(DisasContext *ctx) #define GEN_TM_PRIV_NOOP(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ - gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); \ + gen_priv_opc(ctx); \ } #else @@ -7379,7 +6387,7 @@ static inline void gen_##name(DisasContext *ctx) \ #define GEN_TM_PRIV_NOOP(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ - CHK_SV; \ + CHK_SV(ctx); \ if (unlikely(!ctx->tm_enabled)) { \ gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); \ return; \ @@ -7407,6 +6415,14 @@ static inline void get_fpr(TCGv_i64 dst, int regno) static inline void set_fpr(int regno, TCGv_i64 src) { tcg_gen_st_i64(src, cpu_env, fpr_offset(regno)); + /* + * Before PowerISA v3.1 the result of doubleword 1 of the VSR + * corresponding to the target FPR was undefined. However, + * most (if not all) real hardware were setting the result to 0. + * Starting at ISA v3.1, the result for doubleword 1 is now defined + * to be 0. 
+ */ + tcg_gen_st_i64(tcg_constant_i64(0), cpu_env, vsr64_offset(regno, false)); } static inline void get_avr64(TCGv_i64 dst, int regno, bool high) @@ -7422,11 +6438,26 @@ static inline void set_avr64(int regno, TCGv_i64 src, bool high) /* * Helpers for decodetree used by !function for decoding arguments. */ +static int times_2(DisasContext *ctx, int x) +{ + return x * 2; +} + static int times_4(DisasContext *ctx, int x) { return x * 4; } +static int times_16(DisasContext *ctx, int x) +{ + return x * 16; +} + +static int64_t dw_compose_ea(DisasContext *ctx, int x) +{ + return deposit64(0xfffffffffffffe00, 3, 6, x); +} + /* * Helpers for trans_* functions to check for specific insns flags. * Use token pasting to ensure that we use the proper flag with the @@ -7453,6 +6484,51 @@ static int times_4(DisasContext *ctx, int x) # define REQUIRE_64BIT(CTX) REQUIRE_INSNS_FLAGS(CTX, 64B) #endif +#define REQUIRE_VECTOR(CTX) \ + do { \ + if (unlikely(!(CTX)->altivec_enabled)) { \ + gen_exception((CTX), POWERPC_EXCP_VPU); \ + return true; \ + } \ + } while (0) + +#define REQUIRE_VSX(CTX) \ + do { \ + if (unlikely(!(CTX)->vsx_enabled)) { \ + gen_exception((CTX), POWERPC_EXCP_VSXU); \ + return true; \ + } \ + } while (0) + +#define REQUIRE_FPU(ctx) \ + do { \ + if (unlikely(!(ctx)->fpu_enabled)) { \ + gen_exception((ctx), POWERPC_EXCP_FPU); \ + return true; \ + } \ + } while (0) + +#if !defined(CONFIG_USER_ONLY) +#define REQUIRE_SV(CTX) \ + do { \ + if (unlikely((CTX)->pr)) { \ + gen_priv_opc(CTX); \ + return true; \ + } \ + } while (0) + +#define REQUIRE_HV(CTX) \ + do { \ + if (unlikely((CTX)->pr || !(CTX)->hv)) { \ + gen_priv_opc(CTX); \ + return true; \ + } \ + } while (0) +#else +#define REQUIRE_SV(CTX) do { gen_priv_opc(CTX); return true; } while (0) +#define REQUIRE_HV(CTX) do { gen_priv_opc(CTX); return true; } while (0) +#endif + /* * Helpers for implementing sets of trans_* functions. * Defer the implementation of NAME to FUNC, with optional extra arguments. @@ -7460,16 +6536,56 @@ static int times_4(DisasContext *ctx, int x) #define TRANS(NAME, FUNC, ...) \ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ { return FUNC(ctx, a, __VA_ARGS__); } +#define TRANS_FLAGS(FLAGS, NAME, FUNC, ...) \ + static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ + { \ + REQUIRE_INSNS_FLAGS(ctx, FLAGS); \ + return FUNC(ctx, a, __VA_ARGS__); \ + } +#define TRANS_FLAGS2(FLAGS2, NAME, FUNC, ...) \ + static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ + { \ + REQUIRE_INSNS_FLAGS2(ctx, FLAGS2); \ + return FUNC(ctx, a, __VA_ARGS__); \ + } #define TRANS64(NAME, FUNC, ...) \ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ { REQUIRE_64BIT(ctx); return FUNC(ctx, a, __VA_ARGS__); } +#define TRANS64_FLAGS2(FLAGS2, NAME, FUNC, ...) \ + static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ + { \ + REQUIRE_64BIT(ctx); \ + REQUIRE_INSNS_FLAGS2(ctx, FLAGS2); \ + return FUNC(ctx, a, __VA_ARGS__); \ + } /* TODO: More TRANS* helpers for extra insn_flags checks. */ #include "decode-insn32.c.inc" #include "decode-insn64.c.inc" +#include "power8-pmu-regs.c.inc" + +/* + * Incorporate CIA into the constant when R=1. + * Validate that when R=1, RA=0. 
+ */ +static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a) +{ + d->rt = a->rt; + d->ra = a->ra; + d->si = a->si; + if (a->r) { + if (unlikely(a->ra != 0)) { + gen_invalid(ctx); + return false; + } + d->si += ctx->cia; + } + return true; +} + #include "translate/fixedpoint-impl.c.inc" #include "translate/fp-impl.c.inc" @@ -7477,68 +6593,35 @@ static int times_4(DisasContext *ctx, int x) #include "translate/vmx-impl.c.inc" #include "translate/vsx-impl.c.inc" -#include "translate/vector-impl.c.inc" #include "translate/dfp-impl.c.inc" #include "translate/spe-impl.c.inc" -/* Handles lfdp, lxsd, lxssp */ +#include "translate/branch-impl.c.inc" + +#include "translate/processor-ctrl-impl.c.inc" + +#include "translate/storage-ctrl-impl.c.inc" + +/* Handles lfdp */ static void gen_dform39(DisasContext *ctx) { - switch (ctx->opcode & 0x3) { - case 0: /* lfdp */ + if ((ctx->opcode & 0x3) == 0) { if (ctx->insns_flags2 & PPC2_ISA205) { return gen_lfdp(ctx); } - break; - case 2: /* lxsd */ - if (ctx->insns_flags2 & PPC2_ISA300) { - return gen_lxsd(ctx); - } - break; - case 3: /* lxssp */ - if (ctx->insns_flags2 & PPC2_ISA300) { - return gen_lxssp(ctx); - } - break; } return gen_invalid(ctx); } -/* handles stfdp, lxv, stxsd, stxssp lxvx */ +/* Handles stfdp */ static void gen_dform3D(DisasContext *ctx) { - if ((ctx->opcode & 3) == 1) { /* DQ-FORM */ - switch (ctx->opcode & 0x7) { - case 1: /* lxv */ - if (ctx->insns_flags2 & PPC2_ISA300) { - return gen_lxv(ctx); - } - break; - case 5: /* stxv */ - if (ctx->insns_flags2 & PPC2_ISA300) { - return gen_stxv(ctx); - } - break; - } - } else { /* DS-FORM */ - switch (ctx->opcode & 0x3) { - case 0: /* stfdp */ - if (ctx->insns_flags2 & PPC2_ISA205) { - return gen_stfdp(ctx); - } - break; - case 2: /* stxsd */ - if (ctx->insns_flags2 & PPC2_ISA300) { - return gen_stxsd(ctx); - } - break; - case 3: /* stxssp */ - if (ctx->insns_flags2 & PPC2_ISA300) { - return gen_stxssp(ctx); - } - break; + if ((ctx->opcode & 3) == 0) { /* DS-FORM */ + /* stfdp */ + if (ctx->insns_flags2 & PPC2_ISA205) { + return gen_stfdp(ctx); } } return gen_invalid(ctx); @@ -7562,18 +6645,16 @@ static void gen_brw(DisasContext *ctx) /* brh */ static void gen_brh(DisasContext *ctx) { - TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 mask = tcg_constant_i64(0x00ff00ff00ff00ffull); TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); - tcg_gen_movi_i64(t0, 0x00ff00ff00ff00ffull); tcg_gen_shri_i64(t1, cpu_gpr[rS(ctx->opcode)], 8); - tcg_gen_and_i64(t2, t1, t0); - tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], t0); + tcg_gen_and_i64(t2, t1, mask); + tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], mask); tcg_gen_shli_i64(t1, t1, 8); tcg_gen_or_i64(cpu_gpr[rA(ctx->opcode)], t1, t2); - tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); tcg_temp_free_i64(t2); } @@ -7647,13 +6728,9 @@ GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000, GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000, PPC_NONE, PPC2_ISA300), #endif -#if defined(TARGET_PPC64) -GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX), -GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000000, PPC_64B), -#endif /* handles lfdp, lxsd, lxssp */ GEN_HANDLER_E(dform39, 0x39, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205), -/* handles stfdp, lxv, stxsd, stxssp, stxv */ +/* handles stfdp, stxsd, stxssp */ GEN_HANDLER_E(dform3D, 0x3D, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205), GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), @@ 
-7680,8 +6757,9 @@ GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B), GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207), #endif GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC), -GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x03FFF801, PPC_WAIT), -GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039FF801, PPC_NONE, PPC2_ISA300), +/* ISA v3.0 changed the extended opcode from 62 to 30 */ +GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x039FF801, PPC_WAIT), +GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039CF801, PPC_NONE, PPC2_ISA300), GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW), GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW), GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW), @@ -7755,92 +6833,23 @@ GEN_HANDLER2(mfsrin_64b, "mfsrin", 0x1F, 0x13, 0x14, 0x001F0001, GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B), GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT_64B), -GEN_HANDLER2(slbmte, "slbmte", 0x1F, 0x12, 0x0C, 0x001F0001, PPC_SEGMENT_64B), -GEN_HANDLER2(slbmfee, "slbmfee", 0x1F, 0x13, 0x1C, 0x001F0001, PPC_SEGMENT_64B), -GEN_HANDLER2(slbmfev, "slbmfev", 0x1F, 0x13, 0x1A, 0x001F0001, PPC_SEGMENT_64B), -GEN_HANDLER2(slbfee_, "slbfee.", 0x1F, 0x13, 0x1E, 0x001F0000, PPC_SEGMENT_64B), #endif GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA), /* * XXX Those instructions will need to be handled differently for * different ISA versions */ -GEN_HANDLER(tlbiel, 0x1F, 0x12, 0x08, 0x001F0001, PPC_MEM_TLBIE), -GEN_HANDLER(tlbie, 0x1F, 0x12, 0x09, 0x001F0001, PPC_MEM_TLBIE), -GEN_HANDLER_E(tlbiel, 0x1F, 0x12, 0x08, 0x00100001, PPC_NONE, PPC2_ISA300), -GEN_HANDLER_E(tlbie, 0x1F, 0x12, 0x09, 0x00100001, PPC_NONE, PPC2_ISA300), GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC), -#if defined(TARGET_PPC64) -GEN_HANDLER(slbia, 0x1F, 0x12, 0x0F, 0x031FFC01, PPC_SLBI), -GEN_HANDLER(slbie, 0x1F, 0x12, 0x0D, 0x03FF0001, PPC_SLBI), -GEN_HANDLER_E(slbieg, 0x1F, 0x12, 0x0E, 0x001F0001, PPC_NONE, PPC2_ISA300), -GEN_HANDLER_E(slbsync, 0x1F, 0x12, 0x0A, 0x03FFF801, PPC_NONE, PPC2_ISA300), -#endif GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN), GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN), -GEN_HANDLER(abs, 0x1F, 0x08, 0x0B, 0x0000F800, PPC_POWER_BR), -GEN_HANDLER(abso, 0x1F, 0x08, 0x1B, 0x0000F800, PPC_POWER_BR), -GEN_HANDLER(clcs, 0x1F, 0x10, 0x13, 0x0000F800, PPC_POWER_BR), -GEN_HANDLER(div, 0x1F, 0x0B, 0x0A, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(divo, 0x1F, 0x0B, 0x1A, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(divs, 0x1F, 0x0B, 0x0B, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(divso, 0x1F, 0x0B, 0x1B, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(doz, 0x1F, 0x08, 0x08, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(dozo, 0x1F, 0x08, 0x18, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(dozi, 0x09, 0xFF, 0xFF, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(lscbx, 0x1F, 0x15, 0x08, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(maskg, 0x1F, 0x1D, 0x00, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(maskir, 0x1F, 0x1D, 0x10, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(mul, 0x1F, 0x0B, 0x03, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(mulo, 0x1F, 0x0B, 0x13, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(nabs, 0x1F, 0x08, 0x0F, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(nabso, 0x1F, 0x08, 0x1F, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(rlmi, 0x16, 0xFF, 0xFF, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(rrib, 0x1F, 0x19, 0x10, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(sle, 0x1F, 0x19, 0x04, 0x00000000, PPC_POWER_BR), 
-GEN_HANDLER(sleq, 0x1F, 0x19, 0x06, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(sliq, 0x1F, 0x18, 0x05, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(slliq, 0x1F, 0x18, 0x07, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(sllq, 0x1F, 0x18, 0x06, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(slq, 0x1F, 0x18, 0x04, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(sraiq, 0x1F, 0x18, 0x1D, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(sraq, 0x1F, 0x18, 0x1C, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(sre, 0x1F, 0x19, 0x14, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(srea, 0x1F, 0x19, 0x1C, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(sreq, 0x1F, 0x19, 0x16, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(sriq, 0x1F, 0x18, 0x15, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(srliq, 0x1F, 0x18, 0x17, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(srlq, 0x1F, 0x18, 0x16, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(srq, 0x1F, 0x18, 0x14, 0x00000000, PPC_POWER_BR), -GEN_HANDLER(dsa, 0x1F, 0x14, 0x13, 0x03FFF801, PPC_602_SPEC), -GEN_HANDLER(esa, 0x1F, 0x14, 0x12, 0x03FFF801, PPC_602_SPEC), -GEN_HANDLER(mfrom, 0x1F, 0x09, 0x08, 0x03E0F801, PPC_602_SPEC), GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB), GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB), -GEN_HANDLER2(tlbld_74xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_74xx_TLB), -GEN_HANDLER2(tlbli_74xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_74xx_TLB), -GEN_HANDLER(clf, 0x1F, 0x16, 0x03, 0x03E00000, PPC_POWER), -GEN_HANDLER(cli, 0x1F, 0x16, 0x0F, 0x03E00000, PPC_POWER), -GEN_HANDLER(dclst, 0x1F, 0x16, 0x13, 0x03E00000, PPC_POWER), -GEN_HANDLER(mfsri, 0x1F, 0x13, 0x13, 0x00000001, PPC_POWER), -GEN_HANDLER(rac, 0x1F, 0x12, 0x19, 0x00000001, PPC_POWER), -GEN_HANDLER(rfsvc, 0x13, 0x12, 0x02, 0x03FFF0001, PPC_POWER), -GEN_HANDLER(lfq, 0x38, 0xFF, 0xFF, 0x00000003, PPC_POWER2), -GEN_HANDLER(lfqu, 0x39, 0xFF, 0xFF, 0x00000003, PPC_POWER2), -GEN_HANDLER(lfqux, 0x1F, 0x17, 0x19, 0x00000001, PPC_POWER2), -GEN_HANDLER(lfqx, 0x1F, 0x17, 0x18, 0x00000001, PPC_POWER2), -GEN_HANDLER(stfq, 0x3C, 0xFF, 0xFF, 0x00000003, PPC_POWER2), -GEN_HANDLER(stfqu, 0x3D, 0xFF, 0xFF, 0x00000003, PPC_POWER2), -GEN_HANDLER(stfqux, 0x1F, 0x17, 0x1D, 0x00000001, PPC_POWER2), -GEN_HANDLER(stfqx, 0x1F, 0x17, 0x1C, 0x00000001, PPC_POWER2), GEN_HANDLER(mfapidi, 0x1F, 0x13, 0x08, 0x0000F801, PPC_MFAPIDI), GEN_HANDLER(tlbiva, 0x1F, 0x12, 0x18, 0x03FFF801, PPC_TLBIVA), GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR), GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR), GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX), GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX), -GEN_HANDLER(mfdcrux, 0x1F, 0x03, 0x09, 0x00000000, PPC_DCRUX), -GEN_HANDLER(mtdcrux, 0x1F, 0x03, 0x0D, 0x00000000, PPC_DCRUX), GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON), GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON), GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT), @@ -7866,12 +6875,6 @@ GEN_HANDLER2_E(tlbivax_booke206, "tlbivax", 0x1F, 0x12, 0x18, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER2_E(tlbilx_booke206, "tlbilx", 0x1F, 0x12, 0x00, 0x03800001, PPC_NONE, PPC2_BOOKE206), -GEN_HANDLER2_E(msgsnd, "msgsnd", 0x1F, 0x0E, 0x06, 0x03ff0001, - PPC_NONE, PPC2_PRCNTL), -GEN_HANDLER2_E(msgclr, "msgclr", 0x1F, 0x0E, 0x07, 0x03ff0001, - PPC_NONE, PPC2_PRCNTL), -GEN_HANDLER2_E(msgsync, "msgsync", 0x1F, 0x16, 0x1B, 0x00000000, - PPC_NONE, PPC2_PRCNTL), GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE), GEN_HANDLER(wrteei, 0x1F, 
0x03, 0x05, 0x000E7C01, PPC_WRTEE), GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC), @@ -7886,15 +6889,10 @@ GEN_HANDLER(lvsl, 0x1f, 0x06, 0x00, 0x00000001, PPC_ALTIVEC), GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC), GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC), GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC), -GEN_HANDLER(vmladduhm, 0x04, 0x11, 0xFF, 0x00000000, PPC_ALTIVEC), #if defined(TARGET_PPC64) GEN_HANDLER_E(maddhd_maddhdu, 0x04, 0x18, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(maddld, 0x04, 0x19, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300), -GEN_HANDLER2_E(msgsndp, "msgsndp", 0x1F, 0x0E, 0x04, 0x03ff0001, - PPC_NONE, PPC2_ISA207S), -GEN_HANDLER2_E(msgclrp, "msgclrp", 0x1F, 0x0E, 0x05, 0x03ff0001, - PPC_NONE, PPC2_ISA207S), #endif #undef GEN_INT_ARITH_ADD @@ -8039,7 +7037,7 @@ GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02) GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08) GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00) #if defined(TARGET_PPC64) -GEN_LDEPX(ld, DEF_MEMOP(MO_Q), 0x1D, 0x00) +GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00) #endif #undef GEN_STX_E @@ -8065,7 +7063,7 @@ GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06) GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C) GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04) #if defined(TARGET_PPC64) -GEN_STEPX(std, DEF_MEMOP(MO_Q), 0x1D, 0x04) +GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1D, 0x04) #endif #undef GEN_CRLOGIC @@ -8155,8 +7153,6 @@ GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \ #include "translate/vsx-ops.c.inc" -#include "translate/dfp-ops.c.inc" - #include "translate/spe-ops.c.inc" }; @@ -8530,7 +7526,6 @@ static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->has_cfar = !!(env->flags & POWERPC_FLAG_CFAR); #endif ctx->lazy_tlb_flush = env->mmu_model == POWERPC_MMU_32B - || env->mmu_model == POWERPC_MMU_601 || env->mmu_model & POWERPC_MMU_64; ctx->fpu_enabled = (hflags >> HFLAGS_FP) & 1; @@ -8539,21 +7534,21 @@ static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->vsx_enabled = (hflags >> HFLAGS_VSX) & 1; ctx->tm_enabled = (hflags >> HFLAGS_TM) & 1; ctx->gtse = (hflags >> HFLAGS_GTSE) & 1; + ctx->hr = (hflags >> HFLAGS_HR) & 1; + ctx->mmcr0_pmcc0 = (hflags >> HFLAGS_PMCC0) & 1; + ctx->mmcr0_pmcc1 = (hflags >> HFLAGS_PMCC1) & 1; + ctx->mmcr0_pmcjce = (hflags >> HFLAGS_PMCJCE) & 1; + ctx->pmc_other = (hflags >> HFLAGS_PMC_OTHER) & 1; + ctx->pmu_insn_cnt = (hflags >> HFLAGS_INSN_CNT) & 1; ctx->singlestep_enabled = 0; if ((hflags >> HFLAGS_SE) & 1) { ctx->singlestep_enabled |= CPU_SINGLE_STEP; + ctx->base.max_insns = 1; } if ((hflags >> HFLAGS_BE) & 1) { ctx->singlestep_enabled |= CPU_BRANCH_STEP; } - if (unlikely(ctx->base.singlestep_enabled)) { - ctx->singlestep_enabled |= GDBSTUB_SINGLE_STEP; - } - - if (ctx->singlestep_enabled & (CPU_SINGLE_STEP | GDBSTUB_SINGLE_STEP)) { - ctx->base.max_insns = 1; - } } static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs) @@ -8585,7 +7580,7 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) ctx->base.pc_next, ctx->mem_idx, (int)msr_ir); ctx->cia = pc = ctx->base.pc_next; - insn = translator_ldl_swap(env, pc, need_byteswap(ctx)); + insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx)); ctx->base.pc_next = pc += 4; if (!is_prefix_insn(ctx, insn)) { @@ -8600,7 +7595,8 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_INSN); ok = true; } else 
{ - uint32_t insn2 = translator_ldl_swap(env, pc, need_byteswap(ctx)); + uint32_t insn2 = translator_ldl_swap(env, dcbase, pc, + need_byteswap(ctx)); ctx->base.pc_next = pc += 4; ok = decode_insn64(ctx, deposit64(insn2, 32, 32, insn)); } @@ -8621,7 +7617,6 @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) DisasContext *ctx = container_of(dcbase, DisasContext, base); DisasJumpType is_jmp = ctx->base.is_jmp; target_ulong nip = ctx->base.pc_next; - int sse; if (is_jmp == DISAS_NORETURN) { /* We have already exited the TB. */ @@ -8629,8 +7624,8 @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) } /* Honor single stepping. */ - sse = ctx->singlestep_enabled & (CPU_SINGLE_STEP | GDBSTUB_SINGLE_STEP); - if (unlikely(sse)) { + if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP) + && (nip <= 0x100 || nip > 0xf00)) { switch (is_jmp) { case DISAS_TOO_MANY: case DISAS_EXIT_UPDATE: @@ -8644,20 +7639,14 @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) g_assert_not_reached(); } - if (sse & GDBSTUB_SINGLE_STEP) { - gen_debug_exception(ctx); - return; - } - /* else CPU_SINGLE_STEP... */ - if (nip <= 0x100 || nip > 0xf00) { - gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx))); - return; - } + gen_debug_exception(ctx); + return; } switch (is_jmp) { case DISAS_TOO_MANY: if (use_goto_tb(ctx, nip)) { + pmu_count_insns(ctx); tcg_gen_goto_tb(0); gen_update_nip(ctx, nip); tcg_gen_exit_tb(ctx->base.tb, 0); @@ -8668,6 +7657,14 @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) gen_update_nip(ctx, nip); /* fall through */ case DISAS_CHAIN: + /* + * tcg_gen_lookup_and_goto_ptr will exit the TB if + * CF_NO_GOTO_PTR is set. Count insns now. + */ + if (ctx->base.tb->flags & CF_NO_GOTO_PTR) { + pmu_count_insns(ctx); + } + tcg_gen_lookup_and_goto_ptr(); break; @@ -8675,6 +7672,7 @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) gen_update_nip(ctx, nip); /* fall through */ case DISAS_EXIT: + pmu_count_insns(ctx); tcg_gen_exit_tb(NULL, 0); break; @@ -8683,10 +7681,11 @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) } } -static void ppc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs) +static void ppc_tr_disas_log(const DisasContextBase *dcbase, + CPUState *cs, FILE *logfile) { - qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first)); - log_target_disas(cs, dcbase->pc_first, dcbase->tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first)); + target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size); } static const TranslatorOps ppc_tr_ops = { @@ -8698,15 +7697,10 @@ static const TranslatorOps ppc_tr_ops = { .disas_log = ppc_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext ctx; - translator_loop(&ppc_tr_ops, &ctx.base, cs, tb, max_insns); -} - -void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->nip = data[0]; + translator_loop(cs, tb, max_insns, pc, host_pc, &ppc_tr_ops, &ctx.base); } diff --git a/target/ppc/translate/branch-impl.c.inc b/target/ppc/translate/branch-impl.c.inc new file mode 100644 index 0000000000..29cfa11854 --- /dev/null +++ b/target/ppc/translate/branch-impl.c.inc @@ -0,0 +1,33 @@ +/* + * Power ISA decode for branch instructions + * + * Copyright IBM Corp. 
2021 + * + * Authors: + * Daniel Henrique Barboza + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) + +static bool trans_RFEBB(DisasContext *ctx, arg_XL_s *arg) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA207S); + + gen_icount_io_start(ctx); + gen_update_cfar(ctx, ctx->cia); + gen_helper_rfebb(cpu_env, cpu_gpr[arg->s]); + + ctx->base.is_jmp = DISAS_CHAIN; + + return true; +} +#else +static bool trans_RFEBB(DisasContext *ctx, arg_XL_s *arg) +{ + gen_invalid(ctx); + return true; +} +#endif diff --git a/target/ppc/translate/dfp-impl.c.inc b/target/ppc/translate/dfp-impl.c.inc index 6c556dc2e1..f9f1d58d44 100644 --- a/target/ppc/translate/dfp-impl.c.inc +++ b/target/ppc/translate/dfp-impl.c.inc @@ -7,226 +7,223 @@ static inline TCGv_ptr gen_fprp_ptr(int reg) return r; } -#define GEN_DFP_T_A_B_Rc(name) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr rd, ra, rb; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - rd = gen_fprp_ptr(rD(ctx->opcode)); \ - ra = gen_fprp_ptr(rA(ctx->opcode)); \ - rb = gen_fprp_ptr(rB(ctx->opcode)); \ - gen_helper_##name(cpu_env, rd, ra, rb); \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ - tcg_temp_free_ptr(rd); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ +#define TRANS_DFP_T_A_B_Rc(NAME) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr rt, ra, rb; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + rt = gen_fprp_ptr(a->rt); \ + ra = gen_fprp_ptr(a->ra); \ + rb = gen_fprp_ptr(a->rb); \ + gen_helper_##NAME(cpu_env, rt, ra, rb); \ + if (unlikely(a->rc)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(rt); \ + tcg_temp_free_ptr(ra); \ + tcg_temp_free_ptr(rb); \ + return true; \ } -#define GEN_DFP_BF_A_B(name) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr ra, rb; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - ra = gen_fprp_ptr(rA(ctx->opcode)); \ - rb = gen_fprp_ptr(rB(ctx->opcode)); \ - gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \ - cpu_env, ra, rb); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ +#define TRANS_DFP_BF_A_B(NAME) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr ra, rb; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + ra = gen_fprp_ptr(a->ra); \ + rb = gen_fprp_ptr(a->rb); \ + gen_helper_##NAME(cpu_crf[a->bf], \ + cpu_env, ra, rb); \ + tcg_temp_free_ptr(ra); \ + tcg_temp_free_ptr(rb); \ + return true; \ } -#define GEN_DFP_BF_I_B(name) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_i32 uim; \ - TCGv_ptr rb; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - uim = tcg_const_i32(UIMM5(ctx->opcode)); \ - rb = gen_fprp_ptr(rB(ctx->opcode)); \ - gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \ - cpu_env, uim, rb); \ - tcg_temp_free_i32(uim); \ - tcg_temp_free_ptr(rb); \ +#define TRANS_DFP_BF_I_B(NAME) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr rb; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + rb = gen_fprp_ptr(a->rb); \ + gen_helper_##NAME(cpu_crf[a->bf], \ + cpu_env, 
tcg_constant_i32(a->uim), rb);\ + tcg_temp_free_ptr(rb); \ + return true; \ } -#define GEN_DFP_BF_A_DCM(name) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr ra; \ - TCGv_i32 dcm; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - ra = gen_fprp_ptr(rA(ctx->opcode)); \ - dcm = tcg_const_i32(DCM(ctx->opcode)); \ - gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \ - cpu_env, ra, dcm); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_i32(dcm); \ +#define TRANS_DFP_BF_A_DCM(NAME) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr ra; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + ra = gen_fprp_ptr(a->fra); \ + gen_helper_##NAME(cpu_crf[a->bf], \ + cpu_env, ra, tcg_constant_i32(a->dm)); \ + tcg_temp_free_ptr(ra); \ + return true; \ } -#define GEN_DFP_T_B_U32_U32_Rc(name, u32f1, u32f2) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr rt, rb; \ - TCGv_i32 u32_1, u32_2; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - rt = gen_fprp_ptr(rD(ctx->opcode)); \ - rb = gen_fprp_ptr(rB(ctx->opcode)); \ - u32_1 = tcg_const_i32(u32f1(ctx->opcode)); \ - u32_2 = tcg_const_i32(u32f2(ctx->opcode)); \ - gen_helper_##name(cpu_env, rt, rb, u32_1, u32_2); \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_i32(u32_1); \ - tcg_temp_free_i32(u32_2); \ +#define TRANS_DFP_T_B_U32_U32_Rc(NAME, U32F1, U32F2) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr rt, rb; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + rt = gen_fprp_ptr(a->frt); \ + rb = gen_fprp_ptr(a->frb); \ + gen_helper_##NAME(cpu_env, rt, rb, \ + tcg_constant_i32(a->U32F1), \ + tcg_constant_i32(a->U32F2)); \ + if (unlikely(a->rc)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(rt); \ + tcg_temp_free_ptr(rb); \ + return true; \ } -#define GEN_DFP_T_A_B_I32_Rc(name, i32fld) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr rt, ra, rb; \ - TCGv_i32 i32; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - rt = gen_fprp_ptr(rD(ctx->opcode)); \ - ra = gen_fprp_ptr(rA(ctx->opcode)); \ - rb = gen_fprp_ptr(rB(ctx->opcode)); \ - i32 = tcg_const_i32(i32fld(ctx->opcode)); \ - gen_helper_##name(cpu_env, rt, ra, rb, i32); \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_i32(i32); \ - } - -#define GEN_DFP_T_B_Rc(name) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr rt, rb; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - rt = gen_fprp_ptr(rD(ctx->opcode)); \ - rb = gen_fprp_ptr(rB(ctx->opcode)); \ - gen_helper_##name(cpu_env, rt, rb); \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(rb); \ - } - -#define GEN_DFP_T_FPR_I32_Rc(name, fprfld, i32fld) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr rt, rs; \ - TCGv_i32 i32; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - 
return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - rt = gen_fprp_ptr(rD(ctx->opcode)); \ - rs = gen_fprp_ptr(fprfld(ctx->opcode)); \ - i32 = tcg_const_i32(i32fld(ctx->opcode)); \ - gen_helper_##name(cpu_env, rt, rs, i32); \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(rs); \ - tcg_temp_free_i32(i32); \ +#define TRANS_DFP_T_A_B_I32_Rc(NAME, I32FLD) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr rt, ra, rb; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + rt = gen_fprp_ptr(a->frt); \ + ra = gen_fprp_ptr(a->fra); \ + rb = gen_fprp_ptr(a->frb); \ + gen_helper_##NAME(cpu_env, rt, ra, rb, \ + tcg_constant_i32(a->I32FLD)); \ + if (unlikely(a->rc)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(rt); \ + tcg_temp_free_ptr(ra); \ + tcg_temp_free_ptr(rb); \ + return true; \ } -GEN_DFP_T_A_B_Rc(dadd) -GEN_DFP_T_A_B_Rc(daddq) -GEN_DFP_T_A_B_Rc(dsub) -GEN_DFP_T_A_B_Rc(dsubq) -GEN_DFP_T_A_B_Rc(dmul) -GEN_DFP_T_A_B_Rc(dmulq) -GEN_DFP_T_A_B_Rc(ddiv) -GEN_DFP_T_A_B_Rc(ddivq) -GEN_DFP_BF_A_B(dcmpu) -GEN_DFP_BF_A_B(dcmpuq) -GEN_DFP_BF_A_B(dcmpo) -GEN_DFP_BF_A_B(dcmpoq) -GEN_DFP_BF_A_DCM(dtstdc) -GEN_DFP_BF_A_DCM(dtstdcq) -GEN_DFP_BF_A_DCM(dtstdg) -GEN_DFP_BF_A_DCM(dtstdgq) -GEN_DFP_BF_A_B(dtstex) -GEN_DFP_BF_A_B(dtstexq) -GEN_DFP_BF_A_B(dtstsf) -GEN_DFP_BF_A_B(dtstsfq) -GEN_DFP_BF_I_B(dtstsfi) -GEN_DFP_BF_I_B(dtstsfiq) -GEN_DFP_T_B_U32_U32_Rc(dquai, SIMM5, RMC) -GEN_DFP_T_B_U32_U32_Rc(dquaiq, SIMM5, RMC) -GEN_DFP_T_A_B_I32_Rc(dqua, RMC) -GEN_DFP_T_A_B_I32_Rc(dquaq, RMC) -GEN_DFP_T_A_B_I32_Rc(drrnd, RMC) -GEN_DFP_T_A_B_I32_Rc(drrndq, RMC) -GEN_DFP_T_B_U32_U32_Rc(drintx, FPW, RMC) -GEN_DFP_T_B_U32_U32_Rc(drintxq, FPW, RMC) -GEN_DFP_T_B_U32_U32_Rc(drintn, FPW, RMC) -GEN_DFP_T_B_U32_U32_Rc(drintnq, FPW, RMC) -GEN_DFP_T_B_Rc(dctdp) -GEN_DFP_T_B_Rc(dctqpq) -GEN_DFP_T_B_Rc(drsp) -GEN_DFP_T_B_Rc(drdpq) -GEN_DFP_T_B_Rc(dcffix) -GEN_DFP_T_B_Rc(dcffixq) -GEN_DFP_T_B_Rc(dctfix) -GEN_DFP_T_B_Rc(dctfixq) -GEN_DFP_T_FPR_I32_Rc(ddedpd, rB, SP) -GEN_DFP_T_FPR_I32_Rc(ddedpdq, rB, SP) -GEN_DFP_T_FPR_I32_Rc(denbcd, rB, SP) -GEN_DFP_T_FPR_I32_Rc(denbcdq, rB, SP) -GEN_DFP_T_B_Rc(dxex) -GEN_DFP_T_B_Rc(dxexq) -GEN_DFP_T_A_B_Rc(diex) -GEN_DFP_T_A_B_Rc(diexq) -GEN_DFP_T_FPR_I32_Rc(dscli, rA, DCM) -GEN_DFP_T_FPR_I32_Rc(dscliq, rA, DCM) -GEN_DFP_T_FPR_I32_Rc(dscri, rA, DCM) -GEN_DFP_T_FPR_I32_Rc(dscriq, rA, DCM) +#define TRANS_DFP_T_B_Rc(NAME) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr rt, rb; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + rt = gen_fprp_ptr(a->rt); \ + rb = gen_fprp_ptr(a->rb); \ + gen_helper_##NAME(cpu_env, rt, rb); \ + if (unlikely(a->rc)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(rt); \ + tcg_temp_free_ptr(rb); \ + return true; \ +} -#undef GEN_DFP_T_A_B_Rc -#undef GEN_DFP_BF_A_B -#undef GEN_DFP_BF_A_DCM -#undef GEN_DFP_T_B_U32_U32_Rc -#undef GEN_DFP_T_A_B_I32_Rc -#undef GEN_DFP_T_B_Rc -#undef GEN_DFP_T_FPR_I32_Rc +#define TRANS_DFP_T_FPR_I32_Rc(NAME, FPRFLD, I32FLD) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr rt, rx; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + rt = gen_fprp_ptr(a->rt); \ + rx = gen_fprp_ptr(a->FPRFLD); \ + gen_helper_##NAME(cpu_env, rt, rx, \ + tcg_constant_i32(a->I32FLD)); \ + if (unlikely(a->rc)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(rt); \ + tcg_temp_free_ptr(rx); \ + return true; \ +} + 
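Editor's note, not part of the patch: each TRANS_DFP_* macro above stamps out one decodetree trans_* callback. As a readability aid, the TRANS_DFP_T_A_B_Rc(DADD) invocation just below expands to roughly the following once the preprocessor has run; arg_DADD and gen_helper_DADD are assumed to come from the generated decodetree and helper headers, which are not part of this hunk.

static bool trans_DADD(DisasContext *ctx, arg_DADD *a)
{
    TCGv_ptr rt, ra, rb;
    REQUIRE_INSNS_FLAGS2(ctx, DFP);   /* bail out unless the CPU model has the DFP facility */
    REQUIRE_FPU(ctx);                 /* MSR[FP]=0: raise FP-unavailable and return true */
    rt = gen_fprp_ptr(a->rt);
    ra = gen_fprp_ptr(a->ra);
    rb = gen_fprp_ptr(a->rb);
    gen_helper_DADD(cpu_env, rt, ra, rb);
    if (unlikely(a->rc)) {
        gen_set_cr1_from_fpscr(ctx);  /* Rc=1: reflect FPSCR summary bits into CR1 */
    }
    tcg_temp_free_ptr(rt);
    tcg_temp_free_ptr(ra);
    tcg_temp_free_ptr(rb);
    return true;
}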
+TRANS_DFP_T_A_B_Rc(DADD) +TRANS_DFP_T_A_B_Rc(DADDQ) +TRANS_DFP_T_A_B_Rc(DSUB) +TRANS_DFP_T_A_B_Rc(DSUBQ) +TRANS_DFP_T_A_B_Rc(DMUL) +TRANS_DFP_T_A_B_Rc(DMULQ) +TRANS_DFP_T_A_B_Rc(DDIV) +TRANS_DFP_T_A_B_Rc(DDIVQ) +TRANS_DFP_BF_A_B(DCMPU) +TRANS_DFP_BF_A_B(DCMPUQ) +TRANS_DFP_BF_A_B(DCMPO) +TRANS_DFP_BF_A_B(DCMPOQ) +TRANS_DFP_BF_A_DCM(DTSTDC) +TRANS_DFP_BF_A_DCM(DTSTDCQ) +TRANS_DFP_BF_A_DCM(DTSTDG) +TRANS_DFP_BF_A_DCM(DTSTDGQ) +TRANS_DFP_BF_A_B(DTSTEX) +TRANS_DFP_BF_A_B(DTSTEXQ) +TRANS_DFP_BF_A_B(DTSTSF) +TRANS_DFP_BF_A_B(DTSTSFQ) +TRANS_DFP_BF_I_B(DTSTSFI) +TRANS_DFP_BF_I_B(DTSTSFIQ) +TRANS_DFP_T_B_U32_U32_Rc(DQUAI, te, rmc) +TRANS_DFP_T_B_U32_U32_Rc(DQUAIQ, te, rmc) +TRANS_DFP_T_A_B_I32_Rc(DQUA, rmc) +TRANS_DFP_T_A_B_I32_Rc(DQUAQ, rmc) +TRANS_DFP_T_A_B_I32_Rc(DRRND, rmc) +TRANS_DFP_T_A_B_I32_Rc(DRRNDQ, rmc) +TRANS_DFP_T_B_U32_U32_Rc(DRINTX, r, rmc) +TRANS_DFP_T_B_U32_U32_Rc(DRINTXQ, r, rmc) +TRANS_DFP_T_B_U32_U32_Rc(DRINTN, r, rmc) +TRANS_DFP_T_B_U32_U32_Rc(DRINTNQ, r, rmc) +TRANS_DFP_T_B_Rc(DCTDP) +TRANS_DFP_T_B_Rc(DCTQPQ) +TRANS_DFP_T_B_Rc(DRSP) +TRANS_DFP_T_B_Rc(DRDPQ) +TRANS_DFP_T_B_Rc(DCFFIX) +TRANS_DFP_T_B_Rc(DCFFIXQ) +TRANS_DFP_T_B_Rc(DCTFIX) +TRANS_DFP_T_B_Rc(DCTFIXQ) +TRANS_DFP_T_FPR_I32_Rc(DDEDPD, rb, sp) +TRANS_DFP_T_FPR_I32_Rc(DDEDPDQ, rb, sp) +TRANS_DFP_T_FPR_I32_Rc(DENBCD, rb, s) +TRANS_DFP_T_FPR_I32_Rc(DENBCDQ, rb, s) +TRANS_DFP_T_B_Rc(DXEX) +TRANS_DFP_T_B_Rc(DXEXQ) +TRANS_DFP_T_A_B_Rc(DIEX) +TRANS_DFP_T_A_B_Rc(DIEXQ) +TRANS_DFP_T_FPR_I32_Rc(DSCLI, ra, sh) +TRANS_DFP_T_FPR_I32_Rc(DSCLIQ, ra, sh) +TRANS_DFP_T_FPR_I32_Rc(DSCRI, ra, sh) +TRANS_DFP_T_FPR_I32_Rc(DSCRIQ, ra, sh) + +static bool trans_DCFFIXQQ(DisasContext *ctx, arg_DCFFIXQQ *a) +{ + TCGv_ptr rt, rb; + + REQUIRE_INSNS_FLAGS2(ctx, DFP); + REQUIRE_FPU(ctx); + REQUIRE_VECTOR(ctx); + + rt = gen_fprp_ptr(a->frtp); + rb = gen_avr_ptr(a->vrb); + gen_helper_DCFFIXQQ(cpu_env, rt, rb); + tcg_temp_free_ptr(rt); + tcg_temp_free_ptr(rb); + + return true; +} + +static bool trans_DCTFIXQQ(DisasContext *ctx, arg_DCTFIXQQ *a) +{ + TCGv_ptr rt, rb; + + REQUIRE_INSNS_FLAGS2(ctx, DFP); + REQUIRE_FPU(ctx); + REQUIRE_VECTOR(ctx); + + rt = gen_avr_ptr(a->vrt); + rb = gen_fprp_ptr(a->frbp); + gen_helper_DCTFIXQQ(cpu_env, rt, rb); + tcg_temp_free_ptr(rt); + tcg_temp_free_ptr(rb); + + return true; +} diff --git a/target/ppc/translate/dfp-ops.c.inc b/target/ppc/translate/dfp-ops.c.inc deleted file mode 100644 index 6ef38e5712..0000000000 --- a/target/ppc/translate/dfp-ops.c.inc +++ /dev/null @@ -1,165 +0,0 @@ -#define _GEN_DFP_LONG(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_DFP) - -#define _GEN_DFP_LONG_300(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_ISA300) - -#define _GEN_DFP_LONGx2(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP) - -#define _GEN_DFP_LONGx4(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3B, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3B, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP) - -#define _GEN_DFP_QUAD(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_DFP) - -#define _GEN_DFP_QUAD_300(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_ISA300) - -#define _GEN_DFP_QUADx2(name, op1, op2, mask) \ 
-GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP) - -#define _GEN_DFP_QUADx4(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3F, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3F, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP) - -#define GEN_DFP_T_A_B_Rc(name, op1, op2) \ -_GEN_DFP_LONG(name, op1, op2, 0x00000000) - -#define GEN_DFP_Tp_Ap_Bp_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x00210800) - -#define GEN_DFP_Tp_A_Bp_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x00200800) - -#define GEN_DFP_T_B_Rc(name, op1, op2) \ -_GEN_DFP_LONG(name, op1, op2, 0x001F0000) - -#define GEN_DFP_Tp_Bp_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x003F0800) - -#define GEN_DFP_Tp_B_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x003F0000) - -#define GEN_DFP_T_Bp_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x001F0800) - -#define GEN_DFP_BF_A_B(name, op1, op2) \ -_GEN_DFP_LONG(name, op1, op2, 0x00000001) - -#define GEN_DFP_BF_A_B_300(name, op1, op2) \ -_GEN_DFP_LONG_300(name, op1, op2, 0x00400001) - -#define GEN_DFP_BF_Ap_Bp(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x00610801) - -#define GEN_DFP_BF_A_Bp(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x00600801) - -#define GEN_DFP_BF_A_Bp_300(name, op1, op2) \ -_GEN_DFP_QUAD_300(name, op1, op2, 0x00400001) - -#define GEN_DFP_BF_A_DCM(name, op1, op2) \ -_GEN_DFP_LONGx2(name, op1, op2, 0x00600001) - -#define GEN_DFP_BF_Ap_DCM(name, op1, op2) \ -_GEN_DFP_QUADx2(name, op1, op2, 0x00610001) - -#define GEN_DFP_T_A_B_RMC_Rc(name, op1, op2) \ -_GEN_DFP_LONGx4(name, op1, op2, 0x00000000) - -#define GEN_DFP_Tp_Ap_Bp_RMC_Rc(name, op1, op2) \ -_GEN_DFP_QUADx4(name, op1, op2, 0x02010800) - -#define GEN_DFP_Tp_A_Bp_RMC_Rc(name, op1, op2) \ -_GEN_DFP_QUADx4(name, op1, op2, 0x02000800) - -#define GEN_DFP_TE_T_B_RMC_Rc(name, op1, op2) \ -_GEN_DFP_LONGx4(name, op1, op2, 0x00000000) - -#define GEN_DFP_TE_Tp_Bp_RMC_Rc(name, op1, op2) \ -_GEN_DFP_QUADx4(name, op1, op2, 0x00200800) - -#define GEN_DFP_R_T_B_RMC_Rc(name, op1, op2) \ -_GEN_DFP_LONGx4(name, op1, op2, 0x001E0000) - -#define GEN_DFP_R_Tp_Bp_RMC_Rc(name, op1, op2) \ -_GEN_DFP_QUADx4(name, op1, op2, 0x003E0800) - -#define GEN_DFP_SP_T_B_Rc(name, op1, op2) \ -_GEN_DFP_LONG(name, op1, op2, 0x00070000) - -#define GEN_DFP_SP_Tp_Bp_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x00270800) - -#define GEN_DFP_S_T_B_Rc(name, op1, op2) \ -_GEN_DFP_LONG(name, op1, op2, 0x000F0000) - -#define GEN_DFP_S_Tp_Bp_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x002F0800) - -#define GEN_DFP_T_A_SH_Rc(name, op1, op2) \ -_GEN_DFP_LONGx2(name, op1, op2, 0x00000000) - -#define GEN_DFP_Tp_Ap_SH_Rc(name, op1, op2) \ -_GEN_DFP_QUADx2(name, op1, op2, 0x00210000) - -GEN_DFP_T_A_B_Rc(dadd, 0x02, 0x00), -GEN_DFP_Tp_Ap_Bp_Rc(daddq, 0x02, 0x00), -GEN_DFP_T_A_B_Rc(dsub, 0x02, 0x10), -GEN_DFP_Tp_Ap_Bp_Rc(dsubq, 0x02, 0x10), -GEN_DFP_T_A_B_Rc(dmul, 0x02, 0x01), -GEN_DFP_Tp_Ap_Bp_Rc(dmulq, 0x02, 0x01), -GEN_DFP_T_A_B_Rc(ddiv, 0x02, 0x11), -GEN_DFP_Tp_Ap_Bp_Rc(ddivq, 0x02, 0x11), -GEN_DFP_BF_A_B(dcmpu, 0x02, 0x14), -GEN_DFP_BF_Ap_Bp(dcmpuq, 0x02, 0x14), -GEN_DFP_BF_A_B(dcmpo, 0x02, 0x04), -GEN_DFP_BF_Ap_Bp(dcmpoq, 0x02, 0x04), -GEN_DFP_BF_A_DCM(dtstdc, 0x02, 0x06), -GEN_DFP_BF_Ap_DCM(dtstdcq, 0x02, 0x06), -GEN_DFP_BF_A_DCM(dtstdg, 0x02, 0x07), 
-GEN_DFP_BF_Ap_DCM(dtstdgq, 0x02, 0x07), -GEN_DFP_BF_A_B(dtstex, 0x02, 0x05), -GEN_DFP_BF_Ap_Bp(dtstexq, 0x02, 0x05), -GEN_DFP_BF_A_B(dtstsf, 0x02, 0x15), -GEN_DFP_BF_A_Bp(dtstsfq, 0x02, 0x15), -GEN_DFP_BF_A_B_300(dtstsfi, 0x03, 0x15), -GEN_DFP_BF_A_Bp_300(dtstsfiq, 0x03, 0x15), -GEN_DFP_TE_T_B_RMC_Rc(dquai, 0x03, 0x02), -GEN_DFP_TE_Tp_Bp_RMC_Rc(dquaiq, 0x03, 0x02), -GEN_DFP_T_A_B_RMC_Rc(dqua, 0x03, 0x00), -GEN_DFP_Tp_Ap_Bp_RMC_Rc(dquaq, 0x03, 0x00), -GEN_DFP_T_A_B_RMC_Rc(drrnd, 0x03, 0x01), -GEN_DFP_Tp_A_Bp_RMC_Rc(drrndq, 0x03, 0x01), -GEN_DFP_R_T_B_RMC_Rc(drintx, 0x03, 0x03), -GEN_DFP_R_Tp_Bp_RMC_Rc(drintxq, 0x03, 0x03), -GEN_DFP_R_T_B_RMC_Rc(drintn, 0x03, 0x07), -GEN_DFP_R_Tp_Bp_RMC_Rc(drintnq, 0x03, 0x07), -GEN_DFP_T_B_Rc(dctdp, 0x02, 0x08), -GEN_DFP_Tp_B_Rc(dctqpq, 0x02, 0x08), -GEN_DFP_T_B_Rc(drsp, 0x02, 0x18), -GEN_DFP_Tp_Bp_Rc(drdpq, 0x02, 0x18), -GEN_DFP_T_B_Rc(dcffix, 0x02, 0x19), -GEN_DFP_Tp_B_Rc(dcffixq, 0x02, 0x19), -GEN_DFP_T_B_Rc(dctfix, 0x02, 0x09), -GEN_DFP_T_Bp_Rc(dctfixq, 0x02, 0x09), -GEN_DFP_SP_T_B_Rc(ddedpd, 0x02, 0x0a), -GEN_DFP_SP_Tp_Bp_Rc(ddedpdq, 0x02, 0x0a), -GEN_DFP_S_T_B_Rc(denbcd, 0x02, 0x1a), -GEN_DFP_S_Tp_Bp_Rc(denbcdq, 0x02, 0x1a), -GEN_DFP_T_B_Rc(dxex, 0x02, 0x0b), -GEN_DFP_T_Bp_Rc(dxexq, 0x02, 0x0b), -GEN_DFP_T_A_B_Rc(diex, 0x02, 0x1b), -GEN_DFP_Tp_A_Bp_Rc(diexq, 0x02, 0x1b), -GEN_DFP_T_A_SH_Rc(dscli, 0x02, 0x02), -GEN_DFP_Tp_Ap_SH_Rc(dscliq, 0x02, 0x02), -GEN_DFP_T_A_SH_Rc(dscri, 0x02, 0x03), -GEN_DFP_Tp_Ap_SH_Rc(dscriq, 0x02, 0x03), diff --git a/target/ppc/translate/fixedpoint-impl.c.inc b/target/ppc/translate/fixedpoint-impl.c.inc index 2e2518ee15..1ba56cbed5 100644 --- a/target/ppc/translate/fixedpoint-impl.c.inc +++ b/target/ppc/translate/fixedpoint-impl.c.inc @@ -17,25 +17,6 @@ * License along with this library; if not, see . */ -/* - * Incorporate CIA into the constant when R=1. - * Validate that when R=1, RA=0. - */ -static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a) -{ - d->rt = a->rt; - d->ra = a->ra; - d->si = a->si; - if (a->r) { - if (unlikely(a->ra != 0)) { - gen_invalid(ctx); - return false; - } - d->si += ctx->cia; - } - return true; -} - /* * Fixed-Point Load/Store Instructions */ @@ -51,15 +32,7 @@ static bool do_ldst(DisasContext *ctx, int rt, int ra, TCGv displ, bool update, } gen_set_access_type(ctx, ACCESS_INT); - ea = tcg_temp_new(); - if (ra) { - tcg_gen_add_tl(ea, cpu_gpr[ra], displ); - } else { - tcg_gen_mov_tl(ea, displ); - } - if (NARROW_MODE(ctx)) { - tcg_gen_ext32u_tl(ea, ea); - } + ea = do_ea_calc(ctx, ra, displ); mop ^= ctx->default_tcg_memop_mask; if (store) { tcg_gen_qemu_st_tl(cpu_gpr[rt], ea, ctx->mem_idx, mop); @@ -96,6 +69,104 @@ static bool do_ldst_X(DisasContext *ctx, arg_X *a, bool update, return do_ldst(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, mop); } +static bool do_ldst_quad(DisasContext *ctx, arg_D *a, bool store, bool prefixed) +{ +#if defined(TARGET_PPC64) + TCGv ea; + TCGv_i64 low_addr_gpr, high_addr_gpr; + MemOp mop; + + REQUIRE_INSNS_FLAGS(ctx, 64BX); + + if (!prefixed && !(ctx->insns_flags2 & PPC2_LSQ_ISA207)) { + /* lq and stq were privileged prior to V. 
2.07 */ + REQUIRE_SV(ctx); + + if (ctx->le_mode) { + gen_align_no_le(ctx); + return true; + } + } + + if (!store && unlikely(a->ra == a->rt)) { + gen_invalid(ctx); + return true; + } + + gen_set_access_type(ctx, ACCESS_INT); + ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->si)); + + if (prefixed || !ctx->le_mode) { + low_addr_gpr = cpu_gpr[a->rt]; + high_addr_gpr = cpu_gpr[a->rt + 1]; + } else { + low_addr_gpr = cpu_gpr[a->rt + 1]; + high_addr_gpr = cpu_gpr[a->rt]; + } + + if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { + if (HAVE_ATOMIC128) { + mop = DEF_MEMOP(MO_128); + TCGv_i32 oi = tcg_constant_i32(make_memop_idx(mop, ctx->mem_idx)); + if (store) { + if (ctx->le_mode) { + gen_helper_stq_le_parallel(cpu_env, ea, low_addr_gpr, + high_addr_gpr, oi); + } else { + gen_helper_stq_be_parallel(cpu_env, ea, high_addr_gpr, + low_addr_gpr, oi); + + } + } else { + if (ctx->le_mode) { + gen_helper_lq_le_parallel(low_addr_gpr, cpu_env, ea, oi); + tcg_gen_ld_i64(high_addr_gpr, cpu_env, + offsetof(CPUPPCState, retxh)); + } else { + gen_helper_lq_be_parallel(high_addr_gpr, cpu_env, ea, oi); + tcg_gen_ld_i64(low_addr_gpr, cpu_env, + offsetof(CPUPPCState, retxh)); + } + } + } else { + /* Restart with exclusive lock. */ + gen_helper_exit_atomic(cpu_env); + ctx->base.is_jmp = DISAS_NORETURN; + } + } else { + mop = DEF_MEMOP(MO_UQ); + if (store) { + tcg_gen_qemu_st_i64(low_addr_gpr, ea, ctx->mem_idx, mop); + } else { + tcg_gen_qemu_ld_i64(low_addr_gpr, ea, ctx->mem_idx, mop); + } + + gen_addr_add(ctx, ea, ea, 8); + + if (store) { + tcg_gen_qemu_st_i64(high_addr_gpr, ea, ctx->mem_idx, mop); + } else { + tcg_gen_qemu_ld_i64(high_addr_gpr, ea, ctx->mem_idx, mop); + } + } + tcg_temp_free(ea); +#else + qemu_build_not_reached(); +#endif + + return true; +} + +static bool do_ldst_quad_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store) +{ + arg_D d; + if (!resolve_PLS_D(ctx, &d, a)) { + return true; + } + + return do_ldst_quad(ctx, &d, store, true); +} + /* Load Byte and Zero */ TRANS(LBZ, do_ldst_D, false, false, MO_UB) TRANS(LBZX, do_ldst_X, false, false, MO_UB) @@ -131,11 +202,15 @@ TRANS64(LWAUX, do_ldst_X, true, false, MO_SL) TRANS64(PLWA, do_ldst_PLS_D, false, false, MO_SL) /* Load Doubleword */ -TRANS64(LD, do_ldst_D, false, false, MO_Q) -TRANS64(LDX, do_ldst_X, false, false, MO_Q) -TRANS64(LDU, do_ldst_D, true, false, MO_Q) -TRANS64(LDUX, do_ldst_X, true, false, MO_Q) -TRANS64(PLD, do_ldst_PLS_D, false, false, MO_Q) +TRANS64(LD, do_ldst_D, false, false, MO_UQ) +TRANS64(LDX, do_ldst_X, false, false, MO_UQ) +TRANS64(LDU, do_ldst_D, true, false, MO_UQ) +TRANS64(LDUX, do_ldst_X, true, false, MO_UQ) +TRANS64(PLD, do_ldst_PLS_D, false, false, MO_UQ) + +/* Load Quadword */ +TRANS64(LQ, do_ldst_quad, false, false); +TRANS64(PLQ, do_ldst_quad_PLS_D, false); /* Store Byte */ TRANS(STB, do_ldst_D, false, true, MO_UB) @@ -159,11 +234,15 @@ TRANS(STWUX, do_ldst_X, true, true, MO_UL) TRANS(PSTW, do_ldst_PLS_D, false, true, MO_UL) /* Store Doubleword */ -TRANS64(STD, do_ldst_D, false, true, MO_Q) -TRANS64(STDX, do_ldst_X, false, true, MO_Q) -TRANS64(STDU, do_ldst_D, true, true, MO_Q) -TRANS64(STDUX, do_ldst_X, true, true, MO_Q) -TRANS64(PSTD, do_ldst_PLS_D, false, true, MO_Q) +TRANS64(STD, do_ldst_D, false, true, MO_UQ) +TRANS64(STDX, do_ldst_X, false, true, MO_UQ) +TRANS64(STDU, do_ldst_D, true, true, MO_UQ) +TRANS64(STDUX, do_ldst_X, true, true, MO_UQ) +TRANS64(PSTD, do_ldst_PLS_D, false, true, MO_UQ) + +/* Store Quadword */ +TRANS64(STQ, do_ldst_quad, true, false); +TRANS64(PSTQ, do_ldst_quad_PLS_D, true); /* * 
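Editor's note, not part of the patch: the do_ldst_quad() helper added above picks which GPR of the rt/rt+1 pair receives the doubleword at the lower address, and that pairing flips for non-prefixed little-endian lq/stq. A minimal standalone sketch of just that pairing rule, with plain uint64_t standing in for TCG values and guest memory (the names model_lq, mem and gpr are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* mem[0] is the doubleword at EA, mem[1] the doubleword at EA + 8. */
static void model_lq(const uint64_t mem[2], uint64_t gpr[32],
                     int rt, int le_mode, int prefixed)
{
    int low_addr_gpr  = (prefixed || !le_mode) ? rt     : rt + 1;
    int high_addr_gpr = (prefixed || !le_mode) ? rt + 1 : rt;

    gpr[low_addr_gpr]  = mem[0];   /* doubleword at EA     */
    gpr[high_addr_gpr] = mem[1];   /* doubleword at EA + 8 */
}

int main(void)
{
    const uint64_t mem[2] = { 0x1111111111111111ull, 0x2222222222222222ull };
    uint64_t gpr[32] = { 0 };

    model_lq(mem, gpr, 4, /* le_mode */ 1, /* prefixed */ 0);
    printf("lq r4 (LE, non-prefixed): r4=%016llx r5=%016llx\n",
           (unsigned long long)gpr[4], (unsigned long long)gpr[5]);
    return 0;
}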
Fixed-Point Compare Instructions @@ -325,9 +404,173 @@ static bool trans_CFUGED(DisasContext *ctx, arg_X *a) REQUIRE_64BIT(ctx); REQUIRE_INSNS_FLAGS2(ctx, ISA310); #if defined(TARGET_PPC64) - gen_helper_cfuged(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]); + gen_helper_CFUGED(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]); #else qemu_build_not_reached(); #endif return true; } + +static void do_cntzdm(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 mask, int64_t trail) +{ + TCGv_i64 t0, t1; + + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + + tcg_gen_and_i64(t0, src, mask); + if (trail) { + tcg_gen_ctzi_i64(t0, t0, -1); + } else { + tcg_gen_clzi_i64(t0, t0, -1); + } + + tcg_gen_setcondi_i64(TCG_COND_NE, t1, t0, -1); + tcg_gen_andi_i64(t0, t0, 63); + tcg_gen_xori_i64(t0, t0, 63); + if (trail) { + tcg_gen_shl_i64(t0, mask, t0); + tcg_gen_shl_i64(t0, t0, t1); + } else { + tcg_gen_shr_i64(t0, mask, t0); + tcg_gen_shr_i64(t0, t0, t1); + } + + tcg_gen_ctpop_i64(dst, t0); + + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); +} + +static bool trans_CNTLZDM(DisasContext *ctx, arg_X *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA310); +#if defined(TARGET_PPC64) + do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], false); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_CNTTZDM(DisasContext *ctx, arg_X *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA310); +#if defined(TARGET_PPC64) + do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], true); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_PDEPD(DisasContext *ctx, arg_X *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA310); +#if defined(TARGET_PPC64) + gen_helper_PDEPD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_PEXTD(DisasContext *ctx, arg_X *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA310); +#if defined(TARGET_PPC64) + gen_helper_PEXTD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_ADDG6S(DisasContext *ctx, arg_X *a) +{ + const uint64_t carry_bits = 0x1111111111111111ULL; + TCGv t0, t1, carry, zero = tcg_constant_tl(0); + + REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206); + + t0 = tcg_temp_new(); + t1 = tcg_const_tl(0); + carry = tcg_const_tl(0); + + for (int i = 0; i < 16; i++) { + tcg_gen_shri_tl(t0, cpu_gpr[a->ra], i * 4); + tcg_gen_andi_tl(t0, t0, 0xf); + tcg_gen_add_tl(t1, t1, t0); + + tcg_gen_shri_tl(t0, cpu_gpr[a->rb], i * 4); + tcg_gen_andi_tl(t0, t0, 0xf); + tcg_gen_add_tl(t1, t1, t0); + + tcg_gen_andi_tl(t1, t1, 0x10); + tcg_gen_setcond_tl(TCG_COND_NE, t1, t1, zero); + + tcg_gen_shli_tl(t0, t1, i * 4); + tcg_gen_or_tl(carry, carry, t0); + } + + tcg_gen_xori_tl(carry, carry, (target_long)carry_bits); + tcg_gen_muli_tl(cpu_gpr[a->rt], carry, 6); + + tcg_temp_free(t0); + tcg_temp_free(t1); + tcg_temp_free(carry); + + return true; +} + +static bool trans_CDTBCD(DisasContext *ctx, arg_X_sa *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206); + gen_helper_CDTBCD(cpu_gpr[a->ra], cpu_gpr[a->rs]); + return true; +} + +static bool trans_CBCDTD(DisasContext *ctx, arg_X_sa *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206); + gen_helper_CBCDTD(cpu_gpr[a->ra], cpu_gpr[a->rs]); + return true; +} + +static bool do_hash(DisasContext *ctx, arg_X *a, bool priv, + void (*helper)(TCGv_ptr, TCGv, TCGv, TCGv)) +{ + TCGv ea; + + if (!(ctx->insns_flags2 & PPC2_ISA310)) { + /* if 
version is before v3.1, this operation is a nop */ + return true; + } + + if (priv) { + /* if instruction is privileged but the context is in user space */ + REQUIRE_SV(ctx); + } + + if (unlikely(a->ra == 0)) { + /* if RA=0, the instruction form is invalid */ + gen_invalid(ctx); + return true; + } + + ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->rt)); + helper(cpu_env, ea, cpu_gpr[a->ra], cpu_gpr[a->rb]); + + tcg_temp_free(ea); + + return true; +} + +TRANS(HASHST, do_hash, false, gen_helper_HASHST) +TRANS(HASHCHK, do_hash, false, gen_helper_HASHCHK) +TRANS(HASHSTP, do_hash, true, gen_helper_HASHSTP) +TRANS(HASHCHKP, do_hash, true, gen_helper_HASHCHKP) diff --git a/target/ppc/translate/fp-impl.c.inc b/target/ppc/translate/fp-impl.c.inc index 9f7868ee28..8d5cf0f982 100644 --- a/target/ppc/translate/fp-impl.c.inc +++ b/target/ppc/translate/fp-impl.c.inc @@ -31,7 +31,7 @@ static void gen_set_cr1_from_fpscr(DisasContext *ctx) #endif /*** Floating-Point arithmetic ***/ -#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \ +#define _GEN_FLOAT_ACB(name, op1, op2, set_fprf, type) \ static void gen_f##name(DisasContext *ctx) \ { \ TCGv_i64 t0; \ @@ -50,10 +50,7 @@ static void gen_f##name(DisasContext *ctx) \ get_fpr(t0, rA(ctx->opcode)); \ get_fpr(t1, rC(ctx->opcode)); \ get_fpr(t2, rB(ctx->opcode)); \ - gen_helper_f##op(t3, cpu_env, t0, t1, t2); \ - if (isfloat) { \ - gen_helper_frsp(t3, cpu_env, t3); \ - } \ + gen_helper_f##name(t3, cpu_env, t0, t1, t2); \ set_fpr(rD(ctx->opcode), t3); \ if (set_fprf) { \ gen_compute_fprf_float64(t3); \ @@ -68,10 +65,10 @@ static void gen_f##name(DisasContext *ctx) \ } #define GEN_FLOAT_ACB(name, op2, set_fprf, type) \ -_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type); \ -_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type); +_GEN_FLOAT_ACB(name, 0x3F, op2, set_fprf, type); \ +_GEN_FLOAT_ACB(name##s, 0x3B, op2, set_fprf, type); -#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \ +#define _GEN_FLOAT_AB(name, op1, op2, inval, set_fprf, type) \ static void gen_f##name(DisasContext *ctx) \ { \ TCGv_i64 t0; \ @@ -87,10 +84,7 @@ static void gen_f##name(DisasContext *ctx) \ gen_reset_fpstatus(); \ get_fpr(t0, rA(ctx->opcode)); \ get_fpr(t1, rB(ctx->opcode)); \ - gen_helper_f##op(t2, cpu_env, t0, t1); \ - if (isfloat) { \ - gen_helper_frsp(t2, cpu_env, t2); \ - } \ + gen_helper_f##name(t2, cpu_env, t0, t1); \ set_fpr(rD(ctx->opcode), t2); \ if (set_fprf) { \ gen_compute_fprf_float64(t2); \ @@ -103,10 +97,10 @@ static void gen_f##name(DisasContext *ctx) \ tcg_temp_free_i64(t2); \ } #define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \ -_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \ -_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type); +_GEN_FLOAT_AB(name, 0x3F, op2, inval, set_fprf, type); \ +_GEN_FLOAT_AB(name##s, 0x3B, op2, inval, set_fprf, type); -#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \ +#define _GEN_FLOAT_AC(name, op1, op2, inval, set_fprf, type) \ static void gen_f##name(DisasContext *ctx) \ { \ TCGv_i64 t0; \ @@ -122,10 +116,7 @@ static void gen_f##name(DisasContext *ctx) \ gen_reset_fpstatus(); \ get_fpr(t0, rA(ctx->opcode)); \ get_fpr(t1, rC(ctx->opcode)); \ - gen_helper_f##op(t2, cpu_env, t0, t1); \ - if (isfloat) { \ - gen_helper_frsp(t2, cpu_env, t2); \ - } \ + gen_helper_f##name(t2, cpu_env, t0, t1); \ set_fpr(rD(ctx->opcode), t2); \ if (set_fprf) { \ gen_compute_fprf_float64(t2); \ @@ -138,8 +129,8 @@ static void 
gen_f##name(DisasContext *ctx) \ tcg_temp_free_i64(t2); \ } #define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \ -_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \ -_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type); +_GEN_FLOAT_AC(name, 0x3F, op2, inval, set_fprf, type); \ +_GEN_FLOAT_AC(name##s, 0x3B, op2, inval, set_fprf, type); #define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \ static void gen_f##name(DisasContext *ctx) \ @@ -157,8 +148,9 @@ static void gen_f##name(DisasContext *ctx) \ gen_helper_f##name(t1, cpu_env, t0); \ set_fpr(rD(ctx->opcode), t1); \ if (set_fprf) { \ - gen_compute_fprf_float64(t1); \ + gen_helper_compute_fprf_float64(cpu_env, t1); \ } \ + gen_helper_float_check_status(cpu_env); \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ @@ -220,8 +212,7 @@ static void gen_frsqrtes(DisasContext *ctx) t1 = tcg_temp_new_i64(); gen_reset_fpstatus(); get_fpr(t0, rB(ctx->opcode)); - gen_helper_frsqrte(t1, cpu_env, t0); - gen_helper_frsp(t1, cpu_env, t1); + gen_helper_frsqrtes(t1, cpu_env, t0); set_fpr(rD(ctx->opcode), t1); gen_compute_fprf_float64(t1); if (unlikely(Rc(ctx->opcode) != 0)) { @@ -231,57 +222,66 @@ static void gen_frsqrtes(DisasContext *ctx) tcg_temp_free_i64(t1); } -/* fsel */ -_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL); +static bool trans_FSEL(DisasContext *ctx, arg_A *a) +{ + TCGv_i64 t0, t1, t2; + + REQUIRE_INSNS_FLAGS(ctx, FLOAT_FSEL); + REQUIRE_FPU(ctx); + + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + + get_fpr(t0, a->fra); + get_fpr(t1, a->frb); + get_fpr(t2, a->frc); + + gen_helper_FSEL(t0, t0, t1, t2); + set_fpr(a->frt, t0); + if (a->rc) { + gen_set_cr1_from_fpscr(ctx); + } + + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); + + return true; +} + /* fsub - fsubs */ GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT); /* Optional: */ -/* fsqrt */ -static void gen_fsqrt(DisasContext *ctx) +static bool do_helper_fsqrt(DisasContext *ctx, arg_A_tb *a, + void (*helper)(TCGv_i64, TCGv_ptr, TCGv_i64)) { - TCGv_i64 t0; - TCGv_i64 t1; - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } + TCGv_i64 t0, t1; + + REQUIRE_INSNS_FLAGS(ctx, FLOAT_FSQRT); + REQUIRE_FPU(ctx); + t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); + gen_reset_fpstatus(); - get_fpr(t0, rB(ctx->opcode)); - gen_helper_fsqrt(t1, cpu_env, t0); - set_fpr(rD(ctx->opcode), t1); + get_fpr(t0, a->frb); + helper(t1, cpu_env, t0); + set_fpr(a->frt, t1); gen_compute_fprf_float64(t1); - if (unlikely(Rc(ctx->opcode) != 0)) { + if (unlikely(a->rc != 0)) { gen_set_cr1_from_fpscr(ctx); } + tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); + + return true; } -static void gen_fsqrts(DisasContext *ctx) -{ - TCGv_i64 t0; - TCGv_i64 t1; - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } - t0 = tcg_temp_new_i64(); - t1 = tcg_temp_new_i64(); - gen_reset_fpstatus(); - get_fpr(t0, rB(ctx->opcode)); - gen_helper_fsqrt(t1, cpu_env, t0); - gen_helper_frsp(t1, cpu_env, t1); - set_fpr(rD(ctx->opcode), t1); - gen_compute_fprf_float64(t1); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_cr1_from_fpscr(ctx); - } - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); -} +TRANS(FSQRT, do_helper_fsqrt, gen_helper_FSQRT); +TRANS(FSQRTS, do_helper_fsqrt, gen_helper_FSQRTS); /*** Floating-Point multiply-and-add ***/ /* fmadd - fmadds */ @@ -599,141 +599,162 @@ static void gen_mcrfs(DisasContext *ctx) 
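Editor's note, not part of the patch: the fp-impl hunk above folds the old gen_fsqrt/gen_fsqrts pair into a single do_helper_fsqrt() parameterized by the helper to call, wired up through the TRANS() macro defined earlier in translate.c. For readability, the two TRANS() lines expand to roughly the following; decodetree is assumed to make arg_FSQRT and arg_FSQRTS aliases of the format's arg_A_tb struct, which is why the pointer types line up.

static bool trans_FSQRT(DisasContext *ctx, arg_FSQRT *a)
{
    return do_helper_fsqrt(ctx, a, gen_helper_FSQRT);
}

static bool trans_FSQRTS(DisasContext *ctx, arg_FSQRTS *a)
{
    return do_helper_fsqrt(ctx, a, gen_helper_FSQRTS);
}

Compared with the removed gen_fsqrts(), the single-precision form now calls one dedicated helper instead of gen_helper_fsqrt followed by gen_helper_frsp, so the value is rounded in a single step.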
tcg_temp_free_i64(tnew_fpscr); } -/* mffs */ -static void gen_mffs(DisasContext *ctx) +static TCGv_i64 place_from_fpscr(int rt, uint64_t mask) { - TCGv_i64 t0; - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } - t0 = tcg_temp_new_i64(); + TCGv_i64 fpscr = tcg_temp_new_i64(); + TCGv_i64 fpscr_masked = tcg_temp_new_i64(); + + tcg_gen_extu_tl_i64(fpscr, cpu_fpscr); + tcg_gen_andi_i64(fpscr_masked, fpscr, mask); + set_fpr(rt, fpscr_masked); + + tcg_temp_free_i64(fpscr_masked); + + return fpscr; +} + +static void store_fpscr_masked(TCGv_i64 fpscr, uint64_t clear_mask, + TCGv_i64 set_mask, uint32_t store_mask) +{ + TCGv_i64 fpscr_masked = tcg_temp_new_i64(); + TCGv_i32 st_mask = tcg_constant_i32(store_mask); + + tcg_gen_andi_i64(fpscr_masked, fpscr, ~clear_mask); + tcg_gen_or_i64(fpscr_masked, fpscr_masked, set_mask); + gen_helper_store_fpscr(cpu_env, fpscr_masked, st_mask); + + tcg_temp_free_i64(fpscr_masked); +} + +static bool trans_MFFS(DisasContext *ctx, arg_X_t_rc *a) +{ + TCGv_i64 fpscr; + + REQUIRE_FPU(ctx); + gen_reset_fpstatus(); - tcg_gen_extu_tl_i64(t0, cpu_fpscr); - set_fpr(rD(ctx->opcode), t0); - if (unlikely(Rc(ctx->opcode))) { + fpscr = place_from_fpscr(a->rt, UINT64_MAX); + if (a->rc) { gen_set_cr1_from_fpscr(ctx); } - tcg_temp_free_i64(t0); + + tcg_temp_free_i64(fpscr); + + return true; } -/* mffsl */ -static void gen_mffsl(DisasContext *ctx) +static bool trans_MFFSCE(DisasContext *ctx, arg_X_t *a) { - TCGv_i64 t0; + TCGv_i64 fpscr; - if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) { - return gen_mffs(ctx); - } - - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } - t0 = tcg_temp_new_i64(); - gen_reset_fpstatus(); - tcg_gen_extu_tl_i64(t0, cpu_fpscr); - /* Mask everything except mode, status, and enables. */ - tcg_gen_andi_i64(t0, t0, FP_DRN | FP_STATUS | FP_ENABLES | FP_RN); - set_fpr(rD(ctx->opcode), t0); - tcg_temp_free_i64(t0); -} - -/* mffsce */ -static void gen_mffsce(DisasContext *ctx) -{ - TCGv_i64 t0; - TCGv_i32 mask; - - if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) { - return gen_mffs(ctx); - } - - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } - - t0 = tcg_temp_new_i64(); + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_FPU(ctx); gen_reset_fpstatus(); - tcg_gen_extu_tl_i64(t0, cpu_fpscr); - set_fpr(rD(ctx->opcode), t0); + fpscr = place_from_fpscr(a->rt, UINT64_MAX); + store_fpscr_masked(fpscr, FP_ENABLES, tcg_constant_i64(0), 0x0003); - /* Clear exception enable bits in the FPSCR. */ - tcg_gen_andi_i64(t0, t0, ~FP_ENABLES); - mask = tcg_const_i32(0x0003); - gen_helper_store_fpscr(cpu_env, t0, mask); + tcg_temp_free_i64(fpscr); - tcg_temp_free_i32(mask); - tcg_temp_free_i64(t0); + return true; } -static void gen_helper_mffscrn(DisasContext *ctx, TCGv_i64 t1) +static bool trans_MFFSCRN(DisasContext *ctx, arg_X_tb *a) { - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i32 mask = tcg_const_i32(0x0001); + TCGv_i64 t1, fpscr; - gen_reset_fpstatus(); - tcg_gen_extu_tl_i64(t0, cpu_fpscr); - tcg_gen_andi_i64(t0, t0, FP_DRN | FP_ENABLES | FP_RN); - set_fpr(rD(ctx->opcode), t0); - - /* Mask FPSCR value to clear RN. */ - tcg_gen_andi_i64(t0, t0, ~FP_RN); - - /* Merge RN into FPSCR value. 
*/ - tcg_gen_or_i64(t0, t0, t1); - - gen_helper_store_fpscr(cpu_env, t0, mask); - - tcg_temp_free_i32(mask); - tcg_temp_free_i64(t0); -} - -/* mffscrn */ -static void gen_mffscrn(DisasContext *ctx) -{ - TCGv_i64 t1; - - if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) { - return gen_mffs(ctx); - } - - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_FPU(ctx); t1 = tcg_temp_new_i64(); - get_fpr(t1, rB(ctx->opcode)); - /* Mask FRB to get just RN. */ + get_fpr(t1, a->rb); tcg_gen_andi_i64(t1, t1, FP_RN); - gen_helper_mffscrn(ctx, t1); + gen_reset_fpstatus(); + fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN); + store_fpscr_masked(fpscr, FP_RN, t1, 0x0001); tcg_temp_free_i64(t1); + tcg_temp_free_i64(fpscr); + + return true; } -/* mffscrni */ -static void gen_mffscrni(DisasContext *ctx) +static bool trans_MFFSCDRN(DisasContext *ctx, arg_X_tb *a) { - TCGv_i64 t1; + TCGv_i64 t1, fpscr; - if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) { - return gen_mffs(ctx); - } + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_FPU(ctx); - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } + t1 = tcg_temp_new_i64(); + get_fpr(t1, a->rb); + tcg_gen_andi_i64(t1, t1, FP_DRN); - t1 = tcg_const_i64((uint64_t)RM(ctx->opcode)); - - gen_helper_mffscrn(ctx, t1); + gen_reset_fpstatus(); + fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN); + store_fpscr_masked(fpscr, FP_DRN, t1, 0x0100); tcg_temp_free_i64(t1); + tcg_temp_free_i64(fpscr); + + return true; +} + +static bool trans_MFFSCRNI(DisasContext *ctx, arg_X_imm2 *a) +{ + TCGv_i64 t1, fpscr; + + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_FPU(ctx); + + t1 = tcg_temp_new_i64(); + tcg_gen_movi_i64(t1, a->imm); + + gen_reset_fpstatus(); + fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN); + store_fpscr_masked(fpscr, FP_RN, t1, 0x0001); + + tcg_temp_free_i64(t1); + tcg_temp_free_i64(fpscr); + + return true; +} + +static bool trans_MFFSCDRNI(DisasContext *ctx, arg_X_imm3 *a) +{ + TCGv_i64 t1, fpscr; + + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_FPU(ctx); + + t1 = tcg_temp_new_i64(); + tcg_gen_movi_i64(t1, (uint64_t)a->imm << FPSCR_DRN0); + + gen_reset_fpstatus(); + fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN); + store_fpscr_masked(fpscr, FP_DRN, t1, 0x0100); + + tcg_temp_free_i64(t1); + tcg_temp_free_i64(fpscr); + + return true; +} + +static bool trans_MFFSL(DisasContext *ctx, arg_X_t *a) +{ + TCGv_i64 fpscr; + + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_FPU(ctx); + + gen_reset_fpstatus(); + fpscr = place_from_fpscr(a->rt, + FP_DRN | FP_STATUS | FP_ENABLES | FP_NI | FP_RN); + + tcg_temp_free_i64(fpscr); + + return true; } /* mtfsb0 */ @@ -769,7 +790,6 @@ static void gen_mtfsb1(DisasContext *ctx) return; } crb = 31 - crbD(ctx->opcode); - gen_reset_fpstatus(); /* XXX: we pretend we can only do IEEE floating-point computations */ if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) { TCGv_i32 t0; @@ -782,7 +802,7 @@ static void gen_mtfsb1(DisasContext *ctx) tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX); } /* We can raise a deferred exception */ - gen_helper_float_check_status(cpu_env); + gen_helper_fpscr_check_status(cpu_env); } /* mtfsf */ @@ -803,7 +823,6 @@ static void gen_mtfsf(DisasContext *ctx) gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); return; } - gen_reset_fpstatus(); if (l) { t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 
0xffff : 0xff); } else { @@ -818,7 +837,7 @@ static void gen_mtfsf(DisasContext *ctx) tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX); } /* We can raise a deferred exception */ - gen_helper_float_check_status(cpu_env); + gen_helper_fpscr_check_status(cpu_env); tcg_temp_free_i64(t1); } @@ -840,7 +859,6 @@ static void gen_mtfsfi(DisasContext *ctx) return; } sh = (8 * w) + 7 - bf; - gen_reset_fpstatus(); t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh)); t1 = tcg_const_i32(1 << sh); gen_helper_store_fpscr(cpu_env, t0, t1); @@ -851,102 +869,9 @@ static void gen_mtfsfi(DisasContext *ctx) tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX); } /* We can raise a deferred exception */ - gen_helper_float_check_status(cpu_env); + gen_helper_fpscr_check_status(cpu_env); } -/*** Floating-point load ***/ -#define GEN_LDF(name, ldop, opc, type) \ -static void glue(gen_, name)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - t0 = tcg_temp_new_i64(); \ - gen_addr_imm_index(ctx, EA, 0); \ - gen_qemu_##ldop(ctx, t0, EA); \ - set_fpr(rD(ctx->opcode), t0); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} - -#define GEN_LDUF(name, ldop, opc, type) \ -static void glue(gen_, name##u)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - if (unlikely(rA(ctx->opcode) == 0)) { \ - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - t0 = tcg_temp_new_i64(); \ - gen_addr_imm_index(ctx, EA, 0); \ - gen_qemu_##ldop(ctx, t0, EA); \ - set_fpr(rD(ctx->opcode), t0); \ - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} - -#define GEN_LDUXF(name, ldop, opc, type) \ -static void glue(gen_, name##ux)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - t0 = tcg_temp_new_i64(); \ - if (unlikely(rA(ctx->opcode) == 0)) { \ - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - gen_addr_reg_index(ctx, EA); \ - gen_qemu_##ldop(ctx, t0, EA); \ - set_fpr(rD(ctx->opcode), t0); \ - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} - -#define GEN_LDXF(name, ldop, opc2, opc3, type) \ -static void glue(gen_, name##x)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - t0 = tcg_temp_new_i64(); \ - gen_addr_reg_index(ctx, EA); \ - gen_qemu_##ldop(ctx, t0, EA); \ - set_fpr(rD(ctx->opcode), t0); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} - -#define GEN_LDFS(name, ldop, op, type) \ -GEN_LDF(name, ldop, op | 0x20, type); \ -GEN_LDUF(name, ldop, op | 0x21, type); \ -GEN_LDUXF(name, ldop, op | 0x01, type); \ -GEN_LDXF(name, ldop, 0x17, op | 0x00, type) - static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr) { TCGv_i32 tmp = tcg_temp_new_i32(); @@ -955,17 +880,12 @@ static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr) tcg_temp_free_i32(tmp); } - /* lfd 
lfdu lfdux lfdx */ -GEN_LDFS(lfd, ld64_i64, 0x12, PPC_FLOAT); - /* lfs lfsu lfsux lfsx */ -GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT); - /* lfdepx (external PID lfdx) */ static void gen_lfdepx(DisasContext *ctx) { TCGv EA; TCGv_i64 t0; - CHK_SV; + CHK_SV(ctx); if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; @@ -974,7 +894,7 @@ static void gen_lfdepx(DisasContext *ctx) EA = tcg_temp_new(); t0 = tcg_temp_new_i64(); gen_addr_reg_index(ctx, EA); - tcg_gen_qemu_ld_i64(t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_Q)); + tcg_gen_qemu_ld_i64(t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UQ)); set_fpr(rD(ctx->opcode), t0); tcg_temp_free(EA); tcg_temp_free_i64(t0); @@ -1089,73 +1009,6 @@ static void gen_lfiwzx(DisasContext *ctx) tcg_temp_free(EA); tcg_temp_free_i64(t0); } -/*** Floating-point store ***/ -#define GEN_STF(name, stop, opc, type) \ -static void glue(gen_, name)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - t0 = tcg_temp_new_i64(); \ - gen_addr_imm_index(ctx, EA, 0); \ - get_fpr(t0, rS(ctx->opcode)); \ - gen_qemu_##stop(ctx, t0, EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} - -#define GEN_STUF(name, stop, opc, type) \ -static void glue(gen_, name##u)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - if (unlikely(rA(ctx->opcode) == 0)) { \ - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - t0 = tcg_temp_new_i64(); \ - gen_addr_imm_index(ctx, EA, 0); \ - get_fpr(t0, rS(ctx->opcode)); \ - gen_qemu_##stop(ctx, t0, EA); \ - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} - -#define GEN_STUXF(name, stop, opc, type) \ -static void glue(gen_, name##ux)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - if (unlikely(rA(ctx->opcode) == 0)) { \ - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - t0 = tcg_temp_new_i64(); \ - gen_addr_reg_index(ctx, EA); \ - get_fpr(t0, rS(ctx->opcode)); \ - gen_qemu_##stop(ctx, t0, EA); \ - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} #define GEN_STXF(name, stop, opc2, opc3, type) \ static void glue(gen_, name##x)(DisasContext *ctx) \ @@ -1176,12 +1029,6 @@ static void glue(gen_, name##x)(DisasContext *ctx) \ tcg_temp_free_i64(t0); \ } -#define GEN_STFS(name, stop, op, type) \ -GEN_STF(name, stop, op | 0x20, type); \ -GEN_STUF(name, stop, op | 0x21, type); \ -GEN_STUXF(name, stop, op | 0x01, type); \ -GEN_STXF(name, stop, 0x17, op | 0x00, type) - static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr) { TCGv_i32 tmp = tcg_temp_new_i32(); @@ -1190,17 +1037,12 @@ static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr) tcg_temp_free_i32(tmp); } -/* stfd stfdu stfdux stfdx */ -GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT); -/* stfs stfsu stfsux stfsx */ -GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT); - /* stfdepx (external PID lfdx) */ static void gen_stfdepx(DisasContext *ctx) { TCGv EA; TCGv_i64 t0; - CHK_SV; + CHK_SV(ctx); if 
(unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; @@ -1210,7 +1052,7 @@ static void gen_stfdepx(DisasContext *ctx) t0 = tcg_temp_new_i64(); gen_addr_reg_index(ctx, EA); get_fpr(t0, rD(ctx->opcode)); - tcg_gen_qemu_st_i64(t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_Q)); + tcg_gen_qemu_st_i64(t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_UQ)); tcg_temp_free(EA); tcg_temp_free_i64(t0); } @@ -1294,185 +1136,91 @@ static inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2) /* stfiwx */ GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX); -/* POWER2 specific instructions */ -/* Quad manipulation (load/store two floats at a time) */ - -/* lfq */ -static void gen_lfq(DisasContext *ctx) +/* Floating-point Load/Store Instructions */ +static bool do_lsfpsd(DisasContext *ctx, int rt, int ra, TCGv displ, + bool update, bool store, bool single) { - int rd = rD(ctx->opcode); - TCGv t0; - TCGv_i64 t1; - gen_set_access_type(ctx, ACCESS_FLOAT); - t0 = tcg_temp_new(); - t1 = tcg_temp_new_i64(); - gen_addr_imm_index(ctx, t0, 0); - gen_qemu_ld64_i64(ctx, t1, t0); - set_fpr(rd, t1); - gen_addr_add(ctx, t0, t0, 8); - gen_qemu_ld64_i64(ctx, t1, t0); - set_fpr((rd + 1) % 32, t1); - tcg_temp_free(t0); - tcg_temp_free_i64(t1); -} - -/* lfqu */ -static void gen_lfqu(DisasContext *ctx) -{ - int ra = rA(ctx->opcode); - int rd = rD(ctx->opcode); - TCGv t0, t1; - TCGv_i64 t2; - gen_set_access_type(ctx, ACCESS_FLOAT); - t0 = tcg_temp_new(); - t1 = tcg_temp_new(); - t2 = tcg_temp_new_i64(); - gen_addr_imm_index(ctx, t0, 0); - gen_qemu_ld64_i64(ctx, t2, t0); - set_fpr(rd, t2); - gen_addr_add(ctx, t1, t0, 8); - gen_qemu_ld64_i64(ctx, t2, t1); - set_fpr((rd + 1) % 32, t2); - if (ra != 0) { - tcg_gen_mov_tl(cpu_gpr[ra], t0); + TCGv ea; + TCGv_i64 t0; + REQUIRE_INSNS_FLAGS(ctx, FLOAT); + REQUIRE_FPU(ctx); + if (update && ra == 0) { + gen_invalid(ctx); + return true; } - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free_i64(t2); -} - -/* lfqux */ -static void gen_lfqux(DisasContext *ctx) -{ - int ra = rA(ctx->opcode); - int rd = rD(ctx->opcode); gen_set_access_type(ctx, ACCESS_FLOAT); - TCGv t0, t1; - TCGv_i64 t2; - t2 = tcg_temp_new_i64(); - t0 = tcg_temp_new(); - gen_addr_reg_index(ctx, t0); - gen_qemu_ld64_i64(ctx, t2, t0); - set_fpr(rd, t2); - t1 = tcg_temp_new(); - gen_addr_add(ctx, t1, t0, 8); - gen_qemu_ld64_i64(ctx, t2, t1); - set_fpr((rd + 1) % 32, t2); - tcg_temp_free(t1); - if (ra != 0) { - tcg_gen_mov_tl(cpu_gpr[ra], t0); + t0 = tcg_temp_new_i64(); + ea = do_ea_calc(ctx, ra, displ); + if (store) { + get_fpr(t0, rt); + if (single) { + gen_qemu_st32fs(ctx, t0, ea); + } else { + gen_qemu_st64_i64(ctx, t0, ea); + } + } else { + if (single) { + gen_qemu_ld32fs(ctx, t0, ea); + } else { + gen_qemu_ld64_i64(ctx, t0, ea); + } + set_fpr(rt, t0); } - tcg_temp_free(t0); - tcg_temp_free_i64(t2); -} - -/* lfqx */ -static void gen_lfqx(DisasContext *ctx) -{ - int rd = rD(ctx->opcode); - TCGv t0; - TCGv_i64 t1; - gen_set_access_type(ctx, ACCESS_FLOAT); - t0 = tcg_temp_new(); - t1 = tcg_temp_new_i64(); - gen_addr_reg_index(ctx, t0); - gen_qemu_ld64_i64(ctx, t1, t0); - set_fpr(rd, t1); - gen_addr_add(ctx, t0, t0, 8); - gen_qemu_ld64_i64(ctx, t1, t0); - set_fpr((rd + 1) % 32, t1); - tcg_temp_free(t0); - tcg_temp_free_i64(t1); -} - -/* stfq */ -static void gen_stfq(DisasContext *ctx) -{ - int rd = rD(ctx->opcode); - TCGv t0; - TCGv_i64 t1; - gen_set_access_type(ctx, ACCESS_FLOAT); - t0 = tcg_temp_new(); - t1 = tcg_temp_new_i64(); - gen_addr_imm_index(ctx, t0, 0); - get_fpr(t1, rd); - 
gen_qemu_st64_i64(ctx, t1, t0); - gen_addr_add(ctx, t0, t0, 8); - get_fpr(t1, (rd + 1) % 32); - gen_qemu_st64_i64(ctx, t1, t0); - tcg_temp_free(t0); - tcg_temp_free_i64(t1); -} - -/* stfqu */ -static void gen_stfqu(DisasContext *ctx) -{ - int ra = rA(ctx->opcode); - int rd = rD(ctx->opcode); - TCGv t0, t1; - TCGv_i64 t2; - gen_set_access_type(ctx, ACCESS_FLOAT); - t2 = tcg_temp_new_i64(); - t0 = tcg_temp_new(); - gen_addr_imm_index(ctx, t0, 0); - get_fpr(t2, rd); - gen_qemu_st64_i64(ctx, t2, t0); - t1 = tcg_temp_new(); - gen_addr_add(ctx, t1, t0, 8); - get_fpr(t2, (rd + 1) % 32); - gen_qemu_st64_i64(ctx, t2, t1); - tcg_temp_free(t1); - if (ra != 0) { - tcg_gen_mov_tl(cpu_gpr[ra], t0); + if (update) { + tcg_gen_mov_tl(cpu_gpr[ra], ea); } - tcg_temp_free(t0); - tcg_temp_free_i64(t2); + tcg_temp_free_i64(t0); + tcg_temp_free(ea); + return true; } -/* stfqux */ -static void gen_stfqux(DisasContext *ctx) +static bool do_lsfp_D(DisasContext *ctx, arg_D *a, bool update, bool store, + bool single) { - int ra = rA(ctx->opcode); - int rd = rD(ctx->opcode); - TCGv t0, t1; - TCGv_i64 t2; - gen_set_access_type(ctx, ACCESS_FLOAT); - t2 = tcg_temp_new_i64(); - t0 = tcg_temp_new(); - gen_addr_reg_index(ctx, t0); - get_fpr(t2, rd); - gen_qemu_st64_i64(ctx, t2, t0); - t1 = tcg_temp_new(); - gen_addr_add(ctx, t1, t0, 8); - get_fpr(t2, (rd + 1) % 32); - gen_qemu_st64_i64(ctx, t2, t1); - tcg_temp_free(t1); - if (ra != 0) { - tcg_gen_mov_tl(cpu_gpr[ra], t0); + return do_lsfpsd(ctx, a->rt, a->ra, tcg_constant_tl(a->si), update, store, + single); +} + +static bool do_lsfp_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool update, + bool store, bool single) +{ + arg_D d; + if (!resolve_PLS_D(ctx, &d, a)) { + return true; } - tcg_temp_free(t0); - tcg_temp_free_i64(t2); + return do_lsfp_D(ctx, &d, update, store, single); } -/* stfqx */ -static void gen_stfqx(DisasContext *ctx) +static bool do_lsfp_X(DisasContext *ctx, arg_X *a, bool update, + bool store, bool single) { - int rd = rD(ctx->opcode); - TCGv t0; - TCGv_i64 t1; - gen_set_access_type(ctx, ACCESS_FLOAT); - t1 = tcg_temp_new_i64(); - t0 = tcg_temp_new(); - gen_addr_reg_index(ctx, t0); - get_fpr(t1, rd); - gen_qemu_st64_i64(ctx, t1, t0); - gen_addr_add(ctx, t0, t0, 8); - get_fpr(t1, (rd + 1) % 32); - gen_qemu_st64_i64(ctx, t1, t0); - tcg_temp_free(t0); - tcg_temp_free_i64(t1); + return do_lsfpsd(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, single); } +TRANS(LFS, do_lsfp_D, false, false, true) +TRANS(LFSU, do_lsfp_D, true, false, true) +TRANS(LFSX, do_lsfp_X, false, false, true) +TRANS(LFSUX, do_lsfp_X, true, false, true) +TRANS(PLFS, do_lsfp_PLS_D, false, false, true) + +TRANS(LFD, do_lsfp_D, false, false, false) +TRANS(LFDU, do_lsfp_D, true, false, false) +TRANS(LFDX, do_lsfp_X, false, false, false) +TRANS(LFDUX, do_lsfp_X, true, false, false) +TRANS(PLFD, do_lsfp_PLS_D, false, false, false) + +TRANS(STFS, do_lsfp_D, false, true, true) +TRANS(STFSU, do_lsfp_D, true, true, true) +TRANS(STFSX, do_lsfp_X, false, true, true) +TRANS(STFSUX, do_lsfp_X, true, true, true) +TRANS(PSTFS, do_lsfp_PLS_D, false, true, true) + +TRANS(STFD, do_lsfp_D, false, true, false) +TRANS(STFDU, do_lsfp_D, true, true, false) +TRANS(STFDX, do_lsfp_X, false, true, false) +TRANS(STFDUX, do_lsfp_X, true, true, false) +TRANS(PSTFD, do_lsfp_PLS_D, false, true, false) + #undef _GEN_FLOAT_ACB #undef GEN_FLOAT_ACB #undef _GEN_FLOAT_AB diff --git a/target/ppc/translate/fp-ops.c.inc b/target/ppc/translate/fp-ops.c.inc index 88fab65628..d4c6c4bed1 100644 --- a/target/ppc/translate/fp-ops.c.inc +++ 
b/target/ppc/translate/fp-ops.c.inc @@ -24,7 +24,6 @@ GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT), GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT), GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES), GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE), -_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL), GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT), GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT), GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT), @@ -50,50 +49,19 @@ GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT), GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT), GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT), -#define GEN_LDF(name, ldop, opc, type) \ -GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type), -#define GEN_LDUF(name, ldop, opc, type) \ -GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type), -#define GEN_LDUXF(name, ldop, opc, type) \ -GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type), -#define GEN_LDXF(name, ldop, opc2, opc3, type) \ -GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type), -#define GEN_LDFS(name, ldop, op, type) \ -GEN_LDF(name, ldop, op | 0x20, type) \ -GEN_LDUF(name, ldop, op | 0x21, type) \ -GEN_LDUXF(name, ldop, op | 0x01, type) \ -GEN_LDXF(name, ldop, 0x17, op | 0x00, type) - -GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT) -GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT) GEN_HANDLER_E(lfdepx, 0x1F, 0x1F, 0x12, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER_E(lfiwax, 0x1f, 0x17, 0x1a, 0x00000001, PPC_NONE, PPC2_ISA205), GEN_HANDLER_E(lfiwzx, 0x1f, 0x17, 0x1b, 0x1, PPC_NONE, PPC2_FP_CVT_ISA206), GEN_HANDLER_E(lfdpx, 0x1F, 0x17, 0x18, 0x00200001, PPC_NONE, PPC2_ISA205), -#define GEN_STF(name, stop, opc, type) \ -GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type), -#define GEN_STUF(name, stop, opc, type) \ -GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type), -#define GEN_STUXF(name, stop, opc, type) \ -GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type), #define GEN_STXF(name, stop, opc2, opc3, type) \ GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type), -#define GEN_STFS(name, stop, op, type) \ -GEN_STF(name, stop, op | 0x20, type) \ -GEN_STUF(name, stop, op | 0x21, type) \ -GEN_STUXF(name, stop, op | 0x01, type) \ -GEN_STXF(name, stop, 0x17, op | 0x00, type) -GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT) -GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT) GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX) GEN_HANDLER_E(stfdepx, 0x1F, 0x1F, 0x16, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER_E(stfdpx, 0x1F, 0x17, 0x1C, 0x00200001, PPC_NONE, PPC2_ISA205), GEN_HANDLER(frsqrtes, 0x3B, 0x1A, 0xFF, 0x001F07C0, PPC_FLOAT_FRSQRTES), -GEN_HANDLER(fsqrt, 0x3F, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT), -GEN_HANDLER(fsqrts, 0x3B, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT), GEN_HANDLER(fcmpo, 0x3F, 0x00, 0x01, 0x00600001, PPC_FLOAT), GEN_HANDLER(fcmpu, 0x3F, 0x00, 0x00, 0x00600001, PPC_FLOAT), GEN_HANDLER(fabs, 0x3F, 0x08, 0x08, 0x001F0000, PPC_FLOAT), @@ -104,15 +72,6 @@ GEN_HANDLER_E(fcpsgn, 0x3F, 0x08, 0x00, 0x00000000, PPC_NONE, PPC2_ISA205), GEN_HANDLER_E(fmrgew, 0x3F, 0x06, 0x1E, 0x00000001, PPC_NONE, PPC2_VSX207), GEN_HANDLER_E(fmrgow, 0x3F, 0x06, 0x1A, 0x00000001, PPC_NONE, PPC2_VSX207), GEN_HANDLER(mcrfs, 0x3F, 0x00, 0x02, 0x0063F801, PPC_FLOAT), -GEN_HANDLER_E_2(mffs, 0x3F, 0x07, 0x12, 0x00, 0x00000000, PPC_FLOAT, PPC_NONE), -GEN_HANDLER_E_2(mffsce, 0x3F, 0x07, 0x12, 0x01, 0x00000000, PPC_FLOAT, - PPC2_ISA300), -GEN_HANDLER_E_2(mffsl, 0x3F, 0x07, 0x12, 0x18, 0x00000000, PPC_FLOAT, - PPC2_ISA300), -GEN_HANDLER_E_2(mffscrn, 0x3F, 0x07, 0x12, 0x16, 0x00000000, 
PPC_FLOAT, - PPC_NONE), -GEN_HANDLER_E_2(mffscrni, 0x3F, 0x07, 0x12, 0x17, 0x00000000, PPC_FLOAT, - PPC_NONE), GEN_HANDLER(mtfsb0, 0x3F, 0x06, 0x02, 0x001FF800, PPC_FLOAT), GEN_HANDLER(mtfsb1, 0x3F, 0x06, 0x01, 0x001FF800, PPC_FLOAT), GEN_HANDLER(mtfsf, 0x3F, 0x07, 0x16, 0x00000000, PPC_FLOAT), diff --git a/target/ppc/translate/processor-ctrl-impl.c.inc b/target/ppc/translate/processor-ctrl-impl.c.inc new file mode 100644 index 0000000000..cc7a50d579 --- /dev/null +++ b/target/ppc/translate/processor-ctrl-impl.c.inc @@ -0,0 +1,105 @@ +/* + * Power ISA decode for Storage Control instructions + * + * Copyright (c) 2022 Instituto de Pesquisas Eldorado (eldorado.org.br) + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +/* + * Processor Control Instructions + */ + +static bool trans_MSGCLR(DisasContext *ctx, arg_X_rb *a) +{ + if (!(ctx->insns_flags2 & PPC2_ISA207S)) { + /* + * Before Power ISA 2.07, processor control instructions were only + * implemented in the "Embedded.Processor Control" category. + */ + REQUIRE_INSNS_FLAGS2(ctx, PRCNTL); + } + + REQUIRE_HV(ctx); + +#if !defined(CONFIG_USER_ONLY) + if (is_book3s_arch2x(ctx)) { + gen_helper_book3s_msgclr(cpu_env, cpu_gpr[a->rb]); + } else { + gen_helper_msgclr(cpu_env, cpu_gpr[a->rb]); + } +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_MSGSND(DisasContext *ctx, arg_X_rb *a) +{ + if (!(ctx->insns_flags2 & PPC2_ISA207S)) { + /* + * Before Power ISA 2.07, processor control instructions were only + * implemented in the "Embedded.Processor Control" category. 
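+     * When PPC2_ISA207S is absent we therefore fall back to requiring
+     * the PPC2_PRCNTL flag instead.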
+ */ + REQUIRE_INSNS_FLAGS2(ctx, PRCNTL); + } + + REQUIRE_HV(ctx); + +#if !defined(CONFIG_USER_ONLY) + if (is_book3s_arch2x(ctx)) { + gen_helper_book3s_msgsnd(cpu_gpr[a->rb]); + } else { + gen_helper_msgsnd(cpu_gpr[a->rb]); + } +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_MSGCLRP(DisasContext *ctx, arg_X_rb *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA207S); + REQUIRE_SV(ctx); +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) + gen_helper_book3s_msgclrp(cpu_env, cpu_gpr[a->rb]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_MSGSNDP(DisasContext *ctx, arg_X_rb *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA207S); + REQUIRE_SV(ctx); +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) + gen_helper_book3s_msgsndp(cpu_env, cpu_gpr[a->rb]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_MSGSYNC(DisasContext *ctx, arg_MSGSYNC *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_HV(ctx); + + /* interpreted as no-op */ + return true; +} diff --git a/target/ppc/translate/storage-ctrl-impl.c.inc b/target/ppc/translate/storage-ctrl-impl.c.inc new file mode 100644 index 0000000000..6ea1d22ef9 --- /dev/null +++ b/target/ppc/translate/storage-ctrl-impl.c.inc @@ -0,0 +1,250 @@ +/* + * Power ISA decode for Storage Control instructions + * + * Copyright (c) 2022 Instituto de Pesquisas Eldorado (eldorado.org.br) + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +/* + * Store Control Instructions + */ + +#include "mmu-book3s-v3.h" + +static bool trans_SLBIE(DisasContext *ctx, arg_SLBIE *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS(ctx, SLBI); + REQUIRE_SV(ctx); + +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) + gen_helper_SLBIE(cpu_env, cpu_gpr[a->rb]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_SLBIEG(DisasContext *ctx, arg_SLBIEG *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_SV(ctx); + +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) + gen_helper_SLBIEG(cpu_env, cpu_gpr[a->rb]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_SLBIA(DisasContext *ctx, arg_SLBIA *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS(ctx, SLBI); + REQUIRE_SV(ctx); + +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) + gen_helper_SLBIA(cpu_env, tcg_constant_i32(a->ih)); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_SLBIAG(DisasContext *ctx, arg_SLBIAG *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_SV(ctx); + +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) + gen_helper_SLBIAG(cpu_env, cpu_gpr[a->rs], tcg_constant_i32(a->l)); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_SLBMTE(DisasContext *ctx, arg_SLBMTE *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS(ctx, SEGMENT_64B); + REQUIRE_SV(ctx); + +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) + gen_helper_SLBMTE(cpu_env, cpu_gpr[a->rb], cpu_gpr[a->rt]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_SLBMFEV(DisasContext *ctx, arg_SLBMFEV *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS(ctx, SEGMENT_64B); + REQUIRE_SV(ctx); + +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) + gen_helper_SLBMFEV(cpu_gpr[a->rt], cpu_env, cpu_gpr[a->rb]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_SLBMFEE(DisasContext *ctx, arg_SLBMFEE *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS(ctx, SEGMENT_64B); + REQUIRE_SV(ctx); + +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) + gen_helper_SLBMFEE(cpu_gpr[a->rt], cpu_env, cpu_gpr[a->rb]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_SLBFEE(DisasContext *ctx, arg_SLBFEE *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS(ctx, SEGMENT_64B); + +#if defined(CONFIG_USER_ONLY) + gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); +#else + +#if defined(TARGET_PPC64) + TCGLabel *l1, *l2; + + if (unlikely(ctx->pr)) { + gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); + return true; + } + gen_helper_SLBFEE(cpu_gpr[a->rt], cpu_env, + cpu_gpr[a->rb]); + l1 = gen_new_label(); + l2 = gen_new_label(); + tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[a->rt], -1, l1); + tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ); + tcg_gen_br(l2); + gen_set_label(l1); + tcg_gen_movi_tl(cpu_gpr[a->rt], 0); + gen_set_label(l2); +#else + qemu_build_not_reached(); +#endif +#endif + return true; +} + +static bool trans_SLBSYNC(DisasContext *ctx, arg_SLBSYNC *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_SV(ctx); + +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) + gen_check_tlb_flush(ctx, true); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool do_tlbie(DisasContext *ctx, arg_X_tlbie *a, bool local) +{ +#if defined(CONFIG_USER_ONLY) + 
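+    /*
+     * tlbie[l] is privileged: the user-only build simply raises a
+     * privileged-instruction exception here and never reaches the
+     * TLB code below.
+     */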
gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); + return true; +#else + TCGv_i32 t1; + int rb; + + rb = a->rb; + + if ((ctx->insns_flags2 & PPC2_ISA300) == 0) { + /* + * Before Power ISA 3.0, the corresponding bits of RIC, PRS, and R + * (and RS for tlbiel) were reserved fields and should be ignored. + */ + a->ric = 0; + a->prs = false; + a->r = false; + if (local) { + a->rs = 0; + } + } + + if (ctx->pr) { + /* tlbie[l] is privileged... */ + gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); + return true; + } else if (!ctx->hv) { + if ((!a->prs && ctx->hr) || (!local && !ctx->gtse)) { + /* + * ... except when PRS=0 and HR=1, or when GTSE=0 for tlbie, + * making it hypervisor privileged. + */ + gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); + return true; + } + } + + if (!local && NARROW_MODE(ctx)) { + TCGv t0 = tcg_temp_new(); + tcg_gen_ext32u_tl(t0, cpu_gpr[rb]); + gen_helper_tlbie(cpu_env, t0); + tcg_temp_free(t0); + +#if defined(TARGET_PPC64) + /* + * ISA 3.1B says that MSR SF must be 1 when this instruction is executed; + * otherwise the results are undefined. + */ + } else if (a->r) { + gen_helper_tlbie_isa300(cpu_env, cpu_gpr[rb], cpu_gpr[a->rs], + tcg_constant_i32(a->ric << TLBIE_F_RIC_SHIFT | + a->prs << TLBIE_F_PRS_SHIFT | + a->r << TLBIE_F_R_SHIFT | + local << TLBIE_F_LOCAL_SHIFT)); + return true; +#endif + + } else { + gen_helper_tlbie(cpu_env, cpu_gpr[rb]); + } + + if (local) { + return true; + } + + t1 = tcg_temp_new_i32(); + tcg_gen_ld_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush)); + tcg_gen_ori_i32(t1, t1, TLB_NEED_GLOBAL_FLUSH); + tcg_gen_st_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush)); + tcg_temp_free_i32(t1); + + return true; +#endif +} + +TRANS_FLAGS(MEM_TLBIE, TLBIE, do_tlbie, false) +TRANS_FLAGS(MEM_TLBIE, TLBIEL, do_tlbie, true) diff --git a/target/ppc/translate/vector-impl.c.inc b/target/ppc/translate/vector-impl.c.inc deleted file mode 100644 index 117ce9b137..0000000000 --- a/target/ppc/translate/vector-impl.c.inc +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Power ISA decode for Vector Facility instructions - * - * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br) - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . 
- */ - -#define REQUIRE_ALTIVEC(CTX) \ - do { \ - if (unlikely(!(CTX)->altivec_enabled)) { \ - gen_exception((CTX), POWERPC_EXCP_VPU); \ - return true; \ - } \ - } while (0) - -static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a) -{ - TCGv_i64 tgt, src, mask; - - REQUIRE_INSNS_FLAGS2(ctx, ISA310); - REQUIRE_ALTIVEC(ctx); - - tgt = tcg_temp_new_i64(); - src = tcg_temp_new_i64(); - mask = tcg_temp_new_i64(); - - /* centrifuge lower double word */ - get_cpu_vsrl(src, a->vra + 32); - get_cpu_vsrl(mask, a->vrb + 32); - gen_helper_cfuged(tgt, src, mask); - set_cpu_vsrl(a->vrt + 32, tgt); - - /* centrifuge higher double word */ - get_cpu_vsrh(src, a->vra + 32); - get_cpu_vsrh(mask, a->vrb + 32); - gen_helper_cfuged(tgt, src, mask); - set_cpu_vsrh(a->vrt + 32, tgt); - - tcg_temp_free_i64(tgt); - tcg_temp_free_i64(src); - tcg_temp_free_i64(mask); - - return true; -} diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc index 92b9527aff..764b76dcc6 100644 --- a/target/ppc/translate/vmx-impl.c.inc +++ b/target/ppc/translate/vmx-impl.c.inc @@ -173,7 +173,7 @@ static void gen_mtvscr(DisasContext *ctx) val = tcg_temp_new_i32(); bofs = avr_full_offset(rB(ctx->opcode)); -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN bofs += 3 * 4; #endif @@ -431,21 +431,6 @@ GEN_VXFORM_V(vminsb, MO_8, tcg_gen_gvec_smin, 1, 12); GEN_VXFORM_V(vminsh, MO_16, tcg_gen_gvec_smin, 1, 13); GEN_VXFORM_V(vminsw, MO_32, tcg_gen_gvec_smin, 1, 14); GEN_VXFORM_V(vminsd, MO_64, tcg_gen_gvec_smin, 1, 15); -GEN_VXFORM(vavgub, 1, 16); -GEN_VXFORM(vabsdub, 1, 16); -GEN_VXFORM_DUAL(vavgub, PPC_ALTIVEC, PPC_NONE, \ - vabsdub, PPC_NONE, PPC2_ISA300) -GEN_VXFORM(vavguh, 1, 17); -GEN_VXFORM(vabsduh, 1, 17); -GEN_VXFORM_DUAL(vavguh, PPC_ALTIVEC, PPC_NONE, \ - vabsduh, PPC_NONE, PPC2_ISA300) -GEN_VXFORM(vavguw, 1, 18); -GEN_VXFORM(vabsduw, 1, 18); -GEN_VXFORM_DUAL(vavguw, PPC_ALTIVEC, PPC_NONE, \ - vabsduw, PPC_NONE, PPC2_ISA300) -GEN_VXFORM(vavgsb, 1, 20); -GEN_VXFORM(vavgsh, 1, 21); -GEN_VXFORM(vavgsw, 1, 22); GEN_VXFORM(vmrghb, 6, 0); GEN_VXFORM(vmrghh, 6, 1); GEN_VXFORM(vmrghw, 6, 2); @@ -798,51 +783,386 @@ static void trans_vclzd(DisasContext *ctx) tcg_temp_free_i64(avr); } -GEN_VXFORM(vmuloub, 4, 0); -GEN_VXFORM(vmulouh, 4, 1); -GEN_VXFORM(vmulouw, 4, 2); GEN_VXFORM_V(vmuluwm, MO_32, tcg_gen_gvec_mul, 4, 2); -GEN_VXFORM_DUAL(vmulouw, PPC_ALTIVEC, PPC_NONE, - vmuluwm, PPC_NONE, PPC2_ALTIVEC_207) -GEN_VXFORM(vmulosb, 4, 4); -GEN_VXFORM(vmulosh, 4, 5); -GEN_VXFORM(vmulosw, 4, 6); -GEN_VXFORM_V(vmulld, MO_64, tcg_gen_gvec_mul, 4, 7); -GEN_VXFORM(vmuleub, 4, 8); -GEN_VXFORM(vmuleuh, 4, 9); -GEN_VXFORM(vmuleuw, 4, 10); -GEN_VXFORM(vmulhuw, 4, 10); -GEN_VXFORM(vmulhud, 4, 11); -GEN_VXFORM_DUAL(vmuleuw, PPC_ALTIVEC, PPC_NONE, - vmulhuw, PPC_NONE, PPC2_ISA310); -GEN_VXFORM(vmulesb, 4, 12); -GEN_VXFORM(vmulesh, 4, 13); -GEN_VXFORM(vmulesw, 4, 14); -GEN_VXFORM(vmulhsw, 4, 14); -GEN_VXFORM_DUAL(vmulesw, PPC_ALTIVEC, PPC_NONE, - vmulhsw, PPC_NONE, PPC2_ISA310); -GEN_VXFORM(vmulhsd, 4, 15); -GEN_VXFORM_V(vslb, MO_8, tcg_gen_gvec_shlv, 2, 4); -GEN_VXFORM_V(vslh, MO_16, tcg_gen_gvec_shlv, 2, 5); -GEN_VXFORM_V(vslw, MO_32, tcg_gen_gvec_shlv, 2, 6); -GEN_VXFORM(vrlwnm, 2, 6); -GEN_VXFORM_DUAL(vslw, PPC_ALTIVEC, PPC_NONE, \ - vrlwnm, PPC_NONE, PPC2_ISA300) -GEN_VXFORM_V(vsld, MO_64, tcg_gen_gvec_shlv, 2, 23); -GEN_VXFORM_V(vsrb, MO_8, tcg_gen_gvec_shrv, 2, 8); -GEN_VXFORM_V(vsrh, MO_16, tcg_gen_gvec_shrv, 2, 9); -GEN_VXFORM_V(vsrw, MO_32, tcg_gen_gvec_shrv, 2, 10); -GEN_VXFORM_V(vsrd, MO_64, tcg_gen_gvec_shrv, 2, 27); 
-GEN_VXFORM_V(vsrab, MO_8, tcg_gen_gvec_sarv, 2, 12); -GEN_VXFORM_V(vsrah, MO_16, tcg_gen_gvec_sarv, 2, 13); -GEN_VXFORM_V(vsraw, MO_32, tcg_gen_gvec_sarv, 2, 14); -GEN_VXFORM_V(vsrad, MO_64, tcg_gen_gvec_sarv, 2, 15); GEN_VXFORM(vsrv, 2, 28); GEN_VXFORM(vslv, 2, 29); GEN_VXFORM(vslo, 6, 16); GEN_VXFORM(vsro, 6, 17); -GEN_VXFORM(vaddcuw, 0, 6); -GEN_VXFORM(vsubcuw, 0, 22); + +static bool do_vector_gvec3_VX(DisasContext *ctx, arg_VX *a, int vece, + void (*gen_gvec)(unsigned, uint32_t, uint32_t, + uint32_t, uint32_t, uint32_t)) +{ + REQUIRE_VECTOR(ctx); + + gen_gvec(vece, avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16); + + return true; +} + +TRANS_FLAGS(ALTIVEC, VSLB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_shlv); +TRANS_FLAGS(ALTIVEC, VSLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_shlv); +TRANS_FLAGS(ALTIVEC, VSLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_shlv); +TRANS_FLAGS2(ALTIVEC_207, VSLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_shlv); + +TRANS_FLAGS(ALTIVEC, VSRB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_shrv); +TRANS_FLAGS(ALTIVEC, VSRH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_shrv); +TRANS_FLAGS(ALTIVEC, VSRW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_shrv); +TRANS_FLAGS2(ALTIVEC_207, VSRD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_shrv); + +TRANS_FLAGS(ALTIVEC, VSRAB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_sarv); +TRANS_FLAGS(ALTIVEC, VSRAH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_sarv); +TRANS_FLAGS(ALTIVEC, VSRAW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_sarv); +TRANS_FLAGS2(ALTIVEC_207, VSRAD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_sarv); + +TRANS_FLAGS(ALTIVEC, VRLB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_rotlv) +TRANS_FLAGS(ALTIVEC, VRLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_rotlv) +TRANS_FLAGS(ALTIVEC, VRLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_rotlv) +TRANS_FLAGS2(ALTIVEC_207, VRLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_rotlv) + +static TCGv_vec do_vrl_mask_vec(unsigned vece, TCGv_vec vrb) +{ + TCGv_vec t0 = tcg_temp_new_vec_matching(vrb), + t1 = tcg_temp_new_vec_matching(vrb), + t2 = tcg_temp_new_vec_matching(vrb), + ones = tcg_constant_vec_matching(vrb, vece, -1); + + /* Extract b and e */ + tcg_gen_dupi_vec(vece, t2, (8 << vece) - 1); + + tcg_gen_shri_vec(vece, t0, vrb, 16); + tcg_gen_and_vec(vece, t0, t0, t2); + + tcg_gen_shri_vec(vece, t1, vrb, 8); + tcg_gen_and_vec(vece, t1, t1, t2); + + /* Compare b and e to negate the mask where begin > end */ + tcg_gen_cmp_vec(TCG_COND_GT, vece, t2, t0, t1); + + /* Create the mask with (~0 >> b) ^ ((~0 >> e) >> 1) */ + tcg_gen_shrv_vec(vece, t0, ones, t0); + tcg_gen_shrv_vec(vece, t1, ones, t1); + tcg_gen_shri_vec(vece, t1, t1, 1); + tcg_gen_xor_vec(vece, t0, t0, t1); + + /* negate the mask */ + tcg_gen_xor_vec(vece, t0, t0, t2); + + tcg_temp_free_vec(t1); + tcg_temp_free_vec(t2); + + return t0; +} + +static void gen_vrlnm_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra, + TCGv_vec vrb) +{ + TCGv_vec mask, n = tcg_temp_new_vec_matching(vrt); + + /* Create the mask */ + mask = do_vrl_mask_vec(vece, vrb); + + /* Extract n */ + tcg_gen_dupi_vec(vece, n, (8 << vece) - 1); + tcg_gen_and_vec(vece, n, vrb, n); + + /* Rotate and mask */ + tcg_gen_rotlv_vec(vece, vrt, vra, n); + tcg_gen_and_vec(vece, vrt, vrt, mask); + + tcg_temp_free_vec(n); + tcg_temp_free_vec(mask); +} + +static bool do_vrlnm(DisasContext *ctx, arg_VX *a, int vece) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_cmp_vec, INDEX_op_rotlv_vec, INDEX_op_sari_vec, + INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_shrv_vec, 0 + }; + 
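+    /*
+     * ops[] is indexed by vece - MO_32: entry 0 expands VRLWNM on word
+     * elements, entry 1 expands VRLDNM on doubleword elements.  Each
+     * rotates a VRA element left by the low-order bits of the matching
+     * VRB element and then ANDs in the begin/end mask built by
+     * do_vrl_mask_vec().
+     */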
static const GVecGen3 ops[2] = { + { + .fniv = gen_vrlnm_vec, + .fno = gen_helper_VRLWNM, + .opt_opc = vecop_list, + .load_dest = true, + .vece = MO_32 + }, + { + .fniv = gen_vrlnm_vec, + .fno = gen_helper_VRLDNM, + .opt_opc = vecop_list, + .load_dest = true, + .vece = MO_64 + } + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VSX(ctx); + + tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, &ops[vece - 2]); + + return true; +} + +TRANS(VRLWNM, do_vrlnm, MO_32) +TRANS(VRLDNM, do_vrlnm, MO_64) + +static void gen_vrlmi_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra, + TCGv_vec vrb) +{ + TCGv_vec mask, n = tcg_temp_new_vec_matching(vrt), + tmp = tcg_temp_new_vec_matching(vrt); + + /* Create the mask */ + mask = do_vrl_mask_vec(vece, vrb); + + /* Extract n */ + tcg_gen_dupi_vec(vece, n, (8 << vece) - 1); + tcg_gen_and_vec(vece, n, vrb, n); + + /* Rotate and insert */ + tcg_gen_rotlv_vec(vece, tmp, vra, n); + tcg_gen_bitsel_vec(vece, vrt, mask, tmp, vrt); + + tcg_temp_free_vec(n); + tcg_temp_free_vec(tmp); + tcg_temp_free_vec(mask); +} + +static bool do_vrlmi(DisasContext *ctx, arg_VX *a, int vece) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_cmp_vec, INDEX_op_rotlv_vec, INDEX_op_sari_vec, + INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_shrv_vec, 0 + }; + static const GVecGen3 ops[2] = { + { + .fniv = gen_vrlmi_vec, + .fno = gen_helper_VRLWMI, + .opt_opc = vecop_list, + .load_dest = true, + .vece = MO_32 + }, + { + .fniv = gen_vrlnm_vec, + .fno = gen_helper_VRLDMI, + .opt_opc = vecop_list, + .load_dest = true, + .vece = MO_64 + } + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VSX(ctx); + + tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, &ops[vece - 2]); + + return true; +} + +TRANS(VRLWMI, do_vrlmi, MO_32) +TRANS(VRLDMI, do_vrlmi, MO_64) + +static bool do_vector_shift_quad(DisasContext *ctx, arg_VX *a, bool right, + bool alg) +{ + TCGv_i64 hi, lo, t0, t1, n, zero = tcg_constant_i64(0); + + REQUIRE_VECTOR(ctx); + + n = tcg_temp_new_i64(); + hi = tcg_temp_new_i64(); + lo = tcg_temp_new_i64(); + t0 = tcg_temp_new_i64(); + t1 = tcg_const_i64(0); + + get_avr64(lo, a->vra, false); + get_avr64(hi, a->vra, true); + + get_avr64(n, a->vrb, true); + + tcg_gen_andi_i64(t0, n, 64); + if (right) { + tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, hi, lo); + if (alg) { + tcg_gen_sari_i64(t1, lo, 63); + } + tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, t1, hi); + } else { + tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, lo, hi); + tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, zero, lo); + } + tcg_gen_andi_i64(n, n, 0x3F); + + if (right) { + if (alg) { + tcg_gen_sar_i64(t0, hi, n); + } else { + tcg_gen_shr_i64(t0, hi, n); + } + } else { + tcg_gen_shl_i64(t0, lo, n); + } + set_avr64(a->vrt, t0, right); + + if (right) { + tcg_gen_shr_i64(lo, lo, n); + } else { + tcg_gen_shl_i64(hi, hi, n); + } + tcg_gen_xori_i64(n, n, 63); + if (right) { + tcg_gen_shl_i64(hi, hi, n); + tcg_gen_shli_i64(hi, hi, 1); + } else { + tcg_gen_shr_i64(lo, lo, n); + tcg_gen_shri_i64(lo, lo, 1); + } + tcg_gen_or_i64(hi, hi, lo); + set_avr64(a->vrt, hi, !right); + + tcg_temp_free_i64(hi); + tcg_temp_free_i64(lo); + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + tcg_temp_free_i64(n); + + return true; +} + +TRANS_FLAGS2(ISA310, VSLQ, do_vector_shift_quad, false, false); +TRANS_FLAGS2(ISA310, VSRQ, do_vector_shift_quad, true, false); +TRANS_FLAGS2(ISA310, VSRAQ, do_vector_shift_quad, true, true); + +static void 
do_vrlq_mask(TCGv_i64 mh, TCGv_i64 ml, TCGv_i64 b, TCGv_i64 e) +{ + TCGv_i64 th, tl, t0, t1, zero = tcg_constant_i64(0), + ones = tcg_constant_i64(-1); + + th = tcg_temp_new_i64(); + tl = tcg_temp_new_i64(); + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + + /* m = ~0 >> b */ + tcg_gen_andi_i64(t0, b, 64); + tcg_gen_movcond_i64(TCG_COND_NE, t1, t0, zero, zero, ones); + tcg_gen_andi_i64(t0, b, 0x3F); + tcg_gen_shr_i64(mh, t1, t0); + tcg_gen_shr_i64(ml, ones, t0); + tcg_gen_xori_i64(t0, t0, 63); + tcg_gen_shl_i64(t1, t1, t0); + tcg_gen_shli_i64(t1, t1, 1); + tcg_gen_or_i64(ml, t1, ml); + + /* t = ~0 >> e */ + tcg_gen_andi_i64(t0, e, 64); + tcg_gen_movcond_i64(TCG_COND_NE, t1, t0, zero, zero, ones); + tcg_gen_andi_i64(t0, e, 0x3F); + tcg_gen_shr_i64(th, t1, t0); + tcg_gen_shr_i64(tl, ones, t0); + tcg_gen_xori_i64(t0, t0, 63); + tcg_gen_shl_i64(t1, t1, t0); + tcg_gen_shli_i64(t1, t1, 1); + tcg_gen_or_i64(tl, t1, tl); + + /* t = t >> 1 */ + tcg_gen_extract2_i64(tl, tl, th, 1); + tcg_gen_shri_i64(th, th, 1); + + /* m = m ^ t */ + tcg_gen_xor_i64(mh, mh, th); + tcg_gen_xor_i64(ml, ml, tl); + + /* Negate the mask if begin > end */ + tcg_gen_movcond_i64(TCG_COND_GT, t0, b, e, ones, zero); + + tcg_gen_xor_i64(mh, mh, t0); + tcg_gen_xor_i64(ml, ml, t0); + + tcg_temp_free_i64(th); + tcg_temp_free_i64(tl); + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); +} + +static bool do_vector_rotl_quad(DisasContext *ctx, arg_VX *a, bool mask, + bool insert) +{ + TCGv_i64 ah, al, vrb, n, t0, t1, zero = tcg_constant_i64(0); + + REQUIRE_VECTOR(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + + ah = tcg_temp_new_i64(); + al = tcg_temp_new_i64(); + vrb = tcg_temp_new_i64(); + n = tcg_temp_new_i64(); + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + + get_avr64(ah, a->vra, true); + get_avr64(al, a->vra, false); + get_avr64(vrb, a->vrb, true); + + tcg_gen_mov_i64(t0, ah); + tcg_gen_andi_i64(t1, vrb, 64); + tcg_gen_movcond_i64(TCG_COND_NE, ah, t1, zero, al, ah); + tcg_gen_movcond_i64(TCG_COND_NE, al, t1, zero, t0, al); + tcg_gen_andi_i64(n, vrb, 0x3F); + + tcg_gen_shl_i64(t0, ah, n); + tcg_gen_shl_i64(t1, al, n); + + tcg_gen_xori_i64(n, n, 63); + + tcg_gen_shr_i64(al, al, n); + tcg_gen_shri_i64(al, al, 1); + tcg_gen_or_i64(t0, al, t0); + + tcg_gen_shr_i64(ah, ah, n); + tcg_gen_shri_i64(ah, ah, 1); + tcg_gen_or_i64(t1, ah, t1); + + if (mask || insert) { + tcg_gen_extract_i64(n, vrb, 8, 7); + tcg_gen_extract_i64(vrb, vrb, 16, 7); + + do_vrlq_mask(ah, al, vrb, n); + + tcg_gen_and_i64(t0, t0, ah); + tcg_gen_and_i64(t1, t1, al); + + if (insert) { + get_avr64(n, a->vrt, true); + get_avr64(vrb, a->vrt, false); + tcg_gen_andc_i64(n, n, ah); + tcg_gen_andc_i64(vrb, vrb, al); + tcg_gen_or_i64(t0, t0, n); + tcg_gen_or_i64(t1, t1, vrb); + } + } + + set_avr64(a->vrt, t0, true); + set_avr64(a->vrt, t1, false); + + tcg_temp_free_i64(ah); + tcg_temp_free_i64(al); + tcg_temp_free_i64(vrb); + tcg_temp_free_i64(n); + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + + return true; +} + +TRANS(VRLQ, do_vector_rotl_quad, false, false) +TRANS(VRLQNM, do_vector_rotl_quad, true, false) +TRANS(VRLQMI, do_vector_rotl_quad, false, true) #define GEN_VXFORM_SAT(NAME, VECE, NORM, SAT, OPC2, OPC3) \ static void glue(glue(gen_, NAME), _vec)(unsigned vece, TCGv_vec t, \ @@ -897,32 +1217,7 @@ GEN_VXFORM_SAT(vsubuws, MO_32, sub, ussub, 0, 26); GEN_VXFORM_SAT(vsubsbs, MO_8, sub, sssub, 0, 28); GEN_VXFORM_SAT(vsubshs, MO_16, sub, sssub, 0, 29); GEN_VXFORM_SAT(vsubsws, MO_32, sub, sssub, 0, 30); -GEN_VXFORM(vadduqm, 0, 4); -GEN_VXFORM(vaddcuq, 0, 
5); -GEN_VXFORM3(vaddeuqm, 30, 0); -GEN_VXFORM3(vaddecuq, 30, 0); -GEN_VXFORM_DUAL(vaddeuqm, PPC_NONE, PPC2_ALTIVEC_207, \ - vaddecuq, PPC_NONE, PPC2_ALTIVEC_207) -GEN_VXFORM(vsubuqm, 0, 20); -GEN_VXFORM(vsubcuq, 0, 21); -GEN_VXFORM3(vsubeuqm, 31, 0); -GEN_VXFORM3(vsubecuq, 31, 0); -GEN_VXFORM_DUAL(vsubeuqm, PPC_NONE, PPC2_ALTIVEC_207, \ - vsubecuq, PPC_NONE, PPC2_ALTIVEC_207) -GEN_VXFORM_V(vrlb, MO_8, tcg_gen_gvec_rotlv, 2, 0); -GEN_VXFORM_V(vrlh, MO_16, tcg_gen_gvec_rotlv, 2, 1); -GEN_VXFORM_V(vrlw, MO_32, tcg_gen_gvec_rotlv, 2, 2); -GEN_VXFORM(vrlwmi, 2, 2); -GEN_VXFORM_DUAL(vrlw, PPC_ALTIVEC, PPC_NONE, \ - vrlwmi, PPC_NONE, PPC2_ISA300) -GEN_VXFORM_V(vrld, MO_64, tcg_gen_gvec_rotlv, 2, 3); -GEN_VXFORM(vrldmi, 2, 3); -GEN_VXFORM_DUAL(vrld, PPC_NONE, PPC2_ALTIVEC_207, \ - vrldmi, PPC_NONE, PPC2_ISA300) GEN_VXFORM_TRANS(vsl, 2, 7); -GEN_VXFORM(vrldnm, 2, 7); -GEN_VXFORM_DUAL(vsl, PPC_ALTIVEC, PPC_NONE, \ - vrldnm, PPC_NONE, PPC2_ISA300) GEN_VXFORM_TRANS(vsr, 2, 11); GEN_VXFORM_ENV(vpkuhum, 7, 0); GEN_VXFORM_ENV(vpkuwum, 7, 1); @@ -1008,41 +1303,252 @@ static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ } \ } -GEN_VXRFORM(vcmpequb, 3, 0) -GEN_VXRFORM(vcmpequh, 3, 1) -GEN_VXRFORM(vcmpequw, 3, 2) -GEN_VXRFORM(vcmpequd, 3, 3) -GEN_VXRFORM(vcmpnezb, 3, 4) -GEN_VXRFORM(vcmpnezh, 3, 5) -GEN_VXRFORM(vcmpnezw, 3, 6) -GEN_VXRFORM(vcmpgtsb, 3, 12) -GEN_VXRFORM(vcmpgtsh, 3, 13) -GEN_VXRFORM(vcmpgtsw, 3, 14) -GEN_VXRFORM(vcmpgtsd, 3, 15) -GEN_VXRFORM(vcmpgtub, 3, 8) -GEN_VXRFORM(vcmpgtuh, 3, 9) -GEN_VXRFORM(vcmpgtuw, 3, 10) -GEN_VXRFORM(vcmpgtud, 3, 11) +static void do_vcmp_rc(int vrt) +{ + TCGv_i64 tmp, set, clr; + + tmp = tcg_temp_new_i64(); + set = tcg_temp_new_i64(); + clr = tcg_temp_new_i64(); + + get_avr64(tmp, vrt, true); + tcg_gen_mov_i64(set, tmp); + get_avr64(tmp, vrt, false); + tcg_gen_or_i64(clr, set, tmp); + tcg_gen_and_i64(set, set, tmp); + + tcg_gen_setcondi_i64(TCG_COND_EQ, clr, clr, 0); + tcg_gen_shli_i64(clr, clr, 1); + + tcg_gen_setcondi_i64(TCG_COND_EQ, set, set, -1); + tcg_gen_shli_i64(set, set, 3); + + tcg_gen_or_i64(tmp, set, clr); + tcg_gen_extrl_i64_i32(cpu_crf[6], tmp); + + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(set); + tcg_temp_free_i64(clr); +} + +static bool do_vcmp(DisasContext *ctx, arg_VC *a, TCGCond cond, int vece) +{ + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_cmp(cond, vece, avr_full_offset(a->vrt), + avr_full_offset(a->vra), avr_full_offset(a->vrb), 16, 16); + + if (a->rc) { + do_vcmp_rc(a->vrt); + } + + return true; +} + +TRANS_FLAGS(ALTIVEC, VCMPEQUB, do_vcmp, TCG_COND_EQ, MO_8) +TRANS_FLAGS(ALTIVEC, VCMPEQUH, do_vcmp, TCG_COND_EQ, MO_16) +TRANS_FLAGS(ALTIVEC, VCMPEQUW, do_vcmp, TCG_COND_EQ, MO_32) +TRANS_FLAGS2(ALTIVEC_207, VCMPEQUD, do_vcmp, TCG_COND_EQ, MO_64) + +TRANS_FLAGS(ALTIVEC, VCMPGTSB, do_vcmp, TCG_COND_GT, MO_8) +TRANS_FLAGS(ALTIVEC, VCMPGTSH, do_vcmp, TCG_COND_GT, MO_16) +TRANS_FLAGS(ALTIVEC, VCMPGTSW, do_vcmp, TCG_COND_GT, MO_32) +TRANS_FLAGS2(ALTIVEC_207, VCMPGTSD, do_vcmp, TCG_COND_GT, MO_64) +TRANS_FLAGS(ALTIVEC, VCMPGTUB, do_vcmp, TCG_COND_GTU, MO_8) +TRANS_FLAGS(ALTIVEC, VCMPGTUH, do_vcmp, TCG_COND_GTU, MO_16) +TRANS_FLAGS(ALTIVEC, VCMPGTUW, do_vcmp, TCG_COND_GTU, MO_32) +TRANS_FLAGS2(ALTIVEC_207, VCMPGTUD, do_vcmp, TCG_COND_GTU, MO_64) + +TRANS_FLAGS2(ISA300, VCMPNEB, do_vcmp, TCG_COND_NE, MO_8) +TRANS_FLAGS2(ISA300, VCMPNEH, do_vcmp, TCG_COND_NE, MO_16) +TRANS_FLAGS2(ISA300, VCMPNEW, do_vcmp, TCG_COND_NE, MO_32) + +static void gen_vcmpnez_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t0, t1, zero; + + t0 = 
tcg_temp_new_vec_matching(t); + t1 = tcg_temp_new_vec_matching(t); + zero = tcg_constant_vec_matching(t, vece, 0); + + tcg_gen_cmp_vec(TCG_COND_EQ, vece, t0, a, zero); + tcg_gen_cmp_vec(TCG_COND_EQ, vece, t1, b, zero); + tcg_gen_cmp_vec(TCG_COND_NE, vece, t, a, b); + + tcg_gen_or_vec(vece, t, t, t0); + tcg_gen_or_vec(vece, t, t, t1); + + tcg_temp_free_vec(t0); + tcg_temp_free_vec(t1); +} + +static bool do_vcmpnez(DisasContext *ctx, arg_VC *a, int vece) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_cmp_vec, 0 + }; + static const GVecGen3 ops[3] = { + { + .fniv = gen_vcmpnez_vec, + .fno = gen_helper_VCMPNEZB, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vcmpnez_vec, + .fno = gen_helper_VCMPNEZH, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vcmpnez_vec, + .fno = gen_helper_VCMPNEZW, + .opt_opc = vecop_list, + .vece = MO_32 + } + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, &ops[vece]); + + if (a->rc) { + do_vcmp_rc(a->vrt); + } + + return true; +} + +TRANS(VCMPNEZB, do_vcmpnez, MO_8) +TRANS(VCMPNEZH, do_vcmpnez, MO_16) +TRANS(VCMPNEZW, do_vcmpnez, MO_32) + +static bool trans_VCMPEQUQ(DisasContext *ctx, arg_VC *a) +{ + TCGv_i64 t0, t1, t2; + + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + + get_avr64(t0, a->vra, true); + get_avr64(t1, a->vrb, true); + tcg_gen_xor_i64(t2, t0, t1); + + get_avr64(t0, a->vra, false); + get_avr64(t1, a->vrb, false); + tcg_gen_xor_i64(t1, t0, t1); + + tcg_gen_or_i64(t1, t1, t2); + tcg_gen_setcondi_i64(TCG_COND_EQ, t1, t1, 0); + tcg_gen_neg_i64(t1, t1); + + set_avr64(a->vrt, t1, true); + set_avr64(a->vrt, t1, false); + + if (a->rc) { + tcg_gen_extrl_i64_i32(cpu_crf[6], t1); + tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa); + tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2); + } + + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); + + return true; +} + +static bool do_vcmpgtq(DisasContext *ctx, arg_VC *a, bool sign) +{ + TCGv_i64 t0, t1, t2; + + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + + get_avr64(t0, a->vra, false); + get_avr64(t1, a->vrb, false); + tcg_gen_setcond_i64(TCG_COND_GTU, t2, t0, t1); + + get_avr64(t0, a->vra, true); + get_avr64(t1, a->vrb, true); + tcg_gen_movcond_i64(TCG_COND_EQ, t2, t0, t1, t2, tcg_constant_i64(0)); + tcg_gen_setcond_i64(sign ? TCG_COND_GT : TCG_COND_GTU, t1, t0, t1); + + tcg_gen_or_i64(t1, t1, t2); + tcg_gen_neg_i64(t1, t1); + + set_avr64(a->vrt, t1, true); + set_avr64(a->vrt, t1, false); + + if (a->rc) { + tcg_gen_extrl_i64_i32(cpu_crf[6], t1); + tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa); + tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2); + } + + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); + + return true; +} + +TRANS(VCMPGTSQ, do_vcmpgtq, true) +TRANS(VCMPGTUQ, do_vcmpgtq, false) + +static bool do_vcmpq(DisasContext *ctx, arg_VX_bf *a, bool sign) +{ + TCGv_i64 vra, vrb; + TCGLabel *gt, *lt, *done; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + vra = tcg_temp_local_new_i64(); + vrb = tcg_temp_local_new_i64(); + gt = gen_new_label(); + lt = gen_new_label(); + done = gen_new_label(); + + get_avr64(vra, a->vra, true); + get_avr64(vrb, a->vrb, true); + tcg_gen_brcond_i64((sign ? TCG_COND_GT : TCG_COND_GTU), vra, vrb, gt); + tcg_gen_brcond_i64((sign ? 
TCG_COND_LT : TCG_COND_LTU), vra, vrb, lt); + + get_avr64(vra, a->vra, false); + get_avr64(vrb, a->vrb, false); + tcg_gen_brcond_i64(TCG_COND_GTU, vra, vrb, gt); + tcg_gen_brcond_i64(TCG_COND_LTU, vra, vrb, lt); + + tcg_gen_movi_i32(cpu_crf[a->bf], CRF_EQ); + tcg_gen_br(done); + + gen_set_label(gt); + tcg_gen_movi_i32(cpu_crf[a->bf], CRF_GT); + tcg_gen_br(done); + + gen_set_label(lt); + tcg_gen_movi_i32(cpu_crf[a->bf], CRF_LT); + tcg_gen_br(done); + + gen_set_label(done); + tcg_temp_free_i64(vra); + tcg_temp_free_i64(vrb); + + return true; +} + +TRANS(VCMPSQ, do_vcmpq, true) +TRANS(VCMPUQ, do_vcmpq, false) + GEN_VXRFORM(vcmpeqfp, 3, 3) GEN_VXRFORM(vcmpgefp, 3, 7) GEN_VXRFORM(vcmpgtfp, 3, 11) GEN_VXRFORM(vcmpbfp, 3, 15) -GEN_VXRFORM(vcmpneb, 3, 0) -GEN_VXRFORM(vcmpneh, 3, 1) -GEN_VXRFORM(vcmpnew, 3, 2) - -GEN_VXRFORM_DUAL(vcmpequb, PPC_ALTIVEC, PPC_NONE, \ - vcmpneb, PPC_NONE, PPC2_ISA300) -GEN_VXRFORM_DUAL(vcmpequh, PPC_ALTIVEC, PPC_NONE, \ - vcmpneh, PPC_NONE, PPC2_ISA300) -GEN_VXRFORM_DUAL(vcmpequw, PPC_ALTIVEC, PPC_NONE, \ - vcmpnew, PPC_NONE, PPC2_ISA300) -GEN_VXRFORM_DUAL(vcmpeqfp, PPC_ALTIVEC, PPC_NONE, \ - vcmpequd, PPC_NONE, PPC2_ALTIVEC_207) -GEN_VXRFORM_DUAL(vcmpbfp, PPC_ALTIVEC, PPC_NONE, \ - vcmpgtsd, PPC_NONE, PPC2_ALTIVEC_207) -GEN_VXRFORM_DUAL(vcmpgtfp, PPC_ALTIVEC, PPC_NONE, \ - vcmpgtud, PPC_NONE, PPC2_ALTIVEC_207) static void gen_vsplti(DisasContext *ctx, int vece) { @@ -1138,9 +1644,71 @@ GEN_VXFORM_NOA_ENV(vrfim, 5, 11); GEN_VXFORM_NOA_ENV(vrfin, 5, 8); GEN_VXFORM_NOA_ENV(vrfip, 5, 10); GEN_VXFORM_NOA_ENV(vrfiz, 5, 9); -GEN_VXFORM_NOA(vprtybw, 1, 24); -GEN_VXFORM_NOA(vprtybd, 1, 24); -GEN_VXFORM_NOA(vprtybq, 1, 24); + +static void gen_vprtyb_vec(unsigned vece, TCGv_vec t, TCGv_vec b) +{ + int i; + TCGv_vec tmp = tcg_temp_new_vec_matching(b); + /* MO_32 is 2, so 2 iteractions for MO_32 and 3 for MO_64 */ + for (i = 0; i < vece; i++) { + tcg_gen_shri_vec(vece, tmp, b, (4 << (vece - i))); + tcg_gen_xor_vec(vece, b, tmp, b); + } + tcg_gen_and_vec(vece, t, b, tcg_constant_vec_matching(t, vece, 1)); + tcg_temp_free_vec(tmp); +} + +/* vprtybw */ +static void gen_vprtyb_i32(TCGv_i32 t, TCGv_i32 b) +{ + tcg_gen_ctpop_i32(t, b); + tcg_gen_and_i32(t, t, tcg_constant_i32(1)); +} + +/* vprtybd */ +static void gen_vprtyb_i64(TCGv_i64 t, TCGv_i64 b) +{ + tcg_gen_ctpop_i64(t, b); + tcg_gen_and_i64(t, t, tcg_constant_i64(1)); +} + +static bool do_vx_vprtyb(DisasContext *ctx, arg_VX_tb *a, unsigned vece) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shri_vec, 0 + }; + + static const GVecGen2 op[] = { + { + .fniv = gen_vprtyb_vec, + .fni4 = gen_vprtyb_i32, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vprtyb_vec, + .fni8 = gen_vprtyb_i64, + .opt_opc = vecop_list, + .vece = MO_64 + }, + { + .fno = gen_helper_VPRTYBQ, + .vece = MO_128 + }, + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_2(avr_full_offset(a->vrt), avr_full_offset(a->vrb), + 16, 16, &op[vece - MO_32]); + + return true; +} + +TRANS(VPRTYBW, do_vx_vprtyb, MO_32) +TRANS(VPRTYBD, do_vx_vprtyb, MO_64) +TRANS(VPRTYBQ, do_vx_vprtyb, MO_128) static void gen_vsplt(DisasContext *ctx, int vece) { @@ -1157,7 +1725,7 @@ static void gen_vsplt(DisasContext *ctx, int vece) /* Experimental testing shows that hardware masks the immediate. 
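     * In other words the splat index wraps modulo the number of elements,
     * which is what the (uimm << vece) & 15 byte-offset computation below
     * implements.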
*/ bofs += (uimm << vece) & 15; -#ifndef HOST_WORDS_BIGENDIAN +#if !HOST_BIG_ENDIAN bofs ^= 15; bofs &= ~((1 << vece) - 1); #endif @@ -1217,10 +1785,6 @@ GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15); GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14); GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12); GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8); -GEN_VXFORM_UIMM_SPLAT(vinsertb, 6, 12, 15); -GEN_VXFORM_UIMM_SPLAT(vinserth, 6, 13, 14); -GEN_VXFORM_UIMM_SPLAT(vinsertw, 6, 14, 12); -GEN_VXFORM_UIMM_SPLAT(vinsertd, 6, 15, 8); GEN_VXFORM_UIMM_ENV(vcfux, 5, 12); GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13); GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14); @@ -1231,12 +1795,319 @@ GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE, vextractuh, PPC_NONE, PPC2_ISA300); GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE, vextractuw, PPC_NONE, PPC2_ISA300); -GEN_VXFORM_DUAL(vspltisb, PPC_ALTIVEC, PPC_NONE, - vinsertb, PPC_NONE, PPC2_ISA300); -GEN_VXFORM_DUAL(vspltish, PPC_ALTIVEC, PPC_NONE, - vinserth, PPC_NONE, PPC2_ISA300); -GEN_VXFORM_DUAL(vspltisw, PPC_ALTIVEC, PPC_NONE, - vinsertw, PPC_NONE, PPC2_ISA300); + +static bool trans_VGNB(DisasContext *ctx, arg_VX_n *a) +{ + /* + * Similar to do_vextractm, we'll use a sequence of mask-shift-or operations + * to gather the bits. The masks can be created with + * + * uint64_t mask(uint64_t n, uint64_t step) + * { + * uint64_t p = ((1UL << (1UL << step)) - 1UL) << ((n - 1UL) << step), + * plen = n << step, m = 0; + * for(int i = 0; i < 64/plen; i++) { + * m |= p; + * m = ror64(m, plen); + * } + * p >>= plen * DIV_ROUND_UP(64, plen) - 64; + * return m | p; + * } + * + * But since there are few values of N, we'll use a lookup table to avoid + * these calculations at runtime. + */ + static const uint64_t mask[6][5] = { + { + 0xAAAAAAAAAAAAAAAAULL, 0xccccccccccccccccULL, 0xf0f0f0f0f0f0f0f0ULL, + 0xff00ff00ff00ff00ULL, 0xffff0000ffff0000ULL + }, + { + 0x9249249249249249ULL, 0xC30C30C30C30C30CULL, 0xF00F00F00F00F00FULL, + 0xFF0000FF0000FF00ULL, 0xFFFF00000000FFFFULL + }, + { + /* For N >= 4, some mask operations can be elided */ + 0x8888888888888888ULL, 0, 0xf000f000f000f000ULL, 0, + 0xFFFF000000000000ULL + }, + { + 0x8421084210842108ULL, 0, 0xF0000F0000F0000FULL, 0, 0 + }, + { + 0x8208208208208208ULL, 0, 0xF00000F00000F000ULL, 0, 0 + }, + { + 0x8102040810204081ULL, 0, 0xF000000F000000F0ULL, 0, 0 + } + }; + uint64_t m; + int i, sh, nbits = DIV_ROUND_UP(64, a->n); + TCGv_i64 hi, lo, t0, t1; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + if (a->n < 2) { + /* + * "N can be any value between 2 and 7, inclusive." Otherwise, the + * result is undefined, so we don't need to change RT. Also, N > 7 is + * impossible since the immediate field is 3 bits only. + */ + return true; + } + + hi = tcg_temp_new_i64(); + lo = tcg_temp_new_i64(); + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + + get_avr64(hi, a->vrb, true); + get_avr64(lo, a->vrb, false); + + /* Align the lower doubleword so we can use the same mask */ + tcg_gen_shli_i64(lo, lo, a->n * nbits - 64); + + /* + * Starting from the most significant bit, gather every Nth bit with a + * sequence of mask-shift-or operation. E.g.: for N=3 + * AxxBxxCxxDxxExxFxxGxxHxxIxxJxxKxxLxxMxxNxxOxxPxxQxxRxxSxxTxxUxxV + * & rep(0b100) + * A..B..C..D..E..F..G..H..I..J..K..L..M..N..O..P..Q..R..S..T..U..V + * << 2 + * .B..C..D..E..F..G..H..I..J..K..L..M..N..O..P..Q..R..S..T..U..V.. + * | + * AB.BC.CD.DE.EF.FG.GH.HI.IJ.JK.KL.LM.MN.NO.OP.PQ.QR.RS.ST.TU.UV.V + * & rep(0b110000) + * AB....CD....EF....GH....IJ....KL....MN....OP....QR....ST....UV.. 
+ * << 4 + * ..CD....EF....GH....IJ....KL....MN....OP....QR....ST....UV...... + * | + * ABCD..CDEF..EFGH..GHIJ..IJKL..KLMN..MNOP..OPQR..QRST..STUV..UV.. + * & rep(0b111100000000) + * ABCD........EFGH........IJKL........MNOP........QRST........UV.. + * << 8 + * ....EFGH........IJKL........MNOP........QRST........UV.......... + * | + * ABCDEFGH....EFGHIJKL....IJKLMNOP....MNOPQRST....QRSTUV......UV.. + * & rep(0b111111110000000000000000) + * ABCDEFGH................IJKLMNOP................QRSTUV.......... + * << 16 + * ........IJKLMNOP................QRSTUV.......................... + * | + * ABCDEFGHIJKLMNOP........IJKLMNOPQRSTUV..........QRSTUV.......... + * & rep(0b111111111111111100000000000000000000000000000000) + * ABCDEFGHIJKLMNOP................................QRSTUV.......... + * << 32 + * ................QRSTUV.......................................... + * | + * ABCDEFGHIJKLMNOPQRSTUV..........................QRSTUV.......... + */ + for (i = 0, sh = a->n - 1; i < 5; i++, sh <<= 1) { + m = mask[a->n - 2][i]; + if (m) { + tcg_gen_andi_i64(hi, hi, m); + tcg_gen_andi_i64(lo, lo, m); + } + if (sh < 64) { + tcg_gen_shli_i64(t0, hi, sh); + tcg_gen_shli_i64(t1, lo, sh); + tcg_gen_or_i64(hi, t0, hi); + tcg_gen_or_i64(lo, t1, lo); + } + } + + tcg_gen_andi_i64(hi, hi, ~(~0ULL >> nbits)); + tcg_gen_andi_i64(lo, lo, ~(~0ULL >> nbits)); + tcg_gen_shri_i64(lo, lo, nbits); + tcg_gen_or_i64(hi, hi, lo); + tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], hi); + + tcg_temp_free_i64(hi); + tcg_temp_free_i64(lo); + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + + return true; +} + +static bool do_vextdx(DisasContext *ctx, arg_VA *a, int size, bool right, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv)) +{ + TCGv_ptr vrt, vra, vrb; + TCGv rc; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + vrt = gen_avr_ptr(a->vrt); + vra = gen_avr_ptr(a->vra); + vrb = gen_avr_ptr(a->vrb); + rc = tcg_temp_new(); + + tcg_gen_andi_tl(rc, cpu_gpr[a->rc], 0x1F); + if (right) { + tcg_gen_subfi_tl(rc, 32 - size, rc); + } + gen_helper(cpu_env, vrt, vra, vrb, rc); + + tcg_temp_free_ptr(vrt); + tcg_temp_free_ptr(vra); + tcg_temp_free_ptr(vrb); + tcg_temp_free(rc); + return true; +} + +TRANS(VEXTDUBVLX, do_vextdx, 1, false, gen_helper_VEXTDUBVLX) +TRANS(VEXTDUHVLX, do_vextdx, 2, false, gen_helper_VEXTDUHVLX) +TRANS(VEXTDUWVLX, do_vextdx, 4, false, gen_helper_VEXTDUWVLX) +TRANS(VEXTDDVLX, do_vextdx, 8, false, gen_helper_VEXTDDVLX) + +TRANS(VEXTDUBVRX, do_vextdx, 1, true, gen_helper_VEXTDUBVLX) +TRANS(VEXTDUHVRX, do_vextdx, 2, true, gen_helper_VEXTDUHVLX) +TRANS(VEXTDUWVRX, do_vextdx, 4, true, gen_helper_VEXTDUWVLX) +TRANS(VEXTDDVRX, do_vextdx, 8, true, gen_helper_VEXTDDVLX) + +static bool do_vinsx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra, + TCGv_i64 rb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) +{ + TCGv_ptr t; + TCGv idx; + + t = gen_avr_ptr(vrt); + idx = tcg_temp_new(); + + tcg_gen_andi_tl(idx, ra, 0xF); + if (right) { + tcg_gen_subfi_tl(idx, 16 - size, idx); + } + + gen_helper(cpu_env, t, rb, idx); + + tcg_temp_free_ptr(t); + tcg_temp_free(idx); + + return true; +} + +static bool do_vinsvx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra, + int vrb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) +{ + bool ok; + TCGv_i64 val; + + val = tcg_temp_new_i64(); + get_avr64(val, vrb, true); + ok = do_vinsx(ctx, vrt, size, right, ra, val, gen_helper); + + tcg_temp_free_i64(val); + return ok; +} + +static bool do_vinsx_VX(DisasContext *ctx, arg_VX *a, int size, 
bool right, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) +{ + bool ok; + TCGv_i64 val; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + val = tcg_temp_new_i64(); + tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]); + + ok = do_vinsx(ctx, a->vrt, size, right, cpu_gpr[a->vra], val, gen_helper); + + tcg_temp_free_i64(val); + return ok; +} + +static bool do_vinsvx_VX(DisasContext *ctx, arg_VX *a, int size, bool right, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + return do_vinsvx(ctx, a->vrt, size, right, cpu_gpr[a->vra], a->vrb, + gen_helper); +} + +static bool do_vins_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) +{ + bool ok; + TCGv_i64 val; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + if (a->uim > (16 - size)) { + /* + * PowerISA v3.1 says that the resulting value is undefined in this + * case, so just log a guest error and leave VRT unchanged. The + * real hardware would do a partial insert, e.g. if VRT is zeroed and + * RB is 0x12345678, executing "vinsw VRT,RB,14" results in + * VRT = 0x0000...00001234, but we don't bother to reproduce this + * behavior as software shouldn't rely on it. + */ + qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINS* at" + " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim, + 16 - size); + return true; + } + + val = tcg_temp_new_i64(); + tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]); + + ok = do_vinsx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), val, + gen_helper); + + tcg_temp_free_i64(val); + return ok; +} + +static bool do_vinsert_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VECTOR(ctx); + + if (a->uim > (16 - size)) { + qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINSERT* at" + " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim, + 16 - size); + return true; + } + + return do_vinsvx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), a->vrb, + gen_helper); +} + +TRANS(VINSBLX, do_vinsx_VX, 1, false, gen_helper_VINSBLX) +TRANS(VINSHLX, do_vinsx_VX, 2, false, gen_helper_VINSHLX) +TRANS(VINSWLX, do_vinsx_VX, 4, false, gen_helper_VINSWLX) +TRANS(VINSDLX, do_vinsx_VX, 8, false, gen_helper_VINSDLX) + +TRANS(VINSBRX, do_vinsx_VX, 1, true, gen_helper_VINSBLX) +TRANS(VINSHRX, do_vinsx_VX, 2, true, gen_helper_VINSHLX) +TRANS(VINSWRX, do_vinsx_VX, 4, true, gen_helper_VINSWLX) +TRANS(VINSDRX, do_vinsx_VX, 8, true, gen_helper_VINSDLX) + +TRANS(VINSW, do_vins_VX_uim4, 4, gen_helper_VINSWLX) +TRANS(VINSD, do_vins_VX_uim4, 8, gen_helper_VINSDLX) + +TRANS(VINSBVLX, do_vinsvx_VX, 1, false, gen_helper_VINSBLX) +TRANS(VINSHVLX, do_vinsvx_VX, 2, false, gen_helper_VINSHLX) +TRANS(VINSWVLX, do_vinsvx_VX, 4, false, gen_helper_VINSWLX) + +TRANS(VINSBVRX, do_vinsvx_VX, 1, true, gen_helper_VINSBLX) +TRANS(VINSHVRX, do_vinsvx_VX, 2, true, gen_helper_VINSHLX) +TRANS(VINSWVRX, do_vinsvx_VX, 4, true, gen_helper_VINSWLX) + +TRANS(VINSERTB, do_vinsert_VX_uim4, 1, gen_helper_VINSBLX) +TRANS(VINSERTH, do_vinsert_VX_uim4, 2, gen_helper_VINSHLX) +TRANS(VINSERTW, do_vinsert_VX_uim4, 4, gen_helper_VINSWLX) +TRANS(VINSERTD, do_vinsert_VX_uim4, 8, gen_helper_VINSDLX) static void gen_vsldoi(DisasContext *ctx) { @@ -1257,6 +2128,421 @@ static void gen_vsldoi(DisasContext *ctx) tcg_temp_free_i32(sh); } +static bool trans_VSLDBI(DisasContext *ctx, arg_VN *a) +{ + TCGv_i64 t0, 
t1, t2;
+
+    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+    REQUIRE_VECTOR(ctx);
+
+    t0 = tcg_temp_new_i64();
+    t1 = tcg_temp_new_i64();
+
+    get_avr64(t0, a->vra, true);
+    get_avr64(t1, a->vra, false);
+
+    if (a->sh != 0) {
+        t2 = tcg_temp_new_i64();
+
+        get_avr64(t2, a->vrb, true);
+
+        tcg_gen_extract2_i64(t0, t1, t0, 64 - a->sh);
+        tcg_gen_extract2_i64(t1, t2, t1, 64 - a->sh);
+
+        tcg_temp_free_i64(t2);
+    }
+
+    set_avr64(a->vrt, t0, true);
+    set_avr64(a->vrt, t1, false);
+
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
+
+    return true;
+}
+
+static bool trans_VSRDBI(DisasContext *ctx, arg_VN *a)
+{
+    TCGv_i64 t2, t1, t0;
+
+    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+    REQUIRE_VECTOR(ctx);
+
+    t0 = tcg_temp_new_i64();
+    t1 = tcg_temp_new_i64();
+
+    get_avr64(t0, a->vrb, false);
+    get_avr64(t1, a->vrb, true);
+
+    if (a->sh != 0) {
+        t2 = tcg_temp_new_i64();
+
+        get_avr64(t2, a->vra, false);
+
+        tcg_gen_extract2_i64(t0, t0, t1, a->sh);
+        tcg_gen_extract2_i64(t1, t1, t2, a->sh);
+
+        tcg_temp_free_i64(t2);
+    }
+
+    set_avr64(a->vrt, t0, false);
+    set_avr64(a->vrt, t1, true);
+
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
+
+    return true;
+}
+
+static bool do_vexpand(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
+{
+    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+    REQUIRE_VECTOR(ctx);
+
+    tcg_gen_gvec_sari(vece, avr_full_offset(a->vrt), avr_full_offset(a->vrb),
+                      (8 << vece) - 1, 16, 16);
+
+    return true;
+}
+
+TRANS(VEXPANDBM, do_vexpand, MO_8)
+TRANS(VEXPANDHM, do_vexpand, MO_16)
+TRANS(VEXPANDWM, do_vexpand, MO_32)
+TRANS(VEXPANDDM, do_vexpand, MO_64)
+
+static bool trans_VEXPANDQM(DisasContext *ctx, arg_VX_tb *a)
+{
+    TCGv_i64 tmp;
+
+    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+    REQUIRE_VECTOR(ctx);
+
+    tmp = tcg_temp_new_i64();
+
+    get_avr64(tmp, a->vrb, true);
+    tcg_gen_sari_i64(tmp, tmp, 63);
+    set_avr64(a->vrt, tmp, false);
+    set_avr64(a->vrt, tmp, true);
+
+    tcg_temp_free_i64(tmp);
+    return true;
+}
+
+static bool do_vextractm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
+{
+    const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece,
+                   mask = dup_const(vece, 1ULL << (elem_width - 1));
+    uint64_t i, j;
+    TCGv_i64 lo, hi, t0, t1;
+
+    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+    REQUIRE_VECTOR(ctx);
+
+    hi = tcg_temp_new_i64();
+    lo = tcg_temp_new_i64();
+    t0 = tcg_temp_new_i64();
+    t1 = tcg_temp_new_i64();
+
+    get_avr64(lo, a->vrb, false);
+    get_avr64(hi, a->vrb, true);
+
+    tcg_gen_andi_i64(lo, lo, mask);
+    tcg_gen_andi_i64(hi, hi, mask);
+
+    /*
+     * Gather the most significant bit of each element in the highest
+     * element. E.g. 
for bytes: + * aXXXXXXXbXXXXXXXcXXXXXXXdXXXXXXXeXXXXXXXfXXXXXXXgXXXXXXXhXXXXXXX + * & dup(1 << (elem_width - 1)) + * a0000000b0000000c0000000d0000000e0000000f0000000g0000000h0000000 + * << 32 - 4 + * 0000e0000000f0000000g0000000h00000000000000000000000000000000000 + * | + * a000e000b000f000c000g000d000h000e0000000f0000000g0000000h0000000 + * << 16 - 2 + * 00c000g000d000h000e0000000f0000000g0000000h000000000000000000000 + * | + * a0c0e0g0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h0000000 + * << 8 - 1 + * 0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h00000000000000 + * | + * abcdefghbcdefgh0cdefgh00defgh000efgh0000fgh00000gh000000h0000000 + */ + for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) { + tcg_gen_shli_i64(t0, hi, j - i); + tcg_gen_shli_i64(t1, lo, j - i); + tcg_gen_or_i64(hi, hi, t0); + tcg_gen_or_i64(lo, lo, t1); + } + + tcg_gen_shri_i64(hi, hi, 64 - elem_count_half); + tcg_gen_extract2_i64(lo, lo, hi, 64 - elem_count_half); + tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], lo); + + tcg_temp_free_i64(hi); + tcg_temp_free_i64(lo); + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + + return true; +} + +TRANS(VEXTRACTBM, do_vextractm, MO_8) +TRANS(VEXTRACTHM, do_vextractm, MO_16) +TRANS(VEXTRACTWM, do_vextractm, MO_32) +TRANS(VEXTRACTDM, do_vextractm, MO_64) + +static bool trans_VEXTRACTQM(DisasContext *ctx, arg_VX_tb *a) +{ + TCGv_i64 tmp; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + tmp = tcg_temp_new_i64(); + + get_avr64(tmp, a->vrb, true); + tcg_gen_shri_i64(tmp, tmp, 63); + tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], tmp); + + tcg_temp_free_i64(tmp); + + return true; +} + +static bool do_mtvsrm(DisasContext *ctx, arg_VX_tb *a, unsigned vece) +{ + const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece; + uint64_t c; + int i, j; + TCGv_i64 hi, lo, t0, t1; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + hi = tcg_temp_new_i64(); + lo = tcg_temp_new_i64(); + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + + tcg_gen_extu_tl_i64(t0, cpu_gpr[a->vrb]); + tcg_gen_extract_i64(hi, t0, elem_count_half, elem_count_half); + tcg_gen_extract_i64(lo, t0, 0, elem_count_half); + + /* + * Spread the bits into their respective elements. + * E.g. 
for bytes: + * 00000000000000000000000000000000000000000000000000000000abcdefgh + * << 32 - 4 + * 0000000000000000000000000000abcdefgh0000000000000000000000000000 + * | + * 0000000000000000000000000000abcdefgh00000000000000000000abcdefgh + * << 16 - 2 + * 00000000000000abcdefgh00000000000000000000abcdefgh00000000000000 + * | + * 00000000000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh + * << 8 - 1 + * 0000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh0000000 + * | + * 0000000abcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgh + * & dup(1) + * 0000000a0000000b0000000c0000000d0000000e0000000f0000000g0000000h + * * 0xff + * aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh + */ + for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) { + tcg_gen_shli_i64(t0, hi, j - i); + tcg_gen_shli_i64(t1, lo, j - i); + tcg_gen_or_i64(hi, hi, t0); + tcg_gen_or_i64(lo, lo, t1); + } + + c = dup_const(vece, 1); + tcg_gen_andi_i64(hi, hi, c); + tcg_gen_andi_i64(lo, lo, c); + + c = MAKE_64BIT_MASK(0, elem_width); + tcg_gen_muli_i64(hi, hi, c); + tcg_gen_muli_i64(lo, lo, c); + + set_avr64(a->vrt, lo, false); + set_avr64(a->vrt, hi, true); + + tcg_temp_free_i64(hi); + tcg_temp_free_i64(lo); + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + + return true; +} + +TRANS(MTVSRBM, do_mtvsrm, MO_8) +TRANS(MTVSRHM, do_mtvsrm, MO_16) +TRANS(MTVSRWM, do_mtvsrm, MO_32) +TRANS(MTVSRDM, do_mtvsrm, MO_64) + +static bool trans_MTVSRQM(DisasContext *ctx, arg_VX_tb *a) +{ + TCGv_i64 tmp; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + tmp = tcg_temp_new_i64(); + + tcg_gen_ext_tl_i64(tmp, cpu_gpr[a->vrb]); + tcg_gen_sextract_i64(tmp, tmp, 0, 1); + set_avr64(a->vrt, tmp, false); + set_avr64(a->vrt, tmp, true); + + tcg_temp_free_i64(tmp); + + return true; +} + +static bool trans_MTVSRBMI(DisasContext *ctx, arg_DX_b *a) +{ + const uint64_t mask = dup_const(MO_8, 1); + uint64_t hi, lo; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + hi = extract16(a->b, 8, 8); + lo = extract16(a->b, 0, 8); + + for (int i = 4, j = 32; i > 0; i >>= 1, j >>= 1) { + hi |= hi << (j - i); + lo |= lo << (j - i); + } + + hi = (hi & mask) * 0xFF; + lo = (lo & mask) * 0xFF; + + set_avr64(a->vrt, tcg_constant_i64(hi), true); + set_avr64(a->vrt, tcg_constant_i64(lo), false); + + return true; +} + +static bool do_vcntmb(DisasContext *ctx, arg_VX_mp *a, int vece) +{ + TCGv_i64 rt, vrb, mask; + rt = tcg_const_i64(0); + vrb = tcg_temp_new_i64(); + mask = tcg_constant_i64(dup_const(vece, 1ULL << ((8 << vece) - 1))); + + for (int i = 0; i < 2; i++) { + get_avr64(vrb, a->vrb, i); + if (a->mp) { + tcg_gen_and_i64(vrb, mask, vrb); + } else { + tcg_gen_andc_i64(vrb, mask, vrb); + } + tcg_gen_ctpop_i64(vrb, vrb); + tcg_gen_add_i64(rt, rt, vrb); + } + + tcg_gen_shli_i64(rt, rt, TARGET_LONG_BITS - 8 + vece); + tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], rt); + + tcg_temp_free_i64(vrb); + tcg_temp_free_i64(rt); + + return true; +} + +TRANS(VCNTMBB, do_vcntmb, MO_8) +TRANS(VCNTMBH, do_vcntmb, MO_16) +TRANS(VCNTMBW, do_vcntmb, MO_32) +TRANS(VCNTMBD, do_vcntmb, MO_64) + +static bool do_vstri(DisasContext *ctx, arg_VX_tb_rc *a, + void (*gen_helper)(TCGv_i32, TCGv_ptr, TCGv_ptr)) +{ + TCGv_ptr vrt, vrb; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + vrt = gen_avr_ptr(a->vrt); + vrb = gen_avr_ptr(a->vrb); + + if (a->rc) { + gen_helper(cpu_crf[6], vrt, vrb); + } else { + TCGv_i32 discard = tcg_temp_new_i32(); + gen_helper(discard, vrt, vrb); + tcg_temp_free_i32(discard); + } + + 
tcg_temp_free_ptr(vrt); + tcg_temp_free_ptr(vrb); + + return true; +} + +TRANS(VSTRIBL, do_vstri, gen_helper_VSTRIBL) +TRANS(VSTRIBR, do_vstri, gen_helper_VSTRIBR) +TRANS(VSTRIHL, do_vstri, gen_helper_VSTRIHL) +TRANS(VSTRIHR, do_vstri, gen_helper_VSTRIHR) + +static bool do_vclrb(DisasContext *ctx, arg_VX *a, bool right) +{ + TCGv_i64 rb, mh, ml, tmp, + ones = tcg_constant_i64(-1), + zero = tcg_constant_i64(0); + + rb = tcg_temp_new_i64(); + mh = tcg_temp_new_i64(); + ml = tcg_temp_new_i64(); + tmp = tcg_temp_new_i64(); + + tcg_gen_extu_tl_i64(rb, cpu_gpr[a->vrb]); + tcg_gen_andi_i64(tmp, rb, 7); + tcg_gen_shli_i64(tmp, tmp, 3); + if (right) { + tcg_gen_shr_i64(tmp, ones, tmp); + } else { + tcg_gen_shl_i64(tmp, ones, tmp); + } + tcg_gen_not_i64(tmp, tmp); + + if (right) { + tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(8), + tmp, ones); + tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(8), + zero, tmp); + tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(16), + ml, ones); + } else { + tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(8), + tmp, ones); + tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(8), + zero, tmp); + tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(16), + mh, ones); + } + + get_avr64(tmp, a->vra, true); + tcg_gen_and_i64(tmp, tmp, mh); + set_avr64(a->vrt, tmp, true); + + get_avr64(tmp, a->vra, false); + tcg_gen_and_i64(tmp, tmp, ml); + set_avr64(a->vrt, tmp, false); + + tcg_temp_free_i64(rb); + tcg_temp_free_i64(mh); + tcg_temp_free_i64(ml); + tcg_temp_free_i64(tmp); + + return true; +} + +TRANS(VCLRLB, do_vclrb, false) +TRANS(VCLRRB, do_vclrb, true) + #define GEN_VAFORM_PAIRED(name0, name1, opc2) \ static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ { \ @@ -1280,61 +2566,196 @@ static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ tcg_temp_free_ptr(rd); \ } -GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16) - -static void gen_vmladduhm(DisasContext *ctx) -{ - TCGv_ptr ra, rb, rc, rd; - if (unlikely(!ctx->altivec_enabled)) { - gen_exception(ctx, POWERPC_EXCP_VPU); - return; - } - ra = gen_avr_ptr(rA(ctx->opcode)); - rb = gen_avr_ptr(rB(ctx->opcode)); - rc = gen_avr_ptr(rC(ctx->opcode)); - rd = gen_avr_ptr(rD(ctx->opcode)); - gen_helper_vmladduhm(rd, ra, rb, rc); - tcg_temp_free_ptr(ra); - tcg_temp_free_ptr(rb); - tcg_temp_free_ptr(rc); - tcg_temp_free_ptr(rd); -} - -static void gen_vpermr(DisasContext *ctx) -{ - TCGv_ptr ra, rb, rc, rd; - if (unlikely(!ctx->altivec_enabled)) { - gen_exception(ctx, POWERPC_EXCP_VPU); - return; - } - ra = gen_avr_ptr(rA(ctx->opcode)); - rb = gen_avr_ptr(rB(ctx->opcode)); - rc = gen_avr_ptr(rC(ctx->opcode)); - rd = gen_avr_ptr(rD(ctx->opcode)); - gen_helper_vpermr(cpu_env, rd, ra, rb, rc); - tcg_temp_free_ptr(ra); - tcg_temp_free_ptr(rb); - tcg_temp_free_ptr(rc); - tcg_temp_free_ptr(rd); -} - -GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18) -GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19) -GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20) -GEN_VAFORM_PAIRED(vsel, vperm, 21) GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23) +static bool do_va_helper(DisasContext *ctx, arg_VA *a, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr)) +{ + TCGv_ptr vrt, vra, vrb, vrc; + REQUIRE_VECTOR(ctx); + + vrt = gen_avr_ptr(a->vrt); + vra = gen_avr_ptr(a->vra); + vrb = gen_avr_ptr(a->vrb); + vrc = gen_avr_ptr(a->rc); + gen_helper(vrt, vra, vrb, vrc); + tcg_temp_free_ptr(vrt); + tcg_temp_free_ptr(vra); + tcg_temp_free_ptr(vrb); + tcg_temp_free_ptr(vrc); + + return true; +} + +TRANS_FLAGS2(ALTIVEC_207, 
VADDECUQ, do_va_helper, gen_helper_VADDECUQ) +TRANS_FLAGS2(ALTIVEC_207, VADDEUQM, do_va_helper, gen_helper_VADDEUQM) + +TRANS_FLAGS2(ALTIVEC_207, VSUBEUQM, do_va_helper, gen_helper_VSUBEUQM) +TRANS_FLAGS2(ALTIVEC_207, VSUBECUQ, do_va_helper, gen_helper_VSUBECUQ) + +TRANS_FLAGS(ALTIVEC, VPERM, do_va_helper, gen_helper_VPERM) +TRANS_FLAGS2(ISA300, VPERMR, do_va_helper, gen_helper_VPERMR) + +static void gen_vmladduhm_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, + TCGv_vec c) +{ + tcg_gen_mul_vec(vece, t, a, b); + tcg_gen_add_vec(vece, t, t, c); +} + +static bool trans_VMLADDUHM(DisasContext *ctx, arg_VA *a) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_add_vec, INDEX_op_mul_vec, 0 + }; + + static const GVecGen4 op = { + .fno = gen_helper_VMLADDUHM, + .fniv = gen_vmladduhm_vec, + .opt_opc = vecop_list, + .vece = MO_16 + }; + + REQUIRE_INSNS_FLAGS(ctx, ALTIVEC); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_4(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), avr_full_offset(a->rc), + 16, 16, &op); + + return true; +} + +static bool trans_VSEL(DisasContext *ctx, arg_VA *a) +{ + REQUIRE_INSNS_FLAGS(ctx, ALTIVEC); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_bitsel(MO_64, avr_full_offset(a->vrt), avr_full_offset(a->rc), + avr_full_offset(a->vrb), avr_full_offset(a->vra), + 16, 16); + + return true; +} + +TRANS_FLAGS(ALTIVEC, VMSUMUBM, do_va_helper, gen_helper_VMSUMUBM) +TRANS_FLAGS(ALTIVEC, VMSUMMBM, do_va_helper, gen_helper_VMSUMMBM) +TRANS_FLAGS(ALTIVEC, VMSUMSHM, do_va_helper, gen_helper_VMSUMSHM) +TRANS_FLAGS(ALTIVEC, VMSUMUHM, do_va_helper, gen_helper_VMSUMUHM) + +static bool do_va_env_helper(DisasContext *ctx, arg_VA *a, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr)) +{ + TCGv_ptr vrt, vra, vrb, vrc; + REQUIRE_VECTOR(ctx); + + vrt = gen_avr_ptr(a->vrt); + vra = gen_avr_ptr(a->vra); + vrb = gen_avr_ptr(a->vrb); + vrc = gen_avr_ptr(a->rc); + gen_helper(cpu_env, vrt, vra, vrb, vrc); + tcg_temp_free_ptr(vrt); + tcg_temp_free_ptr(vra); + tcg_temp_free_ptr(vrb); + tcg_temp_free_ptr(vrc); + + return true; +} + +TRANS_FLAGS(ALTIVEC, VMSUMUHS, do_va_env_helper, gen_helper_VMSUMUHS) +TRANS_FLAGS(ALTIVEC, VMSUMSHS, do_va_env_helper, gen_helper_VMSUMSHS) + +TRANS_FLAGS(ALTIVEC, VMHADDSHS, do_va_env_helper, gen_helper_VMHADDSHS) +TRANS_FLAGS(ALTIVEC, VMHRADDSHS, do_va_env_helper, gen_helper_VMHRADDSHS) + GEN_VXFORM_NOA(vclzb, 1, 28) GEN_VXFORM_NOA(vclzh, 1, 29) GEN_VXFORM_TRANS(vclzw, 1, 30) GEN_VXFORM_TRANS(vclzd, 1, 31) -GEN_VXFORM_NOA_2(vnegw, 1, 24, 6) -GEN_VXFORM_NOA_2(vnegd, 1, 24, 7) -GEN_VXFORM_NOA_2(vextsb2w, 1, 24, 16) -GEN_VXFORM_NOA_2(vextsh2w, 1, 24, 17) -GEN_VXFORM_NOA_2(vextsb2d, 1, 24, 24) -GEN_VXFORM_NOA_2(vextsh2d, 1, 24, 25) -GEN_VXFORM_NOA_2(vextsw2d, 1, 24, 26) + +static bool do_vneg(DisasContext *ctx, arg_VX_tb *a, unsigned vece) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_neg(vece, avr_full_offset(a->vrt), avr_full_offset(a->vrb), + 16, 16); + return true; +} + +TRANS(VNEGW, do_vneg, MO_32) +TRANS(VNEGD, do_vneg, MO_64) + +static void gen_vexts_i64(TCGv_i64 t, TCGv_i64 b, int64_t s) +{ + tcg_gen_sextract_i64(t, b, 0, 64 - s); +} + +static void gen_vexts_i32(TCGv_i32 t, TCGv_i32 b, int32_t s) +{ + tcg_gen_sextract_i32(t, b, 0, 32 - s); +} + +static void gen_vexts_vec(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t s) +{ + tcg_gen_shli_vec(vece, t, b, s); + tcg_gen_sari_vec(vece, t, t, s); +} + +static bool do_vexts(DisasContext *ctx, arg_VX_tb *a, unsigned vece, int64_t s) +{ + static 
const TCGOpcode vecop_list[] = { + INDEX_op_shli_vec, INDEX_op_sari_vec, 0 + }; + + static const GVecGen2i op[2] = { + { + .fni4 = gen_vexts_i32, + .fniv = gen_vexts_vec, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vexts_i64, + .fniv = gen_vexts_vec, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_2i(avr_full_offset(a->vrt), avr_full_offset(a->vrb), + 16, 16, s, &op[vece - MO_32]); + + return true; +} + +TRANS(VEXTSB2W, do_vexts, MO_32, 24); +TRANS(VEXTSH2W, do_vexts, MO_32, 16); +TRANS(VEXTSB2D, do_vexts, MO_64, 56); +TRANS(VEXTSH2D, do_vexts, MO_64, 48); +TRANS(VEXTSW2D, do_vexts, MO_64, 32); + +static bool trans_VEXTSD2Q(DisasContext *ctx, arg_VX_tb *a) +{ + TCGv_i64 tmp; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + tmp = tcg_temp_new_i64(); + + get_avr64(tmp, a->vrb, false); + set_avr64(a->vrt, tmp, false); + tcg_gen_sari_i64(tmp, tmp, 63); + set_avr64(a->vrt, tmp, true); + + tcg_temp_free_i64(tmp); + return true; +} + GEN_VXFORM_NOA_2(vctzb, 1, 24, 28) GEN_VXFORM_NOA_2(vctzh, 1, 24, 29) GEN_VXFORM_NOA_2(vctzw, 1, 24, 30) @@ -1359,7 +2780,6 @@ GEN_VXFORM_TRANS(vgbbd, 6, 20); GEN_VXFORM(vpmsumb, 4, 16) GEN_VXFORM(vpmsumh, 4, 17) GEN_VXFORM(vpmsumw, 4, 18) -GEN_VXFORM(vpmsumd, 4, 19) #define GEN_BCD(op) \ static void gen_##op(DisasContext *ctx) \ @@ -1483,8 +2903,6 @@ static void gen_xpnd04_2(DisasContext *ctx) } -GEN_VXFORM_DUAL(vsubcuw, PPC_ALTIVEC, PPC_NONE, \ - xpnd04_1, PPC_NONE, PPC2_ISA300) GEN_VXFORM_DUAL(vsubsws, PPC_ALTIVEC, PPC_NONE, \ xpnd04_2, PPC_NONE, PPC2_ISA300) @@ -1504,11 +2922,6 @@ GEN_VXFORM_DUAL(vsubuwm, PPC_ALTIVEC, PPC_NONE, \ bcdus, PPC_NONE, PPC2_ISA300) GEN_VXFORM_DUAL(vsubsbs, PPC_ALTIVEC, PPC_NONE, \ bcdtrunc, PPC_NONE, PPC2_ISA300) -GEN_VXFORM_DUAL(vsubuqm, PPC2_ALTIVEC_207, PPC_NONE, \ - bcdtrunc, PPC_NONE, PPC2_ISA300) -GEN_VXFORM_DUAL(vsubcuq, PPC2_ALTIVEC_207, PPC_NONE, \ - bcdutrunc, PPC_NONE, PPC2_ISA300) - static void gen_vsbox(DisasContext *ctx) { @@ -1559,6 +2972,683 @@ GEN_VXFORM3(vpermxor, 22, 0xFF) GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE, vpermxor, PPC_NONE, PPC2_ALTIVEC_207) +static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a) +{ + static const GVecGen3 g = { + .fni8 = gen_helper_CFUGED, + .vece = MO_64, + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, &g); + + return true; +} + +static bool trans_VCLZDM(DisasContext *ctx, arg_VX *a) +{ + static const GVecGen3i g = { + .fni8 = do_cntzdm, + .vece = MO_64, + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, false, &g); + + return true; +} + +static bool trans_VCTZDM(DisasContext *ctx, arg_VX *a) +{ + static const GVecGen3i g = { + .fni8 = do_cntzdm, + .vece = MO_64, + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, true, &g); + + return true; +} + +static bool trans_VPDEPD(DisasContext *ctx, arg_VX *a) +{ + static const GVecGen3 g = { + .fni8 = gen_helper_PDEPD, + .vece = MO_64, + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, &g); + + return true; +} + +static bool 
trans_VPEXTD(DisasContext *ctx, arg_VX *a)
+{
+    static const GVecGen3 g = {
+        .fni8 = gen_helper_PEXTD,
+        .vece = MO_64,
+    };
+
+    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+    REQUIRE_VECTOR(ctx);
+
+    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+                   avr_full_offset(a->vrb), 16, 16, &g);
+
+    return true;
+}
+
+static bool trans_VMSUMUDM(DisasContext *ctx, arg_VA *a)
+{
+    TCGv_i64 rl, rh, src1, src2;
+    int dw;
+
+    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+    REQUIRE_VECTOR(ctx);
+
+    rh = tcg_temp_new_i64();
+    rl = tcg_temp_new_i64();
+    src1 = tcg_temp_new_i64();
+    src2 = tcg_temp_new_i64();
+
+    get_avr64(rl, a->rc, false);
+    get_avr64(rh, a->rc, true);
+
+    for (dw = 0; dw < 2; dw++) {
+        get_avr64(src1, a->vra, dw);
+        get_avr64(src2, a->vrb, dw);
+        tcg_gen_mulu2_i64(src1, src2, src1, src2);
+        tcg_gen_add2_i64(rl, rh, rl, rh, src1, src2);
+    }
+
+    set_avr64(a->vrt, rl, false);
+    set_avr64(a->vrt, rh, true);
+
+    tcg_temp_free_i64(rl);
+    tcg_temp_free_i64(rh);
+    tcg_temp_free_i64(src1);
+    tcg_temp_free_i64(src2);
+
+    return true;
+}
+
+static bool trans_VMSUMCUD(DisasContext *ctx, arg_VA *a)
+{
+    TCGv_i64 tmp0, tmp1, prod1h, prod1l, prod0h, prod0l, zero;
+
+    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+    REQUIRE_VECTOR(ctx);
+
+    tmp0 = tcg_temp_new_i64();
+    tmp1 = tcg_temp_new_i64();
+    prod1h = tcg_temp_new_i64();
+    prod1l = tcg_temp_new_i64();
+    prod0h = tcg_temp_new_i64();
+    prod0l = tcg_temp_new_i64();
+    zero = tcg_constant_i64(0);
+
+    /* prod1 = vsr[vra+32].dw[1] * vsr[vrb+32].dw[1] */
+    get_avr64(tmp0, a->vra, false);
+    get_avr64(tmp1, a->vrb, false);
+    tcg_gen_mulu2_i64(prod1l, prod1h, tmp0, tmp1);
+
+    /* prod0 = vsr[vra+32].dw[0] * vsr[vrb+32].dw[0] */
+    get_avr64(tmp0, a->vra, true);
+    get_avr64(tmp1, a->vrb, true);
+    tcg_gen_mulu2_i64(prod0l, prod0h, tmp0, tmp1);
+
+    /* Sum the lower 64-bit elements */
+    get_avr64(tmp1, a->rc, false);
+    tcg_gen_add2_i64(tmp1, tmp0, tmp1, zero, prod1l, zero);
+    tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod0l, zero);
+
+    /*
+     * Discard the lower 64 bits, leaving the carry into bit 64.
+     * Then sum the higher 64-bit elements.
+ */ + get_avr64(tmp1, a->rc, true); + tcg_gen_add2_i64(tmp1, tmp0, tmp0, zero, tmp1, zero); + tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod1h, zero); + tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod0h, zero); + + /* Discard 64 more bits to complete the CHOP128(temp >> 128) */ + set_avr64(a->vrt, tmp0, false); + set_avr64(a->vrt, zero, true); + + tcg_temp_free_i64(tmp0); + tcg_temp_free_i64(tmp1); + tcg_temp_free_i64(prod1h); + tcg_temp_free_i64(prod1l); + tcg_temp_free_i64(prod0h); + tcg_temp_free_i64(prod0l); + + return true; +} + +static bool do_vx_helper(DisasContext *ctx, arg_VX *a, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr)) +{ + TCGv_ptr ra, rb, rd; + REQUIRE_VECTOR(ctx); + + ra = gen_avr_ptr(a->vra); + rb = gen_avr_ptr(a->vrb); + rd = gen_avr_ptr(a->vrt); + gen_helper(rd, ra, rb); + tcg_temp_free_ptr(ra); + tcg_temp_free_ptr(rb); + tcg_temp_free_ptr(rd); + + return true; +} + +TRANS_FLAGS2(ALTIVEC_207, VADDCUQ, do_vx_helper, gen_helper_VADDCUQ) +TRANS_FLAGS2(ALTIVEC_207, VADDUQM, do_vx_helper, gen_helper_VADDUQM) + +TRANS_FLAGS2(ALTIVEC_207, VPMSUMD, do_vx_helper, gen_helper_VPMSUMD) + +TRANS_FLAGS2(ALTIVEC_207, VSUBCUQ, do_vx_helper, gen_helper_VSUBCUQ) +TRANS_FLAGS2(ALTIVEC_207, VSUBUQM, do_vx_helper, gen_helper_VSUBUQM) + +static void gen_VADDCUW_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + tcg_gen_not_vec(vece, a, a); + tcg_gen_cmp_vec(TCG_COND_LTU, vece, t, a, b); + tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(t, vece, 1)); +} + +static void gen_VADDCUW_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + tcg_gen_not_i32(a, a); + tcg_gen_setcond_i32(TCG_COND_LTU, t, a, b); +} + +static void gen_VSUBCUW_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + tcg_gen_cmp_vec(TCG_COND_GEU, vece, t, a, b); + tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(t, vece, 1)); +} + +static void gen_VSUBCUW_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + tcg_gen_setcond_i32(TCG_COND_GEU, t, a, b); +} + +static bool do_vx_vaddsubcuw(DisasContext *ctx, arg_VX *a, int add) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_cmp_vec, 0 + }; + + static const GVecGen3 op[] = { + { + .fniv = gen_VSUBCUW_vec, + .fni4 = gen_VSUBCUW_i32, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_VADDCUW_vec, + .fni4 = gen_VADDCUW_i32, + .opt_opc = vecop_list, + .vece = MO_32 + }, + }; + + REQUIRE_INSNS_FLAGS(ctx, ALTIVEC); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, &op[add]); + + return true; +} + +TRANS(VSUBCUW, do_vx_vaddsubcuw, 0) +TRANS(VADDCUW, do_vx_vaddsubcuw, 1) + +static bool do_vx_vmuleo(DisasContext *ctx, arg_VX *a, bool even, + void (*gen_mul)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64)) +{ + TCGv_i64 vra, vrb, vrt0, vrt1; + REQUIRE_VECTOR(ctx); + + vra = tcg_temp_new_i64(); + vrb = tcg_temp_new_i64(); + vrt0 = tcg_temp_new_i64(); + vrt1 = tcg_temp_new_i64(); + + get_avr64(vra, a->vra, even); + get_avr64(vrb, a->vrb, even); + gen_mul(vrt0, vrt1, vra, vrb); + set_avr64(a->vrt, vrt0, false); + set_avr64(a->vrt, vrt1, true); + + tcg_temp_free_i64(vra); + tcg_temp_free_i64(vrb); + tcg_temp_free_i64(vrt0); + tcg_temp_free_i64(vrt1); + + return true; +} + +static bool trans_VMULLD(DisasContext *ctx, arg_VX *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_mul(MO_64, avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16); + + return true; +} + +TRANS_FLAGS(ALTIVEC, VMULESB, do_vx_helper, gen_helper_VMULESB) 
+TRANS_FLAGS(ALTIVEC, VMULOSB, do_vx_helper, gen_helper_VMULOSB) +TRANS_FLAGS(ALTIVEC, VMULEUB, do_vx_helper, gen_helper_VMULEUB) +TRANS_FLAGS(ALTIVEC, VMULOUB, do_vx_helper, gen_helper_VMULOUB) +TRANS_FLAGS(ALTIVEC, VMULESH, do_vx_helper, gen_helper_VMULESH) +TRANS_FLAGS(ALTIVEC, VMULOSH, do_vx_helper, gen_helper_VMULOSH) +TRANS_FLAGS(ALTIVEC, VMULEUH, do_vx_helper, gen_helper_VMULEUH) +TRANS_FLAGS(ALTIVEC, VMULOUH, do_vx_helper, gen_helper_VMULOUH) +TRANS_FLAGS2(ALTIVEC_207, VMULESW, do_vx_helper, gen_helper_VMULESW) +TRANS_FLAGS2(ALTIVEC_207, VMULOSW, do_vx_helper, gen_helper_VMULOSW) +TRANS_FLAGS2(ALTIVEC_207, VMULEUW, do_vx_helper, gen_helper_VMULEUW) +TRANS_FLAGS2(ALTIVEC_207, VMULOUW, do_vx_helper, gen_helper_VMULOUW) +TRANS_FLAGS2(ISA310, VMULESD, do_vx_vmuleo, true , tcg_gen_muls2_i64) +TRANS_FLAGS2(ISA310, VMULOSD, do_vx_vmuleo, false, tcg_gen_muls2_i64) +TRANS_FLAGS2(ISA310, VMULEUD, do_vx_vmuleo, true , tcg_gen_mulu2_i64) +TRANS_FLAGS2(ISA310, VMULOUD, do_vx_vmuleo, false, tcg_gen_mulu2_i64) + +static void do_vx_vmulhw_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign) +{ + TCGv_i64 hh, lh, temp; + + hh = tcg_temp_new_i64(); + lh = tcg_temp_new_i64(); + temp = tcg_temp_new_i64(); + + if (sign) { + tcg_gen_ext32s_i64(lh, a); + tcg_gen_ext32s_i64(temp, b); + } else { + tcg_gen_ext32u_i64(lh, a); + tcg_gen_ext32u_i64(temp, b); + } + tcg_gen_mul_i64(lh, lh, temp); + + if (sign) { + tcg_gen_sari_i64(hh, a, 32); + tcg_gen_sari_i64(temp, b, 32); + } else { + tcg_gen_shri_i64(hh, a, 32); + tcg_gen_shri_i64(temp, b, 32); + } + tcg_gen_mul_i64(hh, hh, temp); + + tcg_gen_shri_i64(lh, lh, 32); + tcg_gen_deposit_i64(t, hh, lh, 0, 32); + + tcg_temp_free_i64(hh); + tcg_temp_free_i64(lh); + tcg_temp_free_i64(temp); +} + +static void do_vx_vmulhd_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign) +{ + TCGv_i64 tlow; + + tlow = tcg_temp_new_i64(); + if (sign) { + tcg_gen_muls2_i64(tlow, t, a, b); + } else { + tcg_gen_mulu2_i64(tlow, t, a, b); + } + + tcg_temp_free_i64(tlow); +} + +static bool do_vx_mulh(DisasContext *ctx, arg_VX *a, bool sign, + void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, bool)) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + TCGv_i64 vra, vrb, vrt; + int i; + + vra = tcg_temp_new_i64(); + vrb = tcg_temp_new_i64(); + vrt = tcg_temp_new_i64(); + + for (i = 0; i < 2; i++) { + get_avr64(vra, a->vra, i); + get_avr64(vrb, a->vrb, i); + get_avr64(vrt, a->vrt, i); + + func(vrt, vra, vrb, sign); + + set_avr64(a->vrt, vrt, i); + } + + tcg_temp_free_i64(vra); + tcg_temp_free_i64(vrb); + tcg_temp_free_i64(vrt); + + return true; + +} + +TRANS(VMULHSW, do_vx_mulh, true , do_vx_vmulhw_i64) +TRANS(VMULHSD, do_vx_mulh, true , do_vx_vmulhd_i64) +TRANS(VMULHUW, do_vx_mulh, false, do_vx_vmulhw_i64) +TRANS(VMULHUD, do_vx_mulh, false, do_vx_vmulhd_i64) + +static void do_vavg(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, + void (*gen_shr_vec)(unsigned, TCGv_vec, TCGv_vec, int64_t)) +{ + TCGv_vec tmp = tcg_temp_new_vec_matching(t); + tcg_gen_or_vec(vece, tmp, a, b); + tcg_gen_and_vec(vece, tmp, tmp, tcg_constant_vec_matching(t, vece, 1)); + gen_shr_vec(vece, a, a, 1); + gen_shr_vec(vece, b, b, 1); + tcg_gen_add_vec(vece, t, a, b); + tcg_gen_add_vec(vece, t, t, tmp); + tcg_temp_free_vec(tmp); +} + +QEMU_FLATTEN +static void gen_vavgu(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + do_vavg(vece, t, a, b, tcg_gen_shri_vec); +} + +QEMU_FLATTEN +static void gen_vavgs(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + do_vavg(vece, t, a, b, tcg_gen_sari_vec); +} + +static 
bool do_vx_vavg(DisasContext *ctx, arg_VX *a, int sign, int vece) +{ + static const TCGOpcode vecop_list_s[] = { + INDEX_op_add_vec, INDEX_op_sari_vec, 0 + }; + static const TCGOpcode vecop_list_u[] = { + INDEX_op_add_vec, INDEX_op_shri_vec, 0 + }; + + static const GVecGen3 op[2][3] = { + { + { + .fniv = gen_vavgu, + .fno = gen_helper_VAVGUB, + .opt_opc = vecop_list_u, + .vece = MO_8 + }, + { + .fniv = gen_vavgu, + .fno = gen_helper_VAVGUH, + .opt_opc = vecop_list_u, + .vece = MO_16 + }, + { + .fniv = gen_vavgu, + .fno = gen_helper_VAVGUW, + .opt_opc = vecop_list_u, + .vece = MO_32 + }, + }, + { + { + .fniv = gen_vavgs, + .fno = gen_helper_VAVGSB, + .opt_opc = vecop_list_s, + .vece = MO_8 + }, + { + .fniv = gen_vavgs, + .fno = gen_helper_VAVGSH, + .opt_opc = vecop_list_s, + .vece = MO_16 + }, + { + .fniv = gen_vavgs, + .fno = gen_helper_VAVGSW, + .opt_opc = vecop_list_s, + .vece = MO_32 + }, + }, + }; + + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, &op[sign][vece]); + + + return true; +} + + +TRANS_FLAGS(ALTIVEC, VAVGSB, do_vx_vavg, 1, MO_8) +TRANS_FLAGS(ALTIVEC, VAVGSH, do_vx_vavg, 1, MO_16) +TRANS_FLAGS(ALTIVEC, VAVGSW, do_vx_vavg, 1, MO_32) +TRANS_FLAGS(ALTIVEC, VAVGUB, do_vx_vavg, 0, MO_8) +TRANS_FLAGS(ALTIVEC, VAVGUH, do_vx_vavg, 0, MO_16) +TRANS_FLAGS(ALTIVEC, VAVGUW, do_vx_vavg, 0, MO_32) + +static void gen_vabsdu(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + tcg_gen_umax_vec(vece, t, a, b); + tcg_gen_umin_vec(vece, a, a, b); + tcg_gen_sub_vec(vece, t, t, a); +} + +static bool do_vabsdu(DisasContext *ctx, arg_VX *a, const int vece) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_umax_vec, INDEX_op_umin_vec, INDEX_op_sub_vec, 0 + }; + + static const GVecGen3 op[] = { + { + .fniv = gen_vabsdu, + .fno = gen_helper_VABSDUB, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vabsdu, + .fno = gen_helper_VABSDUH, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vabsdu, + .fno = gen_helper_VABSDUW, + .opt_opc = vecop_list, + .vece = MO_32 + }, + }; + + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, &op[vece]); + + return true; +} + +TRANS_FLAGS2(ISA300, VABSDUB, do_vabsdu, MO_8) +TRANS_FLAGS2(ISA300, VABSDUH, do_vabsdu, MO_16) +TRANS_FLAGS2(ISA300, VABSDUW, do_vabsdu, MO_32) + +static bool do_vdiv_vmod(DisasContext *ctx, arg_VX *a, const int vece, + void (*func_32)(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b), + void (*func_64)(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)) +{ + const GVecGen3 op = { + .fni4 = func_32, + .fni8 = func_64, + .vece = vece + }; + + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, &op); + + return true; +} + +#define DIVU32(NAME, DIV) \ +static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) \ +{ \ + TCGv_i32 zero = tcg_constant_i32(0); \ + TCGv_i32 one = tcg_constant_i32(1); \ + tcg_gen_movcond_i32(TCG_COND_EQ, b, b, zero, one, b); \ + DIV(t, a, b); \ +} + +#define DIVS32(NAME, DIV) \ +static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) \ +{ \ + TCGv_i32 t0 = tcg_temp_new_i32(); \ + TCGv_i32 t1 = tcg_temp_new_i32(); \ + tcg_gen_setcondi_i32(TCG_COND_EQ, t0, a, INT32_MIN); \ + tcg_gen_setcondi_i32(TCG_COND_EQ, t1, b, -1); \ + tcg_gen_and_i32(t0, t0, t1); \ + tcg_gen_setcondi_i32(TCG_COND_EQ, t1, b, 0); \ + tcg_gen_or_i32(t0, t0, t1); \ + tcg_gen_movi_i32(t1, 0); \ + tcg_gen_movcond_i32(TCG_COND_NE, b, t0, 
t1, t0, b); \ + DIV(t, a, b); \ + tcg_temp_free_i32(t0); \ + tcg_temp_free_i32(t1); \ +} + +#define DIVU64(NAME, DIV) \ +static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) \ +{ \ + TCGv_i64 zero = tcg_constant_i64(0); \ + TCGv_i64 one = tcg_constant_i64(1); \ + tcg_gen_movcond_i64(TCG_COND_EQ, b, b, zero, one, b); \ + DIV(t, a, b); \ +} + +#define DIVS64(NAME, DIV) \ +static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) \ +{ \ + TCGv_i64 t0 = tcg_temp_new_i64(); \ + TCGv_i64 t1 = tcg_temp_new_i64(); \ + tcg_gen_setcondi_i64(TCG_COND_EQ, t0, a, INT64_MIN); \ + tcg_gen_setcondi_i64(TCG_COND_EQ, t1, b, -1); \ + tcg_gen_and_i64(t0, t0, t1); \ + tcg_gen_setcondi_i64(TCG_COND_EQ, t1, b, 0); \ + tcg_gen_or_i64(t0, t0, t1); \ + tcg_gen_movi_i64(t1, 0); \ + tcg_gen_movcond_i64(TCG_COND_NE, b, t0, t1, t0, b); \ + DIV(t, a, b); \ + tcg_temp_free_i64(t0); \ + tcg_temp_free_i64(t1); \ +} + +DIVS32(do_divsw, tcg_gen_div_i32) +DIVU32(do_divuw, tcg_gen_divu_i32) +DIVS64(do_divsd, tcg_gen_div_i64) +DIVU64(do_divud, tcg_gen_divu_i64) + +TRANS_FLAGS2(ISA310, VDIVSW, do_vdiv_vmod, MO_32, do_divsw, NULL) +TRANS_FLAGS2(ISA310, VDIVUW, do_vdiv_vmod, MO_32, do_divuw, NULL) +TRANS_FLAGS2(ISA310, VDIVSD, do_vdiv_vmod, MO_64, NULL, do_divsd) +TRANS_FLAGS2(ISA310, VDIVUD, do_vdiv_vmod, MO_64, NULL, do_divud) +TRANS_FLAGS2(ISA310, VDIVSQ, do_vx_helper, gen_helper_VDIVSQ) +TRANS_FLAGS2(ISA310, VDIVUQ, do_vx_helper, gen_helper_VDIVUQ) + +static void do_dives_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i64 val1, val2; + + val1 = tcg_temp_new_i64(); + val2 = tcg_temp_new_i64(); + + tcg_gen_ext_i32_i64(val1, a); + tcg_gen_ext_i32_i64(val2, b); + + /* (a << 32)/b */ + tcg_gen_shli_i64(val1, val1, 32); + tcg_gen_div_i64(val1, val1, val2); + + /* if quotient doesn't fit in 32 bits the result is undefined */ + tcg_gen_extrl_i64_i32(t, val1); + + tcg_temp_free_i64(val1); + tcg_temp_free_i64(val2); +} + +static void do_diveu_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i64 val1, val2; + + val1 = tcg_temp_new_i64(); + val2 = tcg_temp_new_i64(); + + tcg_gen_extu_i32_i64(val1, a); + tcg_gen_extu_i32_i64(val2, b); + + /* (a << 32)/b */ + tcg_gen_shli_i64(val1, val1, 32); + tcg_gen_divu_i64(val1, val1, val2); + + /* if quotient doesn't fit in 32 bits the result is undefined */ + tcg_gen_extrl_i64_i32(t, val1); + + tcg_temp_free_i64(val1); + tcg_temp_free_i64(val2); +} + +DIVS32(do_divesw, do_dives_i32) +DIVU32(do_diveuw, do_diveu_i32) + +DIVS32(do_modsw, tcg_gen_rem_i32) +DIVU32(do_moduw, tcg_gen_remu_i32) +DIVS64(do_modsd, tcg_gen_rem_i64) +DIVU64(do_modud, tcg_gen_remu_i64) + +TRANS_FLAGS2(ISA310, VDIVESW, do_vdiv_vmod, MO_32, do_divesw, NULL) +TRANS_FLAGS2(ISA310, VDIVEUW, do_vdiv_vmod, MO_32, do_diveuw, NULL) +TRANS_FLAGS2(ISA310, VDIVESD, do_vx_helper, gen_helper_VDIVESD) +TRANS_FLAGS2(ISA310, VDIVEUD, do_vx_helper, gen_helper_VDIVEUD) +TRANS_FLAGS2(ISA310, VDIVESQ, do_vx_helper, gen_helper_VDIVESQ) +TRANS_FLAGS2(ISA310, VDIVEUQ, do_vx_helper, gen_helper_VDIVEUQ) + +TRANS_FLAGS2(ISA310, VMODSW, do_vdiv_vmod, MO_32, do_modsw , NULL) +TRANS_FLAGS2(ISA310, VMODUW, do_vdiv_vmod, MO_32, do_moduw, NULL) +TRANS_FLAGS2(ISA310, VMODSD, do_vdiv_vmod, MO_64, NULL, do_modsd) +TRANS_FLAGS2(ISA310, VMODUD, do_vdiv_vmod, MO_64, NULL, do_modud) +TRANS_FLAGS2(ISA310, VMODSQ, do_vx_helper, gen_helper_VMODSQ) +TRANS_FLAGS2(ISA310, VMODUQ, do_vx_helper, gen_helper_VMODUQ) + +#undef DIVS32 +#undef DIVU32 +#undef DIVS64 +#undef DIVU64 + #undef GEN_VR_LDX #undef GEN_VR_STX #undef GEN_VR_LVE diff --git a/target/ppc/translate/vmx-ops.c.inc 
b/target/ppc/translate/vmx-ops.c.inc index f3f4855111..33fec8aca4 100644 --- a/target/ppc/translate/vmx-ops.c.inc +++ b/target/ppc/translate/vmx-ops.c.inc @@ -83,12 +83,6 @@ GEN_VXFORM(vminsb, 1, 12), GEN_VXFORM(vminsh, 1, 13), GEN_VXFORM(vminsw, 1, 14), GEN_VXFORM_207(vminsd, 1, 15), -GEN_VXFORM_DUAL(vavgub, vabsdub, 1, 16, PPC_ALTIVEC, PPC_NONE), -GEN_VXFORM_DUAL(vavguh, vabsduh, 1, 17, PPC_ALTIVEC, PPC_NONE), -GEN_VXFORM_DUAL(vavguw, vabsduw, 1, 18, PPC_ALTIVEC, PPC_NONE), -GEN_VXFORM(vavgsb, 1, 20), -GEN_VXFORM(vavgsh, 1, 21), -GEN_VXFORM(vavgsw, 1, 22), GEN_VXFORM(vmrghb, 6, 0), GEN_VXFORM(vmrghh, 6, 1), GEN_VXFORM(vmrghw, 6, 2), @@ -101,43 +95,13 @@ GEN_VXFORM_DUAL(vmrgow, vextuwlx, 6, 26, PPC_NONE, PPC2_ALTIVEC_207), GEN_VXFORM_300(vextubrx, 6, 28), GEN_VXFORM_300(vextuhrx, 6, 29), GEN_VXFORM_DUAL(vmrgew, vextuwrx, 6, 30, PPC_NONE, PPC2_ALTIVEC_207), -GEN_VXFORM(vmuloub, 4, 0), -GEN_VXFORM(vmulouh, 4, 1), -GEN_VXFORM_DUAL(vmulouw, vmuluwm, 4, 2, PPC_ALTIVEC, PPC_NONE), -GEN_VXFORM(vmulosb, 4, 4), -GEN_VXFORM(vmulosh, 4, 5), -GEN_VXFORM_207(vmulosw, 4, 6), -GEN_VXFORM_310(vmulld, 4, 7), -GEN_VXFORM(vmuleub, 4, 8), -GEN_VXFORM(vmuleuh, 4, 9), -GEN_VXFORM_DUAL(vmuleuw, vmulhuw, 4, 10, PPC_ALTIVEC, PPC_NONE), -GEN_VXFORM_310(vmulhud, 4, 11), -GEN_VXFORM(vmulesb, 4, 12), -GEN_VXFORM(vmulesh, 4, 13), -GEN_VXFORM_DUAL(vmulesw, vmulhsw, 4, 14, PPC_ALTIVEC, PPC_NONE), -GEN_VXFORM_310(vmulhsd, 4, 15), -GEN_VXFORM(vslb, 2, 4), -GEN_VXFORM(vslh, 2, 5), -GEN_VXFORM_DUAL(vslw, vrlwnm, 2, 6, PPC_ALTIVEC, PPC_NONE), -GEN_VXFORM_207(vsld, 2, 23), -GEN_VXFORM(vsrb, 2, 8), -GEN_VXFORM(vsrh, 2, 9), -GEN_VXFORM(vsrw, 2, 10), -GEN_VXFORM_207(vsrd, 2, 27), -GEN_VXFORM(vsrab, 2, 12), -GEN_VXFORM(vsrah, 2, 13), -GEN_VXFORM(vsraw, 2, 14), -GEN_VXFORM_207(vsrad, 2, 15), +GEN_VXFORM_207(vmuluwm, 4, 2), GEN_VXFORM_300(vsrv, 2, 28), GEN_VXFORM_300(vslv, 2, 29), GEN_VXFORM(vslo, 6, 16), GEN_VXFORM(vsro, 6, 17), -GEN_VXFORM(vaddcuw, 0, 6), -GEN_HANDLER_E_2(vprtybw, 0x4, 0x1, 0x18, 8, 0, PPC_NONE, PPC2_ISA300), -GEN_HANDLER_E_2(vprtybd, 0x4, 0x1, 0x18, 9, 0, PPC_NONE, PPC2_ISA300), -GEN_HANDLER_E_2(vprtybq, 0x4, 0x1, 0x18, 10, 0, PPC_NONE, PPC2_ISA300), -GEN_VXFORM_DUAL(vsubcuw, xpnd04_1, 0, 22, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM(xpnd04_1, 0, 22), GEN_VXFORM_300(bcdsr, 0, 23), GEN_VXFORM_300(bcdsr, 0, 31), GEN_VXFORM_DUAL(vaddubs, vmul10uq, 0, 8, PPC_ALTIVEC, PPC_NONE), @@ -152,17 +116,9 @@ GEN_VXFORM(vsubuws, 0, 26), GEN_VXFORM_DUAL(vsubsbs, bcdtrunc, 0, 28, PPC_ALTIVEC, PPC2_ISA300), GEN_VXFORM(vsubshs, 0, 29), GEN_VXFORM_DUAL(vsubsws, xpnd04_2, 0, 30, PPC_ALTIVEC, PPC_NONE), -GEN_VXFORM_207(vadduqm, 0, 4), -GEN_VXFORM_207(vaddcuq, 0, 5), -GEN_VXFORM_DUAL(vaddeuqm, vaddecuq, 30, 0xFF, PPC_NONE, PPC2_ALTIVEC_207), -GEN_VXFORM_DUAL(vsubuqm, bcdtrunc, 0, 20, PPC2_ALTIVEC_207, PPC2_ISA300), -GEN_VXFORM_DUAL(vsubcuq, bcdutrunc, 0, 21, PPC2_ALTIVEC_207, PPC2_ISA300), -GEN_VXFORM_DUAL(vsubeuqm, vsubecuq, 31, 0xFF, PPC_NONE, PPC2_ALTIVEC_207), -GEN_VXFORM(vrlb, 2, 0), -GEN_VXFORM(vrlh, 2, 1), -GEN_VXFORM_DUAL(vrlw, vrlwmi, 2, 2, PPC_ALTIVEC, PPC_NONE), -GEN_VXFORM_DUAL(vrld, vrldmi, 2, 3, PPC_NONE, PPC2_ALTIVEC_207), -GEN_VXFORM_DUAL(vsl, vrldnm, 2, 7, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM_300(bcdtrunc, 0, 20), +GEN_VXFORM_300(bcdutrunc, 0, 21), +GEN_VXFORM(vsl, 2, 7), GEN_VXFORM(vsr, 2, 11), GEN_VXFORM(vpkuhum, 7, 0), GEN_VXFORM(vpkuwum, 7, 1), @@ -198,22 +154,10 @@ GEN_HANDLER2_E(name, str, 0x4, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ISA300), GEN_VXRFORM1_300(name, name, #name, opc2, opc3) \ GEN_VXRFORM1_300(name##_dot, 
name##_, #name ".", opc2, (opc3 | (0x1 << 4))) -GEN_VXRFORM_300(vcmpnezb, 3, 4) -GEN_VXRFORM_300(vcmpnezh, 3, 5) -GEN_VXRFORM_300(vcmpnezw, 3, 6) -GEN_VXRFORM(vcmpgtsb, 3, 12) -GEN_VXRFORM(vcmpgtsh, 3, 13) -GEN_VXRFORM(vcmpgtsw, 3, 14) -GEN_VXRFORM(vcmpgtub, 3, 8) -GEN_VXRFORM(vcmpgtuh, 3, 9) -GEN_VXRFORM(vcmpgtuw, 3, 10) -GEN_VXRFORM_DUAL(vcmpeqfp, vcmpequd, 3, 3, PPC_ALTIVEC, PPC_NONE) +GEN_VXRFORM(vcmpeqfp, 3, 3) GEN_VXRFORM(vcmpgefp, 3, 7) -GEN_VXRFORM_DUAL(vcmpgtfp, vcmpgtud, 3, 11, PPC_ALTIVEC, PPC_NONE) -GEN_VXRFORM_DUAL(vcmpbfp, vcmpgtsd, 3, 15, PPC_ALTIVEC, PPC_NONE) -GEN_VXRFORM_DUAL(vcmpequb, vcmpneb, 3, 0, PPC_ALTIVEC, PPC_NONE) -GEN_VXRFORM_DUAL(vcmpequh, vcmpneh, 3, 1, PPC_ALTIVEC, PPC_NONE) -GEN_VXRFORM_DUAL(vcmpequw, vcmpnew, 3, 2, PPC_ALTIVEC, PPC_NONE) +GEN_VXRFORM(vcmpgtfp, 3, 11) +GEN_VXRFORM(vcmpbfp, 3, 15) #define GEN_VXFORM_DUAL_INV(name0, name1, opc2, opc3, inval0, inval1, type) \ GEN_OPCODE_DUAL(name0##_##name1, 0x04, opc2, opc3, inval0, inval1, type, \ @@ -225,27 +169,15 @@ GEN_VXFORM_DUAL_INV(vsplth, vextractuh, 6, 9, 0x00000000, 0x100000, GEN_VXFORM_DUAL_INV(vspltw, vextractuw, 6, 10, 0x00000000, 0x100000, PPC_ALTIVEC), GEN_VXFORM_300_EXT(vextractd, 6, 11, 0x100000), -GEN_VXFORM_DUAL_INV(vspltisb, vinsertb, 6, 12, 0x00000000, 0x100000, - PPC_ALTIVEC), -GEN_VXFORM_DUAL_INV(vspltish, vinserth, 6, 13, 0x00000000, 0x100000, - PPC_ALTIVEC), -GEN_VXFORM_DUAL_INV(vspltisw, vinsertw, 6, 14, 0x00000000, 0x100000, - PPC_ALTIVEC), -GEN_VXFORM_300_EXT(vinsertd, 6, 15, 0x100000), -GEN_VXFORM_300_EO(vnegw, 0x01, 0x18, 0x06), -GEN_VXFORM_300_EO(vnegd, 0x01, 0x18, 0x07), -GEN_VXFORM_300_EO(vextsb2w, 0x01, 0x18, 0x10), -GEN_VXFORM_300_EO(vextsh2w, 0x01, 0x18, 0x11), -GEN_VXFORM_300_EO(vextsb2d, 0x01, 0x18, 0x18), -GEN_VXFORM_300_EO(vextsh2d, 0x01, 0x18, 0x19), -GEN_VXFORM_300_EO(vextsw2d, 0x01, 0x18, 0x1A), +GEN_VXFORM(vspltisb, 6, 12), +GEN_VXFORM(vspltish, 6, 13), +GEN_VXFORM(vspltisw, 6, 14), GEN_VXFORM_300_EO(vctzb, 0x01, 0x18, 0x1C), GEN_VXFORM_300_EO(vctzh, 0x01, 0x18, 0x1D), GEN_VXFORM_300_EO(vctzw, 0x01, 0x18, 0x1E), GEN_VXFORM_300_EO(vctzd, 0x01, 0x18, 0x1F), GEN_VXFORM_300_EO(vclzlsbb, 0x01, 0x18, 0x0), GEN_VXFORM_300_EO(vctzlsbb, 0x01, 0x18, 0x1), -GEN_VXFORM_300(vpermr, 0x1D, 0xFF), #define GEN_VXFORM_NOA(name, opc2, opc3) \ GEN_HANDLER(name, 0x04, opc2, opc3, 0x001f0000, PPC_ALTIVEC) @@ -273,14 +205,8 @@ GEN_VXFORM_UIMM(vcfsx, 5, 13), GEN_VXFORM_UIMM(vctuxs, 5, 14), GEN_VXFORM_UIMM(vctsxs, 5, 15), - #define GEN_VAFORM_PAIRED(name0, name1, opc2) \ GEN_HANDLER(name0##_##name1, 0x04, opc2, 0xFF, 0x00000000, PPC_ALTIVEC) -GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16), -GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18), -GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19), -GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20), -GEN_VAFORM_PAIRED(vsel, vperm, 21), GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23), GEN_VXFORM_DUAL(vclzb, vpopcntb, 1, 28, PPC_NONE, PPC2_ALTIVEC_207), @@ -294,7 +220,6 @@ GEN_VXFORM_207(vgbbd, 6, 20), GEN_VXFORM_207(vpmsumb, 4, 16), GEN_VXFORM_207(vpmsumh, 4, 17), GEN_VXFORM_207(vpmsumw, 4, 18), -GEN_VXFORM_207(vpmsumd, 4, 19), GEN_VXFORM_207(vsbox, 4, 23), diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc index 57a7f73bba..4deb29ee42 100644 --- a/target/ppc/translate/vsx-impl.c.inc +++ b/target/ppc/translate/vsx-impl.c.inc @@ -1,23 +1,13 @@ /*** VSX extension ***/ -static inline void get_cpu_vsrh(TCGv_i64 dst, int n) +static inline void get_cpu_vsr(TCGv_i64 dst, int n, bool high) { - tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, true)); + 
tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, high)); } -static inline void get_cpu_vsrl(TCGv_i64 dst, int n) +static inline void set_cpu_vsr(int n, TCGv_i64 src, bool high) { - tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, false)); -} - -static inline void set_cpu_vsrh(int n, TCGv_i64 src) -{ - tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, true)); -} - -static inline void set_cpu_vsrl(int n, TCGv_i64 src) -{ - tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, false)); + tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, high)); } static inline TCGv_ptr gen_vsr_ptr(int reg) @@ -27,6 +17,13 @@ static inline TCGv_ptr gen_vsr_ptr(int reg) return r; } +static inline TCGv_ptr gen_acc_ptr(int reg) +{ + TCGv_ptr r = tcg_temp_new_ptr(); + tcg_gen_addi_ptr(r, cpu_env, acc_full_offset(reg)); + return r; +} + #define VSX_LOAD_SCALAR(name, operation) \ static void gen_##name(DisasContext *ctx) \ { \ @@ -41,7 +38,7 @@ static void gen_##name(DisasContext *ctx) \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ gen_qemu_##operation(ctx, t0, EA); \ - set_cpu_vsrh(xT(ctx->opcode), t0); \ + set_cpu_vsr(xT(ctx->opcode), t0, true); \ /* NOTE: cpu_vsrl is undefined */ \ tcg_temp_free(EA); \ tcg_temp_free_i64(t0); \ @@ -67,10 +64,10 @@ static void gen_lxvd2x(DisasContext *ctx) EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); gen_qemu_ld64_i64(ctx, t0, EA); - set_cpu_vsrh(xT(ctx->opcode), t0); + set_cpu_vsr(xT(ctx->opcode), t0, true); tcg_gen_addi_tl(EA, EA, 8); gen_qemu_ld64_i64(ctx, t0, EA); - set_cpu_vsrl(xT(ctx->opcode), t0); + set_cpu_vsr(xT(ctx->opcode), t0, false); tcg_temp_free(EA); tcg_temp_free_i64(t0); } @@ -95,22 +92,22 @@ static void gen_lxvw4x(DisasContext *ctx) TCGv_i64 t0 = tcg_temp_new_i64(); TCGv_i64 t1 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ); + tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ); tcg_gen_shri_i64(t1, t0, 32); tcg_gen_deposit_i64(xth, t1, t0, 32, 32); tcg_gen_addi_tl(EA, EA, 8); - tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ); + tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ); tcg_gen_shri_i64(t1, t0, 32); tcg_gen_deposit_i64(xtl, t1, t0, 32, 32); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); } else { - tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ); tcg_gen_addi_tl(EA, EA, 8); - tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ); } - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free(EA); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -162,8 +159,8 @@ static void gen_lxvdsx(DisasContext *ctx) gen_addr_reg_index(ctx, EA); data = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_Q)); - tcg_gen_gvec_dup_i64(MO_Q, vsr_full_offset(xT(ctx->opcode)), 16, 16, data); + tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UQ)); + tcg_gen_gvec_dup_i64(MO_UQ, vsr_full_offset(xT(ctx->opcode)), 16, 16, data); tcg_temp_free(EA); tcg_temp_free_i64(data); @@ -227,14 +224,14 @@ static void gen_lxvh8x(DisasContext *ctx) EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); - tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ); tcg_gen_addi_tl(EA, EA, 8); - tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ); if (ctx->le_mode) { gen_bswap16x8(xth, xtl, xth, xtl); } - 
set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free(EA); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -255,124 +252,16 @@ static void gen_lxvb16x(DisasContext *ctx) gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); - tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ); tcg_gen_addi_tl(EA, EA, 8); - tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ); - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free(EA); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); } -#define VSX_VECTOR_LOAD(name, op, indexed) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - int xt; \ - TCGv EA; \ - TCGv_i64 xth; \ - TCGv_i64 xtl; \ - \ - if (indexed) { \ - xt = xT(ctx->opcode); \ - } else { \ - xt = DQxT(ctx->opcode); \ - } \ - \ - if (xt < 32) { \ - if (unlikely(!ctx->vsx_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VSXU); \ - return; \ - } \ - } else { \ - if (unlikely(!ctx->altivec_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VPU); \ - return; \ - } \ - } \ - xth = tcg_temp_new_i64(); \ - xtl = tcg_temp_new_i64(); \ - gen_set_access_type(ctx, ACCESS_INT); \ - EA = tcg_temp_new(); \ - if (indexed) { \ - gen_addr_reg_index(ctx, EA); \ - } else { \ - gen_addr_imm_index(ctx, EA, 0x0F); \ - } \ - if (ctx->le_mode) { \ - tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_LEQ); \ - set_cpu_vsrl(xt, xtl); \ - tcg_gen_addi_tl(EA, EA, 8); \ - tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_LEQ); \ - set_cpu_vsrh(xt, xth); \ - } else { \ - tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_BEQ); \ - set_cpu_vsrh(xt, xth); \ - tcg_gen_addi_tl(EA, EA, 8); \ - tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_BEQ); \ - set_cpu_vsrl(xt, xtl); \ - } \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(xth); \ - tcg_temp_free_i64(xtl); \ -} - -VSX_VECTOR_LOAD(lxv, ld_i64, 0) -VSX_VECTOR_LOAD(lxvx, ld_i64, 1) - -#define VSX_VECTOR_STORE(name, op, indexed) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - int xt; \ - TCGv EA; \ - TCGv_i64 xth; \ - TCGv_i64 xtl; \ - \ - if (indexed) { \ - xt = xT(ctx->opcode); \ - } else { \ - xt = DQxT(ctx->opcode); \ - } \ - \ - if (xt < 32) { \ - if (unlikely(!ctx->vsx_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VSXU); \ - return; \ - } \ - } else { \ - if (unlikely(!ctx->altivec_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VPU); \ - return; \ - } \ - } \ - xth = tcg_temp_new_i64(); \ - xtl = tcg_temp_new_i64(); \ - get_cpu_vsrh(xth, xt); \ - get_cpu_vsrl(xtl, xt); \ - gen_set_access_type(ctx, ACCESS_INT); \ - EA = tcg_temp_new(); \ - if (indexed) { \ - gen_addr_reg_index(ctx, EA); \ - } else { \ - gen_addr_imm_index(ctx, EA, 0x0F); \ - } \ - if (ctx->le_mode) { \ - tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_LEQ); \ - tcg_gen_addi_tl(EA, EA, 8); \ - tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_LEQ); \ - } else { \ - tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_BEQ); \ - tcg_gen_addi_tl(EA, EA, 8); \ - tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_BEQ); \ - } \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(xth); \ - tcg_temp_free_i64(xtl); \ -} - -VSX_VECTOR_STORE(stxv, st_i64, 0) -VSX_VECTOR_STORE(stxvx, st_i64, 1) - #ifdef TARGET_PPC64 #define VSX_VECTOR_LOAD_STORE_LENGTH(name) \ static void gen_##name(DisasContext 
*ctx) \ @@ -406,30 +295,6 @@ VSX_VECTOR_LOAD_STORE_LENGTH(stxvl) VSX_VECTOR_LOAD_STORE_LENGTH(stxvll) #endif -#define VSX_LOAD_SCALAR_DS(name, operation) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 xth; \ - \ - if (unlikely(!ctx->altivec_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VPU); \ - return; \ - } \ - xth = tcg_temp_new_i64(); \ - gen_set_access_type(ctx, ACCESS_INT); \ - EA = tcg_temp_new(); \ - gen_addr_imm_index(ctx, EA, 0x03); \ - gen_qemu_##operation(ctx, xth, EA); \ - set_cpu_vsrh(rD(ctx->opcode) + 32, xth); \ - /* NOTE: cpu_vsrl is undefined */ \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(xth); \ -} - -VSX_LOAD_SCALAR_DS(lxsd, ld64_i64) -VSX_LOAD_SCALAR_DS(lxssp, ld32fs) - #define VSX_STORE_SCALAR(name, operation) \ static void gen_##name(DisasContext *ctx) \ { \ @@ -443,7 +308,7 @@ static void gen_##name(DisasContext *ctx) \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ - get_cpu_vsrh(t0, xS(ctx->opcode)); \ + get_cpu_vsr(t0, xS(ctx->opcode), true); \ gen_qemu_##operation(ctx, t0, EA); \ tcg_temp_free(EA); \ tcg_temp_free_i64(t0); \ @@ -468,10 +333,10 @@ static void gen_stxvd2x(DisasContext *ctx) gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); - get_cpu_vsrh(t0, xS(ctx->opcode)); + get_cpu_vsr(t0, xS(ctx->opcode), true); gen_qemu_st64_i64(ctx, t0, EA); tcg_gen_addi_tl(EA, EA, 8); - get_cpu_vsrl(t0, xS(ctx->opcode)); + get_cpu_vsr(t0, xS(ctx->opcode), false); gen_qemu_st64_i64(ctx, t0, EA); tcg_temp_free(EA); tcg_temp_free_i64(t0); @@ -489,8 +354,8 @@ static void gen_stxvw4x(DisasContext *ctx) } xsh = tcg_temp_new_i64(); xsl = tcg_temp_new_i64(); - get_cpu_vsrh(xsh, xS(ctx->opcode)); - get_cpu_vsrl(xsl, xS(ctx->opcode)); + get_cpu_vsr(xsh, xS(ctx->opcode), true); + get_cpu_vsr(xsl, xS(ctx->opcode), false); gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); @@ -500,17 +365,17 @@ static void gen_stxvw4x(DisasContext *ctx) tcg_gen_shri_i64(t0, xsh, 32); tcg_gen_deposit_i64(t1, t0, xsh, 32, 32); - tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ); + tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ); tcg_gen_addi_tl(EA, EA, 8); tcg_gen_shri_i64(t0, xsl, 32); tcg_gen_deposit_i64(t1, t0, xsl, 32, 32); - tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ); + tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); } else { - tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ); tcg_gen_addi_tl(EA, EA, 8); - tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ); } tcg_temp_free(EA); tcg_temp_free_i64(xsh); @@ -529,8 +394,8 @@ static void gen_stxvh8x(DisasContext *ctx) } xsh = tcg_temp_new_i64(); xsl = tcg_temp_new_i64(); - get_cpu_vsrh(xsh, xS(ctx->opcode)); - get_cpu_vsrl(xsl, xS(ctx->opcode)); + get_cpu_vsr(xsh, xS(ctx->opcode), true); + get_cpu_vsr(xsl, xS(ctx->opcode), false); gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); @@ -539,15 +404,15 @@ static void gen_stxvh8x(DisasContext *ctx) TCGv_i64 outl = tcg_temp_new_i64(); gen_bswap16x8(outh, outl, xsh, xsl); - tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEUQ); tcg_gen_addi_tl(EA, EA, 8); - tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEUQ); tcg_temp_free_i64(outh); 
tcg_temp_free_i64(outl); } else { - tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ); tcg_gen_addi_tl(EA, EA, 8); - tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ); } tcg_temp_free(EA); tcg_temp_free_i64(xsh); @@ -566,43 +431,19 @@ static void gen_stxvb16x(DisasContext *ctx) } xsh = tcg_temp_new_i64(); xsl = tcg_temp_new_i64(); - get_cpu_vsrh(xsh, xS(ctx->opcode)); - get_cpu_vsrl(xsl, xS(ctx->opcode)); + get_cpu_vsr(xsh, xS(ctx->opcode), true); + get_cpu_vsr(xsl, xS(ctx->opcode), false); gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); - tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ); tcg_gen_addi_tl(EA, EA, 8); - tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ); tcg_temp_free(EA); tcg_temp_free_i64(xsh); tcg_temp_free_i64(xsl); } -#define VSX_STORE_SCALAR_DS(name, operation) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 xth; \ - \ - if (unlikely(!ctx->altivec_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VPU); \ - return; \ - } \ - xth = tcg_temp_new_i64(); \ - get_cpu_vsrh(xth, rD(ctx->opcode) + 32); \ - gen_set_access_type(ctx, ACCESS_INT); \ - EA = tcg_temp_new(); \ - gen_addr_imm_index(ctx, EA, 0x03); \ - gen_qemu_##operation(ctx, xth, EA); \ - /* NOTE: cpu_vsrl is undefined */ \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(xth); \ -} - -VSX_STORE_SCALAR_DS(stxsd, st64_i64) -VSX_STORE_SCALAR_DS(stxssp, st32fs) - static void gen_mfvsrwz(DisasContext *ctx) { if (xS(ctx->opcode) < 32) { @@ -618,7 +459,7 @@ static void gen_mfvsrwz(DisasContext *ctx) } TCGv_i64 tmp = tcg_temp_new_i64(); TCGv_i64 xsh = tcg_temp_new_i64(); - get_cpu_vsrh(xsh, xS(ctx->opcode)); + get_cpu_vsr(xsh, xS(ctx->opcode), true); tcg_gen_ext32u_i64(tmp, xsh); tcg_gen_trunc_i64_tl(cpu_gpr[rA(ctx->opcode)], tmp); tcg_temp_free_i64(tmp); @@ -642,7 +483,7 @@ static void gen_mtvsrwa(DisasContext *ctx) TCGv_i64 xsh = tcg_temp_new_i64(); tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]); tcg_gen_ext32s_i64(xsh, tmp); - set_cpu_vsrh(xT(ctx->opcode), xsh); + set_cpu_vsr(xT(ctx->opcode), xsh, true); tcg_temp_free_i64(tmp); tcg_temp_free_i64(xsh); } @@ -664,7 +505,7 @@ static void gen_mtvsrwz(DisasContext *ctx) TCGv_i64 xsh = tcg_temp_new_i64(); tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]); tcg_gen_ext32u_i64(xsh, tmp); - set_cpu_vsrh(xT(ctx->opcode), xsh); + set_cpu_vsr(xT(ctx->opcode), xsh, true); tcg_temp_free_i64(tmp); tcg_temp_free_i64(xsh); } @@ -685,7 +526,7 @@ static void gen_mfvsrd(DisasContext *ctx) } } t0 = tcg_temp_new_i64(); - get_cpu_vsrh(t0, xS(ctx->opcode)); + get_cpu_vsr(t0, xS(ctx->opcode), true); tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0); tcg_temp_free_i64(t0); } @@ -706,7 +547,7 @@ static void gen_mtvsrd(DisasContext *ctx) } t0 = tcg_temp_new_i64(); tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]); - set_cpu_vsrh(xT(ctx->opcode), t0); + set_cpu_vsr(xT(ctx->opcode), t0, true); tcg_temp_free_i64(t0); } @@ -725,7 +566,7 @@ static void gen_mfvsrld(DisasContext *ctx) } } t0 = tcg_temp_new_i64(); - get_cpu_vsrl(t0, xS(ctx->opcode)); + get_cpu_vsr(t0, xS(ctx->opcode), false); tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0); tcg_temp_free_i64(t0); } @@ -751,10 +592,10 @@ static void gen_mtvsrdd(DisasContext *ctx) } else { tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]); } - set_cpu_vsrh(xT(ctx->opcode), t0); + 
set_cpu_vsr(xT(ctx->opcode), t0, true); tcg_gen_mov_i64(t0, cpu_gpr[rB(ctx->opcode)]); - set_cpu_vsrl(xT(ctx->opcode), t0); + set_cpu_vsr(xT(ctx->opcode), t0, false); tcg_temp_free_i64(t0); } @@ -776,66 +617,23 @@ static void gen_mtvsrws(DisasContext *ctx) t0 = tcg_temp_new_i64(); tcg_gen_deposit_i64(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 32, 32); - set_cpu_vsrl(xT(ctx->opcode), t0); - set_cpu_vsrh(xT(ctx->opcode), t0); + set_cpu_vsr(xT(ctx->opcode), t0, false); + set_cpu_vsr(xT(ctx->opcode), t0, true); tcg_temp_free_i64(t0); } #endif -static void gen_xxpermdi(DisasContext *ctx) -{ - TCGv_i64 xh, xl; - - if (unlikely(!ctx->vsx_enabled)) { - gen_exception(ctx, POWERPC_EXCP_VSXU); - return; - } - - xh = tcg_temp_new_i64(); - xl = tcg_temp_new_i64(); - - if (unlikely((xT(ctx->opcode) == xA(ctx->opcode)) || - (xT(ctx->opcode) == xB(ctx->opcode)))) { - if ((DM(ctx->opcode) & 2) == 0) { - get_cpu_vsrh(xh, xA(ctx->opcode)); - } else { - get_cpu_vsrl(xh, xA(ctx->opcode)); - } - if ((DM(ctx->opcode) & 1) == 0) { - get_cpu_vsrh(xl, xB(ctx->opcode)); - } else { - get_cpu_vsrl(xl, xB(ctx->opcode)); - } - - set_cpu_vsrh(xT(ctx->opcode), xh); - set_cpu_vsrl(xT(ctx->opcode), xl); - } else { - if ((DM(ctx->opcode) & 2) == 0) { - get_cpu_vsrh(xh, xA(ctx->opcode)); - set_cpu_vsrh(xT(ctx->opcode), xh); - } else { - get_cpu_vsrl(xh, xA(ctx->opcode)); - set_cpu_vsrh(xT(ctx->opcode), xh); - } - if ((DM(ctx->opcode) & 1) == 0) { - get_cpu_vsrh(xl, xB(ctx->opcode)); - set_cpu_vsrl(xT(ctx->opcode), xl); - } else { - get_cpu_vsrl(xl, xB(ctx->opcode)); - set_cpu_vsrl(xT(ctx->opcode), xl); - } - } - tcg_temp_free_i64(xh); - tcg_temp_free_i64(xl); -} - #define OP_ABS 1 #define OP_NABS 2 #define OP_NEG 3 #define OP_CPSGN 4 #define SGN_MASK_DP 0x8000000000000000ull #define SGN_MASK_SP 0x8000000080000000ull +#define EXP_MASK_DP 0x7FF0000000000000ull +#define EXP_MASK_SP 0x7F8000007F800000ull +#define FRC_MASK_DP (~(SGN_MASK_DP | EXP_MASK_DP)) +#define FRC_MASK_SP (~(SGN_MASK_SP | EXP_MASK_SP)) #define VSX_SCALAR_MOVE(name, op, sgn_mask) \ static void glue(gen_, name)(DisasContext *ctx) \ @@ -847,7 +645,7 @@ static void glue(gen_, name)(DisasContext *ctx) \ } \ xb = tcg_temp_new_i64(); \ sgm = tcg_temp_new_i64(); \ - get_cpu_vsrh(xb, xB(ctx->opcode)); \ + get_cpu_vsr(xb, xB(ctx->opcode), true); \ tcg_gen_movi_i64(sgm, sgn_mask); \ switch (op) { \ case OP_ABS: { \ @@ -864,7 +662,7 @@ static void glue(gen_, name)(DisasContext *ctx) \ } \ case OP_CPSGN: { \ TCGv_i64 xa = tcg_temp_new_i64(); \ - get_cpu_vsrh(xa, xA(ctx->opcode)); \ + get_cpu_vsr(xa, xA(ctx->opcode), true); \ tcg_gen_and_i64(xa, xa, sgm); \ tcg_gen_andc_i64(xb, xb, sgm); \ tcg_gen_or_i64(xb, xb, xa); \ @@ -872,7 +670,8 @@ static void glue(gen_, name)(DisasContext *ctx) \ break; \ } \ } \ - set_cpu_vsrh(xT(ctx->opcode), xb); \ + set_cpu_vsr(xT(ctx->opcode), xb, true); \ + set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \ tcg_temp_free_i64(xb); \ tcg_temp_free_i64(sgm); \ } @@ -898,8 +697,8 @@ static void glue(gen_, name)(DisasContext *ctx) \ xbl = tcg_temp_new_i64(); \ sgm = tcg_temp_new_i64(); \ tmp = tcg_temp_new_i64(); \ - get_cpu_vsrh(xbh, xb); \ - get_cpu_vsrl(xbl, xb); \ + get_cpu_vsr(xbh, xb, true); \ + get_cpu_vsr(xbl, xb, false); \ tcg_gen_movi_i64(sgm, sgn_mask); \ switch (op) { \ case OP_ABS: \ @@ -914,15 +713,15 @@ static void glue(gen_, name)(DisasContext *ctx) \ case OP_CPSGN: \ xah = tcg_temp_new_i64(); \ xa = rA(ctx->opcode) + 32; \ - get_cpu_vsrh(tmp, xa); \ + get_cpu_vsr(tmp, xa, true); \ tcg_gen_and_i64(xah, tmp, 
sgm); \ tcg_gen_andc_i64(xbh, xbh, sgm); \ tcg_gen_or_i64(xbh, xbh, xah); \ tcg_temp_free_i64(xah); \ break; \ } \ - set_cpu_vsrh(xt, xbh); \ - set_cpu_vsrl(xt, xbl); \ + set_cpu_vsr(xt, xbh, true); \ + set_cpu_vsr(xt, xbl, false); \ tcg_temp_free_i64(xbl); \ tcg_temp_free_i64(xbh); \ tcg_temp_free_i64(sgm); \ @@ -934,67 +733,125 @@ VSX_SCALAR_MOVE_QP(xsnabsqp, OP_NABS, SGN_MASK_DP) VSX_SCALAR_MOVE_QP(xsnegqp, OP_NEG, SGN_MASK_DP) VSX_SCALAR_MOVE_QP(xscpsgnqp, OP_CPSGN, SGN_MASK_DP) -#define VSX_VECTOR_MOVE(name, op, sgn_mask) \ -static void glue(gen_, name)(DisasContext *ctx) \ - { \ - TCGv_i64 xbh, xbl, sgm; \ - if (unlikely(!ctx->vsx_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VSXU); \ - return; \ - } \ - xbh = tcg_temp_new_i64(); \ - xbl = tcg_temp_new_i64(); \ - sgm = tcg_temp_new_i64(); \ - get_cpu_vsrh(xbh, xB(ctx->opcode)); \ - get_cpu_vsrl(xbl, xB(ctx->opcode)); \ - tcg_gen_movi_i64(sgm, sgn_mask); \ - switch (op) { \ - case OP_ABS: { \ - tcg_gen_andc_i64(xbh, xbh, sgm); \ - tcg_gen_andc_i64(xbl, xbl, sgm); \ - break; \ - } \ - case OP_NABS: { \ - tcg_gen_or_i64(xbh, xbh, sgm); \ - tcg_gen_or_i64(xbl, xbl, sgm); \ - break; \ - } \ - case OP_NEG: { \ - tcg_gen_xor_i64(xbh, xbh, sgm); \ - tcg_gen_xor_i64(xbl, xbl, sgm); \ - break; \ - } \ - case OP_CPSGN: { \ - TCGv_i64 xah = tcg_temp_new_i64(); \ - TCGv_i64 xal = tcg_temp_new_i64(); \ - get_cpu_vsrh(xah, xA(ctx->opcode)); \ - get_cpu_vsrl(xal, xA(ctx->opcode)); \ - tcg_gen_and_i64(xah, xah, sgm); \ - tcg_gen_and_i64(xal, xal, sgm); \ - tcg_gen_andc_i64(xbh, xbh, sgm); \ - tcg_gen_andc_i64(xbl, xbl, sgm); \ - tcg_gen_or_i64(xbh, xbh, xah); \ - tcg_gen_or_i64(xbl, xbl, xal); \ - tcg_temp_free_i64(xah); \ - tcg_temp_free_i64(xal); \ - break; \ - } \ - } \ - set_cpu_vsrh(xT(ctx->opcode), xbh); \ - set_cpu_vsrl(xT(ctx->opcode), xbl); \ - tcg_temp_free_i64(xbh); \ - tcg_temp_free_i64(xbl); \ - tcg_temp_free_i64(sgm); \ +#define TCG_OP_IMM_i64(FUNC, OP, IMM) \ + static void FUNC(TCGv_i64 t, TCGv_i64 b) \ + { \ + OP(t, b, IMM); \ } -VSX_VECTOR_MOVE(xvabsdp, OP_ABS, SGN_MASK_DP) -VSX_VECTOR_MOVE(xvnabsdp, OP_NABS, SGN_MASK_DP) -VSX_VECTOR_MOVE(xvnegdp, OP_NEG, SGN_MASK_DP) -VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP) -VSX_VECTOR_MOVE(xvabssp, OP_ABS, SGN_MASK_SP) -VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP) -VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP) -VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP) +TCG_OP_IMM_i64(do_xvabssp_i64, tcg_gen_andi_i64, ~SGN_MASK_SP) +TCG_OP_IMM_i64(do_xvnabssp_i64, tcg_gen_ori_i64, SGN_MASK_SP) +TCG_OP_IMM_i64(do_xvnegsp_i64, tcg_gen_xori_i64, SGN_MASK_SP) +TCG_OP_IMM_i64(do_xvabsdp_i64, tcg_gen_andi_i64, ~SGN_MASK_DP) +TCG_OP_IMM_i64(do_xvnabsdp_i64, tcg_gen_ori_i64, SGN_MASK_DP) +TCG_OP_IMM_i64(do_xvnegdp_i64, tcg_gen_xori_i64, SGN_MASK_DP) +#undef TCG_OP_IMM_i64 + +static void xv_msb_op1(unsigned vece, TCGv_vec t, TCGv_vec b, + void (*tcg_gen_op_vec)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec)) +{ + uint64_t msb = (vece == MO_32) ? 
SGN_MASK_SP : SGN_MASK_DP; + tcg_gen_op_vec(vece, t, b, tcg_constant_vec_matching(t, vece, msb)); +} + +static void do_xvabs_vec(unsigned vece, TCGv_vec t, TCGv_vec b) +{ + xv_msb_op1(vece, t, b, tcg_gen_andc_vec); +} + +static void do_xvnabs_vec(unsigned vece, TCGv_vec t, TCGv_vec b) +{ + xv_msb_op1(vece, t, b, tcg_gen_or_vec); +} + +static void do_xvneg_vec(unsigned vece, TCGv_vec t, TCGv_vec b) +{ + xv_msb_op1(vece, t, b, tcg_gen_xor_vec); +} + +static bool do_vsx_msb_op(DisasContext *ctx, arg_XX2 *a, unsigned vece, + void (*vec)(unsigned, TCGv_vec, TCGv_vec), + void (*i64)(TCGv_i64, TCGv_i64)) +{ + static const TCGOpcode vecop_list[] = { + 0 + }; + + const GVecGen2 op = { + .fni8 = i64, + .fniv = vec, + .opt_opc = vecop_list, + .vece = vece + }; + + REQUIRE_INSNS_FLAGS2(ctx, VSX); + REQUIRE_VSX(ctx); + + tcg_gen_gvec_2(vsr_full_offset(a->xt), vsr_full_offset(a->xb), + 16, 16, &op); + + return true; +} + +TRANS(XVABSDP, do_vsx_msb_op, MO_64, do_xvabs_vec, do_xvabsdp_i64) +TRANS(XVNABSDP, do_vsx_msb_op, MO_64, do_xvnabs_vec, do_xvnabsdp_i64) +TRANS(XVNEGDP, do_vsx_msb_op, MO_64, do_xvneg_vec, do_xvnegdp_i64) +TRANS(XVABSSP, do_vsx_msb_op, MO_32, do_xvabs_vec, do_xvabssp_i64) +TRANS(XVNABSSP, do_vsx_msb_op, MO_32, do_xvnabs_vec, do_xvnabssp_i64) +TRANS(XVNEGSP, do_vsx_msb_op, MO_32, do_xvneg_vec, do_xvnegsp_i64) + +static void do_xvcpsgndp_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + tcg_gen_andi_i64(a, a, SGN_MASK_DP); + tcg_gen_andi_i64(b, b, ~SGN_MASK_DP); + tcg_gen_or_i64(t, a, b); +} + +static void do_xvcpsgnsp_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + tcg_gen_andi_i64(a, a, SGN_MASK_SP); + tcg_gen_andi_i64(b, b, ~SGN_MASK_SP); + tcg_gen_or_i64(t, a, b); +} + +static void do_xvcpsgn_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + uint64_t msb = (vece == MO_32) ? 
SGN_MASK_SP : SGN_MASK_DP; + tcg_gen_bitsel_vec(vece, t, tcg_constant_vec_matching(t, vece, msb), a, b); +} + +static bool do_xvcpsgn(DisasContext *ctx, arg_XX3 *a, unsigned vece) +{ + static const TCGOpcode vecop_list[] = { + 0 + }; + + static const GVecGen3 op[] = { + { + .fni8 = do_xvcpsgnsp_i64, + .fniv = do_xvcpsgn_vec, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = do_xvcpsgndp_i64, + .fniv = do_xvcpsgn_vec, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + REQUIRE_INSNS_FLAGS2(ctx, VSX); + REQUIRE_VSX(ctx); + + tcg_gen_gvec_3(vsr_full_offset(a->xt), vsr_full_offset(a->xa), + vsr_full_offset(a->xb), 16, 16, &op[vece - MO_32]); + + return true; +} + +TRANS(XVCPSGNSP, do_xvcpsgn, MO_32) +TRANS(XVCPSGNDP, do_xvcpsgn, MO_64) #define VSX_CMP(name, op1, op2, inval, type) \ static void gen_##name(DisasContext *ctx) \ @@ -1015,7 +872,6 @@ static void gen_##name(DisasContext *ctx) \ gen_helper_##name(ignored, cpu_env, xt, xa, xb); \ tcg_temp_free_i32(ignored); \ } \ - gen_helper_float_check_status(cpu_env); \ tcg_temp_free_ptr(xt); \ tcg_temp_free_ptr(xa); \ tcg_temp_free_ptr(xb); \ @@ -1030,23 +886,48 @@ VSX_CMP(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX) VSX_CMP(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX) VSX_CMP(xvcmpnesp, 0x0C, 0x0B, 0, PPC2_VSX) -static void gen_xscvqpdp(DisasContext *ctx) +static bool trans_XSCVQPDP(DisasContext *ctx, arg_X_tb_rc *a) { - TCGv_i32 opc; + TCGv_i32 ro; TCGv_ptr xt, xb; - if (unlikely(!ctx->vsx_enabled)) { - gen_exception(ctx, POWERPC_EXCP_VSXU); - return; - } - opc = tcg_const_i32(ctx->opcode); - xt = gen_vsr_ptr(xT(ctx->opcode)); - xb = gen_vsr_ptr(xB(ctx->opcode)); - gen_helper_xscvqpdp(cpu_env, opc, xt, xb); - tcg_temp_free_i32(opc); + + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VSX(ctx); + + ro = tcg_const_i32(a->rc); + + xt = gen_avr_ptr(a->rt); + xb = gen_avr_ptr(a->rb); + gen_helper_XSCVQPDP(cpu_env, ro, xt, xb); + tcg_temp_free_i32(ro); tcg_temp_free_ptr(xt); tcg_temp_free_ptr(xb); + + return true; } +static bool do_helper_env_X_tb(DisasContext *ctx, arg_X_tb *a, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr)) +{ + TCGv_ptr xt, xb; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + xt = gen_avr_ptr(a->rt); + xb = gen_avr_ptr(a->rb); + gen_helper(cpu_env, xt, xb); + tcg_temp_free_ptr(xt); + tcg_temp_free_ptr(xb); + + return true; +} + +TRANS(XSCVUQQP, do_helper_env_X_tb, gen_helper_XSCVUQQP) +TRANS(XSCVSQQP, do_helper_env_X_tb, gen_helper_XSCVSQQP) +TRANS(XSCVQPUQZ, do_helper_env_X_tb, gen_helper_XSCVQPUQZ) +TRANS(XSCVQPSQZ, do_helper_env_X_tb, gen_helper_XSCVQPSQZ) + #define GEN_VSX_HELPER_2(name, op1, op2, inval, type) \ static void gen_##name(DisasContext *ctx) \ { \ @@ -1193,9 +1074,10 @@ static void gen_##name(DisasContext *ctx) \ } \ t0 = tcg_temp_new_i64(); \ t1 = tcg_temp_new_i64(); \ - get_cpu_vsrh(t0, xB(ctx->opcode)); \ + get_cpu_vsr(t0, xB(ctx->opcode), true); \ gen_helper_##name(t1, cpu_env, t0); \ - set_cpu_vsrh(xT(ctx->opcode), t1); \ + set_cpu_vsr(xT(ctx->opcode), t1, true); \ + set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \ tcg_temp_free_i64(t0); \ tcg_temp_free_i64(t1); \ } @@ -1212,10 +1094,6 @@ GEN_VSX_HELPER_X2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX) GEN_VSX_HELPER_X2_AB(xstdivdp, 0x14, 0x07, 0, PPC2_VSX) GEN_VSX_HELPER_X1(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX) -GEN_VSX_HELPER_X3(xscmpeqdp, 0x0C, 0x00, 0, PPC2_ISA300) -GEN_VSX_HELPER_X3(xscmpgtdp, 0x0C, 0x01, 0, PPC2_ISA300) -GEN_VSX_HELPER_X3(xscmpgedp, 0x0C, 0x02, 0, PPC2_ISA300) 
-GEN_VSX_HELPER_X3(xscmpnedp, 0x0C, 0x03, 0, PPC2_ISA300) GEN_VSX_HELPER_X2_AB(xscmpexpdp, 0x0C, 0x07, 0, PPC2_ISA300) GEN_VSX_HELPER_R2_AB(xscmpexpqp, 0x04, 0x05, 0, PPC2_ISA300) GEN_VSX_HELPER_X2_AB(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX) @@ -1224,10 +1102,6 @@ GEN_VSX_HELPER_R2_AB(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX) GEN_VSX_HELPER_R2_AB(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xsmindp, 0x00, 0x15, 0, PPC2_VSX) -GEN_VSX_HELPER_R3(xsmaxcdp, 0x00, 0x10, 0, PPC2_ISA300) -GEN_VSX_HELPER_R3(xsmincdp, 0x00, 0x11, 0, PPC2_ISA300) -GEN_VSX_HELPER_R3(xsmaxjdp, 0x00, 0x12, 0, PPC2_ISA300) -GEN_VSX_HELPER_R3(xsminjdp, 0x00, 0x12, 0, PPC2_ISA300) GEN_VSX_HELPER_X2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300) GEN_VSX_HELPER_X2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX) GEN_VSX_HELPER_R2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300) @@ -1239,7 +1113,213 @@ GEN_VSX_HELPER_R2(xscvqpuwz, 0x04, 0x1A, 0x01, PPC2_ISA300) GEN_VSX_HELPER_X2(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300) GEN_VSX_HELPER_R2(xscvsdqp, 0x04, 0x1A, 0x0A, PPC2_ISA300) GEN_VSX_HELPER_X2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX) -GEN_VSX_HELPER_XT_XB_ENV(xscvspdpn, 0x16, 0x14, 0, PPC2_VSX207) + +/* test if +Inf */ +static void gen_is_pos_inf(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v) +{ + uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP; + tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b, + tcg_constant_vec_matching(t, vece, exp_msk)); +} + +/* test if -Inf */ +static void gen_is_neg_inf(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v) +{ + uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP; + uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP; + tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b, + tcg_constant_vec_matching(t, vece, sgn_msk | exp_msk)); +} + +/* test if +Inf or -Inf */ +static void gen_is_any_inf(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v) +{ + uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP; + uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP; + tcg_gen_andc_vec(vece, b, b, tcg_constant_vec_matching(t, vece, sgn_msk)); + tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b, + tcg_constant_vec_matching(t, vece, exp_msk)); +} + +/* test if +0 */ +static void gen_is_pos_zero(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v) +{ + tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b, + tcg_constant_vec_matching(t, vece, 0)); +} + +/* test if -0 */ +static void gen_is_neg_zero(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v) +{ + uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP; + tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b, + tcg_constant_vec_matching(t, vece, sgn_msk)); +} + +/* test if +0 or -0 */ +static void gen_is_any_zero(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v) +{ + uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP; + tcg_gen_andc_vec(vece, b, b, tcg_constant_vec_matching(t, vece, sgn_msk)); + tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b, + tcg_constant_vec_matching(t, vece, 0)); +} + +/* test if +Denormal */ +static void gen_is_pos_denormal(unsigned vece, TCGv_vec t, + TCGv_vec b, int64_t v) +{ + uint64_t frc_msk = (vece == MO_32) ? 
(uint32_t)FRC_MASK_SP : FRC_MASK_DP; + tcg_gen_cmp_vec(TCG_COND_LEU, vece, t, b, + tcg_constant_vec_matching(t, vece, frc_msk)); + tcg_gen_cmp_vec(TCG_COND_NE, vece, b, b, + tcg_constant_vec_matching(t, vece, 0)); + tcg_gen_and_vec(vece, t, t, b); +} + +/* test if -Denormal */ +static void gen_is_neg_denormal(unsigned vece, TCGv_vec t, + TCGv_vec b, int64_t v) +{ + uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP; + uint64_t frc_msk = (vece == MO_32) ? (uint32_t)FRC_MASK_SP : FRC_MASK_DP; + tcg_gen_cmp_vec(TCG_COND_LEU, vece, t, b, + tcg_constant_vec_matching(t, vece, sgn_msk | frc_msk)); + tcg_gen_cmp_vec(TCG_COND_GTU, vece, b, b, + tcg_constant_vec_matching(t, vece, sgn_msk)); + tcg_gen_and_vec(vece, t, t, b); +} + +/* test if +Denormal or -Denormal */ +static void gen_is_any_denormal(unsigned vece, TCGv_vec t, + TCGv_vec b, int64_t v) +{ + uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP; + uint64_t frc_msk = (vece == MO_32) ? (uint32_t)FRC_MASK_SP : FRC_MASK_DP; + tcg_gen_andc_vec(vece, b, b, tcg_constant_vec_matching(t, vece, sgn_msk)); + tcg_gen_cmp_vec(TCG_COND_LE, vece, t, b, + tcg_constant_vec_matching(t, vece, frc_msk)); + tcg_gen_cmp_vec(TCG_COND_NE, vece, b, b, + tcg_constant_vec_matching(t, vece, 0)); + tcg_gen_and_vec(vece, t, t, b); +} + +/* test if NaN */ +static void gen_is_nan(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v) +{ + uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP; + uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP; + tcg_gen_and_vec(vece, b, b, tcg_constant_vec_matching(t, vece, ~sgn_msk)); + tcg_gen_cmp_vec(TCG_COND_GT, vece, t, b, + tcg_constant_vec_matching(t, vece, exp_msk)); +} + +static bool do_xvtstdc(DisasContext *ctx, arg_XX2_uim *a, unsigned vece) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_cmp_vec, 0 + }; + + GVecGen2i op = { + .fnoi = (vece == MO_32) ? gen_helper_XVTSTDCSP : gen_helper_XVTSTDCDP, + .vece = vece, + .opt_opc = vecop_list + }; + + REQUIRE_VSX(ctx); + + switch (a->uim) { + case 0: + set_cpu_vsr(a->xt, tcg_constant_i64(0), true); + set_cpu_vsr(a->xt, tcg_constant_i64(0), false); + return true; + case ((1 << 0) | (1 << 1)): + /* test if +Denormal or -Denormal */ + op.fniv = gen_is_any_denormal; + break; + case (1 << 0): + /* test if -Denormal */ + op.fniv = gen_is_neg_denormal; + break; + case (1 << 1): + /* test if +Denormal */ + op.fniv = gen_is_pos_denormal; + break; + case ((1 << 2) | (1 << 3)): + /* test if +0 or -0 */ + op.fniv = gen_is_any_zero; + break; + case (1 << 2): + /* test if -0 */ + op.fniv = gen_is_neg_zero; + break; + case (1 << 3): + /* test if +0 */ + op.fniv = gen_is_pos_zero; + break; + case ((1 << 4) | (1 << 5)): + /* test if +Inf or -Inf */ + op.fniv = gen_is_any_inf; + break; + case (1 << 4): + /* test if -Inf */ + op.fniv = gen_is_neg_inf; + break; + case (1 << 5): + /* test if +Inf */ + op.fniv = gen_is_pos_inf; + break; + case (1 << 6): + /* test if NaN */ + op.fniv = gen_is_nan; + break; + } + tcg_gen_gvec_2i(vsr_full_offset(a->xt), vsr_full_offset(a->xb), + 16, 16, a->uim, &op); + + return true; +} + +TRANS_FLAGS2(VSX, XVTSTDCSP, do_xvtstdc, MO_32) +TRANS_FLAGS2(VSX, XVTSTDCDP, do_xvtstdc, MO_64) + +static bool do_XX2_bf_uim(DisasContext *ctx, arg_XX2_bf_uim *a, bool vsr, + void (*gen_helper)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_ptr)) +{ + TCGv_ptr xb; + + REQUIRE_VSX(ctx); + xb = vsr ? 
gen_vsr_ptr(a->xb) : gen_avr_ptr(a->xb); + gen_helper(cpu_env, tcg_constant_i32(a->bf), tcg_constant_i32(a->uim), xb); + tcg_temp_free_ptr(xb); + + return true; +} + +TRANS_FLAGS2(ISA300, XSTSTDCSP, do_XX2_bf_uim, true, gen_helper_XSTSTDCSP) +TRANS_FLAGS2(ISA300, XSTSTDCDP, do_XX2_bf_uim, true, gen_helper_XSTSTDCDP) +TRANS_FLAGS2(ISA300, XSTSTDCQP, do_XX2_bf_uim, false, gen_helper_XSTSTDCQP) + +bool trans_XSCVSPDPN(DisasContext *ctx, arg_XX2 *a) +{ + TCGv_i64 tmp; + + REQUIRE_INSNS_FLAGS2(ctx, VSX207); + REQUIRE_VSX(ctx); + + tmp = tcg_temp_new_i64(); + get_cpu_vsr(tmp, a->xb, true); + + gen_helper_XSCVSPDPN(tmp, tmp); + + set_cpu_vsr(a->xt, tmp, true); + set_cpu_vsr(a->xt, tcg_constant_i64(0), false); + + tcg_temp_free_i64(tmp); + + return true; +} + GEN_VSX_HELPER_X2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX) @@ -1266,9 +1346,6 @@ GEN_VSX_HELPER_X2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207) GEN_VSX_HELPER_X2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207) GEN_VSX_HELPER_X2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207) GEN_VSX_HELPER_X2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207) -GEN_VSX_HELPER_X1(xststdcsp, 0x14, 0x12, 0, PPC2_ISA300) -GEN_VSX_HELPER_2(xststdcdp, 0x14, 0x16, 0, PPC2_ISA300) -GEN_VSX_HELPER_2(xststdcqp, 0x04, 0x16, 0, PPC2_ISA300) GEN_VSX_HELPER_X3(xvadddp, 0x00, 0x0C, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX) @@ -1323,49 +1400,248 @@ GEN_VSX_HELPER_X2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX) -GEN_VSX_HELPER_2(xvtstdcsp, 0x14, 0x1A, 0, PPC2_VSX) -GEN_VSX_HELPER_2(xvtstdcdp, 0x14, 0x1E, 0, PPC2_VSX) -GEN_VSX_HELPER_X3(xxperm, 0x08, 0x03, 0, PPC2_ISA300) -GEN_VSX_HELPER_X3(xxpermr, 0x08, 0x07, 0, PPC2_ISA300) + +static bool trans_XXPERM(DisasContext *ctx, arg_XX3 *a) +{ + TCGv_ptr xt, xa, xb; + + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VSX(ctx); + + xt = gen_vsr_ptr(a->xt); + xa = gen_vsr_ptr(a->xa); + xb = gen_vsr_ptr(a->xb); + + gen_helper_VPERM(xt, xa, xt, xb); + + tcg_temp_free_ptr(xt); + tcg_temp_free_ptr(xa); + tcg_temp_free_ptr(xb); + + return true; +} + +static bool trans_XXPERMR(DisasContext *ctx, arg_XX3 *a) +{ + TCGv_ptr xt, xa, xb; + + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VSX(ctx); + + xt = gen_vsr_ptr(a->xt); + xa = gen_vsr_ptr(a->xa); + xb = gen_vsr_ptr(a->xb); + + gen_helper_VPERMR(xt, xa, xt, xb); + + tcg_temp_free_ptr(xt); + tcg_temp_free_ptr(xa); + tcg_temp_free_ptr(xb); + + return true; +} + +static bool trans_XXPERMDI(DisasContext *ctx, arg_XX3_dm *a) +{ + TCGv_i64 t0, t1; + + REQUIRE_INSNS_FLAGS2(ctx, VSX); + REQUIRE_VSX(ctx); + + t0 = tcg_temp_new_i64(); + + if (unlikely(a->xt == a->xa || a->xt == a->xb)) { + t1 = tcg_temp_new_i64(); + + get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0); + get_cpu_vsr(t1, a->xb, (a->dm & 1) == 0); + + set_cpu_vsr(a->xt, t0, true); + set_cpu_vsr(a->xt, t1, false); + + tcg_temp_free_i64(t1); + } else { + get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0); + set_cpu_vsr(a->xt, t0, true); + + get_cpu_vsr(t0, a->xb, (a->dm & 1) == 0); + set_cpu_vsr(a->xt, t0, false); + } + + tcg_temp_free_i64(t0); + + return true; +} + +static bool trans_XXPERMX(DisasContext *ctx, arg_8RR_XX4_uim3 *a) +{ + TCGv_ptr xt, xa, xb, xc; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + xt = gen_vsr_ptr(a->xt); + xa = gen_vsr_ptr(a->xa); + xb = gen_vsr_ptr(a->xb); + xc = gen_vsr_ptr(a->xc); + 
+ gen_helper_XXPERMX(xt, xa, xb, xc, tcg_constant_tl(a->uim3)); + + tcg_temp_free_ptr(xt); + tcg_temp_free_ptr(xa); + tcg_temp_free_ptr(xb); + tcg_temp_free_ptr(xc); + + return true; +} + +typedef void (*xxgenpcv_genfn)(TCGv_ptr, TCGv_ptr); + +static bool do_xxgenpcv(DisasContext *ctx, arg_X_imm5 *a, + const xxgenpcv_genfn fn[4]) +{ + TCGv_ptr xt, vrb; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + if (a->imm & ~0x3) { + gen_invalid(ctx); + return true; + } + + xt = gen_vsr_ptr(a->xt); + vrb = gen_avr_ptr(a->vrb); + + fn[a->imm](xt, vrb); + + tcg_temp_free_ptr(xt); + tcg_temp_free_ptr(vrb); + + return true; +} + +#define XXGENPCV(NAME) \ + static bool trans_##NAME(DisasContext *ctx, arg_X_imm5 *a) \ + { \ + static const xxgenpcv_genfn fn[4] = { \ + gen_helper_##NAME##_be_exp, \ + gen_helper_##NAME##_be_comp, \ + gen_helper_##NAME##_le_exp, \ + gen_helper_##NAME##_le_comp, \ + }; \ + return do_xxgenpcv(ctx, a, fn); \ + } + +XXGENPCV(XXGENPCVBM) +XXGENPCV(XXGENPCVHM) +XXGENPCV(XXGENPCVWM) +XXGENPCV(XXGENPCVDM) +#undef XXGENPCV + +static bool do_xsmadd(DisasContext *ctx, int tgt, int src1, int src2, int src3, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr)) +{ + TCGv_ptr t, s1, s2, s3; + + t = gen_vsr_ptr(tgt); + s1 = gen_vsr_ptr(src1); + s2 = gen_vsr_ptr(src2); + s3 = gen_vsr_ptr(src3); + + gen_helper(cpu_env, t, s1, s2, s3); + + tcg_temp_free_ptr(t); + tcg_temp_free_ptr(s1); + tcg_temp_free_ptr(s2); + tcg_temp_free_ptr(s3); + + return true; +} + +static bool do_xsmadd_XX3(DisasContext *ctx, arg_XX3 *a, bool type_a, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr)) +{ + REQUIRE_VSX(ctx); + + if (type_a) { + return do_xsmadd(ctx, a->xt, a->xa, a->xt, a->xb, gen_helper); + } + return do_xsmadd(ctx, a->xt, a->xa, a->xb, a->xt, gen_helper); +} + +TRANS_FLAGS2(VSX, XSMADDADP, do_xsmadd_XX3, true, gen_helper_XSMADDDP) +TRANS_FLAGS2(VSX, XSMADDMDP, do_xsmadd_XX3, false, gen_helper_XSMADDDP) +TRANS_FLAGS2(VSX, XSMSUBADP, do_xsmadd_XX3, true, gen_helper_XSMSUBDP) +TRANS_FLAGS2(VSX, XSMSUBMDP, do_xsmadd_XX3, false, gen_helper_XSMSUBDP) +TRANS_FLAGS2(VSX, XSNMADDADP, do_xsmadd_XX3, true, gen_helper_XSNMADDDP) +TRANS_FLAGS2(VSX, XSNMADDMDP, do_xsmadd_XX3, false, gen_helper_XSNMADDDP) +TRANS_FLAGS2(VSX, XSNMSUBADP, do_xsmadd_XX3, true, gen_helper_XSNMSUBDP) +TRANS_FLAGS2(VSX, XSNMSUBMDP, do_xsmadd_XX3, false, gen_helper_XSNMSUBDP) +TRANS_FLAGS2(VSX207, XSMADDASP, do_xsmadd_XX3, true, gen_helper_XSMADDSP) +TRANS_FLAGS2(VSX207, XSMADDMSP, do_xsmadd_XX3, false, gen_helper_XSMADDSP) +TRANS_FLAGS2(VSX207, XSMSUBASP, do_xsmadd_XX3, true, gen_helper_XSMSUBSP) +TRANS_FLAGS2(VSX207, XSMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSMSUBSP) +TRANS_FLAGS2(VSX207, XSNMADDASP, do_xsmadd_XX3, true, gen_helper_XSNMADDSP) +TRANS_FLAGS2(VSX207, XSNMADDMSP, do_xsmadd_XX3, false, gen_helper_XSNMADDSP) +TRANS_FLAGS2(VSX207, XSNMSUBASP, do_xsmadd_XX3, true, gen_helper_XSNMSUBSP) +TRANS_FLAGS2(VSX207, XSNMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSNMSUBSP) + +static bool do_xsmadd_X(DisasContext *ctx, arg_X_rc *a, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr), + void (*gen_helper_ro)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr)) +{ + int vrt, vra, vrb; + + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VSX(ctx); + + vrt = a->rt + 32; + vra = a->ra + 32; + vrb = a->rb + 32; + + if (a->rc) { + return do_xsmadd(ctx, vrt, vra, vrt, vrb, gen_helper_ro); + } + + return do_xsmadd(ctx, vrt, vra, vrt, vrb, gen_helper); +} + +TRANS(XSMADDQP, 
do_xsmadd_X, gen_helper_XSMADDQP, gen_helper_XSMADDQPO) +TRANS(XSMSUBQP, do_xsmadd_X, gen_helper_XSMSUBQP, gen_helper_XSMSUBQPO) +TRANS(XSNMADDQP, do_xsmadd_X, gen_helper_XSNMADDQP, gen_helper_XSNMADDQPO) +TRANS(XSNMSUBQP, do_xsmadd_X, gen_helper_XSNMSUBQP, gen_helper_XSNMSUBQPO) #define GEN_VSX_HELPER_VSX_MADD(name, op1, aop, mop, inval, type) \ static void gen_##name(DisasContext *ctx) \ { \ - TCGv_ptr xt, xa, b, c; \ + TCGv_ptr xt, s1, s2, s3; \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ xt = gen_vsr_ptr(xT(ctx->opcode)); \ - xa = gen_vsr_ptr(xA(ctx->opcode)); \ + s1 = gen_vsr_ptr(xA(ctx->opcode)); \ if (ctx->opcode & PPC_BIT32(25)) { \ /* \ * AxT + B \ */ \ - b = gen_vsr_ptr(xT(ctx->opcode)); \ - c = gen_vsr_ptr(xB(ctx->opcode)); \ + s2 = gen_vsr_ptr(xB(ctx->opcode)); \ + s3 = gen_vsr_ptr(xT(ctx->opcode)); \ } else { \ /* \ * AxB + T \ */ \ - b = gen_vsr_ptr(xB(ctx->opcode)); \ - c = gen_vsr_ptr(xT(ctx->opcode)); \ + s2 = gen_vsr_ptr(xT(ctx->opcode)); \ + s3 = gen_vsr_ptr(xB(ctx->opcode)); \ } \ - gen_helper_##name(cpu_env, xt, xa, b, c); \ + gen_helper_##name(cpu_env, xt, s1, s2, s3); \ tcg_temp_free_ptr(xt); \ - tcg_temp_free_ptr(xa); \ - tcg_temp_free_ptr(b); \ - tcg_temp_free_ptr(c); \ + tcg_temp_free_ptr(s1); \ + tcg_temp_free_ptr(s2); \ + tcg_temp_free_ptr(s3); \ } -GEN_VSX_HELPER_VSX_MADD(xsmadddp, 0x04, 0x04, 0x05, 0, PPC2_VSX) -GEN_VSX_HELPER_VSX_MADD(xsmsubdp, 0x04, 0x06, 0x07, 0, PPC2_VSX) -GEN_VSX_HELPER_VSX_MADD(xsnmadddp, 0x04, 0x14, 0x15, 0, PPC2_VSX) -GEN_VSX_HELPER_VSX_MADD(xsnmsubdp, 0x04, 0x16, 0x17, 0, PPC2_VSX) -GEN_VSX_HELPER_VSX_MADD(xsmaddsp, 0x04, 0x00, 0x01, 0, PPC2_VSX207) -GEN_VSX_HELPER_VSX_MADD(xsmsubsp, 0x04, 0x02, 0x03, 0, PPC2_VSX207) -GEN_VSX_HELPER_VSX_MADD(xsnmaddsp, 0x04, 0x10, 0x11, 0, PPC2_VSX207) -GEN_VSX_HELPER_VSX_MADD(xsnmsubsp, 0x04, 0x12, 0x13, 0, PPC2_VSX207) GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, PPC2_VSX) GEN_VSX_HELPER_VSX_MADD(xvmsubdp, 0x04, 0x0E, 0x0F, 0, PPC2_VSX) GEN_VSX_HELPER_VSX_MADD(xvnmadddp, 0x04, 0x1C, 0x1D, 0, PPC2_VSX) @@ -1390,13 +1666,13 @@ static void gen_xxbrd(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); tcg_gen_bswap64_i64(xth, xbh); tcg_gen_bswap64_i64(xtl, xbl); - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -1419,12 +1695,12 @@ static void gen_xxbrh(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); gen_bswap16x8(xth, xtl, xbh, xbl); - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -1448,15 +1724,15 @@ static void gen_xxbrq(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); t0 = tcg_temp_new_i64(); 
tcg_gen_bswap64_i64(t0, xbl); tcg_gen_bswap64_i64(xtl, xbh); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_gen_mov_i64(xth, t0); - set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_temp_free_i64(t0); tcg_temp_free_i64(xth); @@ -1480,12 +1756,12 @@ static void gen_xxbrw(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); gen_bswap32x4(xth, xtl, xbh, xbl); - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -1527,23 +1803,16 @@ static void glue(gen_, name)(DisasContext *ctx) \ b0 = tcg_temp_new_i64(); \ b1 = tcg_temp_new_i64(); \ tmp = tcg_temp_new_i64(); \ - if (high) { \ - get_cpu_vsrh(a0, xA(ctx->opcode)); \ - get_cpu_vsrh(a1, xA(ctx->opcode)); \ - get_cpu_vsrh(b0, xB(ctx->opcode)); \ - get_cpu_vsrh(b1, xB(ctx->opcode)); \ - } else { \ - get_cpu_vsrl(a0, xA(ctx->opcode)); \ - get_cpu_vsrl(a1, xA(ctx->opcode)); \ - get_cpu_vsrl(b0, xB(ctx->opcode)); \ - get_cpu_vsrl(b1, xB(ctx->opcode)); \ - } \ + get_cpu_vsr(a0, xA(ctx->opcode), high); \ + get_cpu_vsr(a1, xA(ctx->opcode), high); \ + get_cpu_vsr(b0, xB(ctx->opcode), high); \ + get_cpu_vsr(b1, xB(ctx->opcode), high); \ tcg_gen_shri_i64(a0, a0, 32); \ tcg_gen_shri_i64(b0, b0, 32); \ tcg_gen_deposit_i64(tmp, b0, a0, 32, 32); \ - set_cpu_vsrh(xT(ctx->opcode), tmp); \ + set_cpu_vsr(xT(ctx->opcode), tmp, true); \ tcg_gen_deposit_i64(tmp, b1, a1, 32, 32); \ - set_cpu_vsrl(xT(ctx->opcode), tmp); \ + set_cpu_vsr(xT(ctx->opcode), tmp, false); \ tcg_temp_free_i64(a0); \ tcg_temp_free_i64(a1); \ tcg_temp_free_i64(b0); \ @@ -1554,62 +1823,165 @@ static void glue(gen_, name)(DisasContext *ctx) \ VSX_XXMRG(xxmrghw, 1) VSX_XXMRG(xxmrglw, 0) -static void gen_xxsel(DisasContext *ctx) +static bool trans_XXSEL(DisasContext *ctx, arg_XX4 *a) { - int rt = xT(ctx->opcode); - int ra = xA(ctx->opcode); - int rb = xB(ctx->opcode); - int rc = xC(ctx->opcode); + REQUIRE_INSNS_FLAGS2(ctx, VSX); + REQUIRE_VSX(ctx); - if (unlikely(!ctx->vsx_enabled)) { - gen_exception(ctx, POWERPC_EXCP_VSXU); - return; - } - tcg_gen_gvec_bitsel(MO_64, vsr_full_offset(rt), vsr_full_offset(rc), - vsr_full_offset(rb), vsr_full_offset(ra), 16, 16); + tcg_gen_gvec_bitsel(MO_64, vsr_full_offset(a->xt), vsr_full_offset(a->xc), + vsr_full_offset(a->xb), vsr_full_offset(a->xa), 16, 16); + + return true; } -static void gen_xxspltw(DisasContext *ctx) +static bool trans_XXSPLTW(DisasContext *ctx, arg_XX2_uim *a) { - int rt = xT(ctx->opcode); - int rb = xB(ctx->opcode); - int uim = UIM(ctx->opcode); int tofs, bofs; - if (unlikely(!ctx->vsx_enabled)) { - gen_exception(ctx, POWERPC_EXCP_VSXU); - return; - } + REQUIRE_VSX(ctx); - tofs = vsr_full_offset(rt); - bofs = vsr_full_offset(rb); - bofs += uim << MO_32; -#ifndef HOST_WORDS_BIG_ENDIAN + tofs = vsr_full_offset(a->xt); + bofs = vsr_full_offset(a->xb); + bofs += a->uim << MO_32; +#if !HOST_BIG_ENDIAN bofs ^= 8 | 4; #endif tcg_gen_gvec_dup_mem(MO_32, tofs, bofs, 16, 16); + return true; } #define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff)) -static void gen_xxspltib(DisasContext *ctx) +static bool trans_XXSPLTIB(DisasContext *ctx, arg_X_imm8 *a) { - uint8_t uim8 = IMM8(ctx->opcode); - int rt = xT(ctx->opcode); - 
- if (rt < 32) { - if (unlikely(!ctx->vsx_enabled)) { - gen_exception(ctx, POWERPC_EXCP_VSXU); - return; - } + if (a->xt < 32) { + REQUIRE_VSX(ctx); } else { - if (unlikely(!ctx->altivec_enabled)) { - gen_exception(ctx, POWERPC_EXCP_VPU); - return; - } + REQUIRE_VECTOR(ctx); } - tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(rt), 16, 16, uim8); + tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(a->xt), 16, 16, a->imm); + return true; +} + +static bool trans_XXSPLTIW(DisasContext *ctx, arg_8RR_D *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + tcg_gen_gvec_dup_imm(MO_32, vsr_full_offset(a->xt), 16, 16, a->si); + + return true; +} + +static bool trans_XXSPLTIDP(DisasContext *ctx, arg_8RR_D *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + tcg_gen_gvec_dup_imm(MO_64, vsr_full_offset(a->xt), 16, 16, + helper_todouble(a->si)); + return true; +} + +static bool trans_XXSPLTI32DX(DisasContext *ctx, arg_8RR_D_IX *a) +{ + TCGv_i32 imm; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + imm = tcg_constant_i32(a->si); + + tcg_gen_st_i32(imm, cpu_env, + offsetof(CPUPPCState, vsr[a->xt].VsrW(0 + a->ix))); + tcg_gen_st_i32(imm, cpu_env, + offsetof(CPUPPCState, vsr[a->xt].VsrW(2 + a->ix))); + + return true; +} + +static bool trans_LXVKQ(DisasContext *ctx, arg_X_uim5 *a) +{ + static const uint64_t values[32] = { + 0, /* Unspecified */ + 0x3FFF000000000000llu, /* QP +1.0 */ + 0x4000000000000000llu, /* QP +2.0 */ + 0x4000800000000000llu, /* QP +3.0 */ + 0x4001000000000000llu, /* QP +4.0 */ + 0x4001400000000000llu, /* QP +5.0 */ + 0x4001800000000000llu, /* QP +6.0 */ + 0x4001C00000000000llu, /* QP +7.0 */ + 0x7FFF000000000000llu, /* QP +Inf */ + 0x7FFF800000000000llu, /* QP dQNaN */ + 0, /* Unspecified */ + 0, /* Unspecified */ + 0, /* Unspecified */ + 0, /* Unspecified */ + 0, /* Unspecified */ + 0, /* Unspecified */ + 0x8000000000000000llu, /* QP -0.0 */ + 0xBFFF000000000000llu, /* QP -1.0 */ + 0xC000000000000000llu, /* QP -2.0 */ + 0xC000800000000000llu, /* QP -3.0 */ + 0xC001000000000000llu, /* QP -4.0 */ + 0xC001400000000000llu, /* QP -5.0 */ + 0xC001800000000000llu, /* QP -6.0 */ + 0xC001C00000000000llu, /* QP -7.0 */ + 0xFFFF000000000000llu, /* QP -Inf */ + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + if (values[a->uim]) { + set_cpu_vsr(a->xt, tcg_constant_i64(0x0), false); + set_cpu_vsr(a->xt, tcg_constant_i64(values[a->uim]), true); + } else { + gen_invalid(ctx); + } + + return true; +} + +static bool trans_XVTLSBB(DisasContext *ctx, arg_XX2_bf_xb *a) +{ + TCGv_i64 xb, t0, t1, all_true, all_false, mask, zero; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + xb = tcg_temp_new_i64(); + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + all_true = tcg_temp_new_i64(); + all_false = tcg_temp_new_i64(); + mask = tcg_constant_i64(dup_const(MO_8, 1)); + zero = tcg_constant_i64(0); + + get_cpu_vsr(xb, a->xb, true); + tcg_gen_and_i64(t0, mask, xb); + get_cpu_vsr(xb, a->xb, false); + tcg_gen_and_i64(t1, mask, xb); + + tcg_gen_or_i64(all_false, t0, t1); + tcg_gen_and_i64(all_true, t0, t1); + + tcg_gen_setcond_i64(TCG_COND_EQ, all_false, all_false, zero); + tcg_gen_shli_i64(all_false, all_false, 1); + tcg_gen_setcond_i64(TCG_COND_EQ, all_true, all_true, mask); + tcg_gen_shli_i64(all_true, all_true, 3); + + tcg_gen_or_i64(t0, all_false, all_true); + tcg_gen_extrl_i64_i32(cpu_crf[a->bf], t0); + + tcg_temp_free_i64(xb); + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + tcg_temp_free_i64(all_true); + tcg_temp_free_i64(all_false); + + 
return true; } static void gen_xxsldwi(DisasContext *ctx) @@ -1624,40 +1996,40 @@ static void gen_xxsldwi(DisasContext *ctx) switch (SHW(ctx->opcode)) { case 0: { - get_cpu_vsrh(xth, xA(ctx->opcode)); - get_cpu_vsrl(xtl, xA(ctx->opcode)); + get_cpu_vsr(xth, xA(ctx->opcode), true); + get_cpu_vsr(xtl, xA(ctx->opcode), false); break; } case 1: { TCGv_i64 t0 = tcg_temp_new_i64(); - get_cpu_vsrh(xth, xA(ctx->opcode)); + get_cpu_vsr(xth, xA(ctx->opcode), true); tcg_gen_shli_i64(xth, xth, 32); - get_cpu_vsrl(t0, xA(ctx->opcode)); + get_cpu_vsr(t0, xA(ctx->opcode), false); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_or_i64(xth, xth, t0); - get_cpu_vsrl(xtl, xA(ctx->opcode)); + get_cpu_vsr(xtl, xA(ctx->opcode), false); tcg_gen_shli_i64(xtl, xtl, 32); - get_cpu_vsrh(t0, xB(ctx->opcode)); + get_cpu_vsr(t0, xB(ctx->opcode), true); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_or_i64(xtl, xtl, t0); tcg_temp_free_i64(t0); break; } case 2: { - get_cpu_vsrl(xth, xA(ctx->opcode)); - get_cpu_vsrh(xtl, xB(ctx->opcode)); + get_cpu_vsr(xth, xA(ctx->opcode), false); + get_cpu_vsr(xtl, xB(ctx->opcode), true); break; } case 3: { TCGv_i64 t0 = tcg_temp_new_i64(); - get_cpu_vsrl(xth, xA(ctx->opcode)); + get_cpu_vsr(xth, xA(ctx->opcode), false); tcg_gen_shli_i64(xth, xth, 32); - get_cpu_vsrh(t0, xB(ctx->opcode)); + get_cpu_vsr(t0, xB(ctx->opcode), true); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_or_i64(xth, xth, t0); - get_cpu_vsrh(xtl, xB(ctx->opcode)); + get_cpu_vsr(xtl, xB(ctx->opcode), true); tcg_gen_shli_i64(xtl, xtl, 32); - get_cpu_vsrl(t0, xB(ctx->opcode)); + get_cpu_vsr(t0, xB(ctx->opcode), false); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_or_i64(xtl, xtl, t0); tcg_temp_free_i64(t0); @@ -1665,49 +2037,42 @@ static void gen_xxsldwi(DisasContext *ctx) } } - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); } -#define VSX_EXTRACT_INSERT(name) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr xt, xb; \ - TCGv_i32 t0; \ - TCGv_i64 t1; \ - uint8_t uimm = UIMM4(ctx->opcode); \ - \ - if (unlikely(!ctx->vsx_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VSXU); \ - return; \ - } \ - xt = gen_vsr_ptr(xT(ctx->opcode)); \ - xb = gen_vsr_ptr(xB(ctx->opcode)); \ - t0 = tcg_temp_new_i32(); \ - t1 = tcg_temp_new_i64(); \ - /* \ - * uimm > 15 out of bound and for \ - * uimm > 12 handle as per hardware in helper \ - */ \ - if (uimm > 15) { \ - tcg_gen_movi_i64(t1, 0); \ - set_cpu_vsrh(xT(ctx->opcode), t1); \ - set_cpu_vsrl(xT(ctx->opcode), t1); \ - return; \ - } \ - tcg_gen_movi_i32(t0, uimm); \ - gen_helper_##name(cpu_env, xt, xb, t0); \ - tcg_temp_free_ptr(xb); \ - tcg_temp_free_ptr(xt); \ - tcg_temp_free_i32(t0); \ - tcg_temp_free_i64(t1); \ +static bool do_vsx_extract_insert(DisasContext *ctx, arg_XX2_uim *a, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i32)) +{ + TCGv_i64 zero = tcg_constant_i64(0); + TCGv_ptr xt, xb; + + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VSX(ctx); + + /* + * uim > 15 out of bound and for + * uim > 12 handle as per hardware in helper + */ + if (a->uim > 15) { + set_cpu_vsr(a->xt, zero, true); + set_cpu_vsr(a->xt, zero, false); + } else { + xt = gen_vsr_ptr(a->xt); + xb = gen_vsr_ptr(a->xb); + gen_helper(xt, xb, tcg_constant_i32(a->uim)); + tcg_temp_free_ptr(xb); + tcg_temp_free_ptr(xt); + } + + return true; } -VSX_EXTRACT_INSERT(xxextractuw) -VSX_EXTRACT_INSERT(xxinsertw) +TRANS(XXEXTRACTUW, do_vsx_extract_insert, gen_helper_XXEXTRACTUW) 
+TRANS(XXINSERTW, do_vsx_extract_insert, gen_helper_XXINSERTW) #ifdef TARGET_PPC64 static void gen_xsxexpdp(DisasContext *ctx) @@ -1719,7 +2084,7 @@ static void gen_xsxexpdp(DisasContext *ctx) return; } t0 = tcg_temp_new_i64(); - get_cpu_vsrh(t0, xB(ctx->opcode)); + get_cpu_vsr(t0, xB(ctx->opcode), true); tcg_gen_extract_i64(rt, t0, 52, 11); tcg_temp_free_i64(t0); } @@ -1737,12 +2102,12 @@ static void gen_xsxexpqp(DisasContext *ctx) xth = tcg_temp_new_i64(); xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, rB(ctx->opcode) + 32); + get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true); tcg_gen_extract_i64(xth, xbh, 48, 15); - set_cpu_vsrh(rD(ctx->opcode) + 32, xth); + set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); tcg_gen_movi_i64(xtl, 0); - set_cpu_vsrl(rD(ctx->opcode) + 32, xtl); + set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false); tcg_temp_free_i64(xbh); tcg_temp_free_i64(xth); @@ -1766,8 +2131,8 @@ static void gen_xsiexpdp(DisasContext *ctx) tcg_gen_andi_i64(t0, rb, 0x7FF); tcg_gen_shli_i64(t0, t0, 52); tcg_gen_or_i64(xth, xth, t0); - set_cpu_vsrh(xT(ctx->opcode), xth); - /* dword[1] is undefined */ + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); tcg_temp_free_i64(t0); tcg_temp_free_i64(xth); } @@ -1789,19 +2154,19 @@ static void gen_xsiexpqp(DisasContext *ctx) xtl = tcg_temp_new_i64(); xah = tcg_temp_new_i64(); xal = tcg_temp_new_i64(); - get_cpu_vsrh(xah, rA(ctx->opcode) + 32); - get_cpu_vsrl(xal, rA(ctx->opcode) + 32); + get_cpu_vsr(xah, rA(ctx->opcode) + 32, true); + get_cpu_vsr(xal, rA(ctx->opcode) + 32, false); xbh = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, rB(ctx->opcode) + 32); + get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true); t0 = tcg_temp_new_i64(); tcg_gen_andi_i64(xth, xah, 0x8000FFFFFFFFFFFF); tcg_gen_andi_i64(t0, xbh, 0x7FFF); tcg_gen_shli_i64(t0, t0, 48); tcg_gen_or_i64(xth, xth, t0); - set_cpu_vsrh(rD(ctx->opcode) + 32, xth); + set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); tcg_gen_mov_i64(xtl, xal); - set_cpu_vsrl(rD(ctx->opcode) + 32, xtl); + set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false); tcg_temp_free_i64(t0); tcg_temp_free_i64(xth); @@ -1826,12 +2191,12 @@ static void gen_xsxsigdp(DisasContext *ctx) zr = tcg_const_i64(0); nan = tcg_const_i64(2047); - get_cpu_vsrh(t1, xB(ctx->opcode)); + get_cpu_vsr(t1, xB(ctx->opcode), true); tcg_gen_extract_i64(exp, t1, 52, 11); tcg_gen_movi_i64(t0, 0x0010000000000000); tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0); tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); - get_cpu_vsrh(t1, xB(ctx->opcode)); + get_cpu_vsr(t1, xB(ctx->opcode), true); tcg_gen_deposit_i64(rt, t0, t1, 0, 52); tcg_temp_free_i64(t0); @@ -1857,8 +2222,8 @@ static void gen_xsxsigqp(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, rB(ctx->opcode) + 32); - get_cpu_vsrl(xbl, rB(ctx->opcode) + 32); + get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true); + get_cpu_vsr(xbl, rB(ctx->opcode) + 32, false); exp = tcg_temp_new_i64(); t0 = tcg_temp_new_i64(); zr = tcg_const_i64(0); @@ -1869,9 +2234,9 @@ static void gen_xsxsigqp(DisasContext *ctx) tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0); tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); tcg_gen_deposit_i64(xth, t0, xbh, 0, 48); - set_cpu_vsrh(rD(ctx->opcode) + 32, xth); + set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); tcg_gen_mov_i64(xtl, xbl); - set_cpu_vsrl(rD(ctx->opcode) + 32, xtl); + set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false); tcg_temp_free_i64(t0); 
tcg_temp_free_i64(exp); @@ -1904,22 +2269,22 @@ static void gen_xviexpsp(DisasContext *ctx) xal = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xah, xA(ctx->opcode)); - get_cpu_vsrl(xal, xA(ctx->opcode)); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xah, xA(ctx->opcode), true); + get_cpu_vsr(xal, xA(ctx->opcode), false); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); t0 = tcg_temp_new_i64(); tcg_gen_andi_i64(xth, xah, 0x807FFFFF807FFFFF); tcg_gen_andi_i64(t0, xbh, 0xFF000000FF); tcg_gen_shli_i64(t0, t0, 23); tcg_gen_or_i64(xth, xth, t0); - set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_gen_andi_i64(xtl, xal, 0x807FFFFF807FFFFF); tcg_gen_andi_i64(t0, xbl, 0xFF000000FF); tcg_gen_shli_i64(t0, t0, 23); tcg_gen_or_i64(xtl, xtl, t0); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(t0); tcg_temp_free_i64(xth); @@ -1949,16 +2314,16 @@ static void gen_xviexpdp(DisasContext *ctx) xal = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xah, xA(ctx->opcode)); - get_cpu_vsrl(xal, xA(ctx->opcode)); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xah, xA(ctx->opcode), true); + get_cpu_vsr(xal, xA(ctx->opcode), false); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); tcg_gen_deposit_i64(xth, xah, xbh, 52, 11); - set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_gen_deposit_i64(xtl, xal, xbl, 52, 11); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -1983,15 +2348,15 @@ static void gen_xvxexpsp(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); tcg_gen_shri_i64(xth, xbh, 23); tcg_gen_andi_i64(xth, xth, 0xFF000000FF); - set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_gen_shri_i64(xtl, xbl, 23); tcg_gen_andi_i64(xtl, xtl, 0xFF000000FF); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -2014,13 +2379,13 @@ static void gen_xvxexpdp(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); tcg_gen_extract_i64(xth, xbh, 52, 11); - set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_gen_extract_i64(xtl, xbl, 52, 11); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -2028,7 +2393,23 @@ static void gen_xvxexpdp(DisasContext *ctx) tcg_temp_free_i64(xbl); } -GEN_VSX_HELPER_X2(xvxsigsp, 0x00, 0x04, 0, PPC2_ISA300) +static bool trans_XVXSIGSP(DisasContext *ctx, arg_XX2 *a) +{ + TCGv_ptr t, b; + + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VSX(ctx); + + t = gen_vsr_ptr(a->xt); + b = gen_vsr_ptr(a->xb); + + gen_helper_XVXSIGSP(t, b); + + tcg_temp_free_ptr(t); + tcg_temp_free_ptr(b); + + return true; +} static void gen_xvxsigdp(DisasContext *ctx) { 
@@ -2046,8 +2427,8 @@ static void gen_xvxsigdp(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); exp = tcg_temp_new_i64(); t0 = tcg_temp_new_i64(); zr = tcg_const_i64(0); @@ -2058,14 +2439,14 @@ static void gen_xvxsigdp(DisasContext *ctx) tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0); tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); tcg_gen_deposit_i64(xth, t0, xbh, 0, 52); - set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_gen_extract_i64(exp, xbl, 52, 11); tcg_gen_movi_i64(t0, 0x0010000000000000); tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0); tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); tcg_gen_deposit_i64(xtl, t0, xbl, 0, 52); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(t0); tcg_temp_free_i64(exp); @@ -2077,6 +2458,736 @@ static void gen_xvxsigdp(DisasContext *ctx) tcg_temp_free_i64(xbl); } +static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ, + int rt, bool store, bool paired) +{ + TCGv ea; + TCGv_i64 xt; + MemOp mop; + int rt1, rt2; + + xt = tcg_temp_new_i64(); + + mop = DEF_MEMOP(MO_UQ); + + gen_set_access_type(ctx, ACCESS_INT); + ea = do_ea_calc(ctx, ra, displ); + + if (paired && ctx->le_mode) { + rt1 = rt + 1; + rt2 = rt; + } else { + rt1 = rt; + rt2 = rt + 1; + } + + if (store) { + get_cpu_vsr(xt, rt1, !ctx->le_mode); + tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); + gen_addr_add(ctx, ea, ea, 8); + get_cpu_vsr(xt, rt1, ctx->le_mode); + tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); + if (paired) { + gen_addr_add(ctx, ea, ea, 8); + get_cpu_vsr(xt, rt2, !ctx->le_mode); + tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); + gen_addr_add(ctx, ea, ea, 8); + get_cpu_vsr(xt, rt2, ctx->le_mode); + tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); + } + } else { + tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); + set_cpu_vsr(rt1, xt, !ctx->le_mode); + gen_addr_add(ctx, ea, ea, 8); + tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); + set_cpu_vsr(rt1, xt, ctx->le_mode); + if (paired) { + gen_addr_add(ctx, ea, ea, 8); + tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); + set_cpu_vsr(rt2, xt, !ctx->le_mode); + gen_addr_add(ctx, ea, ea, 8); + tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); + set_cpu_vsr(rt2, xt, ctx->le_mode); + } + } + + tcg_temp_free(ea); + tcg_temp_free_i64(xt); + return true; +} + +static bool do_lstxv_D(DisasContext *ctx, arg_D *a, bool store, bool paired) +{ + if (paired || a->rt >= 32) { + REQUIRE_VSX(ctx); + } else { + REQUIRE_VECTOR(ctx); + } + + return do_lstxv(ctx, a->ra, tcg_constant_tl(a->si), a->rt, store, paired); +} + +static bool do_lstxv_PLS_D(DisasContext *ctx, arg_PLS_D *a, + bool store, bool paired) +{ + arg_D d; + REQUIRE_VSX(ctx); + + if (!resolve_PLS_D(ctx, &d, a)) { + return true; + } + + return do_lstxv(ctx, d.ra, tcg_constant_tl(d.si), d.rt, store, paired); +} + +static bool do_lstxv_X(DisasContext *ctx, arg_X *a, bool store, bool paired) +{ + if (paired || a->rt >= 32) { + REQUIRE_VSX(ctx); + } else { + REQUIRE_VECTOR(ctx); + } + + return do_lstxv(ctx, a->ra, cpu_gpr[a->rb], a->rt, store, paired); +} + +static bool do_lstxsd(DisasContext *ctx, int rt, int ra, TCGv displ, bool store) +{ + TCGv ea; + TCGv_i64 xt; + MemOp mop; + + if (store) { + REQUIRE_VECTOR(ctx); + } else { + REQUIRE_VSX(ctx); + } + + xt = 
tcg_temp_new_i64(); + mop = DEF_MEMOP(MO_UQ); + + gen_set_access_type(ctx, ACCESS_INT); + ea = do_ea_calc(ctx, ra, displ); + + if (store) { + get_cpu_vsr(xt, rt + 32, true); + tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); + } else { + tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); + set_cpu_vsr(rt + 32, xt, true); + set_cpu_vsr(rt + 32, tcg_constant_i64(0), false); + } + + tcg_temp_free(ea); + tcg_temp_free_i64(xt); + + return true; +} + +static bool do_lstxsd_DS(DisasContext *ctx, arg_D *a, bool store) +{ + return do_lstxsd(ctx, a->rt, a->ra, tcg_constant_tl(a->si), store); +} + +static bool do_plstxsd_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store) +{ + arg_D d; + + if (!resolve_PLS_D(ctx, &d, a)) { + return true; + } + + return do_lstxsd(ctx, d.rt, d.ra, tcg_constant_tl(d.si), store); +} + +static bool do_lstxssp(DisasContext *ctx, int rt, int ra, TCGv displ, bool store) +{ + TCGv ea; + TCGv_i64 xt; + + REQUIRE_VECTOR(ctx); + + xt = tcg_temp_new_i64(); + + gen_set_access_type(ctx, ACCESS_INT); + ea = do_ea_calc(ctx, ra, displ); + + if (store) { + get_cpu_vsr(xt, rt + 32, true); + gen_qemu_st32fs(ctx, xt, ea); + } else { + gen_qemu_ld32fs(ctx, xt, ea); + set_cpu_vsr(rt + 32, xt, true); + set_cpu_vsr(rt + 32, tcg_constant_i64(0), false); + } + + tcg_temp_free(ea); + tcg_temp_free_i64(xt); + + return true; +} + +static bool do_lstxssp_DS(DisasContext *ctx, arg_D *a, bool store) +{ + return do_lstxssp(ctx, a->rt, a->ra, tcg_constant_tl(a->si), store); +} + +static bool do_plstxssp_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store) +{ + arg_D d; + + if (!resolve_PLS_D(ctx, &d, a)) { + return true; + } + + return do_lstxssp(ctx, d.rt, d.ra, tcg_constant_tl(d.si), store); +} + +TRANS_FLAGS2(ISA300, LXSD, do_lstxsd_DS, false) +TRANS_FLAGS2(ISA300, STXSD, do_lstxsd_DS, true) +TRANS_FLAGS2(ISA300, LXSSP, do_lstxssp_DS, false) +TRANS_FLAGS2(ISA300, STXSSP, do_lstxssp_DS, true) +TRANS_FLAGS2(ISA300, STXV, do_lstxv_D, true, false) +TRANS_FLAGS2(ISA300, LXV, do_lstxv_D, false, false) +TRANS_FLAGS2(ISA310, STXVP, do_lstxv_D, true, true) +TRANS_FLAGS2(ISA310, LXVP, do_lstxv_D, false, true) +TRANS_FLAGS2(ISA300, STXVX, do_lstxv_X, true, false) +TRANS_FLAGS2(ISA300, LXVX, do_lstxv_X, false, false) +TRANS_FLAGS2(ISA310, STXVPX, do_lstxv_X, true, true) +TRANS_FLAGS2(ISA310, LXVPX, do_lstxv_X, false, true) +TRANS64_FLAGS2(ISA310, PLXSD, do_plstxsd_PLS_D, false) +TRANS64_FLAGS2(ISA310, PSTXSD, do_plstxsd_PLS_D, true) +TRANS64_FLAGS2(ISA310, PLXSSP, do_plstxssp_PLS_D, false) +TRANS64_FLAGS2(ISA310, PSTXSSP, do_plstxssp_PLS_D, true) +TRANS64_FLAGS2(ISA310, PSTXV, do_lstxv_PLS_D, true, false) +TRANS64_FLAGS2(ISA310, PLXV, do_lstxv_PLS_D, false, false) +TRANS64_FLAGS2(ISA310, PSTXVP, do_lstxv_PLS_D, true, true) +TRANS64_FLAGS2(ISA310, PLXVP, do_lstxv_PLS_D, false, true) + +static bool do_lstrm(DisasContext *ctx, arg_X *a, MemOp mop, bool store) +{ + TCGv ea; + TCGv_i64 xt; + + REQUIRE_VSX(ctx); + + xt = tcg_temp_new_i64(); + + gen_set_access_type(ctx, ACCESS_INT); + ea = do_ea_calc(ctx, a->ra , cpu_gpr[a->rb]); + + if (store) { + get_cpu_vsr(xt, a->rt, false); + tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); + } else { + tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); + set_cpu_vsr(a->rt, xt, false); + set_cpu_vsr(a->rt, tcg_constant_i64(0), true); + } + + tcg_temp_free(ea); + tcg_temp_free_i64(xt); + return true; +} + +TRANS_FLAGS2(ISA310, LXVRBX, do_lstrm, DEF_MEMOP(MO_UB), false) +TRANS_FLAGS2(ISA310, LXVRHX, do_lstrm, DEF_MEMOP(MO_UW), false) +TRANS_FLAGS2(ISA310, LXVRWX, do_lstrm, DEF_MEMOP(MO_UL), 
false) +TRANS_FLAGS2(ISA310, LXVRDX, do_lstrm, DEF_MEMOP(MO_UQ), false) +TRANS_FLAGS2(ISA310, STXVRBX, do_lstrm, DEF_MEMOP(MO_UB), true) +TRANS_FLAGS2(ISA310, STXVRHX, do_lstrm, DEF_MEMOP(MO_UW), true) +TRANS_FLAGS2(ISA310, STXVRWX, do_lstrm, DEF_MEMOP(MO_UL), true) +TRANS_FLAGS2(ISA310, STXVRDX, do_lstrm, DEF_MEMOP(MO_UQ), true) + +static void gen_xxeval_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c, + int64_t imm) +{ + /* + * Instead of processing imm bit-by-bit, we'll skip the computation of + * conjunctions whose corresponding bit is unset. + */ + int bit; + TCGv_i64 conj, disj; + + conj = tcg_temp_new_i64(); + disj = tcg_const_i64(0); + + /* Iterate over set bits from the least to the most significant bit */ + while (imm) { + /* + * Get the next bit to be processed with ctz64. Invert the result of + * ctz64 to match the indexing used by PowerISA. + */ + bit = 7 - ctz64(imm); + if (bit & 0x4) { + tcg_gen_mov_i64(conj, a); + } else { + tcg_gen_not_i64(conj, a); + } + if (bit & 0x2) { + tcg_gen_and_i64(conj, conj, b); + } else { + tcg_gen_andc_i64(conj, conj, b); + } + if (bit & 0x1) { + tcg_gen_and_i64(conj, conj, c); + } else { + tcg_gen_andc_i64(conj, conj, c); + } + tcg_gen_or_i64(disj, disj, conj); + + /* Unset the least significant bit that is set */ + imm &= imm - 1; + } + + tcg_gen_mov_i64(t, disj); + + tcg_temp_free_i64(conj); + tcg_temp_free_i64(disj); +} + +static void gen_xxeval_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, + TCGv_vec c, int64_t imm) +{ + /* + * Instead of processing imm bit-by-bit, we'll skip the computation of + * conjunctions whose corresponding bit is unset. + */ + int bit; + TCGv_vec disj, conj; + + disj = tcg_const_zeros_vec_matching(t); + conj = tcg_temp_new_vec_matching(t); + + /* Iterate over set bits from the least to the most significant bit */ + while (imm) { + /* + * Get the next bit to be processed with ctz64. Invert the result of + * ctz64 to match the indexing used by PowerISA. 
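+ * For example, when the least significant set bit of imm is bit 0,
+ * ctz64 returns 0 and bit becomes 7 (0b111), which selects the
+ * conjunction A & B & C. An imm of 0b00000011 therefore reduces to
+ * (A & B & C) | (A & B & ~C) = A & B, matching the "and(B,A)"
+ * shortcut in trans_XXEVAL below.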
+ */ + bit = 7 - ctz64(imm); + if (bit & 0x4) { + tcg_gen_mov_vec(conj, a); + } else { + tcg_gen_not_vec(vece, conj, a); + } + if (bit & 0x2) { + tcg_gen_and_vec(vece, conj, conj, b); + } else { + tcg_gen_andc_vec(vece, conj, conj, b); + } + if (bit & 0x1) { + tcg_gen_and_vec(vece, conj, conj, c); + } else { + tcg_gen_andc_vec(vece, conj, conj, c); + } + tcg_gen_or_vec(vece, disj, disj, conj); + + /* Unset the least significant bit that is set */ + imm &= imm - 1; + } + + tcg_gen_mov_vec(t, disj); + + tcg_temp_free_vec(disj); + tcg_temp_free_vec(conj); +} + +static bool trans_XXEVAL(DisasContext *ctx, arg_8RR_XX4_imm *a) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_andc_vec, 0 + }; + static const GVecGen4i op = { + .fniv = gen_xxeval_vec, + .fno = gen_helper_XXEVAL, + .fni8 = gen_xxeval_i64, + .opt_opc = vecop_list, + .vece = MO_64 + }; + int xt = vsr_full_offset(a->xt), xa = vsr_full_offset(a->xa), + xb = vsr_full_offset(a->xb), xc = vsr_full_offset(a->xc); + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + /* Equivalent functions that can be implemented with a single gen_gvec */ + switch (a->imm) { + case 0b00000000: /* true */ + set_cpu_vsr(a->xt, tcg_constant_i64(0), true); + set_cpu_vsr(a->xt, tcg_constant_i64(0), false); + break; + case 0b00000011: /* and(B,A) */ + tcg_gen_gvec_and(MO_64, xt, xb, xa, 16, 16); + break; + case 0b00000101: /* and(C,A) */ + tcg_gen_gvec_and(MO_64, xt, xc, xa, 16, 16); + break; + case 0b00001111: /* A */ + tcg_gen_gvec_mov(MO_64, xt, xa, 16, 16); + break; + case 0b00010001: /* and(C,B) */ + tcg_gen_gvec_and(MO_64, xt, xc, xb, 16, 16); + break; + case 0b00011011: /* C?B:A */ + tcg_gen_gvec_bitsel(MO_64, xt, xc, xb, xa, 16, 16); + break; + case 0b00011101: /* B?C:A */ + tcg_gen_gvec_bitsel(MO_64, xt, xb, xc, xa, 16, 16); + break; + case 0b00100111: /* C?A:B */ + tcg_gen_gvec_bitsel(MO_64, xt, xc, xa, xb, 16, 16); + break; + case 0b00110011: /* B */ + tcg_gen_gvec_mov(MO_64, xt, xb, 16, 16); + break; + case 0b00110101: /* A?C:B */ + tcg_gen_gvec_bitsel(MO_64, xt, xa, xc, xb, 16, 16); + break; + case 0b00111100: /* xor(B,A) */ + tcg_gen_gvec_xor(MO_64, xt, xb, xa, 16, 16); + break; + case 0b00111111: /* or(B,A) */ + tcg_gen_gvec_or(MO_64, xt, xb, xa, 16, 16); + break; + case 0b01000111: /* B?A:C */ + tcg_gen_gvec_bitsel(MO_64, xt, xb, xa, xc, 16, 16); + break; + case 0b01010011: /* A?B:C */ + tcg_gen_gvec_bitsel(MO_64, xt, xa, xb, xc, 16, 16); + break; + case 0b01010101: /* C */ + tcg_gen_gvec_mov(MO_64, xt, xc, 16, 16); + break; + case 0b01011010: /* xor(C,A) */ + tcg_gen_gvec_xor(MO_64, xt, xc, xa, 16, 16); + break; + case 0b01011111: /* or(C,A) */ + tcg_gen_gvec_or(MO_64, xt, xc, xa, 16, 16); + break; + case 0b01100110: /* xor(C,B) */ + tcg_gen_gvec_xor(MO_64, xt, xc, xb, 16, 16); + break; + case 0b01110111: /* or(C,B) */ + tcg_gen_gvec_or(MO_64, xt, xc, xb, 16, 16); + break; + case 0b10001000: /* nor(C,B) */ + tcg_gen_gvec_nor(MO_64, xt, xc, xb, 16, 16); + break; + case 0b10011001: /* eqv(C,B) */ + tcg_gen_gvec_eqv(MO_64, xt, xc, xb, 16, 16); + break; + case 0b10100000: /* nor(C,A) */ + tcg_gen_gvec_nor(MO_64, xt, xc, xa, 16, 16); + break; + case 0b10100101: /* eqv(C,A) */ + tcg_gen_gvec_eqv(MO_64, xt, xc, xa, 16, 16); + break; + case 0b10101010: /* not(C) */ + tcg_gen_gvec_not(MO_64, xt, xc, 16, 16); + break; + case 0b11000000: /* nor(B,A) */ + tcg_gen_gvec_nor(MO_64, xt, xb, xa, 16, 16); + break; + case 0b11000011: /* eqv(B,A) */ + tcg_gen_gvec_eqv(MO_64, xt, xb, xa, 16, 16); + break; + case 0b11001100: /* not(B) */ + 
tcg_gen_gvec_not(MO_64, xt, xb, 16, 16); + break; + case 0b11101110: /* nand(C,B) */ + tcg_gen_gvec_nand(MO_64, xt, xc, xb, 16, 16); + break; + case 0b11110000: /* not(A) */ + tcg_gen_gvec_not(MO_64, xt, xa, 16, 16); + break; + case 0b11111010: /* nand(C,A) */ + tcg_gen_gvec_nand(MO_64, xt, xc, xa, 16, 16); + break; + case 0b11111100: /* nand(B,A) */ + tcg_gen_gvec_nand(MO_64, xt, xb, xa, 16, 16); + break; + case 0b11111111: /* true */ + set_cpu_vsr(a->xt, tcg_constant_i64(-1), true); + set_cpu_vsr(a->xt, tcg_constant_i64(-1), false); + break; + default: + /* Fallback to compute all conjunctions/disjunctions */ + tcg_gen_gvec_4i(xt, xa, xb, xc, 16, 16, a->imm, &op); + } + + return true; +} + +static void gen_xxblendv_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, + TCGv_vec c) +{ + TCGv_vec tmp = tcg_temp_new_vec_matching(c); + tcg_gen_sari_vec(vece, tmp, c, (8 << vece) - 1); + tcg_gen_bitsel_vec(vece, t, tmp, b, a); + tcg_temp_free_vec(tmp); +} + +static bool do_xxblendv(DisasContext *ctx, arg_8RR_XX4 *a, unsigned vece) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_sari_vec, 0 + }; + static const GVecGen4 ops[4] = { + { + .fniv = gen_xxblendv_vec, + .fno = gen_helper_XXBLENDVB, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_xxblendv_vec, + .fno = gen_helper_XXBLENDVH, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_xxblendv_vec, + .fno = gen_helper_XXBLENDVW, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_xxblendv_vec, + .fno = gen_helper_XXBLENDVD, + .opt_opc = vecop_list, + .vece = MO_64 + } + }; + + REQUIRE_VSX(ctx); + + tcg_gen_gvec_4(vsr_full_offset(a->xt), vsr_full_offset(a->xa), + vsr_full_offset(a->xb), vsr_full_offset(a->xc), + 16, 16, &ops[vece]); + + return true; +} + +TRANS(XXBLENDVB, do_xxblendv, MO_8) +TRANS(XXBLENDVH, do_xxblendv, MO_16) +TRANS(XXBLENDVW, do_xxblendv, MO_32) +TRANS(XXBLENDVD, do_xxblendv, MO_64) + +static bool do_helper_XX3(DisasContext *ctx, arg_XX3 *a, + void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr)) +{ + TCGv_ptr xt, xa, xb; + + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VSX(ctx); + + xt = gen_vsr_ptr(a->xt); + xa = gen_vsr_ptr(a->xa); + xb = gen_vsr_ptr(a->xb); + + helper(cpu_env, xt, xa, xb); + + tcg_temp_free_ptr(xt); + tcg_temp_free_ptr(xa); + tcg_temp_free_ptr(xb); + + return true; +} + +TRANS(XSCMPEQDP, do_helper_XX3, gen_helper_XSCMPEQDP) +TRANS(XSCMPGEDP, do_helper_XX3, gen_helper_XSCMPGEDP) +TRANS(XSCMPGTDP, do_helper_XX3, gen_helper_XSCMPGTDP) +TRANS(XSMAXCDP, do_helper_XX3, gen_helper_XSMAXCDP) +TRANS(XSMINCDP, do_helper_XX3, gen_helper_XSMINCDP) +TRANS(XSMAXJDP, do_helper_XX3, gen_helper_XSMAXJDP) +TRANS(XSMINJDP, do_helper_XX3, gen_helper_XSMINJDP) + +static bool do_helper_X(arg_X *a, + void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr)) +{ + TCGv_ptr rt, ra, rb; + + rt = gen_avr_ptr(a->rt); + ra = gen_avr_ptr(a->ra); + rb = gen_avr_ptr(a->rb); + + helper(cpu_env, rt, ra, rb); + + tcg_temp_free_ptr(rt); + tcg_temp_free_ptr(ra); + tcg_temp_free_ptr(rb); + + return true; +} + +static bool do_xscmpqp(DisasContext *ctx, arg_X *a, + void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr)) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + return do_helper_X(a, helper); +} + +TRANS(XSCMPEQQP, do_xscmpqp, gen_helper_XSCMPEQQP) +TRANS(XSCMPGEQP, do_xscmpqp, gen_helper_XSCMPGEQP) +TRANS(XSCMPGTQP, do_xscmpqp, gen_helper_XSCMPGTQP) +TRANS(XSMAXCQP, do_xscmpqp, gen_helper_XSMAXCQP) +TRANS(XSMINCQP, do_xscmpqp, gen_helper_XSMINCQP) + +static bool 
trans_XVCVSPBF16(DisasContext *ctx, arg_XX2 *a) +{ + TCGv_ptr xt, xb; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + xt = gen_vsr_ptr(a->xt); + xb = gen_vsr_ptr(a->xb); + + gen_helper_XVCVSPBF16(cpu_env, xt, xb); + + tcg_temp_free_ptr(xt); + tcg_temp_free_ptr(xb); + + return true; +} + +static bool trans_XVCVBF16SPN(DisasContext *ctx, arg_XX2 *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + tcg_gen_gvec_shli(MO_32, vsr_full_offset(a->xt), vsr_full_offset(a->xb), + 16, 16, 16); + + return true; +} + + /* + * The PowerISA 3.1 mentions that for the current version of the + * architecture, "the hardware implementation provides the effect of + * ACC[i] and VSRs 4*i to 4*i + 3 logically containing the same data" + * and "The Accumulators introduce no new logical state at this time" + * (page 501). For now it seems unnecessary to create new structures, + * so ACC[i] is the same as VSRs 4*i to 4*i+3 and therefore + * move to and from accumulators are no-ops. + */ +static bool trans_XXMFACC(DisasContext *ctx, arg_X_a *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + return true; +} + +static bool trans_XXMTACC(DisasContext *ctx, arg_X_a *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + return true; +} + +static bool trans_XXSETACCZ(DisasContext *ctx, arg_X_a *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + tcg_gen_gvec_dup_imm(MO_64, acc_full_offset(a->ra), 64, 64, 0); + return true; +} + +static bool do_ger(DisasContext *ctx, arg_MMIRR_XX3 *a, + void (*helper)(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32)) +{ + uint32_t mask; + TCGv_ptr xt, xa, xb; + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + if (unlikely((a->xa / 4 == a->xt) || (a->xb / 4 == a->xt))) { + gen_invalid(ctx); + return true; + } + + xt = gen_acc_ptr(a->xt); + xa = gen_vsr_ptr(a->xa); + xb = gen_vsr_ptr(a->xb); + + mask = ger_pack_masks(a->pmsk, a->ymsk, a->xmsk); + helper(cpu_env, xa, xb, xt, tcg_constant_i32(mask)); + tcg_temp_free_ptr(xt); + tcg_temp_free_ptr(xa); + tcg_temp_free_ptr(xb); + return true; +} + +TRANS(XVI4GER8, do_ger, gen_helper_XVI4GER8) +TRANS(XVI4GER8PP, do_ger, gen_helper_XVI4GER8PP) +TRANS(XVI8GER4, do_ger, gen_helper_XVI8GER4) +TRANS(XVI8GER4PP, do_ger, gen_helper_XVI8GER4PP) +TRANS(XVI8GER4SPP, do_ger, gen_helper_XVI8GER4SPP) +TRANS(XVI16GER2, do_ger, gen_helper_XVI16GER2) +TRANS(XVI16GER2PP, do_ger, gen_helper_XVI16GER2PP) +TRANS(XVI16GER2S, do_ger, gen_helper_XVI16GER2S) +TRANS(XVI16GER2SPP, do_ger, gen_helper_XVI16GER2SPP) + +TRANS64(PMXVI4GER8, do_ger, gen_helper_XVI4GER8) +TRANS64(PMXVI4GER8PP, do_ger, gen_helper_XVI4GER8PP) +TRANS64(PMXVI8GER4, do_ger, gen_helper_XVI8GER4) +TRANS64(PMXVI8GER4PP, do_ger, gen_helper_XVI8GER4PP) +TRANS64(PMXVI8GER4SPP, do_ger, gen_helper_XVI8GER4SPP) +TRANS64(PMXVI16GER2, do_ger, gen_helper_XVI16GER2) +TRANS64(PMXVI16GER2PP, do_ger, gen_helper_XVI16GER2PP) +TRANS64(PMXVI16GER2S, do_ger, gen_helper_XVI16GER2S) +TRANS64(PMXVI16GER2SPP, do_ger, gen_helper_XVI16GER2SPP) + +TRANS(XVBF16GER2, do_ger, gen_helper_XVBF16GER2) +TRANS(XVBF16GER2PP, do_ger, gen_helper_XVBF16GER2PP) +TRANS(XVBF16GER2PN, do_ger, gen_helper_XVBF16GER2PN) +TRANS(XVBF16GER2NP, do_ger, gen_helper_XVBF16GER2NP) +TRANS(XVBF16GER2NN, do_ger, gen_helper_XVBF16GER2NN) + +TRANS(XVF16GER2, do_ger, gen_helper_XVF16GER2) +TRANS(XVF16GER2PP, do_ger, gen_helper_XVF16GER2PP) +TRANS(XVF16GER2PN, do_ger, gen_helper_XVF16GER2PN) +TRANS(XVF16GER2NP, do_ger, gen_helper_XVF16GER2NP) +TRANS(XVF16GER2NN, do_ger, 
gen_helper_XVF16GER2NN) + +TRANS(XVF32GER, do_ger, gen_helper_XVF32GER) +TRANS(XVF32GERPP, do_ger, gen_helper_XVF32GERPP) +TRANS(XVF32GERPN, do_ger, gen_helper_XVF32GERPN) +TRANS(XVF32GERNP, do_ger, gen_helper_XVF32GERNP) +TRANS(XVF32GERNN, do_ger, gen_helper_XVF32GERNN) + +TRANS(XVF64GER, do_ger, gen_helper_XVF64GER) +TRANS(XVF64GERPP, do_ger, gen_helper_XVF64GERPP) +TRANS(XVF64GERPN, do_ger, gen_helper_XVF64GERPN) +TRANS(XVF64GERNP, do_ger, gen_helper_XVF64GERNP) +TRANS(XVF64GERNN, do_ger, gen_helper_XVF64GERNN) + +TRANS64(PMXVBF16GER2, do_ger, gen_helper_XVBF16GER2) +TRANS64(PMXVBF16GER2PP, do_ger, gen_helper_XVBF16GER2PP) +TRANS64(PMXVBF16GER2PN, do_ger, gen_helper_XVBF16GER2PN) +TRANS64(PMXVBF16GER2NP, do_ger, gen_helper_XVBF16GER2NP) +TRANS64(PMXVBF16GER2NN, do_ger, gen_helper_XVBF16GER2NN) + +TRANS64(PMXVF16GER2, do_ger, gen_helper_XVF16GER2) +TRANS64(PMXVF16GER2PP, do_ger, gen_helper_XVF16GER2PP) +TRANS64(PMXVF16GER2PN, do_ger, gen_helper_XVF16GER2PN) +TRANS64(PMXVF16GER2NP, do_ger, gen_helper_XVF16GER2NP) +TRANS64(PMXVF16GER2NN, do_ger, gen_helper_XVF16GER2NN) + +TRANS64(PMXVF32GER, do_ger, gen_helper_XVF32GER) +TRANS64(PMXVF32GERPP, do_ger, gen_helper_XVF32GERPP) +TRANS64(PMXVF32GERPN, do_ger, gen_helper_XVF32GERPN) +TRANS64(PMXVF32GERNP, do_ger, gen_helper_XVF32GERNP) +TRANS64(PMXVF32GERNN, do_ger, gen_helper_XVF32GERNN) + +TRANS64(PMXVF64GER, do_ger, gen_helper_XVF64GER) +TRANS64(PMXVF64GERPP, do_ger, gen_helper_XVF64GERPP) +TRANS64(PMXVF64GERPN, do_ger, gen_helper_XVF64GERPN) +TRANS64(PMXVF64GERNP, do_ger, gen_helper_XVF64GERNP) +TRANS64(PMXVF64GERNN, do_ger, gen_helper_XVF64GERNN) + #undef GEN_XX2FORM #undef GEN_XX3FORM #undef GEN_XX2IFORM diff --git a/target/ppc/translate/vsx-ops.c.inc b/target/ppc/translate/vsx-ops.c.inc index 1d41beef26..a3ba094d62 100644 --- a/target/ppc/translate/vsx-ops.c.inc +++ b/target/ppc/translate/vsx-ops.c.inc @@ -10,7 +10,6 @@ GEN_HANDLER_E(lxvdsx, 0x1F, 0x0C, 0x0A, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(lxvw4x, 0x1F, 0x0C, 0x18, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(lxvh8x, 0x1F, 0x0C, 0x19, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(lxvb16x, 0x1F, 0x0C, 0x1B, 0, PPC_NONE, PPC2_ISA300), -GEN_HANDLER_E(lxvx, 0x1F, 0x0C, 0x08, 0x00000040, PPC_NONE, PPC2_ISA300), #if defined(TARGET_PPC64) GEN_HANDLER_E(lxvl, 0x1F, 0x0D, 0x08, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(lxvll, 0x1F, 0x0D, 0x09, 0, PPC_NONE, PPC2_ISA300), @@ -25,7 +24,6 @@ GEN_HANDLER_E(stxvd2x, 0x1F, 0xC, 0x1E, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(stxvw4x, 0x1F, 0xC, 0x1C, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(stxvh8x, 0x1F, 0x0C, 0x1D, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(stxvb16x, 0x1F, 0x0C, 0x1F, 0, PPC_NONE, PPC2_ISA300), -GEN_HANDLER_E(stxvx, 0x1F, 0x0C, 0x0C, 0, PPC_NONE, PPC2_ISA300), #if defined(TARGET_PPC64) GEN_HANDLER_E(stxvl, 0x1F, 0x0D, 0x0C, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(stxvll, 0x1F, 0x0D, 0x0D, 0, PPC_NONE, PPC2_ISA300), @@ -135,7 +133,6 @@ GEN_VSX_XFORM_300_EO(xsnabsqp, 0x04, 0x19, 0x08, 0x00000001), GEN_VSX_XFORM_300_EO(xsnegqp, 0x04, 0x19, 0x10, 0x00000001), GEN_VSX_XFORM_300(xscpsgnqp, 0x04, 0x03, 0x00000001), GEN_VSX_XFORM_300_EO(xscvdpqp, 0x04, 0x1A, 0x16, 0x00000001), -GEN_VSX_XFORM_300_EO(xscvqpdp, 0x04, 0x1A, 0x14, 0x0), GEN_VSX_XFORM_300_EO(xscvqpsdz, 0x04, 0x1A, 0x19, 0x00000001), GEN_VSX_XFORM_300_EO(xscvqpswz, 0x04, 0x1A, 0x09, 0x00000001), GEN_VSX_XFORM_300_EO(xscvqpudz, 0x04, 0x1A, 0x11, 0x00000001), @@ -150,33 +147,11 @@ GEN_HANDLER_E(xsiexpdp, 0x3C, 0x16, 0x1C, 0, PPC_NONE, PPC2_ISA300), GEN_VSX_XFORM_300(xsiexpqp, 0x4, 0x1B, 0x00000001), 
#endif -GEN_XX2FORM(xststdcdp, 0x14, 0x16, PPC2_ISA300), -GEN_XX2FORM(xststdcsp, 0x14, 0x12, PPC2_ISA300), -GEN_VSX_XFORM_300(xststdcqp, 0x04, 0x16, 0x00000001), - GEN_XX3FORM(xviexpsp, 0x00, 0x1B, PPC2_ISA300), GEN_XX3FORM(xviexpdp, 0x00, 0x1F, PPC2_ISA300), GEN_XX2FORM_EO(xvxexpdp, 0x16, 0x1D, 0x00, PPC2_ISA300), GEN_XX2FORM_EO(xvxsigdp, 0x16, 0x1D, 0x01, PPC2_ISA300), GEN_XX2FORM_EO(xvxexpsp, 0x16, 0x1D, 0x08, PPC2_ISA300), -GEN_XX2FORM_EO(xvxsigsp, 0x16, 0x1D, 0x09, PPC2_ISA300), - -/* DCMX = bit[25] << 6 | bit[29] << 5 | bit[11:15] */ -#define GEN_XX2FORM_DCMX(name, opc2, opc3, fl2) \ -GEN_XX3FORM(name, opc2, opc3 | 0, fl2), \ -GEN_XX3FORM(name, opc2, opc3 | 1, fl2) - -GEN_XX2FORM_DCMX(xvtstdcdp, 0x14, 0x1E, PPC2_ISA300), -GEN_XX2FORM_DCMX(xvtstdcsp, 0x14, 0x1A, PPC2_ISA300), - -GEN_XX2FORM(xvabsdp, 0x12, 0x1D, PPC2_VSX), -GEN_XX2FORM(xvnabsdp, 0x12, 0x1E, PPC2_VSX), -GEN_XX2FORM(xvnegdp, 0x12, 0x1F, PPC2_VSX), -GEN_XX3FORM(xvcpsgndp, 0x00, 0x1E, PPC2_VSX), -GEN_XX2FORM(xvabssp, 0x12, 0x19, PPC2_VSX), -GEN_XX2FORM(xvnabssp, 0x12, 0x1A, PPC2_VSX), -GEN_XX2FORM(xvnegsp, 0x12, 0x1B, PPC2_VSX), -GEN_XX3FORM(xvcpsgnsp, 0x00, 0x1A, PPC2_VSX), GEN_XX3FORM(xsadddp, 0x00, 0x04, PPC2_VSX), GEN_VSX_XFORM_300(xsaddqp, 0x04, 0x00, 0x0), @@ -189,18 +164,6 @@ GEN_XX2FORM(xssqrtdp, 0x16, 0x04, PPC2_VSX), GEN_XX2FORM(xsrsqrtedp, 0x14, 0x04, PPC2_VSX), GEN_XX3FORM(xstdivdp, 0x14, 0x07, PPC2_VSX), GEN_XX2FORM(xstsqrtdp, 0x14, 0x06, PPC2_VSX), -GEN_XX3FORM_NAME(xsmadddp, "xsmaddadp", 0x04, 0x04, PPC2_VSX), -GEN_XX3FORM_NAME(xsmadddp, "xsmaddmdp", 0x04, 0x05, PPC2_VSX), -GEN_XX3FORM_NAME(xsmsubdp, "xsmsubadp", 0x04, 0x06, PPC2_VSX), -GEN_XX3FORM_NAME(xsmsubdp, "xsmsubmdp", 0x04, 0x07, PPC2_VSX), -GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddadp", 0x04, 0x14, PPC2_VSX), -GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddmdp", 0x04, 0x15, PPC2_VSX), -GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubadp", 0x04, 0x16, PPC2_VSX), -GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubmdp", 0x04, 0x17, PPC2_VSX), -GEN_XX3FORM(xscmpeqdp, 0x0C, 0x00, PPC2_ISA300), -GEN_XX3FORM(xscmpgtdp, 0x0C, 0x01, PPC2_ISA300), -GEN_XX3FORM(xscmpgedp, 0x0C, 0x02, PPC2_ISA300), -GEN_XX3FORM(xscmpnedp, 0x0C, 0x03, PPC2_ISA300), GEN_XX3FORM(xscmpexpdp, 0x0C, 0x07, PPC2_ISA300), GEN_VSX_XFORM_300(xscmpexpqp, 0x04, 0x05, 0x00600001), GEN_XX2IFORM(xscmpodp, 0x0C, 0x05, PPC2_VSX), @@ -209,17 +172,12 @@ GEN_VSX_XFORM_300(xscmpoqp, 0x04, 0x04, 0x00600001), GEN_VSX_XFORM_300(xscmpuqp, 0x04, 0x14, 0x00600001), GEN_XX3FORM(xsmaxdp, 0x00, 0x14, PPC2_VSX), GEN_XX3FORM(xsmindp, 0x00, 0x15, PPC2_VSX), -GEN_XX3FORM(xsmaxcdp, 0x00, 0x10, PPC2_ISA300), -GEN_XX3FORM(xsmincdp, 0x00, 0x11, PPC2_ISA300), -GEN_XX3FORM(xsmaxjdp, 0x00, 0x12, PPC2_ISA300), -GEN_XX3FORM(xsminjdp, 0x00, 0x13, PPC2_ISA300), GEN_XX2FORM_EO(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300), GEN_XX2FORM(xscvdpsp, 0x12, 0x10, PPC2_VSX), GEN_XX2FORM(xscvdpspn, 0x16, 0x10, PPC2_VSX207), GEN_XX2FORM_EO(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300), GEN_VSX_XFORM_300_EO(xscvsdqp, 0x04, 0x1A, 0x0A, 0x00000001), GEN_XX2FORM(xscvspdp, 0x12, 0x14, PPC2_VSX), -GEN_XX2FORM(xscvspdpn, 0x16, 0x14, PPC2_VSX207), GEN_XX2FORM(xscvdpsxds, 0x10, 0x15, PPC2_VSX), GEN_XX2FORM(xscvdpsxws, 0x10, 0x05, PPC2_VSX), GEN_XX2FORM(xscvdpuxds, 0x10, 0x14, PPC2_VSX), @@ -242,14 +200,6 @@ GEN_XX2FORM(xsresp, 0x14, 0x01, PPC2_VSX207), GEN_XX2FORM(xsrsp, 0x12, 0x11, PPC2_VSX207), GEN_XX2FORM(xssqrtsp, 0x16, 0x00, PPC2_VSX207), GEN_XX2FORM(xsrsqrtesp, 0x14, 0x00, PPC2_VSX207), -GEN_XX3FORM_NAME(xsmaddsp, "xsmaddasp", 0x04, 0x00, PPC2_VSX207), -GEN_XX3FORM_NAME(xsmaddsp, 
"xsmaddmsp", 0x04, 0x01, PPC2_VSX207), -GEN_XX3FORM_NAME(xsmsubsp, "xsmsubasp", 0x04, 0x02, PPC2_VSX207), -GEN_XX3FORM_NAME(xsmsubsp, "xsmsubmsp", 0x04, 0x03, PPC2_VSX207), -GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddasp", 0x04, 0x10, PPC2_VSX207), -GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddmsp", 0x04, 0x11, PPC2_VSX207), -GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubasp", 0x04, 0x12, PPC2_VSX207), -GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubmsp", 0x04, 0x13, PPC2_VSX207), GEN_XX2FORM(xscvsxdsp, 0x10, 0x13, PPC2_VSX207), GEN_XX2FORM(xscvuxdsp, 0x10, 0x12, PPC2_VSX207), @@ -348,55 +298,4 @@ VSX_LOGICAL(xxlnand, 0x8, 0x16, PPC2_VSX207), VSX_LOGICAL(xxlorc, 0x8, 0x15, PPC2_VSX207), GEN_XX3FORM(xxmrghw, 0x08, 0x02, PPC2_VSX), GEN_XX3FORM(xxmrglw, 0x08, 0x06, PPC2_VSX), -GEN_XX3FORM(xxperm, 0x08, 0x03, PPC2_ISA300), -GEN_XX3FORM(xxpermr, 0x08, 0x07, PPC2_ISA300), -GEN_XX2FORM(xxspltw, 0x08, 0x0A, PPC2_VSX), -GEN_XX1FORM(xxspltib, 0x08, 0x0B, PPC2_ISA300), GEN_XX3FORM_DM(xxsldwi, 0x08, 0x00), -GEN_XX2FORM_EXT(xxextractuw, 0x0A, 0x0A, PPC2_ISA300), -GEN_XX2FORM_EXT(xxinsertw, 0x0A, 0x0B, PPC2_ISA300), - -#define GEN_XXSEL_ROW(opc3) \ -GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x18, opc3, 0, PPC_NONE, PPC2_VSX), \ -GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x19, opc3, 0, PPC_NONE, PPC2_VSX), \ -GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1A, opc3, 0, PPC_NONE, PPC2_VSX), \ -GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1B, opc3, 0, PPC_NONE, PPC2_VSX), \ -GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1C, opc3, 0, PPC_NONE, PPC2_VSX), \ -GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1D, opc3, 0, PPC_NONE, PPC2_VSX), \ -GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1E, opc3, 0, PPC_NONE, PPC2_VSX), \ -GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1F, opc3, 0, PPC_NONE, PPC2_VSX), \ - -GEN_XXSEL_ROW(0x00) -GEN_XXSEL_ROW(0x01) -GEN_XXSEL_ROW(0x02) -GEN_XXSEL_ROW(0x03) -GEN_XXSEL_ROW(0x04) -GEN_XXSEL_ROW(0x05) -GEN_XXSEL_ROW(0x06) -GEN_XXSEL_ROW(0x07) -GEN_XXSEL_ROW(0x08) -GEN_XXSEL_ROW(0x09) -GEN_XXSEL_ROW(0x0A) -GEN_XXSEL_ROW(0x0B) -GEN_XXSEL_ROW(0x0C) -GEN_XXSEL_ROW(0x0D) -GEN_XXSEL_ROW(0x0E) -GEN_XXSEL_ROW(0x0F) -GEN_XXSEL_ROW(0x10) -GEN_XXSEL_ROW(0x11) -GEN_XXSEL_ROW(0x12) -GEN_XXSEL_ROW(0x13) -GEN_XXSEL_ROW(0x14) -GEN_XXSEL_ROW(0x15) -GEN_XXSEL_ROW(0x16) -GEN_XXSEL_ROW(0x17) -GEN_XXSEL_ROW(0x18) -GEN_XXSEL_ROW(0x19) -GEN_XXSEL_ROW(0x1A) -GEN_XXSEL_ROW(0x1B) -GEN_XXSEL_ROW(0x1C) -GEN_XXSEL_ROW(0x1D) -GEN_XXSEL_ROW(0x1E) -GEN_XXSEL_ROW(0x1F) - -GEN_XX3FORM_DM(xxpermdi, 0x08, 0x01), diff --git a/target/ppc/user_only_helper.c b/target/ppc/user_only_helper.c index aa3f867596..7ff76f7a06 100644 --- a/target/ppc/user_only_helper.c +++ b/target/ppc/user_only_helper.c @@ -21,16 +21,23 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" +#include "internal.h" - -bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) +void ppc_cpu_record_sigsegv(CPUState *cs, vaddr address, + MMUAccessType access_type, + bool maperr, uintptr_t retaddr) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; int exception, error_code; + /* + * Both DSISR and the "trap number" (exception vector offset, + * looked up from exception_index) are present in the linux-user + * signal frame. + * FIXME: we don't actually populate the trap number properly. + * It would be easiest to fill in an env->trap value now. 
+ */ if (access_type == MMU_INST_FETCH) { exception = POWERPC_EXCP_ISI; error_code = 0x40000000; diff --git a/target/riscv/XVentanaCondOps.decode b/target/riscv/XVentanaCondOps.decode new file mode 100644 index 0000000000..5aef7c3d72 --- /dev/null +++ b/target/riscv/XVentanaCondOps.decode @@ -0,0 +1,25 @@ +# +# RISC-V translation routines for the XVentanaCondOps extension +# +# Copyright (c) 2022 Dr. Philipp Tomsich, philipp.tomsich@vrull.eu +# +# SPDX-License-Identifier: LGPL-2.1-or-later +# +# Reference: VTx-family custom instructions +# Custom ISA extensions for Ventana Micro Systems RISC-V cores +# (https://github.com/ventanamicro/ventana-custom-extensions/releases/download/v1.0.0/ventana-custom-extensions-v1.0.0.pdf) + +# Fields +%rs2 20:5 +%rs1 15:5 +%rd 7:5 + +# Argument sets +&r rd rs1 rs2 !extern + +# Formats +@r ....... ..... ..... ... ..... ....... &r %rs2 %rs1 %rd + +# *** RV64 Custom-3 Extension *** +vt_maskc 0000000 ..... ..... 110 ..... 1111011 @r +vt_maskcn 0000000 ..... ..... 111 ..... 1111011 @r diff --git a/target/riscv/arch_dump.c b/target/riscv/arch_dump.c index 709f621d82..736a232956 100644 --- a/target/riscv/arch_dump.c +++ b/target/riscv/arch_dump.c @@ -64,12 +64,11 @@ static void riscv64_note_init(struct riscv64_note *note, DumpState *s, } int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, - int cpuid, void *opaque) + int cpuid, DumpState *s) { struct riscv64_note note; RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; - DumpState *s = opaque; int ret, i = 0; const char name[] = "CORE"; @@ -134,12 +133,11 @@ static void riscv32_note_init(struct riscv32_note *note, DumpState *s, } int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, - int cpuid, void *opaque) + int cpuid, DumpState *s) { struct riscv32_note note; RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; - DumpState *s = opaque; int ret, i; const char name[] = "CORE"; diff --git a/target/riscv/bitmanip_helper.c b/target/riscv/bitmanip_helper.c index 5b2f795d03..b99c4a39a1 100644 --- a/target/riscv/bitmanip_helper.c +++ b/target/riscv/bitmanip_helper.c @@ -3,6 +3,7 @@ * * Copyright (c) 2020 Kito Cheng, kito.cheng@sifive.com * Copyright (c) 2020 Frank Chang, frank.chang@sifive.com + * Copyright (c) 2021 Philipp Tomsich, philipp.tomsich@vrull.eu * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -23,68 +24,108 @@ #include "exec/helper-proto.h" #include "tcg/tcg.h" -static const uint64_t adjacent_masks[] = { - dup_const(MO_8, 0x55), - dup_const(MO_8, 0x33), - dup_const(MO_8, 0x0f), - dup_const(MO_16, 0xff), - dup_const(MO_32, 0xffff), - UINT32_MAX -}; +target_ulong HELPER(clmul)(target_ulong rs1, target_ulong rs2) +{ + target_ulong result = 0; + + for (int i = 0; i < TARGET_LONG_BITS; i++) { + if ((rs2 >> i) & 1) { + result ^= (rs1 << i); + } + } + + return result; +} + +target_ulong HELPER(clmulr)(target_ulong rs1, target_ulong rs2) +{ + target_ulong result = 0; + + for (int i = 0; i < TARGET_LONG_BITS; i++) { + if ((rs2 >> i) & 1) { + result ^= (rs1 >> (TARGET_LONG_BITS - i - 1)); + } + } + + return result; +} static inline target_ulong do_swap(target_ulong x, uint64_t mask, int shift) { return ((x & mask) << shift) | ((x & ~mask) >> shift); } -static target_ulong do_grev(target_ulong rs1, - target_ulong rs2, - int bits) +target_ulong HELPER(brev8)(target_ulong rs1) { target_ulong x = rs1; - int i, shift; - - for (i = 0, shift = 1; shift < bits; i++, 
shift <<= 1) { - if (rs2 & shift) { - x = do_swap(x, adjacent_masks[i], shift); - } - } + x = do_swap(x, 0x5555555555555555ull, 1); + x = do_swap(x, 0x3333333333333333ull, 2); + x = do_swap(x, 0x0f0f0f0f0f0f0f0full, 4); return x; } -target_ulong HELPER(grev)(target_ulong rs1, target_ulong rs2) +static const uint64_t shuf_masks[] = { + dup_const(MO_8, 0x44), + dup_const(MO_8, 0x30), + dup_const(MO_16, 0x0f00), + dup_const(MO_32, 0xff0000) +}; + +static inline target_ulong do_shuf_stage(target_ulong src, uint64_t maskL, + uint64_t maskR, int shift) { - return do_grev(rs1, rs2, TARGET_LONG_BITS); -} - -target_ulong HELPER(grevw)(target_ulong rs1, target_ulong rs2) -{ - return do_grev(rs1, rs2, 32); -} - -static target_ulong do_gorc(target_ulong rs1, - target_ulong rs2, - int bits) -{ - target_ulong x = rs1; - int i, shift; - - for (i = 0, shift = 1; shift < bits; i++, shift <<= 1) { - if (rs2 & shift) { - x |= do_swap(x, adjacent_masks[i], shift); - } - } + target_ulong x = src & ~(maskL | maskR); + x |= ((src << shift) & maskL) | ((src >> shift) & maskR); return x; } -target_ulong HELPER(gorc)(target_ulong rs1, target_ulong rs2) +target_ulong HELPER(unzip)(target_ulong rs1) { - return do_gorc(rs1, rs2, TARGET_LONG_BITS); + target_ulong x = rs1; + + x = do_shuf_stage(x, shuf_masks[0], shuf_masks[0] >> 1, 1); + x = do_shuf_stage(x, shuf_masks[1], shuf_masks[1] >> 2, 2); + x = do_shuf_stage(x, shuf_masks[2], shuf_masks[2] >> 4, 4); + x = do_shuf_stage(x, shuf_masks[3], shuf_masks[3] >> 8, 8); + return x; } -target_ulong HELPER(gorcw)(target_ulong rs1, target_ulong rs2) +target_ulong HELPER(zip)(target_ulong rs1) { - return do_gorc(rs1, rs2, 32); + target_ulong x = rs1; + + x = do_shuf_stage(x, shuf_masks[3], shuf_masks[3] >> 8, 8); + x = do_shuf_stage(x, shuf_masks[2], shuf_masks[2] >> 4, 4); + x = do_shuf_stage(x, shuf_masks[1], shuf_masks[1] >> 2, 2); + x = do_shuf_stage(x, shuf_masks[0], shuf_masks[0] >> 1, 1); + return x; +} + +static inline target_ulong do_xperm(target_ulong rs1, target_ulong rs2, + uint32_t sz_log2) +{ + target_ulong r = 0; + target_ulong sz = 1LL << sz_log2; + target_ulong mask = (1LL << sz) - 1; + target_ulong pos; + + for (int i = 0; i < TARGET_LONG_BITS; i += sz) { + pos = ((rs2 >> i) & mask) << sz_log2; + if (pos < sizeof(target_ulong) * 8) { + r |= ((rs1 >> pos) & mask) << i; + } + } + return r; +} + +target_ulong HELPER(xperm4)(target_ulong rs1, target_ulong rs2) +{ + return do_xperm(rs1, rs2, 2); +} + +target_ulong HELPER(xperm8)(target_ulong rs1, target_ulong rs2) +{ + return do_xperm(rs1, rs2, 3); } diff --git a/target/riscv/common-semi-target.h b/target/riscv/common-semi-target.h new file mode 100644 index 0000000000..7c8a59e0cc --- /dev/null +++ b/target/riscv/common-semi-target.h @@ -0,0 +1,50 @@ +/* + * Target-specific parts of semihosting/arm-compat-semi.c. + * + * Copyright (c) 2005, 2007 CodeSourcery. 
+ * Copyright (c) 2019, 2022 Linaro + * Copyright © 2020 by Keith Packard + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef TARGET_RISCV_COMMON_SEMI_TARGET_H +#define TARGET_RISCV_COMMON_SEMI_TARGET_H + +static inline target_ulong common_semi_arg(CPUState *cs, int argno) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + return env->gpr[xA0 + argno]; +} + +static inline void common_semi_set_ret(CPUState *cs, target_ulong ret) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + env->gpr[xA0] = ret; +} + +static inline bool common_semi_sys_exit_extended(CPUState *cs, int nr) +{ + return (nr == TARGET_SYS_EXIT_EXTENDED || sizeof(target_ulong) == 8); +} + +static inline bool is_64bit_semihosting(CPUArchState *env) +{ + return riscv_cpu_mxl(env) != MXL_RV32; +} + +static inline target_ulong common_semi_stack_bottom(CPUState *cs) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + return env->gpr[xSP]; +} + +static inline bool common_semi_has_synccache(CPUArchState *env) +{ + return true; +} + +#endif diff --git a/target/riscv/cpu-param.h b/target/riscv/cpu-param.h index 80eb615f93..ebaf26d26d 100644 --- a/target/riscv/cpu-param.h +++ b/target/riscv/cpu-param.h @@ -6,7 +6,7 @@ */ #ifndef RISCV_CPU_PARAM_H -#define RISCV_CPU_PARAM_H 1 +#define RISCV_CPU_PARAM_H #if defined(TARGET_RISCV64) # define TARGET_LONG_BITS 64 diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index 991a6bb760..d14e95c9dc 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -22,17 +22,110 @@ #include "qemu/ctype.h" #include "qemu/log.h" #include "cpu.h" +#include "pmu.h" #include "internals.h" +#include "time_helper.h" #include "exec/exec-all.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "fpu/softfloat-helpers.h" +#include "sysemu/kvm.h" +#include "kvm_riscv.h" /* RISC-V CPU definitions */ -static const char riscv_exts[26] = "IEMAFDQCLBJTPVNSUHKORWXYZG"; +#define RISCV_CPU_MARCHID ((QEMU_VERSION_MAJOR << 16) | \ + (QEMU_VERSION_MINOR << 8) | \ + (QEMU_VERSION_MICRO)) +#define RISCV_CPU_MIMPID RISCV_CPU_MARCHID + +static const char riscv_single_letter_exts[] = "IEMAFDQCPVH"; + +struct isa_ext_data { + const char *name; + bool multi_letter; + int min_version; + int ext_enable_offset; +}; + +#define ISA_EXT_DATA_ENTRY(_name, _m_letter, _min_ver, _prop) \ +{#_name, _m_letter, _min_ver, offsetof(struct RISCVCPUConfig, _prop)} + +/** + * Here are the ordering rules of extension naming defined by RISC-V + * specification : + * 1. All extensions should be separated from other multi-letter extensions + * by an underscore. + * 2. The first letter following the 'Z' conventionally indicates the most + * closely related alphabetical extension category, IMAFDQLCBKJTPVH. + * If multiple 'Z' extensions are named, they should be ordered first + * by category, then alphabetically within a category. + * 3. Standard supervisor-level extensions (starts with 'S') should be + * listed after standard unprivileged extensions. If multiple + * supervisor-level extensions are listed, they should be ordered + * alphabetically. + * 4. Non-standard extensions (starts with 'X') must be listed after all + * standard extensions. They must be separated from other multi-letter + * extensions by an underscore. 
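+ *
+ * For example, a string that follows these rules could look like
+ * "rv64imafdc_zicsr_zifencei_zba_zbb_svinval_xventanacondops"
+ * (an illustrative combination, not the ISA string of any particular CPU).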
+ */ +static const struct isa_ext_data isa_edata_arr[] = { + ISA_EXT_DATA_ENTRY(h, false, PRIV_VERSION_1_12_0, ext_h), + ISA_EXT_DATA_ENTRY(v, false, PRIV_VERSION_1_12_0, ext_v), + ISA_EXT_DATA_ENTRY(zicsr, true, PRIV_VERSION_1_10_0, ext_icsr), + ISA_EXT_DATA_ENTRY(zifencei, true, PRIV_VERSION_1_10_0, ext_ifencei), + ISA_EXT_DATA_ENTRY(zihintpause, true, PRIV_VERSION_1_10_0, ext_zihintpause), + ISA_EXT_DATA_ENTRY(zfh, true, PRIV_VERSION_1_12_0, ext_zfh), + ISA_EXT_DATA_ENTRY(zfhmin, true, PRIV_VERSION_1_12_0, ext_zfhmin), + ISA_EXT_DATA_ENTRY(zfinx, true, PRIV_VERSION_1_12_0, ext_zfinx), + ISA_EXT_DATA_ENTRY(zdinx, true, PRIV_VERSION_1_12_0, ext_zdinx), + ISA_EXT_DATA_ENTRY(zba, true, PRIV_VERSION_1_12_0, ext_zba), + ISA_EXT_DATA_ENTRY(zbb, true, PRIV_VERSION_1_12_0, ext_zbb), + ISA_EXT_DATA_ENTRY(zbc, true, PRIV_VERSION_1_12_0, ext_zbc), + ISA_EXT_DATA_ENTRY(zbkb, true, PRIV_VERSION_1_12_0, ext_zbkb), + ISA_EXT_DATA_ENTRY(zbkc, true, PRIV_VERSION_1_12_0, ext_zbkc), + ISA_EXT_DATA_ENTRY(zbkx, true, PRIV_VERSION_1_12_0, ext_zbkx), + ISA_EXT_DATA_ENTRY(zbs, true, PRIV_VERSION_1_12_0, ext_zbs), + ISA_EXT_DATA_ENTRY(zk, true, PRIV_VERSION_1_12_0, ext_zk), + ISA_EXT_DATA_ENTRY(zkn, true, PRIV_VERSION_1_12_0, ext_zkn), + ISA_EXT_DATA_ENTRY(zknd, true, PRIV_VERSION_1_12_0, ext_zknd), + ISA_EXT_DATA_ENTRY(zkne, true, PRIV_VERSION_1_12_0, ext_zkne), + ISA_EXT_DATA_ENTRY(zknh, true, PRIV_VERSION_1_12_0, ext_zknh), + ISA_EXT_DATA_ENTRY(zkr, true, PRIV_VERSION_1_12_0, ext_zkr), + ISA_EXT_DATA_ENTRY(zks, true, PRIV_VERSION_1_12_0, ext_zks), + ISA_EXT_DATA_ENTRY(zksed, true, PRIV_VERSION_1_12_0, ext_zksed), + ISA_EXT_DATA_ENTRY(zksh, true, PRIV_VERSION_1_12_0, ext_zksh), + ISA_EXT_DATA_ENTRY(zkt, true, PRIV_VERSION_1_12_0, ext_zkt), + ISA_EXT_DATA_ENTRY(zve32f, true, PRIV_VERSION_1_12_0, ext_zve32f), + ISA_EXT_DATA_ENTRY(zve64f, true, PRIV_VERSION_1_12_0, ext_zve64f), + ISA_EXT_DATA_ENTRY(zhinx, true, PRIV_VERSION_1_12_0, ext_zhinx), + ISA_EXT_DATA_ENTRY(zhinxmin, true, PRIV_VERSION_1_12_0, ext_zhinxmin), + ISA_EXT_DATA_ENTRY(smaia, true, PRIV_VERSION_1_12_0, ext_smaia), + ISA_EXT_DATA_ENTRY(ssaia, true, PRIV_VERSION_1_12_0, ext_ssaia), + ISA_EXT_DATA_ENTRY(sscofpmf, true, PRIV_VERSION_1_12_0, ext_sscofpmf), + ISA_EXT_DATA_ENTRY(sstc, true, PRIV_VERSION_1_12_0, ext_sstc), + ISA_EXT_DATA_ENTRY(svinval, true, PRIV_VERSION_1_12_0, ext_svinval), + ISA_EXT_DATA_ENTRY(svnapot, true, PRIV_VERSION_1_12_0, ext_svnapot), + ISA_EXT_DATA_ENTRY(svpbmt, true, PRIV_VERSION_1_12_0, ext_svpbmt), + ISA_EXT_DATA_ENTRY(xventanacondops, true, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), +}; + +static bool isa_ext_is_enabled(RISCVCPU *cpu, + const struct isa_ext_data *edata) +{ + bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset; + + return *ext_enabled; +} + +static void isa_ext_update_enabled(RISCVCPU *cpu, + const struct isa_ext_data *edata, bool en) +{ + bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset; + + *ext_enabled = en; +} const char * const riscv_int_regnames[] = { "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", @@ -42,6 +135,15 @@ const char * const riscv_int_regnames[] = { "x28/t3", "x29/t4", "x30/t5", "x31/t6" }; +const char * const riscv_int_regnamesh[] = { + "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", + "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", + "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h", + "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", + "x24h/s8h", "x25h/s9h", 
"x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", + "x30h/t5h", "x31h/t6h" +}; + const char * const riscv_fpr_regnames[] = { "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", @@ -97,6 +199,8 @@ static const char * const riscv_intr_names[] = { "reserved" }; +static void register_cpu_props(DeviceState *dev); + const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) { if (async) { @@ -108,18 +212,10 @@ const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) } } -bool riscv_cpu_is_32bit(CPURISCVState *env) +static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext) { - if (env->misa & RV64) { - return false; - } - - return true; -} - -static void set_misa(CPURISCVState *env, target_ulong misa) -{ - env->misa_mask = env->misa = misa; + env->misa_mxl_max = env->misa_mxl = mxl; + env->misa_ext_mask = env->misa_ext = ext; } static void set_priv_version(CPURISCVState *env, int priv_ver) @@ -127,37 +223,21 @@ static void set_priv_version(CPURISCVState *env, int priv_ver) env->priv_ver = priv_ver; } -static void set_bext_version(CPURISCVState *env, int bext_ver) -{ - env->bext_ver = bext_ver; -} - static void set_vext_version(CPURISCVState *env, int vext_ver) { env->vext_ver = vext_ver; } -static void set_feature(CPURISCVState *env, int feature) -{ - env->features |= (1ULL << feature); -} - -static void set_resetvec(CPURISCVState *env, target_ulong resetvec) -{ -#ifndef CONFIG_USER_ONLY - env->resetvec = resetvec; -#endif -} - static void riscv_any_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; #if defined(TARGET_RISCV32) - set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVD | RVC | RVU); + set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU); #elif defined(TARGET_RISCV64) - set_misa(env, RV64 | RVI | RVM | RVA | RVF | RVD | RVC | RVU); + set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU); #endif - set_priv_version(env, PRIV_VERSION_1_11_0); + set_priv_version(env, PRIV_VERSION_1_12_0); + register_cpu_props(DEVICE(obj)); } #if defined(TARGET_RISCV64) @@ -165,62 +245,104 @@ static void rv64_base_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; /* We set this in the realise function */ - set_misa(env, RV64); + set_misa(env, MXL_RV64, 0); + register_cpu_props(DEVICE(obj)); + /* Set latest version of privileged specification */ + set_priv_version(env, PRIV_VERSION_1_12_0); } static void rv64_sifive_u_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; - set_misa(env, RV64 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); + set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); set_priv_version(env, PRIV_VERSION_1_10_0); } static void rv64_sifive_e_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; - set_misa(env, RV64 | RVI | RVM | RVA | RVC | RVU); + RISCVCPU *cpu = RISCV_CPU(obj); + + set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU); set_priv_version(env, PRIV_VERSION_1_10_0); - qdev_prop_set_bit(DEVICE(obj), "mmu", false); + cpu->cfg.mmu = false; +} + +static void rv128_base_cpu_init(Object *obj) +{ + if (qemu_tcg_mttcg_enabled()) { + /* Missing 128-bit aligned atomics */ + error_report("128-bit RISC-V currently does not work with Multi " + "Threaded TCG. 
Please use: -accel tcg,thread=single"); + exit(EXIT_FAILURE); + } + CPURISCVState *env = &RISCV_CPU(obj)->env; + /* We set this in the realise function */ + set_misa(env, MXL_RV128, 0); + register_cpu_props(DEVICE(obj)); + /* Set latest version of privileged specification */ + set_priv_version(env, PRIV_VERSION_1_12_0); } #else static void rv32_base_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; /* We set this in the realise function */ - set_misa(env, RV32); + set_misa(env, MXL_RV32, 0); + register_cpu_props(DEVICE(obj)); + /* Set latest version of privileged specification */ + set_priv_version(env, PRIV_VERSION_1_12_0); } static void rv32_sifive_u_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; - set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); + set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); set_priv_version(env, PRIV_VERSION_1_10_0); } static void rv32_sifive_e_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; - set_misa(env, RV32 | RVI | RVM | RVA | RVC | RVU); + RISCVCPU *cpu = RISCV_CPU(obj); + + set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU); set_priv_version(env, PRIV_VERSION_1_10_0); - qdev_prop_set_bit(DEVICE(obj), "mmu", false); + cpu->cfg.mmu = false; } static void rv32_ibex_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; - set_misa(env, RV32 | RVI | RVM | RVC | RVU); - set_priv_version(env, PRIV_VERSION_1_10_0); - qdev_prop_set_bit(DEVICE(obj), "mmu", false); - qdev_prop_set_bit(DEVICE(obj), "x-epmp", true); + RISCVCPU *cpu = RISCV_CPU(obj); + + set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU); + set_priv_version(env, PRIV_VERSION_1_11_0); + cpu->cfg.mmu = false; + cpu->cfg.epmp = true; } static void rv32_imafcu_nommu_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; - set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVC | RVU); + RISCVCPU *cpu = RISCV_CPU(obj); + + set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU); set_priv_version(env, PRIV_VERSION_1_10_0); - set_resetvec(env, DEFAULT_RSTVEC); - qdev_prop_set_bit(DEVICE(obj), "mmu", false); + cpu->cfg.mmu = false; +} +#endif + +#if defined(CONFIG_KVM) +static void riscv_host_cpu_init(Object *obj) +{ + CPURISCVState *env = &RISCV_CPU(obj)->env; +#if defined(TARGET_RISCV32) + set_misa(env, MXL_RV32, 0); +#elif defined(TARGET_RISCV64) + set_misa(env, MXL_RV64, 0); +#endif + register_cpu_props(DEVICE(obj)); } #endif @@ -255,55 +377,63 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) #endif qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); #ifndef CONFIG_USER_ONLY - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mhartid ", env->mhartid); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mstatus ", (target_ulong)env->mstatus); - if (riscv_cpu_is_32bit(env)) { - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mstatush ", - (target_ulong)(env->mstatus >> 32)); + { + static const int dump_csrs[] = { + CSR_MHARTID, + CSR_MSTATUS, + CSR_MSTATUSH, + CSR_HSTATUS, + CSR_VSSTATUS, + CSR_MIP, + CSR_MIE, + CSR_MIDELEG, + CSR_HIDELEG, + CSR_MEDELEG, + CSR_HEDELEG, + CSR_MTVEC, + CSR_STVEC, + CSR_VSTVEC, + CSR_MEPC, + CSR_SEPC, + CSR_VSEPC, + CSR_MCAUSE, + CSR_SCAUSE, + CSR_VSCAUSE, + CSR_MTVAL, + CSR_STVAL, + CSR_HTVAL, + CSR_MTVAL2, + CSR_MSCRATCH, + CSR_SSCRATCH, + CSR_SATP, + CSR_MMTE, + CSR_UPMBASE, + CSR_UPMMASK, + CSR_SPMBASE, + CSR_SPMMASK, + CSR_MPMBASE, + CSR_MPMMASK, + }; + + for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { + int csrno = dump_csrs[i]; + target_ulong 
val = 0; + RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); + + /* + * Rely on the smode, hmode, etc, predicates within csr.c + * to do the filtering of the registers that are present. + */ + if (res == RISCV_EXCP_NONE) { + qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", + csr_ops[csrno].name, val); + } + } } - if (riscv_has_ext(env, RVH)) { - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "hstatus ", env->hstatus); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "vsstatus ", - (target_ulong)env->vsstatus); - } - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mip ", env->mip); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mie ", env->mie); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mideleg ", env->mideleg); - if (riscv_has_ext(env, RVH)) { - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "hideleg ", env->hideleg); - } - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "medeleg ", env->medeleg); - if (riscv_has_ext(env, RVH)) { - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "hedeleg ", env->hedeleg); - } - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mtvec ", env->mtvec); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "stvec ", env->stvec); - if (riscv_has_ext(env, RVH)) { - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "vstvec ", env->vstvec); - } - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mepc ", env->mepc); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "sepc ", env->sepc); - if (riscv_has_ext(env, RVH)) { - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "vsepc ", env->vsepc); - } - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mcause ", env->mcause); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "scause ", env->scause); - if (riscv_has_ext(env, RVH)) { - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "vscause ", env->vscause); - } - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mtval ", env->mtval); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "stval ", env->stval); - if (riscv_has_ext(env, RVH)) { - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "htval ", env->htval); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mtval2 ", env->mtval2); - } - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mscratch", env->mscratch); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "sscratch", env->sscratch); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "satp ", env->satp); #endif for (i = 0; i < 32; i++) { - qemu_fprintf(f, " %s " TARGET_FMT_lx, + qemu_fprintf(f, " %-8s " TARGET_FMT_lx, riscv_int_regnames[i], env->gpr[i]); if ((i & 3) == 3) { qemu_fprintf(f, "\n"); @@ -311,7 +441,7 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) } if (flags & CPU_DUMP_FPU) { for (i = 0; i < 32; i++) { - qemu_fprintf(f, " %s %016" PRIx64, + qemu_fprintf(f, " %-8s %016" PRIx64, riscv_fpr_regnames[i], env->fpr[i]); if ((i & 3) == 3) { qemu_fprintf(f, "\n"); @@ -324,7 +454,24 @@ static void riscv_cpu_set_pc(CPUState *cs, vaddr value) { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; - env->pc = value; + + if (env->xl == MXL_RV32) { + env->pc = (int32_t)value; + } else { + env->pc = value; + } +} + +static vaddr riscv_cpu_get_pc(CPUState *cs) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + + /* Match cpu_get_tb_cpu_state. 
*/ + if (env->xl == MXL_RV32) { + return env->pc & UINT32_MAX; + } + return env->pc; } static void riscv_cpu_synchronize_from_tb(CPUState *cs, @@ -332,7 +479,13 @@ static void riscv_cpu_synchronize_from_tb(CPUState *cs, { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; - env->pc = tb->pc; + RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); + + if (xl == MXL_RV32) { + env->pc = (int32_t)tb_pc(tb); + } else { + env->pc = tb_pc(tb); + } } static bool riscv_cpu_has_work(CPUState *cs) @@ -344,20 +497,34 @@ static bool riscv_cpu_has_work(CPUState *cs) * Definition of the WFI instruction requires it to ignore the privilege * mode and delegation registers, but respect individual enables */ - return (env->mip & env->mie) != 0; + return riscv_cpu_all_pending(env) != 0; #else return true; #endif } -void restore_state_to_opc(CPURISCVState *env, TranslationBlock *tb, - target_ulong *data) +static void riscv_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) { - env->pc = data[0]; + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); + + if (xl == MXL_RV32) { + env->pc = (int32_t)data[0]; + } else { + env->pc = data[0]; + } + env->bins = data[1]; } static void riscv_cpu_reset(DeviceState *dev) { +#ifndef CONFIG_USER_ONLY + uint8_t iprio; + int i, irq, rdzero; +#endif CPUState *cs = CPU(dev); RISCVCPU *cpu = RISCV_CPU(cs); RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); @@ -365,24 +532,83 @@ static void riscv_cpu_reset(DeviceState *dev) mcc->parent_reset(dev); #ifndef CONFIG_USER_ONLY + env->misa_mxl = env->misa_mxl_max; env->priv = PRV_M; env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); + if (env->misa_mxl > MXL_RV32) { + /* + * The reset status of SXL/UXL is undefined, but mstatus is WARL + * and we must ensure that the value after init is valid for read. + */ + env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl); + env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl); + if (riscv_has_ext(env, RVH)) { + env->vsstatus = set_field(env->vsstatus, + MSTATUS64_SXL, env->misa_mxl); + env->vsstatus = set_field(env->vsstatus, + MSTATUS64_UXL, env->misa_mxl); + env->mstatus_hs = set_field(env->mstatus_hs, + MSTATUS64_SXL, env->misa_mxl); + env->mstatus_hs = set_field(env->mstatus_hs, + MSTATUS64_UXL, env->misa_mxl); + } + } env->mcause = 0; + env->miclaim = MIP_SGEIP; env->pc = env->resetvec; + env->bins = 0; env->two_stage_lookup = false; + + /* Initialized default priorities of local interrupts. */ + for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { + iprio = riscv_cpu_default_priority(i); + env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio; + env->siprio[i] = (i == IRQ_S_EXT) ? 
0 : iprio; + env->hviprio[i] = 0; + } + i = 0; + while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) { + if (!rdzero) { + env->hviprio[irq] = env->miprio[irq]; + } + i++; + } + /* mmte is supposed to have pm.current hardwired to 1 */ + env->mmte |= (PM_EXT_INITIAL | MMTE_M_PM_CURRENT); #endif + env->xl = riscv_cpu_mxl(env); + riscv_cpu_update_mask(env); cs->exception_index = RISCV_EXCP_NONE; env->load_res = -1; set_default_nan_mode(1, &env->fp_status); + +#ifndef CONFIG_USER_ONLY + if (riscv_feature(env, RISCV_FEATURE_DEBUG)) { + riscv_trigger_init(env); + } + + if (kvm_enabled()) { + kvm_riscv_reset_vcpu(cpu); + } +#endif } static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) { RISCVCPU *cpu = RISCV_CPU(s); - if (riscv_cpu_is_32bit(&cpu->env)) { + + switch (riscv_cpu_mxl(&cpu->env)) { + case MXL_RV32: info->print_insn = print_insn_riscv32; - } else { + break; + case MXL_RV64: info->print_insn = print_insn_riscv64; + break; + case MXL_RV128: + info->print_insn = print_insn_riscv128; + break; + default: + g_assert_not_reached(); } } @@ -392,10 +618,8 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) RISCVCPU *cpu = RISCV_CPU(dev); CPURISCVState *env = &cpu->env; RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); - int priv_version = PRIV_VERSION_1_11_0; - int bext_version = BEXT_VERSION_0_93_0; - int vext_version = VEXT_VERSION_0_07_1; - target_ulong target_misa = env->misa; + CPUClass *cc = CPU_CLASS(mcc); + int i, priv_version = -1; Error *local_err = NULL; cpu_exec_realizefn(cs, &local_err); @@ -405,7 +629,9 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) } if (cpu->cfg.priv_spec) { - if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) { + if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) { + priv_version = PRIV_VERSION_1_12_0; + } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) { priv_version = PRIV_VERSION_1_11_0; } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) { priv_version = PRIV_VERSION_1_10_0; @@ -417,105 +643,217 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) } } - set_priv_version(env, priv_version); - set_bext_version(env, bext_version); - set_vext_version(env, vext_version); + if (priv_version >= PRIV_VERSION_1_10_0) { + set_priv_version(env, priv_version); + } + + /* Force disable extensions if priv spec version does not match */ + for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) { + if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) && + (env->priv_ver < isa_edata_arr[i].min_version)) { + isa_ext_update_enabled(cpu, &isa_edata_arr[i], false); +#ifndef CONFIG_USER_ONLY + warn_report("disabling %s extension for hart 0x%lx because " + "privilege spec version does not match", + isa_edata_arr[i].name, (unsigned long)env->mhartid); +#else + warn_report("disabling %s extension because " + "privilege spec version does not match", + isa_edata_arr[i].name); +#endif + } + } if (cpu->cfg.mmu) { - set_feature(env, RISCV_FEATURE_MMU); + riscv_set_feature(env, RISCV_FEATURE_MMU); } if (cpu->cfg.pmp) { - set_feature(env, RISCV_FEATURE_PMP); + riscv_set_feature(env, RISCV_FEATURE_PMP); /* * Enhanced PMP should only be available * on harts with PMP support */ if (cpu->cfg.epmp) { - set_feature(env, RISCV_FEATURE_EPMP); + riscv_set_feature(env, RISCV_FEATURE_EPMP); } } - set_resetvec(env, cpu->cfg.resetvec); + if (cpu->cfg.debug) { + riscv_set_feature(env, RISCV_FEATURE_DEBUG); + } + + +#ifndef CONFIG_USER_ONLY + if (cpu->cfg.ext_sstc) { + riscv_timer_init(cpu); + } +#endif /* CONFIG_USER_ONLY */ + + /* Validate that MISA_MXL 
is set properly. */ + switch (env->misa_mxl_max) { +#ifdef TARGET_RISCV64 + case MXL_RV64: + case MXL_RV128: + cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; + break; +#endif + case MXL_RV32: + cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; + break; + default: + g_assert_not_reached(); + } + assert(env->misa_mxl_max == env->misa_mxl); + + /* If only MISA_EXT is unset for misa, then set it from properties */ + if (env->misa_ext == 0) { + uint32_t ext = 0; - /* If only XLEN is set for misa, then set misa from properties */ - if (env->misa == RV32 || env->misa == RV64) { /* Do some ISA extension error checking */ - if (cpu->cfg.ext_i && cpu->cfg.ext_e) { - error_setg(errp, - "I and E extensions are incompatible"); - return; - } - - if (!cpu->cfg.ext_i && !cpu->cfg.ext_e) { - error_setg(errp, - "Either I or E extension must be set"); - return; - } - - if (cpu->cfg.ext_g && !(cpu->cfg.ext_i & cpu->cfg.ext_m & - cpu->cfg.ext_a & cpu->cfg.ext_f & - cpu->cfg.ext_d)) { - warn_report("Setting G will also set IMAFD"); + if (cpu->cfg.ext_g && !(cpu->cfg.ext_i && cpu->cfg.ext_m && + cpu->cfg.ext_a && cpu->cfg.ext_f && + cpu->cfg.ext_d && + cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) { + warn_report("Setting G will also set IMAFD_Zicsr_Zifencei"); cpu->cfg.ext_i = true; cpu->cfg.ext_m = true; cpu->cfg.ext_a = true; cpu->cfg.ext_f = true; cpu->cfg.ext_d = true; + cpu->cfg.ext_icsr = true; + cpu->cfg.ext_ifencei = true; + } + + if (cpu->cfg.ext_i && cpu->cfg.ext_e) { + error_setg(errp, + "I and E extensions are incompatible"); + return; + } + + if (!cpu->cfg.ext_i && !cpu->cfg.ext_e) { + error_setg(errp, + "Either I or E extension must be set"); + return; + } + + if (cpu->cfg.ext_s && !cpu->cfg.ext_u) { + error_setg(errp, + "Setting S extension without U extension is illegal"); + return; + } + + if (cpu->cfg.ext_h && !cpu->cfg.ext_i) { + error_setg(errp, + "H depends on an I base integer ISA with 32 x registers"); + return; + } + + if (cpu->cfg.ext_h && !cpu->cfg.ext_s) { + error_setg(errp, "H extension implicitly requires S-mode"); + return; + } + + if (cpu->cfg.ext_f && !cpu->cfg.ext_icsr) { + error_setg(errp, "F extension requires Zicsr"); + return; + } + + if ((cpu->cfg.ext_zfh || cpu->cfg.ext_zfhmin) && !cpu->cfg.ext_f) { + error_setg(errp, "Zfh/Zfhmin extensions require F extension"); + return; + } + + if (cpu->cfg.ext_d && !cpu->cfg.ext_f) { + error_setg(errp, "D extension requires F extension"); + return; + } + + if (cpu->cfg.ext_v && !cpu->cfg.ext_d) { + error_setg(errp, "V extension requires D extension"); + return; + } + + if ((cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) && !cpu->cfg.ext_f) { + error_setg(errp, "Zve32f/Zve64f extensions require F extension"); + return; } /* Set the ISA extensions, checks should have happened above */ + if (cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinx || + cpu->cfg.ext_zhinxmin) { + cpu->cfg.ext_zfinx = true; + } + + if (cpu->cfg.ext_zfinx) { + if (!cpu->cfg.ext_icsr) { + error_setg(errp, "Zfinx extension requires Zicsr"); + return; + } + if (cpu->cfg.ext_f) { + error_setg(errp, + "Zfinx cannot be supported together with F extension"); + return; + } + } + + if (cpu->cfg.ext_zk) { + cpu->cfg.ext_zkn = true; + cpu->cfg.ext_zkr = true; + cpu->cfg.ext_zkt = true; + } + + if (cpu->cfg.ext_zkn) { + cpu->cfg.ext_zbkb = true; + cpu->cfg.ext_zbkc = true; + cpu->cfg.ext_zbkx = true; + cpu->cfg.ext_zkne = true; + cpu->cfg.ext_zknd = true; + cpu->cfg.ext_zknh = true; + } + + if (cpu->cfg.ext_zks) { + cpu->cfg.ext_zbkb = true; + cpu->cfg.ext_zbkc = true; + cpu->cfg.ext_zbkx = 
true; + cpu->cfg.ext_zksed = true; + cpu->cfg.ext_zksh = true; + } + if (cpu->cfg.ext_i) { - target_misa |= RVI; + ext |= RVI; } if (cpu->cfg.ext_e) { - target_misa |= RVE; + ext |= RVE; } if (cpu->cfg.ext_m) { - target_misa |= RVM; + ext |= RVM; } if (cpu->cfg.ext_a) { - target_misa |= RVA; + ext |= RVA; } if (cpu->cfg.ext_f) { - target_misa |= RVF; + ext |= RVF; } if (cpu->cfg.ext_d) { - target_misa |= RVD; + ext |= RVD; } if (cpu->cfg.ext_c) { - target_misa |= RVC; + ext |= RVC; } if (cpu->cfg.ext_s) { - target_misa |= RVS; + ext |= RVS; } if (cpu->cfg.ext_u) { - target_misa |= RVU; + ext |= RVU; } if (cpu->cfg.ext_h) { - target_misa |= RVH; - } - if (cpu->cfg.ext_b) { - target_misa |= RVB; - - if (cpu->cfg.bext_spec) { - if (!g_strcmp0(cpu->cfg.bext_spec, "v0.93")) { - bext_version = BEXT_VERSION_0_93_0; - } else { - error_setg(errp, - "Unsupported bitmanip spec version '%s'", - cpu->cfg.bext_spec); - return; - } - } else { - qemu_log("bitmanip version is not specified, " - "use the default value v0.93\n"); - } - set_bext_version(env, bext_version); + ext |= RVH; } if (cpu->cfg.ext_v) { - target_misa |= RVV; + int vext_version = VEXT_VERSION_1_00_0; + ext |= RVV; if (!is_power_of_2(cpu->cfg.vlen)) { error_setg(errp, "Vector extension VLEN must be power of 2"); @@ -539,8 +877,8 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) return; } if (cpu->cfg.vext_spec) { - if (!g_strcmp0(cpu->cfg.vext_spec, "v0.7.1")) { - vext_version = VEXT_VERSION_0_07_1; + if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) { + vext_version = VEXT_VERSION_1_00_0; } else { error_setg(errp, "Unsupported vector spec version '%s'", @@ -549,14 +887,26 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) } } else { qemu_log("vector version is not specified, " - "use the default value v0.7.1\n"); + "use the default value v1.0\n"); } set_vext_version(env, vext_version); } + if (cpu->cfg.ext_j) { + ext |= RVJ; + } - set_misa(env, target_misa); + set_misa(env, env->misa_mxl, ext); } +#ifndef CONFIG_USER_ONLY + if (cpu->cfg.pmu_num) { + if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) { + cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + riscv_pmu_timer_cb, cpu); + } + } +#endif + riscv_cpu_register_gdb_regs_for_features(cs); qemu_init_vcpu(cs); @@ -565,17 +915,92 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) mcc->parent_realize(dev, errp); } +#ifndef CONFIG_USER_ONLY +static void riscv_cpu_set_irq(void *opaque, int irq, int level) +{ + RISCVCPU *cpu = RISCV_CPU(opaque); + CPURISCVState *env = &cpu->env; + + if (irq < IRQ_LOCAL_MAX) { + switch (irq) { + case IRQ_U_SOFT: + case IRQ_S_SOFT: + case IRQ_VS_SOFT: + case IRQ_M_SOFT: + case IRQ_U_TIMER: + case IRQ_S_TIMER: + case IRQ_VS_TIMER: + case IRQ_M_TIMER: + case IRQ_U_EXT: + case IRQ_VS_EXT: + case IRQ_M_EXT: + if (kvm_enabled()) { + kvm_riscv_set_irq(cpu, irq, level); + } else { + riscv_cpu_update_mip(cpu, 1 << irq, BOOL_TO_MASK(level)); + } + break; + case IRQ_S_EXT: + if (kvm_enabled()) { + kvm_riscv_set_irq(cpu, irq, level); + } else { + env->external_seip = level; + riscv_cpu_update_mip(cpu, 1 << irq, + BOOL_TO_MASK(level | env->software_seip)); + } + break; + default: + g_assert_not_reached(); + } + } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { + /* Require H-extension for handling guest local interrupts */ + if (!riscv_has_ext(env, RVH)) { + g_assert_not_reached(); + } + + /* Compute bit position in HGEIP CSR */ + irq = irq - IRQ_LOCAL_MAX + 1; + if (env->geilen < irq) { + 
g_assert_not_reached(); + } + + /* Update HGEIP CSR */ + env->hgeip &= ~((target_ulong)1 << irq); + if (level) { + env->hgeip |= (target_ulong)1 << irq; + } + + /* Update mip.SGEIP bit */ + riscv_cpu_update_mip(cpu, MIP_SGEIP, + BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); + } else { + g_assert_not_reached(); + } +} +#endif /* CONFIG_USER_ONLY */ + static void riscv_cpu_init(Object *obj) { RISCVCPU *cpu = RISCV_CPU(obj); + cpu->cfg.ext_ifencei = true; + cpu->cfg.ext_icsr = true; + cpu->cfg.mmu = true; + cpu->cfg.pmp = true; + cpu_set_cpustate_pointers(cpu); + +#ifndef CONFIG_USER_ONLY + qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq, + IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); +#endif /* CONFIG_USER_ONLY */ } -static Property riscv_cpu_properties[] = { +static Property riscv_cpu_extensions[] = { + /* Defaults for standard extensions */ DEFINE_PROP_BOOL("i", RISCVCPU, cfg.ext_i, true), DEFINE_PROP_BOOL("e", RISCVCPU, cfg.ext_e, false), - DEFINE_PROP_BOOL("g", RISCVCPU, cfg.ext_g, true), + DEFINE_PROP_BOOL("g", RISCVCPU, cfg.ext_g, false), DEFINE_PROP_BOOL("m", RISCVCPU, cfg.ext_m, true), DEFINE_PROP_BOOL("a", RISCVCPU, cfg.ext_a, true), DEFINE_PROP_BOOL("f", RISCVCPU, cfg.ext_f, true), @@ -583,23 +1008,92 @@ static Property riscv_cpu_properties[] = { DEFINE_PROP_BOOL("c", RISCVCPU, cfg.ext_c, true), DEFINE_PROP_BOOL("s", RISCVCPU, cfg.ext_s, true), DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true), - /* This is experimental so mark with 'x-' */ - DEFINE_PROP_BOOL("x-b", RISCVCPU, cfg.ext_b, false), - DEFINE_PROP_BOOL("x-h", RISCVCPU, cfg.ext_h, false), - DEFINE_PROP_BOOL("x-v", RISCVCPU, cfg.ext_v, false), - DEFINE_PROP_BOOL("Counters", RISCVCPU, cfg.ext_counters, true), + DEFINE_PROP_BOOL("v", RISCVCPU, cfg.ext_v, false), + DEFINE_PROP_BOOL("h", RISCVCPU, cfg.ext_h, true), + DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16), + DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false), DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true), DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true), + DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true), + DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false), + DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false), + DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false), + DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false), + DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), + DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), + DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true), + DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec), - DEFINE_PROP_STRING("bext_spec", RISCVCPU, cfg.bext_spec), DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec), DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128), DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), - DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), - DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), - DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false), - DEFINE_PROP_UINT64("resetvec", RISCVCPU, cfg.resetvec, DEFAULT_RSTVEC), + DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false), + DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false), + DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false), + + DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true), + DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true), + DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true), + DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false), + DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false), + DEFINE_PROP_BOOL("zbkx", 
RISCVCPU, cfg.ext_zbkx, false), + DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true), + DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false), + DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false), + DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false), + DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false), + DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false), + DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false), + DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false), + DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false), + DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false), + DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false), + + DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false), + DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false), + DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false), + DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false), + + DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false), + + /* Vendor-specific custom extensions */ + DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false), + + /* These are experimental so mark with 'x-' */ + DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false), + /* ePMP 0.9.3 */ + DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false), + DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false), + DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false), + + DEFINE_PROP_END_OF_LIST(), +}; + +static void register_cpu_props(DeviceState *dev) +{ + Property *prop; + + for (prop = riscv_cpu_extensions; prop && prop->name; prop++) { + qdev_property_add_static(dev, prop); + } +} + +static Property riscv_cpu_properties[] = { + DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), + + DEFINE_PROP_UINT32("mvendorid", RISCVCPU, cfg.mvendorid, 0), + DEFINE_PROP_UINT64("marchid", RISCVCPU, cfg.marchid, RISCV_CPU_MARCHID), + DEFINE_PROP_UINT64("mimpid", RISCVCPU, cfg.mimpid, RISCV_CPU_MIMPID), + +#ifndef CONFIG_USER_ONLY + DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), +#endif + + DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), + + DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), + DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), DEFINE_PROP_END_OF_LIST(), }; @@ -608,10 +1102,14 @@ static gchar *riscv_gdb_arch_name(CPUState *cs) RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; - if (riscv_cpu_is_32bit(env)) { + switch (riscv_cpu_mxl(env)) { + case MXL_RV32: return g_strdup("riscv:rv32"); - } else { + case MXL_RV64: + case MXL_RV128: return g_strdup("riscv:rv64"); + default: + g_assert_not_reached(); } } @@ -621,6 +1119,8 @@ static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname) if (strcmp(xmlname, "riscv-csr.xml") == 0) { return cpu->dyn_csr_xml; + } else if (strcmp(xmlname, "riscv-vector.xml") == 0) { + return cpu->dyn_vreg_xml; } return NULL; @@ -642,13 +1142,17 @@ static const struct SysemuCPUOps riscv_sysemu_ops = { static const struct TCGCPUOps riscv_tcg_ops = { .initialize = riscv_translate_init, .synchronize_from_tb = riscv_cpu_synchronize_from_tb, - .cpu_exec_interrupt = riscv_cpu_exec_interrupt, - .tlb_fill = riscv_cpu_tlb_fill, + .restore_state_to_opc = riscv_restore_state_to_opc, #ifndef CONFIG_USER_ONLY + .tlb_fill = riscv_cpu_tlb_fill, + .cpu_exec_interrupt = riscv_cpu_exec_interrupt, .do_interrupt = riscv_cpu_do_interrupt, .do_transaction_failed = riscv_cpu_do_transaction_failed, .do_unaligned_access = 
riscv_cpu_do_unaligned_access, + .debug_excp_handler = riscv_cpu_debug_excp_handler, + .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint, + .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint, #endif /* !CONFIG_USER_ONLY */ }; @@ -667,14 +1171,10 @@ static void riscv_cpu_class_init(ObjectClass *c, void *data) cc->has_work = riscv_cpu_has_work; cc->dump_state = riscv_cpu_dump_state; cc->set_pc = riscv_cpu_set_pc; + cc->get_pc = riscv_cpu_get_pc; cc->gdb_read_register = riscv_cpu_gdb_read_register; cc->gdb_write_register = riscv_cpu_gdb_write_register; cc->gdb_num_core_regs = 33; -#if defined(TARGET_RISCV32) - cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; -#elif defined(TARGET_RISCV64) - cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; -#endif cc->gdb_stop_before_watchpoint = true; cc->disas_set_info = riscv_cpu_disas_set_info; #ifndef CONFIG_USER_ONLY @@ -687,18 +1187,39 @@ static void riscv_cpu_class_init(ObjectClass *c, void *data) device_class_set_props(dc, riscv_cpu_properties); } +static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, int max_str_len) +{ + char *old = *isa_str; + char *new = *isa_str; + int i; + + for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) { + if (isa_edata_arr[i].multi_letter && + isa_ext_is_enabled(cpu, &isa_edata_arr[i])) { + new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL); + g_free(old); + old = new; + } + } + + *isa_str = new; +} + char *riscv_isa_string(RISCVCPU *cpu) { int i; - const size_t maxlen = sizeof("rv128") + sizeof(riscv_exts) + 1; + const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); char *isa_str = g_new(char, maxlen); char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS); - for (i = 0; i < sizeof(riscv_exts); i++) { - if (cpu->env.misa & RV(riscv_exts[i])) { - *p++ = qemu_tolower(riscv_exts[i]); + for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { + if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { + *p++ = qemu_tolower(riscv_single_letter_exts[i]); } } *p = '\0'; + if (!cpu->cfg.short_isa_string) { + riscv_isa_string_ext(cpu, &isa_str, maxlen); + } return isa_str; } @@ -750,6 +1271,9 @@ static const TypeInfo riscv_cpu_type_infos[] = { .class_init = riscv_cpu_class_init, }, DEFINE_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init), +#if defined(CONFIG_KVM) + DEFINE_CPU(TYPE_RISCV_CPU_HOST, riscv_host_cpu_init), +#endif #if defined(TARGET_RISCV32) DEFINE_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init), DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init), @@ -761,6 +1285,7 @@ static const TypeInfo riscv_cpu_type_infos[] = { DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init), DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init), DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init), + DEFINE_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init), #endif }; diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index bf1c899c00..3a9e25053f 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -23,11 +23,19 @@ #include "hw/core/cpu.h" #include "hw/registerfields.h" #include "exec/cpu-defs.h" -#include "fpu/softfloat-types.h" +#include "qemu/cpu-float.h" #include "qom/object.h" +#include "qemu/int128.h" +#include "cpu_bits.h" #define TCG_GUEST_DEFAULT_MO 0 +/* + * RISC-V-specific extra insn start words: + * 1: Original instruction opcode + */ +#define TARGET_INSN_START_EXTRA_WORDS 1 + #define TYPE_RISCV_CPU "riscv-cpu" #define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU @@ -37,6 +45,7 @@ #define TYPE_RISCV_CPU_ANY RISCV_CPU_TYPE_NAME("any") #define 
TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32") #define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64") +#define TYPE_RISCV_CPU_BASE128 RISCV_CPU_TYPE_NAME("x-rv128") #define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex") #define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c") #define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31") @@ -44,6 +53,7 @@ #define TYPE_RISCV_CPU_SIFIVE_E51 RISCV_CPU_TYPE_NAME("sifive-e51") #define TYPE_RISCV_CPU_SIFIVE_U34 RISCV_CPU_TYPE_NAME("sifive-u34") #define TYPE_RISCV_CPU_SIFIVE_U54 RISCV_CPU_TYPE_NAME("sifive-u54") +#define TYPE_RISCV_CPU_HOST RISCV_CPU_TYPE_NAME("host") #if defined(TARGET_RISCV32) # define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE32 @@ -51,9 +61,6 @@ # define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE64 #endif -#define RV32 ((target_ulong)1 << (TARGET_LONG_BITS - 2)) -#define RV64 ((target_ulong)2 << (TARGET_LONG_BITS - 2)) - #define RV(x) ((target_ulong)1 << (x - 'A')) #define RVI RV('I') @@ -67,7 +74,7 @@ #define RVS RV('S') #define RVU RV('U') #define RVH RV('H') -#define RVB RV('B') +#define RVJ RV('J') /* S extension denotes that Supervisor mode exists, however it is possible to have a core that support S mode but does not have an MMU and there @@ -77,14 +84,18 @@ enum { RISCV_FEATURE_MMU, RISCV_FEATURE_PMP, RISCV_FEATURE_EPMP, - RISCV_FEATURE_MISA + RISCV_FEATURE_MISA, + RISCV_FEATURE_DEBUG }; -#define PRIV_VERSION_1_10_0 0x00011000 -#define PRIV_VERSION_1_11_0 0x00011100 +/* Privileged specification version */ +enum { + PRIV_VERSION_1_10_0 = 0, + PRIV_VERSION_1_11_0, + PRIV_VERSION_1_12_0, +}; -#define BEXT_VERSION_0_93_0 0x00009300 -#define VEXT_VERSION_0_07_1 0x00000701 +#define VEXT_VERSION_1_00_0 0x00010000 enum { TRANSLATE_SUCCESS, @@ -97,22 +108,41 @@ enum { #define MAX_RISCV_PMPS (16) -typedef struct CPURISCVState CPURISCVState; +typedef struct CPUArchState CPURISCVState; #if !defined(CONFIG_USER_ONLY) #include "pmp.h" +#include "debug.h" #endif -#define RV_VLEN_MAX 256 +#define RV_VLEN_MAX 1024 +#define RV_MAX_MHPMEVENTS 32 +#define RV_MAX_MHPMCOUNTERS 32 -FIELD(VTYPE, VLMUL, 0, 2) -FIELD(VTYPE, VSEW, 2, 3) -FIELD(VTYPE, VEDIV, 5, 2) -FIELD(VTYPE, RESERVED, 7, sizeof(target_ulong) * 8 - 9) -FIELD(VTYPE, VILL, sizeof(target_ulong) * 8 - 1, 1) +FIELD(VTYPE, VLMUL, 0, 3) +FIELD(VTYPE, VSEW, 3, 3) +FIELD(VTYPE, VTA, 6, 1) +FIELD(VTYPE, VMA, 7, 1) +FIELD(VTYPE, VEDIV, 8, 2) +FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11) -struct CPURISCVState { +typedef struct PMUCTRState { + /* Current value of a counter */ + target_ulong mhpmcounter_val; + /* Current value of a counter in RV32*/ + target_ulong mhpmcounterh_val; + /* Snapshot values of counter */ + target_ulong mhpmcounter_prev; + /* Snapshort value of a counter in RV32 */ + target_ulong mhpmcounterh_prev; + bool started; + /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */ + target_ulong irq_overflow_left; +} PMUCTRState; + +struct CPUArchState { target_ulong gpr[32]; + target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */ uint64_t fpr[32]; /* assume both F and D extensions */ /* vector coprocessor state. 
*/ @@ -122,6 +152,7 @@ struct CPURISCVState { target_ulong vl; target_ulong vstart; target_ulong vtype; + bool vill; target_ulong pc; target_ulong load_res; @@ -130,13 +161,23 @@ struct CPURISCVState { target_ulong frm; target_ulong badaddr; + target_ulong bins; + target_ulong guest_phys_fault_addr; target_ulong priv_ver; target_ulong bext_ver; target_ulong vext_ver; - target_ulong misa; - target_ulong misa_mask; + + /* RISCVMXL, but uint32_t for vmstate migration */ + uint32_t misa_mxl; /* current mxl */ + uint32_t misa_mxl_max; /* max mxl for this cpu */ + uint32_t misa_ext; /* current extensions */ + uint32_t misa_ext_mask; /* max ext for this cpu */ + uint32_t xl; /* current xlen */ + + /* 128-bit helpers upper part return value */ + target_ulong retxh; uint32_t features; @@ -148,7 +189,8 @@ struct CPURISCVState { target_ulong priv; /* This contains QEMU specific information about the virt state. */ target_ulong virt; - target_ulong resetvec; + target_ulong geilen; + uint64_t resetvec; target_ulong mhartid; /* @@ -157,12 +199,20 @@ struct CPURISCVState { */ uint64_t mstatus; - target_ulong mip; + uint64_t mip; + /* + * MIP contains the software writable version of SEIP ORed with the + * external interrupt value. The MIP register is always up-to-date. + * To keep track of the current source, we also save booleans of the values + * here. + */ + bool external_seip; + bool software_seip; - uint32_t miclaim; + uint64_t miclaim; - target_ulong mie; - target_ulong mideleg; + uint64_t mie; + uint64_t mideleg; target_ulong satp; /* since: priv-1.10.0 */ target_ulong stval; @@ -177,16 +227,34 @@ struct CPURISCVState { target_ulong mcause; target_ulong mtval; /* since: priv-1.10.0 */ + /* Machine and Supervisor interrupt priorities */ + uint8_t miprio[64]; + uint8_t siprio[64]; + + /* AIA CSRs */ + target_ulong miselect; + target_ulong siselect; + /* Hypervisor CSRs */ target_ulong hstatus; target_ulong hedeleg; - target_ulong hideleg; + uint64_t hideleg; target_ulong hcounteren; target_ulong htval; target_ulong htinst; target_ulong hgatp; + target_ulong hgeie; + target_ulong hgeip; uint64_t htimedelta; + /* Hypervisor controlled virtual interrupt priorities */ + target_ulong hvictl; + uint8_t hviprio[64]; + + /* Upper 64-bits of 128-bit CSRs */ + uint64_t mscratchh; + uint64_t sscratchh; + /* Virtual CSRs */ /* * For RV32 this is 32-bit vsstatus and 32-bit vsstatush. @@ -200,6 +268,9 @@ struct CPURISCVState { target_ulong vstval; target_ulong vsatp; + /* AIA VS-mode CSRs */ + target_ulong vsiselect; + target_ulong mtval2; target_ulong mtinst; @@ -215,38 +286,111 @@ struct CPURISCVState { /* Signals whether the current exception occurred with two-stage address translation active. */ bool two_stage_lookup; + /* + * Signals whether the current exception occurred while doing two-stage + * address translation for the VS-stage page table walk. + */ + bool two_stage_indirect_lookup; target_ulong scounteren; target_ulong mcounteren; + target_ulong mcountinhibit; + + /* PMU counter state */ + PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS]; + + /* PMU event selector configured values. 
First three are unused*/ + target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS]; + + /* PMU event selector configured values for RV32*/ + target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS]; + target_ulong sscratch; target_ulong mscratch; /* temporary htif regs */ uint64_t mfromhost; uint64_t mtohost; - uint64_t timecmp; + + /* Sstc CSRs */ + uint64_t stimecmp; + + uint64_t vstimecmp; /* physical memory protection */ pmp_table_t pmp_state; target_ulong mseccfg; + /* trigger module */ + target_ulong trigger_cur; + target_ulong tdata1[RV_MAX_TRIGGERS]; + target_ulong tdata2[RV_MAX_TRIGGERS]; + target_ulong tdata3[RV_MAX_TRIGGERS]; + struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS]; + struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS]; + /* machine specific rdtime callback */ - uint64_t (*rdtime_fn)(uint32_t); - uint32_t rdtime_fn_arg; + uint64_t (*rdtime_fn)(void *); + void *rdtime_fn_arg; + + /* machine specific AIA ireg read-modify-write callback */ +#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \ + ((((__xlen) & 0xff) << 24) | \ + (((__vgein) & 0x3f) << 20) | \ + (((__virt) & 0x1) << 18) | \ + (((__priv) & 0x3) << 16) | \ + (__isel & 0xffff)) +#define AIA_IREG_ISEL(__ireg) ((__ireg) & 0xffff) +#define AIA_IREG_PRIV(__ireg) (((__ireg) >> 16) & 0x3) +#define AIA_IREG_VIRT(__ireg) (((__ireg) >> 18) & 0x1) +#define AIA_IREG_VGEIN(__ireg) (((__ireg) >> 20) & 0x3f) +#define AIA_IREG_XLEN(__ireg) (((__ireg) >> 24) & 0xff) + int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg, + target_ulong *val, target_ulong new_val, target_ulong write_mask); + void *aia_ireg_rmw_fn_arg[4]; /* True if in debugger mode. */ bool debugger; + + /* + * CSRs for PointerMasking extension + */ + target_ulong mmte; + target_ulong mpmmask; + target_ulong mpmbase; + target_ulong spmmask; + target_ulong spmbase; + target_ulong upmmask; + target_ulong upmbase; + + /* CSRs for execution enviornment configuration */ + uint64_t menvcfg; + target_ulong senvcfg; + uint64_t henvcfg; #endif + target_ulong cur_pmmask; + target_ulong cur_pmbase; float_status fp_status; /* Fields from here on are preserved across CPU reset. 
*/ - QEMUTimer *timer; /* Internal timer */ + QEMUTimer *stimer; /* Internal timer for S-mode interrupt */ + QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */ + bool vstime_irq; + + hwaddr kernel_addr; + hwaddr fdt_addr; + + /* kvm timer */ + bool kvm_timer_dirty; + uint64_t kvm_timer_time; + uint64_t kvm_timer_compare; + uint64_t kvm_timer_state; + uint64_t kvm_timer_frequency; }; -OBJECT_DECLARE_TYPE(RISCVCPU, RISCVCPUClass, - RISCV_CPU) +OBJECT_DECLARE_CPU_TYPE(RISCVCPU, RISCVCPUClass, RISCV_CPU) /** * RISCVCPUClass: @@ -263,13 +407,90 @@ struct RISCVCPUClass { DeviceReset parent_reset; }; +struct RISCVCPUConfig { + bool ext_i; + bool ext_e; + bool ext_g; + bool ext_m; + bool ext_a; + bool ext_f; + bool ext_d; + bool ext_c; + bool ext_s; + bool ext_u; + bool ext_h; + bool ext_j; + bool ext_v; + bool ext_zba; + bool ext_zbb; + bool ext_zbc; + bool ext_zbkb; + bool ext_zbkc; + bool ext_zbkx; + bool ext_zbs; + bool ext_zk; + bool ext_zkn; + bool ext_zknd; + bool ext_zkne; + bool ext_zknh; + bool ext_zkr; + bool ext_zks; + bool ext_zksed; + bool ext_zksh; + bool ext_zkt; + bool ext_ifencei; + bool ext_icsr; + bool ext_zihintpause; + bool ext_sstc; + bool ext_svinval; + bool ext_svnapot; + bool ext_svpbmt; + bool ext_zdinx; + bool ext_zfh; + bool ext_zfhmin; + bool ext_zfinx; + bool ext_zhinx; + bool ext_zhinxmin; + bool ext_zve32f; + bool ext_zve64f; + bool ext_zmmul; + bool ext_smaia; + bool ext_ssaia; + bool ext_sscofpmf; + bool rvv_ta_all_1s; + bool rvv_ma_all_1s; + + uint32_t mvendorid; + uint64_t marchid; + uint64_t mimpid; + + /* Vendor-specific custom extensions */ + bool ext_XVentanaCondOps; + + uint8_t pmu_num; + char *priv_spec; + char *user_spec; + char *bext_spec; + char *vext_spec; + uint16_t vlen; + uint16_t elen; + bool mmu; + bool pmp; + bool epmp; + bool debug; + + bool short_isa_string; +}; + +typedef struct RISCVCPUConfig RISCVCPUConfig; + /** * RISCVCPU: * @env: #CPURISCVState * * A RISCV CPU. 
*/ -struct RISCVCPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ @@ -277,42 +498,21 @@ struct RISCVCPU { CPURISCVState env; char *dyn_csr_xml; + char *dyn_vreg_xml; /* Configuration Settings */ - struct { - bool ext_i; - bool ext_e; - bool ext_g; - bool ext_m; - bool ext_a; - bool ext_f; - bool ext_d; - bool ext_c; - bool ext_b; - bool ext_s; - bool ext_u; - bool ext_h; - bool ext_v; - bool ext_counters; - bool ext_ifencei; - bool ext_icsr; + RISCVCPUConfig cfg; - char *priv_spec; - char *user_spec; - char *bext_spec; - char *vext_spec; - uint16_t vlen; - uint16_t elen; - bool mmu; - bool pmp; - bool epmp; - uint64_t resetvec; - } cfg; + QEMUTimer *pmu_timer; + /* A bitmask of Available programmable counters */ + uint32_t pmu_avail_ctrs; + /* Mapping of events to counters */ + GHashTable *pmu_event_ctr_map; }; static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext) { - return (env->misa & ext) != 0; + return (env->misa_ext & ext) != 0; } static inline bool riscv_feature(CPURISCVState *env, int feature) @@ -320,32 +520,43 @@ static inline bool riscv_feature(CPURISCVState *env, int feature) return env->features & (1ULL << feature); } +static inline void riscv_set_feature(CPURISCVState *env, int feature) +{ + env->features |= (1ULL << feature); +} + #include "cpu_user.h" -#include "cpu_bits.h" extern const char * const riscv_int_regnames[]; +extern const char * const riscv_int_regnamesh[]; extern const char * const riscv_fpr_regnames[]; const char *riscv_cpu_get_trap_name(target_ulong cause, bool async); void riscv_cpu_do_interrupt(CPUState *cpu); int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, - int cpuid, void *opaque); + int cpuid, DumpState *s); int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, - int cpuid, void *opaque); + int cpuid, DumpState *s); int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request); +int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero); +uint8_t riscv_cpu_default_priority(int irq); +uint64_t riscv_cpu_all_pending(CPURISCVState *env); +int riscv_cpu_mirq_pending(CPURISCVState *env); +int riscv_cpu_sirq_pending(CPURISCVState *env); +int riscv_cpu_vsirq_pending(CPURISCVState *env); bool riscv_cpu_fp_enabled(CPURISCVState *env); +target_ulong riscv_cpu_get_geilen(CPURISCVState *env); +void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen); +bool riscv_cpu_vector_enabled(CPURISCVState *env); bool riscv_cpu_virt_enabled(CPURISCVState *env); void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable); -bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env); -void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable); bool riscv_cpu_two_stage_lookup(int mmu_idx); int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch); hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); -void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, - MMUAccessType access_type, int mmu_idx, - uintptr_t retaddr); +G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr); bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); @@ -357,104 +568,147 @@ void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, char 
*riscv_isa_string(RISCVCPU *cpu); void riscv_cpu_list(void); -#define cpu_signal_handler riscv_cpu_signal_handler #define cpu_list riscv_cpu_list #define cpu_mmu_index riscv_cpu_mmu_index #ifndef CONFIG_USER_ONLY +bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request); void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env); -int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts); -uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value); +int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts); +uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value); #define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */ -void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t), - uint32_t arg); +void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *), + void *arg); +void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv, + int (*rmw_fn)(void *arg, + target_ulong reg, + target_ulong *val, + target_ulong new_val, + target_ulong write_mask), + void *rmw_fn_arg); #endif void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv); void riscv_translate_init(void); -int riscv_cpu_signal_handler(int host_signum, void *pinfo, void *puc); -void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env, - uint32_t exception, uintptr_t pc); +G_NORETURN void riscv_raise_exception(CPURISCVState *env, + uint32_t exception, uintptr_t pc); target_ulong riscv_cpu_get_fflags(CPURISCVState *env); void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong); -#define TB_FLAGS_MMU_MASK 7 #define TB_FLAGS_PRIV_MMU_MASK 3 #define TB_FLAGS_PRIV_HYP_ACCESS_MASK (1 << 2) #define TB_FLAGS_MSTATUS_FS MSTATUS_FS +#define TB_FLAGS_MSTATUS_VS MSTATUS_VS -typedef CPURISCVState CPUArchState; -typedef RISCVCPU ArchCPU; #include "exec/cpu-all.h" -FIELD(TB_FLAGS, VL_EQ_VLMAX, 2, 1) -FIELD(TB_FLAGS, LMUL, 3, 2) -FIELD(TB_FLAGS, SEW, 5, 3) -FIELD(TB_FLAGS, VILL, 8, 1) +FIELD(TB_FLAGS, MEM_IDX, 0, 3) +FIELD(TB_FLAGS, LMUL, 3, 3) +FIELD(TB_FLAGS, SEW, 6, 3) +/* Skip MSTATUS_VS (0x600) bits */ +FIELD(TB_FLAGS, VL_EQ_VLMAX, 11, 1) +FIELD(TB_FLAGS, VILL, 12, 1) +/* Skip MSTATUS_FS (0x6000) bits */ /* Is a Hypervisor instruction load/store allowed? */ -FIELD(TB_FLAGS, HLSX, 9, 1) +FIELD(TB_FLAGS, HLSX, 15, 1) +FIELD(TB_FLAGS, MSTATUS_HS_FS, 16, 2) +FIELD(TB_FLAGS, MSTATUS_HS_VS, 18, 2) +/* The combination of MXL/SXL/UXL that applies to the current cpu mode. 
*/ +FIELD(TB_FLAGS, XL, 20, 2) +/* If PointerMasking should be applied */ +FIELD(TB_FLAGS, PM_MASK_ENABLED, 22, 1) +FIELD(TB_FLAGS, PM_BASE_ENABLED, 23, 1) +FIELD(TB_FLAGS, VTA, 24, 1) +FIELD(TB_FLAGS, VMA, 25, 1) -bool riscv_cpu_is_32bit(CPURISCVState *env); - -/* - * A simplification for VLMAX - * = (1 << LMUL) * VLEN / (8 * (1 << SEW)) - * = (VLEN << LMUL) / (8 << SEW) - * = (VLEN << LMUL) >> (SEW + 3) - * = VLEN >> (SEW + 3 - LMUL) - */ -static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype) -{ - uint8_t sew, lmul; - - sew = FIELD_EX64(vtype, VTYPE, VSEW); - lmul = FIELD_EX64(vtype, VTYPE, VLMUL); - return cpu->cfg.vlen >> (sew + 3 - lmul); -} - -static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, - target_ulong *cs_base, uint32_t *pflags) -{ - uint32_t flags = 0; - - *pc = env->pc; - *cs_base = 0; - - if (riscv_has_ext(env, RVV)) { - uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype); - bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl); - flags = FIELD_DP32(flags, TB_FLAGS, VILL, - FIELD_EX64(env->vtype, VTYPE, VILL)); - flags = FIELD_DP32(flags, TB_FLAGS, SEW, - FIELD_EX64(env->vtype, VTYPE, VSEW)); - flags = FIELD_DP32(flags, TB_FLAGS, LMUL, - FIELD_EX64(env->vtype, VTYPE, VLMUL)); - flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax); - } else { - flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1); - } - -#ifdef CONFIG_USER_ONLY - flags |= TB_FLAGS_MSTATUS_FS; +#ifdef TARGET_RISCV32 +#define riscv_cpu_mxl(env) ((void)(env), MXL_RV32) #else - flags |= cpu_mmu_index(env, 0); - if (riscv_cpu_fp_enabled(env)) { - flags |= env->mstatus & MSTATUS_FS; - } +static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env) +{ + return env->misa_mxl; +} +#endif +#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env))) - if (riscv_has_ext(env, RVH)) { - if (env->priv == PRV_M || - (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) || - (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) && - get_field(env->hstatus, HSTATUS_HU))) { - flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1); +#if defined(TARGET_RISCV32) +#define cpu_recompute_xl(env) ((void)(env), MXL_RV32) +#else +static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env) +{ + RISCVMXL xl = env->misa_mxl; +#if !defined(CONFIG_USER_ONLY) + /* + * When emulating a 32-bit-only cpu, use RV32. + * When emulating a 64-bit cpu, and MXL has been reduced to RV32, + * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened + * back to RV64 for lower privs. + */ + if (xl != MXL_RV32) { + switch (env->priv) { + case PRV_M: + break; + case PRV_U: + xl = get_field(env->mstatus, MSTATUS64_UXL); + break; + default: /* PRV_S | PRV_H */ + xl = get_field(env->mstatus, MSTATUS64_SXL); + break; } } #endif - - *pflags = flags; + return xl; } +#endif + +static inline int riscv_cpu_xlen(CPURISCVState *env) +{ + return 16 << env->xl; +} + +#ifdef TARGET_RISCV32 +#define riscv_cpu_sxl(env) ((void)(env), MXL_RV32) +#else +static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env) +{ +#ifdef CONFIG_USER_ONLY + return env->misa_mxl; +#else + return get_field(env->mstatus, MSTATUS64_SXL); +#endif +} +#endif + +/* + * Encode LMUL to lmul as follows: + * LMUL vlmul lmul + * 1 000 0 + * 2 001 1 + * 4 010 2 + * 8 011 3 + * - 100 - + * 1/8 101 -3 + * 1/4 110 -2 + * 1/2 111 -1 + * + * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul) + * e.g. 
vlen = 256 bits, SEW = 16, LMUL = 1/8 + * => VLMAX = vlen >> (1 + 3 - (-3)) + * = 256 >> 7 + * = 2 + */ +static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype) +{ + uint8_t sew = FIELD_EX64(vtype, VTYPE, VSEW); + int8_t lmul = sextract32(FIELD_EX64(vtype, VTYPE, VLMUL), 0, 3); + return cpu->cfg.vlen >> (sew + 3 - lmul); +} + +void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, + target_ulong *cs_base, uint32_t *pflags); + +void riscv_cpu_update_mask(CPURISCVState *env); RISCVException riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, @@ -488,12 +742,25 @@ typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno, target_ulong new_value, target_ulong write_mask); +RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno, + Int128 *ret_value, + Int128 new_value, Int128 write_mask); + +typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno, + Int128 *ret_value); +typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno, + Int128 new_value); + typedef struct { const char *name; riscv_csr_predicate_fn predicate; riscv_csr_read_fn read; riscv_csr_write_fn write; riscv_csr_op_fn op; + riscv_csr_read128_fn read128; + riscv_csr_write128_fn write128; + /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e 0) */ + uint32_t min_priv_ver; } riscv_csr_operations; /* CSR function table constants */ @@ -501,6 +768,19 @@ enum { CSR_TABLE_SIZE = 0x1000 }; +/** + * The event id are encoded based on the encoding specified in the + * SBI specification v0.3 + */ + +enum riscv_pmu_event_idx { + RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01, + RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02, + RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019, + RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B, + RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021, +}; + /* CSR function table */ extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE]; diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h index 7330ff5a19..d8f5f0abed 100644 --- a/target/riscv/cpu_bits.h +++ b/target/riscv/cpu_bits.h @@ -60,8 +60,16 @@ #define CSR_VSTART 0x008 #define CSR_VXSAT 0x009 #define CSR_VXRM 0x00a +#define CSR_VCSR 0x00f #define CSR_VL 0xc20 #define CSR_VTYPE 0xc21 +#define CSR_VLENB 0xc22 + +/* VCSR fields */ +#define VCSR_VXSAT_SHIFT 0 +#define VCSR_VXSAT (0x1 << VCSR_VXSAT_SHIFT) +#define VCSR_VXRM_SHIFT 1 +#define VCSR_VXRM (0x3 << VCSR_VXRM_SHIFT) /* User Timers and Counters */ #define CSR_CYCLE 0xc00 @@ -140,6 +148,7 @@ #define CSR_MARCHID 0xf12 #define CSR_MIMPID 0xf13 #define CSR_MHARTID 0xf14 +#define CSR_MCONFIGPTR 0xf15 /* Machine Trap Setup */ #define CSR_MSTATUS 0x300 @@ -160,14 +169,34 @@ #define CSR_MTVAL 0x343 #define CSR_MIP 0x344 +/* Machine-Level Window to Indirectly Accessed Registers (AIA) */ +#define CSR_MISELECT 0x350 +#define CSR_MIREG 0x351 + +/* Machine-Level Interrupts (AIA) */ +#define CSR_MTOPEI 0x35c +#define CSR_MTOPI 0xfb0 + +/* Virtual Interrupts for Supervisor Level (AIA) */ +#define CSR_MVIEN 0x308 +#define CSR_MVIP 0x309 + +/* Machine-Level High-Half CSRs (AIA) */ +#define CSR_MIDELEGH 0x313 +#define CSR_MIEH 0x314 +#define CSR_MVIENH 0x318 +#define CSR_MVIPH 0x319 +#define CSR_MIPH 0x354 + /* Supervisor Trap Setup */ #define CSR_SSTATUS 0x100 -#define CSR_SEDELEG 0x102 -#define CSR_SIDELEG 0x103 #define CSR_SIE 0x104 #define CSR_STVEC 0x105 #define CSR_SCOUNTEREN 0x106 +/* Supervisor Configuration CSRs */ +#define CSR_SENVCFG 0x10A + /* Supervisor Trap Handling */ #define CSR_SSCRATCH 0x140 #define 
CSR_SEPC 0x141 @@ -175,10 +204,26 @@ #define CSR_STVAL 0x143 #define CSR_SIP 0x144 +/* Sstc supervisor CSRs */ +#define CSR_STIMECMP 0x14D +#define CSR_STIMECMPH 0x15D + /* Supervisor Protection and Translation */ #define CSR_SPTBR 0x180 #define CSR_SATP 0x180 +/* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */ +#define CSR_SISELECT 0x150 +#define CSR_SIREG 0x151 + +/* Supervisor-Level Interrupts (AIA) */ +#define CSR_STOPEI 0x15c +#define CSR_STOPI 0xdb0 + +/* Supervisor-Level High-Half CSRs (AIA) */ +#define CSR_SIEH 0x114 +#define CSR_SIPH 0x154 + /* Hpervisor CSRs */ #define CSR_HSTATUS 0x600 #define CSR_HEDELEG 0x602 @@ -195,6 +240,10 @@ #define CSR_HTIMEDELTA 0x605 #define CSR_HTIMEDELTAH 0x615 +/* Hypervisor Configuration CSRs */ +#define CSR_HENVCFG 0x60A +#define CSR_HENVCFGH 0x61A + /* Virtual CSRs */ #define CSR_VSSTATUS 0x200 #define CSR_VSIE 0x204 @@ -206,12 +255,43 @@ #define CSR_VSIP 0x244 #define CSR_VSATP 0x280 +/* Sstc virtual CSRs */ +#define CSR_VSTIMECMP 0x24D +#define CSR_VSTIMECMPH 0x25D + #define CSR_MTINST 0x34a #define CSR_MTVAL2 0x34b +/* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */ +#define CSR_HVIEN 0x608 +#define CSR_HVICTL 0x609 +#define CSR_HVIPRIO1 0x646 +#define CSR_HVIPRIO2 0x647 + +/* VS-Level Window to Indirectly Accessed Registers (H-extension with AIA) */ +#define CSR_VSISELECT 0x250 +#define CSR_VSIREG 0x251 + +/* VS-Level Interrupts (H-extension with AIA) */ +#define CSR_VSTOPEI 0x25c +#define CSR_VSTOPI 0xeb0 + +/* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */ +#define CSR_HIDELEGH 0x613 +#define CSR_HVIENH 0x618 +#define CSR_HVIPH 0x655 +#define CSR_HVIPRIO1H 0x656 +#define CSR_HVIPRIO2H 0x657 +#define CSR_VSIEH 0x214 +#define CSR_VSIPH 0x254 + +/* Machine Configuration CSRs */ +#define CSR_MENVCFG 0x30A +#define CSR_MENVCFGH 0x31A + /* Enhanced Physical Memory Protection (ePMP) */ -#define CSR_MSECCFG 0x390 -#define CSR_MSECCFGH 0x391 +#define CSR_MSECCFG 0x747 +#define CSR_MSECCFGH 0x757 /* Physical Memory Protection */ #define CSR_PMPCFG0 0x3a0 #define CSR_PMPCFG1 0x3a1 @@ -239,6 +319,7 @@ #define CSR_TDATA1 0x7a1 #define CSR_TDATA2 0x7a2 #define CSR_TDATA3 0x7a3 +#define CSR_TINFO 0x7a4 /* Debug Mode Registers */ #define CSR_DCSR 0x7b0 @@ -275,6 +356,10 @@ #define CSR_MHPMCOUNTER29 0xb1d #define CSR_MHPMCOUNTER30 0xb1e #define CSR_MHPMCOUNTER31 0xb1f + +/* Machine counter-inhibit register */ +#define CSR_MCOUNTINHIBIT 0x320 + #define CSR_MHPMEVENT3 0x323 #define CSR_MHPMEVENT4 0x324 #define CSR_MHPMEVENT5 0x325 @@ -304,6 +389,37 @@ #define CSR_MHPMEVENT29 0x33d #define CSR_MHPMEVENT30 0x33e #define CSR_MHPMEVENT31 0x33f + +#define CSR_MHPMEVENT3H 0x723 +#define CSR_MHPMEVENT4H 0x724 +#define CSR_MHPMEVENT5H 0x725 +#define CSR_MHPMEVENT6H 0x726 +#define CSR_MHPMEVENT7H 0x727 +#define CSR_MHPMEVENT8H 0x728 +#define CSR_MHPMEVENT9H 0x729 +#define CSR_MHPMEVENT10H 0x72a +#define CSR_MHPMEVENT11H 0x72b +#define CSR_MHPMEVENT12H 0x72c +#define CSR_MHPMEVENT13H 0x72d +#define CSR_MHPMEVENT14H 0x72e +#define CSR_MHPMEVENT15H 0x72f +#define CSR_MHPMEVENT16H 0x730 +#define CSR_MHPMEVENT17H 0x731 +#define CSR_MHPMEVENT18H 0x732 +#define CSR_MHPMEVENT19H 0x733 +#define CSR_MHPMEVENT20H 0x734 +#define CSR_MHPMEVENT21H 0x735 +#define CSR_MHPMEVENT22H 0x736 +#define CSR_MHPMEVENT23H 0x737 +#define CSR_MHPMEVENT24H 0x738 +#define CSR_MHPMEVENT25H 0x739 +#define CSR_MHPMEVENT26H 0x73a +#define CSR_MHPMEVENT27H 0x73b +#define CSR_MHPMEVENT28H 0x73c +#define CSR_MHPMEVENT29H 0x73d +#define 
CSR_MHPMEVENT30H 0x73e +#define CSR_MHPMEVENT31H 0x73f + #define CSR_MHPMCOUNTER3H 0xb83 #define CSR_MHPMCOUNTER4H 0xb84 #define CSR_MHPMCOUNTER5H 0xb85 @@ -334,6 +450,42 @@ #define CSR_MHPMCOUNTER30H 0xb9e #define CSR_MHPMCOUNTER31H 0xb9f +/* + * User PointerMasking registers + * NB: actual CSR numbers might be changed in future + */ +#define CSR_UMTE 0x4c0 +#define CSR_UPMMASK 0x4c1 +#define CSR_UPMBASE 0x4c2 + +/* + * Machine PointerMasking registers + * NB: actual CSR numbers might be changed in future + */ +#define CSR_MMTE 0x3c0 +#define CSR_MPMMASK 0x3c1 +#define CSR_MPMBASE 0x3c2 + +/* + * Supervisor PointerMaster registers + * NB: actual CSR numbers might be changed in future + */ +#define CSR_SMTE 0x1c0 +#define CSR_SPMMASK 0x1c1 +#define CSR_SPMBASE 0x1c2 + +/* + * Hypervisor PointerMaster registers + * NB: actual CSR numbers might be changed in future + */ +#define CSR_VSMTE 0x2c0 +#define CSR_VSPMMASK 0x2c1 +#define CSR_VSPMBASE 0x2c2 +#define CSR_SCOUNTOVF 0xda0 + +/* Crypto Extension */ +#define CSR_SEED 0x015 + /* mstatus CSR bits */ #define MSTATUS_UIE 0x00000001 #define MSTATUS_SIE 0x00000002 @@ -343,6 +495,7 @@ #define MSTATUS_UBE 0x00000040 #define MSTATUS_MPIE 0x00000080 #define MSTATUS_SPP 0x00000100 +#define MSTATUS_VS 0x00000600 #define MSTATUS_MPP 0x00001800 #define MSTATUS_FS 0x00006000 #define MSTATUS_XS 0x00018000 @@ -360,13 +513,16 @@ #define MSTATUS32_SD 0x80000000 #define MSTATUS64_SD 0x8000000000000000ULL +#define MSTATUSH128_SD 0x8000000000000000ULL #define MISA32_MXL 0xC0000000 #define MISA64_MXL 0xC000000000000000ULL -#define MXL_RV32 1 -#define MXL_RV64 2 -#define MXL_RV128 3 +typedef enum { + MXL_RV32 = 1, + MXL_RV64 = 2, + MXL_RV128 = 3, +} RISCVMXL; /* sstatus CSR bits */ #define SSTATUS_UIE 0x00000001 @@ -374,11 +530,14 @@ #define SSTATUS_UPIE 0x00000010 #define SSTATUS_SPIE 0x00000020 #define SSTATUS_SPP 0x00000100 +#define SSTATUS_VS 0x00000600 #define SSTATUS_FS 0x00006000 #define SSTATUS_XS 0x00018000 #define SSTATUS_SUM 0x00040000 /* since: priv-1.10 */ #define SSTATUS_MXR 0x00080000 +#define SSTATUS64_UXL 0x0000000300000000ULL + #define SSTATUS32_SD 0x80000000 #define SSTATUS64_SD 0x8000000000000000ULL @@ -397,10 +556,13 @@ #define HSTATUS32_WPRI 0xFF8FF87E #define HSTATUS64_WPRI 0xFFFFFFFFFF8FF87EULL -#define HCOUNTEREN_CY (1 << 0) -#define HCOUNTEREN_TM (1 << 1) -#define HCOUNTEREN_IR (1 << 2) -#define HCOUNTEREN_HPM3 (1 << 3) +#define COUNTEREN_CY (1 << 0) +#define COUNTEREN_TM (1 << 1) +#define COUNTEREN_IR (1 << 2) +#define COUNTEREN_HPM3 (1 << 3) + +/* vsstatus CSR bits */ +#define VSSTATUS64_UXL 0x0000000300000000ULL /* Privilege modes */ #define PRV_U 0 @@ -410,12 +572,6 @@ /* Virtulisation Register Fields */ #define VIRT_ONOFF 1 -/* This is used to save state for when we take an exception. If this is set - * that means that we want to force a HS level exception (no matter what the - * delegation is set to). This will occur for things such as a second level - * page table fault. 
- */ -#define FORCE_HS_EXCEP 2 /* RV32 satp CSR field masks */ #define SATP32_MODE 0x80000000 @@ -427,14 +583,6 @@ #define SATP64_ASID 0x0FFFF00000000000ULL #define SATP64_PPN 0x00000FFFFFFFFFFFULL -/* VM modes (mstatus.vm) privileged ISA 1.9.1 */ -#define VM_1_09_MBARE 0 -#define VM_1_09_MBB 1 -#define VM_1_09_MBBID 2 -#define VM_1_09_SV32 8 -#define VM_1_09_SV39 9 -#define VM_1_09_SV48 10 - /* VM modes (satp.mode) privileged ISA 1.10 */ #define VM_1_10_MBARE 0 #define VM_1_10_SV32 1 @@ -453,10 +601,16 @@ #define PTE_A 0x040 /* Accessed */ #define PTE_D 0x080 /* Dirty */ #define PTE_SOFT 0x300 /* Reserved for Software */ +#define PTE_PBMT 0x6000000000000000ULL /* Page-based memory types */ +#define PTE_N 0x8000000000000000ULL /* NAPOT translation */ +#define PTE_ATTR (PTE_N | PTE_PBMT) /* All attributes bits */ /* Page table PPN shift amount */ #define PTE_PPN_SHIFT 10 +/* Page table PPN mask */ +#define PTE_PPN_MASK 0x3FFFFFFFFFFC00ULL + /* Leaf page shift amount */ #define PGSHIFT 12 @@ -504,6 +658,10 @@ typedef enum RISCVException { #define IRQ_S_EXT 9 #define IRQ_VS_EXT 10 #define IRQ_M_EXT 11 +#define IRQ_S_GEXT 12 +#define IRQ_PMU_OVF 13 +#define IRQ_LOCAL_MAX 16 +#define IRQ_LOCAL_GUEST_MAX (TARGET_LONG_BITS - 1) /* mip masks */ #define MIP_USIP (1 << IRQ_U_SOFT) @@ -518,11 +676,14 @@ typedef enum RISCVException { #define MIP_SEIP (1 << IRQ_S_EXT) #define MIP_VSEIP (1 << IRQ_VS_EXT) #define MIP_MEIP (1 << IRQ_M_EXT) +#define MIP_SGEIP (1 << IRQ_S_GEXT) +#define MIP_LCOFIP (1 << IRQ_PMU_OVF) /* sip masks */ #define SIP_SSIP MIP_SSIP #define SIP_STIP MIP_STIP #define SIP_SEIP MIP_SEIP +#define SIP_LCOFIP MIP_LCOFIP /* MIE masks */ #define MIE_SEIE (1 << IRQ_S_EXT) @@ -531,4 +692,169 @@ typedef enum RISCVException { #define MIE_UTIE (1 << IRQ_U_TIMER) #define MIE_SSIE (1 << IRQ_S_SOFT) #define MIE_USIE (1 << IRQ_U_SOFT) + +/* General PointerMasking CSR bits*/ +#define PM_ENABLE 0x00000001ULL +#define PM_CURRENT 0x00000002ULL +#define PM_INSN 0x00000004ULL +#define PM_XS_MASK 0x00000003ULL + +/* PointerMasking XS bits values */ +#define PM_EXT_DISABLE 0x00000000ULL +#define PM_EXT_INITIAL 0x00000001ULL +#define PM_EXT_CLEAN 0x00000002ULL +#define PM_EXT_DIRTY 0x00000003ULL + +/* Execution enviornment configuration bits */ +#define MENVCFG_FIOM BIT(0) +#define MENVCFG_CBIE (3UL << 4) +#define MENVCFG_CBCFE BIT(6) +#define MENVCFG_CBZE BIT(7) +#define MENVCFG_PBMTE (1ULL << 62) +#define MENVCFG_STCE (1ULL << 63) + +/* For RV32 */ +#define MENVCFGH_PBMTE BIT(30) +#define MENVCFGH_STCE BIT(31) + +#define SENVCFG_FIOM MENVCFG_FIOM +#define SENVCFG_CBIE MENVCFG_CBIE +#define SENVCFG_CBCFE MENVCFG_CBCFE +#define SENVCFG_CBZE MENVCFG_CBZE + +#define HENVCFG_FIOM MENVCFG_FIOM +#define HENVCFG_CBIE MENVCFG_CBIE +#define HENVCFG_CBCFE MENVCFG_CBCFE +#define HENVCFG_CBZE MENVCFG_CBZE +#define HENVCFG_PBMTE MENVCFG_PBMTE +#define HENVCFG_STCE MENVCFG_STCE + +/* For RV32 */ +#define HENVCFGH_PBMTE MENVCFGH_PBMTE +#define HENVCFGH_STCE MENVCFGH_STCE + +/* Offsets for every pair of control bits per each priv level */ +#define XS_OFFSET 0ULL +#define U_OFFSET 2ULL +#define S_OFFSET 5ULL +#define M_OFFSET 8ULL + +#define PM_XS_BITS (PM_XS_MASK << XS_OFFSET) +#define U_PM_ENABLE (PM_ENABLE << U_OFFSET) +#define U_PM_CURRENT (PM_CURRENT << U_OFFSET) +#define U_PM_INSN (PM_INSN << U_OFFSET) +#define S_PM_ENABLE (PM_ENABLE << S_OFFSET) +#define S_PM_CURRENT (PM_CURRENT << S_OFFSET) +#define S_PM_INSN (PM_INSN << S_OFFSET) +#define M_PM_ENABLE (PM_ENABLE << M_OFFSET) +#define M_PM_CURRENT (PM_CURRENT << 
M_OFFSET) +#define M_PM_INSN (PM_INSN << M_OFFSET) + +/* mmte CSR bits */ +#define MMTE_PM_XS_BITS PM_XS_BITS +#define MMTE_U_PM_ENABLE U_PM_ENABLE +#define MMTE_U_PM_CURRENT U_PM_CURRENT +#define MMTE_U_PM_INSN U_PM_INSN +#define MMTE_S_PM_ENABLE S_PM_ENABLE +#define MMTE_S_PM_CURRENT S_PM_CURRENT +#define MMTE_S_PM_INSN S_PM_INSN +#define MMTE_M_PM_ENABLE M_PM_ENABLE +#define MMTE_M_PM_CURRENT M_PM_CURRENT +#define MMTE_M_PM_INSN M_PM_INSN +#define MMTE_MASK (MMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | MMTE_U_PM_INSN | \ + MMTE_S_PM_ENABLE | MMTE_S_PM_CURRENT | MMTE_S_PM_INSN | \ + MMTE_M_PM_ENABLE | MMTE_M_PM_CURRENT | MMTE_M_PM_INSN | \ + MMTE_PM_XS_BITS) + +/* (v)smte CSR bits */ +#define SMTE_PM_XS_BITS PM_XS_BITS +#define SMTE_U_PM_ENABLE U_PM_ENABLE +#define SMTE_U_PM_CURRENT U_PM_CURRENT +#define SMTE_U_PM_INSN U_PM_INSN +#define SMTE_S_PM_ENABLE S_PM_ENABLE +#define SMTE_S_PM_CURRENT S_PM_CURRENT +#define SMTE_S_PM_INSN S_PM_INSN +#define SMTE_MASK (SMTE_U_PM_ENABLE | SMTE_U_PM_CURRENT | SMTE_U_PM_INSN | \ + SMTE_S_PM_ENABLE | SMTE_S_PM_CURRENT | SMTE_S_PM_INSN | \ + SMTE_PM_XS_BITS) + +/* umte CSR bits */ +#define UMTE_U_PM_ENABLE U_PM_ENABLE +#define UMTE_U_PM_CURRENT U_PM_CURRENT +#define UMTE_U_PM_INSN U_PM_INSN +#define UMTE_MASK (UMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | UMTE_U_PM_INSN) + +/* MISELECT, SISELECT, and VSISELECT bits (AIA) */ +#define ISELECT_IPRIO0 0x30 +#define ISELECT_IPRIO15 0x3f +#define ISELECT_IMSIC_EIDELIVERY 0x70 +#define ISELECT_IMSIC_EITHRESHOLD 0x72 +#define ISELECT_IMSIC_EIP0 0x80 +#define ISELECT_IMSIC_EIP63 0xbf +#define ISELECT_IMSIC_EIE0 0xc0 +#define ISELECT_IMSIC_EIE63 0xff +#define ISELECT_IMSIC_FIRST ISELECT_IMSIC_EIDELIVERY +#define ISELECT_IMSIC_LAST ISELECT_IMSIC_EIE63 +#define ISELECT_MASK 0x1ff + +/* Dummy [M|S|VS]ISELECT value for emulating [M|S|VS]TOPEI CSRs */ +#define ISELECT_IMSIC_TOPEI (ISELECT_MASK + 1) + +/* IMSIC bits (AIA) */ +#define IMSIC_TOPEI_IID_SHIFT 16 +#define IMSIC_TOPEI_IID_MASK 0x7ff +#define IMSIC_TOPEI_IPRIO_MASK 0x7ff +#define IMSIC_EIPx_BITS 32 +#define IMSIC_EIEx_BITS 32 + +/* MTOPI and STOPI bits (AIA) */ +#define TOPI_IID_SHIFT 16 +#define TOPI_IID_MASK 0xfff +#define TOPI_IPRIO_MASK 0xff + +/* Interrupt priority bits (AIA) */ +#define IPRIO_IRQ_BITS 8 +#define IPRIO_MMAXIPRIO 255 +#define IPRIO_DEFAULT_UPPER 4 +#define IPRIO_DEFAULT_MIDDLE (IPRIO_DEFAULT_UPPER + 12) +#define IPRIO_DEFAULT_M IPRIO_DEFAULT_MIDDLE +#define IPRIO_DEFAULT_S (IPRIO_DEFAULT_M + 3) +#define IPRIO_DEFAULT_SGEXT (IPRIO_DEFAULT_S + 3) +#define IPRIO_DEFAULT_VS (IPRIO_DEFAULT_SGEXT + 1) +#define IPRIO_DEFAULT_LOWER (IPRIO_DEFAULT_VS + 3) + +/* HVICTL bits (AIA) */ +#define HVICTL_VTI 0x40000000 +#define HVICTL_IID 0x0fff0000 +#define HVICTL_IPRIOM 0x00000100 +#define HVICTL_IPRIO 0x000000ff +#define HVICTL_VALID_MASK \ + (HVICTL_VTI | HVICTL_IID | HVICTL_IPRIOM | HVICTL_IPRIO) + +/* seed CSR bits */ +#define SEED_OPST (0b11 << 30) +#define SEED_OPST_BIST (0b00 << 30) +#define SEED_OPST_WAIT (0b01 << 30) +#define SEED_OPST_ES16 (0b10 << 30) +#define SEED_OPST_DEAD (0b11 << 30) +/* PMU related bits */ +#define MIE_LCOFIE (1 << IRQ_PMU_OVF) + +#define MHPMEVENT_BIT_OF BIT_ULL(63) +#define MHPMEVENTH_BIT_OF BIT(31) +#define MHPMEVENT_BIT_MINH BIT_ULL(62) +#define MHPMEVENTH_BIT_MINH BIT(30) +#define MHPMEVENT_BIT_SINH BIT_ULL(61) +#define MHPMEVENTH_BIT_SINH BIT(29) +#define MHPMEVENT_BIT_UINH BIT_ULL(60) +#define MHPMEVENTH_BIT_UINH BIT(28) +#define MHPMEVENT_BIT_VSINH BIT_ULL(59) +#define MHPMEVENTH_BIT_VSINH BIT(27) +#define 
MHPMEVENT_BIT_VUINH BIT_ULL(58) +#define MHPMEVENTH_BIT_VUINH BIT(26) + +#define MHPMEVENT_SSCOF_MASK _ULL(0xFFFF000000000000) +#define MHPMEVENT_IDX_MASK 0xFFFFF +#define MHPMEVENT_SSCOF_RESVD 16 + #endif diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c index 968cb8046f..278d163803 100644 --- a/target/riscv/cpu_helper.c +++ b/target/riscv/cpu_helper.c @@ -21,10 +21,13 @@ #include "qemu/log.h" #include "qemu/main-loop.h" #include "cpu.h" +#include "pmu.h" #include "exec/exec-all.h" +#include "instmap.h" #include "tcg/tcg-op.h" #include "trace.h" #include "semihosting/common-semi.h" +#include "cpu_bits.h" int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch) { @@ -35,51 +38,398 @@ int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch) #endif } -#ifndef CONFIG_USER_ONLY -static int riscv_cpu_local_irq_pending(CPURISCVState *env) +void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, + target_ulong *cs_base, uint32_t *pflags) { - target_ulong irqs; + CPUState *cs = env_cpu(env); + RISCVCPU *cpu = RISCV_CPU(cs); - target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE); - target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE); - target_ulong hs_mstatus_sie = get_field(env->mstatus_hs, MSTATUS_SIE); + uint32_t flags = 0; - target_ulong pending = env->mip & env->mie & - ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP); - target_ulong vspending = (env->mip & env->mie & - (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP)); + *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc; + *cs_base = 0; - target_ulong mie = env->priv < PRV_M || - (env->priv == PRV_M && mstatus_mie); - target_ulong sie = env->priv < PRV_S || - (env->priv == PRV_S && mstatus_sie); - target_ulong hs_sie = env->priv < PRV_S || - (env->priv == PRV_S && hs_mstatus_sie); + if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) { + /* + * If env->vl equals to VLMAX, we can use generic vector operation + * expanders (GVEC) to accerlate the vector operations. + * However, as LMUL could be a fractional number. The maximum + * vector size can be operated might be less than 8 bytes, + * which is not supported by GVEC. So we set vl_eq_vlmax flag to true + * only when maxsz >= 8 bytes. 
+ */ + uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype); + uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW); + uint32_t maxsz = vlmax << sew; + bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) && + (maxsz >= 8); + flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill); + flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew); + flags = FIELD_DP32(flags, TB_FLAGS, LMUL, + FIELD_EX64(env->vtype, VTYPE, VLMUL)); + flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax); + flags = FIELD_DP32(flags, TB_FLAGS, VTA, + FIELD_EX64(env->vtype, VTYPE, VTA)); + flags = FIELD_DP32(flags, TB_FLAGS, VMA, + FIELD_EX64(env->vtype, VTYPE, VMA)); + } else { + flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1); + } - if (riscv_cpu_virt_enabled(env)) { - target_ulong pending_hs_irq = pending & -hs_sie; +#ifdef CONFIG_USER_ONLY + flags |= TB_FLAGS_MSTATUS_FS; + flags |= TB_FLAGS_MSTATUS_VS; +#else + flags |= cpu_mmu_index(env, 0); + if (riscv_cpu_fp_enabled(env)) { + flags |= env->mstatus & MSTATUS_FS; + } - if (pending_hs_irq) { - riscv_cpu_set_force_hs_excep(env, FORCE_HS_EXCEP); - return ctz64(pending_hs_irq); + if (riscv_cpu_vector_enabled(env)) { + flags |= env->mstatus & MSTATUS_VS; + } + + if (riscv_has_ext(env, RVH)) { + if (env->priv == PRV_M || + (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) || + (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) && + get_field(env->hstatus, HSTATUS_HU))) { + flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1); } - pending = vspending; + flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_FS, + get_field(env->mstatus_hs, MSTATUS_FS)); + + flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS, + get_field(env->mstatus_hs, MSTATUS_VS)); + } +#endif + + flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl); + if (env->cur_pmmask < (env->xl == MXL_RV32 ? UINT32_MAX : UINT64_MAX)) { + flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1); + } + if (env->cur_pmbase != 0) { + flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1); } - irqs = (pending & ~env->mideleg & -mie) | (pending & env->mideleg & -sie); + *pflags = flags; +} - if (irqs) { - return ctz64(irqs); /* since non-zero */ +void riscv_cpu_update_mask(CPURISCVState *env) +{ + target_ulong mask = -1, base = 0; + /* + * TODO: Current RVJ spec does not specify + * how the extension interacts with XLEN. 
+ */ +#ifndef CONFIG_USER_ONLY + if (riscv_has_ext(env, RVJ)) { + switch (env->priv) { + case PRV_M: + if (env->mmte & M_PM_ENABLE) { + mask = env->mpmmask; + base = env->mpmbase; + } + break; + case PRV_S: + if (env->mmte & S_PM_ENABLE) { + mask = env->spmmask; + base = env->spmbase; + } + break; + case PRV_U: + if (env->mmte & U_PM_ENABLE) { + mask = env->upmmask; + base = env->upmbase; + } + break; + default: + g_assert_not_reached(); + } + } +#endif + if (env->xl == MXL_RV32) { + env->cur_pmmask = mask & UINT32_MAX; + env->cur_pmbase = base & UINT32_MAX; } else { - return RISCV_EXCP_NONE; /* indicates no pending interrupt */ + env->cur_pmmask = mask; + env->cur_pmbase = base; } } -#endif + +#ifndef CONFIG_USER_ONLY + +/* + * The HS-mode is allowed to configure priority only for the + * following VS-mode local interrupts: + * + * 0 (Reserved interrupt, reads as zero) + * 1 Supervisor software interrupt + * 4 (Reserved interrupt, reads as zero) + * 5 Supervisor timer interrupt + * 8 (Reserved interrupt, reads as zero) + * 13 (Reserved interrupt) + * 14 " + * 15 " + * 16 " + * 17 " + * 18 " + * 19 " + * 20 " + * 21 " + * 22 " + * 23 " + */ + +static const int hviprio_index2irq[] = { + 0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 }; +static const int hviprio_index2rdzero[] = { + 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + +int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero) +{ + if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) { + return -EINVAL; + } + + if (out_irq) { + *out_irq = hviprio_index2irq[index]; + } + + if (out_rdzero) { + *out_rdzero = hviprio_index2rdzero[index]; + } + + return 0; +} + +/* + * Default priorities of local interrupts are defined in the + * RISC-V Advanced Interrupt Architecture specification. 
+ * + * ---------------------------------------------------------------- + * Default | + * Priority | Major Interrupt Numbers + * ---------------------------------------------------------------- + * Highest | 47, 23, 46, 45, 22, 44, + * | 43, 21, 42, 41, 20, 40 + * | + * | 11 (0b), 3 (03), 7 (07) + * | 9 (09), 1 (01), 5 (05) + * | 12 (0c) + * | 10 (0a), 2 (02), 6 (06) + * | + * | 39, 19, 38, 37, 18, 36, + * Lowest | 35, 17, 34, 33, 16, 32 + * ---------------------------------------------------------------- + */ +static const uint8_t default_iprio[64] = { + /* Custom interrupts 48 to 63 */ + [63] = IPRIO_MMAXIPRIO, + [62] = IPRIO_MMAXIPRIO, + [61] = IPRIO_MMAXIPRIO, + [60] = IPRIO_MMAXIPRIO, + [59] = IPRIO_MMAXIPRIO, + [58] = IPRIO_MMAXIPRIO, + [57] = IPRIO_MMAXIPRIO, + [56] = IPRIO_MMAXIPRIO, + [55] = IPRIO_MMAXIPRIO, + [54] = IPRIO_MMAXIPRIO, + [53] = IPRIO_MMAXIPRIO, + [52] = IPRIO_MMAXIPRIO, + [51] = IPRIO_MMAXIPRIO, + [50] = IPRIO_MMAXIPRIO, + [49] = IPRIO_MMAXIPRIO, + [48] = IPRIO_MMAXIPRIO, + + /* Custom interrupts 24 to 31 */ + [31] = IPRIO_MMAXIPRIO, + [30] = IPRIO_MMAXIPRIO, + [29] = IPRIO_MMAXIPRIO, + [28] = IPRIO_MMAXIPRIO, + [27] = IPRIO_MMAXIPRIO, + [26] = IPRIO_MMAXIPRIO, + [25] = IPRIO_MMAXIPRIO, + [24] = IPRIO_MMAXIPRIO, + + [47] = IPRIO_DEFAULT_UPPER, + [23] = IPRIO_DEFAULT_UPPER + 1, + [46] = IPRIO_DEFAULT_UPPER + 2, + [45] = IPRIO_DEFAULT_UPPER + 3, + [22] = IPRIO_DEFAULT_UPPER + 4, + [44] = IPRIO_DEFAULT_UPPER + 5, + + [43] = IPRIO_DEFAULT_UPPER + 6, + [21] = IPRIO_DEFAULT_UPPER + 7, + [42] = IPRIO_DEFAULT_UPPER + 8, + [41] = IPRIO_DEFAULT_UPPER + 9, + [20] = IPRIO_DEFAULT_UPPER + 10, + [40] = IPRIO_DEFAULT_UPPER + 11, + + [11] = IPRIO_DEFAULT_M, + [3] = IPRIO_DEFAULT_M + 1, + [7] = IPRIO_DEFAULT_M + 2, + + [9] = IPRIO_DEFAULT_S, + [1] = IPRIO_DEFAULT_S + 1, + [5] = IPRIO_DEFAULT_S + 2, + + [12] = IPRIO_DEFAULT_SGEXT, + + [10] = IPRIO_DEFAULT_VS, + [2] = IPRIO_DEFAULT_VS + 1, + [6] = IPRIO_DEFAULT_VS + 2, + + [39] = IPRIO_DEFAULT_LOWER, + [19] = IPRIO_DEFAULT_LOWER + 1, + [38] = IPRIO_DEFAULT_LOWER + 2, + [37] = IPRIO_DEFAULT_LOWER + 3, + [18] = IPRIO_DEFAULT_LOWER + 4, + [36] = IPRIO_DEFAULT_LOWER + 5, + + [35] = IPRIO_DEFAULT_LOWER + 6, + [17] = IPRIO_DEFAULT_LOWER + 7, + [34] = IPRIO_DEFAULT_LOWER + 8, + [33] = IPRIO_DEFAULT_LOWER + 9, + [16] = IPRIO_DEFAULT_LOWER + 10, + [32] = IPRIO_DEFAULT_LOWER + 11, +}; + +uint8_t riscv_cpu_default_priority(int irq) +{ + if (irq < 0 || irq > 63) { + return IPRIO_MMAXIPRIO; + } + + return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO; +}; + +static int riscv_cpu_pending_to_irq(CPURISCVState *env, + int extirq, unsigned int extirq_def_prio, + uint64_t pending, uint8_t *iprio) +{ + RISCVCPU *cpu = env_archcpu(env); + int irq, best_irq = RISCV_EXCP_NONE; + unsigned int prio, best_prio = UINT_MAX; + + if (!pending) { + return RISCV_EXCP_NONE; + } + + irq = ctz64(pending); + if (!((extirq == IRQ_M_EXT) ? cpu->cfg.ext_smaia : cpu->cfg.ext_ssaia)) { + return irq; + } + + pending = pending >> irq; + while (pending) { + prio = iprio[irq]; + if (!prio) { + if (irq == extirq) { + prio = extirq_def_prio; + } else { + prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ? + 1 : IPRIO_MMAXIPRIO; + } + } + if ((pending & 0x1) && (prio <= best_prio)) { + best_irq = irq; + best_prio = prio; + } + irq++; + pending = pending >> 1; + } + + return best_irq; +} + +uint64_t riscv_cpu_all_pending(CPURISCVState *env) +{ + uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN); + uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? 
MIP_VSEIP : 0; + uint64_t vstip = (env->vstime_irq) ? MIP_VSTIP : 0; + + return (env->mip | vsgein | vstip) & env->mie; +} + +int riscv_cpu_mirq_pending(CPURISCVState *env) +{ + uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg & + ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP); + + return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M, + irqs, env->miprio); +} + +int riscv_cpu_sirq_pending(CPURISCVState *env) +{ + uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg & + ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP); + + return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S, + irqs, env->siprio); +} + +int riscv_cpu_vsirq_pending(CPURISCVState *env) +{ + uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg & + (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP); + + return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S, + irqs >> 1, env->hviprio); +} + +static int riscv_cpu_local_irq_pending(CPURISCVState *env) +{ + int virq; + uint64_t irqs, pending, mie, hsie, vsie; + + /* Determine interrupt enable state of all privilege modes */ + if (riscv_cpu_virt_enabled(env)) { + mie = 1; + hsie = 1; + vsie = (env->priv < PRV_S) || + (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE)); + } else { + mie = (env->priv < PRV_M) || + (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE)); + hsie = (env->priv < PRV_S) || + (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE)); + vsie = 0; + } + + /* Determine all pending interrupts */ + pending = riscv_cpu_all_pending(env); + + /* Check M-mode interrupts */ + irqs = pending & ~env->mideleg & -mie; + if (irqs) { + return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M, + irqs, env->miprio); + } + + /* Check HS-mode interrupts */ + irqs = pending & env->mideleg & ~env->hideleg & -hsie; + if (irqs) { + return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S, + irqs, env->siprio); + } + + /* Check VS-mode interrupts */ + irqs = pending & env->mideleg & env->hideleg & -vsie; + if (irqs) { + virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S, + irqs >> 1, env->hviprio); + return (virq <= 0) ? 
virq : virq + 1; + } + + /* Indicate no pending interrupt */ + return RISCV_EXCP_NONE; +} bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { -#if !defined(CONFIG_USER_ONLY) if (interrupt_request & CPU_INTERRUPT_HARD) { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; @@ -90,12 +440,9 @@ bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request) return true; } } -#endif return false; } -#if !defined(CONFIG_USER_ONLY) - /* Return true is floating point support is currently enabled */ bool riscv_cpu_fp_enabled(CPURISCVState *env) { @@ -109,11 +456,28 @@ bool riscv_cpu_fp_enabled(CPURISCVState *env) return false; } +/* Return true is vector support is currently enabled */ +bool riscv_cpu_vector_enabled(CPURISCVState *env) +{ + if (env->mstatus & MSTATUS_VS) { + if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_VS)) { + return false; + } + return true; + } + + return false; +} + void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env) { - uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS | + uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE | - MSTATUS64_UXL; + MSTATUS64_UXL | MSTATUS_VS; + + if (riscv_has_ext(env, RVF)) { + mstatus_mask |= MSTATUS_FS; + } bool current_virt = riscv_cpu_virt_enabled(env); g_assert(riscv_has_ext(env, RVH)); @@ -167,6 +531,28 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env) } } +target_ulong riscv_cpu_get_geilen(CPURISCVState *env) +{ + if (!riscv_has_ext(env, RVH)) { + return 0; + } + + return env->geilen; +} + +void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen) +{ + if (!riscv_has_ext(env, RVH)) { + return; + } + + if (geilen > (TARGET_LONG_BITS - 1)) { + return; + } + + env->geilen = geilen; +} + bool riscv_cpu_virt_enabled(CPURISCVState *env) { if (!riscv_has_ext(env, RVH)) { @@ -188,24 +574,19 @@ void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable) } env->virt = set_field(env->virt, VIRT_ONOFF, enable); -} -bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env) -{ - if (!riscv_has_ext(env, RVH)) { - return false; + if (enable) { + /* + * The guest external interrupts from an interrupt controller are + * delivered only when the Guest/VM is running (i.e. V=1). This means + * any guest external interrupt which is triggered while the Guest/VM + * is not running (i.e. V=0) will be missed on QEMU resulting in guest + * with sluggish response to serial console input and other I/O events. + * + * To solve this, we check and inject interrupt after setting V=1. 
+ */ + riscv_cpu_update_mip(env_archcpu(env), 0, 0); } - - return get_field(env->virt, FORCE_HS_EXCEP); -} - -void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable) -{ - if (!riscv_has_ext(env, RVH)) { - return; - } - - env->virt = set_field(env->virt, FORCE_HS_EXCEP, enable); } bool riscv_cpu_two_stage_lookup(int mmu_idx) @@ -213,7 +594,7 @@ bool riscv_cpu_two_stage_lookup(int mmu_idx) return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK; } -int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts) +int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts) { CPURISCVState *env = &cpu->env; if (env->miclaim & interrupts) { @@ -224,13 +605,22 @@ int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts) } } -uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value) +uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value) { CPURISCVState *env = &cpu->env; CPUState *cs = CPU(cpu); - uint32_t old = env->mip; + uint64_t gein, vsgein = 0, vstip = 0, old = env->mip; bool locked = false; + if (riscv_cpu_virt_enabled(env)) { + gein = get_field(env->hstatus, HSTATUS_VGEIN); + vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0; + } + + /* No need to update mip for VSTIP */ + mask = ((mask == MIP_VSTIP) && env->vstime_irq) ? 0 : mask; + vstip = env->vstime_irq ? MIP_VSTIP : 0; + if (!qemu_mutex_iothread_locked()) { locked = true; qemu_mutex_lock_iothread(); @@ -238,7 +628,7 @@ uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value) env->mip = (env->mip & ~mask) | (value & mask); - if (env->mip) { + if (env->mip | vsgein | vstip) { cpu_interrupt(cs, CPU_INTERRUPT_HARD); } else { cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); @@ -251,13 +641,27 @@ uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value) return old; } -void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t), - uint32_t arg) +void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *), + void *arg) { env->rdtime_fn = fn; env->rdtime_fn_arg = arg; } +void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv, + int (*rmw_fn)(void *arg, + target_ulong reg, + target_ulong *val, + target_ulong new_val, + target_ulong write_mask), + void *rmw_fn_arg) +{ + if (priv <= PRV_M) { + env->aia_ireg_rmw_fn[priv] = rmw_fn; + env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg; + } +} + void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv) { if (newpriv > PRV_M) { @@ -268,6 +672,8 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv) } /* tlb_flush is unnecessary as mode is contained in mmu_idx */ env->priv = newpriv; + env->xl = cpu_recompute_xl(env); + riscv_cpu_update_mask(env); /* * Clear the load reservation - otherwise a reservation placed in one @@ -358,6 +764,10 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK; bool use_background = false; + hwaddr ppn; + RISCVCPU *cpu = env_archcpu(env); + int napot_bits = 0; + target_ulong napot_mask; /* * Check if we should use the background registers for the two @@ -405,7 +815,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, if (first_stage == true) { if (use_background) { - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT; vm = get_field(env->vsatp, SATP32_MODE); } else { @@ -413,7 +823,7 @@ static int 
get_physical_address(CPURISCVState *env, hwaddr *physical, vm = get_field(env->vsatp, SATP64_MODE); } } else { - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT; vm = get_field(env->satp, SATP32_MODE); } else { @@ -423,7 +833,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, } widened = 0; } else { - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT; vm = get_field(env->hgatp, SATP32_MODE); } else { @@ -516,7 +926,7 @@ restart: } target_ulong pte; - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { pte = address_space_ldl(cs->as, pte_addr, attrs, &res); } else { pte = address_space_ldq(cs->as, pte_addr, attrs, &res); @@ -526,13 +936,27 @@ restart: return TRANSLATE_FAIL; } - hwaddr ppn = pte >> PTE_PPN_SHIFT; + if (riscv_cpu_sxl(env) == MXL_RV32) { + ppn = pte >> PTE_PPN_SHIFT; + } else if (cpu->cfg.ext_svpbmt || cpu->cfg.ext_svnapot) { + ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT; + } else { + ppn = pte >> PTE_PPN_SHIFT; + if ((pte & ~(target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT) { + return TRANSLATE_FAIL; + } + } if (!(pte & PTE_V)) { /* Invalid PTE */ return TRANSLATE_FAIL; + } else if (!cpu->cfg.ext_svpbmt && (pte & PTE_PBMT)) { + return TRANSLATE_FAIL; } else if (!(pte & (PTE_R | PTE_W | PTE_X))) { /* Inner PTE, continue walking */ + if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) { + return TRANSLATE_FAIL; + } base = ppn << PGSHIFT; } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) { /* Reserved leaf PTE flags: PTE_W */ @@ -606,8 +1030,18 @@ restart: /* for superpage mappings, make a fake leaf PTE for the TLB's benefit. */ target_ulong vpn = addr >> PGSHIFT; - *physical = ((ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT) | - (addr & ~TARGET_PAGE_MASK); + + if (cpu->cfg.ext_svnapot && (pte & PTE_N)) { + napot_bits = ctzl(ppn) + 1; + if ((i != (levels - 1)) || (napot_bits != 4)) { + return TRANSLATE_FAIL; + } + } + + napot_mask = (1 << napot_bits) - 1; + *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) | + (vpn & (((target_ulong)1 << ptshift) - 1)) + ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK); /* set permissions on the TLB entry */ if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) { @@ -630,13 +1064,14 @@ restart: static void raise_mmu_exception(CPURISCVState *env, target_ulong address, MMUAccessType access_type, bool pmp_violation, - bool first_stage, bool two_stage) + bool first_stage, bool two_stage, + bool two_stage_indirect) { CPUState *cs = env_cpu(env); int page_fault_exceptions, vm; uint64_t stap_mode; - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { stap_mode = SATP32_MODE; } else { stap_mode = SATP64_MODE; @@ -680,6 +1115,7 @@ static void raise_mmu_exception(CPURISCVState *env, target_ulong address, } env->badaddr = address; env->two_stage_lookup = two_stage; + env->two_stage_indirect_lookup = two_stage_indirect; } hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) @@ -725,7 +1161,8 @@ void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, env->badaddr = addr; env->two_stage_lookup = riscv_cpu_virt_enabled(env) || riscv_cpu_two_stage_lookup(mmu_idx); - riscv_raise_exception(&cpu->env, cs->exception_index, retaddr); + env->two_stage_indirect_lookup = false; + cpu_loop_exit_restore(cs, retaddr); } void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, @@ -750,9 +1187,31 @@ void 
riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, env->badaddr = addr; env->two_stage_lookup = riscv_cpu_virt_enabled(env) || riscv_cpu_two_stage_lookup(mmu_idx); - riscv_raise_exception(env, cs->exception_index, retaddr); + env->two_stage_indirect_lookup = false; + cpu_loop_exit_restore(cs, retaddr); +} + + +static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type) +{ + enum riscv_pmu_event_idx pmu_event_type; + + switch (access_type) { + case MMU_INST_FETCH: + pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS; + break; + case MMU_DATA_LOAD: + pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS; + break; + case MMU_DATA_STORE: + pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS; + break; + default: + return; + } + + riscv_pmu_incr_ctr(cpu, pmu_event_type); } -#endif /* !CONFIG_USER_ONLY */ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, @@ -760,13 +1219,13 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; -#ifndef CONFIG_USER_ONLY vaddr im_address; hwaddr pa = 0; int prot, prot2, prot_pmp; bool pmp_violation = false; bool first_stage_error = true; bool two_stage_lookup = false; + bool two_stage_indirect_error = false; int ret = TRANSLATE_FAIL; int mode = mmu_idx; /* default TLB page size */ @@ -804,6 +1263,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, */ if (ret == TRANSLATE_G_STAGE_FAIL) { first_stage_error = false; + two_stage_indirect_error = true; access_type = MMU_DATA_LOAD; } @@ -851,6 +1311,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, } } } else { + pmu_tlb_fill_incr_ctr(cpu, access_type); /* Single stage lookup */ ret = get_physical_address(env, &pa, &prot, address, NULL, access_type, mmu_idx, true, false, false); @@ -887,31 +1348,220 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, raise_mmu_exception(env, address, access_type, pmp_violation, first_stage_error, riscv_cpu_virt_enabled(env) || - riscv_cpu_two_stage_lookup(mmu_idx)); - riscv_raise_exception(env, cs->exception_index, retaddr); + riscv_cpu_two_stage_lookup(mmu_idx), + two_stage_indirect_error); + cpu_loop_exit_restore(cs, retaddr); } return true; - -#else - switch (access_type) { - case MMU_INST_FETCH: - cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT; - break; - case MMU_DATA_LOAD: - cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT; - break; - case MMU_DATA_STORE: - cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT; - break; - default: - g_assert_not_reached(); - } - env->badaddr = address; - cpu_loop_exit_restore(cs, retaddr); -#endif } +static target_ulong riscv_transformed_insn(CPURISCVState *env, + target_ulong insn, + target_ulong taddr) +{ + target_ulong xinsn = 0; + target_ulong access_rs1 = 0, access_imm = 0, access_size = 0; + + /* + * Only Quadrant 0 and Quadrant 2 of RVC instruction space need to + * be uncompressed. The Quadrant 1 of RVC instruction space need + * not be transformed because these instructions won't generate + * any load/store trap. 
+ */ + + if ((insn & 0x3) != 0x3) { + /* Transform 16bit instruction into 32bit instruction */ + switch (GET_C_OP(insn)) { + case OPC_RISC_C_OP_QUAD0: /* Quadrant 0 */ + switch (GET_C_FUNC(insn)) { + case OPC_RISC_C_FUNC_FLD_LQ: + if (riscv_cpu_xlen(env) != 128) { /* C.FLD (RV32/64) */ + xinsn = OPC_RISC_FLD; + xinsn = SET_RD(xinsn, GET_C_RS2S(insn)); + access_rs1 = GET_C_RS1S(insn); + access_imm = GET_C_LD_IMM(insn); + access_size = 8; + } + break; + case OPC_RISC_C_FUNC_LW: /* C.LW */ + xinsn = OPC_RISC_LW; + xinsn = SET_RD(xinsn, GET_C_RS2S(insn)); + access_rs1 = GET_C_RS1S(insn); + access_imm = GET_C_LW_IMM(insn); + access_size = 4; + break; + case OPC_RISC_C_FUNC_FLW_LD: + if (riscv_cpu_xlen(env) == 32) { /* C.FLW (RV32) */ + xinsn = OPC_RISC_FLW; + xinsn = SET_RD(xinsn, GET_C_RS2S(insn)); + access_rs1 = GET_C_RS1S(insn); + access_imm = GET_C_LW_IMM(insn); + access_size = 4; + } else { /* C.LD (RV64/RV128) */ + xinsn = OPC_RISC_LD; + xinsn = SET_RD(xinsn, GET_C_RS2S(insn)); + access_rs1 = GET_C_RS1S(insn); + access_imm = GET_C_LD_IMM(insn); + access_size = 8; + } + break; + case OPC_RISC_C_FUNC_FSD_SQ: + if (riscv_cpu_xlen(env) != 128) { /* C.FSD (RV32/64) */ + xinsn = OPC_RISC_FSD; + xinsn = SET_RS2(xinsn, GET_C_RS2S(insn)); + access_rs1 = GET_C_RS1S(insn); + access_imm = GET_C_SD_IMM(insn); + access_size = 8; + } + break; + case OPC_RISC_C_FUNC_SW: /* C.SW */ + xinsn = OPC_RISC_SW; + xinsn = SET_RS2(xinsn, GET_C_RS2S(insn)); + access_rs1 = GET_C_RS1S(insn); + access_imm = GET_C_SW_IMM(insn); + access_size = 4; + break; + case OPC_RISC_C_FUNC_FSW_SD: + if (riscv_cpu_xlen(env) == 32) { /* C.FSW (RV32) */ + xinsn = OPC_RISC_FSW; + xinsn = SET_RS2(xinsn, GET_C_RS2S(insn)); + access_rs1 = GET_C_RS1S(insn); + access_imm = GET_C_SW_IMM(insn); + access_size = 4; + } else { /* C.SD (RV64/RV128) */ + xinsn = OPC_RISC_SD; + xinsn = SET_RS2(xinsn, GET_C_RS2S(insn)); + access_rs1 = GET_C_RS1S(insn); + access_imm = GET_C_SD_IMM(insn); + access_size = 8; + } + break; + default: + break; + } + break; + case OPC_RISC_C_OP_QUAD2: /* Quadrant 2 */ + switch (GET_C_FUNC(insn)) { + case OPC_RISC_C_FUNC_FLDSP_LQSP: + if (riscv_cpu_xlen(env) != 128) { /* C.FLDSP (RV32/64) */ + xinsn = OPC_RISC_FLD; + xinsn = SET_RD(xinsn, GET_C_RD(insn)); + access_rs1 = 2; + access_imm = GET_C_LDSP_IMM(insn); + access_size = 8; + } + break; + case OPC_RISC_C_FUNC_LWSP: /* C.LWSP */ + xinsn = OPC_RISC_LW; + xinsn = SET_RD(xinsn, GET_C_RD(insn)); + access_rs1 = 2; + access_imm = GET_C_LWSP_IMM(insn); + access_size = 4; + break; + case OPC_RISC_C_FUNC_FLWSP_LDSP: + if (riscv_cpu_xlen(env) == 32) { /* C.FLWSP (RV32) */ + xinsn = OPC_RISC_FLW; + xinsn = SET_RD(xinsn, GET_C_RD(insn)); + access_rs1 = 2; + access_imm = GET_C_LWSP_IMM(insn); + access_size = 4; + } else { /* C.LDSP (RV64/RV128) */ + xinsn = OPC_RISC_LD; + xinsn = SET_RD(xinsn, GET_C_RD(insn)); + access_rs1 = 2; + access_imm = GET_C_LDSP_IMM(insn); + access_size = 8; + } + break; + case OPC_RISC_C_FUNC_FSDSP_SQSP: + if (riscv_cpu_xlen(env) != 128) { /* C.FSDSP (RV32/64) */ + xinsn = OPC_RISC_FSD; + xinsn = SET_RS2(xinsn, GET_C_RS2(insn)); + access_rs1 = 2; + access_imm = GET_C_SDSP_IMM(insn); + access_size = 8; + } + break; + case OPC_RISC_C_FUNC_SWSP: /* C.SWSP */ + xinsn = OPC_RISC_SW; + xinsn = SET_RS2(xinsn, GET_C_RS2(insn)); + access_rs1 = 2; + access_imm = GET_C_SWSP_IMM(insn); + access_size = 4; + break; + case 7: + if (riscv_cpu_xlen(env) == 32) { /* C.FSWSP (RV32) */ + xinsn = OPC_RISC_FSW; + xinsn = SET_RS2(xinsn, GET_C_RS2(insn)); + access_rs1 = 2; + 
access_imm = GET_C_SWSP_IMM(insn); + access_size = 4; + } else { /* C.SDSP (RV64/RV128) */ + xinsn = OPC_RISC_SD; + xinsn = SET_RS2(xinsn, GET_C_RS2(insn)); + access_rs1 = 2; + access_imm = GET_C_SDSP_IMM(insn); + access_size = 8; + } + break; + default: + break; + } + break; + default: + break; + } + + /* + * Clear Bit1 of transformed instruction to indicate that + * original insruction was a 16bit instruction + */ + xinsn &= ~((target_ulong)0x2); + } else { + /* Transform 32bit (or wider) instructions */ + switch (MASK_OP_MAJOR(insn)) { + case OPC_RISC_ATOMIC: + xinsn = insn; + access_rs1 = GET_RS1(insn); + access_size = 1 << GET_FUNCT3(insn); + break; + case OPC_RISC_LOAD: + case OPC_RISC_FP_LOAD: + xinsn = SET_I_IMM(insn, 0); + access_rs1 = GET_RS1(insn); + access_imm = GET_IMM(insn); + access_size = 1 << GET_FUNCT3(insn); + break; + case OPC_RISC_STORE: + case OPC_RISC_FP_STORE: + xinsn = SET_S_IMM(insn, 0); + access_rs1 = GET_RS1(insn); + access_imm = GET_STORE_IMM(insn); + access_size = 1 << GET_FUNCT3(insn); + break; + case OPC_RISC_SYSTEM: + if (MASK_OP_SYSTEM(insn) == OPC_RISC_HLVHSV) { + xinsn = insn; + access_rs1 = GET_RS1(insn); + access_size = 1 << ((GET_FUNCT7(insn) >> 1) & 0x3); + access_size = 1 << access_size; + } + break; + default: + break; + } + } + + if (access_size) { + xinsn = SET_RS1(xinsn, (taddr - (env->gpr[access_rs1] + access_imm)) & + (access_size - 1)); + } + + return xinsn; +} +#endif /* !CONFIG_USER_ONLY */ + /* * Handle Traps * @@ -924,7 +1574,7 @@ void riscv_cpu_do_interrupt(CPUState *cs) RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; - bool force_hs_execp = riscv_cpu_force_hs_excep_enabled(env); + bool write_gva = false; uint64_t s; /* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide @@ -932,40 +1582,62 @@ void riscv_cpu_do_interrupt(CPUState *cs) */ bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG); target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK; - target_ulong deleg = async ? env->mideleg : env->medeleg; - bool write_tval = false; + uint64_t deleg = async ? env->mideleg : env->medeleg; target_ulong tval = 0; + target_ulong tinst = 0; target_ulong htval = 0; target_ulong mtval2 = 0; if (cause == RISCV_EXCP_SEMIHOST) { - if (env->priv >= PRV_S) { - env->gpr[xA0] = do_common_semihosting(cs); - env->pc += 4; - return; - } - cause = RISCV_EXCP_BREAKPOINT; + do_common_semihosting(cs); + env->pc += 4; + return; } if (!async) { /* set tval to badaddr for traps with address information */ switch (cause) { - case RISCV_EXCP_INST_GUEST_PAGE_FAULT: case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT: case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT: - force_hs_execp = true; - /* fallthrough */ - case RISCV_EXCP_INST_ADDR_MIS: - case RISCV_EXCP_INST_ACCESS_FAULT: case RISCV_EXCP_LOAD_ADDR_MIS: case RISCV_EXCP_STORE_AMO_ADDR_MIS: case RISCV_EXCP_LOAD_ACCESS_FAULT: case RISCV_EXCP_STORE_AMO_ACCESS_FAULT: - case RISCV_EXCP_INST_PAGE_FAULT: case RISCV_EXCP_LOAD_PAGE_FAULT: case RISCV_EXCP_STORE_PAGE_FAULT: - write_tval = true; + write_gva = env->two_stage_lookup; tval = env->badaddr; + if (env->two_stage_indirect_lookup) { + /* + * special pseudoinstruction for G-stage fault taken while + * doing VS-stage page table walk. + */ + tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000; + } else { + /* + * The "Addr. Offset" field in transformed instruction is + * non-zero only for misaligned access. 
+ */ + tinst = riscv_transformed_insn(env, env->bins, tval); + } + break; + case RISCV_EXCP_INST_GUEST_PAGE_FAULT: + case RISCV_EXCP_INST_ADDR_MIS: + case RISCV_EXCP_INST_ACCESS_FAULT: + case RISCV_EXCP_INST_PAGE_FAULT: + write_gva = env->two_stage_lookup; + tval = env->badaddr; + if (env->two_stage_indirect_lookup) { + /* + * special pseudoinstruction for G-stage fault taken while + * doing VS-stage page table walk. + */ + tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000; + } + break; + case RISCV_EXCP_ILLEGAL_INST: + case RISCV_EXCP_VIRT_INSTRUCTION_FAULT: + tval = env->bins; break; default: break; @@ -999,22 +1671,9 @@ void riscv_cpu_do_interrupt(CPUState *cs) cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) { /* handle the trap in S-mode */ if (riscv_has_ext(env, RVH)) { - target_ulong hdeleg = async ? env->hideleg : env->hedeleg; + uint64_t hdeleg = async ? env->hideleg : env->hedeleg; - if (env->two_stage_lookup && write_tval) { - /* - * If we are writing a guest virtual address to stval, set - * this to 1. If we are trapping to VS we will set this to 0 - * later. - */ - env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 1); - } else { - /* For other HS-mode traps, we set this to 0. */ - env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0); - } - - if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1) && - !force_hs_execp) { + if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) { /* Trap to VS mode */ /* * See if we need to adjust cause. Yes if its VS mode interrupt @@ -1024,7 +1683,7 @@ void riscv_cpu_do_interrupt(CPUState *cs) cause == IRQ_VS_EXT) { cause = cause - 1; } - env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0); + write_gva = false; } else if (riscv_cpu_virt_enabled(env)) { /* Trap into HS mode, from virt */ riscv_cpu_swap_hypervisor_regs(env); @@ -1033,15 +1692,16 @@ void riscv_cpu_do_interrupt(CPUState *cs) env->hstatus = set_field(env->hstatus, HSTATUS_SPV, riscv_cpu_virt_enabled(env)); + htval = env->guest_phys_fault_addr; riscv_cpu_set_virt_enabled(env, 0); - riscv_cpu_set_force_hs_excep(env, 0); } else { /* Trap into HS mode */ env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false); htval = env->guest_phys_fault_addr; } + env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva); } s = env->mstatus; @@ -1053,6 +1713,7 @@ void riscv_cpu_do_interrupt(CPUState *cs) env->sepc = env->pc; env->stval = tval; env->htval = htval; + env->htinst = tinst; env->pc = (env->stvec >> 2 << 2) + ((async && (env->stvec & 3) == 1) ? cause * 4 : 0); riscv_cpu_set_mode(env, PRV_S); @@ -1072,7 +1733,6 @@ void riscv_cpu_do_interrupt(CPUState *cs) /* Trapping to M mode, virt is disabled */ riscv_cpu_set_virt_enabled(env, 0); - riscv_cpu_set_force_hs_excep(env, 0); } s = env->mstatus; @@ -1084,6 +1744,7 @@ void riscv_cpu_do_interrupt(CPUState *cs) env->mepc = env->pc; env->mtval = tval; env->mtval2 = mtval2; + env->mtinst = tinst; env->pc = (env->mtvec >> 2 << 2) + ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0); riscv_cpu_set_mode(env, PRV_M); @@ -1096,6 +1757,7 @@ void riscv_cpu_do_interrupt(CPUState *cs) */ env->two_stage_lookup = false; + env->two_stage_indirect_lookup = false; #endif cs->exception_index = RISCV_EXCP_NONE; /* mark handled to qemu */ } diff --git a/target/riscv/crypto_helper.c b/target/riscv/crypto_helper.c new file mode 100644 index 0000000000..2ef30281b1 --- /dev/null +++ b/target/riscv/crypto_helper.c @@ -0,0 +1,302 @@ +/* + * RISC-V Crypto Emulation Helpers for QEMU. 
+ * + * Copyright (c) 2021 Ruibo Lu, luruibo2000@163.com + * Copyright (c) 2021 Zewen Ye, lustrew@foxmail.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "crypto/aes.h" +#include "crypto/sm4.h" + +#define AES_XTIME(a) \ + ((a << 1) ^ ((a & 0x80) ? 0x1b : 0)) + +#define AES_GFMUL(a, b) (( \ + (((b) & 0x1) ? (a) : 0) ^ \ + (((b) & 0x2) ? AES_XTIME(a) : 0) ^ \ + (((b) & 0x4) ? AES_XTIME(AES_XTIME(a)) : 0) ^ \ + (((b) & 0x8) ? AES_XTIME(AES_XTIME(AES_XTIME(a))) : 0)) & 0xFF) + +static inline uint32_t aes_mixcolumn_byte(uint8_t x, bool fwd) +{ + uint32_t u; + + if (fwd) { + u = (AES_GFMUL(x, 3) << 24) | (x << 16) | (x << 8) | + (AES_GFMUL(x, 2) << 0); + } else { + u = (AES_GFMUL(x, 0xb) << 24) | (AES_GFMUL(x, 0xd) << 16) | + (AES_GFMUL(x, 0x9) << 8) | (AES_GFMUL(x, 0xe) << 0); + } + return u; +} + +#define sext32_xlen(x) (target_ulong)(int32_t)(x) + +static inline target_ulong aes32_operation(target_ulong shamt, + target_ulong rs1, target_ulong rs2, + bool enc, bool mix) +{ + uint8_t si = rs2 >> shamt; + uint8_t so; + uint32_t mixed; + target_ulong res; + + if (enc) { + so = AES_sbox[si]; + if (mix) { + mixed = aes_mixcolumn_byte(so, true); + } else { + mixed = so; + } + } else { + so = AES_isbox[si]; + if (mix) { + mixed = aes_mixcolumn_byte(so, false); + } else { + mixed = so; + } + } + mixed = rol32(mixed, shamt); + res = rs1 ^ mixed; + + return sext32_xlen(res); +} + +target_ulong HELPER(aes32esmi)(target_ulong rs1, target_ulong rs2, + target_ulong shamt) +{ + return aes32_operation(shamt, rs1, rs2, true, true); +} + +target_ulong HELPER(aes32esi)(target_ulong rs1, target_ulong rs2, + target_ulong shamt) +{ + return aes32_operation(shamt, rs1, rs2, true, false); +} + +target_ulong HELPER(aes32dsmi)(target_ulong rs1, target_ulong rs2, + target_ulong shamt) +{ + return aes32_operation(shamt, rs1, rs2, false, true); +} + +target_ulong HELPER(aes32dsi)(target_ulong rs1, target_ulong rs2, + target_ulong shamt) +{ + return aes32_operation(shamt, rs1, rs2, false, false); +} + +#define BY(X, I) ((X >> (8 * I)) & 0xFF) + +#define AES_SHIFROWS_LO(RS1, RS2) ( \ + (((RS1 >> 24) & 0xFF) << 56) | (((RS2 >> 48) & 0xFF) << 48) | \ + (((RS2 >> 8) & 0xFF) << 40) | (((RS1 >> 32) & 0xFF) << 32) | \ + (((RS2 >> 56) & 0xFF) << 24) | (((RS2 >> 16) & 0xFF) << 16) | \ + (((RS1 >> 40) & 0xFF) << 8) | (((RS1 >> 0) & 0xFF) << 0)) + +#define AES_INVSHIFROWS_LO(RS1, RS2) ( \ + (((RS2 >> 24) & 0xFF) << 56) | (((RS2 >> 48) & 0xFF) << 48) | \ + (((RS1 >> 8) & 0xFF) << 40) | (((RS1 >> 32) & 0xFF) << 32) | \ + (((RS1 >> 56) & 0xFF) << 24) | (((RS2 >> 16) & 0xFF) << 16) | \ + (((RS2 >> 40) & 0xFF) << 8) | (((RS1 >> 0) & 0xFF) << 0)) + +#define AES_MIXBYTE(COL, B0, B1, B2, B3) ( \ + BY(COL, B3) ^ BY(COL, B2) ^ AES_GFMUL(BY(COL, B1), 3) ^ \ + AES_GFMUL(BY(COL, B0), 2)) + +#define AES_MIXCOLUMN(COL) ( \ + AES_MIXBYTE(COL, 3, 0, 1, 2) << 24 | \ + AES_MIXBYTE(COL, 2, 3, 0, 1) << 16 
| \ + AES_MIXBYTE(COL, 1, 2, 3, 0) << 8 | AES_MIXBYTE(COL, 0, 1, 2, 3) << 0) + +#define AES_INVMIXBYTE(COL, B0, B1, B2, B3) ( \ + AES_GFMUL(BY(COL, B3), 0x9) ^ AES_GFMUL(BY(COL, B2), 0xd) ^ \ + AES_GFMUL(BY(COL, B1), 0xb) ^ AES_GFMUL(BY(COL, B0), 0xe)) + +#define AES_INVMIXCOLUMN(COL) ( \ + AES_INVMIXBYTE(COL, 3, 0, 1, 2) << 24 | \ + AES_INVMIXBYTE(COL, 2, 3, 0, 1) << 16 | \ + AES_INVMIXBYTE(COL, 1, 2, 3, 0) << 8 | \ + AES_INVMIXBYTE(COL, 0, 1, 2, 3) << 0) + +static inline target_ulong aes64_operation(target_ulong rs1, target_ulong rs2, + bool enc, bool mix) +{ + uint64_t RS1 = rs1; + uint64_t RS2 = rs2; + uint64_t result; + uint64_t temp; + uint32_t col_0; + uint32_t col_1; + + if (enc) { + temp = AES_SHIFROWS_LO(RS1, RS2); + temp = (((uint64_t)AES_sbox[(temp >> 0) & 0xFF] << 0) | + ((uint64_t)AES_sbox[(temp >> 8) & 0xFF] << 8) | + ((uint64_t)AES_sbox[(temp >> 16) & 0xFF] << 16) | + ((uint64_t)AES_sbox[(temp >> 24) & 0xFF] << 24) | + ((uint64_t)AES_sbox[(temp >> 32) & 0xFF] << 32) | + ((uint64_t)AES_sbox[(temp >> 40) & 0xFF] << 40) | + ((uint64_t)AES_sbox[(temp >> 48) & 0xFF] << 48) | + ((uint64_t)AES_sbox[(temp >> 56) & 0xFF] << 56)); + if (mix) { + col_0 = temp & 0xFFFFFFFF; + col_1 = temp >> 32; + + col_0 = AES_MIXCOLUMN(col_0); + col_1 = AES_MIXCOLUMN(col_1); + + result = ((uint64_t)col_1 << 32) | col_0; + } else { + result = temp; + } + } else { + temp = AES_INVSHIFROWS_LO(RS1, RS2); + temp = (((uint64_t)AES_isbox[(temp >> 0) & 0xFF] << 0) | + ((uint64_t)AES_isbox[(temp >> 8) & 0xFF] << 8) | + ((uint64_t)AES_isbox[(temp >> 16) & 0xFF] << 16) | + ((uint64_t)AES_isbox[(temp >> 24) & 0xFF] << 24) | + ((uint64_t)AES_isbox[(temp >> 32) & 0xFF] << 32) | + ((uint64_t)AES_isbox[(temp >> 40) & 0xFF] << 40) | + ((uint64_t)AES_isbox[(temp >> 48) & 0xFF] << 48) | + ((uint64_t)AES_isbox[(temp >> 56) & 0xFF] << 56)); + if (mix) { + col_0 = temp & 0xFFFFFFFF; + col_1 = temp >> 32; + + col_0 = AES_INVMIXCOLUMN(col_0); + col_1 = AES_INVMIXCOLUMN(col_1); + + result = ((uint64_t)col_1 << 32) | col_0; + } else { + result = temp; + } + } + + return result; +} + +target_ulong HELPER(aes64esm)(target_ulong rs1, target_ulong rs2) +{ + return aes64_operation(rs1, rs2, true, true); +} + +target_ulong HELPER(aes64es)(target_ulong rs1, target_ulong rs2) +{ + return aes64_operation(rs1, rs2, true, false); +} + +target_ulong HELPER(aes64ds)(target_ulong rs1, target_ulong rs2) +{ + return aes64_operation(rs1, rs2, false, false); +} + +target_ulong HELPER(aes64dsm)(target_ulong rs1, target_ulong rs2) +{ + return aes64_operation(rs1, rs2, false, true); +} + +target_ulong HELPER(aes64ks2)(target_ulong rs1, target_ulong rs2) +{ + uint64_t RS1 = rs1; + uint64_t RS2 = rs2; + uint32_t rs1_hi = RS1 >> 32; + uint32_t rs2_lo = RS2; + uint32_t rs2_hi = RS2 >> 32; + + uint32_t r_lo = (rs1_hi ^ rs2_lo); + uint32_t r_hi = (rs1_hi ^ rs2_lo ^ rs2_hi); + target_ulong result = ((uint64_t)r_hi << 32) | r_lo; + + return result; +} + +target_ulong HELPER(aes64ks1i)(target_ulong rs1, target_ulong rnum) +{ + uint64_t RS1 = rs1; + static const uint8_t round_consts[10] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36 + }; + + uint8_t enc_rnum = rnum; + uint32_t temp = (RS1 >> 32) & 0xFFFFFFFF; + uint8_t rcon_ = 0; + target_ulong result; + + if (enc_rnum != 0xA) { + temp = ror32(temp, 8); /* Rotate right by 8 */ + rcon_ = round_consts[enc_rnum]; + } + + temp = ((uint32_t)AES_sbox[(temp >> 24) & 0xFF] << 24) | + ((uint32_t)AES_sbox[(temp >> 16) & 0xFF] << 16) | + ((uint32_t)AES_sbox[(temp >> 8) & 0xFF] << 8) | + 
((uint32_t)AES_sbox[(temp >> 0) & 0xFF] << 0); + + temp ^= rcon_; + + result = ((uint64_t)temp << 32) | temp; + + return result; +} + +target_ulong HELPER(aes64im)(target_ulong rs1) +{ + uint64_t RS1 = rs1; + uint32_t col_0 = RS1 & 0xFFFFFFFF; + uint32_t col_1 = RS1 >> 32; + target_ulong result; + + col_0 = AES_INVMIXCOLUMN(col_0); + col_1 = AES_INVMIXCOLUMN(col_1); + + result = ((uint64_t)col_1 << 32) | col_0; + + return result; +} + +target_ulong HELPER(sm4ed)(target_ulong rs1, target_ulong rs2, + target_ulong shamt) +{ + uint32_t sb_in = (uint8_t)(rs2 >> shamt); + uint32_t sb_out = (uint32_t)sm4_sbox[sb_in]; + + uint32_t x = sb_out ^ (sb_out << 8) ^ (sb_out << 2) ^ (sb_out << 18) ^ + ((sb_out & 0x3f) << 26) ^ ((sb_out & 0xC0) << 10); + + uint32_t rotl = rol32(x, shamt); + + return sext32_xlen(rotl ^ (uint32_t)rs1); +} + +target_ulong HELPER(sm4ks)(target_ulong rs1, target_ulong rs2, + target_ulong shamt) +{ + uint32_t sb_in = (uint8_t)(rs2 >> shamt); + uint32_t sb_out = sm4_sbox[sb_in]; + + uint32_t x = sb_out ^ ((sb_out & 0x07) << 29) ^ ((sb_out & 0xFE) << 7) ^ + ((sb_out & 0x01) << 23) ^ ((sb_out & 0xF8) << 13); + + uint32_t rotl = rol32(x, shamt); + + return sext32_xlen(rotl ^ (uint32_t)rs1); +} +#undef sext32_xlen diff --git a/target/riscv/csr.c b/target/riscv/csr.c index 9a4ed18ac5..5c9a7ee287 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -19,9 +19,15 @@ #include "qemu/osdep.h" #include "qemu/log.h" +#include "qemu/timer.h" #include "cpu.h" +#include "pmu.h" +#include "time_helper.h" #include "qemu/main-loop.h" #include "exec/exec-all.h" +#include "sysemu/cpu-timers.h" +#include "qemu/guest-random.h" +#include "qapi/error.h" /* CSR function table public API */ void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops) @@ -38,11 +44,8 @@ void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops) static RISCVException fs(CPURISCVState *env, int csrno) { #if !defined(CONFIG_USER_ONLY) - /* loose check condition for fcsr in vector extension */ - if ((csrno == CSR_FCSR) && (env->misa & RVV)) { - return RISCV_EXCP_NONE; - } - if (!env->debugger && !riscv_cpu_fp_enabled(env)) { + if (!env->debugger && !riscv_cpu_fp_enabled(env) && + !RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) { return RISCV_EXCP_ILLEGAL_INST; } #endif @@ -51,7 +54,16 @@ static RISCVException fs(CPURISCVState *env, int csrno) static RISCVException vs(CPURISCVState *env, int csrno) { - if (env->misa & RVV) { + CPUState *cs = env_cpu(env); + RISCVCPU *cpu = RISCV_CPU(cs); + + if (env->misa_ext & RVV || + cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) { +#if !defined(CONFIG_USER_ONLY) + if (!env->debugger && !riscv_cpu_vector_enabled(env)) { + return RISCV_EXCP_ILLEGAL_INST; + } +#endif return RISCV_EXCP_NONE; } return RISCV_EXCP_ILLEGAL_INST; @@ -62,75 +74,53 @@ static RISCVException ctr(CPURISCVState *env, int csrno) #if !defined(CONFIG_USER_ONLY) CPUState *cs = env_cpu(env); RISCVCPU *cpu = RISCV_CPU(cs); + int ctr_index; + target_ulong ctr_mask; + int base_csrno = CSR_CYCLE; + bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? 
true : false; - if (!cpu->cfg.ext_counters) { - /* The Counters extensions is not enabled */ + if (rv32 && csrno >= CSR_CYCLEH) { + /* Offset for RV32 hpmcounternh counters */ + base_csrno += 0x80; + } + ctr_index = csrno - base_csrno; + ctr_mask = BIT(ctr_index); + + if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) || + (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) { + goto skip_ext_pmu_check; + } + + if (!(cpu->pmu_avail_ctrs & ctr_mask)) { + /* No counter is enabled in PMU or the counter is out of range */ + return RISCV_EXCP_ILLEGAL_INST; + } + +skip_ext_pmu_check: + + if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) { return RISCV_EXCP_ILLEGAL_INST; } if (riscv_cpu_virt_enabled(env)) { - switch (csrno) { - case CSR_CYCLE: - if (!get_field(env->hcounteren, HCOUNTEREN_CY) && - get_field(env->mcounteren, HCOUNTEREN_CY)) { - return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; - } - break; - case CSR_TIME: - if (!get_field(env->hcounteren, HCOUNTEREN_TM) && - get_field(env->mcounteren, HCOUNTEREN_TM)) { - return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; - } - break; - case CSR_INSTRET: - if (!get_field(env->hcounteren, HCOUNTEREN_IR) && - get_field(env->mcounteren, HCOUNTEREN_IR)) { - return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; - } - break; - case CSR_HPMCOUNTER3...CSR_HPMCOUNTER31: - if (!get_field(env->hcounteren, 1 << (csrno - CSR_HPMCOUNTER3)) && - get_field(env->mcounteren, 1 << (csrno - CSR_HPMCOUNTER3))) { - return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; - } - break; - } - if (riscv_cpu_is_32bit(env)) { - switch (csrno) { - case CSR_CYCLEH: - if (!get_field(env->hcounteren, HCOUNTEREN_CY) && - get_field(env->mcounteren, HCOUNTEREN_CY)) { - return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; - } - break; - case CSR_TIMEH: - if (!get_field(env->hcounteren, HCOUNTEREN_TM) && - get_field(env->mcounteren, HCOUNTEREN_TM)) { - return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; - } - break; - case CSR_INSTRETH: - if (!get_field(env->hcounteren, HCOUNTEREN_IR) && - get_field(env->mcounteren, HCOUNTEREN_IR)) { - return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; - } - break; - case CSR_HPMCOUNTER3H...CSR_HPMCOUNTER31H: - if (!get_field(env->hcounteren, 1 << (csrno - CSR_HPMCOUNTER3H)) && - get_field(env->mcounteren, 1 << (csrno - CSR_HPMCOUNTER3H))) { - return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; - } - break; - } + if (!get_field(env->hcounteren, ctr_mask) || + (env->priv == PRV_U && !get_field(env->scounteren, ctr_mask))) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } } + + if (riscv_has_ext(env, RVS) && env->priv == PRV_U && + !get_field(env->scounteren, ctr_mask)) { + return RISCV_EXCP_ILLEGAL_INST; + } + #endif return RISCV_EXCP_NONE; } static RISCVException ctr32(CPURISCVState *env, int csrno) { - if (!riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) != MXL_RV32) { return RISCV_EXCP_ILLEGAL_INST; } @@ -138,6 +128,47 @@ static RISCVException ctr32(CPURISCVState *env, int csrno) } #if !defined(CONFIG_USER_ONLY) +static RISCVException mctr(CPURISCVState *env, int csrno) +{ + CPUState *cs = env_cpu(env); + RISCVCPU *cpu = RISCV_CPU(cs); + int ctr_index; + int base_csrno = CSR_MHPMCOUNTER3; + + if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) { + /* Offset for RV32 mhpmcounternh counters */ + base_csrno += 0x80; + } + ctr_index = csrno - base_csrno; + if (!cpu->cfg.pmu_num || ctr_index >= cpu->cfg.pmu_num) { + /* The PMU is not enabled or counter is out of range*/ + return RISCV_EXCP_ILLEGAL_INST; + } + + return RISCV_EXCP_NONE; +} + +static RISCVException mctr32(CPURISCVState *env, int csrno) +{ + if 
(riscv_cpu_mxl(env) != MXL_RV32) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return mctr(env, csrno); +} + +static RISCVException sscofpmf(CPURISCVState *env, int csrno) +{ + CPUState *cs = env_cpu(env); + RISCVCPU *cpu = RISCV_CPU(cs); + + if (!cpu->cfg.ext_sscofpmf) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return RISCV_EXCP_NONE; +} + static RISCVException any(CPURISCVState *env, int csrno) { return RISCV_EXCP_NONE; @@ -145,7 +176,7 @@ static RISCVException any(CPURISCVState *env, int csrno) static RISCVException any32(CPURISCVState *env, int csrno) { - if (!riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) != MXL_RV32) { return RISCV_EXCP_ILLEGAL_INST; } @@ -153,6 +184,28 @@ static RISCVException any32(CPURISCVState *env, int csrno) } +static int aia_any(CPURISCVState *env, int csrno) +{ + RISCVCPU *cpu = env_archcpu(env); + + if (!cpu->cfg.ext_smaia) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return any(env, csrno); +} + +static int aia_any32(CPURISCVState *env, int csrno) +{ + RISCVCPU *cpu = env_archcpu(env); + + if (!cpu->cfg.ext_smaia) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return any32(env, csrno); +} + static RISCVException smode(CPURISCVState *env, int csrno) { if (riscv_has_ext(env, RVS)) { @@ -162,17 +215,41 @@ static RISCVException smode(CPURISCVState *env, int csrno) return RISCV_EXCP_ILLEGAL_INST; } +static int smode32(CPURISCVState *env, int csrno) +{ + if (riscv_cpu_mxl(env) != MXL_RV32) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return smode(env, csrno); +} + +static int aia_smode(CPURISCVState *env, int csrno) +{ + RISCVCPU *cpu = env_archcpu(env); + + if (!cpu->cfg.ext_ssaia) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return smode(env, csrno); +} + +static int aia_smode32(CPURISCVState *env, int csrno) +{ + RISCVCPU *cpu = env_archcpu(env); + + if (!cpu->cfg.ext_ssaia) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return smode32(env, csrno); +} + static RISCVException hmode(CPURISCVState *env, int csrno) { - if (riscv_has_ext(env, RVS) && - riscv_has_ext(env, RVH)) { - /* Hypervisor extension is supported */ - if ((env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) || - env->priv == PRV_M) { - return RISCV_EXCP_NONE; - } else { - return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; - } + if (riscv_has_ext(env, RVH)) { + return RISCV_EXCP_NONE; } return RISCV_EXCP_ILLEGAL_INST; @@ -180,18 +257,64 @@ static RISCVException hmode(CPURISCVState *env, int csrno) static RISCVException hmode32(CPURISCVState *env, int csrno) { - if (!riscv_cpu_is_32bit(env)) { - if (riscv_cpu_virt_enabled(env)) { - return RISCV_EXCP_ILLEGAL_INST; - } else { - return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; - } + if (riscv_cpu_mxl(env) != MXL_RV32) { + return RISCV_EXCP_ILLEGAL_INST; } return hmode(env, csrno); } +static RISCVException umode(CPURISCVState *env, int csrno) +{ + if (riscv_has_ext(env, RVU)) { + return RISCV_EXCP_NONE; + } + + return RISCV_EXCP_ILLEGAL_INST; +} + +static RISCVException umode32(CPURISCVState *env, int csrno) +{ + if (riscv_cpu_mxl(env) != MXL_RV32) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return umode(env, csrno); +} + +/* Checks if PointerMasking registers could be accessed */ +static RISCVException pointer_masking(CPURISCVState *env, int csrno) +{ + /* Check if j-ext is present */ + if (riscv_has_ext(env, RVJ)) { + return RISCV_EXCP_NONE; + } + return RISCV_EXCP_ILLEGAL_INST; +} + +static int aia_hmode(CPURISCVState *env, int csrno) +{ + RISCVCPU *cpu = env_archcpu(env); + + if (!cpu->cfg.ext_ssaia) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return hmode(env, 
csrno); +} + +static int aia_hmode32(CPURISCVState *env, int csrno) +{ + RISCVCPU *cpu = env_archcpu(env); + + if (!cpu->cfg.ext_ssaia) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return hmode32(env, csrno); +} + static RISCVException pmp(CPURISCVState *env, int csrno) { if (riscv_feature(env, RISCV_FEATURE_PMP)) { @@ -209,8 +332,57 @@ static RISCVException epmp(CPURISCVState *env, int csrno) return RISCV_EXCP_ILLEGAL_INST; } + +static RISCVException debug(CPURISCVState *env, int csrno) +{ + if (riscv_feature(env, RISCV_FEATURE_DEBUG)) { + return RISCV_EXCP_NONE; + } + + return RISCV_EXCP_ILLEGAL_INST; +} #endif +static RISCVException seed(CPURISCVState *env, int csrno) +{ + RISCVCPU *cpu = env_archcpu(env); + + if (!cpu->cfg.ext_zkr) { + return RISCV_EXCP_ILLEGAL_INST; + } + +#if !defined(CONFIG_USER_ONLY) + /* + * With a CSR read-write instruction: + * 1) The seed CSR is always available in machine mode as normal. + * 2) Attempted access to seed from virtual modes VS and VU always raises + * an exception(virtual instruction exception only if mseccfg.sseed=1). + * 3) Without the corresponding access control bit set to 1, any attempted + * access to seed from U, S or HS modes will raise an illegal instruction + * exception. + */ + if (env->priv == PRV_M) { + return RISCV_EXCP_NONE; + } else if (riscv_cpu_virt_enabled(env)) { + if (env->mseccfg & MSECCFG_SSEED) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + } else { + return RISCV_EXCP_ILLEGAL_INST; + } + } else { + if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) { + return RISCV_EXCP_NONE; + } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) { + return RISCV_EXCP_NONE; + } else { + return RISCV_EXCP_ILLEGAL_INST; + } + } +#else + return RISCV_EXCP_NONE; +#endif +} + /* User Floating-Point CSRs */ static RISCVException read_fflags(CPURISCVState *env, int csrno, target_ulong *val) @@ -223,7 +395,9 @@ static RISCVException write_fflags(CPURISCVState *env, int csrno, target_ulong val) { #if !defined(CONFIG_USER_ONLY) - env->mstatus |= MSTATUS_FS; + if (riscv_has_ext(env, RVF)) { + env->mstatus |= MSTATUS_FS; + } #endif riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT)); return RISCV_EXCP_NONE; @@ -240,7 +414,9 @@ static RISCVException write_frm(CPURISCVState *env, int csrno, target_ulong val) { #if !defined(CONFIG_USER_ONLY) - env->mstatus |= MSTATUS_FS; + if (riscv_has_ext(env, RVF)) { + env->mstatus |= MSTATUS_FS; + } #endif env->frm = val & (FSR_RD >> FSR_RD_SHIFT); return RISCV_EXCP_NONE; @@ -251,10 +427,6 @@ static RISCVException read_fcsr(CPURISCVState *env, int csrno, { *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT) | (env->frm << FSR_RD_SHIFT); - if (vs(env, csrno) >= 0) { - *val |= (env->vxrm << FSR_VXRM_SHIFT) - | (env->vxsat << FSR_VXSAT_SHIFT); - } return RISCV_EXCP_NONE; } @@ -262,13 +434,11 @@ static RISCVException write_fcsr(CPURISCVState *env, int csrno, target_ulong val) { #if !defined(CONFIG_USER_ONLY) - env->mstatus |= MSTATUS_FS; + if (riscv_has_ext(env, RVF)) { + env->mstatus |= MSTATUS_FS; + } #endif env->frm = (val & FSR_RD) >> FSR_RD_SHIFT; - if (vs(env, csrno) >= 0) { - env->vxrm = (val & FSR_VXRM) >> FSR_VXRM_SHIFT; - env->vxsat = (val & FSR_VXSAT) >> FSR_VXSAT_SHIFT; - } riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT); return RISCV_EXCP_NONE; } @@ -276,7 +446,18 @@ static RISCVException write_fcsr(CPURISCVState *env, int csrno, static RISCVException read_vtype(CPURISCVState *env, int csrno, target_ulong *val) { - *val = env->vtype; + uint64_t vill; + switch 
(env->xl) { + case MXL_RV32: + vill = (uint32_t)env->vill << 31; + break; + case MXL_RV64: + vill = (uint64_t)env->vill << 63; + break; + default: + g_assert_not_reached(); + } + *val = (target_ulong)vill | env->vtype; return RISCV_EXCP_NONE; } @@ -287,6 +468,12 @@ static RISCVException read_vl(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env_archcpu(env)->cfg.vlen >> 3; + return RISCV_EXCP_NONE; +} + static RISCVException read_vxrm(CPURISCVState *env, int csrno, target_ulong *val) { @@ -297,6 +484,9 @@ static RISCVException read_vxrm(CPURISCVState *env, int csrno, static RISCVException write_vxrm(CPURISCVState *env, int csrno, target_ulong val) { +#if !defined(CONFIG_USER_ONLY) + env->mstatus |= MSTATUS_VS; +#endif env->vxrm = val; return RISCV_EXCP_NONE; } @@ -311,6 +501,9 @@ static RISCVException read_vxsat(CPURISCVState *env, int csrno, static RISCVException write_vxsat(CPURISCVState *env, int csrno, target_ulong val) { +#if !defined(CONFIG_USER_ONLY) + env->mstatus |= MSTATUS_VS; +#endif env->vxsat = val; return RISCV_EXCP_NONE; } @@ -325,39 +518,56 @@ static RISCVException read_vstart(CPURISCVState *env, int csrno, static RISCVException write_vstart(CPURISCVState *env, int csrno, target_ulong val) { - env->vstart = val; +#if !defined(CONFIG_USER_ONLY) + env->mstatus |= MSTATUS_VS; +#endif + /* + * The vstart CSR is defined to have only enough writable bits + * to hold the largest element index, i.e. lg2(VLEN) bits. + */ + env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen)); + return RISCV_EXCP_NONE; +} + +static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT); + return RISCV_EXCP_NONE; +} + +static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val) +{ +#if !defined(CONFIG_USER_ONLY) + env->mstatus |= MSTATUS_VS; +#endif + env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT; + env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT; return RISCV_EXCP_NONE; } /* User Timers and Counters */ -static RISCVException read_instret(CPURISCVState *env, int csrno, - target_ulong *val) +static target_ulong get_ticks(bool shift) { -#if !defined(CONFIG_USER_ONLY) - if (icount_enabled()) { - *val = icount_get(); - } else { - *val = cpu_get_host_ticks(); - } -#else - *val = cpu_get_host_ticks(); -#endif - return RISCV_EXCP_NONE; -} + int64_t val; + target_ulong result; -static RISCVException read_instreth(CPURISCVState *env, int csrno, - target_ulong *val) -{ #if !defined(CONFIG_USER_ONLY) if (icount_enabled()) { - *val = icount_get() >> 32; + val = icount_get(); } else { - *val = cpu_get_host_ticks() >> 32; + val = cpu_get_host_ticks(); } #else - *val = cpu_get_host_ticks() >> 32; + val = cpu_get_host_ticks(); #endif - return RISCV_EXCP_NONE; + + if (shift) { + result = val >> 32; + } else { + result = val; + } + + return result; } #if defined(CONFIG_USER_ONLY) @@ -375,8 +585,209 @@ static RISCVException read_timeh(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = get_ticks(false); + return RISCV_EXCP_NONE; +} + +static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = get_ticks(true); + return RISCV_EXCP_NONE; +} + #else /* CONFIG_USER_ONLY */ +static int read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val) +{ + int evt_index = csrno - 
CSR_MCOUNTINHIBIT; + + *val = env->mhpmevent_val[evt_index]; + + return RISCV_EXCP_NONE; +} + +static int write_mhpmevent(CPURISCVState *env, int csrno, target_ulong val) +{ + int evt_index = csrno - CSR_MCOUNTINHIBIT; + uint64_t mhpmevt_val = val; + + env->mhpmevent_val[evt_index] = val; + + if (riscv_cpu_mxl(env) == MXL_RV32) { + mhpmevt_val = mhpmevt_val | + ((uint64_t)env->mhpmeventh_val[evt_index] << 32); + } + riscv_pmu_update_event_map(env, mhpmevt_val, evt_index); + + return RISCV_EXCP_NONE; +} + +static int read_mhpmeventh(CPURISCVState *env, int csrno, target_ulong *val) +{ + int evt_index = csrno - CSR_MHPMEVENT3H + 3; + + *val = env->mhpmeventh_val[evt_index]; + + return RISCV_EXCP_NONE; +} + +static int write_mhpmeventh(CPURISCVState *env, int csrno, target_ulong val) +{ + int evt_index = csrno - CSR_MHPMEVENT3H + 3; + uint64_t mhpmevth_val = val; + uint64_t mhpmevt_val = env->mhpmevent_val[evt_index]; + + mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32); + env->mhpmeventh_val[evt_index] = val; + + riscv_pmu_update_event_map(env, mhpmevt_val, evt_index); + + return RISCV_EXCP_NONE; +} + +static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val) +{ + int ctr_idx = csrno - CSR_MCYCLE; + PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; + uint64_t mhpmctr_val = val; + + counter->mhpmcounter_val = val; + if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || + riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) { + counter->mhpmcounter_prev = get_ticks(false); + if (ctr_idx > 2) { + if (riscv_cpu_mxl(env) == MXL_RV32) { + mhpmctr_val = mhpmctr_val | + ((uint64_t)counter->mhpmcounterh_val << 32); + } + riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx); + } + } else { + /* Other counters can keep incrementing from the given value */ + counter->mhpmcounter_prev = val; + } + + return RISCV_EXCP_NONE; +} + +static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val) +{ + int ctr_idx = csrno - CSR_MCYCLEH; + PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; + uint64_t mhpmctr_val = counter->mhpmcounter_val; + uint64_t mhpmctrh_val = val; + + counter->mhpmcounterh_val = val; + mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32); + if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || + riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) { + counter->mhpmcounterh_prev = get_ticks(true); + if (ctr_idx > 2) { + riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx); + } + } else { + counter->mhpmcounterh_prev = val; + } + + return RISCV_EXCP_NONE; +} + +static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val, + bool upper_half, uint32_t ctr_idx) +{ + PMUCTRState counter = env->pmu_ctrs[ctr_idx]; + target_ulong ctr_prev = upper_half ? counter.mhpmcounterh_prev : + counter.mhpmcounter_prev; + target_ulong ctr_val = upper_half ? counter.mhpmcounterh_val : + counter.mhpmcounter_val; + + if (get_field(env->mcountinhibit, BIT(ctr_idx))) { + /** + * Counter should not increment if inhibit bit is set. We can't really + * stop the icount counting. Just return the counter value written by + * the supervisor to indicate that counter was not incremented. + */ + if (!counter.started) { + *val = ctr_val; + return RISCV_EXCP_NONE; + } else { + /* Mark that the counter has been stopped */ + counter.started = false; + } + } + + /** + * The kernel computes the perf delta by subtracting the current value from + * the value it initialized previously (ctr_val). 
+ */ + if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || + riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) { + *val = get_ticks(upper_half) - ctr_prev + ctr_val; + } else { + *val = ctr_val; + } + + return RISCV_EXCP_NONE; +} + +static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val) +{ + uint16_t ctr_index; + + if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) { + ctr_index = csrno - CSR_MCYCLE; + } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) { + ctr_index = csrno - CSR_CYCLE; + } else { + return RISCV_EXCP_ILLEGAL_INST; + } + + return riscv_pmu_read_ctr(env, val, false, ctr_index); +} + +static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val) +{ + uint16_t ctr_index; + + if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) { + ctr_index = csrno - CSR_MCYCLEH; + } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) { + ctr_index = csrno - CSR_CYCLEH; + } else { + return RISCV_EXCP_ILLEGAL_INST; + } + + return riscv_pmu_read_ctr(env, val, true, ctr_index); +} + +static int read_scountovf(CPURISCVState *env, int csrno, target_ulong *val) +{ + int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT; + int i; + *val = 0; + target_ulong *mhpm_evt_val; + uint64_t of_bit_mask; + + if (riscv_cpu_mxl(env) == MXL_RV32) { + mhpm_evt_val = env->mhpmeventh_val; + of_bit_mask = MHPMEVENTH_BIT_OF; + } else { + mhpm_evt_val = env->mhpmevent_val; + of_bit_mask = MHPMEVENT_BIT_OF; + } + + for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) { + if ((get_field(env->mcounteren, BIT(i))) && + (mhpm_evt_val[i] & of_bit_mask)) { + *val |= BIT(i); + } + } + + return RISCV_EXCP_NONE; +} + static RISCVException read_time(CPURISCVState *env, int csrno, target_ulong *val) { @@ -403,17 +814,171 @@ static RISCVException read_timeh(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static RISCVException sstc(CPURISCVState *env, int csrno) +{ + CPUState *cs = env_cpu(env); + RISCVCPU *cpu = RISCV_CPU(cs); + bool hmode_check = false; + + if (!cpu->cfg.ext_sstc || !env->rdtime_fn) { + return RISCV_EXCP_ILLEGAL_INST; + } + + if (env->priv == PRV_M) { + return RISCV_EXCP_NONE; + } + + /* + * No need of separate function for rv32 as menvcfg stores both menvcfg + * menvcfgh for RV32. + */ + if (!(get_field(env->mcounteren, COUNTEREN_TM) && + get_field(env->menvcfg, MENVCFG_STCE))) { + return RISCV_EXCP_ILLEGAL_INST; + } + + if (riscv_cpu_virt_enabled(env)) { + if (!(get_field(env->hcounteren, COUNTEREN_TM) & + get_field(env->henvcfg, HENVCFG_STCE))) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + } + } + + if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) { + hmode_check = true; + } + + return hmode_check ? 
hmode(env, csrno) : smode(env, csrno); +} + +static RISCVException sstc_32(CPURISCVState *env, int csrno) +{ + if (riscv_cpu_mxl(env) != MXL_RV32) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return sstc(env, csrno); +} + +static RISCVException read_vstimecmp(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->vstimecmp; + + return RISCV_EXCP_NONE; +} + +static RISCVException read_vstimecmph(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->vstimecmp >> 32; + + return RISCV_EXCP_NONE; +} + +static RISCVException write_vstimecmp(CPURISCVState *env, int csrno, + target_ulong val) +{ + RISCVCPU *cpu = env_archcpu(env); + + if (riscv_cpu_mxl(env) == MXL_RV32) { + env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val); + } else { + env->vstimecmp = val; + } + + riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp, + env->htimedelta, MIP_VSTIP); + + return RISCV_EXCP_NONE; +} + +static RISCVException write_vstimecmph(CPURISCVState *env, int csrno, + target_ulong val) +{ + RISCVCPU *cpu = env_archcpu(env); + + env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val); + riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp, + env->htimedelta, MIP_VSTIP); + + return RISCV_EXCP_NONE; +} + +static RISCVException read_stimecmp(CPURISCVState *env, int csrno, + target_ulong *val) +{ + if (riscv_cpu_virt_enabled(env)) { + *val = env->vstimecmp; + } else { + *val = env->stimecmp; + } + + return RISCV_EXCP_NONE; +} + +static RISCVException read_stimecmph(CPURISCVState *env, int csrno, + target_ulong *val) +{ + if (riscv_cpu_virt_enabled(env)) { + *val = env->vstimecmp >> 32; + } else { + *val = env->stimecmp >> 32; + } + + return RISCV_EXCP_NONE; +} + +static RISCVException write_stimecmp(CPURISCVState *env, int csrno, + target_ulong val) +{ + RISCVCPU *cpu = env_archcpu(env); + + if (riscv_cpu_virt_enabled(env)) { + return write_vstimecmp(env, csrno, val); + } + + if (riscv_cpu_mxl(env) == MXL_RV32) { + env->stimecmp = deposit64(env->stimecmp, 0, 32, (uint64_t)val); + } else { + env->stimecmp = val; + } + + riscv_timer_write_timecmp(cpu, env->stimer, env->stimecmp, 0, MIP_STIP); + + return RISCV_EXCP_NONE; +} + +static RISCVException write_stimecmph(CPURISCVState *env, int csrno, + target_ulong val) +{ + RISCVCPU *cpu = env_archcpu(env); + + if (riscv_cpu_virt_enabled(env)) { + return write_vstimecmph(env, csrno, val); + } + + env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val); + riscv_timer_write_timecmp(cpu, env->stimer, env->stimecmp, 0, MIP_STIP); + + return RISCV_EXCP_NONE; +} + /* Machine constants */ -#define M_MODE_INTERRUPTS (MIP_MSIP | MIP_MTIP | MIP_MEIP) -#define S_MODE_INTERRUPTS (MIP_SSIP | MIP_STIP | MIP_SEIP) -#define VS_MODE_INTERRUPTS (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP) +#define M_MODE_INTERRUPTS ((uint64_t)(MIP_MSIP | MIP_MTIP | MIP_MEIP)) +#define S_MODE_INTERRUPTS ((uint64_t)(MIP_SSIP | MIP_STIP | MIP_SEIP | \ + MIP_LCOFIP)) +#define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP)) +#define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS)) -static const target_ulong delegable_ints = S_MODE_INTERRUPTS | +#define VSTOPI_NUM_SRCS 5 + +static const uint64_t delegable_ints = S_MODE_INTERRUPTS | VS_MODE_INTERRUPTS; -static const target_ulong vs_delegable_ints = VS_MODE_INTERRUPTS; -static const target_ulong all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS | - VS_MODE_INTERRUPTS; +static const uint64_t vs_delegable_ints = VS_MODE_INTERRUPTS; +static const uint64_t all_ints = 
M_MODE_INTERRUPTS | S_MODE_INTERRUPTS | + HS_MODE_INTERRUPTS; #define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \ (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \ (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \ @@ -443,8 +1008,9 @@ static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS & (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT))); static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS | - SSTATUS_SUM | SSTATUS_MXR; -static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP; + SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS; +static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP | + SIP_LCOFIP; static const target_ulong hip_writable_mask = MIP_VSSIP; static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP; static const target_ulong vsip_writable_mask = MIP_VSSIP; @@ -469,6 +1035,42 @@ static RISCVException read_zero(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static RISCVException write_ignore(CPURISCVState *env, int csrno, + target_ulong val) +{ + return RISCV_EXCP_NONE; +} + +static RISCVException read_mvendorid(CPURISCVState *env, int csrno, + target_ulong *val) +{ + CPUState *cs = env_cpu(env); + RISCVCPU *cpu = RISCV_CPU(cs); + + *val = cpu->cfg.mvendorid; + return RISCV_EXCP_NONE; +} + +static RISCVException read_marchid(CPURISCVState *env, int csrno, + target_ulong *val) +{ + CPUState *cs = env_cpu(env); + RISCVCPU *cpu = RISCV_CPU(cs); + + *val = cpu->cfg.marchid; + return RISCV_EXCP_NONE; +} + +static RISCVException read_mimpid(CPURISCVState *env, int csrno, + target_ulong *val) +{ + CPUState *cs = env_cpu(env); + RISCVCPU *cpu = RISCV_CPU(cs); + + *val = cpu->cfg.mimpid; + return RISCV_EXCP_NONE; +} + static RISCVException read_mhartid(CPURISCVState *env, int csrno, target_ulong *val) { @@ -477,16 +1079,37 @@ static RISCVException read_mhartid(CPURISCVState *env, int csrno, } /* Machine Trap Setup */ + +/* We do not store SD explicitly, only compute it on demand. 
*/ +static uint64_t add_status_sd(RISCVMXL xl, uint64_t status) +{ + if ((status & MSTATUS_FS) == MSTATUS_FS || + (status & MSTATUS_VS) == MSTATUS_VS || + (status & MSTATUS_XS) == MSTATUS_XS) { + switch (xl) { + case MXL_RV32: + return status | MSTATUS32_SD; + case MXL_RV64: + return status | MSTATUS64_SD; + case MXL_RV128: + return MSTATUSH128_SD; + default: + g_assert_not_reached(); + } + } + return status; +} + static RISCVException read_mstatus(CPURISCVState *env, int csrno, target_ulong *val) { - *val = env->mstatus; + *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus); return RISCV_EXCP_NONE; } static int validate_vm(CPURISCVState *env, target_ulong vm) { - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { return valid_vm_1_10_32[vm & 0xf]; } else { return valid_vm_1_10_64[vm & 0xf]; @@ -498,7 +1121,7 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno, { uint64_t mstatus = env->mstatus; uint64_t mask = 0; - int dirty; + RISCVMXL xl = riscv_cpu_mxl(env); /* flush tlb on mstatus fields that affect VM */ if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV | @@ -506,28 +1129,33 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno, tlb_flush(env_cpu(env)); } mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE | - MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM | + MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM | MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR | - MSTATUS_TW; + MSTATUS_TW | MSTATUS_VS; - if (!riscv_cpu_is_32bit(env)) { + if (riscv_has_ext(env, RVF)) { + mask |= MSTATUS_FS; + } + + if (xl != MXL_RV32 || env->debugger) { /* * RV32: MPV and GVA are not in mstatus. The current plan is to * add them to mstatush. For now, we just don't support it. */ mask |= MSTATUS_MPV | MSTATUS_GVA; + if ((val & MSTATUS64_UXL) != 0) { + mask |= MSTATUS64_UXL; + } } mstatus = (mstatus & ~mask) | (val & mask); - dirty = ((mstatus & MSTATUS_FS) == MSTATUS_FS) | - ((mstatus & MSTATUS_XS) == MSTATUS_XS); - if (riscv_cpu_is_32bit(env)) { - mstatus = set_field(mstatus, MSTATUS32_SD, dirty); - } else { - mstatus = set_field(mstatus, MSTATUS64_SD, dirty); + if (xl > MXL_RV32) { + /* SXL field is for now read only */ + mstatus = set_field(mstatus, MSTATUS64_SXL, xl); } env->mstatus = mstatus; + env->xl = cpu_recompute_xl(env); return RISCV_EXCP_NONE; } @@ -554,10 +1182,39 @@ static RISCVException write_mstatush(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno, + Int128 *val) +{ + *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128, env->mstatus)); + return RISCV_EXCP_NONE; +} + +static RISCVException read_misa_i128(CPURISCVState *env, int csrno, + Int128 *val) +{ + *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62); + return RISCV_EXCP_NONE; +} + static RISCVException read_misa(CPURISCVState *env, int csrno, target_ulong *val) { - *val = env->misa; + target_ulong misa; + + switch (env->misa_mxl) { + case MXL_RV32: + misa = (target_ulong)MXL_RV32 << 30; + break; +#ifdef TARGET_RISCV64 + case MXL_RV64: + misa = (target_ulong)MXL_RV64 << 62; + break; +#endif + default: + g_assert_not_reached(); + } + + *val = misa | env->misa_ext; return RISCV_EXCP_NONE; } @@ -583,11 +1240,16 @@ static RISCVException write_misa(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } + /* + * misa.MXL writes are not supported by QEMU. + * Drop writes to those bits. 
+ */ + /* Mask extensions that are not supported by this hart */ - val &= env->misa_mask; + val &= env->misa_ext_mask; /* Mask extensions that are not supported by QEMU */ - val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU); + val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU | RVV); /* 'D' depends on 'F', so clear 'D' if 'F' is not present */ if ((val & RVD) && !(val & RVF)) { @@ -601,20 +1263,19 @@ static RISCVException write_misa(CPURISCVState *env, int csrno, val &= ~RVC; } - /* misa.MXL writes are not supported by QEMU */ - if (riscv_cpu_is_32bit(env)) { - val = (env->misa & MISA32_MXL) | (val & ~MISA32_MXL); - } else { - val = (env->misa & MISA64_MXL) | (val & ~MISA64_MXL); + /* If nothing changed, do nothing. */ + if (val == env->misa_ext) { + return RISCV_EXCP_NONE; + } + + if (!(val & RVF)) { + env->mstatus &= ~MSTATUS_FS; } /* flush translation cache */ - if (val != env->misa) { - tb_flush(env_cpu(env)); - } - - env->misa = val; - + tb_flush(env_cpu(env)); + env->misa_ext = val; + env->xl = riscv_cpu_mxl(env); return RISCV_EXCP_NONE; } @@ -632,34 +1293,345 @@ static RISCVException write_medeleg(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static RISCVException read_mideleg(CPURISCVState *env, int csrno, - target_ulong *val) +static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno, + uint64_t *ret_val, + uint64_t new_val, uint64_t wr_mask) { - *val = env->mideleg; + uint64_t mask = wr_mask & delegable_ints; + + if (ret_val) { + *ret_val = env->mideleg; + } + + env->mideleg = (env->mideleg & ~mask) | (new_val & mask); + + if (riscv_has_ext(env, RVH)) { + env->mideleg |= HS_MODE_INTERRUPTS; + } + return RISCV_EXCP_NONE; } -static RISCVException write_mideleg(CPURISCVState *env, int csrno, - target_ulong val) +static RISCVException rmw_mideleg(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) { - env->mideleg = (env->mideleg & ~delegable_ints) | (val & delegable_ints); - if (riscv_has_ext(env, RVH)) { - env->mideleg |= VS_MODE_INTERRUPTS; + uint64_t rval; + RISCVException ret; + + ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask); + if (ret_val) { + *ret_val = rval; + } + + return ret; +} + +static RISCVException rmw_midelegh(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, + target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_mideleg64(env, csrno, &rval, + ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); + if (ret_val) { + *ret_val = rval >> 32; + } + + return ret; +} + +static RISCVException rmw_mie64(CPURISCVState *env, int csrno, + uint64_t *ret_val, + uint64_t new_val, uint64_t wr_mask) +{ + uint64_t mask = wr_mask & all_ints; + + if (ret_val) { + *ret_val = env->mie; + } + + env->mie = (env->mie & ~mask) | (new_val & mask); + + if (!riscv_has_ext(env, RVH)) { + env->mie &= ~((uint64_t)MIP_SGEIP); + } + + return RISCV_EXCP_NONE; +} + +static RISCVException rmw_mie(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask); + if (ret_val) { + *ret_val = rval; + } + + return ret; +} + +static RISCVException rmw_mieh(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_mie64(env, csrno, &rval, + ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); + if (ret_val) { + *ret_val 
= rval >> 32; + } + + return ret; +} + +static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val) +{ + int irq; + uint8_t iprio; + + irq = riscv_cpu_mirq_pending(env); + if (irq <= 0 || irq > 63) { + *val = 0; + } else { + iprio = env->miprio[irq]; + if (!iprio) { + if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) { + iprio = IPRIO_MMAXIPRIO; + } + } + *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT; + *val |= iprio; + } + + return RISCV_EXCP_NONE; +} + +static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno) +{ + if (!riscv_cpu_virt_enabled(env)) { + return csrno; + } + + switch (csrno) { + case CSR_SISELECT: + return CSR_VSISELECT; + case CSR_SIREG: + return CSR_VSIREG; + case CSR_STOPEI: + return CSR_VSTOPEI; + default: + return csrno; + }; +} + +static int rmw_xiselect(CPURISCVState *env, int csrno, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) +{ + target_ulong *iselect; + + /* Translate CSR number for VS-mode */ + csrno = aia_xlate_vs_csrno(env, csrno); + + /* Find the iselect CSR based on CSR number */ + switch (csrno) { + case CSR_MISELECT: + iselect = &env->miselect; + break; + case CSR_SISELECT: + iselect = &env->siselect; + break; + case CSR_VSISELECT: + iselect = &env->vsiselect; + break; + default: + return RISCV_EXCP_ILLEGAL_INST; + }; + + if (val) { + *val = *iselect; + } + + wr_mask &= ISELECT_MASK; + if (wr_mask) { + *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask); + } + + return RISCV_EXCP_NONE; +} + +static int rmw_iprio(target_ulong xlen, + target_ulong iselect, uint8_t *iprio, + target_ulong *val, target_ulong new_val, + target_ulong wr_mask, int ext_irq_no) +{ + int i, firq, nirqs; + target_ulong old_val; + + if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) { + return -EINVAL; + } + if (xlen != 32 && iselect & 0x1) { + return -EINVAL; + } + + nirqs = 4 * (xlen / 32); + firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs); + + old_val = 0; + for (i = 0; i < nirqs; i++) { + old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i); + } + + if (val) { + *val = old_val; + } + + if (wr_mask) { + new_val = (old_val & ~wr_mask) | (new_val & wr_mask); + for (i = 0; i < nirqs; i++) { + /* + * M-level and S-level external IRQ priority always read-only + * zero. This means default priority order is always preferred + * for M-level and S-level external IRQs. + */ + if ((firq + i) == ext_irq_no) { + continue; + } + iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff; + } + } + + return 0; +} + +static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) +{ + bool virt; + uint8_t *iprio; + int ret = -EINVAL; + target_ulong priv, isel, vgein; + + /* Translate CSR number for VS-mode */ + csrno = aia_xlate_vs_csrno(env, csrno); + + /* Decode register details from CSR number */ + virt = false; + switch (csrno) { + case CSR_MIREG: + iprio = env->miprio; + isel = env->miselect; + priv = PRV_M; + break; + case CSR_SIREG: + iprio = env->siprio; + isel = env->siselect; + priv = PRV_S; + break; + case CSR_VSIREG: + iprio = env->hviprio; + isel = env->vsiselect; + priv = PRV_S; + virt = true; + break; + default: + goto done; + }; + + /* Find the selected guest interrupt file */ + vgein = (virt) ? 
get_field(env->hstatus, HSTATUS_VGEIN) : 0; + + if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) { + /* Local interrupt priority registers not available for VS-mode */ + if (!virt) { + ret = rmw_iprio(riscv_cpu_mxl_bits(env), + isel, iprio, val, new_val, wr_mask, + (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT); + } + } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) { + /* IMSIC registers only available when machine implements it. */ + if (env->aia_ireg_rmw_fn[priv]) { + /* Selected guest interrupt file should not be zero */ + if (virt && (!vgein || env->geilen < vgein)) { + goto done; + } + /* Call machine specific IMSIC register emulation */ + ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv], + AIA_MAKE_IREG(isel, priv, virt, vgein, + riscv_cpu_mxl_bits(env)), + val, new_val, wr_mask); + } + } + +done: + if (ret) { + return (riscv_cpu_virt_enabled(env) && virt) ? + RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; } return RISCV_EXCP_NONE; } -static RISCVException read_mie(CPURISCVState *env, int csrno, - target_ulong *val) +static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) { - *val = env->mie; - return RISCV_EXCP_NONE; -} + bool virt; + int ret = -EINVAL; + target_ulong priv, vgein; -static RISCVException write_mie(CPURISCVState *env, int csrno, - target_ulong val) -{ - env->mie = (env->mie & ~all_ints) | (val & all_ints); + /* Translate CSR number for VS-mode */ + csrno = aia_xlate_vs_csrno(env, csrno); + + /* Decode register details from CSR number */ + virt = false; + switch (csrno) { + case CSR_MTOPEI: + priv = PRV_M; + break; + case CSR_STOPEI: + priv = PRV_S; + break; + case CSR_VSTOPEI: + priv = PRV_S; + virt = true; + break; + default: + goto done; + }; + + /* IMSIC CSRs only available when machine implements IMSIC. */ + if (!env->aia_ireg_rmw_fn[priv]) { + goto done; + } + + /* Find the selected guest interrupt file */ + vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0; + + /* Selected guest interrupt file should be valid */ + if (virt && (!vgein || env->geilen < vgein)) { + goto done; + } + + /* Call machine specific IMSIC register emulation for TOPEI */ + ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv], + AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein, + riscv_cpu_mxl_bits(env)), + val, new_val, wr_mask); + +done: + if (ret) { + return (riscv_cpu_virt_enabled(env) && virt) ? 
+ RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; + } return RISCV_EXCP_NONE; } @@ -682,6 +1654,32 @@ static RISCVException write_mtvec(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->mcountinhibit; + return RISCV_EXCP_NONE; +} + +static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno, + target_ulong val) +{ + int cidx; + PMUCTRState *counter; + + env->mcountinhibit = val; + + /* Check if any other counter is also monitoring cycles/instructions */ + for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) { + if (!get_field(env->mcountinhibit, BIT(cidx))) { + counter = &env->pmu_ctrs[cidx]; + counter->started = true; + } + } + + return RISCV_EXCP_NONE; +} + static RISCVException read_mcounteren(CPURISCVState *env, int csrno, target_ulong *val) { @@ -697,6 +1695,21 @@ static RISCVException write_mcounteren(CPURISCVState *env, int csrno, } /* Machine Trap Handling */ +static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno, + Int128 *val) +{ + *val = int128_make128(env->mscratch, env->mscratchh); + return RISCV_EXCP_NONE; +} + +static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno, + Int128 val) +{ + env->mscratch = int128_getlo(val); + env->mscratchh = int128_gethi(val); + return RISCV_EXCP_NONE; +} + static RISCVException read_mscratch(CPURISCVState *env, int csrno, target_ulong *val) { @@ -753,41 +1766,193 @@ static RISCVException write_mtval(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static RISCVException rmw_mip(CPURISCVState *env, int csrno, - target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask) +/* Execution environment configuration setup */ +static RISCVException read_menvcfg(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->menvcfg; + return RISCV_EXCP_NONE; +} + +static RISCVException write_menvcfg(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE; + + if (riscv_cpu_mxl(env) == MXL_RV64) { + mask |= MENVCFG_PBMTE | MENVCFG_STCE; + } + env->menvcfg = (env->menvcfg & ~mask) | (val & mask); + + return RISCV_EXCP_NONE; +} + +static RISCVException read_menvcfgh(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->menvcfg >> 32; + return RISCV_EXCP_NONE; +} + +static RISCVException write_menvcfgh(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mask = MENVCFG_PBMTE | MENVCFG_STCE; + uint64_t valh = (uint64_t)val << 32; + + env->menvcfg = (env->menvcfg & ~mask) | (valh & mask); + + return RISCV_EXCP_NONE; +} + +static RISCVException read_senvcfg(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->senvcfg; + return RISCV_EXCP_NONE; +} + +static RISCVException write_senvcfg(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE; + + env->senvcfg = (env->senvcfg & ~mask) | (val & mask); + + return RISCV_EXCP_NONE; +} + +static RISCVException read_henvcfg(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->henvcfg; + return RISCV_EXCP_NONE; +} + +static RISCVException write_henvcfg(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE; + + if (riscv_cpu_mxl(env) == MXL_RV64) { + mask |= HENVCFG_PBMTE | HENVCFG_STCE; + } + + env->henvcfg = (env->henvcfg & ~mask) | (val 
& mask); + + return RISCV_EXCP_NONE; +} + +static RISCVException read_henvcfgh(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->henvcfg >> 32; + return RISCV_EXCP_NONE; +} + +static RISCVException write_henvcfgh(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mask = HENVCFG_PBMTE | HENVCFG_STCE; + uint64_t valh = (uint64_t)val << 32; + + env->henvcfg = (env->henvcfg & ~mask) | (valh & mask); + + return RISCV_EXCP_NONE; +} + +static RISCVException rmw_mip64(CPURISCVState *env, int csrno, + uint64_t *ret_val, + uint64_t new_val, uint64_t wr_mask) { RISCVCPU *cpu = env_archcpu(env); - /* Allow software control of delegable interrupts not claimed by hardware */ - target_ulong mask = write_mask & delegable_ints & ~env->miclaim; - uint32_t old_mip; + uint64_t old_mip, mask = wr_mask & delegable_ints; + uint32_t gin; + + if (mask & MIP_SEIP) { + env->software_seip = new_val & MIP_SEIP; + new_val |= env->external_seip * MIP_SEIP; + } + + if (cpu->cfg.ext_sstc && (env->priv == PRV_M) && + get_field(env->menvcfg, MENVCFG_STCE)) { + /* sstc extension forbids STIP & VSTIP to be writeable in mip */ + mask = mask & ~(MIP_STIP | MIP_VSTIP); + } if (mask) { - old_mip = riscv_cpu_update_mip(cpu, mask, (new_value & mask)); + old_mip = riscv_cpu_update_mip(cpu, mask, (new_val & mask)); } else { old_mip = env->mip; } - if (ret_value) { - *ret_value = old_mip; + if (csrno != CSR_HVIP) { + gin = get_field(env->hstatus, HSTATUS_VGEIN); + old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0; + old_mip |= env->vstime_irq ? MIP_VSTIP : 0; + } + + if (ret_val) { + *ret_val = old_mip; } return RISCV_EXCP_NONE; } +static RISCVException rmw_mip(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask); + if (ret_val) { + *ret_val = rval; + } + + return ret; +} + +static RISCVException rmw_miph(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_mip64(env, csrno, &rval, + ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); + if (ret_val) { + *ret_val = rval >> 32; + } + + return ret; +} + /* Supervisor Trap Setup */ +static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno, + Int128 *val) +{ + uint64_t mask = sstatus_v1_10_mask; + uint64_t sstatus = env->mstatus & mask; + if (env->xl != MXL_RV32 || env->debugger) { + mask |= SSTATUS64_UXL; + } + + *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus)); + return RISCV_EXCP_NONE; +} + static RISCVException read_sstatus(CPURISCVState *env, int csrno, target_ulong *val) { target_ulong mask = (sstatus_v1_10_mask); - - if (riscv_cpu_is_32bit(env)) { - mask |= SSTATUS32_SD; - } else { - mask |= SSTATUS64_SD; + if (env->xl != MXL_RV32 || env->debugger) { + mask |= SSTATUS64_UXL; } - - *val = env->mstatus & mask; + /* TODO: Use SXL not MXL. 
*/ + *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask); return RISCV_EXCP_NONE; } @@ -795,49 +1960,125 @@ static RISCVException write_sstatus(CPURISCVState *env, int csrno, target_ulong val) { target_ulong mask = (sstatus_v1_10_mask); + + if (env->xl != MXL_RV32 || env->debugger) { + if ((val & SSTATUS64_UXL) != 0) { + mask |= SSTATUS64_UXL; + } + } target_ulong newval = (env->mstatus & ~mask) | (val & mask); return write_mstatus(env, CSR_MSTATUS, newval); } -static RISCVException read_vsie(CPURISCVState *env, int csrno, - target_ulong *val) +static RISCVException rmw_vsie64(CPURISCVState *env, int csrno, + uint64_t *ret_val, + uint64_t new_val, uint64_t wr_mask) { - /* Shift the VS bits to their S bit location in vsie */ - *val = (env->mie & env->hideleg & VS_MODE_INTERRUPTS) >> 1; - return RISCV_EXCP_NONE; -} + RISCVException ret; + uint64_t rval, vsbits, mask = env->hideleg & VS_MODE_INTERRUPTS; -static RISCVException read_sie(CPURISCVState *env, int csrno, - target_ulong *val) -{ - if (riscv_cpu_virt_enabled(env)) { - read_vsie(env, CSR_VSIE, val); - } else { - *val = env->mie & env->mideleg; - } - return RISCV_EXCP_NONE; -} + /* Bring VS-level bits to correct position */ + vsbits = new_val & (VS_MODE_INTERRUPTS >> 1); + new_val &= ~(VS_MODE_INTERRUPTS >> 1); + new_val |= vsbits << 1; + vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1); + wr_mask &= ~(VS_MODE_INTERRUPTS >> 1); + wr_mask |= vsbits << 1; -static RISCVException write_vsie(CPURISCVState *env, int csrno, - target_ulong val) -{ - /* Shift the S bits to their VS bit location in mie */ - target_ulong newval = (env->mie & ~VS_MODE_INTERRUPTS) | - ((val << 1) & env->hideleg & VS_MODE_INTERRUPTS); - return write_mie(env, CSR_MIE, newval); -} - -static int write_sie(CPURISCVState *env, int csrno, target_ulong val) -{ - if (riscv_cpu_virt_enabled(env)) { - write_vsie(env, CSR_VSIE, val); - } else { - target_ulong newval = (env->mie & ~S_MODE_INTERRUPTS) | - (val & S_MODE_INTERRUPTS); - write_mie(env, CSR_MIE, newval); + ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & mask); + if (ret_val) { + rval &= mask; + vsbits = rval & VS_MODE_INTERRUPTS; + rval &= ~VS_MODE_INTERRUPTS; + *ret_val = rval | (vsbits >> 1); } - return RISCV_EXCP_NONE; + return ret; +} + +static RISCVException rmw_vsie(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask); + if (ret_val) { + *ret_val = rval; + } + + return ret; +} + +static RISCVException rmw_vsieh(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_vsie64(env, csrno, &rval, + ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); + if (ret_val) { + *ret_val = rval >> 32; + } + + return ret; +} + +static RISCVException rmw_sie64(CPURISCVState *env, int csrno, + uint64_t *ret_val, + uint64_t new_val, uint64_t wr_mask) +{ + RISCVException ret; + uint64_t mask = env->mideleg & S_MODE_INTERRUPTS; + + if (riscv_cpu_virt_enabled(env)) { + if (env->hvictl & HVICTL_VTI) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + } + ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask); + } else { + ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & mask); + } + + if (ret_val) { + *ret_val &= mask; + } + + return ret; +} + +static RISCVException rmw_sie(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong 
wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask); + if (ret == RISCV_EXCP_NONE && ret_val) { + *ret_val = rval; + } + + return ret; +} + +static RISCVException rmw_sieh(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_sie64(env, csrno, &rval, + ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); + if (ret_val) { + *ret_val = rval >> 32; + } + + return ret; } static RISCVException read_stvec(CPURISCVState *env, int csrno, @@ -874,6 +2115,21 @@ static RISCVException write_scounteren(CPURISCVState *env, int csrno, } /* Supervisor Trap Handling */ +static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno, + Int128 *val) +{ + *val = int128_make128(env->sscratch, env->sscratchh); + return RISCV_EXCP_NONE; +} + +static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno, + Int128 val) +{ + env->sscratch = int128_getlo(val); + env->sscratchh = int128_gethi(val); + return RISCV_EXCP_NONE; +} + static RISCVException read_sscratch(CPURISCVState *env, int csrno, target_ulong *val) { @@ -930,33 +2186,114 @@ static RISCVException write_stval(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static RISCVException rmw_vsip(CPURISCVState *env, int csrno, - target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask) +static RISCVException rmw_vsip64(CPURISCVState *env, int csrno, + uint64_t *ret_val, + uint64_t new_val, uint64_t wr_mask) { - /* Shift the S bits to their VS bit location in mip */ - int ret = rmw_mip(env, 0, ret_value, new_value << 1, - (write_mask << 1) & vsip_writable_mask & env->hideleg); - *ret_value &= VS_MODE_INTERRUPTS; - /* Shift the VS bits to their S bit location in vsip */ - *ret_value >>= 1; + RISCVException ret; + uint64_t rval, vsbits, mask = env->hideleg & vsip_writable_mask; + + /* Bring VS-level bits to correct position */ + vsbits = new_val & (VS_MODE_INTERRUPTS >> 1); + new_val &= ~(VS_MODE_INTERRUPTS >> 1); + new_val |= vsbits << 1; + vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1); + wr_mask &= ~(VS_MODE_INTERRUPTS >> 1); + wr_mask |= vsbits << 1; + + ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask & mask); + if (ret_val) { + rval &= mask; + vsbits = rval & VS_MODE_INTERRUPTS; + rval &= ~VS_MODE_INTERRUPTS; + *ret_val = rval | (vsbits >> 1); + } + + return ret; +} + +static RISCVException rmw_vsip(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask); + if (ret_val) { + *ret_val = rval; + } + + return ret; +} + +static RISCVException rmw_vsiph(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_vsip64(env, csrno, &rval, + ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); + if (ret_val) { + *ret_val = rval >> 32; + } + + return ret; +} + +static RISCVException rmw_sip64(CPURISCVState *env, int csrno, + uint64_t *ret_val, + uint64_t new_val, uint64_t wr_mask) +{ + RISCVException ret; + uint64_t mask = env->mideleg & sip_writable_mask; + + if (riscv_cpu_virt_enabled(env)) { + if (env->hvictl & HVICTL_VTI) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + } + ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask); + } else { + ret = rmw_mip64(env, csrno, ret_val, new_val, wr_mask & mask); + } + + if 
(ret_val) { + *ret_val &= env->mideleg & S_MODE_INTERRUPTS; + } + return ret; } static RISCVException rmw_sip(CPURISCVState *env, int csrno, - target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask) + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) { - int ret; + uint64_t rval; + RISCVException ret; - if (riscv_cpu_virt_enabled(env)) { - ret = rmw_vsip(env, CSR_VSIP, ret_value, new_value, write_mask); - } else { - ret = rmw_mip(env, CSR_MSTATUS, ret_value, new_value, - write_mask & env->mideleg & sip_writable_mask); + ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask); + if (ret_val) { + *ret_val = rval; + } + + return ret; +} + +static RISCVException rmw_siph(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_sip64(env, csrno, &rval, + ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); + if (ret_val) { + *ret_val = rval >> 32; } - *ret_value &= env->mideleg; return ret; } @@ -981,41 +2318,157 @@ static RISCVException read_satp(CPURISCVState *env, int csrno, static RISCVException write_satp(CPURISCVState *env, int csrno, target_ulong val) { - int vm, mask, asid; + target_ulong vm, mask; if (!riscv_feature(env, RISCV_FEATURE_MMU)) { return RISCV_EXCP_NONE; } - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { vm = validate_vm(env, get_field(val, SATP32_MODE)); mask = (val ^ env->satp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN); - asid = (val ^ env->satp) & SATP32_ASID; } else { vm = validate_vm(env, get_field(val, SATP64_MODE)); mask = (val ^ env->satp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN); - asid = (val ^ env->satp) & SATP64_ASID; } if (vm && mask) { if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) { return RISCV_EXCP_ILLEGAL_INST; } else { - if (asid) { - tlb_flush(env_cpu(env)); - } + /* + * The ISA defines SATP.MODE=Bare as "no translation", but we still + * pass these through QEMU's TLB emulation as it improves + * performance. Flushing the TLB on SATP writes with paging + * enabled avoids leaking those invalid cached mappings. + */ + tlb_flush(env_cpu(env)); env->satp = val; } } return RISCV_EXCP_NONE; } +static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val) +{ + int irq, ret; + target_ulong topei; + uint64_t vseip, vsgein; + uint32_t iid, iprio, hviid, hviprio, gein; + uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS]; + + gein = get_field(env->hstatus, HSTATUS_VGEIN); + hviid = get_field(env->hvictl, HVICTL_IID); + hviprio = get_field(env->hvictl, HVICTL_IPRIO); + + if (gein) { + vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0; + vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP; + if (gein <= env->geilen && vseip) { + siid[scount] = IRQ_S_EXT; + siprio[scount] = IPRIO_MMAXIPRIO + 1; + if (env->aia_ireg_rmw_fn[PRV_S]) { + /* + * Call machine specific IMSIC register emulation for + * reading TOPEI. 
+ */ + ret = env->aia_ireg_rmw_fn[PRV_S]( + env->aia_ireg_rmw_fn_arg[PRV_S], + AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein, + riscv_cpu_mxl_bits(env)), + &topei, 0, 0); + if (!ret && topei) { + siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK; + } + } + scount++; + } + } else { + if (hviid == IRQ_S_EXT && hviprio) { + siid[scount] = IRQ_S_EXT; + siprio[scount] = hviprio; + scount++; + } + } + + if (env->hvictl & HVICTL_VTI) { + if (hviid != IRQ_S_EXT) { + siid[scount] = hviid; + siprio[scount] = hviprio; + scount++; + } + } else { + irq = riscv_cpu_vsirq_pending(env); + if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) { + siid[scount] = irq; + siprio[scount] = env->hviprio[irq]; + scount++; + } + } + + iid = 0; + iprio = UINT_MAX; + for (s = 0; s < scount; s++) { + if (siprio[s] < iprio) { + iid = siid[s]; + iprio = siprio[s]; + } + } + + if (iid) { + if (env->hvictl & HVICTL_IPRIOM) { + if (iprio > IPRIO_MMAXIPRIO) { + iprio = IPRIO_MMAXIPRIO; + } + if (!iprio) { + if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) { + iprio = IPRIO_MMAXIPRIO; + } + } + } else { + iprio = 1; + } + } else { + iprio = 0; + } + + *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT; + *val |= iprio; + return RISCV_EXCP_NONE; +} + +static int read_stopi(CPURISCVState *env, int csrno, target_ulong *val) +{ + int irq; + uint8_t iprio; + + if (riscv_cpu_virt_enabled(env)) { + return read_vstopi(env, CSR_VSTOPI, val); + } + + irq = riscv_cpu_sirq_pending(env); + if (irq <= 0 || irq > 63) { + *val = 0; + } else { + iprio = env->siprio[irq]; + if (!iprio) { + if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) { + iprio = IPRIO_MMAXIPRIO; + } + } + *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT; + *val |= iprio; + } + + return RISCV_EXCP_NONE; +} + /* Hypervisor Extensions */ static RISCVException read_hstatus(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->hstatus; - if (!riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) != MXL_RV32) { /* We only support 64-bit VSXL */ *val = set_field(*val, HSTATUS_VSXL, 2); } @@ -1028,7 +2481,7 @@ static RISCVException write_hstatus(CPURISCVState *env, int csrno, target_ulong val) { env->hstatus = val; - if (!riscv_cpu_is_32bit(env) && get_field(val, HSTATUS_VSXL) != 2) { + if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) { qemu_log_mask(LOG_UNIMP, "QEMU does not support mixed HSXLEN options."); } if (get_field(val, HSTATUS_VSBE) != 0) { @@ -1051,28 +2504,93 @@ static RISCVException write_hedeleg(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static RISCVException read_hideleg(CPURISCVState *env, int csrno, - target_ulong *val) +static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno, + uint64_t *ret_val, + uint64_t new_val, uint64_t wr_mask) { - *val = env->hideleg; + uint64_t mask = wr_mask & vs_delegable_ints; + + if (ret_val) { + *ret_val = env->hideleg & vs_delegable_ints; + } + + env->hideleg = (env->hideleg & ~mask) | (new_val & mask); return RISCV_EXCP_NONE; } -static RISCVException write_hideleg(CPURISCVState *env, int csrno, - target_ulong val) +static RISCVException rmw_hideleg(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) { - env->hideleg = val & vs_delegable_ints; - return RISCV_EXCP_NONE; + uint64_t rval; + RISCVException ret; + + ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask); + if (ret_val) { + *ret_val = rval; + } + + return ret; +} + +static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno, + target_ulong *ret_val, + 
target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_hideleg64(env, csrno, &rval, + ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); + if (ret_val) { + *ret_val = rval >> 32; + } + + return ret; +} + +static RISCVException rmw_hvip64(CPURISCVState *env, int csrno, + uint64_t *ret_val, + uint64_t new_val, uint64_t wr_mask) +{ + RISCVException ret; + + ret = rmw_mip64(env, csrno, ret_val, new_val, + wr_mask & hvip_writable_mask); + if (ret_val) { + *ret_val &= VS_MODE_INTERRUPTS; + } + + return ret; } static RISCVException rmw_hvip(CPURISCVState *env, int csrno, - target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask) + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) { - int ret = rmw_mip(env, 0, ret_value, new_value, - write_mask & hvip_writable_mask); + uint64_t rval; + RISCVException ret; - *ret_value &= hvip_writable_mask; + ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask); + if (ret_val) { + *ret_val = rval; + } + + return ret; +} + +static RISCVException rmw_hviph(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_hvip64(env, csrno, &rval, + ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); + if (ret_val) { + *ret_val = rval >> 32; + } return ret; } @@ -1081,26 +2599,28 @@ static RISCVException rmw_hip(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, target_ulong write_mask) { - int ret = rmw_mip(env, 0, ret_value, new_value, + int ret = rmw_mip(env, csrno, ret_value, new_value, write_mask & hip_writable_mask); - *ret_value &= hip_writable_mask; - + if (ret_value) { + *ret_value &= HS_MODE_INTERRUPTS; + } return ret; } -static RISCVException read_hie(CPURISCVState *env, int csrno, - target_ulong *val) +static RISCVException rmw_hie(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) { - *val = env->mie & VS_MODE_INTERRUPTS; - return RISCV_EXCP_NONE; -} + uint64_t rval; + RISCVException ret; -static RISCVException write_hie(CPURISCVState *env, int csrno, - target_ulong val) -{ - target_ulong newval = (env->mie & ~VS_MODE_INTERRUPTS) | (val & VS_MODE_INTERRUPTS); - return write_mie(env, CSR_MIE, newval); + ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS); + if (ret_val) { + *ret_val = rval & HS_MODE_INTERRUPTS; + } + + return ret; } static RISCVException read_hcounteren(CPURISCVState *env, int csrno, @@ -1120,14 +2640,21 @@ static RISCVException write_hcounteren(CPURISCVState *env, int csrno, static RISCVException read_hgeie(CPURISCVState *env, int csrno, target_ulong *val) { - qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN."); + if (val) { + *val = env->hgeie; + } return RISCV_EXCP_NONE; } static RISCVException write_hgeie(CPURISCVState *env, int csrno, target_ulong val) { - qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN."); + /* Only GEILEN:1 bits implemented and BIT0 is never implemented */ + val &= ((((target_ulong)1) << env->geilen) - 1) << 1; + env->hgeie = val; + /* Update mip.SGEIP bit */ + riscv_cpu_update_mip(env_archcpu(env), MIP_SGEIP, + BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); return RISCV_EXCP_NONE; } @@ -1161,14 +2688,9 @@ static RISCVException write_htinst(CPURISCVState *env, int csrno, static RISCVException read_hgeip(CPURISCVState *env, int csrno, target_ulong *val) { - qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN."); 
- return RISCV_EXCP_NONE; -} - -static RISCVException write_hgeip(CPURISCVState *env, int csrno, - target_ulong val) -{ - qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN."); + if (val) { + *val = env->hgeip; + } return RISCV_EXCP_NONE; } @@ -1204,7 +2726,7 @@ static RISCVException write_htimedelta(CPURISCVState *env, int csrno, return RISCV_EXCP_ILLEGAL_INST; } - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val); } else { env->htimedelta = val; @@ -1234,6 +2756,110 @@ static RISCVException write_htimedeltah(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static int read_hvictl(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->hvictl; + return RISCV_EXCP_NONE; +} + +static int write_hvictl(CPURISCVState *env, int csrno, target_ulong val) +{ + env->hvictl = val & HVICTL_VALID_MASK; + return RISCV_EXCP_NONE; +} + +static int read_hvipriox(CPURISCVState *env, int first_index, + uint8_t *iprio, target_ulong *val) +{ + int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32); + + /* First index has to be a multiple of number of irqs per register */ + if (first_index % num_irqs) { + return (riscv_cpu_virt_enabled(env)) ? + RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; + } + + /* Fill-up return value */ + *val = 0; + for (i = 0; i < num_irqs; i++) { + if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) { + continue; + } + if (rdzero) { + continue; + } + *val |= ((target_ulong)iprio[irq]) << (i * 8); + } + + return RISCV_EXCP_NONE; +} + +static int write_hvipriox(CPURISCVState *env, int first_index, + uint8_t *iprio, target_ulong val) +{ + int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32); + + /* First index has to be a multiple of number of irqs per register */ + if (first_index % num_irqs) { + return (riscv_cpu_virt_enabled(env)) ? 
+ RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; + } + + /* Fill-up priority arrary */ + for (i = 0; i < num_irqs; i++) { + if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) { + continue; + } + if (rdzero) { + iprio[irq] = 0; + } else { + iprio[irq] = (val >> (i * 8)) & 0xff; + } + } + + return RISCV_EXCP_NONE; +} + +static int read_hviprio1(CPURISCVState *env, int csrno, target_ulong *val) +{ + return read_hvipriox(env, 0, env->hviprio, val); +} + +static int write_hviprio1(CPURISCVState *env, int csrno, target_ulong val) +{ + return write_hvipriox(env, 0, env->hviprio, val); +} + +static int read_hviprio1h(CPURISCVState *env, int csrno, target_ulong *val) +{ + return read_hvipriox(env, 4, env->hviprio, val); +} + +static int write_hviprio1h(CPURISCVState *env, int csrno, target_ulong val) +{ + return write_hvipriox(env, 4, env->hviprio, val); +} + +static int read_hviprio2(CPURISCVState *env, int csrno, target_ulong *val) +{ + return read_hvipriox(env, 8, env->hviprio, val); +} + +static int write_hviprio2(CPURISCVState *env, int csrno, target_ulong val) +{ + return write_hvipriox(env, 8, env->hviprio, val); +} + +static int read_hviprio2h(CPURISCVState *env, int csrno, target_ulong *val) +{ + return read_hvipriox(env, 12, env->hviprio, val); +} + +static int write_hviprio2h(CPURISCVState *env, int csrno, target_ulong val) +{ + return write_hvipriox(env, 12, env->hviprio, val); +} + /* Virtual CSR Registers */ static RISCVException read_vsstatus(CPURISCVState *env, int csrno, target_ulong *val) @@ -1246,6 +2872,9 @@ static RISCVException write_vsstatus(CPURISCVState *env, int csrno, target_ulong val) { uint64_t mask = (target_ulong)-1; + if ((val & VSSTATUS64_UXL) == 0) { + mask &= ~VSSTATUS64_UXL; + } env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val; return RISCV_EXCP_NONE; } @@ -1376,9 +3005,23 @@ static RISCVException write_mseccfg(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static bool check_pmp_reg_index(CPURISCVState *env, uint32_t reg_index) +{ + /* TODO: RV128 restriction check */ + if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) { + return false; + } + return true; +} + static RISCVException read_pmpcfg(CPURISCVState *env, int csrno, target_ulong *val) { + uint32_t reg_index = csrno - CSR_PMPCFG0; + + if (!check_pmp_reg_index(env, reg_index)) { + return RISCV_EXCP_ILLEGAL_INST; + } *val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0); return RISCV_EXCP_NONE; } @@ -1386,6 +3029,11 @@ static RISCVException read_pmpcfg(CPURISCVState *env, int csrno, static RISCVException write_pmpcfg(CPURISCVState *env, int csrno, target_ulong val) { + uint32_t reg_index = csrno - CSR_PMPCFG0; + + if (!check_pmp_reg_index(env, reg_index)) { + return RISCV_EXCP_ILLEGAL_INST; + } pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val); return RISCV_EXCP_NONE; } @@ -1404,8 +3052,376 @@ static RISCVException write_pmpaddr(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static RISCVException read_tselect(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = tselect_csr_read(env); + return RISCV_EXCP_NONE; +} + +static RISCVException write_tselect(CPURISCVState *env, int csrno, + target_ulong val) +{ + tselect_csr_write(env, val); + return RISCV_EXCP_NONE; +} + +static RISCVException read_tdata(CPURISCVState *env, int csrno, + target_ulong *val) +{ + /* return 0 in tdata1 to end the trigger enumeration */ + if (env->trigger_cur >= RV_MAX_TRIGGERS && csrno == CSR_TDATA1) { + *val = 0; + return RISCV_EXCP_NONE; + } + + if 
(!tdata_available(env, csrno - CSR_TDATA1)) { + return RISCV_EXCP_ILLEGAL_INST; + } + + *val = tdata_csr_read(env, csrno - CSR_TDATA1); + return RISCV_EXCP_NONE; +} + +static RISCVException write_tdata(CPURISCVState *env, int csrno, + target_ulong val) +{ + if (!tdata_available(env, csrno - CSR_TDATA1)) { + return RISCV_EXCP_ILLEGAL_INST; + } + + tdata_csr_write(env, csrno - CSR_TDATA1, val); + return RISCV_EXCP_NONE; +} + +static RISCVException read_tinfo(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = tinfo_csr_read(env); + return RISCV_EXCP_NONE; +} + +/* + * Functions to access Pointer Masking feature registers + * We have to check if current priv lvl could modify + * csr in given mode + */ +static bool check_pm_current_disabled(CPURISCVState *env, int csrno) +{ + int csr_priv = get_field(csrno, 0x300); + int pm_current; + + if (env->debugger) { + return false; + } + /* + * If priv lvls differ that means we're accessing csr from higher priv lvl, + * so allow the access + */ + if (env->priv != csr_priv) { + return false; + } + switch (env->priv) { + case PRV_M: + pm_current = get_field(env->mmte, M_PM_CURRENT); + break; + case PRV_S: + pm_current = get_field(env->mmte, S_PM_CURRENT); + break; + case PRV_U: + pm_current = get_field(env->mmte, U_PM_CURRENT); + break; + default: + g_assert_not_reached(); + } + /* It's same priv lvl, so we allow to modify csr only if pm.current==1 */ + return !pm_current; +} + +static RISCVException read_mmte(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->mmte & MMTE_MASK; + return RISCV_EXCP_NONE; +} + +static RISCVException write_mmte(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mstatus; + target_ulong wpri_val = val & MMTE_MASK; + + if (val != wpri_val) { + qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n", + "MMTE: WPRI violation written 0x", val, + "vs expected 0x", wpri_val); + } + /* for machine mode pm.current is hardwired to 1 */ + wpri_val |= MMTE_M_PM_CURRENT; + + /* hardwiring pm.instruction bit to 0, since it's not supported yet */ + wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN); + env->mmte = wpri_val | PM_EXT_DIRTY; + riscv_cpu_update_mask(env); + + /* Set XS and SD bits, since PM CSRs are dirty */ + mstatus = env->mstatus | MSTATUS_XS; + write_mstatus(env, csrno, mstatus); + return RISCV_EXCP_NONE; +} + +static RISCVException read_smte(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->mmte & SMTE_MASK; + return RISCV_EXCP_NONE; +} + +static RISCVException write_smte(CPURISCVState *env, int csrno, + target_ulong val) +{ + target_ulong wpri_val = val & SMTE_MASK; + + if (val != wpri_val) { + qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n", + "SMTE: WPRI violation written 0x", val, + "vs expected 0x", wpri_val); + } + + /* if pm.current==0 we can't modify current PM CSRs */ + if (check_pm_current_disabled(env, csrno)) { + return RISCV_EXCP_NONE; + } + + wpri_val |= (env->mmte & ~SMTE_MASK); + write_mmte(env, csrno, wpri_val); + return RISCV_EXCP_NONE; +} + +static RISCVException read_umte(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->mmte & UMTE_MASK; + return RISCV_EXCP_NONE; +} + +static RISCVException write_umte(CPURISCVState *env, int csrno, + target_ulong val) +{ + target_ulong wpri_val = val & UMTE_MASK; + + if (val != wpri_val) { + qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n", + "UMTE: WPRI violation written 0x", val, + "vs expected 0x", 
wpri_val); + } + + if (check_pm_current_disabled(env, csrno)) { + return RISCV_EXCP_NONE; + } + + wpri_val |= (env->mmte & ~UMTE_MASK); + write_mmte(env, csrno, wpri_val); + return RISCV_EXCP_NONE; +} + +static RISCVException read_mpmmask(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->mpmmask; + return RISCV_EXCP_NONE; +} + +static RISCVException write_mpmmask(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mstatus; + + env->mpmmask = val; + if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) { + env->cur_pmmask = val; + } + env->mmte |= PM_EXT_DIRTY; + + /* Set XS and SD bits, since PM CSRs are dirty */ + mstatus = env->mstatus | MSTATUS_XS; + write_mstatus(env, csrno, mstatus); + return RISCV_EXCP_NONE; +} + +static RISCVException read_spmmask(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->spmmask; + return RISCV_EXCP_NONE; +} + +static RISCVException write_spmmask(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mstatus; + + /* if pm.current==0 we can't modify current PM CSRs */ + if (check_pm_current_disabled(env, csrno)) { + return RISCV_EXCP_NONE; + } + env->spmmask = val; + if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) { + env->cur_pmmask = val; + } + env->mmte |= PM_EXT_DIRTY; + + /* Set XS and SD bits, since PM CSRs are dirty */ + mstatus = env->mstatus | MSTATUS_XS; + write_mstatus(env, csrno, mstatus); + return RISCV_EXCP_NONE; +} + +static RISCVException read_upmmask(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->upmmask; + return RISCV_EXCP_NONE; +} + +static RISCVException write_upmmask(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mstatus; + + /* if pm.current==0 we can't modify current PM CSRs */ + if (check_pm_current_disabled(env, csrno)) { + return RISCV_EXCP_NONE; + } + env->upmmask = val; + if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) { + env->cur_pmmask = val; + } + env->mmte |= PM_EXT_DIRTY; + + /* Set XS and SD bits, since PM CSRs are dirty */ + mstatus = env->mstatus | MSTATUS_XS; + write_mstatus(env, csrno, mstatus); + return RISCV_EXCP_NONE; +} + +static RISCVException read_mpmbase(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->mpmbase; + return RISCV_EXCP_NONE; +} + +static RISCVException write_mpmbase(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mstatus; + + env->mpmbase = val; + if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) { + env->cur_pmbase = val; + } + env->mmte |= PM_EXT_DIRTY; + + /* Set XS and SD bits, since PM CSRs are dirty */ + mstatus = env->mstatus | MSTATUS_XS; + write_mstatus(env, csrno, mstatus); + return RISCV_EXCP_NONE; +} + +static RISCVException read_spmbase(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->spmbase; + return RISCV_EXCP_NONE; +} + +static RISCVException write_spmbase(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mstatus; + + /* if pm.current==0 we can't modify current PM CSRs */ + if (check_pm_current_disabled(env, csrno)) { + return RISCV_EXCP_NONE; + } + env->spmbase = val; + if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) { + env->cur_pmbase = val; + } + env->mmte |= PM_EXT_DIRTY; + + /* Set XS and SD bits, since PM CSRs are dirty */ + mstatus = env->mstatus | MSTATUS_XS; + write_mstatus(env, csrno, mstatus); + return RISCV_EXCP_NONE; +} + +static RISCVException read_upmbase(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->upmbase; + return RISCV_EXCP_NONE; +} + +static 
RISCVException write_upmbase(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mstatus; + + /* if pm.current==0 we can't modify current PM CSRs */ + if (check_pm_current_disabled(env, csrno)) { + return RISCV_EXCP_NONE; + } + env->upmbase = val; + if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) { + env->cur_pmbase = val; + } + env->mmte |= PM_EXT_DIRTY; + + /* Set XS and SD bits, since PM CSRs are dirty */ + mstatus = env->mstatus | MSTATUS_XS; + write_mstatus(env, csrno, mstatus); + return RISCV_EXCP_NONE; +} + #endif +/* Crypto Extension */ +static RISCVException rmw_seed(CPURISCVState *env, int csrno, + target_ulong *ret_value, + target_ulong new_value, + target_ulong write_mask) +{ + uint16_t random_v; + Error *random_e = NULL; + int random_r; + target_ulong rval; + + random_r = qemu_guest_getrandom(&random_v, 2, &random_e); + if (unlikely(random_r < 0)) { + /* + * Failed, for unknown reasons in the crypto subsystem. + * The best we can do is log the reason and return a + * failure indication to the guest. There is no reason + * we know to expect the failure to be transitory, so + * indicate DEAD to avoid having the guest spin on WAIT. + */ + qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s", + __func__, error_get_pretty(random_e)); + error_free(random_e); + rval = SEED_OPST_DEAD; + } else { + rval = random_v | SEED_OPST_ES16; + } + + if (ret_value) { + *ret_value = rval; + } + + return RISCV_EXCP_NONE; +} + /* * riscv_csrrw - read and/or update control and status register * @@ -1415,50 +3431,69 @@ static RISCVException write_pmpaddr(CPURISCVState *env, int csrno, * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value); */ -RISCVException riscv_csrrw(CPURISCVState *env, int csrno, - target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask) +static inline RISCVException riscv_csrrw_check(CPURISCVState *env, + int csrno, + bool write_mask, + RISCVCPU *cpu) { - RISCVException ret; - target_ulong old_value; - RISCVCPU *cpu = env_archcpu(env); - - /* check privileges and return -1 if check fails */ -#if !defined(CONFIG_USER_ONLY) - int effective_priv = env->priv; + /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */ int read_only = get_field(csrno, 0xC00) == 3; - - if (riscv_has_ext(env, RVH) && - env->priv == PRV_S && - !riscv_cpu_virt_enabled(env)) { - /* - * We are in S mode without virtualisation, therefore we are in HS Mode. - * Add 1 to the effective privledge level to allow us to access the - * Hypervisor CSRs. - */ - effective_priv++; - } - - if ((write_mask && read_only) || - (!env->debugger && (effective_priv < get_field(csrno, 0x300)))) { - return RISCV_EXCP_ILLEGAL_INST; - } -#endif + int csr_min_priv = csr_ops[csrno].min_priv_ver; /* ensure the CSR extension is enabled. */ if (!cpu->cfg.ext_icsr) { return RISCV_EXCP_ILLEGAL_INST; } + if (env->priv_ver < csr_min_priv) { + return RISCV_EXCP_ILLEGAL_INST; + } + /* check predicate */ if (!csr_ops[csrno].predicate) { return RISCV_EXCP_ILLEGAL_INST; } - ret = csr_ops[csrno].predicate(env, csrno); + + if (write_mask && read_only) { + return RISCV_EXCP_ILLEGAL_INST; + } + + RISCVException ret = csr_ops[csrno].predicate(env, csrno); if (ret != RISCV_EXCP_NONE) { return ret; } +#if !defined(CONFIG_USER_ONLY) + int csr_priv, effective_priv = env->priv; + + if (riscv_has_ext(env, RVH) && env->priv == PRV_S && + !riscv_cpu_virt_enabled(env)) { + /* + * We are in HS mode. Add 1 to the effective privledge level to + * allow us to access the Hypervisor CSRs. 
+ */ + effective_priv++; + } + + csr_priv = get_field(csrno, 0x300); + if (!env->debugger && (effective_priv < csr_priv)) { + if (csr_priv == (PRV_S + 1) && riscv_cpu_virt_enabled(env)) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + } + return RISCV_EXCP_ILLEGAL_INST; + } +#endif + return RISCV_EXCP_NONE; +} + +static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno, + target_ulong *ret_value, + target_ulong new_value, + target_ulong write_mask) +{ + RISCVException ret; + target_ulong old_value; + /* execute combined read/write operation if it exists */ if (csr_ops[csrno].op) { return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask); @@ -1493,6 +3528,92 @@ RISCVException riscv_csrrw(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +RISCVException riscv_csrrw(CPURISCVState *env, int csrno, + target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask) +{ + RISCVCPU *cpu = env_archcpu(env); + + RISCVException ret = riscv_csrrw_check(env, csrno, write_mask, cpu); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + + return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask); +} + +static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno, + Int128 *ret_value, + Int128 new_value, + Int128 write_mask) +{ + RISCVException ret; + Int128 old_value; + + /* read old value */ + ret = csr_ops[csrno].read128(env, csrno, &old_value); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + + /* write value if writable and write mask set, otherwise drop writes */ + if (int128_nz(write_mask)) { + new_value = int128_or(int128_and(old_value, int128_not(write_mask)), + int128_and(new_value, write_mask)); + if (csr_ops[csrno].write128) { + ret = csr_ops[csrno].write128(env, csrno, new_value); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + } else if (csr_ops[csrno].write) { + /* avoids having to write wrappers for all registers */ + ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value)); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + } + } + + /* return old value */ + if (ret_value) { + *ret_value = old_value; + } + + return RISCV_EXCP_NONE; +} + +RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno, + Int128 *ret_value, + Int128 new_value, Int128 write_mask) +{ + RISCVException ret; + RISCVCPU *cpu = env_archcpu(env); + + ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask), cpu); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + + if (csr_ops[csrno].read128) { + return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask); + } + + /* + * Fall back to 64-bit version for now, if the 128-bit alternative isn't + * at all defined. + * Note, some CSRs don't need to extend to MXLEN (64 upper bits non + * significant), for those, this fallback is correctly handling the accesses + */ + target_ulong old_value; + ret = riscv_csrrw_do64(env, csrno, &old_value, + int128_getlo(new_value), + int128_getlo(write_mask)); + if (ret == RISCV_EXCP_NONE && ret_value) { + *ret_value = int128_make64(old_value); + } + return ret; +} + /* * Debugger support. If not in user mode, set env->debugger before the * riscv_csrrw call and clear it after the call. 
@@ -1520,16 +3641,25 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_FRM] = { "frm", fs, read_frm, write_frm }, [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr }, /* Vector CSRs */ - [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart }, - [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat }, - [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm }, - [CSR_VL] = { "vl", vs, read_vl }, - [CSR_VTYPE] = { "vtype", vs, read_vtype }, + [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VL] = { "vl", vs, read_vl, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VTYPE] = { "vtype", vs, read_vtype, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VLENB] = { "vlenb", vs, read_vlenb, + .min_priv_ver = PRIV_VERSION_1_12_0 }, /* User Timers and Counters */ - [CSR_CYCLE] = { "cycle", ctr, read_instret }, - [CSR_INSTRET] = { "instret", ctr, read_instret }, - [CSR_CYCLEH] = { "cycleh", ctr32, read_instreth }, - [CSR_INSTRETH] = { "instreth", ctr32, read_instreth }, + [CSR_CYCLE] = { "cycle", ctr, read_hpmcounter }, + [CSR_INSTRET] = { "instret", ctr, read_hpmcounter }, + [CSR_CYCLEH] = { "cycleh", ctr32, read_hpmcounterh }, + [CSR_INSTRETH] = { "instreth", ctr32, read_hpmcounterh }, /* * In privileged mode, the monitor will have to emulate TIME CSRs only if @@ -1538,83 +3668,217 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_TIME] = { "time", ctr, read_time }, [CSR_TIMEH] = { "timeh", ctr32, read_timeh }, + /* Crypto Extension */ + [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed }, + #if !defined(CONFIG_USER_ONLY) /* Machine Timers and Counters */ - [CSR_MCYCLE] = { "mcycle", any, read_instret }, - [CSR_MINSTRET] = { "minstret", any, read_instret }, - [CSR_MCYCLEH] = { "mcycleh", any32, read_instreth }, - [CSR_MINSTRETH] = { "minstreth", any32, read_instreth }, + [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter, + write_mhpmcounter }, + [CSR_MINSTRET] = { "minstret", any, read_hpmcounter, + write_mhpmcounter }, + [CSR_MCYCLEH] = { "mcycleh", any32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MINSTRETH] = { "minstreth", any32, read_hpmcounterh, + write_mhpmcounterh }, /* Machine Information Registers */ - [CSR_MVENDORID] = { "mvendorid", any, read_zero }, - [CSR_MARCHID] = { "marchid", any, read_zero }, - [CSR_MIMPID] = { "mimpid", any, read_zero }, - [CSR_MHARTID] = { "mhartid", any, read_mhartid }, + [CSR_MVENDORID] = { "mvendorid", any, read_mvendorid }, + [CSR_MARCHID] = { "marchid", any, read_marchid }, + [CSR_MIMPID] = { "mimpid", any, read_mimpid }, + [CSR_MHARTID] = { "mhartid", any, read_mhartid }, + [CSR_MCONFIGPTR] = { "mconfigptr", any, read_zero, + .min_priv_ver = PRIV_VERSION_1_12_0 }, /* Machine Trap Setup */ - [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus }, - [CSR_MISA] = { "misa", any, read_misa, write_misa }, - [CSR_MIDELEG] = { "mideleg", any, read_mideleg, write_mideleg }, - [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg }, - [CSR_MIE] = { "mie", any, read_mie, write_mie }, - [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec }, - [CSR_MCOUNTEREN] = { "mcounteren", any, read_mcounteren, write_mcounteren }, + [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus, + NULL, 
read_mstatus_i128 }, + [CSR_MISA] = { "misa", any, read_misa, write_misa, + NULL, read_misa_i128 }, + [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg }, + [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg }, + [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie }, + [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec }, + [CSR_MCOUNTEREN] = { "mcounteren", umode, read_mcounteren, + write_mcounteren }, - [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush, write_mstatush }, + [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush, + write_mstatush }, /* Machine Trap Handling */ - [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch }, + [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch, + NULL, read_mscratch_i128, write_mscratch_i128 }, [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc }, [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause }, [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval }, [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip }, + /* Machine-Level Window to Indirectly Accessed Registers (AIA) */ + [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect }, + [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg }, + + /* Machine-Level Interrupts (AIA) */ + [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei }, + [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi }, + + /* Virtual Interrupts for Supervisor Level (AIA) */ + [CSR_MVIEN] = { "mvien", aia_any, read_zero, write_ignore }, + [CSR_MVIP] = { "mvip", aia_any, read_zero, write_ignore }, + + /* Machine-Level High-Half CSRs (AIA) */ + [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh }, + [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh }, + [CSR_MVIENH] = { "mvienh", aia_any32, read_zero, write_ignore }, + [CSR_MVIPH] = { "mviph", aia_any32, read_zero, write_ignore }, + [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph }, + + /* Execution environment configuration */ + [CSR_MENVCFG] = { "menvcfg", umode, read_menvcfg, write_menvcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MENVCFGH] = { "menvcfgh", umode32, read_menvcfgh, write_menvcfgh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_SENVCFG] = { "senvcfg", smode, read_senvcfg, write_senvcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HENVCFG] = { "henvcfg", hmode, read_henvcfg, write_henvcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HENVCFGH] = { "henvcfgh", hmode32, read_henvcfgh, write_henvcfgh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + /* Supervisor Trap Setup */ - [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus }, - [CSR_SIE] = { "sie", smode, read_sie, write_sie }, - [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec }, - [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren, write_scounteren }, + [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus, + NULL, read_sstatus_i128 }, + [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie }, + [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec }, + [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren, + write_scounteren }, /* Supervisor Trap Handling */ - [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch }, + [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch, + NULL, read_sscratch_i128, write_sscratch_i128 }, [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc }, [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause }, - [CSR_STVAL] = { "stval", smode, read_stval, write_stval }, + [CSR_STVAL] = { "stval", smode, read_stval, 
write_stval }, [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip }, + [CSR_STIMECMP] = { "stimecmp", sstc, read_stimecmp, write_stimecmp, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_STIMECMPH] = { "stimecmph", sstc_32, read_stimecmph, write_stimecmph, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VSTIMECMP] = { "vstimecmp", sstc, read_vstimecmp, + write_vstimecmp, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VSTIMECMPH] = { "vstimecmph", sstc_32, read_vstimecmph, + write_vstimecmph, + .min_priv_ver = PRIV_VERSION_1_12_0 }, /* Supervisor Protection and Translation */ - [CSR_SATP] = { "satp", smode, read_satp, write_satp }, + [CSR_SATP] = { "satp", smode, read_satp, write_satp }, - [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus }, - [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg }, - [CSR_HIDELEG] = { "hideleg", hmode, read_hideleg, write_hideleg }, - [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip }, - [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip }, - [CSR_HIE] = { "hie", hmode, read_hie, write_hie }, - [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren, write_hcounteren }, - [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie }, - [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval }, - [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst }, - [CSR_HGEIP] = { "hgeip", hmode, read_hgeip, write_hgeip }, - [CSR_HGATP] = { "hgatp", hmode, read_hgatp, write_hgatp }, - [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta, write_htimedelta }, - [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah, write_htimedeltah }, + /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */ + [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect }, + [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg }, - [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus, write_vsstatus }, - [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip }, - [CSR_VSIE] = { "vsie", hmode, read_vsie, write_vsie }, - [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec }, - [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch, write_vsscratch }, - [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc }, - [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause }, - [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval }, - [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp }, + /* Supervisor-Level Interrupts (AIA) */ + [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei }, + [CSR_STOPI] = { "stopi", aia_smode, read_stopi }, - [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2 }, - [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst }, + /* Supervisor-Level High-Half CSRs (AIA) */ + [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh }, + [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph }, + + [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren, + write_hcounteren, + 
.min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HGEIP] = { "hgeip", hmode, read_hgeip, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HGATP] = { "hgatp", hmode, read_hgatp, write_hgatp, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta, + write_htimedelta, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah, + write_htimedeltah, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + + [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus, + write_vsstatus, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie , + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch, + write_vsscratch, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + + [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + + /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */ + [CSR_HVIEN] = { "hvien", aia_hmode, read_zero, write_ignore }, + [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl, + write_hvictl }, + [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1, + write_hviprio1 }, + [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2, + write_hviprio2 }, + + /* + * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA) + */ + [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL, + rmw_xiselect }, + [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg }, + + /* VS-Level Interrupts (H-extension with AIA) */ + [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei }, + [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi }, + + /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */ + [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL, + rmw_hidelegh }, + [CSR_HVIENH] = { "hvienh", aia_hmode32, read_zero, + write_ignore }, + [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph }, + [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h, + write_hviprio1h }, + [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h, + write_hviprio2h }, + [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh }, + [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph }, /* Physical Memory Protection */ - [CSR_MSECCFG] = { "mseccfg", epmp, read_mseccfg, write_mseccfg }, + [CSR_MSECCFG] = { "mseccfg", epmp, read_mseccfg, write_mseccfg, + .min_priv_ver = PRIV_VERSION_1_11_0 }, [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, 
write_pmpcfg }, [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg }, [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg }, @@ -1636,155 +3900,363 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr }, + /* Debug CSRs */ + [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect }, + [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata }, + [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata }, + [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata }, + [CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore }, + + /* User Pointer Masking */ + [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte }, + [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask, + write_upmmask }, + [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase, + write_upmbase }, + /* Machine Pointer Masking */ + [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte }, + [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask, + write_mpmmask }, + [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase, + write_mpmbase }, + /* Supervisor Pointer Masking */ + [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte }, + [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask, + write_spmmask }, + [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase, + write_spmbase }, + /* Performance Counters */ - [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_zero }, - [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_zero }, - [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_zero }, - [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_zero }, - [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_zero }, - [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_zero }, - [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_zero }, - [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_zero }, - [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_zero }, - [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_zero }, - [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_zero }, - [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_zero }, - [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_zero }, - [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_zero }, - [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_zero }, - [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_zero }, - [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_zero }, - [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_zero }, - [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_zero }, - [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_zero }, - [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_zero }, - [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_zero }, - [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_zero }, - [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_zero }, - [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_zero }, - [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_zero }, - [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_zero }, - [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_zero }, - [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_zero }, + [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER8] = { 
"hpmcounter8", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_hpmcounter }, + [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_hpmcounter }, - [CSR_MHPMCOUNTER3] = { "mhpmcounter3", any, read_zero }, - [CSR_MHPMCOUNTER4] = { "mhpmcounter4", any, read_zero }, - [CSR_MHPMCOUNTER5] = { "mhpmcounter5", any, read_zero }, - [CSR_MHPMCOUNTER6] = { "mhpmcounter6", any, read_zero }, - [CSR_MHPMCOUNTER7] = { "mhpmcounter7", any, read_zero }, - [CSR_MHPMCOUNTER8] = { "mhpmcounter8", any, read_zero }, - [CSR_MHPMCOUNTER9] = { "mhpmcounter9", any, read_zero }, - [CSR_MHPMCOUNTER10] = { "mhpmcounter10", any, read_zero }, - [CSR_MHPMCOUNTER11] = { "mhpmcounter11", any, read_zero }, - [CSR_MHPMCOUNTER12] = { "mhpmcounter12", any, read_zero }, - [CSR_MHPMCOUNTER13] = { "mhpmcounter13", any, read_zero }, - [CSR_MHPMCOUNTER14] = { "mhpmcounter14", any, read_zero }, - [CSR_MHPMCOUNTER15] = { "mhpmcounter15", any, read_zero }, - [CSR_MHPMCOUNTER16] = { "mhpmcounter16", any, read_zero }, - [CSR_MHPMCOUNTER17] = { "mhpmcounter17", any, read_zero }, - [CSR_MHPMCOUNTER18] = { "mhpmcounter18", any, read_zero }, - [CSR_MHPMCOUNTER19] = { "mhpmcounter19", any, read_zero }, - [CSR_MHPMCOUNTER20] = { "mhpmcounter20", any, read_zero }, - [CSR_MHPMCOUNTER21] = { "mhpmcounter21", any, read_zero }, - [CSR_MHPMCOUNTER22] = { "mhpmcounter22", any, read_zero }, - [CSR_MHPMCOUNTER23] = { "mhpmcounter23", any, read_zero }, - [CSR_MHPMCOUNTER24] = { "mhpmcounter24", any, read_zero }, - [CSR_MHPMCOUNTER25] = { "mhpmcounter25", any, read_zero }, - [CSR_MHPMCOUNTER26] = { "mhpmcounter26", any, read_zero }, - [CSR_MHPMCOUNTER27] = { "mhpmcounter27", any, read_zero }, - [CSR_MHPMCOUNTER28] = { "mhpmcounter28", any, read_zero }, - [CSR_MHPMCOUNTER29] = { "mhpmcounter29", any, read_zero }, - [CSR_MHPMCOUNTER30] = { "mhpmcounter30", any, read_zero }, - [CSR_MHPMCOUNTER31] = { "mhpmcounter31", any, read_zero }, + [CSR_MHPMCOUNTER3] = { "mhpmcounter3", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER4] = { "mhpmcounter4", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER5] = { "mhpmcounter5", mctr, read_hpmcounter, + write_mhpmcounter }, + 
[CSR_MHPMCOUNTER6] = { "mhpmcounter6", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER7] = { "mhpmcounter7", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER8] = { "mhpmcounter8", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER9] = { "mhpmcounter9", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER10] = { "mhpmcounter10", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER11] = { "mhpmcounter11", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER12] = { "mhpmcounter12", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER13] = { "mhpmcounter13", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER14] = { "mhpmcounter14", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER15] = { "mhpmcounter15", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER16] = { "mhpmcounter16", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER17] = { "mhpmcounter17", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER18] = { "mhpmcounter18", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER19] = { "mhpmcounter19", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER20] = { "mhpmcounter20", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER21] = { "mhpmcounter21", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER22] = { "mhpmcounter22", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER23] = { "mhpmcounter23", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER24] = { "mhpmcounter24", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER25] = { "mhpmcounter25", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER26] = { "mhpmcounter26", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER27] = { "mhpmcounter27", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER28] = { "mhpmcounter28", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER29] = { "mhpmcounter29", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_hpmcounter, + write_mhpmcounter }, + [CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_hpmcounter, + write_mhpmcounter }, - [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_zero }, - [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_zero }, - [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_zero }, - [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_zero }, - [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_zero }, - [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_zero }, - [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_zero }, - [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_zero }, - [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_zero }, - [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_zero }, - [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_zero }, - [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_zero }, - [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_zero }, - [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_zero }, - [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_zero }, - [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_zero }, - [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_zero }, - [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_zero }, - [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_zero }, - [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_zero }, - [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_zero }, - [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_zero }, - 
[CSR_MHPMEVENT25] = { "mhpmevent25", any, read_zero }, - [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_zero }, - [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_zero }, - [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_zero }, - [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_zero }, - [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_zero }, - [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_zero }, + [CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit, + write_mcountinhibit, + .min_priv_ver = PRIV_VERSION_1_11_0 }, - [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_zero }, - [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_zero }, - [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_zero }, - [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_zero }, - [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_zero }, - [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_zero }, - [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_zero }, - [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_zero }, - [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_zero }, - [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_zero }, - [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_zero }, - [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_zero }, - [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_zero }, - [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_zero }, - [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_zero }, - [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_zero }, - [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_zero }, - [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_zero }, - [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_zero }, - [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_zero }, - [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_zero }, - [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_zero }, - [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_zero }, - [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_zero }, - [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_zero }, - [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_zero }, - [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_zero }, - [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_zero }, - [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_zero }, + [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_mhpmevent, + write_mhpmevent }, + 
[CSR_MHPMEVENT18] = { "mhpmevent18", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_mhpmevent, + write_mhpmevent }, + [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent, + write_mhpmevent }, + + [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = 
PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf, read_mhpmeventh, + write_mhpmeventh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + + [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_hpmcounterh }, + [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_hpmcounterh }, + + [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER5H] = { 
"mhpmcounter5h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", mctr32, read_hpmcounterh, + write_mhpmcounterh }, + [CSR_SCOUNTOVF] = { "scountovf", sscofpmf, read_scountovf, + .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", any32, read_zero }, - [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", any32, read_zero }, - [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", any32, read_zero }, - [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", any32, read_zero }, - [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", any32, read_zero }, - [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", any32, read_zero }, - [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", any32, read_zero }, - [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", any32, read_zero }, - [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", any32, read_zero }, - [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", any32, read_zero }, - [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", any32, read_zero }, - [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", any32, read_zero }, - [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", any32, read_zero }, - [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", any32, read_zero }, - [CSR_MHPMCOUNTER17H] = { 
"mhpmcounter17h", any32, read_zero }, - [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", any32, read_zero }, - [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", any32, read_zero }, - [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", any32, read_zero }, - [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", any32, read_zero }, - [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", any32, read_zero }, - [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", any32, read_zero }, - [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", any32, read_zero }, - [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", any32, read_zero }, - [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", any32, read_zero }, - [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", any32, read_zero }, - [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", any32, read_zero }, - [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", any32, read_zero }, - [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", any32, read_zero }, - [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", any32, read_zero }, #endif /* !CONFIG_USER_ONLY */ }; diff --git a/target/riscv/debug.c b/target/riscv/debug.c new file mode 100644 index 0000000000..26ea764407 --- /dev/null +++ b/target/riscv/debug.c @@ -0,0 +1,731 @@ +/* + * QEMU RISC-V Native Debug Support + * + * Copyright (c) 2022 Wind River Systems, Inc. + * + * Author: + * Bin Meng + * + * This provides the native debug support via the Trigger Module, as defined + * in the RISC-V Debug Specification: + * https://github.com/riscv/riscv-debug-spec/raw/master/riscv-debug-stable.pdf + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "cpu.h" +#include "trace.h" +#include "exec/exec-all.h" + +/* + * The following M-mode trigger CSRs are implemented: + * + * - tselect + * - tdata1 + * - tdata2 + * - tdata3 + * - tinfo + * + * The following triggers are initialized by default: + * + * Index | Type | tdata mapping | Description + * ------+------+------------------------+------------ + * 0 | 2 | tdata1, tdata2 | Address / Data Match + * 1 | 2 | tdata1, tdata2 | Address / Data Match + */ + +/* tdata availability of a trigger */ +typedef bool tdata_avail[TDATA_NUM]; + +static tdata_avail tdata_mapping[TRIGGER_TYPE_NUM] = { + [TRIGGER_TYPE_NO_EXIST] = { false, false, false }, + [TRIGGER_TYPE_AD_MATCH] = { true, true, true }, + [TRIGGER_TYPE_INST_CNT] = { true, false, true }, + [TRIGGER_TYPE_INT] = { true, true, true }, + [TRIGGER_TYPE_EXCP] = { true, true, true }, + [TRIGGER_TYPE_AD_MATCH6] = { true, true, true }, + [TRIGGER_TYPE_EXT_SRC] = { true, false, false }, + [TRIGGER_TYPE_UNAVAIL] = { true, true, true } +}; + +/* only breakpoint size 1/2/4/8 supported */ +static int access_size[SIZE_NUM] = { + [SIZE_ANY] = 0, + [SIZE_1B] = 1, + [SIZE_2B] = 2, + [SIZE_4B] = 4, + [SIZE_6B] = -1, + [SIZE_8B] = 8, + [6 ... 
15] = -1, +}; + +static inline target_ulong extract_trigger_type(CPURISCVState *env, + target_ulong tdata1) +{ + switch (riscv_cpu_mxl(env)) { + case MXL_RV32: + return extract32(tdata1, 28, 4); + case MXL_RV64: + case MXL_RV128: + return extract64(tdata1, 60, 4); + default: + g_assert_not_reached(); + } +} + +static inline target_ulong get_trigger_type(CPURISCVState *env, + target_ulong trigger_index) +{ + return extract_trigger_type(env, env->tdata1[trigger_index]); +} + +static trigger_action_t get_trigger_action(CPURISCVState *env, + target_ulong trigger_index) +{ + target_ulong tdata1 = env->tdata1[trigger_index]; + int trigger_type = get_trigger_type(env, trigger_index); + trigger_action_t action = DBG_ACTION_NONE; + + switch (trigger_type) { + case TRIGGER_TYPE_AD_MATCH: + action = (tdata1 & TYPE2_ACTION) >> 12; + break; + case TRIGGER_TYPE_AD_MATCH6: + action = (tdata1 & TYPE6_ACTION) >> 12; + break; + case TRIGGER_TYPE_INST_CNT: + case TRIGGER_TYPE_INT: + case TRIGGER_TYPE_EXCP: + case TRIGGER_TYPE_EXT_SRC: + qemu_log_mask(LOG_UNIMP, "trigger type: %d is not supported\n", + trigger_type); + break; + case TRIGGER_TYPE_NO_EXIST: + case TRIGGER_TYPE_UNAVAIL: + qemu_log_mask(LOG_GUEST_ERROR, "trigger type: %d does not exit\n", + trigger_type); + break; + default: + g_assert_not_reached(); + } + + return action; +} + +static inline target_ulong build_tdata1(CPURISCVState *env, + trigger_type_t type, + bool dmode, target_ulong data) +{ + target_ulong tdata1; + + switch (riscv_cpu_mxl(env)) { + case MXL_RV32: + tdata1 = RV32_TYPE(type) | + (dmode ? RV32_DMODE : 0) | + (data & RV32_DATA_MASK); + break; + case MXL_RV64: + case MXL_RV128: + tdata1 = RV64_TYPE(type) | + (dmode ? RV64_DMODE : 0) | + (data & RV64_DATA_MASK); + break; + default: + g_assert_not_reached(); + } + + return tdata1; +} + +bool tdata_available(CPURISCVState *env, int tdata_index) +{ + int trigger_type = get_trigger_type(env, env->trigger_cur); + + if (unlikely(tdata_index >= TDATA_NUM)) { + return false; + } + + return tdata_mapping[trigger_type][tdata_index]; +} + +target_ulong tselect_csr_read(CPURISCVState *env) +{ + return env->trigger_cur; +} + +void tselect_csr_write(CPURISCVState *env, target_ulong val) +{ + if (val < RV_MAX_TRIGGERS) { + env->trigger_cur = val; + } +} + +static target_ulong tdata1_validate(CPURISCVState *env, target_ulong val, + trigger_type_t t) +{ + uint32_t type, dmode; + target_ulong tdata1; + + switch (riscv_cpu_mxl(env)) { + case MXL_RV32: + type = extract32(val, 28, 4); + dmode = extract32(val, 27, 1); + tdata1 = RV32_TYPE(t); + break; + case MXL_RV64: + case MXL_RV128: + type = extract64(val, 60, 4); + dmode = extract64(val, 59, 1); + tdata1 = RV64_TYPE(t); + break; + default: + g_assert_not_reached(); + } + + if (type != t) { + qemu_log_mask(LOG_GUEST_ERROR, + "ignoring type write to tdata1 register\n"); + } + + if (dmode != 0) { + qemu_log_mask(LOG_UNIMP, "debug mode is not supported\n"); + } + + return tdata1; +} + +static inline void warn_always_zero_bit(target_ulong val, target_ulong mask, + const char *msg) +{ + if (val & mask) { + qemu_log_mask(LOG_UNIMP, "%s bit is always zero\n", msg); + } +} + +static void do_trigger_action(CPURISCVState *env, target_ulong trigger_index) +{ + trigger_action_t action = get_trigger_action(env, trigger_index); + + switch (action) { + case DBG_ACTION_NONE: + break; + case DBG_ACTION_BP: + riscv_raise_exception(env, RISCV_EXCP_BREAKPOINT, 0); + break; + case DBG_ACTION_DBG_MODE: + case DBG_ACTION_TRACE0: + case DBG_ACTION_TRACE1: + case 
DBG_ACTION_TRACE2: + case DBG_ACTION_TRACE3: + case DBG_ACTION_EXT_DBG0: + case DBG_ACTION_EXT_DBG1: + qemu_log_mask(LOG_UNIMP, "action: %d is not supported\n", action); + break; + default: + g_assert_not_reached(); + } +} + +/* type 2 trigger */ + +static uint32_t type2_breakpoint_size(CPURISCVState *env, target_ulong ctrl) +{ + uint32_t size, sizelo, sizehi = 0; + + if (riscv_cpu_mxl(env) == MXL_RV64) { + sizehi = extract32(ctrl, 21, 2); + } + sizelo = extract32(ctrl, 16, 2); + size = (sizehi << 2) | sizelo; + + return size; +} + +static inline bool type2_breakpoint_enabled(target_ulong ctrl) +{ + bool mode = !!(ctrl & (TYPE2_U | TYPE2_S | TYPE2_M)); + bool rwx = !!(ctrl & (TYPE2_LOAD | TYPE2_STORE | TYPE2_EXEC)); + + return mode && rwx; +} + +static target_ulong type2_mcontrol_validate(CPURISCVState *env, + target_ulong ctrl) +{ + target_ulong val; + uint32_t size; + + /* validate the generic part first */ + val = tdata1_validate(env, ctrl, TRIGGER_TYPE_AD_MATCH); + + /* validate unimplemented (always zero) bits */ + warn_always_zero_bit(ctrl, TYPE2_MATCH, "match"); + warn_always_zero_bit(ctrl, TYPE2_CHAIN, "chain"); + warn_always_zero_bit(ctrl, TYPE2_ACTION, "action"); + warn_always_zero_bit(ctrl, TYPE2_TIMING, "timing"); + warn_always_zero_bit(ctrl, TYPE2_SELECT, "select"); + warn_always_zero_bit(ctrl, TYPE2_HIT, "hit"); + + /* validate size encoding */ + size = type2_breakpoint_size(env, ctrl); + if (access_size[size] == -1) { + qemu_log_mask(LOG_UNIMP, "access size %d is not supported, using SIZE_ANY\n", + size); + } else { + val |= (ctrl & TYPE2_SIZELO); + if (riscv_cpu_mxl(env) == MXL_RV64) { + val |= (ctrl & TYPE2_SIZEHI); + } + } + + /* keep the mode and attribute bits */ + val |= (ctrl & (TYPE2_U | TYPE2_S | TYPE2_M | + TYPE2_LOAD | TYPE2_STORE | TYPE2_EXEC)); + + return val; +} + +static void type2_breakpoint_insert(CPURISCVState *env, target_ulong index) +{ + target_ulong ctrl = env->tdata1[index]; + target_ulong addr = env->tdata2[index]; + bool enabled = type2_breakpoint_enabled(ctrl); + CPUState *cs = env_cpu(env); + int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; + uint32_t size; + + if (!enabled) { + return; + } + + if (ctrl & TYPE2_EXEC) { + cpu_breakpoint_insert(cs, addr, flags, &env->cpu_breakpoint[index]); + } + + if (ctrl & TYPE2_LOAD) { + flags |= BP_MEM_READ; + } + if (ctrl & TYPE2_STORE) { + flags |= BP_MEM_WRITE; + } + + if (flags & BP_MEM_ACCESS) { + size = type2_breakpoint_size(env, ctrl); + if (size != 0) { + cpu_watchpoint_insert(cs, addr, size, flags, + &env->cpu_watchpoint[index]); + } else { + cpu_watchpoint_insert(cs, addr, 8, flags, + &env->cpu_watchpoint[index]); + } + } +} + +static void type2_breakpoint_remove(CPURISCVState *env, target_ulong index) +{ + CPUState *cs = env_cpu(env); + + if (env->cpu_breakpoint[index]) { + cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]); + env->cpu_breakpoint[index] = NULL; + } + + if (env->cpu_watchpoint[index]) { + cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]); + env->cpu_watchpoint[index] = NULL; + } +} + +static void type2_reg_write(CPURISCVState *env, target_ulong index, + int tdata_index, target_ulong val) +{ + target_ulong new_val; + + switch (tdata_index) { + case TDATA1: + new_val = type2_mcontrol_validate(env, val); + if (new_val != env->tdata1[index]) { + env->tdata1[index] = new_val; + type2_breakpoint_remove(env, index); + type2_breakpoint_insert(env, index); + } + break; + case TDATA2: + if (val != env->tdata2[index]) { + env->tdata2[index] = val; + type2_breakpoint_remove(env, 
index); + type2_breakpoint_insert(env, index); + } + break; + case TDATA3: + qemu_log_mask(LOG_UNIMP, + "tdata3 is not supported for type 2 trigger\n"); + break; + default: + g_assert_not_reached(); + } + + return; +} + +/* type 6 trigger */ + +static inline bool type6_breakpoint_enabled(target_ulong ctrl) +{ + bool mode = !!(ctrl & (TYPE6_VU | TYPE6_VS | TYPE6_U | TYPE6_S | TYPE6_M)); + bool rwx = !!(ctrl & (TYPE6_LOAD | TYPE6_STORE | TYPE6_EXEC)); + + return mode && rwx; +} + +static target_ulong type6_mcontrol6_validate(CPURISCVState *env, + target_ulong ctrl) +{ + target_ulong val; + uint32_t size; + + /* validate the generic part first */ + val = tdata1_validate(env, ctrl, TRIGGER_TYPE_AD_MATCH6); + + /* validate unimplemented (always zero) bits */ + warn_always_zero_bit(ctrl, TYPE6_MATCH, "match"); + warn_always_zero_bit(ctrl, TYPE6_CHAIN, "chain"); + warn_always_zero_bit(ctrl, TYPE6_ACTION, "action"); + warn_always_zero_bit(ctrl, TYPE6_TIMING, "timing"); + warn_always_zero_bit(ctrl, TYPE6_SELECT, "select"); + warn_always_zero_bit(ctrl, TYPE6_HIT, "hit"); + + /* validate size encoding */ + size = extract32(ctrl, 16, 4); + if (access_size[size] == -1) { + qemu_log_mask(LOG_UNIMP, "access size %d is not supported, using SIZE_ANY\n", + size); + } else { + val |= (ctrl & TYPE6_SIZE); + } + + /* keep the mode and attribute bits */ + val |= (ctrl & (TYPE6_VU | TYPE6_VS | TYPE6_U | TYPE6_S | TYPE6_M | + TYPE6_LOAD | TYPE6_STORE | TYPE6_EXEC)); + + return val; +} + +static void type6_breakpoint_insert(CPURISCVState *env, target_ulong index) +{ + target_ulong ctrl = env->tdata1[index]; + target_ulong addr = env->tdata2[index]; + bool enabled = type6_breakpoint_enabled(ctrl); + CPUState *cs = env_cpu(env); + int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; + uint32_t size; + + if (!enabled) { + return; + } + + if (ctrl & TYPE6_EXEC) { + cpu_breakpoint_insert(cs, addr, flags, &env->cpu_breakpoint[index]); + } + + if (ctrl & TYPE6_LOAD) { + flags |= BP_MEM_READ; + } + + if (ctrl & TYPE6_STORE) { + flags |= BP_MEM_WRITE; + } + + if (flags & BP_MEM_ACCESS) { + size = extract32(ctrl, 16, 4); + if (size != 0) { + cpu_watchpoint_insert(cs, addr, size, flags, + &env->cpu_watchpoint[index]); + } else { + cpu_watchpoint_insert(cs, addr, 8, flags, + &env->cpu_watchpoint[index]); + } + } +} + +static void type6_breakpoint_remove(CPURISCVState *env, target_ulong index) +{ + type2_breakpoint_remove(env, index); +} + +static void type6_reg_write(CPURISCVState *env, target_ulong index, + int tdata_index, target_ulong val) +{ + target_ulong new_val; + + switch (tdata_index) { + case TDATA1: + new_val = type6_mcontrol6_validate(env, val); + if (new_val != env->tdata1[index]) { + env->tdata1[index] = new_val; + type6_breakpoint_remove(env, index); + type6_breakpoint_insert(env, index); + } + break; + case TDATA2: + if (val != env->tdata2[index]) { + env->tdata2[index] = val; + type6_breakpoint_remove(env, index); + type6_breakpoint_insert(env, index); + } + break; + case TDATA3: + qemu_log_mask(LOG_UNIMP, + "tdata3 is not supported for type 6 trigger\n"); + break; + default: + g_assert_not_reached(); + } + + return; +} + +target_ulong tdata_csr_read(CPURISCVState *env, int tdata_index) +{ + switch (tdata_index) { + case TDATA1: + return env->tdata1[env->trigger_cur]; + case TDATA2: + return env->tdata2[env->trigger_cur]; + case TDATA3: + return env->tdata3[env->trigger_cur]; + default: + g_assert_not_reached(); + } +} + +void tdata_csr_write(CPURISCVState *env, int tdata_index, target_ulong val) +{ + int 
trigger_type; + + if (tdata_index == TDATA1) { + trigger_type = extract_trigger_type(env, val); + } else { + trigger_type = get_trigger_type(env, env->trigger_cur); + } + + switch (trigger_type) { + case TRIGGER_TYPE_AD_MATCH: + type2_reg_write(env, env->trigger_cur, tdata_index, val); + break; + case TRIGGER_TYPE_AD_MATCH6: + type6_reg_write(env, env->trigger_cur, tdata_index, val); + break; + case TRIGGER_TYPE_INST_CNT: + case TRIGGER_TYPE_INT: + case TRIGGER_TYPE_EXCP: + case TRIGGER_TYPE_EXT_SRC: + qemu_log_mask(LOG_UNIMP, "trigger type: %d is not supported\n", + trigger_type); + break; + case TRIGGER_TYPE_NO_EXIST: + case TRIGGER_TYPE_UNAVAIL: + qemu_log_mask(LOG_GUEST_ERROR, "trigger type: %d does not exit\n", + trigger_type); + break; + default: + g_assert_not_reached(); + } +} + +target_ulong tinfo_csr_read(CPURISCVState *env) +{ + /* assume all triggers support the same types of triggers */ + return BIT(TRIGGER_TYPE_AD_MATCH) | + BIT(TRIGGER_TYPE_AD_MATCH6); +} + +void riscv_cpu_debug_excp_handler(CPUState *cs) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + + if (cs->watchpoint_hit) { + if (cs->watchpoint_hit->flags & BP_CPU) { + cs->watchpoint_hit = NULL; + do_trigger_action(env, DBG_ACTION_BP); + } + } else { + if (cpu_breakpoint_test(cs, env->pc, BP_CPU)) { + do_trigger_action(env, DBG_ACTION_BP); + } + } +} + +bool riscv_cpu_debug_check_breakpoint(CPUState *cs) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + CPUBreakpoint *bp; + target_ulong ctrl; + target_ulong pc; + int trigger_type; + int i; + + QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { + for (i = 0; i < RV_MAX_TRIGGERS; i++) { + trigger_type = get_trigger_type(env, i); + + switch (trigger_type) { + case TRIGGER_TYPE_AD_MATCH: + /* type 2 trigger cannot be fired in VU/VS mode */ + if (riscv_cpu_virt_enabled(env)) { + return false; + } + + ctrl = env->tdata1[i]; + pc = env->tdata2[i]; + + if ((ctrl & TYPE2_EXEC) && (bp->pc == pc)) { + /* check U/S/M bit against current privilege level */ + if ((ctrl >> 3) & BIT(env->priv)) { + return true; + } + } + break; + case TRIGGER_TYPE_AD_MATCH6: + ctrl = env->tdata1[i]; + pc = env->tdata2[i]; + + if ((ctrl & TYPE6_EXEC) && (bp->pc == pc)) { + if (riscv_cpu_virt_enabled(env)) { + /* check VU/VS bit against current privilege level */ + if ((ctrl >> 23) & BIT(env->priv)) { + return true; + } + } else { + /* check U/S/M bit against current privilege level */ + if ((ctrl >> 3) & BIT(env->priv)) { + return true; + } + } + } + break; + default: + /* other trigger types are not supported or irrelevant */ + break; + } + } + } + + return false; +} + +bool riscv_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + target_ulong ctrl; + target_ulong addr; + int trigger_type; + int flags; + int i; + + for (i = 0; i < RV_MAX_TRIGGERS; i++) { + trigger_type = get_trigger_type(env, i); + + switch (trigger_type) { + case TRIGGER_TYPE_AD_MATCH: + /* type 2 trigger cannot be fired in VU/VS mode */ + if (riscv_cpu_virt_enabled(env)) { + return false; + } + + ctrl = env->tdata1[i]; + addr = env->tdata2[i]; + flags = 0; + + if (ctrl & TYPE2_LOAD) { + flags |= BP_MEM_READ; + } + if (ctrl & TYPE2_STORE) { + flags |= BP_MEM_WRITE; + } + + if ((wp->flags & flags) && (wp->vaddr == addr)) { + /* check U/S/M bit against current privilege level */ + if ((ctrl >> 3) & BIT(env->priv)) { + return true; + } + } + break; + case TRIGGER_TYPE_AD_MATCH6: + ctrl = env->tdata1[i]; + addr = 
env->tdata2[i]; + flags = 0; + + if (ctrl & TYPE6_LOAD) { + flags |= BP_MEM_READ; + } + if (ctrl & TYPE6_STORE) { + flags |= BP_MEM_WRITE; + } + + if ((wp->flags & flags) && (wp->vaddr == addr)) { + if (riscv_cpu_virt_enabled(env)) { + /* check VU/VS bit against current privilege level */ + if ((ctrl >> 23) & BIT(env->priv)) { + return true; + } + } else { + /* check U/S/M bit against current privilege level */ + if ((ctrl >> 3) & BIT(env->priv)) { + return true; + } + } + } + break; + default: + /* other trigger types are not supported */ + break; + } + } + + return false; +} + +void riscv_trigger_init(CPURISCVState *env) +{ + target_ulong tdata1 = build_tdata1(env, TRIGGER_TYPE_AD_MATCH, 0, 0); + int i; + + /* init to type 2 triggers */ + for (i = 0; i < RV_MAX_TRIGGERS; i++) { + /* + * type = TRIGGER_TYPE_AD_MATCH + * dmode = 0 (both debug and M-mode can write tdata) + * maskmax = 0 (unimplemented, always 0) + * sizehi = 0 (match against any size, RV64 only) + * hit = 0 (unimplemented, always 0) + * select = 0 (always 0, perform match on address) + * timing = 0 (always 0, trigger before instruction) + * sizelo = 0 (match against any size) + * action = 0 (always 0, raise a breakpoint exception) + * chain = 0 (unimplemented, always 0) + * match = 0 (always 0, when any compare value equals tdata2) + */ + env->tdata1[i] = tdata1; + env->tdata2[i] = 0; + env->tdata3[i] = 0; + env->cpu_breakpoint[i] = NULL; + env->cpu_watchpoint[i] = NULL; + } +} diff --git a/target/riscv/debug.h b/target/riscv/debug.h new file mode 100644 index 0000000000..a1226b4d29 --- /dev/null +++ b/target/riscv/debug.h @@ -0,0 +1,137 @@ +/* + * QEMU RISC-V Native Debug Support + * + * Copyright (c) 2022 Wind River Systems, Inc. + * + * Author: + * Bin Meng + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#ifndef RISCV_DEBUG_H +#define RISCV_DEBUG_H + +#define RV_MAX_TRIGGERS 2 + +/* register index of tdata CSRs */ +enum { + TDATA1 = 0, + TDATA2, + TDATA3, + TDATA_NUM +}; + +typedef enum { + TRIGGER_TYPE_NO_EXIST = 0, /* trigger does not exist */ + TRIGGER_TYPE_AD_MATCH = 2, /* address/data match trigger */ + TRIGGER_TYPE_INST_CNT = 3, /* instruction count trigger */ + TRIGGER_TYPE_INT = 4, /* interrupt trigger */ + TRIGGER_TYPE_EXCP = 5, /* exception trigger */ + TRIGGER_TYPE_AD_MATCH6 = 6, /* new address/data match trigger */ + TRIGGER_TYPE_EXT_SRC = 7, /* external source trigger */ + TRIGGER_TYPE_UNAVAIL = 15, /* trigger exists, but unavailable */ + TRIGGER_TYPE_NUM +} trigger_type_t; + +/* actions */ +typedef enum { + DBG_ACTION_NONE = -1, /* sentinel value */ + DBG_ACTION_BP = 0, + DBG_ACTION_DBG_MODE, + DBG_ACTION_TRACE0, + DBG_ACTION_TRACE1, + DBG_ACTION_TRACE2, + DBG_ACTION_TRACE3, + DBG_ACTION_EXT_DBG0 = 8, + DBG_ACTION_EXT_DBG1 +} trigger_action_t; + +/* tdata1 field masks */ + +#define RV32_TYPE(t) ((uint32_t)(t) << 28) +#define RV32_TYPE_MASK (0xf << 28) +#define RV32_DMODE BIT(27) +#define RV32_DATA_MASK 0x7ffffff +#define RV64_TYPE(t) ((uint64_t)(t) << 60) +#define RV64_TYPE_MASK (0xfULL << 60) +#define RV64_DMODE BIT_ULL(59) +#define RV64_DATA_MASK 0x7ffffffffffffff + +/* mcontrol field masks */ + +#define TYPE2_LOAD BIT(0) +#define TYPE2_STORE BIT(1) +#define TYPE2_EXEC BIT(2) +#define TYPE2_U BIT(3) +#define TYPE2_S BIT(4) +#define TYPE2_M BIT(6) +#define TYPE2_MATCH (0xf << 7) +#define TYPE2_CHAIN BIT(11) +#define TYPE2_ACTION (0xf << 12) +#define TYPE2_SIZELO (0x3 << 16) +#define TYPE2_TIMING BIT(18) +#define TYPE2_SELECT BIT(19) +#define TYPE2_HIT BIT(20) +#define TYPE2_SIZEHI (0x3 << 21) /* RV64 only */ + +/* mcontrol6 field masks */ + +#define TYPE6_LOAD BIT(0) +#define TYPE6_STORE BIT(1) +#define TYPE6_EXEC BIT(2) +#define TYPE6_U BIT(3) +#define TYPE6_S BIT(4) +#define TYPE6_M BIT(6) +#define TYPE6_MATCH (0xf << 7) +#define TYPE6_CHAIN BIT(11) +#define TYPE6_ACTION (0xf << 12) +#define TYPE6_SIZE (0xf << 16) +#define TYPE6_TIMING BIT(20) +#define TYPE6_SELECT BIT(21) +#define TYPE6_HIT BIT(22) +#define TYPE6_VU BIT(23) +#define TYPE6_VS BIT(24) + +/* access size */ +enum { + SIZE_ANY = 0, + SIZE_1B, + SIZE_2B, + SIZE_4B, + SIZE_6B, + SIZE_8B, + SIZE_10B, + SIZE_12B, + SIZE_14B, + SIZE_16B, + SIZE_NUM = 16 +}; + +bool tdata_available(CPURISCVState *env, int tdata_index); + +target_ulong tselect_csr_read(CPURISCVState *env); +void tselect_csr_write(CPURISCVState *env, target_ulong val); + +target_ulong tdata_csr_read(CPURISCVState *env, int tdata_index); +void tdata_csr_write(CPURISCVState *env, int tdata_index, target_ulong val); + +target_ulong tinfo_csr_read(CPURISCVState *env); + +void riscv_cpu_debug_excp_handler(CPUState *cs); +bool riscv_cpu_debug_check_breakpoint(CPUState *cs); +bool riscv_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp); + +void riscv_trigger_init(CPURISCVState *env); + +#endif /* RISCV_DEBUG_H */ diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c index 8700516a14..5699c9517f 100644 --- a/target/riscv/fpu_helper.c +++ b/target/riscv/fpu_helper.c @@ -55,23 +55,23 @@ void helper_set_rounding_mode(CPURISCVState *env, uint32_t rm) { int softrm; - if (rm == 7) { + if (rm == RISCV_FRM_DYN) { rm = env->frm; } switch (rm) { - case 0: + case RISCV_FRM_RNE: softrm = float_round_nearest_even; break; - case 1: + case RISCV_FRM_RTZ: softrm = float_round_to_zero; break; - case 2: + case RISCV_FRM_RDN: softrm = 
float_round_down; break; - case 3: + case RISCV_FRM_RUP: softrm = float_round_up; break; - case 4: + case RISCV_FRM_RMM: softrm = float_round_ties_away; break; default: @@ -81,13 +81,29 @@ void helper_set_rounding_mode(CPURISCVState *env, uint32_t rm) set_float_rounding_mode(softrm, &env->fp_status); } +void helper_set_rod_rounding_mode(CPURISCVState *env) +{ + set_float_rounding_mode(float_round_to_odd, &env->fp_status); +} + +static uint64_t do_fmadd_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2, + uint64_t rs3, int flags) +{ + float16 frs1 = check_nanbox_h(env, rs1); + float16 frs2 = check_nanbox_h(env, rs2); + float16 frs3 = check_nanbox_h(env, rs3); + return nanbox_h(env, float16_muladd(frs1, frs2, frs3, flags, + &env->fp_status)); +} + static uint64_t do_fmadd_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2, uint64_t rs3, int flags) { - float32 frs1 = check_nanbox_s(rs1); - float32 frs2 = check_nanbox_s(rs2); - float32 frs3 = check_nanbox_s(rs3); - return nanbox_s(float32_muladd(frs1, frs2, frs3, flags, &env->fp_status)); + float32 frs1 = check_nanbox_s(env, rs1); + float32 frs2 = check_nanbox_s(env, rs2); + float32 frs3 = check_nanbox_s(env, rs3); + return nanbox_s(env, float32_muladd(frs1, frs2, frs3, flags, + &env->fp_status)); } uint64_t helper_fmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2, @@ -102,6 +118,12 @@ uint64_t helper_fmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2, return float64_muladd(frs1, frs2, frs3, 0, &env->fp_status); } +uint64_t helper_fmadd_h(CPURISCVState *env, uint64_t frs1, uint64_t frs2, + uint64_t frs3) +{ + return do_fmadd_h(env, frs1, frs2, frs3, 0); +} + uint64_t helper_fmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2, uint64_t frs3) { @@ -115,6 +137,12 @@ uint64_t helper_fmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2, &env->fp_status); } +uint64_t helper_fmsub_h(CPURISCVState *env, uint64_t frs1, uint64_t frs2, + uint64_t frs3) +{ + return do_fmadd_h(env, frs1, frs2, frs3, float_muladd_negate_c); +} + uint64_t helper_fnmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2, uint64_t frs3) { @@ -128,6 +156,12 @@ uint64_t helper_fnmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2, &env->fp_status); } +uint64_t helper_fnmsub_h(CPURISCVState *env, uint64_t frs1, uint64_t frs2, + uint64_t frs3) +{ + return do_fmadd_h(env, frs1, frs2, frs3, float_muladd_negate_product); +} + uint64_t helper_fnmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2, uint64_t frs3) { @@ -142,122 +176,133 @@ uint64_t helper_fnmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2, float_muladd_negate_product, &env->fp_status); } +uint64_t helper_fnmadd_h(CPURISCVState *env, uint64_t frs1, uint64_t frs2, + uint64_t frs3) +{ + return do_fmadd_h(env, frs1, frs2, frs3, + float_muladd_negate_c | float_muladd_negate_product); +} + uint64_t helper_fadd_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2) { - float32 frs1 = check_nanbox_s(rs1); - float32 frs2 = check_nanbox_s(rs2); - return nanbox_s(float32_add(frs1, frs2, &env->fp_status)); + float32 frs1 = check_nanbox_s(env, rs1); + float32 frs2 = check_nanbox_s(env, rs2); + return nanbox_s(env, float32_add(frs1, frs2, &env->fp_status)); } uint64_t helper_fsub_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2) { - float32 frs1 = check_nanbox_s(rs1); - float32 frs2 = check_nanbox_s(rs2); - return nanbox_s(float32_sub(frs1, frs2, &env->fp_status)); + float32 frs1 = check_nanbox_s(env, rs1); + float32 frs2 = check_nanbox_s(env, rs2); + return nanbox_s(env, float32_sub(frs1, frs2, 
&env->fp_status)); } uint64_t helper_fmul_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2) { - float32 frs1 = check_nanbox_s(rs1); - float32 frs2 = check_nanbox_s(rs2); - return nanbox_s(float32_mul(frs1, frs2, &env->fp_status)); + float32 frs1 = check_nanbox_s(env, rs1); + float32 frs2 = check_nanbox_s(env, rs2); + return nanbox_s(env, float32_mul(frs1, frs2, &env->fp_status)); } uint64_t helper_fdiv_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2) { - float32 frs1 = check_nanbox_s(rs1); - float32 frs2 = check_nanbox_s(rs2); - return nanbox_s(float32_div(frs1, frs2, &env->fp_status)); + float32 frs1 = check_nanbox_s(env, rs1); + float32 frs2 = check_nanbox_s(env, rs2); + return nanbox_s(env, float32_div(frs1, frs2, &env->fp_status)); } uint64_t helper_fmin_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2) { - float32 frs1 = check_nanbox_s(rs1); - float32 frs2 = check_nanbox_s(rs2); - return nanbox_s(float32_minnum(frs1, frs2, &env->fp_status)); + float32 frs1 = check_nanbox_s(env, rs1); + float32 frs2 = check_nanbox_s(env, rs2); + return nanbox_s(env, env->priv_ver < PRIV_VERSION_1_11_0 ? + float32_minnum(frs1, frs2, &env->fp_status) : + float32_minimum_number(frs1, frs2, &env->fp_status)); } uint64_t helper_fmax_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2) { - float32 frs1 = check_nanbox_s(rs1); - float32 frs2 = check_nanbox_s(rs2); - return nanbox_s(float32_maxnum(frs1, frs2, &env->fp_status)); + float32 frs1 = check_nanbox_s(env, rs1); + float32 frs2 = check_nanbox_s(env, rs2); + return nanbox_s(env, env->priv_ver < PRIV_VERSION_1_11_0 ? + float32_maxnum(frs1, frs2, &env->fp_status) : + float32_maximum_number(frs1, frs2, &env->fp_status)); } uint64_t helper_fsqrt_s(CPURISCVState *env, uint64_t rs1) { - float32 frs1 = check_nanbox_s(rs1); - return nanbox_s(float32_sqrt(frs1, &env->fp_status)); + float32 frs1 = check_nanbox_s(env, rs1); + return nanbox_s(env, float32_sqrt(frs1, &env->fp_status)); } target_ulong helper_fle_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2) { - float32 frs1 = check_nanbox_s(rs1); - float32 frs2 = check_nanbox_s(rs2); + float32 frs1 = check_nanbox_s(env, rs1); + float32 frs2 = check_nanbox_s(env, rs2); return float32_le(frs1, frs2, &env->fp_status); } target_ulong helper_flt_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2) { - float32 frs1 = check_nanbox_s(rs1); - float32 frs2 = check_nanbox_s(rs2); + float32 frs1 = check_nanbox_s(env, rs1); + float32 frs2 = check_nanbox_s(env, rs2); return float32_lt(frs1, frs2, &env->fp_status); } target_ulong helper_feq_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2) { - float32 frs1 = check_nanbox_s(rs1); - float32 frs2 = check_nanbox_s(rs2); + float32 frs1 = check_nanbox_s(env, rs1); + float32 frs2 = check_nanbox_s(env, rs2); return float32_eq_quiet(frs1, frs2, &env->fp_status); } target_ulong helper_fcvt_w_s(CPURISCVState *env, uint64_t rs1) { - float32 frs1 = check_nanbox_s(rs1); + float32 frs1 = check_nanbox_s(env, rs1); return float32_to_int32(frs1, &env->fp_status); } target_ulong helper_fcvt_wu_s(CPURISCVState *env, uint64_t rs1) { - float32 frs1 = check_nanbox_s(rs1); + float32 frs1 = check_nanbox_s(env, rs1); return (int32_t)float32_to_uint32(frs1, &env->fp_status); } target_ulong helper_fcvt_l_s(CPURISCVState *env, uint64_t rs1) { - float32 frs1 = check_nanbox_s(rs1); + float32 frs1 = check_nanbox_s(env, rs1); return float32_to_int64(frs1, &env->fp_status); } target_ulong helper_fcvt_lu_s(CPURISCVState *env, uint64_t rs1) { - float32 frs1 = check_nanbox_s(rs1); + float32 frs1 = check_nanbox_s(env, 
rs1); return float32_to_uint64(frs1, &env->fp_status); } uint64_t helper_fcvt_s_w(CPURISCVState *env, target_ulong rs1) { - return nanbox_s(int32_to_float32((int32_t)rs1, &env->fp_status)); + return nanbox_s(env, int32_to_float32((int32_t)rs1, &env->fp_status)); } uint64_t helper_fcvt_s_wu(CPURISCVState *env, target_ulong rs1) { - return nanbox_s(uint32_to_float32((uint32_t)rs1, &env->fp_status)); + return nanbox_s(env, uint32_to_float32((uint32_t)rs1, &env->fp_status)); } uint64_t helper_fcvt_s_l(CPURISCVState *env, target_ulong rs1) { - return nanbox_s(int64_to_float32(rs1, &env->fp_status)); + return nanbox_s(env, int64_to_float32(rs1, &env->fp_status)); } uint64_t helper_fcvt_s_lu(CPURISCVState *env, target_ulong rs1) { - return nanbox_s(uint64_to_float32(rs1, &env->fp_status)); + return nanbox_s(env, uint64_to_float32(rs1, &env->fp_status)); } -target_ulong helper_fclass_s(uint64_t rs1) +target_ulong helper_fclass_s(CPURISCVState *env, uint64_t rs1) { - float32 frs1 = check_nanbox_s(rs1); + float32 frs1 = check_nanbox_s(env, rs1); return fclass_s(frs1); } @@ -283,22 +328,26 @@ uint64_t helper_fdiv_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) uint64_t helper_fmin_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { - return float64_minnum(frs1, frs2, &env->fp_status); + return env->priv_ver < PRIV_VERSION_1_11_0 ? + float64_minnum(frs1, frs2, &env->fp_status) : + float64_minimum_number(frs1, frs2, &env->fp_status); } uint64_t helper_fmax_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { - return float64_maxnum(frs1, frs2, &env->fp_status); + return env->priv_ver < PRIV_VERSION_1_11_0 ? + float64_maxnum(frs1, frs2, &env->fp_status) : + float64_maximum_number(frs1, frs2, &env->fp_status); } uint64_t helper_fcvt_s_d(CPURISCVState *env, uint64_t rs1) { - return nanbox_s(float64_to_float32(rs1, &env->fp_status)); + return nanbox_s(env, float64_to_float32(rs1, &env->fp_status)); } uint64_t helper_fcvt_d_s(CPURISCVState *env, uint64_t rs1) { - float32 frs1 = check_nanbox_s(rs1); + float32 frs1 = check_nanbox_s(env, rs1); return float32_to_float64(frs1, &env->fp_status); } @@ -366,3 +415,149 @@ target_ulong helper_fclass_d(uint64_t frs1) { return fclass_d(frs1); } + +uint64_t helper_fadd_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2) +{ + float16 frs1 = check_nanbox_h(env, rs1); + float16 frs2 = check_nanbox_h(env, rs2); + return nanbox_h(env, float16_add(frs1, frs2, &env->fp_status)); +} + +uint64_t helper_fsub_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2) +{ + float16 frs1 = check_nanbox_h(env, rs1); + float16 frs2 = check_nanbox_h(env, rs2); + return nanbox_h(env, float16_sub(frs1, frs2, &env->fp_status)); +} + +uint64_t helper_fmul_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2) +{ + float16 frs1 = check_nanbox_h(env, rs1); + float16 frs2 = check_nanbox_h(env, rs2); + return nanbox_h(env, float16_mul(frs1, frs2, &env->fp_status)); +} + +uint64_t helper_fdiv_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2) +{ + float16 frs1 = check_nanbox_h(env, rs1); + float16 frs2 = check_nanbox_h(env, rs2); + return nanbox_h(env, float16_div(frs1, frs2, &env->fp_status)); +} + +uint64_t helper_fmin_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2) +{ + float16 frs1 = check_nanbox_h(env, rs1); + float16 frs2 = check_nanbox_h(env, rs2); + return nanbox_h(env, env->priv_ver < PRIV_VERSION_1_11_0 ? 
+ float16_minnum(frs1, frs2, &env->fp_status) : + float16_minimum_number(frs1, frs2, &env->fp_status)); +} + +uint64_t helper_fmax_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2) +{ + float16 frs1 = check_nanbox_h(env, rs1); + float16 frs2 = check_nanbox_h(env, rs2); + return nanbox_h(env, env->priv_ver < PRIV_VERSION_1_11_0 ? + float16_maxnum(frs1, frs2, &env->fp_status) : + float16_maximum_number(frs1, frs2, &env->fp_status)); +} + +uint64_t helper_fsqrt_h(CPURISCVState *env, uint64_t rs1) +{ + float16 frs1 = check_nanbox_h(env, rs1); + return nanbox_h(env, float16_sqrt(frs1, &env->fp_status)); +} + +target_ulong helper_fle_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2) +{ + float16 frs1 = check_nanbox_h(env, rs1); + float16 frs2 = check_nanbox_h(env, rs2); + return float16_le(frs1, frs2, &env->fp_status); +} + +target_ulong helper_flt_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2) +{ + float16 frs1 = check_nanbox_h(env, rs1); + float16 frs2 = check_nanbox_h(env, rs2); + return float16_lt(frs1, frs2, &env->fp_status); +} + +target_ulong helper_feq_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2) +{ + float16 frs1 = check_nanbox_h(env, rs1); + float16 frs2 = check_nanbox_h(env, rs2); + return float16_eq_quiet(frs1, frs2, &env->fp_status); +} + +target_ulong helper_fclass_h(CPURISCVState *env, uint64_t rs1) +{ + float16 frs1 = check_nanbox_h(env, rs1); + return fclass_h(frs1); +} + +target_ulong helper_fcvt_w_h(CPURISCVState *env, uint64_t rs1) +{ + float16 frs1 = check_nanbox_h(env, rs1); + return float16_to_int32(frs1, &env->fp_status); +} + +target_ulong helper_fcvt_wu_h(CPURISCVState *env, uint64_t rs1) +{ + float16 frs1 = check_nanbox_h(env, rs1); + return (int32_t)float16_to_uint32(frs1, &env->fp_status); +} + +target_ulong helper_fcvt_l_h(CPURISCVState *env, uint64_t rs1) +{ + float16 frs1 = check_nanbox_h(env, rs1); + return float16_to_int64(frs1, &env->fp_status); +} + +target_ulong helper_fcvt_lu_h(CPURISCVState *env, uint64_t rs1) +{ + float16 frs1 = check_nanbox_h(env, rs1); + return float16_to_uint64(frs1, &env->fp_status); +} + +uint64_t helper_fcvt_h_w(CPURISCVState *env, target_ulong rs1) +{ + return nanbox_h(env, int32_to_float16((int32_t)rs1, &env->fp_status)); +} + +uint64_t helper_fcvt_h_wu(CPURISCVState *env, target_ulong rs1) +{ + return nanbox_h(env, uint32_to_float16((uint32_t)rs1, &env->fp_status)); +} + +uint64_t helper_fcvt_h_l(CPURISCVState *env, target_ulong rs1) +{ + return nanbox_h(env, int64_to_float16(rs1, &env->fp_status)); +} + +uint64_t helper_fcvt_h_lu(CPURISCVState *env, target_ulong rs1) +{ + return nanbox_h(env, uint64_to_float16(rs1, &env->fp_status)); +} + +uint64_t helper_fcvt_h_s(CPURISCVState *env, uint64_t rs1) +{ + float32 frs1 = check_nanbox_s(env, rs1); + return nanbox_h(env, float32_to_float16(frs1, true, &env->fp_status)); +} + +uint64_t helper_fcvt_s_h(CPURISCVState *env, uint64_t rs1) +{ + float16 frs1 = check_nanbox_h(env, rs1); + return nanbox_s(env, float16_to_float32(frs1, true, &env->fp_status)); +} + +uint64_t helper_fcvt_h_d(CPURISCVState *env, uint64_t rs1) +{ + return nanbox_h(env, float64_to_float16(rs1, true, &env->fp_status)); +} + +uint64_t helper_fcvt_d_h(CPURISCVState *env, uint64_t rs1) +{ + float16 frs1 = check_nanbox_h(env, rs1); + return float16_to_float64(frs1, true, &env->fp_status); +} diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c index a7a9c0b1fe..6e7bbdbd5e 100644 --- a/target/riscv/gdbstub.c +++ b/target/riscv/gdbstub.c @@ -20,15 +20,54 @@ #include "exec/gdbstub.h" #include "cpu.h" +struct 
TypeSize { + const char *gdb_type; + const char *id; + int size; + const char suffix; +}; + +static const struct TypeSize vec_lanes[] = { + /* quads */ + { "uint128", "quads", 128, 'q' }, + /* 64 bit */ + { "uint64", "longs", 64, 'l' }, + /* 32 bit */ + { "uint32", "words", 32, 'w' }, + /* 16 bit */ + { "uint16", "shorts", 16, 's' }, + /* + * TODO: currently there is no reliable way of telling + * if the remote gdb actually understands ieee_half so + * we don't expose it in the target description for now. + * { "ieee_half", 16, 'h', 'f' }, + */ + /* bytes */ + { "uint8", "bytes", 8, 'b' }, +}; + int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; + target_ulong tmp; if (n < 32) { - return gdb_get_regl(mem_buf, env->gpr[n]); + tmp = env->gpr[n]; } else if (n == 32) { - return gdb_get_regl(mem_buf, env->pc); + tmp = env->pc; + } else { + return 0; + } + + switch (env->misa_mxl_max) { + case MXL_RV32: + return gdb_get_reg32(mem_buf, tmp); + case MXL_RV64: + case MXL_RV128: + return gdb_get_reg64(mem_buf, tmp); + default: + g_assert_not_reached(); } return 0; } @@ -37,43 +76,44 @@ int riscv_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; + int length = 0; + target_ulong tmp; - if (n == 0) { - /* discard writes to x0 */ - return sizeof(target_ulong); - } else if (n < 32) { - env->gpr[n] = ldtul_p(mem_buf); - return sizeof(target_ulong); - } else if (n == 32) { - env->pc = ldtul_p(mem_buf); - return sizeof(target_ulong); + switch (env->misa_mxl_max) { + case MXL_RV32: + tmp = (int32_t)ldl_p(mem_buf); + length = 4; + break; + case MXL_RV64: + case MXL_RV128: + if (env->xl < MXL_RV64) { + tmp = (int32_t)ldq_p(mem_buf); + } else { + tmp = ldq_p(mem_buf); + } + length = 8; + break; + default: + g_assert_not_reached(); } - return 0; + if (n > 0 && n < 32) { + env->gpr[n] = tmp; + } else if (n == 32) { + env->pc = tmp; + } + + return length; } static int riscv_gdb_get_fpu(CPURISCVState *env, GByteArray *buf, int n) { if (n < 32) { - if (env->misa & RVD) { + if (env->misa_ext & RVD) { return gdb_get_reg64(buf, env->fpr[n]); } - if (env->misa & RVF) { + if (env->misa_ext & RVF) { return gdb_get_reg32(buf, env->fpr[n]); } - /* there is hole between ft11 and fflags in fpu.xml */ - } else if (n < 36 && n > 32) { - target_ulong val = 0; - int result; - /* - * CSR_FFLAGS is at index 1 in csr_register, and gdb says it is FP - * register 33, so we recalculate the map index. - * This also works for CSR_FRM and CSR_FCSR. - */ - result = riscv_csrrw_debug(env, n - 32, &val, - 0, 0); - if (result == RISCV_EXCP_NONE) { - return gdb_get_regl(buf, val); - } } return 0; } @@ -83,24 +123,100 @@ static int riscv_gdb_set_fpu(CPURISCVState *env, uint8_t *mem_buf, int n) if (n < 32) { env->fpr[n] = ldq_p(mem_buf); /* always 64-bit */ return sizeof(uint64_t); - /* there is hole between ft11 and fflags in fpu.xml */ - } else if (n < 36 && n > 32) { - target_ulong val = ldtul_p(mem_buf); - int result; - /* - * CSR_FFLAGS is at index 1 in csr_register, and gdb says it is FP - * register 33, so we recalculate the map index. - * This also works for CSR_FRM and CSR_FCSR. - */ - result = riscv_csrrw_debug(env, n - 32, NULL, - val, -1); - if (result == RISCV_EXCP_NONE) { - return sizeof(target_ulong); - } } return 0; } +/* + * Convert register index number passed by GDB to the correspond + * vector CSR number. 
Vector CSRs are defined after vector registers + * in dynamic generated riscv-vector.xml, thus the starting register index + * of vector CSRs is 32. + * Return 0 if register index number is out of range. + */ +static int riscv_gdb_vector_csrno(int num_regs) +{ + /* + * The order of vector CSRs in the switch case + * should match with the order defined in csr_ops[]. + */ + switch (num_regs) { + case 32: + return CSR_VSTART; + case 33: + return CSR_VXSAT; + case 34: + return CSR_VXRM; + case 35: + return CSR_VCSR; + case 36: + return CSR_VL; + case 37: + return CSR_VTYPE; + case 38: + return CSR_VLENB; + default: + /* Unknown register. */ + return 0; + } +} + +static int riscv_gdb_get_vector(CPURISCVState *env, GByteArray *buf, int n) +{ + uint16_t vlenb = env_archcpu(env)->cfg.vlen >> 3; + if (n < 32) { + int i; + int cnt = 0; + for (i = 0; i < vlenb; i += 8) { + cnt += gdb_get_reg64(buf, + env->vreg[(n * vlenb + i) / 8]); + } + return cnt; + } + + int csrno = riscv_gdb_vector_csrno(n); + + if (!csrno) { + return 0; + } + + target_ulong val = 0; + int result = riscv_csrrw_debug(env, csrno, &val, 0, 0); + + if (result == RISCV_EXCP_NONE) { + return gdb_get_regl(buf, val); + } + + return 0; +} + +static int riscv_gdb_set_vector(CPURISCVState *env, uint8_t *mem_buf, int n) +{ + uint16_t vlenb = env_archcpu(env)->cfg.vlen >> 3; + if (n < 32) { + int i; + for (i = 0; i < vlenb; i += 8) { + env->vreg[(n * vlenb + i) / 8] = ldq_p(mem_buf + i); + } + return vlenb; + } + + int csrno = riscv_gdb_vector_csrno(n); + + if (!csrno) { + return 0; + } + + target_ulong val = ldtul_p(mem_buf); + int result = riscv_csrrw_debug(env, csrno, NULL, val, -1); + + if (result == RISCV_EXCP_NONE) { + return sizeof(target_ulong); + } + + return 0; +} + static int riscv_gdb_get_csr(CPURISCVState *env, GByteArray *buf, int n) { if (n < CSR_TABLE_SIZE) { @@ -161,9 +277,14 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg) CPURISCVState *env = &cpu->env; GString *s = g_string_new(NULL); riscv_csr_predicate_fn predicate; - int bitsize = riscv_cpu_is_32bit(env) ? 
32 : 64; + int bitsize = 16 << env->misa_mxl_max; int i; + /* Until gdb knows about 128-bit registers */ + if (bitsize > 64) { + bitsize = 64; + } + g_string_printf(s, ""); g_string_append_printf(s, ""); g_string_append_printf(s, ""); @@ -187,24 +308,100 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg) return CSR_TABLE_SIZE; } +static int ricsv_gen_dynamic_vector_xml(CPUState *cs, int base_reg) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + GString *s = g_string_new(NULL); + g_autoptr(GString) ts = g_string_new(""); + int reg_width = cpu->cfg.vlen; + int num_regs = 0; + int i; + + g_string_printf(s, ""); + g_string_append_printf(s, ""); + g_string_append_printf(s, ""); + + /* First define types and totals in a whole VL */ + for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) { + int count = reg_width / vec_lanes[i].size; + g_string_printf(ts, "%s", vec_lanes[i].id); + g_string_append_printf(s, + "", + ts->str, vec_lanes[i].gdb_type, count); + } + + /* Define unions */ + g_string_append_printf(s, ""); + for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) { + g_string_append_printf(s, "", + vec_lanes[i].suffix, + vec_lanes[i].id); + } + g_string_append(s, ""); + + /* Define vector registers */ + for (i = 0; i < 32; i++) { + g_string_append_printf(s, + "", + i, reg_width, base_reg++); + num_regs++; + } + + /* Define vector CSRs */ + const char *vector_csrs[7] = { + "vstart", "vxsat", "vxrm", "vcsr", + "vl", "vtype", "vlenb" + }; + + for (i = 0; i < 7; i++) { + g_string_append_printf(s, + "", + vector_csrs[i], TARGET_LONG_BITS, base_reg++); + num_regs++; + } + + g_string_append_printf(s, ""); + + cpu->dyn_vreg_xml = g_string_free(s, false); + return num_regs; +} + void riscv_cpu_register_gdb_regs_for_features(CPUState *cs) { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; - if (env->misa & RVD) { + if (env->misa_ext & RVD) { gdb_register_coprocessor(cs, riscv_gdb_get_fpu, riscv_gdb_set_fpu, - 36, "riscv-64bit-fpu.xml", 0); - } else if (env->misa & RVF) { + 32, "riscv-64bit-fpu.xml", 0); + } else if (env->misa_ext & RVF) { gdb_register_coprocessor(cs, riscv_gdb_get_fpu, riscv_gdb_set_fpu, - 36, "riscv-32bit-fpu.xml", 0); + 32, "riscv-32bit-fpu.xml", 0); + } + if (env->misa_ext & RVV) { + gdb_register_coprocessor(cs, riscv_gdb_get_vector, riscv_gdb_set_vector, + ricsv_gen_dynamic_vector_xml(cs, + cs->gdb_num_regs), + "riscv-vector.xml", 0); + } + switch (env->misa_mxl_max) { + case MXL_RV32: + gdb_register_coprocessor(cs, riscv_gdb_get_virtual, + riscv_gdb_set_virtual, + 1, "riscv-32bit-virtual.xml", 0); + break; + case MXL_RV64: + case MXL_RV128: + gdb_register_coprocessor(cs, riscv_gdb_get_virtual, + riscv_gdb_set_virtual, + 1, "riscv-64bit-virtual.xml", 0); + break; + default: + g_assert_not_reached(); } -#if defined(TARGET_RISCV32) - gdb_register_coprocessor(cs, riscv_gdb_get_virtual, riscv_gdb_set_virtual, - 1, "riscv-32bit-virtual.xml", 0); -#elif defined(TARGET_RISCV64) - gdb_register_coprocessor(cs, riscv_gdb_get_virtual, riscv_gdb_set_virtual, - 1, "riscv-64bit-virtual.xml", 0); -#endif gdb_register_coprocessor(cs, riscv_gdb_get_csr, riscv_gdb_set_csr, riscv_gen_dynamic_csr_xml(cs, cs->gdb_num_regs), diff --git a/target/riscv/helper.h b/target/riscv/helper.h index 415e37bc37..a03014fe67 100644 --- a/target/riscv/helper.h +++ b/target/riscv/helper.h @@ -3,16 +3,21 @@ DEF_HELPER_2(raise_exception, noreturn, env, i32) /* Floating Point - rounding mode */ DEF_HELPER_FLAGS_2(set_rounding_mode, TCG_CALL_NO_WG, void, env, i32) +DEF_HELPER_FLAGS_1(set_rod_rounding_mode, TCG_CALL_NO_WG, 
void, env) /* Floating Point - fused */ DEF_HELPER_FLAGS_4(fmadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(fmadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(fmadd_h, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(fmsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(fmsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(fmsub_h, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(fnmsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(fnmsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(fnmsub_h, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(fnmadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(fnmadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(fnmadd_h, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) /* Floating Point - Single Precision */ DEF_HELPER_FLAGS_3(fadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64) @@ -33,7 +38,7 @@ DEF_HELPER_FLAGS_2(fcvt_s_w, TCG_CALL_NO_RWG, i64, env, tl) DEF_HELPER_FLAGS_2(fcvt_s_wu, TCG_CALL_NO_RWG, i64, env, tl) DEF_HELPER_FLAGS_2(fcvt_s_l, TCG_CALL_NO_RWG, i64, env, tl) DEF_HELPER_FLAGS_2(fcvt_s_lu, TCG_CALL_NO_RWG, i64, env, tl) -DEF_HELPER_FLAGS_1(fclass_s, TCG_CALL_NO_RWG_SE, tl, i64) +DEF_HELPER_FLAGS_2(fclass_s, TCG_CALL_NO_RWG_SE, tl, env, i64) /* Floating Point - Double Precision */ DEF_HELPER_FLAGS_3(fadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64) @@ -59,18 +64,49 @@ DEF_HELPER_FLAGS_2(fcvt_d_lu, TCG_CALL_NO_RWG, i64, env, tl) DEF_HELPER_FLAGS_1(fclass_d, TCG_CALL_NO_RWG_SE, tl, i64) /* Bitmanip */ -DEF_HELPER_FLAGS_2(grev, TCG_CALL_NO_RWG_SE, tl, tl, tl) -DEF_HELPER_FLAGS_2(grevw, TCG_CALL_NO_RWG_SE, tl, tl, tl) -DEF_HELPER_FLAGS_2(gorc, TCG_CALL_NO_RWG_SE, tl, tl, tl) -DEF_HELPER_FLAGS_2(gorcw, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(clmul, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(clmulr, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_1(brev8, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(unzip, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(zip, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_2(xperm4, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(xperm8, TCG_CALL_NO_RWG_SE, tl, tl, tl) + +/* Floating Point - Half Precision */ +DEF_HELPER_FLAGS_3(fadd_h, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fsub_h, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmul_h, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fdiv_h, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmin_h, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmax_h, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_2(fsqrt_h, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_3(fle_h, TCG_CALL_NO_RWG, tl, env, i64, i64) +DEF_HELPER_FLAGS_3(flt_h, TCG_CALL_NO_RWG, tl, env, i64, i64) +DEF_HELPER_FLAGS_3(feq_h, TCG_CALL_NO_RWG, tl, env, i64, i64) +DEF_HELPER_FLAGS_2(fcvt_s_h, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(fcvt_h_s, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(fcvt_d_h, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(fcvt_h_d, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(fcvt_w_h, TCG_CALL_NO_RWG, tl, env, i64) +DEF_HELPER_FLAGS_2(fcvt_wu_h, TCG_CALL_NO_RWG, tl, env, i64) +DEF_HELPER_FLAGS_2(fcvt_l_h, TCG_CALL_NO_RWG, tl, env, i64) +DEF_HELPER_FLAGS_2(fcvt_lu_h, TCG_CALL_NO_RWG, tl, env, i64) +DEF_HELPER_FLAGS_2(fcvt_h_w, TCG_CALL_NO_RWG, i64, env, tl) +DEF_HELPER_FLAGS_2(fcvt_h_wu, TCG_CALL_NO_RWG, 
i64, env, tl) +DEF_HELPER_FLAGS_2(fcvt_h_l, TCG_CALL_NO_RWG, i64, env, tl) +DEF_HELPER_FLAGS_2(fcvt_h_lu, TCG_CALL_NO_RWG, i64, env, tl) +DEF_HELPER_FLAGS_2(fclass_h, TCG_CALL_NO_RWG_SE, tl, env, i64) /* Special functions */ -DEF_HELPER_3(csrrw, tl, env, tl, tl) -DEF_HELPER_4(csrrs, tl, env, tl, tl, tl) -DEF_HELPER_4(csrrc, tl, env, tl, tl, tl) +DEF_HELPER_2(csrr, tl, env, int) +DEF_HELPER_3(csrw, void, env, int, tl) +DEF_HELPER_4(csrrw, tl, env, int, tl, tl) +DEF_HELPER_2(csrr_i128, tl, env, int) +DEF_HELPER_4(csrw_i128, void, env, int, tl, tl) +DEF_HELPER_6(csrrw_i128, tl, env, int, tl, tl, tl, tl) #ifndef CONFIG_USER_ONLY -DEF_HELPER_2(sret, tl, env, tl) -DEF_HELPER_2(mret, tl, env, tl) +DEF_HELPER_1(sret, tl, env) +DEF_HELPER_1(mret, tl, env) DEF_HELPER_1(wfi, void, env) DEF_HELPER_1(tlb_flush, void, env) #endif @@ -85,195 +121,89 @@ DEF_HELPER_2(hyp_hlvx_wu, tl, env, tl) /* Vector functions */ DEF_HELPER_3(vsetvl, tl, env, tl, tl) -DEF_HELPER_5(vlb_v_b, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlb_v_b_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlb_v_h, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlb_v_h_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlb_v_w, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlb_v_w_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlb_v_d, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlb_v_d_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlh_v_h, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlh_v_h_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlh_v_w, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlh_v_w_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlh_v_d, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlh_v_d_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlw_v_w, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlw_v_w_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlw_v_d, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlw_v_d_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vle_v_b, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vle_v_b_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vle_v_h, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vle_v_h_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vle_v_w, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vle_v_w_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vle_v_d, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vle_v_d_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlbu_v_b, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlbu_v_b_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlbu_v_h, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlbu_v_h_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlbu_v_w, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlbu_v_w_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlbu_v_d, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlbu_v_d_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlhu_v_h, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlhu_v_h_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlhu_v_w, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlhu_v_w_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlhu_v_d, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlhu_v_d_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlwu_v_w, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlwu_v_w_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlwu_v_d, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlwu_v_d_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vsb_v_b, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vsb_v_b_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vsb_v_h, void, ptr, 
ptr, tl, env, i32) -DEF_HELPER_5(vsb_v_h_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vsb_v_w, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vsb_v_w_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vsb_v_d, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vsb_v_d_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vsh_v_h, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vsh_v_h_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vsh_v_w, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vsh_v_w_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vsh_v_d, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vsh_v_d_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vsw_v_w, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vsw_v_w_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vsw_v_d, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vsw_v_d_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vse_v_b, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vse_v_b_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vse_v_h, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vse_v_h_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vse_v_w, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vse_v_w_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vse_v_d, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vse_v_d_mask, void, ptr, ptr, tl, env, i32) -DEF_HELPER_6(vlsb_v_b, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlsb_v_h, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlsb_v_w, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlsb_v_d, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlsh_v_h, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlsh_v_w, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlsh_v_d, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlsw_v_w, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlsw_v_d, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlse_v_b, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlse_v_h, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlse_v_w, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlse_v_d, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlsbu_v_b, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlsbu_v_h, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlsbu_v_w, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlsbu_v_d, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlshu_v_h, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlshu_v_w, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlshu_v_d, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlswu_v_w, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlswu_v_d, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vssb_v_b, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vssb_v_h, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vssb_v_w, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vssb_v_d, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vssh_v_h, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vssh_v_w, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vssh_v_d, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vssw_v_w, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vssw_v_d, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vsse_v_b, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vsse_v_h, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vsse_v_w, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vsse_v_d, void, ptr, ptr, tl, tl, env, i32) -DEF_HELPER_6(vlxb_v_b, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxb_v_h, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxb_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxb_v_d, void, ptr, ptr, 
tl, ptr, env, i32) -DEF_HELPER_6(vlxh_v_h, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxh_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxh_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxw_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxw_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxe_v_b, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxe_v_h, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxe_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxe_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxbu_v_b, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxbu_v_h, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxbu_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxbu_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxhu_v_h, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxhu_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxhu_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxwu_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vlxwu_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vsxb_v_b, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vsxb_v_h, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vsxb_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vsxb_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vsxh_v_h, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vsxh_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vsxh_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vsxw_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vsxw_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vsxe_v_b, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vsxe_v_h, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vsxe_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vsxe_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_5(vlbff_v_b, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlbff_v_h, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlbff_v_w, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlbff_v_d, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlhff_v_h, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlhff_v_w, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlhff_v_d, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlwff_v_w, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlwff_v_d, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vleff_v_b, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vleff_v_h, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vleff_v_w, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vleff_v_d, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlbuff_v_b, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlbuff_v_h, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlbuff_v_w, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlbuff_v_d, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlhuff_v_h, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlhuff_v_w, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlhuff_v_d, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlwuff_v_w, void, ptr, ptr, tl, env, i32) -DEF_HELPER_5(vlwuff_v_d, void, ptr, ptr, tl, env, i32) -DEF_HELPER_6(vamoswapw_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamoswapd_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamoaddw_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamoaddd_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamoxorw_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamoxord_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamoandw_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamoandd_v_d, void, 
ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamoorw_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamoord_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamominw_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamomind_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamomaxw_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamomaxd_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamominuw_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamominud_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamomaxuw_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamomaxud_v_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamoswapw_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamoaddw_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamoxorw_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamoandw_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamoorw_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamominw_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamomaxw_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamominuw_v_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vamomaxuw_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_5(vle8_v, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vle16_v, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vle32_v, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vle64_v, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vle8_v_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vle16_v_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vle32_v_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vle64_v_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vse8_v, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vse16_v, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vse32_v, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vse64_v, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vse8_v_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vse16_v_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vse32_v_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vse64_v_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlm_v, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vsm_v, void, ptr, ptr, tl, env, i32) +DEF_HELPER_6(vlse8_v, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlse16_v, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlse32_v, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlse64_v, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vsse8_v, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vsse16_v, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vsse32_v, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vsse64_v, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlxei8_8_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxei8_16_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxei8_32_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxei8_64_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxei16_8_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxei16_16_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxei16_32_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxei16_64_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxei32_8_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxei32_16_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxei32_32_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxei32_64_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxei64_8_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxei64_16_v, void, ptr, ptr, tl, ptr, env, i32) 
+DEF_HELPER_6(vlxei64_32_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxei64_64_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxei8_8_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxei8_16_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxei8_32_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxei8_64_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxei16_8_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxei16_16_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxei16_32_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxei16_64_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxei32_8_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxei32_16_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxei32_32_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxei32_64_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxei64_8_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxei64_16_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxei64_32_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxei64_64_v, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_5(vle8ff_v, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vle16ff_v, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vle32ff_v, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vle64ff_v, void, ptr, ptr, tl, env, i32) + +DEF_HELPER_4(vl1re8_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vl1re16_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vl1re32_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vl1re64_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vl2re8_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vl2re16_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vl2re32_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vl2re64_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vl4re8_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vl4re16_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vl4re32_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vl4re64_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vl8re8_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vl8re16_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vl8re32_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vl8re64_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vs1r_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vs2r_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vs4r_v, void, ptr, tl, env, i32) +DEF_HELPER_4(vs8r_v, void, ptr, tl, env, i32) DEF_HELPER_6(vadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32) @@ -432,18 +362,18 @@ DEF_HELPER_6(vsra_vx_h, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vsra_vx_w, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vsra_vx_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vnsrl_vv_b, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vnsrl_vv_h, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vnsrl_vv_w, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vnsra_vv_b, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vnsra_vv_h, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vnsra_vv_w, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vnsrl_vx_b, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vnsrl_vx_h, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vnsrl_vx_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vnsra_vx_b, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vnsra_vx_h, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vnsra_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnsrl_wv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnsrl_wv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnsrl_wv_w, void, ptr, ptr, 
ptr, ptr, env, i32) +DEF_HELPER_6(vnsra_wv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnsra_wv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnsra_wv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnsrl_wx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnsrl_wx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnsrl_wx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnsra_wx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnsra_wx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnsra_wx_w, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vmseq_vv_b, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vmseq_vv_h, void, ptr, ptr, ptr, ptr, env, i32) @@ -729,18 +659,34 @@ DEF_HELPER_6(vaadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vaadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vaadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vaadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vaaddu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vaaddu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vaaddu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vaaddu_vv_d, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vasub_vv_b, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vasub_vv_h, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vasub_vv_w, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vasub_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vasubu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vasubu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vasubu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vasubu_vv_d, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vaadd_vx_b, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vaadd_vx_h, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vaadd_vx_w, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vaadd_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vaaddu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vaaddu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vaaddu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vaaddu_vx_d, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vasub_vx_b, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vasub_vx_h, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vasub_vx_w, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vasub_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vasubu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vasubu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vasubu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vasubu_vx_d, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vsmul_vv_b, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vsmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32) @@ -751,28 +697,6 @@ DEF_HELPER_6(vsmul_vx_h, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vsmul_vx_w, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vsmul_vx_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vwsmaccu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vwsmaccu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vwsmaccu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vwsmacc_vv_b, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vwsmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vwsmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vwsmaccsu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vwsmaccsu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) 
-DEF_HELPER_6(vwsmaccsu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vwsmaccu_vx_b, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vwsmaccu_vx_h, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vwsmaccu_vx_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vwsmacc_vx_b, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vwsmacc_vx_h, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vwsmacc_vx_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vwsmaccsu_vx_b, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vwsmaccsu_vx_h, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vwsmaccsu_vx_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vwsmaccus_vx_b, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vwsmaccus_vx_h, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vwsmaccus_vx_w, void, ptr, ptr, tl, ptr, env, i32) - DEF_HELPER_6(vssrl_vv_b, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vssrl_vv_h, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vssrl_vv_w, void, ptr, ptr, ptr, ptr, env, i32) @@ -790,18 +714,18 @@ DEF_HELPER_6(vssra_vx_h, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vssra_vx_w, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vssra_vx_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vnclip_vv_b, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vnclip_vv_h, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vnclip_vv_w, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vnclipu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vnclipu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vnclipu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vnclipu_vx_b, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vnclipu_vx_h, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vnclipu_vx_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vnclip_vx_b, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vnclip_vx_h, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vnclip_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnclip_wv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnclip_wv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnclip_wv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnclipu_wv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnclipu_wv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnclipu_wv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnclipu_wx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnclipu_wx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnclipu_wx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnclip_wx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnclip_wx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnclip_wx_w, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vfadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vfadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32) @@ -927,6 +851,14 @@ DEF_HELPER_5(vfsqrt_v_h, void, ptr, ptr, ptr, env, i32) DEF_HELPER_5(vfsqrt_v_w, void, ptr, ptr, ptr, env, i32) DEF_HELPER_5(vfsqrt_v_d, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfrsqrt7_v_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfrsqrt7_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfrsqrt7_v_d, void, ptr, ptr, ptr, env, i32) + +DEF_HELPER_5(vfrec7_v_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfrec7_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfrec7_v_d, void, ptr, ptr, ptr, env, i32) + DEF_HELPER_6(vfmin_vv_h, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vfmin_vv_w, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vfmin_vv_d, void, 
ptr, ptr, ptr, ptr, env, i32) @@ -989,12 +921,6 @@ DEF_HELPER_6(vmfgt_vf_d, void, ptr, ptr, i64, ptr, env, i32) DEF_HELPER_6(vmfge_vf_h, void, ptr, ptr, i64, ptr, env, i32) DEF_HELPER_6(vmfge_vf_w, void, ptr, ptr, i64, ptr, env, i32) DEF_HELPER_6(vmfge_vf_d, void, ptr, ptr, i64, ptr, env, i32) -DEF_HELPER_6(vmford_vv_h, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vmford_vv_w, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vmford_vv_d, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vmford_vf_h, void, ptr, ptr, i64, ptr, env, i32) -DEF_HELPER_6(vmford_vf_w, void, ptr, ptr, i64, ptr, env, i32) -DEF_HELPER_6(vmford_vf_d, void, ptr, ptr, i64, ptr, env, i32) DEF_HELPER_5(vfclass_v_h, void, ptr, ptr, ptr, env, i32) DEF_HELPER_5(vfclass_v_w, void, ptr, ptr, ptr, env, i32) @@ -1021,23 +947,27 @@ DEF_HELPER_5(vfwcvt_xu_f_v_h, void, ptr, ptr, ptr, env, i32) DEF_HELPER_5(vfwcvt_xu_f_v_w, void, ptr, ptr, ptr, env, i32) DEF_HELPER_5(vfwcvt_x_f_v_h, void, ptr, ptr, ptr, env, i32) DEF_HELPER_5(vfwcvt_x_f_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfwcvt_f_xu_v_b, void, ptr, ptr, ptr, env, i32) DEF_HELPER_5(vfwcvt_f_xu_v_h, void, ptr, ptr, ptr, env, i32) DEF_HELPER_5(vfwcvt_f_xu_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfwcvt_f_x_v_b, void, ptr, ptr, ptr, env, i32) DEF_HELPER_5(vfwcvt_f_x_v_h, void, ptr, ptr, ptr, env, i32) DEF_HELPER_5(vfwcvt_f_x_v_w, void, ptr, ptr, ptr, env, i32) DEF_HELPER_5(vfwcvt_f_f_v_h, void, ptr, ptr, ptr, env, i32) DEF_HELPER_5(vfwcvt_f_f_v_w, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_5(vfncvt_xu_f_v_h, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_5(vfncvt_xu_f_v_w, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_5(vfncvt_x_f_v_h, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_5(vfncvt_x_f_v_w, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_5(vfncvt_f_xu_v_h, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_5(vfncvt_f_xu_v_w, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_5(vfncvt_f_x_v_h, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_5(vfncvt_f_x_v_w, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_5(vfncvt_f_f_v_h, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_5(vfncvt_f_f_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_xu_f_w_b, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_xu_f_w_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_xu_f_w_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_x_f_w_b, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_x_f_w_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_x_f_w_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_f_xu_w_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_f_xu_w_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_f_x_w_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_f_x_w_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_f_f_w_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_f_f_w_w, void, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vredsum_vs_b, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32) @@ -1079,9 +1009,12 @@ DEF_HELPER_6(vwredsum_vs_b, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vwredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vfredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vfredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vfredsum_vs_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfredusum_vs_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfredusum_vs_w, void, ptr, ptr, ptr, ptr, env, 
i32) +DEF_HELPER_6(vfredusum_vs_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfredosum_vs_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfredosum_vs_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfredosum_vs_d, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vfredmax_vs_h, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vfredmax_vs_w, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vfredmax_vs_d, void, ptr, ptr, ptr, ptr, env, i32) @@ -1089,21 +1022,23 @@ DEF_HELPER_6(vfredmin_vs_h, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vfredmin_vs_w, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vfredmin_vs_d, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vfwredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vfwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwredusum_vs_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwredusum_vs_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwredosum_vs_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwredosum_vs_w, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vmand_mm, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vmnand_mm, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vmandnot_mm, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmandn_mm, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vmxor_mm, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vmor_mm, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vmnor_mm, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_6(vmornot_mm, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmorn_mm, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vmxnor_mm, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_4(vmpopc_m, tl, ptr, ptr, env, i32) +DEF_HELPER_4(vcpop_m, tl, ptr, ptr, env, i32) -DEF_HELPER_4(vmfirst_m, tl, ptr, ptr, env, i32) +DEF_HELPER_4(vfirst_m, tl, ptr, ptr, env, i32) DEF_HELPER_5(vmsbf_m, void, ptr, ptr, ptr, env, i32) DEF_HELPER_5(vmsif_m, void, ptr, ptr, ptr, env, i32) @@ -1136,10 +1071,21 @@ DEF_HELPER_6(vslide1down_vx_h, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vslide1down_vx_w, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vslide1down_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vfslide1up_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfslide1up_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfslide1up_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfslide1down_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfslide1down_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfslide1down_vf_d, void, ptr, ptr, i64, ptr, env, i32) + DEF_HELPER_6(vrgather_vv_b, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vrgather_vv_h, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vrgather_vv_w, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vrgather_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vrgatherei16_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vrgatherei16_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vrgatherei16_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vrgatherei16_vv_d, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vrgather_vx_b, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vrgather_vx_h, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vrgather_vx_w, void, ptr, ptr, tl, ptr, env, i32) @@ -1149,3 +1095,42 @@ DEF_HELPER_6(vcompress_vm_b, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vcompress_vm_h, void, ptr, ptr, ptr, ptr, env, i32) DEF_HELPER_6(vcompress_vm_w, void, ptr, ptr, ptr, ptr, env, i32) 
DEF_HELPER_6(vcompress_vm_d, void, ptr, ptr, ptr, ptr, env, i32) + +DEF_HELPER_4(vmvr_v, void, ptr, ptr, env, i32) + +DEF_HELPER_5(vzext_vf2_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vzext_vf2_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vzext_vf2_d, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vzext_vf4_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vzext_vf4_d, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vzext_vf8_d, void, ptr, ptr, ptr, env, i32) + +DEF_HELPER_5(vsext_vf2_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vsext_vf2_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vsext_vf2_d, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vsext_vf4_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vsext_vf4_d, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vsext_vf8_d, void, ptr, ptr, ptr, env, i32) + +/* 128-bit integer multiplication and division */ +DEF_HELPER_5(divu_i128, tl, env, tl, tl, tl, tl) +DEF_HELPER_5(divs_i128, tl, env, tl, tl, tl, tl) +DEF_HELPER_5(remu_i128, tl, env, tl, tl, tl, tl) +DEF_HELPER_5(rems_i128, tl, env, tl, tl, tl, tl) + +/* Crypto functions */ +DEF_HELPER_FLAGS_3(aes32esmi, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl) +DEF_HELPER_FLAGS_3(aes32esi, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl) +DEF_HELPER_FLAGS_3(aes32dsmi, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl) +DEF_HELPER_FLAGS_3(aes32dsi, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl) + +DEF_HELPER_FLAGS_2(aes64esm, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(aes64es, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(aes64ds, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(aes64dsm, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(aes64ks2, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(aes64ks1i, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_1(aes64im, TCG_CALL_NO_RWG_SE, tl, tl) + +DEF_HELPER_FLAGS_3(sm4ed, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl) +DEF_HELPER_FLAGS_3(sm4ks, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl) diff --git a/target/riscv/insn16.decode b/target/riscv/insn16.decode index 2e9212663c..ccfe59f294 100644 --- a/target/riscv/insn16.decode +++ b/target/riscv/insn16.decode @@ -25,14 +25,18 @@ # Immediates: %imm_ci 12:s1 2:5 %nzuimm_ciw 7:4 11:2 5:1 6:1 !function=ex_shift_2 +%uimm_cl_q 10:1 5:2 11:2 !function=ex_shift_4 %uimm_cl_d 5:2 10:3 !function=ex_shift_3 %uimm_cl_w 5:1 10:3 6:1 !function=ex_shift_2 %imm_cb 12:s1 5:2 2:1 10:2 3:2 !function=ex_shift_1 %imm_cj 12:s1 8:1 9:2 6:1 7:1 2:1 11:1 3:3 !function=ex_shift_1 -%shimm_6bit 12:1 2:5 !function=ex_rvc_shifti +%shlimm_6bit 12:1 2:5 !function=ex_rvc_shiftli +%shrimm_6bit 12:1 2:5 !function=ex_rvc_shiftri +%uimm_6bit_lq 2:4 12:1 6:1 !function=ex_shift_4 %uimm_6bit_ld 2:3 12:1 5:2 !function=ex_shift_3 %uimm_6bit_lw 2:2 12:1 4:3 !function=ex_shift_2 +%uimm_6bit_sq 7:4 11:2 !function=ex_shift_4 %uimm_6bit_sd 7:3 10:3 !function=ex_shift_3 %uimm_6bit_sw 7:2 9:4 !function=ex_shift_2 @@ -54,16 +58,20 @@ # Formats 16: @cr .... ..... ..... .. &r rs2=%rs2_5 rs1=%rd %rd @ci ... . ..... ..... .. &i imm=%imm_ci rs1=%rd %rd +@cl_q ... . ..... ..... .. &i imm=%uimm_cl_q rs1=%rs1_3 rd=%rs2_3 @cl_d ... ... ... .. ... .. &i imm=%uimm_cl_d rs1=%rs1_3 rd=%rs2_3 @cl_w ... ... ... .. ... .. &i imm=%uimm_cl_w rs1=%rs1_3 rd=%rs2_3 @cs_2 ... ... ... .. ... .. &r rs2=%rs2_3 rs1=%rs1_3 rd=%rs1_3 +@cs_q ... ... ... .. ... .. &s imm=%uimm_cl_q rs1=%rs1_3 rs2=%rs2_3 @cs_d ... ... ... .. ... .. &s imm=%uimm_cl_d rs1=%rs1_3 rs2=%rs2_3 @cs_w ... ... ... .. ... .. &s imm=%uimm_cl_w rs1=%rs1_3 rs2=%rs2_3 @cj ... ........... .. &j imm=%imm_cj @cb_z ... ... ... .. ... .. 
&b imm=%imm_cb rs1=%rs1_3 rs2=0 +@c_lqsp ... . ..... ..... .. &i imm=%uimm_6bit_lq rs1=2 %rd @c_ldsp ... . ..... ..... .. &i imm=%uimm_6bit_ld rs1=2 %rd @c_lwsp ... . ..... ..... .. &i imm=%uimm_6bit_lw rs1=2 %rd +@c_sqsp ... . ..... ..... .. &s imm=%uimm_6bit_sq rs1=2 rs2=%rs2_5 @c_sdsp ... . ..... ..... .. &s imm=%uimm_6bit_sd rs1=2 rs2=%rs2_5 @c_swsp ... . ..... ..... .. &s imm=%uimm_6bit_sw rs1=2 rs2=%rs2_5 @c_li ... . ..... ..... .. &i imm=%imm_ci rs1=0 %rd @@ -75,9 +83,9 @@ @c_addi16sp ... . ..... ..... .. &i imm=%imm_addi16sp rs1=2 rd=2 @c_shift ... . .. ... ..... .. \ - &shift rd=%rs1_3 rs1=%rs1_3 shamt=%shimm_6bit + &shift rd=%rs1_3 rs1=%rs1_3 shamt=%shrimm_6bit @c_shift2 ... . .. ... ..... .. \ - &shift rd=%rd rs1=%rd shamt=%shimm_6bit + &shift rd=%rd rs1=%rd shamt=%shlimm_6bit @c_andi ... . .. ... ..... .. &i imm=%imm_ci rs1=%rs1_3 rd=%rs1_3 @@ -87,9 +95,15 @@ illegal 000 000 000 00 --- 00 addi 000 ... ... .. ... 00 @c_addi4spn } -fld 001 ... ... .. ... 00 @cl_d +{ + lq 001 ... ... .. ... 00 @cl_q + fld 001 ... ... .. ... 00 @cl_d +} lw 010 ... ... .. ... 00 @cl_w -fsd 101 ... ... .. ... 00 @cs_d +{ + sq 101 ... ... .. ... 00 @cs_q + fsd 101 ... ... .. ... 00 @cs_d +} sw 110 ... ... .. ... 00 @cs_w # *** RV32C and RV64C specific Standard Extension (Quadrant 0) *** @@ -132,7 +146,10 @@ addw 100 1 11 ... 01 ... 01 @cs_2 # *** RV32/64C Standard Extension (Quadrant 2) *** slli 000 . ..... ..... 10 @c_shift2 -fld 001 . ..... ..... 10 @c_ldsp +{ + lq 001 ... ... .. ... 10 @c_lqsp + fld 001 . ..... ..... 10 @c_ldsp +} { illegal 010 - 00000 ----- 10 # c.lwsp, RES rd=0 lw 010 . ..... ..... 10 @c_lwsp @@ -147,7 +164,10 @@ fld 001 . ..... ..... 10 @c_ldsp jalr 100 1 ..... 00000 10 @c_jalr rd=1 # C.JALR add 100 1 ..... ..... 10 @cr } -fsd 101 ...... ..... 10 @c_sdsp +{ + sq 101 ... ... .. ... 10 @c_sqsp + fsd 101 ...... ..... 10 @c_sdsp +} sw 110 . ..... ..... 10 @c_swsp # *** RV32C and RV64C specific Standard Extension (Quadrant 2) *** diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index f09f8d5faf..d0253b8104 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -22,6 +22,7 @@ %rs1 15:5 %rd 7:5 %sh5 20:5 +%sh6 20:6 %sh7 20:7 %csr 20:12 @@ -34,6 +35,8 @@ %imm_b 31:s1 7:1 25:6 8:4 !function=ex_shift_1 %imm_j 31:s1 12:8 20:1 21:10 !function=ex_shift_1 %imm_u 12:s20 !function=ex_shift_12 +%imm_bs 30:2 !function=ex_shift_3 +%imm_rnum 20:4 # Argument sets: &empty @@ -42,15 +45,16 @@ &j imm rd &r rd rs1 rs2 &r2 rd rs1 +&r2_s rs1 rs2 &s imm rs1 rs2 &u imm rd &shift shamt rs1 rd &atomic aq rl rs2 rs1 rd &rmrr vm rd rs1 rs2 &rmr vm rd rs2 -&rwdvm vm wd rd rs1 rs2 &r2nfvm vm rd rs1 nf &rnfvm vm rd rs1 rs2 nf +&k_aes shamt rs2 rs1 rd # Formats 32: @r ....... ..... ..... ... ..... ....... &r %rs2 %rs1 %rd @@ -78,8 +82,8 @@ @r_vm ...... vm:1 ..... ..... ... ..... ....... &rmrr %rs2 %rs1 %rd @r_vm_1 ...... . ..... ..... ... ..... ....... &rmrr vm=1 %rs2 %rs1 %rd @r_vm_0 ...... . ..... ..... ... ..... ....... &rmrr vm=0 %rs2 %rs1 %rd -@r_wdvm ..... wd:1 vm:1 ..... ..... ... ..... ....... &rwdvm %rs2 %rs1 %rd -@r2_zimm . zimm:11 ..... ... ..... ....... %rs1 %rd +@r2_zimm11 . zimm:11 ..... ... ..... ....... %rs1 %rd +@r2_zimm10 .. zimm:10 ..... ... ..... ....... %rs1 %rd @r2_s ....... ..... ..... ... ..... ....... %rs2 %rs1 @hfence_gvma ....... ..... ..... ... ..... ....... %rs2 %rs1 @@ -88,9 +92,15 @@ @sfence_vma ....... ..... ..... ... ..... ....... %rs2 %rs1 @sfence_vm ....... ..... ..... ... ..... ....... %rs1 +@k_aes .. ..... ..... ..... ... ..... ....... 
&k_aes shamt=%imm_bs %rs2 %rs1 %rd +@i_aes .. ..... ..... ..... ... ..... ....... &i imm=%imm_rnum %rs1 %rd + # Formats 64: @sh5 ....... ..... ..... ... ..... ....... &shift shamt=%sh5 %rs1 %rd +# Formats 128: +@sh6 ...... ...... ..... ... ..... ....... &shift shamt=%sh6 %rs1 %rd + # *** Privileged Instructions *** ecall 000000000000 00000 000 00000 1110011 ebreak 000000000001 00000 000 00000 1110011 @@ -139,7 +149,12 @@ srl 0000000 ..... ..... 101 ..... 0110011 @r sra 0100000 ..... ..... 101 ..... 0110011 @r or 0000000 ..... ..... 110 ..... 0110011 @r and 0000000 ..... ..... 111 ..... 0110011 @r -fence ---- pred:4 succ:4 ----- 000 ----- 0001111 + +{ + pause 0000 0001 0000 00000 000 00000 0001111 + fence ---- pred:4 succ:4 ----- 000 ----- 0001111 +} + fence_i ---- ---- ---- ----- 001 ----- 0001111 csrrw ............ ..... 001 ..... 1110011 @csr csrrs ............ ..... 010 ..... 1110011 @csr @@ -162,6 +177,20 @@ sllw 0000000 ..... ..... 001 ..... 0111011 @r srlw 0000000 ..... ..... 101 ..... 0111011 @r sraw 0100000 ..... ..... 101 ..... 0111011 @r +# *** RV128I Base Instruction Set (in addition to RV64I) *** +ldu ............ ..... 111 ..... 0000011 @i +lq ............ ..... 010 ..... 0001111 @i +sq ............ ..... 100 ..... 0100011 @s +addid ............ ..... 000 ..... 1011011 @i +sllid 000000 ...... ..... 001 ..... 1011011 @sh6 +srlid 000000 ...... ..... 101 ..... 1011011 @sh6 +sraid 010000 ...... ..... 101 ..... 1011011 @sh6 +addd 0000000 ..... ..... 000 ..... 1111011 @r +subd 0100000 ..... ..... 000 ..... 1111011 @r +slld 0000000 ..... ..... 001 ..... 1111011 @r +srld 0000000 ..... ..... 101 ..... 1111011 @r +srad 0100000 ..... ..... 101 ..... 1111011 @r + # *** RV32M Standard Extension *** mul 0000001 ..... ..... 000 ..... 0110011 @r mulh 0000001 ..... ..... 001 ..... 0110011 @r @@ -179,6 +208,13 @@ divuw 0000001 ..... ..... 101 ..... 0111011 @r remw 0000001 ..... ..... 110 ..... 0111011 @r remuw 0000001 ..... ..... 111 ..... 0111011 @r +# *** RV128M Standard Extension (in addition to RV64M) *** +muld 0000001 ..... ..... 000 ..... 1111011 @r +divd 0000001 ..... ..... 100 ..... 1111011 @r +divud 0000001 ..... ..... 101 ..... 1111011 @r +remd 0000001 ..... ..... 110 ..... 1111011 @r +remud 0000001 ..... ..... 111 ..... 1111011 @r + # *** RV32A Standard Extension *** lr_w 00010 . . 00000 ..... 010 ..... 0101111 @atom_ld sc_w 00011 . . ..... ..... 010 ..... 0101111 @atom_st @@ -295,60 +331,69 @@ hlv_d 0110110 00000 ..... 100 ..... 1110011 @r2 hsv_d 0110111 ..... ..... 100 00000 1110011 @r2_s # *** Vector loads and stores are encoded within LOADFP/STORE-FP *** -vlb_v ... 100 . 00000 ..... 000 ..... 0000111 @r2_nfvm -vlh_v ... 100 . 00000 ..... 101 ..... 0000111 @r2_nfvm -vlw_v ... 100 . 00000 ..... 110 ..... 0000111 @r2_nfvm -vle_v ... 000 . 00000 ..... 111 ..... 0000111 @r2_nfvm -vlbu_v ... 000 . 00000 ..... 000 ..... 0000111 @r2_nfvm -vlhu_v ... 000 . 00000 ..... 101 ..... 0000111 @r2_nfvm -vlwu_v ... 000 . 00000 ..... 110 ..... 0000111 @r2_nfvm -vlbff_v ... 100 . 10000 ..... 000 ..... 0000111 @r2_nfvm -vlhff_v ... 100 . 10000 ..... 101 ..... 0000111 @r2_nfvm -vlwff_v ... 100 . 10000 ..... 110 ..... 0000111 @r2_nfvm -vleff_v ... 000 . 10000 ..... 111 ..... 0000111 @r2_nfvm -vlbuff_v ... 000 . 10000 ..... 000 ..... 0000111 @r2_nfvm -vlhuff_v ... 000 . 10000 ..... 101 ..... 0000111 @r2_nfvm -vlwuff_v ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm -vsb_v ... 000 . 00000 ..... 000 ..... 0100111 @r2_nfvm -vsh_v ... 000 . 00000 ..... 101 ..... 0100111 @r2_nfvm -vsw_v ... 000 . 
00000 ..... 110 ..... 0100111 @r2_nfvm -vse_v ... 000 . 00000 ..... 111 ..... 0100111 @r2_nfvm +# Vector unit-stride load/store insns. +vle8_v ... 000 . 00000 ..... 000 ..... 0000111 @r2_nfvm +vle16_v ... 000 . 00000 ..... 101 ..... 0000111 @r2_nfvm +vle32_v ... 000 . 00000 ..... 110 ..... 0000111 @r2_nfvm +vle64_v ... 000 . 00000 ..... 111 ..... 0000111 @r2_nfvm +vse8_v ... 000 . 00000 ..... 000 ..... 0100111 @r2_nfvm +vse16_v ... 000 . 00000 ..... 101 ..... 0100111 @r2_nfvm +vse32_v ... 000 . 00000 ..... 110 ..... 0100111 @r2_nfvm +vse64_v ... 000 . 00000 ..... 111 ..... 0100111 @r2_nfvm -vlsb_v ... 110 . ..... ..... 000 ..... 0000111 @r_nfvm -vlsh_v ... 110 . ..... ..... 101 ..... 0000111 @r_nfvm -vlsw_v ... 110 . ..... ..... 110 ..... 0000111 @r_nfvm -vlse_v ... 010 . ..... ..... 111 ..... 0000111 @r_nfvm -vlsbu_v ... 010 . ..... ..... 000 ..... 0000111 @r_nfvm -vlshu_v ... 010 . ..... ..... 101 ..... 0000111 @r_nfvm -vlswu_v ... 010 . ..... ..... 110 ..... 0000111 @r_nfvm -vssb_v ... 010 . ..... ..... 000 ..... 0100111 @r_nfvm -vssh_v ... 010 . ..... ..... 101 ..... 0100111 @r_nfvm -vssw_v ... 010 . ..... ..... 110 ..... 0100111 @r_nfvm -vsse_v ... 010 . ..... ..... 111 ..... 0100111 @r_nfvm +# Vector unit-stride mask load/store insns. +vlm_v 000 000 1 01011 ..... 000 ..... 0000111 @r2 +vsm_v 000 000 1 01011 ..... 000 ..... 0100111 @r2 + +# Vector strided insns. +vlse8_v ... 010 . ..... ..... 000 ..... 0000111 @r_nfvm +vlse16_v ... 010 . ..... ..... 101 ..... 0000111 @r_nfvm +vlse32_v ... 010 . ..... ..... 110 ..... 0000111 @r_nfvm +vlse64_v ... 010 . ..... ..... 111 ..... 0000111 @r_nfvm +vsse8_v ... 010 . ..... ..... 000 ..... 0100111 @r_nfvm +vsse16_v ... 010 . ..... ..... 101 ..... 0100111 @r_nfvm +vsse32_v ... 010 . ..... ..... 110 ..... 0100111 @r_nfvm +vsse64_v ... 010 . ..... ..... 111 ..... 0100111 @r_nfvm + +# Vector ordered-indexed and unordered-indexed load insns. +vlxei8_v ... 0-1 . ..... ..... 000 ..... 0000111 @r_nfvm +vlxei16_v ... 0-1 . ..... ..... 101 ..... 0000111 @r_nfvm +vlxei32_v ... 0-1 . ..... ..... 110 ..... 0000111 @r_nfvm +vlxei64_v ... 0-1 . ..... ..... 111 ..... 0000111 @r_nfvm -vlxb_v ... 111 . ..... ..... 000 ..... 0000111 @r_nfvm -vlxh_v ... 111 . ..... ..... 101 ..... 0000111 @r_nfvm -vlxw_v ... 111 . ..... ..... 110 ..... 0000111 @r_nfvm -vlxe_v ... 011 . ..... ..... 111 ..... 0000111 @r_nfvm -vlxbu_v ... 011 . ..... ..... 000 ..... 0000111 @r_nfvm -vlxhu_v ... 011 . ..... ..... 101 ..... 0000111 @r_nfvm -vlxwu_v ... 011 . ..... ..... 110 ..... 0000111 @r_nfvm # Vector ordered-indexed and unordered-indexed store insns. -vsxb_v ... -11 . ..... ..... 000 ..... 0100111 @r_nfvm -vsxh_v ... -11 . ..... ..... 101 ..... 0100111 @r_nfvm -vsxw_v ... -11 . ..... ..... 110 ..... 0100111 @r_nfvm -vsxe_v ... -11 . ..... ..... 111 ..... 0100111 @r_nfvm +vsxei8_v ... 0-1 . ..... ..... 000 ..... 0100111 @r_nfvm +vsxei16_v ... 0-1 . ..... ..... 101 ..... 0100111 @r_nfvm +vsxei32_v ... 0-1 . ..... ..... 110 ..... 0100111 @r_nfvm +vsxei64_v ... 0-1 . ..... ..... 111 ..... 0100111 @r_nfvm -#*** Vector AMO operations are encoded under the standard AMO major opcode *** -vamoswapw_v 00001 . . ..... ..... 110 ..... 0101111 @r_wdvm -vamoaddw_v 00000 . . ..... ..... 110 ..... 0101111 @r_wdvm -vamoxorw_v 00100 . . ..... ..... 110 ..... 0101111 @r_wdvm -vamoandw_v 01100 . . ..... ..... 110 ..... 0101111 @r_wdvm -vamoorw_v 01000 . . ..... ..... 110 ..... 0101111 @r_wdvm -vamominw_v 10000 . . ..... ..... 110 ..... 0101111 @r_wdvm -vamomaxw_v 10100 . . ..... ..... 110 ..... 
0101111 @r_wdvm -vamominuw_v 11000 . . ..... ..... 110 ..... 0101111 @r_wdvm -vamomaxuw_v 11100 . . ..... ..... 110 ..... 0101111 @r_wdvm +# Vector unit-stride fault-only-first load insns. +vle8ff_v ... 000 . 10000 ..... 000 ..... 0000111 @r2_nfvm +vle16ff_v ... 000 . 10000 ..... 101 ..... 0000111 @r2_nfvm +vle32ff_v ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm +vle64ff_v ... 000 . 10000 ..... 111 ..... 0000111 @r2_nfvm + +# Vector whole register insns +vl1re8_v 000 000 1 01000 ..... 000 ..... 0000111 @r2 +vl1re16_v 000 000 1 01000 ..... 101 ..... 0000111 @r2 +vl1re32_v 000 000 1 01000 ..... 110 ..... 0000111 @r2 +vl1re64_v 000 000 1 01000 ..... 111 ..... 0000111 @r2 +vl2re8_v 001 000 1 01000 ..... 000 ..... 0000111 @r2 +vl2re16_v 001 000 1 01000 ..... 101 ..... 0000111 @r2 +vl2re32_v 001 000 1 01000 ..... 110 ..... 0000111 @r2 +vl2re64_v 001 000 1 01000 ..... 111 ..... 0000111 @r2 +vl4re8_v 011 000 1 01000 ..... 000 ..... 0000111 @r2 +vl4re16_v 011 000 1 01000 ..... 101 ..... 0000111 @r2 +vl4re32_v 011 000 1 01000 ..... 110 ..... 0000111 @r2 +vl4re64_v 011 000 1 01000 ..... 111 ..... 0000111 @r2 +vl8re8_v 111 000 1 01000 ..... 000 ..... 0000111 @r2 +vl8re16_v 111 000 1 01000 ..... 101 ..... 0000111 @r2 +vl8re32_v 111 000 1 01000 ..... 110 ..... 0000111 @r2 +vl8re64_v 111 000 1 01000 ..... 111 ..... 0000111 @r2 +vs1r_v 000 000 1 01000 ..... 000 ..... 0100111 @r2 +vs2r_v 001 000 1 01000 ..... 000 ..... 0100111 @r2 +vs4r_v 011 000 1 01000 ..... 000 ..... 0100111 @r2 +vs8r_v 111 000 1 01000 ..... 000 ..... 0100111 @r2 # *** new major opcode OP-V *** vadd_vv 000000 . ..... ..... 000 ..... 1010111 @r_vm @@ -374,16 +419,16 @@ vwsubu_wv 110110 . ..... ..... 010 ..... 1010111 @r_vm vwsubu_wx 110110 . ..... ..... 110 ..... 1010111 @r_vm vwsub_wv 110111 . ..... ..... 010 ..... 1010111 @r_vm vwsub_wx 110111 . ..... ..... 110 ..... 1010111 @r_vm -vadc_vvm 010000 1 ..... ..... 000 ..... 1010111 @r_vm_1 -vadc_vxm 010000 1 ..... ..... 100 ..... 1010111 @r_vm_1 -vadc_vim 010000 1 ..... ..... 011 ..... 1010111 @r_vm_1 -vmadc_vvm 010001 1 ..... ..... 000 ..... 1010111 @r_vm_1 -vmadc_vxm 010001 1 ..... ..... 100 ..... 1010111 @r_vm_1 -vmadc_vim 010001 1 ..... ..... 011 ..... 1010111 @r_vm_1 -vsbc_vvm 010010 1 ..... ..... 000 ..... 1010111 @r_vm_1 -vsbc_vxm 010010 1 ..... ..... 100 ..... 1010111 @r_vm_1 -vmsbc_vvm 010011 1 ..... ..... 000 ..... 1010111 @r_vm_1 -vmsbc_vxm 010011 1 ..... ..... 100 ..... 1010111 @r_vm_1 +vadc_vvm 010000 0 ..... ..... 000 ..... 1010111 @r_vm_1 +vadc_vxm 010000 0 ..... ..... 100 ..... 1010111 @r_vm_1 +vadc_vim 010000 0 ..... ..... 011 ..... 1010111 @r_vm_1 +vmadc_vvm 010001 . ..... ..... 000 ..... 1010111 @r_vm +vmadc_vxm 010001 . ..... ..... 100 ..... 1010111 @r_vm +vmadc_vim 010001 . ..... ..... 011 ..... 1010111 @r_vm +vsbc_vvm 010010 0 ..... ..... 000 ..... 1010111 @r_vm_1 +vsbc_vxm 010010 0 ..... ..... 100 ..... 1010111 @r_vm_1 +vmsbc_vvm 010011 . ..... ..... 000 ..... 1010111 @r_vm +vmsbc_vxm 010011 . ..... ..... 100 ..... 1010111 @r_vm vand_vv 001001 . ..... ..... 000 ..... 1010111 @r_vm vand_vx 001001 . ..... ..... 100 ..... 1010111 @r_vm vand_vi 001001 . ..... ..... 011 ..... 1010111 @r_vm @@ -402,12 +447,12 @@ vsrl_vi 101000 . ..... ..... 011 ..... 1010111 @r_vm vsra_vv 101001 . ..... ..... 000 ..... 1010111 @r_vm vsra_vx 101001 . ..... ..... 100 ..... 1010111 @r_vm vsra_vi 101001 . ..... ..... 011 ..... 1010111 @r_vm -vnsrl_vv 101100 . ..... ..... 000 ..... 1010111 @r_vm -vnsrl_vx 101100 . ..... ..... 100 ..... 1010111 @r_vm -vnsrl_vi 101100 . ..... ..... 011 ..... 
1010111 @r_vm -vnsra_vv 101101 . ..... ..... 000 ..... 1010111 @r_vm -vnsra_vx 101101 . ..... ..... 100 ..... 1010111 @r_vm -vnsra_vi 101101 . ..... ..... 011 ..... 1010111 @r_vm +vnsrl_wv 101100 . ..... ..... 000 ..... 1010111 @r_vm +vnsrl_wx 101100 . ..... ..... 100 ..... 1010111 @r_vm +vnsrl_wi 101100 . ..... ..... 011 ..... 1010111 @r_vm +vnsra_wv 101101 . ..... ..... 000 ..... 1010111 @r_vm +vnsra_wx 101101 . ..... ..... 100 ..... 1010111 @r_vm +vnsra_wi 101101 . ..... ..... 011 ..... 1010111 @r_vm vmseq_vv 011000 . ..... ..... 000 ..... 1010111 @r_vm vmseq_vx 011000 . ..... ..... 100 ..... 1010111 @r_vm vmseq_vi 011000 . ..... ..... 011 ..... 1010111 @r_vm @@ -470,9 +515,9 @@ vwmaccu_vv 111100 . ..... ..... 010 ..... 1010111 @r_vm vwmaccu_vx 111100 . ..... ..... 110 ..... 1010111 @r_vm vwmacc_vv 111101 . ..... ..... 010 ..... 1010111 @r_vm vwmacc_vx 111101 . ..... ..... 110 ..... 1010111 @r_vm -vwmaccsu_vv 111110 . ..... ..... 010 ..... 1010111 @r_vm -vwmaccsu_vx 111110 . ..... ..... 110 ..... 1010111 @r_vm -vwmaccus_vx 111111 . ..... ..... 110 ..... 1010111 @r_vm +vwmaccsu_vv 111111 . ..... ..... 010 ..... 1010111 @r_vm +vwmaccsu_vx 111111 . ..... ..... 110 ..... 1010111 @r_vm +vwmaccus_vx 111110 . ..... ..... 110 ..... 1010111 @r_vm vmv_v_v 010111 1 00000 ..... 000 ..... 1010111 @r2 vmv_v_x 010111 1 00000 ..... 100 ..... 1010111 @r2 vmv_v_i 010111 1 00000 ..... 011 ..... 1010111 @r2 @@ -489,32 +534,28 @@ vssubu_vv 100010 . ..... ..... 000 ..... 1010111 @r_vm vssubu_vx 100010 . ..... ..... 100 ..... 1010111 @r_vm vssub_vv 100011 . ..... ..... 000 ..... 1010111 @r_vm vssub_vx 100011 . ..... ..... 100 ..... 1010111 @r_vm -vaadd_vv 100100 . ..... ..... 000 ..... 1010111 @r_vm -vaadd_vx 100100 . ..... ..... 100 ..... 1010111 @r_vm -vaadd_vi 100100 . ..... ..... 011 ..... 1010111 @r_vm -vasub_vv 100110 . ..... ..... 000 ..... 1010111 @r_vm -vasub_vx 100110 . ..... ..... 100 ..... 1010111 @r_vm +vaadd_vv 001001 . ..... ..... 010 ..... 1010111 @r_vm +vaadd_vx 001001 . ..... ..... 110 ..... 1010111 @r_vm +vaaddu_vv 001000 . ..... ..... 010 ..... 1010111 @r_vm +vaaddu_vx 001000 . ..... ..... 110 ..... 1010111 @r_vm +vasub_vv 001011 . ..... ..... 010 ..... 1010111 @r_vm +vasub_vx 001011 . ..... ..... 110 ..... 1010111 @r_vm +vasubu_vv 001010 . ..... ..... 010 ..... 1010111 @r_vm +vasubu_vx 001010 . ..... ..... 110 ..... 1010111 @r_vm vsmul_vv 100111 . ..... ..... 000 ..... 1010111 @r_vm vsmul_vx 100111 . ..... ..... 100 ..... 1010111 @r_vm -vwsmaccu_vv 111100 . ..... ..... 000 ..... 1010111 @r_vm -vwsmaccu_vx 111100 . ..... ..... 100 ..... 1010111 @r_vm -vwsmacc_vv 111101 . ..... ..... 000 ..... 1010111 @r_vm -vwsmacc_vx 111101 . ..... ..... 100 ..... 1010111 @r_vm -vwsmaccsu_vv 111110 . ..... ..... 000 ..... 1010111 @r_vm -vwsmaccsu_vx 111110 . ..... ..... 100 ..... 1010111 @r_vm -vwsmaccus_vx 111111 . ..... ..... 100 ..... 1010111 @r_vm vssrl_vv 101010 . ..... ..... 000 ..... 1010111 @r_vm vssrl_vx 101010 . ..... ..... 100 ..... 1010111 @r_vm vssrl_vi 101010 . ..... ..... 011 ..... 1010111 @r_vm vssra_vv 101011 . ..... ..... 000 ..... 1010111 @r_vm vssra_vx 101011 . ..... ..... 100 ..... 1010111 @r_vm vssra_vi 101011 . ..... ..... 011 ..... 1010111 @r_vm -vnclipu_vv 101110 . ..... ..... 000 ..... 1010111 @r_vm -vnclipu_vx 101110 . ..... ..... 100 ..... 1010111 @r_vm -vnclipu_vi 101110 . ..... ..... 011 ..... 1010111 @r_vm -vnclip_vv 101111 . ..... ..... 000 ..... 1010111 @r_vm -vnclip_vx 101111 . ..... ..... 100 ..... 1010111 @r_vm -vnclip_vi 101111 . ..... ..... 011 ..... 
1010111 @r_vm +vnclipu_wv 101110 . ..... ..... 000 ..... 1010111 @r_vm +vnclipu_wx 101110 . ..... ..... 100 ..... 1010111 @r_vm +vnclipu_wi 101110 . ..... ..... 011 ..... 1010111 @r_vm +vnclip_wv 101111 . ..... ..... 000 ..... 1010111 @r_vm +vnclip_wx 101111 . ..... ..... 100 ..... 1010111 @r_vm +vnclip_wi 101111 . ..... ..... 011 ..... 1010111 @r_vm vfadd_vv 000000 . ..... ..... 001 ..... 1010111 @r_vm vfadd_vf 000000 . ..... ..... 101 ..... 1010111 @r_vm vfsub_vv 000010 . ..... ..... 001 ..... 1010111 @r_vm @@ -559,7 +600,9 @@ vfwmsac_vv 111110 . ..... ..... 001 ..... 1010111 @r_vm vfwmsac_vf 111110 . ..... ..... 101 ..... 1010111 @r_vm vfwnmsac_vv 111111 . ..... ..... 001 ..... 1010111 @r_vm vfwnmsac_vf 111111 . ..... ..... 101 ..... 1010111 @r_vm -vfsqrt_v 100011 . ..... 00000 001 ..... 1010111 @r2_vm +vfsqrt_v 010011 . ..... 00000 001 ..... 1010111 @r2_vm +vfrsqrt7_v 010011 . ..... 00100 001 ..... 1010111 @r2_vm +vfrec7_v 010011 . ..... 00101 001 ..... 1010111 @r2_vm vfmin_vv 000100 . ..... ..... 001 ..... 1010111 @r_vm vfmin_vf 000100 . ..... ..... 101 ..... 1010111 @r_vm vfmax_vv 000110 . ..... ..... 001 ..... 1010111 @r_vm @@ -570,6 +613,8 @@ vfsgnjn_vv 001001 . ..... ..... 001 ..... 1010111 @r_vm vfsgnjn_vf 001001 . ..... ..... 101 ..... 1010111 @r_vm vfsgnjx_vv 001010 . ..... ..... 001 ..... 1010111 @r_vm vfsgnjx_vf 001010 . ..... ..... 101 ..... 1010111 @r_vm +vfslide1up_vf 001110 . ..... ..... 101 ..... 1010111 @r_vm +vfslide1down_vf 001111 . ..... ..... 101 ..... 1010111 @r_vm vmfeq_vv 011000 . ..... ..... 001 ..... 1010111 @r_vm vmfeq_vf 011000 . ..... ..... 101 ..... 1010111 @r_vm vmfne_vv 011100 . ..... ..... 001 ..... 1010111 @r_vm @@ -580,25 +625,34 @@ vmfle_vv 011001 . ..... ..... 001 ..... 1010111 @r_vm vmfle_vf 011001 . ..... ..... 101 ..... 1010111 @r_vm vmfgt_vf 011101 . ..... ..... 101 ..... 1010111 @r_vm vmfge_vf 011111 . ..... ..... 101 ..... 1010111 @r_vm -vmford_vv 011010 . ..... ..... 001 ..... 1010111 @r_vm -vmford_vf 011010 . ..... ..... 101 ..... 1010111 @r_vm -vfclass_v 100011 . ..... 10000 001 ..... 1010111 @r2_vm +vfclass_v 010011 . ..... 10000 001 ..... 1010111 @r2_vm vfmerge_vfm 010111 0 ..... ..... 101 ..... 1010111 @r_vm_0 vfmv_v_f 010111 1 00000 ..... 101 ..... 1010111 @r2 -vfcvt_xu_f_v 100010 . ..... 00000 001 ..... 1010111 @r2_vm -vfcvt_x_f_v 100010 . ..... 00001 001 ..... 1010111 @r2_vm -vfcvt_f_xu_v 100010 . ..... 00010 001 ..... 1010111 @r2_vm -vfcvt_f_x_v 100010 . ..... 00011 001 ..... 1010111 @r2_vm -vfwcvt_xu_f_v 100010 . ..... 01000 001 ..... 1010111 @r2_vm -vfwcvt_x_f_v 100010 . ..... 01001 001 ..... 1010111 @r2_vm -vfwcvt_f_xu_v 100010 . ..... 01010 001 ..... 1010111 @r2_vm -vfwcvt_f_x_v 100010 . ..... 01011 001 ..... 1010111 @r2_vm -vfwcvt_f_f_v 100010 . ..... 01100 001 ..... 1010111 @r2_vm -vfncvt_xu_f_v 100010 . ..... 10000 001 ..... 1010111 @r2_vm -vfncvt_x_f_v 100010 . ..... 10001 001 ..... 1010111 @r2_vm -vfncvt_f_xu_v 100010 . ..... 10010 001 ..... 1010111 @r2_vm -vfncvt_f_x_v 100010 . ..... 10011 001 ..... 1010111 @r2_vm -vfncvt_f_f_v 100010 . ..... 10100 001 ..... 1010111 @r2_vm + +vfcvt_xu_f_v 010010 . ..... 00000 001 ..... 1010111 @r2_vm +vfcvt_x_f_v 010010 . ..... 00001 001 ..... 1010111 @r2_vm +vfcvt_f_xu_v 010010 . ..... 00010 001 ..... 1010111 @r2_vm +vfcvt_f_x_v 010010 . ..... 00011 001 ..... 1010111 @r2_vm +vfcvt_rtz_xu_f_v 010010 . ..... 00110 001 ..... 1010111 @r2_vm +vfcvt_rtz_x_f_v 010010 . ..... 00111 001 ..... 1010111 @r2_vm + +vfwcvt_xu_f_v 010010 . ..... 01000 001 ..... 1010111 @r2_vm +vfwcvt_x_f_v 010010 . ..... 
01001 001 ..... 1010111 @r2_vm +vfwcvt_f_xu_v 010010 . ..... 01010 001 ..... 1010111 @r2_vm +vfwcvt_f_x_v 010010 . ..... 01011 001 ..... 1010111 @r2_vm +vfwcvt_f_f_v 010010 . ..... 01100 001 ..... 1010111 @r2_vm +vfwcvt_rtz_xu_f_v 010010 . ..... 01110 001 ..... 1010111 @r2_vm +vfwcvt_rtz_x_f_v 010010 . ..... 01111 001 ..... 1010111 @r2_vm + +vfncvt_xu_f_w 010010 . ..... 10000 001 ..... 1010111 @r2_vm +vfncvt_x_f_w 010010 . ..... 10001 001 ..... 1010111 @r2_vm +vfncvt_f_xu_w 010010 . ..... 10010 001 ..... 1010111 @r2_vm +vfncvt_f_x_w 010010 . ..... 10011 001 ..... 1010111 @r2_vm +vfncvt_f_f_w 010010 . ..... 10100 001 ..... 1010111 @r2_vm +vfncvt_rod_f_f_w 010010 . ..... 10101 001 ..... 1010111 @r2_vm +vfncvt_rtz_xu_f_w 010010 . ..... 10110 001 ..... 1010111 @r2_vm +vfncvt_rtz_x_f_w 010010 . ..... 10111 001 ..... 1010111 @r2_vm + vredsum_vs 000000 . ..... ..... 010 ..... 1010111 @r_vm vredand_vs 000001 . ..... ..... 010 ..... 1010111 @r_vm vredor_vs 000010 . ..... ..... 010 ..... 1010111 @r_vm @@ -610,30 +664,32 @@ vredmax_vs 000111 . ..... ..... 010 ..... 1010111 @r_vm vwredsumu_vs 110000 . ..... ..... 000 ..... 1010111 @r_vm vwredsum_vs 110001 . ..... ..... 000 ..... 1010111 @r_vm # Vector ordered and unordered reduction sum -vfredsum_vs 0000-1 . ..... ..... 001 ..... 1010111 @r_vm +vfredusum_vs 000001 . ..... ..... 001 ..... 1010111 @r_vm +vfredosum_vs 000011 . ..... ..... 001 ..... 1010111 @r_vm vfredmin_vs 000101 . ..... ..... 001 ..... 1010111 @r_vm vfredmax_vs 000111 . ..... ..... 001 ..... 1010111 @r_vm # Vector widening ordered and unordered float reduction sum -vfwredsum_vs 1100-1 . ..... ..... 001 ..... 1010111 @r_vm +vfwredusum_vs 110001 . ..... ..... 001 ..... 1010111 @r_vm +vfwredosum_vs 110011 . ..... ..... 001 ..... 1010111 @r_vm vmand_mm 011001 - ..... ..... 010 ..... 1010111 @r vmnand_mm 011101 - ..... ..... 010 ..... 1010111 @r -vmandnot_mm 011000 - ..... ..... 010 ..... 1010111 @r +vmandn_mm 011000 - ..... ..... 010 ..... 1010111 @r vmxor_mm 011011 - ..... ..... 010 ..... 1010111 @r vmor_mm 011010 - ..... ..... 010 ..... 1010111 @r vmnor_mm 011110 - ..... ..... 010 ..... 1010111 @r -vmornot_mm 011100 - ..... ..... 010 ..... 1010111 @r +vmorn_mm 011100 - ..... ..... 010 ..... 1010111 @r vmxnor_mm 011111 - ..... ..... 010 ..... 1010111 @r -vmpopc_m 010100 . ..... ----- 010 ..... 1010111 @r2_vm -vmfirst_m 010101 . ..... ----- 010 ..... 1010111 @r2_vm -vmsbf_m 010110 . ..... 00001 010 ..... 1010111 @r2_vm -vmsif_m 010110 . ..... 00011 010 ..... 1010111 @r2_vm -vmsof_m 010110 . ..... 00010 010 ..... 1010111 @r2_vm -viota_m 010110 . ..... 10000 010 ..... 1010111 @r2_vm -vid_v 010110 . 00000 10001 010 ..... 1010111 @r1_vm -vext_x_v 001100 1 ..... ..... 010 ..... 1010111 @r -vmv_s_x 001101 1 00000 ..... 110 ..... 1010111 @r2 -vfmv_f_s 001100 1 ..... 00000 001 ..... 1010111 @r2rd -vfmv_s_f 001101 1 00000 ..... 101 ..... 1010111 @r2 +vcpop_m 010000 . ..... 10000 010 ..... 1010111 @r2_vm +vfirst_m 010000 . ..... 10001 010 ..... 1010111 @r2_vm +vmsbf_m 010100 . ..... 00001 010 ..... 1010111 @r2_vm +vmsif_m 010100 . ..... 00011 010 ..... 1010111 @r2_vm +vmsof_m 010100 . ..... 00010 010 ..... 1010111 @r2_vm +viota_m 010100 . ..... 10000 010 ..... 1010111 @r2_vm +vid_v 010100 . 00000 10001 010 ..... 1010111 @r1_vm +vmv_x_s 010000 1 ..... 00000 010 ..... 1010111 @r2rd +vmv_s_x 010000 1 00000 ..... 110 ..... 1010111 @r2 +vfmv_f_s 010000 1 ..... 00000 001 ..... 1010111 @r2rd +vfmv_s_f 010000 1 00000 ..... 101 ..... 1010111 @r2 vslideup_vx 001110 . ..... ..... 100 ..... 
1010111 @r_vm vslideup_vi 001110 . ..... ..... 011 ..... 1010111 @r_vm vslide1up_vx 001110 . ..... ..... 110 ..... 1010111 @r_vm @@ -641,94 +697,192 @@ vslidedown_vx 001111 . ..... ..... 100 ..... 1010111 @r_vm vslidedown_vi 001111 . ..... ..... 011 ..... 1010111 @r_vm vslide1down_vx 001111 . ..... ..... 110 ..... 1010111 @r_vm vrgather_vv 001100 . ..... ..... 000 ..... 1010111 @r_vm +vrgatherei16_vv 001110 . ..... ..... 000 ..... 1010111 @r_vm vrgather_vx 001100 . ..... ..... 100 ..... 1010111 @r_vm vrgather_vi 001100 . ..... ..... 011 ..... 1010111 @r_vm vcompress_vm 010111 - ..... ..... 010 ..... 1010111 @r +vmv1r_v 100111 1 ..... 00000 011 ..... 1010111 @r2rd +vmv2r_v 100111 1 ..... 00001 011 ..... 1010111 @r2rd +vmv4r_v 100111 1 ..... 00011 011 ..... 1010111 @r2rd +vmv8r_v 100111 1 ..... 00111 011 ..... 1010111 @r2rd -vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm +# Vector Integer Extension +vzext_vf2 010010 . ..... 00110 010 ..... 1010111 @r2_vm +vzext_vf4 010010 . ..... 00100 010 ..... 1010111 @r2_vm +vzext_vf8 010010 . ..... 00010 010 ..... 1010111 @r2_vm +vsext_vf2 010010 . ..... 00111 010 ..... 1010111 @r2_vm +vsext_vf4 010010 . ..... 00101 010 ..... 1010111 @r2_vm +vsext_vf8 010010 . ..... 00011 010 ..... 1010111 @r2_vm + +vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm11 +vsetivli 11 .......... ..... 111 ..... 1010111 @r2_zimm10 vsetvl 1000000 ..... ..... 111 ..... 1010111 @r -#*** Vector AMO operations (in addition to Zvamo) *** -vamoswapd_v 00001 . . ..... ..... 111 ..... 0101111 @r_wdvm -vamoaddd_v 00000 . . ..... ..... 111 ..... 0101111 @r_wdvm -vamoxord_v 00100 . . ..... ..... 111 ..... 0101111 @r_wdvm -vamoandd_v 01100 . . ..... ..... 111 ..... 0101111 @r_wdvm -vamoord_v 01000 . . ..... ..... 111 ..... 0101111 @r_wdvm -vamomind_v 10000 . . ..... ..... 111 ..... 0101111 @r_wdvm -vamomaxd_v 10100 . . ..... ..... 111 ..... 0101111 @r_wdvm -vamominud_v 11000 . . ..... ..... 111 ..... 0101111 @r_wdvm -vamomaxud_v 11100 . . ..... ..... 111 ..... 0101111 @r_wdvm - -# *** RV32B Standard Extension *** -clz 011000 000000 ..... 001 ..... 0010011 @r2 -ctz 011000 000001 ..... 001 ..... 0010011 @r2 -cpop 011000 000010 ..... 001 ..... 0010011 @r2 -sext_b 011000 000100 ..... 001 ..... 0010011 @r2 -sext_h 011000 000101 ..... 001 ..... 0010011 @r2 - -andn 0100000 .......... 111 ..... 0110011 @r -orn 0100000 .......... 110 ..... 0110011 @r -xnor 0100000 .......... 100 ..... 0110011 @r -pack 0000100 .......... 100 ..... 0110011 @r -packu 0100100 .......... 100 ..... 0110011 @r -packh 0000100 .......... 111 ..... 0110011 @r -min 0000101 .......... 100 ..... 0110011 @r -minu 0000101 .......... 101 ..... 0110011 @r -max 0000101 .......... 110 ..... 0110011 @r -maxu 0000101 .......... 111 ..... 0110011 @r -bset 0010100 .......... 001 ..... 0110011 @r -bclr 0100100 .......... 001 ..... 0110011 @r -binv 0110100 .......... 001 ..... 0110011 @r -bext 0100100 .......... 101 ..... 0110011 @r -slo 0010000 .......... 001 ..... 0110011 @r -sro 0010000 .......... 101 ..... 0110011 @r -ror 0110000 .......... 101 ..... 0110011 @r -rol 0110000 .......... 001 ..... 0110011 @r -grev 0110100 .......... 101 ..... 0110011 @r -gorc 0010100 .......... 101 ..... 0110011 @r +# *** RV32 Zba Standard Extension *** sh1add 0010000 .......... 010 ..... 0110011 @r sh2add 0010000 .......... 100 ..... 0110011 @r sh3add 0010000 .......... 110 ..... 0110011 @r -bseti 00101. ........... 001 ..... 0010011 @sh -bclri 01001. ........... 001 ..... 0010011 @sh -binvi 01101. ........... 001 ..... 
0010011 @sh -bexti 01001. ........... 101 ..... 0010011 @sh -sloi 00100. ........... 001 ..... 0010011 @sh -sroi 00100. ........... 101 ..... 0010011 @sh -rori 01100. ........... 101 ..... 0010011 @sh -grevi 01101. ........... 101 ..... 0010011 @sh -gorci 00101. ........... 101 ..... 0010011 @sh +# *** RV64 Zba Standard Extension (in addition to RV32 Zba) *** +add_uw 0000100 .......... 000 ..... 0111011 @r +sh1add_uw 0010000 .......... 010 ..... 0111011 @r +sh2add_uw 0010000 .......... 100 ..... 0111011 @r +sh3add_uw 0010000 .......... 110 ..... 0111011 @r +slli_uw 00001 ............ 001 ..... 0011011 @sh -# *** RV64B Standard Extension (in addition to RV32B) *** +# *** RV32 Zbb/Zbkb Standard Extension *** +andn 0100000 .......... 111 ..... 0110011 @r +rol 0110000 .......... 001 ..... 0110011 @r +ror 0110000 .......... 101 ..... 0110011 @r +rori 01100 ............ 101 ..... 0010011 @sh +# The encoding for rev8 differs between RV32 and RV64. +# rev8_32 denotes the RV32 variant. +rev8_32 011010 011000 ..... 101 ..... 0010011 @r2 +# The encoding for zext.h differs between RV32 and RV64. +# zext_h_32 denotes the RV32 variant. +{ + zext_h_32 0000100 00000 ..... 100 ..... 0110011 @r2 + pack 0000100 ..... ..... 100 ..... 0110011 @r +} +xnor 0100000 .......... 100 ..... 0110011 @r +# *** RV32 extra Zbb Standard Extension *** +clz 011000 000000 ..... 001 ..... 0010011 @r2 +cpop 011000 000010 ..... 001 ..... 0010011 @r2 +ctz 011000 000001 ..... 001 ..... 0010011 @r2 +max 0000101 .......... 110 ..... 0110011 @r +maxu 0000101 .......... 111 ..... 0110011 @r +min 0000101 .......... 100 ..... 0110011 @r +minu 0000101 .......... 101 ..... 0110011 @r +orc_b 001010 000111 ..... 101 ..... 0010011 @r2 +orn 0100000 .......... 110 ..... 0110011 @r +sext_b 011000 000100 ..... 001 ..... 0010011 @r2 +sext_h 011000 000101 ..... 001 ..... 0010011 @r2 +# *** RV32 extra Zbkb Standard Extension *** +brev8 0110100 00111 ..... 101 ..... 0010011 @r2 #grevi +packh 0000100 .......... 111 ..... 0110011 @r +unzip 0000100 01111 ..... 101 ..... 0010011 @r2 #unshfl +zip 0000100 01111 ..... 001 ..... 0010011 @r2 #shfl + +# *** RV64 Zbb/Zbkb Standard Extension (in addition to RV32 Zbb/Zbkb) *** +# The encoding for rev8 differs between RV32 and RV64. +# When executing on RV64, the encoding used in RV32 is an illegal +# instruction, so we use different handler functions to differentiate. +rev8_64 011010 111000 ..... 101 ..... 0010011 @r2 +rolw 0110000 .......... 001 ..... 0111011 @r +roriw 0110000 .......... 101 ..... 0011011 @sh5 +rorw 0110000 .......... 101 ..... 0111011 @r +# The encoding for zext.h differs between RV32 and RV64. +# When executing on RV64, the encoding used in RV32 is an illegal +# instruction, so we use different handler functions to differentiate. +{ + zext_h_64 0000100 00000 ..... 100 ..... 0111011 @r2 + packw 0000100 ..... ..... 100 ..... 0111011 @r +} +# *** RV64 extra Zbb Standard Extension (in addition to RV32 Zbb) *** clzw 0110000 00000 ..... 001 ..... 0011011 @r2 ctzw 0110000 00001 ..... 001 ..... 0011011 @r2 cpopw 0110000 00010 ..... 001 ..... 0011011 @r2 -packw 0000100 .......... 100 ..... 0111011 @r -packuw 0100100 .......... 100 ..... 0111011 @r -bsetw 0010100 .......... 001 ..... 0111011 @r -bclrw 0100100 .......... 001 ..... 0111011 @r -binvw 0110100 .......... 001 ..... 0111011 @r -bextw 0100100 .......... 101 ..... 0111011 @r -slow 0010000 .......... 001 ..... 0111011 @r -srow 0010000 .......... 101 ..... 0111011 @r -rorw 0110000 .......... 101 ..... 0111011 @r -rolw 0110000 .......... 001 ..... 
0111011 @r -grevw 0110100 .......... 101 ..... 0111011 @r -gorcw 0010100 .......... 101 ..... 0111011 @r -sh1add_uw 0010000 .......... 010 ..... 0111011 @r -sh2add_uw 0010000 .......... 100 ..... 0111011 @r -sh3add_uw 0010000 .......... 110 ..... 0111011 @r -add_uw 0000100 .......... 000 ..... 0111011 @r +# *** RV32 Zbc/Zbkc Standard Extension *** +clmul 0000101 .......... 001 ..... 0110011 @r +clmulh 0000101 .......... 011 ..... 0110011 @r +# *** RV32 extra Zbc Standard Extension *** +clmulr 0000101 .......... 010 ..... 0110011 @r -bsetiw 0010100 .......... 001 ..... 0011011 @sh5 -bclriw 0100100 .......... 001 ..... 0011011 @sh5 -binviw 0110100 .......... 001 ..... 0011011 @sh5 -sloiw 0010000 .......... 001 ..... 0011011 @sh5 -sroiw 0010000 .......... 101 ..... 0011011 @sh5 -roriw 0110000 .......... 101 ..... 0011011 @sh5 -greviw 0110100 .......... 101 ..... 0011011 @sh5 -gorciw 0010100 .......... 101 ..... 0011011 @sh5 +# *** RV32 Zbkx Standard Extension *** +xperm4 0010100 .......... 010 ..... 0110011 @r +xperm8 0010100 .......... 100 ..... 0110011 @r -slli_uw 00001. ........... 001 ..... 0011011 @sh +# *** RV32 Zbs Standard Extension *** +bclr 0100100 .......... 001 ..... 0110011 @r +bclri 01001. ........... 001 ..... 0010011 @sh +bext 0100100 .......... 101 ..... 0110011 @r +bexti 01001. ........... 101 ..... 0010011 @sh +binv 0110100 .......... 001 ..... 0110011 @r +binvi 01101. ........... 001 ..... 0010011 @sh +bset 0010100 .......... 001 ..... 0110011 @r +bseti 00101. ........... 001 ..... 0010011 @sh + +# *** RV32 Zfh Extension *** +flh ............ ..... 001 ..... 0000111 @i +fsh ....... ..... ..... 001 ..... 0100111 @s +fmadd_h ..... 10 ..... ..... ... ..... 1000011 @r4_rm +fmsub_h ..... 10 ..... ..... ... ..... 1000111 @r4_rm +fnmsub_h ..... 10 ..... ..... ... ..... 1001011 @r4_rm +fnmadd_h ..... 10 ..... ..... ... ..... 1001111 @r4_rm +fadd_h 0000010 ..... ..... ... ..... 1010011 @r_rm +fsub_h 0000110 ..... ..... ... ..... 1010011 @r_rm +fmul_h 0001010 ..... ..... ... ..... 1010011 @r_rm +fdiv_h 0001110 ..... ..... ... ..... 1010011 @r_rm +fsqrt_h 0101110 00000 ..... ... ..... 1010011 @r2_rm +fsgnj_h 0010010 ..... ..... 000 ..... 1010011 @r +fsgnjn_h 0010010 ..... ..... 001 ..... 1010011 @r +fsgnjx_h 0010010 ..... ..... 010 ..... 1010011 @r +fmin_h 0010110 ..... ..... 000 ..... 1010011 @r +fmax_h 0010110 ..... ..... 001 ..... 1010011 @r +fcvt_h_s 0100010 00000 ..... ... ..... 1010011 @r2_rm +fcvt_s_h 0100000 00010 ..... ... ..... 1010011 @r2_rm +fcvt_h_d 0100010 00001 ..... ... ..... 1010011 @r2_rm +fcvt_d_h 0100001 00010 ..... ... ..... 1010011 @r2_rm +fcvt_w_h 1100010 00000 ..... ... ..... 1010011 @r2_rm +fcvt_wu_h 1100010 00001 ..... ... ..... 1010011 @r2_rm +fmv_x_h 1110010 00000 ..... 000 ..... 1010011 @r2 +feq_h 1010010 ..... ..... 010 ..... 1010011 @r +flt_h 1010010 ..... ..... 001 ..... 1010011 @r +fle_h 1010010 ..... ..... 000 ..... 1010011 @r +fclass_h 1110010 00000 ..... 001 ..... 1010011 @r2 +fcvt_h_w 1101010 00000 ..... ... ..... 1010011 @r2_rm +fcvt_h_wu 1101010 00001 ..... ... ..... 1010011 @r2_rm +fmv_h_x 1111010 00000 ..... 000 ..... 1010011 @r2 + +# *** RV64 Zfh Extension (in addition to RV32 Zfh) *** +fcvt_l_h 1100010 00010 ..... ... ..... 1010011 @r2_rm +fcvt_lu_h 1100010 00011 ..... ... ..... 1010011 @r2_rm +fcvt_h_l 1101010 00010 ..... ... ..... 1010011 @r2_rm +fcvt_h_lu 1101010 00011 ..... ... ..... 1010011 @r2_rm + +# *** Svinval Standard Extension *** +sinval_vma 0001011 ..... ..... 
000 00000 1110011 @sfence_vma +sfence_w_inval 0001100 00000 00000 000 00000 1110011 +sfence_inval_ir 0001100 00001 00000 000 00000 1110011 +hinval_vvma 0010011 ..... ..... 000 00000 1110011 @hfence_vvma +hinval_gvma 0110011 ..... ..... 000 00000 1110011 @hfence_gvma + +# *** RV32 Zknd Standard Extension *** +aes32dsmi .. 10111 ..... ..... 000 ..... 0110011 @k_aes +aes32dsi .. 10101 ..... ..... 000 ..... 0110011 @k_aes +# *** RV64 Zknd Standard Extension *** +aes64dsm 00 11111 ..... ..... 000 ..... 0110011 @r +aes64ds 00 11101 ..... ..... 000 ..... 0110011 @r +aes64im 00 11000 00000 ..... 001 ..... 0010011 @r2 +# *** RV32 Zkne Standard Extension *** +aes32esmi .. 10011 ..... ..... 000 ..... 0110011 @k_aes +aes32esi .. 10001 ..... ..... 000 ..... 0110011 @k_aes +# *** RV64 Zkne Standard Extension *** +aes64es 00 11001 ..... ..... 000 ..... 0110011 @r +aes64esm 00 11011 ..... ..... 000 ..... 0110011 @r +# *** RV64 Zkne/zknd Standard Extension *** +aes64ks2 01 11111 ..... ..... 000 ..... 0110011 @r +aes64ks1i 00 11000 1.... ..... 001 ..... 0010011 @i_aes +# *** RV32 Zknh Standard Extension *** +sha256sig0 00 01000 00010 ..... 001 ..... 0010011 @r2 +sha256sig1 00 01000 00011 ..... 001 ..... 0010011 @r2 +sha256sum0 00 01000 00000 ..... 001 ..... 0010011 @r2 +sha256sum1 00 01000 00001 ..... 001 ..... 0010011 @r2 +sha512sum0r 01 01000 ..... ..... 000 ..... 0110011 @r +sha512sum1r 01 01001 ..... ..... 000 ..... 0110011 @r +sha512sig0l 01 01010 ..... ..... 000 ..... 0110011 @r +sha512sig0h 01 01110 ..... ..... 000 ..... 0110011 @r +sha512sig1l 01 01011 ..... ..... 000 ..... 0110011 @r +sha512sig1h 01 01111 ..... ..... 000 ..... 0110011 @r +# *** RV64 Zknh Standard Extension *** +sha512sig0 00 01000 00110 ..... 001 ..... 0010011 @r2 +sha512sig1 00 01000 00111 ..... 001 ..... 0010011 @r2 +sha512sum0 00 01000 00100 ..... 001 ..... 0010011 @r2 +sha512sum1 00 01000 00101 ..... 001 ..... 0010011 @r2 +# *** RV32 Zksh Standard Extension *** +sm3p0 00 01000 01000 ..... 001 ..... 0010011 @r2 +sm3p1 00 01000 01001 ..... 001 ..... 0010011 @r2 +# *** RV32 Zksed Standard Extension *** +sm4ed .. 11000 ..... ..... 000 ..... 0110011 @k_aes +sm4ks .. 11010 ..... ..... 000 ..... 0110011 @k_aes diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc index 32312be202..74e2894462 100644 --- a/target/riscv/insn_trans/trans_privileged.c.inc +++ b/target/riscv/insn_trans/trans_privileged.c.inc @@ -22,8 +22,6 @@ static bool trans_ecall(DisasContext *ctx, arg_ecall *a) { /* always generates U-level ECALL, fixed in do_interrupt handler */ generate_exception(ctx, RISCV_EXCP_U_ECALL); - exit_tb(ctx); /* no chaining */ - ctx->base.is_jmp = DISAS_NORETURN; return true; } @@ -54,19 +52,18 @@ static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a) * that no exception will be raised when fetching them. 
*/ - if ((pre_addr & TARGET_PAGE_MASK) == (post_addr & TARGET_PAGE_MASK)) { + if (semihosting_enabled(ctx->mem_idx < PRV_S) && + (pre_addr & TARGET_PAGE_MASK) == (post_addr & TARGET_PAGE_MASK)) { pre = opcode_at(&ctx->base, pre_addr); ebreak = opcode_at(&ctx->base, ebreak_addr); post = opcode_at(&ctx->base, post_addr); } - if (pre == 0x01f01013 && ebreak == 0x00100073 && post == 0x40705013) { + if (pre == 0x01f01013 && ebreak == 0x00100073 && post == 0x40705013) { generate_exception(ctx, RISCV_EXCP_SEMIHOST); } else { generate_exception(ctx, RISCV_EXCP_BREAKPOINT); } - exit_tb(ctx); /* no chaining */ - ctx->base.is_jmp = DISAS_NORETURN; return true; } @@ -78,11 +75,13 @@ static bool trans_uret(DisasContext *ctx, arg_uret *a) static bool trans_sret(DisasContext *ctx, arg_sret *a) { #ifndef CONFIG_USER_ONLY - tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next); - if (has_ext(ctx, RVS)) { - gen_helper_sret(cpu_pc, cpu_env, cpu_pc); - exit_tb(ctx); /* no chaining */ + decode_save_opc(ctx); + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } + gen_helper_sret(cpu_pc, cpu_env); + tcg_gen_exit_tb(NULL, 0); /* no chaining */ ctx->base.is_jmp = DISAS_NORETURN; } else { return false; @@ -96,9 +95,12 @@ static bool trans_sret(DisasContext *ctx, arg_sret *a) static bool trans_mret(DisasContext *ctx, arg_mret *a) { #ifndef CONFIG_USER_ONLY - tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next); - gen_helper_mret(cpu_pc, cpu_env, cpu_pc); - exit_tb(ctx); /* no chaining */ + decode_save_opc(ctx); + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } + gen_helper_mret(cpu_pc, cpu_env); + tcg_gen_exit_tb(NULL, 0); /* no chaining */ ctx->base.is_jmp = DISAS_NORETURN; return true; #else @@ -109,7 +111,8 @@ static bool trans_mret(DisasContext *ctx, arg_mret *a) static bool trans_wfi(DisasContext *ctx, arg_wfi *a) { #ifndef CONFIG_USER_ONLY - tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn); + decode_save_opc(ctx); + gen_set_pc_imm(ctx, ctx->pc_succ_insn); gen_helper_wfi(cpu_env); return true; #else @@ -120,6 +123,7 @@ static bool trans_wfi(DisasContext *ctx, arg_wfi *a) static bool trans_sfence_vma(DisasContext *ctx, arg_sfence_vma *a) { #ifndef CONFIG_USER_ONLY + decode_save_opc(ctx); gen_helper_tlb_flush(cpu_env); return true; #endif diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc index ab2ec4f0a5..45db82c9be 100644 --- a/target/riscv/insn_trans/trans_rva.c.inc +++ b/target/riscv/insn_trans/trans_rva.c.inc @@ -18,11 +18,10 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ -static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop) +static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop) { - TCGv src1 = tcg_temp_new(); - /* Put addr in load_res, data in load_val. */ - gen_get_gpr(src1, a->rs1); + TCGv src1 = get_address(ctx, a->rs1, 0); + if (a->rl) { tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); } @@ -30,33 +29,33 @@ static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop) if (a->aq) { tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); } - tcg_gen_mov_tl(load_res, src1); - gen_set_gpr(a->rd, load_val); - tcg_temp_free(src1); + /* Put addr in load_res, data in load_val.
*/ + tcg_gen_mov_tl(load_res, src1); + gen_set_gpr(ctx, a->rd, load_val); + return true; } -static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop) +static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop) { - TCGv src1 = tcg_temp_new(); - TCGv src2 = tcg_temp_new(); - TCGv dat = tcg_temp_new(); + TCGv dest, src1, src2; TCGLabel *l1 = gen_new_label(); TCGLabel *l2 = gen_new_label(); - gen_get_gpr(src1, a->rs1); + src1 = get_address(ctx, a->rs1, 0); tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1); - gen_get_gpr(src2, a->rs2); /* * Note that the TCG atomic primitives are SC, * so we can ignore AQ/RL along this path. */ - tcg_gen_atomic_cmpxchg_tl(src1, load_res, load_val, src2, + dest = dest_gpr(ctx, a->rd); + src2 = get_gpr(ctx, a->rs2, EXT_NONE); + tcg_gen_atomic_cmpxchg_tl(dest, load_res, load_val, src2, ctx->mem_idx, mop); - tcg_gen_setcond_tl(TCG_COND_NE, dat, src1, load_val); - gen_set_gpr(a->rd, dat); + tcg_gen_setcond_tl(TCG_COND_NE, dest, dest, load_val); + gen_set_gpr(ctx, a->rd, dest); tcg_gen_br(l2); gen_set_label(l1); @@ -65,8 +64,7 @@ static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop) * provide the memory barrier implied by AQ/RL. */ tcg_gen_mb(TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + a->rl * TCG_BAR_STRL); - tcg_gen_movi_tl(dat, 1); - gen_set_gpr(a->rd, dat); + gen_set_gpr(ctx, a->rd, tcg_constant_tl(1)); gen_set_label(l2); /* @@ -75,9 +73,6 @@ static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop) */ tcg_gen_movi_tl(load_res, -1); - tcg_temp_free(dat); - tcg_temp_free(src1); - tcg_temp_free(src2); return true; } @@ -85,17 +80,13 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a, void(*func)(TCGv, TCGv, TCGv, TCGArg, MemOp), MemOp mop) { - TCGv src1 = tcg_temp_new(); - TCGv src2 = tcg_temp_new(); + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1 = get_address(ctx, a->rs1, 0); + TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); - gen_get_gpr(src1, a->rs1); - gen_get_gpr(src2, a->rs2); + func(dest, src1, src2, ctx->mem_idx, mop); - (*func)(src2, src1, src2, ctx->mem_idx, mop); - - gen_set_gpr(a->rd, src2); - tcg_temp_free(src1); - tcg_temp_free(src2); + gen_set_gpr(ctx, a->rd, dest); return true; } @@ -168,65 +159,65 @@ static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a) static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a) { REQUIRE_64BIT(ctx); - return gen_lr(ctx, a, MO_ALIGN | MO_TEQ); + return gen_lr(ctx, a, MO_ALIGN | MO_TEUQ); } static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a) { REQUIRE_64BIT(ctx); - return gen_sc(ctx, a, (MO_ALIGN | MO_TEQ)); + return gen_sc(ctx, a, (MO_ALIGN | MO_TEUQ)); } static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a) { REQUIRE_64BIT(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a) { REQUIRE_64BIT(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a) { REQUIRE_64BIT(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a) { REQUIRE_64BIT(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEQ)); + return gen_amo(ctx, a, 
&tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a) { REQUIRE_64BIT(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a) { REQUIRE_64BIT(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a) { REQUIRE_64BIT(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a) { REQUIRE_64BIT(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a) { REQUIRE_64BIT(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEUQ)); } diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc index 9e81f6e3de..e2b8329f1e 100644 --- a/target/riscv/insn_trans/trans_rvb.c.inc +++ b/target/riscv/insn_trans/trans_rvb.c.inc @@ -1,8 +1,9 @@ /* - * RISC-V translation routines for the RVB Standard Extension. + * RISC-V translation routines for the Zb[abcs] and Zbk[bcx] Standard Extension. * * Copyright (c) 2020 Kito Cheng, kito.cheng@sifive.com * Copyright (c) 2020 Frank Chang, frank.chang@sifive.com + * Copyright (c) 2021 Philipp Tomsich, philipp.tomsich@vrull.eu * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -17,422 +18,577 @@ * this program. If not, see <http://www.gnu.org/licenses/>.
*/ +#define REQUIRE_ZBA(ctx) do { \ + if (!ctx->cfg_ptr->ext_zba) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_ZBB(ctx) do { \ + if (!ctx->cfg_ptr->ext_zbb) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_ZBC(ctx) do { \ + if (!ctx->cfg_ptr->ext_zbc) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_ZBS(ctx) do { \ + if (!ctx->cfg_ptr->ext_zbs) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_ZBKB(ctx) do { \ + if (!ctx->cfg_ptr->ext_zbkb) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_ZBKX(ctx) do { \ + if (!ctx->cfg_ptr->ext_zbkx) { \ + return false; \ + } \ +} while (0) + +static void gen_clz(TCGv ret, TCGv arg1) +{ + tcg_gen_clzi_tl(ret, arg1, TARGET_LONG_BITS); +} + +static void gen_clzw(TCGv ret, TCGv arg1) +{ + TCGv t = tcg_temp_new(); + tcg_gen_shli_tl(t, arg1, 32); + tcg_gen_clzi_tl(ret, t, 32); + tcg_temp_free(t); +} + static bool trans_clz(DisasContext *ctx, arg_clz *a) { - REQUIRE_EXT(ctx, RVB); - return gen_unary(ctx, a, gen_clz); + REQUIRE_ZBB(ctx); + return gen_unary_per_ol(ctx, a, EXT_NONE, gen_clz, gen_clzw); +} + +static void gen_ctz(TCGv ret, TCGv arg1) +{ + tcg_gen_ctzi_tl(ret, arg1, TARGET_LONG_BITS); +} + +static void gen_ctzw(TCGv ret, TCGv arg1) +{ + tcg_gen_ctzi_tl(ret, arg1, 32); } static bool trans_ctz(DisasContext *ctx, arg_ctz *a) { - REQUIRE_EXT(ctx, RVB); - return gen_unary(ctx, a, gen_ctz); + REQUIRE_ZBB(ctx); + return gen_unary_per_ol(ctx, a, EXT_ZERO, gen_ctz, gen_ctzw); } static bool trans_cpop(DisasContext *ctx, arg_cpop *a) { - REQUIRE_EXT(ctx, RVB); - return gen_unary(ctx, a, tcg_gen_ctpop_tl); + REQUIRE_ZBB(ctx); + return gen_unary(ctx, a, EXT_ZERO, tcg_gen_ctpop_tl); } static bool trans_andn(DisasContext *ctx, arg_andn *a) { - REQUIRE_EXT(ctx, RVB); - return gen_arith(ctx, a, tcg_gen_andc_tl); + REQUIRE_EITHER_EXT(ctx, zbb, zbkb); + return gen_logic(ctx, a, tcg_gen_andc_tl); } static bool trans_orn(DisasContext *ctx, arg_orn *a) { - REQUIRE_EXT(ctx, RVB); - return gen_arith(ctx, a, tcg_gen_orc_tl); + REQUIRE_EITHER_EXT(ctx, zbb, zbkb); + return gen_logic(ctx, a, tcg_gen_orc_tl); } static bool trans_xnor(DisasContext *ctx, arg_xnor *a) { - REQUIRE_EXT(ctx, RVB); - return gen_arith(ctx, a, tcg_gen_eqv_tl); -} - -static bool trans_pack(DisasContext *ctx, arg_pack *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_arith(ctx, a, gen_pack); -} - -static bool trans_packu(DisasContext *ctx, arg_packu *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_arith(ctx, a, gen_packu); -} - -static bool trans_packh(DisasContext *ctx, arg_packh *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_arith(ctx, a, gen_packh); + REQUIRE_EITHER_EXT(ctx, zbb, zbkb); + return gen_logic(ctx, a, tcg_gen_eqv_tl); } static bool trans_min(DisasContext *ctx, arg_min *a) { - REQUIRE_EXT(ctx, RVB); - return gen_arith(ctx, a, tcg_gen_smin_tl); + REQUIRE_ZBB(ctx); + return gen_arith(ctx, a, EXT_SIGN, tcg_gen_smin_tl, NULL); } static bool trans_max(DisasContext *ctx, arg_max *a) { - REQUIRE_EXT(ctx, RVB); - return gen_arith(ctx, a, tcg_gen_smax_tl); + REQUIRE_ZBB(ctx); + return gen_arith(ctx, a, EXT_SIGN, tcg_gen_smax_tl, NULL); } static bool trans_minu(DisasContext *ctx, arg_minu *a) { - REQUIRE_EXT(ctx, RVB); - return gen_arith(ctx, a, tcg_gen_umin_tl); + REQUIRE_ZBB(ctx); + return gen_arith(ctx, a, EXT_SIGN, tcg_gen_umin_tl, NULL); } static bool trans_maxu(DisasContext *ctx, arg_maxu *a) { - REQUIRE_EXT(ctx, RVB); - return gen_arith(ctx, a, tcg_gen_umax_tl); + REQUIRE_ZBB(ctx); + return gen_arith(ctx, a, EXT_SIGN, tcg_gen_umax_tl, NULL); } 
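Aside on the count-leading/trailing-zero helpers in the hunk above (a host-side sketch, not patch content): the final operand of tcg_gen_clzi_tl/tcg_gen_ctzi_tl is the value returned for a zero input, which is how gen_clz/gen_ctz deliver the architected clz(0) = ctz(0) = XLEN result without a branch, and gen_clzw first shifts the 32-bit operand into the upper half of a 64-bit value so that a full-width clz yields the 32-bit count. A plain-C equivalent of the same computation, assuming a GCC/Clang host for the builtins (the function names here are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors gen_clzw: shift the 32-bit operand into bits [63:32], then a
     * full-width clz gives the 32-bit count; 32 is the defined result for 0. */
    static int clzw(uint64_t arg1)
    {
        uint64_t t = arg1 << 32;                  /* tcg_gen_shli_tl(t, arg1, 32) */
        return t ? __builtin_clzll(t) : 32;       /* tcg_gen_clzi_tl(ret, t, 32)  */
    }

    /* Mirrors gen_ctzw with an EXT_ZERO source: upper bits already cleared. */
    static int ctzw(uint32_t arg1)
    {
        return arg1 ? __builtin_ctzll(arg1) : 32; /* tcg_gen_ctzi_tl(ret, arg1, 32) */
    }

    int main(void)
    {
        printf("clzw(0x00010000) = %d\n", clzw(0x00010000));       /* 15 */
        printf("ctzw(0x00010000) = %d\n", ctzw(0x00010000));       /* 16 */
        printf("clzw(0) = %d, ctzw(0) = %d\n", clzw(0), ctzw(0));  /* 32, 32 */
        return 0;
    }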
static bool trans_sext_b(DisasContext *ctx, arg_sext_b *a) { - REQUIRE_EXT(ctx, RVB); - return gen_unary(ctx, a, tcg_gen_ext8s_tl); + REQUIRE_ZBB(ctx); + return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext8s_tl); } static bool trans_sext_h(DisasContext *ctx, arg_sext_h *a) { - REQUIRE_EXT(ctx, RVB); - return gen_unary(ctx, a, tcg_gen_ext16s_tl); + REQUIRE_ZBB(ctx); + return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext16s_tl); +} + +static void gen_sbop_mask(TCGv ret, TCGv shamt) +{ + tcg_gen_movi_tl(ret, 1); + tcg_gen_shl_tl(ret, ret, shamt); +} + +static void gen_bset(TCGv ret, TCGv arg1, TCGv shamt) +{ + TCGv t = tcg_temp_new(); + + gen_sbop_mask(t, shamt); + tcg_gen_or_tl(ret, arg1, t); + + tcg_temp_free(t); } static bool trans_bset(DisasContext *ctx, arg_bset *a) { - REQUIRE_EXT(ctx, RVB); - return gen_shift(ctx, a, gen_bset); + REQUIRE_ZBS(ctx); + return gen_shift(ctx, a, EXT_NONE, gen_bset, NULL); } static bool trans_bseti(DisasContext *ctx, arg_bseti *a) { - REQUIRE_EXT(ctx, RVB); - return gen_shifti(ctx, a, gen_bset); + REQUIRE_ZBS(ctx); + return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_bset); +} + +static void gen_bclr(TCGv ret, TCGv arg1, TCGv shamt) +{ + TCGv t = tcg_temp_new(); + + gen_sbop_mask(t, shamt); + tcg_gen_andc_tl(ret, arg1, t); + + tcg_temp_free(t); } static bool trans_bclr(DisasContext *ctx, arg_bclr *a) { - REQUIRE_EXT(ctx, RVB); - return gen_shift(ctx, a, gen_bclr); + REQUIRE_ZBS(ctx); + return gen_shift(ctx, a, EXT_NONE, gen_bclr, NULL); } static bool trans_bclri(DisasContext *ctx, arg_bclri *a) { - REQUIRE_EXT(ctx, RVB); - return gen_shifti(ctx, a, gen_bclr); + REQUIRE_ZBS(ctx); + return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_bclr); +} + +static void gen_binv(TCGv ret, TCGv arg1, TCGv shamt) +{ + TCGv t = tcg_temp_new(); + + gen_sbop_mask(t, shamt); + tcg_gen_xor_tl(ret, arg1, t); + + tcg_temp_free(t); } static bool trans_binv(DisasContext *ctx, arg_binv *a) { - REQUIRE_EXT(ctx, RVB); - return gen_shift(ctx, a, gen_binv); + REQUIRE_ZBS(ctx); + return gen_shift(ctx, a, EXT_NONE, gen_binv, NULL); } static bool trans_binvi(DisasContext *ctx, arg_binvi *a) { - REQUIRE_EXT(ctx, RVB); - return gen_shifti(ctx, a, gen_binv); + REQUIRE_ZBS(ctx); + return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_binv); +} + +static void gen_bext(TCGv ret, TCGv arg1, TCGv shamt) +{ + tcg_gen_shr_tl(ret, arg1, shamt); + tcg_gen_andi_tl(ret, ret, 1); } static bool trans_bext(DisasContext *ctx, arg_bext *a) { - REQUIRE_EXT(ctx, RVB); - return gen_shift(ctx, a, gen_bext); + REQUIRE_ZBS(ctx); + return gen_shift(ctx, a, EXT_NONE, gen_bext, NULL); } static bool trans_bexti(DisasContext *ctx, arg_bexti *a) { - REQUIRE_EXT(ctx, RVB); - return gen_shifti(ctx, a, gen_bext); + REQUIRE_ZBS(ctx); + return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_bext); } -static bool trans_slo(DisasContext *ctx, arg_slo *a) +static void gen_rorw(TCGv ret, TCGv arg1, TCGv arg2) { - REQUIRE_EXT(ctx, RVB); - return gen_shift(ctx, a, gen_slo); -} + TCGv_i32 t1 = tcg_temp_new_i32(); + TCGv_i32 t2 = tcg_temp_new_i32(); -static bool trans_sloi(DisasContext *ctx, arg_sloi *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_shifti(ctx, a, gen_slo); -} + /* truncate to 32-bits */ + tcg_gen_trunc_tl_i32(t1, arg1); + tcg_gen_trunc_tl_i32(t2, arg2); -static bool trans_sro(DisasContext *ctx, arg_sro *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_shift(ctx, a, gen_sro); -} + tcg_gen_rotr_i32(t1, t1, t2); -static bool trans_sroi(DisasContext *ctx, arg_sroi *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_shifti(ctx, a, gen_sro); + /* sign-extend 64-bits */ + 
tcg_gen_ext_i32_tl(ret, t1); + + tcg_temp_free_i32(t1); + tcg_temp_free_i32(t2); } static bool trans_ror(DisasContext *ctx, arg_ror *a) { - REQUIRE_EXT(ctx, RVB); - return gen_shift(ctx, a, tcg_gen_rotr_tl); + REQUIRE_EITHER_EXT(ctx, zbb, zbkb); + return gen_shift_per_ol(ctx, a, EXT_NONE, tcg_gen_rotr_tl, gen_rorw, NULL); +} + +static void gen_roriw(TCGv ret, TCGv arg1, target_long shamt) +{ + TCGv_i32 t1 = tcg_temp_new_i32(); + + tcg_gen_trunc_tl_i32(t1, arg1); + tcg_gen_rotri_i32(t1, t1, shamt); + tcg_gen_ext_i32_tl(ret, t1); + + tcg_temp_free_i32(t1); } static bool trans_rori(DisasContext *ctx, arg_rori *a) { - REQUIRE_EXT(ctx, RVB); - return gen_shifti(ctx, a, tcg_gen_rotr_tl); + REQUIRE_EITHER_EXT(ctx, zbb, zbkb); + return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE, + tcg_gen_rotri_tl, gen_roriw, NULL); +} + +static void gen_rolw(TCGv ret, TCGv arg1, TCGv arg2) +{ + TCGv_i32 t1 = tcg_temp_new_i32(); + TCGv_i32 t2 = tcg_temp_new_i32(); + + /* truncate to 32-bits */ + tcg_gen_trunc_tl_i32(t1, arg1); + tcg_gen_trunc_tl_i32(t2, arg2); + + tcg_gen_rotl_i32(t1, t1, t2); + + /* sign-extend 64-bits */ + tcg_gen_ext_i32_tl(ret, t1); + + tcg_temp_free_i32(t1); + tcg_temp_free_i32(t2); } static bool trans_rol(DisasContext *ctx, arg_rol *a) { - REQUIRE_EXT(ctx, RVB); - return gen_shift(ctx, a, tcg_gen_rotl_tl); + REQUIRE_EITHER_EXT(ctx, zbb, zbkb); + return gen_shift_per_ol(ctx, a, EXT_NONE, tcg_gen_rotl_tl, gen_rolw, NULL); } -static bool trans_grev(DisasContext *ctx, arg_grev *a) +static void gen_rev8_32(TCGv ret, TCGv src1) { - REQUIRE_EXT(ctx, RVB); - return gen_shift(ctx, a, gen_helper_grev); + tcg_gen_bswap32_tl(ret, src1, TCG_BSWAP_OS); } -static bool trans_grevi(DisasContext *ctx, arg_grevi *a) +static bool trans_rev8_32(DisasContext *ctx, arg_rev8_32 *a) { - REQUIRE_EXT(ctx, RVB); - - if (a->shamt >= TARGET_LONG_BITS) { - return false; - } - - return gen_grevi(ctx, a); + REQUIRE_32BIT(ctx); + REQUIRE_EITHER_EXT(ctx, zbb, zbkb); + return gen_unary(ctx, a, EXT_NONE, gen_rev8_32); } -static bool trans_gorc(DisasContext *ctx, arg_gorc *a) +static bool trans_rev8_64(DisasContext *ctx, arg_rev8_64 *a) { - REQUIRE_EXT(ctx, RVB); - return gen_shift(ctx, a, gen_helper_gorc); + REQUIRE_64BIT(ctx); + REQUIRE_EITHER_EXT(ctx, zbb, zbkb); + return gen_unary(ctx, a, EXT_NONE, tcg_gen_bswap_tl); } -static bool trans_gorci(DisasContext *ctx, arg_gorci *a) +static void gen_orc_b(TCGv ret, TCGv source1) { - REQUIRE_EXT(ctx, RVB); - return gen_shifti(ctx, a, gen_helper_gorc); + TCGv tmp = tcg_temp_new(); + TCGv low7 = tcg_constant_tl(dup_const_tl(MO_8, 0x7f)); + + /* Set msb in each byte if the byte was non-zero. */ + tcg_gen_and_tl(tmp, source1, low7); + tcg_gen_add_tl(tmp, tmp, low7); + tcg_gen_or_tl(tmp, tmp, source1); + + /* Extract the msb to the lsb in each byte */ + tcg_gen_andc_tl(tmp, tmp, low7); + tcg_gen_shri_tl(tmp, tmp, 7); + + /* Replicate the lsb of each byte across the byte. 
*/ + tcg_gen_muli_tl(ret, tmp, 0xff); + + tcg_temp_free(tmp); } +static bool trans_orc_b(DisasContext *ctx, arg_orc_b *a) +{ + REQUIRE_ZBB(ctx); + return gen_unary(ctx, a, EXT_ZERO, gen_orc_b); +} + +#define GEN_SHADD(SHAMT) \ +static void gen_sh##SHAMT##add(TCGv ret, TCGv arg1, TCGv arg2) \ +{ \ + TCGv t = tcg_temp_new(); \ + \ + tcg_gen_shli_tl(t, arg1, SHAMT); \ + tcg_gen_add_tl(ret, t, arg2); \ + \ + tcg_temp_free(t); \ +} + +GEN_SHADD(1) +GEN_SHADD(2) +GEN_SHADD(3) + #define GEN_TRANS_SHADD(SHAMT) \ static bool trans_sh##SHAMT##add(DisasContext *ctx, arg_sh##SHAMT##add *a) \ { \ - REQUIRE_EXT(ctx, RVB); \ - return gen_arith(ctx, a, gen_sh##SHAMT##add); \ + REQUIRE_ZBA(ctx); \ + return gen_arith(ctx, a, EXT_NONE, gen_sh##SHAMT##add, NULL); \ } GEN_TRANS_SHADD(1) GEN_TRANS_SHADD(2) GEN_TRANS_SHADD(3) +static bool trans_zext_h_32(DisasContext *ctx, arg_zext_h_32 *a) +{ + REQUIRE_32BIT(ctx); + REQUIRE_ZBB(ctx); + return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext16u_tl); +} + +static bool trans_zext_h_64(DisasContext *ctx, arg_zext_h_64 *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZBB(ctx); + return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext16u_tl); +} + static bool trans_clzw(DisasContext *ctx, arg_clzw *a) { REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_unary(ctx, a, gen_clzw); + REQUIRE_ZBB(ctx); + return gen_unary(ctx, a, EXT_NONE, gen_clzw); } static bool trans_ctzw(DisasContext *ctx, arg_ctzw *a) { REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_unary(ctx, a, gen_ctzw); + REQUIRE_ZBB(ctx); + return gen_unary(ctx, a, EXT_ZERO, gen_ctzw); } static bool trans_cpopw(DisasContext *ctx, arg_cpopw *a) { REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_unary(ctx, a, gen_cpopw); -} - -static bool trans_packw(DisasContext *ctx, arg_packw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_arith(ctx, a, gen_packw); -} - -static bool trans_packuw(DisasContext *ctx, arg_packuw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_arith(ctx, a, gen_packuw); -} - -static bool trans_bsetw(DisasContext *ctx, arg_bsetw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_shiftw(ctx, a, gen_bset); -} - -static bool trans_bsetiw(DisasContext *ctx, arg_bsetiw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_shiftiw(ctx, a, gen_bset); -} - -static bool trans_bclrw(DisasContext *ctx, arg_bclrw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_shiftw(ctx, a, gen_bclr); -} - -static bool trans_bclriw(DisasContext *ctx, arg_bclriw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_shiftiw(ctx, a, gen_bclr); -} - -static bool trans_binvw(DisasContext *ctx, arg_binvw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_shiftw(ctx, a, gen_binv); -} - -static bool trans_binviw(DisasContext *ctx, arg_binviw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_shiftiw(ctx, a, gen_binv); -} - -static bool trans_bextw(DisasContext *ctx, arg_bextw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_shiftw(ctx, a, gen_bext); -} - -static bool trans_slow(DisasContext *ctx, arg_slow *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_shiftw(ctx, a, gen_slo); -} - -static bool trans_sloiw(DisasContext *ctx, arg_sloiw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_shiftiw(ctx, a, gen_slo); -} - -static bool trans_srow(DisasContext *ctx, arg_srow *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_shiftw(ctx, a, gen_sro); 
-} - -static bool trans_sroiw(DisasContext *ctx, arg_sroiw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_shiftiw(ctx, a, gen_sro); + REQUIRE_ZBB(ctx); + ctx->ol = MXL_RV32; + return gen_unary(ctx, a, EXT_ZERO, tcg_gen_ctpop_tl); } static bool trans_rorw(DisasContext *ctx, arg_rorw *a) { REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_shiftw(ctx, a, gen_rorw); + REQUIRE_EITHER_EXT(ctx, zbb, zbkb); + ctx->ol = MXL_RV32; + return gen_shift(ctx, a, EXT_NONE, gen_rorw, NULL); } static bool trans_roriw(DisasContext *ctx, arg_roriw *a) { REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_shiftiw(ctx, a, gen_rorw); + REQUIRE_EITHER_EXT(ctx, zbb, zbkb); + ctx->ol = MXL_RV32; + return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_roriw, NULL); } static bool trans_rolw(DisasContext *ctx, arg_rolw *a) { REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_shiftw(ctx, a, gen_rolw); + REQUIRE_EITHER_EXT(ctx, zbb, zbkb); + ctx->ol = MXL_RV32; + return gen_shift(ctx, a, EXT_NONE, gen_rolw, NULL); } -static bool trans_grevw(DisasContext *ctx, arg_grevw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_shiftw(ctx, a, gen_grevw); +#define GEN_SHADD_UW(SHAMT) \ +static void gen_sh##SHAMT##add_uw(TCGv ret, TCGv arg1, TCGv arg2) \ +{ \ + TCGv t = tcg_temp_new(); \ + \ + tcg_gen_ext32u_tl(t, arg1); \ + \ + tcg_gen_shli_tl(t, t, SHAMT); \ + tcg_gen_add_tl(ret, t, arg2); \ + \ + tcg_temp_free(t); \ } -static bool trans_greviw(DisasContext *ctx, arg_greviw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_shiftiw(ctx, a, gen_grevw); -} - -static bool trans_gorcw(DisasContext *ctx, arg_gorcw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_shiftw(ctx, a, gen_gorcw); -} - -static bool trans_gorciw(DisasContext *ctx, arg_gorciw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_shiftiw(ctx, a, gen_gorcw); -} +GEN_SHADD_UW(1) +GEN_SHADD_UW(2) +GEN_SHADD_UW(3) #define GEN_TRANS_SHADD_UW(SHAMT) \ static bool trans_sh##SHAMT##add_uw(DisasContext *ctx, \ arg_sh##SHAMT##add_uw *a) \ { \ REQUIRE_64BIT(ctx); \ - REQUIRE_EXT(ctx, RVB); \ - return gen_arith(ctx, a, gen_sh##SHAMT##add_uw); \ + REQUIRE_ZBA(ctx); \ + return gen_arith(ctx, a, EXT_NONE, gen_sh##SHAMT##add_uw, NULL); \ } GEN_TRANS_SHADD_UW(1) GEN_TRANS_SHADD_UW(2) GEN_TRANS_SHADD_UW(3) +static void gen_add_uw(TCGv ret, TCGv arg1, TCGv arg2) +{ + TCGv t = tcg_temp_new(); + tcg_gen_ext32u_tl(t, arg1); + tcg_gen_add_tl(ret, t, arg2); + tcg_temp_free(t); +} + static bool trans_add_uw(DisasContext *ctx, arg_add_uw *a) { REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_arith(ctx, a, gen_add_uw); + REQUIRE_ZBA(ctx); + return gen_arith(ctx, a, EXT_NONE, gen_add_uw, NULL); +} + +static void gen_slli_uw(TCGv dest, TCGv src, target_long shamt) +{ + tcg_gen_deposit_z_tl(dest, src, shamt, MIN(32, TARGET_LONG_BITS - shamt)); } static bool trans_slli_uw(DisasContext *ctx, arg_slli_uw *a) { REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - - TCGv source1 = tcg_temp_new(); - gen_get_gpr(source1, a->rs1); - - if (a->shamt < 32) { - tcg_gen_deposit_z_tl(source1, source1, a->shamt, 32); - } else { - tcg_gen_shli_tl(source1, source1, a->shamt); - } - - gen_set_gpr(a->rd, source1); - tcg_temp_free(source1); - return true; + REQUIRE_ZBA(ctx); + return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_slli_uw, NULL); +} + +static bool trans_clmul(DisasContext *ctx, arg_clmul *a) +{ + REQUIRE_EITHER_EXT(ctx, zbc, zbkc); + return gen_arith(ctx, a, EXT_NONE, gen_helper_clmul, NULL); +} + +static 
void gen_clmulh(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_clmulr(dst, src1, src2); + tcg_gen_shri_tl(dst, dst, 1); +} + +static bool trans_clmulh(DisasContext *ctx, arg_clmulr *a) +{ + REQUIRE_EITHER_EXT(ctx, zbc, zbkc); + return gen_arith(ctx, a, EXT_NONE, gen_clmulh, NULL); +} + +static bool trans_clmulr(DisasContext *ctx, arg_clmulh *a) +{ + REQUIRE_ZBC(ctx); + return gen_arith(ctx, a, EXT_NONE, gen_helper_clmulr, NULL); +} + +static void gen_pack(TCGv ret, TCGv src1, TCGv src2) +{ + tcg_gen_deposit_tl(ret, src1, src2, + TARGET_LONG_BITS / 2, + TARGET_LONG_BITS / 2); +} + +static void gen_packh(TCGv ret, TCGv src1, TCGv src2) +{ + TCGv t = tcg_temp_new(); + + tcg_gen_ext8u_tl(t, src2); + tcg_gen_deposit_tl(ret, src1, t, 8, TARGET_LONG_BITS - 8); + tcg_temp_free(t); +} + +static void gen_packw(TCGv ret, TCGv src1, TCGv src2) +{ + TCGv t = tcg_temp_new(); + + tcg_gen_ext16s_tl(t, src2); + tcg_gen_deposit_tl(ret, src1, t, 16, TARGET_LONG_BITS - 16); + tcg_temp_free(t); +} + +static bool trans_brev8(DisasContext *ctx, arg_brev8 *a) +{ + REQUIRE_ZBKB(ctx); + return gen_unary(ctx, a, EXT_NONE, gen_helper_brev8); +} + +static bool trans_pack(DisasContext *ctx, arg_pack *a) +{ + REQUIRE_ZBKB(ctx); + return gen_arith(ctx, a, EXT_NONE, gen_pack, NULL); +} + +static bool trans_packh(DisasContext *ctx, arg_packh *a) +{ + REQUIRE_ZBKB(ctx); + return gen_arith(ctx, a, EXT_NONE, gen_packh, NULL); +} + +static bool trans_packw(DisasContext *ctx, arg_packw *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZBKB(ctx); + return gen_arith(ctx, a, EXT_NONE, gen_packw, NULL); +} + +static bool trans_unzip(DisasContext *ctx, arg_unzip *a) +{ + REQUIRE_32BIT(ctx); + REQUIRE_ZBKB(ctx); + return gen_unary(ctx, a, EXT_NONE, gen_helper_unzip); +} + +static bool trans_zip(DisasContext *ctx, arg_zip *a) +{ + REQUIRE_32BIT(ctx); + REQUIRE_ZBKB(ctx); + return gen_unary(ctx, a, EXT_NONE, gen_helper_zip); +} + +static bool trans_xperm4(DisasContext *ctx, arg_xperm4 *a) +{ + REQUIRE_ZBKX(ctx); + return gen_arith(ctx, a, EXT_NONE, gen_helper_xperm4, NULL); +} + +static bool trans_xperm8(DisasContext *ctx, arg_xperm8 *a) +{ + REQUIRE_ZBKX(ctx); + return gen_arith(ctx, a, EXT_NONE, gen_helper_xperm8, NULL); } diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc index 7e45538ae0..1397c1ce1c 100644 --- a/target/riscv/insn_trans/trans_rvd.c.inc +++ b/target/riscv/insn_trans/trans_rvd.c.inc @@ -18,42 +18,59 @@ * this program. If not, see . 
*/ +#define REQUIRE_ZDINX_OR_D(ctx) do { \ + if (!ctx->cfg_ptr->ext_zdinx) { \ + REQUIRE_EXT(ctx, RVD); \ + } \ +} while (0) + +#define REQUIRE_EVEN(ctx, reg) do { \ + if (ctx->cfg_ptr->ext_zdinx && (get_xl(ctx) == MXL_RV32) && \ + ((reg) & 0x1)) { \ + return false; \ + } \ +} while (0) + static bool trans_fld(DisasContext *ctx, arg_fld *a) { + TCGv addr; + REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); - TCGv t0 = tcg_temp_new(); - gen_get_gpr(t0, a->rs1); - tcg_gen_addi_tl(t0, t0, a->imm); - tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], t0, ctx->mem_idx, MO_TEQ); + addr = get_address(ctx, a->rs1, a->imm); + tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, MO_TEUQ); mark_fs_dirty(ctx); - tcg_temp_free(t0); return true; } static bool trans_fsd(DisasContext *ctx, arg_fsd *a) { + TCGv addr; + REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); - TCGv t0 = tcg_temp_new(); - gen_get_gpr(t0, a->rs1); - tcg_gen_addi_tl(t0, t0, a->imm); - tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], t0, ctx->mem_idx, MO_TEQ); - - tcg_temp_free(t0); + addr = get_address(ctx, a->rs1, a->imm); + tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUQ); return true; } static bool trans_fmadd_d(DisasContext *ctx, arg_fmadd_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2 | a->rs3); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_d(ctx, a->rs2); + TCGv_i64 src3 = get_fpr_d(ctx, a->rs3); + gen_set_rm(ctx, a->rm); - gen_helper_fmadd_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1], - cpu_fpr[a->rs2], cpu_fpr[a->rs3]); + gen_helper_fmadd_d(dest, cpu_env, src1, src2, src3); + gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -61,10 +78,17 @@ static bool trans_fmadd_d(DisasContext *ctx, arg_fmadd_d *a) static bool trans_fmsub_d(DisasContext *ctx, arg_fmsub_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2 | a->rs3); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_d(ctx, a->rs2); + TCGv_i64 src3 = get_fpr_d(ctx, a->rs3); + gen_set_rm(ctx, a->rm); - gen_helper_fmsub_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1], - cpu_fpr[a->rs2], cpu_fpr[a->rs3]); + gen_helper_fmsub_d(dest, cpu_env, src1, src2, src3); + gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -72,10 +96,17 @@ static bool trans_fmsub_d(DisasContext *ctx, arg_fmsub_d *a) static bool trans_fnmsub_d(DisasContext *ctx, arg_fnmsub_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2 | a->rs3); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_d(ctx, a->rs2); + TCGv_i64 src3 = get_fpr_d(ctx, a->rs3); + gen_set_rm(ctx, a->rm); - gen_helper_fnmsub_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1], - cpu_fpr[a->rs2], cpu_fpr[a->rs3]); + gen_helper_fnmsub_d(dest, cpu_env, src1, src2, src3); + gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -83,10 +114,17 @@ static bool trans_fnmsub_d(DisasContext *ctx, arg_fnmsub_d *a) static bool trans_fnmadd_d(DisasContext *ctx, arg_fnmadd_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2 | a->rs3); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_d(ctx, a->rs2); + TCGv_i64 src3 = get_fpr_d(ctx, a->rs3); + 
gen_set_rm(ctx, a->rm); - gen_helper_fnmadd_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1], - cpu_fpr[a->rs2], cpu_fpr[a->rs3]); + gen_helper_fnmadd_d(dest, cpu_env, src1, src2, src3); + gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -94,12 +132,16 @@ static bool trans_fnmadd_d(DisasContext *ctx, arg_fnmadd_d *a) static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_d(ctx, a->rs2); gen_set_rm(ctx, a->rm); - gen_helper_fadd_d(cpu_fpr[a->rd], cpu_env, - cpu_fpr[a->rs1], cpu_fpr[a->rs2]); - + gen_helper_fadd_d(dest, cpu_env, src1, src2); + gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -107,12 +149,16 @@ static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a) static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_d(ctx, a->rs2); gen_set_rm(ctx, a->rm); - gen_helper_fsub_d(cpu_fpr[a->rd], cpu_env, - cpu_fpr[a->rs1], cpu_fpr[a->rs2]); - + gen_helper_fsub_d(dest, cpu_env, src1, src2); + gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -120,12 +166,16 @@ static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a) static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_d(ctx, a->rs2); gen_set_rm(ctx, a->rm); - gen_helper_fmul_d(cpu_fpr[a->rd], cpu_env, - cpu_fpr[a->rs1], cpu_fpr[a->rs2]); - + gen_helper_fmul_d(dest, cpu_env, src1, src2); + gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -133,12 +183,16 @@ static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a) static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_d(ctx, a->rs2); gen_set_rm(ctx, a->rm); - gen_helper_fdiv_d(cpu_fpr[a->rd], cpu_env, - cpu_fpr[a->rs1], cpu_fpr[a->rs2]); - + gen_helper_fdiv_d(dest, cpu_env, src1, src2); + gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -146,23 +200,34 @@ static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a) static bool trans_fsqrt_d(DisasContext *ctx, arg_fsqrt_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd | a->rs1); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); gen_set_rm(ctx, a->rm); - gen_helper_fsqrt_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]); - + gen_helper_fsqrt_d(dest, cpu_env, src1); + gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } static bool trans_fsgnj_d(DisasContext *ctx, arg_fsgnj_d *a) { + REQUIRE_FPU; + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); if (a->rs1 == a->rs2) { /* FMOV */ - tcg_gen_mov_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1]); + dest = get_fpr_d(ctx, a->rs1); } else { - 
tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rs2], - cpu_fpr[a->rs1], 0, 63); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_d(ctx, a->rs2); + tcg_gen_deposit_i64(dest, src2, src1, 0, 63); } + gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -170,15 +235,22 @@ static bool trans_fsgnj_d(DisasContext *ctx, arg_fsgnj_d *a) static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); + if (a->rs1 == a->rs2) { /* FNEG */ - tcg_gen_xori_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], INT64_MIN); + tcg_gen_xori_i64(dest, src1, INT64_MIN); } else { + TCGv_i64 src2 = get_fpr_d(ctx, a->rs2); TCGv_i64 t0 = tcg_temp_new_i64(); - tcg_gen_not_i64(t0, cpu_fpr[a->rs2]); - tcg_gen_deposit_i64(cpu_fpr[a->rd], t0, cpu_fpr[a->rs1], 0, 63); + tcg_gen_not_i64(t0, src2); + tcg_gen_deposit_i64(dest, t0, src1, 0, 63); tcg_temp_free_i64(t0); } + gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -186,15 +258,22 @@ static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a) static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); + if (a->rs1 == a->rs2) { /* FABS */ - tcg_gen_andi_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], ~INT64_MIN); + tcg_gen_andi_i64(dest, src1, ~INT64_MIN); } else { + TCGv_i64 src2 = get_fpr_d(ctx, a->rs2); TCGv_i64 t0 = tcg_temp_new_i64(); - tcg_gen_andi_i64(t0, cpu_fpr[a->rs2], INT64_MIN); - tcg_gen_xor_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], t0); + tcg_gen_andi_i64(t0, src2, INT64_MIN); + tcg_gen_xor_i64(dest, src1, t0); tcg_temp_free_i64(t0); } + gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -202,11 +281,15 @@ static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a) static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2); - gen_helper_fmin_d(cpu_fpr[a->rd], cpu_env, - cpu_fpr[a->rs1], cpu_fpr[a->rs2]); + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_d(ctx, a->rs2); + gen_helper_fmin_d(dest, cpu_env, src1, src2); + gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -214,11 +297,15 @@ static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a) static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2); - gen_helper_fmax_d(cpu_fpr[a->rd], cpu_env, - cpu_fpr[a->rs1], cpu_fpr[a->rs2]); + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_d(ctx, a->rs2); + gen_helper_fmax_d(dest, cpu_env, src1, src2); + gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -226,11 +313,15 @@ static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a) static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rs1); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); gen_set_rm(ctx, a->rm); - gen_helper_fcvt_s_d(cpu_fpr[a->rd], cpu_env, 
cpu_fpr[a->rs1]); - + gen_helper_fcvt_s_d(dest, cpu_env, src1); + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -238,11 +329,15 @@ static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a) static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); gen_set_rm(ctx, a->rm); - gen_helper_fcvt_d_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]); - + gen_helper_fcvt_d_s(dest, cpu_env, src1); + gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -250,93 +345,104 @@ static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a) static bool trans_feq_d(DisasContext *ctx, arg_feq_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rs1 | a->rs2); - TCGv t0 = tcg_temp_new(); - gen_helper_feq_d(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]); - gen_set_gpr(a->rd, t0); - tcg_temp_free(t0); + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_d(ctx, a->rs2); + gen_helper_feq_d(dest, cpu_env, src1, src2); + gen_set_gpr(ctx, a->rd, dest); return true; } static bool trans_flt_d(DisasContext *ctx, arg_flt_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rs1 | a->rs2); - TCGv t0 = tcg_temp_new(); - gen_helper_flt_d(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]); - gen_set_gpr(a->rd, t0); - tcg_temp_free(t0); + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_d(ctx, a->rs2); + gen_helper_flt_d(dest, cpu_env, src1, src2); + gen_set_gpr(ctx, a->rd, dest); return true; } static bool trans_fle_d(DisasContext *ctx, arg_fle_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rs1 | a->rs2); - TCGv t0 = tcg_temp_new(); - gen_helper_fle_d(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]); - gen_set_gpr(a->rd, t0); - tcg_temp_free(t0); + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_d(ctx, a->rs2); + gen_helper_fle_d(dest, cpu_env, src1, src2); + gen_set_gpr(ctx, a->rd, dest); return true; } static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rs1); - TCGv t0 = tcg_temp_new(); - gen_helper_fclass_d(t0, cpu_fpr[a->rs1]); - gen_set_gpr(a->rd, t0); - tcg_temp_free(t0); + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); + + gen_helper_fclass_d(dest, src1); + gen_set_gpr(ctx, a->rd, dest); return true; } static bool trans_fcvt_w_d(DisasContext *ctx, arg_fcvt_w_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rs1); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); - TCGv t0 = tcg_temp_new(); gen_set_rm(ctx, a->rm); - gen_helper_fcvt_w_d(t0, cpu_env, cpu_fpr[a->rs1]); - gen_set_gpr(a->rd, t0); - tcg_temp_free(t0); - + gen_helper_fcvt_w_d(dest, cpu_env, src1); + gen_set_gpr(ctx, a->rd, dest); return true; } static bool trans_fcvt_wu_d(DisasContext *ctx, arg_fcvt_wu_d *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rs1); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); - TCGv t0 = tcg_temp_new(); gen_set_rm(ctx, a->rm); - 
gen_helper_fcvt_wu_d(t0, cpu_env, cpu_fpr[a->rs1]); - gen_set_gpr(a->rd, t0); - tcg_temp_free(t0); - + gen_helper_fcvt_wu_d(dest, cpu_env, src1); + gen_set_gpr(ctx, a->rd, dest); return true; } static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd); - TCGv t0 = tcg_temp_new(); - gen_get_gpr(t0, a->rs1); + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN); gen_set_rm(ctx, a->rm); - gen_helper_fcvt_d_w(cpu_fpr[a->rd], cpu_env, t0); - tcg_temp_free(t0); + gen_helper_fcvt_d_w(dest, cpu_env, src); + gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; @@ -345,14 +451,15 @@ static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a) static bool trans_fcvt_d_wu(DisasContext *ctx, arg_fcvt_d_wu *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd); - TCGv t0 = tcg_temp_new(); - gen_get_gpr(t0, a->rs1); + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO); gen_set_rm(ctx, a->rm); - gen_helper_fcvt_d_wu(cpu_fpr[a->rd], cpu_env, t0); - tcg_temp_free(t0); + gen_helper_fcvt_d_wu(dest, cpu_env, src); + gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; @@ -362,13 +469,15 @@ static bool trans_fcvt_l_d(DisasContext *ctx, arg_fcvt_l_d *a) { REQUIRE_64BIT(ctx); REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rs1); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); - TCGv t0 = tcg_temp_new(); gen_set_rm(ctx, a->rm); - gen_helper_fcvt_l_d(t0, cpu_env, cpu_fpr[a->rs1]); - gen_set_gpr(a->rd, t0); - tcg_temp_free(t0); + gen_helper_fcvt_l_d(dest, cpu_env, src1); + gen_set_gpr(ctx, a->rd, dest); return true; } @@ -376,13 +485,15 @@ static bool trans_fcvt_lu_d(DisasContext *ctx, arg_fcvt_lu_d *a) { REQUIRE_64BIT(ctx); REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rs1); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); - TCGv t0 = tcg_temp_new(); gen_set_rm(ctx, a->rm); - gen_helper_fcvt_lu_d(t0, cpu_env, cpu_fpr[a->rs1]); - gen_set_gpr(a->rd, t0); - tcg_temp_free(t0); + gen_helper_fcvt_lu_d(dest, cpu_env, src1); + gen_set_gpr(ctx, a->rd, dest); return true; } @@ -393,7 +504,7 @@ static bool trans_fmv_x_d(DisasContext *ctx, arg_fmv_x_d *a) REQUIRE_EXT(ctx, RVD); #ifdef TARGET_RISCV64 - gen_set_gpr(a->rd, cpu_fpr[a->rs1]); + gen_set_gpr(ctx, a->rd, cpu_fpr[a->rs1]); return true; #else qemu_build_not_reached(); @@ -404,14 +515,16 @@ static bool trans_fcvt_d_l(DisasContext *ctx, arg_fcvt_d_l *a) { REQUIRE_64BIT(ctx); REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd); - TCGv t0 = tcg_temp_new(); - gen_get_gpr(t0, a->rs1); + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN); gen_set_rm(ctx, a->rm); - gen_helper_fcvt_d_l(cpu_fpr[a->rd], cpu_env, t0); - tcg_temp_free(t0); + gen_helper_fcvt_d_l(dest, cpu_env, src); + gen_set_fpr_d(ctx, a->rd, dest); + mark_fs_dirty(ctx); return true; } @@ -420,14 +533,16 @@ static bool trans_fcvt_d_lu(DisasContext *ctx, arg_fcvt_d_lu *a) { REQUIRE_64BIT(ctx); REQUIRE_FPU; - REQUIRE_EXT(ctx, RVD); + REQUIRE_ZDINX_OR_D(ctx); + REQUIRE_EVEN(ctx, a->rd); - TCGv t0 = tcg_temp_new(); - gen_get_gpr(t0, a->rs1); + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO); gen_set_rm(ctx, a->rm); - 
gen_helper_fcvt_d_lu(cpu_fpr[a->rd], cpu_env, t0); - tcg_temp_free(t0); + gen_helper_fcvt_d_lu(dest, cpu_env, src); + gen_set_fpr_d(ctx, a->rd, dest); + mark_fs_dirty(ctx); return true; } @@ -439,11 +554,7 @@ static bool trans_fmv_d_x(DisasContext *ctx, arg_fmv_d_x *a) REQUIRE_EXT(ctx, RVD); #ifdef TARGET_RISCV64 - TCGv t0 = tcg_temp_new(); - gen_get_gpr(t0, a->rs1); - - tcg_gen_mov_tl(cpu_fpr[a->rd], t0); - tcg_temp_free(t0); + tcg_gen_mov_tl(cpu_fpr[a->rd], get_gpr(ctx, a->rs1, EXT_NONE)); mark_fs_dirty(ctx); return true; #else diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc index db1c0c9974..a1d3eb52ad 100644 --- a/target/riscv/insn_trans/trans_rvf.c.inc +++ b/target/riscv/insn_trans/trans_rvf.c.inc @@ -20,47 +20,58 @@ #define REQUIRE_FPU do {\ if (ctx->mstatus_fs == 0) \ - return false; \ + if (!ctx->cfg_ptr->ext_zfinx) \ + return false; \ +} while (0) + +#define REQUIRE_ZFINX_OR_F(ctx) do {\ + if (!ctx->cfg_ptr->ext_zfinx) { \ + REQUIRE_EXT(ctx, RVF); \ + } \ } while (0) static bool trans_flw(DisasContext *ctx, arg_flw *a) { + TCGv_i64 dest; + TCGv addr; + REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); - TCGv t0 = tcg_temp_new(); - gen_get_gpr(t0, a->rs1); - tcg_gen_addi_tl(t0, t0, a->imm); - tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], t0, ctx->mem_idx, MO_TEUL); - gen_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rd]); + addr = get_address(ctx, a->rs1, a->imm); + dest = cpu_fpr[a->rd]; + tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_TEUL); + gen_nanbox_s(dest, dest); - tcg_temp_free(t0); mark_fs_dirty(ctx); return true; } static bool trans_fsw(DisasContext *ctx, arg_fsw *a) { + TCGv addr; + REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); - TCGv t0 = tcg_temp_new(); - gen_get_gpr(t0, a->rs1); - tcg_gen_addi_tl(t0, t0, a->imm); - - tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], t0, ctx->mem_idx, MO_TEUL); - - tcg_temp_free(t0); + addr = get_address(ctx, a->rs1, a->imm); + tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUL); return true; } static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3); + gen_set_rm(ctx, a->rm); - gen_helper_fmadd_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1], - cpu_fpr[a->rs2], cpu_fpr[a->rs3]); + gen_helper_fmadd_s(dest, cpu_env, src1, src2, src3); + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -68,10 +79,16 @@ static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a) static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3); + gen_set_rm(ctx, a->rm); - gen_helper_fmsub_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1], - cpu_fpr[a->rs2], cpu_fpr[a->rs3]); + gen_helper_fmsub_s(dest, cpu_env, src1, src2, src3); + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -79,10 +96,16 @@ static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a) static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3); 
+ gen_set_rm(ctx, a->rm); - gen_helper_fnmsub_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1], - cpu_fpr[a->rs2], cpu_fpr[a->rs3]); + gen_helper_fnmsub_s(dest, cpu_env, src1, src2, src3); + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -90,10 +113,16 @@ static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a) static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3); + gen_set_rm(ctx, a->rm); - gen_helper_fnmadd_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1], - cpu_fpr[a->rs2], cpu_fpr[a->rs3]); + gen_helper_fnmadd_s(dest, cpu_env, src1, src2, src3); + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -101,11 +130,15 @@ static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a) static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); gen_set_rm(ctx, a->rm); - gen_helper_fadd_s(cpu_fpr[a->rd], cpu_env, - cpu_fpr[a->rs1], cpu_fpr[a->rs2]); + gen_helper_fadd_s(dest, cpu_env, src1, src2); + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -113,11 +146,15 @@ static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a) static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); gen_set_rm(ctx, a->rm); - gen_helper_fsub_s(cpu_fpr[a->rd], cpu_env, - cpu_fpr[a->rs1], cpu_fpr[a->rs2]); + gen_helper_fsub_s(dest, cpu_env, src1, src2); + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -125,11 +162,15 @@ static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a) static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); gen_set_rm(ctx, a->rm); - gen_helper_fmul_s(cpu_fpr[a->rd], cpu_env, - cpu_fpr[a->rs1], cpu_fpr[a->rs2]); + gen_helper_fmul_s(dest, cpu_env, src1, src2); + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -137,11 +178,15 @@ static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a) static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); gen_set_rm(ctx, a->rm); - gen_helper_fdiv_s(cpu_fpr[a->rd], cpu_env, - cpu_fpr[a->rs1], cpu_fpr[a->rs2]); + gen_helper_fdiv_s(dest, cpu_env, src1, src2); + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -149,10 +194,14 @@ static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a) static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); gen_set_rm(ctx, a->rm); - gen_helper_fsqrt_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]); + gen_helper_fsqrt_s(dest, cpu_env, 
src1); + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -160,22 +209,37 @@ static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a) static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); if (a->rs1 == a->rs2) { /* FMOV */ - gen_check_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rs1]); + if (!ctx->cfg_ptr->ext_zfinx) { + gen_check_nanbox_s(dest, src1); + } else { + tcg_gen_ext32s_i64(dest, src1); + } } else { /* FSGNJ */ - TCGv_i64 rs1 = tcg_temp_new_i64(); - TCGv_i64 rs2 = tcg_temp_new_i64(); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); - gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]); - gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]); + if (!ctx->cfg_ptr->ext_zfinx) { + TCGv_i64 rs1 = tcg_temp_new_i64(); + TCGv_i64 rs2 = tcg_temp_new_i64(); + gen_check_nanbox_s(rs1, src1); + gen_check_nanbox_s(rs2, src2); - /* This formulation retains the nanboxing of rs2. */ - tcg_gen_deposit_i64(cpu_fpr[a->rd], rs2, rs1, 0, 31); - tcg_temp_free_i64(rs1); - tcg_temp_free_i64(rs2); + /* This formulation retains the nanboxing of rs2 in normal 'F'. */ + tcg_gen_deposit_i64(dest, rs2, rs1, 0, 31); + + tcg_temp_free_i64(rs1); + tcg_temp_free_i64(rs2); + } else { + tcg_gen_deposit_i64(dest, src2, src1, 0, 31); + tcg_gen_ext32s_i64(dest, dest); + } } + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -185,31 +249,45 @@ static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a) TCGv_i64 rs1, rs2, mask; REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); rs1 = tcg_temp_new_i64(); - gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]); - - if (a->rs1 == a->rs2) { /* FNEG */ - tcg_gen_xori_i64(cpu_fpr[a->rd], rs1, MAKE_64BIT_MASK(31, 1)); + if (!ctx->cfg_ptr->ext_zfinx) { + gen_check_nanbox_s(rs1, src1); } else { + tcg_gen_mov_i64(rs1, src1); + } + if (a->rs1 == a->rs2) { /* FNEG */ + tcg_gen_xori_i64(dest, rs1, MAKE_64BIT_MASK(31, 1)); + } else { + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); rs2 = tcg_temp_new_i64(); - gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]); + if (!ctx->cfg_ptr->ext_zfinx) { + gen_check_nanbox_s(rs2, src2); + } else { + tcg_gen_mov_i64(rs2, src2); + } /* * Replace bit 31 in rs1 with inverse in rs2. * This formulation retains the nanboxing of rs1. 
*/ - mask = tcg_const_i64(~MAKE_64BIT_MASK(31, 1)); + mask = tcg_constant_i64(~MAKE_64BIT_MASK(31, 1)); tcg_gen_nor_i64(rs2, rs2, mask); - tcg_gen_and_i64(rs1, mask, rs1); - tcg_gen_or_i64(cpu_fpr[a->rd], rs1, rs2); + tcg_gen_and_i64(dest, mask, rs1); + tcg_gen_or_i64(dest, dest, rs2); - tcg_temp_free_i64(mask); tcg_temp_free_i64(rs2); } + /* signed-extended intead of nanboxing for result if enable zfinx */ + if (ctx->cfg_ptr->ext_zfinx) { + tcg_gen_ext32s_i64(dest, dest); + } + gen_set_fpr_hs(ctx, a->rd, dest); tcg_temp_free_i64(rs1); - mark_fs_dirty(ctx); return true; } @@ -219,28 +297,45 @@ static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a) TCGv_i64 rs1, rs2; REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); rs1 = tcg_temp_new_i64(); - gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]); + + if (!ctx->cfg_ptr->ext_zfinx) { + gen_check_nanbox_s(rs1, src1); + } else { + tcg_gen_mov_i64(rs1, src1); + } if (a->rs1 == a->rs2) { /* FABS */ - tcg_gen_andi_i64(cpu_fpr[a->rd], rs1, ~MAKE_64BIT_MASK(31, 1)); + tcg_gen_andi_i64(dest, rs1, ~MAKE_64BIT_MASK(31, 1)); } else { + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); rs2 = tcg_temp_new_i64(); - gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]); + + if (!ctx->cfg_ptr->ext_zfinx) { + gen_check_nanbox_s(rs2, src2); + } else { + tcg_gen_mov_i64(rs2, src2); + } /* * Xor bit 31 in rs1 with that in rs2. * This formulation retains the nanboxing of rs1. */ - tcg_gen_andi_i64(rs2, rs2, MAKE_64BIT_MASK(31, 1)); - tcg_gen_xor_i64(cpu_fpr[a->rd], rs1, rs2); + tcg_gen_andi_i64(dest, rs2, MAKE_64BIT_MASK(31, 1)); + tcg_gen_xor_i64(dest, rs1, dest); tcg_temp_free_i64(rs2); } + /* signed-extended intead of nanboxing for result if enable zfinx */ + if (ctx->cfg_ptr->ext_zfinx) { + tcg_gen_ext32s_i64(dest, dest); + } tcg_temp_free_i64(rs1); - + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -248,10 +343,14 @@ static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a) static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); - gen_helper_fmin_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1], - cpu_fpr[a->rs2]); + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + + gen_helper_fmin_s(dest, cpu_env, src1, src2); + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -259,10 +358,14 @@ static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a) static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); - gen_helper_fmax_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1], - cpu_fpr[a->rs2]); + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + + gen_helper_fmax_s(dest, cpu_env, src1, src2); + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } @@ -270,28 +373,28 @@ static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a) static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); - TCGv t0 = tcg_temp_new(); gen_set_rm(ctx, a->rm); - gen_helper_fcvt_w_s(t0, cpu_env, cpu_fpr[a->rs1]); - gen_set_gpr(a->rd, t0); - tcg_temp_free(t0); - + gen_helper_fcvt_w_s(dest, cpu_env, src1); + gen_set_gpr(ctx, a->rd, dest); 
return true; } static bool trans_fcvt_wu_s(DisasContext *ctx, arg_fcvt_wu_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); - TCGv t0 = tcg_temp_new(); gen_set_rm(ctx, a->rm); - gen_helper_fcvt_wu_s(t0, cpu_env, cpu_fpr[a->rs1]); - gen_set_gpr(a->rd, t0); - tcg_temp_free(t0); - + gen_helper_fcvt_wu_s(dest, cpu_env, src1); + gen_set_gpr(ctx, a->rd, dest); return true; } @@ -299,101 +402,102 @@ static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a) { /* NOTE: This was FMV.X.S in an earlier version of the ISA spec! */ REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); - - TCGv t0 = tcg_temp_new(); + REQUIRE_ZFINX_OR_F(ctx); + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); #if defined(TARGET_RISCV64) - tcg_gen_ext32s_tl(t0, cpu_fpr[a->rs1]); + tcg_gen_ext32s_tl(dest, src1); #else - tcg_gen_extrl_i64_i32(t0, cpu_fpr[a->rs1]); + tcg_gen_extrl_i64_i32(dest, src1); #endif - gen_set_gpr(a->rd, t0); - tcg_temp_free(t0); - + gen_set_gpr(ctx, a->rd, dest); return true; } static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); - TCGv t0 = tcg_temp_new(); - gen_helper_feq_s(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]); - gen_set_gpr(a->rd, t0); - tcg_temp_free(t0); + REQUIRE_ZFINX_OR_F(ctx); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + + gen_helper_feq_s(dest, cpu_env, src1, src2); + gen_set_gpr(ctx, a->rd, dest); return true; } static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); - TCGv t0 = tcg_temp_new(); - gen_helper_flt_s(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]); - gen_set_gpr(a->rd, t0); - tcg_temp_free(t0); + REQUIRE_ZFINX_OR_F(ctx); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + + gen_helper_flt_s(dest, cpu_env, src1, src2); + gen_set_gpr(ctx, a->rd, dest); return true; } static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); - TCGv t0 = tcg_temp_new(); - gen_helper_fle_s(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]); - gen_set_gpr(a->rd, t0); - tcg_temp_free(t0); + REQUIRE_ZFINX_OR_F(ctx); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + + gen_helper_fle_s(dest, cpu_env, src1, src2); + gen_set_gpr(ctx, a->rd, dest); return true; } static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); - TCGv t0 = tcg_temp_new(); - - gen_helper_fclass_s(t0, cpu_fpr[a->rs1]); - - gen_set_gpr(a->rd, t0); - tcg_temp_free(t0); + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + gen_helper_fclass_s(dest, cpu_env, src1); + gen_set_gpr(ctx, a->rd, dest); return true; } static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a) { REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); - TCGv t0 = tcg_temp_new(); - gen_get_gpr(t0, a->rs1); + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN); gen_set_rm(ctx, a->rm); - gen_helper_fcvt_s_w(cpu_fpr[a->rd], cpu_env, t0); - + gen_helper_fcvt_s_w(dest, cpu_env, src); + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); - tcg_temp_free(t0); - return true; } static bool trans_fcvt_s_wu(DisasContext *ctx, arg_fcvt_s_wu *a) { REQUIRE_FPU; - 
REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); - TCGv t0 = tcg_temp_new(); - gen_get_gpr(t0, a->rs1); + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO); gen_set_rm(ctx, a->rm); - gen_helper_fcvt_s_wu(cpu_fpr[a->rd], cpu_env, t0); - + gen_helper_fcvt_s_wu(dest, cpu_env, src); + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); - tcg_temp_free(t0); - return true; } @@ -401,17 +505,15 @@ static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a) { /* NOTE: This was FMV.S.X in an earlier version of the ISA spec! */ REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); - TCGv t0 = tcg_temp_new(); - gen_get_gpr(t0, a->rs1); - - tcg_gen_extu_tl_i64(cpu_fpr[a->rd], t0); - gen_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rd]); + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO); + tcg_gen_extu_tl_i64(dest, src); + gen_nanbox_s(dest, dest); + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); - tcg_temp_free(t0); - return true; } @@ -419,13 +521,14 @@ static bool trans_fcvt_l_s(DisasContext *ctx, arg_fcvt_l_s *a) { REQUIRE_64BIT(ctx); REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); - TCGv t0 = tcg_temp_new(); gen_set_rm(ctx, a->rm); - gen_helper_fcvt_l_s(t0, cpu_env, cpu_fpr[a->rs1]); - gen_set_gpr(a->rd, t0); - tcg_temp_free(t0); + gen_helper_fcvt_l_s(dest, cpu_env, src1); + gen_set_gpr(ctx, a->rd, dest); return true; } @@ -433,13 +536,14 @@ static bool trans_fcvt_lu_s(DisasContext *ctx, arg_fcvt_lu_s *a) { REQUIRE_64BIT(ctx); REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); - TCGv t0 = tcg_temp_new(); gen_set_rm(ctx, a->rm); - gen_helper_fcvt_lu_s(t0, cpu_env, cpu_fpr[a->rs1]); - gen_set_gpr(a->rd, t0); - tcg_temp_free(t0); + gen_helper_fcvt_lu_s(dest, cpu_env, src1); + gen_set_gpr(ctx, a->rd, dest); return true; } @@ -447,16 +551,15 @@ static bool trans_fcvt_s_l(DisasContext *ctx, arg_fcvt_s_l *a) { REQUIRE_64BIT(ctx); REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); - TCGv t0 = tcg_temp_new(); - gen_get_gpr(t0, a->rs1); + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN); gen_set_rm(ctx, a->rm); - gen_helper_fcvt_s_l(cpu_fpr[a->rd], cpu_env, t0); - + gen_helper_fcvt_s_l(dest, cpu_env, src); + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); - tcg_temp_free(t0); return true; } @@ -464,15 +567,14 @@ static bool trans_fcvt_s_lu(DisasContext *ctx, arg_fcvt_s_lu *a) { REQUIRE_64BIT(ctx); REQUIRE_FPU; - REQUIRE_EXT(ctx, RVF); + REQUIRE_ZFINX_OR_F(ctx); - TCGv t0 = tcg_temp_new(); - gen_get_gpr(t0, a->rs1); + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO); gen_set_rm(ctx, a->rm); - gen_helper_fcvt_s_lu(cpu_fpr[a->rd], cpu_env, t0); - + gen_helper_fcvt_s_lu(dest, cpu_env, src); + gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); - tcg_temp_free(t0); return true; } diff --git a/target/riscv/insn_trans/trans_rvh.c.inc b/target/riscv/insn_trans/trans_rvh.c.inc index 6b5edf82b7..4f8aecddc7 100644 --- a/target/riscv/insn_trans/trans_rvh.c.inc +++ b/target/riscv/insn_trans/trans_rvh.c.inc @@ -17,281 +17,139 @@ */ #ifndef CONFIG_USER_ONLY -static void check_access(DisasContext *ctx) { +static bool check_access(DisasContext *ctx) +{ if (!ctx->hlsx) { if (ctx->virt_enabled) { generate_exception(ctx, RISCV_EXCP_VIRT_INSTRUCTION_FAULT); } else { 
generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST); } + return false; } + return true; } #endif +static bool do_hlv(DisasContext *ctx, arg_r2 *a, MemOp mop) +{ +#ifdef CONFIG_USER_ONLY + return false; +#else + if (check_access(ctx)) { + TCGv dest = dest_gpr(ctx, a->rd); + TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE); + int mem_idx = ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK; + tcg_gen_qemu_ld_tl(dest, addr, mem_idx, mop); + gen_set_gpr(ctx, a->rd, dest); + } + return true; +#endif +} + static bool trans_hlv_b(DisasContext *ctx, arg_hlv_b *a) { REQUIRE_EXT(ctx, RVH); -#ifndef CONFIG_USER_ONLY - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - - check_access(ctx); - - gen_get_gpr(t0, a->rs1); - - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_SB); - gen_set_gpr(a->rd, t1); - - tcg_temp_free(t0); - tcg_temp_free(t1); - return true; -#else - return false; -#endif + return do_hlv(ctx, a, MO_SB); } static bool trans_hlv_h(DisasContext *ctx, arg_hlv_h *a) { REQUIRE_EXT(ctx, RVH); -#ifndef CONFIG_USER_ONLY - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - - check_access(ctx); - - gen_get_gpr(t0, a->rs1); - - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESW); - gen_set_gpr(a->rd, t1); - - tcg_temp_free(t0); - tcg_temp_free(t1); - return true; -#else - return false; -#endif + return do_hlv(ctx, a, MO_TESW); } static bool trans_hlv_w(DisasContext *ctx, arg_hlv_w *a) { REQUIRE_EXT(ctx, RVH); -#ifndef CONFIG_USER_ONLY - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - - check_access(ctx); - - gen_get_gpr(t0, a->rs1); - - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESL); - gen_set_gpr(a->rd, t1); - - tcg_temp_free(t0); - tcg_temp_free(t1); - return true; -#else - return false; -#endif + return do_hlv(ctx, a, MO_TESL); } static bool trans_hlv_bu(DisasContext *ctx, arg_hlv_bu *a) { REQUIRE_EXT(ctx, RVH); -#ifndef CONFIG_USER_ONLY - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - - check_access(ctx); - - gen_get_gpr(t0, a->rs1); - - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_UB); - gen_set_gpr(a->rd, t1); - - tcg_temp_free(t0); - tcg_temp_free(t1); - return true; -#else - return false; -#endif + return do_hlv(ctx, a, MO_UB); } static bool trans_hlv_hu(DisasContext *ctx, arg_hlv_hu *a) { REQUIRE_EXT(ctx, RVH); -#ifndef CONFIG_USER_ONLY - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); + return do_hlv(ctx, a, MO_TEUW); +} - check_access(ctx); - - gen_get_gpr(t0, a->rs1); - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEUW); - gen_set_gpr(a->rd, t1); - - tcg_temp_free(t0); - tcg_temp_free(t1); - return true; -#else +static bool do_hsv(DisasContext *ctx, arg_r2_s *a, MemOp mop) +{ +#ifdef CONFIG_USER_ONLY return false; +#else + if (check_access(ctx)) { + TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE); + TCGv data = get_gpr(ctx, a->rs2, EXT_NONE); + int mem_idx = ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK; + tcg_gen_qemu_st_tl(data, addr, mem_idx, mop); + } + return true; #endif } static bool trans_hsv_b(DisasContext *ctx, arg_hsv_b *a) { REQUIRE_EXT(ctx, RVH); -#ifndef CONFIG_USER_ONLY - TCGv t0 = tcg_temp_new(); - TCGv dat = tcg_temp_new(); - - check_access(ctx); - - gen_get_gpr(t0, a->rs1); - gen_get_gpr(dat, a->rs2); - - tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_SB); - - tcg_temp_free(t0); - tcg_temp_free(dat); - return true; -#else - return false; -#endif + return do_hsv(ctx, a, 
MO_SB); } static bool trans_hsv_h(DisasContext *ctx, arg_hsv_h *a) { REQUIRE_EXT(ctx, RVH); -#ifndef CONFIG_USER_ONLY - TCGv t0 = tcg_temp_new(); - TCGv dat = tcg_temp_new(); - - check_access(ctx); - - gen_get_gpr(t0, a->rs1); - gen_get_gpr(dat, a->rs2); - - tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESW); - - tcg_temp_free(t0); - tcg_temp_free(dat); - return true; -#else - return false; -#endif + return do_hsv(ctx, a, MO_TESW); } static bool trans_hsv_w(DisasContext *ctx, arg_hsv_w *a) { REQUIRE_EXT(ctx, RVH); -#ifndef CONFIG_USER_ONLY - TCGv t0 = tcg_temp_new(); - TCGv dat = tcg_temp_new(); - - check_access(ctx); - - gen_get_gpr(t0, a->rs1); - gen_get_gpr(dat, a->rs2); - - tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESL); - - tcg_temp_free(t0); - tcg_temp_free(dat); - return true; -#else - return false; -#endif + return do_hsv(ctx, a, MO_TESL); } static bool trans_hlv_wu(DisasContext *ctx, arg_hlv_wu *a) { REQUIRE_64BIT(ctx); REQUIRE_EXT(ctx, RVH); - -#ifndef CONFIG_USER_ONLY - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - - check_access(ctx); - - gen_get_gpr(t0, a->rs1); - - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEUL); - gen_set_gpr(a->rd, t1); - - tcg_temp_free(t0); - tcg_temp_free(t1); - return true; -#else - return false; -#endif + return do_hlv(ctx, a, MO_TEUL); } static bool trans_hlv_d(DisasContext *ctx, arg_hlv_d *a) { REQUIRE_64BIT(ctx); REQUIRE_EXT(ctx, RVH); - -#ifndef CONFIG_USER_ONLY - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - - check_access(ctx); - - gen_get_gpr(t0, a->rs1); - - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEQ); - gen_set_gpr(a->rd, t1); - - tcg_temp_free(t0); - tcg_temp_free(t1); - return true; -#else - return false; -#endif + return do_hlv(ctx, a, MO_TEUQ); } static bool trans_hsv_d(DisasContext *ctx, arg_hsv_d *a) { REQUIRE_64BIT(ctx); REQUIRE_EXT(ctx, RVH); + return do_hsv(ctx, a, MO_TEUQ); +} #ifndef CONFIG_USER_ONLY - TCGv t0 = tcg_temp_new(); - TCGv dat = tcg_temp_new(); - - check_access(ctx); - - gen_get_gpr(t0, a->rs1); - gen_get_gpr(dat, a->rs2); - - tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEQ); - - tcg_temp_free(t0); - tcg_temp_free(dat); +static bool do_hlvx(DisasContext *ctx, arg_r2 *a, + void (*func)(TCGv, TCGv_env, TCGv)) +{ + if (check_access(ctx)) { + TCGv dest = dest_gpr(ctx, a->rd); + TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE); + func(dest, cpu_env, addr); + gen_set_gpr(ctx, a->rd, dest); + } return true; -#else - return false; -#endif } +#endif static bool trans_hlvx_hu(DisasContext *ctx, arg_hlvx_hu *a) { REQUIRE_EXT(ctx, RVH); #ifndef CONFIG_USER_ONLY - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - - check_access(ctx); - - gen_get_gpr(t0, a->rs1); - - gen_helper_hyp_hlvx_hu(t1, cpu_env, t0); - gen_set_gpr(a->rd, t1); - - tcg_temp_free(t0); - tcg_temp_free(t1); - return true; + return do_hlvx(ctx, a, gen_helper_hyp_hlvx_hu); #else return false; #endif @@ -301,19 +159,7 @@ static bool trans_hlvx_wu(DisasContext *ctx, arg_hlvx_wu *a) { REQUIRE_EXT(ctx, RVH); #ifndef CONFIG_USER_ONLY - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - - check_access(ctx); - - gen_get_gpr(t0, a->rs1); - - gen_helper_hyp_hlvx_wu(t1, cpu_env, t0); - gen_set_gpr(a->rd, t1); - - tcg_temp_free(t0); - tcg_temp_free(t1); - return true; + return do_hlvx(ctx, a, gen_helper_hyp_hlvx_wu); #else return false; #endif @@ -323,6 +169,7 @@ static bool 
trans_hfence_gvma(DisasContext *ctx, arg_sfence_vma *a) { REQUIRE_EXT(ctx, RVH); #ifndef CONFIG_USER_ONLY + decode_save_opc(ctx); gen_helper_hyp_gvma_tlb_flush(cpu_env); return true; #endif @@ -333,6 +180,7 @@ static bool trans_hfence_vvma(DisasContext *ctx, arg_sfence_vma *a) { REQUIRE_EXT(ctx, RVH); #ifndef CONFIG_USER_ONLY + decode_save_opc(ctx); gen_helper_hyp_tlb_flush(cpu_env); return true; #endif diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc index 6e736c9d0d..c49dbec0eb 100644 --- a/target/riscv/insn_trans/trans_rvi.c.inc +++ b/target/riscv/insn_trans/trans_rvi.c.inc @@ -26,23 +26,19 @@ static bool trans_illegal(DisasContext *ctx, arg_empty *a) static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a) { - REQUIRE_64BIT(ctx); - return trans_illegal(ctx, a); + REQUIRE_64_OR_128BIT(ctx); + return trans_illegal(ctx, a); } static bool trans_lui(DisasContext *ctx, arg_lui *a) { - if (a->rd != 0) { - tcg_gen_movi_tl(cpu_gpr[a->rd], a->imm); - } + gen_set_gpri(ctx, a->rd, a->imm); return true; } static bool trans_auipc(DisasContext *ctx, arg_auipc *a) { - if (a->rd != 0) { - tcg_gen_movi_tl(cpu_gpr[a->rd], a->imm + ctx->base.pc_next); - } + gen_set_gpri(ctx, a->rd, a->imm + ctx->base.pc_next); return true; } @@ -54,25 +50,23 @@ static bool trans_jal(DisasContext *ctx, arg_jal *a) static bool trans_jalr(DisasContext *ctx, arg_jalr *a) { - /* no chaining with JALR */ TCGLabel *misaligned = NULL; - TCGv t0 = tcg_temp_new(); - - gen_get_gpr(cpu_pc, a->rs1); - tcg_gen_addi_tl(cpu_pc, cpu_pc, a->imm); + tcg_gen_addi_tl(cpu_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm); tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2); + gen_set_pc(ctx, cpu_pc); if (!has_ext(ctx, RVC)) { + TCGv t0 = tcg_temp_new(); + misaligned = gen_new_label(); tcg_gen_andi_tl(t0, cpu_pc, 0x2); tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned); + tcg_temp_free(t0); } - if (a->rd != 0) { - tcg_gen_movi_tl(cpu_gpr[a->rd], ctx->pc_succ_insn); - } - lookup_and_goto_ptr(ctx); + gen_set_gpri(ctx, a->rd, ctx->pc_succ_insn); + tcg_gen_lookup_and_goto_ptr(); if (misaligned) { gen_set_label(misaligned); @@ -80,21 +74,108 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a) } ctx->base.is_jmp = DISAS_NORETURN; - tcg_temp_free(t0); return true; } +static TCGCond gen_compare_i128(bool bz, TCGv rl, + TCGv al, TCGv ah, TCGv bl, TCGv bh, + TCGCond cond) +{ + TCGv rh = tcg_temp_new(); + bool invert = false; + + switch (cond) { + case TCG_COND_EQ: + case TCG_COND_NE: + if (bz) { + tcg_gen_or_tl(rl, al, ah); + } else { + tcg_gen_xor_tl(rl, al, bl); + tcg_gen_xor_tl(rh, ah, bh); + tcg_gen_or_tl(rl, rl, rh); + } + break; + + case TCG_COND_GE: + case TCG_COND_LT: + if (bz) { + tcg_gen_mov_tl(rl, ah); + } else { + TCGv tmp = tcg_temp_new(); + + tcg_gen_sub2_tl(rl, rh, al, ah, bl, bh); + tcg_gen_xor_tl(rl, rh, ah); + tcg_gen_xor_tl(tmp, ah, bh); + tcg_gen_and_tl(rl, rl, tmp); + tcg_gen_xor_tl(rl, rh, rl); + + tcg_temp_free(tmp); + } + break; + + case TCG_COND_LTU: + invert = true; + /* fallthrough */ + case TCG_COND_GEU: + { + TCGv tmp = tcg_temp_new(); + TCGv zero = tcg_constant_tl(0); + TCGv one = tcg_constant_tl(1); + + cond = TCG_COND_NE; + /* borrow in to second word */ + tcg_gen_setcond_tl(TCG_COND_LTU, tmp, al, bl); + /* seed third word with 1, which will be result */ + tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero); + tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero); + + tcg_temp_free(tmp); + } + break; + + default: + g_assert_not_reached(); + } + + if (invert) { + cond = 
tcg_invert_cond(cond); + } + + tcg_temp_free(rh); + return cond; +} + +static void gen_setcond_i128(TCGv rl, TCGv rh, + TCGv src1l, TCGv src1h, + TCGv src2l, TCGv src2h, + TCGCond cond) +{ + cond = gen_compare_i128(false, rl, src1l, src1h, src2l, src2h, cond); + tcg_gen_setcondi_tl(cond, rl, rl, 0); + tcg_gen_movi_tl(rh, 0); +} + static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond) { TCGLabel *l = gen_new_label(); - TCGv source1, source2; - source1 = tcg_temp_new(); - source2 = tcg_temp_new(); - gen_get_gpr(source1, a->rs1); - gen_get_gpr(source2, a->rs2); + TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN); + TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN); - tcg_gen_brcond_tl(cond, source1, source2, l); + if (get_xl(ctx) == MXL_RV128) { + TCGv src1h = get_gprh(ctx, a->rs1); + TCGv src2h = get_gprh(ctx, a->rs2); + TCGv tmp = tcg_temp_new(); + + cond = gen_compare_i128(a->rs2 == 0, + tmp, src1, src1h, src2, src2h, cond); + tcg_gen_brcondi_tl(cond, tmp, 0, l); + + tcg_temp_free(tmp); + } else { + tcg_gen_brcond_tl(cond, src1, src2, l); + } gen_goto_tb(ctx, 1, ctx->pc_succ_insn); + gen_set_label(l); /* branch taken */ if (!has_ext(ctx, RVC) && ((ctx->base.pc_next + a->imm) & 0x3)) { @@ -105,9 +186,6 @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond) } ctx->base.is_jmp = DISAS_NORETURN; - tcg_temp_free(source1); - tcg_temp_free(source2); - return true; } @@ -141,18 +219,53 @@ static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a) return gen_branch(ctx, a, TCG_COND_GEU); } +static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop) +{ + TCGv dest = dest_gpr(ctx, a->rd); + TCGv addr = get_address(ctx, a->rs1, a->imm); + + tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop); + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +/* Compute only 64-bit addresses to use the address translation mechanism */ +static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop) +{ + TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE); + TCGv destl = dest_gpr(ctx, a->rd); + TCGv desth = dest_gprh(ctx, a->rd); + TCGv addrl = tcg_temp_new(); + + tcg_gen_addi_tl(addrl, src1l, a->imm); + + if ((memop & MO_SIZE) <= MO_64) { + tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop); + if (memop & MO_SIGN) { + tcg_gen_sari_tl(desth, destl, 63); + } else { + tcg_gen_movi_tl(desth, 0); + } + } else { + /* assume little-endian memory access for now */ + tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ); + tcg_gen_addi_tl(addrl, addrl, 8); + tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ); + } + + gen_set_gpr128(ctx, a->rd, destl, desth); + + tcg_temp_free(addrl); + return true; +} + static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop) { - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - gen_get_gpr(t0, a->rs1); - tcg_gen_addi_tl(t0, t0, a->imm); - - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, memop); - gen_set_gpr(a->rd, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); - return true; + if (get_xl(ctx) == MXL_RV128) { + return gen_load_i128(ctx, a, memop); + } else { + return gen_load_tl(ctx, a, memop); + } } static bool trans_lb(DisasContext *ctx, arg_lb *a) @@ -170,6 +283,18 @@ static bool trans_lw(DisasContext *ctx, arg_lw *a) return gen_load(ctx, a, MO_TESL); } +static bool trans_ld(DisasContext *ctx, arg_ld *a) +{ + REQUIRE_64_OR_128BIT(ctx); + return gen_load(ctx, a, MO_TESQ); +} + +static bool trans_lq(DisasContext *ctx, arg_lq *a) +{ + REQUIRE_128BIT(ctx); + return gen_load(ctx, a, MO_TEUO); +} + static bool trans_lbu(DisasContext *ctx, arg_lbu *a) { return 
gen_load(ctx, a, MO_UB); @@ -180,20 +305,57 @@ static bool trans_lhu(DisasContext *ctx, arg_lhu *a) return gen_load(ctx, a, MO_TEUW); } -static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop) +static bool trans_lwu(DisasContext *ctx, arg_lwu *a) { - TCGv t0 = tcg_temp_new(); - TCGv dat = tcg_temp_new(); - gen_get_gpr(t0, a->rs1); - tcg_gen_addi_tl(t0, t0, a->imm); - gen_get_gpr(dat, a->rs2); + REQUIRE_64_OR_128BIT(ctx); + return gen_load(ctx, a, MO_TEUL); +} - tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx, memop); - tcg_temp_free(t0); - tcg_temp_free(dat); +static bool trans_ldu(DisasContext *ctx, arg_ldu *a) +{ + REQUIRE_128BIT(ctx); + return gen_load(ctx, a, MO_TEUQ); +} + +static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop) +{ + TCGv addr = get_address(ctx, a->rs1, a->imm); + TCGv data = get_gpr(ctx, a->rs2, EXT_NONE); + + tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop); return true; } +static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop) +{ + TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE); + TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE); + TCGv src2h = get_gprh(ctx, a->rs2); + TCGv addrl = tcg_temp_new(); + + tcg_gen_addi_tl(addrl, src1l, a->imm); + + if ((memop & MO_SIZE) <= MO_64) { + tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop); + } else { + /* little-endian memory access assumed for now */ + tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ); + tcg_gen_addi_tl(addrl, addrl, 8); + tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ); + } + + tcg_temp_free(addrl); + return true; +} + +static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop) +{ + if (get_xl(ctx) == MXL_RV128) { + return gen_store_i128(ctx, a, memop); + } else { + return gen_store_tl(ctx, a, memop); + } +} static bool trans_sb(DisasContext *ctx, arg_sb *a) { @@ -210,27 +372,50 @@ static bool trans_sw(DisasContext *ctx, arg_sw *a) return gen_store(ctx, a, MO_TESL); } -static bool trans_lwu(DisasContext *ctx, arg_lwu *a) -{ - REQUIRE_64BIT(ctx); - return gen_load(ctx, a, MO_TEUL); -} - -static bool trans_ld(DisasContext *ctx, arg_ld *a) -{ - REQUIRE_64BIT(ctx); - return gen_load(ctx, a, MO_TEQ); -} - static bool trans_sd(DisasContext *ctx, arg_sd *a) { - REQUIRE_64BIT(ctx); - return gen_store(ctx, a, MO_TEQ); + REQUIRE_64_OR_128BIT(ctx); + return gen_store(ctx, a, MO_TEUQ); +} + +static bool trans_sq(DisasContext *ctx, arg_sq *a) +{ + REQUIRE_128BIT(ctx); + return gen_store(ctx, a, MO_TEUO); +} + +static bool trans_addd(DisasContext *ctx, arg_addd *a) +{ + REQUIRE_128BIT(ctx); + ctx->ol = MXL_RV64; + return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL); +} + +static bool trans_addid(DisasContext *ctx, arg_addid *a) +{ + REQUIRE_128BIT(ctx); + ctx->ol = MXL_RV64; + return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL); +} + +static bool trans_subd(DisasContext *ctx, arg_subd *a) +{ + REQUIRE_128BIT(ctx); + ctx->ol = MXL_RV64; + return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL); +} + +static void gen_addi2_i128(TCGv retl, TCGv reth, + TCGv srcl, TCGv srch, target_long imm) +{ + TCGv imml = tcg_constant_tl(imm); + TCGv immh = tcg_constant_tl(-(imm < 0)); + tcg_gen_add2_tl(retl, reth, srcl, srch, imml, immh); } static bool trans_addi(DisasContext *ctx, arg_addi *a) { - return gen_arith_imm_fn(ctx, a, &tcg_gen_addi_tl); + return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, gen_addi2_i128); } static void gen_slt(TCGv ret, TCGv s1, TCGv s2) @@ -238,207 +423,388 @@ static void gen_slt(TCGv ret, TCGv s1, TCGv s2) 
tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2); } +static void gen_slt_i128(TCGv retl, TCGv reth, + TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h) +{ + gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LT); +} + static void gen_sltu(TCGv ret, TCGv s1, TCGv s2) { tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2); } +static void gen_sltu_i128(TCGv retl, TCGv reth, + TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h) +{ + gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LTU); +} static bool trans_slti(DisasContext *ctx, arg_slti *a) { - return gen_arith_imm_tl(ctx, a, &gen_slt); + return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128); } static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a) { - return gen_arith_imm_tl(ctx, a, &gen_sltu); + return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128); } static bool trans_xori(DisasContext *ctx, arg_xori *a) { - return gen_arith_imm_fn(ctx, a, &tcg_gen_xori_tl); + return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl); } + static bool trans_ori(DisasContext *ctx, arg_ori *a) { - return gen_arith_imm_fn(ctx, a, &tcg_gen_ori_tl); + return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl); } + static bool trans_andi(DisasContext *ctx, arg_andi *a) { - return gen_arith_imm_fn(ctx, a, &tcg_gen_andi_tl); + return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl); } + +static void gen_slli_i128(TCGv retl, TCGv reth, + TCGv src1l, TCGv src1h, + target_long shamt) +{ + if (shamt >= 64) { + tcg_gen_shli_tl(reth, src1l, shamt - 64); + tcg_gen_movi_tl(retl, 0); + } else { + tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt); + tcg_gen_shli_tl(retl, src1l, shamt); + } +} + static bool trans_slli(DisasContext *ctx, arg_slli *a) { - return gen_shifti(ctx, a, tcg_gen_shl_tl); + return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128); +} + +static void gen_srliw(TCGv dst, TCGv src, target_long shamt) +{ + tcg_gen_extract_tl(dst, src, shamt, 32 - shamt); +} + +static void gen_srli_i128(TCGv retl, TCGv reth, + TCGv src1l, TCGv src1h, + target_long shamt) +{ + if (shamt >= 64) { + tcg_gen_shri_tl(retl, src1h, shamt - 64); + tcg_gen_movi_tl(reth, 0); + } else { + tcg_gen_extract2_tl(retl, src1l, src1h, shamt); + tcg_gen_shri_tl(reth, src1h, shamt); + } } static bool trans_srli(DisasContext *ctx, arg_srli *a) { - return gen_shifti(ctx, a, tcg_gen_shr_tl); + return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE, + tcg_gen_shri_tl, gen_srliw, gen_srli_i128); +} + +static void gen_sraiw(TCGv dst, TCGv src, target_long shamt) +{ + tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt); +} + +static void gen_srai_i128(TCGv retl, TCGv reth, + TCGv src1l, TCGv src1h, + target_long shamt) +{ + if (shamt >= 64) { + tcg_gen_sari_tl(retl, src1h, shamt - 64); + tcg_gen_sari_tl(reth, src1h, 63); + } else { + tcg_gen_extract2_tl(retl, src1l, src1h, shamt); + tcg_gen_sari_tl(reth, src1h, shamt); + } } static bool trans_srai(DisasContext *ctx, arg_srai *a) { - return gen_shifti(ctx, a, tcg_gen_sar_tl); + return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE, + tcg_gen_sari_tl, gen_sraiw, gen_srai_i128); } static bool trans_add(DisasContext *ctx, arg_add *a) { - return gen_arith(ctx, a, &tcg_gen_add_tl); + return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, tcg_gen_add2_tl); } static bool trans_sub(DisasContext *ctx, arg_sub *a) { - return gen_arith(ctx, a, &tcg_gen_sub_tl); + return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, tcg_gen_sub2_tl); +} + +static void gen_sll_i128(TCGv destl, TCGv desth, + TCGv src1l, TCGv src1h, TCGv shamt) +{ + TCGv ls = tcg_temp_new(); + TCGv rs = tcg_temp_new(); 
+ TCGv hs = tcg_temp_new(); + TCGv ll = tcg_temp_new(); + TCGv lr = tcg_temp_new(); + TCGv h0 = tcg_temp_new(); + TCGv h1 = tcg_temp_new(); + TCGv zero = tcg_constant_tl(0); + + tcg_gen_andi_tl(hs, shamt, 64); + tcg_gen_andi_tl(ls, shamt, 63); + tcg_gen_neg_tl(shamt, shamt); + tcg_gen_andi_tl(rs, shamt, 63); + + tcg_gen_shl_tl(ll, src1l, ls); + tcg_gen_shl_tl(h0, src1h, ls); + tcg_gen_shr_tl(lr, src1l, rs); + tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero); + tcg_gen_or_tl(h1, h0, lr); + + tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll); + tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1); + + tcg_temp_free(ls); + tcg_temp_free(rs); + tcg_temp_free(hs); + tcg_temp_free(ll); + tcg_temp_free(lr); + tcg_temp_free(h0); + tcg_temp_free(h1); } static bool trans_sll(DisasContext *ctx, arg_sll *a) { - return gen_shift(ctx, a, &tcg_gen_shl_tl); + return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128); } static bool trans_slt(DisasContext *ctx, arg_slt *a) { - return gen_arith(ctx, a, &gen_slt); + return gen_arith(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128); } static bool trans_sltu(DisasContext *ctx, arg_sltu *a) { - return gen_arith(ctx, a, &gen_sltu); + return gen_arith(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128); } -static bool trans_xor(DisasContext *ctx, arg_xor *a) +static void gen_srl_i128(TCGv destl, TCGv desth, + TCGv src1l, TCGv src1h, TCGv shamt) { - return gen_arith(ctx, a, &tcg_gen_xor_tl); + TCGv ls = tcg_temp_new(); + TCGv rs = tcg_temp_new(); + TCGv hs = tcg_temp_new(); + TCGv ll = tcg_temp_new(); + TCGv lr = tcg_temp_new(); + TCGv h0 = tcg_temp_new(); + TCGv h1 = tcg_temp_new(); + TCGv zero = tcg_constant_tl(0); + + tcg_gen_andi_tl(hs, shamt, 64); + tcg_gen_andi_tl(rs, shamt, 63); + tcg_gen_neg_tl(shamt, shamt); + tcg_gen_andi_tl(ls, shamt, 63); + + tcg_gen_shr_tl(lr, src1l, rs); + tcg_gen_shr_tl(h1, src1h, rs); + tcg_gen_shl_tl(ll, src1h, ls); + tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero); + tcg_gen_or_tl(h0, ll, lr); + + tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0); + tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1); + + tcg_temp_free(ls); + tcg_temp_free(rs); + tcg_temp_free(hs); + tcg_temp_free(ll); + tcg_temp_free(lr); + tcg_temp_free(h0); + tcg_temp_free(h1); } static bool trans_srl(DisasContext *ctx, arg_srl *a) { - return gen_shift(ctx, a, &tcg_gen_shr_tl); + return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128); +} + +static void gen_sra_i128(TCGv destl, TCGv desth, + TCGv src1l, TCGv src1h, TCGv shamt) +{ + TCGv ls = tcg_temp_new(); + TCGv rs = tcg_temp_new(); + TCGv hs = tcg_temp_new(); + TCGv ll = tcg_temp_new(); + TCGv lr = tcg_temp_new(); + TCGv h0 = tcg_temp_new(); + TCGv h1 = tcg_temp_new(); + TCGv zero = tcg_constant_tl(0); + + tcg_gen_andi_tl(hs, shamt, 64); + tcg_gen_andi_tl(rs, shamt, 63); + tcg_gen_neg_tl(shamt, shamt); + tcg_gen_andi_tl(ls, shamt, 63); + + tcg_gen_shr_tl(lr, src1l, rs); + tcg_gen_sar_tl(h1, src1h, rs); + tcg_gen_shl_tl(ll, src1h, ls); + tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero); + tcg_gen_or_tl(h0, ll, lr); + tcg_gen_sari_tl(lr, src1h, 63); + + tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0); + tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1); + + tcg_temp_free(ls); + tcg_temp_free(rs); + tcg_temp_free(hs); + tcg_temp_free(ll); + tcg_temp_free(lr); + tcg_temp_free(h0); + tcg_temp_free(h1); } static bool trans_sra(DisasContext *ctx, arg_sra *a) { - return gen_shift(ctx, a, &tcg_gen_sar_tl); + return gen_shift(ctx, a, EXT_SIGN, 
tcg_gen_sar_tl, gen_sra_i128); +} + +static bool trans_xor(DisasContext *ctx, arg_xor *a) +{ + return gen_logic(ctx, a, tcg_gen_xor_tl); } static bool trans_or(DisasContext *ctx, arg_or *a) { - return gen_arith(ctx, a, &tcg_gen_or_tl); + return gen_logic(ctx, a, tcg_gen_or_tl); } static bool trans_and(DisasContext *ctx, arg_and *a) { - return gen_arith(ctx, a, &tcg_gen_and_tl); + return gen_logic(ctx, a, tcg_gen_and_tl); } static bool trans_addiw(DisasContext *ctx, arg_addiw *a) { - REQUIRE_64BIT(ctx); - return gen_arith_imm_tl(ctx, a, &gen_addw); + REQUIRE_64_OR_128BIT(ctx); + ctx->ol = MXL_RV32; + return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL); } static bool trans_slliw(DisasContext *ctx, arg_slliw *a) { - REQUIRE_64BIT(ctx); - return gen_shiftiw(ctx, a, tcg_gen_shl_tl); + REQUIRE_64_OR_128BIT(ctx); + ctx->ol = MXL_RV32; + return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL); } static bool trans_srliw(DisasContext *ctx, arg_srliw *a) { - REQUIRE_64BIT(ctx); - TCGv t = tcg_temp_new(); - gen_get_gpr(t, a->rs1); - tcg_gen_extract_tl(t, t, a->shamt, 32 - a->shamt); - /* sign-extend for W instructions */ - tcg_gen_ext32s_tl(t, t); - gen_set_gpr(a->rd, t); - tcg_temp_free(t); - return true; + REQUIRE_64_OR_128BIT(ctx); + ctx->ol = MXL_RV32; + return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL); } static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a) { - REQUIRE_64BIT(ctx); - TCGv t = tcg_temp_new(); - gen_get_gpr(t, a->rs1); - tcg_gen_sextract_tl(t, t, a->shamt, 32 - a->shamt); - gen_set_gpr(a->rd, t); - tcg_temp_free(t); - return true; + REQUIRE_64_OR_128BIT(ctx); + ctx->ol = MXL_RV32; + return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL); +} + +static bool trans_sllid(DisasContext *ctx, arg_sllid *a) +{ + REQUIRE_128BIT(ctx); + ctx->ol = MXL_RV64; + return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL); +} + +static bool trans_srlid(DisasContext *ctx, arg_srlid *a) +{ + REQUIRE_128BIT(ctx); + ctx->ol = MXL_RV64; + return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL); +} + +static bool trans_sraid(DisasContext *ctx, arg_sraid *a) +{ + REQUIRE_128BIT(ctx); + ctx->ol = MXL_RV64; + return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl, NULL); } static bool trans_addw(DisasContext *ctx, arg_addw *a) { - REQUIRE_64BIT(ctx); - return gen_arith(ctx, a, &gen_addw); + REQUIRE_64_OR_128BIT(ctx); + ctx->ol = MXL_RV32; + return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL); } static bool trans_subw(DisasContext *ctx, arg_subw *a) { - REQUIRE_64BIT(ctx); - return gen_arith(ctx, a, &gen_subw); + REQUIRE_64_OR_128BIT(ctx); + ctx->ol = MXL_RV32; + return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL); } static bool trans_sllw(DisasContext *ctx, arg_sllw *a) { - REQUIRE_64BIT(ctx); - TCGv source1 = tcg_temp_new(); - TCGv source2 = tcg_temp_new(); - - gen_get_gpr(source1, a->rs1); - gen_get_gpr(source2, a->rs2); - - tcg_gen_andi_tl(source2, source2, 0x1F); - tcg_gen_shl_tl(source1, source1, source2); - - tcg_gen_ext32s_tl(source1, source1); - gen_set_gpr(a->rd, source1); - tcg_temp_free(source1); - tcg_temp_free(source2); - return true; + REQUIRE_64_OR_128BIT(ctx); + ctx->ol = MXL_RV32; + return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL); } static bool trans_srlw(DisasContext *ctx, arg_srlw *a) { - REQUIRE_64BIT(ctx); - TCGv source1 = tcg_temp_new(); - TCGv source2 = tcg_temp_new(); - - gen_get_gpr(source1, a->rs1); - gen_get_gpr(source2, a->rs2); - - /* clear upper 32 */ - tcg_gen_ext32u_tl(source1, source1); - 
tcg_gen_andi_tl(source2, source2, 0x1F); - tcg_gen_shr_tl(source1, source1, source2); - - tcg_gen_ext32s_tl(source1, source1); - gen_set_gpr(a->rd, source1); - tcg_temp_free(source1); - tcg_temp_free(source2); - return true; + REQUIRE_64_OR_128BIT(ctx); + ctx->ol = MXL_RV32; + return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL); } static bool trans_sraw(DisasContext *ctx, arg_sraw *a) { - REQUIRE_64BIT(ctx); - TCGv source1 = tcg_temp_new(); - TCGv source2 = tcg_temp_new(); + REQUIRE_64_OR_128BIT(ctx); + ctx->ol = MXL_RV32; + return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL); +} - gen_get_gpr(source1, a->rs1); - gen_get_gpr(source2, a->rs2); +static bool trans_slld(DisasContext *ctx, arg_slld *a) +{ + REQUIRE_128BIT(ctx); + ctx->ol = MXL_RV64; + return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL); +} + +static bool trans_srld(DisasContext *ctx, arg_srld *a) +{ + REQUIRE_128BIT(ctx); + ctx->ol = MXL_RV64; + return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL); +} + +static bool trans_srad(DisasContext *ctx, arg_srad *a) +{ + REQUIRE_128BIT(ctx); + ctx->ol = MXL_RV64; + return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL); +} + +static bool trans_pause(DisasContext *ctx, arg_pause *a) +{ + if (!ctx->cfg_ptr->ext_zihintpause) { + return false; + } /* - * first, trick to get it to act like working on 32 bits (get rid of - * upper 32, sign extend to fill space) + * PAUSE is a no-op in QEMU, + * end the TB and return to main loop */ - tcg_gen_ext32s_tl(source1, source1); - tcg_gen_andi_tl(source2, source2, 0x1F); - tcg_gen_sar_tl(source1, source1, source2); - - gen_set_gpr(a->rd, source1); - tcg_temp_free(source1); - tcg_temp_free(source2); + gen_set_pc_imm(ctx, ctx->pc_succ_insn); + tcg_gen_exit_tb(NULL, 0); + ctx->base.is_jmp = DISAS_NORETURN; return true; } @@ -452,7 +818,7 @@ static bool trans_fence(DisasContext *ctx, arg_fence *a) static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a) { - if (!ctx->ext_ifencei) { + if (!ctx->cfg_ptr->ext_ifencei) { return false; } @@ -460,86 +826,277 @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a) * FENCE_I is a no-op in QEMU, * however we need to end the translation block */ - tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn); - exit_tb(ctx); + gen_set_pc_imm(ctx, ctx->pc_succ_insn); + tcg_gen_exit_tb(NULL, 0); ctx->base.is_jmp = DISAS_NORETURN; return true; } -#define RISCV_OP_CSR_PRE do {\ - source1 = tcg_temp_new(); \ - csr_store = tcg_temp_new(); \ - dest = tcg_temp_new(); \ - rs1_pass = tcg_temp_new(); \ - gen_get_gpr(source1, a->rs1); \ - tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next); \ - tcg_gen_movi_tl(rs1_pass, a->rs1); \ - tcg_gen_movi_tl(csr_store, a->csr); \ - gen_io_start();\ -} while (0) +static bool do_csr_post(DisasContext *ctx) +{ + /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */ + decode_save_opc(ctx); + /* We may have changed important cpu state -- exit to main loop. 
*/ + gen_set_pc_imm(ctx, ctx->pc_succ_insn); + tcg_gen_exit_tb(NULL, 0); + ctx->base.is_jmp = DISAS_NORETURN; + return true; +} -#define RISCV_OP_CSR_POST do {\ - gen_set_gpr(a->rd, dest); \ - tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn); \ - exit_tb(ctx); \ - ctx->base.is_jmp = DISAS_NORETURN; \ - tcg_temp_free(source1); \ - tcg_temp_free(csr_store); \ - tcg_temp_free(dest); \ - tcg_temp_free(rs1_pass); \ -} while (0) +static bool do_csrr(DisasContext *ctx, int rd, int rc) +{ + TCGv dest = dest_gpr(ctx, rd); + TCGv_i32 csr = tcg_constant_i32(rc); + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } + gen_helper_csrr(dest, cpu_env, csr); + gen_set_gpr(ctx, rd, dest); + return do_csr_post(ctx); +} + +static bool do_csrw(DisasContext *ctx, int rc, TCGv src) +{ + TCGv_i32 csr = tcg_constant_i32(rc); + + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } + gen_helper_csrw(cpu_env, csr, src); + return do_csr_post(ctx); +} + +static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask) +{ + TCGv dest = dest_gpr(ctx, rd); + TCGv_i32 csr = tcg_constant_i32(rc); + + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } + gen_helper_csrrw(dest, cpu_env, csr, src, mask); + gen_set_gpr(ctx, rd, dest); + return do_csr_post(ctx); +} + +static bool do_csrr_i128(DisasContext *ctx, int rd, int rc) +{ + TCGv destl = dest_gpr(ctx, rd); + TCGv desth = dest_gprh(ctx, rd); + TCGv_i32 csr = tcg_constant_i32(rc); + + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } + gen_helper_csrr_i128(destl, cpu_env, csr); + tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh)); + gen_set_gpr128(ctx, rd, destl, desth); + return do_csr_post(ctx); +} + +static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch) +{ + TCGv_i32 csr = tcg_constant_i32(rc); + + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } + gen_helper_csrw_i128(cpu_env, csr, srcl, srch); + return do_csr_post(ctx); +} + +static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc, + TCGv srcl, TCGv srch, TCGv maskl, TCGv maskh) +{ + TCGv destl = dest_gpr(ctx, rd); + TCGv desth = dest_gprh(ctx, rd); + TCGv_i32 csr = tcg_constant_i32(rc); + + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } + gen_helper_csrrw_i128(destl, cpu_env, csr, srcl, srch, maskl, maskh); + tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh)); + gen_set_gpr128(ctx, rd, destl, desth); + return do_csr_post(ctx); +} static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a) { - TCGv source1, csr_store, dest, rs1_pass; - RISCV_OP_CSR_PRE; - gen_helper_csrrw(dest, cpu_env, source1, csr_store); - RISCV_OP_CSR_POST; - return true; + RISCVMXL xl = get_xl(ctx); + if (xl < MXL_RV128) { + TCGv src = get_gpr(ctx, a->rs1, EXT_NONE); + + /* + * If rd == 0, the insn shall not read the csr, nor cause any of the + * side effects that might occur on a csr read. + */ + if (a->rd == 0) { + return do_csrw(ctx, a->csr, src); + } + + TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX : + (target_ulong)-1); + return do_csrrw(ctx, a->rd, a->csr, src, mask); + } else { + TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE); + TCGv srch = get_gprh(ctx, a->rs1); + + /* + * If rd == 0, the insn shall not read the csr, nor cause any of the + * side effects that might occur on a csr read. 
+ */ + if (a->rd == 0) { + return do_csrw_i128(ctx, a->csr, srcl, srch); + } + + TCGv mask = tcg_constant_tl(-1); + return do_csrrw_i128(ctx, a->rd, a->csr, srcl, srch, mask, mask); + } } static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a) { - TCGv source1, csr_store, dest, rs1_pass; - RISCV_OP_CSR_PRE; - gen_helper_csrrs(dest, cpu_env, source1, csr_store, rs1_pass); - RISCV_OP_CSR_POST; - return true; + /* + * If rs1 == 0, the insn shall not write to the csr at all, nor + * cause any of the side effects that might occur on a csr write. + * Note that if rs1 specifies a register other than x0, holding + * a zero value, the instruction will still attempt to write the + * unmodified value back to the csr and will cause side effects. + */ + if (get_xl(ctx) < MXL_RV128) { + if (a->rs1 == 0) { + return do_csrr(ctx, a->rd, a->csr); + } + + TCGv ones = tcg_constant_tl(-1); + TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO); + return do_csrrw(ctx, a->rd, a->csr, ones, mask); + } else { + if (a->rs1 == 0) { + return do_csrr_i128(ctx, a->rd, a->csr); + } + + TCGv ones = tcg_constant_tl(-1); + TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO); + TCGv maskh = get_gprh(ctx, a->rs1); + return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, maskl, maskh); + } } static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a) { - TCGv source1, csr_store, dest, rs1_pass; - RISCV_OP_CSR_PRE; - gen_helper_csrrc(dest, cpu_env, source1, csr_store, rs1_pass); - RISCV_OP_CSR_POST; - return true; + /* + * If rs1 == 0, the insn shall not write to the csr at all, nor + * cause any of the side effects that might occur on a csr write. + * Note that if rs1 specifies a register other than x0, holding + * a zero value, the instruction will still attempt to write the + * unmodified value back to the csr and will cause side effects. + */ + if (get_xl(ctx) < MXL_RV128) { + if (a->rs1 == 0) { + return do_csrr(ctx, a->rd, a->csr); + } + + TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO); + return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask); + } else { + if (a->rs1 == 0) { + return do_csrr_i128(ctx, a->rd, a->csr); + } + + TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO); + TCGv maskh = get_gprh(ctx, a->rs1); + return do_csrrw_i128(ctx, a->rd, a->csr, + ctx->zero, ctx->zero, maskl, maskh); + } } static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a) { - TCGv source1, csr_store, dest, rs1_pass; - RISCV_OP_CSR_PRE; - gen_helper_csrrw(dest, cpu_env, rs1_pass, csr_store); - RISCV_OP_CSR_POST; - return true; + RISCVMXL xl = get_xl(ctx); + if (xl < MXL_RV128) { + TCGv src = tcg_constant_tl(a->rs1); + + /* + * If rd == 0, the insn shall not read the csr, nor cause any of the + * side effects that might occur on a csr read. + */ + if (a->rd == 0) { + return do_csrw(ctx, a->csr, src); + } + + TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX : + (target_ulong)-1); + return do_csrrw(ctx, a->rd, a->csr, src, mask); + } else { + TCGv src = tcg_constant_tl(a->rs1); + + /* + * If rd == 0, the insn shall not read the csr, nor cause any of the + * side effects that might occur on a csr read. 
+         */
+        if (a->rd == 0) {
+            return do_csrw_i128(ctx, a->csr, src, ctx->zero);
+        }
+
+        TCGv mask = tcg_constant_tl(-1);
+        return do_csrrw_i128(ctx, a->rd, a->csr, src, ctx->zero, mask, mask);
+    }
 }
 
 static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
 {
-    TCGv source1, csr_store, dest, rs1_pass;
-    RISCV_OP_CSR_PRE;
-    gen_helper_csrrs(dest, cpu_env, rs1_pass, csr_store, rs1_pass);
-    RISCV_OP_CSR_POST;
-    return true;
+    /*
+     * If rs1 == 0, the insn shall not write to the csr at all, nor
+     * cause any of the side effects that might occur on a csr write.
+     * Note that if rs1 specifies a register other than x0, holding
+     * a zero value, the instruction will still attempt to write the
+     * unmodified value back to the csr and will cause side effects.
+     */
+    if (get_xl(ctx) < MXL_RV128) {
+        if (a->rs1 == 0) {
+            return do_csrr(ctx, a->rd, a->csr);
+        }
+
+        TCGv ones = tcg_constant_tl(-1);
+        TCGv mask = tcg_constant_tl(a->rs1);
+        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
+    } else {
+        if (a->rs1 == 0) {
+            return do_csrr_i128(ctx, a->rd, a->csr);
+        }
+
+        TCGv ones = tcg_constant_tl(-1);
+        TCGv mask = tcg_constant_tl(a->rs1);
+        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, mask, ctx->zero);
+    }
 }
 
-static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a)
+static bool trans_csrrci(DisasContext *ctx, arg_csrrci * a)
 {
-    TCGv source1, csr_store, dest, rs1_pass;
-    RISCV_OP_CSR_PRE;
-    gen_helper_csrrc(dest, cpu_env, rs1_pass, csr_store, rs1_pass);
-    RISCV_OP_CSR_POST;
-    return true;
+    /*
+     * If rs1 == 0, the insn shall not write to the csr at all, nor
+     * cause any of the side effects that might occur on a csr write.
+     * Note that if rs1 specifies a register other than x0, holding
+     * a zero value, the instruction will still attempt to write the
+     * unmodified value back to the csr and will cause side effects.
+     */
+    if (get_xl(ctx) < MXL_RV128) {
+        if (a->rs1 == 0) {
+            return do_csrr(ctx, a->rd, a->csr);
+        }
+
+        TCGv mask = tcg_constant_tl(a->rs1);
+        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
+    } else {
+        if (a->rs1 == 0) {
+            return do_csrr_i128(ctx, a->rd, a->csr);
+        }
+
+        TCGv mask = tcg_constant_tl(a->rs1);
+        return do_csrrw_i128(ctx, a->rd, a->csr,
+                             ctx->zero, ctx->zero, mask, ctx->zero);
+    }
 }
diff --git a/target/riscv/insn_trans/trans_rvk.c.inc b/target/riscv/insn_trans/trans_rvk.c.inc
new file mode 100644
index 0000000000..90f4eeff60
--- /dev/null
+++ b/target/riscv/insn_trans/trans_rvk.c.inc
@@ -0,0 +1,391 @@
+/*
+ * RISC-V translation routines for the Zk[nd,ne,nh,sed,sh] Standard Extension.
+ *
+ * Copyright (c) 2021 Ruibo Lu, luruibo2000@163.com
+ * Copyright (c) 2021 Zewen Ye, lustrew@foxmail.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#define REQUIRE_ZKND(ctx) do { \ + if (!ctx->cfg_ptr->ext_zknd) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_ZKNE(ctx) do { \ + if (!ctx->cfg_ptr->ext_zkne) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_ZKNH(ctx) do { \ + if (!ctx->cfg_ptr->ext_zknh) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_ZKSED(ctx) do { \ + if (!ctx->cfg_ptr->ext_zksed) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_ZKSH(ctx) do { \ + if (!ctx->cfg_ptr->ext_zksh) { \ + return false; \ + } \ +} while (0) + +static bool gen_aes32_sm4(DisasContext *ctx, arg_k_aes *a, + void (*func)(TCGv, TCGv, TCGv, TCGv)) +{ + TCGv shamt = tcg_constant_tl(a->shamt); + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE); + TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); + + func(dest, src1, src2, shamt); + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool trans_aes32esmi(DisasContext *ctx, arg_aes32esmi *a) +{ + REQUIRE_32BIT(ctx); + REQUIRE_ZKNE(ctx); + return gen_aes32_sm4(ctx, a, gen_helper_aes32esmi); +} + +static bool trans_aes32esi(DisasContext *ctx, arg_aes32esi *a) +{ + REQUIRE_32BIT(ctx); + REQUIRE_ZKNE(ctx); + return gen_aes32_sm4(ctx, a, gen_helper_aes32esi); +} + +static bool trans_aes32dsmi(DisasContext *ctx, arg_aes32dsmi *a) +{ + REQUIRE_32BIT(ctx); + REQUIRE_ZKND(ctx); + return gen_aes32_sm4(ctx, a, gen_helper_aes32dsmi); +} + +static bool trans_aes32dsi(DisasContext *ctx, arg_aes32dsi *a) +{ + REQUIRE_32BIT(ctx); + REQUIRE_ZKND(ctx); + return gen_aes32_sm4(ctx, a, gen_helper_aes32dsi); +} + +static bool trans_aes64es(DisasContext *ctx, arg_aes64es *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZKNE(ctx); + return gen_arith(ctx, a, EXT_NONE, gen_helper_aes64es, NULL); +} + +static bool trans_aes64esm(DisasContext *ctx, arg_aes64esm *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZKNE(ctx); + return gen_arith(ctx, a, EXT_NONE, gen_helper_aes64esm, NULL); +} + +static bool trans_aes64ds(DisasContext *ctx, arg_aes64ds *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZKND(ctx); + return gen_arith(ctx, a, EXT_NONE, gen_helper_aes64ds, NULL); +} + +static bool trans_aes64dsm(DisasContext *ctx, arg_aes64dsm *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZKND(ctx); + return gen_arith(ctx, a, EXT_NONE, gen_helper_aes64dsm, NULL); +} + +static bool trans_aes64ks2(DisasContext *ctx, arg_aes64ks2 *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_EITHER_EXT(ctx, zknd, zkne); + return gen_arith(ctx, a, EXT_NONE, gen_helper_aes64ks2, NULL); +} + +static bool trans_aes64ks1i(DisasContext *ctx, arg_aes64ks1i *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_EITHER_EXT(ctx, zknd, zkne); + + if (a->imm > 0xA) { + return false; + } + + return gen_arith_imm_tl(ctx, a, EXT_NONE, gen_helper_aes64ks1i, NULL); +} + +static bool trans_aes64im(DisasContext *ctx, arg_aes64im *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZKND(ctx); + return gen_unary(ctx, a, EXT_NONE, gen_helper_aes64im); +} + +static bool gen_sha256(DisasContext *ctx, arg_r2 *a, DisasExtend ext, + void (*func)(TCGv_i32, TCGv_i32, int32_t), + int32_t num1, int32_t num2, int32_t num3) +{ + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1 = get_gpr(ctx, a->rs1, ext); + TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 t1 = tcg_temp_new_i32(); + TCGv_i32 t2 = tcg_temp_new_i32(); + + tcg_gen_trunc_tl_i32(t0, src1); + tcg_gen_rotri_i32(t1, t0, num1); + tcg_gen_rotri_i32(t2, t0, num2); + tcg_gen_xor_i32(t1, t1, t2); + func(t2, t0, num3); + tcg_gen_xor_i32(t1, t1, t2); + tcg_gen_ext_i32_tl(dest, t1); + + gen_set_gpr(ctx, a->rd, dest); + tcg_temp_free_i32(t0); 
+ tcg_temp_free_i32(t1); + tcg_temp_free_i32(t2); + return true; +} + +static bool trans_sha256sig0(DisasContext *ctx, arg_sha256sig0 *a) +{ + REQUIRE_ZKNH(ctx); + return gen_sha256(ctx, a, EXT_NONE, tcg_gen_shri_i32, 7, 18, 3); +} + +static bool trans_sha256sig1(DisasContext *ctx, arg_sha256sig1 *a) +{ + REQUIRE_ZKNH(ctx); + return gen_sha256(ctx, a, EXT_NONE, tcg_gen_shri_i32, 17, 19, 10); +} + +static bool trans_sha256sum0(DisasContext *ctx, arg_sha256sum0 *a) +{ + REQUIRE_ZKNH(ctx); + return gen_sha256(ctx, a, EXT_NONE, tcg_gen_rotri_i32, 2, 13, 22); +} + +static bool trans_sha256sum1(DisasContext *ctx, arg_sha256sum1 *a) +{ + REQUIRE_ZKNH(ctx); + return gen_sha256(ctx, a, EXT_NONE, tcg_gen_rotri_i32, 6, 11, 25); +} + +static bool gen_sha512_rv32(DisasContext *ctx, arg_r *a, DisasExtend ext, + void (*func1)(TCGv_i64, TCGv_i64, int64_t), + void (*func2)(TCGv_i64, TCGv_i64, int64_t), + int64_t num1, int64_t num2, int64_t num3) +{ + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1 = get_gpr(ctx, a->rs1, ext); + TCGv src2 = get_gpr(ctx, a->rs2, ext); + TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_new_i64(); + + tcg_gen_concat_tl_i64(t0, src1, src2); + func1(t1, t0, num1); + func2(t2, t0, num2); + tcg_gen_xor_i64(t1, t1, t2); + tcg_gen_rotri_i64(t2, t0, num3); + tcg_gen_xor_i64(t1, t1, t2); + tcg_gen_trunc_i64_tl(dest, t1); + + gen_set_gpr(ctx, a->rd, dest); + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); + return true; +} + +static bool trans_sha512sum0r(DisasContext *ctx, arg_sha512sum0r *a) +{ + REQUIRE_32BIT(ctx); + REQUIRE_ZKNH(ctx); + return gen_sha512_rv32(ctx, a, EXT_NONE, tcg_gen_rotli_i64, + tcg_gen_rotli_i64, 25, 30, 28); +} + +static bool trans_sha512sum1r(DisasContext *ctx, arg_sha512sum1r *a) +{ + REQUIRE_32BIT(ctx); + REQUIRE_ZKNH(ctx); + return gen_sha512_rv32(ctx, a, EXT_NONE, tcg_gen_rotli_i64, + tcg_gen_rotri_i64, 23, 14, 18); +} + +static bool trans_sha512sig0l(DisasContext *ctx, arg_sha512sig0l *a) +{ + REQUIRE_32BIT(ctx); + REQUIRE_ZKNH(ctx); + return gen_sha512_rv32(ctx, a, EXT_NONE, tcg_gen_rotri_i64, + tcg_gen_rotri_i64, 1, 7, 8); +} + +static bool trans_sha512sig1l(DisasContext *ctx, arg_sha512sig1l *a) +{ + REQUIRE_32BIT(ctx); + REQUIRE_ZKNH(ctx); + return gen_sha512_rv32(ctx, a, EXT_NONE, tcg_gen_rotli_i64, + tcg_gen_rotri_i64, 3, 6, 19); +} + +static bool gen_sha512h_rv32(DisasContext *ctx, arg_r *a, DisasExtend ext, + void (*func)(TCGv_i64, TCGv_i64, int64_t), + int64_t num1, int64_t num2, int64_t num3) +{ + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1 = get_gpr(ctx, a->rs1, ext); + TCGv src2 = get_gpr(ctx, a->rs2, ext); + TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_new_i64(); + + tcg_gen_concat_tl_i64(t0, src1, src2); + func(t1, t0, num1); + tcg_gen_ext32u_i64(t2, t0); + tcg_gen_shri_i64(t2, t2, num2); + tcg_gen_xor_i64(t1, t1, t2); + tcg_gen_rotri_i64(t2, t0, num3); + tcg_gen_xor_i64(t1, t1, t2); + tcg_gen_trunc_i64_tl(dest, t1); + + gen_set_gpr(ctx, a->rd, dest); + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); + return true; +} + +static bool trans_sha512sig0h(DisasContext *ctx, arg_sha512sig0h *a) +{ + REQUIRE_32BIT(ctx); + REQUIRE_ZKNH(ctx); + return gen_sha512h_rv32(ctx, a, EXT_NONE, tcg_gen_rotri_i64, 1, 7, 8); +} + +static bool trans_sha512sig1h(DisasContext *ctx, arg_sha512sig1h *a) +{ + REQUIRE_32BIT(ctx); + REQUIRE_ZKNH(ctx); + return gen_sha512h_rv32(ctx, a, EXT_NONE, tcg_gen_rotli_i64, 3, 6, 19); 
+}
+
+static bool gen_sha512_rv64(DisasContext *ctx, arg_r2 *a, DisasExtend ext,
+                            void (*func)(TCGv_i64, TCGv_i64, int64_t),
+                            int64_t num1, int64_t num2, int64_t num3)
+{
+    TCGv dest = dest_gpr(ctx, a->rd);
+    TCGv src1 = get_gpr(ctx, a->rs1, ext);
+    TCGv_i64 t0 = tcg_temp_new_i64();
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+
+    tcg_gen_extu_tl_i64(t0, src1);
+    tcg_gen_rotri_i64(t1, t0, num1);
+    tcg_gen_rotri_i64(t2, t0, num2);
+    tcg_gen_xor_i64(t1, t1, t2);
+    func(t2, t0, num3);
+    tcg_gen_xor_i64(t1, t1, t2);
+    tcg_gen_trunc_i64_tl(dest, t1);
+
+    gen_set_gpr(ctx, a->rd, dest);
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+    return true;
+}
+
+static bool trans_sha512sig0(DisasContext *ctx, arg_sha512sig0 *a)
+{
+    REQUIRE_64BIT(ctx);
+    REQUIRE_ZKNH(ctx);
+    return gen_sha512_rv64(ctx, a, EXT_NONE, tcg_gen_shri_i64, 1, 8, 7);
+}
+
+static bool trans_sha512sig1(DisasContext *ctx, arg_sha512sig1 *a)
+{
+    REQUIRE_64BIT(ctx);
+    REQUIRE_ZKNH(ctx);
+    return gen_sha512_rv64(ctx, a, EXT_NONE, tcg_gen_shri_i64, 19, 61, 6);
+}
+
+static bool trans_sha512sum0(DisasContext *ctx, arg_sha512sum0 *a)
+{
+    REQUIRE_64BIT(ctx);
+    REQUIRE_ZKNH(ctx);
+    return gen_sha512_rv64(ctx, a, EXT_NONE, tcg_gen_rotri_i64, 28, 34, 39);
+}
+
+static bool trans_sha512sum1(DisasContext *ctx, arg_sha512sum1 *a)
+{
+    REQUIRE_64BIT(ctx);
+    REQUIRE_ZKNH(ctx);
+    return gen_sha512_rv64(ctx, a, EXT_NONE, tcg_gen_rotri_i64, 14, 18, 41);
+}
+
+/* SM3 */
+static bool gen_sm3(DisasContext *ctx, arg_r2 *a, int32_t b, int32_t c)
+{
+    TCGv dest = dest_gpr(ctx, a->rd);
+    TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
+    TCGv_i32 t0 = tcg_temp_new_i32();
+    TCGv_i32 t1 = tcg_temp_new_i32();
+
+    tcg_gen_trunc_tl_i32(t0, src1);
+    tcg_gen_rotli_i32(t1, t0, b);
+    tcg_gen_xor_i32(t1, t0, t1);
+    tcg_gen_rotli_i32(t0, t0, c);
+    tcg_gen_xor_i32(t1, t1, t0);
+    tcg_gen_ext_i32_tl(dest, t1);
+    gen_set_gpr(ctx, a->rd, dest);
+
+    tcg_temp_free_i32(t0);
+    tcg_temp_free_i32(t1);
+    return true;
+}
+
+static bool trans_sm3p0(DisasContext *ctx, arg_sm3p0 *a)
+{
+    REQUIRE_ZKSH(ctx);
+    return gen_sm3(ctx, a, 9, 17);
+}
+
+static bool trans_sm3p1(DisasContext *ctx, arg_sm3p1 *a)
+{
+    REQUIRE_ZKSH(ctx);
+    return gen_sm3(ctx, a, 15, 23);
+}
+
+/* SM4 */
+static bool trans_sm4ed(DisasContext *ctx, arg_sm4ed *a)
+{
+    REQUIRE_ZKSED(ctx);
+    return gen_aes32_sm4(ctx, a, gen_helper_sm4ed);
+}
+
+static bool trans_sm4ks(DisasContext *ctx, arg_sm4ks *a)
+{
+    REQUIRE_ZKSED(ctx);
+    return gen_aes32_sm4(ctx, a, gen_helper_sm4ks);
+}
diff --git a/target/riscv/insn_trans/trans_rvm.c.inc b/target/riscv/insn_trans/trans_rvm.c.inc
index 10ecc456fc..ec7f705aab 100644
--- a/target/riscv/insn_trans/trans_rvm.c.inc
+++ b/target/riscv/insn_trans/trans_rvm.c.inc
@@ -18,111 +18,416 @@
  * this program. If not, see <http://www.gnu.org/licenses/>.
*/ +#define REQUIRE_M_OR_ZMMUL(ctx) do { \ + if (!ctx->cfg_ptr->ext_zmmul && !has_ext(ctx, RVM)) { \ + return false; \ + } \ +} while (0) + +static void gen_mulhu_i128(TCGv r2, TCGv r3, TCGv al, TCGv ah, TCGv bl, TCGv bh) +{ + TCGv tmpl = tcg_temp_new(); + TCGv tmph = tcg_temp_new(); + TCGv r0 = tcg_temp_new(); + TCGv r1 = tcg_temp_new(); + TCGv zero = tcg_constant_tl(0); + + tcg_gen_mulu2_tl(r0, r1, al, bl); + + tcg_gen_mulu2_tl(tmpl, tmph, al, bh); + tcg_gen_add2_tl(r1, r2, r1, zero, tmpl, tmph); + tcg_gen_mulu2_tl(tmpl, tmph, ah, bl); + tcg_gen_add2_tl(r1, tmph, r1, r2, tmpl, tmph); + /* Overflow detection into r3 */ + tcg_gen_setcond_tl(TCG_COND_LTU, r3, tmph, r2); + + tcg_gen_mov_tl(r2, tmph); + + tcg_gen_mulu2_tl(tmpl, tmph, ah, bh); + tcg_gen_add2_tl(r2, r3, r2, r3, tmpl, tmph); + + tcg_temp_free(tmpl); + tcg_temp_free(tmph); +} + +static void gen_mul_i128(TCGv rl, TCGv rh, + TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h) +{ + TCGv tmpl = tcg_temp_new(); + TCGv tmph = tcg_temp_new(); + TCGv tmpx = tcg_temp_new(); + TCGv zero = tcg_constant_tl(0); + + tcg_gen_mulu2_tl(rl, rh, rs1l, rs2l); + tcg_gen_mulu2_tl(tmpl, tmph, rs1l, rs2h); + tcg_gen_add2_tl(rh, tmpx, rh, zero, tmpl, tmph); + tcg_gen_mulu2_tl(tmpl, tmph, rs1h, rs2l); + tcg_gen_add2_tl(rh, tmph, rh, tmpx, tmpl, tmph); + + tcg_temp_free(tmpl); + tcg_temp_free(tmph); + tcg_temp_free(tmpx); +} static bool trans_mul(DisasContext *ctx, arg_mul *a) { - REQUIRE_EXT(ctx, RVM); - return gen_arith(ctx, a, &tcg_gen_mul_tl); + REQUIRE_M_OR_ZMMUL(ctx); + return gen_arith(ctx, a, EXT_NONE, tcg_gen_mul_tl, gen_mul_i128); +} + +static void gen_mulh_i128(TCGv rl, TCGv rh, + TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h) +{ + TCGv t0l = tcg_temp_new(); + TCGv t0h = tcg_temp_new(); + TCGv t1l = tcg_temp_new(); + TCGv t1h = tcg_temp_new(); + + gen_mulhu_i128(rl, rh, rs1l, rs1h, rs2l, rs2h); + tcg_gen_sari_tl(t0h, rs1h, 63); + tcg_gen_and_tl(t0l, t0h, rs2l); + tcg_gen_and_tl(t0h, t0h, rs2h); + tcg_gen_sari_tl(t1h, rs2h, 63); + tcg_gen_and_tl(t1l, t1h, rs1l); + tcg_gen_and_tl(t1h, t1h, rs1h); + tcg_gen_sub2_tl(t0l, t0h, rl, rh, t0l, t0h); + tcg_gen_sub2_tl(rl, rh, t0l, t0h, t1l, t1h); + + tcg_temp_free(t0l); + tcg_temp_free(t0h); + tcg_temp_free(t1l); + tcg_temp_free(t1h); +} + +static void gen_mulh(TCGv ret, TCGv s1, TCGv s2) +{ + TCGv discard = tcg_temp_new(); + + tcg_gen_muls2_tl(discard, ret, s1, s2); + tcg_temp_free(discard); +} + +static void gen_mulh_w(TCGv ret, TCGv s1, TCGv s2) +{ + tcg_gen_mul_tl(ret, s1, s2); + tcg_gen_sari_tl(ret, ret, 32); } static bool trans_mulh(DisasContext *ctx, arg_mulh *a) { - REQUIRE_EXT(ctx, RVM); - TCGv source1 = tcg_temp_new(); - TCGv source2 = tcg_temp_new(); - gen_get_gpr(source1, a->rs1); - gen_get_gpr(source2, a->rs2); + REQUIRE_M_OR_ZMMUL(ctx); + return gen_arith_per_ol(ctx, a, EXT_SIGN, gen_mulh, gen_mulh_w, + gen_mulh_i128); +} - tcg_gen_muls2_tl(source2, source1, source1, source2); +static void gen_mulhsu_i128(TCGv rl, TCGv rh, + TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h) +{ - gen_set_gpr(a->rd, source1); - tcg_temp_free(source1); - tcg_temp_free(source2); - return true; + TCGv t0l = tcg_temp_new(); + TCGv t0h = tcg_temp_new(); + + gen_mulhu_i128(rl, rh, rs1l, rs1h, rs2l, rs2h); + tcg_gen_sari_tl(t0h, rs1h, 63); + tcg_gen_and_tl(t0l, t0h, rs2l); + tcg_gen_and_tl(t0h, t0h, rs2h); + tcg_gen_sub2_tl(rl, rh, rl, rh, t0l, t0h); + + tcg_temp_free(t0l); + tcg_temp_free(t0h); +} + +static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2) +{ + TCGv rl = tcg_temp_new(); + TCGv rh = tcg_temp_new(); + + 
tcg_gen_mulu2_tl(rl, rh, arg1, arg2); + /* fix up for one negative */ + tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1); + tcg_gen_and_tl(rl, rl, arg2); + tcg_gen_sub_tl(ret, rh, rl); + + tcg_temp_free(rl); + tcg_temp_free(rh); +} + +static void gen_mulhsu_w(TCGv ret, TCGv arg1, TCGv arg2) +{ + TCGv t1 = tcg_temp_new(); + TCGv t2 = tcg_temp_new(); + + tcg_gen_ext32s_tl(t1, arg1); + tcg_gen_ext32u_tl(t2, arg2); + tcg_gen_mul_tl(ret, t1, t2); + tcg_temp_free(t1); + tcg_temp_free(t2); + tcg_gen_sari_tl(ret, ret, 32); } static bool trans_mulhsu(DisasContext *ctx, arg_mulhsu *a) { - REQUIRE_EXT(ctx, RVM); - return gen_arith(ctx, a, &gen_mulhsu); + REQUIRE_M_OR_ZMMUL(ctx); + return gen_arith_per_ol(ctx, a, EXT_NONE, gen_mulhsu, gen_mulhsu_w, + gen_mulhsu_i128); +} + +static void gen_mulhu(TCGv ret, TCGv s1, TCGv s2) +{ + TCGv discard = tcg_temp_new(); + + tcg_gen_mulu2_tl(discard, ret, s1, s2); + tcg_temp_free(discard); } static bool trans_mulhu(DisasContext *ctx, arg_mulhu *a) { - REQUIRE_EXT(ctx, RVM); - TCGv source1 = tcg_temp_new(); - TCGv source2 = tcg_temp_new(); - gen_get_gpr(source1, a->rs1); - gen_get_gpr(source2, a->rs2); + REQUIRE_M_OR_ZMMUL(ctx); + /* gen_mulh_w works for either sign as input. */ + return gen_arith_per_ol(ctx, a, EXT_ZERO, gen_mulhu, gen_mulh_w, + gen_mulhu_i128); +} - tcg_gen_mulu2_tl(source2, source1, source1, source2); +static void gen_div_i128(TCGv rdl, TCGv rdh, + TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h) +{ + gen_helper_divs_i128(rdl, cpu_env, rs1l, rs1h, rs2l, rs2h); + tcg_gen_ld_tl(rdh, cpu_env, offsetof(CPURISCVState, retxh)); +} - gen_set_gpr(a->rd, source1); - tcg_temp_free(source1); - tcg_temp_free(source2); - return true; +static void gen_div(TCGv ret, TCGv source1, TCGv source2) +{ + TCGv temp1, temp2, zero, one, mone, min; + + temp1 = tcg_temp_new(); + temp2 = tcg_temp_new(); + zero = tcg_constant_tl(0); + one = tcg_constant_tl(1); + mone = tcg_constant_tl(-1); + min = tcg_constant_tl(1ull << (TARGET_LONG_BITS - 1)); + + /* + * If overflow, set temp2 to 1, else source2. + * This produces the required result of min. + */ + tcg_gen_setcond_tl(TCG_COND_EQ, temp1, source1, min); + tcg_gen_setcond_tl(TCG_COND_EQ, temp2, source2, mone); + tcg_gen_and_tl(temp1, temp1, temp2); + tcg_gen_movcond_tl(TCG_COND_NE, temp2, temp1, zero, one, source2); + + /* + * If div by zero, set temp1 to -1 and temp2 to 1 to + * produce the required result of -1. + */ + tcg_gen_movcond_tl(TCG_COND_EQ, temp1, source2, zero, mone, source1); + tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, temp2); + + tcg_gen_div_tl(ret, temp1, temp2); + + tcg_temp_free(temp1); + tcg_temp_free(temp2); } static bool trans_div(DisasContext *ctx, arg_div *a) { REQUIRE_EXT(ctx, RVM); - return gen_arith(ctx, a, &gen_div); + return gen_arith(ctx, a, EXT_SIGN, gen_div, gen_div_i128); +} + +static void gen_divu_i128(TCGv rdl, TCGv rdh, + TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h) +{ + gen_helper_divu_i128(rdl, cpu_env, rs1l, rs1h, rs2l, rs2h); + tcg_gen_ld_tl(rdh, cpu_env, offsetof(CPURISCVState, retxh)); +} + +static void gen_divu(TCGv ret, TCGv source1, TCGv source2) +{ + TCGv temp1, temp2, zero, one, max; + + temp1 = tcg_temp_new(); + temp2 = tcg_temp_new(); + zero = tcg_constant_tl(0); + one = tcg_constant_tl(1); + max = tcg_constant_tl(~0); + + /* + * If div by zero, set temp1 to max and temp2 to 1 to + * produce the required result of max. 
+ */ + tcg_gen_movcond_tl(TCG_COND_EQ, temp1, source2, zero, max, source1); + tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, source2); + tcg_gen_divu_tl(ret, temp1, temp2); + + tcg_temp_free(temp1); + tcg_temp_free(temp2); } static bool trans_divu(DisasContext *ctx, arg_divu *a) { REQUIRE_EXT(ctx, RVM); - return gen_arith(ctx, a, &gen_divu); + return gen_arith(ctx, a, EXT_ZERO, gen_divu, gen_divu_i128); +} + +static void gen_rem_i128(TCGv rdl, TCGv rdh, + TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h) +{ + gen_helper_rems_i128(rdl, cpu_env, rs1l, rs1h, rs2l, rs2h); + tcg_gen_ld_tl(rdh, cpu_env, offsetof(CPURISCVState, retxh)); +} + +static void gen_rem(TCGv ret, TCGv source1, TCGv source2) +{ + TCGv temp1, temp2, zero, one, mone, min; + + temp1 = tcg_temp_new(); + temp2 = tcg_temp_new(); + zero = tcg_constant_tl(0); + one = tcg_constant_tl(1); + mone = tcg_constant_tl(-1); + min = tcg_constant_tl(1ull << (TARGET_LONG_BITS - 1)); + + /* + * If overflow, set temp1 to 0, else source1. + * This avoids a possible host trap, and produces the required result of 0. + */ + tcg_gen_setcond_tl(TCG_COND_EQ, temp1, source1, min); + tcg_gen_setcond_tl(TCG_COND_EQ, temp2, source2, mone); + tcg_gen_and_tl(temp1, temp1, temp2); + tcg_gen_movcond_tl(TCG_COND_NE, temp1, temp1, zero, zero, source1); + + /* + * If div by zero, set temp2 to 1, else source2. + * This avoids a possible host trap, but produces an incorrect result. + */ + tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, source2); + + tcg_gen_rem_tl(temp1, temp1, temp2); + + /* If div by zero, the required result is the original dividend. */ + tcg_gen_movcond_tl(TCG_COND_EQ, ret, source2, zero, source1, temp1); + + tcg_temp_free(temp1); + tcg_temp_free(temp2); } static bool trans_rem(DisasContext *ctx, arg_rem *a) { REQUIRE_EXT(ctx, RVM); - return gen_arith(ctx, a, &gen_rem); + return gen_arith(ctx, a, EXT_SIGN, gen_rem, gen_rem_i128); +} + +static void gen_remu_i128(TCGv rdl, TCGv rdh, + TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h) +{ + gen_helper_remu_i128(rdl, cpu_env, rs1l, rs1h, rs2l, rs2h); + tcg_gen_ld_tl(rdh, cpu_env, offsetof(CPURISCVState, retxh)); +} + +static void gen_remu(TCGv ret, TCGv source1, TCGv source2) +{ + TCGv temp, zero, one; + + temp = tcg_temp_new(); + zero = tcg_constant_tl(0); + one = tcg_constant_tl(1); + + /* + * If div by zero, set temp to 1, else source2. + * This avoids a possible host trap, but produces an incorrect result. + */ + tcg_gen_movcond_tl(TCG_COND_EQ, temp, source2, zero, one, source2); + + tcg_gen_remu_tl(temp, source1, temp); + + /* If div by zero, the required result is the original dividend. 
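+     * For example, remu(x, 0) == x for any x, which is the remainder-by-zero
+     * result the M extension requires.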
*/ + tcg_gen_movcond_tl(TCG_COND_EQ, ret, source2, zero, source1, temp); + + tcg_temp_free(temp); } static bool trans_remu(DisasContext *ctx, arg_remu *a) { REQUIRE_EXT(ctx, RVM); - return gen_arith(ctx, a, &gen_remu); + return gen_arith(ctx, a, EXT_ZERO, gen_remu, gen_remu_i128); } static bool trans_mulw(DisasContext *ctx, arg_mulw *a) { - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVM); - - return gen_arith(ctx, a, &gen_mulw); + REQUIRE_64_OR_128BIT(ctx); + REQUIRE_M_OR_ZMMUL(ctx); + ctx->ol = MXL_RV32; + return gen_arith(ctx, a, EXT_NONE, tcg_gen_mul_tl, NULL); } static bool trans_divw(DisasContext *ctx, arg_divw *a) { - REQUIRE_64BIT(ctx); + REQUIRE_64_OR_128BIT(ctx); REQUIRE_EXT(ctx, RVM); - - return gen_arith_div_w(ctx, a, &gen_div); + ctx->ol = MXL_RV32; + return gen_arith(ctx, a, EXT_SIGN, gen_div, NULL); } static bool trans_divuw(DisasContext *ctx, arg_divuw *a) { - REQUIRE_64BIT(ctx); + REQUIRE_64_OR_128BIT(ctx); REQUIRE_EXT(ctx, RVM); - - return gen_arith_div_uw(ctx, a, &gen_divu); + ctx->ol = MXL_RV32; + return gen_arith(ctx, a, EXT_ZERO, gen_divu, NULL); } static bool trans_remw(DisasContext *ctx, arg_remw *a) { - REQUIRE_64BIT(ctx); + REQUIRE_64_OR_128BIT(ctx); REQUIRE_EXT(ctx, RVM); - - return gen_arith_div_w(ctx, a, &gen_rem); + ctx->ol = MXL_RV32; + return gen_arith(ctx, a, EXT_SIGN, gen_rem, NULL); } static bool trans_remuw(DisasContext *ctx, arg_remuw *a) { - REQUIRE_64BIT(ctx); + REQUIRE_64_OR_128BIT(ctx); REQUIRE_EXT(ctx, RVM); - - return gen_arith_div_uw(ctx, a, &gen_remu); + ctx->ol = MXL_RV32; + return gen_arith(ctx, a, EXT_ZERO, gen_remu, NULL); +} + +static bool trans_muld(DisasContext *ctx, arg_muld *a) +{ + REQUIRE_128BIT(ctx); + REQUIRE_M_OR_ZMMUL(ctx); + ctx->ol = MXL_RV64; + return gen_arith(ctx, a, EXT_SIGN, tcg_gen_mul_tl, NULL); +} + +static bool trans_divd(DisasContext *ctx, arg_divd *a) +{ + REQUIRE_128BIT(ctx); + REQUIRE_EXT(ctx, RVM); + ctx->ol = MXL_RV64; + return gen_arith(ctx, a, EXT_SIGN, gen_div, NULL); +} + +static bool trans_divud(DisasContext *ctx, arg_divud *a) +{ + REQUIRE_128BIT(ctx); + REQUIRE_EXT(ctx, RVM); + ctx->ol = MXL_RV64; + return gen_arith(ctx, a, EXT_ZERO, gen_divu, NULL); +} + +static bool trans_remd(DisasContext *ctx, arg_remd *a) +{ + REQUIRE_128BIT(ctx); + REQUIRE_EXT(ctx, RVM); + ctx->ol = MXL_RV64; + return gen_arith(ctx, a, EXT_SIGN, gen_rem, NULL); +} + +static bool trans_remud(DisasContext *ctx, arg_remud *a) +{ + REQUIRE_128BIT(ctx); + REQUIRE_EXT(ctx, RVM); + ctx->ol = MXL_RV64; + return gen_arith(ctx, a, EXT_ZERO, gen_remu, NULL); } diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc index 83d9a285ba..4dea4413ae 100644 --- a/target/riscv/insn_trans/trans_rvv.c.inc +++ b/target/riscv/insn_trans/trans_rvv.c.inc @@ -1,5 +1,4 @@ /* - * RISC-V translation routines for the RVV Standard Extension. * * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved. 
* @@ -19,76 +18,599 @@ #include "tcg/tcg-gvec-desc.h" #include "internals.h" -static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a) +static inline bool is_overlapped(const int8_t astart, int8_t asize, + const int8_t bstart, int8_t bsize) { - TCGv s1, s2, dst; + const int8_t aend = astart + asize; + const int8_t bend = bstart + bsize; - if (!has_ext(ctx, RVV)) { + return MAX(aend, bend) - MIN(astart, bstart) < asize + bsize; +} + +static bool require_rvv(DisasContext *s) +{ + return s->mstatus_vs != 0; +} + +static bool require_rvf(DisasContext *s) +{ + if (s->mstatus_fs == 0) { return false; } - s2 = tcg_temp_new(); - dst = tcg_temp_new(); - - /* Using x0 as the rs1 register specifier, encodes an infinite AVL */ - if (a->rs1 == 0) { - /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */ - s1 = tcg_const_tl(RV_VLEN_MAX); - } else { - s1 = tcg_temp_new(); - gen_get_gpr(s1, a->rs1); + switch (s->sew) { + case MO_16: + case MO_32: + return has_ext(s, RVF); + case MO_64: + return has_ext(s, RVD); + default: + return false; + } +} + +static bool require_scale_rvf(DisasContext *s) +{ + if (s->mstatus_fs == 0) { + return false; + } + + switch (s->sew) { + case MO_8: + case MO_16: + return has_ext(s, RVF); + case MO_32: + return has_ext(s, RVD); + default: + return false; + } +} + +static bool require_zve32f(DisasContext *s) +{ + /* RVV + Zve32f = RVV. */ + if (has_ext(s, RVV)) { + return true; + } + + /* Zve32f doesn't support FP64. (Section 18.2) */ + return s->cfg_ptr->ext_zve32f ? s->sew <= MO_32 : true; +} + +static bool require_scale_zve32f(DisasContext *s) +{ + /* RVV + Zve32f = RVV. */ + if (has_ext(s, RVV)) { + return true; + } + + /* Zve32f doesn't support FP64. (Section 18.2) */ + return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true; +} + +static bool require_zve64f(DisasContext *s) +{ + /* RVV + Zve64f = RVV. */ + if (has_ext(s, RVV)) { + return true; + } + + /* Zve64f doesn't support FP64. (Section 18.2) */ + return s->cfg_ptr->ext_zve64f ? s->sew <= MO_32 : true; +} + +static bool require_scale_zve64f(DisasContext *s) +{ + /* RVV + Zve64f = RVV. */ + if (has_ext(s, RVV)) { + return true; + } + + /* Zve64f doesn't support FP64. (Section 18.2) */ + return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true; +} + +/* Destination vector register group cannot overlap source mask register. */ +static bool require_vm(int vm, int vd) +{ + return (vm != 0 || vd != 0); +} + +static bool require_nf(int vd, int nf, int lmul) +{ + int size = nf << MAX(lmul, 0); + return size <= 8 && vd + size <= 32; +} + +/* + * Vector register should aligned with the passed-in LMUL (EMUL). + * If LMUL < 0, i.e. fractional LMUL, any vector register is allowed. + */ +static bool require_align(const int8_t val, const int8_t lmul) +{ + return lmul <= 0 || extract32(val, 0, lmul) == 0; +} + +/* + * A destination vector register group can overlap a source vector + * register group only if one of the following holds: + * 1. The destination EEW equals the source EEW. + * 2. The destination EEW is smaller than the source EEW and the overlap + * is in the lowest-numbered part of the source register group. + * 3. The destination EEW is greater than the source EEW, the source EMUL + * is at least 1, and the overlap is in the highest-numbered part of + * the destination register group. + * (Section 5.2) + * + * This function returns true if one of the following holds: + * * Destination vector register group does not overlap a source vector + * register group. + * * Rule 3 met. 
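+ * (Example, assuming LMUL = 1 and a widening op, so the destination EMUL is 2:
+ *  vd = v2 gives the destination group {v2, v3}; vs = v3 may overlap it,
+ *  because the overlap falls in the highest-numbered half of the destination
+ *  group, so rule 3 is met.)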
+ * For rule 1, overlap is allowed so this function doesn't need to be called. + * For rule 2, (vd == vs). Caller has to check whether: (vd != vs) before + * calling this function. + */ +static bool require_noover(const int8_t dst, const int8_t dst_lmul, + const int8_t src, const int8_t src_lmul) +{ + int8_t dst_size = dst_lmul <= 0 ? 1 : 1 << dst_lmul; + int8_t src_size = src_lmul <= 0 ? 1 : 1 << src_lmul; + + /* Destination EEW is greater than the source EEW, check rule 3. */ + if (dst_size > src_size) { + if (dst < src && + src_lmul >= 0 && + is_overlapped(dst, dst_size, src, src_size) && + !is_overlapped(dst, dst_size, src + src_size, src_size)) { + return true; + } + } + + return !is_overlapped(dst, dst_size, src, src_size); +} + +static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2) +{ + TCGv s1, dst; + + if (!require_rvv(s) || + !(has_ext(s, RVV) || s->cfg_ptr->ext_zve32f || + s->cfg_ptr->ext_zve64f)) { + return false; + } + + dst = dest_gpr(s, rd); + + if (rd == 0 && rs1 == 0) { + s1 = tcg_temp_new(); + tcg_gen_mov_tl(s1, cpu_vl); + } else if (rs1 == 0) { + /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */ + s1 = tcg_constant_tl(RV_VLEN_MAX); + } else { + s1 = get_gpr(s, rs1, EXT_ZERO); + } + + gen_helper_vsetvl(dst, cpu_env, s1, s2); + gen_set_gpr(s, rd, dst); + mark_vs_dirty(s); + + gen_set_pc_imm(s, s->pc_succ_insn); + tcg_gen_lookup_and_goto_ptr(); + s->base.is_jmp = DISAS_NORETURN; + + if (rd == 0 && rs1 == 0) { + tcg_temp_free(s1); } - gen_get_gpr(s2, a->rs2); - gen_helper_vsetvl(dst, cpu_env, s1, s2); - gen_set_gpr(a->rd, dst); - tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn); - lookup_and_goto_ptr(ctx); - ctx->base.is_jmp = DISAS_NORETURN; - tcg_temp_free(s1); - tcg_temp_free(s2); - tcg_temp_free(dst); return true; } -static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a) +static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2) { - TCGv s1, s2, dst; + TCGv dst; - if (!has_ext(ctx, RVV)) { + if (!require_rvv(s) || + !(has_ext(s, RVV) || s->cfg_ptr->ext_zve32f || + s->cfg_ptr->ext_zve64f)) { return false; } - s2 = tcg_const_tl(a->zimm); - dst = tcg_temp_new(); + dst = dest_gpr(s, rd); - /* Using x0 as the rs1 register specifier, encodes an infinite AVL */ - if (a->rs1 == 0) { - /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */ - s1 = tcg_const_tl(RV_VLEN_MAX); - } else { - s1 = tcg_temp_new(); - gen_get_gpr(s1, a->rs1); - } gen_helper_vsetvl(dst, cpu_env, s1, s2); - gen_set_gpr(a->rd, dst); - gen_goto_tb(ctx, 0, ctx->pc_succ_insn); - ctx->base.is_jmp = DISAS_NORETURN; + gen_set_gpr(s, rd, dst); + mark_vs_dirty(s); + gen_set_pc_imm(s, s->pc_succ_insn); + tcg_gen_lookup_and_goto_ptr(); + s->base.is_jmp = DISAS_NORETURN; - tcg_temp_free(s1); - tcg_temp_free(s2); - tcg_temp_free(dst); return true; } +static bool trans_vsetvl(DisasContext *s, arg_vsetvl *a) +{ + TCGv s2 = get_gpr(s, a->rs2, EXT_ZERO); + return do_vsetvl(s, a->rd, a->rs1, s2); +} + +static bool trans_vsetvli(DisasContext *s, arg_vsetvli *a) +{ + TCGv s2 = tcg_constant_tl(a->zimm); + return do_vsetvl(s, a->rd, a->rs1, s2); +} + +static bool trans_vsetivli(DisasContext *s, arg_vsetivli *a) +{ + TCGv s1 = tcg_const_tl(a->rs1); + TCGv s2 = tcg_const_tl(a->zimm); + return do_vsetivli(s, a->rd, s1, s2); +} + /* vector register offset from env */ static uint32_t vreg_ofs(DisasContext *s, int reg) { - return offsetof(CPURISCVState, vreg) + reg * s->vlen / 8; + return offsetof(CPURISCVState, vreg) + reg * s->cfg_ptr->vlen / 8; } /* check functions */ +/* + * Vector 
unit-stride, strided, unit-stride segment, strided segment + * store check function. + * + * Rules to be checked here: + * 1. EMUL must within the range: 1/8 <= EMUL <= 8. (Section 7.3) + * 2. Destination vector register number is multiples of EMUL. + * (Section 3.4.2, 7.3) + * 3. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8) + * 4. Vector register numbers accessed by the segment load or store + * cannot increment past 31. (Section 7.8) + */ +static bool vext_check_store(DisasContext *s, int vd, int nf, uint8_t eew) +{ + int8_t emul = eew - s->sew + s->lmul; + return (emul >= -3 && emul <= 3) && + require_align(vd, emul) && + require_nf(vd, nf, emul); +} + +/* + * Vector unit-stride, strided, unit-stride segment, strided segment + * load check function. + * + * Rules to be checked here: + * 1. All rules applies to store instructions are applies + * to load instructions. + * 2. Destination vector register group for a masked vector + * instruction cannot overlap the source mask register (v0). + * (Section 5.3) + */ +static bool vext_check_load(DisasContext *s, int vd, int nf, int vm, + uint8_t eew) +{ + return vext_check_store(s, vd, nf, eew) && require_vm(vm, vd); +} + +/* + * Vector indexed, indexed segment store check function. + * + * Rules to be checked here: + * 1. EMUL must within the range: 1/8 <= EMUL <= 8. (Section 7.3) + * 2. Index vector register number is multiples of EMUL. + * (Section 3.4.2, 7.3) + * 3. Destination vector register number is multiples of LMUL. + * (Section 3.4.2, 7.3) + * 4. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8) + * 5. Vector register numbers accessed by the segment load or store + * cannot increment past 31. (Section 7.8) + */ +static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf, + uint8_t eew) +{ + int8_t emul = eew - s->sew + s->lmul; + bool ret = (emul >= -3 && emul <= 3) && + require_align(vs2, emul) && + require_align(vd, s->lmul) && + require_nf(vd, nf, s->lmul); + + /* + * All Zve* extensions support all vector load and store instructions, + * except Zve64* extensions do not support EEW=64 for index values + * when XLEN=32. (Section 18.2) + */ + if (get_xl(s) == MXL_RV32) { + ret &= (!has_ext(s, RVV) && + s->cfg_ptr->ext_zve64f ? eew != MO_64 : true); + } + + return ret; +} + +/* + * Vector indexed, indexed segment load check function. + * + * Rules to be checked here: + * 1. All rules applies to store instructions are applies + * to load instructions. + * 2. Destination vector register group for a masked vector + * instruction cannot overlap the source mask register (v0). + * (Section 5.3) + * 3. Destination vector register cannot overlap a source vector + * register (vs2) group. + * (Section 5.2) + * 4. Destination vector register groups cannot overlap + * the source vector register (vs2) group for + * indexed segment load instructions. (Section 7.8.3) + */ +static bool vext_check_ld_index(DisasContext *s, int vd, int vs2, + int nf, int vm, uint8_t eew) +{ + int8_t seg_vd; + int8_t emul = eew - s->sew + s->lmul; + bool ret = vext_check_st_index(s, vd, vs2, nf, eew) && + require_vm(vm, vd); + + /* Each segment register group has to follow overlap rules. 
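+     * The loop below walks the nf destination field groups,
+     * seg_vd = vd + i * (1 << max(lmul, 0)), and checks each one against the
+     * index register group vs2. Note that emul is log2(EMUL); for example
+     * EEW=16, SEW=32, LMUL=1 gives emul = -1, i.e. EMUL = 1/2.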
*/ + for (int i = 0; i < nf; ++i) { + seg_vd = vd + (1 << MAX(s->lmul, 0)) * i; + + if (eew > s->sew) { + if (seg_vd != vs2) { + ret &= require_noover(seg_vd, s->lmul, vs2, emul); + } + } else if (eew < s->sew) { + ret &= require_noover(seg_vd, s->lmul, vs2, emul); + } + + /* + * Destination vector register groups cannot overlap + * the source vector register (vs2) group for + * indexed segment load instructions. + */ + if (nf > 1) { + ret &= !is_overlapped(seg_vd, 1 << MAX(s->lmul, 0), + vs2, 1 << MAX(emul, 0)); + } + } + return ret; +} + +static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm) +{ + return require_vm(vm, vd) && + require_align(vd, s->lmul) && + require_align(vs, s->lmul); +} + +/* + * Check function for vector instruction with format: + * single-width result and single-width sources (SEW = SEW op SEW) + * + * Rules to be checked here: + * 1. Destination vector register group for a masked vector + * instruction cannot overlap the source mask register (v0). + * (Section 5.3) + * 2. Destination vector register number is multiples of LMUL. + * (Section 3.4.2) + * 3. Source (vs2, vs1) vector register number are multiples of LMUL. + * (Section 3.4.2) + */ +static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm) +{ + return vext_check_ss(s, vd, vs2, vm) && + require_align(vs1, s->lmul); +} + +static bool vext_check_ms(DisasContext *s, int vd, int vs) +{ + bool ret = require_align(vs, s->lmul); + if (vd != vs) { + ret &= require_noover(vd, 0, vs, s->lmul); + } + return ret; +} + +/* + * Check function for maskable vector instruction with format: + * single-width result and single-width sources (SEW = SEW op SEW) + * + * Rules to be checked here: + * 1. Source (vs2, vs1) vector register number are multiples of LMUL. + * (Section 3.4.2) + * 2. Destination vector register cannot overlap a source vector + * register (vs2, vs1) group. + * (Section 5.2) + * 3. The destination vector register group for a masked vector + * instruction cannot overlap the source mask register (v0), + * unless the destination vector register is being written + * with a mask value (e.g., comparisons) or the scalar result + * of a reduction. (Section 5.3) + */ +static bool vext_check_mss(DisasContext *s, int vd, int vs1, int vs2) +{ + bool ret = vext_check_ms(s, vd, vs2) && + require_align(vs1, s->lmul); + if (vd != vs1) { + ret &= require_noover(vd, 0, vs1, s->lmul); + } + return ret; +} + +/* + * Common check function for vector widening instructions + * of double-width result (2*SEW). + * + * Rules to be checked here: + * 1. The largest vector register group used by an instruction + * can not be greater than 8 vector registers (Section 5.2): + * => LMUL < 8. + * => SEW < 64. + * 2. Double-width SEW cannot greater than ELEN. + * 3. Destination vector register number is multiples of 2 * LMUL. + * (Section 3.4.2) + * 4. Destination vector register group for a masked vector + * instruction cannot overlap the source mask register (v0). + * (Section 5.3) + */ +static bool vext_wide_check_common(DisasContext *s, int vd, int vm) +{ + return (s->lmul <= 2) && + (s->sew < MO_64) && + ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)) && + require_align(vd, s->lmul + 1) && + require_vm(vm, vd); +} + +/* + * Common check function for vector narrowing instructions + * of single-width result (SEW) and double-width source (2*SEW). + * + * Rules to be checked here: + * 1. 
The largest vector register group used by an instruction + * can not be greater than 8 vector registers (Section 5.2): + * => LMUL < 8. + * => SEW < 64. + * 2. Double-width SEW cannot greater than ELEN. + * 3. Source vector register number is multiples of 2 * LMUL. + * (Section 3.4.2) + * 4. Destination vector register number is multiples of LMUL. + * (Section 3.4.2) + * 5. Destination vector register group for a masked vector + * instruction cannot overlap the source mask register (v0). + * (Section 5.3) + */ +static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2, + int vm) +{ + return (s->lmul <= 2) && + (s->sew < MO_64) && + ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)) && + require_align(vs2, s->lmul + 1) && + require_align(vd, s->lmul) && + require_vm(vm, vd); +} + +static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm) +{ + return vext_wide_check_common(s, vd, vm) && + require_align(vs, s->lmul) && + require_noover(vd, s->lmul + 1, vs, s->lmul); +} + +static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm) +{ + return vext_wide_check_common(s, vd, vm) && + require_align(vs, s->lmul + 1); +} + +/* + * Check function for vector instruction with format: + * double-width result and single-width sources (2*SEW = SEW op SEW) + * + * Rules to be checked here: + * 1. All rules in defined in widen common rules are applied. + * 2. Source (vs2, vs1) vector register number are multiples of LMUL. + * (Section 3.4.2) + * 3. Destination vector register cannot overlap a source vector + * register (vs2, vs1) group. + * (Section 5.2) + */ +static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm) +{ + return vext_check_ds(s, vd, vs2, vm) && + require_align(vs1, s->lmul) && + require_noover(vd, s->lmul + 1, vs1, s->lmul); +} + +/* + * Check function for vector instruction with format: + * double-width result and double-width source1 and single-width + * source2 (2*SEW = 2*SEW op SEW) + * + * Rules to be checked here: + * 1. All rules in defined in widen common rules are applied. + * 2. Source 1 (vs2) vector register number is multiples of 2 * LMUL. + * (Section 3.4.2) + * 3. Source 2 (vs1) vector register number is multiples of LMUL. + * (Section 3.4.2) + * 4. Destination vector register cannot overlap a source vector + * register (vs1) group. + * (Section 5.2) + */ +static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm) +{ + return vext_check_ds(s, vd, vs1, vm) && + require_align(vs2, s->lmul + 1); +} + +static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm) +{ + bool ret = vext_narrow_check_common(s, vd, vs, vm); + if (vd != vs) { + ret &= require_noover(vd, s->lmul, vs, s->lmul + 1); + } + return ret; +} + +/* + * Check function for vector instruction with format: + * single-width result and double-width source 1 and single-width + * source 2 (SEW = 2*SEW op SEW) + * + * Rules to be checked here: + * 1. All rules in defined in narrow common rules are applied. + * 2. Destination vector register cannot overlap a source vector + * register (vs2) group. + * (Section 5.2) + * 3. Source 2 (vs1) vector register number is multiples of LMUL. + * (Section 3.4.2) + */ +static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm) +{ + return vext_check_sd(s, vd, vs2, vm) && + require_align(vs1, s->lmul); +} + +/* + * Check function for vector reduction instructions. + * + * Rules to be checked here: + * 1. Source 1 (vs2) vector register number is multiples of LMUL. 
+ * (Section 3.4.2) + */ +static bool vext_check_reduction(DisasContext *s, int vs2) +{ + return require_align(vs2, s->lmul) && (s->vstart == 0); +} + +/* + * Check function for vector slide instructions. + * + * Rules to be checked here: + * 1. Source 1 (vs2) vector register number is multiples of LMUL. + * (Section 3.4.2) + * 2. Destination vector register number is multiples of LMUL. + * (Section 3.4.2) + * 3. Destination vector register group for a masked vector + * instruction cannot overlap the source mask register (v0). + * (Section 5.3) + * 4. The destination vector register group for vslideup, vslide1up, + * vfslide1up, cannot overlap the source vector register (vs2) group. + * (Section 5.2, 16.3.1, 16.3.3) + */ +static bool vext_check_slide(DisasContext *s, int vd, int vs2, + int vm, bool is_over) +{ + bool ret = require_align(vs2, s->lmul) && + require_align(vd, s->lmul) && + require_vm(vm, vd); + if (is_over) { + ret &= (vd != vs2); + } + return ret; +} + /* * In cpu_get_tb_cpu_state(), set VILL if RVV was not present. * So RVV is also be checked in this function. @@ -98,62 +620,20 @@ static bool vext_check_isa_ill(DisasContext *s) return !s->vill; } -/* - * There are two rules check here. - * - * 1. Vector register numbers are multiples of LMUL. (Section 3.2) - * - * 2. For all widening instructions, the destination LMUL value must also be - * a supported LMUL value. (Section 11.2) - */ -static bool vext_check_reg(DisasContext *s, uint32_t reg, bool widen) -{ - /* - * The destination vector register group results are arranged as if both - * SEW and LMUL were at twice their current settings. (Section 11.2). - */ - int legal = widen ? 2 << s->lmul : 1 << s->lmul; - - return !((s->lmul == 0x3 && widen) || (reg % legal)); -} - -/* - * There are two rules check here. - * - * 1. The destination vector register group for a masked vector instruction can - * only overlap the source mask register (v0) when LMUL=1. (Section 5.3) - * - * 2. In widen instructions and some other insturctions, like vslideup.vx, - * there is no need to check whether LMUL=1. - */ -static bool vext_check_overlap_mask(DisasContext *s, uint32_t vd, bool vm, - bool force) -{ - return (vm != 0 || vd != 0) || (!force && (s->lmul == 0)); -} - -/* The LMUL setting must be such that LMUL * NFIELDS <= 8. (Section 7.8) */ -static bool vext_check_nf(DisasContext *s, uint32_t nf) -{ - return (1 << s->lmul) * nf <= 8; -} - -/* - * The destination vector register group cannot overlap a source vector register - * group of a different element width. (Section 11.2) - */ -static inline bool vext_check_overlap_group(int rd, int dlen, int rs, int slen) -{ - return ((rd >= rs + slen) || (rs >= rd + dlen)); -} /* common translation macro */ -#define GEN_VEXT_TRANS(NAME, SEQ, ARGTYPE, OP, CHECK) \ -static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE *a)\ -{ \ - if (CHECK(s, a)) { \ - return OP(s, a, SEQ); \ - } \ - return false; \ +#define GEN_VEXT_TRANS(NAME, EEW, ARGTYPE, OP, CHECK) \ +static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE * a) \ +{ \ + if (CHECK(s, a, EEW)) { \ + return OP(s, a, EEW); \ + } \ + return false; \ +} + +static uint8_t vext_get_emul(DisasContext *s, uint8_t eew) +{ + int8_t emul = eew - s->sew + s->lmul; + return emul < 0 ? 
0 : emul; } /* @@ -163,7 +643,8 @@ typedef void gen_helper_ldst_us(TCGv_ptr, TCGv_ptr, TCGv, TCGv_env, TCGv_i32); static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data, - gen_helper_ldst_us *fn, DisasContext *s) + gen_helper_ldst_us *fn, DisasContext *s, + bool is_store) { TCGv_ptr dest, mask; TCGv base; @@ -171,21 +652,22 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data, TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); dest = tcg_temp_new_ptr(); mask = tcg_temp_new_ptr(); - base = tcg_temp_new(); + base = get_gpr(s, rs1, EXT_NONE); /* - * As simd_desc supports at most 256 bytes, and in this implementation, - * the max vector group length is 2048 bytes. So split it into two parts. + * As simd_desc supports at most 2048 bytes, and in this implementation, + * the max vector group length is 4096 bytes. So split it into two parts. * * The first part is vlen in bytes, encoded in maxsz of simd_desc. * The second part is lmul, encoded in data of simd_desc. */ - desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); - gen_get_gpr(base, rs1); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); @@ -193,125 +675,138 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data, tcg_temp_free_ptr(dest); tcg_temp_free_ptr(mask); - tcg_temp_free(base); - tcg_temp_free_i32(desc); + + if (!is_store) { + mark_vs_dirty(s); + } + gen_set_label(over); return true; } -static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq) +static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew) { uint32_t data = 0; gen_helper_ldst_us *fn; - static gen_helper_ldst_us * const fns[2][7][4] = { + static gen_helper_ldst_us * const fns[2][4] = { /* masked unit stride load */ - { { gen_helper_vlb_v_b_mask, gen_helper_vlb_v_h_mask, - gen_helper_vlb_v_w_mask, gen_helper_vlb_v_d_mask }, - { NULL, gen_helper_vlh_v_h_mask, - gen_helper_vlh_v_w_mask, gen_helper_vlh_v_d_mask }, - { NULL, NULL, - gen_helper_vlw_v_w_mask, gen_helper_vlw_v_d_mask }, - { gen_helper_vle_v_b_mask, gen_helper_vle_v_h_mask, - gen_helper_vle_v_w_mask, gen_helper_vle_v_d_mask }, - { gen_helper_vlbu_v_b_mask, gen_helper_vlbu_v_h_mask, - gen_helper_vlbu_v_w_mask, gen_helper_vlbu_v_d_mask }, - { NULL, gen_helper_vlhu_v_h_mask, - gen_helper_vlhu_v_w_mask, gen_helper_vlhu_v_d_mask }, - { NULL, NULL, - gen_helper_vlwu_v_w_mask, gen_helper_vlwu_v_d_mask } }, + { gen_helper_vle8_v_mask, gen_helper_vle16_v_mask, + gen_helper_vle32_v_mask, gen_helper_vle64_v_mask }, /* unmasked unit stride load */ - { { gen_helper_vlb_v_b, gen_helper_vlb_v_h, - gen_helper_vlb_v_w, gen_helper_vlb_v_d }, - { NULL, gen_helper_vlh_v_h, - gen_helper_vlh_v_w, gen_helper_vlh_v_d }, - { NULL, NULL, - gen_helper_vlw_v_w, gen_helper_vlw_v_d }, - { gen_helper_vle_v_b, gen_helper_vle_v_h, - gen_helper_vle_v_w, gen_helper_vle_v_d }, - { gen_helper_vlbu_v_b, gen_helper_vlbu_v_h, - gen_helper_vlbu_v_w, gen_helper_vlbu_v_d }, - { NULL, gen_helper_vlhu_v_h, - gen_helper_vlhu_v_w, gen_helper_vlhu_v_d }, - { NULL, NULL, - gen_helper_vlwu_v_w, gen_helper_vlwu_v_d } } + { gen_helper_vle8_v, gen_helper_vle16_v, + gen_helper_vle32_v, gen_helper_vle64_v } }; - fn = fns[a->vm][seq][s->sew]; + fn = fns[a->vm][eew]; if (fn == NULL) { return false; } - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + /* + * Vector 
load/store instructions have the EEW encoded + * directly in the instructions. The maximum vector size is + * calculated with EMUL rather than LMUL. + */ + uint8_t emul = vext_get_emul(s, eew); data = FIELD_DP32(data, VDATA, VM, a->vm); - data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, LMUL, emul); data = FIELD_DP32(data, VDATA, NF, a->nf); - return ldst_us_trans(a->rd, a->rs1, data, fn, s); + data = FIELD_DP32(data, VDATA, VTA, s->vta); + data = FIELD_DP32(data, VDATA, VMA, s->vma); + return ldst_us_trans(a->rd, a->rs1, data, fn, s, false); } -static bool ld_us_check(DisasContext *s, arg_r2nfvm* a) +static bool ld_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, false) && - vext_check_reg(s, a->rd, false) && - vext_check_nf(s, a->nf)); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_load(s, a->rd, a->nf, a->vm, eew); } -GEN_VEXT_TRANS(vlb_v, 0, r2nfvm, ld_us_op, ld_us_check) -GEN_VEXT_TRANS(vlh_v, 1, r2nfvm, ld_us_op, ld_us_check) -GEN_VEXT_TRANS(vlw_v, 2, r2nfvm, ld_us_op, ld_us_check) -GEN_VEXT_TRANS(vle_v, 3, r2nfvm, ld_us_op, ld_us_check) -GEN_VEXT_TRANS(vlbu_v, 4, r2nfvm, ld_us_op, ld_us_check) -GEN_VEXT_TRANS(vlhu_v, 5, r2nfvm, ld_us_op, ld_us_check) -GEN_VEXT_TRANS(vlwu_v, 6, r2nfvm, ld_us_op, ld_us_check) +GEN_VEXT_TRANS(vle8_v, MO_8, r2nfvm, ld_us_op, ld_us_check) +GEN_VEXT_TRANS(vle16_v, MO_16, r2nfvm, ld_us_op, ld_us_check) +GEN_VEXT_TRANS(vle32_v, MO_32, r2nfvm, ld_us_op, ld_us_check) +GEN_VEXT_TRANS(vle64_v, MO_64, r2nfvm, ld_us_op, ld_us_check) -static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq) +static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew) { uint32_t data = 0; gen_helper_ldst_us *fn; - static gen_helper_ldst_us * const fns[2][4][4] = { - /* masked unit stride load and store */ - { { gen_helper_vsb_v_b_mask, gen_helper_vsb_v_h_mask, - gen_helper_vsb_v_w_mask, gen_helper_vsb_v_d_mask }, - { NULL, gen_helper_vsh_v_h_mask, - gen_helper_vsh_v_w_mask, gen_helper_vsh_v_d_mask }, - { NULL, NULL, - gen_helper_vsw_v_w_mask, gen_helper_vsw_v_d_mask }, - { gen_helper_vse_v_b_mask, gen_helper_vse_v_h_mask, - gen_helper_vse_v_w_mask, gen_helper_vse_v_d_mask } }, + static gen_helper_ldst_us * const fns[2][4] = { + /* masked unit stride store */ + { gen_helper_vse8_v_mask, gen_helper_vse16_v_mask, + gen_helper_vse32_v_mask, gen_helper_vse64_v_mask }, /* unmasked unit stride store */ - { { gen_helper_vsb_v_b, gen_helper_vsb_v_h, - gen_helper_vsb_v_w, gen_helper_vsb_v_d }, - { NULL, gen_helper_vsh_v_h, - gen_helper_vsh_v_w, gen_helper_vsh_v_d }, - { NULL, NULL, - gen_helper_vsw_v_w, gen_helper_vsw_v_d }, - { gen_helper_vse_v_b, gen_helper_vse_v_h, - gen_helper_vse_v_w, gen_helper_vse_v_d } } + { gen_helper_vse8_v, gen_helper_vse16_v, + gen_helper_vse32_v, gen_helper_vse64_v } }; - fn = fns[a->vm][seq][s->sew]; + fn = fns[a->vm][eew]; if (fn == NULL) { return false; } - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + uint8_t emul = vext_get_emul(s, eew); data = FIELD_DP32(data, VDATA, VM, a->vm); - data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, LMUL, emul); data = FIELD_DP32(data, VDATA, NF, a->nf); - return ldst_us_trans(a->rd, a->rs1, data, fn, s); + return ldst_us_trans(a->rd, a->rs1, data, fn, s, true); } -static bool st_us_check(DisasContext *s, arg_r2nfvm* a) +static bool st_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew) { - return (vext_check_isa_ill(s) && - vext_check_reg(s, 
a->rd, false) && - vext_check_nf(s, a->nf)); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_store(s, a->rd, a->nf, eew); } -GEN_VEXT_TRANS(vsb_v, 0, r2nfvm, st_us_op, st_us_check) -GEN_VEXT_TRANS(vsh_v, 1, r2nfvm, st_us_op, st_us_check) -GEN_VEXT_TRANS(vsw_v, 2, r2nfvm, st_us_op, st_us_check) -GEN_VEXT_TRANS(vse_v, 3, r2nfvm, st_us_op, st_us_check) +GEN_VEXT_TRANS(vse8_v, MO_8, r2nfvm, st_us_op, st_us_check) +GEN_VEXT_TRANS(vse16_v, MO_16, r2nfvm, st_us_op, st_us_check) +GEN_VEXT_TRANS(vse32_v, MO_32, r2nfvm, st_us_op, st_us_check) +GEN_VEXT_TRANS(vse64_v, MO_64, r2nfvm, st_us_op, st_us_check) + +/* + *** unit stride mask load and store + */ +static bool ld_us_mask_op(DisasContext *s, arg_vlm_v *a, uint8_t eew) +{ + uint32_t data = 0; + gen_helper_ldst_us *fn = gen_helper_vlm_v; + + /* EMUL = 1, NFIELDS = 1 */ + data = FIELD_DP32(data, VDATA, LMUL, 0); + data = FIELD_DP32(data, VDATA, NF, 1); + /* Mask destination register are always tail-agnostic */ + data = FIELD_DP32(data, VDATA, VTA, s->cfg_vta_all_1s); + data = FIELD_DP32(data, VDATA, VMA, s->vma); + return ldst_us_trans(a->rd, a->rs1, data, fn, s, false); +} + +static bool ld_us_mask_check(DisasContext *s, arg_vlm_v *a, uint8_t eew) +{ + /* EMUL = 1, NFIELDS = 1 */ + return require_rvv(s) && vext_check_isa_ill(s); +} + +static bool st_us_mask_op(DisasContext *s, arg_vsm_v *a, uint8_t eew) +{ + uint32_t data = 0; + gen_helper_ldst_us *fn = gen_helper_vsm_v; + + /* EMUL = 1, NFIELDS = 1 */ + data = FIELD_DP32(data, VDATA, LMUL, 0); + data = FIELD_DP32(data, VDATA, NF, 1); + return ldst_us_trans(a->rd, a->rs1, data, fn, s, true); +} + +static bool st_us_mask_check(DisasContext *s, arg_vsm_v *a, uint8_t eew) +{ + /* EMUL = 1, NFIELDS = 1 */ + return require_rvv(s) && vext_check_isa_ill(s); +} + +GEN_VEXT_TRANS(vlm_v, MO_8, vlm_v, ld_us_mask_op, ld_us_mask_check) +GEN_VEXT_TRANS(vsm_v, MO_8, vsm_v, st_us_mask_op, st_us_mask_check) /* *** stride load and store @@ -321,7 +816,7 @@ typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv, static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2, uint32_t data, gen_helper_ldst_stride *fn, - DisasContext *s) + DisasContext *s, bool is_store) { TCGv_ptr dest, mask; TCGv base, stride; @@ -329,15 +824,15 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2, TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); dest = tcg_temp_new_ptr(); mask = tcg_temp_new_ptr(); - base = tcg_temp_new(); - stride = tcg_temp_new(); - desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + base = get_gpr(s, rs1, EXT_NONE); + stride = get_gpr(s, rs2, EXT_NONE); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); - gen_get_gpr(base, rs1); - gen_get_gpr(stride, rs2); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); @@ -345,101 +840,83 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2, tcg_temp_free_ptr(dest); tcg_temp_free_ptr(mask); - tcg_temp_free(base); - tcg_temp_free(stride); - tcg_temp_free_i32(desc); + + if (!is_store) { + mark_vs_dirty(s); + } + gen_set_label(over); return true; } -static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq) +static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew) { uint32_t data = 0; gen_helper_ldst_stride *fn; - static gen_helper_ldst_stride * const fns[7][4] = { - { gen_helper_vlsb_v_b, 
gen_helper_vlsb_v_h, - gen_helper_vlsb_v_w, gen_helper_vlsb_v_d }, - { NULL, gen_helper_vlsh_v_h, - gen_helper_vlsh_v_w, gen_helper_vlsh_v_d }, - { NULL, NULL, - gen_helper_vlsw_v_w, gen_helper_vlsw_v_d }, - { gen_helper_vlse_v_b, gen_helper_vlse_v_h, - gen_helper_vlse_v_w, gen_helper_vlse_v_d }, - { gen_helper_vlsbu_v_b, gen_helper_vlsbu_v_h, - gen_helper_vlsbu_v_w, gen_helper_vlsbu_v_d }, - { NULL, gen_helper_vlshu_v_h, - gen_helper_vlshu_v_w, gen_helper_vlshu_v_d }, - { NULL, NULL, - gen_helper_vlswu_v_w, gen_helper_vlswu_v_d }, + static gen_helper_ldst_stride * const fns[4] = { + gen_helper_vlse8_v, gen_helper_vlse16_v, + gen_helper_vlse32_v, gen_helper_vlse64_v }; - fn = fns[seq][s->sew]; + fn = fns[eew]; if (fn == NULL) { return false; } - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + uint8_t emul = vext_get_emul(s, eew); data = FIELD_DP32(data, VDATA, VM, a->vm); - data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, LMUL, emul); data = FIELD_DP32(data, VDATA, NF, a->nf); - return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s); + data = FIELD_DP32(data, VDATA, VTA, s->vta); + data = FIELD_DP32(data, VDATA, VMA, s->vma); + return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, false); } -static bool ld_stride_check(DisasContext *s, arg_rnfvm* a) +static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, false) && - vext_check_reg(s, a->rd, false) && - vext_check_nf(s, a->nf)); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_load(s, a->rd, a->nf, a->vm, eew); } -GEN_VEXT_TRANS(vlsb_v, 0, rnfvm, ld_stride_op, ld_stride_check) -GEN_VEXT_TRANS(vlsh_v, 1, rnfvm, ld_stride_op, ld_stride_check) -GEN_VEXT_TRANS(vlsw_v, 2, rnfvm, ld_stride_op, ld_stride_check) -GEN_VEXT_TRANS(vlse_v, 3, rnfvm, ld_stride_op, ld_stride_check) -GEN_VEXT_TRANS(vlsbu_v, 4, rnfvm, ld_stride_op, ld_stride_check) -GEN_VEXT_TRANS(vlshu_v, 5, rnfvm, ld_stride_op, ld_stride_check) -GEN_VEXT_TRANS(vlswu_v, 6, rnfvm, ld_stride_op, ld_stride_check) +GEN_VEXT_TRANS(vlse8_v, MO_8, rnfvm, ld_stride_op, ld_stride_check) +GEN_VEXT_TRANS(vlse16_v, MO_16, rnfvm, ld_stride_op, ld_stride_check) +GEN_VEXT_TRANS(vlse32_v, MO_32, rnfvm, ld_stride_op, ld_stride_check) +GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check) -static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq) +static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew) { uint32_t data = 0; gen_helper_ldst_stride *fn; - static gen_helper_ldst_stride * const fns[4][4] = { + static gen_helper_ldst_stride * const fns[4] = { /* masked stride store */ - { gen_helper_vssb_v_b, gen_helper_vssb_v_h, - gen_helper_vssb_v_w, gen_helper_vssb_v_d }, - { NULL, gen_helper_vssh_v_h, - gen_helper_vssh_v_w, gen_helper_vssh_v_d }, - { NULL, NULL, - gen_helper_vssw_v_w, gen_helper_vssw_v_d }, - { gen_helper_vsse_v_b, gen_helper_vsse_v_h, - gen_helper_vsse_v_w, gen_helper_vsse_v_d } + gen_helper_vsse8_v, gen_helper_vsse16_v, + gen_helper_vsse32_v, gen_helper_vsse64_v }; - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + uint8_t emul = vext_get_emul(s, eew); data = FIELD_DP32(data, VDATA, VM, a->vm); - data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, LMUL, emul); data = FIELD_DP32(data, VDATA, NF, a->nf); - fn = fns[seq][s->sew]; + fn = fns[eew]; if (fn == NULL) { return false; } - return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s); + return 
ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, true); } -static bool st_stride_check(DisasContext *s, arg_rnfvm* a) +static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew) { - return (vext_check_isa_ill(s) && - vext_check_reg(s, a->rd, false) && - vext_check_nf(s, a->nf)); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_store(s, a->rd, a->nf, eew); } -GEN_VEXT_TRANS(vssb_v, 0, rnfvm, st_stride_op, st_stride_check) -GEN_VEXT_TRANS(vssh_v, 1, rnfvm, st_stride_op, st_stride_check) -GEN_VEXT_TRANS(vssw_v, 2, rnfvm, st_stride_op, st_stride_check) -GEN_VEXT_TRANS(vsse_v, 3, rnfvm, st_stride_op, st_stride_check) +GEN_VEXT_TRANS(vsse8_v, MO_8, rnfvm, st_stride_op, st_stride_check) +GEN_VEXT_TRANS(vsse16_v, MO_16, rnfvm, st_stride_op, st_stride_check) +GEN_VEXT_TRANS(vsse32_v, MO_32, rnfvm, st_stride_op, st_stride_check) +GEN_VEXT_TRANS(vsse64_v, MO_64, rnfvm, st_stride_op, st_stride_check) /* *** index load and store @@ -449,7 +926,7 @@ typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv, static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t data, gen_helper_ldst_index *fn, - DisasContext *s) + DisasContext *s, bool is_store) { TCGv_ptr dest, mask, index; TCGv base; @@ -457,14 +934,15 @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); dest = tcg_temp_new_ptr(); mask = tcg_temp_new_ptr(); index = tcg_temp_new_ptr(); - base = tcg_temp_new(); - desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + base = get_gpr(s, rs1, EXT_NONE); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); - gen_get_gpr(base, rs1); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2)); tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); @@ -474,109 +952,120 @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, tcg_temp_free_ptr(dest); tcg_temp_free_ptr(mask); tcg_temp_free_ptr(index); - tcg_temp_free(base); - tcg_temp_free_i32(desc); + + if (!is_store) { + mark_vs_dirty(s); + } + gen_set_label(over); return true; } -static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq) -{ - uint32_t data = 0; - gen_helper_ldst_index *fn; - static gen_helper_ldst_index * const fns[7][4] = { - { gen_helper_vlxb_v_b, gen_helper_vlxb_v_h, - gen_helper_vlxb_v_w, gen_helper_vlxb_v_d }, - { NULL, gen_helper_vlxh_v_h, - gen_helper_vlxh_v_w, gen_helper_vlxh_v_d }, - { NULL, NULL, - gen_helper_vlxw_v_w, gen_helper_vlxw_v_d }, - { gen_helper_vlxe_v_b, gen_helper_vlxe_v_h, - gen_helper_vlxe_v_w, gen_helper_vlxe_v_d }, - { gen_helper_vlxbu_v_b, gen_helper_vlxbu_v_h, - gen_helper_vlxbu_v_w, gen_helper_vlxbu_v_d }, - { NULL, gen_helper_vlxhu_v_h, - gen_helper_vlxhu_v_w, gen_helper_vlxhu_v_d }, - { NULL, NULL, - gen_helper_vlxwu_v_w, gen_helper_vlxwu_v_d }, - }; - - fn = fns[seq][s->sew]; - if (fn == NULL) { - return false; - } - - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); - data = FIELD_DP32(data, VDATA, VM, a->vm); - data = FIELD_DP32(data, VDATA, LMUL, s->lmul); - data = FIELD_DP32(data, VDATA, NF, a->nf); - return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s); -} - -/* - * For vector indexed segment loads, the destination vector register - * groups cannot overlap the source vector register group (specified by - * `vs2`), else an illegal instruction exception is raised. 
- */ -static bool ld_index_check(DisasContext *s, arg_rnfvm* a) -{ - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, false) && - vext_check_reg(s, a->rd, false) && - vext_check_reg(s, a->rs2, false) && - vext_check_nf(s, a->nf) && - ((a->nf == 1) || - vext_check_overlap_group(a->rd, a->nf << s->lmul, - a->rs2, 1 << s->lmul))); -} - -GEN_VEXT_TRANS(vlxb_v, 0, rnfvm, ld_index_op, ld_index_check) -GEN_VEXT_TRANS(vlxh_v, 1, rnfvm, ld_index_op, ld_index_check) -GEN_VEXT_TRANS(vlxw_v, 2, rnfvm, ld_index_op, ld_index_check) -GEN_VEXT_TRANS(vlxe_v, 3, rnfvm, ld_index_op, ld_index_check) -GEN_VEXT_TRANS(vlxbu_v, 4, rnfvm, ld_index_op, ld_index_check) -GEN_VEXT_TRANS(vlxhu_v, 5, rnfvm, ld_index_op, ld_index_check) -GEN_VEXT_TRANS(vlxwu_v, 6, rnfvm, ld_index_op, ld_index_check) - -static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq) +static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew) { uint32_t data = 0; gen_helper_ldst_index *fn; static gen_helper_ldst_index * const fns[4][4] = { - { gen_helper_vsxb_v_b, gen_helper_vsxb_v_h, - gen_helper_vsxb_v_w, gen_helper_vsxb_v_d }, - { NULL, gen_helper_vsxh_v_h, - gen_helper_vsxh_v_w, gen_helper_vsxh_v_d }, - { NULL, NULL, - gen_helper_vsxw_v_w, gen_helper_vsxw_v_d }, - { gen_helper_vsxe_v_b, gen_helper_vsxe_v_h, - gen_helper_vsxe_v_w, gen_helper_vsxe_v_d } + /* + * offset vector register group EEW = 8, + * data vector register group EEW = SEW + */ + { gen_helper_vlxei8_8_v, gen_helper_vlxei8_16_v, + gen_helper_vlxei8_32_v, gen_helper_vlxei8_64_v }, + /* + * offset vector register group EEW = 16, + * data vector register group EEW = SEW + */ + { gen_helper_vlxei16_8_v, gen_helper_vlxei16_16_v, + gen_helper_vlxei16_32_v, gen_helper_vlxei16_64_v }, + /* + * offset vector register group EEW = 32, + * data vector register group EEW = SEW + */ + { gen_helper_vlxei32_8_v, gen_helper_vlxei32_16_v, + gen_helper_vlxei32_32_v, gen_helper_vlxei32_64_v }, + /* + * offset vector register group EEW = 64, + * data vector register group EEW = SEW + */ + { gen_helper_vlxei64_8_v, gen_helper_vlxei64_16_v, + gen_helper_vlxei64_32_v, gen_helper_vlxei64_64_v } }; - fn = fns[seq][s->sew]; - if (fn == NULL) { - return false; - } + fn = fns[eew][s->sew]; - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + uint8_t emul = vext_get_emul(s, s->sew); data = FIELD_DP32(data, VDATA, VM, a->vm); - data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, LMUL, emul); data = FIELD_DP32(data, VDATA, NF, a->nf); - return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s); + data = FIELD_DP32(data, VDATA, VTA, s->vta); + data = FIELD_DP32(data, VDATA, VMA, s->vma); + return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false); } -static bool st_index_check(DisasContext *s, arg_rnfvm* a) +static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew) { - return (vext_check_isa_ill(s) && - vext_check_reg(s, a->rd, false) && - vext_check_reg(s, a->rs2, false) && - vext_check_nf(s, a->nf)); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew); } -GEN_VEXT_TRANS(vsxb_v, 0, rnfvm, st_index_op, st_index_check) -GEN_VEXT_TRANS(vsxh_v, 1, rnfvm, st_index_op, st_index_check) -GEN_VEXT_TRANS(vsxw_v, 2, rnfvm, st_index_op, st_index_check) -GEN_VEXT_TRANS(vsxe_v, 3, rnfvm, st_index_op, st_index_check) +GEN_VEXT_TRANS(vlxei8_v, MO_8, rnfvm, ld_index_op, ld_index_check) +GEN_VEXT_TRANS(vlxei16_v, MO_16, rnfvm, ld_index_op, ld_index_check) 
+GEN_VEXT_TRANS(vlxei32_v, MO_32, rnfvm, ld_index_op, ld_index_check) +GEN_VEXT_TRANS(vlxei64_v, MO_64, rnfvm, ld_index_op, ld_index_check) + +static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew) +{ + uint32_t data = 0; + gen_helper_ldst_index *fn; + static gen_helper_ldst_index * const fns[4][4] = { + /* + * offset vector register group EEW = 8, + * data vector register group EEW = SEW + */ + { gen_helper_vsxei8_8_v, gen_helper_vsxei8_16_v, + gen_helper_vsxei8_32_v, gen_helper_vsxei8_64_v }, + /* + * offset vector register group EEW = 16, + * data vector register group EEW = SEW + */ + { gen_helper_vsxei16_8_v, gen_helper_vsxei16_16_v, + gen_helper_vsxei16_32_v, gen_helper_vsxei16_64_v }, + /* + * offset vector register group EEW = 32, + * data vector register group EEW = SEW + */ + { gen_helper_vsxei32_8_v, gen_helper_vsxei32_16_v, + gen_helper_vsxei32_32_v, gen_helper_vsxei32_64_v }, + /* + * offset vector register group EEW = 64, + * data vector register group EEW = SEW + */ + { gen_helper_vsxei64_8_v, gen_helper_vsxei64_16_v, + gen_helper_vsxei64_32_v, gen_helper_vsxei64_64_v } + }; + + fn = fns[eew][s->sew]; + + uint8_t emul = vext_get_emul(s, s->sew); + data = FIELD_DP32(data, VDATA, VM, a->vm); + data = FIELD_DP32(data, VDATA, LMUL, emul); + data = FIELD_DP32(data, VDATA, NF, a->nf); + return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, true); +} + +static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew) +{ + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_st_index(s, a->rd, a->rs2, a->nf, eew); +} + +GEN_VEXT_TRANS(vsxei8_v, MO_8, rnfvm, st_index_op, st_index_check) +GEN_VEXT_TRANS(vsxei16_v, MO_16, rnfvm, st_index_op, st_index_check) +GEN_VEXT_TRANS(vsxei32_v, MO_32, rnfvm, st_index_op, st_index_check) +GEN_VEXT_TRANS(vsxei64_v, MO_64, rnfvm, st_index_op, st_index_check) /* *** unit stride fault-only-first load @@ -590,13 +1079,14 @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data, TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); dest = tcg_temp_new_ptr(); mask = tcg_temp_new_ptr(); - base = tcg_temp_new(); - desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + base = get_gpr(s, rs1, EXT_NONE); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); - gen_get_gpr(base, rs1); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); @@ -604,205 +1094,137 @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data, tcg_temp_free_ptr(dest); tcg_temp_free_ptr(mask); - tcg_temp_free(base); - tcg_temp_free_i32(desc); + mark_vs_dirty(s); gen_set_label(over); return true; } -static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq) +static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew) { uint32_t data = 0; gen_helper_ldst_us *fn; - static gen_helper_ldst_us * const fns[7][4] = { - { gen_helper_vlbff_v_b, gen_helper_vlbff_v_h, - gen_helper_vlbff_v_w, gen_helper_vlbff_v_d }, - { NULL, gen_helper_vlhff_v_h, - gen_helper_vlhff_v_w, gen_helper_vlhff_v_d }, - { NULL, NULL, - gen_helper_vlwff_v_w, gen_helper_vlwff_v_d }, - { gen_helper_vleff_v_b, gen_helper_vleff_v_h, - gen_helper_vleff_v_w, gen_helper_vleff_v_d }, - { gen_helper_vlbuff_v_b, gen_helper_vlbuff_v_h, - gen_helper_vlbuff_v_w, gen_helper_vlbuff_v_d }, - { NULL, gen_helper_vlhuff_v_h, - gen_helper_vlhuff_v_w, gen_helper_vlhuff_v_d }, - { NULL, NULL, - 
gen_helper_vlwuff_v_w, gen_helper_vlwuff_v_d } + static gen_helper_ldst_us * const fns[4] = { + gen_helper_vle8ff_v, gen_helper_vle16ff_v, + gen_helper_vle32ff_v, gen_helper_vle64ff_v }; - fn = fns[seq][s->sew]; + fn = fns[eew]; if (fn == NULL) { return false; } - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + uint8_t emul = vext_get_emul(s, eew); data = FIELD_DP32(data, VDATA, VM, a->vm); - data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, LMUL, emul); data = FIELD_DP32(data, VDATA, NF, a->nf); + data = FIELD_DP32(data, VDATA, VTA, s->vta); + data = FIELD_DP32(data, VDATA, VMA, s->vma); return ldff_trans(a->rd, a->rs1, data, fn, s); } -GEN_VEXT_TRANS(vlbff_v, 0, r2nfvm, ldff_op, ld_us_check) -GEN_VEXT_TRANS(vlhff_v, 1, r2nfvm, ldff_op, ld_us_check) -GEN_VEXT_TRANS(vlwff_v, 2, r2nfvm, ldff_op, ld_us_check) -GEN_VEXT_TRANS(vleff_v, 3, r2nfvm, ldff_op, ld_us_check) -GEN_VEXT_TRANS(vlbuff_v, 4, r2nfvm, ldff_op, ld_us_check) -GEN_VEXT_TRANS(vlhuff_v, 5, r2nfvm, ldff_op, ld_us_check) -GEN_VEXT_TRANS(vlwuff_v, 6, r2nfvm, ldff_op, ld_us_check) +GEN_VEXT_TRANS(vle8ff_v, MO_8, r2nfvm, ldff_op, ld_us_check) +GEN_VEXT_TRANS(vle16ff_v, MO_16, r2nfvm, ldff_op, ld_us_check) +GEN_VEXT_TRANS(vle32ff_v, MO_32, r2nfvm, ldff_op, ld_us_check) +GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check) /* - *** vector atomic operation + * load and store whole register instructions */ -typedef void gen_helper_amo(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr, - TCGv_env, TCGv_i32); +typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32); -static bool amo_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, - uint32_t data, gen_helper_amo *fn, DisasContext *s) +static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf, + uint32_t width, gen_helper_ldst_whole *fn, + DisasContext *s, bool is_store) { - TCGv_ptr dest, mask, index; + uint32_t evl = (s->cfg_ptr->vlen / 8) * nf / width; + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_GEU, cpu_vstart, evl, over); + + TCGv_ptr dest; TCGv base; TCGv_i32 desc; - TCGLabel *over = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); - + uint32_t data = FIELD_DP32(0, VDATA, NF, nf); dest = tcg_temp_new_ptr(); - mask = tcg_temp_new_ptr(); - index = tcg_temp_new_ptr(); - base = tcg_temp_new(); - desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); - gen_get_gpr(base, rs1); + base = get_gpr(s, rs1, EXT_NONE); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); - tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2)); - tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); - fn(dest, mask, base, index, cpu_env, desc); + fn(dest, base, cpu_env, desc); tcg_temp_free_ptr(dest); - tcg_temp_free_ptr(mask); - tcg_temp_free_ptr(index); - tcg_temp_free(base); - tcg_temp_free_i32(desc); + + if (!is_store) { + mark_vs_dirty(s); + } gen_set_label(over); + return true; } -static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t seq) -{ - uint32_t data = 0; - gen_helper_amo *fn; - static gen_helper_amo *const fnsw[9] = { - /* no atomic operation */ - gen_helper_vamoswapw_v_w, - gen_helper_vamoaddw_v_w, - gen_helper_vamoxorw_v_w, - gen_helper_vamoandw_v_w, - gen_helper_vamoorw_v_w, - gen_helper_vamominw_v_w, - gen_helper_vamomaxw_v_w, - gen_helper_vamominuw_v_w, - gen_helper_vamomaxuw_v_w - }; - static gen_helper_amo *const fnsd[18] = { - gen_helper_vamoswapw_v_d, - gen_helper_vamoaddw_v_d, - gen_helper_vamoxorw_v_d, - 
gen_helper_vamoandw_v_d, - gen_helper_vamoorw_v_d, - gen_helper_vamominw_v_d, - gen_helper_vamomaxw_v_d, - gen_helper_vamominuw_v_d, - gen_helper_vamomaxuw_v_d, - gen_helper_vamoswapd_v_d, - gen_helper_vamoaddd_v_d, - gen_helper_vamoxord_v_d, - gen_helper_vamoandd_v_d, - gen_helper_vamoord_v_d, - gen_helper_vamomind_v_d, - gen_helper_vamomaxd_v_d, - gen_helper_vamominud_v_d, - gen_helper_vamomaxud_v_d - }; - - if (tb_cflags(s->base.tb) & CF_PARALLEL) { - gen_helper_exit_atomic(cpu_env); - s->base.is_jmp = DISAS_NORETURN; - return true; - } else { - if (s->sew == 3) { - if (!is_32bit(s)) { - fn = fnsd[seq]; - } else { - /* Check done in amo_check(). */ - g_assert_not_reached(); - } - } else { - assert(seq < ARRAY_SIZE(fnsw)); - fn = fnsw[seq]; - } - } - - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); - data = FIELD_DP32(data, VDATA, VM, a->vm); - data = FIELD_DP32(data, VDATA, LMUL, s->lmul); - data = FIELD_DP32(data, VDATA, WD, a->wd); - return amo_trans(a->rd, a->rs1, a->rs2, data, fn, s); -} /* - * There are two rules check here. - * - * 1. SEW must be at least as wide as the AMO memory element size. - * - * 2. If SEW is greater than XLEN, an illegal instruction exception is raised. + * load and store whole register instructions ignore vtype and vl setting. + * Thus, we don't need to check vill bit. (Section 7.9) */ -static bool amo_check(DisasContext *s, arg_rwdvm* a) -{ - return (!s->vill && has_ext(s, RVA) && - (!a->wd || vext_check_overlap_mask(s, a->rd, a->vm, false)) && - vext_check_reg(s, a->rd, false) && - vext_check_reg(s, a->rs2, false) && - ((1 << s->sew) <= sizeof(target_ulong)) && - ((1 << s->sew) >= 4)); +#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF, WIDTH, IS_STORE) \ +static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \ +{ \ + if (require_rvv(s) && \ + QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \ + return ldst_whole_trans(a->rd, a->rs1, ARG_NF, WIDTH, \ + gen_helper_##NAME, s, IS_STORE); \ + } \ + return false; \ } -static bool amo_check64(DisasContext *s, arg_rwdvm* a) -{ - return !is_32bit(s) && amo_check(s, a); -} +GEN_LDST_WHOLE_TRANS(vl1re8_v, 1, 1, false) +GEN_LDST_WHOLE_TRANS(vl1re16_v, 1, 2, false) +GEN_LDST_WHOLE_TRANS(vl1re32_v, 1, 4, false) +GEN_LDST_WHOLE_TRANS(vl1re64_v, 1, 8, false) +GEN_LDST_WHOLE_TRANS(vl2re8_v, 2, 1, false) +GEN_LDST_WHOLE_TRANS(vl2re16_v, 2, 2, false) +GEN_LDST_WHOLE_TRANS(vl2re32_v, 2, 4, false) +GEN_LDST_WHOLE_TRANS(vl2re64_v, 2, 8, false) +GEN_LDST_WHOLE_TRANS(vl4re8_v, 4, 1, false) +GEN_LDST_WHOLE_TRANS(vl4re16_v, 4, 2, false) +GEN_LDST_WHOLE_TRANS(vl4re32_v, 4, 4, false) +GEN_LDST_WHOLE_TRANS(vl4re64_v, 4, 8, false) +GEN_LDST_WHOLE_TRANS(vl8re8_v, 8, 1, false) +GEN_LDST_WHOLE_TRANS(vl8re16_v, 8, 2, false) +GEN_LDST_WHOLE_TRANS(vl8re32_v, 8, 4, false) +GEN_LDST_WHOLE_TRANS(vl8re64_v, 8, 8, false) -GEN_VEXT_TRANS(vamoswapw_v, 0, rwdvm, amo_op, amo_check) -GEN_VEXT_TRANS(vamoaddw_v, 1, rwdvm, amo_op, amo_check) -GEN_VEXT_TRANS(vamoxorw_v, 2, rwdvm, amo_op, amo_check) -GEN_VEXT_TRANS(vamoandw_v, 3, rwdvm, amo_op, amo_check) -GEN_VEXT_TRANS(vamoorw_v, 4, rwdvm, amo_op, amo_check) -GEN_VEXT_TRANS(vamominw_v, 5, rwdvm, amo_op, amo_check) -GEN_VEXT_TRANS(vamomaxw_v, 6, rwdvm, amo_op, amo_check) -GEN_VEXT_TRANS(vamominuw_v, 7, rwdvm, amo_op, amo_check) -GEN_VEXT_TRANS(vamomaxuw_v, 8, rwdvm, amo_op, amo_check) -GEN_VEXT_TRANS(vamoswapd_v, 9, rwdvm, amo_op, amo_check64) -GEN_VEXT_TRANS(vamoaddd_v, 10, rwdvm, amo_op, amo_check64) -GEN_VEXT_TRANS(vamoxord_v, 11, rwdvm, amo_op, amo_check64) -GEN_VEXT_TRANS(vamoandd_v, 12, rwdvm, amo_op, amo_check64) 
-GEN_VEXT_TRANS(vamoord_v, 13, rwdvm, amo_op, amo_check64) -GEN_VEXT_TRANS(vamomind_v, 14, rwdvm, amo_op, amo_check64) -GEN_VEXT_TRANS(vamomaxd_v, 15, rwdvm, amo_op, amo_check64) -GEN_VEXT_TRANS(vamominud_v, 16, rwdvm, amo_op, amo_check64) -GEN_VEXT_TRANS(vamomaxud_v, 17, rwdvm, amo_op, amo_check64) +/* + * The vector whole register store instructions are encoded similar to + * unmasked unit-stride store of elements with EEW=8. + */ +GEN_LDST_WHOLE_TRANS(vs1r_v, 1, 1, true) +GEN_LDST_WHOLE_TRANS(vs2r_v, 2, 1, true) +GEN_LDST_WHOLE_TRANS(vs4r_v, 4, 1, true) +GEN_LDST_WHOLE_TRANS(vs8r_v, 8, 1, true) /* *** Vector Integer Arithmetic Instructions */ -#define MAXSZ(s) (s->vlen >> (3 - s->lmul)) + +/* + * MAXSZ returns the maximum vector size can be operated in bytes, + * which is used in GVEC IR when vl_eq_vlmax flag is set to true + * to accerlate vector operation. + */ +static inline uint32_t MAXSZ(DisasContext *s) +{ + int scale = s->lmul - 3; + return s->cfg_ptr->vlen >> -scale; +} static bool opivv_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, false) && - vext_check_reg(s, a->rd, false) && - vext_check_reg(s, a->rs2, false) && - vext_check_reg(s, a->rs1, false)); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm); } typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t, @@ -818,21 +1240,25 @@ do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn, } tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); - if (a->vm && s->vl_eq_vlmax) { + if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) { gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), vreg_ofs(s, a->rs1), MAXSZ(s), MAXSZ(s)); } else { uint32_t data = 0; - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); data = FIELD_DP32(data, VDATA, VM, a->vm); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, VTA, s->vta); + data = FIELD_DP32(data, VDATA, VMA, s->vma); tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), - cpu_env, s->vlen / 8, s->vlen / 8, data, fn); + cpu_env, s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data, fn); } + mark_vs_dirty(s); gen_set_label(over); return true; } @@ -864,17 +1290,20 @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm, TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); dest = tcg_temp_new_ptr(); mask = tcg_temp_new_ptr(); src2 = tcg_temp_new_ptr(); - src1 = tcg_temp_new(); - gen_get_gpr(src1, rs1); + src1 = get_gpr(s, rs1, EXT_SIGN); - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); data = FIELD_DP32(data, VDATA, VM, vm); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); - desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + data = FIELD_DP32(data, VDATA, VTA, s->vta); + data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); + data = FIELD_DP32(data, VDATA, VMA, s->vma); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2)); @@ -885,18 +1314,16 @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm, tcg_temp_free_ptr(dest); tcg_temp_free_ptr(mask); tcg_temp_free_ptr(src2); - tcg_temp_free(src1); - tcg_temp_free_i32(desc); + mark_vs_dirty(s); 
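/*
 * A minimal worked example of the MAXSZ() computation above, assuming
 * VLEN = 256 bits (cfg_ptr->vlen = 256) and the signed lmul field holding
 * log2(LMUL) in the range [-3, 3]:
 *
 *     LMUL = 1   (lmul =  0):  256 >> (3 - 0) =  32 bytes (one register)
 *     LMUL = 1/4 (lmul = -2):  256 >> (3 + 2) =   8 bytes
 *     LMUL = 8   (lmul =  3):  256 >> (3 - 3) = 256 bytes (eight registers)
 *
 * This byte count is what the GVEC fast path passes as both oprsz and
 * maxsz when the vl_eq_vlmax condition allows it.
 */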
gen_set_label(over); return true; } static bool opivx_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, false) && - vext_check_reg(s, a->rd, false) && - vext_check_reg(s, a->rs2, false)); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_ss(s, a->rd, a->rs2, a->vm); } typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, TCGv_i64, @@ -910,17 +1337,15 @@ do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn, return false; } - if (a->vm && s->vl_eq_vlmax) { + if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) { TCGv_i64 src1 = tcg_temp_new_i64(); - TCGv tmp = tcg_temp_new(); - gen_get_gpr(tmp, a->rs1); - tcg_gen_ext_tl_i64(src1, tmp); + tcg_gen_ext_tl_i64(src1, get_gpr(s, a->rs1, EXT_SIGN)); gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), src1, MAXSZ(s), MAXSZ(s)); tcg_temp_free_i64(src1); - tcg_temp_free(tmp); + mark_vs_dirty(s); return true; } return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s); @@ -999,8 +1424,32 @@ static void tcg_gen_gvec_rsubs(unsigned vece, uint32_t dofs, uint32_t aofs, GEN_OPIVX_GVEC_TRANS(vrsub_vx, rsubs) +typedef enum { + IMM_ZX, /* Zero-extended */ + IMM_SX, /* Sign-extended */ + IMM_TRUNC_SEW, /* Truncate to log(SEW) bits */ + IMM_TRUNC_2SEW, /* Truncate to log(2*SEW) bits */ +} imm_mode_t; + +static int64_t extract_imm(DisasContext *s, uint32_t imm, imm_mode_t imm_mode) +{ + switch (imm_mode) { + case IMM_ZX: + return extract64(imm, 0, 5); + case IMM_SX: + return sextract64(imm, 0, 5); + case IMM_TRUNC_SEW: + return extract64(imm, 0, s->sew + 3); + case IMM_TRUNC_2SEW: + return extract64(imm, 0, s->sew + 4); + default: + g_assert_not_reached(); + } +} + static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm, - gen_helper_opivx *fn, DisasContext *s, int zx) + gen_helper_opivx *fn, DisasContext *s, + imm_mode_t imm_mode) { TCGv_ptr dest, src2, mask; TCGv src1; @@ -1009,19 +1458,20 @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm, TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); dest = tcg_temp_new_ptr(); mask = tcg_temp_new_ptr(); src2 = tcg_temp_new_ptr(); - if (zx) { - src1 = tcg_const_tl(imm); - } else { - src1 = tcg_const_tl(sextract64(imm, 0, 5)); - } - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + src1 = tcg_constant_tl(extract_imm(s, imm, imm_mode)); + data = FIELD_DP32(data, VDATA, VM, vm); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); - desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + data = FIELD_DP32(data, VDATA, VTA, s->vta); + data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); + data = FIELD_DP32(data, VDATA, VMA, s->vma); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2)); @@ -1032,8 +1482,7 @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm, tcg_temp_free_ptr(dest); tcg_temp_free_ptr(mask); tcg_temp_free_ptr(src2); - tcg_temp_free(src1); - tcg_temp_free_i32(desc); + mark_vs_dirty(s); gen_set_label(over); return true; } @@ -1043,28 +1492,23 @@ typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t, static inline bool do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn, - gen_helper_opivx *fn, int zx) + gen_helper_opivx *fn, imm_mode_t imm_mode) { if (!opivx_check(s, a)) { 
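/*
 * The extract_imm() cases above, illustrated with concrete values
 * (an illustrative sketch; assumes s->sew encodes MO_8..MO_64 as 0..3):
 *
 *     imm = 0x1f, IMM_ZX                   -> 31
 *     imm = 0x1f, IMM_SX                   -> -1
 *     imm = 0x1f, IMM_TRUNC_SEW,  SEW = 16 -> 0x1f & 0xf  = 15
 *     imm = 0x1f, IMM_TRUNC_2SEW, SEW = 16 -> 0x1f & 0x1f = 31
 *
 * IMM_TRUNC_SEW is the mode used by the shift-immediate forms below
 * (vsll.vi, vsrl.vi, vsra.vi, vssrl.vi, vssra.vi), so the shift amount is
 * taken modulo SEW; IMM_TRUNC_2SEW analogously truncates to log2(2*SEW) bits.
 */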
return false; } - if (a->vm && s->vl_eq_vlmax) { - if (zx) { - gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), - extract64(a->rs1, 0, 5), MAXSZ(s), MAXSZ(s)); - } else { - gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), - sextract64(a->rs1, 0, 5), MAXSZ(s), MAXSZ(s)); - } - } else { - return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, zx); + if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) { + gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), + extract_imm(s, a->rs1, imm_mode), MAXSZ(s), MAXSZ(s)); + mark_vs_dirty(s); + return true; } - return true; + return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, imm_mode); } /* OPIVI with GVEC IR */ -#define GEN_OPIVI_GVEC_TRANS(NAME, ZX, OPIVX, SUF) \ +#define GEN_OPIVI_GVEC_TRANS(NAME, IMM_MODE, OPIVX, SUF) \ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ { \ static gen_helper_opivx * const fns[4] = { \ @@ -1072,36 +1516,28 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \ }; \ return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF, \ - fns[s->sew], ZX); \ + fns[s->sew], IMM_MODE); \ } -GEN_OPIVI_GVEC_TRANS(vadd_vi, 0, vadd_vx, addi) +GEN_OPIVI_GVEC_TRANS(vadd_vi, IMM_SX, vadd_vx, addi) static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs, int64_t c, uint32_t oprsz, uint32_t maxsz) { - TCGv_i64 tmp = tcg_const_i64(c); + TCGv_i64 tmp = tcg_constant_i64(c); tcg_gen_gvec_rsubs(vece, dofs, aofs, tmp, oprsz, maxsz); - tcg_temp_free_i64(tmp); } -GEN_OPIVI_GVEC_TRANS(vrsub_vi, 0, vrsub_vx, rsubi) +GEN_OPIVI_GVEC_TRANS(vrsub_vi, IMM_SX, vrsub_vx, rsubi) /* Vector Widening Integer Add/Subtract */ /* OPIVV with WIDEN */ static bool opivv_widen_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, true) && - vext_check_reg(s, a->rd, true) && - vext_check_reg(s, a->rs2, false) && - vext_check_reg(s, a->rs1, false) && - vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2, - 1 << s->lmul) && - vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1, - 1 << s->lmul) && - (s->lmul < 0x3) && (s->sew < 0x3)); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm); } static bool do_opivv_widen(DisasContext *s, arg_rmrr *a, @@ -1112,15 +1548,19 @@ static bool do_opivv_widen(DisasContext *s, arg_rmrr *a, uint32_t data = 0; TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); data = FIELD_DP32(data, VDATA, VM, a->vm); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, VTA, s->vta); + data = FIELD_DP32(data, VDATA, VMA, s->vma); tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), - cpu_env, s->vlen / 8, s->vlen / 8, + cpu_env, s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data, fn); + mark_vs_dirty(s); gen_set_label(over); return true; } @@ -1146,13 +1586,9 @@ GEN_OPIVV_WIDEN_TRANS(vwsub_vv, opivv_widen_check) /* OPIVX with WIDEN */ static bool opivx_widen_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, true) && - vext_check_reg(s, a->rd, true) && - vext_check_reg(s, a->rs2, false) && - vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2, - 1 << s->lmul) && - (s->lmul < 0x3) && (s->sew < 0x3)); + return require_rvv(s) && + vext_check_isa_ill(s) && + 
vext_check_ds(s, a->rd, a->rs2, a->vm); } static bool do_opivx_widen(DisasContext *s, arg_rmrr *a, @@ -1183,14 +1619,9 @@ GEN_OPIVX_WIDEN_TRANS(vwsub_vx) /* WIDEN OPIVV with WIDEN */ static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, true) && - vext_check_reg(s, a->rd, true) && - vext_check_reg(s, a->rs2, true) && - vext_check_reg(s, a->rs1, false) && - vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1, - 1 << s->lmul) && - (s->lmul < 0x3) && (s->sew < 0x3)); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm); } static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a, @@ -1200,14 +1631,18 @@ static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a, uint32_t data = 0; TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); data = FIELD_DP32(data, VDATA, VM, a->vm); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, VTA, s->vta); + data = FIELD_DP32(data, VDATA, VMA, s->vma); tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), - cpu_env, s->vlen / 8, s->vlen / 8, data, fn); + cpu_env, s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data, fn); + mark_vs_dirty(s); gen_set_label(over); return true; } @@ -1233,11 +1668,9 @@ GEN_OPIWV_WIDEN_TRANS(vwsub_wv) /* WIDEN OPIVX with WIDEN */ static bool opiwx_widen_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, true) && - vext_check_reg(s, a->rd, true) && - vext_check_reg(s, a->rs2, true) && - (s->lmul < 0x3) && (s->sew < 0x3)); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_dd(s, a->rd, a->rs2, a->vm); } static bool do_opiwx_widen(DisasContext *s, arg_rmrr *a, @@ -1278,15 +1711,21 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ }; \ TCGLabel *over = gen_new_label(); \ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ \ - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ + data = \ + FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\ + data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, \ fns[s->sew]); \ + mark_vs_dirty(s); \ gen_set_label(over); \ return true; \ } \ @@ -1295,15 +1734,14 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ /* * For vadc and vsbc, an illegal instruction exception is raised if the - * destination vector register is v0 and LMUL > 1. (Section 12.3) + * destination vector register is v0 and LMUL > 1. 
(Section 11.4) */ static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_reg(s, a->rd, false) && - vext_check_reg(s, a->rs2, false) && - vext_check_reg(s, a->rs1, false) && - ((a->rd != 0) || (s->lmul == 0))); + return require_rvv(s) && + vext_check_isa_ill(s) && + (a->rd != 0) && + vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm); } GEN_OPIVV_TRANS(vadc_vvm, opivv_vadc_check) @@ -1315,11 +1753,9 @@ GEN_OPIVV_TRANS(vsbc_vvm, opivv_vadc_check) */ static bool opivv_vmadc_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_reg(s, a->rs2, false) && - vext_check_reg(s, a->rs1, false) && - vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) && - vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_mss(s, a->rd, a->rs1, a->rs2); } GEN_OPIVV_TRANS(vmadc_vvm, opivv_vmadc_check) @@ -1327,10 +1763,10 @@ GEN_OPIVV_TRANS(vmsbc_vvm, opivv_vmadc_check) static bool opivx_vadc_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_reg(s, a->rd, false) && - vext_check_reg(s, a->rs2, false) && - ((a->rd != 0) || (s->lmul == 0))); + return require_rvv(s) && + vext_check_isa_ill(s) && + (a->rd != 0) && + vext_check_ss(s, a->rd, a->rs2, a->vm); } /* OPIVX without GVEC IR */ @@ -1353,16 +1789,16 @@ GEN_OPIVX_TRANS(vsbc_vxm, opivx_vadc_check) static bool opivx_vmadc_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_reg(s, a->rs2, false) && - vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_ms(s, a->rd, a->rs2); } GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check) GEN_OPIVX_TRANS(vmsbc_vxm, opivx_vmadc_check) /* OPIVI without GVEC IR */ -#define GEN_OPIVI_TRANS(NAME, ZX, OPIVX, CHECK) \ +#define GEN_OPIVI_TRANS(NAME, IMM_MODE, OPIVX, CHECK) \ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ { \ if (CHECK(s, a)) { \ @@ -1371,13 +1807,13 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \ }; \ return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \ - fns[s->sew], s, ZX); \ + fns[s->sew], s, IMM_MODE); \ } \ return false; \ } -GEN_OPIVI_TRANS(vadc_vim, 0, vadc_vxm, opivx_vadc_check) -GEN_OPIVI_TRANS(vmadc_vim, 0, vmadc_vxm, opivx_vmadc_check) +GEN_OPIVI_TRANS(vadc_vim, IMM_SX, vadc_vxm, opivx_vadc_check) +GEN_OPIVI_TRANS(vmadc_vim, IMM_SX, vmadc_vxm, opivx_vmadc_check) /* Vector Bitwise Logical Instructions */ GEN_OPIVV_GVEC_TRANS(vand_vv, and) @@ -1386,9 +1822,9 @@ GEN_OPIVV_GVEC_TRANS(vxor_vv, xor) GEN_OPIVX_GVEC_TRANS(vand_vx, ands) GEN_OPIVX_GVEC_TRANS(vor_vx, ors) GEN_OPIVX_GVEC_TRANS(vxor_vx, xors) -GEN_OPIVI_GVEC_TRANS(vand_vi, 0, vand_vx, andi) -GEN_OPIVI_GVEC_TRANS(vor_vi, 0, vor_vx, ori) -GEN_OPIVI_GVEC_TRANS(vxor_vi, 0, vxor_vx, xori) +GEN_OPIVI_GVEC_TRANS(vand_vi, IMM_SX, vand_vx, andi) +GEN_OPIVI_GVEC_TRANS(vor_vi, IMM_SX, vor_vx, ori) +GEN_OPIVI_GVEC_TRANS(vxor_vi, IMM_SX, vxor_vx, xori) /* Vector Single-Width Bit Shift Instructions */ GEN_OPIVV_GVEC_TRANS(vsll_vv, shlv) @@ -1406,18 +1842,16 @@ do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn, return false; } - if (a->vm && s->vl_eq_vlmax) { + if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) { TCGv_i32 src1 = tcg_temp_new_i32(); - TCGv tmp = tcg_temp_new(); - gen_get_gpr(tmp, a->rs1); - tcg_gen_trunc_tl_i32(src1, tmp); + tcg_gen_trunc_tl_i32(src1, get_gpr(s, 
a->rs1, EXT_NONE)); tcg_gen_extract_i32(src1, src1, 0, s->sew + 3); gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), src1, MAXSZ(s), MAXSZ(s)); tcg_temp_free_i32(src1); - tcg_temp_free(tmp); + mark_vs_dirty(s); return true; } return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s); @@ -1438,28 +1872,23 @@ GEN_OPIVX_GVEC_SHIFT_TRANS(vsll_vx, shls) GEN_OPIVX_GVEC_SHIFT_TRANS(vsrl_vx, shrs) GEN_OPIVX_GVEC_SHIFT_TRANS(vsra_vx, sars) -GEN_OPIVI_GVEC_TRANS(vsll_vi, 1, vsll_vx, shli) -GEN_OPIVI_GVEC_TRANS(vsrl_vi, 1, vsrl_vx, shri) -GEN_OPIVI_GVEC_TRANS(vsra_vi, 1, vsra_vx, sari) +GEN_OPIVI_GVEC_TRANS(vsll_vi, IMM_TRUNC_SEW, vsll_vx, shli) +GEN_OPIVI_GVEC_TRANS(vsrl_vi, IMM_TRUNC_SEW, vsrl_vx, shri) +GEN_OPIVI_GVEC_TRANS(vsra_vi, IMM_TRUNC_SEW, vsra_vx, sari) /* Vector Narrowing Integer Right Shift Instructions */ -static bool opivv_narrow_check(DisasContext *s, arg_rmrr *a) +static bool opiwv_narrow_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, false) && - vext_check_reg(s, a->rd, false) && - vext_check_reg(s, a->rs2, true) && - vext_check_reg(s, a->rs1, false) && - vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2, - 2 << s->lmul) && - (s->lmul < 0x3) && (s->sew < 0x3)); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_sds(s, a->rd, a->rs1, a->rs2, a->vm); } /* OPIVV with NARROW */ -#define GEN_OPIVV_NARROW_TRANS(NAME) \ +#define GEN_OPIWV_NARROW_TRANS(NAME) \ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ { \ - if (opivv_narrow_check(s, a)) { \ + if (opiwv_narrow_check(s, a)) { \ uint32_t data = 0; \ static gen_helper_gvec_4_ptr * const fns[3] = { \ gen_helper_##NAME##_b, \ @@ -1468,39 +1897,39 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ }; \ TCGLabel *over = gen_new_label(); \ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ \ - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ + data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, \ fns[s->sew]); \ + mark_vs_dirty(s); \ gen_set_label(over); \ return true; \ } \ return false; \ } -GEN_OPIVV_NARROW_TRANS(vnsra_vv) -GEN_OPIVV_NARROW_TRANS(vnsrl_vv) +GEN_OPIWV_NARROW_TRANS(vnsra_wv) +GEN_OPIWV_NARROW_TRANS(vnsrl_wv) -static bool opivx_narrow_check(DisasContext *s, arg_rmrr *a) +static bool opiwx_narrow_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, false) && - vext_check_reg(s, a->rd, false) && - vext_check_reg(s, a->rs2, true) && - vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2, - 2 << s->lmul) && - (s->lmul < 0x3) && (s->sew < 0x3)); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_sd(s, a->rd, a->rs2, a->vm); } /* OPIVX with NARROW */ -#define GEN_OPIVX_NARROW_TRANS(NAME) \ +#define GEN_OPIWX_NARROW_TRANS(NAME) \ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ { \ - if (opivx_narrow_check(s, a)) { \ + if (opiwx_narrow_check(s, a)) { \ static gen_helper_opivx * const fns[3] = { \ gen_helper_##NAME##_b, \ gen_helper_##NAME##_h, \ @@ -1511,27 +1940,27 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ return false; 
\ } -GEN_OPIVX_NARROW_TRANS(vnsra_vx) -GEN_OPIVX_NARROW_TRANS(vnsrl_vx) +GEN_OPIWX_NARROW_TRANS(vnsra_wx) +GEN_OPIWX_NARROW_TRANS(vnsrl_wx) -/* OPIVI with NARROW */ -#define GEN_OPIVI_NARROW_TRANS(NAME, ZX, OPIVX) \ +/* OPIWI with NARROW */ +#define GEN_OPIWI_NARROW_TRANS(NAME, IMM_MODE, OPIVX) \ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ { \ - if (opivx_narrow_check(s, a)) { \ + if (opiwx_narrow_check(s, a)) { \ static gen_helper_opivx * const fns[3] = { \ gen_helper_##OPIVX##_b, \ gen_helper_##OPIVX##_h, \ gen_helper_##OPIVX##_w, \ }; \ return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \ - fns[s->sew], s, ZX); \ + fns[s->sew], s, IMM_MODE); \ } \ return false; \ } -GEN_OPIVI_NARROW_TRANS(vnsra_vi, 1, vnsra_vx) -GEN_OPIVI_NARROW_TRANS(vnsrl_vi, 1, vnsrl_vx) +GEN_OPIWI_NARROW_TRANS(vnsra_wi, IMM_ZX, vnsra_wx) +GEN_OPIWI_NARROW_TRANS(vnsrl_wi, IMM_ZX, vnsrl_wx) /* Vector Integer Comparison Instructions */ /* @@ -1541,13 +1970,11 @@ GEN_OPIVI_NARROW_TRANS(vnsrl_vi, 1, vnsrl_vx) */ static bool opivv_cmp_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_reg(s, a->rs2, false) && - vext_check_reg(s, a->rs1, false) && - ((vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) && - vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)) || - (s->lmul == 0))); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_mss(s, a->rd, a->rs1, a->rs2); } + GEN_OPIVV_TRANS(vmseq_vv, opivv_cmp_check) GEN_OPIVV_TRANS(vmsne_vv, opivv_cmp_check) GEN_OPIVV_TRANS(vmsltu_vv, opivv_cmp_check) @@ -1557,10 +1984,9 @@ GEN_OPIVV_TRANS(vmsle_vv, opivv_cmp_check) static bool opivx_cmp_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_reg(s, a->rs2, false) && - (vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul) || - (s->lmul == 0))); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_ms(s, a->rd, a->rs2); } GEN_OPIVX_TRANS(vmseq_vx, opivx_cmp_check) @@ -1572,12 +1998,12 @@ GEN_OPIVX_TRANS(vmsle_vx, opivx_cmp_check) GEN_OPIVX_TRANS(vmsgtu_vx, opivx_cmp_check) GEN_OPIVX_TRANS(vmsgt_vx, opivx_cmp_check) -GEN_OPIVI_TRANS(vmseq_vi, 0, vmseq_vx, opivx_cmp_check) -GEN_OPIVI_TRANS(vmsne_vi, 0, vmsne_vx, opivx_cmp_check) -GEN_OPIVI_TRANS(vmsleu_vi, 1, vmsleu_vx, opivx_cmp_check) -GEN_OPIVI_TRANS(vmsle_vi, 0, vmsle_vx, opivx_cmp_check) -GEN_OPIVI_TRANS(vmsgtu_vi, 1, vmsgtu_vx, opivx_cmp_check) -GEN_OPIVI_TRANS(vmsgt_vi, 0, vmsgt_vx, opivx_cmp_check) +GEN_OPIVI_TRANS(vmseq_vi, IMM_SX, vmseq_vx, opivx_cmp_check) +GEN_OPIVI_TRANS(vmsne_vi, IMM_SX, vmsne_vx, opivx_cmp_check) +GEN_OPIVI_TRANS(vmsleu_vi, IMM_SX, vmsleu_vx, opivx_cmp_check) +GEN_OPIVI_TRANS(vmsle_vi, IMM_SX, vmsle_vx, opivx_cmp_check) +GEN_OPIVI_TRANS(vmsgtu_vi, IMM_SX, vmsgtu_vx, opivx_cmp_check) +GEN_OPIVI_TRANS(vmsgt_vi, IMM_SX, vmsgt_vx, opivx_cmp_check) /* Vector Integer Min/Max Instructions */ GEN_OPIVV_GVEC_TRANS(vminu_vv, umin) @@ -1590,14 +2016,43 @@ GEN_OPIVX_TRANS(vmaxu_vx, opivx_check) GEN_OPIVX_TRANS(vmax_vx, opivx_check) /* Vector Single-Width Integer Multiply Instructions */ + +static bool vmulh_vv_check(DisasContext *s, arg_rmrr *a) +{ + /* + * All Zve* extensions support all vector integer instructions, + * except that the vmulh integer multiply variants + * that return the high word of the product + * (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx) + * are not included for EEW=64 in Zve64*. (Section 18.2) + */ + return opivv_check(s, a) && + (!has_ext(s, RVV) && + s->cfg_ptr->ext_zve64f ? 
s->sew != MO_64 : true); +} + +static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a) +{ + /* + * All Zve* extensions support all vector integer instructions, + * except that the vmulh integer multiply variants + * that return the high word of the product + * (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx) + * are not included for EEW=64 in Zve64*. (Section 18.2) + */ + return opivx_check(s, a) && + (!has_ext(s, RVV) && + s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true); +} + GEN_OPIVV_GVEC_TRANS(vmul_vv, mul) -GEN_OPIVV_TRANS(vmulh_vv, opivv_check) -GEN_OPIVV_TRANS(vmulhu_vv, opivv_check) -GEN_OPIVV_TRANS(vmulhsu_vv, opivv_check) +GEN_OPIVV_TRANS(vmulh_vv, vmulh_vv_check) +GEN_OPIVV_TRANS(vmulhu_vv, vmulh_vv_check) +GEN_OPIVV_TRANS(vmulhsu_vv, vmulh_vv_check) GEN_OPIVX_GVEC_TRANS(vmul_vx, muls) -GEN_OPIVX_TRANS(vmulh_vx, opivx_check) -GEN_OPIVX_TRANS(vmulhu_vx, opivx_check) -GEN_OPIVX_TRANS(vmulhsu_vx, opivx_check) +GEN_OPIVX_TRANS(vmulh_vx, vmulh_vx_check) +GEN_OPIVX_TRANS(vmulhu_vx, vmulh_vx_check) +GEN_OPIVX_TRANS(vmulhsu_vx, vmulh_vx_check) /* Vector Integer Divide Instructions */ GEN_OPIVV_TRANS(vdivu_vv, opivv_check) @@ -1639,27 +2094,32 @@ GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx) /* Vector Integer Merge and Move Instructions */ static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a) { - if (vext_check_isa_ill(s) && - vext_check_reg(s, a->rd, false) && - vext_check_reg(s, a->rs1, false)) { - - if (s->vl_eq_vlmax) { + if (require_rvv(s) && + vext_check_isa_ill(s) && + /* vmv.v.v has rs2 = 0 and vm = 1 */ + vext_check_sss(s, a->rd, a->rs1, 0, 1)) { + if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) { tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), MAXSZ(s), MAXSZ(s)); } else { uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, VTA, s->vta); static gen_helper_gvec_2_ptr * const fns[4] = { gen_helper_vmv_v_v_b, gen_helper_vmv_v_v_h, gen_helper_vmv_v_v_w, gen_helper_vmv_v_v_d, }; TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), - cpu_env, 0, s->vlen / 8, data, fns[s->sew]); + cpu_env, s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data, + fns[s->sew]); gen_set_label(over); } + mark_vs_dirty(s); return true; } return false; @@ -1668,40 +2128,50 @@ static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a) typedef void gen_helper_vmv_vx(TCGv_ptr, TCGv_i64, TCGv_env, TCGv_i32); static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a) { - if (vext_check_isa_ill(s) && - vext_check_reg(s, a->rd, false)) { - + if (require_rvv(s) && + vext_check_isa_ill(s) && + /* vmv.v.x has rs2 = 0 and vm = 1 */ + vext_check_ss(s, a->rd, 0, 1)) { TCGv s1; TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); - s1 = tcg_temp_new(); - gen_get_gpr(s1, a->rs1); + s1 = get_gpr(s, a->rs1, EXT_SIGN); - if (s->vl_eq_vlmax) { - tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd), - MAXSZ(s), MAXSZ(s), s1); + if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) { + if (get_xl(s) == MXL_RV32 && s->sew == MO_64) { + TCGv_i64 s1_i64 = tcg_temp_new_i64(); + tcg_gen_ext_tl_i64(s1_i64, s1); + tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd), + MAXSZ(s), MAXSZ(s), s1_i64); + tcg_temp_free_i64(s1_i64); + } else { + tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd), + MAXSZ(s), MAXSZ(s), s1); + } } else { - TCGv_i32 desc ; + 
TCGv_i32 desc; TCGv_i64 s1_i64 = tcg_temp_new_i64(); TCGv_ptr dest = tcg_temp_new_ptr(); uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, VTA, s->vta); static gen_helper_vmv_vx * const fns[4] = { gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h, gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d, }; tcg_gen_ext_tl_i64(s1_i64, s1); - desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd)); fns[s->sew](dest, s1_i64, cpu_env, desc); tcg_temp_free_ptr(dest); - tcg_temp_free_i32(desc); tcg_temp_free_i64(s1_i64); } - tcg_temp_free(s1); + mark_vs_dirty(s); gen_set_label(over); return true; } @@ -1710,34 +2180,38 @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a) static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a) { - if (vext_check_isa_ill(s) && - vext_check_reg(s, a->rd, false)) { - + if (require_rvv(s) && + vext_check_isa_ill(s) && + /* vmv.v.i has rs2 = 0 and vm = 1 */ + vext_check_ss(s, a->rd, 0, 1)) { int64_t simm = sextract64(a->rs1, 0, 5); - if (s->vl_eq_vlmax) { + if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) { tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd), MAXSZ(s), MAXSZ(s), simm); + mark_vs_dirty(s); } else { TCGv_i32 desc; TCGv_i64 s1; TCGv_ptr dest; uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, VTA, s->vta); static gen_helper_vmv_vx * const fns[4] = { gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h, gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d, }; TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); - s1 = tcg_const_i64(simm); + s1 = tcg_constant_i64(simm); dest = tcg_temp_new_ptr(); - desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd)); fns[s->sew](dest, s1, cpu_env, desc); tcg_temp_free_ptr(dest); - tcg_temp_free_i32(desc); - tcg_temp_free_i64(s1); + mark_vs_dirty(s); gen_set_label(over); } return true; @@ -1747,7 +2221,7 @@ static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a) GEN_OPIVV_TRANS(vmerge_vvm, opivv_vadc_check) GEN_OPIVX_TRANS(vmerge_vxm, opivx_vadc_check) -GEN_OPIVI_TRANS(vmerge_vim, 0, vmerge_vxm, opivx_vadc_check) +GEN_OPIVI_TRANS(vmerge_vim, IMM_SX, vmerge_vxm, opivx_vadc_check) /* *** Vector Fixed-Point Arithmetic Instructions @@ -1762,48 +2236,94 @@ GEN_OPIVX_TRANS(vsaddu_vx, opivx_check) GEN_OPIVX_TRANS(vsadd_vx, opivx_check) GEN_OPIVX_TRANS(vssubu_vx, opivx_check) GEN_OPIVX_TRANS(vssub_vx, opivx_check) -GEN_OPIVI_TRANS(vsaddu_vi, 1, vsaddu_vx, opivx_check) -GEN_OPIVI_TRANS(vsadd_vi, 0, vsadd_vx, opivx_check) +GEN_OPIVI_TRANS(vsaddu_vi, IMM_SX, vsaddu_vx, opivx_check) +GEN_OPIVI_TRANS(vsadd_vi, IMM_SX, vsadd_vx, opivx_check) /* Vector Single-Width Averaging Add and Subtract */ GEN_OPIVV_TRANS(vaadd_vv, opivv_check) +GEN_OPIVV_TRANS(vaaddu_vv, opivv_check) GEN_OPIVV_TRANS(vasub_vv, opivv_check) +GEN_OPIVV_TRANS(vasubu_vv, opivv_check) GEN_OPIVX_TRANS(vaadd_vx, opivx_check) +GEN_OPIVX_TRANS(vaaddu_vx, opivx_check) GEN_OPIVX_TRANS(vasub_vx, opivx_check) -GEN_OPIVI_TRANS(vaadd_vi, 0, vaadd_vx, opivx_check) +GEN_OPIVX_TRANS(vasubu_vx, opivx_check) /* Vector Single-Width Fractional Multiply with Rounding and Saturation */ -GEN_OPIVV_TRANS(vsmul_vv, opivv_check) -GEN_OPIVX_TRANS(vsmul_vx, 
opivx_check) -/* Vector Widening Saturating Scaled Multiply-Add */ -GEN_OPIVV_WIDEN_TRANS(vwsmaccu_vv, opivv_widen_check) -GEN_OPIVV_WIDEN_TRANS(vwsmacc_vv, opivv_widen_check) -GEN_OPIVV_WIDEN_TRANS(vwsmaccsu_vv, opivv_widen_check) -GEN_OPIVX_WIDEN_TRANS(vwsmaccu_vx) -GEN_OPIVX_WIDEN_TRANS(vwsmacc_vx) -GEN_OPIVX_WIDEN_TRANS(vwsmaccsu_vx) -GEN_OPIVX_WIDEN_TRANS(vwsmaccus_vx) +static bool vsmul_vv_check(DisasContext *s, arg_rmrr *a) +{ + /* + * All Zve* extensions support all vector fixed-point arithmetic + * instructions, except that vsmul.vv and vsmul.vx are not supported + * for EEW=64 in Zve64*. (Section 18.2) + */ + return opivv_check(s, a) && + (!has_ext(s, RVV) && + s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true); +} + +static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a) +{ + /* + * All Zve* extensions support all vector fixed-point arithmetic + * instructions, except that vsmul.vv and vsmul.vx are not supported + * for EEW=64 in Zve64*. (Section 18.2) + */ + return opivx_check(s, a) && + (!has_ext(s, RVV) && + s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true); +} + +GEN_OPIVV_TRANS(vsmul_vv, vsmul_vv_check) +GEN_OPIVX_TRANS(vsmul_vx, vsmul_vx_check) /* Vector Single-Width Scaling Shift Instructions */ GEN_OPIVV_TRANS(vssrl_vv, opivv_check) GEN_OPIVV_TRANS(vssra_vv, opivv_check) GEN_OPIVX_TRANS(vssrl_vx, opivx_check) GEN_OPIVX_TRANS(vssra_vx, opivx_check) -GEN_OPIVI_TRANS(vssrl_vi, 1, vssrl_vx, opivx_check) -GEN_OPIVI_TRANS(vssra_vi, 0, vssra_vx, opivx_check) +GEN_OPIVI_TRANS(vssrl_vi, IMM_TRUNC_SEW, vssrl_vx, opivx_check) +GEN_OPIVI_TRANS(vssra_vi, IMM_TRUNC_SEW, vssra_vx, opivx_check) /* Vector Narrowing Fixed-Point Clip Instructions */ -GEN_OPIVV_NARROW_TRANS(vnclipu_vv) -GEN_OPIVV_NARROW_TRANS(vnclip_vv) -GEN_OPIVX_NARROW_TRANS(vnclipu_vx) -GEN_OPIVX_NARROW_TRANS(vnclip_vx) -GEN_OPIVI_NARROW_TRANS(vnclipu_vi, 1, vnclipu_vx) -GEN_OPIVI_NARROW_TRANS(vnclip_vi, 1, vnclip_vx) +GEN_OPIWV_NARROW_TRANS(vnclipu_wv) +GEN_OPIWV_NARROW_TRANS(vnclip_wv) +GEN_OPIWX_NARROW_TRANS(vnclipu_wx) +GEN_OPIWX_NARROW_TRANS(vnclip_wx) +GEN_OPIWI_NARROW_TRANS(vnclipu_wi, IMM_ZX, vnclipu_wx) +GEN_OPIWI_NARROW_TRANS(vnclip_wi, IMM_ZX, vnclip_wx) /* *** Vector Float Point Arithmetic Instructions */ + +/* + * As RVF-only cpus always have values NaN-boxed to 64-bits, + * RVF and RVD can be treated equally. + * We don't have to deal with the cases of: SEW > FLEN. + * + * If SEW < FLEN, check whether input fp register is a valid + * NaN-boxed value, in which case the least-significant SEW bits + * of the f regsiter are used, else the canonical NaN value is used. 
+ */ +static void do_nanbox(DisasContext *s, TCGv_i64 out, TCGv_i64 in) +{ + switch (s->sew) { + case 1: + gen_check_nanbox_h(out, in); + break; + case 2: + gen_check_nanbox_s(out, in); + break; + case 3: + tcg_gen_mov_i64(out, in); + break; + default: + g_assert_not_reached(); + } +} + /* Vector Single-Width Floating-Point Add/Subtract Instructions */ /* @@ -1812,12 +2332,12 @@ GEN_OPIVI_NARROW_TRANS(vnclip_vi, 1, vnclip_vx) */ static bool opfvv_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, false) && - vext_check_reg(s, a->rd, false) && - vext_check_reg(s, a->rs2, false) && - vext_check_reg(s, a->rs1, false) && - (s->sew != 0)); + return require_rvv(s) && + require_rvf(s) && + vext_check_isa_ill(s) && + vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm) && + require_zve32f(s) && + require_zve64f(s); } /* OPFVV without GVEC IR */ @@ -1832,17 +2352,23 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ gen_helper_##NAME##_d, \ }; \ TCGLabel *over = gen_new_label(); \ - gen_set_rm(s, 7); \ + gen_set_rm(s, RISCV_FRM_DYN); \ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ \ - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ + data = \ + FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\ + data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, \ fns[s->sew - 1]); \ + mark_vs_dirty(s); \ gen_set_label(over); \ return true; \ } \ @@ -1859,40 +2385,49 @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, { TCGv_ptr dest, src2, mask; TCGv_i32 desc; + TCGv_i64 t1; TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); dest = tcg_temp_new_ptr(); mask = tcg_temp_new_ptr(); src2 = tcg_temp_new_ptr(); - desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2)); tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); - fn(dest, mask, cpu_fpr[rs1], src2, cpu_env, desc); + /* NaN-box f[rs1] */ + t1 = tcg_temp_new_i64(); + do_nanbox(s, t1, cpu_fpr[rs1]); + + fn(dest, mask, t1, src2, cpu_env, desc); tcg_temp_free_ptr(dest); tcg_temp_free_ptr(mask); tcg_temp_free_ptr(src2); - tcg_temp_free_i32(desc); + tcg_temp_free_i64(t1); + mark_vs_dirty(s); gen_set_label(over); return true; } -static bool opfvf_check(DisasContext *s, arg_rmrr *a) -{ /* * If the current SEW does not correspond to a supported IEEE floating-point * type, an illegal instruction exception is raised */ - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, false) && - vext_check_reg(s, a->rd, false) && - vext_check_reg(s, a->rs2, false) && - (s->sew != 0)); +static bool opfvf_check(DisasContext *s, arg_rmrr *a) +{ + return require_rvv(s) && + require_rvf(s) && + vext_check_isa_ill(s) && + vext_check_ss(s, a->rd, a->rs2, a->vm) && + require_zve32f(s) && + require_zve64f(s); } /* OPFVF without GVEC IR */ @@ -1906,10 +2441,13 @@ static bool 
trans_##NAME(DisasContext *s, arg_rmrr *a) \ gen_helper_##NAME##_w, \ gen_helper_##NAME##_d, \ }; \ - gen_set_rm(s, 7); \ - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ + gen_set_rm(s, RISCV_FRM_DYN); \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ + data = FIELD_DP32(data, VDATA, VTA_ALL_1S, \ + s->cfg_vta_all_1s); \ + data = FIELD_DP32(data, VDATA, VMA, s->vma); \ return opfvf_trans(a->rd, a->rs1, a->rs2, data, \ fns[s->sew - 1], s); \ } \ @@ -1923,16 +2461,13 @@ GEN_OPFVF_TRANS(vfrsub_vf, opfvf_check) /* Vector Widening Floating-Point Add/Subtract Instructions */ static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, true) && - vext_check_reg(s, a->rd, true) && - vext_check_reg(s, a->rs2, false) && - vext_check_reg(s, a->rs1, false) && - vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2, - 1 << s->lmul) && - vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1, - 1 << s->lmul) && - (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0)); + return require_rvv(s) && + require_scale_rvf(s) && + (s->sew != MO_8) && + vext_check_isa_ill(s) && + vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) && + require_scale_zve32f(s) && + require_scale_zve64f(s); } /* OPFVV with WIDEN */ @@ -1945,17 +2480,21 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ gen_helper_##NAME##_h, gen_helper_##NAME##_w, \ }; \ TCGLabel *over = gen_new_label(); \ - gen_set_rm(s, 7); \ + gen_set_rm(s, RISCV_FRM_DYN); \ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);\ \ - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ + data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, \ fns[s->sew - 1]); \ + mark_vs_dirty(s); \ gen_set_label(over); \ return true; \ } \ @@ -1967,13 +2506,13 @@ GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check) static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, true) && - vext_check_reg(s, a->rd, true) && - vext_check_reg(s, a->rs2, false) && - vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2, - 1 << s->lmul) && - (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0)); + return require_rvv(s) && + require_scale_rvf(s) && + (s->sew != MO_8) && + vext_check_isa_ill(s) && + vext_check_ds(s, a->rd, a->rs2, a->vm) && + require_scale_zve32f(s) && + require_scale_zve64f(s); } /* OPFVF with WIDEN */ @@ -1985,10 +2524,11 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ static gen_helper_opfvf *const fns[2] = { \ gen_helper_##NAME##_h, gen_helper_##NAME##_w, \ }; \ - gen_set_rm(s, 7); \ - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ + gen_set_rm(s, RISCV_FRM_DYN); \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ + data = FIELD_DP32(data, VDATA, VMA, s->vma); \ return opfvf_trans(a->rd, a->rs1, a->rs2, data, \ fns[s->sew - 1], s); \ } \ @@ -2000,14 +2540,13 @@ GEN_OPFVF_WIDEN_TRANS(vfwsub_vf) static bool 
opfwv_widen_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, true) && - vext_check_reg(s, a->rd, true) && - vext_check_reg(s, a->rs2, true) && - vext_check_reg(s, a->rs1, false) && - vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1, - 1 << s->lmul) && - (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0)); + return require_rvv(s) && + require_scale_rvf(s) && + (s->sew != MO_8) && + vext_check_isa_ill(s) && + vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm) && + require_scale_zve32f(s) && + require_scale_zve64f(s); } /* WIDEN OPFVV with WIDEN */ @@ -2020,17 +2559,21 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ gen_helper_##NAME##_h, gen_helper_##NAME##_w, \ }; \ TCGLabel *over = gen_new_label(); \ - gen_set_rm(s, 7); \ + gen_set_rm(s, RISCV_FRM_DYN); \ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ \ - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ + data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, \ fns[s->sew - 1]); \ + mark_vs_dirty(s); \ gen_set_label(over); \ return true; \ } \ @@ -2042,11 +2585,13 @@ GEN_OPFWV_WIDEN_TRANS(vfwsub_wv) static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, true) && - vext_check_reg(s, a->rd, true) && - vext_check_reg(s, a->rs2, true) && - (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0)); + return require_rvv(s) && + require_scale_rvf(s) && + (s->sew != MO_8) && + vext_check_isa_ill(s) && + vext_check_dd(s, a->rd, a->rs2, a->vm) && + require_scale_zve32f(s) && + require_scale_zve64f(s); } /* WIDEN OPFVF with WIDEN */ @@ -2058,10 +2603,11 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ static gen_helper_opfvf *const fns[2] = { \ gen_helper_##NAME##_h, gen_helper_##NAME##_w, \ }; \ - gen_set_rm(s, 7); \ - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ + gen_set_rm(s, RISCV_FRM_DYN); \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ + data = FIELD_DP32(data, VDATA, VMA, s->vma); \ return opfvf_trans(a->rd, a->rs1, a->rs2, data, \ fns[s->sew - 1], s); \ } \ @@ -2118,41 +2664,60 @@ GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf) */ static bool opfv_check(DisasContext *s, arg_rmr *a) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, false) && - vext_check_reg(s, a->rd, false) && - vext_check_reg(s, a->rs2, false) && - (s->sew != 0)); + return require_rvv(s) && + require_rvf(s) && + vext_check_isa_ill(s) && + /* OPFV instructions ignore vs1 check */ + vext_check_ss(s, a->rd, a->rs2, a->vm) && + require_zve32f(s) && + require_zve64f(s); } -#define GEN_OPFV_TRANS(NAME, CHECK) \ -static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ -{ \ - if (CHECK(s, a)) { \ - uint32_t data = 0; \ - static gen_helper_gvec_3_ptr * const fns[3] = { \ - gen_helper_##NAME##_h, \ - gen_helper_##NAME##_w, \ - gen_helper_##NAME##_d, \ - }; \ - TCGLabel *over = gen_new_label(); \ - gen_set_rm(s, 7); \ - tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ - \ - data = FIELD_DP32(data, 
VDATA, MLEN, s->mlen); \ - data = FIELD_DP32(data, VDATA, VM, a->vm); \ - data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ - tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ - vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, \ - fns[s->sew - 1]); \ - gen_set_label(over); \ - return true; \ - } \ - return false; \ +static bool do_opfv(DisasContext *s, arg_rmr *a, + gen_helper_gvec_3_ptr *fn, + bool (*checkfn)(DisasContext *, arg_rmr *), + int rm) +{ + if (checkfn(s, a)) { + if (rm != RISCV_FRM_DYN) { + gen_set_rm(s, RISCV_FRM_DYN); + } + + uint32_t data = 0; + TCGLabel *over = gen_new_label(); + gen_set_rm(s, rm); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); + + data = FIELD_DP32(data, VDATA, VM, a->vm); + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, VTA, s->vta); + data = FIELD_DP32(data, VDATA, VMA, s->vma); + tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), + vreg_ofs(s, a->rs2), cpu_env, + s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data, fn); + mark_vs_dirty(s); + gen_set_label(over); + return true; + } + return false; } -GEN_OPFV_TRANS(vfsqrt_v, opfv_check) +#define GEN_OPFV_TRANS(NAME, CHECK, FRM) \ +static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ +{ \ + static gen_helper_gvec_3_ptr * const fns[3] = { \ + gen_helper_##NAME##_h, \ + gen_helper_##NAME##_w, \ + gen_helper_##NAME##_d \ + }; \ + return do_opfv(s, a, fns[s->sew - 1], CHECK, FRM); \ +} + +GEN_OPFV_TRANS(vfsqrt_v, opfv_check, RISCV_FRM_DYN) +GEN_OPFV_TRANS(vfrsqrt7_v, opfv_check, RISCV_FRM_DYN) +GEN_OPFV_TRANS(vfrec7_v, opfv_check, RISCV_FRM_DYN) /* Vector Floating-Point MIN/MAX Instructions */ GEN_OPFVV_TRANS(vfmin_vv, opfvv_check) @@ -2171,28 +2736,27 @@ GEN_OPFVF_TRANS(vfsgnjx_vf, opfvf_check) /* Vector Floating-Point Compare Instructions */ static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_reg(s, a->rs2, false) && - vext_check_reg(s, a->rs1, false) && - (s->sew != 0) && - ((vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) && - vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)) || - (s->lmul == 0))); + return require_rvv(s) && + require_rvf(s) && + vext_check_isa_ill(s) && + vext_check_mss(s, a->rd, a->rs1, a->rs2) && + require_zve32f(s) && + require_zve64f(s); } GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check) GEN_OPFVV_TRANS(vmfne_vv, opfvv_cmp_check) GEN_OPFVV_TRANS(vmflt_vv, opfvv_cmp_check) GEN_OPFVV_TRANS(vmfle_vv, opfvv_cmp_check) -GEN_OPFVV_TRANS(vmford_vv, opfvv_cmp_check) static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_reg(s, a->rs2, false) && - (s->sew != 0) && - (vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul) || - (s->lmul == 0))); + return require_rvv(s) && + require_rvf(s) && + vext_check_isa_ill(s) && + vext_check_ms(s, a->rd, a->rs2) && + require_zve32f(s) && + require_zve64f(s); } GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check) @@ -2201,27 +2765,39 @@ GEN_OPFVF_TRANS(vmflt_vf, opfvf_cmp_check) GEN_OPFVF_TRANS(vmfle_vf, opfvf_cmp_check) GEN_OPFVF_TRANS(vmfgt_vf, opfvf_cmp_check) GEN_OPFVF_TRANS(vmfge_vf, opfvf_cmp_check) -GEN_OPFVF_TRANS(vmford_vf, opfvf_cmp_check) /* Vector Floating-Point Classify Instruction */ -GEN_OPFV_TRANS(vfclass_v, opfv_check) +GEN_OPFV_TRANS(vfclass_v, opfv_check, RISCV_FRM_DYN) /* Vector Floating-Point Merge Instruction */ GEN_OPFVF_TRANS(vfmerge_vfm, opfvf_check) static bool trans_vfmv_v_f(DisasContext *s, 
arg_vfmv_v_f *a) { - if (vext_check_isa_ill(s) && - vext_check_reg(s, a->rd, false) && - (s->sew != 0)) { + if (require_rvv(s) && + require_rvf(s) && + vext_check_isa_ill(s) && + require_align(a->rd, s->lmul) && + require_zve32f(s) && + require_zve64f(s)) { + gen_set_rm(s, RISCV_FRM_DYN); + + TCGv_i64 t1; + + if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) { + t1 = tcg_temp_new_i64(); + /* NaN-box f[rs1] */ + do_nanbox(s, t1, cpu_fpr[a->rs1]); - if (s->vl_eq_vlmax) { tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd), - MAXSZ(s), MAXSZ(s), cpu_fpr[a->rs1]); + MAXSZ(s), MAXSZ(s), t1); + mark_vs_dirty(s); } else { TCGv_ptr dest; TCGv_i32 desc; uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, VTA, s->vta); + data = FIELD_DP32(data, VDATA, VMA, s->vma); static gen_helper_vmv_vx * const fns[3] = { gen_helper_vmv_v_x_h, gen_helper_vmv_v_x_w, @@ -2229,26 +2805,48 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a) }; TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); + + t1 = tcg_temp_new_i64(); + /* NaN-box f[rs1] */ + do_nanbox(s, t1, cpu_fpr[a->rs1]); dest = tcg_temp_new_ptr(); - desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd)); - fns[s->sew - 1](dest, cpu_fpr[a->rs1], cpu_env, desc); + + fns[s->sew - 1](dest, t1, cpu_env, desc); tcg_temp_free_ptr(dest); - tcg_temp_free_i32(desc); + mark_vs_dirty(s); gen_set_label(over); } + tcg_temp_free_i64(t1); return true; } return false; } /* Single-Width Floating-Point/Integer Type-Convert Instructions */ -GEN_OPFV_TRANS(vfcvt_xu_f_v, opfv_check) -GEN_OPFV_TRANS(vfcvt_x_f_v, opfv_check) -GEN_OPFV_TRANS(vfcvt_f_xu_v, opfv_check) -GEN_OPFV_TRANS(vfcvt_f_x_v, opfv_check) +#define GEN_OPFV_CVT_TRANS(NAME, HELPER, FRM) \ +static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ +{ \ + static gen_helper_gvec_3_ptr * const fns[3] = { \ + gen_helper_##HELPER##_h, \ + gen_helper_##HELPER##_w, \ + gen_helper_##HELPER##_d \ + }; \ + return do_opfv(s, a, fns[s->sew - 1], opfv_check, FRM); \ +} + +GEN_OPFV_CVT_TRANS(vfcvt_xu_f_v, vfcvt_xu_f_v, RISCV_FRM_DYN) +GEN_OPFV_CVT_TRANS(vfcvt_x_f_v, vfcvt_x_f_v, RISCV_FRM_DYN) +GEN_OPFV_CVT_TRANS(vfcvt_f_xu_v, vfcvt_f_xu_v, RISCV_FRM_DYN) +GEN_OPFV_CVT_TRANS(vfcvt_f_x_v, vfcvt_f_x_v, RISCV_FRM_DYN) +/* Reuse the helper functions from vfcvt.xu.f.v and vfcvt.x.f.v */ +GEN_OPFV_CVT_TRANS(vfcvt_rtz_xu_f_v, vfcvt_xu_f_v, RISCV_FRM_RTZ) +GEN_OPFV_CVT_TRANS(vfcvt_rtz_x_f_v, vfcvt_x_f_v, RISCV_FRM_RTZ) /* Widening Floating-Point/Integer Type-Convert Instructions */ @@ -2258,46 +2856,118 @@ GEN_OPFV_TRANS(vfcvt_f_x_v, opfv_check) */ static bool opfv_widen_check(DisasContext *s, arg_rmr *a) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, true) && - vext_check_reg(s, a->rd, true) && - vext_check_reg(s, a->rs2, false) && - vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2, - 1 << s->lmul) && - (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0)); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_ds(s, a->rd, a->rs2, a->vm); } -#define GEN_OPFV_WIDEN_TRANS(NAME) \ +static bool opxfv_widen_check(DisasContext *s, arg_rmr *a) +{ + return opfv_widen_check(s, a) && + require_rvf(s) && + require_zve32f(s) && + require_zve64f(s); +} + +static bool opffv_widen_check(DisasContext *s, arg_rmr *a) +{ + 
return opfv_widen_check(s, a) && + require_scale_rvf(s) && + (s->sew != MO_8) && + require_scale_zve32f(s) && + require_scale_zve64f(s); +} + +#define GEN_OPFV_WIDEN_TRANS(NAME, CHECK, HELPER, FRM) \ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ { \ - if (opfv_widen_check(s, a)) { \ + if (CHECK(s, a)) { \ + if (FRM != RISCV_FRM_DYN) { \ + gen_set_rm(s, RISCV_FRM_DYN); \ + } \ + \ uint32_t data = 0; \ static gen_helper_gvec_3_ptr * const fns[2] = { \ - gen_helper_##NAME##_h, \ - gen_helper_##NAME##_w, \ + gen_helper_##HELPER##_h, \ + gen_helper_##HELPER##_w, \ }; \ TCGLabel *over = gen_new_label(); \ - gen_set_rm(s, 7); \ + gen_set_rm(s, FRM); \ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ \ - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ + data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, \ fns[s->sew - 1]); \ + mark_vs_dirty(s); \ gen_set_label(over); \ return true; \ } \ return false; \ } -GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v) -GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v) -GEN_OPFV_WIDEN_TRANS(vfwcvt_f_xu_v) -GEN_OPFV_WIDEN_TRANS(vfwcvt_f_x_v) -GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v) +GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v, opxfv_widen_check, vfwcvt_xu_f_v, + RISCV_FRM_DYN) +GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v, opxfv_widen_check, vfwcvt_x_f_v, + RISCV_FRM_DYN) +GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v, opffv_widen_check, vfwcvt_f_f_v, + RISCV_FRM_DYN) +/* Reuse the helper functions from vfwcvt.xu.f.v and vfwcvt.x.f.v */ +GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_xu_f_v, opxfv_widen_check, vfwcvt_xu_f_v, + RISCV_FRM_RTZ) +GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_x_f_v, opxfv_widen_check, vfwcvt_x_f_v, + RISCV_FRM_RTZ) + +static bool opfxv_widen_check(DisasContext *s, arg_rmr *a) +{ + return require_rvv(s) && + require_scale_rvf(s) && + vext_check_isa_ill(s) && + /* OPFV widening instructions ignore vs1 check */ + vext_check_ds(s, a->rd, a->rs2, a->vm) && + require_scale_zve32f(s) && + require_scale_zve64f(s); +} + +#define GEN_OPFXV_WIDEN_TRANS(NAME) \ +static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ +{ \ + if (opfxv_widen_check(s, a)) { \ + uint32_t data = 0; \ + static gen_helper_gvec_3_ptr * const fns[3] = { \ + gen_helper_##NAME##_b, \ + gen_helper_##NAME##_h, \ + gen_helper_##NAME##_w, \ + }; \ + TCGLabel *over = gen_new_label(); \ + gen_set_rm(s, RISCV_FRM_DYN); \ + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ + \ + data = FIELD_DP32(data, VDATA, VM, a->vm); \ + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ + data = FIELD_DP32(data, VDATA, VMA, s->vma); \ + tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ + vreg_ofs(s, a->rs2), cpu_env, \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, \ + fns[s->sew]); \ + mark_vs_dirty(s); \ + gen_set_label(over); \ + return true; \ + } \ + return false; \ +} + +GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_xu_v) +GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_x_v) /* Narrowing Floating-Point/Integer Type-Convert Instructions */ @@ -2307,46 +2977,125 @@ GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v) */ static bool opfv_narrow_check(DisasContext *s, arg_rmr *a) { - return (vext_check_isa_ill(s) && - 
vext_check_overlap_mask(s, a->rd, a->vm, false) && - vext_check_reg(s, a->rd, false) && - vext_check_reg(s, a->rs2, true) && - vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2, - 2 << s->lmul) && - (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0)); + return require_rvv(s) && + vext_check_isa_ill(s) && + /* OPFV narrowing instructions ignore vs1 check */ + vext_check_sd(s, a->rd, a->rs2, a->vm); } -#define GEN_OPFV_NARROW_TRANS(NAME) \ +static bool opfxv_narrow_check(DisasContext *s, arg_rmr *a) +{ + return opfv_narrow_check(s, a) && + require_rvf(s) && + (s->sew != MO_64) && + require_zve32f(s) && + require_zve64f(s); +} + +static bool opffv_narrow_check(DisasContext *s, arg_rmr *a) +{ + return opfv_narrow_check(s, a) && + require_scale_rvf(s) && + (s->sew != MO_8) && + require_scale_zve32f(s) && + require_scale_zve64f(s); +} + +#define GEN_OPFV_NARROW_TRANS(NAME, CHECK, HELPER, FRM) \ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ { \ - if (opfv_narrow_check(s, a)) { \ + if (CHECK(s, a)) { \ + if (FRM != RISCV_FRM_DYN) { \ + gen_set_rm(s, RISCV_FRM_DYN); \ + } \ + \ uint32_t data = 0; \ static gen_helper_gvec_3_ptr * const fns[2] = { \ - gen_helper_##NAME##_h, \ - gen_helper_##NAME##_w, \ + gen_helper_##HELPER##_h, \ + gen_helper_##HELPER##_w, \ }; \ TCGLabel *over = gen_new_label(); \ - gen_set_rm(s, 7); \ + gen_set_rm(s, FRM); \ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ \ - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ + data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, \ fns[s->sew - 1]); \ + mark_vs_dirty(s); \ gen_set_label(over); \ return true; \ } \ return false; \ } -GEN_OPFV_NARROW_TRANS(vfncvt_xu_f_v) -GEN_OPFV_NARROW_TRANS(vfncvt_x_f_v) -GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_v) -GEN_OPFV_NARROW_TRANS(vfncvt_f_x_v) -GEN_OPFV_NARROW_TRANS(vfncvt_f_f_v) +GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_w, opfxv_narrow_check, vfncvt_f_xu_w, + RISCV_FRM_DYN) +GEN_OPFV_NARROW_TRANS(vfncvt_f_x_w, opfxv_narrow_check, vfncvt_f_x_w, + RISCV_FRM_DYN) +GEN_OPFV_NARROW_TRANS(vfncvt_f_f_w, opffv_narrow_check, vfncvt_f_f_w, + RISCV_FRM_DYN) +/* Reuse the helper function from vfncvt.f.f.w */ +GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_narrow_check, vfncvt_f_f_w, + RISCV_FRM_ROD) + +static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a) +{ + return require_rvv(s) && + require_scale_rvf(s) && + vext_check_isa_ill(s) && + /* OPFV narrowing instructions ignore vs1 check */ + vext_check_sd(s, a->rd, a->rs2, a->vm) && + require_scale_zve32f(s) && + require_scale_zve64f(s); +} + +#define GEN_OPXFV_NARROW_TRANS(NAME, HELPER, FRM) \ +static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ +{ \ + if (opxfv_narrow_check(s, a)) { \ + if (FRM != RISCV_FRM_DYN) { \ + gen_set_rm(s, RISCV_FRM_DYN); \ + } \ + \ + uint32_t data = 0; \ + static gen_helper_gvec_3_ptr * const fns[3] = { \ + gen_helper_##HELPER##_b, \ + gen_helper_##HELPER##_h, \ + gen_helper_##HELPER##_w, \ + }; \ + TCGLabel *over = gen_new_label(); \ + gen_set_rm(s, FRM); \ + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ + \ + data = FIELD_DP32(data, VDATA, VM, a->vm); \ + data = 
FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ + data = FIELD_DP32(data, VDATA, VMA, s->vma); \ + tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ + vreg_ofs(s, a->rs2), cpu_env, \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, \ + fns[s->sew]); \ + mark_vs_dirty(s); \ + gen_set_label(over); \ + return true; \ + } \ + return false; \ +} + +GEN_OPXFV_NARROW_TRANS(vfncvt_xu_f_w, vfncvt_xu_f_w, RISCV_FRM_DYN) +GEN_OPXFV_NARROW_TRANS(vfncvt_x_f_w, vfncvt_x_f_w, RISCV_FRM_DYN) +/* Reuse the helper functions from vfncvt.xu.f.w and vfncvt.x.f.w */ +GEN_OPXFV_NARROW_TRANS(vfncvt_rtz_xu_f_w, vfncvt_xu_f_w, RISCV_FRM_RTZ) +GEN_OPXFV_NARROW_TRANS(vfncvt_rtz_x_f_w, vfncvt_x_f_w, RISCV_FRM_RTZ) /* *** Vector Reduction Operations @@ -2354,7 +3103,9 @@ GEN_OPFV_NARROW_TRANS(vfncvt_f_f_v) /* Vector Single-Width Integer Reduction Instructions */ static bool reduction_check(DisasContext *s, arg_rmrr *a) { - return vext_check_isa_ill(s) && vext_check_reg(s, a->rs2, false); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_reduction(s, a->rs2); } GEN_OPIVV_TRANS(vredsum_vs, reduction_check) @@ -2367,16 +3118,39 @@ GEN_OPIVV_TRANS(vredor_vs, reduction_check) GEN_OPIVV_TRANS(vredxor_vs, reduction_check) /* Vector Widening Integer Reduction Instructions */ -GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_check) -GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_check) +static bool reduction_widen_check(DisasContext *s, arg_rmrr *a) +{ + return reduction_check(s, a) && (s->sew < MO_64) && + ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)); +} + +GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check) /* Vector Single-Width Floating-Point Reduction Instructions */ -GEN_OPFVV_TRANS(vfredsum_vs, reduction_check) -GEN_OPFVV_TRANS(vfredmax_vs, reduction_check) -GEN_OPFVV_TRANS(vfredmin_vs, reduction_check) +static bool freduction_check(DisasContext *s, arg_rmrr *a) +{ + return reduction_check(s, a) && + require_rvf(s) && + require_zve32f(s) && + require_zve64f(s); +} + +GEN_OPFVV_TRANS(vfredusum_vs, freduction_check) +GEN_OPFVV_TRANS(vfredosum_vs, freduction_check) +GEN_OPFVV_TRANS(vfredmax_vs, freduction_check) +GEN_OPFVV_TRANS(vfredmin_vs, freduction_check) /* Vector Widening Floating-Point Reduction Instructions */ -GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, reduction_check) +static bool freduction_widen_check(DisasContext *s, arg_rmrr *a) +{ + return reduction_widen_check(s, a) && + require_scale_rvf(s) && + (s->sew != MO_8); +} + +GEN_OPFVV_WIDEN_TRANS(vfwredusum_vs, freduction_widen_check) +GEN_OPFVV_WIDEN_TRANS(vfwredosum_vs, freduction_widen_check) /* *** Vector Mask Operations @@ -2386,18 +3160,23 @@ GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, reduction_check) #define GEN_MM_TRANS(NAME) \ static bool trans_##NAME(DisasContext *s, arg_r *a) \ { \ - if (vext_check_isa_ill(s)) { \ + if (require_rvv(s) && \ + vext_check_isa_ill(s)) { \ uint32_t data = 0; \ gen_helper_gvec_4_ptr *fn = gen_helper_##NAME; \ TCGLabel *over = gen_new_label(); \ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ \ - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + data = \ + FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, fn); \ + s->cfg_ptr->vlen / 8, \ + 
s->cfg_ptr->vlen / 8, data, fn); \ + mark_vs_dirty(s); \ gen_set_label(over); \ return true; \ } \ @@ -2406,72 +3185,73 @@ static bool trans_##NAME(DisasContext *s, arg_r *a) \ GEN_MM_TRANS(vmand_mm) GEN_MM_TRANS(vmnand_mm) -GEN_MM_TRANS(vmandnot_mm) +GEN_MM_TRANS(vmandn_mm) GEN_MM_TRANS(vmxor_mm) GEN_MM_TRANS(vmor_mm) GEN_MM_TRANS(vmnor_mm) -GEN_MM_TRANS(vmornot_mm) +GEN_MM_TRANS(vmorn_mm) GEN_MM_TRANS(vmxnor_mm) -/* Vector mask population count vmpopc */ -static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a) +/* Vector count population in mask vcpop */ +static bool trans_vcpop_m(DisasContext *s, arg_rmr *a) { - if (vext_check_isa_ill(s)) { + if (require_rvv(s) && + vext_check_isa_ill(s) && + s->vstart == 0) { TCGv_ptr src2, mask; TCGv dst; TCGv_i32 desc; uint32_t data = 0; - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); data = FIELD_DP32(data, VDATA, VM, a->vm); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); mask = tcg_temp_new_ptr(); src2 = tcg_temp_new_ptr(); - dst = tcg_temp_new(); - desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + dst = dest_gpr(s, a->rd); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2)); tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); - gen_helper_vmpopc_m(dst, mask, src2, cpu_env, desc); - gen_set_gpr(a->rd, dst); + gen_helper_vcpop_m(dst, mask, src2, cpu_env, desc); + gen_set_gpr(s, a->rd, dst); tcg_temp_free_ptr(mask); tcg_temp_free_ptr(src2); - tcg_temp_free(dst); - tcg_temp_free_i32(desc); + return true; } return false; } /* vmfirst find-first-set mask bit */ -static bool trans_vmfirst_m(DisasContext *s, arg_rmr *a) +static bool trans_vfirst_m(DisasContext *s, arg_rmr *a) { - if (vext_check_isa_ill(s)) { + if (require_rvv(s) && + vext_check_isa_ill(s) && + s->vstart == 0) { TCGv_ptr src2, mask; TCGv dst; TCGv_i32 desc; uint32_t data = 0; - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); data = FIELD_DP32(data, VDATA, VM, a->vm); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); mask = tcg_temp_new_ptr(); src2 = tcg_temp_new_ptr(); - dst = tcg_temp_new(); - desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + dst = dest_gpr(s, a->rd); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2)); tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); - gen_helper_vmfirst_m(dst, mask, src2, cpu_env, desc); - gen_set_gpr(a->rd, dst); + gen_helper_vfirst_m(dst, mask, src2, cpu_env, desc); + gen_set_gpr(s, a->rd, dst); tcg_temp_free_ptr(mask); tcg_temp_free_ptr(src2); - tcg_temp_free(dst); - tcg_temp_free_i32(desc); return true; } return false; @@ -2483,19 +3263,27 @@ static bool trans_vmfirst_m(DisasContext *s, arg_rmr *a) #define GEN_M_TRANS(NAME) \ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ { \ - if (vext_check_isa_ill(s)) { \ + if (require_rvv(s) && \ + vext_check_isa_ill(s) && \ + require_vm(a->vm, a->rd) && \ + (a->rd != a->rs2) && \ + (s->vstart == 0)) { \ uint32_t data = 0; \ gen_helper_gvec_3_ptr *fn = gen_helper_##NAME; \ TCGLabel *over = gen_new_label(); \ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ \ - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + data = \ + FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\ + data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), \ vreg_ofs(s, 0), vreg_ofs(s, a->rs2), 
\ - cpu_env, s->vlen / 8, s->vlen / 8, \ + cpu_env, s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, \ data, fn); \ + mark_vs_dirty(s); \ gen_set_label(over); \ return true; \ } \ @@ -2506,27 +3294,38 @@ GEN_M_TRANS(vmsbf_m) GEN_M_TRANS(vmsif_m) GEN_M_TRANS(vmsof_m) -/* Vector Iota Instruction */ +/* + * Vector Iota Instruction + * + * 1. The destination register cannot overlap the source register. + * 2. If masked, cannot overlap the mask register ('v0'). + * 3. An illegal instruction exception is raised if vstart is non-zero. + */ static bool trans_viota_m(DisasContext *s, arg_viota_m *a) { - if (vext_check_isa_ill(s) && - vext_check_reg(s, a->rd, false) && - vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2, 1) && - (a->vm != 0 || a->rd != 0)) { + if (require_rvv(s) && + vext_check_isa_ill(s) && + !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs2, 1) && + require_vm(a->vm, a->rd) && + require_align(a->rd, s->lmul) && + (s->vstart == 0)) { uint32_t data = 0; TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); data = FIELD_DP32(data, VDATA, VM, a->vm); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, VTA, s->vta); + data = FIELD_DP32(data, VDATA, VMA, s->vma); static gen_helper_gvec_3_ptr * const fns[4] = { gen_helper_viota_m_b, gen_helper_viota_m_h, gen_helper_viota_m_w, gen_helper_viota_m_d, }; tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs2), cpu_env, - s->vlen / 8, s->vlen / 8, data, fns[s->sew]); + s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data, fns[s->sew]); + mark_vs_dirty(s); gen_set_label(over); return true; } @@ -2536,23 +3335,28 @@ static bool trans_viota_m(DisasContext *s, arg_viota_m *a) /* Vector Element Index Instruction */ static bool trans_vid_v(DisasContext *s, arg_vid_v *a) { - if (vext_check_isa_ill(s) && - vext_check_reg(s, a->rd, false) && - vext_check_overlap_mask(s, a->rd, a->vm, false)) { + if (require_rvv(s) && + vext_check_isa_ill(s) && + require_align(a->rd, s->lmul) && + require_vm(a->vm, a->rd)) { uint32_t data = 0; TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); data = FIELD_DP32(data, VDATA, VM, a->vm); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, VTA, s->vta); + data = FIELD_DP32(data, VDATA, VMA, s->vma); static gen_helper_gvec_2_ptr * const fns[4] = { gen_helper_vid_v_b, gen_helper_vid_v_h, gen_helper_vid_v_w, gen_helper_vid_v_d, }; tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), - cpu_env, s->vlen / 8, s->vlen / 8, + cpu_env, s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data, fns[s->sew]); + mark_vs_dirty(s); gen_set_label(over); return true; } @@ -2563,20 +3367,30 @@ static bool trans_vid_v(DisasContext *s, arg_vid_v *a) *** Vector Permutation Instructions */ -/* Integer Extract Instruction */ - static void load_element(TCGv_i64 dest, TCGv_ptr base, - int ofs, int sew) + int ofs, int sew, bool sign) { switch (sew) { case MO_8: - tcg_gen_ld8u_i64(dest, base, ofs); + if (!sign) { + tcg_gen_ld8u_i64(dest, base, ofs); + } else { + tcg_gen_ld8s_i64(dest, base, ofs); + } break; case MO_16: - tcg_gen_ld16u_i64(dest, base, ofs); + if (!sign) { + tcg_gen_ld16u_i64(dest, base, ofs); + } else { + tcg_gen_ld16s_i64(dest, base, ofs); + } break; case MO_32: - tcg_gen_ld32u_i64(dest, base, ofs); + if (!sign) { + tcg_gen_ld32u_i64(dest, 
base, ofs); + } else { + tcg_gen_ld32s_i64(dest, base, ofs); + } break; case MO_64: tcg_gen_ld_i64(dest, base, ofs); @@ -2590,7 +3404,7 @@ static void load_element(TCGv_i64 dest, TCGv_ptr base, /* offset of the idx element with base regsiter r */ static uint32_t endian_ofs(DisasContext *s, int r, int idx) { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew); #else return vreg_ofs(s, r) + (idx << s->sew); @@ -2600,7 +3414,7 @@ static uint32_t endian_ofs(DisasContext *s, int r, int idx) /* adjust the index according to the endian */ static void endian_adjust(TCGv_i32 ofs, int sew) { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN tcg_gen_xori_i32(ofs, ofs, 7 >> sew); #endif } @@ -2631,48 +3445,25 @@ static void vec_element_loadx(DisasContext *s, TCGv_i64 dest, /* Perform the load. */ load_element(dest, base, - vreg_ofs(s, vreg), s->sew); + vreg_ofs(s, vreg), s->sew, false); tcg_temp_free_ptr(base); tcg_temp_free_i32(ofs); /* Flush out-of-range indexing to zero. */ - t_vlmax = tcg_const_i64(vlmax); - t_zero = tcg_const_i64(0); + t_vlmax = tcg_constant_i64(vlmax); + t_zero = tcg_constant_i64(0); tcg_gen_extu_tl_i64(t_idx, idx); tcg_gen_movcond_i64(TCG_COND_LTU, dest, t_idx, t_vlmax, dest, t_zero); - tcg_temp_free_i64(t_vlmax); - tcg_temp_free_i64(t_zero); tcg_temp_free_i64(t_idx); } static void vec_element_loadi(DisasContext *s, TCGv_i64 dest, - int vreg, int idx) + int vreg, int idx, bool sign) { - load_element(dest, cpu_env, endian_ofs(s, vreg, idx), s->sew); -} - -static bool trans_vext_x_v(DisasContext *s, arg_r *a) -{ - TCGv_i64 tmp = tcg_temp_new_i64(); - TCGv dest = tcg_temp_new(); - - if (a->rs1 == 0) { - /* Special case vmv.x.s rd, vs2. */ - vec_element_loadi(s, tmp, a->rs2, 0); - } else { - /* This instruction ignores LMUL and vector register groups */ - int vlmax = s->vlen >> (3 + s->sew); - vec_element_loadx(s, tmp, a->rs2, cpu_gpr[a->rs1], vlmax); - } - tcg_gen_trunc_i64_tl(dest, tmp); - gen_set_gpr(a->rd, dest); - - tcg_temp_free(dest); - tcg_temp_free_i64(tmp); - return true; + load_element(dest, cpu_env, endian_ofs(s, vreg, idx), s->sew, sign); } /* Integer Scalar Move Instruction */ @@ -2709,26 +3500,55 @@ static void vec_element_storei(DisasContext *s, int vreg, store_element(val, cpu_env, endian_ofs(s, vreg, idx), s->sew); } +/* vmv.x.s rd, vs2 # x[rd] = vs2[0] */ +static bool trans_vmv_x_s(DisasContext *s, arg_vmv_x_s *a) +{ + if (require_rvv(s) && + vext_check_isa_ill(s)) { + TCGv_i64 t1; + TCGv dest; + + t1 = tcg_temp_new_i64(); + dest = tcg_temp_new(); + /* + * load vreg and sign-extend to 64 bits, + * then truncate to XLEN bits before storing to gpr. 
+ */ + vec_element_loadi(s, t1, a->rs2, 0, true); + tcg_gen_trunc_i64_tl(dest, t1); + gen_set_gpr(s, a->rd, dest); + tcg_temp_free_i64(t1); + tcg_temp_free(dest); + + return true; + } + return false; +} + /* vmv.s.x vd, rs1 # vd[0] = rs1 */ static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a) { - if (vext_check_isa_ill(s)) { + if (require_rvv(s) && + vext_check_isa_ill(s)) { /* This instruction ignores LMUL and vector register groups */ - int maxsz = s->vlen >> 3; TCGv_i64 t1; + TCGv s1; TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); - tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd), maxsz, maxsz, 0); - if (a->rs1 == 0) { - goto done; - } + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); t1 = tcg_temp_new_i64(); - tcg_gen_extu_tl_i64(t1, cpu_gpr[a->rs1]); + + /* + * load gpr and sign-extend to 64 bits, + * then truncate to SEW bits when storing to vreg. + */ + s1 = get_gpr(s, a->rs1, EXT_NONE); + tcg_gen_ext_tl_i64(t1, s1); vec_element_storei(s, a->rd, 0, t1); tcg_temp_free_i64(t1); - done: + mark_vs_dirty(s); gen_set_label(over); return true; } @@ -2738,14 +3558,23 @@ static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a) /* Floating-Point Scalar Move Instructions */ static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a) { - if (!s->vill && has_ext(s, RVF) && - (s->mstatus_fs != 0) && (s->sew != 0)) { - unsigned int len = 8 << s->sew; + if (require_rvv(s) && + require_rvf(s) && + vext_check_isa_ill(s) && + require_zve32f(s) && + require_zve64f(s)) { + gen_set_rm(s, RISCV_FRM_DYN); - vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0); - if (len < 64) { - tcg_gen_ori_i64(cpu_fpr[a->rd], cpu_fpr[a->rd], - MAKE_64BIT_MASK(len, 64 - len)); + unsigned int ofs = (8 << s->sew); + unsigned int len = 64 - ofs; + TCGv_i64 t_nan; + + vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0, false); + /* NaN-box f[rd] as necessary for SEW */ + if (len) { + t_nan = tcg_constant_i64(UINT64_MAX); + tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rd], + t_nan, ofs, len); } mark_fs_dirty(s); @@ -2757,27 +3586,28 @@ static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a) /* vfmv.s.f vd, rs1 # vd[0] = rs1 (vs2=0) */ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a) { - if (!s->vill && has_ext(s, RVF) && (s->sew != 0)) { - TCGv_i64 t1; + if (require_rvv(s) && + require_rvf(s) && + vext_check_isa_ill(s) && + require_zve32f(s) && + require_zve64f(s)) { + gen_set_rm(s, RISCV_FRM_DYN); + /* The instructions ignore LMUL and vector register group. 
*/ - uint32_t vlmax = s->vlen >> 3; - - /* if vl == 0, skip vector register write back */ + TCGv_i64 t1; TCGLabel *over = gen_new_label(); + + /* if vl == 0 or vstart >= vl, skip vector register write back */ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); - /* zeroed all elements */ - tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd), vlmax, vlmax, 0); - - /* NaN-box f[rs1] as necessary for SEW */ + /* NaN-box f[rs1] */ t1 = tcg_temp_new_i64(); - if (s->sew == MO_64 && !has_ext(s, RVD)) { - tcg_gen_ori_i64(t1, cpu_fpr[a->rs1], MAKE_64BIT_MASK(32, 32)); - } else { - tcg_gen_mov_i64(t1, cpu_fpr[a->rs1]); - } + do_nanbox(s, t1, cpu_fpr[a->rs1]); + vec_element_storei(s, a->rd, 0, t1); tcg_temp_free_i64(t1); + mark_vs_dirty(s); gen_set_label(over); return true; } @@ -2787,41 +3617,86 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a) /* Vector Slide Instructions */ static bool slideup_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, true) && - vext_check_reg(s, a->rd, false) && - vext_check_reg(s, a->rs2, false) && - (a->rd != a->rs2)); + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_slide(s, a->rd, a->rs2, a->vm, true); } GEN_OPIVX_TRANS(vslideup_vx, slideup_check) GEN_OPIVX_TRANS(vslide1up_vx, slideup_check) -GEN_OPIVI_TRANS(vslideup_vi, 1, vslideup_vx, slideup_check) +GEN_OPIVI_TRANS(vslideup_vi, IMM_ZX, vslideup_vx, slideup_check) -GEN_OPIVX_TRANS(vslidedown_vx, opivx_check) -GEN_OPIVX_TRANS(vslide1down_vx, opivx_check) -GEN_OPIVI_TRANS(vslidedown_vi, 1, vslidedown_vx, opivx_check) +static bool slidedown_check(DisasContext *s, arg_rmrr *a) +{ + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_slide(s, a->rd, a->rs2, a->vm, false); +} + +GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check) +GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check) +GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check) + +/* Vector Floating-Point Slide Instructions */ +static bool fslideup_check(DisasContext *s, arg_rmrr *a) +{ + return slideup_check(s, a) && + require_rvf(s) && + require_zve32f(s) && + require_zve64f(s); +} + +static bool fslidedown_check(DisasContext *s, arg_rmrr *a) +{ + return slidedown_check(s, a) && + require_rvf(s) && + require_zve32f(s) && + require_zve64f(s); +} + +GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check) +GEN_OPFVF_TRANS(vfslide1down_vf, fslidedown_check) /* Vector Register Gather Instruction */ static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, true) && - vext_check_reg(s, a->rd, false) && - vext_check_reg(s, a->rs1, false) && - vext_check_reg(s, a->rs2, false) && - (a->rd != a->rs2) && (a->rd != a->rs1)); + return require_rvv(s) && + vext_check_isa_ill(s) && + require_align(a->rd, s->lmul) && + require_align(a->rs1, s->lmul) && + require_align(a->rs2, s->lmul) && + (a->rd != a->rs2 && a->rd != a->rs1) && + require_vm(a->vm, a->rd); +} + +static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a) +{ + int8_t emul = MO_16 - s->sew + s->lmul; + return require_rvv(s) && + vext_check_isa_ill(s) && + (emul >= -3 && emul <= 3) && + require_align(a->rd, s->lmul) && + require_align(a->rs1, emul) && + require_align(a->rs2, s->lmul) && + (a->rd != a->rs2 && a->rd != a->rs1) && + !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), + a->rs1, 1 << MAX(emul, 0)) && + !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), + a->rs2, 1 << 
MAX(s->lmul, 0)) && + require_vm(a->vm, a->rd); } GEN_OPIVV_TRANS(vrgather_vv, vrgather_vv_check) +GEN_OPIVV_TRANS(vrgatherei16_vv, vrgatherei16_vv_check) static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a) { - return (vext_check_isa_ill(s) && - vext_check_overlap_mask(s, a->rd, a->vm, true) && - vext_check_reg(s, a->rd, false) && - vext_check_reg(s, a->rs2, false) && - (a->rd != a->rs2)); + return require_rvv(s) && + vext_check_isa_ill(s) && + require_align(a->rd, s->lmul) && + require_align(a->rs2, s->lmul) && + (a->rd != a->rs2) && + require_vm(a->vm, a->rd); } /* vrgather.vx vd, vs2, rs1, vm # vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */ @@ -2831,12 +3706,13 @@ static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a) return false; } - if (a->vm && s->vl_eq_vlmax) { - int vlmax = s->vlen / s->mlen; + if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) { + int scale = s->lmul - (s->sew + 3); + int vlmax = s->cfg_ptr->vlen >> -scale; TCGv_i64 dest = tcg_temp_new_i64(); if (a->rs1 == 0) { - vec_element_loadi(s, dest, a->rs2, 0); + vec_element_loadi(s, dest, a->rs2, 0, false); } else { vec_element_loadx(s, dest, a->rs2, cpu_gpr[a->rs1], vlmax); } @@ -2844,6 +3720,7 @@ static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a) tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd), MAXSZ(s), MAXSZ(s), dest); tcg_temp_free_i64(dest); + mark_vs_dirty(s); } else { static gen_helper_opivx * const fns[4] = { gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h, @@ -2861,33 +3738,44 @@ static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a) return false; } - if (a->vm && s->vl_eq_vlmax) { - if (a->rs1 >= s->vlen / s->mlen) { - tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd), + if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) { + int scale = s->lmul - (s->sew + 3); + int vlmax = s->cfg_ptr->vlen >> -scale; + if (a->rs1 >= vlmax) { + tcg_gen_gvec_dup_imm(MO_64, vreg_ofs(s, a->rd), MAXSZ(s), MAXSZ(s), 0); } else { tcg_gen_gvec_dup_mem(s->sew, vreg_ofs(s, a->rd), endian_ofs(s, a->rs2, a->rs1), MAXSZ(s), MAXSZ(s)); } + mark_vs_dirty(s); } else { static gen_helper_opivx * const fns[4] = { gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h, gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d }; - return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s, 1); + return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], + s, IMM_ZX); } return true; } -/* Vector Compress Instruction */ +/* + * Vector Compress Instruction + * + * The destination vector register group cannot overlap the + * source vector register group or the source mask register. 
+ */ static bool vcompress_vm_check(DisasContext *s, arg_r *a) { - return (vext_check_isa_ill(s) && - vext_check_reg(s, a->rd, false) && - vext_check_reg(s, a->rs2, false) && - vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs1, 1) && - (a->rd != a->rs2)); + return require_rvv(s) && + vext_check_isa_ill(s) && + require_align(a->rd, s->lmul) && + require_align(a->rs2, s->lmul) && + (a->rd != a->rs2) && + !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs1, 1) && + (s->vstart == 0); } static bool trans_vcompress_vm(DisasContext *s, arg_r *a) @@ -2901,14 +3789,135 @@ static bool trans_vcompress_vm(DisasContext *s, arg_r *a) TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, VTA, s->vta); tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), - cpu_env, s->vlen / 8, s->vlen / 8, data, + cpu_env, s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data, fns[s->sew]); + mark_vs_dirty(s); gen_set_label(over); return true; } return false; } + +/* + * Whole Vector Register Move Instructions ignore vtype and vl setting. + * Thus, we don't need to check vill bit. (Section 16.6) + */ +#define GEN_VMV_WHOLE_TRANS(NAME, LEN) \ +static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \ +{ \ + if (require_rvv(s) && \ + QEMU_IS_ALIGNED(a->rd, LEN) && \ + QEMU_IS_ALIGNED(a->rs2, LEN)) { \ + uint32_t maxsz = (s->cfg_ptr->vlen >> 3) * LEN; \ + if (s->vstart == 0) { \ + /* EEW = 8 */ \ + tcg_gen_gvec_mov(MO_8, vreg_ofs(s, a->rd), \ + vreg_ofs(s, a->rs2), maxsz, maxsz); \ + mark_vs_dirty(s); \ + } else { \ + TCGLabel *over = gen_new_label(); \ + tcg_gen_brcondi_tl(TCG_COND_GEU, cpu_vstart, maxsz, over); \ + tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), \ + cpu_env, maxsz, maxsz, 0, gen_helper_vmvr_v); \ + mark_vs_dirty(s); \ + gen_set_label(over); \ + } \ + return true; \ + } \ + return false; \ +} + +GEN_VMV_WHOLE_TRANS(vmv1r_v, 1) +GEN_VMV_WHOLE_TRANS(vmv2r_v, 2) +GEN_VMV_WHOLE_TRANS(vmv4r_v, 4) +GEN_VMV_WHOLE_TRANS(vmv8r_v, 8) + +static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div) +{ + uint8_t from = (s->sew + 3) - div; + bool ret = require_rvv(s) && + (from >= 3 && from <= 8) && + (a->rd != a->rs2) && + require_align(a->rd, s->lmul) && + require_align(a->rs2, s->lmul - div) && + require_vm(a->vm, a->rd) && + require_noover(a->rd, s->lmul, a->rs2, s->lmul - div); + return ret; +} + +static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq) +{ + uint32_t data = 0; + gen_helper_gvec_3_ptr *fn; + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); + + static gen_helper_gvec_3_ptr * const fns[6][4] = { + { + NULL, gen_helper_vzext_vf2_h, + gen_helper_vzext_vf2_w, gen_helper_vzext_vf2_d + }, + { + NULL, NULL, + gen_helper_vzext_vf4_w, gen_helper_vzext_vf4_d, + }, + { + NULL, NULL, + NULL, gen_helper_vzext_vf8_d + }, + { + NULL, gen_helper_vsext_vf2_h, + gen_helper_vsext_vf2_w, gen_helper_vsext_vf2_d + }, + { + NULL, NULL, + gen_helper_vsext_vf4_w, gen_helper_vsext_vf4_d, + }, + { + NULL, NULL, + NULL, gen_helper_vsext_vf8_d + } + }; + + fn = fns[seq][s->sew]; + if (fn == NULL) { + return false; + } + + data = FIELD_DP32(data, VDATA, VM, a->vm); + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, VTA, s->vta); + data = FIELD_DP32(data, VDATA, VMA, s->vma); + 
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), + vreg_ofs(s, a->rs2), cpu_env, + s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data, fn); + + mark_vs_dirty(s); + gen_set_label(over); + return true; +} + +/* Vector Integer Extension */ +#define GEN_INT_EXT_TRANS(NAME, DIV, SEQ) \ +static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ +{ \ + if (int_ext_check(s, a, DIV)) { \ + return int_ext_op(s, a, SEQ); \ + } \ + return false; \ +} + +GEN_INT_EXT_TRANS(vzext_vf2, 1, 0) +GEN_INT_EXT_TRANS(vzext_vf4, 2, 1) +GEN_INT_EXT_TRANS(vzext_vf8, 3, 2) +GEN_INT_EXT_TRANS(vsext_vf2, 1, 3) +GEN_INT_EXT_TRANS(vsext_vf4, 2, 4) +GEN_INT_EXT_TRANS(vsext_vf8, 3, 5) diff --git a/target/riscv/insn_trans/trans_rvzfh.c.inc b/target/riscv/insn_trans/trans_rvzfh.c.inc new file mode 100644 index 0000000000..5d07150cd0 --- /dev/null +++ b/target/riscv/insn_trans/trans_rvzfh.c.inc @@ -0,0 +1,679 @@ +/* + * RISC-V translation routines for the RV64Zfh Standard Extension. + * + * Copyright (c) 2020 Chih-Min Chao, chihmin.chao@sifive.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#define REQUIRE_ZFH(ctx) do { \ + if (!ctx->cfg_ptr->ext_zfh) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_ZHINX_OR_ZFH(ctx) do { \ + if (!ctx->cfg_ptr->ext_zhinx && !ctx->cfg_ptr->ext_zfh) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_ZFH_OR_ZFHMIN(ctx) do { \ + if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin)) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx) do { \ + if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin || \ + ctx->cfg_ptr->ext_zhinx || ctx->cfg_ptr->ext_zhinxmin)) { \ + return false; \ + } \ +} while (0) + +static bool trans_flh(DisasContext *ctx, arg_flh *a) +{ + TCGv_i64 dest; + TCGv t0; + + REQUIRE_FPU; + REQUIRE_ZFH_OR_ZFHMIN(ctx); + + t0 = get_gpr(ctx, a->rs1, EXT_NONE); + if (a->imm) { + TCGv temp = temp_new(ctx); + tcg_gen_addi_tl(temp, t0, a->imm); + t0 = temp; + } + + dest = cpu_fpr[a->rd]; + tcg_gen_qemu_ld_i64(dest, t0, ctx->mem_idx, MO_TEUW); + gen_nanbox_h(dest, dest); + + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fsh(DisasContext *ctx, arg_fsh *a) +{ + TCGv t0; + + REQUIRE_FPU; + REQUIRE_ZFH_OR_ZFHMIN(ctx); + + t0 = get_gpr(ctx, a->rs1, EXT_NONE); + if (a->imm) { + TCGv temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, t0, a->imm); + t0 = temp; + } + + tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], t0, ctx->mem_idx, MO_TEUW); + + return true; +} + +static bool trans_fmadd_h(DisasContext *ctx, arg_fmadd_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3); + + gen_set_rm(ctx, a->rm); + gen_helper_fmadd_h(dest, cpu_env, src1, src2, src3); + gen_set_fpr_hs(ctx, a->rd, dest); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fmsub_h(DisasContext *ctx, arg_fmsub_h *a) +{ + REQUIRE_FPU; + 
REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3); + + gen_set_rm(ctx, a->rm); + gen_helper_fmsub_h(dest, cpu_env, src1, src2, src3); + gen_set_fpr_hs(ctx, a->rd, dest); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fnmsub_h(DisasContext *ctx, arg_fnmsub_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3); + + gen_set_rm(ctx, a->rm); + gen_helper_fnmsub_h(dest, cpu_env, src1, src2, src3); + gen_set_fpr_hs(ctx, a->rd, dest); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fnmadd_h(DisasContext *ctx, arg_fnmadd_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3); + + gen_set_rm(ctx, a->rm); + gen_helper_fnmadd_h(dest, cpu_env, src1, src2, src3); + gen_set_fpr_hs(ctx, a->rd, dest); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fadd_h(DisasContext *ctx, arg_fadd_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + + gen_set_rm(ctx, a->rm); + gen_helper_fadd_h(dest, cpu_env, src1, src2); + gen_set_fpr_hs(ctx, a->rd, dest); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fsub_h(DisasContext *ctx, arg_fsub_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + + gen_set_rm(ctx, a->rm); + gen_helper_fsub_h(dest, cpu_env, src1, src2); + gen_set_fpr_hs(ctx, a->rd, dest); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fmul_h(DisasContext *ctx, arg_fmul_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + + gen_set_rm(ctx, a->rm); + gen_helper_fmul_h(dest, cpu_env, src1, src2); + gen_set_fpr_hs(ctx, a->rd, dest); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fdiv_h(DisasContext *ctx, arg_fdiv_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + + gen_set_rm(ctx, a->rm); + gen_helper_fdiv_h(dest, cpu_env, src1, src2); + gen_set_fpr_hs(ctx, a->rd, dest); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fsqrt_h(DisasContext *ctx, arg_fsqrt_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + + gen_set_rm(ctx, a->rm); + gen_helper_fsqrt_h(dest, cpu_env, src1); + gen_set_fpr_hs(ctx, a->rd, dest); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fsgnj_h(DisasContext *ctx, arg_fsgnj_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + + if (a->rs1 == a->rs2) { /* FMOV */ + if (!ctx->cfg_ptr->ext_zfinx) { + gen_check_nanbox_h(dest, src1); + } else { + tcg_gen_ext16s_i64(dest, src1); + } + } else { + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + + 
if (!ctx->cfg_ptr->ext_zfinx) { + TCGv_i64 rs1 = tcg_temp_new_i64(); + TCGv_i64 rs2 = tcg_temp_new_i64(); + gen_check_nanbox_h(rs1, src1); + gen_check_nanbox_h(rs2, src2); + + /* This formulation retains the nanboxing of rs2 in normal 'Zfh'. */ + tcg_gen_deposit_i64(dest, rs2, rs1, 0, 15); + + tcg_temp_free_i64(rs1); + tcg_temp_free_i64(rs2); + } else { + tcg_gen_deposit_i64(dest, src2, src1, 0, 15); + tcg_gen_ext16s_i64(dest, dest); + } + } + gen_set_fpr_hs(ctx, a->rd, dest); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fsgnjn_h(DisasContext *ctx, arg_fsgnjn_h *a) +{ + TCGv_i64 rs1, rs2, mask; + + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + + rs1 = tcg_temp_new_i64(); + if (!ctx->cfg_ptr->ext_zfinx) { + gen_check_nanbox_h(rs1, src1); + } else { + tcg_gen_mov_i64(rs1, src1); + } + + if (a->rs1 == a->rs2) { /* FNEG */ + tcg_gen_xori_i64(dest, rs1, MAKE_64BIT_MASK(15, 1)); + } else { + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + rs2 = tcg_temp_new_i64(); + + if (!ctx->cfg_ptr->ext_zfinx) { + gen_check_nanbox_h(rs2, src2); + } else { + tcg_gen_mov_i64(rs2, src2); + } + + /* + * Replace bit 15 in rs1 with inverse in rs2. + * This formulation retains the nanboxing of rs1. + */ + mask = tcg_const_i64(~MAKE_64BIT_MASK(15, 1)); + tcg_gen_not_i64(rs2, rs2); + tcg_gen_andc_i64(rs2, rs2, mask); + tcg_gen_and_i64(dest, mask, rs1); + tcg_gen_or_i64(dest, dest, rs2); + + tcg_temp_free_i64(mask); + tcg_temp_free_i64(rs2); + } + /* signed-extended intead of nanboxing for result if enable zfinx */ + if (ctx->cfg_ptr->ext_zfinx) { + tcg_gen_ext16s_i64(dest, dest); + } + tcg_temp_free_i64(rs1); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fsgnjx_h(DisasContext *ctx, arg_fsgnjx_h *a) +{ + TCGv_i64 rs1, rs2; + + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + + rs1 = tcg_temp_new_i64(); + if (!ctx->cfg_ptr->ext_zfinx) { + gen_check_nanbox_h(rs1, src1); + } else { + tcg_gen_mov_i64(rs1, src1); + } + + if (a->rs1 == a->rs2) { /* FABS */ + tcg_gen_andi_i64(dest, rs1, ~MAKE_64BIT_MASK(15, 1)); + } else { + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + rs2 = tcg_temp_new_i64(); + + if (!ctx->cfg_ptr->ext_zfinx) { + gen_check_nanbox_h(rs2, src2); + } else { + tcg_gen_mov_i64(rs2, src2); + } + + /* + * Xor bit 15 in rs1 with that in rs2. + * This formulation retains the nanboxing of rs1. 
+ */ + tcg_gen_andi_i64(dest, rs2, MAKE_64BIT_MASK(15, 1)); + tcg_gen_xor_i64(dest, rs1, dest); + + tcg_temp_free_i64(rs2); + } + /* signed-extended intead of nanboxing for result if enable zfinx */ + if (ctx->cfg_ptr->ext_zfinx) { + tcg_gen_ext16s_i64(dest, dest); + } + tcg_temp_free_i64(rs1); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fmin_h(DisasContext *ctx, arg_fmin_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + + gen_helper_fmin_h(dest, cpu_env, src1, src2); + gen_set_fpr_hs(ctx, a->rd, dest); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fmax_h(DisasContext *ctx, arg_fmax_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + + gen_helper_fmax_h(dest, cpu_env, src1, src2); + gen_set_fpr_hs(ctx, a->rd, dest); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fcvt_s_h(DisasContext *ctx, arg_fcvt_s_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_s_h(dest, cpu_env, src1); + gen_set_fpr_hs(ctx, a->rd, dest); + + mark_fs_dirty(ctx); + + return true; +} + +static bool trans_fcvt_d_h(DisasContext *ctx, arg_fcvt_d_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx); + REQUIRE_ZDINX_OR_D(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_d_h(dest, cpu_env, src1); + gen_set_fpr_d(ctx, a->rd, dest); + + mark_fs_dirty(ctx); + + return true; +} + +static bool trans_fcvt_h_s(DisasContext *ctx, arg_fcvt_h_s *a) +{ + REQUIRE_FPU; + REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_h_s(dest, cpu_env, src1); + gen_set_fpr_hs(ctx, a->rd, dest); + mark_fs_dirty(ctx); + + return true; +} + +static bool trans_fcvt_h_d(DisasContext *ctx, arg_fcvt_h_d *a) +{ + REQUIRE_FPU; + REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx); + REQUIRE_ZDINX_OR_D(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_d(ctx, a->rs1); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_h_d(dest, cpu_env, src1); + gen_set_fpr_hs(ctx, a->rd, dest); + mark_fs_dirty(ctx); + + return true; +} + +static bool trans_feq_h(DisasContext *ctx, arg_feq_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + + gen_helper_feq_h(dest, cpu_env, src1, src2); + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool trans_flt_h(DisasContext *ctx, arg_flt_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + + gen_helper_flt_h(dest, cpu_env, src1, src2); + gen_set_gpr(ctx, a->rd, dest); + + return true; +} + +static bool trans_fle_h(DisasContext *ctx, arg_fle_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2); + + gen_helper_fle_h(dest, cpu_env, src1, src2); + 
gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool trans_fclass_h(DisasContext *ctx, arg_fclass_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + + gen_helper_fclass_h(dest, cpu_env, src1); + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool trans_fcvt_w_h(DisasContext *ctx, arg_fcvt_w_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_w_h(dest, cpu_env, src1); + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool trans_fcvt_wu_h(DisasContext *ctx, arg_fcvt_wu_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_wu_h(dest, cpu_env, src1); + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool trans_fcvt_h_w(DisasContext *ctx, arg_fcvt_h_w *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_h_w(dest, cpu_env, t0); + gen_set_fpr_hs(ctx, a->rd, dest); + + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fcvt_h_wu(DisasContext *ctx, arg_fcvt_h_wu *a) +{ + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_h_wu(dest, cpu_env, t0); + gen_set_fpr_hs(ctx, a->rd, dest); + + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fmv_x_h(DisasContext *ctx, arg_fmv_x_h *a) +{ + REQUIRE_FPU; + REQUIRE_ZFH_OR_ZFHMIN(ctx); + + TCGv dest = dest_gpr(ctx, a->rd); + +#if defined(TARGET_RISCV64) + /* 16 bits -> 64 bits */ + tcg_gen_ext16s_tl(dest, cpu_fpr[a->rs1]); +#else + /* 16 bits -> 32 bits */ + tcg_gen_extrl_i64_i32(dest, cpu_fpr[a->rs1]); + tcg_gen_ext16s_tl(dest, dest); +#endif + + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool trans_fmv_h_x(DisasContext *ctx, arg_fmv_h_x *a) +{ + REQUIRE_FPU; + REQUIRE_ZFH_OR_ZFHMIN(ctx); + + TCGv t0 = get_gpr(ctx, a->rs1, EXT_ZERO); + + tcg_gen_extu_tl_i64(cpu_fpr[a->rd], t0); + gen_nanbox_h(cpu_fpr[a->rd], cpu_fpr[a->rd]); + + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fcvt_l_h(DisasContext *ctx, arg_fcvt_l_h *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_l_h(dest, cpu_env, src1); + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool trans_fcvt_lu_h(DisasContext *ctx, arg_fcvt_lu_h *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_lu_h(dest, cpu_env, src1); + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool trans_fcvt_h_l(DisasContext *ctx, arg_fcvt_h_l *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_FPU; + REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_h_l(dest, cpu_env, t0); + gen_set_fpr_hs(ctx, a->rd, dest); + + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fcvt_h_lu(DisasContext *ctx, arg_fcvt_h_lu *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_FPU; + 
REQUIRE_ZHINX_OR_ZFH(ctx); + + TCGv_i64 dest = dest_fpr(ctx, a->rd); + TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_h_lu(dest, cpu_env, t0); + gen_set_fpr_hs(ctx, a->rd, dest); + + mark_fs_dirty(ctx); + return true; +} diff --git a/target/riscv/insn_trans/trans_svinval.c.inc b/target/riscv/insn_trans/trans_svinval.c.inc new file mode 100644 index 0000000000..2682bd969f --- /dev/null +++ b/target/riscv/insn_trans/trans_svinval.c.inc @@ -0,0 +1,75 @@ +/* + * RISC-V translation routines for the Svinval Standard Instruction Set. + * + * Copyright (c) 2020-2022 PLCT lab + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#define REQUIRE_SVINVAL(ctx) do { \ + if (!ctx->cfg_ptr->ext_svinval) { \ + return false; \ + } \ +} while (0) + +static bool trans_sinval_vma(DisasContext *ctx, arg_sinval_vma *a) +{ + REQUIRE_SVINVAL(ctx); + /* Do the same as sfence.vma currently */ + REQUIRE_EXT(ctx, RVS); +#ifndef CONFIG_USER_ONLY + gen_helper_tlb_flush(cpu_env); + return true; +#endif + return false; +} + +static bool trans_sfence_w_inval(DisasContext *ctx, arg_sfence_w_inval *a) +{ + REQUIRE_SVINVAL(ctx); + REQUIRE_EXT(ctx, RVS); + /* Do nothing currently */ + return true; +} + +static bool trans_sfence_inval_ir(DisasContext *ctx, arg_sfence_inval_ir *a) +{ + REQUIRE_SVINVAL(ctx); + REQUIRE_EXT(ctx, RVS); + /* Do nothing currently */ + return true; +} + +static bool trans_hinval_vvma(DisasContext *ctx, arg_hinval_vvma *a) +{ + REQUIRE_SVINVAL(ctx); + /* Do the same as hfence.vvma currently */ + REQUIRE_EXT(ctx, RVH); +#ifndef CONFIG_USER_ONLY + gen_helper_hyp_tlb_flush(cpu_env); + return true; +#endif + return false; +} + +static bool trans_hinval_gvma(DisasContext *ctx, arg_hinval_gvma *a) +{ + REQUIRE_SVINVAL(ctx); + /* Do the same as hfence.gvma currently */ + REQUIRE_EXT(ctx, RVH); +#ifndef CONFIG_USER_ONLY + gen_helper_hyp_gvma_tlb_flush(cpu_env); + return true; +#endif + return false; +} diff --git a/target/riscv/insn_trans/trans_xventanacondops.c.inc b/target/riscv/insn_trans/trans_xventanacondops.c.inc new file mode 100644 index 0000000000..16849e6d4e --- /dev/null +++ b/target/riscv/insn_trans/trans_xventanacondops.c.inc @@ -0,0 +1,39 @@ +/* + * RISC-V translation routines for the XVentanaCondOps extension. + * + * Copyright (c) 2021-2022 VRULL GmbH. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +static bool gen_vt_condmask(DisasContext *ctx, arg_r *a, TCGCond cond) +{ + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE); + TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); + + tcg_gen_movcond_tl(cond, dest, src2, ctx->zero, src1, ctx->zero); + + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool trans_vt_maskc(DisasContext *ctx, arg_r *a) +{ + return gen_vt_condmask(ctx, a, TCG_COND_NE); +} + +static bool trans_vt_maskcn(DisasContext *ctx, arg_r *a) +{ + return gen_vt_condmask(ctx, a, TCG_COND_EQ); +} diff --git a/target/riscv/instmap.h b/target/riscv/instmap.h index 40b6d2b64d..f877530576 100644 --- a/target/riscv/instmap.h +++ b/target/riscv/instmap.h @@ -184,6 +184,8 @@ enum { OPC_RISC_CSRRWI = OPC_RISC_SYSTEM | (0x5 << 12), OPC_RISC_CSRRSI = OPC_RISC_SYSTEM | (0x6 << 12), OPC_RISC_CSRRCI = OPC_RISC_SYSTEM | (0x7 << 12), + + OPC_RISC_HLVHSV = OPC_RISC_SYSTEM | (0x4 << 12), }; #define MASK_OP_FP_LOAD(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12))) @@ -310,12 +312,20 @@ enum { | (extract32(inst, 12, 8) << 12) \ | (sextract64(inst, 31, 1) << 20)) +#define GET_FUNCT3(inst) extract32(inst, 12, 3) +#define GET_FUNCT7(inst) extract32(inst, 25, 7) #define GET_RM(inst) extract32(inst, 12, 3) #define GET_RS3(inst) extract32(inst, 27, 5) #define GET_RS1(inst) extract32(inst, 15, 5) #define GET_RS2(inst) extract32(inst, 20, 5) #define GET_RD(inst) extract32(inst, 7, 5) #define GET_IMM(inst) sextract64(inst, 20, 12) +#define SET_RS1(inst, val) deposit32(inst, 15, 5, val) +#define SET_RS2(inst, val) deposit32(inst, 20, 5, val) +#define SET_RD(inst, val) deposit32(inst, 7, 5, val) +#define SET_I_IMM(inst, val) deposit32(inst, 20, 12, val) +#define SET_S_IMM(inst, val) \ + deposit32(deposit32(inst, 7, 5, val), 25, 7, (val) >> 5) /* RVC decoding macros */ #define GET_C_IMM(inst) (extract32(inst, 2, 5) \ @@ -346,6 +356,8 @@ enum { | (extract32(inst, 5, 1) << 6)) #define GET_C_LD_IMM(inst) ((extract16(inst, 10, 3) << 3) \ | (extract16(inst, 5, 2) << 6)) +#define GET_C_SW_IMM(inst) GET_C_LW_IMM(inst) +#define GET_C_SD_IMM(inst) GET_C_LD_IMM(inst) #define GET_C_J_IMM(inst) ((extract32(inst, 3, 3) << 1) \ | (extract32(inst, 11, 1) << 4) \ | (extract32(inst, 2, 1) << 5) \ @@ -366,4 +378,37 @@ enum { #define GET_C_RS1S(inst) (8 + extract16(inst, 7, 3)) #define GET_C_RS2S(inst) (8 + extract16(inst, 2, 3)) +#define GET_C_FUNC(inst) extract32(inst, 13, 3) +#define GET_C_OP(inst) extract32(inst, 0, 2) + +enum { + /* RVC Quadrants */ + OPC_RISC_C_OP_QUAD0 = 0x0, + OPC_RISC_C_OP_QUAD1 = 0x1, + OPC_RISC_C_OP_QUAD2 = 0x2 +}; + +enum { + /* RVC Quadrant 0 */ + OPC_RISC_C_FUNC_ADDI4SPN = 0x0, + OPC_RISC_C_FUNC_FLD_LQ = 0x1, + OPC_RISC_C_FUNC_LW = 0x2, + OPC_RISC_C_FUNC_FLW_LD = 0x3, + OPC_RISC_C_FUNC_FSD_SQ = 0x5, + OPC_RISC_C_FUNC_SW = 0x6, + OPC_RISC_C_FUNC_FSW_SD = 0x7 +}; + +enum { + /* RVC Quadrant 2 */ + OPC_RISC_C_FUNC_SLLI_SLLI64 = 0x0, + OPC_RISC_C_FUNC_FLDSP_LQSP = 0x1, + OPC_RISC_C_FUNC_LWSP = 0x2, + OPC_RISC_C_FUNC_FLWSP_LDSP = 0x3, + OPC_RISC_C_FUNC_JR_MV_EBREAK_JALR_ADD = 0x4, + OPC_RISC_C_FUNC_FSDSP_SQSP = 0x5, + OPC_RISC_C_FUNC_SWSP = 0x6, + OPC_RISC_C_FUNC_FSWSP_SDSP = 0x7 +}; + #endif diff --git a/target/riscv/internals.h b/target/riscv/internals.h index b15ad394bb..5620fbffb6 100644 --- a/target/riscv/internals.h +++ b/target/riscv/internals.h @@ -22,33 +22,50 @@ #include "hw/registerfields.h" /* share data between vector helpers and decode code */ -FIELD(VDATA, MLEN, 0, 8) -FIELD(VDATA, VM, 8, 1) -FIELD(VDATA, LMUL, 9, 2) -FIELD(VDATA, NF, 11, 4) 
-FIELD(VDATA, WD, 11, 1) +FIELD(VDATA, VM, 0, 1) +FIELD(VDATA, LMUL, 1, 3) +FIELD(VDATA, VTA, 4, 1) +FIELD(VDATA, VTA_ALL_1S, 5, 1) +FIELD(VDATA, VMA, 6, 1) +FIELD(VDATA, NF, 7, 4) +FIELD(VDATA, WD, 7, 1) /* float point classify helpers */ target_ulong fclass_h(uint64_t frs1); target_ulong fclass_s(uint64_t frs1); target_ulong fclass_d(uint64_t frs1); -#define SEW8 0 -#define SEW16 1 -#define SEW32 2 -#define SEW64 3 - #ifndef CONFIG_USER_ONLY extern const VMStateDescription vmstate_riscv_cpu; #endif -static inline uint64_t nanbox_s(float32 f) +enum { + RISCV_FRM_RNE = 0, /* Round to Nearest, ties to Even */ + RISCV_FRM_RTZ = 1, /* Round towards Zero */ + RISCV_FRM_RDN = 2, /* Round Down */ + RISCV_FRM_RUP = 3, /* Round Up */ + RISCV_FRM_RMM = 4, /* Round to Nearest, ties to Max Magnitude */ + RISCV_FRM_DYN = 7, /* Dynamic rounding mode */ + RISCV_FRM_ROD = 8, /* Round to Odd */ +}; + +static inline uint64_t nanbox_s(CPURISCVState *env, float32 f) { - return f | MAKE_64BIT_MASK(32, 32); + /* the value is sign-extended instead of NaN-boxing for zfinx */ + if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) { + return (int32_t)f; + } else { + return f | MAKE_64BIT_MASK(32, 32); + } } -static inline float32 check_nanbox_s(uint64_t f) +static inline float32 check_nanbox_s(CPURISCVState *env, uint64_t f) { + /* Disable NaN-boxing check when enable zfinx */ + if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) { + return (uint32_t)f; + } + uint64_t mask = MAKE_64BIT_MASK(32, 32); if (likely((f & mask) == mask)) { @@ -58,4 +75,30 @@ static inline float32 check_nanbox_s(uint64_t f) } } +static inline uint64_t nanbox_h(CPURISCVState *env, float16 f) +{ + /* the value is sign-extended instead of NaN-boxing for zfinx */ + if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) { + return (int16_t)f; + } else { + return f | MAKE_64BIT_MASK(16, 48); + } +} + +static inline float16 check_nanbox_h(CPURISCVState *env, uint64_t f) +{ + /* Disable nanbox check when enable zfinx */ + if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) { + return (uint16_t)f; + } + + uint64_t mask = MAKE_64BIT_MASK(16, 48); + + if (likely((f & mask) == mask)) { + return (uint16_t)f; + } else { + return 0x7E00u; /* default qnan */ + } +} + #endif diff --git a/target/riscv/kvm-stub.c b/target/riscv/kvm-stub.c new file mode 100644 index 0000000000..4e8fc31a21 --- /dev/null +++ b/target/riscv/kvm-stub.c @@ -0,0 +1,30 @@ +/* + * QEMU KVM RISC-V specific function stubs + * + * Copyright (c) 2020 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "kvm_riscv.h"
+
+void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
+{
+    abort();
+}
+
+void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
+{
+    abort();
+}
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
new file mode 100644
index 0000000000..30f21453d6
--- /dev/null
+++ b/target/riscv/kvm.c
@@ -0,0 +1,538 @@
+/*
+ * RISC-V implementation of KVM hooks
+ *
+ * Copyright (c) 2020 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+
+#include <linux/kvm.h>
+
+#include "qemu/timer.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "sysemu/kvm_int.h"
+#include "cpu.h"
+#include "trace.h"
+#include "hw/pci/pci.h"
+#include "exec/memattrs.h"
+#include "exec/address-spaces.h"
+#include "hw/boards.h"
+#include "hw/irq.h"
+#include "qemu/log.h"
+#include "hw/loader.h"
+#include "kvm_riscv.h"
+#include "sbi_ecall_interface.h"
+#include "chardev/char-fe.h"
+#include "migration/migration.h"
+#include "sysemu/runstate.h"
+
+static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
+                                 uint64_t idx)
+{
+    uint64_t id = KVM_REG_RISCV | type | idx;
+
+    switch (riscv_cpu_mxl(env)) {
+    case MXL_RV32:
+        id |= KVM_REG_SIZE_U32;
+        break;
+    case MXL_RV64:
+        id |= KVM_REG_SIZE_U64;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    return id;
+}
+
+#define RISCV_CORE_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, \
+                 KVM_REG_RISCV_CORE_REG(name))
+
+#define RISCV_CSR_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CSR, \
+                 KVM_REG_RISCV_CSR_REG(name))
+
+#define RISCV_TIMER_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_TIMER, \
+                 KVM_REG_RISCV_TIMER_REG(name))
+
+#define RISCV_FP_F_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_F, idx)
+
+#define RISCV_FP_D_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_D, idx)
+
+#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
+    do { \
+        int ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
+        if (ret) { \
+            return ret; \
+        } \
+    } while (0)
+
+#define KVM_RISCV_SET_CSR(cs, env, csr, reg) \
+    do { \
+        int ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
+        if (ret) { \
+            return ret; \
+        } \
+    } while (0)
+
+#define KVM_RISCV_GET_TIMER(cs, env, name, reg) \
+    do { \
+        int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(env, name), &reg); \
+        if (ret) { \
+            abort(); \
+        } \
+    } while (0)
+
+#define KVM_RISCV_SET_TIMER(cs, env, name, reg) \
+    do { \
+        int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(env, time), &reg); \
+        if (ret) { \
+            abort(); \
+        } \
+    } while (0)
+
+static int kvm_riscv_get_regs_core(CPUState *cs)
+{
+    int ret = 0;
+    int i;
+    target_ulong reg;
+    CPURISCVState *env = &RISCV_CPU(cs)->env;
+
+    ret = kvm_get_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
+    if (ret) {
+        return ret;
+    }
+    env->pc = reg;
+
+    for (i = 1; i < 32; i++) {
+        uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
+        ret = kvm_get_one_reg(cs, id, &reg);
+        if (ret) {
+            return ret;
+        }
+        env->gpr[i] = reg;
+    }
+
+    return ret;
+}
+
+static int kvm_riscv_put_regs_core(CPUState *cs)
+{
+    int ret = 0;
+    int i;
+    target_ulong reg;
+    CPURISCVState *env = &RISCV_CPU(cs)->env;
+
+    reg = env->pc;
+    ret = kvm_set_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
+    if (ret) {
+        return ret;
+    }
+
+    for (i = 1; i < 32; i++) {
+        uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
+        reg = env->gpr[i];
+        ret = kvm_set_one_reg(cs, id, &reg);
+        if (ret) {
+            return ret;
+        }
+    }
+
+    return ret;
+}
+
+static int kvm_riscv_get_regs_csr(CPUState *cs)
+{
+    int ret = 0;
+    CPURISCVState *env = &RISCV_CPU(cs)->env;
+
+    KVM_RISCV_GET_CSR(cs, env, sstatus, env->mstatus);
+    KVM_RISCV_GET_CSR(cs, env, sie, env->mie);
+    KVM_RISCV_GET_CSR(cs, env, stvec, env->stvec);
+    KVM_RISCV_GET_CSR(cs, env, sscratch, env->sscratch);
+    KVM_RISCV_GET_CSR(cs, env, sepc, env->sepc);
+    KVM_RISCV_GET_CSR(cs, env, scause, env->scause);
+    KVM_RISCV_GET_CSR(cs, env, stval, env->stval);
+    KVM_RISCV_GET_CSR(cs, env, sip, env->mip);
+    KVM_RISCV_GET_CSR(cs, env, satp, env->satp);
+    return ret;
+}
+
+static int kvm_riscv_put_regs_csr(CPUState *cs)
+{
+    int ret = 0;
+    CPURISCVState *env = &RISCV_CPU(cs)->env;
+
+    KVM_RISCV_SET_CSR(cs, env, sstatus, env->mstatus);
+    KVM_RISCV_SET_CSR(cs, env, sie, env->mie);
+    KVM_RISCV_SET_CSR(cs, env, stvec, env->stvec);
+    KVM_RISCV_SET_CSR(cs, env, sscratch, env->sscratch);
+    KVM_RISCV_SET_CSR(cs, env, sepc, env->sepc);
+    KVM_RISCV_SET_CSR(cs, env, scause, env->scause);
+    KVM_RISCV_SET_CSR(cs, env, stval, env->stval);
+    KVM_RISCV_SET_CSR(cs, env, sip, env->mip);
+    KVM_RISCV_SET_CSR(cs, env, satp, env->satp);
+
+    return ret;
+}
+
+static int kvm_riscv_get_regs_fp(CPUState *cs)
+{
+    int ret = 0;
+    int i;
+    CPURISCVState *env = &RISCV_CPU(cs)->env;
+
+    if (riscv_has_ext(env, RVD)) {
+        uint64_t reg;
+        for (i = 0; i < 32; i++) {
+            ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
+            if (ret) {
+                return ret;
+            }
+            env->fpr[i] = reg;
+        }
+        return ret;
+    }
+
+    if (riscv_has_ext(env, RVF)) {
+        uint32_t reg;
+        for (i = 0; i < 32; i++) {
+            ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
+            if (ret) {
+                return ret;
+            }
+            env->fpr[i] = reg;
+        }
+        return ret;
+    }
+
+    return ret;
+}
+
+static int kvm_riscv_put_regs_fp(CPUState *cs)
+{
+    int ret = 0;
+    int i;
+    CPURISCVState *env = &RISCV_CPU(cs)->env;
+
+    if (riscv_has_ext(env, RVD)) {
+        uint64_t reg;
+        for (i = 0; i < 32; i++) {
+            reg = env->fpr[i];
+            ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
+            if (ret) {
+                return ret;
+            }
+        }
+        return ret;
+    }
+
+    if (riscv_has_ext(env, RVF)) {
+        uint32_t reg;
+        for (i = 0; i < 32; i++) {
+            reg = env->fpr[i];
+            ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
+            if (ret) {
+                return ret;
+            }
+        }
+        return ret;
+    }
+
+    return ret;
+}
+
+static void kvm_riscv_get_regs_timer(CPUState *cs)
+{
+    CPURISCVState *env = &RISCV_CPU(cs)->env;
+
+    if (env->kvm_timer_dirty) {
+        return;
+    }
+
+    KVM_RISCV_GET_TIMER(cs, env, time, env->kvm_timer_time);
+    KVM_RISCV_GET_TIMER(cs, env, compare, env->kvm_timer_compare);
+    KVM_RISCV_GET_TIMER(cs, env, state, env->kvm_timer_state);
+    KVM_RISCV_GET_TIMER(cs, env, frequency, env->kvm_timer_frequency);
+
+    env->kvm_timer_dirty = true;
+}
+
+static void kvm_riscv_put_regs_timer(CPUState *cs)
+{
+    uint64_t reg;
+    CPURISCVState *env = &RISCV_CPU(cs)->env;
+
+    if (!env->kvm_timer_dirty) {
+        return;
+    }
+
+    KVM_RISCV_SET_TIMER(cs, env, time, env->kvm_timer_time);
+    KVM_RISCV_SET_TIMER(cs, env, compare, 
env->kvm_timer_compare); + + /* + * To set register of RISCV_TIMER_REG(state) will occur a error from KVM + * on env->kvm_timer_state == 0, It's better to adapt in KVM, but it + * doesn't matter that adaping in QEMU now. + * TODO If KVM changes, adapt here. + */ + if (env->kvm_timer_state) { + KVM_RISCV_SET_TIMER(cs, env, state, env->kvm_timer_state); + } + + /* + * For now, migration will not work between Hosts with different timer + * frequency. Therefore, we should check whether they are the same here + * during the migration. + */ + if (migration_is_running(migrate_get_current()->state)) { + KVM_RISCV_GET_TIMER(cs, env, frequency, reg); + if (reg != env->kvm_timer_frequency) { + error_report("Dst Hosts timer frequency != Src Hosts"); + } + } + + env->kvm_timer_dirty = false; +} + +const KVMCapabilityInfo kvm_arch_required_capabilities[] = { + KVM_CAP_LAST_INFO +}; + +int kvm_arch_get_registers(CPUState *cs) +{ + int ret = 0; + + ret = kvm_riscv_get_regs_core(cs); + if (ret) { + return ret; + } + + ret = kvm_riscv_get_regs_csr(cs); + if (ret) { + return ret; + } + + ret = kvm_riscv_get_regs_fp(cs); + if (ret) { + return ret; + } + + return ret; +} + +int kvm_arch_put_registers(CPUState *cs, int level) +{ + int ret = 0; + + ret = kvm_riscv_put_regs_core(cs); + if (ret) { + return ret; + } + + ret = kvm_riscv_put_regs_csr(cs); + if (ret) { + return ret; + } + + ret = kvm_riscv_put_regs_fp(cs); + if (ret) { + return ret; + } + + return ret; +} + +int kvm_arch_release_virq_post(int virq) +{ + return 0; +} + +int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, + uint64_t address, uint32_t data, PCIDevice *dev) +{ + return 0; +} + +int kvm_arch_destroy_vcpu(CPUState *cs) +{ + return 0; +} + +unsigned long kvm_arch_vcpu_id(CPUState *cpu) +{ + return cpu->cpu_index; +} + +static void kvm_riscv_vm_state_change(void *opaque, bool running, + RunState state) +{ + CPUState *cs = opaque; + + if (running) { + kvm_riscv_put_regs_timer(cs); + } else { + kvm_riscv_get_regs_timer(cs); + } +} + +void kvm_arch_init_irq_routing(KVMState *s) +{ +} + +int kvm_arch_init_vcpu(CPUState *cs) +{ + int ret = 0; + target_ulong isa; + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + uint64_t id; + + qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change, cs); + + id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG, + KVM_REG_RISCV_CONFIG_REG(isa)); + ret = kvm_get_one_reg(cs, id, &isa); + if (ret) { + return ret; + } + env->misa_ext = isa; + + return ret; +} + +int kvm_arch_msi_data_to_gsi(uint32_t data) +{ + abort(); +} + +int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, + int vector, PCIDevice *dev) +{ + return 0; +} + +int kvm_arch_init(MachineState *ms, KVMState *s) +{ + return 0; +} + +int kvm_arch_irqchip_create(KVMState *s) +{ + return 0; +} + +int kvm_arch_process_async_events(CPUState *cs) +{ + return 0; +} + +void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) +{ +} + +MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) +{ + return MEMTXATTRS_UNSPECIFIED; +} + +bool kvm_arch_stop_on_emulation_error(CPUState *cs) +{ + return true; +} + +static int kvm_riscv_handle_sbi(CPUState *cs, struct kvm_run *run) +{ + int ret = 0; + unsigned char ch; + switch (run->riscv_sbi.extension_id) { + case SBI_EXT_0_1_CONSOLE_PUTCHAR: + ch = run->riscv_sbi.args[0]; + qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch)); + break; + case SBI_EXT_0_1_CONSOLE_GETCHAR: + ret = qemu_chr_fe_read_all(serial_hd(0)->be, &ch, sizeof(ch)); + if (ret == sizeof(ch)) { + 
run->riscv_sbi.args[0] = ch; + } else { + run->riscv_sbi.args[0] = -1; + } + break; + default: + qemu_log_mask(LOG_UNIMP, + "%s: un-handled SBI EXIT, specific reasons is %lu\n", + __func__, run->riscv_sbi.extension_id); + ret = -1; + break; + } + return ret; +} + +int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) +{ + int ret = 0; + switch (run->exit_reason) { + case KVM_EXIT_RISCV_SBI: + ret = kvm_riscv_handle_sbi(cs, run); + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n", + __func__, run->exit_reason); + ret = -1; + break; + } + return ret; +} + +void kvm_riscv_reset_vcpu(RISCVCPU *cpu) +{ + CPURISCVState *env = &cpu->env; + + if (!kvm_enabled()) { + return; + } + env->pc = cpu->env.kernel_addr; + env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */ + env->gpr[11] = cpu->env.fdt_addr; /* a1 */ + env->satp = 0; +} + +void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level) +{ + int ret; + unsigned virq = level ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET; + + if (irq != IRQ_S_EXT) { + perror("kvm riscv set irq != IRQ_S_EXT\n"); + abort(); + } + + ret = kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq); + if (ret < 0) { + perror("Set irq failed"); + abort(); + } +} + +bool kvm_arch_cpu_check_are_resettable(void) +{ + return true; +} + +void kvm_arch_accel_class_init(ObjectClass *oc) +{ +} diff --git a/target/riscv/kvm_riscv.h b/target/riscv/kvm_riscv.h new file mode 100644 index 0000000000..ed281bdce0 --- /dev/null +++ b/target/riscv/kvm_riscv.h @@ -0,0 +1,25 @@ +/* + * QEMU KVM support -- RISC-V specific functions. + * + * Copyright (c) 2020 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef QEMU_KVM_RISCV_H +#define QEMU_KVM_RISCV_H + +void kvm_riscv_reset_vcpu(RISCVCPU *cpu); +void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level); + +#endif diff --git a/target/riscv/m128_helper.c b/target/riscv/m128_helper.c new file mode 100644 index 0000000000..7bf115b85e --- /dev/null +++ b/target/riscv/m128_helper.c @@ -0,0 +1,109 @@ +/* + * RISC-V Emulation Helpers for QEMU. + * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * Copyright (c) 2017-2018 SiFive, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "qemu/main-loop.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" + +target_ulong HELPER(divu_i128)(CPURISCVState *env, + target_ulong ul, target_ulong uh, + target_ulong vl, target_ulong vh) +{ + target_ulong ql, qh; + Int128 q; + + if (vl == 0 && vh == 0) { /* Handle special behavior on div by zero */ + ql = ~0x0; + qh = ~0x0; + } else { + q = int128_divu(int128_make128(ul, uh), int128_make128(vl, vh)); + ql = int128_getlo(q); + qh = int128_gethi(q); + } + + env->retxh = qh; + return ql; +} + +target_ulong HELPER(remu_i128)(CPURISCVState *env, + target_ulong ul, target_ulong uh, + target_ulong vl, target_ulong vh) +{ + target_ulong rl, rh; + Int128 r; + + if (vl == 0 && vh == 0) { + rl = ul; + rh = uh; + } else { + r = int128_remu(int128_make128(ul, uh), int128_make128(vl, vh)); + rl = int128_getlo(r); + rh = int128_gethi(r); + } + + env->retxh = rh; + return rl; +} + +target_ulong HELPER(divs_i128)(CPURISCVState *env, + target_ulong ul, target_ulong uh, + target_ulong vl, target_ulong vh) +{ + target_ulong qh, ql; + Int128 q; + + if (vl == 0 && vh == 0) { /* Div by zero check */ + ql = ~0x0; + qh = ~0x0; + } else if (uh == (1ULL << (TARGET_LONG_BITS - 1)) && ul == 0 && + vh == ~0x0 && vl == ~0x0) { + /* Signed div overflow check (-2**127 / -1) */ + ql = ul; + qh = uh; + } else { + q = int128_divs(int128_make128(ul, uh), int128_make128(vl, vh)); + ql = int128_getlo(q); + qh = int128_gethi(q); + } + + env->retxh = qh; + return ql; +} + +target_ulong HELPER(rems_i128)(CPURISCVState *env, + target_ulong ul, target_ulong uh, + target_ulong vl, target_ulong vh) +{ + target_ulong rh, rl; + Int128 r; + + if (vl == 0 && vh == 0) { + rl = ul; + rh = uh; + } else { + r = int128_rems(int128_make128(ul, uh), int128_make128(vl, vh)); + rl = int128_getlo(r); + rh = int128_gethi(r); + } + + env->retxh = rh; + return rl; +} diff --git a/target/riscv/machine.c b/target/riscv/machine.c index 16a08302da..c2a94a82b3 100644 --- a/target/riscv/machine.c +++ b/target/riscv/machine.c @@ -76,44 +76,26 @@ static bool hyper_needed(void *opaque) return riscv_has_ext(env, RVH); } -static bool vector_needed(void *opaque) -{ - RISCVCPU *cpu = opaque; - CPURISCVState *env = &cpu->env; - - return riscv_has_ext(env, RVV); -} - -static const VMStateDescription vmstate_vector = { - .name = "cpu/vector", - .version_id = 1, - .minimum_version_id = 1, - .needed = vector_needed, - .fields = (VMStateField[]) { - VMSTATE_UINT64_ARRAY(env.vreg, RISCVCPU, 32 * RV_VLEN_MAX / 64), - VMSTATE_UINTTL(env.vxrm, RISCVCPU), - VMSTATE_UINTTL(env.vxsat, RISCVCPU), - VMSTATE_UINTTL(env.vl, RISCVCPU), - VMSTATE_UINTTL(env.vstart, RISCVCPU), - VMSTATE_UINTTL(env.vtype, RISCVCPU), - VMSTATE_END_OF_LIST() - } -}; - static const VMStateDescription vmstate_hyper = { .name = "cpu/hyper", - .version_id = 1, - .minimum_version_id = 1, + .version_id = 2, + .minimum_version_id = 2, .needed = hyper_needed, .fields = (VMStateField[]) { VMSTATE_UINTTL(env.hstatus, RISCVCPU), VMSTATE_UINTTL(env.hedeleg, RISCVCPU), - VMSTATE_UINTTL(env.hideleg, RISCVCPU), + VMSTATE_UINT64(env.hideleg, RISCVCPU), VMSTATE_UINTTL(env.hcounteren, RISCVCPU), VMSTATE_UINTTL(env.htval, RISCVCPU), VMSTATE_UINTTL(env.htinst, RISCVCPU), VMSTATE_UINTTL(env.hgatp, RISCVCPU), + VMSTATE_UINTTL(env.hgeie, RISCVCPU), + VMSTATE_UINTTL(env.hgeip, RISCVCPU), VMSTATE_UINT64(env.htimedelta, RISCVCPU), + VMSTATE_UINT64(env.vstimecmp, RISCVCPU), + + VMSTATE_UINTTL(env.hvictl, RISCVCPU), + VMSTATE_UINT8_ARRAY(env.hviprio, 
RISCVCPU, 64), VMSTATE_UINT64(env.vsstatus, RISCVCPU), VMSTATE_UINTTL(env.vstvec, RISCVCPU), @@ -122,6 +104,7 @@ static const VMStateDescription vmstate_hyper = { VMSTATE_UINTTL(env.vscause, RISCVCPU), VMSTATE_UINTTL(env.vstval, RISCVCPU), VMSTATE_UINTTL(env.vsatp, RISCVCPU), + VMSTATE_UINTTL(env.vsiselect, RISCVCPU), VMSTATE_UINTTL(env.mtval2, RISCVCPU), VMSTATE_UINTTL(env.mtinst, RISCVCPU), @@ -138,13 +121,191 @@ static const VMStateDescription vmstate_hyper = { } }; -const VMStateDescription vmstate_riscv_cpu = { - .name = "cpu", +static bool vector_needed(void *opaque) +{ + RISCVCPU *cpu = opaque; + CPURISCVState *env = &cpu->env; + + return riscv_has_ext(env, RVV); +} + +static const VMStateDescription vmstate_vector = { + .name = "cpu/vector", .version_id = 2, .minimum_version_id = 2, + .needed = vector_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64_ARRAY(env.vreg, RISCVCPU, 32 * RV_VLEN_MAX / 64), + VMSTATE_UINTTL(env.vxrm, RISCVCPU), + VMSTATE_UINTTL(env.vxsat, RISCVCPU), + VMSTATE_UINTTL(env.vl, RISCVCPU), + VMSTATE_UINTTL(env.vstart, RISCVCPU), + VMSTATE_UINTTL(env.vtype, RISCVCPU), + VMSTATE_BOOL(env.vill, RISCVCPU), + VMSTATE_END_OF_LIST() + } +}; + +static bool pointermasking_needed(void *opaque) +{ + RISCVCPU *cpu = opaque; + CPURISCVState *env = &cpu->env; + + return riscv_has_ext(env, RVJ); +} + +static const VMStateDescription vmstate_pointermasking = { + .name = "cpu/pointer_masking", + .version_id = 1, + .minimum_version_id = 1, + .needed = pointermasking_needed, + .fields = (VMStateField[]) { + VMSTATE_UINTTL(env.mmte, RISCVCPU), + VMSTATE_UINTTL(env.mpmmask, RISCVCPU), + VMSTATE_UINTTL(env.mpmbase, RISCVCPU), + VMSTATE_UINTTL(env.spmmask, RISCVCPU), + VMSTATE_UINTTL(env.spmbase, RISCVCPU), + VMSTATE_UINTTL(env.upmmask, RISCVCPU), + VMSTATE_UINTTL(env.upmbase, RISCVCPU), + + VMSTATE_END_OF_LIST() + } +}; + +static bool rv128_needed(void *opaque) +{ + RISCVCPU *cpu = opaque; + CPURISCVState *env = &cpu->env; + + return env->misa_mxl_max == MXL_RV128; +} + +static const VMStateDescription vmstate_rv128 = { + .name = "cpu/rv128", + .version_id = 1, + .minimum_version_id = 1, + .needed = rv128_needed, + .fields = (VMStateField[]) { + VMSTATE_UINTTL_ARRAY(env.gprh, RISCVCPU, 32), + VMSTATE_UINT64(env.mscratchh, RISCVCPU), + VMSTATE_UINT64(env.sscratchh, RISCVCPU), + VMSTATE_END_OF_LIST() + } +}; + +static bool kvmtimer_needed(void *opaque) +{ + return kvm_enabled(); +} + +static int cpu_post_load(void *opaque, int version_id) +{ + RISCVCPU *cpu = opaque; + CPURISCVState *env = &cpu->env; + + env->kvm_timer_dirty = true; + return 0; +} + +static const VMStateDescription vmstate_kvmtimer = { + .name = "cpu/kvmtimer", + .version_id = 1, + .minimum_version_id = 1, + .needed = kvmtimer_needed, + .post_load = cpu_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT64(env.kvm_timer_time, RISCVCPU), + VMSTATE_UINT64(env.kvm_timer_compare, RISCVCPU), + VMSTATE_UINT64(env.kvm_timer_state, RISCVCPU), + VMSTATE_END_OF_LIST() + } +}; + +static bool debug_needed(void *opaque) +{ + RISCVCPU *cpu = opaque; + CPURISCVState *env = &cpu->env; + + return riscv_feature(env, RISCV_FEATURE_DEBUG); +} + +static const VMStateDescription vmstate_debug = { + .name = "cpu/debug", + .version_id = 2, + .minimum_version_id = 2, + .needed = debug_needed, + .fields = (VMStateField[]) { + VMSTATE_UINTTL(env.trigger_cur, RISCVCPU), + VMSTATE_UINTTL_ARRAY(env.tdata1, RISCVCPU, RV_MAX_TRIGGERS), + VMSTATE_UINTTL_ARRAY(env.tdata2, RISCVCPU, RV_MAX_TRIGGERS), + VMSTATE_UINTTL_ARRAY(env.tdata3, 
RISCVCPU, RV_MAX_TRIGGERS), + VMSTATE_END_OF_LIST() + } +}; + +static int riscv_cpu_post_load(void *opaque, int version_id) +{ + RISCVCPU *cpu = opaque; + CPURISCVState *env = &cpu->env; + + env->xl = cpu_recompute_xl(env); + riscv_cpu_update_mask(env); + return 0; +} + +static bool envcfg_needed(void *opaque) +{ + RISCVCPU *cpu = opaque; + CPURISCVState *env = &cpu->env; + + return (env->priv_ver >= PRIV_VERSION_1_12_0 ? 1 : 0); +} + +static const VMStateDescription vmstate_envcfg = { + .name = "cpu/envcfg", + .version_id = 1, + .minimum_version_id = 1, + .needed = envcfg_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(env.menvcfg, RISCVCPU), + VMSTATE_UINTTL(env.senvcfg, RISCVCPU), + VMSTATE_UINT64(env.henvcfg, RISCVCPU), + VMSTATE_END_OF_LIST() + } +}; + +static bool pmu_needed(void *opaque) +{ + RISCVCPU *cpu = opaque; + + return cpu->cfg.pmu_num; +} + +static const VMStateDescription vmstate_pmu_ctr_state = { + .name = "cpu/pmu", + .version_id = 1, + .minimum_version_id = 1, + .needed = pmu_needed, + .fields = (VMStateField[]) { + VMSTATE_UINTTL(mhpmcounter_val, PMUCTRState), + VMSTATE_UINTTL(mhpmcounterh_val, PMUCTRState), + VMSTATE_UINTTL(mhpmcounter_prev, PMUCTRState), + VMSTATE_UINTTL(mhpmcounterh_prev, PMUCTRState), + VMSTATE_BOOL(started, PMUCTRState), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_riscv_cpu = { + .name = "cpu", + .version_id = 5, + .minimum_version_id = 5, + .post_load = riscv_cpu_post_load, .fields = (VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32), VMSTATE_UINT64_ARRAY(env.fpr, RISCVCPU, 32), + VMSTATE_UINT8_ARRAY(env.miprio, RISCVCPU, 64), + VMSTATE_UINT8_ARRAY(env.siprio, RISCVCPU, 64), VMSTATE_UINTTL(env.pc, RISCVCPU), VMSTATE_UINTTL(env.load_res, RISCVCPU), VMSTATE_UINTTL(env.load_val, RISCVCPU), @@ -153,18 +314,20 @@ const VMStateDescription vmstate_riscv_cpu = { VMSTATE_UINTTL(env.guest_phys_fault_addr, RISCVCPU), VMSTATE_UINTTL(env.priv_ver, RISCVCPU), VMSTATE_UINTTL(env.vext_ver, RISCVCPU), - VMSTATE_UINTTL(env.misa, RISCVCPU), - VMSTATE_UINTTL(env.misa_mask, RISCVCPU), + VMSTATE_UINT32(env.misa_mxl, RISCVCPU), + VMSTATE_UINT32(env.misa_ext, RISCVCPU), + VMSTATE_UINT32(env.misa_mxl_max, RISCVCPU), + VMSTATE_UINT32(env.misa_ext_mask, RISCVCPU), VMSTATE_UINT32(env.features, RISCVCPU), VMSTATE_UINTTL(env.priv, RISCVCPU), VMSTATE_UINTTL(env.virt, RISCVCPU), - VMSTATE_UINTTL(env.resetvec, RISCVCPU), + VMSTATE_UINT64(env.resetvec, RISCVCPU), VMSTATE_UINTTL(env.mhartid, RISCVCPU), VMSTATE_UINT64(env.mstatus, RISCVCPU), - VMSTATE_UINTTL(env.mip, RISCVCPU), - VMSTATE_UINT32(env.miclaim, RISCVCPU), - VMSTATE_UINTTL(env.mie, RISCVCPU), - VMSTATE_UINTTL(env.mideleg, RISCVCPU), + VMSTATE_UINT64(env.mip, RISCVCPU), + VMSTATE_UINT64(env.miclaim, RISCVCPU), + VMSTATE_UINT64(env.mie, RISCVCPU), + VMSTATE_UINT64(env.mideleg, RISCVCPU), VMSTATE_UINTTL(env.satp, RISCVCPU), VMSTATE_UINTTL(env.stval, RISCVCPU), VMSTATE_UINTTL(env.medeleg, RISCVCPU), @@ -175,13 +338,20 @@ const VMStateDescription vmstate_riscv_cpu = { VMSTATE_UINTTL(env.mepc, RISCVCPU), VMSTATE_UINTTL(env.mcause, RISCVCPU), VMSTATE_UINTTL(env.mtval, RISCVCPU), + VMSTATE_UINTTL(env.miselect, RISCVCPU), + VMSTATE_UINTTL(env.siselect, RISCVCPU), VMSTATE_UINTTL(env.scounteren, RISCVCPU), VMSTATE_UINTTL(env.mcounteren, RISCVCPU), + VMSTATE_UINTTL(env.mcountinhibit, RISCVCPU), + VMSTATE_STRUCT_ARRAY(env.pmu_ctrs, RISCVCPU, RV_MAX_MHPMCOUNTERS, 0, + vmstate_pmu_ctr_state, PMUCTRState), + VMSTATE_UINTTL_ARRAY(env.mhpmevent_val, RISCVCPU, RV_MAX_MHPMEVENTS), + 
VMSTATE_UINTTL_ARRAY(env.mhpmeventh_val, RISCVCPU, RV_MAX_MHPMEVENTS), VMSTATE_UINTTL(env.sscratch, RISCVCPU), VMSTATE_UINTTL(env.mscratch, RISCVCPU), VMSTATE_UINT64(env.mfromhost, RISCVCPU), VMSTATE_UINT64(env.mtohost, RISCVCPU), - VMSTATE_UINT64(env.timecmp, RISCVCPU), + VMSTATE_UINT64(env.stimecmp, RISCVCPU), VMSTATE_END_OF_LIST() }, @@ -189,6 +359,11 @@ const VMStateDescription vmstate_riscv_cpu = { &vmstate_pmp, &vmstate_hyper, &vmstate_vector, + &vmstate_pointermasking, + &vmstate_rv128, + &vmstate_kvmtimer, + &vmstate_envcfg, + &vmstate_debug, NULL } }; diff --git a/target/riscv/meson.build b/target/riscv/meson.build index d5e0bc93ea..ba25164d74 100644 --- a/target/riscv/meson.build +++ b/target/riscv/meson.build @@ -1,9 +1,8 @@ # FIXME extra_args should accept files() -dir = meson.current_source_dir() - gen = [ decodetree.process('insn16.decode', extra_args: ['--static-decode=decode_insn16', '--insnwidth=16']), decodetree.process('insn32.decode', extra_args: '--static-decode=decode_insn32'), + decodetree.process('XVentanaCondOps.decode', extra_args: '--static-decode=decode_XVentanaCodeOps'), ] riscv_ss = ss.source_set() @@ -18,14 +17,20 @@ riscv_ss.add(files( 'vector_helper.c', 'bitmanip_helper.c', 'translate.c', + 'm128_helper.c', + 'crypto_helper.c' )) +riscv_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c'), if_false: files('kvm-stub.c')) riscv_softmmu_ss = ss.source_set() riscv_softmmu_ss.add(files( 'arch_dump.c', 'pmp.c', + 'debug.c', 'monitor.c', - 'machine.c' + 'machine.c', + 'pmu.c', + 'time_helper.c' )) target_arch += {'riscv': riscv_ss} diff --git a/target/riscv/monitor.c b/target/riscv/monitor.c index f7e6ea72b3..17e63fab00 100644 --- a/target/riscv/monitor.c +++ b/target/riscv/monitor.c @@ -84,6 +84,7 @@ static void walk_pte(Monitor *mon, hwaddr base, target_ulong start, { hwaddr pte_addr; hwaddr paddr; + target_ulong last_start = -1; target_ulong pgsize; target_ulong pte; int ptshift; @@ -111,12 +112,13 @@ static void walk_pte(Monitor *mon, hwaddr base, target_ulong start, * A leaf PTE has been found * * If current PTE's permission bits differ from the last one, - * or current PTE's ppn does not make a contiguous physical - * address block together with the last one, print out the last - * contiguous mapped block details. + * or the current PTE breaks up a contiguous virtual or + * physical mapping, address block together with the last one, + * print out the last contiguous mapped block details. 
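+             * For example, with 4 KiB leaves, if the previous page mapped
+             * VA 0x80000000 -> PA 0x90000000 and the current one maps
+             * VA 0x80400000 -> PA 0x90001000, the physical addresses are
+             * contiguous but the virtual ones are not, so the blocks must
+             * not be merged; last_start below tracks the virtual side.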
*/ if ((*last_attr != attr) || - (*last_paddr + *last_size != paddr)) { + (*last_paddr + *last_size != paddr) || + (last_start + *last_size != start)) { print_pte(mon, va_bits, *vbase, *pbase, *last_paddr + *last_size - *pbase, *last_attr); @@ -125,6 +127,7 @@ static void walk_pte(Monitor *mon, hwaddr base, target_ulong start, *last_attr = attr; } + last_start = start; *last_paddr = paddr; *last_size = pgsize; } else { @@ -150,7 +153,7 @@ static void mem_info_svxx(Monitor *mon, CPUArchState *env) target_ulong last_size; int last_attr; - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT; vm = get_field(env->satp, SATP32_MODE); } else { @@ -220,7 +223,7 @@ void hmp_info_mem(Monitor *mon, const QDict *qdict) return; } - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { if (!(env->satp & SATP32_MODE)) { monitor_printf(mon, "No translation or protection\n"); return; diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c index 3c48e739ac..09f1f5185d 100644 --- a/target/riscv/op_helper.c +++ b/target/riscv/op_helper.c @@ -24,8 +24,8 @@ #include "exec/helper-proto.h" /* Exceptions processing helpers */ -void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env, - uint32_t exception, uintptr_t pc) +G_NORETURN void riscv_raise_exception(CPURISCVState *env, + uint32_t exception, uintptr_t pc) { CPUState *cs = env_cpu(env); cs->exception_index = exception; @@ -37,11 +37,19 @@ void helper_raise_exception(CPURISCVState *env, uint32_t exception) riscv_raise_exception(env, exception, 0); } -target_ulong helper_csrrw(CPURISCVState *env, target_ulong src, - target_ulong csr) +target_ulong helper_csrr(CPURISCVState *env, int csr) { + /* + * The seed CSR must be accessed with a read-write instruction. A + * read-only instruction such as CSRRS/CSRRC with rs1=x0 or CSRRSI/ + * CSRRCI with uimm=0 will raise an illegal instruction exception. + */ + if (csr == CSR_SEED) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + } + target_ulong val = 0; - RISCVException ret = riscv_csrrw(env, csr, &val, src, -1); + RISCVException ret = riscv_csrrw(env, csr, &val, 0, 0); if (ret != RISCV_EXCP_NONE) { riscv_raise_exception(env, ret, GETPC()); @@ -49,11 +57,21 @@ target_ulong helper_csrrw(CPURISCVState *env, target_ulong src, return val; } -target_ulong helper_csrrs(CPURISCVState *env, target_ulong src, - target_ulong csr, target_ulong rs1_pass) +void helper_csrw(CPURISCVState *env, int csr, target_ulong src) +{ + target_ulong mask = env->xl == MXL_RV32 ? UINT32_MAX : (target_ulong)-1; + RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask); + + if (ret != RISCV_EXCP_NONE) { + riscv_raise_exception(env, ret, GETPC()); + } +} + +target_ulong helper_csrrw(CPURISCVState *env, int csr, + target_ulong src, target_ulong write_mask) { target_ulong val = 0; - RISCVException ret = riscv_csrrw(env, csr, &val, -1, rs1_pass ? src : 0); + RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask); if (ret != RISCV_EXCP_NONE) { riscv_raise_exception(env, ret, GETPC()); @@ -61,21 +79,53 @@ target_ulong helper_csrrs(CPURISCVState *env, target_ulong src, return val; } -target_ulong helper_csrrc(CPURISCVState *env, target_ulong src, - target_ulong csr, target_ulong rs1_pass) +target_ulong helper_csrr_i128(CPURISCVState *env, int csr) { - target_ulong val = 0; - RISCVException ret = riscv_csrrw(env, csr, &val, 0, rs1_pass ? 
src : 0); + Int128 rv = int128_zero(); + RISCVException ret = riscv_csrrw_i128(env, csr, &rv, + int128_zero(), + int128_zero()); if (ret != RISCV_EXCP_NONE) { riscv_raise_exception(env, ret, GETPC()); } - return val; + + env->retxh = int128_gethi(rv); + return int128_getlo(rv); +} + +void helper_csrw_i128(CPURISCVState *env, int csr, + target_ulong srcl, target_ulong srch) +{ + RISCVException ret = riscv_csrrw_i128(env, csr, NULL, + int128_make128(srcl, srch), + UINT128_MAX); + + if (ret != RISCV_EXCP_NONE) { + riscv_raise_exception(env, ret, GETPC()); + } +} + +target_ulong helper_csrrw_i128(CPURISCVState *env, int csr, + target_ulong srcl, target_ulong srch, + target_ulong maskl, target_ulong maskh) +{ + Int128 rv = int128_zero(); + RISCVException ret = riscv_csrrw_i128(env, csr, &rv, + int128_make128(srcl, srch), + int128_make128(maskl, maskh)); + + if (ret != RISCV_EXCP_NONE) { + riscv_raise_exception(env, ret, GETPC()); + } + + env->retxh = int128_gethi(rv); + return int128_getlo(rv); } #ifndef CONFIG_USER_ONLY -target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb) +target_ulong helper_sret(CPURISCVState *env) { uint64_t mstatus; target_ulong prev_priv, prev_virt; @@ -136,7 +186,7 @@ target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb) return retpc; } -target_ulong helper_mret(CPURISCVState *env, target_ulong cpu_pc_deb) +target_ulong helper_mret(CPURISCVState *env) { if (!(env->priv >= PRV_M)) { riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); @@ -150,7 +200,8 @@ target_ulong helper_mret(CPURISCVState *env, target_ulong cpu_pc_deb) uint64_t mstatus = env->mstatus; target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP); - if (!pmp_get_num_rules(env) && (prev_priv != PRV_M)) { + if (riscv_feature(env, RISCV_FEATURE_PMP) && + !pmp_get_num_rules(env) && (prev_priv != PRV_M)) { riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); } diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c index 54abf42583..2b43e399b8 100644 --- a/target/riscv/pmp.c +++ b/target/riscv/pmp.c @@ -141,17 +141,9 @@ static void pmp_decode_napot(target_ulong a, target_ulong *sa, target_ulong *ea) 0111...1111 2^(XLEN+2)-byte NAPOT range 1111...1111 Reserved */ - if (a == -1) { - *sa = 0u; - *ea = -1; - return; - } else { - target_ulong t1 = ctz64(~a); - target_ulong base = (a & ~(((target_ulong)1 << t1) - 1)) << 2; - target_ulong range = ((target_ulong)1 << (t1 + 3)) - 1; - *sa = base; - *ea = base + range; - } + a = (a << 2) | 0x3; + *sa = a & (a + 1); + *ea = a | (a + 1); } void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index) @@ -175,6 +167,9 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index) case PMP_AMATCH_TOR: sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */ ea = (this_addr << 2) - 1u; + if (sa > ea) { + sa = ea = 0u; + } break; case PMP_AMATCH_NA4: @@ -463,16 +458,11 @@ void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index, { int i; uint8_t cfg_val; + int pmpcfg_nums = 2 << riscv_cpu_mxl(env); trace_pmpcfg_csr_write(env->mhartid, reg_index, val); - if ((reg_index & 1) && (sizeof(target_ulong) == 8)) { - qemu_log_mask(LOG_GUEST_ERROR, - "ignoring pmpcfg write - incorrect address\n"); - return; - } - - for (i = 0; i < sizeof(target_ulong); i++) { + for (i = 0; i < pmpcfg_nums; i++) { cfg_val = (val >> 8 * i) & 0xff; pmp_write_cfg(env, (reg_index * 4) + i, cfg_val); } @@ -490,8 +480,9 @@ target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index) int i; target_ulong cfg_val = 0; target_ulong val = 0; + 
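+    /*
+     * As in pmpcfg_csr_write() above, 2 << riscv_cpu_mxl(env) is the number
+     * of pmpcfg fields packed into one pmpcfg CSR: MXL_RV32 is 1 and
+     * MXL_RV64 is 2, so this evaluates to 4 bytes per register on RV32 and
+     * 8 on RV64.
+     */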
int pmpcfg_nums = 2 << riscv_cpu_mxl(env); - for (i = 0; i < sizeof(target_ulong); i++) { + for (i = 0; i < pmpcfg_nums; i++) { val = pmp_read_cfg(env, (reg_index * 4) + i); cfg_val |= (val << (i * 8)); } @@ -637,6 +628,18 @@ bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa, } if (*tlb_size != 0) { + /* + * At this point we have a tlb_size that is the smallest possible size + * That fits within a TARGET_PAGE_SIZE and the PMP region. + * + * If the size is less then TARGET_PAGE_SIZE we drop the size to 1. + * This means the result isn't cached in the TLB and is only used for + * a single translation. + */ + if (*tlb_size < TARGET_PAGE_SIZE) { + *tlb_size = 1; + } + return true; } diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h index a9a0b363a7..a8dd797476 100644 --- a/target/riscv/pmp.h +++ b/target/riscv/pmp.h @@ -22,6 +22,8 @@ #ifndef RISCV_PMP_H #define RISCV_PMP_H +#include "cpu.h" + typedef enum { PMP_READ = 1 << 0, PMP_WRITE = 1 << 1, @@ -37,9 +39,11 @@ typedef enum { } pmp_am_t; typedef enum { - MSECCFG_MML = 1 << 0, - MSECCFG_MMWP = 1 << 1, - MSECCFG_RLB = 1 << 2 + MSECCFG_MML = 1 << 0, + MSECCFG_MMWP = 1 << 1, + MSECCFG_RLB = 1 << 2, + MSECCFG_USEED = 1 << 8, + MSECCFG_SSEED = 1 << 9 } mseccfg_field_t; typedef struct { diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c new file mode 100644 index 0000000000..b8e56d2b7b --- /dev/null +++ b/target/riscv/pmu.c @@ -0,0 +1,453 @@ +/* + * RISC-V PMU file. + * + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "pmu.h" +#include "sysemu/cpu-timers.h" +#include "sysemu/device_tree.h" + +#define RISCV_TIMEBASE_FREQ 1000000000 /* 1Ghz */ +#define MAKE_32BIT_MASK(shift, length) \ + (((uint32_t)(~0UL) >> (32 - (length))) << (shift)) + +/* + * To keep it simple, any event can be mapped to any programmable counters in + * QEMU. The generic cycle & instruction count events can also be monitored + * using programmable counters. In that case, mcycle & minstret must continue + * to provide the correct value as well. Heterogeneous PMU per hart is not + * supported yet. Thus, number of counters are same across all harts. 
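+ * Counters 0 (cycle), 1 (time) and 2 (instret) are fixed by the ISA, which
+ * is why the programmable range starts at mhpmcounter3: see the
+ * MAKE_32BIT_MASK(3, ...) masks and the ctr_idx < 3 check below.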
+ */ +void riscv_pmu_generate_fdt_node(void *fdt, int num_ctrs, char *pmu_name) +{ + uint32_t fdt_event_ctr_map[20] = {}; + uint32_t cmask; + + /* All the programmable counters can map to any event */ + cmask = MAKE_32BIT_MASK(3, num_ctrs); + + /* + * The event encoding is specified in the SBI specification + * Event idx is a 20bits wide number encoded as follows: + * event_idx[19:16] = type + * event_idx[15:0] = code + * The code field in cache events are encoded as follows: + * event_idx.code[15:3] = cache_id + * event_idx.code[2:1] = op_id + * event_idx.code[0:0] = result_id + */ + + /* SBI_PMU_HW_CPU_CYCLES: 0x01 : type(0x00) */ + fdt_event_ctr_map[0] = cpu_to_be32(0x00000001); + fdt_event_ctr_map[1] = cpu_to_be32(0x00000001); + fdt_event_ctr_map[2] = cpu_to_be32(cmask | 1 << 0); + + /* SBI_PMU_HW_INSTRUCTIONS: 0x02 : type(0x00) */ + fdt_event_ctr_map[3] = cpu_to_be32(0x00000002); + fdt_event_ctr_map[4] = cpu_to_be32(0x00000002); + fdt_event_ctr_map[5] = cpu_to_be32(cmask | 1 << 2); + + /* SBI_PMU_HW_CACHE_DTLB : 0x03 READ : 0x00 MISS : 0x00 type(0x01) */ + fdt_event_ctr_map[6] = cpu_to_be32(0x00010019); + fdt_event_ctr_map[7] = cpu_to_be32(0x00010019); + fdt_event_ctr_map[8] = cpu_to_be32(cmask); + + /* SBI_PMU_HW_CACHE_DTLB : 0x03 WRITE : 0x01 MISS : 0x00 type(0x01) */ + fdt_event_ctr_map[9] = cpu_to_be32(0x0001001B); + fdt_event_ctr_map[10] = cpu_to_be32(0x0001001B); + fdt_event_ctr_map[11] = cpu_to_be32(cmask); + + /* SBI_PMU_HW_CACHE_ITLB : 0x04 READ : 0x00 MISS : 0x00 type(0x01) */ + fdt_event_ctr_map[12] = cpu_to_be32(0x00010021); + fdt_event_ctr_map[13] = cpu_to_be32(0x00010021); + fdt_event_ctr_map[14] = cpu_to_be32(cmask); + + /* This a OpenSBI specific DT property documented in OpenSBI docs */ + qemu_fdt_setprop(fdt, pmu_name, "riscv,event-to-mhpmcounters", + fdt_event_ctr_map, sizeof(fdt_event_ctr_map)); +} + +static bool riscv_pmu_counter_valid(RISCVCPU *cpu, uint32_t ctr_idx) +{ + if (ctr_idx < 3 || ctr_idx >= RV_MAX_MHPMCOUNTERS || + !(cpu->pmu_avail_ctrs & BIT(ctr_idx))) { + return false; + } else { + return true; + } +} + +static bool riscv_pmu_counter_enabled(RISCVCPU *cpu, uint32_t ctr_idx) +{ + CPURISCVState *env = &cpu->env; + + if (riscv_pmu_counter_valid(cpu, ctr_idx) && + !get_field(env->mcountinhibit, BIT(ctr_idx))) { + return true; + } else { + return false; + } +} + +static int riscv_pmu_incr_ctr_rv32(RISCVCPU *cpu, uint32_t ctr_idx) +{ + CPURISCVState *env = &cpu->env; + target_ulong max_val = UINT32_MAX; + PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; + bool virt_on = riscv_cpu_virt_enabled(env); + + /* Privilege mode filtering */ + if ((env->priv == PRV_M && + (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_MINH)) || + (env->priv == PRV_S && virt_on && + (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_VSINH)) || + (env->priv == PRV_U && virt_on && + (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_VUINH)) || + (env->priv == PRV_S && !virt_on && + (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_SINH)) || + (env->priv == PRV_U && !virt_on && + (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_UINH))) { + return 0; + } + + /* Handle the overflow scenario */ + if (counter->mhpmcounter_val == max_val) { + if (counter->mhpmcounterh_val == max_val) { + counter->mhpmcounter_val = 0; + counter->mhpmcounterh_val = 0; + /* Generate interrupt only if OF bit is clear */ + if (!(env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_OF)) { + env->mhpmeventh_val[ctr_idx] |= MHPMEVENTH_BIT_OF; + riscv_cpu_update_mip(cpu, MIP_LCOFIP, BOOL_TO_MASK(1)); + } + } else { + 
counter->mhpmcounterh_val++; + } + } else { + counter->mhpmcounter_val++; + } + + return 0; +} + +static int riscv_pmu_incr_ctr_rv64(RISCVCPU *cpu, uint32_t ctr_idx) +{ + CPURISCVState *env = &cpu->env; + PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; + uint64_t max_val = UINT64_MAX; + bool virt_on = riscv_cpu_virt_enabled(env); + + /* Privilege mode filtering */ + if ((env->priv == PRV_M && + (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_MINH)) || + (env->priv == PRV_S && virt_on && + (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_VSINH)) || + (env->priv == PRV_U && virt_on && + (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_VUINH)) || + (env->priv == PRV_S && !virt_on && + (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_SINH)) || + (env->priv == PRV_U && !virt_on && + (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_UINH))) { + return 0; + } + + /* Handle the overflow scenario */ + if (counter->mhpmcounter_val == max_val) { + counter->mhpmcounter_val = 0; + /* Generate interrupt only if OF bit is clear */ + if (!(env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_OF)) { + env->mhpmevent_val[ctr_idx] |= MHPMEVENT_BIT_OF; + riscv_cpu_update_mip(cpu, MIP_LCOFIP, BOOL_TO_MASK(1)); + } + } else { + counter->mhpmcounter_val++; + } + return 0; +} + +int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx) +{ + uint32_t ctr_idx; + int ret; + CPURISCVState *env = &cpu->env; + gpointer value; + + if (!cpu->cfg.pmu_num) { + return 0; + } + value = g_hash_table_lookup(cpu->pmu_event_ctr_map, + GUINT_TO_POINTER(event_idx)); + if (!value) { + return -1; + } + + ctr_idx = GPOINTER_TO_UINT(value); + if (!riscv_pmu_counter_enabled(cpu, ctr_idx) || + get_field(env->mcountinhibit, BIT(ctr_idx))) { + return -1; + } + + if (riscv_cpu_mxl(env) == MXL_RV32) { + ret = riscv_pmu_incr_ctr_rv32(cpu, ctr_idx); + } else { + ret = riscv_pmu_incr_ctr_rv64(cpu, ctr_idx); + } + + return ret; +} + +bool riscv_pmu_ctr_monitor_instructions(CPURISCVState *env, + uint32_t target_ctr) +{ + RISCVCPU *cpu; + uint32_t event_idx; + uint32_t ctr_idx; + + /* Fixed instret counter */ + if (target_ctr == 2) { + return true; + } + + cpu = RISCV_CPU(env_cpu(env)); + if (!cpu->pmu_event_ctr_map) { + return false; + } + + event_idx = RISCV_PMU_EVENT_HW_INSTRUCTIONS; + ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map, + GUINT_TO_POINTER(event_idx))); + if (!ctr_idx) { + return false; + } + + return target_ctr == ctr_idx ? true : false; +} + +bool riscv_pmu_ctr_monitor_cycles(CPURISCVState *env, uint32_t target_ctr) +{ + RISCVCPU *cpu; + uint32_t event_idx; + uint32_t ctr_idx; + + /* Fixed mcycle counter */ + if (target_ctr == 0) { + return true; + } + + cpu = RISCV_CPU(env_cpu(env)); + if (!cpu->pmu_event_ctr_map) { + return false; + } + + event_idx = RISCV_PMU_EVENT_HW_CPU_CYCLES; + ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map, + GUINT_TO_POINTER(event_idx))); + + /* Counter zero is not used for event_ctr_map */ + if (!ctr_idx) { + return false; + } + + return (target_ctr == ctr_idx) ? true : false; +} + +static gboolean pmu_remove_event_map(gpointer key, gpointer value, + gpointer udata) +{ + return (GPOINTER_TO_UINT(value) == GPOINTER_TO_UINT(udata)) ? 
true : false; +} + +static int64_t pmu_icount_ticks_to_ns(int64_t value) +{ + int64_t ret = 0; + + if (icount_enabled()) { + ret = icount_to_ns(value); + } else { + ret = (NANOSECONDS_PER_SECOND / RISCV_TIMEBASE_FREQ) * value; + } + + return ret; +} + +int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value, + uint32_t ctr_idx) +{ + uint32_t event_idx; + RISCVCPU *cpu = RISCV_CPU(env_cpu(env)); + + if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->pmu_event_ctr_map) { + return -1; + } + + /* + * Expected mhpmevent value is zero for reset case. Remove the current + * mapping. + */ + if (!value) { + g_hash_table_foreach_remove(cpu->pmu_event_ctr_map, + pmu_remove_event_map, + GUINT_TO_POINTER(ctr_idx)); + return 0; + } + + event_idx = value & MHPMEVENT_IDX_MASK; + if (g_hash_table_lookup(cpu->pmu_event_ctr_map, + GUINT_TO_POINTER(event_idx))) { + return 0; + } + + switch (event_idx) { + case RISCV_PMU_EVENT_HW_CPU_CYCLES: + case RISCV_PMU_EVENT_HW_INSTRUCTIONS: + case RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS: + case RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS: + case RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS: + break; + default: + /* We don't support any raw events right now */ + return -1; + } + g_hash_table_insert(cpu->pmu_event_ctr_map, GUINT_TO_POINTER(event_idx), + GUINT_TO_POINTER(ctr_idx)); + + return 0; +} + +static void pmu_timer_trigger_irq(RISCVCPU *cpu, + enum riscv_pmu_event_idx evt_idx) +{ + uint32_t ctr_idx; + CPURISCVState *env = &cpu->env; + PMUCTRState *counter; + target_ulong *mhpmevent_val; + uint64_t of_bit_mask; + int64_t irq_trigger_at; + + if (evt_idx != RISCV_PMU_EVENT_HW_CPU_CYCLES && + evt_idx != RISCV_PMU_EVENT_HW_INSTRUCTIONS) { + return; + } + + ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map, + GUINT_TO_POINTER(evt_idx))); + if (!riscv_pmu_counter_enabled(cpu, ctr_idx)) { + return; + } + + if (riscv_cpu_mxl(env) == MXL_RV32) { + mhpmevent_val = &env->mhpmeventh_val[ctr_idx]; + of_bit_mask = MHPMEVENTH_BIT_OF; + } else { + mhpmevent_val = &env->mhpmevent_val[ctr_idx]; + of_bit_mask = MHPMEVENT_BIT_OF; + } + + counter = &env->pmu_ctrs[ctr_idx]; + if (counter->irq_overflow_left > 0) { + irq_trigger_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + counter->irq_overflow_left; + timer_mod_anticipate_ns(cpu->pmu_timer, irq_trigger_at); + counter->irq_overflow_left = 0; + return; + } + + if (cpu->pmu_avail_ctrs & BIT(ctr_idx)) { + /* Generate interrupt only if OF bit is clear */ + if (!(*mhpmevent_val & of_bit_mask)) { + *mhpmevent_val |= of_bit_mask; + riscv_cpu_update_mip(cpu, MIP_LCOFIP, BOOL_TO_MASK(1)); + } + } +} + +/* Timer callback for instret and cycle counter overflow */ +void riscv_pmu_timer_cb(void *priv) +{ + RISCVCPU *cpu = priv; + + /* Timer event was triggered only for these events */ + pmu_timer_trigger_irq(cpu, RISCV_PMU_EVENT_HW_CPU_CYCLES); + pmu_timer_trigger_irq(cpu, RISCV_PMU_EVENT_HW_INSTRUCTIONS); +} + +int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx) +{ + uint64_t overflow_delta, overflow_at; + int64_t overflow_ns, overflow_left = 0; + RISCVCPU *cpu = RISCV_CPU(env_cpu(env)); + PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; + + if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->cfg.ext_sscofpmf) { + return -1; + } + + if (value) { + overflow_delta = UINT64_MAX - value + 1; + } else { + overflow_delta = UINT64_MAX; + } + + /* + * QEMU supports only int64_t timers while RISC-V counters are uint64_t. 
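+     * (For instance, a counter programmed to 1 overflows only after
+     * 2^64 - 1 ticks, which does not fit in a signed 64-bit deadline.)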
+ * Compute the leftover and save it so that it can be reprogrammed again + * when timer expires. + */ + if (overflow_delta > INT64_MAX) { + overflow_left = overflow_delta - INT64_MAX; + } + + if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || + riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) { + overflow_ns = pmu_icount_ticks_to_ns((int64_t)overflow_delta); + overflow_left = pmu_icount_ticks_to_ns(overflow_left) ; + } else { + return -1; + } + overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + overflow_ns; + + if (overflow_at > INT64_MAX) { + overflow_left += overflow_at - INT64_MAX; + counter->irq_overflow_left = overflow_left; + overflow_at = INT64_MAX; + } + timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); + + return 0; +} + + +int riscv_pmu_init(RISCVCPU *cpu, int num_counters) +{ + if (num_counters > (RV_MAX_MHPMCOUNTERS - 3)) { + return -1; + } + + cpu->pmu_event_ctr_map = g_hash_table_new(g_direct_hash, g_direct_equal); + if (!cpu->pmu_event_ctr_map) { + /* PMU support can not be enabled */ + qemu_log_mask(LOG_UNIMP, "PMU events can't be supported\n"); + cpu->cfg.pmu_num = 0; + return -1; + } + + /* Create a bitmask of available programmable counters */ + cpu->pmu_avail_ctrs = MAKE_32BIT_MASK(3, num_counters); + + return 0; +} diff --git a/target/riscv/pmu.h b/target/riscv/pmu.h new file mode 100644 index 0000000000..3004ce37b6 --- /dev/null +++ b/target/riscv/pmu.h @@ -0,0 +1,36 @@ +/* + * RISC-V PMU header file. + * + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "cpu.h" +#include "qemu/main-loop.h" +#include "exec/exec-all.h" + +bool riscv_pmu_ctr_monitor_instructions(CPURISCVState *env, + uint32_t target_ctr); +bool riscv_pmu_ctr_monitor_cycles(CPURISCVState *env, + uint32_t target_ctr); +void riscv_pmu_timer_cb(void *priv); +int riscv_pmu_init(RISCVCPU *cpu, int num_counters); +int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value, + uint32_t ctr_idx); +int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx); +void riscv_pmu_generate_fdt_node(void *fdt, int num_counters, char *pmu_name); +int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, + uint32_t ctr_idx); diff --git a/target/riscv/sbi_ecall_interface.h b/target/riscv/sbi_ecall_interface.h new file mode 100644 index 0000000000..77574ed4cb --- /dev/null +++ b/target/riscv/sbi_ecall_interface.h @@ -0,0 +1,72 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2019 Western Digital Corporation or its affiliates. 
+ * + * Authors: + * Anup Patel + */ + +#ifndef SBI_ECALL_INTERFACE_H +#define SBI_ECALL_INTERFACE_H + +/* clang-format off */ + +/* SBI Extension IDs */ +#define SBI_EXT_0_1_SET_TIMER 0x0 +#define SBI_EXT_0_1_CONSOLE_PUTCHAR 0x1 +#define SBI_EXT_0_1_CONSOLE_GETCHAR 0x2 +#define SBI_EXT_0_1_CLEAR_IPI 0x3 +#define SBI_EXT_0_1_SEND_IPI 0x4 +#define SBI_EXT_0_1_REMOTE_FENCE_I 0x5 +#define SBI_EXT_0_1_REMOTE_SFENCE_VMA 0x6 +#define SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID 0x7 +#define SBI_EXT_0_1_SHUTDOWN 0x8 +#define SBI_EXT_BASE 0x10 +#define SBI_EXT_TIME 0x54494D45 +#define SBI_EXT_IPI 0x735049 +#define SBI_EXT_RFENCE 0x52464E43 +#define SBI_EXT_HSM 0x48534D + +/* SBI function IDs for BASE extension*/ +#define SBI_EXT_BASE_GET_SPEC_VERSION 0x0 +#define SBI_EXT_BASE_GET_IMP_ID 0x1 +#define SBI_EXT_BASE_GET_IMP_VERSION 0x2 +#define SBI_EXT_BASE_PROBE_EXT 0x3 +#define SBI_EXT_BASE_GET_MVENDORID 0x4 +#define SBI_EXT_BASE_GET_MARCHID 0x5 +#define SBI_EXT_BASE_GET_MIMPID 0x6 + +/* SBI function IDs for TIME extension*/ +#define SBI_EXT_TIME_SET_TIMER 0x0 + +/* SBI function IDs for IPI extension*/ +#define SBI_EXT_IPI_SEND_IPI 0x0 + +/* SBI function IDs for RFENCE extension*/ +#define SBI_EXT_RFENCE_REMOTE_FENCE_I 0x0 +#define SBI_EXT_RFENCE_REMOTE_SFENCE_VMA 0x1 +#define SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID 0x2 +#define SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA 0x3 +#define SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID 0x4 +#define SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA 0x5 +#define SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID 0x6 + +/* SBI function IDs for HSM extension */ +#define SBI_EXT_HSM_HART_START 0x0 +#define SBI_EXT_HSM_HART_STOP 0x1 +#define SBI_EXT_HSM_HART_GET_STATUS 0x2 + +#define SBI_HSM_HART_STATUS_STARTED 0x0 +#define SBI_HSM_HART_STATUS_STOPPED 0x1 +#define SBI_HSM_HART_STATUS_START_PENDING 0x2 +#define SBI_HSM_HART_STATUS_STOP_PENDING 0x3 + +#define SBI_SPEC_VERSION_MAJOR_OFFSET 24 +#define SBI_SPEC_VERSION_MAJOR_MASK 0x7f +#define SBI_SPEC_VERSION_MINOR_MASK 0xffffff +#define SBI_EXT_VENDOR_START 0x09000000 +#define SBI_EXT_VENDOR_END 0x09FFFFFF +/* clang-format on */ + +#endif diff --git a/target/riscv/time_helper.c b/target/riscv/time_helper.c new file mode 100644 index 0000000000..8cce667dfd --- /dev/null +++ b/target/riscv/time_helper.c @@ -0,0 +1,114 @@ +/* + * RISC-V timer helper implementation. + * + * Copyright (c) 2022 Rivos Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "cpu_bits.h" +#include "time_helper.h" +#include "hw/intc/riscv_aclint.h" + +static void riscv_vstimer_cb(void *opaque) +{ + RISCVCPU *cpu = opaque; + CPURISCVState *env = &cpu->env; + env->vstime_irq = 1; + riscv_cpu_update_mip(cpu, MIP_VSTIP, BOOL_TO_MASK(1)); +} + +static void riscv_stimer_cb(void *opaque) +{ + RISCVCPU *cpu = opaque; + riscv_cpu_update_mip(cpu, MIP_STIP, BOOL_TO_MASK(1)); +} + +/* + * Called when timecmp is written to update the QEMU timer or immediately + * trigger timer interrupt if mtimecmp <= current timer value. + */ +void riscv_timer_write_timecmp(RISCVCPU *cpu, QEMUTimer *timer, + uint64_t timecmp, uint64_t delta, + uint32_t timer_irq) +{ + uint64_t diff, ns_diff, next; + CPURISCVState *env = &cpu->env; + RISCVAclintMTimerState *mtimer = env->rdtime_fn_arg; + uint32_t timebase_freq = mtimer->timebase_freq; + uint64_t rtc_r = env->rdtime_fn(env->rdtime_fn_arg) + delta; + + if (timecmp <= rtc_r) { + /* + * If we're setting an stimecmp value in the "past", + * immediately raise the timer interrupt + */ + if (timer_irq == MIP_VSTIP) { + env->vstime_irq = 1; + } + riscv_cpu_update_mip(cpu, timer_irq, BOOL_TO_MASK(1)); + return; + } + + if (timer_irq == MIP_VSTIP) { + env->vstime_irq = 0; + } + /* Clear the [V]STIP bit in mip */ + riscv_cpu_update_mip(cpu, timer_irq, BOOL_TO_MASK(0)); + + /* otherwise, set up the future timer interrupt */ + diff = timecmp - rtc_r; + /* back to ns (note args switched in muldiv64) */ + ns_diff = muldiv64(diff, NANOSECONDS_PER_SECOND, timebase_freq); + + /* + * check if ns_diff overflowed and check if the addition would potentially + * overflow + */ + if ((NANOSECONDS_PER_SECOND > timebase_freq && ns_diff < diff) || + ns_diff > INT64_MAX) { + next = INT64_MAX; + } else { + /* + * as it is very unlikely qemu_clock_get_ns will return a value + * greater than INT64_MAX, no additional check is needed for an + * unsigned integer overflow. + */ + next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ns_diff; + /* + * if ns_diff is INT64_MAX next may still be outside the range + * of a signed integer. + */ + next = MIN(next, INT64_MAX); + } + + timer_mod(timer, next); +} + +void riscv_timer_init(RISCVCPU *cpu) +{ + CPURISCVState *env; + + if (!cpu) { + return; + } + + env = &cpu->env; + env->stimer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &riscv_stimer_cb, cpu); + env->stimecmp = 0; + + env->vstimer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &riscv_vstimer_cb, cpu); + env->vstimecmp = 0; +} diff --git a/target/riscv/time_helper.h b/target/riscv/time_helper.h new file mode 100644 index 0000000000..7b3cdcc350 --- /dev/null +++ b/target/riscv/time_helper.h @@ -0,0 +1,30 @@ +/* + * RISC-V timer header file. + * + * Copyright (c) 2022 Rivos Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#ifndef RISCV_TIME_HELPER_H +#define RISCV_TIME_HELPER_H + +#include "cpu.h" +#include "qemu/timer.h" + +void riscv_timer_write_timecmp(RISCVCPU *cpu, QEMUTimer *timer, + uint64_t timecmp, uint64_t delta, + uint32_t timer_irq); +void riscv_timer_init(RISCVCPU *cpu); + +#endif diff --git a/target/riscv/translate.c b/target/riscv/translate.c index 6983be5723..1ed4bb5ec3 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -28,26 +28,46 @@ #include "exec/translator.h" #include "exec/log.h" +#include "semihosting/semihost.h" #include "instmap.h" +#include "internals.h" /* global register indices */ -static TCGv cpu_gpr[32], cpu_pc, cpu_vl; +static TCGv cpu_gpr[32], cpu_gprh[32], cpu_pc, cpu_vl, cpu_vstart; static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */ static TCGv load_res; static TCGv load_val; +/* globals for PM CSRs */ +static TCGv pm_mask; +static TCGv pm_base; #include "exec/gen-icount.h" +/* + * If an operation is being performed on less than TARGET_LONG_BITS, + * it may require the inputs to be sign- or zero-extended; which will + * depend on the exact operation being performed. + */ +typedef enum { + EXT_NONE, + EXT_SIGN, + EXT_ZERO, +} DisasExtend; + typedef struct DisasContext { DisasContextBase base; /* pc_succ_insn points to the instruction following base.pc_next */ target_ulong pc_succ_insn; target_ulong priv_ver; - bool virt_enabled; + RISCVMXL misa_mxl_max; + RISCVMXL xl; + uint32_t misa_ext; uint32_t opcode; uint32_t mstatus_fs; - target_ulong misa; + uint32_t mstatus_vs; + uint32_t mstatus_hs_fs; + uint32_t mstatus_hs_vs; uint32_t mem_idx; /* Remember the rounding mode encoded in the previous fp instruction, which we have already installed into env->fp_status. Or -1 for @@ -55,32 +75,95 @@ typedef struct DisasContext { to any system register, which includes CSR_FRM, so we do not have to reset this known value. */ int frm; - bool ext_ifencei; + RISCVMXL ol; + bool virt_enabled; + const RISCVCPUConfig *cfg_ptr; bool hlsx; /* vector extension */ bool vill; - uint8_t lmul; + /* + * Encode LMUL to lmul as follows: + * LMUL vlmul lmul + * 1 000 0 + * 2 001 1 + * 4 010 2 + * 8 011 3 + * - 100 - + * 1/8 101 -3 + * 1/4 110 -2 + * 1/2 111 -1 + */ + int8_t lmul; uint8_t sew; - uint16_t vlen; - uint16_t mlen; + uint8_t vta; + uint8_t vma; + bool cfg_vta_all_1s; + target_ulong vstart; bool vl_eq_vlmax; + uint8_t ntemp; CPUState *cs; + TCGv zero; + /* Space for 3 operands plus 1 extra for address computation. 
*/ + TCGv temp[4]; + /* Space for 4 operands(1 dest and <=3 src) for float point computation */ + TCGv_i64 ftemp[4]; + uint8_t nftemp; + /* PointerMasking extension */ + bool pm_mask_enabled; + bool pm_base_enabled; + /* TCG of the current insn_start */ + TCGOp *insn_start; } DisasContext; static inline bool has_ext(DisasContext *ctx, uint32_t ext) { - return ctx->misa & ext; + return ctx->misa_ext & ext; } -#ifdef TARGET_RISCV32 -# define is_32bit(ctx) true -#elif defined(CONFIG_USER_ONLY) -# define is_32bit(ctx) false -#else -static inline bool is_32bit(DisasContext *ctx) +static bool always_true_p(DisasContext *ctx __attribute__((__unused__))) { - return (ctx->misa & RV32) == RV32; + return true; } + +#define MATERIALISE_EXT_PREDICATE(ext) \ + static bool has_ ## ext ## _p(DisasContext *ctx) \ + { \ + return ctx->cfg_ptr->ext_ ## ext ; \ + } + +MATERIALISE_EXT_PREDICATE(XVentanaCondOps); + +#ifdef TARGET_RISCV32 +#define get_xl(ctx) MXL_RV32 +#elif defined(CONFIG_USER_ONLY) +#define get_xl(ctx) MXL_RV64 +#else +#define get_xl(ctx) ((ctx)->xl) +#endif + +/* The word size for this machine mode. */ +static inline int __attribute__((unused)) get_xlen(DisasContext *ctx) +{ + return 16 << get_xl(ctx); +} + +/* The operation length, as opposed to the xlen. */ +#ifdef TARGET_RISCV32 +#define get_ol(ctx) MXL_RV32 +#else +#define get_ol(ctx) ((ctx)->ol) +#endif + +static inline int get_olen(DisasContext *ctx) +{ + return 16 << get_ol(ctx); +} + +/* The maximum register length */ +#ifdef TARGET_RISCV32 +#define get_xl_max(ctx) MXL_RV32 +#else +#define get_xl_max(ctx) ((ctx)->misa_mxl_max) #endif /* @@ -94,6 +177,11 @@ static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in) tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32)); } +static void gen_nanbox_h(TCGv_i64 out, TCGv_i64 in) +{ + tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(16, 48)); +} + /* * A narrow n-bit operation, where n < FLEN, checks that input operands * are correctly Nan-boxed, i.e., all upper FLEN - n bits are 1. @@ -102,229 +190,334 @@ static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in) * * Here, the result is always nan-boxed, even the canonical nan. 
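 * For the half-precision helper below, for example, a properly boxed value
 * looks like 0xffffffffffffXXXX; anything below t_max (0xffffffffffff0000)
 * is replaced with the boxed canonical NaN 0xffffffffffff7e00.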
*/ -static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in) +static void gen_check_nanbox_h(TCGv_i64 out, TCGv_i64 in) { - TCGv_i64 t_max = tcg_const_i64(0xffffffff00000000ull); - TCGv_i64 t_nan = tcg_const_i64(0xffffffff7fc00000ull); + TCGv_i64 t_max = tcg_const_i64(0xffffffffffff0000ull); + TCGv_i64 t_nan = tcg_const_i64(0xffffffffffff7e00ull); tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan); tcg_temp_free_i64(t_max); tcg_temp_free_i64(t_nan); } +static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in) +{ + TCGv_i64 t_max = tcg_constant_i64(0xffffffff00000000ull); + TCGv_i64 t_nan = tcg_constant_i64(0xffffffff7fc00000ull); + + tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan); +} + +static void decode_save_opc(DisasContext *ctx) +{ + assert(ctx->insn_start != NULL); + tcg_set_insn_start_param(ctx->insn_start, 1, ctx->opcode); + ctx->insn_start = NULL; +} + +static void gen_set_pc_imm(DisasContext *ctx, target_ulong dest) +{ + if (get_xl(ctx) == MXL_RV32) { + dest = (int32_t)dest; + } + tcg_gen_movi_tl(cpu_pc, dest); +} + +static void gen_set_pc(DisasContext *ctx, TCGv dest) +{ + if (get_xl(ctx) == MXL_RV32) { + tcg_gen_ext32s_tl(cpu_pc, dest); + } else { + tcg_gen_mov_tl(cpu_pc, dest); + } +} + static void generate_exception(DisasContext *ctx, int excp) { - tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next); - TCGv_i32 helper_tmp = tcg_const_i32(excp); - gen_helper_raise_exception(cpu_env, helper_tmp); - tcg_temp_free_i32(helper_tmp); + gen_set_pc_imm(ctx, ctx->base.pc_next); + gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp)); ctx->base.is_jmp = DISAS_NORETURN; } -static void generate_exception_mtval(DisasContext *ctx, int excp) -{ - tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next); - tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr)); - TCGv_i32 helper_tmp = tcg_const_i32(excp); - gen_helper_raise_exception(cpu_env, helper_tmp); - tcg_temp_free_i32(helper_tmp); - ctx->base.is_jmp = DISAS_NORETURN; -} - -static void gen_exception_debug(void) -{ - TCGv_i32 helper_tmp = tcg_const_i32(EXCP_DEBUG); - gen_helper_raise_exception(cpu_env, helper_tmp); - tcg_temp_free_i32(helper_tmp); -} - -/* Wrapper around tcg_gen_exit_tb that handles single stepping */ -static void exit_tb(DisasContext *ctx) -{ - if (ctx->base.singlestep_enabled) { - gen_exception_debug(); - } else { - tcg_gen_exit_tb(NULL, 0); - } -} - -/* Wrapper around tcg_gen_lookup_and_goto_ptr that handles single stepping */ -static void lookup_and_goto_ptr(DisasContext *ctx) -{ - if (ctx->base.singlestep_enabled) { - gen_exception_debug(); - } else { - tcg_gen_lookup_and_goto_ptr(); - } -} - static void gen_exception_illegal(DisasContext *ctx) { + tcg_gen_st_i32(tcg_constant_i32(ctx->opcode), cpu_env, + offsetof(CPURISCVState, bins)); generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST); } static void gen_exception_inst_addr_mis(DisasContext *ctx) { - generate_exception_mtval(ctx, RISCV_EXCP_INST_ADDR_MIS); + tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr)); + generate_exception(ctx, RISCV_EXCP_INST_ADDR_MIS); } static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) { if (translator_use_goto_tb(&ctx->base, dest)) { tcg_gen_goto_tb(n); - tcg_gen_movi_tl(cpu_pc, dest); + gen_set_pc_imm(ctx, dest); tcg_gen_exit_tb(ctx->base.tb, n); } else { - tcg_gen_movi_tl(cpu_pc, dest); - lookup_and_goto_ptr(ctx); + gen_set_pc_imm(ctx, dest); + tcg_gen_lookup_and_goto_ptr(); } } -/* Wrapper for getting reg values - need to check of reg is zero since - * cpu_gpr[0] is not actually allocated 
+/* + * Wrappers for getting reg values. + * + * The $zero register does not have cpu_gpr[0] allocated -- we supply the + * constant zero as a source, and an uninitialized sink as destination. + * + * Further, we may provide an extension for word operations. */ -static inline void gen_get_gpr(TCGv t, int reg_num) +static TCGv temp_new(DisasContext *ctx) +{ + assert(ctx->ntemp < ARRAY_SIZE(ctx->temp)); + return ctx->temp[ctx->ntemp++] = tcg_temp_new(); +} + +static TCGv get_gpr(DisasContext *ctx, int reg_num, DisasExtend ext) +{ + TCGv t; + + if (reg_num == 0) { + return ctx->zero; + } + + switch (get_ol(ctx)) { + case MXL_RV32: + switch (ext) { + case EXT_NONE: + break; + case EXT_SIGN: + t = temp_new(ctx); + tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]); + return t; + case EXT_ZERO: + t = temp_new(ctx); + tcg_gen_ext32u_tl(t, cpu_gpr[reg_num]); + return t; + default: + g_assert_not_reached(); + } + break; + case MXL_RV64: + case MXL_RV128: + break; + default: + g_assert_not_reached(); + } + return cpu_gpr[reg_num]; +} + +static TCGv get_gprh(DisasContext *ctx, int reg_num) +{ + assert(get_xl(ctx) == MXL_RV128); + if (reg_num == 0) { + return ctx->zero; + } + return cpu_gprh[reg_num]; +} + +static TCGv dest_gpr(DisasContext *ctx, int reg_num) +{ + if (reg_num == 0 || get_olen(ctx) < TARGET_LONG_BITS) { + return temp_new(ctx); + } + return cpu_gpr[reg_num]; +} + +static TCGv dest_gprh(DisasContext *ctx, int reg_num) { if (reg_num == 0) { - tcg_gen_movi_tl(t, 0); - } else { - tcg_gen_mov_tl(t, cpu_gpr[reg_num]); + return temp_new(ctx); + } + return cpu_gprh[reg_num]; +} + +static void gen_set_gpr(DisasContext *ctx, int reg_num, TCGv t) +{ + if (reg_num != 0) { + switch (get_ol(ctx)) { + case MXL_RV32: + tcg_gen_ext32s_tl(cpu_gpr[reg_num], t); + break; + case MXL_RV64: + case MXL_RV128: + tcg_gen_mov_tl(cpu_gpr[reg_num], t); + break; + default: + g_assert_not_reached(); + } + + if (get_xl_max(ctx) == MXL_RV128) { + tcg_gen_sari_tl(cpu_gprh[reg_num], cpu_gpr[reg_num], 63); + } } } -/* Wrapper for setting reg values - need to check of reg is zero since - * cpu_gpr[0] is not actually allocated. 
this is more for safety purposes, - * since we usually avoid calling the OP_TYPE_gen function if we see a write to - * $zero - */ -static inline void gen_set_gpr(int reg_num_dst, TCGv t) +static void gen_set_gpri(DisasContext *ctx, int reg_num, target_long imm) { - if (reg_num_dst != 0) { - tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t); + if (reg_num != 0) { + switch (get_ol(ctx)) { + case MXL_RV32: + tcg_gen_movi_tl(cpu_gpr[reg_num], (int32_t)imm); + break; + case MXL_RV64: + case MXL_RV128: + tcg_gen_movi_tl(cpu_gpr[reg_num], imm); + break; + default: + g_assert_not_reached(); + } + + if (get_xl_max(ctx) == MXL_RV128) { + tcg_gen_movi_tl(cpu_gprh[reg_num], -(imm < 0)); + } } } -static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2) +static void gen_set_gpr128(DisasContext *ctx, int reg_num, TCGv rl, TCGv rh) { - TCGv rl = tcg_temp_new(); - TCGv rh = tcg_temp_new(); - - tcg_gen_mulu2_tl(rl, rh, arg1, arg2); - /* fix up for one negative */ - tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1); - tcg_gen_and_tl(rl, rl, arg2); - tcg_gen_sub_tl(ret, rh, rl); - - tcg_temp_free(rl); - tcg_temp_free(rh); + assert(get_ol(ctx) == MXL_RV128); + if (reg_num != 0) { + tcg_gen_mov_tl(cpu_gpr[reg_num], rl); + tcg_gen_mov_tl(cpu_gprh[reg_num], rh); + } } -static void gen_div(TCGv ret, TCGv source1, TCGv source2) +static TCGv_i64 ftemp_new(DisasContext *ctx) { - TCGv cond1, cond2, zeroreg, resultopt1; - /* - * Handle by altering args to tcg_gen_div to produce req'd results: - * For overflow: want source1 in source1 and 1 in source2 - * For div by zero: want -1 in source1 and 1 in source2 -> -1 result - */ - cond1 = tcg_temp_new(); - cond2 = tcg_temp_new(); - zeroreg = tcg_const_tl(0); - resultopt1 = tcg_temp_new(); - - tcg_gen_movi_tl(resultopt1, (target_ulong)-1); - tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)(~0L)); - tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1, - ((target_ulong)1) << (TARGET_LONG_BITS - 1)); - tcg_gen_and_tl(cond1, cond1, cond2); /* cond1 = overflow */ - tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */ - /* if div by zero, set source1 to -1, otherwise don't change */ - tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond2, zeroreg, source1, - resultopt1); - /* if overflow or div by zero, set source2 to 1, else don't change */ - tcg_gen_or_tl(cond1, cond1, cond2); - tcg_gen_movi_tl(resultopt1, (target_ulong)1); - tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2, - resultopt1); - tcg_gen_div_tl(ret, source1, source2); - - tcg_temp_free(cond1); - tcg_temp_free(cond2); - tcg_temp_free(zeroreg); - tcg_temp_free(resultopt1); + assert(ctx->nftemp < ARRAY_SIZE(ctx->ftemp)); + return ctx->ftemp[ctx->nftemp++] = tcg_temp_new_i64(); } -static void gen_divu(TCGv ret, TCGv source1, TCGv source2) +static TCGv_i64 get_fpr_hs(DisasContext *ctx, int reg_num) { - TCGv cond1, zeroreg, resultopt1; - cond1 = tcg_temp_new(); + if (!ctx->cfg_ptr->ext_zfinx) { + return cpu_fpr[reg_num]; + } - zeroreg = tcg_const_tl(0); - resultopt1 = tcg_temp_new(); - - tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); - tcg_gen_movi_tl(resultopt1, (target_ulong)-1); - tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, source1, - resultopt1); - tcg_gen_movi_tl(resultopt1, (target_ulong)1); - tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2, - resultopt1); - tcg_gen_divu_tl(ret, source1, source2); - - tcg_temp_free(cond1); - tcg_temp_free(zeroreg); - tcg_temp_free(resultopt1); + if (reg_num == 0) { + return tcg_constant_i64(0); + } + switch (get_xl(ctx)) { 
+ case MXL_RV32: +#ifdef TARGET_RISCV32 + { + TCGv_i64 t = ftemp_new(ctx); + tcg_gen_ext_i32_i64(t, cpu_gpr[reg_num]); + return t; + } +#else + /* fall through */ + case MXL_RV64: + return cpu_gpr[reg_num]; +#endif + default: + g_assert_not_reached(); + } } -static void gen_rem(TCGv ret, TCGv source1, TCGv source2) +static TCGv_i64 get_fpr_d(DisasContext *ctx, int reg_num) { - TCGv cond1, cond2, zeroreg, resultopt1; + if (!ctx->cfg_ptr->ext_zfinx) { + return cpu_fpr[reg_num]; + } - cond1 = tcg_temp_new(); - cond2 = tcg_temp_new(); - zeroreg = tcg_const_tl(0); - resultopt1 = tcg_temp_new(); - - tcg_gen_movi_tl(resultopt1, 1L); - tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)-1); - tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1, - (target_ulong)1 << (TARGET_LONG_BITS - 1)); - tcg_gen_and_tl(cond2, cond1, cond2); /* cond1 = overflow */ - tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond2 = div 0 */ - /* if overflow or div by zero, set source2 to 1, else don't change */ - tcg_gen_or_tl(cond2, cond1, cond2); - tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond2, zeroreg, source2, - resultopt1); - tcg_gen_rem_tl(resultopt1, source1, source2); - /* if div by zero, just return the original dividend */ - tcg_gen_movcond_tl(TCG_COND_EQ, ret, cond1, zeroreg, resultopt1, - source1); - - tcg_temp_free(cond1); - tcg_temp_free(cond2); - tcg_temp_free(zeroreg); - tcg_temp_free(resultopt1); + if (reg_num == 0) { + return tcg_constant_i64(0); + } + switch (get_xl(ctx)) { + case MXL_RV32: + { + TCGv_i64 t = ftemp_new(ctx); + tcg_gen_concat_tl_i64(t, cpu_gpr[reg_num], cpu_gpr[reg_num + 1]); + return t; + } +#ifdef TARGET_RISCV64 + case MXL_RV64: + return cpu_gpr[reg_num]; +#endif + default: + g_assert_not_reached(); + } } -static void gen_remu(TCGv ret, TCGv source1, TCGv source2) +static TCGv_i64 dest_fpr(DisasContext *ctx, int reg_num) { - TCGv cond1, zeroreg, resultopt1; - cond1 = tcg_temp_new(); - zeroreg = tcg_const_tl(0); - resultopt1 = tcg_temp_new(); + if (!ctx->cfg_ptr->ext_zfinx) { + return cpu_fpr[reg_num]; + } - tcg_gen_movi_tl(resultopt1, (target_ulong)1); - tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); - tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2, - resultopt1); - tcg_gen_remu_tl(resultopt1, source1, source2); - /* if div by zero, just return the original dividend */ - tcg_gen_movcond_tl(TCG_COND_EQ, ret, cond1, zeroreg, resultopt1, - source1); + if (reg_num == 0) { + return ftemp_new(ctx); + } - tcg_temp_free(cond1); - tcg_temp_free(zeroreg); - tcg_temp_free(resultopt1); + switch (get_xl(ctx)) { + case MXL_RV32: + return ftemp_new(ctx); +#ifdef TARGET_RISCV64 + case MXL_RV64: + return cpu_gpr[reg_num]; +#endif + default: + g_assert_not_reached(); + } +} + +/* assume t is nanboxing (for normal) or sign-extended (for zfinx) */ +static void gen_set_fpr_hs(DisasContext *ctx, int reg_num, TCGv_i64 t) +{ + if (!ctx->cfg_ptr->ext_zfinx) { + tcg_gen_mov_i64(cpu_fpr[reg_num], t); + return; + } + if (reg_num != 0) { + switch (get_xl(ctx)) { + case MXL_RV32: +#ifdef TARGET_RISCV32 + tcg_gen_extrl_i64_i32(cpu_gpr[reg_num], t); + break; +#else + /* fall through */ + case MXL_RV64: + tcg_gen_mov_i64(cpu_gpr[reg_num], t); + break; +#endif + default: + g_assert_not_reached(); + } + } +} + +static void gen_set_fpr_d(DisasContext *ctx, int reg_num, TCGv_i64 t) +{ + if (!ctx->cfg_ptr->ext_zfinx) { + tcg_gen_mov_i64(cpu_fpr[reg_num], t); + return; + } + + if (reg_num != 0) { + switch (get_xl(ctx)) { + case MXL_RV32: +#ifdef TARGET_RISCV32 + 
tcg_gen_extr_i64_i32(cpu_gpr[reg_num], cpu_gpr[reg_num + 1], t); + break; +#else + tcg_gen_ext32s_i64(cpu_gpr[reg_num], t); + tcg_gen_sari_i64(cpu_gpr[reg_num + 1], t, 32); + break; + case MXL_RV64: + tcg_gen_mov_i64(cpu_gpr[reg_num], t); + break; +#endif + default: + g_assert_not_reached(); + } + } } static void gen_jal(DisasContext *ctx, int rd, target_ulong imm) @@ -339,14 +532,30 @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm) return; } } - if (rd != 0) { - tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn); - } + gen_set_gpri(ctx, rd, ctx->pc_succ_insn); gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use this for safety */ ctx->base.is_jmp = DISAS_NORETURN; } +/* Compute a canonical address from a register plus offset. */ +static TCGv get_address(DisasContext *ctx, int rs1, int imm) +{ + TCGv addr = temp_new(ctx); + TCGv src1 = get_gpr(ctx, rs1, EXT_NONE); + + tcg_gen_addi_tl(addr, src1, imm); + if (ctx->pm_mask_enabled) { + tcg_gen_andc_tl(addr, addr, pm_mask); + } else if (get_xl(ctx) == MXL_RV32) { + tcg_gen_ext32u_tl(addr, addr); + } + if (ctx->pm_base_enabled) { + tcg_gen_or_tl(addr, addr, pm_base); + } + return addr; +} + #ifndef CONFIG_USER_ONLY /* The states of mstatus_fs are: * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty @@ -356,43 +565,88 @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm) static void mark_fs_dirty(DisasContext *ctx) { TCGv tmp; - target_ulong sd; - if (ctx->mstatus_fs == MSTATUS_FS) { + if (!has_ext(ctx, RVF)) { return; } - /* Remember the state change for the rest of the TB. */ - ctx->mstatus_fs = MSTATUS_FS; - tmp = tcg_temp_new(); - sd = is_32bit(ctx) ? MSTATUS32_SD : MSTATUS64_SD; + if (ctx->mstatus_fs != MSTATUS_FS) { + /* Remember the state change for the rest of the TB. */ + ctx->mstatus_fs = MSTATUS_FS; - tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); - tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | sd); - tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); - - if (ctx->virt_enabled) { - tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); - tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | sd); - tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); + tmp = tcg_temp_new(); + tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); + tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS); + tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); + tcg_temp_free(tmp); + } + + if (ctx->virt_enabled && ctx->mstatus_hs_fs != MSTATUS_FS) { + /* Remember the stage change for the rest of the TB. */ + ctx->mstatus_hs_fs = MSTATUS_FS; + + tmp = tcg_temp_new(); + tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); + tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS); + tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); + tcg_temp_free(tmp); } - tcg_temp_free(tmp); } #else static inline void mark_fs_dirty(DisasContext *ctx) { } #endif +#ifndef CONFIG_USER_ONLY +/* The states of mstatus_vs are: + * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty + * We will have already diagnosed disabled state, + * and need to turn initial/clean into dirty. + */ +static void mark_vs_dirty(DisasContext *ctx) +{ + TCGv tmp; + + if (ctx->mstatus_vs != MSTATUS_VS) { + /* Remember the state change for the rest of the TB. 
*/ + ctx->mstatus_vs = MSTATUS_VS; + + tmp = tcg_temp_new(); + tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); + tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS); + tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); + tcg_temp_free(tmp); + } + + if (ctx->virt_enabled && ctx->mstatus_hs_vs != MSTATUS_VS) { + /* Remember the stage change for the rest of the TB. */ + ctx->mstatus_hs_vs = MSTATUS_VS; + + tmp = tcg_temp_new(); + tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); + tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS); + tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); + tcg_temp_free(tmp); + } +} +#else +static inline void mark_vs_dirty(DisasContext *ctx) { } +#endif + static void gen_set_rm(DisasContext *ctx, int rm) { - TCGv_i32 t0; - if (ctx->frm == rm) { return; } ctx->frm = rm; - t0 = tcg_const_i32(rm); - gen_helper_set_rounding_mode(cpu_env, t0); - tcg_temp_free_i32(t0); + + if (rm == RISCV_FRM_ROD) { + gen_helper_set_rod_rounding_mode(cpu_env); + return; + } + + /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */ + decode_save_opc(ctx); + gen_helper_set_rounding_mode(cpu_env, tcg_constant_i32(rm)); } static int ex_plus_1(DisasContext *ctx, int nf) @@ -417,10 +671,35 @@ EX_SH(12) } \ } while (0) -#define REQUIRE_64BIT(ctx) do { \ - if (is_32bit(ctx)) { \ - return false; \ - } \ +#define REQUIRE_32BIT(ctx) do { \ + if (get_xl(ctx) != MXL_RV32) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_64BIT(ctx) do { \ + if (get_xl(ctx) != MXL_RV64) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_128BIT(ctx) do { \ + if (get_xl(ctx) != MXL_RV128) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_64_OR_128BIT(ctx) do { \ + if (get_xl(ctx) == MXL_RV32) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_EITHER_EXT(ctx, A, B) do { \ + if (!ctx->cfg_ptr->ext_##A && \ + !ctx->cfg_ptr->ext_##B) { \ + return false; \ + } \ } while (0) static int ex_rvc_register(DisasContext *ctx, int reg) @@ -428,367 +707,310 @@ static int ex_rvc_register(DisasContext *ctx, int reg) return 8 + reg; } -static int ex_rvc_shifti(DisasContext *ctx, int imm) +static int ex_rvc_shiftli(DisasContext *ctx, int imm) { /* For RV128 a shamt of 0 means a shift by 64. */ - return imm ? imm : 64; + if (get_ol(ctx) == MXL_RV128) { + imm = imm ? imm : 64; + } + return imm; +} + +static int ex_rvc_shiftri(DisasContext *ctx, int imm) +{ + /* + * For RV128 a shamt of 0 means a shift by 64, furthermore, for right + * shifts, the shamt is sign-extended. + */ + if (get_ol(ctx) == MXL_RV128) { + imm = imm | (imm & 32) << 1; + imm = imm ? 
imm : 64; + } + return imm; } /* Include the auto-generated decoder for 32 bit insn */ #include "decode-insn32.c.inc" -static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a, +static bool gen_logic_imm_fn(DisasContext *ctx, arg_i *a, void (*func)(TCGv, TCGv, target_long)) { - TCGv source1; - source1 = tcg_temp_new(); + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE); - gen_get_gpr(source1, a->rs1); + func(dest, src1, a->imm); - (*func)(source1, source1, a->imm); + if (get_xl(ctx) == MXL_RV128) { + TCGv src1h = get_gprh(ctx, a->rs1); + TCGv desth = dest_gprh(ctx, a->rd); - gen_set_gpr(a->rd, source1); - tcg_temp_free(source1); - return true; -} - -static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a, - void (*func)(TCGv, TCGv, TCGv)) -{ - TCGv source1, source2; - source1 = tcg_temp_new(); - source2 = tcg_temp_new(); - - gen_get_gpr(source1, a->rs1); - tcg_gen_movi_tl(source2, a->imm); - - (*func)(source1, source1, source2); - - gen_set_gpr(a->rd, source1); - tcg_temp_free(source1); - tcg_temp_free(source2); - return true; -} - -static void gen_addw(TCGv ret, TCGv arg1, TCGv arg2) -{ - tcg_gen_add_tl(ret, arg1, arg2); - tcg_gen_ext32s_tl(ret, ret); -} - -static void gen_subw(TCGv ret, TCGv arg1, TCGv arg2) -{ - tcg_gen_sub_tl(ret, arg1, arg2); - tcg_gen_ext32s_tl(ret, ret); -} - -static void gen_mulw(TCGv ret, TCGv arg1, TCGv arg2) -{ - tcg_gen_mul_tl(ret, arg1, arg2); - tcg_gen_ext32s_tl(ret, ret); -} - -static bool gen_arith_div_w(DisasContext *ctx, arg_r *a, - void(*func)(TCGv, TCGv, TCGv)) -{ - TCGv source1, source2; - source1 = tcg_temp_new(); - source2 = tcg_temp_new(); - - gen_get_gpr(source1, a->rs1); - gen_get_gpr(source2, a->rs2); - tcg_gen_ext32s_tl(source1, source1); - tcg_gen_ext32s_tl(source2, source2); - - (*func)(source1, source1, source2); - - tcg_gen_ext32s_tl(source1, source1); - gen_set_gpr(a->rd, source1); - tcg_temp_free(source1); - tcg_temp_free(source2); - return true; -} - -static bool gen_arith_div_uw(DisasContext *ctx, arg_r *a, - void(*func)(TCGv, TCGv, TCGv)) -{ - TCGv source1, source2; - source1 = tcg_temp_new(); - source2 = tcg_temp_new(); - - gen_get_gpr(source1, a->rs1); - gen_get_gpr(source2, a->rs2); - tcg_gen_ext32u_tl(source1, source1); - tcg_gen_ext32u_tl(source2, source2); - - (*func)(source1, source1, source2); - - tcg_gen_ext32s_tl(source1, source1); - gen_set_gpr(a->rd, source1); - tcg_temp_free(source1); - tcg_temp_free(source2); - return true; -} - -static void gen_pack(TCGv ret, TCGv arg1, TCGv arg2) -{ - tcg_gen_deposit_tl(ret, arg1, arg2, - TARGET_LONG_BITS / 2, - TARGET_LONG_BITS / 2); -} - -static void gen_packu(TCGv ret, TCGv arg1, TCGv arg2) -{ - TCGv t = tcg_temp_new(); - tcg_gen_shri_tl(t, arg1, TARGET_LONG_BITS / 2); - tcg_gen_deposit_tl(ret, arg2, t, 0, TARGET_LONG_BITS / 2); - tcg_temp_free(t); -} - -static void gen_packh(TCGv ret, TCGv arg1, TCGv arg2) -{ - TCGv t = tcg_temp_new(); - tcg_gen_ext8u_tl(t, arg2); - tcg_gen_deposit_tl(ret, arg1, t, 8, TARGET_LONG_BITS - 8); - tcg_temp_free(t); -} - -static void gen_sbop_mask(TCGv ret, TCGv shamt) -{ - tcg_gen_movi_tl(ret, 1); - tcg_gen_shl_tl(ret, ret, shamt); -} - -static void gen_bset(TCGv ret, TCGv arg1, TCGv shamt) -{ - TCGv t = tcg_temp_new(); - - gen_sbop_mask(t, shamt); - tcg_gen_or_tl(ret, arg1, t); - - tcg_temp_free(t); -} - -static void gen_bclr(TCGv ret, TCGv arg1, TCGv shamt) -{ - TCGv t = tcg_temp_new(); - - gen_sbop_mask(t, shamt); - tcg_gen_andc_tl(ret, arg1, t); - - tcg_temp_free(t); -} - -static void gen_binv(TCGv ret, TCGv arg1, TCGv 
shamt) -{ - TCGv t = tcg_temp_new(); - - gen_sbop_mask(t, shamt); - tcg_gen_xor_tl(ret, arg1, t); - - tcg_temp_free(t); -} - -static void gen_bext(TCGv ret, TCGv arg1, TCGv shamt) -{ - tcg_gen_shr_tl(ret, arg1, shamt); - tcg_gen_andi_tl(ret, ret, 1); -} - -static void gen_slo(TCGv ret, TCGv arg1, TCGv arg2) -{ - tcg_gen_not_tl(ret, arg1); - tcg_gen_shl_tl(ret, ret, arg2); - tcg_gen_not_tl(ret, ret); -} - -static void gen_sro(TCGv ret, TCGv arg1, TCGv arg2) -{ - tcg_gen_not_tl(ret, arg1); - tcg_gen_shr_tl(ret, ret, arg2); - tcg_gen_not_tl(ret, ret); -} - -static bool gen_grevi(DisasContext *ctx, arg_grevi *a) -{ - TCGv source1 = tcg_temp_new(); - TCGv source2; - - gen_get_gpr(source1, a->rs1); - - if (a->shamt == (TARGET_LONG_BITS - 8)) { - /* rev8, byte swaps */ - tcg_gen_bswap_tl(source1, source1); + func(desth, src1h, -(a->imm < 0)); + gen_set_gpr128(ctx, a->rd, dest, desth); } else { - source2 = tcg_temp_new(); - tcg_gen_movi_tl(source2, a->shamt); - gen_helper_grev(source1, source1, source2); - tcg_temp_free(source2); + gen_set_gpr(ctx, a->rd, dest); } - gen_set_gpr(a->rd, source1); - tcg_temp_free(source1); return true; } -#define GEN_SHADD(SHAMT) \ -static void gen_sh##SHAMT##add(TCGv ret, TCGv arg1, TCGv arg2) \ -{ \ - TCGv t = tcg_temp_new(); \ - \ - tcg_gen_shli_tl(t, arg1, SHAMT); \ - tcg_gen_add_tl(ret, t, arg2); \ - \ - tcg_temp_free(t); \ -} - -GEN_SHADD(1) -GEN_SHADD(2) -GEN_SHADD(3) - -static void gen_ctzw(TCGv ret, TCGv arg1) +static bool gen_logic(DisasContext *ctx, arg_r *a, + void (*func)(TCGv, TCGv, TCGv)) { - tcg_gen_ori_tl(ret, arg1, (target_ulong)MAKE_64BIT_MASK(32, 32)); - tcg_gen_ctzi_tl(ret, ret, 64); -} + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE); + TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); -static void gen_clzw(TCGv ret, TCGv arg1) -{ - tcg_gen_ext32u_tl(ret, arg1); - tcg_gen_clzi_tl(ret, ret, 64); - tcg_gen_subi_tl(ret, ret, 32); -} + func(dest, src1, src2); -static void gen_cpopw(TCGv ret, TCGv arg1) -{ - tcg_gen_ext32u_tl(arg1, arg1); - tcg_gen_ctpop_tl(ret, arg1); -} + if (get_xl(ctx) == MXL_RV128) { + TCGv src1h = get_gprh(ctx, a->rs1); + TCGv src2h = get_gprh(ctx, a->rs2); + TCGv desth = dest_gprh(ctx, a->rd); -static void gen_packw(TCGv ret, TCGv arg1, TCGv arg2) -{ - TCGv t = tcg_temp_new(); - tcg_gen_ext16s_tl(t, arg2); - tcg_gen_deposit_tl(ret, arg1, t, 16, 48); - tcg_temp_free(t); -} + func(desth, src1h, src2h); + gen_set_gpr128(ctx, a->rd, dest, desth); + } else { + gen_set_gpr(ctx, a->rd, dest); + } -static void gen_packuw(TCGv ret, TCGv arg1, TCGv arg2) -{ - TCGv t = tcg_temp_new(); - tcg_gen_shri_tl(t, arg1, 16); - tcg_gen_deposit_tl(ret, arg2, t, 0, 16); - tcg_gen_ext32s_tl(ret, ret); - tcg_temp_free(t); -} - -static void gen_rorw(TCGv ret, TCGv arg1, TCGv arg2) -{ - TCGv_i32 t1 = tcg_temp_new_i32(); - TCGv_i32 t2 = tcg_temp_new_i32(); - - /* truncate to 32-bits */ - tcg_gen_trunc_tl_i32(t1, arg1); - tcg_gen_trunc_tl_i32(t2, arg2); - - tcg_gen_rotr_i32(t1, t1, t2); - - /* sign-extend 64-bits */ - tcg_gen_ext_i32_tl(ret, t1); - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); -} - -static void gen_rolw(TCGv ret, TCGv arg1, TCGv arg2) -{ - TCGv_i32 t1 = tcg_temp_new_i32(); - TCGv_i32 t2 = tcg_temp_new_i32(); - - /* truncate to 32-bits */ - tcg_gen_trunc_tl_i32(t1, arg1); - tcg_gen_trunc_tl_i32(t2, arg2); - - tcg_gen_rotl_i32(t1, t1, t2); - - /* sign-extend 64-bits */ - tcg_gen_ext_i32_tl(ret, t1); - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); -} - -static void gen_grevw(TCGv ret, TCGv arg1, TCGv arg2) 
-{ - tcg_gen_ext32u_tl(arg1, arg1); - gen_helper_grev(ret, arg1, arg2); -} - -static void gen_gorcw(TCGv ret, TCGv arg1, TCGv arg2) -{ - tcg_gen_ext32u_tl(arg1, arg1); - gen_helper_gorcw(ret, arg1, arg2); -} - -#define GEN_SHADD_UW(SHAMT) \ -static void gen_sh##SHAMT##add_uw(TCGv ret, TCGv arg1, TCGv arg2) \ -{ \ - TCGv t = tcg_temp_new(); \ - \ - tcg_gen_ext32u_tl(t, arg1); \ - \ - tcg_gen_shli_tl(t, t, SHAMT); \ - tcg_gen_add_tl(ret, t, arg2); \ - \ - tcg_temp_free(t); \ -} - -GEN_SHADD_UW(1) -GEN_SHADD_UW(2) -GEN_SHADD_UW(3) - -static void gen_add_uw(TCGv ret, TCGv arg1, TCGv arg2) -{ - tcg_gen_ext32u_tl(arg1, arg1); - tcg_gen_add_tl(ret, arg1, arg2); -} - -static bool gen_arith(DisasContext *ctx, arg_r *a, - void(*func)(TCGv, TCGv, TCGv)) -{ - TCGv source1, source2; - source1 = tcg_temp_new(); - source2 = tcg_temp_new(); - - gen_get_gpr(source1, a->rs1); - gen_get_gpr(source2, a->rs2); - - (*func)(source1, source1, source2); - - gen_set_gpr(a->rd, source1); - tcg_temp_free(source1); - tcg_temp_free(source2); return true; } -static bool gen_shift(DisasContext *ctx, arg_r *a, - void(*func)(TCGv, TCGv, TCGv)) +static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a, DisasExtend ext, + void (*func)(TCGv, TCGv, target_long), + void (*f128)(TCGv, TCGv, TCGv, TCGv, target_long)) { - TCGv source1 = tcg_temp_new(); - TCGv source2 = tcg_temp_new(); + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1 = get_gpr(ctx, a->rs1, ext); - gen_get_gpr(source1, a->rs1); - gen_get_gpr(source2, a->rs2); + if (get_ol(ctx) < MXL_RV128) { + func(dest, src1, a->imm); + gen_set_gpr(ctx, a->rd, dest); + } else { + if (f128 == NULL) { + return false; + } - tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1); - (*func)(source1, source1, source2); + TCGv src1h = get_gprh(ctx, a->rs1); + TCGv desth = dest_gprh(ctx, a->rd); - gen_set_gpr(a->rd, source1); - tcg_temp_free(source1); - tcg_temp_free(source2); + f128(dest, desth, src1, src1h, a->imm); + gen_set_gpr128(ctx, a->rd, dest, desth); + } return true; } +static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a, DisasExtend ext, + void (*func)(TCGv, TCGv, TCGv), + void (*f128)(TCGv, TCGv, TCGv, TCGv, TCGv, TCGv)) +{ + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1 = get_gpr(ctx, a->rs1, ext); + TCGv src2 = tcg_constant_tl(a->imm); + + if (get_ol(ctx) < MXL_RV128) { + func(dest, src1, src2); + gen_set_gpr(ctx, a->rd, dest); + } else { + if (f128 == NULL) { + return false; + } + + TCGv src1h = get_gprh(ctx, a->rs1); + TCGv src2h = tcg_constant_tl(-(a->imm < 0)); + TCGv desth = dest_gprh(ctx, a->rd); + + f128(dest, desth, src1, src1h, src2, src2h); + gen_set_gpr128(ctx, a->rd, dest, desth); + } + return true; +} + +static bool gen_arith(DisasContext *ctx, arg_r *a, DisasExtend ext, + void (*func)(TCGv, TCGv, TCGv), + void (*f128)(TCGv, TCGv, TCGv, TCGv, TCGv, TCGv)) +{ + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1 = get_gpr(ctx, a->rs1, ext); + TCGv src2 = get_gpr(ctx, a->rs2, ext); + + if (get_ol(ctx) < MXL_RV128) { + func(dest, src1, src2); + gen_set_gpr(ctx, a->rd, dest); + } else { + if (f128 == NULL) { + return false; + } + + TCGv src1h = get_gprh(ctx, a->rs1); + TCGv src2h = get_gprh(ctx, a->rs2); + TCGv desth = dest_gprh(ctx, a->rd); + + f128(dest, desth, src1, src1h, src2, src2h); + gen_set_gpr128(ctx, a->rd, dest, desth); + } + return true; +} + +static bool gen_arith_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext, + void (*f_tl)(TCGv, TCGv, TCGv), + void (*f_32)(TCGv, TCGv, TCGv), + void (*f_128)(TCGv, TCGv, TCGv, TCGv, TCGv, TCGv)) +{ + int olen = 
get_olen(ctx); + + if (olen != TARGET_LONG_BITS) { + if (olen == 32) { + f_tl = f_32; + } else if (olen != 128) { + g_assert_not_reached(); + } + } + return gen_arith(ctx, a, ext, f_tl, f_128); +} + +static bool gen_shift_imm_fn(DisasContext *ctx, arg_shift *a, DisasExtend ext, + void (*func)(TCGv, TCGv, target_long), + void (*f128)(TCGv, TCGv, TCGv, TCGv, target_long)) +{ + TCGv dest, src1; + int max_len = get_olen(ctx); + + if (a->shamt >= max_len) { + return false; + } + + dest = dest_gpr(ctx, a->rd); + src1 = get_gpr(ctx, a->rs1, ext); + + if (max_len < 128) { + func(dest, src1, a->shamt); + gen_set_gpr(ctx, a->rd, dest); + } else { + TCGv src1h = get_gprh(ctx, a->rs1); + TCGv desth = dest_gprh(ctx, a->rd); + + if (f128 == NULL) { + return false; + } + f128(dest, desth, src1, src1h, a->shamt); + gen_set_gpr128(ctx, a->rd, dest, desth); + } + return true; +} + +static bool gen_shift_imm_fn_per_ol(DisasContext *ctx, arg_shift *a, + DisasExtend ext, + void (*f_tl)(TCGv, TCGv, target_long), + void (*f_32)(TCGv, TCGv, target_long), + void (*f_128)(TCGv, TCGv, TCGv, TCGv, + target_long)) +{ + int olen = get_olen(ctx); + if (olen != TARGET_LONG_BITS) { + if (olen == 32) { + f_tl = f_32; + } else if (olen != 128) { + g_assert_not_reached(); + } + } + return gen_shift_imm_fn(ctx, a, ext, f_tl, f_128); +} + +static bool gen_shift_imm_tl(DisasContext *ctx, arg_shift *a, DisasExtend ext, + void (*func)(TCGv, TCGv, TCGv)) +{ + TCGv dest, src1, src2; + int max_len = get_olen(ctx); + + if (a->shamt >= max_len) { + return false; + } + + dest = dest_gpr(ctx, a->rd); + src1 = get_gpr(ctx, a->rs1, ext); + src2 = tcg_constant_tl(a->shamt); + + func(dest, src1, src2); + + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool gen_shift(DisasContext *ctx, arg_r *a, DisasExtend ext, + void (*func)(TCGv, TCGv, TCGv), + void (*f128)(TCGv, TCGv, TCGv, TCGv, TCGv)) +{ + TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); + TCGv ext2 = tcg_temp_new(); + int max_len = get_olen(ctx); + + tcg_gen_andi_tl(ext2, src2, max_len - 1); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1 = get_gpr(ctx, a->rs1, ext); + + if (max_len < 128) { + func(dest, src1, ext2); + gen_set_gpr(ctx, a->rd, dest); + } else { + TCGv src1h = get_gprh(ctx, a->rs1); + TCGv desth = dest_gprh(ctx, a->rd); + + if (f128 == NULL) { + return false; + } + f128(dest, desth, src1, src1h, ext2); + gen_set_gpr128(ctx, a->rd, dest, desth); + } + tcg_temp_free(ext2); + return true; +} + +static bool gen_shift_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext, + void (*f_tl)(TCGv, TCGv, TCGv), + void (*f_32)(TCGv, TCGv, TCGv), + void (*f_128)(TCGv, TCGv, TCGv, TCGv, TCGv)) +{ + int olen = get_olen(ctx); + if (olen != TARGET_LONG_BITS) { + if (olen == 32) { + f_tl = f_32; + } else if (olen != 128) { + g_assert_not_reached(); + } + } + return gen_shift(ctx, a, ext, f_tl, f_128); +} + +static bool gen_unary(DisasContext *ctx, arg_r2 *a, DisasExtend ext, + void (*func)(TCGv, TCGv)) +{ + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1 = get_gpr(ctx, a->rs1, ext); + + func(dest, src1); + + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool gen_unary_per_ol(DisasContext *ctx, arg_r2 *a, DisasExtend ext, + void (*f_tl)(TCGv, TCGv), + void (*f_32)(TCGv, TCGv)) +{ + int olen = get_olen(ctx); + + if (olen != TARGET_LONG_BITS) { + if (olen == 32) { + f_tl = f_32; + } else { + g_assert_not_reached(); + } + } + return gen_unary(ctx, a, ext, f_tl); +} + static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) { DisasContext *ctx = 
container_of(dcbase, DisasContext, base); @@ -798,88 +1020,6 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) return cpu_ldl_code(env, pc); } -static bool gen_shifti(DisasContext *ctx, arg_shift *a, - void(*func)(TCGv, TCGv, TCGv)) -{ - if (a->shamt >= TARGET_LONG_BITS) { - return false; - } - - TCGv source1 = tcg_temp_new(); - TCGv source2 = tcg_temp_new(); - - gen_get_gpr(source1, a->rs1); - - tcg_gen_movi_tl(source2, a->shamt); - (*func)(source1, source1, source2); - - gen_set_gpr(a->rd, source1); - tcg_temp_free(source1); - tcg_temp_free(source2); - return true; -} - -static bool gen_shiftw(DisasContext *ctx, arg_r *a, - void(*func)(TCGv, TCGv, TCGv)) -{ - TCGv source1 = tcg_temp_new(); - TCGv source2 = tcg_temp_new(); - - gen_get_gpr(source1, a->rs1); - gen_get_gpr(source2, a->rs2); - - tcg_gen_andi_tl(source2, source2, 31); - (*func)(source1, source1, source2); - tcg_gen_ext32s_tl(source1, source1); - - gen_set_gpr(a->rd, source1); - tcg_temp_free(source1); - tcg_temp_free(source2); - return true; -} - -static bool gen_shiftiw(DisasContext *ctx, arg_shift *a, - void(*func)(TCGv, TCGv, TCGv)) -{ - TCGv source1 = tcg_temp_new(); - TCGv source2 = tcg_temp_new(); - - gen_get_gpr(source1, a->rs1); - tcg_gen_movi_tl(source2, a->shamt); - - (*func)(source1, source1, source2); - tcg_gen_ext32s_tl(source1, source1); - - gen_set_gpr(a->rd, source1); - tcg_temp_free(source1); - tcg_temp_free(source2); - return true; -} - -static void gen_ctz(TCGv ret, TCGv arg1) -{ - tcg_gen_ctzi_tl(ret, arg1, TARGET_LONG_BITS); -} - -static void gen_clz(TCGv ret, TCGv arg1) -{ - tcg_gen_clzi_tl(ret, arg1, TARGET_LONG_BITS); -} - -static bool gen_unary(DisasContext *ctx, arg_r2 *a, - void(*func)(TCGv, TCGv)) -{ - TCGv source = tcg_temp_new(); - - gen_get_gpr(source, a->rs1); - - (*func)(source, source); - - gen_set_gpr(a->rd, source); - tcg_temp_free(source); - return true; -} - /* Include insn module translation function */ #include "insn_trans/trans_rvi.c.inc" #include "insn_trans/trans_rvm.c.inc" @@ -889,32 +1029,63 @@ static bool gen_unary(DisasContext *ctx, arg_r2 *a, #include "insn_trans/trans_rvh.c.inc" #include "insn_trans/trans_rvv.c.inc" #include "insn_trans/trans_rvb.c.inc" +#include "insn_trans/trans_rvzfh.c.inc" +#include "insn_trans/trans_rvk.c.inc" #include "insn_trans/trans_privileged.c.inc" +#include "insn_trans/trans_svinval.c.inc" +#include "insn_trans/trans_xventanacondops.c.inc" /* Include the auto-generated decoder for 16 bit insn */ #include "decode-insn16.c.inc" +/* Include decoders for factored-out extensions */ +#include "decode-XVentanaCondOps.c.inc" + +/* The specification allows for longer insns, but not supported by qemu. */ +#define MAX_INSN_LEN 4 + +static inline int insn_len(uint16_t first_word) +{ + return (first_word & 3) == 3 ? 4 : 2; +} static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode) { - /* check for compressed insn */ - if (extract16(opcode, 0, 2) != 3) { - if (!has_ext(ctx, RVC)) { - gen_exception_illegal(ctx); - } else { - ctx->pc_succ_insn = ctx->base.pc_next + 2; - if (!decode_insn16(ctx, opcode)) { - gen_exception_illegal(ctx); - } + /* + * A table with predicate (i.e., guard) functions and decoder functions + * that are tested in-order until a decoder matches onto the opcode. 
+ */ + static const struct { + bool (*guard_func)(DisasContext *); + bool (*decode_func)(DisasContext *, uint32_t); + } decoders[] = { + { always_true_p, decode_insn32 }, + { has_XVentanaCondOps_p, decode_XVentanaCodeOps }, + }; + + /* Check for compressed insn */ + if (insn_len(opcode) == 2) { + ctx->opcode = opcode; + ctx->pc_succ_insn = ctx->base.pc_next + 2; + if (has_ext(ctx, RVC) && decode_insn16(ctx, opcode)) { + return; } } else { uint32_t opcode32 = opcode; opcode32 = deposit32(opcode32, 16, 16, - translator_lduw(env, ctx->base.pc_next + 2)); + translator_lduw(env, &ctx->base, + ctx->base.pc_next + 2)); + ctx->opcode = opcode32; ctx->pc_succ_insn = ctx->base.pc_next + 4; - if (!decode_insn32(ctx, opcode32)) { - gen_exception_illegal(ctx); + + for (size_t i = 0; i < ARRAY_SIZE(decoders); ++i) { + if (decoders[i].guard_func(ctx) && + decoders[i].decode_func(ctx, opcode32)) { + return; + } } } + + gen_exception_illegal(ctx); } static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) @@ -925,8 +1096,9 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) uint32_t tb_flags = ctx->base.tb->flags; ctx->pc_succ_insn = ctx->base.pc_first; - ctx->mem_idx = tb_flags & TB_FLAGS_MMU_MASK; + ctx->mem_idx = FIELD_EX32(tb_flags, TB_FLAGS, MEM_IDX); ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS; + ctx->mstatus_vs = tb_flags & TB_FLAGS_MSTATUS_VS; ctx->priv_ver = env->priv_ver; #if !defined(CONFIG_USER_ONLY) if (riscv_has_ext(env, RVH)) { @@ -937,17 +1109,30 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) #else ctx->virt_enabled = false; #endif - ctx->misa = env->misa; + ctx->misa_ext = env->misa_ext; ctx->frm = -1; /* unknown rounding mode */ - ctx->ext_ifencei = cpu->cfg.ext_ifencei; - ctx->vlen = cpu->cfg.vlen; + ctx->cfg_ptr = &(cpu->cfg); + ctx->mstatus_hs_fs = FIELD_EX32(tb_flags, TB_FLAGS, MSTATUS_HS_FS); + ctx->mstatus_hs_vs = FIELD_EX32(tb_flags, TB_FLAGS, MSTATUS_HS_VS); ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX); ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL); ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW); - ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL); - ctx->mlen = 1 << (ctx->sew + 3 - ctx->lmul); + ctx->lmul = sextract32(FIELD_EX32(tb_flags, TB_FLAGS, LMUL), 0, 3); + ctx->vta = FIELD_EX32(tb_flags, TB_FLAGS, VTA) && cpu->cfg.rvv_ta_all_1s; + ctx->vma = FIELD_EX32(tb_flags, TB_FLAGS, VMA) && cpu->cfg.rvv_ma_all_1s; + ctx->cfg_vta_all_1s = cpu->cfg.rvv_ta_all_1s; + ctx->vstart = env->vstart; ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX); + ctx->misa_mxl_max = env->misa_mxl_max; + ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL); ctx->cs = cs; + ctx->ntemp = 0; + memset(ctx->temp, 0, sizeof(ctx->temp)); + ctx->nftemp = 0; + memset(ctx->ftemp, 0, sizeof(ctx->ftemp)); + ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED); + ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED); + ctx->zero = tcg_constant_tl(0); } static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu) @@ -958,24 +1143,47 @@ static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *ctx = container_of(dcbase, DisasContext, base); - tcg_gen_insn_start(ctx->base.pc_next); + tcg_gen_insn_start(ctx->base.pc_next, 0); + ctx->insn_start = tcg_last_op(); } static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *ctx = container_of(dcbase, DisasContext, base); CPURISCVState *env = cpu->env_ptr; - uint16_t 
opcode16 = translator_lduw(env, ctx->base.pc_next); + uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next); + int i; + ctx->ol = ctx->xl; decode_opc(env, ctx, opcode16); ctx->base.pc_next = ctx->pc_succ_insn; - if (ctx->base.is_jmp == DISAS_NEXT) { - target_ulong page_start; + for (i = ctx->ntemp - 1; i >= 0; --i) { + tcg_temp_free(ctx->temp[i]); + ctx->temp[i] = NULL; + } + ctx->ntemp = 0; + for (i = ctx->nftemp - 1; i >= 0; --i) { + tcg_temp_free_i64(ctx->ftemp[i]); + ctx->ftemp[i] = NULL; + } + ctx->nftemp = 0; - page_start = ctx->base.pc_first & TARGET_PAGE_MASK; - if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) { + /* Only the first insn within a TB is allowed to cross a page boundary. */ + if (ctx->base.is_jmp == DISAS_NEXT) { + if (!is_same_page(&ctx->base, ctx->base.pc_next)) { ctx->base.is_jmp = DISAS_TOO_MANY; + } else { + unsigned page_ofs = ctx->base.pc_next & ~TARGET_PAGE_MASK; + + if (page_ofs > TARGET_PAGE_SIZE - MAX_INSN_LEN) { + uint16_t next_insn = cpu_lduw_code(env, ctx->base.pc_next); + int len = insn_len(next_insn); + + if (!is_same_page(&ctx->base, ctx->base.pc_next + len)) { + ctx->base.is_jmp = DISAS_TOO_MANY; + } + } } } } @@ -995,18 +1203,20 @@ static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) } } -static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu) +static void riscv_tr_disas_log(const DisasContextBase *dcbase, + CPUState *cpu, FILE *logfile) { #ifndef CONFIG_USER_ONLY RISCVCPU *rvcpu = RISCV_CPU(cpu); CPURISCVState *env = &rvcpu->env; #endif - qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first)); + fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first)); #ifndef CONFIG_USER_ONLY - qemu_log("Priv: "TARGET_FMT_ld"; Virt: "TARGET_FMT_ld"\n", env->priv, env->virt); + fprintf(logfile, "Priv: "TARGET_FMT_ld"; Virt: "TARGET_FMT_ld"\n", + env->priv, env->virt); #endif - log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size); + target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size); } static const TranslatorOps riscv_tr_ops = { @@ -1018,25 +1228,31 @@ static const TranslatorOps riscv_tr_ops = { .disas_log = riscv_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext ctx; - translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns); + translator_loop(cs, tb, max_insns, pc, host_pc, &riscv_tr_ops, &ctx.base); } void riscv_translate_init(void) { int i; - /* cpu_gpr[0] is a placeholder for the zero register. Do not use it. */ - /* Use the gen_set_gpr and gen_get_gpr helper functions when accessing */ - /* registers, unless you specifically block reads/writes to reg 0 */ + /* + * cpu_gpr[0] is a placeholder for the zero register. Do not use it. + * Use the gen_set_gpr and get_gpr helper functions when accessing regs, + * unless you specifically block reads/writes to reg 0. 
+ */ cpu_gpr[0] = NULL; + cpu_gprh[0] = NULL; for (i = 1; i < 32; i++) { cpu_gpr[i] = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]); + cpu_gprh[i] = tcg_global_mem_new(cpu_env, + offsetof(CPURISCVState, gprh[i]), riscv_int_regnamesh[i]); } for (i = 0; i < 32; i++) { @@ -1046,8 +1262,15 @@ void riscv_translate_init(void) cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc"); cpu_vl = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vl), "vl"); + cpu_vstart = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vstart), + "vstart"); load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res), "load_res"); load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val), "load_val"); + /* Assign PM CSRs to tcg globals */ + pm_mask = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, cur_pmmask), + "pmmask"); + pm_base = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, cur_pmbase), + "pmbase"); } diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c index 12c31aa4b4..0020b9a95d 100644 --- a/target/riscv/vector_helper.c +++ b/target/riscv/vector_helper.c @@ -17,6 +17,8 @@ */ #include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "qemu/bitops.h" #include "cpu.h" #include "exec/memop.h" #include "exec/exec-all.h" @@ -31,14 +33,30 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1, { int vlmax, vl; RISCVCPU *cpu = env_archcpu(env); + uint64_t lmul = FIELD_EX64(s2, VTYPE, VLMUL); uint16_t sew = 8 << FIELD_EX64(s2, VTYPE, VSEW); uint8_t ediv = FIELD_EX64(s2, VTYPE, VEDIV); - bool vill = FIELD_EX64(s2, VTYPE, VILL); - target_ulong reserved = FIELD_EX64(s2, VTYPE, RESERVED); + int xlen = riscv_cpu_xlen(env); + bool vill = (s2 >> (xlen - 1)) & 0x1; + target_ulong reserved = s2 & + MAKE_64BIT_MASK(R_VTYPE_RESERVED_SHIFT, + xlen - 1 - R_VTYPE_RESERVED_SHIFT); - if ((sew > cpu->cfg.elen) || vill || (ediv != 0) || (reserved != 0)) { + if (lmul & 4) { + /* Fractional LMUL. */ + if (lmul == 4 || + cpu->cfg.elen >> (8 - lmul) < sew) { + vill = true; + } + } + + if ((sew > cpu->cfg.elen) + || vill + || (ediv != 0) + || (reserved != 0)) { /* only set vill bit. */ - env->vtype = FIELD_DP64(0, VTYPE, VILL, 1); + env->vill = 1; + env->vtype = 0; env->vl = 0; env->vstart = 0; return 0; @@ -53,6 +71,7 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1, env->vl = vl; env->vtype = s2; env->vstart = 0; + env->vill = 0; return vl; } @@ -60,7 +79,7 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1, * Note that vector data is stored in host-endian 64-bit chunks, * so addressing units smaller than that needs a host-endian fixup. 
*/ -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN #define H1(x) ((x) ^ 7) #define H1_2(x) ((x) ^ 6) #define H1_4(x) ((x) ^ 4) @@ -81,35 +100,79 @@ static inline uint32_t vext_nf(uint32_t desc) return FIELD_EX32(simd_data(desc), VDATA, NF); } -static inline uint32_t vext_mlen(uint32_t desc) -{ - return FIELD_EX32(simd_data(desc), VDATA, MLEN); -} - static inline uint32_t vext_vm(uint32_t desc) { return FIELD_EX32(simd_data(desc), VDATA, VM); } -static inline uint32_t vext_lmul(uint32_t desc) +/* + * Encode LMUL to lmul as following: + * LMUL vlmul lmul + * 1 000 0 + * 2 001 1 + * 4 010 2 + * 8 011 3 + * - 100 - + * 1/8 101 -3 + * 1/4 110 -2 + * 1/2 111 -1 + */ +static inline int32_t vext_lmul(uint32_t desc) { - return FIELD_EX32(simd_data(desc), VDATA, LMUL); + return sextract32(FIELD_EX32(simd_data(desc), VDATA, LMUL), 0, 3); } -static uint32_t vext_wd(uint32_t desc) +static inline uint32_t vext_vta(uint32_t desc) { - return (simd_data(desc) >> 11) & 0x1; + return FIELD_EX32(simd_data(desc), VDATA, VTA); +} + +static inline uint32_t vext_vma(uint32_t desc) +{ + return FIELD_EX32(simd_data(desc), VDATA, VMA); +} + +static inline uint32_t vext_vta_all_1s(uint32_t desc) +{ + return FIELD_EX32(simd_data(desc), VDATA, VTA_ALL_1S); } /* - * Get vector group length in bytes. Its range is [64, 2048]. + * Get the maximum number of elements can be operated. * - * As simd_desc support at most 256, the max vlen is 512 bits. - * So vlen in bytes is encoded as maxsz. + * log2_esz: log2 of element size in bytes. */ -static inline uint32_t vext_maxsz(uint32_t desc) +static inline uint32_t vext_max_elems(uint32_t desc, uint32_t log2_esz) { - return simd_maxsz(desc) << vext_lmul(desc); + /* + * As simd_desc support at most 2048 bytes, the max vlen is 1024 bits. + * so vlen in bytes (vlenb) is encoded as maxsz. + */ + uint32_t vlenb = simd_maxsz(desc); + + /* Return VLMAX */ + int scale = vext_lmul(desc) - log2_esz; + return scale < 0 ? vlenb >> -scale : vlenb << scale; +} + +/* + * Get number of total elements, including prestart, body and tail elements. + * Note that when LMUL < 1, the tail includes the elements past VLMAX that + * are held in the same vector register. + */ +static inline uint32_t vext_get_total_elems(CPURISCVState *env, uint32_t desc, + uint32_t esz) +{ + uint32_t vlenb = simd_maxsz(desc); + uint32_t sew = 1 << FIELD_EX64(env->vtype, VTYPE, VSEW); + int8_t emul = ctzl(esz) - ctzl(sew) + vext_lmul(desc) < 0 ? 0 : + ctzl(esz) - ctzl(sew) + vext_lmul(desc); + return (vlenb << emul) / esz; +} + +static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr) +{ + return (addr & env->cur_pmmask) | env->cur_pmbase; } /* @@ -129,118 +192,67 @@ static void probe_pages(CPURISCVState *env, target_ulong addr, target_ulong pagelen = -(addr | TARGET_PAGE_MASK); target_ulong curlen = MIN(pagelen, len); - probe_access(env, addr, curlen, access_type, + probe_access(env, adjust_addr(env, addr), curlen, access_type, cpu_mmu_index(env, false), ra); if (len > curlen) { addr += curlen; curlen = len - curlen; - probe_access(env, addr, curlen, access_type, + probe_access(env, adjust_addr(env, addr), curlen, access_type, cpu_mmu_index(env, false), ra); } } -#ifdef HOST_WORDS_BIGENDIAN -static void vext_clear(void *tail, uint32_t cnt, uint32_t tot) +/* set agnostic elements to 1s */ +static void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt, + uint32_t tot) { - /* - * Split the remaining range to two parts. - * The first part is in the last uint64_t unit. 
- * The second part start from the next uint64_t unit. - */ - int part1 = 0, part2 = tot - cnt; - if (cnt % 8) { - part1 = 8 - (cnt % 8); - part2 = tot - cnt - part1; - memset(QEMU_ALIGN_PTR_DOWN(tail, 8), 0, part1); - memset(QEMU_ALIGN_PTR_UP(tail, 8), 0, part2); - } else { - memset(tail, 0, part2); + if (is_agnostic == 0) { + /* policy undisturbed */ + return; } -} -#else -static void vext_clear(void *tail, uint32_t cnt, uint32_t tot) -{ - memset(tail, 0, tot - cnt); -} -#endif - -static void clearb(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot) -{ - int8_t *cur = ((int8_t *)vd + H1(idx)); - vext_clear(cur, cnt, tot); + if (tot - cnt == 0) { + return; + } + memset(base + cnt, -1, tot - cnt); } -static void clearh(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot) +static inline void vext_set_elem_mask(void *v0, int index, + uint8_t value) { - int16_t *cur = ((int16_t *)vd + H2(idx)); - vext_clear(cur, cnt, tot); -} - -static void clearl(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot) -{ - int32_t *cur = ((int32_t *)vd + H4(idx)); - vext_clear(cur, cnt, tot); -} - -static void clearq(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot) -{ - int64_t *cur = (int64_t *)vd + idx; - vext_clear(cur, cnt, tot); -} - -static inline void vext_set_elem_mask(void *v0, int mlen, int index, - uint8_t value) -{ - int idx = (index * mlen) / 64; - int pos = (index * mlen) % 64; + int idx = index / 64; + int pos = index % 64; uint64_t old = ((uint64_t *)v0)[idx]; - ((uint64_t *)v0)[idx] = deposit64(old, pos, mlen, value); + ((uint64_t *)v0)[idx] = deposit64(old, pos, 1, value); } -static inline int vext_elem_mask(void *v0, int mlen, int index) +/* + * Earlier designs (pre-0.9) had a varying number of bits + * per mask value (MLEN). In the 0.9 design, MLEN=1. 
+ * (Section 4.5) + */ +static inline int vext_elem_mask(void *v0, int index) { - int idx = (index * mlen) / 64; - int pos = (index * mlen) % 64; + int idx = index / 64; + int pos = index % 64; return (((uint64_t *)v0)[idx] >> pos) & 1; } /* elements operations for load and store */ typedef void vext_ldst_elem_fn(CPURISCVState *env, target_ulong addr, uint32_t idx, void *vd, uintptr_t retaddr); -typedef void clear_fn(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot); -#define GEN_VEXT_LD_ELEM(NAME, MTYPE, ETYPE, H, LDSUF) \ +#define GEN_VEXT_LD_ELEM(NAME, ETYPE, H, LDSUF) \ static void NAME(CPURISCVState *env, abi_ptr addr, \ uint32_t idx, void *vd, uintptr_t retaddr)\ { \ - MTYPE data; \ ETYPE *cur = ((ETYPE *)vd + H(idx)); \ - data = cpu_##LDSUF##_data_ra(env, addr, retaddr); \ - *cur = data; \ + *cur = cpu_##LDSUF##_data_ra(env, addr, retaddr); \ } \ -GEN_VEXT_LD_ELEM(ldb_b, int8_t, int8_t, H1, ldsb) -GEN_VEXT_LD_ELEM(ldb_h, int8_t, int16_t, H2, ldsb) -GEN_VEXT_LD_ELEM(ldb_w, int8_t, int32_t, H4, ldsb) -GEN_VEXT_LD_ELEM(ldb_d, int8_t, int64_t, H8, ldsb) -GEN_VEXT_LD_ELEM(ldh_h, int16_t, int16_t, H2, ldsw) -GEN_VEXT_LD_ELEM(ldh_w, int16_t, int32_t, H4, ldsw) -GEN_VEXT_LD_ELEM(ldh_d, int16_t, int64_t, H8, ldsw) -GEN_VEXT_LD_ELEM(ldw_w, int32_t, int32_t, H4, ldl) -GEN_VEXT_LD_ELEM(ldw_d, int32_t, int64_t, H8, ldl) -GEN_VEXT_LD_ELEM(lde_b, int8_t, int8_t, H1, ldsb) -GEN_VEXT_LD_ELEM(lde_h, int16_t, int16_t, H2, ldsw) -GEN_VEXT_LD_ELEM(lde_w, int32_t, int32_t, H4, ldl) -GEN_VEXT_LD_ELEM(lde_d, int64_t, int64_t, H8, ldq) -GEN_VEXT_LD_ELEM(ldbu_b, uint8_t, uint8_t, H1, ldub) -GEN_VEXT_LD_ELEM(ldbu_h, uint8_t, uint16_t, H2, ldub) -GEN_VEXT_LD_ELEM(ldbu_w, uint8_t, uint32_t, H4, ldub) -GEN_VEXT_LD_ELEM(ldbu_d, uint8_t, uint64_t, H8, ldub) -GEN_VEXT_LD_ELEM(ldhu_h, uint16_t, uint16_t, H2, lduw) -GEN_VEXT_LD_ELEM(ldhu_w, uint16_t, uint32_t, H4, lduw) -GEN_VEXT_LD_ELEM(ldhu_d, uint16_t, uint64_t, H8, lduw) -GEN_VEXT_LD_ELEM(ldwu_w, uint32_t, uint32_t, H4, ldl) -GEN_VEXT_LD_ELEM(ldwu_d, uint32_t, uint64_t, H8, ldl) +GEN_VEXT_LD_ELEM(lde_b, int8_t, H1, ldsb) +GEN_VEXT_LD_ELEM(lde_h, int16_t, H2, ldsw) +GEN_VEXT_LD_ELEM(lde_w, int32_t, H4, ldl) +GEN_VEXT_LD_ELEM(lde_d, int64_t, H8, ldq) #define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF) \ static void NAME(CPURISCVState *env, abi_ptr addr, \ @@ -250,15 +262,6 @@ static void NAME(CPURISCVState *env, abi_ptr addr, \ cpu_##STSUF##_data_ra(env, addr, data, retaddr); \ } -GEN_VEXT_ST_ELEM(stb_b, int8_t, H1, stb) -GEN_VEXT_ST_ELEM(stb_h, int16_t, H2, stb) -GEN_VEXT_ST_ELEM(stb_w, int32_t, H4, stb) -GEN_VEXT_ST_ELEM(stb_d, int64_t, H8, stb) -GEN_VEXT_ST_ELEM(sth_h, int16_t, H2, stw) -GEN_VEXT_ST_ELEM(sth_w, int32_t, H4, stw) -GEN_VEXT_ST_ELEM(sth_d, int64_t, H8, stw) -GEN_VEXT_ST_ELEM(stw_w, int32_t, H4, stl) -GEN_VEXT_ST_ELEM(stw_d, int64_t, H8, stl) GEN_VEXT_ST_ELEM(ste_b, int8_t, H1, stb) GEN_VEXT_ST_ELEM(ste_h, int16_t, H2, stw) GEN_VEXT_ST_ELEM(ste_w, int32_t, H4, stl) @@ -271,100 +274,76 @@ static void vext_ldst_stride(void *vd, void *v0, target_ulong base, target_ulong stride, CPURISCVState *env, uint32_t desc, uint32_t vm, - vext_ldst_elem_fn *ldst_elem, clear_fn *clear_elem, - uint32_t esz, uint32_t msz, uintptr_t ra, - MMUAccessType access_type) + vext_ldst_elem_fn *ldst_elem, + uint32_t log2_esz, uintptr_t ra) { uint32_t i, k; uint32_t nf = vext_nf(desc); - uint32_t mlen = vext_mlen(desc); - uint32_t vlmax = vext_maxsz(desc) / esz; + uint32_t max_elems = vext_max_elems(desc, log2_esz); + uint32_t esz = 1 << log2_esz; + uint32_t total_elems = 
vext_get_total_elems(env, desc, esz); + uint32_t vta = vext_vta(desc); + uint32_t vma = vext_vma(desc); - /* probe every access*/ - for (i = 0; i < env->vl; i++) { - if (!vm && !vext_elem_mask(v0, mlen, i)) { - continue; - } - probe_pages(env, base + stride * i, nf * msz, ra, access_type); - } - /* do real access */ - for (i = 0; i < env->vl; i++) { + for (i = env->vstart; i < env->vl; i++, env->vstart++) { k = 0; - if (!vm && !vext_elem_mask(v0, mlen, i)) { - continue; - } while (k < nf) { - target_ulong addr = base + stride * i + k * msz; - ldst_elem(env, addr, i + k * vlmax, vd, ra); + if (!vm && !vext_elem_mask(v0, i)) { + /* set masked-off elements to 1s */ + vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz, + (i + k * max_elems + 1) * esz); + k++; + continue; + } + target_ulong addr = base + stride * i + (k << log2_esz); + ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra); k++; } } - /* clear tail elements */ - if (clear_elem) { - for (k = 0; k < nf; k++) { - clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz); - } + env->vstart = 0; + /* set tail elements to 1s */ + for (k = 0; k < nf; ++k) { + vext_set_elems_1s(vd, vta, (k * max_elems + env->vl) * esz, + (k * max_elems + max_elems) * esz); + } + if (nf * max_elems % total_elems != 0) { + uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3; + uint32_t registers_used = + ((nf * max_elems) * esz + (vlenb - 1)) / vlenb; + vext_set_elems_1s(vd, vta, (nf * max_elems) * esz, + registers_used * vlenb); } } -#define GEN_VEXT_LD_STRIDE(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN) \ +#define GEN_VEXT_LD_STRIDE(NAME, ETYPE, LOAD_FN) \ void HELPER(NAME)(void *vd, void * v0, target_ulong base, \ target_ulong stride, CPURISCVState *env, \ uint32_t desc) \ { \ uint32_t vm = vext_vm(desc); \ vext_ldst_stride(vd, v0, base, stride, env, desc, vm, LOAD_FN, \ - CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE), \ - GETPC(), MMU_DATA_LOAD); \ + ctzl(sizeof(ETYPE)), GETPC()); \ } -GEN_VEXT_LD_STRIDE(vlsb_v_b, int8_t, int8_t, ldb_b, clearb) -GEN_VEXT_LD_STRIDE(vlsb_v_h, int8_t, int16_t, ldb_h, clearh) -GEN_VEXT_LD_STRIDE(vlsb_v_w, int8_t, int32_t, ldb_w, clearl) -GEN_VEXT_LD_STRIDE(vlsb_v_d, int8_t, int64_t, ldb_d, clearq) -GEN_VEXT_LD_STRIDE(vlsh_v_h, int16_t, int16_t, ldh_h, clearh) -GEN_VEXT_LD_STRIDE(vlsh_v_w, int16_t, int32_t, ldh_w, clearl) -GEN_VEXT_LD_STRIDE(vlsh_v_d, int16_t, int64_t, ldh_d, clearq) -GEN_VEXT_LD_STRIDE(vlsw_v_w, int32_t, int32_t, ldw_w, clearl) -GEN_VEXT_LD_STRIDE(vlsw_v_d, int32_t, int64_t, ldw_d, clearq) -GEN_VEXT_LD_STRIDE(vlse_v_b, int8_t, int8_t, lde_b, clearb) -GEN_VEXT_LD_STRIDE(vlse_v_h, int16_t, int16_t, lde_h, clearh) -GEN_VEXT_LD_STRIDE(vlse_v_w, int32_t, int32_t, lde_w, clearl) -GEN_VEXT_LD_STRIDE(vlse_v_d, int64_t, int64_t, lde_d, clearq) -GEN_VEXT_LD_STRIDE(vlsbu_v_b, uint8_t, uint8_t, ldbu_b, clearb) -GEN_VEXT_LD_STRIDE(vlsbu_v_h, uint8_t, uint16_t, ldbu_h, clearh) -GEN_VEXT_LD_STRIDE(vlsbu_v_w, uint8_t, uint32_t, ldbu_w, clearl) -GEN_VEXT_LD_STRIDE(vlsbu_v_d, uint8_t, uint64_t, ldbu_d, clearq) -GEN_VEXT_LD_STRIDE(vlshu_v_h, uint16_t, uint16_t, ldhu_h, clearh) -GEN_VEXT_LD_STRIDE(vlshu_v_w, uint16_t, uint32_t, ldhu_w, clearl) -GEN_VEXT_LD_STRIDE(vlshu_v_d, uint16_t, uint64_t, ldhu_d, clearq) -GEN_VEXT_LD_STRIDE(vlswu_v_w, uint32_t, uint32_t, ldwu_w, clearl) -GEN_VEXT_LD_STRIDE(vlswu_v_d, uint32_t, uint64_t, ldwu_d, clearq) +GEN_VEXT_LD_STRIDE(vlse8_v, int8_t, lde_b) +GEN_VEXT_LD_STRIDE(vlse16_v, int16_t, lde_h) +GEN_VEXT_LD_STRIDE(vlse32_v, int32_t, lde_w) +GEN_VEXT_LD_STRIDE(vlse64_v, int64_t, lde_d) 
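For reference, each GEN_VEXT_LD_STRIDE() instantiation above only stamps out a thin helper around vext_ldst_stride(). A rough, illustrative sketch of what the vlse32_v case expands to, assuming the usual HELPER() name mangling to helper_vlse32_v:

void helper_vlse32_v(void *vd, void *v0, target_ulong base,
                     target_ulong stride, CPURISCVState *env,
                     uint32_t desc)
{
    uint32_t vm = vext_vm(desc);
    /* ctzl(sizeof(int32_t)) == 2, i.e. log2 of the element size in bytes */
    vext_ldst_stride(vd, v0, base, stride, env, desc, vm, lde_w,
                     ctzl(sizeof(int32_t)), GETPC());
}

The GEN_VEXT_ST_STRIDE variants below follow the same pattern, passing a store element function instead of a load one and likewise dropping the old MTYPE/clear_fn arguments.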
-#define GEN_VEXT_ST_STRIDE(NAME, MTYPE, ETYPE, STORE_FN) \ +#define GEN_VEXT_ST_STRIDE(NAME, ETYPE, STORE_FN) \ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ target_ulong stride, CPURISCVState *env, \ uint32_t desc) \ { \ uint32_t vm = vext_vm(desc); \ vext_ldst_stride(vd, v0, base, stride, env, desc, vm, STORE_FN, \ - NULL, sizeof(ETYPE), sizeof(MTYPE), \ - GETPC(), MMU_DATA_STORE); \ + ctzl(sizeof(ETYPE)), GETPC()); \ } -GEN_VEXT_ST_STRIDE(vssb_v_b, int8_t, int8_t, stb_b) -GEN_VEXT_ST_STRIDE(vssb_v_h, int8_t, int16_t, stb_h) -GEN_VEXT_ST_STRIDE(vssb_v_w, int8_t, int32_t, stb_w) -GEN_VEXT_ST_STRIDE(vssb_v_d, int8_t, int64_t, stb_d) -GEN_VEXT_ST_STRIDE(vssh_v_h, int16_t, int16_t, sth_h) -GEN_VEXT_ST_STRIDE(vssh_v_w, int16_t, int32_t, sth_w) -GEN_VEXT_ST_STRIDE(vssh_v_d, int16_t, int64_t, sth_d) -GEN_VEXT_ST_STRIDE(vssw_v_w, int32_t, int32_t, stw_w) -GEN_VEXT_ST_STRIDE(vssw_v_d, int32_t, int64_t, stw_d) -GEN_VEXT_ST_STRIDE(vsse_v_b, int8_t, int8_t, ste_b) -GEN_VEXT_ST_STRIDE(vsse_v_h, int16_t, int16_t, ste_h) -GEN_VEXT_ST_STRIDE(vsse_v_w, int32_t, int32_t, ste_w) -GEN_VEXT_ST_STRIDE(vsse_v_d, int64_t, int64_t, ste_d) +GEN_VEXT_ST_STRIDE(vsse8_v, int8_t, ste_b) +GEN_VEXT_ST_STRIDE(vsse16_v, int16_t, ste_h) +GEN_VEXT_ST_STRIDE(vsse32_v, int32_t, ste_w) +GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d) /* *** unit-stride: access elements stored contiguously in memory @@ -373,30 +352,37 @@ GEN_VEXT_ST_STRIDE(vsse_v_d, int64_t, int64_t, ste_d) /* unmasked unit-stride load and store operation*/ static void vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, - vext_ldst_elem_fn *ldst_elem, clear_fn *clear_elem, - uint32_t esz, uint32_t msz, uintptr_t ra, - MMUAccessType access_type) + vext_ldst_elem_fn *ldst_elem, uint32_t log2_esz, uint32_t evl, + uintptr_t ra) { uint32_t i, k; uint32_t nf = vext_nf(desc); - uint32_t vlmax = vext_maxsz(desc) / esz; + uint32_t max_elems = vext_max_elems(desc, log2_esz); + uint32_t esz = 1 << log2_esz; + uint32_t total_elems = vext_get_total_elems(env, desc, esz); + uint32_t vta = vext_vta(desc); - /* probe every access */ - probe_pages(env, base, env->vl * nf * msz, ra, access_type); /* load bytes from guest memory */ - for (i = 0; i < env->vl; i++) { + for (i = env->vstart; i < evl; i++, env->vstart++) { k = 0; while (k < nf) { - target_ulong addr = base + (i * nf + k) * msz; - ldst_elem(env, addr, i + k * vlmax, vd, ra); + target_ulong addr = base + ((i * nf + k) << log2_esz); + ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra); k++; } } - /* clear tail elements */ - if (clear_elem) { - for (k = 0; k < nf; k++) { - clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz); - } + env->vstart = 0; + /* set tail elements to 1s */ + for (k = 0; k < nf; ++k) { + vext_set_elems_1s(vd, vta, (k * max_elems + evl) * esz, + (k * max_elems + max_elems) * esz); + } + if (nf * max_elems % total_elems != 0) { + uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3; + uint32_t registers_used = + ((nf * max_elems) * esz + (vlenb - 1)) / vlenb; + vext_set_elems_1s(vd, vta, (nf * max_elems) * esz, + registers_used * vlenb); } } @@ -405,76 +391,68 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, * stride = NF * sizeof (MTYPE) */ -#define GEN_VEXT_LD_US(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN) \ +#define GEN_VEXT_LD_US(NAME, ETYPE, LOAD_FN) \ void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \ CPURISCVState *env, uint32_t desc) \ { \ - uint32_t stride = vext_nf(desc) * sizeof(MTYPE); \ + 
uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \ vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN, \ - CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE), \ - GETPC(), MMU_DATA_LOAD); \ + ctzl(sizeof(ETYPE)), GETPC()); \ } \ \ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ CPURISCVState *env, uint32_t desc) \ { \ - vext_ldst_us(vd, base, env, desc, LOAD_FN, CLEAR_FN, \ - sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_LOAD); \ + vext_ldst_us(vd, base, env, desc, LOAD_FN, \ + ctzl(sizeof(ETYPE)), env->vl, GETPC()); \ } -GEN_VEXT_LD_US(vlb_v_b, int8_t, int8_t, ldb_b, clearb) -GEN_VEXT_LD_US(vlb_v_h, int8_t, int16_t, ldb_h, clearh) -GEN_VEXT_LD_US(vlb_v_w, int8_t, int32_t, ldb_w, clearl) -GEN_VEXT_LD_US(vlb_v_d, int8_t, int64_t, ldb_d, clearq) -GEN_VEXT_LD_US(vlh_v_h, int16_t, int16_t, ldh_h, clearh) -GEN_VEXT_LD_US(vlh_v_w, int16_t, int32_t, ldh_w, clearl) -GEN_VEXT_LD_US(vlh_v_d, int16_t, int64_t, ldh_d, clearq) -GEN_VEXT_LD_US(vlw_v_w, int32_t, int32_t, ldw_w, clearl) -GEN_VEXT_LD_US(vlw_v_d, int32_t, int64_t, ldw_d, clearq) -GEN_VEXT_LD_US(vle_v_b, int8_t, int8_t, lde_b, clearb) -GEN_VEXT_LD_US(vle_v_h, int16_t, int16_t, lde_h, clearh) -GEN_VEXT_LD_US(vle_v_w, int32_t, int32_t, lde_w, clearl) -GEN_VEXT_LD_US(vle_v_d, int64_t, int64_t, lde_d, clearq) -GEN_VEXT_LD_US(vlbu_v_b, uint8_t, uint8_t, ldbu_b, clearb) -GEN_VEXT_LD_US(vlbu_v_h, uint8_t, uint16_t, ldbu_h, clearh) -GEN_VEXT_LD_US(vlbu_v_w, uint8_t, uint32_t, ldbu_w, clearl) -GEN_VEXT_LD_US(vlbu_v_d, uint8_t, uint64_t, ldbu_d, clearq) -GEN_VEXT_LD_US(vlhu_v_h, uint16_t, uint16_t, ldhu_h, clearh) -GEN_VEXT_LD_US(vlhu_v_w, uint16_t, uint32_t, ldhu_w, clearl) -GEN_VEXT_LD_US(vlhu_v_d, uint16_t, uint64_t, ldhu_d, clearq) -GEN_VEXT_LD_US(vlwu_v_w, uint32_t, uint32_t, ldwu_w, clearl) -GEN_VEXT_LD_US(vlwu_v_d, uint32_t, uint64_t, ldwu_d, clearq) +GEN_VEXT_LD_US(vle8_v, int8_t, lde_b) +GEN_VEXT_LD_US(vle16_v, int16_t, lde_h) +GEN_VEXT_LD_US(vle32_v, int32_t, lde_w) +GEN_VEXT_LD_US(vle64_v, int64_t, lde_d) -#define GEN_VEXT_ST_US(NAME, MTYPE, ETYPE, STORE_FN) \ -void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \ - CPURISCVState *env, uint32_t desc) \ -{ \ - uint32_t stride = vext_nf(desc) * sizeof(MTYPE); \ - vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN, \ - NULL, sizeof(ETYPE), sizeof(MTYPE), \ - GETPC(), MMU_DATA_STORE); \ -} \ - \ -void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ - CPURISCVState *env, uint32_t desc) \ -{ \ - vext_ldst_us(vd, base, env, desc, STORE_FN, NULL, \ - sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_STORE);\ +#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN) \ +void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \ + vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN, \ + ctzl(sizeof(ETYPE)), GETPC()); \ +} \ + \ +void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + vext_ldst_us(vd, base, env, desc, STORE_FN, \ + ctzl(sizeof(ETYPE)), env->vl, GETPC()); \ } -GEN_VEXT_ST_US(vsb_v_b, int8_t, int8_t , stb_b) -GEN_VEXT_ST_US(vsb_v_h, int8_t, int16_t, stb_h) -GEN_VEXT_ST_US(vsb_v_w, int8_t, int32_t, stb_w) -GEN_VEXT_ST_US(vsb_v_d, int8_t, int64_t, stb_d) -GEN_VEXT_ST_US(vsh_v_h, int16_t, int16_t, sth_h) -GEN_VEXT_ST_US(vsh_v_w, int16_t, int32_t, sth_w) -GEN_VEXT_ST_US(vsh_v_d, int16_t, int64_t, sth_d) -GEN_VEXT_ST_US(vsw_v_w, int32_t, int32_t, stw_w) -GEN_VEXT_ST_US(vsw_v_d, 
int32_t, int64_t, stw_d) -GEN_VEXT_ST_US(vse_v_b, int8_t, int8_t , ste_b) -GEN_VEXT_ST_US(vse_v_h, int16_t, int16_t, ste_h) -GEN_VEXT_ST_US(vse_v_w, int32_t, int32_t, ste_w) -GEN_VEXT_ST_US(vse_v_d, int64_t, int64_t, ste_d) +GEN_VEXT_ST_US(vse8_v, int8_t, ste_b) +GEN_VEXT_ST_US(vse16_v, int16_t, ste_h) +GEN_VEXT_ST_US(vse32_v, int32_t, ste_w) +GEN_VEXT_ST_US(vse64_v, int64_t, ste_d) + +/* + *** unit stride mask load and store, EEW = 1 + */ +void HELPER(vlm_v)(void *vd, void *v0, target_ulong base, + CPURISCVState *env, uint32_t desc) +{ + /* evl = ceil(vl/8) */ + uint8_t evl = (env->vl + 7) >> 3; + vext_ldst_us(vd, base, env, desc, lde_b, + 0, evl, GETPC()); +} + +void HELPER(vsm_v)(void *vd, void *v0, target_ulong base, + CPURISCVState *env, uint32_t desc) +{ + /* evl = ceil(vl/8) */ + uint8_t evl = (env->vl + 7) >> 3; + vext_ldst_us(vd, base, env, desc, ste_b, + 0, evl, GETPC()); +} /* *** index: access vector element from indexed memory @@ -489,108 +467,108 @@ static target_ulong NAME(target_ulong base, \ return (base + *((ETYPE *)vs2 + H(idx))); \ } -GEN_VEXT_GET_INDEX_ADDR(idx_b, int8_t, H1) -GEN_VEXT_GET_INDEX_ADDR(idx_h, int16_t, H2) -GEN_VEXT_GET_INDEX_ADDR(idx_w, int32_t, H4) -GEN_VEXT_GET_INDEX_ADDR(idx_d, int64_t, H8) +GEN_VEXT_GET_INDEX_ADDR(idx_b, uint8_t, H1) +GEN_VEXT_GET_INDEX_ADDR(idx_h, uint16_t, H2) +GEN_VEXT_GET_INDEX_ADDR(idx_w, uint32_t, H4) +GEN_VEXT_GET_INDEX_ADDR(idx_d, uint64_t, H8) static inline void vext_ldst_index(void *vd, void *v0, target_ulong base, void *vs2, CPURISCVState *env, uint32_t desc, vext_get_index_addr get_index_addr, vext_ldst_elem_fn *ldst_elem, - clear_fn *clear_elem, - uint32_t esz, uint32_t msz, uintptr_t ra, - MMUAccessType access_type) + uint32_t log2_esz, uintptr_t ra) { uint32_t i, k; uint32_t nf = vext_nf(desc); uint32_t vm = vext_vm(desc); - uint32_t mlen = vext_mlen(desc); - uint32_t vlmax = vext_maxsz(desc) / esz; + uint32_t max_elems = vext_max_elems(desc, log2_esz); + uint32_t esz = 1 << log2_esz; + uint32_t total_elems = vext_get_total_elems(env, desc, esz); + uint32_t vta = vext_vta(desc); + uint32_t vma = vext_vma(desc); - /* probe every access*/ - for (i = 0; i < env->vl; i++) { - if (!vm && !vext_elem_mask(v0, mlen, i)) { - continue; - } - probe_pages(env, get_index_addr(base, i, vs2), nf * msz, ra, - access_type); - } /* load bytes from guest memory */ - for (i = 0; i < env->vl; i++) { + for (i = env->vstart; i < env->vl; i++, env->vstart++) { k = 0; - if (!vm && !vext_elem_mask(v0, mlen, i)) { - continue; - } while (k < nf) { - abi_ptr addr = get_index_addr(base, i, vs2) + k * msz; - ldst_elem(env, addr, i + k * vlmax, vd, ra); + if (!vm && !vext_elem_mask(v0, i)) { + /* set masked-off elements to 1s */ + vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz, + (i + k * max_elems + 1) * esz); + k++; + continue; + } + abi_ptr addr = get_index_addr(base, i, vs2) + (k << log2_esz); + ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra); k++; } } - /* clear tail elements */ - if (clear_elem) { - for (k = 0; k < nf; k++) { - clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz); - } + env->vstart = 0; + /* set tail elements to 1s */ + for (k = 0; k < nf; ++k) { + vext_set_elems_1s(vd, vta, (k * max_elems + env->vl) * esz, + (k * max_elems + max_elems) * esz); + } + if (nf * max_elems % total_elems != 0) { + uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3; + uint32_t registers_used = + ((nf * max_elems) * esz + (vlenb - 1)) / vlenb; + vext_set_elems_1s(vd, vta, (nf * max_elems) * esz, + registers_used * 
vlenb); } } -#define GEN_VEXT_LD_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, LOAD_FN, CLEAR_FN) \ +#define GEN_VEXT_LD_INDEX(NAME, ETYPE, INDEX_FN, LOAD_FN) \ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ void *vs2, CPURISCVState *env, uint32_t desc) \ { \ vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN, \ - LOAD_FN, CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE), \ - GETPC(), MMU_DATA_LOAD); \ + LOAD_FN, ctzl(sizeof(ETYPE)), GETPC()); \ } -GEN_VEXT_LD_INDEX(vlxb_v_b, int8_t, int8_t, idx_b, ldb_b, clearb) -GEN_VEXT_LD_INDEX(vlxb_v_h, int8_t, int16_t, idx_h, ldb_h, clearh) -GEN_VEXT_LD_INDEX(vlxb_v_w, int8_t, int32_t, idx_w, ldb_w, clearl) -GEN_VEXT_LD_INDEX(vlxb_v_d, int8_t, int64_t, idx_d, ldb_d, clearq) -GEN_VEXT_LD_INDEX(vlxh_v_h, int16_t, int16_t, idx_h, ldh_h, clearh) -GEN_VEXT_LD_INDEX(vlxh_v_w, int16_t, int32_t, idx_w, ldh_w, clearl) -GEN_VEXT_LD_INDEX(vlxh_v_d, int16_t, int64_t, idx_d, ldh_d, clearq) -GEN_VEXT_LD_INDEX(vlxw_v_w, int32_t, int32_t, idx_w, ldw_w, clearl) -GEN_VEXT_LD_INDEX(vlxw_v_d, int32_t, int64_t, idx_d, ldw_d, clearq) -GEN_VEXT_LD_INDEX(vlxe_v_b, int8_t, int8_t, idx_b, lde_b, clearb) -GEN_VEXT_LD_INDEX(vlxe_v_h, int16_t, int16_t, idx_h, lde_h, clearh) -GEN_VEXT_LD_INDEX(vlxe_v_w, int32_t, int32_t, idx_w, lde_w, clearl) -GEN_VEXT_LD_INDEX(vlxe_v_d, int64_t, int64_t, idx_d, lde_d, clearq) -GEN_VEXT_LD_INDEX(vlxbu_v_b, uint8_t, uint8_t, idx_b, ldbu_b, clearb) -GEN_VEXT_LD_INDEX(vlxbu_v_h, uint8_t, uint16_t, idx_h, ldbu_h, clearh) -GEN_VEXT_LD_INDEX(vlxbu_v_w, uint8_t, uint32_t, idx_w, ldbu_w, clearl) -GEN_VEXT_LD_INDEX(vlxbu_v_d, uint8_t, uint64_t, idx_d, ldbu_d, clearq) -GEN_VEXT_LD_INDEX(vlxhu_v_h, uint16_t, uint16_t, idx_h, ldhu_h, clearh) -GEN_VEXT_LD_INDEX(vlxhu_v_w, uint16_t, uint32_t, idx_w, ldhu_w, clearl) -GEN_VEXT_LD_INDEX(vlxhu_v_d, uint16_t, uint64_t, idx_d, ldhu_d, clearq) -GEN_VEXT_LD_INDEX(vlxwu_v_w, uint32_t, uint32_t, idx_w, ldwu_w, clearl) -GEN_VEXT_LD_INDEX(vlxwu_v_d, uint32_t, uint64_t, idx_d, ldwu_d, clearq) +GEN_VEXT_LD_INDEX(vlxei8_8_v, int8_t, idx_b, lde_b) +GEN_VEXT_LD_INDEX(vlxei8_16_v, int16_t, idx_b, lde_h) +GEN_VEXT_LD_INDEX(vlxei8_32_v, int32_t, idx_b, lde_w) +GEN_VEXT_LD_INDEX(vlxei8_64_v, int64_t, idx_b, lde_d) +GEN_VEXT_LD_INDEX(vlxei16_8_v, int8_t, idx_h, lde_b) +GEN_VEXT_LD_INDEX(vlxei16_16_v, int16_t, idx_h, lde_h) +GEN_VEXT_LD_INDEX(vlxei16_32_v, int32_t, idx_h, lde_w) +GEN_VEXT_LD_INDEX(vlxei16_64_v, int64_t, idx_h, lde_d) +GEN_VEXT_LD_INDEX(vlxei32_8_v, int8_t, idx_w, lde_b) +GEN_VEXT_LD_INDEX(vlxei32_16_v, int16_t, idx_w, lde_h) +GEN_VEXT_LD_INDEX(vlxei32_32_v, int32_t, idx_w, lde_w) +GEN_VEXT_LD_INDEX(vlxei32_64_v, int64_t, idx_w, lde_d) +GEN_VEXT_LD_INDEX(vlxei64_8_v, int8_t, idx_d, lde_b) +GEN_VEXT_LD_INDEX(vlxei64_16_v, int16_t, idx_d, lde_h) +GEN_VEXT_LD_INDEX(vlxei64_32_v, int32_t, idx_d, lde_w) +GEN_VEXT_LD_INDEX(vlxei64_64_v, int64_t, idx_d, lde_d) -#define GEN_VEXT_ST_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, STORE_FN)\ +#define GEN_VEXT_ST_INDEX(NAME, ETYPE, INDEX_FN, STORE_FN) \ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ void *vs2, CPURISCVState *env, uint32_t desc) \ { \ vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN, \ - STORE_FN, NULL, sizeof(ETYPE), sizeof(MTYPE),\ - GETPC(), MMU_DATA_STORE); \ + STORE_FN, ctzl(sizeof(ETYPE)), \ + GETPC()); \ } -GEN_VEXT_ST_INDEX(vsxb_v_b, int8_t, int8_t, idx_b, stb_b) -GEN_VEXT_ST_INDEX(vsxb_v_h, int8_t, int16_t, idx_h, stb_h) -GEN_VEXT_ST_INDEX(vsxb_v_w, int8_t, int32_t, idx_w, stb_w) -GEN_VEXT_ST_INDEX(vsxb_v_d, int8_t, int64_t, idx_d, stb_d) 
-GEN_VEXT_ST_INDEX(vsxh_v_h, int16_t, int16_t, idx_h, sth_h) -GEN_VEXT_ST_INDEX(vsxh_v_w, int16_t, int32_t, idx_w, sth_w) -GEN_VEXT_ST_INDEX(vsxh_v_d, int16_t, int64_t, idx_d, sth_d) -GEN_VEXT_ST_INDEX(vsxw_v_w, int32_t, int32_t, idx_w, stw_w) -GEN_VEXT_ST_INDEX(vsxw_v_d, int32_t, int64_t, idx_d, stw_d) -GEN_VEXT_ST_INDEX(vsxe_v_b, int8_t, int8_t, idx_b, ste_b) -GEN_VEXT_ST_INDEX(vsxe_v_h, int16_t, int16_t, idx_h, ste_h) -GEN_VEXT_ST_INDEX(vsxe_v_w, int32_t, int32_t, idx_w, ste_w) -GEN_VEXT_ST_INDEX(vsxe_v_d, int64_t, int64_t, idx_d, ste_d) +GEN_VEXT_ST_INDEX(vsxei8_8_v, int8_t, idx_b, ste_b) +GEN_VEXT_ST_INDEX(vsxei8_16_v, int16_t, idx_b, ste_h) +GEN_VEXT_ST_INDEX(vsxei8_32_v, int32_t, idx_b, ste_w) +GEN_VEXT_ST_INDEX(vsxei8_64_v, int64_t, idx_b, ste_d) +GEN_VEXT_ST_INDEX(vsxei16_8_v, int8_t, idx_h, ste_b) +GEN_VEXT_ST_INDEX(vsxei16_16_v, int16_t, idx_h, ste_h) +GEN_VEXT_ST_INDEX(vsxei16_32_v, int32_t, idx_h, ste_w) +GEN_VEXT_ST_INDEX(vsxei16_64_v, int64_t, idx_h, ste_d) +GEN_VEXT_ST_INDEX(vsxei32_8_v, int8_t, idx_w, ste_b) +GEN_VEXT_ST_INDEX(vsxei32_16_v, int16_t, idx_w, ste_h) +GEN_VEXT_ST_INDEX(vsxei32_32_v, int32_t, idx_w, ste_w) +GEN_VEXT_ST_INDEX(vsxei32_64_v, int64_t, idx_w, ste_d) +GEN_VEXT_ST_INDEX(vsxei64_8_v, int8_t, idx_d, ste_b) +GEN_VEXT_ST_INDEX(vsxei64_16_v, int16_t, idx_d, ste_h) +GEN_VEXT_ST_INDEX(vsxei64_32_v, int32_t, idx_d, ste_w) +GEN_VEXT_ST_INDEX(vsxei64_64_v, int64_t, idx_d, ste_d) /* *** unit-stride fault-only-fisrt load instructions @@ -599,40 +577,42 @@ static inline void vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env, uint32_t desc, vext_ldst_elem_fn *ldst_elem, - clear_fn *clear_elem, - uint32_t esz, uint32_t msz, uintptr_t ra) + uint32_t log2_esz, uintptr_t ra) { void *host; uint32_t i, k, vl = 0; - uint32_t mlen = vext_mlen(desc); uint32_t nf = vext_nf(desc); uint32_t vm = vext_vm(desc); - uint32_t vlmax = vext_maxsz(desc) / esz; + uint32_t max_elems = vext_max_elems(desc, log2_esz); + uint32_t esz = 1 << log2_esz; + uint32_t total_elems = vext_get_total_elems(env, desc, esz); + uint32_t vta = vext_vta(desc); + uint32_t vma = vext_vma(desc); target_ulong addr, offset, remain; /* probe every access*/ - for (i = 0; i < env->vl; i++) { - if (!vm && !vext_elem_mask(v0, mlen, i)) { + for (i = env->vstart; i < env->vl; i++) { + if (!vm && !vext_elem_mask(v0, i)) { continue; } - addr = base + nf * i * msz; + addr = adjust_addr(env, base + i * (nf << log2_esz)); if (i == 0) { - probe_pages(env, addr, nf * msz, ra, MMU_DATA_LOAD); + probe_pages(env, addr, nf << log2_esz, ra, MMU_DATA_LOAD); } else { /* if it triggers an exception, no need to check watchpoint */ - remain = nf * msz; + remain = nf << log2_esz; while (remain > 0) { offset = -(addr | TARGET_PAGE_MASK); host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, cpu_mmu_index(env, false)); if (host) { #ifdef CONFIG_USER_ONLY - if (page_check_range(addr, nf * msz, PAGE_READ) < 0) { + if (page_check_range(addr, offset, PAGE_READ) < 0) { vl = i; goto ProbeSuccess; } #else - probe_pages(env, addr, nf * msz, ra, MMU_DATA_LOAD); + probe_pages(env, addr, offset, ra, MMU_DATA_LOAD); #endif } else { vl = i; @@ -642,7 +622,7 @@ vext_ldff(void *vd, void *v0, target_ulong base, break; } remain -= offset; - addr += offset; + addr = adjust_addr(env, addr + offset); } } } @@ -651,89 +631,55 @@ ProbeSuccess: if (vl != 0) { env->vl = vl; } - for (i = 0; i < env->vl; i++) { + for (i = env->vstart; i < env->vl; i++) { k = 0; - if (!vm && !vext_elem_mask(v0, mlen, i)) { - continue; - } while (k < nf) { - 
target_ulong addr = base + (i * nf + k) * msz; - ldst_elem(env, addr, i + k * vlmax, vd, ra); + if (!vm && !vext_elem_mask(v0, i)) { + /* set masked-off elements to 1s */ + vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz, + (i + k * max_elems + 1) * esz); + k++; + continue; + } + target_ulong addr = base + ((i * nf + k) << log2_esz); + ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra); k++; } } - /* clear tail elements */ - if (vl != 0) { - return; + env->vstart = 0; + /* set tail elements to 1s */ + for (k = 0; k < nf; ++k) { + vext_set_elems_1s(vd, vta, (k * max_elems + env->vl) * esz, + (k * max_elems + max_elems) * esz); } - for (k = 0; k < nf; k++) { - clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz); + if (nf * max_elems % total_elems != 0) { + uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3; + uint32_t registers_used = + ((nf * max_elems) * esz + (vlenb - 1)) / vlenb; + vext_set_elems_1s(vd, vta, (nf * max_elems) * esz, + registers_used * vlenb); } } -#define GEN_VEXT_LDFF(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN) \ -void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ - CPURISCVState *env, uint32_t desc) \ -{ \ - vext_ldff(vd, v0, base, env, desc, LOAD_FN, CLEAR_FN, \ - sizeof(ETYPE), sizeof(MTYPE), GETPC()); \ +#define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + vext_ldff(vd, v0, base, env, desc, LOAD_FN, \ + ctzl(sizeof(ETYPE)), GETPC()); \ } -GEN_VEXT_LDFF(vlbff_v_b, int8_t, int8_t, ldb_b, clearb) -GEN_VEXT_LDFF(vlbff_v_h, int8_t, int16_t, ldb_h, clearh) -GEN_VEXT_LDFF(vlbff_v_w, int8_t, int32_t, ldb_w, clearl) -GEN_VEXT_LDFF(vlbff_v_d, int8_t, int64_t, ldb_d, clearq) -GEN_VEXT_LDFF(vlhff_v_h, int16_t, int16_t, ldh_h, clearh) -GEN_VEXT_LDFF(vlhff_v_w, int16_t, int32_t, ldh_w, clearl) -GEN_VEXT_LDFF(vlhff_v_d, int16_t, int64_t, ldh_d, clearq) -GEN_VEXT_LDFF(vlwff_v_w, int32_t, int32_t, ldw_w, clearl) -GEN_VEXT_LDFF(vlwff_v_d, int32_t, int64_t, ldw_d, clearq) -GEN_VEXT_LDFF(vleff_v_b, int8_t, int8_t, lde_b, clearb) -GEN_VEXT_LDFF(vleff_v_h, int16_t, int16_t, lde_h, clearh) -GEN_VEXT_LDFF(vleff_v_w, int32_t, int32_t, lde_w, clearl) -GEN_VEXT_LDFF(vleff_v_d, int64_t, int64_t, lde_d, clearq) -GEN_VEXT_LDFF(vlbuff_v_b, uint8_t, uint8_t, ldbu_b, clearb) -GEN_VEXT_LDFF(vlbuff_v_h, uint8_t, uint16_t, ldbu_h, clearh) -GEN_VEXT_LDFF(vlbuff_v_w, uint8_t, uint32_t, ldbu_w, clearl) -GEN_VEXT_LDFF(vlbuff_v_d, uint8_t, uint64_t, ldbu_d, clearq) -GEN_VEXT_LDFF(vlhuff_v_h, uint16_t, uint16_t, ldhu_h, clearh) -GEN_VEXT_LDFF(vlhuff_v_w, uint16_t, uint32_t, ldhu_w, clearl) -GEN_VEXT_LDFF(vlhuff_v_d, uint16_t, uint64_t, ldhu_d, clearq) -GEN_VEXT_LDFF(vlwuff_v_w, uint32_t, uint32_t, ldwu_w, clearl) -GEN_VEXT_LDFF(vlwuff_v_d, uint32_t, uint64_t, ldwu_d, clearq) +GEN_VEXT_LDFF(vle8ff_v, int8_t, lde_b) +GEN_VEXT_LDFF(vle16ff_v, int16_t, lde_h) +GEN_VEXT_LDFF(vle32ff_v, int32_t, lde_w) +GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d) -/* - *** Vector AMO Operations (Zvamo) - */ -typedef void vext_amo_noatomic_fn(void *vs3, target_ulong addr, - uint32_t wd, uint32_t idx, CPURISCVState *env, - uintptr_t retaddr); - -/* no atomic opreation for vector atomic insructions */ #define DO_SWAP(N, M) (M) #define DO_AND(N, M) (N & M) #define DO_XOR(N, M) (N ^ M) #define DO_OR(N, M) (N | M) #define DO_ADD(N, M) (N + M) -#define GEN_VEXT_AMO_NOATOMIC_OP(NAME, ESZ, MSZ, H, DO_OP, SUF) \ -static void \ -vext_##NAME##_noatomic_op(void *vs3, target_ulong addr, \ - uint32_t wd, uint32_t idx, 
\ - CPURISCVState *env, uintptr_t retaddr)\ -{ \ - typedef int##ESZ##_t ETYPE; \ - typedef int##MSZ##_t MTYPE; \ - typedef uint##MSZ##_t UMTYPE __attribute__((unused)); \ - ETYPE *pe3 = (ETYPE *)vs3 + H(idx); \ - MTYPE a = cpu_ld##SUF##_data(env, addr), b = *pe3; \ - \ - cpu_st##SUF##_data(env, addr, DO_OP(a, b)); \ - if (wd) { \ - *pe3 = a; \ - } \ -} - /* Signed min/max */ #define DO_MAX(N, M) ((N) >= (M) ? (N) : (M)) #define DO_MIN(N, M) ((N) >= (M) ? (M) : (N)) @@ -742,103 +688,78 @@ vext_##NAME##_noatomic_op(void *vs3, target_ulong addr, \ #define DO_MAXU(N, M) DO_MAX((UMTYPE)N, (UMTYPE)M) #define DO_MINU(N, M) DO_MIN((UMTYPE)N, (UMTYPE)M) -GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_w, 32, 32, H4, DO_SWAP, l) -GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_w, 32, 32, H4, DO_ADD, l) -GEN_VEXT_AMO_NOATOMIC_OP(vamoxorw_v_w, 32, 32, H4, DO_XOR, l) -GEN_VEXT_AMO_NOATOMIC_OP(vamoandw_v_w, 32, 32, H4, DO_AND, l) -GEN_VEXT_AMO_NOATOMIC_OP(vamoorw_v_w, 32, 32, H4, DO_OR, l) -GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_w, 32, 32, H4, DO_MIN, l) -GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_w, 32, 32, H4, DO_MAX, l) -GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_w, 32, 32, H4, DO_MINU, l) -GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_w, 32, 32, H4, DO_MAXU, l) -GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_d, 64, 32, H8, DO_SWAP, l) -GEN_VEXT_AMO_NOATOMIC_OP(vamoswapd_v_d, 64, 64, H8, DO_SWAP, q) -GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_d, 64, 32, H8, DO_ADD, l) -GEN_VEXT_AMO_NOATOMIC_OP(vamoaddd_v_d, 64, 64, H8, DO_ADD, q) -GEN_VEXT_AMO_NOATOMIC_OP(vamoxorw_v_d, 64, 32, H8, DO_XOR, l) -GEN_VEXT_AMO_NOATOMIC_OP(vamoxord_v_d, 64, 64, H8, DO_XOR, q) -GEN_VEXT_AMO_NOATOMIC_OP(vamoandw_v_d, 64, 32, H8, DO_AND, l) -GEN_VEXT_AMO_NOATOMIC_OP(vamoandd_v_d, 64, 64, H8, DO_AND, q) -GEN_VEXT_AMO_NOATOMIC_OP(vamoorw_v_d, 64, 32, H8, DO_OR, l) -GEN_VEXT_AMO_NOATOMIC_OP(vamoord_v_d, 64, 64, H8, DO_OR, q) -GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_d, 64, 32, H8, DO_MIN, l) -GEN_VEXT_AMO_NOATOMIC_OP(vamomind_v_d, 64, 64, H8, DO_MIN, q) -GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_d, 64, 32, H8, DO_MAX, l) -GEN_VEXT_AMO_NOATOMIC_OP(vamomaxd_v_d, 64, 64, H8, DO_MAX, q) -GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_d, 64, 32, H8, DO_MINU, l) -GEN_VEXT_AMO_NOATOMIC_OP(vamominud_v_d, 64, 64, H8, DO_MINU, q) -GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_d, 64, 32, H8, DO_MAXU, l) -GEN_VEXT_AMO_NOATOMIC_OP(vamomaxud_v_d, 64, 64, H8, DO_MAXU, q) - -static inline void -vext_amo_noatomic(void *vs3, void *v0, target_ulong base, - void *vs2, CPURISCVState *env, uint32_t desc, - vext_get_index_addr get_index_addr, - vext_amo_noatomic_fn *noatomic_op, - clear_fn *clear_elem, - uint32_t esz, uint32_t msz, uintptr_t ra) +/* + *** load and store whole register instructions + */ +static void +vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, + vext_ldst_elem_fn *ldst_elem, uint32_t log2_esz, uintptr_t ra) { - uint32_t i; - target_long addr; - uint32_t wd = vext_wd(desc); - uint32_t vm = vext_vm(desc); - uint32_t mlen = vext_mlen(desc); - uint32_t vlmax = vext_maxsz(desc) / esz; + uint32_t i, k, off, pos; + uint32_t nf = vext_nf(desc); + uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3; + uint32_t max_elems = vlenb >> log2_esz; - for (i = 0; i < env->vl; i++) { - if (!vm && !vext_elem_mask(v0, mlen, i)) { - continue; + k = env->vstart / max_elems; + off = env->vstart % max_elems; + + if (off) { + /* load/store rest of elements of current segment pointed by vstart */ + for (pos = off; pos < max_elems; pos++, env->vstart++) { + target_ulong addr = base + ((pos + k * max_elems) << log2_esz); + 
ldst_elem(env, adjust_addr(env, addr), pos + k * max_elems, vd, ra); } - probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_LOAD); - probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_STORE); + k++; } - for (i = 0; i < env->vl; i++) { - if (!vm && !vext_elem_mask(v0, mlen, i)) { - continue; + + /* load/store elements for rest of segments */ + for (; k < nf; k++) { + for (i = 0; i < max_elems; i++, env->vstart++) { + target_ulong addr = base + ((i + k * max_elems) << log2_esz); + ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra); } - addr = get_index_addr(base, i, vs2); - noatomic_op(vs3, addr, wd, i, env, ra); } - clear_elem(vs3, env->vl, env->vl * esz, vlmax * esz); + + env->vstart = 0; } -#define GEN_VEXT_AMO(NAME, MTYPE, ETYPE, INDEX_FN, CLEAR_FN) \ -void HELPER(NAME)(void *vs3, void *v0, target_ulong base, \ - void *vs2, CPURISCVState *env, uint32_t desc) \ -{ \ - vext_amo_noatomic(vs3, v0, base, vs2, env, desc, \ - INDEX_FN, vext_##NAME##_noatomic_op, \ - CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE), \ - GETPC()); \ +#define GEN_VEXT_LD_WHOLE(NAME, ETYPE, LOAD_FN) \ +void HELPER(NAME)(void *vd, target_ulong base, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + vext_ldst_whole(vd, base, env, desc, LOAD_FN, \ + ctzl(sizeof(ETYPE)), GETPC()); \ } -GEN_VEXT_AMO(vamoswapw_v_d, int32_t, int64_t, idx_d, clearq) -GEN_VEXT_AMO(vamoswapd_v_d, int64_t, int64_t, idx_d, clearq) -GEN_VEXT_AMO(vamoaddw_v_d, int32_t, int64_t, idx_d, clearq) -GEN_VEXT_AMO(vamoaddd_v_d, int64_t, int64_t, idx_d, clearq) -GEN_VEXT_AMO(vamoxorw_v_d, int32_t, int64_t, idx_d, clearq) -GEN_VEXT_AMO(vamoxord_v_d, int64_t, int64_t, idx_d, clearq) -GEN_VEXT_AMO(vamoandw_v_d, int32_t, int64_t, idx_d, clearq) -GEN_VEXT_AMO(vamoandd_v_d, int64_t, int64_t, idx_d, clearq) -GEN_VEXT_AMO(vamoorw_v_d, int32_t, int64_t, idx_d, clearq) -GEN_VEXT_AMO(vamoord_v_d, int64_t, int64_t, idx_d, clearq) -GEN_VEXT_AMO(vamominw_v_d, int32_t, int64_t, idx_d, clearq) -GEN_VEXT_AMO(vamomind_v_d, int64_t, int64_t, idx_d, clearq) -GEN_VEXT_AMO(vamomaxw_v_d, int32_t, int64_t, idx_d, clearq) -GEN_VEXT_AMO(vamomaxd_v_d, int64_t, int64_t, idx_d, clearq) -GEN_VEXT_AMO(vamominuw_v_d, uint32_t, uint64_t, idx_d, clearq) -GEN_VEXT_AMO(vamominud_v_d, uint64_t, uint64_t, idx_d, clearq) -GEN_VEXT_AMO(vamomaxuw_v_d, uint32_t, uint64_t, idx_d, clearq) -GEN_VEXT_AMO(vamomaxud_v_d, uint64_t, uint64_t, idx_d, clearq) -GEN_VEXT_AMO(vamoswapw_v_w, int32_t, int32_t, idx_w, clearl) -GEN_VEXT_AMO(vamoaddw_v_w, int32_t, int32_t, idx_w, clearl) -GEN_VEXT_AMO(vamoxorw_v_w, int32_t, int32_t, idx_w, clearl) -GEN_VEXT_AMO(vamoandw_v_w, int32_t, int32_t, idx_w, clearl) -GEN_VEXT_AMO(vamoorw_v_w, int32_t, int32_t, idx_w, clearl) -GEN_VEXT_AMO(vamominw_v_w, int32_t, int32_t, idx_w, clearl) -GEN_VEXT_AMO(vamomaxw_v_w, int32_t, int32_t, idx_w, clearl) -GEN_VEXT_AMO(vamominuw_v_w, uint32_t, uint32_t, idx_w, clearl) -GEN_VEXT_AMO(vamomaxuw_v_w, uint32_t, uint32_t, idx_w, clearl) +GEN_VEXT_LD_WHOLE(vl1re8_v, int8_t, lde_b) +GEN_VEXT_LD_WHOLE(vl1re16_v, int16_t, lde_h) +GEN_VEXT_LD_WHOLE(vl1re32_v, int32_t, lde_w) +GEN_VEXT_LD_WHOLE(vl1re64_v, int64_t, lde_d) +GEN_VEXT_LD_WHOLE(vl2re8_v, int8_t, lde_b) +GEN_VEXT_LD_WHOLE(vl2re16_v, int16_t, lde_h) +GEN_VEXT_LD_WHOLE(vl2re32_v, int32_t, lde_w) +GEN_VEXT_LD_WHOLE(vl2re64_v, int64_t, lde_d) +GEN_VEXT_LD_WHOLE(vl4re8_v, int8_t, lde_b) +GEN_VEXT_LD_WHOLE(vl4re16_v, int16_t, lde_h) +GEN_VEXT_LD_WHOLE(vl4re32_v, int32_t, lde_w) +GEN_VEXT_LD_WHOLE(vl4re64_v, int64_t, lde_d) +GEN_VEXT_LD_WHOLE(vl8re8_v, int8_t, 
lde_b) +GEN_VEXT_LD_WHOLE(vl8re16_v, int16_t, lde_h) +GEN_VEXT_LD_WHOLE(vl8re32_v, int32_t, lde_w) +GEN_VEXT_LD_WHOLE(vl8re64_v, int64_t, lde_d) + +#define GEN_VEXT_ST_WHOLE(NAME, ETYPE, STORE_FN) \ +void HELPER(NAME)(void *vd, target_ulong base, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + vext_ldst_whole(vd, base, env, desc, STORE_FN, \ + ctzl(sizeof(ETYPE)), GETPC()); \ +} + +GEN_VEXT_ST_WHOLE(vs1r_v, int8_t, ste_b) +GEN_VEXT_ST_WHOLE(vs2r_v, int8_t, ste_b) +GEN_VEXT_ST_WHOLE(vs4r_v, int8_t, ste_b) +GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b) /* *** Vector Integer Arithmetic Instructions @@ -903,42 +824,46 @@ RVVCALL(OPIVV2, vsub_vv_d, OP_SSS_D, H8, H8, H8, DO_SUB) static void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2, CPURISCVState *env, uint32_t desc, - uint32_t esz, uint32_t dsz, - opivv2_fn *fn, clear_fn *clearfn) + opivv2_fn *fn, uint32_t esz) { - uint32_t vlmax = vext_maxsz(desc) / esz; - uint32_t mlen = vext_mlen(desc); uint32_t vm = vext_vm(desc); uint32_t vl = env->vl; + uint32_t total_elems = vext_get_total_elems(env, desc, esz); + uint32_t vta = vext_vta(desc); + uint32_t vma = vext_vma(desc); uint32_t i; - for (i = 0; i < vl; i++) { - if (!vm && !vext_elem_mask(v0, mlen, i)) { + for (i = env->vstart; i < vl; i++) { + if (!vm && !vext_elem_mask(v0, i)) { + /* set masked-off elements to 1s */ + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); continue; } fn(vd, vs1, vs2, i); } - clearfn(vd, vl, vl * dsz, vlmax * dsz); + env->vstart = 0; + /* set tail elements to 1s */ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); } /* generate the helpers for OPIVV */ -#define GEN_VEXT_VV(NAME, ESZ, DSZ, CLEAR_FN) \ +#define GEN_VEXT_VV(NAME, ESZ) \ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ void *vs2, CPURISCVState *env, \ uint32_t desc) \ { \ - do_vext_vv(vd, v0, vs1, vs2, env, desc, ESZ, DSZ, \ - do_##NAME, CLEAR_FN); \ + do_vext_vv(vd, v0, vs1, vs2, env, desc, \ + do_##NAME, ESZ); \ } -GEN_VEXT_VV(vadd_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vadd_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vadd_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vadd_vv_d, 8, 8, clearq) -GEN_VEXT_VV(vsub_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vsub_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vsub_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vsub_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vadd_vv_b, 1) +GEN_VEXT_VV(vadd_vv_h, 2) +GEN_VEXT_VV(vadd_vv_w, 4) +GEN_VEXT_VV(vadd_vv_d, 8) +GEN_VEXT_VV(vsub_vv_b, 1) +GEN_VEXT_VV(vsub_vv_h, 2) +GEN_VEXT_VV(vsub_vv_w, 4) +GEN_VEXT_VV(vsub_vv_d, 8) typedef void opivx2_fn(void *vd, target_long s1, void *vs2, int i); @@ -968,46 +893,50 @@ RVVCALL(OPIVX2, vrsub_vx_d, OP_SSS_D, H8, H8, DO_RSUB) static void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2, CPURISCVState *env, uint32_t desc, - uint32_t esz, uint32_t dsz, - opivx2_fn fn, clear_fn *clearfn) + opivx2_fn fn, uint32_t esz) { - uint32_t vlmax = vext_maxsz(desc) / esz; - uint32_t mlen = vext_mlen(desc); uint32_t vm = vext_vm(desc); uint32_t vl = env->vl; + uint32_t total_elems = vext_get_total_elems(env, desc, esz); + uint32_t vta = vext_vta(desc); + uint32_t vma = vext_vma(desc); uint32_t i; - for (i = 0; i < vl; i++) { - if (!vm && !vext_elem_mask(v0, mlen, i)) { + for (i = env->vstart; i < vl; i++) { + if (!vm && !vext_elem_mask(v0, i)) { + /* set masked-off elements to 1s */ + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); continue; } fn(vd, s1, vs2, i); } - clearfn(vd, vl, vl * dsz, vlmax * dsz); + env->vstart = 0; + /* set tail elements to 1s */ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); } /* generate the helpers for 
OPIVX */ -#define GEN_VEXT_VX(NAME, ESZ, DSZ, CLEAR_FN) \ +#define GEN_VEXT_VX(NAME, ESZ) \ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ void *vs2, CPURISCVState *env, \ uint32_t desc) \ { \ - do_vext_vx(vd, v0, s1, vs2, env, desc, ESZ, DSZ, \ - do_##NAME, CLEAR_FN); \ + do_vext_vx(vd, v0, s1, vs2, env, desc, \ + do_##NAME, ESZ); \ } -GEN_VEXT_VX(vadd_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vadd_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vadd_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vadd_vx_d, 8, 8, clearq) -GEN_VEXT_VX(vsub_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vsub_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vsub_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vsub_vx_d, 8, 8, clearq) -GEN_VEXT_VX(vrsub_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vrsub_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vrsub_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vrsub_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vadd_vx_b, 1) +GEN_VEXT_VX(vadd_vx_h, 2) +GEN_VEXT_VX(vadd_vx_w, 4) +GEN_VEXT_VX(vadd_vx_d, 8) +GEN_VEXT_VX(vsub_vx_b, 1) +GEN_VEXT_VX(vsub_vx_h, 2) +GEN_VEXT_VX(vsub_vx_w, 4) +GEN_VEXT_VX(vsub_vx_d, 8) +GEN_VEXT_VX(vrsub_vx_b, 1) +GEN_VEXT_VX(vrsub_vx_h, 2) +GEN_VEXT_VX(vrsub_vx_w, 4) +GEN_VEXT_VX(vrsub_vx_d, 8) void HELPER(vec_rsubs8)(void *d, void *a, uint64_t b, uint32_t desc) { @@ -1086,30 +1015,30 @@ RVVCALL(OPIVV2, vwadd_wv_w, WOP_WSSS_W, H8, H4, H4, DO_ADD) RVVCALL(OPIVV2, vwsub_wv_b, WOP_WSSS_B, H2, H1, H1, DO_SUB) RVVCALL(OPIVV2, vwsub_wv_h, WOP_WSSS_H, H4, H2, H2, DO_SUB) RVVCALL(OPIVV2, vwsub_wv_w, WOP_WSSS_W, H8, H4, H4, DO_SUB) -GEN_VEXT_VV(vwaddu_vv_b, 1, 2, clearh) -GEN_VEXT_VV(vwaddu_vv_h, 2, 4, clearl) -GEN_VEXT_VV(vwaddu_vv_w, 4, 8, clearq) -GEN_VEXT_VV(vwsubu_vv_b, 1, 2, clearh) -GEN_VEXT_VV(vwsubu_vv_h, 2, 4, clearl) -GEN_VEXT_VV(vwsubu_vv_w, 4, 8, clearq) -GEN_VEXT_VV(vwadd_vv_b, 1, 2, clearh) -GEN_VEXT_VV(vwadd_vv_h, 2, 4, clearl) -GEN_VEXT_VV(vwadd_vv_w, 4, 8, clearq) -GEN_VEXT_VV(vwsub_vv_b, 1, 2, clearh) -GEN_VEXT_VV(vwsub_vv_h, 2, 4, clearl) -GEN_VEXT_VV(vwsub_vv_w, 4, 8, clearq) -GEN_VEXT_VV(vwaddu_wv_b, 1, 2, clearh) -GEN_VEXT_VV(vwaddu_wv_h, 2, 4, clearl) -GEN_VEXT_VV(vwaddu_wv_w, 4, 8, clearq) -GEN_VEXT_VV(vwsubu_wv_b, 1, 2, clearh) -GEN_VEXT_VV(vwsubu_wv_h, 2, 4, clearl) -GEN_VEXT_VV(vwsubu_wv_w, 4, 8, clearq) -GEN_VEXT_VV(vwadd_wv_b, 1, 2, clearh) -GEN_VEXT_VV(vwadd_wv_h, 2, 4, clearl) -GEN_VEXT_VV(vwadd_wv_w, 4, 8, clearq) -GEN_VEXT_VV(vwsub_wv_b, 1, 2, clearh) -GEN_VEXT_VV(vwsub_wv_h, 2, 4, clearl) -GEN_VEXT_VV(vwsub_wv_w, 4, 8, clearq) +GEN_VEXT_VV(vwaddu_vv_b, 2) +GEN_VEXT_VV(vwaddu_vv_h, 4) +GEN_VEXT_VV(vwaddu_vv_w, 8) +GEN_VEXT_VV(vwsubu_vv_b, 2) +GEN_VEXT_VV(vwsubu_vv_h, 4) +GEN_VEXT_VV(vwsubu_vv_w, 8) +GEN_VEXT_VV(vwadd_vv_b, 2) +GEN_VEXT_VV(vwadd_vv_h, 4) +GEN_VEXT_VV(vwadd_vv_w, 8) +GEN_VEXT_VV(vwsub_vv_b, 2) +GEN_VEXT_VV(vwsub_vv_h, 4) +GEN_VEXT_VV(vwsub_vv_w, 8) +GEN_VEXT_VV(vwaddu_wv_b, 2) +GEN_VEXT_VV(vwaddu_wv_h, 4) +GEN_VEXT_VV(vwaddu_wv_w, 8) +GEN_VEXT_VV(vwsubu_wv_b, 2) +GEN_VEXT_VV(vwsubu_wv_h, 4) +GEN_VEXT_VV(vwsubu_wv_w, 8) +GEN_VEXT_VV(vwadd_wv_b, 2) +GEN_VEXT_VV(vwadd_wv_h, 4) +GEN_VEXT_VV(vwadd_wv_w, 8) +GEN_VEXT_VV(vwsub_wv_b, 2) +GEN_VEXT_VV(vwsub_wv_h, 4) +GEN_VEXT_VV(vwsub_wv_w, 8) RVVCALL(OPIVX2, vwaddu_vx_b, WOP_UUU_B, H2, H1, DO_ADD) RVVCALL(OPIVX2, vwaddu_vx_h, WOP_UUU_H, H4, H2, DO_ADD) @@ -1135,93 +1064,98 @@ RVVCALL(OPIVX2, vwadd_wx_w, WOP_WSSS_W, H8, H4, DO_ADD) RVVCALL(OPIVX2, vwsub_wx_b, WOP_WSSS_B, H2, H1, DO_SUB) RVVCALL(OPIVX2, vwsub_wx_h, WOP_WSSS_H, H4, H2, DO_SUB) RVVCALL(OPIVX2, vwsub_wx_w, WOP_WSSS_W, H8, H4, DO_SUB) -GEN_VEXT_VX(vwaddu_vx_b, 1, 2, clearh) -GEN_VEXT_VX(vwaddu_vx_h, 2, 4, clearl) 
-GEN_VEXT_VX(vwaddu_vx_w, 4, 8, clearq) -GEN_VEXT_VX(vwsubu_vx_b, 1, 2, clearh) -GEN_VEXT_VX(vwsubu_vx_h, 2, 4, clearl) -GEN_VEXT_VX(vwsubu_vx_w, 4, 8, clearq) -GEN_VEXT_VX(vwadd_vx_b, 1, 2, clearh) -GEN_VEXT_VX(vwadd_vx_h, 2, 4, clearl) -GEN_VEXT_VX(vwadd_vx_w, 4, 8, clearq) -GEN_VEXT_VX(vwsub_vx_b, 1, 2, clearh) -GEN_VEXT_VX(vwsub_vx_h, 2, 4, clearl) -GEN_VEXT_VX(vwsub_vx_w, 4, 8, clearq) -GEN_VEXT_VX(vwaddu_wx_b, 1, 2, clearh) -GEN_VEXT_VX(vwaddu_wx_h, 2, 4, clearl) -GEN_VEXT_VX(vwaddu_wx_w, 4, 8, clearq) -GEN_VEXT_VX(vwsubu_wx_b, 1, 2, clearh) -GEN_VEXT_VX(vwsubu_wx_h, 2, 4, clearl) -GEN_VEXT_VX(vwsubu_wx_w, 4, 8, clearq) -GEN_VEXT_VX(vwadd_wx_b, 1, 2, clearh) -GEN_VEXT_VX(vwadd_wx_h, 2, 4, clearl) -GEN_VEXT_VX(vwadd_wx_w, 4, 8, clearq) -GEN_VEXT_VX(vwsub_wx_b, 1, 2, clearh) -GEN_VEXT_VX(vwsub_wx_h, 2, 4, clearl) -GEN_VEXT_VX(vwsub_wx_w, 4, 8, clearq) +GEN_VEXT_VX(vwaddu_vx_b, 2) +GEN_VEXT_VX(vwaddu_vx_h, 4) +GEN_VEXT_VX(vwaddu_vx_w, 8) +GEN_VEXT_VX(vwsubu_vx_b, 2) +GEN_VEXT_VX(vwsubu_vx_h, 4) +GEN_VEXT_VX(vwsubu_vx_w, 8) +GEN_VEXT_VX(vwadd_vx_b, 2) +GEN_VEXT_VX(vwadd_vx_h, 4) +GEN_VEXT_VX(vwadd_vx_w, 8) +GEN_VEXT_VX(vwsub_vx_b, 2) +GEN_VEXT_VX(vwsub_vx_h, 4) +GEN_VEXT_VX(vwsub_vx_w, 8) +GEN_VEXT_VX(vwaddu_wx_b, 2) +GEN_VEXT_VX(vwaddu_wx_h, 4) +GEN_VEXT_VX(vwaddu_wx_w, 8) +GEN_VEXT_VX(vwsubu_wx_b, 2) +GEN_VEXT_VX(vwsubu_wx_h, 4) +GEN_VEXT_VX(vwsubu_wx_w, 8) +GEN_VEXT_VX(vwadd_wx_b, 2) +GEN_VEXT_VX(vwadd_wx_h, 4) +GEN_VEXT_VX(vwadd_wx_w, 8) +GEN_VEXT_VX(vwsub_wx_b, 2) +GEN_VEXT_VX(vwsub_wx_h, 4) +GEN_VEXT_VX(vwsub_wx_w, 8) /* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */ #define DO_VADC(N, M, C) (N + M + C) #define DO_VSBC(N, M, C) (N - M - C) -#define GEN_VEXT_VADC_VVM(NAME, ETYPE, H, DO_OP, CLEAR_FN) \ +#define GEN_VEXT_VADC_VVM(NAME, ETYPE, H, DO_OP) \ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ uint32_t vl = env->vl; \ uint32_t esz = sizeof(ETYPE); \ - uint32_t vlmax = vext_maxsz(desc) / esz; \ + uint32_t total_elems = \ + vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ + for (i = env->vstart; i < vl; i++) { \ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ - uint8_t carry = vext_elem_mask(v0, mlen, i); \ + ETYPE carry = vext_elem_mask(v0, i); \ \ *((ETYPE *)vd + H(i)) = DO_OP(s2, s1, carry); \ } \ - CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ } -GEN_VEXT_VADC_VVM(vadc_vvm_b, uint8_t, H1, DO_VADC, clearb) -GEN_VEXT_VADC_VVM(vadc_vvm_h, uint16_t, H2, DO_VADC, clearh) -GEN_VEXT_VADC_VVM(vadc_vvm_w, uint32_t, H4, DO_VADC, clearl) -GEN_VEXT_VADC_VVM(vadc_vvm_d, uint64_t, H8, DO_VADC, clearq) +GEN_VEXT_VADC_VVM(vadc_vvm_b, uint8_t, H1, DO_VADC) +GEN_VEXT_VADC_VVM(vadc_vvm_h, uint16_t, H2, DO_VADC) +GEN_VEXT_VADC_VVM(vadc_vvm_w, uint32_t, H4, DO_VADC) +GEN_VEXT_VADC_VVM(vadc_vvm_d, uint64_t, H8, DO_VADC) -GEN_VEXT_VADC_VVM(vsbc_vvm_b, uint8_t, H1, DO_VSBC, clearb) -GEN_VEXT_VADC_VVM(vsbc_vvm_h, uint16_t, H2, DO_VSBC, clearh) -GEN_VEXT_VADC_VVM(vsbc_vvm_w, uint32_t, H4, DO_VSBC, clearl) -GEN_VEXT_VADC_VVM(vsbc_vvm_d, uint64_t, H8, DO_VSBC, clearq) +GEN_VEXT_VADC_VVM(vsbc_vvm_b, uint8_t, H1, DO_VSBC) +GEN_VEXT_VADC_VVM(vsbc_vvm_h, uint16_t, H2, DO_VSBC) +GEN_VEXT_VADC_VVM(vsbc_vvm_w, uint32_t, H4, DO_VSBC) +GEN_VEXT_VADC_VVM(vsbc_vvm_d, uint64_t, H8, DO_VSBC) 
-#define GEN_VEXT_VADC_VXM(NAME, ETYPE, H, DO_OP, CLEAR_FN) \ +#define GEN_VEXT_VADC_VXM(NAME, ETYPE, H, DO_OP) \ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ uint32_t vl = env->vl; \ uint32_t esz = sizeof(ETYPE); \ - uint32_t vlmax = vext_maxsz(desc) / esz; \ + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ + for (i = env->vstart; i < vl; i++) { \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ - uint8_t carry = vext_elem_mask(v0, mlen, i); \ + ETYPE carry = vext_elem_mask(v0, i); \ \ *((ETYPE *)vd + H(i)) = DO_OP(s2, (ETYPE)(target_long)s1, carry);\ } \ - CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ } -GEN_VEXT_VADC_VXM(vadc_vxm_b, uint8_t, H1, DO_VADC, clearb) -GEN_VEXT_VADC_VXM(vadc_vxm_h, uint16_t, H2, DO_VADC, clearh) -GEN_VEXT_VADC_VXM(vadc_vxm_w, uint32_t, H4, DO_VADC, clearl) -GEN_VEXT_VADC_VXM(vadc_vxm_d, uint64_t, H8, DO_VADC, clearq) +GEN_VEXT_VADC_VXM(vadc_vxm_b, uint8_t, H1, DO_VADC) +GEN_VEXT_VADC_VXM(vadc_vxm_h, uint16_t, H2, DO_VADC) +GEN_VEXT_VADC_VXM(vadc_vxm_w, uint32_t, H4, DO_VADC) +GEN_VEXT_VADC_VXM(vadc_vxm_d, uint64_t, H8, DO_VADC) -GEN_VEXT_VADC_VXM(vsbc_vxm_b, uint8_t, H1, DO_VSBC, clearb) -GEN_VEXT_VADC_VXM(vsbc_vxm_h, uint16_t, H2, DO_VSBC, clearh) -GEN_VEXT_VADC_VXM(vsbc_vxm_w, uint32_t, H4, DO_VSBC, clearl) -GEN_VEXT_VADC_VXM(vsbc_vxm_d, uint64_t, H8, DO_VSBC, clearq) +GEN_VEXT_VADC_VXM(vsbc_vxm_b, uint8_t, H1, DO_VSBC) +GEN_VEXT_VADC_VXM(vsbc_vxm_h, uint16_t, H2, DO_VSBC) +GEN_VEXT_VADC_VXM(vsbc_vxm_w, uint32_t, H4, DO_VSBC) +GEN_VEXT_VADC_VXM(vsbc_vxm_d, uint64_t, H8, DO_VSBC) #define DO_MADC(N, M, C) (C ? 
(__typeof(N))(N + M + 1) <= N : \ (__typeof(N))(N + M) < N) @@ -1231,20 +1165,25 @@ GEN_VEXT_VADC_VXM(vsbc_vxm_d, uint64_t, H8, DO_VSBC, clearq) void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ uint32_t vl = env->vl; \ - uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \ + uint32_t vm = vext_vm(desc); \ + uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ + for (i = env->vstart; i < vl; i++) { \ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ - uint8_t carry = vext_elem_mask(v0, mlen, i); \ - \ - vext_set_elem_mask(vd, mlen, i, DO_OP(s2, s1, carry));\ + ETYPE carry = !vm && vext_elem_mask(v0, i); \ + vext_set_elem_mask(vd, i, DO_OP(s2, s1, carry)); \ } \ - for (; i < vlmax; i++) { \ - vext_set_elem_mask(vd, mlen, i, 0); \ + env->vstart = 0; \ + /* mask destination register are always tail-agnostic */ \ + /* set tail elements to 1s */ \ + if (vta_all_1s) { \ + for (; i < total_elems; i++) { \ + vext_set_elem_mask(vd, i, 1); \ + } \ } \ } @@ -1262,20 +1201,25 @@ GEN_VEXT_VMADC_VVM(vmsbc_vvm_d, uint64_t, H8, DO_MSBC) void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ void *vs2, CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ uint32_t vl = env->vl; \ - uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \ + uint32_t vm = vext_vm(desc); \ + uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ + for (i = env->vstart; i < vl; i++) { \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ - uint8_t carry = vext_elem_mask(v0, mlen, i); \ - \ - vext_set_elem_mask(vd, mlen, i, \ + ETYPE carry = !vm && vext_elem_mask(v0, i); \ + vext_set_elem_mask(vd, i, \ DO_OP(s2, (ETYPE)(target_long)s1, carry)); \ } \ - for (; i < vlmax; i++) { \ - vext_set_elem_mask(vd, mlen, i, 0); \ + env->vstart = 0; \ + /* mask destination register are always tail-agnostic */ \ + /* set tail elements to 1s */ \ + if (vta_all_1s) { \ + for (; i < total_elems; i++) { \ + vext_set_elem_mask(vd, i, 1); \ + } \ } \ } @@ -1302,18 +1246,18 @@ RVVCALL(OPIVV2, vxor_vv_b, OP_SSS_B, H1, H1, H1, DO_XOR) RVVCALL(OPIVV2, vxor_vv_h, OP_SSS_H, H2, H2, H2, DO_XOR) RVVCALL(OPIVV2, vxor_vv_w, OP_SSS_W, H4, H4, H4, DO_XOR) RVVCALL(OPIVV2, vxor_vv_d, OP_SSS_D, H8, H8, H8, DO_XOR) -GEN_VEXT_VV(vand_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vand_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vand_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vand_vv_d, 8, 8, clearq) -GEN_VEXT_VV(vor_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vor_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vor_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vor_vv_d, 8, 8, clearq) -GEN_VEXT_VV(vxor_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vxor_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vxor_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vxor_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vand_vv_b, 1) +GEN_VEXT_VV(vand_vv_h, 2) +GEN_VEXT_VV(vand_vv_w, 4) +GEN_VEXT_VV(vand_vv_d, 8) +GEN_VEXT_VV(vor_vv_b, 1) +GEN_VEXT_VV(vor_vv_h, 2) +GEN_VEXT_VV(vor_vv_w, 4) +GEN_VEXT_VV(vor_vv_d, 8) +GEN_VEXT_VV(vxor_vv_b, 1) +GEN_VEXT_VV(vxor_vv_h, 2) +GEN_VEXT_VV(vxor_vv_w, 4) +GEN_VEXT_VV(vxor_vv_d, 8) RVVCALL(OPIVX2, vand_vx_b, OP_SSS_B, H1, H1, DO_AND) RVVCALL(OPIVX2, vand_vx_h, OP_SSS_H, H2, H2, DO_AND) @@ -1327,111 +1271,123 @@ RVVCALL(OPIVX2, vxor_vx_b, OP_SSS_B, H1, H1, DO_XOR) RVVCALL(OPIVX2, vxor_vx_h, OP_SSS_H, H2, H2, DO_XOR) RVVCALL(OPIVX2, vxor_vx_w, OP_SSS_W, H4, H4, DO_XOR) 
RVVCALL(OPIVX2, vxor_vx_d, OP_SSS_D, H8, H8, DO_XOR) -GEN_VEXT_VX(vand_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vand_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vand_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vand_vx_d, 8, 8, clearq) -GEN_VEXT_VX(vor_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vor_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vor_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vor_vx_d, 8, 8, clearq) -GEN_VEXT_VX(vxor_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vxor_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vxor_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vxor_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vand_vx_b, 1) +GEN_VEXT_VX(vand_vx_h, 2) +GEN_VEXT_VX(vand_vx_w, 4) +GEN_VEXT_VX(vand_vx_d, 8) +GEN_VEXT_VX(vor_vx_b, 1) +GEN_VEXT_VX(vor_vx_h, 2) +GEN_VEXT_VX(vor_vx_w, 4) +GEN_VEXT_VX(vor_vx_d, 8) +GEN_VEXT_VX(vxor_vx_b, 1) +GEN_VEXT_VX(vxor_vx_h, 2) +GEN_VEXT_VX(vxor_vx_w, 4) +GEN_VEXT_VX(vxor_vx_d, 8) /* Vector Single-Width Bit Shift Instructions */ #define DO_SLL(N, M) (N << (M)) #define DO_SRL(N, M) (N >> (M)) /* generate the helpers for shift instructions with two vector operators */ -#define GEN_VEXT_SHIFT_VV(NAME, TS1, TS2, HS1, HS2, OP, MASK, CLEAR_FN) \ +#define GEN_VEXT_SHIFT_VV(NAME, TS1, TS2, HS1, HS2, OP, MASK) \ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ void *vs2, CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ uint32_t esz = sizeof(TS1); \ - uint32_t vlmax = vext_maxsz(desc) / esz; \ + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ + uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + for (i = env->vstart; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \ continue; \ } \ TS1 s1 = *((TS1 *)vs1 + HS1(i)); \ TS2 s2 = *((TS2 *)vs2 + HS2(i)); \ *((TS1 *)vd + HS1(i)) = OP(s2, s1 & MASK); \ } \ - CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ } -GEN_VEXT_SHIFT_VV(vsll_vv_b, uint8_t, uint8_t, H1, H1, DO_SLL, 0x7, clearb) -GEN_VEXT_SHIFT_VV(vsll_vv_h, uint16_t, uint16_t, H2, H2, DO_SLL, 0xf, clearh) -GEN_VEXT_SHIFT_VV(vsll_vv_w, uint32_t, uint32_t, H4, H4, DO_SLL, 0x1f, clearl) -GEN_VEXT_SHIFT_VV(vsll_vv_d, uint64_t, uint64_t, H8, H8, DO_SLL, 0x3f, clearq) +GEN_VEXT_SHIFT_VV(vsll_vv_b, uint8_t, uint8_t, H1, H1, DO_SLL, 0x7) +GEN_VEXT_SHIFT_VV(vsll_vv_h, uint16_t, uint16_t, H2, H2, DO_SLL, 0xf) +GEN_VEXT_SHIFT_VV(vsll_vv_w, uint32_t, uint32_t, H4, H4, DO_SLL, 0x1f) +GEN_VEXT_SHIFT_VV(vsll_vv_d, uint64_t, uint64_t, H8, H8, DO_SLL, 0x3f) -GEN_VEXT_SHIFT_VV(vsrl_vv_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7, clearb) -GEN_VEXT_SHIFT_VV(vsrl_vv_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf, clearh) -GEN_VEXT_SHIFT_VV(vsrl_vv_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f, clearl) -GEN_VEXT_SHIFT_VV(vsrl_vv_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f, clearq) +GEN_VEXT_SHIFT_VV(vsrl_vv_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7) +GEN_VEXT_SHIFT_VV(vsrl_vv_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf) +GEN_VEXT_SHIFT_VV(vsrl_vv_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f) +GEN_VEXT_SHIFT_VV(vsrl_vv_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f) -GEN_VEXT_SHIFT_VV(vsra_vv_b, uint8_t, int8_t, H1, H1, DO_SRL, 0x7, clearb) -GEN_VEXT_SHIFT_VV(vsra_vv_h, uint16_t, int16_t, H2, H2, DO_SRL, 0xf, clearh) -GEN_VEXT_SHIFT_VV(vsra_vv_w, uint32_t, int32_t, H4, H4, DO_SRL, 0x1f, 
clearl) -GEN_VEXT_SHIFT_VV(vsra_vv_d, uint64_t, int64_t, H8, H8, DO_SRL, 0x3f, clearq) +GEN_VEXT_SHIFT_VV(vsra_vv_b, uint8_t, int8_t, H1, H1, DO_SRL, 0x7) +GEN_VEXT_SHIFT_VV(vsra_vv_h, uint16_t, int16_t, H2, H2, DO_SRL, 0xf) +GEN_VEXT_SHIFT_VV(vsra_vv_w, uint32_t, int32_t, H4, H4, DO_SRL, 0x1f) +GEN_VEXT_SHIFT_VV(vsra_vv_d, uint64_t, int64_t, H8, H8, DO_SRL, 0x3f) /* generate the helpers for shift instructions with one vector and one scalar */ -#define GEN_VEXT_SHIFT_VX(NAME, TD, TS2, HD, HS2, OP, MASK, CLEAR_FN) \ -void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ - void *vs2, CPURISCVState *env, uint32_t desc) \ -{ \ - uint32_t mlen = vext_mlen(desc); \ - uint32_t vm = vext_vm(desc); \ - uint32_t vl = env->vl; \ - uint32_t esz = sizeof(TD); \ - uint32_t vlmax = vext_maxsz(desc) / esz; \ - uint32_t i; \ - \ - for (i = 0; i < vl; i++) { \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ - continue; \ - } \ - TS2 s2 = *((TS2 *)vs2 + HS2(i)); \ - *((TD *)vd + HD(i)) = OP(s2, s1 & MASK); \ - } \ - CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ +#define GEN_VEXT_SHIFT_VX(NAME, TD, TS2, HD, HS2, OP, MASK) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ + void *vs2, CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t esz = sizeof(TD); \ + uint32_t total_elems = \ + vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ + uint32_t vma = vext_vma(desc); \ + uint32_t i; \ + \ + for (i = env->vstart; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + vext_set_elems_1s(vd, vma, i * esz, \ + (i + 1) * esz); \ + continue; \ + } \ + TS2 s2 = *((TS2 *)vs2 + HS2(i)); \ + *((TD *)vd + HD(i)) = OP(s2, s1 & MASK); \ + } \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);\ } -GEN_VEXT_SHIFT_VX(vsll_vx_b, uint8_t, int8_t, H1, H1, DO_SLL, 0x7, clearb) -GEN_VEXT_SHIFT_VX(vsll_vx_h, uint16_t, int16_t, H2, H2, DO_SLL, 0xf, clearh) -GEN_VEXT_SHIFT_VX(vsll_vx_w, uint32_t, int32_t, H4, H4, DO_SLL, 0x1f, clearl) -GEN_VEXT_SHIFT_VX(vsll_vx_d, uint64_t, int64_t, H8, H8, DO_SLL, 0x3f, clearq) +GEN_VEXT_SHIFT_VX(vsll_vx_b, uint8_t, int8_t, H1, H1, DO_SLL, 0x7) +GEN_VEXT_SHIFT_VX(vsll_vx_h, uint16_t, int16_t, H2, H2, DO_SLL, 0xf) +GEN_VEXT_SHIFT_VX(vsll_vx_w, uint32_t, int32_t, H4, H4, DO_SLL, 0x1f) +GEN_VEXT_SHIFT_VX(vsll_vx_d, uint64_t, int64_t, H8, H8, DO_SLL, 0x3f) -GEN_VEXT_SHIFT_VX(vsrl_vx_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7, clearb) -GEN_VEXT_SHIFT_VX(vsrl_vx_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf, clearh) -GEN_VEXT_SHIFT_VX(vsrl_vx_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f, clearl) -GEN_VEXT_SHIFT_VX(vsrl_vx_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f, clearq) +GEN_VEXT_SHIFT_VX(vsrl_vx_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7) +GEN_VEXT_SHIFT_VX(vsrl_vx_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf) +GEN_VEXT_SHIFT_VX(vsrl_vx_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f) +GEN_VEXT_SHIFT_VX(vsrl_vx_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f) -GEN_VEXT_SHIFT_VX(vsra_vx_b, int8_t, int8_t, H1, H1, DO_SRL, 0x7, clearb) -GEN_VEXT_SHIFT_VX(vsra_vx_h, int16_t, int16_t, H2, H2, DO_SRL, 0xf, clearh) -GEN_VEXT_SHIFT_VX(vsra_vx_w, int32_t, int32_t, H4, H4, DO_SRL, 0x1f, clearl) -GEN_VEXT_SHIFT_VX(vsra_vx_d, int64_t, int64_t, H8, H8, DO_SRL, 0x3f, clearq) +GEN_VEXT_SHIFT_VX(vsra_vx_b, int8_t, int8_t, H1, H1, DO_SRL, 0x7) +GEN_VEXT_SHIFT_VX(vsra_vx_h, int16_t, int16_t, H2, H2, DO_SRL, 0xf) 
+GEN_VEXT_SHIFT_VX(vsra_vx_w, int32_t, int32_t, H4, H4, DO_SRL, 0x1f) +GEN_VEXT_SHIFT_VX(vsra_vx_d, int64_t, int64_t, H8, H8, DO_SRL, 0x3f) /* Vector Narrowing Integer Right Shift Instructions */ -GEN_VEXT_SHIFT_VV(vnsrl_vv_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf, clearb) -GEN_VEXT_SHIFT_VV(vnsrl_vv_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh) -GEN_VEXT_SHIFT_VV(vnsrl_vv_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f, clearl) -GEN_VEXT_SHIFT_VV(vnsra_vv_b, uint8_t, int16_t, H1, H2, DO_SRL, 0xf, clearb) -GEN_VEXT_SHIFT_VV(vnsra_vv_h, uint16_t, int32_t, H2, H4, DO_SRL, 0x1f, clearh) -GEN_VEXT_SHIFT_VV(vnsra_vv_w, uint32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl) -GEN_VEXT_SHIFT_VX(vnsrl_vx_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf, clearb) -GEN_VEXT_SHIFT_VX(vnsrl_vx_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh) -GEN_VEXT_SHIFT_VX(vnsrl_vx_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f, clearl) -GEN_VEXT_SHIFT_VX(vnsra_vx_b, int8_t, int16_t, H1, H2, DO_SRL, 0xf, clearb) -GEN_VEXT_SHIFT_VX(vnsra_vx_h, int16_t, int32_t, H2, H4, DO_SRL, 0x1f, clearh) -GEN_VEXT_SHIFT_VX(vnsra_vx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl) +GEN_VEXT_SHIFT_VV(vnsrl_wv_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf) +GEN_VEXT_SHIFT_VV(vnsrl_wv_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f) +GEN_VEXT_SHIFT_VV(vnsrl_wv_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f) +GEN_VEXT_SHIFT_VV(vnsra_wv_b, uint8_t, int16_t, H1, H2, DO_SRL, 0xf) +GEN_VEXT_SHIFT_VV(vnsra_wv_h, uint16_t, int32_t, H2, H4, DO_SRL, 0x1f) +GEN_VEXT_SHIFT_VV(vnsra_wv_w, uint32_t, int64_t, H4, H8, DO_SRL, 0x3f) +GEN_VEXT_SHIFT_VX(vnsrl_wx_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf) +GEN_VEXT_SHIFT_VX(vnsrl_wx_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f) +GEN_VEXT_SHIFT_VX(vnsrl_wx_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f) +GEN_VEXT_SHIFT_VX(vnsra_wx_b, int8_t, int16_t, H1, H2, DO_SRL, 0xf) +GEN_VEXT_SHIFT_VX(vnsra_wx_h, int16_t, int32_t, H2, H4, DO_SRL, 0x1f) +GEN_VEXT_SHIFT_VX(vnsra_wx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f) /* Vector Integer Comparison Instructions */ #define DO_MSEQ(N, M) (N == M) @@ -1444,22 +1400,32 @@ GEN_VEXT_SHIFT_VX(vnsra_vx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl) void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \ + uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t vta_all_1s = vext_vta_all_1s(desc); \ + uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ + for (i = env->vstart; i < vl; i++) { \ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + if (vma) { \ + vext_set_elem_mask(vd, i, 1); \ + } \ continue; \ } \ - vext_set_elem_mask(vd, mlen, i, DO_OP(s2, s1)); \ + vext_set_elem_mask(vd, i, DO_OP(s2, s1)); \ } \ - for (; i < vlmax; i++) { \ - vext_set_elem_mask(vd, mlen, i, 0); \ + env->vstart = 0; \ + /* mask destination register are always tail-agnostic */ \ + /* set tail elements to 1s */ \ + if (vta_all_1s) { \ + for (; i < total_elems; i++) { \ + vext_set_elem_mask(vd, i, 1); \ + } \ } \ } @@ -1497,22 +1463,32 @@ GEN_VEXT_CMP_VV(vmsle_vv_d, int64_t, H8, DO_MSLE) void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ 
uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \ + uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t vta_all_1s = vext_vta_all_1s(desc); \ + uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ + for (i = env->vstart; i < vl; i++) { \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + if (vma) { \ + vext_set_elem_mask(vd, i, 1); \ + } \ continue; \ } \ - vext_set_elem_mask(vd, mlen, i, \ + vext_set_elem_mask(vd, i, \ DO_OP(s2, (ETYPE)(target_long)s1)); \ } \ - for (; i < vlmax; i++) { \ - vext_set_elem_mask(vd, mlen, i, 0); \ + env->vstart = 0; \ + /* mask destination register are always tail-agnostic */ \ + /* set tail elements to 1s */ \ + if (vta_all_1s) { \ + for (; i < total_elems; i++) { \ + vext_set_elem_mask(vd, i, 1); \ + } \ } \ } @@ -1573,22 +1549,22 @@ RVVCALL(OPIVV2, vmax_vv_b, OP_SSS_B, H1, H1, H1, DO_MAX) RVVCALL(OPIVV2, vmax_vv_h, OP_SSS_H, H2, H2, H2, DO_MAX) RVVCALL(OPIVV2, vmax_vv_w, OP_SSS_W, H4, H4, H4, DO_MAX) RVVCALL(OPIVV2, vmax_vv_d, OP_SSS_D, H8, H8, H8, DO_MAX) -GEN_VEXT_VV(vminu_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vminu_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vminu_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vminu_vv_d, 8, 8, clearq) -GEN_VEXT_VV(vmin_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vmin_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vmin_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vmin_vv_d, 8, 8, clearq) -GEN_VEXT_VV(vmaxu_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vmaxu_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vmaxu_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vmaxu_vv_d, 8, 8, clearq) -GEN_VEXT_VV(vmax_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vmax_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vmax_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vmax_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vminu_vv_b, 1) +GEN_VEXT_VV(vminu_vv_h, 2) +GEN_VEXT_VV(vminu_vv_w, 4) +GEN_VEXT_VV(vminu_vv_d, 8) +GEN_VEXT_VV(vmin_vv_b, 1) +GEN_VEXT_VV(vmin_vv_h, 2) +GEN_VEXT_VV(vmin_vv_w, 4) +GEN_VEXT_VV(vmin_vv_d, 8) +GEN_VEXT_VV(vmaxu_vv_b, 1) +GEN_VEXT_VV(vmaxu_vv_h, 2) +GEN_VEXT_VV(vmaxu_vv_w, 4) +GEN_VEXT_VV(vmaxu_vv_d, 8) +GEN_VEXT_VV(vmax_vv_b, 1) +GEN_VEXT_VV(vmax_vv_h, 2) +GEN_VEXT_VV(vmax_vv_w, 4) +GEN_VEXT_VV(vmax_vv_d, 8) RVVCALL(OPIVX2, vminu_vx_b, OP_UUU_B, H1, H1, DO_MIN) RVVCALL(OPIVX2, vminu_vx_h, OP_UUU_H, H2, H2, DO_MIN) @@ -1606,22 +1582,22 @@ RVVCALL(OPIVX2, vmax_vx_b, OP_SSS_B, H1, H1, DO_MAX) RVVCALL(OPIVX2, vmax_vx_h, OP_SSS_H, H2, H2, DO_MAX) RVVCALL(OPIVX2, vmax_vx_w, OP_SSS_W, H4, H4, DO_MAX) RVVCALL(OPIVX2, vmax_vx_d, OP_SSS_D, H8, H8, DO_MAX) -GEN_VEXT_VX(vminu_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vminu_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vminu_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vminu_vx_d, 8, 8, clearq) -GEN_VEXT_VX(vmin_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vmin_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vmin_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vmin_vx_d, 8, 8, clearq) -GEN_VEXT_VX(vmaxu_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vmaxu_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vmaxu_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vmaxu_vx_d, 8, 8, clearq) -GEN_VEXT_VX(vmax_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vmax_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vmax_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vmax_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vminu_vx_b, 1) +GEN_VEXT_VX(vminu_vx_h, 2) +GEN_VEXT_VX(vminu_vx_w, 4) +GEN_VEXT_VX(vminu_vx_d, 8) +GEN_VEXT_VX(vmin_vx_b, 1) +GEN_VEXT_VX(vmin_vx_h, 2) +GEN_VEXT_VX(vmin_vx_w, 4) +GEN_VEXT_VX(vmin_vx_d, 8) +GEN_VEXT_VX(vmaxu_vx_b, 1) +GEN_VEXT_VX(vmaxu_vx_h, 2) +GEN_VEXT_VX(vmaxu_vx_w, 4) 
+GEN_VEXT_VX(vmaxu_vx_d, 8) +GEN_VEXT_VX(vmax_vx_b, 1) +GEN_VEXT_VX(vmax_vx_h, 2) +GEN_VEXT_VX(vmax_vx_w, 4) +GEN_VEXT_VX(vmax_vx_d, 8) /* Vector Single-Width Integer Multiply Instructions */ #define DO_MUL(N, M) (N * M) @@ -1629,10 +1605,10 @@ RVVCALL(OPIVV2, vmul_vv_b, OP_SSS_B, H1, H1, H1, DO_MUL) RVVCALL(OPIVV2, vmul_vv_h, OP_SSS_H, H2, H2, H2, DO_MUL) RVVCALL(OPIVV2, vmul_vv_w, OP_SSS_W, H4, H4, H4, DO_MUL) RVVCALL(OPIVV2, vmul_vv_d, OP_SSS_D, H8, H8, H8, DO_MUL) -GEN_VEXT_VV(vmul_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vmul_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vmul_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vmul_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vmul_vv_b, 1) +GEN_VEXT_VV(vmul_vv_h, 2) +GEN_VEXT_VV(vmul_vv_w, 4) +GEN_VEXT_VV(vmul_vv_d, 8) static int8_t do_mulh_b(int8_t s2, int8_t s1) { @@ -1736,18 +1712,18 @@ RVVCALL(OPIVV2, vmulhsu_vv_b, OP_SUS_B, H1, H1, H1, do_mulhsu_b) RVVCALL(OPIVV2, vmulhsu_vv_h, OP_SUS_H, H2, H2, H2, do_mulhsu_h) RVVCALL(OPIVV2, vmulhsu_vv_w, OP_SUS_W, H4, H4, H4, do_mulhsu_w) RVVCALL(OPIVV2, vmulhsu_vv_d, OP_SUS_D, H8, H8, H8, do_mulhsu_d) -GEN_VEXT_VV(vmulh_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vmulh_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vmulh_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vmulh_vv_d, 8, 8, clearq) -GEN_VEXT_VV(vmulhu_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vmulhu_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vmulhu_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vmulhu_vv_d, 8, 8, clearq) -GEN_VEXT_VV(vmulhsu_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vmulhsu_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vmulhsu_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vmulhsu_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vmulh_vv_b, 1) +GEN_VEXT_VV(vmulh_vv_h, 2) +GEN_VEXT_VV(vmulh_vv_w, 4) +GEN_VEXT_VV(vmulh_vv_d, 8) +GEN_VEXT_VV(vmulhu_vv_b, 1) +GEN_VEXT_VV(vmulhu_vv_h, 2) +GEN_VEXT_VV(vmulhu_vv_w, 4) +GEN_VEXT_VV(vmulhu_vv_d, 8) +GEN_VEXT_VV(vmulhsu_vv_b, 1) +GEN_VEXT_VV(vmulhsu_vv_h, 2) +GEN_VEXT_VV(vmulhsu_vv_w, 4) +GEN_VEXT_VV(vmulhsu_vv_d, 8) RVVCALL(OPIVX2, vmul_vx_b, OP_SSS_B, H1, H1, DO_MUL) RVVCALL(OPIVX2, vmul_vx_h, OP_SSS_H, H2, H2, DO_MUL) @@ -1765,22 +1741,22 @@ RVVCALL(OPIVX2, vmulhsu_vx_b, OP_SUS_B, H1, H1, do_mulhsu_b) RVVCALL(OPIVX2, vmulhsu_vx_h, OP_SUS_H, H2, H2, do_mulhsu_h) RVVCALL(OPIVX2, vmulhsu_vx_w, OP_SUS_W, H4, H4, do_mulhsu_w) RVVCALL(OPIVX2, vmulhsu_vx_d, OP_SUS_D, H8, H8, do_mulhsu_d) -GEN_VEXT_VX(vmul_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vmul_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vmul_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vmul_vx_d, 8, 8, clearq) -GEN_VEXT_VX(vmulh_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vmulh_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vmulh_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vmulh_vx_d, 8, 8, clearq) -GEN_VEXT_VX(vmulhu_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vmulhu_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vmulhu_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vmulhu_vx_d, 8, 8, clearq) -GEN_VEXT_VX(vmulhsu_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vmulhsu_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vmulhsu_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vmulhsu_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vmul_vx_b, 1) +GEN_VEXT_VX(vmul_vx_h, 2) +GEN_VEXT_VX(vmul_vx_w, 4) +GEN_VEXT_VX(vmul_vx_d, 8) +GEN_VEXT_VX(vmulh_vx_b, 1) +GEN_VEXT_VX(vmulh_vx_h, 2) +GEN_VEXT_VX(vmulh_vx_w, 4) +GEN_VEXT_VX(vmulh_vx_d, 8) +GEN_VEXT_VX(vmulhu_vx_b, 1) +GEN_VEXT_VX(vmulhu_vx_h, 2) +GEN_VEXT_VX(vmulhu_vx_w, 4) +GEN_VEXT_VX(vmulhu_vx_d, 8) +GEN_VEXT_VX(vmulhsu_vx_b, 1) +GEN_VEXT_VX(vmulhsu_vx_h, 2) +GEN_VEXT_VX(vmulhsu_vx_w, 4) +GEN_VEXT_VX(vmulhsu_vx_d, 8) /* Vector Integer Divide Instructions */ #define DO_DIVU(N, M) (unlikely(M == 0) ? 
(__typeof(N))(-1) : N / M) @@ -1806,22 +1782,22 @@ RVVCALL(OPIVV2, vrem_vv_b, OP_SSS_B, H1, H1, H1, DO_REM) RVVCALL(OPIVV2, vrem_vv_h, OP_SSS_H, H2, H2, H2, DO_REM) RVVCALL(OPIVV2, vrem_vv_w, OP_SSS_W, H4, H4, H4, DO_REM) RVVCALL(OPIVV2, vrem_vv_d, OP_SSS_D, H8, H8, H8, DO_REM) -GEN_VEXT_VV(vdivu_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vdivu_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vdivu_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vdivu_vv_d, 8, 8, clearq) -GEN_VEXT_VV(vdiv_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vdiv_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vdiv_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vdiv_vv_d, 8, 8, clearq) -GEN_VEXT_VV(vremu_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vremu_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vremu_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vremu_vv_d, 8, 8, clearq) -GEN_VEXT_VV(vrem_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vrem_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vrem_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vrem_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vdivu_vv_b, 1) +GEN_VEXT_VV(vdivu_vv_h, 2) +GEN_VEXT_VV(vdivu_vv_w, 4) +GEN_VEXT_VV(vdivu_vv_d, 8) +GEN_VEXT_VV(vdiv_vv_b, 1) +GEN_VEXT_VV(vdiv_vv_h, 2) +GEN_VEXT_VV(vdiv_vv_w, 4) +GEN_VEXT_VV(vdiv_vv_d, 8) +GEN_VEXT_VV(vremu_vv_b, 1) +GEN_VEXT_VV(vremu_vv_h, 2) +GEN_VEXT_VV(vremu_vv_w, 4) +GEN_VEXT_VV(vremu_vv_d, 8) +GEN_VEXT_VV(vrem_vv_b, 1) +GEN_VEXT_VV(vrem_vv_h, 2) +GEN_VEXT_VV(vrem_vv_w, 4) +GEN_VEXT_VV(vrem_vv_d, 8) RVVCALL(OPIVX2, vdivu_vx_b, OP_UUU_B, H1, H1, DO_DIVU) RVVCALL(OPIVX2, vdivu_vx_h, OP_UUU_H, H2, H2, DO_DIVU) @@ -1839,22 +1815,22 @@ RVVCALL(OPIVX2, vrem_vx_b, OP_SSS_B, H1, H1, DO_REM) RVVCALL(OPIVX2, vrem_vx_h, OP_SSS_H, H2, H2, DO_REM) RVVCALL(OPIVX2, vrem_vx_w, OP_SSS_W, H4, H4, DO_REM) RVVCALL(OPIVX2, vrem_vx_d, OP_SSS_D, H8, H8, DO_REM) -GEN_VEXT_VX(vdivu_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vdivu_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vdivu_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vdivu_vx_d, 8, 8, clearq) -GEN_VEXT_VX(vdiv_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vdiv_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vdiv_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vdiv_vx_d, 8, 8, clearq) -GEN_VEXT_VX(vremu_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vremu_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vremu_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vremu_vx_d, 8, 8, clearq) -GEN_VEXT_VX(vrem_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vrem_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vrem_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vrem_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vdivu_vx_b, 1) +GEN_VEXT_VX(vdivu_vx_h, 2) +GEN_VEXT_VX(vdivu_vx_w, 4) +GEN_VEXT_VX(vdivu_vx_d, 8) +GEN_VEXT_VX(vdiv_vx_b, 1) +GEN_VEXT_VX(vdiv_vx_h, 2) +GEN_VEXT_VX(vdiv_vx_w, 4) +GEN_VEXT_VX(vdiv_vx_d, 8) +GEN_VEXT_VX(vremu_vx_b, 1) +GEN_VEXT_VX(vremu_vx_h, 2) +GEN_VEXT_VX(vremu_vx_w, 4) +GEN_VEXT_VX(vremu_vx_d, 8) +GEN_VEXT_VX(vrem_vx_b, 1) +GEN_VEXT_VX(vrem_vx_h, 2) +GEN_VEXT_VX(vrem_vx_w, 4) +GEN_VEXT_VX(vrem_vx_d, 8) /* Vector Widening Integer Multiply Instructions */ RVVCALL(OPIVV2, vwmul_vv_b, WOP_SSS_B, H2, H1, H1, DO_MUL) @@ -1866,15 +1842,15 @@ RVVCALL(OPIVV2, vwmulu_vv_w, WOP_UUU_W, H8, H4, H4, DO_MUL) RVVCALL(OPIVV2, vwmulsu_vv_b, WOP_SUS_B, H2, H1, H1, DO_MUL) RVVCALL(OPIVV2, vwmulsu_vv_h, WOP_SUS_H, H4, H2, H2, DO_MUL) RVVCALL(OPIVV2, vwmulsu_vv_w, WOP_SUS_W, H8, H4, H4, DO_MUL) -GEN_VEXT_VV(vwmul_vv_b, 1, 2, clearh) -GEN_VEXT_VV(vwmul_vv_h, 2, 4, clearl) -GEN_VEXT_VV(vwmul_vv_w, 4, 8, clearq) -GEN_VEXT_VV(vwmulu_vv_b, 1, 2, clearh) -GEN_VEXT_VV(vwmulu_vv_h, 2, 4, clearl) -GEN_VEXT_VV(vwmulu_vv_w, 4, 8, clearq) -GEN_VEXT_VV(vwmulsu_vv_b, 1, 2, clearh) -GEN_VEXT_VV(vwmulsu_vv_h, 2, 4, clearl) -GEN_VEXT_VV(vwmulsu_vv_w, 4, 8, clearq) +GEN_VEXT_VV(vwmul_vv_b, 2) +GEN_VEXT_VV(vwmul_vv_h, 4) 
+GEN_VEXT_VV(vwmul_vv_w, 8) +GEN_VEXT_VV(vwmulu_vv_b, 2) +GEN_VEXT_VV(vwmulu_vv_h, 4) +GEN_VEXT_VV(vwmulu_vv_w, 8) +GEN_VEXT_VV(vwmulsu_vv_b, 2) +GEN_VEXT_VV(vwmulsu_vv_h, 4) +GEN_VEXT_VV(vwmulsu_vv_w, 8) RVVCALL(OPIVX2, vwmul_vx_b, WOP_SSS_B, H2, H1, DO_MUL) RVVCALL(OPIVX2, vwmul_vx_h, WOP_SSS_H, H4, H2, DO_MUL) @@ -1885,15 +1861,15 @@ RVVCALL(OPIVX2, vwmulu_vx_w, WOP_UUU_W, H8, H4, DO_MUL) RVVCALL(OPIVX2, vwmulsu_vx_b, WOP_SUS_B, H2, H1, DO_MUL) RVVCALL(OPIVX2, vwmulsu_vx_h, WOP_SUS_H, H4, H2, DO_MUL) RVVCALL(OPIVX2, vwmulsu_vx_w, WOP_SUS_W, H8, H4, DO_MUL) -GEN_VEXT_VX(vwmul_vx_b, 1, 2, clearh) -GEN_VEXT_VX(vwmul_vx_h, 2, 4, clearl) -GEN_VEXT_VX(vwmul_vx_w, 4, 8, clearq) -GEN_VEXT_VX(vwmulu_vx_b, 1, 2, clearh) -GEN_VEXT_VX(vwmulu_vx_h, 2, 4, clearl) -GEN_VEXT_VX(vwmulu_vx_w, 4, 8, clearq) -GEN_VEXT_VX(vwmulsu_vx_b, 1, 2, clearh) -GEN_VEXT_VX(vwmulsu_vx_h, 2, 4, clearl) -GEN_VEXT_VX(vwmulsu_vx_w, 4, 8, clearq) +GEN_VEXT_VX(vwmul_vx_b, 2) +GEN_VEXT_VX(vwmul_vx_h, 4) +GEN_VEXT_VX(vwmul_vx_w, 8) +GEN_VEXT_VX(vwmulu_vx_b, 2) +GEN_VEXT_VX(vwmulu_vx_h, 4) +GEN_VEXT_VX(vwmulu_vx_w, 8) +GEN_VEXT_VX(vwmulsu_vx_b, 2) +GEN_VEXT_VX(vwmulsu_vx_h, 4) +GEN_VEXT_VX(vwmulsu_vx_w, 8) /* Vector Single-Width Integer Multiply-Add Instructions */ #define OPIVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \ @@ -1925,22 +1901,22 @@ RVVCALL(OPIVV3, vnmsub_vv_b, OP_SSS_B, H1, H1, H1, DO_NMSUB) RVVCALL(OPIVV3, vnmsub_vv_h, OP_SSS_H, H2, H2, H2, DO_NMSUB) RVVCALL(OPIVV3, vnmsub_vv_w, OP_SSS_W, H4, H4, H4, DO_NMSUB) RVVCALL(OPIVV3, vnmsub_vv_d, OP_SSS_D, H8, H8, H8, DO_NMSUB) -GEN_VEXT_VV(vmacc_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vmacc_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vmacc_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vmacc_vv_d, 8, 8, clearq) -GEN_VEXT_VV(vnmsac_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vnmsac_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vnmsac_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vnmsac_vv_d, 8, 8, clearq) -GEN_VEXT_VV(vmadd_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vmadd_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vmadd_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vmadd_vv_d, 8, 8, clearq) -GEN_VEXT_VV(vnmsub_vv_b, 1, 1, clearb) -GEN_VEXT_VV(vnmsub_vv_h, 2, 2, clearh) -GEN_VEXT_VV(vnmsub_vv_w, 4, 4, clearl) -GEN_VEXT_VV(vnmsub_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vmacc_vv_b, 1) +GEN_VEXT_VV(vmacc_vv_h, 2) +GEN_VEXT_VV(vmacc_vv_w, 4) +GEN_VEXT_VV(vmacc_vv_d, 8) +GEN_VEXT_VV(vnmsac_vv_b, 1) +GEN_VEXT_VV(vnmsac_vv_h, 2) +GEN_VEXT_VV(vnmsac_vv_w, 4) +GEN_VEXT_VV(vnmsac_vv_d, 8) +GEN_VEXT_VV(vmadd_vv_b, 1) +GEN_VEXT_VV(vmadd_vv_h, 2) +GEN_VEXT_VV(vmadd_vv_w, 4) +GEN_VEXT_VV(vmadd_vv_d, 8) +GEN_VEXT_VV(vnmsub_vv_b, 1) +GEN_VEXT_VV(vnmsub_vv_h, 2) +GEN_VEXT_VV(vnmsub_vv_w, 4) +GEN_VEXT_VV(vnmsub_vv_d, 8) #define OPIVX3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \ static void do_##NAME(void *vd, target_long s1, void *vs2, int i) \ @@ -1966,22 +1942,22 @@ RVVCALL(OPIVX3, vnmsub_vx_b, OP_SSS_B, H1, H1, DO_NMSUB) RVVCALL(OPIVX3, vnmsub_vx_h, OP_SSS_H, H2, H2, DO_NMSUB) RVVCALL(OPIVX3, vnmsub_vx_w, OP_SSS_W, H4, H4, DO_NMSUB) RVVCALL(OPIVX3, vnmsub_vx_d, OP_SSS_D, H8, H8, DO_NMSUB) -GEN_VEXT_VX(vmacc_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vmacc_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vmacc_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vmacc_vx_d, 8, 8, clearq) -GEN_VEXT_VX(vnmsac_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vnmsac_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vnmsac_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vnmsac_vx_d, 8, 8, clearq) -GEN_VEXT_VX(vmadd_vx_b, 1, 1, clearb) -GEN_VEXT_VX(vmadd_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vmadd_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vmadd_vx_d, 8, 8, clearq) -GEN_VEXT_VX(vnmsub_vx_b, 1, 1, 
clearb) -GEN_VEXT_VX(vnmsub_vx_h, 2, 2, clearh) -GEN_VEXT_VX(vnmsub_vx_w, 4, 4, clearl) -GEN_VEXT_VX(vnmsub_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vmacc_vx_b, 1) +GEN_VEXT_VX(vmacc_vx_h, 2) +GEN_VEXT_VX(vmacc_vx_w, 4) +GEN_VEXT_VX(vmacc_vx_d, 8) +GEN_VEXT_VX(vnmsac_vx_b, 1) +GEN_VEXT_VX(vnmsac_vx_h, 2) +GEN_VEXT_VX(vnmsac_vx_w, 4) +GEN_VEXT_VX(vnmsac_vx_d, 8) +GEN_VEXT_VX(vmadd_vx_b, 1) +GEN_VEXT_VX(vmadd_vx_h, 2) +GEN_VEXT_VX(vmadd_vx_w, 4) +GEN_VEXT_VX(vmadd_vx_d, 8) +GEN_VEXT_VX(vnmsub_vx_b, 1) +GEN_VEXT_VX(vnmsub_vx_h, 2) +GEN_VEXT_VX(vnmsub_vx_w, 4) +GEN_VEXT_VX(vnmsub_vx_d, 8) /* Vector Widening Integer Multiply-Add Instructions */ RVVCALL(OPIVV3, vwmaccu_vv_b, WOP_UUU_B, H2, H1, H1, DO_MACC) @@ -1993,15 +1969,15 @@ RVVCALL(OPIVV3, vwmacc_vv_w, WOP_SSS_W, H8, H4, H4, DO_MACC) RVVCALL(OPIVV3, vwmaccsu_vv_b, WOP_SSU_B, H2, H1, H1, DO_MACC) RVVCALL(OPIVV3, vwmaccsu_vv_h, WOP_SSU_H, H4, H2, H2, DO_MACC) RVVCALL(OPIVV3, vwmaccsu_vv_w, WOP_SSU_W, H8, H4, H4, DO_MACC) -GEN_VEXT_VV(vwmaccu_vv_b, 1, 2, clearh) -GEN_VEXT_VV(vwmaccu_vv_h, 2, 4, clearl) -GEN_VEXT_VV(vwmaccu_vv_w, 4, 8, clearq) -GEN_VEXT_VV(vwmacc_vv_b, 1, 2, clearh) -GEN_VEXT_VV(vwmacc_vv_h, 2, 4, clearl) -GEN_VEXT_VV(vwmacc_vv_w, 4, 8, clearq) -GEN_VEXT_VV(vwmaccsu_vv_b, 1, 2, clearh) -GEN_VEXT_VV(vwmaccsu_vv_h, 2, 4, clearl) -GEN_VEXT_VV(vwmaccsu_vv_w, 4, 8, clearq) +GEN_VEXT_VV(vwmaccu_vv_b, 2) +GEN_VEXT_VV(vwmaccu_vv_h, 4) +GEN_VEXT_VV(vwmaccu_vv_w, 8) +GEN_VEXT_VV(vwmacc_vv_b, 2) +GEN_VEXT_VV(vwmacc_vv_h, 4) +GEN_VEXT_VV(vwmacc_vv_w, 8) +GEN_VEXT_VV(vwmaccsu_vv_b, 2) +GEN_VEXT_VV(vwmaccsu_vv_h, 4) +GEN_VEXT_VV(vwmaccsu_vv_w, 8) RVVCALL(OPIVX3, vwmaccu_vx_b, WOP_UUU_B, H2, H1, DO_MACC) RVVCALL(OPIVX3, vwmaccu_vx_h, WOP_UUU_H, H4, H2, DO_MACC) @@ -2015,106 +1991,116 @@ RVVCALL(OPIVX3, vwmaccsu_vx_w, WOP_SSU_W, H8, H4, DO_MACC) RVVCALL(OPIVX3, vwmaccus_vx_b, WOP_SUS_B, H2, H1, DO_MACC) RVVCALL(OPIVX3, vwmaccus_vx_h, WOP_SUS_H, H4, H2, DO_MACC) RVVCALL(OPIVX3, vwmaccus_vx_w, WOP_SUS_W, H8, H4, DO_MACC) -GEN_VEXT_VX(vwmaccu_vx_b, 1, 2, clearh) -GEN_VEXT_VX(vwmaccu_vx_h, 2, 4, clearl) -GEN_VEXT_VX(vwmaccu_vx_w, 4, 8, clearq) -GEN_VEXT_VX(vwmacc_vx_b, 1, 2, clearh) -GEN_VEXT_VX(vwmacc_vx_h, 2, 4, clearl) -GEN_VEXT_VX(vwmacc_vx_w, 4, 8, clearq) -GEN_VEXT_VX(vwmaccsu_vx_b, 1, 2, clearh) -GEN_VEXT_VX(vwmaccsu_vx_h, 2, 4, clearl) -GEN_VEXT_VX(vwmaccsu_vx_w, 4, 8, clearq) -GEN_VEXT_VX(vwmaccus_vx_b, 1, 2, clearh) -GEN_VEXT_VX(vwmaccus_vx_h, 2, 4, clearl) -GEN_VEXT_VX(vwmaccus_vx_w, 4, 8, clearq) +GEN_VEXT_VX(vwmaccu_vx_b, 2) +GEN_VEXT_VX(vwmaccu_vx_h, 4) +GEN_VEXT_VX(vwmaccu_vx_w, 8) +GEN_VEXT_VX(vwmacc_vx_b, 2) +GEN_VEXT_VX(vwmacc_vx_h, 4) +GEN_VEXT_VX(vwmacc_vx_w, 8) +GEN_VEXT_VX(vwmaccsu_vx_b, 2) +GEN_VEXT_VX(vwmaccsu_vx_h, 4) +GEN_VEXT_VX(vwmaccsu_vx_w, 8) +GEN_VEXT_VX(vwmaccus_vx_b, 2) +GEN_VEXT_VX(vwmaccus_vx_h, 4) +GEN_VEXT_VX(vwmaccus_vx_w, 8) /* Vector Integer Merge and Move Instructions */ -#define GEN_VEXT_VMV_VV(NAME, ETYPE, H, CLEAR_FN) \ +#define GEN_VEXT_VMV_VV(NAME, ETYPE, H) \ void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \ uint32_t desc) \ { \ uint32_t vl = env->vl; \ uint32_t esz = sizeof(ETYPE); \ - uint32_t vlmax = vext_maxsz(desc) / esz; \ + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ + for (i = env->vstart; i < vl; i++) { \ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ *((ETYPE *)vd + H(i)) = s1; \ } \ - CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + 
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ } -GEN_VEXT_VMV_VV(vmv_v_v_b, int8_t, H1, clearb) -GEN_VEXT_VMV_VV(vmv_v_v_h, int16_t, H2, clearh) -GEN_VEXT_VMV_VV(vmv_v_v_w, int32_t, H4, clearl) -GEN_VEXT_VMV_VV(vmv_v_v_d, int64_t, H8, clearq) +GEN_VEXT_VMV_VV(vmv_v_v_b, int8_t, H1) +GEN_VEXT_VMV_VV(vmv_v_v_h, int16_t, H2) +GEN_VEXT_VMV_VV(vmv_v_v_w, int32_t, H4) +GEN_VEXT_VMV_VV(vmv_v_v_d, int64_t, H8) -#define GEN_VEXT_VMV_VX(NAME, ETYPE, H, CLEAR_FN) \ +#define GEN_VEXT_VMV_VX(NAME, ETYPE, H) \ void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \ uint32_t desc) \ { \ uint32_t vl = env->vl; \ uint32_t esz = sizeof(ETYPE); \ - uint32_t vlmax = vext_maxsz(desc) / esz; \ + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ + for (i = env->vstart; i < vl; i++) { \ *((ETYPE *)vd + H(i)) = (ETYPE)s1; \ } \ - CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ } -GEN_VEXT_VMV_VX(vmv_v_x_b, int8_t, H1, clearb) -GEN_VEXT_VMV_VX(vmv_v_x_h, int16_t, H2, clearh) -GEN_VEXT_VMV_VX(vmv_v_x_w, int32_t, H4, clearl) -GEN_VEXT_VMV_VX(vmv_v_x_d, int64_t, H8, clearq) +GEN_VEXT_VMV_VX(vmv_v_x_b, int8_t, H1) +GEN_VEXT_VMV_VX(vmv_v_x_h, int16_t, H2) +GEN_VEXT_VMV_VX(vmv_v_x_w, int32_t, H4) +GEN_VEXT_VMV_VX(vmv_v_x_d, int64_t, H8) -#define GEN_VEXT_VMERGE_VV(NAME, ETYPE, H, CLEAR_FN) \ +#define GEN_VEXT_VMERGE_VV(NAME, ETYPE, H) \ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ uint32_t vl = env->vl; \ uint32_t esz = sizeof(ETYPE); \ - uint32_t vlmax = vext_maxsz(desc) / esz; \ + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ - ETYPE *vt = (!vext_elem_mask(v0, mlen, i) ? vs2 : vs1); \ + for (i = env->vstart; i < vl; i++) { \ + ETYPE *vt = (!vext_elem_mask(v0, i) ? vs2 : vs1); \ *((ETYPE *)vd + H(i)) = *(vt + H(i)); \ } \ - CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ } -GEN_VEXT_VMERGE_VV(vmerge_vvm_b, int8_t, H1, clearb) -GEN_VEXT_VMERGE_VV(vmerge_vvm_h, int16_t, H2, clearh) -GEN_VEXT_VMERGE_VV(vmerge_vvm_w, int32_t, H4, clearl) -GEN_VEXT_VMERGE_VV(vmerge_vvm_d, int64_t, H8, clearq) +GEN_VEXT_VMERGE_VV(vmerge_vvm_b, int8_t, H1) +GEN_VEXT_VMERGE_VV(vmerge_vvm_h, int16_t, H2) +GEN_VEXT_VMERGE_VV(vmerge_vvm_w, int32_t, H4) +GEN_VEXT_VMERGE_VV(vmerge_vvm_d, int64_t, H8) -#define GEN_VEXT_VMERGE_VX(NAME, ETYPE, H, CLEAR_FN) \ +#define GEN_VEXT_VMERGE_VX(NAME, ETYPE, H) \ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ void *vs2, CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ uint32_t vl = env->vl; \ uint32_t esz = sizeof(ETYPE); \ - uint32_t vlmax = vext_maxsz(desc) / esz; \ + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ + for (i = env->vstart; i < vl; i++) { \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ - ETYPE d = (!vext_elem_mask(v0, mlen, i) ? s2 : \ + ETYPE d = (!vext_elem_mask(v0, i) ? 
s2 : \ (ETYPE)(target_long)s1); \ *((ETYPE *)vd + H(i)) = d; \ } \ - CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ } -GEN_VEXT_VMERGE_VX(vmerge_vxm_b, int8_t, H1, clearb) -GEN_VEXT_VMERGE_VX(vmerge_vxm_h, int16_t, H2, clearh) -GEN_VEXT_VMERGE_VX(vmerge_vxm_w, int32_t, H4, clearl) -GEN_VEXT_VMERGE_VX(vmerge_vxm_d, int64_t, H8, clearq) +GEN_VEXT_VMERGE_VX(vmerge_vxm_b, int8_t, H1) +GEN_VEXT_VMERGE_VX(vmerge_vxm_h, int16_t, H2) +GEN_VEXT_VMERGE_VX(vmerge_vxm_w, int32_t, H4) +GEN_VEXT_VMERGE_VX(vmerge_vxm_d, int64_t, H8) /* *** Vector Fixed-Point Arithmetic Instructions @@ -2142,57 +2128,61 @@ do_##NAME(void *vd, void *vs1, void *vs2, int i, \ static inline void vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2, CPURISCVState *env, - uint32_t vl, uint32_t vm, uint32_t mlen, int vxrm, - opivv2_rm_fn *fn) + uint32_t vl, uint32_t vm, int vxrm, + opivv2_rm_fn *fn, uint32_t vma, uint32_t esz) { - for (uint32_t i = 0; i < vl; i++) { - if (!vm && !vext_elem_mask(v0, mlen, i)) { + for (uint32_t i = env->vstart; i < vl; i++) { + if (!vm && !vext_elem_mask(v0, i)) { + /* set masked-off elements to 1s */ + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); continue; } fn(vd, vs1, vs2, i, env, vxrm); } + env->vstart = 0; } static inline void vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2, CPURISCVState *env, - uint32_t desc, uint32_t esz, uint32_t dsz, - opivv2_rm_fn *fn, clear_fn *clearfn) + uint32_t desc, + opivv2_rm_fn *fn, uint32_t esz) { - uint32_t vlmax = vext_maxsz(desc) / esz; - uint32_t mlen = vext_mlen(desc); uint32_t vm = vext_vm(desc); uint32_t vl = env->vl; + uint32_t total_elems = vext_get_total_elems(env, desc, esz); + uint32_t vta = vext_vta(desc); + uint32_t vma = vext_vma(desc); switch (env->vxrm) { case 0: /* rnu */ vext_vv_rm_1(vd, v0, vs1, vs2, - env, vl, vm, mlen, 0, fn); + env, vl, vm, 0, fn, vma, esz); break; case 1: /* rne */ vext_vv_rm_1(vd, v0, vs1, vs2, - env, vl, vm, mlen, 1, fn); + env, vl, vm, 1, fn, vma, esz); break; case 2: /* rdn */ vext_vv_rm_1(vd, v0, vs1, vs2, - env, vl, vm, mlen, 2, fn); + env, vl, vm, 2, fn, vma, esz); break; default: /* rod */ vext_vv_rm_1(vd, v0, vs1, vs2, - env, vl, vm, mlen, 3, fn); + env, vl, vm, 3, fn, vma, esz); break; } - - clearfn(vd, vl, vl * dsz, vlmax * dsz); + /* set tail elements to 1s */ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); } /* generate helpers for fixed point instructions with OPIVV format */ -#define GEN_VEXT_VV_RM(NAME, ESZ, DSZ, CLEAR_FN) \ +#define GEN_VEXT_VV_RM(NAME, ESZ) \ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ CPURISCVState *env, uint32_t desc) \ { \ - vext_vv_rm_2(vd, v0, vs1, vs2, env, desc, ESZ, DSZ, \ - do_##NAME, CLEAR_FN); \ + vext_vv_rm_2(vd, v0, vs1, vs2, env, desc, \ + do_##NAME, ESZ); \ } static inline uint8_t saddu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b) @@ -2242,10 +2232,10 @@ RVVCALL(OPIVV2_RM, vsaddu_vv_b, OP_UUU_B, H1, H1, H1, saddu8) RVVCALL(OPIVV2_RM, vsaddu_vv_h, OP_UUU_H, H2, H2, H2, saddu16) RVVCALL(OPIVV2_RM, vsaddu_vv_w, OP_UUU_W, H4, H4, H4, saddu32) RVVCALL(OPIVV2_RM, vsaddu_vv_d, OP_UUU_D, H8, H8, H8, saddu64) -GEN_VEXT_VV_RM(vsaddu_vv_b, 1, 1, clearb) -GEN_VEXT_VV_RM(vsaddu_vv_h, 2, 2, clearh) -GEN_VEXT_VV_RM(vsaddu_vv_w, 4, 4, clearl) -GEN_VEXT_VV_RM(vsaddu_vv_d, 8, 8, clearq) +GEN_VEXT_VV_RM(vsaddu_vv_b, 1) +GEN_VEXT_VV_RM(vsaddu_vv_h, 2) +GEN_VEXT_VV_RM(vsaddu_vv_w, 4) +GEN_VEXT_VV_RM(vsaddu_vv_d, 8) typedef void 
opivx2_rm_fn(void *vd, target_long s1, void *vs2, int i, CPURISCVState *env, int vxrm); @@ -2262,67 +2252,71 @@ do_##NAME(void *vd, target_long s1, void *vs2, int i, \ static inline void vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2, CPURISCVState *env, - uint32_t vl, uint32_t vm, uint32_t mlen, int vxrm, - opivx2_rm_fn *fn) + uint32_t vl, uint32_t vm, int vxrm, + opivx2_rm_fn *fn, uint32_t vma, uint32_t esz) { - for (uint32_t i = 0; i < vl; i++) { - if (!vm && !vext_elem_mask(v0, mlen, i)) { + for (uint32_t i = env->vstart; i < vl; i++) { + if (!vm && !vext_elem_mask(v0, i)) { + /* set masked-off elements to 1s */ + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); continue; } fn(vd, s1, vs2, i, env, vxrm); } + env->vstart = 0; } static inline void vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2, CPURISCVState *env, - uint32_t desc, uint32_t esz, uint32_t dsz, - opivx2_rm_fn *fn, clear_fn *clearfn) + uint32_t desc, + opivx2_rm_fn *fn, uint32_t esz) { - uint32_t vlmax = vext_maxsz(desc) / esz; - uint32_t mlen = vext_mlen(desc); uint32_t vm = vext_vm(desc); uint32_t vl = env->vl; + uint32_t total_elems = vext_get_total_elems(env, desc, esz); + uint32_t vta = vext_vta(desc); + uint32_t vma = vext_vma(desc); switch (env->vxrm) { case 0: /* rnu */ vext_vx_rm_1(vd, v0, s1, vs2, - env, vl, vm, mlen, 0, fn); + env, vl, vm, 0, fn, vma, esz); break; case 1: /* rne */ vext_vx_rm_1(vd, v0, s1, vs2, - env, vl, vm, mlen, 1, fn); + env, vl, vm, 1, fn, vma, esz); break; case 2: /* rdn */ vext_vx_rm_1(vd, v0, s1, vs2, - env, vl, vm, mlen, 2, fn); + env, vl, vm, 2, fn, vma, esz); break; default: /* rod */ vext_vx_rm_1(vd, v0, s1, vs2, - env, vl, vm, mlen, 3, fn); + env, vl, vm, 3, fn, vma, esz); break; } - - clearfn(vd, vl, vl * dsz, vlmax * dsz); + /* set tail elements to 1s */ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); } /* generate helpers for fixed point instructions with OPIVX format */ -#define GEN_VEXT_VX_RM(NAME, ESZ, DSZ, CLEAR_FN) \ +#define GEN_VEXT_VX_RM(NAME, ESZ) \ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ void *vs2, CPURISCVState *env, uint32_t desc) \ { \ - vext_vx_rm_2(vd, v0, s1, vs2, env, desc, ESZ, DSZ, \ - do_##NAME, CLEAR_FN); \ + vext_vx_rm_2(vd, v0, s1, vs2, env, desc, \ + do_##NAME, ESZ); \ } RVVCALL(OPIVX2_RM, vsaddu_vx_b, OP_UUU_B, H1, H1, saddu8) RVVCALL(OPIVX2_RM, vsaddu_vx_h, OP_UUU_H, H2, H2, saddu16) RVVCALL(OPIVX2_RM, vsaddu_vx_w, OP_UUU_W, H4, H4, saddu32) RVVCALL(OPIVX2_RM, vsaddu_vx_d, OP_UUU_D, H8, H8, saddu64) -GEN_VEXT_VX_RM(vsaddu_vx_b, 1, 1, clearb) -GEN_VEXT_VX_RM(vsaddu_vx_h, 2, 2, clearh) -GEN_VEXT_VX_RM(vsaddu_vx_w, 4, 4, clearl) -GEN_VEXT_VX_RM(vsaddu_vx_d, 8, 8, clearq) +GEN_VEXT_VX_RM(vsaddu_vx_b, 1) +GEN_VEXT_VX_RM(vsaddu_vx_h, 2) +GEN_VEXT_VX_RM(vsaddu_vx_w, 4) +GEN_VEXT_VX_RM(vsaddu_vx_d, 8) static inline int8_t sadd8(CPURISCVState *env, int vxrm, int8_t a, int8_t b) { @@ -2368,19 +2362,19 @@ RVVCALL(OPIVV2_RM, vsadd_vv_b, OP_SSS_B, H1, H1, H1, sadd8) RVVCALL(OPIVV2_RM, vsadd_vv_h, OP_SSS_H, H2, H2, H2, sadd16) RVVCALL(OPIVV2_RM, vsadd_vv_w, OP_SSS_W, H4, H4, H4, sadd32) RVVCALL(OPIVV2_RM, vsadd_vv_d, OP_SSS_D, H8, H8, H8, sadd64) -GEN_VEXT_VV_RM(vsadd_vv_b, 1, 1, clearb) -GEN_VEXT_VV_RM(vsadd_vv_h, 2, 2, clearh) -GEN_VEXT_VV_RM(vsadd_vv_w, 4, 4, clearl) -GEN_VEXT_VV_RM(vsadd_vv_d, 8, 8, clearq) +GEN_VEXT_VV_RM(vsadd_vv_b, 1) +GEN_VEXT_VV_RM(vsadd_vv_h, 2) +GEN_VEXT_VV_RM(vsadd_vv_w, 4) +GEN_VEXT_VV_RM(vsadd_vv_d, 8) RVVCALL(OPIVX2_RM, vsadd_vx_b, OP_SSS_B, H1, H1, sadd8) RVVCALL(OPIVX2_RM, 
vsadd_vx_h, OP_SSS_H, H2, H2, sadd16) RVVCALL(OPIVX2_RM, vsadd_vx_w, OP_SSS_W, H4, H4, sadd32) RVVCALL(OPIVX2_RM, vsadd_vx_d, OP_SSS_D, H8, H8, sadd64) -GEN_VEXT_VX_RM(vsadd_vx_b, 1, 1, clearb) -GEN_VEXT_VX_RM(vsadd_vx_h, 2, 2, clearh) -GEN_VEXT_VX_RM(vsadd_vx_w, 4, 4, clearl) -GEN_VEXT_VX_RM(vsadd_vx_d, 8, 8, clearq) +GEN_VEXT_VX_RM(vsadd_vx_b, 1) +GEN_VEXT_VX_RM(vsadd_vx_h, 2) +GEN_VEXT_VX_RM(vsadd_vx_w, 4) +GEN_VEXT_VX_RM(vsadd_vx_d, 8) static inline uint8_t ssubu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b) { @@ -2429,19 +2423,19 @@ RVVCALL(OPIVV2_RM, vssubu_vv_b, OP_UUU_B, H1, H1, H1, ssubu8) RVVCALL(OPIVV2_RM, vssubu_vv_h, OP_UUU_H, H2, H2, H2, ssubu16) RVVCALL(OPIVV2_RM, vssubu_vv_w, OP_UUU_W, H4, H4, H4, ssubu32) RVVCALL(OPIVV2_RM, vssubu_vv_d, OP_UUU_D, H8, H8, H8, ssubu64) -GEN_VEXT_VV_RM(vssubu_vv_b, 1, 1, clearb) -GEN_VEXT_VV_RM(vssubu_vv_h, 2, 2, clearh) -GEN_VEXT_VV_RM(vssubu_vv_w, 4, 4, clearl) -GEN_VEXT_VV_RM(vssubu_vv_d, 8, 8, clearq) +GEN_VEXT_VV_RM(vssubu_vv_b, 1) +GEN_VEXT_VV_RM(vssubu_vv_h, 2) +GEN_VEXT_VV_RM(vssubu_vv_w, 4) +GEN_VEXT_VV_RM(vssubu_vv_d, 8) RVVCALL(OPIVX2_RM, vssubu_vx_b, OP_UUU_B, H1, H1, ssubu8) RVVCALL(OPIVX2_RM, vssubu_vx_h, OP_UUU_H, H2, H2, ssubu16) RVVCALL(OPIVX2_RM, vssubu_vx_w, OP_UUU_W, H4, H4, ssubu32) RVVCALL(OPIVX2_RM, vssubu_vx_d, OP_UUU_D, H8, H8, ssubu64) -GEN_VEXT_VX_RM(vssubu_vx_b, 1, 1, clearb) -GEN_VEXT_VX_RM(vssubu_vx_h, 2, 2, clearh) -GEN_VEXT_VX_RM(vssubu_vx_w, 4, 4, clearl) -GEN_VEXT_VX_RM(vssubu_vx_d, 8, 8, clearq) +GEN_VEXT_VX_RM(vssubu_vx_b, 1) +GEN_VEXT_VX_RM(vssubu_vx_h, 2) +GEN_VEXT_VX_RM(vssubu_vx_w, 4) +GEN_VEXT_VX_RM(vssubu_vx_d, 8) static inline int8_t ssub8(CPURISCVState *env, int vxrm, int8_t a, int8_t b) { @@ -2487,19 +2481,19 @@ RVVCALL(OPIVV2_RM, vssub_vv_b, OP_SSS_B, H1, H1, H1, ssub8) RVVCALL(OPIVV2_RM, vssub_vv_h, OP_SSS_H, H2, H2, H2, ssub16) RVVCALL(OPIVV2_RM, vssub_vv_w, OP_SSS_W, H4, H4, H4, ssub32) RVVCALL(OPIVV2_RM, vssub_vv_d, OP_SSS_D, H8, H8, H8, ssub64) -GEN_VEXT_VV_RM(vssub_vv_b, 1, 1, clearb) -GEN_VEXT_VV_RM(vssub_vv_h, 2, 2, clearh) -GEN_VEXT_VV_RM(vssub_vv_w, 4, 4, clearl) -GEN_VEXT_VV_RM(vssub_vv_d, 8, 8, clearq) +GEN_VEXT_VV_RM(vssub_vv_b, 1) +GEN_VEXT_VV_RM(vssub_vv_h, 2) +GEN_VEXT_VV_RM(vssub_vv_w, 4) +GEN_VEXT_VV_RM(vssub_vv_d, 8) RVVCALL(OPIVX2_RM, vssub_vx_b, OP_SSS_B, H1, H1, ssub8) RVVCALL(OPIVX2_RM, vssub_vx_h, OP_SSS_H, H2, H2, ssub16) RVVCALL(OPIVX2_RM, vssub_vx_w, OP_SSS_W, H4, H4, ssub32) RVVCALL(OPIVX2_RM, vssub_vx_d, OP_SSS_D, H8, H8, ssub64) -GEN_VEXT_VX_RM(vssub_vx_b, 1, 1, clearb) -GEN_VEXT_VX_RM(vssub_vx_h, 2, 2, clearh) -GEN_VEXT_VX_RM(vssub_vx_w, 4, 4, clearl) -GEN_VEXT_VX_RM(vssub_vx_d, 8, 8, clearq) +GEN_VEXT_VX_RM(vssub_vx_b, 1) +GEN_VEXT_VX_RM(vssub_vx_h, 2) +GEN_VEXT_VX_RM(vssub_vx_w, 4) +GEN_VEXT_VX_RM(vssub_vx_d, 8) /* Vector Single-Width Averaging Add and Subtract */ static inline uint8_t get_round(int vxrm, uint64_t v, uint8_t shift) @@ -2551,19 +2545,56 @@ RVVCALL(OPIVV2_RM, vaadd_vv_b, OP_SSS_B, H1, H1, H1, aadd32) RVVCALL(OPIVV2_RM, vaadd_vv_h, OP_SSS_H, H2, H2, H2, aadd32) RVVCALL(OPIVV2_RM, vaadd_vv_w, OP_SSS_W, H4, H4, H4, aadd32) RVVCALL(OPIVV2_RM, vaadd_vv_d, OP_SSS_D, H8, H8, H8, aadd64) -GEN_VEXT_VV_RM(vaadd_vv_b, 1, 1, clearb) -GEN_VEXT_VV_RM(vaadd_vv_h, 2, 2, clearh) -GEN_VEXT_VV_RM(vaadd_vv_w, 4, 4, clearl) -GEN_VEXT_VV_RM(vaadd_vv_d, 8, 8, clearq) +GEN_VEXT_VV_RM(vaadd_vv_b, 1) +GEN_VEXT_VV_RM(vaadd_vv_h, 2) +GEN_VEXT_VV_RM(vaadd_vv_w, 4) +GEN_VEXT_VV_RM(vaadd_vv_d, 8) RVVCALL(OPIVX2_RM, vaadd_vx_b, OP_SSS_B, H1, H1, aadd32) RVVCALL(OPIVX2_RM, 
vaadd_vx_h, OP_SSS_H, H2, H2, aadd32) RVVCALL(OPIVX2_RM, vaadd_vx_w, OP_SSS_W, H4, H4, aadd32) RVVCALL(OPIVX2_RM, vaadd_vx_d, OP_SSS_D, H8, H8, aadd64) -GEN_VEXT_VX_RM(vaadd_vx_b, 1, 1, clearb) -GEN_VEXT_VX_RM(vaadd_vx_h, 2, 2, clearh) -GEN_VEXT_VX_RM(vaadd_vx_w, 4, 4, clearl) -GEN_VEXT_VX_RM(vaadd_vx_d, 8, 8, clearq) +GEN_VEXT_VX_RM(vaadd_vx_b, 1) +GEN_VEXT_VX_RM(vaadd_vx_h, 2) +GEN_VEXT_VX_RM(vaadd_vx_w, 4) +GEN_VEXT_VX_RM(vaadd_vx_d, 8) + +static inline uint32_t aaddu32(CPURISCVState *env, int vxrm, + uint32_t a, uint32_t b) +{ + uint64_t res = (uint64_t)a + b; + uint8_t round = get_round(vxrm, res, 1); + + return (res >> 1) + round; +} + +static inline uint64_t aaddu64(CPURISCVState *env, int vxrm, + uint64_t a, uint64_t b) +{ + uint64_t res = a + b; + uint8_t round = get_round(vxrm, res, 1); + uint64_t over = (uint64_t)(res < a) << 63; + + return ((res >> 1) | over) + round; +} + +RVVCALL(OPIVV2_RM, vaaddu_vv_b, OP_UUU_B, H1, H1, H1, aaddu32) +RVVCALL(OPIVV2_RM, vaaddu_vv_h, OP_UUU_H, H2, H2, H2, aaddu32) +RVVCALL(OPIVV2_RM, vaaddu_vv_w, OP_UUU_W, H4, H4, H4, aaddu32) +RVVCALL(OPIVV2_RM, vaaddu_vv_d, OP_UUU_D, H8, H8, H8, aaddu64) +GEN_VEXT_VV_RM(vaaddu_vv_b, 1) +GEN_VEXT_VV_RM(vaaddu_vv_h, 2) +GEN_VEXT_VV_RM(vaaddu_vv_w, 4) +GEN_VEXT_VV_RM(vaaddu_vv_d, 8) + +RVVCALL(OPIVX2_RM, vaaddu_vx_b, OP_UUU_B, H1, H1, aaddu32) +RVVCALL(OPIVX2_RM, vaaddu_vx_h, OP_UUU_H, H2, H2, aaddu32) +RVVCALL(OPIVX2_RM, vaaddu_vx_w, OP_UUU_W, H4, H4, aaddu32) +RVVCALL(OPIVX2_RM, vaaddu_vx_d, OP_UUU_D, H8, H8, aaddu64) +GEN_VEXT_VX_RM(vaaddu_vx_b, 1) +GEN_VEXT_VX_RM(vaaddu_vx_h, 2) +GEN_VEXT_VX_RM(vaaddu_vx_w, 4) +GEN_VEXT_VX_RM(vaaddu_vx_d, 8) static inline int32_t asub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b) { @@ -2587,19 +2618,56 @@ RVVCALL(OPIVV2_RM, vasub_vv_b, OP_SSS_B, H1, H1, H1, asub32) RVVCALL(OPIVV2_RM, vasub_vv_h, OP_SSS_H, H2, H2, H2, asub32) RVVCALL(OPIVV2_RM, vasub_vv_w, OP_SSS_W, H4, H4, H4, asub32) RVVCALL(OPIVV2_RM, vasub_vv_d, OP_SSS_D, H8, H8, H8, asub64) -GEN_VEXT_VV_RM(vasub_vv_b, 1, 1, clearb) -GEN_VEXT_VV_RM(vasub_vv_h, 2, 2, clearh) -GEN_VEXT_VV_RM(vasub_vv_w, 4, 4, clearl) -GEN_VEXT_VV_RM(vasub_vv_d, 8, 8, clearq) +GEN_VEXT_VV_RM(vasub_vv_b, 1) +GEN_VEXT_VV_RM(vasub_vv_h, 2) +GEN_VEXT_VV_RM(vasub_vv_w, 4) +GEN_VEXT_VV_RM(vasub_vv_d, 8) RVVCALL(OPIVX2_RM, vasub_vx_b, OP_SSS_B, H1, H1, asub32) RVVCALL(OPIVX2_RM, vasub_vx_h, OP_SSS_H, H2, H2, asub32) RVVCALL(OPIVX2_RM, vasub_vx_w, OP_SSS_W, H4, H4, asub32) RVVCALL(OPIVX2_RM, vasub_vx_d, OP_SSS_D, H8, H8, asub64) -GEN_VEXT_VX_RM(vasub_vx_b, 1, 1, clearb) -GEN_VEXT_VX_RM(vasub_vx_h, 2, 2, clearh) -GEN_VEXT_VX_RM(vasub_vx_w, 4, 4, clearl) -GEN_VEXT_VX_RM(vasub_vx_d, 8, 8, clearq) +GEN_VEXT_VX_RM(vasub_vx_b, 1) +GEN_VEXT_VX_RM(vasub_vx_h, 2) +GEN_VEXT_VX_RM(vasub_vx_w, 4) +GEN_VEXT_VX_RM(vasub_vx_d, 8) + +static inline uint32_t asubu32(CPURISCVState *env, int vxrm, + uint32_t a, uint32_t b) +{ + int64_t res = (int64_t)a - b; + uint8_t round = get_round(vxrm, res, 1); + + return (res >> 1) + round; +} + +static inline uint64_t asubu64(CPURISCVState *env, int vxrm, + uint64_t a, uint64_t b) +{ + uint64_t res = (uint64_t)a - b; + uint8_t round = get_round(vxrm, res, 1); + uint64_t over = (uint64_t)(res > a) << 63; + + return ((res >> 1) | over) + round; +} + +RVVCALL(OPIVV2_RM, vasubu_vv_b, OP_UUU_B, H1, H1, H1, asubu32) +RVVCALL(OPIVV2_RM, vasubu_vv_h, OP_UUU_H, H2, H2, H2, asubu32) +RVVCALL(OPIVV2_RM, vasubu_vv_w, OP_UUU_W, H4, H4, H4, asubu32) +RVVCALL(OPIVV2_RM, vasubu_vv_d, OP_UUU_D, H8, H8, H8, asubu64) 
+GEN_VEXT_VV_RM(vasubu_vv_b, 1) +GEN_VEXT_VV_RM(vasubu_vv_h, 2) +GEN_VEXT_VV_RM(vasubu_vv_w, 4) +GEN_VEXT_VV_RM(vasubu_vv_d, 8) + +RVVCALL(OPIVX2_RM, vasubu_vx_b, OP_UUU_B, H1, H1, asubu32) +RVVCALL(OPIVX2_RM, vasubu_vx_h, OP_UUU_H, H2, H2, asubu32) +RVVCALL(OPIVX2_RM, vasubu_vx_w, OP_UUU_W, H4, H4, asubu32) +RVVCALL(OPIVX2_RM, vasubu_vx_d, OP_UUU_D, H8, H8, asubu64) +GEN_VEXT_VX_RM(vasubu_vx_b, 1) +GEN_VEXT_VX_RM(vasubu_vx_h, 2) +GEN_VEXT_VX_RM(vasubu_vx_w, 4) +GEN_VEXT_VX_RM(vasubu_vx_d, 8) /* Vector Single-Width Fractional Multiply with Rounding and Saturation */ static inline int8_t vsmul8(CPURISCVState *env, int vxrm, int8_t a, int8_t b) @@ -2694,224 +2762,19 @@ RVVCALL(OPIVV2_RM, vsmul_vv_b, OP_SSS_B, H1, H1, H1, vsmul8) RVVCALL(OPIVV2_RM, vsmul_vv_h, OP_SSS_H, H2, H2, H2, vsmul16) RVVCALL(OPIVV2_RM, vsmul_vv_w, OP_SSS_W, H4, H4, H4, vsmul32) RVVCALL(OPIVV2_RM, vsmul_vv_d, OP_SSS_D, H8, H8, H8, vsmul64) -GEN_VEXT_VV_RM(vsmul_vv_b, 1, 1, clearb) -GEN_VEXT_VV_RM(vsmul_vv_h, 2, 2, clearh) -GEN_VEXT_VV_RM(vsmul_vv_w, 4, 4, clearl) -GEN_VEXT_VV_RM(vsmul_vv_d, 8, 8, clearq) +GEN_VEXT_VV_RM(vsmul_vv_b, 1) +GEN_VEXT_VV_RM(vsmul_vv_h, 2) +GEN_VEXT_VV_RM(vsmul_vv_w, 4) +GEN_VEXT_VV_RM(vsmul_vv_d, 8) RVVCALL(OPIVX2_RM, vsmul_vx_b, OP_SSS_B, H1, H1, vsmul8) RVVCALL(OPIVX2_RM, vsmul_vx_h, OP_SSS_H, H2, H2, vsmul16) RVVCALL(OPIVX2_RM, vsmul_vx_w, OP_SSS_W, H4, H4, vsmul32) RVVCALL(OPIVX2_RM, vsmul_vx_d, OP_SSS_D, H8, H8, vsmul64) -GEN_VEXT_VX_RM(vsmul_vx_b, 1, 1, clearb) -GEN_VEXT_VX_RM(vsmul_vx_h, 2, 2, clearh) -GEN_VEXT_VX_RM(vsmul_vx_w, 4, 4, clearl) -GEN_VEXT_VX_RM(vsmul_vx_d, 8, 8, clearq) - -/* Vector Widening Saturating Scaled Multiply-Add */ -static inline uint16_t -vwsmaccu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b, - uint16_t c) -{ - uint8_t round; - uint16_t res = (uint16_t)a * b; - - round = get_round(vxrm, res, 4); - res = (res >> 4) + round; - return saddu16(env, vxrm, c, res); -} - -static inline uint32_t -vwsmaccu16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b, - uint32_t c) -{ - uint8_t round; - uint32_t res = (uint32_t)a * b; - - round = get_round(vxrm, res, 8); - res = (res >> 8) + round; - return saddu32(env, vxrm, c, res); -} - -static inline uint64_t -vwsmaccu32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b, - uint64_t c) -{ - uint8_t round; - uint64_t res = (uint64_t)a * b; - - round = get_round(vxrm, res, 16); - res = (res >> 16) + round; - return saddu64(env, vxrm, c, res); -} - -#define OPIVV3_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \ -static inline void \ -do_##NAME(void *vd, void *vs1, void *vs2, int i, \ - CPURISCVState *env, int vxrm) \ -{ \ - TX1 s1 = *((T1 *)vs1 + HS1(i)); \ - TX2 s2 = *((T2 *)vs2 + HS2(i)); \ - TD d = *((TD *)vd + HD(i)); \ - *((TD *)vd + HD(i)) = OP(env, vxrm, s2, s1, d); \ -} - -RVVCALL(OPIVV3_RM, vwsmaccu_vv_b, WOP_UUU_B, H2, H1, H1, vwsmaccu8) -RVVCALL(OPIVV3_RM, vwsmaccu_vv_h, WOP_UUU_H, H4, H2, H2, vwsmaccu16) -RVVCALL(OPIVV3_RM, vwsmaccu_vv_w, WOP_UUU_W, H8, H4, H4, vwsmaccu32) -GEN_VEXT_VV_RM(vwsmaccu_vv_b, 1, 2, clearh) -GEN_VEXT_VV_RM(vwsmaccu_vv_h, 2, 4, clearl) -GEN_VEXT_VV_RM(vwsmaccu_vv_w, 4, 8, clearq) - -#define OPIVX3_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \ -static inline void \ -do_##NAME(void *vd, target_long s1, void *vs2, int i, \ - CPURISCVState *env, int vxrm) \ -{ \ - TX2 s2 = *((T2 *)vs2 + HS2(i)); \ - TD d = *((TD *)vd + HD(i)); \ - *((TD *)vd + HD(i)) = OP(env, vxrm, s2, (TX1)(T1)s1, d); \ -} - -RVVCALL(OPIVX3_RM, vwsmaccu_vx_b, WOP_UUU_B, H2, H1, vwsmaccu8) -RVVCALL(OPIVX3_RM, 
vwsmaccu_vx_h, WOP_UUU_H, H4, H2, vwsmaccu16) -RVVCALL(OPIVX3_RM, vwsmaccu_vx_w, WOP_UUU_W, H8, H4, vwsmaccu32) -GEN_VEXT_VX_RM(vwsmaccu_vx_b, 1, 2, clearh) -GEN_VEXT_VX_RM(vwsmaccu_vx_h, 2, 4, clearl) -GEN_VEXT_VX_RM(vwsmaccu_vx_w, 4, 8, clearq) - -static inline int16_t -vwsmacc8(CPURISCVState *env, int vxrm, int8_t a, int8_t b, int16_t c) -{ - uint8_t round; - int16_t res = (int16_t)a * b; - - round = get_round(vxrm, res, 4); - res = (res >> 4) + round; - return sadd16(env, vxrm, c, res); -} - -static inline int32_t -vwsmacc16(CPURISCVState *env, int vxrm, int16_t a, int16_t b, int32_t c) -{ - uint8_t round; - int32_t res = (int32_t)a * b; - - round = get_round(vxrm, res, 8); - res = (res >> 8) + round; - return sadd32(env, vxrm, c, res); - -} - -static inline int64_t -vwsmacc32(CPURISCVState *env, int vxrm, int32_t a, int32_t b, int64_t c) -{ - uint8_t round; - int64_t res = (int64_t)a * b; - - round = get_round(vxrm, res, 16); - res = (res >> 16) + round; - return sadd64(env, vxrm, c, res); -} - -RVVCALL(OPIVV3_RM, vwsmacc_vv_b, WOP_SSS_B, H2, H1, H1, vwsmacc8) -RVVCALL(OPIVV3_RM, vwsmacc_vv_h, WOP_SSS_H, H4, H2, H2, vwsmacc16) -RVVCALL(OPIVV3_RM, vwsmacc_vv_w, WOP_SSS_W, H8, H4, H4, vwsmacc32) -GEN_VEXT_VV_RM(vwsmacc_vv_b, 1, 2, clearh) -GEN_VEXT_VV_RM(vwsmacc_vv_h, 2, 4, clearl) -GEN_VEXT_VV_RM(vwsmacc_vv_w, 4, 8, clearq) -RVVCALL(OPIVX3_RM, vwsmacc_vx_b, WOP_SSS_B, H2, H1, vwsmacc8) -RVVCALL(OPIVX3_RM, vwsmacc_vx_h, WOP_SSS_H, H4, H2, vwsmacc16) -RVVCALL(OPIVX3_RM, vwsmacc_vx_w, WOP_SSS_W, H8, H4, vwsmacc32) -GEN_VEXT_VX_RM(vwsmacc_vx_b, 1, 2, clearh) -GEN_VEXT_VX_RM(vwsmacc_vx_h, 2, 4, clearl) -GEN_VEXT_VX_RM(vwsmacc_vx_w, 4, 8, clearq) - -static inline int16_t -vwsmaccsu8(CPURISCVState *env, int vxrm, uint8_t a, int8_t b, int16_t c) -{ - uint8_t round; - int16_t res = a * (int16_t)b; - - round = get_round(vxrm, res, 4); - res = (res >> 4) + round; - return ssub16(env, vxrm, c, res); -} - -static inline int32_t -vwsmaccsu16(CPURISCVState *env, int vxrm, uint16_t a, int16_t b, uint32_t c) -{ - uint8_t round; - int32_t res = a * (int32_t)b; - - round = get_round(vxrm, res, 8); - res = (res >> 8) + round; - return ssub32(env, vxrm, c, res); -} - -static inline int64_t -vwsmaccsu32(CPURISCVState *env, int vxrm, uint32_t a, int32_t b, int64_t c) -{ - uint8_t round; - int64_t res = a * (int64_t)b; - - round = get_round(vxrm, res, 16); - res = (res >> 16) + round; - return ssub64(env, vxrm, c, res); -} - -RVVCALL(OPIVV3_RM, vwsmaccsu_vv_b, WOP_SSU_B, H2, H1, H1, vwsmaccsu8) -RVVCALL(OPIVV3_RM, vwsmaccsu_vv_h, WOP_SSU_H, H4, H2, H2, vwsmaccsu16) -RVVCALL(OPIVV3_RM, vwsmaccsu_vv_w, WOP_SSU_W, H8, H4, H4, vwsmaccsu32) -GEN_VEXT_VV_RM(vwsmaccsu_vv_b, 1, 2, clearh) -GEN_VEXT_VV_RM(vwsmaccsu_vv_h, 2, 4, clearl) -GEN_VEXT_VV_RM(vwsmaccsu_vv_w, 4, 8, clearq) -RVVCALL(OPIVX3_RM, vwsmaccsu_vx_b, WOP_SSU_B, H2, H1, vwsmaccsu8) -RVVCALL(OPIVX3_RM, vwsmaccsu_vx_h, WOP_SSU_H, H4, H2, vwsmaccsu16) -RVVCALL(OPIVX3_RM, vwsmaccsu_vx_w, WOP_SSU_W, H8, H4, vwsmaccsu32) -GEN_VEXT_VX_RM(vwsmaccsu_vx_b, 1, 2, clearh) -GEN_VEXT_VX_RM(vwsmaccsu_vx_h, 2, 4, clearl) -GEN_VEXT_VX_RM(vwsmaccsu_vx_w, 4, 8, clearq) - -static inline int16_t -vwsmaccus8(CPURISCVState *env, int vxrm, int8_t a, uint8_t b, int16_t c) -{ - uint8_t round; - int16_t res = (int16_t)a * b; - - round = get_round(vxrm, res, 4); - res = (res >> 4) + round; - return ssub16(env, vxrm, c, res); -} - -static inline int32_t -vwsmaccus16(CPURISCVState *env, int vxrm, int16_t a, uint16_t b, int32_t c) -{ - uint8_t round; - int32_t res = (int32_t)a * b; - 
- round = get_round(vxrm, res, 8); - res = (res >> 8) + round; - return ssub32(env, vxrm, c, res); -} - -static inline int64_t -vwsmaccus32(CPURISCVState *env, int vxrm, int32_t a, uint32_t b, int64_t c) -{ - uint8_t round; - int64_t res = (int64_t)a * b; - - round = get_round(vxrm, res, 16); - res = (res >> 16) + round; - return ssub64(env, vxrm, c, res); -} - -RVVCALL(OPIVX3_RM, vwsmaccus_vx_b, WOP_SUS_B, H2, H1, vwsmaccus8) -RVVCALL(OPIVX3_RM, vwsmaccus_vx_h, WOP_SUS_H, H4, H2, vwsmaccus16) -RVVCALL(OPIVX3_RM, vwsmaccus_vx_w, WOP_SUS_W, H8, H4, vwsmaccus32) -GEN_VEXT_VX_RM(vwsmaccus_vx_b, 1, 2, clearh) -GEN_VEXT_VX_RM(vwsmaccus_vx_h, 2, 4, clearl) -GEN_VEXT_VX_RM(vwsmaccus_vx_w, 4, 8, clearq) +GEN_VEXT_VX_RM(vsmul_vx_b, 1) +GEN_VEXT_VX_RM(vsmul_vx_h, 2) +GEN_VEXT_VX_RM(vsmul_vx_w, 4) +GEN_VEXT_VX_RM(vsmul_vx_d, 8) /* Vector Single-Width Scaling Shift Instructions */ static inline uint8_t @@ -2958,19 +2821,19 @@ RVVCALL(OPIVV2_RM, vssrl_vv_b, OP_UUU_B, H1, H1, H1, vssrl8) RVVCALL(OPIVV2_RM, vssrl_vv_h, OP_UUU_H, H2, H2, H2, vssrl16) RVVCALL(OPIVV2_RM, vssrl_vv_w, OP_UUU_W, H4, H4, H4, vssrl32) RVVCALL(OPIVV2_RM, vssrl_vv_d, OP_UUU_D, H8, H8, H8, vssrl64) -GEN_VEXT_VV_RM(vssrl_vv_b, 1, 1, clearb) -GEN_VEXT_VV_RM(vssrl_vv_h, 2, 2, clearh) -GEN_VEXT_VV_RM(vssrl_vv_w, 4, 4, clearl) -GEN_VEXT_VV_RM(vssrl_vv_d, 8, 8, clearq) +GEN_VEXT_VV_RM(vssrl_vv_b, 1) +GEN_VEXT_VV_RM(vssrl_vv_h, 2) +GEN_VEXT_VV_RM(vssrl_vv_w, 4) +GEN_VEXT_VV_RM(vssrl_vv_d, 8) RVVCALL(OPIVX2_RM, vssrl_vx_b, OP_UUU_B, H1, H1, vssrl8) RVVCALL(OPIVX2_RM, vssrl_vx_h, OP_UUU_H, H2, H2, vssrl16) RVVCALL(OPIVX2_RM, vssrl_vx_w, OP_UUU_W, H4, H4, vssrl32) RVVCALL(OPIVX2_RM, vssrl_vx_d, OP_UUU_D, H8, H8, vssrl64) -GEN_VEXT_VX_RM(vssrl_vx_b, 1, 1, clearb) -GEN_VEXT_VX_RM(vssrl_vx_h, 2, 2, clearh) -GEN_VEXT_VX_RM(vssrl_vx_w, 4, 4, clearl) -GEN_VEXT_VX_RM(vssrl_vx_d, 8, 8, clearq) +GEN_VEXT_VX_RM(vssrl_vx_b, 1) +GEN_VEXT_VX_RM(vssrl_vx_h, 2) +GEN_VEXT_VX_RM(vssrl_vx_w, 4) +GEN_VEXT_VX_RM(vssrl_vx_d, 8) static inline int8_t vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b) @@ -3017,19 +2880,19 @@ RVVCALL(OPIVV2_RM, vssra_vv_b, OP_SSS_B, H1, H1, H1, vssra8) RVVCALL(OPIVV2_RM, vssra_vv_h, OP_SSS_H, H2, H2, H2, vssra16) RVVCALL(OPIVV2_RM, vssra_vv_w, OP_SSS_W, H4, H4, H4, vssra32) RVVCALL(OPIVV2_RM, vssra_vv_d, OP_SSS_D, H8, H8, H8, vssra64) -GEN_VEXT_VV_RM(vssra_vv_b, 1, 1, clearb) -GEN_VEXT_VV_RM(vssra_vv_h, 2, 2, clearh) -GEN_VEXT_VV_RM(vssra_vv_w, 4, 4, clearl) -GEN_VEXT_VV_RM(vssra_vv_d, 8, 8, clearq) +GEN_VEXT_VV_RM(vssra_vv_b, 1) +GEN_VEXT_VV_RM(vssra_vv_h, 2) +GEN_VEXT_VV_RM(vssra_vv_w, 4) +GEN_VEXT_VV_RM(vssra_vv_d, 8) RVVCALL(OPIVX2_RM, vssra_vx_b, OP_SSS_B, H1, H1, vssra8) RVVCALL(OPIVX2_RM, vssra_vx_h, OP_SSS_H, H2, H2, vssra16) RVVCALL(OPIVX2_RM, vssra_vx_w, OP_SSS_W, H4, H4, vssra32) RVVCALL(OPIVX2_RM, vssra_vx_d, OP_SSS_D, H8, H8, vssra64) -GEN_VEXT_VX_RM(vssra_vx_b, 1, 1, clearb) -GEN_VEXT_VX_RM(vssra_vx_h, 2, 2, clearh) -GEN_VEXT_VX_RM(vssra_vx_w, 4, 4, clearl) -GEN_VEXT_VX_RM(vssra_vx_d, 8, 8, clearq) +GEN_VEXT_VX_RM(vssra_vx_b, 1) +GEN_VEXT_VX_RM(vssra_vx_h, 2) +GEN_VEXT_VX_RM(vssra_vx_w, 4) +GEN_VEXT_VX_RM(vssra_vx_d, 8) /* Vector Narrowing Fixed-Point Clip Instructions */ static inline int8_t @@ -3089,19 +2952,19 @@ vnclip32(CPURISCVState *env, int vxrm, int64_t a, int32_t b) } } -RVVCALL(OPIVV2_RM, vnclip_vv_b, NOP_SSS_B, H1, H2, H1, vnclip8) -RVVCALL(OPIVV2_RM, vnclip_vv_h, NOP_SSS_H, H2, H4, H2, vnclip16) -RVVCALL(OPIVV2_RM, vnclip_vv_w, NOP_SSS_W, H4, H8, H4, vnclip32) -GEN_VEXT_VV_RM(vnclip_vv_b, 1, 1, 
clearb) -GEN_VEXT_VV_RM(vnclip_vv_h, 2, 2, clearh) -GEN_VEXT_VV_RM(vnclip_vv_w, 4, 4, clearl) +RVVCALL(OPIVV2_RM, vnclip_wv_b, NOP_SSS_B, H1, H2, H1, vnclip8) +RVVCALL(OPIVV2_RM, vnclip_wv_h, NOP_SSS_H, H2, H4, H2, vnclip16) +RVVCALL(OPIVV2_RM, vnclip_wv_w, NOP_SSS_W, H4, H8, H4, vnclip32) +GEN_VEXT_VV_RM(vnclip_wv_b, 1) +GEN_VEXT_VV_RM(vnclip_wv_h, 2) +GEN_VEXT_VV_RM(vnclip_wv_w, 4) -RVVCALL(OPIVX2_RM, vnclip_vx_b, NOP_SSS_B, H1, H2, vnclip8) -RVVCALL(OPIVX2_RM, vnclip_vx_h, NOP_SSS_H, H2, H4, vnclip16) -RVVCALL(OPIVX2_RM, vnclip_vx_w, NOP_SSS_W, H4, H8, vnclip32) -GEN_VEXT_VX_RM(vnclip_vx_b, 1, 1, clearb) -GEN_VEXT_VX_RM(vnclip_vx_h, 2, 2, clearh) -GEN_VEXT_VX_RM(vnclip_vx_w, 4, 4, clearl) +RVVCALL(OPIVX2_RM, vnclip_wx_b, NOP_SSS_B, H1, H2, vnclip8) +RVVCALL(OPIVX2_RM, vnclip_wx_h, NOP_SSS_H, H2, H4, vnclip16) +RVVCALL(OPIVX2_RM, vnclip_wx_w, NOP_SSS_W, H4, H8, vnclip32) +GEN_VEXT_VX_RM(vnclip_wx_b, 1) +GEN_VEXT_VX_RM(vnclip_wx_h, 2) +GEN_VEXT_VX_RM(vnclip_wx_w, 4) static inline uint8_t vnclipu8(CPURISCVState *env, int vxrm, uint16_t a, uint8_t b) @@ -3139,7 +3002,7 @@ static inline uint32_t vnclipu32(CPURISCVState *env, int vxrm, uint64_t a, uint32_t b) { uint8_t round, shift = b & 0x3f; - int64_t res; + uint64_t res; round = get_round(vxrm, a, shift); res = (a >> shift) + round; @@ -3151,19 +3014,19 @@ vnclipu32(CPURISCVState *env, int vxrm, uint64_t a, uint32_t b) } } -RVVCALL(OPIVV2_RM, vnclipu_vv_b, NOP_UUU_B, H1, H2, H1, vnclipu8) -RVVCALL(OPIVV2_RM, vnclipu_vv_h, NOP_UUU_H, H2, H4, H2, vnclipu16) -RVVCALL(OPIVV2_RM, vnclipu_vv_w, NOP_UUU_W, H4, H8, H4, vnclipu32) -GEN_VEXT_VV_RM(vnclipu_vv_b, 1, 1, clearb) -GEN_VEXT_VV_RM(vnclipu_vv_h, 2, 2, clearh) -GEN_VEXT_VV_RM(vnclipu_vv_w, 4, 4, clearl) +RVVCALL(OPIVV2_RM, vnclipu_wv_b, NOP_UUU_B, H1, H2, H1, vnclipu8) +RVVCALL(OPIVV2_RM, vnclipu_wv_h, NOP_UUU_H, H2, H4, H2, vnclipu16) +RVVCALL(OPIVV2_RM, vnclipu_wv_w, NOP_UUU_W, H4, H8, H4, vnclipu32) +GEN_VEXT_VV_RM(vnclipu_wv_b, 1) +GEN_VEXT_VV_RM(vnclipu_wv_h, 2) +GEN_VEXT_VV_RM(vnclipu_wv_w, 4) -RVVCALL(OPIVX2_RM, vnclipu_vx_b, NOP_UUU_B, H1, H2, vnclipu8) -RVVCALL(OPIVX2_RM, vnclipu_vx_h, NOP_UUU_H, H2, H4, vnclipu16) -RVVCALL(OPIVX2_RM, vnclipu_vx_w, NOP_UUU_W, H4, H8, vnclipu32) -GEN_VEXT_VX_RM(vnclipu_vx_b, 1, 1, clearb) -GEN_VEXT_VX_RM(vnclipu_vx_h, 2, 2, clearh) -GEN_VEXT_VX_RM(vnclipu_vx_w, 4, 4, clearl) +RVVCALL(OPIVX2_RM, vnclipu_wx_b, NOP_UUU_B, H1, H2, vnclipu8) +RVVCALL(OPIVX2_RM, vnclipu_wx_h, NOP_UUU_H, H2, H4, vnclipu16) +RVVCALL(OPIVX2_RM, vnclipu_wx_w, NOP_UUU_W, H4, H8, vnclipu32) +GEN_VEXT_VX_RM(vnclipu_wx_b, 1) +GEN_VEXT_VX_RM(vnclipu_wx_h, 2) +GEN_VEXT_VX_RM(vnclipu_wx_w, 4) /* *** Vector Float Point Arithmetic Instructions @@ -3178,32 +3041,40 @@ static void do_##NAME(void *vd, void *vs1, void *vs2, int i, \ *((TD *)vd + HD(i)) = OP(s2, s1, &env->fp_status); \ } -#define GEN_VEXT_VV_ENV(NAME, ESZ, DSZ, CLEAR_FN) \ +#define GEN_VEXT_VV_ENV(NAME, ESZ) \ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ void *vs2, CPURISCVState *env, \ uint32_t desc) \ { \ - uint32_t vlmax = vext_maxsz(desc) / ESZ; \ - uint32_t mlen = vext_mlen(desc); \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ + uint32_t total_elems = \ + vext_get_total_elems(env, desc, ESZ); \ + uint32_t vta = vext_vta(desc); \ + uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + for (i = env->vstart; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + vext_set_elems_1s(vd, 
vma, i * ESZ, \ + (i + 1) * ESZ); \ continue; \ } \ do_##NAME(vd, vs1, vs2, i, env); \ } \ - CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * ESZ, \ + total_elems * ESZ); \ } RVVCALL(OPFVV2, vfadd_vv_h, OP_UUU_H, H2, H2, H2, float16_add) RVVCALL(OPFVV2, vfadd_vv_w, OP_UUU_W, H4, H4, H4, float32_add) RVVCALL(OPFVV2, vfadd_vv_d, OP_UUU_D, H8, H8, H8, float64_add) -GEN_VEXT_VV_ENV(vfadd_vv_h, 2, 2, clearh) -GEN_VEXT_VV_ENV(vfadd_vv_w, 4, 4, clearl) -GEN_VEXT_VV_ENV(vfadd_vv_d, 8, 8, clearq) +GEN_VEXT_VV_ENV(vfadd_vv_h, 2) +GEN_VEXT_VV_ENV(vfadd_vv_w, 4) +GEN_VEXT_VV_ENV(vfadd_vv_d, 8) #define OPFVF2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \ static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \ @@ -3213,45 +3084,53 @@ static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \ *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, &env->fp_status);\ } -#define GEN_VEXT_VF(NAME, ESZ, DSZ, CLEAR_FN) \ +#define GEN_VEXT_VF(NAME, ESZ) \ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \ void *vs2, CPURISCVState *env, \ uint32_t desc) \ { \ - uint32_t vlmax = vext_maxsz(desc) / ESZ; \ - uint32_t mlen = vext_mlen(desc); \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ + uint32_t total_elems = \ + vext_get_total_elems(env, desc, ESZ); \ + uint32_t vta = vext_vta(desc); \ + uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + for (i = env->vstart; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + vext_set_elems_1s(vd, vma, i * ESZ, \ + (i + 1) * ESZ); \ continue; \ } \ do_##NAME(vd, s1, vs2, i, env); \ } \ - CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * ESZ, \ + total_elems * ESZ); \ } RVVCALL(OPFVF2, vfadd_vf_h, OP_UUU_H, H2, H2, float16_add) RVVCALL(OPFVF2, vfadd_vf_w, OP_UUU_W, H4, H4, float32_add) RVVCALL(OPFVF2, vfadd_vf_d, OP_UUU_D, H8, H8, float64_add) -GEN_VEXT_VF(vfadd_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfadd_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfadd_vf_d, 8, 8, clearq) +GEN_VEXT_VF(vfadd_vf_h, 2) +GEN_VEXT_VF(vfadd_vf_w, 4) +GEN_VEXT_VF(vfadd_vf_d, 8) RVVCALL(OPFVV2, vfsub_vv_h, OP_UUU_H, H2, H2, H2, float16_sub) RVVCALL(OPFVV2, vfsub_vv_w, OP_UUU_W, H4, H4, H4, float32_sub) RVVCALL(OPFVV2, vfsub_vv_d, OP_UUU_D, H8, H8, H8, float64_sub) -GEN_VEXT_VV_ENV(vfsub_vv_h, 2, 2, clearh) -GEN_VEXT_VV_ENV(vfsub_vv_w, 4, 4, clearl) -GEN_VEXT_VV_ENV(vfsub_vv_d, 8, 8, clearq) +GEN_VEXT_VV_ENV(vfsub_vv_h, 2) +GEN_VEXT_VV_ENV(vfsub_vv_w, 4) +GEN_VEXT_VV_ENV(vfsub_vv_d, 8) RVVCALL(OPFVF2, vfsub_vf_h, OP_UUU_H, H2, H2, float16_sub) RVVCALL(OPFVF2, vfsub_vf_w, OP_UUU_W, H4, H4, float32_sub) RVVCALL(OPFVF2, vfsub_vf_d, OP_UUU_D, H8, H8, float64_sub) -GEN_VEXT_VF(vfsub_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfsub_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfsub_vf_d, 8, 8, clearq) +GEN_VEXT_VF(vfsub_vf_h, 2) +GEN_VEXT_VF(vfsub_vf_w, 4) +GEN_VEXT_VF(vfsub_vf_d, 8) static uint16_t float16_rsub(uint16_t a, uint16_t b, float_status *s) { @@ -3271,9 +3150,9 @@ static uint64_t float64_rsub(uint64_t a, uint64_t b, float_status *s) RVVCALL(OPFVF2, vfrsub_vf_h, OP_UUU_H, H2, H2, float16_rsub) RVVCALL(OPFVF2, vfrsub_vf_w, OP_UUU_W, H4, H4, float32_rsub) RVVCALL(OPFVF2, vfrsub_vf_d, OP_UUU_D, H8, H8, float64_rsub) -GEN_VEXT_VF(vfrsub_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfrsub_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfrsub_vf_d, 8, 8, clearq) 
+GEN_VEXT_VF(vfrsub_vf_h, 2) +GEN_VEXT_VF(vfrsub_vf_w, 4) +GEN_VEXT_VF(vfrsub_vf_d, 8) /* Vector Widening Floating-Point Add/Subtract Instructions */ static uint32_t vfwadd16(uint16_t a, uint16_t b, float_status *s) @@ -3291,12 +3170,12 @@ static uint64_t vfwadd32(uint32_t a, uint32_t b, float_status *s) RVVCALL(OPFVV2, vfwadd_vv_h, WOP_UUU_H, H4, H2, H2, vfwadd16) RVVCALL(OPFVV2, vfwadd_vv_w, WOP_UUU_W, H8, H4, H4, vfwadd32) -GEN_VEXT_VV_ENV(vfwadd_vv_h, 2, 4, clearl) -GEN_VEXT_VV_ENV(vfwadd_vv_w, 4, 8, clearq) +GEN_VEXT_VV_ENV(vfwadd_vv_h, 4) +GEN_VEXT_VV_ENV(vfwadd_vv_w, 8) RVVCALL(OPFVF2, vfwadd_vf_h, WOP_UUU_H, H4, H2, vfwadd16) RVVCALL(OPFVF2, vfwadd_vf_w, WOP_UUU_W, H8, H4, vfwadd32) -GEN_VEXT_VF(vfwadd_vf_h, 2, 4, clearl) -GEN_VEXT_VF(vfwadd_vf_w, 4, 8, clearq) +GEN_VEXT_VF(vfwadd_vf_h, 4) +GEN_VEXT_VF(vfwadd_vf_w, 8) static uint32_t vfwsub16(uint16_t a, uint16_t b, float_status *s) { @@ -3313,12 +3192,12 @@ static uint64_t vfwsub32(uint32_t a, uint32_t b, float_status *s) RVVCALL(OPFVV2, vfwsub_vv_h, WOP_UUU_H, H4, H2, H2, vfwsub16) RVVCALL(OPFVV2, vfwsub_vv_w, WOP_UUU_W, H8, H4, H4, vfwsub32) -GEN_VEXT_VV_ENV(vfwsub_vv_h, 2, 4, clearl) -GEN_VEXT_VV_ENV(vfwsub_vv_w, 4, 8, clearq) +GEN_VEXT_VV_ENV(vfwsub_vv_h, 4) +GEN_VEXT_VV_ENV(vfwsub_vv_w, 8) RVVCALL(OPFVF2, vfwsub_vf_h, WOP_UUU_H, H4, H2, vfwsub16) RVVCALL(OPFVF2, vfwsub_vf_w, WOP_UUU_W, H8, H4, vfwsub32) -GEN_VEXT_VF(vfwsub_vf_h, 2, 4, clearl) -GEN_VEXT_VF(vfwsub_vf_w, 4, 8, clearq) +GEN_VEXT_VF(vfwsub_vf_h, 4) +GEN_VEXT_VF(vfwsub_vf_w, 8) static uint32_t vfwaddw16(uint32_t a, uint16_t b, float_status *s) { @@ -3332,12 +3211,12 @@ static uint64_t vfwaddw32(uint64_t a, uint32_t b, float_status *s) RVVCALL(OPFVV2, vfwadd_wv_h, WOP_WUUU_H, H4, H2, H2, vfwaddw16) RVVCALL(OPFVV2, vfwadd_wv_w, WOP_WUUU_W, H8, H4, H4, vfwaddw32) -GEN_VEXT_VV_ENV(vfwadd_wv_h, 2, 4, clearl) -GEN_VEXT_VV_ENV(vfwadd_wv_w, 4, 8, clearq) +GEN_VEXT_VV_ENV(vfwadd_wv_h, 4) +GEN_VEXT_VV_ENV(vfwadd_wv_w, 8) RVVCALL(OPFVF2, vfwadd_wf_h, WOP_WUUU_H, H4, H2, vfwaddw16) RVVCALL(OPFVF2, vfwadd_wf_w, WOP_WUUU_W, H8, H4, vfwaddw32) -GEN_VEXT_VF(vfwadd_wf_h, 2, 4, clearl) -GEN_VEXT_VF(vfwadd_wf_w, 4, 8, clearq) +GEN_VEXT_VF(vfwadd_wf_h, 4) +GEN_VEXT_VF(vfwadd_wf_w, 8) static uint32_t vfwsubw16(uint32_t a, uint16_t b, float_status *s) { @@ -3351,39 +3230,39 @@ static uint64_t vfwsubw32(uint64_t a, uint32_t b, float_status *s) RVVCALL(OPFVV2, vfwsub_wv_h, WOP_WUUU_H, H4, H2, H2, vfwsubw16) RVVCALL(OPFVV2, vfwsub_wv_w, WOP_WUUU_W, H8, H4, H4, vfwsubw32) -GEN_VEXT_VV_ENV(vfwsub_wv_h, 2, 4, clearl) -GEN_VEXT_VV_ENV(vfwsub_wv_w, 4, 8, clearq) +GEN_VEXT_VV_ENV(vfwsub_wv_h, 4) +GEN_VEXT_VV_ENV(vfwsub_wv_w, 8) RVVCALL(OPFVF2, vfwsub_wf_h, WOP_WUUU_H, H4, H2, vfwsubw16) RVVCALL(OPFVF2, vfwsub_wf_w, WOP_WUUU_W, H8, H4, vfwsubw32) -GEN_VEXT_VF(vfwsub_wf_h, 2, 4, clearl) -GEN_VEXT_VF(vfwsub_wf_w, 4, 8, clearq) +GEN_VEXT_VF(vfwsub_wf_h, 4) +GEN_VEXT_VF(vfwsub_wf_w, 8) /* Vector Single-Width Floating-Point Multiply/Divide Instructions */ RVVCALL(OPFVV2, vfmul_vv_h, OP_UUU_H, H2, H2, H2, float16_mul) RVVCALL(OPFVV2, vfmul_vv_w, OP_UUU_W, H4, H4, H4, float32_mul) RVVCALL(OPFVV2, vfmul_vv_d, OP_UUU_D, H8, H8, H8, float64_mul) -GEN_VEXT_VV_ENV(vfmul_vv_h, 2, 2, clearh) -GEN_VEXT_VV_ENV(vfmul_vv_w, 4, 4, clearl) -GEN_VEXT_VV_ENV(vfmul_vv_d, 8, 8, clearq) +GEN_VEXT_VV_ENV(vfmul_vv_h, 2) +GEN_VEXT_VV_ENV(vfmul_vv_w, 4) +GEN_VEXT_VV_ENV(vfmul_vv_d, 8) RVVCALL(OPFVF2, vfmul_vf_h, OP_UUU_H, H2, H2, float16_mul) RVVCALL(OPFVF2, vfmul_vf_w, OP_UUU_W, H4, H4, float32_mul) RVVCALL(OPFVF2, 
vfmul_vf_d, OP_UUU_D, H8, H8, float64_mul) -GEN_VEXT_VF(vfmul_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfmul_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfmul_vf_d, 8, 8, clearq) +GEN_VEXT_VF(vfmul_vf_h, 2) +GEN_VEXT_VF(vfmul_vf_w, 4) +GEN_VEXT_VF(vfmul_vf_d, 8) RVVCALL(OPFVV2, vfdiv_vv_h, OP_UUU_H, H2, H2, H2, float16_div) RVVCALL(OPFVV2, vfdiv_vv_w, OP_UUU_W, H4, H4, H4, float32_div) RVVCALL(OPFVV2, vfdiv_vv_d, OP_UUU_D, H8, H8, H8, float64_div) -GEN_VEXT_VV_ENV(vfdiv_vv_h, 2, 2, clearh) -GEN_VEXT_VV_ENV(vfdiv_vv_w, 4, 4, clearl) -GEN_VEXT_VV_ENV(vfdiv_vv_d, 8, 8, clearq) +GEN_VEXT_VV_ENV(vfdiv_vv_h, 2) +GEN_VEXT_VV_ENV(vfdiv_vv_w, 4) +GEN_VEXT_VV_ENV(vfdiv_vv_d, 8) RVVCALL(OPFVF2, vfdiv_vf_h, OP_UUU_H, H2, H2, float16_div) RVVCALL(OPFVF2, vfdiv_vf_w, OP_UUU_W, H4, H4, float32_div) RVVCALL(OPFVF2, vfdiv_vf_d, OP_UUU_D, H8, H8, float64_div) -GEN_VEXT_VF(vfdiv_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfdiv_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfdiv_vf_d, 8, 8, clearq) +GEN_VEXT_VF(vfdiv_vf_h, 2) +GEN_VEXT_VF(vfdiv_vf_w, 4) +GEN_VEXT_VF(vfdiv_vf_d, 8) static uint16_t float16_rdiv(uint16_t a, uint16_t b, float_status *s) { @@ -3403,9 +3282,9 @@ static uint64_t float64_rdiv(uint64_t a, uint64_t b, float_status *s) RVVCALL(OPFVF2, vfrdiv_vf_h, OP_UUU_H, H2, H2, float16_rdiv) RVVCALL(OPFVF2, vfrdiv_vf_w, OP_UUU_W, H4, H4, float32_rdiv) RVVCALL(OPFVF2, vfrdiv_vf_d, OP_UUU_D, H8, H8, float64_rdiv) -GEN_VEXT_VF(vfrdiv_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfrdiv_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfrdiv_vf_d, 8, 8, clearq) +GEN_VEXT_VF(vfrdiv_vf_h, 2) +GEN_VEXT_VF(vfrdiv_vf_w, 4) +GEN_VEXT_VF(vfrdiv_vf_d, 8) /* Vector Widening Floating-Point Multiply */ static uint32_t vfwmul16(uint16_t a, uint16_t b, float_status *s) @@ -3422,12 +3301,12 @@ static uint64_t vfwmul32(uint32_t a, uint32_t b, float_status *s) } RVVCALL(OPFVV2, vfwmul_vv_h, WOP_UUU_H, H4, H2, H2, vfwmul16) RVVCALL(OPFVV2, vfwmul_vv_w, WOP_UUU_W, H8, H4, H4, vfwmul32) -GEN_VEXT_VV_ENV(vfwmul_vv_h, 2, 4, clearl) -GEN_VEXT_VV_ENV(vfwmul_vv_w, 4, 8, clearq) +GEN_VEXT_VV_ENV(vfwmul_vv_h, 4) +GEN_VEXT_VV_ENV(vfwmul_vv_w, 8) RVVCALL(OPFVF2, vfwmul_vf_h, WOP_UUU_H, H4, H2, vfwmul16) RVVCALL(OPFVF2, vfwmul_vf_w, WOP_UUU_W, H8, H4, vfwmul32) -GEN_VEXT_VF(vfwmul_vf_h, 2, 4, clearl) -GEN_VEXT_VF(vfwmul_vf_w, 4, 8, clearq) +GEN_VEXT_VF(vfwmul_vf_h, 4) +GEN_VEXT_VF(vfwmul_vf_w, 8) /* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */ #define OPFVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \ @@ -3458,9 +3337,9 @@ static uint64_t fmacc64(uint64_t a, uint64_t b, uint64_t d, float_status *s) RVVCALL(OPFVV3, vfmacc_vv_h, OP_UUU_H, H2, H2, H2, fmacc16) RVVCALL(OPFVV3, vfmacc_vv_w, OP_UUU_W, H4, H4, H4, fmacc32) RVVCALL(OPFVV3, vfmacc_vv_d, OP_UUU_D, H8, H8, H8, fmacc64) -GEN_VEXT_VV_ENV(vfmacc_vv_h, 2, 2, clearh) -GEN_VEXT_VV_ENV(vfmacc_vv_w, 4, 4, clearl) -GEN_VEXT_VV_ENV(vfmacc_vv_d, 8, 8, clearq) +GEN_VEXT_VV_ENV(vfmacc_vv_h, 2) +GEN_VEXT_VV_ENV(vfmacc_vv_w, 4) +GEN_VEXT_VV_ENV(vfmacc_vv_d, 8) #define OPFVF3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \ static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \ @@ -3474,9 +3353,9 @@ static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \ RVVCALL(OPFVF3, vfmacc_vf_h, OP_UUU_H, H2, H2, fmacc16) RVVCALL(OPFVF3, vfmacc_vf_w, OP_UUU_W, H4, H4, fmacc32) RVVCALL(OPFVF3, vfmacc_vf_d, OP_UUU_D, H8, H8, fmacc64) -GEN_VEXT_VF(vfmacc_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfmacc_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfmacc_vf_d, 8, 8, clearq) +GEN_VEXT_VF(vfmacc_vf_h, 2) +GEN_VEXT_VF(vfmacc_vf_w, 4) +GEN_VEXT_VF(vfmacc_vf_d, 
8) static uint16_t fnmacc16(uint16_t a, uint16_t b, uint16_t d, float_status *s) { @@ -3499,15 +3378,15 @@ static uint64_t fnmacc64(uint64_t a, uint64_t b, uint64_t d, float_status *s) RVVCALL(OPFVV3, vfnmacc_vv_h, OP_UUU_H, H2, H2, H2, fnmacc16) RVVCALL(OPFVV3, vfnmacc_vv_w, OP_UUU_W, H4, H4, H4, fnmacc32) RVVCALL(OPFVV3, vfnmacc_vv_d, OP_UUU_D, H8, H8, H8, fnmacc64) -GEN_VEXT_VV_ENV(vfnmacc_vv_h, 2, 2, clearh) -GEN_VEXT_VV_ENV(vfnmacc_vv_w, 4, 4, clearl) -GEN_VEXT_VV_ENV(vfnmacc_vv_d, 8, 8, clearq) +GEN_VEXT_VV_ENV(vfnmacc_vv_h, 2) +GEN_VEXT_VV_ENV(vfnmacc_vv_w, 4) +GEN_VEXT_VV_ENV(vfnmacc_vv_d, 8) RVVCALL(OPFVF3, vfnmacc_vf_h, OP_UUU_H, H2, H2, fnmacc16) RVVCALL(OPFVF3, vfnmacc_vf_w, OP_UUU_W, H4, H4, fnmacc32) RVVCALL(OPFVF3, vfnmacc_vf_d, OP_UUU_D, H8, H8, fnmacc64) -GEN_VEXT_VF(vfnmacc_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfnmacc_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfnmacc_vf_d, 8, 8, clearq) +GEN_VEXT_VF(vfnmacc_vf_h, 2) +GEN_VEXT_VF(vfnmacc_vf_w, 4) +GEN_VEXT_VF(vfnmacc_vf_d, 8) static uint16_t fmsac16(uint16_t a, uint16_t b, uint16_t d, float_status *s) { @@ -3527,15 +3406,15 @@ static uint64_t fmsac64(uint64_t a, uint64_t b, uint64_t d, float_status *s) RVVCALL(OPFVV3, vfmsac_vv_h, OP_UUU_H, H2, H2, H2, fmsac16) RVVCALL(OPFVV3, vfmsac_vv_w, OP_UUU_W, H4, H4, H4, fmsac32) RVVCALL(OPFVV3, vfmsac_vv_d, OP_UUU_D, H8, H8, H8, fmsac64) -GEN_VEXT_VV_ENV(vfmsac_vv_h, 2, 2, clearh) -GEN_VEXT_VV_ENV(vfmsac_vv_w, 4, 4, clearl) -GEN_VEXT_VV_ENV(vfmsac_vv_d, 8, 8, clearq) +GEN_VEXT_VV_ENV(vfmsac_vv_h, 2) +GEN_VEXT_VV_ENV(vfmsac_vv_w, 4) +GEN_VEXT_VV_ENV(vfmsac_vv_d, 8) RVVCALL(OPFVF3, vfmsac_vf_h, OP_UUU_H, H2, H2, fmsac16) RVVCALL(OPFVF3, vfmsac_vf_w, OP_UUU_W, H4, H4, fmsac32) RVVCALL(OPFVF3, vfmsac_vf_d, OP_UUU_D, H8, H8, fmsac64) -GEN_VEXT_VF(vfmsac_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfmsac_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfmsac_vf_d, 8, 8, clearq) +GEN_VEXT_VF(vfmsac_vf_h, 2) +GEN_VEXT_VF(vfmsac_vf_w, 4) +GEN_VEXT_VF(vfmsac_vf_d, 8) static uint16_t fnmsac16(uint16_t a, uint16_t b, uint16_t d, float_status *s) { @@ -3555,15 +3434,15 @@ static uint64_t fnmsac64(uint64_t a, uint64_t b, uint64_t d, float_status *s) RVVCALL(OPFVV3, vfnmsac_vv_h, OP_UUU_H, H2, H2, H2, fnmsac16) RVVCALL(OPFVV3, vfnmsac_vv_w, OP_UUU_W, H4, H4, H4, fnmsac32) RVVCALL(OPFVV3, vfnmsac_vv_d, OP_UUU_D, H8, H8, H8, fnmsac64) -GEN_VEXT_VV_ENV(vfnmsac_vv_h, 2, 2, clearh) -GEN_VEXT_VV_ENV(vfnmsac_vv_w, 4, 4, clearl) -GEN_VEXT_VV_ENV(vfnmsac_vv_d, 8, 8, clearq) +GEN_VEXT_VV_ENV(vfnmsac_vv_h, 2) +GEN_VEXT_VV_ENV(vfnmsac_vv_w, 4) +GEN_VEXT_VV_ENV(vfnmsac_vv_d, 8) RVVCALL(OPFVF3, vfnmsac_vf_h, OP_UUU_H, H2, H2, fnmsac16) RVVCALL(OPFVF3, vfnmsac_vf_w, OP_UUU_W, H4, H4, fnmsac32) RVVCALL(OPFVF3, vfnmsac_vf_d, OP_UUU_D, H8, H8, fnmsac64) -GEN_VEXT_VF(vfnmsac_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfnmsac_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfnmsac_vf_d, 8, 8, clearq) +GEN_VEXT_VF(vfnmsac_vf_h, 2) +GEN_VEXT_VF(vfnmsac_vf_w, 4) +GEN_VEXT_VF(vfnmsac_vf_d, 8) static uint16_t fmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s) { @@ -3583,15 +3462,15 @@ static uint64_t fmadd64(uint64_t a, uint64_t b, uint64_t d, float_status *s) RVVCALL(OPFVV3, vfmadd_vv_h, OP_UUU_H, H2, H2, H2, fmadd16) RVVCALL(OPFVV3, vfmadd_vv_w, OP_UUU_W, H4, H4, H4, fmadd32) RVVCALL(OPFVV3, vfmadd_vv_d, OP_UUU_D, H8, H8, H8, fmadd64) -GEN_VEXT_VV_ENV(vfmadd_vv_h, 2, 2, clearh) -GEN_VEXT_VV_ENV(vfmadd_vv_w, 4, 4, clearl) -GEN_VEXT_VV_ENV(vfmadd_vv_d, 8, 8, clearq) +GEN_VEXT_VV_ENV(vfmadd_vv_h, 2) +GEN_VEXT_VV_ENV(vfmadd_vv_w, 4) +GEN_VEXT_VV_ENV(vfmadd_vv_d, 8) 
RVVCALL(OPFVF3, vfmadd_vf_h, OP_UUU_H, H2, H2, fmadd16) RVVCALL(OPFVF3, vfmadd_vf_w, OP_UUU_W, H4, H4, fmadd32) RVVCALL(OPFVF3, vfmadd_vf_d, OP_UUU_D, H8, H8, fmadd64) -GEN_VEXT_VF(vfmadd_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfmadd_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfmadd_vf_d, 8, 8, clearq) +GEN_VEXT_VF(vfmadd_vf_h, 2) +GEN_VEXT_VF(vfmadd_vf_w, 4) +GEN_VEXT_VF(vfmadd_vf_d, 8) static uint16_t fnmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s) { @@ -3614,15 +3493,15 @@ static uint64_t fnmadd64(uint64_t a, uint64_t b, uint64_t d, float_status *s) RVVCALL(OPFVV3, vfnmadd_vv_h, OP_UUU_H, H2, H2, H2, fnmadd16) RVVCALL(OPFVV3, vfnmadd_vv_w, OP_UUU_W, H4, H4, H4, fnmadd32) RVVCALL(OPFVV3, vfnmadd_vv_d, OP_UUU_D, H8, H8, H8, fnmadd64) -GEN_VEXT_VV_ENV(vfnmadd_vv_h, 2, 2, clearh) -GEN_VEXT_VV_ENV(vfnmadd_vv_w, 4, 4, clearl) -GEN_VEXT_VV_ENV(vfnmadd_vv_d, 8, 8, clearq) +GEN_VEXT_VV_ENV(vfnmadd_vv_h, 2) +GEN_VEXT_VV_ENV(vfnmadd_vv_w, 4) +GEN_VEXT_VV_ENV(vfnmadd_vv_d, 8) RVVCALL(OPFVF3, vfnmadd_vf_h, OP_UUU_H, H2, H2, fnmadd16) RVVCALL(OPFVF3, vfnmadd_vf_w, OP_UUU_W, H4, H4, fnmadd32) RVVCALL(OPFVF3, vfnmadd_vf_d, OP_UUU_D, H8, H8, fnmadd64) -GEN_VEXT_VF(vfnmadd_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfnmadd_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfnmadd_vf_d, 8, 8, clearq) +GEN_VEXT_VF(vfnmadd_vf_h, 2) +GEN_VEXT_VF(vfnmadd_vf_w, 4) +GEN_VEXT_VF(vfnmadd_vf_d, 8) static uint16_t fmsub16(uint16_t a, uint16_t b, uint16_t d, float_status *s) { @@ -3642,15 +3521,15 @@ static uint64_t fmsub64(uint64_t a, uint64_t b, uint64_t d, float_status *s) RVVCALL(OPFVV3, vfmsub_vv_h, OP_UUU_H, H2, H2, H2, fmsub16) RVVCALL(OPFVV3, vfmsub_vv_w, OP_UUU_W, H4, H4, H4, fmsub32) RVVCALL(OPFVV3, vfmsub_vv_d, OP_UUU_D, H8, H8, H8, fmsub64) -GEN_VEXT_VV_ENV(vfmsub_vv_h, 2, 2, clearh) -GEN_VEXT_VV_ENV(vfmsub_vv_w, 4, 4, clearl) -GEN_VEXT_VV_ENV(vfmsub_vv_d, 8, 8, clearq) +GEN_VEXT_VV_ENV(vfmsub_vv_h, 2) +GEN_VEXT_VV_ENV(vfmsub_vv_w, 4) +GEN_VEXT_VV_ENV(vfmsub_vv_d, 8) RVVCALL(OPFVF3, vfmsub_vf_h, OP_UUU_H, H2, H2, fmsub16) RVVCALL(OPFVF3, vfmsub_vf_w, OP_UUU_W, H4, H4, fmsub32) RVVCALL(OPFVF3, vfmsub_vf_d, OP_UUU_D, H8, H8, fmsub64) -GEN_VEXT_VF(vfmsub_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfmsub_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfmsub_vf_d, 8, 8, clearq) +GEN_VEXT_VF(vfmsub_vf_h, 2) +GEN_VEXT_VF(vfmsub_vf_w, 4) +GEN_VEXT_VF(vfmsub_vf_d, 8) static uint16_t fnmsub16(uint16_t a, uint16_t b, uint16_t d, float_status *s) { @@ -3670,15 +3549,15 @@ static uint64_t fnmsub64(uint64_t a, uint64_t b, uint64_t d, float_status *s) RVVCALL(OPFVV3, vfnmsub_vv_h, OP_UUU_H, H2, H2, H2, fnmsub16) RVVCALL(OPFVV3, vfnmsub_vv_w, OP_UUU_W, H4, H4, H4, fnmsub32) RVVCALL(OPFVV3, vfnmsub_vv_d, OP_UUU_D, H8, H8, H8, fnmsub64) -GEN_VEXT_VV_ENV(vfnmsub_vv_h, 2, 2, clearh) -GEN_VEXT_VV_ENV(vfnmsub_vv_w, 4, 4, clearl) -GEN_VEXT_VV_ENV(vfnmsub_vv_d, 8, 8, clearq) +GEN_VEXT_VV_ENV(vfnmsub_vv_h, 2) +GEN_VEXT_VV_ENV(vfnmsub_vv_w, 4) +GEN_VEXT_VV_ENV(vfnmsub_vv_d, 8) RVVCALL(OPFVF3, vfnmsub_vf_h, OP_UUU_H, H2, H2, fnmsub16) RVVCALL(OPFVF3, vfnmsub_vf_w, OP_UUU_W, H4, H4, fnmsub32) RVVCALL(OPFVF3, vfnmsub_vf_d, OP_UUU_D, H8, H8, fnmsub64) -GEN_VEXT_VF(vfnmsub_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfnmsub_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfnmsub_vf_d, 8, 8, clearq) +GEN_VEXT_VF(vfnmsub_vf_h, 2) +GEN_VEXT_VF(vfnmsub_vf_w, 4) +GEN_VEXT_VF(vfnmsub_vf_d, 8) /* Vector Widening Floating-Point Fused Multiply-Add Instructions */ static uint32_t fwmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s) @@ -3695,12 +3574,12 @@ static uint64_t fwmacc32(uint32_t a, uint32_t b, uint64_t 
d, float_status *s) RVVCALL(OPFVV3, vfwmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwmacc16) RVVCALL(OPFVV3, vfwmacc_vv_w, WOP_UUU_W, H8, H4, H4, fwmacc32) -GEN_VEXT_VV_ENV(vfwmacc_vv_h, 2, 4, clearl) -GEN_VEXT_VV_ENV(vfwmacc_vv_w, 4, 8, clearq) +GEN_VEXT_VV_ENV(vfwmacc_vv_h, 4) +GEN_VEXT_VV_ENV(vfwmacc_vv_w, 8) RVVCALL(OPFVF3, vfwmacc_vf_h, WOP_UUU_H, H4, H2, fwmacc16) RVVCALL(OPFVF3, vfwmacc_vf_w, WOP_UUU_W, H8, H4, fwmacc32) -GEN_VEXT_VF(vfwmacc_vf_h, 2, 4, clearl) -GEN_VEXT_VF(vfwmacc_vf_w, 4, 8, clearq) +GEN_VEXT_VF(vfwmacc_vf_h, 4) +GEN_VEXT_VF(vfwmacc_vf_w, 8) static uint32_t fwnmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s) { @@ -3718,12 +3597,12 @@ static uint64_t fwnmacc32(uint32_t a, uint32_t b, uint64_t d, float_status *s) RVVCALL(OPFVV3, vfwnmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwnmacc16) RVVCALL(OPFVV3, vfwnmacc_vv_w, WOP_UUU_W, H8, H4, H4, fwnmacc32) -GEN_VEXT_VV_ENV(vfwnmacc_vv_h, 2, 4, clearl) -GEN_VEXT_VV_ENV(vfwnmacc_vv_w, 4, 8, clearq) +GEN_VEXT_VV_ENV(vfwnmacc_vv_h, 4) +GEN_VEXT_VV_ENV(vfwnmacc_vv_w, 8) RVVCALL(OPFVF3, vfwnmacc_vf_h, WOP_UUU_H, H4, H2, fwnmacc16) RVVCALL(OPFVF3, vfwnmacc_vf_w, WOP_UUU_W, H8, H4, fwnmacc32) -GEN_VEXT_VF(vfwnmacc_vf_h, 2, 4, clearl) -GEN_VEXT_VF(vfwnmacc_vf_w, 4, 8, clearq) +GEN_VEXT_VF(vfwnmacc_vf_h, 4) +GEN_VEXT_VF(vfwnmacc_vf_w, 8) static uint32_t fwmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s) { @@ -3741,12 +3620,12 @@ static uint64_t fwmsac32(uint32_t a, uint32_t b, uint64_t d, float_status *s) RVVCALL(OPFVV3, vfwmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwmsac16) RVVCALL(OPFVV3, vfwmsac_vv_w, WOP_UUU_W, H8, H4, H4, fwmsac32) -GEN_VEXT_VV_ENV(vfwmsac_vv_h, 2, 4, clearl) -GEN_VEXT_VV_ENV(vfwmsac_vv_w, 4, 8, clearq) +GEN_VEXT_VV_ENV(vfwmsac_vv_h, 4) +GEN_VEXT_VV_ENV(vfwmsac_vv_w, 8) RVVCALL(OPFVF3, vfwmsac_vf_h, WOP_UUU_H, H4, H2, fwmsac16) RVVCALL(OPFVF3, vfwmsac_vf_w, WOP_UUU_W, H8, H4, fwmsac32) -GEN_VEXT_VF(vfwmsac_vf_h, 2, 4, clearl) -GEN_VEXT_VF(vfwmsac_vf_w, 4, 8, clearq) +GEN_VEXT_VF(vfwmsac_vf_h, 4) +GEN_VEXT_VF(vfwmsac_vf_w, 8) static uint32_t fwnmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s) { @@ -3764,12 +3643,12 @@ static uint64_t fwnmsac32(uint32_t a, uint32_t b, uint64_t d, float_status *s) RVVCALL(OPFVV3, vfwnmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwnmsac16) RVVCALL(OPFVV3, vfwnmsac_vv_w, WOP_UUU_W, H8, H4, H4, fwnmsac32) -GEN_VEXT_VV_ENV(vfwnmsac_vv_h, 2, 4, clearl) -GEN_VEXT_VV_ENV(vfwnmsac_vv_w, 4, 8, clearq) +GEN_VEXT_VV_ENV(vfwnmsac_vv_h, 4) +GEN_VEXT_VV_ENV(vfwnmsac_vv_w, 8) RVVCALL(OPFVF3, vfwnmsac_vf_h, WOP_UUU_H, H4, H2, fwnmsac16) RVVCALL(OPFVF3, vfwnmsac_vf_w, WOP_UUU_W, H8, H4, fwnmsac32) -GEN_VEXT_VF(vfwnmsac_vf_h, 2, 4, clearl) -GEN_VEXT_VF(vfwnmsac_vf_w, 4, 8, clearq) +GEN_VEXT_VF(vfwnmsac_vf_h, 4) +GEN_VEXT_VF(vfwnmsac_vf_w, 8) /* Vector Floating-Point Square-Root Instruction */ /* (TD, T2, TX2) */ @@ -3785,61 +3664,441 @@ static void do_##NAME(void *vd, void *vs2, int i, \ *((TD *)vd + HD(i)) = OP(s2, &env->fp_status); \ } -#define GEN_VEXT_V_ENV(NAME, ESZ, DSZ, CLEAR_FN) \ +#define GEN_VEXT_V_ENV(NAME, ESZ) \ void HELPER(NAME)(void *vd, void *v0, void *vs2, \ CPURISCVState *env, uint32_t desc) \ { \ - uint32_t vlmax = vext_maxsz(desc) / ESZ; \ - uint32_t mlen = vext_mlen(desc); \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ + uint32_t total_elems = \ + vext_get_total_elems(env, desc, ESZ); \ + uint32_t vta = vext_vta(desc); \ + uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ if (vl == 0) { \ return; \ } \ - for (i = 0; i < vl; i++) { \ - if (!vm && !vext_elem_mask(v0, mlen, 
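/*
 * Illustrative sketch, not part of this patch: the vfwmacc family folds the
 * product of two SEW-wide operands into a 2*SEW-wide destination element.
 * A plain-C analogue with binary32 sources and a binary64 accumulator is
 * shown below; fwmacc32_model() is a hypothetical stand-in, and the real
 * helpers do the equivalent promotion through softfloat.
 */
#include <math.h>
#include <stdio.h>

static double fwmacc32_model(float a, float b, double d)
{
    /* promote, then a single fused multiply-add in the wider format */
    return fma((double)a, (double)b, d);
}

int main(void)
{
    double acc = 1.0;
    acc = fwmacc32_model(1.5f, 2.0f, acc);   /* 1.0 + 1.5 * 2.0 = 4.0 */
    printf("%f\n", acc);
    return 0;
}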
i)) { \ + for (i = env->vstart; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + vext_set_elems_1s(vd, vma, i * ESZ, \ + (i + 1) * ESZ); \ continue; \ } \ do_##NAME(vd, vs2, i, env); \ } \ - CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \ + env->vstart = 0; \ + vext_set_elems_1s(vd, vta, vl * ESZ, \ + total_elems * ESZ); \ } RVVCALL(OPFVV1, vfsqrt_v_h, OP_UU_H, H2, H2, float16_sqrt) RVVCALL(OPFVV1, vfsqrt_v_w, OP_UU_W, H4, H4, float32_sqrt) RVVCALL(OPFVV1, vfsqrt_v_d, OP_UU_D, H8, H8, float64_sqrt) -GEN_VEXT_V_ENV(vfsqrt_v_h, 2, 2, clearh) -GEN_VEXT_V_ENV(vfsqrt_v_w, 4, 4, clearl) -GEN_VEXT_V_ENV(vfsqrt_v_d, 8, 8, clearq) +GEN_VEXT_V_ENV(vfsqrt_v_h, 2) +GEN_VEXT_V_ENV(vfsqrt_v_w, 4) +GEN_VEXT_V_ENV(vfsqrt_v_d, 8) + +/* + * Vector Floating-Point Reciprocal Square-Root Estimate Instruction + * + * Adapted from riscv-v-spec recip.c: + * https://github.com/riscv/riscv-v-spec/blob/master/recip.c + */ +static uint64_t frsqrt7(uint64_t f, int exp_size, int frac_size) +{ + uint64_t sign = extract64(f, frac_size + exp_size, 1); + uint64_t exp = extract64(f, frac_size, exp_size); + uint64_t frac = extract64(f, 0, frac_size); + + const uint8_t lookup_table[] = { + 52, 51, 50, 48, 47, 46, 44, 43, + 42, 41, 40, 39, 38, 36, 35, 34, + 33, 32, 31, 30, 30, 29, 28, 27, + 26, 25, 24, 23, 23, 22, 21, 20, + 19, 19, 18, 17, 16, 16, 15, 14, + 14, 13, 12, 12, 11, 10, 10, 9, + 9, 8, 7, 7, 6, 6, 5, 4, + 4, 3, 3, 2, 2, 1, 1, 0, + 127, 125, 123, 121, 119, 118, 116, 114, + 113, 111, 109, 108, 106, 105, 103, 102, + 100, 99, 97, 96, 95, 93, 92, 91, + 90, 88, 87, 86, 85, 84, 83, 82, + 80, 79, 78, 77, 76, 75, 74, 73, + 72, 71, 70, 70, 69, 68, 67, 66, + 65, 64, 63, 63, 62, 61, 60, 59, + 59, 58, 57, 56, 56, 55, 54, 53 + }; + const int precision = 7; + + if (exp == 0 && frac != 0) { /* subnormal */ + /* Normalize the subnormal. 
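/*
 * Illustrative sketch, not part of this patch: a simplified model of the
 * loop that GEN_VEXT_V_ENV now expands to.  vstart/vl/vta/vma are plain
 * parameters here instead of coming from CPURISCVState and desc, the mask
 * is a raw bit array, and the per-element operation is just byte negation.
 * model_v_env() and set_ones() are hypothetical stand-ins for the helper
 * and for vext_set_elems_1s().
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void set_ones(uint8_t *vd, bool agnostic, size_t from, size_t to)
{
    if (agnostic && to > from) {
        memset(vd + from, 0xff, to - from);   /* agnostic bytes become all 1s */
    }
}

static void model_v_env(uint8_t *vd, const uint8_t *mask, const uint8_t *vs2,
                        uint32_t vstart, uint32_t vl, uint32_t total,
                        bool vm, bool vta, bool vma)
{
    for (uint32_t i = vstart; i < vl; i++) {
        if (!vm && !((mask[i / 8] >> (i % 8)) & 1)) {
            set_ones(vd, vma, i, i + 1);      /* masked-off element */
            continue;
        }
        vd[i] = (uint8_t)-vs2[i];             /* the per-element operation */
    }
    set_ones(vd, vta, vl, total);             /* tail elements */
}

int main(void)
{
    uint8_t vd[8] = {0}, vs2[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    uint8_t mask[1] = { 0x0f };               /* elements 0..3 active */
    model_v_env(vd, mask, vs2, 0, 6, 8, false, true, true);
    for (int i = 0; i < 8; i++) {
        printf("%02x ", vd[i]);               /* ff fe fd fc ff ff ff ff */
    }
    printf("\n");
    return 0;
}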
*/ + while (extract64(frac, frac_size - 1, 1) == 0) { + exp--; + frac <<= 1; + } + + frac = (frac << 1) & MAKE_64BIT_MASK(0, frac_size); + } + + int idx = ((exp & 1) << (precision - 1)) | + (frac >> (frac_size - precision + 1)); + uint64_t out_frac = (uint64_t)(lookup_table[idx]) << + (frac_size - precision); + uint64_t out_exp = (3 * MAKE_64BIT_MASK(0, exp_size - 1) + ~exp) / 2; + + uint64_t val = 0; + val = deposit64(val, 0, frac_size, out_frac); + val = deposit64(val, frac_size, exp_size, out_exp); + val = deposit64(val, frac_size + exp_size, 1, sign); + return val; +} + +static float16 frsqrt7_h(float16 f, float_status *s) +{ + int exp_size = 5, frac_size = 10; + bool sign = float16_is_neg(f); + + /* + * frsqrt7(sNaN) = canonical NaN + * frsqrt7(-inf) = canonical NaN + * frsqrt7(-normal) = canonical NaN + * frsqrt7(-subnormal) = canonical NaN + */ + if (float16_is_signaling_nan(f, s) || + (float16_is_infinity(f) && sign) || + (float16_is_normal(f) && sign) || + (float16_is_zero_or_denormal(f) && !float16_is_zero(f) && sign)) { + s->float_exception_flags |= float_flag_invalid; + return float16_default_nan(s); + } + + /* frsqrt7(qNaN) = canonical NaN */ + if (float16_is_quiet_nan(f, s)) { + return float16_default_nan(s); + } + + /* frsqrt7(+-0) = +-inf */ + if (float16_is_zero(f)) { + s->float_exception_flags |= float_flag_divbyzero; + return float16_set_sign(float16_infinity, sign); + } + + /* frsqrt7(+inf) = +0 */ + if (float16_is_infinity(f) && !sign) { + return float16_set_sign(float16_zero, sign); + } + + /* +normal, +subnormal */ + uint64_t val = frsqrt7(f, exp_size, frac_size); + return make_float16(val); +} + +static float32 frsqrt7_s(float32 f, float_status *s) +{ + int exp_size = 8, frac_size = 23; + bool sign = float32_is_neg(f); + + /* + * frsqrt7(sNaN) = canonical NaN + * frsqrt7(-inf) = canonical NaN + * frsqrt7(-normal) = canonical NaN + * frsqrt7(-subnormal) = canonical NaN + */ + if (float32_is_signaling_nan(f, s) || + (float32_is_infinity(f) && sign) || + (float32_is_normal(f) && sign) || + (float32_is_zero_or_denormal(f) && !float32_is_zero(f) && sign)) { + s->float_exception_flags |= float_flag_invalid; + return float32_default_nan(s); + } + + /* frsqrt7(qNaN) = canonical NaN */ + if (float32_is_quiet_nan(f, s)) { + return float32_default_nan(s); + } + + /* frsqrt7(+-0) = +-inf */ + if (float32_is_zero(f)) { + s->float_exception_flags |= float_flag_divbyzero; + return float32_set_sign(float32_infinity, sign); + } + + /* frsqrt7(+inf) = +0 */ + if (float32_is_infinity(f) && !sign) { + return float32_set_sign(float32_zero, sign); + } + + /* +normal, +subnormal */ + uint64_t val = frsqrt7(f, exp_size, frac_size); + return make_float32(val); +} + +static float64 frsqrt7_d(float64 f, float_status *s) +{ + int exp_size = 11, frac_size = 52; + bool sign = float64_is_neg(f); + + /* + * frsqrt7(sNaN) = canonical NaN + * frsqrt7(-inf) = canonical NaN + * frsqrt7(-normal) = canonical NaN + * frsqrt7(-subnormal) = canonical NaN + */ + if (float64_is_signaling_nan(f, s) || + (float64_is_infinity(f) && sign) || + (float64_is_normal(f) && sign) || + (float64_is_zero_or_denormal(f) && !float64_is_zero(f) && sign)) { + s->float_exception_flags |= float_flag_invalid; + return float64_default_nan(s); + } + + /* frsqrt7(qNaN) = canonical NaN */ + if (float64_is_quiet_nan(f, s)) { + return float64_default_nan(s); + } + + /* frsqrt7(+-0) = +-inf */ + if (float64_is_zero(f)) { + s->float_exception_flags |= float_flag_divbyzero; + return float64_set_sign(float64_infinity, sign); + } + + /* 
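/*
 * Illustrative sketch, not part of this patch: the table index and output
 * exponent that frsqrt7() derives for the binary32 input 4.0
 * (sign = 0, exp = 129, frac = 0).  The lookup itself is omitted;
 * lookup_table[64] is 127, so the estimate comes out as
 * 0.25 * (1 + 127/128) ~= 0.498 against an exact 1/sqrt(4) = 0.5.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const int exp_size = 8, frac_size = 23, precision = 7;
    uint64_t exp = 129, frac = 0;                          /* 4.0f = 2^2 * 1.0 */

    uint64_t idx = ((exp & 1) << (precision - 1)) |
                   (frac >> (frac_size - precision + 1));
    uint64_t half_exp_mask = (1ull << (exp_size - 1)) - 1; /* 127 */
    uint64_t out_exp = (3 * half_exp_mask + ~exp) / 2;     /* wraps mod 2^64 */

    assert(idx == 64);
    assert(out_exp == 125);                                /* 2^(125-127) = 0.25 */
    printf("idx=%llu out_exp=%llu\n",
           (unsigned long long)idx, (unsigned long long)out_exp);
    return 0;
}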
frsqrt7(+inf) = +0 */ + if (float64_is_infinity(f) && !sign) { + return float64_set_sign(float64_zero, sign); + } + + /* +normal, +subnormal */ + uint64_t val = frsqrt7(f, exp_size, frac_size); + return make_float64(val); +} + +RVVCALL(OPFVV1, vfrsqrt7_v_h, OP_UU_H, H2, H2, frsqrt7_h) +RVVCALL(OPFVV1, vfrsqrt7_v_w, OP_UU_W, H4, H4, frsqrt7_s) +RVVCALL(OPFVV1, vfrsqrt7_v_d, OP_UU_D, H8, H8, frsqrt7_d) +GEN_VEXT_V_ENV(vfrsqrt7_v_h, 2) +GEN_VEXT_V_ENV(vfrsqrt7_v_w, 4) +GEN_VEXT_V_ENV(vfrsqrt7_v_d, 8) + +/* + * Vector Floating-Point Reciprocal Estimate Instruction + * + * Adapted from riscv-v-spec recip.c: + * https://github.com/riscv/riscv-v-spec/blob/master/recip.c + */ +static uint64_t frec7(uint64_t f, int exp_size, int frac_size, + float_status *s) +{ + uint64_t sign = extract64(f, frac_size + exp_size, 1); + uint64_t exp = extract64(f, frac_size, exp_size); + uint64_t frac = extract64(f, 0, frac_size); + + const uint8_t lookup_table[] = { + 127, 125, 123, 121, 119, 117, 116, 114, + 112, 110, 109, 107, 105, 104, 102, 100, + 99, 97, 96, 94, 93, 91, 90, 88, + 87, 85, 84, 83, 81, 80, 79, 77, + 76, 75, 74, 72, 71, 70, 69, 68, + 66, 65, 64, 63, 62, 61, 60, 59, + 58, 57, 56, 55, 54, 53, 52, 51, + 50, 49, 48, 47, 46, 45, 44, 43, + 42, 41, 40, 40, 39, 38, 37, 36, + 35, 35, 34, 33, 32, 31, 31, 30, + 29, 28, 28, 27, 26, 25, 25, 24, + 23, 23, 22, 21, 21, 20, 19, 19, + 18, 17, 17, 16, 15, 15, 14, 14, + 13, 12, 12, 11, 11, 10, 9, 9, + 8, 8, 7, 7, 6, 5, 5, 4, + 4, 3, 3, 2, 2, 1, 1, 0 + }; + const int precision = 7; + + if (exp == 0 && frac != 0) { /* subnormal */ + /* Normalize the subnormal. */ + while (extract64(frac, frac_size - 1, 1) == 0) { + exp--; + frac <<= 1; + } + + frac = (frac << 1) & MAKE_64BIT_MASK(0, frac_size); + + if (exp != 0 && exp != UINT64_MAX) { + /* + * Overflow to inf or max value of same sign, + * depending on sign and rounding mode. + */ + s->float_exception_flags |= (float_flag_inexact | + float_flag_overflow); + + if ((s->float_rounding_mode == float_round_to_zero) || + ((s->float_rounding_mode == float_round_down) && !sign) || + ((s->float_rounding_mode == float_round_up) && sign)) { + /* Return greatest/negative finite value. */ + return (sign << (exp_size + frac_size)) | + (MAKE_64BIT_MASK(frac_size, exp_size) - 1); + } else { + /* Return +-inf. */ + return (sign << (exp_size + frac_size)) | + MAKE_64BIT_MASK(frac_size, exp_size); + } + } + } + + int idx = frac >> (frac_size - precision); + uint64_t out_frac = (uint64_t)(lookup_table[idx]) << + (frac_size - precision); + uint64_t out_exp = 2 * MAKE_64BIT_MASK(0, exp_size - 1) + ~exp; + + if (out_exp == 0 || out_exp == UINT64_MAX) { + /* + * The result is subnormal, but don't raise the underflow exception, + * because there's no additional loss of precision. 
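/*
 * Illustrative sketch, not part of this patch: the rounding-mode decision
 * frec7() makes when the reciprocal of a subnormal input overflows.  The
 * bit layout is spelled out for binary32 only and frec7_overflow32() is a
 * hypothetical stand-in; the patch does the same thing generically with
 * MAKE_64BIT_MASK() and softfloat's rounding-mode field.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum round_mode { ROUND_NEAREST, ROUND_TO_ZERO, ROUND_DOWN, ROUND_UP };

static uint32_t frec7_overflow32(bool sign, enum round_mode rm)
{
    const uint32_t exp_all_ones = 0xffu << 23;            /* exponent field */
    const uint32_t sign_bit = (uint32_t)sign << 31;

    if (rm == ROUND_TO_ZERO ||
        (rm == ROUND_DOWN && !sign) ||
        (rm == ROUND_UP && sign)) {
        /* greatest-magnitude finite value of the same sign (FLT_MAX) */
        return sign_bit | (exp_all_ones - 1);
    }
    /* otherwise +/- infinity */
    return sign_bit | exp_all_ones;
}

int main(void)
{
    printf("%08x\n", frec7_overflow32(false, ROUND_NEAREST)); /* 7f800000 = +inf */
    printf("%08x\n", frec7_overflow32(false, ROUND_TO_ZERO)); /* 7f7fffff = FLT_MAX */
    printf("%08x\n", frec7_overflow32(true, ROUND_UP));       /* ff7fffff = -FLT_MAX */
    return 0;
}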
+ */ + out_frac = (out_frac >> 1) | MAKE_64BIT_MASK(frac_size - 1, 1); + if (out_exp == UINT64_MAX) { + out_frac >>= 1; + out_exp = 0; + } + } + + uint64_t val = 0; + val = deposit64(val, 0, frac_size, out_frac); + val = deposit64(val, frac_size, exp_size, out_exp); + val = deposit64(val, frac_size + exp_size, 1, sign); + return val; +} + +static float16 frec7_h(float16 f, float_status *s) +{ + int exp_size = 5, frac_size = 10; + bool sign = float16_is_neg(f); + + /* frec7(+-inf) = +-0 */ + if (float16_is_infinity(f)) { + return float16_set_sign(float16_zero, sign); + } + + /* frec7(+-0) = +-inf */ + if (float16_is_zero(f)) { + s->float_exception_flags |= float_flag_divbyzero; + return float16_set_sign(float16_infinity, sign); + } + + /* frec7(sNaN) = canonical NaN */ + if (float16_is_signaling_nan(f, s)) { + s->float_exception_flags |= float_flag_invalid; + return float16_default_nan(s); + } + + /* frec7(qNaN) = canonical NaN */ + if (float16_is_quiet_nan(f, s)) { + return float16_default_nan(s); + } + + /* +-normal, +-subnormal */ + uint64_t val = frec7(f, exp_size, frac_size, s); + return make_float16(val); +} + +static float32 frec7_s(float32 f, float_status *s) +{ + int exp_size = 8, frac_size = 23; + bool sign = float32_is_neg(f); + + /* frec7(+-inf) = +-0 */ + if (float32_is_infinity(f)) { + return float32_set_sign(float32_zero, sign); + } + + /* frec7(+-0) = +-inf */ + if (float32_is_zero(f)) { + s->float_exception_flags |= float_flag_divbyzero; + return float32_set_sign(float32_infinity, sign); + } + + /* frec7(sNaN) = canonical NaN */ + if (float32_is_signaling_nan(f, s)) { + s->float_exception_flags |= float_flag_invalid; + return float32_default_nan(s); + } + + /* frec7(qNaN) = canonical NaN */ + if (float32_is_quiet_nan(f, s)) { + return float32_default_nan(s); + } + + /* +-normal, +-subnormal */ + uint64_t val = frec7(f, exp_size, frac_size, s); + return make_float32(val); +} + +static float64 frec7_d(float64 f, float_status *s) +{ + int exp_size = 11, frac_size = 52; + bool sign = float64_is_neg(f); + + /* frec7(+-inf) = +-0 */ + if (float64_is_infinity(f)) { + return float64_set_sign(float64_zero, sign); + } + + /* frec7(+-0) = +-inf */ + if (float64_is_zero(f)) { + s->float_exception_flags |= float_flag_divbyzero; + return float64_set_sign(float64_infinity, sign); + } + + /* frec7(sNaN) = canonical NaN */ + if (float64_is_signaling_nan(f, s)) { + s->float_exception_flags |= float_flag_invalid; + return float64_default_nan(s); + } + + /* frec7(qNaN) = canonical NaN */ + if (float64_is_quiet_nan(f, s)) { + return float64_default_nan(s); + } + + /* +-normal, +-subnormal */ + uint64_t val = frec7(f, exp_size, frac_size, s); + return make_float64(val); +} + +RVVCALL(OPFVV1, vfrec7_v_h, OP_UU_H, H2, H2, frec7_h) +RVVCALL(OPFVV1, vfrec7_v_w, OP_UU_W, H4, H4, frec7_s) +RVVCALL(OPFVV1, vfrec7_v_d, OP_UU_D, H8, H8, frec7_d) +GEN_VEXT_V_ENV(vfrec7_v_h, 2) +GEN_VEXT_V_ENV(vfrec7_v_w, 4) +GEN_VEXT_V_ENV(vfrec7_v_d, 8) /* Vector Floating-Point MIN/MAX Instructions */ -RVVCALL(OPFVV2, vfmin_vv_h, OP_UUU_H, H2, H2, H2, float16_minnum) -RVVCALL(OPFVV2, vfmin_vv_w, OP_UUU_W, H4, H4, H4, float32_minnum) -RVVCALL(OPFVV2, vfmin_vv_d, OP_UUU_D, H8, H8, H8, float64_minnum) -GEN_VEXT_VV_ENV(vfmin_vv_h, 2, 2, clearh) -GEN_VEXT_VV_ENV(vfmin_vv_w, 4, 4, clearl) -GEN_VEXT_VV_ENV(vfmin_vv_d, 8, 8, clearq) -RVVCALL(OPFVF2, vfmin_vf_h, OP_UUU_H, H2, H2, float16_minnum) -RVVCALL(OPFVF2, vfmin_vf_w, OP_UUU_W, H4, H4, float32_minnum) -RVVCALL(OPFVF2, vfmin_vf_d, OP_UUU_D, H8, H8, float64_minnum) 
-GEN_VEXT_VF(vfmin_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfmin_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfmin_vf_d, 8, 8, clearq) +RVVCALL(OPFVV2, vfmin_vv_h, OP_UUU_H, H2, H2, H2, float16_minimum_number) +RVVCALL(OPFVV2, vfmin_vv_w, OP_UUU_W, H4, H4, H4, float32_minimum_number) +RVVCALL(OPFVV2, vfmin_vv_d, OP_UUU_D, H8, H8, H8, float64_minimum_number) +GEN_VEXT_VV_ENV(vfmin_vv_h, 2) +GEN_VEXT_VV_ENV(vfmin_vv_w, 4) +GEN_VEXT_VV_ENV(vfmin_vv_d, 8) +RVVCALL(OPFVF2, vfmin_vf_h, OP_UUU_H, H2, H2, float16_minimum_number) +RVVCALL(OPFVF2, vfmin_vf_w, OP_UUU_W, H4, H4, float32_minimum_number) +RVVCALL(OPFVF2, vfmin_vf_d, OP_UUU_D, H8, H8, float64_minimum_number) +GEN_VEXT_VF(vfmin_vf_h, 2) +GEN_VEXT_VF(vfmin_vf_w, 4) +GEN_VEXT_VF(vfmin_vf_d, 8) -RVVCALL(OPFVV2, vfmax_vv_h, OP_UUU_H, H2, H2, H2, float16_maxnum) -RVVCALL(OPFVV2, vfmax_vv_w, OP_UUU_W, H4, H4, H4, float32_maxnum) -RVVCALL(OPFVV2, vfmax_vv_d, OP_UUU_D, H8, H8, H8, float64_maxnum) -GEN_VEXT_VV_ENV(vfmax_vv_h, 2, 2, clearh) -GEN_VEXT_VV_ENV(vfmax_vv_w, 4, 4, clearl) -GEN_VEXT_VV_ENV(vfmax_vv_d, 8, 8, clearq) -RVVCALL(OPFVF2, vfmax_vf_h, OP_UUU_H, H2, H2, float16_maxnum) -RVVCALL(OPFVF2, vfmax_vf_w, OP_UUU_W, H4, H4, float32_maxnum) -RVVCALL(OPFVF2, vfmax_vf_d, OP_UUU_D, H8, H8, float64_maxnum) -GEN_VEXT_VF(vfmax_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfmax_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfmax_vf_d, 8, 8, clearq) +RVVCALL(OPFVV2, vfmax_vv_h, OP_UUU_H, H2, H2, H2, float16_maximum_number) +RVVCALL(OPFVV2, vfmax_vv_w, OP_UUU_W, H4, H4, H4, float32_maximum_number) +RVVCALL(OPFVV2, vfmax_vv_d, OP_UUU_D, H8, H8, H8, float64_maximum_number) +GEN_VEXT_VV_ENV(vfmax_vv_h, 2) +GEN_VEXT_VV_ENV(vfmax_vv_w, 4) +GEN_VEXT_VV_ENV(vfmax_vv_d, 8) +RVVCALL(OPFVF2, vfmax_vf_h, OP_UUU_H, H2, H2, float16_maximum_number) +RVVCALL(OPFVF2, vfmax_vf_w, OP_UUU_W, H4, H4, float32_maximum_number) +RVVCALL(OPFVF2, vfmax_vf_d, OP_UUU_D, H8, H8, float64_maximum_number) +GEN_VEXT_VF(vfmax_vf_h, 2) +GEN_VEXT_VF(vfmax_vf_w, 4) +GEN_VEXT_VF(vfmax_vf_d, 8) /* Vector Floating-Point Sign-Injection Instructions */ static uint16_t fsgnj16(uint16_t a, uint16_t b, float_status *s) @@ -3860,15 +4119,15 @@ static uint64_t fsgnj64(uint64_t a, uint64_t b, float_status *s) RVVCALL(OPFVV2, vfsgnj_vv_h, OP_UUU_H, H2, H2, H2, fsgnj16) RVVCALL(OPFVV2, vfsgnj_vv_w, OP_UUU_W, H4, H4, H4, fsgnj32) RVVCALL(OPFVV2, vfsgnj_vv_d, OP_UUU_D, H8, H8, H8, fsgnj64) -GEN_VEXT_VV_ENV(vfsgnj_vv_h, 2, 2, clearh) -GEN_VEXT_VV_ENV(vfsgnj_vv_w, 4, 4, clearl) -GEN_VEXT_VV_ENV(vfsgnj_vv_d, 8, 8, clearq) +GEN_VEXT_VV_ENV(vfsgnj_vv_h, 2) +GEN_VEXT_VV_ENV(vfsgnj_vv_w, 4) +GEN_VEXT_VV_ENV(vfsgnj_vv_d, 8) RVVCALL(OPFVF2, vfsgnj_vf_h, OP_UUU_H, H2, H2, fsgnj16) RVVCALL(OPFVF2, vfsgnj_vf_w, OP_UUU_W, H4, H4, fsgnj32) RVVCALL(OPFVF2, vfsgnj_vf_d, OP_UUU_D, H8, H8, fsgnj64) -GEN_VEXT_VF(vfsgnj_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfsgnj_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfsgnj_vf_d, 8, 8, clearq) +GEN_VEXT_VF(vfsgnj_vf_h, 2) +GEN_VEXT_VF(vfsgnj_vf_w, 4) +GEN_VEXT_VF(vfsgnj_vf_d, 8) static uint16_t fsgnjn16(uint16_t a, uint16_t b, float_status *s) { @@ -3888,15 +4147,15 @@ static uint64_t fsgnjn64(uint64_t a, uint64_t b, float_status *s) RVVCALL(OPFVV2, vfsgnjn_vv_h, OP_UUU_H, H2, H2, H2, fsgnjn16) RVVCALL(OPFVV2, vfsgnjn_vv_w, OP_UUU_W, H4, H4, H4, fsgnjn32) RVVCALL(OPFVV2, vfsgnjn_vv_d, OP_UUU_D, H8, H8, H8, fsgnjn64) -GEN_VEXT_VV_ENV(vfsgnjn_vv_h, 2, 2, clearh) -GEN_VEXT_VV_ENV(vfsgnjn_vv_w, 4, 4, clearl) -GEN_VEXT_VV_ENV(vfsgnjn_vv_d, 8, 8, clearq) +GEN_VEXT_VV_ENV(vfsgnjn_vv_h, 2) +GEN_VEXT_VV_ENV(vfsgnjn_vv_w, 4) 
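/*
 * Illustrative sketch, not part of this patch: the NaN behaviour of the
 * minimumNumber operation that the *_minimum_number softfloat functions
 * are named after.  When exactly one operand is NaN the numeric operand is
 * returned; two NaNs give NaN.  The sNaN/qNaN distinction, exception
 * flags and -0 < +0 ordering are handled by softfloat and are not
 * modelled here; minimum_number_model() is a hypothetical stand-in.
 */
#include <math.h>
#include <stdio.h>

static double minimum_number_model(double a, double b)
{
    if (isnan(a) && isnan(b)) {
        return NAN;                 /* no number to prefer */
    }
    if (isnan(a)) {
        return b;                   /* NaN loses to a number */
    }
    if (isnan(b)) {
        return a;
    }
    return a < b ? a : b;
}

int main(void)
{
    printf("%g\n", minimum_number_model(3.0, NAN));   /* 3 */
    printf("%g\n", minimum_number_model(NAN, -1.5));  /* -1.5 */
    printf("%g\n", minimum_number_model(2.0, -1.5));  /* -1.5 */
    return 0;
}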
+GEN_VEXT_VV_ENV(vfsgnjn_vv_d, 8) RVVCALL(OPFVF2, vfsgnjn_vf_h, OP_UUU_H, H2, H2, fsgnjn16) RVVCALL(OPFVF2, vfsgnjn_vf_w, OP_UUU_W, H4, H4, fsgnjn32) RVVCALL(OPFVF2, vfsgnjn_vf_d, OP_UUU_D, H8, H8, fsgnjn64) -GEN_VEXT_VF(vfsgnjn_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfsgnjn_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfsgnjn_vf_d, 8, 8, clearq) +GEN_VEXT_VF(vfsgnjn_vf_h, 2) +GEN_VEXT_VF(vfsgnjn_vf_w, 4) +GEN_VEXT_VF(vfsgnjn_vf_d, 8) static uint16_t fsgnjx16(uint16_t a, uint16_t b, float_status *s) { @@ -3916,38 +4175,48 @@ static uint64_t fsgnjx64(uint64_t a, uint64_t b, float_status *s) RVVCALL(OPFVV2, vfsgnjx_vv_h, OP_UUU_H, H2, H2, H2, fsgnjx16) RVVCALL(OPFVV2, vfsgnjx_vv_w, OP_UUU_W, H4, H4, H4, fsgnjx32) RVVCALL(OPFVV2, vfsgnjx_vv_d, OP_UUU_D, H8, H8, H8, fsgnjx64) -GEN_VEXT_VV_ENV(vfsgnjx_vv_h, 2, 2, clearh) -GEN_VEXT_VV_ENV(vfsgnjx_vv_w, 4, 4, clearl) -GEN_VEXT_VV_ENV(vfsgnjx_vv_d, 8, 8, clearq) +GEN_VEXT_VV_ENV(vfsgnjx_vv_h, 2) +GEN_VEXT_VV_ENV(vfsgnjx_vv_w, 4) +GEN_VEXT_VV_ENV(vfsgnjx_vv_d, 8) RVVCALL(OPFVF2, vfsgnjx_vf_h, OP_UUU_H, H2, H2, fsgnjx16) RVVCALL(OPFVF2, vfsgnjx_vf_w, OP_UUU_W, H4, H4, fsgnjx32) RVVCALL(OPFVF2, vfsgnjx_vf_d, OP_UUU_D, H8, H8, fsgnjx64) -GEN_VEXT_VF(vfsgnjx_vf_h, 2, 2, clearh) -GEN_VEXT_VF(vfsgnjx_vf_w, 4, 4, clearl) -GEN_VEXT_VF(vfsgnjx_vf_d, 8, 8, clearq) +GEN_VEXT_VF(vfsgnjx_vf_h, 2) +GEN_VEXT_VF(vfsgnjx_vf_w, 4) +GEN_VEXT_VF(vfsgnjx_vf_d, 8) /* Vector Floating-Point Compare Instructions */ #define GEN_VEXT_CMP_VV_ENV(NAME, ETYPE, H, DO_OP) \ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \ + uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t vta_all_1s = vext_vta_all_1s(desc); \ + uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ + for (i = env->vstart; i < vl; i++) { \ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + if (vma) { \ + vext_set_elem_mask(vd, i, 1); \ + } \ continue; \ } \ - vext_set_elem_mask(vd, mlen, i, \ + vext_set_elem_mask(vd, i, \ DO_OP(s2, s1, &env->fp_status)); \ } \ - for (; i < vlmax; i++) { \ - vext_set_elem_mask(vd, mlen, i, 0); \ + env->vstart = 0; \ + /* mask destination register are always tail-agnostic */ \ + /* set tail elements to 1s */ \ + if (vta_all_1s) { \ + for (; i < total_elems; i++) { \ + vext_set_elem_mask(vd, i, 1); \ + } \ } \ } @@ -3959,22 +4228,32 @@ GEN_VEXT_CMP_VV_ENV(vmfeq_vv_d, uint64_t, H8, float64_eq_quiet) void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \ CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \ + uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t vta_all_1s = vext_vta_all_1s(desc); \ + uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ + for (i = env->vstart; i < vl; i++) { \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + if (vma) { \ + vext_set_elem_mask(vd, i, 1); \ + } \ continue; \ } \ - vext_set_elem_mask(vd, mlen, i, \ + vext_set_elem_mask(vd, i, \ DO_OP(s2, (ETYPE)s1, &env->fp_status)); \ } \ - 
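/*
 * Illustrative sketch, not part of this patch: the three sign-injection
 * flavours on binary32 bit patterns.  `mag` supplies the exponent and
 * mantissa, `sgn` supplies the sign source, following the scalar
 * fsgnj/fsgnjn/fsgnjx definitions; the argument plumbing inside the
 * vector helpers is not reproduced here and the do_* names are
 * hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define SIGN32 0x80000000u

static uint32_t do_fsgnj(uint32_t mag, uint32_t sgn)
{
    return (mag & ~SIGN32) | (sgn & SIGN32);
}

static uint32_t do_fsgnjn(uint32_t mag, uint32_t sgn)
{
    return (mag & ~SIGN32) | (~sgn & SIGN32);
}

static uint32_t do_fsgnjx(uint32_t mag, uint32_t sgn)
{
    return mag ^ (sgn & SIGN32);
}

int main(void)
{
    uint32_t one = 0x3f800000, minus_two = 0xc0000000;  /* 1.0f, -2.0f */
    printf("%08x\n", do_fsgnj(one, minus_two));   /* bf800000 = -1.0f */
    printf("%08x\n", do_fsgnjn(one, minus_two));  /* 3f800000 =  1.0f */
    printf("%08x\n", do_fsgnjx(one, minus_two));  /* bf800000 = -1.0f */
    return 0;
}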
for (; i < vlmax; i++) { \ - vext_set_elem_mask(vd, mlen, i, 0); \ + env->vstart = 0; \ + /* mask destination register are always tail-agnostic */ \ + /* set tail elements to 1s */ \ + if (vta_all_1s) { \ + for (; i < total_elems; i++) { \ + vext_set_elem_mask(vd, i, 1); \ + } \ } \ } @@ -4068,13 +4347,6 @@ GEN_VEXT_CMP_VF(vmfge_vf_h, uint16_t, H2, vmfge16) GEN_VEXT_CMP_VF(vmfge_vf_w, uint32_t, H4, vmfge32) GEN_VEXT_CMP_VF(vmfge_vf_d, uint64_t, H8, vmfge64) -GEN_VEXT_CMP_VV_ENV(vmford_vv_h, uint16_t, H2, !float16_unordered_quiet) -GEN_VEXT_CMP_VV_ENV(vmford_vv_w, uint32_t, H4, !float32_unordered_quiet) -GEN_VEXT_CMP_VV_ENV(vmford_vv_d, uint64_t, H8, !float64_unordered_quiet) -GEN_VEXT_CMP_VF(vmford_vf_h, uint16_t, H2, !float16_unordered_quiet) -GEN_VEXT_CMP_VF(vmford_vf_w, uint32_t, H4, !float32_unordered_quiet) -GEN_VEXT_CMP_VF(vmford_vf_d, uint64_t, H8, !float64_unordered_quiet) - /* Vector Floating-Point Classify Instruction */ #define OPIVV1(NAME, TD, T2, TX2, HD, HS2, OP) \ static void do_##NAME(void *vd, void *vs2, int i) \ @@ -4083,23 +4355,31 @@ static void do_##NAME(void *vd, void *vs2, int i) \ *((TD *)vd + HD(i)) = OP(s2); \ } -#define GEN_VEXT_V(NAME, ESZ, DSZ, CLEAR_FN) \ +#define GEN_VEXT_V(NAME, ESZ) \ void HELPER(NAME)(void *vd, void *v0, void *vs2, \ CPURISCVState *env, uint32_t desc) \ { \ - uint32_t vlmax = vext_maxsz(desc) / ESZ; \ - uint32_t mlen = vext_mlen(desc); \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ + uint32_t total_elems = \ + vext_get_total_elems(env, desc, ESZ); \ + uint32_t vta = vext_vta(desc); \ + uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + for (i = env->vstart; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + vext_set_elems_1s(vd, vma, i * ESZ, \ + (i + 1) * ESZ); \ continue; \ } \ do_##NAME(vd, vs2, i); \ } \ - CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * ESZ, \ + total_elems * ESZ); \ } target_ulong fclass_h(uint64_t frs1) @@ -4162,97 +4442,106 @@ target_ulong fclass_d(uint64_t frs1) RVVCALL(OPIVV1, vfclass_v_h, OP_UU_H, H2, H2, fclass_h) RVVCALL(OPIVV1, vfclass_v_w, OP_UU_W, H4, H4, fclass_s) RVVCALL(OPIVV1, vfclass_v_d, OP_UU_D, H8, H8, fclass_d) -GEN_VEXT_V(vfclass_v_h, 2, 2, clearh) -GEN_VEXT_V(vfclass_v_w, 4, 4, clearl) -GEN_VEXT_V(vfclass_v_d, 8, 8, clearq) +GEN_VEXT_V(vfclass_v_h, 2) +GEN_VEXT_V(vfclass_v_w, 4) +GEN_VEXT_V(vfclass_v_d, 8) /* Vector Floating-Point Merge Instruction */ -#define GEN_VFMERGE_VF(NAME, ETYPE, H, CLEAR_FN) \ + +#define GEN_VFMERGE_VF(NAME, ETYPE, H) \ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \ CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ uint32_t esz = sizeof(ETYPE); \ - uint32_t vlmax = vext_maxsz(desc) / esz; \ + uint32_t total_elems = \ + vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ + for (i = env->vstart; i < vl; i++) { \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ *((ETYPE *)vd + H(i)) \ - = (!vm && !vext_elem_mask(v0, mlen, i) ? s2 : s1); \ + = (!vm && !vext_elem_mask(v0, i) ? 
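/*
 * Illustrative sketch, not part of this patch: the shape of the compare
 * helpers that write a mask register.  Bits below vstart are untouched,
 * masked-off bits become 1 when vma is set, and the tail is filled with
 * 1s under the all-1s tail-agnostic policy, since mask destinations are
 * always treated as tail-agnostic.  get_bit()/set_bit()/cmp_lt_model()
 * are simplified stand-ins for vext_elem_mask()/vext_set_elem_mask() and
 * the generated helper.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool get_bit(const uint8_t *m, uint32_t i)
{
    return (m[i / 8] >> (i % 8)) & 1;
}

static void set_bit(uint8_t *m, uint32_t i, bool v)
{
    if (v) {
        m[i / 8] |= 1u << (i % 8);
    } else {
        m[i / 8] &= ~(1u << (i % 8));
    }
}

static void cmp_lt_model(uint8_t *vd, const uint8_t *v0,
                         const float *vs2, float s1,
                         uint32_t vstart, uint32_t vl, uint32_t total,
                         bool vm, bool vma, bool vta_all_1s)
{
    uint32_t i;
    for (i = vstart; i < vl; i++) {
        if (!vm && !get_bit(v0, i)) {
            if (vma) {
                set_bit(vd, i, 1);          /* masked-off -> 1 */
            }
            continue;
        }
        set_bit(vd, i, vs2[i] < s1);        /* the comparison itself */
    }
    if (vta_all_1s) {
        for (; i < total; i++) {
            set_bit(vd, i, 1);              /* mask tail is agnostic */
        }
    }
}

int main(void)
{
    uint8_t vd[1] = {0}, v0[1] = {0x3f};    /* elements 0..5 active */
    float vs2[6] = {0, 1, 2, 3, 4, 5};
    cmp_lt_model(vd, v0, vs2, 2.5f, 0, 6, 8, false, true, true);
    printf("%02x\n", vd[0]);                /* 0xc7: bits 0,1,2 plus tail 6,7 */
    return 0;
}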
s2 : s1); \ } \ - CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ } -GEN_VFMERGE_VF(vfmerge_vfm_h, int16_t, H2, clearh) -GEN_VFMERGE_VF(vfmerge_vfm_w, int32_t, H4, clearl) -GEN_VFMERGE_VF(vfmerge_vfm_d, int64_t, H8, clearq) +GEN_VFMERGE_VF(vfmerge_vfm_h, int16_t, H2) +GEN_VFMERGE_VF(vfmerge_vfm_w, int32_t, H4) +GEN_VFMERGE_VF(vfmerge_vfm_d, int64_t, H8) /* Single-Width Floating-Point/Integer Type-Convert Instructions */ /* vfcvt.xu.f.v vd, vs2, vm # Convert float to unsigned integer. */ RVVCALL(OPFVV1, vfcvt_xu_f_v_h, OP_UU_H, H2, H2, float16_to_uint16) RVVCALL(OPFVV1, vfcvt_xu_f_v_w, OP_UU_W, H4, H4, float32_to_uint32) RVVCALL(OPFVV1, vfcvt_xu_f_v_d, OP_UU_D, H8, H8, float64_to_uint64) -GEN_VEXT_V_ENV(vfcvt_xu_f_v_h, 2, 2, clearh) -GEN_VEXT_V_ENV(vfcvt_xu_f_v_w, 4, 4, clearl) -GEN_VEXT_V_ENV(vfcvt_xu_f_v_d, 8, 8, clearq) +GEN_VEXT_V_ENV(vfcvt_xu_f_v_h, 2) +GEN_VEXT_V_ENV(vfcvt_xu_f_v_w, 4) +GEN_VEXT_V_ENV(vfcvt_xu_f_v_d, 8) /* vfcvt.x.f.v vd, vs2, vm # Convert float to signed integer. */ RVVCALL(OPFVV1, vfcvt_x_f_v_h, OP_UU_H, H2, H2, float16_to_int16) RVVCALL(OPFVV1, vfcvt_x_f_v_w, OP_UU_W, H4, H4, float32_to_int32) RVVCALL(OPFVV1, vfcvt_x_f_v_d, OP_UU_D, H8, H8, float64_to_int64) -GEN_VEXT_V_ENV(vfcvt_x_f_v_h, 2, 2, clearh) -GEN_VEXT_V_ENV(vfcvt_x_f_v_w, 4, 4, clearl) -GEN_VEXT_V_ENV(vfcvt_x_f_v_d, 8, 8, clearq) +GEN_VEXT_V_ENV(vfcvt_x_f_v_h, 2) +GEN_VEXT_V_ENV(vfcvt_x_f_v_w, 4) +GEN_VEXT_V_ENV(vfcvt_x_f_v_d, 8) /* vfcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to float. */ RVVCALL(OPFVV1, vfcvt_f_xu_v_h, OP_UU_H, H2, H2, uint16_to_float16) RVVCALL(OPFVV1, vfcvt_f_xu_v_w, OP_UU_W, H4, H4, uint32_to_float32) RVVCALL(OPFVV1, vfcvt_f_xu_v_d, OP_UU_D, H8, H8, uint64_to_float64) -GEN_VEXT_V_ENV(vfcvt_f_xu_v_h, 2, 2, clearh) -GEN_VEXT_V_ENV(vfcvt_f_xu_v_w, 4, 4, clearl) -GEN_VEXT_V_ENV(vfcvt_f_xu_v_d, 8, 8, clearq) +GEN_VEXT_V_ENV(vfcvt_f_xu_v_h, 2) +GEN_VEXT_V_ENV(vfcvt_f_xu_v_w, 4) +GEN_VEXT_V_ENV(vfcvt_f_xu_v_d, 8) /* vfcvt.f.x.v vd, vs2, vm # Convert integer to float. */ RVVCALL(OPFVV1, vfcvt_f_x_v_h, OP_UU_H, H2, H2, int16_to_float16) RVVCALL(OPFVV1, vfcvt_f_x_v_w, OP_UU_W, H4, H4, int32_to_float32) RVVCALL(OPFVV1, vfcvt_f_x_v_d, OP_UU_D, H8, H8, int64_to_float64) -GEN_VEXT_V_ENV(vfcvt_f_x_v_h, 2, 2, clearh) -GEN_VEXT_V_ENV(vfcvt_f_x_v_w, 4, 4, clearl) -GEN_VEXT_V_ENV(vfcvt_f_x_v_d, 8, 8, clearq) +GEN_VEXT_V_ENV(vfcvt_f_x_v_h, 2) +GEN_VEXT_V_ENV(vfcvt_f_x_v_w, 4) +GEN_VEXT_V_ENV(vfcvt_f_x_v_d, 8) /* Widening Floating-Point/Integer Type-Convert Instructions */ /* (TD, T2, TX2) */ +#define WOP_UU_B uint16_t, uint8_t, uint8_t #define WOP_UU_H uint32_t, uint16_t, uint16_t #define WOP_UU_W uint64_t, uint32_t, uint32_t /* vfwcvt.xu.f.v vd, vs2, vm # Convert float to double-width unsigned integer.*/ RVVCALL(OPFVV1, vfwcvt_xu_f_v_h, WOP_UU_H, H4, H2, float16_to_uint32) RVVCALL(OPFVV1, vfwcvt_xu_f_v_w, WOP_UU_W, H8, H4, float32_to_uint64) -GEN_VEXT_V_ENV(vfwcvt_xu_f_v_h, 2, 4, clearl) -GEN_VEXT_V_ENV(vfwcvt_xu_f_v_w, 4, 8, clearq) +GEN_VEXT_V_ENV(vfwcvt_xu_f_v_h, 4) +GEN_VEXT_V_ENV(vfwcvt_xu_f_v_w, 8) /* vfwcvt.x.f.v vd, vs2, vm # Convert float to double-width signed integer. 
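/*
 * Illustrative sketch, not part of this patch: the vfmerge.vfm selection
 * rule with the mask as a plain bool array.  Where the mask bit is clear
 * the vs2 element is kept, otherwise the scalar operand is written; the
 * tail-agnostic handling shown earlier applies afterwards and is omitted.
 * vfmerge_model() is a hypothetical stand-in.
 */
#include <stdbool.h>
#include <stdio.h>

static void vfmerge_model(float *vd, const bool *mask, const float *vs2,
                          float s1, unsigned vl)
{
    for (unsigned i = 0; i < vl; i++) {
        vd[i] = mask[i] ? s1 : vs2[i];      /* mask set -> scalar */
    }
}

int main(void)
{
    float vd[4], vs2[4] = {10, 20, 30, 40};
    bool mask[4] = {true, false, true, false};
    vfmerge_model(vd, mask, vs2, 1.5f, 4);
    printf("%g %g %g %g\n", vd[0], vd[1], vd[2], vd[3]); /* 1.5 20 1.5 40 */
    return 0;
}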
*/ RVVCALL(OPFVV1, vfwcvt_x_f_v_h, WOP_UU_H, H4, H2, float16_to_int32) RVVCALL(OPFVV1, vfwcvt_x_f_v_w, WOP_UU_W, H8, H4, float32_to_int64) -GEN_VEXT_V_ENV(vfwcvt_x_f_v_h, 2, 4, clearl) -GEN_VEXT_V_ENV(vfwcvt_x_f_v_w, 4, 8, clearq) +GEN_VEXT_V_ENV(vfwcvt_x_f_v_h, 4) +GEN_VEXT_V_ENV(vfwcvt_x_f_v_w, 8) /* vfwcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to double-width float */ +RVVCALL(OPFVV1, vfwcvt_f_xu_v_b, WOP_UU_B, H2, H1, uint8_to_float16) RVVCALL(OPFVV1, vfwcvt_f_xu_v_h, WOP_UU_H, H4, H2, uint16_to_float32) RVVCALL(OPFVV1, vfwcvt_f_xu_v_w, WOP_UU_W, H8, H4, uint32_to_float64) -GEN_VEXT_V_ENV(vfwcvt_f_xu_v_h, 2, 4, clearl) -GEN_VEXT_V_ENV(vfwcvt_f_xu_v_w, 4, 8, clearq) +GEN_VEXT_V_ENV(vfwcvt_f_xu_v_b, 2) +GEN_VEXT_V_ENV(vfwcvt_f_xu_v_h, 4) +GEN_VEXT_V_ENV(vfwcvt_f_xu_v_w, 8) /* vfwcvt.f.x.v vd, vs2, vm # Convert integer to double-width float. */ +RVVCALL(OPFVV1, vfwcvt_f_x_v_b, WOP_UU_B, H2, H1, int8_to_float16) RVVCALL(OPFVV1, vfwcvt_f_x_v_h, WOP_UU_H, H4, H2, int16_to_float32) RVVCALL(OPFVV1, vfwcvt_f_x_v_w, WOP_UU_W, H8, H4, int32_to_float64) -GEN_VEXT_V_ENV(vfwcvt_f_x_v_h, 2, 4, clearl) -GEN_VEXT_V_ENV(vfwcvt_f_x_v_w, 4, 8, clearq) +GEN_VEXT_V_ENV(vfwcvt_f_x_v_b, 2) +GEN_VEXT_V_ENV(vfwcvt_f_x_v_h, 4) +GEN_VEXT_V_ENV(vfwcvt_f_x_v_w, 8) /* - * vfwcvt.f.f.v vd, vs2, vm # + * vfwcvt.f.f.v vd, vs2, vm * Convert single-width float to double-width float. */ static uint32_t vfwcvtffv16(uint16_t a, float_status *s) @@ -4262,36 +4551,41 @@ static uint32_t vfwcvtffv16(uint16_t a, float_status *s) RVVCALL(OPFVV1, vfwcvt_f_f_v_h, WOP_UU_H, H4, H2, vfwcvtffv16) RVVCALL(OPFVV1, vfwcvt_f_f_v_w, WOP_UU_W, H8, H4, float32_to_float64) -GEN_VEXT_V_ENV(vfwcvt_f_f_v_h, 2, 4, clearl) -GEN_VEXT_V_ENV(vfwcvt_f_f_v_w, 4, 8, clearq) +GEN_VEXT_V_ENV(vfwcvt_f_f_v_h, 4) +GEN_VEXT_V_ENV(vfwcvt_f_f_v_w, 8) /* Narrowing Floating-Point/Integer Type-Convert Instructions */ /* (TD, T2, TX2) */ +#define NOP_UU_B uint8_t, uint16_t, uint32_t #define NOP_UU_H uint16_t, uint32_t, uint32_t #define NOP_UU_W uint32_t, uint64_t, uint64_t /* vfncvt.xu.f.v vd, vs2, vm # Convert float to unsigned integer. */ -RVVCALL(OPFVV1, vfncvt_xu_f_v_h, NOP_UU_H, H2, H4, float32_to_uint16) -RVVCALL(OPFVV1, vfncvt_xu_f_v_w, NOP_UU_W, H4, H8, float64_to_uint32) -GEN_VEXT_V_ENV(vfncvt_xu_f_v_h, 2, 2, clearh) -GEN_VEXT_V_ENV(vfncvt_xu_f_v_w, 4, 4, clearl) +RVVCALL(OPFVV1, vfncvt_xu_f_w_b, NOP_UU_B, H1, H2, float16_to_uint8) +RVVCALL(OPFVV1, vfncvt_xu_f_w_h, NOP_UU_H, H2, H4, float32_to_uint16) +RVVCALL(OPFVV1, vfncvt_xu_f_w_w, NOP_UU_W, H4, H8, float64_to_uint32) +GEN_VEXT_V_ENV(vfncvt_xu_f_w_b, 1) +GEN_VEXT_V_ENV(vfncvt_xu_f_w_h, 2) +GEN_VEXT_V_ENV(vfncvt_xu_f_w_w, 4) /* vfncvt.x.f.v vd, vs2, vm # Convert double-width float to signed integer. 
*/ -RVVCALL(OPFVV1, vfncvt_x_f_v_h, NOP_UU_H, H2, H4, float32_to_int16) -RVVCALL(OPFVV1, vfncvt_x_f_v_w, NOP_UU_W, H4, H8, float64_to_int32) -GEN_VEXT_V_ENV(vfncvt_x_f_v_h, 2, 2, clearh) -GEN_VEXT_V_ENV(vfncvt_x_f_v_w, 4, 4, clearl) +RVVCALL(OPFVV1, vfncvt_x_f_w_b, NOP_UU_B, H1, H2, float16_to_int8) +RVVCALL(OPFVV1, vfncvt_x_f_w_h, NOP_UU_H, H2, H4, float32_to_int16) +RVVCALL(OPFVV1, vfncvt_x_f_w_w, NOP_UU_W, H4, H8, float64_to_int32) +GEN_VEXT_V_ENV(vfncvt_x_f_w_b, 1) +GEN_VEXT_V_ENV(vfncvt_x_f_w_h, 2) +GEN_VEXT_V_ENV(vfncvt_x_f_w_w, 4) /* vfncvt.f.xu.v vd, vs2, vm # Convert double-width unsigned integer to float */ -RVVCALL(OPFVV1, vfncvt_f_xu_v_h, NOP_UU_H, H2, H4, uint32_to_float16) -RVVCALL(OPFVV1, vfncvt_f_xu_v_w, NOP_UU_W, H4, H8, uint64_to_float32) -GEN_VEXT_V_ENV(vfncvt_f_xu_v_h, 2, 2, clearh) -GEN_VEXT_V_ENV(vfncvt_f_xu_v_w, 4, 4, clearl) +RVVCALL(OPFVV1, vfncvt_f_xu_w_h, NOP_UU_H, H2, H4, uint32_to_float16) +RVVCALL(OPFVV1, vfncvt_f_xu_w_w, NOP_UU_W, H4, H8, uint64_to_float32) +GEN_VEXT_V_ENV(vfncvt_f_xu_w_h, 2) +GEN_VEXT_V_ENV(vfncvt_f_xu_w_w, 4) /* vfncvt.f.x.v vd, vs2, vm # Convert double-width integer to float. */ -RVVCALL(OPFVV1, vfncvt_f_x_v_h, NOP_UU_H, H2, H4, int32_to_float16) -RVVCALL(OPFVV1, vfncvt_f_x_v_w, NOP_UU_W, H4, H8, int64_to_float32) -GEN_VEXT_V_ENV(vfncvt_f_x_v_h, 2, 2, clearh) -GEN_VEXT_V_ENV(vfncvt_f_x_v_w, 4, 4, clearl) +RVVCALL(OPFVV1, vfncvt_f_x_w_h, NOP_UU_H, H2, H4, int32_to_float16) +RVVCALL(OPFVV1, vfncvt_f_x_w_w, NOP_UU_W, H4, H8, int64_to_float32) +GEN_VEXT_V_ENV(vfncvt_f_x_w_h, 2) +GEN_VEXT_V_ENV(vfncvt_f_x_w_w, 4) /* vfncvt.f.f.v vd, vs2, vm # Convert double float to single-width float. */ static uint16_t vfncvtffv16(uint32_t a, float_status *s) @@ -4299,180 +4593,163 @@ static uint16_t vfncvtffv16(uint32_t a, float_status *s) return float32_to_float16(a, true, s); } -RVVCALL(OPFVV1, vfncvt_f_f_v_h, NOP_UU_H, H2, H4, vfncvtffv16) -RVVCALL(OPFVV1, vfncvt_f_f_v_w, NOP_UU_W, H4, H8, float64_to_float32) -GEN_VEXT_V_ENV(vfncvt_f_f_v_h, 2, 2, clearh) -GEN_VEXT_V_ENV(vfncvt_f_f_v_w, 4, 4, clearl) +RVVCALL(OPFVV1, vfncvt_f_f_w_h, NOP_UU_H, H2, H4, vfncvtffv16) +RVVCALL(OPFVV1, vfncvt_f_f_w_w, NOP_UU_W, H4, H8, float64_to_float32) +GEN_VEXT_V_ENV(vfncvt_f_f_w_h, 2) +GEN_VEXT_V_ENV(vfncvt_f_f_w_w, 4) /* *** Vector Reduction Operations */ /* Vector Single-Width Integer Reduction Instructions */ -#define GEN_VEXT_RED(NAME, TD, TS2, HD, HS2, OP, CLEAR_FN)\ +#define GEN_VEXT_RED(NAME, TD, TS2, HD, HS2, OP) \ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ void *vs2, CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ + uint32_t esz = sizeof(TD); \ + uint32_t vlenb = simd_maxsz(desc); \ + uint32_t vta = vext_vta(desc); \ uint32_t i; \ - uint32_t tot = env_archcpu(env)->cfg.vlen / 8; \ TD s1 = *((TD *)vs1 + HD(0)); \ \ - for (i = 0; i < vl; i++) { \ + for (i = env->vstart; i < vl; i++) { \ TS2 s2 = *((TS2 *)vs2 + HS2(i)); \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ continue; \ } \ s1 = OP(s1, (TD)s2); \ } \ *((TD *)vd + HD(0)) = s1; \ - CLEAR_FN(vd, 1, sizeof(TD), tot); \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, esz, vlenb); \ } /* vd[0] = sum(vs1[0], vs2[*]) */ -GEN_VEXT_RED(vredsum_vs_b, int8_t, int8_t, H1, H1, DO_ADD, clearb) -GEN_VEXT_RED(vredsum_vs_h, int16_t, int16_t, H2, H2, DO_ADD, clearh) -GEN_VEXT_RED(vredsum_vs_w, int32_t, int32_t, H4, H4, DO_ADD, clearl) 
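/*
 * Illustrative sketch, not part of this patch: the single-width reduction
 * pattern used by GEN_VEXT_RED.  The seed comes from vs1[0], inactive
 * elements contribute nothing, only element 0 of vd is written, and the
 * rest of the destination register (bytes esz..vlenb) is filled with 1s
 * when the tail-agnostic bit is set.  vredsum_model() is a hypothetical
 * stand-in with a plain bool mask.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void vredsum_model(int32_t *vd, const int32_t *vs1, const int32_t *vs2,
                          const bool *mask, uint32_t vl, uint32_t vlenb,
                          bool vm, bool vta)
{
    int32_t s1 = vs1[0];
    for (uint32_t i = 0; i < vl; i++) {
        if (!vm && !mask[i]) {
            continue;                        /* inactive, no contribution */
        }
        s1 += vs2[i];
    }
    vd[0] = s1;
    if (vta) {
        memset((uint8_t *)vd + sizeof(*vd), 0xff, vlenb - sizeof(*vd));
    }
}

int main(void)
{
    int32_t vd[4] = {0}, vs1[1] = {100}, vs2[4] = {1, 2, 3, 4};
    bool mask[4] = {true, true, false, true};
    vredsum_model(vd, vs1, vs2, mask, 4, sizeof(vd), false, true);
    printf("%d %08x\n", vd[0], (unsigned)vd[1]);  /* 107 ffffffff */
    return 0;
}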
-GEN_VEXT_RED(vredsum_vs_d, int64_t, int64_t, H8, H8, DO_ADD, clearq) +GEN_VEXT_RED(vredsum_vs_b, int8_t, int8_t, H1, H1, DO_ADD) +GEN_VEXT_RED(vredsum_vs_h, int16_t, int16_t, H2, H2, DO_ADD) +GEN_VEXT_RED(vredsum_vs_w, int32_t, int32_t, H4, H4, DO_ADD) +GEN_VEXT_RED(vredsum_vs_d, int64_t, int64_t, H8, H8, DO_ADD) /* vd[0] = maxu(vs1[0], vs2[*]) */ -GEN_VEXT_RED(vredmaxu_vs_b, uint8_t, uint8_t, H1, H1, DO_MAX, clearb) -GEN_VEXT_RED(vredmaxu_vs_h, uint16_t, uint16_t, H2, H2, DO_MAX, clearh) -GEN_VEXT_RED(vredmaxu_vs_w, uint32_t, uint32_t, H4, H4, DO_MAX, clearl) -GEN_VEXT_RED(vredmaxu_vs_d, uint64_t, uint64_t, H8, H8, DO_MAX, clearq) +GEN_VEXT_RED(vredmaxu_vs_b, uint8_t, uint8_t, H1, H1, DO_MAX) +GEN_VEXT_RED(vredmaxu_vs_h, uint16_t, uint16_t, H2, H2, DO_MAX) +GEN_VEXT_RED(vredmaxu_vs_w, uint32_t, uint32_t, H4, H4, DO_MAX) +GEN_VEXT_RED(vredmaxu_vs_d, uint64_t, uint64_t, H8, H8, DO_MAX) /* vd[0] = max(vs1[0], vs2[*]) */ -GEN_VEXT_RED(vredmax_vs_b, int8_t, int8_t, H1, H1, DO_MAX, clearb) -GEN_VEXT_RED(vredmax_vs_h, int16_t, int16_t, H2, H2, DO_MAX, clearh) -GEN_VEXT_RED(vredmax_vs_w, int32_t, int32_t, H4, H4, DO_MAX, clearl) -GEN_VEXT_RED(vredmax_vs_d, int64_t, int64_t, H8, H8, DO_MAX, clearq) +GEN_VEXT_RED(vredmax_vs_b, int8_t, int8_t, H1, H1, DO_MAX) +GEN_VEXT_RED(vredmax_vs_h, int16_t, int16_t, H2, H2, DO_MAX) +GEN_VEXT_RED(vredmax_vs_w, int32_t, int32_t, H4, H4, DO_MAX) +GEN_VEXT_RED(vredmax_vs_d, int64_t, int64_t, H8, H8, DO_MAX) /* vd[0] = minu(vs1[0], vs2[*]) */ -GEN_VEXT_RED(vredminu_vs_b, uint8_t, uint8_t, H1, H1, DO_MIN, clearb) -GEN_VEXT_RED(vredminu_vs_h, uint16_t, uint16_t, H2, H2, DO_MIN, clearh) -GEN_VEXT_RED(vredminu_vs_w, uint32_t, uint32_t, H4, H4, DO_MIN, clearl) -GEN_VEXT_RED(vredminu_vs_d, uint64_t, uint64_t, H8, H8, DO_MIN, clearq) +GEN_VEXT_RED(vredminu_vs_b, uint8_t, uint8_t, H1, H1, DO_MIN) +GEN_VEXT_RED(vredminu_vs_h, uint16_t, uint16_t, H2, H2, DO_MIN) +GEN_VEXT_RED(vredminu_vs_w, uint32_t, uint32_t, H4, H4, DO_MIN) +GEN_VEXT_RED(vredminu_vs_d, uint64_t, uint64_t, H8, H8, DO_MIN) /* vd[0] = min(vs1[0], vs2[*]) */ -GEN_VEXT_RED(vredmin_vs_b, int8_t, int8_t, H1, H1, DO_MIN, clearb) -GEN_VEXT_RED(vredmin_vs_h, int16_t, int16_t, H2, H2, DO_MIN, clearh) -GEN_VEXT_RED(vredmin_vs_w, int32_t, int32_t, H4, H4, DO_MIN, clearl) -GEN_VEXT_RED(vredmin_vs_d, int64_t, int64_t, H8, H8, DO_MIN, clearq) +GEN_VEXT_RED(vredmin_vs_b, int8_t, int8_t, H1, H1, DO_MIN) +GEN_VEXT_RED(vredmin_vs_h, int16_t, int16_t, H2, H2, DO_MIN) +GEN_VEXT_RED(vredmin_vs_w, int32_t, int32_t, H4, H4, DO_MIN) +GEN_VEXT_RED(vredmin_vs_d, int64_t, int64_t, H8, H8, DO_MIN) /* vd[0] = and(vs1[0], vs2[*]) */ -GEN_VEXT_RED(vredand_vs_b, int8_t, int8_t, H1, H1, DO_AND, clearb) -GEN_VEXT_RED(vredand_vs_h, int16_t, int16_t, H2, H2, DO_AND, clearh) -GEN_VEXT_RED(vredand_vs_w, int32_t, int32_t, H4, H4, DO_AND, clearl) -GEN_VEXT_RED(vredand_vs_d, int64_t, int64_t, H8, H8, DO_AND, clearq) +GEN_VEXT_RED(vredand_vs_b, int8_t, int8_t, H1, H1, DO_AND) +GEN_VEXT_RED(vredand_vs_h, int16_t, int16_t, H2, H2, DO_AND) +GEN_VEXT_RED(vredand_vs_w, int32_t, int32_t, H4, H4, DO_AND) +GEN_VEXT_RED(vredand_vs_d, int64_t, int64_t, H8, H8, DO_AND) /* vd[0] = or(vs1[0], vs2[*]) */ -GEN_VEXT_RED(vredor_vs_b, int8_t, int8_t, H1, H1, DO_OR, clearb) -GEN_VEXT_RED(vredor_vs_h, int16_t, int16_t, H2, H2, DO_OR, clearh) -GEN_VEXT_RED(vredor_vs_w, int32_t, int32_t, H4, H4, DO_OR, clearl) -GEN_VEXT_RED(vredor_vs_d, int64_t, int64_t, H8, H8, DO_OR, clearq) +GEN_VEXT_RED(vredor_vs_b, int8_t, int8_t, H1, H1, DO_OR) +GEN_VEXT_RED(vredor_vs_h, int16_t, 
int16_t, H2, H2, DO_OR) +GEN_VEXT_RED(vredor_vs_w, int32_t, int32_t, H4, H4, DO_OR) +GEN_VEXT_RED(vredor_vs_d, int64_t, int64_t, H8, H8, DO_OR) /* vd[0] = xor(vs1[0], vs2[*]) */ -GEN_VEXT_RED(vredxor_vs_b, int8_t, int8_t, H1, H1, DO_XOR, clearb) -GEN_VEXT_RED(vredxor_vs_h, int16_t, int16_t, H2, H2, DO_XOR, clearh) -GEN_VEXT_RED(vredxor_vs_w, int32_t, int32_t, H4, H4, DO_XOR, clearl) -GEN_VEXT_RED(vredxor_vs_d, int64_t, int64_t, H8, H8, DO_XOR, clearq) +GEN_VEXT_RED(vredxor_vs_b, int8_t, int8_t, H1, H1, DO_XOR) +GEN_VEXT_RED(vredxor_vs_h, int16_t, int16_t, H2, H2, DO_XOR) +GEN_VEXT_RED(vredxor_vs_w, int32_t, int32_t, H4, H4, DO_XOR) +GEN_VEXT_RED(vredxor_vs_d, int64_t, int64_t, H8, H8, DO_XOR) /* Vector Widening Integer Reduction Instructions */ /* signed sum reduction into double-width accumulator */ -GEN_VEXT_RED(vwredsum_vs_b, int16_t, int8_t, H2, H1, DO_ADD, clearh) -GEN_VEXT_RED(vwredsum_vs_h, int32_t, int16_t, H4, H2, DO_ADD, clearl) -GEN_VEXT_RED(vwredsum_vs_w, int64_t, int32_t, H8, H4, DO_ADD, clearq) +GEN_VEXT_RED(vwredsum_vs_b, int16_t, int8_t, H2, H1, DO_ADD) +GEN_VEXT_RED(vwredsum_vs_h, int32_t, int16_t, H4, H2, DO_ADD) +GEN_VEXT_RED(vwredsum_vs_w, int64_t, int32_t, H8, H4, DO_ADD) /* Unsigned sum reduction into double-width accumulator */ -GEN_VEXT_RED(vwredsumu_vs_b, uint16_t, uint8_t, H2, H1, DO_ADD, clearh) -GEN_VEXT_RED(vwredsumu_vs_h, uint32_t, uint16_t, H4, H2, DO_ADD, clearl) -GEN_VEXT_RED(vwredsumu_vs_w, uint64_t, uint32_t, H8, H4, DO_ADD, clearq) +GEN_VEXT_RED(vwredsumu_vs_b, uint16_t, uint8_t, H2, H1, DO_ADD) +GEN_VEXT_RED(vwredsumu_vs_h, uint32_t, uint16_t, H4, H2, DO_ADD) +GEN_VEXT_RED(vwredsumu_vs_w, uint64_t, uint32_t, H8, H4, DO_ADD) /* Vector Single-Width Floating-Point Reduction Instructions */ -#define GEN_VEXT_FRED(NAME, TD, TS2, HD, HS2, OP, CLEAR_FN)\ +#define GEN_VEXT_FRED(NAME, TD, TS2, HD, HS2, OP) \ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ void *vs2, CPURISCVState *env, \ uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ + uint32_t esz = sizeof(TD); \ + uint32_t vlenb = simd_maxsz(desc); \ + uint32_t vta = vext_vta(desc); \ uint32_t i; \ - uint32_t tot = env_archcpu(env)->cfg.vlen / 8; \ TD s1 = *((TD *)vs1 + HD(0)); \ \ - for (i = 0; i < vl; i++) { \ + for (i = env->vstart; i < vl; i++) { \ TS2 s2 = *((TS2 *)vs2 + HS2(i)); \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ continue; \ } \ s1 = OP(s1, (TD)s2, &env->fp_status); \ } \ *((TD *)vd + HD(0)) = s1; \ - CLEAR_FN(vd, 1, sizeof(TD), tot); \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, esz, vlenb); \ } /* Unordered sum */ -GEN_VEXT_FRED(vfredsum_vs_h, uint16_t, uint16_t, H2, H2, float16_add, clearh) -GEN_VEXT_FRED(vfredsum_vs_w, uint32_t, uint32_t, H4, H4, float32_add, clearl) -GEN_VEXT_FRED(vfredsum_vs_d, uint64_t, uint64_t, H8, H8, float64_add, clearq) +GEN_VEXT_FRED(vfredusum_vs_h, uint16_t, uint16_t, H2, H2, float16_add) +GEN_VEXT_FRED(vfredusum_vs_w, uint32_t, uint32_t, H4, H4, float32_add) +GEN_VEXT_FRED(vfredusum_vs_d, uint64_t, uint64_t, H8, H8, float64_add) + +/* Ordered sum */ +GEN_VEXT_FRED(vfredosum_vs_h, uint16_t, uint16_t, H2, H2, float16_add) +GEN_VEXT_FRED(vfredosum_vs_w, uint32_t, uint32_t, H4, H4, float32_add) +GEN_VEXT_FRED(vfredosum_vs_d, uint64_t, uint64_t, H8, H8, float64_add) /* Maximum value */ -GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2, float16_maxnum, clearh) -GEN_VEXT_FRED(vfredmax_vs_w, uint32_t, uint32_t, 
H4, H4, float32_maxnum, clearl) -GEN_VEXT_FRED(vfredmax_vs_d, uint64_t, uint64_t, H8, H8, float64_maxnum, clearq) +GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2, float16_maximum_number) +GEN_VEXT_FRED(vfredmax_vs_w, uint32_t, uint32_t, H4, H4, float32_maximum_number) +GEN_VEXT_FRED(vfredmax_vs_d, uint64_t, uint64_t, H8, H8, float64_maximum_number) /* Minimum value */ -GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2, H2, float16_minnum, clearh) -GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4, float32_minnum, clearl) -GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, float64_minnum, clearq) +GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2, H2, float16_minimum_number) +GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4, float32_minimum_number) +GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, float64_minimum_number) + +/* Vector Widening Floating-Point Add Instructions */ +static uint32_t fwadd16(uint32_t a, uint16_t b, float_status *s) +{ + return float32_add(a, float16_to_float32(b, true, s), s); +} + +static uint64_t fwadd32(uint64_t a, uint32_t b, float_status *s) +{ + return float64_add(a, float32_to_float64(b, s), s); +} /* Vector Widening Floating-Point Reduction Instructions */ -/* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */ -void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1, - void *vs2, CPURISCVState *env, uint32_t desc) -{ - uint32_t mlen = vext_mlen(desc); - uint32_t vm = vext_vm(desc); - uint32_t vl = env->vl; - uint32_t i; - uint32_t tot = env_archcpu(env)->cfg.vlen / 8; - uint32_t s1 = *((uint32_t *)vs1 + H4(0)); - - for (i = 0; i < vl; i++) { - uint16_t s2 = *((uint16_t *)vs2 + H2(i)); - if (!vm && !vext_elem_mask(v0, mlen, i)) { - continue; - } - s1 = float32_add(s1, float16_to_float32(s2, true, &env->fp_status), - &env->fp_status); - } - *((uint32_t *)vd + H4(0)) = s1; - clearl(vd, 1, sizeof(uint32_t), tot); -} - -void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1, - void *vs2, CPURISCVState *env, uint32_t desc) -{ - uint32_t mlen = vext_mlen(desc); - uint32_t vm = vext_vm(desc); - uint32_t vl = env->vl; - uint32_t i; - uint32_t tot = env_archcpu(env)->cfg.vlen / 8; - uint64_t s1 = *((uint64_t *)vs1); - - for (i = 0; i < vl; i++) { - uint32_t s2 = *((uint32_t *)vs2 + H4(i)); - if (!vm && !vext_elem_mask(v0, mlen, i)) { - continue; - } - s1 = float64_add(s1, float32_to_float64(s2, &env->fp_status), - &env->fp_status); - } - *((uint64_t *)vd) = s1; - clearq(vd, 1, sizeof(uint64_t), tot); -} +/* Ordered/unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */ +GEN_VEXT_FRED(vfwredusum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16) +GEN_VEXT_FRED(vfwredusum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32) +GEN_VEXT_FRED(vfwredosum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16) +GEN_VEXT_FRED(vfwredosum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32) /* *** Vector Mask Operations @@ -4483,19 +4760,26 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ void *vs2, CPURISCVState *env, \ uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ uint32_t vl = env->vl; \ + uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t i; \ int a, b; \ \ - for (i = 0; i < vl; i++) { \ - a = vext_elem_mask(vs1, mlen, i); \ - b = vext_elem_mask(vs2, mlen, i); \ - vext_set_elem_mask(vd, mlen, i, OP(b, a)); \ + for (i = env->vstart; i < vl; i++) { \ + a = vext_elem_mask(vs1, i); \ + b = vext_elem_mask(vs2, i); \ + vext_set_elem_mask(vd, 
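/*
 * Illustrative sketch, not part of this patch: the promote-then-add step
 * that fwadd16/fwadd32 plug into GEN_VEXT_FRED.  Each SEW-wide source
 * element is widened before being added to the 2*SEW accumulator, so the
 * whole reduction runs at the wider precision.  fwadd32_model() is a
 * hypothetical stand-in using native C floats instead of softfloat.
 */
#include <stdio.h>

static double fwadd32_model(double acc, float elem)
{
    return acc + (double)elem;               /* promote, then add */
}

int main(void)
{
    float vs2[3] = {0.1f, 0.2f, 0.3f};
    double acc = 0.0;                        /* the seed is vs1[0] in the helper */
    for (int i = 0; i < 3; i++) {
        acc = fwadd32_model(acc, vs2[i]);
    }
    printf("%.17g\n", acc);
    return 0;
}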
i, OP(b, a)); \ } \ - for (; i < vlmax; i++) { \ - vext_set_elem_mask(vd, mlen, i, 0); \ + env->vstart = 0; \ + /* mask destination register are always tail- \ + * agnostic \ + */ \ + /* set tail elements to 1s */ \ + if (vta_all_1s) { \ + for (; i < total_elems; i++) { \ + vext_set_elem_mask(vd, i, 1); \ + } \ } \ } @@ -4507,49 +4791,49 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ GEN_VEXT_MASK_VV(vmand_mm, DO_AND) GEN_VEXT_MASK_VV(vmnand_mm, DO_NAND) -GEN_VEXT_MASK_VV(vmandnot_mm, DO_ANDNOT) +GEN_VEXT_MASK_VV(vmandn_mm, DO_ANDNOT) GEN_VEXT_MASK_VV(vmxor_mm, DO_XOR) GEN_VEXT_MASK_VV(vmor_mm, DO_OR) GEN_VEXT_MASK_VV(vmnor_mm, DO_NOR) -GEN_VEXT_MASK_VV(vmornot_mm, DO_ORNOT) +GEN_VEXT_MASK_VV(vmorn_mm, DO_ORNOT) GEN_VEXT_MASK_VV(vmxnor_mm, DO_XNOR) -/* Vector mask population count vmpopc */ -target_ulong HELPER(vmpopc_m)(void *v0, void *vs2, CPURISCVState *env, - uint32_t desc) +/* Vector count population in mask vcpop */ +target_ulong HELPER(vcpop_m)(void *v0, void *vs2, CPURISCVState *env, + uint32_t desc) { target_ulong cnt = 0; - uint32_t mlen = vext_mlen(desc); uint32_t vm = vext_vm(desc); uint32_t vl = env->vl; int i; - for (i = 0; i < vl; i++) { - if (vm || vext_elem_mask(v0, mlen, i)) { - if (vext_elem_mask(vs2, mlen, i)) { + for (i = env->vstart; i < vl; i++) { + if (vm || vext_elem_mask(v0, i)) { + if (vext_elem_mask(vs2, i)) { cnt++; } } } + env->vstart = 0; return cnt; } -/* vmfirst find-first-set mask bit*/ -target_ulong HELPER(vmfirst_m)(void *v0, void *vs2, CPURISCVState *env, - uint32_t desc) +/* vfirst find-first-set mask bit*/ +target_ulong HELPER(vfirst_m)(void *v0, void *vs2, CPURISCVState *env, + uint32_t desc) { - uint32_t mlen = vext_mlen(desc); uint32_t vm = vext_vm(desc); uint32_t vl = env->vl; int i; - for (i = 0; i < vl; i++) { - if (vm || vext_elem_mask(v0, mlen, i)) { - if (vext_elem_mask(vs2, mlen, i)) { + for (i = env->vstart; i < vl; i++) { + if (vm || vext_elem_mask(v0, i)) { + if (vext_elem_mask(vs2, i)) { return i; } } } + env->vstart = 0; return -1LL; } @@ -4562,39 +4846,49 @@ enum set_mask_type { static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env, uint32_t desc, enum set_mask_type type) { - uint32_t mlen = vext_mlen(desc); - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; uint32_t vm = vext_vm(desc); uint32_t vl = env->vl; + uint32_t total_elems = env_archcpu(env)->cfg.vlen; + uint32_t vta_all_1s = vext_vta_all_1s(desc); + uint32_t vma = vext_vma(desc); int i; bool first_mask_bit = false; - for (i = 0; i < vl; i++) { - if (!vm && !vext_elem_mask(v0, mlen, i)) { + for (i = env->vstart; i < vl; i++) { + if (!vm && !vext_elem_mask(v0, i)) { + /* set masked-off elements to 1s */ + if (vma) { + vext_set_elem_mask(vd, i, 1); + } continue; } /* write a zero to all following active elements */ if (first_mask_bit) { - vext_set_elem_mask(vd, mlen, i, 0); + vext_set_elem_mask(vd, i, 0); continue; } - if (vext_elem_mask(vs2, mlen, i)) { + if (vext_elem_mask(vs2, i)) { first_mask_bit = true; if (type == BEFORE_FIRST) { - vext_set_elem_mask(vd, mlen, i, 0); + vext_set_elem_mask(vd, i, 0); } else { - vext_set_elem_mask(vd, mlen, i, 1); + vext_set_elem_mask(vd, i, 1); } } else { if (type == ONLY_FIRST) { - vext_set_elem_mask(vd, mlen, i, 0); + vext_set_elem_mask(vd, i, 0); } else { - vext_set_elem_mask(vd, mlen, i, 1); + vext_set_elem_mask(vd, i, 1); } } } - for (; i < vlmax; i++) { - vext_set_elem_mask(vd, mlen, i, 0); + env->vstart = 0; + /* mask destination register are always tail-agnostic */ + /* set tail elements to 1s */ + if 
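/*
 * Illustrative sketch, not part of this patch: vcpop.m and vfirst.m over
 * plain bool arrays.  Only positions that are active under v0 (or all of
 * them when vm is set) are examined, mirroring the helpers above; the
 * *_model names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

static long vcpop_model(const bool *v0, const bool *vs2, unsigned vl, bool vm)
{
    long cnt = 0;
    for (unsigned i = 0; i < vl; i++) {
        if ((vm || v0[i]) && vs2[i]) {
            cnt++;
        }
    }
    return cnt;
}

static long vfirst_model(const bool *v0, const bool *vs2, unsigned vl, bool vm)
{
    for (unsigned i = 0; i < vl; i++) {
        if ((vm || v0[i]) && vs2[i]) {
            return i;                        /* first active set bit */
        }
    }
    return -1;                               /* none found */
}

int main(void)
{
    bool v0[6]  = {1, 1, 0, 1, 1, 1};
    bool vs2[6] = {0, 1, 1, 0, 1, 1};
    printf("%ld %ld\n", vcpop_model(v0, vs2, 6, false),
                        vfirst_model(v0, vs2, 6, false)); /* 3 1 */
    return 0;
}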
(vta_all_1s) { + for (; i < total_elems; i++) { + vext_set_elem_mask(vd, i, 1); + } } } @@ -4617,217 +4911,329 @@ void HELPER(vmsof_m)(void *vd, void *v0, void *vs2, CPURISCVState *env, } /* Vector Iota Instruction */ -#define GEN_VEXT_VIOTA_M(NAME, ETYPE, H, CLEAR_FN) \ +#define GEN_VEXT_VIOTA_M(NAME, ETYPE, H) \ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \ uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ + uint32_t esz = sizeof(ETYPE); \ + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ + uint32_t vma = vext_vma(desc); \ uint32_t sum = 0; \ int i; \ \ - for (i = 0; i < vl; i++) { \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + for (i = env->vstart; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \ continue; \ } \ *((ETYPE *)vd + H(i)) = sum; \ - if (vext_elem_mask(vs2, mlen, i)) { \ + if (vext_elem_mask(vs2, i)) { \ sum++; \ } \ } \ - CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ } -GEN_VEXT_VIOTA_M(viota_m_b, uint8_t, H1, clearb) -GEN_VEXT_VIOTA_M(viota_m_h, uint16_t, H2, clearh) -GEN_VEXT_VIOTA_M(viota_m_w, uint32_t, H4, clearl) -GEN_VEXT_VIOTA_M(viota_m_d, uint64_t, H8, clearq) +GEN_VEXT_VIOTA_M(viota_m_b, uint8_t, H1) +GEN_VEXT_VIOTA_M(viota_m_h, uint16_t, H2) +GEN_VEXT_VIOTA_M(viota_m_w, uint32_t, H4) +GEN_VEXT_VIOTA_M(viota_m_d, uint64_t, H8) /* Vector Element Index Instruction */ -#define GEN_VEXT_VID_V(NAME, ETYPE, H, CLEAR_FN) \ +#define GEN_VEXT_VID_V(NAME, ETYPE, H) \ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ + uint32_t esz = sizeof(ETYPE); \ + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ + uint32_t vma = vext_vma(desc); \ int i; \ \ - for (i = 0; i < vl; i++) { \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + for (i = env->vstart; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \ continue; \ } \ *((ETYPE *)vd + H(i)) = i; \ } \ - CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ } -GEN_VEXT_VID_V(vid_v_b, uint8_t, H1, clearb) -GEN_VEXT_VID_V(vid_v_h, uint16_t, H2, clearh) -GEN_VEXT_VID_V(vid_v_w, uint32_t, H4, clearl) -GEN_VEXT_VID_V(vid_v_d, uint64_t, H8, clearq) +GEN_VEXT_VID_V(vid_v_b, uint8_t, H1) +GEN_VEXT_VID_V(vid_v_h, uint16_t, H2) +GEN_VEXT_VID_V(vid_v_w, uint32_t, H4) +GEN_VEXT_VID_V(vid_v_d, uint64_t, H8) /* *** Vector Permutation Instructions */ /* Vector Slide Instructions */ -#define GEN_VEXT_VSLIDEUP_VX(NAME, ETYPE, H, CLEAR_FN) \ +#define GEN_VEXT_VSLIDEUP_VX(NAME, ETYPE, H) \ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - target_ulong offset = s1, i; \ + 
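/*
 * Illustrative sketch, not part of this patch: viota.m as an exclusive
 * prefix sum of the vs2 mask bits, and vid.v writing the element index,
 * both restricted to active elements.  Masked-off and tail handling
 * follow the same agnostic pattern as the earlier helpers and are not
 * repeated; the *_model names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

static void viota_model(unsigned *vd, const bool *v0, const bool *vs2,
                        unsigned vl, bool vm)
{
    unsigned sum = 0;
    for (unsigned i = 0; i < vl; i++) {
        if (!vm && !v0[i]) {
            continue;
        }
        vd[i] = sum;                         /* count of set bits so far */
        if (vs2[i]) {
            sum++;
        }
    }
}

static void vid_model(unsigned *vd, const bool *v0, unsigned vl, bool vm)
{
    for (unsigned i = 0; i < vl; i++) {
        if (vm || v0[i]) {
            vd[i] = i;                       /* element index */
        }
    }
}

int main(void)
{
    unsigned vd[5] = {0};
    bool v0[5] = {1, 1, 1, 1, 1}, vs2[5] = {0, 1, 1, 0, 1};
    viota_model(vd, v0, vs2, 5, false);
    printf("%u %u %u %u %u\n", vd[0], vd[1], vd[2], vd[3], vd[4]); /* 0 0 1 2 2 */
    vid_model(vd, v0, 5, false);
    printf("%u %u %u %u %u\n", vd[0], vd[1], vd[2], vd[3], vd[4]); /* 0 1 2 3 4 */
    return 0;
}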
uint32_t esz = sizeof(ETYPE); \ + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ + uint32_t vma = vext_vma(desc); \ + target_ulong offset = s1, i_min, i; \ \ - for (i = offset; i < vl; i++) { \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + i_min = MAX(env->vstart, offset); \ + for (i = i_min; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \ continue; \ } \ *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset)); \ } \ - CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ } /* vslideup.vx vd, vs2, rs1, vm # vd[i+rs1] = vs2[i] */ -GEN_VEXT_VSLIDEUP_VX(vslideup_vx_b, uint8_t, H1, clearb) -GEN_VEXT_VSLIDEUP_VX(vslideup_vx_h, uint16_t, H2, clearh) -GEN_VEXT_VSLIDEUP_VX(vslideup_vx_w, uint32_t, H4, clearl) -GEN_VEXT_VSLIDEUP_VX(vslideup_vx_d, uint64_t, H8, clearq) +GEN_VEXT_VSLIDEUP_VX(vslideup_vx_b, uint8_t, H1) +GEN_VEXT_VSLIDEUP_VX(vslideup_vx_h, uint16_t, H2) +GEN_VEXT_VSLIDEUP_VX(vslideup_vx_w, uint32_t, H4) +GEN_VEXT_VSLIDEUP_VX(vslideup_vx_d, uint64_t, H8) -#define GEN_VEXT_VSLIDEDOWN_VX(NAME, ETYPE, H, CLEAR_FN) \ +#define GEN_VEXT_VSLIDEDOWN_VX(NAME, ETYPE, H) \ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ + uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - target_ulong offset = s1, i; \ + uint32_t esz = sizeof(ETYPE); \ + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ + uint32_t vma = vext_vma(desc); \ + target_ulong i_max, i; \ \ - for (i = 0; i < vl; ++i) { \ - target_ulong j = i + offset; \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + i_max = MAX(MIN(s1 < vlmax ? vlmax - s1 : 0, vl), env->vstart); \ + for (i = env->vstart; i < i_max; ++i) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \ continue; \ } \ - *((ETYPE *)vd + H(i)) = j >= vlmax ? 
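/*
 * Illustrative sketch, not part of this patch: the index mapping of
 * vslideup.vx and vslidedown.vx with plain arrays and an always-true
 * mask.  Slide-up reads vs2[i - offset] starting at element `offset`;
 * slide-down reads vs2[i + offset] while the source index stays below
 * vlmax and writes zero afterwards, as in the rewritten helpers.  The
 * *_model names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static void vslideup_model(int32_t *vd, const int32_t *vs2,
                           uint32_t offset, uint32_t vl)
{
    for (uint32_t i = offset; i < vl; i++) {
        vd[i] = vs2[i - offset];
    }
}

static void vslidedown_model(int32_t *vd, const int32_t *vs2,
                             uint32_t offset, uint32_t vl, uint32_t vlmax)
{
    uint32_t i_max = offset < vlmax ? vlmax - offset : 0;
    if (i_max > vl) {
        i_max = vl;
    }
    for (uint32_t i = 0; i < i_max; i++) {
        vd[i] = vs2[i + offset];
    }
    for (uint32_t i = i_max; i < vl; i++) {
        vd[i] = 0;                           /* slid in from beyond vlmax */
    }
}

int main(void)
{
    int32_t vs2[4] = {10, 20, 30, 40}, up[4] = {0}, down[4] = {0};
    vslideup_model(up, vs2, 1, 4);           /* up:   0 10 20 30 (up[0] untouched) */
    vslidedown_model(down, vs2, 1, 4, 4);    /* down: 20 30 40 0 */
    printf("%d %d %d %d\n", up[0], up[1], up[2], up[3]);
    printf("%d %d %d %d\n", down[0], down[1], down[2], down[3]);
    return 0;
}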
0 : *((ETYPE *)vs2 + H(j)); \ + *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + s1)); \ } \ - CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ + \ + for (i = i_max; i < vl; ++i) { \ + if (vm || vext_elem_mask(v0, i)) { \ + *((ETYPE *)vd + H(i)) = 0; \ + } \ + } \ + \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ } /* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */ -GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_b, uint8_t, H1, clearb) -GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_h, uint16_t, H2, clearh) -GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w, uint32_t, H4, clearl) -GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8, clearq) +GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_b, uint8_t, H1) +GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_h, uint16_t, H2) +GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w, uint32_t, H4) +GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8) -#define GEN_VEXT_VSLIDE1UP_VX(NAME, ETYPE, H, CLEAR_FN) \ -void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ - CPURISCVState *env, uint32_t desc) \ -{ \ - uint32_t mlen = vext_mlen(desc); \ - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ - uint32_t vm = vext_vm(desc); \ - uint32_t vl = env->vl; \ - uint32_t i; \ - \ - for (i = 0; i < vl; i++) { \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ - continue; \ - } \ - if (i == 0) { \ - *((ETYPE *)vd + H(i)) = s1; \ - } else { \ - *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1)); \ - } \ - } \ - CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ +#define GEN_VEXT_VSLIE1UP(BITWIDTH, H) \ +static void vslide1up_##BITWIDTH(void *vd, void *v0, target_ulong s1, \ + void *vs2, CPURISCVState *env, uint32_t desc) \ +{ \ + typedef uint##BITWIDTH##_t ETYPE; \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t esz = sizeof(ETYPE); \ + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ + uint32_t vma = vext_vma(desc); \ + uint32_t i; \ + \ + for (i = env->vstart; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \ + continue; \ + } \ + if (i == 0) { \ + *((ETYPE *)vd + H(i)) = s1; \ + } else { \ + *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1)); \ + } \ + } \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ +} + +GEN_VEXT_VSLIE1UP(8, H1) +GEN_VEXT_VSLIE1UP(16, H2) +GEN_VEXT_VSLIE1UP(32, H4) +GEN_VEXT_VSLIE1UP(64, H8) + +#define GEN_VEXT_VSLIDE1UP_VX(NAME, BITWIDTH) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + vslide1up_##BITWIDTH(vd, v0, s1, vs2, env, desc); \ } /* vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] */ -GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, uint8_t, H1, clearb) -GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, uint16_t, H2, clearh) -GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, uint32_t, H4, clearl) -GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, uint64_t, H8, clearq) +GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, 8) +GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, 16) +GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, 32) +GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, 64) -#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ETYPE, H, CLEAR_FN) \ -void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ - CPURISCVState *env, uint32_t desc) \ -{ \ - uint32_t mlen = vext_mlen(desc); \ - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ - 
uint32_t vm = vext_vm(desc); \ - uint32_t vl = env->vl; \ - uint32_t i; \ - \ - for (i = 0; i < vl; i++) { \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ - continue; \ - } \ - if (i == vl - 1) { \ - *((ETYPE *)vd + H(i)) = s1; \ - } else { \ - *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1)); \ - } \ - } \ - CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ +#define GEN_VEXT_VSLIDE1DOWN(BITWIDTH, H) \ +static void vslide1down_##BITWIDTH(void *vd, void *v0, target_ulong s1, \ + void *vs2, CPURISCVState *env, uint32_t desc) \ +{ \ + typedef uint##BITWIDTH##_t ETYPE; \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t esz = sizeof(ETYPE); \ + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ + uint32_t vma = vext_vma(desc); \ + uint32_t i; \ + \ + for (i = env->vstart; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \ + continue; \ + } \ + if (i == vl - 1) { \ + *((ETYPE *)vd + H(i)) = s1; \ + } else { \ + *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1)); \ + } \ + } \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ +} + +GEN_VEXT_VSLIDE1DOWN(8, H1) +GEN_VEXT_VSLIDE1DOWN(16, H2) +GEN_VEXT_VSLIDE1DOWN(32, H4) +GEN_VEXT_VSLIDE1DOWN(64, H8) + +#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, BITWIDTH) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + vslide1down_##BITWIDTH(vd, v0, s1, vs2, env, desc); \ } /* vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] */ -GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, uint8_t, H1, clearb) -GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, uint16_t, H2, clearh) -GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, uint32_t, H4, clearl) -GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8, clearq) +GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, 8) +GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, 16) +GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, 32) +GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, 64) + +/* Vector Floating-Point Slide Instructions */ +#define GEN_VEXT_VFSLIDE1UP_VF(NAME, BITWIDTH) \ +void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + vslide1up_##BITWIDTH(vd, v0, s1, vs2, env, desc); \ +} + +/* vfslide1up.vf vd, vs2, rs1, vm # vd[0]=f[rs1], vd[i+1] = vs2[i] */ +GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_h, 16) +GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_w, 32) +GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_d, 64) + +#define GEN_VEXT_VFSLIDE1DOWN_VF(NAME, BITWIDTH) \ +void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + vslide1down_##BITWIDTH(vd, v0, s1, vs2, env, desc); \ +} + +/* vfslide1down.vf vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=f[rs1] */ +GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_h, 16) +GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_w, 32) +GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_d, 64) /* Vector Register Gather Instruction */ -#define GEN_VEXT_VRGATHER_VV(NAME, ETYPE, H, CLEAR_FN) \ +#define GEN_VEXT_VRGATHER_VV(NAME, TS1, TS2, HS1, HS2) \ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ + uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(TS2))); \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ + 
uint32_t esz = sizeof(TS2); \ + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ + uint32_t vma = vext_vma(desc); \ uint64_t index; \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + for (i = env->vstart; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \ continue; \ } \ - index = *((ETYPE *)vs1 + H(i)); \ + index = *((TS1 *)vs1 + HS1(i)); \ if (index >= vlmax) { \ - *((ETYPE *)vd + H(i)) = 0; \ + *((TS2 *)vd + HS2(i)) = 0; \ } else { \ - *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index)); \ + *((TS2 *)vd + HS2(i)) = *((TS2 *)vs2 + HS2(index)); \ } \ } \ - CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ } /* vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; */ -GEN_VEXT_VRGATHER_VV(vrgather_vv_b, uint8_t, H1, clearb) -GEN_VEXT_VRGATHER_VV(vrgather_vv_h, uint16_t, H2, clearh) -GEN_VEXT_VRGATHER_VV(vrgather_vv_w, uint32_t, H4, clearl) -GEN_VEXT_VRGATHER_VV(vrgather_vv_d, uint64_t, H8, clearq) +GEN_VEXT_VRGATHER_VV(vrgather_vv_b, uint8_t, uint8_t, H1, H1) +GEN_VEXT_VRGATHER_VV(vrgather_vv_h, uint16_t, uint16_t, H2, H2) +GEN_VEXT_VRGATHER_VV(vrgather_vv_w, uint32_t, uint32_t, H4, H4) +GEN_VEXT_VRGATHER_VV(vrgather_vv_d, uint64_t, uint64_t, H8, H8) -#define GEN_VEXT_VRGATHER_VX(NAME, ETYPE, H, CLEAR_FN) \ +GEN_VEXT_VRGATHER_VV(vrgatherei16_vv_b, uint16_t, uint8_t, H2, H1) +GEN_VEXT_VRGATHER_VV(vrgatherei16_vv_h, uint16_t, uint16_t, H2, H2) +GEN_VEXT_VRGATHER_VV(vrgatherei16_vv_w, uint16_t, uint32_t, H2, H4) +GEN_VEXT_VRGATHER_VV(vrgatherei16_vv_d, uint16_t, uint64_t, H2, H8) + +#define GEN_VEXT_VRGATHER_VX(NAME, ETYPE, H) \ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ + uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ + uint32_t esz = sizeof(ETYPE); \ + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ + uint32_t vma = vext_vma(desc); \ uint64_t index = s1; \ uint32_t i; \ \ - for (i = 0; i < vl; i++) { \ - if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + for (i = env->vstart; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \ continue; \ } \ if (index >= vlmax) { \ @@ -4836,37 +5242,98 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index)); \ } \ } \ - CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ } /* vd[i] = (x[rs1] >= VLMAX) ? 
0 : vs2[rs1] */ -GEN_VEXT_VRGATHER_VX(vrgather_vx_b, uint8_t, H1, clearb) -GEN_VEXT_VRGATHER_VX(vrgather_vx_h, uint16_t, H2, clearh) -GEN_VEXT_VRGATHER_VX(vrgather_vx_w, uint32_t, H4, clearl) -GEN_VEXT_VRGATHER_VX(vrgather_vx_d, uint64_t, H8, clearq) +GEN_VEXT_VRGATHER_VX(vrgather_vx_b, uint8_t, H1) +GEN_VEXT_VRGATHER_VX(vrgather_vx_h, uint16_t, H2) +GEN_VEXT_VRGATHER_VX(vrgather_vx_w, uint32_t, H4) +GEN_VEXT_VRGATHER_VX(vrgather_vx_d, uint64_t, H8) /* Vector Compress Instruction */ -#define GEN_VEXT_VCOMPRESS_VM(NAME, ETYPE, H, CLEAR_FN) \ +#define GEN_VEXT_VCOMPRESS_VM(NAME, ETYPE, H) \ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ CPURISCVState *env, uint32_t desc) \ { \ - uint32_t mlen = vext_mlen(desc); \ - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ uint32_t vl = env->vl; \ + uint32_t esz = sizeof(ETYPE); \ + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ uint32_t num = 0, i; \ \ - for (i = 0; i < vl; i++) { \ - if (!vext_elem_mask(vs1, mlen, i)) { \ + for (i = env->vstart; i < vl; i++) { \ + if (!vext_elem_mask(vs1, i)) { \ continue; \ } \ *((ETYPE *)vd + H(num)) = *((ETYPE *)vs2 + H(i)); \ num++; \ } \ - CLEAR_FN(vd, num, num * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ } /* Compress into vd elements of vs2 where vs1 is enabled */ -GEN_VEXT_VCOMPRESS_VM(vcompress_vm_b, uint8_t, H1, clearb) -GEN_VEXT_VCOMPRESS_VM(vcompress_vm_h, uint16_t, H2, clearh) -GEN_VEXT_VCOMPRESS_VM(vcompress_vm_w, uint32_t, H4, clearl) -GEN_VEXT_VCOMPRESS_VM(vcompress_vm_d, uint64_t, H8, clearq) +GEN_VEXT_VCOMPRESS_VM(vcompress_vm_b, uint8_t, H1) +GEN_VEXT_VCOMPRESS_VM(vcompress_vm_h, uint16_t, H2) +GEN_VEXT_VCOMPRESS_VM(vcompress_vm_w, uint32_t, H4) +GEN_VEXT_VCOMPRESS_VM(vcompress_vm_d, uint64_t, H8) + +/* Vector Whole Register Move */ +void HELPER(vmvr_v)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc) +{ + /* EEW = SEW */ + uint32_t maxsz = simd_maxsz(desc); + uint32_t sewb = 1 << FIELD_EX64(env->vtype, VTYPE, VSEW); + uint32_t startb = env->vstart * sewb; + uint32_t i = startb; + + memcpy((uint8_t *)vd + H1(i), + (uint8_t *)vs2 + H1(i), + maxsz - startb); + + env->vstart = 0; +} + +/* Vector Integer Extension */ +#define GEN_VEXT_INT_EXT(NAME, ETYPE, DTYPE, HD, HS1) \ +void HELPER(NAME)(void *vd, void *v0, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t vl = env->vl; \ + uint32_t vm = vext_vm(desc); \ + uint32_t esz = sizeof(ETYPE); \ + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \ + uint32_t vta = vext_vta(desc); \ + uint32_t vma = vext_vma(desc); \ + uint32_t i; \ + \ + for (i = env->vstart; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, i)) { \ + /* set masked-off elements to 1s */ \ + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \ + continue; \ + } \ + *((ETYPE *)vd + HD(i)) = *((DTYPE *)vs2 + HS1(i)); \ + } \ + env->vstart = 0; \ + /* set tail elements to 1s */ \ + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ +} + +GEN_VEXT_INT_EXT(vzext_vf2_h, uint16_t, uint8_t, H2, H1) +GEN_VEXT_INT_EXT(vzext_vf2_w, uint32_t, uint16_t, H4, H2) +GEN_VEXT_INT_EXT(vzext_vf2_d, uint64_t, uint32_t, H8, H4) +GEN_VEXT_INT_EXT(vzext_vf4_w, uint32_t, uint8_t, H4, H1) +GEN_VEXT_INT_EXT(vzext_vf4_d, uint64_t, uint16_t, H8, H2) +GEN_VEXT_INT_EXT(vzext_vf8_d, uint64_t, uint8_t, H8, H1) + +GEN_VEXT_INT_EXT(vsext_vf2_h, int16_t, int8_t, H2, H1) 
+GEN_VEXT_INT_EXT(vsext_vf2_w, int32_t, int16_t, H4, H2) +GEN_VEXT_INT_EXT(vsext_vf2_d, int64_t, int32_t, H8, H4) +GEN_VEXT_INT_EXT(vsext_vf4_w, int32_t, int8_t, H4, H1) +GEN_VEXT_INT_EXT(vsext_vf4_d, int64_t, int16_t, H8, H2) +GEN_VEXT_INT_EXT(vsext_vf8_d, int64_t, int8_t, H8, H1) diff --git a/target/rx/cpu-qom.h b/target/rx/cpu-qom.h index 7310558e0c..4533759d96 100644 --- a/target/rx/cpu-qom.h +++ b/target/rx/cpu-qom.h @@ -26,8 +26,7 @@ #define TYPE_RX62N_CPU RX_CPU_TYPE_NAME("rx62n") -OBJECT_DECLARE_TYPE(RXCPU, RXCPUClass, - RX_CPU) +OBJECT_DECLARE_CPU_TYPE(RXCPU, RXCPUClass, RX_CPU) /* * RXCPUClass: @@ -45,6 +44,4 @@ struct RXCPUClass { DeviceReset parent_reset; }; -#define CPUArchState struct CPURXState - #endif diff --git a/target/rx/cpu.c b/target/rx/cpu.c index 96cc96e514..9003c6e9fe 100644 --- a/target/rx/cpu.c +++ b/target/rx/cpu.c @@ -20,7 +20,6 @@ #include "qemu/qemu-print.h" #include "qapi/error.h" #include "cpu.h" -#include "qemu-common.h" #include "migration/vmstate.h" #include "exec/exec-all.h" #include "hw/loader.h" @@ -33,12 +32,28 @@ static void rx_cpu_set_pc(CPUState *cs, vaddr value) cpu->env.pc = value; } +static vaddr rx_cpu_get_pc(CPUState *cs) +{ + RXCPU *cpu = RX_CPU(cs); + + return cpu->env.pc; +} + static void rx_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { RXCPU *cpu = RX_CPU(cs); - cpu->env.pc = tb->pc; + cpu->env.pc = tb_pc(tb); +} + +static void rx_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + RXCPU *cpu = RX_CPU(cs); + + cpu->env.pc = data[0]; } static bool rx_cpu_has_work(CPUState *cs) @@ -186,10 +201,11 @@ static const struct SysemuCPUOps rx_sysemu_ops = { static const struct TCGCPUOps rx_tcg_ops = { .initialize = rx_translate_init, .synchronize_from_tb = rx_cpu_synchronize_from_tb, - .cpu_exec_interrupt = rx_cpu_exec_interrupt, + .restore_state_to_opc = rx_restore_state_to_opc, .tlb_fill = rx_cpu_tlb_fill, #ifndef CONFIG_USER_ONLY + .cpu_exec_interrupt = rx_cpu_exec_interrupt, .do_interrupt = rx_cpu_do_interrupt, #endif /* !CONFIG_USER_ONLY */ }; @@ -209,6 +225,7 @@ static void rx_cpu_class_init(ObjectClass *klass, void *data) cc->has_work = rx_cpu_has_work; cc->dump_state = rx_cpu_dump_state; cc->set_pc = rx_cpu_set_pc; + cc->get_pc = rx_cpu_get_pc; #ifndef CONFIG_USER_ONLY cc->sysemu_ops = &rx_sysemu_ops; diff --git a/target/rx/cpu.h b/target/rx/cpu.h index 0b4b998c7b..5655dffeff 100644 --- a/target/rx/cpu.h +++ b/target/rx/cpu.h @@ -20,11 +20,11 @@ #define RX_CPU_H #include "qemu/bitops.h" -#include "qemu-common.h" #include "hw/registerfields.h" #include "cpu-qom.h" #include "exec/cpu-defs.h" +#include "qemu/cpu-float.h" /* PSW define */ REG32(PSW, 0) @@ -66,7 +66,7 @@ enum { NUM_REGS = 16, }; -typedef struct CPURXState { +typedef struct CPUArchState { /* CPU registers */ uint32_t regs[NUM_REGS]; /* general registers */ uint32_t psw_o; /* O bit of status register */ @@ -106,7 +106,7 @@ typedef struct CPURXState { * * A RX CPU */ -struct RXCPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ @@ -115,30 +115,24 @@ struct RXCPU { CPURXState env; }; -typedef RXCPU ArchCPU; - -#define ENV_OFFSET offsetof(RXCPU, env) - #define RX_CPU_TYPE_SUFFIX "-" TYPE_RX_CPU #define RX_CPU_TYPE_NAME(model) model RX_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_RX_CPU const char *rx_crname(uint8_t cr); +#ifndef CONFIG_USER_ONLY void rx_cpu_do_interrupt(CPUState *cpu); bool rx_cpu_exec_interrupt(CPUState *cpu, int int_req); +#endif /* !CONFIG_USER_ONLY */ void 
rx_cpu_dump_state(CPUState *cpu, FILE *f, int flags); int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); hwaddr rx_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); void rx_translate_init(void); -int cpu_rx_signal_handler(int host_signum, void *pinfo, - void *puc); - void rx_cpu_list(void); void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte); -#define cpu_signal_handler cpu_rx_signal_handler #define cpu_list rx_cpu_list #include "exec/cpu-all.h" @@ -155,6 +149,7 @@ static inline void cpu_get_tb_cpu_state(CPURXState *env, target_ulong *pc, *pc = env->pc; *cs_base = 0; *flags = FIELD_DP32(0, PSW, PM, env->psw_pm); + *flags = FIELD_DP32(*flags, PSW, U, env->psw_u); } static inline int cpu_mmu_index(CPURXState *env, bool ifetch) diff --git a/target/rx/gdbstub.c b/target/rx/gdbstub.c index c811d4810b..7eb2059a84 100644 --- a/target/rx/gdbstub.c +++ b/target/rx/gdbstub.c @@ -16,7 +16,6 @@ * this program. If not, see . */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "cpu.h" #include "exec/gdbstub.h" diff --git a/target/rx/helper.c b/target/rx/helper.c index db6b07e389..f34945e7e2 100644 --- a/target/rx/helper.c +++ b/target/rx/helper.c @@ -40,6 +40,8 @@ void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte) env->psw_c = FIELD_EX32(psw, PSW, C); } +#ifndef CONFIG_USER_ONLY + #define INT_FLAGS (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR) void rx_cpu_do_interrupt(CPUState *cs) { @@ -142,6 +144,8 @@ bool rx_cpu_exec_interrupt(CPUState *cs, int interrupt_request) return false; } +#endif /* !CONFIG_USER_ONLY */ + hwaddr rx_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { return addr; diff --git a/target/rx/helper.h b/target/rx/helper.h index f0b7ebbbf7..ebb4739474 100644 --- a/target/rx/helper.h +++ b/target/rx/helper.h @@ -2,7 +2,6 @@ DEF_HELPER_1(raise_illegal_instruction, noreturn, env) DEF_HELPER_1(raise_access_fault, noreturn, env) DEF_HELPER_1(raise_privilege_violation, noreturn, env) DEF_HELPER_1(wait, noreturn, env) -DEF_HELPER_1(debug, noreturn, env) DEF_HELPER_2(rxint, noreturn, env, i32) DEF_HELPER_1(rxbrk, noreturn, env) DEF_HELPER_FLAGS_3(fadd, TCG_CALL_NO_WG, f32, env, f32, f32) diff --git a/target/rx/op_helper.c b/target/rx/op_helper.c index 4d315b4449..acce650185 100644 --- a/target/rx/op_helper.c +++ b/target/rx/op_helper.c @@ -24,8 +24,9 @@ #include "exec/cpu_ldst.h" #include "fpu/softfloat.h" -static inline void QEMU_NORETURN raise_exception(CPURXState *env, int index, - uintptr_t retaddr); +static inline G_NORETURN +void raise_exception(CPURXState *env, int index, + uintptr_t retaddr); static void _set_psw(CPURXState *env, uint32_t psw, uint32_t rte) { @@ -285,7 +286,7 @@ void helper_suntil(CPURXState *env, uint32_t sz) uint32_t tmp; tcg_debug_assert(sz < 3); if (env->regs[3] == 0) { - return ; + return; } do { tmp = cpu_ldufn[sz](env, env->regs[1], GETPC()); @@ -304,7 +305,7 @@ void helper_swhile(CPURXState *env, uint32_t sz) uint32_t tmp; tcg_debug_assert(sz < 3); if (env->regs[3] == 0) { - return ; + return; } do { tmp = cpu_ldufn[sz](env, env->regs[1], GETPC()); @@ -418,8 +419,9 @@ uint32_t helper_divu(CPURXState *env, uint32_t num, uint32_t den) } /* exception */ -static inline void QEMU_NORETURN raise_exception(CPURXState *env, int index, - uintptr_t retaddr) +static inline G_NORETURN +void raise_exception(CPURXState *env, int index, + uintptr_t retaddr) { CPUState *cs = env_cpu(env); @@ -427,44 +429,37 @@ static inline void QEMU_NORETURN 
raise_exception(CPURXState *env, int index, cpu_loop_exit_restore(cs, retaddr); } -void QEMU_NORETURN helper_raise_privilege_violation(CPURXState *env) +G_NORETURN void helper_raise_privilege_violation(CPURXState *env) { raise_exception(env, 20, GETPC()); } -void QEMU_NORETURN helper_raise_access_fault(CPURXState *env) +G_NORETURN void helper_raise_access_fault(CPURXState *env) { raise_exception(env, 21, GETPC()); } -void QEMU_NORETURN helper_raise_illegal_instruction(CPURXState *env) +G_NORETURN void helper_raise_illegal_instruction(CPURXState *env) { raise_exception(env, 23, GETPC()); } -void QEMU_NORETURN helper_wait(CPURXState *env) +G_NORETURN void helper_wait(CPURXState *env) { CPUState *cs = env_cpu(env); cs->halted = 1; env->in_sleep = 1; + env->psw_i = 1; raise_exception(env, EXCP_HLT, 0); } -void QEMU_NORETURN helper_debug(CPURXState *env) -{ - CPUState *cs = env_cpu(env); - - cs->exception_index = EXCP_DEBUG; - cpu_loop_exit(cs); -} - -void QEMU_NORETURN helper_rxint(CPURXState *env, uint32_t vec) +G_NORETURN void helper_rxint(CPURXState *env, uint32_t vec) { raise_exception(env, 0x100 + vec, 0); } -void QEMU_NORETURN helper_rxbrk(CPURXState *env) +G_NORETURN void helper_rxbrk(CPURXState *env) { raise_exception(env, 0x100, 0); } diff --git a/target/rx/translate.c b/target/rx/translate.c index a3cf720455..87a3f54adb 100644 --- a/target/rx/translate.c +++ b/target/rx/translate.c @@ -32,6 +32,7 @@ typedef struct DisasContext { DisasContextBase base; CPURXState *env; uint32_t pc; + uint32_t tb_flags; } DisasContext; typedef struct DisasCompare { @@ -150,11 +151,7 @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest) tcg_gen_exit_tb(dc->base.tb, n); } else { tcg_gen_movi_i32(cpu_pc, dest); - if (dc->base.singlestep_enabled) { - gen_helper_debug(cpu_env); - } else { - tcg_gen_lookup_and_goto_ptr(); - } + tcg_gen_lookup_and_goto_ptr(); } dc->base.is_jmp = DISAS_NORETURN; } @@ -235,7 +232,7 @@ static inline TCGv rx_load_source(DisasContext *ctx, TCGv mem, /* Processor mode check */ static int is_privileged(DisasContext *ctx, int is_exception) { - if (FIELD_EX32(ctx->base.tb->flags, PSW, PM)) { + if (FIELD_EX32(ctx->tb_flags, PSW, PM)) { if (is_exception) { gen_helper_raise_privilege_violation(cpu_env); } @@ -314,9 +311,8 @@ static void psw_cond(DisasCompare *dc, uint32_t cond) } } -static void move_from_cr(TCGv ret, int cr, uint32_t pc) +static void move_from_cr(DisasContext *ctx, TCGv ret, int cr, uint32_t pc) { - TCGv z = tcg_const_i32(0); switch (cr) { case 0: /* PSW */ gen_helper_pack_psw(ret, cpu_env); @@ -325,8 +321,11 @@ static void move_from_cr(TCGv ret, int cr, uint32_t pc) tcg_gen_movi_i32(ret, pc); break; case 2: /* USP */ - tcg_gen_movcond_i32(TCG_COND_NE, ret, - cpu_psw_u, z, cpu_sp, cpu_usp); + if (FIELD_EX32(ctx->tb_flags, PSW, U)) { + tcg_gen_mov_i32(ret, cpu_sp); + } else { + tcg_gen_mov_i32(ret, cpu_usp); + } break; case 3: /* FPSW */ tcg_gen_mov_i32(ret, cpu_fpsw); @@ -338,8 +337,11 @@ static void move_from_cr(TCGv ret, int cr, uint32_t pc) tcg_gen_mov_i32(ret, cpu_bpc); break; case 10: /* ISP */ - tcg_gen_movcond_i32(TCG_COND_EQ, ret, - cpu_psw_u, z, cpu_sp, cpu_isp); + if (FIELD_EX32(ctx->tb_flags, PSW, U)) { + tcg_gen_mov_i32(ret, cpu_isp); + } else { + tcg_gen_mov_i32(ret, cpu_sp); + } break; case 11: /* FINTV */ tcg_gen_mov_i32(ret, cpu_fintv); @@ -353,28 +355,31 @@ static void move_from_cr(TCGv ret, int cr, uint32_t pc) tcg_gen_movi_i32(ret, 0); break; } - tcg_temp_free(z); } static void move_to_cr(DisasContext *ctx, TCGv val, int cr) { - TCGv 
z; if (cr >= 8 && !is_privileged(ctx, 0)) { /* Some control registers can only be written in privileged mode. */ qemu_log_mask(LOG_GUEST_ERROR, "disallow control register write %s", rx_crname(cr)); return; } - z = tcg_const_i32(0); switch (cr) { case 0: /* PSW */ gen_helper_set_psw(cpu_env, val); + if (is_privileged(ctx, 0)) { + /* PSW.{I,U} may be updated here. exit TB. */ + ctx->base.is_jmp = DISAS_UPDATE; + } break; /* case 1: to PC not supported */ case 2: /* USP */ - tcg_gen_mov_i32(cpu_usp, val); - tcg_gen_movcond_i32(TCG_COND_NE, cpu_sp, - cpu_psw_u, z, cpu_usp, cpu_sp); + if (FIELD_EX32(ctx->tb_flags, PSW, U)) { + tcg_gen_mov_i32(cpu_sp, val); + } else { + tcg_gen_mov_i32(cpu_usp, val); + } break; case 3: /* FPSW */ gen_helper_set_fpsw(cpu_env, val); @@ -386,10 +391,11 @@ static void move_to_cr(DisasContext *ctx, TCGv val, int cr) tcg_gen_mov_i32(cpu_bpc, val); break; case 10: /* ISP */ - tcg_gen_mov_i32(cpu_isp, val); - /* if PSW.U is 0, copy isp to r0 */ - tcg_gen_movcond_i32(TCG_COND_EQ, cpu_sp, - cpu_psw_u, z, cpu_isp, cpu_sp); + if (FIELD_EX32(ctx->tb_flags, PSW, U)) { + tcg_gen_mov_i32(cpu_isp, val); + } else { + tcg_gen_mov_i32(cpu_sp, val); + } break; case 11: /* FINTV */ tcg_gen_mov_i32(cpu_fintv, val); @@ -402,7 +408,6 @@ static void move_to_cr(DisasContext *ctx, TCGv val, int cr) "Unimplement control register %d", cr); break; } - tcg_temp_free(z); } static void push(TCGv val) @@ -630,10 +635,6 @@ static bool trans_POPC(DisasContext *ctx, arg_POPC *a) val = tcg_temp_new(); pop(val); move_to_cr(ctx, val, a->cr); - if (a->cr == 0 && is_privileged(ctx, 0)) { - /* PSW.I may be updated here. exit TB. */ - ctx->base.is_jmp = DISAS_UPDATE; - } tcg_temp_free(val); return true; } @@ -686,7 +687,7 @@ static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a) { TCGv val; val = tcg_temp_new(); - move_from_cr(val, a->cr, ctx->pc); + move_from_cr(ctx, val, a->cr, ctx->pc); push(val); tcg_temp_free(val); return true; @@ -2164,7 +2165,12 @@ static inline void clrsetpsw(DisasContext *ctx, int cb, int val) ctx->base.is_jmp = DISAS_UPDATE; break; case PSW_U: - tcg_gen_movi_i32(cpu_psw_u, val); + if (FIELD_EX32(ctx->tb_flags, PSW, U) != val) { + ctx->tb_flags = FIELD_DP32(ctx->tb_flags, PSW, U, val); + tcg_gen_movi_i32(cpu_psw_u, val); + tcg_gen_mov_i32(val ? cpu_isp : cpu_usp, cpu_sp); + tcg_gen_mov_i32(cpu_sp, val ? 
cpu_usp : cpu_isp); + } break; default: qemu_log_mask(LOG_GUEST_ERROR, "Invalid distination %d", cb); @@ -2204,9 +2210,6 @@ static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a) imm = tcg_const_i32(a->imm); move_to_cr(ctx, imm, a->cr); - if (a->cr == 0 && is_privileged(ctx, 0)) { - ctx->base.is_jmp = DISAS_UPDATE; - } tcg_temp_free(imm); return true; } @@ -2215,16 +2218,13 @@ static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a) static bool trans_MVTC_r(DisasContext *ctx, arg_MVTC_r *a) { move_to_cr(ctx, cpu_regs[a->rs], a->cr); - if (a->cr == 0 && is_privileged(ctx, 0)) { - ctx->base.is_jmp = DISAS_UPDATE; - } return true; } /* mvfc rs, rd */ static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a) { - move_from_cr(cpu_regs[a->rd], a->cr, ctx->pc); + move_from_cr(ctx, cpu_regs[a->rd], a->cr, ctx->pc); return true; } @@ -2285,7 +2285,7 @@ static bool trans_INT(DisasContext *ctx, arg_INT *a) static bool trans_WAIT(DisasContext *ctx, arg_WAIT *a) { if (is_privileged(ctx, 1)) { - tcg_gen_addi_i32(cpu_pc, cpu_pc, 2); + tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next); gen_helper_wait(cpu_env); } return true; @@ -2296,6 +2296,7 @@ static void rx_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) CPURXState *env = cs->env_ptr; DisasContext *ctx = container_of(dcbase, DisasContext, base); ctx->env = env; + ctx->tb_flags = ctx->base.tb->flags; } static void rx_tr_tb_start(DisasContextBase *dcbase, CPUState *cs) @@ -2331,11 +2332,7 @@ static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) gen_goto_tb(ctx, 0, dcbase->pc_next); break; case DISAS_JUMP: - if (ctx->base.singlestep_enabled) { - gen_helper_debug(cpu_env); - } else { - tcg_gen_lookup_and_goto_ptr(); - } + tcg_gen_lookup_and_goto_ptr(); break; case DISAS_UPDATE: tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next); @@ -2350,10 +2347,11 @@ static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) } } -static void rx_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs) +static void rx_tr_disas_log(const DisasContextBase *dcbase, + CPUState *cs, FILE *logfile) { - qemu_log("IN:\n"); /* , lookup_symbol(dcbase->pc_first)); */ - log_target_disas(cs, dcbase->pc_first, dcbase->tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first)); + target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size); } static const TranslatorOps rx_tr_ops = { @@ -2365,17 +2363,12 @@ static const TranslatorOps rx_tr_ops = { .disas_log = rx_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext dc; - translator_loop(&rx_tr_ops, &dc.base, cs, tb, max_insns); -} - -void restore_state_to_opc(CPURXState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc = data[0]; + translator_loop(cs, tb, max_insns, pc, host_pc, &rx_tr_ops, &dc.base); } #define ALLOC_REGISTER(sym, name) \ diff --git a/target/s390x/arch_dump.c b/target/s390x/arch_dump.c index 08daf93ae1..a7c44ba49d 100644 --- a/target/s390x/arch_dump.c +++ b/target/s390x/arch_dump.c @@ -12,11 +12,13 @@ */ #include "qemu/osdep.h" +#include "qemu/units.h" #include "cpu.h" #include "s390x-internal.h" #include "elf.h" #include "sysemu/dump.h" - +#include "hw/s390x/pv.h" +#include "kvm/kvm_s390x.h" struct S390xUserRegsStruct { uint64_t psw[2]; @@ -76,9 +78,16 @@ typedef struct noteStruct { uint64_t todcmp; uint32_t todpreg; uint64_t ctrs[16]; + uint8_t dynamic[1]; /* + * Would be a flexible array member, 
if + * that was legal inside a union. Real + * size comes from PV info interface. + */ } contents; } QEMU_PACKED Note; +static bool pv_dump_initialized; + static void s390x_write_elf64_prstatus(Note *note, S390CPU *cpu, int id) { int i; @@ -177,52 +186,82 @@ static void s390x_write_elf64_prefix(Note *note, S390CPU *cpu, int id) note->contents.prefix = cpu_to_be32((uint32_t)(cpu->env.psa)); } +static void s390x_write_elf64_pv(Note *note, S390CPU *cpu, int id) +{ + note->hdr.n_type = cpu_to_be32(NT_S390_PV_CPU_DATA); + if (!pv_dump_initialized) { + return; + } + kvm_s390_dump_cpu(cpu, ¬e->contents.dynamic); +} typedef struct NoteFuncDescStruct { int contents_size; + uint64_t (*note_size_func)(void); /* NULL for non-dynamic sized contents */ void (*note_contents_func)(Note *note, S390CPU *cpu, int id); + bool pvonly; } NoteFuncDesc; static const NoteFuncDesc note_core[] = { - {sizeof_field(Note, contents.prstatus), s390x_write_elf64_prstatus}, - {sizeof_field(Note, contents.fpregset), s390x_write_elf64_fpregset}, - { 0, NULL} + {sizeof_field(Note, contents.prstatus), NULL, s390x_write_elf64_prstatus, false}, + {sizeof_field(Note, contents.fpregset), NULL, s390x_write_elf64_fpregset, false}, + { 0, NULL, NULL, false} }; static const NoteFuncDesc note_linux[] = { - {sizeof_field(Note, contents.prefix), s390x_write_elf64_prefix}, - {sizeof_field(Note, contents.ctrs), s390x_write_elf64_ctrs}, - {sizeof_field(Note, contents.timer), s390x_write_elf64_timer}, - {sizeof_field(Note, contents.todcmp), s390x_write_elf64_todcmp}, - {sizeof_field(Note, contents.todpreg), s390x_write_elf64_todpreg}, - {sizeof_field(Note, contents.vregslo), s390x_write_elf64_vregslo}, - {sizeof_field(Note, contents.vregshi), s390x_write_elf64_vregshi}, - {sizeof_field(Note, contents.gscb), s390x_write_elf64_gscb}, - { 0, NULL} + {sizeof_field(Note, contents.prefix), NULL, s390x_write_elf64_prefix, false}, + {sizeof_field(Note, contents.ctrs), NULL, s390x_write_elf64_ctrs, false}, + {sizeof_field(Note, contents.timer), NULL, s390x_write_elf64_timer, false}, + {sizeof_field(Note, contents.todcmp), NULL, s390x_write_elf64_todcmp, false}, + {sizeof_field(Note, contents.todpreg), NULL, s390x_write_elf64_todpreg, false}, + {sizeof_field(Note, contents.vregslo), NULL, s390x_write_elf64_vregslo, false}, + {sizeof_field(Note, contents.vregshi), NULL, s390x_write_elf64_vregshi, false}, + {sizeof_field(Note, contents.gscb), NULL, s390x_write_elf64_gscb, false}, + {0, kvm_s390_pv_dmp_get_size_cpu, s390x_write_elf64_pv, true}, + { 0, NULL, NULL, false} }; static int s390x_write_elf64_notes(const char *note_name, WriteCoreDumpFunction f, S390CPU *cpu, int id, - void *opaque, + DumpState *s, const NoteFuncDesc *funcs) { - Note note; + Note note, *notep; const NoteFuncDesc *nf; - int note_size; + int note_size, content_size; int ret = -1; assert(strlen(note_name) < sizeof(note.name)); for (nf = funcs; nf->note_contents_func; nf++) { - memset(¬e, 0, sizeof(note)); - note.hdr.n_namesz = cpu_to_be32(strlen(note_name) + 1); - note.hdr.n_descsz = cpu_to_be32(nf->contents_size); - g_strlcpy(note.name, note_name, sizeof(note.name)); - (*nf->note_contents_func)(¬e, cpu, id); + notep = ¬e; + if (nf->pvonly && !s390_is_pv()) { + continue; + } - note_size = sizeof(note) - sizeof(note.contents) + nf->contents_size; - ret = f(¬e, note_size, opaque); + content_size = nf->note_size_func ? 
nf->note_size_func() : nf->contents_size; + note_size = sizeof(note) - sizeof(notep->contents) + content_size; + + /* Notes with dynamic sizes need to allocate a note */ + if (nf->note_size_func) { + notep = g_malloc(note_size); + } + + memset(notep, 0, note_size); + + /* Setup note header data */ + notep->hdr.n_descsz = cpu_to_be32(content_size); + notep->hdr.n_namesz = cpu_to_be32(strlen(note_name) + 1); + g_strlcpy(notep->name, note_name, sizeof(notep->name)); + + /* Get contents and write them out */ + (*nf->note_contents_func)(notep, cpu, id); + ret = f(notep, note_size, s); + + if (nf->note_size_func) { + g_free(notep); + } if (ret < 0) { return -1; @@ -235,16 +274,169 @@ static int s390x_write_elf64_notes(const char *note_name, int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, - int cpuid, void *opaque) + int cpuid, DumpState *s) { S390CPU *cpu = S390_CPU(cs); int r; - r = s390x_write_elf64_notes("CORE", f, cpu, cpuid, opaque, note_core); + r = s390x_write_elf64_notes("CORE", f, cpu, cpuid, s, note_core); if (r) { return r; } - return s390x_write_elf64_notes("LINUX", f, cpu, cpuid, opaque, note_linux); + return s390x_write_elf64_notes("LINUX", f, cpu, cpuid, s, note_linux); +} + +/* PV dump section size functions */ +static uint64_t get_mem_state_size_from_len(uint64_t len) +{ + return (len / (MiB)) * kvm_s390_pv_dmp_get_size_mem_state(); +} + +static uint64_t get_size_mem_state(DumpState *s) +{ + return get_mem_state_size_from_len(s->total_size); +} + +static uint64_t get_size_completion_data(DumpState *s) +{ + return kvm_s390_pv_dmp_get_size_completion_data(); +} + +/* PV dump section data functions*/ +static int get_data_completion(DumpState *s, uint8_t *buff) +{ + int rc; + + if (!pv_dump_initialized) { + return 0; + } + rc = kvm_s390_dump_completion_data(buff); + if (!rc) { + pv_dump_initialized = false; + } + return rc; +} + +static int get_mem_state(DumpState *s, uint8_t *buff) +{ + int64_t memblock_size, memblock_start; + GuestPhysBlock *block; + uint64_t off; + int rc; + + QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { + memblock_start = dump_filtered_memblock_start(block, s->filter_area_begin, + s->filter_area_length); + if (memblock_start == -1) { + continue; + } + + memblock_size = dump_filtered_memblock_size(block, s->filter_area_begin, + s->filter_area_length); + + off = get_mem_state_size_from_len(block->target_start); + + rc = kvm_s390_dump_mem_state(block->target_start, + get_mem_state_size_from_len(memblock_size), + buff + off); + if (rc) { + return rc; + } + } + + return 0; +} + +static struct sections { + uint64_t (*sections_size_func)(DumpState *s); + int (*sections_contents_func)(DumpState *s, uint8_t *buff); + char sctn_str[12]; +} sections[] = { + { get_size_mem_state, get_mem_state, "pv_mem_meta"}, + { get_size_completion_data, get_data_completion, "pv_compl"}, + {NULL , NULL, ""} +}; + +static uint64_t arch_sections_write_hdr(DumpState *s, uint8_t *buff) +{ + Elf64_Shdr *shdr = (void *)buff; + struct sections *sctn = sections; + uint64_t off = s->section_offset; + + if (!pv_dump_initialized) { + return 0; + } + + for (; sctn->sections_size_func; off += shdr->sh_size, sctn++, shdr++) { + memset(shdr, 0, sizeof(*shdr)); + shdr->sh_type = SHT_PROGBITS; + shdr->sh_offset = off; + shdr->sh_size = sctn->sections_size_func(s); + shdr->sh_name = s->string_table_buf->len; + g_array_append_vals(s->string_table_buf, sctn->sctn_str, sizeof(sctn->sctn_str)); + } + + return (uintptr_t)shdr - (uintptr_t)buff; +} + + +/* Add arch specific 
number of sections and their respective sizes */ +static void arch_sections_add(DumpState *s) +{ + struct sections *sctn = sections; + + /* + * We only do a PV dump if we are running a PV guest, KVM supports + * the dump API and we got valid dump length information. + */ + if (!s390_is_pv() || !kvm_s390_get_protected_dump() || + !kvm_s390_pv_info_basic_valid()) { + return; + } + + /* + * Start the UV dump process by doing the initialize dump call via + * KVM as the proxy. + */ + if (!kvm_s390_dump_init()) { + pv_dump_initialized = true; + } else { + /* + * Dump init failed, maybe the guest owner disabled dumping. + * We'll continue the non-PV dump process since this is no + * reason to crash qemu. + */ + return; + } + + for (; sctn->sections_size_func; sctn++) { + s->shdr_num += 1; + s->elf_section_data_size += sctn->sections_size_func(s); + } +} + +/* + * After the PV dump has been initialized, the CPU data has been + * fetched and memory has been dumped, we need to grab the tweak data + * and the completion data. + */ +static int arch_sections_write(DumpState *s, uint8_t *buff) +{ + struct sections *sctn = sections; + int rc; + + if (!pv_dump_initialized) { + return -EINVAL; + } + + for (; sctn->sections_size_func; sctn++) { + rc = sctn->sections_contents_func(s, buff); + buff += sctn->sections_size_func(s); + if (rc) { + return rc; + } + } + return 0; } int cpu_get_dump_info(ArchDumpInfo *info, @@ -253,7 +445,20 @@ int cpu_get_dump_info(ArchDumpInfo *info, info->d_machine = EM_S390; info->d_endian = ELFDATA2MSB; info->d_class = ELFCLASS64; - + /* + * This is evaluated for each dump so we can freely switch + * between PV and non-PV. + */ + if (s390_is_pv() && kvm_s390_get_protected_dump() && + kvm_s390_pv_info_basic_valid()) { + info->arch_sections_add_fn = *arch_sections_add; + info->arch_sections_write_hdr_fn = *arch_sections_write_hdr; + info->arch_sections_write_fn = *arch_sections_write; + } else { + info->arch_sections_add_fn = NULL; + info->arch_sections_write_hdr_fn = NULL; + info->arch_sections_write_fn = NULL; + } return 0; } @@ -261,7 +466,7 @@ ssize_t cpu_get_note_size(int class, int machine, int nr_cpus) { int name_size = 8; /* "LINUX" or "CORE" + pad */ size_t elf_note_size = 0; - int note_head_size; + int note_head_size, content_size; const NoteFuncDesc *nf; assert(class == ELFCLASS64); @@ -270,12 +475,15 @@ ssize_t cpu_get_note_size(int class, int machine, int nr_cpus) note_head_size = sizeof(Elf64_Nhdr); for (nf = note_core; nf->note_contents_func; nf++) { - elf_note_size = elf_note_size + note_head_size + name_size + - nf->contents_size; + elf_note_size = elf_note_size + note_head_size + name_size + nf->contents_size; } for (nf = note_linux; nf->note_contents_func; nf++) { + if (nf->pvonly && !s390_is_pv()) { + continue; + } + content_size = nf->contents_size ? 
nf->contents_size : nf->note_size_func(); elf_note_size = elf_note_size + note_head_size + name_size + - nf->contents_size; + content_size; } return (elf_note_size) * nr_cpus; diff --git a/target/s390x/cpu-dump.c b/target/s390x/cpu-dump.c index 0f5c062994..ffa9e94d84 100644 --- a/target/s390x/cpu-dump.c +++ b/target/s390x/cpu-dump.c @@ -121,8 +121,7 @@ const char *cc_name(enum cc_op cc_op) [CC_OP_NZ_F64] = "CC_OP_NZ_F64", [CC_OP_NZ_F128] = "CC_OP_NZ_F128", [CC_OP_ICM] = "CC_OP_ICM", - [CC_OP_SLA_32] = "CC_OP_SLA_32", - [CC_OP_SLA_64] = "CC_OP_SLA_64", + [CC_OP_SLA] = "CC_OP_SLA", [CC_OP_FLOGR] = "CC_OP_FLOGR", [CC_OP_LCBB] = "CC_OP_LCBB", [CC_OP_VC] = "CC_OP_VC", diff --git a/target/s390x/cpu-param.h b/target/s390x/cpu-param.h index 472db648d7..bf951a002e 100644 --- a/target/s390x/cpu-param.h +++ b/target/s390x/cpu-param.h @@ -6,7 +6,7 @@ */ #ifndef S390_CPU_PARAM_H -#define S390_CPU_PARAM_H 1 +#define S390_CPU_PARAM_H #define TARGET_LONG_BITS 64 #define TARGET_PAGE_BITS 12 diff --git a/target/s390x/cpu-qom.h b/target/s390x/cpu-qom.h index 9f3a0d86c5..00cae2b131 100644 --- a/target/s390x/cpu-qom.h +++ b/target/s390x/cpu-qom.h @@ -25,12 +25,13 @@ #define TYPE_S390_CPU "s390x-cpu" -OBJECT_DECLARE_TYPE(S390CPU, S390CPUClass, - S390_CPU) +OBJECT_DECLARE_CPU_TYPE(S390CPU, S390CPUClass, S390_CPU) typedef struct S390CPUModel S390CPUModel; typedef struct S390CPUDef S390CPUDef; +typedef struct CPUArchState CPUS390XState; + typedef enum cpu_reset_type { S390_CPU_RESET_NORMAL, S390_CPU_RESET_INITIAL, @@ -63,6 +64,4 @@ struct S390CPUClass { void (*reset)(CPUState *cpu, cpu_reset_type type); }; -typedef struct CPUS390XState CPUS390XState; - #endif diff --git a/target/s390x/cpu-sysemu.c b/target/s390x/cpu-sysemu.c index df2c6bf694..948e4bd3e0 100644 --- a/target/s390x/cpu-sysemu.c +++ b/target/s390x/cpu-sysemu.c @@ -34,7 +34,6 @@ #include "hw/s390x/pv.h" #include "hw/boards.h" -#include "sysemu/arch_init.h" #include "sysemu/sysemu.h" #include "sysemu/tcg.h" #include "hw/core/sysemu-cpu-ops.h" @@ -77,7 +76,7 @@ static GuestPanicInformation *s390_cpu_get_crash_info(CPUState *cs) S390CPU *cpu = S390_CPU(cs); cpu_synchronize_state(cs); - panic_info = g_malloc0(sizeof(GuestPanicInformation)); + panic_info = g_new0(GuestPanicInformation, 1); panic_info->type = GUEST_PANIC_INFORMATION_TYPE_S390; panic_info->u.s390.core = cpu->env.core_id; diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c index 7b7b05f1d3..96562c516d 100644 --- a/target/s390x/cpu.c +++ b/target/s390x/cpu.c @@ -88,6 +88,13 @@ static void s390_cpu_set_pc(CPUState *cs, vaddr value) cpu->env.psw.addr = value; } +static vaddr s390_cpu_get_pc(CPUState *cs) +{ + S390CPU *cpu = S390_CPU(cs); + + return cpu->env.psw.addr; +} + static bool s390_cpu_has_work(CPUState *cs) { S390CPU *cpu = S390_CPU(cs); @@ -178,7 +185,6 @@ static void s390_cpu_reset(CPUState *s, cpu_reset_type type) static void s390_cpu_disas_set_info(CPUState *cpu, disassemble_info *info) { info->mach = bfd_mach_s390_64; - info->print_insn = print_insn_s390; info->cap_arch = CS_ARCH_SYSZ; info->cap_insn_unit = 2; info->cap_insn_split = 6; @@ -266,9 +272,13 @@ static void s390_cpu_reset_full(DeviceState *dev) static const struct TCGCPUOps s390_tcg_ops = { .initialize = s390x_translate_init, - .tlb_fill = s390_cpu_tlb_fill, + .restore_state_to_opc = s390x_restore_state_to_opc, -#if !defined(CONFIG_USER_ONLY) +#ifdef CONFIG_USER_ONLY + .record_sigsegv = s390_cpu_record_sigsegv, + .record_sigbus = s390_cpu_record_sigbus, +#else + .tlb_fill = s390_cpu_tlb_fill, .cpu_exec_interrupt = 
s390_cpu_exec_interrupt, .do_interrupt = s390_cpu_do_interrupt, .debug_excp_handler = s390x_cpu_debug_excp_handler, @@ -295,6 +305,7 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data) cc->has_work = s390_cpu_has_work; cc->dump_state = s390_cpu_dump_state; cc->set_pc = s390_cpu_set_pc; + cc->get_pc = s390_cpu_get_pc; cc->gdb_read_register = s390_cpu_gdb_read_register; cc->gdb_write_register = s390_cpu_gdb_write_register; #ifndef CONFIG_USER_ONLY diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h index b26ae8fff2..8aaf8dd5a3 100644 --- a/target/s390x/cpu.h +++ b/target/s390x/cpu.h @@ -28,6 +28,7 @@ #include "cpu-qom.h" #include "cpu_models.h" #include "exec/cpu-defs.h" +#include "qemu/cpu-float.h" #define ELF_MACHINE_UNAME "S390X" @@ -51,7 +52,7 @@ typedef struct PSW { uint64_t addr; } PSW; -struct CPUS390XState { +struct CPUArchState { uint64_t regs[16]; /* GP registers */ /* * The floating point registers are part of the vector registers. @@ -63,6 +64,8 @@ struct CPUS390XState { uint64_t etoken; /* etoken */ uint64_t etoken_extension; /* etoken extension */ + uint64_t diag318_info; + /* Fields up to this point are not cleared by initial CPU reset */ struct {} start_initial_reset_fields; @@ -84,6 +87,7 @@ struct CPUS390XState { uint64_t cc_vr; uint64_t ex_value; + uint64_t ex_target; uint64_t __excp_addr; uint64_t psa; @@ -118,8 +122,6 @@ struct CPUS390XState { uint16_t external_call_addr; DECLARE_BITMAP(emergency_signals, S390_MAX_CPUS); - uint64_t diag318_info; - #if !defined(CONFIG_USER_ONLY) uint64_t tlb_fill_tec; /* translation exception code during tlb_fill */ int tlb_fill_exc; /* exception number seen during tlb_fill */ @@ -163,7 +165,7 @@ static inline uint64_t *get_freg(CPUS390XState *cs, int nr) * * An S/390 CPU. */ -struct S390CPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ @@ -674,11 +676,6 @@ QEMU_BUILD_BUG_ON(sizeof(SysIB) != 4096); #define SIGP_STAT_INVALID_ORDER 0x00000002UL #define SIGP_STAT_RECEIVER_CHECK 0x00000001UL -/* SIGP SET ARCHITECTURE modes */ -#define SIGP_MODE_ESA_S390 0 -#define SIGP_MODE_Z_ARCH_TRANS_ALL_PSW 1 -#define SIGP_MODE_Z_ARCH_TRANS_CUR_PSW 2 - /* SIGP order code mask corresponding to bit positions 56-63 */ #define SIGP_ORDER_MASK 0x000000ff @@ -809,13 +806,6 @@ void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga, #define S390_CPU_TYPE_NAME(name) (name S390_CPU_TYPE_SUFFIX) #define CPU_RESOLVING_TYPE TYPE_S390_CPU -/* you can call this signal handler from your SIGBUS and SIGSEGV - signal handlers to inform the virtual CPU of exceptions. non zero - is returned if the signal was handled by the virtual CPU. 
*/ -int cpu_s390x_signal_handler(int host_signum, void *pinfo, void *puc); -#define cpu_signal_handler cpu_s390x_signal_handler - - /* interrupt.c */ #define RA_IGNORED 0 void s390_program_interrupt(CPUS390XState *env, uint32_t code, uintptr_t ra); @@ -852,9 +842,6 @@ uint64_t s390_cpu_get_psw_mask(CPUS390XState *env); /* outside of target/s390x/ */ S390CPU *s390_cpu_addr2state(uint16_t cpu_addr); -typedef CPUS390XState CPUArchState; -typedef S390CPU ArchCPU; - #include "exec/cpu-all.h" #endif diff --git a/target/s390x/cpu_features_def.h.inc b/target/s390x/cpu_features_def.h.inc index e86662bb3b..e3cfe63735 100644 --- a/target/s390x/cpu_features_def.h.inc +++ b/target/s390x/cpu_features_def.h.inc @@ -58,7 +58,7 @@ DEF_FEAT(ENHANCED_MONITOR, "emon", STFL, 36, "Enhanced-monitor facility") DEF_FEAT(FLOATING_POINT_EXT, "fpe", STFL, 37, "Floating-point extension facility") DEF_FEAT(ORDER_PRESERVING_COMPRESSION, "opc", STFL, 38, "Order Preserving Compression facility") DEF_FEAT(SET_PROGRAM_PARAMETERS, "sprogp", STFL, 40, "Set-program-parameters facility") -DEF_FEAT(FLOATING_POINT_SUPPPORT_ENH, "fpseh", STFL, 41, "Floating-point-support-enhancement facilities") +DEF_FEAT(FLOATING_POINT_SUPPORT_ENH, "fpseh", STFL, 41, "Floating-point-support-enhancement facilities") DEF_FEAT(DFP, "dfp", STFL, 42, "DFP (decimal-floating-point) facility") DEF_FEAT(DFP_FAST, "dfphp", STFL, 43, "DFP (decimal-floating-point) facility has high performance") DEF_FEAT(PFPO, "pfpo", STFL, 44, "PFPO instruction") @@ -114,6 +114,7 @@ DEF_FEAT(VECTOR_PACKED_DECIMAL_ENH2, "vxpdeh2", STFL, 192, "Vector-Packed-Decima DEF_FEAT(BEAR_ENH, "beareh", STFL, 193, "BEAR-enhancement facility") DEF_FEAT(RDP, "rdp", STFL, 194, "Reset-DAT-protection facility") DEF_FEAT(PAI, "pai", STFL, 196, "Processor-Activity-Instrumentation facility") +DEF_FEAT(PAIE, "paie", STFL, 197, "Processor-Activity-Instrumentation extension-1") /* Features exposed via SCLP SCCB Byte 80 - 98 (bit numbers relative to byte-80) */ DEF_FEAT(SIE_GSLS, "gsls", SCLP_CONF_CHAR, 40, "SIE: Guest-storage-limit-suppression facility") diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c index 4e4598cc77..a85c56b4ee 100644 --- a/target/s390x/cpu_models.c +++ b/target/s390x/cpu_models.c @@ -19,6 +19,7 @@ #include "qapi/error.h" #include "qapi/visitor.h" #include "qemu/module.h" +#include "qemu/hw-version.h" #include "qemu/qemu-print.h" #ifndef CONFIG_USER_ONLY #include "sysemu/sysemu.h" @@ -85,10 +86,9 @@ static S390CPUDef s390_cpu_defs[] = { CPUDEF_INIT(0x3932, 16, 1, 47, 0x08000000U, "gen16b", "IBM 3932 GA1"), }; -#define QEMU_MAX_CPU_TYPE 0x3906 -#define QEMU_MAX_CPU_GEN 14 -#define QEMU_MAX_CPU_EC_GA 2 -static const S390FeatInit qemu_max_cpu_feat_init = { S390_FEAT_LIST_QEMU_MAX }; +#define QEMU_MAX_CPU_TYPE 0x8561 +#define QEMU_MAX_CPU_GEN 15 +#define QEMU_MAX_CPU_EC_GA 1 static S390FeatBitmap qemu_max_cpu_feat; /* features part of a base model but not relevant for finding a base model */ @@ -334,18 +334,31 @@ const S390CPUDef *s390_find_cpu_def(uint16_t type, uint8_t gen, uint8_t ec_ga, static void s390_print_cpu_model_list_entry(gpointer data, gpointer user_data) { const S390CPUClass *scc = S390_CPU_CLASS((ObjectClass *)data); + CPUClass *cc = CPU_CLASS(scc); char *name = g_strdup(object_class_get_name((ObjectClass *)data)); - const char *details = ""; + g_autoptr(GString) details = g_string_new(""); if (scc->is_static) { - details = "(static, migration-safe)"; - } else if (scc->is_migration_safe) { - details = "(migration-safe)"; + g_string_append(details, 
"static, "); + } + if (scc->is_migration_safe) { + g_string_append(details, "migration-safe, "); + } + if (cc->deprecation_note) { + g_string_append(details, "deprecated, "); + } + if (details->len) { + /* cull trailing ', ' */ + g_string_truncate(details, details->len - 2); } /* strip off the -s390x-cpu */ g_strrstr(name, "-" TYPE_S390_CPU)[0] = 0; - qemu_printf("s390 %-15s %-35s %s\n", name, scc->desc, details); + if (details->len) { + qemu_printf("s390 %-15s %-35s (%s)\n", name, scc->desc, details->str); + } else { + qemu_printf("s390 %-15s %-35s\n", name, scc->desc); + } g_free(name); } @@ -398,14 +411,14 @@ void s390_cpu_list(void) for (feat = 0; feat < S390_FEAT_MAX; feat++) { const S390FeatDef *def = s390_feat_def(feat); - qemu_printf("%-20s %-50s\n", def->name, def->desc); + qemu_printf("%-20s %s\n", def->name, def->desc); } qemu_printf("\nRecognized feature groups:\n"); for (group = 0; group < S390_FEAT_GROUP_MAX; group++) { const S390FeatGroupDef *def = s390_feat_group_def(group); - qemu_printf("%-20s %-50s\n", def->name, def->desc); + qemu_printf("%-20s %s\n", def->name, def->desc); } } @@ -591,8 +604,8 @@ void s390_realize_cpu_model(CPUState *cs, Error **errp) #if !defined(CONFIG_USER_ONLY) cpu->env.cpuid = s390_cpuid_from_cpu_model(cpu->model); if (tcg_enabled()) { - /* basic mode, write the cpu address into the first 4 bit of the ID */ - cpu->env.cpuid = deposit64(cpu->env.cpuid, 54, 4, cpu->env.core_id); + cpu->env.cpuid = deposit64(cpu->env.cpuid, CPU_PHYS_ADDR_SHIFT, + CPU_PHYS_ADDR_BITS, cpu->env.core_id); } #endif } @@ -727,7 +740,6 @@ static void s390_cpu_model_initfn(Object *obj) } } -static S390CPUDef s390_qemu_cpu_def; static S390CPUModel s390_qemu_cpu_model; /* Set the qemu CPU model (on machine initialization). Must not be called @@ -741,17 +753,8 @@ void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga, g_assert(def); g_assert(QTAILQ_EMPTY_RCU(&cpus)); - /* TCG emulates some features that can usually not be enabled with - * the emulated machine generation. Make sure they can be enabled - * when using the QEMU model by adding them to full_feat. We have - * to copy the definition to do that. - */ - memcpy(&s390_qemu_cpu_def, def, sizeof(s390_qemu_cpu_def)); - bitmap_or(s390_qemu_cpu_def.full_feat, s390_qemu_cpu_def.full_feat, - qemu_max_cpu_feat, S390_FEAT_MAX); - /* build the CPU model */ - s390_qemu_cpu_model.def = &s390_qemu_cpu_def; + s390_qemu_cpu_model.def = def; bitmap_zero(s390_qemu_cpu_model.features, S390_FEAT_MAX); s390_init_feat_bitmap(feat_init, s390_qemu_cpu_model.features); } @@ -884,9 +887,8 @@ static void s390_max_cpu_model_class_init(ObjectClass *oc, void *data) /* * The "max" model is neither static nor migration safe. Under KVM - * it represents the "host" model. Under TCG it represents some kind of - * "qemu" CPU model without compat handling and maybe with some additional - * CPU features that are not yet unlocked in the "qemu" model. + * it represents the "host" model. Under TCG it represents the "qemu" CPU + * model of the latest QEMU machine. 
*/ xcc->desc = "Enables all features supported by the accelerator in the current host"; @@ -965,13 +967,13 @@ static void init_ignored_base_feat(void) static void register_types(void) { - static const S390FeatInit qemu_latest_init = { S390_FEAT_LIST_QEMU_LATEST }; + static const S390FeatInit qemu_max_init = { S390_FEAT_LIST_QEMU_MAX }; int i; init_ignored_base_feat(); /* init all bitmaps from gnerated data initially */ - s390_init_feat_bitmap(qemu_max_cpu_feat_init, qemu_max_cpu_feat); + s390_init_feat_bitmap(qemu_max_init, qemu_max_cpu_feat); for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) { s390_init_feat_bitmap(s390_cpu_defs[i].base_init, s390_cpu_defs[i].base_feat); @@ -981,9 +983,9 @@ static void register_types(void) s390_cpu_defs[i].full_feat); } - /* initialize the qemu model with latest definition */ + /* initialize the qemu model with the maximum definition ("max" model) */ s390_set_qemu_cpu_model(QEMU_MAX_CPU_TYPE, QEMU_MAX_CPU_GEN, - QEMU_MAX_CPU_EC_GA, qemu_latest_init); + QEMU_MAX_CPU_EC_GA, qemu_max_init); for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) { char *base_name = s390_base_cpu_type_name(s390_cpu_defs[i].name); diff --git a/target/s390x/cpu_models.h b/target/s390x/cpu_models.h index 74d1f87e4f..cc7305ec21 100644 --- a/target/s390x/cpu_models.h +++ b/target/s390x/cpu_models.h @@ -24,13 +24,13 @@ struct S390CPUDef { uint8_t gen; /* hw generation identification */ uint16_t type; /* cpu type identification */ uint8_t ec_ga; /* EC GA version (on which also the BC is based) */ - uint8_t mha_pow; /* Maximum Host Adress Power, mha = 2^pow-1 */ + uint8_t mha_pow; /* maximum host address power, mha = 2^pow-1 */ uint32_t hmfai; /* hypervisor-managed facilities */ /* base/min features, must never be changed between QEMU versions */ S390FeatBitmap base_feat; /* used to init base_feat from generated data */ S390FeatInit base_init; - /* deafault features, QEMU version specific */ + /* default features, QEMU version specific */ S390FeatBitmap default_feat; /* used to init default_feat from generated data */ S390FeatInit default_init; @@ -96,10 +96,18 @@ static inline bool s390_known_cpu_type(uint16_t type) { return s390_get_gen_for_cpu_type(type) != 0; } +#define CPU_ID_SHIFT 32 +#define CPU_ID_BITS 24 +/* + * When cpu_id_format is 0 (basic mode), the leftmost 4 bits of cpu_id contain + * the rightmost 4 bits of the physical CPU address. + */ +#define CPU_PHYS_ADDR_BITS 4 +#define CPU_PHYS_ADDR_SHIFT (CPU_ID_SHIFT + CPU_ID_BITS - CPU_PHYS_ADDR_BITS) static inline uint64_t s390_cpuid_from_cpu_model(const S390CPUModel *model) { return ((uint64_t)model->cpu_ver << 56) | - ((uint64_t)model->cpu_id << 32) | + ((uint64_t)model->cpu_id << CPU_ID_SHIFT) | ((uint64_t)model->def->type << 16) | (model->def->gen == 7 ? 
0 : (uint64_t)model->cpu_id_format << 15); } diff --git a/target/s390x/cpu_models_sysemu.c b/target/s390x/cpu_models_sysemu.c index 05c3ccaaff..d8a141a023 100644 --- a/target/s390x/cpu_models_sysemu.c +++ b/target/s390x/cpu_models_sysemu.c @@ -15,7 +15,6 @@ #include "s390x-internal.h" #include "kvm/kvm_s390x.h" #include "sysemu/kvm.h" -#include "sysemu/tcg.h" #include "qapi/error.h" #include "qapi/visitor.h" #include "qapi/qmp/qerror.h" diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c index 7d85322d68..1e3b7c0dc9 100644 --- a/target/s390x/gen-features.c +++ b/target/s390x/gen-features.c @@ -374,7 +374,7 @@ static uint16_t base_GEN10_GA1[] = { S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2, S390_FEAT_GENERAL_INSTRUCTIONS_EXT, S390_FEAT_EXECUTE_EXT, - S390_FEAT_FLOATING_POINT_SUPPPORT_ENH, + S390_FEAT_FLOATING_POINT_SUPPORT_ENH, S390_FEAT_DFP, S390_FEAT_DFP_FAST, S390_FEAT_PFPO, @@ -476,7 +476,7 @@ static uint16_t full_GEN9_GA2[] = { S390_FEAT_MOVE_WITH_OPTIONAL_SPEC, S390_FEAT_EXTRACT_CPU_TIME, S390_FEAT_COMPARE_AND_SWAP_AND_STORE, - S390_FEAT_FLOATING_POINT_SUPPPORT_ENH, + S390_FEAT_FLOATING_POINT_SUPPORT_ENH, S390_FEAT_DFP, }; @@ -575,6 +575,7 @@ static uint16_t full_GEN16_GA1[] = { S390_FEAT_BEAR_ENH, S390_FEAT_RDP, S390_FEAT_PAI, + S390_FEAT_PAIE, }; @@ -663,7 +664,14 @@ static uint16_t default_GEN15_GA1[] = { S390_FEAT_ETOKEN, }; -#define default_GEN16_GA1 EmptyFeat +static uint16_t default_GEN16_GA1[] = { + S390_FEAT_NNPA, + S390_FEAT_VECTOR_PACKED_DECIMAL_ENH2, + S390_FEAT_BEAR_ENH, + S390_FEAT_RDP, + S390_FEAT_PAI, + S390_FEAT_PAIE, +}; /* QEMU (CPU model) features */ @@ -694,7 +702,7 @@ static uint16_t qemu_V3_1[] = { S390_FEAT_GENERAL_INSTRUCTIONS_EXT, S390_FEAT_EXECUTE_EXT, S390_FEAT_SET_PROGRAM_PARAMETERS, - S390_FEAT_FLOATING_POINT_SUPPPORT_ENH, + S390_FEAT_FLOATING_POINT_SUPPORT_ENH, S390_FEAT_STFLE_45, S390_FEAT_STFLE_49, S390_FEAT_LOCAL_TLB_CLEARING, @@ -725,17 +733,31 @@ static uint16_t qemu_V6_0[] = { S390_FEAT_ESOP, }; -static uint16_t qemu_LATEST[] = { +static uint16_t qemu_V6_2[] = { S390_FEAT_INSTRUCTION_EXEC_PROT, S390_FEAT_MISC_INSTRUCTION_EXT2, S390_FEAT_MSA_EXT_8, S390_FEAT_VECTOR_ENH, }; -/* add all new definitions before this point */ +static uint16_t qemu_V7_0[] = { + S390_FEAT_MISC_INSTRUCTION_EXT3, +}; + +static uint16_t qemu_V7_1[] = { + S390_FEAT_VECTOR_ENH2, +}; + +/* + * Features for the "qemu" CPU model of the latest QEMU machine and the "max" + * CPU model under TCG. Don't include features that are not part of the full + * feature set of the current "max" CPU model generation. 
+ */ static uint16_t qemu_MAX[] = { - /* generates a dependency warning, leave it out for now */ S390_FEAT_MSA_EXT_5, + S390_FEAT_KIMD_SHA_512, + S390_FEAT_KLMD_SHA_512, + S390_FEAT_PRNO_TRNG, }; /****** END FEATURE DEFS ******/ @@ -856,7 +878,9 @@ static FeatGroupDefSpec QemuFeatDef[] = { QEMU_FEAT_INITIALIZER(V4_0), QEMU_FEAT_INITIALIZER(V4_1), QEMU_FEAT_INITIALIZER(V6_0), - QEMU_FEAT_INITIALIZER(LATEST), + QEMU_FEAT_INITIALIZER(V6_2), + QEMU_FEAT_INITIALIZER(V7_0), + QEMU_FEAT_INITIALIZER(V7_1), QEMU_FEAT_INITIALIZER(MAX), }; diff --git a/target/s390x/helper.c b/target/s390x/helper.c index 6e35473c7f..473c8e51b0 100644 --- a/target/s390x/helper.c +++ b/target/s390x/helper.c @@ -27,7 +27,6 @@ #include "hw/s390x/pv.h" #include "sysemu/hw_accel.h" #include "sysemu/runstate.h" -#include "sysemu/tcg.h" void s390x_tod_timer(void *opaque) { diff --git a/target/s390x/helper.h b/target/s390x/helper.h index 6215ca00bc..bf33d86f74 100644 --- a/target/s390x/helper.h +++ b/target/s390x/helper.h @@ -4,6 +4,7 @@ DEF_HELPER_FLAGS_4(nc, TCG_CALL_NO_WG, i32, env, i32, i64, i64) DEF_HELPER_FLAGS_4(oc, TCG_CALL_NO_WG, i32, env, i32, i64, i64) DEF_HELPER_FLAGS_4(xc, TCG_CALL_NO_WG, i32, env, i32, i64, i64) DEF_HELPER_FLAGS_4(mvc, TCG_CALL_NO_WG, void, env, i32, i64, i64) +DEF_HELPER_FLAGS_4(mvcrl, TCG_CALL_NO_WG, void, env, i64, i64, i64) DEF_HELPER_FLAGS_4(mvcin, TCG_CALL_NO_WG, void, env, i32, i64, i64) DEF_HELPER_FLAGS_4(clc, TCG_CALL_NO_WG, i32, env, i32, i64, i64) DEF_HELPER_3(mvcl, i32, env, i32, i32) @@ -202,8 +203,11 @@ DEF_HELPER_FLAGS_3(gvec_vpopct16, TCG_CALL_NO_RWG, void, ptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_verim8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_verim16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vsl, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_vsl_ve2, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vsra, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_vsra_ve2, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vsrl, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_vsrl_ve2, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vscbi8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vscbi16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_4(gvec_vtm, void, ptr, cptr, env, i32) @@ -245,6 +249,12 @@ DEF_HELPER_6(gvec_vstrc_cc32, void, ptr, cptr, cptr, cptr, env, i32) DEF_HELPER_6(gvec_vstrc_cc_rt8, void, ptr, cptr, cptr, cptr, env, i32) DEF_HELPER_6(gvec_vstrc_cc_rt16, void, ptr, cptr, cptr, cptr, env, i32) DEF_HELPER_6(gvec_vstrc_cc_rt32, void, ptr, cptr, cptr, cptr, env, i32) +DEF_HELPER_6(gvec_vstrs_8, void, ptr, cptr, cptr, cptr, env, i32) +DEF_HELPER_6(gvec_vstrs_16, void, ptr, cptr, cptr, cptr, env, i32) +DEF_HELPER_6(gvec_vstrs_32, void, ptr, cptr, cptr, cptr, env, i32) +DEF_HELPER_6(gvec_vstrs_zs8, void, ptr, cptr, cptr, cptr, env, i32) +DEF_HELPER_6(gvec_vstrs_zs16, void, ptr, cptr, cptr, cptr, env, i32) +DEF_HELPER_6(gvec_vstrs_zs32, void, ptr, cptr, cptr, cptr, env, i32) /* === Vector Floating-Point Instructions */ DEF_HELPER_FLAGS_5(gvec_vfa32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) @@ -274,6 +284,10 @@ DEF_HELPER_FLAGS_5(gvec_vfche64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32 DEF_HELPER_5(gvec_vfche64_cc, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfche128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vfche128_cc, 
void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_4(gvec_vcdg32, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) +DEF_HELPER_FLAGS_4(gvec_vcdlg32, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) +DEF_HELPER_FLAGS_4(gvec_vcgd32, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) +DEF_HELPER_FLAGS_4(gvec_vclgd32, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vcdg64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vcdlg64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vcgd64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) @@ -336,9 +350,9 @@ DEF_HELPER_FLAGS_4(stctl, TCG_CALL_NO_WG, void, env, i32, i64, i32) DEF_HELPER_FLAGS_4(stctg, TCG_CALL_NO_WG, void, env, i32, i64, i32) DEF_HELPER_FLAGS_2(testblock, TCG_CALL_NO_WG, i32, env, i64) DEF_HELPER_FLAGS_3(tprot, TCG_CALL_NO_WG, i32, env, i64, i64) -DEF_HELPER_FLAGS_2(iske, TCG_CALL_NO_RWG_SE, i64, env, i64) -DEF_HELPER_FLAGS_3(sske, TCG_CALL_NO_RWG, void, env, i64, i64) -DEF_HELPER_FLAGS_2(rrbe, TCG_CALL_NO_RWG, i32, env, i64) +DEF_HELPER_2(iske, i64, env, i64) +DEF_HELPER_3(sske, void, env, i64, i64) +DEF_HELPER_2(rrbe, i32, env, i64) DEF_HELPER_4(mvcs, i32, env, i64, i64, i64) DEF_HELPER_4(mvcp, i32, env, i64, i64, i64) DEF_HELPER_4(sigp, i32, env, i64, i32, i32) diff --git a/target/s390x/ioinst.c b/target/s390x/ioinst.c index 4eb0a7a9f8..053aaabb5a 100644 --- a/target/s390x/ioinst.c +++ b/target/s390x/ioinst.c @@ -123,7 +123,7 @@ static int ioinst_schib_valid(SCHIB *schib) } /* for MB format 1 bits 26-31 of word 11 must be 0 */ /* MBA uses words 10 and 11, it means align on 2**6 */ - if ((be16_to_cpu(schib->pmcw.chars) & PMCW_CHARS_MASK_MBFC) && + if ((be32_to_cpu(schib->pmcw.chars) & PMCW_CHARS_MASK_MBFC) && (be64_to_cpu(schib->mba) & 0x03fUL)) { return 0; } @@ -284,8 +284,8 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, g_assert(!s390_is_pv()); /* * As operand exceptions have a lower priority than access exceptions, - * we check whether the memory area is writeable (injecting the - * access execption if it is not) first. + * we check whether the memory area is writable (injecting the + * access exception if it is not) first. 
*/ if (!s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib))) { s390_program_interrupt(env, PGM_OPERAND, ra); diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c index 5b1fdb55c4..3ac7ec9acf 100644 --- a/target/s390x/kvm/kvm.c +++ b/target/s390x/kvm/kvm.c @@ -24,7 +24,6 @@ #include #include -#include "qemu-common.h" #include "cpu.h" #include "s390x-internal.h" #include "kvm_s390x.h" @@ -152,11 +151,16 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = { static int cap_sync_regs; static int cap_async_pf; static int cap_mem_op; +static int cap_mem_op_extension; static int cap_s390_irq; static int cap_ri; static int cap_hpage_1m; static int cap_vcpu_resets; static int cap_protected; +static int cap_zpci_op; +static int cap_protected_dump; + +static bool mem_op_storage_key_support; static int active_cmma; @@ -355,9 +359,13 @@ int kvm_arch_init(MachineState *ms, KVMState *s) cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS); cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF); cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP); + cap_mem_op_extension = kvm_check_extension(s, KVM_CAP_S390_MEM_OP_EXTENSION); + mem_op_storage_key_support = cap_mem_op_extension > 0; cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ); cap_vcpu_resets = kvm_check_extension(s, KVM_CAP_S390_VCPU_RESETS); cap_protected = kvm_check_extension(s, KVM_CAP_S390_PROTECTED); + cap_zpci_op = kvm_check_extension(s, KVM_CAP_S390_ZPCI_OP); + cap_protected_dump = kvm_check_extension(s, KVM_CAP_S390_PROTECTED_DUMP); kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0); kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0); @@ -843,6 +851,7 @@ int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf, : KVM_S390_MEMOP_LOGICAL_READ, .buf = (uint64_t)hostbuf, .ar = ar, + .key = (cpu->env.psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY, }; int ret; @@ -852,6 +861,9 @@ int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf, if (!hostbuf) { mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY; } + if (mem_op_storage_key_support) { + mem_op.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION; + } ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op); if (ret < 0) { @@ -1023,7 +1035,7 @@ int kvm_arch_remove_hw_breakpoint(target_ulong addr, } size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint); hw_breakpoints = - (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size); + g_realloc(hw_breakpoints, size); } else { g_free(hw_breakpoints); hw_breakpoints = NULL; @@ -1585,6 +1597,10 @@ void kvm_s390_set_diag318(CPUState *cs, uint64_t diag318_info) env->diag318_info = diag318_info; cs->kvm_run->s.regs.diag318 = diag318_info; cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318; + /* + * diag 318 info is zeroed during a clear reset and + * diag 308 IPL subcodes. 
+ */ } } @@ -2031,6 +2047,11 @@ int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch, return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick); } +int kvm_s390_get_protected_dump(void) +{ + return cap_protected_dump; +} + int kvm_s390_get_ri(void) { return cap_ri; @@ -2562,3 +2583,12 @@ bool kvm_arch_cpu_check_are_resettable(void) { return true; } + +int kvm_s390_get_zpci_op(void) +{ + return cap_zpci_op; +} + +void kvm_arch_accel_class_init(ObjectClass *oc) +{ +} diff --git a/target/s390x/kvm/kvm_s390x.h b/target/s390x/kvm/kvm_s390x.h index 05a5e1e6f4..f9785564d0 100644 --- a/target/s390x/kvm/kvm_s390x.h +++ b/target/s390x/kvm/kvm_s390x.h @@ -26,7 +26,9 @@ int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state); void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu); int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu); int kvm_s390_get_hpage_1m(void); +int kvm_s390_get_protected_dump(void); int kvm_s390_get_ri(void); +int kvm_s390_get_zpci_op(void); int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock); int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_clock); int kvm_s390_set_clock(uint8_t tod_high, uint64_t tod_clock); diff --git a/target/s390x/kvm/meson.build b/target/s390x/kvm/meson.build index d1356356b1..aef52b6686 100644 --- a/target/s390x/kvm/meson.build +++ b/target/s390x/kvm/meson.build @@ -1,6 +1,8 @@ s390x_ss.add(when: 'CONFIG_KVM', if_true: files( 'kvm.c' +), if_false: files( + 'stubs.c' )) # Newer kernels on s390 check for an S390_PGSTE program header and diff --git a/target/s390x/kvm/stubs.c b/target/s390x/kvm/stubs.c new file mode 100644 index 0000000000..5fd63b9a7e --- /dev/null +++ b/target/s390x/kvm/stubs.c @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" + +#include "kvm_s390x.h" + +int kvm_s390_get_protected_dump(void) +{ + return false; +} diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c index d779a9fc51..b04b57c235 100644 --- a/target/s390x/mmu_helper.c +++ b/target/s390x/mmu_helper.c @@ -94,6 +94,14 @@ target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr) return raddr; } +bool mmu_absolute_addr_valid(target_ulong addr, bool is_write) +{ + return address_space_access_valid(&address_space_memory, + addr & TARGET_PAGE_MASK, + TARGET_PAGE_SIZE, is_write, + MEMTXATTRS_UNSPECIFIED); +} + static inline bool read_table_entry(CPUS390XState *env, hwaddr gaddr, uint64_t *entry) { @@ -117,7 +125,7 @@ static inline bool read_table_entry(CPUS390XState *env, hwaddr gaddr, static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr, uint64_t asc, uint64_t asce, target_ulong *raddr, - int *flags, int rw) + int *flags) { const bool edat1 = (env->cregs[0] & CR0_EDAT) && s390_has_feat(S390_FEAT_EDAT); @@ -293,19 +301,26 @@ static void mmu_handle_skey(target_ulong addr, int rw, int *flags) { static S390SKeysClass *skeyclass; static S390SKeysState *ss; - MachineState *ms = MACHINE(qdev_get_machine()); - uint8_t key; + uint8_t key, old_key; int rc; - if (unlikely(addr >= ms->ram_size)) { - return; - } - + /* + * We expect to be called with an absolute address that has already been + * validated, such that we can reliably use it to lookup the storage key. + */ if (unlikely(!ss)) { ss = s390_get_skeys_device(); skeyclass = S390_SKEYS_GET_CLASS(ss); } + /* + * Don't enable storage keys if they are still disabled, i.e., no actual + * storage key instruction was issued yet. 
+ */ + if (!skeyclass->skeys_are_enabled(ss)) { + return; + } + /* * Whenever we create a new TLB entry, we set the storage key reference * bit. In case we allow write accesses, we set the storage key change @@ -330,6 +345,7 @@ static void mmu_handle_skey(target_ulong addr, int rw, int *flags) trace_get_skeys_nonzero(rc); return; } + old_key = key; switch (rw) { case MMU_DATA_LOAD: @@ -353,20 +369,23 @@ static void mmu_handle_skey(target_ulong addr, int rw, int *flags) /* Any store/fetch sets the reference bit */ key |= SK_R; - rc = skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key); - if (rc) { - trace_set_skeys_nonzero(rc); + if (key != old_key) { + rc = skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key); + if (rc) { + trace_set_skeys_nonzero(rc); + } } } /** * Translate a virtual (logical) address into a physical (absolute) address. * @param vaddr the virtual address - * @param rw 0 = read, 1 = write, 2 = code fetch + * @param rw 0 = read, 1 = write, 2 = code fetch, < 0 = load real address * @param asc address space control (one of the PSW_ASC_* modes) * @param raddr the translated address is stored to this pointer * @param flags the PAGE_READ/WRITE/EXEC flags are stored to this pointer - * @param exc true = inject a program check if a fault occurred + * @param tec the translation exception code if stored to this pointer if + * there is an exception to raise * @return 0 = success, != 0, the exception to raise */ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc, @@ -420,7 +439,7 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc, } /* perform the DAT translation */ - r = mmu_translate_asce(env, vaddr, asc, asce, raddr, flags, rw); + r = mmu_translate_asce(env, vaddr, asc, asce, raddr, flags); if (unlikely(r)) { return r; } @@ -440,10 +459,17 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc, } nodat: - /* Convert real address -> absolute address */ - *raddr = mmu_real2abs(env, *raddr); + if (rw >= 0) { + /* Convert real address -> absolute address */ + *raddr = mmu_real2abs(env, *raddr); - mmu_handle_skey(*raddr, rw, flags); + if (!mmu_absolute_addr_valid(*raddr, rw == MMU_DATA_STORE)) { + *tec = 0; /* unused */ + return PGM_ADDRESSING; + } + + mmu_handle_skey(*raddr, rw, flags); + } return 0; } @@ -464,12 +490,6 @@ static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages, if (ret) { return ret; } - if (!address_space_access_valid(&address_space_memory, pages[i], - TARGET_PAGE_SIZE, is_write, - MEMTXATTRS_UNSPECIFIED)) { - *tec = 0; /* unused */ - return PGM_ADDRESSING; - } addr += TARGET_PAGE_SIZE; } @@ -579,6 +599,12 @@ int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw, *addr = mmu_real2abs(env, raddr & TARGET_PAGE_MASK); + if (!mmu_absolute_addr_valid(*addr, rw == MMU_DATA_STORE)) { + /* unused */ + *tec = 0; + return PGM_ADDRESSING; + } + mmu_handle_skey(*addr, rw, flags); return 0; } diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h index 5506f185e8..825252d728 100644 --- a/target/s390x/s390x-internal.h +++ b/target/s390x/s390x-internal.h @@ -11,6 +11,7 @@ #define S390X_INTERNAL_H #include "cpu.h" +#include "fpu/softfloat.h" #ifndef CONFIG_USER_ONLY typedef struct LowCore { @@ -193,8 +194,7 @@ enum cc_op { CC_OP_NZ_F128, /* FP dst != 0 (128bit) */ CC_OP_ICM, /* insert characters under mask */ - CC_OP_SLA_32, /* Calculate shift left signed (32bit) */ - CC_OP_SLA_64, /* Calculate shift left signed (64bit) */ + CC_OP_SLA, /* 
Calculate shift left signed */ CC_OP_FLOGR, /* find leftmost one */ CC_OP_LCBB, /* load count to block boundary */ CC_OP_VC, /* vector compare result */ @@ -228,7 +228,7 @@ static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb, /* arch_dump.c */ int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, - int cpuid, void *opaque); + int cpuid, DumpState *s); /* cc_helper.c */ @@ -270,12 +270,21 @@ ObjectClass *s390_cpu_class_by_name(const char *name); void s390x_cpu_debug_excp_handler(CPUState *cs); void s390_cpu_do_interrupt(CPUState *cpu); bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req); + +#ifdef CONFIG_USER_ONLY +void s390_cpu_record_sigsegv(CPUState *cs, vaddr address, + MMUAccessType access_type, + bool maperr, uintptr_t retaddr); +void s390_cpu_record_sigbus(CPUState *cs, vaddr address, + MMUAccessType access_type, uintptr_t retaddr); +#else bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); -void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); +G_NORETURN void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr); +#endif /* fpu_helper.c */ @@ -291,7 +300,7 @@ uint32_t set_cc_nz_f128(float128 v); uint8_t s390_softfloat_exc_to_ieee(unsigned int exc); int s390_swap_bfp_rounding_mode(CPUS390XState *env, int m3); void s390_restore_bfp_rounding_mode(CPUS390XState *env, int old_mode); -int float_comp_to_cc(CPUS390XState *env, int float_compare); +int float_comp_to_cc(CPUS390XState *env, FloatRelation float_compare); #define DCMASK_ZERO 0x0c00 #define DCMASK_NORMAL 0x0300 @@ -373,6 +382,9 @@ void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, /* mmu_helper.c */ +bool mmu_absolute_addr_valid(target_ulong addr, bool is_write); +/* Special access mode only valid for mmu_translate() */ +#define MMU_S390_LRA -1 int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc, target_ulong *raddr, int *flags, uint64_t *tec); int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw, @@ -387,7 +399,9 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, /* translate.c */ void s390x_translate_init(void); - +void s390x_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data); /* sigp.c */ int handle_sigp(CPUS390XState *env, uint8_t order, uint64_t r1, uint64_t r3); diff --git a/target/s390x/sigp.c b/target/s390x/sigp.c index d57427ced8..9dd977349a 100644 --- a/target/s390x/sigp.c +++ b/target/s390x/sigp.c @@ -139,7 +139,7 @@ static void sigp_stop_and_store_status(CPUState *cs, run_on_cpu_data arg) case S390_CPU_STATE_OPERATING: cpu->env.sigp_order = SIGP_STOP_STORE_STATUS; cpu_inject_stop(cpu); - /* store will be performed in do_stop_interrup() */ + /* store will be performed in do_stop_interrupt() */ break; case S390_CPU_STATE_STOPPED: /* already stopped, just store the status */ @@ -428,26 +428,10 @@ static int handle_sigp_single_dst(S390CPU *cpu, S390CPU *dst_cpu, uint8_t order, static int sigp_set_architecture(S390CPU *cpu, uint32_t param, uint64_t *status_reg) { - CPUState *cur_cs; - S390CPU *cur_cpu; - bool all_stopped = true; - - CPU_FOREACH(cur_cs) { - cur_cpu = S390_CPU(cur_cs); - - if (cur_cpu == cpu) { - continue; - } - if (s390_cpu_get_state(cur_cpu) != S390_CPU_STATE_STOPPED) { - all_stopped = false; - } - } - *status_reg &= 
0xffffffff00000000ULL; /* Reject set arch order, with czam we're always in z/Arch mode. */ - *status_reg |= (all_stopped ? SIGP_STAT_INVALID_PARAMETER : - SIGP_STAT_INCORRECT_STATE); + *status_reg |= SIGP_STAT_INVALID_PARAMETER; return SIGP_CC_STATUS_STORED; } @@ -495,13 +479,17 @@ void do_stop_interrupt(CPUS390XState *env) { S390CPU *cpu = env_archcpu(env); - if (s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu) == 0) { - qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); - } + /* + * Complete the STOP operation before exposing the CPU as + * STOPPED to the system. + */ if (cpu->env.sigp_order == SIGP_STOP_STORE_STATUS) { s390_store_status(cpu, S390_STORE_STATUS_DEF_ADDR, true); } env->sigp_order = 0; + if (s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu) == 0) { + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } env->pending_int &= ~INTERRUPT_STOP; } diff --git a/target/s390x/tcg/cc_helper.c b/target/s390x/tcg/cc_helper.c index c2c96c3a3c..b36f8cdc8b 100644 --- a/target/s390x/tcg/cc_helper.c +++ b/target/s390x/tcg/cc_helper.c @@ -136,7 +136,7 @@ static uint32_t cc_calc_subu(uint64_t borrow_out, uint64_t result) static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar) { - if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) { + if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar >= 0)) { return 3; /* overflow */ } else { if (ar < 0) { @@ -151,7 +151,7 @@ static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar) static uint32_t cc_calc_sub_64(int64_t a1, int64_t a2, int64_t ar) { - if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) { + if ((a1 >= 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) { return 3; /* overflow */ } else { if (ar < 0) { @@ -196,7 +196,7 @@ static uint32_t cc_calc_comp_64(int64_t dst) static uint32_t cc_calc_add_32(int32_t a1, int32_t a2, int32_t ar) { - if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) { + if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar >= 0)) { return 3; /* overflow */ } else { if (ar < 0) { @@ -211,7 +211,7 @@ static uint32_t cc_calc_add_32(int32_t a1, int32_t a2, int32_t ar) static uint32_t cc_calc_sub_32(int32_t a1, int32_t a2, int32_t ar) { - if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) { + if ((a1 >= 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) { return 3; /* overflow */ } else { if (ar < 0) { @@ -268,36 +268,9 @@ static uint32_t cc_calc_icm(uint64_t mask, uint64_t val) } } -static uint32_t cc_calc_sla_32(uint32_t src, int shift) +static uint32_t cc_calc_sla(uint64_t src, int shift) { - uint32_t mask = ((1U << shift) - 1U) << (32 - shift); - uint32_t sign = 1U << 31; - uint32_t match; - int32_t r; - - /* Check if the sign bit stays the same. */ - if (src & sign) { - match = mask; - } else { - match = 0; - } - if ((src & mask) != match) { - /* Overflow. 
*/ - return 3; - } - - r = ((src << shift) & ~sign) | (src & sign); - if (r == 0) { - return 0; - } else if (r < 0) { - return 1; - } - return 2; -} - -static uint32_t cc_calc_sla_64(uint64_t src, int shift) -{ - uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift); + uint64_t mask = -1ULL << (63 - shift); uint64_t sign = 1ULL << 63; uint64_t match; int64_t r; @@ -459,11 +432,8 @@ static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op, case CC_OP_ICM: r = cc_calc_icm(src, dst); break; - case CC_OP_SLA_32: - r = cc_calc_sla_32(src, dst); - break; - case CC_OP_SLA_64: - r = cc_calc_sla_64(src, dst); + case CC_OP_SLA: + r = cc_calc_sla(src, dst); break; case CC_OP_FLOGR: r = cc_calc_flogr(dst); @@ -517,6 +487,10 @@ void HELPER(sacf)(CPUS390XState *env, uint64_t a1) { HELPER_LOG("%s: %16" PRIx64 "\n", __func__, a1); + if (!(env->psw.mask & PSW_MASK_DAT)) { + tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, GETPC()); + } + switch (a1 & 0xf00) { case 0x000: env->psw.mask &= ~PSW_MASK_ASC; @@ -527,6 +501,9 @@ void HELPER(sacf)(CPUS390XState *env, uint64_t a1) env->psw.mask |= PSW_ASC_SECONDARY; break; case 0x300: + if ((env->psw.mask & PSW_MASK_PSTATE) != 0) { + tcg_s390_program_interrupt(env, PGM_PRIVILEGED, GETPC()); + } env->psw.mask &= ~PSW_MASK_ASC; env->psw.mask |= PSW_ASC_HOME; break; diff --git a/target/s390x/tcg/crypto_helper.c b/target/s390x/tcg/crypto_helper.c index 138d9e7ad9..762b277884 100644 --- a/target/s390x/tcg/crypto_helper.c +++ b/target/s390x/tcg/crypto_helper.c @@ -1,10 +1,12 @@ /* * s390x crypto helpers * + * Copyright (C) 2022 Jason A. Donenfeld . All Rights Reserved. * Copyright (c) 2017 Red Hat Inc * * Authors: * David Hildenbrand + * Jason A. Donenfeld * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. 
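The unified cc_calc_sla() above replaces the separate 32-bit and 64-bit routines with a single 64-bit check: the mask -1ULL << (63 - shift) selects the sign bit together with the next bits below it up to the shift count, i.e. every bit that the left shift moves out of or into the sign position, and CC 3 (overflow) is returned whenever those bits are not all copies of the sign. A minimal standalone sketch of that check with an illustrative driver; the function name and the values in main() are made up for illustration and are not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the overflow/CC logic of the unified cc_calc_sla() above. */
static uint32_t sla_cc_sketch(uint64_t src, int shift)
{
    uint64_t mask = -1ULL << (63 - shift); /* sign bit + bits shifted through it */
    uint64_t sign = 1ULL << 63;
    uint64_t match = (src & sign) ? mask : 0;
    int64_t r;

    if ((src & mask) != match) {
        return 3; /* a shifted-out bit differs from the sign: overflow */
    }
    /* The result keeps the original sign bit, as SHIFT LEFT SINGLE does. */
    r = (int64_t)(((src << shift) & ~sign) | (src & sign));
    return r == 0 ? 0 : (r < 0 ? 1 : 2);
}

int main(void)
{
    printf("cc=%u\n", sla_cc_sketch(1ULL << 62, 1)); /* bit 62 would reach the sign */
    printf("cc=%u\n", sla_cc_sketch(1, 3));          /* result 8, positive */
    printf("cc=%u\n", sla_cc_sketch(0, 17));         /* result zero */
    return 0;
}

Compiled on its own, this prints cc=3, cc=2 and cc=0 for the three calls, matching the condition codes the helper above would compute for the same inputs.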
@@ -12,12 +14,262 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" +#include "qemu/guest-random.h" #include "s390x-internal.h" #include "tcg_s390x.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" +static uint64_t R(uint64_t x, int c) +{ + return (x >> c) | (x << (64 - c)); +} +static uint64_t Ch(uint64_t x, uint64_t y, uint64_t z) +{ + return (x & y) ^ (~x & z); +} +static uint64_t Maj(uint64_t x, uint64_t y, uint64_t z) +{ + return (x & y) ^ (x & z) ^ (y & z); +} +static uint64_t Sigma0(uint64_t x) +{ + return R(x, 28) ^ R(x, 34) ^ R(x, 39); +} +static uint64_t Sigma1(uint64_t x) +{ + return R(x, 14) ^ R(x, 18) ^ R(x, 41); +} +static uint64_t sigma0(uint64_t x) +{ + return R(x, 1) ^ R(x, 8) ^ (x >> 7); +} +static uint64_t sigma1(uint64_t x) +{ + return R(x, 19) ^ R(x, 61) ^ (x >> 6); +} + +static const uint64_t K[80] = { + 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, + 0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, + 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, 0xd807aa98a3030242ULL, + 0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL, + 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL, + 0xc19bf174cf692694ULL, 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, + 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, 0x2de92c6f592b0275ULL, + 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL, + 0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL, + 0xbf597fc7beef0ee4ULL, 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, + 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, 0x27b70a8546d22ffcULL, + 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL, + 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL, + 0x92722c851482353bULL, 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, + 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, 0xd192e819d6ef5218ULL, + 0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL, + 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL, + 0x34b0bcb5e19b48a8ULL, 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, + 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL, 0x748f82ee5defb2fcULL, + 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL, + 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL, + 0xc67178f2e372532bULL, 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, + 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, 0x06f067aa72176fbaULL, + 0x0a637dc5a2c898a6ULL, 0x113f9804bef90daeULL, 0x1b710b35131c471bULL, + 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL, + 0x431d67c49c100d4cULL, 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, + 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL +}; + +/* a is icv/ocv, w is a single message block. w will get reused internally. */ +static void sha512_bda(uint64_t a[8], uint64_t w[16]) +{ + uint64_t t, z[8], b[8]; + int i, j; + + memcpy(z, a, sizeof(z)); + for (i = 0; i < 80; i++) { + memcpy(b, a, sizeof(b)); + + t = a[7] + Sigma1(a[4]) + Ch(a[4], a[5], a[6]) + K[i] + w[i % 16]; + b[7] = t + Sigma0(a[0]) + Maj(a[0], a[1], a[2]); + b[3] += t; + for (j = 0; j < 8; ++j) { + a[(j + 1) % 8] = b[j]; + } + if (i % 16 == 15) { + for (j = 0; j < 16; ++j) { + w[j] += w[(j + 9) % 16] + sigma0(w[(j + 1) % 16]) + + sigma1(w[(j + 14) % 16]); + } + } + } + + for (i = 0; i < 8; i++) { + a[i] += z[i]; + } +} + +/* a is icv/ocv, w is a single message block that needs be64 conversion. 
*/ +static void sha512_bda_be64(uint64_t a[8], uint64_t w[16]) +{ + uint64_t t[16]; + int i; + + for (i = 0; i < 16; i++) { + t[i] = be64_to_cpu(w[i]); + } + sha512_bda(a, t); +} + +static void sha512_read_icv(CPUS390XState *env, uint64_t addr, + uint64_t a[8], uintptr_t ra) +{ + int i; + + for (i = 0; i < 8; i++, addr += 8) { + addr = wrap_address(env, addr); + a[i] = cpu_ldq_be_data_ra(env, addr, ra); + } +} + +static void sha512_write_ocv(CPUS390XState *env, uint64_t addr, + uint64_t a[8], uintptr_t ra) +{ + int i; + + for (i = 0; i < 8; i++, addr += 8) { + addr = wrap_address(env, addr); + cpu_stq_be_data_ra(env, addr, a[i], ra); + } +} + +static void sha512_read_block(CPUS390XState *env, uint64_t addr, + uint64_t a[16], uintptr_t ra) +{ + int i; + + for (i = 0; i < 16; i++, addr += 8) { + addr = wrap_address(env, addr); + a[i] = cpu_ldq_be_data_ra(env, addr, ra); + } +} + +static void sha512_read_mbl_be64(CPUS390XState *env, uint64_t addr, + uint8_t a[16], uintptr_t ra) +{ + int i; + + for (i = 0; i < 16; i++, addr += 1) { + addr = wrap_address(env, addr); + a[i] = cpu_ldub_data_ra(env, addr, ra); + } +} + +static int cpacf_sha512(CPUS390XState *env, uintptr_t ra, uint64_t param_addr, + uint64_t *message_reg, uint64_t *len_reg, uint32_t type) +{ + enum { MAX_BLOCKS_PER_RUN = 64 }; /* Arbitrary: keep interactivity. */ + uint64_t len = *len_reg, a[8], processed = 0; + int i, message_reg_len = 64; + + g_assert(type == S390_FEAT_TYPE_KIMD || type == S390_FEAT_TYPE_KLMD); + + if (!(env->psw.mask & PSW_MASK_64)) { + len = (uint32_t)len; + message_reg_len = (env->psw.mask & PSW_MASK_32) ? 32 : 24; + } + + /* KIMD: length has to be properly aligned. */ + if (type == S390_FEAT_TYPE_KIMD && !QEMU_IS_ALIGNED(len, 128)) { + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); + } + + sha512_read_icv(env, param_addr, a, ra); + + /* Process full blocks first. */ + for (; len >= 128; len -= 128, processed += 128) { + uint64_t w[16]; + + if (processed >= MAX_BLOCKS_PER_RUN * 128) { + break; + } + + sha512_read_block(env, *message_reg + processed, w, ra); + sha512_bda(a, w); + } + + /* KLMD: Process partial/empty block last. */ + if (type == S390_FEAT_TYPE_KLMD && len < 128) { + uint8_t x[128]; + + /* Read the remainder of the message byte-per-byte. */ + for (i = 0; i < len; i++) { + uint64_t addr = wrap_address(env, *message_reg + processed + i); + + x[i] = cpu_ldub_data_ra(env, addr, ra); + } + /* Pad the remainder with zero and set the top bit. */ + memset(x + len, 0, 128 - len); + x[len] = 128; + + /* + * Place the MBL either into this block (if there is space left), + * or use an additional one. + */ + if (len < 112) { + sha512_read_mbl_be64(env, param_addr + 64, x + 112, ra); + } + sha512_bda_be64(a, (uint64_t *)x); + + if (len >= 112) { + memset(x, 0, 112); + sha512_read_mbl_be64(env, param_addr + 64, x + 112, ra); + sha512_bda_be64(a, (uint64_t *)x); + } + + processed += len; + len = 0; + } + + /* + * Modify memory after we read all inputs and modify registers only after + * writing memory succeeded. + * + * TODO: if writing fails halfway through (e.g., when crossing page + * boundaries), we're in trouble. We'd need something like access_prepare(). + */ + sha512_write_ocv(env, param_addr, a, ra); + *message_reg = deposit64(*message_reg, 0, message_reg_len, + *message_reg + processed); + *len_reg -= processed; + return !len ? 
0 : 3; +} + +static void fill_buf_random(CPUS390XState *env, uintptr_t ra, + uint64_t *buf_reg, uint64_t *len_reg) +{ + uint8_t tmp[256]; + uint64_t len = *len_reg; + int buf_reg_len = 64; + + if (!(env->psw.mask & PSW_MASK_64)) { + len = (uint32_t)len; + buf_reg_len = (env->psw.mask & PSW_MASK_32) ? 32 : 24; + } + + while (len) { + size_t block = MIN(len, sizeof(tmp)); + + qemu_guest_getrandom_nofail(tmp, block); + for (size_t i = 0; i < block; ++i) { + cpu_stb_data_ra(env, wrap_address(env, *buf_reg), tmp[i], ra); + *buf_reg = deposit64(*buf_reg, 0, buf_reg_len, *buf_reg + 1); + --*len_reg; + } + len -= block; + } +} + uint32_t HELPER(msa)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t r3, uint32_t type) { @@ -52,6 +304,13 @@ uint32_t HELPER(msa)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t r3, cpu_stb_data_ra(env, param_addr, subfunc[i], ra); } break; + case 3: /* CPACF_*_SHA_512 */ + return cpacf_sha512(env, ra, env->regs[1], &env->regs[r2], + &env->regs[r2 + 1], type); + case 114: /* CPACF_PRNO_TRNG */ + fill_buf_random(env, ra, &env->regs[r1], &env->regs[r1 + 1]); + fill_buf_random(env, ra, &env->regs[r2], &env->regs[r2 + 1]); + break; default: /* we don't implement any other subfunction yet */ g_assert_not_reached(); diff --git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c index a61917d04f..fe02d82201 100644 --- a/target/s390x/tcg/excp_helper.c +++ b/target/s390x/tcg/excp_helper.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "s390x-internal.h" #include "exec/helper-proto.h" @@ -33,20 +34,20 @@ #include "hw/boards.h" #endif -void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, - uint32_t code, uintptr_t ra) +G_NORETURN void tcg_s390_program_interrupt(CPUS390XState *env, + uint32_t code, uintptr_t ra) { CPUState *cs = env_cpu(env); - cpu_restore_state(cs, ra, true); + cpu_restore_state(cs, ra); qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n", env->psw.addr); trigger_pgm_exception(env, code); cpu_loop_exit(cs); } -void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc, - uintptr_t ra) +G_NORETURN void tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc, + uintptr_t ra) { g_assert(dxc <= 0xff); #if !defined(CONFIG_USER_ONLY) @@ -62,8 +63,8 @@ void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc, tcg_s390_program_interrupt(env, PGM_DATA, ra); } -void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc, - uintptr_t ra) +G_NORETURN void tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc, + uintptr_t ra) { g_assert(vxc <= 0xff); #if !defined(CONFIG_USER_ONLY) @@ -82,6 +83,20 @@ void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc) tcg_s390_data_exception(env, dxc, GETPC()); } +/* + * Unaligned accesses are only diagnosed with MO_ALIGN. At the moment, + * this is only for the atomic operations, for which we want to raise a + * specification exception. 
+ */ +static G_NORETURN +void do_unaligned_access(CPUState *cs, uintptr_t retaddr) +{ + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; + + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr); +} + #if defined(CONFIG_USER_ONLY) void s390_cpu_do_interrupt(CPUState *cs) @@ -89,19 +104,29 @@ void s390_cpu_do_interrupt(CPUState *cs) cs->exception_index = -1; } -bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) +void s390_cpu_record_sigsegv(CPUState *cs, vaddr address, + MMUAccessType access_type, + bool maperr, uintptr_t retaddr) { S390CPU *cpu = S390_CPU(cs); - trigger_pgm_exception(&cpu->env, PGM_ADDRESSING); - /* On real machines this value is dropped into LowMem. Since this - is userland, simply put this someplace that cpu_loop can find it. */ - cpu->env.__excp_addr = address; + trigger_pgm_exception(&cpu->env, maperr ? PGM_ADDRESSING : PGM_PROTECTION); + /* + * On real machines this value is dropped into LowMem. Since this + * is userland, simply put this someplace that cpu_loop can find it. + * S390 only gives the page of the fault, not the exact address. + * C.f. the construction of TEC in mmu_translate(). + */ + cpu->env.__excp_addr = address & TARGET_PAGE_MASK; cpu_loop_exit_restore(cs, retaddr); } +void s390_cpu_record_sigbus(CPUState *cs, vaddr address, + MMUAccessType access_type, uintptr_t retaddr) +{ + do_unaligned_access(cs, retaddr); +} + #else /* !CONFIG_USER_ONLY */ static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx) @@ -150,19 +175,6 @@ bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size, g_assert_not_reached(); } - /* check out of RAM access */ - if (!excp && - !address_space_access_valid(&address_space_memory, raddr, - TARGET_PAGE_SIZE, access_type, - MEMTXATTRS_UNSPECIFIED)) { - MachineState *ms = MACHINE(qdev_get_machine()); - qemu_log_mask(CPU_LOG_MMU, - "%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", - __func__, (uint64_t)raddr, (uint64_t)ms->ram_size); - excp = PGM_ADDRESSING; - tec = 0; /* unused */ - } - env->tlb_fill_exc = excp; env->tlb_fill_tec = tec; @@ -541,7 +553,7 @@ try_deliver: /* don't trigger a cpu_loop_exit(), use an interrupt instead */ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT); } else if (cs->halted) { - /* unhalt if we had a WAIT PSW somehwere in our injection chain */ + /* unhalt if we had a WAIT PSW somewhere in our injection chain */ s390_cpu_unhalt(cpu); } } @@ -602,22 +614,17 @@ void s390x_cpu_debug_excp_handler(CPUState *cs) } } -/* Unaligned accesses are only diagnosed with MO_ALIGN. At the moment, - this is only for the atomic operations, for which we want to raise a - specification exception. 
*/ void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; - - tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr); + do_unaligned_access(cs, retaddr); } -static void QEMU_NORETURN monitor_event(CPUS390XState *env, - uint64_t monitor_code, - uint8_t monitor_class, uintptr_t ra) +static G_NORETURN +void monitor_event(CPUS390XState *env, + uint64_t monitor_code, + uint8_t monitor_class, uintptr_t ra) { /* Store the Monitor Code and the Monitor Class Number into the lowcore */ stq_phys(env_cpu(env)->as, diff --git a/target/s390x/tcg/fpu_helper.c b/target/s390x/tcg/fpu_helper.c index 4067205405..be80b2373c 100644 --- a/target/s390x/tcg/fpu_helper.c +++ b/target/s390x/tcg/fpu_helper.c @@ -89,7 +89,7 @@ static void handle_exceptions(CPUS390XState *env, bool XxC, uintptr_t retaddr) /* * invalid/divbyzero cannot coexist with other conditions. * overflow/underflow however can coexist with inexact, we have to - * handle it separatly. + * handle it separately. */ if (s390_exc & ~S390_IEEE_MASK_INEXACT) { if (s390_exc & ~S390_IEEE_MASK_INEXACT & env->fpc >> 24) { diff --git a/target/s390x/tcg/insn-data.def b/target/s390x/tcg/insn-data.h.inc similarity index 93% rename from target/s390x/tcg/insn-data.def rename to target/s390x/tcg/insn-data.h.inc index 3e5594210c..4249632af3 100644 --- a/target/s390x/tcg/insn-data.def +++ b/target/s390x/tcg/insn-data.h.inc @@ -8,7 +8,7 @@ * * OPC = (op << 8) | op2 where op is the major, op2 the minor opcode * NAME = name of the opcode, used internally - * FMT = format of the opcode (defined in insn-format.def) + * FMT = format of the opcode (defined in insn-format.h.inc) * FAC = facility the opcode is available in (defined in DisasFacility) * I1 = func in1_xx fills o->in1 * I2 = func in2_xx fills o->in2 @@ -45,7 +45,7 @@ D(0xeb6a, ASI, SIY, GIE, la1, i2, new, 0, asi, adds32, MO_TESL) C(0xecd8, AHIK, RIE_d, DO, r3, i2, new, r1_32, add, adds32) C(0xc208, AGFI, RIL_a, EI, r1, i2, r1, 0, add, adds64) - D(0xeb7a, AGSI, SIY, GIE, la1, i2, new, 0, asi, adds64, MO_TEQ) + D(0xeb7a, AGSI, SIY, GIE, la1, i2, new, 0, asi, adds64, MO_TEUQ) C(0xecd9, AGHIK, RIE_d, DO, r3, i2, r1, 0, add, adds64) /* ADD IMMEDIATE HIGH */ C(0xcc08, AIH, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, adds32) @@ -76,7 +76,7 @@ /* ADD LOGICAL WITH SIGNED IMMEDIATE */ D(0xeb6e, ALSI, SIY, GIE, la1, i2_32u, new, 0, asi, addu32, MO_TEUL) C(0xecda, ALHSIK, RIE_d, DO, r3_32u, i2_32u, new, r1_32, add, addu32) - D(0xeb7e, ALGSI, SIY, GIE, la1, i2, new, 0, asiu64, addu64, MO_TEQ) + D(0xeb7e, ALGSI, SIY, GIE, la1, i2, new, 0, asiu64, addu64, MO_TEUQ) C(0xecdb, ALGHSIK, RIE_d, DO, r3, i2, r1, 0, addu64, addu64) /* ADD LOGICAL WITH SIGNED IMMEDIATE HIGH */ C(0xcc0a, ALSIH, RIL_a, HW, r1_sr32, i2_32u, new, r1_32h, add, addu32) @@ -105,6 +105,9 @@ D(0xa507, NILL, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1000) D(0x9400, NI, SI, Z, la1, i2_8u, new, 0, ni, nz64, MO_UB) D(0xeb54, NIY, SIY, LD, la1, i2_8u, new, 0, ni, nz64, MO_UB) +/* AND WITH COMPLEMENT */ + C(0xb9f5, NCRK, RRF_a, MIE3, r2, r3, new, r1_32, andc, nz32) + C(0xb9e5, NCGRK, RRF_a, MIE3, r2, r3, r1, 0, andc, nz64) /* BRANCH AND LINK */ C(0x0500, BALR, RR_a, Z, 0, r2_nz, r1, 0, bal, 0) @@ -196,8 +199,8 @@ C(0xe55c, CHSI, SIL, GIE, m1_32s, i2, 0, 0, 0, cmps64) C(0xe558, CGHSI, SIL, GIE, m1_64, i2, 0, 0, 0, cmps64) /* COMPARE HALFWORD RELATIVE LONG */ - C(0xc605, CHRL, RIL_b, GIE, r1_o, mri2_32s, 0, 0, 0, cmps32) - C(0xc604, 
CGHRL, RIL_b, GIE, r1_o, mri2_64, 0, 0, 0, cmps64) + C(0xc605, CHRL, RIL_b, GIE, r1_o, mri2_16s, 0, 0, 0, cmps32) + C(0xc604, CGHRL, RIL_b, GIE, r1_o, mri2_16s, 0, 0, 0, cmps64) /* COMPARE HIGH */ C(0xb9cd, CHHR, RRE, HW, r1_sr32, r2_sr32, 0, 0, 0, cmps32) C(0xb9dd, CHLR, RRE, HW, r1_sr32, r2_o, 0, 0, 0, cmps32) @@ -269,10 +272,10 @@ /* COMPARE AND SWAP */ D(0xba00, CS, RS_a, Z, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL) D(0xeb14, CSY, RSY_a, LD, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL) - D(0xeb30, CSG, RSY_a, Z, r3_o, r1_o, new, r1, cs, 0, MO_TEQ) + D(0xeb30, CSG, RSY_a, Z, r3_o, r1_o, new, r1, cs, 0, MO_TEUQ) /* COMPARE DOUBLE AND SWAP */ - D(0xbb00, CDS, RS_a, Z, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ) - D(0xeb31, CDSY, RSY_a, LD, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ) + D(0xbb00, CDS, RS_a, Z, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEUQ) + D(0xeb31, CDSY, RSY_a, LD, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEUQ) C(0xeb3e, CDSG, RSY_a, Z, 0, 0, 0, 0, cdsg, 0) /* COMPARE AND SWAP AND STORE */ C(0xc802, CSST, SSF, CASS, la1, a2, 0, 0, csst, 0) @@ -287,8 +290,8 @@ D(0xb961, CLGRT, RRF_c, GIE, r1_o, r2_o, 0, 0, ct, 0, 1) D(0xeb23, CLT, RSY_b, MIE, r1_32u, m2_32u, 0, 0, ct, 0, 1) D(0xeb2b, CLGT, RSY_b, MIE, r1_o, m2_64, 0, 0, ct, 0, 1) - D(0xec73, CLFIT, RIE_a, GIE, r1_32u, i2_32u, 0, 0, ct, 0, 1) - D(0xec71, CLGIT, RIE_a, GIE, r1_o, i2_32u, 0, 0, ct, 0, 1) + D(0xec73, CLFIT, RIE_a, GIE, r1_32u, i2_16u, 0, 0, ct, 0, 1) + D(0xec71, CLGIT, RIE_a, GIE, r1_o, i2_16u, 0, 0, ct, 0, 1) /* CONVERT TO DECIMAL */ C(0x4e00, CVD, RX_a, Z, r1_o, a2, 0, 0, cvd, 0) @@ -436,19 +439,19 @@ C(0xc000, LARL, RIL_b, Z, 0, ri2, 0, r1, mov2, 0) /* LOAD AND ADD */ D(0xebf8, LAA, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, laa, adds32, MO_TESL) - D(0xebe8, LAAG, RSY_a, ILA, r3, a2, new, in2_r1, laa, adds64, MO_TEQ) + D(0xebe8, LAAG, RSY_a, ILA, r3, a2, new, in2_r1, laa, adds64, MO_TEUQ) /* LOAD AND ADD LOGICAL */ D(0xebfa, LAAL, RSY_a, ILA, r3_32u, a2, new, in2_r1_32, laa, addu32, MO_TEUL) - D(0xebea, LAALG, RSY_a, ILA, r3, a2, new, in2_r1, laa, addu64, MO_TEQ) + D(0xebea, LAALG, RSY_a, ILA, r3, a2, new, in2_r1, laa, addu64, MO_TEUQ) /* LOAD AND AND */ D(0xebf4, LAN, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lan, nz32, MO_TESL) - D(0xebe4, LANG, RSY_a, ILA, r3, a2, new, in2_r1, lan, nz64, MO_TEQ) + D(0xebe4, LANG, RSY_a, ILA, r3, a2, new, in2_r1, lan, nz64, MO_TEUQ) /* LOAD AND EXCLUSIVE OR */ D(0xebf7, LAX, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lax, nz32, MO_TESL) - D(0xebe7, LAXG, RSY_a, ILA, r3, a2, new, in2_r1, lax, nz64, MO_TEQ) + D(0xebe7, LAXG, RSY_a, ILA, r3, a2, new, in2_r1, lax, nz64, MO_TEUQ) /* LOAD AND OR */ D(0xebf6, LAO, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lao, nz32, MO_TESL) - D(0xebe6, LAOG, RSY_a, ILA, r3, a2, new, in2_r1, lao, nz64, MO_TEQ) + D(0xebe6, LAOG, RSY_a, ILA, r3, a2, new, in2_r1, lao, nz64, MO_TEUQ) /* LOAD AND TEST */ C(0x1200, LTR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, s32) C(0xb902, LTGR, RRE, Z, 0, r2_o, 0, r1, mov2, s64) @@ -463,7 +466,7 @@ C(0xe39f, LAT, RXY_a, LAT, 0, m2_32u, r1, 0, lat, 0) C(0xe385, LGAT, RXY_a, LAT, 0, a2, r1, 0, lgat, 0) /* LOAD AND ZERO RIGHTMOST BYTE */ - C(0xe3eb, LZRF, RXY_a, LZRB, 0, m2_32u, new, r1_32, lzrb, 0) + C(0xe33b, LZRF, RXY_a, LZRB, 0, m2_32u, new, r1_32, lzrb, 0) C(0xe32a, LZRG, RXY_a, LZRB, 0, m2_64, r1, 0, lzrb, 0) /* LOAD LOGICAL AND ZERO RIGHTMOST BYTE */ C(0xe33a, LLZRGF, RXY_a, LZRB, 0, m2_32u, r1, 0, lzrb, 0) @@ -483,7 +486,7 @@ F(0xb343, LCXBR, RRE, Z, x2h, x2l, new_P, x1, negf128, f128, IF_BFP) F(0xb373, LCDFR, RRE, FPSSH, 
0, f2, new, f1, negf64, 0, IF_AFP1 | IF_AFP2) /* LOAD COUNT TO BLOCK BOUNDARY */ - C(0xe727, LCBB, RXE, V, la2, 0, r1, 0, lcbb, 0) + C(0xe727, LCBB, RXE, V, la2, 0, new, r1_32, lcbb, 0) /* LOAD HALFWORD */ C(0xb927, LHR, RRE, EI, 0, r2_16s, 0, r1_32, mov2, 0) C(0xb907, LGHR, RRE, EI, 0, r2_16s, 0, r1, mov2, 0) @@ -561,11 +564,11 @@ C(0xec46, LOCGHI, RIE_g, LOC2, r1, i2, r1, 0, loc, 0) C(0xec4e, LOCHHI, RIE_g, LOC2, r1_sr32, i2, new, r1_32h, loc, 0) /* LOAD HIGH ON CONDITION */ - C(0xb9e0, LOCFHR, RRF_c, LOC2, r1_sr32, r2, new, r1_32h, loc, 0) + C(0xb9e0, LOCFHR, RRF_c, LOC2, r1_sr32, r2_sr32, new, r1_32h, loc, 0) C(0xebe0, LOCFH, RSY_b, LOC2, r1_sr32, m2_32u, new, r1_32h, loc, 0) /* LOAD PAIR DISJOINT */ D(0xc804, LPD, SSF, ILA, 0, 0, new_P, r3_P32, lpd, 0, MO_TEUL) - D(0xc805, LPDG, SSF, ILA, 0, 0, new_P, r3_P64, lpd, 0, MO_TEQ) + D(0xc805, LPDG, SSF, ILA, 0, 0, new_P, r3_P64, lpd, 0, MO_TEUQ) /* LOAD PAIR FROM QUADWORD */ C(0xe38f, LPQ, RXY_a, Z, 0, a2, r1_P, 0, lpq, 0) /* LOAD POSITIVE */ @@ -603,7 +606,7 @@ F(0xed04, LDEB, RXE, Z, 0, m2_32u, new, f1, ldeb, 0, IF_BFP) F(0xed05, LXDB, RXE, Z, 0, m2_64, new_P, x1, lxdb, 0, IF_BFP) F(0xed06, LXEB, RXE, Z, 0, m2_32u, new_P, x1, lxeb, 0, IF_BFP) - F(0xb324, LDER, RXE, Z, 0, e2, new, f1, lde, 0, IF_AFP1) + F(0xb324, LDER, RRE, Z, 0, e2, new, f1, lde, 0, IF_AFP1) F(0xed24, LDE, RXE, Z, 0, m2_32u, new, f1, lde, 0, IF_AFP1) /* LOAD ROUNDED */ F(0xb344, LEDBR, RRF_e, Z, 0, f2, new, e1, ledb, 0, IF_BFP) @@ -640,6 +643,8 @@ C(0xeb8e, MVCLU, RSY_a, E2, 0, a2, 0, 0, mvclu, 0) /* MOVE NUMERICS */ C(0xd100, MVN, SS_a, Z, la1, a2, 0, 0, mvn, 0) +/* MOVE RIGHT TO LEFT */ + C(0xe50a, MVCRL, SSE, MIE3, la1, a2, 0, 0, mvcrl, 0) /* MOVE PAGE */ C(0xb254, MVPG, RRE, Z, 0, 0, 0, 0, mvpg, 0) /* MOVE STRING */ @@ -707,6 +712,16 @@ F(0xed0f, MSEB, RXF, Z, e1, m2_32u, new, e1, mseb, 0, IF_BFP) F(0xed1f, MSDB, RXF, Z, f1, m2_64, new, f1, msdb, 0, IF_BFP) +/* NAND */ + C(0xb974, NNRK, RRF_a, MIE3, r2, r3, new, r1_32, nand, nz32) + C(0xb964, NNGRK, RRF_a, MIE3, r2, r3, r1, 0, nand, nz64) +/* NOR */ + C(0xb976, NORK, RRF_a, MIE3, r2, r3, new, r1_32, nor, nz32) + C(0xb966, NOGRK, RRF_a, MIE3, r2, r3, r1, 0, nor, nz64) +/* NOT EXCLUSIVE OR */ + C(0xb977, NXRK, RRF_a, MIE3, r2, r3, new, r1_32, nxor, nz32) + C(0xb967, NXGRK, RRF_a, MIE3, r2, r3, r1, 0, nxor, nz64) + /* OR */ C(0x1600, OR, RR_a, Z, r1, r2, new, r1_32, or, nz32) C(0xb9f6, ORK, RRF_a, DO, r2, r3, new, r1_32, or, nz32) @@ -725,6 +740,9 @@ D(0xa50b, OILL, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1000) D(0x9600, OI, SI, Z, la1, i2_8u, new, 0, oi, nz64, MO_UB) D(0xeb56, OIY, SIY, LD, la1, i2_8u, new, 0, oi, nz64, MO_UB) +/* OR WITH COMPLEMENT */ + C(0xb975, OCRK, RRF_a, MIE3, r2, r3, new, r1_32, orc, nz32) + C(0xb965, OCGRK, RRF_a, MIE3, r2, r3, r1, 0, orc, nz64) /* PACK */ /* Really format SS_b, but we pack both lengths into one argument @@ -735,6 +753,9 @@ /* PACK UNICODE */ C(0xe100, PKU, SS_f, E2, la1, a2, 0, 0, pku, 0) +/* POPULATION COUNT */ + C(0xb9e1, POPCNT, RRF_c, PC, 0, r2_o, r1, 0, popcnt, nz64) + /* PREFETCH */ /* Implemented as nops of course. */ C(0xe336, PFD, RXY_b, GIE, 0, 0, 0, 0, 0, 0) @@ -743,12 +764,9 @@ /* Implemented as nop of course. 
*/ C(0xb2e8, PPA, RRF_c, PPA, 0, 0, 0, 0, 0, 0) -/* POPULATION COUNT */ - C(0xb9e1, POPCNT, RRE, PC, 0, r2_o, r1, 0, popcnt, nz64) - /* ROTATE LEFT SINGLE LOGICAL */ - C(0xeb1d, RLL, RSY_a, Z, r3_o, sh32, new, r1_32, rll32, 0) - C(0xeb1c, RLLG, RSY_a, Z, r3_o, sh64, r1, 0, rll64, 0) + C(0xeb1d, RLL, RSY_a, Z, r3_o, sh, new, r1_32, rll32, 0) + C(0xeb1c, RLLG, RSY_a, Z, r3_o, sh, r1, 0, rll64, 0) /* ROTATE THEN INSERT SELECTED BITS */ C(0xec55, RISBG, RIE_f, GIE, 0, r2, r1, 0, risbg, s64) @@ -765,6 +783,12 @@ /* SEARCH STRING UNICODE */ C(0xb9be, SRSTU, RRE, ETF3, 0, 0, 0, 0, srstu, 0) +/* SELECT */ + C(0xb9f0, SELR, RRF_a, MIE3, r3, r2, new, r1_32, loc, 0) + C(0xb9e3, SELGR, RRF_a, MIE3, r3, r2, r1, 0, loc, 0) +/* SELECT HIGH */ + C(0xb9c0, SELFHR, RRF_a, MIE3, r3_sr32, r2_sr32, new, r1_32h, loc, 0) + /* SET ACCESS */ C(0xb24e, SAR, RRE, Z, 0, r2_o, 0, 0, sar, 0) /* SET ADDRESSING MODE */ @@ -784,29 +808,29 @@ C(0x0400, SPM, RR_a, Z, r1, 0, 0, 0, spm, 0) /* SHIFT LEFT SINGLE */ - D(0x8b00, SLA, RS_a, Z, r1, sh32, new, r1_32, sla, 0, 31) - D(0xebdd, SLAK, RSY_a, DO, r3, sh32, new, r1_32, sla, 0, 31) - D(0xeb0b, SLAG, RSY_a, Z, r3, sh64, r1, 0, sla, 0, 63) + D(0x8b00, SLA, RS_a, Z, r1, sh, new, r1_32, sla, 0, 31) + D(0xebdd, SLAK, RSY_a, DO, r3, sh, new, r1_32, sla, 0, 31) + D(0xeb0b, SLAG, RSY_a, Z, r3, sh, r1, 0, sla, 0, 63) /* SHIFT LEFT SINGLE LOGICAL */ - C(0x8900, SLL, RS_a, Z, r1_o, sh32, new, r1_32, sll, 0) - C(0xebdf, SLLK, RSY_a, DO, r3_o, sh32, new, r1_32, sll, 0) - C(0xeb0d, SLLG, RSY_a, Z, r3_o, sh64, r1, 0, sll, 0) + C(0x8900, SLL, RS_a, Z, r1_o, sh, new, r1_32, sll, 0) + C(0xebdf, SLLK, RSY_a, DO, r3_o, sh, new, r1_32, sll, 0) + C(0xeb0d, SLLG, RSY_a, Z, r3_o, sh, r1, 0, sll, 0) /* SHIFT RIGHT SINGLE */ - C(0x8a00, SRA, RS_a, Z, r1_32s, sh32, new, r1_32, sra, s32) - C(0xebdc, SRAK, RSY_a, DO, r3_32s, sh32, new, r1_32, sra, s32) - C(0xeb0a, SRAG, RSY_a, Z, r3_o, sh64, r1, 0, sra, s64) + C(0x8a00, SRA, RS_a, Z, r1_32s, sh, new, r1_32, sra, s32) + C(0xebdc, SRAK, RSY_a, DO, r3_32s, sh, new, r1_32, sra, s32) + C(0xeb0a, SRAG, RSY_a, Z, r3_o, sh, r1, 0, sra, s64) /* SHIFT RIGHT SINGLE LOGICAL */ - C(0x8800, SRL, RS_a, Z, r1_32u, sh32, new, r1_32, srl, 0) - C(0xebde, SRLK, RSY_a, DO, r3_32u, sh32, new, r1_32, srl, 0) - C(0xeb0c, SRLG, RSY_a, Z, r3_o, sh64, r1, 0, srl, 0) + C(0x8800, SRL, RS_a, Z, r1_32u, sh, new, r1_32, srl, 0) + C(0xebde, SRLK, RSY_a, DO, r3_32u, sh, new, r1_32, srl, 0) + C(0xeb0c, SRLG, RSY_a, Z, r3_o, sh, r1, 0, srl, 0) /* SHIFT LEFT DOUBLE */ - D(0x8f00, SLDA, RS_a, Z, r1_D32, sh64, new, r1_D32, sla, 0, 31) + D(0x8f00, SLDA, RS_a, Z, r1_D32, sh, new, r1_D32, sla, 0, 63) /* SHIFT LEFT DOUBLE LOGICAL */ - C(0x8d00, SLDL, RS_a, Z, r1_D32, sh64, new, r1_D32, sll, 0) + C(0x8d00, SLDL, RS_a, Z, r1_D32, sh, new, r1_D32, sll, 0) /* SHIFT RIGHT DOUBLE */ - C(0x8e00, SRDA, RS_a, Z, r1_D32, sh64, new, r1_D32, sra, s64) + C(0x8e00, SRDA, RS_a, Z, r1_D32, sh, new, r1_D32, sra, s64) /* SHIFT RIGHT DOUBLE LOGICAL */ - C(0x8c00, SRDL, RS_a, Z, r1_D32, sh64, new, r1_D32, srl, 0) + C(0x8c00, SRDL, RS_a, Z, r1_D32, sh, new, r1_D32, srl, 0) /* SQUARE ROOT */ F(0xb314, SQEBR, RRE, Z, 0, e2, new, e1, sqeb, 0, IF_BFP) @@ -1003,6 +1027,16 @@ F(0xe756, VLR, VRR_a, V, 0, 0, 0, 0, vlr, 0, IF_VEC) /* VECTOR LOAD AND REPLICATE */ F(0xe705, VLREP, VRX, V, la2, 0, 0, 0, vlrep, 0, IF_VEC) +/* VECTOR LOAD BYTE REVERSED ELEMENT */ + E(0xe601, VLEBRH, VRX, VE2, la2, 0, 0, 0, vlebr, 0, ES_16, IF_VEC) + E(0xe603, VLEBRF, VRX, VE2, la2, 0, 0, 0, vlebr, 0, ES_32, IF_VEC) + E(0xe602, VLEBRG, VRX, VE2, 
la2, 0, 0, 0, vlebr, 0, ES_64, IF_VEC) +/* VECTOR LOAD BYTE REVERSED ELEMENT AND REPLICATE */ + F(0xe605, VLBRREP, VRX, VE2, la2, 0, 0, 0, vlbrrep, 0, IF_VEC) +/* VECTOR LOAD BYTE REVERSED ELEMENT AND ZERO */ + F(0xe604, VLLEBRZ, VRX, VE2, la2, 0, 0, 0, vllebrz, 0, IF_VEC) +/* VECTOR LOAD BYTE REVERSED ELEMENTS */ + F(0xe606, VLBR, VRX, VE2, la2, 0, 0, 0, vlbr, 0, IF_VEC) /* VECTOR LOAD ELEMENT */ E(0xe700, VLEB, VRX, V, la2, 0, 0, 0, vle, 0, ES_8, IF_VEC) E(0xe701, VLEH, VRX, V, la2, 0, 0, 0, vle, 0, ES_16, IF_VEC) @@ -1013,6 +1047,8 @@ E(0xe741, VLEIH, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_16, IF_VEC) E(0xe743, VLEIF, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_32, IF_VEC) E(0xe742, VLEIG, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_64, IF_VEC) +/* VECTOR LOAD ELEMENTS REVERSED */ + F(0xe607, VLER, VRX, VE2, la2, 0, 0, 0, vler, 0, IF_VEC) /* VECTOR LOAD GR FROM VR ELEMENT */ F(0xe721, VLGV, VRS_c, V, la2, 0, r1, 0, vlgv, 0, IF_VEC) /* VECTOR LOAD LOGICAL ELEMENT AND ZERO */ @@ -1053,11 +1089,19 @@ F(0xe75f, VSEG, VRR_a, V, 0, 0, 0, 0, vseg, 0, IF_VEC) /* VECTOR STORE */ F(0xe70e, VST, VRX, V, la2, 0, 0, 0, vst, 0, IF_VEC) +/* VECTOR STORE BYTE REVERSED ELEMENT */ + E(0xe609, VSTEBRH, VRX, VE2, la2, 0, 0, 0, vstebr, 0, ES_16, IF_VEC) + E(0xe60b, VSTEBRF, VRX, VE2, la2, 0, 0, 0, vstebr, 0, ES_32, IF_VEC) + E(0xe60a, VSTEBRG, VRX, VE2, la2, 0, 0, 0, vstebr, 0, ES_64, IF_VEC) +/* VECTOR STORE BYTE REVERSED ELEMENTS */ + F(0xe60e, VSTBR, VRX, VE2, la2, 0, 0, 0, vstbr, 0, IF_VEC) /* VECTOR STORE ELEMENT */ E(0xe708, VSTEB, VRX, V, la2, 0, 0, 0, vste, 0, ES_8, IF_VEC) E(0xe709, VSTEH, VRX, V, la2, 0, 0, 0, vste, 0, ES_16, IF_VEC) E(0xe70b, VSTEF, VRX, V, la2, 0, 0, 0, vste, 0, ES_32, IF_VEC) E(0xe70a, VSTEG, VRX, V, la2, 0, 0, 0, vste, 0, ES_64, IF_VEC) +/* VECTOR STORE ELEMENTS REVERSED */ + F(0xe60f, VSTER, VRX, VE2, la2, 0, 0, 0, vster, 0, IF_VEC) /* VECTOR STORE MULTIPLE */ F(0xe73e, VSTM, VRS_a, V, la2, 0, 0, 0, vstm, 0, IF_VEC) /* VECTOR STORE WITH LENGTH */ @@ -1180,19 +1224,23 @@ F(0xe778, VESRLV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC) F(0xe738, VESRL, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC) /* VECTOR SHIFT LEFT */ - F(0xe774, VSL, VRR_c, V, 0, 0, 0, 0, vsl, 0, IF_VEC) + E(0xe774, VSL, VRR_c, V, 0, 0, 0, 0, vsl, 0, 0, IF_VEC) /* VECTOR SHIFT LEFT BY BYTE */ - F(0xe775, VSLB, VRR_c, V, 0, 0, 0, 0, vsl, 0, IF_VEC) + E(0xe775, VSLB, VRR_c, V, 0, 0, 0, 0, vsl, 0, 1, IF_VEC) +/* VECTOR SHIFT LEFT DOUBLE BY BIT */ + E(0xe786, VSLD, VRI_d, VE2, 0, 0, 0, 0, vsld, 0, 0, IF_VEC) /* VECTOR SHIFT LEFT DOUBLE BY BYTE */ - F(0xe777, VSLDB, VRI_d, V, 0, 0, 0, 0, vsldb, 0, IF_VEC) + E(0xe777, VSLDB, VRI_d, V, 0, 0, 0, 0, vsld, 0, 1, IF_VEC) /* VECTOR SHIFT RIGHT ARITHMETIC */ - F(0xe77e, VSRA, VRR_c, V, 0, 0, 0, 0, vsra, 0, IF_VEC) + E(0xe77e, VSRA, VRR_c, V, 0, 0, 0, 0, vsra, 0, 0, IF_VEC) /* VECTOR SHIFT RIGHT ARITHMETIC BY BYTE */ - F(0xe77f, VSRAB, VRR_c, V, 0, 0, 0, 0, vsra, 0, IF_VEC) + E(0xe77f, VSRAB, VRR_c, V, 0, 0, 0, 0, vsra, 0, 1, IF_VEC) +/* VECTOR SHIFT RIGHT DOUBLE BY BIT */ + F(0xe787, VSRD, VRI_d, VE2, 0, 0, 0, 0, vsrd, 0, IF_VEC) /* VECTOR SHIFT RIGHT LOGICAL */ - F(0xe77c, VSRL, VRR_c, V, 0, 0, 0, 0, vsrl, 0, IF_VEC) + E(0xe77c, VSRL, VRR_c, V, 0, 0, 0, 0, vsrl, 0, 0, IF_VEC) /* VECTOR SHIFT RIGHT LOGICAL BY BYTE */ - F(0xe77d, VSRLB, VRR_c, V, 0, 0, 0, 0, vsrl, 0, IF_VEC) + E(0xe77d, VSRLB, VRR_c, V, 0, 0, 0, 0, vsrl, 0, 1, IF_VEC) /* VECTOR SUBTRACT */ F(0xe7f7, VS, VRR_c, V, 0, 0, 0, 0, vs, 0, IF_VEC) /* VECTOR SUBTRACT COMPUTE BORROW INDICATION */ @@ -1222,6 +1270,8 @@ F(0xe75c, VISTR, VRR_a, V, 0, 0, 
0, 0, vistr, 0, IF_VEC) /* VECTOR STRING RANGE COMPARE */ F(0xe78a, VSTRC, VRR_d, V, 0, 0, 0, 0, vstrc, 0, IF_VEC) +/* VECTOR STRING SEARCH */ + F(0xe78b, VSTRS, VRR_d, VE2, 0, 0, 0, 0, vstrs, 0, IF_VEC) /* === Vector Floating-Point Instructions */ @@ -1279,7 +1329,7 @@ #ifndef CONFIG_USER_ONLY /* COMPARE AND SWAP AND PURGE */ E(0xb250, CSP, RRE, Z, r1_32u, ra2, r1_P, 0, csp, 0, MO_TEUL, IF_PRIV) - E(0xb98a, CSPG, RRE, DAT_ENH, r1_o, ra2, r1_P, 0, csp, 0, MO_TEQ, IF_PRIV) + E(0xb98a, CSPG, RRE, DAT_ENH, r1_o, ra2, r1_P, 0, csp, 0, MO_TEUQ, IF_PRIV) /* DIAGNOSE (KVM hypercall) */ F(0x8300, DIAG, RSI, Z, 0, 0, 0, 0, diag, 0, IF_PRIV | IF_IO) /* INSERT STORAGE KEY EXTENDED */ @@ -1303,7 +1353,7 @@ F(0xe303, LRAG, RXY_a, Z, 0, a2, r1, 0, lra, 0, IF_PRIV) /* LOAD USING REAL ADDRESS */ E(0xb24b, LURA, RRE, Z, 0, ra2, new, r1_32, lura, 0, MO_TEUL, IF_PRIV) - E(0xb905, LURAG, RRE, Z, 0, ra2, r1, 0, lura, 0, MO_TEQ, IF_PRIV) + E(0xb905, LURAG, RRE, Z, 0, ra2, r1, 0, lura, 0, MO_TEUQ, IF_PRIV) /* MOVE TO PRIMARY */ F(0xda00, MVCP, SS_d, Z, la1, a2, 0, 0, mvcp, 0, IF_PRIV) /* MOVE TO SECONDARY */ @@ -1315,9 +1365,9 @@ /* SERVICE CALL LOGICAL PROCESSOR (PV hypercall) */ F(0xb220, SERVC, RRE, Z, r1_o, r2_o, 0, 0, servc, 0, IF_PRIV | IF_IO) /* SET ADDRESS SPACE CONTROL FAST */ - F(0xb279, SACF, S, Z, 0, a2, 0, 0, sacf, 0, IF_PRIV) + C(0xb279, SACF, S, Z, 0, a2, 0, 0, sacf, 0) /* SET CLOCK */ - F(0xb204, SCK, S, Z, la2, 0, 0, 0, sck, 0, IF_PRIV | IF_IO) + F(0xb204, SCK, S, Z, 0, m2_64a, 0, 0, sck, 0, IF_PRIV | IF_IO) /* SET CLOCK COMPARATOR */ F(0xb206, SCKC, S, Z, 0, m2_64a, 0, 0, sckc, 0, IF_PRIV | IF_IO) /* SET CLOCK PROGRAMMABLE FIELD */ @@ -1357,7 +1407,7 @@ F(0xad00, STOSM, SI, Z, la1, 0, 0, 0, stnosm, 0, IF_PRIV) /* STORE USING REAL ADDRESS */ E(0xb246, STURA, RRE, Z, r1_o, ra2, 0, 0, stura, 0, MO_TEUL, IF_PRIV) - E(0xb925, STURG, RRE, Z, r1_o, ra2, 0, 0, stura, 0, MO_TEQ, IF_PRIV) + E(0xb925, STURG, RRE, Z, r1_o, ra2, 0, 0, stura, 0, MO_TEUQ, IF_PRIV) /* TEST BLOCK */ F(0xb22c, TB, RRE, Z, 0, r2_o, 0, 0, testblock, 0, IF_PRIV) /* TEST PROTECTION */ diff --git a/target/s390x/tcg/insn-format.def b/target/s390x/tcg/insn-format.h.inc similarity index 100% rename from target/s390x/tcg/insn-format.def rename to target/s390x/tcg/insn-format.h.inc diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c index 21a4de4067..7e7de5e2f1 100644 --- a/target/s390x/tcg/mem_helper.c +++ b/target/s390x/tcg/mem_helper.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "s390x-internal.h" #include "tcg_s390x.h" @@ -27,7 +28,7 @@ #include "exec/cpu_ldst.h" #include "qemu/int128.h" #include "qemu/atomic128.h" -#include "tcg/tcg.h" +#include "trace.h" #if !defined(CONFIG_USER_ONLY) #include "hw/s390x/storage-keys.h" @@ -141,24 +142,12 @@ static int s390_probe_access(CPUArchState *env, target_ulong addr, int size, MMUAccessType access_type, int mmu_idx, bool nonfault, void **phost, uintptr_t ra) { +#if defined(CONFIG_USER_ONLY) + return probe_access_flags(env, addr, access_type, mmu_idx, + nonfault, phost, ra); +#else int flags; -#if defined(CONFIG_USER_ONLY) - flags = page_get_flags(addr); - if (!(flags & (access_type == MMU_DATA_LOAD ? PAGE_READ : PAGE_WRITE_ORG))) { - env->__excp_addr = addr; - flags = (flags & PAGE_VALID) ? 
PGM_PROTECTION : PGM_ADDRESSING; - if (nonfault) { - return flags; - } - tcg_s390_program_interrupt(env, flags, ra); - } - *phost = g2h(env_cpu(env), addr); -#else - /* - * For !CONFIG_USER_ONLY, we cannot rely on TLB_INVALID_MASK or haddr==NULL - * to detect if there was an exception during tlb_fill(). - */ env->tlb_fill_exc = 0; flags = probe_access_flags(env, addr, access_type, mmu_idx, nonfault, phost, ra); @@ -173,8 +162,8 @@ static int s390_probe_access(CPUArchState *env, target_ulong addr, int size, (access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ), ra); } -#endif return 0; +#endif } static int access_prepare_nf(S390Access *access, CPUS390XState *env, @@ -238,7 +227,7 @@ static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr, g_assert(haddr); memset(haddr, byte, size); #else - TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); int i; if (likely(haddr)) { @@ -249,13 +238,13 @@ static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr, * page. This is especially relevant to speed up TLB_NOTDIRTY. */ g_assert(size > 0); - helper_ret_stb_mmu(env, vaddr, byte, oi, ra); + cpu_stb_mmu(env, vaddr, byte, oi, ra); haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx); if (likely(haddr)) { memset(haddr + 1, byte, size - 1); } else { for (i = 1; i < size; i++) { - helper_ret_stb_mmu(env, vaddr + i, byte, oi, ra); + cpu_stb_mmu(env, vaddr + i, byte, oi, ra); } } } @@ -281,7 +270,7 @@ static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr, #ifdef CONFIG_USER_ONLY return ldub_p(*haddr + offset); #else - TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); uint8_t byte; if (likely(*haddr)) { @@ -291,7 +280,7 @@ static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr, * Do a single access and test if we can then get access to the * page. This is especially relevant to speed up TLB_NOTDIRTY. */ - byte = helper_ret_ldub_mmu(env, vaddr + offset, oi, ra); + byte = cpu_ldb_mmu(env, vaddr + offset, oi, ra); *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx); return byte; #endif @@ -315,7 +304,7 @@ static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr, #ifdef CONFIG_USER_ONLY stb_p(*haddr + offset, byte); #else - TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); if (likely(*haddr)) { stb_p(*haddr + offset, byte); @@ -325,7 +314,7 @@ static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr, * Do a single access and test if we can then get access to the * page. This is especially relevant to speed up TLB_NOTDIRTY. 
*/ - helper_ret_stb_mmu(env, vaddr + offset, byte, oi, ra); + cpu_stb_mmu(env, vaddr + offset, byte, oi, ra); *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx); #endif } @@ -554,6 +543,26 @@ void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) do_helper_mvc(env, l, dest, src, GETPC()); } +/* move right to left */ +void HELPER(mvcrl)(CPUS390XState *env, uint64_t l, uint64_t dest, uint64_t src) +{ + const int mmu_idx = cpu_mmu_index(env, false); + const uint64_t ra = GETPC(); + S390Access srca, desta; + int32_t i; + + /* MVCRL always copies one more byte than specified - maximum is 256 */ + l++; + + srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); + desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); + + for (i = l - 1; i >= 0; i--) { + uint8_t byte = access_get_byte(env, &srca, i, ra); + access_set_byte(env, &desta, i, byte, ra); + } +} + /* move inverse */ void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) { @@ -1803,14 +1812,14 @@ void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr, Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]); Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]); int mem_idx; - TCGMemOpIdx oi; + MemOpIdx oi; Int128 oldv; bool fail; assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx); oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); fail = !int128_eq(oldv, cmpv); @@ -1883,7 +1892,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, uint32_t *haddr = g2h(env_cpu(env), a1); ov = qatomic_cmpxchg__nocheck(haddr, cv, nv); #else - TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx); + MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx); ov = cpu_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra); #endif } else { @@ -1903,7 +1912,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, if (parallel) { #ifdef CONFIG_ATOMIC64 - TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx); + MemOpIdx oi = make_memop_idx(MO_TEUQ | MO_ALIGN, mem_idx); ov = cpu_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra); #else /* Note that we asserted !parallel above. 
*/ @@ -1939,7 +1948,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra); cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra); } else if (HAVE_CMPXCHG128) { - TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx); ov = cpu_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra); cc = !int128_eq(ov, cv); } else { @@ -1978,7 +1987,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, cpu_stq_data_ra(env, a2 + 0, svh, ra); cpu_stq_data_ra(env, a2 + 8, svl, ra); } else if (HAVE_ATOMIC128) { - TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + MemOpIdx oi = make_memop_idx(MO_TEUQ | MO_ALIGN_16, mem_idx); Int128 sv = int128_make128(svl, svh); cpu_atomic_sto_be_mmu(env, a2, sv, oi, ra); } else { @@ -2171,22 +2180,28 @@ uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2) /* insert storage key extended */ uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2) { - MachineState *ms = MACHINE(qdev_get_machine()); static S390SKeysState *ss; static S390SKeysClass *skeyclass; uint64_t addr = wrap_address(env, r2); uint8_t key; + int rc; - if (addr > ms->ram_size) { - return 0; + addr = mmu_real2abs(env, addr); + if (!mmu_absolute_addr_valid(addr, false)) { + tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC()); } if (unlikely(!ss)) { ss = s390_get_skeys_device(); skeyclass = S390_SKEYS_GET_CLASS(ss); + if (skeyclass->enable_skeys && !skeyclass->enable_skeys(ss)) { + tlb_flush_all_cpus_synced(env_cpu(env)); + } } - if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) { + rc = skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key); + if (rc) { + trace_get_skeys_nonzero(rc); return 0; } return key; @@ -2195,23 +2210,30 @@ uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2) /* set storage key extended */ void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2) { - MachineState *ms = MACHINE(qdev_get_machine()); static S390SKeysState *ss; static S390SKeysClass *skeyclass; uint64_t addr = wrap_address(env, r2); uint8_t key; + int rc; - if (addr > ms->ram_size) { - return; + addr = mmu_real2abs(env, addr); + if (!mmu_absolute_addr_valid(addr, false)) { + tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC()); } if (unlikely(!ss)) { ss = s390_get_skeys_device(); skeyclass = S390_SKEYS_GET_CLASS(ss); + if (skeyclass->enable_skeys && !skeyclass->enable_skeys(ss)) { + tlb_flush_all_cpus_synced(env_cpu(env)); + } } - key = (uint8_t) r1; - skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key); + key = r1 & 0xfe; + rc = skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key); + if (rc) { + trace_set_skeys_nonzero(rc); + } /* * As we can only flush by virtual address and not all the entries * that point to a physical address we have to flush the whole TLB. 
@@ -2222,28 +2244,37 @@ void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2) /* reset reference bit extended */ uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2) { - MachineState *ms = MACHINE(qdev_get_machine()); + uint64_t addr = wrap_address(env, r2); static S390SKeysState *ss; static S390SKeysClass *skeyclass; uint8_t re, key; + int rc; - if (r2 > ms->ram_size) { - return 0; + addr = mmu_real2abs(env, addr); + if (!mmu_absolute_addr_valid(addr, false)) { + tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC()); } if (unlikely(!ss)) { ss = s390_get_skeys_device(); skeyclass = S390_SKEYS_GET_CLASS(ss); + if (skeyclass->enable_skeys && !skeyclass->enable_skeys(ss)) { + tlb_flush_all_cpus_synced(env_cpu(env)); + } } - if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) { + rc = skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key); + if (rc) { + trace_get_skeys_nonzero(rc); return 0; } re = key & (SK_R | SK_C); key &= ~SK_R; - if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) { + rc = skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key); + if (rc) { + trace_set_skeys_nonzero(rc); return 0; } /* @@ -2441,7 +2472,7 @@ uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr) tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, GETPC()); } - exc = mmu_translate(env, addr, 0, asc, &ret, &flags, &tec); + exc = mmu_translate(env, addr, MMU_S390_LRA, asc, &ret, &flags, &tec); if (exc) { cc = 3; ret = exc | 0x80000000; @@ -2474,13 +2505,13 @@ uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr) uintptr_t ra = GETPC(); uint64_t hi, lo; int mem_idx; - TCGMemOpIdx oi; + MemOpIdx oi; Int128 v; assert(HAVE_ATOMIC128); mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + oi = make_memop_idx(MO_TEUQ | MO_ALIGN_16, mem_idx); v = cpu_atomic_ldo_be_mmu(env, addr, oi, ra); hi = int128_gethi(v); lo = int128_getlo(v); @@ -2505,13 +2536,13 @@ void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr, { uintptr_t ra = GETPC(); int mem_idx; - TCGMemOpIdx oi; + MemOpIdx oi; Int128 v; assert(HAVE_ATOMIC128); mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + oi = make_memop_idx(MO_TEUQ | MO_ALIGN_16, mem_idx); v = int128_make128(low, high); cpu_atomic_sto_be_mmu(env, addr, v, oi, ra); } @@ -2587,6 +2618,7 @@ void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr) that ex_value is non-zero, which flags that we are in a state that requires such execution. 
*/ env->ex_value = insn | ilen; + env->ex_target = addr; } uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src, diff --git a/target/s390x/tcg/misc_helper.c b/target/s390x/tcg/misc_helper.c index 33e6999e15..71388a7119 100644 --- a/target/s390x/tcg/misc_helper.c +++ b/target/s390x/tcg/misc_helper.c @@ -151,13 +151,33 @@ void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num) /* Set Prefix */ void HELPER(spx)(CPUS390XState *env, uint64_t a1) { + const uint32_t prefix = a1 & 0x7fffe000; + const uint32_t old_prefix = env->psa; CPUState *cs = env_cpu(env); - uint32_t prefix = a1 & 0x7fffe000; + + if (prefix == old_prefix) { + return; + } + /* + * Since prefix got aligned to 8k and memory increments are a multiple of + * 8k checking the first page is sufficient + */ + if (!mmu_absolute_addr_valid(prefix, true)) { + tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC()); + } env->psa = prefix; HELPER_LOG("prefix: %#x\n", prefix); tlb_flush_page(cs, 0); tlb_flush_page(cs, TARGET_PAGE_SIZE); + if (prefix != 0) { + tlb_flush_page(cs, prefix); + tlb_flush_page(cs, prefix + TARGET_PAGE_SIZE); + } + if (old_prefix != 0) { + tlb_flush_page(cs, old_prefix); + tlb_flush_page(cs, old_prefix + TARGET_PAGE_SIZE); + } } static void update_ckc_timer(CPUS390XState *env) @@ -313,7 +333,7 @@ uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1) /* same as machine type number in STORE CPU ID, but in EBCDIC */ snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type); ebcdic_put(sysib.sysib_111.type, type, 4); - /* model number (not stored in STORE CPU ID for z/Architecure) */ + /* model number (not stored in STORE CPU ID for z/Architecture) */ ebcdic_put(sysib.sysib_111.model, "QEMU ", 16); ebcdic_put(sysib.sysib_111.sequence, "QEMU ", 16); ebcdic_put(sysib.sysib_111.plant, "QEMU", 4); diff --git a/target/s390x/tcg/tcg_s390x.h b/target/s390x/tcg/tcg_s390x.h index 2f54ccb027..78558912f9 100644 --- a/target/s390x/tcg/tcg_s390x.h +++ b/target/s390x/tcg/tcg_s390x.h @@ -14,11 +14,11 @@ #define TCG_S390X_H void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque); -void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, - uint32_t code, uintptr_t ra); -void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc, - uintptr_t ra); -void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc, - uintptr_t ra); +G_NORETURN void tcg_s390_program_interrupt(CPUS390XState *env, + uint32_t code, uintptr_t ra); +G_NORETURN void tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc, + uintptr_t ra); +G_NORETURN void tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc, + uintptr_t ra); #endif /* TCG_S390X_H */ diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c index 0632b0374b..0885bf2641 100644 --- a/target/s390x/tcg/translate.c +++ b/target/s390x/tcg/translate.c @@ -138,6 +138,7 @@ struct DisasFields { struct DisasContext { DisasContextBase base; const DisasInsn *insn; + TCGOp *insn_start; DisasFields fields; uint64_t ex_value; /* @@ -148,7 +149,7 @@ struct DisasContext { uint64_t pc_tmp; uint32_t ilen; enum cc_op cc_op; - bool do_debug; + bool exit_to_mainloop; }; /* Information carried about a condition to be evaluated. */ @@ -263,7 +264,7 @@ static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es) * 16 byte operations to handle it in a special way. 
*/ g_assert(es <= MO_64); -#ifndef HOST_WORDS_BIGENDIAN +#if !HOST_BIG_ENDIAN offs ^= (8 - bytes); #endif return offs + vec_full_reg_offset(reg); @@ -388,14 +389,16 @@ static void update_cc_op(DisasContext *s) } } -static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc) +static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s, + uint64_t pc) { - return (uint64_t)cpu_lduw_code(env, pc); + return (uint64_t)translator_lduw(env, &s->base, pc); } -static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc) +static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s, + uint64_t pc) { - return (uint64_t)(uint32_t)cpu_ldl_code(env, pc); + return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc); } static int get_mem_index(DisasContext *s) @@ -432,7 +435,7 @@ static void gen_program_exception(DisasContext *s, int code) { TCGv_i32 tmp; - /* Remember what pgm exeption this was. */ + /* Remember what pgm exception this was. */ tmp = tcg_const_i32(code); tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code)); tcg_temp_free_i32(tmp); @@ -488,7 +491,7 @@ static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2) /* * Note that d2 is limited to 20 bits, signed. If we crop negative - * displacements early we create larger immedate addends. + * displacements early we create larger immediate addends. */ if (b2 && x2) { tcg_gen_add_i64(tmp, regs[b2], regs[x2]); @@ -634,8 +637,7 @@ static void gen_op_calc_cc(DisasContext *s) case CC_OP_LTUGTU_64: case CC_OP_TM_32: case CC_OP_TM_64: - case CC_OP_SLA_32: - case CC_OP_SLA_64: + case CC_OP_SLA: case CC_OP_SUBU: case CC_OP_NZ_F128: case CC_OP_VC: @@ -1009,7 +1011,7 @@ static void free_compare(DisasCompare *c) #define F6(N, X1, X2, X3, X4, X5, X6) F0(N) typedef enum { -#include "insn-format.def" +#include "insn-format.h.inc" } DisasFormat; #undef F0 @@ -1074,7 +1076,7 @@ typedef struct DisasFormatInfo { #define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } }, static const DisasFormatInfo format_info[] = { -#include "insn-format.def" +#include "insn-format.h.inc" }; #undef F0 @@ -1122,19 +1124,9 @@ typedef struct { exiting the TB. */ #define DISAS_PC_UPDATED DISAS_TARGET_0 -/* We have emitted one or more goto_tb. No fixup required. */ -#define DISAS_GOTO_TB DISAS_TARGET_1 - /* We have updated the PC and CC values. */ #define DISAS_PC_CC_UPDATED DISAS_TARGET_2 -/* We are exiting the TB, but have neither emitted a goto_tb, nor - updated the PC for the next instruction to be executed. */ -#define DISAS_PC_STALE DISAS_TARGET_3 - -/* We are exiting the TB to the main loop. */ -#define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4 - /* Instruction flags */ #define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */ @@ -1176,19 +1168,6 @@ struct DisasInsn { /* ====================================================================== */ /* Miscellaneous helpers, used by several operations. 
*/ -static void help_l2_shift(DisasContext *s, DisasOps *o, int mask) -{ - int b2 = get_field(s, b2); - int d2 = get_field(s, d2); - - if (b2 == 0) { - o->in2 = tcg_const_i64(d2 & mask); - } else { - o->in2 = get_address(s, 0, b2, d2); - tcg_gen_andi_i64(o->in2, o->in2, mask); - } -} - static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest) { if (dest == s->pc_tmp) { @@ -1201,7 +1180,7 @@ static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest) tcg_gen_goto_tb(0); tcg_gen_movi_i64(psw_addr, dest); tcg_gen_exit_tb(s->base.tb, 0); - return DISAS_GOTO_TB; + return DISAS_NORETURN; } else { tcg_gen_movi_i64(psw_addr, dest); per_branch(s, false); @@ -1213,7 +1192,7 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c, bool is_imm, int imm, TCGv_i64 cdest) { DisasJumpType ret; - uint64_t dest = s->base.pc_next + 2 * imm; + uint64_t dest = s->base.pc_next + (int64_t)imm * 2; TCGLabel *lab; /* Take care of the special cases first. */ @@ -1270,7 +1249,7 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c, tcg_gen_movi_i64(psw_addr, dest); tcg_gen_exit_tb(s->base.tb, 1); - ret = DISAS_GOTO_TB; + ret = DISAS_NORETURN; } else { /* Fallthru can use goto_tb, but taken branch cannot. */ /* Store taken branch destination before the brcond. This @@ -1510,6 +1489,36 @@ static DisasJumpType op_andi(DisasContext *s, DisasOps *o) return DISAS_NEXT; } +static DisasJumpType op_andc(DisasContext *s, DisasOps *o) +{ + tcg_gen_andc_i64(o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_orc(DisasContext *s, DisasOps *o) +{ + tcg_gen_orc_i64(o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_nand(DisasContext *s, DisasOps *o) +{ + tcg_gen_nand_i64(o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_nor(DisasContext *s, DisasOps *o) +{ + tcg_gen_nor_i64(o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_nxor(DisasContext *s, DisasOps *o) +{ + tcg_gen_eqv_i64(o->out, o->in1, o->in2); + return DISAS_NEXT; +} + static DisasJumpType op_ni(DisasContext *s, DisasOps *o) { o->in1 = tcg_temp_new_i64(); @@ -1576,18 +1585,51 @@ static DisasJumpType op_bal(DisasContext *s, DisasOps *o) } } +/* + * Disassemble the target of a branch. The results are returned in a form + * suitable for passing into help_branch(): + * + * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd + * branches, whose DisasContext *S contains the relative immediate field RI, + * are considered fixed. All the other branches are considered computed. + * - int IMM is the value of RI. + * - TCGv_i64 CDEST is the address of the computed target. + */ +#define disas_jdest(s, ri, is_imm, imm, cdest) do { \ + if (have_field(s, ri)) { \ + if (unlikely(s->ex_value)) { \ + cdest = tcg_temp_new_i64(); \ + tcg_gen_ld_i64(cdest, cpu_env, offsetof(CPUS390XState, ex_target));\ + tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2); \ + is_imm = false; \ + } else { \ + is_imm = true; \ + } \ + } else { \ + is_imm = false; \ + } \ + imm = is_imm ? 
get_field(s, ri) : 0; \ +} while (false) + static DisasJumpType op_basi(DisasContext *s, DisasOps *o) { + DisasCompare c; + bool is_imm; + int imm; + pc_to_link_info(o->out, s, s->pc_tmp); - return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2)); + + disas_jdest(s, i2, is_imm, imm, o->in2); + disas_jcc(s, &c, 0xf); + return help_branch(s, &c, is_imm, imm, o->in2); } static DisasJumpType op_bc(DisasContext *s, DisasOps *o) { int m1 = get_field(s, m1); - bool is_imm = have_field(s, i2); - int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; + bool is_imm; + int imm; /* BCR with R2 = 0 causes no branching */ if (have_field(s, r2) && get_field(s, r2) == 0) { @@ -1604,6 +1646,7 @@ static DisasJumpType op_bc(DisasContext *s, DisasOps *o) return DISAS_NEXT; } + disas_jdest(s, i2, is_imm, imm, o->in2); disas_jcc(s, &c, m1); return help_branch(s, &c, is_imm, imm, o->in2); } @@ -1611,10 +1654,10 @@ static DisasJumpType op_bc(DisasContext *s, DisasOps *o) static DisasJumpType op_bct32(DisasContext *s, DisasOps *o) { int r1 = get_field(s, r1); - bool is_imm = have_field(s, i2); - int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; + bool is_imm; TCGv_i64 t; + int imm; c.cond = TCG_COND_NE; c.is_64 = false; @@ -1629,6 +1672,7 @@ static DisasJumpType op_bct32(DisasContext *s, DisasOps *o) tcg_gen_extrl_i64_i32(c.u.s32.a, t); tcg_temp_free_i64(t); + disas_jdest(s, i2, is_imm, imm, o->in2); return help_branch(s, &c, is_imm, imm, o->in2); } @@ -1659,9 +1703,9 @@ static DisasJumpType op_bcth(DisasContext *s, DisasOps *o) static DisasJumpType op_bct64(DisasContext *s, DisasOps *o) { int r1 = get_field(s, r1); - bool is_imm = have_field(s, i2); - int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; + bool is_imm; + int imm; c.cond = TCG_COND_NE; c.is_64 = true; @@ -1672,6 +1716,7 @@ static DisasJumpType op_bct64(DisasContext *s, DisasOps *o) c.u.s64.a = regs[r1]; c.u.s64.b = tcg_const_i64(0); + disas_jdest(s, i2, is_imm, imm, o->in2); return help_branch(s, &c, is_imm, imm, o->in2); } @@ -1679,10 +1724,10 @@ static DisasJumpType op_bx32(DisasContext *s, DisasOps *o) { int r1 = get_field(s, r1); int r3 = get_field(s, r3); - bool is_imm = have_field(s, i2); - int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; + bool is_imm; TCGv_i64 t; + int imm; c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT); c.is_64 = false; @@ -1698,6 +1743,7 @@ static DisasJumpType op_bx32(DisasContext *s, DisasOps *o) store_reg32_i64(r1, t); tcg_temp_free_i64(t); + disas_jdest(s, i2, is_imm, imm, o->in2); return help_branch(s, &c, is_imm, imm, o->in2); } @@ -1705,9 +1751,9 @@ static DisasJumpType op_bx64(DisasContext *s, DisasOps *o) { int r1 = get_field(s, r1); int r3 = get_field(s, r3); - bool is_imm = have_field(s, i2); - int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; + bool is_imm; + int imm; c.cond = (s->insn->data ? 
TCG_COND_LE : TCG_COND_GT); c.is_64 = true; @@ -1724,6 +1770,7 @@ static DisasJumpType op_bx64(DisasContext *s, DisasOps *o) c.u.s64.a = regs[r1]; c.g1 = true; + disas_jdest(s, i2, is_imm, imm, o->in2); return help_branch(s, &c, is_imm, imm, o->in2); } @@ -1741,10 +1788,9 @@ static DisasJumpType op_cj(DisasContext *s, DisasOps *o) c.u.s64.a = o->in1; c.u.s64.b = o->in2; - is_imm = have_field(s, i4); - if (is_imm) { - imm = get_field(s, i4); - } else { + o->out = NULL; + disas_jdest(s, i4, is_imm, imm, o->out); + if (!is_imm && !o->out) { imm = 0; o->out = get_address(s, 0, get_field(s, b4), get_field(s, d4)); @@ -2604,7 +2650,7 @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o) tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s)); tcg_gen_addi_i64(o->in2, o->in2, 1); tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8); - ccm |= 0xff << pos; + ccm |= 0xffull << pos; } m3 = (m3 << 1) & 0xf; pos -= 8; @@ -2970,7 +3016,13 @@ static DisasJumpType op_loc(DisasContext *s, DisasOps *o) { DisasCompare c; - disas_jcc(s, &c, get_field(s, m3)); + if (have_field(s, m3)) { + /* LOAD * ON CONDITION */ + disas_jcc(s, &c, get_field(s, m3)); + } else { + /* SELECT */ + disas_jcc(s, &c, get_field(s, m4)); + } if (c.is_64) { tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b, @@ -3005,7 +3057,8 @@ static DisasJumpType op_lctl(DisasContext *s, DisasOps *o) tcg_temp_free_i32(r1); tcg_temp_free_i32(r3); /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ - return DISAS_PC_STALE_NOCHAIN; + s->exit_to_mainloop = true; + return DISAS_TOO_MANY; } static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o) @@ -3016,7 +3069,8 @@ static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o) tcg_temp_free_i32(r1); tcg_temp_free_i32(r3); /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ - return DISAS_PC_STALE_NOCHAIN; + s->exit_to_mainloop = true; + return DISAS_TOO_MANY; } static DisasJumpType op_lra(DisasContext *s, DisasOps *o) @@ -3061,7 +3115,7 @@ static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o) t1 = tcg_temp_new_i64(); t2 = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), - MO_TEQ | MO_ALIGN_8); + MO_TEUQ | MO_ALIGN_8); tcg_gen_addi_i64(o->in2, o->in2, 8); tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s)); gen_helper_load_psw(cpu_env, t1, t2); @@ -3370,6 +3424,12 @@ static DisasJumpType op_mvc(DisasContext *s, DisasOps *o) return DISAS_NEXT; } +static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o) +{ + gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2); + return DISAS_NEXT; +} + static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o) { TCGv_i32 l = tcg_const_i32(get_field(s, l1)); @@ -3756,7 +3816,13 @@ static DisasJumpType op_pku(DisasContext *s, DisasOps *o) static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o) { - gen_helper_popcnt(o->out, o->in2); + const uint8_t m3 = get_field(s, m3); + + if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) { + tcg_gen_ctpop_i64(o->out, o->in2); + } else { + gen_helper_popcnt(o->out, o->in2); + } return DISAS_NEXT; } @@ -3960,7 +4026,7 @@ static DisasJumpType op_sacf(DisasContext *s, DisasOps *o) { gen_helper_sacf(cpu_env, o->in2); /* Addressing mode has changed, so end the block. */ - return DISAS_PC_STALE; + return DISAS_TOO_MANY; } #endif @@ -3996,7 +4062,7 @@ static DisasJumpType op_sam(DisasContext *s, DisasOps *o) tcg_temp_free_i64(tsam); /* Always exit the TB, since we (may have) changed execution mode. 
*/ - return DISAS_PC_STALE; + return DISAS_TOO_MANY; } static DisasJumpType op_sar(DisasContext *s, DisasOps *o) @@ -4111,9 +4177,18 @@ static DisasJumpType op_soc(DisasContext *s, DisasOps *o) static DisasJumpType op_sla(DisasContext *s, DisasOps *o) { + TCGv_i64 t; uint64_t sign = 1ull << s->insn->data; - enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64; - gen_op_update2_cc_i64(s, cco, o->in1, o->in2); + if (s->insn->data == 31) { + t = tcg_temp_new_i64(); + tcg_gen_shli_i64(t, o->in1, 32); + } else { + t = o->in1; + } + gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2); + if (s->insn->data == 31) { + tcg_temp_free_i64(t); + } tcg_gen_shl_i64(o->out, o->in1, o->in2); /* The arithmetic left shift is curious in that it does not affect the sign bit. Copy that over from the source unchanged. */ @@ -4245,7 +4320,8 @@ static DisasJumpType op_ssm(DisasContext *s, DisasOps *o) { tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8); /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ - return DISAS_PC_STALE_NOCHAIN; + s->exit_to_mainloop = true; + return DISAS_TOO_MANY; } static DisasJumpType op_stap(DisasContext *s, DisasOps *o) @@ -4293,8 +4369,7 @@ static DisasJumpType op_stcke(DisasContext *s, DisasOps *o) #ifndef CONFIG_USER_ONLY static DisasJumpType op_sck(DisasContext *s, DisasOps *o) { - tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN); - gen_helper_sck(cc_op, cpu_env, o->in1); + gen_helper_sck(cc_op, cpu_env, o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -4511,7 +4586,8 @@ static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o) } /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ - return DISAS_PC_STALE_NOCHAIN; + s->exit_to_mainloop = true; + return DISAS_TOO_MANY; } static DisasJumpType op_stura(DisasContext *s, DisasOps *o) @@ -5418,9 +5494,11 @@ static void wout_r1_P32(DisasContext *s, DisasOps *o) static void wout_r1_D32(DisasContext *s, DisasOps *o) { int r1 = get_field(s, r1); + TCGv_i64 t = tcg_temp_new_i64(); store_reg32_i64(r1 + 1, o->out); - tcg_gen_shri_i64(o->out, o->out, 32); - store_reg32_i64(r1, o->out); + tcg_gen_shri_i64(t, o->out, 32); + store_reg32_i64(r1, t); + tcg_temp_free_i64(t); } #define SPEC_wout_r1_D32 SPEC_r1_even @@ -5519,7 +5597,7 @@ static void wout_m1_64(DisasContext *s, DisasOps *o) #ifndef CONFIG_USER_ONLY static void wout_m1_64a(DisasContext *s, DisasOps *o) { - tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN); + tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN); } #define SPEC_wout_m1_64a 0 #endif @@ -5669,6 +5747,13 @@ static void in1_r3_D32(DisasContext *s, DisasOps *o) } #define SPEC_in1_r3_D32 SPEC_r3_even +static void in1_r3_sr32(DisasContext *s, DisasOps *o) +{ + o->in1 = tcg_temp_new_i64(); + tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32); +} +#define SPEC_in1_r3_sr32 0 + static void in1_e1(DisasContext *s, DisasOps *o) { o->in1 = load_freg32_i64(get_field(s, r1)); @@ -5914,23 +5999,39 @@ static void in2_a2(DisasContext *s, DisasOps *o) } #define SPEC_in2_a2 0 +static TCGv gen_ri2(DisasContext *s) +{ + TCGv ri2 = NULL; + bool is_imm; + int imm; + + disas_jdest(s, i2, is_imm, imm, ri2); + if (is_imm) { + ri2 = tcg_constant_i64(s->base.pc_next + imm * 2); + } + + return ri2; +} + static void in2_ri2(DisasContext *s, DisasOps *o) { - o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2); + o->in2 = gen_ri2(s); } #define SPEC_in2_ri2 0 -static void in2_sh32(DisasContext *s, DisasOps *o) +static void 
in2_sh(DisasContext *s, DisasOps *o) { - help_l2_shift(s, o, 31); -} -#define SPEC_in2_sh32 0 + int b2 = get_field(s, b2); + int d2 = get_field(s, d2); -static void in2_sh64(DisasContext *s, DisasOps *o) -{ - help_l2_shift(s, o, 63); + if (b2 == 0) { + o->in2 = tcg_const_i64(d2 & 0x3f); + } else { + o->in2 = get_address(s, 0, b2, d2); + tcg_gen_andi_i64(o->in2, o->in2, 0x3f); + } } -#define SPEC_in2_sh64 0 +#define SPEC_in2_sh 0 static void in2_m2_8u(DisasContext *s, DisasOps *o) { @@ -5995,36 +6096,43 @@ static void in2_m2_64w(DisasContext *s, DisasOps *o) static void in2_m2_64a(DisasContext *s, DisasOps *o) { in2_a2(s, o); - tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN); + tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN); } #define SPEC_in2_m2_64a 0 #endif +static void in2_mri2_16s(DisasContext *s, DisasOps *o) +{ + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld16s(o->in2, gen_ri2(s), get_mem_index(s)); +} +#define SPEC_in2_mri2_16s 0 + static void in2_mri2_16u(DisasContext *s, DisasOps *o) { - in2_ri2(s, o); - tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s)); + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld16u(o->in2, gen_ri2(s), get_mem_index(s)); } #define SPEC_in2_mri2_16u 0 static void in2_mri2_32s(DisasContext *s, DisasOps *o) { - in2_ri2(s, o); - tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s)); + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32s(o->in2, gen_ri2(s), get_mem_index(s)); } #define SPEC_in2_mri2_32s 0 static void in2_mri2_32u(DisasContext *s, DisasOps *o) { - in2_ri2(s, o); - tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s)); + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(o->in2, gen_ri2(s), get_mem_index(s)); } #define SPEC_in2_mri2_32u 0 static void in2_mri2_64(DisasContext *s, DisasOps *o) { - in2_ri2(s, o); - tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(o->in2, gen_ri2(s), get_mem_index(s)); } #define SPEC_in2_mri2_64 0 @@ -6093,7 +6201,7 @@ static void in2_insn(DisasContext *s, DisasOps *o) #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM, enum DisasInsnEnum { -#include "insn-data.def" +#include "insn-data.h.inc" }; #undef E @@ -6130,17 +6238,17 @@ enum DisasInsnEnum { #define FAC_Z S390_FEAT_ZARCH #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE #define FAC_DFP S390_FEAT_DFP -#define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */ +#define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */ #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */ #define FAC_EE S390_FEAT_EXECUTE_EXT #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT -#define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */ -#define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */ +#define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */ +#define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */ #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB #define FAC_HW S390_FEAT_STFLE_45 /* high-word */ -#define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */ +#define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */ #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */ #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */ #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on 
condition 1 */ @@ -6167,11 +6275,13 @@ enum DisasInsnEnum { #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */ #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION #define FAC_V S390_FEAT_VECTOR /* vector facility */ -#define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */ +#define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */ +#define FAC_VE2 S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */ #define FAC_MIE2 S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */ +#define FAC_MIE3 S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */ static const DisasInsn insn_info[] = { -#include "insn-data.def" +#include "insn-data.h.inc" }; #undef E @@ -6181,7 +6291,7 @@ static const DisasInsn insn_info[] = { static const DisasInsn *lookup_opc(uint16_t opc) { switch (opc) { -#include "insn-data.def" +#include "insn-data.h.inc" default: return NULL; } @@ -6265,15 +6375,21 @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s) if (unlikely(s->ex_value)) { /* Drop the EX data now, so that it's clear on exception paths. */ TCGv_i64 zero = tcg_const_i64(0); + int i; tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value)); tcg_temp_free_i64(zero); /* Extract the values saved by EXECUTE. */ insn = s->ex_value & 0xffffffffffff0000ull; ilen = s->ex_value & 0xf; + /* register insn bytes with translator so plugins work */ + for (i = 0; i < ilen; i++) { + uint8_t byte = extract64(insn, 56 - (i * 8), 8); + translator_fake_ldb(byte, pc + i); + } op = insn >> 56; } else { - insn = ld_code2(env, pc); + insn = ld_code2(env, s, pc); op = (insn >> 8) & 0xff; ilen = get_ilen(op); switch (ilen) { @@ -6281,10 +6397,10 @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s) insn = insn << 48; break; case 4: - insn = ld_code4(env, pc) << 32; + insn = ld_code4(env, s, pc) << 32; break; case 6: - insn = (insn << 48) | (ld_code4(env, pc + 2) << 16); + insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16); break; default: g_assert_not_reached(); @@ -6378,8 +6494,8 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) /* Search for the insn in the table. */ insn = extract_insn(env, s); - /* Emit insn_start now that we know the ILEN. */ - tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen); + /* Update insn_start now that we know the ILEN. */ + tcg_set_insn_start_param(s->insn_start, 2, s->ilen); /* Not found means unimplemented/illegal opcode. */ if (insn == NULL) { @@ -6508,13 +6624,13 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) /* io should be the last instruction in tb when icount is enabled */ if (unlikely(icount && ret == DISAS_NEXT)) { - ret = DISAS_PC_STALE; + ret = DISAS_TOO_MANY; } #ifndef CONFIG_USER_ONLY if (s->base.tb->flags & FLAG_MASK_PER) { /* An exception might be triggered, save PSW if not already done. 
*/ - if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) { + if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) { tcg_gen_movi_i64(psw_addr, s->pc_tmp); } @@ -6541,7 +6657,7 @@ static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) dc->cc_op = CC_OP_DYNAMIC; dc->ex_value = dc->base.tb->cs_base; - dc->do_debug = dc->base.singlestep_enabled; + dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value; } static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs) @@ -6550,6 +6666,19 @@ static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs) static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) { + DisasContext *dc = container_of(dcbase, DisasContext, base); + + /* Delay the set of ilen until we've read the insn. */ + tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0); + dc->insn_start = tcg_last_op(); +} + +static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s, + uint64_t pc) +{ + uint64_t insn = cpu_lduw_code(env, pc); + + return pc + get_ilen((insn >> 8) & 0xff); } static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) @@ -6559,10 +6688,9 @@ static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) dc->base.is_jmp = translate_one(env, dc); if (dc->base.is_jmp == DISAS_NEXT) { - uint64_t page_start; - - page_start = dc->base.pc_first & TARGET_PAGE_MASK; - if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) { + if (dc->ex_value || + !is_same_page(dcbase, dc->base.pc_next) || + !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) { dc->base.is_jmp = DISAS_TOO_MANY; } } @@ -6573,12 +6701,9 @@ static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) DisasContext *dc = container_of(dcbase, DisasContext, base); switch (dc->base.is_jmp) { - case DISAS_GOTO_TB: case DISAS_NORETURN: break; case DISAS_TOO_MANY: - case DISAS_PC_STALE: - case DISAS_PC_STALE_NOCHAIN: update_psw_addr(dc); /* FALLTHRU */ case DISAS_PC_UPDATED: @@ -6588,10 +6713,7 @@ static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) /* FALLTHRU */ case DISAS_PC_CC_UPDATED: /* Exit the TB, either by raising a debug exception or by return. */ - if (dc->do_debug) { - gen_exception(EXCP_DEBUG); - } else if ((dc->base.tb->flags & FLAG_MASK_PER) || - dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) { + if (dc->exit_to_mainloop) { tcg_gen_exit_tb(NULL, 0); } else { tcg_gen_lookup_and_goto_ptr(); @@ -6602,16 +6724,17 @@ static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) } } -static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs) +static void s390x_tr_disas_log(const DisasContextBase *dcbase, + CPUState *cs, FILE *logfile) { DisasContext *dc = container_of(dcbase, DisasContext, base); if (unlikely(dc->ex_value)) { - /* ??? Unfortunately log_target_disas can't use host memory. */ - qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value); + /* ??? Unfortunately target_disas can't use host memory. 
*/ + fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value); } else { - qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first)); - log_target_disas(cs, dc->base.pc_first, dc->base.tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first)); + target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size); } } @@ -6624,16 +6747,20 @@ static const TranslatorOps s390x_tr_ops = { .disas_log = s390x_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext dc; - translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns); + translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base); } -void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, - target_ulong *data) +void s390x_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) { + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; int cc_op = data[1]; env->psw.addr = data[0]; diff --git a/target/s390x/tcg/translate_vx.c.inc b/target/s390x/tcg/translate_vx.c.inc index 0afa46e463..d39ee81cd6 100644 --- a/target/s390x/tcg/translate_vx.c.inc +++ b/target/s390x/tcg/translate_vx.c.inc @@ -67,7 +67,7 @@ static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr, { const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); - switch (memop) { + switch ((unsigned)memop) { case ES_8: tcg_gen_ld8u_i64(dst, cpu_env, offs); break; @@ -175,7 +175,7 @@ static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr, /* convert it to an element offset relative to cpu_env (vec_reg_offset() */ tcg_gen_shli_i64(tmp, tmp, es); -#ifndef HOST_WORDS_BIGENDIAN +#if !HOST_BIG_ENDIAN tcg_gen_xori_i64(tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es)); #endif tcg_gen_addi_i64(tmp, tmp, vec_full_reg_offset(reg)); @@ -424,9 +424,9 @@ static DisasJumpType op_vl(DisasContext *s, DisasOps *o) TCGv_i64 t0 = tcg_temp_new_i64(); TCGv_i64 t1 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_TEQ); + tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_TEUQ); gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); - tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ); + tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ); write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); tcg_temp_free(t0); @@ -457,6 +457,129 @@ static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o) return DISAS_NEXT; } +static DisasJumpType op_vlebr(DisasContext *s, DisasOps *o) +{ + const uint8_t es = s->insn->data; + const uint8_t enr = get_field(s, m3); + TCGv_i64 tmp; + + if (!valid_vec_element(enr, es)) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + tmp = tcg_temp_new_i64(); + tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es); + write_vec_element_i64(tmp, get_field(s, v1), enr, es); + tcg_temp_free_i64(tmp); + return DISAS_NEXT; +} + +static DisasJumpType op_vlbrrep(DisasContext *s, DisasOps *o) +{ + const uint8_t es = get_field(s, m3); + TCGv_i64 tmp; + + if (es < ES_16 || es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + tmp = tcg_temp_new_i64(); + tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es); + gen_gvec_dup_i64(es, get_field(s, v1), tmp); + tcg_temp_free_i64(tmp); + return DISAS_NEXT; +} + +static DisasJumpType 
op_vllebrz(DisasContext *s, DisasOps *o) +{ + const uint8_t m3 = get_field(s, m3); + TCGv_i64 tmp; + int es, lshift; + + switch (m3) { + case ES_16: + case ES_32: + case ES_64: + es = m3; + lshift = 0; + break; + case 6: + es = ES_32; + lshift = 32; + break; + default: + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + tmp = tcg_temp_new_i64(); + tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es); + tcg_gen_shli_i64(tmp, tmp, lshift); + + write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64); + write_vec_element_i64(tcg_constant_i64(0), get_field(s, v1), 1, ES_64); + tcg_temp_free_i64(tmp); + return DISAS_NEXT; +} + +static DisasJumpType op_vlbr(DisasContext *s, DisasOps *o) +{ + const uint8_t es = get_field(s, m3); + TCGv_i64 t0, t1; + + if (es < ES_16 || es > ES_128) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + + + if (es == ES_128) { + tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_LEUQ); + gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); + tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_LEUQ); + goto write; + } + + /* Begin with byte reversed doublewords... */ + tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_LEUQ); + gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); + tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_LEUQ); + + /* + * For 16 and 32-bit elements, the doubleword bswap also reversed + * the order of the elements. Perform a larger order swap to put + * them back into place. For the 128-bit "element", finish the + * bswap by swapping the doublewords. + */ + switch (es) { + case ES_16: + tcg_gen_hswap_i64(t0, t0); + tcg_gen_hswap_i64(t1, t1); + break; + case ES_32: + tcg_gen_wswap_i64(t0, t0); + tcg_gen_wswap_i64(t1, t1); + break; + case ES_64: + break; + default: + g_assert_not_reached(); + } + +write: + write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); + write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); + + tcg_temp_free(t0); + tcg_temp_free(t1); + return DISAS_NEXT; +} + static DisasJumpType op_vle(DisasContext *s, DisasOps *o) { const uint8_t es = s->insn->data; @@ -492,6 +615,46 @@ static DisasJumpType op_vlei(DisasContext *s, DisasOps *o) return DISAS_NEXT; } +static DisasJumpType op_vler(DisasContext *s, DisasOps *o) +{ + const uint8_t es = get_field(s, m3); + + if (es < ES_16 || es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_new_i64(); + + /* Begin with the two doublewords swapped... */ + tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ); + gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); + tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_TEUQ); + + /* ... then swap smaller elements within the doublewords as required. 
*/ + switch (es) { + case MO_16: + tcg_gen_hswap_i64(t1, t1); + tcg_gen_hswap_i64(t0, t0); + break; + case MO_32: + tcg_gen_wswap_i64(t1, t1); + tcg_gen_wswap_i64(t0, t0); + break; + case MO_64: + break; + default: + g_assert_not_reached(); + } + + write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); + write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); + tcg_temp_free(t0); + tcg_temp_free(t1); + return DISAS_NEXT; +} + static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o) { const uint8_t es = get_field(s, m4); @@ -592,16 +755,16 @@ static DisasJumpType op_vlm(DisasContext *s, DisasOps *o) t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); gen_addi_and_wrap_i64(s, t0, o->addr1, (v3 - v1) * 16 + 8); - tcg_gen_qemu_ld_i64(t0, t0, get_mem_index(s), MO_TEQ); + tcg_gen_qemu_ld_i64(t0, t0, get_mem_index(s), MO_TEUQ); for (;; v1++) { - tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ); + tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ); write_vec_element_i64(t1, v1, 0, ES_64); if (v1 == v3) { break; } gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); - tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ); + tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ); write_vec_element_i64(t1, v1, 1, ES_64); gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); } @@ -797,7 +960,7 @@ static DisasJumpType op_vpk(DisasContext *s, DisasOps *o) } break; case 0x94: - /* If sources and destination dont't overlap -> fast path */ + /* If sources and destination don't overlap -> fast path */ if (v1 != v2 && v1 != v3) { const uint8_t src_es = get_field(s, m4); const uint8_t dst_es = src_es - 1; @@ -950,14 +1113,89 @@ static DisasJumpType op_vst(DisasContext *s, DisasOps *o) gen_helper_probe_write_access(cpu_env, o->addr1, tmp); read_vec_element_i64(tmp, get_field(s, v1), 0, ES_64); - tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ); + tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ); gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); read_vec_element_i64(tmp, get_field(s, v1), 1, ES_64); - tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ); + tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ); tcg_temp_free_i64(tmp); return DISAS_NEXT; } +static DisasJumpType op_vstebr(DisasContext *s, DisasOps *o) +{ + const uint8_t es = s->insn->data; + const uint8_t enr = get_field(s, m3); + TCGv_i64 tmp; + + if (!valid_vec_element(enr, es)) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + tmp = tcg_temp_new_i64(); + read_vec_element_i64(tmp, get_field(s, v1), enr, es); + tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es); + tcg_temp_free_i64(tmp); + return DISAS_NEXT; +} + +static DisasJumpType op_vstbr(DisasContext *s, DisasOps *o) +{ + const uint8_t es = get_field(s, m3); + TCGv_i64 t0, t1; + + if (es < ES_16 || es > ES_128) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + /* Probe write access before actually modifying memory */ + gen_helper_probe_write_access(cpu_env, o->addr1, tcg_constant_i64(16)); + + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + + + if (es == ES_128) { + read_vec_element_i64(t1, get_field(s, v1), 0, ES_64); + read_vec_element_i64(t0, get_field(s, v1), 1, ES_64); + goto write; + } + + read_vec_element_i64(t0, get_field(s, v1), 0, ES_64); + read_vec_element_i64(t1, get_field(s, v1), 1, ES_64); + + /* + * For 16 and 32-bit elements, the doubleword bswap below will + * reverse the order of the elements. 
Perform a larger order + * swap to put them back into place. For the 128-bit "element", + * finish the bswap by swapping the doublewords. + */ + switch (es) { + case MO_16: + tcg_gen_hswap_i64(t0, t0); + tcg_gen_hswap_i64(t1, t1); + break; + case MO_32: + tcg_gen_wswap_i64(t0, t0); + tcg_gen_wswap_i64(t1, t1); + break; + case MO_64: + break; + default: + g_assert_not_reached(); + } + +write: + tcg_gen_qemu_st_i64(t0, o->addr1, get_mem_index(s), MO_LEUQ); + gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); + tcg_gen_qemu_st_i64(t1, o->addr1, get_mem_index(s), MO_LEUQ); + + tcg_temp_free(t0); + tcg_temp_free(t1); + return DISAS_NEXT; +} + static DisasJumpType op_vste(DisasContext *s, DisasOps *o) { const uint8_t es = s->insn->data; @@ -976,6 +1214,50 @@ static DisasJumpType op_vste(DisasContext *s, DisasOps *o) return DISAS_NEXT; } +static DisasJumpType op_vster(DisasContext *s, DisasOps *o) +{ + const uint8_t es = get_field(s, m3); + TCGv_i64 t0, t1; + + if (es < ES_16 || es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + /* Probe write access before actually modifying memory */ + gen_helper_probe_write_access(cpu_env, o->addr1, tcg_constant_i64(16)); + + /* Begin with the two doublewords swapped... */ + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + read_vec_element_i64(t1, get_field(s, v1), 0, ES_64); + read_vec_element_i64(t0, get_field(s, v1), 1, ES_64); + + /* ... then swap smaller elements within the doublewords as required. */ + switch (es) { + case MO_16: + tcg_gen_hswap_i64(t1, t1); + tcg_gen_hswap_i64(t0, t0); + break; + case MO_32: + tcg_gen_wswap_i64(t1, t1); + tcg_gen_wswap_i64(t0, t0); + break; + case MO_64: + break; + default: + g_assert_not_reached(); + } + + tcg_gen_qemu_st_i64(t0, o->addr1, get_mem_index(s), MO_TEUQ); + gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); + tcg_gen_qemu_st_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ); + + tcg_temp_free(t0); + tcg_temp_free(t1); + return DISAS_NEXT; +} + static DisasJumpType op_vstm(DisasContext *s, DisasOps *o) { const uint8_t v3 = get_field(s, v3); @@ -993,10 +1275,10 @@ static DisasJumpType op_vstm(DisasContext *s, DisasOps *o) for (;; v1++) { read_vec_element_i64(tmp, v1, 0, ES_64); - tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ); + tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ); gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); read_vec_element_i64(tmp, v1, 1, ES_64); - tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ); + tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ); if (v1 == v3) { break; } @@ -1793,7 +2075,7 @@ static DisasJumpType op_vmsl(DisasContext *s, DisasOps *o) l2 = tcg_temp_new_i64(); h2 = tcg_temp_new_i64(); - /* Multipy both even elements from v2 and v3 */ + /* Multiply both even elements from v2 and v3 */ read_vec_element_i64(l1, get_field(s, v2), 0, ES_64); read_vec_element_i64(h1, get_field(s, v3), 0, ES_64); tcg_gen_mulu2_i64(l1, h1, l1, h1); @@ -1802,7 +2084,7 @@ static DisasJumpType op_vmsl(DisasContext *s, DisasOps *o) tcg_gen_add2_i64(l1, h1, l1, h1, l1, h1); } - /* Multipy both odd elements from v2 and v3 */ + /* Multiply both odd elements from v2 and v3 */ read_vec_element_i64(l2, get_field(s, v2), 1, ES_64); read_vec_element_i64(h2, get_field(s, v3), 1, ES_64); tcg_gen_mulu2_i64(l2, h2, l2, h2); @@ -2018,31 +2300,61 @@ static DisasJumpType op_ves(DisasContext *s, DisasOps *o) return DISAS_NEXT; } -static DisasJumpType op_vsl(DisasContext *s, DisasOps *o) +static DisasJumpType 
gen_vsh_by_byte(DisasContext *s, DisasOps *o, + gen_helper_gvec_2i *gen, + gen_helper_gvec_3 *gen_ve2) { - TCGv_i64 shift = tcg_temp_new_i64(); + bool byte = s->insn->data; - read_vec_element_i64(shift, get_field(s, v3), 7, ES_8); - if (s->fields.op2 == 0x74) { - tcg_gen_andi_i64(shift, shift, 0x7); + if (!byte && s390_has_feat(S390_FEAT_VECTOR_ENH2)) { + gen_gvec_3_ool(get_field(s, v1), get_field(s, v2), + get_field(s, v3), 0, gen_ve2); } else { - tcg_gen_andi_i64(shift, shift, 0x78); - } + TCGv_i64 shift = tcg_temp_new_i64(); - gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2), - shift, 0, gen_helper_gvec_vsl); - tcg_temp_free_i64(shift); + read_vec_element_i64(shift, get_field(s, v3), 7, ES_8); + tcg_gen_andi_i64(shift, shift, byte ? 0x78 : 7); + gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2), shift, 0, gen); + tcg_temp_free_i64(shift); + } return DISAS_NEXT; } -static DisasJumpType op_vsldb(DisasContext *s, DisasOps *o) +static DisasJumpType op_vsl(DisasContext *s, DisasOps *o) { - const uint8_t i4 = get_field(s, i4) & 0xf; - const int left_shift = (i4 & 7) * 8; - const int right_shift = 64 - left_shift; - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); - TCGv_i64 t2 = tcg_temp_new_i64(); + return gen_vsh_by_byte(s, o, gen_helper_gvec_vsl, + gen_helper_gvec_vsl_ve2); +} + +static DisasJumpType op_vsra(DisasContext *s, DisasOps *o) +{ + return gen_vsh_by_byte(s, o, gen_helper_gvec_vsra, + gen_helper_gvec_vsra_ve2); +} + +static DisasJumpType op_vsrl(DisasContext *s, DisasOps *o) +{ + return gen_vsh_by_byte(s, o, gen_helper_gvec_vsrl, + gen_helper_gvec_vsrl_ve2); +} + +static DisasJumpType op_vsld(DisasContext *s, DisasOps *o) +{ + const bool byte = s->insn->data; + const uint8_t mask = byte ? 15 : 7; + const uint8_t mul = byte ? 
8 : 1; + const uint8_t i4 = get_field(s, i4); + const int right_shift = 64 - (i4 & 7) * mul; + TCGv_i64 t0, t1, t2; + + if (i4 & ~mask) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); if ((i4 & 8) == 0) { read_vec_element_i64(t0, get_field(s, v2), 0, ES_64); @@ -2053,8 +2365,10 @@ static DisasJumpType op_vsldb(DisasContext *s, DisasOps *o) read_vec_element_i64(t1, get_field(s, v3), 0, ES_64); read_vec_element_i64(t2, get_field(s, v3), 1, ES_64); } + tcg_gen_extract2_i64(t0, t1, t0, right_shift); tcg_gen_extract2_i64(t1, t2, t1, right_shift); + write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); @@ -2064,37 +2378,33 @@ static DisasJumpType op_vsldb(DisasContext *s, DisasOps *o) return DISAS_NEXT; } -static DisasJumpType op_vsra(DisasContext *s, DisasOps *o) +static DisasJumpType op_vsrd(DisasContext *s, DisasOps *o) { - TCGv_i64 shift = tcg_temp_new_i64(); + const uint8_t i4 = get_field(s, i4); + TCGv_i64 t0, t1, t2; - read_vec_element_i64(shift, get_field(s, v3), 7, ES_8); - if (s->fields.op2 == 0x7e) { - tcg_gen_andi_i64(shift, shift, 0x7); - } else { - tcg_gen_andi_i64(shift, shift, 0x78); + if (i4 & ~7) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; } - gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2), - shift, 0, gen_helper_gvec_vsra); - tcg_temp_free_i64(shift); - return DISAS_NEXT; -} + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); -static DisasJumpType op_vsrl(DisasContext *s, DisasOps *o) -{ - TCGv_i64 shift = tcg_temp_new_i64(); + read_vec_element_i64(t0, get_field(s, v2), 1, ES_64); + read_vec_element_i64(t1, get_field(s, v3), 0, ES_64); + read_vec_element_i64(t2, get_field(s, v3), 1, ES_64); - read_vec_element_i64(shift, get_field(s, v3), 7, ES_8); - if (s->fields.op2 == 0x7c) { - tcg_gen_andi_i64(shift, shift, 0x7); - } else { - tcg_gen_andi_i64(shift, shift, 0x78); - } + tcg_gen_extract2_i64(t0, t1, t0, i4); + tcg_gen_extract2_i64(t1, t2, t1, i4); - gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2), - shift, 0, gen_helper_gvec_vsrl); - tcg_temp_free_i64(shift); + write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); + write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); + + tcg_temp_free(t0); + tcg_temp_free(t1); + tcg_temp_free(t2); return DISAS_NEXT; } @@ -2413,7 +2723,7 @@ static DisasJumpType op_vfene(DisasContext *s, DisasOps *o) static DisasJumpType op_vistr(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s, m4); + const uint8_t es = get_field(s, m3); const uint8_t m5 = get_field(s, m5); static gen_helper_gvec_2 * const g[3] = { gen_helper_gvec_vistr8, @@ -2497,6 +2807,31 @@ static DisasJumpType op_vstrc(DisasContext *s, DisasOps *o) return DISAS_NEXT; } +static DisasJumpType op_vstrs(DisasContext *s, DisasOps *o) +{ + typedef void (*helper_vstrs)(TCGv_ptr, TCGv_ptr, TCGv_ptr, + TCGv_ptr, TCGv_ptr, TCGv_i32); + static const helper_vstrs fns[3][2] = { + { gen_helper_gvec_vstrs_8, gen_helper_gvec_vstrs_zs8 }, + { gen_helper_gvec_vstrs_16, gen_helper_gvec_vstrs_zs16 }, + { gen_helper_gvec_vstrs_32, gen_helper_gvec_vstrs_zs32 }, + }; + const uint8_t es = get_field(s, m5); + const uint8_t m6 = get_field(s, m6); + const bool zs = extract32(m6, 1, 1); + + if (es > ES_32 || m6 & ~2) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2), + get_field(s, 
v3), get_field(s, v4), + cpu_env, 0, fns[es][zs]); + set_cc_static(s); + return DISAS_NEXT; +} + static DisasJumpType op_vfa(DisasContext *s, DisasOps *o) { const uint8_t fpf = get_field(s, m4); @@ -2720,23 +3055,59 @@ static DisasJumpType op_vcdg(DisasContext *s, DisasOps *o) switch (s->fields.op2) { case 0xc3: - if (fpf == FPF_LONG) { + switch (fpf) { + case FPF_LONG: fn = gen_helper_gvec_vcdg64; + break; + case FPF_SHORT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH2)) { + fn = gen_helper_gvec_vcdg32; + } + break; + default: + break; } break; case 0xc1: - if (fpf == FPF_LONG) { + switch (fpf) { + case FPF_LONG: fn = gen_helper_gvec_vcdlg64; + break; + case FPF_SHORT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH2)) { + fn = gen_helper_gvec_vcdlg32; + } + break; + default: + break; } break; case 0xc2: - if (fpf == FPF_LONG) { + switch (fpf) { + case FPF_LONG: fn = gen_helper_gvec_vcgd64; + break; + case FPF_SHORT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH2)) { + fn = gen_helper_gvec_vcgd32; + } + break; + default: + break; } break; case 0xc0: - if (fpf == FPF_LONG) { + switch (fpf) { + case FPF_LONG: fn = gen_helper_gvec_vclgd64; + break; + case FPF_SHORT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH2)) { + fn = gen_helper_gvec_vclgd32; + } + break; + default: + break; } break; case 0xc7: diff --git a/target/s390x/tcg/vec.h b/target/s390x/tcg/vec.h index a6e361869b..8d095efcfc 100644 --- a/target/s390x/tcg/vec.h +++ b/target/s390x/tcg/vec.h @@ -38,7 +38,7 @@ typedef union S390Vector { * W: [ 1][ 0] - [ 3][ 2] * DW: [ 0] - [ 1] */ -#ifndef HOST_WORDS_BIGENDIAN +#if !HOST_BIG_ENDIAN #define H1(x) ((x) ^ 7) #define H2(x) ((x) ^ 3) #define H4(x) ((x) ^ 1) diff --git a/target/s390x/tcg/vec_fpu_helper.c b/target/s390x/tcg/vec_fpu_helper.c index 1a77993471..75cf605b9f 100644 --- a/target/s390x/tcg/vec_fpu_helper.c +++ b/target/s390x/tcg/vec_fpu_helper.c @@ -10,7 +10,6 @@ * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "cpu.h" #include "s390x-internal.h" #include "vec.h" @@ -176,6 +175,30 @@ static void vop128_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, *v1 = tmp; } +static float32 vcdg32(float32 a, float_status *s) +{ + return int32_to_float32(a, s); +} + +static float32 vcdlg32(float32 a, float_status *s) +{ + return uint32_to_float32(a, s); +} + +static float32 vcgd32(float32 a, float_status *s) +{ + const float32 tmp = float32_to_int32(a, s); + + return float32_is_any_nan(a) ? INT32_MIN : tmp; +} + +static float32 vclgd32(float32 a, float_status *s) +{ + const float32 tmp = float32_to_uint32(a, s); + + return float32_is_any_nan(a) ? 
0 : tmp; +} + static float64 vcdg64(float64 a, float_status *s) { return int64_to_float64(a, s); @@ -211,6 +234,9 @@ void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, CPUS390XState *env, \ vop##BITS##_2(v1, v2, env, se, XxC, erm, FN, GETPC()); \ } +#define DEF_GVEC_VOP2_32(NAME) \ +DEF_GVEC_VOP2_FN(NAME, NAME##32, 32) + #define DEF_GVEC_VOP2_64(NAME) \ DEF_GVEC_VOP2_FN(NAME, NAME##64, 64) @@ -219,6 +245,10 @@ DEF_GVEC_VOP2_FN(NAME, float32_##OP, 32) \ DEF_GVEC_VOP2_FN(NAME, float64_##OP, 64) \ DEF_GVEC_VOP2_FN(NAME, float128_##OP, 128) +DEF_GVEC_VOP2_32(vcdg) +DEF_GVEC_VOP2_32(vcdlg) +DEF_GVEC_VOP2_32(vcgd) +DEF_GVEC_VOP2_32(vclgd) DEF_GVEC_VOP2_64(vcdg) DEF_GVEC_VOP2_64(vcdlg) DEF_GVEC_VOP2_64(vcgd) @@ -794,7 +824,7 @@ static S390MinMaxRes vfmin_res(uint16_t dcmask_a, uint16_t dcmask_b, default: g_assert_not_reached(); } - } else if (unlikely(dcmask_a & dcmask_b & DCMASK_ZERO)) { + } else if (unlikely((dcmask_a & DCMASK_ZERO) && (dcmask_b & DCMASK_ZERO))) { switch (type) { case S390_MINMAX_TYPE_JAVA: return neg_a ? S390_MINMAX_RES_A : S390_MINMAX_RES_B; @@ -844,7 +874,7 @@ static S390MinMaxRes vfmax_res(uint16_t dcmask_a, uint16_t dcmask_b, default: g_assert_not_reached(); } - } else if (unlikely(dcmask_a & dcmask_b & DCMASK_ZERO)) { + } else if (unlikely((dcmask_a & DCMASK_ZERO) && (dcmask_b & DCMASK_ZERO))) { const bool neg_a = dcmask_a & DCMASK_NEGATIVE; switch (type) { diff --git a/target/s390x/tcg/vec_helper.c b/target/s390x/tcg/vec_helper.c index ededf13cf0..48d86722b2 100644 --- a/target/s390x/tcg/vec_helper.c +++ b/target/s390x/tcg/vec_helper.c @@ -200,7 +200,6 @@ void HELPER(vstl)(CPUS390XState *env, const void *v1, uint64_t addr, addr = wrap_address(env, addr + 8); cpu_stq_data_ra(env, addr, s390_vec_read_element64(v1, 1), GETPC()); } else { - S390Vector tmp = {}; int i; for (i = 0; i < bytes; i++) { @@ -209,6 +208,5 @@ void HELPER(vstl)(CPUS390XState *env, const void *v1, uint64_t addr, cpu_stb_data_ra(env, addr, byte, GETPC()); addr = wrap_address(env, addr + 1); } - *(S390Vector *)v1 = tmp; } } diff --git a/target/s390x/tcg/vec_int_helper.c b/target/s390x/tcg/vec_int_helper.c index 5561b3ed90..53ab5c5eb3 100644 --- a/target/s390x/tcg/vec_int_helper.c +++ b/target/s390x/tcg/vec_int_helper.c @@ -10,7 +10,6 @@ * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "cpu.h" #include "vec.h" #include "exec/helper-proto.h" @@ -540,18 +539,73 @@ void HELPER(gvec_vsl)(void *v1, const void *v2, uint64_t count, s390_vec_shl(v1, v2, count); } +void HELPER(gvec_vsl_ve2)(void *v1, const void *v2, const void *v3, + uint32_t desc) +{ + S390Vector tmp; + uint32_t sh, e0, e1 = 0; + int i; + + for (i = 15; i >= 0; --i, e1 = e0) { + e0 = s390_vec_read_element8(v2, i); + sh = s390_vec_read_element8(v3, i) & 7; + + s390_vec_write_element8(&tmp, i, rol32(e0 | (e1 << 24), sh)); + } + + *(S390Vector *)v1 = tmp; +} + void HELPER(gvec_vsra)(void *v1, const void *v2, uint64_t count, uint32_t desc) { s390_vec_sar(v1, v2, count); } +void HELPER(gvec_vsra_ve2)(void *v1, const void *v2, const void *v3, + uint32_t desc) +{ + S390Vector tmp; + uint32_t sh, e0, e1 = 0; + int i = 0; + + /* Byte 0 is special only. 
*/ + e0 = (int32_t)(int8_t)s390_vec_read_element8(v2, i); + sh = s390_vec_read_element8(v3, i) & 7; + s390_vec_write_element8(&tmp, i, e0 >> sh); + + e1 = e0; + for (i = 1; i < 16; ++i, e1 = e0) { + e0 = s390_vec_read_element8(v2, i); + sh = s390_vec_read_element8(v3, i) & 7; + s390_vec_write_element8(&tmp, i, (e0 | e1 << 8) >> sh); + } + + *(S390Vector *)v1 = tmp; +} + void HELPER(gvec_vsrl)(void *v1, const void *v2, uint64_t count, uint32_t desc) { s390_vec_shr(v1, v2, count); } +void HELPER(gvec_vsrl_ve2)(void *v1, const void *v2, const void *v3, + uint32_t desc) +{ + S390Vector tmp; + uint32_t sh, e0, e1 = 0; + + for (int i = 0; i < 16; ++i, e1 = e0) { + e0 = s390_vec_read_element8(v2, i); + sh = s390_vec_read_element8(v3, i) & 7; + + s390_vec_write_element8(&tmp, i, (e0 | (e1 << 8)) >> sh); + } + + *(S390Vector *)v1 = tmp; +} + #define DEF_VSCBI(BITS) \ void HELPER(gvec_vscbi##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ diff --git a/target/s390x/tcg/vec_string_helper.c b/target/s390x/tcg/vec_string_helper.c index ac315eb095..9b85becdfb 100644 --- a/target/s390x/tcg/vec_string_helper.c +++ b/target/s390x/tcg/vec_string_helper.c @@ -10,7 +10,6 @@ * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "cpu.h" #include "s390x-internal.h" #include "vec.h" @@ -471,3 +470,102 @@ void HELPER(gvec_vstrc_cc_rt##BITS)(void *v1, const void *v2, const void *v3, \ DEF_VSTRC_CC_RT_HELPER(8) DEF_VSTRC_CC_RT_HELPER(16) DEF_VSTRC_CC_RT_HELPER(32) + +static int vstrs(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, + const S390Vector *v4, uint8_t es, bool zs) +{ + int substr_elen, substr_0, str_elen, i, j, k, cc; + int nelem = 16 >> es; + bool eos = false; + + substr_elen = s390_vec_read_element8(v4, 7) >> es; + + /* If ZS, bound substr length by min(nelem, strlen(v3)). */ + if (zs) { + substr_elen = MIN(substr_elen, nelem); + for (i = 0; i < substr_elen; i++) { + if (s390_vec_read_element(v3, i, es) == 0) { + substr_elen = i; + break; + } + } + } + + if (substr_elen == 0) { + cc = 2; /* full match for degenerate case of empty substr */ + k = 0; + goto done; + } + + /* If ZS, look for eos in the searched string. */ + if (zs) { + for (k = 0; k < nelem; k++) { + if (s390_vec_read_element(v2, k, es) == 0) { + eos = true; + break; + } + } + str_elen = k; + } else { + str_elen = nelem; + } + + substr_0 = s390_vec_read_element(v3, 0, es); + + for (k = 0; ; k++) { + for (; k < str_elen; k++) { + if (s390_vec_read_element(v2, k, es) == substr_0) { + break; + } + } + + /* If we reached the end of the string, no match. */ + if (k == str_elen) { + cc = eos; /* no match (with or without zero char) */ + goto done; + } + + /* If the substring is only one char, match. */ + if (substr_elen == 1) { + cc = 2; /* full match */ + goto done; + } + + /* If the match begins at the last char, we have a partial match. */ + if (k == str_elen - 1) { + cc = 3; /* partial match */ + goto done; + } + + i = MIN(nelem, k + substr_elen); + for (j = k + 1; j < i; j++) { + uint32_t e2 = s390_vec_read_element(v2, j, es); + uint32_t e3 = s390_vec_read_element(v3, j - k, es); + if (e2 != e3) { + break; + } + } + if (j == i) { + /* Matched up until "end". */ + cc = i - k == substr_elen ? 
2 : 3; /* full or partial match */ + goto done; + } + } + + done: + s390_vec_write_element64(v1, 0, k << es); + s390_vec_write_element64(v1, 1, 0); + return cc; +} + +#define DEF_VSTRS_HELPER(BITS) \ +void QEMU_FLATTEN HELPER(gvec_vstrs_##BITS)(void *v1, const void *v2, \ + const void *v3, const void *v4, CPUS390XState *env, uint32_t desc) \ + { env->cc_op = vstrs(v1, v2, v3, v4, MO_##BITS, false); } \ +void QEMU_FLATTEN HELPER(gvec_vstrs_zs##BITS)(void *v1, const void *v2, \ + const void *v3, const void *v4, CPUS390XState *env, uint32_t desc) \ + { env->cc_op = vstrs(v1, v2, v3, v4, MO_##BITS, true); } + +DEF_VSTRS_HELPER(8) +DEF_VSTRS_HELPER(16) +DEF_VSTRS_HELPER(32) diff --git a/target/sh4/README.sh4 b/target/sh4/README.sh4 deleted file mode 100644 index a192ca7540..0000000000 --- a/target/sh4/README.sh4 +++ /dev/null @@ -1,150 +0,0 @@ -qemu target: sh4 -author: Samuel Tardieu -last modified: Tue Dec 6 07:22:44 CET 2005 - -The sh4 target is not ready at all yet for integration in qemu. This -file describes the current state of implementation. - -Most places requiring attention and/or modification can be detected by -looking for "XXXXX" or "abort()". - -The sh4 core is located in target/sh4/*, while the 7750 peripheral -features (IO ports for example) are located in hw/sh7750.[ch]. The -main board description is in hw/shix.c, and the NAND flash in -hw/tc58128.[ch]. - -All the shortcomings indicated here will eventually be resolved. This -is a work in progress. Features are added in a semi-random order: if a -point is blocking to progress on booting the Linux kernel for the shix -board, it is addressed first; if feedback is necessary and no progress -can be made on blocking points until it is received, a random feature -is worked on. - -Goals ------ - -The primary model being worked on is the soft MMU target to be able to -emulate the Shix 2.0 board by Alexis Polti, described at -https://web.archive.org/web/20070917001736/http://perso.enst.fr/~polti/realisations/shix20/ - -Ultimately, qemu will be coupled with a system C or a verilog -simulator to simulate the whole board functionalities. - -A sh4 user-mode has also somewhat started but will be worked on -afterwards. The goal is to automate tests for GNAT (GNU Ada) compiler -that I ported recently to the sh4-linux target. - -Registers ---------- - -16 general purpose registers are available at any time. The first 8 -registers are banked and the non-directly visible ones can be accessed -by privileged instructions. In qemu, we define 24 general purpose -registers and the code generation use either [0-7]+[8-15] or -[16-23]+[8-15] depending on the MD and RB flags in the sr -configuration register. - -Instructions ------------- - -Most sh4 instructions have been implemented. The missing ones at this -time are: - - FPU related instructions - - LDTLB to load a new MMU entry - - SLEEP to put the processor in sleep mode - -Most instructions could be optimized a lot. This will be worked on -after the current model is fully functional unless debugging -convenience requires that it is done early. - -Many instructions did not have a chance to be tested yet. The plan is -to implement unit and regression testing of those in the future. - -MMU ---- - -The MMU is implemented in the sh4 core. MMU management has not been -tested at all yet. In the sh7750, it can be manipulated through memory -mapped registers and this part has not yet been implemented. 
- -Exceptions ----------- - -Exceptions are implemented as described in the sh4 reference manual -but have not been tested yet. They do not use qemu EXCP_ features -yet. - -IRQ ---- - -IRQ are not implemented yet. - -Peripheral features -------------------- - - + Serial ports - -Configuration and use of the first serial port (SCI) without -interrupts is supported. Input has not yet been tested. - -Configuration of the second serial port (SCIF) is supported. FIFO -handling infrastructure has been started but is not completed yet. - - + GPIO ports - -GPIO ports have been implemented. A registration function allows -external modules to register interest in some port changes (see -hw/tc58128.[ch] for an example) and will be called back. Interrupt -generation is not yet supported but some infrastructure is in place -for this purpose. Note that in the current model a peripheral module -cannot directly simulate a H->L->H input port transition and have an -interrupt generated on the low level. - - + TC58128 NAND flash - -TC58128 NAND flash is partially implemented through GPIO ports. It -supports reading from flash. - -GDB ---- - -GDB remote target support has been implemented and lightly tested. - -Files ------ - -File names are hardcoded at this time. The bootloader must be stored in -shix_bios.bin in the current directory. The initial Linux image must -be stored in shix_linux_nand.bin in the current directory in NAND -format. Test files can be obtained from -http://perso.enst.fr/~polti/robot/ as well as the various datasheets I -use. - -qemu disk parameter on the command line is unused. You can supply any -existing image and it will be ignored. As the goal is to simulate an -embedded target, it is not clear how this parameter will be handled in -the future. - -To build an ELF kernel image from the NAND image, 16 bytes have to be -stripped off the end of every 528 bytes, keeping only 512 of them. The -following Python code snippet does it: - -#! /usr/bin/python - -def denand (infd, outfd): - while True: - d = infd.read (528) - if not d: return - outfd.write (d[:512]) - -if __name__ == '__main__': - import sys - denand (open (sys.argv[1], 'rb'), - open (sys.argv[2], 'wb')) - -Style isssues -------------- - -There is currently a mix between my style (space before opening -parenthesis) and qemu style. This will be resolved before final -integration is proposed. 
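For reference, the observable behaviour of the vstrs() helper added to target/s390x/tcg/vec_string_helper.c above can be modelled by a short standalone C program. This is only an illustrative sketch of the byte-element form with the zero-search flag off; the names str_search and StrSearchRes are invented for the example and do not exist in QEMU. The helper itself stores the byte index of the (partial) match in the first doubleword of the result vector and sets the condition code: 0/1 for no match (1 only when the zero-search form saw a terminating zero element), 2 for a full match, 3 for a partial match that runs off the end of the 16-byte vector.

#include <stdint.h>
#include <stdio.h>

typedef struct {
    int cc;      /* 0: no match, 2: full match, 3: partial match at the end */
    int index;   /* byte index of the match, 16 when nothing matched */
} StrSearchRes;

/* Model of the helper's result for byte elements (es = 0), zero search off. */
static StrSearchRes str_search(const uint8_t hay[16],
                               const uint8_t *needle, int needle_len)
{
    StrSearchRes r = { .cc = 0, .index = 16 };

    if (needle_len == 0) {
        /* Degenerate case: an empty needle is a full match at offset 0. */
        r.cc = 2;
        r.index = 0;
        return r;
    }
    for (int k = 0; k < 16; k++) {
        int avail = 16 - k;
        int n = needle_len < avail ? needle_len : avail;
        int j = 0;

        while (j < n && hay[k + j] == needle[j]) {
            j++;
        }
        if (j == needle_len) {       /* whole needle matched */
            r.cc = 2;
            r.index = k;
            return r;
        }
        if (j == avail) {            /* matched up to the end of the vector */
            r.cc = 3;
            r.index = k;
            return r;
        }
        /* Mismatch: keep scanning from the next byte. */
    }
    return r;
}

int main(void)
{
    /* 16 data bytes; the string literal's trailing NUL is never read. */
    static const uint8_t hay[] = "ABCDEFGHIJKLMNOP";

    StrSearchRes full = str_search(hay, (const uint8_t *)"DEF", 3);
    StrSearchRes part = str_search(hay, (const uint8_t *)"OPQ", 3);
    StrSearchRes none = str_search(hay, (const uint8_t *)"XY", 2);

    printf("full: cc=%d index=%d\n", full.cc, full.index);  /* cc=2 index=3  */
    printf("part: cc=%d index=%d\n", part.cc, part.index);  /* cc=3 index=14 */
    printf("none: cc=%d index=%d\n", none.cc, none.index);  /* cc=0 index=16 */
    return 0;
}

The real helper generalizes this to 2- and 4-byte elements via its es parameter and, in the _zs variants, additionally bounds both the needle and the searched vector by their first zero element before applying the same matching rules.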
diff --git a/target/sh4/cpu-param.h b/target/sh4/cpu-param.h index 81ace3503b..98a02509bb 100644 --- a/target/sh4/cpu-param.h +++ b/target/sh4/cpu-param.h @@ -6,7 +6,7 @@ */ #ifndef SH4_CPU_PARAM_H -#define SH4_CPU_PARAM_H 1 +#define SH4_CPU_PARAM_H #define TARGET_LONG_BITS 32 #define TARGET_PAGE_BITS 12 /* 4k */ diff --git a/target/sh4/cpu-qom.h b/target/sh4/cpu-qom.h index 8903b4b9c7..d4192d1090 100644 --- a/target/sh4/cpu-qom.h +++ b/target/sh4/cpu-qom.h @@ -29,8 +29,7 @@ #define TYPE_SH7751R_CPU SUPERH_CPU_TYPE_NAME("sh7751r") #define TYPE_SH7785_CPU SUPERH_CPU_TYPE_NAME("sh7785") -OBJECT_DECLARE_TYPE(SuperHCPU, SuperHCPUClass, - SUPERH_CPU) +OBJECT_DECLARE_CPU_TYPE(SuperHCPU, SuperHCPUClass, SUPERH_CPU) /** * SuperHCPUClass: diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c index 8326922942..827cee25af 100644 --- a/target/sh4/cpu.c +++ b/target/sh4/cpu.c @@ -34,15 +34,37 @@ static void superh_cpu_set_pc(CPUState *cs, vaddr value) cpu->env.pc = value; } +static vaddr superh_cpu_get_pc(CPUState *cs) +{ + SuperHCPU *cpu = SUPERH_CPU(cs); + + return cpu->env.pc; +} + static void superh_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { SuperHCPU *cpu = SUPERH_CPU(cs); - cpu->env.pc = tb->pc; + cpu->env.pc = tb_pc(tb); cpu->env.flags = tb->flags & TB_FLAG_ENVFLAGS_MASK; } +static void superh_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + SuperHCPU *cpu = SUPERH_CPU(cs); + + cpu->env.pc = data[0]; + cpu->env.flags = data[1]; + /* + * Theoretically delayed_pc should also be restored. In practice the + * branch instruction is re-executed after exception, so the delayed + * branch target will be recomputed. + */ +} + #ifndef CONFIG_USER_ONLY static bool superh_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb) @@ -50,10 +72,10 @@ static bool superh_io_recompile_replay_branch(CPUState *cs, SuperHCPU *cpu = SUPERH_CPU(cs); CPUSH4State *env = &cpu->env; - if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0 - && env->pc != tb->pc) { + if ((env->flags & (TB_FLAG_DELAY_SLOT | TB_FLAG_DELAY_SLOT_COND)) + && env->pc != tb_pc(tb)) { env->pc -= 2; - env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); + env->flags &= ~(TB_FLAG_DELAY_SLOT | TB_FLAG_DELAY_SLOT_COND); return true; } return false; @@ -236,10 +258,11 @@ static const struct SysemuCPUOps sh4_sysemu_ops = { static const struct TCGCPUOps superh_tcg_ops = { .initialize = sh4_translate_init, .synchronize_from_tb = superh_cpu_synchronize_from_tb, - .cpu_exec_interrupt = superh_cpu_exec_interrupt, - .tlb_fill = superh_cpu_tlb_fill, + .restore_state_to_opc = superh_restore_state_to_opc, #ifndef CONFIG_USER_ONLY + .tlb_fill = superh_cpu_tlb_fill, + .cpu_exec_interrupt = superh_cpu_exec_interrupt, .do_interrupt = superh_cpu_do_interrupt, .do_unaligned_access = superh_cpu_do_unaligned_access, .io_recompile_replay_branch = superh_io_recompile_replay_branch, @@ -261,6 +284,7 @@ static void superh_cpu_class_init(ObjectClass *oc, void *data) cc->has_work = superh_cpu_has_work; cc->dump_state = superh_cpu_dump_state; cc->set_pc = superh_cpu_set_pc; + cc->get_pc = superh_cpu_get_pc; cc->gdb_read_register = superh_cpu_gdb_read_register; cc->gdb_write_register = superh_cpu_gdb_write_register; #ifndef CONFIG_USER_ONLY diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h index 01c4344082..727b829598 100644 --- a/target/sh4/cpu.h +++ b/target/sh4/cpu.h @@ -22,6 +22,7 @@ #include "cpu-qom.h" #include "exec/cpu-defs.h" +#include "qemu/cpu-float.h" /* CPU Subtypes */ #define 
SH_CPU_SH7750 (1 << 0) @@ -77,25 +78,33 @@ #define FPSCR_RM_NEAREST (0 << 0) #define FPSCR_RM_ZERO (1 << 0) -#define DELAY_SLOT_MASK 0x7 -#define DELAY_SLOT (1 << 0) -#define DELAY_SLOT_CONDITIONAL (1 << 1) -#define DELAY_SLOT_RTE (1 << 2) +#define TB_FLAG_DELAY_SLOT (1 << 0) +#define TB_FLAG_DELAY_SLOT_COND (1 << 1) +#define TB_FLAG_DELAY_SLOT_RTE (1 << 2) +#define TB_FLAG_PENDING_MOVCA (1 << 3) +#define TB_FLAG_GUSA_SHIFT 4 /* [11:4] */ +#define TB_FLAG_GUSA_EXCLUSIVE (1 << 12) +#define TB_FLAG_UNALIGN (1 << 13) +#define TB_FLAG_SR_FD (1 << SR_FD) /* 15 */ +#define TB_FLAG_FPSCR_PR FPSCR_PR /* 19 */ +#define TB_FLAG_FPSCR_SZ FPSCR_SZ /* 20 */ +#define TB_FLAG_FPSCR_FR FPSCR_FR /* 21 */ +#define TB_FLAG_SR_RB (1 << SR_RB) /* 29 */ +#define TB_FLAG_SR_MD (1 << SR_MD) /* 30 */ -#define TB_FLAG_PENDING_MOVCA (1 << 3) - -#define GUSA_SHIFT 4 -#ifdef CONFIG_USER_ONLY -#define GUSA_EXCLUSIVE (1 << 12) -#define GUSA_MASK ((0xff << GUSA_SHIFT) | GUSA_EXCLUSIVE) -#else -/* Provide dummy versions of the above to allow tests against tbflags - to be elided while avoiding ifdefs. */ -#define GUSA_EXCLUSIVE 0 -#define GUSA_MASK 0 -#endif - -#define TB_FLAG_ENVFLAGS_MASK (DELAY_SLOT_MASK | GUSA_MASK) +#define TB_FLAG_DELAY_SLOT_MASK (TB_FLAG_DELAY_SLOT | \ + TB_FLAG_DELAY_SLOT_COND | \ + TB_FLAG_DELAY_SLOT_RTE) +#define TB_FLAG_GUSA_MASK ((0xff << TB_FLAG_GUSA_SHIFT) | \ + TB_FLAG_GUSA_EXCLUSIVE) +#define TB_FLAG_FPSCR_MASK (TB_FLAG_FPSCR_PR | \ + TB_FLAG_FPSCR_SZ | \ + TB_FLAG_FPSCR_FR) +#define TB_FLAG_SR_MASK (TB_FLAG_SR_FD | \ + TB_FLAG_SR_RB | \ + TB_FLAG_SR_MD) +#define TB_FLAG_ENVFLAGS_MASK (TB_FLAG_DELAY_SLOT_MASK | \ + TB_FLAG_GUSA_MASK) typedef struct tlb_t { uint32_t vpn; /* virtual page number */ @@ -129,7 +138,7 @@ typedef struct memory_content { struct memory_content *next; } memory_content; -typedef struct CPUSH4State { +typedef struct CPUArchState { uint32_t flags; /* general execution flags */ uint32_t gregs[24]; /* general registers */ float32 fregs[32]; /* floating point registers */ @@ -194,7 +203,7 @@ typedef struct CPUSH4State { * * A SuperH CPU. 
*/ -struct SuperHCPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ @@ -204,25 +213,23 @@ struct SuperHCPU { }; -void superh_cpu_do_interrupt(CPUState *cpu); -bool superh_cpu_exec_interrupt(CPUState *cpu, int int_req); void superh_cpu_dump_state(CPUState *cpu, FILE *f, int flags); hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); int superh_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int superh_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); +G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr); void sh4_translate_init(void); -int cpu_sh4_signal_handler(int host_signum, void *pinfo, - void *puc); +void sh4_cpu_list(void); + +#if !defined(CONFIG_USER_ONLY) bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); - -void sh4_cpu_list(void); -#if !defined(CONFIG_USER_ONLY) +void superh_cpu_do_interrupt(CPUState *cpu); +bool superh_cpu_exec_interrupt(CPUState *cpu, int int_req); void cpu_sh4_invalidate_tlb(CPUSH4State *s); uint32_t cpu_sh4_read_mmaped_itlb_addr(CPUSH4State *s, hwaddr addr); @@ -250,7 +257,6 @@ void cpu_load_tlb(CPUSH4State * env); #define SUPERH_CPU_TYPE_NAME(model) model SUPERH_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_SUPERH_CPU -#define cpu_signal_handler cpu_sh4_signal_handler #define cpu_list sh4_cpu_list /* MMU modes definitions */ @@ -259,16 +265,13 @@ static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch) { /* The instruction in a RTE delay slot is fetched in privileged mode, but executed in user mode. */ - if (ifetch && (env->flags & DELAY_SLOT_RTE)) { + if (ifetch && (env->flags & TB_FLAG_DELAY_SLOT_RTE)) { return 0; } else { return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0; } } -typedef CPUSH4State CPUArchState; -typedef SuperHCPU ArchCPU; - #include "exec/cpu-all.h" /* MMU control register */ @@ -370,12 +373,14 @@ static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc, { *pc = env->pc; /* For a gUSA region, notice the end of the region. */ - *cs_base = env->flags & GUSA_MASK ? env->gregs[0] : 0; - *flags = env->flags /* TB_FLAG_ENVFLAGS_MASK: bits 0-2, 4-12 */ - | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */ - | (env->sr & ((1u << SR_MD) | (1u << SR_RB))) /* Bits 29-30 */ - | (env->sr & (1u << SR_FD)) /* Bit 15 */ + *cs_base = env->flags & TB_FLAG_GUSA_MASK ? env->gregs[0] : 0; + *flags = env->flags + | (env->fpscr & TB_FLAG_FPSCR_MASK) + | (env->sr & TB_FLAG_SR_MASK) | (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 3 */ +#ifdef CONFIG_USER_ONLY + *flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus; +#endif } #endif /* SH4_CPU_H */ diff --git a/target/sh4/helper.c b/target/sh4/helper.c index 2d622081e8..e02e7af607 100644 --- a/target/sh4/helper.c +++ b/target/sh4/helper.c @@ -45,11 +45,6 @@ #if defined(CONFIG_USER_ONLY) -void superh_cpu_do_interrupt(CPUState *cs) -{ - cs->exception_index = -1; -} - int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr) { /* For user mode, only U0 area is cacheable. 
*/ @@ -152,11 +147,11 @@ void superh_cpu_do_interrupt(CPUState *cs) env->sr |= (1u << SR_BL) | (1u << SR_MD) | (1u << SR_RB); env->lock_addr = -1; - if (env->flags & DELAY_SLOT_MASK) { + if (env->flags & TB_FLAG_DELAY_SLOT_MASK) { /* Branch instruction should be executed again before delay slot. */ env->spc -= 2; /* Clear flags for exception/interrupt routine. */ - env->flags &= ~DELAY_SLOT_MASK; + env->flags &= ~TB_FLAG_DELAY_SLOT_MASK; } if (do_exp) { @@ -784,8 +779,6 @@ int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr) return 0; } -#endif - bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { if (interrupt_request & CPU_INTERRUPT_HARD) { @@ -793,7 +786,7 @@ bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request) CPUSH4State *env = &cpu->env; /* Delay slots are indivisible, ignore interrupts */ - if (env->flags & DELAY_SLOT_MASK) { + if (env->flags & TB_FLAG_DELAY_SLOT_MASK) { return false; } else { superh_cpu_do_interrupt(cs); @@ -811,11 +804,6 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size, CPUSH4State *env = &cpu->env; int ret; -#ifdef CONFIG_USER_ONLY - ret = (access_type == MMU_DATA_STORE ? MMU_DTLB_VIOLATION_WRITE : - access_type == MMU_INST_FETCH ? MMU_ITLB_VIOLATION : - MMU_DTLB_VIOLATION_READ); -#else target_ulong physical; int prot; @@ -834,7 +822,6 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size, if (ret != MMU_DTLB_MULTIPLE && ret != MMU_ITLB_MULTIPLE) { env->pteh = (env->pteh & PTEH_ASID_MASK) | (address & PTEH_VPN_MASK); } -#endif env->tea = address; switch (ret) { @@ -873,3 +860,4 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size, } cpu_loop_exit_restore(cs, retaddr); } +#endif /* !CONFIG_USER_ONLY */ diff --git a/target/sh4/helper.h b/target/sh4/helper.h index 1e768fcbc7..8d792f6b55 100644 --- a/target/sh4/helper.h +++ b/target/sh4/helper.h @@ -3,7 +3,6 @@ DEF_HELPER_1(raise_illegal_instruction, noreturn, env) DEF_HELPER_1(raise_slot_illegal_instruction, noreturn, env) DEF_HELPER_1(raise_fpu_disable, noreturn, env) DEF_HELPER_1(raise_slot_fpu_disable, noreturn, env) -DEF_HELPER_1(debug, noreturn, env) DEF_HELPER_1(sleep, noreturn, env) DEF_HELPER_2(trapa, noreturn, env, i32) DEF_HELPER_1(exclusive, noreturn, env) diff --git a/target/sh4/op_helper.c b/target/sh4/op_helper.c index c0cbb95382..a663335c39 100644 --- a/target/sh4/op_helper.c +++ b/target/sh4/op_helper.c @@ -29,6 +29,9 @@ void superh_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { + CPUSH4State *env = cs->env_ptr; + + env->tea = addr; switch (access_type) { case MMU_INST_FETCH: case MMU_DATA_LOAD: @@ -37,6 +40,8 @@ void superh_cpu_do_unaligned_access(CPUState *cs, vaddr addr, case MMU_DATA_STORE: cs->exception_index = 0x100; break; + default: + g_assert_not_reached(); } cpu_loop_exit_restore(cs, retaddr); } @@ -52,8 +57,9 @@ void helper_ldtlb(CPUSH4State *env) #endif } -static inline void QEMU_NORETURN raise_exception(CPUSH4State *env, int index, - uintptr_t retaddr) +static inline G_NORETURN +void raise_exception(CPUSH4State *env, int index, + uintptr_t retaddr) { CPUState *cs = env_cpu(env); @@ -81,11 +87,6 @@ void helper_raise_slot_fpu_disable(CPUSH4State *env) raise_exception(env, 0x820, 0); } -void helper_debug(CPUSH4State *env) -{ - raise_exception(env, EXCP_DEBUG, 0); -} - void helper_sleep(CPUSH4State *env) { CPUState *cs = env_cpu(env); diff --git a/target/sh4/translate.c b/target/sh4/translate.c index 8704fea1ca..7db3468b01 100644 --- 
a/target/sh4/translate.c +++ b/target/sh4/translate.c @@ -50,8 +50,10 @@ typedef struct DisasContext { #if defined(CONFIG_USER_ONLY) #define IS_USER(ctx) 1 +#define UNALIGN(C) (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN) #else #define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD))) +#define UNALIGN(C) 0 #endif /* Target-specific values for ctx->base.is_jmp. */ @@ -173,13 +175,13 @@ void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags) i, env->gregs[i], i + 1, env->gregs[i + 1], i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]); } - if (env->flags & DELAY_SLOT) { + if (env->flags & TB_FLAG_DELAY_SLOT) { qemu_printf("in delay slot (delayed_pc=0x%08x)\n", env->delayed_pc); - } else if (env->flags & DELAY_SLOT_CONDITIONAL) { + } else if (env->flags & TB_FLAG_DELAY_SLOT_COND) { qemu_printf("in conditional delay slot (delayed_pc=0x%08x)\n", env->delayed_pc); - } else if (env->flags & DELAY_SLOT_RTE) { + } else if (env->flags & TB_FLAG_DELAY_SLOT_RTE) { qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n", env->delayed_pc); } @@ -221,7 +223,7 @@ static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc) static inline bool use_exit_tb(DisasContext *ctx) { - return (ctx->tbflags & GUSA_EXCLUSIVE) != 0; + return (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) != 0; } static bool use_goto_tb(DisasContext *ctx, target_ulong dest) @@ -240,9 +242,7 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) tcg_gen_exit_tb(ctx->base.tb, n); } else { tcg_gen_movi_i32(cpu_pc, dest); - if (ctx->base.singlestep_enabled) { - gen_helper_debug(cpu_env); - } else if (use_exit_tb(ctx)) { + if (use_exit_tb(ctx)) { tcg_gen_exit_tb(NULL, 0); } else { tcg_gen_lookup_and_goto_ptr(); @@ -258,9 +258,7 @@ static void gen_jump(DisasContext * ctx) delayed jump as immediate jump are conditinal jumps */ tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc); tcg_gen_discard_i32(cpu_delayed_pc); - if (ctx->base.singlestep_enabled) { - gen_helper_debug(cpu_env); - } else if (use_exit_tb(ctx)) { + if (use_exit_tb(ctx)) { tcg_gen_exit_tb(NULL, 0); } else { tcg_gen_lookup_and_goto_ptr(); @@ -278,12 +276,12 @@ static void gen_conditional_jump(DisasContext *ctx, target_ulong dest, TCGLabel *l1 = gen_new_label(); TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE; - if (ctx->tbflags & GUSA_EXCLUSIVE) { + if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) { /* When in an exclusive region, we must continue to the end. Therefore, exit the region on a taken branch, but otherwise fall through to the next instruction. */ tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1); - tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK); + tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK); /* Note that this won't actually use a goto_tb opcode because we disallow it in use_goto_tb, but it handles exit + singlestep. */ gen_goto_tb(ctx, 0, dest); @@ -309,14 +307,14 @@ static void gen_delayed_conditional_jump(DisasContext * ctx) tcg_gen_mov_i32(ds, cpu_delayed_cond); tcg_gen_discard_i32(cpu_delayed_cond); - if (ctx->tbflags & GUSA_EXCLUSIVE) { + if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) { /* When in an exclusive region, we must continue to the end. Therefore, exit the region on a taken branch, but otherwise fall through to the next instruction. */ tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1); /* Leave the gUSA region. 
*/ - tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK); + tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK); gen_jump(ctx); gen_set_label(l1); @@ -363,8 +361,8 @@ static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg) #define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe)) #define CHECK_NOT_DELAY_SLOT \ - if (ctx->envflags & DELAY_SLOT_MASK) { \ - goto do_illegal_slot; \ + if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) { \ + goto do_illegal_slot; \ } #define CHECK_PRIVILEGED \ @@ -438,7 +436,7 @@ static void _decode_opc(DisasContext * ctx) case 0x000b: /* rts */ CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr); - ctx->envflags |= DELAY_SLOT; + ctx->envflags |= TB_FLAG_DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x0028: /* clrmac */ @@ -460,7 +458,7 @@ static void _decode_opc(DisasContext * ctx) CHECK_NOT_DELAY_SLOT gen_write_sr(cpu_ssr); tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc); - ctx->envflags |= DELAY_SLOT_RTE; + ctx->envflags |= TB_FLAG_DELAY_SLOT_RTE; ctx->delayed_pc = (uint32_t) - 1; ctx->base.is_jmp = DISAS_STOP; return; @@ -499,7 +497,8 @@ static void _decode_opc(DisasContext * ctx) { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4); - tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL); + tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, + MO_TEUL | UNALIGN(ctx)); tcg_temp_free(addr); } return; @@ -507,18 +506,22 @@ static void _decode_opc(DisasContext * ctx) { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4); - tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL); + tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, + MO_TESL | UNALIGN(ctx)); tcg_temp_free(addr); } return; case 0xe000: /* mov #imm,Rn */ #ifdef CONFIG_USER_ONLY - /* Detect the start of a gUSA region. If so, update envflags - and end the TB. This will allow us to see the end of the - region (stored in R0) in the next TB. */ + /* + * Detect the start of a gUSA region (mov #-n, r15). + * If so, update envflags and end the TB. This will allow us + * to see the end of the region (stored in R0) in the next TB. 
+ */ if (B11_8 == 15 && B7_0s < 0 && (tb_cflags(ctx->base.tb) & CF_PARALLEL)) { - ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s); + ctx->envflags = + deposit32(ctx->envflags, TB_FLAG_GUSA_SHIFT, 8, B7_0s); ctx->base.is_jmp = DISAS_STOP; } #endif @@ -544,13 +547,13 @@ static void _decode_opc(DisasContext * ctx) case 0xa000: /* bra disp */ CHECK_NOT_DELAY_SLOT ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2; - ctx->envflags |= DELAY_SLOT; + ctx->envflags |= TB_FLAG_DELAY_SLOT; return; case 0xb000: /* bsr disp */ CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4); ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2; - ctx->envflags |= DELAY_SLOT; + ctx->envflags |= TB_FLAG_DELAY_SLOT; return; } @@ -562,19 +565,23 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB); return; case 0x2001: /* mov.w Rm,@Rn */ - tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW); + tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, + MO_TEUW | UNALIGN(ctx)); return; case 0x2002: /* mov.l Rm,@Rn */ - tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL); + tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, + MO_TEUL | UNALIGN(ctx)); return; case 0x6000: /* mov.b @Rm,Rn */ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB); return; case 0x6001: /* mov.w @Rm,Rn */ - tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW); + tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, + MO_TESW | UNALIGN(ctx)); return; case 0x6002: /* mov.l @Rm,Rn */ - tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL); + tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, + MO_TESL | UNALIGN(ctx)); return; case 0x2004: /* mov.b Rm,@-Rn */ { @@ -590,7 +597,8 @@ static void _decode_opc(DisasContext * ctx) { TCGv addr = tcg_temp_new(); tcg_gen_subi_i32(addr, REG(B11_8), 2); - tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW); + tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, + MO_TEUW | UNALIGN(ctx)); tcg_gen_mov_i32(REG(B11_8), addr); tcg_temp_free(addr); } @@ -599,7 +607,8 @@ static void _decode_opc(DisasContext * ctx) { TCGv addr = tcg_temp_new(); tcg_gen_subi_i32(addr, REG(B11_8), 4); - tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL); + tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, + MO_TEUL | UNALIGN(ctx)); tcg_gen_mov_i32(REG(B11_8), addr); tcg_temp_free(addr); } @@ -610,12 +619,14 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1); return; case 0x6005: /* mov.w @Rm+,Rn */ - tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW); + tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, + MO_TESW | UNALIGN(ctx)); if ( B11_8 != B7_4 ) tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2); return; case 0x6006: /* mov.l @Rm+,Rn */ - tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL); + tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, + MO_TESL | UNALIGN(ctx)); if ( B11_8 != B7_4 ) tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4); return; @@ -631,7 +642,8 @@ static void _decode_opc(DisasContext * ctx) { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B11_8), REG(0)); - tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW); + tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, + MO_TEUW | UNALIGN(ctx)); tcg_temp_free(addr); } return; @@ -639,7 +651,8 @@ static void _decode_opc(DisasContext * ctx) { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B11_8), REG(0)); - 
tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL); + tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, + MO_TEUL | UNALIGN(ctx)); tcg_temp_free(addr); } return; @@ -655,7 +668,8 @@ static void _decode_opc(DisasContext * ctx) { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B7_4), REG(0)); - tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW); + tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, + MO_TESW | UNALIGN(ctx)); tcg_temp_free(addr); } return; @@ -663,7 +677,8 @@ static void _decode_opc(DisasContext * ctx) { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B7_4), REG(0)); - tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL); + tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, + MO_TESL | UNALIGN(ctx)); tcg_temp_free(addr); } return; @@ -998,7 +1013,7 @@ static void _decode_opc(DisasContext * ctx) if (ctx->tbflags & FPSCR_SZ) { TCGv_i64 fp = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp, XHACK(B7_4)); - tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEQ); + tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEUQ); tcg_temp_free_i64(fp); } else { tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL); @@ -1008,7 +1023,7 @@ static void _decode_opc(DisasContext * ctx) CHECK_FPU_ENABLED if (ctx->tbflags & FPSCR_SZ) { TCGv_i64 fp = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ); + tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEUQ); gen_store_fpr64(ctx, fp, XHACK(B11_8)); tcg_temp_free_i64(fp); } else { @@ -1019,7 +1034,7 @@ static void _decode_opc(DisasContext * ctx) CHECK_FPU_ENABLED if (ctx->tbflags & FPSCR_SZ) { TCGv_i64 fp = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ); + tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEUQ); gen_store_fpr64(ctx, fp, XHACK(B11_8)); tcg_temp_free_i64(fp); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8); @@ -1036,7 +1051,7 @@ static void _decode_opc(DisasContext * ctx) TCGv_i64 fp = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp, XHACK(B7_4)); tcg_gen_subi_i32(addr, REG(B11_8), 8); - tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ); + tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEUQ); tcg_temp_free_i64(fp); } else { tcg_gen_subi_i32(addr, REG(B11_8), 4); @@ -1053,7 +1068,7 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_add_i32(addr, REG(B7_4), REG(0)); if (ctx->tbflags & FPSCR_SZ) { TCGv_i64 fp = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEQ); + tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEUQ); gen_store_fpr64(ctx, fp, XHACK(B11_8)); tcg_temp_free_i64(fp); } else { @@ -1070,7 +1085,7 @@ static void _decode_opc(DisasContext * ctx) if (ctx->tbflags & FPSCR_SZ) { TCGv_i64 fp = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp, XHACK(B7_4)); - tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ); + tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEUQ); tcg_temp_free_i64(fp); } else { tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL); @@ -1182,7 +1197,7 @@ static void _decode_opc(DisasContext * ctx) CHECK_NOT_DELAY_SLOT tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1); ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2; - ctx->envflags |= DELAY_SLOT_CONDITIONAL; + ctx->envflags |= TB_FLAG_DELAY_SLOT_COND; return; case 0x8900: /* bt label */ CHECK_NOT_DELAY_SLOT @@ -1192,7 +1207,7 @@ static void _decode_opc(DisasContext * ctx) CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t); ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2; - ctx->envflags |= DELAY_SLOT_CONDITIONAL; + ctx->envflags |= 
TB_FLAG_DELAY_SLOT_COND; return; case 0x8800: /* cmp/eq #imm,R0 */ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s); @@ -1257,7 +1272,8 @@ static void _decode_opc(DisasContext * ctx) { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2); - tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW); + tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, + MO_TEUW | UNALIGN(ctx)); tcg_temp_free(addr); } return; @@ -1273,7 +1289,8 @@ static void _decode_opc(DisasContext * ctx) { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2); - tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW); + tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, + MO_TESW | UNALIGN(ctx)); tcg_temp_free(addr); } return; @@ -1374,14 +1391,14 @@ static void _decode_opc(DisasContext * ctx) case 0x0023: /* braf Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4); - ctx->envflags |= DELAY_SLOT; + ctx->envflags |= TB_FLAG_DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x0003: /* bsrf Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4); tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr); - ctx->envflags |= DELAY_SLOT; + ctx->envflags |= TB_FLAG_DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x4015: /* cmp/pl Rn */ @@ -1397,14 +1414,14 @@ static void _decode_opc(DisasContext * ctx) case 0x402b: /* jmp @Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8)); - ctx->envflags |= DELAY_SLOT; + ctx->envflags |= TB_FLAG_DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x400b: /* jsr @Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4); tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8)); - ctx->envflags |= DELAY_SLOT; + ctx->envflags |= TB_FLAG_DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x400e: /* ldc Rm,SR */ @@ -1825,7 +1842,7 @@ static void _decode_opc(DisasContext * ctx) fflush(stderr); #endif do_illegal: - if (ctx->envflags & DELAY_SLOT_MASK) { + if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) { do_illegal_slot: gen_save_cpu_state(ctx, true); gen_helper_raise_slot_illegal_instruction(cpu_env); @@ -1838,7 +1855,7 @@ static void _decode_opc(DisasContext * ctx) do_fpu_disabled: gen_save_cpu_state(ctx, true); - if (ctx->envflags & DELAY_SLOT_MASK) { + if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) { gen_helper_raise_slot_fpu_disable(cpu_env); } else { gen_helper_raise_fpu_disable(cpu_env); @@ -1853,23 +1870,23 @@ static void decode_opc(DisasContext * ctx) _decode_opc(ctx); - if (old_flags & DELAY_SLOT_MASK) { + if (old_flags & TB_FLAG_DELAY_SLOT_MASK) { /* go out of the delay slot */ - ctx->envflags &= ~DELAY_SLOT_MASK; + ctx->envflags &= ~TB_FLAG_DELAY_SLOT_MASK; /* When in an exclusive region, we must continue to the end for conditional branches. */ - if (ctx->tbflags & GUSA_EXCLUSIVE - && old_flags & DELAY_SLOT_CONDITIONAL) { + if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE + && old_flags & TB_FLAG_DELAY_SLOT_COND) { gen_delayed_conditional_jump(ctx); return; } /* Otherwise this is probably an invalid gUSA region. Drop the GUSA bits so the next TB doesn't see them. */ - ctx->envflags &= ~GUSA_MASK; + ctx->envflags &= ~TB_FLAG_GUSA_MASK; tcg_gen_movi_i32(cpu_flags, ctx->envflags); - if (old_flags & DELAY_SLOT_CONDITIONAL) { + if (old_flags & TB_FLAG_DELAY_SLOT_COND) { gen_delayed_conditional_jump(ctx); } else { gen_jump(ctx); @@ -1907,7 +1924,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) /* Read all of the insns for the region. 
*/ for (i = 0; i < max_insns; ++i) { - insns[i] = translator_lduw(env, pc + i * 2); + insns[i] = translator_lduw(env, &ctx->base, pc + i * 2); } ld_adr = ld_dst = ld_mop = -1; @@ -2209,7 +2226,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) } /* The entire region has been translated. */ - ctx->envflags &= ~GUSA_MASK; + ctx->envflags &= ~TB_FLAG_GUSA_MASK; ctx->base.pc_next = pc_end; ctx->base.num_insns += max_insns - 1; return; @@ -2220,7 +2237,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) /* Restart with the EXCLUSIVE bit set, within a TB run via cpu_exec_step_atomic holding the exclusive lock. */ - ctx->envflags |= GUSA_EXCLUSIVE; + ctx->envflags |= TB_FLAG_GUSA_EXCLUSIVE; gen_save_cpu_state(ctx, false); gen_helper_exclusive(cpu_env); ctx->base.is_jmp = DISAS_NORETURN; @@ -2253,17 +2270,19 @@ static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) (tbflags & (1 << SR_RB))) * 0x10; ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0; - if (tbflags & GUSA_MASK) { +#ifdef CONFIG_USER_ONLY + if (tbflags & TB_FLAG_GUSA_MASK) { + /* In gUSA exclusive region. */ uint32_t pc = ctx->base.pc_next; uint32_t pc_end = ctx->base.tb->cs_base; - int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8); + int backup = sextract32(ctx->tbflags, TB_FLAG_GUSA_SHIFT, 8); int max_insns = (pc_end - pc) / 2; if (pc != pc_end + backup || max_insns < 2) { /* This is a malformed gUSA region. Don't do anything special, since the interpreter is likely to get confused. */ - ctx->envflags &= ~GUSA_MASK; - } else if (tbflags & GUSA_EXCLUSIVE) { + ctx->envflags &= ~TB_FLAG_GUSA_MASK; + } else if (tbflags & TB_FLAG_GUSA_EXCLUSIVE) { /* Regardless of single-stepping or the end of the page, we must complete execution of the gUSA region while holding the exclusive lock. */ @@ -2271,6 +2290,7 @@ static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) return; } } +#endif /* Since the ISA is fixed-width, we can bound by the number of instructions remaining on the page. */ @@ -2295,8 +2315,8 @@ static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) DisasContext *ctx = container_of(dcbase, DisasContext, base); #ifdef CONFIG_USER_ONLY - if (unlikely(ctx->envflags & GUSA_MASK) - && !(ctx->envflags & GUSA_EXCLUSIVE)) { + if (unlikely(ctx->envflags & TB_FLAG_GUSA_MASK) + && !(ctx->envflags & TB_FLAG_GUSA_EXCLUSIVE)) { /* We're in an gUSA region, and we have not already fallen back on using an exclusive region. Attempt to parse the region into a single supported atomic operation. Failure @@ -2307,7 +2327,7 @@ static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) } #endif - ctx->opcode = translator_lduw(env, ctx->base.pc_next); + ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next); decode_opc(ctx); ctx->base.pc_next += 2; } @@ -2316,19 +2336,15 @@ static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); - if (ctx->tbflags & GUSA_EXCLUSIVE) { + if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) { /* Ending the region of exclusivity. Clear the bits. 
*/ - ctx->envflags &= ~GUSA_MASK; + ctx->envflags &= ~TB_FLAG_GUSA_MASK; } switch (ctx->base.is_jmp) { case DISAS_STOP: gen_save_cpu_state(ctx, true); - if (ctx->base.singlestep_enabled) { - gen_helper_debug(cpu_env); - } else { - tcg_gen_exit_tb(NULL, 0); - } + tcg_gen_exit_tb(NULL, 0); break; case DISAS_NEXT: case DISAS_TOO_MANY: @@ -2342,10 +2358,11 @@ static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) } } -static void sh4_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs) +static void sh4_tr_disas_log(const DisasContextBase *dcbase, + CPUState *cs, FILE *logfile) { - qemu_log("IN:\n"); /* , lookup_symbol(dcbase->pc_first)); */ - log_target_disas(cs, dcbase->pc_first, dcbase->tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first)); + target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size); } static const TranslatorOps sh4_tr_ops = { @@ -2357,19 +2374,10 @@ static const TranslatorOps sh4_tr_ops = { .disas_log = sh4_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext ctx; - translator_loop(&sh4_tr_ops, &ctx.base, cs, tb, max_insns); -} - -void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc = data[0]; - env->flags = data[1]; - /* Theoretically delayed_pc should also be restored. In practice the - branch instruction is re-executed after exception, so the delayed - branch target will be recomputed. */ + translator_loop(cs, tb, max_insns, pc, host_pc, &sh4_tr_ops, &ctx.base); } diff --git a/target/sparc/cpu-param.h b/target/sparc/cpu-param.h index 4746d89411..72ddc4a34f 100644 --- a/target/sparc/cpu-param.h +++ b/target/sparc/cpu-param.h @@ -5,7 +5,7 @@ */ #ifndef SPARC_CPU_PARAM_H -#define SPARC_CPU_PARAM_H 1 +#define SPARC_CPU_PARAM_H #ifdef TARGET_SPARC64 # define TARGET_LONG_BITS 64 diff --git a/target/sparc/cpu-qom.h b/target/sparc/cpu-qom.h index f33949aaee..86ed37d933 100644 --- a/target/sparc/cpu-qom.h +++ b/target/sparc/cpu-qom.h @@ -29,8 +29,7 @@ #define TYPE_SPARC_CPU "sparc-cpu" #endif -OBJECT_DECLARE_TYPE(SPARCCPU, SPARCCPUClass, - SPARC_CPU) +OBJECT_DECLARE_CPU_TYPE(SPARCCPU, SPARCCPUClass, SPARC_CPU) typedef struct sparc_def_t sparc_def_t; /** diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c index da6b30ec74..4c3d08a875 100644 --- a/target/sparc/cpu.c +++ b/target/sparc/cpu.c @@ -77,6 +77,7 @@ static void sparc_cpu_reset(DeviceState *dev) env->cache_control = 0; } +#ifndef CONFIG_USER_ONLY static bool sparc_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { if (interrupt_request & CPU_INTERRUPT_HARD) { @@ -96,6 +97,7 @@ static bool sparc_cpu_exec_interrupt(CPUState *cs, int interrupt_request) } return false; } +#endif /* !CONFIG_USER_ONLY */ static void cpu_sparc_disas_set_info(CPUState *cpu, disassemble_info *info) { @@ -610,7 +612,7 @@ static void cpu_print_cc(FILE *f, uint32_t cc) #define REGS_PER_LINE 8 #endif -void sparc_cpu_dump_state(CPUState *cs, FILE *f, int flags) +static void sparc_cpu_dump_state(CPUState *cs, FILE *f, int flags) { SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; @@ -691,12 +693,19 @@ static void sparc_cpu_set_pc(CPUState *cs, vaddr value) cpu->env.npc = value + 4; } +static vaddr sparc_cpu_get_pc(CPUState *cs) +{ + SPARCCPU *cpu = SPARC_CPU(cs); + + return cpu->env.pc; +} + static void sparc_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { 
SPARCCPU *cpu = SPARC_CPU(cs); - cpu->env.pc = tb->pc; + cpu->env.pc = tb_pc(tb); cpu->env.npc = tb->cs_base; } @@ -863,10 +872,11 @@ static const struct SysemuCPUOps sparc_sysemu_ops = { static const struct TCGCPUOps sparc_tcg_ops = { .initialize = sparc_tcg_init, .synchronize_from_tb = sparc_cpu_synchronize_from_tb, - .cpu_exec_interrupt = sparc_cpu_exec_interrupt, - .tlb_fill = sparc_cpu_tlb_fill, + .restore_state_to_opc = sparc_restore_state_to_opc, #ifndef CONFIG_USER_ONLY + .tlb_fill = sparc_cpu_tlb_fill, + .cpu_exec_interrupt = sparc_cpu_exec_interrupt, .do_interrupt = sparc_cpu_do_interrupt, .do_transaction_failed = sparc_cpu_do_transaction_failed, .do_unaligned_access = sparc_cpu_do_unaligned_access, @@ -894,6 +904,7 @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data) cc->memory_rw_debug = sparc_cpu_memory_rw_debug; #endif cc->set_pc = sparc_cpu_set_pc; + cc->get_pc = sparc_cpu_get_pc; cc->gdb_read_register = sparc_cpu_gdb_read_register; cc->gdb_write_register = sparc_cpu_gdb_write_register; #ifndef CONFIG_USER_ONLY diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h index ff8ae73002..e478c5eb16 100644 --- a/target/sparc/cpu.h +++ b/target/sparc/cpu.h @@ -4,6 +4,7 @@ #include "qemu/bswap.h" #include "cpu-qom.h" #include "exec/cpu-defs.h" +#include "qemu/cpu-float.h" #if !defined(TARGET_SPARC64) #define TARGET_DPREGS 16 @@ -420,7 +421,7 @@ struct CPUTimer typedef struct CPUTimer CPUTimer; -typedef struct CPUSPARCState CPUSPARCState; +typedef struct CPUArchState CPUSPARCState; #if defined(TARGET_SPARC64) typedef union { uint64_t mmuregs[16]; @@ -439,7 +440,7 @@ typedef union { }; } SparcV9MMU; #endif -struct CPUSPARCState { +struct CPUArchState { target_ulong gregs[8]; /* general registers */ target_ulong *regwptr; /* pointer to current register window */ target_ulong pc; /* program counter */ @@ -556,7 +557,7 @@ struct CPUSPARCState { * * A SPARC CPU. 
*/ -struct SPARCCPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ @@ -571,15 +572,14 @@ extern const VMStateDescription vmstate_sparc_cpu; #endif void sparc_cpu_do_interrupt(CPUState *cpu); -void sparc_cpu_dump_state(CPUState *cpu, FILE *f, int flags); hwaddr sparc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); int sparc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int sparc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, - MMUAccessType access_type, - int mmu_idx, - uintptr_t retaddr); -void cpu_raise_exception_ra(CPUSPARCState *, int, uintptr_t) QEMU_NORETURN; +G_NORETURN void sparc_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + int mmu_idx, + uintptr_t retaddr); +G_NORETURN void cpu_raise_exception_ra(CPUSPARCState *, int, uintptr_t); #ifndef NO_CPU_IO_DEFS /* cpu_init.c */ @@ -600,6 +600,9 @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr, /* translate.c */ void sparc_tcg_init(void); +void sparc_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data); /* cpu-exec.c */ @@ -649,13 +652,11 @@ hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr, int mmu_idx); #endif #endif -int cpu_sparc_signal_handler(int host_signum, void *pinfo, void *puc); #define SPARC_CPU_TYPE_SUFFIX "-" TYPE_SPARC_CPU #define SPARC_CPU_TYPE_NAME(model) model SPARC_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_SPARC_CPU -#define cpu_signal_handler cpu_sparc_signal_handler #define cpu_list sparc_cpu_list /* MMU modes definitions */ @@ -746,9 +747,6 @@ static inline int cpu_pil_allowed(CPUSPARCState *env1, int pil) #endif } -typedef CPUSPARCState CPUArchState; -typedef SPARCCPU ArchCPU; - #include "exec/cpu-all.h" #ifdef TARGET_SPARC64 diff --git a/target/sparc/helper.c b/target/sparc/helper.c index c7bcaa3a20..c4358bba84 100644 --- a/target/sparc/helper.c +++ b/target/sparc/helper.c @@ -20,6 +20,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" +#include "qemu/timer.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c index 22327d7d72..ec4fae78c3 100644 --- a/target/sparc/ldst_helper.c +++ b/target/sparc/ldst_helper.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "tcg/tcg.h" #include "exec/helper-proto.h" @@ -27,7 +28,6 @@ //#define DEBUG_MMU //#define DEBUG_MXCC -//#define DEBUG_UNALIGNED //#define DEBUG_UNASSIGNED //#define DEBUG_ASI //#define DEBUG_CACHE_CONTROL @@ -364,10 +364,6 @@ static void do_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align, uintptr_t ra) { if (addr & align) { -#ifdef DEBUG_UNALIGNED - printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx - "\n", addr, env->pc); -#endif cpu_raise_exception_ra(env, TT_UNALIGNED, ra); } } @@ -1318,7 +1314,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, case ASI_SNF: case ASI_SNFL: { - TCGMemOpIdx oi; + MemOpIdx oi; int idx = (env->pstate & PS_PRIV ? (asi & 1 ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX) : (asi & 1 ? 
MMU_USER_SECONDARY_IDX : MMU_USER_IDX)); @@ -1333,27 +1329,27 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, oi = make_memop_idx(memop, idx); switch (size) { case 1: - ret = helper_ret_ldub_mmu(env, addr, oi, GETPC()); + ret = cpu_ldb_mmu(env, addr, oi, GETPC()); break; case 2: if (asi & 8) { - ret = helper_le_lduw_mmu(env, addr, oi, GETPC()); + ret = cpu_ldw_le_mmu(env, addr, oi, GETPC()); } else { - ret = helper_be_lduw_mmu(env, addr, oi, GETPC()); + ret = cpu_ldw_be_mmu(env, addr, oi, GETPC()); } break; case 4: if (asi & 8) { - ret = helper_le_ldul_mmu(env, addr, oi, GETPC()); + ret = cpu_ldl_le_mmu(env, addr, oi, GETPC()); } else { - ret = helper_be_ldul_mmu(env, addr, oi, GETPC()); + ret = cpu_ldl_be_mmu(env, addr, oi, GETPC()); } break; case 8: if (asi & 8) { - ret = helper_le_ldq_mmu(env, addr, oi, GETPC()); + ret = cpu_ldq_le_mmu(env, addr, oi, GETPC()); } else { - ret = helper_be_ldq_mmu(env, addr, oi, GETPC()); + ret = cpu_ldq_be_mmu(env, addr, oi, GETPC()); } break; default: @@ -1958,20 +1954,3 @@ void sparc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, is_asi, size, retaddr); } #endif - -#if !defined(CONFIG_USER_ONLY) -void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, - MMUAccessType access_type, - int mmu_idx, - uintptr_t retaddr) -{ - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; - -#ifdef DEBUG_UNALIGNED - printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx - "\n", addr, env->pc); -#endif - cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr); -} -#endif diff --git a/target/sparc/machine.c b/target/sparc/machine.c index 917375c3a1..44b9e7d75d 100644 --- a/target/sparc/machine.c +++ b/target/sparc/machine.c @@ -10,7 +10,6 @@ static const VMStateDescription vmstate_cpu_timer = { .name = "cpu_timer", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_UINT32(frequency, CPUTimer), VMSTATE_UINT32(disabled, CPUTimer), @@ -30,7 +29,6 @@ static const VMStateDescription vmstate_trap_state = { .name = "trap_state", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_UINT64(tpc, trap_state), VMSTATE_UINT64(tnpc, trap_state), @@ -44,7 +42,6 @@ static const VMStateDescription vmstate_tlb_entry = { .name = "tlb_entry", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_UINT64(tag, SparcTLBEntry), VMSTATE_UINT64(tte, SparcTLBEntry), @@ -113,7 +110,6 @@ const VMStateDescription vmstate_sparc_cpu = { .name = "cpu", .version_id = SPARC_VMSTATE_VER, .minimum_version_id = SPARC_VMSTATE_VER, - .minimum_version_id_old = SPARC_VMSTATE_VER, .pre_save = cpu_pre_save, .fields = (VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.gregs, SPARCCPU, 8), diff --git a/target/sparc/meson.build b/target/sparc/meson.build index a3638b9503..a801802ee2 100644 --- a/target/sparc/meson.build +++ b/target/sparc/meson.build @@ -6,7 +6,6 @@ sparc_ss.add(files( 'gdbstub.c', 'helper.c', 'ldst_helper.c', - 'mmu_helper.c', 'translate.c', 'win_helper.c', )) @@ -16,6 +15,7 @@ sparc_ss.add(when: 'TARGET_SPARC64', if_true: files('int64_helper.c', 'vis_helpe sparc_softmmu_ss = ss.source_set() sparc_softmmu_ss.add(files( 'machine.c', + 'mmu_helper.c', 'monitor.c', )) diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c index a44473a1c7..919448a494 100644 --- a/target/sparc/mmu_helper.c +++ b/target/sparc/mmu_helper.c @@ -18,6 +18,7 @@ */ #include 
"qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "exec/exec-all.h" #include "qemu/qemu-print.h" @@ -25,30 +26,6 @@ /* Sparc MMU emulation */ -#if defined(CONFIG_USER_ONLY) - -bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; - - if (access_type == MMU_INST_FETCH) { - cs->exception_index = TT_TFAULT; - } else { - cs->exception_index = TT_DFAULT; -#ifdef TARGET_SPARC64 - env->dmmu.mmuregs[4] = address; -#else - env->mmuregs[4] = address; -#endif - } - cpu_loop_exit_restore(cs, retaddr); -} - -#else - #ifndef TARGET_SPARC64 /* * Sparc V8 Reference MMU (SRMMU) @@ -526,16 +503,60 @@ static inline int ultrasparc_tag_match(SparcTLBEntry *tlb, return 0; } +static uint64_t build_sfsr(CPUSPARCState *env, int mmu_idx, int rw) +{ + uint64_t sfsr = SFSR_VALID_BIT; + + switch (mmu_idx) { + case MMU_PHYS_IDX: + sfsr |= SFSR_CT_NOTRANS; + break; + case MMU_USER_IDX: + case MMU_KERNEL_IDX: + sfsr |= SFSR_CT_PRIMARY; + break; + case MMU_USER_SECONDARY_IDX: + case MMU_KERNEL_SECONDARY_IDX: + sfsr |= SFSR_CT_SECONDARY; + break; + case MMU_NUCLEUS_IDX: + sfsr |= SFSR_CT_NUCLEUS; + break; + default: + g_assert_not_reached(); + } + + if (rw == 1) { + sfsr |= SFSR_WRITE_BIT; + } else if (rw == 4) { + sfsr |= SFSR_NF_BIT; + } + + if (env->pstate & PS_PRIV) { + sfsr |= SFSR_PR_BIT; + } + + if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */ + sfsr |= SFSR_OW_BIT; /* overflow (not read before another fault) */ + } + + /* FIXME: ASI field in SFSR must be set */ + + return sfsr; +} + static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, int *prot, MemTxAttrs *attrs, target_ulong address, int rw, int mmu_idx) { CPUState *cs = env_cpu(env); unsigned int i; + uint64_t sfsr; uint64_t context; - uint64_t sfsr = 0; bool is_user = false; + sfsr = build_sfsr(env, mmu_idx, rw); + switch (mmu_idx) { case MMU_PHYS_IDX: g_assert_not_reached(); @@ -544,29 +565,18 @@ static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, /* fallthru */ case MMU_KERNEL_IDX: context = env->dmmu.mmu_primary_context & 0x1fff; - sfsr |= SFSR_CT_PRIMARY; break; case MMU_USER_SECONDARY_IDX: is_user = true; /* fallthru */ case MMU_KERNEL_SECONDARY_IDX: context = env->dmmu.mmu_secondary_context & 0x1fff; - sfsr |= SFSR_CT_SECONDARY; break; - case MMU_NUCLEUS_IDX: - sfsr |= SFSR_CT_NUCLEUS; - /* FALLTHRU */ default: context = 0; break; } - if (rw == 1) { - sfsr |= SFSR_WRITE_BIT; - } else if (rw == 4) { - sfsr |= SFSR_NF_BIT; - } - for (i = 0; i < 64; i++) { /* ctx match, vaddr match, valid? 
*/ if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) { @@ -616,22 +626,9 @@ static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, return 0; } - if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */ - sfsr |= SFSR_OW_BIT; /* overflow (not read before - another fault) */ - } - - if (env->pstate & PS_PRIV) { - sfsr |= SFSR_PR_BIT; - } - - /* FIXME: ASI field in SFSR must be set */ - env->dmmu.sfsr = sfsr | SFSR_VALID_BIT; - + env->dmmu.sfsr = sfsr; env->dmmu.sfar = address; /* Fault address register */ - env->dmmu.tag_access = (address & ~0x1fffULL) | context; - return 1; } } @@ -926,4 +923,23 @@ hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) } return phys_addr; } + +#ifndef CONFIG_USER_ONLY +G_NORETURN void sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, + int mmu_idx, + uintptr_t retaddr) +{ + SPARCCPU *cpu = SPARC_CPU(cs); + CPUSPARCState *env = &cpu->env; + +#ifdef TARGET_SPARC64 + env->dmmu.sfsr = build_sfsr(env, mmu_idx, access_type); + env->dmmu.sfar = addr; +#else + env->mmuregs[4] = addr; #endif + + cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr); +} +#endif /* !CONFIG_USER_ONLY */ diff --git a/target/sparc/translate.c b/target/sparc/translate.c index 11de5a4963..34858eb95f 100644 --- a/target/sparc/translate.c +++ b/target/sparc/translate.c @@ -2464,7 +2464,7 @@ static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn) static void gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd) { - DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ)); + DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ)); TCGv_i32 d32; TCGv_i64 d64; @@ -2578,7 +2578,7 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, static void gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd) { - DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ)); + DisasASI da = get_asi(dc, insn, (size == 4 ? 
MO_TEUL : MO_TEUQ)); TCGv_i32 d32; switch (da.type) { @@ -2660,7 +2660,7 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) { - DisasASI da = get_asi(dc, insn, MO_TEQ); + DisasASI da = get_asi(dc, insn, MO_TEUQ); TCGv_i64 hi = gen_dest_gpr(dc, rd); TCGv_i64 lo = gen_dest_gpr(dc, rd + 1); @@ -2727,7 +2727,7 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, int insn, int rd) { - DisasASI da = get_asi(dc, insn, MO_TEQ); + DisasASI da = get_asi(dc, insn, MO_TEUQ); TCGv lo = gen_load_gpr(dc, rd + 1); switch (da.type) { @@ -2787,7 +2787,7 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv, int insn, int rd) { - DisasASI da = get_asi(dc, insn, MO_TEQ); + DisasASI da = get_asi(dc, insn, MO_TEUQ); TCGv oldv; switch (da.type) { @@ -2817,7 +2817,7 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) TCGv lo = gen_dest_gpr(dc, rd | 1); TCGv hi = gen_dest_gpr(dc, rd); TCGv_i64 t64 = tcg_temp_new_i64(); - DisasASI da = get_asi(dc, insn, MO_TEQ); + DisasASI da = get_asi(dc, insn, MO_TEUQ); switch (da.type) { case GET_ASI_EXCP: @@ -2830,7 +2830,7 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) default: { TCGv_i32 r_asi = tcg_const_i32(da.asi); - TCGv_i32 r_mop = tcg_const_i32(MO_Q); + TCGv_i32 r_mop = tcg_const_i32(MO_UQ); save_state(dc); gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop); @@ -2849,7 +2849,7 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, int insn, int rd) { - DisasASI da = get_asi(dc, insn, MO_TEQ); + DisasASI da = get_asi(dc, insn, MO_TEUQ); TCGv lo = gen_load_gpr(dc, rd + 1); TCGv_i64 t64 = tcg_temp_new_i64(); @@ -2886,7 +2886,7 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, default: { TCGv_i32 r_asi = tcg_const_i32(da.asi); - TCGv_i32 r_mop = tcg_const_i32(MO_Q); + TCGv_i32 r_mop = tcg_const_i32(MO_UQ); save_state(dc); gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop); @@ -3401,7 +3401,8 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) tcg_temp_free_i32(r_const); gen_store_gpr(dc, rd, cpu_dst); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); + /* I/O operations in icount mode must end the TB */ + dc->base.is_jmp = DISAS_EXIT; } } break; @@ -3454,7 +3455,8 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) tcg_temp_free_i32(r_const); gen_store_gpr(dc, rd, cpu_dst); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); + /* I/O operations in icount mode must end the TB */ + dc->base.is_jmp = DISAS_EXIT; } } break; @@ -3588,7 +3590,8 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) tcg_temp_free_ptr(r_tickptr); tcg_temp_free_i32(r_const); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); + /* I/O operations in icount mode must end the TB */ + dc->base.is_jmp = DISAS_EXIT; } } break; @@ -4582,7 +4585,8 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_wrpstate(cpu_env, cpu_tmp0); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); + /* I/O ops in icount mode must end the TB */ + dc->base.is_jmp = DISAS_EXIT; } dc->npc = DYNAMIC_PC; break; @@ -4598,7 +4602,8 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_wrpil(cpu_env, 
cpu_tmp0); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); + /* I/O ops in icount mode must end the TB */ + dc->base.is_jmp = DISAS_EXIT; } break; case 9: // cwp @@ -4697,10 +4702,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_helper_tick_set_limit(r_tickptr, cpu_hstick_cmpr); tcg_temp_free_ptr(r_tickptr); - if (tb_cflags(dc->base.tb) & - CF_USE_ICOUNT) { - gen_io_end(); - } /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } @@ -5327,9 +5328,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_io_start(); } gen_helper_done(cpu_env); - if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } goto jmp_insn; case 1: if (!supervisor(dc)) @@ -5340,9 +5338,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_io_start(); } gen_helper_retry(cpu_env); - if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } goto jmp_insn; default: goto illegal_insn; @@ -5484,7 +5479,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL); break; case 0x1b: /* V9 ldxa */ - gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ); + gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ); break; case 0x2d: /* V9 prefetch, no effect */ goto skip_move; @@ -5538,7 +5533,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) if (rd == 1) { TCGv_i64 t64 = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(t64, cpu_addr, - dc->mem_idx, MO_TEQ); + dc->mem_idx, MO_TEUQ); gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64); tcg_temp_free_i64(t64); break; @@ -5554,11 +5549,11 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_address_mask(dc, cpu_addr); cpu_src1_64 = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx, - MO_TEQ | MO_ALIGN_4); + MO_TEUQ | MO_ALIGN_4); tcg_gen_addi_tl(cpu_addr, cpu_addr, 8); cpu_src2_64 = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx, - MO_TEQ | MO_ALIGN_4); + MO_TEUQ | MO_ALIGN_4); gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64); tcg_temp_free_i64(cpu_src1_64); tcg_temp_free_i64(cpu_src2_64); @@ -5567,7 +5562,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_address_mask(dc, cpu_addr); cpu_dst_64 = gen_dest_fpr_D(dc, rd); tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx, - MO_TEQ | MO_ALIGN_4); + MO_TEUQ | MO_ALIGN_4); gen_store_fpr_D(dc, rd, cpu_dst_64); break; default: @@ -5628,7 +5623,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx); break; case 0x1e: /* V9 stxa */ - gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ); + gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ); break; #endif default: @@ -5669,11 +5664,11 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) before performing the first write. 
*/ cpu_src1_64 = gen_load_fpr_Q0(dc, rd); tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, - dc->mem_idx, MO_TEQ | MO_ALIGN_16); + dc->mem_idx, MO_TEUQ | MO_ALIGN_16); tcg_gen_addi_tl(cpu_addr, cpu_addr, 8); cpu_src2_64 = gen_load_fpr_Q1(dc, rd); tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, - dc->mem_idx, MO_TEQ); + dc->mem_idx, MO_TEUQ); break; #else /* !TARGET_SPARC64 */ /* stdfq, store floating point queue */ @@ -5692,7 +5687,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_address_mask(dc, cpu_addr); cpu_src1_64 = gen_load_fpr_D(dc, rd); tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx, - MO_TEQ | MO_ALIGN_4); + MO_TEUQ | MO_ALIGN_4); break; default: goto illegal_insn; @@ -5860,7 +5855,7 @@ static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) CPUSPARCState *env = cs->env_ptr; unsigned int insn; - insn = translator_ldl(env, dc->pc); + insn = translator_ldl(env, &dc->base, dc->pc); dc->base.pc_next += 4; disas_sparc_insn(dc, insn); @@ -5906,10 +5901,11 @@ static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) } } -static void sparc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu) +static void sparc_tr_disas_log(const DisasContextBase *dcbase, + CPUState *cpu, FILE *logfile) { - qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first)); - log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first)); + target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size); } static const TranslatorOps sparc_tr_ops = { @@ -5921,11 +5917,12 @@ static const TranslatorOps sparc_tr_ops = { .disas_log = sparc_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext dc = {}; - translator_loop(&sparc_tr_ops, &dc.base, cs, tb, max_insns); + translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base); } void sparc_tcg_init(void) @@ -6014,9 +6011,12 @@ void sparc_tcg_init(void) } } -void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, - target_ulong *data) +void sparc_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) { + SPARCCPU *cpu = SPARC_CPU(cs); + CPUSPARCState *env = &cpu->env; target_ulong pc = data[0]; target_ulong npc = data[1]; diff --git a/target/sparc/vis_helper.c b/target/sparc/vis_helper.c index f917e5992d..3afdc6975c 100644 --- a/target/sparc/vis_helper.c +++ b/target/sparc/vis_helper.c @@ -42,7 +42,7 @@ target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize) GET_FIELD_SP(pixel_addr, 11, 12); } -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN #define VIS_B64(n) b[7 - (n)] #define VIS_W64(n) w[3 - (n)] #define VIS_SW64(n) sw[3 - (n)] @@ -470,7 +470,7 @@ uint64_t helper_bshuffle(uint64_t gsr, uint64_t src1, uint64_t src2) uint32_t i, mask, host; /* Set up S such that we can index across all of the bytes. 
*/ -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN s.ll[0] = src1; s.ll[1] = src2; host = 0; diff --git a/target/tricore/cpu-param.h b/target/tricore/cpu-param.h index cf5d9af89d..2727913047 100644 --- a/target/tricore/cpu-param.h +++ b/target/tricore/cpu-param.h @@ -6,7 +6,7 @@ */ #ifndef TRICORE_CPU_PARAM_H -#define TRICORE_CPU_PARAM_H 1 +#define TRICORE_CPU_PARAM_H #define TARGET_LONG_BITS 32 #define TARGET_PAGE_BITS 14 diff --git a/target/tricore/cpu-qom.h b/target/tricore/cpu-qom.h index 59bfd01bbc..ee24e9fa76 100644 --- a/target/tricore/cpu-qom.h +++ b/target/tricore/cpu-qom.h @@ -24,8 +24,7 @@ #define TYPE_TRICORE_CPU "tricore-cpu" -OBJECT_DECLARE_TYPE(TriCoreCPU, TriCoreCPUClass, - TRICORE_CPU) +OBJECT_DECLARE_CPU_TYPE(TriCoreCPU, TriCoreCPUClass, TRICORE_CPU) struct TriCoreCPUClass { /*< private >*/ diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c index b95682b7f0..2c54a2825f 100644 --- a/target/tricore/cpu.c +++ b/target/tricore/cpu.c @@ -41,13 +41,31 @@ static void tricore_cpu_set_pc(CPUState *cs, vaddr value) env->PC = value & ~(target_ulong)1; } +static vaddr tricore_cpu_get_pc(CPUState *cs) +{ + TriCoreCPU *cpu = TRICORE_CPU(cs); + CPUTriCoreState *env = &cpu->env; + + return env->PC; +} + static void tricore_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { TriCoreCPU *cpu = TRICORE_CPU(cs); CPUTriCoreState *env = &cpu->env; - env->PC = tb->pc; + env->PC = tb_pc(tb); +} + +static void tricore_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + TriCoreCPU *cpu = TRICORE_CPU(cs); + CPUTriCoreState *env = &cpu->env; + + env->PC = data[0]; } static void tricore_cpu_reset(DeviceState *dev) @@ -153,6 +171,7 @@ static const struct SysemuCPUOps tricore_sysemu_ops = { static const struct TCGCPUOps tricore_tcg_ops = { .initialize = tricore_tcg_init, .synchronize_from_tb = tricore_cpu_synchronize_from_tb, + .restore_state_to_opc = tricore_restore_state_to_opc, .tlb_fill = tricore_cpu_tlb_fill, }; @@ -176,6 +195,7 @@ static void tricore_cpu_class_init(ObjectClass *c, void *data) cc->dump_state = tricore_cpu_dump_state; cc->set_pc = tricore_cpu_set_pc; + cc->get_pc = tricore_cpu_get_pc; cc->sysemu_ops = &tricore_sysemu_ops; cc->tcg_ops = &tricore_tcg_ops; } diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h index 4b61a2c03f..3b9c533a7c 100644 --- a/target/tricore/cpu.h +++ b/target/tricore/cpu.h @@ -22,14 +22,14 @@ #include "cpu-qom.h" #include "exec/cpu-defs.h" +#include "qemu/cpu-float.h" #include "tricore-defs.h" struct tricore_boot_info; typedef struct tricore_def_t tricore_def_t; -typedef struct CPUTriCoreState CPUTriCoreState; -struct CPUTriCoreState { +typedef struct CPUArchState { /* GPR Register */ uint32_t gpr_a[16]; uint32_t gpr_d[16]; @@ -189,7 +189,7 @@ struct CPUTriCoreState { const tricore_def_t *cpu_model; void *irq[8]; struct QEMUTimer *timer; /* Internal timer */ -}; +} CPUTriCoreState; /** * TriCoreCPU: @@ -197,7 +197,7 @@ struct CPUTriCoreState { * * A TriCore CPU. 
*/ -struct TriCoreCPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ @@ -362,7 +362,6 @@ void fpu_set_state(CPUTriCoreState *env); void tricore_cpu_list(void); -#define cpu_signal_handler cpu_tricore_signal_handler #define cpu_list tricore_cpu_list static inline int cpu_mmu_index(CPUTriCoreState *env, bool ifetch) @@ -370,14 +369,10 @@ static inline int cpu_mmu_index(CPUTriCoreState *env, bool ifetch) return 0; } -typedef CPUTriCoreState CPUArchState; -typedef TriCoreCPU ArchCPU; - #include "exec/cpu-all.h" void cpu_state_reset(CPUTriCoreState *s); void tricore_tcg_init(void); -int cpu_tricore_signal_handler(int host_signum, void *pinfo, void *puc); static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, target_ulong *pc, target_ulong *cs_base, uint32_t *flags) diff --git a/target/tricore/csfr.def b/target/tricore/csfr.h.inc similarity index 100% rename from target/tricore/csfr.def rename to target/tricore/csfr.h.inc diff --git a/target/tricore/gdbstub.c b/target/tricore/gdbstub.c index 3ce55abb8e..ebf32defde 100644 --- a/target/tricore/gdbstub.c +++ b/target/tricore/gdbstub.c @@ -18,7 +18,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "exec/gdbstub.h" diff --git a/target/tricore/helper.c b/target/tricore/helper.c index c5e997f321..1db32808e8 100644 --- a/target/tricore/helper.c +++ b/target/tricore/helper.c @@ -16,7 +16,7 @@ */ #include "qemu/osdep.h" - +#include "qemu/log.h" #include "cpu.h" #include "exec/exec-all.h" #include "fpu/softfloat-helpers.h" diff --git a/target/tricore/helper.h b/target/tricore/helper.h index 78176aa17a..b64780c37d 100644 --- a/target/tricore/helper.h +++ b/target/tricore/helper.h @@ -153,4 +153,3 @@ DEF_HELPER_2(psw_write, void, env, i32) DEF_HELPER_1(psw_read, i32, env) /* Exceptions */ DEF_HELPER_3(raise_exception_sync, noreturn, env, i32, i32) -DEF_HELPER_2(qemu_excp, noreturn, env, i32) diff --git a/target/tricore/op_helper.c b/target/tricore/op_helper.c index 32c2bc1699..532ae6b74c 100644 --- a/target/tricore/op_helper.c +++ b/target/tricore/op_helper.c @@ -25,13 +25,13 @@ /* Exception helpers */ -static void QEMU_NORETURN -raise_exception_sync_internal(CPUTriCoreState *env, uint32_t class, int tin, - uintptr_t pc, uint32_t fcd_pc) +static G_NORETURN +void raise_exception_sync_internal(CPUTriCoreState *env, uint32_t class, int tin, + uintptr_t pc, uint32_t fcd_pc) { CPUState *cs = env_cpu(env); /* in case we come from a helper-call we need to restore the PC */ - cpu_restore_state(cs, pc, true); + cpu_restore_state(cs, pc); /* Tin is loaded into d[15] */ env->gpr_d[15] = tin; @@ -107,13 +107,6 @@ static void raise_exception_sync_helper(CPUTriCoreState *env, uint32_t class, raise_exception_sync_internal(env, class, tin, pc, 0); } -void helper_qemu_excp(CPUTriCoreState *env, uint32_t excp) -{ - CPUState *cs = env_cpu(env); - cs->exception_index = excp; - cpu_loop_exit(cs); -} - /* Addressing mode helper */ static uint16_t reverse16(uint16_t val) diff --git a/target/tricore/translate.c b/target/tricore/translate.c index a0cc0f1cb3..df9e46c649 100644 --- a/target/tricore/translate.c +++ b/target/tricore/translate.c @@ -246,7 +246,7 @@ static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx) TCGv_i64 temp = tcg_temp_new_i64(); tcg_gen_concat_i32_i64(temp, rl, rh); - tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEQ); + tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEUQ); tcg_temp_free_i64(temp); } @@ -264,7 +264,7 @@ static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, 
DisasContext *ctx) { TCGv_i64 temp = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEQ); + tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEUQ); /* write back to two 32 bit regs */ tcg_gen_extr_i64_i32(rl, rh, temp); @@ -388,7 +388,7 @@ static inline void gen_mfcr(DisasContext *ctx, TCGv ret, int32_t offset) gen_helper_psw_read(ret, cpu_env); } else { switch (offset) { -#include "csfr.def" +#include "csfr.h.inc" } } } @@ -418,7 +418,7 @@ static inline void gen_mtcr(DisasContext *ctx, TCGv r1, gen_helper_psw_write(cpu_env, r1); } else { switch (offset) { -#include "csfr.def" +#include "csfr.h.inc" } } } else { @@ -3225,14 +3225,6 @@ static inline void gen_save_pc(target_ulong pc) tcg_gen_movi_tl(cpu_PC, pc); } -static void generate_qemu_excp(DisasContext *ctx, int excp) -{ - TCGv_i32 tmp = tcg_const_i32(excp); - gen_helper_qemu_excp(cpu_env, tmp); - ctx->base.is_jmp = DISAS_NORETURN; - tcg_temp_free(tmp); -} - static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) { if (translator_use_goto_tb(&ctx->base, dest)) { @@ -3241,11 +3233,7 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) tcg_gen_exit_tb(ctx->base.tb, n); } else { gen_save_pc(dest); - if (ctx->base.singlestep_enabled) { - generate_qemu_excp(ctx, EXCP_DEBUG); - } else { - tcg_gen_lookup_and_goto_ptr(); - } + tcg_gen_lookup_and_goto_ptr(); } } @@ -8873,10 +8861,11 @@ static void tricore_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) } } -static void tricore_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu) +static void tricore_tr_disas_log(const DisasContextBase *dcbase, + CPUState *cpu, FILE *logfile) { - qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first)); - log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first)); + target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size); } static const TranslatorOps tricore_tr_ops = { @@ -8889,18 +8878,14 @@ static const TranslatorOps tricore_tr_ops = { }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext ctx; - translator_loop(&tricore_tr_ops, &ctx.base, cs, tb, max_insns); + translator_loop(cs, tb, max_insns, pc, host_pc, + &tricore_tr_ops, &ctx.base); } -void -restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->PC = data[0]; -} /* * * Initialization diff --git a/target/xtensa/core-de233_fpu.c b/target/xtensa/core-de233_fpu.c index c7cbeb1b48..41af8057fb 100644 --- a/target/xtensa/core-de233_fpu.c +++ b/target/xtensa/core-de233_fpu.c @@ -28,7 +28,6 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/gdbstub.h" -#include "qemu-common.h" #include "qemu/host-utils.h" #include "core-de233_fpu/core-isa.h" diff --git a/target/xtensa/core-de233_fpu/core-isa.h b/target/xtensa/core-de233_fpu/core-isa.h index f125619e8d..40543b2c5e 100644 --- a/target/xtensa/core-de233_fpu/core-isa.h +++ b/target/xtensa/core-de233_fpu/core-isa.h @@ -28,8 +28,8 @@ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ -#ifndef XTENSA_CORE_CONFIGURATION_H_ -#define XTENSA_CORE_CONFIGURATION_H_ +#ifndef XTENSA_CORE_DE233_FPU_CORE_ISA_H +#define XTENSA_CORE_DE233_FPU_CORE_ISA_H //depot/dev/Homewood/Xtensa/SWConfig/hal/core-common.h.tph#24 - edit change 444323 (text+ko) @@ -723,5 +723,4 @@ #endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */ -#endif /* XTENSA_CORE_CONFIGURATION_H_ */ - +#endif /* XTENSA_CORE_DE233_FPU_CORE_ISA_H */ diff --git a/target/xtensa/core-de233_fpu/core-matmap.h b/target/xtensa/core-de233_fpu/core-matmap.h index cca51c7af1..e99e7d3123 100644 --- a/target/xtensa/core-de233_fpu/core-matmap.h +++ b/target/xtensa/core-de233_fpu/core-matmap.h @@ -43,11 +43,9 @@ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ - #ifndef XTENSA_CONFIG_CORE_MATMAP_H #define XTENSA_CONFIG_CORE_MATMAP_H - /*---------------------------------------------------------------------- CACHE (MEMORY ACCESS) ATTRIBUTES ----------------------------------------------------------------------*/ @@ -713,5 +711,5 @@ -#endif /*XTENSA_CONFIG_CORE_MATMAP_H*/ +#endif /* XTENSA_CONFIG_CORE_MATMAP_H */ diff --git a/target/xtensa/core-dsp3400.c b/target/xtensa/core-dsp3400.c index 4e0bc8a8c4..81e425c568 100644 --- a/target/xtensa/core-dsp3400.c +++ b/target/xtensa/core-dsp3400.c @@ -28,7 +28,6 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/gdbstub.h" -#include "qemu-common.h" #include "qemu/host-utils.h" #include "core-dsp3400/core-isa.h" diff --git a/target/xtensa/core-dsp3400/core-isa.h b/target/xtensa/core-dsp3400/core-isa.h index 336b2467c6..1499ef2914 100644 --- a/target/xtensa/core-dsp3400/core-isa.h +++ b/target/xtensa/core-dsp3400/core-isa.h @@ -28,9 +28,8 @@ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#ifndef _XTENSA_CORE_CONFIGURATION_H -#define _XTENSA_CORE_CONFIGURATION_H - +#ifndef XTENSA_CORE_DSP3400_CORE_ISA_H +#define XTENSA_CORE_DSP3400_CORE_ISA_H /**************************************************************************** Parameters Useful for Any Code, USER or PRIVILEGED @@ -448,5 +447,4 @@ #endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */ -#endif /* _XTENSA_CORE_CONFIGURATION_H */ - +#endif /* XTENSA_CORE_DSP3400_CORE_ISA_H */ diff --git a/target/xtensa/core-dsp3400/core-matmap.h b/target/xtensa/core-dsp3400/core-matmap.h index 8d1aa8336e..692012f9f4 100644 --- a/target/xtensa/core-dsp3400/core-matmap.h +++ b/target/xtensa/core-dsp3400/core-matmap.h @@ -43,11 +43,9 @@ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ - #ifndef XTENSA_CONFIG_CORE_MATMAP_H #define XTENSA_CONFIG_CORE_MATMAP_H - /*---------------------------------------------------------------------- CACHE (MEMORY ACCESS) ATTRIBUTES ----------------------------------------------------------------------*/ @@ -308,5 +306,5 @@ -#endif /*XTENSA_CONFIG_CORE_MATMAP_H*/ +#endif /* XTENSA_CONFIG_CORE_MATMAP_H */ diff --git a/target/xtensa/core-lx106.c b/target/xtensa/core-lx106.c new file mode 100644 index 0000000000..7a771d09a6 --- /dev/null +++ b/target/xtensa/core-lx106.c @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2022, Simon Safar, Max Filippov, Open Source and Linux Lab. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Open Source and Linux Lab nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/gdbstub.h" +#include "qemu/host-utils.h" + +#include "core-lx106/core-isa.h" +#include "overlay_tool.h" + +#define xtensa_modules xtensa_modules_lx106 +#include "core-lx106/xtensa-modules.c.inc" + +static XtensaConfig lx106 __attribute__((unused)) = { + .name = "lx106", + .gdb_regmap = { + .reg = { +#include "core-lx106/gdb-config.c.inc" + } + }, + .isa_internal = &xtensa_modules, + .clock_freq_khz = 40000, + DEFAULT_SECTIONS +}; + +REGISTER_CORE(lx106) diff --git a/target/xtensa/core-lx106/core-isa.h b/target/xtensa/core-lx106/core-isa.h new file mode 100644 index 0000000000..86bcf1a5d6 --- /dev/null +++ b/target/xtensa/core-lx106/core-isa.h @@ -0,0 +1,470 @@ +/* + * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa + * processor CORE configuration + * + * See , which includes this file, for more details. + */ + +/* Xtensa processor core configuration information. + + Copyright (c) 1999-2010 Tensilica Inc. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#ifndef _XTENSA_CORE_CONFIGURATION_H +#define _XTENSA_CORE_CONFIGURATION_H + + +/**************************************************************************** + Parameters Useful for Any Code, USER or PRIVILEGED + ****************************************************************************/ + +/* + * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is + * configured, and a value of 0 otherwise. These macros are always defined. + */ + + +/*---------------------------------------------------------------------- + ISA + ----------------------------------------------------------------------*/ + +#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */ +#define XCHAL_HAVE_WINDOWED 0 /* windowed registers option */ +#define XCHAL_NUM_AREGS 16 /* num of physical addr regs */ +#define XCHAL_NUM_AREGS_LOG2 4 /* log2(XCHAL_NUM_AREGS) */ +#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */ +#define XCHAL_HAVE_DEBUG 1 /* debug option */ +#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */ +#define XCHAL_HAVE_LOOPS 0 /* zero-overhead loops */ +#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */ +#define XCHAL_HAVE_MINMAX 0 /* MIN/MAX instructions */ +#define XCHAL_HAVE_SEXT 0 /* SEXT instruction */ +#define XCHAL_HAVE_CLAMPS 0 /* CLAMPS instruction */ +#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */ +#define XCHAL_HAVE_MUL32 1 /* MULL instruction */ +#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */ +#define XCHAL_HAVE_DIV32 0 /* QUOS/QUOU/REMS/REMU instructions */ +#define XCHAL_HAVE_L32R 1 /* L32R instruction */ +#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */ +#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */ +#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */ +#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */ +#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */ +#define XCHAL_HAVE_CALL4AND12 0 /* (obsolete option) */ +#define XCHAL_HAVE_ABS 1 /* ABS instruction */ +/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */ +/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */ +#define XCHAL_HAVE_RELEASE_SYNC 0 /* L32AI/S32RI instructions */ +#define XCHAL_HAVE_S32C1I 0 /* S32C1I instruction */ +#define XCHAL_HAVE_SPECULATION 0 /* speculation */ +#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */ +#define XCHAL_NUM_CONTEXTS 1 /* */ +#define XCHAL_NUM_MISC_REGS 0 /* num of scratch regs (0..4) */ +#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */ +#define XCHAL_HAVE_PRID 1 /* processor ID register */ +#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */ +#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */ +#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */ +#define XCHAL_HAVE_THREADPTR 0 /* THREADPTR register */ +#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */ +#define XCHAL_HAVE_CP 0 /* CPENABLE reg (coprocessor) */ +#define XCHAL_CP_MAXCFG 0 /* max allowed cp id plus one */ +#define XCHAL_HAVE_MAC16 0 /* MAC16 package */ +#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */ +#define XCHAL_HAVE_FP 0 /* floating point pkg */ +#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */ +#define XCHAL_HAVE_DFP_accel 0 /* double precision FP 
acceleration pkg */ +#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */ +#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */ +#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */ +#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */ +#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */ + + +/*---------------------------------------------------------------------- + MISC + ----------------------------------------------------------------------*/ + +#define XCHAL_NUM_WRITEBUFFER_ENTRIES 1 /* size of write buffer */ +#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */ +#define XCHAL_DATA_WIDTH 4 /* data width in bytes */ +/* In T1050, applies to selected core load and store instructions (see ISA): */ +#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */ +#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/ +#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */ +#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/ + +#define XCHAL_SW_VERSION 800001 /* sw version of this header */ + +#define XCHAL_CORE_ID "lx106" /* alphanum core name + (CoreID) set in the Xtensa + Processor Generator */ + +#define XCHAL_BUILD_UNIQUE_ID 0x0002B6F6 /* 22-bit sw build ID */ + +/* + * These definitions describe the hardware targeted by this software. + */ +#define XCHAL_HW_CONFIGID0 0xC28CDAFA /* ConfigID hi 32 bits*/ +#define XCHAL_HW_CONFIGID1 0x1082B6F6 /* ConfigID lo 32 bits*/ +#define XCHAL_HW_VERSION_NAME "LX3.0.1" /* full version name */ +#define XCHAL_HW_VERSION_MAJOR 2300 /* major ver# of targeted hw */ +#define XCHAL_HW_VERSION_MINOR 1 /* minor ver# of targeted hw */ +#define XCHAL_HW_VERSION 230001 /* major*100+minor */ +#define XCHAL_HW_REL_LX3 1 +#define XCHAL_HW_REL_LX3_0 1 +#define XCHAL_HW_REL_LX3_0_1 1 +#define XCHAL_HW_CONFIGID_RELIABLE 1 +/* If software targets a *range* of hardware versions, these are the bounds: */ +#define XCHAL_HW_MIN_VERSION_MAJOR 2300 /* major v of earliest tgt hw */ +#define XCHAL_HW_MIN_VERSION_MINOR 1 /* minor v of earliest tgt hw */ +#define XCHAL_HW_MIN_VERSION 230001 /* earliest targeted hw */ +#define XCHAL_HW_MAX_VERSION_MAJOR 2300 /* major v of latest tgt hw */ +#define XCHAL_HW_MAX_VERSION_MINOR 1 /* minor v of latest tgt hw */ +#define XCHAL_HW_MAX_VERSION 230001 /* latest targeted hw */ + + +/*---------------------------------------------------------------------- + CACHE + ----------------------------------------------------------------------*/ + +#define XCHAL_ICACHE_LINESIZE 4 /* I-cache line size in bytes */ +#define XCHAL_DCACHE_LINESIZE 4 /* D-cache line size in bytes */ +#define XCHAL_ICACHE_LINEWIDTH 2 /* log2(I line size in bytes) */ +#define XCHAL_DCACHE_LINEWIDTH 2 /* log2(D line size in bytes) */ + +#define XCHAL_ICACHE_SIZE 0 /* I-cache size in bytes or 0 */ +#define XCHAL_DCACHE_SIZE 0 /* D-cache size in bytes or 0 */ + +#define XCHAL_DCACHE_IS_WRITEBACK 0 /* writeback feature */ +#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */ + +#define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */ + + + + +/**************************************************************************** + Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code + ****************************************************************************/ + + +#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY + +/*---------------------------------------------------------------------- + CACHE + ----------------------------------------------------------------------*/ + +#define XCHAL_HAVE_PIF 1 /* any 
outbound PIF present */ + +/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */ + +/* Number of cache sets in log2(lines per way): */ +#define XCHAL_ICACHE_SETWIDTH 0 +#define XCHAL_DCACHE_SETWIDTH 0 + +/* Cache set associativity (number of ways): */ +#define XCHAL_ICACHE_WAYS 1 +#define XCHAL_DCACHE_WAYS 1 + +/* Cache features: */ +#define XCHAL_ICACHE_LINE_LOCKABLE 0 +#define XCHAL_DCACHE_LINE_LOCKABLE 0 +#define XCHAL_ICACHE_ECC_PARITY 0 +#define XCHAL_DCACHE_ECC_PARITY 0 + +/* Cache access size in bytes (affects operation of SICW instruction): */ +#define XCHAL_ICACHE_ACCESS_SIZE 1 +#define XCHAL_DCACHE_ACCESS_SIZE 1 + +/* Number of encoded cache attr bits (see for decoded bits): */ +#define XCHAL_CA_BITS 4 + + +/*---------------------------------------------------------------------- + INTERNAL I/D RAM/ROMs and XLMI + ----------------------------------------------------------------------*/ + +#define XCHAL_NUM_INSTROM 1 /* number of core instr. ROMs */ +#define XCHAL_NUM_INSTRAM 2 /* number of core instr. RAMs */ +#define XCHAL_NUM_DATAROM 1 /* number of core data ROMs */ +#define XCHAL_NUM_DATARAM 2 /* number of core data RAMs */ +#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/ +#define XCHAL_NUM_XLMI 1 /* number of core XLMI ports */ + +/* Instruction ROM 0: */ +#define XCHAL_INSTROM0_VADDR 0x40200000 +#define XCHAL_INSTROM0_PADDR 0x40200000 +// #define XCHAL_INSTROM0_VADDR 0x400000 +// #define XCHAL_INSTROM0_PADDR 0x400000 +#define XCHAL_INSTROM0_SIZE 1048576 +#define XCHAL_INSTROM0_ECC_PARITY 0 + +/* Instruction RAM 0: */ +#define XCHAL_INSTRAM0_VADDR 0x40000000 +#define XCHAL_INSTRAM0_PADDR 0x40000000 +#define XCHAL_INSTRAM0_SIZE 1048576 +#define XCHAL_INSTRAM0_ECC_PARITY 0 + +/* Instruction RAM 1: */ +#define XCHAL_INSTRAM1_VADDR 0x40100000 +#define XCHAL_INSTRAM1_PADDR 0x40100000 +#define XCHAL_INSTRAM1_SIZE 1048576 +#define XCHAL_INSTRAM1_ECC_PARITY 0 + +/* Data ROM 0: */ +#define XCHAL_DATAROM0_VADDR 0x3FF40000 +#define XCHAL_DATAROM0_PADDR 0x3FF40000 +#define XCHAL_DATAROM0_SIZE 262144 +#define XCHAL_DATAROM0_ECC_PARITY 0 + +/* Data RAM 0: */ +#define XCHAL_DATARAM0_VADDR 0x3FFC0000 +#define XCHAL_DATARAM0_PADDR 0x3FFC0000 +#define XCHAL_DATARAM0_SIZE 262144 +#define XCHAL_DATARAM0_ECC_PARITY 0 + +/* Data RAM 1: */ +#define XCHAL_DATARAM1_VADDR 0x3FF80000 +#define XCHAL_DATARAM1_PADDR 0x3FF80000 +#define XCHAL_DATARAM1_SIZE 262144 +#define XCHAL_DATARAM1_ECC_PARITY 0 + +/* XLMI Port 0: */ +#define XCHAL_XLMI0_VADDR 0x3FF00000 +#define XCHAL_XLMI0_PADDR 0x3FF00000 +#define XCHAL_XLMI0_SIZE 262144 +#define XCHAL_XLMI0_ECC_PARITY 0 + + +/*---------------------------------------------------------------------- + INTERRUPTS and TIMERS + ----------------------------------------------------------------------*/ + +#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */ +#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */ +#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */ +#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */ +#define XCHAL_NUM_TIMERS 1 /* number of CCOMPAREn regs */ +#define XCHAL_NUM_INTERRUPTS 15 /* number of interrupts */ +#define XCHAL_NUM_INTERRUPTS_LOG2 4 /* ceil(log2(NUM_INTERRUPTS)) */ +#define XCHAL_NUM_EXTINTERRUPTS 13 /* num of external interrupts */ +#define XCHAL_NUM_INTLEVELS 2 /* number of interrupt levels + (not including level zero) */ +#define XCHAL_EXCM_LEVEL 1 /* level masked by PS.EXCM */ + /* (always 1 in XEA1; levels 2 .. 
EXCM_LEVEL are "medium priority") */ + +/* Masks of interrupts at each interrupt level: */ +#define XCHAL_INTLEVEL1_MASK 0x00003FFF +#define XCHAL_INTLEVEL2_MASK 0x00000000 +#define XCHAL_INTLEVEL3_MASK 0x00004000 +#define XCHAL_INTLEVEL4_MASK 0x00000000 +#define XCHAL_INTLEVEL5_MASK 0x00000000 +#define XCHAL_INTLEVEL6_MASK 0x00000000 +#define XCHAL_INTLEVEL7_MASK 0x00000000 + +/* Masks of interrupts at each range 1..n of interrupt levels: */ +#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x00003FFF +#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x00003FFF +#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x00007FFF +#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x00007FFF +#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x00007FFF +#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x00007FFF +#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x00007FFF + +/* Level of each interrupt: */ +#define XCHAL_INT0_LEVEL 1 +#define XCHAL_INT1_LEVEL 1 +#define XCHAL_INT2_LEVEL 1 +#define XCHAL_INT3_LEVEL 1 +#define XCHAL_INT4_LEVEL 1 +#define XCHAL_INT5_LEVEL 1 +#define XCHAL_INT6_LEVEL 1 +#define XCHAL_INT7_LEVEL 1 +#define XCHAL_INT8_LEVEL 1 +#define XCHAL_INT9_LEVEL 1 +#define XCHAL_INT10_LEVEL 1 +#define XCHAL_INT11_LEVEL 1 +#define XCHAL_INT12_LEVEL 1 +#define XCHAL_INT13_LEVEL 1 +#define XCHAL_INT14_LEVEL 3 +#define XCHAL_DEBUGLEVEL 2 /* debug interrupt level */ +#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */ +#define XCHAL_NMILEVEL 3 /* NMI "level" (for use with + EXCSAVE/EPS/EPC_n, RFI n) */ + +/* Type of each interrupt: */ +#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER +#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE +#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_EDGE +#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_EDGE +#define XCHAL_INT10_TYPE XTHAL_INTTYPE_EXTERN_EDGE +#define XCHAL_INT11_TYPE XTHAL_INTTYPE_EXTERN_EDGE +#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_EDGE +#define XCHAL_INT13_TYPE XTHAL_INTTYPE_EXTERN_EDGE +#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI + +/* Masks of interrupts for each type of interrupt: */ +#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFFF8000 +#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000080 +#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x00003F00 +#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000003F +#define XCHAL_INTTYPE_MASK_TIMER 0x00000040 +#define XCHAL_INTTYPE_MASK_NMI 0x00004000 +#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000 + +/* Interrupt numbers assigned to specific interrupt sources: */ +#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */ +#define XCHAL_TIMER1_INTERRUPT XTHAL_TIMER_UNCONFIGURED +#define XCHAL_TIMER2_INTERRUPT XTHAL_TIMER_UNCONFIGURED +#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED +#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */ + +/* Interrupt numbers for levels at which only one interrupt is configured: */ +#define XCHAL_INTLEVEL3_NUM 14 +/* (There are many interrupts each at level(s) 1.) */ + + +/* + * External interrupt vectors/levels. + * These macros describe how Xtensa processor interrupt numbers + * (as numbered internally, eg. in INTERRUPT and INTENABLE registers) + * map to external BInterrupt pins, for those interrupts + * configured as external (level-triggered, edge-triggered, or NMI). + * See the Xtensa processor databook for more details. 
+ */ + +/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */ +#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */ +#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */ +#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */ +#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */ +#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */ +#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */ +#define XCHAL_EXTINT6_NUM 8 /* (intlevel 1) */ +#define XCHAL_EXTINT7_NUM 9 /* (intlevel 1) */ +#define XCHAL_EXTINT8_NUM 10 /* (intlevel 1) */ +#define XCHAL_EXTINT9_NUM 11 /* (intlevel 1) */ +#define XCHAL_EXTINT10_NUM 12 /* (intlevel 1) */ +#define XCHAL_EXTINT11_NUM 13 /* (intlevel 1) */ +#define XCHAL_EXTINT12_NUM 14 /* (intlevel 3) */ + + +/*---------------------------------------------------------------------- + EXCEPTIONS and VECTORS + ----------------------------------------------------------------------*/ + +#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture + number: 1 == XEA1 (old) + 2 == XEA2 (new) + 0 == XEAX (extern) */ +#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */ +#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */ +#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */ +#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */ +#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */ +#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */ +#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */ +#define XCHAL_VECBASE_RESET_VADDR 0x40000000 /* VECBASE reset value */ +#define XCHAL_VECBASE_RESET_PADDR 0x40000000 +#define XCHAL_RESET_VECBASE_OVERLAP 0 + +#define XCHAL_RESET_VECTOR0_VADDR 0x50000000 +#define XCHAL_RESET_VECTOR0_PADDR 0x50000000 +#define XCHAL_RESET_VECTOR1_VADDR 0x40000080 +#define XCHAL_RESET_VECTOR1_PADDR 0x40000080 +#define XCHAL_RESET_VECTOR_VADDR 0x50000000 +#define XCHAL_RESET_VECTOR_PADDR 0x50000000 + +// #define XCHAL_RESET_VECTOR0_VADDR 0x4000f8 +// #define XCHAL_RESET_VECTOR0_PADDR 0x4000f8 +// #define XCHAL_RESET_VECTOR1_VADDR 0x40000080 +// #define XCHAL_RESET_VECTOR1_PADDR 0x40000080 +// #define XCHAL_RESET_VECTOR_VADDR 0x4000f8 +// #define XCHAL_RESET_VECTOR_PADDR 0x4000f8 + + +#define XCHAL_USER_VECOFS 0x00000050 +#define XCHAL_USER_VECTOR_VADDR 0x40000050 +#define XCHAL_USER_VECTOR_PADDR 0x40000050 +#define XCHAL_KERNEL_VECOFS 0x00000030 +#define XCHAL_KERNEL_VECTOR_VADDR 0x40000030 +#define XCHAL_KERNEL_VECTOR_PADDR 0x40000030 +#define XCHAL_DOUBLEEXC_VECOFS 0x00000070 +#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x40000070 +#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x40000070 +#define XCHAL_INTLEVEL2_VECOFS 0x00000010 +#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x40000010 +#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x40000010 +#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL2_VECOFS +#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL2_VECTOR_VADDR +#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL2_VECTOR_PADDR +#define XCHAL_NMI_VECOFS 0x00000020 +#define XCHAL_NMI_VECTOR_VADDR 0x40000020 +#define XCHAL_NMI_VECTOR_PADDR 0x40000020 +#define XCHAL_INTLEVEL3_VECOFS XCHAL_NMI_VECOFS +#define XCHAL_INTLEVEL3_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR +#define XCHAL_INTLEVEL3_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR + + +/*---------------------------------------------------------------------- + DEBUG + ----------------------------------------------------------------------*/ + +#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */ +#define XCHAL_NUM_IBREAK 1 /* number of IBREAKn regs */ +#define XCHAL_NUM_DBREAK 1 /* number of DBREAKn regs */ +#define XCHAL_HAVE_OCD_DIR_ARRAY 0 /* faster OCD option 
*/ + + +/*---------------------------------------------------------------------- + MMU + ----------------------------------------------------------------------*/ + +/* See core-matmap.h header file for more details. */ + +#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */ +#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */ +#define XCHAL_SPANNING_WAY 0 /* TLB spanning way number */ +#define XCHAL_HAVE_IDENTITY_MAP 1 /* vaddr == paddr always */ +#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */ +#define XCHAL_HAVE_MIMIC_CACHEATTR 1 /* region protection */ +#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */ +#define XCHAL_HAVE_PTP_MMU 0 /* full MMU (with page table + [autorefill] and protection) + usable for an MMU-based OS */ +/* If none of the above last 4 are set, it's a custom TLB configuration. */ + +#define XCHAL_MMU_ASID_BITS 0 /* number of bits in ASIDs */ +#define XCHAL_MMU_RINGS 1 /* number of rings (1..4) */ +#define XCHAL_MMU_RING_BITS 0 /* num of bits in RING field */ + +#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */ + + +#endif /* _XTENSA_CORE_CONFIGURATION_H */ + diff --git a/target/xtensa/core-lx106/gdb-config.c.inc b/target/xtensa/core-lx106/gdb-config.c.inc new file mode 100644 index 0000000000..365d6fe609 --- /dev/null +++ b/target/xtensa/core-lx106/gdb-config.c.inc @@ -0,0 +1,83 @@ +/* Configuration for the Xtensa architecture for GDB, the GNU debugger. + + Copyright (c) 2003-2010 Tensilica Inc. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ + XTREG( 0, 0,32, 4, 4,0x0000,0x0006,-2, 8,0x0100,a0, 0,0,0,0,0,0) + XTREG( 1, 4,32, 4, 4,0x0001,0x0006,-2, 8,0x0100,a1, 0,0,0,0,0,0) + XTREG( 2, 8,32, 4, 4,0x0002,0x0006,-2, 8,0x0100,a2, 0,0,0,0,0,0) + XTREG( 3, 12,32, 4, 4,0x0003,0x0006,-2, 8,0x0100,a3, 0,0,0,0,0,0) + XTREG( 4, 16,32, 4, 4,0x0004,0x0006,-2, 8,0x0100,a4, 0,0,0,0,0,0) + XTREG( 5, 20,32, 4, 4,0x0005,0x0006,-2, 8,0x0100,a5, 0,0,0,0,0,0) + XTREG( 6, 24,32, 4, 4,0x0006,0x0006,-2, 8,0x0100,a6, 0,0,0,0,0,0) + XTREG( 7, 28,32, 4, 4,0x0007,0x0006,-2, 8,0x0100,a7, 0,0,0,0,0,0) + XTREG( 8, 32,32, 4, 4,0x0008,0x0006,-2, 8,0x0100,a8, 0,0,0,0,0,0) + XTREG( 9, 36,32, 4, 4,0x0009,0x0006,-2, 8,0x0100,a9, 0,0,0,0,0,0) + XTREG( 10, 40,32, 4, 4,0x000a,0x0006,-2, 8,0x0100,a10, 0,0,0,0,0,0) + XTREG( 11, 44,32, 4, 4,0x000b,0x0006,-2, 8,0x0100,a11, 0,0,0,0,0,0) + XTREG( 12, 48,32, 4, 4,0x000c,0x0006,-2, 8,0x0100,a12, 0,0,0,0,0,0) + XTREG( 13, 52,32, 4, 4,0x000d,0x0006,-2, 8,0x0100,a13, 0,0,0,0,0,0) + XTREG( 14, 56,32, 4, 4,0x000e,0x0006,-2, 8,0x0100,a14, 0,0,0,0,0,0) + XTREG( 15, 60,32, 4, 4,0x000f,0x0006,-2, 8,0x0100,a15, 0,0,0,0,0,0) + XTREG( 16, 64,32, 4, 4,0x0020,0x0006,-2, 9,0x0100,pc, 0,0,0,0,0,0) + XTREG( 17, 68, 6, 4, 4,0x0203,0x0006,-2, 2,0x1100,sar, 0,0,0,0,0,0) + XTREG( 18, 72,32, 4, 4,0x0205,0x0006,-2, 2,0x1100,litbase, 0,0,0,0,0,0) + XTREG( 19, 76,32, 4, 4,0x02b0,0x0002,-2, 2,0x1000,sr176, 0,0,0,0,0,0) + XTREG( 20, 80,32, 4, 4,0x02d0,0x0002,-2, 2,0x1000,sr208, 0,0,0,0,0,0) + XTREG( 21, 84, 6, 4, 4,0x02e6,0x0006,-2, 2,0x1100,ps, 0,0,0,0,0,0) + XTREG( 22, 88,32, 4, 4,0x0259,0x000d,-2, 2,0x1000,mmid, 0,0,0,0,0,0) + XTREG( 23, 92, 1, 4, 4,0x0260,0x0007,-2, 2,0x1000,ibreakenable,0,0,0,0,0,0) + XTREG( 24, 96,32, 4, 4,0x0268,0x0007,-2, 2,0x1000,ddr, 0,0,0,0,0,0) + XTREG( 25,100,32, 4, 4,0x0280,0x0007,-2, 2,0x1000,ibreaka0, 0,0,0,0,0,0) + XTREG( 26,104,32, 4, 4,0x0290,0x0007,-2, 2,0x1000,dbreaka0, 0,0,0,0,0,0) + XTREG( 27,108,32, 4, 4,0x02a0,0x0007,-2, 2,0x1000,dbreakc0, 0,0,0,0,0,0) + XTREG( 28,112,32, 4, 4,0x02b1,0x0007,-2, 2,0x1000,epc1, 0,0,0,0,0,0) + XTREG( 29,116,32, 4, 4,0x02b2,0x0007,-2, 2,0x1000,epc2, 0,0,0,0,0,0) + XTREG( 30,120,32, 4, 4,0x02b3,0x0007,-2, 2,0x1000,epc3, 0,0,0,0,0,0) + XTREG( 31,124,32, 4, 4,0x02c0,0x0007,-2, 2,0x1000,depc, 0,0,0,0,0,0) + XTREG( 32,128, 6, 4, 4,0x02c2,0x0007,-2, 2,0x1000,eps2, 0,0,0,0,0,0) + XTREG( 33,132, 6, 4, 4,0x02c3,0x0007,-2, 2,0x1000,eps3, 0,0,0,0,0,0) + XTREG( 34,136,32, 4, 4,0x02d1,0x0007,-2, 2,0x1000,excsave1, 0,0,0,0,0,0) + XTREG( 35,140,32, 4, 4,0x02d2,0x0007,-2, 2,0x1000,excsave2, 0,0,0,0,0,0) + XTREG( 36,144,32, 4, 4,0x02d3,0x0007,-2, 2,0x1000,excsave3, 0,0,0,0,0,0) + XTREG( 37,148,15, 4, 4,0x02e2,0x000b,-2, 2,0x1000,interrupt, 0,0,0,0,0,0) + XTREG( 38,152,15, 4, 4,0x02e2,0x000d,-2, 2,0x1000,intset, 0,0,0,0,0,0) + XTREG( 39,156,15, 4, 4,0x02e3,0x000d,-2, 2,0x1000,intclear, 0,0,0,0,0,0) + XTREG( 40,160,15, 4, 4,0x02e4,0x0007,-2, 2,0x1000,intenable, 0,0,0,0,0,0) + XTREG( 41,164,32, 4, 4,0x02e7,0x0007,-2, 2,0x1000,vecbase, 0,0,0,0,0,0) + XTREG( 42,168, 6, 4, 4,0x02e8,0x0007,-2, 2,0x1000,exccause, 0,0,0,0,0,0) + XTREG( 43,172,12, 4, 4,0x02e9,0x0003,-2, 2,0x1000,debugcause, 0,0,0,0,0,0) + XTREG( 44,176,32, 4, 4,0x02ea,0x000f,-2, 2,0x1000,ccount, 0,0,0,0,0,0) + XTREG( 45,180,32, 4, 4,0x02eb,0x0003,-2, 2,0x1000,prid, 0,0,0,0,0,0) + XTREG( 46,184,32, 4, 4,0x02ec,0x000f,-2, 2,0x1000,icount, 0,0,0,0,0,0) + XTREG( 47,188, 4, 4, 4,0x02ed,0x0007,-2, 2,0x1000,icountlevel, 0,0,0,0,0,0) + XTREG( 48,192,32, 4, 4,0x02ee,0x0007,-2, 2,0x1000,excvaddr, 0,0,0,0,0,0) + XTREG( 49,196,32, 4, 
4,0x02f0,0x000f,-2, 2,0x1000,ccompare0, 0,0,0,0,0,0) + XTREG( 50,200, 4, 4, 4,0x2002,0x0006,-2, 6,0x1010,psintlevel, + 0,0,&xtensa_mask0,0,0,0) + XTREG( 51,204, 1, 4, 4,0x2003,0x0006,-2, 6,0x1010,psum, + 0,0,&xtensa_mask1,0,0,0) + XTREG( 52,208, 1, 4, 4,0x2004,0x0006,-2, 6,0x1010,psexcm, + 0,0,&xtensa_mask2,0,0,0) + XTREG( 53,212,20, 4, 4,0x2005,0x0006,-2, 6,0x1010,litbaddr, + 0,0,&xtensa_mask3,0,0,0) + XTREG( 54,216, 1, 4, 4,0x2006,0x0006,-2, 6,0x1010,litben, + 0,0,&xtensa_mask4,0,0,0) + XTREG_END diff --git a/target/xtensa/core-lx106/xtensa-modules.c.inc b/target/xtensa/core-lx106/xtensa-modules.c.inc new file mode 100644 index 0000000000..f2b5efc6ec --- /dev/null +++ b/target/xtensa/core-lx106/xtensa-modules.c.inc @@ -0,0 +1,7668 @@ +/* Xtensa configuration-specific ISA information. + + Copyright (c) 2003-2010 Tensilica Inc. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#include "xtensa-isa.h" +#include "xtensa-isa-internal.h" + + +/* Sysregs. */ + +static xtensa_sysreg_internal sysregs[] = { + { "MMID", 89, 0 }, + { "DDR", 104, 0 }, + { "176", 176, 0 }, + { "208", 208, 0 }, + { "INTERRUPT", 226, 0 }, + { "INTCLEAR", 227, 0 }, + { "CCOUNT", 234, 0 }, + { "PRID", 235, 0 }, + { "ICOUNT", 236, 0 }, + { "CCOMPARE0", 240, 0 }, + { "VECBASE", 231, 0 }, + { "EPC1", 177, 0 }, + { "EPC2", 178, 0 }, + { "EPC3", 179, 0 }, + { "EXCSAVE1", 209, 0 }, + { "EXCSAVE2", 210, 0 }, + { "EXCSAVE3", 211, 0 }, + { "EPS2", 194, 0 }, + { "EPS3", 195, 0 }, + { "EXCCAUSE", 232, 0 }, + { "DEPC", 192, 0 }, + { "EXCVADDR", 238, 0 }, + { "SAR", 3, 0 }, + { "LITBASE", 5, 0 }, + { "PS", 230, 0 }, + { "INTENABLE", 228, 0 }, + { "DBREAKA0", 144, 0 }, + { "DBREAKC0", 160, 0 }, + { "IBREAKA0", 128, 0 }, + { "IBREAKENABLE", 96, 0 }, + { "ICOUNTLEVEL", 237, 0 }, + { "DEBUGCAUSE", 233, 0 } +}; + +#define NUM_SYSREGS 32 +#define MAX_SPECIAL_REG 240 +#define MAX_USER_REG 0 + + +/* Processor states. 
*/ + +static xtensa_state_internal states[] = { + { "PC", 32, 0 }, + { "ICOUNT", 32, 0 }, + { "DDR", 32, 0 }, + { "INTERRUPT", 15, 0 }, + { "CCOUNT", 32, 0 }, + { "XTSYNC", 1, 0 }, + { "VECBASE", 25, 0 }, + { "EPC1", 32, 0 }, + { "EPC2", 32, 0 }, + { "EPC3", 32, 0 }, + { "EXCSAVE1", 32, 0 }, + { "EXCSAVE2", 32, 0 }, + { "EXCSAVE3", 32, 0 }, + { "EPS2", 6, 0 }, + { "EPS3", 6, 0 }, + { "EXCCAUSE", 6, 0 }, + { "PSINTLEVEL", 4, 0 }, + { "PSUM", 1, 0 }, + { "PSEXCM", 1, 0 }, + { "DEPC", 32, 0 }, + { "EXCVADDR", 32, 0 }, + { "SAR", 6, 0 }, + { "LITBADDR", 20, 0 }, + { "LITBEN", 1, 0 }, + { "InOCDMode", 1, 0 }, + { "INTENABLE", 15, 0 }, + { "DBREAKA0", 32, 0 }, + { "DBREAKC0", 8, 0 }, + { "IBREAKA0", 32, 0 }, + { "IBREAKENABLE", 1, 0 }, + { "ICOUNTLEVEL", 4, 0 }, + { "DEBUGCAUSE", 6, 0 }, + { "DBNUM", 4, 0 }, + { "CCOMPARE0", 32, 0 } +}; + +#define NUM_STATES 34 + +enum xtensa_state_id { + STATE_PC, + STATE_ICOUNT, + STATE_DDR, + STATE_INTERRUPT, + STATE_CCOUNT, + STATE_XTSYNC, + STATE_VECBASE, + STATE_EPC1, + STATE_EPC2, + STATE_EPC3, + STATE_EXCSAVE1, + STATE_EXCSAVE2, + STATE_EXCSAVE3, + STATE_EPS2, + STATE_EPS3, + STATE_EXCCAUSE, + STATE_PSINTLEVEL, + STATE_PSUM, + STATE_PSEXCM, + STATE_DEPC, + STATE_EXCVADDR, + STATE_SAR, + STATE_LITBADDR, + STATE_LITBEN, + STATE_InOCDMode, + STATE_INTENABLE, + STATE_DBREAKA0, + STATE_DBREAKC0, + STATE_IBREAKA0, + STATE_IBREAKENABLE, + STATE_ICOUNTLEVEL, + STATE_DEBUGCAUSE, + STATE_DBNUM, + STATE_CCOMPARE0 +}; + + +/* Field definitions. */ + +static unsigned +Field_t_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28); + return tie_t; +} + +static void +Field_t_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf0) | (tie_t << 4); +} + +static unsigned +Field_s_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28); + return tie_t; +} + +static void +Field_s_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf00) | (tie_t << 8); +} + +static unsigned +Field_r_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28); + return tie_t; +} + +static void +Field_r_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf000) | (tie_t << 12); +} + +static unsigned +Field_op2_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28); + return tie_t; +} + +static void +Field_op2_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20); +} + +static unsigned +Field_op1_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28); + return tie_t; +} + +static void +Field_op1_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16); +} + +static unsigned +Field_op0_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28); + return tie_t; +} + +static void +Field_op0_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf) | (tie_t << 0); +} + +static unsigned 
+Field_m_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30); + return tie_t; +} + +static void +Field_m_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 30) >> 30; + insn[0] = (insn[0] & ~0xc0) | (tie_t << 6); +} + +static unsigned +Field_n_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30); + return tie_t; +} + +static void +Field_n_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 30) >> 30; + insn[0] = (insn[0] & ~0x30) | (tie_t << 4); +} + +static unsigned +Field_thi3_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29); + return tie_t; +} + +static void +Field_thi3_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 29) >> 29; + insn[0] = (insn[0] & ~0xe0) | (tie_t << 5); +} + +static unsigned +Field_sr_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28); + tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28); + return tie_t; +} + +static void +Field_sr_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf00) | (tie_t << 8); + tie_t = (val << 24) >> 28; + insn[0] = (insn[0] & ~0xf000) | (tie_t << 12); +} + +static unsigned +Field_op0_Slot_inst16a_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28); + return tie_t; +} + +static void +Field_op0_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf) | (tie_t << 0); +} + +static unsigned +Field_z_Slot_inst16b_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31); + return tie_t; +} + +static void +Field_z_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 31) >> 31; + insn[0] = (insn[0] & ~0x40) | (tie_t << 6); +} + +static unsigned +Field_i_Slot_inst16b_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31); + return tie_t; +} + +static void +Field_i_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 31) >> 31; + insn[0] = (insn[0] & ~0x80) | (tie_t << 7); +} + +static unsigned +Field_op0_Slot_inst16b_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28); + return tie_t; +} + +static void +Field_op0_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf) | (tie_t << 0); +} + +static unsigned +Field_t_Slot_inst16b_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28); + return tie_t; +} + +static void +Field_t_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf0) | (tie_t << 4); +} + +static unsigned +Field_r_Slot_inst16b_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28); + return tie_t; +} + +static void +Field_r_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf000) | (tie_t << 12); +} + +static unsigned +Field_s_Slot_inst16b_get (const 
xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28); + return tie_t; +} + +static void +Field_s_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf00) | (tie_t << 8); +} + +static unsigned +Field_t_Slot_inst16a_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28); + return tie_t; +} + +static void +Field_t_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf0) | (tie_t << 4); +} + +static unsigned +Field_bbi4_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31); + return tie_t; +} + +static void +Field_bbi4_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 31) >> 31; + insn[0] = (insn[0] & ~0x1000) | (tie_t << 12); +} + +static unsigned +Field_bbi_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31); + tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28); + return tie_t; +} + +static void +Field_bbi_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf0) | (tie_t << 4); + tie_t = (val << 27) >> 31; + insn[0] = (insn[0] & ~0x1000) | (tie_t << 12); +} + +static unsigned +Field_imm12_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 12) | ((insn[0] << 8) >> 20); + return tie_t; +} + +static void +Field_imm12_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 20) >> 20; + insn[0] = (insn[0] & ~0xfff000) | (tie_t << 12); +} + +static unsigned +Field_imm8_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24); + return tie_t; +} + +static void +Field_imm8_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 24) >> 24; + insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16); +} + +static unsigned +Field_s_Slot_inst16a_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28); + return tie_t; +} + +static void +Field_s_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf00) | (tie_t << 8); +} + +static unsigned +Field_imm12b_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28); + tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24); + return tie_t; +} + +static void +Field_imm12b_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 24) >> 24; + insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16); + tie_t = (val << 20) >> 28; + insn[0] = (insn[0] & ~0xf00) | (tie_t << 8); +} + +static unsigned +Field_imm16_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 16) | ((insn[0] << 8) >> 16); + return tie_t; +} + +static void +Field_imm16_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 16) >> 16; + insn[0] = (insn[0] & ~0xffff00) | (tie_t << 8); +} + +static unsigned +Field_offset_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 18) | ((insn[0] << 8) >> 14); + return tie_t; +} + +static void +Field_offset_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t 
= (val << 14) >> 14; + insn[0] = (insn[0] & ~0xffffc0) | (tie_t << 6); +} + +static unsigned +Field_r_Slot_inst16a_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28); + return tie_t; +} + +static void +Field_r_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf000) | (tie_t << 12); +} + +static unsigned +Field_sa4_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31); + return tie_t; +} + +static void +Field_sa4_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 31) >> 31; + insn[0] = (insn[0] & ~0x100000) | (tie_t << 20); +} + +static unsigned +Field_sae4_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31); + return tie_t; +} + +static void +Field_sae4_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 31) >> 31; + insn[0] = (insn[0] & ~0x10000) | (tie_t << 16); +} + +static unsigned +Field_sae_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31); + tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28); + return tie_t; +} + +static void +Field_sae_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf00) | (tie_t << 8); + tie_t = (val << 27) >> 31; + insn[0] = (insn[0] & ~0x10000) | (tie_t << 16); +} + +static unsigned +Field_sal_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31); + tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28); + return tie_t; +} + +static void +Field_sal_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf0) | (tie_t << 4); + tie_t = (val << 27) >> 31; + insn[0] = (insn[0] & ~0x100000) | (tie_t << 20); +} + +static unsigned +Field_sargt_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31); + tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28); + return tie_t; +} + +static void +Field_sargt_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf00) | (tie_t << 8); + tie_t = (val << 27) >> 31; + insn[0] = (insn[0] & ~0x100000) | (tie_t << 20); +} + +static unsigned +Field_sas4_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31); + return tie_t; +} + +static void +Field_sas4_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 31) >> 31; + insn[0] = (insn[0] & ~0x10) | (tie_t << 4); +} + +static unsigned +Field_sas_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31); + tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28); + return tie_t; +} + +static void +Field_sas_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf00) | (tie_t << 8); + tie_t = (val << 27) >> 31; + insn[0] = (insn[0] & ~0x10) | (tie_t << 4); +} + +static unsigned +Field_sr_Slot_inst16a_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28); + tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28); + return tie_t; +} + +static void 
+Field_sr_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf00) | (tie_t << 8); + tie_t = (val << 24) >> 28; + insn[0] = (insn[0] & ~0xf000) | (tie_t << 12); +} + +static unsigned +Field_sr_Slot_inst16b_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28); + tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28); + return tie_t; +} + +static void +Field_sr_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf00) | (tie_t << 8); + tie_t = (val << 24) >> 28; + insn[0] = (insn[0] & ~0xf000) | (tie_t << 12); +} + +static unsigned +Field_st_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28); + tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28); + return tie_t; +} + +static void +Field_st_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf0) | (tie_t << 4); + tie_t = (val << 24) >> 28; + insn[0] = (insn[0] & ~0xf00) | (tie_t << 8); +} + +static unsigned +Field_st_Slot_inst16a_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28); + tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28); + return tie_t; +} + +static void +Field_st_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf0) | (tie_t << 4); + tie_t = (val << 24) >> 28; + insn[0] = (insn[0] & ~0xf00) | (tie_t << 8); +} + +static unsigned +Field_st_Slot_inst16b_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28); + tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28); + return tie_t; +} + +static void +Field_st_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf0) | (tie_t << 4); + tie_t = (val << 24) >> 28; + insn[0] = (insn[0] & ~0xf00) | (tie_t << 8); +} + +static unsigned +Field_imm4_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28); + return tie_t; +} + +static void +Field_imm4_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf000) | (tie_t << 12); +} + +static unsigned +Field_imm4_Slot_inst16a_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28); + return tie_t; +} + +static void +Field_imm4_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf000) | (tie_t << 12); +} + +static unsigned +Field_imm4_Slot_inst16b_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28); + return tie_t; +} + +static void +Field_imm4_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf000) | (tie_t << 12); +} + +static unsigned +Field_i_Slot_inst16a_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31); + return tie_t; +} + +static void +Field_i_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 31) >> 31; + insn[0] = (insn[0] & ~0x80) | (tie_t << 7); +} + +static unsigned +Field_imm6lo_Slot_inst16a_get (const xtensa_insnbuf 
insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28); + return tie_t; +} + +static void +Field_imm6lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf000) | (tie_t << 12); +} + +static unsigned +Field_imm6lo_Slot_inst16b_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28); + return tie_t; +} + +static void +Field_imm6lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf000) | (tie_t << 12); +} + +static unsigned +Field_imm6hi_Slot_inst16a_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30); + return tie_t; +} + +static void +Field_imm6hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 30) >> 30; + insn[0] = (insn[0] & ~0x30) | (tie_t << 4); +} + +static unsigned +Field_imm6hi_Slot_inst16b_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30); + return tie_t; +} + +static void +Field_imm6hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 30) >> 30; + insn[0] = (insn[0] & ~0x30) | (tie_t << 4); +} + +static unsigned +Field_imm7lo_Slot_inst16a_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28); + return tie_t; +} + +static void +Field_imm7lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf000) | (tie_t << 12); +} + +static unsigned +Field_imm7lo_Slot_inst16b_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28); + return tie_t; +} + +static void +Field_imm7lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf000) | (tie_t << 12); +} + +static unsigned +Field_imm7hi_Slot_inst16a_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29); + return tie_t; +} + +static void +Field_imm7hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 29) >> 29; + insn[0] = (insn[0] & ~0x70) | (tie_t << 4); +} + +static unsigned +Field_imm7hi_Slot_inst16b_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29); + return tie_t; +} + +static void +Field_imm7hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 29) >> 29; + insn[0] = (insn[0] & ~0x70) | (tie_t << 4); +} + +static unsigned +Field_z_Slot_inst16a_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31); + return tie_t; +} + +static void +Field_z_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 31) >> 31; + insn[0] = (insn[0] & ~0x40) | (tie_t << 6); +} + +static unsigned +Field_imm6_Slot_inst16a_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30); + tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28); + return tie_t; +} + +static void +Field_imm6_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf000) | (tie_t << 12); + tie_t = (val << 26) >> 30; + insn[0] = (insn[0] & ~0x30) | (tie_t << 4); +} + +static 
unsigned +Field_imm6_Slot_inst16b_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30); + tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28); + return tie_t; +} + +static void +Field_imm6_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf000) | (tie_t << 12); + tie_t = (val << 26) >> 30; + insn[0] = (insn[0] & ~0x30) | (tie_t << 4); +} + +static unsigned +Field_imm7_Slot_inst16a_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29); + tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28); + return tie_t; +} + +static void +Field_imm7_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf000) | (tie_t << 12); + tie_t = (val << 25) >> 29; + insn[0] = (insn[0] & ~0x70) | (tie_t << 4); +} + +static unsigned +Field_imm7_Slot_inst16b_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29); + tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28); + return tie_t; +} + +static void +Field_imm7_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 28) >> 28; + insn[0] = (insn[0] & ~0xf000) | (tie_t << 12); + tie_t = (val << 25) >> 29; + insn[0] = (insn[0] & ~0x70) | (tie_t << 4); +} + +static unsigned +Field_xt_wbr15_imm_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 15) | ((insn[0] << 8) >> 17); + return tie_t; +} + +static void +Field_xt_wbr15_imm_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 17) >> 17; + insn[0] = (insn[0] & ~0xfffe00) | (tie_t << 9); +} + +static unsigned +Field_xt_wbr18_imm_Slot_inst_get (const xtensa_insnbuf insn) +{ + unsigned tie_t = 0; + tie_t = (tie_t << 18) | ((insn[0] << 8) >> 14); + return tie_t; +} + +static void +Field_xt_wbr18_imm_Slot_inst_set (xtensa_insnbuf insn, uint32 val) +{ + uint32 tie_t; + tie_t = (val << 14) >> 14; + insn[0] = (insn[0] & ~0xffffc0) | (tie_t << 6); +} + +static void +Implicit_Field_set (xtensa_insnbuf insn ATTRIBUTE_UNUSED, + uint32 val ATTRIBUTE_UNUSED) +{ + /* Do nothing. */ +} + +static unsigned +Implicit_Field_ar0_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED) +{ + return 0; +} + +enum xtensa_field_id { + FIELD_t, + FIELD_bbi4, + FIELD_bbi, + FIELD_imm12, + FIELD_imm8, + FIELD_s, + FIELD_imm12b, + FIELD_imm16, + FIELD_m, + FIELD_n, + FIELD_offset, + FIELD_op0, + FIELD_op1, + FIELD_op2, + FIELD_r, + FIELD_sa4, + FIELD_sae4, + FIELD_sae, + FIELD_sal, + FIELD_sargt, + FIELD_sas4, + FIELD_sas, + FIELD_sr, + FIELD_st, + FIELD_thi3, + FIELD_imm4, + FIELD_i, + FIELD_imm6lo, + FIELD_imm6hi, + FIELD_imm7lo, + FIELD_imm7hi, + FIELD_z, + FIELD_imm6, + FIELD_imm7, + FIELD_xt_wbr15_imm, + FIELD_xt_wbr18_imm, + FIELD__ar0 +}; + + +/* Functional units. */ + +static xtensa_funcUnit_internal funcUnits[] = { + +}; + + +/* Register files. */ + +enum xtensa_regfile_id { + REGFILE_AR +}; + +static xtensa_regfile_internal regfiles[] = { + { "AR", "a", REGFILE_AR, 32, 16 } +}; + + +/* Interfaces. */ + +static xtensa_interface_internal interfaces[] = { + +}; + + +/* Constant tables. 
*/ + +/* constant table ai4c */ +static const unsigned CONST_TBL_ai4c_0[] = { + 0xffffffff, + 0x1, + 0x2, + 0x3, + 0x4, + 0x5, + 0x6, + 0x7, + 0x8, + 0x9, + 0xa, + 0xb, + 0xc, + 0xd, + 0xe, + 0xf, + 0 +}; + +/* constant table b4c */ +static const unsigned CONST_TBL_b4c_0[] = { + 0xffffffff, + 0x1, + 0x2, + 0x3, + 0x4, + 0x5, + 0x6, + 0x7, + 0x8, + 0xa, + 0xc, + 0x10, + 0x20, + 0x40, + 0x80, + 0x100, + 0 +}; + +/* constant table b4cu */ +static const unsigned CONST_TBL_b4cu_0[] = { + 0x8000, + 0x10000, + 0x2, + 0x3, + 0x4, + 0x5, + 0x6, + 0x7, + 0x8, + 0xa, + 0xc, + 0x10, + 0x20, + 0x40, + 0x80, + 0x100, + 0 +}; + + +/* Instruction operands. */ + +static int +Operand_art_decode (uint32 *valp ATTRIBUTE_UNUSED) +{ + return 0; +} + +static int +Operand_art_encode (uint32 *valp) +{ + int error; + error = (*valp & ~0xf) != 0; + return error; +} + +static int +Operand_ars_decode (uint32 *valp ATTRIBUTE_UNUSED) +{ + return 0; +} + +static int +Operand_ars_encode (uint32 *valp) +{ + int error; + error = (*valp & ~0xf) != 0; + return error; +} + +static int +Operand_arr_decode (uint32 *valp ATTRIBUTE_UNUSED) +{ + return 0; +} + +static int +Operand_arr_encode (uint32 *valp) +{ + int error; + error = (*valp & ~0xf) != 0; + return error; +} + +static int +Operand_ar0_decode (uint32 *valp ATTRIBUTE_UNUSED) +{ + return 0; +} + +static int +Operand_ar0_encode (uint32 *valp) +{ + int error; + error = (*valp & ~0xf) != 0; + return error; +} + +static int +Operand_soffsetx4_decode (uint32 *valp) +{ + unsigned soffsetx4_0, offset_0; + offset_0 = *valp & 0x3ffff; + soffsetx4_0 = 0x4 + ((((int) offset_0 << 14) >> 14) << 2); + *valp = soffsetx4_0; + return 0; +} + +static int +Operand_soffsetx4_encode (uint32 *valp) +{ + unsigned offset_0, soffsetx4_0; + soffsetx4_0 = *valp; + offset_0 = ((soffsetx4_0 - 0x4) >> 2) & 0x3ffff; + *valp = offset_0; + return 0; +} + +static int +Operand_soffsetx4_ator (uint32 *valp, uint32 pc) +{ + *valp -= (pc & ~0x3); + return 0; +} + +static int +Operand_soffsetx4_rtoa (uint32 *valp, uint32 pc) +{ + *valp += (pc & ~0x3); + return 0; +} + +static int +Operand_lsi4x4_decode (uint32 *valp) +{ + unsigned lsi4x4_0, r_0; + r_0 = *valp & 0xf; + lsi4x4_0 = r_0 << 2; + *valp = lsi4x4_0; + return 0; +} + +static int +Operand_lsi4x4_encode (uint32 *valp) +{ + unsigned r_0, lsi4x4_0; + lsi4x4_0 = *valp; + r_0 = ((lsi4x4_0 >> 2) & 0xf); + *valp = r_0; + return 0; +} + +static int +Operand_simm7_decode (uint32 *valp) +{ + unsigned simm7_0, imm7_0; + imm7_0 = *valp & 0x7f; + simm7_0 = ((((-((((imm7_0 >> 6) & 1)) & (((imm7_0 >> 5) & 1)))) & 0x1ffffff)) << 7) | imm7_0; + *valp = simm7_0; + return 0; +} + +static int +Operand_simm7_encode (uint32 *valp) +{ + unsigned imm7_0, simm7_0; + simm7_0 = *valp; + imm7_0 = (simm7_0 & 0x7f); + *valp = imm7_0; + return 0; +} + +static int +Operand_uimm6_decode (uint32 *valp) +{ + unsigned uimm6_0, imm6_0; + imm6_0 = *valp & 0x3f; + uimm6_0 = 0x4 + (((0) << 6) | imm6_0); + *valp = uimm6_0; + return 0; +} + +static int +Operand_uimm6_encode (uint32 *valp) +{ + unsigned imm6_0, uimm6_0; + uimm6_0 = *valp; + imm6_0 = (uimm6_0 - 0x4) & 0x3f; + *valp = imm6_0; + return 0; +} + +static int +Operand_uimm6_ator (uint32 *valp, uint32 pc) +{ + *valp -= pc; + return 0; +} + +static int +Operand_uimm6_rtoa (uint32 *valp, uint32 pc) +{ + *valp += pc; + return 0; +} + +static int +Operand_ai4const_decode (uint32 *valp) +{ + unsigned ai4const_0, t_0; + t_0 = *valp & 0xf; + ai4const_0 = CONST_TBL_ai4c_0[t_0 & 0xf]; + *valp = ai4const_0; + return 0; +} + +static int 
+Operand_ai4const_encode (uint32 *valp) +{ + unsigned t_0, ai4const_0; + ai4const_0 = *valp; + switch (ai4const_0) + { + case 0xffffffff: t_0 = 0; break; + case 0x1: t_0 = 0x1; break; + case 0x2: t_0 = 0x2; break; + case 0x3: t_0 = 0x3; break; + case 0x4: t_0 = 0x4; break; + case 0x5: t_0 = 0x5; break; + case 0x6: t_0 = 0x6; break; + case 0x7: t_0 = 0x7; break; + case 0x8: t_0 = 0x8; break; + case 0x9: t_0 = 0x9; break; + case 0xa: t_0 = 0xa; break; + case 0xb: t_0 = 0xb; break; + case 0xc: t_0 = 0xc; break; + case 0xd: t_0 = 0xd; break; + case 0xe: t_0 = 0xe; break; + default: t_0 = 0xf; break; + } + *valp = t_0; + return 0; +} + +static int +Operand_b4const_decode (uint32 *valp) +{ + unsigned b4const_0, r_0; + r_0 = *valp & 0xf; + b4const_0 = CONST_TBL_b4c_0[r_0 & 0xf]; + *valp = b4const_0; + return 0; +} + +static int +Operand_b4const_encode (uint32 *valp) +{ + unsigned r_0, b4const_0; + b4const_0 = *valp; + switch (b4const_0) + { + case 0xffffffff: r_0 = 0; break; + case 0x1: r_0 = 0x1; break; + case 0x2: r_0 = 0x2; break; + case 0x3: r_0 = 0x3; break; + case 0x4: r_0 = 0x4; break; + case 0x5: r_0 = 0x5; break; + case 0x6: r_0 = 0x6; break; + case 0x7: r_0 = 0x7; break; + case 0x8: r_0 = 0x8; break; + case 0xa: r_0 = 0x9; break; + case 0xc: r_0 = 0xa; break; + case 0x10: r_0 = 0xb; break; + case 0x20: r_0 = 0xc; break; + case 0x40: r_0 = 0xd; break; + case 0x80: r_0 = 0xe; break; + default: r_0 = 0xf; break; + } + *valp = r_0; + return 0; +} + +static int +Operand_b4constu_decode (uint32 *valp) +{ + unsigned b4constu_0, r_0; + r_0 = *valp & 0xf; + b4constu_0 = CONST_TBL_b4cu_0[r_0 & 0xf]; + *valp = b4constu_0; + return 0; +} + +static int +Operand_b4constu_encode (uint32 *valp) +{ + unsigned r_0, b4constu_0; + b4constu_0 = *valp; + switch (b4constu_0) + { + case 0x8000: r_0 = 0; break; + case 0x10000: r_0 = 0x1; break; + case 0x2: r_0 = 0x2; break; + case 0x3: r_0 = 0x3; break; + case 0x4: r_0 = 0x4; break; + case 0x5: r_0 = 0x5; break; + case 0x6: r_0 = 0x6; break; + case 0x7: r_0 = 0x7; break; + case 0x8: r_0 = 0x8; break; + case 0xa: r_0 = 0x9; break; + case 0xc: r_0 = 0xa; break; + case 0x10: r_0 = 0xb; break; + case 0x20: r_0 = 0xc; break; + case 0x40: r_0 = 0xd; break; + case 0x80: r_0 = 0xe; break; + default: r_0 = 0xf; break; + } + *valp = r_0; + return 0; +} + +static int +Operand_uimm8_decode (uint32 *valp) +{ + unsigned uimm8_0, imm8_0; + imm8_0 = *valp & 0xff; + uimm8_0 = imm8_0; + *valp = uimm8_0; + return 0; +} + +static int +Operand_uimm8_encode (uint32 *valp) +{ + unsigned imm8_0, uimm8_0; + uimm8_0 = *valp; + imm8_0 = (uimm8_0 & 0xff); + *valp = imm8_0; + return 0; +} + +static int +Operand_uimm8x2_decode (uint32 *valp) +{ + unsigned uimm8x2_0, imm8_0; + imm8_0 = *valp & 0xff; + uimm8x2_0 = imm8_0 << 1; + *valp = uimm8x2_0; + return 0; +} + +static int +Operand_uimm8x2_encode (uint32 *valp) +{ + unsigned imm8_0, uimm8x2_0; + uimm8x2_0 = *valp; + imm8_0 = ((uimm8x2_0 >> 1) & 0xff); + *valp = imm8_0; + return 0; +} + +static int +Operand_uimm8x4_decode (uint32 *valp) +{ + unsigned uimm8x4_0, imm8_0; + imm8_0 = *valp & 0xff; + uimm8x4_0 = imm8_0 << 2; + *valp = uimm8x4_0; + return 0; +} + +static int +Operand_uimm8x4_encode (uint32 *valp) +{ + unsigned imm8_0, uimm8x4_0; + uimm8x4_0 = *valp; + imm8_0 = ((uimm8x4_0 >> 2) & 0xff); + *valp = imm8_0; + return 0; +} + +static int +Operand_uimm4x16_decode (uint32 *valp) +{ + unsigned uimm4x16_0, op2_0; + op2_0 = *valp & 0xf; + uimm4x16_0 = op2_0 << 4; + *valp = uimm4x16_0; + return 0; +} + +static int +Operand_uimm4x16_encode 
(uint32 *valp) +{ + unsigned op2_0, uimm4x16_0; + uimm4x16_0 = *valp; + op2_0 = ((uimm4x16_0 >> 4) & 0xf); + *valp = op2_0; + return 0; +} + +static int +Operand_simm8_decode (uint32 *valp) +{ + unsigned simm8_0, imm8_0; + imm8_0 = *valp & 0xff; + simm8_0 = ((int) imm8_0 << 24) >> 24; + *valp = simm8_0; + return 0; +} + +static int +Operand_simm8_encode (uint32 *valp) +{ + unsigned imm8_0, simm8_0; + simm8_0 = *valp; + imm8_0 = (simm8_0 & 0xff); + *valp = imm8_0; + return 0; +} + +static int +Operand_simm8x256_decode (uint32 *valp) +{ + unsigned simm8x256_0, imm8_0; + imm8_0 = *valp & 0xff; + simm8x256_0 = (((int) imm8_0 << 24) >> 24) << 8; + *valp = simm8x256_0; + return 0; +} + +static int +Operand_simm8x256_encode (uint32 *valp) +{ + unsigned imm8_0, simm8x256_0; + simm8x256_0 = *valp; + imm8_0 = ((simm8x256_0 >> 8) & 0xff); + *valp = imm8_0; + return 0; +} + +static int +Operand_simm12b_decode (uint32 *valp) +{ + unsigned simm12b_0, imm12b_0; + imm12b_0 = *valp & 0xfff; + simm12b_0 = ((int) imm12b_0 << 20) >> 20; + *valp = simm12b_0; + return 0; +} + +static int +Operand_simm12b_encode (uint32 *valp) +{ + unsigned imm12b_0, simm12b_0; + simm12b_0 = *valp; + imm12b_0 = (simm12b_0 & 0xfff); + *valp = imm12b_0; + return 0; +} + +static int +Operand_msalp32_decode (uint32 *valp) +{ + unsigned msalp32_0, sal_0; + sal_0 = *valp & 0x1f; + msalp32_0 = 0x20 - sal_0; + *valp = msalp32_0; + return 0; +} + +static int +Operand_msalp32_encode (uint32 *valp) +{ + unsigned sal_0, msalp32_0; + msalp32_0 = *valp; + sal_0 = (0x20 - msalp32_0) & 0x1f; + *valp = sal_0; + return 0; +} + +static int +Operand_op2p1_decode (uint32 *valp) +{ + unsigned op2p1_0, op2_0; + op2_0 = *valp & 0xf; + op2p1_0 = op2_0 + 0x1; + *valp = op2p1_0; + return 0; +} + +static int +Operand_op2p1_encode (uint32 *valp) +{ + unsigned op2_0, op2p1_0; + op2p1_0 = *valp; + op2_0 = (op2p1_0 - 0x1) & 0xf; + *valp = op2_0; + return 0; +} + +static int +Operand_label8_decode (uint32 *valp) +{ + unsigned label8_0, imm8_0; + imm8_0 = *valp & 0xff; + label8_0 = 0x4 + (((int) imm8_0 << 24) >> 24); + *valp = label8_0; + return 0; +} + +static int +Operand_label8_encode (uint32 *valp) +{ + unsigned imm8_0, label8_0; + label8_0 = *valp; + imm8_0 = (label8_0 - 0x4) & 0xff; + *valp = imm8_0; + return 0; +} + +static int +Operand_label8_ator (uint32 *valp, uint32 pc) +{ + *valp -= pc; + return 0; +} + +static int +Operand_label8_rtoa (uint32 *valp, uint32 pc) +{ + *valp += pc; + return 0; +} + +static int +Operand_label12_decode (uint32 *valp) +{ + unsigned label12_0, imm12_0; + imm12_0 = *valp & 0xfff; + label12_0 = 0x4 + (((int) imm12_0 << 20) >> 20); + *valp = label12_0; + return 0; +} + +static int +Operand_label12_encode (uint32 *valp) +{ + unsigned imm12_0, label12_0; + label12_0 = *valp; + imm12_0 = (label12_0 - 0x4) & 0xfff; + *valp = imm12_0; + return 0; +} + +static int +Operand_label12_ator (uint32 *valp, uint32 pc) +{ + *valp -= pc; + return 0; +} + +static int +Operand_label12_rtoa (uint32 *valp, uint32 pc) +{ + *valp += pc; + return 0; +} + +static int +Operand_soffset_decode (uint32 *valp) +{ + unsigned soffset_0, offset_0; + offset_0 = *valp & 0x3ffff; + soffset_0 = 0x4 + (((int) offset_0 << 14) >> 14); + *valp = soffset_0; + return 0; +} + +static int +Operand_soffset_encode (uint32 *valp) +{ + unsigned offset_0, soffset_0; + soffset_0 = *valp; + offset_0 = (soffset_0 - 0x4) & 0x3ffff; + *valp = offset_0; + return 0; +} + +static int +Operand_soffset_ator (uint32 *valp, uint32 pc) +{ + *valp -= pc; + return 0; +} + +static int 
+Operand_soffset_rtoa (uint32 *valp, uint32 pc) +{ + *valp += pc; + return 0; +} + +static int +Operand_uimm16x4_decode (uint32 *valp) +{ + unsigned uimm16x4_0, imm16_0; + imm16_0 = *valp & 0xffff; + uimm16x4_0 = (((0xffff) << 16) | imm16_0) << 2; + *valp = uimm16x4_0; + return 0; +} + +static int +Operand_uimm16x4_encode (uint32 *valp) +{ + unsigned imm16_0, uimm16x4_0; + uimm16x4_0 = *valp; + imm16_0 = (uimm16x4_0 >> 2) & 0xffff; + *valp = imm16_0; + return 0; +} + +static int +Operand_uimm16x4_ator (uint32 *valp, uint32 pc) +{ + *valp -= ((pc + 3) & ~0x3); + return 0; +} + +static int +Operand_uimm16x4_rtoa (uint32 *valp, uint32 pc) +{ + *valp += ((pc + 3) & ~0x3); + return 0; +} + +static int +Operand_immt_decode (uint32 *valp) +{ + unsigned immt_0, t_0; + t_0 = *valp & 0xf; + immt_0 = t_0; + *valp = immt_0; + return 0; +} + +static int +Operand_immt_encode (uint32 *valp) +{ + unsigned t_0, immt_0; + immt_0 = *valp; + t_0 = immt_0 & 0xf; + *valp = t_0; + return 0; +} + +static int +Operand_imms_decode (uint32 *valp) +{ + unsigned imms_0, s_0; + s_0 = *valp & 0xf; + imms_0 = s_0; + *valp = imms_0; + return 0; +} + +static int +Operand_imms_encode (uint32 *valp) +{ + unsigned s_0, imms_0; + imms_0 = *valp; + s_0 = imms_0 & 0xf; + *valp = s_0; + return 0; +} + +static int +Operand_xt_wbr15_label_decode (uint32 *valp) +{ + unsigned xt_wbr15_label_0, xt_wbr15_imm_0; + xt_wbr15_imm_0 = *valp & 0x7fff; + xt_wbr15_label_0 = 0x4 + (((int) xt_wbr15_imm_0 << 17) >> 17); + *valp = xt_wbr15_label_0; + return 0; +} + +static int +Operand_xt_wbr15_label_encode (uint32 *valp) +{ + unsigned xt_wbr15_imm_0, xt_wbr15_label_0; + xt_wbr15_label_0 = *valp; + xt_wbr15_imm_0 = (xt_wbr15_label_0 - 0x4) & 0x7fff; + *valp = xt_wbr15_imm_0; + return 0; +} + +static int +Operand_xt_wbr15_label_ator (uint32 *valp, uint32 pc) +{ + *valp -= pc; + return 0; +} + +static int +Operand_xt_wbr15_label_rtoa (uint32 *valp, uint32 pc) +{ + *valp += pc; + return 0; +} + +static int +Operand_xt_wbr18_label_decode (uint32 *valp) +{ + unsigned xt_wbr18_label_0, xt_wbr18_imm_0; + xt_wbr18_imm_0 = *valp & 0x3ffff; + xt_wbr18_label_0 = 0x4 + (((int) xt_wbr18_imm_0 << 14) >> 14); + *valp = xt_wbr18_label_0; + return 0; +} + +static int +Operand_xt_wbr18_label_encode (uint32 *valp) +{ + unsigned xt_wbr18_imm_0, xt_wbr18_label_0; + xt_wbr18_label_0 = *valp; + xt_wbr18_imm_0 = (xt_wbr18_label_0 - 0x4) & 0x3ffff; + *valp = xt_wbr18_imm_0; + return 0; +} + +static int +Operand_xt_wbr18_label_ator (uint32 *valp, uint32 pc) +{ + *valp -= pc; + return 0; +} + +static int +Operand_xt_wbr18_label_rtoa (uint32 *valp, uint32 pc) +{ + *valp += pc; + return 0; +} + +static xtensa_operand_internal operands[] = { + { "art", FIELD_t, REGFILE_AR, 1, + XTENSA_OPERAND_IS_REGISTER, + Operand_art_encode, Operand_art_decode, + 0, 0 }, + { "ars", FIELD_s, REGFILE_AR, 1, + XTENSA_OPERAND_IS_REGISTER, + Operand_ars_encode, Operand_ars_decode, + 0, 0 }, + { "*ars_invisible", FIELD_s, REGFILE_AR, 1, + XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE, + Operand_ars_encode, Operand_ars_decode, + 0, 0 }, + { "arr", FIELD_r, REGFILE_AR, 1, + XTENSA_OPERAND_IS_REGISTER, + Operand_arr_encode, Operand_arr_decode, + 0, 0 }, + { "ar0", FIELD__ar0, REGFILE_AR, 1, + XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE, + Operand_ar0_encode, Operand_ar0_decode, + 0, 0 }, + { "soffsetx4", FIELD_offset, -1, 0, + XTENSA_OPERAND_IS_PCRELATIVE, + Operand_soffsetx4_encode, Operand_soffsetx4_decode, + Operand_soffsetx4_ator, Operand_soffsetx4_rtoa }, + { "lsi4x4", 
FIELD_r, -1, 0, + 0, + Operand_lsi4x4_encode, Operand_lsi4x4_decode, + 0, 0 }, + { "simm7", FIELD_imm7, -1, 0, + 0, + Operand_simm7_encode, Operand_simm7_decode, + 0, 0 }, + { "uimm6", FIELD_imm6, -1, 0, + XTENSA_OPERAND_IS_PCRELATIVE, + Operand_uimm6_encode, Operand_uimm6_decode, + Operand_uimm6_ator, Operand_uimm6_rtoa }, + { "ai4const", FIELD_t, -1, 0, + 0, + Operand_ai4const_encode, Operand_ai4const_decode, + 0, 0 }, + { "b4const", FIELD_r, -1, 0, + 0, + Operand_b4const_encode, Operand_b4const_decode, + 0, 0 }, + { "b4constu", FIELD_r, -1, 0, + 0, + Operand_b4constu_encode, Operand_b4constu_decode, + 0, 0 }, + { "uimm8", FIELD_imm8, -1, 0, + 0, + Operand_uimm8_encode, Operand_uimm8_decode, + 0, 0 }, + { "uimm8x2", FIELD_imm8, -1, 0, + 0, + Operand_uimm8x2_encode, Operand_uimm8x2_decode, + 0, 0 }, + { "uimm8x4", FIELD_imm8, -1, 0, + 0, + Operand_uimm8x4_encode, Operand_uimm8x4_decode, + 0, 0 }, + { "uimm4x16", FIELD_op2, -1, 0, + 0, + Operand_uimm4x16_encode, Operand_uimm4x16_decode, + 0, 0 }, + { "simm8", FIELD_imm8, -1, 0, + 0, + Operand_simm8_encode, Operand_simm8_decode, + 0, 0 }, + { "simm8x256", FIELD_imm8, -1, 0, + 0, + Operand_simm8x256_encode, Operand_simm8x256_decode, + 0, 0 }, + { "simm12b", FIELD_imm12b, -1, 0, + 0, + Operand_simm12b_encode, Operand_simm12b_decode, + 0, 0 }, + { "msalp32", FIELD_sal, -1, 0, + 0, + Operand_msalp32_encode, Operand_msalp32_decode, + 0, 0 }, + { "op2p1", FIELD_op2, -1, 0, + 0, + Operand_op2p1_encode, Operand_op2p1_decode, + 0, 0 }, + { "label8", FIELD_imm8, -1, 0, + XTENSA_OPERAND_IS_PCRELATIVE, + Operand_label8_encode, Operand_label8_decode, + Operand_label8_ator, Operand_label8_rtoa }, + { "label12", FIELD_imm12, -1, 0, + XTENSA_OPERAND_IS_PCRELATIVE, + Operand_label12_encode, Operand_label12_decode, + Operand_label12_ator, Operand_label12_rtoa }, + { "soffset", FIELD_offset, -1, 0, + XTENSA_OPERAND_IS_PCRELATIVE, + Operand_soffset_encode, Operand_soffset_decode, + Operand_soffset_ator, Operand_soffset_rtoa }, + { "uimm16x4", FIELD_imm16, -1, 0, + XTENSA_OPERAND_IS_PCRELATIVE, + Operand_uimm16x4_encode, Operand_uimm16x4_decode, + Operand_uimm16x4_ator, Operand_uimm16x4_rtoa }, + { "immt", FIELD_t, -1, 0, + 0, + Operand_immt_encode, Operand_immt_decode, + 0, 0 }, + { "imms", FIELD_s, -1, 0, + 0, + Operand_imms_encode, Operand_imms_decode, + 0, 0 }, + { "xt_wbr15_label", FIELD_xt_wbr15_imm, -1, 0, + XTENSA_OPERAND_IS_PCRELATIVE, + Operand_xt_wbr15_label_encode, Operand_xt_wbr15_label_decode, + Operand_xt_wbr15_label_ator, Operand_xt_wbr15_label_rtoa }, + { "xt_wbr18_label", FIELD_xt_wbr18_imm, -1, 0, + XTENSA_OPERAND_IS_PCRELATIVE, + Operand_xt_wbr18_label_encode, Operand_xt_wbr18_label_decode, + Operand_xt_wbr18_label_ator, Operand_xt_wbr18_label_rtoa }, + { "t", FIELD_t, -1, 0, 0, 0, 0, 0, 0 }, + { "bbi4", FIELD_bbi4, -1, 0, 0, 0, 0, 0, 0 }, + { "bbi", FIELD_bbi, -1, 0, 0, 0, 0, 0, 0 }, + { "imm12", FIELD_imm12, -1, 0, 0, 0, 0, 0, 0 }, + { "imm8", FIELD_imm8, -1, 0, 0, 0, 0, 0, 0 }, + { "s", FIELD_s, -1, 0, 0, 0, 0, 0, 0 }, + { "imm12b", FIELD_imm12b, -1, 0, 0, 0, 0, 0, 0 }, + { "imm16", FIELD_imm16, -1, 0, 0, 0, 0, 0, 0 }, + { "m", FIELD_m, -1, 0, 0, 0, 0, 0, 0 }, + { "n", FIELD_n, -1, 0, 0, 0, 0, 0, 0 }, + { "offset", FIELD_offset, -1, 0, 0, 0, 0, 0, 0 }, + { "op0", FIELD_op0, -1, 0, 0, 0, 0, 0, 0 }, + { "op1", FIELD_op1, -1, 0, 0, 0, 0, 0, 0 }, + { "op2", FIELD_op2, -1, 0, 0, 0, 0, 0, 0 }, + { "r", FIELD_r, -1, 0, 0, 0, 0, 0, 0 }, + { "sa4", FIELD_sa4, -1, 0, 0, 0, 0, 0, 0 }, + { "sae4", FIELD_sae4, -1, 0, 0, 0, 0, 0, 0 }, + { "sae", 
FIELD_sae, -1, 0, 0, 0, 0, 0, 0 }, + { "sal", FIELD_sal, -1, 0, 0, 0, 0, 0, 0 }, + { "sargt", FIELD_sargt, -1, 0, 0, 0, 0, 0, 0 }, + { "sas4", FIELD_sas4, -1, 0, 0, 0, 0, 0, 0 }, + { "sas", FIELD_sas, -1, 0, 0, 0, 0, 0, 0 }, + { "sr", FIELD_sr, -1, 0, 0, 0, 0, 0, 0 }, + { "st", FIELD_st, -1, 0, 0, 0, 0, 0, 0 }, + { "thi3", FIELD_thi3, -1, 0, 0, 0, 0, 0, 0 }, + { "imm4", FIELD_imm4, -1, 0, 0, 0, 0, 0, 0 }, + { "i", FIELD_i, -1, 0, 0, 0, 0, 0, 0 }, + { "imm6lo", FIELD_imm6lo, -1, 0, 0, 0, 0, 0, 0 }, + { "imm6hi", FIELD_imm6hi, -1, 0, 0, 0, 0, 0, 0 }, + { "imm7lo", FIELD_imm7lo, -1, 0, 0, 0, 0, 0, 0 }, + { "imm7hi", FIELD_imm7hi, -1, 0, 0, 0, 0, 0, 0 }, + { "z", FIELD_z, -1, 0, 0, 0, 0, 0, 0 }, + { "imm6", FIELD_imm6, -1, 0, 0, 0, 0, 0, 0 }, + { "imm7", FIELD_imm7, -1, 0, 0, 0, 0, 0, 0 }, + { "xt_wbr15_imm", FIELD_xt_wbr15_imm, -1, 0, 0, 0, 0, 0, 0 }, + { "xt_wbr18_imm", FIELD_xt_wbr18_imm, -1, 0, 0, 0, 0, 0, 0 } +}; + +enum xtensa_operand_id { + OPERAND_art, + OPERAND_ars, + OPERAND__ars_invisible, + OPERAND_arr, + OPERAND_ar0, + OPERAND_soffsetx4, + OPERAND_lsi4x4, + OPERAND_simm7, + OPERAND_uimm6, + OPERAND_ai4const, + OPERAND_b4const, + OPERAND_b4constu, + OPERAND_uimm8, + OPERAND_uimm8x2, + OPERAND_uimm8x4, + OPERAND_uimm4x16, + OPERAND_simm8, + OPERAND_simm8x256, + OPERAND_simm12b, + OPERAND_msalp32, + OPERAND_op2p1, + OPERAND_label8, + OPERAND_label12, + OPERAND_soffset, + OPERAND_uimm16x4, + OPERAND_immt, + OPERAND_imms, + OPERAND_xt_wbr15_label, + OPERAND_xt_wbr18_label, + OPERAND_t, + OPERAND_bbi4, + OPERAND_bbi, + OPERAND_imm12, + OPERAND_imm8, + OPERAND_s, + OPERAND_imm12b, + OPERAND_imm16, + OPERAND_m, + OPERAND_n, + OPERAND_offset, + OPERAND_op0, + OPERAND_op1, + OPERAND_op2, + OPERAND_r, + OPERAND_sa4, + OPERAND_sae4, + OPERAND_sae, + OPERAND_sal, + OPERAND_sargt, + OPERAND_sas4, + OPERAND_sas, + OPERAND_sr, + OPERAND_st, + OPERAND_thi3, + OPERAND_imm4, + OPERAND_i, + OPERAND_imm6lo, + OPERAND_imm6hi, + OPERAND_imm7lo, + OPERAND_imm7hi, + OPERAND_z, + OPERAND_imm6, + OPERAND_imm7, + OPERAND_xt_wbr15_imm, + OPERAND_xt_wbr18_imm +}; + + +/* Iclass table. 
*/ + +static xtensa_arg_internal Iclass_xt_iclass_rfe_stateArgs[] = { + { { STATE_PSEXCM }, 'o' }, + { { STATE_EPC1 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rfde_stateArgs[] = { + { { STATE_DEPC }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_add_n_args[] = { + { { OPERAND_arr }, 'o' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_addi_n_args[] = { + { { OPERAND_arr }, 'o' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_ai4const }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_bz6_args[] = { + { { OPERAND_ars }, 'i' }, + { { OPERAND_uimm6 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_loadi4_args[] = { + { { OPERAND_art }, 'o' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_lsi4x4 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_mov_n_args[] = { + { { OPERAND_art }, 'o' }, + { { OPERAND_ars }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_movi_n_args[] = { + { { OPERAND_ars }, 'o' }, + { { OPERAND_simm7 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_retn_args[] = { + { { OPERAND__ars_invisible }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_storei4_args[] = { + { { OPERAND_art }, 'i' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_lsi4x4 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_addi_args[] = { + { { OPERAND_art }, 'o' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_simm8 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_addmi_args[] = { + { { OPERAND_art }, 'o' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_simm8x256 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_addsub_args[] = { + { { OPERAND_arr }, 'o' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_bit_args[] = { + { { OPERAND_arr }, 'o' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_bsi8_args[] = { + { { OPERAND_ars }, 'i' }, + { { OPERAND_b4const }, 'i' }, + { { OPERAND_label8 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_bsi8b_args[] = { + { { OPERAND_ars }, 'i' }, + { { OPERAND_bbi }, 'i' }, + { { OPERAND_label8 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_bsi8u_args[] = { + { { OPERAND_ars }, 'i' }, + { { OPERAND_b4constu }, 'i' }, + { { OPERAND_label8 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_bst8_args[] = { + { { OPERAND_ars }, 'i' }, + { { OPERAND_art }, 'i' }, + { { OPERAND_label8 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_bsz12_args[] = { + { { OPERAND_ars }, 'i' }, + { { OPERAND_label12 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_call0_args[] = { + { { OPERAND_soffsetx4 }, 'i' }, + { { OPERAND_ar0 }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_callx0_args[] = { + { { OPERAND_ars }, 'i' }, + { { OPERAND_ar0 }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_exti_args[] = { + { { OPERAND_arr }, 'o' }, + { { OPERAND_art }, 'i' }, + { { OPERAND_sae }, 'i' }, + { { OPERAND_op2p1 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_jump_args[] = { + { { OPERAND_soffset }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_jumpx_args[] = { + { { OPERAND_ars }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_l16ui_args[] = { + { { OPERAND_art }, 'o' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_uimm8x2 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_l16si_args[] = { + { { OPERAND_art }, 'o' 
}, + { { OPERAND_ars }, 'i' }, + { { OPERAND_uimm8x2 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_l32i_args[] = { + { { OPERAND_art }, 'o' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_uimm8x4 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_l32r_args[] = { + { { OPERAND_art }, 'o' }, + { { OPERAND_uimm16x4 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_l32r_stateArgs[] = { + { { STATE_LITBADDR }, 'i' }, + { { STATE_LITBEN }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_l8i_args[] = { + { { OPERAND_art }, 'o' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_uimm8 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_movi_args[] = { + { { OPERAND_art }, 'o' }, + { { OPERAND_simm12b }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_movz_args[] = { + { { OPERAND_arr }, 'm' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_neg_args[] = { + { { OPERAND_arr }, 'o' }, + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_return_args[] = { + { { OPERAND__ars_invisible }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_s16i_args[] = { + { { OPERAND_art }, 'i' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_uimm8x2 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_s32i_args[] = { + { { OPERAND_art }, 'i' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_uimm8x4 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_s8i_args[] = { + { { OPERAND_art }, 'i' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_uimm8 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_sar_args[] = { + { { OPERAND_ars }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_sar_stateArgs[] = { + { { STATE_SAR }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_sari_args[] = { + { { OPERAND_sas }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_sari_stateArgs[] = { + { { STATE_SAR }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_shifts_args[] = { + { { OPERAND_arr }, 'o' }, + { { OPERAND_ars }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_shifts_stateArgs[] = { + { { STATE_SAR }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_shiftst_args[] = { + { { OPERAND_arr }, 'o' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_shiftst_stateArgs[] = { + { { STATE_SAR }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_shiftt_args[] = { + { { OPERAND_arr }, 'o' }, + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_shiftt_stateArgs[] = { + { { STATE_SAR }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_slli_args[] = { + { { OPERAND_arr }, 'o' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_msalp32 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_srai_args[] = { + { { OPERAND_arr }, 'o' }, + { { OPERAND_art }, 'i' }, + { { OPERAND_sargt }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_srli_args[] = { + { { OPERAND_arr }, 'o' }, + { { OPERAND_art }, 'i' }, + { { OPERAND_s }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_sync_stateArgs[] = { + { { STATE_XTSYNC }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsil_args[] = { + { { OPERAND_art }, 'o' }, + { { OPERAND_s }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsil_stateArgs[] = { + { { STATE_PSUM }, 'i' }, + { { STATE_PSEXCM }, 'i' }, + { { STATE_PSINTLEVEL }, 'm' } +}; + +static xtensa_arg_internal 
Iclass_xt_iclass_rsr_sar_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_stateArgs[] = { + { { STATE_SAR }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_stateArgs[] = { + { { STATE_SAR }, 'o' }, + { { STATE_XTSYNC }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_stateArgs[] = { + { { STATE_SAR }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_litbase_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_litbase_stateArgs[] = { + { { STATE_LITBADDR }, 'i' }, + { { STATE_LITBEN }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_litbase_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_litbase_stateArgs[] = { + { { STATE_LITBADDR }, 'o' }, + { { STATE_LITBEN }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_litbase_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_litbase_stateArgs[] = { + { { STATE_LITBADDR }, 'm' }, + { { STATE_LITBEN }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_176_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_176_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_208_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_stateArgs[] = { + { { STATE_PSUM }, 'i' }, + { { STATE_PSEXCM }, 'i' }, + { { STATE_PSINTLEVEL }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_stateArgs[] = { + { { STATE_PSUM }, 'o' }, + { { STATE_PSEXCM }, 'o' }, + { { STATE_PSINTLEVEL }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_stateArgs[] = { + { { STATE_PSUM }, 'm' }, + { { STATE_PSEXCM }, 'm' }, + { { STATE_PSINTLEVEL }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_stateArgs[] = { + { { STATE_EPC1 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_stateArgs[] = { + { { STATE_EPC1 }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_stateArgs[] = { + { { STATE_EPC1 }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_stateArgs[] = { + { { STATE_EXCSAVE1 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_stateArgs[] = { + { { STATE_EXCSAVE1 }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal 
Iclass_xt_iclass_xsr_excsave1_stateArgs[] = { + { { STATE_EXCSAVE1 }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_stateArgs[] = { + { { STATE_EPC2 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_stateArgs[] = { + { { STATE_EPC2 }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_stateArgs[] = { + { { STATE_EPC2 }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_stateArgs[] = { + { { STATE_EXCSAVE2 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_stateArgs[] = { + { { STATE_EXCSAVE2 }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_stateArgs[] = { + { { STATE_EXCSAVE2 }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_epc3_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_epc3_stateArgs[] = { + { { STATE_EPC3 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_epc3_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_epc3_stateArgs[] = { + { { STATE_EPC3 }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_epc3_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_epc3_stateArgs[] = { + { { STATE_EPC3 }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave3_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave3_stateArgs[] = { + { { STATE_EXCSAVE3 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave3_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave3_stateArgs[] = { + { { STATE_EXCSAVE3 }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave3_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave3_stateArgs[] = { + { { STATE_EXCSAVE3 }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_stateArgs[] = { + { { STATE_EPS2 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_stateArgs[] = { + { { STATE_EPS2 }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_stateArgs[] = { + { { STATE_EPS2 }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_eps3_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_eps3_stateArgs[] = { + { { STATE_EPS3 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_eps3_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_eps3_stateArgs[] = { + { { STATE_EPS3 }, 'o' } +}; + 
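Editor's note: the iclass argument tables above pair each operand or state with a one-character access code: 'i' marks a value the instruction reads, 'o' one it writes, and 'm' one it both reads and writes (the RSR/WSR/XSR triads for each special register follow exactly this pattern). The short stand-alone sketch below shows how such a table can be interpreted; the struct layout and all names here are illustrative assumptions, not the real xtensa-isa-internal.h definitions.

#include <stdio.h>

/* Assumed mirror of one arg entry: an operand/state index plus an access flag. */
struct demo_arg {
    int id;       /* e.g. the STATE_EPS3 index from the enum above */
    char inout;   /* 'i' = read, 'o' = written, 'm' = read and written */
};

static const char *demo_access (char inout)
{
    switch (inout) {
    case 'i': return "read";
    case 'o': return "written";
    case 'm': return "read+written";
    default:  return "unknown";
    }
}

int main (void)
{
    /* Mirrors Iclass_xt_iclass_xsr_eps3: both the AR operand and STATE_EPS3
       are 'm', because XSR exchanges the register with the special register. */
    static const struct demo_arg xsr_eps3[] = { { 0, 'm' }, { 14 /* STATE_EPS3 */, 'm' } };
    for (unsigned i = 0; i < sizeof xsr_eps3 / sizeof xsr_eps3[0]; i++)
        printf ("arg %u: %s\n", i, demo_access (xsr_eps3[i].inout));
    return 0;
}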
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps3_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_eps3_stateArgs[] = { + { { STATE_EPS3 }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_stateArgs[] = { + { { STATE_EXCVADDR }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_stateArgs[] = { + { { STATE_EXCVADDR }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_stateArgs[] = { + { { STATE_EXCVADDR }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_stateArgs[] = { + { { STATE_DEPC }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_stateArgs[] = { + { { STATE_DEPC }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_stateArgs[] = { + { { STATE_DEPC }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_stateArgs[] = { + { { STATE_EXCCAUSE }, 'i' }, + { { STATE_XTSYNC }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_stateArgs[] = { + { { STATE_EXCCAUSE }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_stateArgs[] = { + { { STATE_EXCCAUSE }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_prid_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_vecbase_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_vecbase_stateArgs[] = { + { { STATE_VECBASE }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_vecbase_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_vecbase_stateArgs[] = { + { { STATE_VECBASE }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_vecbase_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_vecbase_stateArgs[] = { + { { STATE_VECBASE }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_mul16_args[] = { + { { OPERAND_arr }, 'o' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_mul32_args[] = { + { { OPERAND_arr }, 'o' }, + { { OPERAND_ars }, 'i' }, + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rfi_args[] = { + { { OPERAND_s }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rfi_stateArgs[] = { + { { STATE_PSUM }, 'o' }, + { { STATE_PSEXCM }, 'o' }, + { { STATE_PSINTLEVEL }, 'o' }, + { { STATE_EPC1 }, 'i' }, + { { STATE_EPC2 }, 'i' }, + { { STATE_EPC3 }, 'i' }, + { { STATE_EPS2 }, 'i' }, + { { STATE_EPS3 }, 'i' }, + { { STATE_InOCDMode }, 'm' } +}; + +static 
xtensa_arg_internal Iclass_xt_iclass_wait_args[] = { + { { OPERAND_s }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wait_stateArgs[] = { + { { STATE_PSINTLEVEL }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_stateArgs[] = { + { { STATE_INTERRUPT }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_stateArgs[] = { + { { STATE_XTSYNC }, 'o' }, + { { STATE_INTERRUPT }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_stateArgs[] = { + { { STATE_XTSYNC }, 'o' }, + { { STATE_INTERRUPT }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_stateArgs[] = { + { { STATE_INTENABLE }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_stateArgs[] = { + { { STATE_INTENABLE }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_stateArgs[] = { + { { STATE_INTENABLE }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_break_args[] = { + { { OPERAND_imms }, 'i' }, + { { OPERAND_immt }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_break_stateArgs[] = { + { { STATE_PSEXCM }, 'i' }, + { { STATE_PSINTLEVEL }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_break_n_args[] = { + { { OPERAND_imms }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_break_n_stateArgs[] = { + { { STATE_PSEXCM }, 'i' }, + { { STATE_PSINTLEVEL }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka0_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka0_stateArgs[] = { + { { STATE_DBREAKA0 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka0_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka0_stateArgs[] = { + { { STATE_DBREAKA0 }, 'o' }, + { { STATE_XTSYNC }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka0_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka0_stateArgs[] = { + { { STATE_DBREAKA0 }, 'm' }, + { { STATE_XTSYNC }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc0_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc0_stateArgs[] = { + { { STATE_DBREAKC0 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc0_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc0_stateArgs[] = { + { { STATE_DBREAKC0 }, 'o' }, + { { STATE_XTSYNC }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc0_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc0_stateArgs[] = { + { { STATE_DBREAKC0 }, 'm' }, + { { STATE_XTSYNC }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka0_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal 
Iclass_xt_iclass_rsr_ibreaka0_stateArgs[] = { + { { STATE_IBREAKA0 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka0_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka0_stateArgs[] = { + { { STATE_IBREAKA0 }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka0_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka0_stateArgs[] = { + { { STATE_IBREAKA0 }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreakenable_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreakenable_stateArgs[] = { + { { STATE_IBREAKENABLE }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreakenable_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreakenable_stateArgs[] = { + { { STATE_IBREAKENABLE }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreakenable_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreakenable_stateArgs[] = { + { { STATE_IBREAKENABLE }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_stateArgs[] = { + { { STATE_DEBUGCAUSE }, 'i' }, + { { STATE_DBNUM }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_stateArgs[] = { + { { STATE_DEBUGCAUSE }, 'o' }, + { { STATE_DBNUM }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_stateArgs[] = { + { { STATE_DEBUGCAUSE }, 'm' }, + { { STATE_DBNUM }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_stateArgs[] = { + { { STATE_ICOUNT }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_stateArgs[] = { + { { STATE_XTSYNC }, 'o' }, + { { STATE_ICOUNT }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_stateArgs[] = { + { { STATE_XTSYNC }, 'o' }, + { { STATE_ICOUNT }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_stateArgs[] = { + { { STATE_ICOUNTLEVEL }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_stateArgs[] = { + { { STATE_ICOUNTLEVEL }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_stateArgs[] = { + { { STATE_ICOUNTLEVEL }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_stateArgs[] = { + { { STATE_DDR }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_args[] = { + { { OPERAND_art }, 'i' } +}; + 
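/*
 * Editorial aside, not part of the generated tables above or below: in each
 * xtensa_arg_internal entry the second field is a direction code for how the
 * instruction class uses that operand or state register: 'i' for read-only,
 * 'o' for write-only, 'm' for read-modify-write.  A minimal, hypothetical
 * helper (the name xtensa_inout_name is illustrative only) that decodes the
 * flag could look like this:
 */
static inline const char *xtensa_inout_name(char inout)
{
    switch (inout) {
    case 'i':
        return "in";      /* operand/state is only read */
    case 'o':
        return "out";     /* operand/state is only written */
    case 'm':
        return "inout";   /* operand/state is read and modified */
    default:
        return "unknown";
    }
}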
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_stateArgs[] = { + { { STATE_XTSYNC }, 'o' }, + { { STATE_DDR }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_stateArgs[] = { + { { STATE_XTSYNC }, 'o' }, + { { STATE_DDR }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rfdo_args[] = { + { { OPERAND_imms }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rfdo_stateArgs[] = { + { { STATE_InOCDMode }, 'm' }, + { { STATE_EPC2 }, 'i' }, + { { STATE_PSUM }, 'o' }, + { { STATE_PSEXCM }, 'o' }, + { { STATE_PSINTLEVEL }, 'o' }, + { { STATE_EPS2 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rfdd_stateArgs[] = { + { { STATE_InOCDMode }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_mmid_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_mmid_stateArgs[] = { + { { STATE_XTSYNC }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_stateArgs[] = { + { { STATE_CCOUNT }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_stateArgs[] = { + { { STATE_XTSYNC }, 'o' }, + { { STATE_CCOUNT }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_stateArgs[] = { + { { STATE_XTSYNC }, 'o' }, + { { STATE_CCOUNT }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_args[] = { + { { OPERAND_art }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_stateArgs[] = { + { { STATE_CCOMPARE0 }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_args[] = { + { { OPERAND_art }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_stateArgs[] = { + { { STATE_CCOMPARE0 }, 'o' }, + { { STATE_INTERRUPT }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_args[] = { + { { OPERAND_art }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_stateArgs[] = { + { { STATE_CCOMPARE0 }, 'm' }, + { { STATE_INTERRUPT }, 'm' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_idtlb_args[] = { + { { OPERAND_ars }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_idtlb_stateArgs[] = { + { { STATE_XTSYNC }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_rdtlb_args[] = { + { { OPERAND_art }, 'o' }, + { { OPERAND_ars }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wdtlb_args[] = { + { { OPERAND_art }, 'i' }, + { { OPERAND_ars }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_wdtlb_stateArgs[] = { + { { STATE_XTSYNC }, 'o' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_iitlb_args[] = { + { { OPERAND_ars }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_ritlb_args[] = { + { { OPERAND_art }, 'o' }, + { { OPERAND_ars }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_witlb_args[] = { + { { OPERAND_art }, 'i' }, + { { OPERAND_ars }, 'i' } +}; + +static xtensa_arg_internal Iclass_xt_iclass_nsa_args[] = { + { { OPERAND_art }, 'o' }, + { { OPERAND_ars }, 'i' } +}; + +static xtensa_iclass_internal iclasses[] = { + { 0, 0 /* xt_iclass_excw */, + 0, 0, 0, 0 }, + { 0, 0 /* xt_iclass_rfe */, + 2, Iclass_xt_iclass_rfe_stateArgs, 0, 
0 }, + { 0, 0 /* xt_iclass_rfde */, + 1, Iclass_xt_iclass_rfde_stateArgs, 0, 0 }, + { 0, 0 /* xt_iclass_syscall */, + 0, 0, 0, 0 }, + { 0, 0 /* xt_iclass_simcall */, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_add_n_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_addi_n_args, + 0, 0, 0, 0 }, + { 2, Iclass_xt_iclass_bz6_args, + 0, 0, 0, 0 }, + { 0, 0 /* xt_iclass_ill_n */, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_loadi4_args, + 0, 0, 0, 0 }, + { 2, Iclass_xt_iclass_mov_n_args, + 0, 0, 0, 0 }, + { 2, Iclass_xt_iclass_movi_n_args, + 0, 0, 0, 0 }, + { 0, 0 /* xt_iclass_nopn */, + 0, 0, 0, 0 }, + { 1, Iclass_xt_iclass_retn_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_storei4_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_addi_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_addmi_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_addsub_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_bit_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_bsi8_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_bsi8b_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_bsi8u_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_bst8_args, + 0, 0, 0, 0 }, + { 2, Iclass_xt_iclass_bsz12_args, + 0, 0, 0, 0 }, + { 2, Iclass_xt_iclass_call0_args, + 0, 0, 0, 0 }, + { 2, Iclass_xt_iclass_callx0_args, + 0, 0, 0, 0 }, + { 4, Iclass_xt_iclass_exti_args, + 0, 0, 0, 0 }, + { 0, 0 /* xt_iclass_ill */, + 0, 0, 0, 0 }, + { 1, Iclass_xt_iclass_jump_args, + 0, 0, 0, 0 }, + { 1, Iclass_xt_iclass_jumpx_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_l16ui_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_l16si_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_l32i_args, + 0, 0, 0, 0 }, + { 2, Iclass_xt_iclass_l32r_args, + 2, Iclass_xt_iclass_l32r_stateArgs, 0, 0 }, + { 3, Iclass_xt_iclass_l8i_args, + 0, 0, 0, 0 }, + { 2, Iclass_xt_iclass_movi_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_movz_args, + 0, 0, 0, 0 }, + { 2, Iclass_xt_iclass_neg_args, + 0, 0, 0, 0 }, + { 0, 0 /* xt_iclass_nop */, + 0, 0, 0, 0 }, + { 1, Iclass_xt_iclass_return_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_s16i_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_s32i_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_s8i_args, + 0, 0, 0, 0 }, + { 1, Iclass_xt_iclass_sar_args, + 1, Iclass_xt_iclass_sar_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_sari_args, + 1, Iclass_xt_iclass_sari_stateArgs, 0, 0 }, + { 2, Iclass_xt_iclass_shifts_args, + 1, Iclass_xt_iclass_shifts_stateArgs, 0, 0 }, + { 3, Iclass_xt_iclass_shiftst_args, + 1, Iclass_xt_iclass_shiftst_stateArgs, 0, 0 }, + { 2, Iclass_xt_iclass_shiftt_args, + 1, Iclass_xt_iclass_shiftt_stateArgs, 0, 0 }, + { 3, Iclass_xt_iclass_slli_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_srai_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_iclass_srli_args, + 0, 0, 0, 0 }, + { 0, 0 /* xt_iclass_memw */, + 0, 0, 0, 0 }, + { 0, 0 /* xt_iclass_extw */, + 0, 0, 0, 0 }, + { 0, 0 /* xt_iclass_isync */, + 0, 0, 0, 0 }, + { 0, 0 /* xt_iclass_sync */, + 1, Iclass_xt_iclass_sync_stateArgs, 0, 0 }, + { 2, Iclass_xt_iclass_rsil_args, + 3, Iclass_xt_iclass_rsil_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_sar_args, + 1, Iclass_xt_iclass_rsr_sar_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_sar_args, + 2, Iclass_xt_iclass_wsr_sar_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_sar_args, + 1, Iclass_xt_iclass_xsr_sar_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_litbase_args, + 2, Iclass_xt_iclass_rsr_litbase_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_litbase_args, + 2, Iclass_xt_iclass_wsr_litbase_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_litbase_args, + 2, 
Iclass_xt_iclass_xsr_litbase_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_176_args, + 0, 0, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_176_args, + 0, 0, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_208_args, + 0, 0, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_ps_args, + 3, Iclass_xt_iclass_rsr_ps_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_ps_args, + 3, Iclass_xt_iclass_wsr_ps_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_ps_args, + 3, Iclass_xt_iclass_xsr_ps_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_epc1_args, + 1, Iclass_xt_iclass_rsr_epc1_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_epc1_args, + 1, Iclass_xt_iclass_wsr_epc1_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_epc1_args, + 1, Iclass_xt_iclass_xsr_epc1_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_excsave1_args, + 1, Iclass_xt_iclass_rsr_excsave1_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_excsave1_args, + 1, Iclass_xt_iclass_wsr_excsave1_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_excsave1_args, + 1, Iclass_xt_iclass_xsr_excsave1_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_epc2_args, + 1, Iclass_xt_iclass_rsr_epc2_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_epc2_args, + 1, Iclass_xt_iclass_wsr_epc2_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_epc2_args, + 1, Iclass_xt_iclass_xsr_epc2_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_excsave2_args, + 1, Iclass_xt_iclass_rsr_excsave2_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_excsave2_args, + 1, Iclass_xt_iclass_wsr_excsave2_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_excsave2_args, + 1, Iclass_xt_iclass_xsr_excsave2_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_epc3_args, + 1, Iclass_xt_iclass_rsr_epc3_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_epc3_args, + 1, Iclass_xt_iclass_wsr_epc3_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_epc3_args, + 1, Iclass_xt_iclass_xsr_epc3_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_excsave3_args, + 1, Iclass_xt_iclass_rsr_excsave3_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_excsave3_args, + 1, Iclass_xt_iclass_wsr_excsave3_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_excsave3_args, + 1, Iclass_xt_iclass_xsr_excsave3_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_eps2_args, + 1, Iclass_xt_iclass_rsr_eps2_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_eps2_args, + 1, Iclass_xt_iclass_wsr_eps2_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_eps2_args, + 1, Iclass_xt_iclass_xsr_eps2_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_eps3_args, + 1, Iclass_xt_iclass_rsr_eps3_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_eps3_args, + 1, Iclass_xt_iclass_wsr_eps3_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_eps3_args, + 1, Iclass_xt_iclass_xsr_eps3_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_excvaddr_args, + 1, Iclass_xt_iclass_rsr_excvaddr_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_excvaddr_args, + 1, Iclass_xt_iclass_wsr_excvaddr_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_excvaddr_args, + 1, Iclass_xt_iclass_xsr_excvaddr_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_depc_args, + 1, Iclass_xt_iclass_rsr_depc_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_depc_args, + 1, Iclass_xt_iclass_wsr_depc_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_depc_args, + 1, Iclass_xt_iclass_xsr_depc_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_exccause_args, + 2, Iclass_xt_iclass_rsr_exccause_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_exccause_args, + 1, Iclass_xt_iclass_wsr_exccause_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_exccause_args, + 1, Iclass_xt_iclass_xsr_exccause_stateArgs, 0, 0 }, + { 1, 
Iclass_xt_iclass_rsr_prid_args, + 0, 0, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_vecbase_args, + 1, Iclass_xt_iclass_rsr_vecbase_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_vecbase_args, + 1, Iclass_xt_iclass_wsr_vecbase_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_vecbase_args, + 1, Iclass_xt_iclass_xsr_vecbase_stateArgs, 0, 0 }, + { 3, Iclass_xt_mul16_args, + 0, 0, 0, 0 }, + { 3, Iclass_xt_mul32_args, + 0, 0, 0, 0 }, + { 1, Iclass_xt_iclass_rfi_args, + 9, Iclass_xt_iclass_rfi_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wait_args, + 1, Iclass_xt_iclass_wait_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_interrupt_args, + 1, Iclass_xt_iclass_rsr_interrupt_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_intset_args, + 2, Iclass_xt_iclass_wsr_intset_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_intclear_args, + 2, Iclass_xt_iclass_wsr_intclear_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_intenable_args, + 1, Iclass_xt_iclass_rsr_intenable_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_intenable_args, + 1, Iclass_xt_iclass_wsr_intenable_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_intenable_args, + 1, Iclass_xt_iclass_xsr_intenable_stateArgs, 0, 0 }, + { 2, Iclass_xt_iclass_break_args, + 2, Iclass_xt_iclass_break_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_break_n_args, + 2, Iclass_xt_iclass_break_n_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_dbreaka0_args, + 1, Iclass_xt_iclass_rsr_dbreaka0_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_dbreaka0_args, + 2, Iclass_xt_iclass_wsr_dbreaka0_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_dbreaka0_args, + 2, Iclass_xt_iclass_xsr_dbreaka0_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_dbreakc0_args, + 1, Iclass_xt_iclass_rsr_dbreakc0_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_dbreakc0_args, + 2, Iclass_xt_iclass_wsr_dbreakc0_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_dbreakc0_args, + 2, Iclass_xt_iclass_xsr_dbreakc0_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_ibreaka0_args, + 1, Iclass_xt_iclass_rsr_ibreaka0_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_ibreaka0_args, + 1, Iclass_xt_iclass_wsr_ibreaka0_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_ibreaka0_args, + 1, Iclass_xt_iclass_xsr_ibreaka0_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_ibreakenable_args, + 1, Iclass_xt_iclass_rsr_ibreakenable_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_ibreakenable_args, + 1, Iclass_xt_iclass_wsr_ibreakenable_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_ibreakenable_args, + 1, Iclass_xt_iclass_xsr_ibreakenable_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_debugcause_args, + 2, Iclass_xt_iclass_rsr_debugcause_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_debugcause_args, + 2, Iclass_xt_iclass_wsr_debugcause_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_debugcause_args, + 2, Iclass_xt_iclass_xsr_debugcause_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_icount_args, + 1, Iclass_xt_iclass_rsr_icount_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_icount_args, + 2, Iclass_xt_iclass_wsr_icount_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_icount_args, + 2, Iclass_xt_iclass_xsr_icount_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_icountlevel_args, + 1, Iclass_xt_iclass_rsr_icountlevel_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_icountlevel_args, + 1, Iclass_xt_iclass_wsr_icountlevel_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_icountlevel_args, + 1, Iclass_xt_iclass_xsr_icountlevel_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_ddr_args, + 1, Iclass_xt_iclass_rsr_ddr_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_ddr_args, + 2, 
Iclass_xt_iclass_wsr_ddr_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_ddr_args, + 2, Iclass_xt_iclass_xsr_ddr_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rfdo_args, + 6, Iclass_xt_iclass_rfdo_stateArgs, 0, 0 }, + { 0, 0 /* xt_iclass_rfdd */, + 1, Iclass_xt_iclass_rfdd_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_mmid_args, + 1, Iclass_xt_iclass_wsr_mmid_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_ccount_args, + 1, Iclass_xt_iclass_rsr_ccount_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_ccount_args, + 2, Iclass_xt_iclass_wsr_ccount_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_ccount_args, + 2, Iclass_xt_iclass_xsr_ccount_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_rsr_ccompare0_args, + 1, Iclass_xt_iclass_rsr_ccompare0_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_wsr_ccompare0_args, + 2, Iclass_xt_iclass_wsr_ccompare0_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_xsr_ccompare0_args, + 2, Iclass_xt_iclass_xsr_ccompare0_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_idtlb_args, + 1, Iclass_xt_iclass_idtlb_stateArgs, 0, 0 }, + { 2, Iclass_xt_iclass_rdtlb_args, + 0, 0, 0, 0 }, + { 2, Iclass_xt_iclass_wdtlb_args, + 1, Iclass_xt_iclass_wdtlb_stateArgs, 0, 0 }, + { 1, Iclass_xt_iclass_iitlb_args, + 0, 0, 0, 0 }, + { 2, Iclass_xt_iclass_ritlb_args, + 0, 0, 0, 0 }, + { 2, Iclass_xt_iclass_witlb_args, + 0, 0, 0, 0 }, + { 2, Iclass_xt_iclass_nsa_args, + 0, 0, 0, 0 }, + { 0, 0 /* xt_iclass_rer */, + 0, 0, 0, 0 }, + { 0, 0 /* xt_iclass_wer */, + 0, 0, 0, 0 } +}; + +enum xtensa_iclass_id { + ICLASS_xt_iclass_excw, + ICLASS_xt_iclass_rfe, + ICLASS_xt_iclass_rfde, + ICLASS_xt_iclass_syscall, + ICLASS_xt_iclass_simcall, + ICLASS_xt_iclass_add_n, + ICLASS_xt_iclass_addi_n, + ICLASS_xt_iclass_bz6, + ICLASS_xt_iclass_ill_n, + ICLASS_xt_iclass_loadi4, + ICLASS_xt_iclass_mov_n, + ICLASS_xt_iclass_movi_n, + ICLASS_xt_iclass_nopn, + ICLASS_xt_iclass_retn, + ICLASS_xt_iclass_storei4, + ICLASS_xt_iclass_addi, + ICLASS_xt_iclass_addmi, + ICLASS_xt_iclass_addsub, + ICLASS_xt_iclass_bit, + ICLASS_xt_iclass_bsi8, + ICLASS_xt_iclass_bsi8b, + ICLASS_xt_iclass_bsi8u, + ICLASS_xt_iclass_bst8, + ICLASS_xt_iclass_bsz12, + ICLASS_xt_iclass_call0, + ICLASS_xt_iclass_callx0, + ICLASS_xt_iclass_exti, + ICLASS_xt_iclass_ill, + ICLASS_xt_iclass_jump, + ICLASS_xt_iclass_jumpx, + ICLASS_xt_iclass_l16ui, + ICLASS_xt_iclass_l16si, + ICLASS_xt_iclass_l32i, + ICLASS_xt_iclass_l32r, + ICLASS_xt_iclass_l8i, + ICLASS_xt_iclass_movi, + ICLASS_xt_iclass_movz, + ICLASS_xt_iclass_neg, + ICLASS_xt_iclass_nop, + ICLASS_xt_iclass_return, + ICLASS_xt_iclass_s16i, + ICLASS_xt_iclass_s32i, + ICLASS_xt_iclass_s8i, + ICLASS_xt_iclass_sar, + ICLASS_xt_iclass_sari, + ICLASS_xt_iclass_shifts, + ICLASS_xt_iclass_shiftst, + ICLASS_xt_iclass_shiftt, + ICLASS_xt_iclass_slli, + ICLASS_xt_iclass_srai, + ICLASS_xt_iclass_srli, + ICLASS_xt_iclass_memw, + ICLASS_xt_iclass_extw, + ICLASS_xt_iclass_isync, + ICLASS_xt_iclass_sync, + ICLASS_xt_iclass_rsil, + ICLASS_xt_iclass_rsr_sar, + ICLASS_xt_iclass_wsr_sar, + ICLASS_xt_iclass_xsr_sar, + ICLASS_xt_iclass_rsr_litbase, + ICLASS_xt_iclass_wsr_litbase, + ICLASS_xt_iclass_xsr_litbase, + ICLASS_xt_iclass_rsr_176, + ICLASS_xt_iclass_wsr_176, + ICLASS_xt_iclass_rsr_208, + ICLASS_xt_iclass_rsr_ps, + ICLASS_xt_iclass_wsr_ps, + ICLASS_xt_iclass_xsr_ps, + ICLASS_xt_iclass_rsr_epc1, + ICLASS_xt_iclass_wsr_epc1, + ICLASS_xt_iclass_xsr_epc1, + ICLASS_xt_iclass_rsr_excsave1, + ICLASS_xt_iclass_wsr_excsave1, + ICLASS_xt_iclass_xsr_excsave1, + ICLASS_xt_iclass_rsr_epc2, + ICLASS_xt_iclass_wsr_epc2, + ICLASS_xt_iclass_xsr_epc2, + 
ICLASS_xt_iclass_rsr_excsave2, + ICLASS_xt_iclass_wsr_excsave2, + ICLASS_xt_iclass_xsr_excsave2, + ICLASS_xt_iclass_rsr_epc3, + ICLASS_xt_iclass_wsr_epc3, + ICLASS_xt_iclass_xsr_epc3, + ICLASS_xt_iclass_rsr_excsave3, + ICLASS_xt_iclass_wsr_excsave3, + ICLASS_xt_iclass_xsr_excsave3, + ICLASS_xt_iclass_rsr_eps2, + ICLASS_xt_iclass_wsr_eps2, + ICLASS_xt_iclass_xsr_eps2, + ICLASS_xt_iclass_rsr_eps3, + ICLASS_xt_iclass_wsr_eps3, + ICLASS_xt_iclass_xsr_eps3, + ICLASS_xt_iclass_rsr_excvaddr, + ICLASS_xt_iclass_wsr_excvaddr, + ICLASS_xt_iclass_xsr_excvaddr, + ICLASS_xt_iclass_rsr_depc, + ICLASS_xt_iclass_wsr_depc, + ICLASS_xt_iclass_xsr_depc, + ICLASS_xt_iclass_rsr_exccause, + ICLASS_xt_iclass_wsr_exccause, + ICLASS_xt_iclass_xsr_exccause, + ICLASS_xt_iclass_rsr_prid, + ICLASS_xt_iclass_rsr_vecbase, + ICLASS_xt_iclass_wsr_vecbase, + ICLASS_xt_iclass_xsr_vecbase, + ICLASS_xt_mul16, + ICLASS_xt_mul32, + ICLASS_xt_iclass_rfi, + ICLASS_xt_iclass_wait, + ICLASS_xt_iclass_rsr_interrupt, + ICLASS_xt_iclass_wsr_intset, + ICLASS_xt_iclass_wsr_intclear, + ICLASS_xt_iclass_rsr_intenable, + ICLASS_xt_iclass_wsr_intenable, + ICLASS_xt_iclass_xsr_intenable, + ICLASS_xt_iclass_break, + ICLASS_xt_iclass_break_n, + ICLASS_xt_iclass_rsr_dbreaka0, + ICLASS_xt_iclass_wsr_dbreaka0, + ICLASS_xt_iclass_xsr_dbreaka0, + ICLASS_xt_iclass_rsr_dbreakc0, + ICLASS_xt_iclass_wsr_dbreakc0, + ICLASS_xt_iclass_xsr_dbreakc0, + ICLASS_xt_iclass_rsr_ibreaka0, + ICLASS_xt_iclass_wsr_ibreaka0, + ICLASS_xt_iclass_xsr_ibreaka0, + ICLASS_xt_iclass_rsr_ibreakenable, + ICLASS_xt_iclass_wsr_ibreakenable, + ICLASS_xt_iclass_xsr_ibreakenable, + ICLASS_xt_iclass_rsr_debugcause, + ICLASS_xt_iclass_wsr_debugcause, + ICLASS_xt_iclass_xsr_debugcause, + ICLASS_xt_iclass_rsr_icount, + ICLASS_xt_iclass_wsr_icount, + ICLASS_xt_iclass_xsr_icount, + ICLASS_xt_iclass_rsr_icountlevel, + ICLASS_xt_iclass_wsr_icountlevel, + ICLASS_xt_iclass_xsr_icountlevel, + ICLASS_xt_iclass_rsr_ddr, + ICLASS_xt_iclass_wsr_ddr, + ICLASS_xt_iclass_xsr_ddr, + ICLASS_xt_iclass_rfdo, + ICLASS_xt_iclass_rfdd, + ICLASS_xt_iclass_wsr_mmid, + ICLASS_xt_iclass_rsr_ccount, + ICLASS_xt_iclass_wsr_ccount, + ICLASS_xt_iclass_xsr_ccount, + ICLASS_xt_iclass_rsr_ccompare0, + ICLASS_xt_iclass_wsr_ccompare0, + ICLASS_xt_iclass_xsr_ccompare0, + ICLASS_xt_iclass_idtlb, + ICLASS_xt_iclass_rdtlb, + ICLASS_xt_iclass_wdtlb, + ICLASS_xt_iclass_iitlb, + ICLASS_xt_iclass_ritlb, + ICLASS_xt_iclass_witlb, + ICLASS_xt_iclass_nsa, + ICLASS_xt_iclass_rer, + ICLASS_xt_iclass_wer +}; + + +/* Opcode encodings. 
*/ + +static void +Opcode_excw_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x2080; +} + +static void +Opcode_rfe_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3000; +} + +static void +Opcode_rfde_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3200; +} + +static void +Opcode_syscall_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x5000; +} + +static void +Opcode_simcall_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x5100; +} + +static void +Opcode_add_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xa; +} + +static void +Opcode_addi_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xb; +} + +static void +Opcode_beqz_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x8c; +} + +static void +Opcode_bnez_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xcc; +} + +static void +Opcode_ill_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xf06d; +} + +static void +Opcode_l32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x8; +} + +static void +Opcode_mov_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xd; +} + +static void +Opcode_movi_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xc; +} + +static void +Opcode_nop_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xf03d; +} + +static void +Opcode_ret_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xf00d; +} + +static void +Opcode_s32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x9; +} + +static void +Opcode_addi_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xc002; +} + +static void +Opcode_addmi_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xd002; +} + +static void +Opcode_add_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x800000; +} + +static void +Opcode_sub_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xc00000; +} + +static void +Opcode_addx2_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x900000; +} + +static void +Opcode_addx4_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xa00000; +} + +static void +Opcode_addx8_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xb00000; +} + +static void +Opcode_subx2_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xd00000; +} + +static void +Opcode_subx4_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xe00000; +} + +static void +Opcode_subx8_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xf00000; +} + +static void +Opcode_and_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x100000; +} + +static void +Opcode_or_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x200000; +} + +static void +Opcode_xor_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x300000; +} + +static void +Opcode_beqi_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x26; +} + +static void +Opcode_bnei_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x66; +} + +static void +Opcode_bgei_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xe6; +} + +static void +Opcode_blti_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xa6; +} + +static void +Opcode_bbci_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x6007; +} + +static void +Opcode_bbsi_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xe007; +} + +static void 
+Opcode_bgeui_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xf6; +} + +static void +Opcode_bltui_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xb6; +} + +static void +Opcode_beq_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x1007; +} + +static void +Opcode_bne_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x9007; +} + +static void +Opcode_bge_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xa007; +} + +static void +Opcode_blt_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x2007; +} + +static void +Opcode_bgeu_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xb007; +} + +static void +Opcode_bltu_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3007; +} + +static void +Opcode_bany_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x8007; +} + +static void +Opcode_bnone_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x7; +} + +static void +Opcode_ball_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x4007; +} + +static void +Opcode_bnall_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xc007; +} + +static void +Opcode_bbc_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x5007; +} + +static void +Opcode_bbs_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xd007; +} + +static void +Opcode_beqz_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x16; +} + +static void +Opcode_bnez_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x56; +} + +static void +Opcode_bgez_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xd6; +} + +static void +Opcode_bltz_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x96; +} + +static void +Opcode_call0_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x5; +} + +static void +Opcode_callx0_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xc0; +} + +static void +Opcode_extui_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x40000; +} + +static void +Opcode_ill_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0; +} + +static void +Opcode_j_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x6; +} + +static void +Opcode_jx_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xa0; +} + +static void +Opcode_l16ui_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x1002; +} + +static void +Opcode_l16si_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x9002; +} + +static void +Opcode_l32i_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x2002; +} + +static void +Opcode_l32r_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x1; +} + +static void +Opcode_l8ui_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x2; +} + +static void +Opcode_movi_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xa002; +} + +static void +Opcode_moveqz_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x830000; +} + +static void +Opcode_movnez_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x930000; +} + +static void +Opcode_movltz_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xa30000; +} + +static void +Opcode_movgez_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xb30000; +} + +static void +Opcode_neg_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x600000; +} + +static void +Opcode_abs_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x600100; +} + +static void 
+Opcode_nop_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x20f0; +} + +static void +Opcode_ret_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x80; +} + +static void +Opcode_s16i_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x5002; +} + +static void +Opcode_s32i_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x6002; +} + +static void +Opcode_s8i_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x4002; +} + +static void +Opcode_ssr_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x400000; +} + +static void +Opcode_ssl_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x401000; +} + +static void +Opcode_ssa8l_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x402000; +} + +static void +Opcode_ssa8b_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x403000; +} + +static void +Opcode_ssai_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x404000; +} + +static void +Opcode_sll_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xa10000; +} + +static void +Opcode_src_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x810000; +} + +static void +Opcode_srl_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x910000; +} + +static void +Opcode_sra_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xb10000; +} + +static void +Opcode_slli_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x10000; +} + +static void +Opcode_srai_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x210000; +} + +static void +Opcode_srli_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x410000; +} + +static void +Opcode_memw_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x20c0; +} + +static void +Opcode_extw_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x20d0; +} + +static void +Opcode_isync_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x2000; +} + +static void +Opcode_rsync_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x2010; +} + +static void +Opcode_esync_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x2020; +} + +static void +Opcode_dsync_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x2030; +} + +static void +Opcode_rsil_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x6000; +} + +static void +Opcode_rsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x30300; +} + +static void +Opcode_wsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x130300; +} + +static void +Opcode_xsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x610300; +} + +static void +Opcode_rsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x30500; +} + +static void +Opcode_wsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x130500; +} + +static void +Opcode_xsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x610500; +} + +static void +Opcode_rsr_176_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3b000; +} + +static void +Opcode_wsr_176_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13b000; +} + +static void +Opcode_rsr_208_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3d000; +} + +static void +Opcode_rsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3e600; +} + +static void +Opcode_wsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13e600; +} + +static void 
+Opcode_xsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61e600; +} + +static void +Opcode_rsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3b100; +} + +static void +Opcode_wsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13b100; +} + +static void +Opcode_xsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61b100; +} + +static void +Opcode_rsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3d100; +} + +static void +Opcode_wsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13d100; +} + +static void +Opcode_xsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61d100; +} + +static void +Opcode_rsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3b200; +} + +static void +Opcode_wsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13b200; +} + +static void +Opcode_xsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61b200; +} + +static void +Opcode_rsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3d200; +} + +static void +Opcode_wsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13d200; +} + +static void +Opcode_xsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61d200; +} + +static void +Opcode_rsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3b300; +} + +static void +Opcode_wsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13b300; +} + +static void +Opcode_xsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61b300; +} + +static void +Opcode_rsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3d300; +} + +static void +Opcode_wsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13d300; +} + +static void +Opcode_xsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61d300; +} + +static void +Opcode_rsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3c200; +} + +static void +Opcode_wsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13c200; +} + +static void +Opcode_xsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61c200; +} + +static void +Opcode_rsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3c300; +} + +static void +Opcode_wsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13c300; +} + +static void +Opcode_xsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61c300; +} + +static void +Opcode_rsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3ee00; +} + +static void +Opcode_wsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13ee00; +} + +static void +Opcode_xsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61ee00; +} + +static void +Opcode_rsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3c000; +} + +static void +Opcode_wsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13c000; +} + +static void +Opcode_xsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61c000; +} + +static void +Opcode_rsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3e800; +} + +static void +Opcode_wsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13e800; +} + +static void +Opcode_xsr_exccause_Slot_inst_encode 
(xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61e800; +} + +static void +Opcode_rsr_prid_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3eb00; +} + +static void +Opcode_rsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3e700; +} + +static void +Opcode_wsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13e700; +} + +static void +Opcode_xsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61e700; +} + +static void +Opcode_mul16u_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xc10000; +} + +static void +Opcode_mul16s_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xd10000; +} + +static void +Opcode_mull_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x820000; +} + +static void +Opcode_rfi_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3010; +} + +static void +Opcode_waiti_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x7000; +} + +static void +Opcode_rsr_interrupt_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3e200; +} + +static void +Opcode_wsr_intset_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13e200; +} + +static void +Opcode_wsr_intclear_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13e300; +} + +static void +Opcode_rsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3e400; +} + +static void +Opcode_wsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13e400; +} + +static void +Opcode_xsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61e400; +} + +static void +Opcode_break_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x4000; +} + +static void +Opcode_break_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xf02d; +} + +static void +Opcode_rsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x39000; +} + +static void +Opcode_wsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x139000; +} + +static void +Opcode_xsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x619000; +} + +static void +Opcode_rsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3a000; +} + +static void +Opcode_wsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13a000; +} + +static void +Opcode_xsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61a000; +} + +static void +Opcode_rsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x38000; +} + +static void +Opcode_wsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x138000; +} + +static void +Opcode_xsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x618000; +} + +static void +Opcode_rsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x36000; +} + +static void +Opcode_wsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x136000; +} + +static void +Opcode_xsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x616000; +} + +static void +Opcode_rsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3e900; +} + +static void +Opcode_wsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13e900; +} + +static void +Opcode_xsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61e900; +} + +static void +Opcode_rsr_icount_Slot_inst_encode 
(xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3ec00; +} + +static void +Opcode_wsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13ec00; +} + +static void +Opcode_xsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61ec00; +} + +static void +Opcode_rsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3ed00; +} + +static void +Opcode_wsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13ed00; +} + +static void +Opcode_xsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61ed00; +} + +static void +Opcode_rsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x36800; +} + +static void +Opcode_wsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x136800; +} + +static void +Opcode_xsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x616800; +} + +static void +Opcode_rfdo_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xf1e000; +} + +static void +Opcode_rfdd_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0xf1e010; +} + +static void +Opcode_wsr_mmid_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x135900; +} + +static void +Opcode_rsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3ea00; +} + +static void +Opcode_wsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13ea00; +} + +static void +Opcode_xsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61ea00; +} + +static void +Opcode_rsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x3f000; +} + +static void +Opcode_wsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x13f000; +} + +static void +Opcode_xsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x61f000; +} + +static void +Opcode_idtlb_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x50c000; +} + +static void +Opcode_pdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x50d000; +} + +static void +Opcode_rdtlb0_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x50b000; +} + +static void +Opcode_rdtlb1_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x50f000; +} + +static void +Opcode_wdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x50e000; +} + +static void +Opcode_iitlb_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x504000; +} + +static void +Opcode_pitlb_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x505000; +} + +static void +Opcode_ritlb0_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x503000; +} + +static void +Opcode_ritlb1_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x507000; +} + +static void +Opcode_witlb_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x506000; +} + +static void +Opcode_nsa_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x40e000; +} + +static void +Opcode_nsau_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x40f000; +} + +static void +Opcode_rer_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x406000; +} + +static void +Opcode_wer_Slot_inst_encode (xtensa_insnbuf slotbuf) +{ + slotbuf[0] = 0x407000; +} + +static xtensa_opcode_encode_fn Opcode_excw_encode_fns[] = { + Opcode_excw_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rfe_encode_fns[] = { + Opcode_rfe_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn 
Opcode_rfde_encode_fns[] = { + Opcode_rfde_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_syscall_encode_fns[] = { + Opcode_syscall_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_simcall_encode_fns[] = { + Opcode_simcall_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_add_n_encode_fns[] = { + 0, Opcode_add_n_Slot_inst16a_encode, 0 +}; + +static xtensa_opcode_encode_fn Opcode_addi_n_encode_fns[] = { + 0, Opcode_addi_n_Slot_inst16a_encode, 0 +}; + +static xtensa_opcode_encode_fn Opcode_beqz_n_encode_fns[] = { + 0, 0, Opcode_beqz_n_Slot_inst16b_encode +}; + +static xtensa_opcode_encode_fn Opcode_bnez_n_encode_fns[] = { + 0, 0, Opcode_bnez_n_Slot_inst16b_encode +}; + +static xtensa_opcode_encode_fn Opcode_ill_n_encode_fns[] = { + 0, 0, Opcode_ill_n_Slot_inst16b_encode +}; + +static xtensa_opcode_encode_fn Opcode_l32i_n_encode_fns[] = { + 0, Opcode_l32i_n_Slot_inst16a_encode, 0 +}; + +static xtensa_opcode_encode_fn Opcode_mov_n_encode_fns[] = { + 0, 0, Opcode_mov_n_Slot_inst16b_encode +}; + +static xtensa_opcode_encode_fn Opcode_movi_n_encode_fns[] = { + 0, 0, Opcode_movi_n_Slot_inst16b_encode +}; + +static xtensa_opcode_encode_fn Opcode_nop_n_encode_fns[] = { + 0, 0, Opcode_nop_n_Slot_inst16b_encode +}; + +static xtensa_opcode_encode_fn Opcode_ret_n_encode_fns[] = { + 0, 0, Opcode_ret_n_Slot_inst16b_encode +}; + +static xtensa_opcode_encode_fn Opcode_s32i_n_encode_fns[] = { + 0, Opcode_s32i_n_Slot_inst16a_encode, 0 +}; + +static xtensa_opcode_encode_fn Opcode_addi_encode_fns[] = { + Opcode_addi_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_addmi_encode_fns[] = { + Opcode_addmi_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_add_encode_fns[] = { + Opcode_add_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_sub_encode_fns[] = { + Opcode_sub_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_addx2_encode_fns[] = { + Opcode_addx2_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_addx4_encode_fns[] = { + Opcode_addx4_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_addx8_encode_fns[] = { + Opcode_addx8_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_subx2_encode_fns[] = { + Opcode_subx2_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_subx4_encode_fns[] = { + Opcode_subx4_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_subx8_encode_fns[] = { + Opcode_subx8_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_and_encode_fns[] = { + Opcode_and_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_or_encode_fns[] = { + Opcode_or_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xor_encode_fns[] = { + Opcode_xor_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_beqi_encode_fns[] = { + Opcode_beqi_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_bnei_encode_fns[] = { + Opcode_bnei_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_bgei_encode_fns[] = { + Opcode_bgei_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_blti_encode_fns[] = { + Opcode_blti_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_bbci_encode_fns[] = { + Opcode_bbci_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_bbsi_encode_fns[] = { + Opcode_bbsi_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_bgeui_encode_fns[] = { + 
Opcode_bgeui_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_bltui_encode_fns[] = { + Opcode_bltui_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_beq_encode_fns[] = { + Opcode_beq_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_bne_encode_fns[] = { + Opcode_bne_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_bge_encode_fns[] = { + Opcode_bge_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_blt_encode_fns[] = { + Opcode_blt_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_bgeu_encode_fns[] = { + Opcode_bgeu_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_bltu_encode_fns[] = { + Opcode_bltu_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_bany_encode_fns[] = { + Opcode_bany_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_bnone_encode_fns[] = { + Opcode_bnone_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_ball_encode_fns[] = { + Opcode_ball_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_bnall_encode_fns[] = { + Opcode_bnall_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_bbc_encode_fns[] = { + Opcode_bbc_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_bbs_encode_fns[] = { + Opcode_bbs_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_beqz_encode_fns[] = { + Opcode_beqz_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_bnez_encode_fns[] = { + Opcode_bnez_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_bgez_encode_fns[] = { + Opcode_bgez_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_bltz_encode_fns[] = { + Opcode_bltz_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_call0_encode_fns[] = { + Opcode_call0_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_callx0_encode_fns[] = { + Opcode_callx0_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_extui_encode_fns[] = { + Opcode_extui_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_ill_encode_fns[] = { + Opcode_ill_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_j_encode_fns[] = { + Opcode_j_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_jx_encode_fns[] = { + Opcode_jx_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_l16ui_encode_fns[] = { + Opcode_l16ui_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_l16si_encode_fns[] = { + Opcode_l16si_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_l32i_encode_fns[] = { + Opcode_l32i_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_l32r_encode_fns[] = { + Opcode_l32r_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_l8ui_encode_fns[] = { + Opcode_l8ui_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_movi_encode_fns[] = { + Opcode_movi_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_moveqz_encode_fns[] = { + Opcode_moveqz_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_movnez_encode_fns[] = { + Opcode_movnez_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_movltz_encode_fns[] = { + Opcode_movltz_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_movgez_encode_fns[] = { + Opcode_movgez_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_neg_encode_fns[] = { + 
Opcode_neg_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_abs_encode_fns[] = { + Opcode_abs_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_nop_encode_fns[] = { + Opcode_nop_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_ret_encode_fns[] = { + Opcode_ret_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_s16i_encode_fns[] = { + Opcode_s16i_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_s32i_encode_fns[] = { + Opcode_s32i_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_s8i_encode_fns[] = { + Opcode_s8i_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_ssr_encode_fns[] = { + Opcode_ssr_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_ssl_encode_fns[] = { + Opcode_ssl_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_ssa8l_encode_fns[] = { + Opcode_ssa8l_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_ssa8b_encode_fns[] = { + Opcode_ssa8b_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_ssai_encode_fns[] = { + Opcode_ssai_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_sll_encode_fns[] = { + Opcode_sll_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_src_encode_fns[] = { + Opcode_src_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_srl_encode_fns[] = { + Opcode_srl_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_sra_encode_fns[] = { + Opcode_sra_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_slli_encode_fns[] = { + Opcode_slli_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_srai_encode_fns[] = { + Opcode_srai_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_srli_encode_fns[] = { + Opcode_srli_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_memw_encode_fns[] = { + Opcode_memw_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_extw_encode_fns[] = { + Opcode_extw_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_isync_encode_fns[] = { + Opcode_isync_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsync_encode_fns[] = { + Opcode_rsync_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_esync_encode_fns[] = { + Opcode_esync_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_dsync_encode_fns[] = { + Opcode_dsync_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsil_encode_fns[] = { + Opcode_rsil_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_sar_encode_fns[] = { + Opcode_rsr_sar_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_sar_encode_fns[] = { + Opcode_wsr_sar_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_sar_encode_fns[] = { + Opcode_xsr_sar_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_litbase_encode_fns[] = { + Opcode_rsr_litbase_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_litbase_encode_fns[] = { + Opcode_wsr_litbase_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_litbase_encode_fns[] = { + Opcode_xsr_litbase_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_176_encode_fns[] = { + Opcode_rsr_176_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_176_encode_fns[] = { + Opcode_wsr_176_Slot_inst_encode, 0, 0 +}; + +static 
xtensa_opcode_encode_fn Opcode_rsr_208_encode_fns[] = { + Opcode_rsr_208_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_ps_encode_fns[] = { + Opcode_rsr_ps_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_ps_encode_fns[] = { + Opcode_wsr_ps_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_ps_encode_fns[] = { + Opcode_xsr_ps_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_epc1_encode_fns[] = { + Opcode_rsr_epc1_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_epc1_encode_fns[] = { + Opcode_wsr_epc1_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_epc1_encode_fns[] = { + Opcode_xsr_epc1_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_excsave1_encode_fns[] = { + Opcode_rsr_excsave1_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_excsave1_encode_fns[] = { + Opcode_wsr_excsave1_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_excsave1_encode_fns[] = { + Opcode_xsr_excsave1_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_epc2_encode_fns[] = { + Opcode_rsr_epc2_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_epc2_encode_fns[] = { + Opcode_wsr_epc2_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_epc2_encode_fns[] = { + Opcode_xsr_epc2_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_excsave2_encode_fns[] = { + Opcode_rsr_excsave2_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_excsave2_encode_fns[] = { + Opcode_wsr_excsave2_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_excsave2_encode_fns[] = { + Opcode_xsr_excsave2_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_epc3_encode_fns[] = { + Opcode_rsr_epc3_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_epc3_encode_fns[] = { + Opcode_wsr_epc3_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_epc3_encode_fns[] = { + Opcode_xsr_epc3_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_excsave3_encode_fns[] = { + Opcode_rsr_excsave3_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_excsave3_encode_fns[] = { + Opcode_wsr_excsave3_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_excsave3_encode_fns[] = { + Opcode_xsr_excsave3_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_eps2_encode_fns[] = { + Opcode_rsr_eps2_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_eps2_encode_fns[] = { + Opcode_wsr_eps2_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_eps2_encode_fns[] = { + Opcode_xsr_eps2_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_eps3_encode_fns[] = { + Opcode_rsr_eps3_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_eps3_encode_fns[] = { + Opcode_wsr_eps3_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_eps3_encode_fns[] = { + Opcode_xsr_eps3_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_excvaddr_encode_fns[] = { + Opcode_rsr_excvaddr_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_excvaddr_encode_fns[] = { + Opcode_wsr_excvaddr_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_excvaddr_encode_fns[] = { + Opcode_xsr_excvaddr_Slot_inst_encode, 0, 0 +}; + +static 
xtensa_opcode_encode_fn Opcode_rsr_depc_encode_fns[] = { + Opcode_rsr_depc_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_depc_encode_fns[] = { + Opcode_wsr_depc_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_depc_encode_fns[] = { + Opcode_xsr_depc_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_exccause_encode_fns[] = { + Opcode_rsr_exccause_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_exccause_encode_fns[] = { + Opcode_wsr_exccause_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_exccause_encode_fns[] = { + Opcode_xsr_exccause_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_prid_encode_fns[] = { + Opcode_rsr_prid_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_vecbase_encode_fns[] = { + Opcode_rsr_vecbase_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_vecbase_encode_fns[] = { + Opcode_wsr_vecbase_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_vecbase_encode_fns[] = { + Opcode_xsr_vecbase_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_mul16u_encode_fns[] = { + Opcode_mul16u_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_mul16s_encode_fns[] = { + Opcode_mul16s_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_mull_encode_fns[] = { + Opcode_mull_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rfi_encode_fns[] = { + Opcode_rfi_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_waiti_encode_fns[] = { + Opcode_waiti_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_interrupt_encode_fns[] = { + Opcode_rsr_interrupt_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_intset_encode_fns[] = { + Opcode_wsr_intset_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_intclear_encode_fns[] = { + Opcode_wsr_intclear_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_intenable_encode_fns[] = { + Opcode_rsr_intenable_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_intenable_encode_fns[] = { + Opcode_wsr_intenable_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_intenable_encode_fns[] = { + Opcode_xsr_intenable_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_break_encode_fns[] = { + Opcode_break_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_break_n_encode_fns[] = { + 0, 0, Opcode_break_n_Slot_inst16b_encode +}; + +static xtensa_opcode_encode_fn Opcode_rsr_dbreaka0_encode_fns[] = { + Opcode_rsr_dbreaka0_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_dbreaka0_encode_fns[] = { + Opcode_wsr_dbreaka0_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_dbreaka0_encode_fns[] = { + Opcode_xsr_dbreaka0_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_dbreakc0_encode_fns[] = { + Opcode_rsr_dbreakc0_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_dbreakc0_encode_fns[] = { + Opcode_wsr_dbreakc0_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_dbreakc0_encode_fns[] = { + Opcode_xsr_dbreakc0_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_ibreaka0_encode_fns[] = { + Opcode_rsr_ibreaka0_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_ibreaka0_encode_fns[] = { + 
Opcode_wsr_ibreaka0_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_ibreaka0_encode_fns[] = { + Opcode_xsr_ibreaka0_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_ibreakenable_encode_fns[] = { + Opcode_rsr_ibreakenable_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_ibreakenable_encode_fns[] = { + Opcode_wsr_ibreakenable_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_ibreakenable_encode_fns[] = { + Opcode_xsr_ibreakenable_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_debugcause_encode_fns[] = { + Opcode_rsr_debugcause_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_debugcause_encode_fns[] = { + Opcode_wsr_debugcause_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_debugcause_encode_fns[] = { + Opcode_xsr_debugcause_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_icount_encode_fns[] = { + Opcode_rsr_icount_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_icount_encode_fns[] = { + Opcode_wsr_icount_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_icount_encode_fns[] = { + Opcode_xsr_icount_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_icountlevel_encode_fns[] = { + Opcode_rsr_icountlevel_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_icountlevel_encode_fns[] = { + Opcode_wsr_icountlevel_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_icountlevel_encode_fns[] = { + Opcode_xsr_icountlevel_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_ddr_encode_fns[] = { + Opcode_rsr_ddr_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_ddr_encode_fns[] = { + Opcode_wsr_ddr_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_ddr_encode_fns[] = { + Opcode_xsr_ddr_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rfdo_encode_fns[] = { + Opcode_rfdo_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rfdd_encode_fns[] = { + Opcode_rfdd_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_mmid_encode_fns[] = { + Opcode_wsr_mmid_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_ccount_encode_fns[] = { + Opcode_rsr_ccount_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_ccount_encode_fns[] = { + Opcode_wsr_ccount_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_ccount_encode_fns[] = { + Opcode_xsr_ccount_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rsr_ccompare0_encode_fns[] = { + Opcode_rsr_ccompare0_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wsr_ccompare0_encode_fns[] = { + Opcode_wsr_ccompare0_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_xsr_ccompare0_encode_fns[] = { + Opcode_xsr_ccompare0_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_idtlb_encode_fns[] = { + Opcode_idtlb_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_pdtlb_encode_fns[] = { + Opcode_pdtlb_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rdtlb0_encode_fns[] = { + Opcode_rdtlb0_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rdtlb1_encode_fns[] = { + Opcode_rdtlb1_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wdtlb_encode_fns[] = { + Opcode_wdtlb_Slot_inst_encode, 0, 0 +}; + 
+static xtensa_opcode_encode_fn Opcode_iitlb_encode_fns[] = { + Opcode_iitlb_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_pitlb_encode_fns[] = { + Opcode_pitlb_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_ritlb0_encode_fns[] = { + Opcode_ritlb0_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_ritlb1_encode_fns[] = { + Opcode_ritlb1_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_witlb_encode_fns[] = { + Opcode_witlb_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_nsa_encode_fns[] = { + Opcode_nsa_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_nsau_encode_fns[] = { + Opcode_nsau_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_rer_encode_fns[] = { + Opcode_rer_Slot_inst_encode, 0, 0 +}; + +static xtensa_opcode_encode_fn Opcode_wer_encode_fns[] = { + Opcode_wer_Slot_inst_encode, 0, 0 +}; + + +/* Opcode table. */ + +static xtensa_opcode_internal opcodes[] = { + { "excw", ICLASS_xt_iclass_excw, + 0, + Opcode_excw_encode_fns, 0, 0 }, + { "rfe", ICLASS_xt_iclass_rfe, + XTENSA_OPCODE_IS_JUMP, + Opcode_rfe_encode_fns, 0, 0 }, + { "rfde", ICLASS_xt_iclass_rfde, + XTENSA_OPCODE_IS_JUMP, + Opcode_rfde_encode_fns, 0, 0 }, + { "syscall", ICLASS_xt_iclass_syscall, + 0, + Opcode_syscall_encode_fns, 0, 0 }, + { "simcall", ICLASS_xt_iclass_simcall, + 0, + Opcode_simcall_encode_fns, 0, 0 }, + { "add.n", ICLASS_xt_iclass_add_n, + 0, + Opcode_add_n_encode_fns, 0, 0 }, + { "addi.n", ICLASS_xt_iclass_addi_n, + 0, + Opcode_addi_n_encode_fns, 0, 0 }, + { "beqz.n", ICLASS_xt_iclass_bz6, + XTENSA_OPCODE_IS_BRANCH, + Opcode_beqz_n_encode_fns, 0, 0 }, + { "bnez.n", ICLASS_xt_iclass_bz6, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bnez_n_encode_fns, 0, 0 }, + { "ill.n", ICLASS_xt_iclass_ill_n, + 0, + Opcode_ill_n_encode_fns, 0, 0 }, + { "l32i.n", ICLASS_xt_iclass_loadi4, + 0, + Opcode_l32i_n_encode_fns, 0, 0 }, + { "mov.n", ICLASS_xt_iclass_mov_n, + 0, + Opcode_mov_n_encode_fns, 0, 0 }, + { "movi.n", ICLASS_xt_iclass_movi_n, + 0, + Opcode_movi_n_encode_fns, 0, 0 }, + { "nop.n", ICLASS_xt_iclass_nopn, + 0, + Opcode_nop_n_encode_fns, 0, 0 }, + { "ret.n", ICLASS_xt_iclass_retn, + XTENSA_OPCODE_IS_JUMP, + Opcode_ret_n_encode_fns, 0, 0 }, + { "s32i.n", ICLASS_xt_iclass_storei4, + 0, + Opcode_s32i_n_encode_fns, 0, 0 }, + { "addi", ICLASS_xt_iclass_addi, + 0, + Opcode_addi_encode_fns, 0, 0 }, + { "addmi", ICLASS_xt_iclass_addmi, + 0, + Opcode_addmi_encode_fns, 0, 0 }, + { "add", ICLASS_xt_iclass_addsub, + 0, + Opcode_add_encode_fns, 0, 0 }, + { "sub", ICLASS_xt_iclass_addsub, + 0, + Opcode_sub_encode_fns, 0, 0 }, + { "addx2", ICLASS_xt_iclass_addsub, + 0, + Opcode_addx2_encode_fns, 0, 0 }, + { "addx4", ICLASS_xt_iclass_addsub, + 0, + Opcode_addx4_encode_fns, 0, 0 }, + { "addx8", ICLASS_xt_iclass_addsub, + 0, + Opcode_addx8_encode_fns, 0, 0 }, + { "subx2", ICLASS_xt_iclass_addsub, + 0, + Opcode_subx2_encode_fns, 0, 0 }, + { "subx4", ICLASS_xt_iclass_addsub, + 0, + Opcode_subx4_encode_fns, 0, 0 }, + { "subx8", ICLASS_xt_iclass_addsub, + 0, + Opcode_subx8_encode_fns, 0, 0 }, + { "and", ICLASS_xt_iclass_bit, + 0, + Opcode_and_encode_fns, 0, 0 }, + { "or", ICLASS_xt_iclass_bit, + 0, + Opcode_or_encode_fns, 0, 0 }, + { "xor", ICLASS_xt_iclass_bit, + 0, + Opcode_xor_encode_fns, 0, 0 }, + { "beqi", ICLASS_xt_iclass_bsi8, + XTENSA_OPCODE_IS_BRANCH, + Opcode_beqi_encode_fns, 0, 0 }, + { "bnei", ICLASS_xt_iclass_bsi8, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bnei_encode_fns, 0, 0 }, + { "bgei", 
ICLASS_xt_iclass_bsi8, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bgei_encode_fns, 0, 0 }, + { "blti", ICLASS_xt_iclass_bsi8, + XTENSA_OPCODE_IS_BRANCH, + Opcode_blti_encode_fns, 0, 0 }, + { "bbci", ICLASS_xt_iclass_bsi8b, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bbci_encode_fns, 0, 0 }, + { "bbsi", ICLASS_xt_iclass_bsi8b, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bbsi_encode_fns, 0, 0 }, + { "bgeui", ICLASS_xt_iclass_bsi8u, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bgeui_encode_fns, 0, 0 }, + { "bltui", ICLASS_xt_iclass_bsi8u, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bltui_encode_fns, 0, 0 }, + { "beq", ICLASS_xt_iclass_bst8, + XTENSA_OPCODE_IS_BRANCH, + Opcode_beq_encode_fns, 0, 0 }, + { "bne", ICLASS_xt_iclass_bst8, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bne_encode_fns, 0, 0 }, + { "bge", ICLASS_xt_iclass_bst8, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bge_encode_fns, 0, 0 }, + { "blt", ICLASS_xt_iclass_bst8, + XTENSA_OPCODE_IS_BRANCH, + Opcode_blt_encode_fns, 0, 0 }, + { "bgeu", ICLASS_xt_iclass_bst8, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bgeu_encode_fns, 0, 0 }, + { "bltu", ICLASS_xt_iclass_bst8, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bltu_encode_fns, 0, 0 }, + { "bany", ICLASS_xt_iclass_bst8, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bany_encode_fns, 0, 0 }, + { "bnone", ICLASS_xt_iclass_bst8, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bnone_encode_fns, 0, 0 }, + { "ball", ICLASS_xt_iclass_bst8, + XTENSA_OPCODE_IS_BRANCH, + Opcode_ball_encode_fns, 0, 0 }, + { "bnall", ICLASS_xt_iclass_bst8, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bnall_encode_fns, 0, 0 }, + { "bbc", ICLASS_xt_iclass_bst8, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bbc_encode_fns, 0, 0 }, + { "bbs", ICLASS_xt_iclass_bst8, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bbs_encode_fns, 0, 0 }, + { "beqz", ICLASS_xt_iclass_bsz12, + XTENSA_OPCODE_IS_BRANCH, + Opcode_beqz_encode_fns, 0, 0 }, + { "bnez", ICLASS_xt_iclass_bsz12, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bnez_encode_fns, 0, 0 }, + { "bgez", ICLASS_xt_iclass_bsz12, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bgez_encode_fns, 0, 0 }, + { "bltz", ICLASS_xt_iclass_bsz12, + XTENSA_OPCODE_IS_BRANCH, + Opcode_bltz_encode_fns, 0, 0 }, + { "call0", ICLASS_xt_iclass_call0, + XTENSA_OPCODE_IS_CALL, + Opcode_call0_encode_fns, 0, 0 }, + { "callx0", ICLASS_xt_iclass_callx0, + XTENSA_OPCODE_IS_CALL, + Opcode_callx0_encode_fns, 0, 0 }, + { "extui", ICLASS_xt_iclass_exti, + 0, + Opcode_extui_encode_fns, 0, 0 }, + { "ill", ICLASS_xt_iclass_ill, + 0, + Opcode_ill_encode_fns, 0, 0 }, + { "j", ICLASS_xt_iclass_jump, + XTENSA_OPCODE_IS_JUMP, + Opcode_j_encode_fns, 0, 0 }, + { "jx", ICLASS_xt_iclass_jumpx, + XTENSA_OPCODE_IS_JUMP, + Opcode_jx_encode_fns, 0, 0 }, + { "l16ui", ICLASS_xt_iclass_l16ui, + 0, + Opcode_l16ui_encode_fns, 0, 0 }, + { "l16si", ICLASS_xt_iclass_l16si, + 0, + Opcode_l16si_encode_fns, 0, 0 }, + { "l32i", ICLASS_xt_iclass_l32i, + 0, + Opcode_l32i_encode_fns, 0, 0 }, + { "l32r", ICLASS_xt_iclass_l32r, + 0, + Opcode_l32r_encode_fns, 0, 0 }, + { "l8ui", ICLASS_xt_iclass_l8i, + 0, + Opcode_l8ui_encode_fns, 0, 0 }, + { "movi", ICLASS_xt_iclass_movi, + 0, + Opcode_movi_encode_fns, 0, 0 }, + { "moveqz", ICLASS_xt_iclass_movz, + 0, + Opcode_moveqz_encode_fns, 0, 0 }, + { "movnez", ICLASS_xt_iclass_movz, + 0, + Opcode_movnez_encode_fns, 0, 0 }, + { "movltz", ICLASS_xt_iclass_movz, + 0, + Opcode_movltz_encode_fns, 0, 0 }, + { "movgez", ICLASS_xt_iclass_movz, + 0, + Opcode_movgez_encode_fns, 0, 0 }, + { "neg", ICLASS_xt_iclass_neg, + 0, + Opcode_neg_encode_fns, 0, 0 }, + { "abs", ICLASS_xt_iclass_neg, + 0, + Opcode_abs_encode_fns, 0, 0 }, + { "nop", 
ICLASS_xt_iclass_nop, + 0, + Opcode_nop_encode_fns, 0, 0 }, + { "ret", ICLASS_xt_iclass_return, + XTENSA_OPCODE_IS_JUMP, + Opcode_ret_encode_fns, 0, 0 }, + { "s16i", ICLASS_xt_iclass_s16i, + 0, + Opcode_s16i_encode_fns, 0, 0 }, + { "s32i", ICLASS_xt_iclass_s32i, + 0, + Opcode_s32i_encode_fns, 0, 0 }, + { "s8i", ICLASS_xt_iclass_s8i, + 0, + Opcode_s8i_encode_fns, 0, 0 }, + { "ssr", ICLASS_xt_iclass_sar, + 0, + Opcode_ssr_encode_fns, 0, 0 }, + { "ssl", ICLASS_xt_iclass_sar, + 0, + Opcode_ssl_encode_fns, 0, 0 }, + { "ssa8l", ICLASS_xt_iclass_sar, + 0, + Opcode_ssa8l_encode_fns, 0, 0 }, + { "ssa8b", ICLASS_xt_iclass_sar, + 0, + Opcode_ssa8b_encode_fns, 0, 0 }, + { "ssai", ICLASS_xt_iclass_sari, + 0, + Opcode_ssai_encode_fns, 0, 0 }, + { "sll", ICLASS_xt_iclass_shifts, + 0, + Opcode_sll_encode_fns, 0, 0 }, + { "src", ICLASS_xt_iclass_shiftst, + 0, + Opcode_src_encode_fns, 0, 0 }, + { "srl", ICLASS_xt_iclass_shiftt, + 0, + Opcode_srl_encode_fns, 0, 0 }, + { "sra", ICLASS_xt_iclass_shiftt, + 0, + Opcode_sra_encode_fns, 0, 0 }, + { "slli", ICLASS_xt_iclass_slli, + 0, + Opcode_slli_encode_fns, 0, 0 }, + { "srai", ICLASS_xt_iclass_srai, + 0, + Opcode_srai_encode_fns, 0, 0 }, + { "srli", ICLASS_xt_iclass_srli, + 0, + Opcode_srli_encode_fns, 0, 0 }, + { "memw", ICLASS_xt_iclass_memw, + 0, + Opcode_memw_encode_fns, 0, 0 }, + { "extw", ICLASS_xt_iclass_extw, + 0, + Opcode_extw_encode_fns, 0, 0 }, + { "isync", ICLASS_xt_iclass_isync, + 0, + Opcode_isync_encode_fns, 0, 0 }, + { "rsync", ICLASS_xt_iclass_sync, + 0, + Opcode_rsync_encode_fns, 0, 0 }, + { "esync", ICLASS_xt_iclass_sync, + 0, + Opcode_esync_encode_fns, 0, 0 }, + { "dsync", ICLASS_xt_iclass_sync, + 0, + Opcode_dsync_encode_fns, 0, 0 }, + { "rsil", ICLASS_xt_iclass_rsil, + 0, + Opcode_rsil_encode_fns, 0, 0 }, + { "rsr.sar", ICLASS_xt_iclass_rsr_sar, + 0, + Opcode_rsr_sar_encode_fns, 0, 0 }, + { "wsr.sar", ICLASS_xt_iclass_wsr_sar, + 0, + Opcode_wsr_sar_encode_fns, 0, 0 }, + { "xsr.sar", ICLASS_xt_iclass_xsr_sar, + 0, + Opcode_xsr_sar_encode_fns, 0, 0 }, + { "rsr.litbase", ICLASS_xt_iclass_rsr_litbase, + 0, + Opcode_rsr_litbase_encode_fns, 0, 0 }, + { "wsr.litbase", ICLASS_xt_iclass_wsr_litbase, + 0, + Opcode_wsr_litbase_encode_fns, 0, 0 }, + { "xsr.litbase", ICLASS_xt_iclass_xsr_litbase, + 0, + Opcode_xsr_litbase_encode_fns, 0, 0 }, + { "rsr.176", ICLASS_xt_iclass_rsr_176, + 0, + Opcode_rsr_176_encode_fns, 0, 0 }, + { "wsr.176", ICLASS_xt_iclass_wsr_176, + 0, + Opcode_wsr_176_encode_fns, 0, 0 }, + { "rsr.208", ICLASS_xt_iclass_rsr_208, + 0, + Opcode_rsr_208_encode_fns, 0, 0 }, + { "rsr.ps", ICLASS_xt_iclass_rsr_ps, + 0, + Opcode_rsr_ps_encode_fns, 0, 0 }, + { "wsr.ps", ICLASS_xt_iclass_wsr_ps, + 0, + Opcode_wsr_ps_encode_fns, 0, 0 }, + { "xsr.ps", ICLASS_xt_iclass_xsr_ps, + 0, + Opcode_xsr_ps_encode_fns, 0, 0 }, + { "rsr.epc1", ICLASS_xt_iclass_rsr_epc1, + 0, + Opcode_rsr_epc1_encode_fns, 0, 0 }, + { "wsr.epc1", ICLASS_xt_iclass_wsr_epc1, + 0, + Opcode_wsr_epc1_encode_fns, 0, 0 }, + { "xsr.epc1", ICLASS_xt_iclass_xsr_epc1, + 0, + Opcode_xsr_epc1_encode_fns, 0, 0 }, + { "rsr.excsave1", ICLASS_xt_iclass_rsr_excsave1, + 0, + Opcode_rsr_excsave1_encode_fns, 0, 0 }, + { "wsr.excsave1", ICLASS_xt_iclass_wsr_excsave1, + 0, + Opcode_wsr_excsave1_encode_fns, 0, 0 }, + { "xsr.excsave1", ICLASS_xt_iclass_xsr_excsave1, + 0, + Opcode_xsr_excsave1_encode_fns, 0, 0 }, + { "rsr.epc2", ICLASS_xt_iclass_rsr_epc2, + 0, + Opcode_rsr_epc2_encode_fns, 0, 0 }, + { "wsr.epc2", ICLASS_xt_iclass_wsr_epc2, + 0, + Opcode_wsr_epc2_encode_fns, 0, 0 }, + { "xsr.epc2", 
ICLASS_xt_iclass_xsr_epc2, + 0, + Opcode_xsr_epc2_encode_fns, 0, 0 }, + { "rsr.excsave2", ICLASS_xt_iclass_rsr_excsave2, + 0, + Opcode_rsr_excsave2_encode_fns, 0, 0 }, + { "wsr.excsave2", ICLASS_xt_iclass_wsr_excsave2, + 0, + Opcode_wsr_excsave2_encode_fns, 0, 0 }, + { "xsr.excsave2", ICLASS_xt_iclass_xsr_excsave2, + 0, + Opcode_xsr_excsave2_encode_fns, 0, 0 }, + { "rsr.epc3", ICLASS_xt_iclass_rsr_epc3, + 0, + Opcode_rsr_epc3_encode_fns, 0, 0 }, + { "wsr.epc3", ICLASS_xt_iclass_wsr_epc3, + 0, + Opcode_wsr_epc3_encode_fns, 0, 0 }, + { "xsr.epc3", ICLASS_xt_iclass_xsr_epc3, + 0, + Opcode_xsr_epc3_encode_fns, 0, 0 }, + { "rsr.excsave3", ICLASS_xt_iclass_rsr_excsave3, + 0, + Opcode_rsr_excsave3_encode_fns, 0, 0 }, + { "wsr.excsave3", ICLASS_xt_iclass_wsr_excsave3, + 0, + Opcode_wsr_excsave3_encode_fns, 0, 0 }, + { "xsr.excsave3", ICLASS_xt_iclass_xsr_excsave3, + 0, + Opcode_xsr_excsave3_encode_fns, 0, 0 }, + { "rsr.eps2", ICLASS_xt_iclass_rsr_eps2, + 0, + Opcode_rsr_eps2_encode_fns, 0, 0 }, + { "wsr.eps2", ICLASS_xt_iclass_wsr_eps2, + 0, + Opcode_wsr_eps2_encode_fns, 0, 0 }, + { "xsr.eps2", ICLASS_xt_iclass_xsr_eps2, + 0, + Opcode_xsr_eps2_encode_fns, 0, 0 }, + { "rsr.eps3", ICLASS_xt_iclass_rsr_eps3, + 0, + Opcode_rsr_eps3_encode_fns, 0, 0 }, + { "wsr.eps3", ICLASS_xt_iclass_wsr_eps3, + 0, + Opcode_wsr_eps3_encode_fns, 0, 0 }, + { "xsr.eps3", ICLASS_xt_iclass_xsr_eps3, + 0, + Opcode_xsr_eps3_encode_fns, 0, 0 }, + { "rsr.excvaddr", ICLASS_xt_iclass_rsr_excvaddr, + 0, + Opcode_rsr_excvaddr_encode_fns, 0, 0 }, + { "wsr.excvaddr", ICLASS_xt_iclass_wsr_excvaddr, + 0, + Opcode_wsr_excvaddr_encode_fns, 0, 0 }, + { "xsr.excvaddr", ICLASS_xt_iclass_xsr_excvaddr, + 0, + Opcode_xsr_excvaddr_encode_fns, 0, 0 }, + { "rsr.depc", ICLASS_xt_iclass_rsr_depc, + 0, + Opcode_rsr_depc_encode_fns, 0, 0 }, + { "wsr.depc", ICLASS_xt_iclass_wsr_depc, + 0, + Opcode_wsr_depc_encode_fns, 0, 0 }, + { "xsr.depc", ICLASS_xt_iclass_xsr_depc, + 0, + Opcode_xsr_depc_encode_fns, 0, 0 }, + { "rsr.exccause", ICLASS_xt_iclass_rsr_exccause, + 0, + Opcode_rsr_exccause_encode_fns, 0, 0 }, + { "wsr.exccause", ICLASS_xt_iclass_wsr_exccause, + 0, + Opcode_wsr_exccause_encode_fns, 0, 0 }, + { "xsr.exccause", ICLASS_xt_iclass_xsr_exccause, + 0, + Opcode_xsr_exccause_encode_fns, 0, 0 }, + { "rsr.prid", ICLASS_xt_iclass_rsr_prid, + 0, + Opcode_rsr_prid_encode_fns, 0, 0 }, + { "rsr.vecbase", ICLASS_xt_iclass_rsr_vecbase, + 0, + Opcode_rsr_vecbase_encode_fns, 0, 0 }, + { "wsr.vecbase", ICLASS_xt_iclass_wsr_vecbase, + 0, + Opcode_wsr_vecbase_encode_fns, 0, 0 }, + { "xsr.vecbase", ICLASS_xt_iclass_xsr_vecbase, + 0, + Opcode_xsr_vecbase_encode_fns, 0, 0 }, + { "mul16u", ICLASS_xt_mul16, + 0, + Opcode_mul16u_encode_fns, 0, 0 }, + { "mul16s", ICLASS_xt_mul16, + 0, + Opcode_mul16s_encode_fns, 0, 0 }, + { "mull", ICLASS_xt_mul32, + 0, + Opcode_mull_encode_fns, 0, 0 }, + { "rfi", ICLASS_xt_iclass_rfi, + XTENSA_OPCODE_IS_JUMP, + Opcode_rfi_encode_fns, 0, 0 }, + { "waiti", ICLASS_xt_iclass_wait, + 0, + Opcode_waiti_encode_fns, 0, 0 }, + { "rsr.interrupt", ICLASS_xt_iclass_rsr_interrupt, + 0, + Opcode_rsr_interrupt_encode_fns, 0, 0 }, + { "wsr.intset", ICLASS_xt_iclass_wsr_intset, + 0, + Opcode_wsr_intset_encode_fns, 0, 0 }, + { "wsr.intclear", ICLASS_xt_iclass_wsr_intclear, + 0, + Opcode_wsr_intclear_encode_fns, 0, 0 }, + { "rsr.intenable", ICLASS_xt_iclass_rsr_intenable, + 0, + Opcode_rsr_intenable_encode_fns, 0, 0 }, + { "wsr.intenable", ICLASS_xt_iclass_wsr_intenable, + 0, + Opcode_wsr_intenable_encode_fns, 0, 0 }, + { "xsr.intenable", 
ICLASS_xt_iclass_xsr_intenable, + 0, + Opcode_xsr_intenable_encode_fns, 0, 0 }, + { "break", ICLASS_xt_iclass_break, + 0, + Opcode_break_encode_fns, 0, 0 }, + { "break.n", ICLASS_xt_iclass_break_n, + 0, + Opcode_break_n_encode_fns, 0, 0 }, + { "rsr.dbreaka0", ICLASS_xt_iclass_rsr_dbreaka0, + 0, + Opcode_rsr_dbreaka0_encode_fns, 0, 0 }, + { "wsr.dbreaka0", ICLASS_xt_iclass_wsr_dbreaka0, + 0, + Opcode_wsr_dbreaka0_encode_fns, 0, 0 }, + { "xsr.dbreaka0", ICLASS_xt_iclass_xsr_dbreaka0, + 0, + Opcode_xsr_dbreaka0_encode_fns, 0, 0 }, + { "rsr.dbreakc0", ICLASS_xt_iclass_rsr_dbreakc0, + 0, + Opcode_rsr_dbreakc0_encode_fns, 0, 0 }, + { "wsr.dbreakc0", ICLASS_xt_iclass_wsr_dbreakc0, + 0, + Opcode_wsr_dbreakc0_encode_fns, 0, 0 }, + { "xsr.dbreakc0", ICLASS_xt_iclass_xsr_dbreakc0, + 0, + Opcode_xsr_dbreakc0_encode_fns, 0, 0 }, + { "rsr.ibreaka0", ICLASS_xt_iclass_rsr_ibreaka0, + 0, + Opcode_rsr_ibreaka0_encode_fns, 0, 0 }, + { "wsr.ibreaka0", ICLASS_xt_iclass_wsr_ibreaka0, + 0, + Opcode_wsr_ibreaka0_encode_fns, 0, 0 }, + { "xsr.ibreaka0", ICLASS_xt_iclass_xsr_ibreaka0, + 0, + Opcode_xsr_ibreaka0_encode_fns, 0, 0 }, + { "rsr.ibreakenable", ICLASS_xt_iclass_rsr_ibreakenable, + 0, + Opcode_rsr_ibreakenable_encode_fns, 0, 0 }, + { "wsr.ibreakenable", ICLASS_xt_iclass_wsr_ibreakenable, + 0, + Opcode_wsr_ibreakenable_encode_fns, 0, 0 }, + { "xsr.ibreakenable", ICLASS_xt_iclass_xsr_ibreakenable, + 0, + Opcode_xsr_ibreakenable_encode_fns, 0, 0 }, + { "rsr.debugcause", ICLASS_xt_iclass_rsr_debugcause, + 0, + Opcode_rsr_debugcause_encode_fns, 0, 0 }, + { "wsr.debugcause", ICLASS_xt_iclass_wsr_debugcause, + 0, + Opcode_wsr_debugcause_encode_fns, 0, 0 }, + { "xsr.debugcause", ICLASS_xt_iclass_xsr_debugcause, + 0, + Opcode_xsr_debugcause_encode_fns, 0, 0 }, + { "rsr.icount", ICLASS_xt_iclass_rsr_icount, + 0, + Opcode_rsr_icount_encode_fns, 0, 0 }, + { "wsr.icount", ICLASS_xt_iclass_wsr_icount, + 0, + Opcode_wsr_icount_encode_fns, 0, 0 }, + { "xsr.icount", ICLASS_xt_iclass_xsr_icount, + 0, + Opcode_xsr_icount_encode_fns, 0, 0 }, + { "rsr.icountlevel", ICLASS_xt_iclass_rsr_icountlevel, + 0, + Opcode_rsr_icountlevel_encode_fns, 0, 0 }, + { "wsr.icountlevel", ICLASS_xt_iclass_wsr_icountlevel, + 0, + Opcode_wsr_icountlevel_encode_fns, 0, 0 }, + { "xsr.icountlevel", ICLASS_xt_iclass_xsr_icountlevel, + 0, + Opcode_xsr_icountlevel_encode_fns, 0, 0 }, + { "rsr.ddr", ICLASS_xt_iclass_rsr_ddr, + 0, + Opcode_rsr_ddr_encode_fns, 0, 0 }, + { "wsr.ddr", ICLASS_xt_iclass_wsr_ddr, + 0, + Opcode_wsr_ddr_encode_fns, 0, 0 }, + { "xsr.ddr", ICLASS_xt_iclass_xsr_ddr, + 0, + Opcode_xsr_ddr_encode_fns, 0, 0 }, + { "rfdo", ICLASS_xt_iclass_rfdo, + XTENSA_OPCODE_IS_JUMP, + Opcode_rfdo_encode_fns, 0, 0 }, + { "rfdd", ICLASS_xt_iclass_rfdd, + XTENSA_OPCODE_IS_JUMP, + Opcode_rfdd_encode_fns, 0, 0 }, + { "wsr.mmid", ICLASS_xt_iclass_wsr_mmid, + 0, + Opcode_wsr_mmid_encode_fns, 0, 0 }, + { "rsr.ccount", ICLASS_xt_iclass_rsr_ccount, + 0, + Opcode_rsr_ccount_encode_fns, 0, 0 }, + { "wsr.ccount", ICLASS_xt_iclass_wsr_ccount, + 0, + Opcode_wsr_ccount_encode_fns, 0, 0 }, + { "xsr.ccount", ICLASS_xt_iclass_xsr_ccount, + 0, + Opcode_xsr_ccount_encode_fns, 0, 0 }, + { "rsr.ccompare0", ICLASS_xt_iclass_rsr_ccompare0, + 0, + Opcode_rsr_ccompare0_encode_fns, 0, 0 }, + { "wsr.ccompare0", ICLASS_xt_iclass_wsr_ccompare0, + 0, + Opcode_wsr_ccompare0_encode_fns, 0, 0 }, + { "xsr.ccompare0", ICLASS_xt_iclass_xsr_ccompare0, + 0, + Opcode_xsr_ccompare0_encode_fns, 0, 0 }, + { "idtlb", ICLASS_xt_iclass_idtlb, + 0, + Opcode_idtlb_encode_fns, 0, 0 }, + { "pdtlb", 
ICLASS_xt_iclass_rdtlb, + 0, + Opcode_pdtlb_encode_fns, 0, 0 }, + { "rdtlb0", ICLASS_xt_iclass_rdtlb, + 0, + Opcode_rdtlb0_encode_fns, 0, 0 }, + { "rdtlb1", ICLASS_xt_iclass_rdtlb, + 0, + Opcode_rdtlb1_encode_fns, 0, 0 }, + { "wdtlb", ICLASS_xt_iclass_wdtlb, + 0, + Opcode_wdtlb_encode_fns, 0, 0 }, + { "iitlb", ICLASS_xt_iclass_iitlb, + 0, + Opcode_iitlb_encode_fns, 0, 0 }, + { "pitlb", ICLASS_xt_iclass_ritlb, + 0, + Opcode_pitlb_encode_fns, 0, 0 }, + { "ritlb0", ICLASS_xt_iclass_ritlb, + 0, + Opcode_ritlb0_encode_fns, 0, 0 }, + { "ritlb1", ICLASS_xt_iclass_ritlb, + 0, + Opcode_ritlb1_encode_fns, 0, 0 }, + { "witlb", ICLASS_xt_iclass_witlb, + 0, + Opcode_witlb_encode_fns, 0, 0 }, + { "nsa", ICLASS_xt_iclass_nsa, + 0, + Opcode_nsa_encode_fns, 0, 0 }, + { "nsau", ICLASS_xt_iclass_nsa, + 0, + Opcode_nsau_encode_fns, 0, 0 }, + { "rer", ICLASS_xt_iclass_rer, + 0, + Opcode_rer_encode_fns, 0, 0 }, + { "wer", ICLASS_xt_iclass_wer, + 0, + Opcode_wer_encode_fns, 0, 0 } +}; + +enum xtensa_opcode_id { + OPCODE_EXCW, + OPCODE_RFE, + OPCODE_RFDE, + OPCODE_SYSCALL, + OPCODE_SIMCALL, + OPCODE_ADD_N, + OPCODE_ADDI_N, + OPCODE_BEQZ_N, + OPCODE_BNEZ_N, + OPCODE_ILL_N, + OPCODE_L32I_N, + OPCODE_MOV_N, + OPCODE_MOVI_N, + OPCODE_NOP_N, + OPCODE_RET_N, + OPCODE_S32I_N, + OPCODE_ADDI, + OPCODE_ADDMI, + OPCODE_ADD, + OPCODE_SUB, + OPCODE_ADDX2, + OPCODE_ADDX4, + OPCODE_ADDX8, + OPCODE_SUBX2, + OPCODE_SUBX4, + OPCODE_SUBX8, + OPCODE_AND, + OPCODE_OR, + OPCODE_XOR, + OPCODE_BEQI, + OPCODE_BNEI, + OPCODE_BGEI, + OPCODE_BLTI, + OPCODE_BBCI, + OPCODE_BBSI, + OPCODE_BGEUI, + OPCODE_BLTUI, + OPCODE_BEQ, + OPCODE_BNE, + OPCODE_BGE, + OPCODE_BLT, + OPCODE_BGEU, + OPCODE_BLTU, + OPCODE_BANY, + OPCODE_BNONE, + OPCODE_BALL, + OPCODE_BNALL, + OPCODE_BBC, + OPCODE_BBS, + OPCODE_BEQZ, + OPCODE_BNEZ, + OPCODE_BGEZ, + OPCODE_BLTZ, + OPCODE_CALL0, + OPCODE_CALLX0, + OPCODE_EXTUI, + OPCODE_ILL, + OPCODE_J, + OPCODE_JX, + OPCODE_L16UI, + OPCODE_L16SI, + OPCODE_L32I, + OPCODE_L32R, + OPCODE_L8UI, + OPCODE_MOVI, + OPCODE_MOVEQZ, + OPCODE_MOVNEZ, + OPCODE_MOVLTZ, + OPCODE_MOVGEZ, + OPCODE_NEG, + OPCODE_ABS, + OPCODE_NOP, + OPCODE_RET, + OPCODE_S16I, + OPCODE_S32I, + OPCODE_S8I, + OPCODE_SSR, + OPCODE_SSL, + OPCODE_SSA8L, + OPCODE_SSA8B, + OPCODE_SSAI, + OPCODE_SLL, + OPCODE_SRC, + OPCODE_SRL, + OPCODE_SRA, + OPCODE_SLLI, + OPCODE_SRAI, + OPCODE_SRLI, + OPCODE_MEMW, + OPCODE_EXTW, + OPCODE_ISYNC, + OPCODE_RSYNC, + OPCODE_ESYNC, + OPCODE_DSYNC, + OPCODE_RSIL, + OPCODE_RSR_SAR, + OPCODE_WSR_SAR, + OPCODE_XSR_SAR, + OPCODE_RSR_LITBASE, + OPCODE_WSR_LITBASE, + OPCODE_XSR_LITBASE, + OPCODE_RSR_176, + OPCODE_WSR_176, + OPCODE_RSR_208, + OPCODE_RSR_PS, + OPCODE_WSR_PS, + OPCODE_XSR_PS, + OPCODE_RSR_EPC1, + OPCODE_WSR_EPC1, + OPCODE_XSR_EPC1, + OPCODE_RSR_EXCSAVE1, + OPCODE_WSR_EXCSAVE1, + OPCODE_XSR_EXCSAVE1, + OPCODE_RSR_EPC2, + OPCODE_WSR_EPC2, + OPCODE_XSR_EPC2, + OPCODE_RSR_EXCSAVE2, + OPCODE_WSR_EXCSAVE2, + OPCODE_XSR_EXCSAVE2, + OPCODE_RSR_EPC3, + OPCODE_WSR_EPC3, + OPCODE_XSR_EPC3, + OPCODE_RSR_EXCSAVE3, + OPCODE_WSR_EXCSAVE3, + OPCODE_XSR_EXCSAVE3, + OPCODE_RSR_EPS2, + OPCODE_WSR_EPS2, + OPCODE_XSR_EPS2, + OPCODE_RSR_EPS3, + OPCODE_WSR_EPS3, + OPCODE_XSR_EPS3, + OPCODE_RSR_EXCVADDR, + OPCODE_WSR_EXCVADDR, + OPCODE_XSR_EXCVADDR, + OPCODE_RSR_DEPC, + OPCODE_WSR_DEPC, + OPCODE_XSR_DEPC, + OPCODE_RSR_EXCCAUSE, + OPCODE_WSR_EXCCAUSE, + OPCODE_XSR_EXCCAUSE, + OPCODE_RSR_PRID, + OPCODE_RSR_VECBASE, + OPCODE_WSR_VECBASE, + OPCODE_XSR_VECBASE, + OPCODE_MUL16U, + OPCODE_MUL16S, + OPCODE_MULL, + OPCODE_RFI, + OPCODE_WAITI, + OPCODE_RSR_INTERRUPT, + 
OPCODE_WSR_INTSET, + OPCODE_WSR_INTCLEAR, + OPCODE_RSR_INTENABLE, + OPCODE_WSR_INTENABLE, + OPCODE_XSR_INTENABLE, + OPCODE_BREAK, + OPCODE_BREAK_N, + OPCODE_RSR_DBREAKA0, + OPCODE_WSR_DBREAKA0, + OPCODE_XSR_DBREAKA0, + OPCODE_RSR_DBREAKC0, + OPCODE_WSR_DBREAKC0, + OPCODE_XSR_DBREAKC0, + OPCODE_RSR_IBREAKA0, + OPCODE_WSR_IBREAKA0, + OPCODE_XSR_IBREAKA0, + OPCODE_RSR_IBREAKENABLE, + OPCODE_WSR_IBREAKENABLE, + OPCODE_XSR_IBREAKENABLE, + OPCODE_RSR_DEBUGCAUSE, + OPCODE_WSR_DEBUGCAUSE, + OPCODE_XSR_DEBUGCAUSE, + OPCODE_RSR_ICOUNT, + OPCODE_WSR_ICOUNT, + OPCODE_XSR_ICOUNT, + OPCODE_RSR_ICOUNTLEVEL, + OPCODE_WSR_ICOUNTLEVEL, + OPCODE_XSR_ICOUNTLEVEL, + OPCODE_RSR_DDR, + OPCODE_WSR_DDR, + OPCODE_XSR_DDR, + OPCODE_RFDO, + OPCODE_RFDD, + OPCODE_WSR_MMID, + OPCODE_RSR_CCOUNT, + OPCODE_WSR_CCOUNT, + OPCODE_XSR_CCOUNT, + OPCODE_RSR_CCOMPARE0, + OPCODE_WSR_CCOMPARE0, + OPCODE_XSR_CCOMPARE0, + OPCODE_IDTLB, + OPCODE_PDTLB, + OPCODE_RDTLB0, + OPCODE_RDTLB1, + OPCODE_WDTLB, + OPCODE_IITLB, + OPCODE_PITLB, + OPCODE_RITLB0, + OPCODE_RITLB1, + OPCODE_WITLB, + OPCODE_NSA, + OPCODE_NSAU, + OPCODE_RER, + OPCODE_WER +}; + + +/* Slot-specific opcode decode functions. */ + +static int +Slot_inst_decode (const xtensa_insnbuf insn) +{ + switch (Field_op0_Slot_inst_get (insn)) + { + case 0: + switch (Field_op1_Slot_inst_get (insn)) + { + case 0: + switch (Field_op2_Slot_inst_get (insn)) + { + case 0: + switch (Field_r_Slot_inst_get (insn)) + { + case 0: + switch (Field_m_Slot_inst_get (insn)) + { + case 0: + if (Field_s_Slot_inst_get (insn) == 0 && + Field_n_Slot_inst_get (insn) == 0) + return OPCODE_ILL; + break; + case 2: + switch (Field_n_Slot_inst_get (insn)) + { + case 0: + return OPCODE_RET; + case 2: + return OPCODE_JX; + } + break; + case 3: + if (Field_n_Slot_inst_get (insn) == 0) + return OPCODE_CALLX0; + break; + } + break; + case 2: + if (Field_s_Slot_inst_get (insn) == 0) + { + switch (Field_t_Slot_inst_get (insn)) + { + case 0: + return OPCODE_ISYNC; + case 1: + return OPCODE_RSYNC; + case 2: + return OPCODE_ESYNC; + case 3: + return OPCODE_DSYNC; + case 8: + return OPCODE_EXCW; + case 12: + return OPCODE_MEMW; + case 13: + return OPCODE_EXTW; + case 15: + return OPCODE_NOP; + } + } + break; + case 3: + switch (Field_t_Slot_inst_get (insn)) + { + case 0: + switch (Field_s_Slot_inst_get (insn)) + { + case 0: + return OPCODE_RFE; + case 2: + return OPCODE_RFDE; + } + break; + case 1: + return OPCODE_RFI; + } + break; + case 4: + return OPCODE_BREAK; + case 5: + switch (Field_s_Slot_inst_get (insn)) + { + case 0: + if (Field_t_Slot_inst_get (insn) == 0) + return OPCODE_SYSCALL; + break; + case 1: + if (Field_t_Slot_inst_get (insn) == 0) + return OPCODE_SIMCALL; + break; + } + break; + case 6: + return OPCODE_RSIL; + case 7: + if (Field_t_Slot_inst_get (insn) == 0) + return OPCODE_WAITI; + break; + } + break; + case 1: + return OPCODE_AND; + case 2: + return OPCODE_OR; + case 3: + return OPCODE_XOR; + case 4: + switch (Field_r_Slot_inst_get (insn)) + { + case 0: + if (Field_t_Slot_inst_get (insn) == 0) + return OPCODE_SSR; + break; + case 1: + if (Field_t_Slot_inst_get (insn) == 0) + return OPCODE_SSL; + break; + case 2: + if (Field_t_Slot_inst_get (insn) == 0) + return OPCODE_SSA8L; + break; + case 3: + if (Field_t_Slot_inst_get (insn) == 0) + return OPCODE_SSA8B; + break; + case 4: + if (Field_thi3_Slot_inst_get (insn) == 0) + return OPCODE_SSAI; + break; + case 6: + return OPCODE_RER; + case 7: + return OPCODE_WER; + case 14: + return OPCODE_NSA; + case 15: + return OPCODE_NSAU; + } + break; + case 5: + 
switch (Field_r_Slot_inst_get (insn)) + { + case 3: + return OPCODE_RITLB0; + case 4: + if (Field_t_Slot_inst_get (insn) == 0) + return OPCODE_IITLB; + break; + case 5: + return OPCODE_PITLB; + case 6: + return OPCODE_WITLB; + case 7: + return OPCODE_RITLB1; + case 11: + return OPCODE_RDTLB0; + case 12: + if (Field_t_Slot_inst_get (insn) == 0) + return OPCODE_IDTLB; + break; + case 13: + return OPCODE_PDTLB; + case 14: + return OPCODE_WDTLB; + case 15: + return OPCODE_RDTLB1; + } + break; + case 6: + switch (Field_s_Slot_inst_get (insn)) + { + case 0: + return OPCODE_NEG; + case 1: + return OPCODE_ABS; + } + break; + case 8: + return OPCODE_ADD; + case 9: + return OPCODE_ADDX2; + case 10: + return OPCODE_ADDX4; + case 11: + return OPCODE_ADDX8; + case 12: + return OPCODE_SUB; + case 13: + return OPCODE_SUBX2; + case 14: + return OPCODE_SUBX4; + case 15: + return OPCODE_SUBX8; + } + break; + case 1: + switch (Field_op2_Slot_inst_get (insn)) + { + case 0: + case 1: + return OPCODE_SLLI; + case 2: + case 3: + return OPCODE_SRAI; + case 4: + return OPCODE_SRLI; + case 6: + switch (Field_sr_Slot_inst_get (insn)) + { + case 3: + return OPCODE_XSR_SAR; + case 5: + return OPCODE_XSR_LITBASE; + case 96: + return OPCODE_XSR_IBREAKENABLE; + case 104: + return OPCODE_XSR_DDR; + case 128: + return OPCODE_XSR_IBREAKA0; + case 144: + return OPCODE_XSR_DBREAKA0; + case 160: + return OPCODE_XSR_DBREAKC0; + case 177: + return OPCODE_XSR_EPC1; + case 178: + return OPCODE_XSR_EPC2; + case 179: + return OPCODE_XSR_EPC3; + case 192: + return OPCODE_XSR_DEPC; + case 194: + return OPCODE_XSR_EPS2; + case 195: + return OPCODE_XSR_EPS3; + case 209: + return OPCODE_XSR_EXCSAVE1; + case 210: + return OPCODE_XSR_EXCSAVE2; + case 211: + return OPCODE_XSR_EXCSAVE3; + case 228: + return OPCODE_XSR_INTENABLE; + case 230: + return OPCODE_XSR_PS; + case 231: + return OPCODE_XSR_VECBASE; + case 232: + return OPCODE_XSR_EXCCAUSE; + case 233: + return OPCODE_XSR_DEBUGCAUSE; + case 234: + return OPCODE_XSR_CCOUNT; + case 236: + return OPCODE_XSR_ICOUNT; + case 237: + return OPCODE_XSR_ICOUNTLEVEL; + case 238: + return OPCODE_XSR_EXCVADDR; + case 240: + return OPCODE_XSR_CCOMPARE0; + } + break; + case 8: + return OPCODE_SRC; + case 9: + if (Field_s_Slot_inst_get (insn) == 0) + return OPCODE_SRL; + break; + case 10: + if (Field_t_Slot_inst_get (insn) == 0) + return OPCODE_SLL; + break; + case 11: + if (Field_s_Slot_inst_get (insn) == 0) + return OPCODE_SRA; + break; + case 12: + return OPCODE_MUL16U; + case 13: + return OPCODE_MUL16S; + case 15: + switch (Field_r_Slot_inst_get (insn)) + { + case 14: + if (Field_t_Slot_inst_get (insn) == 0) + return OPCODE_RFDO; + if (Field_t_Slot_inst_get (insn) == 1) + return OPCODE_RFDD; + break; + } + break; + } + break; + case 2: + if (Field_op2_Slot_inst_get (insn) == 8) + return OPCODE_MULL; + break; + case 3: + switch (Field_op2_Slot_inst_get (insn)) + { + case 0: + switch (Field_sr_Slot_inst_get (insn)) + { + case 3: + return OPCODE_RSR_SAR; + case 5: + return OPCODE_RSR_LITBASE; + case 96: + return OPCODE_RSR_IBREAKENABLE; + case 104: + return OPCODE_RSR_DDR; + case 128: + return OPCODE_RSR_IBREAKA0; + case 144: + return OPCODE_RSR_DBREAKA0; + case 160: + return OPCODE_RSR_DBREAKC0; + case 176: + return OPCODE_RSR_176; + case 177: + return OPCODE_RSR_EPC1; + case 178: + return OPCODE_RSR_EPC2; + case 179: + return OPCODE_RSR_EPC3; + case 192: + return OPCODE_RSR_DEPC; + case 194: + return OPCODE_RSR_EPS2; + case 195: + return OPCODE_RSR_EPS3; + case 208: + return OPCODE_RSR_208; + case 
209: + return OPCODE_RSR_EXCSAVE1; + case 210: + return OPCODE_RSR_EXCSAVE2; + case 211: + return OPCODE_RSR_EXCSAVE3; + case 226: + return OPCODE_RSR_INTERRUPT; + case 228: + return OPCODE_RSR_INTENABLE; + case 230: + return OPCODE_RSR_PS; + case 231: + return OPCODE_RSR_VECBASE; + case 232: + return OPCODE_RSR_EXCCAUSE; + case 233: + return OPCODE_RSR_DEBUGCAUSE; + case 234: + return OPCODE_RSR_CCOUNT; + case 235: + return OPCODE_RSR_PRID; + case 236: + return OPCODE_RSR_ICOUNT; + case 237: + return OPCODE_RSR_ICOUNTLEVEL; + case 238: + return OPCODE_RSR_EXCVADDR; + case 240: + return OPCODE_RSR_CCOMPARE0; + } + break; + case 1: + switch (Field_sr_Slot_inst_get (insn)) + { + case 3: + return OPCODE_WSR_SAR; + case 5: + return OPCODE_WSR_LITBASE; + case 89: + return OPCODE_WSR_MMID; + case 96: + return OPCODE_WSR_IBREAKENABLE; + case 104: + return OPCODE_WSR_DDR; + case 128: + return OPCODE_WSR_IBREAKA0; + case 144: + return OPCODE_WSR_DBREAKA0; + case 160: + return OPCODE_WSR_DBREAKC0; + case 176: + return OPCODE_WSR_176; + case 177: + return OPCODE_WSR_EPC1; + case 178: + return OPCODE_WSR_EPC2; + case 179: + return OPCODE_WSR_EPC3; + case 192: + return OPCODE_WSR_DEPC; + case 194: + return OPCODE_WSR_EPS2; + case 195: + return OPCODE_WSR_EPS3; + case 209: + return OPCODE_WSR_EXCSAVE1; + case 210: + return OPCODE_WSR_EXCSAVE2; + case 211: + return OPCODE_WSR_EXCSAVE3; + case 226: + return OPCODE_WSR_INTSET; + case 227: + return OPCODE_WSR_INTCLEAR; + case 228: + return OPCODE_WSR_INTENABLE; + case 230: + return OPCODE_WSR_PS; + case 231: + return OPCODE_WSR_VECBASE; + case 232: + return OPCODE_WSR_EXCCAUSE; + case 233: + return OPCODE_WSR_DEBUGCAUSE; + case 234: + return OPCODE_WSR_CCOUNT; + case 236: + return OPCODE_WSR_ICOUNT; + case 237: + return OPCODE_WSR_ICOUNTLEVEL; + case 238: + return OPCODE_WSR_EXCVADDR; + case 240: + return OPCODE_WSR_CCOMPARE0; + } + break; + case 8: + return OPCODE_MOVEQZ; + case 9: + return OPCODE_MOVNEZ; + case 10: + return OPCODE_MOVLTZ; + case 11: + return OPCODE_MOVGEZ; + } + break; + case 4: + case 5: + return OPCODE_EXTUI; + } + break; + case 1: + return OPCODE_L32R; + case 2: + switch (Field_r_Slot_inst_get (insn)) + { + case 0: + return OPCODE_L8UI; + case 1: + return OPCODE_L16UI; + case 2: + return OPCODE_L32I; + case 4: + return OPCODE_S8I; + case 5: + return OPCODE_S16I; + case 6: + return OPCODE_S32I; + case 9: + return OPCODE_L16SI; + case 10: + return OPCODE_MOVI; + case 12: + return OPCODE_ADDI; + case 13: + return OPCODE_ADDMI; + } + break; + case 5: + if (Field_n_Slot_inst_get (insn) == 0) + return OPCODE_CALL0; + break; + case 6: + switch (Field_n_Slot_inst_get (insn)) + { + case 0: + return OPCODE_J; + case 1: + switch (Field_m_Slot_inst_get (insn)) + { + case 0: + return OPCODE_BEQZ; + case 1: + return OPCODE_BNEZ; + case 2: + return OPCODE_BLTZ; + case 3: + return OPCODE_BGEZ; + } + break; + case 2: + switch (Field_m_Slot_inst_get (insn)) + { + case 0: + return OPCODE_BEQI; + case 1: + return OPCODE_BNEI; + case 2: + return OPCODE_BLTI; + case 3: + return OPCODE_BGEI; + } + break; + case 3: + switch (Field_m_Slot_inst_get (insn)) + { + case 2: + return OPCODE_BLTUI; + case 3: + return OPCODE_BGEUI; + } + break; + } + break; + case 7: + switch (Field_r_Slot_inst_get (insn)) + { + case 0: + return OPCODE_BNONE; + case 1: + return OPCODE_BEQ; + case 2: + return OPCODE_BLT; + case 3: + return OPCODE_BLTU; + case 4: + return OPCODE_BALL; + case 5: + return OPCODE_BBC; + case 6: + case 7: + return OPCODE_BBCI; + case 8: + return 
OPCODE_BANY; + case 9: + return OPCODE_BNE; + case 10: + return OPCODE_BGE; + case 11: + return OPCODE_BGEU; + case 12: + return OPCODE_BNALL; + case 13: + return OPCODE_BBS; + case 14: + case 15: + return OPCODE_BBSI; + } + break; + } + return XTENSA_UNDEFINED; +} + +static int +Slot_inst16a_decode (const xtensa_insnbuf insn) +{ + switch (Field_op0_Slot_inst16a_get (insn)) + { + case 8: + return OPCODE_L32I_N; + case 9: + return OPCODE_S32I_N; + case 10: + return OPCODE_ADD_N; + case 11: + return OPCODE_ADDI_N; + } + return XTENSA_UNDEFINED; +} + +static int +Slot_inst16b_decode (const xtensa_insnbuf insn) +{ + switch (Field_op0_Slot_inst16b_get (insn)) + { + case 12: + switch (Field_i_Slot_inst16b_get (insn)) + { + case 0: + return OPCODE_MOVI_N; + case 1: + switch (Field_z_Slot_inst16b_get (insn)) + { + case 0: + return OPCODE_BEQZ_N; + case 1: + return OPCODE_BNEZ_N; + } + break; + } + break; + case 13: + switch (Field_r_Slot_inst16b_get (insn)) + { + case 0: + return OPCODE_MOV_N; + case 15: + switch (Field_t_Slot_inst16b_get (insn)) + { + case 0: + return OPCODE_RET_N; + case 2: + return OPCODE_BREAK_N; + case 3: + if (Field_s_Slot_inst16b_get (insn) == 0) + return OPCODE_NOP_N; + break; + case 6: + if (Field_s_Slot_inst16b_get (insn) == 0) + return OPCODE_ILL_N; + break; + } + break; + } + break; + } + return XTENSA_UNDEFINED; +} + + +/* Instruction slots. */ + +static void +Slot_x24_Format_inst_0_get (const xtensa_insnbuf insn, + xtensa_insnbuf slotbuf) +{ + slotbuf[0] = (insn[0] & 0xffffff); +} + +static void +Slot_x24_Format_inst_0_set (xtensa_insnbuf insn, + const xtensa_insnbuf slotbuf) +{ + insn[0] = (insn[0] & ~0xffffff) | (slotbuf[0] & 0xffffff); +} + +static void +Slot_x16a_Format_inst16a_0_get (const xtensa_insnbuf insn, + xtensa_insnbuf slotbuf) +{ + slotbuf[0] = (insn[0] & 0xffff); +} + +static void +Slot_x16a_Format_inst16a_0_set (xtensa_insnbuf insn, + const xtensa_insnbuf slotbuf) +{ + insn[0] = (insn[0] & ~0xffff) | (slotbuf[0] & 0xffff); +} + +static void +Slot_x16b_Format_inst16b_0_get (const xtensa_insnbuf insn, + xtensa_insnbuf slotbuf) +{ + slotbuf[0] = (insn[0] & 0xffff); +} + +static void +Slot_x16b_Format_inst16b_0_set (xtensa_insnbuf insn, + const xtensa_insnbuf slotbuf) +{ + insn[0] = (insn[0] & ~0xffff) | (slotbuf[0] & 0xffff); +} + +static xtensa_get_field_fn +Slot_inst_get_field_fns[] = { + Field_t_Slot_inst_get, + Field_bbi4_Slot_inst_get, + Field_bbi_Slot_inst_get, + Field_imm12_Slot_inst_get, + Field_imm8_Slot_inst_get, + Field_s_Slot_inst_get, + Field_imm12b_Slot_inst_get, + Field_imm16_Slot_inst_get, + Field_m_Slot_inst_get, + Field_n_Slot_inst_get, + Field_offset_Slot_inst_get, + Field_op0_Slot_inst_get, + Field_op1_Slot_inst_get, + Field_op2_Slot_inst_get, + Field_r_Slot_inst_get, + Field_sa4_Slot_inst_get, + Field_sae4_Slot_inst_get, + Field_sae_Slot_inst_get, + Field_sal_Slot_inst_get, + Field_sargt_Slot_inst_get, + Field_sas4_Slot_inst_get, + Field_sas_Slot_inst_get, + Field_sr_Slot_inst_get, + Field_st_Slot_inst_get, + Field_thi3_Slot_inst_get, + Field_imm4_Slot_inst_get, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + Field_xt_wbr15_imm_Slot_inst_get, + Field_xt_wbr18_imm_Slot_inst_get, + Implicit_Field_ar0_get +}; + +static xtensa_set_field_fn +Slot_inst_set_field_fns[] = { + Field_t_Slot_inst_set, + Field_bbi4_Slot_inst_set, + Field_bbi_Slot_inst_set, + Field_imm12_Slot_inst_set, + Field_imm8_Slot_inst_set, + Field_s_Slot_inst_set, + Field_imm12b_Slot_inst_set, + Field_imm16_Slot_inst_set, + Field_m_Slot_inst_set, + Field_n_Slot_inst_set, + 
Field_offset_Slot_inst_set, + Field_op0_Slot_inst_set, + Field_op1_Slot_inst_set, + Field_op2_Slot_inst_set, + Field_r_Slot_inst_set, + Field_sa4_Slot_inst_set, + Field_sae4_Slot_inst_set, + Field_sae_Slot_inst_set, + Field_sal_Slot_inst_set, + Field_sargt_Slot_inst_set, + Field_sas4_Slot_inst_set, + Field_sas_Slot_inst_set, + Field_sr_Slot_inst_set, + Field_st_Slot_inst_set, + Field_thi3_Slot_inst_set, + Field_imm4_Slot_inst_set, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + Field_xt_wbr15_imm_Slot_inst_set, + Field_xt_wbr18_imm_Slot_inst_set, + Implicit_Field_set +}; + +static xtensa_get_field_fn +Slot_inst16a_get_field_fns[] = { + Field_t_Slot_inst16a_get, + 0, + 0, + 0, + 0, + Field_s_Slot_inst16a_get, + 0, + 0, + 0, + 0, + 0, + Field_op0_Slot_inst16a_get, + 0, + 0, + Field_r_Slot_inst16a_get, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + Field_sr_Slot_inst16a_get, + Field_st_Slot_inst16a_get, + 0, + Field_imm4_Slot_inst16a_get, + Field_i_Slot_inst16a_get, + Field_imm6lo_Slot_inst16a_get, + Field_imm6hi_Slot_inst16a_get, + Field_imm7lo_Slot_inst16a_get, + Field_imm7hi_Slot_inst16a_get, + Field_z_Slot_inst16a_get, + Field_imm6_Slot_inst16a_get, + Field_imm7_Slot_inst16a_get, + 0, + 0, + Implicit_Field_ar0_get +}; + +static xtensa_set_field_fn +Slot_inst16a_set_field_fns[] = { + Field_t_Slot_inst16a_set, + 0, + 0, + 0, + 0, + Field_s_Slot_inst16a_set, + 0, + 0, + 0, + 0, + 0, + Field_op0_Slot_inst16a_set, + 0, + 0, + Field_r_Slot_inst16a_set, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + Field_sr_Slot_inst16a_set, + Field_st_Slot_inst16a_set, + 0, + Field_imm4_Slot_inst16a_set, + Field_i_Slot_inst16a_set, + Field_imm6lo_Slot_inst16a_set, + Field_imm6hi_Slot_inst16a_set, + Field_imm7lo_Slot_inst16a_set, + Field_imm7hi_Slot_inst16a_set, + Field_z_Slot_inst16a_set, + Field_imm6_Slot_inst16a_set, + Field_imm7_Slot_inst16a_set, + 0, + 0, + Implicit_Field_set +}; + +static xtensa_get_field_fn +Slot_inst16b_get_field_fns[] = { + Field_t_Slot_inst16b_get, + 0, + 0, + 0, + 0, + Field_s_Slot_inst16b_get, + 0, + 0, + 0, + 0, + 0, + Field_op0_Slot_inst16b_get, + 0, + 0, + Field_r_Slot_inst16b_get, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + Field_sr_Slot_inst16b_get, + Field_st_Slot_inst16b_get, + 0, + Field_imm4_Slot_inst16b_get, + Field_i_Slot_inst16b_get, + Field_imm6lo_Slot_inst16b_get, + Field_imm6hi_Slot_inst16b_get, + Field_imm7lo_Slot_inst16b_get, + Field_imm7hi_Slot_inst16b_get, + Field_z_Slot_inst16b_get, + Field_imm6_Slot_inst16b_get, + Field_imm7_Slot_inst16b_get, + 0, + 0, + Implicit_Field_ar0_get +}; + +static xtensa_set_field_fn +Slot_inst16b_set_field_fns[] = { + Field_t_Slot_inst16b_set, + 0, + 0, + 0, + 0, + Field_s_Slot_inst16b_set, + 0, + 0, + 0, + 0, + 0, + Field_op0_Slot_inst16b_set, + 0, + 0, + Field_r_Slot_inst16b_set, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + Field_sr_Slot_inst16b_set, + Field_st_Slot_inst16b_set, + 0, + Field_imm4_Slot_inst16b_set, + Field_i_Slot_inst16b_set, + Field_imm6lo_Slot_inst16b_set, + Field_imm6hi_Slot_inst16b_set, + Field_imm7lo_Slot_inst16b_set, + Field_imm7hi_Slot_inst16b_set, + Field_z_Slot_inst16b_set, + Field_imm6_Slot_inst16b_set, + Field_imm7_Slot_inst16b_set, + 0, + 0, + Implicit_Field_set +}; + +static xtensa_slot_internal slots[] = { + { "Inst", "x24", 0, + Slot_x24_Format_inst_0_get, Slot_x24_Format_inst_0_set, + Slot_inst_get_field_fns, Slot_inst_set_field_fns, + Slot_inst_decode, "nop" }, + { "Inst16a", "x16a", 0, + Slot_x16a_Format_inst16a_0_get, Slot_x16a_Format_inst16a_0_set, + Slot_inst16a_get_field_fns, Slot_inst16a_set_field_fns, + Slot_inst16a_decode, "" }, + { 
"Inst16b", "x16b", 0, + Slot_x16b_Format_inst16b_0_get, Slot_x16b_Format_inst16b_0_set, + Slot_inst16b_get_field_fns, Slot_inst16b_set_field_fns, + Slot_inst16b_decode, "nop.n" } +}; + + +/* Instruction formats. */ + +static void +Format_x24_encode (xtensa_insnbuf insn) +{ + insn[0] = 0; +} + +static void +Format_x16a_encode (xtensa_insnbuf insn) +{ + insn[0] = 0x8; +} + +static void +Format_x16b_encode (xtensa_insnbuf insn) +{ + insn[0] = 0xc; +} + +static int Format_x24_slots[] = { 0 }; + +static int Format_x16a_slots[] = { 1 }; + +static int Format_x16b_slots[] = { 2 }; + +static xtensa_format_internal formats[] = { + { "x24", 3, Format_x24_encode, 1, Format_x24_slots }, + { "x16a", 2, Format_x16a_encode, 1, Format_x16a_slots }, + { "x16b", 2, Format_x16b_encode, 1, Format_x16b_slots } +}; + + +static int +format_decoder (const xtensa_insnbuf insn) +{ + if ((insn[0] & 0x8) == 0) + return 0; /* x24 */ + if ((insn[0] & 0xc) == 0x8) + return 1; /* x16a */ + if ((insn[0] & 0xe) == 0xc) + return 2; /* x16b */ + return -1; +} + +static int length_table[16] = { + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 2, + 2, + 2, + 2, + 2, + 2, + -1, + -1 +}; + +static int +length_decoder (const unsigned char *insn) +{ + int op0 = insn[0] & 0xf; + return length_table[op0]; +} + + +/* Top-level ISA structure. */ + +static xtensa_isa_internal xtensa_modules = { + 0 /* little-endian */, + 3 /* insn_size */, 0, + 3, formats, format_decoder, length_decoder, + 3, slots, + 37 /* num_fields */, + 65, operands, + 159, iclasses, + 204, opcodes, 0, + 1, regfiles, + NUM_STATES, states, 0, + NUM_SYSREGS, sysregs, 0, + { MAX_SPECIAL_REG, MAX_USER_REG }, { 0, 0 }, + 0, interfaces, 0, + 0, funcUnits, 0 +}; diff --git a/target/xtensa/core-test_mmuhifi_c3.c b/target/xtensa/core-test_mmuhifi_c3.c index 123c630b0d..c0e5d32d1e 100644 --- a/target/xtensa/core-test_mmuhifi_c3.c +++ b/target/xtensa/core-test_mmuhifi_c3.c @@ -28,7 +28,6 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/gdbstub.h" -#include "qemu-common.h" #include "qemu/host-utils.h" #include "core-test_mmuhifi_c3/core-isa.h" diff --git a/target/xtensa/cores.list b/target/xtensa/cores.list new file mode 100644 index 0000000000..a526a71cfd --- /dev/null +++ b/target/xtensa/cores.list @@ -0,0 +1,10 @@ +core-dc232b.c +core-dc233c.c +core-de212.c +core-de233_fpu.c +core-dsp3400.c +core-fsf.c +core-lx106.c +core-sample_controller.c +core-test_kc705_be.c +core-test_mmuhifi_c3.c diff --git a/target/xtensa/cpu-param.h b/target/xtensa/cpu-param.h index 4fde21b941..b53e9a3e08 100644 --- a/target/xtensa/cpu-param.h +++ b/target/xtensa/cpu-param.h @@ -6,7 +6,7 @@ */ #ifndef XTENSA_CPU_PARAM_H -#define XTENSA_CPU_PARAM_H 1 +#define XTENSA_CPU_PARAM_H #define TARGET_LONG_BITS 32 #define TARGET_PAGE_BITS 12 diff --git a/target/xtensa/cpu-qom.h b/target/xtensa/cpu-qom.h index 41d9859673..4fc35ee49b 100644 --- a/target/xtensa/cpu-qom.h +++ b/target/xtensa/cpu-qom.h @@ -34,8 +34,7 @@ #define TYPE_XTENSA_CPU "xtensa-cpu" -OBJECT_DECLARE_TYPE(XtensaCPU, XtensaCPUClass, - XTENSA_CPU) +OBJECT_DECLARE_CPU_TYPE(XtensaCPU, XtensaCPUClass, XTENSA_CPU) typedef struct XtensaConfig XtensaConfig; diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c index 58ec3a0862..09923301c4 100644 --- a/target/xtensa/cpu.c +++ b/target/xtensa/cpu.c @@ -34,6 +34,7 @@ #include "fpu/softfloat.h" #include "qemu/module.h" #include "migration/vmstate.h" +#include "hw/qdev-clock.h" static void xtensa_cpu_set_pc(CPUState *cs, vaddr value) @@ -43,6 +44,22 @@ static void xtensa_cpu_set_pc(CPUState *cs, vaddr 
value) cpu->env.pc = value; } +static vaddr xtensa_cpu_get_pc(CPUState *cs) +{ + XtensaCPU *cpu = XTENSA_CPU(cs); + + return cpu->env.pc; +} + +static void xtensa_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + XtensaCPU *cpu = XTENSA_CPU(cs); + + cpu->env.pc = data[0]; +} + static bool xtensa_cpu_has_work(CPUState *cs) { #ifndef CONFIG_USER_ONLY @@ -172,9 +189,23 @@ static void xtensa_cpu_initfn(Object *obj) memory_region_init_io(env->system_er, obj, NULL, env, "er", UINT64_C(0x100000000)); address_space_init(env->address_space_er, env->system_er, "ER"); + + cpu->clock = qdev_init_clock_in(DEVICE(obj), "clk-in", NULL, cpu, 0); + clock_set_hz(cpu->clock, env->config->clock_freq_khz * 1000); #endif } +XtensaCPU *xtensa_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk) +{ + DeviceState *cpu; + + cpu = DEVICE(object_new(cpu_type)); + qdev_connect_clock_in(cpu, "clk-in", cpu_refclk); + qdev_realize(cpu, NULL, &error_abort); + + return XTENSA_CPU(cpu); +} + #ifndef CONFIG_USER_ONLY static const VMStateDescription vmstate_xtensa_cpu = { .name = "cpu", @@ -192,11 +223,12 @@ static const struct SysemuCPUOps xtensa_sysemu_ops = { static const struct TCGCPUOps xtensa_tcg_ops = { .initialize = xtensa_translate_init, - .cpu_exec_interrupt = xtensa_cpu_exec_interrupt, - .tlb_fill = xtensa_cpu_tlb_fill, .debug_excp_handler = xtensa_breakpoint_handler, + .restore_state_to_opc = xtensa_restore_state_to_opc, #ifndef CONFIG_USER_ONLY + .tlb_fill = xtensa_cpu_tlb_fill, + .cpu_exec_interrupt = xtensa_cpu_exec_interrupt, .do_interrupt = xtensa_cpu_do_interrupt, .do_transaction_failed = xtensa_cpu_do_transaction_failed, .do_unaligned_access = xtensa_cpu_do_unaligned_access, @@ -218,6 +250,7 @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data) cc->has_work = xtensa_cpu_has_work; cc->dump_state = xtensa_cpu_dump_state; cc->set_pc = xtensa_cpu_set_pc; + cc->get_pc = xtensa_cpu_get_pc; cc->gdb_read_register = xtensa_cpu_gdb_read_register; cc->gdb_write_register = xtensa_cpu_gdb_write_register; cc->gdb_stop_before_watchpoint = true; diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h index 2345cb59c7..579adcb769 100644 --- a/target/xtensa/cpu.h +++ b/target/xtensa/cpu.h @@ -29,7 +29,9 @@ #define XTENSA_CPU_H #include "cpu-qom.h" +#include "qemu/cpu-float.h" #include "exec/cpu-defs.h" +#include "hw/clock.h" #include "xtensa-isa.h" /* Xtensa processors have a weak memory model */ @@ -306,7 +308,7 @@ typedef enum { INTTYPE_MAX } interrupt_type; -struct CPUXtensaState; +typedef struct CPUArchState CPUXtensaState; typedef struct xtensa_tlb_entry { uint32_t vaddr; @@ -344,7 +346,7 @@ typedef struct XtensaGdbRegmap { } XtensaGdbRegmap; typedef struct XtensaCcompareTimer { - struct CPUXtensaState *env; + CPUXtensaState *env; QEMUTimer *timer; } XtensaCcompareTimer; @@ -494,7 +496,7 @@ typedef struct XtensaConfigList { struct XtensaConfigList *next; } XtensaConfigList; -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN enum { FP_F32_HIGH, FP_F32_LOW, @@ -506,7 +508,7 @@ enum { }; #endif -typedef struct CPUXtensaState { +struct CPUArchState { const XtensaConfig *config; uint32_t regs[16]; uint32_t pc; @@ -545,7 +547,7 @@ typedef struct CPUXtensaState { /* Watchpoints for DBREAK registers */ struct CPUWatchpoint *cpu_watchpoint[MAX_NDBREAK]; -} CPUXtensaState; +}; /** * XtensaCPU: @@ -553,16 +555,18 @@ typedef struct CPUXtensaState { * * An Xtensa CPU. 
*/ -struct XtensaCPU { +struct ArchCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ + Clock *clock; CPUNegativeOffsetState neg; CPUXtensaState env; }; +#ifndef CONFIG_USER_ONLY bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); @@ -572,24 +576,24 @@ void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr); +#endif void xtensa_cpu_dump_state(CPUState *cpu, FILE *f, int flags); hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); void xtensa_count_regs(const XtensaConfig *config, unsigned *n_regs, unsigned *n_core_regs); int xtensa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int xtensa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); +G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr); -#define cpu_signal_handler cpu_xtensa_signal_handler #define cpu_list xtensa_cpu_list #define XTENSA_CPU_TYPE_SUFFIX "-" TYPE_XTENSA_CPU #define XTENSA_CPU_TYPE_NAME(model) model XTENSA_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_XTENSA_CPU -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN #define XTENSA_DEFAULT_CPU_MODEL "fsf" #define XTENSA_DEFAULT_CPU_NOMMU_MODEL "fsf" #else @@ -611,7 +615,6 @@ void check_interrupts(CPUXtensaState *s); void xtensa_irq_init(CPUXtensaState *env); qemu_irq *xtensa_get_extints(CPUXtensaState *env); qemu_irq xtensa_get_runstall(CPUXtensaState *env); -int cpu_xtensa_signal_handler(int host_signum, void *pinfo, void *puc); void xtensa_cpu_list(void); void xtensa_sync_window_from_phys(CPUXtensaState *env); void xtensa_sync_phys_from_window(CPUXtensaState *env); @@ -722,9 +725,6 @@ static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch) #define XTENSA_CSBASE_LBEG_OFF_MASK 0x00ff0000 #define XTENSA_CSBASE_LBEG_OFF_SHIFT 16 -typedef CPUXtensaState CPUArchState; -typedef XtensaCPU ArchCPU; - #include "exec/cpu-all.h" static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc, @@ -795,4 +795,7 @@ static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc, } } +XtensaCPU *xtensa_cpu_create_with_clock(const char *cpu_type, + Clock *cpu_refclk); + #endif diff --git a/target/xtensa/dbg_helper.c b/target/xtensa/dbg_helper.c index be1f81107b..ce2a820c60 100644 --- a/target/xtensa/dbg_helper.c +++ b/target/xtensa/dbg_helper.c @@ -26,6 +26,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "qemu/main-loop.h" #include "cpu.h" #include "exec/helper-proto.h" diff --git a/target/xtensa/exc_helper.c b/target/xtensa/exc_helper.c index 10e75ab070..d4823a65cd 100644 --- a/target/xtensa/exc_helper.c +++ b/target/xtensa/exc_helper.c @@ -26,6 +26,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "qemu/main-loop.h" #include "cpu.h" #include "exec/helper-proto.h" @@ -255,11 +256,6 @@ void xtensa_cpu_do_interrupt(CPUState *cs) } check_interrupts(env); } -#else -void xtensa_cpu_do_interrupt(CPUState *cs) -{ -} -#endif bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { @@ -270,3 +266,5 @@ bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request) } return false; } + +#endif /* !CONFIG_USER_ONLY */ diff --git 
a/target/xtensa/fpu_helper.c b/target/xtensa/fpu_helper.c
index ba3c29d19d..d2a10cc797 100644
--- a/target/xtensa/fpu_helper.c
+++ b/target/xtensa/fpu_helper.c
@@ -26,6 +26,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/log.h"
 #include "qemu/main-loop.h"
 #include "cpu.h"
 #include "exec/helper-proto.h"
diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c
index f18ab383fd..2aa9777a8e 100644
--- a/target/xtensa/helper.c
+++ b/target/xtensa/helper.c
@@ -26,6 +26,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/log.h"
 #include "cpu.h"
 #include "exec/exec-all.h"
 #include "exec/gdbstub.h"
@@ -242,27 +243,7 @@ void xtensa_cpu_list(void)
     }
 }
 
-#ifdef CONFIG_USER_ONLY
-
-bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
-                         MMUAccessType access_type, int mmu_idx,
-                         bool probe, uintptr_t retaddr)
-{
-    XtensaCPU *cpu = XTENSA_CPU(cs);
-    CPUXtensaState *env = &cpu->env;
-
-    qemu_log_mask(CPU_LOG_INT,
-                  "%s: rw = %d, address = 0x%08" VADDR_PRIx ", size = %d\n",
-                  __func__, access_type, address, size);
-    env->sregs[EXCVADDR] = address;
-    env->sregs[EXCCAUSE] = (access_type == MMU_DATA_STORE ?
-                            STORE_PROHIBITED_CAUSE : LOAD_PROHIBITED_CAUSE);
-    cs->exception_index = EXC_USER;
-    cpu_loop_exit_restore(cs, retaddr);
-}
-
-#else /* !CONFIG_USER_ONLY */
-
+#ifndef CONFIG_USER_ONLY
 void xtensa_cpu_do_unaligned_access(CPUState *cs,
                                     vaddr addr, MMUAccessType access_type,
                                     int mmu_idx, uintptr_t retaddr)
@@ -272,7 +253,7 @@ void xtensa_cpu_do_unaligned_access(CPUState *cs,
 
     assert(xtensa_option_enabled(env->config,
                                  XTENSA_OPTION_UNALIGNED_EXCEPTION));
-    cpu_restore_state(CPU(cpu), retaddr, true);
+    cpu_restore_state(CPU(cpu), retaddr);
     HELPER(exception_cause_vaddr)(env, env->pc,
                                   LOAD_STORE_ALIGNMENT_CAUSE,
                                   addr);
@@ -303,7 +284,7 @@ bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     } else if (probe) {
         return false;
     } else {
-        cpu_restore_state(cs, retaddr, true);
+        cpu_restore_state(cs, retaddr);
         HELPER(exception_cause_vaddr)(env, env->pc, ret, address);
     }
 }
@@ -316,7 +297,7 @@ void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
     XtensaCPU *cpu = XTENSA_CPU(cs);
     CPUXtensaState *env = &cpu->env;
 
-    cpu_restore_state(cs, retaddr, true);
+    cpu_restore_state(cs, retaddr);
     HELPER(exception_cause_vaddr)(env, env->pc,
                                   access_type == MMU_INST_FETCH ?
                                   INSTR_PIF_ADDR_ERROR_CAUSE :
diff --git a/target/xtensa/import_core.sh b/target/xtensa/import_core.sh
index 396b264be9..b4c15556c2 100755
--- a/target/xtensa/import_core.sh
+++ b/target/xtensa/import_core.sh
@@ -42,7 +42,6 @@ cat <<EOF > "${TARGET}.c"
 #include "qemu/osdep.h"
 #include "cpu.h"
 #include "exec/gdbstub.h"
-#include "qemu-common.h"
 #include "qemu/host-utils.h"
 
 #include "core-$NAME/core-isa.h"
@@ -66,3 +65,6 @@ static XtensaConfig $NAME __attribute__((unused)) = {
 
 REGISTER_CORE($NAME)
 EOF
+
+grep -qxf core-${NAME}.c "$BASE"/cores.list || \
+    echo core-${NAME}.c >> "$BASE"/cores.list
diff --git a/target/xtensa/meson.build b/target/xtensa/meson.build
index 7c4efa6c62..20bbf9b335 100644
--- a/target/xtensa/meson.build
+++ b/target/xtensa/meson.build
@@ -1,7 +1,7 @@
 xtensa_ss = ss.source_set()
 
-xtensa_cores = run_command('sh', '-c', 'cd $MESON_SOURCE_ROOT/$MESON_SUBDIR ; ls -1 core-*.c')
-xtensa_ss.add(files(xtensa_cores.stdout().strip().split('\n')))
+xtensa_cores = fs.read('cores.list')
+xtensa_ss.add(files(xtensa_cores.strip().split('\n')))
 
 xtensa_ss.add(files(
   'cpu.c',
diff --git a/target/xtensa/mmu_helper.c b/target/xtensa/mmu_helper.c
index b01ff9399a..fa66e8e867 100644
--- a/target/xtensa/mmu_helper.c
+++ b/target/xtensa/mmu_helper.c
@@ -26,6 +26,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/log.h"
 #include "qemu/main-loop.h"
 #include "qemu/qemu-print.h"
 #include "qemu/units.h"
@@ -1098,7 +1099,7 @@ static void dump_tlb(CPUXtensaState *env, bool dtlb)
             qemu_printf("\tVaddr Paddr ASID Attr RWX Cache\n"
                         "\t---------- ---------- ---- ---- --- -------\n");
         }
-        qemu_printf("\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %-7s\n",
+        qemu_printf("\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %s\n",
                     entry->vaddr,
                     entry->paddr,
                     entry->asid,
diff --git a/target/xtensa/op_helper.c b/target/xtensa/op_helper.c
index d85d3516d6..1af7becc54 100644
--- a/target/xtensa/op_helper.c
+++ b/target/xtensa/op_helper.c
@@ -38,12 +38,12 @@
 
 void HELPER(update_ccount)(CPUXtensaState *env)
 {
+    XtensaCPU *cpu = XTENSA_CPU(env_cpu(env));
     uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 
     env->ccount_time = now;
     env->sregs[CCOUNT] = env->ccount_base +
-        (uint32_t)((now - env->time_base) *
-                   env->config->clock_freq_khz / 1000000);
+        (uint32_t)clock_ns_to_ticks(cpu->clock, now - env->time_base);
 }
 
 void HELPER(wsr_ccount)(CPUXtensaState *env, uint32_t v)
@@ -59,6 +59,7 @@ void HELPER(wsr_ccount)(CPUXtensaState *env, uint32_t v)
 
 void HELPER(update_ccompare)(CPUXtensaState *env, uint32_t i)
 {
+    XtensaCPU *cpu = XTENSA_CPU(env_cpu(env));
     uint64_t dcc;
 
     qatomic_and(&env->sregs[INTSET],
@@ -66,7 +67,7 @@ void HELPER(update_ccompare)(CPUXtensaState *env, uint32_t i)
     HELPER(update_ccount)(env);
     dcc = (uint64_t)(env->sregs[CCOMPARE + i] - env->sregs[CCOUNT] - 1) + 1;
     timer_mod(env->ccompare[i].timer,
-              env->ccount_time + (dcc * 1000000) / env->config->clock_freq_khz);
+              env->ccount_time + clock_ticks_to_ns(cpu->clock, dcc));
     env->yield_needed = 1;
 }
diff --git a/target/xtensa/overlay_tool.h b/target/xtensa/overlay_tool.h
index 78720734fe..701c00eed2 100644
--- a/target/xtensa/overlay_tool.h
+++ b/target/xtensa/overlay_tool.h
@@ -449,7 +449,7 @@
 #endif
 
 
-#if (defined(TARGET_WORDS_BIGENDIAN) != 0) == (XCHAL_HAVE_BE != 0)
+#if TARGET_BIG_ENDIAN == (XCHAL_HAVE_BE != 0)
 #define REGISTER_CORE(core) \
     static void __attribute__((constructor)) register_core(void) \
     { \
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 20399d6a04..77bcd71030 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -306,32 +306,25 @@ static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
 
 static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
 {
-    TCGv_i32 tmp = tcg_const_i32(32);
     if (!dc->sar_m32_allocated) {
         dc->sar_m32 = tcg_temp_local_new_i32();
         dc->sar_m32_allocated = true;
     }
     tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
-    tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
+    tcg_gen_sub_i32(cpu_SR[SAR], tcg_constant_i32(32), dc->sar_m32);
     dc->sar_5bit = false;
     dc->sar_m32_5bit = true;
-    tcg_temp_free(tmp);
 }
 
 static void gen_exception(DisasContext *dc, int excp)
 {
-    TCGv_i32 tmp = tcg_const_i32(excp);
-    gen_helper_exception(cpu_env, tmp);
-    tcg_temp_free(tmp);
+    gen_helper_exception(cpu_env, tcg_constant_i32(excp));
 }
 
 static void gen_exception_cause(DisasContext *dc, uint32_t cause)
 {
-    TCGv_i32 tpc = tcg_const_i32(dc->pc);
-    TCGv_i32 tcause = tcg_const_i32(cause);
-    gen_helper_exception_cause(cpu_env, tpc, tcause);
-    tcg_temp_free(tpc);
-    tcg_temp_free(tcause);
+    TCGv_i32 pc = tcg_constant_i32(dc->pc);
+    gen_helper_exception_cause(cpu_env, pc, tcg_constant_i32(cause));
     if (cause == ILLEGAL_INSTRUCTION_CAUSE ||
         cause == SYSCALL_CAUSE) {
         dc->base.is_jmp = DISAS_NORETURN;
@@ -340,11 +333,8 @@ static void gen_exception_cause(DisasContext *dc, uint32_t cause)
 
 static void gen_debug_exception(DisasContext *dc, uint32_t cause)
 {
-    TCGv_i32 tpc = tcg_const_i32(dc->pc);
-    TCGv_i32 tcause = tcg_const_i32(cause);
-    gen_helper_debug_exception(cpu_env, tpc, tcause);
-    tcg_temp_free(tpc);
-    tcg_temp_free(tcause);
+    TCGv_i32 pc = tcg_constant_i32(dc->pc);
+    gen_helper_debug_exception(cpu_env, pc, tcg_constant_i32(cause));
     if (cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BI | DEBUGCAUSE_BN)) {
         dc->base.is_jmp = DISAS_NORETURN;
     }
@@ -382,18 +372,14 @@ static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
     if (dc->icount) {
         tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
     }
-    if (dc->base.singlestep_enabled) {
-        gen_exception(dc, EXCP_DEBUG);
+    if (dc->op_flags & XTENSA_OP_POSTPROCESS) {
+        slot = gen_postprocess(dc, slot);
+    }
+    if (slot >= 0) {
+        tcg_gen_goto_tb(slot);
+        tcg_gen_exit_tb(dc->base.tb, slot);
     } else {
-        if (dc->op_flags & XTENSA_OP_POSTPROCESS) {
-            slot = gen_postprocess(dc, slot);
-        }
-        if (slot >= 0) {
-            tcg_gen_goto_tb(slot);
-            tcg_gen_exit_tb(dc->base.tb, slot);
-        } else {
-            tcg_gen_exit_tb(NULL, 0);
-        }
+        tcg_gen_exit_tb(NULL, 0);
     }
     dc->base.is_jmp = DISAS_NORETURN;
 }
@@ -410,19 +396,15 @@ static int adjust_jump_slot(DisasContext *dc, uint32_t dest, int slot)
 
 static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
 {
-    TCGv_i32 tmp = tcg_const_i32(dest);
-    gen_jump_slot(dc, tmp, adjust_jump_slot(dc, dest, slot));
-    tcg_temp_free(tmp);
+    gen_jump_slot(dc, tcg_constant_i32(dest),
+                  adjust_jump_slot(dc, dest, slot));
 }
 
 static void gen_callw_slot(DisasContext *dc, int callinc, TCGv_i32 dest,
                            int slot)
 {
-    TCGv_i32 tcallinc = tcg_const_i32(callinc);
-
     tcg_gen_deposit_i32(cpu_SR[PS], cpu_SR[PS],
-                        tcallinc, PS_CALLINC_SHIFT, PS_CALLINC_LEN);
-    tcg_temp_free(tcallinc);
+                        tcg_constant_i32(callinc), PS_CALLINC_SHIFT, PS_CALLINC_LEN);
     tcg_gen_movi_i32(cpu_R[callinc << 2],
                      (callinc << 30) | (dc->base.pc_next & 0x3fffffff));
     gen_jump_slot(dc, dest, slot);
@@ -468,9 +450,7 @@ static void gen_brcond(DisasContext *dc, TCGCond cond,
 static void gen_brcondi(DisasContext *dc, TCGCond cond,
                         TCGv_i32 t0, uint32_t t1, uint32_t addr)
 {
-    TCGv_i32 tmp = tcg_const_i32(t1);
-    gen_brcond(dc, cond, t0, tmp, addr);
-    tcg_temp_free(tmp);
+    gen_brcond(dc, cond, t0, tcg_constant_i32(t1), addr);
 }
 
 static uint32_t
test_exceptions_sr(DisasContext *dc, const OpcodeArg arg[], @@ -555,28 +535,13 @@ static MemOp gen_load_store_alignment(DisasContext *dc, MemOp mop, return mop; } -#ifndef CONFIG_USER_ONLY -static void gen_waiti(DisasContext *dc, uint32_t imm4) -{ - TCGv_i32 pc = tcg_const_i32(dc->base.pc_next); - TCGv_i32 intlevel = tcg_const_i32(imm4); - - if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_waiti(cpu_env, pc, intlevel); - tcg_temp_free(pc); - tcg_temp_free(intlevel); -} -#endif - static bool gen_window_check(DisasContext *dc, uint32_t mask) { unsigned r = 31 - clz32(mask); if (r / 4 > dc->window) { - TCGv_i32 pc = tcg_const_i32(dc->pc); - TCGv_i32 w = tcg_const_i32(r / 4); + TCGv_i32 pc = tcg_constant_i32(dc->pc); + TCGv_i32 w = tcg_constant_i32(r / 4); gen_helper_window_check(cpu_env, pc, w); dc->base.is_jmp = DISAS_NORETURN; @@ -882,7 +847,8 @@ static int arg_copy_compare(const void *a, const void *b) static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) { xtensa_isa isa = dc->config->isa; - unsigned char b[MAX_INSN_LENGTH] = {translator_ldub(env, dc->pc)}; + unsigned char b[MAX_INSN_LENGTH] = {translator_ldub(env, &dc->base, + dc->pc)}; unsigned len = xtensa_op0_insn_len(dc, b[0]); xtensa_format fmt; int slot, slots; @@ -907,7 +873,7 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) dc->base.pc_next = dc->pc + len; for (i = 1; i < len; ++i) { - b[i] = translator_ldub(env, dc->pc + i); + b[i] = translator_ldub(env, &dc->base, dc->pc + i); } xtensa_insnbuf_from_chars(isa, dc->insnbuf, b, len); fmt = xtensa_format_decode(isa, dc->insnbuf); @@ -1083,17 +1049,15 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) } if (op_flags & XTENSA_OP_UNDERFLOW) { - TCGv_i32 tmp = tcg_const_i32(dc->pc); + TCGv_i32 pc = tcg_constant_i32(dc->pc); - gen_helper_test_underflow_retw(cpu_env, tmp); - tcg_temp_free(tmp); + gen_helper_test_underflow_retw(cpu_env, pc); } if (op_flags & XTENSA_OP_ALLOCA) { - TCGv_i32 tmp = tcg_const_i32(dc->pc); + TCGv_i32 pc = tcg_constant_i32(dc->pc); - gen_helper_movsp(cpu_env, tmp); - tcg_temp_free(tmp); + gen_helper_movsp(cpu_env, pc); } if (coprocessor && !gen_check_cpenable(dc, coprocessor)) { @@ -1292,22 +1256,18 @@ static void xtensa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) case DISAS_NORETURN: break; case DISAS_TOO_MANY: - if (dc->base.singlestep_enabled) { - tcg_gen_movi_i32(cpu_pc, dc->pc); - gen_exception(dc, EXCP_DEBUG); - } else { - gen_jumpi(dc, dc->pc, 0); - } + gen_jumpi(dc, dc->pc, 0); break; default: g_assert_not_reached(); } } -static void xtensa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu) +static void xtensa_tr_disas_log(const DisasContextBase *dcbase, + CPUState *cpu, FILE *logfile) { - qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first)); - log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size); + fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first)); + target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size); } static const TranslatorOps xtensa_translator_ops = { @@ -1319,10 +1279,12 @@ static const TranslatorOps xtensa_translator_ops = { .disas_log = xtensa_tr_disas_log, }; -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns) +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, + target_ulong pc, void *host_pc) { DisasContext dc = {}; - translator_loop(&xtensa_translator_ops, &dc.base, cpu, tb, max_insns); + translator_loop(cpu, tb, max_insns, pc, host_pc, + 
&xtensa_translator_ops, &dc.base); } void xtensa_cpu_dump_state(CPUState *cs, FILE *f, int flags) @@ -1393,12 +1355,6 @@ void xtensa_cpu_dump_state(CPUState *cs, FILE *f, int flags) } } -void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc = data[0]; -} - static void translate_abs(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { @@ -1479,14 +1435,14 @@ static void translate_b(DisasContext *dc, const OpcodeArg arg[], static void translate_bb(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN TCGv_i32 bit = tcg_const_i32(0x80000000u); #else TCGv_i32 bit = tcg_const_i32(0x00000001u); #endif TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, arg[1].in, 0x1f); -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN tcg_gen_shr_i32(bit, bit, tmp); #else tcg_gen_shl_i32(bit, bit, tmp); @@ -1501,7 +1457,7 @@ static void translate_bbi(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp = tcg_temp_new_i32(); -#ifdef TARGET_WORDS_BIGENDIAN +#if TARGET_BIG_ENDIAN tcg_gen_andi_i32(tmp, arg[0].in, 0x80000000u >> arg[1].imm); #else tcg_gen_andi_i32(tmp, arg[0].in, 0x00000001u << arg[1].imm); @@ -1677,13 +1633,10 @@ static uint32_t test_overflow_entry(DisasContext *dc, const OpcodeArg arg[], static void translate_entry(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 pc = tcg_const_i32(dc->pc); - TCGv_i32 s = tcg_const_i32(arg[0].imm); - TCGv_i32 imm = tcg_const_i32(arg[1].imm); + TCGv_i32 pc = tcg_constant_i32(dc->pc); + TCGv_i32 s = tcg_constant_i32(arg[0].imm); + TCGv_i32 imm = tcg_constant_i32(arg[1].imm); gen_helper_entry(cpu_env, pc, s, imm); - tcg_temp_free(imm); - tcg_temp_free(s); - tcg_temp_free(pc); } static void translate_extui(DisasContext *dc, const OpcodeArg arg[], @@ -1725,10 +1678,9 @@ static void translate_itlb(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { #ifndef CONFIG_USER_ONLY - TCGv_i32 dtlb = tcg_const_i32(par[0]); + TCGv_i32 dtlb = tcg_constant_i32(par[0]); gen_helper_itlb(cpu_env, arg[0].in, dtlb); - tcg_temp_free(dtlb); #endif } @@ -1764,12 +1716,10 @@ static void gen_check_exclusive(DisasContext *dc, TCGv_i32 addr, bool is_write) static void gen_check_exclusive(DisasContext *dc, TCGv_i32 addr, bool is_write) { if (!option_enabled(dc, XTENSA_OPTION_MPU)) { - TCGv_i32 tpc = tcg_const_i32(dc->pc); - TCGv_i32 write = tcg_const_i32(is_write); + TCGv_i32 pc = tcg_constant_i32(dc->pc); - gen_helper_check_exclusive(cpu_env, tpc, addr, write); - tcg_temp_free(tpc); - tcg_temp_free(write); + gen_helper_check_exclusive(cpu_env, pc, addr, + tcg_constant_i32(is_write)); } } #endif @@ -1812,6 +1762,12 @@ static void translate_ldst(DisasContext *dc, const OpcodeArg arg[], tcg_temp_free(addr); } +static void translate_lct(DisasContext *dc, const OpcodeArg arg[], + const uint32_t par[]) +{ + tcg_gen_movi_i32(arg[0].out, 0); +} + static void translate_l32r(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { @@ -1964,11 +1920,10 @@ static void translate_mov(DisasContext *dc, const OpcodeArg arg[], static void translate_movcond(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 zero = tcg_const_i32(0); + TCGv_i32 zero = tcg_constant_i32(0); tcg_gen_movcond_i32(par[0], arg[0].out, arg[2].in, zero, arg[1].in, arg[0].in); - tcg_temp_free(zero); } static void translate_movi(DisasContext *dc, const OpcodeArg arg[], @@ -1980,7 +1935,7 @@ static void 
translate_movi(DisasContext *dc, const OpcodeArg arg[], static void translate_movp(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 zero = tcg_const_i32(0); + TCGv_i32 zero = tcg_constant_i32(0); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, arg[2].in, 1 << arg[2].imm); @@ -1988,7 +1943,6 @@ static void translate_movp(DisasContext *dc, const OpcodeArg arg[], arg[0].out, tmp, zero, arg[1].in, arg[0].in); tcg_temp_free(tmp); - tcg_temp_free(zero); } static void translate_movsp(DisasContext *dc, const OpcodeArg arg[], @@ -2067,11 +2021,10 @@ static void translate_ptlb(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { #ifndef CONFIG_USER_ONLY - TCGv_i32 dtlb = tcg_const_i32(par[0]); + TCGv_i32 dtlb = tcg_constant_i32(par[0]); tcg_gen_movi_i32(cpu_pc, dc->pc); gen_helper_ptlb(arg[0].out, cpu_env, arg[1].in, dtlb); - tcg_temp_free(dtlb); #endif } @@ -2149,10 +2102,9 @@ static uint32_t test_exceptions_retw(DisasContext *dc, const OpcodeArg arg[], "Illegal retw instruction(pc = %08x)\n", dc->pc); return XTENSA_OP_ILL; } else { - TCGv_i32 tmp = tcg_const_i32(dc->pc); + TCGv_i32 pc = tcg_constant_i32(dc->pc); - gen_helper_test_ill_retw(cpu_env, tmp); - tcg_temp_free(tmp); + gen_helper_test_ill_retw(cpu_env, pc); return 0; } } @@ -2270,10 +2222,9 @@ static void translate_rtlb(DisasContext *dc, const OpcodeArg arg[], gen_helper_rtlb0, gen_helper_rtlb1, }; - TCGv_i32 dtlb = tcg_const_i32(par[0]); + TCGv_i32 dtlb = tcg_constant_i32(par[0]); helper[par[1]](arg[0].out, cpu_env, arg[1].in, dtlb); - tcg_temp_free(dtlb); #endif } @@ -2313,10 +2264,9 @@ static void gen_check_atomctl(DisasContext *dc, TCGv_i32 addr) #else static void gen_check_atomctl(DisasContext *dc, TCGv_i32 addr) { - TCGv_i32 tpc = tcg_const_i32(dc->pc); + TCGv_i32 pc = tcg_constant_i32(dc->pc); - gen_helper_check_atomctl(cpu_env, tpc, addr); - tcg_temp_free(tpc); + gen_helper_check_atomctl(cpu_env, pc, addr); } #endif @@ -2406,13 +2356,14 @@ static uint32_t test_exceptions_simcall(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { + bool is_semi = semihosting_enabled(dc->cring != 0); #ifdef CONFIG_USER_ONLY bool ill = true; #else /* Between RE.2 and RE.3 simcall opcode's become nop for the hardware. */ - bool ill = dc->config->hw_version <= 250002 && !semihosting_enabled(); + bool ill = dc->config->hw_version <= 250002 && !is_semi; #endif - if (ill || !semihosting_enabled()) { + if (ill || !is_semi) { qemu_log_mask(LOG_GUEST_ERROR, "SIMCALL but semihosting is disabled\n"); } return ill ? 
XTENSA_OP_ILL : 0; @@ -2422,7 +2373,7 @@ static void translate_simcall(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { #ifndef CONFIG_USER_ONLY - if (semihosting_enabled()) { + if (semihosting_enabled(dc->cring != 0)) { gen_helper_simcall(cpu_env); } #endif @@ -2537,9 +2488,7 @@ static void translate_ssa8l(DisasContext *dc, const OpcodeArg arg[], static void translate_ssai(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 tmp = tcg_const_i32(arg[0].imm); - gen_right_shift_sar(dc, tmp); - tcg_temp_free(tmp); + gen_right_shift_sar(dc, tcg_constant_i32(arg[0].imm)); } static void translate_ssl(DisasContext *dc, const OpcodeArg arg[], @@ -2573,7 +2522,12 @@ static void translate_waiti(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { #ifndef CONFIG_USER_ONLY - gen_waiti(dc, arg[0].imm); + TCGv_i32 pc = tcg_constant_i32(dc->base.pc_next); + + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } + gen_helper_waiti(cpu_env, pc, tcg_constant_i32(arg[0].imm)); #endif } @@ -2581,10 +2535,9 @@ static void translate_wtlb(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { #ifndef CONFIG_USER_ONLY - TCGv_i32 dtlb = tcg_const_i32(par[0]); + TCGv_i32 dtlb = tcg_constant_i32(par[0]); gen_helper_wtlb(cpu_env, arg[0].in, arg[1].in, dtlb); - tcg_temp_free(dtlb); #endif } @@ -2636,15 +2589,13 @@ static void translate_wsr_ccompare(DisasContext *dc, const OpcodeArg arg[], { #ifndef CONFIG_USER_ONLY uint32_t id = par[0] - CCOMPARE; - TCGv_i32 tmp = tcg_const_i32(id); assert(id < dc->config->nccompare); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } tcg_gen_mov_i32(cpu_SR[par[0]], arg[0].in); - gen_helper_update_ccompare(cpu_env, tmp); - tcg_temp_free(tmp); + gen_helper_update_ccompare(cpu_env, tcg_constant_i32(id)); #endif } @@ -2664,11 +2615,9 @@ static void translate_wsr_dbreaka(DisasContext *dc, const OpcodeArg arg[], { #ifndef CONFIG_USER_ONLY unsigned id = par[0] - DBREAKA; - TCGv_i32 tmp = tcg_const_i32(id); assert(id < dc->config->ndbreak); - gen_helper_wsr_dbreaka(cpu_env, tmp, arg[0].in); - tcg_temp_free(tmp); + gen_helper_wsr_dbreaka(cpu_env, tcg_constant_i32(id), arg[0].in); #endif } @@ -2677,11 +2626,9 @@ static void translate_wsr_dbreakc(DisasContext *dc, const OpcodeArg arg[], { #ifndef CONFIG_USER_ONLY unsigned id = par[0] - DBREAKC; - TCGv_i32 tmp = tcg_const_i32(id); assert(id < dc->config->ndbreak); - gen_helper_wsr_dbreakc(cpu_env, tmp, arg[0].in); - tcg_temp_free(tmp); + gen_helper_wsr_dbreakc(cpu_env, tcg_constant_i32(id), arg[0].in); #endif } @@ -2690,11 +2637,9 @@ static void translate_wsr_ibreaka(DisasContext *dc, const OpcodeArg arg[], { #ifndef CONFIG_USER_ONLY unsigned id = par[0] - IBREAKA; - TCGv_i32 tmp = tcg_const_i32(id); assert(id < dc->config->nibreak); - gen_helper_wsr_ibreaka(cpu_env, tmp, arg[0].in); - tcg_temp_free(tmp); + gen_helper_wsr_ibreaka(cpu_env, tcg_constant_i32(id), arg[0].in); #endif } @@ -3377,6 +3322,14 @@ static const XtensaOpcodeOps core_ops[] = { .translate = translate_ldst, .par = (const uint32_t[]){MO_UB, false, false}, .op_flags = XTENSA_OP_LOAD, + }, { + .name = "ldct", + .translate = translate_lct, + .op_flags = XTENSA_OP_PRIVILEGED, + }, { + .name = "ldcw", + .translate = translate_nop, + .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "lddec", .translate = translate_mac16, @@ -3390,6 +3343,14 @@ static const XtensaOpcodeOps core_ops[] = { }, { .name = "ldpte", .op_flags = XTENSA_OP_ILL, + }, { + .name = "lict", + .translate = translate_lct, + .op_flags 
= XTENSA_OP_PRIVILEGED, + }, { + .name = "licw", + .translate = translate_nop, + .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = (const char * const[]) { "loop", "loop.w15", NULL, @@ -4693,12 +4654,28 @@ static const XtensaOpcodeOps core_ops[] = { .name = "saltu", .translate = translate_salt, .par = (const uint32_t[]){TCG_COND_LTU}, + }, { + .name = "sdct", + .translate = translate_nop, + .op_flags = XTENSA_OP_PRIVILEGED, + }, { + .name = "sdcw", + .translate = translate_nop, + .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "setb_expstate", .translate = translate_setb_expstate, }, { .name = "sext", .translate = translate_sext, + }, { + .name = "sict", + .translate = translate_nop, + .op_flags = XTENSA_OP_PRIVILEGED, + }, { + .name = "sicw", + .translate = translate_nop, + .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "simcall", .translate = translate_simcall, @@ -6451,7 +6428,7 @@ static void translate_compare_d(DisasContext *dc, const OpcodeArg arg[], [COMPARE_OLE] = gen_helper_ole_d, [COMPARE_ULE] = gen_helper_ule_d, }; - TCGv_i32 zero = tcg_const_i32(0); + TCGv_i32 zero = tcg_constant_i32(0); TCGv_i32 res = tcg_temp_new_i32(); TCGv_i32 set_br = tcg_temp_new_i32(); TCGv_i32 clr_br = tcg_temp_new_i32(); @@ -6463,7 +6440,6 @@ static void translate_compare_d(DisasContext *dc, const OpcodeArg arg[], tcg_gen_movcond_i32(TCG_COND_NE, arg[0].out, res, zero, set_br, clr_br); - tcg_temp_free(zero); tcg_temp_free(res); tcg_temp_free(set_br); tcg_temp_free(clr_br); @@ -6483,7 +6459,7 @@ static void translate_compare_s(DisasContext *dc, const OpcodeArg arg[], [COMPARE_ULE] = gen_helper_ule_s, }; OpcodeArg arg32[3]; - TCGv_i32 zero = tcg_const_i32(0); + TCGv_i32 zero = tcg_constant_i32(0); TCGv_i32 res = tcg_temp_new_i32(); TCGv_i32 set_br = tcg_temp_new_i32(); TCGv_i32 clr_br = tcg_temp_new_i32(); @@ -6497,7 +6473,6 @@ static void translate_compare_s(DisasContext *dc, const OpcodeArg arg[], arg[0].out, res, zero, set_br, clr_br); put_f32_i2(arg, arg32, 1, 2); - tcg_temp_free(zero); tcg_temp_free(res); tcg_temp_free(set_br); tcg_temp_free(clr_br); @@ -6546,20 +6521,19 @@ static void translate_const_s(DisasContext *dc, const OpcodeArg arg[], static void translate_float_d(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 scale = tcg_const_i32(-arg[2].imm); + TCGv_i32 scale = tcg_constant_i32(-arg[2].imm); if (par[0]) { gen_helper_uitof_d(arg[0].out, cpu_env, arg[1].in, scale); } else { gen_helper_itof_d(arg[0].out, cpu_env, arg[1].in, scale); } - tcg_temp_free(scale); } static void translate_float_s(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 scale = tcg_const_i32(-arg[2].imm); + TCGv_i32 scale = tcg_constant_i32(-arg[2].imm); OpcodeArg arg32[1]; get_f32_o1(arg, arg32, 0); @@ -6569,14 +6543,13 @@ static void translate_float_s(DisasContext *dc, const OpcodeArg arg[], gen_helper_itof_s(arg32[0].out, cpu_env, arg[1].in, scale); } put_f32_o1(arg, arg32, 0); - tcg_temp_free(scale); } static void translate_ftoi_d(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 rounding_mode = tcg_const_i32(par[0]); - TCGv_i32 scale = tcg_const_i32(arg[2].imm); + TCGv_i32 rounding_mode = tcg_constant_i32(par[0]); + TCGv_i32 scale = tcg_constant_i32(arg[2].imm); if (par[1]) { gen_helper_ftoui_d(arg[0].out, cpu_env, arg[1].in, @@ -6585,15 +6558,13 @@ static void translate_ftoi_d(DisasContext *dc, const OpcodeArg arg[], gen_helper_ftoi_d(arg[0].out, cpu_env, arg[1].in, rounding_mode, scale); } - tcg_temp_free(rounding_mode); - 
tcg_temp_free(scale); } static void translate_ftoi_s(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 rounding_mode = tcg_const_i32(par[0]); - TCGv_i32 scale = tcg_const_i32(arg[2].imm); + TCGv_i32 rounding_mode = tcg_constant_i32(par[0]); + TCGv_i32 scale = tcg_constant_i32(arg[2].imm); OpcodeArg arg32[2]; get_f32_i1(arg, arg32, 1); @@ -6605,8 +6576,6 @@ static void translate_ftoi_s(DisasContext *dc, const OpcodeArg arg[], rounding_mode, scale); } put_f32_i1(arg, arg32, 1); - tcg_temp_free(rounding_mode); - tcg_temp_free(scale); } static void translate_ldsti(DisasContext *dc, const OpcodeArg arg[], @@ -6673,14 +6642,13 @@ static void translate_mov_s(DisasContext *dc, const OpcodeArg arg[], static void translate_movcond_d(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i64 zero = tcg_const_i64(0); + TCGv_i64 zero = tcg_constant_i64(0); TCGv_i64 arg2 = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(arg2, arg[2].in); tcg_gen_movcond_i64(par[0], arg[0].out, arg2, zero, arg[1].in, arg[0].in); - tcg_temp_free_i64(zero); tcg_temp_free_i64(arg2); } @@ -6688,12 +6656,11 @@ static void translate_movcond_s(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { if (arg[0].num_bits == 32) { - TCGv_i32 zero = tcg_const_i32(0); + TCGv_i32 zero = tcg_constant_i32(0); tcg_gen_movcond_i32(par[0], arg[0].out, arg[2].in, zero, arg[1].in, arg[0].in); - tcg_temp_free(zero); } else { translate_movcond_d(dc, arg, par); } @@ -6702,7 +6669,7 @@ static void translate_movcond_s(DisasContext *dc, const OpcodeArg arg[], static void translate_movp_d(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i64 zero = tcg_const_i64(0); + TCGv_i64 zero = tcg_constant_i64(0); TCGv_i32 tmp1 = tcg_temp_new_i32(); TCGv_i64 tmp2 = tcg_temp_new_i64(); @@ -6711,7 +6678,6 @@ static void translate_movp_d(DisasContext *dc, const OpcodeArg arg[], tcg_gen_movcond_i64(par[0], arg[0].out, tmp2, zero, arg[1].in, arg[0].in); - tcg_temp_free_i64(zero); tcg_temp_free_i32(tmp1); tcg_temp_free_i64(tmp2); } @@ -6720,7 +6686,7 @@ static void translate_movp_s(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { if (arg[0].num_bits == 32) { - TCGv_i32 zero = tcg_const_i32(0); + TCGv_i32 zero = tcg_constant_i32(0); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, arg[2].in, 1 << arg[2].imm); @@ -6728,7 +6694,6 @@ static void translate_movp_s(DisasContext *dc, const OpcodeArg arg[], arg[0].out, tmp, zero, arg[1].in, arg[0].in); tcg_temp_free(tmp); - tcg_temp_free(zero); } else { translate_movp_d(dc, arg, par); } @@ -7085,7 +7050,7 @@ static void translate_ldsti_d(DisasContext *dc, const OpcodeArg arg[], } else { addr = arg[1].in; } - mop = gen_load_store_alignment(dc, MO_TEQ, addr); + mop = gen_load_store_alignment(dc, MO_TEUQ, addr); if (par[0]) { tcg_gen_qemu_st_i64(arg[0].in, addr, dc->cring, mop); } else { @@ -7150,7 +7115,7 @@ static void translate_ldstx_d(DisasContext *dc, const OpcodeArg arg[], } else { addr = arg[1].in; } - mop = gen_load_store_alignment(dc, MO_TEQ, addr); + mop = gen_load_store_alignment(dc, MO_TEUQ, addr); if (par[0]) { tcg_gen_qemu_st_i64(arg[0].in, addr, dc->cring, mop); } else { diff --git a/target/xtensa/win_helper.c b/target/xtensa/win_helper.c index f6f96a64c3..5a1555360a 100644 --- a/target/xtensa/win_helper.c +++ b/target/xtensa/win_helper.c @@ -26,6 +26,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "qemu/main-loop.h" #include "cpu.h" #include "exec/helper-proto.h" diff --git a/tcg/README b/tcg/README index 
c2e7762a37..bc15cc3b32 100644 --- a/tcg/README +++ b/tcg/README @@ -254,6 +254,12 @@ t0 = t1 ? clz(t1) : t2 t0 = t1 ? ctz(t1) : t2 +* ctpop_i32/i64 t0, t1 + +t0 = number of bits set in t1 +With "ctpop" short for "count population", matching +the function name used in include/qemu/host-utils.h. + ********* Shifts/Rotates * shl_i32/i64 t0, t1, t2 diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc index 5924977b42..344b63e20f 100644 --- a/tcg/aarch64/tcg-target.c.inc +++ b/tcg/aarch64/tcg-target.c.inc @@ -10,6 +10,7 @@ * See the COPYING file in the top-level directory for details. */ +#include "../tcg-ldst.c.inc" #include "../tcg-pool.c.inc" #include "qemu/bitops.h" @@ -443,6 +444,7 @@ typedef enum { I3404_ANDI = 0x12000000, I3404_ORRI = 0x32000000, I3404_EORI = 0x52000000, + I3404_ANDSI = 0x72000000, /* Move wide immediate instructions. */ I3405_MOVN = 0x12800000, @@ -1084,9 +1086,9 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, opc = I3405_MOVZ; } s0 = ctz64(t0) & (63 & -16); - t1 = t0 & ~(0xffffUL << s0); + t1 = t0 & ~(0xffffull << s0); s1 = ctz64(t1) & (63 & -16); - t2 = t1 & ~(0xffffUL << s1); + t2 = t1 & ~(0xffffull << s1); if (t2 == 0) { tcg_out_insn_3405(s, opc, type, rd, t0 >> s0, s0); if (t1 != 0) { @@ -1259,7 +1261,7 @@ static inline void tcg_out_shl(TCGContext *s, TCGType ext, { int bits = ext ? 64 : 32; int max = bits - 1; - tcg_out_ubfm(s, ext, rd, rn, bits - (m & max), max - (m & max)); + tcg_out_ubfm(s, ext, rd, rn, (bits - m) & max, (max - m) & max); } static inline void tcg_out_shr(TCGContext *s, TCGType ext, @@ -1328,8 +1330,9 @@ static void tcg_out_goto_long(TCGContext *s, const tcg_insn_unit *target) if (offset == sextract64(offset, 0, 26)) { tcg_out_insn(s, 3206, B, offset); } else { - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target); - tcg_out_insn(s, 3207, BR, TCG_REG_TMP); + /* Choose X9 as a call-clobbered non-LR temporary. 
*/ + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X9, (intptr_t)target); + tcg_out_insn(s, 3207, BR, TCG_REG_X9); } } @@ -1541,15 +1544,20 @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d, } } -#ifdef CONFIG_SOFTMMU -#include "../tcg-ldst.c.inc" +static void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target) +{ + ptrdiff_t offset = tcg_pcrel_diff(s, target); + tcg_debug_assert(offset == sextract64(offset, 0, 21)); + tcg_out_insn(s, 3406, ADR, rd, offset); +} +#ifdef CONFIG_SOFTMMU /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, - * TCGMemOpIdx oi, uintptr_t ra) + * MemOpIdx oi, uintptr_t ra) */ -static void * const qemu_ld_helpers[4] = { +static void * const qemu_ld_helpers[MO_SIZE + 1] = { [MO_8] = helper_ret_ldub_mmu, -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN [MO_16] = helper_be_lduw_mmu, [MO_32] = helper_be_ldul_mmu, [MO_64] = helper_be_ldq_mmu, @@ -1561,12 +1569,12 @@ static void * const qemu_ld_helpers[4] = { }; /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, - * uintxx_t val, TCGMemOpIdx oi, + * uintxx_t val, MemOpIdx oi, * uintptr_t ra) */ -static void * const qemu_st_helpers[4] = { +static void * const qemu_st_helpers[MO_SIZE + 1] = { [MO_8] = helper_ret_stb_mmu, -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN [MO_16] = helper_be_stw_mmu, [MO_32] = helper_be_stl_mmu, [MO_64] = helper_be_stq_mmu, @@ -1577,16 +1585,9 @@ static void * const qemu_st_helpers[4] = { #endif }; -static inline void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target) -{ - ptrdiff_t offset = tcg_pcrel_diff(s, target); - tcg_debug_assert(offset == sextract64(offset, 0, 21)); - tcg_out_insn(s, 3406, ADR, rd, offset); -} - static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { - TCGMemOpIdx oi = lb->oi; + MemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); MemOp size = opc & MO_SIZE; @@ -1611,7 +1612,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { - TCGMemOpIdx oi = lb->oi; + MemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); MemOp size = opc & MO_SIZE; @@ -1629,7 +1630,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) return true; } -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi, TCGType ext, TCGReg data_reg, TCGReg addr_reg, tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) { @@ -1714,15 +1715,58 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0); } +#else +static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg, + unsigned a_bits) +{ + unsigned a_mask = (1 << a_bits) - 1; + TCGLabelQemuLdst *label = new_ldst_label(s); + + label->is_ld = is_ld; + label->addrlo_reg = addr_reg; + + /* tst addr, #mask */ + tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask); + + label->label_ptr[0] = s->code_ptr; + + /* b.ne slow_path */ + tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0); + + label->raddr = tcg_splitwx_to_rx(s->code_ptr); +} + +static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) +{ + if (!reloc_pc19(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { + return false; + } + + tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_X1, l->addrlo_reg); + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0); + + /* "Tail call" to the helper, with the return address back inline. 
*/ + tcg_out_adr(s, TCG_REG_LR, l->raddr); + tcg_out_goto_long(s, (const void *)(l->is_ld ? helper_unaligned_ld + : helper_unaligned_st)); + return true; +} + +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + return tcg_out_fail_alignment(s, l); +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + return tcg_out_fail_alignment(s, l); +} #endif /* CONFIG_SOFTMMU */ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext, TCGReg data_r, TCGReg addr_r, TCGType otype, TCGReg off_r) { - /* Byte swapping is left to middle-end expansion. */ - tcg_debug_assert((memop & MO_BSWAP) == 0); - switch (memop & MO_SSIZE) { case MO_UB: tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r); @@ -1744,7 +1788,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext, case MO_SL: tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r); break; - case MO_Q: + case MO_UQ: tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r); break; default: @@ -1756,9 +1800,6 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop, TCGReg data_r, TCGReg addr_r, TCGType otype, TCGReg off_r) { - /* Byte swapping is left to middle-end expansion. */ - tcg_debug_assert((memop & MO_BSWAP) == 0); - switch (memop & MO_SIZE) { case MO_8: tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r); @@ -1778,10 +1819,14 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop, } static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, - TCGMemOpIdx oi, TCGType ext) + MemOpIdx oi, TCGType ext) { MemOp memop = get_memop(oi); const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; + + /* Byte swapping is left to middle-end expansion. */ + tcg_debug_assert((memop & MO_BSWAP) == 0); + #ifdef CONFIG_SOFTMMU unsigned mem_index = get_mmuidx(oi); tcg_insn_unit *label_ptr; @@ -1792,6 +1837,10 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg, s->code_ptr, label_ptr); #else /* !CONFIG_SOFTMMU */ + unsigned a_bits = get_alignment_bits(memop); + if (a_bits) { + tcg_out_test_alignment(s, true, addr_reg, a_bits); + } if (USE_GUEST_BASE) { tcg_out_qemu_ld_direct(s, memop, ext, data_reg, TCG_REG_GUEST_BASE, otype, addr_reg); @@ -1803,10 +1852,14 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, } static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, - TCGMemOpIdx oi) + MemOpIdx oi) { MemOp memop = get_memop(oi); const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; + + /* Byte swapping is left to middle-end expansion. 
*/ + tcg_debug_assert((memop & MO_BSWAP) == 0); + #ifdef CONFIG_SOFTMMU unsigned mem_index = get_mmuidx(oi); tcg_insn_unit *label_ptr; @@ -1817,6 +1870,10 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE)== MO_64, data_reg, addr_reg, s->code_ptr, label_ptr); #else /* !CONFIG_SOFTMMU */ + unsigned a_bits = get_alignment_bits(memop); + if (a_bits) { + tcg_out_test_alignment(s, false, addr_reg, a_bits); + } if (USE_GUEST_BASE) { tcg_out_qemu_st_direct(s, memop, data_reg, TCG_REG_GUEST_BASE, otype, addr_reg); @@ -1859,24 +1916,21 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, break; case INDEX_op_goto_tb: - if (s->tb_jmp_insn_offset != NULL) { - /* TCG_TARGET_HAS_direct_jump */ - /* Ensure that ADRP+ADD are 8-byte aligned so that an atomic - write can be used to patch the target address. */ - if ((uintptr_t)s->code_ptr & 7) { - tcg_out32(s, NOP); - } - s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); - /* actual branch destination will be patched by - tb_target_set_jmp_target later. */ - tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0); - tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0); - } else { - /* !TCG_TARGET_HAS_direct_jump */ - tcg_debug_assert(s->tb_jmp_target_addr != NULL); - intptr_t offset = tcg_pcrel_diff(s, (s->tb_jmp_target_addr + a0)) >> 2; - tcg_out_insn(s, 3305, LDR, offset, TCG_REG_TMP); + tcg_debug_assert(s->tb_jmp_insn_offset != NULL); + /* + * Ensure that ADRP+ADD are 8-byte aligned so that an atomic + * write can be used to patch the target address. + */ + if ((uintptr_t)s->code_ptr & 7) { + tcg_out32(s, NOP); } + s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); + /* + * actual branch destination will be patched by + * tb_target_set_jmp_target later + */ + tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0); + tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0); tcg_out_insn(s, 3207, BR, TCG_REG_TMP); set_jmp_reset_offset(s, a0); break; diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h index 7a93ac8023..485f685bd2 100644 --- a/tcg/aarch64/tcg-target.h +++ b/tcg/aarch64/tcg-target.h @@ -131,6 +131,9 @@ typedef enum { #define TCG_TARGET_HAS_andc_vec 1 #define TCG_TARGET_HAS_orc_vec 1 +#define TCG_TARGET_HAS_nand_vec 0 +#define TCG_TARGET_HAS_nor_vec 0 +#define TCG_TARGET_HAS_eqv_vec 0 #define TCG_TARGET_HAS_not_vec 1 #define TCG_TARGET_HAS_neg_vec 1 #define TCG_TARGET_HAS_abs_vec 1 @@ -151,9 +154,7 @@ typedef enum { void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t); -#ifdef CONFIG_SOFTMMU #define TCG_TARGET_NEED_LDST_LABELS -#endif #define TCG_TARGET_NEED_POOL_LABELS #endif /* AARCH64_TCG_TARGET_H */ diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc index 007ceee68e..2c6c353eea 100644 --- a/tcg/arm/tcg-target.c.inc +++ b/tcg/arm/tcg-target.c.inc @@ -23,6 +23,7 @@ */ #include "elf.h" +#include "../tcg-ldst.c.inc" #include "../tcg-pool.c.inc" int arm_arch = __ARM_ARCH; @@ -34,13 +35,6 @@ bool use_idiv_instructions; bool use_neon_instructions; #endif -/* ??? Ought to think about changing CONFIG_SOFTMMU to always defined. 
*/ -#ifdef CONFIG_SOFTMMU -# define USING_SOFTMMU 1 -#else -# define USING_SOFTMMU 0 -#endif - #ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", @@ -91,8 +85,11 @@ static const int tcg_target_call_oarg_regs[2] = { #define TCG_REG_TMP TCG_REG_R12 #define TCG_VEC_TMP TCG_REG_Q15 +#ifndef CONFIG_SOFTMMU +#define TCG_REG_GUEST_BASE TCG_REG_R11 +#endif -enum arm_cond_code_e { +typedef enum { COND_EQ = 0x0, COND_NE = 0x1, COND_CS = 0x2, /* Unsigned greater or equal */ @@ -108,7 +105,7 @@ enum arm_cond_code_e { COND_GT = 0xc, COND_LE = 0xd, COND_AL = 0xe, -}; +} ARMCond; #define TO_CPSR (1 << 20) @@ -141,6 +138,9 @@ typedef enum { INSN_CLZ = 0x016f0f10, INSN_RBIT = 0x06ff0f30, + INSN_LDMIA = 0x08b00000, + INSN_STMDB = 0x09200000, + INSN_LDR_IMM = 0x04100000, INSN_LDR_REG = 0x06100000, INSN_STR_IMM = 0x04000000, @@ -309,10 +309,10 @@ static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target) { const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8; - int rot = encode_imm(offset); + int imm12 = encode_imm(offset); - if (rot >= 0) { - *src_rw = deposit32(*src_rw, 0, 12, rol32(offset, rot) | (rot << 7)); + if (imm12 >= 0) { + *src_rw = deposit32(*src_rw, 0, 12, imm12); return true; } return false; @@ -366,36 +366,55 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1))) #endif -static inline uint32_t rotl(uint32_t val, int n) -{ - return (val << n) | (val >> (32 - n)); -} - -/* ARM immediates for ALU instructions are made of an unsigned 8-bit - right-rotated by an even amount between 0 and 30. */ +/* + * ARM immediates for ALU instructions are made of an unsigned 8-bit + * right-rotated by an even amount between 0 and 30. + * + * Return < 0 if @imm cannot be encoded, else the entire imm12 field. + */ static int encode_imm(uint32_t imm) { - int shift; + uint32_t rot, imm8; - /* simple case, only lower bits */ - if ((imm & ~0xff) == 0) - return 0; - /* then try a simple even shift */ - shift = ctz32(imm) & ~1; - if (((imm >> shift) & ~0xff) == 0) - return 32 - shift; - /* now try harder with rotations */ - if ((rotl(imm, 2) & ~0xff) == 0) - return 2; - if ((rotl(imm, 4) & ~0xff) == 0) - return 4; - if ((rotl(imm, 6) & ~0xff) == 0) - return 6; - /* imm can't be encoded */ + /* Simple case, no rotation required. */ + if ((imm & ~0xff) == 0) { + return imm; + } + + /* Next, try a simple even shift. */ + rot = ctz32(imm) & ~1; + imm8 = imm >> rot; + rot = 32 - rot; + if ((imm8 & ~0xff) == 0) { + goto found; + } + + /* + * Finally, try harder with rotations. + * The ctz test above will have taken care of rotates >= 8. + */ + for (rot = 2; rot < 8; rot += 2) { + imm8 = rol32(imm, rot); + if ((imm8 & ~0xff) == 0) { + goto found; + } + } + /* Fail: imm cannot be encoded. */ return -1; + + found: + /* Note that rot is even, and we discard bit 0 by shifting by 7. 
*/ + return rot << 7 | imm8; } -static inline int check_fit_imm(uint32_t imm) +static int encode_imm_nofail(uint32_t imm) +{ + int ret = encode_imm(imm); + tcg_debug_assert(ret >= 0); + return ret; +} + +static bool check_fit_imm(uint32_t imm) { return encode_imm(imm) >= 0; } @@ -525,42 +544,37 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct) return 0; } -static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset) +static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset) { tcg_out32(s, (cond << 28) | 0x0a000000 | (((offset - 8) >> 2) & 0x00ffffff)); } -static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset) +static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset) { tcg_out32(s, (cond << 28) | 0x0b000000 | (((offset - 8) >> 2) & 0x00ffffff)); } -static inline void tcg_out_blx(TCGContext *s, int cond, int rn) +static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn) { tcg_out32(s, (cond << 28) | 0x012fff30 | rn); } -static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset) +static void tcg_out_blx_imm(TCGContext *s, int32_t offset) { tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) | (((offset - 8) >> 2) & 0x00ffffff)); } -static inline void tcg_out_dat_reg(TCGContext *s, - int cond, int opc, int rd, int rn, int rm, int shift) +static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc, + TCGReg rd, TCGReg rn, TCGReg rm, int shift) { tcg_out32(s, (cond << 28) | (0 << 25) | opc | (rn << 16) | (rd << 12) | shift | rm); } -static inline void tcg_out_nop(TCGContext *s) -{ - tcg_out32(s, INSN_NOP); -} - -static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm) +static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm) { /* Simple reg-reg move, optimising out the 'do nothing' case */ if (rd != rm) { @@ -568,35 +582,43 @@ static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm) } } -static inline void tcg_out_bx(TCGContext *s, int cond, TCGReg rn) +static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn) { - /* Unless the C portion of QEMU is compiled as thumb, we don't - actually need true BX semantics; merely a branch to an address - held in a register. */ - if (use_armv5t_instructions) { - tcg_out32(s, (cond << 28) | 0x012fff10 | rn); - } else { - tcg_out_mov_reg(s, cond, TCG_REG_PC, rn); - } + tcg_out32(s, (cond << 28) | 0x012fff10 | rn); } -static inline void tcg_out_dat_imm(TCGContext *s, - int cond, int opc, int rd, int rn, int im) +static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn) +{ + /* + * Unless the C portion of QEMU is compiled as thumb, we don't need + * true BX semantics; merely a branch to an address held in a register. + */ + tcg_out_bx_reg(s, cond, rn); +} + +static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc, + TCGReg rd, TCGReg rn, int im) { tcg_out32(s, (cond << 28) | (1 << 25) | opc | (rn << 16) | (rd << 12) | im); } +static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc, + TCGReg rn, uint16_t mask) +{ + tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask); +} + /* Note that this routine is used for both LDR and LDRH formats, so we do not wish to include an immediate shift at this point. 
*/ -static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt, +static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt, TCGReg rn, TCGReg rm, bool u, bool p, bool w) { tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | (rn << 16) | (rt << 12) | rm); } -static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt, +static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt, TCGReg rn, int imm8, bool p, bool w) { bool u = 1; @@ -608,8 +630,8 @@ static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt, (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf)); } -static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt, - TCGReg rn, int imm12, bool p, bool w) +static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc, + TCGReg rt, TCGReg rn, int imm12, bool p, bool w) { bool u = 1; if (imm12 < 0) { @@ -620,167 +642,167 @@ static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt, (rn << 16) | (rt << 12) | imm12); } -static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm12) +static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, int imm12) { tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0); } -static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm12) +static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, int imm12) { tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0); } -static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) +static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0); } -static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) +static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0); } -static inline void tcg_out_ldrd_8(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm8) +static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, int imm8) { tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0); } -static inline void tcg_out_ldrd_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) +static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0); } -static inline void tcg_out_ldrd_rwb(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) +static void __attribute__((unused)) +tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1); } -static inline void tcg_out_strd_8(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm8) +static void tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, int imm8) { tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0); } -static inline void tcg_out_strd_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) +static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0); } /* Register pre-increment with base writeback. 
*/ -static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) +static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1); } -static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) +static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1); } -static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm8) +static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, int imm8) { tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0); } -static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm8) +static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, int imm8) { tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0); } -static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) +static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0); } -static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) +static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0); } -static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm8) +static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, int imm8) { tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0); } -static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) +static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0); } -static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm12) +static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, int imm12) { tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0); } -static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm12) +static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, int imm12) { tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0); } -static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) +static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0); } -static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) +static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0); } -static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm8) +static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, int imm8) { tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0); } -static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) +static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0); } 
-static void tcg_out_movi_pool(TCGContext *s, int cond, int rd, uint32_t arg) +static void tcg_out_movi_pool(TCGContext *s, ARMCond cond, + TCGReg rd, uint32_t arg) { new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0); tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0); } -static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg) +static void tcg_out_movi32(TCGContext *s, ARMCond cond, + TCGReg rd, uint32_t arg) { - int rot, diff, opc, sh1, sh2; + int imm12, diff, opc, sh1, sh2; uint32_t tt0, tt1, tt2; /* Check a single MOV/MVN before anything else. */ - rot = encode_imm(arg); - if (rot >= 0) { - tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, - rotl(arg, rot) | (rot << 7)); + imm12 = encode_imm(arg); + if (imm12 >= 0) { + tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12); return; } - rot = encode_imm(~arg); - if (rot >= 0) { - tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, - rotl(~arg, rot) | (rot << 7)); + imm12 = encode_imm(~arg); + if (imm12 >= 0) { + tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12); return; } @@ -788,17 +810,15 @@ static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg) or within the TB, which is immediately before the code block. */ diff = tcg_pcrel_diff(s, (void *)arg) - 8; if (diff >= 0) { - rot = encode_imm(diff); - if (rot >= 0) { - tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, - rotl(diff, rot) | (rot << 7)); + imm12 = encode_imm(diff); + if (imm12 >= 0) { + tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12); return; } } else { - rot = encode_imm(-diff); - if (rot >= 0) { - tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, - rotl(-diff, rot) | (rot << 7)); + imm12 = encode_imm(-diff); + if (imm12 >= 0) { + tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12); return; } } @@ -830,6 +850,8 @@ static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg) sh2 = ctz32(tt1) & ~1; tt2 = tt1 & ~(0xff << sh2); if (tt2 == 0) { + int rot; + rot = ((32 - sh1) << 7) & 0xf00; tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot); rot = ((32 - sh2) << 7) & 0xf00; @@ -842,263 +864,143 @@ static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg) tcg_out_movi_pool(s, cond, rd, arg); } -static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst, - TCGArg lhs, TCGArg rhs, int rhs_is_const) +/* + * Emit either the reg,imm or reg,reg form of a data-processing insn. + * rhs must satisfy the "rI" constraint. + */ +static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc, + TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const) { - /* Emit either the reg,imm or reg,reg form of a data-processing insn. - * rhs must satisfy the "rI" constraint. - */ if (rhs_is_const) { - int rot = encode_imm(rhs); - tcg_debug_assert(rot >= 0); - tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7)); + tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs)); } else { tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); } } -static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv, - TCGReg dst, TCGReg lhs, TCGArg rhs, +/* + * Emit either the reg,imm or reg,reg form of a data-processing insn. + * rhs must satisfy the "rIK" constraint. + */ +static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc, + ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs, bool rhs_is_const) { - /* Emit either the reg,imm or reg,reg form of a data-processing insn. - * rhs must satisfy the "rIK" constraint. 
- */ if (rhs_is_const) { - int rot = encode_imm(rhs); - if (rot < 0) { - rhs = ~rhs; - rot = encode_imm(rhs); - tcg_debug_assert(rot >= 0); + int imm12 = encode_imm(rhs); + if (imm12 < 0) { + imm12 = encode_imm_nofail(~rhs); opc = opinv; } - tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7)); + tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12); } else { tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); } } -static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg, - TCGArg dst, TCGArg lhs, TCGArg rhs, +static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc, + ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs, bool rhs_is_const) { /* Emit either the reg,imm or reg,reg form of a data-processing insn. * rhs must satisfy the "rIN" constraint. */ if (rhs_is_const) { - int rot = encode_imm(rhs); - if (rot < 0) { - rhs = -rhs; - rot = encode_imm(rhs); - tcg_debug_assert(rot >= 0); + int imm12 = encode_imm(rhs); + if (imm12 < 0) { + imm12 = encode_imm_nofail(-rhs); opc = opneg; } - tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7)); + tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12); } else { tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); } } -static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd, - TCGReg rn, TCGReg rm) +static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd, + TCGReg rn, TCGReg rm) { - /* if ArchVersion() < 6 && d == n then UNPREDICTABLE; */ - if (!use_armv6_instructions && rd == rn) { - if (rd == rm) { - /* rd == rn == rm; copy an input to tmp first. */ - tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn); - rm = rn = TCG_REG_TMP; - } else { - rn = rm; - rm = rd; - } - } /* mul */ tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn); } -static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0, - TCGReg rd1, TCGReg rn, TCGReg rm) +static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0, + TCGReg rd1, TCGReg rn, TCGReg rm) { - /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */ - if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) { - if (rd0 == rm || rd1 == rm) { - tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn); - rn = TCG_REG_TMP; - } else { - TCGReg t = rn; - rn = rm; - rm = t; - } - } /* umull */ tcg_out32(s, (cond << 28) | 0x00800090 | (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); } -static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0, - TCGReg rd1, TCGReg rn, TCGReg rm) +static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0, + TCGReg rd1, TCGReg rn, TCGReg rm) { - /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */ - if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) { - if (rd0 == rm || rd1 == rm) { - tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn); - rn = TCG_REG_TMP; - } else { - TCGReg t = rn; - rn = rm; - rm = t; - } - } /* smull */ tcg_out32(s, (cond << 28) | 0x00c00090 | (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); } -static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm) +static void tcg_out_sdiv(TCGContext *s, ARMCond cond, + TCGReg rd, TCGReg rn, TCGReg rm) { tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8)); } -static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm) +static void tcg_out_udiv(TCGContext *s, ARMCond cond, + TCGReg rd, TCGReg rn, TCGReg rm) { tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8)); } -static inline void tcg_out_ext8s(TCGContext *s, int 
cond, - int rd, int rn) +static void tcg_out_ext8s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) { - if (use_armv6_instructions) { - /* sxtb */ - tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn); - } else { - tcg_out_dat_reg(s, cond, ARITH_MOV, - rd, 0, rn, SHIFT_IMM_LSL(24)); - tcg_out_dat_reg(s, cond, ARITH_MOV, - rd, 0, rd, SHIFT_IMM_ASR(24)); - } + /* sxtb */ + tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn); } -static inline void tcg_out_ext8u(TCGContext *s, int cond, - int rd, int rn) +static void __attribute__((unused)) +tcg_out_ext8u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) { tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff); } -static inline void tcg_out_ext16s(TCGContext *s, int cond, - int rd, int rn) +static void tcg_out_ext16s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) { - if (use_armv6_instructions) { - /* sxth */ - tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn); - } else { - tcg_out_dat_reg(s, cond, ARITH_MOV, - rd, 0, rn, SHIFT_IMM_LSL(16)); - tcg_out_dat_reg(s, cond, ARITH_MOV, - rd, 0, rd, SHIFT_IMM_ASR(16)); - } + /* sxth */ + tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn); } -static inline void tcg_out_ext16u(TCGContext *s, int cond, - int rd, int rn) +static void tcg_out_ext16u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) { - if (use_armv6_instructions) { + /* uxth */ + tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn); +} + +static void tcg_out_bswap16(TCGContext *s, ARMCond cond, + TCGReg rd, TCGReg rn, int flags) +{ + if (flags & TCG_BSWAP_OS) { + /* revsh */ + tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn); + return; + } + + /* rev16 */ + tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn); + if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { /* uxth */ - tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn); - } else { - tcg_out_dat_reg(s, cond, ARITH_MOV, - rd, 0, rn, SHIFT_IMM_LSL(16)); - tcg_out_dat_reg(s, cond, ARITH_MOV, - rd, 0, rd, SHIFT_IMM_LSR(16)); + tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd); } } -static void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn, int flags) +static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) { - if (use_armv6_instructions) { - if (flags & TCG_BSWAP_OS) { - /* revsh */ - tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn); - return; - } - - /* rev16 */ - tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn); - if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { - /* uxth */ - tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd); - } - return; - } - - if (flags == 0) { - /* - * For stores, no input or output extension: - * rn = xxAB - * lsr tmp, rn, #8 tmp = 0xxA - * and tmp, tmp, #0xff tmp = 000A - * orr rd, tmp, rn, lsl #8 rd = xABA - */ - tcg_out_dat_reg(s, cond, ARITH_MOV, - TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8)); - tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff); - tcg_out_dat_reg(s, cond, ARITH_ORR, - rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8)); - return; - } - - /* - * Byte swap, leaving the result at the top of the register. - * We will then shift down, zero or sign-extending. 
- */ - if (flags & TCG_BSWAP_IZ) { - /* - * rn = 00AB - * ror tmp, rn, #8 tmp = B00A - * orr tmp, tmp, tmp, lsl #16 tmp = BA00 - */ - tcg_out_dat_reg(s, cond, ARITH_MOV, - TCG_REG_TMP, 0, rn, SHIFT_IMM_ROR(8)); - tcg_out_dat_reg(s, cond, ARITH_ORR, - TCG_REG_TMP, TCG_REG_TMP, TCG_REG_TMP, - SHIFT_IMM_LSL(16)); - } else { - /* - * rn = xxAB - * and tmp, rn, #0xff00 tmp = 00A0 - * lsl tmp, tmp, #8 tmp = 0A00 - * orr tmp, tmp, rn, lsl #24 tmp = BA00 - */ - tcg_out_dat_rI(s, cond, ARITH_AND, TCG_REG_TMP, rn, 0xff00, 1); - tcg_out_dat_reg(s, cond, ARITH_MOV, - TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSL(8)); - tcg_out_dat_reg(s, cond, ARITH_ORR, - TCG_REG_TMP, TCG_REG_TMP, rn, SHIFT_IMM_LSL(24)); - } - tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, TCG_REG_TMP, - (flags & TCG_BSWAP_OS - ? SHIFT_IMM_ASR(8) : SHIFT_IMM_LSR(8))); + /* rev */ + tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn); } -static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn) -{ - if (use_armv6_instructions) { - /* rev */ - tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn); - } else { - tcg_out_dat_reg(s, cond, ARITH_EOR, - TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16)); - tcg_out_dat_imm(s, cond, ARITH_BIC, - TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800); - tcg_out_dat_reg(s, cond, ARITH_MOV, - rd, 0, rn, SHIFT_IMM_ROR(8)); - tcg_out_dat_reg(s, cond, ARITH_EOR, - rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8)); - } -} - -static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd, - TCGArg a1, int ofs, int len, bool const_a1) +static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd, + TCGArg a1, int ofs, int len, bool const_a1) { if (const_a1) { /* bfi becomes bfc with rn == 15. */ @@ -1109,24 +1011,24 @@ static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd, | (ofs << 7) | ((ofs + len - 1) << 16)); } -static inline void tcg_out_extract(TCGContext *s, int cond, TCGReg rd, - TCGArg a1, int ofs, int len) +static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd, + TCGReg rn, int ofs, int len) { /* ubfx */ - tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | a1 + tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn | (ofs << 7) | ((len - 1) << 16)); } -static inline void tcg_out_sextract(TCGContext *s, int cond, TCGReg rd, - TCGArg a1, int ofs, int len) +static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd, + TCGReg rn, int ofs, int len) { /* sbfx */ - tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | a1 + tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn | (ofs << 7) | ((len - 1) << 16)); } -static inline void tcg_out_ld32u(TCGContext *s, int cond, - int rd, int rn, int32_t offset) +static void tcg_out_ld32u(TCGContext *s, ARMCond cond, + TCGReg rd, TCGReg rn, int32_t offset) { if (offset > 0xfff || offset < -0xfff) { tcg_out_movi32(s, cond, TCG_REG_TMP, offset); @@ -1135,8 +1037,8 @@ static inline void tcg_out_ld32u(TCGContext *s, int cond, tcg_out_ld32_12(s, cond, rd, rn, offset); } -static inline void tcg_out_st32(TCGContext *s, int cond, - int rd, int rn, int32_t offset) +static void tcg_out_st32(TCGContext *s, ARMCond cond, + TCGReg rd, TCGReg rn, int32_t offset) { if (offset > 0xfff || offset < -0xfff) { tcg_out_movi32(s, cond, TCG_REG_TMP, offset); @@ -1145,8 +1047,8 @@ static inline void tcg_out_st32(TCGContext *s, int cond, tcg_out_st32_12(s, cond, rd, rn, offset); } -static inline void tcg_out_ld16u(TCGContext *s, int cond, - int rd, int rn, int32_t offset) +static void tcg_out_ld16u(TCGContext *s, ARMCond cond, + TCGReg rd, 
TCGReg rn, int32_t offset) { if (offset > 0xff || offset < -0xff) { tcg_out_movi32(s, cond, TCG_REG_TMP, offset); @@ -1155,8 +1057,8 @@ static inline void tcg_out_ld16u(TCGContext *s, int cond, tcg_out_ld16u_8(s, cond, rd, rn, offset); } -static inline void tcg_out_ld16s(TCGContext *s, int cond, - int rd, int rn, int32_t offset) +static void tcg_out_ld16s(TCGContext *s, ARMCond cond, + TCGReg rd, TCGReg rn, int32_t offset) { if (offset > 0xff || offset < -0xff) { tcg_out_movi32(s, cond, TCG_REG_TMP, offset); @@ -1165,8 +1067,8 @@ static inline void tcg_out_ld16s(TCGContext *s, int cond, tcg_out_ld16s_8(s, cond, rd, rn, offset); } -static inline void tcg_out_st16(TCGContext *s, int cond, - int rd, int rn, int32_t offset) +static void tcg_out_st16(TCGContext *s, ARMCond cond, + TCGReg rd, TCGReg rn, int32_t offset) { if (offset > 0xff || offset < -0xff) { tcg_out_movi32(s, cond, TCG_REG_TMP, offset); @@ -1175,8 +1077,8 @@ static inline void tcg_out_st16(TCGContext *s, int cond, tcg_out_st16_8(s, cond, rd, rn, offset); } -static inline void tcg_out_ld8u(TCGContext *s, int cond, - int rd, int rn, int32_t offset) +static void tcg_out_ld8u(TCGContext *s, ARMCond cond, + TCGReg rd, TCGReg rn, int32_t offset) { if (offset > 0xfff || offset < -0xfff) { tcg_out_movi32(s, cond, TCG_REG_TMP, offset); @@ -1185,8 +1087,8 @@ static inline void tcg_out_ld8u(TCGContext *s, int cond, tcg_out_ld8_12(s, cond, rd, rn, offset); } -static inline void tcg_out_ld8s(TCGContext *s, int cond, - int rd, int rn, int32_t offset) +static void tcg_out_ld8s(TCGContext *s, ARMCond cond, + TCGReg rd, TCGReg rn, int32_t offset) { if (offset > 0xff || offset < -0xff) { tcg_out_movi32(s, cond, TCG_REG_TMP, offset); @@ -1195,8 +1097,8 @@ static inline void tcg_out_ld8s(TCGContext *s, int cond, tcg_out_ld8s_8(s, cond, rd, rn, offset); } -static inline void tcg_out_st8(TCGContext *s, int cond, - int rd, int rn, int32_t offset) +static void tcg_out_st8(TCGContext *s, ARMCond cond, + TCGReg rd, TCGReg rn, int32_t offset) { if (offset > 0xfff || offset < -0xfff) { tcg_out_movi32(s, cond, TCG_REG_TMP, offset); @@ -1205,64 +1107,64 @@ static inline void tcg_out_st8(TCGContext *s, int cond, tcg_out_st8_12(s, cond, rd, rn, offset); } -/* The _goto case is normally between TBs within the same code buffer, and +/* + * The _goto case is normally between TBs within the same code buffer, and * with the code buffer limited to 16MB we wouldn't need the long case. * But we also use it for the tail-call to the qemu_ld/st helpers, which does. */ -static void tcg_out_goto(TCGContext *s, int cond, const tcg_insn_unit *addr) +static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr) { intptr_t addri = (intptr_t)addr; ptrdiff_t disp = tcg_pcrel_diff(s, addr); + bool arm_mode = !(addri & 1); - if ((addri & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) { - tcg_out_b(s, cond, disp); + if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) { + tcg_out_b_imm(s, cond, disp); return; } + + /* LDR is interworking from v5t. */ tcg_out_movi_pool(s, cond, TCG_REG_PC, addri); } -/* The call case is mostly used for helpers - so it's not unreasonable - * for them to be beyond branch range */ +/* + * The call case is mostly used for helpers - so it's not unreasonable + * for them to be beyond branch range. 
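+ * The direct BL/BLX immediate used below reaches only +/- 32MB (disp is
+ * checked against 0x02000000); beyond that we load the address into
+ * TCG_REG_TMP and use BLX.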
+ */ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr) { intptr_t addri = (intptr_t)addr; ptrdiff_t disp = tcg_pcrel_diff(s, addr); + bool arm_mode = !(addri & 1); if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) { - if (addri & 1) { - /* Use BLX if the target is in Thumb mode */ - if (!use_armv5t_instructions) { - tcg_abort(); - } - tcg_out_blx_imm(s, disp); + if (arm_mode) { + tcg_out_bl_imm(s, COND_AL, disp); } else { - tcg_out_bl(s, COND_AL, disp); + tcg_out_blx_imm(s, disp); } - } else if (use_armv7_instructions) { - tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri); - tcg_out_blx(s, COND_AL, TCG_REG_TMP); - } else { - /* ??? Know that movi_pool emits exactly 1 insn. */ - tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 0); - tcg_out_movi_pool(s, COND_AL, TCG_REG_PC, addri); + return; } + + tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri); + tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP); } -static inline void tcg_out_goto_label(TCGContext *s, int cond, TCGLabel *l) +static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l) { if (l->has_value) { tcg_out_goto(s, cond, l->u.value_ptr); } else { tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0); - tcg_out_b(s, cond, 0); + tcg_out_b_imm(s, cond, 0); } } -static inline void tcg_out_mb(TCGContext *s, TCGArg a0) +static void tcg_out_mb(TCGContext *s, TCGArg a0) { if (use_armv7_instructions) { tcg_out32(s, INSN_DMB_ISH); - } else if (use_armv6_instructions) { + } else { tcg_out32(s, INSN_DMB_MCR); } } @@ -1388,24 +1290,22 @@ static void tcg_out_vldst(TCGContext *s, ARMInsn insn, } #ifdef CONFIG_SOFTMMU -#include "../tcg-ldst.c.inc" - /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, * int mmu_idx, uintptr_t ra) */ -static void * const qemu_ld_helpers[8] = { +static void * const qemu_ld_helpers[MO_SSIZE + 1] = { [MO_UB] = helper_ret_ldub_mmu, [MO_SB] = helper_ret_ldsb_mmu, -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN [MO_UW] = helper_be_lduw_mmu, [MO_UL] = helper_be_ldul_mmu, - [MO_Q] = helper_be_ldq_mmu, + [MO_UQ] = helper_be_ldq_mmu, [MO_SW] = helper_be_ldsw_mmu, [MO_SL] = helper_be_ldul_mmu, #else [MO_UW] = helper_le_lduw_mmu, [MO_UL] = helper_le_ldul_mmu, - [MO_Q] = helper_le_ldq_mmu, + [MO_UQ] = helper_le_ldq_mmu, [MO_SW] = helper_le_ldsw_mmu, [MO_SL] = helper_le_ldul_mmu, #endif @@ -1414,9 +1314,9 @@ static void * const qemu_ld_helpers[8] = { /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, * uintxx_t val, int mmu_idx, uintptr_t ra) */ -static void * const qemu_st_helpers[4] = { +static void * const qemu_st_helpers[MO_SIZE + 1] = { [MO_8] = helper_ret_stb_mmu, -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN [MO_16] = helper_be_stw_mmu, [MO_32] = helper_be_stl_mmu, [MO_64] = helper_be_stq_mmu, @@ -1468,8 +1368,7 @@ static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg, if (argreg & 1) { argreg++; } - if (use_armv6_instructions && argreg >= 4 - && (arglo & 1) == 0 && arghi == arglo + 1) { + if (argreg >= 4 && (arglo & 1) == 0 && arghi == arglo + 1) { tcg_out_strd_8(s, COND_AL, arglo, TCG_REG_CALL_STACK, (argreg - 4) * 4); return argreg + 2; @@ -1499,26 +1398,12 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, int cmp_off = (is_load ? 
offsetof(CPUTLBEntry, addr_read) : offsetof(CPUTLBEntry, addr_write)); int fast_off = TLB_MASK_TABLE_OFS(mem_index); - int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); - int table_off = fast_off + offsetof(CPUTLBDescFast, table); - unsigned s_bits = opc & MO_SIZE; - unsigned a_bits = get_alignment_bits(opc); - - /* - * We don't support inline unaligned acceses, but we can easily - * support overalignment checks. - */ - if (a_bits < s_bits) { - a_bits = s_bits; - } + unsigned s_mask = (1 << (opc & MO_SIZE)) - 1; + unsigned a_mask = (1 << get_alignment_bits(opc)) - 1; + TCGReg t_addr; /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. */ - if (use_armv6_instructions) { - tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off); - } else { - tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R0, TCG_AREG0, mask_off); - tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R1, TCG_AREG0, table_off); - } + tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off); /* Extract the tlb index from the address into R0. */ tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo, @@ -1529,7 +1414,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, * Load the tlb comparator into R2/R3 and the fast path addend into R1. */ if (cmp_off == 0) { - if (use_armv6_instructions && TARGET_LONG_BITS == 64) { + if (TARGET_LONG_BITS == 64) { tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); } else { tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); @@ -1537,15 +1422,12 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, } else { tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0); - if (use_armv6_instructions && TARGET_LONG_BITS == 64) { + if (TARGET_LONG_BITS == 64) { tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); } else { tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); } } - if (!use_armv6_instructions && TARGET_LONG_BITS == 64) { - tcg_out_ld32_12(s, COND_AL, TCG_REG_R3, TCG_REG_R1, cmp_off + 4); - } /* Load the tlb addend. */ tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1, @@ -1553,27 +1435,35 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, /* * Check alignment, check comparators. - * Do this in no more than 3 insns. Use MOVW for v7, if possible, + * Do this in 2-4 insns. Use MOVW for v7, if possible, * to reduce the number of sequential conditional instructions. * Almost all guests have at least 4k pages, which means that we need * to clear at least 9 bits even for an 8-byte memory, which means it * isn't worth checking for an immediate operand for BIC. + * + * For unaligned accesses, test the page of the last unit of alignment. + * This leaves the least significant alignment bits unchanged, and of + * course must be zero. 
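+     * E.g. an 8-byte access (s_mask = 7) with 4-byte alignment (a_mask = 3)
+     * adds s_mask - a_mask = 4 to addrlo, which lands in the same page as
+     * the final byte of the access whenever the alignment check passes.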
*/ + t_addr = addrlo; + if (a_mask < s_mask) { + t_addr = TCG_REG_R0; + tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr, + addrlo, s_mask - a_mask); + } if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) { - tcg_target_ulong mask = ~(TARGET_PAGE_MASK | ((1 << a_bits) - 1)); - - tcg_out_movi32(s, COND_AL, TCG_REG_TMP, mask); + tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask)); tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP, - addrlo, TCG_REG_TMP, 0); + t_addr, TCG_REG_TMP, 0); tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0); } else { - if (a_bits) { - tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, - (1 << a_bits) - 1); + if (a_mask) { + tcg_debug_assert(a_mask <= 0xff); + tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); } - tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, addrlo, + tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr, SHIFT_IMM_LSR(TARGET_PAGE_BITS)); - tcg_out_dat_reg(s, (a_bits ? COND_EQ : COND_AL), ARITH_CMP, + tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS)); } @@ -1588,7 +1478,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, /* Record the context of a call to the out of line helper code for the slow path for a load or store, so that we can later generate the correct helper code. */ -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addrhi, tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) @@ -1608,9 +1498,8 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg argreg, datalo, datahi; - TCGMemOpIdx oi = lb->oi; + MemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); - void *func; if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { return false; @@ -1625,18 +1514,8 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) argreg = tcg_out_arg_imm32(s, argreg, oi); argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14); - /* For armv6 we can use the canonical unsigned helpers and minimize - icache usage. For pre-armv6, use the signed helpers since we do - not have a single insn sign-extend. */ - if (use_armv6_instructions) { - func = qemu_ld_helpers[opc & MO_SIZE]; - } else { - func = qemu_ld_helpers[opc & MO_SSIZE]; - if (opc & MO_SIGN) { - opc = MO_UL; - } - } - tcg_out_call(s, func); + /* Use the canonical unsigned helpers and minimize icache usage. 
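+     * Any MO_SB/MO_SW sign extension is applied inline below, on the
+     * value the helper returns in R0.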
*/ + tcg_out_call(s, qemu_ld_helpers[opc & MO_SIZE]); datalo = lb->datalo_reg; datahi = lb->datahi_reg; @@ -1650,7 +1529,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) default: tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0); break; - case MO_Q: + case MO_UQ: if (datalo != TCG_REG_R1) { tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0); tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1); @@ -1672,7 +1551,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg argreg, datalo, datahi; - TCGMemOpIdx oi = lb->oi; + MemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { @@ -1712,11 +1591,80 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]); return true; } +#else + +static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo, + TCGReg addrhi, unsigned a_bits) +{ + unsigned a_mask = (1 << a_bits) - 1; + TCGLabelQemuLdst *label = new_ldst_label(s); + + label->is_ld = is_ld; + label->addrlo_reg = addrlo; + label->addrhi_reg = addrhi; + + /* We are expecting a_bits to max out at 7, and can easily support 8. */ + tcg_debug_assert(a_mask <= 0xff); + /* tst addr, #mask */ + tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); + + /* blne slow_path */ + label->label_ptr[0] = s->code_ptr; + tcg_out_bl_imm(s, COND_NE, 0); + + label->raddr = tcg_splitwx_to_rx(s->code_ptr); +} + +static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) +{ + if (!reloc_pc24(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { + return false; + } + + if (TARGET_LONG_BITS == 64) { + /* 64-bit target address is aligned into R2:R3. */ + if (l->addrhi_reg != TCG_REG_R2) { + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg); + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg); + } else if (l->addrlo_reg != TCG_REG_R3) { + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg); + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg); + } else { + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, TCG_REG_R2); + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, TCG_REG_R3); + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, TCG_REG_R1); + } + } else { + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, l->addrlo_reg); + } + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_AREG0); + + /* + * Tail call to the helper, with the return address back inline, + * just for the clarity of the debugging traceback -- the helper + * cannot return. We have used BLNE to arrive here, so LR is + * already set. + */ + tcg_out_goto(s, COND_AL, (const void *) + (l->is_ld ? helper_unaligned_ld : helper_unaligned_st)); + return true; +} + +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + return tcg_out_fail_alignment(s, l); +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + return tcg_out_fail_alignment(s, l); +} #endif /* SOFTMMU */ -static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, - TCGReg datalo, TCGReg datahi, - TCGReg addrlo, TCGReg addend) +static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, + TCGReg datalo, TCGReg datahi, + TCGReg addrlo, TCGReg addend, + bool scratch_addend) { /* Byte swapping is left to middle-end expansion. 
*/ tcg_debug_assert((opc & MO_BSWAP) == 0); @@ -1737,12 +1685,25 @@ static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, case MO_UL: tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend); break; - case MO_Q: - /* Avoid ldrd for user-only emulation, to handle unaligned. */ - if (USING_SOFTMMU && use_armv6_instructions + case MO_UQ: + /* LDRD requires alignment; double-check that. */ + if (get_alignment_bits(opc) >= MO_64 && (datalo & 1) == 0 && datahi == datalo + 1) { - tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend); - } else if (datalo != addend) { + /* + * Rm (the second address op) must not overlap Rt or Rt + 1. + * Since datalo is aligned, we can simplify the test via alignment. + * Flip the two address arguments if that works. + */ + if ((addend & ~1) != datalo) { + tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend); + break; + } + if ((addrlo & ~1) != datalo) { + tcg_out_ldrd_r(s, COND_AL, datalo, addend, addrlo); + break; + } + } + if (scratch_addend) { tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo); tcg_out_ld32_12(s, COND_AL, datahi, addend, 4); } else { @@ -1757,9 +1718,9 @@ static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, } } -static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, - TCGReg datalo, TCGReg datahi, - TCGReg addrlo) +#ifndef CONFIG_SOFTMMU +static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, + TCGReg datahi, TCGReg addrlo) { /* Byte swapping is left to middle-end expansion. */ tcg_debug_assert((opc & MO_BSWAP) == 0); @@ -1780,9 +1741,9 @@ static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, case MO_UL: tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0); break; - case MO_Q: - /* Avoid ldrd for user-only emulation, to handle unaligned. */ - if (USING_SOFTMMU && use_armv6_instructions + case MO_UQ: + /* LDRD requires alignment; double-check that. */ + if (get_alignment_bits(opc) >= MO_64 && (datalo & 1) == 0 && datahi == datalo + 1) { tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0); } else if (datalo == addrlo) { @@ -1797,16 +1758,19 @@ static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, g_assert_not_reached(); } } +#endif static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) { TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc; #ifdef CONFIG_SOFTMMU int mem_index; TCGReg addend; tcg_insn_unit *label_ptr; +#else + unsigned a_bits; #endif datalo = *args++; @@ -1823,25 +1787,30 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) /* This a conditional BL only to load a pointer within this opcode into LR for the slow path. We will not be using the value for a tail call. 
*/ label_ptr = s->code_ptr; - tcg_out_bl(s, COND_NE, 0); + tcg_out_bl_imm(s, COND_NE, 0); - tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend); + tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend, true); add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi, s->code_ptr, label_ptr); #else /* !CONFIG_SOFTMMU */ + a_bits = get_alignment_bits(opc); + if (a_bits) { + tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); + } if (guest_base) { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base); - tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP); + tcg_out_qemu_ld_index(s, opc, datalo, datahi, + addrlo, TCG_REG_GUEST_BASE, false); } else { tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo); } #endif } -static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc, - TCGReg datalo, TCGReg datahi, - TCGReg addrlo, TCGReg addend) +static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc, + TCGReg datalo, TCGReg datahi, + TCGReg addrlo, TCGReg addend, + bool scratch_addend) { /* Byte swapping is left to middle-end expansion. */ tcg_debug_assert((opc & MO_BSWAP) == 0); @@ -1857,13 +1826,18 @@ static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc, tcg_out_st32_r(s, cond, datalo, addrlo, addend); break; case MO_64: - /* Avoid strd for user-only emulation, to handle unaligned. */ - if (USING_SOFTMMU && use_armv6_instructions + /* STRD requires alignment; double-check that. */ + if (get_alignment_bits(opc) >= MO_64 && (datalo & 1) == 0 && datahi == datalo + 1) { tcg_out_strd_r(s, cond, datalo, addrlo, addend); - } else { + } else if (scratch_addend) { tcg_out_st32_rwb(s, cond, datalo, addend, addrlo); tcg_out_st32_12(s, cond, datahi, addend, 4); + } else { + tcg_out_dat_reg(s, cond, ARITH_ADD, TCG_REG_TMP, + addend, addrlo, SHIFT_IMM_LSL(0)); + tcg_out_st32_12(s, cond, datalo, TCG_REG_TMP, 0); + tcg_out_st32_12(s, cond, datahi, TCG_REG_TMP, 4); } break; default: @@ -1871,9 +1845,9 @@ static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc, } } -static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, - TCGReg datalo, TCGReg datahi, - TCGReg addrlo) +#ifndef CONFIG_SOFTMMU +static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, + TCGReg datahi, TCGReg addrlo) { /* Byte swapping is left to middle-end expansion. */ tcg_debug_assert((opc & MO_BSWAP) == 0); @@ -1889,8 +1863,8 @@ static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0); break; case MO_64: - /* Avoid strd for user-only emulation, to handle unaligned. */ - if (USING_SOFTMMU && use_armv6_instructions + /* STRD requires alignment; double-check that. 
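+         * STRD also requires an even-numbered first register with the pair
+         * in consecutive registers, hence the (datalo & 1) == 0 &&
+         * datahi == datalo + 1 test below.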
*/ + if (get_alignment_bits(opc) >= MO_64 && (datalo & 1) == 0 && datahi == datalo + 1) { tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0); } else { @@ -1902,16 +1876,19 @@ static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, g_assert_not_reached(); } } +#endif static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) { TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc; #ifdef CONFIG_SOFTMMU int mem_index; TCGReg addend; tcg_insn_unit *label_ptr; +#else + unsigned a_bits; #endif datalo = *args++; @@ -1925,19 +1902,23 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) mem_index = get_mmuidx(oi); addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0); - tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend); + tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, + addrlo, addend, true); /* The conditional call must come last, as we're going to return here. */ label_ptr = s->code_ptr; - tcg_out_bl(s, COND_NE, 0); + tcg_out_bl_imm(s, COND_NE, 0); add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi, s->code_ptr, label_ptr); #else /* !CONFIG_SOFTMMU */ + a_bits = get_alignment_bits(opc); + if (a_bits) { + tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits); + } if (guest_base) { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base); - tcg_out_qemu_st_index(s, COND_AL, opc, datalo, - datahi, addrlo, TCG_REG_TMP); + tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi, + addrlo, TCG_REG_GUEST_BASE, false); } else { tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo); } @@ -1946,9 +1927,9 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) static void tcg_out_epilogue(TCGContext *s); -static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, - const TCGArg args[TCG_MAX_OP_ARGS], - const int const_args[TCG_MAX_OP_ARGS]) +static void tcg_out_op(TCGContext *s, TCGOpcode opc, + const TCGArg args[TCG_MAX_OP_ARGS], + const int const_args[TCG_MAX_OP_ARGS]) { TCGArg a0, a1, a2, a3, a4, a5; int c; @@ -1982,7 +1963,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, } break; case INDEX_op_goto_ptr: - tcg_out_bx(s, COND_AL, args[0]); + tcg_out_b_reg(s, COND_AL, args[0]); break; case INDEX_op_br: tcg_out_goto_label(s, COND_AL, arg_label(args[0])); @@ -2428,6 +2409,11 @@ static void tcg_target_init(TCGContext *s) if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') { arm_arch = pl[1] - '0'; } + + if (arm_arch < 6) { + error_report("TCG: ARMv%d is unsupported; exiting", arm_arch); + exit(EXIT_FAILURE); + } } tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; @@ -2477,8 +2463,13 @@ static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2); return; case TCG_TYPE_V128: - /* regs 2; size 8; align 16 */ - tcg_out_vldst(s, INSN_VLD1 | 0xae0, arg, arg1, arg2); + /* + * We have only 8-byte alignment for the stack per the ABI. + * Rather than dynamically re-align the stack, it's easier + * to simply not request alignment beyond that. 
So: + * regs 2; size 8; align 8 + */ + tcg_out_vldst(s, INSN_VLD1 | 0xad0, arg, arg1, arg2); return; default: g_assert_not_reached(); @@ -2497,16 +2488,16 @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2); return; case TCG_TYPE_V128: - /* regs 2; size 8; align 16 */ - tcg_out_vldst(s, INSN_VST1 | 0xae0, arg, arg1, arg2); + /* See tcg_out_ld re alignment: regs 2; size 8; align 8 */ + tcg_out_vldst(s, INSN_VST1 | 0xad0, arg, arg1, arg2); return; default: g_assert_not_reached(); } } -static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, - TCGReg base, intptr_t ofs) +static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, + TCGReg base, intptr_t ofs) { return false; } @@ -2715,7 +2706,8 @@ static const ARMInsn vec_cmp0_insn[16] = { static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl, unsigned vece, - const TCGArg *args, const int *const_args) + const TCGArg args[TCG_MAX_OP_ARGS], + const int const_args[TCG_MAX_OP_ARGS]) { TCGType type = vecl + TCG_TYPE_V64; unsigned q = vecl; @@ -3055,7 +3047,10 @@ static void tcg_target_qemu_prologue(TCGContext *s) { /* Calling convention requires us to save r4-r11 and lr. */ /* stmdb sp!, { r4 - r11, lr } */ - tcg_out32(s, (COND_AL << 28) | 0x092d4ff0); + tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK, + (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) | + (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) | + (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14)); /* Reserve callee argument and tcg temp space. */ tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK, @@ -3065,7 +3060,14 @@ static void tcg_target_qemu_prologue(TCGContext *s) tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); - tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]); +#ifndef CONFIG_SOFTMMU + if (guest_base) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); + } +#endif + + tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]); /* * Return path for goto_ptr. Set return value to 0, a-la exit_tb, @@ -3083,7 +3085,10 @@ static void tcg_out_epilogue(TCGContext *s) TCG_REG_CALL_STACK, STACK_ADDEND, 1); /* ldmia sp!, { r4 - r11, pc } */ - tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0); + tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK, + (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) | + (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) | + (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC)); } typedef struct { diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h index d113b7f8db..7e96495392 100644 --- a/tcg/arm/tcg-target.h +++ b/tcg/arm/tcg-target.h @@ -26,35 +26,8 @@ #ifndef ARM_TCG_TARGET_H #define ARM_TCG_TARGET_H -/* The __ARM_ARCH define is provided by gcc 4.8. Construct it otherwise. 
*/ -#ifndef __ARM_ARCH -# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ - || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ - || defined(__ARM_ARCH_7EM__) -# define __ARM_ARCH 7 -# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ - || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \ - || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__) -# define __ARM_ARCH 6 -# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5E__) \ - || defined(__ARM_ARCH_5T__) || defined(__ARM_ARCH_5TE__) \ - || defined(__ARM_ARCH_5TEJ__) -# define __ARM_ARCH 5 -# else -# define __ARM_ARCH 4 -# endif -#endif - extern int arm_arch; -#if defined(__ARM_ARCH_5T__) \ - || defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__) -# define use_armv5t_instructions 1 -#else -# define use_armv5t_instructions use_armv6_instructions -#endif - -#define use_armv6_instructions (__ARM_ARCH >= 6 || arm_arch >= 6) #define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7) #undef TCG_TARGET_STACK_GROWSUP @@ -134,7 +107,7 @@ extern bool use_neon_instructions; #define TCG_TARGET_HAS_eqv_i32 0 #define TCG_TARGET_HAS_nand_i32 0 #define TCG_TARGET_HAS_nor_i32 0 -#define TCG_TARGET_HAS_clz_i32 use_armv5t_instructions +#define TCG_TARGET_HAS_clz_i32 1 #define TCG_TARGET_HAS_ctz_i32 use_armv7_instructions #define TCG_TARGET_HAS_ctpop_i32 0 #define TCG_TARGET_HAS_deposit_i32 use_armv7_instructions @@ -157,6 +130,9 @@ extern bool use_neon_instructions; #define TCG_TARGET_HAS_andc_vec 1 #define TCG_TARGET_HAS_orc_vec 1 +#define TCG_TARGET_HAS_nand_vec 0 +#define TCG_TARGET_HAS_nor_vec 0 +#define TCG_TARGET_HAS_eqv_vec 0 #define TCG_TARGET_HAS_not_vec 1 #define TCG_TARGET_HAS_neg_vec 1 #define TCG_TARGET_HAS_abs_vec 1 @@ -178,9 +154,7 @@ extern bool use_neon_instructions; /* not defined -- call should be eliminated at compile time */ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t); -#ifdef CONFIG_SOFTMMU #define TCG_TARGET_NEED_LDST_LABELS -#endif #define TCG_TARGET_NEED_POOL_LABELS #endif diff --git a/tcg/i386/tcg-target-con-set.h b/tcg/i386/tcg-target-con-set.h index 369e38d153..d121ace0c4 100644 --- a/tcg/i386/tcg-target-con-set.h +++ b/tcg/i386/tcg-target-con-set.h @@ -49,6 +49,7 @@ C_O1_I2(x, x, x) C_O1_I2(r, x, x) C_N1_I2(r, r, r) C_N1_I2(r, r, rW) +C_O1_I3(x, 0, x, x) C_O1_I3(x, x, x, x) C_O1_I4(r, r, re, r, 0) C_O1_I4(r, r, r, ri, ri) diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index f552607486..0c0940e49b 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -22,6 +22,7 @@ * THE SOFTWARE. 
*/ +#include "../tcg-ldst.c.inc" #include "../tcg-pool.c.inc" #ifdef CONFIG_DEBUG_TCG @@ -170,6 +171,10 @@ bool have_bmi1; bool have_popcnt; bool have_avx1; bool have_avx2; +bool have_avx512bw; +bool have_avx512dq; +bool have_avx512vbmi2; +bool have_avx512vl; bool have_movbe; #ifdef CONFIG_CPUID_H @@ -241,8 +246,9 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct) #define P_EXT 0x100 /* 0x0f opcode prefix */ #define P_EXT38 0x200 /* 0x0f 0x38 opcode prefix */ #define P_DATA16 0x400 /* 0x66 opcode prefix */ +#define P_VEXW 0x1000 /* Set VEX.W = 1 */ #if TCG_TARGET_REG_BITS == 64 -# define P_REXW 0x1000 /* Set REX.W = 1 */ +# define P_REXW P_VEXW /* Set REX.W = 1; match VEXW */ # define P_REXB_R 0x2000 /* REG field as byte register */ # define P_REXB_RM 0x4000 /* R/M field as byte register */ # define P_GS 0x8000 /* gs segment override */ @@ -256,6 +262,7 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct) #define P_SIMDF3 0x20000 /* 0xf3 opcode prefix */ #define P_SIMDF2 0x40000 /* 0xf2 opcode prefix */ #define P_VEXL 0x80000 /* Set VEX.L = 1 */ +#define P_EVEX 0x100000 /* Requires EVEX encoding */ #define OPC_ARITH_EvIz (0x81) #define OPC_ARITH_EvIb (0x83) @@ -327,6 +334,7 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct) #define OPC_PABSB (0x1c | P_EXT38 | P_DATA16) #define OPC_PABSW (0x1d | P_EXT38 | P_DATA16) #define OPC_PABSD (0x1e | P_EXT38 | P_DATA16) +#define OPC_VPABSQ (0x1f | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) #define OPC_PACKSSDW (0x6b | P_EXT | P_DATA16) #define OPC_PACKSSWB (0x63 | P_EXT | P_DATA16) #define OPC_PACKUSDW (0x2b | P_EXT38 | P_DATA16) @@ -353,15 +361,19 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct) #define OPC_PMAXSB (0x3c | P_EXT38 | P_DATA16) #define OPC_PMAXSW (0xee | P_EXT | P_DATA16) #define OPC_PMAXSD (0x3d | P_EXT38 | P_DATA16) +#define OPC_VPMAXSQ (0x3d | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) #define OPC_PMAXUB (0xde | P_EXT | P_DATA16) #define OPC_PMAXUW (0x3e | P_EXT38 | P_DATA16) #define OPC_PMAXUD (0x3f | P_EXT38 | P_DATA16) +#define OPC_VPMAXUQ (0x3f | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) #define OPC_PMINSB (0x38 | P_EXT38 | P_DATA16) #define OPC_PMINSW (0xea | P_EXT | P_DATA16) #define OPC_PMINSD (0x39 | P_EXT38 | P_DATA16) +#define OPC_VPMINSQ (0x39 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) #define OPC_PMINUB (0xda | P_EXT | P_DATA16) #define OPC_PMINUW (0x3a | P_EXT38 | P_DATA16) #define OPC_PMINUD (0x3b | P_EXT38 | P_DATA16) +#define OPC_VPMINUQ (0x3b | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) #define OPC_PMOVSXBW (0x20 | P_EXT38 | P_DATA16) #define OPC_PMOVSXWD (0x23 | P_EXT38 | P_DATA16) #define OPC_PMOVSXDQ (0x25 | P_EXT38 | P_DATA16) @@ -370,19 +382,21 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct) #define OPC_PMOVZXDQ (0x35 | P_EXT38 | P_DATA16) #define OPC_PMULLW (0xd5 | P_EXT | P_DATA16) #define OPC_PMULLD (0x40 | P_EXT38 | P_DATA16) +#define OPC_VPMULLQ (0x40 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) #define OPC_POR (0xeb | P_EXT | P_DATA16) #define OPC_PSHUFB (0x00 | P_EXT38 | P_DATA16) #define OPC_PSHUFD (0x70 | P_EXT | P_DATA16) #define OPC_PSHUFLW (0x70 | P_EXT | P_SIMDF2) #define OPC_PSHUFHW (0x70 | P_EXT | P_SIMDF3) #define OPC_PSHIFTW_Ib (0x71 | P_EXT | P_DATA16) /* /2 /6 /4 */ -#define OPC_PSHIFTD_Ib (0x72 | P_EXT | P_DATA16) /* /2 /6 /4 */ +#define OPC_PSHIFTD_Ib (0x72 | P_EXT | P_DATA16) /* /1 /2 /6 /4 */ #define OPC_PSHIFTQ_Ib (0x73 | P_EXT | P_DATA16) /* /2 /6 /4 */ #define OPC_PSLLW (0xf1 | P_EXT | P_DATA16) 
#define OPC_PSLLD (0xf2 | P_EXT | P_DATA16) #define OPC_PSLLQ (0xf3 | P_EXT | P_DATA16) #define OPC_PSRAW (0xe1 | P_EXT | P_DATA16) #define OPC_PSRAD (0xe2 | P_EXT | P_DATA16) +#define OPC_VPSRAQ (0xe2 | P_EXT | P_DATA16 | P_VEXW | P_EVEX) #define OPC_PSRLW (0xd1 | P_EXT | P_DATA16) #define OPC_PSRLD (0xd2 | P_EXT | P_DATA16) #define OPC_PSRLQ (0xd3 | P_EXT | P_DATA16) @@ -436,18 +450,37 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct) #define OPC_VPBROADCASTW (0x79 | P_EXT38 | P_DATA16) #define OPC_VPBROADCASTD (0x58 | P_EXT38 | P_DATA16) #define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16) -#define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_REXW) +#define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_VEXW) #define OPC_VPERM2I128 (0x46 | P_EXT3A | P_DATA16 | P_VEXL) +#define OPC_VPROLVD (0x15 | P_EXT38 | P_DATA16 | P_EVEX) +#define OPC_VPROLVQ (0x15 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) +#define OPC_VPRORVD (0x14 | P_EXT38 | P_DATA16 | P_EVEX) +#define OPC_VPRORVQ (0x14 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) +#define OPC_VPSHLDW (0x70 | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX) +#define OPC_VPSHLDD (0x71 | P_EXT3A | P_DATA16 | P_EVEX) +#define OPC_VPSHLDQ (0x71 | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX) +#define OPC_VPSHLDVW (0x70 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) +#define OPC_VPSHLDVD (0x71 | P_EXT38 | P_DATA16 | P_EVEX) +#define OPC_VPSHLDVQ (0x71 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) +#define OPC_VPSHRDVW (0x72 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) +#define OPC_VPSHRDVD (0x73 | P_EXT38 | P_DATA16 | P_EVEX) +#define OPC_VPSHRDVQ (0x73 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) +#define OPC_VPSLLVW (0x12 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) #define OPC_VPSLLVD (0x47 | P_EXT38 | P_DATA16) -#define OPC_VPSLLVQ (0x47 | P_EXT38 | P_DATA16 | P_REXW) +#define OPC_VPSLLVQ (0x47 | P_EXT38 | P_DATA16 | P_VEXW) +#define OPC_VPSRAVW (0x11 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) #define OPC_VPSRAVD (0x46 | P_EXT38 | P_DATA16) +#define OPC_VPSRAVQ (0x46 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) +#define OPC_VPSRLVW (0x10 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) #define OPC_VPSRLVD (0x45 | P_EXT38 | P_DATA16) -#define OPC_VPSRLVQ (0x45 | P_EXT38 | P_DATA16 | P_REXW) +#define OPC_VPSRLVQ (0x45 | P_EXT38 | P_DATA16 | P_VEXW) +#define OPC_VPTERNLOGQ (0x25 | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX) #define OPC_VZEROUPPER (0x77 | P_EXT) #define OPC_XCHG_ax_r32 (0x90) -#define OPC_GRP3_Ev (0xf7) -#define OPC_GRP5 (0xff) +#define OPC_GRP3_Eb (0xf6) +#define OPC_GRP3_Ev (0xf7) +#define OPC_GRP5 (0xff) #define OPC_GRP14 (0x73 | P_EXT | P_DATA16) /* Group 1 opcode extensions for 0x80-0x83. @@ -469,6 +502,7 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct) #define SHIFT_SAR 7 /* Group 3 opcode extensions for 0xf6, 0xf7. To be used with OPC_GRP3. */ +#define EXT3_TESTi 0 #define EXT3_NOT 2 #define EXT3_NEG 3 #define EXT3_MUL 4 @@ -600,7 +634,7 @@ static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v, /* Use the two byte form if possible, which cannot encode VEX.W, VEX.B, VEX.X, or an m-mmmm field other than P_EXT. */ - if ((opc & (P_EXT | P_EXT38 | P_EXT3A | P_REXW)) == P_EXT + if ((opc & (P_EXT | P_EXT38 | P_EXT3A | P_VEXW)) == P_EXT && ((rm | index) & 8) == 0) { /* Two byte VEX prefix. */ tcg_out8(s, 0xc5); @@ -625,7 +659,7 @@ static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v, tmp |= (rm & 8 ? 0 : 0x20); /* VEX.B */ tcg_out8(s, tmp); - tmp = (opc & P_REXW ? 0x80 : 0); /* VEX.W */ + tmp = (opc & P_VEXW ? 
0x80 : 0); /* VEX.W */ } tmp |= (opc & P_VEXL ? 0x04 : 0); /* VEX.L */ @@ -642,9 +676,57 @@ static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v, tcg_out8(s, opc); } +static void tcg_out_evex_opc(TCGContext *s, int opc, int r, int v, + int rm, int index) +{ + /* The entire 4-byte evex prefix; with R' and V' set. */ + uint32_t p = 0x08041062; + int mm, pp; + + tcg_debug_assert(have_avx512vl); + + /* EVEX.mm */ + if (opc & P_EXT3A) { + mm = 3; + } else if (opc & P_EXT38) { + mm = 2; + } else if (opc & P_EXT) { + mm = 1; + } else { + g_assert_not_reached(); + } + + /* EVEX.pp */ + if (opc & P_DATA16) { + pp = 1; /* 0x66 */ + } else if (opc & P_SIMDF3) { + pp = 2; /* 0xf3 */ + } else if (opc & P_SIMDF2) { + pp = 3; /* 0xf2 */ + } else { + pp = 0; + } + + p = deposit32(p, 8, 2, mm); + p = deposit32(p, 13, 1, (rm & 8) == 0); /* EVEX.RXB.B */ + p = deposit32(p, 14, 1, (index & 8) == 0); /* EVEX.RXB.X */ + p = deposit32(p, 15, 1, (r & 8) == 0); /* EVEX.RXB.R */ + p = deposit32(p, 16, 2, pp); + p = deposit32(p, 19, 4, ~v); + p = deposit32(p, 23, 1, (opc & P_VEXW) != 0); + p = deposit32(p, 29, 2, (opc & P_VEXL) != 0); + + tcg_out32(s, p); + tcg_out8(s, opc); +} + static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm) { - tcg_out_vex_opc(s, opc, r, v, rm, 0); + if (opc & P_EVEX) { + tcg_out_evex_opc(s, opc, r, v, rm, 0); + } else { + tcg_out_vex_opc(s, opc, r, v, rm, 0); + } tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm)); } @@ -1626,7 +1708,7 @@ static void tcg_out_branch(TCGContext *s, int call, const tcg_insn_unit *dest) } else { /* rip-relative addressing into the constant pool. This is 6 + 8 = 14 bytes, as compared to using an - an immediate load 10 + 6 = 16 bytes, plus we may + immediate load 10 + 6 = 16 bytes, plus we may be able to re-use the pool constant for more calls. */ tcg_out_opc(s, OPC_GRP5, 0, 0, 0); tcg_out8(s, (call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev) << 3 | 5); @@ -1661,32 +1743,30 @@ static void tcg_out_nopn(TCGContext *s, int n) } #if defined(CONFIG_SOFTMMU) -#include "../tcg-ldst.c.inc" - /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, * int mmu_idx, uintptr_t ra) */ -static void * const qemu_ld_helpers[16] = { +static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = { [MO_UB] = helper_ret_ldub_mmu, [MO_LEUW] = helper_le_lduw_mmu, [MO_LEUL] = helper_le_ldul_mmu, - [MO_LEQ] = helper_le_ldq_mmu, + [MO_LEUQ] = helper_le_ldq_mmu, [MO_BEUW] = helper_be_lduw_mmu, [MO_BEUL] = helper_be_ldul_mmu, - [MO_BEQ] = helper_be_ldq_mmu, + [MO_BEUQ] = helper_be_ldq_mmu, }; /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, * uintxx_t val, int mmu_idx, uintptr_t ra) */ -static void * const qemu_st_helpers[16] = { +static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = { [MO_UB] = helper_ret_stb_mmu, [MO_LEUW] = helper_le_stw_mmu, [MO_LEUL] = helper_le_stl_mmu, - [MO_LEQ] = helper_le_stq_mmu, + [MO_LEUQ] = helper_le_stq_mmu, [MO_BEUW] = helper_be_stw_mmu, [MO_BEUL] = helper_be_stl_mmu, - [MO_BEQ] = helper_be_stq_mmu, + [MO_BEUQ] = helper_be_stq_mmu, }; /* Perform the TLB load and compare. 
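
For reference, a minimal self-contained sketch of the 4-byte EVEX prefix packing
performed by tcg_out_evex_opc() in the hunks above; the bit positions mirror that
code, and deposit32() is re-modeled locally (assumed to match QEMU's bitops helper)
so the snippet stands alone:

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for QEMU's deposit32(): insert 'field' at bits [start, start+len). */
static uint32_t deposit32(uint32_t val, int start, int len, uint32_t field)
{
    uint32_t mask = ((1u << len) - 1u) << start;    /* len < 32 in this sketch */
    return (val & ~mask) | ((field << start) & mask);
}

/* Pack 0x62 escape + P0 (RXB/R'/mm) + P1 (W/vvvv/pp) + P2 (V', ...); w and l are 0 or 1. */
static uint32_t evex_prefix(int r, int v, int rm, int index,
                            int mm, int pp, int w, int l)
{
    uint32_t p = 0x08041062;                    /* escape byte 0x62; R' and V' preset */

    p = deposit32(p, 8, 2, mm);                 /* EVEX.mm: 1=0F, 2=0F38, 3=0F3A */
    p = deposit32(p, 13, 1, (rm & 8) == 0);     /* EVEX.B (inverted) */
    p = deposit32(p, 14, 1, (index & 8) == 0);  /* EVEX.X (inverted) */
    p = deposit32(p, 15, 1, (r & 8) == 0);      /* EVEX.R (inverted) */
    p = deposit32(p, 16, 2, pp);                /* EVEX.pp: 1=0x66, 2=0xf3, 3=0xf2 */
    p = deposit32(p, 19, 4, ~v);                /* EVEX.vvvv (inverted) */
    p = deposit32(p, 23, 1, w);                 /* EVEX.W */
    p = deposit32(p, 29, 2, l);                 /* EVEX.L'L vector length */
    return p;                                   /* tcg_out32() emits this, 0x62 first */
}

int main(void)
{
    /* Example operands only; the register numbers are arbitrary for illustration. */
    printf("evex = %08x\n", evex_prefix(1, 2, 3, 0, 2, 1, 1, 0));
    return 0;
}
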
@@ -1796,7 +1876,7 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi, * for a load or store, so that we can later generate the correct helper code */ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64, - TCGMemOpIdx oi, + MemOpIdx oi, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addrhi, tcg_insn_unit *raddr, @@ -1823,7 +1903,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64, */ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { - TCGMemOpIdx oi = l->oi; + MemOpIdx oi = l->oi; MemOp opc = get_memop(oi); TCGReg data_reg; tcg_insn_unit **label_ptr = &l->label_ptr[0]; @@ -1882,7 +1962,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) case MO_UL: tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX); break; - case MO_Q: + case MO_UQ: if (TCG_TARGET_REG_BITS == 64) { tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX); } else if (data_reg == TCG_REG_EDX) { @@ -1908,7 +1988,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) */ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { - TCGMemOpIdx oi = l->oi; + MemOpIdx oi = l->oi; MemOp opc = get_memop(oi); MemOp s_bits = opc & MO_SIZE; tcg_insn_unit **label_ptr = &l->label_ptr[0]; @@ -1971,7 +2051,84 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); return true; } -#elif TCG_TARGET_REG_BITS == 32 +#else + +static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo, + TCGReg addrhi, unsigned a_bits) +{ + unsigned a_mask = (1 << a_bits) - 1; + TCGLabelQemuLdst *label; + + /* + * We are expecting a_bits to max out at 7, so we can usually use testb. + * For i686, we have to use testl for %esi/%edi. + */ + if (a_mask <= 0xff && (TCG_TARGET_REG_BITS == 64 || addrlo < 4)) { + tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, addrlo); + tcg_out8(s, a_mask); + } else { + tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, addrlo); + tcg_out32(s, a_mask); + } + + /* jne slow_path */ + tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); + + label = new_ldst_label(s); + label->is_ld = is_ld; + label->addrlo_reg = addrlo; + label->addrhi_reg = addrhi; + label->raddr = tcg_splitwx_to_rx(s->code_ptr + 4); + label->label_ptr[0] = s->code_ptr; + + s->code_ptr += 4; +} + +static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) +{ + /* resolve label address */ + tcg_patch32(l->label_ptr[0], s->code_ptr - l->label_ptr[0] - 4); + + if (TCG_TARGET_REG_BITS == 32) { + int ofs = 0; + + tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs); + ofs += 4; + + tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs); + ofs += 4; + if (TARGET_LONG_BITS == 64) { + tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs); + ofs += 4; + } + + tcg_out_pushi(s, (uintptr_t)l->raddr); + } else { + tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1], + l->addrlo_reg); + tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); + + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RAX, (uintptr_t)l->raddr); + tcg_out_push(s, TCG_REG_RAX); + } + + /* "Tail call" to the helper, with the return address back inline. */ + tcg_out_jmp(s, (const void *)(l->is_ld ? 
helper_unaligned_ld + : helper_unaligned_st)); + return true; +} + +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + return tcg_out_fail_alignment(s, l); +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + return tcg_out_fail_alignment(s, l); +} + +#if TCG_TARGET_REG_BITS == 32 # define x86_guest_base_seg 0 # define x86_guest_base_index -1 # define x86_guest_base_offset guest_base @@ -2005,6 +2162,7 @@ static inline int setup_guest_base_seg(void) return 0; } # endif +#endif #endif /* SOFTMMU */ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, @@ -2074,7 +2232,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, } break; #endif - case MO_Q: + case MO_UQ: if (TCG_TARGET_REG_BITS == 64) { tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo, base, index, 0, ofs); @@ -2109,11 +2267,13 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) { TCGReg datalo, datahi, addrlo; TCGReg addrhi __attribute__((unused)); - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc; #if defined(CONFIG_SOFTMMU) int mem_index; tcg_insn_unit *label_ptr[2]; +#else + unsigned a_bits; #endif datalo = *args++; @@ -2136,6 +2296,11 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi, s->code_ptr, label_ptr); #else + a_bits = get_alignment_bits(opc); + if (a_bits) { + tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); + } + tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index, x86_guest_base_offset, x86_guest_base_seg, is64, opc); @@ -2198,11 +2363,13 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) { TCGReg datalo, datahi, addrlo; TCGReg addrhi __attribute__((unused)); - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc; #if defined(CONFIG_SOFTMMU) int mem_index; tcg_insn_unit *label_ptr[2]; +#else + unsigned a_bits; #endif datalo = *args++; @@ -2225,6 +2392,11 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) add_qemu_ldst_label(s, false, is64, oi, datalo, datahi, addrlo, addrhi, s->code_ptr, label_ptr); #else + a_bits = get_alignment_bits(opc); + if (a_bits) { + tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits); + } + tcg_out_qemu_st_direct(s, datalo, datahi, addrlo, x86_guest_base_index, x86_guest_base_offset, x86_guest_base_seg, opc); #endif @@ -2893,7 +3065,7 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, OPC_PSUBUB, OPC_PSUBUW, OPC_UD2, OPC_UD2 }; static int const mul_insn[4] = { - OPC_UD2, OPC_PMULLW, OPC_PMULLD, OPC_UD2 + OPC_UD2, OPC_PMULLW, OPC_PMULLD, OPC_VPMULLQ }; static int const shift_imm_insn[4] = { OPC_UD2, OPC_PSHIFTW_Ib, OPC_PSHIFTD_Ib, OPC_PSHIFTQ_Ib @@ -2917,28 +3089,31 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, OPC_PACKUSWB, OPC_PACKUSDW, OPC_UD2, OPC_UD2 }; static int const smin_insn[4] = { - OPC_PMINSB, OPC_PMINSW, OPC_PMINSD, OPC_UD2 + OPC_PMINSB, OPC_PMINSW, OPC_PMINSD, OPC_VPMINSQ }; static int const smax_insn[4] = { - OPC_PMAXSB, OPC_PMAXSW, OPC_PMAXSD, OPC_UD2 + OPC_PMAXSB, OPC_PMAXSW, OPC_PMAXSD, OPC_VPMAXSQ }; static int const umin_insn[4] = { - OPC_PMINUB, OPC_PMINUW, OPC_PMINUD, OPC_UD2 + OPC_PMINUB, OPC_PMINUW, OPC_PMINUD, OPC_VPMINUQ }; static int const umax_insn[4] = { - OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_UD2 + OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_VPMAXUQ + }; + static int const rotlv_insn[4] = { + OPC_UD2, OPC_UD2, OPC_VPROLVD, OPC_VPROLVQ + 
}; + static int const rotrv_insn[4] = { + OPC_UD2, OPC_UD2, OPC_VPRORVD, OPC_VPRORVQ }; static int const shlv_insn[4] = { - /* TODO: AVX512 adds support for MO_16. */ - OPC_UD2, OPC_UD2, OPC_VPSLLVD, OPC_VPSLLVQ + OPC_UD2, OPC_VPSLLVW, OPC_VPSLLVD, OPC_VPSLLVQ }; static int const shrv_insn[4] = { - /* TODO: AVX512 adds support for MO_16. */ - OPC_UD2, OPC_UD2, OPC_VPSRLVD, OPC_VPSRLVQ + OPC_UD2, OPC_VPSRLVW, OPC_VPSRLVD, OPC_VPSRLVQ }; static int const sarv_insn[4] = { - /* TODO: AVX512 adds support for MO_16, MO_64. */ - OPC_UD2, OPC_UD2, OPC_VPSRAVD, OPC_UD2 + OPC_UD2, OPC_VPSRAVW, OPC_VPSRAVD, OPC_VPSRAVQ }; static int const shls_insn[4] = { OPC_UD2, OPC_PSLLW, OPC_PSLLD, OPC_PSLLQ @@ -2947,16 +3122,24 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, OPC_UD2, OPC_PSRLW, OPC_PSRLD, OPC_PSRLQ }; static int const sars_insn[4] = { - OPC_UD2, OPC_PSRAW, OPC_PSRAD, OPC_UD2 + OPC_UD2, OPC_PSRAW, OPC_PSRAD, OPC_VPSRAQ + }; + static int const vpshldi_insn[4] = { + OPC_UD2, OPC_VPSHLDW, OPC_VPSHLDD, OPC_VPSHLDQ + }; + static int const vpshldv_insn[4] = { + OPC_UD2, OPC_VPSHLDVW, OPC_VPSHLDVD, OPC_VPSHLDVQ + }; + static int const vpshrdv_insn[4] = { + OPC_UD2, OPC_VPSHRDVW, OPC_VPSHRDVD, OPC_VPSHRDVQ }; static int const abs_insn[4] = { - /* TODO: AVX512 adds support for MO_64. */ - OPC_PABSB, OPC_PABSW, OPC_PABSD, OPC_UD2 + OPC_PABSB, OPC_PABSW, OPC_PABSD, OPC_VPABSQ }; TCGType type = vecl + TCG_TYPE_V64; int insn, sub; - TCGArg a0, a1, a2; + TCGArg a0, a1, a2, a3; a0 = args[0]; a1 = args[1]; @@ -3014,6 +3197,12 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, case INDEX_op_sarv_vec: insn = sarv_insn[vece]; goto gen_simd; + case INDEX_op_rotlv_vec: + insn = rotlv_insn[vece]; + goto gen_simd; + case INDEX_op_rotrv_vec: + insn = rotrv_insn[vece]; + goto gen_simd; case INDEX_op_shls_vec: insn = shls_insn[vece]; goto gen_simd; @@ -3035,6 +3224,16 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, case INDEX_op_x86_packus_vec: insn = packus_insn[vece]; goto gen_simd; + case INDEX_op_x86_vpshldv_vec: + insn = vpshldv_insn[vece]; + a1 = a2; + a2 = args[3]; + goto gen_simd; + case INDEX_op_x86_vpshrdv_vec: + insn = vpshrdv_insn[vece]; + a1 = a2; + a2 = args[3]; + goto gen_simd; #if TCG_TARGET_REG_BITS == 32 case INDEX_op_dup2_vec: /* First merge the two 32-bit inputs to a single 64-bit element. 
*/ @@ -3078,17 +3277,30 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, break; case INDEX_op_shli_vec: + insn = shift_imm_insn[vece]; sub = 6; goto gen_shift; case INDEX_op_shri_vec: + insn = shift_imm_insn[vece]; sub = 2; goto gen_shift; case INDEX_op_sari_vec: - tcg_debug_assert(vece != MO_64); + if (vece == MO_64) { + insn = OPC_PSHIFTD_Ib | P_VEXW | P_EVEX; + } else { + insn = shift_imm_insn[vece]; + } sub = 4; + goto gen_shift; + case INDEX_op_rotli_vec: + insn = OPC_PSHIFTD_Ib | P_EVEX; /* VPROL[DQ] */ + if (vece == MO_64) { + insn |= P_VEXW; + } + sub = 1; + goto gen_shift; gen_shift: tcg_debug_assert(vece != MO_8); - insn = shift_imm_insn[vece]; if (type == TCG_TYPE_V256) { insn |= P_VEXL; } @@ -3124,7 +3336,51 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, insn = OPC_VPERM2I128; sub = args[3]; goto gen_simd_imm8; + case INDEX_op_x86_vpshldi_vec: + insn = vpshldi_insn[vece]; + sub = args[3]; + goto gen_simd_imm8; + + case INDEX_op_not_vec: + insn = OPC_VPTERNLOGQ; + a2 = a1; + sub = 0x33; /* !B */ + goto gen_simd_imm8; + case INDEX_op_nor_vec: + insn = OPC_VPTERNLOGQ; + sub = 0x11; /* norCB */ + goto gen_simd_imm8; + case INDEX_op_nand_vec: + insn = OPC_VPTERNLOGQ; + sub = 0x77; /* nandCB */ + goto gen_simd_imm8; + case INDEX_op_eqv_vec: + insn = OPC_VPTERNLOGQ; + sub = 0x99; /* xnorCB */ + goto gen_simd_imm8; + case INDEX_op_orc_vec: + insn = OPC_VPTERNLOGQ; + sub = 0xdd; /* orB!C */ + goto gen_simd_imm8; + + case INDEX_op_bitsel_vec: + insn = OPC_VPTERNLOGQ; + a3 = args[3]; + if (a0 == a1) { + a1 = a2; + a2 = a3; + sub = 0xca; /* A?B:C */ + } else if (a0 == a2) { + a2 = a3; + sub = 0xe2; /* B?A:C */ + } else { + tcg_out_mov(s, type, a0, a3); + sub = 0xb8; /* B?C:A */ + } + goto gen_simd_imm8; + gen_simd_imm8: + tcg_debug_assert(insn != OPC_UD2); if (type == TCG_TYPE_V256) { insn |= P_VEXL; } @@ -3392,6 +3648,10 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_or_vec: case INDEX_op_xor_vec: case INDEX_op_andc_vec: + case INDEX_op_orc_vec: + case INDEX_op_nand_vec: + case INDEX_op_nor_vec: + case INDEX_op_eqv_vec: case INDEX_op_ssadd_vec: case INDEX_op_usadd_vec: case INDEX_op_sssub_vec: @@ -3403,10 +3663,11 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_shlv_vec: case INDEX_op_shrv_vec: case INDEX_op_sarv_vec: + case INDEX_op_rotlv_vec: + case INDEX_op_rotrv_vec: case INDEX_op_shls_vec: case INDEX_op_shrs_vec: case INDEX_op_sars_vec: - case INDEX_op_rotls_vec: case INDEX_op_cmp_vec: case INDEX_op_x86_shufps_vec: case INDEX_op_x86_blend_vec: @@ -3415,6 +3676,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_x86_vperm2i128_vec: case INDEX_op_x86_punpckl_vec: case INDEX_op_x86_punpckh_vec: + case INDEX_op_x86_vpshldi_vec: #if TCG_TARGET_REG_BITS == 32 case INDEX_op_dup2_vec: #endif @@ -3422,12 +3684,19 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_abs_vec: case INDEX_op_dup_vec: + case INDEX_op_not_vec: case INDEX_op_shli_vec: case INDEX_op_shri_vec: case INDEX_op_sari_vec: + case INDEX_op_rotli_vec: case INDEX_op_x86_psrldq_vec: return C_O1_I1(x, x); + case INDEX_op_x86_vpshldv_vec: + case INDEX_op_x86_vpshrdv_vec: + return C_O1_I3(x, 0, x, x); + + case INDEX_op_bitsel_vec: case INDEX_op_x86_vpblendvb_vec: return C_O1_I3(x, x, x, x); @@ -3445,53 +3714,96 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case INDEX_op_or_vec: case INDEX_op_xor_vec: case INDEX_op_andc_vec: + case INDEX_op_orc_vec: + case 
INDEX_op_nand_vec: + case INDEX_op_nor_vec: + case INDEX_op_eqv_vec: + case INDEX_op_not_vec: + case INDEX_op_bitsel_vec: return 1; - case INDEX_op_rotli_vec: case INDEX_op_cmp_vec: case INDEX_op_cmpsel_vec: return -1; + case INDEX_op_rotli_vec: + return have_avx512vl && vece >= MO_32 ? 1 : -1; + case INDEX_op_shli_vec: case INDEX_op_shri_vec: /* We must expand the operation for MO_8. */ return vece == MO_8 ? -1 : 1; case INDEX_op_sari_vec: - /* We must expand the operation for MO_8. */ - if (vece == MO_8) { + switch (vece) { + case MO_8: return -1; - } - /* We can emulate this for MO_64, but it does not pay off - unless we're producing at least 4 values. */ - if (vece == MO_64) { + case MO_16: + case MO_32: + return 1; + case MO_64: + if (have_avx512vl) { + return 1; + } + /* + * We can emulate this for MO_64, but it does not pay off + * unless we're producing at least 4 values. + */ return type >= TCG_TYPE_V256 ? -1 : 0; } - return 1; + return 0; case INDEX_op_shls_vec: case INDEX_op_shrs_vec: return vece >= MO_16; case INDEX_op_sars_vec: - return vece >= MO_16 && vece <= MO_32; + switch (vece) { + case MO_16: + case MO_32: + return 1; + case MO_64: + return have_avx512vl; + } + return 0; case INDEX_op_rotls_vec: return vece >= MO_16 ? -1 : 0; case INDEX_op_shlv_vec: case INDEX_op_shrv_vec: - return have_avx2 && vece >= MO_32; + switch (vece) { + case MO_16: + return have_avx512bw; + case MO_32: + case MO_64: + return have_avx2; + } + return 0; case INDEX_op_sarv_vec: - return have_avx2 && vece == MO_32; + switch (vece) { + case MO_16: + return have_avx512bw; + case MO_32: + return have_avx2; + case MO_64: + return have_avx512vl; + } + return 0; case INDEX_op_rotlv_vec: case INDEX_op_rotrv_vec: - return have_avx2 && vece >= MO_32 ? -1 : 0; + switch (vece) { + case MO_16: + return have_avx512vbmi2 ? -1 : 0; + case MO_32: + case MO_64: + return have_avx512vl ? 1 : have_avx2 ? -1 : 0; + } + return 0; case INDEX_op_mul_vec: - if (vece == MO_8) { - /* We can expand the operation for MO_8. */ + switch (vece) { + case MO_8: return -1; - } - if (vece == MO_64) { - return 0; + case MO_64: + return have_avx512dq; } return 1; @@ -3505,7 +3817,7 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case INDEX_op_umin_vec: case INDEX_op_umax_vec: case INDEX_op_abs_vec: - return vece <= MO_32; + return vece <= MO_32 || have_avx512vl; default: return 0; @@ -3623,6 +3935,12 @@ static void expand_vec_rotli(TCGType type, unsigned vece, return; } + if (have_avx512vbmi2) { + vec_gen_4(INDEX_op_x86_vpshldi_vec, type, vece, + tcgv_vec_arg(v0), tcgv_vec_arg(v1), tcgv_vec_arg(v1), imm); + return; + } + t = tcg_temp_new_vec(type); tcg_gen_shli_vec(vece, t, v1, imm); tcg_gen_shri_vec(vece, v0, v1, (8 << vece) - imm); @@ -3630,31 +3948,19 @@ static void expand_vec_rotli(TCGType type, unsigned vece, tcg_temp_free_vec(t); } -static void expand_vec_rotls(TCGType type, unsigned vece, - TCGv_vec v0, TCGv_vec v1, TCGv_i32 lsh) -{ - TCGv_i32 rsh; - TCGv_vec t; - - tcg_debug_assert(vece != MO_8); - - t = tcg_temp_new_vec(type); - rsh = tcg_temp_new_i32(); - - tcg_gen_neg_i32(rsh, lsh); - tcg_gen_andi_i32(rsh, rsh, (8 << vece) - 1); - tcg_gen_shls_vec(vece, t, v1, lsh); - tcg_gen_shrs_vec(vece, v0, v1, rsh); - tcg_gen_or_vec(vece, v0, v0, t); - tcg_temp_free_vec(t); - tcg_temp_free_i32(rsh); -} - static void expand_vec_rotv(TCGType type, unsigned vece, TCGv_vec v0, TCGv_vec v1, TCGv_vec sh, bool right) { - TCGv_vec t = tcg_temp_new_vec(type); + TCGv_vec t; + if (have_avx512vbmi2) { + vec_gen_4(right ? 
INDEX_op_x86_vpshrdv_vec : INDEX_op_x86_vpshldv_vec, + type, vece, tcgv_vec_arg(v0), tcgv_vec_arg(v1), + tcgv_vec_arg(v1), tcgv_vec_arg(sh)); + return; + } + + t = tcg_temp_new_vec(type); tcg_gen_dupi_vec(vece, t, 8 << vece); tcg_gen_sub_vec(vece, t, t, sh); if (right) { @@ -3668,6 +3974,35 @@ static void expand_vec_rotv(TCGType type, unsigned vece, TCGv_vec v0, tcg_temp_free_vec(t); } +static void expand_vec_rotls(TCGType type, unsigned vece, + TCGv_vec v0, TCGv_vec v1, TCGv_i32 lsh) +{ + TCGv_vec t = tcg_temp_new_vec(type); + + tcg_debug_assert(vece != MO_8); + + if (vece >= MO_32 ? have_avx512vl : have_avx512vbmi2) { + tcg_gen_dup_i32_vec(vece, t, lsh); + if (vece >= MO_32) { + tcg_gen_rotlv_vec(vece, v0, v1, t); + } else { + expand_vec_rotv(type, vece, v0, v1, t, false); + } + } else { + TCGv_i32 rsh = tcg_temp_new_i32(); + + tcg_gen_neg_i32(rsh, lsh); + tcg_gen_andi_i32(rsh, rsh, (8 << vece) - 1); + tcg_gen_shls_vec(vece, t, v1, lsh); + tcg_gen_shrs_vec(vece, v0, v1, rsh); + tcg_gen_or_vec(vece, v0, v0, t); + + tcg_temp_free_i32(rsh); + } + + tcg_temp_free_vec(t); +} + static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0, TCGv_vec v1, TCGv_vec v2) { @@ -3763,28 +4098,28 @@ static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0, fixup = NEED_SWAP | NEED_INV; break; case TCG_COND_LEU: - if (vece <= MO_32) { + if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece)) { fixup = NEED_UMIN; } else { fixup = NEED_BIAS | NEED_INV; } break; case TCG_COND_GTU: - if (vece <= MO_32) { + if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece)) { fixup = NEED_UMIN | NEED_INV; } else { fixup = NEED_BIAS; } break; case TCG_COND_GEU: - if (vece <= MO_32) { + if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece)) { fixup = NEED_UMAX; } else { fixup = NEED_BIAS | NEED_SWAP | NEED_INV; } break; case TCG_COND_LTU: - if (vece <= MO_32) { + if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece)) { fixup = NEED_UMAX | NEED_INV; } else { fixup = NEED_BIAS | NEED_SWAP; @@ -4035,12 +4370,12 @@ static void tcg_out_nop_fill(tcg_insn_unit *p, int count) static void tcg_target_init(TCGContext *s) { #ifdef CONFIG_CPUID_H - unsigned a, b, c, d, b7 = 0; - int max = __get_cpuid_max(0, 0); + unsigned a, b, c, d, b7 = 0, c7 = 0; + unsigned max = __get_cpuid_max(0, 0); if (max >= 7) { /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs. */ - __cpuid_count(7, 0, a, b7, c, d); + __cpuid_count(7, 0, a, b7, c7, d); have_bmi1 = (b7 & bit_BMI) != 0; have_bmi2 = (b7 & bit_BMI2) != 0; } @@ -4070,6 +4405,22 @@ static void tcg_target_init(TCGContext *s) if ((xcrl & 6) == 6) { have_avx1 = (c & bit_AVX) != 0; have_avx2 = (b7 & bit_AVX2) != 0; + + /* + * There are interesting instructions in AVX512, so long + * as we have AVX512VL, which indicates support for EVEX + * on sizes smaller than 512 bits. We are required to + * check that OPMASK and all extended ZMM state are enabled + * even if we're not using them -- the insns will fault. 
+ */ + if ((xcrl & 0xe0) == 0xe0 + && (b7 & bit_AVX512F) + && (b7 & bit_AVX512VL)) { + have_avx512vl = true; + have_avx512bw = (b7 & bit_AVX512BW) != 0; + have_avx512dq = (b7 & bit_AVX512DQ) != 0; + have_avx512vbmi2 = (c7 & bit_AVX512VBMI2) != 0; + } } } } diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h index 6cff3b8e3c..d23ab76458 100644 --- a/tcg/i386/tcg-target.h +++ b/tcg/i386/tcg-target.h @@ -103,6 +103,10 @@ extern bool have_bmi1; extern bool have_popcnt; extern bool have_avx1; extern bool have_avx2; +extern bool have_avx512bw; +extern bool have_avx512dq; +extern bool have_avx512vbmi2; +extern bool have_avx512vl; extern bool have_movbe; /* optional instructions */ @@ -186,20 +190,23 @@ extern bool have_movbe; #define TCG_TARGET_HAS_v256 have_avx2 #define TCG_TARGET_HAS_andc_vec 1 -#define TCG_TARGET_HAS_orc_vec 0 -#define TCG_TARGET_HAS_not_vec 0 +#define TCG_TARGET_HAS_orc_vec have_avx512vl +#define TCG_TARGET_HAS_nand_vec have_avx512vl +#define TCG_TARGET_HAS_nor_vec have_avx512vl +#define TCG_TARGET_HAS_eqv_vec have_avx512vl +#define TCG_TARGET_HAS_not_vec have_avx512vl #define TCG_TARGET_HAS_neg_vec 0 #define TCG_TARGET_HAS_abs_vec 1 -#define TCG_TARGET_HAS_roti_vec 0 +#define TCG_TARGET_HAS_roti_vec have_avx512vl #define TCG_TARGET_HAS_rots_vec 0 -#define TCG_TARGET_HAS_rotv_vec 0 +#define TCG_TARGET_HAS_rotv_vec have_avx512vl #define TCG_TARGET_HAS_shi_vec 1 #define TCG_TARGET_HAS_shs_vec 1 #define TCG_TARGET_HAS_shv_vec have_avx2 #define TCG_TARGET_HAS_mul_vec 1 #define TCG_TARGET_HAS_sat_vec 1 #define TCG_TARGET_HAS_minmax_vec 1 -#define TCG_TARGET_HAS_bitsel_vec 0 +#define TCG_TARGET_HAS_bitsel_vec have_avx512vl #define TCG_TARGET_HAS_cmpsel_vec -1 #define TCG_TARGET_deposit_i32_valid(ofs, len) \ @@ -234,9 +241,7 @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx, #define TCG_TARGET_HAS_MEMORY_BSWAP have_movbe -#ifdef CONFIG_SOFTMMU #define TCG_TARGET_NEED_LDST_LABELS -#endif #define TCG_TARGET_NEED_POOL_LABELS #endif diff --git a/tcg/i386/tcg-target.opc.h b/tcg/i386/tcg-target.opc.h index 1312941800..b5f403e35e 100644 --- a/tcg/i386/tcg-target.opc.h +++ b/tcg/i386/tcg-target.opc.h @@ -33,3 +33,6 @@ DEF(x86_psrldq_vec, 1, 1, 1, IMPLVEC) DEF(x86_vperm2i128_vec, 1, 2, 1, IMPLVEC) DEF(x86_punpckl_vec, 1, 2, 0, IMPLVEC) DEF(x86_punpckh_vec, 1, 2, 0, IMPLVEC) +DEF(x86_vpshldi_vec, 1, 2, 1, IMPLVEC) +DEF(x86_vpshldv_vec, 1, 3, 0, IMPLVEC) +DEF(x86_vpshrdv_vec, 1, 3, 0, IMPLVEC) diff --git a/tcg/loongarch64/tcg-insn-defs.c.inc b/tcg/loongarch64/tcg-insn-defs.c.inc new file mode 100644 index 0000000000..d162571856 --- /dev/null +++ b/tcg/loongarch64/tcg-insn-defs.c.inc @@ -0,0 +1,979 @@ +/* SPDX-License-Identifier: MIT */ +/* + * LoongArch instruction formats, opcodes, and encoders for TCG use. + * + * This file is auto-generated by genqemutcgdefs from + * https://github.com/loongson-community/loongarch-opcodes, + * from commit 961f0c60f5b63e574d785995600c71ad5413fdc4. + * DO NOT EDIT. 
+ */ + +typedef enum { + OPC_CLZ_W = 0x00001400, + OPC_CTZ_W = 0x00001c00, + OPC_CLZ_D = 0x00002400, + OPC_CTZ_D = 0x00002c00, + OPC_REVB_2H = 0x00003000, + OPC_REVB_2W = 0x00003800, + OPC_REVB_D = 0x00003c00, + OPC_SEXT_H = 0x00005800, + OPC_SEXT_B = 0x00005c00, + OPC_ADD_W = 0x00100000, + OPC_ADD_D = 0x00108000, + OPC_SUB_W = 0x00110000, + OPC_SUB_D = 0x00118000, + OPC_SLT = 0x00120000, + OPC_SLTU = 0x00128000, + OPC_MASKEQZ = 0x00130000, + OPC_MASKNEZ = 0x00138000, + OPC_NOR = 0x00140000, + OPC_AND = 0x00148000, + OPC_OR = 0x00150000, + OPC_XOR = 0x00158000, + OPC_ORN = 0x00160000, + OPC_ANDN = 0x00168000, + OPC_SLL_W = 0x00170000, + OPC_SRL_W = 0x00178000, + OPC_SRA_W = 0x00180000, + OPC_SLL_D = 0x00188000, + OPC_SRL_D = 0x00190000, + OPC_SRA_D = 0x00198000, + OPC_ROTR_W = 0x001b0000, + OPC_ROTR_D = 0x001b8000, + OPC_MUL_W = 0x001c0000, + OPC_MULH_W = 0x001c8000, + OPC_MULH_WU = 0x001d0000, + OPC_MUL_D = 0x001d8000, + OPC_MULH_D = 0x001e0000, + OPC_MULH_DU = 0x001e8000, + OPC_DIV_W = 0x00200000, + OPC_MOD_W = 0x00208000, + OPC_DIV_WU = 0x00210000, + OPC_MOD_WU = 0x00218000, + OPC_DIV_D = 0x00220000, + OPC_MOD_D = 0x00228000, + OPC_DIV_DU = 0x00230000, + OPC_MOD_DU = 0x00238000, + OPC_SLLI_W = 0x00408000, + OPC_SLLI_D = 0x00410000, + OPC_SRLI_W = 0x00448000, + OPC_SRLI_D = 0x00450000, + OPC_SRAI_W = 0x00488000, + OPC_SRAI_D = 0x00490000, + OPC_ROTRI_W = 0x004c8000, + OPC_ROTRI_D = 0x004d0000, + OPC_BSTRINS_W = 0x00600000, + OPC_BSTRPICK_W = 0x00608000, + OPC_BSTRINS_D = 0x00800000, + OPC_BSTRPICK_D = 0x00c00000, + OPC_SLTI = 0x02000000, + OPC_SLTUI = 0x02400000, + OPC_ADDI_W = 0x02800000, + OPC_ADDI_D = 0x02c00000, + OPC_CU52I_D = 0x03000000, + OPC_ANDI = 0x03400000, + OPC_ORI = 0x03800000, + OPC_XORI = 0x03c00000, + OPC_LU12I_W = 0x14000000, + OPC_CU32I_D = 0x16000000, + OPC_PCADDU2I = 0x18000000, + OPC_PCALAU12I = 0x1a000000, + OPC_PCADDU12I = 0x1c000000, + OPC_PCADDU18I = 0x1e000000, + OPC_LD_B = 0x28000000, + OPC_LD_H = 0x28400000, + OPC_LD_W = 0x28800000, + OPC_LD_D = 0x28c00000, + OPC_ST_B = 0x29000000, + OPC_ST_H = 0x29400000, + OPC_ST_W = 0x29800000, + OPC_ST_D = 0x29c00000, + OPC_LD_BU = 0x2a000000, + OPC_LD_HU = 0x2a400000, + OPC_LD_WU = 0x2a800000, + OPC_LDX_B = 0x38000000, + OPC_LDX_H = 0x38040000, + OPC_LDX_W = 0x38080000, + OPC_LDX_D = 0x380c0000, + OPC_STX_B = 0x38100000, + OPC_STX_H = 0x38140000, + OPC_STX_W = 0x38180000, + OPC_STX_D = 0x381c0000, + OPC_LDX_BU = 0x38200000, + OPC_LDX_HU = 0x38240000, + OPC_LDX_WU = 0x38280000, + OPC_DBAR = 0x38720000, + OPC_JIRL = 0x4c000000, + OPC_B = 0x50000000, + OPC_BL = 0x54000000, + OPC_BEQ = 0x58000000, + OPC_BNE = 0x5c000000, + OPC_BGT = 0x60000000, + OPC_BLE = 0x64000000, + OPC_BGTU = 0x68000000, + OPC_BLEU = 0x6c000000, +} LoongArchInsn; + +static int32_t __attribute__((unused)) +encode_d_slot(LoongArchInsn opc, uint32_t d) +{ + return opc | d; +} + +static int32_t __attribute__((unused)) +encode_dj_slots(LoongArchInsn opc, uint32_t d, uint32_t j) +{ + return opc | d | j << 5; +} + +static int32_t __attribute__((unused)) +encode_djk_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k) +{ + return opc | d | j << 5 | k << 10; +} + +static int32_t __attribute__((unused)) +encode_djkm_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k, + uint32_t m) +{ + return opc | d | j << 5 | k << 10 | m << 16; +} + +static int32_t __attribute__((unused)) +encode_dk_slots(LoongArchInsn opc, uint32_t d, uint32_t k) +{ + return opc | d | k << 10; +} + +static int32_t __attribute__((unused)) +encode_dj_insn(LoongArchInsn opc, 
TCGReg d, TCGReg j) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(j >= 0 && j <= 0x1f); + return encode_dj_slots(opc, d, j); +} + +static int32_t __attribute__((unused)) +encode_djk_insn(LoongArchInsn opc, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(j >= 0 && j <= 0x1f); + tcg_debug_assert(k >= 0 && k <= 0x1f); + return encode_djk_slots(opc, d, j, k); +} + +static int32_t __attribute__((unused)) +encode_djsk12_insn(LoongArchInsn opc, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(j >= 0 && j <= 0x1f); + tcg_debug_assert(sk12 >= -0x800 && sk12 <= 0x7ff); + return encode_djk_slots(opc, d, j, sk12 & 0xfff); +} + +static int32_t __attribute__((unused)) +encode_djsk16_insn(LoongArchInsn opc, TCGReg d, TCGReg j, int32_t sk16) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(j >= 0 && j <= 0x1f); + tcg_debug_assert(sk16 >= -0x8000 && sk16 <= 0x7fff); + return encode_djk_slots(opc, d, j, sk16 & 0xffff); +} + +static int32_t __attribute__((unused)) +encode_djuk12_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk12) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(j >= 0 && j <= 0x1f); + tcg_debug_assert(uk12 <= 0xfff); + return encode_djk_slots(opc, d, j, uk12); +} + +static int32_t __attribute__((unused)) +encode_djuk5_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk5) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(j >= 0 && j <= 0x1f); + tcg_debug_assert(uk5 <= 0x1f); + return encode_djk_slots(opc, d, j, uk5); +} + +static int32_t __attribute__((unused)) +encode_djuk5um5_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk5, + uint32_t um5) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(j >= 0 && j <= 0x1f); + tcg_debug_assert(uk5 <= 0x1f); + tcg_debug_assert(um5 <= 0x1f); + return encode_djkm_slots(opc, d, j, uk5, um5); +} + +static int32_t __attribute__((unused)) +encode_djuk6_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk6) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(j >= 0 && j <= 0x1f); + tcg_debug_assert(uk6 <= 0x3f); + return encode_djk_slots(opc, d, j, uk6); +} + +static int32_t __attribute__((unused)) +encode_djuk6um6_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk6, + uint32_t um6) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(j >= 0 && j <= 0x1f); + tcg_debug_assert(uk6 <= 0x3f); + tcg_debug_assert(um6 <= 0x3f); + return encode_djkm_slots(opc, d, j, uk6, um6); +} + +static int32_t __attribute__((unused)) +encode_dsj20_insn(LoongArchInsn opc, TCGReg d, int32_t sj20) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(sj20 >= -0x80000 && sj20 <= 0x7ffff); + return encode_dj_slots(opc, d, sj20 & 0xfffff); +} + +static int32_t __attribute__((unused)) +encode_sd10k16_insn(LoongArchInsn opc, int32_t sd10k16) +{ + tcg_debug_assert(sd10k16 >= -0x2000000 && sd10k16 <= 0x1ffffff); + return encode_dk_slots(opc, (sd10k16 >> 16) & 0x3ff, sd10k16 & 0xffff); +} + +static int32_t __attribute__((unused)) +encode_ud15_insn(LoongArchInsn opc, uint32_t ud15) +{ + tcg_debug_assert(ud15 <= 0x7fff); + return encode_d_slot(opc, ud15); +} + +/* Emits the `clz.w d, j` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_clz_w(TCGContext *s, TCGReg d, TCGReg j) +{ + tcg_out32(s, encode_dj_insn(OPC_CLZ_W, d, j)); +} + +/* Emits the `ctz.w d, j` instruction. 
*/ +static void __attribute__((unused)) +tcg_out_opc_ctz_w(TCGContext *s, TCGReg d, TCGReg j) +{ + tcg_out32(s, encode_dj_insn(OPC_CTZ_W, d, j)); +} + +/* Emits the `clz.d d, j` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_clz_d(TCGContext *s, TCGReg d, TCGReg j) +{ + tcg_out32(s, encode_dj_insn(OPC_CLZ_D, d, j)); +} + +/* Emits the `ctz.d d, j` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ctz_d(TCGContext *s, TCGReg d, TCGReg j) +{ + tcg_out32(s, encode_dj_insn(OPC_CTZ_D, d, j)); +} + +/* Emits the `revb.2h d, j` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_revb_2h(TCGContext *s, TCGReg d, TCGReg j) +{ + tcg_out32(s, encode_dj_insn(OPC_REVB_2H, d, j)); +} + +/* Emits the `revb.2w d, j` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_revb_2w(TCGContext *s, TCGReg d, TCGReg j) +{ + tcg_out32(s, encode_dj_insn(OPC_REVB_2W, d, j)); +} + +/* Emits the `revb.d d, j` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_revb_d(TCGContext *s, TCGReg d, TCGReg j) +{ + tcg_out32(s, encode_dj_insn(OPC_REVB_D, d, j)); +} + +/* Emits the `sext.h d, j` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sext_h(TCGContext *s, TCGReg d, TCGReg j) +{ + tcg_out32(s, encode_dj_insn(OPC_SEXT_H, d, j)); +} + +/* Emits the `sext.b d, j` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sext_b(TCGContext *s, TCGReg d, TCGReg j) +{ + tcg_out32(s, encode_dj_insn(OPC_SEXT_B, d, j)); +} + +/* Emits the `add.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_add_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_ADD_W, d, j, k)); +} + +/* Emits the `add.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_add_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_ADD_D, d, j, k)); +} + +/* Emits the `sub.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sub_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SUB_W, d, j, k)); +} + +/* Emits the `sub.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sub_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SUB_D, d, j, k)); +} + +/* Emits the `slt d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_slt(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SLT, d, j, k)); +} + +/* Emits the `sltu d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sltu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SLTU, d, j, k)); +} + +/* Emits the `maskeqz d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_maskeqz(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MASKEQZ, d, j, k)); +} + +/* Emits the `masknez d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_masknez(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MASKNEZ, d, j, k)); +} + +/* Emits the `nor d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_nor(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_NOR, d, j, k)); +} + +/* Emits the `and d, j, k` instruction. 
*/ +static void __attribute__((unused)) +tcg_out_opc_and(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_AND, d, j, k)); +} + +/* Emits the `or d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_or(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_OR, d, j, k)); +} + +/* Emits the `xor d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_xor(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_XOR, d, j, k)); +} + +/* Emits the `orn d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_orn(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_ORN, d, j, k)); +} + +/* Emits the `andn d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_andn(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_ANDN, d, j, k)); +} + +/* Emits the `sll.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sll_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SLL_W, d, j, k)); +} + +/* Emits the `srl.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_srl_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SRL_W, d, j, k)); +} + +/* Emits the `sra.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sra_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SRA_W, d, j, k)); +} + +/* Emits the `sll.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sll_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SLL_D, d, j, k)); +} + +/* Emits the `srl.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_srl_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SRL_D, d, j, k)); +} + +/* Emits the `sra.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sra_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SRA_D, d, j, k)); +} + +/* Emits the `rotr.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_rotr_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_ROTR_W, d, j, k)); +} + +/* Emits the `rotr.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_rotr_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_ROTR_D, d, j, k)); +} + +/* Emits the `mul.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mul_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MUL_W, d, j, k)); +} + +/* Emits the `mulh.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mulh_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MULH_W, d, j, k)); +} + +/* Emits the `mulh.wu d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mulh_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MULH_WU, d, j, k)); +} + +/* Emits the `mul.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mul_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MUL_D, d, j, k)); +} + +/* Emits the `mulh.d d, j, k` instruction. 
*/ +static void __attribute__((unused)) +tcg_out_opc_mulh_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MULH_D, d, j, k)); +} + +/* Emits the `mulh.du d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mulh_du(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MULH_DU, d, j, k)); +} + +/* Emits the `div.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_div_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_DIV_W, d, j, k)); +} + +/* Emits the `mod.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mod_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MOD_W, d, j, k)); +} + +/* Emits the `div.wu d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_div_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_DIV_WU, d, j, k)); +} + +/* Emits the `mod.wu d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mod_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MOD_WU, d, j, k)); +} + +/* Emits the `div.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_div_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_DIV_D, d, j, k)); +} + +/* Emits the `mod.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mod_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MOD_D, d, j, k)); +} + +/* Emits the `div.du d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_div_du(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_DIV_DU, d, j, k)); +} + +/* Emits the `mod.du d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mod_du(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MOD_DU, d, j, k)); +} + +/* Emits the `slli.w d, j, uk5` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_slli_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5) +{ + tcg_out32(s, encode_djuk5_insn(OPC_SLLI_W, d, j, uk5)); +} + +/* Emits the `slli.d d, j, uk6` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_slli_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6) +{ + tcg_out32(s, encode_djuk6_insn(OPC_SLLI_D, d, j, uk6)); +} + +/* Emits the `srli.w d, j, uk5` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_srli_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5) +{ + tcg_out32(s, encode_djuk5_insn(OPC_SRLI_W, d, j, uk5)); +} + +/* Emits the `srli.d d, j, uk6` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_srli_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6) +{ + tcg_out32(s, encode_djuk6_insn(OPC_SRLI_D, d, j, uk6)); +} + +/* Emits the `srai.w d, j, uk5` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_srai_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5) +{ + tcg_out32(s, encode_djuk5_insn(OPC_SRAI_W, d, j, uk5)); +} + +/* Emits the `srai.d d, j, uk6` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_srai_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6) +{ + tcg_out32(s, encode_djuk6_insn(OPC_SRAI_D, d, j, uk6)); +} + +/* Emits the `rotri.w d, j, uk5` instruction. 
*/ +static void __attribute__((unused)) +tcg_out_opc_rotri_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5) +{ + tcg_out32(s, encode_djuk5_insn(OPC_ROTRI_W, d, j, uk5)); +} + +/* Emits the `rotri.d d, j, uk6` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_rotri_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6) +{ + tcg_out32(s, encode_djuk6_insn(OPC_ROTRI_D, d, j, uk6)); +} + +/* Emits the `bstrins.w d, j, uk5, um5` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_bstrins_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5, + uint32_t um5) +{ + tcg_out32(s, encode_djuk5um5_insn(OPC_BSTRINS_W, d, j, uk5, um5)); +} + +/* Emits the `bstrpick.w d, j, uk5, um5` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_bstrpick_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5, + uint32_t um5) +{ + tcg_out32(s, encode_djuk5um5_insn(OPC_BSTRPICK_W, d, j, uk5, um5)); +} + +/* Emits the `bstrins.d d, j, uk6, um6` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_bstrins_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6, + uint32_t um6) +{ + tcg_out32(s, encode_djuk6um6_insn(OPC_BSTRINS_D, d, j, uk6, um6)); +} + +/* Emits the `bstrpick.d d, j, uk6, um6` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_bstrpick_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6, + uint32_t um6) +{ + tcg_out32(s, encode_djuk6um6_insn(OPC_BSTRPICK_D, d, j, uk6, um6)); +} + +/* Emits the `slti d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_slti(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_SLTI, d, j, sk12)); +} + +/* Emits the `sltui d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sltui(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_SLTUI, d, j, sk12)); +} + +/* Emits the `addi.w d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_addi_w(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_ADDI_W, d, j, sk12)); +} + +/* Emits the `addi.d d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_addi_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_ADDI_D, d, j, sk12)); +} + +/* Emits the `cu52i.d d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_cu52i_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_CU52I_D, d, j, sk12)); +} + +/* Emits the `andi d, j, uk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_andi(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk12) +{ + tcg_out32(s, encode_djuk12_insn(OPC_ANDI, d, j, uk12)); +} + +/* Emits the `ori d, j, uk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ori(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk12) +{ + tcg_out32(s, encode_djuk12_insn(OPC_ORI, d, j, uk12)); +} + +/* Emits the `xori d, j, uk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_xori(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk12) +{ + tcg_out32(s, encode_djuk12_insn(OPC_XORI, d, j, uk12)); +} + +/* Emits the `lu12i.w d, sj20` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_lu12i_w(TCGContext *s, TCGReg d, int32_t sj20) +{ + tcg_out32(s, encode_dsj20_insn(OPC_LU12I_W, d, sj20)); +} + +/* Emits the `cu32i.d d, sj20` instruction. 
*/ +static void __attribute__((unused)) +tcg_out_opc_cu32i_d(TCGContext *s, TCGReg d, int32_t sj20) +{ + tcg_out32(s, encode_dsj20_insn(OPC_CU32I_D, d, sj20)); +} + +/* Emits the `pcaddu2i d, sj20` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_pcaddu2i(TCGContext *s, TCGReg d, int32_t sj20) +{ + tcg_out32(s, encode_dsj20_insn(OPC_PCADDU2I, d, sj20)); +} + +/* Emits the `pcalau12i d, sj20` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_pcalau12i(TCGContext *s, TCGReg d, int32_t sj20) +{ + tcg_out32(s, encode_dsj20_insn(OPC_PCALAU12I, d, sj20)); +} + +/* Emits the `pcaddu12i d, sj20` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_pcaddu12i(TCGContext *s, TCGReg d, int32_t sj20) +{ + tcg_out32(s, encode_dsj20_insn(OPC_PCADDU12I, d, sj20)); +} + +/* Emits the `pcaddu18i d, sj20` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_pcaddu18i(TCGContext *s, TCGReg d, int32_t sj20) +{ + tcg_out32(s, encode_dsj20_insn(OPC_PCADDU18I, d, sj20)); +} + +/* Emits the `ld.b d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ld_b(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_LD_B, d, j, sk12)); +} + +/* Emits the `ld.h d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ld_h(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_LD_H, d, j, sk12)); +} + +/* Emits the `ld.w d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ld_w(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_LD_W, d, j, sk12)); +} + +/* Emits the `ld.d d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ld_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_LD_D, d, j, sk12)); +} + +/* Emits the `st.b d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_st_b(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_ST_B, d, j, sk12)); +} + +/* Emits the `st.h d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_st_h(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_ST_H, d, j, sk12)); +} + +/* Emits the `st.w d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_st_w(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_ST_W, d, j, sk12)); +} + +/* Emits the `st.d d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_st_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_ST_D, d, j, sk12)); +} + +/* Emits the `ld.bu d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ld_bu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_LD_BU, d, j, sk12)); +} + +/* Emits the `ld.hu d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ld_hu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_LD_HU, d, j, sk12)); +} + +/* Emits the `ld.wu d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ld_wu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_LD_WU, d, j, sk12)); +} + +/* Emits the `ldx.b d, j, k` instruction. 
*/ +static void __attribute__((unused)) +tcg_out_opc_ldx_b(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_LDX_B, d, j, k)); +} + +/* Emits the `ldx.h d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ldx_h(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_LDX_H, d, j, k)); +} + +/* Emits the `ldx.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ldx_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_LDX_W, d, j, k)); +} + +/* Emits the `ldx.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ldx_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_LDX_D, d, j, k)); +} + +/* Emits the `stx.b d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_stx_b(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_STX_B, d, j, k)); +} + +/* Emits the `stx.h d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_stx_h(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_STX_H, d, j, k)); +} + +/* Emits the `stx.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_stx_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_STX_W, d, j, k)); +} + +/* Emits the `stx.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_stx_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_STX_D, d, j, k)); +} + +/* Emits the `ldx.bu d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ldx_bu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_LDX_BU, d, j, k)); +} + +/* Emits the `ldx.hu d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ldx_hu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_LDX_HU, d, j, k)); +} + +/* Emits the `ldx.wu d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ldx_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_LDX_WU, d, j, k)); +} + +/* Emits the `dbar ud15` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_dbar(TCGContext *s, uint32_t ud15) +{ + tcg_out32(s, encode_ud15_insn(OPC_DBAR, ud15)); +} + +/* Emits the `jirl d, j, sk16` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_jirl(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16) +{ + tcg_out32(s, encode_djsk16_insn(OPC_JIRL, d, j, sk16)); +} + +/* Emits the `b sd10k16` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_b(TCGContext *s, int32_t sd10k16) +{ + tcg_out32(s, encode_sd10k16_insn(OPC_B, sd10k16)); +} + +/* Emits the `bl sd10k16` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_bl(TCGContext *s, int32_t sd10k16) +{ + tcg_out32(s, encode_sd10k16_insn(OPC_BL, sd10k16)); +} + +/* Emits the `beq d, j, sk16` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_beq(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16) +{ + tcg_out32(s, encode_djsk16_insn(OPC_BEQ, d, j, sk16)); +} + +/* Emits the `bne d, j, sk16` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_bne(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16) +{ + tcg_out32(s, encode_djsk16_insn(OPC_BNE, d, j, sk16)); +} + +/* Emits the `bgt d, j, sk16` instruction. 
*/ +static void __attribute__((unused)) +tcg_out_opc_bgt(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16) +{ + tcg_out32(s, encode_djsk16_insn(OPC_BGT, d, j, sk16)); +} + +/* Emits the `ble d, j, sk16` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ble(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16) +{ + tcg_out32(s, encode_djsk16_insn(OPC_BLE, d, j, sk16)); +} + +/* Emits the `bgtu d, j, sk16` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_bgtu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16) +{ + tcg_out32(s, encode_djsk16_insn(OPC_BGTU, d, j, sk16)); +} + +/* Emits the `bleu d, j, sk16` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_bleu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16) +{ + tcg_out32(s, encode_djsk16_insn(OPC_BLEU, d, j, sk16)); +} + +/* End of generated code. */ diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h new file mode 100644 index 0000000000..349c672687 --- /dev/null +++ b/tcg/loongarch64/tcg-target-con-set.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define LoongArch target-specific constraint sets. + * + * Copyright (c) 2021 WANG Xuerui + * + * Based on tcg/riscv/tcg-target-con-set.h + * + * Copyright (c) 2021 Linaro + */ + +/* + * C_On_Im(...) defines a constraint set with outputs and inputs. + * Each operand should be a sequence of constraint letters as defined by + * tcg-target-con-str.h; the constraint combination is inclusive or. + */ +C_O0_I1(r) +C_O0_I2(rZ, r) +C_O0_I2(rZ, rZ) +C_O0_I2(LZ, L) +C_O1_I1(r, r) +C_O1_I1(r, L) +C_O1_I2(r, r, rC) +C_O1_I2(r, r, ri) +C_O1_I2(r, r, rI) +C_O1_I2(r, r, rU) +C_O1_I2(r, r, rW) +C_O1_I2(r, r, rZ) +C_O1_I2(r, 0, rZ) +C_O1_I2(r, rZ, rN) +C_O1_I2(r, rZ, rZ) diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h new file mode 100644 index 0000000000..c3986a4fd4 --- /dev/null +++ b/tcg/loongarch64/tcg-target-con-str.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define LoongArch target-specific operand constraints. 
+ * + * Copyright (c) 2021 WANG Xuerui + * + * Based on tcg/riscv/tcg-target-con-str.h + * + * Copyright (c) 2021 Linaro + */ + +/* + * Define constraint letters for register sets: + * REGS(letter, register_mask) + */ +REGS('r', ALL_GENERAL_REGS) +REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS) + +/* + * Define constraint letters for constants: + * CONST(letter, TCG_CT_CONST_* bit set) + */ +CONST('I', TCG_CT_CONST_S12) +CONST('N', TCG_CT_CONST_N12) +CONST('U', TCG_CT_CONST_U12) +CONST('Z', TCG_CT_CONST_ZERO) +CONST('C', TCG_CT_CONST_C12) +CONST('W', TCG_CT_CONST_WSZ) diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc new file mode 100644 index 0000000000..d326e28740 --- /dev/null +++ b/tcg/loongarch64/tcg-target.c.inc @@ -0,0 +1,1784 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2021 WANG Xuerui + * + * Based on tcg/riscv/tcg-target.c.inc + * + * Copyright (c) 2018 SiFive, Inc + * Copyright (c) 2008-2009 Arnaud Patard + * Copyright (c) 2009 Aurelien Jarno + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "../tcg-ldst.c.inc" + +#ifdef CONFIG_DEBUG_TCG +static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { + "zero", + "ra", + "tp", + "sp", + "a0", + "a1", + "a2", + "a3", + "a4", + "a5", + "a6", + "a7", + "t0", + "t1", + "t2", + "t3", + "t4", + "t5", + "t6", + "t7", + "t8", + "r21", /* reserved in the LP64* ABI, hence no ABI name */ + "s9", + "s0", + "s1", + "s2", + "s3", + "s4", + "s5", + "s6", + "s7", + "s8" +}; +#endif + +static const int tcg_target_reg_alloc_order[] = { + /* Registers preserved across calls */ + /* TCG_REG_S0 reserved for TCG_AREG0 */ + TCG_REG_S1, + TCG_REG_S2, + TCG_REG_S3, + TCG_REG_S4, + TCG_REG_S5, + TCG_REG_S6, + TCG_REG_S7, + TCG_REG_S8, + TCG_REG_S9, + + /* Registers (potentially) clobbered across calls */ + TCG_REG_T0, + TCG_REG_T1, + TCG_REG_T2, + TCG_REG_T3, + TCG_REG_T4, + TCG_REG_T5, + TCG_REG_T6, + TCG_REG_T7, + TCG_REG_T8, + + /* Argument registers, opposite order of allocation. 
*/ + TCG_REG_A7, + TCG_REG_A6, + TCG_REG_A5, + TCG_REG_A4, + TCG_REG_A3, + TCG_REG_A2, + TCG_REG_A1, + TCG_REG_A0, +}; + +static const int tcg_target_call_iarg_regs[] = { + TCG_REG_A0, + TCG_REG_A1, + TCG_REG_A2, + TCG_REG_A3, + TCG_REG_A4, + TCG_REG_A5, + TCG_REG_A6, + TCG_REG_A7, +}; + +static const int tcg_target_call_oarg_regs[] = { + TCG_REG_A0, + TCG_REG_A1, +}; + +#ifndef CONFIG_SOFTMMU +#define USE_GUEST_BASE (guest_base != 0) +#define TCG_GUEST_BASE_REG TCG_REG_S1 +#endif + +#define TCG_CT_CONST_ZERO 0x100 +#define TCG_CT_CONST_S12 0x200 +#define TCG_CT_CONST_N12 0x400 +#define TCG_CT_CONST_U12 0x800 +#define TCG_CT_CONST_C12 0x1000 +#define TCG_CT_CONST_WSZ 0x2000 + +#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) +/* + * For softmmu, we need to avoid conflicts with the first 5 + * argument registers to call the helper. Some of these are + * also used for the tlb lookup. + */ +#ifdef CONFIG_SOFTMMU +#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_A0, 5) +#else +#define SOFTMMU_RESERVE_REGS 0 +#endif + + +static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len) +{ + return sextract64(val, pos, len); +} + +/* test if a constant matches the constraint */ +static bool tcg_target_const_match(int64_t val, TCGType type, int ct) +{ + if (ct & TCG_CT_CONST) { + return true; + } + if ((ct & TCG_CT_CONST_ZERO) && val == 0) { + return true; + } + if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) { + return true; + } + if ((ct & TCG_CT_CONST_N12) && -val == sextreg(-val, 0, 12)) { + return true; + } + if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) { + return true; + } + if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) { + return true; + } + if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) { + return true; + } + return false; +} + +/* + * Relocations + */ + +/* + * Relocation records defined in LoongArch ELF psABI v1.00 is way too + * complicated; a whopping stack machine is needed to stuff the fields, at + * the very least one SOP_PUSH and one SOP_POP (of the correct format) are + * needed. + * + * Hence, define our own simpler relocation types. Numbers are chosen as to + * not collide with potential future additions to the true ELF relocation + * type enum. 
+ */ + +/* Field Sk16, shifted right by 2; suitable for conditional jumps */ +#define R_LOONGARCH_BR_SK16 256 +/* Field Sd10k16, shifted right by 2; suitable for B and BL */ +#define R_LOONGARCH_BR_SD10K16 257 + +static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target) +{ + const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); + intptr_t offset = (intptr_t)target - (intptr_t)src_rx; + + tcg_debug_assert((offset & 3) == 0); + offset >>= 2; + if (offset == sextreg(offset, 0, 16)) { + *src_rw = deposit64(*src_rw, 10, 16, offset); + return true; + } + + return false; +} + +static bool reloc_br_sd10k16(tcg_insn_unit *src_rw, + const tcg_insn_unit *target) +{ + const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); + intptr_t offset = (intptr_t)target - (intptr_t)src_rx; + + tcg_debug_assert((offset & 3) == 0); + offset >>= 2; + if (offset == sextreg(offset, 0, 26)) { + *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */ + *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */ + return true; + } + + return false; +} + +static bool patch_reloc(tcg_insn_unit *code_ptr, int type, + intptr_t value, intptr_t addend) +{ + tcg_debug_assert(addend == 0); + switch (type) { + case R_LOONGARCH_BR_SK16: + return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value); + case R_LOONGARCH_BR_SD10K16: + return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value); + default: + g_assert_not_reached(); + } +} + +#include "tcg-insn-defs.c.inc" + +/* + * TCG intrinsics + */ + +static void tcg_out_mb(TCGContext *s, TCGArg a0) +{ + /* Baseline LoongArch only has the full barrier, unfortunately. */ + tcg_out_opc_dbar(s, 0); +} + +static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) +{ + if (ret == arg) { + return true; + } + switch (type) { + case TCG_TYPE_I32: + case TCG_TYPE_I64: + /* + * Conventional register-register move used in LoongArch is + * `or dst, src, zero`. + */ + tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO); + break; + default: + g_assert_not_reached(); + } + return true; +} + +static bool imm_part_needs_loading(bool high_bits_are_ones, + tcg_target_long part) +{ + if (high_bits_are_ones) { + return part != -1; + } else { + return part != 0; + } +} + +/* Loads a 32-bit immediate into rd, sign-extended. */ +static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val) +{ + tcg_target_long lo = sextreg(val, 0, 12); + tcg_target_long hi12 = sextreg(val, 12, 20); + + /* Single-instruction cases. */ + if (lo == val) { + /* val fits in simm12: addi.w rd, zero, val */ + tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val); + return; + } + if (0x800 <= val && val <= 0xfff) { + /* val fits in uimm12: ori rd, zero, val */ + tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val); + return; + } + + /* High bits must be set; load with lu12i.w + optional ori. */ + tcg_out_opc_lu12i_w(s, rd, hi12); + if (lo != 0) { + tcg_out_opc_ori(s, rd, rd, lo & 0xfff); + } +} + +static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, + tcg_target_long val) +{ + /* + * LoongArch conventionally loads 64-bit immediates in at most 4 steps, + * with dedicated instructions for filling the respective bitfields + * below: + * + * 6 5 4 3 + * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 + * +-----------------------+---------------------------------------+... + * | hi52 | hi32 | + * +-----------------------+---------------------------------------+... 
+ * 3 2 1 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * ...+-------------------------------------+-------------------------+ + * | hi12 | lo | + * ...+-------------------------------------+-------------------------+ + * + * Check if val belong to one of the several fast cases, before falling + * back to the slow path. + */ + + intptr_t pc_offset; + tcg_target_long val_lo, val_hi, pc_hi, offset_hi; + tcg_target_long hi32, hi52; + bool rd_high_bits_are_ones; + + /* Value fits in signed i32. */ + if (type == TCG_TYPE_I32 || val == (int32_t)val) { + tcg_out_movi_i32(s, rd, val); + return; + } + + /* PC-relative cases. */ + pc_offset = tcg_pcrel_diff(s, (void *)val); + if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) { + /* Single pcaddu2i. */ + tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2); + return; + } + + if (pc_offset == (int32_t)pc_offset) { + /* Offset within 32 bits; load with pcalau12i + ori. */ + val_lo = sextreg(val, 0, 12); + val_hi = val >> 12; + pc_hi = (val - pc_offset) >> 12; + offset_hi = val_hi - pc_hi; + + tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20)); + tcg_out_opc_pcalau12i(s, rd, offset_hi); + if (val_lo != 0) { + tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff); + } + return; + } + + hi32 = sextreg(val, 32, 20); + hi52 = sextreg(val, 52, 12); + + /* Single cu52i.d case. */ + if (ctz64(val) >= 52) { + tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52); + return; + } + + /* Slow path. Initialize the low 32 bits, then concat high bits. */ + tcg_out_movi_i32(s, rd, val); + rd_high_bits_are_ones = (int32_t)val < 0; + + if (imm_part_needs_loading(rd_high_bits_are_ones, hi32)) { + tcg_out_opc_cu32i_d(s, rd, hi32); + rd_high_bits_are_ones = hi32 < 0; + } + + if (imm_part_needs_loading(rd_high_bits_are_ones, hi52)) { + tcg_out_opc_cu52i_d(s, rd, rd, hi52); + } +} + +static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_opc_andi(s, ret, arg, 0xff); +} + +static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15); +} + +static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31); +} + +static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_opc_sext_b(s, ret, arg); +} + +static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_opc_sext_h(s, ret, arg); +} + +static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_opc_addi_w(s, ret, arg, 0); +} + +static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc, + TCGReg a0, TCGReg a1, TCGReg a2, + bool c2, bool is_32bit) +{ + if (c2) { + /* + * Fast path: semantics already satisfied due to constraint and + * insn behavior, single instruction is enough. + */ + tcg_debug_assert(a2 == (is_32bit ? 32 : 64)); + /* all clz/ctz insns belong to DJ-format */ + tcg_out32(s, encode_dj_insn(opc, a0, a1)); + return; + } + + tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1)); + /* a0 = a1 ? 
REG_TMP0 : a2 */ + tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1); + tcg_out_opc_masknez(s, a0, a2, a1); + tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0); +} + +static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg arg1, TCGReg arg2, bool c2) +{ + TCGReg tmp; + + if (c2) { + tcg_debug_assert(arg2 == 0); + } + + switch (cond) { + case TCG_COND_EQ: + if (c2) { + tmp = arg1; + } else { + tcg_out_opc_sub_d(s, ret, arg1, arg2); + tmp = ret; + } + tcg_out_opc_sltui(s, ret, tmp, 1); + break; + case TCG_COND_NE: + if (c2) { + tmp = arg1; + } else { + tcg_out_opc_sub_d(s, ret, arg1, arg2); + tmp = ret; + } + tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp); + break; + case TCG_COND_LT: + tcg_out_opc_slt(s, ret, arg1, arg2); + break; + case TCG_COND_GE: + tcg_out_opc_slt(s, ret, arg1, arg2); + tcg_out_opc_xori(s, ret, ret, 1); + break; + case TCG_COND_LE: + tcg_out_setcond(s, TCG_COND_GE, ret, arg2, arg1, false); + break; + case TCG_COND_GT: + tcg_out_setcond(s, TCG_COND_LT, ret, arg2, arg1, false); + break; + case TCG_COND_LTU: + tcg_out_opc_sltu(s, ret, arg1, arg2); + break; + case TCG_COND_GEU: + tcg_out_opc_sltu(s, ret, arg1, arg2); + tcg_out_opc_xori(s, ret, ret, 1); + break; + case TCG_COND_LEU: + tcg_out_setcond(s, TCG_COND_GEU, ret, arg2, arg1, false); + break; + case TCG_COND_GTU: + tcg_out_setcond(s, TCG_COND_LTU, ret, arg2, arg1, false); + break; + default: + g_assert_not_reached(); + break; + } +} + +/* + * Branch helpers + */ + +static const struct { + LoongArchInsn op; + bool swap; +} tcg_brcond_to_loongarch[] = { + [TCG_COND_EQ] = { OPC_BEQ, false }, + [TCG_COND_NE] = { OPC_BNE, false }, + [TCG_COND_LT] = { OPC_BGT, true }, + [TCG_COND_GE] = { OPC_BLE, true }, + [TCG_COND_LE] = { OPC_BLE, false }, + [TCG_COND_GT] = { OPC_BGT, false }, + [TCG_COND_LTU] = { OPC_BGTU, true }, + [TCG_COND_GEU] = { OPC_BLEU, true }, + [TCG_COND_LEU] = { OPC_BLEU, false }, + [TCG_COND_GTU] = { OPC_BGTU, false } +}; + +static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, + TCGReg arg2, TCGLabel *l) +{ + LoongArchInsn op = tcg_brcond_to_loongarch[cond].op; + + tcg_debug_assert(op != 0); + + if (tcg_brcond_to_loongarch[cond].swap) { + TCGReg t = arg1; + arg1 = arg2; + arg2 = t; + } + + /* all conditional branch insns belong to DJSk16-format */ + tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0); + tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0)); +} + +static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail) +{ + TCGReg link = tail ? 
TCG_REG_ZERO : TCG_REG_RA; + ptrdiff_t offset = tcg_pcrel_diff(s, arg); + + tcg_debug_assert((offset & 3) == 0); + if (offset == sextreg(offset, 0, 28)) { + /* short jump: +/- 256MiB */ + if (tail) { + tcg_out_opc_b(s, offset >> 2); + } else { + tcg_out_opc_bl(s, offset >> 2); + } + } else if (offset == sextreg(offset, 0, 38)) { + /* long jump: +/- 256GiB */ + tcg_target_long lo = sextreg(offset, 0, 18); + tcg_target_long hi = offset - lo; + tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18); + tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); + } else { + /* far jump: 64-bit */ + tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18); + tcg_target_long hi = (tcg_target_long)arg - lo; + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi); + tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); + } +} + +static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg) +{ + tcg_out_call_int(s, arg, false); +} + +/* + * Load/store helpers + */ + +static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data, + TCGReg addr, intptr_t offset) +{ + intptr_t imm12 = sextreg(offset, 0, 12); + + if (offset != imm12) { + intptr_t diff = offset - (uintptr_t)s->code_ptr; + + if (addr == TCG_REG_ZERO && diff == (int32_t)diff) { + imm12 = sextreg(diff, 0, 12); + tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12); + if (addr != TCG_REG_ZERO) { + tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr); + } + } + addr = TCG_REG_TMP2; + } + + switch (opc) { + case OPC_LD_B: + case OPC_LD_BU: + case OPC_LD_H: + case OPC_LD_HU: + case OPC_LD_W: + case OPC_LD_WU: + case OPC_LD_D: + case OPC_ST_B: + case OPC_ST_H: + case OPC_ST_W: + case OPC_ST_D: + tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12)); + break; + default: + g_assert_not_reached(); + } +} + +static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) +{ + bool is_32bit = type == TCG_TYPE_I32; + tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2); +} + +static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) +{ + bool is_32bit = type == TCG_TYPE_I32; + tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2); +} + +static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, + TCGReg base, intptr_t ofs) +{ + if (val == 0) { + tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); + return true; + } + return false; +} + +/* + * Load/store helpers for SoftMMU, and qemu_ld/st implementations + */ + +#if defined(CONFIG_SOFTMMU) +/* + * helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, + * MemOpIdx oi, uintptr_t ra) + */ +static void * const qemu_ld_helpers[4] = { + [MO_8] = helper_ret_ldub_mmu, + [MO_16] = helper_le_lduw_mmu, + [MO_32] = helper_le_ldul_mmu, + [MO_64] = helper_le_ldq_mmu, +}; + +/* + * helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, + * uintxx_t val, MemOpIdx oi, + * uintptr_t ra) + */ +static void * const qemu_st_helpers[4] = { + [MO_8] = helper_ret_stb_mmu, + [MO_16] = helper_le_stw_mmu, + [MO_32] = helper_le_stl_mmu, + [MO_64] = helper_le_stq_mmu, +}; + +/* We expect to use a 12-bit negative offset from ENV. 
*/ +QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); +QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11)); + +static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) +{ + tcg_out_opc_b(s, 0); + return reloc_br_sd10k16(s->code_ptr - 1, target); +} + +/* + * Emits common code for TLB addend lookup, that eventually loads the + * addend in TCG_REG_TMP2. + */ +static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, MemOpIdx oi, + tcg_insn_unit **label_ptr, bool is_load) +{ + MemOp opc = get_memop(oi); + unsigned s_bits = opc & MO_SIZE; + unsigned a_bits = get_alignment_bits(opc); + tcg_target_long compare_mask; + int mem_index = get_mmuidx(oi); + int fast_ofs = TLB_MASK_TABLE_OFS(mem_index); + int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); + int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); + + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); + + tcg_out_opc_srli_d(s, TCG_REG_TMP2, addrl, + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); + tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); + + /* Load the tlb comparator and the addend. */ + tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2, + is_load ? offsetof(CPUTLBEntry, addr_read) + : offsetof(CPUTLBEntry, addr_write)); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, + offsetof(CPUTLBEntry, addend)); + + /* We don't support unaligned accesses. */ + if (a_bits < s_bits) { + a_bits = s_bits; + } + /* Clear the non-page, non-alignment bits from the address. */ + compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1); + tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask); + tcg_out_opc_and(s, TCG_REG_TMP1, TCG_REG_TMP1, addrl); + + /* Compare masked address with the TLB entry. */ + label_ptr[0] = s->code_ptr; + tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0); + + /* TLB Hit - addend in TCG_REG_TMP2, ready for use. 
*/ +} + +static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi, + TCGType type, + TCGReg datalo, TCGReg addrlo, + void *raddr, tcg_insn_unit **label_ptr) +{ + TCGLabelQemuLdst *label = new_ldst_label(s); + + label->is_ld = is_ld; + label->oi = oi; + label->type = type; + label->datalo_reg = datalo; + label->datahi_reg = 0; /* unused */ + label->addrlo_reg = addrlo; + label->addrhi_reg = 0; /* unused */ + label->raddr = tcg_splitwx_to_rx(raddr); + label->label_ptr[0] = label_ptr[0]; +} + +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + MemOpIdx oi = l->oi; + MemOp opc = get_memop(oi); + MemOp size = opc & MO_SIZE; + TCGType type = l->type; + + /* resolve label address */ + if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { + return false; + } + + /* call load helper */ + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg); + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A2, oi); + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, (tcg_target_long)l->raddr); + + tcg_out_call(s, qemu_ld_helpers[size]); + + switch (opc & MO_SSIZE) { + case MO_SB: + tcg_out_ext8s(s, l->datalo_reg, TCG_REG_A0); + break; + case MO_SW: + tcg_out_ext16s(s, l->datalo_reg, TCG_REG_A0); + break; + case MO_SL: + tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0); + break; + case MO_UL: + if (type == TCG_TYPE_I32) { + /* MO_UL loads of i32 should be sign-extended too */ + tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0); + break; + } + /* fallthrough */ + default: + tcg_out_mov(s, type, l->datalo_reg, TCG_REG_A0); + break; + } + + return tcg_out_goto(s, l->raddr); +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + MemOpIdx oi = l->oi; + MemOp opc = get_memop(oi); + MemOp size = opc & MO_SIZE; + + /* resolve label address */ + if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { + return false; + } + + /* call store helper */ + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg); + switch (size) { + case MO_8: + tcg_out_ext8u(s, TCG_REG_A2, l->datalo_reg); + break; + case MO_16: + tcg_out_ext16u(s, TCG_REG_A2, l->datalo_reg); + break; + case MO_32: + tcg_out_ext32u(s, TCG_REG_A2, l->datalo_reg); + break; + case MO_64: + tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_A2, l->datalo_reg); + break; + default: + g_assert_not_reached(); + break; + } + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, oi); + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A4, (tcg_target_long)l->raddr); + + tcg_out_call(s, qemu_st_helpers[size]); + + return tcg_out_goto(s, l->raddr); +} +#else + +/* + * Alignment helpers for user-mode emulation + */ + +static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg, + unsigned a_bits) +{ + TCGLabelQemuLdst *l = new_ldst_label(s); + + l->is_ld = is_ld; + l->addrlo_reg = addr_reg; + + /* + * Without micro-architecture details, we don't know which of bstrpick or + * andi is faster, so use bstrpick as it's not constrained by imm field + * width. 
(Not to say alignments >= 2^12 are going to happen any time + * soon, though) + */ + tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1); + + l->label_ptr[0] = s->code_ptr; + tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0); + + l->raddr = tcg_splitwx_to_rx(s->code_ptr); +} + +static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) +{ + /* resolve label address */ + if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { + return false; + } + + tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg); + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); + + /* tail call, with the return address back inline. */ + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr); + tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld + : helper_unaligned_st), true); + return true; +} + +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + return tcg_out_fail_alignment(s, l); +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + return tcg_out_fail_alignment(s, l); +} + +#endif /* CONFIG_SOFTMMU */ + +/* + * `ext32u` the address register into the temp register given, + * if target is 32-bit, no-op otherwise. + * + * Returns the address register ready for use with TLB addend. + */ +static TCGReg tcg_out_zext_addr_if_32_bit(TCGContext *s, + TCGReg addr, TCGReg tmp) +{ + if (TARGET_LONG_BITS == 32) { + tcg_out_ext32u(s, tmp, addr); + return tmp; + } + return addr; +} + +static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj, + TCGReg rk, MemOp opc, TCGType type) +{ + /* Byte swapping is left to middle-end expansion. */ + tcg_debug_assert((opc & MO_BSWAP) == 0); + + switch (opc & MO_SSIZE) { + case MO_UB: + tcg_out_opc_ldx_bu(s, rd, rj, rk); + break; + case MO_SB: + tcg_out_opc_ldx_b(s, rd, rj, rk); + break; + case MO_UW: + tcg_out_opc_ldx_hu(s, rd, rj, rk); + break; + case MO_SW: + tcg_out_opc_ldx_h(s, rd, rj, rk); + break; + case MO_UL: + if (type == TCG_TYPE_I64) { + tcg_out_opc_ldx_wu(s, rd, rj, rk); + break; + } + /* fallthrough */ + case MO_SL: + tcg_out_opc_ldx_w(s, rd, rj, rk); + break; + case MO_UQ: + tcg_out_opc_ldx_d(s, rd, rj, rk); + break; + default: + g_assert_not_reached(); + } +} + +static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type) +{ + TCGReg addr_regl; + TCGReg data_regl; + MemOpIdx oi; + MemOp opc; +#if defined(CONFIG_SOFTMMU) + tcg_insn_unit *label_ptr[1]; +#else + unsigned a_bits; +#endif + TCGReg base; + + data_regl = *args++; + addr_regl = *args++; + oi = *args++; + opc = get_memop(oi); + +#if defined(CONFIG_SOFTMMU) + tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 1); + base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); + tcg_out_qemu_ld_indexed(s, data_regl, base, TCG_REG_TMP2, opc, type); + add_qemu_ldst_label(s, 1, oi, type, + data_regl, addr_regl, + s->code_ptr, label_ptr); +#else + a_bits = get_alignment_bits(opc); + if (a_bits) { + tcg_out_test_alignment(s, true, addr_regl, a_bits); + } + base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); + TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; + tcg_out_qemu_ld_indexed(s, data_regl, base, guest_base_reg, opc, type); +#endif +} + +static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data, + TCGReg rj, TCGReg rk, MemOp opc) +{ + /* Byte swapping is left to middle-end expansion. 
*/ + tcg_debug_assert((opc & MO_BSWAP) == 0); + + switch (opc & MO_SIZE) { + case MO_8: + tcg_out_opc_stx_b(s, data, rj, rk); + break; + case MO_16: + tcg_out_opc_stx_h(s, data, rj, rk); + break; + case MO_32: + tcg_out_opc_stx_w(s, data, rj, rk); + break; + case MO_64: + tcg_out_opc_stx_d(s, data, rj, rk); + break; + default: + g_assert_not_reached(); + } +} + +static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args) +{ + TCGReg addr_regl; + TCGReg data_regl; + MemOpIdx oi; + MemOp opc; +#if defined(CONFIG_SOFTMMU) + tcg_insn_unit *label_ptr[1]; +#else + unsigned a_bits; +#endif + TCGReg base; + + data_regl = *args++; + addr_regl = *args++; + oi = *args++; + opc = get_memop(oi); + +#if defined(CONFIG_SOFTMMU) + tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 0); + base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); + tcg_out_qemu_st_indexed(s, data_regl, base, TCG_REG_TMP2, opc); + add_qemu_ldst_label(s, 0, oi, + 0, /* type param is unused for stores */ + data_regl, addr_regl, + s->code_ptr, label_ptr); +#else + a_bits = get_alignment_bits(opc); + if (a_bits) { + tcg_out_test_alignment(s, false, addr_regl, a_bits); + } + base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); + TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; + tcg_out_qemu_st_indexed(s, data_regl, base, guest_base_reg, opc); +#endif +} + +/* LoongArch uses `andi zero, zero, 0` as NOP. */ +#define NOP OPC_ANDI +static void tcg_out_nop(TCGContext *s) +{ + tcg_out32(s, NOP); +} + +void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx, + uintptr_t jmp_rw, uintptr_t addr) +{ + tcg_insn_unit i1, i2; + ptrdiff_t upper, lower; + ptrdiff_t offset = (ptrdiff_t)(addr - jmp_rx) >> 2; + + if (offset == sextreg(offset, 0, 26)) { + i1 = encode_sd10k16_insn(OPC_B, offset); + i2 = NOP; + } else { + tcg_debug_assert(offset == sextreg(offset, 0, 36)); + lower = (int16_t)offset; + upper = (offset - lower) >> 16; + + i1 = encode_dsj20_insn(OPC_PCADDU18I, TCG_REG_TMP0, upper); + i2 = encode_djsk16_insn(OPC_JIRL, TCG_REG_ZERO, TCG_REG_TMP0, lower); + } + uint64_t pair = ((uint64_t)i2 << 32) | i1; + qatomic_set((uint64_t *)jmp_rw, pair); + flush_idcache_range(jmp_rx, jmp_rw, 8); +} + +/* + * Entry-points + */ + +static const tcg_insn_unit *tb_ret_addr; + +static void tcg_out_op(TCGContext *s, TCGOpcode opc, + const TCGArg args[TCG_MAX_OP_ARGS], + const int const_args[TCG_MAX_OP_ARGS]) +{ + TCGArg a0 = args[0]; + TCGArg a1 = args[1]; + TCGArg a2 = args[2]; + int c2 = const_args[2]; + + switch (opc) { + case INDEX_op_exit_tb: + /* Reuse the zeroing that exists for goto_ptr. */ + if (a0 == 0) { + tcg_out_call_int(s, tcg_code_gen_epilogue, true); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0); + tcg_out_call_int(s, tb_ret_addr, true); + } + break; + + case INDEX_op_goto_tb: + tcg_debug_assert(s->tb_jmp_insn_offset != NULL); + /* + * Ensure that patch area is 8-byte aligned so that an + * atomic write can be used to patch the target address. 
+ */ + if ((uintptr_t)s->code_ptr & 7) { + tcg_out_nop(s); + } + s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); + /* + * actual branch destination will be patched by + * tb_target_set_jmp_target later + */ + tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, 0); + tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0); + set_jmp_reset_offset(s, a0); + break; + + case INDEX_op_mb: + tcg_out_mb(s, a0); + break; + + case INDEX_op_goto_ptr: + tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0); + break; + + case INDEX_op_br: + tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0), + 0); + tcg_out_opc_b(s, 0); + break; + + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); + break; + + case INDEX_op_ext8s_i32: + case INDEX_op_ext8s_i64: + tcg_out_ext8s(s, a0, a1); + break; + + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + tcg_out_ext8u(s, a0, a1); + break; + + case INDEX_op_ext16s_i32: + case INDEX_op_ext16s_i64: + tcg_out_ext16s(s, a0, a1); + break; + + case INDEX_op_ext16u_i32: + case INDEX_op_ext16u_i64: + tcg_out_ext16u(s, a0, a1); + break; + + case INDEX_op_ext32u_i64: + case INDEX_op_extu_i32_i64: + tcg_out_ext32u(s, a0, a1); + break; + + case INDEX_op_ext32s_i64: + case INDEX_op_extrl_i64_i32: + case INDEX_op_ext_i32_i64: + tcg_out_ext32s(s, a0, a1); + break; + + case INDEX_op_extrh_i64_i32: + tcg_out_opc_srai_d(s, a0, a1, 32); + break; + + case INDEX_op_not_i32: + case INDEX_op_not_i64: + tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO); + break; + + case INDEX_op_nor_i32: + case INDEX_op_nor_i64: + if (c2) { + tcg_out_opc_ori(s, a0, a1, a2); + tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO); + } else { + tcg_out_opc_nor(s, a0, a1, a2); + } + break; + + case INDEX_op_andc_i32: + case INDEX_op_andc_i64: + if (c2) { + /* guaranteed to fit due to constraint */ + tcg_out_opc_andi(s, a0, a1, ~a2); + } else { + tcg_out_opc_andn(s, a0, a1, a2); + } + break; + + case INDEX_op_orc_i32: + case INDEX_op_orc_i64: + if (c2) { + /* guaranteed to fit due to constraint */ + tcg_out_opc_ori(s, a0, a1, ~a2); + } else { + tcg_out_opc_orn(s, a0, a1, a2); + } + break; + + case INDEX_op_and_i32: + case INDEX_op_and_i64: + if (c2) { + tcg_out_opc_andi(s, a0, a1, a2); + } else { + tcg_out_opc_and(s, a0, a1, a2); + } + break; + + case INDEX_op_or_i32: + case INDEX_op_or_i64: + if (c2) { + tcg_out_opc_ori(s, a0, a1, a2); + } else { + tcg_out_opc_or(s, a0, a1, a2); + } + break; + + case INDEX_op_xor_i32: + case INDEX_op_xor_i64: + if (c2) { + tcg_out_opc_xori(s, a0, a1, a2); + } else { + tcg_out_opc_xor(s, a0, a1, a2); + } + break; + + case INDEX_op_extract_i32: + tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1); + break; + case INDEX_op_extract_i64: + tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1); + break; + + case INDEX_op_deposit_i32: + tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1); + break; + case INDEX_op_deposit_i64: + tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1); + break; + + case INDEX_op_bswap16_i32: + case INDEX_op_bswap16_i64: + tcg_out_opc_revb_2h(s, a0, a1); + if (a2 & TCG_BSWAP_OS) { + tcg_out_ext16s(s, a0, a0); + } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { + tcg_out_ext16u(s, a0, a0); + } + break; + + case INDEX_op_bswap32_i32: + /* All 32-bit values are computed sign-extended in the register. 
*/ + a2 = TCG_BSWAP_OS; + /* fallthrough */ + case INDEX_op_bswap32_i64: + tcg_out_opc_revb_2w(s, a0, a1); + if (a2 & TCG_BSWAP_OS) { + tcg_out_ext32s(s, a0, a0); + } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { + tcg_out_ext32u(s, a0, a0); + } + break; + + case INDEX_op_bswap64_i64: + tcg_out_opc_revb_d(s, a0, a1); + break; + + case INDEX_op_clz_i32: + tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true); + break; + case INDEX_op_clz_i64: + tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false); + break; + + case INDEX_op_ctz_i32: + tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true); + break; + case INDEX_op_ctz_i64: + tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false); + break; + + case INDEX_op_shl_i32: + if (c2) { + tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f); + } else { + tcg_out_opc_sll_w(s, a0, a1, a2); + } + break; + case INDEX_op_shl_i64: + if (c2) { + tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f); + } else { + tcg_out_opc_sll_d(s, a0, a1, a2); + } + break; + + case INDEX_op_shr_i32: + if (c2) { + tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f); + } else { + tcg_out_opc_srl_w(s, a0, a1, a2); + } + break; + case INDEX_op_shr_i64: + if (c2) { + tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f); + } else { + tcg_out_opc_srl_d(s, a0, a1, a2); + } + break; + + case INDEX_op_sar_i32: + if (c2) { + tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f); + } else { + tcg_out_opc_sra_w(s, a0, a1, a2); + } + break; + case INDEX_op_sar_i64: + if (c2) { + tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f); + } else { + tcg_out_opc_sra_d(s, a0, a1, a2); + } + break; + + case INDEX_op_rotl_i32: + /* transform into equivalent rotr/rotri */ + if (c2) { + tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f); + } else { + tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); + tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0); + } + break; + case INDEX_op_rotl_i64: + /* transform into equivalent rotr/rotri */ + if (c2) { + tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f); + } else { + tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); + tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0); + } + break; + + case INDEX_op_rotr_i32: + if (c2) { + tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f); + } else { + tcg_out_opc_rotr_w(s, a0, a1, a2); + } + break; + case INDEX_op_rotr_i64: + if (c2) { + tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f); + } else { + tcg_out_opc_rotr_d(s, a0, a1, a2); + } + break; + + case INDEX_op_add_i32: + if (c2) { + tcg_out_opc_addi_w(s, a0, a1, a2); + } else { + tcg_out_opc_add_w(s, a0, a1, a2); + } + break; + case INDEX_op_add_i64: + if (c2) { + tcg_out_opc_addi_d(s, a0, a1, a2); + } else { + tcg_out_opc_add_d(s, a0, a1, a2); + } + break; + + case INDEX_op_sub_i32: + if (c2) { + tcg_out_opc_addi_w(s, a0, a1, -a2); + } else { + tcg_out_opc_sub_w(s, a0, a1, a2); + } + break; + case INDEX_op_sub_i64: + if (c2) { + tcg_out_opc_addi_d(s, a0, a1, -a2); + } else { + tcg_out_opc_sub_d(s, a0, a1, a2); + } + break; + + case INDEX_op_mul_i32: + tcg_out_opc_mul_w(s, a0, a1, a2); + break; + case INDEX_op_mul_i64: + tcg_out_opc_mul_d(s, a0, a1, a2); + break; + + case INDEX_op_mulsh_i32: + tcg_out_opc_mulh_w(s, a0, a1, a2); + break; + case INDEX_op_mulsh_i64: + tcg_out_opc_mulh_d(s, a0, a1, a2); + break; + + case INDEX_op_muluh_i32: + tcg_out_opc_mulh_wu(s, a0, a1, a2); + break; + case INDEX_op_muluh_i64: + tcg_out_opc_mulh_du(s, a0, a1, a2); + break; + + case INDEX_op_div_i32: + tcg_out_opc_div_w(s, a0, a1, a2); + break; + case INDEX_op_div_i64: + tcg_out_opc_div_d(s, a0, a1, a2); + break; + + case INDEX_op_divu_i32: + tcg_out_opc_div_wu(s, a0, 
a1, a2); + break; + case INDEX_op_divu_i64: + tcg_out_opc_div_du(s, a0, a1, a2); + break; + + case INDEX_op_rem_i32: + tcg_out_opc_mod_w(s, a0, a1, a2); + break; + case INDEX_op_rem_i64: + tcg_out_opc_mod_d(s, a0, a1, a2); + break; + + case INDEX_op_remu_i32: + tcg_out_opc_mod_wu(s, a0, a1, a2); + break; + case INDEX_op_remu_i64: + tcg_out_opc_mod_du(s, a0, a1, a2); + break; + + case INDEX_op_setcond_i32: + case INDEX_op_setcond_i64: + tcg_out_setcond(s, args[3], a0, a1, a2, c2); + break; + + case INDEX_op_ld8s_i32: + case INDEX_op_ld8s_i64: + tcg_out_ldst(s, OPC_LD_B, a0, a1, a2); + break; + case INDEX_op_ld8u_i32: + case INDEX_op_ld8u_i64: + tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2); + break; + case INDEX_op_ld16s_i32: + case INDEX_op_ld16s_i64: + tcg_out_ldst(s, OPC_LD_H, a0, a1, a2); + break; + case INDEX_op_ld16u_i32: + case INDEX_op_ld16u_i64: + tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2); + break; + case INDEX_op_ld_i32: + case INDEX_op_ld32s_i64: + tcg_out_ldst(s, OPC_LD_W, a0, a1, a2); + break; + case INDEX_op_ld32u_i64: + tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2); + break; + case INDEX_op_ld_i64: + tcg_out_ldst(s, OPC_LD_D, a0, a1, a2); + break; + + case INDEX_op_st8_i32: + case INDEX_op_st8_i64: + tcg_out_ldst(s, OPC_ST_B, a0, a1, a2); + break; + case INDEX_op_st16_i32: + case INDEX_op_st16_i64: + tcg_out_ldst(s, OPC_ST_H, a0, a1, a2); + break; + case INDEX_op_st_i32: + case INDEX_op_st32_i64: + tcg_out_ldst(s, OPC_ST_W, a0, a1, a2); + break; + case INDEX_op_st_i64: + tcg_out_ldst(s, OPC_ST_D, a0, a1, a2); + break; + + case INDEX_op_qemu_ld_i32: + tcg_out_qemu_ld(s, args, TCG_TYPE_I32); + break; + case INDEX_op_qemu_ld_i64: + tcg_out_qemu_ld(s, args, TCG_TYPE_I64); + break; + case INDEX_op_qemu_st_i32: + tcg_out_qemu_st(s, args); + break; + case INDEX_op_qemu_st_i64: + tcg_out_qemu_st(s, args); + break; + + case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ + case INDEX_op_mov_i64: + case INDEX_op_call: /* Always emitted via tcg_out_call. 
*/ + default: + g_assert_not_reached(); + } +} + +static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) +{ + switch (op) { + case INDEX_op_goto_ptr: + return C_O0_I1(r); + + case INDEX_op_st8_i32: + case INDEX_op_st8_i64: + case INDEX_op_st16_i32: + case INDEX_op_st16_i64: + case INDEX_op_st32_i64: + case INDEX_op_st_i32: + case INDEX_op_st_i64: + return C_O0_I2(rZ, r); + + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + return C_O0_I2(rZ, rZ); + + case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_i64: + return C_O0_I2(LZ, L); + + case INDEX_op_ext8s_i32: + case INDEX_op_ext8s_i64: + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + case INDEX_op_ext16s_i32: + case INDEX_op_ext16s_i64: + case INDEX_op_ext16u_i32: + case INDEX_op_ext16u_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extrl_i64_i32: + case INDEX_op_extrh_i64_i32: + case INDEX_op_ext_i32_i64: + case INDEX_op_not_i32: + case INDEX_op_not_i64: + case INDEX_op_extract_i32: + case INDEX_op_extract_i64: + case INDEX_op_bswap16_i32: + case INDEX_op_bswap16_i64: + case INDEX_op_bswap32_i32: + case INDEX_op_bswap32_i64: + case INDEX_op_bswap64_i64: + case INDEX_op_ld8s_i32: + case INDEX_op_ld8s_i64: + case INDEX_op_ld8u_i32: + case INDEX_op_ld8u_i64: + case INDEX_op_ld16s_i32: + case INDEX_op_ld16s_i64: + case INDEX_op_ld16u_i32: + case INDEX_op_ld16u_i64: + case INDEX_op_ld32s_i64: + case INDEX_op_ld32u_i64: + case INDEX_op_ld_i32: + case INDEX_op_ld_i64: + return C_O1_I1(r, r); + + case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_i64: + return C_O1_I1(r, L); + + case INDEX_op_andc_i32: + case INDEX_op_andc_i64: + case INDEX_op_orc_i32: + case INDEX_op_orc_i64: + /* + * LoongArch insns for these ops don't have reg-imm forms, but we + * can express using andi/ori if ~constant satisfies + * TCG_CT_CONST_U12. 
+ */ + return C_O1_I2(r, r, rC); + + case INDEX_op_shl_i32: + case INDEX_op_shl_i64: + case INDEX_op_shr_i32: + case INDEX_op_shr_i64: + case INDEX_op_sar_i32: + case INDEX_op_sar_i64: + case INDEX_op_rotl_i32: + case INDEX_op_rotl_i64: + case INDEX_op_rotr_i32: + case INDEX_op_rotr_i64: + return C_O1_I2(r, r, ri); + + case INDEX_op_add_i32: + case INDEX_op_add_i64: + return C_O1_I2(r, r, rI); + + case INDEX_op_and_i32: + case INDEX_op_and_i64: + case INDEX_op_nor_i32: + case INDEX_op_nor_i64: + case INDEX_op_or_i32: + case INDEX_op_or_i64: + case INDEX_op_xor_i32: + case INDEX_op_xor_i64: + /* LoongArch reg-imm bitops have their imms ZERO-extended */ + return C_O1_I2(r, r, rU); + + case INDEX_op_clz_i32: + case INDEX_op_clz_i64: + case INDEX_op_ctz_i32: + case INDEX_op_ctz_i64: + return C_O1_I2(r, r, rW); + + case INDEX_op_setcond_i32: + case INDEX_op_setcond_i64: + return C_O1_I2(r, r, rZ); + + case INDEX_op_deposit_i32: + case INDEX_op_deposit_i64: + /* Must deposit into the same register as input */ + return C_O1_I2(r, 0, rZ); + + case INDEX_op_sub_i32: + case INDEX_op_sub_i64: + return C_O1_I2(r, rZ, rN); + + case INDEX_op_mul_i32: + case INDEX_op_mul_i64: + case INDEX_op_mulsh_i32: + case INDEX_op_mulsh_i64: + case INDEX_op_muluh_i32: + case INDEX_op_muluh_i64: + case INDEX_op_div_i32: + case INDEX_op_div_i64: + case INDEX_op_divu_i32: + case INDEX_op_divu_i64: + case INDEX_op_rem_i32: + case INDEX_op_rem_i64: + case INDEX_op_remu_i32: + case INDEX_op_remu_i64: + return C_O1_I2(r, rZ, rZ); + + default: + g_assert_not_reached(); + } +} + +static const int tcg_target_callee_save_regs[] = { + TCG_REG_S0, /* used for the global env (TCG_AREG0) */ + TCG_REG_S1, + TCG_REG_S2, + TCG_REG_S3, + TCG_REG_S4, + TCG_REG_S5, + TCG_REG_S6, + TCG_REG_S7, + TCG_REG_S8, + TCG_REG_S9, + TCG_REG_RA, /* should be last for ABI compliance */ +}; + +/* Stack frame parameters. */ +#define REG_SIZE (TCG_TARGET_REG_BITS / 8) +#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE) +#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long)) +#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \ + + TCG_TARGET_STACK_ALIGN - 1) \ + & -TCG_TARGET_STACK_ALIGN) +#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE) + +/* We're expecting to be able to use an immediate for frame allocation. */ +QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff); + +/* Generate global QEMU prologue and epilogue code */ +static void tcg_target_qemu_prologue(TCGContext *s) +{ + int i; + + tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE); + + /* TB prologue */ + tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE); + for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { + tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], + TCG_REG_SP, SAVE_OFS + i * REG_SIZE); + } + +#if !defined(CONFIG_SOFTMMU) + if (USE_GUEST_BASE) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); + tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); + } +#endif + + /* Call generated code */ + tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); + tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0); + + /* Return path for goto_ptr. 
Set return value to 0 */ + tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); + tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO); + + /* TB epilogue */ + tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr); + for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { + tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], + TCG_REG_SP, SAVE_OFS + i * REG_SIZE); + } + + tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE); + tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0); +} + +static void tcg_target_init(TCGContext *s) +{ + tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; + tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS; + + tcg_target_call_clobber_regs = ALL_GENERAL_REGS; + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9); + + s->reserved_regs = 0; + tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED); +} + +typedef struct { + DebugFrameHeader h; + uint8_t fde_def_cfa[4]; + uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2]; +} DebugFrame; + +#define ELF_HOST_MACHINE EM_LOONGARCH + +static const DebugFrame debug_frame = { + .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */ + .h.cie.id = -1, + .h.cie.version = 1, + .h.cie.code_align = 1, + .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */ + .h.cie.return_column = TCG_REG_RA, + + /* Total FDE size does not include the "len" member. */ + .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), + + .fde_def_cfa = { + 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */ + (FRAME_SIZE & 0x7f) | 0x80, /* ... 
uleb128 FRAME_SIZE */ + (FRAME_SIZE >> 7) + }, + .fde_reg_ofs = { + 0x80 + 23, 11, /* DW_CFA_offset, s0, -88 */ + 0x80 + 24, 10, /* DW_CFA_offset, s1, -80 */ + 0x80 + 25, 9, /* DW_CFA_offset, s2, -72 */ + 0x80 + 26, 8, /* DW_CFA_offset, s3, -64 */ + 0x80 + 27, 7, /* DW_CFA_offset, s4, -56 */ + 0x80 + 28, 6, /* DW_CFA_offset, s5, -48 */ + 0x80 + 29, 5, /* DW_CFA_offset, s6, -40 */ + 0x80 + 30, 4, /* DW_CFA_offset, s7, -32 */ + 0x80 + 31, 3, /* DW_CFA_offset, s8, -24 */ + 0x80 + 22, 2, /* DW_CFA_offset, s9, -16 */ + 0x80 + 1 , 1, /* DW_CFA_offset, ra, -8 */ + } +}; + +void tcg_register_jit(const void *buf, size_t buf_size) +{ + tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); +} diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h new file mode 100644 index 0000000000..a659c8d6fd --- /dev/null +++ b/tcg/loongarch64/tcg-target.h @@ -0,0 +1,181 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2021 WANG Xuerui + * + * Based on tcg/riscv/tcg-target.h + * + * Copyright (c) 2018 SiFive, Inc + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef LOONGARCH_TCG_TARGET_H +#define LOONGARCH_TCG_TARGET_H + +/* + * Loongson removed the (incomplete) 32-bit support from kernel and toolchain + * for the initial upstreaming of this architecture, so don't bother and just + * support the LP64* ABI for now. + */ +#if defined(__loongarch64) +# define TCG_TARGET_REG_BITS 64 +#else +# error unsupported LoongArch register size +#endif + +#define TCG_TARGET_INSN_UNIT_SIZE 4 +#define TCG_TARGET_NB_REGS 32 +/* + * PCADDU18I + JIRL sequence can give 20 + 16 + 2 = 38 bits + * signed offset, which is +/- 128 GiB. 
+ */ +#define MAX_CODE_GEN_BUFFER_SIZE (128 * GiB) + +typedef enum { + TCG_REG_ZERO, + TCG_REG_RA, + TCG_REG_TP, + TCG_REG_SP, + TCG_REG_A0, + TCG_REG_A1, + TCG_REG_A2, + TCG_REG_A3, + TCG_REG_A4, + TCG_REG_A5, + TCG_REG_A6, + TCG_REG_A7, + TCG_REG_T0, + TCG_REG_T1, + TCG_REG_T2, + TCG_REG_T3, + TCG_REG_T4, + TCG_REG_T5, + TCG_REG_T6, + TCG_REG_T7, + TCG_REG_T8, + TCG_REG_RESERVED, + TCG_REG_S9, + TCG_REG_S0, + TCG_REG_S1, + TCG_REG_S2, + TCG_REG_S3, + TCG_REG_S4, + TCG_REG_S5, + TCG_REG_S6, + TCG_REG_S7, + TCG_REG_S8, + + /* aliases */ + TCG_AREG0 = TCG_REG_S0, + TCG_REG_TMP0 = TCG_REG_T8, + TCG_REG_TMP1 = TCG_REG_T7, + TCG_REG_TMP2 = TCG_REG_T6, +} TCGReg; + +/* used for function call generation */ +#define TCG_REG_CALL_STACK TCG_REG_SP +#define TCG_TARGET_STACK_ALIGN 16 +#define TCG_TARGET_CALL_ALIGN_ARGS 1 +#define TCG_TARGET_CALL_STACK_OFFSET 0 + +/* optional instructions */ +#define TCG_TARGET_HAS_movcond_i32 0 +#define TCG_TARGET_HAS_div_i32 1 +#define TCG_TARGET_HAS_rem_i32 1 +#define TCG_TARGET_HAS_div2_i32 0 +#define TCG_TARGET_HAS_rot_i32 1 +#define TCG_TARGET_HAS_deposit_i32 1 +#define TCG_TARGET_HAS_extract_i32 1 +#define TCG_TARGET_HAS_sextract_i32 0 +#define TCG_TARGET_HAS_extract2_i32 0 +#define TCG_TARGET_HAS_add2_i32 0 +#define TCG_TARGET_HAS_sub2_i32 0 +#define TCG_TARGET_HAS_mulu2_i32 0 +#define TCG_TARGET_HAS_muls2_i32 0 +#define TCG_TARGET_HAS_muluh_i32 1 +#define TCG_TARGET_HAS_mulsh_i32 1 +#define TCG_TARGET_HAS_ext8s_i32 1 +#define TCG_TARGET_HAS_ext16s_i32 1 +#define TCG_TARGET_HAS_ext8u_i32 1 +#define TCG_TARGET_HAS_ext16u_i32 1 +#define TCG_TARGET_HAS_bswap16_i32 1 +#define TCG_TARGET_HAS_bswap32_i32 1 +#define TCG_TARGET_HAS_not_i32 1 +#define TCG_TARGET_HAS_neg_i32 0 +#define TCG_TARGET_HAS_andc_i32 1 +#define TCG_TARGET_HAS_orc_i32 1 +#define TCG_TARGET_HAS_eqv_i32 0 +#define TCG_TARGET_HAS_nand_i32 0 +#define TCG_TARGET_HAS_nor_i32 1 +#define TCG_TARGET_HAS_clz_i32 1 +#define TCG_TARGET_HAS_ctz_i32 1 +#define TCG_TARGET_HAS_ctpop_i32 0 +#define TCG_TARGET_HAS_direct_jump 1 +#define TCG_TARGET_HAS_brcond2 0 +#define TCG_TARGET_HAS_setcond2 0 +#define TCG_TARGET_HAS_qemu_st8_i32 0 + +/* 64-bit operations */ +#define TCG_TARGET_HAS_movcond_i64 0 +#define TCG_TARGET_HAS_div_i64 1 +#define TCG_TARGET_HAS_rem_i64 1 +#define TCG_TARGET_HAS_div2_i64 0 +#define TCG_TARGET_HAS_rot_i64 1 +#define TCG_TARGET_HAS_deposit_i64 1 +#define TCG_TARGET_HAS_extract_i64 1 +#define TCG_TARGET_HAS_sextract_i64 0 +#define TCG_TARGET_HAS_extract2_i64 0 +#define TCG_TARGET_HAS_extrl_i64_i32 1 +#define TCG_TARGET_HAS_extrh_i64_i32 1 +#define TCG_TARGET_HAS_ext8s_i64 1 +#define TCG_TARGET_HAS_ext16s_i64 1 +#define TCG_TARGET_HAS_ext32s_i64 1 +#define TCG_TARGET_HAS_ext8u_i64 1 +#define TCG_TARGET_HAS_ext16u_i64 1 +#define TCG_TARGET_HAS_ext32u_i64 1 +#define TCG_TARGET_HAS_bswap16_i64 1 +#define TCG_TARGET_HAS_bswap32_i64 1 +#define TCG_TARGET_HAS_bswap64_i64 1 +#define TCG_TARGET_HAS_not_i64 1 +#define TCG_TARGET_HAS_neg_i64 0 +#define TCG_TARGET_HAS_andc_i64 1 +#define TCG_TARGET_HAS_orc_i64 1 +#define TCG_TARGET_HAS_eqv_i64 0 +#define TCG_TARGET_HAS_nand_i64 0 +#define TCG_TARGET_HAS_nor_i64 1 +#define TCG_TARGET_HAS_clz_i64 1 +#define TCG_TARGET_HAS_ctz_i64 1 +#define TCG_TARGET_HAS_ctpop_i64 0 +#define TCG_TARGET_HAS_add2_i64 0 +#define TCG_TARGET_HAS_sub2_i64 0 +#define TCG_TARGET_HAS_mulu2_i64 0 +#define TCG_TARGET_HAS_muls2_i64 0 +#define TCG_TARGET_HAS_muluh_i64 1 +#define TCG_TARGET_HAS_mulsh_i64 1 + +void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t); 
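/*
 * A quick sanity check of the 38-bit figure quoted above for
 * MAX_CODE_GEN_BUFFER_SIZE, assuming the usual LoongArch encodings
 * (PCADDU18I: si20 << 18 added to PC; JIRL: si16 << 2 added to the
 * base register):
 *
 *   20-bit immediate << 18   covers bits [37:18] of the offset
 *   16-bit immediate << 2    covers bits [17:2]  of the offset
 *   4-byte insn alignment    fixes bits  [1:0]   at zero
 *
 * so the pair reaches a signed byte offset of 20 + 16 + 2 = 38 bits,
 * i.e. +/- 2^37 bytes = +/- 128 GiB, which is why the buffer size is
 * capped at 128 * GiB so any two points in generated code stay within
 * reach of this two-instruction sequence.
 */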
+ +#define TCG_TARGET_DEFAULT_MO (0) + +#define TCG_TARGET_NEED_LDST_LABELS + +#define TCG_TARGET_HAS_MEMORY_BSWAP 0 + +#endif /* LOONGARCH_TCG_TARGET_H */ diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc index bf0eb84e2d..bd76f0c97f 100644 --- a/tcg/mips/tcg-target.c.inc +++ b/tcg/mips/tcg-target.c.inc @@ -24,7 +24,9 @@ * THE SOFTWARE. */ -#ifdef HOST_WORDS_BIGENDIAN +#include "../tcg-ldst.c.inc" + +#if HOST_BIG_ENDIAN # define MIPS_BE 1 #else # define MIPS_BE 0 @@ -187,7 +189,7 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, #endif -static inline bool is_p2m1(tcg_target_long val) +static bool is_p2m1(tcg_target_long val) { return val && ((val + 1) & val) == 0; } @@ -230,16 +232,26 @@ typedef enum { OPC_ORI = 015 << 26, OPC_XORI = 016 << 26, OPC_LUI = 017 << 26, + OPC_BNEL = 025 << 26, + OPC_BNEZALC_R6 = 030 << 26, OPC_DADDIU = 031 << 26, + OPC_LDL = 032 << 26, + OPC_LDR = 033 << 26, OPC_LB = 040 << 26, OPC_LH = 041 << 26, + OPC_LWL = 042 << 26, OPC_LW = 043 << 26, OPC_LBU = 044 << 26, OPC_LHU = 045 << 26, + OPC_LWR = 046 << 26, OPC_LWU = 047 << 26, OPC_SB = 050 << 26, OPC_SH = 051 << 26, + OPC_SWL = 052 << 26, OPC_SW = 053 << 26, + OPC_SDL = 054 << 26, + OPC_SDR = 055 << 26, + OPC_SWR = 056 << 26, OPC_LD = 067 << 26, OPC_SD = 077 << 26, @@ -361,8 +373,8 @@ typedef enum { /* * Type reg */ -static inline void tcg_out_opc_reg(TCGContext *s, MIPSInsn opc, - TCGReg rd, TCGReg rs, TCGReg rt) +static void tcg_out_opc_reg(TCGContext *s, MIPSInsn opc, + TCGReg rd, TCGReg rs, TCGReg rt) { int32_t inst; @@ -376,8 +388,8 @@ static inline void tcg_out_opc_reg(TCGContext *s, MIPSInsn opc, /* * Type immediate */ -static inline void tcg_out_opc_imm(TCGContext *s, MIPSInsn opc, - TCGReg rt, TCGReg rs, TCGArg imm) +static void tcg_out_opc_imm(TCGContext *s, MIPSInsn opc, + TCGReg rt, TCGReg rs, TCGArg imm) { int32_t inst; @@ -391,8 +403,8 @@ static inline void tcg_out_opc_imm(TCGContext *s, MIPSInsn opc, /* * Type bitfield */ -static inline void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt, - TCGReg rs, int msb, int lsb) +static void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt, + TCGReg rs, int msb, int lsb) { int32_t inst; @@ -404,8 +416,8 @@ static inline void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt, tcg_out32(s, inst); } -static inline void tcg_out_opc_bf64(TCGContext *s, MIPSInsn opc, MIPSInsn opm, - MIPSInsn oph, TCGReg rt, TCGReg rs, +static void tcg_out_opc_bf64(TCGContext *s, MIPSInsn opc, MIPSInsn opm, + MIPSInsn oph, TCGReg rt, TCGReg rs, int msb, int lsb) { if (lsb >= 32) { @@ -422,8 +434,7 @@ static inline void tcg_out_opc_bf64(TCGContext *s, MIPSInsn opc, MIPSInsn opm, /* * Type branch */ -static inline void tcg_out_opc_br(TCGContext *s, MIPSInsn opc, - TCGReg rt, TCGReg rs) +static void tcg_out_opc_br(TCGContext *s, MIPSInsn opc, TCGReg rt, TCGReg rs) { tcg_out_opc_imm(s, opc, rt, rs, 0); } @@ -431,8 +442,8 @@ static inline void tcg_out_opc_br(TCGContext *s, MIPSInsn opc, /* * Type sa */ -static inline void tcg_out_opc_sa(TCGContext *s, MIPSInsn opc, - TCGReg rd, TCGReg rt, TCGArg sa) +static void tcg_out_opc_sa(TCGContext *s, MIPSInsn opc, + TCGReg rd, TCGReg rt, TCGArg sa) { int32_t inst; @@ -479,28 +490,27 @@ static bool tcg_out_opc_jmp(TCGContext *s, MIPSInsn opc, const void *target) return true; } -static inline void tcg_out_nop(TCGContext *s) +static void tcg_out_nop(TCGContext *s) { tcg_out32(s, 0); } -static inline void tcg_out_dsll(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa) +static void tcg_out_dsll(TCGContext *s, 
TCGReg rd, TCGReg rt, TCGArg sa) { tcg_out_opc_sa64(s, OPC_DSLL, OPC_DSLL32, rd, rt, sa); } -static inline void tcg_out_dsrl(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa) +static void tcg_out_dsrl(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa) { tcg_out_opc_sa64(s, OPC_DSRL, OPC_DSRL32, rd, rt, sa); } -static inline void tcg_out_dsra(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa) +static void tcg_out_dsra(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa) { tcg_out_opc_sa64(s, OPC_DSRA, OPC_DSRA32, rd, rt, sa); } -static inline bool tcg_out_mov(TCGContext *s, TCGType type, - TCGReg ret, TCGReg arg) +static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { /* Simple reg-reg move, optimising out the 'do nothing' case */ if (ret != arg) { @@ -575,8 +585,10 @@ static void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg, int flags) static void tcg_out_bswap_subr(TCGContext *s, const tcg_insn_unit *sub) { - bool ok = tcg_out_opc_jmp(s, OPC_JAL, sub); - tcg_debug_assert(ok); + if (!tcg_out_opc_jmp(s, OPC_JAL, sub)) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP1, (uintptr_t)sub); + tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_TMP1, 0); + } } static void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg, int flags) @@ -612,27 +624,7 @@ static void tcg_out_bswap64(TCGContext *s, TCGReg ret, TCGReg arg) } } -static inline void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg) -{ - if (use_mips32r2_instructions) { - tcg_out_opc_reg(s, OPC_SEB, ret, 0, arg); - } else { - tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24); - tcg_out_opc_sa(s, OPC_SRA, ret, ret, 24); - } -} - -static inline void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg) -{ - if (use_mips32r2_instructions) { - tcg_out_opc_reg(s, OPC_SEH, ret, 0, arg); - } else { - tcg_out_opc_sa(s, OPC_SLL, ret, arg, 16); - tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16); - } -} - -static inline void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) +static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) { if (use_mips32r2_instructions) { tcg_out_opc_bf(s, OPC_DEXT, ret, arg, 31, 0); @@ -656,8 +648,8 @@ static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data, tcg_out_opc_imm(s, opc, data, addr, lo); } -static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, - TCGReg arg1, intptr_t arg2) +static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) { MIPSInsn opc = OPC_LD; if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) { @@ -666,8 +658,8 @@ static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, tcg_out_ldst(s, opc, arg, arg1, arg2); } -static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, - TCGReg arg1, intptr_t arg2) +static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) { MIPSInsn opc = OPC_SD; if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) { @@ -676,8 +668,8 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, tcg_out_ldst(s, opc, arg, arg1, arg2); } -static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, - TCGReg base, intptr_t ofs) +static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, + TCGReg base, intptr_t ofs) { if (val == 0) { tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); @@ -1035,33 +1027,31 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg) } #if defined(CONFIG_SOFTMMU) -#include "../tcg-ldst.c.inc" - -static void * const qemu_ld_helpers[16] = { +static void * const qemu_ld_helpers[(MO_SSIZE | 
MO_BSWAP) + 1] = { [MO_UB] = helper_ret_ldub_mmu, [MO_SB] = helper_ret_ldsb_mmu, [MO_LEUW] = helper_le_lduw_mmu, [MO_LESW] = helper_le_ldsw_mmu, [MO_LEUL] = helper_le_ldul_mmu, - [MO_LEQ] = helper_le_ldq_mmu, + [MO_LEUQ] = helper_le_ldq_mmu, [MO_BEUW] = helper_be_lduw_mmu, [MO_BESW] = helper_be_ldsw_mmu, [MO_BEUL] = helper_be_ldul_mmu, - [MO_BEQ] = helper_be_ldq_mmu, + [MO_BEUQ] = helper_be_ldq_mmu, #if TCG_TARGET_REG_BITS == 64 [MO_LESL] = helper_le_ldsl_mmu, [MO_BESL] = helper_be_ldsl_mmu, #endif }; -static void * const qemu_st_helpers[16] = { +static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = { [MO_UB] = helper_ret_stb_mmu, [MO_LEUW] = helper_le_stw_mmu, [MO_LEUL] = helper_le_stl_mmu, - [MO_LEQ] = helper_le_stq_mmu, + [MO_LEUQ] = helper_le_stq_mmu, [MO_BEUW] = helper_be_stw_mmu, [MO_BEUL] = helper_be_stl_mmu, - [MO_BEQ] = helper_be_stq_mmu, + [MO_BEUQ] = helper_be_stq_mmu, }; /* Helper routines for marshalling helper function arguments into @@ -1140,12 +1130,14 @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768); * Clobbers TMP0, TMP1, TMP2, TMP3. */ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, - TCGReg addrh, TCGMemOpIdx oi, + TCGReg addrh, MemOpIdx oi, tcg_insn_unit *label_ptr[2], bool is_load) { MemOp opc = get_memop(oi); - unsigned s_bits = opc & MO_SIZE; unsigned a_bits = get_alignment_bits(opc); + unsigned s_bits = opc & MO_SIZE; + unsigned a_mask = (1 << a_bits) - 1; + unsigned s_mask = (1 << s_bits) - 1; int mem_index = get_mmuidx(oi); int fast_off = TLB_MASK_TABLE_OFS(mem_index); int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); @@ -1153,7 +1145,7 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, int add_off = offsetof(CPUTLBEntry, addend); int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read) : offsetof(CPUTLBEntry, addr_write)); - target_ulong mask; + target_ulong tlb_mask; /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off); @@ -1167,27 +1159,13 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, /* Add the tlb_table pointer, creating the CPUTLBEntry address in TMP3. */ tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1); - /* We don't currently support unaligned accesses. - We could do so with mips32r6. */ - if (a_bits < s_bits) { - a_bits = s_bits; - } - - /* Mask the page bits, keeping the alignment bits to compare against. */ - mask = (target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1); - /* Load the (low-half) tlb comparator. */ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { - tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF); - tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, mask); + tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF); } else { tcg_out_ldst(s, (TARGET_LONG_BITS == 64 ? OPC_LD : TCG_TARGET_REG_BITS == 64 ? OPC_LWU : OPC_LW), TCG_TMP0, TCG_TMP3, cmp_off); - tcg_out_movi(s, TCG_TYPE_TL, TCG_TMP1, mask); - /* No second compare is required here; - load the tlb addend for the fast path. */ - tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off); } /* Zero extend a 32-bit guest address for a 64-bit host. */ @@ -1195,7 +1173,25 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, tcg_out_ext32u(s, base, addrl); addrl = base; } - tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl); + + /* + * Mask the page bits, keeping the alignment bits to compare against. 
+ * For unaligned accesses, compare against the end of the access to + * verify that it does not cross a page boundary. + */ + tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask; + tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, tlb_mask); + if (a_mask >= s_mask) { + tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl); + } else { + tcg_out_opc_imm(s, ALIAS_PADDI, TCG_TMP2, addrl, s_mask - a_mask); + tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2); + } + + if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) { + /* Load the tlb addend for the fast path. */ + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off); + } label_ptr[0] = s->code_ptr; tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0); @@ -1203,7 +1199,7 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, /* Load and test the high half tlb comparator. */ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { /* delay slot */ - tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF); + tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF); /* Load the tlb addend for the fast path. */ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off); @@ -1216,7 +1212,7 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP2, addrl); } -static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, +static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi, TCGType ext, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addrhi, @@ -1241,7 +1237,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { const tcg_insn_unit *tgt_rx = tcg_splitwx_to_rx(s->code_ptr); - TCGMemOpIdx oi = l->oi; + MemOpIdx oi = l->oi; MemOp opc = get_memop(oi); TCGReg v0; int i; @@ -1295,7 +1291,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { const tcg_insn_unit *tgt_rx = tcg_splitwx_to_rx(s->code_ptr); - TCGMemOpIdx oi = l->oi; + MemOpIdx oi = l->oi; MemOp opc = get_memop(oi); MemOp s_bits = opc & MO_SIZE; int i; @@ -1344,7 +1340,82 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); return true; } -#endif + +#else + +static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo, + TCGReg addrhi, unsigned a_bits) +{ + unsigned a_mask = (1 << a_bits) - 1; + TCGLabelQemuLdst *l = new_ldst_label(s); + + l->is_ld = is_ld; + l->addrlo_reg = addrlo; + l->addrhi_reg = addrhi; + + /* We are expecting a_bits to max out at 7, much lower than ANDI. */ + tcg_debug_assert(a_bits < 16); + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addrlo, a_mask); + + l->label_ptr[0] = s->code_ptr; + if (use_mips32r6_instructions) { + tcg_out_opc_br(s, OPC_BNEZALC_R6, TCG_REG_ZERO, TCG_TMP0); + } else { + tcg_out_opc_br(s, OPC_BNEL, TCG_TMP0, TCG_REG_ZERO); + tcg_out_nop(s); + } + + l->raddr = tcg_splitwx_to_rx(s->code_ptr); +} + +static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) +{ + void *target; + + if (!reloc_pc16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { + return false; + } + + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { + /* A0 is env, A1 is skipped, A2:A3 is the uint64_t address. */ + TCGReg a2 = MIPS_BE ? l->addrhi_reg : l->addrlo_reg; + TCGReg a3 = MIPS_BE ? 
l->addrlo_reg : l->addrhi_reg; + + if (a3 != TCG_REG_A2) { + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2); + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3); + } else if (a2 != TCG_REG_A3) { + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3); + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2); + } else { + tcg_out_mov(s, TCG_TYPE_I32, TCG_TMP0, TCG_REG_A2); + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, TCG_REG_A3); + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, TCG_TMP0); + } + } else { + tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg); + } + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); + + /* + * Tail call to the helper, with the return address back inline. + * We have arrived here via BNEL, so $31 is already set. + */ + target = (l->is_ld ? helper_unaligned_ld : helper_unaligned_st); + tcg_out_call_int(s, target, true); + return true; +} + +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + return tcg_out_fail_alignment(s, l); +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + return tcg_out_fail_alignment(s, l); +} +#endif /* SOFTMMU */ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, TCGReg base, MemOp opc, bool is_64) @@ -1404,7 +1475,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, case MO_SL: tcg_out_opc_imm(s, OPC_LW, lo, base, 0); break; - case MO_Q | MO_BSWAP: + case MO_UQ | MO_BSWAP: if (TCG_TARGET_REG_BITS == 64) { if (use_mips32r2_instructions) { tcg_out_opc_imm(s, OPC_LD, lo, base, 0); @@ -1433,7 +1504,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? hi : lo, TCG_TMP3); } break; - case MO_Q: + case MO_UQ: /* Prefer to load from offset 0 first, but allow for overlap. */ if (TCG_TARGET_REG_BITS == 64) { tcg_out_opc_imm(s, OPC_LD, lo, base, 0); @@ -1450,15 +1521,137 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, } } +static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi, + TCGReg base, MemOp opc, bool is_64) +{ + const MIPSInsn lw1 = MIPS_BE ? OPC_LWL : OPC_LWR; + const MIPSInsn lw2 = MIPS_BE ? OPC_LWR : OPC_LWL; + const MIPSInsn ld1 = MIPS_BE ? OPC_LDL : OPC_LDR; + const MIPSInsn ld2 = MIPS_BE ? OPC_LDR : OPC_LDL; + + bool sgn = (opc & MO_SIGN); + + switch (opc & (MO_SSIZE | MO_BSWAP)) { + case MO_SW | MO_BE: + case MO_UW | MO_BE: + tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP0, base, 0); + tcg_out_opc_imm(s, OPC_LBU, lo, base, 1); + if (use_mips32r2_instructions) { + tcg_out_opc_bf(s, OPC_INS, lo, TCG_TMP0, 31, 8); + } else { + tcg_out_opc_sa(s, OPC_SLL, TCG_TMP0, TCG_TMP0, 8); + tcg_out_opc_reg(s, OPC_OR, lo, TCG_TMP0, TCG_TMP1); + } + break; + + case MO_SW | MO_LE: + case MO_UW | MO_LE: + if (use_mips32r2_instructions && lo != base) { + tcg_out_opc_imm(s, OPC_LBU, lo, base, 0); + tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP0, base, 1); + tcg_out_opc_bf(s, OPC_INS, lo, TCG_TMP0, 31, 8); + } else { + tcg_out_opc_imm(s, OPC_LBU, TCG_TMP0, base, 0); + tcg_out_opc_imm(s, sgn ? 
OPC_LB : OPC_LBU, TCG_TMP1, base, 1); + tcg_out_opc_sa(s, OPC_SLL, TCG_TMP1, TCG_TMP1, 8); + tcg_out_opc_reg(s, OPC_OR, lo, TCG_TMP0, TCG_TMP1); + } + break; + + case MO_SL: + case MO_UL: + tcg_out_opc_imm(s, lw1, lo, base, 0); + tcg_out_opc_imm(s, lw2, lo, base, 3); + if (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn) { + tcg_out_ext32u(s, lo, lo); + } + break; + + case MO_UL | MO_BSWAP: + case MO_SL | MO_BSWAP: + if (use_mips32r2_instructions) { + tcg_out_opc_imm(s, lw1, lo, base, 0); + tcg_out_opc_imm(s, lw2, lo, base, 3); + tcg_out_bswap32(s, lo, lo, + TCG_TARGET_REG_BITS == 64 && is_64 + ? (sgn ? TCG_BSWAP_OS : TCG_BSWAP_OZ) : 0); + } else { + const tcg_insn_unit *subr = + (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn + ? bswap32u_addr : bswap32_addr); + + tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0); + tcg_out_bswap_subr(s, subr); + /* delay slot */ + tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 3); + tcg_out_mov(s, is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32, lo, TCG_TMP3); + } + break; + + case MO_UQ: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_opc_imm(s, ld1, lo, base, 0); + tcg_out_opc_imm(s, ld2, lo, base, 7); + } else { + tcg_out_opc_imm(s, lw1, MIPS_BE ? hi : lo, base, 0 + 0); + tcg_out_opc_imm(s, lw2, MIPS_BE ? hi : lo, base, 0 + 3); + tcg_out_opc_imm(s, lw1, MIPS_BE ? lo : hi, base, 4 + 0); + tcg_out_opc_imm(s, lw2, MIPS_BE ? lo : hi, base, 4 + 3); + } + break; + + case MO_UQ | MO_BSWAP: + if (TCG_TARGET_REG_BITS == 64) { + if (use_mips32r2_instructions) { + tcg_out_opc_imm(s, ld1, lo, base, 0); + tcg_out_opc_imm(s, ld2, lo, base, 7); + tcg_out_bswap64(s, lo, lo); + } else { + tcg_out_opc_imm(s, ld1, TCG_TMP0, base, 0); + tcg_out_bswap_subr(s, bswap64_addr); + /* delay slot */ + tcg_out_opc_imm(s, ld2, TCG_TMP0, base, 7); + tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3); + } + } else if (use_mips32r2_instructions) { + tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0 + 0); + tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 0 + 3); + tcg_out_opc_imm(s, lw1, TCG_TMP1, base, 4 + 0); + tcg_out_opc_imm(s, lw2, TCG_TMP1, base, 4 + 3); + tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, TCG_TMP0); + tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, TCG_TMP1); + tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? lo : hi, TCG_TMP0, 16); + tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? hi : lo, TCG_TMP1, 16); + } else { + tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0 + 0); + tcg_out_bswap_subr(s, bswap32_addr); + /* delay slot */ + tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 0 + 3); + tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 4 + 0); + tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? lo : hi, TCG_TMP3); + tcg_out_bswap_subr(s, bswap32_addr); + /* delay slot */ + tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 4 + 3); + tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? hi : lo, TCG_TMP3); + } + break; + + default: + g_assert_not_reached(); + } +} + static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[2]; +#else #endif + unsigned a_bits, s_bits; TCGReg base = TCG_REG_A0; data_regl = *args++; @@ -1467,10 +1660,20 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); oi = *args++; opc = get_memop(oi); + a_bits = get_alignment_bits(opc); + s_bits = opc & MO_SIZE; + /* + * R6 removes the left/right instructions but requires the + * system to support misaligned memory accesses. 
+ */ #if defined(CONFIG_SOFTMMU) tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1); - tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); + if (use_mips32r6_instructions || a_bits >= s_bits) { + tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); + } else { + tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64); + } add_qemu_ldst_label(s, 1, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), data_regl, data_regh, addr_regl, addr_regh, @@ -1487,7 +1690,21 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) } else { tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl); } - tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); + if (use_mips32r6_instructions) { + if (a_bits) { + tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits); + } + tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); + } else { + if (a_bits && a_bits != s_bits) { + tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits); + } + if (a_bits >= s_bits) { + tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); + } else { + tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64); + } + } #endif } @@ -1552,15 +1769,88 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, } } +static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi, + TCGReg base, MemOp opc) +{ + const MIPSInsn sw1 = MIPS_BE ? OPC_SWL : OPC_SWR; + const MIPSInsn sw2 = MIPS_BE ? OPC_SWR : OPC_SWL; + const MIPSInsn sd1 = MIPS_BE ? OPC_SDL : OPC_SDR; + const MIPSInsn sd2 = MIPS_BE ? OPC_SDR : OPC_SDL; + + /* Don't clutter the code below with checks to avoid bswapping ZERO. */ + if ((lo | hi) == 0) { + opc &= ~MO_BSWAP; + } + + switch (opc & (MO_SIZE | MO_BSWAP)) { + case MO_16 | MO_BE: + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, lo, 8); + tcg_out_opc_imm(s, OPC_SB, TCG_TMP0, base, 0); + tcg_out_opc_imm(s, OPC_SB, lo, base, 1); + break; + + case MO_16 | MO_LE: + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, lo, 8); + tcg_out_opc_imm(s, OPC_SB, lo, base, 0); + tcg_out_opc_imm(s, OPC_SB, TCG_TMP0, base, 1); + break; + + case MO_32 | MO_BSWAP: + tcg_out_bswap32(s, TCG_TMP3, lo, 0); + lo = TCG_TMP3; + /* fall through */ + case MO_32: + tcg_out_opc_imm(s, sw1, lo, base, 0); + tcg_out_opc_imm(s, sw2, lo, base, 3); + break; + + case MO_64 | MO_BSWAP: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_bswap64(s, TCG_TMP3, lo); + lo = TCG_TMP3; + } else if (use_mips32r2_instructions) { + tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, MIPS_BE ? hi : lo); + tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, MIPS_BE ? lo : hi); + tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP0, TCG_TMP0, 16); + tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP1, TCG_TMP1, 16); + hi = MIPS_BE ? TCG_TMP0 : TCG_TMP1; + lo = MIPS_BE ? TCG_TMP1 : TCG_TMP0; + } else { + tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? lo : hi, 0); + tcg_out_opc_imm(s, sw1, TCG_TMP3, base, 0 + 0); + tcg_out_opc_imm(s, sw2, TCG_TMP3, base, 0 + 3); + tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? hi : lo, 0); + tcg_out_opc_imm(s, sw1, TCG_TMP3, base, 4 + 0); + tcg_out_opc_imm(s, sw2, TCG_TMP3, base, 4 + 3); + break; + } + /* fall through */ + case MO_64: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_opc_imm(s, sd1, lo, base, 0); + tcg_out_opc_imm(s, sd2, lo, base, 7); + } else { + tcg_out_opc_imm(s, sw1, MIPS_BE ? hi : lo, base, 0 + 0); + tcg_out_opc_imm(s, sw2, MIPS_BE ? hi : lo, base, 0 + 3); + tcg_out_opc_imm(s, sw1, MIPS_BE ? lo : hi, base, 4 + 0); + tcg_out_opc_imm(s, sw2, MIPS_BE ? 
lo : hi, base, 4 + 3); + } + break; + + default: + tcg_abort(); + } +} static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[2]; #endif + unsigned a_bits, s_bits; TCGReg base = TCG_REG_A0; data_regl = *args++; @@ -1569,16 +1859,25 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); oi = *args++; opc = get_memop(oi); + a_bits = get_alignment_bits(opc); + s_bits = opc & MO_SIZE; + /* + * R6 removes the left/right instructions but requires the + * system to support misaligned memory accesses. + */ #if defined(CONFIG_SOFTMMU) tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0); - tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); + if (use_mips32r6_instructions || a_bits >= s_bits) { + tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); + } else { + tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc); + } add_qemu_ldst_label(s, 0, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), data_regl, data_regh, addr_regl, addr_regh, s->code_ptr, label_ptr); #else - base = TCG_REG_A0; if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { tcg_out_ext32u(s, base, addr_regl); addr_regl = base; @@ -1590,7 +1889,21 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) } else { tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl); } - tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); + if (use_mips32r6_instructions) { + if (a_bits) { + tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits); + } + tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); + } else { + if (a_bits && a_bits != s_bits) { + tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits); + } + if (a_bits >= s_bits) { + tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); + } else { + tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc); + } + } #endif } @@ -1637,9 +1950,9 @@ static void tcg_out_clz(TCGContext *s, MIPSInsn opcv2, MIPSInsn opcv6, } } -static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, - const TCGArg args[TCG_MAX_OP_ARGS], - const int const_args[TCG_MAX_OP_ARGS]) +static void tcg_out_op(TCGContext *s, TCGOpcode opc, + const TCGArg args[TCG_MAX_OP_ARGS], + const int const_args[TCG_MAX_OP_ARGS]) { MIPSInsn i1, i2; TCGArg a0, a1, a2; @@ -1674,17 +1987,11 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, } break; case INDEX_op_goto_tb: - if (s->tb_jmp_insn_offset) { - /* direct jump method */ - s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); - /* Avoid clobbering the address during retranslation. 
*/ - tcg_out32(s, OPC_J | (*(uint32_t *)s->code_ptr & 0x3ffffff)); - } else { - /* indirect jump method */ - tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO, - (uintptr_t)(s->tb_jmp_target_addr + a0)); - tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0); - } + /* indirect jump method */ + tcg_debug_assert(s->tb_jmp_insn_offset == 0); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO, + (uintptr_t)(s->tb_jmp_target_addr + a0)); + tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0); tcg_out_nop(s); set_jmp_reset_offset(s, a0); break; @@ -2558,13 +2865,6 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); /* global pointer */ } -void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx, - uintptr_t jmp_rw, uintptr_t addr) -{ - qatomic_set((uint32_t *)jmp_rw, deposit32(OPC_J, 0, 26, addr >> 2)); - flush_idcache_range(jmp_rx, jmp_rw, 4); -} - typedef struct { DebugFrameHeader h; uint8_t fde_def_cfa[4]; diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h index 3a62055f04..7669213175 100644 --- a/tcg/mips/tcg-target.h +++ b/tcg/mips/tcg-target.h @@ -39,11 +39,7 @@ #define TCG_TARGET_TLB_DISPLACEMENT_BITS 16 #define TCG_TARGET_NB_REGS 32 -/* - * We have a 256MB branch region, but leave room to make sure the - * main executable is also within that region. - */ -#define MAX_CODE_GEN_BUFFER_SIZE (128 * MiB) +#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) typedef enum { TCG_REG_ZERO = 0, @@ -136,7 +132,7 @@ extern bool use_mips32r2_instructions; #define TCG_TARGET_HAS_muluh_i32 1 #define TCG_TARGET_HAS_mulsh_i32 1 #define TCG_TARGET_HAS_bswap32_i32 1 -#define TCG_TARGET_HAS_direct_jump 1 +#define TCG_TARGET_HAS_direct_jump 0 #if TCG_TARGET_REG_BITS == 64 #define TCG_TARGET_HAS_add2_i32 0 @@ -207,10 +203,10 @@ extern bool use_mips32r2_instructions; #define TCG_TARGET_DEFAULT_MO (0) #define TCG_TARGET_HAS_MEMORY_BSWAP 1 -void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t); +/* not defined -- call should be eliminated at compile time */ +void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t) + QEMU_ERROR("code path is reachable"); -#ifdef CONFIG_SOFTMMU #define TCG_TARGET_NEED_LDST_LABELS -#endif #endif diff --git a/tcg/optimize.c b/tcg/optimize.c index 9876ac52a8..ae081ab29c 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -24,6 +24,7 @@ */ #include "qemu/osdep.h" +#include "qemu/int128.h" #include "tcg/tcg-op.h" #include "tcg-internal.h" @@ -41,9 +42,61 @@ typedef struct TempOptInfo { TCGTemp *prev_copy; TCGTemp *next_copy; uint64_t val; - uint64_t mask; + uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */ + uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */ } TempOptInfo; +typedef struct OptContext { + TCGContext *tcg; + TCGOp *prev_mb; + TCGTempSet temps_used; + + /* In flight values from optimization. */ + uint64_t a_mask; /* mask bit is 0 iff value identical to first input */ + uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */ + uint64_t s_mask; /* mask of clrsb(value) bits */ + TCGType type; +} OptContext; + +/* Calculate the smask for a specific value. */ +static uint64_t smask_from_value(uint64_t value) +{ + int rep = clrsb64(value); + return ~(~0ull >> rep); +} + +/* + * Calculate the smask for a given set of known-zeros. + * If there are lots of zeros on the left, we can consider the remainder + * an unsigned field, and thus the corresponding signed field is one bit + * larger. 
+ */ +static uint64_t smask_from_zmask(uint64_t zmask) +{ + /* + * Only the 0 bits are significant for zmask, thus the msb itself + * must be zero, else we have no sign information. + */ + int rep = clz64(zmask); + if (rep == 0) { + return 0; + } + rep -= 1; + return ~(~0ull >> rep); +} + +/* + * Recreate a properly left-aligned smask after manipulation. + * Some bit-shuffling, particularly shifts and rotates, may + * retain sign bits on the left, but may scatter disconnected + * sign bits on the right. Retain only what remains to the left. + */ +static uint64_t smask_from_smask(int64_t smask) +{ + /* Only the 1 bits are significant for smask */ + return smask_from_zmask(~smask); +} + static inline TempOptInfo *ts_info(TCGTemp *ts) { return ts->state_ptr; @@ -81,7 +134,8 @@ static void reset_ts(TCGTemp *ts) ti->next_copy = ts; ti->prev_copy = ts; ti->is_const = false; - ti->mask = -1; + ti->z_mask = -1; + ti->s_mask = 0; } static void reset_temp(TCGArg arg) @@ -90,15 +144,15 @@ static void reset_temp(TCGArg arg) } /* Initialize and activate a temporary. */ -static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts) +static void init_ts_info(OptContext *ctx, TCGTemp *ts) { size_t idx = temp_idx(ts); TempOptInfo *ti; - if (test_bit(idx, temps_used->l)) { + if (test_bit(idx, ctx->temps_used.l)) { return; } - set_bit(idx, temps_used->l); + set_bit(idx, ctx->temps_used.l); ti = ts->state_ptr; if (ti == NULL) { @@ -111,22 +165,15 @@ static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts) if (ts->kind == TEMP_CONST) { ti->is_const = true; ti->val = ts->val; - ti->mask = ts->val; - if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) { - /* High bits of a 32-bit quantity are garbage. */ - ti->mask |= ~0xffffffffull; - } + ti->z_mask = ts->val; + ti->s_mask = smask_from_value(ts->val); } else { ti->is_const = false; - ti->mask = -1; + ti->z_mask = -1; + ti->s_mask = 0; } } -static void init_arg_info(TCGTempSet *temps_used, TCGArg arg) -{ - init_ts_info(temps_used, arg_temp(arg)); -} - static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts) { TCGTemp *i, *g, *l; @@ -179,43 +226,45 @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2) return ts_are_copies(arg_temp(arg1), arg_temp(arg2)); } -static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src) +static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) { TCGTemp *dst_ts = arg_temp(dst); TCGTemp *src_ts = arg_temp(src); - const TCGOpDef *def; TempOptInfo *di; TempOptInfo *si; - uint64_t mask; TCGOpcode new_op; if (ts_are_copies(dst_ts, src_ts)) { - tcg_op_remove(s, op); - return; + tcg_op_remove(ctx->tcg, op); + return true; } reset_ts(dst_ts); di = ts_info(dst_ts); si = ts_info(src_ts); - def = &tcg_op_defs[op->opc]; - if (def->flags & TCG_OPF_VECTOR) { - new_op = INDEX_op_mov_vec; - } else if (def->flags & TCG_OPF_64BIT) { - new_op = INDEX_op_mov_i64; - } else { + + switch (ctx->type) { + case TCG_TYPE_I32: new_op = INDEX_op_mov_i32; + break; + case TCG_TYPE_I64: + new_op = INDEX_op_mov_i64; + break; + case TCG_TYPE_V64: + case TCG_TYPE_V128: + case TCG_TYPE_V256: + /* TCGOP_VECL and TCGOP_VECE remain unchanged. */ + new_op = INDEX_op_mov_vec; + break; + default: + g_assert_not_reached(); } op->opc = new_op; - /* TCGOP_VECL and TCGOP_VECE remain unchanged. */ op->args[0] = dst; op->args[1] = src; - mask = si->mask; - if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) { - /* High bits of the destination are now garbage. 
*/ - mask |= ~0xffffffffull; - } - di->mask = mask; + di->z_mask = si->z_mask; + di->s_mask = si->s_mask; if (src_ts->type == dst_ts->type) { TempOptInfo *ni = ts_info(si->next_copy); @@ -227,27 +276,22 @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src) di->is_const = si->is_const; di->val = si->val; } + return true; } -static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used, - TCGOp *op, TCGArg dst, uint64_t val) +static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op, + TCGArg dst, uint64_t val) { - const TCGOpDef *def = &tcg_op_defs[op->opc]; - TCGType type; TCGTemp *tv; - if (def->flags & TCG_OPF_VECTOR) { - type = TCGOP_VECL(op) + TCG_TYPE_V64; - } else if (def->flags & TCG_OPF_64BIT) { - type = TCG_TYPE_I64; - } else { - type = TCG_TYPE_I32; + if (ctx->type == TCG_TYPE_I32) { + val = (int32_t)val; } /* Convert movi to mov with constant temp. */ - tv = tcg_constant_internal(type, val); - init_ts_info(temps_used, tv); - tcg_opt_gen_mov(s, op, dst, temp_arg(tv)); + tv = tcg_constant_internal(ctx->type, val); + init_ts_info(ctx, tv); + return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv)); } static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y) @@ -264,13 +308,13 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y) CASE_OP_32_64(mul): return x * y; - CASE_OP_32_64(and): + CASE_OP_32_64_VEC(and): return x & y; - CASE_OP_32_64(or): + CASE_OP_32_64_VEC(or): return x | y; - CASE_OP_32_64(xor): + CASE_OP_32_64_VEC(xor): return x ^ y; case INDEX_op_shl_i32: @@ -303,25 +347,25 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y) case INDEX_op_rotl_i64: return rol64(x, y & 63); - CASE_OP_32_64(not): + CASE_OP_32_64_VEC(not): return ~x; CASE_OP_32_64(neg): return -x; - CASE_OP_32_64(andc): + CASE_OP_32_64_VEC(andc): return x & ~y; - CASE_OP_32_64(orc): + CASE_OP_32_64_VEC(orc): return x | ~y; - CASE_OP_32_64(eqv): + CASE_OP_32_64_VEC(eqv): return ~(x ^ y); - CASE_OP_32_64(nand): + CASE_OP_32_64_VEC(nand): return ~(x & y); - CASE_OP_32_64(nor): + CASE_OP_32_64_VEC(nor): return ~(x | y); case INDEX_op_clz_i32: @@ -415,11 +459,11 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y) } } -static uint64_t do_constant_folding(TCGOpcode op, uint64_t x, uint64_t y) +static uint64_t do_constant_folding(TCGOpcode op, TCGType type, + uint64_t x, uint64_t y) { - const TCGOpDef *def = &tcg_op_defs[op]; uint64_t res = do_constant_folding_2(op, x, y); - if (!(def->flags & TCG_OPF_64BIT)) { + if (type == TCG_TYPE_I32) { res = (int32_t)res; } return res; @@ -501,40 +545,46 @@ static bool do_constant_folding_cond_eq(TCGCond c) } } -/* Return 2 if the condition can't be simplified, and the result - of the condition (0 or 1) if it can */ -static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x, - TCGArg y, TCGCond c) +/* + * Return -1 if the condition can't be simplified, + * and the result of the condition (0 or 1) if it can. 
+ */ +static int do_constant_folding_cond(TCGType type, TCGArg x, + TCGArg y, TCGCond c) { - uint64_t xv = arg_info(x)->val; - uint64_t yv = arg_info(y)->val; - if (arg_is_const(x) && arg_is_const(y)) { - const TCGOpDef *def = &tcg_op_defs[op]; - tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR)); - if (def->flags & TCG_OPF_64BIT) { - return do_constant_folding_cond_64(xv, yv, c); - } else { + uint64_t xv = arg_info(x)->val; + uint64_t yv = arg_info(y)->val; + + switch (type) { + case TCG_TYPE_I32: return do_constant_folding_cond_32(xv, yv, c); + case TCG_TYPE_I64: + return do_constant_folding_cond_64(xv, yv, c); + default: + /* Only scalar comparisons are optimizable */ + return -1; } } else if (args_are_copies(x, y)) { return do_constant_folding_cond_eq(c); - } else if (arg_is_const(y) && yv == 0) { + } else if (arg_is_const(y) && arg_info(y)->val == 0) { switch (c) { case TCG_COND_LTU: return 0; case TCG_COND_GEU: return 1; default: - return 2; + return -1; } } - return 2; + return -1; } -/* Return 2 if the condition can't be simplified, and the result - of the condition (0 or 1) if it can */ -static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c) +/* + * Return -1 if the condition can't be simplified, + * and the result of the condition (0 or 1) if it can. + */ +static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c) { TCGArg al = p1[0], ah = p1[1]; TCGArg bl = p2[0], bh = p2[1]; @@ -564,9 +614,22 @@ static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c) if (args_are_copies(al, bl) && args_are_copies(ah, bh)) { return do_constant_folding_cond_eq(c); } - return 2; + return -1; } +/** + * swap_commutative: + * @dest: TCGArg of the destination argument, or NO_DEST. + * @p1: first paired argument + * @p2: second paired argument + * + * If *@p1 is a constant and *@p2 is not, swap. + * If *@p2 matches @dest, swap. + * Return true if a swap was performed. + */ + +#define NO_DEST temp_arg(NULL) + static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2) { TCGArg a1 = *p1, a2 = *p2; @@ -600,12 +663,1371 @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2) return false; } +static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args) +{ + for (int i = 0; i < nb_args; i++) { + TCGTemp *ts = arg_temp(op->args[i]); + if (ts) { + init_ts_info(ctx, ts); + } + } +} + +static void copy_propagate(OptContext *ctx, TCGOp *op, + int nb_oargs, int nb_iargs) +{ + TCGContext *s = ctx->tcg; + + for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) { + TCGTemp *ts = arg_temp(op->args[i]); + if (ts && ts_is_copy(ts)) { + op->args[i] = temp_arg(find_better_copy(s, ts)); + } + } +} + +static void finish_folding(OptContext *ctx, TCGOp *op) +{ + const TCGOpDef *def = &tcg_op_defs[op->opc]; + int i, nb_oargs; + + /* + * For an opcode that ends a BB, reset all temp data. + * We do no cross-BB optimization. + */ + if (def->flags & TCG_OPF_BB_END) { + memset(&ctx->temps_used, 0, sizeof(ctx->temps_used)); + ctx->prev_mb = NULL; + return; + } + + nb_oargs = def->nb_oargs; + for (i = 0; i < nb_oargs; i++) { + TCGTemp *ts = arg_temp(op->args[i]); + reset_ts(ts); + /* + * Save the corresponding known-zero/sign bits mask for the + * first output argument (only one supported so far). 
+         */
+        if (i == 0) {
+            ts_info(ts)->z_mask = ctx->z_mask;
+            ts_info(ts)->s_mask = ctx->s_mask;
+        }
+    }
+}
+
+/*
+ * The fold_* functions return true when processing is complete,
+ * usually by folding the operation to a constant or to a copy,
+ * and calling tcg_opt_gen_{mov,movi}. They may do other things,
+ * like collect information about the value produced, for use in
+ * optimizing a subsequent operation.
+ *
+ * These first fold_* functions are all helpers, used by other
+ * folders for more specific operations.
+ */
+
+static bool fold_const1(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1])) {
+        uint64_t t;
+
+        t = arg_info(op->args[1])->val;
+        t = do_constant_folding(op->opc, ctx->type, t, 0);
+        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    }
+    return false;
+}
+
+static bool fold_const2(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
+        uint64_t t1 = arg_info(op->args[1])->val;
+        uint64_t t2 = arg_info(op->args[2])->val;
+
+        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
+        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
+    }
+    return false;
+}
+
+static bool fold_commutative(OptContext *ctx, TCGOp *op)
+{
+    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
+    return false;
+}
+
+static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
+{
+    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
+    return fold_const2(ctx, op);
+}
+
+static bool fold_masks(OptContext *ctx, TCGOp *op)
+{
+    uint64_t a_mask = ctx->a_mask;
+    uint64_t z_mask = ctx->z_mask;
+    uint64_t s_mask = ctx->s_mask;
+
+    /*
+     * 32-bit ops generate 32-bit results, which for the purpose of
+     * simplifying tcg are sign-extended. Certainly that's how we
+     * represent our constants elsewhere. Note that the bits will
+     * be reset properly for a 64-bit value when encountering the
+     * type changing opcodes.
+     */
+    if (ctx->type == TCG_TYPE_I32) {
+        a_mask = (int32_t)a_mask;
+        z_mask = (int32_t)z_mask;
+        s_mask |= MAKE_64BIT_MASK(32, 32);
+        ctx->z_mask = z_mask;
+        ctx->s_mask = s_mask;
+    }
+
+    if (z_mask == 0) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
+    }
+    if (a_mask == 0) {
+        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
+    }
+    return false;
+}
+
+/*
+ * Convert @op to NOT, if NOT is supported by the host.
+ * Return true if the conversion is successful, which will still
+ * indicate that the processing is complete.
+ */
+static bool fold_not(OptContext *ctx, TCGOp *op);
+static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
+{
+    TCGOpcode not_op;
+    bool have_not;
+
+    switch (ctx->type) {
+    case TCG_TYPE_I32:
+        not_op = INDEX_op_not_i32;
+        have_not = TCG_TARGET_HAS_not_i32;
+        break;
+    case TCG_TYPE_I64:
+        not_op = INDEX_op_not_i64;
+        have_not = TCG_TARGET_HAS_not_i64;
+        break;
+    case TCG_TYPE_V64:
+    case TCG_TYPE_V128:
+    case TCG_TYPE_V256:
+        not_op = INDEX_op_not_vec;
+        have_not = TCG_TARGET_HAS_not_vec;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    if (have_not) {
+        op->opc = not_op;
+        op->args[1] = op->args[idx];
+        return fold_not(ctx, op);
+    }
+    return false;
+}
+
+/* If the binary operation has first argument @i, fold to @i. */
+static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
+    }
+    return false;
+}
+
+/* If the binary operation has first argument @i, fold to NOT.
*/ +static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i) +{ + if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) { + return fold_to_not(ctx, op, 2); + } + return false; +} + +/* If the binary operation has second argument @i, fold to @i. */ +static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i) +{ + if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) { + return tcg_opt_gen_movi(ctx, op, op->args[0], i); + } + return false; +} + +/* If the binary operation has second argument @i, fold to identity. */ +static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i) +{ + if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) { + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); + } + return false; +} + +/* If the binary operation has second argument @i, fold to NOT. */ +static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i) +{ + if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) { + return fold_to_not(ctx, op, 1); + } + return false; +} + +/* If the binary operation has both arguments equal, fold to @i. */ +static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i) +{ + if (args_are_copies(op->args[1], op->args[2])) { + return tcg_opt_gen_movi(ctx, op, op->args[0], i); + } + return false; +} + +/* If the binary operation has both arguments equal, fold to identity. */ +static bool fold_xx_to_x(OptContext *ctx, TCGOp *op) +{ + if (args_are_copies(op->args[1], op->args[2])) { + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); + } + return false; +} + +/* + * These outermost fold_ functions are sorted alphabetically. + * + * The ordering of the transformations should be: + * 1) those that produce a constant + * 2) those that produce a copy + * 3) those that produce information about the result value. + */ + +static bool fold_add(OptContext *ctx, TCGOp *op) +{ + if (fold_const2_commutative(ctx, op) || + fold_xi_to_x(ctx, op, 0)) { + return true; + } + return false; +} + +/* We cannot as yet do_constant_folding with vectors. */ +static bool fold_add_vec(OptContext *ctx, TCGOp *op) +{ + if (fold_commutative(ctx, op) || + fold_xi_to_x(ctx, op, 0)) { + return true; + } + return false; +} + +static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add) +{ + if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) && + arg_is_const(op->args[4]) && arg_is_const(op->args[5])) { + uint64_t al = arg_info(op->args[2])->val; + uint64_t ah = arg_info(op->args[3])->val; + uint64_t bl = arg_info(op->args[4])->val; + uint64_t bh = arg_info(op->args[5])->val; + TCGArg rl, rh; + TCGOp *op2; + + if (ctx->type == TCG_TYPE_I32) { + uint64_t a = deposit64(al, 32, 32, ah); + uint64_t b = deposit64(bl, 32, 32, bh); + + if (add) { + a += b; + } else { + a -= b; + } + + al = sextract64(a, 0, 32); + ah = sextract64(a, 32, 32); + } else { + Int128 a = int128_make128(al, ah); + Int128 b = int128_make128(bl, bh); + + if (add) { + a = int128_add(a, b); + } else { + a = int128_sub(a, b); + } + + al = int128_getlo(a); + ah = int128_gethi(a); + } + + rl = op->args[0]; + rh = op->args[1]; + + /* The proper opcode is supplied by tcg_opt_gen_mov. */ + op2 = tcg_op_insert_before(ctx->tcg, op, 0); + + tcg_opt_gen_movi(ctx, op, rl, al); + tcg_opt_gen_movi(ctx, op2, rh, ah); + return true; + } + return false; +} + +static bool fold_add2(OptContext *ctx, TCGOp *op) +{ + /* Note that the high and low parts may be independently swapped. 
*/ + swap_commutative(op->args[0], &op->args[2], &op->args[4]); + swap_commutative(op->args[1], &op->args[3], &op->args[5]); + + return fold_addsub2(ctx, op, true); +} + +static bool fold_and(OptContext *ctx, TCGOp *op) +{ + uint64_t z1, z2; + + if (fold_const2_commutative(ctx, op) || + fold_xi_to_i(ctx, op, 0) || + fold_xi_to_x(ctx, op, -1) || + fold_xx_to_x(ctx, op)) { + return true; + } + + z1 = arg_info(op->args[1])->z_mask; + z2 = arg_info(op->args[2])->z_mask; + ctx->z_mask = z1 & z2; + + /* + * Sign repetitions are perforce all identical, whether they are 1 or 0. + * Bitwise operations preserve the relative quantity of the repetitions. + */ + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; + + /* + * Known-zeros does not imply known-ones. Therefore unless + * arg2 is constant, we can't infer affected bits from it. + */ + if (arg_is_const(op->args[2])) { + ctx->a_mask = z1 & ~z2; + } + + return fold_masks(ctx, op); +} + +static bool fold_andc(OptContext *ctx, TCGOp *op) +{ + uint64_t z1; + + if (fold_const2(ctx, op) || + fold_xx_to_i(ctx, op, 0) || + fold_xi_to_x(ctx, op, 0) || + fold_ix_to_not(ctx, op, -1)) { + return true; + } + + z1 = arg_info(op->args[1])->z_mask; + + /* + * Known-zeros does not imply known-ones. Therefore unless + * arg2 is constant, we can't infer anything from it. + */ + if (arg_is_const(op->args[2])) { + uint64_t z2 = ~arg_info(op->args[2])->z_mask; + ctx->a_mask = z1 & ~z2; + z1 &= z2; + } + ctx->z_mask = z1; + + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; + return fold_masks(ctx, op); +} + +static bool fold_brcond(OptContext *ctx, TCGOp *op) +{ + TCGCond cond = op->args[2]; + int i; + + if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) { + op->args[2] = cond = tcg_swap_cond(cond); + } + + i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond); + if (i == 0) { + tcg_op_remove(ctx->tcg, op); + return true; + } + if (i > 0) { + op->opc = INDEX_op_br; + op->args[0] = op->args[3]; + } + return false; +} + +static bool fold_brcond2(OptContext *ctx, TCGOp *op) +{ + TCGCond cond = op->args[4]; + TCGArg label = op->args[5]; + int i, inv = 0; + + if (swap_commutative2(&op->args[0], &op->args[2])) { + op->args[4] = cond = tcg_swap_cond(cond); + } + + i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond); + if (i >= 0) { + goto do_brcond_const; + } + + switch (cond) { + case TCG_COND_LT: + case TCG_COND_GE: + /* + * Simplify LT/GE comparisons vs zero to a single compare + * vs the high word of the input. + */ + if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 && + arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) { + goto do_brcond_high; + } + break; + + case TCG_COND_NE: + inv = 1; + QEMU_FALLTHROUGH; + case TCG_COND_EQ: + /* + * Simplify EQ/NE comparisons where one of the pairs + * can be simplified. 
+ */ + i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0], + op->args[2], cond); + switch (i ^ inv) { + case 0: + goto do_brcond_const; + case 1: + goto do_brcond_high; + } + + i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1], + op->args[3], cond); + switch (i ^ inv) { + case 0: + goto do_brcond_const; + case 1: + op->opc = INDEX_op_brcond_i32; + op->args[1] = op->args[2]; + op->args[2] = cond; + op->args[3] = label; + break; + } + break; + + default: + break; + + do_brcond_high: + op->opc = INDEX_op_brcond_i32; + op->args[0] = op->args[1]; + op->args[1] = op->args[3]; + op->args[2] = cond; + op->args[3] = label; + break; + + do_brcond_const: + if (i == 0) { + tcg_op_remove(ctx->tcg, op); + return true; + } + op->opc = INDEX_op_br; + op->args[0] = label; + break; + } + return false; +} + +static bool fold_bswap(OptContext *ctx, TCGOp *op) +{ + uint64_t z_mask, s_mask, sign; + + if (arg_is_const(op->args[1])) { + uint64_t t = arg_info(op->args[1])->val; + + t = do_constant_folding(op->opc, ctx->type, t, op->args[2]); + return tcg_opt_gen_movi(ctx, op, op->args[0], t); + } + + z_mask = arg_info(op->args[1])->z_mask; + + switch (op->opc) { + case INDEX_op_bswap16_i32: + case INDEX_op_bswap16_i64: + z_mask = bswap16(z_mask); + sign = INT16_MIN; + break; + case INDEX_op_bswap32_i32: + case INDEX_op_bswap32_i64: + z_mask = bswap32(z_mask); + sign = INT32_MIN; + break; + case INDEX_op_bswap64_i64: + z_mask = bswap64(z_mask); + sign = INT64_MIN; + break; + default: + g_assert_not_reached(); + } + s_mask = smask_from_zmask(z_mask); + + switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) { + case TCG_BSWAP_OZ: + break; + case TCG_BSWAP_OS: + /* If the sign bit may be 1, force all the bits above to 1. */ + if (z_mask & sign) { + z_mask |= sign; + s_mask = sign << 1; + } + break; + default: + /* The high bits are undefined: force all bits above the sign to 1. */ + z_mask |= sign << 1; + s_mask = 0; + break; + } + ctx->z_mask = z_mask; + ctx->s_mask = s_mask; + + return fold_masks(ctx, op); +} + +static bool fold_call(OptContext *ctx, TCGOp *op) +{ + TCGContext *s = ctx->tcg; + int nb_oargs = TCGOP_CALLO(op); + int nb_iargs = TCGOP_CALLI(op); + int flags, i; + + init_arguments(ctx, op, nb_oargs + nb_iargs); + copy_propagate(ctx, op, nb_oargs, nb_iargs); + + /* If the function reads or writes globals, reset temp data. */ + flags = tcg_call_flags(op); + if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) { + int nb_globals = s->nb_globals; + + for (i = 0; i < nb_globals; i++) { + if (test_bit(i, ctx->temps_used.l)) { + reset_ts(&ctx->tcg->temps[i]); + } + } + } + + /* Reset temp data for outputs. */ + for (i = 0; i < nb_oargs; i++) { + reset_temp(op->args[i]); + } + + /* Stop optimizing MB across calls. 
*/ + ctx->prev_mb = NULL; + return true; +} + +static bool fold_count_zeros(OptContext *ctx, TCGOp *op) +{ + uint64_t z_mask; + + if (arg_is_const(op->args[1])) { + uint64_t t = arg_info(op->args[1])->val; + + if (t != 0) { + t = do_constant_folding(op->opc, ctx->type, t, 0); + return tcg_opt_gen_movi(ctx, op, op->args[0], t); + } + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]); + } + + switch (ctx->type) { + case TCG_TYPE_I32: + z_mask = 31; + break; + case TCG_TYPE_I64: + z_mask = 63; + break; + default: + g_assert_not_reached(); + } + ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask; + ctx->s_mask = smask_from_zmask(ctx->z_mask); + return false; +} + +static bool fold_ctpop(OptContext *ctx, TCGOp *op) +{ + if (fold_const1(ctx, op)) { + return true; + } + + switch (ctx->type) { + case TCG_TYPE_I32: + ctx->z_mask = 32 | 31; + break; + case TCG_TYPE_I64: + ctx->z_mask = 64 | 63; + break; + default: + g_assert_not_reached(); + } + ctx->s_mask = smask_from_zmask(ctx->z_mask); + return false; +} + +static bool fold_deposit(OptContext *ctx, TCGOp *op) +{ + if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { + uint64_t t1 = arg_info(op->args[1])->val; + uint64_t t2 = arg_info(op->args[2])->val; + + t1 = deposit64(t1, op->args[3], op->args[4], t2); + return tcg_opt_gen_movi(ctx, op, op->args[0], t1); + } + + ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask, + op->args[3], op->args[4], + arg_info(op->args[2])->z_mask); + return false; +} + +static bool fold_divide(OptContext *ctx, TCGOp *op) +{ + if (fold_const2(ctx, op) || + fold_xi_to_x(ctx, op, 1)) { + return true; + } + return false; +} + +static bool fold_dup(OptContext *ctx, TCGOp *op) +{ + if (arg_is_const(op->args[1])) { + uint64_t t = arg_info(op->args[1])->val; + t = dup_const(TCGOP_VECE(op), t); + return tcg_opt_gen_movi(ctx, op, op->args[0], t); + } + return false; +} + +static bool fold_dup2(OptContext *ctx, TCGOp *op) +{ + if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { + uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32, + arg_info(op->args[2])->val); + return tcg_opt_gen_movi(ctx, op, op->args[0], t); + } + + if (args_are_copies(op->args[1], op->args[2])) { + op->opc = INDEX_op_dup_vec; + TCGOP_VECE(op) = MO_32; + } + return false; +} + +static bool fold_eqv(OptContext *ctx, TCGOp *op) +{ + if (fold_const2_commutative(ctx, op) || + fold_xi_to_x(ctx, op, -1) || + fold_xi_to_not(ctx, op, 0)) { + return true; + } + + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; + return false; +} + +static bool fold_extract(OptContext *ctx, TCGOp *op) +{ + uint64_t z_mask_old, z_mask; + int pos = op->args[2]; + int len = op->args[3]; + + if (arg_is_const(op->args[1])) { + uint64_t t; + + t = arg_info(op->args[1])->val; + t = extract64(t, pos, len); + return tcg_opt_gen_movi(ctx, op, op->args[0], t); + } + + z_mask_old = arg_info(op->args[1])->z_mask; + z_mask = extract64(z_mask_old, pos, len); + if (pos == 0) { + ctx->a_mask = z_mask_old ^ z_mask; + } + ctx->z_mask = z_mask; + ctx->s_mask = smask_from_zmask(z_mask); + + return fold_masks(ctx, op); +} + +static bool fold_extract2(OptContext *ctx, TCGOp *op) +{ + if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { + uint64_t v1 = arg_info(op->args[1])->val; + uint64_t v2 = arg_info(op->args[2])->val; + int shr = op->args[3]; + + if (op->opc == INDEX_op_extract2_i64) { + v1 >>= shr; + v2 <<= 64 - shr; + } else { + v1 = (uint32_t)v1 >> shr; + v2 = (uint64_t)((int32_t)v2 << (32 - shr)); + } + return 
tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2); + } + return false; +} + +static bool fold_exts(OptContext *ctx, TCGOp *op) +{ + uint64_t s_mask_old, s_mask, z_mask, sign; + bool type_change = false; + + if (fold_const1(ctx, op)) { + return true; + } + + z_mask = arg_info(op->args[1])->z_mask; + s_mask = arg_info(op->args[1])->s_mask; + s_mask_old = s_mask; + + switch (op->opc) { + CASE_OP_32_64(ext8s): + sign = INT8_MIN; + z_mask = (uint8_t)z_mask; + break; + CASE_OP_32_64(ext16s): + sign = INT16_MIN; + z_mask = (uint16_t)z_mask; + break; + case INDEX_op_ext_i32_i64: + type_change = true; + QEMU_FALLTHROUGH; + case INDEX_op_ext32s_i64: + sign = INT32_MIN; + z_mask = (uint32_t)z_mask; + break; + default: + g_assert_not_reached(); + } + + if (z_mask & sign) { + z_mask |= sign; + } + s_mask |= sign << 1; + + ctx->z_mask = z_mask; + ctx->s_mask = s_mask; + if (!type_change) { + ctx->a_mask = s_mask & ~s_mask_old; + } + + return fold_masks(ctx, op); +} + +static bool fold_extu(OptContext *ctx, TCGOp *op) +{ + uint64_t z_mask_old, z_mask; + bool type_change = false; + + if (fold_const1(ctx, op)) { + return true; + } + + z_mask_old = z_mask = arg_info(op->args[1])->z_mask; + + switch (op->opc) { + CASE_OP_32_64(ext8u): + z_mask = (uint8_t)z_mask; + break; + CASE_OP_32_64(ext16u): + z_mask = (uint16_t)z_mask; + break; + case INDEX_op_extrl_i64_i32: + case INDEX_op_extu_i32_i64: + type_change = true; + QEMU_FALLTHROUGH; + case INDEX_op_ext32u_i64: + z_mask = (uint32_t)z_mask; + break; + case INDEX_op_extrh_i64_i32: + type_change = true; + z_mask >>= 32; + break; + default: + g_assert_not_reached(); + } + + ctx->z_mask = z_mask; + ctx->s_mask = smask_from_zmask(z_mask); + if (!type_change) { + ctx->a_mask = z_mask_old ^ z_mask; + } + return fold_masks(ctx, op); +} + +static bool fold_mb(OptContext *ctx, TCGOp *op) +{ + /* Eliminate duplicate and redundant fence instructions. */ + if (ctx->prev_mb) { + /* + * Merge two barriers of the same type into one, + * or a weaker barrier into a stronger one, + * or two weaker barriers into a stronger one. + * mb X; mb Y => mb X|Y + * mb; strl => mb; st + * ldaq; mb => ld; mb + * ldaq; strl => ld; mb; st + * Other combinations are also merged into a strong + * barrier. This is stricter than specified but for + * the purposes of TCG is better than not optimizing. + */ + ctx->prev_mb->args[0] |= op->args[0]; + tcg_op_remove(ctx->tcg, op); + } else { + ctx->prev_mb = op; + } + return true; +} + +static bool fold_mov(OptContext *ctx, TCGOp *op) +{ + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); +} + +static bool fold_movcond(OptContext *ctx, TCGOp *op) +{ + TCGCond cond = op->args[5]; + int i; + + if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) { + op->args[5] = cond = tcg_swap_cond(cond); + } + /* + * Canonicalize the "false" input reg to match the destination reg so + * that the tcg backend can implement a "move if true" operation. 
+ */ + if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) { + op->args[5] = cond = tcg_invert_cond(cond); + } + + i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond); + if (i >= 0) { + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]); + } + + ctx->z_mask = arg_info(op->args[3])->z_mask + | arg_info(op->args[4])->z_mask; + ctx->s_mask = arg_info(op->args[3])->s_mask + & arg_info(op->args[4])->s_mask; + + if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { + uint64_t tv = arg_info(op->args[3])->val; + uint64_t fv = arg_info(op->args[4])->val; + TCGOpcode opc; + + switch (ctx->type) { + case TCG_TYPE_I32: + opc = INDEX_op_setcond_i32; + break; + case TCG_TYPE_I64: + opc = INDEX_op_setcond_i64; + break; + default: + g_assert_not_reached(); + } + + if (tv == 1 && fv == 0) { + op->opc = opc; + op->args[3] = cond; + } else if (fv == 1 && tv == 0) { + op->opc = opc; + op->args[3] = tcg_invert_cond(cond); + } + } + return false; +} + +static bool fold_mul(OptContext *ctx, TCGOp *op) +{ + if (fold_const2(ctx, op) || + fold_xi_to_i(ctx, op, 0) || + fold_xi_to_x(ctx, op, 1)) { + return true; + } + return false; +} + +static bool fold_mul_highpart(OptContext *ctx, TCGOp *op) +{ + if (fold_const2_commutative(ctx, op) || + fold_xi_to_i(ctx, op, 0)) { + return true; + } + return false; +} + +static bool fold_multiply2(OptContext *ctx, TCGOp *op) +{ + swap_commutative(op->args[0], &op->args[2], &op->args[3]); + + if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { + uint64_t a = arg_info(op->args[2])->val; + uint64_t b = arg_info(op->args[3])->val; + uint64_t h, l; + TCGArg rl, rh; + TCGOp *op2; + + switch (op->opc) { + case INDEX_op_mulu2_i32: + l = (uint64_t)(uint32_t)a * (uint32_t)b; + h = (int32_t)(l >> 32); + l = (int32_t)l; + break; + case INDEX_op_muls2_i32: + l = (int64_t)(int32_t)a * (int32_t)b; + h = l >> 32; + l = (int32_t)l; + break; + case INDEX_op_mulu2_i64: + mulu64(&l, &h, a, b); + break; + case INDEX_op_muls2_i64: + muls64(&l, &h, a, b); + break; + default: + g_assert_not_reached(); + } + + rl = op->args[0]; + rh = op->args[1]; + + /* The proper opcode is supplied by tcg_opt_gen_mov. */ + op2 = tcg_op_insert_before(ctx->tcg, op, 0); + + tcg_opt_gen_movi(ctx, op, rl, l); + tcg_opt_gen_movi(ctx, op2, rh, h); + return true; + } + return false; +} + +static bool fold_nand(OptContext *ctx, TCGOp *op) +{ + if (fold_const2_commutative(ctx, op) || + fold_xi_to_not(ctx, op, -1)) { + return true; + } + + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; + return false; +} + +static bool fold_neg(OptContext *ctx, TCGOp *op) +{ + uint64_t z_mask; + + if (fold_const1(ctx, op)) { + return true; + } + + /* Set to 1 all bits to the left of the rightmost. */ + z_mask = arg_info(op->args[1])->z_mask; + ctx->z_mask = -(z_mask & -z_mask); + + /* + * Because of fold_sub_to_neg, we want to always return true, + * via finish_folding. + */ + finish_folding(ctx, op); + return true; +} + +static bool fold_nor(OptContext *ctx, TCGOp *op) +{ + if (fold_const2_commutative(ctx, op) || + fold_xi_to_not(ctx, op, 0)) { + return true; + } + + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; + return false; +} + +static bool fold_not(OptContext *ctx, TCGOp *op) +{ + if (fold_const1(ctx, op)) { + return true; + } + + ctx->s_mask = arg_info(op->args[1])->s_mask; + + /* Because of fold_to_not, we want to always return true, via finish. 
*/ + finish_folding(ctx, op); + return true; +} + +static bool fold_or(OptContext *ctx, TCGOp *op) +{ + if (fold_const2_commutative(ctx, op) || + fold_xi_to_x(ctx, op, 0) || + fold_xx_to_x(ctx, op)) { + return true; + } + + ctx->z_mask = arg_info(op->args[1])->z_mask + | arg_info(op->args[2])->z_mask; + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; + return fold_masks(ctx, op); +} + +static bool fold_orc(OptContext *ctx, TCGOp *op) +{ + if (fold_const2(ctx, op) || + fold_xx_to_i(ctx, op, -1) || + fold_xi_to_x(ctx, op, -1) || + fold_ix_to_not(ctx, op, 0)) { + return true; + } + + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; + return false; +} + +static bool fold_qemu_ld(OptContext *ctx, TCGOp *op) +{ + const TCGOpDef *def = &tcg_op_defs[op->opc]; + MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs]; + MemOp mop = get_memop(oi); + int width = 8 * memop_size(mop); + + if (width < 64) { + ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width); + if (!(mop & MO_SIGN)) { + ctx->z_mask = MAKE_64BIT_MASK(0, width); + ctx->s_mask <<= 1; + } + } + + /* Opcodes that touch guest memory stop the mb optimization. */ + ctx->prev_mb = NULL; + return false; +} + +static bool fold_qemu_st(OptContext *ctx, TCGOp *op) +{ + /* Opcodes that touch guest memory stop the mb optimization. */ + ctx->prev_mb = NULL; + return false; +} + +static bool fold_remainder(OptContext *ctx, TCGOp *op) +{ + if (fold_const2(ctx, op) || + fold_xx_to_i(ctx, op, 0)) { + return true; + } + return false; +} + +static bool fold_setcond(OptContext *ctx, TCGOp *op) +{ + TCGCond cond = op->args[3]; + int i; + + if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) { + op->args[3] = cond = tcg_swap_cond(cond); + } + + i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond); + if (i >= 0) { + return tcg_opt_gen_movi(ctx, op, op->args[0], i); + } + + ctx->z_mask = 1; + ctx->s_mask = smask_from_zmask(1); + return false; +} + +static bool fold_setcond2(OptContext *ctx, TCGOp *op) +{ + TCGCond cond = op->args[5]; + int i, inv = 0; + + if (swap_commutative2(&op->args[1], &op->args[3])) { + op->args[5] = cond = tcg_swap_cond(cond); + } + + i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond); + if (i >= 0) { + goto do_setcond_const; + } + + switch (cond) { + case TCG_COND_LT: + case TCG_COND_GE: + /* + * Simplify LT/GE comparisons vs zero to a single compare + * vs the high word of the input. + */ + if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 && + arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) { + goto do_setcond_high; + } + break; + + case TCG_COND_NE: + inv = 1; + QEMU_FALLTHROUGH; + case TCG_COND_EQ: + /* + * Simplify EQ/NE comparisons where one of the pairs + * can be simplified. 
+         */
+        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
+                                     op->args[3], cond);
+        switch (i ^ inv) {
+        case 0:
+            goto do_setcond_const;
+        case 1:
+            goto do_setcond_high;
+        }
+
+        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
+                                     op->args[4], cond);
+        switch (i ^ inv) {
+        case 0:
+            goto do_setcond_const;
+        case 1:
+            op->args[2] = op->args[3];
+            op->args[3] = cond;
+            op->opc = INDEX_op_setcond_i32;
+            break;
+        }
+        break;
+
+    default:
+        break;
+
+    do_setcond_high:
+        op->args[1] = op->args[2];
+        op->args[2] = op->args[4];
+        op->args[3] = cond;
+        op->opc = INDEX_op_setcond_i32;
+        break;
+    }
+
+    ctx->z_mask = 1;
+    ctx->s_mask = smask_from_zmask(1);
+    return false;
+
+ do_setcond_const:
+    return tcg_opt_gen_movi(ctx, op, op->args[0], i);
+}
+
+static bool fold_sextract(OptContext *ctx, TCGOp *op)
+{
+    uint64_t z_mask, s_mask, s_mask_old;
+    int pos = op->args[2];
+    int len = op->args[3];
+
+    if (arg_is_const(op->args[1])) {
+        uint64_t t;
+
+        t = arg_info(op->args[1])->val;
+        t = sextract64(t, pos, len);
+        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    }
+
+    z_mask = arg_info(op->args[1])->z_mask;
+    z_mask = sextract64(z_mask, pos, len);
+    ctx->z_mask = z_mask;
+
+    s_mask_old = arg_info(op->args[1])->s_mask;
+    s_mask = sextract64(s_mask_old, pos, len);
+    s_mask |= MAKE_64BIT_MASK(len, 64 - len);
+    ctx->s_mask = s_mask;
+
+    if (pos == 0) {
+        ctx->a_mask = s_mask & ~s_mask_old;
+    }
+
+    return fold_masks(ctx, op);
+}
+
+static bool fold_shift(OptContext *ctx, TCGOp *op)
+{
+    uint64_t s_mask, z_mask, sign;
+
+    if (fold_const2(ctx, op) ||
+        fold_ix_to_i(ctx, op, 0) ||
+        fold_xi_to_x(ctx, op, 0)) {
+        return true;
+    }
+
+    s_mask = arg_info(op->args[1])->s_mask;
+    z_mask = arg_info(op->args[1])->z_mask;
+
+    if (arg_is_const(op->args[2])) {
+        int sh = arg_info(op->args[2])->val;
+
+        ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
+
+        s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
+        ctx->s_mask = smask_from_smask(s_mask);
+
+        return fold_masks(ctx, op);
+    }
+
+    switch (op->opc) {
+    CASE_OP_32_64(sar):
+        /*
+         * Arithmetic right shift will not reduce the number of
+         * input sign repetitions.
+         */
+        ctx->s_mask = s_mask;
+        break;
+    CASE_OP_32_64(shr):
+        /*
+         * If the sign bit is known zero, then logical right shift
+         * will not reduce the number of input sign repetitions.
+         */
+        sign = (s_mask & -s_mask) >> 1;
+        if (!(z_mask & sign)) {
+            ctx->s_mask = s_mask;
+        }
+        break;
+    default:
+        break;
+    }
+
+    return false;
+}
+
+static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
+{
+    TCGOpcode neg_op;
+    bool have_neg;
+
+    if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
+        return false;
+    }
+
+    switch (ctx->type) {
+    case TCG_TYPE_I32:
+        neg_op = INDEX_op_neg_i32;
+        have_neg = TCG_TARGET_HAS_neg_i32;
+        break;
+    case TCG_TYPE_I64:
+        neg_op = INDEX_op_neg_i64;
+        have_neg = TCG_TARGET_HAS_neg_i64;
+        break;
+    case TCG_TYPE_V64:
+    case TCG_TYPE_V128:
+    case TCG_TYPE_V256:
+        neg_op = INDEX_op_neg_vec;
+        have_neg = (TCG_TARGET_HAS_neg_vec &&
+                    tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    if (have_neg) {
+        op->opc = neg_op;
+        op->args[1] = op->args[2];
+        return fold_neg(ctx, op);
+    }
+    return false;
+}
+
+/* We cannot as yet do_constant_folding with vectors.
*/ +static bool fold_sub_vec(OptContext *ctx, TCGOp *op) +{ + if (fold_xx_to_i(ctx, op, 0) || + fold_xi_to_x(ctx, op, 0) || + fold_sub_to_neg(ctx, op)) { + return true; + } + return false; +} + +static bool fold_sub(OptContext *ctx, TCGOp *op) +{ + return fold_const2(ctx, op) || fold_sub_vec(ctx, op); +} + +static bool fold_sub2(OptContext *ctx, TCGOp *op) +{ + return fold_addsub2(ctx, op, false); +} + +static bool fold_tcg_ld(OptContext *ctx, TCGOp *op) +{ + /* We can't do any folding with a load, but we can record bits. */ + switch (op->opc) { + CASE_OP_32_64(ld8s): + ctx->s_mask = MAKE_64BIT_MASK(8, 56); + break; + CASE_OP_32_64(ld8u): + ctx->z_mask = MAKE_64BIT_MASK(0, 8); + ctx->s_mask = MAKE_64BIT_MASK(9, 55); + break; + CASE_OP_32_64(ld16s): + ctx->s_mask = MAKE_64BIT_MASK(16, 48); + break; + CASE_OP_32_64(ld16u): + ctx->z_mask = MAKE_64BIT_MASK(0, 16); + ctx->s_mask = MAKE_64BIT_MASK(17, 47); + break; + case INDEX_op_ld32s_i64: + ctx->s_mask = MAKE_64BIT_MASK(32, 32); + break; + case INDEX_op_ld32u_i64: + ctx->z_mask = MAKE_64BIT_MASK(0, 32); + ctx->s_mask = MAKE_64BIT_MASK(33, 31); + break; + default: + g_assert_not_reached(); + } + return false; +} + +static bool fold_xor(OptContext *ctx, TCGOp *op) +{ + if (fold_const2_commutative(ctx, op) || + fold_xx_to_i(ctx, op, 0) || + fold_xi_to_x(ctx, op, 0) || + fold_xi_to_not(ctx, op, -1)) { + return true; + } + + ctx->z_mask = arg_info(op->args[1])->z_mask + | arg_info(op->args[2])->z_mask; + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; + return fold_masks(ctx, op); +} + /* Propagate constants and copies, fold constant expressions. */ void tcg_optimize(TCGContext *s) { - int nb_temps, nb_globals, i; - TCGOp *op, *op_next, *prev_mb = NULL; - TCGTempSet temps_used; + int nb_temps, i; + TCGOp *op, *op_next; + OptContext ctx = { .tcg = s }; /* Array VALS has an element for each temp. If this temp holds a constant then its value is kept in VALS' element. @@ -613,991 +2035,206 @@ void tcg_optimize(TCGContext *s) available through the doubly linked circular list. */ nb_temps = s->nb_temps; - nb_globals = s->nb_globals; - - memset(&temps_used, 0, sizeof(temps_used)); for (i = 0; i < nb_temps; ++i) { s->temps[i].state_ptr = NULL; } QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { - uint64_t mask, partmask, affected, tmp; - int nb_oargs, nb_iargs; TCGOpcode opc = op->opc; - const TCGOpDef *def = &tcg_op_defs[opc]; + const TCGOpDef *def; + bool done = false; - /* Count the arguments, and initialize the temps that are - going to be used */ + /* Calls are special. */ if (opc == INDEX_op_call) { - nb_oargs = TCGOP_CALLO(op); - nb_iargs = TCGOP_CALLI(op); - for (i = 0; i < nb_oargs + nb_iargs; i++) { - TCGTemp *ts = arg_temp(op->args[i]); - if (ts) { - init_ts_info(&temps_used, ts); - } - } + fold_call(&ctx, op); + continue; + } + + def = &tcg_op_defs[opc]; + init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs); + copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs); + + /* Pre-compute the type of the operation. 
*/ + if (def->flags & TCG_OPF_VECTOR) { + ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op); + } else if (def->flags & TCG_OPF_64BIT) { + ctx.type = TCG_TYPE_I64; } else { - nb_oargs = def->nb_oargs; - nb_iargs = def->nb_iargs; - for (i = 0; i < nb_oargs + nb_iargs; i++) { - init_arg_info(&temps_used, op->args[i]); - } + ctx.type = TCG_TYPE_I32; } - /* Do copy propagation */ - for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { - TCGTemp *ts = arg_temp(op->args[i]); - if (ts && ts_is_copy(ts)) { - op->args[i] = temp_arg(find_better_copy(s, ts)); - } - } + /* Assume all bits affected, no bits known zero, no sign reps. */ + ctx.a_mask = -1; + ctx.z_mask = -1; + ctx.s_mask = 0; - /* For commutative operations make constant second argument */ + /* + * Process each opcode. + * Sorted alphabetically by opcode as much as possible. + */ switch (opc) { - CASE_OP_32_64_VEC(add): - CASE_OP_32_64_VEC(mul): - CASE_OP_32_64_VEC(and): - CASE_OP_32_64_VEC(or): - CASE_OP_32_64_VEC(xor): - CASE_OP_32_64(eqv): - CASE_OP_32_64(nand): - CASE_OP_32_64(nor): - CASE_OP_32_64(muluh): - CASE_OP_32_64(mulsh): - swap_commutative(op->args[0], &op->args[1], &op->args[2]); + CASE_OP_32_64(add): + done = fold_add(&ctx, op); break; - CASE_OP_32_64(brcond): - if (swap_commutative(-1, &op->args[0], &op->args[1])) { - op->args[2] = tcg_swap_cond(op->args[2]); - } - break; - CASE_OP_32_64(setcond): - if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) { - op->args[3] = tcg_swap_cond(op->args[3]); - } - break; - CASE_OP_32_64(movcond): - if (swap_commutative(-1, &op->args[1], &op->args[2])) { - op->args[5] = tcg_swap_cond(op->args[5]); - } - /* For movcond, we canonicalize the "false" input reg to match - the destination reg so that the tcg backend can implement - a "move if true" operation. */ - if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) { - op->args[5] = tcg_invert_cond(op->args[5]); - } + case INDEX_op_add_vec: + done = fold_add_vec(&ctx, op); break; CASE_OP_32_64(add2): - swap_commutative(op->args[0], &op->args[2], &op->args[4]); - swap_commutative(op->args[1], &op->args[3], &op->args[5]); + done = fold_add2(&ctx, op); break; - CASE_OP_32_64(mulu2): - CASE_OP_32_64(muls2): - swap_commutative(op->args[0], &op->args[2], &op->args[3]); + CASE_OP_32_64_VEC(and): + done = fold_and(&ctx, op); + break; + CASE_OP_32_64_VEC(andc): + done = fold_andc(&ctx, op); + break; + CASE_OP_32_64(brcond): + done = fold_brcond(&ctx, op); break; case INDEX_op_brcond2_i32: - if (swap_commutative2(&op->args[0], &op->args[2])) { - op->args[4] = tcg_swap_cond(op->args[4]); - } + done = fold_brcond2(&ctx, op); break; - case INDEX_op_setcond2_i32: - if (swap_commutative2(&op->args[1], &op->args[3])) { - op->args[5] = tcg_swap_cond(op->args[5]); - } - break; - default: - break; - } - - /* Simplify expressions for "shift/rot r, 0, a => movi r, 0", - and "sub r, 0, a => neg r, a" case. */ - switch (opc) { - CASE_OP_32_64(shl): - CASE_OP_32_64(shr): - CASE_OP_32_64(sar): - CASE_OP_32_64(rotl): - CASE_OP_32_64(rotr): - if (arg_is_const(op->args[1]) - && arg_info(op->args[1])->val == 0) { - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0); - continue; - } - break; - CASE_OP_32_64_VEC(sub): - { - TCGOpcode neg_op; - bool have_neg; - - if (arg_is_const(op->args[2])) { - /* Proceed with possible constant folding. 
*/ - break; - } - if (opc == INDEX_op_sub_i32) { - neg_op = INDEX_op_neg_i32; - have_neg = TCG_TARGET_HAS_neg_i32; - } else if (opc == INDEX_op_sub_i64) { - neg_op = INDEX_op_neg_i64; - have_neg = TCG_TARGET_HAS_neg_i64; - } else if (TCG_TARGET_HAS_neg_vec) { - TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64; - unsigned vece = TCGOP_VECE(op); - neg_op = INDEX_op_neg_vec; - have_neg = tcg_can_emit_vec_op(neg_op, type, vece) > 0; - } else { - break; - } - if (!have_neg) { - break; - } - if (arg_is_const(op->args[1]) - && arg_info(op->args[1])->val == 0) { - op->opc = neg_op; - reset_temp(op->args[0]); - op->args[1] = op->args[2]; - continue; - } - } - break; - CASE_OP_32_64_VEC(xor): - CASE_OP_32_64(nand): - if (!arg_is_const(op->args[1]) - && arg_is_const(op->args[2]) - && arg_info(op->args[2])->val == -1) { - i = 1; - goto try_not; - } - break; - CASE_OP_32_64(nor): - if (!arg_is_const(op->args[1]) - && arg_is_const(op->args[2]) - && arg_info(op->args[2])->val == 0) { - i = 1; - goto try_not; - } - break; - CASE_OP_32_64_VEC(andc): - if (!arg_is_const(op->args[2]) - && arg_is_const(op->args[1]) - && arg_info(op->args[1])->val == -1) { - i = 2; - goto try_not; - } - break; - CASE_OP_32_64_VEC(orc): - CASE_OP_32_64(eqv): - if (!arg_is_const(op->args[2]) - && arg_is_const(op->args[1]) - && arg_info(op->args[1])->val == 0) { - i = 2; - goto try_not; - } - break; - try_not: - { - TCGOpcode not_op; - bool have_not; - - if (def->flags & TCG_OPF_VECTOR) { - not_op = INDEX_op_not_vec; - have_not = TCG_TARGET_HAS_not_vec; - } else if (def->flags & TCG_OPF_64BIT) { - not_op = INDEX_op_not_i64; - have_not = TCG_TARGET_HAS_not_i64; - } else { - not_op = INDEX_op_not_i32; - have_not = TCG_TARGET_HAS_not_i32; - } - if (!have_not) { - break; - } - op->opc = not_op; - reset_temp(op->args[0]); - op->args[1] = op->args[i]; - continue; - } - default: - break; - } - - /* Simplify expression for "op r, a, const => mov r, a" cases */ - switch (opc) { - CASE_OP_32_64_VEC(add): - CASE_OP_32_64_VEC(sub): - CASE_OP_32_64_VEC(or): - CASE_OP_32_64_VEC(xor): - CASE_OP_32_64_VEC(andc): - CASE_OP_32_64(shl): - CASE_OP_32_64(shr): - CASE_OP_32_64(sar): - CASE_OP_32_64(rotl): - CASE_OP_32_64(rotr): - if (!arg_is_const(op->args[1]) - && arg_is_const(op->args[2]) - && arg_info(op->args[2])->val == 0) { - tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); - continue; - } - break; - CASE_OP_32_64_VEC(and): - CASE_OP_32_64_VEC(orc): - CASE_OP_32_64(eqv): - if (!arg_is_const(op->args[1]) - && arg_is_const(op->args[2]) - && arg_info(op->args[2])->val == -1) { - tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); - continue; - } - break; - default: - break; - } - - /* Simplify using known-zero bits. Currently only ops with a single - output argument is supported. 
*/ - mask = -1; - affected = -1; - switch (opc) { - CASE_OP_32_64(ext8s): - if ((arg_info(op->args[1])->mask & 0x80) != 0) { - break; - } - QEMU_FALLTHROUGH; - CASE_OP_32_64(ext8u): - mask = 0xff; - goto and_const; - CASE_OP_32_64(ext16s): - if ((arg_info(op->args[1])->mask & 0x8000) != 0) { - break; - } - QEMU_FALLTHROUGH; - CASE_OP_32_64(ext16u): - mask = 0xffff; - goto and_const; - case INDEX_op_ext32s_i64: - if ((arg_info(op->args[1])->mask & 0x80000000) != 0) { - break; - } - QEMU_FALLTHROUGH; - case INDEX_op_ext32u_i64: - mask = 0xffffffffU; - goto and_const; - - CASE_OP_32_64(and): - mask = arg_info(op->args[2])->mask; - if (arg_is_const(op->args[2])) { - and_const: - affected = arg_info(op->args[1])->mask & ~mask; - } - mask = arg_info(op->args[1])->mask & mask; - break; - - case INDEX_op_ext_i32_i64: - if ((arg_info(op->args[1])->mask & 0x80000000) != 0) { - break; - } - QEMU_FALLTHROUGH; - case INDEX_op_extu_i32_i64: - /* We do not compute affected as it is a size changing op. */ - mask = (uint32_t)arg_info(op->args[1])->mask; - break; - - CASE_OP_32_64(andc): - /* Known-zeros does not imply known-ones. Therefore unless - op->args[2] is constant, we can't infer anything from it. */ - if (arg_is_const(op->args[2])) { - mask = ~arg_info(op->args[2])->mask; - goto and_const; - } - /* But we certainly know nothing outside args[1] may be set. */ - mask = arg_info(op->args[1])->mask; - break; - - case INDEX_op_sar_i32: - if (arg_is_const(op->args[2])) { - tmp = arg_info(op->args[2])->val & 31; - mask = (int32_t)arg_info(op->args[1])->mask >> tmp; - } - break; - case INDEX_op_sar_i64: - if (arg_is_const(op->args[2])) { - tmp = arg_info(op->args[2])->val & 63; - mask = (int64_t)arg_info(op->args[1])->mask >> tmp; - } - break; - - case INDEX_op_shr_i32: - if (arg_is_const(op->args[2])) { - tmp = arg_info(op->args[2])->val & 31; - mask = (uint32_t)arg_info(op->args[1])->mask >> tmp; - } - break; - case INDEX_op_shr_i64: - if (arg_is_const(op->args[2])) { - tmp = arg_info(op->args[2])->val & 63; - mask = (uint64_t)arg_info(op->args[1])->mask >> tmp; - } - break; - - case INDEX_op_extrl_i64_i32: - mask = (uint32_t)arg_info(op->args[1])->mask; - break; - case INDEX_op_extrh_i64_i32: - mask = (uint64_t)arg_info(op->args[1])->mask >> 32; - break; - - CASE_OP_32_64(shl): - if (arg_is_const(op->args[2])) { - tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1); - mask = arg_info(op->args[1])->mask << tmp; - } - break; - - CASE_OP_32_64(neg): - /* Set to 1 all bits to the left of the rightmost. 
*/ - mask = -(arg_info(op->args[1])->mask - & -arg_info(op->args[1])->mask); - break; - - CASE_OP_32_64(deposit): - mask = deposit64(arg_info(op->args[1])->mask, - op->args[3], op->args[4], - arg_info(op->args[2])->mask); - break; - - CASE_OP_32_64(extract): - mask = extract64(arg_info(op->args[1])->mask, - op->args[2], op->args[3]); - if (op->args[2] == 0) { - affected = arg_info(op->args[1])->mask & ~mask; - } - break; - CASE_OP_32_64(sextract): - mask = sextract64(arg_info(op->args[1])->mask, - op->args[2], op->args[3]); - if (op->args[2] == 0 && (tcg_target_long)mask >= 0) { - affected = arg_info(op->args[1])->mask & ~mask; - } - break; - - CASE_OP_32_64(or): - CASE_OP_32_64(xor): - mask = arg_info(op->args[1])->mask | arg_info(op->args[2])->mask; - break; - - case INDEX_op_clz_i32: - case INDEX_op_ctz_i32: - mask = arg_info(op->args[2])->mask | 31; - break; - - case INDEX_op_clz_i64: - case INDEX_op_ctz_i64: - mask = arg_info(op->args[2])->mask | 63; - break; - - case INDEX_op_ctpop_i32: - mask = 32 | 31; - break; - case INDEX_op_ctpop_i64: - mask = 64 | 63; - break; - - CASE_OP_32_64(setcond): - case INDEX_op_setcond2_i32: - mask = 1; - break; - - CASE_OP_32_64(movcond): - mask = arg_info(op->args[3])->mask | arg_info(op->args[4])->mask; - break; - - CASE_OP_32_64(ld8u): - mask = 0xff; - break; - CASE_OP_32_64(ld16u): - mask = 0xffff; - break; - case INDEX_op_ld32u_i64: - mask = 0xffffffffu; - break; - - CASE_OP_32_64(qemu_ld): - { - TCGMemOpIdx oi = op->args[nb_oargs + nb_iargs]; - MemOp mop = get_memop(oi); - if (!(mop & MO_SIGN)) { - mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1; - } - } - break; - - CASE_OP_32_64(bswap16): - mask = arg_info(op->args[1])->mask; - if (mask <= 0xffff) { - op->args[2] |= TCG_BSWAP_IZ; - } - mask = bswap16(mask); - switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) { - case TCG_BSWAP_OZ: - break; - case TCG_BSWAP_OS: - mask = (int16_t)mask; - break; - default: /* undefined high bits */ - mask |= MAKE_64BIT_MASK(16, 48); - break; - } - break; - - case INDEX_op_bswap32_i64: - mask = arg_info(op->args[1])->mask; - if (mask <= 0xffffffffu) { - op->args[2] |= TCG_BSWAP_IZ; - } - mask = bswap32(mask); - switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) { - case TCG_BSWAP_OZ: - break; - case TCG_BSWAP_OS: - mask = (int32_t)mask; - break; - default: /* undefined high bits */ - mask |= MAKE_64BIT_MASK(32, 32); - break; - } - break; - - default: - break; - } - - /* 32-bit ops generate 32-bit results. For the result is zero test - below, we can ignore high bits, but for further optimizations we - need to record that the high bits contain garbage. 
*/ - partmask = mask; - if (!(def->flags & TCG_OPF_64BIT)) { - mask |= ~(tcg_target_ulong)0xffffffffu; - partmask &= 0xffffffffu; - affected &= 0xffffffffu; - } - - if (partmask == 0) { - tcg_debug_assert(nb_oargs == 1); - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0); - continue; - } - if (affected == 0) { - tcg_debug_assert(nb_oargs == 1); - tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); - continue; - } - - /* Simplify expression for "op r, a, 0 => movi r, 0" cases */ - switch (opc) { - CASE_OP_32_64_VEC(and): - CASE_OP_32_64_VEC(mul): - CASE_OP_32_64(muluh): - CASE_OP_32_64(mulsh): - if (arg_is_const(op->args[2]) - && arg_info(op->args[2])->val == 0) { - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0); - continue; - } - break; - default: - break; - } - - /* Simplify expression for "op r, a, a => mov r, a" cases */ - switch (opc) { - CASE_OP_32_64_VEC(or): - CASE_OP_32_64_VEC(and): - if (args_are_copies(op->args[1], op->args[2])) { - tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); - continue; - } - break; - default: - break; - } - - /* Simplify expression for "op r, a, a => movi r, 0" cases */ - switch (opc) { - CASE_OP_32_64_VEC(andc): - CASE_OP_32_64_VEC(sub): - CASE_OP_32_64_VEC(xor): - if (args_are_copies(op->args[1], op->args[2])) { - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0); - continue; - } - break; - default: - break; - } - - /* Propagate constants through copy operations and do constant - folding. Constants will be substituted to arguments by register - allocator where needed and possible. Also detect copies. */ - switch (opc) { - CASE_OP_32_64_VEC(mov): - tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); - break; - - case INDEX_op_dup_vec: - if (arg_is_const(op->args[1])) { - tmp = arg_info(op->args[1])->val; - tmp = dup_const(TCGOP_VECE(op), tmp); - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - break; - } - goto do_default; - - case INDEX_op_dup2_vec: - assert(TCG_TARGET_REG_BITS == 32); - if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], - deposit64(arg_info(op->args[1])->val, 32, 32, - arg_info(op->args[2])->val)); - break; - } else if (args_are_copies(op->args[1], op->args[2])) { - op->opc = INDEX_op_dup_vec; - TCGOP_VECE(op) = MO_32; - nb_iargs = 1; - } - goto do_default; - - CASE_OP_32_64(not): - CASE_OP_32_64(neg): - CASE_OP_32_64(ext8s): - CASE_OP_32_64(ext8u): - CASE_OP_32_64(ext16s): - CASE_OP_32_64(ext16u): - CASE_OP_32_64(ctpop): - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_extrl_i64_i32: - case INDEX_op_extrh_i64_i32: - if (arg_is_const(op->args[1])) { - tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0); - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - break; - } - goto do_default; - CASE_OP_32_64(bswap16): CASE_OP_32_64(bswap32): case INDEX_op_bswap64_i64: - if (arg_is_const(op->args[1])) { - tmp = do_constant_folding(opc, arg_info(op->args[1])->val, - op->args[2]); - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - break; - } - goto do_default; - - CASE_OP_32_64(add): - CASE_OP_32_64(sub): - CASE_OP_32_64(mul): - CASE_OP_32_64(or): - CASE_OP_32_64(and): - CASE_OP_32_64(xor): - CASE_OP_32_64(shl): - CASE_OP_32_64(shr): - CASE_OP_32_64(sar): - CASE_OP_32_64(rotl): - CASE_OP_32_64(rotr): - CASE_OP_32_64(andc): - CASE_OP_32_64(orc): - CASE_OP_32_64(eqv): - CASE_OP_32_64(nand): - CASE_OP_32_64(nor): - CASE_OP_32_64(muluh): - CASE_OP_32_64(mulsh): - 
CASE_OP_32_64(div): - CASE_OP_32_64(divu): - CASE_OP_32_64(rem): - CASE_OP_32_64(remu): - if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { - tmp = do_constant_folding(opc, arg_info(op->args[1])->val, - arg_info(op->args[2])->val); - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - break; - } - goto do_default; - + done = fold_bswap(&ctx, op); + break; CASE_OP_32_64(clz): CASE_OP_32_64(ctz): - if (arg_is_const(op->args[1])) { - TCGArg v = arg_info(op->args[1])->val; - if (v != 0) { - tmp = do_constant_folding(opc, v, 0); - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - } else { - tcg_opt_gen_mov(s, op, op->args[0], op->args[2]); - } - break; - } - goto do_default; - + done = fold_count_zeros(&ctx, op); + break; + CASE_OP_32_64(ctpop): + done = fold_ctpop(&ctx, op); + break; CASE_OP_32_64(deposit): - if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { - tmp = deposit64(arg_info(op->args[1])->val, - op->args[3], op->args[4], - arg_info(op->args[2])->val); - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - break; - } - goto do_default; - + done = fold_deposit(&ctx, op); + break; + CASE_OP_32_64(div): + CASE_OP_32_64(divu): + done = fold_divide(&ctx, op); + break; + case INDEX_op_dup_vec: + done = fold_dup(&ctx, op); + break; + case INDEX_op_dup2_vec: + done = fold_dup2(&ctx, op); + break; + CASE_OP_32_64_VEC(eqv): + done = fold_eqv(&ctx, op); + break; CASE_OP_32_64(extract): - if (arg_is_const(op->args[1])) { - tmp = extract64(arg_info(op->args[1])->val, - op->args[2], op->args[3]); - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - break; - } - goto do_default; - - CASE_OP_32_64(sextract): - if (arg_is_const(op->args[1])) { - tmp = sextract64(arg_info(op->args[1])->val, - op->args[2], op->args[3]); - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - break; - } - goto do_default; - + done = fold_extract(&ctx, op); + break; CASE_OP_32_64(extract2): - if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { - uint64_t v1 = arg_info(op->args[1])->val; - uint64_t v2 = arg_info(op->args[2])->val; - int shr = op->args[3]; - - if (opc == INDEX_op_extract2_i64) { - tmp = (v1 >> shr) | (v2 << (64 - shr)); - } else { - tmp = (int32_t)(((uint32_t)v1 >> shr) | - ((uint32_t)v2 << (32 - shr))); - } - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - break; - } - goto do_default; - - CASE_OP_32_64(setcond): - tmp = do_constant_folding_cond(opc, op->args[1], - op->args[2], op->args[3]); - if (tmp != 2) { - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - break; - } - goto do_default; - - CASE_OP_32_64(brcond): - tmp = do_constant_folding_cond(opc, op->args[0], - op->args[1], op->args[2]); - if (tmp != 2) { - if (tmp) { - memset(&temps_used, 0, sizeof(temps_used)); - op->opc = INDEX_op_br; - op->args[0] = op->args[3]; - } else { - tcg_op_remove(s, op); - } - break; - } - goto do_default; - + done = fold_extract2(&ctx, op); + break; + CASE_OP_32_64(ext8s): + CASE_OP_32_64(ext16s): + case INDEX_op_ext32s_i64: + case INDEX_op_ext_i32_i64: + done = fold_exts(&ctx, op); + break; + CASE_OP_32_64(ext8u): + CASE_OP_32_64(ext16u): + case INDEX_op_ext32u_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extrl_i64_i32: + case INDEX_op_extrh_i64_i32: + done = fold_extu(&ctx, op); + break; + CASE_OP_32_64(ld8s): + CASE_OP_32_64(ld8u): + CASE_OP_32_64(ld16s): + CASE_OP_32_64(ld16u): + case INDEX_op_ld32s_i64: + case INDEX_op_ld32u_i64: + done = fold_tcg_ld(&ctx, op); + break; + case INDEX_op_mb: + done = fold_mb(&ctx, op); + break; + 
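The replacement pattern here is worth calling out: every opcode family is routed to a small fold_*() helper that returns true once it has fully handled (folded, rewritten or removed) the op, and anything left over falls through to a common finish step. A minimal sketch of that dispatch shape, with toy Ctx/Op types standing in for the real context and op structures:

#include <stdbool.h>
#include <stdio.h>

typedef struct { int dummy; } Ctx;                 /* toy stand-in */
typedef struct { int opc; long args[4]; } Op;      /* toy stand-in */

enum { OP_ADD, OP_NEG, OP_MOV };

/* Returns true if the op was completely handled. */
static bool fold_add(Ctx *ctx, Op *op)
{
    if (op->args[2] == 0) {      /* x + 0  =>  mov */
        op->opc = OP_MOV;
        return true;
    }
    return false;
}

static bool fold_neg(Ctx *ctx, Op *op)
{
    return false;                /* nothing to fold in this toy version */
}

static void finish_folding(Ctx *ctx, Op *op)
{
    /* common path: record/reset value info for the outputs */
}

static void optimize_one(Ctx *ctx, Op *op)
{
    bool done = false;

    switch (op->opc) {
    case OP_ADD:
        done = fold_add(ctx, op);
        break;
    case OP_NEG:
        done = fold_neg(ctx, op);
        break;
    default:
        break;
    }
    if (!done) {
        finish_folding(ctx, op);
    }
}

int main(void)
{
    Ctx ctx = {0};
    Op op = { OP_ADD, {0, 1, 0} };    /* r0 = r1 + 0 */

    optimize_one(&ctx, &op);
    printf("opc after folding: %d (expected %d, i.e. OP_MOV)\n", op.opc, OP_MOV);
    return 0;
}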
CASE_OP_32_64_VEC(mov): + done = fold_mov(&ctx, op); + break; CASE_OP_32_64(movcond): - tmp = do_constant_folding_cond(opc, op->args[1], - op->args[2], op->args[5]); - if (tmp != 2) { - tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]); - break; - } - if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { - uint64_t tv = arg_info(op->args[3])->val; - uint64_t fv = arg_info(op->args[4])->val; - TCGCond cond = op->args[5]; - - if (fv == 1 && tv == 0) { - cond = tcg_invert_cond(cond); - } else if (!(tv == 1 && fv == 0)) { - goto do_default; - } - op->args[3] = cond; - op->opc = opc = (opc == INDEX_op_movcond_i32 - ? INDEX_op_setcond_i32 - : INDEX_op_setcond_i64); - nb_iargs = 2; - } - goto do_default; - - case INDEX_op_add2_i32: - case INDEX_op_sub2_i32: - if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) - && arg_is_const(op->args[4]) && arg_is_const(op->args[5])) { - uint32_t al = arg_info(op->args[2])->val; - uint32_t ah = arg_info(op->args[3])->val; - uint32_t bl = arg_info(op->args[4])->val; - uint32_t bh = arg_info(op->args[5])->val; - uint64_t a = ((uint64_t)ah << 32) | al; - uint64_t b = ((uint64_t)bh << 32) | bl; - TCGArg rl, rh; - TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32); - - if (opc == INDEX_op_add2_i32) { - a += b; - } else { - a -= b; - } - - rl = op->args[0]; - rh = op->args[1]; - tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)a); - tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(a >> 32)); - break; - } - goto do_default; - - case INDEX_op_mulu2_i32: - if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { - uint32_t a = arg_info(op->args[2])->val; - uint32_t b = arg_info(op->args[3])->val; - uint64_t r = (uint64_t)a * b; - TCGArg rl, rh; - TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32); - - rl = op->args[0]; - rh = op->args[1]; - tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)r); - tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(r >> 32)); - break; - } - goto do_default; - - case INDEX_op_brcond2_i32: - tmp = do_constant_folding_cond2(&op->args[0], &op->args[2], - op->args[4]); - if (tmp != 2) { - if (tmp) { - do_brcond_true: - memset(&temps_used, 0, sizeof(temps_used)); - op->opc = INDEX_op_br; - op->args[0] = op->args[5]; - } else { - do_brcond_false: - tcg_op_remove(s, op); - } - } else if ((op->args[4] == TCG_COND_LT - || op->args[4] == TCG_COND_GE) - && arg_is_const(op->args[2]) - && arg_info(op->args[2])->val == 0 - && arg_is_const(op->args[3]) - && arg_info(op->args[3])->val == 0) { - /* Simplify LT/GE comparisons vs zero to a single compare - vs the high word of the input. */ - do_brcond_high: - memset(&temps_used, 0, sizeof(temps_used)); - op->opc = INDEX_op_brcond_i32; - op->args[0] = op->args[1]; - op->args[1] = op->args[3]; - op->args[2] = op->args[4]; - op->args[3] = op->args[5]; - } else if (op->args[4] == TCG_COND_EQ) { - /* Simplify EQ comparisons where one of the pairs - can be simplified. 
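The brcond2 simplifications removed here (and re-housed in fold helpers) rest on a simple observation: for a 64-bit value split into two 32-bit words, a signed comparison against zero is decided entirely by the high word. A standalone check of that equivalence, independent of any TCG structures:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Full 64-bit signed "less than zero" built from two 32-bit halves. */
static int lt_zero_64(uint32_t lo, uint32_t hi)
{
    int64_t v = ((int64_t)hi << 32) | lo;
    return v < 0;
}

/* The simplified form: only the high word carries the sign. */
static int lt_zero_high(uint32_t lo, uint32_t hi)
{
    (void)lo;
    return (int32_t)hi < 0;
}

int main(void)
{
    uint32_t samples[] = { 0, 1, 0x7fffffff, 0x80000000u, 0xffffffffu };

    for (unsigned i = 0; i < 5; i++) {
        for (unsigned j = 0; j < 5; j++) {
            assert(lt_zero_64(samples[i], samples[j]) ==
                   lt_zero_high(samples[i], samples[j]));
        }
    }
    printf("LT/GE vs zero depends only on the high word\n");
    return 0;
}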
*/ - tmp = do_constant_folding_cond(INDEX_op_brcond_i32, - op->args[0], op->args[2], - TCG_COND_EQ); - if (tmp == 0) { - goto do_brcond_false; - } else if (tmp == 1) { - goto do_brcond_high; - } - tmp = do_constant_folding_cond(INDEX_op_brcond_i32, - op->args[1], op->args[3], - TCG_COND_EQ); - if (tmp == 0) { - goto do_brcond_false; - } else if (tmp != 1) { - goto do_default; - } - do_brcond_low: - memset(&temps_used, 0, sizeof(temps_used)); - op->opc = INDEX_op_brcond_i32; - op->args[1] = op->args[2]; - op->args[2] = op->args[4]; - op->args[3] = op->args[5]; - } else if (op->args[4] == TCG_COND_NE) { - /* Simplify NE comparisons where one of the pairs - can be simplified. */ - tmp = do_constant_folding_cond(INDEX_op_brcond_i32, - op->args[0], op->args[2], - TCG_COND_NE); - if (tmp == 0) { - goto do_brcond_high; - } else if (tmp == 1) { - goto do_brcond_true; - } - tmp = do_constant_folding_cond(INDEX_op_brcond_i32, - op->args[1], op->args[3], - TCG_COND_NE); - if (tmp == 0) { - goto do_brcond_low; - } else if (tmp == 1) { - goto do_brcond_true; - } - goto do_default; - } else { - goto do_default; - } + done = fold_movcond(&ctx, op); + break; + CASE_OP_32_64(mul): + done = fold_mul(&ctx, op); + break; + CASE_OP_32_64(mulsh): + CASE_OP_32_64(muluh): + done = fold_mul_highpart(&ctx, op); + break; + CASE_OP_32_64(muls2): + CASE_OP_32_64(mulu2): + done = fold_multiply2(&ctx, op); + break; + CASE_OP_32_64_VEC(nand): + done = fold_nand(&ctx, op); + break; + CASE_OP_32_64(neg): + done = fold_neg(&ctx, op); + break; + CASE_OP_32_64_VEC(nor): + done = fold_nor(&ctx, op); + break; + CASE_OP_32_64_VEC(not): + done = fold_not(&ctx, op); + break; + CASE_OP_32_64_VEC(or): + done = fold_or(&ctx, op); + break; + CASE_OP_32_64_VEC(orc): + done = fold_orc(&ctx, op); + break; + case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_i64: + done = fold_qemu_ld(&ctx, op); + break; + case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st8_i32: + case INDEX_op_qemu_st_i64: + done = fold_qemu_st(&ctx, op); + break; + CASE_OP_32_64(rem): + CASE_OP_32_64(remu): + done = fold_remainder(&ctx, op); + break; + CASE_OP_32_64(rotl): + CASE_OP_32_64(rotr): + CASE_OP_32_64(sar): + CASE_OP_32_64(shl): + CASE_OP_32_64(shr): + done = fold_shift(&ctx, op); + break; + CASE_OP_32_64(setcond): + done = fold_setcond(&ctx, op); break; - case INDEX_op_setcond2_i32: - tmp = do_constant_folding_cond2(&op->args[1], &op->args[3], - op->args[5]); - if (tmp != 2) { - do_setcond_const: - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - } else if ((op->args[5] == TCG_COND_LT - || op->args[5] == TCG_COND_GE) - && arg_is_const(op->args[3]) - && arg_info(op->args[3])->val == 0 - && arg_is_const(op->args[4]) - && arg_info(op->args[4])->val == 0) { - /* Simplify LT/GE comparisons vs zero to a single compare - vs the high word of the input. */ - do_setcond_high: - reset_temp(op->args[0]); - arg_info(op->args[0])->mask = 1; - op->opc = INDEX_op_setcond_i32; - op->args[1] = op->args[2]; - op->args[2] = op->args[4]; - op->args[3] = op->args[5]; - } else if (op->args[5] == TCG_COND_EQ) { - /* Simplify EQ comparisons where one of the pairs - can be simplified. 
*/ - tmp = do_constant_folding_cond(INDEX_op_setcond_i32, - op->args[1], op->args[3], - TCG_COND_EQ); - if (tmp == 0) { - goto do_setcond_const; - } else if (tmp == 1) { - goto do_setcond_high; - } - tmp = do_constant_folding_cond(INDEX_op_setcond_i32, - op->args[2], op->args[4], - TCG_COND_EQ); - if (tmp == 0) { - goto do_setcond_high; - } else if (tmp != 1) { - goto do_default; - } - do_setcond_low: - reset_temp(op->args[0]); - arg_info(op->args[0])->mask = 1; - op->opc = INDEX_op_setcond_i32; - op->args[2] = op->args[3]; - op->args[3] = op->args[5]; - } else if (op->args[5] == TCG_COND_NE) { - /* Simplify NE comparisons where one of the pairs - can be simplified. */ - tmp = do_constant_folding_cond(INDEX_op_setcond_i32, - op->args[1], op->args[3], - TCG_COND_NE); - if (tmp == 0) { - goto do_setcond_high; - } else if (tmp == 1) { - goto do_setcond_const; - } - tmp = do_constant_folding_cond(INDEX_op_setcond_i32, - op->args[2], op->args[4], - TCG_COND_NE); - if (tmp == 0) { - goto do_setcond_low; - } else if (tmp == 1) { - goto do_setcond_const; - } - goto do_default; - } else { - goto do_default; - } + done = fold_setcond2(&ctx, op); + break; + CASE_OP_32_64(sextract): + done = fold_sextract(&ctx, op); + break; + CASE_OP_32_64(sub): + done = fold_sub(&ctx, op); + break; + case INDEX_op_sub_vec: + done = fold_sub_vec(&ctx, op); + break; + CASE_OP_32_64(sub2): + done = fold_sub2(&ctx, op); + break; + CASE_OP_32_64_VEC(xor): + done = fold_xor(&ctx, op); break; - - case INDEX_op_call: - if (!(tcg_call_flags(op) - & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) { - for (i = 0; i < nb_globals; i++) { - if (test_bit(i, temps_used.l)) { - reset_ts(&s->temps[i]); - } - } - } - goto do_reset_output; - default: - do_default: - /* Default case: we know nothing about operation (or were unable - to compute the operation result) so no propagation is done. - We trash everything if the operation is the end of a basic - block, otherwise we only trash the output args. "mask" is - the non-zero bits mask for the first output arg. */ - if (def->flags & TCG_OPF_BB_END) { - memset(&temps_used, 0, sizeof(temps_used)); - } else { - do_reset_output: - for (i = 0; i < nb_oargs; i++) { - reset_temp(op->args[i]); - /* Save the corresponding known-zero bits mask for the - first output argument (only one supported so far). */ - if (i == 0) { - arg_info(op->args[i])->mask = mask; - } - } - } break; } - /* Eliminate duplicate and redundant fence instructions. */ - if (prev_mb) { - switch (opc) { - case INDEX_op_mb: - /* Merge two barriers of the same type into one, - * or a weaker barrier into a stronger one, - * or two weaker barriers into a stronger one. - * mb X; mb Y => mb X|Y - * mb; strl => mb; st - * ldaq; mb => ld; mb - * ldaq; strl => ld; mb; st - * Other combinations are also merged into a strong - * barrier. This is stricter than specified but for - * the purposes of TCG is better than not optimizing. - */ - prev_mb->args[0] |= op->args[0]; - tcg_op_remove(s, op); - break; - - default: - /* Opcodes that end the block stop the optimization. */ - if ((def->flags & TCG_OPF_BB_END) == 0) { - break; - } - /* fallthru */ - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_ld_i64: - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st8_i32: - case INDEX_op_qemu_st_i64: - case INDEX_op_call: - /* Opcodes that touch guest memory stop the optimization. 
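The fence-merging rule that disappears from the main loop here (and returns via fold_mb) is purely a bitmask union: two adjacent barriers are equivalent to one barrier enforcing both orderings, and the rewrite is conservative because a stronger barrier never breaks correctness. A tiny sketch with placeholder ordering bits (the real TCG_MO_* values are not reproduced here):

#include <stdio.h>

enum {                       /* placeholder ordering bits */
    MO_LD_LD = 1 << 0,
    MO_LD_ST = 1 << 1,
    MO_ST_LD = 1 << 2,
    MO_ST_ST = 1 << 3,
};

/* mb X; mb Y  =>  mb X|Y */
static unsigned merge_barriers(unsigned first, unsigned second)
{
    return first | second;
}

int main(void)
{
    unsigned acquire_like = MO_LD_LD | MO_LD_ST;
    unsigned release_like = MO_LD_ST | MO_ST_ST;

    printf("merged barrier mask: %#x\n",
           merge_barriers(acquire_like, release_like));
    return 0;
}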
*/ - prev_mb = NULL; - break; - } - } else if (opc == INDEX_op_mb) { - prev_mb = op; + if (!done) { + finish_folding(&ctx, op); } } } diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc index e0f4665213..e3dba47697 100644 --- a/tcg/ppc/tcg-target.c.inc +++ b/tcg/ppc/tcg-target.c.inc @@ -24,10 +24,26 @@ #include "elf.h" #include "../tcg-pool.c.inc" +#include "../tcg-ldst.c.inc" + +/* + * Standardize on the _CALL_FOO symbols used by GCC: + * Apple XCode does not define _CALL_DARWIN. + * Clang defines _CALL_ELF (64-bit) but not _CALL_SYSV (32-bit). + */ +#if !defined(_CALL_SYSV) && \ + !defined(_CALL_DARWIN) && \ + !defined(_CALL_AIX) && \ + !defined(_CALL_ELF) +# if defined(__APPLE__) +# define _CALL_DARWIN +# elif defined(__ELF__) && TCG_TARGET_REG_BITS == 32 +# define _CALL_SYSV +# else +# error "Unknown ABI" +# endif +#endif -#if defined _CALL_DARWIN || defined __APPLE__ -#define TCG_TARGET_CALL_DARWIN -#endif #ifdef _CALL_SYSV # define TCG_TARGET_CALL_ALIGN_ARGS 1 #endif @@ -169,7 +185,7 @@ static const int tcg_target_call_oarg_regs[] = { }; static const int tcg_target_callee_save_regs[] = { -#ifdef TCG_TARGET_CALL_DARWIN +#ifdef _CALL_DARWIN TCG_REG_R11, #endif TCG_REG_R14, @@ -355,6 +371,8 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct) #define MULHWU XO31( 11) #define DIVW XO31(491) #define DIVWU XO31(459) +#define MODSW XO31(779) +#define MODUW XO31(267) #define CMP XO31( 0) #define CMPL XO31( 32) #define LHBRX XO31(790) @@ -387,6 +405,8 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct) #define MULHDU XO31( 9) #define DIVD XO31(489) #define DIVDU XO31(457) +#define MODSD XO31(777) +#define MODUD XO31(265) #define LBZX XO31( 87) #define LHZX XO31(279) @@ -1816,57 +1836,116 @@ static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args, static void tcg_out_mb(TCGContext *s, TCGArg a0) { - uint32_t insn = HWSYNC; - a0 &= TCG_MO_ALL; - if (a0 == TCG_MO_LD_LD) { + uint32_t insn; + + if (a0 & TCG_MO_ST_LD) { + insn = HWSYNC; + } else { insn = LWSYNC; - } else if (a0 == TCG_MO_ST_ST) { - insn = EIEIO; } + tcg_out32(s, insn); } +static inline uint64_t make_pair(tcg_insn_unit i1, tcg_insn_unit i2) +{ + if (HOST_BIG_ENDIAN) { + return (uint64_t)i1 << 32 | i2; + } + return (uint64_t)i2 << 32 | i1; +} + +static inline void ppc64_replace2(uintptr_t rx, uintptr_t rw, + tcg_insn_unit i0, tcg_insn_unit i1) +{ +#if TCG_TARGET_REG_BITS == 64 + qatomic_set((uint64_t *)rw, make_pair(i0, i1)); + flush_idcache_range(rx, rw, 8); +#else + qemu_build_not_reached(); +#endif +} + +static inline void ppc64_replace4(uintptr_t rx, uintptr_t rw, + tcg_insn_unit i0, tcg_insn_unit i1, + tcg_insn_unit i2, tcg_insn_unit i3) +{ + uint64_t p[2]; + + p[!HOST_BIG_ENDIAN] = make_pair(i0, i1); + p[HOST_BIG_ENDIAN] = make_pair(i2, i3); + + /* + * There's no convenient way to get the compiler to allocate a pair + * of registers at an even index, so copy into r6/r7 and clobber. 
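make_pair() and ppc64_replace2() above implement cross-modifying code patching: two adjacent 4-byte instructions are rewritten with one aligned 8-byte store, so another thread executing the translation block can only ever observe the old pair or the new pair. The endian dependence is just about which half of the 64-bit value lands at the lower address. A standalone sketch of the pairing (plain memcpy is used for the demo; the real patcher performs a single atomic 8-byte store and then flushes the icache):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Pack i0/i1 so that i0 ends up at the lower address on either endianness. */
static uint64_t make_insn_pair(uint32_t i0, uint32_t i1, int big_endian)
{
    return big_endian ? ((uint64_t)i0 << 32) | i1
                      : ((uint64_t)i1 << 32) | i0;
}

int main(void)
{
    uint32_t code[2] = { 0, 0 };
    uint32_t i0 = 0x11111111, i1 = 0x22222222;

    union { uint16_t u16; uint8_t u8[2]; } probe = { .u16 = 1 };
    int big_endian = (probe.u8[0] == 0);

    uint64_t pair = make_insn_pair(i0, i1, big_endian);
    memcpy(code, &pair, sizeof(pair));     /* stands in for the atomic store */

    printf("code[0]=%#x code[1]=%#x (expect 0x11111111 0x22222222)\n",
           code[0], code[1]);
    return 0;
}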
+ */ + asm("mr %%r6, %1\n\t" + "mr %%r7, %2\n\t" + "stq %%r6, %0" + : "=Q"(*(__int128 *)rw) : "r"(p[0]), "r"(p[1]) : "r6", "r7"); + flush_idcache_range(rx, rw, 16); +} + void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx, uintptr_t jmp_rw, uintptr_t addr) { - if (TCG_TARGET_REG_BITS == 64) { - tcg_insn_unit i1, i2; - intptr_t tb_diff = addr - tc_ptr; - intptr_t br_diff = addr - (jmp_rx + 4); - uint64_t pair; + tcg_insn_unit i0, i1, i2, i3; + intptr_t tb_diff = addr - tc_ptr; + intptr_t br_diff = addr - (jmp_rx + 4); + intptr_t lo, hi; - /* This does not exercise the range of the branch, but we do - still need to be able to load the new value of TCG_REG_TB. - But this does still happen quite often. */ - if (tb_diff == (int16_t)tb_diff) { - i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff); - i2 = B | (br_diff & 0x3fffffc); - } else { - intptr_t lo = (int16_t)tb_diff; - intptr_t hi = (int32_t)(tb_diff - lo); - assert(tb_diff == hi + lo); - i1 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16); - i2 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo); - } -#ifdef HOST_WORDS_BIGENDIAN - pair = (uint64_t)i1 << 32 | i2; -#else - pair = (uint64_t)i2 << 32 | i1; -#endif - - /* As per the enclosing if, this is ppc64. Avoid the _Static_assert - within qatomic_set that would fail to build a ppc32 host. */ - qatomic_set__nocheck((uint64_t *)jmp_rw, pair); - flush_idcache_range(jmp_rx, jmp_rw, 8); - } else { + if (TCG_TARGET_REG_BITS == 32) { intptr_t diff = addr - jmp_rx; tcg_debug_assert(in_range_b(diff)); qatomic_set((uint32_t *)jmp_rw, B | (diff & 0x3fffffc)); flush_idcache_range(jmp_rx, jmp_rw, 4); + return; } + + /* + * For 16-bit displacements, we can use a single add + branch. + * This happens quite often. + */ + if (tb_diff == (int16_t)tb_diff) { + i0 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff); + i1 = B | (br_diff & 0x3fffffc); + ppc64_replace2(jmp_rx, jmp_rw, i0, i1); + return; + } + + lo = (int16_t)tb_diff; + hi = (int32_t)(tb_diff - lo); + assert(tb_diff == hi + lo); + i0 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16); + i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo); + + /* + * Without stq from 2.07, we can only update two insns, + * and those must be the ones that load the target address. + */ + if (!have_isa_2_07) { + ppc64_replace2(jmp_rx, jmp_rw, i0, i1); + return; + } + + /* + * For 26-bit displacements, we can use a direct branch. + * Otherwise we still need the indirect branch, which we + * must restore after a potential direct branch write. + */ + br_diff -= 4; + if (in_range_b(br_diff)) { + i2 = B | (br_diff & 0x3fffffc); + i3 = NOP; + } else { + i2 = MTSPR | RS(TCG_REG_TB) | CTR; + i3 = BCCTR | BO_ALWAYS; + } + ppc64_replace4(jmp_rx, jmp_rw, i0, i1, i2, i3); } -static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target) +static void tcg_out_call_int(TCGContext *s, int lk, + const tcg_insn_unit *target) { #ifdef _CALL_AIX /* Look through the descriptor. If the branch is in range, and we @@ -1877,7 +1956,7 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target) if (in_range_b(diff) && toc == (uint32_t)toc) { tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc); - tcg_out_b(s, LK, tgt); + tcg_out_b(s, lk, tgt); } else { /* Fold the low bits of the constant into the addresses below. 
*/ intptr_t arg = (intptr_t)target; @@ -1892,7 +1971,7 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target) tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs); tcg_out32(s, MTSPR | RA(TCG_REG_R0) | CTR); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP); - tcg_out32(s, BCCTR | BO_ALWAYS | LK); + tcg_out32(s, BCCTR | BO_ALWAYS | lk); } #elif defined(_CALL_ELF) && _CALL_ELF == 2 intptr_t diff; @@ -1906,38 +1985,43 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target) diff = tcg_pcrel_diff(s, target); if (in_range_b(diff)) { - tcg_out_b(s, LK, target); + tcg_out_b(s, lk, target); } else { tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR); - tcg_out32(s, BCCTR | BO_ALWAYS | LK); + tcg_out32(s, BCCTR | BO_ALWAYS | lk); } #else - tcg_out_b(s, LK, target); + tcg_out_b(s, lk, target); #endif } -static const uint32_t qemu_ldx_opc[16] = { +static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target) +{ + tcg_out_call_int(s, LK, target); +} + +static const uint32_t qemu_ldx_opc[(MO_SSIZE + MO_BSWAP) + 1] = { [MO_UB] = LBZX, [MO_UW] = LHZX, [MO_UL] = LWZX, - [MO_Q] = LDX, + [MO_UQ] = LDX, [MO_SW] = LHAX, [MO_SL] = LWAX, [MO_BSWAP | MO_UB] = LBZX, [MO_BSWAP | MO_UW] = LHBRX, [MO_BSWAP | MO_UL] = LWBRX, - [MO_BSWAP | MO_Q] = LDBRX, + [MO_BSWAP | MO_UQ] = LDBRX, }; -static const uint32_t qemu_stx_opc[16] = { +static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = { [MO_UB] = STBX, [MO_UW] = STHX, [MO_UL] = STWX, - [MO_Q] = STDX, + [MO_UQ] = STDX, [MO_BSWAP | MO_UB] = STBX, [MO_BSWAP | MO_UW] = STHBRX, [MO_BSWAP | MO_UL] = STWBRX, - [MO_BSWAP | MO_Q] = STDBRX, + [MO_BSWAP | MO_UQ] = STDBRX, }; static const uint32_t qemu_exts_opc[4] = { @@ -1945,32 +2029,30 @@ static const uint32_t qemu_exts_opc[4] = { }; #if defined (CONFIG_SOFTMMU) -#include "../tcg-ldst.c.inc" - /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr, * int mmu_idx, uintptr_t ra) */ -static void * const qemu_ld_helpers[16] = { +static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = { [MO_UB] = helper_ret_ldub_mmu, [MO_LEUW] = helper_le_lduw_mmu, [MO_LEUL] = helper_le_ldul_mmu, - [MO_LEQ] = helper_le_ldq_mmu, + [MO_LEUQ] = helper_le_ldq_mmu, [MO_BEUW] = helper_be_lduw_mmu, [MO_BEUL] = helper_be_ldul_mmu, - [MO_BEQ] = helper_be_ldq_mmu, + [MO_BEUQ] = helper_be_ldq_mmu, }; /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr, * uintxx_t val, int mmu_idx, uintptr_t ra) */ -static void * const qemu_st_helpers[16] = { +static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = { [MO_UB] = helper_ret_stb_mmu, [MO_LEUW] = helper_le_stw_mmu, [MO_LEUL] = helper_le_stl_mmu, - [MO_LEQ] = helper_le_stq_mmu, + [MO_LEUQ] = helper_le_stq_mmu, [MO_BEUW] = helper_be_stw_mmu, [MO_BEUL] = helper_be_stl_mmu, - [MO_BEQ] = helper_be_stq_mmu, + [MO_BEUQ] = helper_be_stq_mmu, }; /* We expect to use a 16-bit negative offset from ENV. */ @@ -2088,7 +2170,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc, /* Record the context of a call to the out of line helper code for the slow path for a load or store, so that we can later generate the correct helper code. 
*/ -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi, TCGReg datalo_reg, TCGReg datahi_reg, TCGReg addrlo_reg, TCGReg addrhi_reg, tcg_insn_unit *raddr, tcg_insn_unit *lptr) @@ -2107,7 +2189,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { - TCGMemOpIdx oi = lb->oi; + MemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); TCGReg hi, lo, arg = TCG_REG_R3; @@ -2154,7 +2236,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { - TCGMemOpIdx oi = lb->oi; + MemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); MemOp s_bits = opc & MO_SIZE; TCGReg hi, lo, arg = TCG_REG_R3; @@ -2212,17 +2294,84 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) tcg_out_b(s, 0, lb->raddr); return true; } +#else + +static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo, + TCGReg addrhi, unsigned a_bits) +{ + unsigned a_mask = (1 << a_bits) - 1; + TCGLabelQemuLdst *label = new_ldst_label(s); + + label->is_ld = is_ld; + label->addrlo_reg = addrlo; + label->addrhi_reg = addrhi; + + /* We are expecting a_bits to max out at 7, much lower than ANDI. */ + tcg_debug_assert(a_bits < 16); + tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, a_mask)); + + label->label_ptr[0] = s->code_ptr; + tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK); + + label->raddr = tcg_splitwx_to_rx(s->code_ptr); +} + +static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) +{ + if (!reloc_pc14(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { + return false; + } + + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { + TCGReg arg = TCG_REG_R4; +#ifdef TCG_TARGET_CALL_ALIGN_ARGS + arg |= 1; +#endif + if (l->addrlo_reg != arg) { + tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg); + tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg); + } else if (l->addrhi_reg != arg + 1) { + tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg); + tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg); + } else { + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R0, arg); + tcg_out_mov(s, TCG_TYPE_I32, arg, arg + 1); + tcg_out_mov(s, TCG_TYPE_I32, arg + 1, TCG_REG_R0); + } + } else { + tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R4, l->addrlo_reg); + } + tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, TCG_AREG0); + + /* "Tail call" to the helper, with the return address back inline. */ + tcg_out_call_int(s, 0, (const void *)(l->is_ld ? helper_unaligned_ld + : helper_unaligned_st)); + return true; +} + +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + return tcg_out_fail_alignment(s, l); +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + return tcg_out_fail_alignment(s, l); +} + #endif /* SOFTMMU */ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg datalo, datahi, addrlo, rbase; TCGReg addrhi __attribute__((unused)); - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc, s_bits; #ifdef CONFIG_SOFTMMU int mem_index; tcg_insn_unit *label_ptr; +#else + unsigned a_bits; #endif datalo = *args++; @@ -2243,6 +2392,10 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) rbase = TCG_REG_R3; #else /* !CONFIG_SOFTMMU */ + a_bits = get_alignment_bits(opc); + if (a_bits) { + tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); + } rbase = guest_base ? 
TCG_GUEST_BASE_REG : 0; if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { tcg_out_ext32u(s, TCG_REG_TMP1, addrlo); @@ -2293,11 +2446,13 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg datalo, datahi, addrlo, rbase; TCGReg addrhi __attribute__((unused)); - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc, s_bits; #ifdef CONFIG_SOFTMMU int mem_index; tcg_insn_unit *label_ptr; +#else + unsigned a_bits; #endif datalo = *args++; @@ -2318,6 +2473,10 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) rbase = TCG_REG_R3; #else /* !CONFIG_SOFTMMU */ + a_bits = get_alignment_bits(opc); + if (a_bits) { + tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits); + } rbase = guest_base ? TCG_GUEST_BASE_REG : 0; if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { tcg_out_ext32u(s, TCG_REG_TMP1, addrlo); @@ -2372,7 +2531,7 @@ static void tcg_out_nop_fill(tcg_insn_unit *p, int count) # define LINK_AREA_SIZE (6 * SZR) # define LR_OFFSET (1 * SZR) # define TCG_TARGET_CALL_STACK_OFFSET (LINK_AREA_SIZE + 8 * SZR) -#elif defined(TCG_TARGET_CALL_DARWIN) +#elif defined(_CALL_DARWIN) # define LINK_AREA_SIZE (6 * SZR) # define LR_OFFSET (2 * SZR) #elif TCG_TARGET_REG_BITS == 64 @@ -2472,8 +2631,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, if (s->tb_jmp_insn_offset) { /* Direct jump. */ if (TCG_TARGET_REG_BITS == 64) { - /* Ensure the next insns are 8-byte aligned. */ - if ((uintptr_t)s->code_ptr & 7) { + /* Ensure the next insns are 8 or 16-byte aligned. */ + while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) { tcg_out32(s, NOP); } s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s); @@ -2708,6 +2867,14 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2])); break; + case INDEX_op_rem_i32: + tcg_out32(s, MODSW | TAB(args[0], args[1], args[2])); + break; + + case INDEX_op_remu_i32: + tcg_out32(s, MODUW | TAB(args[0], args[1], args[2])); + break; + case INDEX_op_shl_i32: if (const_args[2]) { /* Limit immediate shift count lest we create an illegal insn. 
*/ @@ -2849,6 +3016,12 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_divu_i64: tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2])); break; + case INDEX_op_rem_i64: + tcg_out32(s, MODSD | TAB(args[0], args[1], args[2])); + break; + case INDEX_op_remu_i64: + tcg_out32(s, MODUD | TAB(args[0], args[1], args[2])); + break; case INDEX_op_qemu_ld_i32: tcg_out_qemu_ld(s, args, false); @@ -3025,6 +3198,9 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case INDEX_op_xor_vec: case INDEX_op_andc_vec: case INDEX_op_not_vec: + case INDEX_op_nor_vec: + case INDEX_op_eqv_vec: + case INDEX_op_nand_vec: return 1; case INDEX_op_orc_vec: return have_isa_2_07; @@ -3135,7 +3311,7 @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, tcg_out_mem_long(s, 0, LVEBX, out, base, offset); } elt = extract32(offset, 0, 4); -#ifndef HOST_WORDS_BIGENDIAN +#if !HOST_BIG_ENDIAN elt ^= 15; #endif tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16)); @@ -3148,7 +3324,7 @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, tcg_out_mem_long(s, 0, LVEHX, out, base, offset); } elt = extract32(offset, 1, 3); -#ifndef HOST_WORDS_BIGENDIAN +#if !HOST_BIG_ENDIAN elt ^= 7; #endif tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16)); @@ -3161,7 +3337,7 @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, tcg_debug_assert((offset & 3) == 0); tcg_out_mem_long(s, 0, LVEWX, out, base, offset); elt = extract32(offset, 2, 2); -#ifndef HOST_WORDS_BIGENDIAN +#if !HOST_BIG_ENDIAN elt ^= 3; #endif tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16)); @@ -3175,7 +3351,7 @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, tcg_out_mem_long(s, 0, LVX, out, base, offset & -16); tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8); elt = extract32(offset, 3, 1); -#ifndef HOST_WORDS_BIGENDIAN +#if !HOST_BIG_ENDIAN elt = !elt; #endif if (elt) { @@ -3303,6 +3479,15 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, case INDEX_op_orc_vec: insn = VORC; break; + case INDEX_op_nand_vec: + insn = VNAND; + break; + case INDEX_op_nor_vec: + insn = VNOR; + break; + case INDEX_op_eqv_vec: + insn = VEQV; + break; case INDEX_op_cmp_vec: switch (args[3]) { @@ -3612,6 +3797,8 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_div_i32: case INDEX_op_divu_i32: + case INDEX_op_rem_i32: + case INDEX_op_remu_i32: case INDEX_op_nand_i32: case INDEX_op_nor_i32: case INDEX_op_muluh_i32: @@ -3622,6 +3809,8 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_nor_i64: case INDEX_op_div_i64: case INDEX_op_divu_i64: + case INDEX_op_rem_i64: + case INDEX_op_remu_i64: case INDEX_op_mulsh_i64: case INDEX_op_muluh_i64: return C_O1_I2(r, r, r); @@ -3690,6 +3879,9 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_xor_vec: case INDEX_op_andc_vec: case INDEX_op_orc_vec: + case INDEX_op_nor_vec: + case INDEX_op_eqv_vec: + case INDEX_op_nand_vec: case INDEX_op_cmp_vec: case INDEX_op_ssadd_vec: case INDEX_op_sssub_vec: @@ -3890,3 +4082,10 @@ void tcg_register_jit(const void *buf, size_t buf_size) tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); } #endif /* __ELF__ */ +#undef VMULEUB +#undef VMULEUH +#undef VMULEUW +#undef VMULOUB +#undef VMULOUH +#undef VMULOUW +#undef VMSUMUHM diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h index 0943192cde..b5cd225cfa 100644 --- a/tcg/ppc/tcg-target.h +++ b/tcg/ppc/tcg-target.h @@ -83,7 +83,7 @@ 
extern bool have_vsx; /* optional instructions */ #define TCG_TARGET_HAS_div_i32 1 -#define TCG_TARGET_HAS_rem_i32 0 +#define TCG_TARGET_HAS_rem_i32 have_isa_3_00 #define TCG_TARGET_HAS_rot_i32 1 #define TCG_TARGET_HAS_ext8s_i32 1 #define TCG_TARGET_HAS_ext16s_i32 1 @@ -117,7 +117,7 @@ extern bool have_vsx; #define TCG_TARGET_HAS_extrl_i64_i32 0 #define TCG_TARGET_HAS_extrh_i64_i32 0 #define TCG_TARGET_HAS_div_i64 1 -#define TCG_TARGET_HAS_rem_i64 0 +#define TCG_TARGET_HAS_rem_i64 have_isa_3_00 #define TCG_TARGET_HAS_rot_i64 1 #define TCG_TARGET_HAS_ext8s_i64 1 #define TCG_TARGET_HAS_ext16s_i64 1 @@ -162,6 +162,9 @@ extern bool have_vsx; #define TCG_TARGET_HAS_andc_vec 1 #define TCG_TARGET_HAS_orc_vec have_isa_2_07 +#define TCG_TARGET_HAS_nand_vec have_isa_2_07 +#define TCG_TARGET_HAS_nor_vec 1 +#define TCG_TARGET_HAS_eqv_vec have_isa_2_07 #define TCG_TARGET_HAS_not_vec 1 #define TCG_TARGET_HAS_neg_vec have_isa_3_00 #define TCG_TARGET_HAS_abs_vec 0 @@ -182,9 +185,7 @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t); #define TCG_TARGET_DEFAULT_MO (0) #define TCG_TARGET_HAS_MEMORY_BSWAP 1 -#ifdef CONFIG_SOFTMMU #define TCG_TARGET_NEED_LDST_LABELS -#endif #define TCG_TARGET_NEED_POOL_LABELS #endif diff --git a/tcg/region.c b/tcg/region.c index e64c3ea230..88d6bb273f 100644 --- a/tcg/region.c +++ b/tcg/region.c @@ -24,6 +24,10 @@ #include "qemu/osdep.h" #include "qemu/units.h" +#include "qemu/madvise.h" +#include "qemu/mprotect.h" +#include "qemu/memalign.h" +#include "qemu/cacheinfo.h" #include "qapi/error.h" #include "exec/exec-all.h" #include "tcg/tcg.h" @@ -467,38 +471,6 @@ static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus) (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \ ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE) -#ifdef __mips__ -/* - * In order to use J and JAL within the code_gen_buffer, we require - * that the buffer not cross a 256MB boundary. - */ -static inline bool cross_256mb(void *addr, size_t size) -{ - return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful; -} - -/* - * We weren't able to allocate a buffer without crossing that boundary, - * so make do with the larger portion of the buffer that doesn't cross. - * Returns the new base and size of the buffer in *obuf and *osize. - */ -static inline void split_cross_256mb(void **obuf, size_t *osize, - void *buf1, size_t size1) -{ - void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful); - size_t size2 = buf1 + size1 - buf2; - - size1 = buf2 - buf1; - if (size1 < size2) { - size1 = size2; - buf1 = buf2; - } - - *obuf = buf1; - *osize = size1; -} -#endif - #ifdef USE_STATIC_CODE_GEN_BUFFER static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE] __attribute__((aligned(CODE_GEN_ALIGN))); @@ -516,22 +488,16 @@ static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp) /* page-align the beginning and end of the buffer */ buf = static_code_gen_buffer; end = static_code_gen_buffer + sizeof(static_code_gen_buffer); - buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size); - end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size); + buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size()); + end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size()); size = end - buf; /* Honor a command-line option limiting the size of the buffer. 
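The static-buffer path above trims code_gen_buffer to whole host pages (start rounded up, end rounded down) so that the protection of every byte in the region can later be changed. A minimal sketch of that alignment step, with sysconf(_SC_PAGESIZE) standing in for qemu_real_host_page_size():

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define ALIGN_UP(p, a)    (((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1))
#define ALIGN_DOWN(p, a)  ((uintptr_t)(p) & ~((uintptr_t)(a) - 1))

static unsigned char static_buf[1024 * 1024];   /* stand-in for the real buffer */

int main(void)
{
    uintptr_t page = (uintptr_t)sysconf(_SC_PAGESIZE);

    uintptr_t start = ALIGN_UP(static_buf, page);
    uintptr_t end = ALIGN_DOWN(static_buf + sizeof(static_buf), page);

    printf("page=%zu raw=%zu usable=%zu\n",
           (size_t)page, sizeof(static_buf), (size_t)(end - start));
    return 0;
}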
*/ if (size > tb_size) { - size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size); + size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size()); } -#ifdef __mips__ - if (cross_256mb(buf, size)) { - split_cross_256mb(&buf, &size, buf, size); - } -#endif - region.start_aligned = buf; region.total_size = size; @@ -573,39 +539,6 @@ static int alloc_code_gen_buffer_anon(size_t size, int prot, return -1; } -#ifdef __mips__ - if (cross_256mb(buf, size)) { - /* - * Try again, with the original still mapped, to avoid re-acquiring - * the same 256mb crossing. - */ - size_t size2; - void *buf2 = mmap(NULL, size, prot, flags, -1, 0); - switch ((int)(buf2 != MAP_FAILED)) { - case 1: - if (!cross_256mb(buf2, size)) { - /* Success! Use the new buffer. */ - munmap(buf, size); - break; - } - /* Failure. Work with what we had. */ - munmap(buf2, size); - /* fallthru */ - default: - /* Split the original buffer. Free the smaller half. */ - split_cross_256mb(&buf2, &size2, buf, size); - if (buf == buf2) { - munmap(buf + size2, size - size2); - } else { - munmap(buf, size - size2); - } - size = size2; - break; - } - buf = buf2; - } -#endif - region.start_aligned = buf; region.total_size = size; return prot; @@ -615,40 +548,20 @@ static int alloc_code_gen_buffer_anon(size_t size, int prot, #ifdef CONFIG_POSIX #include "qemu/memfd.h" -static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp) +static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp) { void *buf_rw = NULL, *buf_rx = MAP_FAILED; int fd = -1; -#ifdef __mips__ - /* Find space for the RX mapping, vs the 256MiB regions. */ - if (alloc_code_gen_buffer_anon(size, PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS | - MAP_NORESERVE, errp) < 0) { - return false; - } - /* The size of the mapping may have been adjusted. */ - buf_rx = region.start_aligned; - size = region.total_size; -#endif - buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp); if (buf_rw == NULL) { goto fail; } -#ifdef __mips__ - void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC, - MAP_SHARED | MAP_FIXED, fd, 0); - if (tmp != buf_rx) { - goto fail_rx; - } -#else buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0); if (buf_rx == MAP_FAILED) { goto fail_rx; } -#endif close(fd); region.start_aligned = buf_rw; @@ -816,7 +729,7 @@ static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp) */ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus) { - const size_t page_size = qemu_real_host_page_size; + const size_t page_size = qemu_real_host_page_size(); size_t region_size; int have_prot, need_prot; diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index c16f96b401..81a83e45b1 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -27,6 +27,7 @@ * THE SOFTWARE. 
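The split-w^x allocator above is built around mapping one anonymous file twice: a read+write view that the translator writes into, and a separate read+exec view that the generated code runs from, so no page is ever writable and executable at once. A stripped-down Linux-only sketch of the same idea (memfd_create() and minimal error handling; qemu_memfd_alloc() wraps more than this):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t size = 64 * 1024;

    int fd = memfd_create("jit-demo", 0);
    if (fd < 0 || ftruncate(fd, size) < 0) {
        perror("memfd");
        return 1;
    }

    /* Writable view for emitting code ... */
    void *rw = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    /* ... and an executable, non-writable view of the same pages. */
    void *rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
    close(fd);

    if (rw == MAP_FAILED || rx == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    memset(rw, 0x90, 16);     /* bytes written via rw become visible via rx */
    printf("rw=%p rx=%p first rx byte=%#x\n",
           rw, rx, ((unsigned char *)rx)[0]);
    return 0;
}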
*/ +#include "../tcg-ldst.c.inc" #include "../tcg-pool.c.inc" #ifdef CONFIG_DEBUG_TCG @@ -847,22 +848,20 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0) */ #if defined(CONFIG_SOFTMMU) -#include "../tcg-ldst.c.inc" - /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, - * TCGMemOpIdx oi, uintptr_t ra) + * MemOpIdx oi, uintptr_t ra) */ -static void * const qemu_ld_helpers[8] = { +static void * const qemu_ld_helpers[MO_SSIZE + 1] = { [MO_UB] = helper_ret_ldub_mmu, [MO_SB] = helper_ret_ldsb_mmu, -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN [MO_UW] = helper_be_lduw_mmu, [MO_SW] = helper_be_ldsw_mmu, [MO_UL] = helper_be_ldul_mmu, #if TCG_TARGET_REG_BITS == 64 [MO_SL] = helper_be_ldsl_mmu, #endif - [MO_Q] = helper_be_ldq_mmu, + [MO_UQ] = helper_be_ldq_mmu, #else [MO_UW] = helper_le_lduw_mmu, [MO_SW] = helper_le_ldsw_mmu, @@ -870,17 +869,17 @@ static void * const qemu_ld_helpers[8] = { #if TCG_TARGET_REG_BITS == 64 [MO_SL] = helper_le_ldsl_mmu, #endif - [MO_Q] = helper_le_ldq_mmu, + [MO_UQ] = helper_le_ldq_mmu, #endif }; /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, - * uintxx_t val, TCGMemOpIdx oi, + * uintxx_t val, MemOpIdx oi, * uintptr_t ra) */ -static void * const qemu_st_helpers[4] = { +static void * const qemu_st_helpers[MO_SIZE + 1] = { [MO_8] = helper_ret_stb_mmu, -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN [MO_16] = helper_be_stw_mmu, [MO_32] = helper_be_stl_mmu, [MO_64] = helper_be_stq_mmu, @@ -906,7 +905,7 @@ static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) } static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, - TCGReg addrh, TCGMemOpIdx oi, + TCGReg addrh, MemOpIdx oi, tcg_insn_unit **label_ptr, bool is_load) { MemOp opc = get_memop(oi); @@ -959,7 +958,7 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl); } -static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, +static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi, TCGType ext, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addrhi, @@ -980,7 +979,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { - TCGMemOpIdx oi = l->oi; + MemOpIdx oi = l->oi; MemOp opc = get_memop(oi); TCGReg a0 = tcg_target_call_iarg_regs[0]; TCGReg a1 = tcg_target_call_iarg_regs[1]; @@ -1012,7 +1011,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { - TCGMemOpIdx oi = l->oi; + MemOpIdx oi = l->oi; MemOp opc = get_memop(oi); MemOp s_bits = opc & MO_SIZE; TCGReg a0 = tcg_target_call_iarg_regs[0]; @@ -1053,6 +1052,54 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) tcg_out_goto(s, l->raddr); return true; } +#else + +static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg, + unsigned a_bits) +{ + unsigned a_mask = (1 << a_bits) - 1; + TCGLabelQemuLdst *l = new_ldst_label(s); + + l->is_ld = is_ld; + l->addrlo_reg = addr_reg; + + /* We are expecting a_bits to max out at 7, so we can always use andi. 
*/ + tcg_debug_assert(a_bits < 12); + tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask); + + l->label_ptr[0] = s->code_ptr; + tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0); + + l->raddr = tcg_splitwx_to_rx(s->code_ptr); +} + +static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) +{ + /* resolve label address */ + if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { + return false; + } + + tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg); + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); + + /* tail call, with the return address back inline. */ + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr); + tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld + : helper_unaligned_st), true); + return true; +} + +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + return tcg_out_fail_alignment(s, l); +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + return tcg_out_fail_alignment(s, l); +} + #endif /* CONFIG_SOFTMMU */ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, @@ -1083,7 +1130,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, case MO_SL: tcg_out_opc_imm(s, OPC_LW, lo, base, 0); break; - case MO_Q: + case MO_UQ: /* Prefer to load from offset 0 first, but allow for overlap. */ if (TCG_TARGET_REG_BITS == 64) { tcg_out_opc_imm(s, OPC_LD, lo, base, 0); @@ -1104,10 +1151,12 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[1]; +#else + unsigned a_bits; #endif TCGReg base = TCG_REG_TMP0; @@ -1130,10 +1179,11 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) tcg_out_ext32u(s, base, addr_regl); addr_regl = base; } - - if (guest_base == 0) { - tcg_out_opc_reg(s, OPC_ADD, base, addr_regl, TCG_REG_ZERO); - } else { + a_bits = get_alignment_bits(opc); + if (a_bits) { + tcg_out_test_alignment(s, true, addr_regl, a_bits); + } + if (guest_base != 0) { tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl); } tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); @@ -1173,10 +1223,12 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[1]; +#else + unsigned a_bits; #endif TCGReg base = TCG_REG_TMP0; @@ -1199,10 +1251,11 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) tcg_out_ext32u(s, base, addr_regl); addr_regl = base; } - - if (guest_base == 0) { - tcg_out_opc_reg(s, OPC_ADD, base, addr_regl, TCG_REG_ZERO); - } else { + a_bits = get_alignment_bits(opc); + if (a_bits) { + tcg_out_test_alignment(s, false, addr_regl, a_bits); + } + if (guest_base != 0) { tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl); } tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h index ef78b99e98..11c9b3e4f4 100644 --- a/tcg/riscv/tcg-target.h +++ b/tcg/riscv/tcg-target.h @@ -165,9 +165,7 @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t); #define TCG_TARGET_DEFAULT_MO (0) -#ifdef CONFIG_SOFTMMU #define TCG_TARGET_NEED_LDST_LABELS 
-#endif #define TCG_TARGET_NEED_POOL_LABELS #define TCG_TARGET_HAS_MEMORY_BSWAP 0 diff --git a/tcg/s390/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h similarity index 86% rename from tcg/s390/tcg-target-con-set.h rename to tcg/s390x/tcg-target-con-set.h index 31985e4903..426dd92e51 100644 --- a/tcg/s390/tcg-target-con-set.h +++ b/tcg/s390x/tcg-target-con-set.h @@ -13,13 +13,20 @@ C_O0_I1(r) C_O0_I2(L, L) C_O0_I2(r, r) C_O0_I2(r, ri) +C_O0_I2(v, r) C_O1_I1(r, L) C_O1_I1(r, r) +C_O1_I1(v, r) +C_O1_I1(v, v) +C_O1_I1(v, vr) C_O1_I2(r, 0, ri) C_O1_I2(r, 0, rI) C_O1_I2(r, 0, rJ) C_O1_I2(r, r, ri) C_O1_I2(r, rZ, r) +C_O1_I2(v, v, r) +C_O1_I2(v, v, v) +C_O1_I3(v, v, v, v) C_O1_I4(r, r, ri, r, 0) C_O1_I4(r, r, ri, rI, 0) C_O2_I2(b, a, 0, r) diff --git a/tcg/s390/tcg-target-con-str.h b/tcg/s390x/tcg-target-con-str.h similarity index 96% rename from tcg/s390/tcg-target-con-str.h rename to tcg/s390x/tcg-target-con-str.h index 892d8f8c06..8bb0358ae5 100644 --- a/tcg/s390/tcg-target-con-str.h +++ b/tcg/s390x/tcg-target-con-str.h @@ -10,6 +10,7 @@ */ REGS('r', ALL_GENERAL_REGS) REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS) +REGS('v', ALL_VECTOR_REGS) /* * A (single) even/odd pair for division. * TODO: Add something to the register allocator to allow diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc similarity index 71% rename from tcg/s390/tcg-target.c.inc rename to tcg/s390x/tcg-target.c.inc index b82cf19f09..33becd7694 100644 --- a/tcg/s390/tcg-target.c.inc +++ b/tcg/s390x/tcg-target.c.inc @@ -29,6 +29,7 @@ #error "unsupported code generation mode" #endif +#include "../tcg-ldst.c.inc" #include "../tcg-pool.c.inc" #include "elf.h" @@ -43,6 +44,8 @@ #define TCG_CT_CONST_ZERO 0x800 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 16) +#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32) + /* * For softmmu, we need to avoid conflicts with the first 3 * argument registers to perform the tlb lookup, and to call @@ -66,7 +69,7 @@ We don't need this when we have pc-relative loads with the general instructions extension facility. 
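As the following hunks switch the backend from a single s390_facilities word to an array probed through a HAVE_FACILITY() macro, it may help to recall how STFLE-style facility lists are indexed: bits are numbered from the most significant bit of each 64-bit doubleword. A plausible standalone sketch of such a test (the macro actually used by the backend may differ in detail):

#include <stdint.h>
#include <stdio.h>

/* Facility bit nr: bit 0 is the MSB of facilities[0], bit 64 the MSB of
 * facilities[1], and so on. */
static int have_facility(const uint64_t *facilities, unsigned nr)
{
    return (facilities[nr / 64] >> (63 - (nr % 64))) & 1;
}

int main(void)
{
    uint64_t fac[3] = { 0, 0, 0 };

    /* Pretend facility 129 (the vector facility) is installed. */
    fac[129 / 64] |= 1ULL << (63 - (129 % 64));

    printf("facility 129: %d, facility 130: %d\n",
           have_facility(fac, 129), have_facility(fac, 130));
    return 0;
}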
*/ #define TCG_REG_TB TCG_REG_R12 -#define USE_REG_TB (!(s390_facilities & FACILITY_GEN_INST_EXT)) +#define USE_REG_TB (!HAVE_FACILITY(GEN_INST_EXT)) #ifndef CONFIG_SOFTMMU #define TCG_GUEST_BASE_REG TCG_REG_R13 @@ -134,6 +137,7 @@ typedef enum S390Opcode { RI_OIHL = 0xa509, RI_OILH = 0xa50a, RI_OILL = 0xa50b, + RI_TMLL = 0xa701, RIE_CGIJ = 0xec7c, RIE_CGRJ = 0xec64, @@ -263,13 +267,68 @@ typedef enum S390Opcode { RX_STC = 0x42, RX_STH = 0x40, + VRIa_VGBM = 0xe744, + VRIa_VREPI = 0xe745, + VRIb_VGM = 0xe746, + VRIc_VREP = 0xe74d, + + VRRa_VLC = 0xe7de, + VRRa_VLP = 0xe7df, + VRRa_VLR = 0xe756, + VRRc_VA = 0xe7f3, + VRRc_VCEQ = 0xe7f8, /* we leave the m5 cs field 0 */ + VRRc_VCH = 0xe7fb, /* " */ + VRRc_VCHL = 0xe7f9, /* " */ + VRRc_VERLLV = 0xe773, + VRRc_VESLV = 0xe770, + VRRc_VESRAV = 0xe77a, + VRRc_VESRLV = 0xe778, + VRRc_VML = 0xe7a2, + VRRc_VMN = 0xe7fe, + VRRc_VMNL = 0xe7fc, + VRRc_VMX = 0xe7ff, + VRRc_VMXL = 0xe7fd, + VRRc_VN = 0xe768, + VRRc_VNC = 0xe769, + VRRc_VNN = 0xe76e, + VRRc_VNO = 0xe76b, + VRRc_VNX = 0xe76c, + VRRc_VO = 0xe76a, + VRRc_VOC = 0xe76f, + VRRc_VPKS = 0xe797, /* we leave the m5 cs field 0 */ + VRRc_VS = 0xe7f7, + VRRa_VUPH = 0xe7d7, + VRRa_VUPL = 0xe7d6, + VRRc_VX = 0xe76d, + VRRe_VSEL = 0xe78d, + VRRf_VLVGP = 0xe762, + + VRSa_VERLL = 0xe733, + VRSa_VESL = 0xe730, + VRSa_VESRA = 0xe73a, + VRSa_VESRL = 0xe738, + VRSb_VLVG = 0xe722, + VRSc_VLGV = 0xe721, + + VRX_VL = 0xe706, + VRX_VLLEZ = 0xe704, + VRX_VLREP = 0xe705, + VRX_VST = 0xe70e, + VRX_VSTEF = 0xe70b, + VRX_VSTEG = 0xe70a, + NOP = 0x0707, } S390Opcode; #ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { - "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", - "%r8", "%r9", "%r10" "%r11" "%r12" "%r13" "%r14" "%r15" + "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", + "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7", + "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15", + "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23", + "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31", }; #endif @@ -295,6 +354,32 @@ static const int tcg_target_reg_alloc_order[] = { TCG_REG_R4, TCG_REG_R3, TCG_REG_R2, + + /* V8-V15 are call saved, and omitted. 
*/ + TCG_REG_V0, + TCG_REG_V1, + TCG_REG_V2, + TCG_REG_V3, + TCG_REG_V4, + TCG_REG_V5, + TCG_REG_V6, + TCG_REG_V7, + TCG_REG_V16, + TCG_REG_V17, + TCG_REG_V18, + TCG_REG_V19, + TCG_REG_V20, + TCG_REG_V21, + TCG_REG_V22, + TCG_REG_V23, + TCG_REG_V24, + TCG_REG_V25, + TCG_REG_V26, + TCG_REG_V27, + TCG_REG_V28, + TCG_REG_V29, + TCG_REG_V30, + TCG_REG_V31, }; static const int tcg_target_call_iarg_regs[] = { @@ -350,34 +435,44 @@ static const uint8_t tcg_cond_to_ltr_cond[] = { }; #ifdef CONFIG_SOFTMMU -static void * const qemu_ld_helpers[16] = { +static void * const qemu_ld_helpers[(MO_SSIZE | MO_BSWAP) + 1] = { [MO_UB] = helper_ret_ldub_mmu, [MO_SB] = helper_ret_ldsb_mmu, [MO_LEUW] = helper_le_lduw_mmu, [MO_LESW] = helper_le_ldsw_mmu, [MO_LEUL] = helper_le_ldul_mmu, [MO_LESL] = helper_le_ldsl_mmu, - [MO_LEQ] = helper_le_ldq_mmu, + [MO_LEUQ] = helper_le_ldq_mmu, [MO_BEUW] = helper_be_lduw_mmu, [MO_BESW] = helper_be_ldsw_mmu, [MO_BEUL] = helper_be_ldul_mmu, [MO_BESL] = helper_be_ldsl_mmu, - [MO_BEQ] = helper_be_ldq_mmu, + [MO_BEUQ] = helper_be_ldq_mmu, }; -static void * const qemu_st_helpers[16] = { +static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = { [MO_UB] = helper_ret_stb_mmu, [MO_LEUW] = helper_le_stw_mmu, [MO_LEUL] = helper_le_stl_mmu, - [MO_LEQ] = helper_le_stq_mmu, + [MO_LEUQ] = helper_le_stq_mmu, [MO_BEUW] = helper_be_stw_mmu, [MO_BEUL] = helper_be_stl_mmu, - [MO_BEQ] = helper_be_stq_mmu, + [MO_BEUQ] = helper_be_stq_mmu, }; #endif static const tcg_insn_unit *tb_ret_addr; -uint64_t s390_facilities; +uint64_t s390_facilities[3]; + +static inline bool is_general_reg(TCGReg r) +{ + return r <= TCG_REG_R15; +} + +static inline bool is_vector_reg(TCGReg r) +{ + return r >= TCG_REG_V0 && r <= TCG_REG_V31; +} static bool patch_reloc(tcg_insn_unit *src_rw, int type, intptr_t value, intptr_t addend) @@ -496,6 +591,138 @@ static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1, #define tcg_out_insn_RX tcg_out_insn_RS #define tcg_out_insn_RXY tcg_out_insn_RSY +static int RXB(TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4) +{ + /* + * Shift bit 4 of each regno to its corresponding bit of RXB. + * RXB itself begins at bit 8 of the instruction so 8 - 4 = 4 + * is the left-shift of the 4th operand. 
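Since the comment above compresses a fair amount of encoding detail, here is a quick numeric check of the RXB packing it describes: bit 4 of each vector register number (the bit that distinguishes %v16..%v31) is routed into the RXB extension field, while the low four bits go into the ordinary operand fields. The shifts mirror the ones in the helper that follows:

#include <stdio.h>

static unsigned rxb(unsigned v1, unsigned v2, unsigned v3, unsigned v4)
{
    return ((v1 & 0x10) << (4 + 3))
         | ((v2 & 0x10) << (4 + 2))
         | ((v3 & 0x10) << (4 + 1))
         | ((v4 & 0x10) << (4 + 0));
}

int main(void)
{
    /* %v17 as first operand: only the v1 extension bit is set. */
    printf("rxb(17, 0, 0, 0)  = %#x\n", rxb(17, 0, 0, 0));    /* 0x800 */

    /* %v2, %v20, %v31: extension bits needed for operands 2 and 3 only. */
    printf("rxb(2, 20, 31, 0) = %#x\n", rxb(2, 20, 31, 0));   /* 0x600 */
    return 0;
}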
+ */ + return ((v1 & 0x10) << (4 + 3)) + | ((v2 & 0x10) << (4 + 2)) + | ((v3 & 0x10) << (4 + 1)) + | ((v4 & 0x10) << (4 + 0)); +} + +static void tcg_out_insn_VRIa(TCGContext *s, S390Opcode op, + TCGReg v1, uint16_t i2, int m3) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4)); + tcg_out16(s, i2); + tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12)); +} + +static void tcg_out_insn_VRIb(TCGContext *s, S390Opcode op, + TCGReg v1, uint8_t i2, uint8_t i3, int m4) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4)); + tcg_out16(s, (i2 << 8) | (i3 & 0xff)); + tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12)); +} + +static void tcg_out_insn_VRIc(TCGContext *s, S390Opcode op, + TCGReg v1, uint16_t i2, TCGReg v3, int m4) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_debug_assert(is_vector_reg(v3)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf)); + tcg_out16(s, i2); + tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, v3, 0) | (m4 << 12)); +} + +static void tcg_out_insn_VRRa(TCGContext *s, S390Opcode op, + TCGReg v1, TCGReg v2, int m3) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_debug_assert(is_vector_reg(v2)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf)); + tcg_out32(s, (op & 0x00ff) | RXB(v1, v2, 0, 0) | (m3 << 12)); +} + +static void tcg_out_insn_VRRc(TCGContext *s, S390Opcode op, + TCGReg v1, TCGReg v2, TCGReg v3, int m4) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_debug_assert(is_vector_reg(v2)); + tcg_debug_assert(is_vector_reg(v3)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf)); + tcg_out16(s, v3 << 12); + tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, 0) | (m4 << 12)); +} + +static void tcg_out_insn_VRRe(TCGContext *s, S390Opcode op, + TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_debug_assert(is_vector_reg(v2)); + tcg_debug_assert(is_vector_reg(v3)); + tcg_debug_assert(is_vector_reg(v4)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf)); + tcg_out16(s, v3 << 12); + tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, v4) | (v4 << 12)); +} + +static void tcg_out_insn_VRRf(TCGContext *s, S390Opcode op, + TCGReg v1, TCGReg r2, TCGReg r3) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_debug_assert(is_general_reg(r2)); + tcg_debug_assert(is_general_reg(r3)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r2); + tcg_out16(s, r3 << 12); + tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0)); +} + +static void tcg_out_insn_VRSa(TCGContext *s, S390Opcode op, TCGReg v1, + intptr_t d2, TCGReg b2, TCGReg v3, int m4) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_debug_assert(d2 >= 0 && d2 <= 0xfff); + tcg_debug_assert(is_general_reg(b2)); + tcg_debug_assert(is_vector_reg(v3)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf)); + tcg_out16(s, b2 << 12 | d2); + tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, v3, 0) | (m4 << 12)); +} + +static void tcg_out_insn_VRSb(TCGContext *s, S390Opcode op, TCGReg v1, + intptr_t d2, TCGReg b2, TCGReg r3, int m4) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_debug_assert(d2 >= 0 && d2 <= 0xfff); + tcg_debug_assert(is_general_reg(b2)); + tcg_debug_assert(is_general_reg(r3)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r3); + tcg_out16(s, b2 << 12 | d2); + tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12)); +} + +static void tcg_out_insn_VRSc(TCGContext *s, S390Opcode op, TCGReg r1, + intptr_t d2, TCGReg b2, TCGReg v3, int m4) +{ + 
tcg_debug_assert(is_general_reg(r1)); + tcg_debug_assert(d2 >= 0 && d2 <= 0xfff); + tcg_debug_assert(is_general_reg(b2)); + tcg_debug_assert(is_vector_reg(v3)); + tcg_out16(s, (op & 0xff00) | (r1 << 4) | (v3 & 0xf)); + tcg_out16(s, b2 << 12 | d2); + tcg_out16(s, (op & 0x00ff) | RXB(0, 0, v3, 0) | (m4 << 12)); +} + +static void tcg_out_insn_VRX(TCGContext *s, S390Opcode op, TCGReg v1, + TCGReg b2, TCGReg x2, intptr_t d2, int m3) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_debug_assert(d2 >= 0 && d2 <= 0xfff); + tcg_debug_assert(is_general_reg(x2)); + tcg_debug_assert(is_general_reg(b2)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | x2); + tcg_out16(s, (b2 << 12) | d2); + tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12)); +} + /* Emit an opcode with "type-checking" of the format. */ #define tcg_out_insn(S, FMT, OP, ...) \ glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__) @@ -517,12 +744,38 @@ static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest, static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src) { - if (src != dst) { - if (type == TCG_TYPE_I32) { + if (src == dst) { + return true; + } + switch (type) { + case TCG_TYPE_I32: + if (likely(is_general_reg(dst) && is_general_reg(src))) { tcg_out_insn(s, RR, LR, dst, src); - } else { - tcg_out_insn(s, RRE, LGR, dst, src); + break; } + /* fallthru */ + + case TCG_TYPE_I64: + if (likely(is_general_reg(dst))) { + if (likely(is_general_reg(src))) { + tcg_out_insn(s, RRE, LGR, dst, src); + } else { + tcg_out_insn(s, VRSc, VLGV, dst, 0, 0, src, 3); + } + break; + } else if (is_general_reg(src)) { + tcg_out_insn(s, VRSb, VLVG, dst, 0, 0, src, 3); + break; + } + /* fallthru */ + + case TCG_TYPE_V64: + case TCG_TYPE_V128: + tcg_out_insn(s, VRRa, VLR, dst, src, 0); + break; + + default: + g_assert_not_reached(); } return true; } @@ -577,7 +830,7 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, } /* Try all 48-bit insns that can load it in one go. */ - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { if (sval == (int32_t)sval) { tcg_out_insn(s, RIL, LGFI, ret, sval); return; @@ -620,7 +873,7 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, } /* Otherwise, stuff it in the constant pool. 
*/ - if (s390_facilities & FACILITY_GEN_INST_EXT) { + if (HAVE_FACILITY(GEN_INST_EXT)) { tcg_out_insn(s, RIL, LGRL, ret, 0); new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2); } else if (USE_REG_TB && !in_prologue) { @@ -672,25 +925,92 @@ static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy, } } +static void tcg_out_vrx_mem(TCGContext *s, S390Opcode opc_vrx, + TCGReg data, TCGReg base, TCGReg index, + tcg_target_long ofs, int m3) +{ + if (ofs < 0 || ofs >= 0x1000) { + if (ofs >= -0x80000 && ofs < 0x80000) { + tcg_out_insn(s, RXY, LAY, TCG_TMP0, base, index, ofs); + base = TCG_TMP0; + index = TCG_REG_NONE; + ofs = 0; + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs); + if (index != TCG_REG_NONE) { + tcg_out_insn(s, RRE, AGR, TCG_TMP0, index); + } + index = TCG_TMP0; + ofs = 0; + } + } + tcg_out_insn_VRX(s, opc_vrx, data, base, index, ofs, m3); +} /* load data without address translation or endianness conversion */ -static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data, - TCGReg base, intptr_t ofs) +static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, intptr_t ofs) { - if (type == TCG_TYPE_I32) { - tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs); - } else { - tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs); + switch (type) { + case TCG_TYPE_I32: + if (likely(is_general_reg(data))) { + tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs); + break; + } + tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_32); + break; + + case TCG_TYPE_I64: + if (likely(is_general_reg(data))) { + tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs); + break; + } + /* fallthru */ + + case TCG_TYPE_V64: + tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_64); + break; + + case TCG_TYPE_V128: + /* Hint quadword aligned. */ + tcg_out_vrx_mem(s, VRX_VL, data, base, TCG_REG_NONE, ofs, 4); + break; + + default: + g_assert_not_reached(); } } -static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data, - TCGReg base, intptr_t ofs) +static void tcg_out_st(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, intptr_t ofs) { - if (type == TCG_TYPE_I32) { - tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs); - } else { - tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs); + switch (type) { + case TCG_TYPE_I32: + if (likely(is_general_reg(data))) { + tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs); + } else { + tcg_out_vrx_mem(s, VRX_VSTEF, data, base, TCG_REG_NONE, ofs, 1); + } + break; + + case TCG_TYPE_I64: + if (likely(is_general_reg(data))) { + tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs); + break; + } + /* fallthru */ + + case TCG_TYPE_V64: + tcg_out_vrx_mem(s, VRX_VSTEG, data, base, TCG_REG_NONE, ofs, 0); + break; + + case TCG_TYPE_V128: + /* Hint quadword aligned. 
*/ + tcg_out_vrx_mem(s, VRX_VST, data, base, TCG_REG_NONE, ofs, 4); + break; + + default: + g_assert_not_reached(); } } @@ -706,7 +1026,7 @@ static void tcg_out_ld_abs(TCGContext *s, TCGType type, { intptr_t addr = (intptr_t)abs; - if ((s390_facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) { + if (HAVE_FACILITY(GEN_INST_EXT) && !(addr & 1)) { ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1; if (disp == (int32_t)disp) { if (type == TCG_TYPE_I32) { @@ -740,7 +1060,7 @@ static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src, static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { tcg_out_insn(s, RRE, LGBR, dest, src); return; } @@ -760,7 +1080,7 @@ static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { tcg_out_insn(s, RRE, LLGCR, dest, src); return; } @@ -780,7 +1100,7 @@ static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { tcg_out_insn(s, RRE, LGHR, dest, src); return; } @@ -800,7 +1120,7 @@ static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { tcg_out_insn(s, RRE, LLGHR, dest, src); return; } @@ -888,7 +1208,7 @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) tgen_ext32u(s, dest, dest); return; } - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { if ((val & valid) == 0xff) { tgen_ext8u(s, TCG_TYPE_I64, dest, dest); return; @@ -909,7 +1229,7 @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) } /* Try all 48-bit insns that can perform it in one go. */ - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { for (i = 0; i < 2; i++) { tcg_target_ulong mask = ~(0xffffffffull << i*32); if (((val | ~valid) & mask) == mask) { @@ -918,7 +1238,7 @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) } } } - if ((s390_facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) { + if (HAVE_FACILITY(GEN_INST_EXT) && risbg_mask(val)) { tgen_andi_risbg(s, dest, dest, val); return; } @@ -967,7 +1287,7 @@ static void tgen_ori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) } /* Try all 48-bit insns that can perform it in one go. */ - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { for (i = 0; i < 2; i++) { tcg_target_ulong mask = (0xffffffffull << i*32); if ((val & mask) != 0 && (val & ~mask) == 0) { @@ -992,7 +1312,7 @@ static void tgen_ori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) /* Perform the OR via sequential modifications to the high and low parts. Do this via recursion to handle 16-bit vs 32-bit masks in each half. 
*/ - tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM); + tcg_debug_assert(HAVE_FACILITY(EXT_IMM)); tgen_ori(s, type, dest, val & 0x00000000ffffffffull); tgen_ori(s, type, dest, val & 0xffffffff00000000ull); } @@ -1001,7 +1321,7 @@ static void tgen_ori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) static void tgen_xori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) { /* Try all 48-bit insns that can perform it in one go. */ - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { if ((val & 0xffffffff00000000ull) == 0) { tcg_out_insn(s, RIL, XILF, dest, val); return; @@ -1025,7 +1345,7 @@ static void tgen_xori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) tcg_tbrel_diff(s, NULL)); } else { /* Perform the xor by parts. */ - tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM); + tcg_debug_assert(HAVE_FACILITY(EXT_IMM)); if (val & 0xffffffff) { tcg_out_insn(s, RIL, XILF, dest, val); } @@ -1059,7 +1379,7 @@ static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1, goto exit; } - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { if (type == TCG_TYPE_I32) { op = (is_unsigned ? RIL_CLFI : RIL_CFI); tcg_out_insn_RIL(s, op, r1, c2); @@ -1122,7 +1442,7 @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, bool have_loc; /* With LOC2, we can always emit the minimum 3 insns. */ - if (s390_facilities & FACILITY_LOAD_ON_COND2) { + if (HAVE_FACILITY(LOAD_ON_COND2)) { /* Emit: d = 0, d = (cc ? 1 : d). */ cc = tgen_cmp(s, type, cond, c1, c2, c2const, false); tcg_out_movi(s, TCG_TYPE_I64, dest, 0); @@ -1130,7 +1450,7 @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, return; } - have_loc = (s390_facilities & FACILITY_LOAD_ON_COND) != 0; + have_loc = HAVE_FACILITY(LOAD_ON_COND); /* For HAVE_LOC, only the paths through GTU/GT/LEU/LE are smaller. */ restart: @@ -1216,7 +1536,7 @@ static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest, TCGArg v3, int v3const) { int cc; - if (s390_facilities & FACILITY_LOAD_ON_COND) { + if (HAVE_FACILITY(LOAD_ON_COND)) { cc = tgen_cmp(s, type, c, c1, c2, c2const, false); if (v3const) { tcg_out_insn(s, RIE, LOCGHI, dest, v3, cc); @@ -1249,7 +1569,7 @@ static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1, } else { tcg_out_mov(s, TCG_TYPE_I64, dest, a2); } - if (s390_facilities & FACILITY_LOAD_ON_COND) { + if (HAVE_FACILITY(LOAD_ON_COND)) { /* Emit: if (one bit found) dest = r0. 
*/ tcg_out_insn(s, RRF, LOCGR, dest, TCG_REG_R0, 2); } else { @@ -1325,7 +1645,7 @@ static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c, { int cc; - if (s390_facilities & FACILITY_GEN_INST_EXT) { + if (HAVE_FACILITY(GEN_INST_EXT)) { bool is_unsigned = is_unsigned_cond(c); bool in_range; S390Opcode opc; @@ -1429,10 +1749,10 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data, tcg_out_insn(s, RXY, LGF, data, base, index, disp); break; - case MO_Q | MO_BSWAP: + case MO_UQ | MO_BSWAP: tcg_out_insn(s, RXY, LRVG, data, base, index, disp); break; - case MO_Q: + case MO_UQ: tcg_out_insn(s, RXY, LG, data, base, index, disp); break; @@ -1475,10 +1795,10 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data, } break; - case MO_Q | MO_BSWAP: + case MO_UQ | MO_BSWAP: tcg_out_insn(s, RXY, STRVG, data, base, index, disp); break; - case MO_Q: + case MO_UQ: tcg_out_insn(s, RXY, STG, data, base, index, disp); break; @@ -1488,8 +1808,6 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data, } #if defined(CONFIG_SOFTMMU) -#include "../tcg-ldst.c.inc" - /* We're expecting to use a 20-bit negative offset on the tlb memory ops. */ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19)); @@ -1519,7 +1837,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, cross pages using the address of the last byte of the access. */ a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask); tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask; - if ((s390_facilities & FACILITY_GEN_INST_EXT) && a_off == 0) { + if (HAVE_FACILITY(GEN_INST_EXT) && a_off == 0) { tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask); } else { tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off); @@ -1547,7 +1865,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, return addr_reg; } -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi, TCGReg data, TCGReg addr, tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) { @@ -1565,7 +1883,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg addr_reg = lb->addrlo_reg; TCGReg data_reg = lb->datalo_reg; - TCGMemOpIdx oi = lb->oi; + MemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, @@ -1590,7 +1908,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg addr_reg = lb->addrlo_reg; TCGReg data_reg = lb->datalo_reg; - TCGMemOpIdx oi = lb->oi; + MemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, @@ -1612,7 +1930,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) case MO_UL: tgen_ext32u(s, TCG_REG_R4, data_reg); break; - case MO_Q: + case MO_UQ: tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg); break; default: @@ -1626,6 +1944,53 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) return true; } #else +static void tcg_out_test_alignment(TCGContext *s, bool is_ld, + TCGReg addrlo, unsigned a_bits) +{ + unsigned a_mask = (1 << a_bits) - 1; + TCGLabelQemuLdst *l = new_ldst_label(s); + + l->is_ld = is_ld; + l->addrlo_reg = addrlo; + + /* We are expecting a_bits to max out at 7, much lower than TMLL. 
*/ + tcg_debug_assert(a_bits < 16); + tcg_out_insn(s, RI, TMLL, addrlo, a_mask); + + tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */ + l->label_ptr[0] = s->code_ptr; + s->code_ptr += 1; + + l->raddr = tcg_splitwx_to_rx(s->code_ptr); +} + +static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) +{ + if (!patch_reloc(l->label_ptr[0], R_390_PC16DBL, + (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) { + return false; + } + + tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, l->addrlo_reg); + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0); + + /* "Tail call" to the helper, with the return address back inline. */ + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R14, (uintptr_t)l->raddr); + tgen_gotoi(s, S390_CC_ALWAYS, (const void *)(l->is_ld ? helper_unaligned_ld + : helper_unaligned_st)); + return true; +} + +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + return tcg_out_fail_alignment(s, l); +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + return tcg_out_fail_alignment(s, l); +} + static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, TCGReg *index_reg, tcg_target_long *disp) { @@ -1644,7 +2009,7 @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, #endif /* CONFIG_SOFTMMU */ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, - TCGMemOpIdx oi) + MemOpIdx oi) { MemOp opc = get_memop(oi); #ifdef CONFIG_SOFTMMU @@ -1664,14 +2029,18 @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, #else TCGReg index_reg; tcg_target_long disp; + unsigned a_bits = get_alignment_bits(opc); + if (a_bits) { + tcg_out_test_alignment(s, true, addr_reg, a_bits); + } tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp); tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp); #endif } static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, - TCGMemOpIdx oi) + MemOpIdx oi) { MemOp opc = get_memop(oi); #ifdef CONFIG_SOFTMMU @@ -1691,7 +2060,11 @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, #else TCGReg index_reg; tcg_target_long disp; + unsigned a_bits = get_alignment_bits(opc); + if (a_bits) { + tcg_out_test_alignment(s, false, addr_reg, a_bits); + } tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp); tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp); #endif @@ -1810,7 +2183,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_insn(s, RI, AHI, a0, a2); break; } - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { tcg_out_insn(s, RIL, AFI, a0, a2); break; } @@ -2056,7 +2429,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_insn(s, RI, AGHI, a0, a2); break; } - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { if (a2 == (int32_t)a2) { tcg_out_insn(s, RIL, AGFI, a0, a2); break; @@ -2281,8 +2654,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, /* The host memory model is quite strong, we simply need to serialize the instruction stream. */ if (args[0] & TCG_MO_ST_LD) { - tcg_out_insn(s, RR, BCR, - s390_facilities & FACILITY_FAST_BCR_SER ? 14 : 15, 0); + tcg_out_insn(s, RR, BCR, HAVE_FACILITY(FAST_BCR_SER) ? 
14 : 15, 0); } break; @@ -2294,6 +2666,437 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, } } +static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg dst, TCGReg src) +{ + if (is_general_reg(src)) { + /* Replicate general register into two MO_64. */ + tcg_out_insn(s, VRRf, VLVGP, dst, src, src); + if (vece == MO_64) { + return true; + } + src = dst; + } + + /* + * Recall that the "standard" integer, within a vector, is the + * rightmost element of the leftmost doubleword, a-la VLLEZ. + */ + tcg_out_insn(s, VRIc, VREP, dst, (8 >> vece) - 1, src, vece); + return true; +} + +static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg dst, TCGReg base, intptr_t offset) +{ + tcg_out_vrx_mem(s, VRX_VLREP, dst, base, TCG_REG_NONE, offset, vece); + return true; +} + +static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg dst, int64_t val) +{ + int i, mask, msb, lsb; + + /* Look for int16_t elements. */ + if (vece <= MO_16 || + (vece == MO_32 ? (int32_t)val : val) == (int16_t)val) { + tcg_out_insn(s, VRIa, VREPI, dst, val, vece); + return; + } + + /* Look for bit masks. */ + if (vece == MO_32) { + if (risbg_mask((int32_t)val)) { + /* Handle wraparound by swapping msb and lsb. */ + if ((val & 0x80000001u) == 0x80000001u) { + msb = 32 - ctz32(~val); + lsb = clz32(~val) - 1; + } else { + msb = clz32(val); + lsb = 31 - ctz32(val); + } + tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_32); + return; + } + } else { + if (risbg_mask(val)) { + /* Handle wraparound by swapping msb and lsb. */ + if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) { + /* Handle wraparound by swapping msb and lsb. */ + msb = 64 - ctz64(~val); + lsb = clz64(~val) - 1; + } else { + msb = clz64(val); + lsb = 63 - ctz64(val); + } + tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_64); + return; + } + } + + /* Look for all bytes 0x00 or 0xff. */ + for (i = mask = 0; i < 8; i++) { + uint8_t byte = val >> (i * 8); + if (byte == 0xff) { + mask |= 1 << i; + } else if (byte != 0) { + break; + } + } + if (i == 8) { + tcg_out_insn(s, VRIa, VGBM, dst, mask * 0x0101, 0); + return; + } + + /* Otherwise, stuff it in the constant pool. 
*/ + tcg_out_insn(s, RIL, LARL, TCG_TMP0, 0); + new_pool_label(s, val, R_390_PC32DBL, s->code_ptr - 2, 2); + tcg_out_insn(s, VRX, VLREP, dst, TCG_TMP0, TCG_REG_NONE, 0, MO_64); +} + +static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, + unsigned vecl, unsigned vece, + const TCGArg args[TCG_MAX_OP_ARGS], + const int const_args[TCG_MAX_OP_ARGS]) +{ + TCGType type = vecl + TCG_TYPE_V64; + TCGArg a0 = args[0], a1 = args[1], a2 = args[2]; + + switch (opc) { + case INDEX_op_ld_vec: + tcg_out_ld(s, type, a0, a1, a2); + break; + case INDEX_op_st_vec: + tcg_out_st(s, type, a0, a1, a2); + break; + case INDEX_op_dupm_vec: + tcg_out_dupm_vec(s, type, vece, a0, a1, a2); + break; + + case INDEX_op_abs_vec: + tcg_out_insn(s, VRRa, VLP, a0, a1, vece); + break; + case INDEX_op_neg_vec: + tcg_out_insn(s, VRRa, VLC, a0, a1, vece); + break; + case INDEX_op_not_vec: + tcg_out_insn(s, VRRc, VNO, a0, a1, a1, 0); + break; + + case INDEX_op_add_vec: + tcg_out_insn(s, VRRc, VA, a0, a1, a2, vece); + break; + case INDEX_op_sub_vec: + tcg_out_insn(s, VRRc, VS, a0, a1, a2, vece); + break; + case INDEX_op_and_vec: + tcg_out_insn(s, VRRc, VN, a0, a1, a2, 0); + break; + case INDEX_op_andc_vec: + tcg_out_insn(s, VRRc, VNC, a0, a1, a2, 0); + break; + case INDEX_op_mul_vec: + tcg_out_insn(s, VRRc, VML, a0, a1, a2, vece); + break; + case INDEX_op_or_vec: + tcg_out_insn(s, VRRc, VO, a0, a1, a2, 0); + break; + case INDEX_op_orc_vec: + tcg_out_insn(s, VRRc, VOC, a0, a1, a2, 0); + break; + case INDEX_op_xor_vec: + tcg_out_insn(s, VRRc, VX, a0, a1, a2, 0); + break; + case INDEX_op_nand_vec: + tcg_out_insn(s, VRRc, VNN, a0, a1, a2, 0); + break; + case INDEX_op_nor_vec: + tcg_out_insn(s, VRRc, VNO, a0, a1, a2, 0); + break; + case INDEX_op_eqv_vec: + tcg_out_insn(s, VRRc, VNX, a0, a1, a2, 0); + break; + + case INDEX_op_shli_vec: + tcg_out_insn(s, VRSa, VESL, a0, a2, TCG_REG_NONE, a1, vece); + break; + case INDEX_op_shri_vec: + tcg_out_insn(s, VRSa, VESRL, a0, a2, TCG_REG_NONE, a1, vece); + break; + case INDEX_op_sari_vec: + tcg_out_insn(s, VRSa, VESRA, a0, a2, TCG_REG_NONE, a1, vece); + break; + case INDEX_op_rotli_vec: + tcg_out_insn(s, VRSa, VERLL, a0, a2, TCG_REG_NONE, a1, vece); + break; + case INDEX_op_shls_vec: + tcg_out_insn(s, VRSa, VESL, a0, 0, a2, a1, vece); + break; + case INDEX_op_shrs_vec: + tcg_out_insn(s, VRSa, VESRL, a0, 0, a2, a1, vece); + break; + case INDEX_op_sars_vec: + tcg_out_insn(s, VRSa, VESRA, a0, 0, a2, a1, vece); + break; + case INDEX_op_rotls_vec: + tcg_out_insn(s, VRSa, VERLL, a0, 0, a2, a1, vece); + break; + case INDEX_op_shlv_vec: + tcg_out_insn(s, VRRc, VESLV, a0, a1, a2, vece); + break; + case INDEX_op_shrv_vec: + tcg_out_insn(s, VRRc, VESRLV, a0, a1, a2, vece); + break; + case INDEX_op_sarv_vec: + tcg_out_insn(s, VRRc, VESRAV, a0, a1, a2, vece); + break; + case INDEX_op_rotlv_vec: + tcg_out_insn(s, VRRc, VERLLV, a0, a1, a2, vece); + break; + + case INDEX_op_smin_vec: + tcg_out_insn(s, VRRc, VMN, a0, a1, a2, vece); + break; + case INDEX_op_smax_vec: + tcg_out_insn(s, VRRc, VMX, a0, a1, a2, vece); + break; + case INDEX_op_umin_vec: + tcg_out_insn(s, VRRc, VMNL, a0, a1, a2, vece); + break; + case INDEX_op_umax_vec: + tcg_out_insn(s, VRRc, VMXL, a0, a1, a2, vece); + break; + + case INDEX_op_bitsel_vec: + tcg_out_insn(s, VRRe, VSEL, a0, a2, args[3], a1); + break; + + case INDEX_op_cmp_vec: + switch ((TCGCond)args[3]) { + case TCG_COND_EQ: + tcg_out_insn(s, VRRc, VCEQ, a0, a1, a2, vece); + break; + case TCG_COND_GT: + tcg_out_insn(s, VRRc, VCH, a0, a1, a2, vece); + break; + case TCG_COND_GTU: + 
tcg_out_insn(s, VRRc, VCHL, a0, a1, a2, vece); + break; + default: + g_assert_not_reached(); + } + break; + + case INDEX_op_s390_vuph_vec: + tcg_out_insn(s, VRRa, VUPH, a0, a1, vece); + break; + case INDEX_op_s390_vupl_vec: + tcg_out_insn(s, VRRa, VUPL, a0, a1, vece); + break; + case INDEX_op_s390_vpks_vec: + tcg_out_insn(s, VRRc, VPKS, a0, a1, a2, vece); + break; + + case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ + case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ + default: + g_assert_not_reached(); + } +} + +int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) +{ + switch (opc) { + case INDEX_op_abs_vec: + case INDEX_op_add_vec: + case INDEX_op_and_vec: + case INDEX_op_andc_vec: + case INDEX_op_bitsel_vec: + case INDEX_op_eqv_vec: + case INDEX_op_nand_vec: + case INDEX_op_neg_vec: + case INDEX_op_nor_vec: + case INDEX_op_not_vec: + case INDEX_op_or_vec: + case INDEX_op_orc_vec: + case INDEX_op_rotli_vec: + case INDEX_op_rotls_vec: + case INDEX_op_rotlv_vec: + case INDEX_op_sari_vec: + case INDEX_op_sars_vec: + case INDEX_op_sarv_vec: + case INDEX_op_shli_vec: + case INDEX_op_shls_vec: + case INDEX_op_shlv_vec: + case INDEX_op_shri_vec: + case INDEX_op_shrs_vec: + case INDEX_op_shrv_vec: + case INDEX_op_smax_vec: + case INDEX_op_smin_vec: + case INDEX_op_sub_vec: + case INDEX_op_umax_vec: + case INDEX_op_umin_vec: + case INDEX_op_xor_vec: + return 1; + case INDEX_op_cmp_vec: + case INDEX_op_cmpsel_vec: + case INDEX_op_rotrv_vec: + return -1; + case INDEX_op_mul_vec: + return vece < MO_64; + case INDEX_op_ssadd_vec: + case INDEX_op_sssub_vec: + return vece < MO_64 ? -1 : 0; + default: + return 0; + } +} + +static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0, + TCGv_vec v1, TCGv_vec v2, TCGCond cond) +{ + bool need_swap = false, need_inv = false; + + switch (cond) { + case TCG_COND_EQ: + case TCG_COND_GT: + case TCG_COND_GTU: + break; + case TCG_COND_NE: + case TCG_COND_LE: + case TCG_COND_LEU: + need_inv = true; + break; + case TCG_COND_LT: + case TCG_COND_LTU: + need_swap = true; + break; + case TCG_COND_GE: + case TCG_COND_GEU: + need_swap = need_inv = true; + break; + default: + g_assert_not_reached(); + } + + if (need_inv) { + cond = tcg_invert_cond(cond); + } + if (need_swap) { + TCGv_vec t1; + t1 = v1, v1 = v2, v2 = t1; + cond = tcg_swap_cond(cond); + } + + vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0), + tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond); + + return need_inv; +} + +static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0, + TCGv_vec v1, TCGv_vec v2, TCGCond cond) +{ + if (expand_vec_cmp_noinv(type, vece, v0, v1, v2, cond)) { + tcg_gen_not_vec(vece, v0, v0); + } +} + +static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGv_vec v0, + TCGv_vec c1, TCGv_vec c2, + TCGv_vec v3, TCGv_vec v4, TCGCond cond) +{ + TCGv_vec t = tcg_temp_new_vec(type); + + if (expand_vec_cmp_noinv(type, vece, t, c1, c2, cond)) { + /* Invert the sense of the compare by swapping arguments. */ + tcg_gen_bitsel_vec(vece, v0, t, v4, v3); + } else { + tcg_gen_bitsel_vec(vece, v0, t, v3, v4); + } + tcg_temp_free_vec(t); +} + +static void expand_vec_sat(TCGType type, unsigned vece, TCGv_vec v0, + TCGv_vec v1, TCGv_vec v2, TCGOpcode add_sub_opc) +{ + TCGv_vec h1 = tcg_temp_new_vec(type); + TCGv_vec h2 = tcg_temp_new_vec(type); + TCGv_vec l1 = tcg_temp_new_vec(type); + TCGv_vec l2 = tcg_temp_new_vec(type); + + tcg_debug_assert (vece < MO_64); + + /* Unpack with sign-extension. 
*/ + vec_gen_2(INDEX_op_s390_vuph_vec, type, vece, + tcgv_vec_arg(h1), tcgv_vec_arg(v1)); + vec_gen_2(INDEX_op_s390_vuph_vec, type, vece, + tcgv_vec_arg(h2), tcgv_vec_arg(v2)); + + vec_gen_2(INDEX_op_s390_vupl_vec, type, vece, + tcgv_vec_arg(l1), tcgv_vec_arg(v1)); + vec_gen_2(INDEX_op_s390_vupl_vec, type, vece, + tcgv_vec_arg(l2), tcgv_vec_arg(v2)); + + /* Arithmetic on a wider element size. */ + vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(h1), + tcgv_vec_arg(h1), tcgv_vec_arg(h2)); + vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(l1), + tcgv_vec_arg(l1), tcgv_vec_arg(l2)); + + /* Pack with saturation. */ + vec_gen_3(INDEX_op_s390_vpks_vec, type, vece + 1, + tcgv_vec_arg(v0), tcgv_vec_arg(h1), tcgv_vec_arg(l1)); + + tcg_temp_free_vec(h1); + tcg_temp_free_vec(h2); + tcg_temp_free_vec(l1); + tcg_temp_free_vec(l2); +} + +void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, + TCGArg a0, ...) +{ + va_list va; + TCGv_vec v0, v1, v2, v3, v4, t0; + + va_start(va, a0); + v0 = temp_tcgv_vec(arg_temp(a0)); + v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); + v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); + + switch (opc) { + case INDEX_op_cmp_vec: + expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg)); + break; + + case INDEX_op_cmpsel_vec: + v3 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); + v4 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); + expand_vec_cmpsel(type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg)); + break; + + case INDEX_op_rotrv_vec: + t0 = tcg_temp_new_vec(type); + tcg_gen_neg_vec(vece, t0, v2); + tcg_gen_rotlv_vec(vece, v0, v1, t0); + tcg_temp_free_vec(t0); + break; + + case INDEX_op_ssadd_vec: + expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_add_vec); + break; + case INDEX_op_sssub_vec: + expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_sub_vec); + break; + + default: + g_assert_not_reached(); + } + va_end(va); +} + static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) { switch (op) { @@ -2345,7 +3148,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_or_i64: case INDEX_op_xor_i32: case INDEX_op_xor_i64: - return (s390_facilities & FACILITY_DISTINCT_OPS + return (HAVE_FACILITY(DISTINCT_OPS) ? C_O1_I2(r, r, ri) : C_O1_I2(r, 0, ri)); @@ -2353,19 +3156,19 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) /* If we have the general-instruction-extensions, then we have MULTIPLY SINGLE IMMEDIATE with a signed 32-bit, otherwise we have only MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */ - return (s390_facilities & FACILITY_GEN_INST_EXT + return (HAVE_FACILITY(GEN_INST_EXT) ? C_O1_I2(r, 0, ri) : C_O1_I2(r, 0, rI)); case INDEX_op_mul_i64: - return (s390_facilities & FACILITY_GEN_INST_EXT + return (HAVE_FACILITY(GEN_INST_EXT) ? C_O1_I2(r, 0, rJ) : C_O1_I2(r, 0, rI)); case INDEX_op_shl_i32: case INDEX_op_shr_i32: case INDEX_op_sar_i32: - return (s390_facilities & FACILITY_DISTINCT_OPS + return (HAVE_FACILITY(DISTINCT_OPS) ? C_O1_I2(r, r, ri) : C_O1_I2(r, 0, ri)); @@ -2409,7 +3212,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_movcond_i32: case INDEX_op_movcond_i64: - return (s390_facilities & FACILITY_LOAD_ON_COND2 + return (HAVE_FACILITY(LOAD_ON_COND2) ? C_O1_I4(r, r, ri, rI, 0) : C_O1_I4(r, r, ri, r, 0)); @@ -2424,21 +3227,77 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_add2_i32: case INDEX_op_sub2_i32: - return (s390_facilities & FACILITY_EXT_IMM + return (HAVE_FACILITY(EXT_IMM) ? 
C_O2_I4(r, r, 0, 1, ri, r) : C_O2_I4(r, r, 0, 1, r, r)); case INDEX_op_add2_i64: case INDEX_op_sub2_i64: - return (s390_facilities & FACILITY_EXT_IMM + return (HAVE_FACILITY(EXT_IMM) ? C_O2_I4(r, r, 0, 1, rA, r) : C_O2_I4(r, r, 0, 1, r, r)); + case INDEX_op_st_vec: + return C_O0_I2(v, r); + case INDEX_op_ld_vec: + case INDEX_op_dupm_vec: + return C_O1_I1(v, r); + case INDEX_op_dup_vec: + return C_O1_I1(v, vr); + case INDEX_op_abs_vec: + case INDEX_op_neg_vec: + case INDEX_op_not_vec: + case INDEX_op_rotli_vec: + case INDEX_op_sari_vec: + case INDEX_op_shli_vec: + case INDEX_op_shri_vec: + case INDEX_op_s390_vuph_vec: + case INDEX_op_s390_vupl_vec: + return C_O1_I1(v, v); + case INDEX_op_add_vec: + case INDEX_op_sub_vec: + case INDEX_op_and_vec: + case INDEX_op_andc_vec: + case INDEX_op_or_vec: + case INDEX_op_orc_vec: + case INDEX_op_xor_vec: + case INDEX_op_nand_vec: + case INDEX_op_nor_vec: + case INDEX_op_eqv_vec: + case INDEX_op_cmp_vec: + case INDEX_op_mul_vec: + case INDEX_op_rotlv_vec: + case INDEX_op_rotrv_vec: + case INDEX_op_shlv_vec: + case INDEX_op_shrv_vec: + case INDEX_op_sarv_vec: + case INDEX_op_smax_vec: + case INDEX_op_smin_vec: + case INDEX_op_umax_vec: + case INDEX_op_umin_vec: + case INDEX_op_s390_vpks_vec: + return C_O1_I2(v, v, v); + case INDEX_op_rotls_vec: + case INDEX_op_shls_vec: + case INDEX_op_shrs_vec: + case INDEX_op_sars_vec: + return C_O1_I2(v, v, r); + case INDEX_op_bitsel_vec: + return C_O1_I3(v, v, v, v); + default: g_assert_not_reached(); } } +/* + * Mainline glibc added HWCAP_S390_VX before it was kernel abi. + * Some distros have fixed this up locally, others have not. + */ +#ifndef HWCAP_S390_VXRS +#define HWCAP_S390_VXRS 2048 +#endif + static void query_s390_facilities(void) { unsigned long hwcap = qemu_getauxval(AT_HWCAP); @@ -2446,13 +3305,22 @@ static void query_s390_facilities(void) /* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this is present on all 64-bit systems, but let's check for it anyway. */ if (hwcap & HWCAP_S390_STFLE) { - register int r0 __asm__("0"); - register void *r1 __asm__("1"); + register int r0 __asm__("0") = ARRAY_SIZE(s390_facilities) - 1; + register void *r1 __asm__("1") = s390_facilities; /* stfle 0(%r1) */ - r1 = &s390_facilities; asm volatile(".word 0xb2b0,0x1000" - : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc"); + : "=r"(r0) : "r"(r0), "r"(r1) : "memory", "cc"); + } + + /* + * Use of vector registers requires os support beyond the facility bit. + * If the kernel does not advertise support, disable the facility bits. + * There is nothing else we currently care about in the 3rd word, so + * disable VECTOR with one store. + */ + if (!(hwcap & HWCAP_S390_VXRS)) { + s390_facilities[2] = 0; } } @@ -2462,6 +3330,10 @@ static void tcg_target_init(TCGContext *s) tcg_target_available_regs[TCG_TYPE_I32] = 0xffff; tcg_target_available_regs[TCG_TYPE_I64] = 0xffff; + if (HAVE_FACILITY(VECTOR)) { + tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull; + tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull; + } tcg_target_call_clobber_regs = 0; tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0); @@ -2476,6 +3348,31 @@ static void tcg_target_init(TCGContext *s) /* The return register can be considered call-clobbered. 
*/ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V20); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V21); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V22); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V23); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V24); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V25); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V26); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V27); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V28); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V29); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V30); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V31); + s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_TMP0); /* XXX many insns can't be used with R0, so we better avoid it for now */ diff --git a/tcg/s390/tcg-target.h b/tcg/s390x/tcg-target.h similarity index 64% rename from tcg/s390/tcg-target.h rename to tcg/s390x/tcg-target.h index 2e4ede2ea2..23e2063667 100644 --- a/tcg/s390/tcg-target.h +++ b/tcg/s390x/tcg-target.h @@ -32,39 +32,44 @@ #define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB) typedef enum TCGReg { - TCG_REG_R0 = 0, - TCG_REG_R1, - TCG_REG_R2, - TCG_REG_R3, - TCG_REG_R4, - TCG_REG_R5, - TCG_REG_R6, - TCG_REG_R7, - TCG_REG_R8, - TCG_REG_R9, - TCG_REG_R10, - TCG_REG_R11, - TCG_REG_R12, - TCG_REG_R13, - TCG_REG_R14, - TCG_REG_R15 + TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3, + TCG_REG_R4, TCG_REG_R5, TCG_REG_R6, TCG_REG_R7, + TCG_REG_R8, TCG_REG_R9, TCG_REG_R10, TCG_REG_R11, + TCG_REG_R12, TCG_REG_R13, TCG_REG_R14, TCG_REG_R15, + + TCG_REG_V0 = 32, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, + TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, + TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, + TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, + TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, + TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, + TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27, + TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31, + + TCG_AREG0 = TCG_REG_R10, + TCG_REG_CALL_STACK = TCG_REG_R15 } TCGReg; -#define TCG_TARGET_NB_REGS 16 +#define TCG_TARGET_NB_REGS 64 /* A list of relevant facilities used by this translator. Some of these are required for proper operation, and these are checked at startup. 
*/ -#define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2)) -#define FACILITY_LONG_DISP (1ULL << (63 - 18)) -#define FACILITY_EXT_IMM (1ULL << (63 - 21)) -#define FACILITY_GEN_INST_EXT (1ULL << (63 - 34)) -#define FACILITY_LOAD_ON_COND (1ULL << (63 - 45)) +#define FACILITY_ZARCH_ACTIVE 2 +#define FACILITY_LONG_DISP 18 +#define FACILITY_EXT_IMM 21 +#define FACILITY_GEN_INST_EXT 34 +#define FACILITY_LOAD_ON_COND 45 #define FACILITY_FAST_BCR_SER FACILITY_LOAD_ON_COND #define FACILITY_DISTINCT_OPS FACILITY_LOAD_ON_COND -#define FACILITY_LOAD_ON_COND2 (1ULL << (63 - 53)) +#define FACILITY_LOAD_ON_COND2 53 +#define FACILITY_VECTOR 129 +#define FACILITY_VECTOR_ENH1 135 -extern uint64_t s390_facilities; +extern uint64_t s390_facilities[3]; + +#define HAVE_FACILITY(X) \ + ((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1) /* optional instructions */ #define TCG_TARGET_HAS_div2_i32 1 @@ -85,8 +90,8 @@ extern uint64_t s390_facilities; #define TCG_TARGET_HAS_clz_i32 0 #define TCG_TARGET_HAS_ctz_i32 0 #define TCG_TARGET_HAS_ctpop_i32 0 -#define TCG_TARGET_HAS_deposit_i32 (s390_facilities & FACILITY_GEN_INST_EXT) -#define TCG_TARGET_HAS_extract_i32 (s390_facilities & FACILITY_GEN_INST_EXT) +#define TCG_TARGET_HAS_deposit_i32 HAVE_FACILITY(GEN_INST_EXT) +#define TCG_TARGET_HAS_extract_i32 HAVE_FACILITY(GEN_INST_EXT) #define TCG_TARGET_HAS_sextract_i32 0 #define TCG_TARGET_HAS_extract2_i32 0 #define TCG_TARGET_HAS_movcond_i32 1 @@ -98,7 +103,7 @@ extern uint64_t s390_facilities; #define TCG_TARGET_HAS_mulsh_i32 0 #define TCG_TARGET_HAS_extrl_i64_i32 0 #define TCG_TARGET_HAS_extrh_i64_i32 0 -#define TCG_TARGET_HAS_direct_jump (s390_facilities & FACILITY_GEN_INST_EXT) +#define TCG_TARGET_HAS_direct_jump HAVE_FACILITY(GEN_INST_EXT) #define TCG_TARGET_HAS_qemu_st8_i32 0 #define TCG_TARGET_HAS_div2_i64 1 @@ -119,11 +124,11 @@ extern uint64_t s390_facilities; #define TCG_TARGET_HAS_eqv_i64 0 #define TCG_TARGET_HAS_nand_i64 0 #define TCG_TARGET_HAS_nor_i64 0 -#define TCG_TARGET_HAS_clz_i64 (s390_facilities & FACILITY_EXT_IMM) +#define TCG_TARGET_HAS_clz_i64 HAVE_FACILITY(EXT_IMM) #define TCG_TARGET_HAS_ctz_i64 0 #define TCG_TARGET_HAS_ctpop_i64 0 -#define TCG_TARGET_HAS_deposit_i64 (s390_facilities & FACILITY_GEN_INST_EXT) -#define TCG_TARGET_HAS_extract_i64 (s390_facilities & FACILITY_GEN_INST_EXT) +#define TCG_TARGET_HAS_deposit_i64 HAVE_FACILITY(GEN_INST_EXT) +#define TCG_TARGET_HAS_extract_i64 HAVE_FACILITY(GEN_INST_EXT) #define TCG_TARGET_HAS_sextract_i64 0 #define TCG_TARGET_HAS_extract2_i64 0 #define TCG_TARGET_HAS_movcond_i64 1 @@ -134,8 +139,31 @@ extern uint64_t s390_facilities; #define TCG_TARGET_HAS_muluh_i64 0 #define TCG_TARGET_HAS_mulsh_i64 0 +#define TCG_TARGET_HAS_v64 HAVE_FACILITY(VECTOR) +#define TCG_TARGET_HAS_v128 HAVE_FACILITY(VECTOR) +#define TCG_TARGET_HAS_v256 0 + +#define TCG_TARGET_HAS_andc_vec 1 +#define TCG_TARGET_HAS_orc_vec HAVE_FACILITY(VECTOR_ENH1) +#define TCG_TARGET_HAS_nand_vec HAVE_FACILITY(VECTOR_ENH1) +#define TCG_TARGET_HAS_nor_vec 1 +#define TCG_TARGET_HAS_eqv_vec HAVE_FACILITY(VECTOR_ENH1) +#define TCG_TARGET_HAS_not_vec 1 +#define TCG_TARGET_HAS_neg_vec 1 +#define TCG_TARGET_HAS_abs_vec 1 +#define TCG_TARGET_HAS_roti_vec 1 +#define TCG_TARGET_HAS_rots_vec 1 +#define TCG_TARGET_HAS_rotv_vec 1 +#define TCG_TARGET_HAS_shi_vec 1 +#define TCG_TARGET_HAS_shs_vec 1 +#define TCG_TARGET_HAS_shv_vec 1 +#define TCG_TARGET_HAS_mul_vec 1 +#define TCG_TARGET_HAS_sat_vec 0 +#define TCG_TARGET_HAS_minmax_vec 1 +#define TCG_TARGET_HAS_bitsel_vec 1 +#define 
TCG_TARGET_HAS_cmpsel_vec 0 + /* used for function call generation */ -#define TCG_REG_CALL_STACK TCG_REG_R15 #define TCG_TARGET_STACK_ALIGN 8 #define TCG_TARGET_CALL_STACK_OFFSET 160 @@ -144,10 +172,6 @@ extern uint64_t s390_facilities; #define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) -enum { - TCG_AREG0 = TCG_REG_R10, -}; - static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx, uintptr_t jmp_rw, uintptr_t addr) { @@ -157,9 +181,7 @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx, /* no need to flush icache explicitly */ } -#ifdef CONFIG_SOFTMMU #define TCG_TARGET_NEED_LDST_LABELS -#endif #define TCG_TARGET_NEED_POOL_LABELS #endif diff --git a/tcg/s390x/tcg-target.opc.h b/tcg/s390x/tcg-target.opc.h new file mode 100644 index 0000000000..0eb2350fb3 --- /dev/null +++ b/tcg/s390x/tcg-target.opc.h @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2021 Linaro + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. + * + * See the COPYING file in the top-level directory for details. + * + * Target-specific opcodes for host vector expansion. These will be + * emitted by tcg_expand_vec_op. For those familiar with GCC internals, + * consider these to be UNSPEC with names. + */ +DEF(s390_vuph_vec, 1, 1, 0, IMPLVEC) +DEF(s390_vupl_vec, 1, 1, 0, IMPLVEC) +DEF(s390_vpks_vec, 1, 2, 0, IMPLVEC) diff --git a/tcg/sparc/tcg-target-con-set.h b/tcg/sparc64/tcg-target-con-set.h similarity index 69% rename from tcg/sparc/tcg-target-con-set.h rename to tcg/sparc64/tcg-target-con-set.h index 3b751dc3fb..31e6fea1fc 100644 --- a/tcg/sparc/tcg-target-con-set.h +++ b/tcg/sparc64/tcg-target-con-set.h @@ -11,22 +11,12 @@ */ C_O0_I1(r) C_O0_I2(rZ, r) -C_O0_I2(RZ, r) C_O0_I2(rZ, rJ) -C_O0_I2(RZ, RJ) -C_O0_I2(sZ, A) -C_O0_I2(SZ, A) -C_O1_I1(r, A) -C_O1_I1(R, A) +C_O0_I2(sZ, s) +C_O1_I1(r, s) C_O1_I1(r, r) -C_O1_I1(r, R) -C_O1_I1(R, r) -C_O1_I1(R, R) -C_O1_I2(R, R, R) +C_O1_I2(r, r, r) C_O1_I2(r, rZ, rJ) -C_O1_I2(R, RZ, RJ) C_O1_I4(r, rZ, rJ, rI, 0) -C_O1_I4(R, RZ, RJ, RI, 0) C_O2_I2(r, r, rZ, rJ) -C_O2_I4(R, R, RZ, RZ, RJ, RI) C_O2_I4(r, r, rZ, rZ, rJ, rJ) diff --git a/tcg/sparc/tcg-target-con-str.h b/tcg/sparc64/tcg-target-con-str.h similarity index 77% rename from tcg/sparc/tcg-target-con-str.h rename to tcg/sparc64/tcg-target-con-str.h index fdb25d9313..8f5c7aef97 100644 --- a/tcg/sparc/tcg-target-con-str.h +++ b/tcg/sparc64/tcg-target-con-str.h @@ -9,10 +9,7 @@ * REGS(letter, register_mask) */ REGS('r', ALL_GENERAL_REGS) -REGS('R', ALL_GENERAL_REGS64) REGS('s', ALL_QLDST_REGS) -REGS('S', ALL_QLDST_REGS64) -REGS('A', TARGET_LONG_BITS == 64 ? ALL_QLDST_REGS64 : ALL_QLDST_REGS) /* * Define constraint letters for constants: diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc similarity index 77% rename from tcg/sparc/tcg-target.c.inc rename to tcg/sparc64/tcg-target.c.inc index 688827968b..cb9453efdd 100644 --- a/tcg/sparc/tcg-target.c.inc +++ b/tcg/sparc64/tcg-target.c.inc @@ -22,6 +22,11 @@ * THE SOFTWARE. */ +/* We only support generating code for 64-bit mode. 
*/ +#ifndef __arch64__ +#error "unsupported code generation mode" +#endif + #include "../tcg-pool.c.inc" #ifdef CONFIG_DEBUG_TCG @@ -61,12 +66,6 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { }; #endif -#ifdef __arch64__ -# define SPARC64 1 -#else -# define SPARC64 0 -#endif - #define TCG_CT_CONST_S11 0x100 #define TCG_CT_CONST_S13 0x200 #define TCG_CT_CONST_ZERO 0x400 @@ -81,23 +80,8 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { #else #define SOFTMMU_RESERVE_REGS 0 #endif - -/* - * Note that sparcv8plus can only hold 64 bit quantities in %g and %o - * registers. These are saved manually by the kernel in full 64-bit - * slots. The %i and %l registers are saved by the register window - * mechanism, which only allocates space for 32 bits. Given that this - * window spill/fill can happen on any signal, we must consider the - * high bits of the %i and %l registers garbage at all times. - */ #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) -#if SPARC64 -# define ALL_GENERAL_REGS64 ALL_GENERAL_REGS -#else -# define ALL_GENERAL_REGS64 MAKE_64BIT_MASK(0, 16) -#endif #define ALL_QLDST_REGS (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS) -#define ALL_QLDST_REGS64 (ALL_GENERAL_REGS64 & ~SOFTMMU_RESERVE_REGS) /* Define some temporary registers. T2 is used for constant generation. */ #define TCG_REG_T1 TCG_REG_G1 @@ -211,6 +195,7 @@ static const int tcg_target_call_oarg_regs[] = { #define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00)) #define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10)) #define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01)) +#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11)) #define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05)) #define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02)) #define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12)) @@ -294,22 +279,18 @@ static const int tcg_target_call_oarg_regs[] = { bool use_vis3_instructions; #endif -static inline int check_fit_i64(int64_t val, unsigned int bits) +static bool check_fit_i64(int64_t val, unsigned int bits) { return val == sextract64(val, 0, bits); } -static inline int check_fit_i32(int32_t val, unsigned int bits) +static bool check_fit_i32(int32_t val, unsigned int bits) { return val == sextract32(val, 0, bits); } #define check_fit_tl check_fit_i64 -#if SPARC64 -# define check_fit_ptr check_fit_i64 -#else -# define check_fit_ptr check_fit_i32 -#endif +#define check_fit_ptr check_fit_i64 static bool patch_reloc(tcg_insn_unit *src_rw, int type, intptr_t value, intptr_t addend) @@ -323,15 +304,26 @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type, switch (type) { case R_SPARC_WDISP16: - assert(check_fit_ptr(pcrel >> 2, 16)); + if (!check_fit_ptr(pcrel >> 2, 16)) { + return false; + } insn &= ~INSN_OFF16(-1); insn |= INSN_OFF16(pcrel); break; case R_SPARC_WDISP19: - assert(check_fit_ptr(pcrel >> 2, 19)); + if (!check_fit_ptr(pcrel >> 2, 19)) { + return false; + } insn &= ~INSN_OFF19(-1); insn |= INSN_OFF19(pcrel); break; + case R_SPARC_13: + if (!check_fit_ptr(value, 13)) { + return false; + } + insn &= ~INSN_IMM13(-1); + insn |= INSN_IMM13(value); + break; default: g_assert_not_reached(); } @@ -362,14 +354,19 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct) } } -static inline void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1, - TCGReg rs2, int op) +static void tcg_out_nop(TCGContext *s) +{ + tcg_out32(s, NOP); +} + +static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1, + TCGReg rs2, int op) { tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2)); } -static inline 
void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1, - int32_t offset, int op) +static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1, + int32_t offset, int op) { tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset)); } @@ -381,8 +378,7 @@ static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1, | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2))); } -static inline bool tcg_out_mov(TCGContext *s, TCGType type, - TCGReg ret, TCGReg arg) +static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { if (ret != arg) { tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR); @@ -390,25 +386,50 @@ static inline bool tcg_out_mov(TCGContext *s, TCGType type, return true; } -static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg) +static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg) +{ + if (ret != arg) { + tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR); + } else { + tcg_out_nop(s); + } +} + +static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg) { tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10)); } -static inline void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg) +static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg) { tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR); } +static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg) +{ + if (check_fit_i32(arg, 13)) { + /* A 13-bit constant sign-extended to 64-bits. */ + tcg_out_movi_imm13(s, ret, arg); + } else { + /* A 32-bit constant zero-extended to 64 bits. */ + tcg_out_sethi(s, ret, arg); + if (arg & 0x3ff) { + tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR); + } + } +} + static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, - tcg_target_long arg, bool in_prologue) + tcg_target_long arg, bool in_prologue, + TCGReg scratch) { tcg_target_long hi, lo = (int32_t)arg; tcg_target_long test, lsb; - /* Make sure we test 32-bit constants for imm13 properly. */ - if (type == TCG_TYPE_I32) { - arg = lo; + /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */ + if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) { + tcg_out_movi_imm32(s, ret, arg); + return; } /* A 13-bit constant sign-extended to 64-bits. */ @@ -426,15 +447,6 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, } } - /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */ - if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) { - tcg_out_sethi(s, ret, arg); - if (arg & 0x3ff) { - tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR); - } - return; - } - /* A 32-bit constant sign-extended to 64-bits. */ if (arg == lo) { tcg_out_sethi(s, ret, ~arg); @@ -442,42 +454,51 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, return; } - /* A 21-bit constant, shifted. */ + /* A 32-bit constant, shifted. */ lsb = ctz64(arg); test = (tcg_target_long)arg >> lsb; - if (check_fit_tl(test, 13)) { - tcg_out_movi_imm13(s, ret, test); - tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX); - return; - } else if (lsb > 10 && test == extract64(test, 0, 21)) { + if (lsb > 10 && test == extract64(test, 0, 21)) { tcg_out_sethi(s, ret, test << 10); tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX); return; + } else if (test == (uint32_t)test || test == (int32_t)test) { + tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch); + tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX); + return; + } + + /* Use the constant pool, if possible. 
*/ + if (!in_prologue && USE_REG_TB) { + new_pool_label(s, arg, R_SPARC_13, s->code_ptr, + tcg_tbrel_diff(s, NULL)); + tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB)); + return; } /* A 64-bit constant decomposed into 2 32-bit pieces. */ if (check_fit_i32(lo, 13)) { hi = (arg - lo) >> 32; - tcg_out_movi(s, TCG_TYPE_I32, ret, hi); + tcg_out_movi_imm32(s, ret, hi); tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX); tcg_out_arithi(s, ret, ret, lo, ARITH_ADD); } else { hi = arg >> 32; - tcg_out_movi(s, TCG_TYPE_I32, ret, hi); - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo); + tcg_out_movi_imm32(s, ret, hi); + tcg_out_movi_imm32(s, scratch, lo); tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX); - tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR); + tcg_out_arith(s, ret, ret, scratch, ARITH_OR); } } -static inline void tcg_out_movi(TCGContext *s, TCGType type, - TCGReg ret, tcg_target_long arg) +static void tcg_out_movi(TCGContext *s, TCGType type, + TCGReg ret, tcg_target_long arg) { - tcg_out_movi_int(s, type, ret, arg, false); + tcg_debug_assert(ret != TCG_REG_T2); + tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2); } -static inline void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1, - TCGReg a2, int op) +static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1, + TCGReg a2, int op) { tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2)); } @@ -494,20 +515,20 @@ static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr, } } -static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, - TCGReg arg1, intptr_t arg2) +static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, + TCGReg arg1, intptr_t arg2) { tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX)); } -static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, - TCGReg arg1, intptr_t arg2) +static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) { tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX)); } -static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, - TCGReg base, intptr_t ofs) +static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, + TCGReg base, intptr_t ofs) { if (val == 0) { tcg_out_st(s, type, TCG_REG_G0, base, ofs); @@ -527,16 +548,11 @@ static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, const void *arg) tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, (uintptr_t)arg & 0x3ff); } -static inline void tcg_out_sety(TCGContext *s, TCGReg rs) +static void tcg_out_sety(TCGContext *s, TCGReg rs) { tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs)); } -static inline void tcg_out_rdy(TCGContext *s, TCGReg rd) -{ - tcg_out32(s, RDY | INSN_RD(rd)); -} - static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1, int32_t val2, int val2const, int uns) { @@ -552,11 +568,6 @@ static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1, uns ? ARITH_UDIV : ARITH_SDIV); } -static inline void tcg_out_nop(TCGContext *s) -{ - tcg_out32(s, NOP); -} - static const uint8_t tcg_cond_to_bcond[] = { [TCG_COND_EQ] = COND_E, [TCG_COND_NE] = COND_NE, @@ -787,7 +798,7 @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh, if (use_vis3_instructions && !is_sub) { /* Note that ADDXC doesn't accept immediates. 
*/ if (bhconst && bh != 0) { - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh); + tcg_out_movi_imm13(s, TCG_REG_T2, bh); bh = TCG_REG_T2; } tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC); @@ -803,9 +814,13 @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh, tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0); } } else { - /* Otherwise adjust BH as if there is carry into T2 ... */ + /* + * Otherwise adjust BH as if there is carry into T2. + * Note that constant BH is constrained to 11 bits for the MOVCC, + * so the adjustment fits 12 bits. + */ if (bhconst) { - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1)); + tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1)); } else { tcg_out_arithi(s, TCG_REG_T2, bh, 1, is_sub ? ARITH_SUB : ARITH_ADD); @@ -819,6 +834,19 @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh, tcg_out_mov(s, TCG_TYPE_I64, rl, tmp); } +static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest, + bool in_prologue, bool tail_call) +{ + uintptr_t desti = (uintptr_t)dest; + + /* Be careful not to clobber %o7 for a tail call. */ + tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1, + desti & ~0xfff, in_prologue, + tail_call ? TCG_REG_G2 : TCG_REG_O7); + tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7, + TCG_REG_T1, desti & 0xfff, JMPL); +} + static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest, bool in_prologue) { @@ -827,10 +855,7 @@ static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest, if (disp == (int32_t)disp) { tcg_out32(s, CALL | (uint32_t)disp >> 2); } else { - uintptr_t desti = (uintptr_t)dest; - tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1, - desti & ~0xfff, in_prologue); - tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL); + tcg_out_jmpl_const(s, dest, in_prologue, false); } } @@ -847,8 +872,8 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0) } #ifdef CONFIG_SOFTMMU -static const tcg_insn_unit *qemu_ld_trampoline[16]; -static const tcg_insn_unit *qemu_st_trampoline[16]; +static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1]; +static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1]; static void emit_extend(TCGContext *s, TCGReg r, int op) { @@ -864,9 +889,7 @@ static void emit_extend(TCGContext *s, TCGReg r, int op) tcg_out_arithi(s, r, r, 16, SHIFT_SRL); break; case MO_32: - if (SPARC64) { - tcg_out_arith(s, r, r, 0, SHIFT_SRL); - } + tcg_out_arith(s, r, r, 0, SHIFT_SRL); break; case MO_64: break; @@ -875,32 +898,31 @@ static void emit_extend(TCGContext *s, TCGReg r, int op) static void build_trampolines(TCGContext *s) { - static void * const qemu_ld_helpers[16] = { + static void * const qemu_ld_helpers[] = { [MO_UB] = helper_ret_ldub_mmu, [MO_SB] = helper_ret_ldsb_mmu, [MO_LEUW] = helper_le_lduw_mmu, [MO_LESW] = helper_le_ldsw_mmu, [MO_LEUL] = helper_le_ldul_mmu, - [MO_LEQ] = helper_le_ldq_mmu, + [MO_LEUQ] = helper_le_ldq_mmu, [MO_BEUW] = helper_be_lduw_mmu, [MO_BESW] = helper_be_ldsw_mmu, [MO_BEUL] = helper_be_ldul_mmu, - [MO_BEQ] = helper_be_ldq_mmu, + [MO_BEUQ] = helper_be_ldq_mmu, }; - static void * const qemu_st_helpers[16] = { + static void * const qemu_st_helpers[] = { [MO_UB] = helper_ret_stb_mmu, [MO_LEUW] = helper_le_stw_mmu, [MO_LEUL] = helper_le_stl_mmu, - [MO_LEQ] = helper_le_stq_mmu, + [MO_LEUQ] = helper_le_stq_mmu, [MO_BEUW] = helper_be_stw_mmu, [MO_BEUL] = helper_be_stl_mmu, - [MO_BEQ] = helper_be_stq_mmu, + [MO_BEUQ] = helper_be_stq_mmu, }; int i; - TCGReg ra; - for (i = 0; i < 16; ++i) { + 
for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) { if (qemu_ld_helpers[i] == NULL) { continue; } @@ -911,24 +933,15 @@ static void build_trampolines(TCGContext *s) } qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr); - if (SPARC64 || TARGET_LONG_BITS == 32) { - ra = TCG_REG_O3; - } else { - /* Install the high part of the address. */ - tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX); - ra = TCG_REG_O4; - } - /* Set the retaddr operand. */ - tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7); - /* Set the env operand. */ - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0); + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O3, TCG_REG_O7); /* Tail call. */ - tcg_out_call_nodelay(s, qemu_ld_helpers[i], true); - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra); + tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true); + /* delay slot -- set the env argument */ + tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0); } - for (i = 0; i < 16; ++i) { + for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) { if (qemu_st_helpers[i] == NULL) { continue; } @@ -939,42 +952,42 @@ static void build_trampolines(TCGContext *s) } qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr); - if (SPARC64) { - emit_extend(s, TCG_REG_O2, i); - ra = TCG_REG_O4; - } else { - ra = TCG_REG_O1; - if (TARGET_LONG_BITS == 64) { - /* Install the high part of the address. */ - tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX); - ra += 2; - } else { - ra += 1; - } - if ((i & MO_SIZE) == MO_64) { - /* Install the high part of the data. */ - tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX); - ra += 2; - } else { - emit_extend(s, ra, i); - ra += 1; - } - /* Skip the oi argument. */ - ra += 1; - } - + emit_extend(s, TCG_REG_O2, i); + /* Set the retaddr operand. */ - if (ra >= TCG_REG_O6) { - tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK, - TCG_TARGET_CALL_STACK_OFFSET); - ra = TCG_REG_G1; - } - tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7); - /* Set the env operand. */ - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0); + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7); + /* Tail call. */ - tcg_out_call_nodelay(s, qemu_st_helpers[i], true); - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra); + tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true); + /* delay slot -- set the env argument */ + tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0); + } +} +#else +static const tcg_insn_unit *qemu_unalign_ld_trampoline; +static const tcg_insn_unit *qemu_unalign_st_trampoline; + +static void build_trampolines(TCGContext *s) +{ + for (int ld = 0; ld < 2; ++ld) { + void *helper; + + while ((uintptr_t)s->code_ptr & 15) { + tcg_out_nop(s); + } + + if (ld) { + helper = helper_unaligned_ld; + qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr); + } else { + helper = helper_unaligned_st; + qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr); + } + + /* Tail call. 
*/ + tcg_out_jmpl_const(s, helper, true, true); + /* delay slot -- set the env argument */ + tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0); } } #endif @@ -1005,7 +1018,8 @@ static void tcg_target_qemu_prologue(TCGContext *s) #ifndef CONFIG_SOFTMMU if (guest_base != 0) { - tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true); + tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, + guest_base, true, TCG_REG_T1); tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); } #endif @@ -1026,9 +1040,7 @@ static void tcg_target_qemu_prologue(TCGContext *s) /* delay slot */ tcg_out_movi_imm13(s, TCG_REG_O0, 0); -#ifdef CONFIG_SOFTMMU build_trampolines(s); -#endif } static void tcg_out_nop_fill(tcg_insn_unit *p, int count) @@ -1102,7 +1114,7 @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index, tcg_out_cmp(s, r0, r2, 0); /* If the guest address must be zero-extended, do so now. */ - if (SPARC64 && TARGET_LONG_BITS == 32) { + if (TARGET_LONG_BITS == 32) { tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL); return r0; } @@ -1110,44 +1122,49 @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index, } #endif /* CONFIG_SOFTMMU */ -static const int qemu_ld_opc[16] = { +static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = { [MO_UB] = LDUB, [MO_SB] = LDSB, + [MO_UB | MO_LE] = LDUB, + [MO_SB | MO_LE] = LDSB, [MO_BEUW] = LDUH, [MO_BESW] = LDSH, [MO_BEUL] = LDUW, [MO_BESL] = LDSW, - [MO_BEQ] = LDX, + [MO_BEUQ] = LDX, + [MO_BESQ] = LDX, [MO_LEUW] = LDUH_LE, [MO_LESW] = LDSH_LE, [MO_LEUL] = LDUW_LE, [MO_LESL] = LDSW_LE, - [MO_LEQ] = LDX_LE, + [MO_LEUQ] = LDX_LE, + [MO_LESQ] = LDX_LE, }; -static const int qemu_st_opc[16] = { +static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = { [MO_UB] = STB, [MO_BEUW] = STH, [MO_BEUL] = STW, - [MO_BEQ] = STX, + [MO_BEUQ] = STX, [MO_LEUW] = STH_LE, [MO_LEUL] = STW_LE, - [MO_LEQ] = STX_LE, + [MO_LEUQ] = STX_LE, }; static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, - TCGMemOpIdx oi, bool is_64) + MemOpIdx oi, bool is_64) { MemOp memop = get_memop(oi); + tcg_insn_unit *label_ptr; + #ifdef CONFIG_SOFTMMU unsigned memi = get_mmuidx(oi); - TCGReg addrz, param; + TCGReg addrz; const tcg_insn_unit *func; - tcg_insn_unit *label_ptr; addrz = tcg_out_tlb_load(s, addr, memi, memop, offsetof(CPUTLBEntry, addr_read)); @@ -1166,12 +1183,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, /* TLB Miss. */ - param = TCG_REG_O1; - if (!SPARC64 && TARGET_LONG_BITS == 64) { - /* Skip the high-part; we'll perform the extract in the trampoline. */ - param++; - } - tcg_out_mov(s, TCG_TYPE_REG, param++, addrz); + tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz); /* We use the helpers to extend SB and SW data, leaving the case of SL needing explicit extending below. */ @@ -1183,53 +1195,122 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, tcg_debug_assert(func != NULL); tcg_out_call_nodelay(s, func, false); /* delay slot */ - tcg_out_movi(s, TCG_TYPE_I32, param, oi); + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi); - /* Recall that all of the helpers return 64-bit results. - Which complicates things for sparcv8plus. */ - if (SPARC64) { - /* We let the helper sign-extend SB and SW, but leave SL for here. */ - if (is_64 && (memop & MO_SSIZE) == MO_SL) { - tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA); - } else { - tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0); - } + /* We let the helper sign-extend SB and SW, but leave SL for here. 
*/ + if (is_64 && (memop & MO_SSIZE) == MO_SL) { + tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA); } else { - if ((memop & MO_SIZE) == MO_64) { - tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX); - tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL); - tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR); - } else if (is_64) { - /* Re-extend from 32-bit rather than reassembling when we - know the high register must be an extension. */ - tcg_out_arithi(s, data, TCG_REG_O1, 0, - memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL); - } else { - tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1); - } + tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0); } *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); #else - if (SPARC64 && TARGET_LONG_BITS == 32) { + TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0); + unsigned a_bits = get_alignment_bits(memop); + unsigned s_bits = memop & MO_SIZE; + unsigned t_bits; + + if (TARGET_LONG_BITS == 32) { tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL); addr = TCG_REG_T1; } - tcg_out_ldst_rr(s, data, addr, - (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0), + + /* + * Normal case: alignment equal to access size. + */ + if (a_bits == s_bits) { + tcg_out_ldst_rr(s, data, addr, index, + qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); + return; + } + + /* + * Test for at least natural alignment, and assume most accesses + * will be aligned -- perform a straight load in the delay slot. + * This is required to preserve atomicity for aligned accesses. + */ + t_bits = MAX(a_bits, s_bits); + tcg_debug_assert(t_bits < 13); + tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC); + + /* beq,a,pt %icc, label */ + label_ptr = s->code_ptr; + tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0); + /* delay slot */ + tcg_out_ldst_rr(s, data, addr, index, qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); + + if (a_bits >= s_bits) { + /* + * Overalignment: A successful alignment test will perform the memory + * operation in the delay slot, and failure need only invoke the + * handler for SIGBUS. + */ + tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false); + /* delay slot -- move to low part of argument reg */ + tcg_out_mov_delay(s, TCG_REG_O1, addr); + } else { + /* Underalignment: load by pieces of minimum alignment. */ + int ld_opc, a_size, s_size, i; + + /* + * Force full address into T1 early; avoids problems with + * overlap between @addr and @data. 
+ */ + tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD); + + a_size = 1 << a_bits; + s_size = 1 << s_bits; + if ((memop & MO_BSWAP) == MO_BE) { + ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)]; + tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc); + ld_opc = qemu_ld_opc[a_bits | MO_BE]; + for (i = a_size; i < s_size; i += a_size) { + tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc); + tcg_out_arithi(s, data, data, a_size, SHIFT_SLLX); + tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR); + } + } else if (a_bits == 0) { + ld_opc = LDUB; + tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc); + for (i = a_size; i < s_size; i += a_size) { + if ((memop & MO_SIGN) && i == s_size - a_size) { + ld_opc = LDSB; + } + tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc); + tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX); + tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR); + } + } else { + ld_opc = qemu_ld_opc[a_bits | MO_LE]; + tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc); + for (i = a_size; i < s_size; i += a_size) { + tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD); + if ((memop & MO_SIGN) && i == s_size - a_size) { + ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN]; + } + tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc); + tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX); + tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR); + } + } + } + + *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); #endif /* CONFIG_SOFTMMU */ } static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, - TCGMemOpIdx oi) + MemOpIdx oi) { MemOp memop = get_memop(oi); + tcg_insn_unit *label_ptr; + #ifdef CONFIG_SOFTMMU unsigned memi = get_mmuidx(oi); - TCGReg addrz, param; + TCGReg addrz; const tcg_insn_unit *func; - tcg_insn_unit *label_ptr; addrz = tcg_out_tlb_load(s, addr, memi, memop, offsetof(CPUTLBEntry, addr_write)); @@ -1246,33 +1327,103 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, /* TLB Miss. */ - param = TCG_REG_O1; - if (!SPARC64 && TARGET_LONG_BITS == 64) { - /* Skip the high-part; we'll perform the extract in the trampoline. */ - param++; - } - tcg_out_mov(s, TCG_TYPE_REG, param++, addrz); - if (!SPARC64 && (memop & MO_SIZE) == MO_64) { - /* Skip the high-part; we'll perform the extract in the trampoline. */ - param++; - } - tcg_out_mov(s, TCG_TYPE_REG, param++, data); + tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz); + tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O2, data); func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)]; tcg_debug_assert(func != NULL); tcg_out_call_nodelay(s, func, false); /* delay slot */ - tcg_out_movi(s, TCG_TYPE_I32, param, oi); + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O3, oi); *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); #else - if (SPARC64 && TARGET_LONG_BITS == 32) { + TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0); + unsigned a_bits = get_alignment_bits(memop); + unsigned s_bits = memop & MO_SIZE; + unsigned t_bits; + + if (TARGET_LONG_BITS == 32) { tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL); addr = TCG_REG_T1; } - tcg_out_ldst_rr(s, data, addr, - (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0), + + /* + * Normal case: alignment equal to access size. + */ + if (a_bits == s_bits) { + tcg_out_ldst_rr(s, data, addr, index, + qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); + return; + } + + /* + * Test for at least natural alignment, and assume most accesses + * will be aligned -- perform a straight store in the delay slot. 
+ * This is required to preserve atomicity for aligned accesses. + */ + t_bits = MAX(a_bits, s_bits); + tcg_debug_assert(t_bits < 13); + tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC); + + /* beq,a,pt %icc, label */ + label_ptr = s->code_ptr; + tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0); + /* delay slot */ + tcg_out_ldst_rr(s, data, addr, index, qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); + + if (a_bits >= s_bits) { + /* + * Overalignment: A successful alignment test will perform the memory + * operation in the delay slot, and failure need only invoke the + * handler for SIGBUS. + */ + tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false); + /* delay slot -- move to low part of argument reg */ + tcg_out_mov_delay(s, TCG_REG_O1, addr); + } else { + /* Underalignment: store by pieces of minimum alignment. */ + int st_opc, a_size, s_size, i; + + /* + * Force full address into T1 early; avoids problems with + * overlap between @addr and @data. + */ + tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD); + + a_size = 1 << a_bits; + s_size = 1 << s_bits; + if ((memop & MO_BSWAP) == MO_BE) { + st_opc = qemu_st_opc[a_bits | MO_BE]; + for (i = 0; i < s_size; i += a_size) { + TCGReg d = data; + int shift = (s_size - a_size - i) * 8; + if (shift) { + d = TCG_REG_T2; + tcg_out_arithi(s, d, data, shift, SHIFT_SRLX); + } + tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc); + } + } else if (a_bits == 0) { + tcg_out_ldst(s, data, TCG_REG_T1, 0, STB); + for (i = 1; i < s_size; i++) { + tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX); + tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB); + } + } else { + /* Note that ST*A with immediate asi must use indexed address. */ + st_opc = qemu_st_opc[a_bits + MO_LE]; + tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc); + for (i = a_size; i < s_size; i += a_size) { + tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX); + tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD); + tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc); + } + } + } + + *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); #endif /* CONFIG_SOFTMMU */ } @@ -1350,7 +1501,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_goto_ptr: tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL); if (USE_REG_TB) { - tcg_out_arith(s, TCG_REG_TB, a0, TCG_REG_G0, ARITH_OR); + tcg_out_mov_delay(s, TCG_REG_TB, a0); } else { tcg_out_nop(s); } @@ -1467,14 +1618,9 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_muls2_i32: c = ARITH_SMUL; do_mul2: - /* The 32-bit multiply insns produce a full 64-bit result. If the - destination register can hold it, we can avoid the slower RDY. */ + /* The 32-bit multiply insns produce a full 64-bit result. 
*/ tcg_out_arithc(s, a0, a2, args[3], const_args[3], c); - if (SPARC64 || a0 <= TCG_REG_O7) { - tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX); - } else { - tcg_out_rdy(s, a1); - } + tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX); break; case INDEX_op_qemu_ld_i32: @@ -1581,107 +1727,91 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) return C_O0_I1(r); case INDEX_op_ld8u_i32: + case INDEX_op_ld8u_i64: case INDEX_op_ld8s_i32: + case INDEX_op_ld8s_i64: case INDEX_op_ld16u_i32: + case INDEX_op_ld16u_i64: case INDEX_op_ld16s_i32: + case INDEX_op_ld16s_i64: case INDEX_op_ld_i32: + case INDEX_op_ld32u_i64: + case INDEX_op_ld32s_i64: + case INDEX_op_ld_i64: case INDEX_op_neg_i32: + case INDEX_op_neg_i64: case INDEX_op_not_i32: + case INDEX_op_not_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extrl_i64_i32: + case INDEX_op_extrh_i64_i32: return C_O1_I1(r, r); case INDEX_op_st8_i32: + case INDEX_op_st8_i64: case INDEX_op_st16_i32: + case INDEX_op_st16_i64: case INDEX_op_st_i32: + case INDEX_op_st32_i64: + case INDEX_op_st_i64: return C_O0_I2(rZ, r); case INDEX_op_add_i32: + case INDEX_op_add_i64: case INDEX_op_mul_i32: + case INDEX_op_mul_i64: case INDEX_op_div_i32: + case INDEX_op_div_i64: case INDEX_op_divu_i32: + case INDEX_op_divu_i64: case INDEX_op_sub_i32: + case INDEX_op_sub_i64: case INDEX_op_and_i32: + case INDEX_op_and_i64: case INDEX_op_andc_i32: + case INDEX_op_andc_i64: case INDEX_op_or_i32: + case INDEX_op_or_i64: case INDEX_op_orc_i32: + case INDEX_op_orc_i64: case INDEX_op_xor_i32: + case INDEX_op_xor_i64: case INDEX_op_shl_i32: + case INDEX_op_shl_i64: case INDEX_op_shr_i32: + case INDEX_op_shr_i64: case INDEX_op_sar_i32: + case INDEX_op_sar_i64: case INDEX_op_setcond_i32: + case INDEX_op_setcond_i64: return C_O1_I2(r, rZ, rJ); case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: return C_O0_I2(rZ, rJ); case INDEX_op_movcond_i32: + case INDEX_op_movcond_i64: return C_O1_I4(r, rZ, rJ, rI, 0); case INDEX_op_add2_i32: + case INDEX_op_add2_i64: case INDEX_op_sub2_i32: + case INDEX_op_sub2_i64: return C_O2_I4(r, r, rZ, rZ, rJ, rJ); case INDEX_op_mulu2_i32: case INDEX_op_muls2_i32: return C_O2_I2(r, r, rZ, rJ); - - case INDEX_op_ld8u_i64: - case INDEX_op_ld8s_i64: - case INDEX_op_ld16u_i64: - case INDEX_op_ld16s_i64: - case INDEX_op_ld32u_i64: - case INDEX_op_ld32s_i64: - case INDEX_op_ld_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - return C_O1_I1(R, r); - - case INDEX_op_st8_i64: - case INDEX_op_st16_i64: - case INDEX_op_st32_i64: - case INDEX_op_st_i64: - return C_O0_I2(RZ, r); - - case INDEX_op_add_i64: - case INDEX_op_mul_i64: - case INDEX_op_div_i64: - case INDEX_op_divu_i64: - case INDEX_op_sub_i64: - case INDEX_op_and_i64: - case INDEX_op_andc_i64: - case INDEX_op_or_i64: - case INDEX_op_orc_i64: - case INDEX_op_xor_i64: - case INDEX_op_shl_i64: - case INDEX_op_shr_i64: - case INDEX_op_sar_i64: - case INDEX_op_setcond_i64: - return C_O1_I2(R, RZ, RJ); - - case INDEX_op_neg_i64: - case INDEX_op_not_i64: - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: - return C_O1_I1(R, R); - - case INDEX_op_extrl_i64_i32: - case INDEX_op_extrh_i64_i32: - return C_O1_I1(r, R); - - case INDEX_op_brcond_i64: - return C_O0_I2(RZ, RJ); - case INDEX_op_movcond_i64: - return C_O1_I4(R, RZ, RJ, RI, 0); - case INDEX_op_add2_i64: - case INDEX_op_sub2_i64: - return C_O2_I4(R, R, RZ, RZ, RJ, RI); case INDEX_op_muluh_i64: - return C_O1_I2(R, R, R); + return C_O1_I2(r, r, r); case 
INDEX_op_qemu_ld_i32: - return C_O1_I1(r, A); case INDEX_op_qemu_ld_i64: - return C_O1_I1(R, A); + return C_O1_I1(r, s); case INDEX_op_qemu_st_i32: - return C_O0_I2(sZ, A); case INDEX_op_qemu_st_i64: - return C_O0_I2(SZ, A); + return C_O0_I2(sZ, s); default: g_assert_not_reached(); @@ -1702,7 +1832,7 @@ static void tcg_target_init(TCGContext *s) #endif tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; - tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS64; + tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS; tcg_target_call_clobber_regs = 0; tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1); @@ -1732,16 +1862,11 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */ } -#if SPARC64 -# define ELF_HOST_MACHINE EM_SPARCV9 -#else -# define ELF_HOST_MACHINE EM_SPARC32PLUS -# define ELF_HOST_FLAGS EF_SPARC_32PLUS -#endif +#define ELF_HOST_MACHINE EM_SPARCV9 typedef struct { DebugFrameHeader h; - uint8_t fde_def_cfa[SPARC64 ? 4 : 2]; + uint8_t fde_def_cfa[4]; uint8_t fde_win_save; uint8_t fde_ret_save[3]; } DebugFrame; @@ -1758,12 +1883,8 @@ static const DebugFrame debug_frame = { .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), .fde_def_cfa = { -#if SPARC64 12, 30, /* DW_CFA_def_cfa i6, 2047 */ (2047 & 0x7f) | 0x80, (2047 >> 7) -#else - 13, 30 /* DW_CFA_def_cfa_register i6 */ -#endif }, .fde_win_save = 0x2d, /* DW_CFA_GNU_window_save */ .fde_ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */ diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc64/tcg-target.h similarity index 95% rename from tcg/sparc/tcg-target.h rename to tcg/sparc64/tcg-target.h index c050763049..8655acdbe5 100644 --- a/tcg/sparc/tcg-target.h +++ b/tcg/sparc64/tcg-target.h @@ -25,8 +25,6 @@ #ifndef SPARC_TCG_TARGET_H #define SPARC_TCG_TARGET_H -#define TCG_TARGET_REG_BITS 64 - #define TCG_TARGET_INSN_UNIT_SIZE 4 #define TCG_TARGET_TLB_DISPLACEMENT_BITS 32 #define TCG_TARGET_NB_REGS 32 @@ -70,19 +68,10 @@ typedef enum { /* used for function call generation */ #define TCG_REG_CALL_STACK TCG_REG_O6 -#ifdef __arch64__ #define TCG_TARGET_STACK_BIAS 2047 #define TCG_TARGET_STACK_ALIGN 16 #define TCG_TARGET_CALL_STACK_OFFSET (128 + 6*8 + TCG_TARGET_STACK_BIAS) -#else -#define TCG_TARGET_STACK_BIAS 0 -#define TCG_TARGET_STACK_ALIGN 8 -#define TCG_TARGET_CALL_STACK_OFFSET (64 + 4 + 6*4) -#endif - -#ifdef __arch64__ #define TCG_TARGET_EXTEND_ARGS 1 -#endif #if defined(__VIS__) && __VIS__ >= 0x300 #define use_vis3_instructions 1 diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h index 92c91dcde9..cc82088d52 100644 --- a/tcg/tcg-internal.h +++ b/tcg/tcg-internal.h @@ -23,7 +23,7 @@ */ #ifndef TCG_INTERNAL_H -#define TCG_INTERNAL_H 1 +#define TCG_INTERNAL_H #define TCG_HIGHWATER 1024 diff --git a/tcg/tcg-ldst.c.inc b/tcg/tcg-ldst.c.inc index c3ce88e69d..6c6848d034 100644 --- a/tcg/tcg-ldst.c.inc +++ b/tcg/tcg-ldst.c.inc @@ -22,7 +22,7 @@ typedef struct TCGLabelQemuLdst { bool is_ld; /* qemu_ld: true, qemu_st: false */ - TCGMemOpIdx oi; + MemOpIdx oi; TCGType type; /* result type of a load */ TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */ TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */ diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c index ffe55e908f..079a761b04 100644 --- a/tcg/tcg-op-gvec.c +++ b/tcg/tcg-op-gvec.c @@ -836,6 +836,30 @@ static void expand_4_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs, tcg_temp_free_i32(t0); } +static void expand_4i_i32(uint32_t 
dofs, uint32_t aofs, uint32_t bofs, + uint32_t cofs, uint32_t oprsz, int32_t c, + void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32, + int32_t)) +{ + TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 t1 = tcg_temp_new_i32(); + TCGv_i32 t2 = tcg_temp_new_i32(); + TCGv_i32 t3 = tcg_temp_new_i32(); + uint32_t i; + + for (i = 0; i < oprsz; i += 4) { + tcg_gen_ld_i32(t1, cpu_env, aofs + i); + tcg_gen_ld_i32(t2, cpu_env, bofs + i); + tcg_gen_ld_i32(t3, cpu_env, cofs + i); + fni(t0, t1, t2, t3, c); + tcg_gen_st_i32(t0, cpu_env, dofs + i); + } + tcg_temp_free_i32(t3); + tcg_temp_free_i32(t2); + tcg_temp_free_i32(t1); + tcg_temp_free_i32(t0); +} + /* Expand OPSZ bytes worth of two-operand operations using i64 elements. */ static void expand_2_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz, bool load_dest, void (*fni)(TCGv_i64, TCGv_i64)) @@ -971,6 +995,30 @@ static void expand_4_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs, tcg_temp_free_i64(t0); } +static void expand_4i_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs, + uint32_t cofs, uint32_t oprsz, int64_t c, + void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64, + int64_t)) +{ + TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_new_i64(); + TCGv_i64 t3 = tcg_temp_new_i64(); + uint32_t i; + + for (i = 0; i < oprsz; i += 8) { + tcg_gen_ld_i64(t1, cpu_env, aofs + i); + tcg_gen_ld_i64(t2, cpu_env, bofs + i); + tcg_gen_ld_i64(t3, cpu_env, cofs + i); + fni(t0, t1, t2, t3, c); + tcg_gen_st_i64(t0, cpu_env, dofs + i); + } + tcg_temp_free_i64(t3); + tcg_temp_free_i64(t2); + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t0); +} + /* Expand OPSZ bytes worth of two-operand operations using host vectors. */ static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t tysz, TCGType type, @@ -1121,6 +1169,35 @@ static void expand_4_vec(unsigned vece, uint32_t dofs, uint32_t aofs, tcg_temp_free_vec(t0); } +/* + * Expand OPSZ bytes worth of four-vector operands and an immediate operand + * using host vectors. + */ +static void expand_4i_vec(unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t cofs, uint32_t oprsz, + uint32_t tysz, TCGType type, int64_t c, + void (*fni)(unsigned, TCGv_vec, TCGv_vec, + TCGv_vec, TCGv_vec, int64_t)) +{ + TCGv_vec t0 = tcg_temp_new_vec(type); + TCGv_vec t1 = tcg_temp_new_vec(type); + TCGv_vec t2 = tcg_temp_new_vec(type); + TCGv_vec t3 = tcg_temp_new_vec(type); + uint32_t i; + + for (i = 0; i < oprsz; i += tysz) { + tcg_gen_ld_vec(t1, cpu_env, aofs + i); + tcg_gen_ld_vec(t2, cpu_env, bofs + i); + tcg_gen_ld_vec(t3, cpu_env, cofs + i); + fni(vece, t0, t1, t2, t3, c); + tcg_gen_st_vec(t0, cpu_env, dofs + i); + } + tcg_temp_free_vec(t3); + tcg_temp_free_vec(t2); + tcg_temp_free_vec(t1); + tcg_temp_free_vec(t0); +} + /* Expand a vector two-operand operation. */ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g) @@ -1533,6 +1610,75 @@ void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, } } +/* Expand a vector four-operand operation. */ +void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, + uint32_t oprsz, uint32_t maxsz, int64_t c, + const GVecGen4i *g) +{ + const TCGOpcode *this_list = g->opt_opc ? 
: vecop_list_empty; + const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list); + TCGType type; + uint32_t some; + + check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs); + check_overlap_4(dofs, aofs, bofs, cofs, maxsz); + + type = 0; + if (g->fniv) { + type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64); + } + switch (type) { + case TCG_TYPE_V256: + /* + * Recall that ARM SVE allows vector sizes that are not a + * power of 2, but always a multiple of 16. The intent is + * that e.g. size == 80 would be expanded with 2x32 + 1x16. + */ + some = QEMU_ALIGN_DOWN(oprsz, 32); + expand_4i_vec(g->vece, dofs, aofs, bofs, cofs, some, + 32, TCG_TYPE_V256, c, g->fniv); + if (some == oprsz) { + break; + } + dofs += some; + aofs += some; + bofs += some; + cofs += some; + oprsz -= some; + maxsz -= some; + /* fallthru */ + case TCG_TYPE_V128: + expand_4i_vec(g->vece, dofs, aofs, bofs, cofs, oprsz, + 16, TCG_TYPE_V128, c, g->fniv); + break; + case TCG_TYPE_V64: + expand_4i_vec(g->vece, dofs, aofs, bofs, cofs, oprsz, + 8, TCG_TYPE_V64, c, g->fniv); + break; + + case 0: + if (g->fni8 && check_size_impl(oprsz, 8)) { + expand_4i_i64(dofs, aofs, bofs, cofs, oprsz, c, g->fni8); + } else if (g->fni4 && check_size_impl(oprsz, 4)) { + expand_4i_i32(dofs, aofs, bofs, cofs, oprsz, c, g->fni4); + } else { + assert(g->fno != NULL); + tcg_gen_gvec_4_ool(dofs, aofs, bofs, cofs, + oprsz, maxsz, c, g->fno); + oprsz = maxsz; + } + break; + + default: + g_assert_not_reached(); + } + tcg_swap_vecop_list(hold_list); + + if (oprsz < maxsz) { + expand_clr(dofs + oprsz, maxsz - oprsz); + } +} + /* * Expand specific vector operations. */ diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c index 15e026ae49..463dabf515 100644 --- a/tcg/tcg-op-vec.c +++ b/tcg/tcg-op-vec.c @@ -119,6 +119,18 @@ bool tcg_can_emit_vecop_list(const TCGOpcode *list, continue; } break; + case INDEX_op_usadd_vec: + if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece) || + tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece)) { + continue; + } + break; + case INDEX_op_ussub_vec: + if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece) || + tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece)) { + continue; + } + break; case INDEX_op_cmpsel_vec: case INDEX_op_smin_vec: case INDEX_op_smax_vec: @@ -359,23 +371,32 @@ void tcg_gen_orc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) void tcg_gen_nand_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { - /* TODO: Add TCG_TARGET_HAS_nand_vec when adding a backend supports it. */ - tcg_gen_and_vec(0, r, a, b); - tcg_gen_not_vec(0, r, r); + if (TCG_TARGET_HAS_nand_vec) { + vec_gen_op3(INDEX_op_nand_vec, 0, r, a, b); + } else { + tcg_gen_and_vec(0, r, a, b); + tcg_gen_not_vec(0, r, r); + } } void tcg_gen_nor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { - /* TODO: Add TCG_TARGET_HAS_nor_vec when adding a backend supports it. */ - tcg_gen_or_vec(0, r, a, b); - tcg_gen_not_vec(0, r, r); + if (TCG_TARGET_HAS_nor_vec) { + vec_gen_op3(INDEX_op_nor_vec, 0, r, a, b); + } else { + tcg_gen_or_vec(0, r, a, b); + tcg_gen_not_vec(0, r, r); + } } void tcg_gen_eqv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { - /* TODO: Add TCG_TARGET_HAS_eqv_vec when adding a backend supports it. 
*/ - tcg_gen_xor_vec(0, r, a, b); - tcg_gen_not_vec(0, r, r); + if (TCG_TARGET_HAS_eqv_vec) { + vec_gen_op3(INDEX_op_eqv_vec, 0, r, a, b); + } else { + tcg_gen_xor_vec(0, r, a, b); + tcg_gen_not_vec(0, r, r); + } } static bool do_op2(unsigned vece, TCGv_vec r, TCGv_vec a, TCGOpcode opc) @@ -603,7 +624,18 @@ void tcg_gen_ssadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) void tcg_gen_usadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { - do_op3_nofail(vece, r, a, b, INDEX_op_usadd_vec); + if (!do_op3(vece, r, a, b, INDEX_op_usadd_vec)) { + const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); + TCGv_vec t = tcg_temp_new_vec_matching(r); + + /* usadd(a, b) = min(a, ~b) + b */ + tcg_gen_not_vec(vece, t, b); + tcg_gen_umin_vec(vece, t, t, a); + tcg_gen_add_vec(vece, r, t, b); + + tcg_temp_free_vec(t); + tcg_swap_vecop_list(hold_list); + } } void tcg_gen_sssub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) @@ -613,7 +645,17 @@ void tcg_gen_sssub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) void tcg_gen_ussub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { - do_op3_nofail(vece, r, a, b, INDEX_op_ussub_vec); + if (!do_op3(vece, r, a, b, INDEX_op_ussub_vec)) { + const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); + TCGv_vec t = tcg_temp_new_vec_matching(r); + + /* ussub(a, b) = max(a, b) - b */ + tcg_gen_umax_vec(vece, t, a, b); + tcg_gen_sub_vec(vece, r, t, b); + + tcg_temp_free_vec(t); + tcg_swap_vecop_list(hold_list); + } } static void do_minmax(unsigned vece, TCGv_vec r, TCGv_vec a, diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c index 01b8405b40..e02f3afb96 100644 --- a/tcg/tcg-op.c +++ b/tcg/tcg-op.c @@ -27,8 +27,6 @@ #include "tcg/tcg.h" #include "tcg/tcg-op.h" #include "tcg/tcg-mo.h" -#include "trace-tcg.h" -#include "trace/mem.h" #include "exec/plugin-gen.h" /* Reduce the number of ifdefs below. This assumes that all uses of @@ -1058,6 +1056,12 @@ void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg) } } +void tcg_gen_hswap_i32(TCGv_i32 ret, TCGv_i32 arg) +{ + /* Swapping 2 16-bit elements is a rotate. */ + tcg_gen_rotli_i32(ret, arg, 16); +} + void tcg_gen_smin_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b) { tcg_gen_movcond_i32(TCG_COND_LT, ret, a, b, a, b); @@ -1158,7 +1162,7 @@ void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { /* Since arg2 and ret have different types, they cannot be the same temporary */ -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset); tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset + 4); #else @@ -1169,7 +1173,7 @@ void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset); tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset + 4); #else @@ -1794,6 +1798,30 @@ void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg) } } +void tcg_gen_hswap_i64(TCGv_i64 ret, TCGv_i64 arg) +{ + uint64_t m = 0x0000ffff0000ffffull; + TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_new_i64(); + + /* See include/qemu/bitops.h, hswap64. */ + tcg_gen_rotli_i64(t1, arg, 32); + tcg_gen_andi_i64(t0, t1, m); + tcg_gen_shli_i64(t0, t0, 16); + tcg_gen_shri_i64(t1, t1, 16); + tcg_gen_andi_i64(t1, t1, m); + tcg_gen_or_i64(ret, t0, t1); + + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); +} + +void tcg_gen_wswap_i64(TCGv_i64 ret, TCGv_i64 arg) +{ + /* Swapping 2 32-bit elements is a rotate. 
*/ + tcg_gen_rotli_i64(ret, arg, 32); +} + void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg) { if (TCG_TARGET_REG_BITS == 32) { @@ -2767,7 +2795,12 @@ void tcg_gen_lookup_and_goto_ptr(void) static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) { /* Trigger the asserts within as early as possible. */ - (void)get_alignment_bits(op); + unsigned a_bits = get_alignment_bits(op); + + /* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */ + if (a_bits == (op & MO_SIZE)) { + op = (op & ~MO_AMASK) | MO_ALIGN; + } switch (op & MO_SIZE) { case MO_8: @@ -2781,10 +2814,13 @@ static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) } break; case MO_64: - if (!is64) { - tcg_abort(); + if (is64) { + op &= ~MO_SIGN; + break; } - break; + /* fall through */ + default: + g_assert_not_reached(); } if (st) { op &= ~MO_SIGN; @@ -2795,7 +2831,7 @@ static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr, MemOp memop, TCGArg idx) { - TCGMemOpIdx oi = make_memop_idx(memop, idx); + MemOpIdx oi = make_memop_idx(memop, idx); #if TARGET_LONG_BITS == 32 tcg_gen_op3i_i32(opc, val, addr, oi); #else @@ -2810,7 +2846,7 @@ static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr, static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr, MemOp memop, TCGArg idx) { - TCGMemOpIdx oi = make_memop_idx(memop, idx); + MemOpIdx oi = make_memop_idx(memop, idx); #if TARGET_LONG_BITS == 32 if (TCG_TARGET_REG_BITS == 32) { tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi); @@ -2851,10 +2887,12 @@ static inline TCGv plugin_prep_mem_callbacks(TCGv vaddr) return vaddr; } -static inline void plugin_gen_mem_callbacks(TCGv vaddr, uint16_t info) +static void plugin_gen_mem_callbacks(TCGv vaddr, MemOpIdx oi, + enum qemu_plugin_mem_rw rw) { #ifdef CONFIG_PLUGIN if (tcg_ctx->plugin_insn != NULL) { + qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw); plugin_gen_empty_mem_callback(vaddr, info); tcg_temp_free(vaddr); } @@ -2864,11 +2902,11 @@ static inline void plugin_gen_mem_callbacks(TCGv vaddr, uint16_t info) void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { MemOp orig_memop; - uint16_t info = trace_mem_get_info(memop, idx, 0); + MemOpIdx oi; tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); memop = tcg_canonicalize_memop(memop, 0, 0); - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); + oi = make_memop_idx(memop, idx); orig_memop = memop; if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { @@ -2881,7 +2919,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) addr = plugin_prep_mem_callbacks(addr); gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx); - plugin_gen_mem_callbacks(addr, info); + plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); if ((orig_memop ^ memop) & MO_BSWAP) { switch (orig_memop & MO_SIZE) { @@ -2902,11 +2940,11 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { TCGv_i32 swap = NULL; - uint16_t info = trace_mem_get_info(memop, idx, 1); + MemOpIdx oi; tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); memop = tcg_canonicalize_memop(memop, 0, 1); - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); + oi = make_memop_idx(memop, idx); if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { swap = tcg_temp_new_i32(); @@ -2930,7 +2968,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, 
MemOp memop) } else { gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx); } - plugin_gen_mem_callbacks(addr, info); + plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); if (swap) { tcg_temp_free_i32(swap); @@ -2940,7 +2978,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { MemOp orig_memop; - uint16_t info; + MemOpIdx oi; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop); @@ -2954,8 +2992,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); memop = tcg_canonicalize_memop(memop, 1, 0); - info = trace_mem_get_info(memop, idx, 0); - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); + oi = make_memop_idx(memop, idx); orig_memop = memop; if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { @@ -2968,7 +3005,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) addr = plugin_prep_mem_callbacks(addr); gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx); - plugin_gen_mem_callbacks(addr, info); + plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); if ((orig_memop ^ memop) & MO_BSWAP) { int flags = (orig_memop & MO_SIGN @@ -2993,7 +3030,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { TCGv_i64 swap = NULL; - uint16_t info; + MemOpIdx oi; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop); @@ -3002,8 +3039,7 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); memop = tcg_canonicalize_memop(memop, 1, 1); - info = trace_mem_get_info(memop, idx, 1); - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); + oi = make_memop_idx(memop, idx); if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { swap = tcg_temp_new_i64(); @@ -3026,7 +3062,7 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) addr = plugin_prep_mem_callbacks(addr); gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx); - plugin_gen_mem_callbacks(addr, info); + plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); if (swap) { tcg_temp_free_i64(swap); @@ -3096,7 +3132,7 @@ typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, # define WITH_ATOMIC64(X) #endif -static void * const table_cmpxchg[16] = { +static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = { [MO_8] = gen_helper_atomic_cmpxchgb, [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le, [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be, @@ -3130,7 +3166,7 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, tcg_temp_free_i32(t1); } else { gen_atomic_cx_i32 gen; - TCGMemOpIdx oi; + MemOpIdx oi; gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; tcg_debug_assert(gen != NULL); @@ -3169,7 +3205,7 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, } else if ((memop & MO_SIZE) == MO_64) { #ifdef CONFIG_ATOMIC64 gen_atomic_cx_i64 gen; - TCGMemOpIdx oi; + MemOpIdx oi; gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; tcg_debug_assert(gen != NULL); @@ -3225,7 +3261,7 @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop, void * const table[]) { gen_atomic_op_i32 gen; - TCGMemOpIdx oi; + MemOpIdx oi; memop = 
tcg_canonicalize_memop(memop, 0, 0); @@ -3267,7 +3303,7 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, if ((memop & MO_SIZE) == MO_64) { #ifdef CONFIG_ATOMIC64 gen_atomic_op_i64 gen; - TCGMemOpIdx oi; + MemOpIdx oi; gen = table[memop & (MO_SIZE | MO_BSWAP)]; tcg_debug_assert(gen != NULL); @@ -3298,7 +3334,7 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, } #define GEN_ATOMIC_HELPER(NAME, OP, NEW) \ -static void * const table_##NAME[16] = { \ +static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = { \ [MO_8] = gen_helper_atomic_##NAME##b, \ [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le, \ [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be, \ diff --git a/tcg/tcg.c b/tcg/tcg.c index 3dfd5a7ef2..0d2854550a 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -36,6 +36,7 @@ #include "qemu/qemu-print.h" #include "qemu/timer.h" #include "qemu/cacheflush.h" +#include "qemu/cacheinfo.h" /* Note: the long term plan is to reduce the dependencies on the QEMU CPU definitions. Currently they are used for qemu_ld/st @@ -50,7 +51,7 @@ #else # define ELF_CLASS ELFCLASS64 #endif -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN # define ELF_DATA ELFDATA2MSB #else # define ELF_DATA ELFDATA2LSB @@ -58,6 +59,7 @@ #include "elf.h" #include "exec/log.h" +#include "tcg/tcg-ldst.h" #include "tcg-internal.h" #ifdef CONFIG_TCG_INTERPRETER @@ -318,7 +320,8 @@ static void set_jmp_reset_offset(TCGContext *s, int which) } /* Signal overflow, starting over with fewer guest insns. */ -static void QEMU_NORETURN tcg_raise_tb_overflow(TCGContext *s) +static G_NORETURN +void tcg_raise_tb_overflow(TCGContext *s) { siglongjmp(s->jmp_trans, -2); } @@ -646,9 +649,9 @@ static void tcg_context_init(unsigned max_cpus) if (nargs != 0) { ca->cif.arg_types = ca->args; - for (i = 0; i < nargs; ++i) { - int typecode = extract32(typemask, (i + 1) * 3, 3); - ca->args[i] = typecode_to_ffi[typecode]; + for (int j = 0; j < nargs; ++j) { + int typecode = extract32(typemask, (j + 1) * 3, 3); + ca->args[j] = typecode_to_ffi[typecode]; } } @@ -769,32 +772,35 @@ void tcg_prologue_init(TCGContext *s) #ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) { - FILE *logfile = qemu_log_lock(); - qemu_log("PROLOGUE: [size=%zu]\n", prologue_size); - if (s->data_gen_ptr) { - size_t code_size = s->data_gen_ptr - s->code_gen_ptr; - size_t data_size = prologue_size - code_size; - size_t i; + FILE *logfile = qemu_log_trylock(); + if (logfile) { + fprintf(logfile, "PROLOGUE: [size=%zu]\n", prologue_size); + if (s->data_gen_ptr) { + size_t code_size = s->data_gen_ptr - s->code_gen_ptr; + size_t data_size = prologue_size - code_size; + size_t i; - log_disas(s->code_gen_ptr, code_size); + disas(logfile, s->code_gen_ptr, code_size); - for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) { - if (sizeof(tcg_target_ulong) == 8) { - qemu_log("0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n", - (uintptr_t)s->data_gen_ptr + i, - *(uint64_t *)(s->data_gen_ptr + i)); - } else { - qemu_log("0x%08" PRIxPTR ": .long 0x%08x\n", - (uintptr_t)s->data_gen_ptr + i, - *(uint32_t *)(s->data_gen_ptr + i)); + for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) { + if (sizeof(tcg_target_ulong) == 8) { + fprintf(logfile, + "0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n", + (uintptr_t)s->data_gen_ptr + i, + *(uint64_t *)(s->data_gen_ptr + i)); + } else { + fprintf(logfile, + "0x%08" PRIxPTR ": .long 0x%08x\n", + (uintptr_t)s->data_gen_ptr + i, + *(uint32_t *)(s->data_gen_ptr + i)); + } } + } else { + disas(logfile, s->code_gen_ptr, 
prologue_size); } - } else { - log_disas(s->code_gen_ptr, prologue_size); + fprintf(logfile, "\n"); + qemu_log_unlock(logfile); } - qemu_log("\n"); - qemu_log_flush(); - qemu_log_unlock(logfile); } #endif @@ -896,7 +902,7 @@ TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base, TCGTemp *base_ts = tcgv_ptr_temp(base); TCGTemp *ts = tcg_global_alloc(s); int indirect_reg = 0, bigendian = 0; -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN bigendian = 1; #endif @@ -1037,9 +1043,18 @@ void tcg_temp_free_internal(TCGTemp *ts) TCGContext *s = tcg_ctx; int k, idx; - /* In order to simplify users of tcg_constant_*, silently ignore free. */ - if (ts->kind == TEMP_CONST) { + switch (ts->kind) { + case TEMP_CONST: + /* + * In order to simplify users of tcg_constant_*, + * silently ignore free. + */ return; + case TEMP_NORMAL: + case TEMP_LOCAL: + break; + default: + g_assert_not_reached(); } #if defined(CONFIG_DEBUG_TCG) @@ -1049,7 +1064,6 @@ void tcg_temp_free_internal(TCGTemp *ts) } #endif - tcg_debug_assert(ts->kind < TEMP_GLOBAL); tcg_debug_assert(ts->temp_allocated != 0); ts->temp_allocated = 0; @@ -1420,6 +1434,12 @@ bool tcg_op_supported(TCGOpcode op) return have_vec && TCG_TARGET_HAS_andc_vec; case INDEX_op_orc_vec: return have_vec && TCG_TARGET_HAS_orc_vec; + case INDEX_op_nand_vec: + return have_vec && TCG_TARGET_HAS_nand_vec; + case INDEX_op_nor_vec: + return have_vec && TCG_TARGET_HAS_nor_vec; + case INDEX_op_eqv_vec: + return have_vec && TCG_TARGET_HAS_eqv_vec; case INDEX_op_mul_vec: return have_vec && TCG_TARGET_HAS_mul_vec; case INDEX_op_shli_vec: @@ -1527,39 +1547,7 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) } #endif -#if defined(__sparc__) && !defined(__arch64__) \ - && !defined(CONFIG_TCG_INTERPRETER) - /* We have 64-bit values in one register, but need to pass as two - separate parameters. Split them. 
*/ - int orig_typemask = typemask; - int orig_nargs = nargs; - TCGv_i64 retl, reth; - TCGTemp *split_args[MAX_OPC_PARAM]; - - retl = NULL; - reth = NULL; - typemask = 0; - for (i = real_args = 0; i < nargs; ++i) { - int argtype = extract32(orig_typemask, (i + 1) * 3, 3); - bool is_64bit = (argtype & ~1) == dh_typecode_i64; - - if (is_64bit) { - TCGv_i64 orig = temp_tcgv_i64(args[i]); - TCGv_i32 h = tcg_temp_new_i32(); - TCGv_i32 l = tcg_temp_new_i32(); - tcg_gen_extr_i64_i32(l, h, orig); - split_args[real_args++] = tcgv_i32_temp(h); - typemask |= dh_typecode_i32 << (real_args * 3); - split_args[real_args++] = tcgv_i32_temp(l); - typemask |= dh_typecode_i32 << (real_args * 3); - } else { - split_args[real_args++] = args[i]; - typemask |= argtype << (real_args * 3); - } - } - nargs = real_args; - args = split_args; -#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 +#if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 for (i = 0; i < nargs; ++i) { int argtype = extract32(typemask, (i + 1) * 3, 3); bool is_32bit = (argtype & ~1) == dh_typecode_i32; @@ -1567,11 +1555,11 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) if (is_32bit) { TCGv_i64 temp = tcg_temp_new_i64(); - TCGv_i64 orig = temp_tcgv_i64(args[i]); + TCGv_i32 orig = temp_tcgv_i32(args[i]); if (is_signed) { - tcg_gen_ext32s_i64(temp, orig); + tcg_gen_ext_i32_i64(temp, orig); } else { - tcg_gen_ext32u_i64(temp, orig); + tcg_gen_extu_i32_i64(temp, orig); } args[i] = tcgv_i64_temp(temp); } @@ -1582,24 +1570,8 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) pi = 0; if (ret != NULL) { -#if defined(__sparc__) && !defined(__arch64__) \ - && !defined(CONFIG_TCG_INTERPRETER) - if ((typemask & 6) == dh_typecode_i64) { - /* The 32-bit ABI is going to return the 64-bit value in - the %o0/%o1 register pair. Prepare for this by using - two return temporaries, and reassemble below. */ - retl = tcg_temp_new_i64(); - reth = tcg_temp_new_i64(); - op->args[pi++] = tcgv_i64_arg(reth); - op->args[pi++] = tcgv_i64_arg(retl); - nb_rets = 2; - } else { - op->args[pi++] = temp_arg(ret); - nb_rets = 1; - } -#else if (TCG_TARGET_REG_BITS < 64 && (typemask & 6) == dh_typecode_i64) { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN op->args[pi++] = temp_arg(ret + 1); op->args[pi++] = temp_arg(ret); #else @@ -1611,7 +1583,6 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) op->args[pi++] = temp_arg(ret); nb_rets = 1; } -#endif } else { nb_rets = 0; } @@ -1652,7 +1623,7 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) * have to get more complicated to differentiate between * stack arguments and register arguments. */ -#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP) +#if HOST_BIG_ENDIAN != defined(TCG_TARGET_STACK_GROWSUP) op->args[pi++] = temp_arg(args[i] + 1); op->args[pi++] = temp_arg(args[i]); #else @@ -1674,29 +1645,7 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) tcg_debug_assert(TCGOP_CALLI(op) == real_args); tcg_debug_assert(pi <= ARRAY_SIZE(op->args)); -#if defined(__sparc__) && !defined(__arch64__) \ - && !defined(CONFIG_TCG_INTERPRETER) - /* Free all of the parts we allocated above. 
*/ - for (i = real_args = 0; i < orig_nargs; ++i) { - int argtype = extract32(orig_typemask, (i + 1) * 3, 3); - bool is_64bit = (argtype & ~1) == dh_typecode_i64; - - if (is_64bit) { - tcg_temp_free_internal(args[real_args++]); - tcg_temp_free_internal(args[real_args++]); - } else { - real_args++; - } - } - if ((orig_typemask & 6) == dh_typecode_i64) { - /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them. - Note that describing these as TCGv_i64 eliminates an unnecessary - zero-extension that tcg_gen_concat_i32_i64 would create. */ - tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth); - tcg_temp_free_i64(retl); - tcg_temp_free_i64(reth); - } -#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 +#if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 for (i = 0; i < nargs; ++i) { int argtype = extract32(typemask, (i + 1) * 3, 3); bool is_32bit = (argtype & ~1) == dh_typecode_i32; @@ -1726,6 +1675,7 @@ static void tcg_reg_alloc_start(TCGContext *s) case TEMP_GLOBAL: break; case TEMP_NORMAL: + case TEMP_EBB: val = TEMP_VAL_DEAD; /* fall through */ case TEMP_LOCAL: @@ -1753,6 +1703,9 @@ static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size, case TEMP_LOCAL: snprintf(buf, buf_size, "loc%d", idx - s->nb_globals); break; + case TEMP_EBB: + snprintf(buf, buf_size, "ebb%d", idx - s->nb_globals); + break; case TEMP_NORMAL: snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals); break; @@ -1816,12 +1769,12 @@ static const char * const ldst_name[] = [MO_LESW] = "lesw", [MO_LEUL] = "leul", [MO_LESL] = "lesl", - [MO_LEQ] = "leq", + [MO_LEUQ] = "leq", [MO_BEUW] = "beuw", [MO_BESW] = "besw", [MO_BEUL] = "beul", [MO_BESL] = "besl", - [MO_BEQ] = "beq", + [MO_BEUQ] = "beq", }; static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = { @@ -1862,7 +1815,11 @@ static inline TCGReg tcg_regset_first(TCGRegSet d) } } -static void tcg_dump_ops(TCGContext *s, bool have_prefs) +/* Return only the number of characters output -- no error return. */ +#define ne_fprintf(...) \ + ({ int ret_ = fprintf(__VA_ARGS__); ret_ >= 0 ? ret_ : 0; }) + +static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs) { char buf[128]; TCGOp *op; @@ -1878,7 +1835,7 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) if (c == INDEX_op_insn_start) { nb_oargs = 0; - col += qemu_log("\n ----"); + col += ne_fprintf(f, "\n ----"); for (i = 0; i < TARGET_INSN_START_WORDS; ++i) { target_ulong a; @@ -1887,7 +1844,7 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) #else a = op->args[i]; #endif - col += qemu_log(" " TARGET_FMT_lx, a); + col += ne_fprintf(f, " " TARGET_FMT_lx, a); } } else if (c == INDEX_op_call) { const TCGHelperInfo *info = tcg_call_info(op); @@ -1898,7 +1855,7 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) nb_iargs = TCGOP_CALLI(op); nb_cargs = def->nb_cargs; - col += qemu_log(" %s ", def->name); + col += ne_fprintf(f, " %s ", def->name); /* * Print the function name from TCGHelperInfo, if available. @@ -1906,15 +1863,15 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) * but the actual function pointer comes from the plugin. 
*/ if (func == info->func) { - col += qemu_log("%s", info->name); + col += ne_fprintf(f, "%s", info->name); } else { - col += qemu_log("plugin(%p)", func); + col += ne_fprintf(f, "plugin(%p)", func); } - col += qemu_log(",$0x%x,$%d", info->flags, nb_oargs); + col += ne_fprintf(f, ",$0x%x,$%d", info->flags, nb_oargs); for (i = 0; i < nb_oargs; i++) { - col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf), - op->args[i])); + col += ne_fprintf(f, ",%s", tcg_get_arg_str(s, buf, sizeof(buf), + op->args[i])); } for (i = 0; i < nb_iargs; i++) { TCGArg arg = op->args[nb_oargs + i]; @@ -1922,34 +1879,32 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) if (arg != TCG_CALL_DUMMY_ARG) { t = tcg_get_arg_str(s, buf, sizeof(buf), arg); } - col += qemu_log(",%s", t); + col += ne_fprintf(f, ",%s", t); } } else { - col += qemu_log(" %s ", def->name); + col += ne_fprintf(f, " %s ", def->name); nb_oargs = def->nb_oargs; nb_iargs = def->nb_iargs; nb_cargs = def->nb_cargs; if (def->flags & TCG_OPF_VECTOR) { - col += qemu_log("v%d,e%d,", 64 << TCGOP_VECL(op), - 8 << TCGOP_VECE(op)); + col += ne_fprintf(f, "v%d,e%d,", 64 << TCGOP_VECL(op), + 8 << TCGOP_VECE(op)); } k = 0; for (i = 0; i < nb_oargs; i++) { - if (k != 0) { - col += qemu_log(","); - } - col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf), - op->args[k++])); + const char *sep = k ? "," : ""; + col += ne_fprintf(f, "%s%s", sep, + tcg_get_arg_str(s, buf, sizeof(buf), + op->args[k++])); } for (i = 0; i < nb_iargs; i++) { - if (k != 0) { - col += qemu_log(","); - } - col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf), - op->args[k++])); + const char *sep = k ? "," : ""; + col += ne_fprintf(f, "%s%s", sep, + tcg_get_arg_str(s, buf, sizeof(buf), + op->args[k++])); } switch (c) { case INDEX_op_brcond_i32: @@ -1964,9 +1919,9 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) case INDEX_op_cmpsel_vec: if (op->args[k] < ARRAY_SIZE(cond_name) && cond_name[op->args[k]]) { - col += qemu_log(",%s", cond_name[op->args[k++]]); + col += ne_fprintf(f, ",%s", cond_name[op->args[k++]]); } else { - col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]); + col += ne_fprintf(f, ",$0x%" TCG_PRIlx, op->args[k++]); } i = 1; break; @@ -1976,17 +1931,17 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) case INDEX_op_qemu_ld_i64: case INDEX_op_qemu_st_i64: { - TCGMemOpIdx oi = op->args[k++]; + MemOpIdx oi = op->args[k++]; MemOp op = get_memop(oi); unsigned ix = get_mmuidx(oi); if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) { - col += qemu_log(",$0x%x,%u", op, ix); + col += ne_fprintf(f, ",$0x%x,%u", op, ix); } else { const char *s_al, *s_op; s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT]; s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)]; - col += qemu_log(",%s%s,%u", s_al, s_op, ix); + col += ne_fprintf(f, ",%s%s,%u", s_al, s_op, ix); } i = 1; } @@ -2004,9 +1959,9 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) name = bswap_flag_name[flags]; } if (name) { - col += qemu_log(",%s", name); + col += ne_fprintf(f, ",%s", name); } else { - col += qemu_log(",$0x%" TCG_PRIlx, flags); + col += ne_fprintf(f, ",$0x%" TCG_PRIlx, flags); } i = k = 1; } @@ -2021,49 +1976,42 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: case INDEX_op_brcond2_i32: - col += qemu_log("%s$L%d", k ? "," : "", - arg_label(op->args[k])->id); + col += ne_fprintf(f, "%s$L%d", k ? 
"," : "", + arg_label(op->args[k])->id); i++, k++; break; default: break; } for (; i < nb_cargs; i++, k++) { - col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]); + col += ne_fprintf(f, "%s$0x%" TCG_PRIlx, k ? "," : "", + op->args[k]); } } if (have_prefs || op->life) { - - QemuLogFile *logfile; - - rcu_read_lock(); - logfile = qatomic_rcu_read(&qemu_logfile); - if (logfile) { - for (; col < 40; ++col) { - putc(' ', logfile->fd); - } + for (; col < 40; ++col) { + putc(' ', f); } - rcu_read_unlock(); } if (op->life) { unsigned life = op->life; if (life & (SYNC_ARG * 3)) { - qemu_log(" sync:"); + ne_fprintf(f, " sync:"); for (i = 0; i < 2; ++i) { if (life & (SYNC_ARG << i)) { - qemu_log(" %d", i); + ne_fprintf(f, " %d", i); } } } life /= DEAD_ARG; if (life) { - qemu_log(" dead:"); + ne_fprintf(f, " dead:"); for (i = 0; life; ++i, life >>= 1) { if (life & 1) { - qemu_log(" %d", i); + ne_fprintf(f, " %d", i); } } } @@ -2074,28 +2022,28 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) TCGRegSet set = op->output_pref[i]; if (i == 0) { - qemu_log(" pref="); + ne_fprintf(f, " pref="); } else { - qemu_log(","); + ne_fprintf(f, ","); } if (set == 0) { - qemu_log("none"); + ne_fprintf(f, "none"); } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) { - qemu_log("all"); + ne_fprintf(f, "all"); #ifdef CONFIG_DEBUG_TCG } else if (tcg_regset_single(set)) { TCGReg reg = tcg_regset_first(set); - qemu_log("%s", tcg_target_reg_names[reg]); + ne_fprintf(f, "%s", tcg_target_reg_names[reg]); #endif } else if (TCG_TARGET_NB_REGS <= 32) { - qemu_log("%#x", (uint32_t)set); + ne_fprintf(f, "0x%x", (uint32_t)set); } else { - qemu_log("%#" PRIx64, (uint64_t)set); + ne_fprintf(f, "0x%" PRIx64, (uint64_t)set); } } } - qemu_log("\n"); + putc('\n', f); } } @@ -2441,6 +2389,7 @@ static void la_bb_end(TCGContext *s, int ng, int nt) state = TS_DEAD | TS_MEM; break; case TEMP_NORMAL: + case TEMP_EBB: case TEMP_CONST: state = TS_DEAD; break; @@ -2468,8 +2417,9 @@ static void la_global_sync(TCGContext *s, int ng) } /* - * liveness analysis: conditional branch: all temps are dead, - * globals and local temps should be synced. + * liveness analysis: conditional branch: all temps are dead unless + * explicitly live-across-conditional-branch, globals and local temps + * should be synced. */ static void la_bb_sync(TCGContext *s, int ng, int nt) { @@ -2490,6 +2440,7 @@ static void la_bb_sync(TCGContext *s, int ng, int nt) case TEMP_NORMAL: s->temps[i].state = TS_DEAD; break; + case TEMP_EBB: case TEMP_CONST: continue; default: @@ -2860,6 +2811,7 @@ static bool liveness_pass_2(TCGContext *s) TCGTemp *dts = tcg_temp_alloc(s); dts->type = its->type; dts->base_type = its->base_type; + dts->kind = TEMP_EBB; its->state_ptr = dts; } else { its->state_ptr = NULL; @@ -3133,7 +3085,13 @@ static void temp_allocate_frame(TCGContext *s, TCGTemp *ts) g_assert_not_reached(); } - assert(align <= TCG_TARGET_STACK_ALIGN); + /* + * Assume the stack is sufficiently aligned. + * This affects e.g. ARM NEON, where we have 8 byte stack alignment + * and do not require 16 byte vector alignment. This seems slightly + * easier than fully parameterizing the above switch statement. + */ + align = MIN(TCG_TARGET_STACK_ALIGN, align); off = ROUND_UP(s->current_frame_offset, align); /* If we've exhausted the stack frame, restart with a smaller TB. 
*/ @@ -3166,6 +3124,7 @@ static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead) new_type = TEMP_VAL_MEM; break; case TEMP_NORMAL: + case TEMP_EBB: new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD; break; case TEMP_CONST: @@ -3414,6 +3373,7 @@ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs) temp_save(s, ts, allocated_regs); break; case TEMP_NORMAL: + case TEMP_EBB: /* The liveness analysis already ensures that temps are dead. Keep an tcg_debug_assert for safety. */ tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD); @@ -3431,8 +3391,9 @@ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs) } /* - * At a conditional branch, we assume all temporaries are dead and - * all globals and local temps are synced to their location. + * At a conditional branch, we assume all temporaries are dead unless + * explicitly live-across-conditional-branch; all globals and local + * temps are synced to their location. */ static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs) { @@ -3451,6 +3412,7 @@ static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs) case TEMP_NORMAL: tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD); break; + case TEMP_EBB: case TEMP_CONST: break; default: @@ -3659,7 +3621,7 @@ static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op) /* fall through */ case TEMP_VAL_MEM: -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN endian_fixup = itype == TCG_TYPE_I32 ? 4 : 8; endian_fixup -= 1 << vece; #else @@ -3940,7 +3902,7 @@ static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op) if (!itsh->mem_coherent) { temp_sync(s, itsh, s->reserved_regs, 0, 0); } -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN TCGTemp *its = itsh; #else TCGTemp *its = itsl; @@ -4190,15 +4152,15 @@ static void tcg_profile_snapshot_table(TCGProfile *prof) tcg_profile_snapshot(prof, false, true); } -void tcg_dump_op_count(void) +void tcg_dump_op_count(GString *buf) { TCGProfile prof = {}; int i; tcg_profile_snapshot_table(&prof); for (i = 0; i < NB_OPS; i++) { - qemu_printf("%s %" PRId64 "\n", tcg_op_defs[i].name, - prof.table_op_count[i]); + g_string_append_printf(buf, "%s %" PRId64 "\n", tcg_op_defs[i].name, + prof.table_op_count[i]); } } @@ -4217,9 +4179,9 @@ int64_t tcg_cpu_exec_time(void) return ret; } #else -void tcg_dump_op_count(void) +void tcg_dump_op_count(GString *buf) { - qemu_printf("[TCG profiler not compiled]\n"); + g_string_append_printf(buf, "[TCG profiler not compiled]\n"); } int64_t tcg_cpu_exec_time(void) @@ -4230,7 +4192,7 @@ int64_t tcg_cpu_exec_time(void) #endif -int tcg_gen_code(TCGContext *s, TranslationBlock *tb) +int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start) { #ifdef CONFIG_PROFILER TCGProfile *prof = &s->prof; @@ -4260,12 +4222,14 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) #ifdef DEBUG_DISAS if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP) - && qemu_log_in_addr_range(tb->pc))) { - FILE *logfile = qemu_log_lock(); - qemu_log("OP:\n"); - tcg_dump_ops(s, false); - qemu_log("\n"); - qemu_log_unlock(logfile); + && qemu_log_in_addr_range(pc_start))) { + FILE *logfile = qemu_log_trylock(); + if (logfile) { + fprintf(logfile, "OP:\n"); + tcg_dump_ops(s, logfile, false); + fprintf(logfile, "\n"); + qemu_log_unlock(logfile); + } } #endif @@ -4305,12 +4269,14 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) if (s->nb_indirects > 0) { #ifdef DEBUG_DISAS if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND) - && 
qemu_log_in_addr_range(tb->pc))) { - FILE *logfile = qemu_log_lock(); - qemu_log("OP before indirect lowering:\n"); - tcg_dump_ops(s, false); - qemu_log("\n"); - qemu_log_unlock(logfile); + && qemu_log_in_addr_range(pc_start))) { + FILE *logfile = qemu_log_trylock(); + if (logfile) { + fprintf(logfile, "OP before indirect lowering:\n"); + tcg_dump_ops(s, logfile, false); + fprintf(logfile, "\n"); + qemu_log_unlock(logfile); + } } #endif /* Replace indirect temps with direct temps. */ @@ -4326,15 +4292,29 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) #ifdef DEBUG_DISAS if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT) - && qemu_log_in_addr_range(tb->pc))) { - FILE *logfile = qemu_log_lock(); - qemu_log("OP after optimization and liveness analysis:\n"); - tcg_dump_ops(s, true); - qemu_log("\n"); - qemu_log_unlock(logfile); + && qemu_log_in_addr_range(pc_start))) { + FILE *logfile = qemu_log_trylock(); + if (logfile) { + fprintf(logfile, "OP after optimization and liveness analysis:\n"); + tcg_dump_ops(s, logfile, true); + fprintf(logfile, "\n"); + qemu_log_unlock(logfile); + } } #endif + /* Initialize goto_tb jump offsets. */ + tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID; + tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID; + tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset; + if (TCG_TARGET_HAS_direct_jump) { + tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg; + tcg_ctx->tb_jmp_target_addr = NULL; + } else { + tcg_ctx->tb_jmp_insn_offset = NULL; + tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg; + } + tcg_reg_alloc_start(s); /* @@ -4457,7 +4437,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) } #ifdef CONFIG_PROFILER -void tcg_dump_info(void) +void tcg_dump_info(GString *buf) { TCGProfile prof = {}; const TCGProfile *s; @@ -4471,53 +4451,59 @@ void tcg_dump_info(void) tb_div_count = tb_count ? tb_count : 1; tot = s->interm_time + s->code_time; - qemu_printf("JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n", - tot, tot / 2.4e9); - qemu_printf("translated TBs %" PRId64 " (aborted=%" PRId64 - " %0.1f%%)\n", - tb_count, s->tb_count1 - tb_count, - (double)(s->tb_count1 - s->tb_count) - / (s->tb_count1 ? s->tb_count1 : 1) * 100.0); - qemu_printf("avg ops/TB %0.1f max=%d\n", - (double)s->op_count / tb_div_count, s->op_count_max); - qemu_printf("deleted ops/TB %0.2f\n", - (double)s->del_op_count / tb_div_count); - qemu_printf("avg temps/TB %0.2f max=%d\n", - (double)s->temp_count / tb_div_count, s->temp_count_max); - qemu_printf("avg host code/TB %0.1f\n", - (double)s->code_out_len / tb_div_count); - qemu_printf("avg search data/TB %0.1f\n", - (double)s->search_out_len / tb_div_count); + g_string_append_printf(buf, "JIT cycles %" PRId64 + " (%0.3f s at 2.4 GHz)\n", + tot, tot / 2.4e9); + g_string_append_printf(buf, "translated TBs %" PRId64 + " (aborted=%" PRId64 " %0.1f%%)\n", + tb_count, s->tb_count1 - tb_count, + (double)(s->tb_count1 - s->tb_count) + / (s->tb_count1 ? 
s->tb_count1 : 1) * 100.0); + g_string_append_printf(buf, "avg ops/TB %0.1f max=%d\n", + (double)s->op_count / tb_div_count, s->op_count_max); + g_string_append_printf(buf, "deleted ops/TB %0.2f\n", + (double)s->del_op_count / tb_div_count); + g_string_append_printf(buf, "avg temps/TB %0.2f max=%d\n", + (double)s->temp_count / tb_div_count, + s->temp_count_max); + g_string_append_printf(buf, "avg host code/TB %0.1f\n", + (double)s->code_out_len / tb_div_count); + g_string_append_printf(buf, "avg search data/TB %0.1f\n", + (double)s->search_out_len / tb_div_count); - qemu_printf("cycles/op %0.1f\n", - s->op_count ? (double)tot / s->op_count : 0); - qemu_printf("cycles/in byte %0.1f\n", - s->code_in_len ? (double)tot / s->code_in_len : 0); - qemu_printf("cycles/out byte %0.1f\n", - s->code_out_len ? (double)tot / s->code_out_len : 0); - qemu_printf("cycles/search byte %0.1f\n", - s->search_out_len ? (double)tot / s->search_out_len : 0); + g_string_append_printf(buf, "cycles/op %0.1f\n", + s->op_count ? (double)tot / s->op_count : 0); + g_string_append_printf(buf, "cycles/in byte %0.1f\n", + s->code_in_len ? (double)tot / s->code_in_len : 0); + g_string_append_printf(buf, "cycles/out byte %0.1f\n", + s->code_out_len ? (double)tot / s->code_out_len : 0); + g_string_append_printf(buf, "cycles/search byte %0.1f\n", + s->search_out_len ? + (double)tot / s->search_out_len : 0); if (tot == 0) { tot = 1; } - qemu_printf(" gen_interm time %0.1f%%\n", - (double)s->interm_time / tot * 100.0); - qemu_printf(" gen_code time %0.1f%%\n", - (double)s->code_time / tot * 100.0); - qemu_printf("optim./code time %0.1f%%\n", - (double)s->opt_time / (s->code_time ? s->code_time : 1) - * 100.0); - qemu_printf("liveness/code time %0.1f%%\n", - (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0); - qemu_printf("cpu_restore count %" PRId64 "\n", - s->restore_count); - qemu_printf(" avg cycles %0.1f\n", - s->restore_count ? (double)s->restore_time / s->restore_count : 0); + g_string_append_printf(buf, " gen_interm time %0.1f%%\n", + (double)s->interm_time / tot * 100.0); + g_string_append_printf(buf, " gen_code time %0.1f%%\n", + (double)s->code_time / tot * 100.0); + g_string_append_printf(buf, "optim./code time %0.1f%%\n", + (double)s->opt_time / (s->code_time ? + s->code_time : 1) + * 100.0); + g_string_append_printf(buf, "liveness/code time %0.1f%%\n", + (double)s->la_time / (s->code_time ? + s->code_time : 1) * 100.0); + g_string_append_printf(buf, "cpu_restore count %" PRId64 "\n", + s->restore_count); + g_string_append_printf(buf, " avg cycles %0.1f\n", + s->restore_count ? + (double)s->restore_time / s->restore_count : 0); } #else -void tcg_dump_info(void) +void tcg_dump_info(GString *buf) { - qemu_printf("[TCG profiler not compiled]\n"); + g_string_append_printf(buf, "[TCG profiler not compiled]\n"); } #endif @@ -4759,7 +4745,8 @@ static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size, /* Enable this block to be able to debug the ELF image file creation. One can use readelf, objdump, or other inspection utilities. */ { - FILE *f = fopen("/tmp/qemu.jit", "w+b"); + g_autofree char *jit = g_strdup_printf("%s/qemu.jit", g_get_tmp_dir()); + FILE *f = fopen(jit, "w+b"); if (f) { if (fwrite(img, img_size, 1, f) != img_size) { /* Avoid stupid unused return value warning for fwrite. 
*/ diff --git a/tcg/tci.c b/tcg/tci.c index b672c7cae5..bdfac83492 100644 --- a/tcg/tci.c +++ b/tcg/tci.c @@ -18,10 +18,10 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "tcg/tcg.h" /* MAX_OPC_PARAM_IARGS */ #include "exec/cpu_ldst.h" #include "tcg/tcg-op.h" +#include "tcg/tcg-ldst.h" #include "qemu/compiler.h" #include @@ -61,7 +61,7 @@ static uint64_t tci_uint64(uint32_t high, uint32_t low) * i = immediate (uint32_t) * I = immediate (tcg_target_ulong) * l = label or pointer - * m = immediate (TCGMemOpIdx) + * m = immediate (MemOpIdx) * n = immediate (call return length) * r = register * s = signed ldst offset @@ -105,7 +105,7 @@ static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1) } static void tci_args_rrm(uint32_t insn, TCGReg *r0, - TCGReg *r1, TCGMemOpIdx *m2) + TCGReg *r1, MemOpIdx *m2) { *r0 = extract32(insn, 8, 4); *r1 = extract32(insn, 12, 4); @@ -145,7 +145,7 @@ static void tci_args_rrrc(uint32_t insn, } static void tci_args_rrrm(uint32_t insn, - TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGMemOpIdx *m3) + TCGReg *r0, TCGReg *r1, TCGReg *r2, MemOpIdx *m3) { *r0 = extract32(insn, 8, 4); *r1 = extract32(insn, 12, 4); @@ -289,13 +289,13 @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition) } static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, - TCGMemOpIdx oi, const void *tb_ptr) + MemOpIdx oi, const void *tb_ptr) { - MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE); + MemOp mop = get_memop(oi); uintptr_t ra = (uintptr_t)tb_ptr; #ifdef CONFIG_SOFTMMU - switch (mop) { + switch (mop & (MO_BSWAP | MO_SSIZE)) { case MO_UB: return helper_ret_ldub_mmu(env, taddr, oi, ra); case MO_SB: @@ -308,7 +308,7 @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, return helper_le_ldul_mmu(env, taddr, oi, ra); case MO_LESL: return helper_le_ldsl_mmu(env, taddr, oi, ra); - case MO_LEQ: + case MO_LEUQ: return helper_le_ldq_mmu(env, taddr, oi, ra); case MO_BEUW: return helper_be_lduw_mmu(env, taddr, oi, ra); @@ -318,17 +318,21 @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, return helper_be_ldul_mmu(env, taddr, oi, ra); case MO_BESL: return helper_be_ldsl_mmu(env, taddr, oi, ra); - case MO_BEQ: + case MO_BEUQ: return helper_be_ldq_mmu(env, taddr, oi, ra); default: g_assert_not_reached(); } #else void *haddr = g2h(env_cpu(env), taddr); + unsigned a_mask = (1u << get_alignment_bits(mop)) - 1; uint64_t ret; set_helper_retaddr(ra); - switch (mop) { + if (taddr & a_mask) { + helper_unaligned_ld(env, taddr); + } + switch (mop & (MO_BSWAP | MO_SSIZE)) { case MO_UB: ret = ldub_p(haddr); break; @@ -347,7 +351,7 @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, case MO_LESL: ret = (int32_t)ldl_le_p(haddr); break; - case MO_LEQ: + case MO_LEUQ: ret = ldq_le_p(haddr); break; case MO_BEUW: @@ -362,7 +366,7 @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, case MO_BESL: ret = (int32_t)ldl_be_p(haddr); break; - case MO_BEQ: + case MO_BEUQ: ret = ldq_be_p(haddr); break; default: @@ -374,13 +378,13 @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, } static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val, - TCGMemOpIdx oi, const void *tb_ptr) + MemOpIdx oi, const void *tb_ptr) { - MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE); + MemOp mop = get_memop(oi); uintptr_t ra = (uintptr_t)tb_ptr; #ifdef CONFIG_SOFTMMU - switch (mop) { + switch (mop & (MO_BSWAP | MO_SIZE)) { case MO_UB: helper_ret_stb_mmu(env, taddr, val, oi, ra); break; @@ 
-390,7 +394,7 @@ static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val, case MO_LEUL: helper_le_stl_mmu(env, taddr, val, oi, ra); break; - case MO_LEQ: + case MO_LEUQ: helper_le_stq_mmu(env, taddr, val, oi, ra); break; case MO_BEUW: @@ -399,7 +403,7 @@ static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val, case MO_BEUL: helper_be_stl_mmu(env, taddr, val, oi, ra); break; - case MO_BEQ: + case MO_BEUQ: helper_be_stq_mmu(env, taddr, val, oi, ra); break; default: @@ -407,9 +411,13 @@ static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val, } #else void *haddr = g2h(env_cpu(env), taddr); + unsigned a_mask = (1u << get_alignment_bits(mop)) - 1; set_helper_retaddr(ra); - switch (mop) { + if (taddr & a_mask) { + helper_unaligned_st(env, taddr); + } + switch (mop & (MO_BSWAP | MO_SIZE)) { case MO_UB: stb_p(haddr, val); break; @@ -419,7 +427,7 @@ static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val, case MO_LEUL: stl_le_p(haddr, val); break; - case MO_LEQ: + case MO_LEUQ: stq_le_p(haddr, val); break; case MO_BEUW: @@ -428,7 +436,7 @@ static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val, case MO_BEUL: stl_be_p(haddr, val); break; - case MO_BEQ: + case MO_BEUQ: stq_be_p(haddr, val); break; default: @@ -482,7 +490,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, uint32_t tmp32; uint64_t tmp64; uint64_t T1, T2; - TCGMemOpIdx oi; + MemOpIdx oi; int32_t ofs; void *ptr; @@ -1148,7 +1156,7 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info) tcg_target_ulong i1; int32_t s2; TCGCond c; - TCGMemOpIdx oi; + MemOpIdx oi; uint8_t pos, len; void *ptr; diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc index 0cb16aaa81..f3d7441e06 100644 --- a/tcg/tci/tcg-target.c.inc +++ b/tcg/tci/tcg-target.c.inc @@ -197,7 +197,7 @@ static const int tcg_target_reg_alloc_order[] = { TCG_REG_R0, }; -#if MAX_OPC_PARAM_IARGS != 6 +#if MAX_OPC_PARAM_IARGS != 7 # error Fix needed, number of supported input arguments changed! #endif @@ -790,14 +790,13 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base, intptr_t offset) { - stack_bounds_check(base, offset); switch (type) { case TCG_TYPE_I32: - tcg_out_op_rrs(s, INDEX_op_st_i32, val, base, offset); + tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset); break; #if TCG_TARGET_REG_BITS == 64 case TCG_TYPE_I64: - tcg_out_op_rrs(s, INDEX_op_st_i64, val, base, offset); + tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset); break; #endif default: @@ -824,13 +823,6 @@ static void tcg_out_nop_fill(tcg_insn_unit *p, int count) static void tcg_target_init(TCGContext *s) { -#if defined(CONFIG_DEBUG_TCG_INTERPRETER) - const char *envval = getenv("DEBUG_TCG"); - if (envval) { - qemu_set_log(strtol(envval, NULL, 0)); - } -#endif - /* The current code uses uint8_t for tcg operations. */ tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX); diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h index 033e613f24..ceb36c4f7a 100644 --- a/tcg/tci/tcg-target.h +++ b/tcg/tci/tcg-target.h @@ -53,11 +53,6 @@ # error Unknown pointer size for tci target #endif -#ifdef CONFIG_DEBUG_TCG -/* Enable debug output. */ -#define CONFIG_DEBUG_TCG_INTERPRETER -#endif - /* Optional instructions. 
*/ #define TCG_TARGET_HAS_bswap16_i32 1 diff --git a/tests/Makefile.include b/tests/Makefile.include index 6e16c05f10..9422ddaece 100644 --- a/tests/Makefile.include +++ b/tests/Makefile.include @@ -3,28 +3,28 @@ .PHONY: check-help check-help: @echo "Regression testing targets:" - @echo " $(MAKE) check Run block, qapi-schema, unit, softfloat, qtest and decodetree tests" - @echo " $(MAKE) bench Run speed tests" + @echo " $(MAKE) check Run block, qapi-schema, unit, softfloat, qtest and decodetree tests" + @echo " $(MAKE) bench Run speed tests" @echo @echo "Individual test suites:" - @echo " $(MAKE) check-qtest-TARGET Run qtest tests for given target" - @echo " $(MAKE) check-qtest Run qtest tests" - @echo " $(MAKE) check-unit Run qobject tests" - @echo " $(MAKE) check-qapi-schema Run QAPI schema tests" - @echo " $(MAKE) check-block Run block tests" + @echo " $(MAKE) check-qtest-TARGET Run qtest tests for given target" + @echo " $(MAKE) check-qtest Run qtest tests" + @echo " $(MAKE) check-unit Run qobject tests" + @echo " $(MAKE) check-qapi-schema Run QAPI schema tests" + @echo " $(MAKE) check-block Run block tests" ifneq ($(filter $(all-check-targets), check-softfloat),) - @echo " $(MAKE) check-tcg Run TCG tests" - @echo " $(MAKE) check-softfloat Run FPU emulation tests" + @echo " $(MAKE) check-tcg Run TCG tests" + @echo " $(MAKE) check-softfloat Run FPU emulation tests" endif - @echo " $(MAKE) check-acceptance Run all acceptance (functional) tests" + @echo " $(MAKE) check-avocado Run avocado (integration) tests for currently configured targets" @echo - @echo " $(MAKE) check-report.tap Generates an aggregated TAP test report" - @echo " $(MAKE) check-venv Creates a Python venv for tests" - @echo " $(MAKE) check-clean Clean the tests and related data" + @echo " $(MAKE) check-report.junit.xml Generates an aggregated XML test report" + @echo " $(MAKE) check-venv Creates a Python venv for tests" + @echo " $(MAKE) check-clean Clean the tests and related data" @echo @echo "The following are useful for CI builds" - @echo " $(MAKE) check-build Build most test binaris" - @echo " $(MAKE) get-vm-images Downloads all images used by acceptance tests, according to configured targets (~350 MB each, 1.5 GB max)" + @echo " $(MAKE) check-build Build most test binaries" + @echo " $(MAKE) get-vm-images Downloads all images used by avocado tests, according to configured targets (~350 MB each, 1.5 GB max)" @echo @echo @echo "The variable SPEED can be set to control the gtester speed setting." 
@@ -34,73 +34,87 @@ endif ifneq ($(wildcard config-host.mak),) export SRC_PATH -# Get the list of all supported sysemu targets -SYSEMU_TARGET_LIST := $(subst -softmmu.mak,,$(notdir \ - $(wildcard $(SRC_PATH)/configs/*-softmmu.mak))) - SPEED = quick -# Build up our target list from the filtered list of ninja targets -TARGETS=$(patsubst libqemu-%.fa, %, $(filter libqemu-%.fa, $(ninja-targets))) - # Per guest TCG tests -BUILD_TCG_TARGET_RULES=$(patsubst %,build-tcg-tests-%, $(TARGETS)) -CLEAN_TCG_TARGET_RULES=$(patsubst %,clean-tcg-tests-%, $(TARGETS)) -RUN_TCG_TARGET_RULES=$(patsubst %,run-tcg-tests-%, $(TARGETS)) +BUILD_TCG_TARGET_RULES=$(patsubst %,build-tcg-tests-%, $(TCG_TESTS_TARGETS)) +CLEAN_TCG_TARGET_RULES=$(patsubst %,clean-tcg-tests-%, $(TCG_TESTS_TARGETS)) +DISTCLEAN_TCG_TARGET_RULES=$(patsubst %,distclean-tcg-tests-%, $(TCG_TESTS_TARGETS)) +RUN_TCG_TARGET_RULES=$(patsubst %,run-tcg-tests-%, $(TCG_TESTS_TARGETS)) -# Probe for the Docker Builds needed for each build -$(foreach PROBE_TARGET,$(TARGET_DIRS), \ - $(eval -include $(SRC_PATH)/tests/tcg/Makefile.prereqs)) +$(foreach TARGET,$(TCG_TESTS_TARGETS), \ + $(eval $(BUILD_DIR)/tests/tcg/config-$(TARGET).mak: config-host.mak)) -$(BUILD_TCG_TARGET_RULES): build-tcg-tests-%: $(if $(CONFIG_PLUGIN),test-plugins) - $(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) \ - -f $(SRC_PATH)/tests/tcg/Makefile.qemu \ - SRC_PATH=$(SRC_PATH) \ - V="$(V)" TARGET="$*" guest-tests, \ - "BUILD", "TCG tests for $*") +.PHONY: $(TCG_TESTS_TARGETS:%=build-tcg-tests-%) +$(TCG_TESTS_TARGETS:%=build-tcg-tests-%): build-tcg-tests-%: $(BUILD_DIR)/tests/tcg/config-%.mak + $(call quiet-command, \ + $(MAKE) -C tests/tcg/$* $(SUBDIR_MAKEFLAGS), \ + "BUILD","$* guest-tests") -$(RUN_TCG_TARGET_RULES): run-tcg-tests-%: build-tcg-tests-% all - $(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) \ - -f $(SRC_PATH)/tests/tcg/Makefile.qemu \ - SRC_PATH=$(SRC_PATH) SPEED="$(SPEED)" \ - V="$(V)" TARGET="$*" run-guest-tests, \ - "RUN", "TCG tests for $*") +.PHONY: $(TCG_TESTS_TARGETS:%=run-tcg-tests-%) +$(TCG_TESTS_TARGETS:%=run-tcg-tests-%): run-tcg-tests-%: build-tcg-tests-% + $(call quiet-command, \ + $(MAKE) -C tests/tcg/$* $(SUBDIR_MAKEFLAGS) SPEED=$(SPEED) run, \ + "RUN", "$* guest-tests") -$(CLEAN_TCG_TARGET_RULES): clean-tcg-tests-%: - $(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) \ - -f $(SRC_PATH)/tests/tcg/Makefile.qemu \ - SRC_PATH=$(SRC_PATH) TARGET="$*" clean-guest-tests, \ - "CLEAN", "TCG tests for $*") +.PHONY: $(TCG_TESTS_TARGETS:%=clean-tcg-tests-%) +$(TCG_TESTS_TARGETS:%=clean-tcg-tests-%): clean-tcg-tests-%: + $(call quiet-command, \ + $(MAKE) -C tests/tcg/$* $(SUBDIR_MAKEFLAGS) clean, \ + "CLEAN", "$* guest-tests") + +.PHONY: $(TCG_TESTS_TARGETS:%=distclean-tcg-tests-%) +$(TCG_TESTS_TARGETS:%=distclean-tcg-tests-%): distclean-tcg-tests-%: + $(call quiet-command, \ + $(MAKE) -C tests/tcg/$* $(SUBDIR_MAKEFLAGS) distclean, \ + "CLEAN", "$* guest-tests") .PHONY: build-tcg build-tcg: $(BUILD_TCG_TARGET_RULES) .PHONY: check-tcg +.ninja-goals.check-tcg = all $(if $(CONFIG_PLUGIN),test-plugins) check-tcg: $(RUN_TCG_TARGET_RULES) .PHONY: clean-tcg clean-tcg: $(CLEAN_TCG_TARGET_RULES) +.PHONY: distclean-tcg +distclean-tcg: $(DISTCLEAN_TCG_TARGET_RULES) + # Python venv for running tests -.PHONY: check-venv check-acceptance +.PHONY: check-venv check-avocado check-acceptance check-acceptance-deprecated-warning + +# Build up our target list from the filtered list of ninja targets +TARGETS=$(patsubst libqemu-%.fa, %, $(filter libqemu-%.fa, $(ninja-targets))) 
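For reference, the TARGETS assignment above does nothing more than strip the libqemu-*.fa wrapper from the ninja target names. A rough Python sketch of the same filter/patsubst pair, with made-up target names, purely for illustration:

    # Rough Python equivalent of:
    #   TARGETS=$(patsubst libqemu-%.fa, %, $(filter libqemu-%.fa, $(ninja-targets)))
    # The ninja target names below are examples only.
    ninja_targets = ["libqemu-x86_64-softmmu.fa", "libqemu-arm-linux-user.fa", "qemu-img"]
    targets = [name[len("libqemu-"):-len(".fa")]
               for name in ninja_targets
               if name.startswith("libqemu-") and name.endswith(".fa")]
    print(targets)  # ['x86_64-softmmu', 'arm-linux-user']
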
TESTS_VENV_DIR=$(BUILD_DIR)/tests/venv TESTS_VENV_REQ=$(SRC_PATH)/tests/requirements.txt TESTS_RESULTS_DIR=$(BUILD_DIR)/tests/results +TESTS_PYTHON=$(TESTS_VENV_DIR)/bin/python3 +ifndef AVOCADO_TESTS + AVOCADO_TESTS=tests/avocado +endif # Controls the output generated by Avocado when running tests. # Any number of command separated loggers are accepted. For more # information please refer to "avocado --help". AVOCADO_SHOW=app -AVOCADO_TAGS=$(patsubst %-softmmu,-t arch:%, $(filter %-softmmu,$(TARGETS))) +ifndef AVOCADO_TAGS + AVOCADO_CMDLINE_TAGS=$(patsubst %-softmmu,-t arch:%, \ + $(filter %-softmmu,$(TARGETS))) +else + AVOCADO_CMDLINE_TAGS=$(addprefix -t , $(AVOCADO_TAGS)) +endif + +quiet-venv-pip = $(quiet-@)$(call quiet-command-run, \ + $(TESTS_PYTHON) -m pip -q --disable-pip-version-check $1, \ + "VENVPIP","$1") $(TESTS_VENV_DIR): $(TESTS_VENV_REQ) - $(call quiet-command, \ - $(PYTHON) -m venv $@, \ - VENV, $@) - $(call quiet-command, \ - $(TESTS_VENV_DIR)/bin/python -m pip -q install -r $(TESTS_VENV_REQ), \ - PIP, $(TESTS_VENV_REQ)) + $(call quiet-command, $(PYTHON) -m venv $@, VENV, $@) + $(call quiet-venv-pip,install -e "$(SRC_PATH)/python/") + $(call quiet-venv-pip,install -r $(TESTS_VENV_REQ)) $(call quiet-command, touch $@) $(TESTS_RESULTS_DIR): @@ -117,42 +131,41 @@ FEDORA_31_DOWNLOAD=$(filter $(FEDORA_31_ARCHES),$(FEDORA_31_ARCHES_CANDIDATES)) # download one specific Fedora 31 image get-vm-image-fedora-31-%: check-venv $(call quiet-command, \ - $(TESTS_VENV_DIR)/bin/python -m avocado vmimage get \ + $(TESTS_PYTHON) -m avocado vmimage get \ --distro=fedora --distro-version=31 --arch=$*, \ - "AVOCADO", "Downloading acceptance tests VM image for $*") + "AVOCADO", "Downloading avocado tests VM image for $*") # download all vm images, according to defined targets get-vm-images: check-venv $(patsubst %,get-vm-image-fedora-31-%, $(FEDORA_31_DOWNLOAD)) -check-acceptance: check-venv $(TESTS_RESULTS_DIR) get-vm-images +check-avocado: check-venv $(TESTS_RESULTS_DIR) get-vm-images $(call quiet-command, \ - $(TESTS_VENV_DIR)/bin/python -m avocado \ + $(TESTS_PYTHON) -m avocado \ --show=$(AVOCADO_SHOW) run --job-results-dir=$(TESTS_RESULTS_DIR) \ - --filter-by-tags-include-empty --filter-by-tags-include-empty-key \ - $(AVOCADO_TAGS) \ - $(if $(GITLAB_CI),,--failfast) tests/acceptance, \ - "AVOCADO", "tests/acceptance") + $(if $(AVOCADO_TAGS),, --filter-by-tags-include-empty \ + --filter-by-tags-include-empty-key) \ + $(AVOCADO_CMDLINE_TAGS) \ + $(if $(GITLAB_CI),,--failfast) $(AVOCADO_TESTS), \ + "AVOCADO", "tests/avocado") + +check-acceptance-deprecated-warning: + @echo + @echo "Note '$(MAKE) check-acceptance' is deprecated, use '$(MAKE) check-avocado' instead." 
+ @echo + +check-acceptance: check-acceptance-deprecated-warning | check-avocado # Consolidated targets -.PHONY: check-block check check-clean get-vm-images +.PHONY: check check-clean get-vm-images check: -ifeq ($(CONFIG_TOOLS)$(CONFIG_POSIX),yy) -QEMU_IOTESTS_HELPERS-$(CONFIG_LINUX) = tests/qemu-iotests/socket_scm_helper$(EXESUF) -check: check-block -export PYTHON -check-block: $(SRC_PATH)/tests/check-block.sh qemu-img$(EXESUF) \ - qemu-io$(EXESUF) qemu-nbd$(EXESUF) $(QEMU_IOTESTS_HELPERS-y) \ - $(filter qemu-system-%, $(ninja-targets)) - @$< -endif - -check-build: $(QEMU_IOTESTS_HELPERS-y) +check-build: run-ninja check-clean: rm -rf $(TESTS_VENV_DIR) $(TESTS_RESULTS_DIR) -clean: check-clean +clean: check-clean clean-tcg +distclean: distclean-tcg endif diff --git a/tests/acceptance/README.rst b/tests/acceptance/README.rst deleted file mode 100644 index 89260faed6..0000000000 --- a/tests/acceptance/README.rst +++ /dev/null @@ -1,10 +0,0 @@ -============================================ -Acceptance tests using the Avocado Framework -============================================ - -This directory contains functional tests, also known as acceptance -level tests. They're usually higher level, and may interact with -external resources and with various guest operating systems. - -For more information, please refer to ``docs/devel/testing.rst``, -section "Acceptance tests using the Avocado Framework". diff --git a/tests/acceptance/machine_ppc.py b/tests/acceptance/machine_ppc.py deleted file mode 100644 index a836e2496f..0000000000 --- a/tests/acceptance/machine_ppc.py +++ /dev/null @@ -1,69 +0,0 @@ -# Test that Linux kernel boots on ppc machines and check the console -# -# Copyright (c) 2018, 2020 Red Hat, Inc. -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. 
- -from avocado.utils import archive -from avocado_qemu import Test -from avocado_qemu import wait_for_console_pattern - -class PpcMachine(Test): - - timeout = 90 - KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' - panic_message = 'Kernel panic - not syncing' - - def test_ppc64_pseries(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:pseries - """ - kernel_url = ('https://archives.fedoraproject.org/pub/archive' - '/fedora-secondary/releases/29/Everything/ppc64le/os' - '/ppc/ppc64/vmlinuz') - kernel_hash = '3fe04abfc852b66653b8c3c897a59a689270bc77' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - self.vm.set_console() - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=hvc0' - self.vm.add_args('-kernel', kernel_path, - '-append', kernel_command_line) - self.vm.launch() - console_pattern = 'Kernel command line: %s' % kernel_command_line - wait_for_console_pattern(self, console_pattern, self.panic_message) - - def test_ppc_mpc8544ds(self): - """ - :avocado: tags=arch:ppc - :avocado: tags=machine:mpc8544ds - """ - tar_url = ('https://www.qemu-advent-calendar.org' - '/2020/download/day17.tar.gz') - tar_hash = '7a5239542a7c4257aa4d3b7f6ddf08fb6775c494' - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - archive.extract(file_path, self.workdir) - self.vm.set_console() - self.vm.add_args('-kernel', self.workdir + '/creek/creek.bin') - self.vm.launch() - wait_for_console_pattern(self, 'QEMU advent calendar 2020', - self.panic_message) - - def test_ppc_virtex_ml507(self): - """ - :avocado: tags=arch:ppc - :avocado: tags=machine:virtex-ml507 - """ - tar_url = ('https://www.qemu-advent-calendar.org' - '/2020/download/hippo.tar.gz') - tar_hash = '306b95bfe7d147f125aa176a877e266db8ef914a' - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - archive.extract(file_path, self.workdir) - self.vm.set_console() - self.vm.add_args('-kernel', self.workdir + '/hippo/hippo.linux', - '-dtb', self.workdir + '/hippo/virtex440-ml507.dtb', - '-m', '512') - self.vm.launch() - wait_for_console_pattern(self, 'QEMU advent calendar 2020', - self.panic_message) diff --git a/tests/avocado/README.rst b/tests/avocado/README.rst new file mode 100644 index 0000000000..94488371bb --- /dev/null +++ b/tests/avocado/README.rst @@ -0,0 +1,10 @@ +============================================= +Integration tests using the Avocado Framework +============================================= + +This directory contains integration tests. They're usually higher +level, and may interact with external resources and with various +guest operating systems. + +For more information, please refer to ``docs/devel/testing.rst``, +section "Integration tests using the Avocado Framework". diff --git a/tests/avocado/acpi-bits.py b/tests/avocado/acpi-bits.py new file mode 100644 index 0000000000..898c837f26 --- /dev/null +++ b/tests/avocado/acpi-bits.py @@ -0,0 +1,398 @@ +#!/usr/bin/env python3 +# group: rw quick +# Exercize QEMU generated ACPI/SMBIOS tables using biosbits, +# https://biosbits.org/ +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +# +# Author: +# Ani Sinha + +# pylint: disable=invalid-name +# pylint: disable=consider-using-f-string + +""" +This is QEMU ACPI/SMBIOS avocado tests using biosbits. +Biosbits is available originally at https://biosbits.org/. +This test uses a fork of the upstream bits and has numerous fixes +including an upgraded acpica. The fork is located here: +https://gitlab.com/qemu-project/biosbits-bits . +""" + +import logging +import os +import platform +import re +import shutil +import subprocess +import tarfile +import tempfile +import time +import zipfile +from typing import ( + List, + Optional, + Sequence, +) +from qemu.machine import QEMUMachine +from avocado import skipIf +from avocado_qemu import QemuBaseTest + +deps = ["xorriso"] # dependent tools needed in the test setup/box. +supported_platforms = ['x86_64'] # supported test platforms. + + +def which(tool): + """ looks up the full path for @tool, returns None if not found + or if @tool does not have executable permissions. + """ + paths=os.getenv('PATH') + for p in paths.split(os.path.pathsep): + p = os.path.join(p, tool) + if os.path.exists(p) and os.access(p, os.X_OK): + return p + return None + +def missing_deps(): + """ returns True if any of the test dependent tools are absent. + """ + for dep in deps: + if which(dep) is None: + return True + return False + +def supported_platform(): + """ checks if the test is running on a supported platform. + """ + return platform.machine() in supported_platforms + +class QEMUBitsMachine(QEMUMachine): # pylint: disable=too-few-public-methods + """ + A QEMU VM, with isa-debugcon enabled and bits iso passed + using -cdrom to QEMU commandline. + + """ + def __init__(self, + binary: str, + args: Sequence[str] = (), + wrapper: Sequence[str] = (), + name: Optional[str] = None, + base_temp_dir: str = "/var/tmp", + debugcon_log: str = "debugcon-log.txt", + debugcon_addr: str = "0x403", + sock_dir: Optional[str] = None, + qmp_timer: Optional[float] = None): + # pylint: disable=too-many-arguments + + if name is None: + name = "qemu-bits-%d" % os.getpid() + if sock_dir is None: + sock_dir = base_temp_dir + super().__init__(binary, args, wrapper=wrapper, name=name, + base_temp_dir=base_temp_dir, + sock_dir=sock_dir, qmp_timer=qmp_timer) + self.debugcon_log = debugcon_log + self.debugcon_addr = debugcon_addr + self.base_temp_dir = base_temp_dir + + @property + def _base_args(self) -> List[str]: + args = super()._base_args + args.extend([ + '-chardev', + 'file,path=%s,id=debugcon' %os.path.join(self.base_temp_dir, + self.debugcon_log), + '-device', + 'isa-debugcon,iobase=%s,chardev=debugcon' %self.debugcon_addr, + ]) + return args + + def base_args(self): + """return the base argument to QEMU binary""" + return self._base_args + +@skipIf(not supported_platform() or missing_deps() or os.getenv('GITLAB_CI'), + 'incorrect platform or dependencies (%s) not installed ' \ + 'or running on GitLab' % ','.join(deps)) +class AcpiBitsTest(QemuBaseTest): #pylint: disable=too-many-instance-attributes + """ + ACPI and SMBIOS tests using biosbits. + + :avocado: tags=arch:x86_64 + :avocado: tags=acpi + + """ + # in slower systems the test can take as long as 3 minutes to complete. 
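As a quick illustration of the isa-debugcon plumbing that QEMUBitsMachine._base_args sets up with the class defaults above (base_temp_dir "/var/tmp", log file "debugcon-log.txt", I/O port 0x403), the extra QEMU arguments look roughly like this; a minimal sketch, not part of the patch:

    # Sketch of the extra arguments built by QEMUBitsMachine._base_args
    # using the documented defaults; the test itself points base_temp_dir
    # at its temporary work directory, and parse_log() later reads the
    # resulting debugcon log file.
    import os

    base_temp_dir = "/var/tmp"          # class default
    debugcon_log = "debugcon-log.txt"   # class default
    debugcon_addr = "0x403"             # class default
    extra_args = [
        '-chardev',
        'file,path=%s,id=debugcon' % os.path.join(base_temp_dir, debugcon_log),
        '-device',
        'isa-debugcon,iobase=%s,chardev=debugcon' % debugcon_addr,
    ]
    print(' '.join(extra_args))
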
+ timeout = 200 + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._vm = None + self._workDir = None + self._baseDir = None + + # following are some standard configuration constants + self._bitsInternalVer = 2020 + self._bitsCommitHash = 'b48b88ff' # commit hash must match + # the artifact tag below + self._bitsTag = "qemu-bits-10182022" # this is the latest bits + # release as of today. + self._bitsArtSHA1Hash = 'b04790ac9b99b5662d0416392c73b97580641fe5' + self._bitsArtURL = ("https://gitlab.com/qemu-project/" + "biosbits-bits/-/jobs/artifacts/%s/" + "download?job=qemu-bits-build" %self._bitsTag) + self._debugcon_addr = '0x403' + self._debugcon_log = 'debugcon-log.txt' + logging.basicConfig(level=logging.INFO) + self.logger = logging.getLogger('acpi-bits') + + def _print_log(self, log): + self.logger.info('\nlogs from biosbits follows:') + self.logger.info('==========================================\n') + self.logger.info(log) + self.logger.info('==========================================\n') + + def copy_bits_config(self): + """ copies the bios bits config file into bits. + """ + config_file = 'bits-cfg.txt' + bits_config_dir = os.path.join(self._baseDir, 'acpi-bits', + 'bits-config') + target_config_dir = os.path.join(self._workDir, + 'bits-%d' %self._bitsInternalVer, + 'boot') + self.assertTrue(os.path.exists(bits_config_dir)) + self.assertTrue(os.path.exists(target_config_dir)) + self.assertTrue(os.access(os.path.join(bits_config_dir, + config_file), os.R_OK)) + shutil.copy2(os.path.join(bits_config_dir, config_file), + target_config_dir) + self.logger.info('copied config file %s to %s', + config_file, target_config_dir) + + def copy_test_scripts(self): + """copies the python test scripts into bits. """ + + bits_test_dir = os.path.join(self._baseDir, 'acpi-bits', + 'bits-tests') + target_test_dir = os.path.join(self._workDir, + 'bits-%d' %self._bitsInternalVer, + 'boot', 'python') + + self.assertTrue(os.path.exists(bits_test_dir)) + self.assertTrue(os.path.exists(target_test_dir)) + + for filename in os.listdir(bits_test_dir): + if os.path.isfile(os.path.join(bits_test_dir, filename)) and \ + filename.endswith('.py2'): + # all test scripts are named with extension .py2 so that + # avocado does not try to load them. These scripts are + # written for python 2.7 not python 3 and hence if avocado + # loaded them, it would complain about python 3 specific + # syntaxes. + newfilename = os.path.splitext(filename)[0] + '.py' + shutil.copy2(os.path.join(bits_test_dir, filename), + os.path.join(target_test_dir, newfilename)) + self.logger.info('copied test file %s to %s', + filename, target_test_dir) + + # now remove the pyc test file if it exists, otherwise the + # changes in the python test script won't be executed. + testfile_pyc = os.path.splitext(filename)[0] + '.pyc' + if os.access(os.path.join(target_test_dir, testfile_pyc), + os.F_OK): + os.remove(os.path.join(target_test_dir, testfile_pyc)) + self.logger.info('removed compiled file %s', + os.path.join(target_test_dir, + testfile_pyc)) + + def fix_mkrescue(self, mkrescue): + """ grub-mkrescue is a bash script with two variables, 'prefix' and + 'libdir'. They must be pointed to the right location so that the + iso can be generated appropriately. We point the two variables to + the directory where we have extracted our pre-built bits grub + tarball. 
+ """ + grub_x86_64_mods = os.path.join(self._workDir, 'grub-inst-x86_64-efi') + grub_i386_mods = os.path.join(self._workDir, 'grub-inst') + + self.assertTrue(os.path.exists(grub_x86_64_mods)) + self.assertTrue(os.path.exists(grub_i386_mods)) + + new_script = "" + with open(mkrescue, 'r', encoding='utf-8') as filehandle: + orig_script = filehandle.read() + new_script = re.sub('(^prefix=)(.*)', + r'\1"%s"' %grub_x86_64_mods, + orig_script, flags=re.M) + new_script = re.sub('(^libdir=)(.*)', r'\1"%s/lib"' %grub_i386_mods, + new_script, flags=re.M) + + with open(mkrescue, 'w', encoding='utf-8') as filehandle: + filehandle.write(new_script) + + def generate_bits_iso(self): + """ Uses grub-mkrescue to generate a fresh bits iso with the python + test scripts + """ + bits_dir = os.path.join(self._workDir, + 'bits-%d' %self._bitsInternalVer) + iso_file = os.path.join(self._workDir, + 'bits-%d.iso' %self._bitsInternalVer) + mkrescue_script = os.path.join(self._workDir, + 'grub-inst-x86_64-efi', 'bin', + 'grub-mkrescue') + + self.assertTrue(os.access(mkrescue_script, + os.R_OK | os.W_OK | os.X_OK)) + + self.fix_mkrescue(mkrescue_script) + + self.logger.info('using grub-mkrescue for generating biosbits iso ...') + + try: + if os.getenv('V') or os.getenv('BITS_DEBUG'): + subprocess.check_call([mkrescue_script, '-o', iso_file, + bits_dir], stderr=subprocess.STDOUT) + else: + subprocess.check_call([mkrescue_script, '-o', + iso_file, bits_dir], + stderr=subprocess.DEVNULL, + stdout=subprocess.DEVNULL) + except Exception as e: # pylint: disable=broad-except + self.skipTest("Error while generating the bits iso. " + "Pass V=1 in the environment to get more details. " + + str(e)) + + self.assertTrue(os.access(iso_file, os.R_OK)) + + self.logger.info('iso file %s successfully generated.', iso_file) + + def setUp(self): # pylint: disable=arguments-differ + super().setUp('qemu-system-') + + self._baseDir = os.getenv('AVOCADO_TEST_BASEDIR') + + # workdir could also be avocado's own workdir in self.workdir. + # At present, I prefer to maintain my own temporary working + # directory. It gives us more control over the generated bits + # log files and also for debugging, we may chose not to remove + # this working directory so that the logs and iso can be + # inspected manually and archived if needed. 
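The two re.sub() calls in fix_mkrescue() above only rewrite the prefix= and libdir= assignments at the top of the grub-mkrescue script so that it picks up the pre-built bits grub modules. A small worked example with invented paths:

    # Worked example of fix_mkrescue()'s substitutions; the script body
    # and module directories are made up for illustration.
    import re

    orig_script = 'prefix=/usr\nlibdir=/usr/lib\nexec grub-mkrescue.real "$@"\n'
    grub_x86_64_mods = "/tmp/acpi-bits/grub-inst-x86_64-efi"   # hypothetical
    grub_i386_mods = "/tmp/acpi-bits/grub-inst"                # hypothetical

    new_script = re.sub('(^prefix=)(.*)', r'\1"%s"' % grub_x86_64_mods,
                        orig_script, flags=re.M)
    new_script = re.sub('(^libdir=)(.*)', r'\1"%s/lib"' % grub_i386_mods,
                        new_script, flags=re.M)
    print(new_script)
    # prefix="/tmp/acpi-bits/grub-inst-x86_64-efi"
    # libdir="/tmp/acpi-bits/grub-inst/lib"
    # exec grub-mkrescue.real "$@"
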
+ self._workDir = tempfile.mkdtemp(prefix='acpi-bits-', + suffix='.tmp') + self.logger.info('working dir: %s', self._workDir) + + prebuiltDir = os.path.join(self._workDir, 'prebuilt') + if not os.path.isdir(prebuiltDir): + os.mkdir(prebuiltDir, mode=0o775) + + bits_zip_file = os.path.join(prebuiltDir, 'bits-%d-%s.zip' + %(self._bitsInternalVer, + self._bitsCommitHash)) + grub_tar_file = os.path.join(prebuiltDir, + 'bits-%d-%s-grub.tar.gz' + %(self._bitsInternalVer, + self._bitsCommitHash)) + + bitsLocalArtLoc = self.fetch_asset(self._bitsArtURL, + asset_hash=self._bitsArtSHA1Hash) + self.logger.info("downloaded bits artifacts to %s", bitsLocalArtLoc) + + # extract the bits artifact in the temp working directory + with zipfile.ZipFile(bitsLocalArtLoc, 'r') as zref: + zref.extractall(prebuiltDir) + + # extract the bits software in the temp working directory + with zipfile.ZipFile(bits_zip_file, 'r') as zref: + zref.extractall(self._workDir) + + with tarfile.open(grub_tar_file, 'r', encoding='utf-8') as tarball: + tarball.extractall(self._workDir) + + self.copy_test_scripts() + self.copy_bits_config() + self.generate_bits_iso() + + def parse_log(self): + """parse the log generated by running bits tests and + check for failures. + """ + debugconf = os.path.join(self._workDir, self._debugcon_log) + log = "" + with open(debugconf, 'r', encoding='utf-8') as filehandle: + log = filehandle.read() + + matchiter = re.finditer(r'(.*Summary: )(\d+ passed), (\d+ failed).*', + log) + for match in matchiter: + # verify that no test cases failed. + try: + self.assertEqual(match.group(3).split()[0], '0', + 'Some bits tests seems to have failed. ' \ + 'Please check the test logs for more info.') + except AssertionError as e: + self._print_log(log) + raise e + else: + if os.getenv('V') or os.getenv('BITS_DEBUG'): + self._print_log(log) + + def tearDown(self): + """ + Lets do some cleanups. + """ + if self._vm: + self.assertFalse(not self._vm.is_running) + if not os.getenv('BITS_DEBUG'): + self.logger.info('removing the work directory %s', self._workDir) + shutil.rmtree(self._workDir) + else: + self.logger.info('not removing the work directory %s ' \ + 'as BITS_DEBUG is ' \ + 'passed in the environment', self._workDir) + super().tearDown() + + def test_acpi_smbios_bits(self): + """The main test case implementaion.""" + + iso_file = os.path.join(self._workDir, + 'bits-%d.iso' %self._bitsInternalVer) + + self.assertTrue(os.access(iso_file, os.R_OK)) + + self._vm = QEMUBitsMachine(binary=self.qemu_bin, + base_temp_dir=self._workDir, + debugcon_log=self._debugcon_log, + debugcon_addr=self._debugcon_addr) + + self._vm.add_args('-cdrom', '%s' %iso_file) + # the vm needs to be run under icount so that TCG emulation is + # consistent in terms of timing. smilatency tests have consistent + # timing requirements. + self._vm.add_args('-icount', 'auto') + + args = " ".join(str(arg) for arg in self._vm.base_args()) + \ + " " + " ".join(str(arg) for arg in self._vm.args) + + self.logger.info("launching QEMU vm with the following arguments: %s", + args) + + self._vm.launch() + # biosbits has been configured to run all the specified test suites + # in batch mode and then automatically initiate a vm shutdown. + # Rely on avocado's unit test timeout. 
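parse_log() above keys off the summary line that biosbits prints at the end of a batch run. A short illustration of the same regular expression against a fabricated log line:

    # Illustration of the parse_log() match; the summary line below is a
    # made-up example of the biosbits batch-mode output format.
    import re

    log = "... 143 tests executed.\nSummary: 143 passed, 0 failed.\n"
    for match in re.finditer(r'(.*Summary: )(\d+ passed), (\d+ failed).*', log):
        passed, failed = match.group(2), match.group(3)
        print(passed, "/", failed)        # 143 passed / 0 failed
        assert failed.split()[0] == '0'   # the test asserts this per match
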
+ self._vm.wait(timeout=None) + self.parse_log() diff --git a/tests/avocado/acpi-bits/bits-config/bits-cfg.txt b/tests/avocado/acpi-bits/bits-config/bits-cfg.txt new file mode 100644 index 0000000000..8010804453 --- /dev/null +++ b/tests/avocado/acpi-bits/bits-config/bits-cfg.txt @@ -0,0 +1,18 @@ +# BITS configuration file +[bits] + +# To run BITS in batch mode, set batch to a list of one or more of the +# following keywords; BITS will then run all of the requested operations, then +# save the log file to disk. +# +# test: Run the full BITS testsuite. +# acpi: Dump all ACPI structures. +# smbios: Dump all SMBIOS structures. +# +# Leave batch set to an empty string to disable batch mode. +# batch = + +# Uncomment the following to run all available batch operations +# please take a look at boot/python/init.py in bits zip file +# to see how these options are parsed and used. +batch = test acpi smbios diff --git a/tests/avocado/acpi-bits/bits-tests/smbios.py2 b/tests/avocado/acpi-bits/bits-tests/smbios.py2 new file mode 100644 index 0000000000..9667d0542c --- /dev/null +++ b/tests/avocado/acpi-bits/bits-tests/smbios.py2 @@ -0,0 +1,2430 @@ +# Copyright (c) 2015, Intel Corporation +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""SMBIOS/DMI module.""" + +import bits +import bitfields +import ctypes +import redirect +import struct +import uuid +import unpack +import ttypager +import sys + +class SMBIOS(unpack.Struct): + def __new__(cls): + if sys.platform == "BITS-EFI": + import efi + sm_ptr = efi.system_table.ConfigurationTableDict.get(efi.SMBIOS_TABLE_GUID) + else: + address = 0xF0000 + mem = bits.memory(0xF0000, 0x10000) + for offset in range(0, len(mem), 16): + signature = (ctypes.c_char * 4).from_address(address + offset).value + if signature == "_SM_": + entry_point_length = ctypes.c_ubyte.from_address(address + offset + 5).value + csum = sum(map(ord, mem[offset:offset + entry_point_length])) & 0xff + if csum == 0: + sm_ptr = address + offset + break + else: + return None + + if not sm_ptr: + return None + + sm = super(SMBIOS, cls).__new__(cls) + sm._header_memory = bits.memory(sm_ptr, 0x1f) + return sm + + def __init__(self): + super(SMBIOS, self).__init__() + u = unpack.Unpackable(self._header_memory) + self.add_field('header', Header(u)) + self._structure_memory = bits.memory(self.header.structure_table_address, self.header.structure_table_length) + u = unpack.Unpackable(self._structure_memory) + self.add_field('structures', unpack.unpack_all(u, _smbios_structures, self), unpack.format_each("\n\n{!r}")) + + def structure_type(self, num): + '''Dumps structure of given Type if present''' + try: + types_present = [self.structures[x].smbios_structure_type for x in range(len(self.structures))] + matrix = dict() + for index in range(len(types_present)): + if types_present.count(types_present[index]) == 1: + matrix[types_present[index]] = self.structures[index] + else: # if multiple structures of the same type, return a list of structures for the type number + if matrix.has_key(types_present[index]): + matrix[types_present[index]].append(self.structures[index]) + else: + matrix[types_present[index]] = [self.structures[index]] + return matrix[num] + except: + print "Failure: Type {} - not found".format(num) + +class Header(unpack.Struct): + def __new__(cls, u): + return super(Header, cls).__new__(cls) + + def __init__(self, u): + super(Header, self).__init__() + self.raw_data = u.unpack_rest() + u = unpack.Unpackable(self.raw_data) + self.add_field('anchor_string', u.unpack_one("4s")) + self.add_field('checksum', u.unpack_one("B")) + self.add_field('length', u.unpack_one("B")) + self.add_field('major_version', u.unpack_one("B")) + self.add_field('minor_version', u.unpack_one("B")) + self.add_field('max_structure_size', u.unpack_one(" len(self.strings): + return "(error: string index out of range)" + return self.strings[i - 1] + +class BIOSInformation(SmbiosBaseStructure): + smbios_structure_type = 0 + + def __init__(self, u, sm): + super(BIOSInformation, self).__init__(u, sm) + u = self.u + try: + self.add_field('vendor', u.unpack_one("B"), self.fmtstr) + self.add_field('version', u.unpack_one("B"), self.fmtstr) + self.add_field('starting_address_segment', u.unpack_one("= (2,"4"): + characteristic_bytes = 2 + else: + characteristic_bytes = self.length - 0x12 + self.add_field('characteristics_extensions', [u.unpack_one("B") for b in range(characteristic_bytes)]) + if (sm.header.major_version, minor_version_str) >= (2,"4"): + self.add_field('major_release', u.unpack_one("B")) + self.add_field('minor_release', u.unpack_one("B")) + self.add_field('ec_major_release', u.unpack_one("B")) + self.add_field('ec_minor_release', u.unpack_one("B")) + except: + self.decode_failure = True + print "Error parsing 
BIOSInformation" + import traceback + traceback.print_exc() + self.fini() + +class SystemInformation(SmbiosBaseStructure): + smbios_structure_type = 1 + + def __init__(self, u, sm): + super(SystemInformation, self).__init__(u, sm) + u = self.u + try: + self.add_field('manufacturer', u.unpack_one("B"), self.fmtstr) + self.add_field('product_name', u.unpack_one("B"), self.fmtstr) + self.add_field('version', u.unpack_one("B"), self.fmtstr) + self.add_field('serial_number', u.unpack_one("B"), self.fmtstr) + if self.length > 0x8: + self.add_field('uuid', uuid.UUID(bytes_le=u.unpack_one("16s"))) + wakeup_types = { + 0: 'Reserved', + 1: 'Other', + 2: 'Unknown', + 3: 'APM Timer', + 4: 'Modem Ring', + 5: 'LAN Remote', + 6: 'Power Switch', + 7: 'PCI PME#', + 8: 'AC Power Restored' + } + self.add_field('wakeup_type', u.unpack_one("B"), unpack.format_table("{}", wakeup_types)) + if self.length > 0x19: + self.add_field('sku_number', u.unpack_one("B"), self.fmtstr) + self.add_field('family', u.unpack_one("B"), self.fmtstr) + except: + self.decode_failure = True + print "Error parsing SystemInformation" + import traceback + traceback.print_exc() + self.fini() + +_board_types = { + 1: 'Unknown', + 2: 'Other', + 3: 'Server Blade', + 4: 'Connectivity Switch', + 5: 'System Management Module', + 6: 'Processor Module', + 7: 'I/O Module', + 8: 'Memory Module', + 9: 'Daughter Board', + 0xA: 'Motherboard', + 0xB: 'Processor/Memory Module', + 0xC: 'Processor/IO Module', + 0xD: 'Interconnect Board' +} + +class BaseboardInformation(SmbiosBaseStructure): + smbios_structure_type = 2 + + def __init__(self, u, sm): + super(BaseboardInformation, self).__init__(u, sm) + u = self.u + try: + self.add_field('manufacturer', u.unpack_one("B"), self.fmtstr) + self.add_field('product', u.unpack_one("B"), self.fmtstr) + self.add_field('version', u.unpack_one("B"), self.fmtstr) + self.add_field('serial_number', u.unpack_one("B"), self.fmtstr) + + if self.length > 0x8: + self.add_field('asset_tag', u.unpack_one("B"), self.fmtstr) + + if self.length > 0x9: + self.add_field('feature_flags', u.unpack_one("B")) + self.add_field('hosting_board', bool(bitfields.getbits(self.feature_flags, 0)), "feature_flags[0]={}") + self.add_field('requires_daughter_card', bool(bitfields.getbits(self.feature_flags, 1)), "feature_flags[1]={}") + self.add_field('removable', bool(bitfields.getbits(self.feature_flags, 2)), "feature_flags[2]={}") + self.add_field('replaceable', bool(bitfields.getbits(self.feature_flags, 3)), "feature_flags[3]={}") + self.add_field('hot_swappable', bool(bitfields.getbits(self.feature_flags, 4)), "feature_flags[4]={}") + + if self.length > 0xA: + self.add_field('location', u.unpack_one("B"), self.fmtstr) + + if self.length > 0xB: + self.add_field('chassis_handle', u.unpack_one(" 0xD: + self.add_field('board_type', u.unpack_one("B"), unpack.format_table("{}", _board_types)) + + if self.length > 0xE: + self.add_field('handle_count', u.unpack_one("B")) + if self.handle_count > 0: + self.add_field('contained_object_handles', tuple(u.unpack_one(" 9: + chassis_states = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'Safe', + 0x04: 'Warning', + 0x05: 'Critical', + 0x06: 'Non-recoverable', + } + self.add_field('bootup_state', u.unpack_one("B"), unpack.format_table("{}", chassis_states)) + self.add_field('power_supply_state', u.unpack_one("B"), unpack.format_table("{}", chassis_states)) + self.add_field('thermal_state', u.unpack_one("B"), unpack.format_table("{}", chassis_states)) + security_states = { + 0x01: 'Other', + 0x02: 
'Unknown', + 0x03: 'None', + 0x04: 'External interface locked out', + 0x05: 'External interface enabled', + } + self.add_field('security_status', u.unpack_one("B"), unpack.format_table("{}", security_states)) + if self.length > 0xd: + self.add_field('oem_defined', u.unpack_one(" 0x11: + self.add_field('height', u.unpack_one("B")) + self.add_field('num_power_cords', u.unpack_one("B")) + self.add_field('contained_element_count', u.unpack_one("B")) + self.add_field('contained_element_length', u.unpack_one("B")) + if getattr(self, 'contained_element_count', 0): + self.add_field('contained_elements', tuple(SystemEnclosureContainedElement(u, self.contained_element_length) for i in range(self.contained_element_count))) + if self.length > (0x15 + (getattr(self, 'contained_element_count', 0) * getattr(self, 'contained_element_length', 0))): + self.add_field('sku_number', u.unpack_one("B"), self.fmtstr) + except: + self.decode_failure = True + print "Error parsing SystemEnclosure" + import traceback + traceback.print_exc() + self.fini() + +class SystemEnclosureContainedElement(unpack.Struct): + def __init__(self, u, length): + super(SystemEnclosureContainedElement, self).__init__() + self.start_offset = u.offset + self.raw_data = u.unpack_raw(length) + self.u = unpack.Unpackable(self.raw_data) + u = self.u + self.add_field('contained_element_type', u.unpack_one("B")) + type_selections = { + 0: 'SMBIOS baseboard type enumeration', + 1: 'SMBIOS structure type enumeration', + } + self.add_field('type_select', bitfields.getbits(self.contained_element_type, 7), unpack.format_table("contained_element_type[7]={}", type_selections)) + self.add_field('type', bitfields.getbits(self.contained_element_type, 6, 0)) + if self.type_select == 0: + self.add_field('smbios_board_type', self.type, unpack.format_table("{}", _board_types)) + else: + self.add_field('smbios_structure_type', self.type) + self.add_field('minimum', u.unpack_one("B")) + self.add_field('maximum', u.unpack_one("B")) + if not u.at_end(): + self.add_field('data', u.unpack_rest()) + del self.u + +class ProcessorInformation(SmbiosBaseStructure): + smbios_structure_type = 4 + + def __init__(self, u, sm): + super(ProcessorInformation, self).__init__(u, sm) + u = self.u + try: + self.add_field('socket_designation', u.unpack_one("B"), self.fmtstr) + processor_types = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'Central Processor', + 0x04: 'Math Processor', + 0x05: 'DSP Processor', + 0x06: 'Video Processor', + } + self.add_field('processor_type', u.unpack_one("B"), unpack.format_table("{}", processor_types)) + self.add_field('processor_family', u.unpack_one("B")) + self.add_field('processor_manufacturer', u.unpack_one("B"), self.fmtstr) + self.add_field('processor_id', u.unpack_one(" 0x1A: + self.add_field('l1_cache_handle', u.unpack_one(" 0x20: + self.add_field('serial_number', u.unpack_one("B"), self.fmtstr) + self.add_field('asset_tag', u.unpack_one("B"), self.fmtstr) + self.add_field('part_number', u.unpack_one("B"), self.fmtstr) + if self.length > 0x24: + self.add_field('core_count', u.unpack_one("B")) + self.add_field('core_enabled', u.unpack_one("B")) + self.add_field('thread_count', u.unpack_one("B")) + self.add_field('processor_characteristics', u.unpack_one(" 0x28: + self.add_field('processor_family_2', u.unpack_one(" 0x2A: + self.add_field('core_count2', u.unpack_one(" 0x0F: + self.add_field('cache_speed', u.unpack_one("B")) + if self.length > 0x10: + _error_correction = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'None', + 0x04: 'Parity', 
+ 0x05: 'Single-bit ECC', + 0x06: 'Multi-bit ECC' + } + self.add_field('error_correction', u.unpack_one("B"), unpack.format_table("{}", _error_correction)) + if self.length > 0x10: + _system_cache_type = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'Instruction', + 0x04: 'Data', + 0x05: 'Unified' + } + self.add_field('system_cache_type', u.unpack_one("B"), unpack.format_table("{}", _system_cache_type)) + if self.length > 0x12: + _associativity = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'Direct Mapped', + 0x04: '2-way Set-Associative', + 0x05: '4-way Set-Associative', + 0x06: 'Fully Associative', + 0x07: '8-way Set-Associative', + 0x08: '16-way Set-Associative', + 0x09: '12-way Set-Associative', + 0x0A: '24-way Set-Associative', + 0x0B: '32-way Set-Associative', + 0x0C: '48-way Set-Associative', + 0x0D: '64-way Set-Associative', + 0x0E: '20-way Set-Associative' + } + self.add_field('associativity', u.unpack_one("B"), unpack.format_table("{}", _associativity)) + + except: + self.decode_failure = True + print "Error parsing CacheInformation" + import traceback + traceback.print_exc() + self.fini() + +class PortConnectorInfo(SmbiosBaseStructure): + smbios_structure_type = 8 + + def __init__(self, u, sm): + super(PortConnectorInfo, self).__init__(u, sm) + u = self.u + try: + self.add_field('internal_reference_designator', u.unpack_one("B"), self.fmtstr) + connector_types = { + 0x00: 'None', + 0x01: 'Centronics', + 0x02: 'Mini Centronics', + 0x03: 'Proprietary', + 0x04: 'DB-25 pin male', + 0x05: 'DB-25 pin female', + 0x06: 'DB-15 pin male', + 0x07: 'DB-15 pin female', + 0x08: 'DB-9 pin male', + 0x09: 'DB-9 pin female', + 0x0A: 'RJ-11', + 0x0B: 'RJ-45', + 0x0C: '50-pin MiniSCSI', + 0x0D: 'Mini-DIN', + 0x0E: 'Micro-DIN', + 0x0F: 'PS/2', + 0x10: 'Infrared', + 0x11: 'HP-HIL', + 0x12: 'Access Bus (USB)', + 0x13: 'SSA SCSI', + 0x14: 'Circular DIN-8 male', + 0x15: 'Circular DIN-8 female', + 0x16: 'On Board IDE', + 0x17: 'On Board Floppy', + 0x18: '9-pin Dual Inline (pin 10 cut)', + 0x19: '25-pin Dual Inline (pin 26 cut)', + 0x1A: '50-pin Dual Inline', + 0x1B: '68-pin Dual Inline', + 0x1C: 'On Board Sound Input from CD-ROM', + 0x1D: 'Mini-Centronics Type-14', + 0x1E: 'Mini-Centronics Type-26', + 0x1F: 'Mini-jack (headphones)', + 0x20: 'BNC', + 0x21: '1394', + 0x22: 'SAS/SATA Plug Receptacle', + 0xA0: 'PC-98', + 0xA1: 'PC-98Hireso', + 0xA2: 'PC-H98', + 0xA3: 'PC-98Note', + 0xA4: 'PC-98Full', + 0xFF: 'Other', + } + self.add_field('internal_connector_type', u.unpack_one("B"), unpack.format_table("{}", connector_types)) + self.add_field('external_reference_designator', u.unpack_one("B"), self.fmtstr) + self.add_field('external_connector_type', u.unpack_one("B"), unpack.format_table("{}", connector_types)) + port_types = { + 0x00: 'None', + 0x01: 'Parallel Port XT/AT Compatible', + 0x02: 'Parallel Port PS/2', + 0x03: 'Parallel Port ECP', + 0x04: 'Parallel Port EPP', + 0x05: 'Parallel Port ECP/EPP', + 0x06: 'Serial Port XT/AT Compatible', + 0x07: 'Serial Port 16450 Compatible', + 0x08: 'Serial Port 16550 Compatible', + 0x09: 'Serial Port 16550A Compatible', + 0x0A: 'SCSI Port', + 0x0B: 'MIDI Port', + 0x0C: 'Joy Stick Port', + 0x0D: 'Keyboard Port', + 0x0E: 'Mouse Port', + 0x0F: 'SSA SCSI', + 0x10: 'USB', + 0x11: 'FireWire (IEEE P1394)', + 0x12: 'PCMCIA Type I2', + 0x13: 'PCMCIA Type II', + 0x14: 'PCMCIA Type III', + 0x15: 'Cardbus', + 0x16: 'Access Bus Port', + 0x17: 'SCSI II', + 0x18: 'SCSI Wide', + 0x19: 'PC-98', + 0x1A: 'PC-98-Hireso', + 0x1B: 'PC-H98', + 0x1C: 'Video Port', + 0x1D: 'Audio Port', + 
0x1E: 'Modem Port', + 0x1F: 'Network Port', + 0x20: 'SATA', + 0x21: 'SAS', + 0xA0: '8251 Compatible', + 0xA1: '8251 FIFO Compatible', + 0xFF: 'Other', + } + self.add_field('port_type', u.unpack_one("B"), unpack.format_table("{}", port_types)) + except: + self.decodeFailure = True + print "Error parsing PortConnectorInfo" + import traceback + traceback.print_exc() + self.fini() + +class SystemSlots(SmbiosBaseStructure): + smbios_structure_type = 9 + + def __init__(self, u, sm): + super(SystemSlots, self).__init__(u, sm) + u = self.u + try: + self.add_field('designation', u.unpack_one("B"), self.fmtstr) + _slot_types = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'ISA', + 0x04: 'MCA', + 0x05: 'EISA', + 0x06: 'PCI', + 0x07: 'PC Card (PCMCIA)', + 0x08: 'VL-VESA', + 0x09: 'Proprietary', + 0x0A: 'Processor Card Slot', + 0x0B: 'Proprietary Memory Card Slot', + 0x0C: 'I/O Riser Card Slot', + 0x0D: 'NuBus', + 0x0E: 'PCI 66MHz Capable', + 0x0F: 'AGP', + 0x10: 'AGP 2X', + 0x11: 'AGP 4X', + 0x12: 'PCI-X', + 0x13: 'AGP 8X', + 0xA0: 'PC-98/C20', + 0xA1: 'PC-98/C24', + 0xA2: 'PC-98/E', + 0xA3: 'PC-98/Local Bus', + 0xA4: 'PC-98/Card', + 0xA5: 'PCI Express', + 0xA6: 'PCI Express x1', + 0xA7: 'PCI Express x2', + 0xA8: 'PCI Express x4', + 0xA9: 'PCI Express x8', + 0xAA: 'PCI Express x16', + 0xAB: 'PCI Express Gen 2', + 0xAC: 'PCI Express Gen 2 x1', + 0xAD: 'PCI Express Gen 2 x2', + 0xAE: 'PCI Express Gen 2 x4', + 0xAF: 'PCI Express Gen 2 x8', + 0xB0: 'PCI Express Gen 2 x16', + 0xB1: 'PCI Express Gen 3', + 0xB2: 'PCI Express Gen 3 x1', + 0xB3: 'PCI Express Gen 3 x2', + 0xB4: 'PCI Express Gen 3 x4', + 0xB5: 'PCI Express Gen 3 x8', + 0xB6: 'PCI Express Gen 3 x16', + } + self.add_field('slot_type', u.unpack_one("B"), unpack.format_table("{}", _slot_types)) + _slot_data_bus_widths = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: '8 bit', + 0x04: '16 bit', + 0x05: '32 bit', + 0x06: '64 bit', + 0x07: '128 bit', + 0x08: '1x or x1', + 0x09: '2x or x2', + 0x0A: '4x or x4', + 0x0B: '8x or x8', + 0x0C: '12x or x12', + 0x0D: '16x or x16', + 0x0E: '32x or x32', + } + self.add_field('slot_data_bus_width', u.unpack_one('B'), unpack.format_table("{}", _slot_data_bus_widths)) + _current_usages = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'Available', + 0x04: 'In use', + } + self.add_field('current_usage', u.unpack_one('B'), unpack.format_table("{}", _current_usages)) + _slot_lengths = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'Short Length', + 0x04: 'Long Length', + } + self.add_field('slot_length', u.unpack_one('B'), unpack.format_table("{}", _slot_lengths)) + self.add_field('slot_id', u.unpack_one(' 0x0C: + self.add_field('characteristics2', u.unpack_one('B')) + self.add_field('supports_PME', bool(bitfields.getbits(self.characteristics2, 0)), "characteristics2[0]={}") + self.add_field('supports_hot_plug', bool(bitfields.getbits(self.characteristics2, 1)), "characteristics2[1]={}") + self.add_field('supports_smbus', bool(bitfields.getbits(self.characteristics2, 2)), "characteristics2[2]={}") + if self.length > 0x0D: + self.add_field('segment_group_number', u.unpack_one(' 0x05: + self.add_field('flags', u.unpack_one('B')) + self.add_field('abbreviated_format', bool(bitfields.getbits(self.flags, 0)), "flags[0]={}") + if self.length > 0x6: + u.skip(15) + self.add_field('current_language', u.unpack_one('B'), self.fmtstr) + except: + self.decodeFailure = True + print "Error parsing BIOSLanguageInformation" + import traceback + traceback.print_exc() + self.fini() + +class GroupAssociations(SmbiosBaseStructure): + 
smbios_structure_type = 14 + + def __init__(self, u, sm): + super(GroupAssociations, self).__init__(u, sm) + u = self.u + try: + self.add_field('group_name', u.unpack_one("B"), self.fmtstr) + self.add_field('item_type', u.unpack_one('B')) + self.add_field('item_handle', u.unpack_one(' 0x14: + _log_header_formats = { + 0: 'No header', + 1: 'Type 1 log header', + xrange(2, 0x7f): 'Available for future assignment', + xrange(0x80, 0xff): 'BIOS vendor or OEM-specific format' + } + self.add_field('log_header_format', u.unpack_one("B"), unpack.format_table("{}", _log_header_formats)) + if self.length > 0x15: + self.add_field('num_supported_log_type_descriptors', u.unpack_one('B')) + if self.length > 0x16: + self.add_field('length_log_type_descriptor', u.unpack_one('B')) + if self.length != (0x17 + (self.num_supported_log_type_descriptors * self.length_log_type_descriptor)): + print "Error: structure length ({}) != 0x17 + (num_supported_log_type_descriptors ({}) * length_log_type_descriptor({}))".format(self.length, self.num_supported_log_type_descriptors, self.length_log_type_descriptor) + print "structure length = {}".format(self.length) + print "num_supported_log_type_descriptors = {}".format(self.num_supported_log_type_descriptors) + print "length_log_type_descriptor = {}".format(self.length_log_type_descriptor) + self.decodeFailure = True + self.add_field('descriptors', tuple(EventLogDescriptor.unpack(u) for i in range(self.num_supported_log_type_descriptors)), unpack.format_each("\n{!r}")) + except: + self.decodeFailure = True + print "Error parsing SystemEventLog" + import traceback + traceback.print_exc() + self.fini() + +class EventLogDescriptor(unpack.Struct): + @staticmethod + def _unpack(u): + _event_log_type_descriptors = { + 0x00: 'Reserved', + 0x01: 'Single-bit ECC memory error', + 0x02: 'Multi-bit ECC memory error', + 0x03: 'Parity memory error', + 0x04: 'Bus time-out', + 0x05: 'I/O Channel Check', + 0x06: 'Software NMI', + 0x07: 'POST Memory Resize', + 0x08: 'POST Error', + 0x09: 'PCI Parity Error', + 0x0A: 'PCI System Error', + 0x0B: 'CPU Failure', + 0x0C: 'EISA FailSafe Timer time-out', + 0x0D: 'Correctable memory log disabled', + 0x0E: 'Logging disabled for a specific Event Type - too many errors of the same type received in a short amount of time', + 0x0F: 'Reserved', + 0x10: 'System Limit Exceeded', + 0x11: 'Asynchronous hardware timer expired and issued a system reset', + 0x12: 'System configuration information', + 0x13: 'Hard-disk information', + 0x14: 'System reconfigured', + 0x15: 'Uncorrectable CPU-complex error', + 0x16: 'Log Area Reset/Cleared', + 0x17: 'System boot', + xrange(0x18, 0x7F): 'Unused, available for assignment', + xrange(0x80, 0xFE): 'Availalbe for system- and OEM-specific assignments', + 0xFF: 'End of log' + } + yield 'log_type', u.unpack_one('B'), unpack.format_table("{}", _event_log_type_descriptors) + _event_log_format = { + 0x00: 'None', + 0x01: 'Handle', + 0x02: 'Multiple-Event', + 0x03: 'Multiple-Event Handle', + 0x04: 'POST Results Bitmap', + 0x05: 'System Management Type', + 0x06: 'Multiple-Event System Management Type', + xrange(0x80, 0xFF): 'OEM assigned' + } + yield 'variable_data_format_type', u.unpack_one('B'), unpack.format_table("{}", _event_log_format) + +class PhysicalMemoryArray(SmbiosBaseStructure): + smbios_structure_type = 16 + + def __init__(self, u, sm): + super(PhysicalMemoryArray, self).__init__(u, sm) + u = self.u + try: + if self.length > 0x4: + _location_field = { + 0x01: "Other", + 0x02: "Unknown", + 0x03: "System board or 
motherboard", + 0x04: "ISA add-on card", + 0x05: "EISA add-on card", + 0x06: "PCI add-on card", + 0x07: "MCA add-on card", + 0x08: "PCMCIA add-on card", + 0x09: "Proprietary add-on card", + 0x0A: "NuBus", + 0xA0: "PC-98/C20 add-on card", + 0xA1: "PC-98/C24 add-on card", + 0xA2: "PC-98/E add-on card", + 0xA3: "PC-98/Local bus add-on card" + } + self.add_field('location', u.unpack_one("B"), unpack.format_table("{}", _location_field)) + if self.length > 0x05: + _use = { + 0x01: "Other", + 0x02: "Unknown", + 0x03: "System memory", + 0x04: "Video memory", + 0x05: "Flash memory", + 0x06: "Non-volatile RAM", + 0x07: "Cache memory" + } + self.add_field('use', u.unpack_one('B'), unpack.format_table("{}", _use)) + if self.length > 0x06: + _error_correction = { + 0x01: "Other", + 0x02: "Unknown", + 0x03: "None", + 0x04: "Parity", + 0x05: "Single-bit ECC", + 0x06: "Multi-bit ECC", + 0x07: "CRC" + } + self.add_field('memory_error_correction', u.unpack_one('B'), unpack.format_table("{}", _error_correction)) + if self.length > 0x07: + self.add_field('maximum_capacity', u.unpack_one(' 0x0B: + self.add_field('memory_error_information_handle', u.unpack_one(' 0x0D: + self.add_field('num_memory_devices', u.unpack_one(' 0x0F: + self.add_field('extended_maximum_capacity', u.unpack_one(' 0x4: + self.add_field('physical_memory_array_handle', u.unpack_one(" 0x6: + self.add_field('memory_error_information_handle', u.unpack_one(" 0x8: + self.add_field('total_width', u.unpack_one(" 0xA: + self.add_field('data_width', u.unpack_one(" 0xC: + self.add_field('size', u.unpack_one(" 0xE: + _form_factors = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'SIMM', + 0x04: 'SIP', + 0x05: 'Chip', + 0x06: 'DIP', + 0x07: 'ZIP', + 0x08: 'Proprietary Card', + 0x09: 'DIMM', + 0x0A: 'TSOP', + 0x0B: 'Row of chips', + 0x0C: 'RIMM', + 0x0D: 'SODIMM', + 0x0E: 'SRIMM', + 0x0F: 'FB-DIMM' + } + self.add_field('form_factor', u.unpack_one("B"), unpack.format_table("{}", _form_factors)) + if self.length > 0xF: + self.add_field('device_set', u.unpack_one("B")) + if self.length > 0x10: + self.add_field('device_locator', u.unpack_one("B"), self.fmtstr) + if self.length > 0x11: + self.add_field('bank_locator', u.unpack_one("B"), self.fmtstr) + if self.length > 0x12: + _memory_types = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'DRAM', + 0x04: 'EDRAM', + 0x05: 'VRAM', + 0x06: 'SRAM', + 0x07: 'RAM', + 0x08: 'ROM', + 0x09: 'FLASH', + 0x0A: 'EEPROM', + 0x0B: 'FEPROM', + 0x0C: 'EPROM', + 0x0D: 'CDRAM', + 0x0E: '3DRAM', + 0x0F: 'SDRAM', + 0x10: 'SGRAM', + 0x11: 'RDRAM', + 0x12: 'DDR', + 0x13: 'DDR2', + 0x14: 'DDR2 FB-DIMM', + xrange(0x15, 0x17): 'Reserved', + 0x18: 'DDR3', + 0x19: 'FBD2' + } + self.add_field('memory_type', u.unpack_one("B"), unpack.format_table("{}", _memory_types)) + if self.length > 0x13: + self.add_field('type_detail', u.unpack_one(' 0x15: + self.add_field('speed', u.unpack_one(" 0x17: + self.add_field('manufacturer', u.unpack_one("B"), self.fmtstr) + if self.length > 0x18: + self.add_field('serial_number', u.unpack_one("B"), self.fmtstr) + if self.length > 0x19: + self.add_field('asset_tag', u.unpack_one("B"), self.fmtstr) + if self.length > 0x1A: + self.add_field('part_number', u.unpack_one("B"), self.fmtstr) + if self.length > 0x1B: + self.add_field('attributes', u.unpack_one("B")) + self.add_field('rank', bitfields.getbits(self.attributes, 3, 0), "attributes[3:0]={}") + if self.length > 0x1C: + if self.size == 0x7FFF: + self.add_field('extended_size', u.unpack_one(' 0x20: + self.add_field('configured_memory_clock_speed', 
u.unpack_one(" 0x22: + self.add_field('minimum_voltage', u.unpack_one(" 0x24: + self.add_field('maximum_voltage', u.unpack_one(" 0x26: + self.add_field('configured_voltage', u.unpack_one(" 0x4: + _error_types = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'OK', + 0x04: 'Bad read', + 0x05: 'Parity error', + 0x06: 'Single-bit error', + 0x07: 'Double-bit error', + 0x08: 'Multi-bit error', + 0x09: 'Nibble error', + 0x0A: 'Checksum error', + 0x0B: 'CRC error', + 0x0C: 'Corrected single-bit error', + 0x0D: 'Corrected error', + 0x0E: 'Uncorrectable error' + } + self.add_field('error_type', u.unpack_one("B"), unpack.format_table("{}", _error_types)) + if self.length > 0x5: + _error_granularity_field = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'Device level', + 0x04: 'Memory partition level' + } + self.add_field('error_granularity', u.unpack_one("B"), unpack.format_table("{}", _error_granularity_field)) + if self.length > 0x6: + _error_operation_field = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'Read', + 0x04: 'Write', + 0x05: 'Partial write' + } + self.add_field('error_operation', u.unpack_one("B"), unpack.format_table("{}", _error_operation_field)) + if self.length > 0x7: + self.add_field('vendor_syndrome', u.unpack_one(" 0xB: + self.add_field('memory_array_error_address', u.unpack_one(" 0xF: + self.add_field('device_error_address', u.unpack_one(" 0x13: + self.add_field('error_resolution', u.unpack_one(" 0x4: + self.add_field('starting_address', u.unpack_one(" 0x8: + self.add_field('ending_address', u.unpack_one(" 0xC: + self.add_field('memory_array_handle', u.unpack_one(" 0xE: + self.add_field('partition_width', u.unpack_one("B")) + if self.length > 0xF: + # valid if starting_address = FFFF FFFF + if self.starting_address == 0xFFFFFFFF: + self.add_field('extended_starting_address', u.unpack_one(" 0x17: + self.add_field('extended_ending_address', u.unpack_one(" 0x4: + self.add_field('starting_address', u.unpack_one(" 0x8: + self.add_field('ending_address', u.unpack_one(" 0xC: + self.add_field('memory_device_handle', u.unpack_one(" 0xE: + self.add_field('memory_array_mapped_address_handle', u.unpack_one(" 0x10: + self.add_field('partition_row_position', u.unpack_one("B")) + if self.length > 0x11: + self.add_field('interleave_position', u.unpack_one("B")) + if self.length > 0x12: + self.add_field('interleave_data_depth', u.unpack_one("B")) + if self.length > 0x13: + # valid if starting_address = FFFF FFFF + if self.starting_address == 0xFFFFFFFF: + self.add_field('extended_starting_address', u.unpack_one(" 0x1B: + self.add_field('extended_ending_address', u.unpack_one(" 0x4: + _pointing_device_types = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'Mouse', + 0x04: 'Track Ball', + 0x05: 'Track Point', + 0x06: 'Glide Point', + 0x07: 'Touch Pad', + 0x08: 'Touch Screen', + 0x09: 'Optical Sensor' + } + self.add_field('pointing_device_type', u.unpack_one("B"), unpack.format_table("{}", _pointing_device_types)) + if self.length > 0x5: + _interfaces = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'Serial', + 0x04: 'PS/2', + 0x05: 'Infared', + 0x06: 'HP-HIL', + 0x07: 'Bus mouse', + 0x08: 'ADB (Apple Desktop Bus)', + 0x09: 'Bus mouse DB-9', + 0x0A: 'Bus mouse micro-DIN', + 0x0B: 'USB' + } + self.add_field('interface', u.unpack_one("B"), unpack.format_table("{}", _interfaces)) + if self.length > 0x6: + self.add_field('num_buttons', u.unpack_one("B")) + except: + self.decodeFailure = True + print "Error parsing BuiltInPointingDevice" + import traceback + traceback.print_exc() + self.fini() + +class 
PortableBattery(SmbiosBaseStructure): + smbios_structure_type = 22 + + def __init__(self, u, sm): + super(PortableBattery, self).__init__(u, sm) + u = self.u + try: + if self.length > 0x4: + self.add_field('location', u.unpack_one("B"), self.fmtstr) + if self.length > 0x5: + self.add_field('manufacturer', u.unpack_one("B"), self.fmtstr) + if self.length > 0x6: + self.add_field('manufacturer_date', u.unpack_one("B"), self.fmtstr) + if self.length > 0x7: + self.add_field('serial_number', u.unpack_one("B"), self.fmtstr) + if self.length > 0x8: + self.add_field('device_name', u.unpack_one("B"), self.fmtstr) + if self.length > 0x9: + _device_chemistry = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'Lead Acid', + 0x04: 'Nickel Cadmium', + 0x05: 'Nickel metal hydride', + 0x06: 'Lithium-ion', + 0x07: 'Zinc air', + 0x08: 'Lithium Polymer' + } + self.add_field('device_chemistry', u.unpack_one("B"), unpack.format_table("{}", _device_chemistry)) + if self.length > 0xA: + self.add_field('design_capacity', u.unpack_one(" 0xC: + self.add_field('design_voltage', u.unpack_one(" 0xE: + self.add_field('sbds_version_number', u.unpack_one("B"), self.fmtstr) + if self.length > 0xF: + self.add_field('max_error_battery_data', u.unpack_one("B"), self.fmtstr) + if self.length > 0x10: + if self.serial_number == 0: + self.add_field('sbds_serial_number', u.unpack_one(" 0x12: + if self.manufacturer_date == 0: + self.add_field('sbds_manufacture_date', u.unpack_one(" 0x14: + if self.device_chemistry == 0x02: + self.add_field('sbds_device_chemistry', u.unpack_one("B"), self.fmtstr) + else: + u.skip(1) + if self.length > 0x15: + self.add_field('design_capacity_multiplier', u.unpack_one("B")) + if self.length > 0x16: + self.add_field('oem_specific', u.unpack_one(" 0x4: + self.add_field('capabilities', u.unpack_one("B")) + self.add_field('contains_watchdog_timer', bool(bitfields.getbits(self.capabilities, 5)), "capabilities[5]={}") + _boot_option = { + 0b00: 'Reserved, do not use', + 0b01: 'Operating System', + 0b10: 'System utilities', + 0b11: 'Do not reboot' + } + self.add_field('boot_option_on_limit', bitfields.getbits(self.capabilities, 4, 3), unpack.format_table("capabilities[4:3]={}", _boot_option)) + self.add_field('boot_option_after_watchdog_reset', bitfields.getbits(self.capabilities, 2, 1), unpack.format_table("capabilities[2:1]={}", _boot_option)) + self.add_field('system_reset_enabled_by_user', bool(bitfields.getbits(self.capabilities, 0)), "capabilities[0]={}") + if self.length > 0x5: + self.add_field('reset_count', u.unpack_one(" 0x5: + self.add_field('reset_limit', u.unpack_one(" 0x9: + self.add_field('timer_interval', u.unpack_one(" 0xB: + self.add_field('timeout', u.unpack_one(" 0x4: + self.add_field('hardware_security_settings', u.unpack_one("B")) + _status = { + 0x00: 'Disabled', + 0x01: 'Enabled', + 0x02: 'Not Implemented', + 0x03: 'Unknown' + } + self.add_field('power_on_password_status', bitfields.getbits(self.hardware_security_settings, 7, 6), unpack.format_table("hardware_security_settings[7:6]={}", _status)) + self.add_field('keyboard_password_status', bitfields.getbits(self.hardware_security_settings, 5, 4), unpack.format_table("hardware_security_settings[5:4]={}", _status)) + self.add_field('admin_password_status', bitfields.getbits(self.hardware_security_settings, 3, 2), unpack.format_table("hardware_security_settings0[3:2]={}", _status)) + self.add_field('front_panel_reset_status', bitfields.getbits(self.hardware_security_settings, 1, 0), unpack.format_table("hardware_security_settings[1:0]={}", 
_status)) + except: + self.decodeFailure = True + print "Error parsing HardwareSecurity" + import traceback + traceback.print_exc() + self.fini() + +class SystemPowerControls(SmbiosBaseStructure): + smbios_structure_type = 25 + + def __init__(self, u, sm): + super(SystemPowerControls, self).__init__(u, sm) + u = self.u + try: + if self.length > 0x4: + self.add_field('next_scheduled_poweron_month', u.unpack_one("B")) + self.add_field('next_scheduled_poweron_day_of_month', u.unpack_one("B")) + self.add_field('next_scheduled_poweron_hour', u.unpack_one("B")) + self.add_field('next_scheduled_poweron_minute', u.unpack_one("B")) + self.add_field('next_scheduled_poweron_second', u.unpack_one("B")) + except: + self.decodeFailure = True + print "Error parsing SystemPowerControls" + import traceback + traceback.print_exc() + self.fini() + +class VoltageProbe(SmbiosBaseStructure): + smbios_structure_type = 26 + + def __init__(self, u, sm): + super(VoltageProbe, self).__init__(u, sm) + u = self.u + try: + if self.length > 0x4: + self.add_field('description', u.unpack_one("B"), self.fmtstr) + if self.length > 0x5: + self.add_field('location_and_status', u.unpack_one("B")) + _status = { + 0b001: 'Other', + 0b010: 'Unknown', + 0b011: 'OK', + 0b100: 'Non-critical', + 0b101: 'Critical', + 0b110: 'Non-recoverable' + } + _location = { + 0b00001: 'Other', + 0b00010: 'Unknown', + 0b00011: 'Processor', + 0b00100: 'Disk', + 0b00101: 'Peripheral Bay', + 0b00110: 'System Management Module', + 0b00111: 'Motherboard', + 0b01000: 'Memory Module', + 0b01001: 'Processor Module', + 0b01010: 'Power Unit', + 0b01011: 'Add-in Card' + } + self.add_field('status', bitfields.getbits(self.location_and_status, 7, 5), unpack.format_table("location_and_status[7:5]={}", _status)) + self.add_field('location', bitfields.getbits(self.location_and_status, 4, 0), unpack.format_table("location_and_status[4:0]={}", _location)) + if self.length > 0x6: + self.add_field('max_value', u.unpack_one(" 0x8: + self.add_field('min_value', u.unpack_one(" 0xA: + self.add_field('resolution', u.unpack_one(" 0xC: + self.add_field('tolerance', u.unpack_one(" 0xE: + self.add_field('accuracy', u.unpack_one(" 0x10: + self.add_field('oem_defined', u.unpack_one(" 0x14: + self.add_field('nominal_value', u.unpack_one(" 0x4: + self.add_field('temperature_probe_handle', u.unpack_one(" 0x6: + self.add_field('device_type_and_status', u.unpack_one("B")) + _status = { + 0b001: 'Other', + 0b010: 'Unknown', + 0b011: 'OK', + 0b100: 'Non-critical', + 0b101: 'Critical', + 0b110: 'Non-recoverable' + } + _type = { + 0b00001: 'Other', + 0b00010: 'Unknown', + 0b00011: 'Fan', + 0b00100: 'Centrifugal Blower', + 0b00101: 'Chip Fan', + 0b00110: 'Cabinet Fan', + 0b00111: 'Power Supply Fan', + 0b01000: 'Heat Pipe', + 0b01001: 'Integrated Refrigeration', + 0b10000: 'Active Cooling', + 0b10001: 'Passive Cooling' + } + self.add_field('status', bitfields.getbits(self.device_type_and_status, 7, 5), unpack.format_table("device_type_and_status[7:5]={}", _status)) + self.add_field('device_type', bitfields.getbits(self.device_type_and_status, 4, 0), unpack.format_table("device_type_and_status[4:0]={}", _type)) + if self.length > 0x7: + self.add_field('cooling_unit_group', u.unpack_one("B")) + if self.length > 0x8: + self.add_field('OEM_defined', u.unpack_one(" 0xC: + self.add_field('nominal_speed', u.unpack_one(" 0xE: + self.add_field('description', u.unpack_one("B"), self.fmtstr) + except: + self.decodeFailure = True + print "Error parsing CoolingDevice" + import traceback + 
traceback.print_exc() + self.fini() + +class TemperatureProbe(SmbiosBaseStructure): + smbios_structure_type = 28 + + def __init__(self, u, sm): + super(TemperatureProbe, self).__init__(u, sm) + u = self.u + try: + if self.length > 0x4: + self.add_field('description', u.unpack_one("B"), self.fmtstr) + if self.length > 0x5: + self.add_field('location_and_status', u.unpack_one("B")) + _status = { + 0b001: 'Other', + 0b010: 'Unknown', + 0b011: 'OK', + 0b100: 'Non-critical', + 0b101: 'Critical', + 0b110: 'Non-recoverable' + } + _location = { + 0b00001: 'Other', + 0b00010: 'Unknown', + 0b00011: 'Processor', + 0b00100: 'Disk', + 0b00101: 'Peripheral Bay', + 0b00110: 'System Management Module', + 0b00111: 'Motherboard', + 0b01000: 'Memory Module', + 0b01001: 'Processor Module', + 0b01010: 'Power Unit', + 0b01011: 'Add-in Card', + 0b01100: 'Front Panel Board', + 0b01101: 'Back Panel Board', + 0b01110: 'Power System Board', + 0b01111: 'Drive Back Plane' + } + self.add_field('status', bitfields.getbits(self.location_and_status, 7, 5), unpack.format_table("location_and_status[7:5]={}", _status)) + self.add_field('location', bitfields.getbits(self.location_and_status, 4, 0), unpack.format_table("location_and_status[4:0]={}", _location)) + if self.length > 0x6: + self.add_field('maximum_value', u.unpack_one(" 0x8: + self.add_field('minimum_value', u.unpack_one(" 0xA: + self.add_field('resolution', u.unpack_one(" 0xC: + self.add_field('tolerance', u.unpack_one(" 0xE: + self.add_field('accuracy', u.unpack_one(" 0x10: + self.add_field('OEM_defined', u.unpack_one(" 0x14: + self.add_field('nominal_value', u.unpack_one(" 0x4: + self.add_field('description', u.unpack_one("B"), self.fmtstr) + if self.length > 0x5: + self.add_field('location_and_status', u.unpack_one("B")) + _status = { + 0b001: 'Other', + 0b010: 'Unknown', + 0b011: 'OK', + 0b100: 'Non-critical', + 0b101: 'Critical', + 0b110: 'Non-recoverable' + } + _location = { + 0b00001: 'Other', + 0b00010: 'Unknown', + 0b00011: 'Processor', + 0b00100: 'Disk', + 0b00101: 'Peripheral Bay', + 0b00110: 'System Management Module', + 0b00111: 'Motherboard', + 0b01000: 'Memory Module', + 0b01001: 'Processor Module', + 0b01010: 'Power Unit', + 0b01011: 'Add-in Card', + 0b01100: 'Front Panel Board', + 0b01101: 'Back Panel Board', + 0b01110: 'Power System Board', + 0b01111: 'Drive Back Plane' + } + self.add_field('status', bitfields.getbits(self.location_and_status, 7, 5), unpack.format_table("location_and_status[7:5]={}", _status)) + self.add_field('location', bitfields.getbits(self.location_and_status, 4, 0), unpack.format_table("location_and_status[4:0]={}", _location)) + if self.length > 0x6: + self.add_field('maximum_value', u.unpack_one(" 0x8: + self.add_field('minimum_value', u.unpack_one(" 0xA: + self.add_field('resolution', u.unpack_one(" 0xC: + self.add_field('tolerance', u.unpack_one(" 0xE: + self.add_field('accuracy', u.unpack_one(" 0x10: + self.add_field('OEM_defined', u.unpack_one(" 0x14: + self.add_field('nominal_value', u.unpack_one(" 0x4: + self.add_field('manufacturer_name', u.unpack_one("B"), self.fmtstr) + if self.length > 0x5: + self.add_field('connections', u.unpack_one("B")) + self.add_field('outbound_connection_enabled', bool(bitfields.getbits(self.connections, 1)), "connections[1]={}") + self.add_field('inbound_connection_enabled', bool(bitfields.getbits(self.connections, 0)), "connections[0]={}") + except: + self.decodeFailure = True + print "Error parsing OutOfBandRemoteAccess" + import traceback + traceback.print_exc() + self.fini() + 
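The probe structures above all split a packed location_and_status byte with bitfields.getbits(), a BITS helper that is not shown in this patch (CoolingDevice splits its device_type_and_status byte the same way). A minimal standalone sketch of that decode, assuming getbits(value, msb, lsb) returns the inclusive bit range [msb:lsb]; the raw byte below is made up for illustration:

def getbits(value, msb, lsb=None):
    # Assumed semantics of the BITS bitfields.getbits() helper: extract the
    # inclusive bit range [msb:lsb]; with a single bit index it reads one bit.
    if lsb is None:
        lsb = msb
    mask = (1 << (msb - lsb + 1)) - 1
    return (value >> lsb) & mask

# Hypothetical location_and_status byte as decoded by VoltageProbe and
# TemperatureProbe: status lives in bits 7:5, location in bits 4:0.
location_and_status = 0x67
status = getbits(location_and_status, 7, 5)    # 0b011 -> 'OK'
location = getbits(location_and_status, 4, 0)  # 0b00111 -> 'Motherboard'
print("status={:#05b} location={:#07b}".format(status, location))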
+class BootIntegrityServicesEntryPoint(SmbiosBaseStructure): + smbios_structure_type = 31 + +class SystemBootInformation(SmbiosBaseStructure): + smbios_structure_type = 32 + + def __init__(self, u, sm): + super(SystemBootInformation, self).__init__(u, sm) + u = self.u + try: + if self.length > 0xA: + u.skip(6) + _boot_status = { + 0: 'No errors detected', + 1: 'No bootable media', + 2: '"normal" operating system failed to load', + 3: 'Firmware-detected hardware failure, including "unknown" failure types', + 4: 'Operating system-detected hardware failure', + 5: 'User-requested boot, usually through a keystroke', + 6: 'System security violation', + 7: 'Previously-requested image', + 8: 'System watchdog timer expired, causing the system to reboot', + xrange(9,127): 'Reserved for future assignment', + xrange(128, 191): 'Vendor/OEM-specific implementations', + xrange(192, 255): 'Product-specific implementations' + } + self.add_field('boot_status', u.unpack_one("B"), unpack.format_table("{}", _boot_status)) + except: + self.decodeFailure = True + print "Error parsing SystemBootInformation" + import traceback + traceback.print_exc() + self.fini() + +class MemoryErrorInfo64Bit(SmbiosBaseStructure): + smbios_structure_type = 33 + + def __init__(self, u, sm): + super(MemoryErrorInfo64Bit, self).__init__(u, sm) + u = self.u + try: + if self.length > 0x4: + _error_types = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'OK', + 0x04: 'Bad read', + 0x05: 'Parity error', + 0x06: 'Single-bit error', + 0x07: 'Double-bit error', + 0x08: 'Multi-bit error', + 0x09: 'Nibble error', + 0x0A: 'Checksum error', + 0x0B: 'CRC error', + 0x0C: 'Corrected single-bit error', + 0x0D: 'Corrected error', + 0x0E: 'Uncorrectable error' + } + self.add_field('error_type', u.unpack_one("B"), unpack.format_table("{}", _error_types)) + if self.length > 0x5: + _error_granularity_field = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'Device level', + 0x04: 'Memory partition level' + } + self.add_field('error_granularity', u.unpack_one("B"), unpack.format_table("{}", _error_granularity_field)) + if self.length > 0x6: + _error_operation_field = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'Read', + 0x04: 'Write', + 0x05: 'Partial write' + } + self.add_field('error_operation', u.unpack_one("B"), unpack.format_table("{}", _error_operation_field)) + if self.length > 0x7: + self.add_field('vendor_syndrome', u.unpack_one(" 0xB: + self.add_field('memory_array_error_address', u.unpack_one(" 0xF: + self.add_field('device_error_address', u.unpack_one(" 0x13: + self.add_field('error_resolution', u.unpack_one(" 0x4: + self.add_field('description', u.unpack_one("B"), self.fmtstr) + if self.length > 0x5: + _type = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'National Semiconductor LM75', + 0x04: 'National Semiconductor LM78', + 0x05: 'National Semiconductor LM79', + 0x06: 'National Semiconductor LM80', + 0x07: 'National Semiconductor LM81', + 0x08: 'Analog Devices ADM9240', + 0x09: 'Dallas Semiconductor DS1780', + 0x0A: 'Maxim 1617', + 0x0B: 'Genesys GL518SM', + 0x0C: 'Winbond W83781D', + 0x0D: 'Holtek HT82H791' + } + self.add_field('device_type', u.unpack_one("B"), unpack.format_table("{}", _type)) + if self.length > 0x6: + self.add_field('address', u.unpack_one(" 0xA: + _address_type = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'I/O Port', + 0x04: 'Memory', + 0x05: 'SM Bus' + } + self.add_field('address_type', u.unpack_one("B"), unpack.format_table("{}", _address_type)) + except: + self.decodeFailure = True + print "Error parsing 
ManagementDevice" + import traceback + traceback.print_exc() + self.fini() + +class ManagementDeviceComponent(SmbiosBaseStructure): + smbios_structure_type = 35 + + def __init__(self, u, sm): + super(ManagementDeviceComponent, self).__init__(u, sm) + u = self.u + try: + if self.length > 0x4: + self.add_field('description', u.unpack_one("B"), self.fmtstr) + if self.length > 0x5: + self.add_field('management_device_handle', u.unpack_one(" 0x7: + self.add_field('component_handle', u.unpack_one(" 0x9: + self.add_field('threshold_handle', u.unpack_one(" 0x4: + self.add_field('lower_threshold_noncritical', u.unpack_one(" 0x6: + self.add_field('upper_threshold_noncritical', u.unpack_one(" 0x8: + self.add_field('lower_threshold_critical', u.unpack_one(" 0xA: + self.add_field('upper_threshold_critical', u.unpack_one(" 0xC: + self.add_field('lower_threshold_nonrecoverable', u.unpack_one(" 0xE: + self.add_field('upper_threshold_nonrecoverable', u.unpack_one(" 0x4: + _channel_type = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'RamBus', + 0x04: 'SyncLink' + } + self.add_field('channel_type', u.unpack_one("B"), unpack.format_table("{}", _channel_type)) + if self.length > 0x6: + self.add_field('max_channel_load', u.unpack_one("B")) + if self.length > 0x8: + self.add_field('memory_device_count', u.unpack_one("B")) + if self.length > 0xA: + self.add_field('memory_device_load', u.unpack_one("B")) + if self.length > 0xC: + self.add_field('memory_device_handle', u.unpack_one(" 0x4: + self.add_field('power_unit_group', u.unpack_one("B")) + if self.length > 0x5: + self.add_field('location', u.unpack_one("B"), self.fmtstr) + if self.length > 0x6: + self.add_field('device_name', u.unpack_one("B"), self.fmtstr) + if self.length > 0x7: + self.add_field('manufacturer', u.unpack_one("B"), self.fmtstr) + if self.length > 0x8: + self.add_field('serial_number', u.unpack_one("B"), self.fmtstr) + if self.length > 0x9: + self.add_field('asset_tag', u.unpack_one("B"), self.fmtstr) + if self.length > 0xA: + self.add_field('model_part_number', u.unpack_one("B"), self.fmtstr) + if self.length > 0xB: + self.add_field('revision_level', u.unpack_one("B"), self.fmtstr) + if self.length > 0xC: + self.add_field('max_power_capacity', u.unpack_one(" 0xE: + self.add_field('power_supply_characteristics', u.unpack_one(" 0x10: + self.add_field('input_voltage_probe_handle', u.unpack_one(" 0x12: + self.add_field('cooling_device_handle', u.unpack_one(" 0x14: + self.add_field('input_current_probe_handle', u.unpack_one(" 0x4: + self.add_field('num_additional_information_entries', u.unpack_one("B")) + if self.length > 0x5: + self.add_field('additional_information_entry_length', u.unpack_one("B")) + self.add_field('referenced_handle', u.unpack_one(" 0x4: + self.add_field('reference_designation', u.unpack_one("B"), self.fmtstr) + if self.length > 0x5: + self.add_field('device_type', u.unpack_one("B")) + self.add_field('device_enabled', bool(bitfields.getbits(self.device_type, 7)), "device_type[7]={}") + _device_types = { + 0x01: 'Other', + 0x02: 'Unknown', + 0x03: 'Video', + 0x04: 'SCSI Controller', + 0x05: 'Ethernet', + 0x06: 'Token Ring', + 0x07: 'Sound', + 0x08: 'PATA Controller', + 0x09: 'SATA Controller', + 0x0A: 'SAS Controller' + } + self.add_field('type_of_device', bitfields.getbits(self.device_type, 6, 0), unpack.format_table("device_type[6:0]={}", _device_types)) + if self.length > 0x6: + self.add_field('device_type_instance', u.unpack_one("B")) + if self.length > 0x7: + self.add_field('segment_group_number', u.unpack_one(" 0x9: + 
self.add_field('bus_number', u.unpack_one("B"), self.fmtstr) + if self.length > 0xA: + self.add_field('device_and_function_number', u.unpack_one("B")) + self.add_field('device_number', bitfields.getbits(self.device_type, 7, 3), "device_and_function_number[7:3]={}") + self.add_field('function_number', bitfields.getbits(self.device_type, 2, 0), "device_and_function_number[2:0]={}") + except: + self.decodeFailure = True + print "Error parsing OnboardDevicesExtendedInformation" + import traceback + traceback.print_exc() + self.fini() + +class ManagementControllerHostInterface(SmbiosBaseStructure): + smbios_structure_type = 42 + + def __init__(self, u, sm): + super(ManagementControllerHostInterface, self).__init__(u, sm) + u = self.u + try: + if self.length > 0x4: + _interface_types = { + 0x00: 'Reserved', + 0x01: 'Reserved', + 0x02: 'KCS: Keyboard Controller Style', + 0x03: '8250 UART Register Compatible', + 0x04: '16450 UART Register Compatible', + 0x05: '16550/16550A UART Register Compatible', + 0x06: '16650/16650A UART Register Compatible', + 0x07: '16750/16750A UART Register Compatible', + 0x08: '16850/16850A UART Register Compatible', + 0xF0: 'OEM' + } + self.add_field('interface_type', u.unpack_one("B"), unpack.format_table("{}", _interface_types)) + if self.length > 0x5: + self.add_field('mc_host_interface_data', u.unpack_rest(), self.fmtstr) + except: + self.decodeFailure = True + print "Error parsing ManagementControllerHostInterface" + import traceback + traceback.print_exc() + self.fini() + +class Inactive(SmbiosBaseStructure): + smbios_structure_type = 126 + + def __init__(self, u, sm): + super(Inactive, self).__init__(u, sm) + self.fini() + +class EndOfTable(SmbiosBaseStructure): + smbios_structure_type = 127 + + def __init__(self, u, sm): + super(EndOfTable, self).__init__(u, sm) + self.fini() + +class SmbiosStructureUnknown(SmbiosBaseStructure): + smbios_structure_type = None + + def __init__(self, u, sm): + super(SmbiosStructureUnknown, self).__init__(u, sm) + self.fini() + +_smbios_structures = [ + BIOSInformation, + SystemInformation, + BaseboardInformation, + SystemEnclosure, + ProcessorInformation, + MemoryControllerInformation, + MemoryModuleInformation, + CacheInformation, + PortConnectorInfo, + SystemSlots, + OnBoardDevicesInformation, + OEMStrings, + SystemConfigOptions, + BIOSLanguageInformation, + GroupAssociations, + SystemEventLog, + PhysicalMemoryArray, + MemoryDevice, + MemoryErrorInfo32Bit, + MemoryArrayMappedAddress, + MemoryDeviceMappedAddress, + BuiltInPointingDevice, + PortableBattery, + SystemReset, + HardwareSecurity, + SystemPowerControls, + VoltageProbe, + CoolingDevice, + TemperatureProbe, + ElectricalCurrentProbe, + OutOfBandRemoteAccess, + BootIntegrityServicesEntryPoint, + SystemBootInformation, + MemoryErrorInfo64Bit, + ManagementDevice, + ManagementDeviceComponent, + ManagementDeviceThresholdData, + MemoryChannel, + IPMIDeviceInformation, + SystemPowerSupply, + AdditionalInformation, + OnboardDevicesExtendedInformation, + ManagementControllerHostInterface, + Inactive, + EndOfTable, + SmbiosStructureUnknown, # Must always come last +] + +def log_smbios_info(): + with redirect.logonly(): + try: + sm = SMBIOS() + print + if sm is None: + print "No SMBIOS structures found" + return + output = {} + known_types = (0, 1) + for sm_struct in sm.structures: + if sm_struct.type in known_types: + output.setdefault(sm_struct.type, []).append(sm_struct) + if len(output) == len(known_types): + break + + print "SMBIOS information:" + for key in sorted(known_types): 
+ for s in output.get(key, ["No structure of type {} found".format(key)]): + print ttypager._wrap("{}: {}".format(key, s)) + except: + print "Error parsing SMBIOS information:" + import traceback + traceback.print_exc() + +def dump_raw(): + try: + sm = SMBIOS() + if sm: + s = "SMBIOS -- Raw bytes and structure decode.\n\n" + + s += str(sm.header) + '\n' + s += bits.dumpmem(sm._header_memory) + '\n' + + s += "Raw bytes for the SMBIOS structures\n" + s += bits.dumpmem(sm._structure_memory) + '\n' + + for sm_struct in sm.structures: + s += str(sm_struct) + '\n' + s += bits.dumpmem(sm_struct.raw_data) + + s += "Strings:\n" + for n in range(1, len(getattr(sm_struct, "strings", [])) + 1): + s += str(sm_struct.fmtstr(n)) + '\n' + s += bits.dumpmem(sm_struct.raw_strings) + '\n' + else: + s = "No SMBIOS structures found" + ttypager.ttypager_wrap(s, indent=False) + except: + print "Error parsing SMBIOS information:" + import traceback + traceback.print_exc() + +def dump(): + try: + sm = SMBIOS() + if sm: + s = str(sm) + else: + s = "No SMBIOS structures found" + ttypager.ttypager_wrap(s, indent=False) + except: + print "Error parsing SMBIOS information:" + import traceback + traceback.print_exc() + +def annex_a_conformance(): + try: + sm = SMBIOS() + + # check: 1. The table anchor string "_SM_" is present in the address range 0xF0000 to 0xFFFFF on a 16-byte boundary + + def table_entry_point_verification(): + ''' Verify table entry-point''' + if (sm.header.length < 0x1F): + print "Failure: Table entry-point - The entry-point Length must be at least 0x1F" + if sm.header.checksum != 0: + print "Failure: Table entry-point - The entry-point checksum must evaluate to 0" + if ((sm.header.major_version, sm.header.minor_version) < (2, 4)): + print "Failure: Table entry-point - SMBIOS version must be at least 2.4" + if (sm.header.intermediate_anchor_string != '_DMI_'): + print "Failure: Table entry-point - The Intermediate Anchor String must be '_DMI_'" + if (sm.header.intermediate_checksum != 0): + print "Failure: Table entry-point - The Intermediate checksum must evaluate to 0" + + # check: 3.
The structure-table is traversable and conforms to the entry-point specifications: + + def req_structures(): + '''Checks for required structures and corresponding data''' + types_present = [sm.structures[x].smbios_structure_type for x in range(len(sm.structures))] + required = [0, 1, 4, 7, 9, 16, 17, 19, 31, 32] + for s in required: + if s not in set(types_present): + print "Failure: Type {} required but not found".format(s) + + else: + if s == 0: + if types_present.count(s) > 1: + print "Failure: Type {} - One and only one structure of this type must be present.".format(s) + if sm.structure_type(s).length < 0x18: + print "Failure: Type {} - The structure Length field must be at least 0x18".format(s) + if sm.structure_type(s).version is None: + print "Failure: Type {} - BIOS Version string must be present and non-null.".format(s) + if sm.structure_type(s).release_date is None: + print "Failure: Type {} - BIOS Release Date string must be present, non-null, and include a 4-digit year".format(s) + if bitfields.getbits(sm.structure_type(s).characteristics, 3, 0) != 0 or bitfields.getbits(sm.structure_type(s).characteristics, 31, 4) == 0: + print "Failure: Type {} - BIOS Characteristics: bits 3:0 must all be 0, and at least one of bits 31:4 must be set to 1.".format(s) + elif s == 1: + if types_present.count(s) > 1: + print "Failure: Type {} - One and only one structure of this type must be present.".format(s) + if sm.structure_type(s).length < 0x1B: + print "Failure: Type {} - The structure Length field must be at least 0x1B".format(s) + if sm.structure_type(s).manufacturer == None: + print "Failure: Type {} - Manufacturer string must be present and non-null.".format(s) + if sm.structure_type(s).product_name == None: + print "Failure: Type {} - Product Name string must be present and non-null".format(s) + if sm.structure_type(s).uuid == '00000000 00000000' and sm.structure_type(s).uuid == 'FFFFFFFF FFFFFFFF': + print "Failure: Type {} - UUID field must be neither 00000000 00000000 nor FFFFFFFF FFFFFFFF.".format(s) + if sm.structure_type(s).wakeup_type == 00 and sm.structure_type(s).wakeup_type == 0x02: + print "Failure: Type {} - Wake-up Type field must be neither 00h (Reserved) nor 02h (Unknown).".format(s) + # continue for remaining required types + + # check remaining conformance guidelines + + table_entry_point_verification() + req_structures() + except: + print "Error checking ANNEX A conformance guidelines" + import traceback + traceback.print_exc() diff --git a/tests/avocado/acpi-bits/bits-tests/testacpi.py2 b/tests/avocado/acpi-bits/bits-tests/testacpi.py2 new file mode 100644 index 0000000000..dbc150076e --- /dev/null +++ b/tests/avocado/acpi-bits/bits-tests/testacpi.py2 @@ -0,0 +1,283 @@ +# Copyright (c) 2015, Intel Corporation +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Tests for ACPI""" + +import acpi +import bits +import bits.mwait +import struct +import testutil +import testsuite +import time + +def register_tests(): + testsuite.add_test("ACPI _MAT (Multiple APIC Table Entry) under Processor objects", test_mat, submenu="ACPI Tests") +# testsuite.add_test("ACPI _PSS (Pstate) table conformance tests", test_pss, submenu="ACPI Tests") +# testsuite.add_test("ACPI _PSS (Pstate) runtime tests", test_pstates, submenu="ACPI Tests") + testsuite.add_test("ACPI DSDT (Differentiated System Description Table)", test_dsdt, submenu="ACPI Tests") + testsuite.add_test("ACPI FACP (Fixed ACPI Description Table)", test_facp, submenu="ACPI Tests") + testsuite.add_test("ACPI HPET (High Precision Event Timer Table)", test_hpet, submenu="ACPI Tests") + testsuite.add_test("ACPI MADT (Multiple APIC Description Table)", test_apic, submenu="ACPI Tests") + testsuite.add_test("ACPI MPST (Memory Power State Table)", test_mpst, submenu="ACPI Tests") + testsuite.add_test("ACPI RSDP (Root System Description Pointer Structure)", test_rsdp, submenu="ACPI Tests") + testsuite.add_test("ACPI XSDT (Extended System Description Table)", test_xsdt, submenu="ACPI Tests") + +def test_mat(): + cpupaths = acpi.get_cpupaths() + apic = acpi.parse_apic() + procid_apicid = apic.procid_apicid + uid_x2apicid = apic.uid_x2apicid + for cpupath in cpupaths: + # Find the ProcId defined by the processor object + processor = acpi.evaluate(cpupath) + # Find the UID defined by the processor object's _UID method + uid = acpi.evaluate(cpupath + "._UID") + mat_buffer = acpi.evaluate(cpupath + "._MAT") + if mat_buffer is None: + continue + # Process each _MAT subtable + mat = acpi._MAT(mat_buffer) + for index, subtable in enumerate(mat): + if subtable.subtype == acpi.MADT_TYPE_LOCAL_APIC: + if subtable.flags.bits.enabled: + testsuite.test("{} Processor declaration ProcId = _MAT ProcId".format(cpupath), processor.ProcId == subtable.proc_id) + testsuite.print_detail("{} ProcId ({:#02x}) != _MAT ProcId ({:#02x})".format(cpupath, processor.ProcId, subtable.proc_id)) + testsuite.print_detail("Processor Declaration: {}".format(processor)) + testsuite.print_detail("_MAT entry[{}]: {}".format(index, subtable)) + if testsuite.test("{} with local APIC in _MAT has local APIC in MADT".format(cpupath), processor.ProcId in procid_apicid): + testsuite.test("{} ApicId derived using Processor declaration ProcId = _MAT ApicId".format(cpupath), procid_apicid[processor.ProcId] == subtable.apic_id) + testsuite.print_detail("{} ApicId derived from MADT ({:#02x}) != _MAT ApicId ({:#02x})".format(cpupath, procid_apicid[processor.ProcId], subtable.apic_id)) + testsuite.print_detail("Processor Declaration: {}".format(processor)) + testsuite.print_detail("_MAT 
entry[{}]: {}".format(index, subtable)) + if subtable.subtype == acpi.MADT_TYPE_LOCAL_X2APIC: + if subtable.flags.bits.enabled: + if testsuite.test("{} with x2Apic in _MAT has _UID".format(cpupath), uid is not None): + testsuite.test("{}._UID = _MAT UID".format(cpupath), uid == subtable.uid) + testsuite.print_detail("{}._UID ({:#x}) != _MAT UID ({:#x})".format(cpupath, uid, subtable.uid)) + testsuite.print_detail("_MAT entry[{}]: {}".format(index, subtable)) + if testsuite.test("{} with _MAT x2Apic has x2Apic in MADT".format(cpupath), subtable.uid in uid_x2apicid): + testsuite.test("{} x2ApicId derived from MADT using UID = _MAT x2ApicId".format(cpupath), uid_x2apicid[subtable.uid] == subtable.x2apicid) + testsuite.print_detail("{} x2ApicId derived from MADT ({:#02x}) != _MAT x2ApicId ({:#02x})".format(cpupath, uid_x2apicid[subtable.uid], subtable.x2apicid)) + testsuite.print_detail("_MAT entry[{}]: {}".format(index, subtable)) + +def test_pss(): + uniques = acpi.parse_cpu_method("_PSS") + # We special-case None here to avoid a double-failure for CPUs without a _PSS + testsuite.test("_PSS must be identical for all CPUs", len(uniques) <= 1 or (len(uniques) == 2 and None in uniques)) + for pss, cpupaths in uniques.iteritems(): + if not testsuite.test("_PSS must exist", pss is not None): + testsuite.print_detail(acpi.factor_commonprefix(cpupaths)) + testsuite.print_detail('No _PSS exists') + continue + + if not testsuite.test("_PSS must not be empty", pss.pstates): + testsuite.print_detail(acpi.factor_commonprefix(cpupaths)) + testsuite.print_detail('_PSS is empty') + continue + + testsuite.print_detail(acpi.factor_commonprefix(cpupaths)) + for index, pstate in enumerate(pss.pstates): + testsuite.print_detail("P[{}]: {}".format(index, pstate)) + + testsuite.test("_PSS must contain at most 16 Pstates", len(pss.pstates) <= 16) + testsuite.test("_PSS must have no duplicate Pstates", len(pss.pstates) == len(set(pss.pstates))) + + frequencies = [p.core_frequency for p in pss.pstates] + testsuite.test("_PSS must list Pstates in descending order of frequency", frequencies == sorted(frequencies, reverse=True)) + + testsuite.test("_PSS must have Pstates with no duplicate frequencies", len(frequencies) == len(set(frequencies))) + + dissipations = [p.power for p in pss.pstates] + testsuite.test("_PSS must list Pstates in descending order of power dissipation", dissipations == sorted(dissipations, reverse=True)) + +def test_pstates(): + """Execute and verify frequency for each Pstate in the _PSS""" + IA32_PERF_CTL = 0x199 + with bits.mwait.use_hint(), bits.preserve_msr(IA32_PERF_CTL): + cpupath_procid = acpi.find_procid() + cpupath_uid = acpi.find_uid() + apic = acpi.parse_apic() + procid_apicid = apic.procid_apicid + uid_x2apicid = apic.uid_x2apicid + def cpupath_apicid(cpupath): + if procid_apicid is not None: + procid = cpupath_procid.get(cpupath, None) + if procid is not None: + apicid = procid_apicid.get(procid, None) + if apicid is not None: + return apicid + if uid_x2apicid is not None: + uid = cpupath_uid.get(cpupath, None) + if uid is not None: + apicid = uid_x2apicid.get(uid, None) + if apicid is not None: + return apicid + return bits.cpus()[0] + + bclk = testutil.adjust_to_nearest(bits.bclk(), 100.0/12) * 1000000 + + uniques = acpi.parse_cpu_method("_PSS") + for pss, cpupaths in uniques.iteritems(): + if not testsuite.test("_PSS must exist", pss is not None): + testsuite.print_detail(acpi.factor_commonprefix(cpupaths)) + testsuite.print_detail('No _PSS exists') + continue + + for n, pstate in 
enumerate(pss.pstates): + for cpupath in cpupaths: + apicid = cpupath_apicid(cpupath) + if apicid is None: + print 'Failed to find apicid for cpupath {}'.format(cpupath) + continue + bits.wrmsr(apicid, IA32_PERF_CTL, pstate.control) + + # Detecting Turbo frequency requires at least 2 pstates + # since turbo frequency = max non-turbo frequency + 1 + turbo = False + if len(pss.pstates) >= 2: + turbo = (n == 0 and pstate.core_frequency == (pss.pstates[1].core_frequency + 1)) + if turbo: + # Needs to busywait, not sleep + start = time.time() + while (time.time() - start < 2): + pass + + for duration in (0.1, 1.0): + frequency_data = bits.cpu_frequency(duration) + # Abort the test if no cpu frequency is not available + if frequency_data is None: + continue + aperf = frequency_data[1] + aperf = testutil.adjust_to_nearest(aperf, bclk/2) + aperf = int(aperf / 1000000) + if turbo: + if aperf >= pstate.core_frequency: + break + else: + if aperf == pstate.core_frequency: + break + + if turbo: + testsuite.test("P{}: Turbo measured frequency {} >= expected {} MHz".format(n, aperf, pstate.core_frequency), aperf >= pstate.core_frequency) + else: + testsuite.test("P{}: measured frequency {} MHz == expected {} MHz".format(n, aperf, pstate.core_frequency), aperf == pstate.core_frequency) + +def test_psd_thread_scope(): + uniques = acpi.parse_cpu_method("_PSD") + if not testsuite.test("_PSD (P-State Dependency) must exist for each processor", None not in uniques): + testsuite.print_detail(acpi.factor_commonprefix(uniques[None])) + testsuite.print_detail('No _PSD exists') + return + unique_num_dependencies = {} + unique_num_entries = {} + unique_revision = {} + unique_domain = {} + unique_coordination_type = {} + unique_num_processors = {} + for value, cpupaths in uniques.iteritems(): + unique_num_dependencies.setdefault(len(value.dependencies), []).extend(cpupaths) + unique_num_entries.setdefault(value.dependencies[0].num_entries, []).extend(cpupaths) + unique_revision.setdefault(value.dependencies[0].revision, []).extend(cpupaths) + unique_domain.setdefault(value.dependencies[0].domain, []).extend(cpupaths) + unique_coordination_type.setdefault(value.dependencies[0].coordination_type, []).extend(cpupaths) + unique_num_processors.setdefault(value.dependencies[0].num_processors, []).extend(cpupaths) + def detail(d, fmt): + for value, cpupaths in sorted(d.iteritems(), key=(lambda (k,v): v)): + testsuite.print_detail(acpi.factor_commonprefix(cpupaths)) + testsuite.print_detail(fmt.format(value)) + + testsuite.test('Dependency count for each processor must be 1', unique_num_dependencies.keys() == [1]) + detail(unique_num_dependencies, 'Dependency count for each processor = {} (Expected 1)') + testsuite.test('_PSD.num_entries must be 5', unique_num_entries.keys() == [5]) + detail(unique_num_entries, 'num_entries = {} (Expected 5)') + testsuite.test('_PSD.revision must be 0', unique_revision.keys() == [0]) + detail(unique_revision, 'revision = {}') + testsuite.test('_PSD.coordination_type must be 0xFE (HW_ALL)', unique_coordination_type.keys() == [0xfe]) + detail(unique_coordination_type, 'coordination_type = {:#x} (Expected 0xFE HW_ALL)') + testsuite.test('_PSD.domain must be unique (thread-scoped) for each processor', len(unique_domain) == len(acpi.get_cpupaths())) + detail(unique_domain, 'domain = {:#x} (Expected a unique value for each processor)') + testsuite.test('_PSD.num_processors must be 1', unique_num_processors.keys() == [1]) + detail(unique_num_processors, 'num_processors = {} (Expected 1)') + +def 
test_table_checksum(data): + csum = sum(ord(c) for c in data) % 0x100 + testsuite.test('ACPI table cumulative checksum must equal 0', csum == 0) + testsuite.print_detail("Cumulative checksum = {} (Expected 0)".format(csum)) + +def test_apic(): + data = acpi.get_table("APIC") + if data is None: + return + test_table_checksum(data) + apic = acpi.parse_apic() + +def test_dsdt(): + data = acpi.get_table("DSDT") + if data is None: + return + test_table_checksum(data) + +def test_facp(): + data = acpi.get_table("FACP") + if data is None: + return + test_table_checksum(data) + facp = acpi.parse_facp() + +def test_hpet(): + data = acpi.get_table("HPET") + if data is None: + return + test_table_checksum(data) + hpet = acpi.parse_hpet() + +def test_mpst(): + data = acpi.get_table("MPST") + if data is None: + return + test_table_checksum(data) + mpst = acpi.MPST(data) + +def test_rsdp(): + data = acpi.get_table("RSD PTR ") + if data is None: + return + + # Checksum the first 20 bytes per ACPI 1.0 + csum = sum(ord(c) for c in data[:20]) % 0x100 + testsuite.test('ACPI 1.0 table first 20 bytes cummulative checksum must equal 0', csum == 0) + testsuite.print_detail("Cummulative checksum = {} (Expected 0)".format(csum)) + + test_table_checksum(data) + rsdp = acpi.parse_rsdp() + +def test_xsdt(): + data = acpi.get_table("XSDT") + if data is None: + return + test_table_checksum(data) + xsdt = acpi.parse_xsdt() diff --git a/tests/avocado/acpi-bits/bits-tests/testcpuid.py2 b/tests/avocado/acpi-bits/bits-tests/testcpuid.py2 new file mode 100644 index 0000000000..ac55d912e1 --- /dev/null +++ b/tests/avocado/acpi-bits/bits-tests/testcpuid.py2 @@ -0,0 +1,83 @@ +# Copyright (c) 2012, Intel Corporation +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
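test_table_checksum() above and the extra ACPI 1.0 check in test_rsdp() both apply the same rule: a table is consistent when all of its bytes sum to 0 modulo 0x100, and the RSDP additionally checksums only its first 20 bytes. A small self-contained sketch of that rule, using a fabricated buffer rather than a real table returned by acpi.get_table():

def acpi_checksum(data):
    # A table checksums correctly when the sum of all its bytes mod 0x100 is 0.
    # bytearray() accepts both a Python 2 byte string and Python 3 bytes.
    return sum(bytearray(data)) % 0x100

# Fake 20-byte buffer whose final byte is patched so the whole buffer sums to
# a multiple of 0x100, mirroring what test_rsdp() expects of the RSDP bytes.
buf = bytearray(b'RSD PTR ' + b'\x01' * 12)
buf[-1] = (0x100 - sum(buf[:-1])) % 0x100
assert acpi_checksum(bytes(buf)) == 0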
+ +"""Tests and helpers for CPUID.""" + +import bits +import testsuite +import testutil + +def cpuid_helper(function, index=None, shift=0, mask=~0, eax_mask=~0, ebx_mask=~0, ecx_mask=~0, edx_mask=~0): + if index is None: + index = 0 + indexdesc = "" + else: + indexdesc = " index {0:#x}".format(index) + + def find_mask(m): + if m == ~0: + return mask + return m + masks = map(find_mask, [eax_mask, ebx_mask, ecx_mask, edx_mask]) + + uniques = {} + for cpu in bits.cpus(): + regs = bits.cpuid_result(*[(r >> shift) & m for r, m in zip(bits.cpuid(cpu, function, index), masks)]) + uniques.setdefault(regs, []).append(cpu) + + desc = ["CPUID function {:#x}{}".format(function, indexdesc)] + + if shift != 0: + desc.append("Register values have been shifted by {}".format(shift)) + if mask != ~0 or eax_mask != ~0 or ebx_mask != ~0 or ecx_mask != ~0 or edx_mask != ~0: + desc.append("Register values have been masked:") + shifted_masks = bits.cpuid_result(*[m << shift for m in masks]) + desc.append("Masks: eax={eax:#010x} ebx={ebx:#010x} ecx={ecx:#010x} edx={edx:#010x}".format(**shifted_masks._asdict())) + + if len(uniques) > 1: + regvalues = zip(*uniques.iterkeys()) + common_masks = bits.cpuid_result(*map(testutil.find_common_mask, regvalues)) + common_values = bits.cpuid_result(*[v[0] & m for v, m in zip(regvalues, common_masks)]) + desc.append('Register values are not unique across all logical processors') + desc.append("Common bits: eax={eax:#010x} ebx={ebx:#010x} ecx={ecx:#010x} edx={edx:#010x}".format(**common_values._asdict())) + desc.append("Mask of common bits: {eax:#010x} {ebx:#010x} {ecx:#010x} {edx:#010x}".format(**common_masks._asdict())) + + for regs in sorted(uniques.iterkeys()): + cpus = uniques[regs] + desc.append("Register value: eax={eax:#010x} ebx={ebx:#010x} ecx={ecx:#010x} edx={edx:#010x}".format(**regs._asdict())) + desc.append("On {0} CPUs: {1}".format(len(cpus), testutil.apicid_list(cpus))) + + return uniques, desc + +def test_cpuid_consistency(text, function, index=None, shift=0, mask=~0, eax_mask=~0, ebx_mask=~0, ecx_mask=~0, edx_mask=~0): + uniques, desc = cpuid_helper(function, index, shift, mask, eax_mask, ebx_mask, ecx_mask, edx_mask) + desc[0] += " Consistency Check" + if text: + desc.insert(0, text) + status = testsuite.test(desc[0], len(uniques) == 1) + for line in desc[1:]: + testsuite.print_detail(line) + return status diff --git a/tests/acceptance/avocado_qemu/__init__.py b/tests/avocado/avocado_qemu/__init__.py similarity index 82% rename from tests/acceptance/avocado_qemu/__init__.py rename to tests/avocado/avocado_qemu/__init__.py index 2c4fef3e14..910f3ba1ea 100644 --- a/tests/acceptance/avocado_qemu/__init__.py +++ b/tests/avocado/avocado_qemu/__init__.py @@ -11,19 +11,20 @@ import logging import os import shutil +import subprocess import sys -import uuid import tempfile +import time +import uuid import avocado - -from avocado.utils import cloudinit -from avocado.utils import datadrainer -from avocado.utils import network -from avocado.utils import ssh -from avocado.utils import vmimage +from avocado.utils import cloudinit, datadrainer, process, ssh, vmimage from avocado.utils.path import find_command +from qemu.machine import QEMUMachine +from qemu.utils import (get_info_usernet_hostfwd_port, kvm_available, + tcg_available) + #: The QEMU build root directory. 
It may also be the source directory #: if building from the source dir, but it's safer to use BUILD_DIR for @@ -32,27 +33,75 @@ from avocado.utils.path import find_command BUILD_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) if os.path.islink(os.path.dirname(os.path.dirname(__file__))): - # The link to the acceptance tests dir in the source code directory + # The link to the avocado tests dir in the source code directory lnk = os.path.dirname(os.path.dirname(__file__)) #: The QEMU root source directory SOURCE_DIR = os.path.dirname(os.path.dirname(os.readlink(lnk))) else: SOURCE_DIR = BUILD_DIR -sys.path.append(os.path.join(SOURCE_DIR, 'python')) -from qemu.machine import QEMUMachine -from qemu.utils import ( - get_info_usernet_hostfwd_port, - kvm_available, - tcg_available, -) +def has_cmd(name, args=None): + """ + This function is for use in a @avocado.skipUnless decorator, e.g.: + + @skipUnless(*has_cmd('sudo -n', ('sudo', '-n', 'true'))) + def test_something_that_needs_sudo(self): + ... + """ + + if args is None: + args = ('which', name) + + try: + _, stderr, exitcode = run_cmd(args) + except Exception as e: + exitcode = -1 + stderr = str(e) + + if exitcode != 0: + cmd_line = ' '.join(args) + err = f'{name} required, but "{cmd_line}" failed: {stderr.strip()}' + return (False, err) + else: + return (True, '') + +def has_cmds(*cmds): + """ + This function is for use in a @avocado.skipUnless decorator and + allows checking for the availability of multiple commands, e.g.: + + @skipUnless(*has_cmds(('cmd1', ('cmd1', '--some-parameter')), + 'cmd2', 'cmd3')) + def test_something_that_needs_cmd1_and_cmd2(self): + ... + """ + + for cmd in cmds: + if isinstance(cmd, str): + cmd = (cmd,) + + ok, errstr = has_cmd(*cmd) + if not ok: + return (False, errstr) + + return (True, '') + +def run_cmd(args): + subp = subprocess.Popen(args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True) + stdout, stderr = subp.communicate() + ret = subp.returncode + + return (stdout, stderr, ret) def is_readable_executable_file(path): return os.path.isfile(path) and os.access(path, os.R_OK | os.X_OK) -def pick_default_qemu_bin(arch=None): +def pick_default_qemu_bin(bin_prefix='qemu-system-', arch=None): """ Picks the path of a QEMU binary, starting either in the current working directory or in the source tree root directory. 
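A usage sketch for the helpers introduced above (the class and command names are illustrative, not part of the patch): has_cmd()/has_cmds() produce the (condition, message) pair that avocado's skipUnless expects, and run_cmd() is the small subprocess wrapper they build on.

from avocado import skipUnless
from avocado_qemu import QemuSystemTest, has_cmds, run_cmd

class ExampleToolingTest(QemuSystemTest):
    @skipUnless(*has_cmds('cpio', ('qemu-img', ('qemu-img', '--version'))))
    def test_needs_cpio_and_qemu_img(self):
        # Probe a tool by hand with the same wrapper the helpers use.
        stdout, stderr, ret = run_cmd(('qemu-img', '--version'))
        self.assertEqual(ret, 0)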
@@ -71,14 +120,16 @@ def pick_default_qemu_bin(arch=None): # qemu binary path does not match arch for powerpc, handle it if 'ppc64le' in arch: arch = 'ppc64' - qemu_bin_relative_path = "./qemu-system-%s" % arch - if is_readable_executable_file(qemu_bin_relative_path): - return qemu_bin_relative_path - - qemu_bin_from_bld_dir_path = os.path.join(BUILD_DIR, - qemu_bin_relative_path) - if is_readable_executable_file(qemu_bin_from_bld_dir_path): - return qemu_bin_from_bld_dir_path + qemu_bin_name = bin_prefix + arch + qemu_bin_paths = [ + os.path.join(".", qemu_bin_name), + os.path.join(BUILD_DIR, qemu_bin_name), + os.path.join(BUILD_DIR, "build", qemu_bin_name), + ] + for path in qemu_bin_paths: + if is_readable_executable_file(path): + return path + return None def _console_interaction(test, success_message, failure_message, @@ -125,7 +176,7 @@ def interrupt_interactive_console_until_pattern(test, success_message, :param test: an Avocado test containing a VM that will have its console read and probed for a success or failure message - :type test: :class:`avocado_qemu.Test` + :type test: :class:`avocado_qemu.QemuSystemTest` :param success_message: if this message appears, test succeeds :param failure_message: if this message appears, test fails :param interrupt_string: a string to send to the console before trying @@ -141,7 +192,7 @@ def wait_for_console_pattern(test, success_message, failure_message=None, :param test: an Avocado test containing a VM that will have its console read and probed for a success or failure message - :type test: :class:`avocado_qemu.Test` + :type test: :class:`avocado_qemu.QemuSystemTest` :param success_message: if this message appears, test succeeds :param failure_message: if this message appears, test fails """ @@ -153,7 +204,7 @@ def exec_command(test, command): the content. :param test: an Avocado test containing a VM. 
- :type test: :class:`avocado_qemu.Test` + :type test: :class:`avocado_qemu.QemuSystemTest` :param command: the command to send :type command: str """ @@ -168,14 +219,18 @@ def exec_command_and_wait_for_pattern(test, command, :param test: an Avocado test containing a VM that will have its console read and probed for a success or failure message - :type test: :class:`avocado_qemu.Test` + :type test: :class:`avocado_qemu.QemuSystemTest` :param command: the command to send :param success_message: if this message appears, test succeeds :param failure_message: if this message appears, test fails """ _console_interaction(test, success_message, failure_message, command + '\r') -class Test(avocado.Test): +class QemuBaseTest(avocado.Test): + + # default timeout for all tests, can be overridden + timeout = 120 + def _get_unique_tag_val(self, tag_name): """ Gets a tag value, if unique for a key @@ -185,6 +240,43 @@ class Test(avocado.Test): return vals.pop() return None + def setUp(self, bin_prefix): + self.arch = self.params.get('arch', + default=self._get_unique_tag_val('arch')) + + self.cpu = self.params.get('cpu', + default=self._get_unique_tag_val('cpu')) + + default_qemu_bin = pick_default_qemu_bin(bin_prefix, arch=self.arch) + self.qemu_bin = self.params.get('qemu_bin', + default=default_qemu_bin) + if self.qemu_bin is None: + self.cancel("No QEMU binary defined or found in the build tree") + + def fetch_asset(self, name, + asset_hash=None, algorithm=None, + locations=None, expire=None, + find_only=False, cancel_on_missing=True): + return super().fetch_asset(name, + asset_hash=asset_hash, + algorithm=algorithm, + locations=locations, + expire=expire, + find_only=find_only, + cancel_on_missing=cancel_on_missing) + + +class QemuSystemTest(QemuBaseTest): + """Facilitates system emulation tests.""" + + def setUp(self): + self._vms = {} + + super().setUp('qemu-system-') + + self.machine = self.params.get('machine', + default=self._get_unique_tag_val('machine')) + def require_accelerator(self, accelerator): """ Requires an accelerator to be available for the test to continue @@ -207,23 +299,11 @@ class Test(avocado.Test): self.cancel("%s accelerator does not seem to be " "available" % accelerator) - def setUp(self): - self._vms = {} - - self.arch = self.params.get('arch', - default=self._get_unique_tag_val('arch')) - - self.cpu = self.params.get('cpu', - default=self._get_unique_tag_val('cpu')) - - self.machine = self.params.get('machine', - default=self._get_unique_tag_val('machine')) - - default_qemu_bin = pick_default_qemu_bin(arch=self.arch) - self.qemu_bin = self.params.get('qemu_bin', - default=default_qemu_bin) - if self.qemu_bin is None: - self.cancel("No QEMU binary defined or found in the build tree") + def require_netdev(self, netdevname): + netdevhelp = run_cmd([self.qemu_bin, + '-M', 'none', '-netdev', 'help'])[0]; + if netdevhelp.find('\n' + netdevname + '\n') < 0: + self.cancel('no support for user networking') def _new_vm(self, name, *args): self._sd = tempfile.TemporaryDirectory(prefix="avo_qemu_sock_") @@ -276,18 +356,24 @@ class Test(avocado.Test): for vm in self._vms.values(): vm.shutdown() self._sd = None + super().tearDown() - def fetch_asset(self, name, - asset_hash=None, algorithm=None, - locations=None, expire=None, - find_only=False, cancel_on_missing=True): - return super(Test, self).fetch_asset(name, - asset_hash=asset_hash, - algorithm=algorithm, - locations=locations, - expire=expire, - find_only=find_only, - cancel_on_missing=cancel_on_missing) + +class 
QemuUserTest(QemuBaseTest): + """Facilitates user-mode emulation tests.""" + + def setUp(self): + self._ldpath = [] + super().setUp('qemu-') + + def add_ldpath(self, ldpath): + self._ldpath.append(os.path.abspath(ldpath)) + + def run(self, bin_path, args=[]): + qemu_args = " ".join(["-L %s" % ldpath for ldpath in self._ldpath]) + bin_args = " ".join(args) + return process.run("%s %s %s %s" % (self.qemu_bin, qemu_args, + bin_path, bin_args)) class LinuxSSHMixIn: @@ -312,8 +398,7 @@ class LinuxSSHMixIn: self.ssh_session.connect() return except: - time.sleep(4) - pass + time.sleep(i) self.fail('ssh connection timeout') def ssh_command(self, command): @@ -430,17 +515,18 @@ class LinuxDistro: return self._info.get('kernel_params', None) -class LinuxTest(Test, LinuxSSHMixIn): +class LinuxTest(LinuxSSHMixIn, QemuSystemTest): """Facilitates having a cloud-image Linux based available. - For tests that indend to interact with guests, this is a better choice - to start with than the more vanilla `Test` class. + For tests that intend to interact with guests, this is a better choice + to start with than the more vanilla `QemuSystemTest` class. """ - timeout = 900 distro = None username = 'root' password = 'password' + smp = '2' + memory = '1024' def _set_distro(self): distro_name = self.params.get( @@ -469,10 +555,11 @@ class LinuxTest(Test, LinuxSSHMixIn): self.distro.checksum = distro_checksum def setUp(self, ssh_pubkey=None, network_device_type='virtio-net'): - super(LinuxTest, self).setUp() + super().setUp() + self.require_netdev('user') self._set_distro() - self.vm.add_args('-smp', '2') - self.vm.add_args('-m', '1024') + self.vm.add_args('-smp', self.smp) + self.vm.add_args('-m', self.memory) # The following network device allows for SSH connections self.vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22', '-device', '%s,netdev=vnet' % network_device_type) @@ -527,7 +614,6 @@ class LinuxTest(Test, LinuxSSHMixIn): self.log.info('Preparing cloudinit image') try: cloudinit_iso = os.path.join(self.workdir, 'cloudinit.iso') - self.phone_home_port = network.find_free_port() pubkey_content = None if ssh_pubkey: with open(ssh_pubkey) as pubkey: @@ -537,7 +623,7 @@ class LinuxTest(Test, LinuxSSHMixIn): password=self.password, # QEMU's hard coded usermode router address phone_home_host='10.0.2.2', - phone_home_port=self.phone_home_port, + phone_home_port=self.phone_server.server_port, authorized_key=pubkey_content) except Exception: self.cancel('Failed to prepare the cloudinit image') @@ -548,6 +634,8 @@ class LinuxTest(Test, LinuxSSHMixIn): self.vm.add_args('-drive', 'file=%s' % path) def set_up_cloudinit(self, ssh_pubkey=None): + self.phone_server = cloudinit.PhoneHomeServer(('0.0.0.0', 0), + self.name) cloudinit_iso = self.prepare_cloudinit(ssh_pubkey) self.vm.add_args('-drive', 'file=%s,format=raw' % cloudinit_iso) @@ -558,7 +646,9 @@ class LinuxTest(Test, LinuxSSHMixIn): logger=self.log.getChild('console')) console_drainer.start() self.log.info('VM launched, waiting for boot confirmation from guest') - cloudinit.wait_for_phone_home(('0.0.0.0', self.phone_home_port), self.name) + while not self.phone_server.instance_phoned_back: + self.phone_server.handle_request() + if set_up_ssh_connection: self.log.info('Setting up the SSH connection') self.ssh_connect(self.username, self.ssh_key) diff --git a/tests/acceptance/boot_linux.py b/tests/avocado/boot_linux.py similarity index 87% rename from tests/acceptance/boot_linux.py rename to tests/avocado/boot_linux.py index ab19146d1e..b3e58fa309 100644 
--- a/tests/acceptance/boot_linux.py +++ b/tests/avocado/boot_linux.py @@ -19,6 +19,7 @@ class BootLinuxX8664(LinuxTest): """ :avocado: tags=arch:x86_64 """ + timeout = 480 def test_pc_i440fx_tcg(self): """ @@ -57,12 +58,16 @@ class BootLinuxX8664(LinuxTest): self.launch_and_wait(set_up_ssh_connection=False) +# For Aarch64 we only boot KVM tests in CI as the TCG tests are very +# heavyweight. There are lighter weight distros which we use in the +# machine_aarch64_virt.py tests. class BootLinuxAarch64(LinuxTest): """ :avocado: tags=arch:aarch64 :avocado: tags=machine:virt :avocado: tags=machine:gic-version=2 """ + timeout = 720 def add_common_args(self): self.vm.add_args('-bios', @@ -71,7 +76,8 @@ class BootLinuxAarch64(LinuxTest): self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0') self.vm.add_args('-object', 'rng-random,id=rng0,filename=/dev/urandom') - def test_virt_tcg_gicv2(self): + @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') + def test_fedora_cloud_tcg_gicv2(self): """ :avocado: tags=accel:tcg :avocado: tags=cpu:max @@ -79,11 +85,13 @@ class BootLinuxAarch64(LinuxTest): """ self.require_accelerator("tcg") self.vm.add_args("-accel", "tcg") + self.vm.add_args("-cpu", "max,lpa2=off") self.vm.add_args("-machine", "virt,gic-version=2") self.add_common_args() self.launch_and_wait(set_up_ssh_connection=False) - def test_virt_tcg_gicv3(self): + @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') + def test_fedora_cloud_tcg_gicv3(self): """ :avocado: tags=accel:tcg :avocado: tags=cpu:max @@ -91,6 +99,7 @@ class BootLinuxAarch64(LinuxTest): """ self.require_accelerator("tcg") self.vm.add_args("-accel", "tcg") + self.vm.add_args("-cpu", "max,lpa2=off") self.vm.add_args("-machine", "virt,gic-version=3") self.add_common_args() self.launch_and_wait(set_up_ssh_connection=False) @@ -112,6 +121,8 @@ class BootLinuxPPC64(LinuxTest): :avocado: tags=arch:ppc64 """ + timeout = 360 + def test_pseries_tcg(self): """ :avocado: tags=machine:pseries @@ -127,6 +138,8 @@ class BootLinuxS390X(LinuxTest): :avocado: tags=arch:s390x """ + timeout = 240 + @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') def test_s390_ccw_virtio_tcg(self): """ diff --git a/tests/acceptance/boot_linux_console.py b/tests/avocado/boot_linux_console.py similarity index 93% rename from tests/acceptance/boot_linux_console.py rename to tests/avocado/boot_linux_console.py index 5248c8097d..ec07c64291 100644 --- a/tests/acceptance/boot_linux_console.py +++ b/tests/avocado/boot_linux_console.py @@ -15,20 +15,14 @@ import shutil from avocado import skip from avocado import skipUnless -from avocado_qemu import Test +from avocado import skipIf +from avocado_qemu import QemuSystemTest from avocado_qemu import exec_command from avocado_qemu import exec_command_and_wait_for_pattern from avocado_qemu import interrupt_interactive_console_until_pattern from avocado_qemu import wait_for_console_pattern from avocado.utils import process from avocado.utils import archive -from avocado.utils.path import find_command, CmdNotFoundError - -P7ZIP_AVAILABLE = True -try: - find_command('7z') -except CmdNotFoundError: - P7ZIP_AVAILABLE = False """ Round up to next power of 2 @@ -46,7 +40,7 @@ def image_pow2ceil_expand(path): with open(path, 'ab+') as fd: fd.truncate(size_aligned) -class LinuxKernelTest(Test): +class LinuxKernelTest(QemuSystemTest): KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' def wait_for_console_pattern(self, success_message, vm=None): @@ -332,31 +326,6 @@ class BootLinuxConsole(LinuxKernelTest): kernel_hash = 
'18d1c68f2e23429e266ca39ba5349ccd0aeb7180' self.do_test_mips_malta32el_nanomips(kernel_url, kernel_hash) - def test_aarch64_virt(self): - """ - :avocado: tags=arch:aarch64 - :avocado: tags=machine:virt - :avocado: tags=accel:tcg - :avocado: tags=cpu:cortex-a53 - """ - kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora' - '/linux/releases/29/Everything/aarch64/os/images/pxeboot' - '/vmlinuz') - kernel_hash = '8c73e469fc6ea06a58dc83a628fc695b693b8493' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyAMA0') - self.require_accelerator("tcg") - self.vm.add_args('-cpu', 'cortex-a53', - '-accel', 'tcg', - '-kernel', kernel_path, - '-append', kernel_command_line) - self.vm.launch() - console_pattern = 'Kernel command line: %s' % kernel_command_line - self.wait_for_console_pattern(console_pattern) - def test_aarch64_xlnx_versal_virt(self): """ :avocado: tags=arch:aarch64 @@ -367,13 +336,13 @@ class BootLinuxConsole(LinuxKernelTest): """ images_url = ('http://ports.ubuntu.com/ubuntu-ports/dists/' 'bionic-updates/main/installer-arm64/' - '20101020ubuntu543.15/images/') + '20101020ubuntu543.19/images/') kernel_url = images_url + 'netboot/ubuntu-installer/arm64/linux' - kernel_hash = '5bfc54cf7ed8157d93f6e5b0241e727b6dc22c50' + kernel_hash = 'e167757620640eb26de0972f578741924abb3a82' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) initrd_url = images_url + 'netboot/ubuntu-installer/arm64/initrd.gz' - initrd_hash = 'd385d3e88d53e2004c5d43cbe668b458a094f772' + initrd_hash = 'cab5cb3fcefca8408aa5aae57f24574bfce8bdb9' initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash) self.vm.set_console() @@ -413,6 +382,8 @@ class BootLinuxConsole(LinuxKernelTest): :avocado: tags=u-boot :avocado: tags=accel:tcg """ + self.require_netdev('user') + uboot_url = ('https://raw.githubusercontent.com/' 'Subbaraya-Sundeep/qemu-test-binaries/' 'fe371d32e50ca682391e1e70ab98c2942aeffb01/u-boot') @@ -475,7 +446,7 @@ class BootLinuxConsole(LinuxKernelTest): def test_arm_raspi2_uart0(self): """ :avocado: tags=arch:arm - :avocado: tags=machine:raspi2 + :avocado: tags=machine:raspi2b :avocado: tags=device:pl011 :avocado: tags=accel:tcg """ @@ -484,7 +455,7 @@ class BootLinuxConsole(LinuxKernelTest): def test_arm_raspi2_initrd(self): """ :avocado: tags=arch:arm - :avocado: tags=machine:raspi2 + :avocado: tags=machine:raspi2b """ deb_url = ('http://archive.raspberrypi.org/debian/' 'pool/main/r/raspberrypi-firmware/' @@ -519,7 +490,7 @@ class BootLinuxConsole(LinuxKernelTest): 'BCM2835') exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', '/soc/cprman@7e101000') - exec_command(self, 'halt') + exec_command_and_wait_for_pattern(self, 'halt', 'reboot: System halted') # Wait for VM to shut down gracefully self.vm.wait() @@ -811,6 +782,8 @@ class BootLinuxConsole(LinuxKernelTest): :avocado: tags=machine:orangepi-pc :avocado: tags=device:sd """ + self.require_netdev('user') + deb_url = ('https://apt.armbian.com/pool/main/l/' 'linux-5.10.16-sunxi/linux-image-current-sunxi_21.02.2_armhf.deb') deb_hash = '9fa84beda245cabf0b4fa84cf6eaa7738ead1da0' @@ -820,8 +793,8 @@ class BootLinuxConsole(LinuxKernelTest): dtb_path = '/usr/lib/linux-image-current-sunxi/sun8i-h3-orangepi-pc.dtb' dtb_path = self.extract_from_deb(deb_path, dtb_path) rootfs_url = ('http://storage.kernelci.org/images/rootfs/buildroot/' - 'kci-2019.02/armel/base/rootfs.ext2.xz') - rootfs_hash = 
'692510cb625efda31640d1de0a8d60e26040f061' + 'buildroot-baseline/20221116.0/armel/rootfs.ext2.xz') + rootfs_hash = 'fae32f337c7b87547b10f42599acf109da8b6d9a' rootfs_path_xz = self.fetch_asset(rootfs_url, asset_hash=rootfs_hash) rootfs_path = os.path.join(self.workdir, 'rootfs.cpio') archive.lzma_uncompress(rootfs_path_xz, rootfs_path) @@ -971,7 +944,7 @@ class BootLinuxConsole(LinuxKernelTest): def test_aarch64_raspi3_atf(self): """ :avocado: tags=arch:aarch64 - :avocado: tags=machine:raspi3 + :avocado: tags=machine:raspi3b :avocado: tags=cpu:cortex-a53 :avocado: tags=device:pl011 :avocado: tags=atf @@ -1056,8 +1029,8 @@ class BootLinuxConsole(LinuxKernelTest): self.wait_for_console_pattern(console_pattern) def do_test_advcal_2018(self, day, tar_hash, kernel_name, console=0): - tar_url = ('https://www.qemu-advent-calendar.org' - '/2018/download/day' + day + '.tar.xz') + tar_url = ('https://qemu-advcal.gitlab.io' + '/qac-best-of-multiarch/download/day' + day + '.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) archive.extract(file_path, self.workdir) self.vm.set_console(console_index=console) @@ -1075,49 +1048,6 @@ class BootLinuxConsole(LinuxKernelTest): self.vm.add_args('-dtb', self.workdir + '/day16/vexpress-v2p-ca9.dtb') self.do_test_advcal_2018('16', tar_hash, 'winter.zImage') - def test_arm_ast2400_palmetto_openbmc_v2_9_0(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:palmetto-bmc - """ - - image_url = ('https://github.com/openbmc/openbmc/releases/download/2.9.0/' - 'obmc-phosphor-image-palmetto.static.mtd') - image_hash = ('3e13bbbc28e424865dc42f35ad672b10f2e82cdb11846bb28fa625b48beafd0d') - image_path = self.fetch_asset(image_url, asset_hash=image_hash, - algorithm='sha256') - - self.do_test_arm_aspeed(image_path) - - def test_arm_ast2500_romulus_openbmc_v2_9_0(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:romulus-bmc - """ - - image_url = ('https://github.com/openbmc/openbmc/releases/download/2.9.0/' - 'obmc-phosphor-image-romulus.static.mtd') - image_hash = ('820341076803f1955bc31e647a512c79f9add4f5233d0697678bab4604c7bb25') - image_path = self.fetch_asset(image_url, asset_hash=image_hash, - algorithm='sha256') - - self.do_test_arm_aspeed(image_path) - - def do_test_arm_aspeed(self, image): - self.vm.set_console() - self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw', - '-net', 'nic') - self.vm.launch() - - self.wait_for_console_pattern("U-Boot 2016.07") - self.wait_for_console_pattern("## Loading kernel from FIT Image at 20080000") - self.wait_for_console_pattern("Starting kernel ...") - self.wait_for_console_pattern("Booting Linux on physical CPU 0x0") - self.wait_for_console_pattern( - "aspeed-smc 1e620000.spi: read control register: 203b0641") - self.wait_for_console_pattern("ftgmac100 1e660000.ethernet eth0: irq ") - self.wait_for_console_pattern("systemd[1]: Set hostname to") - def test_arm_ast2600_debian(self): """ :avocado: tags=arch:arm @@ -1172,15 +1102,61 @@ class BootLinuxConsole(LinuxKernelTest): :avocado: tags=arch:ppc64 :avocado: tags=machine:ppce500 :avocado: tags=cpu:e5500 + :avocado: tags=accel:tcg """ + self.require_accelerator("tcg") tar_hash = '6951d86d644b302898da2fd701739c9406527fe1' self.do_test_advcal_2018('19', tar_hash, 'uImage') + def do_test_ppc64_powernv(self, proc): + self.require_accelerator("tcg") + images_url = ('https://github.com/open-power/op-build/releases/download/v2.7/') + + kernel_url = images_url + 'zImage.epapr' + kernel_hash = 
'0ab237df661727e5392cee97460e8674057a883c5f74381a128fa772588d45cd' + kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash, + algorithm='sha256') + self.vm.set_console() + self.vm.add_args('-kernel', kernel_path, + '-append', 'console=tty0 console=hvc0', + '-device', 'pcie-pci-bridge,id=bridge1,bus=pcie.1,addr=0x0', + '-device', 'nvme,bus=pcie.2,addr=0x0,serial=1234', + '-device', 'e1000e,bus=bridge1,addr=0x3', + '-device', 'nec-usb-xhci,bus=bridge1,addr=0x2') + self.vm.launch() + + self.wait_for_console_pattern("CPU: " + proc + " generation processor") + self.wait_for_console_pattern("zImage starting: loaded") + self.wait_for_console_pattern("Run /init as init process") + self.wait_for_console_pattern("Creating 1 MTD partitions") + + def test_ppc_powernv8(self): + """ + :avocado: tags=arch:ppc64 + :avocado: tags=machine:powernv8 + :avocado: tags=accel:tcg + """ + self.do_test_ppc64_powernv('P8') + + def test_ppc_powernv9(self): + """ + :avocado: tags=arch:ppc64 + :avocado: tags=machine:powernv9 + :avocado: tags=accel:tcg + """ + self.do_test_ppc64_powernv('P9') + def test_ppc_g3beige(self): """ :avocado: tags=arch:ppc :avocado: tags=machine:g3beige + :avocado: tags=accel:tcg """ + # TODO: g3beige works with kvm_pr but we don't have a + # reliable way ATM (e.g. looking at /proc/modules) to detect + # whether we're running kvm_hv or kvm_pr. For now let's + # disable this test if we don't have TCG support. + self.require_accelerator("tcg") tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc' self.vm.add_args('-M', 'graphics=off') self.do_test_advcal_2018('15', tar_hash, 'invaders.elf') @@ -1189,11 +1165,21 @@ class BootLinuxConsole(LinuxKernelTest): """ :avocado: tags=arch:ppc :avocado: tags=machine:mac99 + :avocado: tags=accel:tcg """ + # TODO: mac99 works with kvm_pr but we don't have a + # reliable way ATM (e.g. looking at /proc/modules) to detect + # whether we're running kvm_hv or kvm_pr. For now let's + # disable this test if we don't have TCG support. + self.require_accelerator("tcg") tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc' self.vm.add_args('-M', 'graphics=off') self.do_test_advcal_2018('15', tar_hash, 'invaders.elf') + # This test has a 6-10% failure rate on various hosts that look + # like issues with a buggy kernel. As a result we don't want it + # gating releases on Gitlab. + @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') def test_sh4_r2d(self): """ :avocado: tags=arch:sh4 diff --git a/tests/acceptance/boot_xen.py b/tests/avocado/boot_xen.py similarity index 99% rename from tests/acceptance/boot_xen.py rename to tests/avocado/boot_xen.py index 3479b5233b..fc2faeedb5 100644 --- a/tests/acceptance/boot_xen.py +++ b/tests/avocado/boot_xen.py @@ -13,7 +13,6 @@ import os -from avocado import skipIf from avocado_qemu import wait_for_console_pattern from boot_linux_console import LinuxKernelTest diff --git a/tests/acceptance/cpu_queries.py b/tests/avocado/cpu_queries.py similarity index 91% rename from tests/acceptance/cpu_queries.py rename to tests/avocado/cpu_queries.py index cc9e380cc7..cf69f69b11 100644 --- a/tests/acceptance/cpu_queries.py +++ b/tests/avocado/cpu_queries.py @@ -8,9 +8,9 @@ # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. 
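The TODO comments on the g3beige and mac99 tests above suggest inspecting /proc/modules to tell kvm_hv from kvm_pr, but stop short of doing so. A hypothetical sketch of that probe, shown only to illustrate the idea the comments describe; it is not part of the patch, and as the comments note it is not yet considered reliable enough to gate the tests on:

def host_kvm_flavour():
    """Return 'kvm_hv', 'kvm_pr' or None, based on the loaded kernel modules."""
    try:
        with open('/proc/modules') as mods:
            loaded = {line.split()[0] for line in mods if line.strip()}
    except OSError:
        return None
    for flavour in ('kvm_hv', 'kvm_pr'):
        if flavour in loaded:
            return flavour
    return None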
-from avocado_qemu import Test +from avocado_qemu import QemuSystemTest -class QueryCPUModelExpansion(Test): +class QueryCPUModelExpansion(QemuSystemTest): """ Run query-cpu-model-expansion for each CPU model, and validate results """ diff --git a/tests/acceptance/empty_cpu_model.py b/tests/avocado/empty_cpu_model.py similarity index 88% rename from tests/acceptance/empty_cpu_model.py rename to tests/avocado/empty_cpu_model.py index a1e59e45e4..22f504418d 100644 --- a/tests/acceptance/empty_cpu_model.py +++ b/tests/avocado/empty_cpu_model.py @@ -7,9 +7,9 @@ # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest -class EmptyCPUModel(Test): +class EmptyCPUModel(QemuSystemTest): def test(self): self.vm.add_args('-S', '-display', 'none', '-machine', 'none', '-cpu', '') self.vm.set_qmp_monitor(enabled=False) diff --git a/tests/acceptance/hotplug_cpu.py b/tests/avocado/hotplug_cpu.py similarity index 100% rename from tests/acceptance/hotplug_cpu.py rename to tests/avocado/hotplug_cpu.py diff --git a/tests/acceptance/info_usernet.py b/tests/avocado/info_usernet.py similarity index 85% rename from tests/acceptance/info_usernet.py rename to tests/avocado/info_usernet.py index 9c1fd903a0..fdc4d90c42 100644 --- a/tests/acceptance/info_usernet.py +++ b/tests/avocado/info_usernet.py @@ -8,14 +8,18 @@ # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from qemu.utils import get_info_usernet_hostfwd_port -class InfoUsernet(Test): +class InfoUsernet(QemuSystemTest): + """ + :avocado: tags=machine:none + """ def test_hostfwd(self): + self.require_netdev('user') self.vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22') self.vm.launch() res = self.vm.command('human-monitor-command', diff --git a/tests/acceptance/intel_iommu.py b/tests/avocado/intel_iommu.py similarity index 100% rename from tests/acceptance/intel_iommu.py rename to tests/avocado/intel_iommu.py diff --git a/tests/acceptance/linux_initrd.py b/tests/avocado/linux_initrd.py similarity index 96% rename from tests/acceptance/linux_initrd.py rename to tests/avocado/linux_initrd.py index a249e2f14a..ba02e5a563 100644 --- a/tests/acceptance/linux_initrd.py +++ b/tests/avocado/linux_initrd.py @@ -1,4 +1,4 @@ -# Linux initrd acceptance test. +# Linux initrd integration test. # # Copyright (c) 2018 Red Hat, Inc. # @@ -12,11 +12,11 @@ import os import logging import tempfile -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado import skipIf -class LinuxInitrd(Test): +class LinuxInitrd(QemuSystemTest): """ Checks QEMU evaluates correctly the initrd file passed as -initrd option. 
diff --git a/tests/acceptance/linux_ssh_mips_malta.py b/tests/avocado/linux_ssh_mips_malta.py similarity index 97% rename from tests/acceptance/linux_ssh_mips_malta.py rename to tests/avocado/linux_ssh_mips_malta.py index 4de1947418..0179d8a6ca 100644 --- a/tests/acceptance/linux_ssh_mips_malta.py +++ b/tests/avocado/linux_ssh_mips_malta.py @@ -12,7 +12,8 @@ import logging import time from avocado import skipUnless -from avocado_qemu import Test, LinuxSSHMixIn +from avocado_qemu import LinuxSSHMixIn +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado.utils import process from avocado.utils import archive @@ -21,7 +22,10 @@ from avocado.utils import ssh @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') @skipUnless(ssh.SSH_CLIENT_BINARY, 'No SSH client available') -class LinuxSSH(Test, LinuxSSHMixIn): +class LinuxSSH(QemuSystemTest, LinuxSSHMixIn): + """ + :avocado: tags=accel:tcg + """ timeout = 150 # Not for 'configure --enable-debug --enable-debug-tcg' diff --git a/tests/avocado/load_bflt.py b/tests/avocado/load_bflt.py new file mode 100644 index 0000000000..bb50cec1ee --- /dev/null +++ b/tests/avocado/load_bflt.py @@ -0,0 +1,54 @@ +# Test the bFLT loader format +# +# Copyright (C) 2019 Philippe Mathieu-Daudé +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os +import bz2 +import subprocess + +from avocado import skipUnless +from avocado_qemu import QemuUserTest +from avocado_qemu import has_cmd + + +class LoadBFLT(QemuUserTest): + + def extract_cpio(self, cpio_path): + """ + Extracts a cpio archive into the test workdir + + :param cpio_path: path to the cpio archive + """ + cwd = os.getcwd() + os.chdir(self.workdir) + with bz2.open(cpio_path, 'rb') as archive_cpio: + subprocess.run(['cpio', '-i'], input=archive_cpio.read(), + stderr=subprocess.DEVNULL) + os.chdir(cwd) + + @skipUnless(*has_cmd('cpio')) + @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') + def test_stm32(self): + """ + :avocado: tags=arch:arm + :avocado: tags=linux_user + :avocado: tags=quick + """ + # See https://elinux.org/STM32#User_Space + rootfs_url = ('https://elinux.org/images/5/51/' + 'Stm32_mini_rootfs.cpio.bz2') + rootfs_hash = '9f065e6ba40cce7411ba757f924f30fcc57951e6' + rootfs_path_bz2 = self.fetch_asset(rootfs_url, asset_hash=rootfs_hash) + busybox_path = os.path.join(self.workdir, "/bin/busybox") + + self.extract_cpio(rootfs_path_bz2) + + res = self.run(busybox_path) + ver = 'BusyBox v1.24.0.git (2015-02-03 22:17:13 CET) multi-call binary.' + self.assertIn(ver, res.stdout_text) + + res = self.run(busybox_path, ['uname', '-a']) + unm = 'armv7l GNU/Linux' + self.assertIn(unm, res.stdout_text) diff --git a/tests/avocado/machine_aarch64_virt.py b/tests/avocado/machine_aarch64_virt.py new file mode 100644 index 0000000000..c2b2ba2cf8 --- /dev/null +++ b/tests/avocado/machine_aarch64_virt.py @@ -0,0 +1,95 @@ +# Functional test that boots a various Linux systems and checks the +# console output. +# +# Copyright (c) 2022 Linaro Ltd. 
+# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import time +import os + +from avocado_qemu import QemuSystemTest +from avocado_qemu import wait_for_console_pattern +from avocado_qemu import exec_command +from avocado_qemu import BUILD_DIR + +class Aarch64VirtMachine(QemuSystemTest): + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' + timeout = 360 + + def wait_for_console_pattern(self, success_message, vm=None): + wait_for_console_pattern(self, success_message, + failure_message='Kernel panic - not syncing', + vm=vm) + + # This tests the whole boot chain from EFI to Userspace + # We only boot a whole OS for the current top level CPU and GIC + # Other test profiles should use more minimal boots + def test_alpine_virt_tcg_gic_max(self): + """ + :avocado: tags=arch:aarch64 + :avocado: tags=machine:virt + :avocado: tags=accel:tcg + """ + iso_url = ('https://dl-cdn.alpinelinux.org/' + 'alpine/v3.16/releases/aarch64/' + 'alpine-virt-3.16.3-aarch64.iso') + + # Alpine use sha256 so I recalculated this myself + iso_sha1 = '0683bc089486d55c91bf6607d5ecb93925769bc0' + iso_path = self.fetch_asset(iso_url, asset_hash=iso_sha1) + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyAMA0') + self.require_accelerator("tcg") + + self.vm.add_args("-accel", "tcg") + self.vm.add_args("-cpu", "max,pauth-impdef=on") + self.vm.add_args("-machine", + "virt,acpi=on," + "virtualization=on," + "mte=on," + "gic-version=max,iommu=smmuv3") + self.vm.add_args("-smp", "2", "-m", "1024") + self.vm.add_args('-bios', os.path.join(BUILD_DIR, 'pc-bios', + 'edk2-aarch64-code.fd')) + self.vm.add_args("-drive", f"file={iso_path},format=raw") + self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0') + self.vm.add_args('-object', 'rng-random,id=rng0,filename=/dev/urandom') + + self.vm.launch() + self.wait_for_console_pattern('Welcome to Alpine Linux 3.16') + + + def test_aarch64_virt(self): + """ + :avocado: tags=arch:aarch64 + :avocado: tags=machine:virt + :avocado: tags=accel:tcg + :avocado: tags=cpu:max + """ + kernel_url = ('https://fileserver.linaro.org/s/' + 'z6B2ARM7DQT3HWN/download') + + kernel_hash = 'ed11daab50c151dde0e1e9c9cb8b2d9bd3215347' + kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyAMA0') + self.require_accelerator("tcg") + self.vm.add_args('-cpu', 'max,pauth-impdef=on', + '-accel', 'tcg', + '-kernel', kernel_path, + '-append', kernel_command_line) + self.vm.launch() + self.wait_for_console_pattern('Welcome to Buildroot') + time.sleep(0.1) + exec_command(self, 'root') + time.sleep(0.1) + exec_command(self, 'cat /proc/self/maps') + time.sleep(0.1) diff --git a/tests/acceptance/machine_arm_canona1100.py b/tests/avocado/machine_arm_canona1100.py similarity index 84% rename from tests/acceptance/machine_arm_canona1100.py rename to tests/avocado/machine_arm_canona1100.py index 0e5c43dbcf..a42d8b0f2b 100644 --- a/tests/acceptance/machine_arm_canona1100.py +++ b/tests/avocado/machine_arm_canona1100.py @@ -8,11 +8,11 @@ # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. 
-from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado.utils import archive -class CanonA1100Machine(Test): +class CanonA1100Machine(QemuSystemTest): """Boots the barebox firmware and checks that the console is operational""" timeout = 90 @@ -23,8 +23,8 @@ class CanonA1100Machine(Test): :avocado: tags=machine:canon-a1100 :avocado: tags=device:pflash_cfi02 """ - tar_url = ('https://www.qemu-advent-calendar.org' - '/2018/download/day18.tar.xz') + tar_url = ('https://qemu-advcal.gitlab.io' + '/qac-best-of-multiarch/download/day18.tar.xz') tar_hash = '068b5fc4242b29381acee94713509f8a876e9db6' file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) archive.extract(file_path, self.workdir) diff --git a/tests/acceptance/machine_arm_integratorcp.py b/tests/avocado/machine_arm_integratorcp.py similarity index 97% rename from tests/acceptance/machine_arm_integratorcp.py rename to tests/avocado/machine_arm_integratorcp.py index 49c8ebff78..1ffe1073ef 100644 --- a/tests/acceptance/machine_arm_integratorcp.py +++ b/tests/avocado/machine_arm_integratorcp.py @@ -12,7 +12,7 @@ import os import logging from avocado import skipUnless -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern @@ -29,7 +29,7 @@ except ImportError: CV2_AVAILABLE = False -class IntegratorMachine(Test): +class IntegratorMachine(QemuSystemTest): timeout = 90 diff --git a/tests/acceptance/machine_arm_n8x0.py b/tests/avocado/machine_arm_n8x0.py similarity index 95% rename from tests/acceptance/machine_arm_n8x0.py rename to tests/avocado/machine_arm_n8x0.py index e5741f2d8d..12e9a6803b 100644 --- a/tests/acceptance/machine_arm_n8x0.py +++ b/tests/avocado/machine_arm_n8x0.py @@ -11,10 +11,10 @@ import os from avocado import skipUnless -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern -class N8x0Machine(Test): +class N8x0Machine(QemuSystemTest): """Boots the Linux kernel and checks that the console is operational""" timeout = 90 diff --git a/tests/avocado/machine_aspeed.py b/tests/avocado/machine_aspeed.py new file mode 100644 index 0000000000..1fc385e1c8 --- /dev/null +++ b/tests/avocado/machine_aspeed.py @@ -0,0 +1,272 @@ +# Functional test that boots the ASPEED SoCs with firmware +# +# Copyright (C) 2022 ASPEED Technology Inc +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
+ +import time +import os + +from avocado_qemu import QemuSystemTest +from avocado_qemu import wait_for_console_pattern +from avocado_qemu import exec_command +from avocado_qemu import exec_command_and_wait_for_pattern +from avocado_qemu import interrupt_interactive_console_until_pattern +from avocado.utils import archive +from avocado import skipIf + + +class AST1030Machine(QemuSystemTest): + """Boots the zephyr os and checks that the console is operational""" + + timeout = 10 + + def test_ast1030_zephyros(self): + """ + :avocado: tags=arch:arm + :avocado: tags=machine:ast1030-evb + """ + tar_url = ('https://github.com/AspeedTech-BMC' + '/zephyr/releases/download/v00.01.04/ast1030-evb-demo.zip') + tar_hash = '4c6a8ce3a8ba76ef1a65dae419ae3409343c4b20' + tar_path = self.fetch_asset(tar_url, asset_hash=tar_hash) + archive.extract(tar_path, self.workdir) + kernel_file = self.workdir + "/ast1030-evb-demo/zephyr.elf" + self.vm.set_console() + self.vm.add_args('-kernel', kernel_file, + '-nographic') + self.vm.launch() + wait_for_console_pattern(self, "Booting Zephyr OS") + exec_command_and_wait_for_pattern(self, "help", + "Available commands") + +class AST2x00Machine(QemuSystemTest): + + timeout = 90 + + def wait_for_console_pattern(self, success_message, vm=None): + wait_for_console_pattern(self, success_message, + failure_message='Kernel panic - not syncing', + vm=vm) + + def do_test_arm_aspeed(self, image): + self.vm.set_console() + self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw', + '-net', 'nic') + self.vm.launch() + + self.wait_for_console_pattern("U-Boot 2016.07") + self.wait_for_console_pattern("## Loading kernel from FIT Image at 20080000") + self.wait_for_console_pattern("Starting kernel ...") + self.wait_for_console_pattern("Booting Linux on physical CPU 0x0") + wait_for_console_pattern(self, + "aspeed-smc 1e620000.spi: read control register: 203b0641") + self.wait_for_console_pattern("ftgmac100 1e660000.ethernet eth0: irq ") + self.wait_for_console_pattern("systemd[1]: Set hostname to") + + def test_arm_ast2400_palmetto_openbmc_v2_9_0(self): + """ + :avocado: tags=arch:arm + :avocado: tags=machine:palmetto-bmc + """ + + image_url = ('https://github.com/openbmc/openbmc/releases/download/2.9.0/' + 'obmc-phosphor-image-palmetto.static.mtd') + image_hash = ('3e13bbbc28e424865dc42f35ad672b10f2e82cdb11846bb28fa625b48beafd0d') + image_path = self.fetch_asset(image_url, asset_hash=image_hash, + algorithm='sha256') + + self.do_test_arm_aspeed(image_path) + + def test_arm_ast2500_romulus_openbmc_v2_9_0(self): + """ + :avocado: tags=arch:arm + :avocado: tags=machine:romulus-bmc + """ + + image_url = ('https://github.com/openbmc/openbmc/releases/download/2.9.0/' + 'obmc-phosphor-image-romulus.static.mtd') + image_hash = ('820341076803f1955bc31e647a512c79f9add4f5233d0697678bab4604c7bb25') + image_path = self.fetch_asset(image_url, asset_hash=image_hash, + algorithm='sha256') + + self.do_test_arm_aspeed(image_path) + + def do_test_arm_aspeed_buildroot_start(self, image, cpu_id): + self.require_netdev('user') + + self.vm.set_console() + self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw', + '-net', 'nic', '-net', 'user') + self.vm.launch() + + self.wait_for_console_pattern('U-Boot 2019.04') + self.wait_for_console_pattern('## Loading kernel from FIT Image') + self.wait_for_console_pattern('Starting kernel ...') + self.wait_for_console_pattern('Booting Linux on physical CPU ' + cpu_id) + self.wait_for_console_pattern('lease of 10.0.2.15') + # the line before login: + 
self.wait_for_console_pattern('Aspeed EVB') + time.sleep(0.1) + exec_command(self, 'root') + time.sleep(0.1) + + def do_test_arm_aspeed_buildroot_poweroff(self): + exec_command_and_wait_for_pattern(self, 'poweroff', + 'reboot: System halted'); + + def test_arm_ast2500_evb_buildroot(self): + """ + :avocado: tags=arch:arm + :avocado: tags=machine:ast2500-evb + """ + + image_url = ('https://github.com/legoater/qemu-aspeed-boot/raw/master/' + 'images/ast2500-evb/buildroot-2022.05/flash.img') + image_hash = ('549db6e9d8cdaf4367af21c36385a68bb465779c18b5e37094fc7343decccd3f') + image_path = self.fetch_asset(image_url, asset_hash=image_hash, + algorithm='sha256') + + self.vm.add_args('-device', + 'tmp105,bus=aspeed.i2c.bus.3,address=0x4d,id=tmp-test'); + self.do_test_arm_aspeed_buildroot_start(image_path, '0x0') + + exec_command_and_wait_for_pattern(self, + 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-3/device/new_device', + 'i2c i2c-3: new_device: Instantiated device lm75 at 0x4d'); + exec_command_and_wait_for_pattern(self, + 'cat /sys/class/hwmon/hwmon1/temp1_input', '0') + self.vm.command('qom-set', path='/machine/peripheral/tmp-test', + property='temperature', value=18000); + exec_command_and_wait_for_pattern(self, + 'cat /sys/class/hwmon/hwmon1/temp1_input', '18000') + + self.do_test_arm_aspeed_buildroot_poweroff() + + def test_arm_ast2600_evb_buildroot(self): + """ + :avocado: tags=arch:arm + :avocado: tags=machine:ast2600-evb + """ + + image_url = ('https://github.com/legoater/qemu-aspeed-boot/raw/master/' + 'images/ast2600-evb/buildroot-2022.05/flash.img') + image_hash = ('6cc9e7d128fd4fa1fd01c883af67593cae8072c3239a0b8b6ace857f3538a92d') + image_path = self.fetch_asset(image_url, asset_hash=image_hash, + algorithm='sha256') + + self.vm.add_args('-device', + 'tmp105,bus=aspeed.i2c.bus.3,address=0x4d,id=tmp-test'); + self.vm.add_args('-device', + 'ds1338,bus=aspeed.i2c.bus.3,address=0x32'); + self.do_test_arm_aspeed_buildroot_start(image_path, '0xf00') + + exec_command_and_wait_for_pattern(self, + 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-3/device/new_device', + 'i2c i2c-3: new_device: Instantiated device lm75 at 0x4d'); + exec_command_and_wait_for_pattern(self, + 'cat /sys/class/hwmon/hwmon0/temp1_input', '0') + self.vm.command('qom-set', path='/machine/peripheral/tmp-test', + property='temperature', value=18000); + exec_command_and_wait_for_pattern(self, + 'cat /sys/class/hwmon/hwmon0/temp1_input', '18000') + + exec_command_and_wait_for_pattern(self, + 'echo ds1307 0x32 > /sys/class/i2c-dev/i2c-3/device/new_device', + 'i2c i2c-3: new_device: Instantiated device ds1307 at 0x32'); + year = time.strftime("%Y") + exec_command_and_wait_for_pattern(self, 'hwclock -f /dev/rtc1', year); + + self.do_test_arm_aspeed_buildroot_poweroff() + + +class AST2x00MachineSDK(QemuSystemTest): + + EXTRA_BOOTARGS = ' quiet' + + # FIXME: Although these tests boot a whole distro they are still + # slower than comparable machine models. There may be some + # optimisations which bring down the runtime. In the meantime they + # have generous timeouts and are disable for CI which aims for all + # tests to run in less than 60 seconds. 
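The buildroot tests above drive the emulated TMP105 sensor through QOM: the -device line gives it id=tmp-test, and qom-set changes its 'temperature' property in millidegrees Celsius, which is why the guest's hwmon temp1_input reads back 18000 for 18 degrees. A sketch, for reference only, of roughly the QMP message that self.vm.command('qom-set', ...) issues on the monitor socket:

import json

qom_set = {
    "execute": "qom-set",
    "arguments": {
        "path": "/machine/peripheral/tmp-test",  # matches id=tmp-test on -device
        "property": "temperature",               # TMP105 temperature, millidegrees
        "value": 18000,                          # 18.000 degrees Celsius
    },
}
print(json.dumps(qom_set))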
+ timeout = 240 + + def wait_for_console_pattern(self, success_message, vm=None): + wait_for_console_pattern(self, success_message, + failure_message='Kernel panic - not syncing', + vm=vm) + + def do_test_arm_aspeed_sdk_start(self, image): + self.require_netdev('user') + self.vm.set_console() + self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw', + '-net', 'nic', '-net', 'user') + self.vm.launch() + + self.wait_for_console_pattern('U-Boot 2019.04') + interrupt_interactive_console_until_pattern( + self, 'Hit any key to stop autoboot:', 'ast#') + exec_command_and_wait_for_pattern( + self, 'setenv bootargs ${bootargs}' + self.EXTRA_BOOTARGS, 'ast#') + exec_command_and_wait_for_pattern( + self, 'boot', '## Loading kernel from FIT Image') + self.wait_for_console_pattern('Starting kernel ...') + + @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') + def test_arm_ast2500_evb_sdk(self): + """ + :avocado: tags=arch:arm + :avocado: tags=machine:ast2500-evb + """ + + image_url = ('https://github.com/AspeedTech-BMC/openbmc/releases/' + 'download/v08.01/ast2500-default-obmc.tar.gz') + image_hash = ('5375f82b4c43a79427909342a1e18b4e48bd663e38466862145d27bb358796fd') + image_path = self.fetch_asset(image_url, asset_hash=image_hash, + algorithm='sha256') + archive.extract(image_path, self.workdir) + + self.do_test_arm_aspeed_sdk_start( + self.workdir + '/ast2500-default/image-bmc') + self.wait_for_console_pattern('ast2500-default login:') + + @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') + def test_arm_ast2600_evb_sdk(self): + """ + :avocado: tags=arch:arm + :avocado: tags=machine:ast2600-evb + """ + + image_url = ('https://github.com/AspeedTech-BMC/openbmc/releases/' + 'download/v08.01/ast2600-default-obmc.tar.gz') + image_hash = ('f12ef15e8c1f03a214df3b91c814515c5e2b2f56119021398c1dbdd626817d15') + image_path = self.fetch_asset(image_url, asset_hash=image_hash, + algorithm='sha256') + archive.extract(image_path, self.workdir) + + self.vm.add_args('-device', + 'tmp105,bus=aspeed.i2c.bus.5,address=0x4d,id=tmp-test'); + self.vm.add_args('-device', + 'ds1338,bus=aspeed.i2c.bus.5,address=0x32'); + self.do_test_arm_aspeed_sdk_start( + self.workdir + '/ast2600-default/image-bmc') + self.wait_for_console_pattern('ast2600-default login:') + exec_command_and_wait_for_pattern(self, 'root', 'Password:') + exec_command_and_wait_for_pattern(self, '0penBmc', 'root@ast2600-default:~#') + + exec_command_and_wait_for_pattern(self, + 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-5/device/new_device', + 'i2c i2c-5: new_device: Instantiated device lm75 at 0x4d'); + exec_command_and_wait_for_pattern(self, + 'cat /sys/class/hwmon/hwmon19/temp1_input', '0') + self.vm.command('qom-set', path='/machine/peripheral/tmp-test', + property='temperature', value=18000); + exec_command_and_wait_for_pattern(self, + 'cat /sys/class/hwmon/hwmon19/temp1_input', '18000') + + exec_command_and_wait_for_pattern(self, + 'echo ds1307 0x32 > /sys/class/i2c-dev/i2c-5/device/new_device', + 'i2c i2c-5: new_device: Instantiated device ds1307 at 0x32'); + year = time.strftime("%Y") + exec_command_and_wait_for_pattern(self, 'hwclock -f /dev/rtc1', year); diff --git a/tests/acceptance/machine_avr6.py b/tests/avocado/machine_avr6.py similarity index 94% rename from tests/acceptance/machine_avr6.py rename to tests/avocado/machine_avr6.py index 6baf4e9c7f..5485db79c6 100644 --- a/tests/acceptance/machine_avr6.py +++ b/tests/avocado/machine_avr6.py @@ -1,5 +1,5 @@ # -# QEMU AVR acceptance tests +# QEMU AVR integration tests # # Copyright 
(c) 2019-2020 Michael Rolnik # @@ -19,9 +19,9 @@ import time -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest -class AVR6Machine(Test): +class AVR6Machine(QemuSystemTest): timeout = 5 def test_freertos(self): diff --git a/tests/acceptance/machine_m68k_nextcube.py b/tests/avocado/machine_m68k_nextcube.py similarity index 97% rename from tests/acceptance/machine_m68k_nextcube.py rename to tests/avocado/machine_m68k_nextcube.py index 09e2745cc5..6790e7d9cd 100644 --- a/tests/acceptance/machine_m68k_nextcube.py +++ b/tests/avocado/machine_m68k_nextcube.py @@ -8,7 +8,7 @@ import os import time -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado import skipUnless from tesseract_utils import tesseract_available, tesseract_ocr @@ -20,7 +20,7 @@ except ImportError: PIL_AVAILABLE = False -class NextCubeMachine(Test): +class NextCubeMachine(QemuSystemTest): """ :avocado: tags=arch:m68k :avocado: tags=machine:next-cube diff --git a/tests/acceptance/machine_microblaze.py b/tests/avocado/machine_microblaze.py similarity index 85% rename from tests/acceptance/machine_microblaze.py rename to tests/avocado/machine_microblaze.py index 7f6d18495d..8d0efff30d 100644 --- a/tests/acceptance/machine_microblaze.py +++ b/tests/avocado/machine_microblaze.py @@ -5,11 +5,11 @@ # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado.utils import archive -class MicroblazeMachine(Test): +class MicroblazeMachine(QemuSystemTest): timeout = 90 @@ -19,8 +19,8 @@ class MicroblazeMachine(Test): :avocado: tags=machine:petalogix-s3adsp1800 """ - tar_url = ('https://www.qemu-advent-calendar.org' - '/2018/download/day17.tar.xz') + tar_url = ('https://qemu-advcal.gitlab.io' + '/qac-best-of-multiarch/download/day17.tar.xz') tar_hash = '08bf3e3bfb6b6c7ce1e54ab65d54e189f2caf13f' file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) archive.extract(file_path, self.workdir) diff --git a/tests/acceptance/machine_mips_fuloong2e.py b/tests/avocado/machine_mips_fuloong2e.py similarity index 95% rename from tests/acceptance/machine_mips_fuloong2e.py rename to tests/avocado/machine_mips_fuloong2e.py index 0ac285e2af..89291f47b2 100644 --- a/tests/acceptance/machine_mips_fuloong2e.py +++ b/tests/avocado/machine_mips_fuloong2e.py @@ -10,10 +10,10 @@ import os from avocado import skipUnless -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern -class MipsFuloong2e(Test): +class MipsFuloong2e(QemuSystemTest): timeout = 60 diff --git a/tests/acceptance/machine_mips_loongson3v.py b/tests/avocado/machine_mips_loongson3v.py similarity index 94% rename from tests/acceptance/machine_mips_loongson3v.py rename to tests/avocado/machine_mips_loongson3v.py index 85b131a40f..5194cf18c9 100644 --- a/tests/acceptance/machine_mips_loongson3v.py +++ b/tests/avocado/machine_mips_loongson3v.py @@ -11,10 +11,10 @@ import os import time from avocado import skipUnless -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern -class MipsLoongson3v(Test): +class MipsLoongson3v(QemuSystemTest): timeout = 60 @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') diff --git a/tests/acceptance/machine_mips_malta.py b/tests/avocado/machine_mips_malta.py similarity 
index 98% rename from tests/acceptance/machine_mips_malta.py rename to tests/avocado/machine_mips_malta.py index b67d8cb141..f1895d59f3 100644 --- a/tests/acceptance/machine_mips_malta.py +++ b/tests/avocado/machine_mips_malta.py @@ -12,7 +12,7 @@ import gzip import logging from avocado import skipUnless -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado.utils import archive from avocado import skipIf @@ -33,7 +33,7 @@ except ImportError: @skipUnless(NUMPY_AVAILABLE, 'Python NumPy not installed') @skipUnless(CV2_AVAILABLE, 'Python OpenCV not installed') -class MaltaMachineFramebuffer(Test): +class MaltaMachineFramebuffer(QemuSystemTest): timeout = 30 diff --git a/tests/acceptance/machine_rx_gdbsim.py b/tests/avocado/machine_rx_gdbsim.py similarity index 97% rename from tests/acceptance/machine_rx_gdbsim.py rename to tests/avocado/machine_rx_gdbsim.py index 32b737b6d8..6cd8704b01 100644 --- a/tests/acceptance/machine_rx_gdbsim.py +++ b/tests/avocado/machine_rx_gdbsim.py @@ -11,13 +11,13 @@ import os from avocado import skipIf -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import exec_command_and_wait_for_pattern from avocado_qemu import wait_for_console_pattern from avocado.utils import archive -class RxGdbSimMachine(Test): +class RxGdbSimMachine(QemuSystemTest): timeout = 30 KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' diff --git a/tests/acceptance/machine_s390_ccw_virtio.py b/tests/avocado/machine_s390_ccw_virtio.py similarity index 98% rename from tests/acceptance/machine_s390_ccw_virtio.py rename to tests/avocado/machine_s390_ccw_virtio.py index 4028c99afc..78152f2ad1 100644 --- a/tests/acceptance/machine_s390_ccw_virtio.py +++ b/tests/avocado/machine_s390_ccw_virtio.py @@ -13,12 +13,12 @@ import os import tempfile from avocado import skipIf -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import exec_command_and_wait_for_pattern from avocado_qemu import wait_for_console_pattern from avocado.utils import archive -class S390CCWVirtioMachine(Test): +class S390CCWVirtioMachine(QemuSystemTest): KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' timeout = 120 @@ -66,6 +66,7 @@ class S390CCWVirtioMachine(Test): '-kernel', kernel_path, '-initrd', initrd_path, '-append', kernel_command_line, + '-cpu', 'max,prno-trng=off', '-device', 'virtio-net-ccw,devno=fe.1.1111', '-device', 'virtio-rng-ccw,devno=fe.2.0000,max_revision=0,id=rn1', @@ -248,7 +249,7 @@ class S390CCWVirtioMachine(Test): line = ppmfile.readline() self.assertEqual(line, b"P6\n") line = ppmfile.readline() - self.assertEqual(line, b"1024 768\n") + self.assertEqual(line, b"1280 800\n") line = ppmfile.readline() self.assertEqual(line, b"255\n") line = ppmfile.readline(256) diff --git a/tests/acceptance/machine_sparc64_sun4u.py b/tests/avocado/machine_sparc64_sun4u.py similarity index 90% rename from tests/acceptance/machine_sparc64_sun4u.py rename to tests/avocado/machine_sparc64_sun4u.py index 458165500e..d333c0ae91 100644 --- a/tests/acceptance/machine_sparc64_sun4u.py +++ b/tests/avocado/machine_sparc64_sun4u.py @@ -24,8 +24,8 @@ class Sun4uMachine(LinuxKernelTest): :avocado: tags=arch:sparc64 :avocado: tags=machine:sun4u """ - tar_url = ('https://www.qemu-advent-calendar.org' - '/2018/download/day23.tar.xz') + tar_url = ('https://qemu-advcal.gitlab.io' + '/qac-best-of-multiarch/download/day23.tar.xz') tar_hash = '142db83cd974ffadc4f75c8a5cad5bcc5722c240' file_path = 
self.fetch_asset(tar_url, asset_hash=tar_hash) archive.extract(file_path, self.workdir) diff --git a/tests/acceptance/machine_sparc_leon3.py b/tests/avocado/machine_sparc_leon3.py similarity index 94% rename from tests/acceptance/machine_sparc_leon3.py rename to tests/avocado/machine_sparc_leon3.py index 2405cd7a0d..e61b223185 100644 --- a/tests/acceptance/machine_sparc_leon3.py +++ b/tests/avocado/machine_sparc_leon3.py @@ -5,12 +5,12 @@ # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado import skip -class Leon3Machine(Test): +class Leon3Machine(QemuSystemTest): timeout = 60 diff --git a/tests/acceptance/migration.py b/tests/avocado/migration.py similarity index 94% rename from tests/acceptance/migration.py rename to tests/avocado/migration.py index 792639cb69..4b25680c50 100644 --- a/tests/acceptance/migration.py +++ b/tests/avocado/migration.py @@ -11,15 +11,15 @@ import tempfile -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado import skipUnless -from avocado.utils import network +from avocado.utils.network import ports from avocado.utils import wait from avocado.utils.path import find_command -class Migration(Test): +class Migration(QemuSystemTest): """ :avocado: tags=migration """ @@ -57,7 +57,7 @@ class Migration(Test): self.assert_migration(source_vm, dest_vm) def _get_free_port(self): - port = network.find_free_port() + port = ports.find_free_port() if port is None: self.cancel('Failed to find a free port') return port diff --git a/tests/acceptance/multiprocess.py b/tests/avocado/multiprocess.py similarity index 98% rename from tests/acceptance/multiprocess.py rename to tests/avocado/multiprocess.py index 96627f022a..80a3b8f442 100644 --- a/tests/acceptance/multiprocess.py +++ b/tests/avocado/multiprocess.py @@ -7,12 +7,12 @@ import os import socket -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado_qemu import exec_command from avocado_qemu import exec_command_and_wait_for_pattern -class Multiprocess(Test): +class Multiprocess(QemuSystemTest): """ :avocado: tags=multiprocess """ diff --git a/tests/acceptance/pc_cpu_hotplug_props.py b/tests/avocado/pc_cpu_hotplug_props.py similarity index 93% rename from tests/acceptance/pc_cpu_hotplug_props.py rename to tests/avocado/pc_cpu_hotplug_props.py index 2e86d5017a..52b878188e 100644 --- a/tests/acceptance/pc_cpu_hotplug_props.py +++ b/tests/avocado/pc_cpu_hotplug_props.py @@ -20,9 +20,9 @@ # License along with this library; if not, see . # -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest -class OmittedCPUProps(Test): +class OmittedCPUProps(QemuSystemTest): """ :avocado: tags=arch:x86_64 :avocado: tags=cpu:qemu64 diff --git a/tests/avocado/ppc_405.py b/tests/avocado/ppc_405.py new file mode 100644 index 0000000000..4e7e01aa76 --- /dev/null +++ b/tests/avocado/ppc_405.py @@ -0,0 +1,36 @@ +# Test that the U-Boot firmware boots on ppc 405 machines and check the console +# +# Copyright (c) 2021 Red Hat, Inc. +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
+ +from avocado.utils import archive +from avocado_qemu import QemuSystemTest +from avocado_qemu import wait_for_console_pattern +from avocado_qemu import exec_command_and_wait_for_pattern + +class Ppc405Machine(QemuSystemTest): + + timeout = 90 + + def do_test_ppc405(self): + uboot_url = ('https://gitlab.com/huth/u-boot/-/raw/' + 'taihu-2021-10-09/u-boot-taihu.bin') + uboot_hash = ('3208940e908a5edc7c03eab072c60f0dcfadc2ab'); + file_path = self.fetch_asset(uboot_url, asset_hash=uboot_hash) + self.vm.set_console(console_index=1) + self.vm.add_args('-bios', file_path) + self.vm.launch() + wait_for_console_pattern(self, 'AMCC PPC405EP Evaluation Board') + exec_command_and_wait_for_pattern(self, 'reset', 'AMCC PowerPC 405EP') + + def test_ppc_ref405ep(self): + """ + :avocado: tags=arch:ppc + :avocado: tags=machine:ref405ep + :avocado: tags=cpu:405ep + :avocado: tags=accel:tcg + """ + self.require_accelerator("tcg") + self.do_test_ppc405() diff --git a/tests/avocado/ppc_74xx.py b/tests/avocado/ppc_74xx.py new file mode 100644 index 0000000000..f54757c243 --- /dev/null +++ b/tests/avocado/ppc_74xx.py @@ -0,0 +1,136 @@ +# Smoke tests for 74xx cpus (aka G4). +# +# Copyright (c) 2021, IBM Corp. +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from avocado_qemu import QemuSystemTest +from avocado_qemu import wait_for_console_pattern + +class ppc74xxCpu(QemuSystemTest): + """ + :avocado: tags=arch:ppc + :avocado: tags=accel:tcg + """ + timeout = 5 + + def test_ppc_7400(self): + """ + :avocado: tags=cpu:7400 + """ + self.require_accelerator("tcg") + self.vm.set_console() + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') + + def test_ppc_7410(self): + """ + :avocado: tags=cpu:7410 + """ + self.require_accelerator("tcg") + self.vm.set_console() + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,74xx') + + def test_ppc_7441(self): + """ + :avocado: tags=cpu:7441 + """ + self.require_accelerator("tcg") + self.vm.set_console() + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') + + def test_ppc_7445(self): + """ + :avocado: tags=cpu:7445 + """ + self.require_accelerator("tcg") + self.vm.set_console() + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') + + def test_ppc_7447(self): + """ + :avocado: tags=cpu:7447 + """ + self.require_accelerator("tcg") + self.vm.set_console() + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') + + def test_ppc_7447a(self): + """ + :avocado: tags=cpu:7447a + """ + self.require_accelerator("tcg") + self.vm.set_console() + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') + + def test_ppc_7448(self): + """ + :avocado: tags=cpu:7448 + """ + self.require_accelerator("tcg") + self.vm.set_console() + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,MPC86xx') + + def test_ppc_7450(self): + """ + :avocado: tags=cpu:7450 + """ + self.require_accelerator("tcg") + self.vm.set_console() + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type 
PowerPC,G4') + + def test_ppc_7451(self): + """ + :avocado: tags=cpu:7451 + """ + self.require_accelerator("tcg") + self.vm.set_console() + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') + + def test_ppc_7455(self): + """ + :avocado: tags=cpu:7455 + """ + self.require_accelerator("tcg") + self.vm.set_console() + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') + + def test_ppc_7457(self): + """ + :avocado: tags=cpu:7457 + """ + self.require_accelerator("tcg") + self.vm.set_console() + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') + + def test_ppc_7457a(self): + """ + :avocado: tags=cpu:7457a + """ + self.require_accelerator("tcg") + self.vm.set_console() + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') diff --git a/tests/avocado/ppc_bamboo.py b/tests/avocado/ppc_bamboo.py new file mode 100644 index 0000000000..a81be3d608 --- /dev/null +++ b/tests/avocado/ppc_bamboo.py @@ -0,0 +1,42 @@ +# Test that Linux kernel boots on the ppc bamboo board and check the console +# +# Copyright (c) 2021 Red Hat +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from avocado.utils import archive +from avocado_qemu import QemuSystemTest +from avocado_qemu import wait_for_console_pattern +from avocado_qemu import exec_command_and_wait_for_pattern + +class BambooMachine(QemuSystemTest): + + timeout = 90 + + def test_ppc_bamboo(self): + """ + :avocado: tags=arch:ppc + :avocado: tags=machine:bamboo + :avocado: tags=cpu:440epb + :avocado: tags=device:rtl8139 + :avocado: tags=accel:tcg + """ + self.require_accelerator("tcg") + self.require_netdev('user') + tar_url = ('http://landley.net/aboriginal/downloads/binaries/' + 'system-image-powerpc-440fp.tar.gz') + tar_hash = '53e5f16414b195b82d2c70272f81c2eedb39bad9' + file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) + archive.extract(file_path, self.workdir) + self.vm.set_console() + self.vm.add_args('-kernel', self.workdir + + '/system-image-powerpc-440fp/linux', + '-initrd', self.workdir + + '/system-image-powerpc-440fp/rootfs.cpio.gz', + '-nic', 'user,model=rtl8139,restrict=on') + self.vm.launch() + wait_for_console_pattern(self, 'Type exit when done') + exec_command_and_wait_for_pattern(self, 'ping 10.0.2.2', + '10.0.2.2 is alive!') + exec_command_and_wait_for_pattern(self, 'halt', 'System Halted') diff --git a/tests/avocado/ppc_mpc8544ds.py b/tests/avocado/ppc_mpc8544ds.py new file mode 100644 index 0000000000..b599fb1cc9 --- /dev/null +++ b/tests/avocado/ppc_mpc8544ds.py @@ -0,0 +1,34 @@ +# Test that Linux kernel boots on ppc machines and check the console +# +# Copyright (c) 2018, 2020 Red Hat, Inc. +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
+ +from avocado.utils import archive +from avocado_qemu import QemuSystemTest +from avocado_qemu import wait_for_console_pattern + +class Mpc8544dsMachine(QemuSystemTest): + + timeout = 90 + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' + panic_message = 'Kernel panic - not syncing' + + def test_ppc_mpc8544ds(self): + """ + :avocado: tags=arch:ppc + :avocado: tags=machine:mpc8544ds + :avocado: tags=accel:tcg + """ + self.require_accelerator("tcg") + tar_url = ('https://qemu-advcal.gitlab.io' + '/qac-best-of-multiarch/download/day04.tar.xz') + tar_hash = 'f46724d281a9f30fa892d458be7beb7d34dc25f9' + file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) + archive.extract(file_path, self.workdir) + self.vm.set_console() + self.vm.add_args('-kernel', self.workdir + '/creek/creek.bin') + self.vm.launch() + wait_for_console_pattern(self, 'QEMU advent calendar 2020', + self.panic_message) diff --git a/tests/acceptance/ppc_prep_40p.py b/tests/avocado/ppc_prep_40p.py similarity index 86% rename from tests/acceptance/ppc_prep_40p.py rename to tests/avocado/ppc_prep_40p.py index 2993ee3b07..d4f1eb7e1d 100644 --- a/tests/acceptance/ppc_prep_40p.py +++ b/tests/avocado/ppc_prep_40p.py @@ -7,13 +7,12 @@ import os -from avocado import skipIf from avocado import skipUnless -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern -class IbmPrep40pMachine(Test): +class IbmPrep40pMachine(QemuSystemTest): timeout = 60 @@ -29,7 +28,9 @@ class IbmPrep40pMachine(Test): :avocado: tags=machine:40p :avocado: tags=os:netbsd :avocado: tags=slowness:high + :avocado: tags=accel:tcg """ + self.require_accelerator("tcg") bios_url = ('http://ftpmirror.your.org/pub/misc/' 'ftp.software.ibm.com/rs6000/firmware/' '7020-40p/P12H0456.IMG') @@ -52,7 +53,9 @@ class IbmPrep40pMachine(Test): """ :avocado: tags=arch:ppc :avocado: tags=machine:40p + :avocado: tags=accel:tcg """ + self.require_accelerator("tcg") self.vm.set_console() self.vm.add_args('-m', '192') # test fw_cfg @@ -66,9 +69,11 @@ class IbmPrep40pMachine(Test): :avocado: tags=arch:ppc :avocado: tags=machine:40p :avocado: tags=os:netbsd + :avocado: tags=accel:tcg """ - drive_url = ('https://cdn.netbsd.org/pub/NetBSD/iso/7.1.2/' - 'NetBSD-7.1.2-prep.iso') + self.require_accelerator("tcg") + drive_url = ('https://archive.netbsd.org/pub/NetBSD-archive/' + 'NetBSD-7.1.2/iso/NetBSD-7.1.2-prep.iso') drive_hash = 'ac6fa2707d888b36d6fa64de6e7fe48e' drive_path = self.fetch_asset(drive_url, asset_hash=drive_hash, algorithm='md5') diff --git a/tests/avocado/ppc_pseries.py b/tests/avocado/ppc_pseries.py new file mode 100644 index 0000000000..d8b04dc3ea --- /dev/null +++ b/tests/avocado/ppc_pseries.py @@ -0,0 +1,35 @@ +# Test that Linux kernel boots on ppc machines and check the console +# +# Copyright (c) 2018, 2020 Red Hat, Inc. +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
+ +from avocado.utils import archive +from avocado_qemu import QemuSystemTest +from avocado_qemu import wait_for_console_pattern + +class pseriesMachine(QemuSystemTest): + + timeout = 90 + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' + panic_message = 'Kernel panic - not syncing' + + def test_ppc64_pseries(self): + """ + :avocado: tags=arch:ppc64 + :avocado: tags=machine:pseries + """ + kernel_url = ('https://archives.fedoraproject.org/pub/archive' + '/fedora-secondary/releases/29/Everything/ppc64le/os' + '/ppc/ppc64/vmlinuz') + kernel_hash = '3fe04abfc852b66653b8c3c897a59a689270bc77' + kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) + + self.vm.set_console() + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=hvc0' + self.vm.add_args('-kernel', kernel_path, + '-append', kernel_command_line) + self.vm.launch() + console_pattern = 'Kernel command line: %s' % kernel_command_line + wait_for_console_pattern(self, console_pattern, self.panic_message) diff --git a/tests/avocado/ppc_virtex_ml507.py b/tests/avocado/ppc_virtex_ml507.py new file mode 100644 index 0000000000..a73f8ae396 --- /dev/null +++ b/tests/avocado/ppc_virtex_ml507.py @@ -0,0 +1,36 @@ +# Test that Linux kernel boots on ppc machines and check the console +# +# Copyright (c) 2018, 2020 Red Hat, Inc. +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from avocado.utils import archive +from avocado_qemu import QemuSystemTest +from avocado_qemu import wait_for_console_pattern + +class VirtexMl507Machine(QemuSystemTest): + + timeout = 90 + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' + panic_message = 'Kernel panic - not syncing' + + def test_ppc_virtex_ml507(self): + """ + :avocado: tags=arch:ppc + :avocado: tags=machine:virtex-ml507 + :avocado: tags=accel:tcg + """ + self.require_accelerator("tcg") + tar_url = ('https://qemu-advcal.gitlab.io' + '/qac-best-of-multiarch/download/day08.tar.xz') + tar_hash = '74c68f5af7a7b8f21c03097b298f3bb77ff52c1f' + file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) + archive.extract(file_path, self.workdir) + self.vm.set_console() + self.vm.add_args('-kernel', self.workdir + '/hippo/hippo.linux', + '-dtb', self.workdir + '/hippo/virtex440-ml507.dtb', + '-m', '512') + self.vm.launch() + wait_for_console_pattern(self, 'QEMU advent calendar 2020', + self.panic_message) diff --git a/tests/acceptance/replay_kernel.py b/tests/avocado/replay_kernel.py similarity index 85% rename from tests/acceptance/replay_kernel.py rename to tests/avocado/replay_kernel.py index bb32b31240..00a26e4a0c 100644 --- a/tests/acceptance/replay_kernel.py +++ b/tests/avocado/replay_kernel.py @@ -36,6 +36,9 @@ class ReplayKernelBase(LinuxKernelTest): def run_vm(self, kernel_path, kernel_command_line, console_pattern, record, shift, args, replay_path): + # icount requires TCG to be available + self.require_accelerator('tcg') + logger = logging.getLogger('replay') start_time = time.time() vm = self.get_vm() @@ -207,10 +210,43 @@ class ReplayKernelNormal(ReplayKernelBase): '-initrd', initrd_path, '-no-reboot')) + def test_s390x_s390_ccw_virtio(self): + """ + :avocado: tags=arch:s390x + :avocado: tags=machine:s390-ccw-virtio + """ + kernel_url = ('https://archives.fedoraproject.org/pub/archive' + '/fedora-secondary/releases/29/Everything/s390x/os/images' + '/kernel.img') + kernel_hash = 'e8e8439103ef8053418ef062644ffd46a7919313' + kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) + + 
kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=sclp0' + console_pattern = 'Kernel command line: %s' % kernel_command_line + self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=9) + + def test_alpha_clipper(self): + """ + :avocado: tags=arch:alpha + :avocado: tags=machine:clipper + """ + kernel_url = ('http://archive.debian.org/debian/dists/lenny/main/' + 'installer-alpha/20090123lenny10/images/cdrom/vmlinuz') + kernel_hash = '3a943149335529e2ed3e74d0d787b85fb5671ba3' + kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) + + uncompressed_kernel = archive.uncompress(kernel_path, self.workdir) + + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' + console_pattern = 'Kernel command line: %s' % kernel_command_line + self.run_rr(uncompressed_kernel, kernel_command_line, console_pattern, shift=9, + args=('-nodefaults', )) + def test_ppc64_pseries(self): """ :avocado: tags=arch:ppc64 :avocado: tags=machine:pseries + :avocado: tags=accel:tcg """ kernel_url = ('https://archives.fedoraproject.org/pub/archive' '/fedora-secondary/releases/29/Everything/ppc64le/os' @@ -260,8 +296,8 @@ class ReplayKernelNormal(ReplayKernelBase): :avocado: tags=machine:vexpress-a9 """ tar_hash = '32b7677ce8b6f1471fb0059865f451169934245b' - tar_url = ('https://www.qemu-advent-calendar.org' - '/2018/download/day16.tar.xz') + tar_url = ('https://qemu-advcal.gitlab.io' + '/qac-best-of-multiarch/download/day16.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) dtb_path = self.workdir + '/day16/vexpress-v2p-ca9.dtb' self.do_test_advcal_2018(file_path, 'winter.zImage', @@ -273,8 +309,8 @@ class ReplayKernelNormal(ReplayKernelBase): :avocado: tags=machine:mcf5208evb """ tar_hash = 'ac688fd00561a2b6ce1359f9ff6aa2b98c9a570c' - tar_url = ('https://www.qemu-advent-calendar.org' - '/2018/download/day07.tar.xz') + tar_url = ('https://qemu-advcal.gitlab.io' + '/qac-best-of-multiarch/download/day07.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'sanity-clause.elf') @@ -285,8 +321,8 @@ class ReplayKernelNormal(ReplayKernelBase): :avocado: tags=machine:petalogix-s3adsp1800 """ tar_hash = '08bf3e3bfb6b6c7ce1e54ab65d54e189f2caf13f' - tar_url = ('https://www.qemu-advent-calendar.org' - '/2018/download/day17.tar.xz') + tar_url = ('https://qemu-advcal.gitlab.io' + '/qac-best-of-multiarch/download/day17.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'ballerina.bin') @@ -297,19 +333,41 @@ class ReplayKernelNormal(ReplayKernelBase): :avocado: tags=cpu:e5500 """ tar_hash = '6951d86d644b302898da2fd701739c9406527fe1' - tar_url = ('https://www.qemu-advent-calendar.org' - '/2018/download/day19.tar.xz') + tar_url = ('https://qemu-advcal.gitlab.io' + '/qac-best-of-multiarch/download/day19.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'uImage') + def test_or1k_sim(self): + """ + :avocado: tags=arch:or1k + :avocado: tags=machine:or1k-sim + """ + tar_hash = '20334cdaf386108c530ff0badaecc955693027dd' + tar_url = ('https://qemu-advcal.gitlab.io' + '/qac-best-of-multiarch/download/day20.tar.xz') + file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) + self.do_test_advcal_2018(file_path, 'vmlinux') + + def test_nios2_10m50(self): + """ + :avocado: tags=arch:nios2 + :avocado: tags=machine:10m50-ghrd + """ + tar_hash = 'e4251141726c412ac0407c5a6bceefbbff018918' + tar_url = 
('https://qemu-advcal.gitlab.io' + '/qac-best-of-multiarch/download/day14.tar.xz') + file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) + self.do_test_advcal_2018(file_path, 'vmlinux.elf') + def test_ppc_g3beige(self): """ :avocado: tags=arch:ppc :avocado: tags=machine:g3beige """ tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc' - tar_url = ('https://www.qemu-advent-calendar.org' - '/2018/download/day15.tar.xz') + tar_url = ('https://qemu-advcal.gitlab.io' + '/qac-best-of-multiarch/download/day15.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'invaders.elf', args=('-M', 'graphics=off')) @@ -320,8 +378,8 @@ class ReplayKernelNormal(ReplayKernelBase): :avocado: tags=machine:mac99 """ tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc' - tar_url = ('https://www.qemu-advent-calendar.org' - '/2018/download/day15.tar.xz') + tar_url = ('https://qemu-advcal.gitlab.io' + '/qac-best-of-multiarch/download/day15.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'invaders.elf', args=('-M', 'graphics=off')) @@ -332,8 +390,8 @@ class ReplayKernelNormal(ReplayKernelBase): :avocado: tags=machine:SS-20 """ tar_hash = 'b18550d5d61c7615d989a06edace051017726a9f' - tar_url = ('https://www.qemu-advent-calendar.org' - '/2018/download/day11.tar.xz') + tar_url = ('https://qemu-advcal.gitlab.io' + '/qac-best-of-multiarch/download/day11.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'zImage.elf') @@ -344,8 +402,8 @@ class ReplayKernelNormal(ReplayKernelBase): :avocado: tags=cpu:dc233c """ tar_hash = '49e88d9933742f0164b60839886c9739cb7a0d34' - tar_url = ('https://www.qemu-advent-calendar.org' - '/2018/download/day02.tar.xz') + tar_url = ('https://qemu-advcal.gitlab.io' + '/qac-best-of-multiarch/download/day02.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'santas-sleigh-ride.elf') diff --git a/tests/avocado/replay_linux.py b/tests/avocado/replay_linux.py new file mode 100644 index 0000000000..a76dd507fc --- /dev/null +++ b/tests/avocado/replay_linux.py @@ -0,0 +1,193 @@ +# Record/replay test that boots a complete Linux system via a cloud image +# +# Copyright (c) 2020 ISP RAS +# +# Author: +# Pavel Dovgalyuk +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +import os +import logging +import time + +from avocado import skipUnless +from avocado_qemu import BUILD_DIR +from avocado.utils import cloudinit +from avocado.utils import network +from avocado.utils import vmimage +from avocado.utils import datadrainer +from avocado.utils.path import find_command +from avocado_qemu import LinuxTest + +class ReplayLinux(LinuxTest): + """ + Boots a Linux system, checking for a successful initialization + """ + + timeout = 1800 + chksum = None + hdd = 'ide-hd' + cd = 'ide-cd' + bus = 'ide' + + def setUp(self): + # LinuxTest does many replay-incompatible things, but includes + # useful methods. Do not setup LinuxTest here and just + # call some functions. 
+ super(LinuxTest, self).setUp() + self._set_distro() + self.boot_path = self.download_boot() + self.phone_server = cloudinit.PhoneHomeServer(('0.0.0.0', 0), + self.name) + ssh_pubkey, self.ssh_key = self.set_up_existing_ssh_keys() + self.cloudinit_path = self.prepare_cloudinit(ssh_pubkey) + + def vm_add_disk(self, vm, path, id, device): + bus_string = '' + if self.bus: + bus_string = ',bus=%s.%d' % (self.bus, id,) + vm.add_args('-drive', 'file=%s,snapshot,id=disk%s,if=none' % (path, id)) + vm.add_args('-drive', + 'driver=blkreplay,id=disk%s-rr,if=none,image=disk%s' % (id, id)) + vm.add_args('-device', + '%s,drive=disk%s-rr%s' % (device, id, bus_string)) + + def launch_and_wait(self, record, args, shift): + self.require_netdev('user') + vm = self.get_vm() + vm.add_args('-smp', '1') + vm.add_args('-m', '1024') + vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22', + '-device', 'virtio-net,netdev=vnet') + vm.add_args('-object', 'filter-replay,id=replay,netdev=vnet') + if args: + vm.add_args(*args) + self.vm_add_disk(vm, self.boot_path, 0, self.hdd) + self.vm_add_disk(vm, self.cloudinit_path, 1, self.cd) + logger = logging.getLogger('replay') + if record: + logger.info('recording the execution...') + mode = 'record' + else: + logger.info('replaying the execution...') + mode = 'replay' + replay_path = os.path.join(self.workdir, 'replay.bin') + vm.add_args('-icount', 'shift=%s,rr=%s,rrfile=%s' % + (shift, mode, replay_path)) + + start_time = time.time() + + vm.set_console() + vm.launch() + console_drainer = datadrainer.LineLogger(vm.console_socket.fileno(), + logger=self.log.getChild('console'), + stop_check=(lambda : not vm.is_running())) + console_drainer.start() + if record: + while not self.phone_server.instance_phoned_back: + self.phone_server.handle_request() + vm.shutdown() + logger.info('finished the recording with log size %s bytes' + % os.path.getsize(replay_path)) + else: + vm.event_wait('SHUTDOWN', self.timeout) + vm.shutdown(True) + logger.info('successfully finished the replay') + elapsed = time.time() - start_time + logger.info('elapsed time %.2f sec' % elapsed) + return elapsed + + def run_rr(self, args=None, shift=7): + t1 = self.launch_and_wait(True, args, shift) + t2 = self.launch_and_wait(False, args, shift) + logger = logging.getLogger('replay') + logger.info('replay overhead {:.2%}'.format(t2 / t1 - 1)) + +@skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') +class ReplayLinuxX8664(ReplayLinux): + """ + :avocado: tags=arch:x86_64 + :avocado: tags=accel:tcg + """ + + chksum = 'e3c1b309d9203604922d6e255c2c5d098a309c2d46215d8fc026954f3c5c27a0' + + def test_pc_i440fx(self): + """ + :avocado: tags=machine:pc + """ + self.run_rr(shift=1) + + def test_pc_q35(self): + """ + :avocado: tags=machine:q35 + """ + self.run_rr(shift=3) + +@skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') +class ReplayLinuxX8664Virtio(ReplayLinux): + """ + :avocado: tags=arch:x86_64 + :avocado: tags=virtio + :avocado: tags=accel:tcg + """ + + hdd = 'virtio-blk-pci' + cd = 'virtio-blk-pci' + bus = None + + chksum = 'e3c1b309d9203604922d6e255c2c5d098a309c2d46215d8fc026954f3c5c27a0' + + def test_pc_i440fx(self): + """ + :avocado: tags=machine:pc + """ + self.run_rr(shift=1) + + def test_pc_q35(self): + """ + :avocado: tags=machine:q35 + """ + self.run_rr(shift=3) + +@skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') +class ReplayLinuxAarch64(ReplayLinux): + """ + :avocado: tags=accel:tcg + :avocado: tags=arch:aarch64 + :avocado: 
tags=machine:virt + :avocado: tags=cpu:max + """ + + chksum = '1e18d9c0cf734940c4b5d5ec592facaed2af0ad0329383d5639c997fdf16fe49' + + hdd = 'virtio-blk-device' + cd = 'virtio-blk-device' + bus = None + + def get_common_args(self): + return ('-bios', + os.path.join(BUILD_DIR, 'pc-bios', 'edk2-aarch64-code.fd'), + "-cpu", "max,lpa2=off", + '-device', 'virtio-rng-pci,rng=rng0', + '-object', 'rng-builtin,id=rng0') + + def test_virt_gicv2(self): + """ + :avocado: tags=machine:gic-version=2 + """ + + self.run_rr(shift=3, + args=(*self.get_common_args(), + "-machine", "virt,gic-version=2")) + + def test_virt_gicv3(self): + """ + :avocado: tags=machine:gic-version=3 + """ + + self.run_rr(shift=3, + args=(*self.get_common_args(), + "-machine", "virt,gic-version=3")) diff --git a/tests/acceptance/reverse_debugging.py b/tests/avocado/reverse_debugging.py similarity index 100% rename from tests/acceptance/reverse_debugging.py rename to tests/avocado/reverse_debugging.py diff --git a/tests/acceptance/smmu.py b/tests/avocado/smmu.py similarity index 100% rename from tests/acceptance/smmu.py rename to tests/avocado/smmu.py diff --git a/tests/acceptance/tcg_plugins.py b/tests/avocado/tcg_plugins.py similarity index 98% rename from tests/acceptance/tcg_plugins.py rename to tests/avocado/tcg_plugins.py index 9ca1515c3b..642d2e49e3 100644 --- a/tests/acceptance/tcg_plugins.py +++ b/tests/avocado/tcg_plugins.py @@ -131,7 +131,7 @@ class PluginKernelNormal(PluginKernelBase): suffix=".log") self.run_vm(kernel_path, kernel_command_line, - "tests/plugin/libmem.so,arg=both", plugin_log.name, + "tests/plugin/libmem.so,inline=true,callback=true", plugin_log.name, console_pattern, args=('-icount', 'shift=1')) diff --git a/tests/acceptance/tesseract_utils.py b/tests/avocado/tesseract_utils.py similarity index 100% rename from tests/acceptance/tesseract_utils.py rename to tests/avocado/tesseract_utils.py diff --git a/tests/acceptance/version.py b/tests/avocado/version.py similarity index 88% rename from tests/acceptance/version.py rename to tests/avocado/version.py index 79b923d4fc..ded7f039c1 100644 --- a/tests/acceptance/version.py +++ b/tests/avocado/version.py @@ -9,10 +9,10 @@ # later. See the COPYING file in the top-level directory. -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest -class Version(Test): +class Version(QemuSystemTest): """ :avocado: tags=quick """ diff --git a/tests/acceptance/virtio-gpu.py b/tests/avocado/virtio-gpu.py similarity index 98% rename from tests/acceptance/virtio-gpu.py rename to tests/avocado/virtio-gpu.py index 4acc1e6d5f..2a249a3a2c 100644 --- a/tests/acceptance/virtio-gpu.py +++ b/tests/avocado/virtio-gpu.py @@ -4,8 +4,8 @@ # later. See the COPYING file in the top-level directory. 
-from avocado_qemu import Test from avocado_qemu import BUILD_DIR +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado_qemu import exec_command_and_wait_for_pattern from avocado_qemu import is_readable_executable_file @@ -27,7 +27,7 @@ def pick_default_vug_bin(): return bld_dir_path -class VirtioGPUx86(Test): +class VirtioGPUx86(QemuSystemTest): """ :avocado: tags=virtio-gpu :avocado: tags=arch:x86_64 diff --git a/tests/acceptance/virtio_check_params.py b/tests/avocado/virtio_check_params.py similarity index 97% rename from tests/acceptance/virtio_check_params.py rename to tests/avocado/virtio_check_params.py index 87e6c839d1..4093da8a67 100644 --- a/tests/acceptance/virtio_check_params.py +++ b/tests/avocado/virtio_check_params.py @@ -22,9 +22,8 @@ import os import re import logging -sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) from qemu.machine import QEMUMachine -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado import skip #list of machine types and virtqueue properties to test @@ -41,7 +40,7 @@ VM_DEV_PARAMS = {'virtio-scsi-pci': ['-device', 'virtio-scsi-pci,id=scsi0'], 'driver=null-co,id=drive0,if=none']} -class VirtioMaxSegSettingsCheck(Test): +class VirtioMaxSegSettingsCheck(QemuSystemTest): @staticmethod def make_pattern(props): pattern_items = ['{0} = \w+'.format(prop) for prop in props] diff --git a/tests/acceptance/virtio_version.py b/tests/avocado/virtio_version.py similarity index 97% rename from tests/acceptance/virtio_version.py rename to tests/avocado/virtio_version.py index 33593c29dd..c84e48813a 100644 --- a/tests/acceptance/virtio_version.py +++ b/tests/avocado/virtio_version.py @@ -11,9 +11,8 @@ Check compatibility of virtio device types import sys import os -sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) from qemu.machine import QEMUMachine -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest # Virtio Device IDs: VIRTIO_NET = 1 @@ -55,7 +54,7 @@ def get_pci_interfaces(vm, devtype): interfaces = ('pci-express-device', 'conventional-pci-device') return [i for i in interfaces if devtype_implements(vm, devtype, i)] -class VirtioVersionCheck(Test): +class VirtioVersionCheck(QemuSystemTest): """ Check if virtio-version-specific device types result in the same device tree created by `disable-modern` and diff --git a/tests/acceptance/virtiofs_submounts.py b/tests/avocado/virtiofs_submounts.py similarity index 82% rename from tests/acceptance/virtiofs_submounts.py rename to tests/avocado/virtiofs_submounts.py index 21ad7d792e..e6dc32ffd4 100644 --- a/tests/acceptance/virtiofs_submounts.py +++ b/tests/avocado/virtiofs_submounts.py @@ -6,67 +6,12 @@ import time from avocado import skipUnless from avocado_qemu import LinuxTest, BUILD_DIR +from avocado_qemu import has_cmds +from avocado_qemu import run_cmd from avocado_qemu import wait_for_console_pattern from avocado.utils import ssh -def run_cmd(args): - subp = subprocess.Popen(args, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True) - stdout, stderr = subp.communicate() - ret = subp.returncode - - return (stdout, stderr, ret) - -def has_cmd(name, args=None): - """ - This function is for use in a @avocado.skipUnless decorator, e.g.: - - @skipUnless(*has_cmd('sudo -n', ('sudo', '-n', 'true'))) - def test_something_that_needs_sudo(self): - ... 
- """ - - if args is None: - args = ('which', name) - - try: - _, stderr, exitcode = run_cmd(args) - except Exception as e: - exitcode = -1 - stderr = str(e) - - if exitcode != 0: - cmd_line = ' '.join(args) - err = f'{name} required, but "{cmd_line}" failed: {stderr.strip()}' - return (False, err) - else: - return (True, '') - -def has_cmds(*cmds): - """ - This function is for use in a @avocado.skipUnless decorator and - allows checking for the availability of multiple commands, e.g.: - - @skipUnless(*has_cmds(('cmd1', ('cmd1', '--some-parameter')), - 'cmd2', 'cmd3')) - def test_something_that_needs_cmd1_and_cmd2(self): - ... - """ - - for cmd in cmds: - if isinstance(cmd, str): - cmd = (cmd,) - - ok, errstr = has_cmd(*cmd) - if not ok: - return (False, errstr) - - return (True, '') - - class VirtiofsSubmountsTest(LinuxTest): """ :avocado: tags=arch:x86_64 diff --git a/tests/acceptance/virtiofs_submounts.py.data/cleanup.sh b/tests/avocado/virtiofs_submounts.py.data/cleanup.sh similarity index 100% rename from tests/acceptance/virtiofs_submounts.py.data/cleanup.sh rename to tests/avocado/virtiofs_submounts.py.data/cleanup.sh diff --git a/tests/acceptance/virtiofs_submounts.py.data/guest-cleanup.sh b/tests/avocado/virtiofs_submounts.py.data/guest-cleanup.sh similarity index 100% rename from tests/acceptance/virtiofs_submounts.py.data/guest-cleanup.sh rename to tests/avocado/virtiofs_submounts.py.data/guest-cleanup.sh diff --git a/tests/acceptance/virtiofs_submounts.py.data/guest.sh b/tests/avocado/virtiofs_submounts.py.data/guest.sh similarity index 100% rename from tests/acceptance/virtiofs_submounts.py.data/guest.sh rename to tests/avocado/virtiofs_submounts.py.data/guest.sh diff --git a/tests/acceptance/virtiofs_submounts.py.data/host.sh b/tests/avocado/virtiofs_submounts.py.data/host.sh similarity index 100% rename from tests/acceptance/virtiofs_submounts.py.data/host.sh rename to tests/avocado/virtiofs_submounts.py.data/host.sh diff --git a/tests/acceptance/vnc.py b/tests/avocado/vnc.py similarity index 50% rename from tests/acceptance/vnc.py rename to tests/avocado/vnc.py index 22656bbcc2..aeeefc70be 100644 --- a/tests/acceptance/vnc.py +++ b/tests/avocado/vnc.py @@ -8,12 +8,52 @@ # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. 
-from avocado_qemu import Test +import socket +from typing import List + +from avocado_qemu import QemuSystemTest -class Vnc(Test): +VNC_ADDR = '127.0.0.1' +VNC_PORT_START = 32768 +VNC_PORT_END = VNC_PORT_START + 1024 + + +def check_bind(port: int) -> bool: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: + try: + sock.bind((VNC_ADDR, port)) + except OSError: + return False + + return True + + +def check_connect(port: int) -> bool: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: + try: + sock.connect((VNC_ADDR, port)) + except ConnectionRefusedError: + return False + + return True + + +def find_free_ports(count: int) -> List[int]: + result = [] + for port in range(VNC_PORT_START, VNC_PORT_END): + if check_bind(port): + result.append(port) + if len(result) >= count: + break + assert len(result) == count + return result + + +class Vnc(QemuSystemTest): """ :avocado: tags=vnc,quick + :avocado: tags=machine:none """ def test_no_vnc(self): self.vm.add_args('-nodefaults', '-S') @@ -45,9 +85,33 @@ class Vnc(Test): 'Could not set password') def test_change_password(self): - self.vm.add_args('-nodefaults', '-S', '-vnc', ':0,password') + self.vm.add_args('-nodefaults', '-S', '-vnc', ':0,password=on') self.vm.launch() self.assertTrue(self.vm.qmp('query-vnc')['return']['enabled']) set_password_response = self.vm.qmp('change-vnc-password', password='new_password') self.assertEqual(set_password_response['return'], {}) + + def test_change_listen(self): + a, b, c = find_free_ports(3) + self.assertFalse(check_connect(a)) + self.assertFalse(check_connect(b)) + self.assertFalse(check_connect(c)) + + self.vm.add_args('-nodefaults', '-S', '-vnc', f'{VNC_ADDR}:{a - 5900}') + self.vm.launch() + self.assertEqual(self.vm.qmp('query-vnc')['return']['service'], str(a)) + self.assertTrue(check_connect(a)) + self.assertFalse(check_connect(b)) + self.assertFalse(check_connect(c)) + + res = self.vm.qmp('display-update', type='vnc', + addresses=[{'type': 'inet', 'host': VNC_ADDR, + 'port': str(b)}, + {'type': 'inet', 'host': VNC_ADDR, + 'port': str(c)}]) + self.assertEqual(res['return'], {}) + self.assertEqual(self.vm.qmp('query-vnc')['return']['service'], str(b)) + self.assertFalse(check_connect(a)) + self.assertTrue(check_connect(b)) + self.assertTrue(check_connect(c)) diff --git a/tests/acceptance/x86_cpu_model_versions.py b/tests/avocado/x86_cpu_model_versions.py similarity index 99% rename from tests/acceptance/x86_cpu_model_versions.py rename to tests/avocado/x86_cpu_model_versions.py index 0e9feda62d..a6edf74c1c 100644 --- a/tests/acceptance/x86_cpu_model_versions.py +++ b/tests/avocado/x86_cpu_model_versions.py @@ -24,7 +24,7 @@ import avocado_qemu import re -class X86CPUModelAliases(avocado_qemu.Test): +class X86CPUModelAliases(avocado_qemu.QemuSystemTest): """ Validation of PC CPU model versions and CPU model aliases @@ -239,7 +239,7 @@ class X86CPUModelAliases(avocado_qemu.Test): self.validate_aliases(cpus) -class CascadelakeArchCapabilities(avocado_qemu.Test): +class CascadelakeArchCapabilities(avocado_qemu.QemuSystemTest): """ Validation of Cascadelake arch-capabilities diff --git a/tests/bench/atomic_add-bench.c b/tests/bench/atomic_add-bench.c index f05471ab45..8a6faad6ec 100644 --- a/tests/bench/atomic_add-bench.c +++ b/tests/bench/atomic_add-bench.c @@ -2,6 +2,7 @@ #include "qemu/thread.h" #include "qemu/host-utils.h" #include "qemu/processor.h" +#include "qemu/memalign.h" struct thread_info { uint64_t r; diff --git a/tests/bench/benchmark-crypto-akcipher.c 
b/tests/bench/benchmark-crypto-akcipher.c new file mode 100644 index 0000000000..15e69557ed --- /dev/null +++ b/tests/bench/benchmark-crypto-akcipher.c @@ -0,0 +1,137 @@ +/* + * QEMU Crypto akcipher speed benchmark + * + * Copyright (c) 2022 Bytedance + * + * Authors: + * lei he + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#include "qemu/osdep.h" +#include "crypto/init.h" +#include "crypto/akcipher.h" +#include "standard-headers/linux/virtio_crypto.h" + +#include "test_akcipher_keys.inc" + +static QCryptoAkCipher *create_rsa_akcipher(const uint8_t *priv_key, + size_t keylen, + QCryptoRSAPaddingAlgorithm padding, + QCryptoHashAlgorithm hash) +{ + QCryptoAkCipherOptions opt; + QCryptoAkCipher *rsa; + + opt.alg = QCRYPTO_AKCIPHER_ALG_RSA; + opt.u.rsa.padding_alg = padding; + opt.u.rsa.hash_alg = hash; + rsa = qcrypto_akcipher_new(&opt, QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, + priv_key, keylen, &error_abort); + return rsa; +} + +static void test_rsa_speed(const uint8_t *priv_key, size_t keylen, + size_t key_size) +{ +#define BYTE 8 +#define SHA1_DGST_LEN 20 +#define SIGN_TIMES 10000 +#define VERIFY_TIMES 100000 +#define PADDING QCRYPTO_RSA_PADDING_ALG_PKCS1 +#define HASH QCRYPTO_HASH_ALG_SHA1 + + g_autoptr(QCryptoAkCipher) rsa = + create_rsa_akcipher(priv_key, keylen, PADDING, HASH); + g_autofree uint8_t *dgst = NULL; + g_autofree uint8_t *signature = NULL; + size_t count; + + dgst = g_new0(uint8_t, SHA1_DGST_LEN); + memset(dgst, g_test_rand_int(), SHA1_DGST_LEN); + signature = g_new0(uint8_t, key_size / BYTE); + + g_test_message("benchmark rsa%zu (%s-%s) sign...", key_size, + QCryptoRSAPaddingAlgorithm_str(PADDING), + QCryptoHashAlgorithm_str(HASH)); + g_test_timer_start(); + for (count = 0; count < SIGN_TIMES; ++count) { + g_assert(qcrypto_akcipher_sign(rsa, dgst, SHA1_DGST_LEN, + signature, key_size / BYTE, + &error_abort) > 0); + } + g_test_timer_elapsed(); + g_test_message("rsa%zu (%s-%s) sign %zu times in %.2f seconds," + " %.2f times/sec ", + key_size, QCryptoRSAPaddingAlgorithm_str(PADDING), + QCryptoHashAlgorithm_str(HASH), + count, g_test_timer_last(), + (double)count / g_test_timer_last()); + + g_test_message("benchmark rsa%zu (%s-%s) verification...", key_size, + QCryptoRSAPaddingAlgorithm_str(PADDING), + QCryptoHashAlgorithm_str(HASH)); + g_test_timer_start(); + for (count = 0; count < VERIFY_TIMES; ++count) { + g_assert(qcrypto_akcipher_verify(rsa, signature, key_size / BYTE, + dgst, SHA1_DGST_LEN, + &error_abort) == 0); + } + g_test_timer_elapsed(); + g_test_message("rsa%zu (%s-%s) verify %zu times in %.2f seconds," + " %.2f times/sec ", + key_size, QCryptoRSAPaddingAlgorithm_str(PADDING), + QCryptoHashAlgorithm_str(HASH), + count, g_test_timer_last(), + (double)count / g_test_timer_last()); +} + +static void test_rsa_1024_speed(const void *opaque) +{ + size_t key_size = (size_t)opaque; + test_rsa_speed(rsa1024_priv_key, sizeof(rsa1024_priv_key), key_size); +} + +static void test_rsa_2048_speed(const void *opaque) +{ + size_t key_size = (size_t)opaque; + test_rsa_speed(rsa2048_priv_key, sizeof(rsa2048_priv_key), key_size); +} + +static void test_rsa_4096_speed(const void *opaque) +{ + size_t key_size = (size_t)opaque; + test_rsa_speed(rsa4096_priv_key, sizeof(rsa4096_priv_key), key_size); +} + +int main(int argc, char **argv) +{ + char *alg = NULL; + char *size = NULL; + g_test_init(&argc, &argv, NULL); + g_assert(qcrypto_init(NULL) == 0); + +#define 
ADD_TEST(asym_alg, keysize) \ + if ((!alg || g_str_equal(alg, #asym_alg)) && \ + (!size || g_str_equal(size, #keysize))) \ + g_test_add_data_func( \ + "/crypto/akcipher/" #asym_alg "-" #keysize, \ + (void *)keysize, \ + test_ ## asym_alg ## _ ## keysize ## _speed) + + if (argc >= 2) { + alg = argv[1]; + } + if (argc >= 3) { + size = argv[2]; + } + + ADD_TEST(rsa, 1024); + ADD_TEST(rsa, 2048); + ADD_TEST(rsa, 4096); + + return g_test_run(); +} diff --git a/tests/bench/meson.build b/tests/bench/meson.build index 00b3c209dc..279a8fcc33 100644 --- a/tests/bench/meson.build +++ b/tests/bench/meson.build @@ -20,6 +20,7 @@ if have_block 'benchmark-crypto-hash': [crypto], 'benchmark-crypto-hmac': [crypto], 'benchmark-crypto-cipher': [crypto], + 'benchmark-crypto-akcipher': [crypto], } endif diff --git a/tests/bench/qht-bench.c b/tests/bench/qht-bench.c index 2e5b70ccd0..8afe161d10 100644 --- a/tests/bench/qht-bench.c +++ b/tests/bench/qht-bench.c @@ -10,6 +10,7 @@ #include "qemu/qht.h" #include "qemu/rcu.h" #include "qemu/xxhash.h" +#include "qemu/memalign.h" struct thread_stats { size_t rd; diff --git a/tests/bench/test_akcipher_keys.inc b/tests/bench/test_akcipher_keys.inc new file mode 100644 index 0000000000..df3eccb45e --- /dev/null +++ b/tests/bench/test_akcipher_keys.inc @@ -0,0 +1,537 @@ +/* + * Copyright (c) 2022 Bytedance, and/or its affiliates + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * Author: lei he + */ + +/* RSA test keys, generated by OpenSSL */ +static const uint8_t rsa1024_priv_key[] = { + 0x30, 0x82, 0x02, 0x5c, 0x02, 0x01, 0x00, 0x02, + 0x81, 0x81, 0x00, 0xe6, 0x4d, 0x76, 0x4f, 0xb2, + 0x97, 0x09, 0xad, 0x9d, 0x17, 0x33, 0xf2, 0x30, + 0x42, 0x83, 0xa9, 0xcb, 0x49, 0xa4, 0x2e, 0x59, + 0x5e, 0x75, 0x51, 0xd1, 0xac, 0xc8, 0x86, 0x3e, + 0xdb, 0x72, 0x2e, 0xb2, 0xf7, 0xc3, 0x5b, 0xc7, + 0xea, 0xed, 0x30, 0xd1, 0xf7, 0x37, 0xee, 0x9d, + 0x36, 0x59, 0x6f, 0xf8, 0xce, 0xc0, 0x5c, 0x82, + 0x80, 0x37, 0x83, 0xd7, 0x45, 0x6a, 0xe9, 0xea, + 0xc5, 0x3a, 0x59, 0x6b, 0x34, 0x31, 0x44, 0x00, + 0x74, 0xa7, 0x29, 0xab, 0x79, 0x4a, 0xbd, 0xe8, + 0x25, 0x35, 0x01, 0x11, 0x40, 0xbf, 0x31, 0xbd, + 0xd3, 0xe0, 0x68, 0x1e, 0xd5, 0x5b, 0x2f, 0xe9, + 0x20, 0xf2, 0x9f, 0x46, 0x35, 0x30, 0xa8, 0xf1, + 0xfe, 0xef, 0xd8, 0x76, 0x23, 0x46, 0x34, 0x70, + 0xa1, 0xce, 0xc6, 0x65, 0x6d, 0xb0, 0x94, 0x7e, + 0xe5, 0x92, 0x45, 0x7b, 0xaa, 0xbb, 0x95, 0x97, + 0x77, 0xcd, 0xd3, 0x02, 0x03, 0x01, 0x00, 0x01, + 0x02, 0x81, 0x80, 0x30, 0x6a, 0xc4, 0x9e, 0xc8, + 0xba, 0xfc, 0x2b, 0xe5, 0xc4, 0xc5, 0x04, 0xfb, + 0xa4, 0x60, 0x2d, 0xc8, 0x31, 0x39, 0x35, 0x0d, + 0x50, 0xd0, 0x75, 0x5d, 0x11, 0x68, 0x2e, 0xe0, + 0xf4, 0x1d, 0xb3, 0x37, 0xa8, 0xe3, 0x07, 0x5e, + 0xa6, 0x43, 0x2b, 0x6a, 0x59, 0x01, 0x07, 0x47, + 0x41, 0xef, 0xd7, 0x9c, 0x85, 0x4a, 0xe7, 0xa7, + 0xff, 0xf0, 0xab, 0xe5, 0x0c, 0x11, 0x08, 0x10, + 0x75, 0x5a, 0x68, 0xa0, 0x08, 0x03, 0xc9, 0x40, + 0x79, 0x67, 0x1d, 0x65, 0x89, 0x2d, 0x08, 0xf9, + 0xb5, 0x1b, 0x7d, 0xd2, 0x41, 0x3b, 0x33, 0xf2, + 0x47, 0x2f, 0x9c, 0x0b, 0xd5, 0xaf, 0xcb, 0xdb, + 0xbb, 0x37, 0x63, 0x03, 0xf8, 0xe7, 0x2e, 0xc7, + 0x3c, 0x86, 0x9f, 0xc2, 0x9b, 0xb4, 0x70, 0x6a, + 0x4d, 0x7c, 0xe4, 0x1b, 0x3a, 0xa9, 0xae, 0xd7, + 0xce, 0x7f, 0x56, 0xc2, 0x73, 0x5e, 0x58, 0x63, + 0xd5, 0x86, 0x41, 0x02, 0x41, 0x00, 0xf6, 0x56, + 0x69, 0xec, 0xef, 0x65, 0x95, 0xdc, 0x25, 0x47, + 0xe0, 0x6f, 0xb0, 0x4f, 0x79, 0x77, 0x0a, 0x5e, + 0x46, 0xcb, 0xbd, 0x0b, 0x71, 0x51, 0x2a, 0xa4, + 0x65, 0x29, 0x18, 0xc6, 0x30, 
0xa0, 0x95, 0x4c, + 0x4b, 0xbe, 0x8c, 0x40, 0xe3, 0x9c, 0x23, 0x02, + 0x14, 0x43, 0xe9, 0x64, 0xea, 0xe3, 0xa8, 0xe2, + 0x1a, 0xd5, 0xf9, 0x5c, 0xe0, 0x36, 0x2c, 0x97, + 0xda, 0xd5, 0xc7, 0x46, 0xce, 0x11, 0x02, 0x41, + 0x00, 0xef, 0x56, 0x08, 0xb8, 0x29, 0xa5, 0xa6, + 0x7c, 0xf7, 0x5f, 0xb4, 0xf5, 0x63, 0xe7, 0xeb, + 0x45, 0xfd, 0x89, 0xaa, 0x94, 0xa6, 0x3d, 0x0b, + 0xd9, 0x04, 0x6f, 0x78, 0xe0, 0xbb, 0xa2, 0xd4, + 0x29, 0x83, 0x17, 0x95, 0x6f, 0x50, 0x3d, 0x40, + 0x5d, 0xe5, 0x24, 0xda, 0xc2, 0x23, 0x50, 0x86, + 0xa8, 0x34, 0xc8, 0x6f, 0xec, 0x7f, 0xb6, 0x45, + 0x3a, 0xdd, 0x78, 0x9b, 0xee, 0xa1, 0xe4, 0x09, + 0xa3, 0x02, 0x40, 0x5c, 0xd6, 0x66, 0x67, 0x58, + 0x35, 0xc5, 0xcb, 0xc8, 0xf5, 0x14, 0xbd, 0xa3, + 0x09, 0xe0, 0xb2, 0x1f, 0x63, 0x36, 0x75, 0x34, + 0x52, 0xea, 0xaa, 0xf7, 0x52, 0x2b, 0x99, 0xd8, + 0x6f, 0x61, 0x06, 0x34, 0x1e, 0x23, 0xf1, 0xb5, + 0x34, 0x03, 0x53, 0xe5, 0xd1, 0xb3, 0xc7, 0x80, + 0x5f, 0x7b, 0x32, 0xbf, 0x84, 0x2f, 0x2e, 0xf3, + 0x22, 0xb0, 0x91, 0x5a, 0x2f, 0x04, 0xd7, 0x4a, + 0x9a, 0x01, 0xb1, 0x02, 0x40, 0x34, 0x0b, 0x26, + 0x4c, 0x3d, 0xaa, 0x2a, 0xc0, 0xe3, 0xdd, 0xe8, + 0xf0, 0xaf, 0x6f, 0xe0, 0x06, 0x51, 0x32, 0x9d, + 0x68, 0x43, 0x99, 0xe4, 0xb8, 0xa5, 0x31, 0x44, + 0x3c, 0xc2, 0x30, 0x8f, 0x28, 0x13, 0xbc, 0x8e, + 0x1f, 0x2d, 0x78, 0x94, 0x45, 0x96, 0xad, 0x63, + 0xf0, 0x71, 0x53, 0x72, 0x64, 0xa3, 0x4d, 0xae, + 0xa0, 0xe3, 0xc8, 0x93, 0xd7, 0x50, 0x0f, 0x89, + 0x00, 0xe4, 0x2d, 0x3d, 0x37, 0x02, 0x41, 0x00, + 0xbe, 0xa6, 0x08, 0xe0, 0xc8, 0x15, 0x2a, 0x47, + 0xcb, 0xd5, 0xec, 0x93, 0xd3, 0xaa, 0x12, 0x82, + 0xaf, 0xac, 0x51, 0x5a, 0x5b, 0xa7, 0x93, 0x4b, + 0xb9, 0xab, 0x00, 0xfa, 0x5a, 0xea, 0x34, 0xe4, + 0x80, 0xf1, 0x44, 0x6a, 0x65, 0xe4, 0x33, 0x99, + 0xfb, 0x54, 0xd7, 0x89, 0x5a, 0x1b, 0xd6, 0x2b, + 0xcc, 0x6e, 0x4b, 0x19, 0xa0, 0x6d, 0x93, 0x9f, + 0xc3, 0x91, 0x7a, 0xa5, 0xd8, 0x59, 0x0e, 0x9e, +}; + +static const uint8_t rsa2048_priv_key[] = { + 0x30, 0x82, 0x04, 0xa4, 0x02, 0x01, 0x00, 0x02, + 0x82, 0x01, 0x01, 0x00, 0xbd, 0x9c, 0x83, 0x6b, + 0x0e, 0x8e, 0xcf, 0xfa, 0xaa, 0x4f, 0x6a, 0xf4, + 0xe3, 0x52, 0x0f, 0xa5, 0xd0, 0xbe, 0x5e, 0x7f, + 0x08, 0x24, 0xba, 0x87, 0x46, 0xfb, 0x28, 0x93, + 0xe5, 0xe5, 0x81, 0x42, 0xc0, 0xf9, 0x17, 0xc7, + 0x81, 0x01, 0xf4, 0x18, 0x6a, 0x17, 0xf5, 0x57, + 0x20, 0x37, 0xcf, 0xf9, 0x74, 0x5e, 0xe1, 0x48, + 0x6a, 0x71, 0x0a, 0x0f, 0x79, 0x72, 0x2b, 0x46, + 0x10, 0x53, 0xdc, 0x14, 0x43, 0xbd, 0xbc, 0x6d, + 0x15, 0x6f, 0x15, 0x4e, 0xf0, 0x0d, 0x89, 0x39, + 0x02, 0xc3, 0x68, 0x5c, 0xa8, 0xfc, 0xed, 0x64, + 0x9d, 0x98, 0xb7, 0xcd, 0x83, 0x66, 0x93, 0xc3, + 0xd9, 0x57, 0xa0, 0x21, 0x93, 0xad, 0x5c, 0x75, + 0x69, 0x88, 0x9e, 0x81, 0xdc, 0x7f, 0x1d, 0xd5, + 0xbd, 0x1c, 0xc1, 0x30, 0x56, 0xa5, 0xda, 0x99, + 0x46, 0xa6, 0x6d, 0x0e, 0x6f, 0x5e, 0x51, 0x34, + 0x49, 0x73, 0xc3, 0x67, 0x49, 0x7e, 0x21, 0x2a, + 0x20, 0xa7, 0x2b, 0x92, 0x73, 0x1d, 0xa5, 0x25, + 0x2a, 0xd0, 0x3a, 0x89, 0x75, 0xb2, 0xbb, 0x19, + 0x37, 0x78, 0x48, 0xd2, 0xf2, 0x2a, 0x6d, 0x9e, + 0xc6, 0x26, 0xca, 0x46, 0x8c, 0xf1, 0x42, 0x2a, + 0x31, 0xb2, 0xfc, 0xe7, 0x55, 0x51, 0xff, 0x07, + 0x13, 0x5b, 0x36, 0x59, 0x2b, 0x43, 0x30, 0x4b, + 0x05, 0x5c, 0xd2, 0x45, 0xa0, 0xa0, 0x7c, 0x17, + 0x5b, 0x07, 0xbb, 0x5d, 0x83, 0x80, 0x92, 0x6d, + 0x87, 0x1a, 0x43, 0xac, 0xc7, 0x6b, 0x8d, 0x11, + 0x60, 0x27, 0xd2, 0xdf, 0xdb, 0x71, 0x02, 0x55, + 0x6e, 0xb5, 0xca, 0x4d, 0xda, 0x59, 0x0d, 0xb8, + 0x8c, 0xcd, 0xd3, 0x0e, 0x55, 0xa0, 0xa4, 0x8d, + 0xa0, 0x14, 0x10, 0x48, 0x42, 0x35, 0x56, 0x08, + 0xf7, 0x29, 0x5f, 0xa2, 0xea, 0xa4, 0x5e, 0x8e, + 0x99, 0x56, 0xaa, 0x5a, 0x8c, 
0x23, 0x8f, 0x35, + 0x22, 0x8a, 0xff, 0xed, 0x02, 0x03, 0x01, 0x00, + 0x01, 0x02, 0x82, 0x01, 0x00, 0x4e, 0x4a, 0xf3, + 0x44, 0xe0, 0x64, 0xfd, 0xe1, 0xde, 0x33, 0x1e, + 0xd1, 0xf1, 0x8f, 0x6f, 0xe0, 0xa2, 0xfa, 0x08, + 0x60, 0xe1, 0xc6, 0xf0, 0xb2, 0x6d, 0x0f, 0xc6, + 0x28, 0x93, 0xb4, 0x19, 0x94, 0xab, 0xc3, 0xef, + 0x1a, 0xb4, 0xdd, 0x4e, 0xa2, 0x4a, 0x24, 0x8c, + 0x6c, 0xa6, 0x64, 0x05, 0x5f, 0x56, 0xba, 0xda, + 0xc1, 0x21, 0x1a, 0x7d, 0xf1, 0xf7, 0xce, 0xb9, + 0xa9, 0x9b, 0x92, 0x54, 0xfc, 0x95, 0x20, 0x22, + 0x4e, 0xd4, 0x9b, 0xe2, 0xab, 0x8e, 0x99, 0xb8, + 0x40, 0xaf, 0x30, 0x6a, 0xc6, 0x60, 0x0c, 0xd8, + 0x25, 0x44, 0xa1, 0xcb, 0xbb, 0x73, 0x77, 0x86, + 0xaa, 0x46, 0xf3, 0x54, 0xae, 0xa8, 0xa0, 0xdb, + 0xdd, 0xab, 0x6e, 0xfb, 0x2c, 0x5a, 0x14, 0xaf, + 0x08, 0x13, 0xa7, 0x6c, 0xe9, 0xfd, 0xcd, 0x4c, + 0x1f, 0x20, 0x3a, 0x16, 0x2b, 0xf0, 0xb6, 0x7c, + 0x47, 0x5f, 0xd1, 0x0a, 0x2c, 0xc4, 0xa5, 0x68, + 0xd0, 0x43, 0x75, 0x6b, 0x65, 0xaa, 0x32, 0xc6, + 0x99, 0x06, 0xcb, 0x8f, 0xe6, 0x8d, 0xce, 0xbf, + 0x4d, 0x0d, 0x7b, 0x22, 0x2a, 0x8a, 0xcb, 0x7d, + 0x7f, 0x16, 0x48, 0x85, 0xf1, 0x86, 0xcb, 0x54, + 0xb9, 0x39, 0xd4, 0xbc, 0xe3, 0x2d, 0x27, 0x59, + 0xf6, 0x81, 0x5e, 0x94, 0x45, 0xdf, 0xb9, 0x22, + 0xaf, 0x64, 0x0d, 0x14, 0xec, 0x8c, 0xeb, 0x71, + 0xac, 0xee, 0x09, 0x4c, 0xbf, 0x34, 0xf9, 0xf4, + 0x66, 0x77, 0x36, 0x3b, 0x41, 0x74, 0x01, 0x4f, + 0xfc, 0x56, 0x83, 0xba, 0x14, 0xb0, 0x2f, 0xdd, + 0x4d, 0xb9, 0x3f, 0xdf, 0x71, 0xbe, 0x7b, 0xba, + 0x66, 0xc8, 0xc5, 0x42, 0xc9, 0xba, 0x18, 0x63, + 0x45, 0x07, 0x2f, 0x84, 0x3e, 0xc3, 0xfb, 0x47, + 0xda, 0xd4, 0x1d, 0x0e, 0x9d, 0x96, 0xc0, 0xea, + 0xee, 0x45, 0x2f, 0xe1, 0x62, 0x23, 0xee, 0xef, + 0x3d, 0x5e, 0x55, 0xa1, 0x0d, 0x02, 0x81, 0x81, + 0x00, 0xeb, 0x76, 0x88, 0xd3, 0xae, 0x3f, 0x1d, + 0xf2, 0x49, 0xe0, 0x37, 0x49, 0x83, 0x82, 0x6c, + 0xf7, 0xf1, 0x17, 0x30, 0x75, 0x2e, 0x89, 0x06, + 0x88, 0x56, 0x32, 0xf6, 0xfa, 0x58, 0xcb, 0x3c, + 0x98, 0x67, 0xc3, 0xde, 0x10, 0x82, 0xe5, 0xfa, + 0xfa, 0x52, 0x47, 0x8d, 0xd7, 0x00, 0xc6, 0xcb, + 0xf7, 0xf6, 0x57, 0x9b, 0x6e, 0x0c, 0xac, 0xe8, + 0x3b, 0xd1, 0xde, 0xb5, 0x34, 0xaf, 0x8b, 0x2a, + 0xb0, 0x2d, 0x01, 0xeb, 0x7c, 0xa0, 0x42, 0x26, + 0xbb, 0x2b, 0x43, 0x0e, 0x1d, 0xe2, 0x4e, 0xc9, + 0xc1, 0x0a, 0x67, 0x1d, 0xfc, 0x83, 0x25, 0xce, + 0xb2, 0x18, 0xd9, 0x0d, 0x70, 0xf5, 0xa3, 0x5a, + 0x9c, 0x99, 0xdd, 0x47, 0xa1, 0x57, 0xe7, 0x20, + 0xde, 0xa1, 0x29, 0x8d, 0x96, 0x62, 0xf9, 0x26, + 0x95, 0x51, 0xa6, 0xe7, 0x09, 0x8b, 0xba, 0x16, + 0x8b, 0x19, 0x5b, 0xf9, 0x27, 0x0d, 0xc5, 0xd6, + 0x5f, 0x02, 0x81, 0x81, 0x00, 0xce, 0x26, 0x31, + 0xb5, 0x43, 0x53, 0x95, 0x39, 0xdd, 0x01, 0x98, + 0x8b, 0x3d, 0x27, 0xeb, 0x0b, 0x87, 0x1c, 0x95, + 0xfc, 0x3e, 0x36, 0x51, 0x31, 0xb5, 0xea, 0x59, + 0x56, 0xc0, 0x97, 0x62, 0xf0, 0x63, 0x2b, 0xb6, + 0x30, 0x9b, 0xdf, 0x19, 0x10, 0xe9, 0xa0, 0x3d, + 0xea, 0x54, 0x5a, 0xe6, 0xc6, 0x9e, 0x7e, 0xb5, + 0xf0, 0xb0, 0x54, 0xef, 0xc3, 0xe1, 0x47, 0xa6, + 0x95, 0xc7, 0xe4, 0xa3, 0x4a, 0x30, 0x68, 0x24, + 0x98, 0x7d, 0xc1, 0x34, 0xa9, 0xcb, 0xbc, 0x3c, + 0x08, 0x9c, 0x7d, 0x0c, 0xa2, 0xb7, 0x60, 0xaa, + 0x38, 0x08, 0x16, 0xa6, 0x7f, 0xdb, 0xd2, 0xb1, + 0x67, 0xe7, 0x93, 0x8e, 0xbb, 0x7e, 0xb9, 0xb5, + 0xd0, 0xd0, 0x9f, 0x7b, 0xcc, 0x46, 0xe6, 0x74, + 0x78, 0x1a, 0x96, 0xd6, 0xd7, 0x74, 0x34, 0x54, + 0x3b, 0x54, 0x55, 0x7f, 0x89, 0x81, 0xbc, 0x40, + 0x55, 0x87, 0x24, 0x95, 0x33, 0x02, 0x81, 0x81, + 0x00, 0xb0, 0x18, 0x5d, 0x2a, 0x1a, 0x95, 0x9f, + 0x9a, 0xd5, 0x3f, 0x37, 0x79, 0xe6, 0x3d, 0x83, + 0xab, 0x46, 0x86, 0x36, 0x3a, 0x5d, 0x0c, 0x23, + 0x73, 0x91, 0x2b, 0xda, 0x63, 
0xce, 0x46, 0x68, + 0xd1, 0xfe, 0x40, 0x90, 0xf2, 0x3e, 0x43, 0x2b, + 0x19, 0x4c, 0xb1, 0xb0, 0xd5, 0x8c, 0x02, 0x21, + 0x07, 0x18, 0x17, 0xda, 0xe9, 0x49, 0xd7, 0x82, + 0x73, 0x42, 0x78, 0xd1, 0x82, 0x4e, 0x8a, 0xc0, + 0xe9, 0x33, 0x2f, 0xcd, 0x62, 0xce, 0x23, 0xca, + 0xfd, 0x8d, 0xd4, 0x3f, 0x59, 0x80, 0x27, 0xb6, + 0x61, 0x85, 0x9b, 0x2a, 0xe4, 0xef, 0x5c, 0x36, + 0x22, 0x21, 0xcd, 0x2a, 0x6d, 0x41, 0x77, 0xe2, + 0xcb, 0x5d, 0x93, 0x0d, 0x00, 0x10, 0x52, 0x8d, + 0xd5, 0x92, 0x28, 0x16, 0x78, 0xd3, 0x1a, 0x4c, + 0x8d, 0xbd, 0x9c, 0x1a, 0x0b, 0x9c, 0x91, 0x16, + 0x4c, 0xff, 0x31, 0x36, 0xbb, 0xcb, 0x64, 0x1a, + 0xf7, 0x02, 0x81, 0x80, 0x32, 0x65, 0x09, 0xdf, + 0xca, 0xee, 0xa2, 0xdb, 0x3b, 0x58, 0xc9, 0x86, + 0xb8, 0x53, 0x8a, 0xd5, 0x0d, 0x99, 0x82, 0x5c, + 0xe0, 0x84, 0x7c, 0xc2, 0xcf, 0x3a, 0xd3, 0xce, + 0x2e, 0x54, 0x93, 0xbe, 0x3a, 0x30, 0x14, 0x60, + 0xbb, 0xaa, 0x05, 0x41, 0xaa, 0x2b, 0x1f, 0x17, + 0xaa, 0xb9, 0x72, 0x12, 0xf9, 0xe9, 0xf5, 0xe6, + 0x39, 0xe4, 0xf9, 0x9c, 0x03, 0xf5, 0x75, 0x16, + 0xc6, 0x7f, 0xf1, 0x1f, 0x10, 0xc8, 0x54, 0xb1, + 0xe6, 0x84, 0x15, 0xb0, 0xb0, 0x7a, 0x7a, 0x9e, + 0x8c, 0x4a, 0xd1, 0x8c, 0xf1, 0x91, 0x32, 0xeb, + 0x71, 0xa6, 0xbf, 0xdb, 0x1f, 0xcc, 0xd8, 0xcb, + 0x92, 0xc3, 0xf2, 0xaf, 0x89, 0x22, 0x32, 0xfd, + 0x32, 0x12, 0xda, 0xbb, 0xac, 0x55, 0x68, 0x01, + 0x78, 0x56, 0x89, 0x7c, 0xb0, 0x0e, 0x9e, 0xcc, + 0xc6, 0x28, 0x04, 0x7e, 0x83, 0xf5, 0x96, 0x30, + 0x92, 0x51, 0xf2, 0x1b, 0x02, 0x81, 0x81, 0x00, + 0x83, 0x6d, 0xd1, 0x98, 0x90, 0x41, 0x8c, 0xa7, + 0x92, 0x83, 0xac, 0x89, 0x05, 0x0c, 0x79, 0x67, + 0x90, 0xb6, 0xa1, 0xf3, 0x2f, 0xca, 0xf0, 0x15, + 0xe0, 0x30, 0x58, 0xe9, 0x4f, 0xcb, 0x4c, 0x56, + 0x56, 0x56, 0x14, 0x3f, 0x1b, 0x79, 0xb6, 0xef, + 0x57, 0x4b, 0x28, 0xbd, 0xb0, 0xe6, 0x0c, 0x49, + 0x4b, 0xbe, 0xe1, 0x57, 0x28, 0x2a, 0x23, 0x5e, + 0xc4, 0xa2, 0x19, 0x4b, 0x00, 0x67, 0x78, 0xd9, + 0x26, 0x6e, 0x17, 0x25, 0xce, 0xe4, 0xfd, 0xde, + 0x86, 0xa8, 0x5a, 0x67, 0x47, 0x6b, 0x15, 0x09, + 0xe1, 0xec, 0x8e, 0x62, 0x98, 0x91, 0x6f, 0xc0, + 0x98, 0x0c, 0x70, 0x0e, 0x7d, 0xbe, 0x63, 0xbd, + 0x12, 0x5a, 0x98, 0x1c, 0xe3, 0x0c, 0xfb, 0xc7, + 0xfb, 0x1b, 0xbd, 0x02, 0x87, 0xcc, 0x0c, 0xbb, + 0xc2, 0xd4, 0xb6, 0xc1, 0xa1, 0x23, 0xd3, 0x1e, + 0x21, 0x6f, 0x48, 0xba, 0x0e, 0x2e, 0xc7, 0x42 +}; + +static const uint8_t rsa4096_priv_key[] = { + 0x30, 0x82, 0x09, 0x29, 0x02, 0x01, 0x00, 0x02, + 0x82, 0x02, 0x01, 0x00, 0xcc, 0x30, 0xc6, 0x90, + 0x49, 0x2b, 0x86, 0xe7, 0x7a, 0xa5, 0x7a, 0x9a, + 0x4f, 0xee, 0x0e, 0xa1, 0x5c, 0x43, 0x64, 0xd0, + 0x76, 0xe1, 0xfd, 0x0b, 0xfd, 0x43, 0x7a, 0x65, + 0xe6, 0x20, 0xbd, 0xf2, 0x0e, 0xbe, 0x76, 0x54, + 0xae, 0x37, 0xbe, 0xa0, 0x02, 0x96, 0xae, 0x8d, + 0x8a, 0xae, 0x3b, 0x88, 0xbb, 0x67, 0xce, 0x7c, + 0x20, 0xbf, 0x14, 0xc3, 0x71, 0x51, 0x87, 0x03, + 0x34, 0xaa, 0x3c, 0x09, 0xff, 0xe9, 0xeb, 0xb7, + 0x85, 0x5c, 0xbb, 0x8d, 0xce, 0x8e, 0x3f, 0xd1, + 0x16, 0x30, 0x00, 0x32, 0x2f, 0x25, 0x8d, 0xef, + 0x71, 0xd9, 0xea, 0x6b, 0x45, 0x53, 0x49, 0xc3, + 0x09, 0x4f, 0xb0, 0xa8, 0xa5, 0x89, 0x76, 0x59, + 0x31, 0xa5, 0xf1, 0x5c, 0x42, 0x54, 0x57, 0x70, + 0x57, 0xad, 0xd8, 0xeb, 0x89, 0xa6, 0x87, 0xa2, + 0x6c, 0x95, 0x58, 0x8f, 0xb6, 0x82, 0xc7, 0xde, + 0xc2, 0x3a, 0xdc, 0x5b, 0xe8, 0x02, 0xcc, 0x26, + 0x4b, 0x01, 0xaa, 0xe6, 0xf3, 0x66, 0x4d, 0x90, + 0x85, 0xde, 0xf4, 0x5d, 0x80, 0x98, 0xc6, 0x65, + 0xcf, 0x44, 0x4c, 0xde, 0xb5, 0x4a, 0xfc, 0xda, + 0x0a, 0x0a, 0x10, 0x26, 0xa3, 0xcb, 0x9d, 0xe4, + 0x8d, 0xab, 0x2c, 0x04, 0xfd, 0xaa, 0xfc, 0x3b, + 0xac, 0x4e, 0x56, 0xb8, 0x4c, 0x9f, 0x22, 0x49, + 0xcb, 0x76, 0x45, 0x24, 0x36, 
0x2d, 0xbb, 0xe6, + 0x7e, 0xa9, 0x93, 0x13, 0x96, 0x1e, 0xfc, 0x4b, + 0x75, 0xd4, 0x54, 0xc8, 0x8c, 0x55, 0xe6, 0x3f, + 0x09, 0x5a, 0x03, 0x74, 0x7c, 0x8a, 0xc8, 0xe7, + 0x49, 0x0b, 0x86, 0x7c, 0x97, 0xa0, 0xf2, 0x0d, + 0xf1, 0x5c, 0x0e, 0x7a, 0xc0, 0x3f, 0x78, 0x2d, + 0x9b, 0xe2, 0x26, 0xa0, 0x89, 0x49, 0x0c, 0xad, + 0x79, 0xa6, 0x82, 0x98, 0xa6, 0xb7, 0x74, 0xb4, + 0x45, 0xc8, 0xed, 0xea, 0x81, 0xcd, 0xf0, 0x3b, + 0x8e, 0x24, 0xfb, 0x0c, 0xd0, 0x3a, 0x14, 0xb9, + 0xb4, 0x3b, 0x69, 0xd9, 0xf2, 0x42, 0x6e, 0x7f, + 0x6f, 0x5e, 0xb1, 0x52, 0x5b, 0xaa, 0xef, 0xae, + 0x1e, 0x34, 0xca, 0xed, 0x0a, 0x8d, 0x56, 0xd6, + 0xdd, 0xd4, 0x2c, 0x54, 0x7a, 0x57, 0xca, 0x7e, + 0x4a, 0x11, 0xde, 0x48, 0xdf, 0x2b, 0x09, 0x97, + 0x39, 0x24, 0xce, 0x45, 0xe0, 0x75, 0xb1, 0x19, + 0x42, 0xdb, 0x63, 0x40, 0x9b, 0xb9, 0x95, 0x96, + 0x78, 0x91, 0xd5, 0x19, 0x12, 0xab, 0xef, 0x55, + 0x6f, 0x0d, 0x65, 0xc0, 0x8f, 0x62, 0x99, 0x78, + 0xc0, 0xe0, 0xe1, 0x33, 0xc7, 0x68, 0xff, 0x29, + 0x66, 0x22, 0x3a, 0x6f, 0xa0, 0xf8, 0x5c, 0x68, + 0x9b, 0xa9, 0x05, 0xad, 0x6b, 0x1d, 0xae, 0xc1, + 0x30, 0xbb, 0xfe, 0xb7, 0x31, 0x85, 0x0d, 0xd1, + 0xd5, 0xfc, 0x43, 0x1e, 0xb3, 0x61, 0x6f, 0xc4, + 0x75, 0xed, 0x76, 0x9d, 0x13, 0xb3, 0x61, 0x57, + 0xc8, 0x33, 0x0d, 0x77, 0x84, 0xf0, 0xc7, 0x62, + 0xb9, 0x9e, 0xd5, 0x01, 0xfa, 0x87, 0x4a, 0xf5, + 0xd7, 0x4f, 0x5d, 0xae, 0xe7, 0x08, 0xd2, 0x5a, + 0x65, 0x30, 0xc9, 0xf0, 0x0a, 0x11, 0xf1, 0x2a, + 0xd3, 0x43, 0x43, 0xca, 0x05, 0x90, 0x85, 0xf4, + 0xbc, 0x37, 0x49, 0x40, 0x45, 0x35, 0xd3, 0x56, + 0x06, 0x4c, 0x63, 0x93, 0x07, 0x14, 0x8b, 0xd3, + 0x12, 0xd0, 0xe5, 0x00, 0x48, 0x76, 0xd2, 0xdf, + 0x7c, 0xea, 0xc7, 0xff, 0xf0, 0x88, 0xd5, 0xa4, + 0x61, 0x7d, 0x79, 0xc2, 0xda, 0x53, 0x24, 0xdc, + 0x20, 0xae, 0xe6, 0x08, 0x65, 0xef, 0xc9, 0x0d, + 0x7d, 0x66, 0x6d, 0x1b, 0x1c, 0x5d, 0x46, 0xe1, + 0x26, 0x8a, 0x29, 0x77, 0x76, 0x19, 0xe5, 0x19, + 0x2a, 0x75, 0x21, 0xf1, 0x92, 0x8a, 0x9c, 0x7b, + 0xe8, 0x0b, 0x38, 0xc1, 0xbf, 0x76, 0x22, 0x45, + 0x4a, 0xd3, 0x43, 0xc3, 0x8c, 0x74, 0xd8, 0xd8, + 0xec, 0x3e, 0x14, 0xdf, 0x02, 0x03, 0x01, 0x00, + 0x01, 0x02, 0x82, 0x02, 0x01, 0x00, 0x9e, 0x13, + 0x64, 0xa5, 0x6e, 0xff, 0xf3, 0x80, 0x60, 0xc2, + 0x9b, 0x17, 0xbb, 0xa9, 0x60, 0x4a, 0x2b, 0x53, + 0x41, 0x48, 0xe1, 0xc0, 0x32, 0x56, 0x85, 0xcb, + 0x27, 0x86, 0x9b, 0x91, 0xdd, 0x7a, 0xf7, 0x4f, + 0x1b, 0xec, 0x92, 0xb3, 0x35, 0x30, 0x4a, 0xd0, + 0xbc, 0x71, 0x77, 0x5b, 0x4b, 0x5b, 0x9f, 0x39, + 0xcd, 0xf0, 0xea, 0xa9, 0x03, 0x3a, 0x0b, 0x10, + 0x42, 0xa5, 0x88, 0xb0, 0x01, 0xaa, 0xfc, 0x23, + 0xec, 0x08, 0x37, 0x86, 0x82, 0xec, 0x55, 0x6c, + 0x6a, 0x9b, 0x43, 0xc2, 0x05, 0x64, 0xd4, 0x7b, + 0x0e, 0x56, 0xc0, 0x9d, 0x23, 0x8d, 0xc8, 0x2d, + 0xa2, 0x7d, 0x0b, 0x48, 0x56, 0x4b, 0x39, 0x5c, + 0x21, 0xf3, 0x0b, 0x2c, 0x9c, 0x9d, 0xff, 0xfb, + 0xab, 0x75, 0x9d, 0x6b, 0x48, 0xf3, 0x8f, 0xad, + 0x0c, 0x74, 0x01, 0xfb, 0xdc, 0x83, 0xe5, 0x97, + 0x79, 0x84, 0x4a, 0x79, 0xa6, 0xfe, 0xbf, 0xae, + 0xea, 0xbc, 0xfa, 0x74, 0x60, 0x0a, 0x4b, 0x84, + 0x77, 0xa7, 0xda, 0xfb, 0xaf, 0xd2, 0x73, 0x2b, + 0xd2, 0xec, 0x1e, 0x79, 0x91, 0xc9, 0x18, 0x30, + 0xe5, 0x6f, 0x27, 0x36, 0x83, 0x2a, 0x66, 0xc3, + 0xcb, 0x88, 0x94, 0xe4, 0x5f, 0x3f, 0xbd, 0xe2, + 0x11, 0x43, 0x61, 0x31, 0x84, 0x91, 0x49, 0x40, + 0x29, 0x1b, 0x58, 0x18, 0x47, 0x8e, 0xb1, 0x22, + 0xd6, 0xc4, 0xaa, 0x6a, 0x3d, 0x22, 0x7c, 0xa5, + 0xa0, 0x4c, 0x0a, 0xfc, 0x46, 0x66, 0xbb, 0xbe, + 0x04, 0x71, 0xe8, 0x9b, 0x76, 0xf1, 0x47, 0x39, + 0x6a, 0x2f, 0x23, 0xad, 0x78, 0x80, 0x1c, 0x22, + 0xcd, 0x41, 0x5e, 0x09, 0x16, 0x6c, 0x91, 0x48, + 0x91, 0x91, 0x3d, 0x8c, 0xe6, 
0xba, 0x81, 0x8d, + 0xbb, 0xf2, 0xd0, 0xaa, 0xc7, 0x8f, 0xc6, 0x01, + 0x60, 0xa7, 0xef, 0x1e, 0x8e, 0x91, 0x6d, 0xcc, + 0x30, 0x9e, 0xea, 0x7c, 0x56, 0x9d, 0x42, 0xcf, + 0x44, 0x85, 0x52, 0xa8, 0xf2, 0x36, 0x9c, 0x46, + 0xfa, 0x9d, 0xd3, 0x4e, 0x13, 0x46, 0x81, 0xce, + 0x99, 0xc9, 0x58, 0x47, 0xe4, 0xeb, 0x27, 0x56, + 0x29, 0x61, 0x0f, 0xb5, 0xcb, 0xf3, 0x48, 0x58, + 0x8f, 0xbc, 0xaf, 0x0a, 0xbf, 0x40, 0xd1, 0xf6, + 0x4f, 0xd2, 0x89, 0x4a, 0xff, 0x6f, 0x54, 0x70, + 0x49, 0x42, 0xf6, 0xf8, 0x0e, 0x4f, 0xa5, 0xf6, + 0x8b, 0x49, 0x80, 0xd4, 0xf5, 0x03, 0xf8, 0x65, + 0xe7, 0x1f, 0x0a, 0xc0, 0x8f, 0xd3, 0x7a, 0x70, + 0xca, 0x67, 0xaf, 0x71, 0xfd, 0x4b, 0xe1, 0x17, + 0x76, 0x74, 0x2e, 0x12, 0x7b, 0xad, 0x4b, 0xbb, + 0xd2, 0x64, 0xd0, 0xa9, 0xf9, 0x79, 0xa9, 0xa6, + 0x03, 0xd2, 0xc2, 0x8f, 0x47, 0x59, 0x1b, 0x7c, + 0xe3, 0xce, 0x92, 0xb2, 0xac, 0x3e, 0xee, 0x12, + 0x43, 0x5f, 0x23, 0xec, 0xf1, 0xd3, 0xf2, 0x21, + 0x22, 0xe8, 0x7e, 0x7f, 0xa4, 0x93, 0x8e, 0x78, + 0x69, 0x69, 0xa0, 0xc9, 0xce, 0x86, 0x36, 0x13, + 0x10, 0x21, 0xc4, 0x7a, 0x52, 0xcf, 0x53, 0xd9, + 0x9b, 0x58, 0xe6, 0x2d, 0xeb, 0x60, 0xe3, 0x75, + 0x1a, 0x22, 0xf6, 0x3c, 0x54, 0x6b, 0xfa, 0xa1, + 0x5d, 0xf6, 0x38, 0xf0, 0xd4, 0x26, 0x2d, 0x7d, + 0x74, 0x99, 0x6a, 0x13, 0x8a, 0x07, 0x9f, 0x07, + 0xc5, 0xf4, 0xa8, 0x20, 0x11, 0xa9, 0x76, 0x11, + 0xe4, 0x48, 0xae, 0xa4, 0x8a, 0xa1, 0xbf, 0x1f, + 0xba, 0x37, 0x50, 0x53, 0x43, 0x91, 0x45, 0x88, + 0x03, 0x52, 0xba, 0xac, 0xc8, 0xe3, 0xe1, 0xba, + 0x63, 0x24, 0x72, 0xbe, 0x1d, 0x01, 0x1f, 0x6c, + 0x34, 0x10, 0xb8, 0x56, 0x4a, 0x67, 0x28, 0x4b, + 0x7a, 0x2b, 0x31, 0x29, 0x47, 0xda, 0xdf, 0x53, + 0x88, 0x79, 0x22, 0x31, 0x15, 0x56, 0xe3, 0xa0, + 0x79, 0x75, 0x94, 0x90, 0xb2, 0xe8, 0x4b, 0xca, + 0x82, 0x6d, 0x3c, 0x69, 0x43, 0x01, 0x02, 0x82, + 0x01, 0x01, 0x00, 0xe7, 0x8b, 0xd6, 0x1a, 0xe8, + 0x00, 0xed, 0x9d, 0x7c, 0x5a, 0x32, 0x10, 0xc1, + 0x53, 0x50, 0xbe, 0x27, 0x1d, 0xef, 0x69, 0x73, + 0xa2, 0x8f, 0x95, 0x96, 0x86, 0xfe, 0xfb, 0x82, + 0xdb, 0xea, 0x7d, 0x73, 0x5a, 0x2b, 0xe7, 0x4b, + 0xd5, 0x8f, 0x4f, 0xaf, 0x85, 0x1d, 0x15, 0x1a, + 0x58, 0x5f, 0x41, 0x79, 0x70, 0x5c, 0x8f, 0xa9, + 0x8e, 0x23, 0x31, 0xa7, 0x6d, 0x99, 0x0c, 0xf0, + 0x51, 0xbf, 0xbb, 0xd3, 0xe3, 0xa3, 0x34, 0xf0, + 0x1d, 0x7f, 0x4a, 0xb7, 0x8f, 0xf6, 0x0a, 0x49, + 0x65, 0xaf, 0x35, 0x7b, 0x02, 0x2e, 0x69, 0x49, + 0x95, 0xb5, 0x20, 0x70, 0xb2, 0x98, 0x54, 0x9b, + 0x8e, 0x4f, 0x48, 0xa8, 0xfa, 0x7e, 0xc7, 0x0a, + 0xae, 0x84, 0xe1, 0xba, 0x85, 0x98, 0x96, 0x8a, + 0x7c, 0xdd, 0xcc, 0xcd, 0xd8, 0x5b, 0x50, 0x60, + 0x88, 0x2d, 0xb6, 0x3e, 0xb8, 0xc2, 0xae, 0xa5, + 0x62, 0x10, 0xcd, 0xdc, 0xae, 0x86, 0xfe, 0x31, + 0x8b, 0xf7, 0xee, 0x1a, 0x35, 0x46, 0x83, 0xee, + 0x5f, 0x55, 0x9a, 0xc2, 0xca, 0x53, 0xb7, 0x2c, + 0xbf, 0x03, 0x8a, 0x78, 0xcc, 0x1d, 0x96, 0x7b, + 0xac, 0x00, 0x62, 0x1e, 0xbd, 0x6f, 0x0b, 0xa5, + 0xec, 0xf3, 0x02, 0x47, 0x47, 0x1e, 0x3d, 0xf6, + 0x78, 0x42, 0xe4, 0xcd, 0xf8, 0x14, 0xa3, 0x7d, + 0xd5, 0x2f, 0x6e, 0xcc, 0x1a, 0x9e, 0xe7, 0xcf, + 0x48, 0xb9, 0x80, 0xb8, 0xba, 0xaa, 0x7b, 0xae, + 0x65, 0x74, 0x09, 0x7b, 0x43, 0x26, 0x31, 0xa2, + 0x95, 0x43, 0x69, 0xd0, 0xb7, 0x95, 0xe4, 0x76, + 0x2c, 0x42, 0x19, 0x47, 0x4f, 0x63, 0x35, 0x9c, + 0xa2, 0x1a, 0xce, 0x28, 0xdf, 0x76, 0x98, 0x1d, + 0xd4, 0x2e, 0xf6, 0x3a, 0xc8, 0x3e, 0xc7, 0xaf, + 0xf7, 0x38, 0x3f, 0x83, 0x3a, 0xcb, 0xae, 0x41, + 0x75, 0x46, 0x63, 0xaa, 0x45, 0xb1, 0x2c, 0xd9, + 0x9f, 0x17, 0x37, 0x02, 0x82, 0x01, 0x01, 0x00, + 0xe1, 0xc1, 0x57, 0x4d, 0x0f, 0xa5, 0xea, 0x1d, + 0x39, 0x9c, 0xe0, 0xf0, 0x6d, 0x13, 0x7f, 0x79, + 0xdc, 0x72, 0x61, 0xc0, 0x7f, 
0x88, 0xf6, 0x38, + 0x4f, 0x49, 0x06, 0x1e, 0xb8, 0x6c, 0x21, 0x04, + 0x60, 0x76, 0x5a, 0x6d, 0x04, 0xd1, 0x6d, 0xac, + 0x7c, 0x25, 0x4f, 0x32, 0xcb, 0xbc, 0xf8, 0x4a, + 0x22, 0x8f, 0xf5, 0x41, 0xfd, 0x1c, 0x76, 0x30, + 0xc2, 0x5f, 0x99, 0x13, 0x5c, 0x57, 0x0f, 0xfd, + 0xac, 0x0b, 0x10, 0x9a, 0x4f, 0x78, 0x0a, 0x86, + 0xe8, 0x07, 0x40, 0x40, 0x13, 0xba, 0x96, 0x07, + 0xd5, 0x39, 0x91, 0x51, 0x3e, 0x80, 0xd8, 0xa0, + 0x1f, 0xff, 0xdc, 0x9e, 0x09, 0x3b, 0xae, 0x38, + 0xa9, 0xc2, 0x14, 0x7b, 0xee, 0xd2, 0x69, 0x3d, + 0xd6, 0x26, 0x74, 0x72, 0x7b, 0x86, 0xd4, 0x13, + 0x5b, 0xb8, 0x76, 0x4b, 0x08, 0xfb, 0x93, 0xfa, + 0x44, 0xaf, 0x98, 0x3b, 0xfa, 0xd0, 0x2a, 0x04, + 0x8b, 0xb3, 0x3c, 0x6d, 0x32, 0xf7, 0x18, 0x6a, + 0x51, 0x0e, 0x40, 0x90, 0xce, 0x8e, 0xdf, 0xe8, + 0x07, 0x4c, 0x0f, 0xc7, 0xc8, 0xc2, 0x18, 0x58, + 0x6a, 0x01, 0xc8, 0x27, 0xd6, 0x43, 0x2a, 0xfb, + 0xa5, 0x34, 0x01, 0x3c, 0x72, 0xb1, 0x48, 0xce, + 0x2b, 0x9b, 0xb4, 0x69, 0xd9, 0x82, 0xf8, 0xbe, + 0x29, 0x88, 0x75, 0x96, 0xd8, 0xef, 0x78, 0x2a, + 0x07, 0x90, 0xa0, 0x56, 0x33, 0x42, 0x05, 0x19, + 0xb0, 0x69, 0x34, 0xf9, 0x03, 0xc5, 0xa8, 0x0d, + 0x72, 0xa2, 0x27, 0xb4, 0x45, 0x6d, 0xd2, 0x01, + 0x6c, 0xf1, 0x74, 0x51, 0x0a, 0x9a, 0xe2, 0xc1, + 0x96, 0x80, 0x30, 0x0e, 0xc6, 0xa9, 0x79, 0xf7, + 0x6f, 0xaf, 0xf6, 0xe8, 0x2a, 0xcc, 0xbd, 0xad, + 0x8f, 0xe0, 0x32, 0x87, 0x85, 0x49, 0x68, 0x88, + 0x15, 0x5c, 0xdb, 0x48, 0x40, 0xa2, 0xfa, 0x42, + 0xe8, 0x4e, 0x3e, 0xe2, 0x3f, 0xe0, 0xf3, 0x99, + 0x02, 0x82, 0x01, 0x00, 0x08, 0x39, 0x97, 0x69, + 0x6d, 0x44, 0x5b, 0x2c, 0x74, 0xf6, 0x5f, 0x40, + 0xe9, 0x1d, 0x24, 0x89, 0x1c, 0xaa, 0x9b, 0x8e, + 0x8b, 0x65, 0x02, 0xe4, 0xb5, 0x6c, 0x26, 0x32, + 0x98, 0xfb, 0x66, 0xe0, 0xfd, 0xef, 0xfe, 0x0f, + 0x41, 0x4a, 0x5c, 0xc4, 0xdf, 0xdf, 0x42, 0xa1, + 0x35, 0x46, 0x5e, 0x5b, 0xdd, 0x0c, 0x78, 0xbd, + 0x41, 0xb0, 0xa2, 0xdf, 0x68, 0xab, 0x23, 0xfc, + 0xa9, 0xac, 0xbd, 0xba, 0xd6, 0x54, 0x07, 0xc0, + 0x21, 0xa7, 0x6a, 0x96, 0x24, 0xdf, 0x20, 0x46, + 0x4d, 0x45, 0x27, 0x6c, 0x26, 0xea, 0x74, 0xeb, + 0x98, 0x89, 0x90, 0xdd, 0x8e, 0x23, 0x49, 0xf5, + 0xf7, 0x70, 0x9e, 0xb0, 0x5e, 0x10, 0x47, 0xe0, + 0x9a, 0x28, 0x88, 0xdf, 0xdb, 0xd8, 0x53, 0x0b, + 0x45, 0xf0, 0x19, 0x90, 0xe4, 0xdf, 0x02, 0x9f, + 0x60, 0x4e, 0x76, 0x11, 0x3b, 0x39, 0x24, 0xf1, + 0x3f, 0x3e, 0xb4, 0x8a, 0x1b, 0x84, 0xb7, 0x96, + 0xdf, 0xfb, 0xb0, 0xda, 0xec, 0x63, 0x68, 0x15, + 0xd7, 0xa9, 0xdb, 0x48, 0x9c, 0x12, 0xc3, 0xd6, + 0x85, 0xe8, 0x63, 0x1f, 0xd0, 0x1a, 0xb0, 0x12, + 0x60, 0x62, 0x43, 0xc1, 0x38, 0x86, 0x52, 0x23, + 0x7f, 0xc9, 0x62, 0xf8, 0x79, 0xbf, 0xb4, 0xfb, + 0x4e, 0x7e, 0x07, 0x22, 0x49, 0x8e, 0xbe, 0x6c, + 0xf0, 0x53, 0x5a, 0x53, 0xfd, 0x3c, 0x14, 0xd8, + 0xf7, 0x2c, 0x06, 0x2a, 0xe4, 0x64, 0xfd, 0x19, + 0x57, 0xa0, 0x92, 0xf6, 0xa3, 0x42, 0x47, 0x61, + 0x0b, 0xfd, 0x71, 0x5f, 0x98, 0xe2, 0x6c, 0x98, + 0xa8, 0xf9, 0xf9, 0x7f, 0x1c, 0x61, 0x5d, 0x8c, + 0xd1, 0xfb, 0x90, 0x28, 0x32, 0x9b, 0x7d, 0x82, + 0xf9, 0xcc, 0x47, 0xbe, 0xc7, 0x67, 0xc5, 0x93, + 0x22, 0x55, 0x0d, 0xd2, 0x73, 0xbe, 0xea, 0xed, + 0x4d, 0xb5, 0xf4, 0xc2, 0x25, 0x92, 0x44, 0x30, + 0xeb, 0xaa, 0x13, 0x11, 0x02, 0x82, 0x01, 0x01, + 0x00, 0x82, 0x42, 0x02, 0x53, 0x4e, 0x72, 0x16, + 0xf1, 0x21, 0xea, 0xe8, 0xc7, 0x10, 0xc8, 0xad, + 0x46, 0xec, 0xf1, 0x7a, 0x81, 0x8d, 0x94, 0xc3, + 0x2c, 0x9e, 0x62, 0xae, 0x0b, 0x4f, 0xb1, 0xe4, + 0x23, 0x18, 0x5d, 0x71, 0xb3, 0x71, 0x92, 0x3d, + 0x4b, 0xc6, 0x9d, 0xe8, 0x62, 0x90, 0xb7, 0xca, + 0x33, 0x4c, 0x59, 0xef, 0xd3, 0x51, 0x6d, 0xf8, + 0xac, 0x0d, 0x9b, 0x07, 0x41, 0xea, 0x87, 0xb9, + 0x8c, 0x4e, 0x96, 0x5b, 0xd0, 
0x0d, 0x86, 0x5f, + 0xdc, 0x93, 0x48, 0x8b, 0xc3, 0xed, 0x1e, 0x3d, + 0xae, 0xeb, 0x52, 0xba, 0x0c, 0x3c, 0x9a, 0x2f, + 0x63, 0xc4, 0xd2, 0xe6, 0xc2, 0xb0, 0xe5, 0x24, + 0x93, 0x41, 0x2f, 0xe0, 0x8d, 0xd9, 0xb0, 0xc2, + 0x54, 0x91, 0x99, 0xc2, 0x9a, 0xc3, 0xb7, 0x79, + 0xea, 0x69, 0x83, 0xb7, 0x8d, 0x77, 0xf3, 0x60, + 0xe0, 0x88, 0x7d, 0x20, 0xc3, 0x8a, 0xe6, 0x4d, + 0x38, 0x2e, 0x3b, 0x0e, 0xe4, 0x9b, 0x01, 0x83, + 0xae, 0xe4, 0x71, 0xea, 0xc3, 0x22, 0xcb, 0xc1, + 0x59, 0xa9, 0xcc, 0x33, 0x56, 0xbc, 0xf9, 0x70, + 0xfe, 0xa2, 0xbb, 0xc0, 0x77, 0x6b, 0xe3, 0x79, + 0x8b, 0x95, 0x38, 0xba, 0x75, 0xdc, 0x5f, 0x7a, + 0x78, 0xab, 0x24, 0xbe, 0x26, 0x4d, 0x00, 0x8a, + 0xf1, 0x7e, 0x19, 0x64, 0x6f, 0xd3, 0x5f, 0xe8, + 0xdf, 0xa7, 0x59, 0xc5, 0x89, 0xb7, 0x2d, 0xa2, + 0xaf, 0xbd, 0xe0, 0x16, 0x56, 0x8f, 0xdc, 0x9e, + 0x28, 0x94, 0x3a, 0x07, 0xda, 0xb6, 0x2c, 0xb5, + 0x7d, 0x69, 0x14, 0xb0, 0x5e, 0x8a, 0x55, 0xef, + 0xfc, 0x6f, 0x10, 0x2b, 0xaa, 0x7a, 0xea, 0x12, + 0x9b, 0xb8, 0x6f, 0xb9, 0x71, 0x20, 0x30, 0xde, + 0x48, 0xa4, 0xb9, 0x61, 0xae, 0x5c, 0x33, 0x8d, + 0x02, 0xe8, 0x00, 0x99, 0xed, 0xc8, 0x8d, 0xc1, + 0x04, 0x95, 0xf1, 0x7f, 0xcb, 0x1f, 0xbc, 0x76, + 0x11, 0x02, 0x82, 0x01, 0x00, 0x2d, 0x0c, 0xa9, + 0x8f, 0x11, 0xc2, 0xf3, 0x02, 0xc8, 0xf2, 0x55, + 0xc5, 0x6d, 0x25, 0x88, 0xba, 0x59, 0xf6, 0xd1, + 0xdb, 0x94, 0x2f, 0x0b, 0x65, 0x2c, 0xad, 0x54, + 0xe0, 0x2b, 0xe6, 0xa3, 0x49, 0xa2, 0xb3, 0xca, + 0xd7, 0xec, 0x27, 0x32, 0xbb, 0xa4, 0x16, 0x90, + 0xbb, 0x67, 0xad, 0x1b, 0xb9, 0x0f, 0x78, 0xcb, + 0xad, 0x5c, 0xc3, 0x66, 0xd6, 0xbb, 0x97, 0x28, + 0x01, 0x31, 0xf9, 0x0f, 0x71, 0x2a, 0xb9, 0x5b, + 0xea, 0x34, 0x49, 0x9c, 0x6b, 0x13, 0x40, 0x65, + 0xbd, 0x18, 0x0a, 0x14, 0xf9, 0x33, 0x47, 0xe8, + 0x9f, 0x64, 0x0e, 0x24, 0xf6, 0xbb, 0x90, 0x23, + 0x66, 0x01, 0xa6, 0xa4, 0xa9, 0x7f, 0x64, 0x51, + 0xa3, 0x8a, 0x73, 0xc1, 0x80, 0xaf, 0x7a, 0x49, + 0x75, 0x5d, 0x56, 0x1c, 0xaa, 0x3f, 0x64, 0xa9, + 0x96, 0xfd, 0xb0, 0x90, 0xc5, 0xe0, 0x3d, 0x36, + 0x05, 0xad, 0xad, 0x84, 0x93, 0x84, 0xab, 0x1b, + 0x34, 0x57, 0x39, 0xae, 0x0e, 0x80, 0x0f, 0x4a, + 0x9b, 0x32, 0x56, 0xbd, 0x30, 0xeb, 0xd1, 0xc8, + 0xc4, 0x9f, 0x9c, 0x07, 0xb6, 0x05, 0xb1, 0x21, + 0x7f, 0x69, 0x92, 0x9f, 0xb7, 0x68, 0xe7, 0xde, + 0xb7, 0xbc, 0xb4, 0x89, 0x5b, 0x1c, 0x1b, 0x48, + 0xd1, 0x44, 0x6e, 0xd7, 0x6b, 0xe2, 0xa1, 0xf4, + 0xbf, 0x17, 0xb4, 0x43, 0x70, 0x26, 0xd4, 0xb9, + 0xf5, 0x19, 0x09, 0x08, 0xe9, 0xa3, 0x49, 0x7d, + 0x2f, 0xdc, 0xe8, 0x75, 0x79, 0xa1, 0xc1, 0x70, + 0x1b, 0x60, 0x97, 0xaf, 0x0c, 0x56, 0x68, 0xac, + 0x0e, 0x53, 0xbe, 0x56, 0xf4, 0xc3, 0xb1, 0xfb, + 0xfb, 0xff, 0x73, 0x5b, 0xa7, 0xf6, 0x99, 0x0e, + 0x14, 0x5a, 0x5f, 0x9d, 0xbd, 0x8e, 0x94, 0xec, + 0x8b, 0x38, 0x72, 0xbc, 0x8b, 0xca, 0x32, 0xa8, + 0x39, 0x43, 0xb1, 0x1d, 0x43, 0x29, 0xbe, 0x60, + 0xdb, 0x91, 0x6c, 0x9c, 0x06, +}; diff --git a/tests/check-block.sh b/tests/check-block.sh index f86cb863de..5de2c1ba0b 100755 --- a/tests/check-block.sh +++ b/tests/check-block.sh @@ -1,72 +1,25 @@ #!/bin/sh -# Honor the SPEED environment variable, just like we do it for the qtests. -if [ "$SPEED" = "slow" ]; then - format_list="raw qcow2" - group= -elif [ "$SPEED" = "thorough" ]; then - format_list="raw qcow2 qed vmdk vpc" +if [ "$#" -eq 0 ]; then + echo "Usage: $0 fmt..." 
>&2 + exit 99 +fi + +# Honor the SPEED environment variable, just like we do it for "meson test" +format_list="$@" +if [ "$SPEED" = "slow" ] || [ "$SPEED" = "thorough" ]; then group= else - format_list=qcow2 group="-g auto" fi -if [ "$#" -ne 0 ]; then - format_list="$@" -fi - -if grep -q "CONFIG_GPROF=y" config-host.mak 2>/dev/null ; then - echo "GPROF is enabled ==> Not running the qemu-iotests." +skip() { + echo "1..0 #SKIP $*" exit 0 -fi - -# Disable tests with any sanitizer except for specific ones -SANITIZE_FLAGS=$( grep "CFLAGS.*-fsanitize" config-host.mak 2>/dev/null ) -ALLOWED_SANITIZE_FLAGS="safe-stack cfi-icall" -#Remove all occurrencies of allowed Sanitize flags -for j in ${ALLOWED_SANITIZE_FLAGS}; do - TMP_FLAGS=${SANITIZE_FLAGS} - SANITIZE_FLAGS="" - for i in ${TMP_FLAGS}; do - if ! echo ${i} | grep -q "${j}" 2>/dev/null; then - SANITIZE_FLAGS="${SANITIZE_FLAGS} ${i}" - fi - done -done -if echo ${SANITIZE_FLAGS} | grep -q "\-fsanitize" 2>/dev/null; then - # Have a sanitize flag that is not allowed, stop - echo "Sanitizers are enabled ==> Not running the qemu-iotests." - exit 0 -fi +} if [ -z "$(find . -name 'qemu-system-*' -print)" ]; then - echo "No qemu-system binary available ==> Not running the qemu-iotests." - exit 0 -fi - -if ! command -v bash >/dev/null 2>&1 ; then - echo "bash not available ==> Not running the qemu-iotests." - exit 0 -fi - -if LANG=C bash --version | grep -q 'GNU bash, version [123]' ; then - echo "bash version too old ==> Not running the qemu-iotests." - exit 0 -fi - -if ! (sed --version | grep 'GNU sed') > /dev/null 2>&1 ; then - if ! command -v gsed >/dev/null 2>&1; then - echo "GNU sed not available ==> Not running the qemu-iotests." - exit 0 - fi -else - # Double-check that we're not using BusyBox' sed which says - # that "This is not GNU sed version 4.0" ... - if sed --version | grep -q 'not GNU sed' ; then - echo "BusyBox sed not supported ==> Not running the qemu-iotests." - exit 0 - fi + skip "No qemu-system binary available ==> Not running the qemu-iotests." fi cd tests/qemu-iotests @@ -74,10 +27,17 @@ cd tests/qemu-iotests # QEMU_CHECK_BLOCK_AUTO is used to disable some unstable sub-tests export QEMU_CHECK_BLOCK_AUTO=1 export PYTHONUTF8=1 +# If make was called with -jN we want to call ./check with -j N. Extract the +# flag from MAKEFLAGS, so that if it absent (or MAKEFLAGS is not defined), JOBS +# would be an empty line otherwise JOBS is prepared string of flag with value: +# "-j N" +# Note, that the following works even if make was called with "-j N" or even +# "--jobs N", as all these variants becomes simply "-jN" in MAKEFLAGS variable. 
+JOBS=$(echo "$MAKEFLAGS" | sed -n 's/\(^\|.* \)-j\([0-9]\+\)\( .*\|$\)/-j \2/p') ret=0 for fmt in $format_list ; do - ${PYTHON} ./check -makecheck -$fmt $group || ret=1 + ${PYTHON} ./check $JOBS -tap -$fmt $group || ret=1 done exit $ret diff --git a/tests/data/acpi/microvm/ERST.pcie b/tests/data/acpi/microvm/ERST.pcie new file mode 100644 index 0000000000..a6d0cb7838 Binary files /dev/null and b/tests/data/acpi/microvm/ERST.pcie differ diff --git a/tests/data/acpi/pc/DSDT b/tests/data/acpi/pc/DSDT index cc1223773e..b688686dc3 100644 Binary files a/tests/data/acpi/pc/DSDT and b/tests/data/acpi/pc/DSDT differ diff --git a/tests/data/acpi/pc/DSDT.acpierst b/tests/data/acpi/pc/DSDT.acpierst new file mode 100644 index 0000000000..86259be9d1 Binary files /dev/null and b/tests/data/acpi/pc/DSDT.acpierst differ diff --git a/tests/data/acpi/pc/DSDT.acpihmat b/tests/data/acpi/pc/DSDT.acpihmat index 2d0678eb83..e2cc2a6fc9 100644 Binary files a/tests/data/acpi/pc/DSDT.acpihmat and b/tests/data/acpi/pc/DSDT.acpihmat differ diff --git a/tests/data/acpi/pc/DSDT.bridge b/tests/data/acpi/pc/DSDT.bridge index 77778c3a69..75016fd4b7 100644 Binary files a/tests/data/acpi/pc/DSDT.bridge and b/tests/data/acpi/pc/DSDT.bridge differ diff --git a/tests/data/acpi/pc/DSDT.cphp b/tests/data/acpi/pc/DSDT.cphp index af046b40b0..53eb0dd7d4 100644 Binary files a/tests/data/acpi/pc/DSDT.cphp and b/tests/data/acpi/pc/DSDT.cphp differ diff --git a/tests/data/acpi/pc/DSDT.dimmpxm b/tests/data/acpi/pc/DSDT.dimmpxm index b56b2e0890..9089d994e0 100644 Binary files a/tests/data/acpi/pc/DSDT.dimmpxm and b/tests/data/acpi/pc/DSDT.dimmpxm differ diff --git a/tests/data/acpi/pc/DSDT.hpbridge b/tests/data/acpi/pc/DSDT.hpbridge index bb0593eeb8..86259be9d1 100644 Binary files a/tests/data/acpi/pc/DSDT.hpbridge and b/tests/data/acpi/pc/DSDT.hpbridge differ diff --git a/tests/data/acpi/pc/DSDT.hpbrroot b/tests/data/acpi/pc/DSDT.hpbrroot index 6ff6f198c7..578468f4f0 100644 Binary files a/tests/data/acpi/pc/DSDT.hpbrroot and b/tests/data/acpi/pc/DSDT.hpbrroot differ diff --git a/tests/data/acpi/pc/DSDT.ipmikcs b/tests/data/acpi/pc/DSDT.ipmikcs index 2e618e49d3..39427103aa 100644 Binary files a/tests/data/acpi/pc/DSDT.ipmikcs and b/tests/data/acpi/pc/DSDT.ipmikcs differ diff --git a/tests/data/acpi/pc/DSDT.memhp b/tests/data/acpi/pc/DSDT.memhp index c32d28575b..987a263339 100644 Binary files a/tests/data/acpi/pc/DSDT.memhp and b/tests/data/acpi/pc/DSDT.memhp differ diff --git a/tests/data/acpi/pc/DSDT.nohpet b/tests/data/acpi/pc/DSDT.nohpet index 623f06a900..fc7598b762 100644 Binary files a/tests/data/acpi/pc/DSDT.nohpet and b/tests/data/acpi/pc/DSDT.nohpet differ diff --git a/tests/data/acpi/pc/DSDT.numamem b/tests/data/acpi/pc/DSDT.numamem index f0a3fa92de..85af400cdb 100644 Binary files a/tests/data/acpi/pc/DSDT.numamem and b/tests/data/acpi/pc/DSDT.numamem differ diff --git a/tests/data/acpi/pc/DSDT.roothp b/tests/data/acpi/pc/DSDT.roothp index cee3b4d80b..545512adfa 100644 Binary files a/tests/data/acpi/pc/DSDT.roothp and b/tests/data/acpi/pc/DSDT.roothp differ diff --git a/tests/data/acpi/pc/ERST.acpierst b/tests/data/acpi/pc/ERST.acpierst new file mode 100644 index 0000000000..7965ac2562 Binary files /dev/null and b/tests/data/acpi/pc/ERST.acpierst differ diff --git a/tests/data/acpi/pc/SSDT.dimmpxm b/tests/data/acpi/pc/SSDT.dimmpxm index a50a961fa1..70f133412f 100644 Binary files a/tests/data/acpi/pc/SSDT.dimmpxm and b/tests/data/acpi/pc/SSDT.dimmpxm differ diff --git a/tests/data/acpi/q35/APIC.acpihmat-noinitiator 
b/tests/data/acpi/q35/APIC.acpihmat-noinitiator new file mode 100644 index 0000000000..d904d4a70d Binary files /dev/null and b/tests/data/acpi/q35/APIC.acpihmat-noinitiator differ diff --git a/tests/data/acpi/q35/APIC.core-count2 b/tests/data/acpi/q35/APIC.core-count2 new file mode 100644 index 0000000000..a255082ef5 Binary files /dev/null and b/tests/data/acpi/q35/APIC.core-count2 differ diff --git a/tests/data/acpi/q35/APIC.xapic b/tests/data/acpi/q35/APIC.xapic new file mode 100644 index 0000000000..c1969c35aa Binary files /dev/null and b/tests/data/acpi/q35/APIC.xapic differ diff --git a/tests/data/acpi/q35/CEDT.cxl b/tests/data/acpi/q35/CEDT.cxl new file mode 100644 index 0000000000..ff8203af07 Binary files /dev/null and b/tests/data/acpi/q35/CEDT.cxl differ diff --git a/tests/data/acpi/q35/DMAR.dmar b/tests/data/acpi/q35/DMAR.dmar new file mode 100644 index 0000000000..0dca6e68ad Binary files /dev/null and b/tests/data/acpi/q35/DMAR.dmar differ diff --git a/tests/data/acpi/q35/DSDT b/tests/data/acpi/q35/DSDT index 842533f53e..2771bcea89 100644 Binary files a/tests/data/acpi/q35/DSDT and b/tests/data/acpi/q35/DSDT differ diff --git a/tests/data/acpi/q35/DSDT.acpierst b/tests/data/acpi/q35/DSDT.acpierst new file mode 100644 index 0000000000..b45abca7c2 Binary files /dev/null and b/tests/data/acpi/q35/DSDT.acpierst differ diff --git a/tests/data/acpi/q35/DSDT.acpihmat b/tests/data/acpi/q35/DSDT.acpihmat index 8d00f2ea0d..d90fd4723a 100644 Binary files a/tests/data/acpi/q35/DSDT.acpihmat and b/tests/data/acpi/q35/DSDT.acpihmat differ diff --git a/tests/data/acpi/q35/DSDT.acpihmat-noinitiator b/tests/data/acpi/q35/DSDT.acpihmat-noinitiator new file mode 100644 index 0000000000..279fafa821 Binary files /dev/null and b/tests/data/acpi/q35/DSDT.acpihmat-noinitiator differ diff --git a/tests/data/acpi/q35/DSDT.applesmc b/tests/data/acpi/q35/DSDT.applesmc new file mode 100644 index 0000000000..fdf6d14428 Binary files /dev/null and b/tests/data/acpi/q35/DSDT.applesmc differ diff --git a/tests/data/acpi/q35/DSDT.bridge b/tests/data/acpi/q35/DSDT.bridge index 55ad4bd7ab..b41a4dddc0 100644 Binary files a/tests/data/acpi/q35/DSDT.bridge and b/tests/data/acpi/q35/DSDT.bridge differ diff --git a/tests/data/acpi/q35/DSDT.core-count2 b/tests/data/acpi/q35/DSDT.core-count2 new file mode 100644 index 0000000000..375aceed6b Binary files /dev/null and b/tests/data/acpi/q35/DSDT.core-count2 differ diff --git a/tests/data/acpi/q35/DSDT.cphp b/tests/data/acpi/q35/DSDT.cphp index ccde2add9f..a0ecafc36c 100644 Binary files a/tests/data/acpi/q35/DSDT.cphp and b/tests/data/acpi/q35/DSDT.cphp differ diff --git a/tests/data/acpi/q35/DSDT.cxl b/tests/data/acpi/q35/DSDT.cxl new file mode 100644 index 0000000000..f9c6dd4ee0 Binary files /dev/null and b/tests/data/acpi/q35/DSDT.cxl differ diff --git a/tests/data/acpi/q35/DSDT.dimmpxm b/tests/data/acpi/q35/DSDT.dimmpxm index b062e30117..f0659716e3 100644 Binary files a/tests/data/acpi/q35/DSDT.dimmpxm and b/tests/data/acpi/q35/DSDT.dimmpxm differ diff --git a/tests/data/acpi/q35/DSDT.ipmibt b/tests/data/acpi/q35/DSDT.ipmibt index 1c5737692f..9c52529919 100644 Binary files a/tests/data/acpi/q35/DSDT.ipmibt and b/tests/data/acpi/q35/DSDT.ipmibt differ diff --git a/tests/data/acpi/q35/DSDT.ipmismbus b/tests/data/acpi/q35/DSDT.ipmismbus new file mode 100644 index 0000000000..3f32dffdbf Binary files /dev/null and b/tests/data/acpi/q35/DSDT.ipmismbus differ diff --git a/tests/data/acpi/q35/DSDT.ivrs b/tests/data/acpi/q35/DSDT.ivrs new file mode 100644 index 
0000000000..b45abca7c2 Binary files /dev/null and b/tests/data/acpi/q35/DSDT.ivrs differ diff --git a/tests/data/acpi/q35/DSDT.memhp b/tests/data/acpi/q35/DSDT.memhp index 7b6f6487b2..28a192c69a 100644 Binary files a/tests/data/acpi/q35/DSDT.memhp and b/tests/data/acpi/q35/DSDT.memhp differ diff --git a/tests/data/acpi/q35/DSDT.mmio64 b/tests/data/acpi/q35/DSDT.mmio64 index 2e0a772a85..8fda921296 100644 Binary files a/tests/data/acpi/q35/DSDT.mmio64 and b/tests/data/acpi/q35/DSDT.mmio64 differ diff --git a/tests/data/acpi/q35/DSDT.multi-bridge b/tests/data/acpi/q35/DSDT.multi-bridge new file mode 100644 index 0000000000..3dba4d8436 Binary files /dev/null and b/tests/data/acpi/q35/DSDT.multi-bridge differ diff --git a/tests/data/acpi/q35/DSDT.nohpet b/tests/data/acpi/q35/DSDT.nohpet index ceb61f4115..b116947dac 100644 Binary files a/tests/data/acpi/q35/DSDT.nohpet and b/tests/data/acpi/q35/DSDT.nohpet differ diff --git a/tests/data/acpi/q35/DSDT.numamem b/tests/data/acpi/q35/DSDT.numamem index a3f846df54..5eb6159d5f 100644 Binary files a/tests/data/acpi/q35/DSDT.numamem and b/tests/data/acpi/q35/DSDT.numamem differ diff --git a/tests/data/acpi/q35/DSDT.pvpanic-isa b/tests/data/acpi/q35/DSDT.pvpanic-isa new file mode 100644 index 0000000000..908e7b6606 Binary files /dev/null and b/tests/data/acpi/q35/DSDT.pvpanic-isa differ diff --git a/tests/data/acpi/q35/DSDT.tis b/tests/data/acpi/q35/DSDT.tis.tpm12 similarity index 87% rename from tests/data/acpi/q35/DSDT.tis rename to tests/data/acpi/q35/DSDT.tis.tpm12 index d1433e3c14..ce2c2c29c2 100644 Binary files a/tests/data/acpi/q35/DSDT.tis and b/tests/data/acpi/q35/DSDT.tis.tpm12 differ diff --git a/tests/data/acpi/q35/DSDT.tis.tpm2 b/tests/data/acpi/q35/DSDT.tis.tpm2 new file mode 100644 index 0000000000..e9e4b7f6ed Binary files /dev/null and b/tests/data/acpi/q35/DSDT.tis.tpm2 differ diff --git a/tests/data/acpi/q35/DSDT.viot b/tests/data/acpi/q35/DSDT.viot new file mode 100644 index 0000000000..6b436f9cd9 Binary files /dev/null and b/tests/data/acpi/q35/DSDT.viot differ diff --git a/tests/data/acpi/q35/DSDT.xapic b/tests/data/acpi/q35/DSDT.xapic new file mode 100644 index 0000000000..f47f091222 Binary files /dev/null and b/tests/data/acpi/q35/DSDT.xapic differ diff --git a/tests/data/acpi/q35/ERST.acpierst b/tests/data/acpi/q35/ERST.acpierst new file mode 100644 index 0000000000..7965ac2562 Binary files /dev/null and b/tests/data/acpi/q35/ERST.acpierst differ diff --git a/tests/data/acpi/q35/FACP b/tests/data/acpi/q35/FACP index f6a864cc86..a8f6a89611 100644 Binary files a/tests/data/acpi/q35/FACP and b/tests/data/acpi/q35/FACP differ diff --git a/tests/data/acpi/q35/FACP.core-count2 b/tests/data/acpi/q35/FACP.core-count2 new file mode 100644 index 0000000000..31fa5dd19c Binary files /dev/null and b/tests/data/acpi/q35/FACP.core-count2 differ diff --git a/tests/data/acpi/q35/FACP.nosmm b/tests/data/acpi/q35/FACP.nosmm index 6a9aa5f370..c4e6d18ee5 100644 Binary files a/tests/data/acpi/q35/FACP.nosmm and b/tests/data/acpi/q35/FACP.nosmm differ diff --git a/tests/data/acpi/q35/FACP.slic b/tests/data/acpi/q35/FACP.slic new file mode 100644 index 0000000000..48bbb1cf5a Binary files /dev/null and b/tests/data/acpi/q35/FACP.slic differ diff --git a/tests/data/acpi/q35/FACP.xapic b/tests/data/acpi/q35/FACP.xapic new file mode 100644 index 0000000000..31fa5dd19c Binary files /dev/null and b/tests/data/acpi/q35/FACP.xapic differ diff --git a/tests/data/acpi/q35/HMAT.acpihmat-noinitiator b/tests/data/acpi/q35/HMAT.acpihmat-noinitiator new file mode 
100644 index 0000000000..6494d11b9f Binary files /dev/null and b/tests/data/acpi/q35/HMAT.acpihmat-noinitiator differ diff --git a/tests/data/acpi/q35/IVRS.ivrs b/tests/data/acpi/q35/IVRS.ivrs new file mode 100644 index 0000000000..17611202e5 Binary files /dev/null and b/tests/data/acpi/q35/IVRS.ivrs differ diff --git a/tests/data/acpi/q35/SLIC.slic b/tests/data/acpi/q35/SLIC.slic new file mode 100644 index 0000000000..fd26592e24 Binary files /dev/null and b/tests/data/acpi/q35/SLIC.slic differ diff --git a/tests/data/acpi/q35/SRAT.acpihmat-noinitiator b/tests/data/acpi/q35/SRAT.acpihmat-noinitiator new file mode 100644 index 0000000000..a11d3119ab Binary files /dev/null and b/tests/data/acpi/q35/SRAT.acpihmat-noinitiator differ diff --git a/tests/data/acpi/q35/SRAT.xapic b/tests/data/acpi/q35/SRAT.xapic new file mode 100644 index 0000000000..1a91cfa65f Binary files /dev/null and b/tests/data/acpi/q35/SRAT.xapic differ diff --git a/tests/data/acpi/q35/SSDT.dimmpxm b/tests/data/acpi/q35/SSDT.dimmpxm index 617a1c911c..9ea4e0d0ce 100644 Binary files a/tests/data/acpi/q35/SSDT.dimmpxm and b/tests/data/acpi/q35/SSDT.dimmpxm differ diff --git a/tests/data/acpi/q35/TCPA.tis.tpm12 b/tests/data/acpi/q35/TCPA.tis.tpm12 new file mode 100644 index 0000000000..a56961b413 Binary files /dev/null and b/tests/data/acpi/q35/TCPA.tis.tpm12 differ diff --git a/tests/data/acpi/q35/TPM2.tis b/tests/data/acpi/q35/TPM2.tis.tpm2 similarity index 100% rename from tests/data/acpi/q35/TPM2.tis rename to tests/data/acpi/q35/TPM2.tis.tpm2 diff --git a/tests/data/acpi/q35/VIOT.viot b/tests/data/acpi/q35/VIOT.viot new file mode 100644 index 0000000000..275c78fbe8 Binary files /dev/null and b/tests/data/acpi/q35/VIOT.viot differ diff --git a/tests/data/acpi/rebuild-expected-aml.sh b/tests/data/acpi/rebuild-expected-aml.sh index fc78770544..dcf2e2f221 100755 --- a/tests/data/acpi/rebuild-expected-aml.sh +++ b/tests/data/acpi/rebuild-expected-aml.sh @@ -12,7 +12,7 @@ # This work is licensed under the terms of the GNU GPLv2. # See the COPYING.LIB file in the top-level directory. -qemu_bins="./qemu-system-x86_64 ./qemu-system-aarch64" +qemu_arches="x86_64 aarch64" if [ ! -e "tests/qtest/bios-tables-test" ]; then echo "Test: bios-tables-test is required! Run make check before this script." @@ -20,6 +20,26 @@ if [ ! -e "tests/qtest/bios-tables-test" ]; then exit 1; fi +if grep TARGET_DIRS= config-host.mak; then + for arch in $qemu_arches; do + if grep TARGET_DIRS= config-host.mak | grep "$arch"-softmmu; + then + qemu_bins="$qemu_bins ./qemu-system-$arch" + fi + done +else + echo "config-host.mak missing!" + echo "Run this script from the build directory." + exit 1; +fi + +if [ -z "$qemu_bins" ]; then + echo "Only the following architectures are currently supported: $qemu_arches" + echo "None of these configured!" + echo "To fix, run configure --target-list=x86_64-softmmu,aarch64-softmmu" + exit 1; +fi + for qemu in $qemu_bins; do if [ ! 
-e $qemu ]; then echo "Run 'make' to build the following QEMU executables: $qemu_bins" diff --git a/tests/data/acpi/virt/APIC b/tests/data/acpi/virt/APIC index 023f15f12e..179d274770 100644 Binary files a/tests/data/acpi/virt/APIC and b/tests/data/acpi/virt/APIC differ diff --git a/tests/data/acpi/virt/APIC.acpihmatvirt b/tests/data/acpi/virt/APIC.acpihmatvirt new file mode 100644 index 0000000000..68200204c6 Binary files /dev/null and b/tests/data/acpi/virt/APIC.acpihmatvirt differ diff --git a/tests/data/acpi/virt/APIC.memhp b/tests/data/acpi/virt/APIC.memhp index 023f15f12e..179d274770 100644 Binary files a/tests/data/acpi/virt/APIC.memhp and b/tests/data/acpi/virt/APIC.memhp differ diff --git a/tests/data/acpi/virt/APIC.numamem b/tests/data/acpi/virt/APIC.numamem index 023f15f12e..179d274770 100644 Binary files a/tests/data/acpi/virt/APIC.numamem and b/tests/data/acpi/virt/APIC.numamem differ diff --git a/tests/data/acpi/virt/DBG2 b/tests/data/acpi/virt/DBG2 new file mode 100644 index 0000000000..86e6314f7b Binary files /dev/null and b/tests/data/acpi/virt/DBG2 differ diff --git a/tests/data/acpi/virt/DSDT.acpihmatvirt b/tests/data/acpi/virt/DSDT.acpihmatvirt new file mode 100644 index 0000000000..aee6ba017c Binary files /dev/null and b/tests/data/acpi/virt/DSDT.acpihmatvirt differ diff --git a/tests/data/acpi/virt/FACP b/tests/data/acpi/virt/FACP index 1f764220f8..ac05c35a69 100644 Binary files a/tests/data/acpi/virt/FACP and b/tests/data/acpi/virt/FACP differ diff --git a/tests/data/acpi/virt/FACP.memhp b/tests/data/acpi/virt/FACP.memhp index 1f764220f8..ac05c35a69 100644 Binary files a/tests/data/acpi/virt/FACP.memhp and b/tests/data/acpi/virt/FACP.memhp differ diff --git a/tests/data/acpi/virt/FACP.numamem b/tests/data/acpi/virt/FACP.numamem index 1f764220f8..ac05c35a69 100644 Binary files a/tests/data/acpi/virt/FACP.numamem and b/tests/data/acpi/virt/FACP.numamem differ diff --git a/tests/data/acpi/virt/GTDT b/tests/data/acpi/virt/GTDT index 9408b71b59..6f8cb9b8f3 100644 Binary files a/tests/data/acpi/virt/GTDT and b/tests/data/acpi/virt/GTDT differ diff --git a/tests/data/acpi/virt/GTDT.memhp b/tests/data/acpi/virt/GTDT.memhp index 9408b71b59..6f8cb9b8f3 100644 Binary files a/tests/data/acpi/virt/GTDT.memhp and b/tests/data/acpi/virt/GTDT.memhp differ diff --git a/tests/data/acpi/virt/GTDT.numamem b/tests/data/acpi/virt/GTDT.numamem index 9408b71b59..6f8cb9b8f3 100644 Binary files a/tests/data/acpi/virt/GTDT.numamem and b/tests/data/acpi/virt/GTDT.numamem differ diff --git a/tests/data/acpi/virt/HMAT.acpihmatvirt b/tests/data/acpi/virt/HMAT.acpihmatvirt new file mode 100644 index 0000000000..6494d11b9f Binary files /dev/null and b/tests/data/acpi/virt/HMAT.acpihmatvirt differ diff --git a/tests/data/acpi/virt/IORT b/tests/data/acpi/virt/IORT new file mode 100644 index 0000000000..7efd0ce8a6 Binary files /dev/null and b/tests/data/acpi/virt/IORT differ diff --git a/tests/data/acpi/virt/IORT.memhp b/tests/data/acpi/virt/IORT.memhp new file mode 100644 index 0000000000..7efd0ce8a6 Binary files /dev/null and b/tests/data/acpi/virt/IORT.memhp differ diff --git a/tests/data/acpi/virt/IORT.numamem b/tests/data/acpi/virt/IORT.numamem new file mode 100644 index 0000000000..7efd0ce8a6 Binary files /dev/null and b/tests/data/acpi/virt/IORT.numamem differ diff --git a/tests/data/acpi/virt/IORT.pxb b/tests/data/acpi/virt/IORT.pxb new file mode 100644 index 0000000000..7efd0ce8a6 Binary files /dev/null and b/tests/data/acpi/virt/IORT.pxb differ diff --git a/tests/data/acpi/virt/PPTT 
b/tests/data/acpi/virt/PPTT new file mode 100644 index 0000000000..f56ea63b36 Binary files /dev/null and b/tests/data/acpi/virt/PPTT differ diff --git a/tests/data/acpi/virt/PPTT.acpihmatvirt b/tests/data/acpi/virt/PPTT.acpihmatvirt new file mode 100644 index 0000000000..710dba5e79 Binary files /dev/null and b/tests/data/acpi/virt/PPTT.acpihmatvirt differ diff --git a/tests/data/acpi/virt/SRAT.acpihmatvirt b/tests/data/acpi/virt/SRAT.acpihmatvirt new file mode 100644 index 0000000000..691ef56e34 Binary files /dev/null and b/tests/data/acpi/virt/SRAT.acpihmatvirt differ diff --git a/tests/data/acpi/virt/SSDT.memhp b/tests/data/acpi/virt/SSDT.memhp index e8b850ae22..2fcfc5fda9 100644 Binary files a/tests/data/acpi/virt/SSDT.memhp and b/tests/data/acpi/virt/SSDT.memhp differ diff --git a/tests/data/acpi/virt/VIOT b/tests/data/acpi/virt/VIOT new file mode 100644 index 0000000000..921f40d88c Binary files /dev/null and b/tests/data/acpi/virt/VIOT differ diff --git a/tests/data/test-qga-config b/tests/data/test-qga-config index 4bb721a4a1..b6b7bc9dfd 100644 --- a/tests/data/test-qga-config +++ b/tests/data/test-qga-config @@ -5,4 +5,4 @@ path=/path/to/org.qemu.guest_agent.0 pidfile=/var/foo/qemu-ga.pid statedir=/var/state verbose=true -blacklist=guest-ping;guest-get-time +block-rpcs=guest-ping;guest-get-time diff --git a/tests/docker/Makefile.include b/tests/docker/Makefile.include index ff5d732889..fc7a3b7e71 100644 --- a/tests/docker/Makefile.include +++ b/tests/docker/Makefile.include @@ -8,29 +8,15 @@ COMMA := , HOST_ARCH = $(if $(ARCH),$(ARCH),$(shell uname -m)) -DOCKER_SUFFIX := .docker DOCKER_FILES_DIR := $(SRC_PATH)/tests/docker/dockerfiles -# we don't run tests on intermediate images (used as base by another image) -DOCKER_PARTIAL_IMAGES := debian10 debian11 debian-bootstrap empty -DOCKER_IMAGES := $(sort $(notdir $(basename $(wildcard $(DOCKER_FILES_DIR)/*.docker)))) -DOCKER_TARGETS := $(patsubst %,docker-image-%,$(DOCKER_IMAGES)) -# Use a global constant ccache directory to speed up repetitive builds -DOCKER_CCACHE_DIR := $$HOME/.cache/qemu-docker-ccache ifeq ($(HOST_ARCH),x86_64) DOCKER_DEFAULT_REGISTRY := registry.gitlab.com/qemu-project/qemu endif DOCKER_REGISTRY := $(if $(REGISTRY),$(REGISTRY),$(DOCKER_DEFAULT_REGISTRY)) -DOCKER_TESTS := $(notdir $(shell \ - find $(SRC_PATH)/tests/docker/ -name 'test-*' -type f)) - -ENGINE := auto - +ENGINE ?= auto DOCKER_SCRIPT=$(SRC_PATH)/tests/docker/docker.py --engine $(ENGINE) -TESTS ?= % -IMAGES ?= % - CUR_TIME := $(shell date +%Y-%m-%d-%H.%M.%S.$$$$) DOCKER_SRC_COPY := $(BUILD_DIR)/docker-src.$(CUR_TIME) @@ -47,17 +33,7 @@ $(DOCKER_SRC_COPY): docker-qemu-src: $(DOCKER_SRC_COPY) -docker-image: ${DOCKER_TARGETS} - -# General rule for building docker images. If we are a sub-make -# invoked with SKIP_DOCKER_BUILD we still check the image is up to date -# though -ifdef SKIP_DOCKER_BUILD -docker-image-%: $(DOCKER_FILES_DIR)/%.docker - $(call quiet-command, \ - $(DOCKER_SCRIPT) check --quiet qemu/$* $<, \ - "CHECK", "$*") -else +# General rule for building docker images. 
docker-image-%: $(DOCKER_FILES_DIR)/%.docker $(call quiet-command,\ $(DOCKER_SCRIPT) build -t qemu/$* -f $< \ @@ -93,110 +69,87 @@ docker-binfmt-image-debian-%: $(DOCKER_FILES_DIR)/debian-bootstrap.docker { echo "You will need to build $(EXECUTABLE)"; exit 1;},\ "CHECK", "debian-$* exists")) -# These are test targets -USER_TCG_TARGETS=$(patsubst %-linux-user,qemu-%,$(filter %-linux-user,$(TARGET_DIRS))) -EXEC_COPY_TESTS=$(patsubst %,docker-exec-copy-test-%, $(USER_TCG_TARGETS)) - -$(EXEC_COPY_TESTS): docker-exec-copy-test-%: $(DOCKER_FILES_DIR)/empty.docker - $(call quiet-command, \ - $(DOCKER_SCRIPT) build -t qemu/exec-copy-test-$* -f $< \ - $(if $V,,--quiet) --no-cache \ - --include-executable=$* \ - --skip-binfmt, \ - "TEST","copy $* to container") - $(call quiet-command, \ - $(DOCKER_SCRIPT) run qemu/exec-copy-test-$* \ - /$* -version > tests/docker-exec-copy-test-$*.out, \ - "TEST","check $* works in container") - -docker-exec-copy-test: $(EXEC_COPY_TESTS) - -endif - -# Enforce dependencies for composite images +# Special case cross-compiling x86_64 on non-x86_64 systems. ifeq ($(HOST_ARCH),x86_64) -docker-image-debian-amd64: docker-image-debian10 DOCKER_PARTIAL_IMAGES += debian-amd64-cross else -docker-image-debian-amd64-cross: docker-image-debian10 DOCKER_PARTIAL_IMAGES += debian-amd64 endif # For non-x86 hosts not all cross-compilers have been packaged ifneq ($(HOST_ARCH),x86_64) -DOCKER_PARTIAL_IMAGES += debian-mips-cross debian-mipsel-cross debian-mips64el-cross +DOCKER_PARTIAL_IMAGES += debian-mipsel-cross debian-mips64el-cross DOCKER_PARTIAL_IMAGES += debian-ppc64el-cross DOCKER_PARTIAL_IMAGES += debian-s390x-cross DOCKER_PARTIAL_IMAGES += fedora endif -docker-image-debian-alpha-cross: docker-image-debian10 -docker-image-debian-arm64-cross: docker-image-debian10 -docker-image-debian-armel-cross: docker-image-debian10 -docker-image-debian-armhf-cross: docker-image-debian10 -docker-image-debian-hppa-cross: docker-image-debian10 -docker-image-debian-m68k-cross: docker-image-debian10 -docker-image-debian-mips-cross: docker-image-debian10 -docker-image-debian-mips64-cross: docker-image-debian10 -docker-image-debian-mips64el-cross: docker-image-debian10 -docker-image-debian-mipsel-cross: docker-image-debian10 -docker-image-debian-ppc64el-cross: docker-image-debian10 -docker-image-debian-riscv64-cross: docker-image-debian10 -docker-image-debian-s390x-cross: docker-image-debian10 -docker-image-debian-sh4-cross: docker-image-debian10 -docker-image-debian-sparc64-cross: docker-image-debian10 +# The native build should never use the registry +docker-image-debian-native: DOCKER_REGISTRY= -# -# The build rule for hexagon-cross is special in so far for most of -# the time we don't want to build it. While dockers caching does avoid -# this most of the time sometimes we want to force the issue. 
-# -docker-image-debian-hexagon-cross: $(DOCKER_FILES_DIR)/debian-hexagon-cross.docker - $(if $(NOCACHE), \ - $(call quiet-command, \ - $(DOCKER_SCRIPT) build -t qemu/debian-hexagon-cross -f $< \ - $(if $V,,--quiet) --no-cache \ - --registry $(DOCKER_REGISTRY) --extra-files \ - $(DOCKER_FILES_DIR)/debian-hexagon-cross.docker.d/build-toolchain.sh, \ - "BUILD", "debian-hexagon-cross"), \ - $(call quiet-command, \ - $(DOCKER_SCRIPT) fetch $(if $V,,--quiet) \ - qemu/debian-hexagon-cross $(DOCKER_REGISTRY), \ - "FETCH", "debian-hexagon-cross") \ - $(call quiet-command, \ - $(DOCKER_SCRIPT) update $(if $V,,--quiet) \ - qemu/debian-hexagon-cross --add-current-user, \ - "PREPARE", "debian-hexagon-cross")) +# alpine has no adduser +docker-image-alpine: NOUSER=1 -# Specialist build images, sometimes very limited tools -docker-image-debian-tricore-cross: docker-image-debian10 -docker-image-debian-all-test-cross: docker-image-debian10 -docker-image-debian-arm64-test-cross: docker-image-debian11 -docker-image-debian-powerpc-test-cross: docker-image-debian11 +debian-toolchain-run = \ + $(if $(NOCACHE), \ + $(call quiet-command, \ + $(DOCKER_SCRIPT) build -t qemu/$1 -f $< \ + $(if $V,,--quiet) --no-cache \ + --registry $(DOCKER_REGISTRY) --extra-files \ + $(DOCKER_FILES_DIR)/$1.d/build-toolchain.sh, \ + "BUILD", $1), \ + $(call quiet-command, \ + $(DOCKER_SCRIPT) fetch $(if $V,,--quiet) \ + qemu/$1 $(DOCKER_REGISTRY), \ + "FETCH", $1) \ + $(call quiet-command, \ + $(DOCKER_SCRIPT) update $(if $V,,--quiet) \ + qemu/$1 \ + $(if $(NOUSER),,--add-current-user) \ + "PREPARE", $1)) +debian-toolchain = $(call debian-toolchain-run,$(patsubst docker-image-%,%,$1)) + +docker-image-debian-hexagon-cross: $(DOCKER_FILES_DIR)/debian-hexagon-cross.docker \ + $(DOCKER_FILES_DIR)/debian-hexagon-cross.d/build-toolchain.sh + $(call debian-toolchain, $@) + +docker-image-debian-microblaze-cross: $(DOCKER_FILES_DIR)/debian-toolchain.docker \ + $(DOCKER_FILES_DIR)/debian-microblaze-cross.d/build-toolchain.sh + $(call debian-toolchain, $@) + +docker-image-debian-nios2-cross: $(DOCKER_FILES_DIR)/debian-toolchain.docker \ + $(DOCKER_FILES_DIR)/debian-nios2-cross.d/build-toolchain.sh + $(call debian-toolchain, $@) # These images may be good enough for building tests but not for test builds DOCKER_PARTIAL_IMAGES += debian-alpha-cross -DOCKER_PARTIAL_IMAGES += debian-arm64-test-cross DOCKER_PARTIAL_IMAGES += debian-powerpc-test-cross DOCKER_PARTIAL_IMAGES += debian-hppa-cross +DOCKER_PARTIAL_IMAGES += debian-loongarch-cross DOCKER_PARTIAL_IMAGES += debian-m68k-cross debian-mips64-cross -DOCKER_PARTIAL_IMAGES += debian-riscv64-cross +DOCKER_PARTIAL_IMAGES += debian-microblaze-cross +DOCKER_PARTIAL_IMAGES += debian-mips-cross +DOCKER_PARTIAL_IMAGES += debian-nios2-cross +DOCKER_PARTIAL_IMAGES += debian-riscv64-test-cross DOCKER_PARTIAL_IMAGES += debian-sh4-cross debian-sparc64-cross DOCKER_PARTIAL_IMAGES += debian-tricore-cross DOCKER_PARTIAL_IMAGES += debian-xtensa-cross -DOCKER_PARTIAL_IMAGES += fedora-i386-cross fedora-cris-cross +DOCKER_PARTIAL_IMAGES += fedora-cris-cross -# Rules for building linux-user powered images -# -# These are slower than using native cross compiler setups but can -# work around issues with poorly working multi-arch systems and broken -# packages. 
+# images that are only used to build other images +DOCKER_VIRTUAL_IMAGES := debian-bootstrap debian-toolchain + +__IMAGES := $(sort $(filter-out $(DOCKER_VIRTUAL_IMAGES), $(notdir $(basename $(wildcard $(DOCKER_FILES_DIR)/*.docker))))) +DOCKER_IMAGES := $(if $(IMAGES), $(filter $(IMAGES), $(__IMAGES)), $(__IMAGES)) + +__TESTS := $(notdir $(shell find $(SRC_PATH)/tests/docker/ -name 'test-*' -type f)) +DOCKER_TESTS := $(if $(TESTS), $(filter $(TESTS), $(__TESTS)), $(__TESTS)) # Expand all the pre-requistes for each docker image and test combination $(foreach i,$(filter-out $(DOCKER_PARTIAL_IMAGES),$(DOCKER_IMAGES)), \ $(foreach t,$(DOCKER_TESTS), \ $(eval .PHONY: docker-$t@$i) \ - $(eval docker-$t@$i: docker-image-$i docker-run-$t@$i) \ + $(eval docker-$t@$i: docker-image-$i; @$(MAKE) docker-run TEST=$t IMAGE=qemu/$i) \ ) \ $(foreach t,$(DOCKER_TESTS), \ $(eval docker-all-tests: docker-$t@$i) \ @@ -223,11 +176,6 @@ docker: @echo @echo 'Available container images:' @echo ' $(DOCKER_IMAGES)' -ifneq ($(DOCKER_USER_IMAGES),) - @echo - @echo 'Available linux-user images (docker-binfmt-image-debian-%):' - @echo ' $(DOCKER_USER_IMAGES)' -endif @echo @echo 'Available tests:' @echo ' $(DOCKER_TESTS)' @@ -236,8 +184,9 @@ endif @echo ' TARGET_LIST=a,b,c Override target list in builds.' @echo ' EXTRA_CONFIGURE_OPTS="..."' @echo ' Extra configure options.' - @echo ' IMAGES="a b c ..": Filters which images to build or run.' - @echo ' TESTS="x y z .." Filters which tests to run (for docker-test).' + @echo ' TEST_COMMAND="..." Override the default `make check` target.' + @echo ' IMAGES="a b c ..": Restrict available images to subset.' + @echo ' TESTS="x y z .." Restrict available tests to subset.' @echo ' J=[0..9]* Overrides the -jN parameter for make commands' @echo ' (default is 1)' @echo ' DEBUG=1 Stop and drop to shell in the created container' @@ -255,9 +204,12 @@ endif docker-help: docker +# Use a global constant ccache directory to speed up repetitive builds +DOCKER_CCACHE_DIR := $$HOME/.cache/qemu-docker-ccache + # This rule if for directly running against an arbitrary docker target. # It is called by the expanded docker targets (e.g. make -# docker-test-foo@bar) which will do additional verification. +# docker-test-foo@bar) which will also ensure the image is up to date. 
# # For example: make docker-run TEST="test-quick" IMAGE="debian:arm64" EXECUTABLE=./aarch64-linux-user/qemu-aarch64 # @@ -279,6 +231,7 @@ docker-run: docker-qemu-src $(if $(NETWORK),$(if $(subst $(NETWORK),,1),--net=$(NETWORK)),--net=none) \ -e TARGET_LIST=$(subst $(SPACE),$(COMMA),$(TARGET_LIST)) \ -e EXTRA_CONFIGURE_OPTS="$(EXTRA_CONFIGURE_OPTS)" \ + -e TEST_COMMAND="$(TEST_COMMAND)" \ -e V=$V -e J=$J -e DEBUG=$(DEBUG) \ -e SHOW_ENV=$(SHOW_ENV) \ $(if $(NOUSER),, \ @@ -292,13 +245,7 @@ docker-run: docker-qemu-src $(call quiet-command, rm -r $(DOCKER_SRC_COPY), \ " CLEANUP $(DOCKER_SRC_COPY)") -# Run targets: -# -# Of the form docker-TEST-FOO@IMAGE-BAR which will then be expanded into a call to "make docker-run" -docker-run-%: CMD = $(shell echo '$@' | sed -e 's/docker-run-\([^@]*\)@\(.*\)/\1/') -docker-run-%: IMAGE = $(shell echo '$@' | sed -e 's/docker-run-\([^@]*\)@\(.*\)/\2/') -docker-run-%: - @$(MAKE) docker-run TEST=$(CMD) IMAGE=qemu/$(IMAGE) +docker-image: ${DOCKER_IMAGES:%=docker-image-%} docker-clean: $(call quiet-command, $(DOCKER_SCRIPT) clean) diff --git a/tests/docker/common.rc b/tests/docker/common.rc index c5cc33d366..9a33df2832 100755 --- a/tests/docker/common.rc +++ b/tests/docker/common.rc @@ -12,8 +12,14 @@ # the top-level directory. # This might be set by ENV of a docker container... it is always -# overriden by TARGET_LIST if the user sets it. -DEF_TARGET_LIST=${DEF_TARGET_LIST:-"x86_64-softmmu,aarch64-softmmu"} +# overriden by TARGET_LIST if the user sets it. We special case +# "none" to allow for other options like --disable-tcg to restrict the +# builds we eventually do. +if test "$DEF_TARGET_LIST" = "none"; then + DEF_TARGET_LIST="" +else + DEF_TARGET_LIST=${DEF_TARGET_LIST:-"x86_64-softmmu,aarch64-softmmu"} +fi requires_binary() { @@ -57,12 +63,12 @@ check_qemu() { # default to make check unless the caller specifies if [ $# = 0 ]; then - INVOCATION="check" + INVOCATION="${TEST_COMMAND:-make $MAKEFLAGS check}" else - INVOCATION="$@" + INVOCATION="make $MAKEFLAGS $@" fi - make $MAKEFLAGS $INVOCATION + $INVOCATION } test_fail() diff --git a/tests/docker/docker.py b/tests/docker/docker.py index 78dd13171e..3a1ed7cb18 100755 --- a/tests/docker/docker.py +++ b/tests/docker/docker.py @@ -205,22 +205,17 @@ def _read_qemu_dockerfile(img_name): return _read_dockerfile(df) -def _dockerfile_preprocess(df): - out = "" +def _dockerfile_verify_flat(df): + "Verify we do not include other qemu/ layers" for l in df.splitlines(): if len(l.strip()) == 0 or l.startswith("#"): continue from_pref = "FROM qemu/" if l.startswith(from_pref): - # TODO: Alternatively we could replace this line with "FROM $ID" - # where $ID is the image's hex id obtained with - # $ docker images $IMAGE --format="{{.Id}}" - # but unfortunately that's not supported by RHEL 7. 
- inlining = _read_qemu_dockerfile(l[len(from_pref):]) - out += _dockerfile_preprocess(inlining) - continue - out += l + "\n" - return out + print("We no longer support multiple QEMU layers.") + print("Dockerfiles should be flat, ideally created by lcitool") + return False + return True class Docker(object): @@ -309,23 +304,10 @@ class Docker(object): if argv is None: argv = [] - # pre-calculate the docker checksum before any - # substitutions we make for caching - checksum = _text_checksum(_dockerfile_preprocess(dockerfile)) + if not _dockerfile_verify_flat(dockerfile): + return -1 - if registry is not None: - sources = re.findall("FROM qemu\/(.*)", dockerfile) - # Fetch any cache layers we can, may fail - for s in sources: - pull_args = ["pull", "%s/qemu/%s" % (registry, s)] - if self._do(pull_args, quiet=quiet) != 0: - registry = None - break - # Make substitutions - if registry is not None: - dockerfile = dockerfile.replace("FROM qemu/", - "FROM %s/qemu/" % - (registry)) + checksum = _text_checksum(dockerfile) tmp_df = tempfile.NamedTemporaryFile(mode="w+t", encoding='utf-8', @@ -371,7 +353,7 @@ class Docker(object): checksum = self.get_image_dockerfile_checksum(tag) except Exception: return False - return checksum == _text_checksum(_dockerfile_preprocess(dockerfile)) + return checksum == _text_checksum(dockerfile) def run(self, cmd, keep, quiet, as_user=False): label = uuid.uuid4().hex @@ -676,63 +658,6 @@ class CcCommand(SubCommand): as_user=True) -class CheckCommand(SubCommand): - """Check if we need to re-build a docker image out of a dockerfile. - Arguments: """ - name = "check" - - def args(self, parser): - parser.add_argument("tag", - help="Image Tag") - parser.add_argument("dockerfile", default=None, - help="Dockerfile name", nargs='?') - parser.add_argument("--checktype", choices=["checksum", "age"], - default="checksum", help="check type") - parser.add_argument("--olderthan", default=60, type=int, - help="number of minutes") - - def run(self, args, argv): - tag = args.tag - - try: - dkr = Docker() - except subprocess.CalledProcessError: - print("Docker not set up") - return 1 - - info = dkr.inspect_tag(tag) - if info is None: - print("Image does not exist") - return 1 - - if args.checktype == "checksum": - if not args.dockerfile: - print("Need a dockerfile for tag:%s" % (tag)) - return 1 - - dockerfile = _read_dockerfile(args.dockerfile) - - if dkr.image_matches_dockerfile(tag, dockerfile): - if not args.quiet: - print("Image is up to date") - return 0 - else: - print("Image needs updating") - return 1 - elif args.checktype == "age": - timestr = dkr.get_image_creation_time(info).split(".")[0] - created = datetime.strptime(timestr, "%Y-%m-%dT%H:%M:%S") - past = datetime.now() - timedelta(minutes=args.olderthan) - if created < past: - print ("Image created @ %s more than %d minutes old" % - (timestr, args.olderthan)) - return 1 - else: - if not args.quiet: - print ("Image less than %d minutes old" % (args.olderthan)) - return 0 - - def main(): global USE_ENGINE diff --git a/tests/docker/dockerfiles/alpine.docker b/tests/docker/dockerfiles/alpine.docker index 7e6997e301..094f66f4eb 100644 --- a/tests/docker/dockerfiles/alpine.docker +++ b/tests/docker/dockerfiles/alpine.docker @@ -1,59 +1,127 @@ +# THIS FILE WAS AUTO-GENERATED +# +# $ lcitool dockerfile --layers all alpine-316 qemu +# +# https://gitlab.com/libvirt/libvirt-ci -FROM alpine:edge +FROM docker.io/library/alpine:3.16 -RUN apk update -RUN apk upgrade +RUN apk update && \ + apk upgrade && \ + apk add \ + alsa-lib-dev \ 
+ attr-dev \ + bash \ + bc \ + bison \ + bzip2 \ + bzip2-dev \ + ca-certificates \ + capstone-dev \ + ccache \ + cdrkit \ + ceph-dev \ + clang \ + cmocka-dev \ + ctags \ + curl-dev \ + cyrus-sasl-dev \ + dbus \ + diffutils \ + dtc-dev \ + eudev-dev \ + findutils \ + flex \ + fuse3-dev \ + g++ \ + gcc \ + gcovr \ + gettext \ + git \ + glib-dev \ + glib-static \ + gnutls-dev \ + gtk+3.0-dev \ + json-c-dev \ + libaio-dev \ + libbpf-dev \ + libcap-ng-dev \ + libdrm-dev \ + libepoxy-dev \ + libffi-dev \ + libgcrypt-dev \ + libjpeg-turbo-dev \ + libnfs-dev \ + libpng-dev \ + libseccomp-dev \ + libselinux-dev \ + libslirp-dev \ + libssh-dev \ + libtasn1-dev \ + liburing-dev \ + libusb-dev \ + linux-pam-dev \ + llvm11 \ + lttng-ust-dev \ + lzo-dev \ + make \ + mesa-dev \ + meson \ + multipath-tools \ + musl-dev \ + ncurses-dev \ + ndctl-dev \ + net-tools \ + nettle-dev \ + nmap-ncat \ + numactl-dev \ + openssh-client \ + pcre-dev \ + perl \ + pixman-dev \ + pkgconf \ + pulseaudio-dev \ + py3-numpy \ + py3-pillow \ + py3-pip \ + py3-sphinx \ + py3-sphinx_rtd_theme \ + py3-yaml \ + python3 \ + rpm2cpio \ + samurai \ + sdl2-dev \ + sdl2_image-dev \ + sed \ + snappy-dev \ + sndio-dev \ + sparse \ + spice-dev \ + spice-protocol \ + tar \ + tesseract-ocr \ + texinfo \ + usbredir-dev \ + util-linux \ + vde2-dev \ + virglrenderer-dev \ + vte3-dev \ + which \ + xen-dev \ + xfsprogs-dev \ + zlib-dev \ + zlib-static \ + zstd-dev && \ + apk list | sort > /packages.txt && \ + mkdir -p /usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/c++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/clang && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/g++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc -# Please keep this list sorted alphabetically -ENV PACKAGES \ - alsa-lib-dev \ - bash \ - binutils \ - ccache \ - coreutils \ - curl-dev \ - g++ \ - gcc \ - git \ - glib-dev \ - glib-static \ - gnutls-dev \ - gtk+3.0-dev \ - libaio-dev \ - libbpf-dev \ - libcap-ng-dev \ - libffi-dev \ - libjpeg-turbo-dev \ - libnfs-dev \ - libpng-dev \ - libseccomp-dev \ - libssh-dev \ - libusb-dev \ - libxml2-dev \ - lzo-dev \ - make \ - mesa-dev \ - mesa-egl \ - mesa-gbm \ - meson \ - ncurses-dev \ - ninja \ - perl \ - pulseaudio-dev \ - python3 \ - py3-sphinx \ - py3-sphinx_rtd_theme \ - shadow \ - snappy-dev \ - spice-dev \ - texinfo \ - usbredir-dev \ - util-linux-dev \ - vde2-dev \ - virglrenderer-dev \ - vte3-dev \ - xfsprogs-dev \ - zlib-dev \ - zlib-static - -RUN apk add $PACKAGES +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE "/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" diff --git a/tests/docker/dockerfiles/centos8.docker b/tests/docker/dockerfiles/centos8.docker index 46398c61ee..1f70d41aeb 100644 --- a/tests/docker/dockerfiles/centos8.docker +++ b/tests/docker/dockerfiles/centos8.docker @@ -1,111 +1,137 @@ -FROM docker.io/centos:8 +# THIS FILE WAS AUTO-GENERATED +# +# $ lcitool dockerfile --layers all centos-stream-8 qemu +# +# https://gitlab.com/libvirt/libvirt-ci -RUN dnf -y update -ENV PACKAGES \ - SDL2-devel \ - alsa-lib-devel \ - bc \ - brlapi-devel \ - bzip2 \ - bzip2-devel \ - ca-certificates \ - capstone-devel \ - ccache \ - clang \ - ctags \ - cyrus-sasl-devel \ - daxctl-devel \ - dbus-daemon \ - device-mapper-multipath-devel \ - diffutils \ - findutils \ - gcc \ - gcc-c++ \ - genisoimage \ - gettext \ - git \ - 
glib2-devel \ - glibc-langpack-en \ - glibc-static \ - glusterfs-api-devel \ - gnutls-devel \ - gtk3-devel \ - hostname \ - jemalloc-devel \ - libaio-devel \ - libasan \ - libattr-devel \ - libbpf-devel \ - libcacard-devel \ - libcap-ng-devel \ - libcurl-devel \ - libdrm-devel \ - libepoxy-devel \ - libfdt-devel \ - libffi-devel \ - libgcrypt-devel \ - libiscsi-devel \ - libjpeg-devel \ - libnfs-devel \ - libpmem-devel \ - libpng-devel \ - librbd-devel \ - libseccomp-devel \ - libslirp-devel \ - libssh-devel \ - libtasn1-devel \ - libubsan \ - libudev-devel \ - libusbx-devel \ - libxml2-devel \ - libzstd-devel \ - llvm \ - lzo-devel \ - make \ - mesa-libgbm-devel \ - ncurses-devel \ - nettle-devel \ - ninja-build \ - nmap-ncat \ - numactl-devel \ - openssh-clients \ - pam-devel \ - perl \ - perl-Test-Harness \ - pixman-devel \ - pkgconfig \ - pulseaudio-libs-devel \ - python3 \ - python3-PyYAML \ - python3-numpy \ - python3-pillow \ - python3-pip \ - python3-setuptools \ - python3-sphinx \ - python3-sphinx_rtd_theme \ - python3-virtualenv \ - python3-wheel \ - rdma-core-devel \ - rpm \ - sed \ - snappy-devel \ - spice-protocol \ - spice-server-devel \ - systemd-devel \ - systemtap-sdt-devel \ - tar \ - texinfo \ - usbredir-devel \ - util-linux \ - virglrenderer-devel \ - vte291-devel \ - which \ - xfsprogs-devel \ - zlib-devel +FROM quay.io/centos/centos:stream8 -RUN dnf install -y dnf-plugins-core && \ - dnf config-manager --set-enabled powertools && \ - dnf install -y centos-release-advanced-virtualization && \ - dnf install -y epel-release && \ - dnf install -y $PACKAGES -RUN rpm -q $PACKAGES | sort > /packages.txt +RUN dnf distro-sync -y && \ + dnf install 'dnf-command(config-manager)' -y && \ + dnf config-manager --set-enabled -y powertools && \ + dnf install -y centos-release-advanced-virtualization && \ + dnf install -y epel-release && \ + dnf install -y epel-next-release && \ + dnf install -y \ + SDL2-devel \ + alsa-lib-devel \ + bash \ + bc \ + bison \ + brlapi-devel \ + bzip2 \ + bzip2-devel \ + ca-certificates \ + capstone-devel \ + ccache \ + clang \ + ctags \ + cyrus-sasl-devel \ + daxctl-devel \ + dbus-daemon \ + device-mapper-multipath-devel \ + diffutils \ + findutils \ + flex \ + fuse3-devel \ + gcc \ + gcc-c++ \ + genisoimage \ + gettext \ + git \ + glib2-devel \ + glib2-static \ + glibc-langpack-en \ + glibc-static \ + glusterfs-api-devel \ + gnutls-devel \ + gtk3-devel \ + hostname \ + jemalloc-devel \ + json-c-devel \ + libaio-devel \ + libasan \ + libattr-devel \ + libbpf-devel \ + libcacard-devel \ + libcap-ng-devel \ + libcmocka-devel \ + libcurl-devel \ + libdrm-devel \ + libepoxy-devel \ + libfdt-devel \ + libffi-devel \ + libgcrypt-devel \ + libiscsi-devel \ + libjpeg-devel \ + libnfs-devel \ + libpmem-devel \ + libpng-devel \ + librbd-devel \ + libseccomp-devel \ + libselinux-devel \ + libslirp-devel \ + libssh-devel \ + libtasn1-devel \ + libubsan \ + liburing-devel \ + libusbx-devel \ + libzstd-devel \ + llvm \ + lttng-ust-devel \ + lzo-devel \ + make \ + mesa-libgbm-devel \ + meson \ + ncurses-devel \ + nettle-devel \ + ninja-build \ + nmap-ncat \ + numactl-devel \ + openssh-clients \ + pam-devel \ + pcre-static \ + perl \ + pixman-devel \ + pkgconfig \ + pulseaudio-libs-devel \ + python3 \ + python3-PyYAML \ + python3-numpy \ + python3-pillow \ + python3-pip \ + python3-sphinx \ + python3-sphinx_rtd_theme \ + rdma-core-devel \ + rpm \ + sed \ + snappy-devel \ + spice-protocol \ + spice-server-devel \ + systemd-devel \ + systemtap-sdt-devel \ + tar \ + 
texinfo \ + usbredir-devel \ + util-linux \ + virglrenderer-devel \ + vte291-devel \ + which \ + xfsprogs-devel \ + zlib-devel \ + zlib-static && \ + dnf autoremove -y && \ + dnf clean all -y && \ + rpm -qa | sort > /packages.txt && \ + mkdir -p /usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/c++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/clang && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/g++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc + +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE "/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" diff --git a/tests/docker/dockerfiles/debian-all-test-cross.docker b/tests/docker/dockerfiles/debian-all-test-cross.docker index dedcea58b4..8dc5e1b5de 100644 --- a/tests/docker/dockerfiles/debian-all-test-cross.docker +++ b/tests/docker/dockerfiles/debian-all-test-cross.docker @@ -6,16 +6,26 @@ # basic compilers for as many targets as possible. We shall use this # to build and run linux-user tests on GitLab # -FROM qemu/debian10 +FROM docker.io/library/debian:11-slim -# What we need to build QEMU itself -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ +# Duplicate deb line as deb-src +RUN cat /etc/apt/sources.list | sed "s/^deb\ /deb-src /" >> /etc/apt/sources.list + +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ apt build-dep -yy qemu -# Add the foreign architecture we want and install dependencies +# Add extra build tools and as many cross compilers as we can for testing RUN DEBIAN_FRONTEND=noninteractive eatmydata \ apt install -y --no-install-recommends \ + bison \ + ccache \ + clang \ + flex \ + git \ + ninja-build \ gcc-aarch64-linux-gnu \ libc6-dev-arm64-cross \ gcc-alpha-linux-gnu \ diff --git a/tests/docker/dockerfiles/debian-alpha-cross.docker b/tests/docker/dockerfiles/debian-alpha-cross.docker index 10fe30df0d..4eeb43c78a 100644 --- a/tests/docker/dockerfiles/debian-alpha-cross.docker +++ b/tests/docker/dockerfiles/debian-alpha-cross.docker @@ -1,12 +1,14 @@ # # Docker cross-compiler target # -# This docker target builds on the debian Buster base image. +# This docker target builds on the Debian Bullseye base image. # -FROM qemu/debian10 +FROM docker.io/library/debian:11-slim -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y \ gcc-alpha-linux-gnu \ libc6.1-dev-alpha-cross diff --git a/tests/docker/dockerfiles/debian-amd64-cross.docker b/tests/docker/dockerfiles/debian-amd64-cross.docker index 870109ef6a..5e57309361 100644 --- a/tests/docker/dockerfiles/debian-amd64-cross.docker +++ b/tests/docker/dockerfiles/debian-amd64-cross.docker @@ -1,22 +1,172 @@ +# THIS FILE WAS AUTO-GENERATED # -# Docker x86_64 cross target +# $ lcitool dockerfile --layers all --cross x86_64 debian-11 qemu # -# This docker target is used on non-x86_64 machines which need the -# x86_64 cross compilers installed. 
-# -FROM qemu/debian10 -MAINTAINER Alex Bennée +# https://gitlab.com/libvirt/libvirt-ci -# Add the foreign architecture we want and install dependencies -RUN dpkg --add-architecture amd64 -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - crossbuild-essential-amd64 -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt build-dep -yy -a amd64 --arch-only qemu +FROM docker.io/library/debian:11-slim -# Specify the cross prefix for this image (see tests/docker/common.rc) +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y \ + bash \ + bc \ + bison \ + bsdextrautils \ + bzip2 \ + ca-certificates \ + ccache \ + dbus \ + debianutils \ + diffutils \ + exuberant-ctags \ + findutils \ + flex \ + gcovr \ + genisoimage \ + gettext \ + git \ + hostname \ + libglib2.0-dev \ + libpcre2-dev \ + libsndio-dev \ + libspice-protocol-dev \ + llvm \ + locales \ + make \ + meson \ + ncat \ + ninja-build \ + openssh-client \ + perl-base \ + pkgconf \ + python3 \ + python3-numpy \ + python3-opencv \ + python3-pillow \ + python3-pip \ + python3-sphinx \ + python3-sphinx-rtd-theme \ + python3-venv \ + python3-yaml \ + rpm2cpio \ + sed \ + sparse \ + tar \ + tesseract-ocr \ + tesseract-ocr-eng \ + texinfo && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ + dpkg-reconfigure locales + +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE "/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" + +RUN export DEBIAN_FRONTEND=noninteractive && \ + dpkg --add-architecture amd64 && \ + eatmydata apt-get update && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y dpkg-dev && \ + eatmydata apt-get install --no-install-recommends -y \ + g++-x86-64-linux-gnu \ + gcc-x86-64-linux-gnu \ + libaio-dev:amd64 \ + libasan5:amd64 \ + libasound2-dev:amd64 \ + libattr1-dev:amd64 \ + libbpf-dev:amd64 \ + libbrlapi-dev:amd64 \ + libbz2-dev:amd64 \ + libc6-dev:amd64 \ + libcacard-dev:amd64 \ + libcap-ng-dev:amd64 \ + libcapstone-dev:amd64 \ + libcmocka-dev:amd64 \ + libcurl4-gnutls-dev:amd64 \ + libdaxctl-dev:amd64 \ + libdrm-dev:amd64 \ + libepoxy-dev:amd64 \ + libfdt-dev:amd64 \ + libffi-dev:amd64 \ + libfuse3-dev:amd64 \ + libgbm-dev:amd64 \ + libgcrypt20-dev:amd64 \ + libglib2.0-dev:amd64 \ + libglusterfs-dev:amd64 \ + libgnutls28-dev:amd64 \ + libgtk-3-dev:amd64 \ + libibumad-dev:amd64 \ + libibverbs-dev:amd64 \ + libiscsi-dev:amd64 \ + libjemalloc-dev:amd64 \ + libjpeg62-turbo-dev:amd64 \ + libjson-c-dev:amd64 \ + liblttng-ust-dev:amd64 \ + liblzo2-dev:amd64 \ + libncursesw5-dev:amd64 \ + libnfs-dev:amd64 \ + libnuma-dev:amd64 \ + libpam0g-dev:amd64 \ + libpixman-1-dev:amd64 \ + libpmem-dev:amd64 \ + libpng-dev:amd64 \ + libpulse-dev:amd64 \ + librbd-dev:amd64 \ + librdmacm-dev:amd64 \ + libsasl2-dev:amd64 \ + libsdl2-dev:amd64 \ + libsdl2-image-dev:amd64 \ + libseccomp-dev:amd64 \ + libselinux1-dev:amd64 \ + libslirp-dev:amd64 \ + libsnappy-dev:amd64 \ + libspice-server-dev:amd64 \ + libssh-gcrypt-dev:amd64 \ + libsystemd-dev:amd64 \ + libtasn1-6-dev:amd64 \ + libubsan1:amd64 \ + libudev-dev:amd64 \ + liburing-dev:amd64 \ + libusb-1.0-0-dev:amd64 \ + libusbredirhost-dev:amd64 \ + libvdeplug-dev:amd64 \ + 
libvirglrenderer-dev:amd64 \ + libvte-2.91-dev:amd64 \ + libxen-dev:amd64 \ + libzstd-dev:amd64 \ + nettle-dev:amd64 \ + systemtap-sdt-dev:amd64 \ + xfslibs-dev:amd64 \ + zlib1g-dev:amd64 && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + mkdir -p /usr/local/share/meson/cross && \ + echo "[binaries]\n\ +c = '/usr/bin/x86_64-linux-gnu-gcc'\n\ +ar = '/usr/bin/x86_64-linux-gnu-gcc-ar'\n\ +strip = '/usr/bin/x86_64-linux-gnu-strip'\n\ +pkgconfig = '/usr/bin/x86_64-linux-gnu-pkg-config'\n\ +\n\ +[host_machine]\n\ +system = 'linux'\n\ +cpu_family = 'x86_64'\n\ +cpu = 'x86_64'\n\ +endian = 'little'" > /usr/local/share/meson/cross/x86_64-linux-gnu && \ + dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ + mkdir -p /usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/x86_64-linux-gnu-c++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/x86_64-linux-gnu-cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/x86_64-linux-gnu-g++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/x86_64-linux-gnu-gcc + +ENV ABI "x86_64-linux-gnu" +ENV MESON_OPTS "--cross-file=x86_64-linux-gnu" ENV QEMU_CONFIGURE_OPTS --cross-prefix=x86_64-linux-gnu- ENV DEF_TARGET_LIST x86_64-softmmu,x86_64-linux-user,i386-softmmu,i386-linux-user diff --git a/tests/docker/dockerfiles/debian-amd64.docker b/tests/docker/dockerfiles/debian-amd64.docker index ed546edcd6..bfeab01ee3 100644 --- a/tests/docker/dockerfiles/debian-amd64.docker +++ b/tests/docker/dockerfiles/debian-amd64.docker @@ -1,59 +1,158 @@ +# THIS FILE WAS AUTO-GENERATED # -# Docker x86_64 target +# $ lcitool dockerfile --layers all debian-11 qemu # -# This docker target builds on the Debian Buster base image. Further -# libraries which are not widely available are installed by hand. 
-# -FROM qemu/debian10 -MAINTAINER Philippe Mathieu-Daudé +# https://gitlab.com/libvirt/libvirt-ci -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt build-dep -yy qemu +FROM docker.io/library/debian:11-slim -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - cscope \ - genisoimage \ - exuberant-ctags \ - global \ - libbz2-dev \ - liblzo2-dev \ - libgcrypt20-dev \ - libfdt-dev \ - librdmacm-dev \ - libsasl2-dev \ - libsnappy-dev \ - libvte-dev \ - netcat-openbsd \ - openssh-client \ - python3-numpy \ - python3-opencv \ - python3-venv +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y \ + bash \ + bc \ + bison \ + bsdextrautils \ + bzip2 \ + ca-certificates \ + ccache \ + clang \ + dbus \ + debianutils \ + diffutils \ + exuberant-ctags \ + findutils \ + flex \ + g++ \ + gcc \ + gcovr \ + genisoimage \ + gettext \ + git \ + hostname \ + libaio-dev \ + libasan5 \ + libasound2-dev \ + libattr1-dev \ + libbpf-dev \ + libbrlapi-dev \ + libbz2-dev \ + libc6-dev \ + libcacard-dev \ + libcap-ng-dev \ + libcapstone-dev \ + libcmocka-dev \ + libcurl4-gnutls-dev \ + libdaxctl-dev \ + libdrm-dev \ + libepoxy-dev \ + libfdt-dev \ + libffi-dev \ + libfuse3-dev \ + libgbm-dev \ + libgcrypt20-dev \ + libglib2.0-dev \ + libglusterfs-dev \ + libgnutls28-dev \ + libgtk-3-dev \ + libibumad-dev \ + libibverbs-dev \ + libiscsi-dev \ + libjemalloc-dev \ + libjpeg62-turbo-dev \ + libjson-c-dev \ + liblttng-ust-dev \ + liblzo2-dev \ + libncursesw5-dev \ + libnfs-dev \ + libnuma-dev \ + libpam0g-dev \ + libpcre2-dev \ + libpixman-1-dev \ + libpmem-dev \ + libpng-dev \ + libpulse-dev \ + librbd-dev \ + librdmacm-dev \ + libsasl2-dev \ + libsdl2-dev \ + libsdl2-image-dev \ + libseccomp-dev \ + libselinux1-dev \ + libslirp-dev \ + libsnappy-dev \ + libsndio-dev \ + libspice-protocol-dev \ + libspice-server-dev \ + libssh-gcrypt-dev \ + libsystemd-dev \ + libtasn1-6-dev \ + libubsan1 \ + libudev-dev \ + liburing-dev \ + libusb-1.0-0-dev \ + libusbredirhost-dev \ + libvdeplug-dev \ + libvirglrenderer-dev \ + libvte-2.91-dev \ + libxen-dev \ + libzstd-dev \ + llvm \ + locales \ + make \ + meson \ + multipath-tools \ + ncat \ + nettle-dev \ + ninja-build \ + openssh-client \ + perl-base \ + pkgconf \ + python3 \ + python3-numpy \ + python3-opencv \ + python3-pillow \ + python3-pip \ + python3-sphinx \ + python3-sphinx-rtd-theme \ + python3-venv \ + python3-yaml \ + rpm2cpio \ + sed \ + sparse \ + systemtap-sdt-dev \ + tar \ + tesseract-ocr \ + tesseract-ocr-eng \ + texinfo \ + xfslibs-dev \ + zlib1g-dev && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ + dpkg-reconfigure locales && \ + dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ + mkdir -p /usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/c++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/clang && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/g++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc -# virgl -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - libegl1-mesa-dev \ - libepoxy-dev \ - libgbm-dev -RUN git clone 
https://gitlab.freedesktop.org/virgl/virglrenderer.git /usr/src/virglrenderer && \ - cd /usr/src/virglrenderer && git checkout virglrenderer-0.8.0 -RUN cd /usr/src/virglrenderer && ./autogen.sh && ./configure --disable-tests && make install - -# netmap -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - linux-headers-amd64 +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE "/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" +# netmap/cscope/global +RUN DEBIAN_FRONTEND=noninteractive eatmydata \ + apt install -y --no-install-recommends \ + cscope\ + global\ + linux-headers-amd64 RUN git clone https://github.com/luigirizzo/netmap.git /usr/src/netmap RUN cd /usr/src/netmap && git checkout v11.3 RUN cd /usr/src/netmap/LINUX && ./configure --no-drivers --no-apps --kernel-dir=$(ls -d /usr/src/linux-headers-*-amd64) && make install ENV QEMU_CONFIGURE_OPTS --enable-netmap - -RUN ldconfig - -# gcrypt -ENV QEMU_CONFIGURE_OPTS $QEMU_CONFIGURE_OPTS --enable-gcrypt diff --git a/tests/docker/dockerfiles/debian-arm64-cross.docker b/tests/docker/dockerfiles/debian-arm64-cross.docker index 166e24df13..98885bd0ee 100644 --- a/tests/docker/dockerfiles/debian-arm64-cross.docker +++ b/tests/docker/dockerfiles/debian-arm64-cross.docker @@ -1,32 +1,171 @@ +# THIS FILE WAS AUTO-GENERATED # -# Docker arm64 cross-compiler target +# $ lcitool dockerfile --layers all --cross aarch64 debian-11 qemu # -# This docker target builds on the debian Buster base image. -# -FROM qemu/debian10 +# https://gitlab.com/libvirt/libvirt-ci -# Add the foreign architecture we want and install dependencies -RUN dpkg --add-architecture arm64 -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - crossbuild-essential-arm64 -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt build-dep -yy -a arm64 --arch-only qemu +FROM docker.io/library/debian:11-slim -# Specify the cross prefix for this image (see tests/docker/common.rc) +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y \ + bash \ + bc \ + bison \ + bsdextrautils \ + bzip2 \ + ca-certificates \ + ccache \ + dbus \ + debianutils \ + diffutils \ + exuberant-ctags \ + findutils \ + flex \ + gcovr \ + genisoimage \ + gettext \ + git \ + hostname \ + libglib2.0-dev \ + libpcre2-dev \ + libsndio-dev \ + libspice-protocol-dev \ + llvm \ + locales \ + make \ + meson \ + ncat \ + ninja-build \ + openssh-client \ + perl-base \ + pkgconf \ + python3 \ + python3-numpy \ + python3-opencv \ + python3-pillow \ + python3-pip \ + python3-sphinx \ + python3-sphinx-rtd-theme \ + python3-venv \ + python3-yaml \ + rpm2cpio \ + sed \ + sparse \ + tar \ + tesseract-ocr \ + tesseract-ocr-eng \ + texinfo && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ + dpkg-reconfigure locales + +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE "/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" + +RUN export DEBIAN_FRONTEND=noninteractive && \ + dpkg --add-architecture arm64 && \ + eatmydata apt-get update && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y dpkg-dev && \ + eatmydata 
apt-get install --no-install-recommends -y \ + g++-aarch64-linux-gnu \ + gcc-aarch64-linux-gnu \ + libaio-dev:arm64 \ + libasan5:arm64 \ + libasound2-dev:arm64 \ + libattr1-dev:arm64 \ + libbpf-dev:arm64 \ + libbrlapi-dev:arm64 \ + libbz2-dev:arm64 \ + libc6-dev:arm64 \ + libcacard-dev:arm64 \ + libcap-ng-dev:arm64 \ + libcapstone-dev:arm64 \ + libcmocka-dev:arm64 \ + libcurl4-gnutls-dev:arm64 \ + libdaxctl-dev:arm64 \ + libdrm-dev:arm64 \ + libepoxy-dev:arm64 \ + libfdt-dev:arm64 \ + libffi-dev:arm64 \ + libfuse3-dev:arm64 \ + libgbm-dev:arm64 \ + libgcrypt20-dev:arm64 \ + libglib2.0-dev:arm64 \ + libglusterfs-dev:arm64 \ + libgnutls28-dev:arm64 \ + libgtk-3-dev:arm64 \ + libibumad-dev:arm64 \ + libibverbs-dev:arm64 \ + libiscsi-dev:arm64 \ + libjemalloc-dev:arm64 \ + libjpeg62-turbo-dev:arm64 \ + libjson-c-dev:arm64 \ + liblttng-ust-dev:arm64 \ + liblzo2-dev:arm64 \ + libncursesw5-dev:arm64 \ + libnfs-dev:arm64 \ + libnuma-dev:arm64 \ + libpam0g-dev:arm64 \ + libpixman-1-dev:arm64 \ + libpng-dev:arm64 \ + libpulse-dev:arm64 \ + librbd-dev:arm64 \ + librdmacm-dev:arm64 \ + libsasl2-dev:arm64 \ + libsdl2-dev:arm64 \ + libsdl2-image-dev:arm64 \ + libseccomp-dev:arm64 \ + libselinux1-dev:arm64 \ + libslirp-dev:arm64 \ + libsnappy-dev:arm64 \ + libspice-server-dev:arm64 \ + libssh-gcrypt-dev:arm64 \ + libsystemd-dev:arm64 \ + libtasn1-6-dev:arm64 \ + libubsan1:arm64 \ + libudev-dev:arm64 \ + liburing-dev:arm64 \ + libusb-1.0-0-dev:arm64 \ + libusbredirhost-dev:arm64 \ + libvdeplug-dev:arm64 \ + libvirglrenderer-dev:arm64 \ + libvte-2.91-dev:arm64 \ + libxen-dev:arm64 \ + libzstd-dev:arm64 \ + nettle-dev:arm64 \ + systemtap-sdt-dev:arm64 \ + xfslibs-dev:arm64 \ + zlib1g-dev:arm64 && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + mkdir -p /usr/local/share/meson/cross && \ + echo "[binaries]\n\ +c = '/usr/bin/aarch64-linux-gnu-gcc'\n\ +ar = '/usr/bin/aarch64-linux-gnu-gcc-ar'\n\ +strip = '/usr/bin/aarch64-linux-gnu-strip'\n\ +pkgconfig = '/usr/bin/aarch64-linux-gnu-pkg-config'\n\ +\n\ +[host_machine]\n\ +system = 'linux'\n\ +cpu_family = 'aarch64'\n\ +cpu = 'aarch64'\n\ +endian = 'little'" > /usr/local/share/meson/cross/aarch64-linux-gnu && \ + dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ + mkdir -p /usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/aarch64-linux-gnu-c++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/aarch64-linux-gnu-cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/aarch64-linux-gnu-g++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/aarch64-linux-gnu-gcc + +ENV ABI "aarch64-linux-gnu" +ENV MESON_OPTS "--cross-file=aarch64-linux-gnu" ENV QEMU_CONFIGURE_OPTS --cross-prefix=aarch64-linux-gnu- ENV DEF_TARGET_LIST aarch64-softmmu,aarch64-linux-user - -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - libbz2-dev:arm64 \ - liblzo2-dev:arm64 \ - librdmacm-dev:arm64 \ - libsnappy-dev:arm64 \ - libxen-dev:arm64 - -# nettle -ENV QEMU_CONFIGURE_OPTS $QEMU_CONFIGURE_OPTS --enable-nettle diff --git a/tests/docker/dockerfiles/debian-arm64-test-cross.docker b/tests/docker/dockerfiles/debian-arm64-test-cross.docker deleted file mode 100644 index 53a9012beb..0000000000 --- a/tests/docker/dockerfiles/debian-arm64-test-cross.docker +++ /dev/null @@ -1,13 +0,0 @@ -# -# Docker arm64 cross-compiler target (tests only) -# -# This docker target builds on the debian Bullseye base image. 
-# -FROM qemu/debian11 - -# Add the foreign architecture we want and install dependencies -RUN dpkg --add-architecture arm64 -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - crossbuild-essential-arm64 gcc-10-aarch64-linux-gnu diff --git a/tests/docker/dockerfiles/debian-armel-cross.docker b/tests/docker/dockerfiles/debian-armel-cross.docker index b7b1a3585f..d5c08714e4 100644 --- a/tests/docker/dockerfiles/debian-armel-cross.docker +++ b/tests/docker/dockerfiles/debian-armel-cross.docker @@ -1,26 +1,170 @@ +# THIS FILE WAS AUTO-GENERATED # -# Docker armel cross-compiler target +# $ lcitool dockerfile --layers all --cross armv6l debian-11 qemu # -# This docker target builds on the debian Stretch base image. -# -FROM qemu/debian10 -MAINTAINER Philippe Mathieu-Daudé +# https://gitlab.com/libvirt/libvirt-ci -# Add the foreign architecture we want and install dependencies -RUN dpkg --add-architecture armel && \ - apt update && \ - apt install -yy crossbuild-essential-armel && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt build-dep -yy -a armel --arch-only qemu +FROM docker.io/library/debian:11-slim -# Specify the cross prefix for this image (see tests/docker/common.rc) +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y \ + bash \ + bc \ + bison \ + bsdextrautils \ + bzip2 \ + ca-certificates \ + ccache \ + dbus \ + debianutils \ + diffutils \ + exuberant-ctags \ + findutils \ + flex \ + gcovr \ + genisoimage \ + gettext \ + git \ + hostname \ + libglib2.0-dev \ + libpcre2-dev \ + libsndio-dev \ + libspice-protocol-dev \ + llvm \ + locales \ + make \ + meson \ + ncat \ + ninja-build \ + openssh-client \ + perl-base \ + pkgconf \ + python3 \ + python3-numpy \ + python3-opencv \ + python3-pillow \ + python3-pip \ + python3-sphinx \ + python3-sphinx-rtd-theme \ + python3-venv \ + python3-yaml \ + rpm2cpio \ + sed \ + sparse \ + tar \ + tesseract-ocr \ + tesseract-ocr-eng \ + texinfo && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ + dpkg-reconfigure locales + +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE "/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" + +RUN export DEBIAN_FRONTEND=noninteractive && \ + dpkg --add-architecture armel && \ + eatmydata apt-get update && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y dpkg-dev && \ + eatmydata apt-get install --no-install-recommends -y \ + g++-arm-linux-gnueabi \ + gcc-arm-linux-gnueabi \ + libaio-dev:armel \ + libasan5:armel \ + libasound2-dev:armel \ + libattr1-dev:armel \ + libbpf-dev:armel \ + libbrlapi-dev:armel \ + libbz2-dev:armel \ + libc6-dev:armel \ + libcacard-dev:armel \ + libcap-ng-dev:armel \ + libcapstone-dev:armel \ + libcmocka-dev:armel \ + libcurl4-gnutls-dev:armel \ + libdaxctl-dev:armel \ + libdrm-dev:armel \ + libepoxy-dev:armel \ + libfdt-dev:armel \ + libffi-dev:armel \ + libfuse3-dev:armel \ + libgbm-dev:armel \ + libgcrypt20-dev:armel \ + libglib2.0-dev:armel \ + libglusterfs-dev:armel \ + libgnutls28-dev:armel \ + libgtk-3-dev:armel \ + libibumad-dev:armel \ + libibverbs-dev:armel \ + libiscsi-dev:armel \ + libjemalloc-dev:armel \ + libjpeg62-turbo-dev:armel \ + libjson-c-dev:armel \ + 
liblttng-ust-dev:armel \ + liblzo2-dev:armel \ + libncursesw5-dev:armel \ + libnfs-dev:armel \ + libnuma-dev:armel \ + libpam0g-dev:armel \ + libpixman-1-dev:armel \ + libpng-dev:armel \ + libpulse-dev:armel \ + librbd-dev:armel \ + librdmacm-dev:armel \ + libsasl2-dev:armel \ + libsdl2-dev:armel \ + libsdl2-image-dev:armel \ + libseccomp-dev:armel \ + libselinux1-dev:armel \ + libslirp-dev:armel \ + libsnappy-dev:armel \ + libspice-server-dev:armel \ + libssh-gcrypt-dev:armel \ + libsystemd-dev:armel \ + libtasn1-6-dev:armel \ + libubsan1:armel \ + libudev-dev:armel \ + liburing-dev:armel \ + libusb-1.0-0-dev:armel \ + libusbredirhost-dev:armel \ + libvdeplug-dev:armel \ + libvirglrenderer-dev:armel \ + libvte-2.91-dev:armel \ + libzstd-dev:armel \ + nettle-dev:armel \ + systemtap-sdt-dev:armel \ + xfslibs-dev:armel \ + zlib1g-dev:armel && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + mkdir -p /usr/local/share/meson/cross && \ + echo "[binaries]\n\ +c = '/usr/bin/arm-linux-gnueabi-gcc'\n\ +ar = '/usr/bin/arm-linux-gnueabi-gcc-ar'\n\ +strip = '/usr/bin/arm-linux-gnueabi-strip'\n\ +pkgconfig = '/usr/bin/arm-linux-gnueabi-pkg-config'\n\ +\n\ +[host_machine]\n\ +system = 'linux'\n\ +cpu_family = 'arm'\n\ +cpu = 'arm'\n\ +endian = 'little'" > /usr/local/share/meson/cross/arm-linux-gnueabi && \ + dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ + mkdir -p /usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/arm-linux-gnueabi-c++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/arm-linux-gnueabi-cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/arm-linux-gnueabi-g++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/arm-linux-gnueabi-gcc + +ENV ABI "arm-linux-gnueabi" +ENV MESON_OPTS "--cross-file=arm-linux-gnueabi" ENV QEMU_CONFIGURE_OPTS --cross-prefix=arm-linux-gnueabi- ENV DEF_TARGET_LIST arm-softmmu,arm-linux-user,armeb-linux-user - -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - libbz2-dev:armel \ - liblzo2-dev:armel \ - librdmacm-dev:armel \ - libsnappy-dev:armel diff --git a/tests/docker/dockerfiles/debian-armhf-cross.docker b/tests/docker/dockerfiles/debian-armhf-cross.docker index 25d7618833..471444fcf4 100644 --- a/tests/docker/dockerfiles/debian-armhf-cross.docker +++ b/tests/docker/dockerfiles/debian-armhf-cross.docker @@ -1,29 +1,171 @@ +# THIS FILE WAS AUTO-GENERATED # -# Docker armhf cross-compiler target +# $ lcitool dockerfile --layers all --cross armv7l debian-11 qemu # -# This docker target builds on the debian Stretch base image. 
-# -FROM qemu/debian10 +# https://gitlab.com/libvirt/libvirt-ci -# Add the foreign architecture we want and install dependencies -RUN dpkg --add-architecture armhf -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - crossbuild-essential-armhf -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt build-dep -yy -a armhf --arch-only qemu +FROM docker.io/library/debian:11-slim -# Specify the cross prefix for this image (see tests/docker/common.rc) +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y \ + bash \ + bc \ + bison \ + bsdextrautils \ + bzip2 \ + ca-certificates \ + ccache \ + dbus \ + debianutils \ + diffutils \ + exuberant-ctags \ + findutils \ + flex \ + gcovr \ + genisoimage \ + gettext \ + git \ + hostname \ + libglib2.0-dev \ + libpcre2-dev \ + libsndio-dev \ + libspice-protocol-dev \ + llvm \ + locales \ + make \ + meson \ + ncat \ + ninja-build \ + openssh-client \ + perl-base \ + pkgconf \ + python3 \ + python3-numpy \ + python3-opencv \ + python3-pillow \ + python3-pip \ + python3-sphinx \ + python3-sphinx-rtd-theme \ + python3-venv \ + python3-yaml \ + rpm2cpio \ + sed \ + sparse \ + tar \ + tesseract-ocr \ + tesseract-ocr-eng \ + texinfo && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ + dpkg-reconfigure locales + +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE "/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" + +RUN export DEBIAN_FRONTEND=noninteractive && \ + dpkg --add-architecture armhf && \ + eatmydata apt-get update && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y dpkg-dev && \ + eatmydata apt-get install --no-install-recommends -y \ + g++-arm-linux-gnueabihf \ + gcc-arm-linux-gnueabihf \ + libaio-dev:armhf \ + libasan5:armhf \ + libasound2-dev:armhf \ + libattr1-dev:armhf \ + libbpf-dev:armhf \ + libbrlapi-dev:armhf \ + libbz2-dev:armhf \ + libc6-dev:armhf \ + libcacard-dev:armhf \ + libcap-ng-dev:armhf \ + libcapstone-dev:armhf \ + libcmocka-dev:armhf \ + libcurl4-gnutls-dev:armhf \ + libdaxctl-dev:armhf \ + libdrm-dev:armhf \ + libepoxy-dev:armhf \ + libfdt-dev:armhf \ + libffi-dev:armhf \ + libfuse3-dev:armhf \ + libgbm-dev:armhf \ + libgcrypt20-dev:armhf \ + libglib2.0-dev:armhf \ + libglusterfs-dev:armhf \ + libgnutls28-dev:armhf \ + libgtk-3-dev:armhf \ + libibumad-dev:armhf \ + libibverbs-dev:armhf \ + libiscsi-dev:armhf \ + libjemalloc-dev:armhf \ + libjpeg62-turbo-dev:armhf \ + libjson-c-dev:armhf \ + liblttng-ust-dev:armhf \ + liblzo2-dev:armhf \ + libncursesw5-dev:armhf \ + libnfs-dev:armhf \ + libnuma-dev:armhf \ + libpam0g-dev:armhf \ + libpixman-1-dev:armhf \ + libpng-dev:armhf \ + libpulse-dev:armhf \ + librbd-dev:armhf \ + librdmacm-dev:armhf \ + libsasl2-dev:armhf \ + libsdl2-dev:armhf \ + libsdl2-image-dev:armhf \ + libseccomp-dev:armhf \ + libselinux1-dev:armhf \ + libslirp-dev:armhf \ + libsnappy-dev:armhf \ + libspice-server-dev:armhf \ + libssh-gcrypt-dev:armhf \ + libsystemd-dev:armhf \ + libtasn1-6-dev:armhf \ + libubsan1:armhf \ + libudev-dev:armhf \ + liburing-dev:armhf \ + libusb-1.0-0-dev:armhf \ + libusbredirhost-dev:armhf \ + libvdeplug-dev:armhf \ + libvirglrenderer-dev:armhf \ + libvte-2.91-dev:armhf \ + 
libxen-dev:armhf \ + libzstd-dev:armhf \ + nettle-dev:armhf \ + systemtap-sdt-dev:armhf \ + xfslibs-dev:armhf \ + zlib1g-dev:armhf && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + mkdir -p /usr/local/share/meson/cross && \ + echo "[binaries]\n\ +c = '/usr/bin/arm-linux-gnueabihf-gcc'\n\ +ar = '/usr/bin/arm-linux-gnueabihf-gcc-ar'\n\ +strip = '/usr/bin/arm-linux-gnueabihf-strip'\n\ +pkgconfig = '/usr/bin/arm-linux-gnueabihf-pkg-config'\n\ +\n\ +[host_machine]\n\ +system = 'linux'\n\ +cpu_family = 'arm'\n\ +cpu = 'armhf'\n\ +endian = 'little'" > /usr/local/share/meson/cross/arm-linux-gnueabihf && \ + dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ + mkdir -p /usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/arm-linux-gnueabihf-c++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/arm-linux-gnueabihf-cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/arm-linux-gnueabihf-g++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/arm-linux-gnueabihf-gcc + +ENV ABI "arm-linux-gnueabihf" +ENV MESON_OPTS "--cross-file=arm-linux-gnueabihf" ENV QEMU_CONFIGURE_OPTS --cross-prefix=arm-linux-gnueabihf- -ENV DEF_TARGET_LIST arm-softmmu,arm-linux-user,armeb-linux-user - -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - libbz2-dev:armhf \ - liblzo2-dev:armhf \ - librdmacm-dev:armhf \ - libsnappy-dev:armhf \ - libxen-dev:armhf +ENV DEF_TARGET_LIST arm-softmmu,arm-linux-user diff --git a/tests/docker/dockerfiles/debian-hexagon-cross.docker.d/build-toolchain.sh b/tests/docker/dockerfiles/debian-hexagon-cross.d/build-toolchain.sh similarity index 100% rename from tests/docker/dockerfiles/debian-hexagon-cross.docker.d/build-toolchain.sh rename to tests/docker/dockerfiles/debian-hexagon-cross.d/build-toolchain.sh diff --git a/tests/docker/dockerfiles/debian-hexagon-cross.docker b/tests/docker/dockerfiles/debian-hexagon-cross.docker index d5dc299dc1..c4238e893f 100644 --- a/tests/docker/dockerfiles/debian-hexagon-cross.docker +++ b/tests/docker/dockerfiles/debian-hexagon-cross.docker @@ -2,12 +2,10 @@ # Docker Hexagon cross-compiler target # # This docker target is used for building hexagon tests. As it also -# needs to be able to build QEMU itself in CI we include it's -# build-deps. It is also a "stand-alone" image so as not to be -# triggered by re-builds on other base images given it takes a long -# time to build. +# needs to be able to build QEMU itself in CI we include its +# build-deps. 
# -FROM qemu/debian10 +FROM docker.io/library/debian:11-slim # Install common build utilities RUN apt update && \ @@ -15,11 +13,18 @@ RUN apt update && \ DEBIAN_FRONTEND=noninteractive eatmydata \ apt install -y --no-install-recommends \ bison \ + ca-certificates \ + clang \ cmake \ flex \ + gcc \ lld \ + make \ + ninja-build \ + python3 \ rsync \ - wget + wget \ + xz-utils ENV TOOLCHAIN_INSTALL /usr/local ENV ROOTFS /usr/local @@ -32,13 +37,13 @@ ADD build-toolchain.sh /root/hexagon-toolchain/build-toolchain.sh RUN cd /root/hexagon-toolchain && ./build-toolchain.sh -FROM debian:buster-slim +FROM docker.io/library/debian:11-slim # Duplicate deb line as deb-src RUN cat /etc/apt/sources.list | sed "s/^deb\ /deb-src /" >> /etc/apt/sources.list # Install QEMU build deps for use in CI RUN apt update && \ DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \ - DEBIAN_FRONTEND=noninteractive eatmydata apt install -yy git ninja-build && \ + DEBIAN_FRONTEND=noninteractive eatmydata apt install -yy bison flex git ninja-build && \ DEBIAN_FRONTEND=noninteractive eatmydata \ apt build-dep -yy --arch-only qemu COPY --from=0 /usr/local /usr/local diff --git a/tests/docker/dockerfiles/debian-hppa-cross.docker b/tests/docker/dockerfiles/debian-hppa-cross.docker index 3d6c65a3ef..af1c8403d8 100644 --- a/tests/docker/dockerfiles/debian-hppa-cross.docker +++ b/tests/docker/dockerfiles/debian-hppa-cross.docker @@ -1,12 +1,14 @@ # # Docker cross-compiler target # -# This docker target builds on the debian Buster base image. +# This docker target builds on the Debian Bullseye base image. # -FROM qemu/debian10 +FROM docker.io/library/debian:11-slim -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y \ gcc-hppa-linux-gnu \ libc6-dev-hppa-cross diff --git a/tests/docker/dockerfiles/debian-loongarch-cross.docker b/tests/docker/dockerfiles/debian-loongarch-cross.docker new file mode 100644 index 0000000000..a8e8e98909 --- /dev/null +++ b/tests/docker/dockerfiles/debian-loongarch-cross.docker @@ -0,0 +1,27 @@ +# +# Docker cross-compiler target +# +# This docker target uses prebuilt toolchains for LoongArch64 from: +# https://github.com/loongson/build-tools/releases +# +FROM docker.io/library/debian:11-slim + +# Duplicate deb line as deb-src +RUN cat /etc/apt/sources.list | sed "s/^deb\ /deb-src /" >> /etc/apt/sources.list + +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \ + DEBIAN_FRONTEND=noninteractive eatmydata \ + apt-get install -y --no-install-recommends \ + build-essential \ + ca-certificates \ + curl \ + gettext \ + git \ + python3-minimal + +RUN curl -#SL https://github.com/loongson/build-tools/releases/download/2022.05.29/loongarch64-clfs-5.0-cross-tools-gcc-glibc.tar.xz \ + | tar -xJC /opt + +ENV PATH $PATH:/opt/cross-tools/bin +ENV LD_LIBRARY_PATH /opt/cross-tools/lib:/opt/cross-tools/loongarch64-unknown-linux-gnu/lib:$LD_LIBRARY_PATH diff --git a/tests/docker/dockerfiles/debian-m68k-cross.docker b/tests/docker/dockerfiles/debian-m68k-cross.docker index fcb10e3534..dded71c5d2 100644 --- a/tests/docker/dockerfiles/debian-m68k-cross.docker +++ b/tests/docker/dockerfiles/debian-m68k-cross.docker @@ -1,12 +1,14 @@ # # Docker cross-compiler target # -# This docker target builds on the debian Buster base image. 
+# This docker target builds on the Debian Bullseye base image. # -FROM qemu/debian10 +FROM docker.io/library/debian:11-slim -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y \ gcc-m68k-linux-gnu \ libc6-dev-m68k-cross diff --git a/tests/docker/dockerfiles/debian-microblaze-cross.d/build-toolchain.sh b/tests/docker/dockerfiles/debian-microblaze-cross.d/build-toolchain.sh new file mode 100755 index 0000000000..23ec0aa9a7 --- /dev/null +++ b/tests/docker/dockerfiles/debian-microblaze-cross.d/build-toolchain.sh @@ -0,0 +1,88 @@ +#!/bin/bash + +set -e + +TARGET=microblaze-linux-musl +LINUX_ARCH=microblaze + +J=$(expr $(nproc) / 2) +TOOLCHAIN_INSTALL=/usr/local +TOOLCHAIN_BIN=${TOOLCHAIN_INSTALL}/bin +CROSS_SYSROOT=${TOOLCHAIN_INSTALL}/$TARGET/sys-root + +export PATH=${TOOLCHAIN_BIN}:$PATH + +# +# Grab all of the source for the toolchain bootstrap. +# + +wget https://ftp.gnu.org/gnu/binutils/binutils-2.37.tar.xz +wget https://ftp.gnu.org/gnu/gcc/gcc-11.2.0/gcc-11.2.0.tar.xz +wget https://www.musl-libc.org/releases/musl-1.2.2.tar.gz +wget https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.70.tar.xz + +tar axf binutils-2.37.tar.xz +tar axf gcc-11.2.0.tar.xz +tar axf musl-1.2.2.tar.gz +tar axf linux-5.10.70.tar.xz + +mv binutils-2.37 src-binu +mv gcc-11.2.0 src-gcc +mv musl-1.2.2 src-musl +mv linux-5.10.70 src-linux + +mkdir -p bld-hdr bld-binu bld-gcc bld-musl +mkdir -p ${CROSS_SYSROOT}/usr/include + +# +# Install kernel headers +# + +cd src-linux +make headers_install ARCH=${LINUX_ARCH} INSTALL_HDR_PATH=${CROSS_SYSROOT}/usr +cd .. + +# +# Build binutils +# + +cd bld-binu +../src-binu/configure --disable-werror \ + --prefix=${TOOLCHAIN_INSTALL} --with-sysroot --target=${TARGET} +make -j${J} +make install +cd .. + +# +# Build gcc, just the compiler so far. +# + +cd bld-gcc +../src-gcc/configure --disable-werror --disable-shared \ + --prefix=${TOOLCHAIN_INSTALL} --with-sysroot --target=${TARGET} \ + --enable-languages=c --disable-libssp --disable-libsanitizer \ + --disable-libatomic --disable-libgomp --disable-libquadmath +make -j${J} all-gcc +make install-gcc +cd .. + +# +# Build musl. +# We won't go through the extra step of building shared libraries +# because we don't actually use them in QEMU docker testing. +# + +cd bld-musl +../src-musl/configure --prefix=/usr --host=${TARGET} --disable-shared +make -j${j} +make install DESTDIR=${CROSS_SYSROOT} +cd .. + +# +# Go back and build the compiler runtime +# + +cd bld-gcc +make -j${j} +make install +cd .. diff --git a/tests/docker/dockerfiles/debian-mips-cross.docker b/tests/docker/dockerfiles/debian-mips-cross.docker index 26c154014d..7b55f0f3b2 100644 --- a/tests/docker/dockerfiles/debian-mips-cross.docker +++ b/tests/docker/dockerfiles/debian-mips-cross.docker @@ -1,32 +1,14 @@ # # Docker mips cross-compiler target # -# This docker target builds on the debian Buster base image. +# This docker target builds on the Debian Bullseye base image. 
# -FROM qemu/debian10 +FROM docker.io/library/debian:11-slim -MAINTAINER Philippe Mathieu-Daudé - -# Add the foreign architecture we want and install dependencies -RUN dpkg --add-architecture mips -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - gcc-mips-linux-gnu - -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt build-dep -yy -a mips --arch-only qemu - -# Specify the cross prefix for this image (see tests/docker/common.rc) -ENV QEMU_CONFIGURE_OPTS --cross-prefix=mips-linux-gnu- -ENV DEF_TARGET_LIST mips-softmmu,mipsel-linux-user - -# Install extra libraries to increase code coverage -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - libbz2-dev:mips \ - liblzo2-dev:mips \ - librdmacm-dev:mips \ - libsnappy-dev:mips +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y \ + gcc-mips-linux-gnu \ + libc6-dev-mips-cross diff --git a/tests/docker/dockerfiles/debian-mips64-cross.docker b/tests/docker/dockerfiles/debian-mips64-cross.docker index 09c2ba584e..afcff9726f 100644 --- a/tests/docker/dockerfiles/debian-mips64-cross.docker +++ b/tests/docker/dockerfiles/debian-mips64-cross.docker @@ -1,12 +1,14 @@ # # Docker cross-compiler target # -# This docker target builds on the debian Buster base image. +# This docker target builds on the Debian Bullseye base image. # -FROM qemu/debian10 +FROM docker.io/library/debian:11-slim -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y \ gcc-mips64-linux-gnuabi64 \ libc6-dev-mips64-cross diff --git a/tests/docker/dockerfiles/debian-mips64el-cross.docker b/tests/docker/dockerfiles/debian-mips64el-cross.docker index c990b683b7..15b0224b76 100644 --- a/tests/docker/dockerfiles/debian-mips64el-cross.docker +++ b/tests/docker/dockerfiles/debian-mips64el-cross.docker @@ -1,33 +1,168 @@ +# THIS FILE WAS AUTO-GENERATED # -# Docker mips64el cross-compiler target -# -# This docker target builds on the debian Stretch base image. 
+# $ lcitool dockerfile --layers all --cross mips64el debian-11 qemu # +# https://gitlab.com/libvirt/libvirt-ci -FROM qemu/debian10 +FROM docker.io/library/debian:11-slim -MAINTAINER Philippe Mathieu-Daudé +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y \ + bash \ + bc \ + bison \ + bsdextrautils \ + bzip2 \ + ca-certificates \ + ccache \ + dbus \ + debianutils \ + diffutils \ + exuberant-ctags \ + findutils \ + flex \ + gcovr \ + genisoimage \ + gettext \ + git \ + hostname \ + libglib2.0-dev \ + libpcre2-dev \ + libsndio-dev \ + libspice-protocol-dev \ + llvm \ + locales \ + make \ + meson \ + ncat \ + ninja-build \ + openssh-client \ + perl-base \ + pkgconf \ + python3 \ + python3-numpy \ + python3-opencv \ + python3-pillow \ + python3-pip \ + python3-sphinx \ + python3-sphinx-rtd-theme \ + python3-venv \ + python3-yaml \ + rpm2cpio \ + sed \ + sparse \ + tar \ + tesseract-ocr \ + tesseract-ocr-eng \ + texinfo && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ + dpkg-reconfigure locales -# Add the foreign architecture we want and install dependencies -RUN dpkg --add-architecture mips64el && \ - apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - gcc-mips64el-linux-gnuabi64 +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE "/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt build-dep -yy -a mips64el --arch-only qemu +RUN export DEBIAN_FRONTEND=noninteractive && \ + dpkg --add-architecture mips64el && \ + eatmydata apt-get update && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y dpkg-dev && \ + eatmydata apt-get install --no-install-recommends -y \ + g++-mips64el-linux-gnuabi64 \ + gcc-mips64el-linux-gnuabi64 \ + libaio-dev:mips64el \ + libasound2-dev:mips64el \ + libattr1-dev:mips64el \ + libbpf-dev:mips64el \ + libbrlapi-dev:mips64el \ + libbz2-dev:mips64el \ + libc6-dev:mips64el \ + libcacard-dev:mips64el \ + libcap-ng-dev:mips64el \ + libcapstone-dev:mips64el \ + libcmocka-dev:mips64el \ + libcurl4-gnutls-dev:mips64el \ + libdaxctl-dev:mips64el \ + libdrm-dev:mips64el \ + libepoxy-dev:mips64el \ + libfdt-dev:mips64el \ + libffi-dev:mips64el \ + libfuse3-dev:mips64el \ + libgbm-dev:mips64el \ + libgcrypt20-dev:mips64el \ + libglib2.0-dev:mips64el \ + libglusterfs-dev:mips64el \ + libgnutls28-dev:mips64el \ + libgtk-3-dev:mips64el \ + libibumad-dev:mips64el \ + libibverbs-dev:mips64el \ + libiscsi-dev:mips64el \ + libjemalloc-dev:mips64el \ + libjpeg62-turbo-dev:mips64el \ + libjson-c-dev:mips64el \ + liblttng-ust-dev:mips64el \ + liblzo2-dev:mips64el \ + libncursesw5-dev:mips64el \ + libnfs-dev:mips64el \ + libnuma-dev:mips64el \ + libpam0g-dev:mips64el \ + libpixman-1-dev:mips64el \ + libpng-dev:mips64el \ + libpulse-dev:mips64el \ + librbd-dev:mips64el \ + librdmacm-dev:mips64el \ + libsasl2-dev:mips64el \ + libsdl2-dev:mips64el \ + libsdl2-image-dev:mips64el \ + libseccomp-dev:mips64el \ + libselinux1-dev:mips64el \ + libslirp-dev:mips64el \ + libsnappy-dev:mips64el \ + libspice-server-dev:mips64el \ + libssh-gcrypt-dev:mips64el \ + libsystemd-dev:mips64el \ + libtasn1-6-dev:mips64el \ + 
libudev-dev:mips64el \ + liburing-dev:mips64el \ + libusb-1.0-0-dev:mips64el \ + libusbredirhost-dev:mips64el \ + libvdeplug-dev:mips64el \ + libvirglrenderer-dev:mips64el \ + libvte-2.91-dev:mips64el \ + libzstd-dev:mips64el \ + nettle-dev:mips64el \ + systemtap-sdt-dev:mips64el \ + xfslibs-dev:mips64el \ + zlib1g-dev:mips64el && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + mkdir -p /usr/local/share/meson/cross && \ + echo "[binaries]\n\ +c = '/usr/bin/mips64el-linux-gnuabi64-gcc'\n\ +ar = '/usr/bin/mips64el-linux-gnuabi64-gcc-ar'\n\ +strip = '/usr/bin/mips64el-linux-gnuabi64-strip'\n\ +pkgconfig = '/usr/bin/mips64el-linux-gnuabi64-pkg-config'\n\ +\n\ +[host_machine]\n\ +system = 'linux'\n\ +cpu_family = 'mips64'\n\ +cpu = 'mips64el'\n\ +endian = 'little'" > /usr/local/share/meson/cross/mips64el-linux-gnuabi64 && \ + dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ + mkdir -p /usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/mips64el-linux-gnuabi64-c++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/mips64el-linux-gnuabi64-cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/mips64el-linux-gnuabi64-g++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/mips64el-linux-gnuabi64-gcc -# Specify the cross prefix for this image (see tests/docker/common.rc) +ENV ABI "mips64el-linux-gnuabi64" +ENV MESON_OPTS "--cross-file=mips64el-linux-gnuabi64" ENV QEMU_CONFIGURE_OPTS --cross-prefix=mips64el-linux-gnuabi64- ENV DEF_TARGET_LIST mips64el-softmmu,mips64el-linux-user - -# Install extra libraries to increase code coverage -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - libbz2-dev:mips64el \ - liblzo2-dev:mips64el \ - librdmacm-dev:mips64el \ - libsnappy-dev:mips64el diff --git a/tests/docker/dockerfiles/debian-mipsel-cross.docker b/tests/docker/dockerfiles/debian-mipsel-cross.docker index 0e5dd42d3c..a5d3ca6e2f 100644 --- a/tests/docker/dockerfiles/debian-mipsel-cross.docker +++ b/tests/docker/dockerfiles/debian-mipsel-cross.docker @@ -1,31 +1,168 @@ +# THIS FILE WAS AUTO-GENERATED # -# Docker mipsel cross-compiler target +# $ lcitool dockerfile --layers all --cross mipsel debian-11 qemu # -# This docker target builds on the debian Stretch base image. 
-# -FROM qemu/debian10 +# https://gitlab.com/libvirt/libvirt-ci -MAINTAINER Philippe Mathieu-Daudé +FROM docker.io/library/debian:11-slim -# Add the foreign architecture we want and install dependencies -RUN dpkg --add-architecture mipsel -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - gcc-mipsel-linux-gnu +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y \ + bash \ + bc \ + bison \ + bsdextrautils \ + bzip2 \ + ca-certificates \ + ccache \ + dbus \ + debianutils \ + diffutils \ + exuberant-ctags \ + findutils \ + flex \ + gcovr \ + genisoimage \ + gettext \ + git \ + hostname \ + libglib2.0-dev \ + libpcre2-dev \ + libsndio-dev \ + libspice-protocol-dev \ + llvm \ + locales \ + make \ + meson \ + ncat \ + ninja-build \ + openssh-client \ + perl-base \ + pkgconf \ + python3 \ + python3-numpy \ + python3-opencv \ + python3-pillow \ + python3-pip \ + python3-sphinx \ + python3-sphinx-rtd-theme \ + python3-venv \ + python3-yaml \ + rpm2cpio \ + sed \ + sparse \ + tar \ + tesseract-ocr \ + tesseract-ocr-eng \ + texinfo && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ + dpkg-reconfigure locales -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt build-dep -yy -a mipsel --arch-only qemu +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE "/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" -# Specify the cross prefix for this image (see tests/docker/common.rc) +RUN export DEBIAN_FRONTEND=noninteractive && \ + dpkg --add-architecture mipsel && \ + eatmydata apt-get update && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y dpkg-dev && \ + eatmydata apt-get install --no-install-recommends -y \ + g++-mipsel-linux-gnu \ + gcc-mipsel-linux-gnu \ + libaio-dev:mipsel \ + libasound2-dev:mipsel \ + libattr1-dev:mipsel \ + libbpf-dev:mipsel \ + libbrlapi-dev:mipsel \ + libbz2-dev:mipsel \ + libc6-dev:mipsel \ + libcacard-dev:mipsel \ + libcap-ng-dev:mipsel \ + libcapstone-dev:mipsel \ + libcmocka-dev:mipsel \ + libcurl4-gnutls-dev:mipsel \ + libdaxctl-dev:mipsel \ + libdrm-dev:mipsel \ + libepoxy-dev:mipsel \ + libfdt-dev:mipsel \ + libffi-dev:mipsel \ + libfuse3-dev:mipsel \ + libgbm-dev:mipsel \ + libgcrypt20-dev:mipsel \ + libglib2.0-dev:mipsel \ + libglusterfs-dev:mipsel \ + libgnutls28-dev:mipsel \ + libgtk-3-dev:mipsel \ + libibumad-dev:mipsel \ + libibverbs-dev:mipsel \ + libiscsi-dev:mipsel \ + libjemalloc-dev:mipsel \ + libjpeg62-turbo-dev:mipsel \ + libjson-c-dev:mipsel \ + liblttng-ust-dev:mipsel \ + liblzo2-dev:mipsel \ + libncursesw5-dev:mipsel \ + libnfs-dev:mipsel \ + libnuma-dev:mipsel \ + libpam0g-dev:mipsel \ + libpixman-1-dev:mipsel \ + libpng-dev:mipsel \ + libpulse-dev:mipsel \ + librbd-dev:mipsel \ + librdmacm-dev:mipsel \ + libsasl2-dev:mipsel \ + libsdl2-dev:mipsel \ + libsdl2-image-dev:mipsel \ + libseccomp-dev:mipsel \ + libselinux1-dev:mipsel \ + libslirp-dev:mipsel \ + libsnappy-dev:mipsel \ + libspice-server-dev:mipsel \ + libssh-gcrypt-dev:mipsel \ + libsystemd-dev:mipsel \ + libtasn1-6-dev:mipsel \ + libudev-dev:mipsel \ + liburing-dev:mipsel \ + libusb-1.0-0-dev:mipsel \ + libusbredirhost-dev:mipsel \ + libvdeplug-dev:mipsel \ + 
libvirglrenderer-dev:mipsel \ + libvte-2.91-dev:mipsel \ + libzstd-dev:mipsel \ + nettle-dev:mipsel \ + systemtap-sdt-dev:mipsel \ + xfslibs-dev:mipsel \ + zlib1g-dev:mipsel && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + mkdir -p /usr/local/share/meson/cross && \ + echo "[binaries]\n\ +c = '/usr/bin/mipsel-linux-gnu-gcc'\n\ +ar = '/usr/bin/mipsel-linux-gnu-gcc-ar'\n\ +strip = '/usr/bin/mipsel-linux-gnu-strip'\n\ +pkgconfig = '/usr/bin/mipsel-linux-gnu-pkg-config'\n\ +\n\ +[host_machine]\n\ +system = 'linux'\n\ +cpu_family = 'mips'\n\ +cpu = 'mipsel'\n\ +endian = 'little'" > /usr/local/share/meson/cross/mipsel-linux-gnu && \ + dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ + mkdir -p /usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/mipsel-linux-gnu-c++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/mipsel-linux-gnu-cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/mipsel-linux-gnu-g++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/mipsel-linux-gnu-gcc + +ENV ABI "mipsel-linux-gnu" +ENV MESON_OPTS "--cross-file=mipsel-linux-gnu" ENV QEMU_CONFIGURE_OPTS --cross-prefix=mipsel-linux-gnu- - -# Install extra libraries to increase code coverage -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - libbz2-dev:mipsel \ - liblzo2-dev:mipsel \ - librdmacm-dev:mipsel \ - libsnappy-dev:mipsel +ENV DEF_TARGET_LIST mipsel-softmmu,mipsel-linux-user diff --git a/tests/docker/dockerfiles/debian-native.docker b/tests/docker/dockerfiles/debian-native.docker new file mode 100644 index 0000000000..8dd033097c --- /dev/null +++ b/tests/docker/dockerfiles/debian-native.docker @@ -0,0 +1,49 @@ +# +# Docker Debian Native +# +# This is intended to build QEMU on native host systems. Debian is +# chosen due to the broadest range on supported host systems for QEMU. +# +# This docker target is based on the docker.io Debian Bullseye base +# image rather than QEMU's base because we would otherwise confuse the +# build grabbing stuff from the registry built for other +# architectures. 
+# +FROM docker.io/library/debian:bullseye-slim +MAINTAINER Alex Bennée + +# Duplicate deb line as deb-src +RUN cat /etc/apt/sources.list | sed "s/^deb\ /deb-src /" >> /etc/apt/sources.list + +# Install common build utilities +RUN apt update && \ + DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata + +RUN apt update && \ + DEBIAN_FRONTEND=noninteractive eatmydata \ + apt build-dep -yy --arch-only qemu + +RUN apt update && \ + DEBIAN_FRONTEND=noninteractive eatmydata \ + apt install -y --no-install-recommends \ + cscope \ + genisoimage \ + exuberant-ctags \ + global \ + libbz2-dev \ + liblzo2-dev \ + libgcrypt20-dev \ + libfdt-dev \ + librdmacm-dev \ + libsasl2-dev \ + libsnappy-dev \ + libvte-dev \ + netcat-openbsd \ + ninja-build \ + openssh-client \ + python3-numpy \ + python3-opencv \ + python3-venv + +ENV QEMU_CONFIGURE_OPTS $QEMU_CONFIGURE_OPTS +ENV DEF_TARGET_LIST "none" diff --git a/tests/docker/dockerfiles/debian-nios2-cross.d/build-toolchain.sh b/tests/docker/dockerfiles/debian-nios2-cross.d/build-toolchain.sh new file mode 100755 index 0000000000..ba3c9d8aff --- /dev/null +++ b/tests/docker/dockerfiles/debian-nios2-cross.d/build-toolchain.sh @@ -0,0 +1,87 @@ +#!/bin/bash + +set -e + +TARGET=nios2-linux-gnu +LINUX_ARCH=nios2 + +J=$(expr $(nproc) / 2) +TOOLCHAIN_INSTALL=/usr/local +TOOLCHAIN_BIN=${TOOLCHAIN_INSTALL}/bin +CROSS_SYSROOT=${TOOLCHAIN_INSTALL}/$TARGET/sys-root + +export PATH=${TOOLCHAIN_BIN}:$PATH + +# +# Grab all of the source for the toolchain bootstrap. +# + +wget https://ftp.gnu.org/gnu/binutils/binutils-2.37.tar.xz +wget https://ftp.gnu.org/gnu/gcc/gcc-11.2.0/gcc-11.2.0.tar.xz +wget https://ftp.gnu.org/gnu/glibc/glibc-2.34.tar.xz +wget https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.70.tar.xz + +tar axf binutils-2.37.tar.xz +tar axf gcc-11.2.0.tar.xz +tar axf glibc-2.34.tar.xz +tar axf linux-5.10.70.tar.xz + +mv binutils-2.37 src-binu +mv gcc-11.2.0 src-gcc +mv glibc-2.34 src-glibc +mv linux-5.10.70 src-linux + +mkdir -p bld-hdr bld-binu bld-gcc bld-glibc +mkdir -p ${CROSS_SYSROOT}/usr/include + +# +# Install kernel and glibc headers +# + +cd src-linux +make headers_install ARCH=${LINUX_ARCH} INSTALL_HDR_PATH=${CROSS_SYSROOT}/usr +cd .. + +cd bld-hdr +../src-glibc/configure --prefix=/usr --host=${TARGET} +make install-headers DESTDIR=${CROSS_SYSROOT} +touch ${CROSS_SYSROOT}/usr/include/gnu/stubs.h +cd .. + +# +# Build binutils +# + +cd bld-binu +../src-binu/configure --disable-werror \ + --prefix=${TOOLCHAIN_INSTALL} --with-sysroot --target=${TARGET} +make -j${J} +make install +cd .. + +# +# Build gcc, without shared libraries, because we do not yet +# have a shared libc against which to link. +# + +cd bld-gcc +../src-gcc/configure --disable-werror --disable-shared \ + --prefix=${TOOLCHAIN_INSTALL} --with-sysroot --target=${TARGET} \ + --enable-languages=c --disable-libssp --disable-libsanitizer \ + --disable-libatomic --disable-libgomp --disable-libquadmath +make -j${J} +make install +cd .. + +# +# Build glibc +# There are a few random things that use c++ but we didn't build that +# cross-compiler. We can get away without them. Disable CXX so that +# glibc doesn't try to use the host c++ compiler. +# + +cd bld-glibc +CXX=false ../src-glibc/configure --prefix=/usr --host=${TARGET} +make -j${j} +make install DESTDIR=${CROSS_SYSROOT} +cd .. 
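Both toolchain bootstrap scripts added above (microblaze/musl and nios2/glibc) follow the same pattern: install the kernel headers, build binutils, build a C-only gcc without shared libraries, then build the C library into the target sysroot, all under /usr/local so the tools land on the image's PATH. As a rough smoke test of the result (an illustrative sketch, not something the patch itself adds), one could build a static hello-world with the new nios2 compiler; the microblaze toolchain can be checked the same way with its own triple:

# Hypothetical post-build check; assumes the nios2-linux-gnu toolchain
# installed into /usr/local by the script above.
cat > /tmp/hello.c <<'EOF'
#include <stdio.h>
int main(void) { puts("hello, nios2"); return 0; }
EOF
# Static linking keeps the test independent of the target's dynamic loader.
nios2-linux-gnu-gcc -static -O2 -o /tmp/hello /tmp/hello.c
# The ELF header should report the Nios II machine type.
nios2-linux-gnu-readelf -h /tmp/hello | grep -i machine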
diff --git a/tests/docker/dockerfiles/debian-powerpc-test-cross.docker b/tests/docker/dockerfiles/debian-powerpc-test-cross.docker index 36b336f709..d6b2909cc4 100644 --- a/tests/docker/dockerfiles/debian-powerpc-test-cross.docker +++ b/tests/docker/dockerfiles/debian-powerpc-test-cross.docker @@ -1,13 +1,15 @@ # # Docker powerpc/ppc64/ppc64le cross-compiler target # -# This docker target builds on the debian Bullseye base image. +# This docker target builds on the Debian Bullseye base image. # -FROM qemu/debian11 +FROM docker.io/library/debian:11-slim -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y \ gcc-powerpc-linux-gnu \ libc6-dev-powerpc-cross \ gcc-10-powerpc64-linux-gnu \ diff --git a/tests/docker/dockerfiles/debian-ppc64el-cross.docker b/tests/docker/dockerfiles/debian-ppc64el-cross.docker index 1146a06be6..d2954e61f6 100644 --- a/tests/docker/dockerfiles/debian-ppc64el-cross.docker +++ b/tests/docker/dockerfiles/debian-ppc64el-cross.docker @@ -1,28 +1,170 @@ +# THIS FILE WAS AUTO-GENERATED # -# Docker ppc64el cross-compiler target +# $ lcitool dockerfile --layers all --cross ppc64le debian-11 qemu # -# This docker target builds on the debian Stretch base image. -# -FROM qemu/debian10 +# https://gitlab.com/libvirt/libvirt-ci -# Add the foreign architecture we want and install dependencies -RUN dpkg --add-architecture ppc64el && \ - apt update && \ - apt install -yy crossbuild-essential-ppc64el +FROM docker.io/library/debian:11-slim -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt build-dep -yy -a ppc64el --arch-only qemu +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y \ + bash \ + bc \ + bison \ + bsdextrautils \ + bzip2 \ + ca-certificates \ + ccache \ + dbus \ + debianutils \ + diffutils \ + exuberant-ctags \ + findutils \ + flex \ + gcovr \ + genisoimage \ + gettext \ + git \ + hostname \ + libglib2.0-dev \ + libpcre2-dev \ + libsndio-dev \ + libspice-protocol-dev \ + llvm \ + locales \ + make \ + meson \ + ncat \ + ninja-build \ + openssh-client \ + perl-base \ + pkgconf \ + python3 \ + python3-numpy \ + python3-opencv \ + python3-pillow \ + python3-pip \ + python3-sphinx \ + python3-sphinx-rtd-theme \ + python3-venv \ + python3-yaml \ + rpm2cpio \ + sed \ + sparse \ + tar \ + tesseract-ocr \ + tesseract-ocr-eng \ + texinfo && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ + dpkg-reconfigure locales -# Specify the cross prefix for this image (see tests/docker/common.rc) +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE "/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" + +RUN export DEBIAN_FRONTEND=noninteractive && \ + dpkg --add-architecture ppc64el && \ + eatmydata apt-get update && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y dpkg-dev && \ + eatmydata apt-get install --no-install-recommends -y \ + g++-powerpc64le-linux-gnu \ + gcc-powerpc64le-linux-gnu \ + libaio-dev:ppc64el \ + libasan5:ppc64el \ + libasound2-dev:ppc64el \ + 
libattr1-dev:ppc64el \ + libbpf-dev:ppc64el \ + libbrlapi-dev:ppc64el \ + libbz2-dev:ppc64el \ + libc6-dev:ppc64el \ + libcacard-dev:ppc64el \ + libcap-ng-dev:ppc64el \ + libcapstone-dev:ppc64el \ + libcmocka-dev:ppc64el \ + libcurl4-gnutls-dev:ppc64el \ + libdaxctl-dev:ppc64el \ + libdrm-dev:ppc64el \ + libepoxy-dev:ppc64el \ + libfdt-dev:ppc64el \ + libffi-dev:ppc64el \ + libfuse3-dev:ppc64el \ + libgbm-dev:ppc64el \ + libgcrypt20-dev:ppc64el \ + libglib2.0-dev:ppc64el \ + libglusterfs-dev:ppc64el \ + libgnutls28-dev:ppc64el \ + libgtk-3-dev:ppc64el \ + libibumad-dev:ppc64el \ + libibverbs-dev:ppc64el \ + libiscsi-dev:ppc64el \ + libjemalloc-dev:ppc64el \ + libjpeg62-turbo-dev:ppc64el \ + libjson-c-dev:ppc64el \ + liblttng-ust-dev:ppc64el \ + liblzo2-dev:ppc64el \ + libncursesw5-dev:ppc64el \ + libnfs-dev:ppc64el \ + libnuma-dev:ppc64el \ + libpam0g-dev:ppc64el \ + libpixman-1-dev:ppc64el \ + libpng-dev:ppc64el \ + libpulse-dev:ppc64el \ + librbd-dev:ppc64el \ + librdmacm-dev:ppc64el \ + libsasl2-dev:ppc64el \ + libsdl2-dev:ppc64el \ + libsdl2-image-dev:ppc64el \ + libseccomp-dev:ppc64el \ + libselinux1-dev:ppc64el \ + libslirp-dev:ppc64el \ + libsnappy-dev:ppc64el \ + libspice-server-dev:ppc64el \ + libssh-gcrypt-dev:ppc64el \ + libsystemd-dev:ppc64el \ + libtasn1-6-dev:ppc64el \ + libubsan1:ppc64el \ + libudev-dev:ppc64el \ + liburing-dev:ppc64el \ + libusb-1.0-0-dev:ppc64el \ + libusbredirhost-dev:ppc64el \ + libvdeplug-dev:ppc64el \ + libvirglrenderer-dev:ppc64el \ + libvte-2.91-dev:ppc64el \ + libzstd-dev:ppc64el \ + nettle-dev:ppc64el \ + systemtap-sdt-dev:ppc64el \ + xfslibs-dev:ppc64el \ + zlib1g-dev:ppc64el && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + mkdir -p /usr/local/share/meson/cross && \ + echo "[binaries]\n\ +c = '/usr/bin/powerpc64le-linux-gnu-gcc'\n\ +ar = '/usr/bin/powerpc64le-linux-gnu-gcc-ar'\n\ +strip = '/usr/bin/powerpc64le-linux-gnu-strip'\n\ +pkgconfig = '/usr/bin/powerpc64le-linux-gnu-pkg-config'\n\ +\n\ +[host_machine]\n\ +system = 'linux'\n\ +cpu_family = 'ppc64'\n\ +cpu = 'powerpc64le'\n\ +endian = 'little'" > /usr/local/share/meson/cross/powerpc64le-linux-gnu && \ + dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ + mkdir -p /usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/powerpc64le-linux-gnu-c++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/powerpc64le-linux-gnu-cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/powerpc64le-linux-gnu-g++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/powerpc64le-linux-gnu-gcc + +ENV ABI "powerpc64le-linux-gnu" +ENV MESON_OPTS "--cross-file=powerpc64le-linux-gnu" ENV QEMU_CONFIGURE_OPTS --cross-prefix=powerpc64le-linux-gnu- -ENV DEF_TARGET_LIST ppc64-softmmu,ppc64-linux-user,ppc64abi32-linux-user - -# Install extra libraries to increase code coverage -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - libbz2-dev:ppc64el \ - liblzo2-dev:ppc64el \ - librdmacm-dev:ppc64el \ - libsnappy-dev:ppc64el +ENV DEF_TARGET_LIST ppc64-softmmu,ppc64-linux-user diff --git a/tests/docker/dockerfiles/debian-riscv64-cross.docker b/tests/docker/dockerfiles/debian-riscv64-cross.docker index 2bbff19772..9715791e0b 100644 --- a/tests/docker/dockerfiles/debian-riscv64-cross.docker +++ b/tests/docker/dockerfiles/debian-riscv64-cross.docker @@ -1,12 +1,51 @@ # -# Docker cross-compiler target +# Docker cross-compiler target for riscv64 # 
-# This docker target builds on the debian Buster base image. +# Currently the only distro that gets close to cross compiling riscv64 +# images is Debian Sid (with unofficial ports). As this is a moving +# target we keep the library list minimal and are aiming to migrate +# from this hack as soon as we are able. # -FROM qemu/debian10 +FROM docker.io/library/debian:sid-slim + +# Add ports +RUN apt update && \ + DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \ + DEBIAN_FRONTEND=noninteractive eatmydata apt update -yy && \ + DEBIAN_FRONTEND=noninteractive eatmydata apt upgrade -yy + +# Install common build utilities +RUN DEBIAN_FRONTEND=noninteractive eatmydata apt install -yy \ + bison \ + bc \ + build-essential \ + ca-certificates \ + debian-ports-archive-keyring \ + dpkg-dev \ + flex \ + gettext \ + git \ + libglib2.0-dev \ + ninja-build \ + pkg-config \ + python3 + +# Add ports and riscv64 architecture +RUN echo "deb http://ftp.ports.debian.org/debian-ports/ sid main" >> /etc/apt/sources.list +RUN dpkg --add-architecture riscv64 + +# Duplicate deb line as deb-src +RUN cat /etc/apt/sources.list | sed "s/^deb\ /deb-src /" >> /etc/apt/sources.list RUN apt update && \ DEBIAN_FRONTEND=noninteractive eatmydata \ apt install -y --no-install-recommends \ - gcc-riscv64-linux-gnu \ - libc6-dev-riscv64-cross + gcc-riscv64-linux-gnu \ + libc6-dev-riscv64-cross \ + libffi-dev:riscv64 \ + libglib2.0-dev:riscv64 \ + libpixman-1-dev:riscv64 + +# Specify the cross prefix for this image (see tests/docker/common.rc) +ENV QEMU_CONFIGURE_OPTS --cross-prefix=riscv64-linux-gnu- +ENV DEF_TARGET_LIST riscv64-softmmu,riscv64-linux-user diff --git a/tests/docker/dockerfiles/debian-riscv64-test-cross.docker b/tests/docker/dockerfiles/debian-riscv64-test-cross.docker new file mode 100644 index 0000000000..e5f83a5aeb --- /dev/null +++ b/tests/docker/dockerfiles/debian-riscv64-test-cross.docker @@ -0,0 +1,14 @@ +# +# Docker cross-compiler target +# +# This docker target builds on the Debian Bullseye base image. +# +FROM docker.io/library/debian:11-slim + +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y \ + gcc-riscv64-linux-gnu \ + libc6-dev-riscv64-cross diff --git a/tests/docker/dockerfiles/debian-s390x-cross.docker b/tests/docker/dockerfiles/debian-s390x-cross.docker index 9f2ab51eb0..d43ce16317 100644 --- a/tests/docker/dockerfiles/debian-s390x-cross.docker +++ b/tests/docker/dockerfiles/debian-s390x-cross.docker @@ -1,33 +1,169 @@ +# THIS FILE WAS AUTO-GENERATED # -# Docker s390 cross-compiler target +# $ lcitool dockerfile --layers all --cross s390x debian-11 qemu # -# This docker target builds on the debian Stretch base image. 
-# -FROM qemu/debian10 +# https://gitlab.com/libvirt/libvirt-ci -# Add the s390x architecture -RUN dpkg --add-architecture s390x +FROM docker.io/library/debian:11-slim -# Grab the updated list of packages -RUN apt update && apt dist-upgrade -yy -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - gcc-multilib-s390x-linux-gnu +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y \ + bash \ + bc \ + bison \ + bsdextrautils \ + bzip2 \ + ca-certificates \ + ccache \ + dbus \ + debianutils \ + diffutils \ + exuberant-ctags \ + findutils \ + flex \ + gcovr \ + genisoimage \ + gettext \ + git \ + hostname \ + libglib2.0-dev \ + libpcre2-dev \ + libsndio-dev \ + libspice-protocol-dev \ + llvm \ + locales \ + make \ + meson \ + ncat \ + ninja-build \ + openssh-client \ + perl-base \ + pkgconf \ + python3 \ + python3-numpy \ + python3-opencv \ + python3-pillow \ + python3-pip \ + python3-sphinx \ + python3-sphinx-rtd-theme \ + python3-venv \ + python3-yaml \ + rpm2cpio \ + sed \ + sparse \ + tar \ + tesseract-ocr \ + tesseract-ocr-eng \ + texinfo && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ + dpkg-reconfigure locales -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt build-dep -yy -a s390x --arch-only qemu +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE "/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" -# Specify the cross prefix for this image (see tests/docker/common.rc) +RUN export DEBIAN_FRONTEND=noninteractive && \ + dpkg --add-architecture s390x && \ + eatmydata apt-get update && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y dpkg-dev && \ + eatmydata apt-get install --no-install-recommends -y \ + g++-s390x-linux-gnu \ + gcc-s390x-linux-gnu \ + libaio-dev:s390x \ + libasan5:s390x \ + libasound2-dev:s390x \ + libattr1-dev:s390x \ + libbpf-dev:s390x \ + libbrlapi-dev:s390x \ + libbz2-dev:s390x \ + libc6-dev:s390x \ + libcacard-dev:s390x \ + libcap-ng-dev:s390x \ + libcapstone-dev:s390x \ + libcmocka-dev:s390x \ + libcurl4-gnutls-dev:s390x \ + libdaxctl-dev:s390x \ + libdrm-dev:s390x \ + libepoxy-dev:s390x \ + libfdt-dev:s390x \ + libffi-dev:s390x \ + libfuse3-dev:s390x \ + libgbm-dev:s390x \ + libgcrypt20-dev:s390x \ + libglib2.0-dev:s390x \ + libglusterfs-dev:s390x \ + libgnutls28-dev:s390x \ + libgtk-3-dev:s390x \ + libibumad-dev:s390x \ + libibverbs-dev:s390x \ + libiscsi-dev:s390x \ + libjemalloc-dev:s390x \ + libjpeg62-turbo-dev:s390x \ + libjson-c-dev:s390x \ + liblttng-ust-dev:s390x \ + liblzo2-dev:s390x \ + libncursesw5-dev:s390x \ + libnfs-dev:s390x \ + libnuma-dev:s390x \ + libpam0g-dev:s390x \ + libpixman-1-dev:s390x \ + libpng-dev:s390x \ + libpulse-dev:s390x \ + librbd-dev:s390x \ + librdmacm-dev:s390x \ + libsasl2-dev:s390x \ + libsdl2-dev:s390x \ + libsdl2-image-dev:s390x \ + libseccomp-dev:s390x \ + libselinux1-dev:s390x \ + libslirp-dev:s390x \ + libsnappy-dev:s390x \ + libssh-gcrypt-dev:s390x \ + libsystemd-dev:s390x \ + libtasn1-6-dev:s390x \ + libubsan1:s390x \ + libudev-dev:s390x \ + liburing-dev:s390x \ + libusb-1.0-0-dev:s390x \ + libusbredirhost-dev:s390x \ + libvdeplug-dev:s390x \ + libvirglrenderer-dev:s390x \ + libvte-2.91-dev:s390x \ 
+ libzstd-dev:s390x \ + nettle-dev:s390x \ + systemtap-sdt-dev:s390x \ + xfslibs-dev:s390x \ + zlib1g-dev:s390x && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + mkdir -p /usr/local/share/meson/cross && \ + echo "[binaries]\n\ +c = '/usr/bin/s390x-linux-gnu-gcc'\n\ +ar = '/usr/bin/s390x-linux-gnu-gcc-ar'\n\ +strip = '/usr/bin/s390x-linux-gnu-strip'\n\ +pkgconfig = '/usr/bin/s390x-linux-gnu-pkg-config'\n\ +\n\ +[host_machine]\n\ +system = 'linux'\n\ +cpu_family = 's390x'\n\ +cpu = 's390x'\n\ +endian = 'big'" > /usr/local/share/meson/cross/s390x-linux-gnu && \ + dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ + mkdir -p /usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/s390x-linux-gnu-c++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/s390x-linux-gnu-cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/s390x-linux-gnu-g++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/s390x-linux-gnu-gcc + +ENV ABI "s390x-linux-gnu" +ENV MESON_OPTS "--cross-file=s390x-linux-gnu" ENV QEMU_CONFIGURE_OPTS --cross-prefix=s390x-linux-gnu- ENV DEF_TARGET_LIST s390x-softmmu,s390x-linux-user - -# Install extra libraries to increase code coverage -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - libbz2-dev:s390x \ - liblzo2-dev:s390x \ - librdmacm-dev:s390x \ - libsnappy-dev:s390x diff --git a/tests/docker/dockerfiles/debian-sh4-cross.docker b/tests/docker/dockerfiles/debian-sh4-cross.docker index fd3af89575..d48ed9065f 100644 --- a/tests/docker/dockerfiles/debian-sh4-cross.docker +++ b/tests/docker/dockerfiles/debian-sh4-cross.docker @@ -1,12 +1,14 @@ # # Docker cross-compiler target # -# This docker target builds on the debian Buster base image. +# This docker target builds on the Debian Bullseye base image. # -FROM qemu/debian10 +FROM docker.io/library/debian:11-slim -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y \ gcc-sh4-linux-gnu \ libc6-dev-sh4-cross diff --git a/tests/docker/dockerfiles/debian-sparc64-cross.docker b/tests/docker/dockerfiles/debian-sparc64-cross.docker index f4bb9b561c..8d3d306bc1 100644 --- a/tests/docker/dockerfiles/debian-sparc64-cross.docker +++ b/tests/docker/dockerfiles/debian-sparc64-cross.docker @@ -1,12 +1,14 @@ # # Docker cross-compiler target # -# This docker target builds on the debian Buster base image. +# This docker target builds on the Debian Bullseye base image. 
 #
-FROM qemu/debian10
+FROM docker.io/library/debian:11-slim
-RUN apt update && \
- DEBIAN_FRONTEND=noninteractive eatmydata \
- apt install -y --no-install-recommends \
+RUN export DEBIAN_FRONTEND=noninteractive && \
+ apt-get update && \
+ apt-get install -y eatmydata && \
+ eatmydata apt-get dist-upgrade -y && \
+ eatmydata apt-get install --no-install-recommends -y \
 gcc-sparc64-linux-gnu \
 libc6-dev-sparc64-cross
diff --git a/tests/docker/dockerfiles/debian-toolchain.docker b/tests/docker/dockerfiles/debian-toolchain.docker
new file mode 100644
index 0000000000..d3d4d3344e
--- /dev/null
+++ b/tests/docker/dockerfiles/debian-toolchain.docker
@@ -0,0 +1,37 @@
+#
+# Docker toolchain cross-compiler
+#
+# This dockerfile is used for building a cross-compiler toolchain.
+# The script for building the toolchain is supplied via extra-files.
+#
+FROM docker.io/library/debian:11-slim
+
+# Install build utilities for building gcc and glibc.
+# ??? The build-dep isn't working, missing a number of
+# minimal build dependencies, e.g. libmpc.
+
+RUN apt update && \
+ DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \
+ DEBIAN_FRONTEND=noninteractive eatmydata \
+ apt install -y --no-install-recommends \
+ bison \
+ ca-certificates \
+ flex \
+ gawk \
+ libmpc-dev \
+ libmpfr-dev \
+ rsync \
+ texinfo \
+ wget && \
+ DEBIAN_FRONTEND=noninteractive eatmydata \
+ apt build-dep -yy --arch-only gcc glibc
+
+ADD build-toolchain.sh /root/build-toolchain.sh
+
+RUN cd /root && ./build-toolchain.sh
+
+# Throw away the extra toolchain build deps, the downloaded source,
+# and the build trees by restoring the original bullseye image,
+# then copying the built toolchain from stage 0.
+FROM docker.io/library/debian:bullseye-slim
+COPY --from=0 /usr/local /usr/local
diff --git a/tests/docker/dockerfiles/debian-tricore-cross.docker b/tests/docker/dockerfiles/debian-tricore-cross.docker
index d8df2c6117..b573b9ded2 100644
--- a/tests/docker/dockerfiles/debian-tricore-cross.docker
+++ b/tests/docker/dockerfiles/debian-tricore-cross.docker
@@ -16,6 +16,7 @@ MAINTAINER Philippe Mathieu-Daudé
 RUN apt update && \
 DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \
 DEBIAN_FRONTEND=noninteractive eatmydata apt install -yy \
+ bison \
 bzip2 \
 ca-certificates \
 ccache \
@@ -24,7 +25,6 @@ RUN apt update && \
 git \
 libglib2.0-dev \
 libpixman-1-dev \
- libtest-harness-perl \
 locales \
 make \
 ninja-build \
diff --git a/tests/docker/dockerfiles/debian-xtensa-cross.docker b/tests/docker/dockerfiles/debian-xtensa-cross.docker
index 2f11b3b7bc..aebfabdd6e 100644
--- a/tests/docker/dockerfiles/debian-xtensa-cross.docker
+++ b/tests/docker/dockerfiles/debian-xtensa-cross.docker
@@ -5,7 +5,7 @@
 # using a prebuilt toolchains for Xtensa cores from:
 # https://github.com/foss-xtensa/toolchain/releases
 #
-FROM docker.io/library/debian:stretch-slim
+FROM docker.io/library/debian:11-slim
 RUN apt-get update && \
 DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \
diff --git a/tests/docker/dockerfiles/debian10.docker b/tests/docker/dockerfiles/debian10.docker
deleted file mode 100644
index b414af1b9f..0000000000
--- a/tests/docker/dockerfiles/debian10.docker
+++ /dev/null
@@ -1,37 +0,0 @@
-#
-# Docker multiarch cross-compiler target
-#
-# This docker target is builds on Debian cross compiler targets to build distro
-# with a selection of cross compilers for building test binaries.
-#
-# On its own you can't build much but the docker-foo-cross targets
-# build on top of the base debian image. 
-# -FROM docker.io/library/debian:buster-slim - -# Duplicate deb line as deb-src -RUN cat /etc/apt/sources.list | sed "s/^deb\ /deb-src /" >> /etc/apt/sources.list - -# Install common build utilities -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \ - DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - bc \ - build-essential \ - ca-certificates \ - ccache \ - clang \ - dbus \ - gdb-multiarch \ - gettext \ - git \ - libffi-dev \ - libncurses5-dev \ - ninja-build \ - pkg-config \ - psmisc \ - python3 \ - python3-sphinx \ - python3-sphinx-rtd-theme \ - $(apt-get -s build-dep --arch-only qemu | egrep ^Inst | fgrep '[all]' | cut -d\ -f2) diff --git a/tests/docker/dockerfiles/debian11.docker b/tests/docker/dockerfiles/debian11.docker deleted file mode 100644 index febf884f8f..0000000000 --- a/tests/docker/dockerfiles/debian11.docker +++ /dev/null @@ -1,18 +0,0 @@ -# -# Docker multiarch cross-compiler target -# -# This docker target uses the current development version of Debian as -# a base for cross compilers for building test binaries. We won't -# attempt to build QEMU on it yet given it is still in development. -# -# On its own you can't build much but the docker-foo-cross targets -# build on top of the base debian image. -# -FROM docker.io/library/debian:bullseye-slim - -# Duplicate deb line as deb-src -RUN cat /etc/apt/sources.list | sed "s/^deb\ /deb-src /" >> /etc/apt/sources.list - -# Install common build utilities -RUN apt update && \ - DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata diff --git a/tests/docker/dockerfiles/empty.docker b/tests/docker/dockerfiles/empty.docker deleted file mode 100644 index 9ba980f1a8..0000000000 --- a/tests/docker/dockerfiles/empty.docker +++ /dev/null @@ -1,8 +0,0 @@ -# -# Empty Dockerfile -# - -FROM scratch - -# Add everything from the context into the container -ADD . 
/ diff --git a/tests/docker/dockerfiles/fedora-i386-cross.docker b/tests/docker/dockerfiles/fedora-i386-cross.docker index dbb8195eb1..7eec648d2d 100644 --- a/tests/docker/dockerfiles/fedora-i386-cross.docker +++ b/tests/docker/dockerfiles/fedora-i386-cross.docker @@ -1,12 +1,16 @@ -FROM registry.fedoraproject.org/fedora:33 +FROM registry.fedoraproject.org/fedora:34 + ENV PACKAGES \ + bison \ bzip2 \ ccache \ diffutils \ + flex \ findutils \ gcc \ git \ libffi-devel.i686 \ + libselinux-devel.i686 \ libtasn1-devel.i686 \ libzstd-devel.i686 \ make \ @@ -17,12 +21,13 @@ ENV PACKAGES \ glibc-static.i686 \ gnutls-devel.i686 \ nettle-devel.i686 \ - perl-Test-Harness \ + pcre-devel.i686 \ pixman-devel.i686 \ + sysprof-capture-devel.i686 \ zlib-devel.i686 -ENV QEMU_CONFIGURE_OPTS --extra-cflags=-m32 --disable-vhost-user -ENV PKG_CONFIG_PATH /usr/lib/pkgconfig +ENV QEMU_CONFIGURE_OPTS --cpu=i386 --disable-vhost-user +ENV PKG_CONFIG_LIBDIR /usr/lib/pkgconfig -RUN dnf install -y $PACKAGES +RUN dnf update -y && dnf install -y $PACKAGES RUN rpm -q $PACKAGES | sort > /packages.txt diff --git a/tests/docker/dockerfiles/fedora-win32-cross.docker b/tests/docker/dockerfiles/fedora-win32-cross.docker index aad39dd97f..75383ba185 100644 --- a/tests/docker/dockerfiles/fedora-win32-cross.docker +++ b/tests/docker/dockerfiles/fedora-win32-cross.docker @@ -1,43 +1,103 @@ -FROM registry.fedoraproject.org/fedora:33 +# THIS FILE WAS AUTO-GENERATED +# +# $ lcitool dockerfile --layers all --cross mingw32 fedora-35 qemu +# +# https://gitlab.com/libvirt/libvirt-ci -# Please keep this list sorted alphabetically -ENV PACKAGES \ - bc \ - bzip2 \ - ccache \ - diffutils \ - findutils \ - gcc \ - gettext \ - git \ - hostname \ - make \ - meson \ - mingw32-bzip2 \ - mingw32-curl \ - mingw32-glib2 \ - mingw32-gmp \ - mingw32-gnutls \ - mingw32-gtk3 \ - mingw32-libffi \ - mingw32-libjpeg-turbo \ - mingw32-libpng \ - mingw32-libtasn1 \ - mingw32-libusbx \ - mingw32-nettle \ - mingw32-nsis \ - mingw32-pixman \ - mingw32-pkg-config \ - mingw32-SDL2 \ - perl \ - perl-Test-Harness \ - python3 \ - python3-PyYAML \ - tar \ - which +FROM registry.fedoraproject.org/fedora:35 -RUN dnf install -y $PACKAGES -RUN rpm -q $PACKAGES | sort > /packages.txt +RUN dnf install -y nosync && \ + echo -e '#!/bin/sh\n\ +if test -d /usr/lib64\n\ +then\n\ + export LD_PRELOAD=/usr/lib64/nosync/nosync.so\n\ +else\n\ + export LD_PRELOAD=/usr/lib/nosync/nosync.so\n\ +fi\n\ +exec "$@"' > /usr/bin/nosync && \ + chmod +x /usr/bin/nosync && \ + nosync dnf update -y && \ + nosync dnf install -y \ + bash \ + bc \ + bison \ + bzip2 \ + ca-certificates \ + ccache \ + ctags \ + dbus-daemon \ + diffutils \ + findutils \ + flex \ + gcovr \ + genisoimage \ + git \ + glib2-devel \ + glibc-langpack-en \ + hostname \ + llvm \ + make \ + meson \ + ninja-build \ + nmap-ncat \ + openssh-clients \ + pcre-static \ + perl-base \ + python3 \ + python3-PyYAML \ + python3-numpy \ + python3-opencv \ + python3-pillow \ + python3-pip \ + python3-sphinx \ + python3-sphinx_rtd_theme \ + rpm \ + sed \ + sparse \ + spice-protocol \ + tar \ + tesseract \ + tesseract-langpack-eng \ + texinfo \ + util-linux \ + which && \ + nosync dnf autoremove -y && \ + nosync dnf clean all -y -# Specify the cross prefix for this image (see tests/docker/common.rc) +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE "/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" + +RUN nosync dnf install -y \ + mingw32-SDL2 \ + mingw32-SDL2_image \ + 
mingw32-bzip2 \ + mingw32-curl \ + mingw32-gcc \ + mingw32-gcc-c++ \ + mingw32-gettext \ + mingw32-glib2 \ + mingw32-gnutls \ + mingw32-gtk3 \ + mingw32-libgcrypt \ + mingw32-libjpeg-turbo \ + mingw32-libpng \ + mingw32-libtasn1 \ + mingw32-nettle \ + mingw32-nsis \ + mingw32-pixman \ + mingw32-pkg-config && \ + nosync dnf clean all -y && \ + rpm -qa | sort > /packages.txt && \ + mkdir -p /usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/i686-w64-mingw32-c++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/i686-w64-mingw32-cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/i686-w64-mingw32-g++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/i686-w64-mingw32-gcc + +ENV ABI "i686-w64-mingw32" +ENV MESON_OPTS "--cross-file=/usr/share/mingw/toolchain-mingw32.meson" ENV QEMU_CONFIGURE_OPTS --cross-prefix=i686-w64-mingw32- +ENV DEF_TARGET_LIST i386-softmmu diff --git a/tests/docker/dockerfiles/fedora-win64-cross.docker b/tests/docker/dockerfiles/fedora-win64-cross.docker index 9a224a619b..98c03dc13b 100644 --- a/tests/docker/dockerfiles/fedora-win64-cross.docker +++ b/tests/docker/dockerfiles/fedora-win64-cross.docker @@ -1,40 +1,103 @@ -FROM registry.fedoraproject.org/fedora:33 +# THIS FILE WAS AUTO-GENERATED +# +# $ lcitool dockerfile --layers all --cross mingw64 fedora-35 qemu +# +# https://gitlab.com/libvirt/libvirt-ci -# Please keep this list sorted alphabetically -ENV PACKAGES \ - bc \ - bzip2 \ - ccache \ - diffutils \ - findutils \ - gcc \ - gettext \ - git \ - hostname \ - make \ - meson \ - mingw32-nsis \ - mingw64-bzip2 \ - mingw64-curl \ - mingw64-glib2 \ - mingw64-gmp \ - mingw64-gtk3 \ - mingw64-libffi \ - mingw64-libjpeg-turbo \ - mingw64-libpng \ - mingw64-libtasn1 \ - mingw64-libusbx \ - mingw64-pixman \ - mingw64-pkg-config \ - perl \ - perl-Test-Harness \ - python3 \ - python3-PyYAML \ - tar \ - which +FROM registry.fedoraproject.org/fedora:35 -RUN dnf install -y $PACKAGES -RUN rpm -q $PACKAGES | sort > /packages.txt +RUN dnf install -y nosync && \ + echo -e '#!/bin/sh\n\ +if test -d /usr/lib64\n\ +then\n\ + export LD_PRELOAD=/usr/lib64/nosync/nosync.so\n\ +else\n\ + export LD_PRELOAD=/usr/lib/nosync/nosync.so\n\ +fi\n\ +exec "$@"' > /usr/bin/nosync && \ + chmod +x /usr/bin/nosync && \ + nosync dnf update -y && \ + nosync dnf install -y \ + bash \ + bc \ + bison \ + bzip2 \ + ca-certificates \ + ccache \ + ctags \ + dbus-daemon \ + diffutils \ + findutils \ + flex \ + gcovr \ + genisoimage \ + git \ + glib2-devel \ + glibc-langpack-en \ + hostname \ + llvm \ + make \ + meson \ + ninja-build \ + nmap-ncat \ + openssh-clients \ + pcre-static \ + perl-base \ + python3 \ + python3-PyYAML \ + python3-numpy \ + python3-opencv \ + python3-pillow \ + python3-pip \ + python3-sphinx \ + python3-sphinx_rtd_theme \ + rpm \ + sed \ + sparse \ + spice-protocol \ + tar \ + tesseract \ + tesseract-langpack-eng \ + texinfo \ + util-linux \ + which && \ + nosync dnf autoremove -y && \ + nosync dnf clean all -y -# Specify the cross prefix for this image (see tests/docker/common.rc) -ENV QEMU_CONFIGURE_OPTS --cross-prefix=x86_64-w64-mingw32- --disable-capstone +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE "/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" + +RUN nosync dnf install -y \ + mingw32-nsis \ + mingw64-SDL2 \ + mingw64-SDL2_image \ + mingw64-bzip2 \ + mingw64-curl \ + mingw64-gcc \ + mingw64-gcc-c++ \ + mingw64-gettext \ + mingw64-glib2 \ + mingw64-gnutls 
\ + mingw64-gtk3 \ + mingw64-libgcrypt \ + mingw64-libjpeg-turbo \ + mingw64-libpng \ + mingw64-libtasn1 \ + mingw64-nettle \ + mingw64-pixman \ + mingw64-pkg-config && \ + nosync dnf clean all -y && \ + rpm -qa | sort > /packages.txt && \ + mkdir -p /usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/x86_64-w64-mingw32-c++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/x86_64-w64-mingw32-cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/x86_64-w64-mingw32-g++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/x86_64-w64-mingw32-gcc + +ENV ABI "x86_64-w64-mingw32" +ENV MESON_OPTS "--cross-file=/usr/share/mingw/toolchain-mingw64.meson" +ENV QEMU_CONFIGURE_OPTS --cross-prefix=x86_64-w64-mingw32- +ENV DEF_TARGET_LIST x86_64-softmmu diff --git a/tests/docker/dockerfiles/fedora.docker b/tests/docker/dockerfiles/fedora.docker index eec1add7f6..d200c7fc10 100644 --- a/tests/docker/dockerfiles/fedora.docker +++ b/tests/docker/dockerfiles/fedora.docker @@ -1,117 +1,149 @@ -FROM registry.fedoraproject.org/fedora:33 +# THIS FILE WAS AUTO-GENERATED +# +# $ lcitool dockerfile --layers all fedora-35 qemu +# +# https://gitlab.com/libvirt/libvirt-ci -# Please keep this list sorted alphabetically -ENV PACKAGES \ - SDL2-devel \ - SDL2_image-devel \ - alsa-lib-devel \ - bc \ - brlapi-devel \ - bzip2 \ - bzip2-devel \ - ca-certificates \ - capstone-devel \ - ccache \ - clang \ - ctags \ - cyrus-sasl-devel \ - daxctl-devel \ - dbus-daemon \ - device-mapper-multipath-devel \ - diffutils \ - findutils \ - gcc \ - gcc-c++ \ - gcovr \ - genisoimage \ - gettext \ - git \ - glib2-devel \ - glibc-langpack-en \ - glibc-static \ - glusterfs-api-devel \ - gnutls-devel \ - gtk3-devel \ - hostname \ - jemalloc-devel \ - libaio-devel \ - libasan \ - libattr-devel \ - libbpf-devel \ - libcacard-devel \ - libcap-ng-devel \ - libcurl-devel \ - libdrm-devel \ - libepoxy-devel \ - libfdt-devel \ - libffi-devel \ - libgcrypt-devel \ - libiscsi-devel \ - libjpeg-devel \ - libnfs-devel \ - libpmem-devel \ - libpng-devel \ - librbd-devel \ - libseccomp-devel \ - libslirp-devel \ - libssh-devel \ - libtasn1-devel \ - libubsan \ - libudev-devel \ - liburing-devel \ - libusbx-devel \ - libxml2-devel \ - libzstd-devel \ - llvm \ - lttng-ust-devel \ - lzo-devel \ - make \ - mesa-libgbm-devel \ - meson \ - ncurses-devel \ - nettle-devel \ - ninja-build \ - nmap-ncat \ - numactl-devel \ - openssh-clients \ - pam-devel \ - perl-Test-Harness \ - perl-base \ - pixman-devel \ - pkgconfig \ - pulseaudio-libs-devel \ - python3 \ - python3-PyYAML \ - python3-numpy \ - python3-opencv \ - python3-pillow \ - python3-pip \ - python3-sphinx \ - python3-sphinx_rtd_theme \ - python3-virtualenv \ - rdma-core-devel \ - rpm \ - sed \ - snappy-devel \ - sparse \ - spice-protocol \ - spice-server-devel \ - systemd-devel \ - systemtap-sdt-devel \ - tar \ - tesseract \ - tesseract-langpack-eng \ - texinfo \ - usbredir-devel \ - util-linux \ - virglrenderer-devel \ - vte291-devel \ - which \ - xen-devel \ - xfsprogs-devel \ - zlib-devel -ENV QEMU_CONFIGURE_OPTS --python=/usr/bin/python3 +FROM registry.fedoraproject.org/fedora:35 -RUN dnf install -y $PACKAGES -RUN rpm -q $PACKAGES | sort > /packages.txt -ENV PATH $PATH:/usr/libexec/python3-sphinx/ +RUN dnf install -y nosync && \ + echo -e '#!/bin/sh\n\ +if test -d /usr/lib64\n\ +then\n\ + export LD_PRELOAD=/usr/lib64/nosync/nosync.so\n\ +else\n\ + export LD_PRELOAD=/usr/lib/nosync/nosync.so\n\ +fi\n\ +exec "$@"' > /usr/bin/nosync && \ + chmod +x 
/usr/bin/nosync && \ + nosync dnf update -y && \ + nosync dnf install -y \ + SDL2-devel \ + SDL2_image-devel \ + alsa-lib-devel \ + bash \ + bc \ + bison \ + brlapi-devel \ + bzip2 \ + bzip2-devel \ + ca-certificates \ + capstone-devel \ + ccache \ + clang \ + ctags \ + cyrus-sasl-devel \ + daxctl-devel \ + dbus-daemon \ + device-mapper-multipath-devel \ + diffutils \ + findutils \ + flex \ + fuse3-devel \ + gcc \ + gcc-c++ \ + gcovr \ + genisoimage \ + gettext \ + git \ + glib2-devel \ + glib2-static \ + glibc-langpack-en \ + glibc-static \ + glusterfs-api-devel \ + gnutls-devel \ + gtk3-devel \ + hostname \ + jemalloc-devel \ + json-c-devel \ + libaio-devel \ + libasan \ + libattr-devel \ + libbpf-devel \ + libcacard-devel \ + libcap-ng-devel \ + libcmocka-devel \ + libcurl-devel \ + libdrm-devel \ + libepoxy-devel \ + libfdt-devel \ + libffi-devel \ + libgcrypt-devel \ + libiscsi-devel \ + libjpeg-devel \ + libnfs-devel \ + libpmem-devel \ + libpng-devel \ + librbd-devel \ + libseccomp-devel \ + libselinux-devel \ + libslirp-devel \ + libssh-devel \ + libtasn1-devel \ + libubsan \ + liburing-devel \ + libusbx-devel \ + libzstd-devel \ + llvm \ + lttng-ust-devel \ + lzo-devel \ + make \ + mesa-libgbm-devel \ + meson \ + ncurses-devel \ + nettle-devel \ + ninja-build \ + nmap-ncat \ + numactl-devel \ + openssh-clients \ + pam-devel \ + pcre-static \ + perl-base \ + pixman-devel \ + pkgconfig \ + pulseaudio-libs-devel \ + python3 \ + python3-PyYAML \ + python3-numpy \ + python3-opencv \ + python3-pillow \ + python3-pip \ + python3-sphinx \ + python3-sphinx_rtd_theme \ + rdma-core-devel \ + rpm \ + sed \ + snappy-devel \ + sparse \ + spice-protocol \ + spice-server-devel \ + systemd-devel \ + systemtap-sdt-devel \ + tar \ + tesseract \ + tesseract-langpack-eng \ + texinfo \ + usbredir-devel \ + util-linux \ + virglrenderer-devel \ + vte291-devel \ + which \ + xen-devel \ + xfsprogs-devel \ + zlib-devel \ + zlib-static && \ + nosync dnf autoremove -y && \ + nosync dnf clean all -y && \ + rpm -qa | sort > /packages.txt && \ + mkdir -p /usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/c++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/clang && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/g++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc + +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE "/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" diff --git a/tests/docker/dockerfiles/opensuse-leap.docker b/tests/docker/dockerfiles/opensuse-leap.docker index 5a8bee0289..4361b01464 100644 --- a/tests/docker/dockerfiles/opensuse-leap.docker +++ b/tests/docker/dockerfiles/opensuse-leap.docker @@ -1,113 +1,140 @@ -FROM registry.opensuse.org/opensuse/leap:15.2 +# THIS FILE WAS AUTO-GENERATED +# +# $ lcitool dockerfile --layers all opensuse-leap-153 qemu +# +# https://gitlab.com/libvirt/libvirt-ci -# Please keep this list sorted alphabetically -ENV PACKAGES \ - Mesa-devel \ - alsa-lib-devel \ - bc \ - brlapi-devel \ - bzip2 \ - ca-certificates \ - ccache \ - clang \ - ctags \ - cyrus-sasl-devel \ - dbus-1 \ - diffutils \ - findutils \ - gcc \ - gcc-c++ \ - gcovr \ - gettext-runtime \ - git \ - glib2-devel \ - glibc-locale \ - glibc-static \ - glusterfs-devel \ - gtk3-devel \ - hostname \ - jemalloc-devel \ - libSDL2-devel \ - libSDL2_image-devel \ - libaio-devel \ - libasan6 \ - libattr-devel \ - libbpf-devel \ - 
libbz2-devel \ - libcacard-devel \ - libcap-ng-devel \ - libcurl-devel \ - libdrm-devel \ - libepoxy-devel \ - libfdt-devel \ - libffi-devel \ - libgcrypt-devel \ - libgnutls-devel \ - libiscsi-devel \ - libjpeg8-devel \ - libndctl-devel \ - libnettle-devel \ - libnfs-devel \ - libnuma-devel \ - libpixman-1-0-devel \ - libpmem-devel \ - libpng16-devel \ - libpulse-devel \ - librbd-devel \ - libseccomp-devel \ - libspice-server-devel \ - libssh-devel \ - libtasn1-devel \ - libubsan1 \ - libudev-devel \ - libusb-1_0-devel \ - libxml2-devel \ - libzstd-devel \ - llvm \ - lttng-ust-devel \ - lzo-devel \ - make \ - mkisofs \ - ncat \ - ncurses-devel \ - ninja \ - openssh \ - pam-devel \ - perl-Test-Harness \ - perl-base \ - pkgconfig \ - python3-Pillow \ - python3-PyYAML \ - python3-Sphinx \ - python3-base \ - python3-numpy \ - python3-opencv \ - python3-pip \ - python3-setuptools \ - python3-sphinx_rtd_theme \ - python3-virtualenv \ - python3-wheel \ - rdma-core-devel \ - rpm \ - sed \ - snappy-devel \ - sparse \ - spice-protocol-devel \ - systemd-devel \ - systemtap-sdt-devel \ - tar \ - tesseract-ocr \ - tesseract-ocr-traineddata-english \ - texinfo \ - usbredir-devel \ - util-linux \ - virglrenderer-devel \ - vte-devel \ - which \ - xen-devel \ - xfsprogs-devel \ - zlib-devel -ENV QEMU_CONFIGURE_OPTS --python=/usr/bin/python3.6 +FROM registry.opensuse.org/opensuse/leap:15.3 -RUN zypper update -y && zypper --non-interactive install -y $PACKAGES -RUN rpm -q $PACKAGES | sort > /packages.txt +RUN zypper update -y && \ + zypper install -y \ + Mesa-devel \ + alsa-lib-devel \ + bash \ + bc \ + bison \ + brlapi-devel \ + bzip2 \ + ca-certificates \ + ccache \ + clang \ + ctags \ + cyrus-sasl-devel \ + dbus-1 \ + diffutils \ + findutils \ + flex \ + fuse3-devel \ + gcc \ + gcc-c++ \ + gcovr \ + gettext-runtime \ + git \ + glib2-devel \ + glibc-locale \ + glibc-static \ + glusterfs-devel \ + gtk3-devel \ + hostname \ + jemalloc-devel \ + libSDL2-devel \ + libSDL2_image-devel \ + libaio-devel \ + libasan6 \ + libattr-devel \ + libbpf-devel \ + libbz2-devel \ + libcacard-devel \ + libcap-ng-devel \ + libcmocka-devel \ + libcurl-devel \ + libdrm-devel \ + libepoxy-devel \ + libfdt-devel \ + libffi-devel \ + libgcrypt-devel \ + libgnutls-devel \ + libiscsi-devel \ + libjpeg8-devel \ + libjson-c-devel \ + libndctl-devel \ + libnettle-devel \ + libnfs-devel \ + libnuma-devel \ + libpixman-1-0-devel \ + libpmem-devel \ + libpng16-devel \ + libpulse-devel \ + librbd-devel \ + libseccomp-devel \ + libselinux-devel \ + libslirp-devel \ + libspice-server-devel \ + libssh-devel \ + libtasn1-devel \ + libubsan1 \ + libudev-devel \ + liburing-devel \ + libusb-1_0-devel \ + libzstd-devel \ + llvm \ + lttng-ust-devel \ + lzo-devel \ + make \ + mkisofs \ + ncat \ + ncurses-devel \ + ninja \ + openssh \ + pam-devel \ + pcre-devel-static \ + perl-base \ + pkgconfig \ + python3-Pillow \ + python3-PyYAML \ + python3-Sphinx \ + python3-base \ + python3-numpy \ + python3-opencv \ + python3-pip \ + python3-setuptools \ + python3-sphinx_rtd_theme \ + python3-wheel \ + rdma-core-devel \ + rpm \ + sed \ + snappy-devel \ + sndio-devel \ + sparse \ + spice-protocol-devel \ + systemd-devel \ + systemtap-sdt-devel \ + tar \ + tesseract-ocr \ + tesseract-ocr-traineddata-english \ + texinfo \ + usbredir-devel \ + util-linux \ + virglrenderer-devel \ + vte-devel \ + which \ + xen-devel \ + xfsprogs-devel \ + zlib-devel \ + zlib-devel-static && \ + zypper clean --all && \ + rpm -qa | sort > /packages.txt && \ + mkdir -p 
/usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/c++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/clang && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/g++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc + +RUN /usr/bin/pip3 install meson==0.56.0 + +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE "/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" diff --git a/tests/docker/dockerfiles/ubuntu.docker b/tests/docker/dockerfiles/ubuntu.docker deleted file mode 100644 index f0e0180d21..0000000000 --- a/tests/docker/dockerfiles/ubuntu.docker +++ /dev/null @@ -1,71 +0,0 @@ -# -# Latest Ubuntu Release -# -# Useful for testing against relatively bleeding edge libraries and -# compilers. We also have seperate recipe for the most recent LTS -# release. -# -# When updating use the full tag not :latest otherwise the build -# system won't pick up that it has changed. -# - -FROM docker.io/library/ubuntu:20.04 -ENV PACKAGES \ - ccache \ - clang \ - dbus \ - gcc \ - gettext \ - git \ - glusterfs-common \ - libaio-dev \ - libattr1-dev \ - libbrlapi-dev \ - libbz2-dev \ - libcacard-dev \ - libcap-ng-dev \ - libcurl4-gnutls-dev \ - libdrm-dev \ - libepoxy-dev \ - libfdt-dev \ - libffi-dev \ - libgbm-dev \ - libgnutls28-dev \ - libgtk-3-dev \ - libibverbs-dev \ - libiscsi-dev \ - libjemalloc-dev \ - libjpeg-turbo8-dev \ - liblzo2-dev \ - libncurses5-dev \ - libncursesw5-dev \ - libnfs-dev \ - libnuma-dev \ - libpixman-1-dev \ - libpng-dev \ - librados-dev \ - librbd-dev \ - librdmacm-dev \ - libsasl2-dev \ - libsdl2-dev \ - libseccomp-dev \ - libsnappy-dev \ - libspice-protocol-dev \ - libspice-server-dev \ - libssh-dev \ - libusb-1.0-0-dev \ - libusbredirhost-dev \ - libvdeplug-dev \ - libvte-2.91-dev \ - libxen-dev \ - libzstd-dev \ - make \ - ninja-build \ - python3-yaml \ - python3-sphinx \ - python3-sphinx-rtd-theme \ - sparse \ - xfslibs-dev -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get -y install $PACKAGES -RUN dpkg -l $PACKAGES | sort > /packages.txt diff --git a/tests/docker/dockerfiles/ubuntu1804.docker b/tests/docker/dockerfiles/ubuntu1804.docker deleted file mode 100644 index 0880bf3e29..0000000000 --- a/tests/docker/dockerfiles/ubuntu1804.docker +++ /dev/null @@ -1,116 +0,0 @@ -FROM docker.io/library/ubuntu:18.04 -ENV PACKAGES \ - bc \ - bsdmainutils \ - bzip2 \ - ca-certificates \ - ccache \ - clang \ - dbus \ - debianutils \ - diffutils \ - exuberant-ctags \ - findutils \ - g++ \ - gcc \ - gcovr \ - genisoimage \ - gettext \ - git \ - glusterfs-common \ - hostname \ - libaio-dev \ - libasan5 \ - libasound2-dev \ - libattr1-dev \ - libbrlapi-dev \ - libbz2-dev \ - libc6-dev \ - libcacard-dev \ - libcap-ng-dev \ - libcapstone-dev \ - libcurl4-gnutls-dev \ - libdaxctl-dev \ - libdrm-dev \ - libepoxy-dev \ - libfdt-dev \ - libffi-dev \ - libgbm-dev \ - libgcrypt20-dev \ - libglib2.0-dev \ - libgnutls28-dev \ - libgtk-3-dev \ - libibverbs-dev \ - libiscsi-dev \ - libjemalloc-dev \ - libjpeg-turbo8-dev \ - liblttng-ust-dev \ - liblzo2-dev \ - libncursesw5-dev \ - libnfs-dev \ - libnuma-dev \ - libpam0g-dev \ - libpixman-1-dev \ - libpmem-dev \ - libpng-dev \ - libpulse-dev \ - librbd-dev \ - librdmacm-dev \ - libsasl2-dev \ - libsdl2-dev \ - libsdl2-image-dev \ - libseccomp-dev \ - libsnappy-dev \ - libspice-protocol-dev \ - libspice-server-dev \ - libssh-dev \ - libsystemd-dev \ - 
libtasn1-6-dev \ - libtest-harness-perl \ - libubsan1 \ - libudev-dev \ - libusb-1.0-0-dev \ - libusbredirhost-dev \ - libvdeplug-dev \ - libvirglrenderer-dev \ - libvte-2.91-dev \ - libxen-dev \ - libxml2-dev \ - libzstd-dev \ - llvm \ - locales \ - make \ - multipath-tools \ - netcat-openbsd \ - nettle-dev \ - ninja-build \ - openssh-client \ - perl-base \ - pkgconf \ - python3 \ - python3-numpy \ - python3-opencv \ - python3-pillow \ - python3-pip \ - python3-setuptools \ - python3-sphinx \ - python3-sphinx-rtd-theme \ - python3-venv \ - python3-wheel \ - python3-yaml \ - rpm2cpio \ - sed \ - sparse \ - systemtap-sdt-dev \ - tar \ - tesseract-ocr \ - tesseract-ocr-eng \ - texinfo \ - xfslibs-dev \ - zlib1g-dev -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get -y install $PACKAGES -RUN dpkg -l $PACKAGES | sort > /packages.txt - -# https://bugs.launchpad.net/qemu/+bug/1838763 -ENV QEMU_CONFIGURE_OPTS --disable-libssh diff --git a/tests/docker/dockerfiles/ubuntu2004.docker b/tests/docker/dockerfiles/ubuntu2004.docker index 39de63d012..9417bca2fa 100644 --- a/tests/docker/dockerfiles/ubuntu2004.docker +++ b/tests/docker/dockerfiles/ubuntu2004.docker @@ -1,118 +1,152 @@ -FROM docker.io/library/ubuntu:20.04 -ENV PACKAGES \ - bc \ - bsdmainutils \ - bzip2 \ - ca-certificates \ - ccache \ - clang \ - dbus \ - debianutils \ - diffutils \ - exuberant-ctags \ - findutils \ - g++ \ - gcc \ - gcovr \ - genisoimage \ - gettext \ - git \ - hostname \ - libaio-dev \ - libasan5 \ - libasound2-dev \ - libattr1-dev \ - libbrlapi-dev \ - libbz2-dev \ - libc6-dev \ - libcacard-dev \ - libcap-ng-dev \ - libcapstone-dev \ - libcurl4-gnutls-dev \ - libdaxctl-dev \ - libdrm-dev \ - libepoxy-dev \ - libfdt-dev \ - libffi-dev \ - libgbm-dev \ - libgcrypt20-dev \ - libglib2.0-dev \ - libglusterfs-dev \ - libgnutls28-dev \ - libgtk-3-dev \ - libibverbs-dev \ - libiscsi-dev \ - libjemalloc-dev \ - libjpeg-turbo8-dev \ - liblttng-ust-dev \ - liblzo2-dev \ - libncursesw5-dev \ - libnfs-dev \ - libnuma-dev \ - libpam0g-dev \ - libpixman-1-dev \ - libpmem-dev \ - libpng-dev \ - libpulse-dev \ - librbd-dev \ - librdmacm-dev \ - libsasl2-dev \ - libsdl2-dev \ - libsdl2-image-dev \ - libseccomp-dev \ - libslirp-dev \ - libsnappy-dev \ - libspice-protocol-dev \ - libspice-server-dev \ - libssh-dev \ - libsystemd-dev \ - libtasn1-6-dev \ - libtest-harness-perl \ - libubsan1 \ - libudev-dev \ - libusb-1.0-0-dev \ - libusbredirhost-dev \ - libvdeplug-dev \ - libvirglrenderer-dev \ - libvte-2.91-dev \ - libxen-dev \ - libxml2-dev \ - libzstd-dev \ - llvm \ - locales \ - make \ - multipath-tools \ - ncat \ - nettle-dev \ - ninja-build \ - openssh-client \ - perl-base \ - pkgconf \ - python3 \ - python3-numpy \ - python3-opencv \ - python3-pillow \ - python3-pip \ - python3-setuptools \ - python3-sphinx \ - python3-sphinx-rtd-theme \ - python3-venv \ - python3-wheel \ - python3-yaml \ - rpm2cpio \ - sed \ - sparse \ - systemtap-sdt-dev \ - tar \ - tesseract-ocr \ - tesseract-ocr-eng \ - texinfo \ - xfslibs-dev \ - zlib1g-dev -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get -y install $PACKAGES -RUN dpkg -l $PACKAGES | sort > /packages.txt +# THIS FILE WAS AUTO-GENERATED +# +# $ lcitool dockerfile --layers all ubuntu-2004 qemu +# +# https://gitlab.com/libvirt/libvirt-ci +FROM docker.io/library/ubuntu:20.04 + +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install 
--no-install-recommends -y \ + bash \ + bc \ + bison \ + bsdmainutils \ + bzip2 \ + ca-certificates \ + ccache \ + clang \ + dbus \ + debianutils \ + diffutils \ + exuberant-ctags \ + findutils \ + flex \ + g++ \ + gcc \ + gcovr \ + genisoimage \ + gettext \ + git \ + hostname \ + libaio-dev \ + libasan5 \ + libasound2-dev \ + libattr1-dev \ + libbrlapi-dev \ + libbz2-dev \ + libc6-dev \ + libcacard-dev \ + libcap-ng-dev \ + libcapstone-dev \ + libcmocka-dev \ + libcurl4-gnutls-dev \ + libdaxctl-dev \ + libdrm-dev \ + libepoxy-dev \ + libfdt-dev \ + libffi-dev \ + libfuse3-dev \ + libgbm-dev \ + libgcrypt20-dev \ + libglib2.0-dev \ + libglusterfs-dev \ + libgnutls28-dev \ + libgtk-3-dev \ + libibumad-dev \ + libibverbs-dev \ + libiscsi-dev \ + libjemalloc-dev \ + libjpeg-turbo8-dev \ + libjson-c-dev \ + liblttng-ust-dev \ + liblzo2-dev \ + libncursesw5-dev \ + libnfs-dev \ + libnuma-dev \ + libpam0g-dev \ + libpcre2-dev \ + libpixman-1-dev \ + libpmem-dev \ + libpng-dev \ + libpulse-dev \ + librbd-dev \ + librdmacm-dev \ + libsasl2-dev \ + libsdl2-dev \ + libsdl2-image-dev \ + libseccomp-dev \ + libselinux1-dev \ + libslirp-dev \ + libsnappy-dev \ + libsndio-dev \ + libspice-protocol-dev \ + libspice-server-dev \ + libssh-dev \ + libsystemd-dev \ + libtasn1-6-dev \ + libubsan1 \ + libudev-dev \ + libusb-1.0-0-dev \ + libusbredirhost-dev \ + libvdeplug-dev \ + libvirglrenderer-dev \ + libvte-2.91-dev \ + libxen-dev \ + libzstd-dev \ + llvm \ + locales \ + make \ + multipath-tools \ + ncat \ + nettle-dev \ + ninja-build \ + openssh-client \ + perl-base \ + pkgconf \ + python3 \ + python3-numpy \ + python3-opencv \ + python3-pillow \ + python3-pip \ + python3-setuptools \ + python3-sphinx \ + python3-sphinx-rtd-theme \ + python3-venv \ + python3-wheel \ + python3-yaml \ + rpm2cpio \ + sed \ + sparse \ + systemtap-sdt-dev \ + tar \ + tesseract-ocr \ + tesseract-ocr-eng \ + texinfo \ + xfslibs-dev \ + zlib1g-dev && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ + dpkg-reconfigure locales && \ + dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ + mkdir -p /usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/c++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/clang && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/g++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc + +RUN /usr/bin/pip3 install meson==0.56.0 + +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE "/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" # Apply patch https://reviews.llvm.org/D75820 # This is required for TSan in clang-10 to compile with QEMU. RUN sed -i 's/^const/static const/g' /usr/lib/llvm-10/lib/clang/10.0.0/include/sanitizer/tsan_interface.h diff --git a/tests/docker/run b/tests/docker/run index 421393046b..9eb96129da 100755 --- a/tests/docker/run +++ b/tests/docker/run @@ -15,7 +15,7 @@ if test -n "$V"; then set -x fi -BASE="$(dirname $(readlink -e $0))" +BASE="$(dirname $(realpath $0))" # Prepare the environment export PATH=/usr/lib/ccache:/usr/lib64/ccache:$PATH diff --git a/tests/docker/test-mingw b/tests/docker/test-mingw index 0bc6d78872..18366972eb 100755 --- a/tests/docker/test-mingw +++ b/tests/docker/test-mingw @@ -13,14 +13,12 @@ . 
common.rc -requires_binary x86_64-w64-mingw32-gcc -requires_binary i686-w64-mingw32-gcc +requires_binary x86_64-w64-mingw32-gcc i686-w64-mingw32-gcc cd "$BUILD_DIR" -for prefix in x86_64-w64-mingw32- i686-w64-mingw32-; do - TARGET_LIST=${TARGET_LIST:-$DEF_TARGET_LIST} \ - build_qemu --cross-prefix=$prefix \ +TARGET_LIST=${TARGET_LIST:-$DEF_TARGET_LIST} \ +build_qemu \ --enable-trace-backends=simple \ --enable-gnutls \ --enable-nettle \ @@ -29,8 +27,6 @@ for prefix in x86_64-w64-mingw32- i686-w64-mingw32-; do --enable-bzip2 \ --enable-guest-agent \ --enable-docs - install_qemu - make installer - make clean - -done +install_qemu +make installer +make clean diff --git a/tests/fp/fp-bench.c b/tests/fp/fp-bench.c index c24baf8535..8ce0ca1545 100644 --- a/tests/fp/fp-bench.c +++ b/tests/fp/fp-bench.c @@ -545,7 +545,8 @@ static int round_name_to_mode(const char *name) return -1; } -static void QEMU_NORETURN die_host_rounding(enum rounding rounding) +static G_NORETURN +void die_host_rounding(enum rounding rounding) { fprintf(stderr, "fatal: '%s' rounding not supported on this host\n", round_names[rounding]); diff --git a/tests/fp/fp-test.c b/tests/fp/fp-test.c index 352dd71c44..35829ad5f7 100644 --- a/tests/fp/fp-test.c +++ b/tests/fp/fp-test.c @@ -921,7 +921,8 @@ static void parse_args(int argc, char *argv[]) } } -static void QEMU_NORETURN run_test(void) +static G_NORETURN +void run_test(void) { unsigned int i; diff --git a/tests/fp/meson.build b/tests/fp/meson.build index 07e2cdc8d2..6258e2bd7d 100644 --- a/tests/fp/meson.build +++ b/tests/fp/meson.build @@ -1,3 +1,6 @@ +if 'CONFIG_TCG' not in config_all + subdir_done() +endif # There are namespace pollution issues on Windows, due to osdep.h # bringing in Windows headers that define a FLOAT128 type. if targetos == 'windows' @@ -37,6 +40,11 @@ tfcflags = [ '-Wno-error', ] +if cc.get_id() == 'clang' + # Clang does not support '#pragma STDC FENV_ACCESS' + tfcflags += [ '-Wno-ignored-pragmas' ] +endif + tfgencases = [ tfdir / 'genCases_ui32.c', tfdir / 'genCases_ui64.c', @@ -622,9 +630,9 @@ test('fp-test-mulAdd', fptest, # no fptest_rounding_args args: fptest_args + ['f16_mulAdd', 'f32_mulAdd', 'f64_mulAdd', 'f128_mulAdd'], - suite: ['softfloat-slow', 'softfloat-ops-slow'], timeout: 90) + suite: ['softfloat-slow', 'softfloat-ops-slow', 'slow'], timeout: 90) -fpbench = executable( +executable( 'fp-bench', ['fp-bench.c', '../../fpu/softfloat.c'], link_with: [libtestfloat, libsoftfloat], diff --git a/tests/fp/platform.h b/tests/fp/platform.h index c20ba70baa..6c72ad0cd0 100644 --- a/tests/fp/platform.h +++ b/tests/fp/platform.h @@ -29,9 +29,9 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include "config-host.h" +#include "qemu/compiler.h" -#ifndef HOST_WORDS_BIGENDIAN +#if !HOST_BIG_ENDIAN #define LITTLEENDIAN 1 /* otherwise do not define it */ #endif diff --git a/tests/guest-debug/run-test.py b/tests/guest-debug/run-test.py index 2e58795a10..d865e46ecd 100755 --- a/tests/guest-debug/run-test.py +++ b/tests/guest-debug/run-test.py @@ -92,17 +92,18 @@ if __name__ == '__main__': result = subprocess.call(gdb_cmd, shell=True, stdout=output) - # A negative result is the result of an internal gdb failure like - # a crash. We force a return of 0 so we don't fail the test on + # A result of greater than 128 indicates a fatal signal (likely a + # crash due to gdb internal failure). 
That's a problem for GDB and + # not the test so we force a return of 0 so we don't fail the test on # account of broken external tools. - if result < 0: - print("GDB crashed? SKIPPING") + if result > 128: + log(output, "GDB crashed? (%d, %d) SKIPPING" % (result, result - 128)) exit(0) try: inferior.wait(2) except subprocess.TimeoutExpired: - print("GDB never connected? Killed guest") + log(output, "GDB never connected? Killed guest") inferior.kill() exit(result) diff --git a/tests/lcitool/Makefile.include b/tests/lcitool/Makefile.include new file mode 100644 index 0000000000..3780185c7c --- /dev/null +++ b/tests/lcitool/Makefile.include @@ -0,0 +1,17 @@ + +LCITOOL_REFRESH = $(SRC_PATH)/tests/lcitool/refresh + +lcitool: + @echo 'Manage build environment manifests' + @echo + @echo 'Available targets:' + @echo + @echo ' lcitool: Print this help.' + @echo ' lcitool-refresh: Re-generate all build environment manifests.' + @echo + +lcitool-help: lcitool + +lcitool-refresh: + $(call quiet-command, cd $(SRC_PATH) && git submodule update --init tests/lcitool/libvirt-ci) + $(call quiet-command, $(PYTHON) $(LCITOOL_REFRESH)) diff --git a/tests/lcitool/libvirt-ci b/tests/lcitool/libvirt-ci new file mode 160000 index 0000000000..e3eb28cf2e --- /dev/null +++ b/tests/lcitool/libvirt-ci @@ -0,0 +1 @@ +Subproject commit e3eb28cf2e17fbcf7fe7e19505ee432b8ec5bbb5 diff --git a/tests/lcitool/projects/qemu.yml b/tests/lcitool/projects/qemu.yml new file mode 100644 index 0000000000..c62dbc00f9 --- /dev/null +++ b/tests/lcitool/projects/qemu.yml @@ -0,0 +1,121 @@ +--- +packages: + - alsa + - bash + - bc + - bison + - brlapi + - bzip2 + - bzip2-libs + - capstone + - ccache + - clang + - cmocka + - column + - ctags + - cyrus-sasl + - daxctl + - dbus-daemon + - device-mapper-multipath + - diffutils + - dtrace + - findutils + - flex + - fuse3 + - g++ + - gcc + - gcovr + - gettext + - genisoimage + - glib2 + - glib2-native + - glib2-static + - glusterfs + - gnutls + - gtk3 + - hostname + - json-c + - libaio + - libattr + - libasan + - libbpf + - libc-static + - libcacard + - libcap-ng + - libcurl + - libdrm + - libepoxy + - libfdt + - libffi + - libgcrypt + - libibumad + - libibverbs + - libiscsi + - libjemalloc + - libjpeg + - libnfs + - libnuma + - libpmem + - libpng + - librbd + - librdmacm + - libseccomp + - libselinux + - libslirp + - libssh + - libtasn1 + - libubsan + - libudev + - liburing + - libusbx + - libvdeplug + - libzstd + - llvm + - lttng-ust + - lzo + - netcat + - nettle + - ninja + - nsis + - make + - mesa-libgbm + - meson + - ncursesw + - pam + - pcre-static + - perl + - pixman + - pkg-config + - pulseaudio + - python3 + - python3-PyYAML + - python3-numpy + - python3-opencv + - python3-pillow + - python3-pip + - python3-sphinx + - python3-sphinx-rtd-theme + - python3-venv + - rpm2cpio + - sdl2 + - sdl2-image + - sed + - snappy + - sndio + - sparse + - spice-protocol + - spice-server + - ssh-client + - systemd + - tar + - tesseract + - tesseract-eng + - texinfo + - usbredir + - virglrenderer + - vte + - which + - xen + - xfsprogs + - zlib + - zlib-static diff --git a/tests/lcitool/refresh b/tests/lcitool/refresh new file mode 100755 index 0000000000..fa966e4009 --- /dev/null +++ b/tests/lcitool/refresh @@ -0,0 +1,184 @@ +#!/usr/bin/env python3 +# +# Re-generate container recipes +# +# This script uses the "lcitool" available from +# +# https://gitlab.com/libvirt/libvirt-ci +# +# Copyright (c) 2020 Red Hat Inc. 
+# +# This work is licensed under the terms of the GNU GPL, version 2 +# or (at your option) any later version. See the COPYING file in +# the top-level directory. + +import sys +import subprocess + +from pathlib import Path + +if len(sys.argv) != 1: + print("syntax: %s" % sys.argv[0], file=sys.stderr) + sys.exit(1) + +self_dir = Path(__file__).parent +src_dir = self_dir.parent.parent +dockerfiles_dir = Path(src_dir, "tests", "docker", "dockerfiles") + +lcitool_path = Path(self_dir, "libvirt-ci", "bin", "lcitool") + +lcitool_cmd = [lcitool_path, "--data-dir", self_dir] + + +def atomic_write(filename, content): + tmp = filename.with_suffix(filename.suffix + ".tmp") + try: + with tmp.open("w") as fp: + print(content, file=fp, end="") + tmp.rename(filename) + except Exception as ex: + tmp.unlink() + raise + + +def generate(filename, cmd, trailer): + print("Generate %s" % filename) + lcitool = subprocess.run(cmd, capture_output=True) + + if lcitool.returncode != 0: + raise Exception("Failed to generate %s: %s" % (filename, lcitool.stderr)) + + content = lcitool.stdout.decode("utf8") + if trailer is not None: + content += trailer + atomic_write(filename, content) + + +def generate_dockerfile(host, target, cross=None, trailer=None): + filename = Path(src_dir, "tests", "docker", "dockerfiles", host + ".docker") + cmd = lcitool_cmd + ["dockerfile"] + if cross is not None: + cmd.extend(["--cross", cross]) + cmd.extend([target, "qemu"]) + generate(filename, cmd, trailer) + + +def generate_cirrus(target, trailer=None): + filename = Path(src_dir, ".gitlab-ci.d", "cirrus", target + ".vars") + cmd = lcitool_cmd + ["variables", target, "qemu"] + generate(filename, cmd, trailer) + + +ubuntu2004_tsanhack = [ + "# Apply patch https://reviews.llvm.org/D75820\n", + "# This is required for TSan in clang-10 to compile with QEMU.\n", + "RUN sed -i 's/^const/static const/g' /usr/lib/llvm-10/lib/clang/10.0.0/include/sanitizer/tsan_interface.h\n" +] + + +# Netmap still needs to be manually built as it is yet to be packaged +# into a distro. We also add cscope and gtags which are used in the CI +# test +debian11_extras = [ + "# netmap/cscope/global\n", + "RUN DEBIAN_FRONTEND=noninteractive eatmydata \\\n", + " apt install -y --no-install-recommends \\\n", + " cscope\\\n", + " global\\\n", + " linux-headers-amd64\n", + "RUN git clone https://github.com/luigirizzo/netmap.git /usr/src/netmap\n", + "RUN cd /usr/src/netmap && git checkout v11.3\n", + "RUN cd /usr/src/netmap/LINUX && ./configure --no-drivers --no-apps --kernel-dir=$(ls -d /usr/src/linux-headers-*-amd64) && make install\n", + "ENV QEMU_CONFIGURE_OPTS --enable-netmap\n" +] + + +def cross_build(prefix, targets): + conf = "ENV QEMU_CONFIGURE_OPTS --cross-prefix=%s\n" % (prefix) + targets = "ENV DEF_TARGET_LIST %s\n" % (targets) + return "".join([conf, targets]) + +# +# Update all the various build configurations. +# Please keep each group sorted alphabetically for easy reading. 
+# + +try: + # + # Standard native builds + # + generate_dockerfile("alpine", "alpine-316") + generate_dockerfile("centos8", "centos-stream-8") + generate_dockerfile("debian-amd64", "debian-11", + trailer="".join(debian11_extras)) + generate_dockerfile("fedora", "fedora-35") + generate_dockerfile("opensuse-leap", "opensuse-leap-153") + generate_dockerfile("ubuntu2004", "ubuntu-2004", + trailer="".join(ubuntu2004_tsanhack)) + + # + # Cross compiling builds + # + generate_dockerfile("debian-amd64-cross", "debian-11", + cross="x86_64", + trailer=cross_build("x86_64-linux-gnu-", + "x86_64-softmmu," + "x86_64-linux-user," + "i386-softmmu,i386-linux-user")) + + generate_dockerfile("debian-arm64-cross", "debian-11", + cross="aarch64", + trailer=cross_build("aarch64-linux-gnu-", + "aarch64-softmmu,aarch64-linux-user")) + + generate_dockerfile("debian-armel-cross", "debian-11", + cross="armv6l", + trailer=cross_build("arm-linux-gnueabi-", + "arm-softmmu,arm-linux-user,armeb-linux-user")) + + generate_dockerfile("debian-armhf-cross", "debian-11", + cross="armv7l", + trailer=cross_build("arm-linux-gnueabihf-", + "arm-softmmu,arm-linux-user")) + + generate_dockerfile("debian-mips64el-cross", "debian-11", + cross="mips64el", + trailer=cross_build("mips64el-linux-gnuabi64-", + "mips64el-softmmu,mips64el-linux-user")) + + generate_dockerfile("debian-mipsel-cross", "debian-11", + cross="mipsel", + trailer=cross_build("mipsel-linux-gnu-", + "mipsel-softmmu,mipsel-linux-user")) + + generate_dockerfile("debian-ppc64el-cross", "debian-11", + cross="ppc64le", + trailer=cross_build("powerpc64le-linux-gnu-", + "ppc64-softmmu,ppc64-linux-user")) + + generate_dockerfile("debian-s390x-cross", "debian-11", + cross="s390x", + trailer=cross_build("s390x-linux-gnu-", + "s390x-softmmu,s390x-linux-user")) + + generate_dockerfile("fedora-win32-cross", "fedora-35", + cross="mingw32", + trailer=cross_build("i686-w64-mingw32-", + "i386-softmmu")) + + generate_dockerfile("fedora-win64-cross", "fedora-35", + cross="mingw64", + trailer=cross_build("x86_64-w64-mingw32-", + "x86_64-softmmu")) + + # + # Cirrus packages lists for GitLab + # + generate_cirrus("freebsd-12") + generate_cirrus("freebsd-13") + generate_cirrus("macos-12") + + sys.exit(0) +except Exception as ex: + print(str(ex), file=sys.stderr) + sys.exit(1) diff --git a/tests/meson.build b/tests/meson.build index 55a7b08275..8e318ec513 100644 --- a/tests/meson.build +++ b/tests/meson.build @@ -1,6 +1,5 @@ -py3 = import('python').find_installation() - subdir('bench') +subdir('qemu-iotests') test_qapi_outputs = [ 'qapi-builtin-types.c', @@ -31,13 +30,21 @@ test_qapi_outputs = [ 'test-qapi-visit.h', ] +# Problem: to generate trace events, we'd have to add the .trace-events +# file to qapi_trace_events like we do in qapi/meson.build. Since +# qapi_trace_events is used by trace/meson.build, we'd have to move +# subdir('tests') above subdir('trace') in the top-level meson.build. +# Can't, because it would break the dependency of qga on qemuutil (which +# depends on trace_ss). Not worth solving now; simply suppress trace +# event generation instead. 
test_qapi_files = custom_target('Test QAPI files', output: test_qapi_outputs, input: files('qapi-schema/qapi-schema-test.json', 'qapi-schema/include/sub-module.json', 'qapi-schema/sub-sub-module.json'), command: [ qapi_gen, '-o', meson.current_build_dir(), - '-b', '-p', 'test-', '@INPUT0@' ], + '-b', '-p', 'test-', '@INPUT0@', + '--suppress-tracing' ], depend_files: qapi_gen_depends) # meson doesn't like generated output in other directories @@ -61,16 +68,12 @@ test_deps = { 'test-qht-par': qht_bench, } -if have_tools and 'CONFIG_VHOST_USER' in config_host and 'CONFIG_LINUX' in config_host +if have_tools and have_vhost_user and 'CONFIG_LINUX' in config_host executable('vhost-user-bridge', sources: files('vhost-user-bridge.c'), dependencies: [qemuutil, vhost_user]) endif -if have_system and 'CONFIG_POSIX' in config_host - subdir('qemu-iotests') -endif - test('decodetree', sh, args: [ files('decode/check.sh'), config_host['PYTHON'], files('../scripts/decodetree.py') ], workdir: meson.current_source_dir() / 'decode', @@ -80,7 +83,7 @@ if 'CONFIG_TCG' in config_all subdir('fp') endif -if not get_option('tcg').disabled() +if get_option('tcg').allowed() if 'CONFIG_PLUGIN' in config_host subdir('plugin') endif diff --git a/tests/migration/aarch64/a-b-kernel.S b/tests/migration/aarch64/a-b-kernel.S index 0225945348..a4103ecb71 100644 --- a/tests/migration/aarch64/a-b-kernel.S +++ b/tests/migration/aarch64/a-b-kernel.S @@ -53,7 +53,6 @@ innerloop: /* increment the first byte of each page by 1 */ ldrb w3, [x4] add w3, w3, #1 - and w3, w3, #0xff strb w3, [x4] /* make sure QEMU user space can see consistent data as MMU is off */ @@ -64,7 +63,7 @@ innerloop: blt innerloop add w5, w5, #1 - and w5, w5, #0xff + and w5, w5, #0x1f cmp w5, #0 bne mainloop diff --git a/tests/migration/aarch64/a-b-kernel.h b/tests/migration/aarch64/a-b-kernel.h index 0a9b01137e..34e518d061 100644 --- a/tests/migration/aarch64/a-b-kernel.h +++ b/tests/migration/aarch64/a-b-kernel.h @@ -10,9 +10,9 @@ unsigned char aarch64_kernel[] = { 0x03, 0x00, 0x80, 0x52, 0xe4, 0x03, 0x00, 0xaa, 0x83, 0x00, 0x00, 0x39, 0x84, 0x04, 0x40, 0x91, 0x9f, 0x00, 0x01, 0xeb, 0xad, 0xff, 0xff, 0x54, 0x05, 0x00, 0x80, 0x52, 0xe4, 0x03, 0x00, 0xaa, 0x83, 0x00, 0x40, 0x39, - 0x63, 0x04, 0x00, 0x11, 0x63, 0x1c, 0x00, 0x12, 0x83, 0x00, 0x00, 0x39, - 0x24, 0x7e, 0x0b, 0xd5, 0x84, 0x04, 0x40, 0x91, 0x9f, 0x00, 0x01, 0xeb, - 0x2b, 0xff, 0xff, 0x54, 0xa5, 0x04, 0x00, 0x11, 0xa5, 0x1c, 0x00, 0x12, - 0xbf, 0x00, 0x00, 0x71, 0x81, 0xfe, 0xff, 0x54, 0x43, 0x08, 0x80, 0x52, - 0x43, 0x00, 0x00, 0x39, 0xf1, 0xff, 0xff, 0x17 + 0x63, 0x04, 0x00, 0x11, 0x83, 0x00, 0x00, 0x39, 0x24, 0x7e, 0x0b, 0xd5, + 0x84, 0x04, 0x40, 0x91, 0x9f, 0x00, 0x01, 0xeb, 0x4b, 0xff, 0xff, 0x54, + 0xa5, 0x04, 0x00, 0x11, 0xa5, 0x10, 0x00, 0x12, 0xbf, 0x00, 0x00, 0x71, + 0xa1, 0xfe, 0xff, 0x54, 0x43, 0x08, 0x80, 0x52, 0x43, 0x00, 0x00, 0x39, + 0xf2, 0xff, 0xff, 0x17 }; diff --git a/tests/migration/guestperf/engine.py b/tests/migration/guestperf/engine.py index 87a6ab2009..59fca2c70b 100644 --- a/tests/migration/guestperf/engine.py +++ b/tests/migration/guestperf/engine.py @@ -65,7 +65,6 @@ class Engine(object): return records def _cpu_timing(self, pid): - records = [] now = time.time() jiffies_per_sec = os.sysconf(os.sysconf_names['SC_CLK_TCK']) diff --git a/tests/migration/i386/a-b-bootblock.S b/tests/migration/i386/a-b-bootblock.S index 3f97f28023..3d464c7568 100644 --- a/tests/migration/i386/a-b-bootblock.S +++ b/tests/migration/i386/a-b-bootblock.S @@ -50,6 +50,7 @@ innerloop: jl innerloop 
inc %bl + andb $0x3f,%bl jnz mainloop mov $66,%ax diff --git a/tests/migration/i386/a-b-bootblock.h b/tests/migration/i386/a-b-bootblock.h index 7d459d4fde..b7b0fce2ee 100644 --- a/tests/migration/i386/a-b-bootblock.h +++ b/tests/migration/i386/a-b-bootblock.h @@ -4,17 +4,17 @@ * the header and the assembler differences in your patch submission. */ unsigned char x86_bootsect[] = { - 0xfa, 0x0f, 0x01, 0x16, 0x74, 0x7c, 0x66, 0xb8, 0x01, 0x00, 0x00, 0x00, + 0xfa, 0x0f, 0x01, 0x16, 0x78, 0x7c, 0x66, 0xb8, 0x01, 0x00, 0x00, 0x00, 0x0f, 0x22, 0xc0, 0x66, 0xea, 0x20, 0x7c, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe4, 0x92, 0x0c, 0x02, 0xe6, 0x92, 0xb8, 0x10, 0x00, 0x00, 0x00, 0x8e, 0xd8, 0x66, 0xb8, 0x41, 0x00, 0x66, 0xba, 0xf8, 0x03, 0xee, 0xb3, 0x00, 0xb8, 0x00, 0x00, 0x10, 0x00, 0xfe, 0x00, 0x05, 0x00, 0x10, 0x00, 0x00, 0x3d, 0x00, 0x00, 0x40, - 0x06, 0x7c, 0xf2, 0xfe, 0xc3, 0x75, 0xe9, 0x66, 0xb8, 0x42, 0x00, 0x66, - 0xba, 0xf8, 0x03, 0xee, 0xeb, 0xde, 0x66, 0x90, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x9a, 0xcf, 0x00, - 0xff, 0xff, 0x00, 0x00, 0x00, 0x92, 0xcf, 0x00, 0x27, 0x00, 0x5c, 0x7c, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x06, 0x7c, 0xf2, 0xfe, 0xc3, 0x80, 0xe3, 0x3f, 0x75, 0xe6, 0x66, 0xb8, + 0x42, 0x00, 0x66, 0xba, 0xf8, 0x03, 0xee, 0xeb, 0xdb, 0x8d, 0x76, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x9a, 0xcf, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x92, 0xcf, 0x00, + 0x27, 0x00, 0x60, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, diff --git a/tests/migration/stress.c b/tests/migration/stress.c index b7240a15c8..88acf8dc25 100644 --- a/tests/migration/stress.c +++ b/tests/migration/stress.c @@ -232,7 +232,7 @@ static void stress(unsigned long long ramsizeGB, int ncpus) static int mount_misc(const char *fstype, const char *dir) { - if (mkdir(dir, 0755) < 0 && errno != EEXIST) { + if (g_mkdir_with_parents(dir, 0755) < 0 && errno != EEXIST) { fprintf(stderr, "%s (%05d): ERROR: cannot create %s: %s\n", argv0, gettid(), dir, strerror(errno)); return -1; diff --git a/tests/plugin/bb.c b/tests/plugin/bb.c index de09bdde4e..7d470a1011 100644 --- a/tests/plugin/bb.c +++ b/tests/plugin/bb.c @@ -104,10 +104,17 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, for (i = 0; i < argc; i++) { char *opt = argv[i]; - if (g_strcmp0(opt, "inline") == 0) { - do_inline = true; - } else if (g_strcmp0(opt, "idle") == 0) { - idle_report = true; + g_autofree char **tokens = g_strsplit(opt, "=", 2); + if (g_strcmp0(tokens[0], "inline") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_inline)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); + return -1; + } + } else if (g_strcmp0(tokens[0], "idle") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &idle_report)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); + return -1; + } } else { fprintf(stderr, "option parsing failed: %s\n", opt); return -1; diff --git a/tests/plugin/insn.c b/tests/plugin/insn.c index c253980ec8..cd5ea5d4ae 100644 --- a/tests/plugin/insn.c +++ b/tests/plugin/insn.c @@ -16,20 +16,80 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; -static uint64_t insn_count; 
+#define MAX_CPUS 8 /* lets not go nuts */ + +typedef struct { + uint64_t last_pc; + uint64_t insn_count; +} InstructionCount; + +static InstructionCount counts[MAX_CPUS]; +static uint64_t inline_insn_count; + static bool do_inline; +static bool do_size; +static GArray *sizes; + +typedef struct { + char *match_string; + uint64_t hits[MAX_CPUS]; + uint64_t last_hit[MAX_CPUS]; + uint64_t total_delta[MAX_CPUS]; + GPtrArray *history[MAX_CPUS]; +} Match; + +static GArray *matches; + +typedef struct { + Match *match; + uint64_t vaddr; + uint64_t hits; + char *disas; +} Instruction; static void vcpu_insn_exec_before(unsigned int cpu_index, void *udata) { - static uint64_t last_pc; + unsigned int i = cpu_index % MAX_CPUS; + InstructionCount *c = &counts[i]; uint64_t this_pc = GPOINTER_TO_UINT(udata); - if (this_pc == last_pc) { + if (this_pc == c->last_pc) { g_autofree gchar *out = g_strdup_printf("detected repeat execution @ 0x%" PRIx64 "\n", this_pc); qemu_plugin_outs(out); } - last_pc = this_pc; - insn_count++; + c->last_pc = this_pc; + c->insn_count++; +} + +static void vcpu_insn_matched_exec_before(unsigned int cpu_index, void *udata) +{ + unsigned int i = cpu_index % MAX_CPUS; + Instruction *insn = (Instruction *) udata; + Match *match = insn->match; + g_autoptr(GString) ts = g_string_new(""); + + insn->hits++; + g_string_append_printf(ts, "0x%" PRIx64 ", '%s', %"PRId64 " hits", + insn->vaddr, insn->disas, insn->hits); + + uint64_t icount = counts[i].insn_count; + uint64_t delta = icount - match->last_hit[i]; + + match->hits[i]++; + match->total_delta[i] += delta; + + g_string_append_printf(ts, + ", %"PRId64" match hits, " + "Δ+%"PRId64 " since last match," + " %"PRId64 " avg insns/match\n", + match->hits[i], delta, + match->total_delta[i] / match->hits[i]); + + match->last_hit[i] = icount; + + qemu_plugin_outs(ts->str); + + g_ptr_array_add(match->history[i], insn); } static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) @@ -42,28 +102,121 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) if (do_inline) { qemu_plugin_register_vcpu_insn_exec_inline( - insn, QEMU_PLUGIN_INLINE_ADD_U64, &insn_count, 1); + insn, QEMU_PLUGIN_INLINE_ADD_U64, &inline_insn_count, 1); } else { uint64_t vaddr = qemu_plugin_insn_vaddr(insn); qemu_plugin_register_vcpu_insn_exec_cb( insn, vcpu_insn_exec_before, QEMU_PLUGIN_CB_NO_REGS, GUINT_TO_POINTER(vaddr)); } + + if (do_size) { + size_t sz = qemu_plugin_insn_size(insn); + if (sz > sizes->len) { + g_array_set_size(sizes, sz); + } + unsigned long *cnt = &g_array_index(sizes, unsigned long, sz); + (*cnt)++; + } + + /* + * If we are tracking certain instructions we will need more + * information about the instruction which we also need to + * save if there is a hit. 
+ */ + if (matches) { + char *insn_disas = qemu_plugin_insn_disas(insn); + int j; + for (j = 0; j < matches->len; j++) { + Match *m = &g_array_index(matches, Match, j); + if (g_str_has_prefix(insn_disas, m->match_string)) { + Instruction *rec = g_new0(Instruction, 1); + rec->disas = g_strdup(insn_disas); + rec->vaddr = qemu_plugin_insn_vaddr(insn); + rec->match = m; + qemu_plugin_register_vcpu_insn_exec_cb( + insn, vcpu_insn_matched_exec_before, + QEMU_PLUGIN_CB_NO_REGS, rec); + } + } + g_free(insn_disas); + } } } static void plugin_exit(qemu_plugin_id_t id, void *p) { - g_autofree gchar *out = g_strdup_printf("insns: %" PRIu64 "\n", insn_count); - qemu_plugin_outs(out); + g_autoptr(GString) out = g_string_new(NULL); + int i; + + if (do_size) { + for (i = 0; i <= sizes->len; i++) { + unsigned long *cnt = &g_array_index(sizes, unsigned long, i); + if (*cnt) { + g_string_append_printf(out, + "len %d bytes: %ld insns\n", i, *cnt); + } + } + } else if (do_inline) { + g_string_append_printf(out, "insns: %" PRIu64 "\n", inline_insn_count); + } else { + uint64_t total_insns = 0; + for (i = 0; i < MAX_CPUS; i++) { + InstructionCount *c = &counts[i]; + if (c->insn_count) { + g_string_append_printf(out, "cpu %d insns: %" PRIu64 "\n", + i, c->insn_count); + total_insns += c->insn_count; + } + } + g_string_append_printf(out, "total insns: %" PRIu64 "\n", + total_insns); + } + qemu_plugin_outs(out->str); +} + + +/* Add a match to the array of matches */ +static void parse_match(char *match) +{ + Match new_match = { .match_string = match }; + int i; + for (i = 0; i < MAX_CPUS; i++) { + new_match.history[i] = g_ptr_array_new(); + } + if (!matches) { + matches = g_array_new(false, true, sizeof(Match)); + } + g_array_append_val(matches, new_match); } QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, int argc, char **argv) { - if (argc && !strcmp(argv[0], "inline")) { - do_inline = true; + for (int i = 0; i < argc; i++) { + char *opt = argv[i]; + g_autofree char **tokens = g_strsplit(opt, "=", 2); + if (g_strcmp0(tokens[0], "inline") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_inline)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); + return -1; + } + } else if (g_strcmp0(tokens[0], "sizes") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_size)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); + return -1; + } + } else if (g_strcmp0(tokens[0], "match") == 0) { + parse_match(tokens[1]); + } else { + fprintf(stderr, "option parsing failed: %s\n", opt); + return -1; + } + } + + if (do_size) { + sizes = g_array_new(true, true, sizeof(unsigned long)); } qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); diff --git a/tests/plugin/mem.c b/tests/plugin/mem.c index afd1d27e5c..4570f7d815 100644 --- a/tests/plugin/mem.c +++ b/tests/plugin/mem.c @@ -80,29 +80,40 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, int argc, char **argv) { - if (argc) { - if (argc >= 3) { - if (!strcmp(argv[2], "haddr")) { - do_haddr = true; - } - } - if (argc >= 2) { - const char *str = argv[1]; - if (!strcmp(str, "r")) { - rw = QEMU_PLUGIN_MEM_R; - } else if (!strcmp(str, "w")) { - rw = QEMU_PLUGIN_MEM_W; + for (int i = 0; i < argc; i++) { + char *opt = argv[i]; + g_autofree char **tokens = g_strsplit(opt, "=", 2); + + if (g_strcmp0(tokens[0], "haddr") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_haddr)) { + fprintf(stderr, "boolean argument parsing failed: 
%s\n", opt); + return -1; + } + } else if (g_strcmp0(tokens[0], "track") == 0) { + if (g_strcmp0(tokens[1], "r") == 0) { + rw = QEMU_PLUGIN_MEM_R; + } else if (g_strcmp0(tokens[1], "w") == 0) { + rw = QEMU_PLUGIN_MEM_W; + } else if (g_strcmp0(tokens[1], "rw") == 0) { + rw = QEMU_PLUGIN_MEM_RW; + } else { + fprintf(stderr, "invaild value for argument track: %s\n", opt); + return -1; + } + } else if (g_strcmp0(tokens[0], "inline") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_inline)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); + return -1; + } + } else if (g_strcmp0(tokens[0], "callback") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_callback)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); + return -1; } - } - if (!strcmp(argv[0], "inline")) { - do_inline = true; - do_callback = false; - } else if (!strcmp(argv[0], "both")) { - do_inline = true; - do_callback = true; } else { - do_callback = true; + fprintf(stderr, "option parsing failed: %s\n", opt); + return -1; } } diff --git a/tests/plugin/syscall.c b/tests/plugin/syscall.c index 6dd71092e1..96040c578f 100644 --- a/tests/plugin/syscall.c +++ b/tests/plugin/syscall.c @@ -70,19 +70,17 @@ static void vcpu_syscall_ret(qemu_plugin_id_t id, unsigned int vcpu_idx, } g_mutex_unlock(&lock); } else { - g_autofree gchar *out; - out = g_strdup_printf("syscall #%" PRIi64 " returned -> %" PRIi64 "\n", - num, ret); + g_autofree gchar *out = g_strdup_printf( + "syscall #%" PRIi64 " returned -> %" PRIi64 "\n", num, ret); qemu_plugin_outs(out); } } static void print_entry(gpointer val, gpointer user_data) { - g_autofree gchar *out; SyscallStats *entry = (SyscallStats *) val; int64_t syscall_num = entry->num; - out = g_strdup_printf( + g_autofree gchar *out = g_strdup_printf( "%-13" PRIi64 "%-6" PRIi64 " %" PRIi64 "\n", syscall_num, entry->calls, entry->errors); qemu_plugin_outs(out); @@ -119,17 +117,26 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, int argc, char **argv) { - if (argc == 0) { - statistics = g_hash_table_new_full(NULL, g_direct_equal, NULL, g_free); - } else { - for (int i = 0; i < argc; i++) { - if (g_strcmp0(argv[i], "print") != 0) { - fprintf(stderr, "unsupported argument: %s\n", argv[i]); - return -1; + bool do_print = false; + + for (int i = 0; i < argc; i++) { + char *opt = argv[i]; + g_autofree char **tokens = g_strsplit(opt, "=", 2); + + if (g_strcmp0(tokens[0], "print") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_print)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); } + } else { + fprintf(stderr, "unsupported argument: %s\n", argv[i]); + return -1; } } + if (!do_print) { + statistics = g_hash_table_new_full(NULL, g_direct_equal, NULL, g_free); + } + qemu_plugin_register_vcpu_syscall_cb(id, vcpu_syscall); qemu_plugin_register_vcpu_syscall_ret_cb(id, vcpu_syscall_ret); qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); diff --git a/tests/qapi-schema/alternate-array.err b/tests/qapi-schema/alternate-array.err index b1aa1f4e8d..e69de29bb2 100644 --- a/tests/qapi-schema/alternate-array.err +++ b/tests/qapi-schema/alternate-array.err @@ -1,2 +0,0 @@ -alternate-array.json: In alternate 'Alt': -alternate-array.json:5: 'data' member 'two' cannot be an array diff --git a/tests/qapi-schema/alternate-array.json b/tests/qapi-schema/alternate-array.json index f241aac122..b878a2db77 100644 --- a/tests/qapi-schema/alternate-array.json +++ b/tests/qapi-schema/alternate-array.json 
@@ -1,5 +1,3 @@ -# we do not allow array branches in alternates -# TODO: should we support this? { 'struct': 'One', 'data': { 'name': 'str' } } { 'alternate': 'Alt', diff --git a/tests/qapi-schema/alternate-array.out b/tests/qapi-schema/alternate-array.out index e69de29bb2..a657d85738 100644 --- a/tests/qapi-schema/alternate-array.out +++ b/tests/qapi-schema/alternate-array.out @@ -0,0 +1,18 @@ +module ./builtin +object q_empty +enum QType + prefix QTYPE + member none + member qnull + member qnum + member qstring + member qdict + member qlist + member qbool +module alternate-array.json +object One + member name: str optional=False +alternate Alt + tag type + case one: One + case two: intList diff --git a/tests/qapi-schema/alternate-branch-if-invalid.err b/tests/qapi-schema/alternate-branch-if-invalid.err index d384929c51..03bad877a3 100644 --- a/tests/qapi-schema/alternate-branch-if-invalid.err +++ b/tests/qapi-schema/alternate-branch-if-invalid.err @@ -1,2 +1,2 @@ alternate-branch-if-invalid.json: In alternate 'Alt': -alternate-branch-if-invalid.json:2: 'if' condition ' ' of 'data' member 'branch' makes no sense +alternate-branch-if-invalid.json:2: 'if' condition ' ' of 'data' member 'branch' is not a valid identifier diff --git a/tests/qapi-schema/alternate-conflict-lists.err b/tests/qapi-schema/alternate-conflict-lists.err new file mode 100644 index 0000000000..f3374ec1e7 --- /dev/null +++ b/tests/qapi-schema/alternate-conflict-lists.err @@ -0,0 +1,2 @@ +alternate-conflict-lists.json: In alternate 'Alt': +alternate-conflict-lists.json:4: branch 'two' can't be distinguished from 'one' diff --git a/tests/qapi-schema/alternate-conflict-lists.json b/tests/qapi-schema/alternate-conflict-lists.json new file mode 100644 index 0000000000..a3efd6c501 --- /dev/null +++ b/tests/qapi-schema/alternate-conflict-lists.json @@ -0,0 +1,6 @@ +# Two lists conflict even if their inner types would be compatible +{ 'struct': 'One', + 'data': { 'name': 'str' } } +{ 'alternate': 'Alt', + 'data': { 'one': [ 'int' ], + 'two': [ 'str' ] } } diff --git a/tests/qapi-schema/flat-union-array-branch.out b/tests/qapi-schema/alternate-conflict-lists.out similarity index 100% rename from tests/qapi-schema/flat-union-array-branch.out rename to tests/qapi-schema/alternate-conflict-lists.out diff --git a/tests/qapi-schema/args-union.err b/tests/qapi-schema/args-union.err index 4bf4955027..4b80a99f74 100644 --- a/tests/qapi-schema/args-union.err +++ b/tests/qapi-schema/args-union.err @@ -1,2 +1,2 @@ args-union.json: In command 'oops': -args-union.json:3: command's 'data' can take union type 'Uni' only with 'boxed': true +args-union.json:9: command's 'data' can take union type 'Uni' only with 'boxed': true diff --git a/tests/qapi-schema/args-union.json b/tests/qapi-schema/args-union.json index 2fcaeaae16..aabb159063 100644 --- a/tests/qapi-schema/args-union.json +++ b/tests/qapi-schema/args-union.json @@ -1,3 +1,9 @@ # use of union arguments requires 'boxed':true -{ 'union': 'Uni', 'data': { 'case1': 'int', 'case2': 'str' } } +{ 'enum': 'Enum', 'data': [ 'case1', 'case2' ] } +{ 'struct': 'Case1', 'data': { 'data': 'int' } } +{ 'struct': 'Case2', 'data': { 'data': 'str' } } +{ 'union': 'Uni', + 'base': { 'type': 'Enum' }, + 'discriminator': 'type', + 'data': { 'case1': 'Case1', 'case2': 'Case2' } } { 'command': 'oops', 'data': 'Uni' } diff --git a/tests/qapi-schema/bad-base.err b/tests/qapi-schema/bad-base.err index 61a1efc2c0..1fad63e392 100644 --- a/tests/qapi-schema/bad-base.err +++ b/tests/qapi-schema/bad-base.err @@ -1,2 
+1,2 @@ bad-base.json: In struct 'MyType': -bad-base.json:3: 'base' requires a struct type, union type 'Union' isn't +bad-base.json:9: 'base' requires a struct type, union type 'Union' isn't diff --git a/tests/qapi-schema/bad-base.json b/tests/qapi-schema/bad-base.json index a634331cdd..8c773ff544 100644 --- a/tests/qapi-schema/bad-base.json +++ b/tests/qapi-schema/bad-base.json @@ -1,3 +1,9 @@ # we reject a base that is not a struct -{ 'union': 'Union', 'data': { 'a': 'int', 'b': 'str' } } +{ 'enum': 'Enum', 'data': [ 'a', 'b' ] } +{ 'struct': 'Int', 'data': { 'data': 'int' } } +{ 'struct': 'Str', 'data': { 'data': 'str' } } +{ 'union': 'Union', + 'base': { 'type': 'Enum' }, + 'discriminator': 'type', + 'data': { 'a': 'Int', 'b': 'Str' } } { 'struct': 'MyType', 'base': 'Union', 'data': { 'c': 'int' } } diff --git a/tests/qapi-schema/bad-if-all.err b/tests/qapi-schema/bad-if-all.err new file mode 100644 index 0000000000..a04f6e7043 --- /dev/null +++ b/tests/qapi-schema/bad-if-all.err @@ -0,0 +1,2 @@ +bad-if-all.json: In struct 'TestIfStruct': +bad-if-all.json:2: 'all' condition of struct must be an array diff --git a/tests/qapi-schema/bad-if-all.json b/tests/qapi-schema/bad-if-all.json new file mode 100644 index 0000000000..44837d3981 --- /dev/null +++ b/tests/qapi-schema/bad-if-all.json @@ -0,0 +1,3 @@ +# check 'if all' is not a list +{ 'struct': 'TestIfStruct', 'data': { 'foo': 'int' }, + 'if': { 'all': 'ALL' } } diff --git a/tests/qapi-schema/flat-union-bad-base.out b/tests/qapi-schema/bad-if-all.out similarity index 100% rename from tests/qapi-schema/flat-union-bad-base.out rename to tests/qapi-schema/bad-if-all.out diff --git a/tests/qapi-schema/bad-if-empty-list.json b/tests/qapi-schema/bad-if-empty-list.json index 94f2eb8670..b62b5671df 100644 --- a/tests/qapi-schema/bad-if-empty-list.json +++ b/tests/qapi-schema/bad-if-empty-list.json @@ -1,3 +1,3 @@ # check empty 'if' list { 'struct': 'TestIfStruct', 'data': { 'foo': 'int' }, - 'if': [] } + 'if': { 'all': [] } } diff --git a/tests/qapi-schema/bad-if-empty.err b/tests/qapi-schema/bad-if-empty.err index a0f3effefb..5208f543ce 100644 --- a/tests/qapi-schema/bad-if-empty.err +++ b/tests/qapi-schema/bad-if-empty.err @@ -1,2 +1,2 @@ bad-if-empty.json: In struct 'TestIfStruct': -bad-if-empty.json:2: 'if' condition '' of struct makes no sense +bad-if-empty.json:2: 'if' condition '' of struct is not a valid identifier diff --git a/tests/qapi-schema/bad-if-key.err b/tests/qapi-schema/bad-if-key.err new file mode 100644 index 0000000000..38cf44b687 --- /dev/null +++ b/tests/qapi-schema/bad-if-key.err @@ -0,0 +1,3 @@ +bad-if-key.json: In struct 'TestIfStruct': +bad-if-key.json:2: 'if' condition of struct has unknown key 'value' +Valid keys are 'all', 'any', 'not'. 
diff --git a/tests/qapi-schema/bad-if-key.json b/tests/qapi-schema/bad-if-key.json new file mode 100644 index 0000000000..64c74c13f2 --- /dev/null +++ b/tests/qapi-schema/bad-if-key.json @@ -0,0 +1,3 @@ +# check unknown 'if' dict key +{ 'struct': 'TestIfStruct', 'data': { 'foo': 'int' }, + 'if': { 'value': 'defined(TEST_IF_STRUCT)' } } diff --git a/tests/qapi-schema/flat-union-bad-discriminator.out b/tests/qapi-schema/bad-if-key.out similarity index 100% rename from tests/qapi-schema/flat-union-bad-discriminator.out rename to tests/qapi-schema/bad-if-key.out diff --git a/tests/qapi-schema/bad-if-keys.err b/tests/qapi-schema/bad-if-keys.err new file mode 100644 index 0000000000..fe87bd30ac --- /dev/null +++ b/tests/qapi-schema/bad-if-keys.err @@ -0,0 +1,2 @@ +bad-if-keys.json: In struct 'TestIfStruct': +bad-if-keys.json:2: 'if' condition of struct has conflicting keys diff --git a/tests/qapi-schema/bad-if-keys.json b/tests/qapi-schema/bad-if-keys.json new file mode 100644 index 0000000000..9e2f39ae21 --- /dev/null +++ b/tests/qapi-schema/bad-if-keys.json @@ -0,0 +1,3 @@ +# check multiple 'if' keys +{ 'struct': 'TestIfStruct', 'data': { 'foo': 'int' }, + 'if': { 'any': ['ANY'], 'all': ['ALL'] } } diff --git a/tests/qapi-schema/flat-union-base-any.out b/tests/qapi-schema/bad-if-keys.out similarity index 100% rename from tests/qapi-schema/flat-union-base-any.out rename to tests/qapi-schema/bad-if-keys.out diff --git a/tests/qapi-schema/bad-if-list.err b/tests/qapi-schema/bad-if-list.err index c462f11b90..334e8b845a 100644 --- a/tests/qapi-schema/bad-if-list.err +++ b/tests/qapi-schema/bad-if-list.err @@ -1,2 +1,2 @@ bad-if-list.json: In struct 'TestIfStruct': -bad-if-list.json:2: 'if' condition ' ' of struct makes no sense +bad-if-list.json:2: 'if' condition 'foo' of struct is not a valid identifier diff --git a/tests/qapi-schema/bad-if-list.json b/tests/qapi-schema/bad-if-list.json index ea3d95bb6b..1fefef16a7 100644 --- a/tests/qapi-schema/bad-if-list.json +++ b/tests/qapi-schema/bad-if-list.json @@ -1,3 +1,3 @@ # check invalid 'if' content { 'struct': 'TestIfStruct', 'data': { 'foo': 'int' }, - 'if': ['foo', ' '] } + 'if': { 'all': ['foo', ' '] } } diff --git a/tests/qapi-schema/bad-if-not.err b/tests/qapi-schema/bad-if-not.err new file mode 100644 index 0000000000..b33f5e16b8 --- /dev/null +++ b/tests/qapi-schema/bad-if-not.err @@ -0,0 +1,2 @@ +bad-if-not.json: In struct 'TestIfStruct': +bad-if-not.json:2: 'if' condition '' of struct is not a valid identifier diff --git a/tests/qapi-schema/bad-if-not.json b/tests/qapi-schema/bad-if-not.json new file mode 100644 index 0000000000..9fdaacc47b --- /dev/null +++ b/tests/qapi-schema/bad-if-not.json @@ -0,0 +1,3 @@ +# check 'if not' with empy argument +{ 'struct': 'TestIfStruct', 'data': { 'foo': 'int' }, + 'if': { 'not': '' } } diff --git a/tests/qapi-schema/flat-union-base-union.out b/tests/qapi-schema/bad-if-not.out similarity index 100% rename from tests/qapi-schema/flat-union-base-union.out rename to tests/qapi-schema/bad-if-not.out diff --git a/tests/qapi-schema/bad-if.err b/tests/qapi-schema/bad-if.err index f83dee65da..ec373b213f 100644 --- a/tests/qapi-schema/bad-if.err +++ b/tests/qapi-schema/bad-if.err @@ -1,2 +1,2 @@ bad-if.json: In struct 'TestIfStruct': -bad-if.json:2: 'if' condition of struct must be a string or a list of strings +bad-if.json:2: 'if' condition of struct must be a string or an object diff --git a/tests/qapi-schema/bad-if.json b/tests/qapi-schema/bad-if.json index 3edd1a0bf2..2639e3c661 100644 --- 
a/tests/qapi-schema/bad-if.json +++ b/tests/qapi-schema/bad-if.json @@ -1,3 +1,3 @@ # check invalid 'if' type { 'struct': 'TestIfStruct', 'data': { 'foo': 'int' }, - 'if': { 'value': 'defined(TEST_IF_STRUCT)' } } + 'if': ['TEST_IF_STRUCT'] } diff --git a/tests/qapi-schema/doc-bad-feature.err b/tests/qapi-schema/doc-bad-feature.err index e4c62adfa3..49d1746c3d 100644 --- a/tests/qapi-schema/doc-bad-feature.err +++ b/tests/qapi-schema/doc-bad-feature.err @@ -1 +1 @@ -doc-bad-feature.json:3: documented member 'a' does not exist +doc-bad-feature.json:3: documented feature 'a' does not exist diff --git a/tests/qapi-schema/doc-empty-symbol.err b/tests/qapi-schema/doc-empty-symbol.err index 81b90e882a..aa51be41b2 100644 --- a/tests/qapi-schema/doc-empty-symbol.err +++ b/tests/qapi-schema/doc-empty-symbol.err @@ -1 +1 @@ -doc-empty-symbol.json:4:1: invalid name +doc-empty-symbol.json:4:1: name required after '@' diff --git a/tests/qapi-schema/doc-good.json b/tests/qapi-schema/doc-good.json index 423ea23e07..74745fb405 100644 --- a/tests/qapi-schema/doc-good.json +++ b/tests/qapi-schema/doc-good.json @@ -53,27 +53,34 @@ ## # @Enum: +# # @one: The _one_ {and only} # # Features: # @enum-feat: Also _one_ {and only} +# @enum-member-feat: a member feature # # @two is undocumented ## -{ 'enum': 'Enum', 'data': - [ { 'name': 'one', 'if': 'defined(IFONE)' }, 'two' ], +{ 'enum': 'Enum', + 'data': [ { 'name': 'one', 'if': 'IFONE', + 'features': [ 'enum-member-feat' ] }, + 'two' ], 'features': [ 'enum-feat' ], - 'if': 'defined(IFCOND)' } + 'if': 'IFCOND' } ## # @Base: +# # @base1: # the first member ## -{ 'struct': 'Base', 'data': { 'base1': 'Enum' } } +{ 'struct': 'Base', 'data': { 'base1': 'Enum' }, + 'if': { 'all': ['IFALL1', 'IFALL2'] } } ## # @Variant1: +# # A paragraph # # Another paragraph (but no @var: line) @@ -86,15 +93,17 @@ 'features': [ 'variant1-feat' ], 'data': { 'var1': { 'type': 'str', 'features': [ 'member-feat' ], - 'if': 'defined(IFSTR)' } } } + 'if': 'IFSTR' } } } ## # @Variant2: +# ## { 'struct': 'Variant2', 'data': {} } ## # @Object: +# # Features: # @union-feat1: a feature ## @@ -102,19 +111,13 @@ 'features': [ 'union-feat1' ], 'base': 'Base', 'discriminator': 'base1', - 'data': { 'one': 'Variant1', 'two': { 'type': 'Variant2', 'if': 'IFTWO' } } } - -## -# @SugaredUnion: -# Features: -# @union-feat2: a feature -## -{ 'union': 'SugaredUnion', - 'features': [ 'union-feat2' ], - 'data': { 'one': 'Variant1', 'two': { 'type': 'Variant2', 'if': 'IFTWO' } } } + 'data': { 'one': 'Variant1', + 'two': { 'type': 'Variant2', + 'if': { 'any': ['IFONE', 'IFTWO'] } } } } ## # @Alternate: +# # @i: an integer # @b is undocumented # @@ -123,7 +126,8 @@ ## { 'alternate': 'Alternate', 'features': [ 'alt-feat' ], - 'data': { 'i': 'int', 'b': 'bool' } } + 'data': { 'i': 'int', 'b': 'bool' }, + 'if': { 'not': { 'any': [ 'IFONE', 'IFTWO' ] } } } ## # == Another subsection @@ -131,6 +135,7 @@ ## # @cmd: +# # @arg1: the first argument # # @arg2: the second @@ -180,6 +185,7 @@ ## # @EVT_BOXED: +# # Features: # @feat3: a feature ## diff --git a/tests/qapi-schema/doc-good.out b/tests/qapi-schema/doc-good.out index 8f54ceff2e..9dd65b9d92 100644 --- a/tests/qapi-schema/doc-good.out +++ b/tests/qapi-schema/doc-good.out @@ -12,15 +12,17 @@ enum QType module doc-good.json enum Enum member one - if ['defined(IFONE)'] + if IFONE + feature enum-member-feat member two - if ['defined(IFCOND)'] + if IFCOND feature enum-feat object Base member base1: Enum optional=False + if {'all': ['IFALL1', 'IFALL2']} object Variant1 
member var1: str optional=False - if ['defined(IFSTR)'] + if IFSTR feature member-feat feature variant1-feat object Variant2 @@ -29,27 +31,13 @@ object Object tag base1 case one: Variant1 case two: Variant2 - if ['IFTWO'] + if {'any': ['IFONE', 'IFTWO']} feature union-feat1 -object q_obj_Variant1-wrapper - member data: Variant1 optional=False -object q_obj_Variant2-wrapper - member data: Variant2 optional=False -enum SugaredUnionKind - member one - member two - if ['IFTWO'] -object SugaredUnion - member type: SugaredUnionKind optional=False - tag type - case one: q_obj_Variant1-wrapper - case two: q_obj_Variant2-wrapper - if ['IFTWO'] - feature union-feat2 alternate Alternate tag type case i: int case b: bool + if {'not': {'any': ['IFONE', 'IFTWO']}} feature alt-feat object q_obj_cmd-arg member arg1: int optional=False @@ -121,6 +109,8 @@ The _one_ {and only} feature=enum-feat Also _one_ {and only} + feature=enum-member-feat +a member feature section=None @two is undocumented doc symbol=Base @@ -147,13 +137,6 @@ doc symbol=Object feature=union-feat1 a feature -doc symbol=SugaredUnion - body= - - arg=type - - feature=union-feat2 -a feature doc symbol=Alternate body= diff --git a/tests/qapi-schema/doc-good.txt b/tests/qapi-schema/doc-good.txt index 726727af74..b3b76bd43f 100644 --- a/tests/qapi-schema/doc-good.txt +++ b/tests/qapi-schema/doc-good.txt @@ -43,7 +43,7 @@ Example: Values ~~~~~~ -"one" (**If: **"defined(IFONE)") +"one" (**If: **"IFONE") The _one_ {and only} "two" @@ -56,13 +56,16 @@ Features "enum-feat" Also _one_ {and only} +"enum-member-feat" + a member feature + "two" is undocumented If ~~ -"defined(IFCOND)" +"IFCOND" "Base" (Object) @@ -76,6 +79,12 @@ Members the first member +If +~~ + +"IFALL1 and IFALL2" + + "Variant1" (Object) ------------------- @@ -87,7 +96,7 @@ Another paragraph (but no "var": line) Members ~~~~~~~ -"var1": "string" (**If: **"defined(IFSTR)") +"var1": "string" (**If: **"IFSTR") Not documented @@ -114,7 +123,8 @@ Members The members of "Base" The members of "Variant1" when "base1" is ""one"" -The members of "Variant2" when "base1" is ""two"" (**If: **"IFTWO") +The members of "Variant2" when "base1" is ""two"" (**If: **"IFONE or +IFTWO") Features ~~~~~~~~ @@ -123,26 +133,6 @@ Features a feature -"SugaredUnion" (Object) ------------------------ - - -Members -~~~~~~~ - -"type" - One of "one", "two" - -"data": "Variant1" when "type" is ""one"" -"data": "Variant2" when "type" is ""two"" (**If: **"IFTWO") - -Features -~~~~~~~~ - -"union-feat2" - a feature - - "Alternate" (Alternate) ----------------------- @@ -164,6 +154,12 @@ Features a feature +If +~~ + +"not (IFONE or IFTWO)" + + Another subsection ================== diff --git a/tests/qapi-schema/double-type.err b/tests/qapi-schema/double-type.err index 576e716197..6a1e8a5990 100644 --- a/tests/qapi-schema/double-type.err +++ b/tests/qapi-schema/double-type.err @@ -1,3 +1 @@ -double-type.json: In struct 'Bar': -double-type.json:2: struct has unknown key 'command' -Valid keys are 'base', 'data', 'features', 'if', 'struct'. 
+double-type.json:2: expression must have exactly one key 'enum', 'struct', 'union', 'alternate', 'command', 'event' diff --git a/tests/qapi-schema/enum-dict-member-unknown.err b/tests/qapi-schema/enum-dict-member-unknown.err index f8617ea179..235cde0c49 100644 --- a/tests/qapi-schema/enum-dict-member-unknown.err +++ b/tests/qapi-schema/enum-dict-member-unknown.err @@ -1,3 +1,3 @@ enum-dict-member-unknown.json: In enum 'MyEnum': enum-dict-member-unknown.json:2: 'data' member has unknown key 'bad-key' -Valid keys are 'if', 'name'. +Valid keys are 'features', 'if', 'name'. diff --git a/tests/qapi-schema/enum-if-invalid.err b/tests/qapi-schema/enum-if-invalid.err index 0556dc967b..2b2bbffb65 100644 --- a/tests/qapi-schema/enum-if-invalid.err +++ b/tests/qapi-schema/enum-if-invalid.err @@ -1,2 +1,3 @@ enum-if-invalid.json: In enum 'TestIfEnum': -enum-if-invalid.json:2: 'if' condition of 'data' member 'bar' must be a string or a list of strings +enum-if-invalid.json:2: 'if' condition of 'data' member 'bar' has unknown key 'val' +Valid keys are 'all', 'any', 'not'. diff --git a/tests/qapi-schema/enum-if-invalid.json b/tests/qapi-schema/enum-if-invalid.json index 60bd0ef1d7..6bd20041f3 100644 --- a/tests/qapi-schema/enum-if-invalid.json +++ b/tests/qapi-schema/enum-if-invalid.json @@ -1,3 +1,3 @@ # check invalid 'if' type -{ 'enum': 'TestIfEnum', 'data': - [ 'foo', { 'name' : 'bar', 'if': { 'val': 'foo' } } ] } +{ 'enum': 'TestIfEnum', + 'data': [ 'foo', { 'name' : 'bar', 'if': { 'val': 'foo' } } ] } diff --git a/tests/qapi-schema/features-if-invalid.err b/tests/qapi-schema/features-if-invalid.err index f63b89535e..0ce7b6fcdf 100644 --- a/tests/qapi-schema/features-if-invalid.err +++ b/tests/qapi-schema/features-if-invalid.err @@ -1,2 +1,2 @@ features-if-invalid.json: In struct 'Stru': -features-if-invalid.json:2: 'if' condition of 'features' member 'f' must be a string or a list of strings +features-if-invalid.json:2: 'if' condition of 'features' member 'f' must be a string or an object diff --git a/tests/qapi-schema/features-missing-name.json b/tests/qapi-schema/features-missing-name.json index 2314f97c00..8772c8f7b3 100644 --- a/tests/qapi-schema/features-missing-name.json +++ b/tests/qapi-schema/features-missing-name.json @@ -1,3 +1,3 @@ { 'struct': 'FeatureStruct0', 'data': { 'foo': 'int' }, - 'features': [ { 'if': 'defined(NAMELESS_FEATURES)' } ] } + 'features': [ { 'if': 'NAMELESS_FEATURES' } ] } diff --git a/tests/qapi-schema/flat-union-array-branch.err b/tests/qapi-schema/flat-union-array-branch.err deleted file mode 100644 index 20a8ef1406..0000000000 --- a/tests/qapi-schema/flat-union-array-branch.err +++ /dev/null @@ -1,2 +0,0 @@ -flat-union-array-branch.json: In union 'TestUnion': -flat-union-array-branch.json:8: 'data' member 'value1' cannot be an array diff --git a/tests/qapi-schema/flat-union-bad-base.err b/tests/qapi-schema/flat-union-bad-base.err deleted file mode 100644 index e0a205a58c..0000000000 --- a/tests/qapi-schema/flat-union-bad-base.err +++ /dev/null @@ -1,2 +0,0 @@ -flat-union-bad-base.json: In union 'TestUnion': -flat-union-bad-base.json:8: member 'string' of type 'TestTypeA' collides with base member 'string' diff --git a/tests/qapi-schema/flat-union-bad-discriminator.err b/tests/qapi-schema/flat-union-bad-discriminator.err deleted file mode 100644 index b705439bd9..0000000000 --- a/tests/qapi-schema/flat-union-bad-discriminator.err +++ /dev/null @@ -1,2 +0,0 @@ -flat-union-bad-discriminator.json: In union 'TestUnion': -flat-union-bad-discriminator.json:11: 
'discriminator' requires a string name diff --git a/tests/qapi-schema/flat-union-base-any.err b/tests/qapi-schema/flat-union-base-any.err deleted file mode 100644 index c2d4de6a5d..0000000000 --- a/tests/qapi-schema/flat-union-base-any.err +++ /dev/null @@ -1,2 +0,0 @@ -flat-union-base-any.json: In union 'TestUnion': -flat-union-base-any.json:8: 'base' requires a struct type, built-in type 'any' isn't diff --git a/tests/qapi-schema/flat-union-base-union.err b/tests/qapi-schema/flat-union-base-union.err deleted file mode 100644 index 3b0087220e..0000000000 --- a/tests/qapi-schema/flat-union-base-union.err +++ /dev/null @@ -1,2 +0,0 @@ -flat-union-base-union.json: In union 'TestUnion': -flat-union-base-union.json:14: 'base' requires a struct type, union type 'UnionBase' isn't diff --git a/tests/qapi-schema/flat-union-clash-member.err b/tests/qapi-schema/flat-union-clash-member.err deleted file mode 100644 index 07551e6ef5..0000000000 --- a/tests/qapi-schema/flat-union-clash-member.err +++ /dev/null @@ -1,2 +0,0 @@ -flat-union-clash-member.json: In union 'TestUnion': -flat-union-clash-member.json:11: member 'name' of type 'Branch1' collides with member 'name' of type 'Base' diff --git a/tests/qapi-schema/flat-union-discriminator-bad-name.err b/tests/qapi-schema/flat-union-discriminator-bad-name.err deleted file mode 100644 index 28be49c31a..0000000000 --- a/tests/qapi-schema/flat-union-discriminator-bad-name.err +++ /dev/null @@ -1,2 +0,0 @@ -flat-union-discriminator-bad-name.json: In union 'MyUnion': -flat-union-discriminator-bad-name.json:6: discriminator '*switch' is not a member of 'base' diff --git a/tests/qapi-schema/flat-union-empty.err b/tests/qapi-schema/flat-union-empty.err deleted file mode 100644 index 89b0f25cb0..0000000000 --- a/tests/qapi-schema/flat-union-empty.err +++ /dev/null @@ -1,2 +0,0 @@ -flat-union-empty.json: In union 'Union': -flat-union-empty.json:4: union has no branches diff --git a/tests/qapi-schema/flat-union-empty.json b/tests/qapi-schema/flat-union-empty.json deleted file mode 100644 index 83e1cc7b96..0000000000 --- a/tests/qapi-schema/flat-union-empty.json +++ /dev/null @@ -1,4 +0,0 @@ -# flat union discriminator cannot be empty -{ 'enum': 'Empty', 'data': [ ] } -{ 'struct': 'Base', 'data': { 'type': 'Empty' } } -{ 'union': 'Union', 'base': 'Base', 'discriminator': 'type', 'data': { } } diff --git a/tests/qapi-schema/flat-union-inline-invalid-dict.err b/tests/qapi-schema/flat-union-inline-invalid-dict.err deleted file mode 100644 index 53e5416707..0000000000 --- a/tests/qapi-schema/flat-union-inline-invalid-dict.err +++ /dev/null @@ -1,2 +0,0 @@ -flat-union-inline-invalid-dict.json: In union 'TestUnion': -flat-union-inline-invalid-dict.json:7: 'data' member 'value1' misses key 'type' diff --git a/tests/qapi-schema/flat-union-int-branch.err b/tests/qapi-schema/flat-union-int-branch.err deleted file mode 100644 index ae7f800603..0000000000 --- a/tests/qapi-schema/flat-union-int-branch.err +++ /dev/null @@ -1,2 +0,0 @@ -flat-union-int-branch.json: In union 'TestUnion': -flat-union-int-branch.json:8: branch 'value1' cannot use built-in type 'int' diff --git a/tests/qapi-schema/flat-union-invalid-branch-key.err b/tests/qapi-schema/flat-union-invalid-branch-key.err deleted file mode 100644 index 5576a25f9b..0000000000 --- a/tests/qapi-schema/flat-union-invalid-branch-key.err +++ /dev/null @@ -1,2 +0,0 @@ -flat-union-invalid-branch-key.json: In union 'TestUnion': -flat-union-invalid-branch-key.json:13: branch 'value_wrong' is not a value of enum type 'TestEnum' diff 
--git a/tests/qapi-schema/flat-union-invalid-discriminator.err b/tests/qapi-schema/flat-union-invalid-discriminator.err deleted file mode 100644 index 99bca2ddab..0000000000 --- a/tests/qapi-schema/flat-union-invalid-discriminator.err +++ /dev/null @@ -1,2 +0,0 @@ -flat-union-invalid-discriminator.json: In union 'TestUnion': -flat-union-invalid-discriminator.json:10: discriminator 'enum_wrong' is not a member of 'base' diff --git a/tests/qapi-schema/flat-union-invalid-if-discriminator.err b/tests/qapi-schema/flat-union-invalid-if-discriminator.err deleted file mode 100644 index 350f28da9d..0000000000 --- a/tests/qapi-schema/flat-union-invalid-if-discriminator.err +++ /dev/null @@ -1,2 +0,0 @@ -flat-union-invalid-if-discriminator.json: In union 'TestUnion': -flat-union-invalid-if-discriminator.json:10: discriminator member 'enum1' of 'base' must not be conditional diff --git a/tests/qapi-schema/flat-union-no-base.err b/tests/qapi-schema/flat-union-no-base.err deleted file mode 100644 index 5167565b00..0000000000 --- a/tests/qapi-schema/flat-union-no-base.err +++ /dev/null @@ -1,2 +0,0 @@ -flat-union-no-base.json: In union 'TestUnion': -flat-union-no-base.json:8: 'discriminator' requires 'base' diff --git a/tests/qapi-schema/flat-union-optional-discriminator.err b/tests/qapi-schema/flat-union-optional-discriminator.err deleted file mode 100644 index 3d60a1b496..0000000000 --- a/tests/qapi-schema/flat-union-optional-discriminator.err +++ /dev/null @@ -1,2 +0,0 @@ -flat-union-optional-discriminator.json: In union 'MyUnion': -flat-union-optional-discriminator.json:6: discriminator member 'switch' of base type 'Base' must not be optional diff --git a/tests/qapi-schema/flat-union-string-discriminator.err b/tests/qapi-schema/flat-union-string-discriminator.err deleted file mode 100644 index ff42c9728b..0000000000 --- a/tests/qapi-schema/flat-union-string-discriminator.err +++ /dev/null @@ -1,2 +0,0 @@ -flat-union-string-discriminator.json: In union 'TestUnion': -flat-union-string-discriminator.json:13: discriminator member 'kind' of base type 'TestBase' must be of enum type diff --git a/tests/qapi-schema/meson.build b/tests/qapi-schema/meson.build index b8de58116a..406bc7255d 100644 --- a/tests/qapi-schema/meson.build +++ b/tests/qapi-schema/meson.build @@ -1,5 +1,5 @@ test_env = environment() -test_env.set('PYTHONPATH', meson.source_root() / 'scripts') +test_env.set('PYTHONPATH', meson.project_source_root() / 'scripts') test_env.set('PYTHONIOENCODING', 'utf-8') schemas = [ @@ -11,6 +11,7 @@ schemas = [ 'alternate-conflict-dict.json', 'alternate-conflict-enum-bool.json', 'alternate-conflict-enum-int.json', + 'alternate-conflict-lists.json', 'alternate-conflict-string.json', 'alternate-conflict-bool-string.json', 'alternate-conflict-num-string.json', @@ -37,9 +38,13 @@ schemas = [ 'bad-data.json', 'bad-ident.json', 'bad-if.json', + 'bad-if-all.json', 'bad-if-empty.json', 'bad-if-empty-list.json', + 'bad-if-key.json', + 'bad-if-keys.json', 'bad-if-list.json', + 'bad-if-not.json', 'bad-type-bool.json', 'bad-type-dict.json', 'bad-type-int.json', @@ -103,22 +108,6 @@ schemas = [ 'features-name-bad-type.json', 'features-no-list.json', 'features-unknown-key.json', - 'flat-union-array-branch.json', - 'flat-union-bad-base.json', - 'flat-union-bad-discriminator.json', - 'flat-union-base-any.json', - 'flat-union-base-union.json', - 'flat-union-clash-member.json', - 'flat-union-discriminator-bad-name.json', - 'flat-union-empty.json', - 'flat-union-inline-invalid-dict.json', - 'flat-union-int-branch.json', - 
'flat-union-invalid-branch-key.json', - 'flat-union-invalid-discriminator.json', - 'flat-union-invalid-if-discriminator.json', - 'flat-union-no-base.json', - 'flat-union-optional-discriminator.json', - 'flat-union-string-discriminator.json', 'funny-char.json', 'funny-word.json', 'ident-with-escape.json', @@ -164,7 +153,6 @@ schemas = [ 'reserved-member-q.json', 'reserved-member-u.json', 'reserved-member-underscore.json', - 'reserved-type-kind.json', 'reserved-type-list.json', 'returns-alternate.json', 'returns-array-bad.json', @@ -187,16 +175,28 @@ schemas = [ 'unclosed-list.json', 'unclosed-object.json', 'unclosed-string.json', + 'union-array-branch.json', + 'union-bad-base.json', + 'union-bad-discriminator.json', + 'union-base-any.json', 'union-base-empty.json', 'union-base-no-discriminator.json', - 'union-branch-case.json', + 'union-base-union.json', 'union-branch-if-invalid.json', 'union-branch-invalid-dict.json', - 'union-clash-branches.json', + 'union-clash-member.json', + 'union-discriminator-bad-name.json', 'union-empty.json', + 'union-inline-invalid-dict.json', + 'union-int-branch.json', 'union-invalid-base.json', + 'union-invalid-branch-key.json', 'union-invalid-data.json', - 'union-optional-branch.json', + 'union-invalid-discriminator.json', + 'union-invalid-if-discriminator.json', + 'union-no-base.json', + 'union-optional-discriminator.json', + 'union-string-discriminator.json', 'union-unknown.json', 'unknown-escape.json', 'unknown-expr-key.json', @@ -215,18 +215,18 @@ test('QAPI schema regression tests', python, diff = find_program('diff') -qapi_doc = custom_target('QAPI doc', - output: ['doc-good-qapi-commands.c', 'doc-good-qapi-commands.h', - 'doc-good-qapi-emit-events.c', 'doc-good-qapi-emit-events.h', - 'doc-good-qapi-events.c', 'doc-good-qapi-events.h', - 'doc-good-qapi-init-commands.c', 'doc-good-qapi-init-commands.h', - 'doc-good-qapi-introspect.c', 'doc-good-qapi-introspect.h', - 'doc-good-qapi-types.c', 'doc-good-qapi-types.h', - 'doc-good-qapi-visit.c', 'doc-good-qapi-visit.h' ], - input: files('doc-good.json'), - command: [ qapi_gen, '-o', meson.current_build_dir(), - '-p', 'doc-good-', '@INPUT0@' ], - depend_files: qapi_gen_depends) +custom_target('QAPI doc', + output: ['doc-good-qapi-commands.c', 'doc-good-qapi-commands.h', + 'doc-good-qapi-emit-events.c', 'doc-good-qapi-emit-events.h', + 'doc-good-qapi-events.c', 'doc-good-qapi-events.h', + 'doc-good-qapi-init-commands.c', 'doc-good-qapi-init-commands.h', + 'doc-good-qapi-introspect.c', 'doc-good-qapi-introspect.h', + 'doc-good-qapi-types.c', 'doc-good-qapi-types.h', + 'doc-good-qapi-visit.c', 'doc-good-qapi-visit.h' ], + input: files('doc-good.json'), + command: [ qapi_gen, '-o', meson.current_build_dir(), + '-p', 'doc-good-', '@INPUT0@' ], + depend_files: qapi_gen_depends) if build_docs # Test the document-comment document generation code by running a test schema @@ -242,15 +242,17 @@ if build_docs output: ['doc-good.txt'], input: files('doc-good.json', 'doc-good.rst'), build_by_default: true, - depend_files: sphinx_extn_depends, + depfile: 'docs.d', # We use -E to suppress Sphinx's caching, because # we want it to always really run the QAPI doc # generation code. It also means we don't # clutter up the build dir with the cache. 
command: [SPHINX_ARGS, '-b', 'text', '-E', - '-c', meson.source_root() / 'docs', + '-c', meson.project_source_root() / 'docs', '-D', 'master_doc=doc-good', + '-Ddepfile=@DEPFILE@', + '-Ddepfile_stamp=doc-good.stamp', meson.current_source_dir(), meson.current_build_dir()]) diff --git a/tests/qapi-schema/missing-type.err b/tests/qapi-schema/missing-type.err index 5755386a18..cb39569e49 100644 --- a/tests/qapi-schema/missing-type.err +++ b/tests/qapi-schema/missing-type.err @@ -1 +1 @@ -missing-type.json:2: expression is missing metatype +missing-type.json:2: expression must have exactly one key 'enum', 'struct', 'union', 'alternate', 'command', 'event' diff --git a/tests/qapi-schema/qapi-schema-test.json b/tests/qapi-schema/qapi-schema-test.json index 84b9d41f15..ba7302f42b 100644 --- a/tests/qapi-schema/qapi-schema-test.json +++ b/tests/qapi-schema/qapi-schema-test.json @@ -30,7 +30,7 @@ { 'struct': 'Empty1', 'data': { } } { 'struct': 'Empty2', 'base': 'Empty1', 'data': { } } -# Likewise for an empty flat union +# Likewise for an empty union { 'union': 'Union', 'base': { 'type': 'EnumOne' }, 'discriminator': 'type', 'data': { } } @@ -119,12 +119,12 @@ { 'alternate': 'AltEnumNum', 'data': { 'e': 'EnumOne', 'n': 'number' } } { 'alternate': 'AltNumEnum', 'data': { 'n': 'number', 'e': 'EnumOne' } } { 'alternate': 'AltEnumInt', 'data': { 'e': 'EnumOne', 'i': 'int' } } +{ 'alternate': 'AltListInt', 'data': { 'l': ['int'], 'i': 'int' } } # for testing use of 'str' within alternates { 'alternate': 'AltStrObj', 'data': { 's': 'str', 'o': 'TestStruct' } } -# for testing lists -{ 'union': 'UserDefListUnion', +{ 'struct': 'ArrayStruct', 'data': { 'integer': ['int'], 's8': ['int8'], 's16': ['int16'], @@ -137,9 +137,9 @@ 'number': ['number'], 'boolean': ['bool'], 'string': ['str'], - 'sizes': ['size'], - 'any': ['any'], - 'user': ['Status'] } } # intentional forward ref. to sub-module + '*sz': ['size'], + '*any': ['any'], + '*user': ['Status'] } } # intentional forward ref. 
to sub-module # for testing sub-modules { 'include': 'include/sub-module.json' } @@ -159,7 +159,7 @@ 'returns': 'int' } { 'command': 'guest-sync', 'data': { 'arg': 'any' }, 'returns': 'any' } { 'command': 'boxed-struct', 'boxed': true, 'data': 'UserDefZero' } -{ 'command': 'boxed-union', 'data': 'UserDefListUnion', 'boxed': true } +{ 'command': 'boxed-union', 'data': 'UserDefFlatUnion', 'boxed': true } { 'command': 'boxed-empty', 'boxed': true, 'data': 'Empty1' } # Smoke test on out-of-band and allow-preconfig-test @@ -203,11 +203,10 @@ 'data': { '__org.qemu_x-member1': '__org.qemu_x-Enum' } } { 'struct': '__org.qemu_x-Struct', 'base': '__org.qemu_x-Base', 'data': { '__org.qemu_x-member2': 'str', '*wchar-t': 'int' } } -{ 'union': '__org.qemu_x-Union1', 'data': { '__org.qemu_x-branch': 'str' } } { 'alternate': '__org.qemu_x-Alt1', 'data': { '__org.qemu_x-branch': 'str' } } { 'struct': '__org.qemu_x-Struct2', - 'data': { 'array': ['__org.qemu_x-Union1'] } } -{ 'union': '__org.qemu_x-Union2', 'base': '__org.qemu_x-Base', + 'data': { 'array': ['__org.qemu_x-Union'] } } +{ 'union': '__org.qemu_x-Union', 'base': '__org.qemu_x-Base', 'discriminator': '__org.qemu_x-member1', 'data': { '__org.qemu_x-value': '__org.qemu_x-Struct2' } } { 'alternate': '__org.qemu_x-Alt', @@ -215,51 +214,56 @@ { 'event': '__ORG.QEMU_X-EVENT', 'data': '__org.qemu_x-Struct' } { 'command': '__org.qemu_x-command', 'data': { 'a': ['__org.qemu_x-Enum'], 'b': ['__org.qemu_x-Struct'], - 'c': '__org.qemu_x-Union2', 'd': '__org.qemu_x-Alt' }, - 'returns': '__org.qemu_x-Union1' } + 'c': '__org.qemu_x-Union', 'd': '__org.qemu_x-Alt' } } # test 'if' condition handling -{ 'struct': 'TestIfStruct', 'data': - { 'foo': 'int', - 'bar': { 'type': 'int', 'if': 'defined(TEST_IF_STRUCT_BAR)'} }, - 'if': 'defined(TEST_IF_STRUCT)' } +{ 'struct': 'TestIfStruct', + 'data': { 'foo': 'int', + 'bar': { 'type': 'int', 'if': 'TEST_IF_STRUCT_BAR'} }, + 'if': 'TEST_IF_STRUCT' } -{ 'enum': 'TestIfEnum', 'data': - [ 'foo', { 'name' : 'bar', 'if': 'defined(TEST_IF_ENUM_BAR)' } ], - 'if': 'defined(TEST_IF_ENUM)' } +{ 'enum': 'TestIfEnum', + 'data': [ 'foo', { 'name' : 'bar', 'if': 'TEST_IF_ENUM_BAR' } ], + 'if': 'TEST_IF_ENUM' } -{ 'union': 'TestIfUnion', 'data': - { 'foo': 'TestStruct', - 'bar': { 'type': 'str', 'if': 'defined(TEST_IF_UNION_BAR)'} }, - 'if': 'defined(TEST_IF_UNION) && defined(TEST_IF_STRUCT)' } +{ 'union': 'TestIfUnion', + 'base': { 'type': 'TestIfEnum' }, + 'discriminator': 'type', + 'data': { 'foo': 'TestStruct', + 'bar': { 'type': 'UserDefZero', 'if': 'TEST_IF_ENUM_BAR'} }, + 'if': { 'all': ['TEST_IF_UNION', 'TEST_IF_STRUCT'] } } { 'command': 'test-if-union-cmd', 'data': { 'union-cmd-arg': 'TestIfUnion' }, - 'if': 'defined(TEST_IF_UNION)' } + 'if': { 'all': ['TEST_IF_UNION', 'TEST_IF_STRUCT'] } } -{ 'alternate': 'TestIfAlternate', 'data': - { 'foo': 'int', - 'bar': { 'type': 'TestStruct', 'if': 'defined(TEST_IF_ALT_BAR)'} }, - 'if': 'defined(TEST_IF_ALT) && defined(TEST_IF_STRUCT)' } +{ 'alternate': 'TestIfAlternate', + 'data': { 'foo': 'int', + 'bar': { 'type': 'TestStruct', 'if': 'TEST_IF_ALT_BAR'} }, + 'if': { 'all': ['TEST_IF_ALT', 'TEST_IF_STRUCT'] } } { 'command': 'test-if-alternate-cmd', 'data': { 'alt-cmd-arg': 'TestIfAlternate' }, - 'if': 'defined(TEST_IF_ALT)' } + 'if': { 'all': ['TEST_IF_ALT', 'TEST_IF_STRUCT'] } } { 'command': 'test-if-cmd', 'data': { 'foo': 'TestIfStruct', - 'bar': { 'type': 'TestIfEnum', 'if': 'defined(TEST_IF_CMD_BAR)' } }, + 'bar': { 'type': 'TestIfEnum', 'if': 'TEST_IF_CMD_BAR' } }, 'returns': 
'UserDefThree', - 'if': ['defined(TEST_IF_CMD)', 'defined(TEST_IF_STRUCT)'] } + 'if': { 'all': ['TEST_IF_CMD', 'TEST_IF_STRUCT'] } } { 'command': 'test-cmd-return-def-three', 'returns': 'UserDefThree' } -{ 'event': 'TEST_IF_EVENT', 'data': - { 'foo': 'TestIfStruct', - 'bar': { 'type': ['TestIfEnum'], 'if': 'defined(TEST_IF_EVT_BAR)' } }, - 'if': 'defined(TEST_IF_EVT) && defined(TEST_IF_STRUCT)' } +{ 'event': 'TEST_IF_EVENT', + 'data': { 'foo': 'TestIfStruct', + 'bar': { 'type': ['TestIfEnum'], 'if': 'TEST_IF_EVT_BAR' } }, + 'if': { 'all': ['TEST_IF_EVT', 'TEST_IF_STRUCT'] } } + +{ 'event': 'TEST_IF_EVENT2', 'data': {}, + 'if': { 'not': { 'any': [ { 'not': 'TEST_IF_EVT' }, + { 'not': 'TEST_IF_STRUCT' } ] } } } # test 'features' @@ -270,7 +274,7 @@ 'data': { 'foo': { 'type': 'int', 'features': [ 'deprecated' ] } }, 'features': [ 'feature1' ] } { 'struct': 'FeatureStruct2', - 'data': { 'foo': 'int' }, + 'data': { 'foo': { 'type': 'int', 'features': [ 'unstable' ] } }, 'features': [ { 'name': 'feature1' } ] } { 'struct': 'FeatureStruct3', 'data': { 'foo': 'int' }, @@ -281,18 +285,25 @@ { 'struct': 'CondFeatureStruct1', 'data': { 'foo': 'int' }, - 'features': [ { 'name': 'feature1', 'if': 'defined(TEST_IF_FEATURE_1)'} ] } + 'features': [ { 'name': 'feature1', 'if': 'TEST_IF_FEATURE_1'} ] } { 'struct': 'CondFeatureStruct2', 'data': { 'foo': 'int' }, - 'features': [ { 'name': 'feature1', 'if': 'defined(TEST_IF_FEATURE_1)'}, - { 'name': 'feature2', 'if': 'defined(TEST_IF_FEATURE_2)'} ] } + 'features': [ { 'name': 'feature1', 'if': 'TEST_IF_FEATURE_1'}, + { 'name': 'feature2', 'if': 'TEST_IF_FEATURE_2'} ] } { 'struct': 'CondFeatureStruct3', 'data': { 'foo': 'int' }, - 'features': [ { 'name': 'feature1', 'if': [ 'defined(TEST_IF_COND_1)', - 'defined(TEST_IF_COND_2)'] } ] } + 'features': [ { 'name': 'feature1', + 'if': { 'all': [ 'TEST_IF_COND_1', + 'TEST_IF_COND_2'] } } ] } +{ 'struct': 'CondFeatureStruct4', + 'data': { 'foo': 'int' }, + 'features': [ { 'name': 'feature1', + 'if': {'any': ['TEST_IF_COND_1', + 'TEST_IF_COND_2'] } } ] } { 'enum': 'FeatureEnum1', - 'data': [ 'eins', 'zwei', 'drei' ], + 'data': [ 'eins', 'zwei', + { 'name': 'drei', 'features': [ 'deprecated' ] } ], 'features': [ 'feature1' ] } { 'union': 'FeatureUnion1', @@ -313,26 +324,31 @@ '*fs4': 'FeatureStruct4', '*cfs1': 'CondFeatureStruct1', '*cfs2': 'CondFeatureStruct2', - '*cfs3': 'CondFeatureStruct3' }, + '*cfs3': 'CondFeatureStruct3', + '*cfs4': 'CondFeatureStruct4' }, 'returns': 'FeatureStruct1', 'features': [] } { 'command': 'test-command-features1', 'features': [ 'deprecated' ] } { 'command': 'test-command-features3', - 'features': [ 'feature1', 'feature2' ] } + 'features': [ 'unstable', 'feature1', 'feature2' ] } { 'command': 'test-command-cond-features1', - 'features': [ { 'name': 'feature1', 'if': 'defined(TEST_IF_FEATURE_1)'} ] } + 'features': [ { 'name': 'feature1', 'if': 'TEST_IF_FEATURE_1'} ] } { 'command': 'test-command-cond-features2', - 'features': [ { 'name': 'feature1', 'if': 'defined(TEST_IF_FEATURE_1)'}, - { 'name': 'feature2', 'if': 'defined(TEST_IF_FEATURE_2)'} ] } + 'features': [ { 'name': 'feature1', 'if': 'TEST_IF_FEATURE_1'}, + { 'name': 'feature2', 'if': 'TEST_IF_FEATURE_2'} ] } { 'command': 'test-command-cond-features3', - 'features': [ { 'name': 'feature1', 'if': [ 'defined(TEST_IF_COND_1)', - 'defined(TEST_IF_COND_2)'] } ] } + 'features': [ { 'name': 'feature1', + 'if': { 'all': [ 'TEST_IF_COND_1', + 'TEST_IF_COND_2'] } } ] } { 'event': 'TEST_EVENT_FEATURES0', 'data': 'FeatureStruct1' } { 'event': 
'TEST_EVENT_FEATURES1', 'features': [ 'deprecated' ] } + +{ 'event': 'TEST_EVENT_FEATURES2', + 'features': [ 'unstable' ] } diff --git a/tests/qapi-schema/qapi-schema-test.out b/tests/qapi-schema/qapi-schema-test.out index e0b8a5f0b6..043d75c655 100644 --- a/tests/qapi-schema/qapi-schema-test.out +++ b/tests/qapi-schema/qapi-schema-test.out @@ -121,74 +121,30 @@ alternate AltEnumInt tag type case e: EnumOne case i: int +alternate AltListInt + tag type + case l: intList + case i: int alternate AltStrObj tag type case s: str case o: TestStruct -object q_obj_intList-wrapper - member data: intList optional=False -object q_obj_int8List-wrapper - member data: int8List optional=False -object q_obj_int16List-wrapper - member data: int16List optional=False -object q_obj_int32List-wrapper - member data: int32List optional=False -object q_obj_int64List-wrapper - member data: int64List optional=False -object q_obj_uint8List-wrapper - member data: uint8List optional=False -object q_obj_uint16List-wrapper - member data: uint16List optional=False -object q_obj_uint32List-wrapper - member data: uint32List optional=False -object q_obj_uint64List-wrapper - member data: uint64List optional=False -object q_obj_numberList-wrapper - member data: numberList optional=False -object q_obj_boolList-wrapper - member data: boolList optional=False -object q_obj_strList-wrapper - member data: strList optional=False -object q_obj_sizeList-wrapper - member data: sizeList optional=False -object q_obj_anyList-wrapper - member data: anyList optional=False -object q_obj_StatusList-wrapper - member data: StatusList optional=False -enum UserDefListUnionKind - member integer - member s8 - member s16 - member s32 - member s64 - member u8 - member u16 - member u32 - member u64 - member number - member boolean - member string - member sizes - member any - member user -object UserDefListUnion - member type: UserDefListUnionKind optional=False - tag type - case integer: q_obj_intList-wrapper - case s8: q_obj_int8List-wrapper - case s16: q_obj_int16List-wrapper - case s32: q_obj_int32List-wrapper - case s64: q_obj_int64List-wrapper - case u8: q_obj_uint8List-wrapper - case u16: q_obj_uint16List-wrapper - case u32: q_obj_uint32List-wrapper - case u64: q_obj_uint64List-wrapper - case number: q_obj_numberList-wrapper - case boolean: q_obj_boolList-wrapper - case string: q_obj_strList-wrapper - case sizes: q_obj_sizeList-wrapper - case any: q_obj_anyList-wrapper - case user: q_obj_StatusList-wrapper +object ArrayStruct + member integer: intList optional=False + member s8: int8List optional=False + member s16: int16List optional=False + member s32: int32List optional=False + member s64: int64List optional=False + member u8: uint8List optional=False + member u16: uint16List optional=False + member u32: uint32List optional=False + member u64: uint64List optional=False + member number: numberList optional=False + member boolean: boolList optional=False + member string: strList optional=False + member sz: sizeList optional=True + member any: anyList optional=True + member user: StatusList optional=True include include/sub-module.json command user-def-cmd None -> None gen=True success_response=True boxed=False oob=False preconfig=False @@ -216,7 +172,7 @@ command guest-sync q_obj_guest-sync-arg -> any gen=True success_response=True boxed=False oob=False preconfig=False command boxed-struct UserDefZero -> None gen=True success_response=True boxed=True oob=False preconfig=False -command boxed-union UserDefListUnion -> None +command boxed-union 
UserDefFlatUnion -> None gen=True success_response=True boxed=True oob=False preconfig=False command boxed-empty Empty1 -> None gen=True success_response=True boxed=True oob=False preconfig=False @@ -263,21 +219,13 @@ object __org.qemu_x-Struct base __org.qemu_x-Base member __org.qemu_x-member2: str optional=False member wchar-t: int optional=True -object q_obj_str-wrapper - member data: str optional=False -enum __org.qemu_x-Union1Kind - member __org.qemu_x-branch -object __org.qemu_x-Union1 - member type: __org.qemu_x-Union1Kind optional=False - tag type - case __org.qemu_x-branch: q_obj_str-wrapper alternate __org.qemu_x-Alt1 tag type case __org.qemu_x-branch: str -array __org.qemu_x-Union1List __org.qemu_x-Union1 +array __org.qemu_x-UnionList __org.qemu_x-Union object __org.qemu_x-Struct2 - member array: __org.qemu_x-Union1List optional=False -object __org.qemu_x-Union2 + member array: __org.qemu_x-UnionList optional=False +object __org.qemu_x-Union base __org.qemu_x-Base tag __org.qemu_x-member1 case __org.qemu_x-value: __org.qemu_x-Struct2 @@ -291,72 +239,71 @@ array __org.qemu_x-StructList __org.qemu_x-Struct object q_obj___org.qemu_x-command-arg member a: __org.qemu_x-EnumList optional=False member b: __org.qemu_x-StructList optional=False - member c: __org.qemu_x-Union2 optional=False + member c: __org.qemu_x-Union optional=False member d: __org.qemu_x-Alt optional=False -command __org.qemu_x-command q_obj___org.qemu_x-command-arg -> __org.qemu_x-Union1 +command __org.qemu_x-command q_obj___org.qemu_x-command-arg -> None gen=True success_response=True boxed=False oob=False preconfig=False object TestIfStruct member foo: int optional=False member bar: int optional=False - if ['defined(TEST_IF_STRUCT_BAR)'] - if ['defined(TEST_IF_STRUCT)'] + if TEST_IF_STRUCT_BAR + if TEST_IF_STRUCT enum TestIfEnum member foo member bar - if ['defined(TEST_IF_ENUM_BAR)'] - if ['defined(TEST_IF_ENUM)'] -object q_obj_TestStruct-wrapper - member data: TestStruct optional=False -enum TestIfUnionKind - member foo - member bar - if ['defined(TEST_IF_UNION_BAR)'] - if ['defined(TEST_IF_UNION) && defined(TEST_IF_STRUCT)'] + if TEST_IF_ENUM_BAR + if TEST_IF_ENUM +object q_obj_TestIfUnion-base + member type: TestIfEnum optional=False + if {'all': ['TEST_IF_UNION', 'TEST_IF_STRUCT']} object TestIfUnion - member type: TestIfUnionKind optional=False + base q_obj_TestIfUnion-base tag type - case foo: q_obj_TestStruct-wrapper - case bar: q_obj_str-wrapper - if ['defined(TEST_IF_UNION_BAR)'] - if ['defined(TEST_IF_UNION) && defined(TEST_IF_STRUCT)'] + case foo: TestStruct + case bar: UserDefZero + if TEST_IF_ENUM_BAR + if {'all': ['TEST_IF_UNION', 'TEST_IF_STRUCT']} object q_obj_test-if-union-cmd-arg member union-cmd-arg: TestIfUnion optional=False - if ['defined(TEST_IF_UNION)'] + if {'all': ['TEST_IF_UNION', 'TEST_IF_STRUCT']} command test-if-union-cmd q_obj_test-if-union-cmd-arg -> None gen=True success_response=True boxed=False oob=False preconfig=False - if ['defined(TEST_IF_UNION)'] + if {'all': ['TEST_IF_UNION', 'TEST_IF_STRUCT']} alternate TestIfAlternate tag type case foo: int case bar: TestStruct - if ['defined(TEST_IF_ALT_BAR)'] - if ['defined(TEST_IF_ALT) && defined(TEST_IF_STRUCT)'] + if TEST_IF_ALT_BAR + if {'all': ['TEST_IF_ALT', 'TEST_IF_STRUCT']} object q_obj_test-if-alternate-cmd-arg member alt-cmd-arg: TestIfAlternate optional=False - if ['defined(TEST_IF_ALT)'] + if {'all': ['TEST_IF_ALT', 'TEST_IF_STRUCT']} command test-if-alternate-cmd q_obj_test-if-alternate-cmd-arg -> None gen=True 
success_response=True boxed=False oob=False preconfig=False - if ['defined(TEST_IF_ALT)'] + if {'all': ['TEST_IF_ALT', 'TEST_IF_STRUCT']} object q_obj_test-if-cmd-arg member foo: TestIfStruct optional=False member bar: TestIfEnum optional=False - if ['defined(TEST_IF_CMD_BAR)'] - if ['defined(TEST_IF_CMD)', 'defined(TEST_IF_STRUCT)'] + if TEST_IF_CMD_BAR + if {'all': ['TEST_IF_CMD', 'TEST_IF_STRUCT']} command test-if-cmd q_obj_test-if-cmd-arg -> UserDefThree gen=True success_response=True boxed=False oob=False preconfig=False - if ['defined(TEST_IF_CMD)', 'defined(TEST_IF_STRUCT)'] + if {'all': ['TEST_IF_CMD', 'TEST_IF_STRUCT']} command test-cmd-return-def-three None -> UserDefThree gen=True success_response=True boxed=False oob=False preconfig=False array TestIfEnumList TestIfEnum - if ['defined(TEST_IF_ENUM)'] + if TEST_IF_ENUM object q_obj_TEST_IF_EVENT-arg member foo: TestIfStruct optional=False member bar: TestIfEnumList optional=False - if ['defined(TEST_IF_EVT_BAR)'] - if ['defined(TEST_IF_EVT) && defined(TEST_IF_STRUCT)'] + if TEST_IF_EVT_BAR + if {'all': ['TEST_IF_EVT', 'TEST_IF_STRUCT']} event TEST_IF_EVENT q_obj_TEST_IF_EVENT-arg boxed=False - if ['defined(TEST_IF_EVT) && defined(TEST_IF_STRUCT)'] + if {'all': ['TEST_IF_EVT', 'TEST_IF_STRUCT']} +event TEST_IF_EVENT2 None + boxed=False + if {'not': {'any': [{'not': 'TEST_IF_EVT'}, {'not': 'TEST_IF_STRUCT'}]}} object FeatureStruct0 member foo: int optional=False object FeatureStruct1 @@ -365,6 +312,7 @@ object FeatureStruct1 feature feature1 object FeatureStruct2 member foo: int optional=False + feature unstable feature feature1 object FeatureStruct3 member foo: int optional=False @@ -379,21 +327,26 @@ object FeatureStruct4 object CondFeatureStruct1 member foo: int optional=False feature feature1 - if ['defined(TEST_IF_FEATURE_1)'] + if TEST_IF_FEATURE_1 object CondFeatureStruct2 member foo: int optional=False feature feature1 - if ['defined(TEST_IF_FEATURE_1)'] + if TEST_IF_FEATURE_1 feature feature2 - if ['defined(TEST_IF_FEATURE_2)'] + if TEST_IF_FEATURE_2 object CondFeatureStruct3 member foo: int optional=False feature feature1 - if ['defined(TEST_IF_COND_1)', 'defined(TEST_IF_COND_2)'] + if {'all': ['TEST_IF_COND_1', 'TEST_IF_COND_2']} +object CondFeatureStruct4 + member foo: int optional=False + feature feature1 + if {'any': ['TEST_IF_COND_1', 'TEST_IF_COND_2']} enum FeatureEnum1 member eins member zwei member drei + feature deprecated feature feature1 object q_obj_FeatureUnion1-base member tag: FeatureEnum1 optional=False @@ -417,6 +370,7 @@ object q_obj_test-features0-arg member cfs1: CondFeatureStruct1 optional=True member cfs2: CondFeatureStruct2 optional=True member cfs3: CondFeatureStruct3 optional=True + member cfs4: CondFeatureStruct4 optional=True command test-features0 q_obj_test-features0-arg -> FeatureStruct1 gen=True success_response=True boxed=False oob=False preconfig=False command test-command-features1 None -> None @@ -424,27 +378,31 @@ command test-command-features1 None -> None feature deprecated command test-command-features3 None -> None gen=True success_response=True boxed=False oob=False preconfig=False + feature unstable feature feature1 feature feature2 command test-command-cond-features1 None -> None gen=True success_response=True boxed=False oob=False preconfig=False feature feature1 - if ['defined(TEST_IF_FEATURE_1)'] + if TEST_IF_FEATURE_1 command test-command-cond-features2 None -> None gen=True success_response=True boxed=False oob=False preconfig=False feature feature1 - if 
['defined(TEST_IF_FEATURE_1)'] + if TEST_IF_FEATURE_1 feature feature2 - if ['defined(TEST_IF_FEATURE_2)'] + if TEST_IF_FEATURE_2 command test-command-cond-features3 None -> None gen=True success_response=True boxed=False oob=False preconfig=False feature feature1 - if ['defined(TEST_IF_COND_1)', 'defined(TEST_IF_COND_2)'] + if {'all': ['TEST_IF_COND_1', 'TEST_IF_COND_2']} event TEST_EVENT_FEATURES0 FeatureStruct1 boxed=False event TEST_EVENT_FEATURES1 None boxed=False feature deprecated +event TEST_EVENT_FEATURES2 None + boxed=False + feature unstable module include/sub-module.json include sub-sub-module.json object SecondArrayRef diff --git a/tests/qapi-schema/redefined-event.json b/tests/qapi-schema/redefined-event.json index 7717e91c18..7901930e3d 100644 --- a/tests/qapi-schema/redefined-event.json +++ b/tests/qapi-schema/redefined-event.json @@ -1,3 +1,3 @@ # we reject duplicate events { 'event': 'EVENT_A', 'data': { 'myint': 'int' } } -{ 'event': 'EVENT_A', 'data': { 'myint': 'int' } } +{ 'event': 'EVENT_A', 'data': { 'myint': 'int' }, 'if': 'FOO' } diff --git a/tests/qapi-schema/reserved-member-u.json b/tests/qapi-schema/reserved-member-u.json index 2bfb8f59b6..d982ab5e0c 100644 --- a/tests/qapi-schema/reserved-member-u.json +++ b/tests/qapi-schema/reserved-member-u.json @@ -2,6 +2,6 @@ # We reject use of 'u' as a member name, to allow it for internal use in # putting union branch members in a separate namespace from QMP members. # This is true even for non-unions, because it is possible to convert a -# struct to flat union while remaining backwards compatible in QMP. +# struct to union while remaining backwards compatible in QMP. # TODO - we could munge the member name to 'q_u' to avoid the collision { 'struct': 'Oops', 'data': { '*u': 'str' } } diff --git a/tests/qapi-schema/reserved-type-kind.err b/tests/qapi-schema/reserved-type-kind.err deleted file mode 100644 index d8fb769f9d..0000000000 --- a/tests/qapi-schema/reserved-type-kind.err +++ /dev/null @@ -1,2 +0,0 @@ -reserved-type-kind.json: In enum 'UnionKind': -reserved-type-kind.json:2: enum name should not end in 'Kind' diff --git a/tests/qapi-schema/reserved-type-kind.json b/tests/qapi-schema/reserved-type-kind.json deleted file mode 100644 index 9ecaba12bc..0000000000 --- a/tests/qapi-schema/reserved-type-kind.json +++ /dev/null @@ -1,2 +0,0 @@ -# we reject types that would conflict with implicit union enum -{ 'enum': 'UnionKind', 'data': [ 'oops' ] } diff --git a/tests/qapi-schema/struct-member-if-invalid.err b/tests/qapi-schema/struct-member-if-invalid.err index 42e7fdae3c..5ee08afa41 100644 --- a/tests/qapi-schema/struct-member-if-invalid.err +++ b/tests/qapi-schema/struct-member-if-invalid.err @@ -1,2 +1,2 @@ struct-member-if-invalid.json: In struct 'Stru': -struct-member-if-invalid.json:2: 'if' condition of 'data' member 'member' must be a string or a list of strings +struct-member-if-invalid.json:2: 'if' condition of 'data' member 'member' must be a string or an object diff --git a/tests/qapi-schema/test-qapi.py b/tests/qapi-schema/test-qapi.py index f1c4deb9a5..2160cef082 100755 --- a/tests/qapi-schema/test-qapi.py +++ b/tests/qapi-schema/test-qapi.py @@ -37,6 +37,7 @@ class QAPISchemaTestVisitor(QAPISchemaVisitor): for m in members: print(' member %s' % m.name) self._print_if(m.ifcond, indent=8) + self._print_features(m.features, indent=8) self._print_if(ifcond) self._print_features(features) @@ -94,8 +95,17 @@ class QAPISchemaTestVisitor(QAPISchemaVisitor): @staticmethod def _print_if(ifcond, indent=4): - if 
ifcond: - print('%sif %s' % (' ' * indent, ifcond)) + # TODO Drop this hack after replacing OrderedDict by plain + # dict (requires Python 3.7) + def _massage(subcond): + if isinstance(subcond, str): + return subcond + if isinstance(subcond, list): + return [_massage(val) for val in subcond] + return {key: _massage(val) for key, val in subcond.items()} + + if ifcond.is_present(): + print('%sif %s' % (' ' * indent, _massage(ifcond.ifcond))) @classmethod def _print_features(cls, features, indent=4): @@ -123,6 +133,17 @@ def test_frontend(fname): print(' section=%s\n%s' % (section.name, section.text)) +def open_test_result(dir_name, file_name, update): + mode = 'r+' if update else 'r' + try: + fp = open(os.path.join(dir_name, file_name), mode) + except FileNotFoundError: + if not update: + raise + fp = open(os.path.join(dir_name, file_name), 'w+') + return fp + + def test_and_diff(test_name, dir_name, update): sys.stdout = StringIO() try: @@ -139,13 +160,12 @@ def test_and_diff(test_name, dir_name, update): sys.stdout.close() sys.stdout = sys.__stdout__ - mode = 'r+' if update else 'r' try: - outfp = open(os.path.join(dir_name, test_name + '.out'), mode) - errfp = open(os.path.join(dir_name, test_name + '.err'), mode) + outfp = open_test_result(dir_name, test_name + '.out', update) + errfp = open_test_result(dir_name, test_name + '.err', update) expected_out = outfp.readlines() expected_err = errfp.readlines() - except IOError as err: + except OSError as err: print("%s: can't open '%s': %s" % (sys.argv[0], err.filename, err.strerror), file=sys.stderr) @@ -171,7 +191,7 @@ def test_and_diff(test_name, dir_name, update): errfp.truncate(0) errfp.seek(0) errfp.writelines(actual_err) - except IOError as err: + except OSError as err: print("%s: can't write '%s': %s" % (sys.argv[0], err.filename, err.strerror), file=sys.stderr) diff --git a/tests/qapi-schema/union-array-branch.err b/tests/qapi-schema/union-array-branch.err new file mode 100644 index 0000000000..5db9c17481 --- /dev/null +++ b/tests/qapi-schema/union-array-branch.err @@ -0,0 +1,2 @@ +union-array-branch.json: In union 'TestUnion': +union-array-branch.json:8: 'data' member 'value1' cannot be an array diff --git a/tests/qapi-schema/flat-union-array-branch.json b/tests/qapi-schema/union-array-branch.json similarity index 86% rename from tests/qapi-schema/flat-union-array-branch.json rename to tests/qapi-schema/union-array-branch.json index 0b98820a8f..6dda7ec379 100644 --- a/tests/qapi-schema/flat-union-array-branch.json +++ b/tests/qapi-schema/union-array-branch.json @@ -1,4 +1,4 @@ -# we require flat union branches to be a struct +# we require union branches to be a struct { 'enum': 'TestEnum', 'data': [ 'value1', 'value2' ] } { 'struct': 'Base', diff --git a/tests/qapi-schema/flat-union-clash-member.out b/tests/qapi-schema/union-array-branch.out similarity index 100% rename from tests/qapi-schema/flat-union-clash-member.out rename to tests/qapi-schema/union-array-branch.out diff --git a/tests/qapi-schema/union-bad-base.err b/tests/qapi-schema/union-bad-base.err new file mode 100644 index 0000000000..42b2ed1dda --- /dev/null +++ b/tests/qapi-schema/union-bad-base.err @@ -0,0 +1,2 @@ +union-bad-base.json: In union 'TestUnion': +union-bad-base.json:8: member 'string' of type 'TestTypeA' collides with base member 'string' diff --git a/tests/qapi-schema/flat-union-bad-base.json b/tests/qapi-schema/union-bad-base.json similarity index 100% rename from tests/qapi-schema/flat-union-bad-base.json rename to tests/qapi-schema/union-bad-base.json 
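The union test renames above sit alongside the bigger change visible in qapi-schema-test.out: 'if' conditions are no longer lists of literal defined() strings but either a bare identifier or a nested object built from 'all', 'any' and 'not', which test-qapi.py now prints via ifcond.is_present() and ifcond.ifcond. Purely as an illustration of the new shape (this is not the generator's code; lowering to defined() just mirrors the old strings being replaced), such an object could be flattened like this:

# Hypothetical helper, illustration only: lower a condition that is either
# a bare identifier or a dict using 'all'/'any'/'not' into a C-like string.
def cond_to_expr(cond):
    if isinstance(cond, str):
        return 'defined(%s)' % cond                  # bare identifier
    if 'not' in cond:
        return '!(%s)' % cond_to_expr(cond['not'])
    op = ' && ' if 'all' in cond else ' || '         # 'all' -> &&, 'any' -> ||
    terms = cond.get('all', cond.get('any'))
    return '(%s)' % op.join(cond_to_expr(t) for t in terms)

# The TEST_IF_EVENT2 condition from the expected output,
# {'not': {'any': [{'not': 'TEST_IF_EVT'}, {'not': 'TEST_IF_STRUCT'}]}},
# flattens to !((!(defined(TEST_IF_EVT)) || !(defined(TEST_IF_STRUCT)))),
# i.e. the De Morgan form of requiring both TEST_IF_EVT and TEST_IF_STRUCT.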
diff --git a/tests/qapi-schema/flat-union-discriminator-bad-name.out b/tests/qapi-schema/union-bad-base.out similarity index 100% rename from tests/qapi-schema/flat-union-discriminator-bad-name.out rename to tests/qapi-schema/union-bad-base.out diff --git a/tests/qapi-schema/union-bad-discriminator.err b/tests/qapi-schema/union-bad-discriminator.err new file mode 100644 index 0000000000..7cfd470f58 --- /dev/null +++ b/tests/qapi-schema/union-bad-discriminator.err @@ -0,0 +1,2 @@ +union-bad-discriminator.json: In union 'TestUnion': +union-bad-discriminator.json:11: 'discriminator' requires a string name diff --git a/tests/qapi-schema/flat-union-bad-discriminator.json b/tests/qapi-schema/union-bad-discriminator.json similarity index 100% rename from tests/qapi-schema/flat-union-bad-discriminator.json rename to tests/qapi-schema/union-bad-discriminator.json diff --git a/tests/qapi-schema/flat-union-empty.out b/tests/qapi-schema/union-bad-discriminator.out similarity index 100% rename from tests/qapi-schema/flat-union-empty.out rename to tests/qapi-schema/union-bad-discriminator.out diff --git a/tests/qapi-schema/union-base-any.err b/tests/qapi-schema/union-base-any.err new file mode 100644 index 0000000000..82b48bc1c8 --- /dev/null +++ b/tests/qapi-schema/union-base-any.err @@ -0,0 +1,2 @@ +union-base-any.json: In union 'TestUnion': +union-base-any.json:8: 'base' requires a struct type, built-in type 'any' isn't diff --git a/tests/qapi-schema/flat-union-base-any.json b/tests/qapi-schema/union-base-any.json similarity index 100% rename from tests/qapi-schema/flat-union-base-any.json rename to tests/qapi-schema/union-base-any.json diff --git a/tests/qapi-schema/flat-union-inline-invalid-dict.out b/tests/qapi-schema/union-base-any.out similarity index 100% rename from tests/qapi-schema/flat-union-inline-invalid-dict.out rename to tests/qapi-schema/union-base-any.out diff --git a/tests/qapi-schema/union-base-empty.json b/tests/qapi-schema/union-base-empty.json index d1843d33b4..6f8ef000db 100644 --- a/tests/qapi-schema/union-base-empty.json +++ b/tests/qapi-schema/union-base-empty.json @@ -1,4 +1,4 @@ -# Flat union with empty base and therefore without discriminator +# Union with empty base and therefore without discriminator { 'struct': 'Empty', 'data': { } } diff --git a/tests/qapi-schema/union-base-no-discriminator.err b/tests/qapi-schema/union-base-no-discriminator.err index 9cd5d11b0b..a730b7fd3c 100644 --- a/tests/qapi-schema/union-base-no-discriminator.err +++ b/tests/qapi-schema/union-base-no-discriminator.err @@ -1,2 +1,2 @@ union-base-no-discriminator.json: In union 'TestUnion': -union-base-no-discriminator.json:11: 'base' requires 'discriminator' +union-base-no-discriminator.json:11: union misses key 'discriminator' diff --git a/tests/qapi-schema/union-base-no-discriminator.json b/tests/qapi-schema/union-base-no-discriminator.json index 1409cf5c9e..2e7cae9b22 100644 --- a/tests/qapi-schema/union-base-no-discriminator.json +++ b/tests/qapi-schema/union-base-no-discriminator.json @@ -1,4 +1,4 @@ -# we reject simple unions with a base (or flat unions without discriminator) +# we reject unions without discriminator { 'struct': 'TestTypeA', 'data': { 'string': 'str' } } diff --git a/tests/qapi-schema/union-base-union.err b/tests/qapi-schema/union-base-union.err new file mode 100644 index 0000000000..2bddaf6a84 --- /dev/null +++ b/tests/qapi-schema/union-base-union.err @@ -0,0 +1,2 @@ +union-base-union.json: In union 'TestUnion': +union-base-union.json:17: 'base' requires a struct type, 
union type 'UnionBase' isn't diff --git a/tests/qapi-schema/flat-union-base-union.json b/tests/qapi-schema/union-base-union.json similarity index 86% rename from tests/qapi-schema/flat-union-base-union.json rename to tests/qapi-schema/union-base-union.json index 98b4eba181..82d4c96e57 100644 --- a/tests/qapi-schema/flat-union-base-union.json +++ b/tests/qapi-schema/union-base-union.json @@ -8,7 +8,10 @@ 'data': { 'string': 'str' } } { 'struct': 'TestTypeB', 'data': { 'integer': 'int' } } +{ 'enum': 'Enum', 'data': [ 'kind1', 'kind2' ] } { 'union': 'UnionBase', + 'base': { 'type': 'Enum' }, + 'discriminator': 'type', 'data': { 'kind1': 'TestTypeA', 'kind2': 'TestTypeB' } } { 'union': 'TestUnion', diff --git a/tests/qapi-schema/flat-union-int-branch.out b/tests/qapi-schema/union-base-union.out similarity index 100% rename from tests/qapi-schema/flat-union-int-branch.out rename to tests/qapi-schema/union-base-union.out diff --git a/tests/qapi-schema/union-branch-case.err b/tests/qapi-schema/union-branch-case.err deleted file mode 100644 index d2d5cb8993..0000000000 --- a/tests/qapi-schema/union-branch-case.err +++ /dev/null @@ -1,2 +0,0 @@ -union-branch-case.json: In union 'Uni': -union-branch-case.json:2: name of 'data' member 'Branch' must not use uppercase or '_' diff --git a/tests/qapi-schema/union-branch-case.json b/tests/qapi-schema/union-branch-case.json deleted file mode 100644 index b7894b75d6..0000000000 --- a/tests/qapi-schema/union-branch-case.json +++ /dev/null @@ -1,2 +0,0 @@ -# Branch names should be 'lower-case' -{ 'union': 'Uni', 'data': { 'Branch': 'int' } } diff --git a/tests/qapi-schema/union-branch-if-invalid.err b/tests/qapi-schema/union-branch-if-invalid.err index dd4518233e..046187a5b9 100644 --- a/tests/qapi-schema/union-branch-if-invalid.err +++ b/tests/qapi-schema/union-branch-if-invalid.err @@ -1,2 +1,2 @@ union-branch-if-invalid.json: In union 'Uni': -union-branch-if-invalid.json:4: 'if' condition '' of 'data' member 'branch1' makes no sense +union-branch-if-invalid.json:4: 'if' condition '' of 'data' member 'branch1' is not a valid identifier diff --git a/tests/qapi-schema/union-branch-if-invalid.json b/tests/qapi-schema/union-branch-if-invalid.json index 46d4239af6..c41633856f 100644 --- a/tests/qapi-schema/union-branch-if-invalid.json +++ b/tests/qapi-schema/union-branch-if-invalid.json @@ -3,4 +3,4 @@ { 'struct': 'Stru', 'data': { 'member': 'str' } } { 'union': 'Uni', 'base': { 'tag': 'Branches' }, 'discriminator': 'tag', - 'data': { 'branch1': { 'type': 'Stru', 'if': [''] } } } + 'data': { 'branch1': { 'type': 'Stru', 'if': { 'all': [''] } } } } diff --git a/tests/qapi-schema/union-branch-invalid-dict.err b/tests/qapi-schema/union-branch-invalid-dict.err index 8137c5a767..001cdec069 100644 --- a/tests/qapi-schema/union-branch-invalid-dict.err +++ b/tests/qapi-schema/union-branch-invalid-dict.err @@ -1,2 +1,2 @@ union-branch-invalid-dict.json: In union 'UnionInvalidBranch': -union-branch-invalid-dict.json:2: 'data' member 'integer' misses key 'type' +union-branch-invalid-dict.json:4: 'data' member 'integer' misses key 'type' diff --git a/tests/qapi-schema/union-branch-invalid-dict.json b/tests/qapi-schema/union-branch-invalid-dict.json index 9778598dbd..c7c81c0e00 100644 --- a/tests/qapi-schema/union-branch-invalid-dict.json +++ b/tests/qapi-schema/union-branch-invalid-dict.json @@ -1,4 +1,8 @@ # Long form of member must have a value member 'type' +{ 'enum': 'TestEnum', + 'data': [ 'integer', 's8' ] } { 'union': 'UnionInvalidBranch', + 'base': { 'type': 
'TestEnum' }, + 'discriminator': 'type', 'data': { 'integer': { 'if': 'foo'}, 's8': 'int8' } } diff --git a/tests/qapi-schema/union-clash-branches.err b/tests/qapi-schema/union-clash-branches.err deleted file mode 100644 index ef53645728..0000000000 --- a/tests/qapi-schema/union-clash-branches.err +++ /dev/null @@ -1,2 +0,0 @@ -union-clash-branches.json: In union 'TestUnion': -union-clash-branches.json:6: name of 'data' member 'a_b' must not use uppercase or '_' diff --git a/tests/qapi-schema/union-clash-branches.json b/tests/qapi-schema/union-clash-branches.json deleted file mode 100644 index 7bdda0b0da..0000000000 --- a/tests/qapi-schema/union-clash-branches.json +++ /dev/null @@ -1,7 +0,0 @@ -# Union branch name collision -# Naming rules make collision impossible (even with the pragma). If -# that wasn't the case, then we'd get collisions in generated C: two -# union members a_b, and two enum members TEST_UNION_A_B. -{ 'pragma': { 'member-name-exceptions': [ 'TestUnion' ] } } -{ 'union': 'TestUnion', - 'data': { 'a-b': 'int', 'a_b': 'str' } } diff --git a/tests/qapi-schema/union-clash-member.err b/tests/qapi-schema/union-clash-member.err new file mode 100644 index 0000000000..c1f3a02552 --- /dev/null +++ b/tests/qapi-schema/union-clash-member.err @@ -0,0 +1,2 @@ +union-clash-member.json: In union 'TestUnion': +union-clash-member.json:11: member 'name' of type 'Branch1' collides with member 'name' of type 'Base' diff --git a/tests/qapi-schema/flat-union-clash-member.json b/tests/qapi-schema/union-clash-member.json similarity index 100% rename from tests/qapi-schema/flat-union-clash-member.json rename to tests/qapi-schema/union-clash-member.json diff --git a/tests/qapi-schema/flat-union-invalid-branch-key.out b/tests/qapi-schema/union-clash-member.out similarity index 100% rename from tests/qapi-schema/flat-union-invalid-branch-key.out rename to tests/qapi-schema/union-clash-member.out diff --git a/tests/qapi-schema/union-discriminator-bad-name.err b/tests/qapi-schema/union-discriminator-bad-name.err new file mode 100644 index 0000000000..5793e9af66 --- /dev/null +++ b/tests/qapi-schema/union-discriminator-bad-name.err @@ -0,0 +1,2 @@ +union-discriminator-bad-name.json: In union 'MyUnion': +union-discriminator-bad-name.json:6: discriminator '*switch' is not a member of 'base' diff --git a/tests/qapi-schema/flat-union-discriminator-bad-name.json b/tests/qapi-schema/union-discriminator-bad-name.json similarity index 100% rename from tests/qapi-schema/flat-union-discriminator-bad-name.json rename to tests/qapi-schema/union-discriminator-bad-name.json diff --git a/tests/qapi-schema/flat-union-invalid-discriminator.out b/tests/qapi-schema/union-discriminator-bad-name.out similarity index 100% rename from tests/qapi-schema/flat-union-invalid-discriminator.out rename to tests/qapi-schema/union-discriminator-bad-name.out diff --git a/tests/qapi-schema/union-empty.err b/tests/qapi-schema/union-empty.err index 59788c94ce..d428439962 100644 --- a/tests/qapi-schema/union-empty.err +++ b/tests/qapi-schema/union-empty.err @@ -1,2 +1,2 @@ union-empty.json: In union 'Union': -union-empty.json:2: union has no branches +union-empty.json:4: union has no branches diff --git a/tests/qapi-schema/union-empty.json b/tests/qapi-schema/union-empty.json index df3e5e639a..584ed6098c 100644 --- a/tests/qapi-schema/union-empty.json +++ b/tests/qapi-schema/union-empty.json @@ -1,2 +1,4 @@ -# simple unions cannot be empty -{ 'union': 'Union', 'data': { } } +# union discriminator enum cannot be empty +{ 'enum': 'Empty', 
'data': [ ] } +{ 'struct': 'Base', 'data': { 'type': 'Empty' } } +{ 'union': 'Union', 'base': 'Base', 'discriminator': 'type', 'data': { } } diff --git a/tests/qapi-schema/union-inline-invalid-dict.err b/tests/qapi-schema/union-inline-invalid-dict.err new file mode 100644 index 0000000000..25ddf7c765 --- /dev/null +++ b/tests/qapi-schema/union-inline-invalid-dict.err @@ -0,0 +1,2 @@ +union-inline-invalid-dict.json: In union 'TestUnion': +union-inline-invalid-dict.json:7: 'data' member 'value1' misses key 'type' diff --git a/tests/qapi-schema/flat-union-inline-invalid-dict.json b/tests/qapi-schema/union-inline-invalid-dict.json similarity index 100% rename from tests/qapi-schema/flat-union-inline-invalid-dict.json rename to tests/qapi-schema/union-inline-invalid-dict.json diff --git a/tests/qapi-schema/flat-union-invalid-if-discriminator.out b/tests/qapi-schema/union-inline-invalid-dict.out similarity index 100% rename from tests/qapi-schema/flat-union-invalid-if-discriminator.out rename to tests/qapi-schema/union-inline-invalid-dict.out diff --git a/tests/qapi-schema/union-int-branch.err b/tests/qapi-schema/union-int-branch.err new file mode 100644 index 0000000000..8fdc81edd1 --- /dev/null +++ b/tests/qapi-schema/union-int-branch.err @@ -0,0 +1,2 @@ +union-int-branch.json: In union 'TestUnion': +union-int-branch.json:8: branch 'value1' cannot use built-in type 'int' diff --git a/tests/qapi-schema/flat-union-int-branch.json b/tests/qapi-schema/union-int-branch.json similarity index 86% rename from tests/qapi-schema/flat-union-int-branch.json rename to tests/qapi-schema/union-int-branch.json index 9370c349e8..567043d9d2 100644 --- a/tests/qapi-schema/flat-union-int-branch.json +++ b/tests/qapi-schema/union-int-branch.json @@ -1,4 +1,4 @@ -# we require flat union branches to be a struct +# we require union branches to be a struct { 'enum': 'TestEnum', 'data': [ 'value1', 'value2' ] } { 'struct': 'Base', diff --git a/tests/qapi-schema/flat-union-no-base.out b/tests/qapi-schema/union-int-branch.out similarity index 100% rename from tests/qapi-schema/flat-union-no-base.out rename to tests/qapi-schema/union-int-branch.out diff --git a/tests/qapi-schema/union-invalid-branch-key.err b/tests/qapi-schema/union-invalid-branch-key.err new file mode 100644 index 0000000000..bf58800507 --- /dev/null +++ b/tests/qapi-schema/union-invalid-branch-key.err @@ -0,0 +1,2 @@ +union-invalid-branch-key.json: In union 'TestUnion': +union-invalid-branch-key.json:13: branch 'value_wrong' is not a value of enum type 'TestEnum' diff --git a/tests/qapi-schema/flat-union-invalid-branch-key.json b/tests/qapi-schema/union-invalid-branch-key.json similarity index 100% rename from tests/qapi-schema/flat-union-invalid-branch-key.json rename to tests/qapi-schema/union-invalid-branch-key.json diff --git a/tests/qapi-schema/flat-union-optional-discriminator.out b/tests/qapi-schema/union-invalid-branch-key.out similarity index 100% rename from tests/qapi-schema/flat-union-optional-discriminator.out rename to tests/qapi-schema/union-invalid-branch-key.out diff --git a/tests/qapi-schema/union-invalid-discriminator.err b/tests/qapi-schema/union-invalid-discriminator.err new file mode 100644 index 0000000000..38efb24b98 --- /dev/null +++ b/tests/qapi-schema/union-invalid-discriminator.err @@ -0,0 +1,2 @@ +union-invalid-discriminator.json: In union 'TestUnion': +union-invalid-discriminator.json:10: discriminator 'enum_wrong' is not a member of 'base' diff --git a/tests/qapi-schema/flat-union-invalid-discriminator.json 
b/tests/qapi-schema/union-invalid-discriminator.json similarity index 100% rename from tests/qapi-schema/flat-union-invalid-discriminator.json rename to tests/qapi-schema/union-invalid-discriminator.json diff --git a/tests/qapi-schema/flat-union-string-discriminator.out b/tests/qapi-schema/union-invalid-discriminator.out similarity index 100% rename from tests/qapi-schema/flat-union-string-discriminator.out rename to tests/qapi-schema/union-invalid-discriminator.out diff --git a/tests/qapi-schema/union-invalid-if-discriminator.err b/tests/qapi-schema/union-invalid-if-discriminator.err new file mode 100644 index 0000000000..3f41d03f8e --- /dev/null +++ b/tests/qapi-schema/union-invalid-if-discriminator.err @@ -0,0 +1,2 @@ +union-invalid-if-discriminator.json: In union 'TestUnion': +union-invalid-if-discriminator.json:10: discriminator member 'enum1' of 'base' must not be conditional diff --git a/tests/qapi-schema/flat-union-invalid-if-discriminator.json b/tests/qapi-schema/union-invalid-if-discriminator.json similarity index 100% rename from tests/qapi-schema/flat-union-invalid-if-discriminator.json rename to tests/qapi-schema/union-invalid-if-discriminator.json diff --git a/tests/qapi-schema/reserved-type-kind.out b/tests/qapi-schema/union-invalid-if-discriminator.out similarity index 100% rename from tests/qapi-schema/reserved-type-kind.out rename to tests/qapi-schema/union-invalid-if-discriminator.out diff --git a/tests/qapi-schema/union-no-base.err b/tests/qapi-schema/union-no-base.err new file mode 100644 index 0000000000..cbf12ac526 --- /dev/null +++ b/tests/qapi-schema/union-no-base.err @@ -0,0 +1,2 @@ +union-no-base.json: In union 'TestUnion': +union-no-base.json:8: union misses key 'base' diff --git a/tests/qapi-schema/flat-union-no-base.json b/tests/qapi-schema/union-no-base.json similarity index 90% rename from tests/qapi-schema/flat-union-no-base.json rename to tests/qapi-schema/union-no-base.json index 327877b563..f6fe12da3b 100644 --- a/tests/qapi-schema/flat-union-no-base.json +++ b/tests/qapi-schema/union-no-base.json @@ -1,4 +1,4 @@ -# flat unions require a base +# unions require a base { 'struct': 'TestTypeA', 'data': { 'string': 'str' } } { 'struct': 'TestTypeB', diff --git a/tests/qapi-schema/union-branch-case.out b/tests/qapi-schema/union-no-base.out similarity index 100% rename from tests/qapi-schema/union-branch-case.out rename to tests/qapi-schema/union-no-base.out diff --git a/tests/qapi-schema/union-optional-branch.err b/tests/qapi-schema/union-optional-branch.err deleted file mode 100644 index b33f111de4..0000000000 --- a/tests/qapi-schema/union-optional-branch.err +++ /dev/null @@ -1,2 +0,0 @@ -union-optional-branch.json: In union 'Union': -union-optional-branch.json:2: 'data' member '*a' has an invalid name diff --git a/tests/qapi-schema/union-optional-branch.json b/tests/qapi-schema/union-optional-branch.json deleted file mode 100644 index 591615fc68..0000000000 --- a/tests/qapi-schema/union-optional-branch.json +++ /dev/null @@ -1,2 +0,0 @@ -# union branches cannot be optional -{ 'union': 'Union', 'data': { '*a': 'int', 'b': 'str' } } diff --git a/tests/qapi-schema/union-optional-discriminator.err b/tests/qapi-schema/union-optional-discriminator.err new file mode 100644 index 0000000000..8d980bd2ac --- /dev/null +++ b/tests/qapi-schema/union-optional-discriminator.err @@ -0,0 +1,2 @@ +union-optional-discriminator.json: In union 'MyUnion': +union-optional-discriminator.json:6: discriminator member 'switch' of base type 'Base' must not be optional diff --git 
a/tests/qapi-schema/flat-union-optional-discriminator.json b/tests/qapi-schema/union-optional-discriminator.json similarity index 100% rename from tests/qapi-schema/flat-union-optional-discriminator.json rename to tests/qapi-schema/union-optional-discriminator.json diff --git a/tests/qapi-schema/union-clash-branches.out b/tests/qapi-schema/union-optional-discriminator.out similarity index 100% rename from tests/qapi-schema/union-clash-branches.out rename to tests/qapi-schema/union-optional-discriminator.out diff --git a/tests/qapi-schema/union-string-discriminator.err b/tests/qapi-schema/union-string-discriminator.err new file mode 100644 index 0000000000..eccbe681bd --- /dev/null +++ b/tests/qapi-schema/union-string-discriminator.err @@ -0,0 +1,2 @@ +union-string-discriminator.json: In union 'TestUnion': +union-string-discriminator.json:13: discriminator member 'kind' of base type 'TestBase' must be of enum type diff --git a/tests/qapi-schema/flat-union-string-discriminator.json b/tests/qapi-schema/union-string-discriminator.json similarity index 100% rename from tests/qapi-schema/flat-union-string-discriminator.json rename to tests/qapi-schema/union-string-discriminator.json diff --git a/tests/qapi-schema/union-optional-branch.out b/tests/qapi-schema/union-string-discriminator.out similarity index 100% rename from tests/qapi-schema/union-optional-branch.out rename to tests/qapi-schema/union-string-discriminator.out diff --git a/tests/qapi-schema/union-unknown.err b/tests/qapi-schema/union-unknown.err index 7aba9f94da..dad79beae0 100644 --- a/tests/qapi-schema/union-unknown.err +++ b/tests/qapi-schema/union-unknown.err @@ -1,2 +1,2 @@ union-unknown.json: In union 'Union': -union-unknown.json:2: union uses unknown type 'MissingType' +union-unknown.json:3: branch 'unknown' uses unknown type 'MissingType' diff --git a/tests/qapi-schema/union-unknown.json b/tests/qapi-schema/union-unknown.json index 64d3666176..4736f1ab08 100644 --- a/tests/qapi-schema/union-unknown.json +++ b/tests/qapi-schema/union-unknown.json @@ -1,3 +1,6 @@ # we reject a union with unknown type in branch +{ 'enum': 'Enum', 'data': [ 'unknown' ] } { 'union': 'Union', - 'data': { 'unknown': ['MissingType'] } } + 'base': { 'type': 'Enum' }, + 'discriminator': 'type', + 'data': { 'unknown': 'MissingType' } } diff --git a/tests/qemu-iotests/025 b/tests/qemu-iotests/025 index 80686a30d5..5771ea9200 100755 --- a/tests/qemu-iotests/025 +++ b/tests/qemu-iotests/025 @@ -20,7 +20,7 @@ # # creator -owner=stefanha@linux.vnet.ibm.com +owner=stefanha@redhat.com seq=`basename $0` echo "QA output created by $seq" diff --git a/tests/qemu-iotests/027 b/tests/qemu-iotests/027 index b279c88f33..24c93627bb 100755 --- a/tests/qemu-iotests/027 +++ b/tests/qemu-iotests/027 @@ -20,7 +20,7 @@ # # creator -owner=stefanha@linux.vnet.ibm.com +owner=stefanha@redhat.com seq=`basename $0` echo "QA output created by $seq" diff --git a/tests/qemu-iotests/028 b/tests/qemu-iotests/028 index 8c391f2adc..2b232c4614 100755 --- a/tests/qemu-iotests/028 +++ b/tests/qemu-iotests/028 @@ -23,7 +23,7 @@ # # creator -owner=stefanha@linux.vnet.ibm.com +owner=stefanha@redhat.com seq=`basename $0` echo "QA output created by $seq" diff --git a/tests/qemu-iotests/030 b/tests/qemu-iotests/030 index 5fb65b4bef..98595d47fe 100755 --- a/tests/qemu-iotests/030 +++ b/tests/qemu-iotests/030 @@ -64,16 +64,18 @@ class TestSingleDrive(iotests.QMPTestCase): self.assert_no_active_block_jobs() self.vm.shutdown() - self.assertEqual(qemu_io('-f', 'raw', '-c', 'map', backing_img), - 
qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img), - 'image file map does not match backing file after streaming') + self.assertEqual( + qemu_io('-f', 'raw', '-c', 'map', backing_img).stdout, + qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout, + 'image file map does not match backing file after streaming') def test_stream_intermediate(self): self.assert_no_active_block_jobs() - self.assertNotEqual(qemu_io('-f', 'raw', '-rU', '-c', 'map', backing_img), - qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', mid_img), - 'image file map matches backing file before streaming') + self.assertNotEqual( + qemu_io('-f', 'raw', '-rU', '-c', 'map', backing_img).stdout, + qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', mid_img).stdout, + 'image file map matches backing file before streaming') result = self.vm.qmp('block-stream', device='mid', job_id='stream-mid') self.assert_qmp(result, 'return', {}) @@ -83,9 +85,10 @@ class TestSingleDrive(iotests.QMPTestCase): self.assert_no_active_block_jobs() self.vm.shutdown() - self.assertEqual(qemu_io('-f', 'raw', '-c', 'map', backing_img), - qemu_io('-f', iotests.imgfmt, '-c', 'map', mid_img), - 'image file map does not match backing file after streaming') + self.assertEqual( + qemu_io('-f', 'raw', '-c', 'map', backing_img).stdout, + qemu_io('-f', iotests.imgfmt, '-c', 'map', mid_img).stdout, + 'image file map does not match backing file after streaming') def test_stream_pause(self): self.assert_no_active_block_jobs() @@ -113,15 +116,17 @@ class TestSingleDrive(iotests.QMPTestCase): self.assert_no_active_block_jobs() self.vm.shutdown() - self.assertEqual(qemu_io('-f', 'raw', '-c', 'map', backing_img), - qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img), - 'image file map does not match backing file after streaming') + self.assertEqual( + qemu_io('-f', 'raw', '-c', 'map', backing_img).stdout, + qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout, + 'image file map does not match backing file after streaming') def test_stream_no_op(self): self.assert_no_active_block_jobs() # The image map is empty before the operation - empty_map = qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', test_img) + empty_map = qemu_io( + '-f', iotests.imgfmt, '-rU', '-c', 'map', test_img).stdout # This is a no-op: no data should ever be copied from the base image result = self.vm.qmp('block-stream', device='drive0', base=mid_img) @@ -132,8 +137,9 @@ class TestSingleDrive(iotests.QMPTestCase): self.assert_no_active_block_jobs() self.vm.shutdown() - self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img), - empty_map, 'image file map changed after a no-op') + self.assertEqual( + qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout, + empty_map, 'image file map changed after a no-op') def test_stream_partial(self): self.assert_no_active_block_jobs() @@ -146,9 +152,10 @@ class TestSingleDrive(iotests.QMPTestCase): self.assert_no_active_block_jobs() self.vm.shutdown() - self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', mid_img), - qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img), - 'image file map does not match backing file after streaming') + self.assertEqual( + qemu_io('-f', iotests.imgfmt, '-c', 'map', mid_img).stdout, + qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout, + 'image file map does not match backing file after streaming') def test_device_not_found(self): result = self.vm.qmp('block-stream', device='nonexistent') @@ -236,9 +243,10 @@ class TestParallelOps(iotests.QMPTestCase): # Check that the maps don't 
match before the streaming operations for i in range(2, self.num_imgs, 2): - self.assertNotEqual(qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[i]), - qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[i-1]), - 'image file map matches backing file before streaming') + self.assertNotEqual( + qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[i]).stdout, + qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[i-1]).stdout, + 'image file map matches backing file before streaming') # Create all streaming jobs pending_jobs = [] @@ -251,7 +259,16 @@ class TestParallelOps(iotests.QMPTestCase): speed=1024) self.assert_qmp(result, 'return', {}) - for job in pending_jobs: + # Do this in reverse: After unthrottling them, some jobs may finish + # before we have unthrottled all of them. This will drain their + # subgraph, and this will make jobs above them advance (despite those + # jobs on top being throttled). In the worst case, all jobs below the + # top one are finished before we can unthrottle it, and this makes it + # advance so far that it completes before we can unthrottle it - which + # results in an error. + # Starting from the top (i.e. in reverse) does not have this problem: + # When a job finishes, the ones below it are not advanced. + for job in reversed(pending_jobs): result = self.vm.qmp('block-job-set-speed', device=job, speed=0) self.assert_qmp(result, 'return', {}) @@ -269,9 +286,10 @@ class TestParallelOps(iotests.QMPTestCase): # Check that all maps match now for i in range(2, self.num_imgs, 2): - self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i]), - qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i-1]), - 'image file map does not match backing file after streaming') + self.assertEqual( + qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i]).stdout, + qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i-1]).stdout, + 'image file map does not match backing file after streaming') # Test that it's not possible to perform two block-stream # operations if there are nodes involved in both. 
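The repeated '.stdout' additions in this test track a helper change: qemu_io() in iotests now evidently returns a completed-process style object rather than the bare output string, so the map comparisons must select its stdout text. A minimal sketch of the resulting idiom, assuming the iotests helpers and the image variables defined at the top of this test:

# Sketch only: qemu_io() is assumed to return an object exposing .stdout
# (subprocess.CompletedProcess-like), so map comparisons compare that text.
import iotests
from iotests import qemu_io

base_map = qemu_io('-f', 'raw', '-c', 'map', backing_img).stdout
top_map = qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout
assert base_map == top_map, \
    'image file map does not match backing file after streaming'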
@@ -427,6 +445,11 @@ class TestParallelOps(iotests.QMPTestCase): self.vm.run_job(job='node4', auto_dismiss=True) self.assert_no_active_block_jobs() + # Assert that node0 is now the backing node of node4 + result = self.vm.qmp('query-named-block-nodes') + node4 = next(node for node in result['return'] if node['node-name'] == 'node4') + self.assertEqual(node4['image']['backing-image']['filename'], self.imgs[0]) + # Test a block-stream and a block-commit job in parallel # Here the stream job is supposed to finish quickly in order to reproduce # the scenario that triggers the bug fixed in 3d5d319e1221 and 1a63a907507 @@ -500,9 +523,10 @@ class TestParallelOps(iotests.QMPTestCase): def test_stream_base_node_name(self): self.assert_no_active_block_jobs() - self.assertNotEqual(qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[4]), - qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[3]), - 'image file map matches backing file before streaming') + self.assertNotEqual( + qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[4]).stdout, + qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[3]).stdout, + 'image file map matches backing file before streaming') # Error: the base node does not exist result = self.vm.qmp('block-stream', device='node4', base_node='none', job_id='stream') @@ -533,9 +557,10 @@ class TestParallelOps(iotests.QMPTestCase): self.assert_no_active_block_jobs() self.vm.shutdown() - self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[4]), - qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[3]), - 'image file map matches backing file after streaming') + self.assertEqual( + qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[4]).stdout, + qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[3]).stdout, + 'image file map matches backing file after streaming') class TestQuorum(iotests.QMPTestCase): num_children = 3 @@ -574,9 +599,10 @@ class TestQuorum(iotests.QMPTestCase): os.remove(img) def test_stream_quorum(self): - self.assertNotEqual(qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.children[0]), - qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.backing[0]), - 'image file map matches backing file before streaming') + self.assertNotEqual( + qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.children[0]).stdout, + qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.backing[0]).stdout, + 'image file map matches backing file before streaming') self.assert_no_active_block_jobs() @@ -588,9 +614,10 @@ class TestQuorum(iotests.QMPTestCase): self.assert_no_active_block_jobs() self.vm.shutdown() - self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', self.children[0]), - qemu_io('-f', iotests.imgfmt, '-c', 'map', self.backing[0]), - 'image file map does not match backing file after streaming') + self.assertEqual( + qemu_io('-f', iotests.imgfmt, '-c', 'map', self.children[0]).stdout, + qemu_io('-f', iotests.imgfmt, '-c', 'map', self.backing[0]).stdout, + 'image file map does not match backing file after streaming') class TestSmallerBackingFile(iotests.QMPTestCase): backing_len = 1 * 1024 * 1024 # MB @@ -710,7 +737,8 @@ class TestEIO(TestErrors): if result == {'return': []}: # Job finished too quickly continue - self.assert_qmp(result, 'return[0]/paused', False) + self.assertIn(result['return'][0]['status'], + ['running', 'pending', 'aborting', 'concluded']) elif event['event'] == 'BLOCK_JOB_COMPLETED': self.assertTrue(error, 'job completed unexpectedly') self.assert_qmp(event, 'data/type', 'stream') @@ -740,8 
+768,14 @@ class TestEIO(TestErrors): self.assert_qmp(event, 'data/device', 'drive0') self.assert_qmp(event, 'data/operation', 'read') + if self.vm.qmp('query-block-jobs')['return'][0]['status'] != 'paused': + self.vm.events_wait([( + 'JOB_STATUS_CHANGE', + {'data': {'id': 'drive0', 'status': 'paused'}} + )]) + result = self.vm.qmp('query-block-jobs') - self.assert_qmp(result, 'return[0]/paused', True) + self.assert_qmp(result, 'return[0]/status', 'paused') self.assert_qmp(result, 'return[0]/offset', self.STREAM_BUFFER_SIZE) self.assert_qmp(result, 'return[0]/io-status', 'failed') @@ -752,7 +786,8 @@ class TestEIO(TestErrors): if result == {'return': []}: # Race; likely already finished. Check. continue - self.assert_qmp(result, 'return[0]/paused', False) + self.assertIn(result['return'][0]['status'], + ['running', 'pending', 'aborting', 'concluded']) self.assert_qmp(result, 'return[0]/io-status', 'ok') elif event['event'] == 'BLOCK_JOB_COMPLETED': self.assertTrue(error, 'job completed unexpectedly') @@ -829,8 +864,14 @@ class TestENOSPC(TestErrors): self.assert_qmp(event, 'data/operation', 'read') error = True + if self.vm.qmp('query-block-jobs')['return'][0]['status'] != 'paused': + self.vm.events_wait([( + 'JOB_STATUS_CHANGE', + {'data': {'id': 'drive0', 'status': 'paused'}} + )]) + result = self.vm.qmp('query-block-jobs') - self.assert_qmp(result, 'return[0]/paused', True) + self.assert_qmp(result, 'return[0]/status', 'paused') self.assert_qmp(result, 'return[0]/offset', self.STREAM_BUFFER_SIZE) self.assert_qmp(result, 'return[0]/io-status', 'nospace') @@ -841,7 +882,8 @@ class TestENOSPC(TestErrors): if result == {'return': []}: # Race; likely already finished. Check. continue - self.assert_qmp(result, 'return[0]/paused', False) + self.assertIn(result['return'][0]['status'], + ['running', 'pending', 'aborting', 'concluded']) self.assert_qmp(result, 'return[0]/io-status', 'ok') elif event['event'] == 'BLOCK_JOB_COMPLETED': self.assertTrue(error, 'job completed unexpectedly') diff --git a/tests/qemu-iotests/031 b/tests/qemu-iotests/031 index 58b57a0ef2..ee587b1606 100755 --- a/tests/qemu-iotests/031 +++ b/tests/qemu-iotests/031 @@ -42,8 +42,9 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 _supported_fmt qcow2 _supported_proto file fuse # We want to test compat=0.10, which does not support external data -# files or refcount widths other than 16 -_unsupported_imgopts data_file 'refcount_bits=\([^1]\|.\([^6]\|$\)\)' +# files or refcount widths other than 16 or compression type +_unsupported_imgopts data_file compression_type \ + 'refcount_bits=\([^1]\|.\([^6]\|$\)\)' CLUSTER_SIZE=65536 @@ -58,21 +59,21 @@ for compat in "compat=0.10" "compat=1.1"; do echo _make_test_img -o $compat 64M $PYTHON qcow2.py "$TEST_IMG" add-header-ext 0x12345678 "This is a test header extension" - $PYTHON qcow2.py "$TEST_IMG" dump-header + _qcow2_dump_header _check_test_img echo echo === Rewrite header with no backing file === echo $QEMU_IMG rebase -u -b "" "$TEST_IMG" - $PYTHON qcow2.py "$TEST_IMG" dump-header + _qcow2_dump_header _check_test_img echo echo === Add a backing file and format === echo $QEMU_IMG rebase -u -b "/some/backing/file/path" -F host_device "$TEST_IMG" - $PYTHON qcow2.py "$TEST_IMG" dump-header + _qcow2_dump_header done # success, all done diff --git a/tests/qemu-iotests/036 b/tests/qemu-iotests/036 index 5e567012a8..16a401985c 100755 --- a/tests/qemu-iotests/036 +++ b/tests/qemu-iotests/036 @@ -23,7 +23,7 @@ # # creator -owner=stefanha@linux.vnet.ibm.com +owner=stefanha@redhat.com 
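The paused-state assertions rewritten in the 030 hunks above no longer trust a boolean snapshot: they read the job lifecycle status and, if it is not yet 'paused', wait for the corresponding JOB_STATUS_CHANGE event first. A compact sketch of that wait-then-assert pattern, assuming the iotests VM wrapper those tests use (self.vm.qmp(), self.vm.events_wait()) and their 'drive0' job id:

# Sketch of the pattern from the TestEIO/TestENOSPC hunks; 'drive0' and the
# io-status values ('failed' for EIO, 'nospace' for ENOSPC) come from there.
if self.vm.qmp('query-block-jobs')['return'][0]['status'] != 'paused':
    self.vm.events_wait([(
        'JOB_STATUS_CHANGE',
        {'data': {'id': 'drive0', 'status': 'paused'}}
    )])
result = self.vm.qmp('query-block-jobs')
self.assert_qmp(result, 'return[0]/status', 'paused')
self.assertIn(result['return'][0]['io-status'], ('failed', 'nospace'))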
seq=`basename $0` echo "QA output created by $seq" @@ -58,7 +58,7 @@ $PYTHON qcow2.py "$TEST_IMG" set-feature-bit incompatible 63 # Without feature table $PYTHON qcow2.py "$TEST_IMG" del-header-ext 0x6803f857 -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep features +_qcow2_dump_header | grep features $PYTHON qcow2.py "$TEST_IMG" dump-header-exts _img_info @@ -107,7 +107,7 @@ echo === Create image with unknown autoclear feature bit === echo _make_test_img 64M $PYTHON qcow2.py "$TEST_IMG" set-feature-bit autoclear 63 -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep features +_qcow2_dump_header | grep features $PYTHON qcow2.py "$TEST_IMG" dump-header-exts echo @@ -115,7 +115,7 @@ echo === Repair image === echo _check_test_img -r all -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep features +_qcow2_dump_header | grep features $PYTHON qcow2.py "$TEST_IMG" dump-header-exts # success, all done diff --git a/tests/qemu-iotests/039 b/tests/qemu-iotests/039 index 12b2c7fa7b..e43e7026ce 100755 --- a/tests/qemu-iotests/039 +++ b/tests/qemu-iotests/039 @@ -23,7 +23,7 @@ # # creator -owner=stefanha@linux.vnet.ibm.com +owner=stefanha@redhat.com seq=`basename $0` echo "QA output created by $seq" @@ -59,7 +59,7 @@ _make_test_img -o "compat=1.1,lazy_refcounts=on" $size $QEMU_IO -c "write -P 0x5a 0 512" "$TEST_IMG" | _filter_qemu_io # The dirty bit must not be set -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features _check_test_img echo @@ -73,7 +73,7 @@ $QEMU_IO -c "write -P 0x5a 0 512" \ | _filter_qemu_io # The dirty bit must be set -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features _check_test_img echo @@ -82,7 +82,7 @@ echo "== Read-only access must still work ==" $QEMU_IO -r -c "read -P 0x5a 0 512" "$TEST_IMG" | _filter_qemu_io # The dirty bit must be set -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features echo echo "== Repairing the image file must succeed ==" @@ -90,7 +90,7 @@ echo "== Repairing the image file must succeed ==" _check_test_img -r all # The dirty bit must not be set -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features echo echo "== Data should still be accessible after repair ==" @@ -108,12 +108,12 @@ $QEMU_IO -c "write -P 0x5a 0 512" \ | _filter_qemu_io # The dirty bit must be set -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features $QEMU_IO -c "write 0 512" "$TEST_IMG" | _filter_qemu_io # The dirty bit must not be set -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features echo echo "== Creating an image file with lazy_refcounts=off ==" @@ -126,7 +126,7 @@ $QEMU_IO -c "write -P 0x5a 0 512" \ | _filter_qemu_io # The dirty bit must not be set since lazy_refcounts=off -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features _check_test_img echo @@ -141,8 +141,8 @@ $QEMU_IO -c "write 0 512" "$TEST_IMG" | _filter_qemu_io $QEMU_IMG commit "$TEST_IMG" # The dirty bit must not be set -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features -$PYTHON qcow2.py "$TEST_IMG".base dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features +_qcow2_dump_header "$TEST_IMG".base | grep 
incompatible_features _check_test_img TEST_IMG="$TEST_IMG".base _check_test_img @@ -159,7 +159,7 @@ $QEMU_IO -c "reopen -o lazy-refcounts=on" \ | _filter_qemu_io # The dirty bit must be set -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features _check_test_img _make_test_img -o "compat=1.1,lazy_refcounts=on" $size @@ -171,7 +171,7 @@ $QEMU_IO -c "reopen -o lazy-refcounts=off" \ | _filter_qemu_io # The dirty bit must not be set -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features _check_test_img diff --git a/tests/qemu-iotests/040 b/tests/qemu-iotests/040 index f3677de9df..30eb97829e 100755 --- a/tests/qemu-iotests/040 +++ b/tests/qemu-iotests/040 @@ -86,29 +86,34 @@ class TestSingleDrive(ImageCommitTestCase): qemu_img('create', '-f', iotests.imgfmt, '-o', 'backing_file=%s' % mid_img, '-F', iotests.imgfmt, test_img) - qemu_io('-f', 'raw', '-c', 'write -P 0xab 0 524288', backing_img) - qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0xef 524288 524288', mid_img) + if self.image_len: + qemu_io('-f', 'raw', '-c', 'write -P 0xab 0 524288', backing_img) + qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0xef 524288 524288', + mid_img) self.vm = iotests.VM().add_drive(test_img, "node-name=top,backing.node-name=mid,backing.backing.node-name=base", interface="none") self.vm.add_device('virtio-scsi') self.vm.add_device("scsi-hd,id=scsi0,drive=drive0") self.vm.launch() - self.has_quit = False def tearDown(self): - self.vm.shutdown(has_quit=self.has_quit) + self.vm.shutdown() os.remove(test_img) os.remove(mid_img) os.remove(backing_img) def test_commit(self): self.run_commit_test(mid_img, backing_img) - self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', backing_img).find("verification failed")) - self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', backing_img).find("verification failed")) + if not self.image_len: + return + qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', backing_img) + qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', backing_img) def test_commit_node(self): self.run_commit_test("mid", "base", node_names=True) - self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', backing_img).find("verification failed")) - self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', backing_img).find("verification failed")) + if not self.image_len: + return + qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', backing_img) + qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', backing_img) @iotests.skip_if_unsupported(['throttle']) def test_commit_with_filter_and_quit(self): @@ -127,8 +132,6 @@ class TestSingleDrive(ImageCommitTestCase): result = self.vm.qmp('quit') self.assert_qmp(result, 'return', {}) - self.has_quit = True - # Same as above, but this time we add the filter after starting the job @iotests.skip_if_unsupported(['throttle']) def test_commit_plus_filter_and_quit(self): @@ -147,8 +150,6 @@ class TestSingleDrive(ImageCommitTestCase): result = self.vm.qmp('quit') self.assert_qmp(result, 'return', {}) - self.has_quit = True - def test_device_not_found(self): result = self.vm.qmp('block-commit', device='nonexistent', top='%s' % mid_img) self.assert_qmp(result, 'error/class', 'DeviceNotFound') @@ -197,13 +198,17 @@ class TestSingleDrive(ImageCommitTestCase): def test_top_is_active(self): self.run_commit_test(test_img, backing_img, need_ready=True) - 
self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', backing_img).find("verification failed")) - self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', backing_img).find("verification failed")) + if not self.image_len: + return + qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', backing_img) + qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', backing_img) def test_top_is_default_active(self): self.run_default_commit_test() - self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', backing_img).find("verification failed")) - self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', backing_img).find("verification failed")) + if not self.image_len: + return + qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', backing_img) + qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', backing_img) def test_top_and_base_reversed(self): self.assert_no_active_block_jobs() @@ -339,8 +344,8 @@ class TestRelativePaths(ImageCommitTestCase): def test_commit(self): self.run_commit_test(self.mid_img, self.backing_img) - self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', self.backing_img_abs).find("verification failed")) - self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', self.backing_img_abs).find("verification failed")) + qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', self.backing_img_abs) + qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', self.backing_img_abs) def test_device_not_found(self): result = self.vm.qmp('block-commit', device='nonexistent', top='%s' % self.mid_img) @@ -366,8 +371,8 @@ class TestRelativePaths(ImageCommitTestCase): def test_top_is_active(self): self.run_commit_test(self.test_img, self.backing_img) - self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', self.backing_img_abs).find("verification failed")) - self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', self.backing_img_abs).find("verification failed")) + qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', self.backing_img_abs) + qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', self.backing_img_abs) def test_top_and_base_reversed(self): self.assert_no_active_block_jobs() @@ -743,12 +748,12 @@ class TestCommitWithFilters(iotests.QMPTestCase): def do_test_io(self, read_or_write): for index, pattern_file in enumerate(self.pattern_files): - result = qemu_io('-f', iotests.imgfmt, - '-c', - f'{read_or_write} -P {index + 1} {index}M 1M', - pattern_file) - self.assertFalse('Pattern verification failed' in result) + qemu_io('-f', iotests.imgfmt, + '-c', + f'{read_or_write} -P {index + 1} {index}M 1M', + pattern_file) + @iotests.skip_if_unsupported(['throttle']) def setUp(self): qemu_img('create', '-f', iotests.imgfmt, self.img0, '64M') qemu_img('create', '-f', iotests.imgfmt, self.img1, '64M') @@ -831,7 +836,8 @@ class TestCommitWithFilters(iotests.QMPTestCase): job_id='commit', device='top-filter', top_node='cow-2', - base_node='cow-1') + base_node='cow-1', + backing_file=self.img1) self.assert_qmp(result, 'return', {}) self.wait_until_completed(drive='commit') @@ -847,7 +853,8 @@ class TestCommitWithFilters(iotests.QMPTestCase): job_id='commit', device='top-filter', top_node='cow-1', - base_node='cow-0') + base_node='cow-0', + backing_file=self.img0) self.assert_qmp(result, 'return', {}) self.wait_until_completed(drive='commit') diff --git a/tests/qemu-iotests/041 b/tests/qemu-iotests/041 index db9f5dc540..8429958bf0 100755 --- 
a/tests/qemu-iotests/041 +++ b/tests/qemu-iotests/041 @@ -24,7 +24,7 @@ import os import re import json import iotests -from iotests import qemu_img, qemu_img_pipe, qemu_io +from iotests import qemu_img, qemu_img_map, qemu_io backing_img = os.path.join(iotests.test_dir, 'backing.img') target_backing_img = os.path.join(iotests.test_dir, 'target-backing.img') @@ -529,7 +529,7 @@ new_state = "1" self.assert_qmp(event, 'data/device', 'drive0') self.assert_qmp(event, 'data/operation', 'read') result = self.vm.qmp('query-block-jobs') - self.assert_qmp(result, 'return[0]/paused', False) + self.assertIn(result['return'][0]['status'], ['running', 'ready']) self.complete_and_wait() def test_large_cluster(self): @@ -555,7 +555,7 @@ new_state = "1" self.assert_qmp(event, 'data/device', 'drive0') self.assert_qmp(event, 'data/operation', 'read') result = self.vm.qmp('query-block-jobs') - self.assert_qmp(result, 'return[0]/paused', False) + self.assertIn(result['return'][0]['status'], ['running', 'ready']) self.complete_and_wait() self.vm.shutdown() @@ -580,8 +580,14 @@ new_state = "1" self.assert_qmp(event, 'data/device', 'drive0') self.assert_qmp(event, 'data/operation', 'read') + if self.vm.qmp('query-block-jobs')['return'][0]['status'] != 'paused': + self.vm.events_wait([( + 'JOB_STATUS_CHANGE', + {'data': {'id': 'drive0', 'status': 'paused'}} + )]) + result = self.vm.qmp('query-block-jobs') - self.assert_qmp(result, 'return[0]/paused', True) + self.assert_qmp(result, 'return[0]/status', 'paused') self.assert_qmp(result, 'return[0]/io-status', 'failed') result = self.vm.qmp('block-job-resume', device='drive0') @@ -593,7 +599,7 @@ new_state = "1" ready = True result = self.vm.qmp('query-block-jobs') - self.assert_qmp(result, 'return[0]/paused', False) + self.assert_qmp(result, 'return[0]/status', 'ready') self.assert_qmp(result, 'return[0]/io-status', 'ok') self.complete_and_wait(wait_ready=False) @@ -686,7 +692,7 @@ new_state = "1" self.assert_qmp(event, 'data/device', 'drive0') self.assert_qmp(event, 'data/operation', 'write') result = self.vm.qmp('query-block-jobs') - self.assert_qmp(result, 'return[0]/paused', False) + self.assertIn(result['return'][0]['status'], ['running', 'ready']) self.complete_and_wait() def test_stop_write(self): @@ -705,15 +711,21 @@ new_state = "1" self.assert_qmp(event, 'data/device', 'drive0') self.assert_qmp(event, 'data/operation', 'write') + if self.vm.qmp('query-block-jobs')['return'][0]['status'] != 'paused': + self.vm.events_wait([( + 'JOB_STATUS_CHANGE', + {'data': {'id': 'drive0', 'status': 'paused'}} + )]) + result = self.vm.qmp('query-block-jobs') - self.assert_qmp(result, 'return[0]/paused', True) + self.assert_qmp(result, 'return[0]/status', 'paused') self.assert_qmp(result, 'return[0]/io-status', 'failed') result = self.vm.qmp('block-job-resume', device='drive0') self.assert_qmp(result, 'return', {}) result = self.vm.qmp('query-block-jobs') - self.assert_qmp(result, 'return[0]/paused', False) + self.assertIn(result['return'][0]['status'], ['running', 'ready']) self.assert_qmp(result, 'return[0]/io-status', 'ok') error = True elif event['event'] == 'BLOCK_JOB_READY': @@ -1360,8 +1372,7 @@ class TestFilters(iotests.QMPTestCase): self.vm.qmp('blockdev-del', node_name='target') - target_map = qemu_img_pipe('map', '--output=json', target_img) - target_map = json.loads(target_map) + target_map = qemu_img_map(target_img) assert target_map[0]['start'] == 0 assert target_map[0]['length'] == 512 * 1024 diff --git a/tests/qemu-iotests/044 b/tests/qemu-iotests/044 index 
64b18eb7c8..a5ee9a7ded 100755 --- a/tests/qemu-iotests/044 +++ b/tests/qemu-iotests/044 @@ -24,7 +24,7 @@ import os import qcow2 from qcow2 import QcowHeader import iotests -from iotests import qemu_img, qemu_img_verbose, qemu_io +from iotests import qemu_img, qemu_img_log, qemu_io import struct import subprocess import sys @@ -112,9 +112,11 @@ class TestRefcountTableGrowth(iotests.QMPTestCase): def test_grow_refcount_table(self): qemu_io('-c', 'write 3800M 1M', test_img) - qemu_img_verbose('check' , test_img) + qemu_img_log('check' , test_img) pass if __name__ == '__main__': + iotests.activate_logging() iotests.main(supported_fmts=['qcow2'], - supported_protocols=['file']) + supported_protocols=['file'], + unsupported_imgopts=['refcount_bits']) diff --git a/tests/qemu-iotests/044.out b/tests/qemu-iotests/044.out index 703cf3dee1..ff663b17d7 100644 --- a/tests/qemu-iotests/044.out +++ b/tests/qemu-iotests/044.out @@ -1,6 +1,7 @@ No errors were found on the image. 7292415/33554432 = 21.73% allocated, 0.00% fragmented, 0.00% compressed clusters Image end offset: 4296217088 + . ---------------------------------------------------------------------- Ran 1 tests diff --git a/tests/qemu-iotests/049.out b/tests/qemu-iotests/049.out index 01f7b1f240..8719c91b48 100644 --- a/tests/qemu-iotests/049.out +++ b/tests/qemu-iotests/049.out @@ -174,11 +174,11 @@ Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off comp qemu-img create -f qcow2 -o compat=0.42 TEST_DIR/t.qcow2 64M Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=67108864 compat=0.42 lazy_refcounts=off refcount_bits=16 -qemu-img: TEST_DIR/t.qcow2: Invalid parameter '0.42' +qemu-img: TEST_DIR/t.qcow2: Parameter 'version' does not accept value '0.42' qemu-img create -f qcow2 -o compat=foobar TEST_DIR/t.qcow2 64M Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=67108864 compat=foobar lazy_refcounts=off refcount_bits=16 -qemu-img: TEST_DIR/t.qcow2: Invalid parameter 'foobar' +qemu-img: TEST_DIR/t.qcow2: Parameter 'version' does not accept value 'foobar' == Check preallocation option == @@ -190,7 +190,7 @@ Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off prea qemu-img create -f qcow2 -o preallocation=1234 TEST_DIR/t.qcow2 64M Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off preallocation=1234 compression_type=zlib size=67108864 lazy_refcounts=off refcount_bits=16 -qemu-img: TEST_DIR/t.qcow2: Invalid parameter '1234' +qemu-img: TEST_DIR/t.qcow2: Parameter 'preallocation' does not accept value '1234' == Check encryption option == diff --git a/tests/qemu-iotests/051 b/tests/qemu-iotests/051 index 7bf29343d7..4c079b11e3 100755 --- a/tests/qemu-iotests/051 +++ b/tests/qemu-iotests/051 @@ -41,10 +41,15 @@ _supported_fmt qcow2 _supported_proto file # A compat=0.10 image is created in this test which does not support anything # other than refcount_bits=16; -# it also will not support an external data file -_unsupported_imgopts 'refcount_bits=\([^1]\|.\([^6]\|$\)\)' data_file +# it also will not support an external data file and compression type +_unsupported_imgopts 'refcount_bits=\([^1]\|.\([^6]\|$\)\)' data_file \ + compression_type _require_drivers nbd +if [ "$QEMU_DEFAULT_MACHINE" = "pc" ]; then + _require_devices lsi53c895a +fi + do_run_qemu() { echo Testing: "$@" @@ -199,7 +204,7 @@ case "$QEMU_DEFAULT_MACHINE" in # virtio-blk enables the iothread only when the driver 
initialises the # device, so a second virtio-blk device can't be added even with the # same iothread. virtio-scsi allows this. - run_qemu $iothread -device virtio-blk-pci,drive=disk,iothread=iothread0,share-rw=on + run_qemu $iothread -device virtio-blk-pci,drive=disk,iothread=thread0,share-rw=on run_qemu $iothread -device virtio-scsi,id=virtio-scsi1,iothread=thread0 -device scsi-hd,bus=virtio-scsi1.0,drive=disk,share-rw=on ;; *) @@ -370,7 +375,8 @@ if [ "${VALGRIND_QEMU_VM}" == "y" ]; then _casenotrun "Valgrind needs a valid TMPDIR for itself" fi VALGRIND_QEMU_VM= \ -TMPDIR=/nonexistent run_qemu -drive driver=null-co,snapshot=on +TMPDIR=/nonexistent run_qemu -drive driver=null-co,snapshot=on | + sed -e "s#'[^']*/vl\.[A-Za-z0-9]\{6\}'#SNAPSHOT_PATH#g" # Using snapshot=on together with read-only=on echo "info block" | diff --git a/tests/qemu-iotests/051.out b/tests/qemu-iotests/051.out index 441f83e41a..e5ddb03bda 100644 --- a/tests/qemu-iotests/051.out +++ b/tests/qemu-iotests/051.out @@ -459,7 +459,7 @@ wrote 4096/4096 bytes at offset 0 read 4096/4096 bytes at offset 0 4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) Testing: -drive driver=null-co,snapshot=on -QEMU_PROG: -drive driver=null-co,snapshot=on: Could not get temporary filename: No such file or directory +QEMU_PROG: -drive driver=null-co,snapshot=on: Could not open temporary file SNAPSHOT_PATH: No such file or directory Testing: -drive file=TEST_DIR/t.qcow2,snapshot=on,read-only=on,if=none,id=drive0 QEMU X.Y.Z monitor - type 'help' for more information diff --git a/tests/qemu-iotests/051.pc.out b/tests/qemu-iotests/051.pc.out index afe7632964..bade1ff3b9 100644 --- a/tests/qemu-iotests/051.pc.out +++ b/tests/qemu-iotests/051.pc.out @@ -183,9 +183,9 @@ Testing: -drive file=TEST_DIR/t.qcow2,if=none,node-name=disk -object iothread,id QEMU X.Y.Z monitor - type 'help' for more information (qemu) QEMU_PROG: -device scsi-hd,bus=virtio-scsi1.0,drive=disk,share-rw=on: Cannot change iothread of active block backend -Testing: -drive file=TEST_DIR/t.qcow2,if=none,node-name=disk -object iothread,id=thread0 -device virtio-scsi,iothread=thread0,id=virtio-scsi0 -device scsi-hd,bus=virtio-scsi0.0,drive=disk,share-rw=on -device virtio-blk-pci,drive=disk,iothread=iothread0,share-rw=on +Testing: -drive file=TEST_DIR/t.qcow2,if=none,node-name=disk -object iothread,id=thread0 -device virtio-scsi,iothread=thread0,id=virtio-scsi0 -device scsi-hd,bus=virtio-scsi0.0,drive=disk,share-rw=on -device virtio-blk-pci,drive=disk,iothread=thread0,share-rw=on QEMU X.Y.Z monitor - type 'help' for more information -(qemu) QEMU_PROG: -device virtio-blk-pci,drive=disk,iothread=iothread0,share-rw=on: Cannot change iothread of active block backend +(qemu) QEMU_PROG: -device virtio-blk-pci,drive=disk,iothread=thread0,share-rw=on: Cannot change iothread of active block backend Testing: -drive file=TEST_DIR/t.qcow2,if=none,node-name=disk -object iothread,id=thread0 -device virtio-scsi,iothread=thread0,id=virtio-scsi0 -device scsi-hd,bus=virtio-scsi0.0,drive=disk,share-rw=on -device virtio-scsi,id=virtio-scsi1,iothread=thread0 -device scsi-hd,bus=virtio-scsi1.0,drive=disk,share-rw=on QEMU X.Y.Z monitor - type 'help' for more information @@ -539,7 +539,7 @@ wrote 4096/4096 bytes at offset 0 read 4096/4096 bytes at offset 0 4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) Testing: -drive driver=null-co,snapshot=on -QEMU_PROG: -drive driver=null-co,snapshot=on: Could not get temporary filename: No such file or directory +QEMU_PROG: -drive 
driver=null-co,snapshot=on: Could not open temporary file SNAPSHOT_PATH: No such file or directory Testing: -drive file=TEST_DIR/t.qcow2,snapshot=on,read-only=on,if=none,id=drive0 QEMU X.Y.Z monitor - type 'help' for more information diff --git a/tests/qemu-iotests/056 b/tests/qemu-iotests/056 index b459a3f1e8..bef865eec4 100755 --- a/tests/qemu-iotests/056 +++ b/tests/qemu-iotests/056 @@ -102,7 +102,7 @@ class TestSyncModesNoneAndTop(iotests.QMPTestCase): self.vm.shutdown() time.sleep(1) - self.assertEqual(-1, qemu_io('-c', 'read -P0x41 0 512', target_img).find("verification failed")) + qemu_io('-c', 'read -P0x41 0 512', target_img) class TestBeforeWriteNotifier(iotests.QMPTestCase): def setUp(self): diff --git a/tests/qemu-iotests/059 b/tests/qemu-iotests/059 index 65c0c32b26..e8be217e1f 100755 --- a/tests/qemu-iotests/059 +++ b/tests/qemu-iotests/059 @@ -20,7 +20,7 @@ # # creator -owner=famz@redhat.com +owner=fam@euphon.net seq=`basename $0` echo "QA output created by $seq" diff --git a/tests/qemu-iotests/060 b/tests/qemu-iotests/060 index db26c6b246..5cd21a6f68 100755 --- a/tests/qemu-iotests/060 +++ b/tests/qemu-iotests/060 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" @@ -80,13 +80,13 @@ poke_file "$TEST_IMG" "$l1_offset" "\x80\x00\x00\x00\x00\x03\x00\x00" _check_test_img # The corrupt bit should not be set anyway -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features # Try to write something, thereby forcing the corrupt bit to be set $QEMU_IO -c "$OPEN_RW" -c "write -P 0x2a 0 512" | _filter_qemu_io # The corrupt bit must now be set -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features # This information should be available through qemu-img info _img_info --format-specific @@ -114,19 +114,19 @@ poke_file "$TEST_IMG" "$(($rb_offset+8))" "\x00\x01" # Redirect new data cluster onto refcount block poke_file "$TEST_IMG" "$l2_offset" "\x80\x00\x00\x00\x00\x02\x00\x00" _check_test_img -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features $QEMU_IO -c "$OPEN_RW" -c "write -P 0x2a 0 512" | _filter_qemu_io -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features # Try to fix it _check_test_img -r all # The corrupt bit should be cleared -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features # Look if it's really really fixed $QEMU_IO -c "$OPEN_RW" -c "write -P 0x2a 0 512" | _filter_qemu_io -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features echo echo "=== Testing cluster data reference into inactive L2 table ===" @@ -139,13 +139,13 @@ $QEMU_IO -c "$OPEN_RW" -c "write -P 2 0 512" | _filter_qemu_io poke_file "$TEST_IMG" "$l2_offset_after_snapshot" \ "\x80\x00\x00\x00\x00\x04\x00\x00" _check_test_img -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features $QEMU_IO -c "$OPEN_RW" -c "write -P 3 0 512" | _filter_qemu_io -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features _check_test_img -r all -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep 
incompatible_features $QEMU_IO -c "$OPEN_RW" -c "write -P 4 0 512" | _filter_qemu_io -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header | grep incompatible_features # Check data $QEMU_IO -c "$OPEN_RO" -c "read -P 4 0 512" | _filter_qemu_io @@ -326,7 +326,7 @@ _make_test_img 64M # Let the refblock appear unaligned poke_file "$TEST_IMG" "$rt_offset" "\x00\x00\x00\x00\xff\xff\x2a\x00" # Mark the image dirty, thus forcing an automatic check when opening it -poke_file "$TEST_IMG" 72 "\x00\x00\x00\x00\x00\x00\x00\x01" +$PYTHON qcow2.py "$TEST_IMG" set-feature-bit incompatible 0 # Open the image (qemu should refuse to do so) $QEMU_IO -c close "$TEST_IMG" 2>&1 | _filter_testdir | _filter_imgfmt diff --git a/tests/qemu-iotests/060.out b/tests/qemu-iotests/060.out index b74540bafb..329977d9b9 100644 --- a/tests/qemu-iotests/060.out +++ b/tests/qemu-iotests/060.out @@ -17,7 +17,7 @@ virtual size: 64 MiB (67108864 bytes) cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false refcount bits: 16 corrupt: true diff --git a/tests/qemu-iotests/061 b/tests/qemu-iotests/061 index 9507c223bd..509ad247cd 100755 --- a/tests/qemu-iotests/061 +++ b/tests/qemu-iotests/061 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq=`basename $0` echo "QA output created by $seq" @@ -48,16 +48,20 @@ _supported_os Linux # not work with it; # we have explicit tests for various cluster sizes, the remaining tests # require the default 64k cluster -_unsupported_imgopts 'refcount_bits=\([^1]\|.\([^6]\|$\)\)' data_file cluster_size +# we don't have explicit tests for the zstd qcow2 compression type, as zstd may +# not be compiled in. And we can't create compat images with the compression type +# extension +_unsupported_imgopts 'refcount_bits=\([^1]\|.\([^6]\|$\)\)' data_file \ + cluster_size compression_type echo echo "=== Testing version downgrade with zero expansion ===" echo _make_test_img -o "compat=1.1,lazy_refcounts=on" 64M $QEMU_IO -c "write -z 0 128k" "$TEST_IMG" | _filter_qemu_io -$PYTHON qcow2.py "$TEST_IMG" dump-header +_qcow2_dump_header $QEMU_IMG amend -o "compat=0.10" "$TEST_IMG" -$PYTHON qcow2.py "$TEST_IMG" dump-header +_qcow2_dump_header $QEMU_IO -c "read -P 0 0 128k" "$TEST_IMG" | _filter_qemu_io _check_test_img @@ -68,10 +72,10 @@ _make_test_img -o "compat=1.1,lazy_refcounts=on" 64M $QEMU_IO -c "write -z 0 128k" "$TEST_IMG" | _filter_qemu_io $QEMU_IO -c "write -z 32M 128k" "$TEST_IMG" | _filter_qemu_io $QEMU_IO -c map "$TEST_IMG" | _filter_qemu_io -$PYTHON qcow2.py "$TEST_IMG" dump-header +_qcow2_dump_header $QEMU_IMG amend -o "compat=0.10" --image-opts \ driver=qcow2,file.filename=$TEST_IMG,l2-cache-entry-size=4096 -$PYTHON qcow2.py "$TEST_IMG" dump-header +_qcow2_dump_header $QEMU_IO -c "read -P 0 0 128k" "$TEST_IMG" | _filter_qemu_io $QEMU_IO -c "read -P 0 32M 128k" "$TEST_IMG" | _filter_qemu_io $QEMU_IO -c map "$TEST_IMG" | _filter_qemu_io @@ -84,9 +88,9 @@ _make_test_img -o "compat=1.1,lazy_refcounts=on" 64M _NO_VALGRIND \ $QEMU_IO -c "write -P 0x2a 0 128k" -c flush \ -c "sigraise $(kill -l KILL)" "$TEST_IMG" 2>&1 | _filter_qemu_io -$PYTHON qcow2.py "$TEST_IMG" dump-header +_qcow2_dump_header $QEMU_IMG amend -o "compat=0.10" "$TEST_IMG" -$PYTHON qcow2.py "$TEST_IMG" dump-header +_qcow2_dump_header $QEMU_IO -c "read -P 0x2a 0 128k" "$TEST_IMG" | _filter_qemu_io _check_test_img @@ -96,9 +100,9 @@ echo _make_test_img -o "compat=1.1" 64M $PYTHON qcow2.py 
"$TEST_IMG" set-feature-bit compatible 42 $PYTHON qcow2.py "$TEST_IMG" set-feature-bit autoclear 42 -$PYTHON qcow2.py "$TEST_IMG" dump-header +_qcow2_dump_header $QEMU_IMG amend -o "compat=0.10" "$TEST_IMG" -$PYTHON qcow2.py "$TEST_IMG" dump-header +_qcow2_dump_header _check_test_img echo @@ -106,9 +110,9 @@ echo "=== Testing version upgrade and resize ===" echo _make_test_img -o "compat=0.10" 64M $QEMU_IO -c "write -P 0x2a 42M 64k" "$TEST_IMG" | _filter_qemu_io -$PYTHON qcow2.py "$TEST_IMG" dump-header +_qcow2_dump_header $QEMU_IMG amend -o "compat=1.1,lazy_refcounts=on,size=128M" "$TEST_IMG" -$PYTHON qcow2.py "$TEST_IMG" dump-header +_qcow2_dump_header $QEMU_IO -c "read -P 0x2a 42M 64k" "$TEST_IMG" | _filter_qemu_io _check_test_img @@ -120,29 +124,29 @@ $QEMU_IO -c "write -P 0x2a 24M 64k" "$TEST_IMG" | _filter_qemu_io $QEMU_IMG snapshot -c foo "$TEST_IMG" $QEMU_IMG resize "$TEST_IMG" 64M && echo "unexpected pass" -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep '^\(version\|size\|nb_snap\)' +_qcow2_dump_header | grep '^\(version\|size\|nb_snap\)' $QEMU_IMG amend -o "compat=1.1,size=128M" "$TEST_IMG" || echo "unexpected fail" -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep '^\(version\|size\|nb_snap\)' +_qcow2_dump_header | grep '^\(version\|size\|nb_snap\)' $QEMU_IMG snapshot -c bar "$TEST_IMG" $QEMU_IMG resize --shrink "$TEST_IMG" 64M || echo "unexpected fail" -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep '^\(version\|size\|nb_snap\)' +_qcow2_dump_header | grep '^\(version\|size\|nb_snap\)' $QEMU_IMG amend -o "compat=0.10,size=32M" "$TEST_IMG" && echo "unexpected pass" -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep '^\(version\|size\|nb_snap\)' +_qcow2_dump_header | grep '^\(version\|size\|nb_snap\)' $QEMU_IMG snapshot -a bar "$TEST_IMG" || echo "unexpected fail" -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep '^\(version\|size\|nb_snap\)' +_qcow2_dump_header | grep '^\(version\|size\|nb_snap\)' $QEMU_IMG snapshot -d bar "$TEST_IMG" $QEMU_IMG amend -o "compat=0.10,size=32M" "$TEST_IMG" || echo "unexpected fail" -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep '^\(version\|size\|nb_snap\)' +_qcow2_dump_header | grep '^\(version\|size\|nb_snap\)' _check_test_img @@ -154,9 +158,9 @@ _make_test_img -o "compat=1.1,lazy_refcounts=on" 64M _NO_VALGRIND \ $QEMU_IO -c "write -P 0x2a 0 128k" -c flush \ -c "sigraise $(kill -l KILL)" "$TEST_IMG" 2>&1 | _filter_qemu_io -$PYTHON qcow2.py "$TEST_IMG" dump-header +_qcow2_dump_header $QEMU_IMG amend -o "lazy_refcounts=off" "$TEST_IMG" -$PYTHON qcow2.py "$TEST_IMG" dump-header +_qcow2_dump_header $QEMU_IO -c "read -P 0x2a 0 128k" "$TEST_IMG" | _filter_qemu_io _check_test_img diff --git a/tests/qemu-iotests/061.out b/tests/qemu-iotests/061.out index 7ecbd4dea8..139fc68177 100644 --- a/tests/qemu-iotests/061.out +++ b/tests/qemu-iotests/061.out @@ -525,7 +525,7 @@ virtual size: 64 MiB (67108864 bytes) cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false refcount bits: 16 data file: TEST_DIR/t.IMGFMT.data @@ -552,7 +552,7 @@ virtual size: 64 MiB (67108864 bytes) cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false refcount bits: 16 data file: foo @@ -567,7 +567,7 @@ virtual size: 64 MiB (67108864 bytes) cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false refcount bits: 16 data 
file raw: false @@ -583,7 +583,7 @@ virtual size: 64 MiB (67108864 bytes) cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false refcount bits: 16 data file: TEST_DIR/t.IMGFMT.data @@ -597,7 +597,7 @@ virtual size: 64 MiB (67108864 bytes) cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false refcount bits: 16 data file: TEST_DIR/t.IMGFMT.data @@ -612,7 +612,7 @@ virtual size: 64 MiB (67108864 bytes) cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false refcount bits: 16 data file: TEST_DIR/t.IMGFMT.data diff --git a/tests/qemu-iotests/062 b/tests/qemu-iotests/062 index 321252298d..6a71bf1477 100755 --- a/tests/qemu-iotests/062 +++ b/tests/qemu-iotests/062 @@ -21,7 +21,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq=`basename $0` echo "QA output created by $seq" diff --git a/tests/qemu-iotests/064 b/tests/qemu-iotests/064 index 71fc575b21..21e25cf39f 100755 --- a/tests/qemu-iotests/064 +++ b/tests/qemu-iotests/064 @@ -20,7 +20,7 @@ # # creator -owner=jcody@redhat.com +owner=codyprime@gmail.com seq=`basename $0` echo "QA output created by $seq" diff --git a/tests/qemu-iotests/065 b/tests/qemu-iotests/065 index 3c2ca27627..b724c89c7c 100755 --- a/tests/qemu-iotests/065 +++ b/tests/qemu-iotests/065 @@ -24,7 +24,7 @@ import os import re import json import iotests -from iotests import qemu_img, qemu_img_pipe +from iotests import qemu_img, qemu_img_info, supports_qcow2_zstd_compression import unittest test_img = os.path.join(iotests.test_dir, 'test.img') @@ -49,13 +49,12 @@ class TestQemuImgInfo(TestImageInfoSpecific): human_compare = None def test_json(self): - data = json.loads(qemu_img_pipe('info', '--output=json', test_img)) - data = data['format-specific'] + data = qemu_img_info(test_img)['format-specific'] self.assertEqual(data['type'], iotests.imgfmt) self.assertEqual(data['data'], self.json_compare) def test_human(self): - data = qemu_img_pipe('info', '--output=human', test_img).split('\n') + data = qemu_img('info', '--output=human', test_img).stdout.split('\n') data = data[(data.index('Format specific information:') + 1) :data.index('')] for field in data: @@ -88,7 +87,7 @@ class TestQMP(TestImageInfoSpecific): class TestQCow2(TestQemuImgInfo): '''Testing a qcow2 version 2 image''' - img_options = 'compat=0.10' + img_options = 'compat=0.10,compression_type=zlib' json_compare = { 'compat': '0.10', 'refcount-bits': 16, 'compression-type': 'zlib' } human_compare = [ 'compat: 0.10', 'compression type: zlib', @@ -96,17 +95,23 @@ class TestQCow2(TestQemuImgInfo): class TestQCow3NotLazy(TestQemuImgInfo): '''Testing a qcow2 version 3 image with lazy refcounts disabled''' + if supports_qcow2_zstd_compression(): + compression_type = 'zstd' + else: + compression_type = 'zlib' + img_options = 'compat=1.1,lazy_refcounts=off' + img_options += f',compression_type={compression_type}' json_compare = { 'compat': '1.1', 'lazy-refcounts': False, 'refcount-bits': 16, 'corrupt': False, - 'compression-type': 'zlib', 'extended-l2': False } - human_compare = [ 'compat: 1.1', 'compression type: zlib', + 'compression-type': compression_type, 'extended-l2': False } + human_compare = [ 'compat: 1.1', f'compression type: {compression_type}', 'lazy refcounts: false', 'refcount bits: 16', 'corrupt: false', 'extended l2: false' ] 
class TestQCow3Lazy(TestQemuImgInfo): '''Testing a qcow2 version 3 image with lazy refcounts enabled''' - img_options = 'compat=1.1,lazy_refcounts=on' + img_options = 'compat=1.1,lazy_refcounts=on,compression_type=zlib' json_compare = { 'compat': '1.1', 'lazy-refcounts': True, 'refcount-bits': 16, 'corrupt': False, 'compression-type': 'zlib', 'extended-l2': False } @@ -117,7 +122,7 @@ class TestQCow3Lazy(TestQemuImgInfo): class TestQCow3NotLazyQMP(TestQMP): '''Testing a qcow2 version 3 image with lazy refcounts disabled, opening with lazy refcounts enabled''' - img_options = 'compat=1.1,lazy_refcounts=off' + img_options = 'compat=1.1,lazy_refcounts=off,compression_type=zlib' qemu_options = 'lazy-refcounts=on' compare = { 'compat': '1.1', 'lazy-refcounts': False, 'refcount-bits': 16, 'corrupt': False, @@ -127,11 +132,17 @@ class TestQCow3NotLazyQMP(TestQMP): class TestQCow3LazyQMP(TestQMP): '''Testing a qcow2 version 3 image with lazy refcounts enabled, opening with lazy refcounts disabled''' + if supports_qcow2_zstd_compression(): + compression_type = 'zstd' + else: + compression_type = 'zlib' + img_options = 'compat=1.1,lazy_refcounts=on' + img_options += f',compression_type={compression_type}' qemu_options = 'lazy-refcounts=off' compare = { 'compat': '1.1', 'lazy-refcounts': True, 'refcount-bits': 16, 'corrupt': False, - 'compression-type': 'zlib', 'extended-l2': False } + 'compression-type': compression_type, 'extended-l2': False } TestImageInfoSpecific = None TestQemuImgInfo = None @@ -139,4 +150,5 @@ TestQMP = None if __name__ == '__main__': iotests.main(supported_fmts=['qcow2'], - supported_protocols=['file']) + supported_protocols=['file'], + unsupported_imgopts=['refcount_bits']) diff --git a/tests/qemu-iotests/066 b/tests/qemu-iotests/066 index a780ed7ab5..cf63144cb9 100755 --- a/tests/qemu-iotests/066 +++ b/tests/qemu-iotests/066 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/068 b/tests/qemu-iotests/068 index 54e49c8ffa..7ecd247409 100755 --- a/tests/qemu-iotests/068 +++ b/tests/qemu-iotests/068 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/069 b/tests/qemu-iotests/069 index 222dcba741..6647e11861 100755 --- a/tests/qemu-iotests/069 +++ b/tests/qemu-iotests/069 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/070 b/tests/qemu-iotests/070 index b181e00f9b..edb71afbe3 100755 --- a/tests/qemu-iotests/070 +++ b/tests/qemu-iotests/070 @@ -21,7 +21,7 @@ # # creator -owner=jcody@redhat.com +owner=codyprime@gmail.com seq=`basename $0` echo "QA output created by $seq" diff --git a/tests/qemu-iotests/071 b/tests/qemu-iotests/071 index d99cef5a42..27bc7305bf 100755 --- a/tests/qemu-iotests/071 +++ b/tests/qemu-iotests/071 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/072 b/tests/qemu-iotests/072 index c492ab8a78..662ede961c 100755 --- a/tests/qemu-iotests/072 +++ b/tests/qemu-iotests/072 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/074 b/tests/qemu-iotests/074 index c32c94b50d..ee73e636b2 100755 --- 
a/tests/qemu-iotests/074 +++ b/tests/qemu-iotests/074 @@ -21,7 +21,7 @@ ## # # creator -owner=famz@redhat.com +owner=fam@euphon.net seq=`basename $0` echo "QA output created by $seq" diff --git a/tests/qemu-iotests/082.out b/tests/qemu-iotests/082.out index 077ed0f2c7..d0dd333117 100644 --- a/tests/qemu-iotests/082.out +++ b/tests/qemu-iotests/082.out @@ -17,7 +17,7 @@ virtual size: 128 MiB (134217728 bytes) cluster_size: 4096 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: true refcount bits: 16 corrupt: false @@ -31,7 +31,7 @@ virtual size: 128 MiB (134217728 bytes) cluster_size: 8192 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: true refcount bits: 16 corrupt: false @@ -329,7 +329,7 @@ virtual size: 128 MiB (134217728 bytes) cluster_size: 4096 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: true refcount bits: 16 corrupt: false @@ -342,7 +342,7 @@ virtual size: 128 MiB (134217728 bytes) cluster_size: 8192 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: true refcount bits: 16 corrupt: false @@ -639,7 +639,7 @@ virtual size: 128 MiB (134217728 bytes) cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: true refcount bits: 16 corrupt: false @@ -652,7 +652,7 @@ virtual size: 130 MiB (136314880 bytes) cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false refcount bits: 16 corrupt: false @@ -665,7 +665,7 @@ virtual size: 132 MiB (138412032 bytes) cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: true refcount bits: 16 corrupt: false diff --git a/tests/qemu-iotests/084 b/tests/qemu-iotests/084 index e51e91a7c8..1181cb7cd0 100755 --- a/tests/qemu-iotests/084 +++ b/tests/qemu-iotests/084 @@ -21,7 +21,7 @@ # # creator -owner=jcody@redhat.com +owner=codyprime@gmail.com seq=`basename $0` echo "QA output created by $seq" diff --git a/tests/qemu-iotests/085 b/tests/qemu-iotests/085 index d557522943..3fb7b0b5c8 100755 --- a/tests/qemu-iotests/085 +++ b/tests/qemu-iotests/085 @@ -25,7 +25,7 @@ # # creator -owner=jcody@redhat.com +owner=codyprime@gmail.com seq=`basename $0` echo "QA output created by $seq" @@ -103,11 +103,18 @@ do_blockdev_add() } # ${1}: unique identifier for the snapshot filename -add_snapshot_image() +create_snapshot_image() { base_image="${TEST_DIR}/$((${1}-1))-${snapshot_virt0}" snapshot_file="${TEST_DIR}/${1}-${snapshot_virt0}" TEST_IMG=$snapshot_file _make_test_img -u -b "${base_image}" -F $IMGFMT "$size" +} + +# ${1}: unique identifier for the snapshot filename +add_snapshot_image() +{ + snapshot_file="${TEST_DIR}/${1}-${snapshot_virt0}" + create_snapshot_image "$1" do_blockdev_add "$1" "'backing': null, " "${snapshot_file}" } @@ -230,6 +237,28 @@ _make_test_img -b "${TEST_IMG}.base" -F $IMGFMT "$size" do_blockdev_add ${SNAPSHOTS} "" "${TEST_IMG}" blockdev_snapshot ${SNAPSHOTS} error +echo +echo === Invalid command - creating loops === +echo + +SNAPSHOTS=$((${SNAPSHOTS}+1)) +add_snapshot_image ${SNAPSHOTS} + +_send_qemu_cmd $h "{ 'execute': 'blockdev-snapshot', + 'arguments': { 'node':'snap_${SNAPSHOTS}', + 'overlay':'snap_${SNAPSHOTS}' 
} + }" "error" + +SNAPSHOTS=$((${SNAPSHOTS}+1)) +create_snapshot_image ${SNAPSHOTS} +do_blockdev_add ${SNAPSHOTS} "'backing': 'snap_$((${SNAPSHOTS}-1))', " \ + "${TEST_DIR}/${SNAPSHOTS}-${snapshot_virt0}" + +_send_qemu_cmd $h "{ 'execute': 'blockdev-snapshot', + 'arguments': { 'node':'snap_${SNAPSHOTS}', + 'overlay':'snap_$((${SNAPSHOTS}-1))' } + }" "error" + echo echo === Invalid command - The node does not exist === echo diff --git a/tests/qemu-iotests/085.out b/tests/qemu-iotests/085.out index 1d4c565b6d..b543b992ff 100644 --- a/tests/qemu-iotests/085.out +++ b/tests/qemu-iotests/085.out @@ -217,15 +217,42 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/ 'overlay':'snap_13' } } {"error": {"class": "GenericError", "desc": "The overlay already has a backing image"}} +=== Invalid command - creating loops === + +Formatting 'TEST_DIR/14-snapshot-v0.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/13-snapshot-v0.IMGFMT backing_fmt=IMGFMT +{ 'execute': 'blockdev-add', 'arguments': + { 'driver': 'IMGFMT', 'node-name': 'snap_14', 'backing': null, + 'file': + { 'driver': 'file', 'filename': 'TEST_DIR/14-snapshot-v0.IMGFMT', + 'node-name': 'file_14' } } } +{"return": {}} +{ 'execute': 'blockdev-snapshot', + 'arguments': { 'node':'snap_14', + 'overlay':'snap_14' } + } +{"error": {"class": "GenericError", "desc": "Making 'snap_14' a backing child of 'snap_14' would create a cycle"}} +Formatting 'TEST_DIR/15-snapshot-v0.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/14-snapshot-v0.IMGFMT backing_fmt=IMGFMT +{ 'execute': 'blockdev-add', 'arguments': + { 'driver': 'IMGFMT', 'node-name': 'snap_15', 'backing': 'snap_14', + 'file': + { 'driver': 'file', 'filename': 'TEST_DIR/15-snapshot-v0.IMGFMT', + 'node-name': 'file_15' } } } +{"return": {}} +{ 'execute': 'blockdev-snapshot', + 'arguments': { 'node':'snap_15', + 'overlay':'snap_14' } + } +{"error": {"class": "GenericError", "desc": "Making 'snap_15' a backing child of 'snap_14' would create a cycle"}} + === Invalid command - The node does not exist === { 'execute': 'blockdev-snapshot', 'arguments': { 'node': 'virtio0', - 'overlay':'snap_14' } } -{"error": {"class": "GenericError", "desc": "Cannot find device='snap_14' nor node-name='snap_14'"}} + 'overlay':'snap_16' } } +{"error": {"class": "GenericError", "desc": "Cannot find device='snap_16' nor node-name='snap_16'"}} { 'execute': 'blockdev-snapshot', 'arguments': { 'node':'nodevice', - 'overlay':'snap_13' } + 'overlay':'snap_15' } } {"error": {"class": "GenericError", "desc": "Cannot find device='nodevice' nor node-name='nodevice'"}} *** done diff --git a/tests/qemu-iotests/089 b/tests/qemu-iotests/089 index 48bdc42e42..c68c5a66b9 100755 --- a/tests/qemu-iotests/089 +++ b/tests/qemu-iotests/089 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/090 b/tests/qemu-iotests/090 index 2044c09e9b..8f88eea9aa 100755 --- a/tests/qemu-iotests/090 +++ b/tests/qemu-iotests/090 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/091 b/tests/qemu-iotests/091 index 9d144b9439..e396748a91 100755 --- a/tests/qemu-iotests/091 +++ b/tests/qemu-iotests/091 @@ -22,7 +22,7 @@ # # creator -owner=jcody@redhat.com +owner=codyprime@gmail.com seq=`basename $0` echo "QA output created by $seq" diff --git a/tests/qemu-iotests/094 
b/tests/qemu-iotests/094 index a295fb20ef..4766e9a458 100755 --- a/tests/qemu-iotests/094 +++ b/tests/qemu-iotests/094 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/095 b/tests/qemu-iotests/095 index 20b5f9bf61..d1d347eb1f 100755 --- a/tests/qemu-iotests/095 +++ b/tests/qemu-iotests/095 @@ -23,7 +23,7 @@ # along with this program. If not, see . # # creator -owner=jcody@redhat.com +owner=codyprime@gmail.com seq=`basename $0` echo "QA output created by $seq" diff --git a/tests/qemu-iotests/097 b/tests/qemu-iotests/097 index 30313f8867..93857f4fd0 100755 --- a/tests/qemu-iotests/097 +++ b/tests/qemu-iotests/097 @@ -21,7 +21,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/098 b/tests/qemu-iotests/098 index 4c37eb0cf5..e3eadb3296 100755 --- a/tests/qemu-iotests/098 +++ b/tests/qemu-iotests/098 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/099 b/tests/qemu-iotests/099 index 2f1199ce04..a5d2d30931 100755 --- a/tests/qemu-iotests/099 +++ b/tests/qemu-iotests/099 @@ -21,7 +21,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/102 b/tests/qemu-iotests/102 index 8b4c4c905f..141bfe1e90 100755 --- a/tests/qemu-iotests/102 +++ b/tests/qemu-iotests/102 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq=$(basename $0) echo "QA output created by $seq" diff --git a/tests/qemu-iotests/103 b/tests/qemu-iotests/103 index 726f8313ef..bb9fd6f650 100755 --- a/tests/qemu-iotests/103 +++ b/tests/qemu-iotests/103 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq=$(basename $0) echo "QA output created by $seq" diff --git a/tests/qemu-iotests/105 b/tests/qemu-iotests/105 index d804685110..b8f2029f62 100755 --- a/tests/qemu-iotests/105 +++ b/tests/qemu-iotests/105 @@ -20,7 +20,7 @@ # # creator -owner=famz@redhat.com +owner=fam@euphon.net seq=`basename $0` echo "QA output created by $seq" diff --git a/tests/qemu-iotests/106 b/tests/qemu-iotests/106 index 333144502c..9d6adb542d 100755 --- a/tests/qemu-iotests/106 +++ b/tests/qemu-iotests/106 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq=$(basename $0) echo "QA output created by $seq" diff --git a/tests/qemu-iotests/107 b/tests/qemu-iotests/107 index e68f1e07c7..3fabff2791 100755 --- a/tests/qemu-iotests/107 +++ b/tests/qemu-iotests/107 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/108 b/tests/qemu-iotests/108 index 8eaef0b8bf..54e935acf2 100755 --- a/tests/qemu-iotests/108 +++ b/tests/qemu-iotests/108 @@ -21,7 +21,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" @@ -30,13 +30,20 @@ status=1 # failure is the default! 
_cleanup() { - _cleanup_test_img + _cleanup_test_img + if [ -f "$TEST_DIR/qsd.pid" ]; then + qsd_pid=$(cat "$TEST_DIR/qsd.pid") + kill -KILL "$qsd_pid" + fusermount -u "$TEST_DIR/fuse-export" &>/dev/null + fi + rm -f "$TEST_DIR/fuse-export" } trap "_cleanup; exit \$status" 0 1 2 3 15 # get standard environment, filters and checks . ./common.rc . ./common.filter +. ./common.qemu # This tests qcow2-specific low-level functionality _supported_fmt qcow2 @@ -47,6 +54,27 @@ _supported_os Linux # files _unsupported_imgopts 'refcount_bits=\([^1]\|.\([^6]\|$\)\)' data_file +# This test either needs sudo -n losetup or FUSE exports to work +if sudo -n losetup &>/dev/null; then + loopdev=true +else + loopdev=false + + # Check for usable FUSE in the host environment: + if test ! -c "/dev/fuse"; then + _notrun 'No passwordless sudo nor usable /dev/fuse' + fi + + # QSD --export fuse will either yield "Parameter 'id' is missing" + # or "Invalid parameter 'fuse'", depending on whether there is + # FUSE support or not. + error=$($QSD --export fuse 2>&1) + if [[ $error = *"'fuse'"* ]]; then + _notrun 'Passwordless sudo for losetup or FUSE support required, but' \ + 'neither is available' + fi +fi + echo echo '=== Repairing an image without any refcount table ===' echo @@ -138,6 +166,240 @@ _make_test_img 64M poke_file "$TEST_IMG" $((0x10008)) "\xff\xff\xff\xff\xff\xff\x00\x00" _check_test_img -r all +echo +echo '=== Check rebuilt reftable location ===' + +# In an earlier version of the refcount rebuild algorithm, the +# reftable was generally placed at the image end (unless something was +# allocated in the area covered by the refblock right before the image +# file end, then we would try to place the reftable in that refblock). +# This was later changed so the reftable would be placed in the +# earliest possible location. Test this. + +echo +echo '--- Does the image size increase? ---' +echo + +# First test: Just create some image, write some data to it, and +# resize it so there is free space at the end of the image (enough +# that it spans at least one full refblock, which for cluster_size=512 +# images, spans 128k). With the old algorithm, the reftable would +# have then been placed at the end of the image file, but with the new +# one, it will be put in that free space. +# We want to check whether the size of the image file increases due to +# rebuilding the refcount structures (it should not). + +_make_test_img -o 'cluster_size=512' 1M +# Write something +$QEMU_IO -c 'write 0 64k' "$TEST_IMG" | _filter_qemu_io + +# Add free space +file_len=$(stat -c '%s' "$TEST_IMG") +truncate -s $((file_len + 256 * 1024)) "$TEST_IMG" + +# Corrupt the image by saying the image header was not allocated +rt_offset=$(peek_file_be "$TEST_IMG" 48 8) +rb_offset=$(peek_file_be "$TEST_IMG" $rt_offset 8) +poke_file "$TEST_IMG" $rb_offset "\x00\x00" + +# Check whether rebuilding the refcount structures increases the image +# file size +file_len=$(stat -c '%s' "$TEST_IMG") +echo +# The only leaks there can be are the old refcount structures that are +# leaked during rebuilding, no need to clutter the output with them +_check_test_img -r all | grep -v '^Repairing cluster.*refcount=1 reference=0' +echo +post_repair_file_len=$(stat -c '%s' "$TEST_IMG") + +if [[ $file_len -eq $post_repair_file_len ]]; then + echo 'OK: Image size did not change' +else + echo 'ERROR: Image size differs' \ + "($file_len before, $post_repair_file_len after)" +fi + +echo +echo '--- Will the reftable occupy a hole specifically left for it? 
---' +echo + +# Note: With cluster_size=512, every refblock covers 128k. +# The reftable covers 8M per reftable cluster. + +# Create an image that requires two reftable clusters (just because +# this is more interesting than a single-clustered reftable). +_make_test_img -o 'cluster_size=512' 9M +$QEMU_IO -c 'write 0 8M' "$TEST_IMG" | _filter_qemu_io + +# Writing 8M will have resized the reftable. Unfortunately, doing so +# will leave holes in the file, so we need to fill them up so we can +# be sure the whole file is allocated. Do that by writing +# consecutively smaller chunks starting from 8 MB, until the file +# length increases even with a chunk size of 512. Then we must have +# filled all holes. +ofs=$((8 * 1024 * 1024)) +block_len=$((16 * 1024)) +while [[ $block_len -ge 512 ]]; do + file_len=$(stat -c '%s' "$TEST_IMG") + while [[ $(stat -c '%s' "$TEST_IMG") -eq $file_len ]]; do + # Do not include this in the reference output, it does not + # really matter which qemu-io calls we do here exactly + $QEMU_IO -c "write $ofs $block_len" "$TEST_IMG" >/dev/null + ofs=$((ofs + block_len)) + done + block_len=$((block_len / 2)) +done + +# Fill up to 9M (do not include this in the reference output either, +# $ofs is random for all we know) +$QEMU_IO -c "write $ofs $((9 * 1024 * 1024 - ofs))" "$TEST_IMG" >/dev/null + +# Make space as follows: +# - For the first refblock: Right at the beginning of the image (this +# refblock is placed in the first place possible), +# - For the reftable somewhere soon afterwards, still near the +# beginning of the image (i.e. covered by the first refblock); the +# reftable too is placed in the first place possible, but only after +# all refblocks have been placed) +# No space is needed for the other refblocks, because no refblock is +# put before the space it covers. In this test case, we do not mind +# if they are placed at the image file's end. + +# Before we make that space, we have to find out the host offset of +# the area that belonged to the two data clusters at guest offset 4k, +# because we expect the reftable to be placed there, and we will have +# to verify that it is. + +l1_offset=$(peek_file_be "$TEST_IMG" 40 8) +l2_offset=$(peek_file_be "$TEST_IMG" $l1_offset 8) +l2_offset=$((l2_offset & 0x00fffffffffffe00)) +data_4k_offset=$(peek_file_be "$TEST_IMG" \ + $((l2_offset + 4096 / 512 * 8)) 8) +data_4k_offset=$((data_4k_offset & 0x00fffffffffffe00)) + +$QEMU_IO -c "discard 0 512" -c "discard 4k 1k" "$TEST_IMG" | _filter_qemu_io + +# Corrupt the image by saying the image header was not allocated +rt_offset=$(peek_file_be "$TEST_IMG" 48 8) +rb_offset=$(peek_file_be "$TEST_IMG" $rt_offset 8) +poke_file "$TEST_IMG" $rb_offset "\x00\x00" + +echo +# The only leaks there can be are the old refcount structures that are +# leaked during rebuilding, no need to clutter the output with them +_check_test_img -r all | grep -v '^Repairing cluster.*refcount=1 reference=0' +echo + +# Check whether the reftable was put where we expected +rt_offset=$(peek_file_be "$TEST_IMG" 48 8) +if [[ $rt_offset -eq $data_4k_offset ]]; then + echo 'OK: Reftable is where we expect it' +else + echo "ERROR: Reftable is at $rt_offset, but was expected at $data_4k_offset" +fi + +echo +echo '--- Rebuilding refcount structures on block devices ---' +echo + +# A block device cannot really grow, at least not during qemu-img +# check. 
As mentioned in the above cases, rebuilding the refcount +# structure may lead to new refcount structures being written after +# the end of the image, and in the past that happened even if there +# was more than sufficient space in the image. Such post-EOF writes +# will not work on block devices, so test that the new algorithm +# avoids it. + +# If we have passwordless sudo and losetup, we can use those to create +# a block device. Otherwise, we can resort to qemu's FUSE export to +# create a file that isn't growable, which effectively tests the same +# thing. + +_cleanup_test_img +truncate -s $((64 * 1024 * 1024)) "$TEST_IMG" + +if $loopdev; then + export_mp=$(sudo -n losetup --show -f "$TEST_IMG") + export_mp_driver=host_device + sudo -n chmod go+rw "$export_mp" +else + # Create non-growable FUSE export that is a bit like an empty + # block device + export_mp="$TEST_DIR/fuse-export" + export_mp_driver=file + touch "$export_mp" + + $QSD \ + --blockdev file,node-name=export-node,filename="$TEST_IMG" \ + --export fuse,id=fuse-export,node-name=export-node,mountpoint="$export_mp",writable=on,growable=off,allow-other=off \ + --pidfile "$TEST_DIR/qsd.pid" \ + --daemonize +fi + +# Now create a qcow2 image on the device -- unfortunately, qemu-img +# create force-creates the file, so we have to resort to the +# blockdev-create job. +_launch_qemu \ + --blockdev $export_mp_driver,node-name=file,filename="$export_mp" + +_send_qemu_cmd \ + $QEMU_HANDLE \ + '{ "execute": "qmp_capabilities" }' \ + 'return' + +# Small cluster size again, so the image needs multiple refblocks +_send_qemu_cmd \ + $QEMU_HANDLE \ + '{ "execute": "blockdev-create", + "arguments": { + "job-id": "create", + "options": { + "driver": "qcow2", + "file": "file", + "size": '$((64 * 1024 * 1024))', + "cluster-size": 512 + } } }' \ + '"concluded"' + +_send_qemu_cmd \ + $QEMU_HANDLE \ + '{ "execute": "job-dismiss", "arguments": { "id": "create" } }' \ + 'return' + +_send_qemu_cmd \ + $QEMU_HANDLE \ + '{ "execute": "quit" }' \ + 'return' + +wait=y _cleanup_qemu +echo + +# Write some data +$QEMU_IO -c 'write 0 64k' "$export_mp" | _filter_qemu_io + +# Corrupt the image by saying the image header was not allocated +rt_offset=$(peek_file_be "$export_mp" 48 8) +rb_offset=$(peek_file_be "$export_mp" $rt_offset 8) +poke_file "$export_mp" $rb_offset "\x00\x00" + +# Repairing such a simple case should just work +# (We used to put the reftable at the end of the image file, which can +# never work for non-growable devices.) +echo +TEST_IMG="$export_mp" _check_test_img -r all \ + | grep -v '^Repairing cluster.*refcount=1 reference=0' + +if $loopdev; then + sudo -n losetup -d "$export_mp" +else + qsd_pid=$(cat "$TEST_DIR/qsd.pid") + kill -TERM "$qsd_pid" + # Wait for process to exit (cannot `wait` because the QSD is daemonized) + while [ -f "$TEST_DIR/qsd.pid" ]; do + true + done +fi + # success, all done echo '*** done' rm -f $seq.full diff --git a/tests/qemu-iotests/108.out b/tests/qemu-iotests/108.out index 75bab8dc84..b5401d788d 100644 --- a/tests/qemu-iotests/108.out +++ b/tests/qemu-iotests/108.out @@ -105,6 +105,87 @@ The following inconsistencies were found and repaired: 0 leaked clusters 1 corruptions +Double checking the fixed image now... +No errors were found on the image. + +=== Check rebuilt reftable location === + +--- Does the image size increase? 
--- + +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 +wrote 65536/65536 bytes at offset 0 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +ERROR cluster 0 refcount=0 reference=1 +Rebuilding refcount structure +The following inconsistencies were found and repaired: + + 0 leaked clusters + 1 corruptions + +Double checking the fixed image now... +No errors were found on the image. + +OK: Image size did not change + +--- Will the reftable occupy a hole specifically left for it? --- + +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=9437184 +wrote 8388608/8388608 bytes at offset 0 +8 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +discard 512/512 bytes at offset 0 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +discard 1024/1024 bytes at offset 4096 +1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +ERROR cluster 0 refcount=0 reference=1 +Rebuilding refcount structure +The following inconsistencies were found and repaired: + + 0 leaked clusters + 1 corruptions + +Double checking the fixed image now... +No errors were found on the image. + +OK: Reftable is where we expect it + +--- Rebuilding refcount structures on block devices --- + +{ "execute": "qmp_capabilities" } +{"return": {}} +{ "execute": "blockdev-create", + "arguments": { + "job-id": "create", + "options": { + "driver": "IMGFMT", + "file": "file", + "size": 67108864, + "cluster-size": 512 + } } } +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "create"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "create"}} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "create"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "create"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "create"}} +{ "execute": "job-dismiss", "arguments": { "id": "create" } } +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "create"}} +{"return": {}} +{ "execute": "quit" } +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} + +wrote 65536/65536 bytes at offset 0 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +ERROR cluster 0 refcount=0 reference=1 +Rebuilding refcount structure +The following inconsistencies were found and repaired: + + 0 leaked clusters + 1 corruptions + Double checking the fixed image now... No errors were found on the image. 
*** done diff --git a/tests/qemu-iotests/109.out b/tests/qemu-iotests/109.out index 8f839b4b7f..e29280015e 100644 --- a/tests/qemu-iotests/109.out +++ b/tests/qemu-iotests/109.out @@ -44,9 +44,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. @@ -95,9 +94,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 197120, "offset": 197120, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 197120, "offset": 197120, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. 
@@ -146,9 +144,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. @@ -197,9 +194,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. 
@@ -248,9 +244,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. @@ -299,9 +294,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. 
@@ -349,9 +343,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. @@ -399,9 +392,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 31457280, "offset": 31457280, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 31457280, "offset": 31457280, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. 
@@ -449,9 +441,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. @@ -499,9 +490,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. 
@@ -529,9 +519,8 @@ WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. @@ -552,9 +541,8 @@ Images are identical. {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. 
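All of the reference-output hunks above encode the same behavioural change: when QEMU is asked to quit while a ready mirror job is still running, the job is now force-cancelled instead of being drained to completion, so the "waiting"/"pending"/BLOCK_JOB_COMPLETED tail becomes "aborting" followed by BLOCK_JOB_CANCELLED. As a standalone illustration of the sequence these outputs check for (plain Python with invented event strings; not part of the patch itself):

import json

def normalize(line):
    # Parse one QMP event line and blank out the volatile timestamp,
    # much like the reference outputs do with their TIMESTAMP placeholder.
    event = json.loads(line)
    event["timestamp"] = {"seconds": "TIMESTAMP", "microseconds": "TIMESTAMP"}
    return event

log = [
    '{"timestamp": {"seconds": 1, "microseconds": 2}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}',
    '{"timestamp": {"seconds": 3, "microseconds": 4}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}',
    '{"timestamp": {"seconds": 5, "microseconds": 6}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "type": "mirror"}}',
]

events = [normalize(line) for line in log]
names = [e["data"]["status"] if e["event"] == "JOB_STATUS_CHANGE" else e["event"]
         for e in events]

# A force-cancelled ready job is expected to go ready -> aborting -> cancelled.
assert names == ["ready", "aborting", "BLOCK_JOB_CANCELLED"]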
diff --git a/tests/qemu-iotests/110 b/tests/qemu-iotests/110 index 1fa36ccdb7..91b15f7513 100755 --- a/tests/qemu-iotests/110 +++ b/tests/qemu-iotests/110 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/111 b/tests/qemu-iotests/111 index 3ba25f6161..382dbf0606 100755 --- a/tests/qemu-iotests/111 +++ b/tests/qemu-iotests/111 @@ -21,7 +21,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/112 b/tests/qemu-iotests/112 index 07ac74fb2c..a2ffc96e60 100755 --- a/tests/qemu-iotests/112 +++ b/tests/qemu-iotests/112 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" @@ -43,7 +43,8 @@ _supported_proto file fuse # This test will set refcount_bits on its own which would conflict with the # manual setting; compat will be overridden as well; # and external data files do not work well with our refcount testing -_unsupported_imgopts refcount_bits 'compat=0.10' data_file +# also, compression type is not supported with compat=0.10 used in test +_unsupported_imgopts refcount_bits 'compat=0.10' data_file compression_type print_refcount_bits() { diff --git a/tests/qemu-iotests/113 b/tests/qemu-iotests/113 index ee59b9a4b8..a3ad208fd7 100755 --- a/tests/qemu-iotests/113 +++ b/tests/qemu-iotests/113 @@ -21,7 +21,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/115 b/tests/qemu-iotests/115 index 26dd37dd6d..7a24070caa 100755 --- a/tests/qemu-iotests/115 +++ b/tests/qemu-iotests/115 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/117 b/tests/qemu-iotests/117 index 48ebc012b1..6081473584 100755 --- a/tests/qemu-iotests/117 +++ b/tests/qemu-iotests/117 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/119 b/tests/qemu-iotests/119 index 5770b50045..6cac8793ba 100755 --- a/tests/qemu-iotests/119 +++ b/tests/qemu-iotests/119 @@ -21,7 +21,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/120 b/tests/qemu-iotests/120 index 7187731253..ac7bd8c4e3 100755 --- a/tests/qemu-iotests/120 +++ b/tests/qemu-iotests/120 @@ -21,7 +21,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/121 b/tests/qemu-iotests/121 index ba3d8d9377..f0dd1d1114 100755 --- a/tests/qemu-iotests/121 +++ b/tests/qemu-iotests/121 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/122 b/tests/qemu-iotests/122 index 5d550ed13e..be0f6b79e5 100755 --- a/tests/qemu-iotests/122 +++ b/tests/qemu-iotests/122 @@ -67,7 +67,7 @@ echo _make_test_img -b "$TEST_IMG".base -F $IMGFMT $QEMU_IO -c "write -P 0 0 3M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir -$QEMU_IMG convert -O $IMGFMT -B "$TEST_IMG".base -o backing_fmt=$IMGFMT \ +$QEMU_IMG convert -O $IMGFMT -B "$TEST_IMG".base -F $IMGFMT \ "$TEST_IMG" "$TEST_IMG".orig 
$QEMU_IO -c "read -P 0 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir $QEMU_IMG convert -O $IMGFMT -c -B "$TEST_IMG".base -o backing_fmt=$IMGFMT \ @@ -251,6 +251,7 @@ $QEMU_IO -c "write -P 0 0 64k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_test $QEMU_IO -c "write 0 1k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir $QEMU_IO -c "write 8k 1k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir $QEMU_IO -c "write 17k 1k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "write -P 0 65k 1k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir for min_sparse in 4k 8k; do echo diff --git a/tests/qemu-iotests/122.out b/tests/qemu-iotests/122.out index 8fbdac2b39..e18766e167 100644 --- a/tests/qemu-iotests/122.out +++ b/tests/qemu-iotests/122.out @@ -192,6 +192,8 @@ wrote 1024/1024 bytes at offset 8192 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) wrote 1024/1024 bytes at offset 17408 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 1024/1024 bytes at offset 66560 +1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) convert -S 4k [{ "start": 0, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, diff --git a/tests/qemu-iotests/123 b/tests/qemu-iotests/123 index e19111f70d..4d34a2ac49 100755 --- a/tests/qemu-iotests/123 +++ b/tests/qemu-iotests/123 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/125 b/tests/qemu-iotests/125 index bd390b3a99..46279d6b38 100755 --- a/tests/qemu-iotests/125 +++ b/tests/qemu-iotests/125 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq=$(basename $0) echo "QA output created by $seq" diff --git a/tests/qemu-iotests/126 b/tests/qemu-iotests/126 index 92c0547746..d8d2d654f2 100755 --- a/tests/qemu-iotests/126 +++ b/tests/qemu-iotests/126 @@ -21,7 +21,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/127 b/tests/qemu-iotests/127 index 32edc3b068..7cc3ce1d78 100755 --- a/tests/qemu-iotests/127 +++ b/tests/qemu-iotests/127 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq=$(basename $0) echo "QA output created by $seq" diff --git a/tests/qemu-iotests/129 b/tests/qemu-iotests/129 index 5251e2669e..c75ec62ecf 100755 --- a/tests/qemu-iotests/129 +++ b/tests/qemu-iotests/129 @@ -77,8 +77,8 @@ class TestStopWithBlockJob(iotests.QMPTestCase): self.do_test_stop("drive-backup", device="drive0", target=self.target_img, format=iotests.imgfmt, sync="full", - x_perf={ 'max-chunk': 65536, - 'max-workers': 8 }) + x_perf={'max-chunk': 65536, + 'max-workers': 8}) def test_block_commit(self): # Add overlay above the source node so that we actually use a @@ -88,13 +88,13 @@ class TestStopWithBlockJob(iotests.QMPTestCase): '1G') result = self.vm.qmp('blockdev-add', **{ - 'node-name': 'overlay', - 'driver': iotests.imgfmt, - 'file': { - 'driver': 'file', - 'filename': self.overlay_img - } - }) + 'node-name': 'overlay', + 'driver': iotests.imgfmt, + 'file': { + 'driver': 'file', + 'filename': self.overlay_img + } + }) self.assert_qmp(result, 'return', {}) result = self.vm.qmp('blockdev-snapshot', diff --git a/tests/qemu-iotests/131 b/tests/qemu-iotests/131 index d7456cae5b..a847692b4c 100755 --- a/tests/qemu-iotests/131 +++ b/tests/qemu-iotests/131 @@ -43,7 +43,7 @@ _supported_os Linux 
inuse_offset=$((0x2c)) -size=64M +size=$((64 * 1024 * 1024)) CLUSTER_SIZE=64k IMGFMT=parallels _make_test_img $size @@ -70,6 +70,39 @@ _check_test_img _check_test_img -r all { $QEMU_IO -c "read -P 0x11 64k 64k" "$TEST_IMG"; } 2>&1 | _filter_qemu_io | _filter_testdir +echo "== allocate with backing ==" +# Verify that allocating clusters works fine even when there is a backing image. +# Regression test for a bug where we would pass a buffer read from the backing +# node as a QEMUIOVector object, which could cause anything from I/O errors through +# assertion failures to invalid reads from memory. + +# Clear image +_make_test_img $size +# Create base image +TEST_IMG="$TEST_IMG.base" _make_test_img $size + +# Write some data to the base image (which would trigger an assertion failure if +# interpreted as a QEMUIOVector) +$QEMU_IO -c 'write -P 42 0 64k' "$TEST_IMG.base" | _filter_qemu_io + +# Parallels does not seem to support storing a backing filename in the image +# itself, so we need to build our backing chain on the command line +imgopts="driver=$IMGFMT,file.driver=$IMGPROTO,file.filename=$TEST_IMG" +imgopts+=",backing.driver=$IMGFMT" +imgopts+=",backing.file.driver=$IMGPROTO,backing.file.filename=$TEST_IMG.base" + +# Cause allocation in the top image +QEMU_IO_OPTIONS=$QEMU_IO_OPTIONS_NO_FMT \ + $QEMU_IO --image-opts "$imgopts" -c 'write -P 1 0 64' | _filter_qemu_io + +# Verify +QEMU_IO_OPTIONS=$QEMU_IO_OPTIONS_NO_FMT \ + $QEMU_IO --image-opts "$imgopts" \ + -c 'read -P 1 0 64' \ + -c "read -P 42 64 $((64 * 1024 - 64))" \ + -c "read -P 0 64k $((size - 64 * 1024))" \ + | _filter_qemu_io + # success, all done echo "*** done" rm -f $seq.full diff --git a/tests/qemu-iotests/131.out b/tests/qemu-iotests/131.out index 70da03dee5..de5ef7a8f5 100644 --- a/tests/qemu-iotests/131.out +++ b/tests/qemu-iotests/131.out @@ -37,4 +37,17 @@ Double checking the fixed image now... No errors were found on the image.
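The new 131 test case has to assemble its backing chain entirely from runtime options because, as its comment notes, parallels images cannot store a backing filename on disk. The dotted keys written into $imgopts are just a flattened spelling of the nested block-graph options QEMU builds from them; a standalone sketch of that correspondence (plain Python, hypothetical filenames, not part of the patch):

def flatten(opts, prefix=""):
    # Turn a nested option dict into the dotted key=value form
    # accepted by --image-opts.
    parts = []
    for key, value in opts.items():
        dotted = prefix + key
        if isinstance(value, dict):
            parts.append(flatten(value, dotted + "."))
        else:
            parts.append(f"{dotted}={value}")
    return ",".join(parts)

nested = {
    "driver": "parallels",
    "file": {"driver": "file", "filename": "t.parallels"},
    "backing": {
        "driver": "parallels",
        "file": {"driver": "file", "filename": "t.parallels.base"},
    },
}

print(flatten(nested))
# driver=parallels,file.driver=file,file.filename=t.parallels,
# backing.driver=parallels,backing.file.driver=file,backing.file.filename=t.parallels.base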
read 65536/65536 bytes at offset 65536 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +== allocate with backing == +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 +Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=67108864 +wrote 65536/65536 bytes at offset 0 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 64/64 bytes at offset 0 +64 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 64/64 bytes at offset 0 +64 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 65472/65472 bytes at offset 64 +63.938 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 67043328/67043328 bytes at offset 65536 +63.938 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) *** done diff --git a/tests/qemu-iotests/135 b/tests/qemu-iotests/135 index 299075b4c9..71125719ee 100755 --- a/tests/qemu-iotests/135 +++ b/tests/qemu-iotests/135 @@ -20,7 +20,7 @@ # # creator -owner=jcody@redhat.com +owner=codyprime@gmail.com seq=`basename $0` echo "QA output created by $seq" diff --git a/tests/qemu-iotests/137 b/tests/qemu-iotests/137 index 4680d5df3d..52ee135184 100755 --- a/tests/qemu-iotests/137 +++ b/tests/qemu-iotests/137 @@ -140,7 +140,7 @@ $QEMU_IO \ # The dirty bit must not be set # (Filter the external data file bit) -if $PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features \ +if _qcow2_dump_header | grep incompatible_features \ | grep -q '\<0\>' then echo 'ERROR: Dirty bit set' diff --git a/tests/qemu-iotests/138 b/tests/qemu-iotests/138 index 951cfa67d4..76628adab7 100755 --- a/tests/qemu-iotests/138 +++ b/tests/qemu-iotests/138 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/140 b/tests/qemu-iotests/140 index 91e08c30d4..d923b777e2 100755 --- a/tests/qemu-iotests/140 +++ b/tests/qemu-iotests/140 @@ -24,7 +24,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/141 b/tests/qemu-iotests/141 index 115cc1691e..a37030ee17 100755 --- a/tests/qemu-iotests/141 +++ b/tests/qemu-iotests/141 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/141.out b/tests/qemu-iotests/141.out index c4c15fb275..63203d9944 100644 --- a/tests/qemu-iotests/141.out +++ b/tests/qemu-iotests/141.out @@ -132,7 +132,7 @@ wrote 1048576/1048576 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}} {'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}} -{"error": {"class": "GenericError", "desc": "Node drv0 is in use"}} +{"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: block device is in use by block job: commit"}} {'execute': 'block-job-cancel', 'arguments': {'device': 'job0'}} {"return": {}} diff --git a/tests/qemu-iotests/142 b/tests/qemu-iotests/142 index 69fd10ef51..86d65a2d1a 100755 --- a/tests/qemu-iotests/142 +++ b/tests/qemu-iotests/142 @@ -350,6 +350,35 @@ info block backing-file" echo "$hmp_cmds" | run_qemu -drive "$files","$ids" | grep "Cache" +echo +echo "--- Alignment after changing O_DIRECT ---" +echo + +# Directly test the protocol level: Can unaligned requests succeed even if +# O_DIRECT was only enabled through a reopen and vice versa? 
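The question this new 142 section asks, whether unaligned requests keep working when O_DIRECT is only switched on or off by a reopen, ultimately comes down to request alignment: under O_DIRECT the buffer, offset and length generally have to be aligned to the device's logical block size, so the block layer must re-probe alignment whenever a reopen toggles the flag. A standalone sketch of that constraint at the syscall level (plain Python on Linux, hypothetical file name, not part of the patch):

import mmap
import os

# Assumes a Linux filesystem that honours O_DIRECT and an existing file of
# at least 4096 bytes (e.g. one created beforehand with qemu-img/qemu-io).
path = "t.img"

fd = os.open(path, os.O_RDONLY | os.O_DIRECT)
try:
    # An anonymous mmap is page-aligned, which satisfies O_DIRECT's
    # alignment requirement for the buffer; offset 0 and length 4096
    # satisfy it for the request itself.
    buf = mmap.mmap(-1, 4096)
    nread = os.preadv(fd, [buf], 0)
    print(f"read {nread} bytes with O_DIRECT")
    # An unaligned request (say, 100 bytes at offset 1) would typically
    # fail with EINVAL instead, which is exactly what the reopen test
    # above has to guard against.
finally:
    os.close(fd)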
+ +# Ensure image size is a multiple of the sector size (required for O_DIRECT) +$QEMU_IMG create -f file "$TEST_IMG" 1M | _filter_img_create + +# And write some data (not strictly necessary, but it feels better to actually +# have something to be read) +$QEMU_IO -f file -c 'write 0 4096' "$TEST_IMG" | _filter_qemu_io + +$QEMU_IO --cache=writeback -f file $TEST_IMG <. # +import math import os +import subprocess +import time +from typing import List, Optional import iotests from iotests import qemu_img @@ -50,7 +54,7 @@ class TestActiveMirror(iotests.QMPTestCase): self.vm = iotests.VM() self.vm.add_drive_raw(self.vm.qmp_to_opts(blk_source)) self.vm.add_blockdev(self.vm.qmp_to_opts(blk_target)) - self.vm.add_device('virtio-blk,drive=source') + self.vm.add_device('virtio-blk,id=vblk,drive=source') self.vm.launch() def tearDown(self): @@ -192,6 +196,227 @@ class TestActiveMirror(iotests.QMPTestCase): self.potential_writes_in_flight = False +class TestThrottledWithNbdExportBase(iotests.QMPTestCase): + image_len = 128 * 1024 * 1024 # MB + iops: Optional[int] = None + background_processes: List['subprocess.Popen[str]'] = [] + + def setUp(self): + # Must be set by subclasses + self.assertIsNotNone(self.iops) + + qemu_img('create', '-f', iotests.imgfmt, source_img, '128M') + qemu_img('create', '-f', iotests.imgfmt, target_img, '128M') + + self.vm = iotests.VM() + self.vm.launch() + + result = self.vm.qmp('object-add', **{ + 'qom-type': 'throttle-group', + 'id': 'thrgr', + 'limits': { + 'iops-total': self.iops, + 'iops-total-max': self.iops + } + }) + self.assert_qmp(result, 'return', {}) + + result = self.vm.qmp('blockdev-add', **{ + 'node-name': 'source-node', + 'driver': 'throttle', + 'throttle-group': 'thrgr', + 'file': { + 'driver': iotests.imgfmt, + 'file': { + 'driver': 'file', + 'filename': source_img + } + } + }) + self.assert_qmp(result, 'return', {}) + + result = self.vm.qmp('blockdev-add', **{ + 'node-name': 'target-node', + 'driver': iotests.imgfmt, + 'file': { + 'driver': 'file', + 'filename': target_img + } + }) + self.assert_qmp(result, 'return', {}) + + self.nbd_sock = iotests.file_path('nbd.sock', + base_dir=iotests.sock_dir) + self.nbd_url = f'nbd+unix:///source-node?socket={self.nbd_sock}' + + result = self.vm.qmp('nbd-server-start', addr={ + 'type': 'unix', + 'data': { + 'path': self.nbd_sock + } + }) + self.assert_qmp(result, 'return', {}) + + result = self.vm.qmp('block-export-add', id='exp0', type='nbd', + node_name='source-node', writable=True) + self.assert_qmp(result, 'return', {}) + + def tearDown(self): + # Wait for background requests to settle + try: + while True: + p = self.background_processes.pop() + while True: + try: + p.wait(timeout=0.0) + break + except subprocess.TimeoutExpired: + self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}') + except IndexError: + pass + + # Cancel ongoing block jobs + for job in self.vm.qmp('query-jobs')['return']: + self.vm.qmp('block-job-cancel', device=job['id'], force=True) + + while True: + self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}') + if len(self.vm.qmp('query-jobs')['return']) == 0: + break + + self.vm.shutdown() + os.remove(source_img) + os.remove(target_img) + + +class TestLowThrottledWithNbdExport(TestThrottledWithNbdExportBase): + iops = 16 + + def testUnderLoad(self): + ''' + Throttle the source node, then issue a whole bunch of external requests + while the mirror job (in write-blocking mode) is running. 
We want to + see background requests being issued even while the source is under + full load by active writes, so that progress can be made towards READY. + ''' + + # Fill the first half of the source image; do not fill the second half, + # that is where we will have active requests occur. This ensures that + # active mirroring itself will not directly contribute to the job's + # progress (because when the job was started, those areas were not + # intended to be copied, so active mirroring will only lead to not + # losing progress, but also not making any). + self.vm.hmp_qemu_io('source-node', + f'aio_write -P 1 0 {self.image_len // 2}') + self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}') + + # Launch the mirror job + mirror_buf_size = 65536 + result = self.vm.qmp('blockdev-mirror', + job_id='mirror', + filter_node_name='mirror-node', + device='source-node', + target='target-node', + sync='full', + copy_mode='write-blocking', + buf_size=mirror_buf_size) + self.assert_qmp(result, 'return', {}) + + # We create the external requests via qemu-io processes on the NBD + # server. Have their offset start in the middle of the image so they + # do not overlap with the background requests (which start from the + # beginning). + active_request_offset = self.image_len // 2 + active_request_len = 4096 + + # Create enough requests to saturate the node for 5 seconds + for _ in range(0, 5 * self.iops): + req = f'write -P 42 {active_request_offset} {active_request_len}' + active_request_offset += active_request_len + p = iotests.qemu_io_popen('-f', 'nbd', self.nbd_url, '-c', req) + self.background_processes += [p] + + # Now advance the clock one I/O operation at a time by the 4 seconds + # (i.e. one less than 5). We expect the mirror job to issue background + # operations here, even though active requests are still in flight. + # The active requests will take precedence, however, because they have + # been issued earlier than mirror's background requests. + # Once the active requests we have started above are done (i.e. after 5 + # virtual seconds), we expect those background requests to be worked + # on. We only advance 4 seconds here to avoid race conditions. + for _ in range(0, 4 * self.iops): + step = math.ceil(1 * 1000 * 1000 * 1000 / self.iops) + self.vm.qtest(f'clock_step {step}') + + # Note how much remains to be done until the mirror job is finished + job_status = self.vm.qmp('query-jobs')['return'][0] + start_remaining = job_status['total-progress'] - \ + job_status['current-progress'] + + # Create a whole bunch of more active requests + for _ in range(0, 10 * self.iops): + req = f'write -P 42 {active_request_offset} {active_request_len}' + active_request_offset += active_request_len + p = iotests.qemu_io_popen('-f', 'nbd', self.nbd_url, '-c', req) + self.background_processes += [p] + + # Let the clock advance more. After 1 second, as noted above, we + # expect the background requests to be worked on. Give them a couple + # of seconds (specifically 4) to see their impact. + for _ in range(0, 5 * self.iops): + step = math.ceil(1 * 1000 * 1000 * 1000 / self.iops) + self.vm.qtest(f'clock_step {step}') + + # Note how much remains to be done now. We expect this number to be + # reduced thanks to those background requests. 
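The loops above lean on a small piece of arithmetic: with the throttle group capped at iops requests per second, advancing the virtual clock by ceil(10^9 / iops) nanoseconds releases roughly one request, so n * iops such steps cover n saturated seconds. A standalone sketch of that budget (plain Python mirroring the test's iops = 16; nothing here talks to QEMU):

import math

iops = 16                      # throttle-group limit used by the test
ns_per_second = 1_000_000_000  # the virtual clock is stepped in nanoseconds

# One clock_step per request: after iops steps, at least one virtual second
# has passed and the throttle group has let through about iops requests.
step = math.ceil(ns_per_second / iops)
assert step * iops >= ns_per_second

# Five seconds' worth of queued external writes, of which only four seconds
# are advanced before progress is sampled, so some active requests are still
# in flight at that point.
queued_requests = 5 * iops
advanced_requests = 4 * iops
print(step, queued_requests, advanced_requests)  # 62500000 80 64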
+ job_status = self.vm.qmp('query-jobs')['return'][0] + end_remaining = job_status['total-progress'] - \ + job_status['current-progress'] + + # See that indeed progress was being made on the job, even while the + # node was saturated with active requests + self.assertGreater(start_remaining - end_remaining, 0) + + +class TestHighThrottledWithNbdExport(TestThrottledWithNbdExportBase): + iops = 1024 + + def testActiveOnCreation(self): + ''' + Issue requests on the mirror source node right as the mirror is + instated. It's possible that requests occur before the actual job is + created, but after the node has been put into the graph. Write + requests across the node must in that case be forwarded to the source + node without attempting to mirror them (there is no job object yet, so + attempting to access it would cause a segfault). + We do this with a lightly throttled node (i.e. quite high IOPS limit). + Using throttling seems to increase reproducibility, but if the limit is + too low, all requests allowed per second will be submitted before + mirror_start_job() gets to the problematic point. + ''' + + # Let qemu-img bench create write requests (enough for two seconds on + # the virtual clock) + bench_args = ['bench', '-w', '-d', '1024', '-f', 'nbd', + '-c', str(self.iops * 2), self.nbd_url] + p = iotests.qemu_tool_popen(iotests.qemu_img_args + bench_args) + self.background_processes += [p] + + # Give qemu-img bench time to start up and issue requests + time.sleep(1.0) + # Flush the request queue, so new requests can come in right as we + # start blockdev-mirror + self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}') + + result = self.vm.qmp('blockdev-mirror', + job_id='mirror', + device='source-node', + target='target-node', + sync='full', + copy_mode='write-blocking') + self.assert_qmp(result, 'return', {}) + + if __name__ == '__main__': iotests.main(supported_fmts=['qcow2', 'raw'], supported_protocols=['file']) diff --git a/tests/qemu-iotests/151.out b/tests/qemu-iotests/151.out index 89968f35d7..3f8a935a08 100644 --- a/tests/qemu-iotests/151.out +++ b/tests/qemu-iotests/151.out @@ -1,5 +1,5 @@ -.... +......
---------------------------------------------------------------------- -Ran 4 tests +Ran 6 tests OK diff --git a/tests/qemu-iotests/153 b/tests/qemu-iotests/153 index 607af59091..9bc3be8f75 100755 --- a/tests/qemu-iotests/153 +++ b/tests/qemu-iotests/153 @@ -20,7 +20,7 @@ # # creator -owner=famz@redhat.com +owner=fam@euphon.net seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/156 b/tests/qemu-iotests/156 index 65dcedd493..a9540bd80d 100755 --- a/tests/qemu-iotests/156 +++ b/tests/qemu-iotests/156 @@ -28,7 +28,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/162 b/tests/qemu-iotests/162 index cf17f494d8..94dae60d30 100755 --- a/tests/qemu-iotests/162 +++ b/tests/qemu-iotests/162 @@ -21,7 +21,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/163 b/tests/qemu-iotests/163 index dedce8ef43..c94ad16f4a 100755 --- a/tests/qemu-iotests/163 +++ b/tests/qemu-iotests/163 @@ -107,17 +107,13 @@ class ShrinkBaseClass(iotests.QMPTestCase): if iotests.imgfmt == 'raw': return - self.assertEqual(qemu_img('check', test_img), 0, - "Verifying image corruption") + qemu_img('check', test_img) def test_empty_image(self): qemu_img('resize', '-f', iotests.imgfmt, '--shrink', test_img, self.shrink_size) - self.assertEqual( - qemu_io('-c', 'read -P 0x00 %s'%self.shrink_size, test_img), - qemu_io('-c', 'read -P 0x00 %s'%self.shrink_size, check_img), - "Verifying image content") + qemu_io('-c', f"read -P 0x00 0 {self.shrink_size}", test_img) self.image_verify() @@ -130,8 +126,7 @@ class ShrinkBaseClass(iotests.QMPTestCase): qemu_img('resize', '-f', iotests.imgfmt, '--shrink', test_img, self.shrink_size) - self.assertEqual(qemu_img("compare", test_img, check_img), 0, - "Verifying image content") + qemu_img("compare", test_img, check_img) self.image_verify() @@ -146,8 +141,7 @@ class ShrinkBaseClass(iotests.QMPTestCase): qemu_img('resize', '-f', iotests.imgfmt, '--shrink', test_img, self.shrink_size) - self.assertEqual(qemu_img("compare", test_img, check_img), 0, - "Verifying image content") + qemu_img("compare", test_img, check_img) self.image_verify() @@ -169,4 +163,5 @@ ShrinkBaseClass = None if __name__ == '__main__': iotests.main(supported_fmts=['raw', 'qcow2'], - supported_protocols=['file']) + supported_protocols=['file'], + unsupported_imgopts=['compat']) diff --git a/tests/qemu-iotests/165 b/tests/qemu-iotests/165 index ce499946b8..e3ef28e2ee 100755 --- a/tests/qemu-iotests/165 +++ b/tests/qemu-iotests/165 @@ -157,4 +157,5 @@ class TestPersistentDirtyBitmap(iotests.QMPTestCase): if __name__ == '__main__': iotests.main(supported_fmts=['qcow2'], - supported_protocols=['file']) + supported_protocols=['file'], + unsupported_imgopts=['compat']) diff --git a/tests/qemu-iotests/172.out b/tests/qemu-iotests/172.out index 4cf4d536b4..07eebf3583 100644 --- a/tests/qemu-iotests/172.out +++ b/tests/qemu-iotests/172.out @@ -15,7 +15,6 @@ Testing: fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -29,6 +28,8 @@ Testing: discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "288" @@ -43,7 +44,6 @@ Testing: -fda TEST_DIR/t.qcow2 fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: 
floppy, id "" @@ -57,6 +57,8 @@ Testing: -fda TEST_DIR/t.qcow2 discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" floppy0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: /machine/unattached/device[N] @@ -81,7 +83,6 @@ Testing: -fdb TEST_DIR/t.qcow2 fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -95,6 +96,8 @@ Testing: -fdb TEST_DIR/t.qcow2 discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" dev: floppy, id "" unit = 0 (0x0) @@ -107,6 +110,8 @@ Testing: -fdb TEST_DIR/t.qcow2 discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "288" floppy1 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: /machine/unattached/device[N] @@ -135,7 +140,6 @@ Testing: -fda TEST_DIR/t.qcow2 -fdb TEST_DIR/t.qcow2.2 fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -149,6 +153,8 @@ Testing: -fda TEST_DIR/t.qcow2 -fdb TEST_DIR/t.qcow2.2 discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" dev: floppy, id "" unit = 0 (0x0) @@ -161,6 +167,8 @@ Testing: -fda TEST_DIR/t.qcow2 -fdb TEST_DIR/t.qcow2.2 discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" floppy0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: /machine/unattached/device[N] @@ -190,7 +198,6 @@ Testing: -fdb fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -204,6 +211,8 @@ Testing: -fdb discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "288" dev: floppy, id "" unit = 0 (0x0) @@ -216,6 +225,8 @@ Testing: -fdb discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "288" @@ -230,7 +241,6 @@ Testing: -drive if=floppy,file=TEST_DIR/t.qcow2 fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -244,6 +254,8 @@ Testing: -drive if=floppy,file=TEST_DIR/t.qcow2 discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" floppy0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: /machine/unattached/device[N] @@ -268,7 +280,6 @@ Testing: -drive if=floppy,file=TEST_DIR/t.qcow2,index=1 fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -282,6 +293,8 @@ Testing: -drive if=floppy,file=TEST_DIR/t.qcow2,index=1 discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" dev: floppy, id "" unit = 0 (0x0) @@ -294,6 +307,8 @@ Testing: -drive if=floppy,file=TEST_DIR/t.qcow2,index=1 discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "288" floppy1 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: 
/machine/unattached/device[N] @@ -322,7 +337,6 @@ Testing: -drive if=floppy,file=TEST_DIR/t.qcow2 -drive if=floppy,file=TEST_DIR/t fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -336,6 +350,8 @@ Testing: -drive if=floppy,file=TEST_DIR/t.qcow2 -drive if=floppy,file=TEST_DIR/t discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" dev: floppy, id "" unit = 0 (0x0) @@ -348,6 +364,8 @@ Testing: -drive if=floppy,file=TEST_DIR/t.qcow2 -drive if=floppy,file=TEST_DIR/t discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" floppy0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: /machine/unattached/device[N] @@ -380,7 +398,6 @@ Testing: -drive if=none,file=TEST_DIR/t.qcow2 -device floppy,drive=none0 fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -394,6 +411,8 @@ Testing: -drive if=none,file=TEST_DIR/t.qcow2 -device floppy,drive=none0 discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: /machine/peripheral-anon/device[N] @@ -418,7 +437,6 @@ Testing: -drive if=none,file=TEST_DIR/t.qcow2 -device floppy,drive=none0,unit=1 fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -432,6 +450,8 @@ Testing: -drive if=none,file=TEST_DIR/t.qcow2 -device floppy,drive=none0,unit=1 discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: /machine/peripheral-anon/device[N] @@ -456,7 +476,6 @@ Testing: -drive if=none,file=TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.qco fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -470,6 +489,8 @@ Testing: -drive if=none,file=TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.qco discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" dev: floppy, id "" unit = 0 (0x0) @@ -482,6 +503,8 @@ Testing: -drive if=none,file=TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.qco discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: /machine/peripheral-anon/device[N] @@ -520,7 +543,6 @@ Testing: -fda TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.qcow2.2 -device fl fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -534,6 +556,8 @@ Testing: -fda TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.qcow2.2 -device fl discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" dev: floppy, id "" unit = 0 (0x0) @@ -546,6 +570,8 @@ Testing: -fda TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.qcow2.2 -device fl discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = 
"auto" drive-type = "144" floppy0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: /machine/unattached/device[N] @@ -575,7 +601,6 @@ Testing: -fda TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.qcow2.2 -device fl fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -589,6 +614,8 @@ Testing: -fda TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.qcow2.2 -device fl discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" dev: floppy, id "" unit = 0 (0x0) @@ -601,6 +628,8 @@ Testing: -fda TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.qcow2.2 -device fl discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" floppy0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: /machine/unattached/device[N] @@ -630,7 +659,6 @@ Testing: -fdb TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.qcow2.2 -device fl fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -644,6 +672,8 @@ Testing: -fdb TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.qcow2.2 -device fl discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" dev: floppy, id "" unit = 1 (0x1) @@ -656,6 +686,8 @@ Testing: -fdb TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.qcow2.2 -device fl discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" floppy1 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: /machine/unattached/device[N] @@ -685,7 +717,6 @@ Testing: -fdb TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.qcow2.2 -device fl fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -699,6 +730,8 @@ Testing: -fdb TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.qcow2.2 -device fl discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" dev: floppy, id "" unit = 1 (0x1) @@ -711,6 +744,8 @@ Testing: -fdb TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.qcow2.2 -device fl discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" floppy1 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: /machine/unattached/device[N] @@ -749,7 +784,6 @@ Testing: -drive if=floppy,file=TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.q fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -763,6 +797,8 @@ Testing: -drive if=floppy,file=TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.q discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" dev: floppy, id "" unit = 0 (0x0) @@ -775,6 +811,8 @@ Testing: -drive if=floppy,file=TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.q discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" floppy0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: /machine/unattached/device[N] @@ -804,7 +842,6 @@ Testing: -drive 
if=floppy,file=TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.q fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -818,6 +855,8 @@ Testing: -drive if=floppy,file=TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.q discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" dev: floppy, id "" unit = 0 (0x0) @@ -830,6 +869,8 @@ Testing: -drive if=floppy,file=TEST_DIR/t.qcow2 -drive if=none,file=TEST_DIR/t.q discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" floppy0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: /machine/unattached/device[N] @@ -865,7 +906,6 @@ Testing: -drive if=none,file=TEST_DIR/t.qcow2 -global floppy.drive=none0 -device fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -879,6 +919,8 @@ Testing: -drive if=none,file=TEST_DIR/t.qcow2 -global floppy.drive=none0 -device discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: /machine/peripheral-anon/device[N] @@ -933,7 +975,6 @@ Testing: -device floppy fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -947,6 +988,8 @@ Testing: -device floppy discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "288" Testing: -device floppy,drive-type=120 @@ -958,7 +1001,6 @@ Testing: -device floppy,drive-type=120 fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -972,6 +1014,8 @@ Testing: -device floppy,drive-type=120 discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "120" Testing: -device floppy,drive-type=144 @@ -983,7 +1027,6 @@ Testing: -device floppy,drive-type=144 fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -997,6 +1040,8 @@ Testing: -device floppy,drive-type=144 discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" Testing: -device floppy,drive-type=288 @@ -1008,7 +1053,6 @@ Testing: -device floppy,drive-type=288 fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -1022,6 +1066,8 @@ Testing: -device floppy,drive-type=288 discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "288" @@ -1036,7 +1082,6 @@ Testing: -drive if=none,file=TEST_DIR/t.qcow2 -device floppy,drive=none0,drive-t fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -1050,6 +1095,8 @@ Testing: -drive if=none,file=TEST_DIR/t.qcow2 -device floppy,drive=none0,drive-t discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "120" none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: 
/machine/peripheral-anon/device[N] @@ -1074,7 +1121,6 @@ Testing: -drive if=none,file=TEST_DIR/t.qcow2 -device floppy,drive=none0,drive-t fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -1088,6 +1134,8 @@ Testing: -drive if=none,file=TEST_DIR/t.qcow2 -device floppy,drive=none0,drive-t discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "288" none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: /machine/peripheral-anon/device[N] @@ -1115,7 +1163,6 @@ Testing: -drive if=none,file=TEST_DIR/t.qcow2 -device floppy,drive=none0,logical fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -1129,6 +1176,8 @@ Testing: -drive if=none,file=TEST_DIR/t.qcow2 -device floppy,drive=none0,logical discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: /machine/peripheral-anon/device[N] @@ -1153,7 +1202,6 @@ Testing: -drive if=none,file=TEST_DIR/t.qcow2 -device floppy,drive=none0,physica fdtypeA = "auto" fdtypeB = "auto" fallback = "288" - isa irq 6 bus: floppy-bus.0 type floppy-bus dev: floppy, id "" @@ -1167,6 +1215,8 @@ Testing: -drive if=none,file=TEST_DIR/t.qcow2 -device floppy,drive=none0,physica discard_granularity = 4294967295 (4 GiB) write-cache = "auto" share-rw = false + account-invalid = "auto" + account-failed = "auto" drive-type = "144" none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) Attached to: /machine/peripheral-anon/device[N] diff --git a/tests/qemu-iotests/173 b/tests/qemu-iotests/173 index 9594f3c5ea..217e55c168 100755 --- a/tests/qemu-iotests/173 +++ b/tests/qemu-iotests/173 @@ -20,7 +20,7 @@ # along with this program. If not, see . 
# # creator -owner=jcody@redhat.com +owner=codyprime@gmail.com seq=`basename $0` echo "QA output created by $seq" diff --git a/tests/qemu-iotests/176 b/tests/qemu-iotests/176 index 27ac25467f..a6a2a4cd44 100755 --- a/tests/qemu-iotests/176 +++ b/tests/qemu-iotests/176 @@ -25,7 +25,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/182 b/tests/qemu-iotests/182 index fcd1d796eb..bbd1132b05 100755 --- a/tests/qemu-iotests/182 +++ b/tests/qemu-iotests/182 @@ -20,7 +20,7 @@ # # creator -owner=famz@redhat.com +owner=fam@euphon.net seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/185 b/tests/qemu-iotests/185 index f2ec5c5ceb..8b1143dc16 100755 --- a/tests/qemu-iotests/185 +++ b/tests/qemu-iotests/185 @@ -33,6 +33,12 @@ _cleanup() _rm_test_img "${TEST_IMG}.copy" _cleanup_test_img _cleanup_qemu + + if [ -f "$TEST_DIR/qsd.pid" ]; then + kill -SIGKILL "$(cat "$TEST_DIR/qsd.pid")" + rm -f "$TEST_DIR/qsd.pid" + fi + rm -f "$SOCK_DIR/qsd.sock" } trap "_cleanup; exit \$status" 0 1 2 3 15 @@ -45,7 +51,7 @@ _supported_fmt qcow2 _supported_proto file _supported_os Linux -size=64M +size=$((64 * 1048576)) TEST_IMG="${TEST_IMG}.base" _make_test_img $size echo @@ -216,6 +222,188 @@ wait=1 _cleanup_qemu | grep -v 'JOB_STATUS_CHANGE' _check_test_img +echo +echo === Start mirror to throttled QSD and exit qemu === +echo + +# Mirror to a throttled QSD instance (so that qemu cannot drain the +# throttling), wait for READY, then write some data to the device, +# and then quit qemu. +# (qemu should force-cancel the job and not wait for the data to be +# written to the target.) + +_make_test_img $size + +# Will be used by this and the next case +set_up_throttled_qsd() { + $QSD \ + --object throttle-group,id=thrgr,limits.bps-total=1048576 \ + --blockdev null-co,node-name=null,size=$size \ + --blockdev throttle,node-name=throttled,throttle-group=thrgr,file=null \ + --nbd-server addr.type=unix,addr.path="$SOCK_DIR/qsd.sock" \ + --export nbd,id=exp,node-name=throttled,name=target,writable=true \ + --pidfile "$TEST_DIR/qsd.pid" \ + --daemonize +} + +set_up_throttled_qsd + +# Need a virtio-blk device so that qemu-io writes will not block the monitor +_launch_qemu \ + --blockdev file,node-name=source-proto,filename="$TEST_IMG" \ + --blockdev qcow2,node-name=source-fmt,file=source-proto \ + --device virtio-blk,id=vblk,drive=source-fmt \ + --blockdev "{\"driver\": \"nbd\", + \"node-name\": \"target\", + \"server\": { + \"type\": \"unix\", + \"path\": \"$SOCK_DIR/qsd.sock\" + }, + \"export\": \"target\"}" + +h=$QEMU_HANDLE +_send_qemu_cmd $h '{"execute": "qmp_capabilities"}' 'return' + +# Use sync=top, so the first pass will not copy the whole image +_send_qemu_cmd $h \ + '{"execute": "blockdev-mirror", + "arguments": { + "job-id": "mirror", + "device": "source-fmt", + "target": "target", + "sync": "top" + }}' \ + 'return' \ + | grep -v JOB_STATUS_CHANGE # Ignore these events during creation + +# This too will be used by this and the next case +# $1: QEMU handle +# $2: Image size +wait_for_job_and_quit() { + h=$1 + size=$2 + + # List of expected events + capture_events='BLOCK_JOB_READY JOB_STATUS_CHANGE' + _wait_event $h 'BLOCK_JOB_READY' + QEMU_EVENTS= # Ignore all JOB_STATUS_CHANGE events that came before READY + + # Write something to the device for post-READY mirroring. 
Write it in + # blocks matching the cluster size, each spaced one block apart, so + # that the mirror job will have to spawn one request per cluster. + # Because the number of concurrent requests is limited (to 16), this + # limits the number of bytes concurrently in flight, which speeds up + # cancelling the job (in-flight requests still are waited for). + # To limit the number of bytes in flight, we could alternatively pass + # something for blockdev-mirror's @buf-size parameter, but + # block-commit does not have such a parameter, so we need to figure + # something out that works for both. + + cluster_size=65536 + step=$((cluster_size * 2)) + + echo '--- Writing data to the virtio-blk device ---' + + for ofs in $(seq 0 $step $((size - step))); do + qemu_io_cmd="qemu-io -d vblk/virtio-backend " + qemu_io_cmd+="\\\"aio_write $ofs $cluster_size\\\"" + + # Do not include these requests in the reference output + # (it's just too much) + silent=yes _send_qemu_cmd $h \ + "{\"execute\": \"human-monitor-command\", + \"arguments\": { + \"command-line\": \"$qemu_io_cmd\" + }}" \ + 'return' + done + + # Wait until the job's length is updated to reflect the write requests + + # We have written to half of the device, so this is the expected job length + final_len=$((size / 2)) + timeout=100 # unit: 0.1 seconds + while true; do + len=$( + _send_qemu_cmd $h \ + '{"execute": "query-block-jobs"}' \ + 'return.*"len": [0-9]\+' \ + | grep 'return.*"len": [0-9]\+' \ + | sed -e 's/.*"len": \([0-9]\+\).*/\1/' + ) + if [ "$len" -eq "$final_len" ]; then + break + fi + timeout=$((timeout - 1)) + if [ "$timeout" -eq 0 ]; then + echo "ERROR: Timeout waiting for job to reach len=$final_len" + break + fi + sleep 0.1 + done + + sleep 1 + + _send_qemu_cmd $h \ + '{"execute": "quit"}' \ + 'return' + + # List of expected events + capture_events='BLOCK_JOB_CANCELLED JOB_STATUS_CHANGE SHUTDOWN' + _wait_event $h 'SHUTDOWN' + QEMU_EVENTS= # Ignore all JOB_STATUS_CHANGE events that came before SHUTDOWN + _wait_event $h 'JOB_STATUS_CHANGE' # standby + _wait_event $h 'JOB_STATUS_CHANGE' # ready + _wait_event $h 'JOB_STATUS_CHANGE' # aborting + # Filter the offset (depends on when exactly `quit` was issued) + _wait_event $h 'BLOCK_JOB_CANCELLED' \ + | sed -e 's/"offset": [0-9]\+/"offset": (filtered)/' + _wait_event $h 'JOB_STATUS_CHANGE' # concluded + _wait_event $h 'JOB_STATUS_CHANGE' # null + + wait=yes _cleanup_qemu + + kill -SIGTERM "$(cat "$TEST_DIR/qsd.pid")" +} + +wait_for_job_and_quit $h $size + +echo +echo === Start active commit to throttled QSD and exit qemu === +echo + +# Same as the above, but instead of mirroring, do an active commit + +_make_test_img $size + +set_up_throttled_qsd + +_launch_qemu \ + --blockdev "{\"driver\": \"nbd\", + \"node-name\": \"target\", + \"server\": { + \"type\": \"unix\", + \"path\": \"$SOCK_DIR/qsd.sock\" + }, + \"export\": \"target\"}" \ + --blockdev file,node-name=source-proto,filename="$TEST_IMG" \ + --blockdev qcow2,node-name=source-fmt,file=source-proto,backing=target \ + --device virtio-blk,id=vblk,drive=source-fmt + +h=$QEMU_HANDLE +_send_qemu_cmd $h '{"execute": "qmp_capabilities"}' 'return' + +_send_qemu_cmd $h \ + '{"execute": "block-commit", + "arguments": { + "job-id": "commit", + "device": "source-fmt" + }}' \ + 'return' \ + | grep -v JOB_STATUS_CHANGE # Ignore these events during creation + +wait_for_job_and_quit $h $size + # success, all done echo "*** done" rm -f $seq.full diff --git a/tests/qemu-iotests/185.out b/tests/qemu-iotests/185.out index 754a641258..70e8dd6c87 
100644 --- a/tests/qemu-iotests/185.out +++ b/tests/qemu-iotests/185.out @@ -116,4 +116,52 @@ Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 cluster_size=65536 extended_l2=off {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "disk", "len": 67108864, "offset": 524288, "speed": 65536, "type": "stream"}} No errors were found on the image. + +=== Start mirror to throttled QSD and exit qemu === + +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 +{"execute": "qmp_capabilities"} +{"return": {}} +{"execute": "blockdev-mirror", + "arguments": { + "job-id": "mirror", + "device": "source-fmt", + "target": "target", + "sync": "top" + }} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "mirror", "len": 0, "offset": 0, "speed": 0, "type": "mirror"}} +--- Writing data to the virtio-blk device --- +{"execute": "quit"} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "mirror", "len": 33554432, "offset": (filtered), "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "mirror"}} + +=== Start active commit to throttled QSD and exit qemu === + +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 +{"execute": "qmp_capabilities"} +{"return": {}} +{"execute": "block-commit", + "arguments": { + "job-id": "commit", + "device": "source-fmt" + }} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "commit", "len": 0, "offset": 0, "speed": 0, "type": "commit"}} +--- Writing data to the virtio-blk device --- +{"execute": "quit"} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "commit"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "commit"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "commit"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "commit", "len": 33554432, "offset": (filtered), "speed": 0, "type": "commit"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, 
"event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "commit"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "commit"}} *** done diff --git a/tests/qemu-iotests/192 b/tests/qemu-iotests/192 index d809187fca..e66e1a4f06 100755 --- a/tests/qemu-iotests/192 +++ b/tests/qemu-iotests/192 @@ -21,7 +21,7 @@ # # creator -owner=famz@redhat.com +owner=fam@euphon.net seq=`basename $0` echo "QA output created by $seq" diff --git a/tests/qemu-iotests/194 b/tests/qemu-iotests/194 index e44b8df728..68894371f5 100755 --- a/tests/qemu-iotests/194 +++ b/tests/qemu-iotests/194 @@ -33,8 +33,8 @@ with iotests.FilePath('source.img') as source_img_path, \ iotests.VM('dest') as dest_vm: img_size = '1G' - iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, source_img_path, img_size) - iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, dest_img_path, img_size) + iotests.qemu_img_create('-f', iotests.imgfmt, source_img_path, img_size) + iotests.qemu_img_create('-f', iotests.imgfmt, dest_img_path, img_size) iotests.log('Launching VMs...') (source_vm.add_drive(source_img_path) diff --git a/tests/qemu-iotests/196 b/tests/qemu-iotests/196 index 2451515094..76509a5ad1 100755 --- a/tests/qemu-iotests/196 +++ b/tests/qemu-iotests/196 @@ -65,4 +65,5 @@ class TestInvalidateAutoclear(iotests.QMPTestCase): if __name__ == '__main__': iotests.main(supported_fmts=['qcow2'], - supported_protocols=['file']) + supported_protocols=['file'], + unsupported_imgopts=['compat']) diff --git a/tests/qemu-iotests/198.out b/tests/qemu-iotests/198.out index 3952708444..805494916f 100644 --- a/tests/qemu-iotests/198.out +++ b/tests/qemu-iotests/198.out @@ -36,7 +36,7 @@ image: json:{ /* filtered */ } file format: IMGFMT virtual size: 16 MiB (16777216 bytes) Format specific information: - compression type: zlib + compression type: COMPRESSION_TYPE encrypt: ivgen alg: plain64 hash alg: sha256 @@ -81,7 +81,7 @@ virtual size: 16 MiB (16777216 bytes) backing file: TEST_DIR/t.IMGFMT.base backing file format: IMGFMT Format specific information: - compression type: zlib + compression type: COMPRESSION_TYPE encrypt: ivgen alg: plain64 hash alg: sha256 diff --git a/tests/qemu-iotests/200 b/tests/qemu-iotests/200 index f80517e342..f66c571d24 100755 --- a/tests/qemu-iotests/200 +++ b/tests/qemu-iotests/200 @@ -22,7 +22,7 @@ # # creator -owner=jcody@redhat.com +owner=codyprime@gmail.com seq=`basename $0` echo "QA output created by $seq" diff --git a/tests/qemu-iotests/202 b/tests/qemu-iotests/202 index 8eb5f32d15..b784dcd791 100755 --- a/tests/qemu-iotests/202 +++ b/tests/qemu-iotests/202 @@ -35,8 +35,8 @@ with iotests.FilePath('disk0.img') as disk0_img_path, \ iotests.VM() as vm: img_size = '10M' - iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, disk0_img_path, img_size) - iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, disk1_img_path, img_size) + iotests.qemu_img_create('-f', iotests.imgfmt, disk0_img_path, img_size) + iotests.qemu_img_create('-f', iotests.imgfmt, disk1_img_path, img_size) iotests.log('Launching VM...') vm.launch() diff --git a/tests/qemu-iotests/203 b/tests/qemu-iotests/203 index ea30e50497..ab80fd0e44 100755 --- a/tests/qemu-iotests/203 +++ b/tests/qemu-iotests/203 @@ -33,8 +33,8 @@ with iotests.FilePath('disk0.img') as disk0_img_path, \ iotests.VM() as vm: img_size = '10M' - iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, disk0_img_path, img_size) - iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, 
disk1_img_path, img_size) + iotests.qemu_img_create('-f', iotests.imgfmt, disk0_img_path, img_size) + iotests.qemu_img_create('-f', iotests.imgfmt, disk1_img_path, img_size) iotests.log('Launching VM...') (vm.add_object('iothread,id=iothread0') diff --git a/tests/qemu-iotests/205 b/tests/qemu-iotests/205 index c0e107328f..15f798288a 100755 --- a/tests/qemu-iotests/205 +++ b/tests/qemu-iotests/205 @@ -85,13 +85,13 @@ class TestNbdServerRemove(iotests.QMPTestCase): def do_test_connect_after_remove(self, mode=None): args = ('-r', '-f', 'raw', '-c', 'read 0 512', nbd_uri) - self.assertReadOk(qemu_io(*args)) + self.assertReadOk(qemu_io(*args).stdout) result = self.remove_export('exp', mode) self.assert_qmp(result, 'return', {}) self.assertExportNotFound('exp') - self.assertConnectFailed(qemu_io(*args)) + self.assertConnectFailed(qemu_io(*args, check=False).stdout) def test_connect_after_remove_default(self): self.do_test_connect_after_remove() diff --git a/tests/qemu-iotests/206 b/tests/qemu-iotests/206 index c3cdad4ce4..10eff343f7 100755 --- a/tests/qemu-iotests/206 +++ b/tests/qemu-iotests/206 @@ -162,8 +162,8 @@ with iotests.FilePath('t.qcow2') as disk_path, \ 'encrypt': { 'format': 'luks', 'key-secret': 'keysec0', - 'cipher-alg': 'twofish-128', - 'cipher-mode': 'ctr', + 'cipher-alg': 'aes-128', + 'cipher-mode': 'cbc', 'ivgen-alg': 'plain64', 'ivgen-hash-alg': 'md5', 'hash-alg': 'sha1', diff --git a/tests/qemu-iotests/206.out b/tests/qemu-iotests/206.out index b68c443867..7e95694777 100644 --- a/tests/qemu-iotests/206.out +++ b/tests/qemu-iotests/206.out @@ -18,7 +18,7 @@ virtual size: 128 MiB (134217728 bytes) cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false refcount bits: 16 corrupt: false @@ -42,7 +42,7 @@ virtual size: 64 MiB (67108864 bytes) cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false refcount bits: 16 corrupt: false @@ -66,7 +66,7 @@ virtual size: 32 MiB (33554432 bytes) cluster_size: 2097152 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: true refcount bits: 1 corrupt: false @@ -92,12 +92,12 @@ backing file: TEST_IMG.base backing file format: IMGFMT Format specific information: compat: 0.10 - compression type: zlib + compression type: COMPRESSION_TYPE refcount bits: 16 === Successful image creation (encrypted) === -{"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "qcow2", "encrypt": {"cipher-alg": "twofish-128", "cipher-mode": "ctr", "format": "luks", "hash-alg": "sha1", "iter-time": 10, "ivgen-alg": "plain64", "ivgen-hash-alg": "md5", "key-secret": "keysec0"}, "file": {"driver": "file", "filename": "TEST_DIR/PID-t.qcow2"}, "size": 33554432}}} +{"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "qcow2", "encrypt": {"cipher-alg": "aes-128", "cipher-mode": "cbc", "format": "luks", "hash-alg": "sha1", "iter-time": 10, "ivgen-alg": "plain64", "ivgen-hash-alg": "md5", "key-secret": "keysec0"}, "file": {"driver": "file", "filename": "TEST_DIR/PID-t.qcow2"}, "size": 33554432}}} {"return": {}} {"execute": "job-dismiss", "arguments": {"id": "job0"}} {"return": {}} @@ -109,16 +109,16 @@ encrypted: yes cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false refcount bits: 16 
encrypt: ivgen alg: plain64 hash alg: sha1 - cipher alg: twofish-128 + cipher alg: aes-128 uuid: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX format: luks - cipher mode: ctr + cipher mode: cbc slots: [0]: active: true @@ -192,7 +192,7 @@ Job failed: Could not resize image: Failed to grow the L1 table: File too large === Invalid version === {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "qcow2", "file": "node0", "size": 67108864, "version": "v1"}}} -{"error": {"class": "GenericError", "desc": "Invalid parameter 'v1'"}} +{"error": {"class": "GenericError", "desc": "Parameter 'version' does not accept value 'v1'"}} {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "qcow2", "file": "node0", "lazy-refcounts": true, "size": 67108864, "version": "v2"}}} {"return": {}} diff --git a/tests/qemu-iotests/207 b/tests/qemu-iotests/207 index 0f5c4bc8a0..41dcf3ff55 100755 --- a/tests/qemu-iotests/207 +++ b/tests/qemu-iotests/207 @@ -35,7 +35,12 @@ def filter_hash(qmsg): if key == 'hash' and re.match('[0-9a-f]+', value): return 'HASH' return value - return iotests.filter_qmp(qmsg, _filter) + if isinstance(qmsg, str): + # Strip key type and fingerprint + p = r"\S+ (key fingerprint) '(md5|sha1|sha256):[0-9a-f]+'" + return re.sub(p, r"\1 '\2:HASH'", qmsg) + else: + return iotests.filter_qmp(qmsg, _filter) def blockdev_create(vm, options): vm.blockdev_create(options, filters=[iotests.filter_qmp_testfiles, filter_hash]) diff --git a/tests/qemu-iotests/207.out b/tests/qemu-iotests/207.out index aeb8569d77..05cf753283 100644 --- a/tests/qemu-iotests/207.out +++ b/tests/qemu-iotests/207.out @@ -42,7 +42,7 @@ virtual size: 4 MiB (4194304 bytes) {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "ssh", "location": {"host-key-check": {"hash": "wrong", "mode": "hash", "type": "md5"}, "path": "TEST_DIR/PID-t.img", "server": {"host": "127.0.0.1", "port": "22"}}, "size": 2097152}}} {"return": {}} -Job failed: remote host key does not match host_key_check 'wrong' +Job failed: remote host key fingerprint 'md5:HASH' does not match host_key_check 'md5:wrong' {"execute": "job-dismiss", "arguments": {"id": "job0"}} {"return": {}} @@ -59,7 +59,7 @@ virtual size: 8 MiB (8388608 bytes) {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "ssh", "location": {"host-key-check": {"hash": "wrong", "mode": "hash", "type": "sha1"}, "path": "TEST_DIR/PID-t.img", "server": {"host": "127.0.0.1", "port": "22"}}, "size": 2097152}}} {"return": {}} -Job failed: remote host key does not match host_key_check 'wrong' +Job failed: remote host key fingerprint 'sha1:HASH' does not match host_key_check 'sha1:wrong' {"execute": "job-dismiss", "arguments": {"id": "job0"}} {"return": {}} @@ -76,7 +76,7 @@ virtual size: 4 MiB (4194304 bytes) {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "ssh", "location": {"host-key-check": {"hash": "wrong", "mode": "hash", "type": "sha256"}, "path": "TEST_DIR/PID-t.img", "server": {"host": "127.0.0.1", "port": "22"}}, "size": 2097152}}} {"return": {}} -Job failed: remote host key does not match host_key_check 'wrong' +Job failed: remote host key fingerprint 'sha256:HASH' does not match host_key_check 'sha256:wrong' {"execute": "job-dismiss", "arguments": {"id": "job0"}} {"return": {}} diff --git a/tests/qemu-iotests/209 b/tests/qemu-iotests/209 index ff7efea11b..f6ad08ec42 100755 --- a/tests/qemu-iotests/209 +++ b/tests/qemu-iotests/209 @@ -20,8 +20,8 
@@ # import iotests -from iotests import qemu_img_create, qemu_io, qemu_img_verbose, qemu_nbd, \ - file_path +from iotests import qemu_img_create, qemu_io, qemu_img_log, qemu_nbd, \ + file_path, log iotests.script_initialize(supported_fmts=['qcow2']) @@ -33,4 +33,5 @@ qemu_img_create('-f', iotests.imgfmt, disk, '1M') qemu_io('-f', iotests.imgfmt, '-c', 'write 0 512K', disk) qemu_nbd('-k', nbd_sock, '-x', 'exp', '-f', iotests.imgfmt, disk) -qemu_img_verbose('map', '-f', 'raw', '--output=json', nbd_uri) +qemu_img_log('map', '-f', 'raw', '--output=json', nbd_uri) +log('done.') # avoid new line at the end of output file diff --git a/tests/qemu-iotests/209.out b/tests/qemu-iotests/209.out index f27be3fa7b..515906ac7a 100644 --- a/tests/qemu-iotests/209.out +++ b/tests/qemu-iotests/209.out @@ -1,2 +1,4 @@ [{ "start": 0, "length": 524288, "depth": 0, "present": true, "zero": false, "data": true, "offset": 0}, { "start": 524288, "length": 524288, "depth": 0, "present": true, "zero": true, "data": false, "offset": 524288}] + +done. diff --git a/tests/qemu-iotests/210 b/tests/qemu-iotests/210 index 5a62ed4dd1..10b0a0b87c 100755 --- a/tests/qemu-iotests/210 +++ b/tests/qemu-iotests/210 @@ -62,7 +62,7 @@ with iotests.FilePath('t.luks') as disk_path, \ 'driver=luks,file.driver=file,file.filename=%s,key-secret=keysec0' % (disk_path), filter_path=disk_path, extra_args=['--object', 'secret,id=keysec0,data=foo'], - imgopts=True) + use_image_opts=True) # # Successful image creation (with non-default options) @@ -83,8 +83,8 @@ with iotests.FilePath('t.luks') as disk_path, \ }, 'size': size, 'key-secret': 'keysec0', - 'cipher-alg': 'twofish-128', - 'cipher-mode': 'ctr', + 'cipher-alg': 'aes-128', + 'cipher-mode': 'cbc', 'ivgen-alg': 'plain64', 'ivgen-hash-alg': 'md5', 'hash-alg': 'sha1', @@ -96,7 +96,7 @@ with iotests.FilePath('t.luks') as disk_path, \ 'driver=luks,file.driver=file,file.filename=%s,key-secret=keysec0' % (disk_path), filter_path=disk_path, extra_args=['--object', 'secret,id=keysec0,data=foo'], - imgopts=True) + use_image_opts=True) # # Invalid BlockdevRef @@ -132,7 +132,7 @@ with iotests.FilePath('t.luks') as disk_path, \ 'driver=luks,file.driver=file,file.filename=%s,key-secret=keysec0' % (disk_path), filter_path=disk_path, extra_args=['--object', 'secret,id=keysec0,data=foo'], - imgopts=True) + use_image_opts=True) # # Invalid sizes @@ -176,4 +176,4 @@ with iotests.FilePath('t.luks') as disk_path, \ 'driver=luks,file.driver=file,file.filename=%s,key-secret=keysec0' % (disk_path), filter_path=disk_path, extra_args=['--object', 'secret,id=keysec0,data=foo'], - imgopts=True) + use_image_opts=True) diff --git a/tests/qemu-iotests/210.out b/tests/qemu-iotests/210.out index 55c0844370..96d9f749dd 100644 --- a/tests/qemu-iotests/210.out +++ b/tests/qemu-iotests/210.out @@ -59,7 +59,7 @@ Format specific information: {"execute": "job-dismiss", "arguments": {"id": "job0"}} {"return": {}} -{"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"cipher-alg": "twofish-128", "cipher-mode": "ctr", "driver": "luks", "file": {"driver": "file", "filename": "TEST_DIR/PID-t.luks"}, "hash-alg": "sha1", "iter-time": 10, "ivgen-alg": "plain64", "ivgen-hash-alg": "md5", "key-secret": "keysec0", "size": 67108864}}} +{"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"cipher-alg": "aes-128", "cipher-mode": "cbc", "driver": "luks", "file": {"driver": "file", "filename": "TEST_DIR/PID-t.luks"}, "hash-alg": "sha1", "iter-time": 10, "ivgen-alg": "plain64", "ivgen-hash-alg": 
"md5", "key-secret": "keysec0", "size": 67108864}}} {"return": {}} {"execute": "job-dismiss", "arguments": {"id": "job0"}} {"return": {}} @@ -71,9 +71,9 @@ encrypted: yes Format specific information: ivgen alg: plain64 hash alg: sha1 - cipher alg: twofish-128 + cipher alg: aes-128 uuid: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX - cipher mode: ctr + cipher mode: cbc slots: [0]: active: true diff --git a/tests/qemu-iotests/211 b/tests/qemu-iotests/211 index f52cadade1..1a3b4596c8 100755 --- a/tests/qemu-iotests/211 +++ b/tests/qemu-iotests/211 @@ -59,7 +59,7 @@ with iotests.FilePath('t.vdi') as disk_path, \ vm.shutdown() iotests.img_info_log(disk_path) - iotests.log(iotests.qemu_img_pipe('map', '--output=json', disk_path)) + iotests.log(iotests.qemu_img_map(disk_path)) # # Successful image creation (explicit defaults) @@ -83,7 +83,7 @@ with iotests.FilePath('t.vdi') as disk_path, \ vm.shutdown() iotests.img_info_log(disk_path) - iotests.log(iotests.qemu_img_pipe('map', '--output=json', disk_path)) + iotests.log(iotests.qemu_img_map(disk_path)) # # Successful image creation (with non-default options) @@ -107,7 +107,7 @@ with iotests.FilePath('t.vdi') as disk_path, \ vm.shutdown() iotests.img_info_log(disk_path) - iotests.log(iotests.qemu_img_pipe('map', '--output=json', disk_path)) + iotests.log(iotests.qemu_img_map(disk_path)) # # Invalid BlockdevRef diff --git a/tests/qemu-iotests/211.out b/tests/qemu-iotests/211.out index c4425b5982..f02c75409c 100644 --- a/tests/qemu-iotests/211.out +++ b/tests/qemu-iotests/211.out @@ -17,8 +17,7 @@ file format: IMGFMT virtual size: 128 MiB (134217728 bytes) cluster_size: 1048576 -[{ "start": 0, "length": 134217728, "depth": 0, "present": true, "zero": true, "data": false}] - +[{"data": false, "depth": 0, "length": 134217728, "present": true, "start": 0, "zero": true}] === Successful image creation (explicit defaults) === {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "file", "filename": "TEST_DIR/PID-t.vdi", "size": 0}}} @@ -36,8 +35,7 @@ file format: IMGFMT virtual size: 64 MiB (67108864 bytes) cluster_size: 1048576 -[{ "start": 0, "length": 67108864, "depth": 0, "present": true, "zero": true, "data": false}] - +[{"data": false, "depth": 0, "length": 67108864, "present": true, "start": 0, "zero": true}] === Successful image creation (with non-default options) === {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "file", "filename": "TEST_DIR/PID-t.vdi", "size": 0}}} @@ -55,9 +53,7 @@ file format: IMGFMT virtual size: 32 MiB (33554432 bytes) cluster_size: 1048576 -[{ "start": 0, "length": 3072, "depth": 0, "present": true, "zero": false, "data": true, "offset": 1024}, -{ "start": 3072, "length": 33551360, "depth": 0, "present": true, "zero": true, "data": true, "offset": 4096}] - +[{"data": true, "depth": 0, "length": 3072, "offset": 1024, "present": true, "start": 0, "zero": false}, {"data": true, "depth": 0, "length": 33551360, "offset": 4096, "present": true, "start": 3072, "zero": true}] === Invalid BlockdevRef === {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "vdi", "file": "this doesn't exist", "size": 33554432}}} diff --git a/tests/qemu-iotests/214 b/tests/qemu-iotests/214 index 0889089d81..c66e246ba2 100755 --- a/tests/qemu-iotests/214 +++ b/tests/qemu-iotests/214 @@ -51,7 +51,7 @@ echo # The L2 entries of the two compressed clusters are located at # 0x800000 and 0x800008, their original values are 0x4008000000a00000 # and 0x4008000000a00802 
(5 sectors for compressed data each). -_make_test_img 8M -o cluster_size=2M +_make_test_img 8M -o cluster_size=2M,compression_type=zlib $QEMU_IO -c "write -c -P 0x11 0 2M" -c "write -c -P 0x11 2M 2M" "$TEST_IMG" \ 2>&1 | _filter_qemu_io | _filter_testdir diff --git a/tests/qemu-iotests/216 b/tests/qemu-iotests/216 index c02f8d2880..311e02af3a 100755 --- a/tests/qemu-iotests/216 +++ b/tests/qemu-iotests/216 @@ -18,10 +18,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . # -# Creator/Owner: Max Reitz +# Creator/Owner: Hanna Reitz import iotests -from iotests import log, qemu_img, qemu_io_silent +from iotests import log, qemu_img, qemu_io # Need backing file support iotests.script_initialize(supported_fmts=['qcow2', 'qcow', 'qed', 'vmdk'], @@ -51,11 +51,11 @@ with iotests.FilePath('base.img') as base_img_path, \ log('--- Setting up images ---') log('') - assert qemu_img('create', '-f', iotests.imgfmt, base_img_path, '64M') == 0 - assert qemu_io_silent(base_img_path, '-c', 'write -P 1 0M 1M') == 0 - assert qemu_img('create', '-f', iotests.imgfmt, '-b', base_img_path, - '-F', iotests.imgfmt, top_img_path) == 0 - assert qemu_io_silent(top_img_path, '-c', 'write -P 2 1M 1M') == 0 + qemu_img('create', '-f', iotests.imgfmt, base_img_path, '64M') + qemu_io(base_img_path, '-c', 'write -P 1 0M 1M') + qemu_img('create', '-f', iotests.imgfmt, '-b', base_img_path, + '-F', iotests.imgfmt, top_img_path) + qemu_io(top_img_path, '-c', 'write -P 2 1M 1M') log('Done') @@ -110,8 +110,8 @@ with iotests.FilePath('base.img') as base_img_path, \ log('--- Checking COR result ---') log('') - assert qemu_io_silent(base_img_path, '-c', 'discard 0 64M') == 0 - assert qemu_io_silent(top_img_path, '-c', 'read -P 1 0M 1M') == 0 - assert qemu_io_silent(top_img_path, '-c', 'read -P 2 1M 1M') == 0 + qemu_io(base_img_path, '-c', 'discard 0 64M') + qemu_io(top_img_path, '-c', 'read -P 1 0M 1M') + qemu_io(top_img_path, '-c', 'read -P 2 1M 1M') log('Done') diff --git a/tests/qemu-iotests/218 b/tests/qemu-iotests/218 index 325d8244fb..6320c4cb56 100755 --- a/tests/qemu-iotests/218 +++ b/tests/qemu-iotests/218 @@ -25,10 +25,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . # -# Creator/Owner: Max Reitz +# Creator/Owner: Hanna Reitz import iotests -from iotests import log, qemu_img, qemu_io_silent +from iotests import log, qemu_img, qemu_io iotests.script_initialize(supported_fmts=['qcow2', 'raw']) @@ -145,9 +145,8 @@ log('') with iotests.VM() as vm, \ iotests.FilePath('src.img') as src_img_path: - assert qemu_img('create', '-f', iotests.imgfmt, src_img_path, '64M') == 0 - assert qemu_io_silent('-f', iotests.imgfmt, src_img_path, - '-c', 'write -P 42 0M 64M') == 0 + qemu_img('create', '-f', iotests.imgfmt, src_img_path, '64M') + qemu_io('-f', iotests.imgfmt, src_img_path, '-c', 'write -P 42 0M 64M') vm.launch() @@ -187,4 +186,4 @@ with iotests.VM() as vm, \ log(vm.qmp('quit')) with iotests.Timeout(5, 'Timeout waiting for VM to quit'): - vm.shutdown(has_quit=True) + vm.shutdown() diff --git a/tests/qemu-iotests/222 b/tests/qemu-iotests/222 deleted file mode 100755 index b48afe623e..0000000000 --- a/tests/qemu-iotests/222 +++ /dev/null @@ -1,159 +0,0 @@ -#!/usr/bin/env python3 -# group: rw quick -# -# This test covers the basic fleecing workflow, which provides a -# point-in-time snapshot of a node that can be queried over NBD. -# -# Copyright (C) 2018 Red Hat, Inc. -# John helped, too. 
-# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -# Creator/Owner: John Snow - -import iotests -from iotests import log, qemu_img, qemu_io, qemu_io_silent - -iotests.script_initialize( - supported_fmts=['qcow2', 'qcow', 'qed', 'vmdk', 'vhdx', 'raw'], - supported_platforms=['linux'], -) - -patterns = [("0x5d", "0", "64k"), - ("0xd5", "1M", "64k"), - ("0xdc", "32M", "64k"), - ("0xcd", "0x3ff0000", "64k")] # 64M - 64K - -overwrite = [("0xab", "0", "64k"), # Full overwrite - ("0xad", "0x00f8000", "64k"), # Partial-left (1M-32K) - ("0x1d", "0x2008000", "64k"), # Partial-right (32M+32K) - ("0xea", "0x3fe0000", "64k")] # Adjacent-left (64M - 128K) - -zeroes = [("0", "0x00f8000", "32k"), # Left-end of partial-left (1M-32K) - ("0", "0x2010000", "32k"), # Right-end of partial-right (32M+64K) - ("0", "0x3fe0000", "64k")] # overwrite[3] - -remainder = [("0xd5", "0x108000", "32k"), # Right-end of partial-left [1] - ("0xdc", "32M", "32k"), # Left-end of partial-right [2] - ("0xcd", "0x3ff0000", "64k")] # patterns[3] - -with iotests.FilePath('base.img') as base_img_path, \ - iotests.FilePath('fleece.img') as fleece_img_path, \ - iotests.FilePath('nbd.sock', base_dir=iotests.sock_dir) as nbd_sock_path, \ - iotests.VM() as vm: - - log('--- Setting up images ---') - log('') - - assert qemu_img('create', '-f', iotests.imgfmt, base_img_path, '64M') == 0 - assert qemu_img('create', '-f', "qcow2", fleece_img_path, '64M') == 0 - - for p in patterns: - qemu_io('-f', iotests.imgfmt, - '-c', 'write -P%s %s %s' % p, base_img_path) - - log('Done') - - log('') - log('--- Launching VM ---') - log('') - - vm.add_drive(base_img_path) - vm.launch() - log('Done') - - log('') - log('--- Setting up Fleecing Graph ---') - log('') - - src_node = "drive0" - tgt_node = "fleeceNode" - - # create tgt_node backed by src_node - log(vm.qmp("blockdev-add", **{ - "driver": "qcow2", - "node-name": tgt_node, - "file": { - "driver": "file", - "filename": fleece_img_path, - }, - "backing": src_node, - })) - - # Establish COW from source to fleecing node - log(vm.qmp("blockdev-backup", - device=src_node, - target=tgt_node, - sync="none")) - - log('') - log('--- Setting up NBD Export ---') - log('') - - nbd_uri = 'nbd+unix:///%s?socket=%s' % (tgt_node, nbd_sock_path) - log(vm.qmp("nbd-server-start", - **{"addr": { "type": "unix", - "data": { "path": nbd_sock_path } } })) - - log(vm.qmp("nbd-server-add", device=tgt_node)) - - log('') - log('--- Sanity Check ---') - log('') - - for p in (patterns + zeroes): - cmd = "read -P%s %s %s" % p - log(cmd) - assert qemu_io_silent('-r', '-f', 'raw', '-c', cmd, nbd_uri) == 0 - - log('') - log('--- Testing COW ---') - log('') - - for p in overwrite: - cmd = "write -P%s %s %s" % p - log(cmd) - log(vm.hmp_qemu_io(src_node, cmd)) - - log('') - log('--- Verifying Data ---') - log('') - - for p in (patterns + zeroes): - cmd = "read -P%s %s %s" % p - log(cmd) - assert qemu_io_silent('-r', '-f', 'raw', '-c', cmd, nbd_uri) == 0 - - log('') - log('--- 
Cleanup ---') - log('') - - log(vm.qmp('block-job-cancel', device=src_node)) - log(vm.event_wait('BLOCK_JOB_CANCELLED'), - filters=[iotests.filter_qmp_event]) - log(vm.qmp('nbd-server-stop')) - log(vm.qmp('blockdev-del', node_name=tgt_node)) - vm.shutdown() - - log('') - log('--- Confirming writes ---') - log('') - - for p in (overwrite + remainder): - cmd = "read -P%s %s %s" % p - log(cmd) - assert qemu_io_silent(base_img_path, '-c', cmd) == 0 - - log('') - log('Done') diff --git a/tests/qemu-iotests/222.out b/tests/qemu-iotests/222.out deleted file mode 100644 index 16643dde30..0000000000 --- a/tests/qemu-iotests/222.out +++ /dev/null @@ -1,67 +0,0 @@ ---- Setting up images --- - -Done - ---- Launching VM --- - -Done - ---- Setting up Fleecing Graph --- - -{"return": {}} -{"return": {}} - ---- Setting up NBD Export --- - -{"return": {}} -{"return": {}} - ---- Sanity Check --- - -read -P0x5d 0 64k -read -P0xd5 1M 64k -read -P0xdc 32M 64k -read -P0xcd 0x3ff0000 64k -read -P0 0x00f8000 32k -read -P0 0x2010000 32k -read -P0 0x3fe0000 64k - ---- Testing COW --- - -write -P0xab 0 64k -{"return": ""} -write -P0xad 0x00f8000 64k -{"return": ""} -write -P0x1d 0x2008000 64k -{"return": ""} -write -P0xea 0x3fe0000 64k -{"return": ""} - ---- Verifying Data --- - -read -P0x5d 0 64k -read -P0xd5 1M 64k -read -P0xdc 32M 64k -read -P0xcd 0x3ff0000 64k -read -P0 0x00f8000 32k -read -P0 0x2010000 32k -read -P0 0x3fe0000 64k - ---- Cleanup --- - -{"return": {}} -{"data": {"device": "drive0", "len": 67108864, "offset": 393216, "speed": 0, "type": "backup"}, "event": "BLOCK_JOB_CANCELLED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} -{"return": {}} -{"return": {}} - ---- Confirming writes --- - -read -P0xab 0 64k -read -P0xad 0x00f8000 64k -read -P0x1d 0x2008000 64k -read -P0xea 0x3fe0000 64k -read -P0xd5 0x108000 32k -read -P0xdc 32M 32k -read -P0xcd 0x3ff0000 64k - -Done diff --git a/tests/qemu-iotests/223 b/tests/qemu-iotests/223 index da87f2f4a2..0bbb283010 100755 --- a/tests/qemu-iotests/223 +++ b/tests/qemu-iotests/223 @@ -120,6 +120,11 @@ _send_qemu_cmd $QEMU_HANDLE '{"execute":"blockdev-add", "file":{"driver":"file", "filename":"'"$TEST_IMG"'"}}}' "return" _send_qemu_cmd $QEMU_HANDLE '{"execute":"block-dirty-bitmap-disable", "arguments":{"node":"n", "name":"b"}}' "return" +_send_qemu_cmd $QEMU_HANDLE '{"execute":"blockdev-add", + "arguments":{"driver":"null-co", "node-name":"null", + "size": 4194304}}' "return" +_send_qemu_cmd $QEMU_HANDLE '{"execute":"block-dirty-bitmap-add", + "arguments":{"node":"null", "name":"b3"}}' "return" for attempt in normal iothread; do @@ -155,6 +160,9 @@ _send_qemu_cmd $QEMU_HANDLE '{"execute":"nbd-server-add", _send_qemu_cmd $QEMU_HANDLE '{"execute":"nbd-server-add", "arguments":{"device":"n", "name":"n2", "writable":true, "description":"some text", "bitmap":"b2"}}' "return" +_send_qemu_cmd $QEMU_HANDLE '{"execute":"block-export-add", + "arguments":{"type": "nbd", "node-name":"n", "id":"n3", "name": "n3", + "bitmaps":[{"node":"null","name":"b3"}]}}' "return" $QEMU_NBD_PROG -L -k "$SOCK_DIR/nbd" echo @@ -178,6 +186,14 @@ IMG="driver=nbd,export=n2,server.type=unix,server.path=$SOCK_DIR/nbd" $QEMU_IMG map --output=json --image-opts \ "$IMG,x-dirty-bitmap=qemu:dirty-bitmap:b2" | _filter_qemu_img_map +echo +echo "=== Check bitmap taken from another node ===" +echo + +IMG="driver=nbd,export=n3,server.type=unix,server.path=$SOCK_DIR/nbd" +$QEMU_IMG map --output=json --image-opts \ + "$IMG,x-dirty-bitmap=qemu:dirty-bitmap:b3" | _filter_qemu_img_map + echo echo 
"=== End qemu NBD server ===" echo diff --git a/tests/qemu-iotests/223.out b/tests/qemu-iotests/223.out index e58ea5abbd..26fb347c5d 100644 --- a/tests/qemu-iotests/223.out +++ b/tests/qemu-iotests/223.out @@ -33,6 +33,13 @@ wrote 2097152/2097152 bytes at offset 2097152 {"execute":"block-dirty-bitmap-disable", "arguments":{"node":"n", "name":"b"}} {"return": {}} +{"execute":"blockdev-add", + "arguments":{"driver":"null-co", "node-name":"null", + "size": 4194304}} +{"return": {}} +{"execute":"block-dirty-bitmap-add", + "arguments":{"node":"null", "name":"b3"}} +{"return": {}} === Set up NBD with normal access === @@ -69,7 +76,11 @@ exports available: 0 "arguments":{"device":"n", "name":"n2", "writable":true, "description":"some text", "bitmap":"b2"}} {"return": {}} -exports available: 2 +{"execute":"block-export-add", + "arguments":{"type": "nbd", "node-name":"n", "id":"n3", "name": "n3", + "bitmaps":[{"node":"null","name":"b3"}]}} +{"return": {}} +exports available: 3 export: 'n' size: 4194304 flags: 0x58f ( readonly flush fua df multi cache ) @@ -82,13 +93,22 @@ exports available: 2 export: 'n2' description: some text size: 4194304 - flags: 0xced ( flush fua trim zeroes df cache fast-zero ) + flags: 0xded ( flush fua trim zeroes df multi cache fast-zero ) min block: 1 opt block: 4096 max block: 33554432 available meta contexts: 2 base:allocation qemu:dirty-bitmap:b2 + export: 'n3' + size: 4194304 + flags: 0x58f ( readonly flush fua df multi cache ) + min block: 1 + opt block: 4096 + max block: 33554432 + available meta contexts: 2 + base:allocation + qemu:dirty-bitmap:b3 === Contrast normal status to large granularity dirty-bitmap === @@ -114,6 +134,10 @@ read 2097152/2097152 bytes at offset 2097152 { "start": 1024, "length": 2096128, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, { "start": 2097152, "length": 2097152, "depth": 0, "present": false, "zero": false, "data": false}] +=== Check bitmap taken from another node === + +[{ "start": 0, "length": 4194304, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] + === End qemu NBD server === {"execute":"nbd-server-remove", @@ -128,6 +152,7 @@ read 2097152/2097152 bytes at offset 2097152 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "n2"}} {"error": {"class": "GenericError", "desc": "Export 'n2' is not found"}} {"execute":"nbd-server-stop"} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "n3"}} {"return": {}} {"execute":"nbd-server-stop"} {"error": {"class": "GenericError", "desc": "NBD server not running"}} @@ -170,7 +195,11 @@ exports available: 0 "arguments":{"device":"n", "name":"n2", "writable":true, "description":"some text", "bitmap":"b2"}} {"return": {}} -exports available: 2 +{"execute":"block-export-add", + "arguments":{"type": "nbd", "node-name":"n", "id":"n3", "name": "n3", + "bitmaps":[{"node":"null","name":"b3"}]}} +{"return": {}} +exports available: 3 export: 'n' size: 4194304 flags: 0x58f ( readonly flush fua df multi cache ) @@ -183,13 +212,22 @@ exports available: 2 export: 'n2' description: some text size: 4194304 - flags: 0xced ( flush fua trim zeroes df cache fast-zero ) + flags: 0xded ( flush fua trim zeroes df multi cache fast-zero ) min block: 1 opt block: 4096 max block: 33554432 available meta contexts: 2 base:allocation qemu:dirty-bitmap:b2 + export: 'n3' + size: 4194304 + flags: 0x58f ( readonly flush fua df multi cache ) + min 
block: 1 + opt block: 4096 + max block: 33554432 + available meta contexts: 2 + base:allocation + qemu:dirty-bitmap:b3 === Contrast normal status to large granularity dirty-bitmap === @@ -215,6 +253,10 @@ read 2097152/2097152 bytes at offset 2097152 { "start": 1024, "length": 2096128, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, { "start": 2097152, "length": 2097152, "depth": 0, "present": false, "zero": false, "data": false}] +=== Check bitmap taken from another node === + +[{ "start": 0, "length": 4194304, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] + === End qemu NBD server === {"execute":"nbd-server-remove", @@ -229,6 +271,7 @@ read 2097152/2097152 bytes at offset 2097152 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "n2"}} {"error": {"class": "GenericError", "desc": "Export 'n2' is not found"}} {"execute":"nbd-server-stop"} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "n3"}} {"return": {}} {"execute":"nbd-server-stop"} {"error": {"class": "GenericError", "desc": "NBD server not running"}} diff --git a/tests/qemu-iotests/224 b/tests/qemu-iotests/224 index 38dd153625..542d0eefa6 100755 --- a/tests/qemu-iotests/224 +++ b/tests/qemu-iotests/224 @@ -19,10 +19,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . # -# Creator/Owner: Max Reitz +# Creator/Owner: Hanna Reitz import iotests -from iotests import log, qemu_img, qemu_io_silent, filter_qmp_testfiles, \ +from iotests import log, qemu_img, qemu_io, filter_qmp_testfiles, \ filter_qmp_imgfmt import json @@ -47,15 +47,14 @@ for filter_node_name in False, True: iotests.FilePath('top.img') as top_img_path, \ iotests.VM() as vm: - assert qemu_img('create', '-f', iotests.imgfmt, - base_img_path, '64M') == 0 - assert qemu_img('create', '-f', iotests.imgfmt, '-b', base_img_path, - '-F', iotests.imgfmt, mid_img_path) == 0 - assert qemu_img('create', '-f', iotests.imgfmt, '-b', mid_img_path, - '-F', iotests.imgfmt, top_img_path) == 0 + qemu_img('create', '-f', iotests.imgfmt, base_img_path, '64M') + qemu_img('create', '-f', iotests.imgfmt, '-b', base_img_path, + '-F', iotests.imgfmt, mid_img_path) + qemu_img('create', '-f', iotests.imgfmt, '-b', mid_img_path, + '-F', iotests.imgfmt, top_img_path) # Something to commit - assert qemu_io_silent(mid_img_path, '-c', 'write -P 1 0 1M') == 0 + qemu_io(mid_img_path, '-c', 'write -P 1 0 1M') vm.launch() diff --git a/tests/qemu-iotests/225 b/tests/qemu-iotests/225 index c0053790db..b5949fcb58 100755 --- a/tests/qemu-iotests/225 +++ b/tests/qemu-iotests/225 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq=$(basename $0) echo "QA output created by $seq" diff --git a/tests/qemu-iotests/227.out b/tests/qemu-iotests/227.out index 9c09ee3917..378c1b8fb1 100644 --- a/tests/qemu-iotests/227.out +++ b/tests/qemu-iotests/227.out @@ -188,7 +188,7 @@ Testing: -blockdev driver=null-co,read-zeroes=on,node-name=null -device virtio-b ], "failed_unmap_operations": 0, "failed_flush_operations": 0, - "account_invalid": false, + "account_invalid": true, "rd_total_time_ns": 0, "invalid_unmap_operations": 0, "flush_operations": 0, @@ -198,7 +198,7 @@ Testing: -blockdev driver=null-co,read-zeroes=on,node-name=null -device virtio-b "rd_bytes": 0, "unmap_total_time_ns": 0, "invalid_flush_operations": 0, - "account_failed": false, + 
"account_failed": true, "rd_operations": 0, "invalid_wr_operations": 0, "invalid_rd_operations": 0 diff --git a/tests/qemu-iotests/228 b/tests/qemu-iotests/228 index a5eda2e149..7341777f9f 100755 --- a/tests/qemu-iotests/228 +++ b/tests/qemu-iotests/228 @@ -19,7 +19,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . # -# Creator/Owner: Max Reitz +# Creator/Owner: Hanna Reitz import iotests from iotests import log, qemu_img, filter_testfiles, filter_imgfmt, \ @@ -54,11 +54,11 @@ with iotests.FilePath('base.img') as base_img_path, \ iotests.FilePath('top.img') as top_img_path, \ iotests.VM() as vm: - assert qemu_img('create', '-f', iotests.imgfmt, base_img_path, '64M') == 0 + qemu_img('create', '-f', iotests.imgfmt, base_img_path, '64M') # Choose a funny way to describe the backing filename - assert qemu_img('create', '-f', iotests.imgfmt, '-b', - 'file:' + base_img_path, '-F', iotests.imgfmt, - top_img_path) == 0 + qemu_img('create', '-f', iotests.imgfmt, '-b', + 'file:' + base_img_path, '-F', iotests.imgfmt, + top_img_path) vm.launch() @@ -172,8 +172,8 @@ with iotests.FilePath('base.img') as base_img_path, \ # (because qemu cannot "canonicalize"/"resolve" the backing # filename unless the backing file is opened implicitly with the # overlay) - assert qemu_img('create', '-f', iotests.imgfmt, '-b', base_img_path, - '-F', iotests.imgfmt, top_img_path) == 0 + qemu_img('create', '-f', iotests.imgfmt, '-b', base_img_path, + '-F', iotests.imgfmt, top_img_path) # You can only reliably override backing options by using a node # reference (or by specifying file.filename, but, well...) diff --git a/tests/qemu-iotests/229 b/tests/qemu-iotests/229 index 4bc99390b5..aaa6996ce3 100755 --- a/tests/qemu-iotests/229 +++ b/tests/qemu-iotests/229 @@ -21,7 +21,7 @@ # # creator -owner=jcody@redhat.com +owner=codyprime@gmail.com seq="$(basename $0)" echo "QA output created by $seq" diff --git a/tests/qemu-iotests/231 b/tests/qemu-iotests/231 index 8e6c6447c1..eddc8e9641 100755 --- a/tests/qemu-iotests/231 +++ b/tests/qemu-iotests/231 @@ -22,7 +22,7 @@ # # creator -owner=jcody@redhat.com +owner=codyprime@gmail.com seq=`basename $0` echo "QA output created by $seq" diff --git a/tests/qemu-iotests/233 b/tests/qemu-iotests/233 index 9ca7b68f42..55db5b3811 100755 --- a/tests/qemu-iotests/233 +++ b/tests/qemu-iotests/233 @@ -61,11 +61,13 @@ tls_x509_create_server "ca1" "server1" tls_x509_create_client "ca1" "client1" tls_x509_create_client "ca2" "client2" tls_x509_create_client "ca1" "client3" +tls_psk_create_creds "psk1" +tls_psk_create_creds "psk2" echo echo "== preparing image ==" _make_test_img 64M -$QEMU_IO -c 'w -P 0x11 1m 1m' "$TEST_IMG" | _filter_qemu_io +$QEMU_IO -c 'w -P 0x11 1m 1m' "$TEST_IMG" 2>&1 | _filter_qemu_io echo echo "== check TLS client to plain server fails ==" @@ -74,9 +76,9 @@ nbd_server_start_tcp_socket -f $IMGFMT "$TEST_IMG" 2> "$TEST_DIR/server.log" obj=tls-creds-x509,dir=${tls_dir}/client1,endpoint=client,id=tls0 $QEMU_IMG info --image-opts --object $obj \ driver=nbd,host=$nbd_tcp_addr,port=$nbd_tcp_port,tls-creds=tls0 \ - 2>&1 | sed "s/$nbd_tcp_port/PORT/g" + 2>&1 | _filter_nbd $QEMU_NBD_PROG -L -b $nbd_tcp_addr -p $nbd_tcp_port --object $obj \ - --tls-creds=tls0 + --tls-creds=tls0 2>&1 | _filter_qemu_nbd_exports nbd_server_stop @@ -88,8 +90,10 @@ nbd_server_start_tcp_socket \ --tls-creds tls0 \ -f $IMGFMT "$TEST_IMG" 2>> "$TEST_DIR/server.log" -$QEMU_IMG info nbd://localhost:$nbd_tcp_port 2>&1 | sed "s/$nbd_tcp_port/PORT/g" 
-$QEMU_NBD_PROG -L -b $nbd_tcp_addr -p $nbd_tcp_port +$QEMU_IMG info nbd://localhost:$nbd_tcp_port \ + 2>&1 | _filter_nbd +$QEMU_NBD_PROG -L -b $nbd_tcp_addr -p $nbd_tcp_port \ + 2>&1 | _filter_qemu_nbd_exports echo echo "== check TLS works ==" @@ -97,21 +101,39 @@ obj1=tls-creds-x509,dir=${tls_dir}/client1,endpoint=client,id=tls0 obj2=tls-creds-x509,dir=${tls_dir}/client3,endpoint=client,id=tls0 $QEMU_IMG info --image-opts --object $obj1 \ driver=nbd,host=$nbd_tcp_addr,port=$nbd_tcp_port,tls-creds=tls0 \ - 2>&1 | sed "s/$nbd_tcp_port/PORT/g" + 2>&1 | _filter_nbd $QEMU_IMG info --image-opts --object $obj2 \ driver=nbd,host=$nbd_tcp_addr,port=$nbd_tcp_port,tls-creds=tls0 \ - 2>&1 | sed "s/$nbd_tcp_port/PORT/g" + 2>&1 | _filter_nbd $QEMU_NBD_PROG -L -b $nbd_tcp_addr -p $nbd_tcp_port --object $obj1 \ - --tls-creds=tls0 + --tls-creds=tls0 2>&1 | _filter_qemu_nbd_exports + +echo +echo "== check TLS fail over TCP with mismatched hostname ==" +obj1=tls-creds-x509,dir=${tls_dir}/client1,endpoint=client,id=tls0 +$QEMU_IMG info --image-opts --object $obj1 \ + driver=nbd,host=localhost,port=$nbd_tcp_port,tls-creds=tls0 \ + 2>&1 | _filter_nbd +$QEMU_NBD_PROG -L -b localhost -p $nbd_tcp_port --object $obj1 \ + --tls-creds=tls0 | _filter_qemu_nbd_exports + +echo +echo "== check TLS works over TCP with mismatched hostname and override ==" +obj1=tls-creds-x509,dir=${tls_dir}/client1,endpoint=client,id=tls0 +$QEMU_IMG info --image-opts --object $obj1 \ + driver=nbd,host=localhost,port=$nbd_tcp_port,tls-creds=tls0,tls-hostname=127.0.0.1 \ + 2>&1 | _filter_nbd +$QEMU_NBD_PROG -L -b localhost -p $nbd_tcp_port --object $obj1 \ + --tls-creds=tls0 --tls-hostname=127.0.0.1 | _filter_qemu_nbd_exports echo echo "== check TLS with different CA fails ==" obj=tls-creds-x509,dir=${tls_dir}/client2,endpoint=client,id=tls0 $QEMU_IMG info --image-opts --object $obj \ driver=nbd,host=$nbd_tcp_addr,port=$nbd_tcp_port,tls-creds=tls0 \ - 2>&1 | sed "s/$nbd_tcp_port/PORT/g" + 2>&1 | _filter_nbd $QEMU_NBD_PROG -L -b $nbd_tcp_addr -p $nbd_tcp_port --object $obj \ - --tls-creds=tls0 + --tls-creds=tls0 2>&1 | _filter_qemu_nbd_exports echo echo "== perform I/O over TLS ==" @@ -121,7 +143,8 @@ $QEMU_IO -c 'r -P 0x11 1m 1m' -c 'w -P 0x22 1m 1m' --image-opts \ driver=nbd,host=$nbd_tcp_addr,port=$nbd_tcp_port,tls-creds=tls0 \ 2>&1 | _filter_qemu_io -$QEMU_IO -f $IMGFMT -r -U -c 'r -P 0x22 1m 1m' "$TEST_IMG" | _filter_qemu_io +$QEMU_IO -f $IMGFMT -r -U -c 'r -P 0x22 1m 1m' "$TEST_IMG" \ + 2>&1 | _filter_qemu_io echo echo "== check TLS with authorization ==" @@ -139,12 +162,62 @@ nbd_server_start_tcp_socket \ $QEMU_IMG info --image-opts \ --object tls-creds-x509,dir=${tls_dir}/client1,endpoint=client,id=tls0 \ driver=nbd,host=$nbd_tcp_addr,port=$nbd_tcp_port,tls-creds=tls0 \ - 2>&1 | sed "s/$nbd_tcp_port/PORT/g" + 2>&1 | _filter_nbd $QEMU_IMG info --image-opts \ --object tls-creds-x509,dir=${tls_dir}/client3,endpoint=client,id=tls0 \ driver=nbd,host=$nbd_tcp_addr,port=$nbd_tcp_port,tls-creds=tls0 \ - 2>&1 | sed "s/$nbd_tcp_port/PORT/g" + 2>&1 | _filter_nbd + +nbd_server_stop + +nbd_server_start_unix_socket \ + --object tls-creds-x509,dir=${tls_dir}/server1,endpoint=server,id=tls0,verify-peer=on \ + --tls-creds tls0 \ + -f $IMGFMT "$TEST_IMG" 2>> "$TEST_DIR/server.log" + +echo +echo "== check TLS fail over UNIX with no hostname ==" +obj1=tls-creds-x509,dir=${tls_dir}/client1,endpoint=client,id=tls0 +$QEMU_IMG info --image-opts --object $obj1 \ + driver=nbd,path=$nbd_unix_socket,tls-creds=tls0 2>&1 | _filter_nbd +$QEMU_NBD_PROG -L -k 
$nbd_unix_socket --object $obj1 --tls-creds=tls0 \ + 2>&1 | _filter_qemu_nbd_exports + +echo +echo "== check TLS works over UNIX with hostname override ==" +obj1=tls-creds-x509,dir=${tls_dir}/client1,endpoint=client,id=tls0 +$QEMU_IMG info --image-opts --object $obj1 \ + driver=nbd,path=$nbd_unix_socket,tls-creds=tls0,tls-hostname=127.0.0.1 \ + 2>&1 | _filter_nbd +$QEMU_NBD_PROG -L -k $nbd_unix_socket --object $obj1 \ + --tls-creds=tls0 --tls-hostname=127.0.0.1 2>&1 | _filter_qemu_nbd_exports + + +echo +echo "== check TLS works over UNIX with PSK ==" +nbd_server_stop + +nbd_server_start_unix_socket \ + --object tls-creds-psk,dir=${tls_dir}/psk1,endpoint=server,id=tls0,verify-peer=on \ + --tls-creds tls0 \ + -f $IMGFMT "$TEST_IMG" 2>> "$TEST_DIR/server.log" + +obj1=tls-creds-psk,dir=${tls_dir}/psk1,username=psk1,endpoint=client,id=tls0 +$QEMU_IMG info --image-opts --object $obj1 \ + driver=nbd,path=$nbd_unix_socket,tls-creds=tls0 \ + 2>&1 | _filter_nbd +$QEMU_NBD_PROG -L -k $nbd_unix_socket --object $obj1 \ + --tls-creds=tls0 2>&1 | _filter_qemu_nbd_exports + +echo +echo "== check TLS fails over UNIX with mismatch PSK ==" +obj1=tls-creds-psk,dir=${tls_dir}/psk2,username=psk2,endpoint=client,id=tls0 +$QEMU_IMG info --image-opts --object $obj1 \ + driver=nbd,path=$nbd_unix_socket,tls-creds=tls0 \ + 2>&1 | _filter_nbd +$QEMU_NBD_PROG -L -k $nbd_unix_socket --object $obj1 \ + --tls-creds=tls0 2>&1 | _filter_qemu_nbd_exports echo echo "== final server log ==" diff --git a/tests/qemu-iotests/233.out b/tests/qemu-iotests/233.out index 4b1f6a0e15..237c82767e 100644 --- a/tests/qemu-iotests/233.out +++ b/tests/qemu-iotests/233.out @@ -7,6 +7,8 @@ Generating a signed certificate... Generating a signed certificate... Generating a signed certificate... Generating a signed certificate... +Generating a random key for user 'psk1' +Generating a random key for user 'psk2' == preparing image == Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 @@ -17,15 +19,12 @@ wrote 1048576/1048576 bytes at offset 1048576 qemu-img: Could not open 'driver=nbd,host=127.0.0.1,port=PORT,tls-creds=tls0': Denied by server for option 5 (starttls) server reported: TLS not configured qemu-nbd: Denied by server for option 5 (starttls) -server reported: TLS not configured == check plain client to TLS server fails == qemu-img: Could not open 'nbd://localhost:PORT': TLS negotiation required before option 7 (go) Did you forget a valid tls-creds? server reported: Option 0x7 not permitted before TLS qemu-nbd: TLS negotiation required before option 3 (list) -Did you forget a valid tls-creds? 
-server reported: Option 0x3 not permitted before TLS == check TLS works == image: nbd://127.0.0.1:PORT @@ -39,12 +38,21 @@ disk size: unavailable exports available: 1 export: '' size: 67108864 - flags: 0xced ( flush fua trim zeroes df cache fast-zero ) min block: 1 - opt block: 4096 - max block: 33554432 - available meta contexts: 1 - base:allocation + +== check TLS fail over TCP with mismatched hostname == +qemu-img: Could not open 'driver=nbd,host=localhost,port=PORT,tls-creds=tls0': Certificate does not match the hostname localhost +qemu-nbd: Certificate does not match the hostname localhost + +== check TLS works over TCP with mismatched hostname and override == +image: nbd://localhost:PORT +file format: nbd +virtual size: 64 MiB (67108864 bytes) +disk size: unavailable +exports available: 1 + export: '' + size: 67108864 + min block: 1 == check TLS with different CA fails == qemu-img: Could not open 'driver=nbd,host=127.0.0.1,port=PORT,tls-creds=tls0': The certificate hasn't got a known issuer @@ -62,9 +70,43 @@ read 1048576/1048576 bytes at offset 1048576 qemu-img: Could not open 'driver=nbd,host=127.0.0.1,port=PORT,tls-creds=tls0': Failed to read option reply: Cannot read from TLS channel: Software caused connection abort qemu-img: Could not open 'driver=nbd,host=127.0.0.1,port=PORT,tls-creds=tls0': Failed to read option reply: Cannot read from TLS channel: Software caused connection abort +== check TLS fail over UNIX with no hostname == +qemu-img: Could not open 'driver=nbd,path=SOCK_DIR/qemu-nbd.sock,tls-creds=tls0': No hostname for certificate validation +qemu-nbd: No hostname for certificate validation + +== check TLS works over UNIX with hostname override == +image: nbd+unix://?socket=SOCK_DIR/qemu-nbd.sock +file format: nbd +virtual size: 64 MiB (67108864 bytes) +disk size: unavailable +exports available: 1 + export: '' + size: 67108864 + min block: 1 + +== check TLS works over UNIX with PSK == +image: nbd+unix://?socket=SOCK_DIR/qemu-nbd.sock +file format: nbd +virtual size: 64 MiB (67108864 bytes) +disk size: unavailable +exports available: 1 + export: '' + size: 67108864 + min block: 1 + +== check TLS fails over UNIX with mismatch PSK == +qemu-img: Could not open 'driver=nbd,path=SOCK_DIR/qemu-nbd.sock,tls-creds=tls0': TLS handshake failed: The TLS connection was non-properly terminated. +qemu-nbd: TLS handshake failed: The TLS connection was non-properly terminated. + == final server log == +qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: Software caused connection abort +qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: Software caused connection abort qemu-nbd: option negotiation failed: Verify failed: No certificate was found. qemu-nbd: option negotiation failed: Verify failed: No certificate was found. qemu-nbd: option negotiation failed: TLS x509 authz check for DISTINGUISHED-NAME is denied qemu-nbd: option negotiation failed: TLS x509 authz check for DISTINGUISHED-NAME is denied +qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: Software caused connection abort +qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: Software caused connection abort +qemu-nbd: option negotiation failed: TLS handshake failed: An illegal parameter has been received. +qemu-nbd: option negotiation failed: TLS handshake failed: An illegal parameter has been received. 
*** done diff --git a/tests/qemu-iotests/234 b/tests/qemu-iotests/234 index cb5f1753e0..a9f764bb2c 100755 --- a/tests/qemu-iotests/234 +++ b/tests/qemu-iotests/234 @@ -34,8 +34,8 @@ with iotests.FilePath('img') as img_path, \ iotests.VM(path_suffix='a') as vm_a, \ iotests.VM(path_suffix='b') as vm_b: - iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, backing_path, '64M') - iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, img_path, '64M') + iotests.qemu_img_create('-f', iotests.imgfmt, backing_path, '64M') + iotests.qemu_img_create('-f', iotests.imgfmt, img_path, '64M') os.mkfifo(fifo_a) os.mkfifo(fifo_b) diff --git a/tests/qemu-iotests/235 b/tests/qemu-iotests/235 index 8aed45f9a7..4de920c380 100755 --- a/tests/qemu-iotests/235 +++ b/tests/qemu-iotests/235 @@ -24,8 +24,6 @@ import os import iotests from iotests import qemu_img_create, qemu_io, file_path, log -sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) - from qemu.machine import QEMUMachine iotests.script_initialize(supported_fmts=['qcow2']) diff --git a/tests/qemu-iotests/237 b/tests/qemu-iotests/237 index 43dfd3bd40..5ea13eb01f 100755 --- a/tests/qemu-iotests/237 +++ b/tests/qemu-iotests/237 @@ -165,8 +165,7 @@ with iotests.FilePath('t.vmdk') as disk_path, \ iotests.log("") for path in [ extent1_path, extent2_path, extent3_path ]: - msg = iotests.qemu_img_pipe('create', '-f', imgfmt, path, '0') - iotests.log(msg, [iotests.filter_testfiles]) + iotests.qemu_img_create('-f', imgfmt, path, '0') vm.add_blockdev('driver=file,filename=%s,node-name=ext1' % (extent1_path)) vm.add_blockdev('driver=file,filename=%s,node-name=ext2' % (extent2_path)) diff --git a/tests/qemu-iotests/237.out b/tests/qemu-iotests/237.out index aa94986803..62b8865677 100644 --- a/tests/qemu-iotests/237.out +++ b/tests/qemu-iotests/237.out @@ -116,25 +116,19 @@ Job failed: Cannot find device='this doesn't exist' nor node-name='this doesn't == Invalid adapter types == {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"adapter-type": "foo", "driver": "vmdk", "file": "node0", "size": 33554432}}} -{"error": {"class": "GenericError", "desc": "Invalid parameter 'foo'"}} +{"error": {"class": "GenericError", "desc": "Parameter 'adapter-type' does not accept value 'foo'"}} {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"adapter-type": "IDE", "driver": "vmdk", "file": "node0", "size": 33554432}}} -{"error": {"class": "GenericError", "desc": "Invalid parameter 'IDE'"}} +{"error": {"class": "GenericError", "desc": "Parameter 'adapter-type' does not accept value 'IDE'"}} {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"adapter-type": "legacyesx", "driver": "vmdk", "file": "node0", "size": 33554432}}} -{"error": {"class": "GenericError", "desc": "Invalid parameter 'legacyesx'"}} +{"error": {"class": "GenericError", "desc": "Parameter 'adapter-type' does not accept value 'legacyesx'"}} {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"adapter-type": 1, "driver": "vmdk", "file": "node0", "size": 33554432}}} {"error": {"class": "GenericError", "desc": "Invalid parameter type for 'options.adapter-type', expected: string"}} === Other subformats === -Formatting 'TEST_DIR/PID-t.vmdk.1', fmt=vmdk size=0 compat6=off hwversion=undefined - -Formatting 'TEST_DIR/PID-t.vmdk.2', fmt=vmdk size=0 compat6=off hwversion=undefined - -Formatting 'TEST_DIR/PID-t.vmdk.3', fmt=vmdk size=0 compat6=off hwversion=undefined - == Missing extent == {"execute": 
"blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "vmdk", "file": "node0", "size": 33554432, "subformat": "monolithicFlat"}}} diff --git a/tests/qemu-iotests/241 b/tests/qemu-iotests/241 index c962c8b607..f196650afa 100755 --- a/tests/qemu-iotests/241 +++ b/tests/qemu-iotests/241 @@ -58,7 +58,7 @@ echo nbd_server_start_unix_socket -f $IMGFMT "$TEST_IMG_FILE" -$QEMU_NBD_PROG --list -k $nbd_unix_socket | grep '\(size\|min\)' +$QEMU_NBD_PROG --list -k $nbd_unix_socket | _filter_qemu_nbd_exports $QEMU_IMG map -f raw --output=json "$TEST_IMG" | _filter_qemu_img_map $QEMU_IO -f raw -c map "$TEST_IMG" nbd_server_stop @@ -71,7 +71,7 @@ echo # sector alignment, here at the server. nbd_server_start_unix_socket "$TEST_IMG_FILE" 2> "$TEST_DIR/server.log" -$QEMU_NBD_PROG --list -k $nbd_unix_socket | grep '\(size\|min\)' +$QEMU_NBD_PROG --list -k $nbd_unix_socket | _filter_qemu_nbd_exports $QEMU_IMG map -f raw --output=json "$TEST_IMG" | _filter_qemu_img_map $QEMU_IO -f raw -c map "$TEST_IMG" nbd_server_stop @@ -84,7 +84,7 @@ echo # Now force sector alignment at the client. nbd_server_start_unix_socket -f $IMGFMT "$TEST_IMG_FILE" -$QEMU_NBD_PROG --list -k $nbd_unix_socket | grep '\(size\|min\)' +$QEMU_NBD_PROG --list -k $nbd_unix_socket | _filter_qemu_nbd_exports $QEMU_IMG map --output=json "$TEST_IMG" | _filter_qemu_img_map $QEMU_IO -c map "$TEST_IMG" nbd_server_stop diff --git a/tests/qemu-iotests/241.out b/tests/qemu-iotests/241.out index 56e95b599a..88e8cfcd7e 100644 --- a/tests/qemu-iotests/241.out +++ b/tests/qemu-iotests/241.out @@ -2,6 +2,8 @@ QA output created by 241 === Exporting unaligned raw image, natural alignment === +exports available: 1 + export: '' size: 1024 min block: 1 [{ "start": 0, "length": 1000, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, @@ -10,6 +12,8 @@ QA output created by 241 === Exporting unaligned raw image, forced server sector alignment === +exports available: 1 + export: '' size: 1024 min block: 512 [{ "start": 0, "length": 1024, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] @@ -20,6 +24,8 @@ WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed === Exporting unaligned raw image, forced client sector alignment === +exports available: 1 + export: '' size: 1024 min block: 1 [{ "start": 0, "length": 1000, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, diff --git a/tests/qemu-iotests/242 b/tests/qemu-iotests/242 index a9b27668c2..c89f0c6cb3 100755 --- a/tests/qemu-iotests/242 +++ b/tests/qemu-iotests/242 @@ -22,11 +22,12 @@ import iotests import json import struct -from iotests import qemu_img_create, qemu_io, qemu_img_pipe, \ - file_path, img_info_log, log, filter_qemu_io +from iotests import qemu_img_create, qemu_io_log, qemu_img_info, \ + file_path, img_info_log, log iotests.script_initialize(supported_fmts=['qcow2'], - supported_protocols=['file']) + supported_protocols=['file'], + unsupported_imgopts=['refcount_bits', 'compat']) disk = file_path('disk') chunk = 256 * 1024 @@ -38,8 +39,7 @@ flag_offset = 0x5000f def print_bitmap(extra_args): log('qemu-img info dump:\n') img_info_log(disk, extra_args=extra_args) - result = json.loads(qemu_img_pipe('info', '--force-share', - '--output=json', disk)) + result = qemu_img_info('--force-share', disk) if 'bitmaps' in result['format-specific']['data']: bitmaps = result['format-specific']['data']['bitmaps'] log('The same bitmaps in JSON format:') @@ -61,7 +61,7 @@ def 
add_bitmap(bitmap_number, persistent, disabled): def write_to_disk(offset, size): write = 'write {} {}'.format(offset, size) - log(qemu_io('-c', write, disk), filters=[filter_qemu_io]) + qemu_io_log('-c', write, disk) def toggle_flag(offset): @@ -100,7 +100,7 @@ add_bitmap(1, True, False) log('Write an unknown bitmap flag \'{}\' into a new QCOW2 image at offset {}' .format(hex(bitmap_flag_unknown), flag_offset)) toggle_flag(flag_offset) -img_info_log(disk) +img_info_log(disk, check=False) toggle_flag(flag_offset) log('Unset the unknown bitmap flag \'{}\' in the bitmap directory entry:\n' .format(hex(bitmap_flag_unknown))) diff --git a/tests/qemu-iotests/242.out b/tests/qemu-iotests/242.out index 3759c99284..ce231424a7 100644 --- a/tests/qemu-iotests/242.out +++ b/tests/qemu-iotests/242.out @@ -12,7 +12,7 @@ virtual size: 1 MiB (1048576 bytes) cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false refcount bits: 16 corrupt: false @@ -34,7 +34,7 @@ virtual size: 1 MiB (1048576 bytes) cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false bitmaps: [0]: @@ -68,7 +68,7 @@ virtual size: 1 MiB (1048576 bytes) cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false bitmaps: [0]: @@ -110,7 +110,7 @@ virtual size: 1 MiB (1048576 bytes) cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false bitmaps: [0]: @@ -161,7 +161,7 @@ virtual size: 1 MiB (1048576 bytes) cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false bitmaps: [0]: diff --git a/tests/qemu-iotests/245 b/tests/qemu-iotests/245 index bf8261eec0..edaf29094b 100755 --- a/tests/qemu-iotests/245 +++ b/tests/qemu-iotests/245 @@ -20,11 +20,13 @@ # along with this program. If not, see . 
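# Illustrative sketch (not part of the patch): print_bitmap() in 242 above
# ultimately relies on 'qemu-img info --force-share --output=json' and walks
# down to the qcow2 bitmap directory. A standalone equivalent, assuming only
# that qemu-img is on PATH and the image is qcow2:
import json
import subprocess

def dump_bitmaps(image: str) -> None:
    out = subprocess.run(
        ['qemu-img', 'info', '--force-share', '--output=json', image],
        check=True, capture_output=True, text=True).stdout
    info = json.loads(out)
    # 'bitmaps' only appears once at least one persistent bitmap exists
    for bm in info.get('format-specific', {}).get('data', {}).get('bitmaps', []):
        print(bm['name'], bm['flags'])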
# -import os -import re -import iotests import copy import json +import os +import re +from subprocess import CalledProcessError + +import iotests from iotests import qemu_img, qemu_io hd_path = [ @@ -149,7 +151,7 @@ class TestBlockdevReopen(iotests.QMPTestCase): self.reopen(opts, {'node-name': ''}, "Failed to find node with node-name=''") self.reopen(opts, {'node-name': None}, "Invalid parameter type for 'options[0].node-name', expected: string") self.reopen(opts, {'driver': 'raw'}, "Cannot change the option 'driver'") - self.reopen(opts, {'driver': ''}, "Invalid parameter ''") + self.reopen(opts, {'driver': ''}, "Parameter 'driver' does not accept value ''") self.reopen(opts, {'driver': None}, "Invalid parameter type for 'options[0].driver', expected: string") self.reopen(opts, {'file': 'not-found'}, "Cannot find device='' nor node-name='not-found'") self.reopen(opts, {'file': ''}, "Cannot find device='' nor node-name=''") @@ -216,11 +218,14 @@ class TestBlockdevReopen(iotests.QMPTestCase): # Reopen an image several times changing some of its options def test_reopen(self): - # Check whether the filesystem supports O_DIRECT - if 'O_DIRECT' in qemu_io('-f', 'raw', '-t', 'none', '-c', 'quit', hd_path[0]): - supports_direct = False - else: + try: + qemu_io('-f', 'raw', '-t', 'none', '-c', 'quit', hd_path[0]) supports_direct = True + except CalledProcessError as exc: + if 'O_DIRECT' in exc.stdout: + supports_direct = False + else: + raise # Open the hd1 image passing all backing options opts = hd_opts(1) @@ -1138,12 +1143,13 @@ class TestBlockdevReopen(iotests.QMPTestCase): self.assertEqual(self.get_node('hd1'), None) self.assert_qmp(self.get_node('hd2'), 'ro', True) - def run_test_iothreads(self, iothread_a, iothread_b, errmsg = None): - opts = hd_opts(0) + def run_test_iothreads(self, iothread_a, iothread_b, errmsg = None, + opts_a = None, opts_b = None): + opts = opts_a or hd_opts(0) result = self.vm.qmp('blockdev-add', conv_keys = False, **opts) self.assert_qmp(result, 'return', {}) - opts2 = hd_opts(2) + opts2 = opts_b or hd_opts(2) result = self.vm.qmp('blockdev-add', conv_keys = False, **opts2) self.assert_qmp(result, 'return', {}) @@ -1189,10 +1195,39 @@ class TestBlockdevReopen(iotests.QMPTestCase): self.run_test_iothreads('iothread0', 'iothread0') def test_iothreads_switch_backing(self): - self.run_test_iothreads('iothread0', None) + self.run_test_iothreads('iothread0', '') def test_iothreads_switch_overlay(self): - self.run_test_iothreads(None, 'iothread0') + self.run_test_iothreads('', 'iothread0') + + def test_iothreads_with_throttling(self): + # Create a throttle-group object + opts = { 'qom-type': 'throttle-group', 'id': 'group0', + 'limits': { 'iops-total': 1000 } } + result = self.vm.qmp('object-add', conv_keys = False, **opts) + self.assert_qmp(result, 'return', {}) + + # Options with a throttle filter between format and protocol + opts = [ + { + 'driver': iotests.imgfmt, + 'node-name': f'hd{idx}', + 'file' : { + 'node-name': f'hd{idx}-throttle', + 'driver': 'throttle', + 'throttle-group': 'group0', + 'file': { + 'driver': 'file', + 'node-name': f'hd{idx}-file', + 'filename': hd_path[idx], + }, + }, + } + for idx in (0, 2) + ] + + self.run_test_iothreads('iothread0', 'iothread0', None, + opts[0], opts[1]) if __name__ == '__main__': iotests.activate_logging() diff --git a/tests/qemu-iotests/245.out b/tests/qemu-iotests/245.out index 4eced19294..a4e04a3266 100644 --- a/tests/qemu-iotests/245.out +++ b/tests/qemu-iotests/245.out @@ -17,8 +17,8 @@ read 1/1 bytes at offset 262152 
read 1/1 bytes at offset 262160 1 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -............... +................ ---------------------------------------------------------------------- -Ran 25 tests +Ran 26 tests OK diff --git a/tests/qemu-iotests/246 b/tests/qemu-iotests/246 index 5932a0e8a9..b009a78397 100755 --- a/tests/qemu-iotests/246 +++ b/tests/qemu-iotests/246 @@ -23,7 +23,8 @@ import iotests from iotests import log -iotests.script_initialize(supported_fmts=['qcow2']) +iotests.script_initialize(supported_fmts=['qcow2'], + unsupported_imgopts=['compat']) size = 64 * 1024 * 1024 * 1024 gran_small = 32 * 1024 gran_large = 128 * 1024 diff --git a/tests/qemu-iotests/250 b/tests/qemu-iotests/250 index f069ca1759..af48f83aba 100755 --- a/tests/qemu-iotests/250 +++ b/tests/qemu-iotests/250 @@ -20,7 +20,7 @@ # # creator -owner=vsementsov@virtuozzo.com +owner=v.sementsov-og@mail.ru seq=`basename $0` echo "QA output created by $seq" diff --git a/tests/qemu-iotests/251 b/tests/qemu-iotests/251 index 8bdec37d32..794cad58b2 100755 --- a/tests/qemu-iotests/251 +++ b/tests/qemu-iotests/251 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq=$(basename $0) echo "QA output created by $seq" diff --git a/tests/qemu-iotests/252 b/tests/qemu-iotests/252 index 2134b9993a..522333cf1d 100755 --- a/tests/qemu-iotests/252 +++ b/tests/qemu-iotests/252 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq=$(basename $0) echo "QA output created by $seq" diff --git a/tests/qemu-iotests/254 b/tests/qemu-iotests/254 index 108bf5f894..7ea098818c 100755 --- a/tests/qemu-iotests/254 +++ b/tests/qemu-iotests/254 @@ -22,7 +22,8 @@ import iotests from iotests import qemu_img_create, file_path, log -iotests.script_initialize(supported_fmts=['qcow2']) +iotests.script_initialize(supported_fmts=['qcow2'], + unsupported_imgopts=['compat']) disk, top = file_path('disk', 'top') size = 1024 * 1024 diff --git a/tests/qemu-iotests/255 b/tests/qemu-iotests/255 index c43aa9c67a..88b29d64b4 100755 --- a/tests/qemu-iotests/255 +++ b/tests/qemu-iotests/255 @@ -42,8 +42,8 @@ with iotests.FilePath('t.qcow2') as disk_path, \ size_str = str(size) iotests.create_image(base_path, size) - iotests.qemu_img_log('create', '-f', iotests.imgfmt, mid_path, size_str) - iotests.qemu_img_log('create', '-f', iotests.imgfmt, disk_path, size_str) + iotests.qemu_img_create('-f', iotests.imgfmt, mid_path, size_str) + iotests.qemu_img_create('-f', iotests.imgfmt, disk_path, size_str) # Create a backing chain like this: # base <- [throttled: bps-read=4096] <- mid <- overlay @@ -92,12 +92,10 @@ with iotests.FilePath('src.qcow2') as src_path, \ size = 128 * 1024 * 1024 size_str = str(size) - iotests.qemu_img_log('create', '-f', iotests.imgfmt, src_path, size_str) - iotests.qemu_img_log('create', '-f', iotests.imgfmt, dst_path, size_str) + iotests.qemu_img_create('-f', iotests.imgfmt, src_path, size_str) + iotests.qemu_img_create('-f', iotests.imgfmt, dst_path, size_str) - iotests.log(iotests.qemu_io('-f', iotests.imgfmt, '-c', 'write 0 1M', - src_path), - filters=[iotests.filter_test_dir, iotests.filter_qemu_io]) + iotests.qemu_io_log('-f', iotests.imgfmt, '-c', 'write 0 1M', src_path), vm.add_object('throttle-group,x-bps-read=4096,id=throttle0') @@ -123,4 +121,4 @@ with iotests.FilePath('src.qcow2') as src_path, \ vm.qmp_log('block-job-cancel', device='job0') vm.qmp_log('quit') - vm.shutdown(has_quit=True) + vm.shutdown() diff --git a/tests/qemu-iotests/255.out 
b/tests/qemu-iotests/255.out index 33b7f22de3..2e837cbb5f 100644 --- a/tests/qemu-iotests/255.out +++ b/tests/qemu-iotests/255.out @@ -3,10 +3,6 @@ Finishing a commit job with background reads === Create backing chain and start VM === -Formatting 'TEST_DIR/PID-t.qcow2.mid', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-t.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 lazy_refcounts=off refcount_bits=16 - === Start background read requests === === Run a commit job === @@ -23,10 +19,6 @@ Closing the VM while a job is being cancelled === Create images and start VM === -Formatting 'TEST_DIR/PID-src.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-dst.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 lazy_refcounts=off refcount_bits=16 - wrote 1048576/1048576 bytes at offset 0 1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) diff --git a/tests/qemu-iotests/257 b/tests/qemu-iotests/257 index c72c82a171..e7e7a2317e 100755 --- a/tests/qemu-iotests/257 +++ b/tests/qemu-iotests/257 @@ -240,13 +240,14 @@ def compare_images(image, reference, baseimg=None, expected_match=True): """ expected_ret = 0 if expected_match else 1 if baseimg: - assert qemu_img("rebase", "-u", "-b", baseimg, '-F', iotests.imgfmt, - image) == 0 - ret = qemu_img("compare", image, reference) + qemu_img("rebase", "-u", "-b", baseimg, '-F', iotests.imgfmt, image) + + sub = qemu_img("compare", image, reference, check=False) + log('qemu_img compare "{:s}" "{:s}" ==> {:s}, {:s}'.format( image, reference, - "Identical" if ret == 0 else "Mismatch", - "OK!" if ret == expected_ret else "ERROR!"), + "Identical" if sub.returncode == 0 else "Mismatch", + "OK!" 
if sub.returncode == expected_ret else "ERROR!"), filters=[iotests.filter_testfiles]) def test_bitmap_sync(bsync_mode, msync_mode='bitmap', failure=None): diff --git a/tests/qemu-iotests/257.out b/tests/qemu-iotests/257.out index 50cbd8e882..aa76131ca9 100644 --- a/tests/qemu-iotests/257.out +++ b/tests/qemu-iotests/257.out @@ -106,6 +106,22 @@ write -P0x67 0x3fe0000 0x20000 {"return": ""} { "bitmaps": { + "backup-top": [ + { + "busy": false, + "count": 67108864, + "granularity": 65536, + "persistent": false, + "recording": false + }, + { + "busy": false, + "count": 458752, + "granularity": 65536, + "persistent": false, + "recording": false + } + ], "drive0": [ { "busy": false, @@ -566,6 +582,22 @@ write -P0x67 0x3fe0000 0x20000 {"return": ""} { "bitmaps": { + "backup-top": [ + { + "busy": false, + "count": 67108864, + "granularity": 65536, + "persistent": false, + "recording": false + }, + { + "busy": false, + "count": 458752, + "granularity": 65536, + "persistent": false, + "recording": false + } + ], "drive0": [ { "busy": false, @@ -819,6 +851,22 @@ write -P0x67 0x3fe0000 0x20000 {"return": ""} { "bitmaps": { + "backup-top": [ + { + "busy": false, + "count": 67108864, + "granularity": 65536, + "persistent": false, + "recording": false + }, + { + "busy": false, + "count": 458752, + "granularity": 65536, + "persistent": false, + "recording": false + } + ], "drive0": [ { "busy": false, @@ -1279,6 +1327,22 @@ write -P0x67 0x3fe0000 0x20000 {"return": ""} { "bitmaps": { + "backup-top": [ + { + "busy": false, + "count": 67108864, + "granularity": 65536, + "persistent": false, + "recording": false + }, + { + "busy": false, + "count": 458752, + "granularity": 65536, + "persistent": false, + "recording": false + } + ], "drive0": [ { "busy": false, @@ -1532,6 +1596,22 @@ write -P0x67 0x3fe0000 0x20000 {"return": ""} { "bitmaps": { + "backup-top": [ + { + "busy": false, + "count": 67108864, + "granularity": 65536, + "persistent": false, + "recording": false + }, + { + "busy": false, + "count": 458752, + "granularity": 65536, + "persistent": false, + "recording": false + } + ], "drive0": [ { "busy": false, @@ -1992,6 +2072,22 @@ write -P0x67 0x3fe0000 0x20000 {"return": ""} { "bitmaps": { + "backup-top": [ + { + "busy": false, + "count": 67108864, + "granularity": 65536, + "persistent": false, + "recording": false + }, + { + "busy": false, + "count": 458752, + "granularity": 65536, + "persistent": false, + "recording": false + } + ], "drive0": [ { "busy": false, @@ -2245,6 +2341,22 @@ write -P0x67 0x3fe0000 0x20000 {"return": ""} { "bitmaps": { + "backup-top": [ + { + "busy": false, + "count": 67108864, + "granularity": 65536, + "persistent": false, + "recording": false + }, + { + "busy": false, + "count": 458752, + "granularity": 65536, + "persistent": false, + "recording": false + } + ], "drive0": [ { "busy": false, @@ -2705,6 +2817,22 @@ write -P0x67 0x3fe0000 0x20000 {"return": ""} { "bitmaps": { + "backup-top": [ + { + "busy": false, + "count": 67108864, + "granularity": 65536, + "persistent": false, + "recording": false + }, + { + "busy": false, + "count": 458752, + "granularity": 65536, + "persistent": false, + "recording": false + } + ], "drive0": [ { "busy": false, @@ -2958,6 +3086,22 @@ write -P0x67 0x3fe0000 0x20000 {"return": ""} { "bitmaps": { + "backup-top": [ + { + "busy": false, + "count": 67108864, + "granularity": 65536, + "persistent": false, + "recording": false + }, + { + "busy": false, + "count": 458752, + "granularity": 65536, + "persistent": false, + "recording": false + } + 
], "drive0": [ { "busy": false, @@ -3418,6 +3562,22 @@ write -P0x67 0x3fe0000 0x20000 {"return": ""} { "bitmaps": { + "backup-top": [ + { + "busy": false, + "count": 67108864, + "granularity": 65536, + "persistent": false, + "recording": false + }, + { + "busy": false, + "count": 458752, + "granularity": 65536, + "persistent": false, + "recording": false + } + ], "drive0": [ { "busy": false, @@ -3671,6 +3831,22 @@ write -P0x67 0x3fe0000 0x20000 {"return": ""} { "bitmaps": { + "backup-top": [ + { + "busy": false, + "count": 67108864, + "granularity": 65536, + "persistent": false, + "recording": false + }, + { + "busy": false, + "count": 458752, + "granularity": 65536, + "persistent": false, + "recording": false + } + ], "drive0": [ { "busy": false, @@ -4131,6 +4307,22 @@ write -P0x67 0x3fe0000 0x20000 {"return": ""} { "bitmaps": { + "backup-top": [ + { + "busy": false, + "count": 67108864, + "granularity": 65536, + "persistent": false, + "recording": false + }, + { + "busy": false, + "count": 458752, + "granularity": 65536, + "persistent": false, + "recording": false + } + ], "drive0": [ { "busy": false, @@ -4384,6 +4576,22 @@ write -P0x67 0x3fe0000 0x20000 {"return": ""} { "bitmaps": { + "backup-top": [ + { + "busy": false, + "count": 67108864, + "granularity": 65536, + "persistent": false, + "recording": false + }, + { + "busy": false, + "count": 458752, + "granularity": 65536, + "persistent": false, + "recording": false + } + ], "drive0": [ { "busy": false, @@ -4844,6 +5052,22 @@ write -P0x67 0x3fe0000 0x20000 {"return": ""} { "bitmaps": { + "backup-top": [ + { + "busy": false, + "count": 67108864, + "granularity": 65536, + "persistent": false, + "recording": false + }, + { + "busy": false, + "count": 458752, + "granularity": 65536, + "persistent": false, + "recording": false + } + ], "drive0": [ { "busy": false, diff --git a/tests/qemu-iotests/258 b/tests/qemu-iotests/258 index a6618208a8..73d4af645f 100755 --- a/tests/qemu-iotests/258 +++ b/tests/qemu-iotests/258 @@ -18,10 +18,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# -# Creator/Owner: Max Reitz +# Creator/Owner: Hanna Reitz import iotests -from iotests import log, qemu_img, qemu_io_silent, \ +from iotests import log, qemu_img, qemu_io, \ filter_qmp_testfiles, filter_qmp_imgfmt # Returns a node for blockdev-add @@ -75,26 +75,25 @@ def test_concurrent_finish(write_to_stream_node): # It is important to use raw for the base layer (so that # permissions are just handed through to the protocol layer) - assert qemu_img('create', '-f', 'raw', node0_path, '64M') == 0 + qemu_img('create', '-f', 'raw', node0_path, '64M') stream_throttle=None commit_throttle=None for path in [node1_path, node2_path, node3_path, node4_path]: - assert qemu_img('create', '-f', iotests.imgfmt, path, '64M') == 0 + qemu_img('create', '-f', iotests.imgfmt, path, '64M') if write_to_stream_node: # This is what (most of the time) makes commit finish # earlier and then pull in stream - assert qemu_io_silent(node2_path, - '-c', 'write %iK 64K' % (65536 - 192), - '-c', 'write %iK 64K' % (65536 - 64)) == 0 + qemu_io(node2_path, + '-c', 'write %iK 64K' % (65536 - 192), + '-c', 'write %iK 64K' % (65536 - 64)) stream_throttle='tg' else: # And this makes stream finish earlier - assert qemu_io_silent(node1_path, - '-c', 'write %iK 64K' % (65536 - 64)) == 0 + qemu_io(node1_path, '-c', 'write %iK 64K' % (65536 - 64)) commit_throttle='tg' diff --git a/tests/qemu-iotests/259 b/tests/qemu-iotests/259 index 1b15e8fb48..82f5de4b34 100755 --- a/tests/qemu-iotests/259 +++ b/tests/qemu-iotests/259 @@ -20,7 +20,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq=$(basename $0) echo "QA output created by $seq" diff --git a/tests/qemu-iotests/260 b/tests/qemu-iotests/260 index 2ec64a9b99..c2133f9980 100755 --- a/tests/qemu-iotests/260 +++ b/tests/qemu-iotests/260 @@ -23,7 +23,8 @@ import iotests from iotests import qemu_img_create, file_path, log, filter_qmp_event iotests.script_initialize( - supported_fmts=['qcow2'] + supported_fmts=['qcow2'], + unsupported_imgopts=['compat'] ) base, top = file_path('base', 'top') diff --git a/tests/qemu-iotests/261 b/tests/qemu-iotests/261 index d1c8037ab1..b73da565da 100755 --- a/tests/qemu-iotests/261 +++ b/tests/qemu-iotests/261 @@ -22,7 +22,7 @@ # # creator -owner=mreitz@redhat.com +owner=hreitz@redhat.com seq=$(basename $0) echo "QA output created by $seq" diff --git a/tests/qemu-iotests/262 b/tests/qemu-iotests/262 index 32d69988ef..2294fd5ecb 100755 --- a/tests/qemu-iotests/262 +++ b/tests/qemu-iotests/262 @@ -51,7 +51,7 @@ with iotests.FilePath('img') as img_path, \ vm.add_device('virtio-blk,drive=%s,iothread=iothread0' % root) - iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, img_path, '64M') + iotests.qemu_img_create('-f', iotests.imgfmt, img_path, '64M') os.mkfifo(fifo) diff --git a/tests/qemu-iotests/264 b/tests/qemu-iotests/264 index bc431d1a19..289381e315 100755 --- a/tests/qemu-iotests/264 +++ b/tests/qemu-iotests/264 @@ -101,7 +101,7 @@ class TestNbdReconnect(iotests.QMPTestCase): start_t = time.time() self.vm.event_wait('BLOCK_JOB_CANCELLED') delta_t = time.time() - start_t - self.assertTrue(delta_t < 2.0) + self.assertTrue(delta_t < 5.0) def test_mirror_cancel(self): # Mirror speed limit doesn't work well enough, it seems that mirror diff --git a/tests/qemu-iotests/266 b/tests/qemu-iotests/266 index 71ce81d0df..8fc3807ac5 100755 --- a/tests/qemu-iotests/266 +++ b/tests/qemu-iotests/266 @@ -137,7 +137,7 @@ def main(): iotests.log('') vm.shutdown() - iotests.img_info_log(file_path) + iotests.img_info_log(file_path, check=False) 
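# Illustrative sketch (not part of the patch): the reason the
# 'assert qemu_img(...) == 0' and qemu_io_silent() patterns disappear in 258,
# 298 and 310 is that the helpers now raise on failure, subprocess.run-style,
# unless check=False is passed. Roughly (an assumption, not the real
# iotests.py code):
import subprocess

def qemu_img_sketch(*args: str, check: bool = True) -> subprocess.CompletedProcess:
    res = subprocess.run(['qemu-img', *args],
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                         universal_newlines=True)
    if check and res.returncode != 0:
        # Callers that expect failure pass check=False and inspect
        # res.returncode / res.stdout themselves (as 257 and 296 do).
        raise subprocess.CalledProcessError(res.returncode, res.args,
                                            output=res.stdout)
    return res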
iotests.script_main(main, diff --git a/tests/qemu-iotests/271 b/tests/qemu-iotests/271 index 599b849cc6..c7c2cadda0 100755 --- a/tests/qemu-iotests/271 +++ b/tests/qemu-iotests/271 @@ -893,7 +893,10 @@ EOF } _make_test_img -o extended_l2=on 1M -_concurrent_io | $QEMU_IO | _filter_qemu_io +# Second and third writes in _concurrent_io() are independent and may finish in +# different order. So, filter offset out to match both possible variants. +_concurrent_io | $QEMU_IO | _filter_qemu_io | \ + sed -e 's/\(20480\|40960\)/OFFSET/' _concurrent_verify | $QEMU_IO | _filter_qemu_io # success, all done diff --git a/tests/qemu-iotests/271.out b/tests/qemu-iotests/271.out index 81043ba4d7..5be780de76 100644 --- a/tests/qemu-iotests/271.out +++ b/tests/qemu-iotests/271.out @@ -719,8 +719,8 @@ blkdebug: Suspended request 'A' blkdebug: Resuming request 'A' wrote 2048/2048 bytes at offset 30720 2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -wrote 2048/2048 bytes at offset 20480 +wrote 2048/2048 bytes at offset OFFSET 2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -wrote 2048/2048 bytes at offset 40960 +wrote 2048/2048 bytes at offset OFFSET 2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) *** done diff --git a/tests/qemu-iotests/273.out b/tests/qemu-iotests/273.out index 4e840b6730..6a74a8138b 100644 --- a/tests/qemu-iotests/273.out +++ b/tests/qemu-iotests/273.out @@ -204,7 +204,6 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev "name": "file", "parent": 5, "shared-perm": [ - "graph-mod", "write-unchanged", "consistent-read" ], @@ -219,7 +218,6 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev "name": "backing", "parent": 5, "shared-perm": [ - "graph-mod", "resize", "write-unchanged", "write", @@ -233,7 +231,6 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev "name": "file", "parent": 3, "shared-perm": [ - "graph-mod", "write-unchanged", "consistent-read" ], @@ -246,7 +243,6 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev "name": "backing", "parent": 3, "shared-perm": [ - "graph-mod", "resize", "write-unchanged", "write", diff --git a/tests/qemu-iotests/274 b/tests/qemu-iotests/274 index caab008e07..2495e051a2 100755 --- a/tests/qemu-iotests/274 +++ b/tests/qemu-iotests/274 @@ -23,19 +23,19 @@ import iotests iotests.script_initialize(supported_fmts=['qcow2'], - supported_platforms=['linux']) + supported_platforms=['linux'], + unsupported_imgopts=['refcount_bits', 'compat']) size_short = 1 * 1024 * 1024 size_long = 2 * 1024 * 1024 size_diff = size_long - size_short def create_chain() -> None: - iotests.qemu_img_log('create', '-f', iotests.imgfmt, base, - str(size_long)) - iotests.qemu_img_log('create', '-f', iotests.imgfmt, '-b', base, - '-F', iotests.imgfmt, mid, str(size_short)) - iotests.qemu_img_log('create', '-f', iotests.imgfmt, '-b', mid, - '-F', iotests.imgfmt, top, str(size_long)) + iotests.qemu_img_create('-f', iotests.imgfmt, base, str(size_long)) + iotests.qemu_img_create('-f', iotests.imgfmt, '-b', base, + '-F', iotests.imgfmt, mid, str(size_short)) + iotests.qemu_img_create('-f', iotests.imgfmt, '-b', mid, + '-F', iotests.imgfmt, top, str(size_long)) iotests.qemu_io_log('-c', 'write -P 1 0 %d' % size_long, base) @@ -159,9 +159,9 @@ with iotests.FilePath('base') as base, \ ('off', '512k', '256k', '500k', '436k')]: iotests.log('=== preallocation=%s ===' % prealloc) - iotests.qemu_img_log('create', '-f', iotests.imgfmt, base, 
base_size) - iotests.qemu_img_log('create', '-f', iotests.imgfmt, '-b', base, - '-F', iotests.imgfmt, top, top_size_old) + iotests.qemu_img_create('-f', iotests.imgfmt, base, base_size) + iotests.qemu_img_create('-f', iotests.imgfmt, '-b', base, + '-F', iotests.imgfmt, top, top_size_old) iotests.qemu_io_log('-c', 'write -P 1 %s 64k' % off, base) # After this, top_size_old to base_size should be allocated/zeroed. diff --git a/tests/qemu-iotests/274.out b/tests/qemu-iotests/274.out index 16a95a4850..acd8b166a6 100644 --- a/tests/qemu-iotests/274.out +++ b/tests/qemu-iotests/274.out @@ -1,10 +1,4 @@ == Commit tests == -Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=2097152 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-mid', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=1048576 backing_file=TEST_DIR/PID-base backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-top', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=2097152 backing_file=TEST_DIR/PID-mid backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16 - wrote 2097152/2097152 bytes at offset 0 2 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) @@ -53,7 +47,7 @@ backing file: TEST_DIR/PID-base backing file format: IMGFMT Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false refcount bits: 16 corrupt: false @@ -66,12 +60,6 @@ read 1048576/1048576 bytes at offset 1048576 1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) === Testing HMP commit (top -> mid) === -Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=2097152 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-mid', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=1048576 backing_file=TEST_DIR/PID-base backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-top', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=2097152 backing_file=TEST_DIR/PID-mid backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16 - wrote 2097152/2097152 bytes at offset 0 2 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) @@ -85,7 +73,7 @@ backing file: TEST_DIR/PID-base backing file format: IMGFMT Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false refcount bits: 16 corrupt: false @@ -98,12 +86,6 @@ read 1048576/1048576 bytes at offset 1048576 1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) === Testing QMP active commit (top -> mid) === -Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=2097152 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-mid', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=1048576 backing_file=TEST_DIR/PID-base backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-top', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=2097152 backing_file=TEST_DIR/PID-mid backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16 - wrote 2097152/2097152 bytes at offset 0 2 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) @@ -123,7 +105,7 @@ backing file: TEST_DIR/PID-base backing file format: IMGFMT Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false 
refcount bits: 16 corrupt: false @@ -136,12 +118,6 @@ read 1048576/1048576 bytes at offset 1048576 1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) === Testing qemu-img commit (top -> base) === -Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=2097152 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-mid', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=1048576 backing_file=TEST_DIR/PID-base backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-top', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=2097152 backing_file=TEST_DIR/PID-mid backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16 - wrote 2097152/2097152 bytes at offset 0 2 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) @@ -153,7 +129,7 @@ virtual size: 2 MiB (2097152 bytes) cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false refcount bits: 16 corrupt: false @@ -166,12 +142,6 @@ read 1048576/1048576 bytes at offset 1048576 1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) === Testing QMP active commit (top -> base) === -Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=2097152 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-mid', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=1048576 backing_file=TEST_DIR/PID-base backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-top', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=2097152 backing_file=TEST_DIR/PID-mid backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16 - wrote 2097152/2097152 bytes at offset 0 2 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) @@ -191,7 +161,7 @@ backing file: TEST_DIR/PID-base backing file format: IMGFMT Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false refcount bits: 16 corrupt: false @@ -205,10 +175,6 @@ read 1048576/1048576 bytes at offset 1048576 == Resize tests == === preallocation=off === -Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=6442450944 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-top', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=1073741824 backing_file=TEST_DIR/PID-base backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16 - wrote 65536/65536 bytes at offset 5368709120 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) @@ -224,10 +190,6 @@ read 65536/65536 bytes at offset 5368709120 { "start": 1073741824, "length": 7516192768, "depth": 0, "present": true, "zero": true, "data": false}] === preallocation=metadata === -Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=34359738368 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-top', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=32212254720 backing_file=TEST_DIR/PID-base backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16 - wrote 65536/65536 bytes at offset 33285996544 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) @@ -248,10 +210,6 @@ read 65536/65536 bytes at offset 33285996544 { "start": 34896609280, "length": 536870912, "depth": 0, "present": true, "zero": true, "data": false, "offset": 2685075456}] === 
preallocation=falloc === -Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=10485760 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-top', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=5242880 backing_file=TEST_DIR/PID-base backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16 - wrote 65536/65536 bytes at offset 9437184 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) @@ -267,10 +225,6 @@ read 65536/65536 bytes at offset 9437184 { "start": 5242880, "length": 10485760, "depth": 0, "present": true, "zero": false, "data": true, "offset": 327680}] === preallocation=full === -Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=16777216 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-top', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=8388608 backing_file=TEST_DIR/PID-base backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16 - wrote 65536/65536 bytes at offset 11534336 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) @@ -286,10 +240,6 @@ read 65536/65536 bytes at offset 11534336 { "start": 8388608, "length": 4194304, "depth": 0, "present": true, "zero": false, "data": true, "offset": 327680}] === preallocation=off === -Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=393216 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-top', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=259072 backing_file=TEST_DIR/PID-base backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16 - wrote 65536/65536 bytes at offset 259072 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) @@ -306,10 +256,6 @@ read 65536/65536 bytes at offset 259072 { "start": 262144, "length": 262144, "depth": 0, "present": true, "zero": true, "data": false}] === preallocation=off === -Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=409600 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-top', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=262144 backing_file=TEST_DIR/PID-base backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16 - wrote 65536/65536 bytes at offset 344064 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) @@ -325,10 +271,6 @@ read 65536/65536 bytes at offset 344064 { "start": 262144, "length": 262144, "depth": 0, "present": true, "zero": true, "data": false}] === preallocation=off === -Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=524288 lazy_refcounts=off refcount_bits=16 - -Formatting 'TEST_DIR/PID-top', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=262144 backing_file=TEST_DIR/PID-base backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16 - wrote 65536/65536 bytes at offset 446464 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) diff --git a/tests/qemu-iotests/280 b/tests/qemu-iotests/280 index 628f3c33ca..5f50500fdb 100755 --- a/tests/qemu-iotests/280 +++ b/tests/qemu-iotests/280 @@ -33,7 +33,7 @@ with iotests.FilePath('base') as base_path , \ iotests.FilePath('top') as top_path, \ iotests.VM() as vm: - iotests.qemu_img_log('create', '-f', iotests.imgfmt, base_path, '64M') + iotests.qemu_img_create('-f', iotests.imgfmt, base_path, '64M') iotests.log('=== Launch VM ===') vm.add_object('iothread,id=iothread0') diff --git 
a/tests/qemu-iotests/280.out b/tests/qemu-iotests/280.out index 09a0f1a7cb..c75f437c00 100644 --- a/tests/qemu-iotests/280.out +++ b/tests/qemu-iotests/280.out @@ -1,5 +1,3 @@ -Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=67108864 lazy_refcounts=off refcount_bits=16 - === Launch VM === Enabling migration QMP events on VM... {"return": {}} diff --git a/tests/qemu-iotests/281 b/tests/qemu-iotests/281 index 956698083f..5e1339bd75 100755 --- a/tests/qemu-iotests/281 +++ b/tests/qemu-iotests/281 @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# group: rw quick +# group: rw # # Test cases for blockdev + IOThread interactions # @@ -20,8 +20,9 @@ # import os +import time import iotests -from iotests import qemu_img +from iotests import qemu_img, QemuStorageDaemon image_len = 64 * 1024 * 1024 @@ -243,6 +244,103 @@ class TestBlockdevBackupAbort(iotests.QMPTestCase): # Hangs on failure, we expect this error. self.assert_qmp(result, 'error/class', 'GenericError') +# Test for RHBZ#2033626 +class TestYieldingAndTimers(iotests.QMPTestCase): + sock = os.path.join(iotests.sock_dir, 'nbd.sock') + qsd = None + + def setUp(self): + self.create_nbd_export() + + # Simple VM with an NBD block device connected to the NBD export + # provided by the QSD, and an (initially unused) iothread + self.vm = iotests.VM() + self.vm.add_object('iothread,id=iothr') + self.vm.add_blockdev('nbd,node-name=nbd,server.type=unix,' + + f'server.path={self.sock},export=exp,' + + 'reconnect-delay=1,open-timeout=1') + + self.vm.launch() + + def tearDown(self): + self.stop_nbd_export() + self.vm.shutdown() + + def test_timers_with_blockdev_del(self): + # The NBD BDS will have had an active open timer, because setUp() gave + # a positive value for @open-timeout. It should be gone once the BDS + # has been opened. + # (But there used to be a bug where it remained active, which will + # become important below.) + + # Stop and restart the NBD server, and do some I/O on the client to + # trigger a reconnect and start the reconnect delay timer + self.stop_nbd_export() + self.create_nbd_export() + + result = self.vm.qmp('human-monitor-command', + command_line='qemu-io nbd "write 0 512"') + self.assert_qmp(result, 'return', '') + + # Reconnect is done, so the reconnect delay timer should be gone. + # (This is similar to how the open timer should be gone after open, + # and similarly there used to be a bug where it was not gone.) + + # Delete the BDS to see whether both timers are gone. If they are not, + # they will remain active, fire later, and then access freed data. + # (Or, with "block/nbd: Assert there are no timers when closed" + # applied, the assertions added in that patch will fail.) + result = self.vm.qmp('blockdev-del', node_name='nbd') + self.assert_qmp(result, 'return', {}) + + # Give the timers some time to fire (both have a timeout of 1 s). + # (Sleeping in an iotest may ring some alarm bells, but note that if + # the timing is off here, the test will just always pass. If we kill + # the VM too early, then we just kill the timers before they can fire, + # thus not see the error, and so the test will pass.) 
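# Illustrative sketch (not part of the patch): the flat option string passed
# to add_blockdev() in setUp() above corresponds roughly to this QMP
# blockdev-add payload (the socket path stands in for self.sock):
nbd_node = {
    'driver': 'nbd',
    'node-name': 'nbd',
    'server': {'type': 'unix', 'path': '/tmp/nbd.sock'},
    'export': 'exp',
    'reconnect-delay': 1,  # seconds before in-flight requests fail after a drop
    'open-timeout': 1,     # seconds to keep retrying the initial connection
}
# issued the same way 245 does it:
# self.vm.qmp('blockdev-add', conv_keys=False, **nbd_node)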
+ time.sleep(2) + + def test_yield_in_iothread(self): + # Move the NBD node to the I/O thread; the NBD block driver should + # attach the connection's QIOChannel to that thread's AioContext, too + result = self.vm.qmp('x-blockdev-set-iothread', + node_name='nbd', iothread='iothr') + self.assert_qmp(result, 'return', {}) + + # Do some I/O that will be throttled by the QSD, so that the network + # connection hopefully will yield here. When it is resumed, it must + # then be resumed in the I/O thread's AioContext. + result = self.vm.qmp('human-monitor-command', + command_line='qemu-io nbd "read 0 128K"') + self.assert_qmp(result, 'return', '') + + def create_nbd_export(self): + assert self.qsd is None + + # Export a throttled null-co BDS: Reads are throttled (max 64 kB/s), + # writes are not. + self.qsd = QemuStorageDaemon( + '--object', + 'throttle-group,id=thrgr,x-bps-read=65536,x-bps-read-max=65536', + + '--blockdev', + 'null-co,node-name=null,read-zeroes=true', + + '--blockdev', + 'throttle,node-name=thr,file=null,throttle-group=thrgr', + + '--nbd-server', + f'addr.type=unix,addr.path={self.sock}', + + '--export', + 'nbd,id=exp,node-name=thr,name=exp,writable=true' + ) + + def stop_nbd_export(self): + self.qsd.stop() + self.qsd = None + if __name__ == '__main__': iotests.main(supported_fmts=['qcow2'], - supported_protocols=['file']) + supported_protocols=['file'], + unsupported_imgopts=['compat']) diff --git a/tests/qemu-iotests/281.out b/tests/qemu-iotests/281.out index 89968f35d7..3f8a935a08 100644 --- a/tests/qemu-iotests/281.out +++ b/tests/qemu-iotests/281.out @@ -1,5 +1,5 @@ -.... +...... ---------------------------------------------------------------------- -Ran 4 tests +Ran 6 tests OK diff --git a/tests/qemu-iotests/283 b/tests/qemu-iotests/283 index 010c22f0a2..5defe48e97 100755 --- a/tests/qemu-iotests/283 +++ b/tests/qemu-iotests/283 @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # group: auto quick # -# Test for backup-top filter permission activation failure +# Test for copy-before-write filter permission conflict # # Copyright (c) 2019 Virtuozzo International GmbH. # @@ -31,13 +31,13 @@ size = 1024 * 1024 """ Test description When performing a backup, all writes on the source subtree must go through the -backup-top filter so it can copy all data to the target before it is changed. -backup-top filter is appended above source node, to achieve this thing, so all -parents of source node are handled. A configuration with side parents of source -sub-tree with write permission is unsupported (we'd have append several -backup-top filter like nodes to handle such parents). The test create an -example of such configuration and checks that a backup is then not allowed -(blockdev-backup command should fail). +copy-before-write filter so it can copy all data to the target before it is +changed. copy-before-write filter is appended above source node, to achieve +this thing, so all parents of source node are handled. A configuration with +side parents of source sub-tree with write permission is unsupported (we'd have +append several copy-before-write filter like nodes to handle such parents). The +test create an example of such configuration and checks that a backup is then +not allowed (blockdev-backup command should fail). The configuration: @@ -57,11 +57,10 @@ The configuration: │ base │ ◀──────────── │ other │ └─────────────┘ └───────┘ -On activation (see .active field of backup-top state in block/backup-top.c), -backup-top is going to unshare write permission on its source child. 
Write -unsharing will be propagated to the "source->base" link and will conflict with -other node write permission. So permission update will fail and backup job will -not be started. +copy-before-write filter wants to unshare write permission on its source child. +Write unsharing will be propagated to the "source->base" link and will conflict +with other node write permission. So permission update will fail and backup job +will not be started. Note, that the only thing which prevents backup of running on such configuration is default permission propagation scheme. It may be altered by @@ -94,18 +93,15 @@ vm.qmp_log('blockdev-add', **{ 'take-child-perms': ['write'] }) -vm.qmp_log('blockdev-backup', sync='full', device='source', target='target') +vm.qmp_log('blockdev-backup', sync='full', device='source', target='target', + job_id="backup0") vm.shutdown() -print('\n=== backup-top should be gone after job-finalize ===\n') +print('\n=== copy-before-write filter should be gone after job-finalize ===\n') -# Check that the backup-top node is gone after job-finalize. -# -# During finalization, the node becomes inactive and can no longer -# function. If it is still present, new parents might be attached, and -# there would be no meaningful way to handle their I/O requests. +# Check that the copy-before-write node is gone after job-finalize. vm = iotests.VM() vm.launch() @@ -131,7 +127,7 @@ vm.qmp_log('blockdev-backup', vm.event_wait('BLOCK_JOB_PENDING', 5.0) -# The backup-top filter should still be present prior to finalization +# The copy-before-write filter should still be present prior to finalization assert vm.node_info('backup-filter') is not None vm.qmp_log('job-finalize', id='backup') diff --git a/tests/qemu-iotests/283.out b/tests/qemu-iotests/283.out index c6e12b15c5..5e4f456db5 100644 --- a/tests/qemu-iotests/283.out +++ b/tests/qemu-iotests/283.out @@ -4,10 +4,10 @@ {"return": {}} {"execute": "blockdev-add", "arguments": {"driver": "blkdebug", "image": "base", "node-name": "other", "take-child-perms": ["write"]}} {"return": {}} -{"execute": "blockdev-backup", "arguments": {"device": "source", "sync": "full", "target": "target"}} -{"error": {"class": "GenericError", "desc": "Cannot append backup-top filter: Permission conflict on node 'base': permissions 'write' are both required by node 'other' (uses node 'base' as 'image' child) and unshared by node 'source' (uses node 'base' as 'image' child)."}} +{"execute": "blockdev-backup", "arguments": {"device": "source", "job-id": "backup0", "sync": "full", "target": "target"}} +{"error": {"class": "GenericError", "desc": "Permission conflict on node 'base': permissions 'write' are both required by node 'other' (uses node 'base' as 'image' child) and unshared by node 'source' (uses node 'base' as 'image' child)."}} -=== backup-top should be gone after job-finalize === +=== copy-before-write filter should be gone after job-finalize === {"execute": "blockdev-add", "arguments": {"driver": "null-co", "node-name": "source"}} {"return": {}} diff --git a/tests/qemu-iotests/287 b/tests/qemu-iotests/287 index 22ce9ff0e4..6414640b21 100755 --- a/tests/qemu-iotests/287 +++ b/tests/qemu-iotests/287 @@ -53,7 +53,7 @@ CLUSTER_SIZE=65536 # Check if we can run this test. 
output=$(_make_test_img -o 'compression_type=zstd' 64M; _cleanup_test_img) -if echo "$output" | grep -q "Invalid parameter 'zstd'"; then +if echo "$output" | grep -q "Parameter 'compression-type' does not accept value 'zstd'"; then _notrun "ZSTD is disabled" fi @@ -61,13 +61,13 @@ echo echo "=== Testing compression type incompatible bit setting for zlib ===" echo _make_test_img -o compression_type=zlib 64M -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header --no-filter-compression | grep incompatible_features echo echo "=== Testing compression type incompatible bit setting for zstd ===" echo _make_test_img -o compression_type=zstd 64M -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header --no-filter-compression | grep incompatible_features echo echo "=== Testing zlib with incompatible bit set ===" @@ -75,7 +75,7 @@ echo _make_test_img -o compression_type=zlib 64M $PYTHON qcow2.py "$TEST_IMG" set-feature-bit incompatible 3 # to make sure the bit was actually set -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header --no-filter-compression | grep incompatible_features if $QEMU_IMG info "$TEST_IMG" >/dev/null 2>&1 ; then echo "Error: The image opened successfully. The image must not be opened." @@ -87,7 +87,7 @@ echo _make_test_img -o compression_type=zstd 64M $PYTHON qcow2.py "$TEST_IMG" set-header incompatible_features 0 # to make sure the bit was actually unset -$PYTHON qcow2.py "$TEST_IMG" dump-header | grep incompatible_features +_qcow2_dump_header --no-filter-compression | grep incompatible_features if $QEMU_IMG info "$TEST_IMG" >/dev/null 2>&1 ; then echo "Error: The image opened successfully. The image must not be opened." diff --git a/tests/qemu-iotests/290 b/tests/qemu-iotests/290 index ed80da2685..776b59de1b 100755 --- a/tests/qemu-iotests/290 +++ b/tests/qemu-iotests/290 @@ -41,7 +41,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 _supported_fmt qcow2 _supported_proto file fuse _supported_os Linux -_unsupported_imgopts 'compat=0.10' refcount_bits data_file +_unsupported_imgopts 'compat=0.10' refcount_bits data_file compression_type echo echo "### Test 'qemu-io -c discard' on a QCOW2 image without a backing file" diff --git a/tests/qemu-iotests/296 b/tests/qemu-iotests/296 index 099a3eeaa5..0d21b740a7 100755 --- a/tests/qemu-iotests/296 +++ b/tests/qemu-iotests/296 @@ -76,7 +76,7 @@ class EncryptionSetupTestCase(iotests.QMPTestCase): # create the encrypted block device using qemu-img def createImg(self, file, secret): - output = iotests.qemu_img_pipe( + iotests.qemu_img( 'create', '--object', *secret.to_cmdline_object(), '-f', iotests.imgfmt, @@ -84,8 +84,7 @@ class EncryptionSetupTestCase(iotests.QMPTestCase): '-o', 'iter-time=10', file, '1M') - - iotests.log(output, filters=[iotests.filter_test_dir]) + iotests.log('') # attempts to add a key using qemu-img def addKey(self, file, secret, new_secret): @@ -99,7 +98,7 @@ class EncryptionSetupTestCase(iotests.QMPTestCase): } } - output = iotests.qemu_img_pipe( + output = iotests.qemu_img( 'amend', '--object', *secret.to_cmdline_object(), '--object', *new_secret.to_cmdline_object(), @@ -108,8 +107,9 @@ class EncryptionSetupTestCase(iotests.QMPTestCase): '-o', 'new-secret=' + new_secret.id(), '-o', 'iter-time=10', - "json:" + json.dumps(image_options) - ) + "json:" + json.dumps(image_options), + check=False # Expected to fail. Log output. 
+ ).stdout iotests.log(output, filters=[iotests.filter_test_dir]) @@ -174,8 +174,12 @@ class EncryptionSetupTestCase(iotests.QMPTestCase): } result = vm.qmp('x-blockdev-amend', **args) - assert result['return'] == {} - vm.run_job('job0') + iotests.log(result) + # Run the job only if it was created + event = ('JOB_STATUS_CHANGE', + {'data': {'id': 'job0', 'status': 'created'}}) + if vm.events_wait([event], timeout=0.0) is not None: + vm.run_job('job0') # test that when the image opened by two qemu processes, # neither of them can update the encryption keys diff --git a/tests/qemu-iotests/296.out b/tests/qemu-iotests/296.out index 6c69735604..609826eaa0 100644 --- a/tests/qemu-iotests/296.out +++ b/tests/qemu-iotests/296.out @@ -1,37 +1,26 @@ -Formatting 'TEST_DIR/test.img', fmt=luks size=1048576 key-secret=keysec0 iter-time=10 +{"return": {}} {"execute": "job-dismiss", "arguments": {"id": "job0"}} {"return": {}} -Job failed: Failed to get shared "consistent read" lock -{"execute": "job-dismiss", "arguments": {"id": "job0"}} -{"return": {}} -Job failed: Failed to get shared "consistent read" lock -{"execute": "job-dismiss", "arguments": {"id": "job0"}} +{"error": {"class": "GenericError", "desc": "Failed to get shared \"consistent read\" lock"}} +{"error": {"class": "GenericError", "desc": "Failed to get shared \"consistent read\" lock"}} {"return": {}} {"execute": "job-dismiss", "arguments": {"id": "job0"}} {"return": {}} qemu-img: Failed to get shared "consistent read" lock Is another process using the image [TEST_DIR/test.img]? -.Formatting 'TEST_DIR/test.img', fmt=luks size=1048576 key-secret=keysec0 iter-time=10 - -Job failed: Block node is read-only -{"execute": "job-dismiss", "arguments": {"id": "job0"}} -{"return": {}} -Job failed: Failed to get shared "consistent read" lock -{"execute": "job-dismiss", "arguments": {"id": "job0"}} -{"return": {}} -Job failed: Failed to get shared "consistent read" lock -{"execute": "job-dismiss", "arguments": {"id": "job0"}} +. +{"error": {"class": "GenericError", "desc": "Block node is read-only"}} +{"error": {"class": "GenericError", "desc": "Failed to get shared \"consistent read\" lock"}} +{"error": {"class": "GenericError", "desc": "Failed to get shared \"consistent read\" lock"}} {"return": {}} {"execute": "job-dismiss", "arguments": {"id": "job0"}} {"return": {}} -.Formatting 'TEST_DIR/test.img', fmt=luks size=1048576 key-secret=keysec0 iter-time=10 - +. {"return": {}} {"error": {"class": "GenericError", "desc": "Failed to get \"write\" lock"}} -.Formatting 'TEST_DIR/test.img', fmt=luks size=1048576 key-secret=keysec0 iter-time=10 - +. {"return": {}} {"return": {}} . diff --git a/tests/qemu-iotests/297 b/tests/qemu-iotests/297 index 433b732336..ee78a62735 100755 --- a/tests/qemu-iotests/297 +++ b/tests/qemu-iotests/297 @@ -17,98 +17,66 @@ # along with this program. If not, see . import os -import re -import shutil import subprocess import sys +from typing import List import iotests +import linters -# TODO: Empty this list! 
-SKIP_FILES = ( - '030', '040', '041', '044', '045', '055', '056', '057', '065', '093', - '096', '118', '124', '132', '136', '139', '147', '148', '149', - '151', '152', '155', '163', '165', '169', '194', '196', '199', '202', - '203', '205', '206', '207', '208', '210', '211', '212', '213', '216', - '218', '219', '222', '224', '228', '234', '235', '236', '237', '238', - '240', '242', '245', '246', '248', '255', '256', '257', '258', '260', - '262', '264', '266', '274', '277', '280', '281', '295', '296', '298', - '299', '302', '303', '304', '307', - 'nbd-fault-injector.py', 'qcow2.py', 'qcow2_format.py', 'qed.py' -) +# Looking for something? +# +# List of files to exclude from linting: linters.py +# mypy configuration: mypy.ini +# pylint configuration: pylintrc -def is_python_file(filename): - if not os.path.isfile(filename): +def check_linter(linter: str) -> bool: + try: + linters.run_linter(linter, ['--version'], suppress_output=True) + except subprocess.CalledProcessError: + iotests.case_notrun(f"'{linter}' not found") return False - - if filename.endswith('.py'): - return True - - with open(filename) as f: - try: - first_line = f.readline() - return re.match('^#!.*python', first_line) is not None - except UnicodeDecodeError: # Ignore binary files - return False + return True -def run_linters(): - files = [filename for filename in (set(os.listdir('.')) - set(SKIP_FILES)) - if is_python_file(filename)] +def test_pylint(files: List[str]) -> None: + print('=== pylint ===') + sys.stdout.flush() + + if not check_linter('pylint'): + return + + linters.run_linter('pylint', files) + + +def test_mypy(files: List[str]) -> None: + print('=== mypy ===') + sys.stdout.flush() + + if not check_linter('mypy'): + return + + env = os.environ.copy() + env['MYPYPATH'] = env['PYTHONPATH'] + + linters.run_linter('mypy', files, env=env, suppress_output=True) + + +def main() -> None: + files = linters.get_test_files() iotests.logger.debug('Files to be checked:') iotests.logger.debug(', '.join(sorted(files))) - print('=== pylint ===') - sys.stdout.flush() - - # Todo notes are fine, but fixme's or xxx's should probably just be - # fixed (in tests, at least) - env = os.environ.copy() - qemu_module_path = os.path.join(os.path.dirname(__file__), - '..', '..', 'python') - try: - env['PYTHONPATH'] += os.pathsep + qemu_module_path - except KeyError: - env['PYTHONPATH'] = qemu_module_path - subprocess.run(('pylint-3', '--score=n', '--notes=FIXME,XXX', *files), - env=env, check=False) - - print('=== mypy ===') - sys.stdout.flush() - - # We have to call mypy separately for each file. Otherwise, it - # will interpret all given files as belonging together (i.e., they - # may not both define the same classes, etc.; most notably, they - # must not both define the __main__ module). - env['MYPYPATH'] = env['PYTHONPATH'] - for filename in files: - p = subprocess.run(('mypy', - '--warn-unused-configs', - '--disallow-subclassing-any', - '--disallow-any-generics', - '--disallow-incomplete-defs', - '--disallow-untyped-decorators', - '--no-implicit-optional', - '--warn-redundant-casts', - '--warn-unused-ignores', - '--no-implicit-reexport', - '--namespace-packages', - filename), - env=env, - check=False, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) - - if p.returncode != 0: - print(p.stdout) + for test in (test_pylint, test_mypy): + try: + test(files) + except subprocess.CalledProcessError as exc: + # Linter failure will be caught by diffing the IO. 
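# Illustrative sketch (not part of the patch): linters.py itself is not in
# this hunk, but judging from the call sites above -- run_linter(name, args,
# env=..., suppress_output=...) -- it is presumably a thin wrapper along
# these lines (an assumption, not the actual helper):
import subprocess
from typing import List, Mapping, Optional

def run_linter_sketch(tool: str, args: List[str],
                      env: Optional[Mapping[str, str]] = None,
                      suppress_output: bool = False) -> None:
    # check=True turns a non-zero exit into CalledProcessError, which main()
    # above catches and reports by printing any captured output.
    subprocess.run([tool, *args], env=env, check=True,
                   stdout=subprocess.PIPE if suppress_output else None,
                   stderr=subprocess.STDOUT if suppress_output else None,
                   universal_newlines=True)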
+ if exc.output: + print(exc.output) -for linter in ('pylint-3', 'mypy'): - if shutil.which(linter) is None: - iotests.notrun(f'{linter} not found') - -iotests.script_main(run_linters) +iotests.script_main(main) diff --git a/tests/qemu-iotests/298 b/tests/qemu-iotests/298 index fae72211b1..ad560e2941 100755 --- a/tests/qemu-iotests/298 +++ b/tests/qemu-iotests/298 @@ -129,16 +129,13 @@ class TestTruncate(iotests.QMPTestCase): os.remove(refdisk) def do_test(self, prealloc_mode, new_size): - ret = iotests.qemu_io_silent('--image-opts', '-c', 'write 0 10M', '-c', - f'truncate -m {prealloc_mode} {new_size}', - drive_opts) - self.assertEqual(ret, 0) + iotests.qemu_io('--image-opts', '-c', 'write 0 10M', '-c', + f'truncate -m {prealloc_mode} {new_size}', + drive_opts) - ret = iotests.qemu_io_silent('-f', iotests.imgfmt, '-c', 'write 0 10M', - '-c', - f'truncate -m {prealloc_mode} {new_size}', - refdisk) - self.assertEqual(ret, 0) + iotests.qemu_io('-f', iotests.imgfmt, '-c', 'write 0 10M', + '-c', f'truncate -m {prealloc_mode} {new_size}', + refdisk) stat = os.stat(disk) refstat = os.stat(refdisk) diff --git a/tests/qemu-iotests/300 b/tests/qemu-iotests/300 index fe94de84ed..dbd28384ec 100755 --- a/tests/qemu-iotests/300 +++ b/tests/qemu-iotests/300 @@ -26,9 +26,6 @@ from typing import Dict, List, Optional import iotests -# Import qemu after iotests.py has amended sys.path -# pylint: disable=wrong-import-order -from qemu.machine import machine BlockBitmapMapping = List[Dict[str, object]] @@ -462,12 +459,11 @@ class TestBlockBitmapMappingErrors(TestDirtyBitmapMigration): f"'{self.src_node_name}': Name is longer than 255 bytes", log) - # Expect abnormal shutdown of the destination VM because of - # the failed migration - try: - self.vm_b.shutdown() - except machine.AbnormalShutdown: - pass + # Destination VM will terminate w/ error of its own accord + # due to the failed migration. 
+ self.vm_b.wait() + rc = self.vm_b.exitcode() + assert rc is not None and rc > 0 def test_aliased_bitmap_name_too_long(self) -> None: # Longer than the maximum for bitmap names diff --git a/tests/qemu-iotests/302 b/tests/qemu-iotests/302 index 5695af4914..a6d79e727b 100755 --- a/tests/qemu-iotests/302 +++ b/tests/qemu-iotests/302 @@ -34,6 +34,7 @@ from iotests import ( qemu_img_measure, qemu_io, qemu_nbd_popen, + img_info_log, ) iotests.script_initialize(supported_fmts=["qcow2"]) @@ -88,6 +89,7 @@ with tarfile.open(tar_file, "w") as tar: tar_file): iotests.log("=== Target image info ===") + # Not img_info_log as it enforces imgfmt, but now we print info on raw qemu_img_log("info", nbd_uri) qemu_img( @@ -99,7 +101,7 @@ with tarfile.open(tar_file, "w") as tar: nbd_uri) iotests.log("=== Converted image info ===") - qemu_img_log("info", nbd_uri) + img_info_log(nbd_uri) iotests.log("=== Converted image check ===") qemu_img_log("check", nbd_uri) diff --git a/tests/qemu-iotests/302.out b/tests/qemu-iotests/302.out index e2f6077e83..3e7c281b91 100644 --- a/tests/qemu-iotests/302.out +++ b/tests/qemu-iotests/302.out @@ -6,14 +6,13 @@ virtual size: 448 KiB (458752 bytes) disk size: unavailable === Converted image info === -image: nbd+unix:///exp?socket=SOCK_DIR/PID-nbd-sock -file format: qcow2 +image: TEST_IMG +file format: IMGFMT virtual size: 1 GiB (1073741824 bytes) -disk size: unavailable cluster_size: 65536 Format specific information: compat: 1.1 - compression type: zlib + compression type: COMPRESSION_TYPE lazy refcounts: false refcount bits: 16 corrupt: false diff --git a/tests/qemu-iotests/303 b/tests/qemu-iotests/303 index 425544c064..a8cc6a23df 100755 --- a/tests/qemu-iotests/303 +++ b/tests/qemu-iotests/303 @@ -21,9 +21,12 @@ import iotests import subprocess -from iotests import qemu_img_create, qemu_io, file_path, log, filter_qemu_io +from iotests import qemu_img_create, qemu_io_log, file_path, log, \ + verify_qcow2_zstd_compression -iotests.script_initialize(supported_fmts=['qcow2']) +iotests.script_initialize(supported_fmts=['qcow2'], + unsupported_imgopts=['refcount_bits', 'compat']) +verify_qcow2_zstd_compression() disk = file_path('disk') chunk = 1024 * 1024 @@ -37,12 +40,12 @@ def create_bitmap(bitmap_number, disabled): if disabled: args.append('--disable') - iotests.qemu_img_pipe(*args) + iotests.qemu_img(*args) def write_to_disk(offset, size): write = f'write {offset} {size}' - log(qemu_io('-c', write, disk), filters=[filter_qemu_io]) + qemu_io_log('-c', write, disk) def add_bitmap(num, begin, end, disabled): @@ -53,12 +56,19 @@ def add_bitmap(num, begin, end, disabled): log('') -qemu_img_create('-f', iotests.imgfmt, disk, '10M') +def test(compression_type: str, json_output: bool) -> None: + qemu_img_create('-f', iotests.imgfmt, + '-o', f'compression_type={compression_type}', + disk, '10M') + add_bitmap(1, 0, 6, False) + add_bitmap(2, 6, 8, True) -add_bitmap(1, 0, 6, False) -add_bitmap(2, 6, 8, True) -dump = ['./qcow2.py', disk, 'dump-header'] -subprocess.run(dump) -# Dump the metadata in JSON format -dump.append('-j') -subprocess.run(dump) + cmd = ['./qcow2.py', disk, 'dump-header'] + if json_output: + cmd.append('-j') + + subprocess.run(cmd) + + +test('zlib', False) +test('zstd', True) diff --git a/tests/qemu-iotests/303.out b/tests/qemu-iotests/303.out index 7c16998587..b3c70827b7 100644 --- a/tests/qemu-iotests/303.out +++ b/tests/qemu-iotests/303.out @@ -80,6 +80,34 @@ extra_data_size 0 Bitmap table type size offset 0 all-zeroes 0 0 +Add bitmap 1 +wrote 1048576/1048576 
bytes at offset 0 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +wrote 1048576/1048576 bytes at offset 1048576 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +wrote 1048576/1048576 bytes at offset 2097152 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +wrote 1048576/1048576 bytes at offset 3145728 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +wrote 1048576/1048576 bytes at offset 4194304 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +wrote 1048576/1048576 bytes at offset 5242880 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + + +Add bitmap 2 +wrote 1048576/1048576 bytes at offset 6291456 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +wrote 1048576/1048576 bytes at offset 7340032 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + + { "magic": 1363560955, "version": 3, @@ -94,7 +122,7 @@ Bitmap table type size offset "refcount_table_clusters": 1, "nb_snapshots": 0, "snapshot_offset": 0, - "incompatible_features": 0, + "incompatible_features": 8, "compatible_features": 0, "autoclear_features": 1, "refcount_order": 4, diff --git a/tests/qemu-iotests/307.out b/tests/qemu-iotests/307.out index ec8d2be0e0..390f05d1b7 100644 --- a/tests/qemu-iotests/307.out +++ b/tests/qemu-iotests/307.out @@ -83,7 +83,7 @@ exports available: 2 export: 'export1' description: This is the writable second export size: 67108864 - flags: 0xced ( flush fua trim zeroes df cache fast-zero ) + flags: 0xded ( flush fua trim zeroes df multi cache fast-zero ) min block: XXX opt block: XXX max block: XXX @@ -109,7 +109,7 @@ exports available: 1 export: 'export1' description: This is the writable second export size: 67108864 - flags: 0xced ( flush fua trim zeroes df cache fast-zero ) + flags: 0xded ( flush fua trim zeroes df multi cache fast-zero ) min block: XXX opt block: XXX max block: XXX diff --git a/tests/qemu-iotests/308 b/tests/qemu-iotests/308 index 6b386bd523..bde4aac2fa 100755 --- a/tests/qemu-iotests/308 +++ b/tests/qemu-iotests/308 @@ -148,7 +148,7 @@ rmdir "$EXT_MP" 2>/dev/null rm -f "$EXT_MP" output=$(fuse_export_add 'export-err' "'mountpoint': '$EXT_MP'" error) -if echo "$output" | grep -q "Invalid parameter 'fuse'"; then +if echo "$output" | grep -q "Parameter 'type' does not accept value 'fuse'"; then _notrun 'No FUSE support' fi @@ -230,8 +230,29 @@ echo '=== Writable export ===' fuse_export_add 'export-mp' "'mountpoint': '$EXT_MP', 'writable': true" # Check that writing to the read-only export fails -$QEMU_IO -f raw -c 'write -P 42 1M 64k' "$TEST_IMG" 2>&1 \ - | _filter_qemu_io | _filter_testdir | _filter_imgfmt +output=$($QEMU_IO -f raw -c 'write -P 42 1M 64k' "$TEST_IMG" 2>&1 \ + | _filter_qemu_io | _filter_testdir | _filter_imgfmt) + +# Expected reference output: Opening the file fails because it has no +# write permission +reference="Could not open 'TEST_DIR/t.IMGFMT': Permission denied" + +if echo "$output" | grep -q "$reference"; then + echo "Writing to read-only export failed: OK" +elif echo "$output" | grep -q "write failed: Permission denied"; then + # With CAP_DAC_OVERRIDE (e.g. when running this test as root), the export + # can be opened regardless of its file permissions, but writing will then + # fail. This is not the result for which we want to test, so count this as + # a SKIP. 
+ _casenotrun "Opening RO export as R/W succeeded, perhaps because of" \ + "CAP_DAC_OVERRIDE" + + # Still, write this to the reference output to make the test pass + echo "Writing to read-only export failed: OK" +else + echo "Writing to read-only export failed: ERROR" + echo "$output" +fi # But here it should work $QEMU_IO -f raw -c 'write -P 42 1M 64k' "$EXT_MP" | _filter_qemu_io diff --git a/tests/qemu-iotests/308.out b/tests/qemu-iotests/308.out index fc47bb11a2..e4467a10cf 100644 --- a/tests/qemu-iotests/308.out +++ b/tests/qemu-iotests/308.out @@ -95,7 +95,7 @@ virtual size: 0 B (0 bytes) 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse', 'writable': true } } {"return": {}} -qemu-io: can't open device TEST_DIR/t.IMGFMT: Could not open 'TEST_DIR/t.IMGFMT': Permission denied +Writing to read-only export failed: OK wrote 65536/65536 bytes at offset 1048576 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) wrote 65536/65536 bytes at offset 1048576 diff --git a/tests/qemu-iotests/310 b/tests/qemu-iotests/310 index 9d9c928c4b..650d2cb6fb 100755 --- a/tests/qemu-iotests/310 +++ b/tests/qemu-iotests/310 @@ -21,7 +21,7 @@ # import iotests -from iotests import log, qemu_img, qemu_io_silent +from iotests import log, qemu_img, qemu_io # Need backing file support iotests.script_initialize(supported_fmts=['qcow2'], @@ -31,7 +31,7 @@ log('') log('=== Copy-on-read across nodes ===') log('') -# This test is similar to the 216 one by Max Reitz +# This test is similar to the 216 one by Hanna Reitz # The difference is that this test case involves a bottom node to the # COR filter driver. @@ -43,16 +43,16 @@ with iotests.FilePath('base.img') as base_img_path, \ log('--- Setting up images ---') log('') - assert qemu_img('create', '-f', iotests.imgfmt, base_img_path, '64M') == 0 - assert qemu_io_silent(base_img_path, '-c', 'write -P 1 0M 1M') == 0 - assert qemu_io_silent(base_img_path, '-c', 'write -P 1 3M 1M') == 0 - assert qemu_img('create', '-f', iotests.imgfmt, '-b', base_img_path, - '-F', iotests.imgfmt, mid_img_path) == 0 - assert qemu_io_silent(mid_img_path, '-c', 'write -P 3 2M 1M') == 0 - assert qemu_io_silent(mid_img_path, '-c', 'write -P 3 4M 1M') == 0 - assert qemu_img('create', '-f', iotests.imgfmt, '-b', mid_img_path, - '-F', iotests.imgfmt, top_img_path) == 0 - assert qemu_io_silent(top_img_path, '-c', 'write -P 2 1M 1M') == 0 + qemu_img('create', '-f', iotests.imgfmt, base_img_path, '64M') + qemu_io(base_img_path, '-c', 'write -P 1 0M 1M') + qemu_io(base_img_path, '-c', 'write -P 1 3M 1M') + qemu_img('create', '-f', iotests.imgfmt, '-b', base_img_path, + '-F', iotests.imgfmt, mid_img_path) + qemu_io(mid_img_path, '-c', 'write -P 3 2M 1M') + qemu_io(mid_img_path, '-c', 'write -P 3 4M 1M') + qemu_img('create', '-f', iotests.imgfmt, '-b', mid_img_path, + '-F', iotests.imgfmt, top_img_path) + qemu_io(top_img_path, '-c', 'write -P 2 1M 1M') # 0 1 2 3 4 # top 2 @@ -105,13 +105,12 @@ with iotests.FilePath('base.img') as base_img_path, \ log('') # Detach backing to check that we can read the data from the top level now - assert qemu_img('rebase', '-u', '-b', '', '-f', iotests.imgfmt, - top_img_path) == 0 + qemu_img('rebase', '-u', '-b', '', '-f', iotests.imgfmt, top_img_path) - assert qemu_io_silent(top_img_path, '-c', 'read -P 0 0 1M') == 0 - assert qemu_io_silent(top_img_path, '-c', 'read -P 2 1M 1M') == 0 - assert qemu_io_silent(top_img_path, '-c', 'read -P 3 2M 1M') == 0 - assert qemu_io_silent(top_img_path, '-c', 'read -P 0 3M 1M') == 0 - assert qemu_io_silent(top_img_path, '-c', 'read -P 3 4M 1M') 
== 0 + qemu_io(top_img_path, '-c', 'read -P 0 0 1M') + qemu_io(top_img_path, '-c', 'read -P 2 1M 1M') + qemu_io(top_img_path, '-c', 'read -P 3 2M 1M') + qemu_io(top_img_path, '-c', 'read -P 0 3M 1M') + qemu_io(top_img_path, '-c', 'read -P 3 4M 1M') log('Done') diff --git a/tests/qemu-iotests/check b/tests/qemu-iotests/check index 2dd529eb75..75de1b4691 100755 --- a/tests/qemu-iotests/check +++ b/tests/qemu-iotests/check @@ -32,15 +32,27 @@ def make_argparser() -> argparse.ArgumentParser: p.add_argument('-n', '--dry-run', action='store_true', help='show me, do not run tests') - p.add_argument('-makecheck', action='store_true', - help='pretty print output for make check') + p.add_argument('-j', dest='jobs', type=int, default=1, + help='run tests in multiple parallel jobs') p.add_argument('-d', dest='debug', action='store_true', help='debug') + p.add_argument('-p', dest='print', action='store_true', + help='redirects qemu\'s stdout and stderr to ' + 'the test output') + p.add_argument('-gdb', action='store_true', + help="start gdbserver with $GDB_OPTIONS options " + "('localhost:12345' if $GDB_OPTIONS is empty)") + p.add_argument('-valgrind', action='store_true', + help='use valgrind, sets VALGRIND_QEMU environment ' + 'variable') + p.add_argument('-misalign', action='store_true', help='misalign memory allocations') p.add_argument('--color', choices=['on', 'off', 'auto'], default='auto', help="use terminal colors. The default " "'auto' value means use colors if terminal stdout detected") + p.add_argument('-tap', action='store_true', + help='produce TAP output') g_env = p.add_argument_group('test environment options') mg = g_env.add_mutually_exclusive_group() @@ -85,9 +97,6 @@ def make_argparser() -> argparse.ArgumentParser: g_bash.add_argument('-o', dest='imgopts', help='options to pass to qemu-img create/convert, ' 'sets IMGOPTS environment variable') - g_bash.add_argument('-valgrind', action='store_true', - help='use valgrind, sets VALGRIND_QEMU environment ' - 'variable') g_sel = p.add_argument_group('test selecting options', 'The following options specify test set ' @@ -114,7 +123,8 @@ if __name__ == '__main__': env = TestEnv(imgfmt=args.imgfmt, imgproto=args.imgproto, aiomode=args.aiomode, cachemode=args.cachemode, imgopts=args.imgopts, misalign=args.misalign, - debug=args.debug, valgrind=args.valgrind) + debug=args.debug, valgrind=args.valgrind, + gdb=args.gdb, qprint=args.print) if len(sys.argv) > 1 and sys.argv[-len(args.tests)-1] == '--': if not args.tests: @@ -154,9 +164,9 @@ if __name__ == '__main__': if args.dry_run: print('\n'.join(tests)) else: - with TestRunner(env, makecheck=args.makecheck, + with TestRunner(env, tap=args.tap, color=args.color) as tr: paths = [os.path.join(env.source_iotests, t) for t in tests] - ok = tr.run_tests(paths) + ok = tr.run_tests(paths, args.jobs) if not ok: sys.exit(1) diff --git a/tests/qemu-iotests/common.config b/tests/qemu-iotests/common.config deleted file mode 100644 index 9bd1a5a6fc..0000000000 --- a/tests/qemu-iotests/common.config +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (C) 2009 Red Hat, Inc. -# Copyright (c) 2000-2003,2006 Silicon Graphics, Inc. All Rights Reserved. -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation. 
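
The reworked ./check above hands the new -j/-tap/-gdb/-p switches straight to TestEnv and TestRunner. A hedged sketch of the same wiring as a standalone function, assuming the module names check itself imports (testenv, testrunner) and default qcow2/file settings:

import os
from testenv import TestEnv
from testrunner import TestRunner

def run_iotests(tests, jobs=1, tap=False, gdb=False, qprint=False):
    # TestEnv now takes gdb/qprint; OUTPUT_DIR and SOCKET_SCM_HELPER are gone.
    env = TestEnv(imgfmt='qcow2', imgproto='file', aiomode='threads',
                  gdb=gdb, qprint=qprint)
    with TestRunner(env, tap=tap, color='auto') as tr:
        paths = [os.path.join(env.source_iotests, t) for t in tests]
        # run_tests() now takes the parallel job count from -j
        return tr.run_tests(paths, jobs)
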
-# -# This program is distributed in the hope that it would be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -# all tests should use a common language setting to prevent golden -# output mismatches. -export LANG=C - -PATH=".:$PATH" - -HOSTOS=$(uname -s) -arch=$(uname -m) -[[ "$arch" =~ "ppc64" ]] && qemu_arch=ppc64 || qemu_arch="$arch" - -# make sure we have a standard umask -umask 022 - -_optstr_add() -{ - if [ -n "$1" ]; then - echo "$1,$2" - else - echo "$2" - fi -} - -# make sure this script returns success -true diff --git a/tests/qemu-iotests/common.filter b/tests/qemu-iotests/common.filter index 2b2b53946c..cc9f1a5891 100644 --- a/tests/qemu-iotests/common.filter +++ b/tests/qemu-iotests/common.filter @@ -21,44 +21,44 @@ _filter_date() { - $SED -re 's/[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/yyyy-mm-dd hh:mm:ss/' + sed -Ee 's/[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/yyyy-mm-dd hh:mm:ss/' } _filter_vmstate_size() { - $SED -r -e 's/[0-9. ]{5} [KMGT]iB/ SIZE/' \ - -e 's/[0-9. ]{5} B/ SIZE/' + sed -E -e 's/[0-9. ]{5} [KMGT]iB/ SIZE/' \ + -e 's/[0-9. ]{5} B/ SIZE/' } _filter_generated_node_ids() { - $SED -re 's/\#block[0-9]{3,}/NODE_NAME/' + sed -Ee 's/\#block[0-9]{3,}/NODE_NAME/' } _filter_qom_path() { - $SED -e '/Attached to:/s/\device[[0-9]\+\]/device[N]/g' + gsed -e '/Attached to:/s/\device[[0-9]\+\]/device[N]/g' } # replace occurrences of the actual TEST_DIR value with TEST_DIR _filter_testdir() { - $SED -e "s#$TEST_DIR/#TEST_DIR/#g" \ - -e "s#$SOCK_DIR/#SOCK_DIR/#g" \ - -e "s#SOCK_DIR/fuse-#TEST_DIR/#g" + sed -e "s#$TEST_DIR/#TEST_DIR/#g" \ + -e "s#$SOCK_DIR/#SOCK_DIR/#g" \ + -e "s#SOCK_DIR/fuse-#TEST_DIR/#g" } # replace occurrences of the actual IMGFMT value with IMGFMT _filter_imgfmt() { - $SED -e "s#$IMGFMT#IMGFMT#g" + sed -e "s#$IMGFMT#IMGFMT#g" } # Replace error message when the format is not supported and delete # the output lines after the first one _filter_qemu_img_check() { - $SED -e '/allocated.*fragmented.*compressed clusters/d' \ + gsed -e '/allocated.*fragmented.*compressed clusters/d' \ -e 's/qemu-img: This image format does not support checks/No errors were found on the image./' \ -e '/Image end offset: [0-9]\+/d' } @@ -66,13 +66,14 @@ _filter_qemu_img_check() # Removes \r from messages _filter_win32() { - $SED -e 's/\r//g' + gsed -e 's/\r//g' } # sanitize qemu-io output _filter_qemu_io() { - _filter_win32 | $SED -e "s/[0-9]* ops\; [0-9/:. sec]* ([0-9/.inf]* [EPTGMKiBbytes]*\/sec and [0-9/.inf]* ops\/sec)/X ops\; XX:XX:XX.X (XXX YYY\/sec and XXX ops\/sec)/" \ + _filter_win32 | \ + gsed -e "s/[0-9]* ops\; [0-9/:. 
sec]* ([0-9/.inf]* [EPTGMKiBbytes]*\/sec and [0-9/.inf]* ops\/sec)/X ops\; XX:XX:XX.X (XXX YYY\/sec and XXX ops\/sec)/" \ -e "s/: line [0-9][0-9]*: *[0-9][0-9]*\( Aborted\| Killed\)/:\1/" \ -e "s/qemu-io> //g" } @@ -80,7 +81,7 @@ _filter_qemu_io() # replace occurrences of QEMU_PROG with "qemu" _filter_qemu() { - $SED -e "s#\\(^\\|(qemu) \\)$(basename $QEMU_PROG):#\1QEMU_PROG:#" \ + gsed -e "s#\\(^\\|(qemu) \\)$(basename $QEMU_PROG):#\1QEMU_PROG:#" \ -e 's#^QEMU [0-9]\+\.[0-9]\+\.[0-9]\+ monitor#QEMU X.Y.Z monitor#' \ -e $'s#\r##' # QEMU monitor uses \r\n line endings } @@ -89,7 +90,7 @@ _filter_qemu() _filter_qmp() { _filter_win32 | \ - $SED -e 's#\("\(micro\)\?seconds": \)[0-9]\+#\1 TIMESTAMP#g' \ + gsed -e 's#\("\(micro\)\?seconds": \)[0-9]\+#\1 TIMESTAMP#g' \ -e 's#^{"QMP":.*}$#QMP_VERSION#' \ -e '/^ "QMP": {\s*$/, /^ }\s*$/ c\' \ -e ' QMP_VERSION' @@ -98,32 +99,32 @@ _filter_qmp() # readline makes HMP command strings so long that git complains _filter_hmp() { - $SED -e $'s/^\\((qemu) \\)\\?.*\e\\[D/\\1/g' \ + gsed -e $'s/^\\((qemu) \\)\\?.*\e\\[D/\\1/g' \ -e $'s/\e\\[K//g' } # replace block job offset _filter_block_job_offset() { - $SED -e 's/, "offset": [0-9]\+,/, "offset": OFFSET,/' + gsed -e 's/, "offset": [0-9]\+,/, "offset": OFFSET,/' } # replace block job len _filter_block_job_len() { - $SED -e 's/, "len": [0-9]\+,/, "len": LEN,/g' + gsed -e 's/, "len": [0-9]\+,/, "len": LEN,/g' } # replace actual image size (depends on the host filesystem) _filter_actual_image_size() { - $SED -s 's/\("actual-size":\s*\)[0-9]\+/\1SIZE/g' + gsed -s 's/\("actual-size":\s*\)[0-9]\+/\1SIZE/g' } # Filename filters for qemu-img create _filter_img_create_filenames() { - $SED \ + sed \ -e "s#$REMOTE_TEST_DIR#TEST_DIR#g" \ -e "s#$IMGPROTO:$TEST_DIR#TEST_DIR#g" \ -e "s#$TEST_DIR#TEST_DIR#g" \ @@ -141,7 +142,7 @@ _do_filter_img_create() # precedes ", fmt=") and the options part ($options, which starts # with "fmt=") # (And just echo everything before the first "^Formatting") - readarray formatting_line < <($SED -e 's/, fmt=/\n/') + readarray formatting_line < <(gsed -e 's/, fmt=/\n/') filename_part=${formatting_line[0]} unset formatting_line[0] @@ -168,11 +169,11 @@ _do_filter_img_create() options=$( echo "$options" \ | tr '\n' '\0' \ - | $SED -e 's/ \([a-z0-9_.-]*\)=/\n\1=/g' \ + | gsed -e 's/ \([a-z0-9_.-]*\)=/\n\1=/g' \ | grep -a -e '^fmt' -e '^size' -e '^backing' -e '^preallocation' \ -e '^encryption' "${grep_data_file[@]}" \ | _filter_img_create_filenames \ - | $SED \ + | sed \ -e 's/^\(fmt\)/0-\1/' \ -e 's/^\(size\)/1-\1/' \ -e 's/^\(backing\)/2-\1/' \ @@ -180,9 +181,9 @@ _do_filter_img_create() -e 's/^\(encryption\)/4-\1/' \ -e 's/^\(preallocation\)/8-\1/' \ | LC_ALL=C sort \ - | $SED -e 's/^[0-9]-//' \ + | sed -e 's/^[0-9]-//' \ | tr '\n\0' ' \n' \ - | $SED -e 's/^ *$//' -e 's/ *$//' + | sed -e 's/^ *$//' -e 's/ *$//' ) if [ -n "$options" ]; then @@ -208,7 +209,7 @@ _filter_img_create() _filter_img_create_size() { - $SED -e "s# size=[0-9]\\+# size=SIZE#g" + gsed -e "s# size=[0-9]\\+# size=SIZE#g" } _filter_img_info() @@ -222,7 +223,7 @@ _filter_img_info() discard=0 regex_json_spec_start='^ *"format-specific": \{' - $SED -e "s#$REMOTE_TEST_DIR#TEST_DIR#g" \ + gsed -e "s#$REMOTE_TEST_DIR#TEST_DIR#g" \ -e "s#$IMGPROTO:$TEST_DIR#TEST_DIR#g" \ -e "s#$TEST_DIR#TEST_DIR#g" \ -e "s#$SOCK_DIR#SOCK_DIR#g" \ @@ -247,6 +248,7 @@ _filter_img_info() -e "/block_state_zero: \\(on\\|off\\)/d" \ -e "/log_size: [0-9]\\+/d" \ -e "s/iters: [0-9]\\+/iters: 1024/" \ + -e 's/\(compression type: 
\)\(zlib\|zstd\)/\1COMPRESSION_TYPE/' \ -e "s/uuid: [-a-f0-9]\\+/uuid: 00000000-0000-0000-0000-000000000000/" | \ while IFS='' read -r line; do if [[ $format_specific == 1 ]]; then @@ -283,7 +285,7 @@ _filter_qemu_img_map() data_file_filter=(-e "s#$data_file_pattern#\\1#") fi - $SED -e 's/\([0-9a-fx]* *[0-9a-fx]* *\)[0-9a-fx]* */\1/g' \ + sed -e 's/\([0-9a-fx]* *[0-9a-fx]* *\)[0-9a-fx]* */\1/g' \ -e 's/"offset": [0-9]\+/"offset": OFFSET/g' \ -e 's/Mapped to *//' \ "${data_file_filter[@]}" \ @@ -297,12 +299,21 @@ _filter_nbd() # receive callbacks sometimes, making them unreliable. # # Filter out the TCP port number since this changes between runs. - $SED -e '/nbd\/.*\.c:/d' \ + sed -e '/nbd\/.*\.c:/d' \ -e 's#127\.0\.0\.1:[0-9]*#127.0.0.1:PORT#g' \ + -e 's#localhost:[0-9]*#localhost:PORT#g' \ + -e 's#host=127\.0\.0\.1,port=[0-9]*#host=127.0.0.1,port=PORT#g' \ + -e 's#host=localhost,port=[0-9]*#host=localhost,port=PORT#g' \ + -e "s#path=$SOCK_DIR#path=SOCK_DIR#g" \ -e "s#?socket=$SOCK_DIR#?socket=SOCK_DIR#g" \ -e 's#\(foo\|PORT/\?\|.sock\): Failed to .*$#\1#' } +_filter_qemu_nbd_exports() +{ + grep '\(exports available\|export\|size\|min block\|qemu-nbd\):' +} + _filter_qmp_empty_return() { grep -v '{"return": {}}' @@ -334,7 +345,14 @@ sys.stdout.write(result)' _filter_authz_check_tls() { - $SED -e 's/TLS x509 authz check for .* is denied/TLS x509 authz check for DISTINGUISHED-NAME is denied/' + sed -e 's/TLS x509 authz check for .* is denied/TLS x509 authz check for DISTINGUISHED-NAME is denied/' +} + +_filter_qcow2_compression_type_bit() +{ + gsed -e 's/\(incompatible_features\s\+\)\[3\(, \)\?/\1[/' \ + -e 's/\(incompatible_features.*\), 3\]/\1]/' \ + -e 's/\(incompatible_features.*\), 3\(,.*\)/\1\2/' } # make sure this script returns success diff --git a/tests/qemu-iotests/common.qemu b/tests/qemu-iotests/common.qemu index 0fc52d20d7..0f1fecc68e 100644 --- a/tests/qemu-iotests/common.qemu +++ b/tests/qemu-iotests/common.qemu @@ -85,7 +85,12 @@ _timed_wait_for() timeout=yes QEMU_STATUS[$h]=0 - while IFS= read -t ${QEMU_COMM_TIMEOUT} resp <&${QEMU_OUT[$h]} + read_timeout="-t ${QEMU_COMM_TIMEOUT}" + if [ -n "${GDB_OPTIONS}" ]; then + read_timeout= + fi + + while IFS= read ${read_timeout} resp <&${QEMU_OUT[$h]} do if [ -n "$capture_events" ]; then capture=0 diff --git a/tests/qemu-iotests/common.rc b/tests/qemu-iotests/common.rc index 609d82de89..db757025cb 100644 --- a/tests/qemu-iotests/common.rc +++ b/tests/qemu-iotests/common.rc @@ -17,17 +17,39 @@ # along with this program. If not, see . # -SED= -for sed in sed gsed; do - ($sed --version | grep 'GNU sed') > /dev/null 2>&1 - if [ "$?" -eq 0 ]; then - SED=$sed - break +export LANG=C + +PATH=".:$PATH" + +HOSTOS=$(uname -s) +arch=$(uname -m) +[[ "$arch" =~ "ppc64" ]] && qemu_arch=ppc64 || qemu_arch="$arch" + +# make sure we have a standard umask +umask 022 + +# bail out, setting up .notrun file +_notrun() +{ + echo "$*" >"$TEST_DIR/$seq.notrun" + echo "$seq not run: $*" + status=0 + exit +} + +if ! command -v gsed >/dev/null 2>&1; then + if sed --version 2>&1 | grep -v 'not GNU sed' | grep 'GNU sed' > /dev/null; + then + gsed() + { + sed "$@" + } + else + gsed() + { + _notrun "GNU sed not available" + } fi -done -if [ -z "$SED" ]; then - echo "$0: GNU sed not found" - exit 1 fi dd() @@ -109,18 +131,14 @@ peek_file_raw() dd if="$1" bs=1 skip="$2" count="$3" status=none } -config=common.config -test -f $config || config=../common.config -if ! test -f $config -then - echo "$0: failed to find common.config" - exit 1 -fi -if ! . 
$config - then - echo "$0: failed to source common.config" - exit 1 -fi +_optstr_add() +{ + if [ -n "$1" ]; then + echo "$1,$2" + else + echo "$2" + fi +} # Set the variables to the empty string to turn Valgrind off # for specific processes, e.g. @@ -166,8 +184,14 @@ _qemu_wrapper() if [ -n "${QEMU_NEED_PID}" ]; then echo $BASHPID > "${QEMU_TEST_DIR}/qemu-${_QEMU_HANDLE}.pid" fi + + GDB="" + if [ -n "${GDB_OPTIONS}" ]; then + GDB="gdbserver ${GDB_OPTIONS}" + fi + VALGRIND_QEMU="${VALGRIND_QEMU_VM}" _qemu_proc_exec "${VALGRIND_LOGFILE}" \ - "$QEMU_PROG" $QEMU_OPTIONS "$@" + $GDB "$QEMU_PROG" $QEMU_OPTIONS "$@" ) RETVAL=$? _qemu_proc_valgrind_log "${VALGRIND_LOGFILE}" $RETVAL @@ -693,6 +717,7 @@ _img_info() -e "s#$TEST_DIR#TEST_DIR#g" \ -e "s#$SOCK_DIR/fuse-#TEST_DIR/#g" \ -e "s#$IMGFMT#IMGFMT#g" \ + -e 's/\(compression type: \)\(zlib\|zstd\)/\1COMPRESSION_TYPE/' \ -e "/^disk size:/ D" \ -e "/actual-size/ D" | \ while IFS='' read -r line; do @@ -715,30 +740,20 @@ _img_info() done } -# bail out, setting up .notrun file -# -_notrun() -{ - echo "$*" >"$OUTPUT_DIR/$seq.notrun" - echo "$seq not run: $*" - status=0 - exit -} - # bail out, setting up .casenotrun file # The function _casenotrun() is used as a notifier. It is the # caller's responsibility to make skipped a particular test. # _casenotrun() { - echo " [case not run] $*" >>"$OUTPUT_DIR/$seq.casenotrun" + echo " [case not run] $*" >>"$TEST_DIR/$seq.casenotrun" } # just plain bail out # _fail() { - echo "$*" | tee -a "$OUTPUT_DIR/$seq.full" + echo "$*" | tee -a "$TEST_DIR/$seq.full" echo "(see $seq.full for details)" status=1 exit 1 @@ -913,7 +928,7 @@ _require_working_luks() IMGFMT='luks' _rm_test_img "$file" if [ $status != 0 ]; then - reason=$(echo "$output" | grep "$file:" | $SED -e "s#.*$file: *##") + reason=$(echo "$output" | grep "$file:" | sed -e "s#.*$file: *##") if [ -z "$reason" ]; then reason="Failed to create a LUKS image" fi @@ -967,7 +982,7 @@ _require_large_file() # _require_devices() { - available=$($QEMU -M none -device help | \ + available=$($QEMU -M none -device help 2> /dev/null | \ grep ^name | sed -e 's/^name "//' -e 's/".*$//') for device do @@ -979,7 +994,7 @@ _require_devices() _require_one_device_of() { - available=$($QEMU -M none -device help | \ + available=$($QEMU -M none -device help 2> /dev/null | \ grep ^name | sed -e 's/^name "//' -e 's/".*$//') for device do @@ -990,5 +1005,26 @@ _require_one_device_of() _notrun "$* not available" } +_qcow2_dump_header() +{ + if [[ "$1" == "--no-filter-compression" ]]; then + local filter_compression=0 + shift + else + local filter_compression=1 + fi + + img="$1" + if [ -z "$img" ]; then + img="$TEST_IMG" + fi + + if [[ $filter_compression == 0 ]]; then + $PYTHON qcow2.py "$img" dump-header + else + $PYTHON qcow2.py "$img" dump-header | _filter_qcow2_compression_type_bit + fi +} + # make sure this script returns success true diff --git a/tests/qemu-iotests/common.tls b/tests/qemu-iotests/common.tls index 6ba28a78d3..b9c5462986 100644 --- a/tests/qemu-iotests/common.tls +++ b/tests/qemu-iotests/common.tls @@ -24,6 +24,7 @@ tls_x509_cleanup() { rm -f "${tls_dir}"/*.pem rm -f "${tls_dir}"/*/*.pem + rm -f "${tls_dir}"/*/*.psk rmdir "${tls_dir}"/* rmdir "${tls_dir}" } @@ -40,6 +41,18 @@ tls_certtool() rm -f "${tls_dir}"/certtool.log } +tls_psktool() +{ + psktool "$@" 1>"${tls_dir}"/psktool.log 2>&1 + if test "$?" 
= 0; then + head -1 "${tls_dir}"/psktool.log + else + cat "${tls_dir}"/psktool.log + fi + rm -f "${tls_dir}"/psktool.log +} + + tls_x509_init() { (certtool --help) >/dev/null 2>&1 || \ @@ -118,12 +131,13 @@ tls_x509_create_server() caname=$1 name=$2 + # We don't include 'localhost' in the cert, as + # we want to keep it unlisted to let tests + # validate hostname override mkdir -p "${tls_dir}/$name" cat > "${tls_dir}/cert.info" <. # +import argparse import atexit import bz2 from collections import OrderedDict @@ -30,16 +31,15 @@ import struct import subprocess import sys import time -from typing import (Any, Callable, Dict, Iterable, +from typing import (Any, Callable, Dict, Iterable, Iterator, List, Optional, Sequence, TextIO, Tuple, Type, TypeVar) import unittest from contextlib import contextmanager -# pylint: disable=import-error, wrong-import-position -sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) from qemu.machine import qtest -from qemu.qmp import QMPMessage +from qemu.qmp.legacy import QMPMessage, QEMUMonitorProtocol +from qemu.utils import VerboseProcessError # Use this logger for logging messages directly from the iotests module logger = logging.getLogger('qemu.iotests') @@ -74,9 +74,17 @@ if os.environ.get('QEMU_NBD_OPTIONS'): qemu_prog = os.environ.get('QEMU_PROG', 'qemu') qemu_opts = os.environ.get('QEMU_OPTIONS', '').strip().split(' ') +qsd_prog = os.environ.get('QSD_PROG', 'qemu-storage-daemon') + +gdb_qemu_env = os.environ.get('GDB_OPTIONS') +qemu_gdb = [] +if gdb_qemu_env: + qemu_gdb = ['gdbserver'] + gdb_qemu_env.strip().split(' ') + +qemu_print = os.environ.get('PRINT_QEMU', False) + imgfmt = os.environ.get('IMGFMT', 'raw') imgproto = os.environ.get('IMGPROTO', 'file') -output_dir = os.environ.get('OUTPUT_DIR', '.') try: test_dir = os.environ['TEST_DIR'] @@ -91,7 +99,16 @@ except KeyError: sys.stderr.write('Please run this test via the "check" script\n') sys.exit(os.EX_USAGE) -socket_scm_helper = os.environ.get('SOCKET_SCM_HELPER', 'socket_scm_helper') +qemu_valgrind = [] +if os.environ.get('VALGRIND_QEMU') == "y" and \ + os.environ.get('NO_VALGRIND') != "y": + valgrind_logfile = "--log-file=" + test_dir + # %p allows to put the valgrind process PID, since + # we don't know it a priori (subprocess.Popen is + # not yet invoked) + valgrind_logfile += "/%p.valgrind" + + qemu_valgrind = ['valgrind', valgrind_logfile, '--error-exitcode=99'] luks_default_secret_object = 'secret,id=keysec0,data=' + \ os.environ.get('IMGKEYSECRET', '') @@ -100,37 +117,149 @@ luks_default_key_secret_opt = 'key-secret=keysec0' sample_img_dir = os.environ['SAMPLE_IMG_DIR'] +@contextmanager +def change_log_level( + logger_name: str, level: int = logging.CRITICAL) -> Iterator[None]: + """ + Utility function for temporarily changing the log level of a logger. + + This can be used to silence errors that are expected or uninteresting. 
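
A short usage sketch for the context manager above; the 'qemu.machine' logger name is only an example of a noisy logger a test might want to quiet:

import logging
import iotests

# Raise the threshold of an (example) logger while provoking an expected
# error, then restore whatever level was previously set.
with iotests.change_log_level('qemu.machine', logging.CRITICAL):
    logging.getLogger('qemu.machine').error('suppressed while inside the block')
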
+ """ + _logger = logging.getLogger(logger_name) + current_level = _logger.level + _logger.setLevel(level) + + try: + yield + finally: + _logger.setLevel(current_level) + + def unarchive_sample_image(sample, fname): sample_fname = os.path.join(sample_img_dir, sample + '.bz2') with bz2.open(sample_fname) as f_in, open(fname, 'wb') as f_out: shutil.copyfileobj(f_in, f_out) +def qemu_tool_popen(args: Sequence[str], + connect_stderr: bool = True) -> 'subprocess.Popen[str]': + stderr = subprocess.STDOUT if connect_stderr else None + # pylint: disable=consider-using-with + return subprocess.Popen(args, + stdout=subprocess.PIPE, + stderr=stderr, + universal_newlines=True) + + def qemu_tool_pipe_and_status(tool: str, args: Sequence[str], - connect_stderr: bool = True) -> Tuple[str, int]: + connect_stderr: bool = True, + drop_successful_output: bool = False) \ + -> Tuple[str, int]: """ Run a tool and return both its output and its exit code """ - stderr = subprocess.STDOUT if connect_stderr else None - with subprocess.Popen(args, stdout=subprocess.PIPE, - stderr=stderr, universal_newlines=True) as subp: + with qemu_tool_popen(args, connect_stderr) as subp: output = subp.communicate()[0] if subp.returncode < 0: cmd = ' '.join(args) sys.stderr.write(f'{tool} received signal \ {-subp.returncode}: {cmd}\n') + if drop_successful_output and subp.returncode == 0: + output = '' return (output, subp.returncode) -def qemu_img_pipe_and_status(*args: str) -> Tuple[str, int]: - """ - Run qemu-img and return both its output and its exit code - """ - full_args = qemu_img_args + list(args) - return qemu_tool_pipe_and_status('qemu-img', full_args) +def qemu_img_create_prepare_args(args: List[str]) -> List[str]: + if not args or args[0] != 'create': + return list(args) + args = args[1:] + + p = argparse.ArgumentParser(allow_abbrev=False) + # -o option may be specified several times + p.add_argument('-o', action='append', default=[]) + p.add_argument('-f') + parsed, remaining = p.parse_known_args(args) + + opts_list = parsed.o + + result = ['create'] + if parsed.f is not None: + result += ['-f', parsed.f] + + # IMGOPTS most probably contain options specific for the selected format, + # like extended_l2 or compression_type for qcow2. Test may want to create + # additional images in other formats that doesn't support these options. + # So, use IMGOPTS only for images created in imgfmt format. + imgopts = os.environ.get('IMGOPTS') + if imgopts and parsed.f == imgfmt: + opts_list.insert(0, imgopts) + + # default luks support + if parsed.f == 'luks' and \ + all('key-secret' not in opts for opts in opts_list): + result += ['--object', luks_default_secret_object] + opts_list.append(luks_default_key_secret_opt) + + for opts in opts_list: + result += ['-o', opts] + + result += remaining + + return result + + +def qemu_tool(*args: str, check: bool = True, combine_stdio: bool = True + ) -> 'subprocess.CompletedProcess[str]': + """ + Run a qemu tool and return its status code and console output. + + :param args: full command line to run. + :param check: Enforce a return code of zero. + :param combine_stdio: set to False to keep stdout/stderr separated. + + :raise VerboseProcessError: + When the return code is negative, or on any non-zero exit code + when 'check=True' was provided (the default). This exception has + 'stdout', 'stderr', and 'returncode' properties that may be + inspected to show greater detail. 
If this exception is not + handled, the command-line, return code, and all console output + will be included at the bottom of the stack trace. + + :return: + a CompletedProcess. This object has args, returncode, and stdout + properties. If streams are not combined, it will also have a + stderr property. + """ + subp = subprocess.run( + args, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT if combine_stdio else subprocess.PIPE, + universal_newlines=True, + check=False + ) + + if check and subp.returncode or (subp.returncode < 0): + raise VerboseProcessError( + subp.returncode, args, + output=subp.stdout, + stderr=subp.stderr, + ) + + return subp + + +def qemu_img(*args: str, check: bool = True, combine_stdio: bool = True + ) -> 'subprocess.CompletedProcess[str]': + """ + Run QEMU_IMG_PROG and return its status code and console output. + + This function always prepends QEMU_IMG_OPTIONS and may further alter + the args for 'create' commands. + + See `qemu_tool()` for greater detail. + """ + full_args = qemu_img_args + qemu_img_create_prepare_args(list(args)) + return qemu_tool(*full_args, check=check, combine_stdio=combine_stdio) -def qemu_img(*args: str) -> int: - '''Run qemu-img and return the exit code''' - return qemu_img_pipe_and_status(*args)[1] def ordered_qmp(qmsg, conv_keys=True): # Dictionaries are not ordered prior to 3.6, therefore: @@ -145,96 +274,105 @@ def ordered_qmp(qmsg, conv_keys=True): return od return qmsg -def qemu_img_create(*args): - args = list(args) +def qemu_img_create(*args: str) -> 'subprocess.CompletedProcess[str]': + return qemu_img('create', *args) - # default luks support - if '-f' in args and args[args.index('-f') + 1] == 'luks': - if '-o' in args: - i = args.index('-o') - if 'key-secret' not in args[i + 1]: - args[i + 1].append(luks_default_key_secret_opt) - args.insert(i + 2, '--object') - args.insert(i + 3, luks_default_secret_object) - else: - args = ['-o', luks_default_key_secret_opt, - '--object', luks_default_secret_object] + args +def qemu_img_json(*args: str) -> Any: + """ + Run qemu-img and return its output as deserialized JSON. - args.insert(0, 'create') + :raise CalledProcessError: + When qemu-img crashes, or returns a non-zero exit code without + producing a valid JSON document to stdout. + :raise JSONDecoderError: + When qemu-img returns 0, but failed to produce a valid JSON document. - return qemu_img(*args) + :return: A deserialized JSON object; probably a dict[str, Any]. + """ + try: + res = qemu_img(*args, combine_stdio=False) + except subprocess.CalledProcessError as exc: + # Terminated due to signal. Don't bother. + if exc.returncode < 0: + raise -def qemu_img_measure(*args): - return json.loads(qemu_img_pipe("measure", "--output", "json", *args)) + # Commands like 'check' can return failure (exit codes 2 and 3) + # to indicate command completion, but with errors found. For + # multi-command flexibility, ignore the exact error codes and + # *try* to load JSON. + try: + return json.loads(exc.stdout) + except json.JSONDecodeError: + # Nope. This thing is toast. Raise the /process/ error. 
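
The fallback above is what lets callers treat qemu-img's "completed, but found problems" exit codes (2 and 3 for check) as data rather than as failures. A hedged usage sketch; 'dirty.qcow2' is a made-up path:

import iotests

# Returns the parsed JSON report even when qemu-img check exits non-zero
# because corruptions or leaks were found; only a crash or a run that
# produced no valid JSON escalates to an exception.
report = iotests.qemu_img_check('-f', 'qcow2', 'dirty.qcow2')
print(report.get('check-errors'), report.get('corruptions'), report.get('leaks'))
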
+ pass + raise -def qemu_img_check(*args): - return json.loads(qemu_img_pipe("check", "--output", "json", *args)) + return json.loads(res.stdout) -def qemu_img_verbose(*args): - '''Run qemu-img without suppressing its output and return the exit code''' - exitcode = subprocess.call(qemu_img_args + list(args)) - if exitcode < 0: - sys.stderr.write('qemu-img received signal %i: %s\n' - % (-exitcode, ' '.join(qemu_img_args + list(args)))) - return exitcode +def qemu_img_measure(*args: str) -> Any: + return qemu_img_json("measure", "--output", "json", *args) -def qemu_img_pipe(*args: str) -> str: - '''Run qemu-img and return its output''' - return qemu_img_pipe_and_status(*args)[0] +def qemu_img_check(*args: str) -> Any: + return qemu_img_json("check", "--output", "json", *args) -def qemu_img_log(*args): - result = qemu_img_pipe(*args) - log(result, filters=[filter_testfiles]) +def qemu_img_info(*args: str) -> Any: + return qemu_img_json('info', "--output", "json", *args) + +def qemu_img_map(*args: str) -> Any: + return qemu_img_json('map', "--output", "json", *args) + +def qemu_img_log(*args: str, check: bool = True + ) -> 'subprocess.CompletedProcess[str]': + result = qemu_img(*args, check=check) + log(result.stdout, filters=[filter_testfiles]) return result -def img_info_log(filename, filter_path=None, imgopts=False, extra_args=()): +def img_info_log(filename: str, filter_path: Optional[str] = None, + use_image_opts: bool = False, extra_args: Sequence[str] = (), + check: bool = True, + ) -> None: args = ['info'] - if imgopts: + if use_image_opts: args.append('--image-opts') else: args += ['-f', imgfmt] args += extra_args args.append(filename) - output = qemu_img_pipe(*args) + output = qemu_img(*args, check=check).stdout if not filter_path: filter_path = filename log(filter_img_info(output, filter_path)) -def qemu_io(*args): - '''Run qemu-io and return the stdout data''' - args = qemu_io_args + list(args) - return qemu_tool_pipe_and_status('qemu-io', args)[0] - -def qemu_io_log(*args): - result = qemu_io(*args) - log(result, filters=[filter_testfiles, filter_qemu_io]) - return result - -def qemu_io_silent(*args): - '''Run qemu-io and return the exit code, suppressing stdout''' +def qemu_io_wrap_args(args: Sequence[str]) -> List[str]: if '-f' in args or '--image-opts' in args: - default_args = qemu_io_args_no_fmt + return qemu_io_args_no_fmt + list(args) else: - default_args = qemu_io_args + return qemu_io_args + list(args) - args = default_args + list(args) - exitcode = subprocess.call(args, stdout=open('/dev/null', 'w')) - if exitcode < 0: - sys.stderr.write('qemu-io received signal %i: %s\n' % - (-exitcode, ' '.join(args))) - return exitcode +def qemu_io_popen(*args): + return qemu_tool_popen(qemu_io_wrap_args(args)) -def qemu_io_silent_check(*args): - '''Run qemu-io and return the true if subprocess returned 0''' - args = qemu_io_args + list(args) - exitcode = subprocess.call(args, stdout=open('/dev/null', 'w'), - stderr=subprocess.STDOUT) - return exitcode == 0 +def qemu_io(*args: str, check: bool = True, combine_stdio: bool = True + ) -> 'subprocess.CompletedProcess[str]': + """ + Run QEMU_IO_PROG and return the status code and console output. + + This function always prepends either QEMU_IO_OPTIONS or + QEMU_IO_OPTIONS_NO_FMT. 
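
When a failure is the expected outcome, the new helpers take check=False and hand back the CompletedProcess for inspection, which is how probes like has_working_luks() further down operate. A minimal sketch with a deliberately missing image path:

import iotests

res = iotests.qemu_io('-c', 'read 0 64k', '/no/such/image', check=False)
if res.returncode != 0:
    # stdout and stderr are combined by default, so the error text is here
    print('expected failure:', res.stdout.strip())
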
+ """ + return qemu_tool(*qemu_io_wrap_args(args), + check=check, combine_stdio=combine_stdio) + +def qemu_io_log(*args: str, check: bool = True + ) -> 'subprocess.CompletedProcess[str]': + result = qemu_io(*args, check=check) + log(result.stdout, filters=[filter_testfiles, filter_qemu_io]) + return result class QemuIoInteractive: def __init__(self, *args): - self.args = qemu_io_args_no_fmt + list(args) + self.args = qemu_io_wrap_args(args) # We need to keep the Popen objext around, and not # close it immediately. Therefore, disable the pylint check: # pylint: disable=consider-using-with @@ -280,6 +418,73 @@ class QemuIoInteractive: return self._read_output() +class QemuStorageDaemon: + _qmp: Optional[QEMUMonitorProtocol] = None + _qmpsock: Optional[str] = None + # Python < 3.8 would complain if this type were not a string literal + # (importing `annotations` from `__future__` would work; but not on <= 3.6) + _p: 'Optional[subprocess.Popen[bytes]]' = None + + def __init__(self, *args: str, instance_id: str = 'a', qmp: bool = False): + assert '--pidfile' not in args + self.pidfile = os.path.join(test_dir, f'qsd-{instance_id}-pid') + all_args = [qsd_prog] + list(args) + ['--pidfile', self.pidfile] + + if qmp: + self._qmpsock = os.path.join(sock_dir, f'qsd-{instance_id}.sock') + all_args += ['--chardev', + f'socket,id=qmp-sock,path={self._qmpsock}', + '--monitor', 'qmp-sock'] + + self._qmp = QEMUMonitorProtocol(self._qmpsock, server=True) + + # Cannot use with here, we want the subprocess to stay around + # pylint: disable=consider-using-with + self._p = subprocess.Popen(all_args) + if self._qmp is not None: + self._qmp.accept() + while not os.path.exists(self.pidfile): + if self._p.poll() is not None: + cmd = ' '.join(all_args) + raise RuntimeError( + 'qemu-storage-daemon terminated with exit code ' + + f'{self._p.returncode}: {cmd}') + + time.sleep(0.01) + + with open(self.pidfile, encoding='utf-8') as f: + self._pid = int(f.read().strip()) + + assert self._pid == self._p.pid + + def qmp(self, cmd: str, args: Optional[Dict[str, object]] = None) \ + -> QMPMessage: + assert self._qmp is not None + return self._qmp.cmd(cmd, args) + + def stop(self, kill_signal=15): + self._p.send_signal(kill_signal) + self._p.wait() + self._p = None + + if self._qmp: + self._qmp.close() + + if self._qmpsock is not None: + try: + os.remove(self._qmpsock) + except OSError: + pass + try: + os.remove(self.pidfile) + except OSError: + pass + + def __del__(self): + if self._p is not None: + self.stop(kill_signal=9) + + def qemu_nbd(*args): '''Run qemu-nbd in daemon mode and return the parent's exit code''' return subprocess.call(qemu_nbd_args + ['--fork'] + list(args)) @@ -328,10 +533,22 @@ def qemu_nbd_popen(*args): p.kill() p.wait() -def compare_images(img1, img2, fmt1=imgfmt, fmt2=imgfmt): - '''Return True if two image files are identical''' - return qemu_img('compare', '-f', fmt1, - '-F', fmt2, img1, img2) == 0 +def compare_images(img1: str, img2: str, + fmt1: str = imgfmt, fmt2: str = imgfmt) -> bool: + """ + Compare two images with QEMU_IMG; return True if they are identical. + + :raise CalledProcessError: + when qemu-img crashes or returns a status code of anything other + than 0 (identical) or 1 (different). 
+ """ + try: + qemu_img('compare', '-f', fmt1, '-F', fmt2, img1, img2) + return True + except subprocess.CalledProcessError as exc: + if exc.returncode == 1: + return False + raise def create_image(name, size): '''Create a fully-allocated raw image with sector markers''' @@ -342,10 +559,14 @@ def create_image(name, size): file.write(sector) i = i + 512 -def image_size(img): - '''Return image's virtual size''' - r = qemu_img_pipe('info', '--output=json', '-f', imgfmt, img) - return json.loads(r)['virtual-size'] +def image_size(img: str) -> int: + """Return image's virtual size""" + value = qemu_img_info('-f', imgfmt, img)['virtual-size'] + if not isinstance(value, int): + type_name = type(value).__name__ + raise TypeError("Expected 'int' for 'virtual-size', " + f"got '{value}' of type '{type_name}'") + return value def is_str(val): return isinstance(val, str) @@ -384,8 +605,10 @@ def filter_qmp(qmsg, filter_fn): # Iterate through either lists or dicts; if isinstance(qmsg, list): items = enumerate(qmsg) - else: + elif isinstance(qmsg, dict): items = qmsg.items() + else: + return filter_fn(None, qmsg) for k, v in items: if isinstance(v, (dict, list)): @@ -432,6 +655,8 @@ def filter_img_info(output, filename): 'uuid: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX', line) line = re.sub('cid: [0-9]+', 'cid: XXXXXXXXXX', line) + line = re.sub('(compression type: )(zlib|zstd)', r'\1COMPRESSION_TYPE', + line) lines.append(line) return '\n'.join(lines) @@ -472,10 +697,14 @@ class Timeout: self.seconds = seconds self.errmsg = errmsg def __enter__(self): + if qemu_gdb or qemu_valgrind: + return self signal.signal(signal.SIGALRM, self.timeout) signal.setitimer(signal.ITIMER_REAL, self.seconds) return self def __exit__(self, exc_type, value, traceback): + if qemu_gdb or qemu_valgrind: + return False signal.setitimer(signal.ITIMER_REAL, 0) return False def timeout(self, signum, frame): @@ -570,12 +799,34 @@ class VM(qtest.QEMUQtestMachine): def __init__(self, path_suffix=''): name = "qemu%s-%d" % (path_suffix, os.getpid()) - super().__init__(qemu_prog, qemu_opts, name=name, + timer = 15.0 if not (qemu_gdb or qemu_valgrind) else None + if qemu_gdb and qemu_valgrind: + sys.stderr.write('gdb and valgrind are mutually exclusive\n') + sys.exit(1) + wrapper = qemu_gdb if qemu_gdb else qemu_valgrind + super().__init__(qemu_prog, qemu_opts, wrapper=wrapper, + name=name, base_temp_dir=test_dir, - socket_scm_helper=socket_scm_helper, - sock_dir=sock_dir) + sock_dir=sock_dir, qmp_timer=timer) self._num_drives = 0 + def _post_shutdown(self) -> None: + super()._post_shutdown() + if not qemu_valgrind or not self._popen: + return + valgrind_filename = f"{test_dir}/{self._popen.pid}.valgrind" + if self.exitcode() == 99: + with open(valgrind_filename, encoding='utf-8') as f: + print(f.read()) + else: + os.remove(valgrind_filename) + + def _pre_launch(self) -> None: + super()._pre_launch() + if qemu_print: + # set QEMU binary output to stdout + self._close_qemu_log_file() + def add_object(self, opts): self._args.append('-object') self._args.append(opts) @@ -651,13 +902,14 @@ class VM(qtest.QEMUQtestMachine): self.hmp(f'qemu-io {drive} "remove_break bp_{drive}"') def hmp_qemu_io(self, drive: str, cmd: str, - use_log: bool = False) -> QMPMessage: + use_log: bool = False, qdev: bool = False) -> QMPMessage: """Write to a given drive using an HMP command""" - return self.hmp(f'qemu-io {drive} "{cmd}"', use_log=use_log) + d = '-d ' if qdev else '' + return self.hmp(f'qemu-io {d}{drive} "{cmd}"', use_log=use_log) def 
flatten_qmp_object(self, obj, output=None, basestr=''): if output is None: - output = dict() + output = {} if isinstance(obj, list): for i, item in enumerate(obj): self.flatten_qmp_object(item, output, basestr + str(i) + '.') @@ -670,7 +922,7 @@ class VM(qtest.QEMUQtestMachine): def qmp_to_opts(self, obj): obj = self.flatten_qmp_object(obj) - output_list = list() + output_list = [] for key in obj: output_list += [key + '=' + obj[key]] return ','.join(output_list) @@ -692,8 +944,12 @@ class VM(qtest.QEMUQtestMachine): return result # Returns None on success, and an error string on failure - def run_job(self, job, auto_finalize=True, auto_dismiss=False, - pre_finalize=None, cancel=False, wait=60.0): + def run_job(self, job: str, auto_finalize: bool = True, + auto_dismiss: bool = False, + pre_finalize: Optional[Callable[[], None]] = None, + cancel: bool = False, wait: float = 60.0, + filters: Iterable[Callable[[Any], Any]] = (), + ) -> Optional[str]: """ run_job moves a job from creation through to dismissal. @@ -723,7 +979,7 @@ class VM(qtest.QEMUQtestMachine): while True: ev = filter_qmp_event(self.events_wait(events, timeout=wait)) if ev['event'] != 'JOB_STATUS_CHANGE': - log(ev) + log(ev, filters=filters) continue status = ev['data']['status'] if status == 'aborting': @@ -731,18 +987,18 @@ class VM(qtest.QEMUQtestMachine): for j in result['return']: if j['id'] == job: error = j['error'] - log('Job failed: %s' % (j['error'])) + log('Job failed: %s' % (j['error']), filters=filters) elif status == 'ready': - self.qmp_log('job-complete', id=job) + self.qmp_log('job-complete', id=job, filters=filters) elif status == 'pending' and not auto_finalize: if pre_finalize: pre_finalize() if cancel: - self.qmp_log('job-cancel', id=job) + self.qmp_log('job-cancel', id=job, filters=filters) else: - self.qmp_log('job-finalize', id=job) + self.qmp_log('job-finalize', id=job, filters=filters) elif status == 'concluded' and not auto_dismiss: - self.qmp_log('job-dismiss', id=job) + self.qmp_log('job-dismiss', id=job, filters=filters) elif status == 'null': return error @@ -755,7 +1011,7 @@ class VM(qtest.QEMUQtestMachine): if 'return' in result: assert result['return'] == {} - job_result = self.run_job(job_id) + job_result = self.run_job(job_id, filters=filters) else: job_result = result['error'] @@ -1075,7 +1331,9 @@ def notrun(reason): # Each test in qemu-iotests has a number ("seq") seq = os.path.basename(sys.argv[0]) - open('%s/%s.notrun' % (output_dir, seq), 'w').write(reason + '\n') + with open('%s/%s.notrun' % (test_dir, seq), 'w', encoding='utf-8') \ + as outfile: + outfile.write(reason + '\n') logger.warning("%s not run: %s", seq, reason) sys.exit(0) @@ -1088,8 +1346,9 @@ def case_notrun(reason): # Each test in qemu-iotests has a number ("seq") seq = os.path.basename(sys.argv[0]) - open('%s/%s.casenotrun' % (output_dir, seq), 'a').write( - ' [case not run] ' + reason + '\n') + with open('%s/%s.casenotrun' % (test_dir, seq), 'a', encoding='utf-8') \ + as outfile: + outfile.write(' [case not run] ' + reason + '\n') def _verify_image_format(supported_fmts: Sequence[str] = (), unsupported_fmts: Sequence[str] = ()) -> None: @@ -1152,8 +1411,19 @@ def _verify_virtio_scsi_pci_or_ccw() -> None: notrun('Missing virtio-scsi-pci or virtio-scsi-ccw in QEMU binary') -def supports_quorum(): - return 'quorum' in qemu_img_pipe('--help') +def _verify_imgopts(unsupported: Sequence[str] = ()) -> None: + imgopts = os.environ.get('IMGOPTS') + # One of usage examples for IMGOPTS is "data_file=$TEST_IMG.ext_data_file" + # 
but it supported only for bash tests. We don't have a concept of global + # TEST_IMG in iotests.py, not saying about somehow parsing $variables. + # So, for simplicity let's just not support any IMGOPTS with '$' inside. + unsup = list(unsupported) + ['$'] + if imgopts and any(x in imgopts for x in unsup): + notrun(f'not suitable for this imgopts: {imgopts}') + + +def supports_quorum() -> bool: + return 'quorum' in qemu_img('--help').stdout def verify_quorum(): '''Skip test suite if quorum support is not available''' @@ -1169,20 +1439,20 @@ def has_working_luks() -> Tuple[bool, str]: """ img_file = f'{test_dir}/luks-test.luks' - (output, status) = \ - qemu_img_pipe_and_status('create', '-f', 'luks', - '--object', luks_default_secret_object, - '-o', luks_default_key_secret_opt, - '-o', 'iter-time=10', - img_file, '1G') + res = qemu_img('create', '-f', 'luks', + '--object', luks_default_secret_object, + '-o', luks_default_key_secret_opt, + '-o', 'iter-time=10', + img_file, '1G', + check=False) try: os.remove(img_file) except OSError: pass - if status != 0: - reason = output - for line in output.splitlines(): + if res.returncode: + reason = res.stdout + for line in res.stdout.splitlines(): if img_file + ':' in line: reason = line.split(img_file + ':', 1)[1].strip() break @@ -1199,6 +1469,26 @@ def verify_working_luks(): if not working: notrun(reason) +def supports_qcow2_zstd_compression() -> bool: + img_file = f'{test_dir}/qcow2-zstd-test.qcow2' + res = qemu_img('create', '-f', 'qcow2', '-o', 'compression_type=zstd', + img_file, '0', + check=False) + try: + os.remove(img_file) + except OSError: + pass + + if res.returncode == 1 and \ + "'compression-type' does not accept value 'zstd'" in res.stdout: + return False + else: + return True + +def verify_qcow2_zstd_compression(): + if not supports_qcow2_zstd_compression(): + notrun('zstd compression not supported') + def qemu_pipe(*args: str) -> str: """ Run qemu with an option to print something and exit (e.g. a help option). @@ -1301,8 +1591,9 @@ class ReproducibleStreamWrapper: class ReproducibleTestRunner(unittest.TextTestRunner): def __init__(self, stream: Optional[TextIO] = None, - resultclass: Type[unittest.TestResult] = ReproducibleTestResult, - **kwargs: Any) -> None: + resultclass: Type[unittest.TestResult] = + ReproducibleTestResult, + **kwargs: Any) -> None: rstream = ReproducibleStreamWrapper(stream or sys.stdout) super().__init__(stream=rstream, # type: ignore descriptions=True, @@ -1327,7 +1618,8 @@ def execute_setup_common(supported_fmts: Sequence[str] = (), unsupported_fmts: Sequence[str] = (), supported_protocols: Sequence[str] = (), unsupported_protocols: Sequence[str] = (), - required_fmts: Sequence[str] = ()) -> bool: + required_fmts: Sequence[str] = (), + unsupported_imgopts: Sequence[str] = ()) -> bool: """ Perform necessary setup for either script-style or unittest-style tests. @@ -1347,6 +1639,7 @@ def execute_setup_common(supported_fmts: Sequence[str] = (), _verify_aio_mode(supported_aio_modes) _verify_formats(required_fmts) _verify_virtio_blk() + _verify_imgopts(unsupported_imgopts) return debug diff --git a/tests/qemu-iotests/linters.py b/tests/qemu-iotests/linters.py new file mode 100644 index 0000000000..65c4c4e827 --- /dev/null +++ b/tests/qemu-iotests/linters.py @@ -0,0 +1,105 @@ +# Copyright (C) 2020 Red Hat, Inc. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os +import re +import subprocess +import sys +from typing import List, Mapping, Optional + + +# TODO: Empty this list! +SKIP_FILES = ( + '030', '040', '041', '044', '045', '055', '056', '057', '065', '093', + '096', '118', '124', '132', '136', '139', '147', '148', '149', + '151', '152', '155', '163', '165', '194', '196', '202', + '203', '205', '206', '207', '208', '210', '211', '212', '213', '216', + '218', '219', '224', '228', '234', '235', '236', '237', '238', + '240', '242', '245', '246', '248', '255', '256', '257', '258', '260', + '262', '264', '266', '274', '277', '280', '281', '295', '296', '298', + '299', '302', '303', '304', '307', + 'nbd-fault-injector.py', 'qcow2.py', 'qcow2_format.py', 'qed.py' +) + + +def is_python_file(filename): + if not os.path.isfile(filename): + return False + + if filename.endswith('.py'): + return True + + with open(filename, encoding='utf-8') as f: + try: + first_line = f.readline() + return re.match('^#!.*python', first_line) is not None + except UnicodeDecodeError: # Ignore binary files + return False + + +def get_test_files() -> List[str]: + named_tests = [f'tests/{entry}' for entry in os.listdir('tests')] + check_tests = set(os.listdir('.') + named_tests) - set(SKIP_FILES) + return list(filter(is_python_file, check_tests)) + + +def run_linter( + tool: str, + args: List[str], + env: Optional[Mapping[str, str]] = None, + suppress_output: bool = False, +) -> None: + """ + Run a python-based linting tool. + + :param suppress_output: If True, suppress all stdout/stderr output. + :raise CalledProcessError: If the linter process exits with failure. + """ + subprocess.run( + ('python3', '-m', tool, *args), + env=env, + check=True, + stdout=subprocess.PIPE if suppress_output else None, + stderr=subprocess.STDOUT if suppress_output else None, + universal_newlines=True, + ) + + +def main() -> None: + """ + Used by the Python CI system as an entry point to run these linters. + """ + def show_usage() -> None: + print(f"Usage: {sys.argv[0]} < --mypy | --pylint >", file=sys.stderr) + sys.exit(1) + + if len(sys.argv) != 2: + show_usage() + + files = get_test_files() + + if sys.argv[1] == '--pylint': + run_linter('pylint', files) + elif sys.argv[1] == '--mypy': + # mypy bug #9852; disable incremental checking as a workaround. 
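
Besides the --pylint/--mypy entry point above, the same helpers can be driven from another wrapper script; a hedged sketch (the MYPYPATH value is illustrative, and the current directory is assumed to be tests/qemu-iotests):

import os
import subprocess
import linters

files = linters.get_test_files()
try:
    # Mirrors main(): mypy needs --no-incremental as a bug workaround.
    linters.run_linter('mypy', ['--no-incremental'] + files,
                       env=dict(os.environ, MYPYPATH='../../python'),
                       suppress_output=True)
except subprocess.CalledProcessError as exc:
    # Output was captured, so surface the findings on failure.
    if exc.output:
        print(exc.output)
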
+ args = ['--no-incremental'] + files + run_linter('mypy', args) + else: + print(f"Unrecognized argument: '{sys.argv[1]}'", file=sys.stderr) + show_usage() + + +if __name__ == '__main__': + main() diff --git a/tests/qemu-iotests/meson.build b/tests/qemu-iotests/meson.build index 67aed1e492..323a4acb6a 100644 --- a/tests/qemu-iotests/meson.build +++ b/tests/qemu-iotests/meson.build @@ -1,5 +1,47 @@ -if 'CONFIG_LINUX' in config_host - socket_scm_helper = executable('socket_scm_helper', 'socket_scm_helper.c') -else - socket_scm_helper = [] +if not have_tools or targetos == 'windows' or get_option('gprof') + subdir_done() endif + +foreach cflag: config_host['QEMU_CFLAGS'].split() + if cflag.startswith('-fsanitize') and \ + not cflag.contains('safe-stack') and not cflag.contains('cfi-icall') + message('Sanitizers are enabled ==> Disabled the qemu-iotests.') + subdir_done() + endif +endforeach + +bash = find_program('bash', required: false, version: '>= 4.0') +if not bash.found() + message('bash >= v4.0 not available ==> Disabled the qemu-iotests.') + subdir_done() +endif + +qemu_iotests_binaries = [qemu_img, qemu_io, qemu_nbd, qsd] +qemu_iotests_env = {'PYTHON': python.full_path()} +qemu_iotests_formats = { + 'qcow2': 'quick', + 'raw': 'slow', + 'qed': 'thorough', + 'vmdk': 'thorough', + 'vpc': 'thorough' +} + +foreach k, v : emulators + if k.startswith('qemu-system-') + qemu_iotests_binaries += v + endif +endforeach + +foreach format, speed: qemu_iotests_formats + if speed == 'quick' + suites = 'block' + else + suites = ['block-' + speed, speed] + endif + test('qemu-iotests ' + format, sh, args: [files('../check-block.sh'), format], + depends: qemu_iotests_binaries, env: qemu_iotests_env, + protocol: 'tap', + suite: suites, + timeout: 0, + is_parallel: false) +endforeach diff --git a/tests/qemu-iotests/mypy.ini b/tests/qemu-iotests/mypy.ini new file mode 100644 index 0000000000..d66ffc2e3c --- /dev/null +++ b/tests/qemu-iotests/mypy.ini @@ -0,0 +1,12 @@ +[mypy] +disallow_any_generics = True +disallow_incomplete_defs = True +disallow_subclassing_any = True +disallow_untyped_decorators = True +implicit_reexport = False +namespace_packages = True +no_implicit_optional = True +scripts_are_modules = True +warn_redundant_casts = True +warn_unused_configs = True +warn_unused_ignores = False diff --git a/tests/qemu-iotests/pylintrc b/tests/qemu-iotests/pylintrc index f2c0b522ac..f4f823a991 100644 --- a/tests/qemu-iotests/pylintrc +++ b/tests/qemu-iotests/pylintrc @@ -19,15 +19,40 @@ disable=invalid-name, too-many-public-methods, # pylint warns about Optional[] etc. as unsubscriptable in 3.9 unsubscriptable-object, + # pylint's static analysis causes false positivies for file_path(); + # If we really care to make it statically knowable, we'll use mypy. + unbalanced-tuple-unpacking, # Sometimes we need to disable a newly introduced pylint warning. # Doing so should not produce a warning in older versions of pylint. bad-option-value, # These are temporary, and should be removed: missing-docstring, too-many-return-statements, - too-many-statements + too-many-statements, + consider-using-f-string, + + +[REPORTS] + +# Activate the evaluation score. +score=no + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +# TODO notes are fine, but FIXMEs or XXXs should probably just be +# fixed (in tests, at least). +notes=FIXME, + XXX, + [FORMAT] # Maximum number of characters on a single line. 
max-line-length=79 + + +[SIMILARITIES] + +min-similarity-lines=6 diff --git a/tests/qemu-iotests/socket_scm_helper.c b/tests/qemu-iotests/socket_scm_helper.c deleted file mode 100644 index eb76d31aa9..0000000000 --- a/tests/qemu-iotests/socket_scm_helper.c +++ /dev/null @@ -1,136 +0,0 @@ -/* - * SCM_RIGHTS with unix socket help program for test - * - * Copyright IBM, Inc. 2013 - * - * Authors: - * Wenchao Xia - * - * This work is licensed under the terms of the GNU LGPL, version 2 or later. - * See the COPYING.LIB file in the top-level directory. - */ - -#include "qemu/osdep.h" -#include -#include - -/* #define SOCKET_SCM_DEBUG */ - -/* - * @fd and @fd_to_send will not be checked for validation in this function, - * a blank will be sent as iov data to notify qemu. - */ -static int send_fd(int fd, int fd_to_send) -{ - struct msghdr msg; - struct iovec iov[1]; - int ret; - char control[CMSG_SPACE(sizeof(int))]; - struct cmsghdr *cmsg; - - memset(&msg, 0, sizeof(msg)); - memset(control, 0, sizeof(control)); - - /* Send a blank to notify qemu */ - iov[0].iov_base = (void *)" "; - iov[0].iov_len = 1; - - msg.msg_iov = iov; - msg.msg_iovlen = 1; - - msg.msg_control = control; - msg.msg_controllen = sizeof(control); - - cmsg = CMSG_FIRSTHDR(&msg); - - cmsg->cmsg_len = CMSG_LEN(sizeof(int)); - cmsg->cmsg_level = SOL_SOCKET; - cmsg->cmsg_type = SCM_RIGHTS; - memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int)); - - do { - ret = sendmsg(fd, &msg, 0); - } while (ret < 0 && errno == EINTR); - - if (ret < 0) { - fprintf(stderr, "Failed to send msg, reason: %s\n", strerror(errno)); - } - - return ret; -} - -/* Convert string to fd number. */ -static int get_fd_num(const char *fd_str, bool silent) -{ - int sock; - char *err; - - errno = 0; - sock = strtol(fd_str, &err, 10); - if (errno) { - if (!silent) { - fprintf(stderr, "Failed in strtol for socket fd, reason: %s\n", - strerror(errno)); - } - return -1; - } - if (!*fd_str || *err || sock < 0) { - if (!silent) { - fprintf(stderr, "bad numerical value for socket fd '%s'\n", fd_str); - } - return -1; - } - - return sock; -} - -/* - * To make things simple, the caller needs to specify: - * 1. socket fd. - * 2. path of the file to be sent. - */ -int main(int argc, char **argv, char **envp) -{ - int sock, fd, ret; - -#ifdef SOCKET_SCM_DEBUG - int i; - for (i = 0; i < argc; i++) { - fprintf(stderr, "Parameter %d: %s\n", i, argv[i]); - } -#endif - - if (argc != 3) { - fprintf(stderr, - "Usage: %s < socket-fd > < file-path >\n", - argv[0]); - return EXIT_FAILURE; - } - - - sock = get_fd_num(argv[1], false); - if (sock < 0) { - return EXIT_FAILURE; - } - - fd = get_fd_num(argv[2], true); - if (fd < 0) { - /* Now only open a file in readonly mode for test purpose. If more - precise control is needed, use python script in file operation, which - is supposed to fork and exec this program. 
*/ - fd = open(argv[2], O_RDONLY); - if (fd < 0) { - fprintf(stderr, "Failed to open file '%s'\n", argv[2]); - return EXIT_FAILURE; - } - } - - ret = send_fd(sock, fd); - if (ret < 0) { - close(fd); - return EXIT_FAILURE; - } - - close(fd); - return EXIT_SUCCESS; -} diff --git a/tests/qemu-iotests/testenv.py b/tests/qemu-iotests/testenv.py index 0c3fe75636..a864c74b12 100644 --- a/tests/qemu-iotests/testenv.py +++ b/tests/qemu-iotests/testenv.py @@ -27,6 +27,7 @@ import subprocess import glob from typing import List, Dict, Any, Optional, ContextManager +DEF_GDB_OPTIONS = 'localhost:12345' def isxfile(path: str) -> bool: return os.path.isfile(path) and os.access(path, os.X_OK) @@ -65,14 +66,15 @@ class TestEnv(ContextManager['TestEnv']): # pylint: disable=too-many-instance-attributes env_variables = ['PYTHONPATH', 'TEST_DIR', 'SOCK_DIR', 'SAMPLE_IMG_DIR', - 'OUTPUT_DIR', 'PYTHON', 'QEMU_PROG', 'QEMU_IMG_PROG', + 'PYTHON', 'QEMU_PROG', 'QEMU_IMG_PROG', 'QEMU_IO_PROG', 'QEMU_NBD_PROG', 'QSD_PROG', - 'SOCKET_SCM_HELPER', 'QEMU_OPTIONS', 'QEMU_IMG_OPTIONS', + 'QEMU_OPTIONS', 'QEMU_IMG_OPTIONS', 'QEMU_IO_OPTIONS', 'QEMU_IO_OPTIONS_NO_FMT', 'QEMU_NBD_OPTIONS', 'IMGOPTS', 'IMGFMT', 'IMGPROTO', 'AIOMODE', 'CACHEMODE', 'VALGRIND_QEMU', 'CACHEMODE_IS_DEFAULT', 'IMGFMT_GENERIC', 'IMGOPTSSYNTAX', - 'IMGKEYSECRET', 'QEMU_DEFAULT_MACHINE', 'MALLOC_PERTURB_'] + 'IMGKEYSECRET', 'QEMU_DEFAULT_MACHINE', 'MALLOC_PERTURB_', + 'GDB_OPTIONS', 'PRINT_QEMU'] def prepare_subprocess(self, args: List[str]) -> Dict[str, str]: if self.debug: @@ -104,14 +106,16 @@ class TestEnv(ContextManager['TestEnv']): TEST_DIR SOCK_DIR SAMPLE_IMG_DIR - OUTPUT_DIR """ - self.pythonpath = os.getenv('PYTHONPATH') - if self.pythonpath: - self.pythonpath = self.source_iotests + os.pathsep + \ - self.pythonpath - else: - self.pythonpath = self.source_iotests + + # Path where qemu goodies live in this source tree. 
+ qemu_srctree_path = Path(__file__, '../../../python').resolve() + + self.pythonpath = os.pathsep.join(filter(None, ( + self.source_iotests, + str(qemu_srctree_path), + os.getenv('PYTHONPATH'), + ))) self.test_dir = os.getenv('TEST_DIR', os.path.join(os.getcwd(), 'scratch')) @@ -129,13 +133,10 @@ class TestEnv(ContextManager['TestEnv']): os.path.join(self.source_iotests, 'sample_images')) - self.output_dir = os.getcwd() # OUTPUT_DIR - def init_binaries(self) -> None: """Init binary path variables: PYTHON (for bash tests) QEMU_PROG, QEMU_IMG_PROG, QEMU_IO_PROG, QEMU_NBD_PROG, QSD_PROG - SOCKET_SCM_HELPER """ self.python = sys.executable @@ -169,16 +170,14 @@ class TestEnv(ContextManager['TestEnv']): if not isxfile(b): sys.exit('Not executable: ' + b) - helper_path = os.path.join(self.build_iotests, 'socket_scm_helper') - if isxfile(helper_path): - self.socket_scm_helper = helper_path # SOCKET_SCM_HELPER - def __init__(self, imgfmt: str, imgproto: str, aiomode: str, cachemode: Optional[str] = None, imgopts: Optional[str] = None, misalign: bool = False, debug: bool = False, - valgrind: bool = False) -> None: + valgrind: bool = False, + gdb: bool = False, + qprint: bool = False) -> None: self.imgfmt = imgfmt self.imgproto = imgproto self.aiomode = aiomode @@ -186,6 +185,18 @@ class TestEnv(ContextManager['TestEnv']): self.misalign = misalign self.debug = debug + if qprint: + self.print_qemu = 'y' + + if gdb: + self.gdb_options = os.getenv('GDB_OPTIONS', DEF_GDB_OPTIONS) + if not self.gdb_options: + # cover the case 'export GDB_OPTIONS=' + self.gdb_options = DEF_GDB_OPTIONS + elif 'GDB_OPTIONS' in os.environ: + # to not propagate it in prepare_subprocess() + del os.environ['GDB_OPTIONS'] + if valgrind: self.valgrind_qemu = 'y' @@ -224,6 +235,8 @@ class TestEnv(ContextManager['TestEnv']): ('aarch64', 'virt'), ('avr', 'mega2560'), ('m68k', 'virt'), + ('riscv32', 'virt'), + ('riscv64', 'virt'), ('rx', 'gdbsim-r5f562n8'), ('tricore', 'tricore_testboard') ) @@ -273,19 +286,21 @@ class TestEnv(ContextManager['TestEnv']): def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: self.close() - def print_env(self) -> None: + def print_env(self, prefix: str = '') -> None: template = """\ -QEMU -- "{QEMU_PROG}" {QEMU_OPTIONS} -QEMU_IMG -- "{QEMU_IMG_PROG}" {QEMU_IMG_OPTIONS} -QEMU_IO -- "{QEMU_IO_PROG}" {QEMU_IO_OPTIONS} -QEMU_NBD -- "{QEMU_NBD_PROG}" {QEMU_NBD_OPTIONS} -IMGFMT -- {IMGFMT}{imgopts} -IMGPROTO -- {IMGPROTO} -PLATFORM -- {platform} -TEST_DIR -- {TEST_DIR} -SOCK_DIR -- {SOCK_DIR} -SOCKET_SCM_HELPER -- {SOCKET_SCM_HELPER} -""" +{prefix}QEMU -- "{QEMU_PROG}" {QEMU_OPTIONS} +{prefix}QEMU_IMG -- "{QEMU_IMG_PROG}" {QEMU_IMG_OPTIONS} +{prefix}QEMU_IO -- "{QEMU_IO_PROG}" {QEMU_IO_OPTIONS} +{prefix}QEMU_NBD -- "{QEMU_NBD_PROG}" {QEMU_NBD_OPTIONS} +{prefix}IMGFMT -- {IMGFMT}{imgopts} +{prefix}IMGPROTO -- {IMGPROTO} +{prefix}PLATFORM -- {platform} +{prefix}TEST_DIR -- {TEST_DIR} +{prefix}SOCK_DIR -- {SOCK_DIR} +{prefix}GDB_OPTIONS -- {GDB_OPTIONS} +{prefix}VALGRIND_QEMU -- {VALGRIND_QEMU} +{prefix}PRINT_QEMU_OUTPUT -- {PRINT_QEMU} +{prefix}""" args = collections.defaultdict(str, self.get_env()) @@ -294,5 +309,5 @@ SOCKET_SCM_HELPER -- {SOCKET_SCM_HELPER} u = os.uname() args['platform'] = f'{u.sysname}/{u.machine} {u.nodename} {u.release}' - + args['prefix'] = prefix print(template.format_map(args)) diff --git a/tests/qemu-iotests/testrunner.py b/tests/qemu-iotests/testrunner.py index 4a6ec421ed..5a771da86e 100644 --- a/tests/qemu-iotests/testrunner.py +++ 
b/tests/qemu-iotests/testrunner.py @@ -25,7 +25,9 @@ import subprocess import contextlib import json import termios +import shutil import sys +from multiprocessing import Pool from contextlib import contextmanager from typing import List, Optional, Iterator, Any, Sequence, Dict, \ ContextManager @@ -126,10 +128,35 @@ class TestResult: class TestRunner(ContextManager['TestRunner']): - def __init__(self, env: TestEnv, makecheck: bool = False, + shared_self = None + + @staticmethod + def proc_run_test(test: str, test_field_width: int) -> TestResult: + # We are in a subprocess, we can't change the runner object! + runner = TestRunner.shared_self + assert runner is not None + return runner.run_test(test, test_field_width, mp=True) + + def run_tests_pool(self, tests: List[str], + test_field_width: int, jobs: int) -> List[TestResult]: + + # passing self directly to Pool.starmap() just doesn't work, because + # it's a context manager. + assert TestRunner.shared_self is None + TestRunner.shared_self = self + + with Pool(jobs) as p: + results = p.starmap(self.proc_run_test, + zip(tests, [test_field_width] * len(tests))) + + TestRunner.shared_self = None + + return results + + def __init__(self, env: TestEnv, tap: bool = False, color: str = 'auto') -> None: self.env = env - self.makecheck = makecheck + self.tap = tap self.last_elapsed = LastElapsedTime('.last-elapsed-cache', env) assert color in ('auto', 'on', 'off') @@ -148,12 +175,13 @@ class TestRunner(ContextManager['TestRunner']): def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: self._stack.close() - def test_print_one_line(self, test: str, starttime: str, + def test_print_one_line(self, test: str, + test_field_width: int, + starttime: str, endtime: Optional[str] = None, status: str = '...', lasttime: Optional[float] = None, thistime: Optional[float] = None, description: str = '', - test_field_width: Optional[int] = None, end: str = '\n') -> None: """ Print short test info before/after test run """ test = os.path.basename(test) @@ -161,13 +189,13 @@ class TestRunner(ContextManager['TestRunner']): if test_field_width is None: test_field_width = 8 - if self.makecheck and status != '...': - if status and status != 'pass': - status = f' [{status}]' - else: - status = '' - - print(f' TEST iotest-{self.env.imgfmt}: {test}{status}') + if self.tap: + if status == 'pass': + print(f'ok {self.env.imgfmt} {test}') + elif status == 'fail': + print(f'not ok {self.env.imgfmt} {test}') + elif status == 'not run': + print(f'ok {self.env.imgfmt} {test} # SKIP') return if lasttime: @@ -219,11 +247,19 @@ class TestRunner(ContextManager['TestRunner']): return f'{test}.out' - def do_run_test(self, test: str) -> TestResult: + def do_run_test(self, test: str, mp: bool) -> TestResult: + """ + Run one test + + :param test: test file path + :param mp: if true, we are in a multiprocessing environment, use + personal subdirectories for test run + + Note: this method may be called from subprocess, so it does not + change ``self`` object in any way! 
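# --- Editor's sketch (not part of this patch) ---------------------------------
# A minimal, self-contained illustration of the pattern run_tests_pool() uses
# above: Pool.starmap() cannot be handed the runner object directly (it is a
# context manager and is not meant to be pickled), so the object is parked in a
# class attribute and looked up again inside each worker, which must not mutate
# it.  Like the runner above, this assumes a fork-based multiprocessing start
# method (the Linux default) so the attribute is visible in the workers.  The
# names DemoRunner/proc_task/run_pool are invented for the example.
from multiprocessing import Pool
from typing import List, Optional


class DemoRunner:
    shared_self: Optional['DemoRunner'] = None

    def __init__(self, prefix: str) -> None:
        self.prefix = prefix

    @staticmethod
    def proc_task(item: str, width: int) -> str:
        # Runs in a subprocess: recover the runner via the class attribute
        # and treat it as read-only.
        runner = DemoRunner.shared_self
        assert runner is not None
        return f'{runner.prefix}{item:<{width}}'

    def run_pool(self, items: List[str], width: int, jobs: int) -> List[str]:
        assert DemoRunner.shared_self is None
        DemoRunner.shared_self = self
        try:
            with Pool(jobs) as p:
                return p.starmap(self.proc_task,
                                 zip(items, [width] * len(items)))
        finally:
            DemoRunner.shared_self = None


if __name__ == '__main__':
    print(DemoRunner('ok ').run_pool(['001', '002', '003'], 8, jobs=2))
# -------------------------------------------------------------------------------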
+ """ + f_test = Path(test) - f_bad = Path(f_test.name + '.out.bad') - f_notrun = Path(f_test.name + '.notrun') - f_casenotrun = Path(f_test.name + '.casenotrun') f_reference = Path(self.find_reference(test)) if not f_test.exists(): @@ -238,11 +274,22 @@ class TestRunner(ContextManager['TestRunner']): description='No qualified output ' f'(expected {f_reference})') - for p in (f_bad, f_notrun, f_casenotrun): - silent_unlink(p) - args = [str(f_test.resolve())] env = self.env.prepare_subprocess(args) + if mp: + # Split test directories, so that tests running in parallel don't + # break each other. + for d in ['TEST_DIR', 'SOCK_DIR']: + env[d] = os.path.join(env[d], f_test.name) + Path(env[d]).mkdir(parents=True, exist_ok=True) + + test_dir = env['TEST_DIR'] + f_bad = Path(test_dir, f_test.name + '.out.bad') + f_notrun = Path(test_dir, f_test.name + '.notrun') + f_casenotrun = Path(test_dir, f_test.name + '.casenotrun') + + for p in (f_notrun, f_casenotrun): + silent_unlink(p) t0 = time.time() with f_bad.open('w', encoding="utf-8") as f: @@ -266,62 +313,101 @@ class TestRunner(ContextManager['TestRunner']): diff=file_diff(str(f_reference), str(f_bad))) if f_notrun.exists(): - return TestResult(status='not run', - description=f_notrun.read_text().strip()) + return TestResult( + status='not run', + description=f_notrun.read_text(encoding='utf-8').strip()) casenotrun = '' if f_casenotrun.exists(): - casenotrun = f_casenotrun.read_text() + casenotrun = f_casenotrun.read_text(encoding='utf-8') diff = file_diff(str(f_reference), str(f_bad)) if diff: + if os.environ.get("QEMU_IOTESTS_REGEN", None) is not None: + shutil.copyfile(str(f_bad), str(f_reference)) + print("########################################") + print("##### REFERENCE FILE UPDATED #####") + print("########################################") return TestResult(status='fail', elapsed=elapsed, description=f'output mismatch (see {f_bad})', diff=diff, casenotrun=casenotrun) else: f_bad.unlink() - self.last_elapsed.update(test, elapsed) return TestResult(status='pass', elapsed=elapsed, casenotrun=casenotrun) def run_test(self, test: str, - test_field_width: Optional[int] = None) -> TestResult: + test_field_width: int, + mp: bool = False) -> TestResult: + """ + Run one test and print short status + + :param test: test file path + :param test_field_width: width for first field of status format + :param mp: if true, we are in a multiprocessing environment, don't try + to rewrite things in stdout + + Note: this method may be called from subprocess, so it does not + change ``self`` object in any way! 
+ """ + last_el = self.last_elapsed.get(test) start = datetime.datetime.now().strftime('%H:%M:%S') - if not self.makecheck: - self.test_print_one_line(test=test, starttime=start, - lasttime=last_el, end='\r', - test_field_width=test_field_width) + if not self.tap: + self.test_print_one_line(test=test, + test_field_width=test_field_width, + status = 'started' if mp else '...', + starttime=start, + lasttime=last_el, + end = '\n' if mp else '\r') + else: + testname = os.path.basename(test) + print(f'# running {self.env.imgfmt} {testname}') - res = self.do_run_test(test) + res = self.do_run_test(test, mp) end = datetime.datetime.now().strftime('%H:%M:%S') - self.test_print_one_line(test=test, status=res.status, + self.test_print_one_line(test=test, + test_field_width=test_field_width, + status=res.status, starttime=start, endtime=end, lasttime=last_el, thistime=res.elapsed, - description=res.description, - test_field_width=test_field_width) + description=res.description) if res.casenotrun: - print(res.casenotrun) + if self.tap: + print('#' + res.casenotrun.replace('\n', '\n#')) + else: + print(res.casenotrun) + sys.stdout.flush() return res - def run_tests(self, tests: List[str]) -> bool: + def run_tests(self, tests: List[str], jobs: int = 1) -> bool: n_run = 0 failed = [] notrun = [] casenotrun = [] - if not self.makecheck: + if self.tap: + self.env.print_env('# ') + print('1..%d' % len(tests)) + else: self.env.print_env() test_field_width = max(len(os.path.basename(t)) for t in tests) + 2 - for t in tests: + if jobs > 1: + results = self.run_tests_pool(tests, test_field_width, jobs) + + for i, t in enumerate(tests): name = os.path.basename(t) - res = self.run_test(t, test_field_width=test_field_width) + + if jobs > 1: + res = results[i] + else: + res = self.run_test(t, test_field_width) assert res.status in ('pass', 'fail', 'not run') @@ -333,26 +419,31 @@ class TestRunner(ContextManager['TestRunner']): if res.status == 'fail': failed.append(name) - if self.makecheck: - self.env.print_env() if res.diff: - print('\n'.join(res.diff)) + if self.tap: + print('\n'.join(res.diff), file=sys.stderr) + else: + print('\n'.join(res.diff)) elif res.status == 'not run': notrun.append(name) + elif res.status == 'pass': + assert res.elapsed is not None + self.last_elapsed.update(t, res.elapsed) + sys.stdout.flush() if res.interrupted: break - if notrun: - print('Not run:', ' '.join(notrun)) + if not self.tap: + if notrun: + print('Not run:', ' '.join(notrun)) - if casenotrun: - print('Some cases not run in:', ' '.join(casenotrun)) + if casenotrun: + print('Some cases not run in:', ' '.join(casenotrun)) - if failed: - print('Failures:', ' '.join(failed)) - print(f'Failed {len(failed)} of {n_run} iotests') - return False - else: - print(f'Passed all {n_run} iotests') - return True + if failed: + print('Failures:', ' '.join(failed)) + print(f'Failed {len(failed)} of {n_run} iotests') + else: + print(f'Passed all {n_run} iotests') + return not failed diff --git a/tests/qemu-iotests/tests/backing-file-invalidation b/tests/qemu-iotests/tests/backing-file-invalidation new file mode 100755 index 0000000000..4eccc80153 --- /dev/null +++ b/tests/qemu-iotests/tests/backing-file-invalidation @@ -0,0 +1,152 @@ +#!/usr/bin/env python3 +# group: rw migration +# +# Migrate a VM with a BDS with backing nodes, which runs +# bdrv_invalidate_cache(), which for qcow2 and qed triggers reading the +# backing file string from the image header. Check whether this +# interferes with bdrv_backing_overridden(). 
+# +# Copyright (C) 2022 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import json +import os +from typing import Optional + +import iotests +from iotests import qemu_img_create, qemu_img_info + + +image_size = 1 * 1024 * 1024 +imgs = [os.path.join(iotests.test_dir, f'{i}.img') for i in range(0, 4)] + +mig_sock = os.path.join(iotests.sock_dir, 'mig.sock') + + +class TestPostMigrateFilename(iotests.QMPTestCase): + vm_s: Optional[iotests.VM] = None + vm_d: Optional[iotests.VM] = None + + def setUp(self) -> None: + # Create backing chain of three images, where the backing file strings + # are json:{} filenames + qemu_img_create('-f', iotests.imgfmt, imgs[0], str(image_size)) + for i in range(1, 3): + backing = { + 'driver': iotests.imgfmt, + 'file': { + 'driver': 'file', + 'filename': imgs[i - 1] + } + } + qemu_img_create('-f', iotests.imgfmt, '-F', iotests.imgfmt, + '-b', 'json:' + json.dumps(backing), + imgs[i], str(image_size)) + + def tearDown(self) -> None: + if self.vm_s is not None: + self.vm_s.shutdown() + if self.vm_d is not None: + self.vm_d.shutdown() + + for img in imgs: + try: + os.remove(img) + except OSError: + pass + try: + os.remove(mig_sock) + except OSError: + pass + + def test_migration(self) -> None: + """ + Migrate a VM with the backing chain created in setUp() attached. At + the end of the migration process, the destination will run + bdrv_invalidate_cache(), which for some image formats (qcow2 and qed) + means the backing file string is re-read from the image header. If + this overwrites bs->auto_backing_file, doing so may cause + bdrv_backing_overridden() to become true: The image header reports a + json:{} filename, but when opening it, bdrv_refresh_filename() will + simplify it to a plain simple filename; and when bs->auto_backing_file + and bs->backing->bs->filename differ, bdrv_backing_overridden() becomes + true. + If bdrv_backing_overridden() is true, the BDS will be forced to get a + json:{} filename, which in general is not the end of the world, but not + great. Check whether that happens, i.e. whether migration changes the + node's filename. 
+ """ + + blockdev = { + 'node-name': 'node0', + 'driver': iotests.imgfmt, + 'file': { + 'driver': 'file', + 'filename': imgs[2] + } + } + + self.vm_s = iotests.VM(path_suffix='a') \ + .add_blockdev(json.dumps(blockdev)) + self.vm_d = iotests.VM(path_suffix='b') \ + .add_blockdev(json.dumps(blockdev)) \ + .add_incoming(f'unix:{mig_sock}') + + assert self.vm_s is not None + assert self.vm_d is not None + + self.vm_s.launch() + self.vm_d.launch() + + pre_mig_filename = self.vm_s.node_info('node0')['file'] + + self.vm_s.qmp('migrate', uri=f'unix:{mig_sock}') + + # Wait for migration to be done + self.vm_s.event_wait('STOP') + self.vm_d.event_wait('RESUME') + + post_mig_filename = self.vm_d.node_info('node0')['file'] + + # Verify that the filename hasn't changed from before the migration + self.assertEqual(pre_mig_filename, post_mig_filename) + + self.vm_s.shutdown() + self.vm_s = None + + # For good measure, try creating an overlay and check its backing + # chain below. This is how the issue was originally found. + result = self.vm_d.qmp('blockdev-snapshot-sync', + format=iotests.imgfmt, + snapshot_file=imgs[3], + node_name='node0', + snapshot_node_name='node0-overlay') + self.assert_qmp(result, 'return', {}) + + self.vm_d.shutdown() + self.vm_d = None + + # Check the newly created overlay's backing chain + chain = qemu_img_info('--backing-chain', imgs[3]) + for index, image in enumerate(chain): + self.assertEqual(image['filename'], imgs[3 - index]) + + +if __name__ == '__main__': + # These are the image formats that run their open() function from their + # .bdrv_co_invaliate_cache() implementations, so test them + iotests.main(supported_fmts=['qcow2', 'qed'], + supported_protocols=['file']) diff --git a/tests/qemu-iotests/tests/backing-file-invalidation.out b/tests/qemu-iotests/tests/backing-file-invalidation.out new file mode 100644 index 0000000000..ae1213e6f8 --- /dev/null +++ b/tests/qemu-iotests/tests/backing-file-invalidation.out @@ -0,0 +1,5 @@ +. +---------------------------------------------------------------------- +Ran 1 tests + +OK diff --git a/tests/qemu-iotests/tests/block-status-cache b/tests/qemu-iotests/tests/block-status-cache new file mode 100755 index 0000000000..5a7bc2c149 --- /dev/null +++ b/tests/qemu-iotests/tests/block-status-cache @@ -0,0 +1,135 @@ +#!/usr/bin/env python3 +# group: rw quick +# +# Test cases for the block-status cache. +# +# Copyright (C) 2022 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +import os +import signal +import iotests +from iotests import qemu_img_create, qemu_img_map, qemu_nbd + + +image_size = 1 * 1024 * 1024 +test_img = os.path.join(iotests.test_dir, 'test.img') + +nbd_pidfile = os.path.join(iotests.test_dir, 'nbd.pid') +nbd_sock = os.path.join(iotests.sock_dir, 'nbd.sock') + + +class TestBscWithNbd(iotests.QMPTestCase): + def setUp(self) -> None: + """Just create an empty image with a read-only NBD server on it""" + qemu_img_create('-f', iotests.imgfmt, test_img, str(image_size)) + + # Pass --allocation-depth to enable the qemu:allocation-depth context, + # which we are going to query to provoke a block-status inquiry with + # want_zero=false. + assert qemu_nbd(f'--socket={nbd_sock}', + f'--format={iotests.imgfmt}', + '--persistent', + '--allocation-depth', + '--read-only', + f'--pid-file={nbd_pidfile}', + test_img) \ + == 0 + + def tearDown(self) -> None: + with open(nbd_pidfile, encoding='utf-8') as f: + pid = int(f.read()) + os.kill(pid, signal.SIGTERM) + os.remove(nbd_pidfile) + os.remove(test_img) + + def test_with_zero_bug(self) -> None: + """ + Verify that the block-status cache is not corrupted by a + want_zero=false call. + We can provoke a want_zero=false call with `qemu-img map` over NBD with + x-dirty-bitmap=qemu:allocation-depth, so we first run a normal `map` + (which results in want_zero=true), then using said + qemu:allocation-depth context, and finally another normal `map` to + verify that the cache has not been corrupted. + """ + + nbd_img_opts = f'driver=nbd,server.type=unix,server.path={nbd_sock}' + nbd_img_opts_alloc_depth = nbd_img_opts + \ + ',x-dirty-bitmap=qemu:allocation-depth' + + # Normal map, results in want_zero=true. + # This will probably detect an allocated data sector first (qemu likes + # to allocate the first sector to facilitate alignment probing), and + # then the rest to be zero. The BSC will thus contain (if anything) + # one range covering the first sector. + map_pre = qemu_img_map('--image-opts', nbd_img_opts) + + # qemu:allocation-depth maps for want_zero=false. + # want_zero=false should (with the file driver, which the server is + # using) report everything as data. While this is sufficient for + # want_zero=false, this is nothing that should end up in the + # block-status cache. + # Due to a bug, this information did end up in the cache, though, and + # this would lead to wrong information being returned on subsequent + # want_zero=true calls. + # + # We need to run this map twice: On the first call, we probably still + # have the first sector in the cache, and so this will be served from + # the cache; and only the subsequent range will be queried from the + # block driver. This subsequent range will then be entered into the + # cache. + # If we did a want_zero=true call at this point, we would thus get + # correct information: The first sector is not covered by the cache, so + # we would get fresh block-status information from the driver, which + # would return a data range, and this would then go into the cache, + # evicting the wrong range from the want_zero=false call before. + # + # Therefore, we need a second want_zero=false map to reproduce: + # Since the first sector is not in the cache, the query for its status + # will go to the driver, which will return a result that reports the + # whole image to be a single data area. This result will then go into + # the cache, and so the cache will then report the whole image to + # contain data. 
+ # + # Note that once the cache reports the whole image to contain data, any + # subsequent map operation will be served from the cache, and so we can + # never loop too many times here. + for _ in range(2): + # (Ignore the result, this is just to contaminate the cache) + qemu_img_map('--image-opts', nbd_img_opts_alloc_depth) + + # Now let's see whether the cache reports everything as data, or + # whether we get correct information (i.e. the same as we got on our + # first attempt). + map_post = qemu_img_map('--image-opts', nbd_img_opts) + + if map_pre != map_post: + print('ERROR: Map information differs before and after querying ' + + 'qemu:allocation-depth') + print('Before:') + print(map_pre) + print('After:') + print(map_post) + + self.fail("Map information differs") + + +if __name__ == '__main__': + # The block-status cache only works on the protocol layer, so to test it, + # we can only use the raw format + iotests.main(supported_fmts=['raw'], + supported_protocols=['file']) diff --git a/tests/qemu-iotests/tests/block-status-cache.out b/tests/qemu-iotests/tests/block-status-cache.out new file mode 100644 index 0000000000..ae1213e6f8 --- /dev/null +++ b/tests/qemu-iotests/tests/block-status-cache.out @@ -0,0 +1,5 @@ +. +---------------------------------------------------------------------- +Ran 1 tests + +OK diff --git a/tests/qemu-iotests/tests/copy-before-write b/tests/qemu-iotests/tests/copy-before-write new file mode 100755 index 0000000000..2ffe092b31 --- /dev/null +++ b/tests/qemu-iotests/tests/copy-before-write @@ -0,0 +1,222 @@ +#!/usr/bin/env python3 +# group: auto backup +# +# Copyright (c) 2022 Virtuozzo International GmbH +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +import os +import re + +from qemu.machine import QEMUMachine + +import iotests +from iotests import qemu_img_create, qemu_io + + +temp_img = os.path.join(iotests.test_dir, 'temp') +source_img = os.path.join(iotests.test_dir, 'source') +size = '1M' + + +class TestCbwError(iotests.QMPTestCase): + def tearDown(self): + self.vm.shutdown() + os.remove(temp_img) + os.remove(source_img) + + def setUp(self): + qemu_img_create('-f', iotests.imgfmt, source_img, size) + qemu_img_create('-f', iotests.imgfmt, temp_img, size) + qemu_io('-c', 'write 0 1M', source_img) + + opts = ['-nodefaults', '-display', 'none', '-machine', 'none'] + self.vm = QEMUMachine(iotests.qemu_prog, opts, + base_temp_dir=iotests.test_dir, + sock_dir=iotests.sock_dir) + self.vm.launch() + + def do_cbw_error(self, on_cbw_error): + result = self.vm.qmp('blockdev-add', { + 'node-name': 'cbw', + 'driver': 'copy-before-write', + 'on-cbw-error': on_cbw_error, + 'file': { + 'driver': iotests.imgfmt, + 'file': { + 'driver': 'file', + 'filename': source_img, + } + }, + 'target': { + 'driver': iotests.imgfmt, + 'file': { + 'driver': 'blkdebug', + 'image': { + 'driver': 'file', + 'filename': temp_img + }, + 'inject-error': [ + { + 'event': 'write_aio', + 'errno': 5, + 'immediately': False, + 'once': True + } + ] + } + } + }) + self.assert_qmp(result, 'return', {}) + + result = self.vm.qmp('blockdev-add', { + 'node-name': 'access', + 'driver': 'snapshot-access', + 'file': 'cbw' + }) + self.assert_qmp(result, 'return', {}) + + result = self.vm.qmp('human-monitor-command', + command_line='qemu-io cbw "write 0 1M"') + self.assert_qmp(result, 'return', '') + + result = self.vm.qmp('human-monitor-command', + command_line='qemu-io access "read 0 1M"') + self.assert_qmp(result, 'return', '') + + self.vm.shutdown() + log = self.vm.get_log() + log = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', log) + log = re.sub(r'\[I \+\d+\.\d+\] CLOSED\n?$', '', log) + log = iotests.filter_qemu_io(log) + return log + + def test_break_snapshot_on_cbw_error(self): + """break-snapshot behavior: + Guest write succeed, but further snapshot-read fails, as snapshot is + broken. + """ + log = self.do_cbw_error('break-snapshot') + + self.assertEqual(log, """\ +wrote 1048576/1048576 bytes at offset 0 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read failed: Permission denied +""") + + def test_break_guest_write_on_cbw_error(self): + """break-guest-write behavior: + Guest write fails, but snapshot-access continues working and further + snapshot-read succeeds. 
+ """ + log = self.do_cbw_error('break-guest-write') + + self.assertEqual(log, """\ +write failed: Input/output error +read 1048576/1048576 bytes at offset 0 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +""") + + def do_cbw_timeout(self, on_cbw_error): + result = self.vm.qmp('object-add', { + 'qom-type': 'throttle-group', + 'id': 'group0', + 'limits': {'bps-write': 300 * 1024} + }) + self.assert_qmp(result, 'return', {}) + + result = self.vm.qmp('blockdev-add', { + 'node-name': 'cbw', + 'driver': 'copy-before-write', + 'on-cbw-error': on_cbw_error, + 'cbw-timeout': 1, + 'file': { + 'driver': iotests.imgfmt, + 'file': { + 'driver': 'file', + 'filename': source_img, + } + }, + 'target': { + 'driver': 'throttle', + 'throttle-group': 'group0', + 'file': { + 'driver': 'qcow2', + 'file': { + 'driver': 'file', + 'filename': temp_img + } + } + } + }) + self.assert_qmp(result, 'return', {}) + + result = self.vm.qmp('blockdev-add', { + 'node-name': 'access', + 'driver': 'snapshot-access', + 'file': 'cbw' + }) + self.assert_qmp(result, 'return', {}) + + result = self.vm.qmp('human-monitor-command', + command_line='qemu-io cbw "write 0 512K"') + self.assert_qmp(result, 'return', '') + + # We need second write to trigger throttling + result = self.vm.qmp('human-monitor-command', + command_line='qemu-io cbw "write 512K 512K"') + self.assert_qmp(result, 'return', '') + + result = self.vm.qmp('human-monitor-command', + command_line='qemu-io access "read 0 1M"') + self.assert_qmp(result, 'return', '') + + self.vm.shutdown() + log = self.vm.get_log() + log = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', log) + log = re.sub(r'\[I \+\d+\.\d+\] CLOSED\n?$', '', log) + log = iotests.filter_qemu_io(log) + return log + + def test_timeout_break_guest(self): + log = self.do_cbw_timeout('break-guest-write') + # macOS and FreeBSD tend to represent ETIMEDOUT as + # "Operation timed out", when Linux prefer + # "Connection timed out" + log = log.replace('Operation timed out', + 'Connection timed out') + self.assertEqual(log, """\ +wrote 524288/524288 bytes at offset 0 +512 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +write failed: Connection timed out +read 1048576/1048576 bytes at offset 0 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +""") + + def test_timeout_break_snapshot(self): + log = self.do_cbw_timeout('break-snapshot') + self.assertEqual(log, """\ +wrote 524288/524288 bytes at offset 0 +512 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 524288/524288 bytes at offset 524288 +512 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read failed: Permission denied +""") + + +if __name__ == '__main__': + iotests.main(supported_fmts=['qcow2'], + supported_protocols=['file'], + required_fmts=['copy-before-write']) diff --git a/tests/qemu-iotests/tests/copy-before-write.out b/tests/qemu-iotests/tests/copy-before-write.out new file mode 100644 index 0000000000..89968f35d7 --- /dev/null +++ b/tests/qemu-iotests/tests/copy-before-write.out @@ -0,0 +1,5 @@ +.... 
+---------------------------------------------------------------------- +Ran 4 tests + +OK diff --git a/tests/qemu-iotests/tests/export-incoming-iothread b/tests/qemu-iotests/tests/export-incoming-iothread new file mode 100755 index 0000000000..7679e49103 --- /dev/null +++ b/tests/qemu-iotests/tests/export-incoming-iothread @@ -0,0 +1,81 @@ +#!/usr/bin/env python3 +# group: rw quick migration +# +# Regression test for issue 945: +# https://gitlab.com/qemu-project/qemu/-/issues/945 +# Test adding an export on top of an iothread-ed block device while in +# -incoming defer. +# +# Copyright (C) 2022 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import os +import iotests +from iotests import qemu_img_create + + +image_size = 1 * 1024 * 1024 +test_img = os.path.join(iotests.test_dir, 'test.img') +node_name = 'node0' +iothread_id = 'iothr0' + +nbd_sock = os.path.join(iotests.sock_dir, 'nbd.sock') + + +class TestExportIncomingIothread(iotests.QMPTestCase): + def setUp(self) -> None: + qemu_img_create('-f', iotests.imgfmt, test_img, str(image_size)) + + self.vm = iotests.VM() + self.vm.add_object(f'iothread,id={iothread_id}') + self.vm.add_blockdev(( + f'driver={iotests.imgfmt}', + f'node-name={node_name}', + 'file.driver=file', + f'file.filename={test_img}' + )) + self.vm.add_incoming('defer') + self.vm.launch() + + def tearDown(self): + self.vm.shutdown() + os.remove(test_img) + + def test_export_add(self): + result = self.vm.qmp('nbd-server-start', { + 'addr': { + 'type': 'unix', + 'data': { + 'path': nbd_sock + } + } + }) + self.assert_qmp(result, 'return', {}) + + # Regression test for issue 945: This should not fail an assertion + result = self.vm.qmp('block-export-add', { + 'type': 'nbd', + 'id': 'exp0', + 'node-name': node_name, + 'iothread': iothread_id + }) + self.assert_qmp(result, 'return', {}) + + +if __name__ == '__main__': + iotests.main(supported_fmts=['generic'], + unsupported_fmts=['luks'], # Would need a secret + supported_protocols=['file']) diff --git a/tests/qemu-iotests/tests/export-incoming-iothread.out b/tests/qemu-iotests/tests/export-incoming-iothread.out new file mode 100644 index 0000000000..ae1213e6f8 --- /dev/null +++ b/tests/qemu-iotests/tests/export-incoming-iothread.out @@ -0,0 +1,5 @@ +. +---------------------------------------------------------------------- +Ran 1 tests + +OK diff --git a/tests/qemu-iotests/tests/graph-changes-while-io b/tests/qemu-iotests/tests/graph-changes-while-io new file mode 100755 index 0000000000..7664f33689 --- /dev/null +++ b/tests/qemu-iotests/tests/graph-changes-while-io @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 +# group: rw +# +# Test graph changes while I/O is happening +# +# Copyright (C) 2022 Red Hat, Inc. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import os +from threading import Thread +import iotests +from iotests import imgfmt, qemu_img, qemu_img_create, QMPTestCase, \ + QemuStorageDaemon + + +top = os.path.join(iotests.test_dir, 'top.img') +nbd_sock = os.path.join(iotests.sock_dir, 'nbd.sock') + + +def do_qemu_img_bench() -> None: + """ + Do some I/O requests on `nbd_sock`. + """ + qemu_img('bench', '-f', 'raw', '-c', '2000000', + f'nbd+unix:///node0?socket={nbd_sock}') + + +class TestGraphChangesWhileIO(QMPTestCase): + def setUp(self) -> None: + # Create an overlay that can be added at runtime on top of the + # null-co block node that will receive I/O + qemu_img_create('-f', imgfmt, '-F', 'raw', '-b', 'null-co://', top) + + # QSD instance with a null-co block node in an I/O thread, + # exported over NBD (on `nbd_sock`, export name "node0") + self.qsd = QemuStorageDaemon( + '--object', 'iothread,id=iothread0', + '--blockdev', 'null-co,node-name=node0,read-zeroes=true', + '--nbd-server', f'addr.type=unix,addr.path={nbd_sock}', + '--export', 'nbd,id=exp0,node-name=node0,iothread=iothread0,' + + 'fixed-iothread=true,writable=true', + qmp=True + ) + + def tearDown(self) -> None: + self.qsd.stop() + + def test_blockdev_add_while_io(self) -> None: + # Run qemu-img bench in the background + bench_thr = Thread(target=do_qemu_img_bench) + bench_thr.start() + + # While qemu-img bench is running, repeatedly add and remove an + # overlay to/from node0 + while bench_thr.is_alive(): + result = self.qsd.qmp('blockdev-add', { + 'driver': imgfmt, + 'node-name': 'overlay', + 'backing': 'node0', + 'file': { + 'driver': 'file', + 'filename': top + } + }) + self.assert_qmp(result, 'return', {}) + + result = self.qsd.qmp('blockdev-del', { + 'node-name': 'overlay' + }) + self.assert_qmp(result, 'return', {}) + + bench_thr.join() + +if __name__ == '__main__': + # Format must support raw backing files + iotests.main(supported_fmts=['qcow', 'qcow2', 'qed'], + supported_protocols=['file']) diff --git a/tests/qemu-iotests/tests/graph-changes-while-io.out b/tests/qemu-iotests/tests/graph-changes-while-io.out new file mode 100644 index 0000000000..ae1213e6f8 --- /dev/null +++ b/tests/qemu-iotests/tests/graph-changes-while-io.out @@ -0,0 +1,5 @@ +. +---------------------------------------------------------------------- +Ran 1 tests + +OK diff --git a/tests/qemu-iotests/tests/image-fleecing b/tests/qemu-iotests/tests/image-fleecing new file mode 100755 index 0000000000..f6e449d071 --- /dev/null +++ b/tests/qemu-iotests/tests/image-fleecing @@ -0,0 +1,312 @@ +#!/usr/bin/env python3 +# group: rw quick +# +# This test covers the basic fleecing workflow, which provides a +# point-in-time snapshot of a node that can be queried over NBD. +# +# Copyright (C) 2018 Red Hat, Inc. +# John helped, too. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +# Creator/Owner: John Snow + +from subprocess import CalledProcessError + +import iotests +from iotests import log, qemu_img, qemu_io + +iotests.script_initialize( + supported_fmts=['qcow2'], + supported_platforms=['linux'], + required_fmts=['copy-before-write'], + unsupported_imgopts=['compat'] +) + +patterns = [('0x5d', '0', '64k'), + ('0xd5', '1M', '64k'), + ('0xdc', '32M', '64k'), + ('0xcd', '0x3ff0000', '64k')] # 64M - 64K + +overwrite = [('0xab', '0', '64k'), # Full overwrite + ('0xad', '0x00f8000', '64k'), # Partial-left (1M-32K) + ('0x1d', '0x2008000', '64k'), # Partial-right (32M+32K) + ('0xea', '0x3fe0000', '64k')] # Adjacent-left (64M - 128K) + +zeroes = [('0', '0x00f8000', '32k'), # Left-end of partial-left (1M-32K) + ('0', '0x2010000', '32k'), # Right-end of partial-right (32M+64K) + ('0', '0x3fe0000', '64k')] # overwrite[3] + +remainder = [('0xd5', '0x108000', '32k'), # Right-end of partial-left [1] + ('0xdc', '32M', '32k'), # Left-end of partial-right [2] + ('0xcd', '0x3ff0000', '64k')] # patterns[3] + +def do_test(vm, use_cbw, use_snapshot_access_filter, base_img_path, + fleece_img_path, nbd_sock_path=None, + target_img_path=None, + bitmap=False): + push_backup = target_img_path is not None + assert (nbd_sock_path is not None) != push_backup + if push_backup: + assert use_cbw + + log('--- Setting up images ---') + log('') + + qemu_img('create', '-f', iotests.imgfmt, base_img_path, '64M') + if bitmap: + qemu_img('bitmap', '--add', base_img_path, 'bitmap0') + + if use_snapshot_access_filter: + assert use_cbw + qemu_img('create', '-f', 'raw', fleece_img_path, '64M') + else: + qemu_img('create', '-f', 'qcow2', fleece_img_path, '64M') + + if push_backup: + qemu_img('create', '-f', 'qcow2', target_img_path, '64M') + + for p in patterns: + qemu_io('-f', iotests.imgfmt, + '-c', 'write -P%s %s %s' % p, base_img_path) + + log('Done') + + log('') + log('--- Launching VM ---') + log('') + + src_node = 'source' + tmp_node = 'temp' + qom_path = '/machine/peripheral/sda' + vm.add_blockdev(f'driver={iotests.imgfmt},file.driver=file,' + f'file.filename={base_img_path},node-name={src_node}') + vm.add_device('virtio-scsi') + vm.add_device(f'scsi-hd,id=sda,drive={src_node}') + vm.launch() + log('Done') + + log('') + log('--- Setting up Fleecing Graph ---') + log('') + + + if use_snapshot_access_filter: + log(vm.qmp('blockdev-add', { + 'node-name': tmp_node, + 'driver': 'file', + 'filename': fleece_img_path, + })) + else: + # create tmp_node backed by src_node + log(vm.qmp('blockdev-add', { + 'driver': 'qcow2', + 'node-name': tmp_node, + 'file': { + 'driver': 'file', + 'filename': fleece_img_path, + }, + 'backing': src_node, + })) + + # Establish CBW from source to fleecing node + if use_cbw: + fl_cbw = { + 'driver': 'copy-before-write', + 'node-name': 'fl-cbw', + 'file': src_node, + 'target': tmp_node + } + + if bitmap: + fl_cbw['bitmap'] = {'node': src_node, 'name': 'bitmap0'} + + 
log(vm.qmp('blockdev-add', fl_cbw)) + + log(vm.qmp('qom-set', path=qom_path, property='drive', value='fl-cbw')) + + if use_snapshot_access_filter: + log(vm.qmp('blockdev-add', { + 'driver': 'snapshot-access', + 'node-name': 'fl-access', + 'file': 'fl-cbw', + })) + else: + log(vm.qmp('blockdev-backup', + job_id='fleecing', + device=src_node, + target=tmp_node, + sync='none')) + + export_node = 'fl-access' if use_snapshot_access_filter else tmp_node + + if push_backup: + log('') + log('--- Starting actual backup ---') + log('') + + log(vm.qmp('blockdev-add', **{ + 'driver': iotests.imgfmt, + 'node-name': 'target', + 'file': { + 'driver': 'file', + 'filename': target_img_path + } + })) + log(vm.qmp('blockdev-backup', device=export_node, + sync='full', target='target', + job_id='push-backup', speed=1)) + else: + log('') + log('--- Setting up NBD Export ---') + log('') + + nbd_uri = 'nbd+unix:///%s?socket=%s' % (export_node, nbd_sock_path) + log(vm.qmp('nbd-server-start', + {'addr': { 'type': 'unix', + 'data': { 'path': nbd_sock_path } } })) + + log(vm.qmp('nbd-server-add', device=export_node)) + + log('') + log('--- Sanity Check ---') + log('') + + for p in patterns + zeroes: + cmd = 'read -P%s %s %s' % p + log(cmd) + + try: + qemu_io('-r', '-f', 'raw', '-c', cmd, nbd_uri) + except CalledProcessError as exc: + if bitmap and p in zeroes: + log(exc.stdout) + else: + raise + + log('') + log('--- Testing COW ---') + log('') + + for p in overwrite: + cmd = 'write -P%s %s %s' % p + log(cmd) + log(vm.hmp_qemu_io(qom_path, cmd, qdev=True)) + + if push_backup: + # Check that previous operations were done during backup, not after + # If backup is already finished, it's possible that it was finished + # even before hmp qemu_io write, and we didn't actually test + # copy-before-write operation. This should not happen, as we use + # speed=1. But worth checking. 
+ result = vm.qmp('query-block-jobs') + assert len(result['return']) == 1 + + result = vm.qmp('block-job-set-speed', device='push-backup', speed=0) + assert result == {'return': {}} + + log(vm.event_wait(name='BLOCK_JOB_COMPLETED', + match={'data': {'device': 'push-backup'}}), + filters=[iotests.filter_qmp_event]) + log(vm.qmp('blockdev-del', node_name='target')) + + log('') + log('--- Verifying Data ---') + log('') + + for p in patterns + zeroes: + cmd = 'read -P%s %s %s' % p + log(cmd) + args = ['-r', '-c', cmd] + if push_backup: + args += [target_img_path] + else: + args += ['-f', 'raw', nbd_uri] + + try: + qemu_io(*args) + except CalledProcessError as exc: + if bitmap and p in zeroes: + log(exc.stdout) + else: + raise + + log('') + log('--- Cleanup ---') + log('') + + if not push_backup: + log(vm.qmp('nbd-server-stop')) + + if use_cbw: + if use_snapshot_access_filter: + log(vm.qmp('blockdev-del', node_name='fl-access')) + log(vm.qmp('qom-set', path=qom_path, property='drive', value=src_node)) + log(vm.qmp('blockdev-del', node_name='fl-cbw')) + else: + log(vm.qmp('block-job-cancel', device='fleecing')) + e = vm.event_wait('BLOCK_JOB_CANCELLED') + assert e is not None + log(e, filters=[iotests.filter_qmp_event]) + + log(vm.qmp('blockdev-del', node_name=tmp_node)) + vm.shutdown() + + log('') + log('--- Confirming writes ---') + log('') + + for p in overwrite + remainder: + cmd = 'read -P%s %s %s' % p + log(cmd) + qemu_io(base_img_path, '-c', cmd) + + log('') + log('Done') + + +def test(use_cbw, use_snapshot_access_filter, + nbd_sock_path=None, target_img_path=None, bitmap=False): + with iotests.FilePath('base.img') as base_img_path, \ + iotests.FilePath('fleece.img') as fleece_img_path, \ + iotests.VM() as vm: + do_test(vm, use_cbw, use_snapshot_access_filter, base_img_path, + fleece_img_path, nbd_sock_path, target_img_path, + bitmap=bitmap) + +def test_pull(use_cbw, use_snapshot_access_filter, bitmap=False): + with iotests.FilePath('nbd.sock', + base_dir=iotests.sock_dir) as nbd_sock_path: + test(use_cbw, use_snapshot_access_filter, nbd_sock_path, None, + bitmap=bitmap) + +def test_push(): + with iotests.FilePath('target.img') as target_img_path: + test(True, True, None, target_img_path) + + +log('=== Test backup(sync=none) based fleecing ===\n') +test_pull(False, False) + +log('=== Test cbw-filter based fleecing ===\n') +test_pull(True, False) + +log('=== Test fleecing-format based fleecing ===\n') +test_pull(True, True) + +log('=== Test fleecing-format based fleecing with bitmap ===\n') +test_pull(True, True, bitmap=True) + +log('=== Test push backup with fleecing ===\n') +test_push() diff --git a/tests/qemu-iotests/tests/image-fleecing.out b/tests/qemu-iotests/tests/image-fleecing.out new file mode 100644 index 0000000000..acfc89ff0e --- /dev/null +++ b/tests/qemu-iotests/tests/image-fleecing.out @@ -0,0 +1,358 @@ +=== Test backup(sync=none) based fleecing === + +--- Setting up images --- + +Done + +--- Launching VM --- + +Done + +--- Setting up Fleecing Graph --- + +{"return": {}} +{"return": {}} + +--- Setting up NBD Export --- + +{"return": {}} +{"return": {}} + +--- Sanity Check --- + +read -P0x5d 0 64k +read -P0xd5 1M 64k +read -P0xdc 32M 64k +read -P0xcd 0x3ff0000 64k +read -P0 0x00f8000 32k +read -P0 0x2010000 32k +read -P0 0x3fe0000 64k + +--- Testing COW --- + +write -P0xab 0 64k +{"return": ""} +write -P0xad 0x00f8000 64k +{"return": ""} +write -P0x1d 0x2008000 64k +{"return": ""} +write -P0xea 0x3fe0000 64k +{"return": ""} + +--- Verifying Data --- + +read -P0x5d 0 64k 
+read -P0xd5 1M 64k +read -P0xdc 32M 64k +read -P0xcd 0x3ff0000 64k +read -P0 0x00f8000 32k +read -P0 0x2010000 32k +read -P0 0x3fe0000 64k + +--- Cleanup --- + +{"return": {}} +{"return": {}} +{"data": {"device": "fleecing", "len": 67108864, "offset": 393216, "speed": 0, "type": "backup"}, "event": "BLOCK_JOB_CANCELLED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"return": {}} + +--- Confirming writes --- + +read -P0xab 0 64k +read -P0xad 0x00f8000 64k +read -P0x1d 0x2008000 64k +read -P0xea 0x3fe0000 64k +read -P0xd5 0x108000 32k +read -P0xdc 32M 32k +read -P0xcd 0x3ff0000 64k + +Done +=== Test cbw-filter based fleecing === + +--- Setting up images --- + +Done + +--- Launching VM --- + +Done + +--- Setting up Fleecing Graph --- + +{"return": {}} +{"return": {}} +{"return": {}} + +--- Setting up NBD Export --- + +{"return": {}} +{"return": {}} + +--- Sanity Check --- + +read -P0x5d 0 64k +read -P0xd5 1M 64k +read -P0xdc 32M 64k +read -P0xcd 0x3ff0000 64k +read -P0 0x00f8000 32k +read -P0 0x2010000 32k +read -P0 0x3fe0000 64k + +--- Testing COW --- + +write -P0xab 0 64k +{"return": ""} +write -P0xad 0x00f8000 64k +{"return": ""} +write -P0x1d 0x2008000 64k +{"return": ""} +write -P0xea 0x3fe0000 64k +{"return": ""} + +--- Verifying Data --- + +read -P0x5d 0 64k +read -P0xd5 1M 64k +read -P0xdc 32M 64k +read -P0xcd 0x3ff0000 64k +read -P0 0x00f8000 32k +read -P0 0x2010000 32k +read -P0 0x3fe0000 64k + +--- Cleanup --- + +{"return": {}} +{"return": {}} +{"return": {}} +{"return": {}} + +--- Confirming writes --- + +read -P0xab 0 64k +read -P0xad 0x00f8000 64k +read -P0x1d 0x2008000 64k +read -P0xea 0x3fe0000 64k +read -P0xd5 0x108000 32k +read -P0xdc 32M 32k +read -P0xcd 0x3ff0000 64k + +Done +=== Test fleecing-format based fleecing === + +--- Setting up images --- + +Done + +--- Launching VM --- + +Done + +--- Setting up Fleecing Graph --- + +{"return": {}} +{"return": {}} +{"return": {}} +{"return": {}} + +--- Setting up NBD Export --- + +{"return": {}} +{"return": {}} + +--- Sanity Check --- + +read -P0x5d 0 64k +read -P0xd5 1M 64k +read -P0xdc 32M 64k +read -P0xcd 0x3ff0000 64k +read -P0 0x00f8000 32k +read -P0 0x2010000 32k +read -P0 0x3fe0000 64k + +--- Testing COW --- + +write -P0xab 0 64k +{"return": ""} +write -P0xad 0x00f8000 64k +{"return": ""} +write -P0x1d 0x2008000 64k +{"return": ""} +write -P0xea 0x3fe0000 64k +{"return": ""} + +--- Verifying Data --- + +read -P0x5d 0 64k +read -P0xd5 1M 64k +read -P0xdc 32M 64k +read -P0xcd 0x3ff0000 64k +read -P0 0x00f8000 32k +read -P0 0x2010000 32k +read -P0 0x3fe0000 64k + +--- Cleanup --- + +{"return": {}} +{"return": {}} +{"return": {}} +{"return": {}} +{"return": {}} + +--- Confirming writes --- + +read -P0xab 0 64k +read -P0xad 0x00f8000 64k +read -P0x1d 0x2008000 64k +read -P0xea 0x3fe0000 64k +read -P0xd5 0x108000 32k +read -P0xdc 32M 32k +read -P0xcd 0x3ff0000 64k + +Done +=== Test fleecing-format based fleecing with bitmap === + +--- Setting up images --- + +Done + +--- Launching VM --- + +Done + +--- Setting up Fleecing Graph --- + +{"return": {}} +{"return": {}} +{"return": {}} +{"return": {}} + +--- Setting up NBD Export --- + +{"return": {}} +{"return": {}} + +--- Sanity Check --- + +read -P0x5d 0 64k +read -P0xd5 1M 64k +read -P0xdc 32M 64k +read -P0xcd 0x3ff0000 64k +read -P0 0x00f8000 32k +read failed: Invalid argument + +read -P0 0x2010000 32k +read failed: Invalid argument + +read -P0 0x3fe0000 64k +read failed: Invalid argument + + +--- Testing COW --- + +write -P0xab 0 64k +{"return": ""} +write 
-P0xad 0x00f8000 64k +{"return": ""} +write -P0x1d 0x2008000 64k +{"return": ""} +write -P0xea 0x3fe0000 64k +{"return": ""} + +--- Verifying Data --- + +read -P0x5d 0 64k +read -P0xd5 1M 64k +read -P0xdc 32M 64k +read -P0xcd 0x3ff0000 64k +read -P0 0x00f8000 32k +read failed: Invalid argument + +read -P0 0x2010000 32k +read failed: Invalid argument + +read -P0 0x3fe0000 64k +read failed: Invalid argument + + +--- Cleanup --- + +{"return": {}} +{"return": {}} +{"return": {}} +{"return": {}} +{"return": {}} + +--- Confirming writes --- + +read -P0xab 0 64k +read -P0xad 0x00f8000 64k +read -P0x1d 0x2008000 64k +read -P0xea 0x3fe0000 64k +read -P0xd5 0x108000 32k +read -P0xdc 32M 32k +read -P0xcd 0x3ff0000 64k + +Done +=== Test push backup with fleecing === + +--- Setting up images --- + +Done + +--- Launching VM --- + +Done + +--- Setting up Fleecing Graph --- + +{"return": {}} +{"return": {}} +{"return": {}} +{"return": {}} + +--- Starting actual backup --- + +{"return": {}} +{"return": {}} + +--- Testing COW --- + +write -P0xab 0 64k +{"return": ""} +write -P0xad 0x00f8000 64k +{"return": ""} +write -P0x1d 0x2008000 64k +{"return": ""} +write -P0xea 0x3fe0000 64k +{"return": ""} +{"data": {"device": "push-backup", "len": 67108864, "offset": 67108864, "speed": 0, "type": "backup"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"return": {}} + +--- Verifying Data --- + +read -P0x5d 0 64k +read -P0xd5 1M 64k +read -P0xdc 32M 64k +read -P0xcd 0x3ff0000 64k +read -P0 0x00f8000 32k +read -P0 0x2010000 32k +read -P0 0x3fe0000 64k + +--- Cleanup --- + +{"return": {}} +{"return": {}} +{"return": {}} +{"return": {}} + +--- Confirming writes --- + +read -P0xab 0 64k +read -P0xad 0x00f8000 64k +read -P0x1d 0x2008000 64k +read -P0xea 0x3fe0000 64k +read -P0xd5 0x108000 32k +read -P0xdc 32M 32k +read -P0xcd 0x3ff0000 64k + +Done diff --git a/tests/qemu-iotests/tests/migrate-bitmaps-postcopy-test b/tests/qemu-iotests/tests/migrate-bitmaps-postcopy-test index 584062b412..fc9c4b4ef4 100755 --- a/tests/qemu-iotests/tests/migrate-bitmaps-postcopy-test +++ b/tests/qemu-iotests/tests/migrate-bitmaps-postcopy-test @@ -132,10 +132,10 @@ class TestDirtyBitmapPostcopyMigration(iotests.QMPTestCase): result = self.vm_a.qmp('x-debug-block-dirty-bitmap-sha256', node='drive0', name='bitmap0') - self.discards1_sha256 = result['return']['sha256'] + discards1_sha256 = result['return']['sha256'] # Check, that updating the bitmap by discards works - assert self.discards1_sha256 != empty_sha256 + assert discards1_sha256 != empty_sha256 # We want to calculate resulting sha256. 
Do it in bitmap0, so, disable # other bitmaps @@ -148,7 +148,7 @@ class TestDirtyBitmapPostcopyMigration(iotests.QMPTestCase): result = self.vm_a.qmp('x-debug-block-dirty-bitmap-sha256', node='drive0', name='bitmap0') - self.all_discards_sha256 = result['return']['sha256'] + all_discards_sha256 = result['return']['sha256'] # Now, enable some bitmaps, to be updated during migration for i in range(2, nb_bitmaps, 2): @@ -173,10 +173,11 @@ class TestDirtyBitmapPostcopyMigration(iotests.QMPTestCase): event_resume = self.vm_b.event_wait('RESUME') self.vm_b_events.append(event_resume) - return event_resume + return (event_resume, discards1_sha256, all_discards_sha256) def test_postcopy_success(self): - event_resume = self.start_postcopy() + event_resume, discards1_sha256, all_discards_sha256 = \ + self.start_postcopy() # enabled bitmaps should be updated apply_discards(self.vm_b, discards2) @@ -217,7 +218,7 @@ class TestDirtyBitmapPostcopyMigration(iotests.QMPTestCase): for i in range(0, nb_bitmaps, 5): result = self.vm_b.qmp('x-debug-block-dirty-bitmap-sha256', node='drive0', name='bitmap{}'.format(i)) - sha = self.discards1_sha256 if i % 2 else self.all_discards_sha256 + sha = discards1_sha256 if i % 2 else all_discards_sha256 self.assert_qmp(result, 'return/sha256', sha) def test_early_shutdown_destination(self): @@ -271,4 +272,5 @@ class TestDirtyBitmapPostcopyMigration(iotests.QMPTestCase): if __name__ == '__main__': - iotests.main(supported_fmts=['qcow2']) + iotests.main(supported_fmts=['qcow2'], + unsupported_imgopts=['compat']) diff --git a/tests/qemu-iotests/tests/migrate-bitmaps-test b/tests/qemu-iotests/tests/migrate-bitmaps-test index a5c7bc83e0..59f3357580 100755 --- a/tests/qemu-iotests/tests/migrate-bitmaps-test +++ b/tests/qemu-iotests/tests/migrate-bitmaps-test @@ -19,12 +19,12 @@ # along with this program. If not, see . 
# -import os -import iotests -import time import itertools import operator +import os import re + +import iotests from iotests import qemu_img, qemu_img_create, Timeout @@ -37,6 +37,12 @@ mig_cmd = 'exec: cat > ' + mig_file incoming_cmd = 'exec: cat ' + mig_file +def get_bitmap_hash(vm): + result = vm.qmp('x-debug-block-dirty-bitmap-sha256', + node='drive0', name='bitmap0') + return result['return']['sha256'] + + class TestDirtyBitmapMigration(iotests.QMPTestCase): def tearDown(self): self.vm_a.shutdown() @@ -62,21 +68,16 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase): params['persistent'] = True result = vm.qmp('block-dirty-bitmap-add', **params) - self.assert_qmp(result, 'return', {}); - - def get_bitmap_hash(self, vm): - result = vm.qmp('x-debug-block-dirty-bitmap-sha256', - node='drive0', name='bitmap0') - return result['return']['sha256'] + self.assert_qmp(result, 'return', {}) def check_bitmap(self, vm, sha256): result = vm.qmp('x-debug-block-dirty-bitmap-sha256', node='drive0', name='bitmap0') if sha256: - self.assert_qmp(result, 'return/sha256', sha256); + self.assert_qmp(result, 'return/sha256', sha256) else: self.assert_qmp(result, 'error/desc', - "Dirty bitmap 'bitmap0' not found"); + "Dirty bitmap 'bitmap0' not found") def do_test_migration_resume_source(self, persistent, migrate_bitmaps): granularity = 512 @@ -97,7 +98,7 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase): self.add_bitmap(self.vm_a, granularity, persistent) for r in regions: self.vm_a.hmp_qemu_io('drive0', 'write %d %d' % r) - sha256 = self.get_bitmap_hash(self.vm_a) + sha256 = get_bitmap_hash(self.vm_a) result = self.vm_a.qmp('migrate', uri=mig_cmd) while True: @@ -106,7 +107,7 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase): break while True: result = self.vm_a.qmp('query-status') - if (result['return']['status'] == 'postmigrate'): + if result['return']['status'] == 'postmigrate': break # test that bitmap is still here @@ -164,7 +165,7 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase): self.add_bitmap(self.vm_a, granularity, persistent) for r in regions: self.vm_a.hmp_qemu_io('drive0', 'write %d %d' % r) - sha256 = self.get_bitmap_hash(self.vm_a) + sha256 = get_bitmap_hash(self.vm_a) if pre_shutdown: self.vm_a.shutdown() @@ -214,27 +215,14 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase): self.check_bitmap(self.vm_b, sha256 if persistent else False) -def inject_test_case(klass, name, method, *args, **kwargs): +def inject_test_case(klass, suffix, method, *args, **kwargs): mc = operator.methodcaller(method, *args, **kwargs) - setattr(klass, 'test_' + method + name, lambda self: mc(self)) - -for cmb in list(itertools.product((True, False), repeat=5)): - name = ('_' if cmb[0] else '_not_') + 'persistent_' - name += ('_' if cmb[1] else '_not_') + 'migbitmap_' - name += '_online' if cmb[2] else '_offline' - name += '_shared' if cmb[3] else '_nonshared' - if (cmb[4]): - name += '__pre_shutdown' - - inject_test_case(TestDirtyBitmapMigration, name, 'do_test_migration', - *list(cmb)) - -for cmb in list(itertools.product((True, False), repeat=2)): - name = ('_' if cmb[0] else '_not_') + 'persistent_' - name += ('_' if cmb[1] else '_not_') + 'migbitmap' - - inject_test_case(TestDirtyBitmapMigration, name, - 'do_test_migration_resume_source', *list(cmb)) + # We want to add a function attribute to `klass`, so that it is + # correctly converted to a method on instantiation. The + # methodcaller object `mc` is a callable, not a function, so we + # need the lambda to turn it into a function. 
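# --- Editor's sketch (not part of this patch) ---------------------------------
# A tiny standalone version of the dynamic-test-generation idiom used by
# inject_test_case(): operator.methodcaller plus setattr attach parametrised
# test_* methods to a TestCase class before unittest collects them.  The
# DemoCase/do_check/inject names are invented for the example.
import operator
import unittest


class DemoCase(unittest.TestCase):
    def do_check(self, flag: bool) -> None:
        self.assertIsInstance(flag, bool)


def inject(klass, suffix, method, *args, **kwargs):
    mc = operator.methodcaller(method, *args, **kwargs)
    # methodcaller objects are callables, not functions, so wrap the call in a
    # lambda to get a real function attribute that becomes a bound method.
    setattr(klass, 'test_' + method + suffix, lambda self: mc(self))


inject(DemoCase, '_true', 'do_check', True)
inject(DemoCase, '_false', 'do_check', False)

if __name__ == '__main__':
    unittest.main()  # runs test_do_check_true and test_do_check_false
# -------------------------------------------------------------------------------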
+ # pylint: disable=unnecessary-lambda + setattr(klass, 'test_' + method + suffix, lambda self: mc(self)) class TestDirtyBitmapBackingMigration(iotests.QMPTestCase): @@ -270,7 +258,8 @@ class TestDirtyBitmapBackingMigration(iotests.QMPTestCase): self.assert_qmp(result, 'return', {}) # Check that the bitmaps are there - for node in self.vm.qmp('query-named-block-nodes', flat=True)['return']: + nodes = self.vm.qmp('query-named-block-nodes', flat=True)['return'] + for node in nodes: if 'node0' in node['node-name']: self.assert_qmp(node, 'dirty-bitmaps[0]/name', 'bmap0') @@ -287,7 +276,7 @@ class TestDirtyBitmapBackingMigration(iotests.QMPTestCase): """ Continue the source after migration. """ - result = self.vm.qmp('migrate', uri=f'exec: cat > /dev/null') + result = self.vm.qmp('migrate', uri='exec: cat > /dev/null') self.assert_qmp(result, 'return', {}) with Timeout(10, 'Migration timeout'): @@ -297,6 +286,31 @@ class TestDirtyBitmapBackingMigration(iotests.QMPTestCase): self.assert_qmp(result, 'return', {}) +def main() -> None: + for cmb in list(itertools.product((True, False), repeat=5)): + name = ('_' if cmb[0] else '_not_') + 'persistent_' + name += ('_' if cmb[1] else '_not_') + 'migbitmap_' + name += '_online' if cmb[2] else '_offline' + name += '_shared' if cmb[3] else '_nonshared' + if cmb[4]: + name += '__pre_shutdown' + + inject_test_case(TestDirtyBitmapMigration, name, 'do_test_migration', + *list(cmb)) + + for cmb in list(itertools.product((True, False), repeat=2)): + name = ('_' if cmb[0] else '_not_') + 'persistent_' + name += ('_' if cmb[1] else '_not_') + 'migbitmap' + + inject_test_case(TestDirtyBitmapMigration, name, + 'do_test_migration_resume_source', *list(cmb)) + + iotests.main( + supported_fmts=['qcow2'], + supported_protocols=['file'], + unsupported_imgopts=['compat'] + ) + + if __name__ == '__main__': - iotests.main(supported_fmts=['qcow2'], - supported_protocols=['file']) + main() diff --git a/tests/qemu-iotests/tests/migrate-during-backup b/tests/qemu-iotests/tests/migrate-during-backup new file mode 100755 index 0000000000..34103229ee --- /dev/null +++ b/tests/qemu-iotests/tests/migrate-during-backup @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 +# group: migration +# +# Copyright (c) 2021 Virtuozzo International GmbH +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +import os +import iotests +from iotests import qemu_img_create, qemu_io + + +disk_a = os.path.join(iotests.test_dir, 'disk_a') +disk_b = os.path.join(iotests.test_dir, 'disk_b') +size = '1M' +mig_file = os.path.join(iotests.test_dir, 'mig_file') +mig_cmd = 'exec: cat > ' + mig_file + + +class TestMigrateDuringBackup(iotests.QMPTestCase): + def tearDown(self): + self.vm.shutdown() + os.remove(disk_a) + os.remove(disk_b) + os.remove(mig_file) + + def setUp(self): + qemu_img_create('-f', iotests.imgfmt, disk_a, size) + qemu_img_create('-f', iotests.imgfmt, disk_b, size) + qemu_io('-c', f'write 0 {size}', disk_a) + + self.vm = iotests.VM().add_drive(disk_a) + self.vm.launch() + result = self.vm.qmp('blockdev-add', { + 'node-name': 'target', + 'driver': iotests.imgfmt, + 'file': { + 'driver': 'file', + 'filename': disk_b + } + }) + self.assert_qmp(result, 'return', {}) + + def test_migrate(self): + result = self.vm.qmp('blockdev-backup', device='drive0', + target='target', sync='full', + speed=1, x_perf={ + 'max-workers': 1, + 'max-chunk': 64 * 1024 + }) + self.assert_qmp(result, 'return', {}) + + result = self.vm.qmp('job-pause', id='drive0') + self.assert_qmp(result, 'return', {}) + + result = self.vm.qmp('migrate-set-capabilities', + capabilities=[{'capability': 'events', + 'state': True}]) + self.assert_qmp(result, 'return', {}) + result = self.vm.qmp('migrate', uri=mig_cmd) + self.assert_qmp(result, 'return', {}) + + e = self.vm.events_wait((('MIGRATION', + {'data': {'status': 'completed'}}), + ('MIGRATION', + {'data': {'status': 'failed'}}))) + + # Don't assert that e is 'failed' now: this way we'll miss + # possible crash when backup continues :) + + result = self.vm.qmp('block-job-set-speed', device='drive0', + speed=0) + self.assert_qmp(result, 'return', {}) + result = self.vm.qmp('job-resume', id='drive0') + self.assert_qmp(result, 'return', {}) + + # For future: if something changes so that both migration + # and backup pass, let's not miss that moment, as it may + # be a bug as well as improvement. + self.assert_qmp(e, 'data/status', 'failed') + + +if __name__ == '__main__': + iotests.main(supported_fmts=['qcow2'], + supported_protocols=['file']) diff --git a/tests/qemu-iotests/tests/migrate-during-backup.out b/tests/qemu-iotests/tests/migrate-during-backup.out new file mode 100644 index 0000000000..ae1213e6f8 --- /dev/null +++ b/tests/qemu-iotests/tests/migrate-during-backup.out @@ -0,0 +1,5 @@ +. +---------------------------------------------------------------------- +Ran 1 tests + +OK diff --git a/tests/qemu-iotests/tests/migration-permissions b/tests/qemu-iotests/tests/migration-permissions new file mode 100755 index 0000000000..4e1da369c9 --- /dev/null +++ b/tests/qemu-iotests/tests/migration-permissions @@ -0,0 +1,101 @@ +#!/usr/bin/env python3 +# group: migration +# +# Copyright (C) 2021 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +import os +from subprocess import CalledProcessError + +import iotests +from iotests import imgfmt, qemu_img_create, qemu_io + + +test_img = os.path.join(iotests.test_dir, 'test.img') +mig_sock = os.path.join(iotests.sock_dir, 'mig.sock') + + +class TestMigrationPermissions(iotests.QMPTestCase): + def setUp(self): + qemu_img_create('-f', imgfmt, test_img, '1M') + + # Set up two VMs (source and destination) accessing the same raw + # image file with a virtio-blk device; prepare the destination for + # migration with .add_incoming() and enable migration events + vms = [None, None] + for i in range(2): + vms[i] = iotests.VM(path_suffix=f'{i}') + vms[i].add_blockdev(f'file,node-name=prot,filename={test_img}') + vms[i].add_blockdev(f'{imgfmt},node-name=fmt,file=prot') + vms[i].add_device('virtio-blk,drive=fmt') + + if i == 1: + vms[i].add_incoming(f'unix:{mig_sock}') + + vms[i].launch() + + result = vms[i].qmp('migrate-set-capabilities', + capabilities=[ + {'capability': 'events', 'state': True} + ]) + self.assert_qmp(result, 'return', {}) + + self.vm_s = vms[0] + self.vm_d = vms[1] + + def tearDown(self): + self.vm_s.shutdown() + self.vm_d.shutdown() + try: + os.remove(mig_sock) + except FileNotFoundError: + pass + os.remove(test_img) + + # Migrate an image in use by a virtio-blk device to another VM and + # verify that the WRITE permission is unshared both before and after + # migration + def test_post_migration_permissions(self): + # Try to access the image R/W, which should fail because virtio-blk + # has not been configured with share-rw=on + emsg = ('ERROR (pre-migration): qemu-io should not be able to ' + 'access this image, but it reported no error') + with self.assertRaises(CalledProcessError, msg=emsg) as ctx: + qemu_io('-f', imgfmt, '-c', 'quit', test_img) + if 'Is another process using the image' not in ctx.exception.stdout: + raise ctx.exception + + # Now migrate the VM + self.vm_s.qmp('migrate', uri=f'unix:{mig_sock}') + assert self.vm_s.wait_migration(None) + assert self.vm_d.wait_migration(None) + + # Try the same qemu-io access again, verifying that the WRITE + # permission remains unshared + emsg = ('ERROR (post-migration): qemu-io should not be able to ' + 'access this image, but it reported no error') + with self.assertRaises(CalledProcessError, msg=emsg) as ctx: + qemu_io('-f', imgfmt, '-c', 'quit', test_img) + if 'Is another process using the image' not in ctx.exception.stdout: + raise ctx.exception + + +if __name__ == '__main__': + # Only works with raw images because we are testing the + # BlockBackend permissions; image format drivers may additionally + # unshare permissions and thus tamper with the result + iotests.main(supported_fmts=['raw'], + supported_protocols=['file']) diff --git a/tests/qemu-iotests/tests/migration-permissions.out b/tests/qemu-iotests/tests/migration-permissions.out new file mode 100644 index 0000000000..ae1213e6f8 --- /dev/null +++ b/tests/qemu-iotests/tests/migration-permissions.out @@ -0,0 +1,5 @@ +. +---------------------------------------------------------------------- +Ran 1 tests + +OK diff --git a/tests/qemu-iotests/tests/mirror-ready-cancel-error b/tests/qemu-iotests/tests/mirror-ready-cancel-error new file mode 100755 index 0000000000..01217459b9 --- /dev/null +++ b/tests/qemu-iotests/tests/mirror-ready-cancel-error @@ -0,0 +1,146 @@ +#!/usr/bin/env python3 +# group: rw quick +# +# Test what happens when errors occur to a mirror job after it has +# been cancelled in the READY phase +# +# Copyright (C) 2021 Red Hat, Inc. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import os +import iotests + + +image_size = 1 * 1024 * 1024 +source = os.path.join(iotests.test_dir, 'source.img') +target = os.path.join(iotests.test_dir, 'target.img') + + +class TestMirrorReadyCancelError(iotests.QMPTestCase): + def setUp(self) -> None: + iotests.qemu_img_create('-f', iotests.imgfmt, source, str(image_size)) + iotests.qemu_img_create('-f', iotests.imgfmt, target, str(image_size)) + + # Ensure that mirror will copy something before READY so the + # target format layer will forward the pre-READY flush to its + # file child + iotests.qemu_io('-c', 'write -P 1 0 64k', source) + + self.vm = iotests.VM() + self.vm.launch() + + def tearDown(self) -> None: + self.vm.shutdown() + os.remove(source) + os.remove(target) + + def add_blockdevs(self, once: bool) -> None: + res = self.vm.qmp('blockdev-add', + **{'node-name': 'source', + 'driver': iotests.imgfmt, + 'file': { + 'driver': 'file', + 'filename': source + }}) + self.assert_qmp(res, 'return', {}) + + # blkdebug notes: + # Enter state 2 on the first flush, which happens before the + # job enters the READY state. The second flush will happen + # when the job is about to complete, and we want that one to + # fail. 
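+        # In the configuration below, 'set-state' advances blkdebug from its +        # default state 1 to state 2 on the first flush_to_disk event, and +        # 'inject-error' then fails flush_to_disk only while in state 2, +        # either once or persistently depending on the `once` argument.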
+ res = self.vm.qmp('blockdev-add', + **{'node-name': 'target', + 'driver': iotests.imgfmt, + 'file': { + 'driver': 'blkdebug', + 'image': { + 'driver': 'file', + 'filename': target + }, + 'set-state': [{ + 'event': 'flush_to_disk', + 'state': 1, + 'new_state': 2 + }], + 'inject-error': [{ + 'event': 'flush_to_disk', + 'once': once, + 'immediately': True, + 'state': 2 + }]}}) + self.assert_qmp(res, 'return', {}) + + def start_mirror(self) -> None: + res = self.vm.qmp('blockdev-mirror', + job_id='mirror', + device='source', + target='target', + filter_node_name='mirror-top', + sync='full', + on_target_error='stop') + self.assert_qmp(res, 'return', {}) + + def cancel_mirror_with_error(self) -> None: + self.vm.event_wait('BLOCK_JOB_READY') + + # Write something so will not leave the job immediately, but + # flush first (which will fail, thanks to blkdebug) + res = self.vm.qmp('human-monitor-command', + command_line='qemu-io mirror-top "write -P 2 0 64k"') + self.assert_qmp(res, 'return', '') + + # Drain status change events + while self.vm.event_wait('JOB_STATUS_CHANGE', timeout=0.0) is not None: + pass + + res = self.vm.qmp('block-job-cancel', device='mirror') + self.assert_qmp(res, 'return', {}) + + self.vm.event_wait('BLOCK_JOB_ERROR') + + def test_transient_error(self) -> None: + self.add_blockdevs(True) + self.start_mirror() + self.cancel_mirror_with_error() + + while True: + e = self.vm.event_wait('JOB_STATUS_CHANGE') + if e['data']['status'] == 'standby': + # Transient error, try again + self.vm.qmp('block-job-resume', device='mirror') + elif e['data']['status'] == 'null': + break + + def test_persistent_error(self) -> None: + self.add_blockdevs(False) + self.start_mirror() + self.cancel_mirror_with_error() + + while True: + e = self.vm.event_wait('JOB_STATUS_CHANGE') + if e['data']['status'] == 'standby': + # Persistent error, no point in continuing + self.vm.qmp('block-job-cancel', device='mirror', force=True) + elif e['data']['status'] == 'null': + break + + +if __name__ == '__main__': + # LUKS would require special key-secret handling in add_blockdevs() + iotests.main(supported_fmts=['generic'], + unsupported_fmts=['luks'], + supported_protocols=['file']) diff --git a/tests/qemu-iotests/tests/mirror-ready-cancel-error.out b/tests/qemu-iotests/tests/mirror-ready-cancel-error.out new file mode 100644 index 0000000000..fbc63e62f8 --- /dev/null +++ b/tests/qemu-iotests/tests/mirror-ready-cancel-error.out @@ -0,0 +1,5 @@ +.. 
+---------------------------------------------------------------------- +Ran 2 tests + +OK diff --git a/tests/qemu-iotests/tests/mirror-top-perms b/tests/qemu-iotests/tests/mirror-top-perms index 451a0666f8..8bca592708 100755 --- a/tests/qemu-iotests/tests/mirror-top-perms +++ b/tests/qemu-iotests/tests/mirror-top-perms @@ -20,12 +20,11 @@ # import os -import iotests -from iotests import qemu_img -# Import qemu after iotests.py has amended sys.path -# pylint: disable=wrong-import-order -import qemu +from qemu.machine import machine + +import iotests +from iotests import change_log_level, qemu_img image_size = 1 * 1024 * 1024 @@ -34,8 +33,7 @@ source = os.path.join(iotests.test_dir, 'source.img') class TestMirrorTopPerms(iotests.QMPTestCase): def setUp(self): - assert qemu_img('create', '-f', iotests.imgfmt, source, - str(image_size)) == 0 + qemu_img('create', '-f', iotests.imgfmt, source, str(image_size)) self.vm = iotests.VM() self.vm.add_drive(source) self.vm.add_blockdev(f'null-co,node-name=null,size={image_size}') @@ -47,7 +45,7 @@ class TestMirrorTopPerms(iotests.QMPTestCase): def tearDown(self): try: self.vm.shutdown() - except qemu.machine.AbnormalShutdown: + except machine.AbnormalShutdown: pass if self.vm_b is not None: @@ -99,11 +97,13 @@ class TestMirrorTopPerms(iotests.QMPTestCase): self.vm_b.add_blockdev(f'file,node-name=drive0,filename={source}') self.vm_b.add_device('virtio-blk,drive=drive0,share-rw=on') try: - self.vm_b.launch() - print('ERROR: VM B launched successfully, this should not have ' - 'happened') - except qemu.qmp.QMPConnectError: - assert 'Is another process using the image' in self.vm_b.get_log() + # Silence QMP logging errors temporarily. + with change_log_level('qemu.qmp'): + self.vm_b.launch() + print('ERROR: VM B launched successfully, ' + 'this should not have happened') + except machine.VMLaunchFailure as exc: + assert 'Is another process using the image' in exc.output result = self.vm.qmp('block-job-cancel', device='mirror') diff --git a/tests/qemu-iotests/tests/nbd-multiconn b/tests/qemu-iotests/tests/nbd-multiconn new file mode 100755 index 0000000000..b121f2e363 --- /dev/null +++ b/tests/qemu-iotests/tests/nbd-multiconn @@ -0,0 +1,145 @@ +#!/usr/bin/env python3 +# group: rw auto quick +# +# Test cases for NBD multi-conn advertisement +# +# Copyright (C) 2022 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
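+# Note: the libnbd binding ('nbd') is imported at the bottom of this file, +# inside the __main__ block, so that the whole test can be skipped gracefully +# when libnbd is not installed; open_nbd() and the test cases below assume it +# is available by the time they run.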
+ +import os +from contextlib import contextmanager +import iotests +from iotests import qemu_img_create, qemu_io + + +disk = os.path.join(iotests.test_dir, 'disk') +size = '4M' +nbd_sock = os.path.join(iotests.sock_dir, 'nbd_sock') +nbd_uri = 'nbd+unix:///{}?socket=' + nbd_sock + + +@contextmanager +def open_nbd(export_name): + h = nbd.NBD() + try: + h.connect_uri(nbd_uri.format(export_name)) + yield h + finally: + h.shutdown() + +class TestNbdMulticonn(iotests.QMPTestCase): + def setUp(self): + qemu_img_create('-f', iotests.imgfmt, disk, size) + qemu_io('-c', 'w -P 1 0 2M', '-c', 'w -P 2 2M 2M', disk) + + self.vm = iotests.VM() + self.vm.launch() + result = self.vm.qmp('blockdev-add', { + 'driver': 'qcow2', + 'node-name': 'n', + 'file': {'driver': 'file', 'filename': disk} + }) + self.assert_qmp(result, 'return', {}) + + def tearDown(self): + self.vm.shutdown() + os.remove(disk) + try: + os.remove(nbd_sock) + except OSError: + pass + + @contextmanager + def run_server(self, max_connections=None): + args = { + 'addr': { + 'type': 'unix', + 'data': {'path': nbd_sock} + } + } + if max_connections is not None: + args['max-connections'] = max_connections + + result = self.vm.qmp('nbd-server-start', args) + self.assert_qmp(result, 'return', {}) + yield + + result = self.vm.qmp('nbd-server-stop') + self.assert_qmp(result, 'return', {}) + + def add_export(self, name, writable=None): + args = { + 'type': 'nbd', + 'id': name, + 'node-name': 'n', + 'name': name, + } + if writable is not None: + args['writable'] = writable + + result = self.vm.qmp('block-export-add', args) + self.assert_qmp(result, 'return', {}) + + def test_default_settings(self): + with self.run_server(): + self.add_export('r') + self.add_export('w', writable=True) + with open_nbd('r') as h: + self.assertTrue(h.can_multi_conn()) + with open_nbd('w') as h: + self.assertTrue(h.can_multi_conn()) + + def test_limited_connections(self): + with self.run_server(max_connections=1): + self.add_export('r') + self.add_export('w', writable=True) + with open_nbd('r') as h: + self.assertFalse(h.can_multi_conn()) + with open_nbd('w') as h: + self.assertFalse(h.can_multi_conn()) + + def test_parallel_writes(self): + with self.run_server(): + self.add_export('w', writable=True) + + clients = [nbd.NBD() for _ in range(3)] + for c in clients: + c.connect_uri(nbd_uri.format('w')) + self.assertTrue(c.can_multi_conn()) + + initial_data = clients[0].pread(1024 * 1024, 0) + self.assertEqual(initial_data, b'\x01' * 1024 * 1024) + + updated_data = b'\x03' * 1024 * 1024 + clients[1].pwrite(updated_data, 0) + clients[2].flush() + current_data = clients[0].pread(1024 * 1024, 0) + + self.assertEqual(updated_data, current_data) + + for i in range(3): + clients[i].shutdown() + + +if __name__ == '__main__': + try: + # Easier to use libnbd than to try and set up parallel + # 'qemu-nbd --list' or 'qemu-io' processes, but not all systems + # have libnbd installed. + import nbd # type: ignore + + iotests.main(supported_fmts=['qcow2']) + except ImportError: + iotests.notrun('libnbd not installed') diff --git a/tests/qemu-iotests/tests/nbd-multiconn.out b/tests/qemu-iotests/tests/nbd-multiconn.out new file mode 100644 index 0000000000..8d7e996700 --- /dev/null +++ b/tests/qemu-iotests/tests/nbd-multiconn.out @@ -0,0 +1,5 @@ +... 
+---------------------------------------------------------------------- +Ran 3 tests + +OK diff --git a/tests/qemu-iotests/tests/nbd-qemu-allocation.out b/tests/qemu-iotests/tests/nbd-qemu-allocation.out index 0bf1abb063..9d938db24e 100644 --- a/tests/qemu-iotests/tests/nbd-qemu-allocation.out +++ b/tests/qemu-iotests/tests/nbd-qemu-allocation.out @@ -17,7 +17,7 @@ wrote 2097152/2097152 bytes at offset 1048576 exports available: 1 export: '' size: 4194304 - flags: 0x58f ( readonly flush fua df multi cache ) + flags: 0x48f ( readonly flush fua df cache ) min block: 1 opt block: 4096 max block: 33554432 diff --git a/tests/qemu-iotests/tests/nbd-reconnect-on-open b/tests/qemu-iotests/tests/nbd-reconnect-on-open new file mode 100755 index 0000000000..d0b401b060 --- /dev/null +++ b/tests/qemu-iotests/tests/nbd-reconnect-on-open @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 +# +# Test nbd reconnect on open +# +# Copyright (c) 2020 Virtuozzo International GmbH +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import time + +import iotests +from iotests import qemu_img_create, file_path, qemu_io_popen, qemu_nbd, \ + qemu_io_log, log + +iotests.script_initialize(supported_fmts=['qcow2']) + +disk, nbd_sock = file_path('disk', 'nbd-sock') + + +def create_args(open_timeout): + return ['--image-opts', '-c', 'read 0 1M', + f'driver=nbd,open-timeout={open_timeout},' + f'server.type=unix,server.path={nbd_sock}'] + + +def check_fail_to_connect(open_timeout): + log(f'Check fail to connect with {open_timeout} seconds of timeout') + + start_t = time.time() + qemu_io_log(*create_args(open_timeout), check=False) + delta_t = time.time() - start_t + + max_delta = open_timeout + 0.2 + if open_timeout <= delta_t <= max_delta: + log(f'qemu_io finished in {open_timeout}..{max_delta} seconds, OK') + else: + note = 'too early' if delta_t < open_timeout else 'too long' + log(f'qemu_io finished in {delta_t:.1f} seconds, {note}') + + +qemu_img_create('-f', iotests.imgfmt, disk, '1M') + +# Start NBD client when NBD server is not yet running. It should not fail, but +# wait for 5 seconds for the server to be available. +client = qemu_io_popen(*create_args(5)) + +time.sleep(1) +qemu_nbd('-k', nbd_sock, '-f', iotests.imgfmt, disk) + +# client should succeed +log(client.communicate()[0], filters=[iotests.filter_qemu_io]) + +# Server was started without --persistent flag, so it should be off now. Let's +# check it and at the same time check that with open-timeout=0 client fails +# immediately. 
+check_fail_to_connect(0) + +# Check that we will fail after non-zero timeout if server is still unavailable +check_fail_to_connect(1) diff --git a/tests/qemu-iotests/tests/nbd-reconnect-on-open.out b/tests/qemu-iotests/tests/nbd-reconnect-on-open.out new file mode 100644 index 0000000000..a35ae30ea4 --- /dev/null +++ b/tests/qemu-iotests/tests/nbd-reconnect-on-open.out @@ -0,0 +1,11 @@ +read 1048576/1048576 bytes at offset 0 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +Check fail to connect with 0 seconds of timeout +qemu-io: can't open: Failed to connect to 'TEST_DIR/PID-nbd-sock': No such file or directory + +qemu_io finished in 0..0.2 seconds, OK +Check fail to connect with 1 seconds of timeout +qemu-io: can't open: Failed to connect to 'TEST_DIR/PID-nbd-sock': No such file or directory + +qemu_io finished in 1..1.2 seconds, OK diff --git a/tests/qemu-iotests/tests/parallels-read-bitmap b/tests/qemu-iotests/tests/parallels-read-bitmap index af6b9c5db3..38ab5fa5b2 100755 --- a/tests/qemu-iotests/tests/parallels-read-bitmap +++ b/tests/qemu-iotests/tests/parallels-read-bitmap @@ -18,9 +18,8 @@ # along with this program. If not, see . # -import json import iotests -from iotests import qemu_nbd_popen, qemu_img_pipe, log, file_path +from iotests import qemu_nbd_popen, qemu_img_map, log, file_path iotests.script_initialize(supported_fmts=['parallels']) @@ -36,8 +35,7 @@ iotests.unarchive_sample_image('parallels-with-bitmap', disk) with qemu_nbd_popen('--read-only', f'--socket={nbd_sock}', f'--bitmap={bitmap}', '-f', iotests.imgfmt, disk): - out = qemu_img_pipe('map', '--output=json', '--image-opts', nbd_opts) - chunks = json.loads(out) + chunks = qemu_img_map('--image-opts', nbd_opts) cluster = 64 * 1024 log('dirty clusters (cluster size is 64K):') diff --git a/tests/qemu-iotests/tests/qsd-jobs.out b/tests/qemu-iotests/tests/qsd-jobs.out index 189423354b..c1bc9b8356 100644 --- a/tests/qemu-iotests/tests/qsd-jobs.out +++ b/tests/qemu-iotests/tests/qsd-jobs.out @@ -8,7 +8,7 @@ QMP_VERSION {"return": {}} {"return": {}} {"return": {}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}} === Streaming can't get permission on base node === diff --git a/tests/qemu-iotests/tests/remove-bitmap-from-backing b/tests/qemu-iotests/tests/remove-bitmap-from-backing index 8d48fc0f3c..15be32dcb9 100755 --- a/tests/qemu-iotests/tests/remove-bitmap-from-backing +++ b/tests/qemu-iotests/tests/remove-bitmap-from-backing @@ -19,20 +19,21 @@ # import iotests -from iotests import log, qemu_img_create, qemu_img, qemu_img_pipe +from iotests import log, qemu_img_create, qemu_img, qemu_img_info -iotests.script_initialize(supported_fmts=['qcow2']) +iotests.script_initialize(supported_fmts=['qcow2'], + unsupported_imgopts=['compat']) top, base = iotests.file_path('top', 'base') size = '1M' -assert qemu_img_create('-f', iotests.imgfmt, base, size) == 0 -assert qemu_img_create('-f', iotests.imgfmt, '-b', base, - '-F', iotests.imgfmt, top, size) == 0 +qemu_img_create('-f', iotests.imgfmt, base, size) +qemu_img_create('-f', iotests.imgfmt, '-b', base, + '-F', iotests.imgfmt, top, size) -assert qemu_img('bitmap', '--add', base, 'bitmap0') == 0 +qemu_img('bitmap', '--add', base, 'bitmap0') # Just assert that our 
method of checking bitmaps in the image works. -assert 'bitmaps' in qemu_img_pipe('info', base) +assert 'bitmaps' in qemu_img_info(base)['format-specific']['data'] vm = iotests.VM().add_drive(top, 'backing.node-name=base') vm.launch() @@ -67,5 +68,5 @@ if result != {'return': {}}: vm.shutdown() -if 'bitmaps' in qemu_img_pipe('info', base): +if 'bitmaps' in qemu_img_info(base)['format-specific']['data']: log('ERROR: Bitmap is still in the base image') diff --git a/tests/qemu-iotests/tests/reopen-file b/tests/qemu-iotests/tests/reopen-file new file mode 100755 index 0000000000..8590a94d53 --- /dev/null +++ b/tests/qemu-iotests/tests/reopen-file @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 +# group: rw quick +# +# Test reopening a format driver's file child +# +# Copyright (C) 2022 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import os +import iotests +from iotests import imgfmt, qemu_img_create, QMPTestCase + + +image_size = 1 * 1024 * 1024 +test_img = os.path.join(iotests.test_dir, 'test.img') + + +class TestReopenFile(QMPTestCase): + def setUp(self) -> None: + res = qemu_img_create('-f', imgfmt, test_img, str(image_size)) + assert res.returncode == 0 + + # Add format driver node ('format') on top of the file ('file'), then + # add another raw node ('raw') on top of 'file' so for the reopen we + # can just switch from 'file' to 'raw' + self.vm = iotests.VM() + self.vm.add_blockdev(self.vm.qmp_to_opts({ + 'driver': imgfmt, + 'node-name': 'format', + 'file': { + 'driver': 'file', + 'node-name': 'file', + 'filename': test_img + } + })) + self.vm.add_blockdev(self.vm.qmp_to_opts({ + 'driver': 'raw', + 'node-name': 'raw', + 'file': 'file' + })) + self.vm.launch() + + def tearDown(self) -> None: + self.vm.shutdown() + os.remove(test_img) + + # Check if there was any qemu-io run that failed + if 'Pattern verification failed' in self.vm.get_log(): + print('ERROR: Pattern verification failed:') + print(self.vm.get_log()) + self.fail('qemu-io pattern verification failed') + + def test_reopen_file(self) -> None: + result = self.vm.qmp('blockdev-reopen', options=[{ + 'driver': imgfmt, + 'node-name': 'format', + 'file': 'raw' + }]) + self.assert_qmp(result, 'return', {}) + + # Do some I/O to the image to see whether it still works + # (Pattern verification will be checked by tearDown()) + result = self.vm.qmp('human-monitor-command', + command_line='qemu-io format "write -P 42 0 64k"') + self.assert_qmp(result, 'return', '') + + result = self.vm.qmp('human-monitor-command', + command_line='qemu-io format "read -P 42 0 64k"') + self.assert_qmp(result, 'return', '') + + +if __name__ == '__main__': + # Must support creating images and reopen + iotests.main(supported_fmts=['qcow', 'qcow2', 'qed', 'raw', 'vdi', 'vhdx', + 'vmdk', 'vpc'], + supported_protocols=['file']) diff --git a/tests/qemu-iotests/tests/reopen-file.out b/tests/qemu-iotests/tests/reopen-file.out new file mode 100644 index 0000000000..ae1213e6f8 --- /dev/null +++ 
b/tests/qemu-iotests/tests/reopen-file.out @@ -0,0 +1,5 @@ +. +---------------------------------------------------------------------- +Ran 1 tests + +OK diff --git a/tests/qemu-iotests/tests/stream-error-on-reset b/tests/qemu-iotests/tests/stream-error-on-reset new file mode 100755 index 0000000000..5a8c3a9e8d --- /dev/null +++ b/tests/qemu-iotests/tests/stream-error-on-reset @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 +# group: rw quick +# +# Test what happens when a stream job completes in a blk_drain(). +# +# Copyright (C) 2022 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import os +import iotests +from iotests import imgfmt, qemu_img_create, qemu_io, QMPTestCase + + +image_size = 1 * 1024 * 1024 +data_size = 64 * 1024 +base = os.path.join(iotests.test_dir, 'base.img') +top = os.path.join(iotests.test_dir, 'top.img') + + +# We want to test completing a stream job in a blk_drain(). +# +# The blk_drain() we are going to use is a virtio-scsi device resetting, +# which we can trigger by resetting the system. +# +# In order to have the block job complete on drain, we (1) throttle its +# base image so we can start the drain after it has begun, but before it +# completes, and (2) make it encounter an I/O error on the ensuing write. +# (If it completes regularly, the completion happens after the drain for +# some reason.) 
+ +class TestStreamErrorOnReset(QMPTestCase): + def setUp(self) -> None: + """ + Create two images: + - base image {base} with {data_size} bytes allocated + - top image {top} without any data allocated + + And the following VM configuration: + - base image throttled to {data_size} + - top image with a blkdebug configuration so the first write access + to it will result in an error + - top image is attached to a virtio-scsi device + """ + qemu_img_create('-f', imgfmt, base, str(image_size)) + qemu_io('-c', f'write 0 {data_size}', base) + qemu_img_create('-f', imgfmt, top, str(image_size)) + + self.vm = iotests.VM() + self.vm.add_args('-accel', 'tcg') # Make throttling work properly + self.vm.add_object(self.vm.qmp_to_opts({ + 'qom-type': 'throttle-group', + 'id': 'thrgr', + 'x-bps-total': str(data_size) + })) + self.vm.add_blockdev(self.vm.qmp_to_opts({ + 'driver': imgfmt, + 'node-name': 'base', + 'file': { + 'driver': 'throttle', + 'throttle-group': 'thrgr', + 'file': { + 'driver': 'file', + 'filename': base + } + } + })) + self.vm.add_blockdev(self.vm.qmp_to_opts({ + 'driver': imgfmt, + 'node-name': 'top', + 'file': { + 'driver': 'blkdebug', + 'node-name': 'top-blkdebug', + 'inject-error': [{ + 'event': 'pwritev', + 'immediately': 'true', + 'once': 'true' + }], + 'image': { + 'driver': 'file', + 'filename': top + } + }, + 'backing': 'base' + })) + self.vm.add_device(self.vm.qmp_to_opts({ + 'driver': 'virtio-scsi', + 'id': 'vscsi' + })) + self.vm.add_device(self.vm.qmp_to_opts({ + 'driver': 'scsi-hd', + 'bus': 'vscsi.0', + 'drive': 'top' + })) + self.vm.launch() + + def tearDown(self) -> None: + self.vm.shutdown() + os.remove(top) + os.remove(base) + + def test_stream_error_on_reset(self) -> None: + # Launch a stream job, which will take at least a second to + # complete, because the base image is throttled (so we can + # get in between it having started and it having completed) + res = self.vm.qmp('block-stream', job_id='stream', device='top') + self.assert_qmp(res, 'return', {}) + + while True: + ev = self.vm.event_wait('JOB_STATUS_CHANGE') + if ev['data']['status'] == 'running': + # Once the stream job is running, reset the system, which + # forces the virtio-scsi device to be reset, thus draining + # the stream job, and making it complete. Completing + # inside of that drain should not result in a segfault. + res = self.vm.qmp('system_reset') + self.assert_qmp(res, 'return', {}) + elif ev['data']['status'] == 'null': + # The test is done once the job is gone + break + + +if __name__ == '__main__': + # Passes with any format with backing file support, but qed and + # qcow1 do not seem to exercise the used-to-be problematic code + # path, so there is no point in having them in this list + iotests.main(supported_fmts=['qcow2', 'vmdk'], + supported_protocols=['file']) diff --git a/tests/qemu-iotests/tests/stream-error-on-reset.out b/tests/qemu-iotests/tests/stream-error-on-reset.out new file mode 100644 index 0000000000..ae1213e6f8 --- /dev/null +++ b/tests/qemu-iotests/tests/stream-error-on-reset.out @@ -0,0 +1,5 @@ +. +---------------------------------------------------------------------- +Ran 1 tests + +OK diff --git a/tests/qemu-iotests/tests/stream-under-throttle b/tests/qemu-iotests/tests/stream-under-throttle new file mode 100755 index 0000000000..8d2d9e1684 --- /dev/null +++ b/tests/qemu-iotests/tests/stream-under-throttle @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 +# group: rw +# +# Test streaming with throttle nodes on top +# +# Copyright (C) 2022 Red Hat, Inc. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import asyncio +import os +from typing import List +import iotests +from iotests import qemu_img_create, qemu_io + + +image_size = 256 * 1024 * 1024 +base_img = os.path.join(iotests.test_dir, 'base.img') +top_img = os.path.join(iotests.test_dir, 'top.img') + + +class TcgVM(iotests.VM): + ''' + Variant of iotests.VM that uses -accel tcg. Simply using + iotests.VM.add_args('-accel', 'tcg') is not sufficient, because that will + put -accel qtest before -accel tcg, and -accel arguments are prioritized in + the order they appear. + ''' + @property + def _base_args(self) -> List[str]: + # Put -accel tcg first so it takes precedence + return ['-accel', 'tcg'] + super()._base_args + + +class TestStreamWithThrottle(iotests.QMPTestCase): + def setUp(self) -> None: + ''' + Create a simple backing chain between two images, write something to + the base image. Attach them to the VM underneath two throttle nodes, + one of which has actually no limits set, but the other does. Then put + a virtio-blk device on top. + This test configuration has been taken from + https://gitlab.com/qemu-project/qemu/-/issues/1215 + ''' + qemu_img_create('-f', iotests.imgfmt, base_img, str(image_size)) + qemu_img_create('-f', iotests.imgfmt, '-b', base_img, '-F', + iotests.imgfmt, top_img, str(image_size)) + + # Write something to stream + qemu_io(base_img, '-c', f'write 0 {image_size}') + + blockdev = { + 'driver': 'throttle', + 'node-name': 'throttled-node', + 'throttle-group': 'thrgr-limited', + 'file': { + 'driver': 'throttle', + 'throttle-group': 'thrgr-unlimited', + 'file': { + 'driver': iotests.imgfmt, + 'node-name': 'unthrottled-node', + 'file': { + 'driver': 'file', + 'filename': top_img + } + } + } + } + + # Issue 1215 is not reproducible in qtest mode, which is why we need to + # create an -accel tcg VM + self.vm = TcgVM() + self.vm.add_object('iothread,id=iothr0') + self.vm.add_object('throttle-group,id=thrgr-unlimited') + self.vm.add_object('throttle-group,id=thrgr-limited,' + 'x-iops-total=10000,x-bps-total=104857600') + self.vm.add_blockdev(self.vm.qmp_to_opts(blockdev)) + self.vm.add_device('virtio-blk,iothread=iothr0,drive=throttled-node') + self.vm.launch() + + def tearDown(self) -> None: + self.vm.shutdown() + os.remove(top_img) + os.remove(base_img) + + def test_stream(self) -> None: + ''' + Do a simple stream beneath the two throttle nodes. Should complete + with no problems. 
+ ''' + result = self.vm.qmp('block-stream', + job_id='stream', + device='unthrottled-node') + self.assert_qmp(result, 'return', {}) + + # Should succeed and not time out + try: + self.vm.run_job('stream') + except asyncio.TimeoutError: + # VM may be stuck, kill it before tearDown() + self.vm.kill() + raise + + +if __name__ == '__main__': + # Must support backing images + iotests.main(supported_fmts=['qcow', 'qcow2', 'qed'], + supported_protocols=['file'], + required_fmts=['throttle']) diff --git a/tests/qemu-iotests/tests/stream-under-throttle.out b/tests/qemu-iotests/tests/stream-under-throttle.out new file mode 100644 index 0000000000..ae1213e6f8 --- /dev/null +++ b/tests/qemu-iotests/tests/stream-under-throttle.out @@ -0,0 +1,5 @@ +. +---------------------------------------------------------------------- +Ran 1 tests + +OK diff --git a/tests/qtest/ac97-test.c b/tests/qtest/ac97-test.c index e09f2495d2..74103efdfa 100644 --- a/tests/qtest/ac97-test.c +++ b/tests/qtest/ac97-test.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/module.h" #include "libqos/qgraph.h" #include "libqos/pci.h" @@ -28,7 +28,7 @@ static void *ac97_get_driver(void *obj, const char *interface) return &ac97->dev; } - fprintf(stderr, "%s not present in e1000e\n", interface); + fprintf(stderr, "%s not present in ac97\n", interface); g_assert_not_reached(); } diff --git a/tests/qtest/acpi-utils.c b/tests/qtest/acpi-utils.c index d2a202efca..673fc97586 100644 --- a/tests/qtest/acpi-utils.c +++ b/tests/qtest/acpi-utils.c @@ -14,7 +14,6 @@ #include "qemu/osdep.h" #include -#include "qemu-common.h" #include "qemu/bitmap.h" #include "acpi-utils.h" #include "boot-sector.h" @@ -98,6 +97,20 @@ void acpi_fetch_table(QTestState *qts, uint8_t **aml, uint32_t *aml_len, ACPI_ASSERT_CMP(**aml, sig); } if (verify_checksum) { + if (acpi_calc_checksum(*aml, *aml_len)) { + gint fd, ret; + char *fname = NULL; + GError *error = NULL; + + fprintf(stderr, "Invalid '%.4s'(%d)\n", *aml, *aml_len); + fd = g_file_open_tmp("malformed-XXXXXX.dat", &fname, &error); + g_assert_no_error(error); + fprintf(stderr, "Dumping invalid table into '%s'\n", fname); + ret = qemu_write_full(fd, *aml, *aml_len); + g_assert(ret == *aml_len); + close(fd); + g_free(fname); + } g_assert(!acpi_calc_checksum(*aml, *aml_len)); } } diff --git a/tests/qtest/acpi-utils.h b/tests/qtest/acpi-utils.h index 261784d251..0c86780689 100644 --- a/tests/qtest/acpi-utils.h +++ b/tests/qtest/acpi-utils.h @@ -13,7 +13,7 @@ #ifndef TEST_ACPI_UTILS_H #define TEST_ACPI_UTILS_H -#include "libqos/libqtest.h" +#include "libqtest.h" /* DSDT and SSDTs format */ typedef struct { diff --git a/tests/qtest/ahci-test.c b/tests/qtest/ahci-test.c index 8073ccc205..66652fed04 100644 --- a/tests/qtest/ahci-test.c +++ b/tests/qtest/ahci-test.c @@ -25,12 +25,11 @@ #include "qemu/osdep.h" #include -#include "libqos/libqtest.h" +#include "libqtest.h" #include "libqos/libqos-pc.h" #include "libqos/ahci.h" #include "libqos/pci-pc.h" -#include "qemu-common.h" #include "qapi/qmp/qdict.h" #include "qemu/host-utils.h" @@ -45,9 +44,9 @@ #define TEST_IMAGE_SIZE_MB_SMALL 64 /*** Globals ***/ -static char tmp_path[] = "/tmp/qtest.XXXXXX"; -static char debug_path[] = "/tmp/qtest-blkdebug.XXXXXX"; -static char mig_socket[] = "/tmp/qtest-migration.XXXXXX"; +static char *tmp_path; +static char *debug_path; +static char *mig_socket; static bool ahci_pedantic; static const char *imgfmt; static unsigned test_image_size_mb; @@ -1438,10 +1437,10 @@ static 
void test_ncq_simple(void) static int prepare_iso(size_t size, unsigned char **buf, char **name) { - char cdrom_path[] = "/tmp/qtest.iso.XXXXXX"; + g_autofree char *cdrom_path = NULL; unsigned char *patt; ssize_t ret; - int fd = mkstemp(cdrom_path); + int fd = g_file_open_tmp("qtest.iso.XXXXXX", &cdrom_path, NULL); g_assert(fd != -1); g_assert(buf); @@ -1834,7 +1833,7 @@ static void create_ahci_io_test(enum IOMode type, enum AddrMode addr, int main(int argc, char **argv) { - const char *arch; + const char *arch, *base; int ret; int fd; int c; @@ -1872,8 +1871,22 @@ int main(int argc, char **argv) return 0; } + /* + * "base" stores the starting point where we create temporary files. + * + * On Windows, this is set to the relative path of current working + * directory, because the absolute path causes the blkdebug filename + * parser fail to parse "blkdebug:path/to/config:path/to/image". + */ +#ifndef _WIN32 + base = g_get_tmp_dir(); +#else + base = "."; +#endif + /* Create a temporary image */ - fd = mkstemp(tmp_path); + tmp_path = g_strdup_printf("%s/qtest.XXXXXX", base); + fd = g_mkstemp(tmp_path); g_assert(fd >= 0); if (have_qemu_img()) { imgfmt = "qcow2"; @@ -1890,12 +1903,13 @@ int main(int argc, char **argv) close(fd); /* Create temporary blkdebug instructions */ - fd = mkstemp(debug_path); + debug_path = g_strdup_printf("%s/qtest-blkdebug.XXXXXX", base); + fd = g_mkstemp(debug_path); g_assert(fd >= 0); close(fd); /* Reserve a hollow file to use as a socket for migration tests */ - fd = mkstemp(mig_socket); + fd = g_file_open_tmp("qtest-migration.XXXXXX", &mig_socket, NULL); g_assert(fd >= 0); close(fd); @@ -1948,8 +1962,11 @@ int main(int argc, char **argv) /* Cleanup */ unlink(tmp_path); + g_free(tmp_path); unlink(debug_path); + g_free(debug_path); unlink(mig_socket); + g_free(mig_socket); return ret; } diff --git a/tests/qtest/am53c974-test.c b/tests/qtest/am53c974-test.c index d996866cd4..ed3ac7db20 100644 --- a/tests/qtest/am53c974-test.c +++ b/tests/qtest/am53c974-test.c @@ -9,7 +9,7 @@ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" static void test_cmdfifo_underflow_ok(void) @@ -189,6 +189,68 @@ static void test_cancelled_request_ok(void) qtest_quit(s); } +static void test_inflight_cancel_ok(void) +{ + QTestState *s = qtest_init( + "-device am53c974,id=scsi " + "-device scsi-hd,drive=disk0 -drive " + "id=disk0,if=none,file=null-co://,format=raw -nodefaults"); + qtest_outl(s, 0xcf8, 0x80001000); + qtest_inw(s, 0xcfc); + qtest_outl(s, 0xcf8, 0x80001010); + qtest_outl(s, 0xcfc, 0xffffffff); + qtest_outl(s, 0xcf8, 0x80001010); + qtest_inl(s, 0xcfc); + qtest_outl(s, 0xcf8, 0x80001010); + qtest_outl(s, 0xcfc, 0xc001); + qtest_outl(s, 0xcf8, 0x80001004); + qtest_inw(s, 0xcfc); + qtest_outl(s, 0xcf8, 0x80001004); + qtest_outw(s, 0xcfc, 0x7); + qtest_outl(s, 0xcf8, 0x80001004); + qtest_inw(s, 0xcfc); + qtest_inb(s, 0xc000); + qtest_outb(s, 0xc008, 0x8); + qtest_outw(s, 0xc00b, 0x4100); + qtest_outb(s, 0xc009, 0x0); + qtest_outb(s, 0xc009, 0x0); + qtest_outw(s, 0xc00b, 0xc212); + qtest_outl(s, 0xc042, 0x2c2c5a88); + qtest_outw(s, 0xc00b, 0xc212); + qtest_outw(s, 0xc00b, 0x415a); + qtest_outl(s, 0xc03f, 0x3060303); + qtest_outl(s, 0xc00b, 0x5afa9054); + qtest_quit(s); +} + +static void test_reset_before_transfer_ok(void) +{ + QTestState *s = qtest_init( + "-device am53c974,id=scsi " + "-device scsi-hd,drive=disk0 -drive " + "id=disk0,if=none,file=null-co://,format=raw -nodefaults"); + + qtest_outl(s, 0xcf8, 0x80001010); + qtest_outl(s, 0xcfc, 0xc000); + 
qtest_outl(s, 0xcf8, 0x80001004); + qtest_outw(s, 0xcfc, 0x01); + qtest_outl(s, 0xc007, 0x2500); + qtest_outl(s, 0xc00a, 0x410000); + qtest_outl(s, 0xc00a, 0x410000); + qtest_outw(s, 0xc00b, 0x0200); + qtest_outw(s, 0xc040, 0x03); + qtest_outw(s, 0xc009, 0x00); + qtest_outw(s, 0xc00b, 0x00); + qtest_outw(s, 0xc009, 0x00); + qtest_outw(s, 0xc00b, 0x00); + qtest_outw(s, 0xc009, 0x00); + qtest_outw(s, 0xc003, 0x1000); + qtest_outw(s, 0xc00b, 0x1000); + qtest_outl(s, 0xc00b, 0x9000); + qtest_outw(s, 0xc00b, 0x1000); + qtest_quit(s); +} + int main(int argc, char **argv) { const char *arch = qtest_get_arch(); @@ -212,6 +274,10 @@ int main(int argc, char **argv) test_fifo_underflow_on_write_ok); qtest_add_func("am53c974/test_cancelled_request_ok", test_cancelled_request_ok); + qtest_add_func("am53c974/test_inflight_cancel_ok", + test_inflight_cancel_ok); + qtest_add_func("am53c974/test_reset_before_transfer_ok", + test_reset_before_transfer_ok); } return g_test_run(); diff --git a/tests/qtest/arm-cpu-features.c b/tests/qtest/arm-cpu-features.c index 8252b85bb8..5a14527386 100644 --- a/tests/qtest/arm-cpu-features.c +++ b/tests/qtest/arm-cpu-features.c @@ -10,7 +10,7 @@ */ #include "qemu/osdep.h" #include "qemu/bitops.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qjson.h" @@ -26,21 +26,6 @@ " 'arguments': { 'type': 'full', " #define QUERY_TAIL "}}" -static bool kvm_enabled(QTestState *qts) -{ - QDict *resp, *qdict; - bool enabled; - - resp = qtest_qmp(qts, "{ 'execute': 'query-kvm' }"); - g_assert(qdict_haskey(resp, "return")); - qdict = qdict_get_qdict(resp, "return"); - g_assert(qdict_haskey(qdict, "enabled")); - enabled = qdict_get_bool(qdict, "enabled"); - qobject_unref(resp); - - return enabled; -} - static QDict *do_query_no_props(QTestState *qts, const char *cpu_type) { return qtest_qmp(qts, QUERY_HEAD "'model': { 'name': %s }" @@ -473,6 +458,19 @@ static void test_query_cpu_model_expansion(const void *data) assert_has_feature_enabled(qts, "cortex-a57", "pmu"); assert_has_feature_enabled(qts, "cortex-a57", "aarch64"); + assert_has_feature_enabled(qts, "a64fx", "pmu"); + assert_has_feature_enabled(qts, "a64fx", "aarch64"); + /* + * A64FX does not support any other vector lengths besides those + * that are enabled by default(128bit, 256bits, 512bit). + */ + assert_has_feature_enabled(qts, "a64fx", "sve"); + assert_sve_vls(qts, "a64fx", 0xb, NULL); + assert_error(qts, "a64fx", "cannot enable sve384", + "{ 'sve384': true }"); + assert_error(qts, "a64fx", "cannot enable sve640", + "{ 'sve640': true }"); + sve_tests_default(qts, "max"); pauth_tests_default(qts, "max"); @@ -493,14 +491,6 @@ static void test_query_cpu_model_expansion_kvm(const void *data) qts = qtest_init(MACHINE_KVM "-cpu max"); - /* - * These tests target the 'host' CPU type, so KVM must be enabled. - */ - if (!kvm_enabled(qts)) { - qtest_quit(qts); - return; - } - /* Enabling and disabling kvm-no-adjvtime should always work. */ assert_has_feature_disabled(qts, "host", "kvm-no-adjvtime"); assert_set_feature(qts, "host", "kvm-no-adjvtime", true); @@ -624,7 +614,11 @@ int main(int argc, char **argv) * order avoid attempting to run an AArch32 QEMU with KVM on * AArch64 hosts. That won't work and isn't easy to detect. */ - if (g_str_equal(qtest_get_arch(), "aarch64")) { + if (g_str_equal(qtest_get_arch(), "aarch64") && qtest_has_accel("kvm")) { + /* + * This tests target the 'host' CPU type, so register it only if + * KVM is available. 
+ */ qtest_add_data_func("/arm/kvm/query-cpu-model-expansion", NULL, test_query_cpu_model_expansion_kvm); } diff --git a/tests/qtest/aspeed_gpio-test.c b/tests/qtest/aspeed_gpio-test.c new file mode 100644 index 0000000000..d38f51d719 --- /dev/null +++ b/tests/qtest/aspeed_gpio-test.c @@ -0,0 +1,90 @@ +/* + * QTest testcase for the Aspeed GPIO Controller. + * + * Copyright (c) Meta Platforms, Inc. and affiliates. (http://www.meta.com) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/bitops.h" +#include "qemu/timer.h" +#include "qapi/qmp/qdict.h" +#include "libqtest-single.h" + +#define AST2600_GPIO_BASE 0x1E780000 + +#define GPIO_ABCD_DATA_VALUE 0x000 +#define GPIO_ABCD_DIRECTION 0x004 + +static void test_set_colocated_pins(const void *data) +{ + QTestState *s = (QTestState *)data; + + /* + * gpioV4-7 occupy bits within a single 32-bit value, so we want to make + * sure that modifying one doesn't affect the other. 
+ */ + qtest_qom_set_bool(s, "/machine/soc/gpio", "gpioV4", true); + qtest_qom_set_bool(s, "/machine/soc/gpio", "gpioV5", false); + qtest_qom_set_bool(s, "/machine/soc/gpio", "gpioV6", true); + qtest_qom_set_bool(s, "/machine/soc/gpio", "gpioV7", false); + g_assert(qtest_qom_get_bool(s, "/machine/soc/gpio", "gpioV4")); + g_assert(!qtest_qom_get_bool(s, "/machine/soc/gpio", "gpioV5")); + g_assert(qtest_qom_get_bool(s, "/machine/soc/gpio", "gpioV6")); + g_assert(!qtest_qom_get_bool(s, "/machine/soc/gpio", "gpioV7")); +} + +static void test_set_input_pins(const void *data) +{ + QTestState *s = (QTestState *)data; + char name[16]; + uint32_t value; + + qtest_writel(s, AST2600_GPIO_BASE + GPIO_ABCD_DIRECTION, 0x00000000); + for (char c = 'A'; c <= 'D'; c++) { + for (int i = 0; i < 8; i++) { + sprintf(name, "gpio%c%d", c, i); + qtest_qom_set_bool(s, "/machine/soc/gpio", name, true); + } + } + value = qtest_readl(s, AST2600_GPIO_BASE + GPIO_ABCD_DATA_VALUE); + g_assert_cmphex(value, ==, 0xffffffff); + + qtest_writel(s, AST2600_GPIO_BASE + GPIO_ABCD_DATA_VALUE, 0x00000000); + value = qtest_readl(s, AST2600_GPIO_BASE + GPIO_ABCD_DATA_VALUE); + g_assert_cmphex(value, ==, 0xffffffff); +} + +int main(int argc, char **argv) +{ + QTestState *s; + int r; + + g_test_init(&argc, &argv, NULL); + + s = qtest_init("-machine ast2600-evb"); + qtest_add_data_func("/ast2600/gpio/set_colocated_pins", s, + test_set_colocated_pins); + qtest_add_data_func("/ast2600/gpio/set_input_pins", s, test_set_input_pins); + r = g_test_run(); + qtest_quit(s); + + return r; +} diff --git a/tests/qtest/aspeed_hace-test.c b/tests/qtest/aspeed_hace-test.c index 09ee31545e..ce86a44672 100644 --- a/tests/qtest/aspeed_hace-test.c +++ b/tests/qtest/aspeed_hace-test.c @@ -7,8 +7,7 @@ #include "qemu/osdep.h" -#include "libqos/libqtest.h" -#include "qemu-common.h" +#include "libqtest.h" #include "qemu/bitops.h" #define HACE_CMD 0x10 @@ -21,6 +20,7 @@ #define HACE_ALGO_SHA512 (BIT(5) | BIT(6)) #define HACE_ALGO_SHA384 (BIT(5) | BIT(6) | BIT(10)) #define HACE_SG_EN BIT(18) +#define HACE_ACCUM_EN BIT(8) #define HACE_STS 0x1c #define HACE_RSA_ISR BIT(13) @@ -96,6 +96,57 @@ static const uint8_t test_result_sg_sha256[] = { 0x55, 0x1e, 0x1e, 0xc5, 0x80, 0xdd, 0x6d, 0x5a, 0x6e, 0xcd, 0xe9, 0xf3, 0xd3, 0x5e, 0x6e, 0x4a, 0x71, 0x7f, 0xbd, 0xe4}; +/* + * The accumulative mode requires firmware to provide internal initial state + * and message padding (including length L at the end of padding). + * + * This test vector is a ascii text "abc" with padding message. 
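+ * The padding follows the usual SHA rules: a 0x80 byte after the message, + * zero bytes up to the block size (64 bytes for SHA-256, 128 bytes for + * SHA-512), and the big-endian message length in bits at the end + * (0x18 == 24 bits for "abc").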
+ * + * Expected results were generated using command line utitiles: + * + * echo -n -e 'abc' | dd of=/tmp/test + * for hash in sha512sum sha256sum; do $hash /tmp/test; done + */ +static const uint8_t test_vector_accum_512[] = { + 0x61, 0x62, 0x63, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18}; + +static const uint8_t test_vector_accum_256[] = { + 0x61, 0x62, 0x63, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18}; + +static const uint8_t test_result_accum_sha512[] = { + 0xdd, 0xaf, 0x35, 0xa1, 0x93, 0x61, 0x7a, 0xba, 0xcc, 0x41, 0x73, 0x49, + 0xae, 0x20, 0x41, 0x31, 0x12, 0xe6, 0xfa, 0x4e, 0x89, 0xa9, 0x7e, 0xa2, + 0x0a, 0x9e, 0xee, 0xe6, 0x4b, 0x55, 0xd3, 0x9a, 0x21, 0x92, 0x99, 0x2a, + 0x27, 0x4f, 0xc1, 0xa8, 0x36, 0xba, 0x3c, 0x23, 0xa3, 0xfe, 0xeb, 0xbd, + 0x45, 0x4d, 0x44, 0x23, 0x64, 0x3c, 0xe8, 0x0e, 0x2a, 0x9a, 0xc9, 0x4f, + 0xa5, 0x4c, 0xa4, 0x9f}; + +static const uint8_t test_result_accum_sha256[] = { + 0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde, + 0x5d, 0xae, 0x22, 0x23, 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c, + 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad}; static void write_regs(QTestState *s, uint32_t base, uint32_t src, uint32_t length, uint32_t out, uint32_t method) @@ -308,6 +359,88 @@ static void test_sha512_sg(const char *machine, const uint32_t base, qtest_quit(s); } +static void test_sha256_accum(const char *machine, const uint32_t base, + const uint32_t src_addr) +{ + QTestState *s = qtest_init(machine); + + const uint32_t buffer_addr = src_addr + 0x1000000; + const uint32_t digest_addr = src_addr + 0x4000000; + uint8_t digest[32] = {0}; + struct AspeedSgList array[] = { + { cpu_to_le32(sizeof(test_vector_accum_256) | SG_LIST_LEN_LAST), + cpu_to_le32(buffer_addr) }, + }; + + /* Check engine is idle, no busy or irq bits set */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Write test vector into memory */ + qtest_memwrite(s, buffer_addr, test_vector_accum_256, + sizeof(test_vector_accum_256)); + qtest_memwrite(s, src_addr, array, sizeof(array)); + + write_regs(s, base, src_addr, sizeof(test_vector_accum_256), + digest_addr, HACE_ALGO_SHA256 | HACE_SG_EN | HACE_ACCUM_EN); + + /* Check hash IRQ status is asserted */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); + + /* Clear IRQ status and check status is deasserted */ + qtest_writel(s, base + HACE_STS, 0x00000200); + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Read computed 
digest from memory */ + qtest_memread(s, digest_addr, digest, sizeof(digest)); + + /* Check result of computation */ + g_assert_cmpmem(digest, sizeof(digest), + test_result_accum_sha256, sizeof(digest)); + + qtest_quit(s); +} + +static void test_sha512_accum(const char *machine, const uint32_t base, + const uint32_t src_addr) +{ + QTestState *s = qtest_init(machine); + + const uint32_t buffer_addr = src_addr + 0x1000000; + const uint32_t digest_addr = src_addr + 0x4000000; + uint8_t digest[64] = {0}; + struct AspeedSgList array[] = { + { cpu_to_le32(sizeof(test_vector_accum_512) | SG_LIST_LEN_LAST), + cpu_to_le32(buffer_addr) }, + }; + + /* Check engine is idle, no busy or irq bits set */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Write test vector into memory */ + qtest_memwrite(s, buffer_addr, test_vector_accum_512, + sizeof(test_vector_accum_512)); + qtest_memwrite(s, src_addr, array, sizeof(array)); + + write_regs(s, base, src_addr, sizeof(test_vector_accum_512), + digest_addr, HACE_ALGO_SHA512 | HACE_SG_EN | HACE_ACCUM_EN); + + /* Check hash IRQ status is asserted */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); + + /* Clear IRQ status and check status is deasserted */ + qtest_writel(s, base + HACE_STS, 0x00000200); + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Read computed digest from memory */ + qtest_memread(s, digest_addr, digest, sizeof(digest)); + + /* Check result of computation */ + g_assert_cmpmem(digest, sizeof(digest), + test_result_accum_sha512, sizeof(digest)); + + qtest_quit(s); +} + struct masks { uint32_t src; uint32_t dest; @@ -396,6 +529,16 @@ static void test_sha512_sg_ast2600(void) test_sha512_sg("-machine ast2600-evb", 0x1e6d0000, 0x80000000); } +static void test_sha256_accum_ast2600(void) +{ + test_sha256_accum("-machine ast2600-evb", 0x1e6d0000, 0x80000000); +} + +static void test_sha512_accum_ast2600(void) +{ + test_sha512_accum("-machine ast2600-evb", 0x1e6d0000, 0x80000000); +} + static void test_addresses_ast2600(void) { test_addresses("-machine ast2600-evb", 0x1e6d0000, &ast2600_masks); @@ -455,6 +598,9 @@ int main(int argc, char **argv) qtest_add_func("ast2600/hace/sha512_sg", test_sha512_sg_ast2600); qtest_add_func("ast2600/hace/sha256_sg", test_sha256_sg_ast2600); + qtest_add_func("ast2600/hace/sha512_accum", test_sha512_accum_ast2600); + qtest_add_func("ast2600/hace/sha256_accum", test_sha256_accum_ast2600); + qtest_add_func("ast2500/hace/addresses", test_addresses_ast2500); qtest_add_func("ast2500/hace/sha512", test_sha512_ast2500); qtest_add_func("ast2500/hace/sha256", test_sha256_ast2500); diff --git a/tests/qtest/aspeed_smc-test.c b/tests/qtest/aspeed_smc-test.c index 87b40a0ef1..c713a3700b 100644 --- a/tests/qtest/aspeed_smc-test.c +++ b/tests/qtest/aspeed_smc-test.c @@ -26,6 +26,7 @@ #include "qemu/osdep.h" #include "qemu/bswap.h" #include "libqtest-single.h" +#include "qemu/bitops.h" /* * ASPEED SPI Controller registers @@ -40,6 +41,7 @@ #define CTRL_FREADMODE 0x1 #define CTRL_WRITEMODE 0x2 #define CTRL_USERMODE 0x3 +#define SR_WEL BIT(1) #define ASPEED_FMC_BASE 0x1E620000 #define ASPEED_FLASH_BASE 0x20000000 @@ -49,10 +51,14 @@ */ enum { JEDEC_READ = 0x9f, + RDSR = 0x5, + WRDI = 0x4, BULK_ERASE = 0xc7, READ = 0x03, PP = 0x02, + WRSR = 0x1, WREN = 0x6, + SRWD = 0x80, RESET_ENABLE = 0x66, RESET_MEMORY = 0x99, EN_4BYTE_ADDR = 0xB7, @@ -131,6 +137,9 @@ static void flash_reset(void) spi_ctrl_start_user(); writeb(ASPEED_FLASH_BASE, RESET_ENABLE); writeb(ASPEED_FLASH_BASE, RESET_MEMORY); 
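+    /* Erase the whole flash so each test starts from an all-0xff image */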
+ writeb(ASPEED_FLASH_BASE, WREN); + writeb(ASPEED_FLASH_BASE, BULK_ERASE); + writeb(ASPEED_FLASH_BASE, WRDI); spi_ctrl_stop_user(); spi_conf_remove(CONF_ENABLE_W0); @@ -183,6 +192,24 @@ static void read_page_mem(uint32_t addr, uint32_t *page) } } +static void write_page_mem(uint32_t addr, uint32_t write_value) +{ + spi_ctrl_setmode(CTRL_WRITEMODE, PP); + + for (int i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + writel(ASPEED_FLASH_BASE + addr + i * 4, write_value); + } +} + +static void assert_page_mem(uint32_t addr, uint32_t expected_value) +{ + uint32_t page[FLASH_PAGE_SIZE / 4]; + read_page_mem(addr, page); + for (int i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + g_assert_cmphex(page[i], ==, expected_value); + } +} + static void test_erase_sector(void) { uint32_t some_page_addr = 0x600 * FLASH_PAGE_SIZE; @@ -191,6 +218,33 @@ static void test_erase_sector(void) spi_conf(CONF_ENABLE_W0); + /* + * Previous page should be full of 0xffs after backend is + * initialized + */ + read_page(some_page_addr - FLASH_PAGE_SIZE, page); + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + g_assert_cmphex(page[i], ==, 0xffffffff); + } + + spi_ctrl_start_user(); + writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR); + writeb(ASPEED_FLASH_BASE, WREN); + writeb(ASPEED_FLASH_BASE, PP); + writel(ASPEED_FLASH_BASE, make_be32(some_page_addr)); + + /* Fill the page with its own addresses */ + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + writel(ASPEED_FLASH_BASE, make_be32(some_page_addr + i * 4)); + } + spi_ctrl_stop_user(); + + /* Check the page is correctly written */ + read_page(some_page_addr, page); + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + g_assert_cmphex(page[i], ==, some_page_addr + i * 4); + } + spi_ctrl_start_user(); writeb(ASPEED_FLASH_BASE, WREN); writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR); @@ -198,14 +252,7 @@ static void test_erase_sector(void) writel(ASPEED_FLASH_BASE, make_be32(some_page_addr)); spi_ctrl_stop_user(); - /* Previous page should be full of zeroes as backend is not - * initialized */ - read_page(some_page_addr - FLASH_PAGE_SIZE, page); - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - g_assert_cmphex(page[i], ==, 0x0); - } - - /* But this one was erased */ + /* Check the page is erased */ read_page(some_page_addr, page); for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { g_assert_cmphex(page[i], ==, 0xffffffff); @@ -222,11 +269,31 @@ static void test_erase_all(void) spi_conf(CONF_ENABLE_W0); - /* Check some random page. 
Should be full of zeroes as backend is - * not initialized */ + /* + * Previous page should be full of 0xffs after backend is + * initialized + */ + read_page(some_page_addr - FLASH_PAGE_SIZE, page); + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + g_assert_cmphex(page[i], ==, 0xffffffff); + } + + spi_ctrl_start_user(); + writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR); + writeb(ASPEED_FLASH_BASE, WREN); + writeb(ASPEED_FLASH_BASE, PP); + writel(ASPEED_FLASH_BASE, make_be32(some_page_addr)); + + /* Fill the page with its own addresses */ + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + writel(ASPEED_FLASH_BASE, make_be32(some_page_addr + i * 4)); + } + spi_ctrl_stop_user(); + + /* Check the page is correctly written */ read_page(some_page_addr, page); for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - g_assert_cmphex(page[i], ==, 0x0); + g_assert_cmphex(page[i], ==, some_page_addr + i * 4); } spi_ctrl_start_user(); @@ -234,7 +301,7 @@ static void test_erase_all(void) writeb(ASPEED_FLASH_BASE, BULK_ERASE); spi_ctrl_stop_user(); - /* Recheck that some random page */ + /* Check the page is erased */ read_page(some_page_addr, page); for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { g_assert_cmphex(page[i], ==, 0xffffffff); @@ -295,6 +362,14 @@ static void test_read_page_mem(void) spi_conf(CONF_ENABLE_W0); spi_ctrl_start_user(); writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR); + writeb(ASPEED_FLASH_BASE, WREN); + writeb(ASPEED_FLASH_BASE, PP); + writel(ASPEED_FLASH_BASE, make_be32(my_page_addr)); + + /* Fill the page with its own addresses */ + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + writel(ASPEED_FLASH_BASE, make_be32(my_page_addr + i * 4)); + } spi_ctrl_stop_user(); spi_conf_remove(CONF_ENABLE_W0); @@ -348,16 +423,200 @@ static void test_write_page_mem(void) flash_reset(); } -static char tmp_path[] = "/tmp/qtest.m25p80.XXXXXX"; +static void test_read_status_reg(void) +{ + uint8_t r; + + spi_conf(CONF_ENABLE_W0); + + spi_ctrl_start_user(); + writeb(ASPEED_FLASH_BASE, RDSR); + r = readb(ASPEED_FLASH_BASE); + spi_ctrl_stop_user(); + + g_assert_cmphex(r & SR_WEL, ==, 0); + g_assert(!qtest_qom_get_bool + (global_qtest, "/machine/soc/fmc/ssi.0/child[0]", "write-enable")); + + spi_ctrl_start_user(); + writeb(ASPEED_FLASH_BASE, WREN); + writeb(ASPEED_FLASH_BASE, RDSR); + r = readb(ASPEED_FLASH_BASE); + spi_ctrl_stop_user(); + + g_assert_cmphex(r & SR_WEL, ==, SR_WEL); + g_assert(qtest_qom_get_bool + (global_qtest, "/machine/soc/fmc/ssi.0/child[0]", "write-enable")); + + spi_ctrl_start_user(); + writeb(ASPEED_FLASH_BASE, WRDI); + writeb(ASPEED_FLASH_BASE, RDSR); + r = readb(ASPEED_FLASH_BASE); + spi_ctrl_stop_user(); + + g_assert_cmphex(r & SR_WEL, ==, 0); + g_assert(!qtest_qom_get_bool + (global_qtest, "/machine/soc/fmc/ssi.0/child[0]", "write-enable")); + + flash_reset(); +} + +static void test_status_reg_write_protection(void) +{ + uint8_t r; + + spi_conf(CONF_ENABLE_W0); + + /* default case: WP# is high and SRWD is low -> status register writable */ + spi_ctrl_start_user(); + writeb(ASPEED_FLASH_BASE, WREN); + /* test ability to write SRWD */ + writeb(ASPEED_FLASH_BASE, WRSR); + writeb(ASPEED_FLASH_BASE, SRWD); + writeb(ASPEED_FLASH_BASE, RDSR); + r = readb(ASPEED_FLASH_BASE); + spi_ctrl_stop_user(); + g_assert_cmphex(r & SRWD, ==, SRWD); + + /* WP# high and SRWD high -> status register writable */ + spi_ctrl_start_user(); + writeb(ASPEED_FLASH_BASE, WREN); + /* test ability to write SRWD */ + writeb(ASPEED_FLASH_BASE, WRSR); + writeb(ASPEED_FLASH_BASE, 0); + writeb(ASPEED_FLASH_BASE, RDSR); + r = 
readb(ASPEED_FLASH_BASE); + spi_ctrl_stop_user(); + g_assert_cmphex(r & SRWD, ==, 0); + + /* WP# low and SRWD low -> status register writable */ + qtest_set_irq_in(global_qtest, + "/machine/soc/fmc/ssi.0/child[0]", "WP#", 0, 0); + spi_ctrl_start_user(); + writeb(ASPEED_FLASH_BASE, WREN); + /* test ability to write SRWD */ + writeb(ASPEED_FLASH_BASE, WRSR); + writeb(ASPEED_FLASH_BASE, SRWD); + writeb(ASPEED_FLASH_BASE, RDSR); + r = readb(ASPEED_FLASH_BASE); + spi_ctrl_stop_user(); + g_assert_cmphex(r & SRWD, ==, SRWD); + + /* WP# low and SRWD high -> status register NOT writable */ + spi_ctrl_start_user(); + writeb(ASPEED_FLASH_BASE, WREN); + /* test ability to write SRWD */ + writeb(ASPEED_FLASH_BASE, WRSR); + writeb(ASPEED_FLASH_BASE, 0); + writeb(ASPEED_FLASH_BASE, RDSR); + r = readb(ASPEED_FLASH_BASE); + spi_ctrl_stop_user(); + /* write is not successful */ + g_assert_cmphex(r & SRWD, ==, SRWD); + + qtest_set_irq_in(global_qtest, + "/machine/soc/fmc/ssi.0/child[0]", "WP#", 0, 1); + flash_reset(); +} + +static void test_write_block_protect(void) +{ + uint32_t sector_size = 65536; + uint32_t n_sectors = 512; + + spi_ce_ctrl(1 << CRTL_EXTENDED0); + spi_conf(CONF_ENABLE_W0); + + uint32_t bp_bits = 0b0; + + for (int i = 0; i < 16; i++) { + bp_bits = ((i & 0b1000) << 3) | ((i & 0b0111) << 2); + + spi_ctrl_start_user(); + writeb(ASPEED_FLASH_BASE, WREN); + writeb(ASPEED_FLASH_BASE, BULK_ERASE); + writeb(ASPEED_FLASH_BASE, WREN); + writeb(ASPEED_FLASH_BASE, WRSR); + writeb(ASPEED_FLASH_BASE, bp_bits); + writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR); + writeb(ASPEED_FLASH_BASE, WREN); + spi_ctrl_stop_user(); + + uint32_t num_protected_sectors = i ? MIN(1 << (i - 1), n_sectors) : 0; + uint32_t protection_start = n_sectors - num_protected_sectors; + uint32_t protection_end = n_sectors; + + for (int sector = 0; sector < n_sectors; sector++) { + uint32_t addr = sector * sector_size; + + assert_page_mem(addr, 0xffffffff); + write_page_mem(addr, make_be32(0xabcdef12)); + + uint32_t expected_value = protection_start <= sector + && sector < protection_end + ? 0xffffffff : 0xabcdef12; + + assert_page_mem(addr, expected_value); + } + } + + flash_reset(); +} + +static void test_write_block_protect_bottom_bit(void) +{ + uint32_t sector_size = 65536; + uint32_t n_sectors = 512; + + spi_ce_ctrl(1 << CRTL_EXTENDED0); + spi_conf(CONF_ENABLE_W0); + + /* top bottom bit is enabled */ + uint32_t bp_bits = 0b00100 << 3; + + for (int i = 0; i < 16; i++) { + bp_bits = (((i & 0b1000) | 0b0100) << 3) | ((i & 0b0111) << 2); + + spi_ctrl_start_user(); + writeb(ASPEED_FLASH_BASE, WREN); + writeb(ASPEED_FLASH_BASE, BULK_ERASE); + writeb(ASPEED_FLASH_BASE, WREN); + writeb(ASPEED_FLASH_BASE, WRSR); + writeb(ASPEED_FLASH_BASE, bp_bits); + writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR); + writeb(ASPEED_FLASH_BASE, WREN); + spi_ctrl_stop_user(); + + uint32_t num_protected_sectors = i ? MIN(1 << (i - 1), n_sectors) : 0; + uint32_t protection_start = 0; + uint32_t protection_end = num_protected_sectors; + + for (int sector = 0; sector < n_sectors; sector++) { + uint32_t addr = sector * sector_size; + + assert_page_mem(addr, 0xffffffff); + write_page_mem(addr, make_be32(0xabcdef12)); + + uint32_t expected_value = protection_start <= sector + && sector < protection_end + ? 
0xffffffff : 0xabcdef12;
+
+            assert_page_mem(addr, expected_value);
+        }
+    }
+
+    flash_reset();
+}
 
 int main(int argc, char **argv)
 {
+    g_autofree char *tmp_path = NULL;
     int ret;
     int fd;
 
     g_test_init(&argc, &argv, NULL);
 
-    fd = mkstemp(tmp_path);
+    fd = g_file_open_tmp("qtest.m25p80.XXXXXX", &tmp_path, NULL);
     g_assert(fd >= 0);
     ret = ftruncate(fd, FLASH_SIZE);
     g_assert(ret == 0);
@@ -373,7 +632,15 @@ int main(int argc, char **argv)
     qtest_add_func("/ast2400/smc/write_page", test_write_page);
     qtest_add_func("/ast2400/smc/read_page_mem", test_read_page_mem);
     qtest_add_func("/ast2400/smc/write_page_mem", test_write_page_mem);
+    qtest_add_func("/ast2400/smc/read_status_reg", test_read_status_reg);
+    qtest_add_func("/ast2400/smc/status_reg_write_protection",
+                   test_status_reg_write_protection);
+    qtest_add_func("/ast2400/smc/write_block_protect",
+                   test_write_block_protect);
+    qtest_add_func("/ast2400/smc/write_block_protect_bottom_bit",
+                   test_write_block_protect_bottom_bit);
 
+    flash_reset();
     ret = g_test_run();
 
     qtest_quit(global_qtest);
diff --git a/tests/qtest/bcm2835-dma-test.c b/tests/qtest/bcm2835-dma-test.c
new file mode 100644
index 0000000000..8293d822b9
--- /dev/null
+++ b/tests/qtest/bcm2835-dma-test.c
@@ -0,0 +1,118 @@
+/*
+ * QTest testcase for BCM283x DMA engine (on Raspberry Pi 3)
+ * and its interrupts coming to the interrupt controller.
+ *
+ * Copyright (c) 2022 Auriga LLC
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "libqtest-single.h"
+
+/* Offsets in raspi3b platform: */
+#define RASPI3_DMA_BASE 0x3f007000
+#define RASPI3_IC_BASE 0x3f00b200
+
+/* Used register/fields definitions */
+
+/* DMA engine registers: */
+#define BCM2708_DMA_CS 0
+#define BCM2708_DMA_ACTIVE (1 << 0)
+#define BCM2708_DMA_INT (1 << 2)
+
+#define BCM2708_DMA_ADDR 0x04
+
+#define BCM2708_DMA_INT_STATUS 0xfe0
+
+/* DMA Transfer Info fields: */
+#define BCM2708_DMA_INT_EN (1 << 0)
+#define BCM2708_DMA_D_INC (1 << 4)
+#define BCM2708_DMA_S_INC (1 << 8)
+
+/* Interrupt controller registers: */
+#define IRQ_PENDING_BASIC 0x00
+#define IRQ_GPU_PENDING1_AGGR (1 << 8)
+#define IRQ_PENDING_1 0x04
+#define IRQ_ENABLE_1 0x10
+
+/* Data for the test: */
+#define SCB_ADDR 256
+#define S_ADDR 32
+#define D_ADDR 64
+#define TXFR_LEN 32
+const uint32_t check_data = 0x12345678;
+
+static void bcm2835_dma_test_interrupt(int dma_c, int irq_line)
+{
+    uint64_t dma_base = RASPI3_DMA_BASE + dma_c * 0x100;
+    int gpu_irq_line = 16 + irq_line;
+
+    /* Check that interrupts are silent by default: */
+    writel(RASPI3_IC_BASE + IRQ_ENABLE_1, 1 << gpu_irq_line);
+    int isr = readl(dma_base + BCM2708_DMA_INT_STATUS);
+    g_assert_cmpint(isr, ==, 0);
+    uint32_t reg0 = readl(dma_base + BCM2708_DMA_CS);
+    g_assert_cmpint(reg0, ==, 0);
+    uint32_t ic_pending = readl(RASPI3_IC_BASE + IRQ_PENDING_BASIC);
+    g_assert_cmpint(ic_pending, ==, 0);
+    uint32_t gpu_pending1 = readl(RASPI3_IC_BASE + IRQ_PENDING_1);
+    g_assert_cmpint(gpu_pending1, ==, 0);
+
+    /* Prepare Control Block: */
+    writel(SCB_ADDR + 0, BCM2708_DMA_S_INC | BCM2708_DMA_D_INC |
+           BCM2708_DMA_INT_EN); /* transfer info */
+    writel(SCB_ADDR + 4, S_ADDR);    /* source address */
+    writel(SCB_ADDR + 8, D_ADDR);    /* destination address */
+    writel(SCB_ADDR + 12, TXFR_LEN); /* transfer length */
+    writel(dma_base + BCM2708_DMA_ADDR, SCB_ADDR);
+
+    writel(S_ADDR, check_data);
+    for (int word = S_ADDR + 4; word < S_ADDR + TXFR_LEN; word += 4) {
+        writel(word, ~check_data);
+    }
+    /* Perform the transfer: */
+    writel(dma_base + BCM2708_DMA_CS, 
BCM2708_DMA_ACTIVE); + + /* Check that destination == source: */ + uint32_t data = readl(D_ADDR); + g_assert_cmpint(data, ==, check_data); + for (int word = D_ADDR + 4; word < D_ADDR + TXFR_LEN; word += 4) { + data = readl(word); + g_assert_cmpint(data, ==, ~check_data); + } + + /* Check that interrupt status is set both in DMA and IC controllers: */ + isr = readl(RASPI3_DMA_BASE + BCM2708_DMA_INT_STATUS); + g_assert_cmpint(isr, ==, 1 << dma_c); + + ic_pending = readl(RASPI3_IC_BASE + IRQ_PENDING_BASIC); + g_assert_cmpint(ic_pending, ==, IRQ_GPU_PENDING1_AGGR); + + gpu_pending1 = readl(RASPI3_IC_BASE + IRQ_PENDING_1); + g_assert_cmpint(gpu_pending1, ==, 1 << gpu_irq_line); + + /* Clean up, clear interrupt: */ + writel(dma_base + BCM2708_DMA_CS, BCM2708_DMA_INT); +} + +static void bcm2835_dma_test_interrupts(void) +{ + /* DMA engines 0--10 have separate IRQ lines, 11--14 - only one: */ + bcm2835_dma_test_interrupt(0, 0); + bcm2835_dma_test_interrupt(10, 10); + bcm2835_dma_test_interrupt(11, 11); + bcm2835_dma_test_interrupt(14, 11); +} + +int main(int argc, char **argv) +{ + int ret; + g_test_init(&argc, &argv, NULL); + qtest_add_func("/bcm2835/dma/test_interrupts", + bcm2835_dma_test_interrupts); + qtest_start("-machine raspi3b"); + ret = g_test_run(); + qtest_end(); + return ret; +} diff --git a/tests/qtest/bios-tables-test.c b/tests/qtest/bios-tables-test.c index 51d3a4e239..395d441212 100644 --- a/tests/qtest/bios-tables-test.c +++ b/tests/qtest/bios-tables-test.c @@ -57,7 +57,6 @@ #include "qemu/osdep.h" #include -#include "qemu-common.h" #include "hw/firmware/smbios.h" #include "qemu/bitmap.h" #include "acpi-utils.h" @@ -73,7 +72,8 @@ #define OEM_ID "TEST" #define OEM_TABLE_ID "OEM" -#define OEM_TEST_ARGS "-machine x-oem-id="OEM_ID",x-oem-table-id="OEM_TABLE_ID +#define OEM_TEST_ARGS "-machine x-oem-id=" OEM_ID ",x-oem-table-id=" \ + OEM_TABLE_ID typedef struct { bool tcg_only; @@ -88,10 +88,12 @@ typedef struct { uint64_t rsdp_addr; uint8_t rsdp_table[36 /* ACPI 2.0+ RSDP size */]; GArray *tables; - uint32_t smbios_ep_addr; - struct smbios_21_entry_point smbios_ep_table; + uint64_t smbios_ep_addr[SMBIOS_ENTRY_POINT_TYPE__MAX]; + SmbiosEntryPoint smbios_ep_table; uint16_t smbios_cpu_max_speed; uint16_t smbios_cpu_curr_speed; + uint8_t smbios_core_count; + uint16_t smbios_core_count2; uint8_t *required_struct_types; int required_struct_types_len; QTestState *qts; @@ -271,19 +273,28 @@ static void dump_aml_files(test_data *data, bool rebuild) } } +static bool create_tmp_asl(AcpiSdtTable *sdt) +{ + GError *error = NULL; + gint fd; + + fd = g_file_open_tmp("asl-XXXXXX.dsl", &sdt->asl_file, &error); + g_assert_no_error(error); + close(fd); + + return false; +} + static bool load_asl(GArray *sdts, AcpiSdtTable *sdt) { AcpiSdtTable *temp; GError *error = NULL; GString *command_line = g_string_new(iasl); - gint fd; gchar *out, *out_err; gboolean ret; int i; - fd = g_file_open_tmp("asl-XXXXXX.dsl", &sdt->asl_file, &error); - g_assert_no_error(error); - close(fd); + create_tmp_asl(sdt); /* build command line */ g_string_append_printf(command_line, " -p %s ", sdt->asl_file); @@ -463,11 +474,20 @@ static void test_acpi_asl(test_data *data) err = load_asl(data->tables, sdt); asl = normalize_asl(sdt->asl); - exp_err = load_asl(exp_data.tables, exp_sdt); - exp_asl = normalize_asl(exp_sdt->asl); + /* + * If expected file is empty - it's likely that it was a stub just + * created for step 1 above: we do want to decompile the actual one. 
+ */ + if (exp_sdt->aml_len) { + exp_err = load_asl(exp_data.tables, exp_sdt); + exp_asl = normalize_asl(exp_sdt->asl); + } else { + exp_err = create_tmp_asl(exp_sdt); + exp_asl = g_string_new(""); + } /* TODO: check for warnings */ - g_assert(!err || exp_err); + g_assert(!err || exp_err || !exp_sdt->aml_len); if (g_strcmp0(asl->str, exp_asl->str)) { sdt->tmp_files_retain = true; @@ -515,10 +535,9 @@ static void test_acpi_asl(test_data *data) free_test_data(&exp_data); } -static bool smbios_ep_table_ok(test_data *data) +static bool smbios_ep2_table_ok(test_data *data, uint32_t addr) { - struct smbios_21_entry_point *ep_table = &data->smbios_ep_table; - uint32_t addr = data->smbios_ep_addr; + struct smbios_21_entry_point *ep_table = &data->smbios_ep_table.ep21; qtest_memread(data->qts, addr, ep_table, sizeof(*ep_table)); if (memcmp(ep_table->anchor_string, "_SM_", 4)) { @@ -541,13 +560,29 @@ static bool smbios_ep_table_ok(test_data *data) return true; } -static void test_smbios_entry_point(test_data *data) +static bool smbios_ep3_table_ok(test_data *data, uint64_t addr) +{ + struct smbios_30_entry_point *ep_table = &data->smbios_ep_table.ep30; + + qtest_memread(data->qts, addr, ep_table, sizeof(*ep_table)); + if (memcmp(ep_table->anchor_string, "_SM3_", 5)) { + return false; + } + + if (acpi_calc_checksum((uint8_t *)ep_table, sizeof *ep_table)) { + return false; + } + + return true; +} + +static SmbiosEntryPointType test_smbios_entry_point(test_data *data) { uint32_t off; /* find smbios entry point structure */ for (off = 0xf0000; off < 0x100000; off += 0x10) { - uint8_t sig[] = "_SM_"; + uint8_t sig[] = "_SM_", sig3[] = "_SM3_"; int i; for (i = 0; i < sizeof sig - 1; ++i) { @@ -556,14 +591,30 @@ static void test_smbios_entry_point(test_data *data) if (!memcmp(sig, "_SM_", sizeof sig)) { /* signature match, but is this a valid entry point? */ - data->smbios_ep_addr = off; - if (smbios_ep_table_ok(data)) { + if (smbios_ep2_table_ok(data, off)) { + data->smbios_ep_addr[SMBIOS_ENTRY_POINT_TYPE_32] = off; + } + } + + for (i = 0; i < sizeof sig3 - 1; ++i) { + sig3[i] = qtest_readb(data->qts, off + i); + } + + if (!memcmp(sig3, "_SM3_", sizeof sig3)) { + if (smbios_ep3_table_ok(data, off)) { + data->smbios_ep_addr[SMBIOS_ENTRY_POINT_TYPE_64] = off; + /* found 64-bit entry point, no need to look for 32-bit one */ break; } } } - g_assert_cmphex(off, <, 0x100000); + /* found at least one entry point */ + g_assert_true(data->smbios_ep_addr[SMBIOS_ENTRY_POINT_TYPE_32] || + data->smbios_ep_addr[SMBIOS_ENTRY_POINT_TYPE_64]); + + return data->smbios_ep_addr[SMBIOS_ENTRY_POINT_TYPE_64] ? + SMBIOS_ENTRY_POINT_TYPE_64 : SMBIOS_ENTRY_POINT_TYPE_32; } static inline bool smbios_single_instance(uint8_t type) @@ -582,41 +633,61 @@ static inline bool smbios_single_instance(uint8_t type) } } -static bool smbios_cpu_test(test_data *data, uint32_t addr) +static void smbios_cpu_test(test_data *data, uint32_t addr, + SmbiosEntryPointType ep_type) { - uint16_t expect_speed[2]; - uint16_t real; + uint8_t core_count, expected_core_count = data->smbios_core_count; + uint16_t speed, expected_speed[2]; + uint16_t core_count2, expected_core_count2 = data->smbios_core_count2; int offset[2]; int i; /* Check CPU speed for backward compatibility */ offset[0] = offsetof(struct smbios_type_4, max_speed); offset[1] = offsetof(struct smbios_type_4, current_speed); - expect_speed[0] = data->smbios_cpu_max_speed ? : 2000; - expect_speed[1] = data->smbios_cpu_curr_speed ? : 2000; + expected_speed[0] = data->smbios_cpu_max_speed ? 
: 2000; + expected_speed[1] = data->smbios_cpu_curr_speed ? : 2000; for (i = 0; i < 2; i++) { - real = qtest_readw(data->qts, addr + offset[i]); - if (real != expect_speed[i]) { - fprintf(stderr, "Unexpected SMBIOS CPU speed: real %u expect %u\n", - real, expect_speed[i]); - return false; - } + speed = qtest_readw(data->qts, addr + offset[i]); + g_assert_cmpuint(speed, ==, expected_speed[i]); } - return true; + core_count = qtest_readb(data->qts, + addr + offsetof(struct smbios_type_4, core_count)); + + if (expected_core_count) { + g_assert_cmpuint(core_count, ==, expected_core_count); + } + + if (ep_type == SMBIOS_ENTRY_POINT_TYPE_64) { + core_count2 = qtest_readw(data->qts, + addr + offsetof(struct smbios_type_4, core_count2)); + + /* Core Count has reached its limit, checking Core Count 2 */ + if (expected_core_count == 0xFF && expected_core_count2) { + g_assert_cmpuint(core_count2, ==, expected_core_count2); + } + } } -static void test_smbios_structs(test_data *data) +static void test_smbios_structs(test_data *data, SmbiosEntryPointType ep_type) { DECLARE_BITMAP(struct_bitmap, SMBIOS_MAX_TYPE+1) = { 0 }; - struct smbios_21_entry_point *ep_table = &data->smbios_ep_table; - uint32_t addr = le32_to_cpu(ep_table->structure_table_address); - int i, len, max_len = 0; + + SmbiosEntryPoint *ep_table = &data->smbios_ep_table; + int i = 0, len, max_len = 0; uint8_t type, prv, crt; + uint64_t addr; + + if (ep_type == SMBIOS_ENTRY_POINT_TYPE_32) { + addr = le32_to_cpu(ep_table->ep21.structure_table_address); + } else { + addr = le64_to_cpu(ep_table->ep30.structure_table_address); + } /* walk the smbios tables */ - for (i = 0; i < le16_to_cpu(ep_table->number_of_structures); i++) { + do { /* grab type and formatted area length from struct header */ type = qtest_readb(data->qts, addr); @@ -630,7 +701,7 @@ static void test_smbios_structs(test_data *data) set_bit(type, struct_bitmap); if (type == 4) { - g_assert(smbios_cpu_test(data, addr)); + smbios_cpu_test(data, addr, ep_type); } /* seek to end of unformatted string area of this struct ("\0\0") */ @@ -642,19 +713,33 @@ static void test_smbios_structs(test_data *data) } /* keep track of max. struct size */ - if (max_len < len) { + if (ep_type == SMBIOS_ENTRY_POINT_TYPE_32 && max_len < len) { max_len = len; - g_assert_cmpuint(max_len, <=, ep_table->max_structure_size); + g_assert_cmpuint(max_len, <=, ep_table->ep21.max_structure_size); } /* start of next structure */ addr += len; - } - /* total table length and max struct size must match entry point values */ - g_assert_cmpuint(le16_to_cpu(ep_table->structure_table_length), ==, - addr - le32_to_cpu(ep_table->structure_table_address)); - g_assert_cmpuint(le16_to_cpu(ep_table->max_structure_size), ==, max_len); + /* + * Until all structures have been scanned (ep21) + * or an EOF structure is found (ep30) + */ + } while (ep_type == SMBIOS_ENTRY_POINT_TYPE_32 ? 
+ ++i < le16_to_cpu(ep_table->ep21.number_of_structures) : + type != 127); + + if (ep_type == SMBIOS_ENTRY_POINT_TYPE_32) { + /* + * Total table length and max struct size + * must match entry point values + */ + g_assert_cmpuint(le16_to_cpu(ep_table->ep21.structure_table_length), ==, + addr - le32_to_cpu(ep_table->ep21.structure_table_address)); + + g_assert_cmpuint(le16_to_cpu(ep_table->ep21.max_structure_size), ==, + max_len); + } /* required struct types must all be present */ for (i = 0; i < data->required_struct_types_len; i++) { @@ -707,7 +792,7 @@ static char *test_acpi_create_args(test_data *data, const char *params, } } else { args = g_strdup_printf("-machine %s %s -accel tcg " - "-net none -display none %s " + "-net none %s " "-drive id=hd0,if=none,file=%s,format=raw " "-device %s,drive=hd0 ", data->machine, data->tcg_only ? "" : "-accel kvm", @@ -722,13 +807,6 @@ static void test_acpi_one(const char *params, test_data *data) char *args; bool use_uefi = data->uefi_fl1 && data->uefi_fl2; -#ifndef CONFIG_TCG - if (data->tcg_only) { - g_test_skip("TCG disabled, skipping ACPI tcg_only test"); - return; - } -#endif /* CONFIG_TCG */ - args = test_acpi_create_args(data, params, use_uefi); data->qts = qtest_init(args); test_acpi_load_tables(data, use_uefi); @@ -745,8 +823,8 @@ static void test_acpi_one(const char *params, test_data *data) * https://bugs.launchpad.net/qemu/+bug/1821884 */ if (!use_uefi) { - test_smbios_entry_point(data); - test_smbios_structs(data); + SmbiosEntryPointType ep_type = test_smbios_entry_point(data); + test_smbios_structs(data, ep_type); } qtest_quit(data->qts); @@ -845,6 +923,21 @@ static void test_acpi_q35_tcg(void) free_test_data(&data); } +static void test_acpi_q35_tcg_core_count2(void) +{ + test_data data = { + .machine = MACHINE_Q35, + .variant = ".core-count2", + .required_struct_types = base_required_struct_types, + .required_struct_types_len = ARRAY_SIZE(base_required_struct_types), + .smbios_core_count = 0xFF, + .smbios_core_count2 = 275, + }; + + test_acpi_one("-machine smbios-entry-point-type=64 -smp 275", &data); + free_test_data(&data); +} + static void test_acpi_q35_tcg_bridge(void) { test_data data; @@ -859,6 +952,23 @@ static void test_acpi_q35_tcg_bridge(void) free_test_data(&data); } +static void test_acpi_q35_multif_bridge(void) +{ + test_data data = { + .machine = MACHINE_Q35, + .variant = ".multi-bridge", + }; + test_acpi_one("-device pcie-root-port,id=pcie-root-port-0," + "multifunction=on," + "port=0x0,chassis=1,addr=0x2,bus=pcie.0 " + "-device pcie-root-port,id=pcie-root-port-1," + "port=0x1,chassis=2,addr=0x3.0x1,bus=pcie.0 " + "-device virtio-balloon,id=balloon0," + "bus=pcie.0,addr=0x4.0x2", + &data); + free_test_data(&data); +} + static void test_acpi_q35_tcg_mmio64(void) { test_data data = { @@ -927,6 +1037,21 @@ static void test_acpi_q35_tcg_ipmi(void) free_test_data(&data); } +static void test_acpi_q35_tcg_smbus_ipmi(void) +{ + test_data data; + + memset(&data, 0, sizeof(data)); + data.machine = MACHINE_Q35; + data.variant = ".ipmismbus"; + data.required_struct_types = ipmi_required_struct_types; + data.required_struct_types_len = ARRAY_SIZE(ipmi_required_struct_types); + test_acpi_one("-device ipmi-bmc-sim,id=bmc0" + " -device smbus-ipmi,bmc=bmc0", + &data); + free_test_data(&data); +} + static void test_acpi_piix4_tcg_ipmi(void) { test_data data; @@ -1033,6 +1158,19 @@ static void test_acpi_q35_tcg_numamem(void) free_test_data(&data); } +static void test_acpi_q35_kvm_xapic(void) +{ + test_data data; + + memset(&data, 0, 
sizeof(data)); + data.machine = MACHINE_Q35; + data.variant = ".xapic"; + test_acpi_one(" -object memory-backend-ram,id=ram0,size=128M" + " -numa node -numa node,memdev=ram0" + " -machine kernel-irqchip=on -smp 1,maxcpus=288", &data); + free_test_data(&data); +} + static void test_acpi_q35_tcg_nosmm(void) { test_data data; @@ -1077,6 +1215,30 @@ static void test_acpi_q35_tcg_nohpet(void) free_test_data(&data); } +static void test_acpi_q35_kvm_dmar(void) +{ + test_data data; + + memset(&data, 0, sizeof(data)); + data.machine = MACHINE_Q35; + data.variant = ".dmar"; + test_acpi_one("-machine kernel-irqchip=split -accel kvm" + " -device intel-iommu,intremap=on,device-iotlb=on", &data); + free_test_data(&data); +} + +static void test_acpi_q35_tcg_ivrs(void) +{ + test_data data; + + memset(&data, 0, sizeof(data)); + data.machine = MACHINE_Q35; + data.variant = ".ivrs"; + data.tcg_only = true, + test_acpi_one(" -device amd-iommu", &data); + free_test_data(&data); +} + static void test_acpi_piix4_tcg_numamem(void) { test_data data; @@ -1092,16 +1254,16 @@ static void test_acpi_piix4_tcg_numamem(void) uint64_t tpm_tis_base_addr; static void test_acpi_tcg_tpm(const char *machine, const char *tpm_if, - uint64_t base) + uint64_t base, enum TPMVersion tpm_version) { -#ifdef CONFIG_TPM gchar *tmp_dir_name = g_strdup_printf("qemu-test_acpi_%s_tcg_%s.XXXXXX", machine, tpm_if); char *tmp_path = g_dir_make_tmp(tmp_dir_name, NULL); - TestState test; + TPMTestState test; test_data data; GThread *thread; - char *args, *variant = g_strdup_printf(".%s", tpm_if); + const char *suffix = tpm_version == TPM_VERSION_2_0 ? "tpm2" : "tpm12"; + char *args, *variant = g_strdup_printf(".%s.%s", tpm_if, suffix); tpm_tis_base_addr = base; @@ -1113,6 +1275,7 @@ static void test_acpi_tcg_tpm(const char *machine, const char *tpm_if, g_mutex_init(&test.data_mutex); g_cond_init(&test.data_cond); test.data_cond_signal = false; + test.tpm_version = tpm_version; thread = g_thread_new(NULL, tpm_emu_ctrl_thread, &test); tpm_emu_test_wait_cond(&test); @@ -1138,14 +1301,16 @@ static void test_acpi_tcg_tpm(const char *machine, const char *tpm_if, g_free(tmp_dir_name); g_free(args); free_test_data(&data); -#else - g_test_skip("TPM disabled"); -#endif } -static void test_acpi_q35_tcg_tpm_tis(void) +static void test_acpi_q35_tcg_tpm2_tis(void) { - test_acpi_tcg_tpm("q35", "tis", 0xFED40000); + test_acpi_tcg_tpm("q35", "tis", 0xFED40000, TPM_VERSION_2_0); +} + +static void test_acpi_q35_tcg_tpm12_tis(void) +{ + test_acpi_tcg_tpm("q35", "tis", 0xFED40000, TPM_VERSION_1_2); } static void test_acpi_tcg_dimm_pxm(const char *machine) @@ -1378,6 +1543,164 @@ static void test_acpi_piix4_tcg_acpi_hmat(void) test_acpi_tcg_acpi_hmat(MACHINE_PC); } +static void test_acpi_virt_tcg_acpi_hmat(void) +{ + test_data data = { + .machine = "virt", + .tcg_only = true, + .uefi_fl1 = "pc-bios/edk2-aarch64-code.fd", + .uefi_fl2 = "pc-bios/edk2-arm-vars.fd", + .cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2", + .ram_start = 0x40000000ULL, + .scan_len = 128ULL * 1024 * 1024, + }; + + data.variant = ".acpihmatvirt"; + + test_acpi_one(" -machine hmat=on" + " -cpu cortex-a57" + " -smp 4,sockets=2" + " -m 256M" + " -object memory-backend-ram,size=64M,id=ram0" + " -object memory-backend-ram,size=64M,id=ram1" + " -object memory-backend-ram,size=128M,id=ram2" + " -numa node,nodeid=0,memdev=ram0" + " -numa node,nodeid=1,memdev=ram1" + " -numa node,nodeid=2,memdev=ram2" + " -numa cpu,node-id=0,socket-id=0" + " -numa cpu,node-id=0,socket-id=0" + " 
-numa cpu,node-id=1,socket-id=1" + " -numa cpu,node-id=1,socket-id=1" + " -numa hmat-lb,initiator=0,target=0,hierarchy=memory," + "data-type=access-latency,latency=10" + " -numa hmat-lb,initiator=0,target=0,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=10485760" + " -numa hmat-lb,initiator=0,target=1,hierarchy=memory," + "data-type=access-latency,latency=20" + " -numa hmat-lb,initiator=0,target=1,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=5242880" + " -numa hmat-lb,initiator=0,target=2,hierarchy=memory," + "data-type=access-latency,latency=30" + " -numa hmat-lb,initiator=0,target=2,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=1048576" + " -numa hmat-lb,initiator=1,target=0,hierarchy=memory," + "data-type=access-latency,latency=20" + " -numa hmat-lb,initiator=1,target=0,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=5242880" + " -numa hmat-lb,initiator=1,target=1,hierarchy=memory," + "data-type=access-latency,latency=10" + " -numa hmat-lb,initiator=1,target=1,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=10485760" + " -numa hmat-lb,initiator=1,target=2,hierarchy=memory," + "data-type=access-latency,latency=30" + " -numa hmat-lb,initiator=1,target=2,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=1048576", + &data); + + free_test_data(&data); +} + +static void test_acpi_q35_tcg_acpi_hmat_noinitiator(void) +{ + test_data data; + + memset(&data, 0, sizeof(data)); + data.machine = MACHINE_Q35; + data.variant = ".acpihmat-noinitiator"; + test_acpi_one(" -machine hmat=on" + " -smp 4,sockets=2" + " -m 128M" + " -object memory-backend-ram,size=32M,id=ram0" + " -object memory-backend-ram,size=32M,id=ram1" + " -object memory-backend-ram,size=64M,id=ram2" + " -numa node,nodeid=0,memdev=ram0" + " -numa node,nodeid=1,memdev=ram1" + " -numa node,nodeid=2,memdev=ram2" + " -numa cpu,node-id=0,socket-id=0" + " -numa cpu,node-id=0,socket-id=0" + " -numa cpu,node-id=1,socket-id=1" + " -numa cpu,node-id=1,socket-id=1" + " -numa hmat-lb,initiator=0,target=0,hierarchy=memory," + "data-type=access-latency,latency=10" + " -numa hmat-lb,initiator=0,target=0,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=10485760" + " -numa hmat-lb,initiator=0,target=1,hierarchy=memory," + "data-type=access-latency,latency=20" + " -numa hmat-lb,initiator=0,target=1,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=5242880" + " -numa hmat-lb,initiator=0,target=2,hierarchy=memory," + "data-type=access-latency,latency=30" + " -numa hmat-lb,initiator=0,target=2,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=1048576" + " -numa hmat-lb,initiator=1,target=0,hierarchy=memory," + "data-type=access-latency,latency=20" + " -numa hmat-lb,initiator=1,target=0,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=5242880" + " -numa hmat-lb,initiator=1,target=1,hierarchy=memory," + "data-type=access-latency,latency=10" + " -numa hmat-lb,initiator=1,target=1,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=10485760" + " -numa hmat-lb,initiator=1,target=2,hierarchy=memory," + "data-type=access-latency,latency=30" + " -numa hmat-lb,initiator=1,target=2,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=1048576", + &data); + free_test_data(&data); +} + +#ifdef CONFIG_POSIX +static void test_acpi_erst(const char *machine) +{ + gchar *tmp_path = g_dir_make_tmp("qemu-test-erst.XXXXXX", NULL); + gchar *params; + test_data data; + + memset(&data, 0, sizeof(data)); + data.machine = machine; + data.variant = ".acpierst"; + 
params = g_strdup_printf( + " -object memory-backend-file,id=erstnvram," + "mem-path=%s,size=0x10000,share=on" + " -device acpi-erst,memdev=erstnvram", tmp_path); + test_acpi_one(params, &data); + free_test_data(&data); + g_free(params); + g_assert(g_rmdir(tmp_path) == 0); + g_free(tmp_path); +} + +static void test_acpi_piix4_acpi_erst(void) +{ + test_acpi_erst(MACHINE_PC); +} + +static void test_acpi_q35_acpi_erst(void) +{ + test_acpi_erst(MACHINE_Q35); +} + +static void test_acpi_microvm_acpi_erst(void) +{ + gchar *tmp_path = g_dir_make_tmp("qemu-test-erst.XXXXXX", NULL); + gchar *params; + test_data data; + + test_acpi_microvm_prepare(&data); + data.variant = ".pcie"; + data.tcg_only = true; /* need constant host-phys-bits */ + params = g_strdup_printf(" -machine microvm," + "acpi=on,ioapic2=off,rtc=off,pcie=on" + " -object memory-backend-file,id=erstnvram," + "mem-path=%s,size=0x10000,share=on" + " -device acpi-erst,memdev=erstnvram", tmp_path); + test_acpi_one(params, &data); + g_free(params); + g_assert(g_rmdir(tmp_path) == 0); + g_free(tmp_path); + free_test_data(&data); +} +#endif /* CONFIG_POSIX */ + static void test_acpi_virt_tcg(void) { test_data data = { @@ -1390,9 +1713,6 @@ static void test_acpi_virt_tcg(void) .scan_len = 128ULL * 1024 * 1024, }; - test_acpi_one("-cpu cortex-a57", &data); - free_test_data(&data); - data.smbios_cpu_max_speed = 2900; data.smbios_cpu_curr_speed = 2700; test_acpi_one("-cpu cortex-a57 " @@ -1400,14 +1720,136 @@ static void test_acpi_virt_tcg(void) free_test_data(&data); } +static void test_acpi_q35_viot(void) +{ + test_data data = { + .machine = MACHINE_Q35, + .variant = ".viot", + }; + + /* + * To keep things interesting, two buses bypass the IOMMU. + * VIOT should only describes the other two buses. + */ + test_acpi_one("-machine default_bus_bypass_iommu=on " + "-device virtio-iommu-pci " + "-device pxb-pcie,bus_nr=0x10,id=pcie.100,bus=pcie.0 " + "-device pxb-pcie,bus_nr=0x20,id=pcie.200,bus=pcie.0,bypass_iommu=on " + "-device pxb-pcie,bus_nr=0x30,id=pcie.300,bus=pcie.0", + &data); + free_test_data(&data); +} + +#ifdef CONFIG_POSIX +static void test_acpi_q35_cxl(void) +{ + gchar *tmp_path = g_dir_make_tmp("qemu-test-cxl.XXXXXX", NULL); + gchar *params; + + test_data data = { + .machine = MACHINE_Q35, + .variant = ".cxl", + }; + /* + * A complex CXL setup. 
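+     * Two pxb-cxl host bridges with two root ports on each, a type-3
+     * memory device (with an LSA) behind every port, and two fixed memory
+     * windows: one targeting cxl.1 alone, one interleaved across both.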
+ */ + params = g_strdup_printf(" -machine cxl=on" + " -object memory-backend-file,id=cxl-mem1,mem-path=%s,size=256M" + " -object memory-backend-file,id=cxl-mem2,mem-path=%s,size=256M" + " -object memory-backend-file,id=cxl-mem3,mem-path=%s,size=256M" + " -object memory-backend-file,id=cxl-mem4,mem-path=%s,size=256M" + " -object memory-backend-file,id=lsa1,mem-path=%s,size=256M" + " -object memory-backend-file,id=lsa2,mem-path=%s,size=256M" + " -object memory-backend-file,id=lsa3,mem-path=%s,size=256M" + " -object memory-backend-file,id=lsa4,mem-path=%s,size=256M" + " -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1" + " -device pxb-cxl,bus_nr=222,bus=pcie.0,id=cxl.2" + " -device cxl-rp,port=0,bus=cxl.1,id=rp1,chassis=0,slot=2" + " -device cxl-type3,bus=rp1,memdev=cxl-mem1,lsa=lsa1" + " -device cxl-rp,port=1,bus=cxl.1,id=rp2,chassis=0,slot=3" + " -device cxl-type3,bus=rp2,memdev=cxl-mem2,lsa=lsa2" + " -device cxl-rp,port=0,bus=cxl.2,id=rp3,chassis=0,slot=5" + " -device cxl-type3,bus=rp3,memdev=cxl-mem3,lsa=lsa3" + " -device cxl-rp,port=1,bus=cxl.2,id=rp4,chassis=0,slot=6" + " -device cxl-type3,bus=rp4,memdev=cxl-mem4,lsa=lsa4" + " -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=8k," + "cxl-fmw.1.targets.0=cxl.1,cxl-fmw.1.targets.1=cxl.2,cxl-fmw.1.size=4G,cxl-fmw.1.interleave-granularity=8k", + tmp_path, tmp_path, tmp_path, tmp_path, + tmp_path, tmp_path, tmp_path, tmp_path); + test_acpi_one(params, &data); + + g_free(params); + g_assert(g_rmdir(tmp_path) == 0); + g_free(tmp_path); + free_test_data(&data); +} +#endif /* CONFIG_POSIX */ + +static void test_acpi_virt_viot(void) +{ + test_data data = { + .machine = "virt", + .tcg_only = true, + .uefi_fl1 = "pc-bios/edk2-aarch64-code.fd", + .uefi_fl2 = "pc-bios/edk2-arm-vars.fd", + .cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2", + .ram_start = 0x40000000ULL, + .scan_len = 128ULL * 1024 * 1024, + }; + + test_acpi_one("-cpu cortex-a57 " + "-device virtio-iommu-pci", &data); + free_test_data(&data); +} + +#ifndef _WIN32 +# define DEV_NULL "/dev/null" +#else +# define DEV_NULL "nul" +#endif + +static void test_acpi_q35_slic(void) +{ + test_data data = { + .machine = MACHINE_Q35, + .variant = ".slic", + }; + + test_acpi_one("-acpitable sig=SLIC,oem_id=\"CRASH \",oem_table_id=ME," + "oem_rev=00002210,asl_compiler_id=qemu," + "asl_compiler_rev=00000000,data=" DEV_NULL, + &data); + free_test_data(&data); +} + +static void test_acpi_q35_applesmc(void) +{ + test_data data = { + .machine = MACHINE_Q35, + .variant = ".applesmc", + }; + + /* supply fake 64-byte OSK to silence missing key warning */ + test_acpi_one("-device isa-applesmc,osk=any64characterfakeoskisenough" + "topreventinvalidkeywarningsonstderr", &data); + free_test_data(&data); +} + +static void test_acpi_q35_pvpanic_isa(void) +{ + test_data data = { + .machine = MACHINE_Q35, + .variant = ".pvpanic-isa", + }; + + test_acpi_one("-device pvpanic", &data); + free_test_data(&data); +} + static void test_oem_fields(test_data *data) { int i; - char oem_id[6]; - char oem_table_id[8]; - strpadcpy(oem_id, sizeof oem_id, OEM_ID, ' '); - strpadcpy(oem_table_id, sizeof oem_table_id, OEM_TABLE_ID, ' '); for (i = 0; i < data->tables->len; ++i) { AcpiSdtTable *sdt; @@ -1417,12 +1859,12 @@ static void test_oem_fields(test_data *data) continue; } - g_assert(memcmp(sdt->aml + 10, oem_id, 6) == 0); - g_assert(memcmp(sdt->aml + 16, oem_table_id, 8) == 0); + g_assert(strncmp((char *)sdt->aml + 10, OEM_ID, 6) == 0); + g_assert(strncmp((char *)sdt->aml + 16, 
OEM_TABLE_ID, 8) == 0); } } -static void test_acpi_oem_fields_pc(void) +static void test_acpi_piix4_oem_fields(void) { test_data data; char *args; @@ -1442,7 +1884,7 @@ static void test_acpi_oem_fields_pc(void) g_free(args); } -static void test_acpi_oem_fields_q35(void) +static void test_acpi_q35_oem_fields(void) { test_data data; char *args; @@ -1462,7 +1904,7 @@ static void test_acpi_oem_fields_q35(void) g_free(args); } -static void test_acpi_oem_fields_microvm(void) +static void test_acpi_microvm_oem_fields(void) { test_data data; char *args; @@ -1479,7 +1921,7 @@ static void test_acpi_oem_fields_microvm(void) g_free(args); } -static void test_acpi_oem_fields_virt(void) +static void test_acpi_virt_oem_fields(void) { test_data data = { .machine = "virt", @@ -1506,6 +1948,8 @@ static void test_acpi_oem_fields_virt(void) int main(int argc, char *argv[]) { const char *arch = qtest_get_arch(); + const bool has_kvm = qtest_has_accel("kvm"); + const bool has_tcg = qtest_has_accel("tcg"); int ret; g_test_init(&argc, &argv, NULL); @@ -1515,58 +1959,110 @@ int main(int argc, char *argv[]) if (ret) { return ret; } - qtest_add_func("acpi/q35/oem-fields", test_acpi_oem_fields_q35); - qtest_add_func("acpi/q35/tpm-tis", test_acpi_q35_tcg_tpm_tis); - qtest_add_func("acpi/piix4", test_acpi_piix4_tcg); - qtest_add_func("acpi/oem-fields", test_acpi_oem_fields_pc); - qtest_add_func("acpi/piix4/bridge", test_acpi_piix4_tcg_bridge); - qtest_add_func("acpi/piix4/pci-hotplug/no_root_hotplug", - test_acpi_piix4_no_root_hotplug); - qtest_add_func("acpi/piix4/pci-hotplug/no_bridge_hotplug", - test_acpi_piix4_no_bridge_hotplug); - qtest_add_func("acpi/piix4/pci-hotplug/off", - test_acpi_piix4_no_acpi_pci_hotplug); - qtest_add_func("acpi/q35", test_acpi_q35_tcg); - qtest_add_func("acpi/q35/bridge", test_acpi_q35_tcg_bridge); - qtest_add_func("acpi/q35/mmio64", test_acpi_q35_tcg_mmio64); - qtest_add_func("acpi/piix4/ipmi", test_acpi_piix4_tcg_ipmi); - qtest_add_func("acpi/q35/ipmi", test_acpi_q35_tcg_ipmi); - qtest_add_func("acpi/piix4/cpuhp", test_acpi_piix4_tcg_cphp); - qtest_add_func("acpi/q35/cpuhp", test_acpi_q35_tcg_cphp); - qtest_add_func("acpi/piix4/memhp", test_acpi_piix4_tcg_memhp); - qtest_add_func("acpi/q35/memhp", test_acpi_q35_tcg_memhp); - qtest_add_func("acpi/piix4/numamem", test_acpi_piix4_tcg_numamem); - qtest_add_func("acpi/q35/numamem", test_acpi_q35_tcg_numamem); - qtest_add_func("acpi/piix4/nosmm", test_acpi_piix4_tcg_nosmm); - qtest_add_func("acpi/piix4/smm-compat", - test_acpi_piix4_tcg_smm_compat); - qtest_add_func("acpi/piix4/smm-compat-nosmm", - test_acpi_piix4_tcg_smm_compat_nosmm); - qtest_add_func("acpi/piix4/nohpet", test_acpi_piix4_tcg_nohpet); - qtest_add_func("acpi/q35/nosmm", test_acpi_q35_tcg_nosmm); - qtest_add_func("acpi/q35/smm-compat", - test_acpi_q35_tcg_smm_compat); - qtest_add_func("acpi/q35/smm-compat-nosmm", - test_acpi_q35_tcg_smm_compat_nosmm); - qtest_add_func("acpi/q35/nohpet", test_acpi_q35_tcg_nohpet); - qtest_add_func("acpi/piix4/dimmpxm", test_acpi_piix4_tcg_dimm_pxm); - qtest_add_func("acpi/q35/dimmpxm", test_acpi_q35_tcg_dimm_pxm); - qtest_add_func("acpi/piix4/acpihmat", test_acpi_piix4_tcg_acpi_hmat); - qtest_add_func("acpi/q35/acpihmat", test_acpi_q35_tcg_acpi_hmat); - qtest_add_func("acpi/microvm", test_acpi_microvm_tcg); - qtest_add_func("acpi/microvm/usb", test_acpi_microvm_usb_tcg); - qtest_add_func("acpi/microvm/rtc", test_acpi_microvm_rtc_tcg); - qtest_add_func("acpi/microvm/ioapic2", test_acpi_microvm_ioapic2_tcg); - 
qtest_add_func("acpi/microvm/oem-fields", test_acpi_oem_fields_microvm); - if (strcmp(arch, "x86_64") == 0) { - qtest_add_func("acpi/microvm/pcie", test_acpi_microvm_pcie_tcg); + if (qtest_has_machine(MACHINE_PC)) { + qtest_add_func("acpi/piix4", test_acpi_piix4_tcg); + qtest_add_func("acpi/piix4/oem-fields", test_acpi_piix4_oem_fields); + qtest_add_func("acpi/piix4/bridge", test_acpi_piix4_tcg_bridge); + qtest_add_func("acpi/piix4/pci-hotplug/no_root_hotplug", + test_acpi_piix4_no_root_hotplug); + qtest_add_func("acpi/piix4/pci-hotplug/no_bridge_hotplug", + test_acpi_piix4_no_bridge_hotplug); + qtest_add_func("acpi/piix4/pci-hotplug/off", + test_acpi_piix4_no_acpi_pci_hotplug); + qtest_add_func("acpi/piix4/ipmi", test_acpi_piix4_tcg_ipmi); + qtest_add_func("acpi/piix4/cpuhp", test_acpi_piix4_tcg_cphp); + qtest_add_func("acpi/piix4/memhp", test_acpi_piix4_tcg_memhp); + qtest_add_func("acpi/piix4/numamem", test_acpi_piix4_tcg_numamem); + qtest_add_func("acpi/piix4/nosmm", test_acpi_piix4_tcg_nosmm); + qtest_add_func("acpi/piix4/smm-compat", + test_acpi_piix4_tcg_smm_compat); + qtest_add_func("acpi/piix4/smm-compat-nosmm", + test_acpi_piix4_tcg_smm_compat_nosmm); + qtest_add_func("acpi/piix4/nohpet", test_acpi_piix4_tcg_nohpet); + qtest_add_func("acpi/piix4/dimmpxm", test_acpi_piix4_tcg_dimm_pxm); + qtest_add_func("acpi/piix4/acpihmat", + test_acpi_piix4_tcg_acpi_hmat); +#ifdef CONFIG_POSIX + qtest_add_func("acpi/piix4/acpierst", test_acpi_piix4_acpi_erst); +#endif + } + if (qtest_has_machine(MACHINE_Q35)) { + qtest_add_func("acpi/q35", test_acpi_q35_tcg); + qtest_add_func("acpi/q35/oem-fields", test_acpi_q35_oem_fields); + if (tpm_model_is_available("-machine q35", "tpm-tis")) { + qtest_add_func("acpi/q35/tpm2-tis", test_acpi_q35_tcg_tpm2_tis); + qtest_add_func("acpi/q35/tpm12-tis", + test_acpi_q35_tcg_tpm12_tis); + } + qtest_add_func("acpi/q35/bridge", test_acpi_q35_tcg_bridge); + qtest_add_func("acpi/q35/multif-bridge", + test_acpi_q35_multif_bridge); + qtest_add_func("acpi/q35/mmio64", test_acpi_q35_tcg_mmio64); + qtest_add_func("acpi/q35/ipmi", test_acpi_q35_tcg_ipmi); + qtest_add_func("acpi/q35/smbus/ipmi", test_acpi_q35_tcg_smbus_ipmi); + qtest_add_func("acpi/q35/cpuhp", test_acpi_q35_tcg_cphp); + qtest_add_func("acpi/q35/memhp", test_acpi_q35_tcg_memhp); + qtest_add_func("acpi/q35/numamem", test_acpi_q35_tcg_numamem); + qtest_add_func("acpi/q35/nosmm", test_acpi_q35_tcg_nosmm); + qtest_add_func("acpi/q35/smm-compat", + test_acpi_q35_tcg_smm_compat); + qtest_add_func("acpi/q35/smm-compat-nosmm", + test_acpi_q35_tcg_smm_compat_nosmm); + qtest_add_func("acpi/q35/nohpet", test_acpi_q35_tcg_nohpet); + qtest_add_func("acpi/q35/dimmpxm", test_acpi_q35_tcg_dimm_pxm); + qtest_add_func("acpi/q35/acpihmat", test_acpi_q35_tcg_acpi_hmat); + qtest_add_func("acpi/q35/acpihmat-noinitiator", + test_acpi_q35_tcg_acpi_hmat_noinitiator); +#ifdef CONFIG_POSIX + qtest_add_func("acpi/q35/acpierst", test_acpi_q35_acpi_erst); +#endif + qtest_add_func("acpi/q35/applesmc", test_acpi_q35_applesmc); + qtest_add_func("acpi/q35/pvpanic-isa", test_acpi_q35_pvpanic_isa); + if (has_tcg) { + qtest_add_func("acpi/q35/ivrs", test_acpi_q35_tcg_ivrs); + } + if (has_kvm) { + qtest_add_func("acpi/q35/kvm/xapic", test_acpi_q35_kvm_xapic); + qtest_add_func("acpi/q35/kvm/dmar", test_acpi_q35_kvm_dmar); + qtest_add_func("acpi/q35/core-count2", + test_acpi_q35_tcg_core_count2); + } + qtest_add_func("acpi/q35/viot", test_acpi_q35_viot); +#ifdef CONFIG_POSIX + qtest_add_func("acpi/q35/cxl", test_acpi_q35_cxl); +#endif + 
qtest_add_func("acpi/q35/slic", test_acpi_q35_slic); + } + if (qtest_has_machine("microvm")) { + qtest_add_func("acpi/microvm", test_acpi_microvm_tcg); + qtest_add_func("acpi/microvm/usb", test_acpi_microvm_usb_tcg); + qtest_add_func("acpi/microvm/rtc", test_acpi_microvm_rtc_tcg); + qtest_add_func("acpi/microvm/ioapic2", + test_acpi_microvm_ioapic2_tcg); + qtest_add_func("acpi/microvm/oem-fields", + test_acpi_microvm_oem_fields); + if (has_tcg) { + if (strcmp(arch, "x86_64") == 0) { + qtest_add_func("acpi/microvm/pcie", + test_acpi_microvm_pcie_tcg); +#ifdef CONFIG_POSIX + qtest_add_func("acpi/microvm/acpierst", + test_acpi_microvm_acpi_erst); +#endif + } + } } } else if (strcmp(arch, "aarch64") == 0) { - qtest_add_func("acpi/virt", test_acpi_virt_tcg); - qtest_add_func("acpi/virt/numamem", test_acpi_virt_tcg_numamem); - qtest_add_func("acpi/virt/memhp", test_acpi_virt_tcg_memhp); - qtest_add_func("acpi/virt/pxb", test_acpi_virt_tcg_pxb); - qtest_add_func("acpi/virt/oem-fields", test_acpi_oem_fields_virt); + if (has_tcg) { + qtest_add_func("acpi/virt", test_acpi_virt_tcg); + qtest_add_func("acpi/virt/acpihmatvirt", + test_acpi_virt_tcg_acpi_hmat); + qtest_add_func("acpi/virt/numamem", test_acpi_virt_tcg_numamem); + qtest_add_func("acpi/virt/memhp", test_acpi_virt_tcg_memhp); + qtest_add_func("acpi/virt/pxb", test_acpi_virt_tcg_pxb); + qtest_add_func("acpi/virt/oem-fields", test_acpi_virt_oem_fields); + qtest_add_func("acpi/virt/viot", test_acpi_virt_viot); + } } ret = g_test_run(); boot_sector_cleanup(disk); diff --git a/tests/qtest/boot-order-test.c b/tests/qtest/boot-order-test.c index fac580d6c4..0680d79d6d 100644 --- a/tests/qtest/boot-order-test.c +++ b/tests/qtest/boot-order-test.c @@ -12,7 +12,7 @@ #include "qemu/osdep.h" #include "libqos/fw_cfg.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qapi/qmp/qdict.h" #include "standard-headers/linux/qemu_fw_cfg.h" @@ -34,6 +34,11 @@ static void test_a_boot_order(const char *machine, uint64_t actual; QTestState *qts; + if (machine && !qtest_has_machine(machine)) { + g_test_skip("Machine is not available"); + return; + } + qts = qtest_initf("-nodefaults%s%s %s", machine ? " -M " : "", machine ?: "", test_args); actual = read_boot_order(qts); diff --git a/tests/qtest/boot-sector.c b/tests/qtest/boot-sector.c index ea8f264661..44a109abd8 100644 --- a/tests/qtest/boot-sector.c +++ b/tests/qtest/boot-sector.c @@ -12,8 +12,7 @@ */ #include "qemu/osdep.h" #include "boot-sector.h" -#include "qemu-common.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #define LOW(x) ((x) & 0xff) #define HIGH(x) ((x) >> 8) diff --git a/tests/qtest/boot-sector.h b/tests/qtest/boot-sector.h index b339fdee4c..6ee6bb4d97 100644 --- a/tests/qtest/boot-sector.h +++ b/tests/qtest/boot-sector.h @@ -14,7 +14,7 @@ #ifndef TEST_BOOT_SECTOR_H #define TEST_BOOT_SECTOR_H -#include "libqos/libqtest.h" +#include "libqtest.h" /* Create boot disk file. 
fname must be a suitable string for mkstemp() */ int boot_sector_init(char *fname); diff --git a/tests/qtest/boot-serial-test.c b/tests/qtest/boot-serial-test.c index 96849cec91..b216519b62 100644 --- a/tests/qtest/boot-serial-test.c +++ b/tests/qtest/boot-serial-test.c @@ -14,7 +14,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "libqos/libqos-spapr.h" static const uint8_t bios_avr[] = { @@ -157,11 +157,11 @@ static testdef_t tests[] = { { "ppc64", "powernv8", "", "OPAL" }, { "ppc64", "powernv9", "", "OPAL" }, { "ppc64", "sam460ex", "-device e1000", "8086 100e" }, - { "i386", "isapc", "-cpu qemu32 -device sga", "SGABIOS" }, - { "i386", "pc", "-device sga", "SGABIOS" }, - { "i386", "q35", "-device sga", "SGABIOS" }, - { "x86_64", "isapc", "-cpu qemu32 -device sga", "SGABIOS" }, - { "x86_64", "q35", "-device sga", "SGABIOS" }, + { "i386", "isapc", "-cpu qemu32 -M graphics=off", "SeaBIOS" }, + { "i386", "pc", "-M graphics=off", "SeaBIOS" }, + { "i386", "q35", "-M graphics=off", "SeaBIOS" }, + { "x86_64", "isapc", "-cpu qemu32 -M graphics=off", "SeaBIOS" }, + { "x86_64", "q35", "-M graphics=off", "SeaBIOS" }, { "sparc", "LX", "", "TMS390S10" }, { "sparc", "SS-4", "", "MB86904" }, { "sparc", "SS-600MP", "", "TMS390Z55" }, @@ -173,7 +173,7 @@ static testdef_t tests[] = { sizeof(kernel_pls3adsp1800), kernel_pls3adsp1800 }, { "microblazeel", "petalogix-ml605", "", "TT", sizeof(kernel_plml605), kernel_plml605 }, - { "arm", "raspi2", "", "TT", sizeof(bios_raspi2), 0, bios_raspi2 }, + { "arm", "raspi2b", "", "TT", sizeof(bios_raspi2), 0, bios_raspi2 }, /* For hppa, force bios to output to serial by disabling graphics. */ { "hppa", "hppa", "-vga none", "SeaBIOS wants SYSTEM HALT" }, { "aarch64", "virt", "-cpu max", "TT", sizeof(kernel_aarch64), @@ -224,15 +224,16 @@ static bool check_guest_output(QTestState *qts, const testdef_t *test, int fd) static void test_machine(const void *data) { const testdef_t *test = data; - char serialtmp[] = "/tmp/qtest-boot-serial-sXXXXXX"; - char codetmp[] = "/tmp/qtest-boot-serial-cXXXXXX"; + g_autofree char *serialtmp = NULL; + g_autofree char *codetmp = NULL; const char *codeparam = ""; const uint8_t *code = NULL; QTestState *qts; int ser_fd; - ser_fd = mkstemp(serialtmp); + ser_fd = g_file_open_tmp("qtest-boot-serial-sXXXXXX", &serialtmp, NULL); g_assert(ser_fd != -1); + close(ser_fd); if (test->kernel) { code = test->kernel; @@ -246,7 +247,7 @@ static void test_machine(const void *data) ssize_t wlen; int code_fd; - code_fd = mkstemp(codetmp); + code_fd = g_file_open_tmp("qtest-boot-serial-cXXXXXX", &codetmp, NULL); g_assert(code_fd != -1); wlen = write(code_fd, code, test->codesize); g_assert(wlen == test->codesize); @@ -266,6 +267,8 @@ static void test_machine(const void *data) unlink(codetmp); } + ser_fd = open(serialtmp, O_RDONLY); + g_assert(ser_fd != -1); if (!check_guest_output(qts, test, ser_fd)) { g_error("Failed to find expected string. 
Please check '%s'", serialtmp); @@ -285,7 +288,8 @@ int main(int argc, char *argv[]) g_test_init(&argc, &argv, NULL); for (i = 0; tests[i].arch != NULL; i++) { - if (strcmp(arch, tests[i].arch) == 0) { + if (g_str_equal(arch, tests[i].arch) && + qtest_has_machine(tests[i].machine)) { char *name = g_strdup_printf("boot-serial/%s", tests[i].machine); qtest_add_data_func(name, &tests[i], test_machine); g_free(name); diff --git a/tests/qtest/cdrom-test.c b/tests/qtest/cdrom-test.c index 5af944a5fb..26a2400181 100644 --- a/tests/qtest/cdrom-test.c +++ b/tests/qtest/cdrom-test.c @@ -11,7 +11,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "boot-sector.h" #include "qapi/qmp/qdict.h" @@ -52,7 +52,7 @@ static int prepare_image(const char *arch, char *isoimage) perror("Error creating temporary iso image file"); return -1; } - if (!mkdtemp(srcdir)) { + if (!g_mkdtemp(srcdir)) { perror("Error creating temporary directory"); goto cleanup; } @@ -109,9 +109,11 @@ static void test_cdrom_param(gconstpointer data) static void add_cdrom_param_tests(const char **machines) { while (*machines) { - char *testname = g_strdup_printf("cdrom/param/%s", *machines); - qtest_add_data_func(testname, *machines, test_cdrom_param); - g_free(testname); + if (qtest_has_machine(*machines)) { + char *testname = g_strdup_printf("cdrom/param/%s", *machines); + qtest_add_data_func(testname, *machines, test_cdrom_param); + g_free(testname); + } machines++; } } @@ -136,25 +138,40 @@ static void add_x86_tests(void) * Unstable CI test under load * See https://lists.gnu.org/archive/html/qemu-devel/2019-02/msg05509.html */ - if (g_test_slow()) { + if (g_test_slow() && qtest_has_machine("isapc")) { qtest_add_data_func("cdrom/boot/isapc", "-M isapc " "-drive if=ide,media=cdrom,file=", test_cdboot); } - qtest_add_data_func("cdrom/boot/am53c974", - "-device am53c974 -device scsi-cd,drive=cd1 " - "-drive if=none,id=cd1,format=raw,file=", test_cdboot); - qtest_add_data_func("cdrom/boot/dc390", - "-device dc390 -device scsi-cd,drive=cd1 " - "-blockdev file,node-name=cd1,filename=", test_cdboot); - qtest_add_data_func("cdrom/boot/lsi53c895a", - "-device lsi53c895a -device scsi-cd,drive=cd1 " - "-blockdev file,node-name=cd1,filename=", test_cdboot); - qtest_add_data_func("cdrom/boot/megasas", "-M q35 " - "-device megasas -device scsi-cd,drive=cd1 " - "-blockdev file,node-name=cd1,filename=", test_cdboot); - qtest_add_data_func("cdrom/boot/megasas-gen2", "-M q35 " - "-device megasas-gen2 -device scsi-cd,drive=cd1 " - "-blockdev file,node-name=cd1,filename=", test_cdboot); + if (qtest_has_device("am53c974")) { + qtest_add_data_func("cdrom/boot/am53c974", + "-device am53c974 -device scsi-cd,drive=cd1 " + "-drive if=none,id=cd1,format=raw,file=", + test_cdboot); + } + if (qtest_has_device("dc390")) { + qtest_add_data_func("cdrom/boot/dc390", + "-device dc390 -device scsi-cd,drive=cd1 " + "-blockdev file,node-name=cd1,filename=", + test_cdboot); + } + if (qtest_has_device("lsi53c895a")) { + qtest_add_data_func("cdrom/boot/lsi53c895a", + "-device lsi53c895a -device scsi-cd,drive=cd1 " + "-blockdev file,node-name=cd1,filename=", + test_cdboot); + } + if (qtest_has_device("megasas")) { + qtest_add_data_func("cdrom/boot/megasas", "-M q35 " + "-device megasas -device scsi-cd,drive=cd1 " + "-blockdev file,node-name=cd1,filename=", + test_cdboot); + } + if (qtest_has_device("megasas-gen2")) { + qtest_add_data_func("cdrom/boot/megasas-gen2", "-M q35 " + "-device megasas-gen2 -device scsi-cd,drive=cd1 " + 
"-blockdev file,node-name=cd1,filename=", + test_cdboot); + } } static void add_s390x_tests(void) @@ -169,12 +186,15 @@ static void add_s390x_tests(void) "-drive driver=null-co,read-zeroes=on,if=none,id=d1 " "-device virtio-blk,drive=d2,bootindex=1 " "-drive if=none,id=d2,media=cdrom,file=", test_cdboot); - qtest_add_data_func("cdrom/boot/without-bootindex", - "-device virtio-scsi -device virtio-serial " - "-device x-terminal3270 -device virtio-blk,drive=d1 " - "-drive driver=null-co,read-zeroes=on,if=none,id=d1 " - "-device virtio-blk,drive=d2 " - "-drive if=none,id=d2,media=cdrom,file=", test_cdboot); + if (qtest_has_device("x-terminal3270")) { + qtest_add_data_func("cdrom/boot/without-bootindex", + "-device virtio-scsi -device virtio-serial " + "-device x-terminal3270 -device virtio-blk,drive=d1 " + "-drive driver=null-co,read-zeroes=on,if=none,id=d1 " + "-device virtio-blk,drive=d2 " + "-drive if=none,id=d2,media=cdrom,file=", + test_cdboot); + } } int main(int argc, char **argv) diff --git a/tests/qtest/cpu-plug-test.c b/tests/qtest/cpu-plug-test.c index a1c689414b..7f5dd5f85a 100644 --- a/tests/qtest/cpu-plug-test.c +++ b/tests/qtest/cpu-plug-test.c @@ -9,7 +9,6 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "libqtest-single.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qlist.h" diff --git a/tests/qtest/cxl-test.c b/tests/qtest/cxl-test.c new file mode 100644 index 0000000000..61f25a72b6 --- /dev/null +++ b/tests/qtest/cxl-test.c @@ -0,0 +1,155 @@ +/* + * QTest testcase for CXL + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "libqtest-single.h" + +#define QEMU_PXB_CMD "-machine q35,cxl=on " \ + "-device pxb-cxl,id=cxl.0,bus=pcie.0,bus_nr=52 " \ + "-M cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.size=4G " + +#define QEMU_2PXB_CMD "-machine q35,cxl=on " \ + "-device pxb-cxl,id=cxl.0,bus=pcie.0,bus_nr=52 " \ + "-device pxb-cxl,id=cxl.1,bus=pcie.0,bus_nr=53 " \ + "-M cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.targets.1=cxl.1,cxl-fmw.0.size=4G " + +#define QEMU_RP "-device cxl-rp,id=rp0,bus=cxl.0,chassis=0,slot=0 " + +/* Dual ports on first pxb */ +#define QEMU_2RP "-device cxl-rp,id=rp0,bus=cxl.0,chassis=0,slot=0 " \ + "-device cxl-rp,id=rp1,bus=cxl.0,chassis=0,slot=1 " + +/* Dual ports on each of the pxb instances */ +#define QEMU_4RP "-device cxl-rp,id=rp0,bus=cxl.0,chassis=0,slot=0 " \ + "-device cxl-rp,id=rp1,bus=cxl.0,chassis=0,slot=1 " \ + "-device cxl-rp,id=rp2,bus=cxl.1,chassis=0,slot=2 " \ + "-device cxl-rp,id=rp3,bus=cxl.1,chassis=0,slot=3 " + +#define QEMU_T3D "-object memory-backend-file,id=cxl-mem0,mem-path=%s,size=256M " \ + "-object memory-backend-file,id=lsa0,mem-path=%s,size=256M " \ + "-device cxl-type3,bus=rp0,memdev=cxl-mem0,lsa=lsa0,id=cxl-pmem0 " + +#define QEMU_2T3D "-object memory-backend-file,id=cxl-mem0,mem-path=%s,size=256M " \ + "-object memory-backend-file,id=lsa0,mem-path=%s,size=256M " \ + "-device cxl-type3,bus=rp0,memdev=cxl-mem0,lsa=lsa0,id=cxl-pmem0 " \ + "-object memory-backend-file,id=cxl-mem1,mem-path=%s,size=256M " \ + "-object memory-backend-file,id=lsa1,mem-path=%s,size=256M " \ + "-device cxl-type3,bus=rp1,memdev=cxl-mem1,lsa=lsa1,id=cxl-pmem1 " + +#define QEMU_4T3D "-object memory-backend-file,id=cxl-mem0,mem-path=%s,size=256M " \ + "-object memory-backend-file,id=lsa0,mem-path=%s,size=256M " \ + "-device cxl-type3,bus=rp0,memdev=cxl-mem0,lsa=lsa0,id=cxl-pmem0 " \ + "-object 
memory-backend-file,id=cxl-mem1,mem-path=%s,size=256M " \ + "-object memory-backend-file,id=lsa1,mem-path=%s,size=256M " \ + "-device cxl-type3,bus=rp1,memdev=cxl-mem1,lsa=lsa1,id=cxl-pmem1 " \ + "-object memory-backend-file,id=cxl-mem2,mem-path=%s,size=256M " \ + "-object memory-backend-file,id=lsa2,mem-path=%s,size=256M " \ + "-device cxl-type3,bus=rp2,memdev=cxl-mem2,lsa=lsa2,id=cxl-pmem2 " \ + "-object memory-backend-file,id=cxl-mem3,mem-path=%s,size=256M " \ + "-object memory-backend-file,id=lsa3,mem-path=%s,size=256M " \ + "-device cxl-type3,bus=rp3,memdev=cxl-mem3,lsa=lsa3,id=cxl-pmem3 " + +static void cxl_basic_hb(void) +{ + qtest_start("-machine q35,cxl=on"); + qtest_end(); +} + +static void cxl_basic_pxb(void) +{ + qtest_start("-machine q35,cxl=on -device pxb-cxl,bus=pcie.0"); + qtest_end(); +} + +static void cxl_pxb_with_window(void) +{ + qtest_start(QEMU_PXB_CMD); + qtest_end(); +} + +static void cxl_2pxb_with_window(void) +{ + qtest_start(QEMU_2PXB_CMD); + qtest_end(); +} + +static void cxl_root_port(void) +{ + qtest_start(QEMU_PXB_CMD QEMU_RP); + qtest_end(); +} + +static void cxl_2root_port(void) +{ + qtest_start(QEMU_PXB_CMD QEMU_2RP); + qtest_end(); +} + +#ifdef CONFIG_POSIX +static void cxl_t3d(void) +{ + g_autoptr(GString) cmdline = g_string_new(NULL); + g_autofree const char *tmpfs = NULL; + + tmpfs = g_dir_make_tmp("cxl-test-XXXXXX", NULL); + + g_string_printf(cmdline, QEMU_PXB_CMD QEMU_RP QEMU_T3D, tmpfs, tmpfs); + + qtest_start(cmdline->str); + qtest_end(); + rmdir(tmpfs); +} + +static void cxl_1pxb_2rp_2t3d(void) +{ + g_autoptr(GString) cmdline = g_string_new(NULL); + g_autofree const char *tmpfs = NULL; + + tmpfs = g_dir_make_tmp("cxl-test-XXXXXX", NULL); + + g_string_printf(cmdline, QEMU_PXB_CMD QEMU_2RP QEMU_2T3D, + tmpfs, tmpfs, tmpfs, tmpfs); + + qtest_start(cmdline->str); + qtest_end(); + rmdir(tmpfs); +} + +static void cxl_2pxb_4rp_4t3d(void) +{ + g_autoptr(GString) cmdline = g_string_new(NULL); + g_autofree const char *tmpfs = NULL; + + tmpfs = g_dir_make_tmp("cxl-test-XXXXXX", NULL); + + g_string_printf(cmdline, QEMU_2PXB_CMD QEMU_4RP QEMU_4T3D, + tmpfs, tmpfs, tmpfs, tmpfs, tmpfs, tmpfs, + tmpfs, tmpfs); + + qtest_start(cmdline->str); + qtest_end(); + rmdir(tmpfs); +} +#endif /* CONFIG_POSIX */ + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + + qtest_add_func("/pci/cxl/basic_hostbridge", cxl_basic_hb); + qtest_add_func("/pci/cxl/basic_pxb", cxl_basic_pxb); + qtest_add_func("/pci/cxl/pxb_with_window", cxl_pxb_with_window); + qtest_add_func("/pci/cxl/pxb_x2_with_window", cxl_2pxb_with_window); + qtest_add_func("/pci/cxl/rp", cxl_root_port); + qtest_add_func("/pci/cxl/rp_x2", cxl_2root_port); +#ifdef CONFIG_POSIX + qtest_add_func("/pci/cxl/type3_device", cxl_t3d); + qtest_add_func("/pci/cxl/rp_x2_type3_x2", cxl_1pxb_2rp_2t3d); + qtest_add_func("/pci/cxl/pxb_x2_root_port_x4_type3_x4", cxl_2pxb_4rp_4t3d); +#endif + return g_test_run(); +} diff --git a/tests/qtest/dbus-display-test.c b/tests/qtest/dbus-display-test.c new file mode 100644 index 0000000000..cb1b62d1d1 --- /dev/null +++ b/tests/qtest/dbus-display-test.c @@ -0,0 +1,256 @@ +#include "qemu/osdep.h" +#include "qemu/dbus.h" +#include +#include +#include "libqtest.h" +#include "ui/dbus-display1.h" + +static GDBusConnection* +test_dbus_p2p_from_fd(int fd) +{ + g_autoptr(GError) err = NULL; + g_autoptr(GSocket) socket = NULL; + g_autoptr(GSocketConnection) socketc = NULL; + GDBusConnection *conn; + + socket = g_socket_new_from_fd(fd, &err); + g_assert_no_error(err); + + socketc = 
g_socket_connection_factory_create_connection(socket); + g_assert(socketc != NULL); + + conn = g_dbus_connection_new_sync( + G_IO_STREAM(socketc), NULL, + G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_CLIENT | + G_DBUS_CONNECTION_FLAGS_DELAY_MESSAGE_PROCESSING, + NULL, NULL, &err); + g_assert_no_error(err); + + return conn; +} + +static void +test_setup(QTestState **qts, GDBusConnection **conn) +{ + int pair[2]; + + *qts = qtest_init("-display dbus,p2p=yes -name dbus-test"); + + g_assert_cmpint(socketpair(AF_UNIX, SOCK_STREAM, 0, pair), ==, 0); + + qtest_qmp_add_client(*qts, "@dbus-display", pair[1]); + + *conn = test_dbus_p2p_from_fd(pair[0]); + g_dbus_connection_start_message_processing(*conn); +} + +static void +test_dbus_display_vm(void) +{ + g_autoptr(GError) err = NULL; + g_autoptr(GDBusConnection) conn = NULL; + g_autoptr(QemuDBusDisplay1VMProxy) vm = NULL; + QTestState *qts = NULL; + + test_setup(&qts, &conn); + + vm = QEMU_DBUS_DISPLAY1_VM_PROXY( + qemu_dbus_display1_vm_proxy_new_sync( + conn, + G_DBUS_PROXY_FLAGS_NONE, + NULL, + DBUS_DISPLAY1_ROOT "/VM", + NULL, + &err)); + g_assert_no_error(err); + + g_assert_cmpstr( + qemu_dbus_display1_vm_get_name(QEMU_DBUS_DISPLAY1_VM(vm)), + ==, + "dbus-test"); + qtest_quit(qts); +} + +typedef struct TestDBusConsoleRegister { + GMainLoop *loop; + GThread *thread; + GDBusConnection *listener_conn; + GDBusObjectManagerServer *server; +} TestDBusConsoleRegister; + +static gboolean listener_handle_scanout( + QemuDBusDisplay1Listener *object, + GDBusMethodInvocation *invocation, + guint arg_width, + guint arg_height, + guint arg_stride, + guint arg_pixman_format, + GVariant *arg_data, + TestDBusConsoleRegister *test) +{ + g_main_loop_quit(test->loop); + + return DBUS_METHOD_INVOCATION_HANDLED; +} + +static void +test_dbus_console_setup_listener(TestDBusConsoleRegister *test) +{ + g_autoptr(GDBusObjectSkeleton) listener = NULL; + g_autoptr(QemuDBusDisplay1ListenerSkeleton) iface = NULL; + + test->server = g_dbus_object_manager_server_new(DBUS_DISPLAY1_ROOT); + listener = g_dbus_object_skeleton_new(DBUS_DISPLAY1_ROOT "/Listener"); + iface = QEMU_DBUS_DISPLAY1_LISTENER_SKELETON( + qemu_dbus_display1_listener_skeleton_new()); + g_object_connect(iface, + "signal::handle-scanout", listener_handle_scanout, test, + NULL); + g_dbus_object_skeleton_add_interface(listener, + G_DBUS_INTERFACE_SKELETON(iface)); + g_dbus_object_manager_server_export(test->server, listener); + g_dbus_object_manager_server_set_connection(test->server, + test->listener_conn); + + g_dbus_connection_start_message_processing(test->listener_conn); +} + +static void +test_dbus_console_registered(GObject *source_object, + GAsyncResult *res, + gpointer user_data) +{ + TestDBusConsoleRegister *test = user_data; + g_autoptr(GError) err = NULL; + + qemu_dbus_display1_console_call_register_listener_finish( + QEMU_DBUS_DISPLAY1_CONSOLE(source_object), + NULL, res, &err); + g_assert_no_error(err); + + test->listener_conn = g_thread_join(test->thread); + test_dbus_console_setup_listener(test); +} + +static gpointer +test_dbus_p2p_server_setup_thread(gpointer data) +{ + return test_dbus_p2p_from_fd(GPOINTER_TO_INT(data)); +} + +static void +test_dbus_display_console(void) +{ + g_autoptr(GError) err = NULL; + g_autoptr(GDBusConnection) conn = NULL; + g_autoptr(QemuDBusDisplay1ConsoleProxy) console = NULL; + g_autoptr(GUnixFDList) fd_list = NULL; + g_autoptr(GMainLoop) loop = NULL; + QTestState *qts = NULL; + int pair[2], idx; + TestDBusConsoleRegister test; + + test_setup(&qts, &conn); + + 
g_assert_cmpint(socketpair(AF_UNIX, SOCK_STREAM, 0, pair), ==, 0); + fd_list = g_unix_fd_list_new(); + idx = g_unix_fd_list_append(fd_list, pair[1], NULL); + + console = QEMU_DBUS_DISPLAY1_CONSOLE_PROXY( + qemu_dbus_display1_console_proxy_new_sync( + conn, + G_DBUS_PROXY_FLAGS_NONE, + NULL, + "/org/qemu/Display1/Console_0", + NULL, + &err)); + g_assert_no_error(err); + + test.loop = loop = g_main_loop_new(NULL, FALSE); + test.thread = g_thread_new(NULL, test_dbus_p2p_server_setup_thread, + GINT_TO_POINTER(pair[0])); + + qemu_dbus_display1_console_call_register_listener( + QEMU_DBUS_DISPLAY1_CONSOLE(console), + g_variant_new_handle(idx), + G_DBUS_CALL_FLAGS_NONE, + -1, + fd_list, + NULL, + test_dbus_console_registered, + &test); + + g_main_loop_run(loop); + + g_clear_object(&test.server); + g_clear_object(&test.listener_conn); + qtest_quit(qts); +} + +static void +test_dbus_display_keyboard(void) +{ + g_autoptr(GError) err = NULL; + g_autoptr(GDBusConnection) conn = NULL; + g_autoptr(QemuDBusDisplay1KeyboardProxy) keyboard = NULL; + QTestState *qts = NULL; + + test_setup(&qts, &conn); + + keyboard = QEMU_DBUS_DISPLAY1_KEYBOARD_PROXY( + qemu_dbus_display1_keyboard_proxy_new_sync( + conn, + G_DBUS_PROXY_FLAGS_NONE, + NULL, + "/org/qemu/Display1/Console_0", + NULL, + &err)); + g_assert_no_error(err); + + + g_assert_cmpint(qtest_inb(qts, 0x64) & 0x1, ==, 0); + g_assert_cmpint(qtest_inb(qts, 0x60), ==, 0); + + qemu_dbus_display1_keyboard_call_press_sync( + QEMU_DBUS_DISPLAY1_KEYBOARD(keyboard), + 0x1C, /* qnum enter */ + G_DBUS_CALL_FLAGS_NONE, + -1, + NULL, + &err); + g_assert_no_error(err); + + /* may be should wait for interrupt? */ + g_assert_cmpint(qtest_inb(qts, 0x64) & 0x1, ==, 1); + g_assert_cmpint(qtest_inb(qts, 0x60), ==, 0x5A); /* scan code 2 enter */ + + qemu_dbus_display1_keyboard_call_release_sync( + QEMU_DBUS_DISPLAY1_KEYBOARD(keyboard), + 0x1C, /* qnum enter */ + G_DBUS_CALL_FLAGS_NONE, + -1, + NULL, + &err); + g_assert_no_error(err); + + g_assert_cmpint(qtest_inb(qts, 0x64) & 0x1, ==, 1); + g_assert_cmpint(qtest_inb(qts, 0x60), ==, 0xF0); /* scan code 2 release */ + g_assert_cmpint(qtest_inb(qts, 0x60), ==, 0x5A); /* scan code 2 enter */ + + g_assert_cmpint(qemu_dbus_display1_keyboard_get_modifiers( + QEMU_DBUS_DISPLAY1_KEYBOARD(keyboard)), ==, 0); + + qtest_quit(qts); +} + +int +main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + + qtest_add_func("/dbus-display/vm", test_dbus_display_vm); + qtest_add_func("/dbus-display/console", test_dbus_display_console); + qtest_add_func("/dbus-display/keyboard", test_dbus_display_keyboard); + + return g_test_run(); +} diff --git a/tests/qtest/dbus-vmstate-test.c b/tests/qtest/dbus-vmstate-test.c index aca9b98b7a..6c990864e3 100644 --- a/tests/qtest/dbus-vmstate-test.c +++ b/tests/qtest/dbus-vmstate-test.c @@ -1,8 +1,7 @@ #include "qemu/osdep.h" #include #include -#include "libqos/libqtest.h" -#include "qemu-common.h" +#include "libqtest.h" #include "dbus-vmstate1.h" #include "migration-helpers.h" @@ -234,7 +233,7 @@ test_dbus_vmstate(Test *test) test->src_qemu = src_qemu; if (test->migrate_fail) { wait_for_migration_fail(src_qemu, true); - qtest_set_expected_status(dst_qemu, 1); + qtest_set_expected_status(dst_qemu, EXIT_FAILURE); } else { wait_for_migration_complete(src_qemu); } diff --git a/tests/qtest/dbus-vmstate1.xml b/tests/qtest/dbus-vmstate1.xml deleted file mode 100644 index cc8563be4c..0000000000 --- a/tests/qtest/dbus-vmstate1.xml +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - - - - - - - diff --git 
a/tests/qtest/device-introspect-test.c b/tests/qtest/device-introspect-test.c index bbec166dbc..5b0ffe43f5 100644 --- a/tests/qtest/device-introspect-test.c +++ b/tests/qtest/device-introspect-test.c @@ -18,11 +18,10 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/qmp/qstring.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qlist.h" -#include "libqos/libqtest.h" +#include "libqtest.h" const char common_args[] = "-nodefaults -machine none"; diff --git a/tests/qtest/device-plug-test.c b/tests/qtest/device-plug-test.c index 559d47727a..5a6afa2b57 100644 --- a/tests/qtest/device-plug-test.c +++ b/tests/qtest/device-plug-test.c @@ -11,21 +11,10 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qstring.h" -static void device_del(QTestState *qtest, const char *id) -{ - QDict *resp; - - resp = qtest_qmp(qtest, - "{'execute': 'device_del', 'arguments': { 'id': %s } }", id); - - g_assert(qdict_haskey(resp, "return")); - qobject_unref(resp); -} - static void system_reset(QTestState *qtest) { QDict *resp; @@ -61,18 +50,83 @@ static void wait_device_deleted_event(QTestState *qtest, const char *id) } } -static void test_pci_unplug_request(void) +static void process_device_remove(QTestState *qtest, const char *id) { - QTestState *qtest = qtest_initf("-device virtio-mouse-pci,id=dev0"); - /* * Request device removal. As the guest is not running, the request won't * be processed. However during system reset, the removal will be * handled, removing the device. */ - device_del(qtest, "dev0"); + qtest_qmp_device_del_send(qtest, id); system_reset(qtest); - wait_device_deleted_event(qtest, "dev0"); + wait_device_deleted_event(qtest, id); +} + +static void test_pci_unplug_request(void) +{ + const char *arch = qtest_get_arch(); + const char *machine_addition = ""; + + if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { + machine_addition = "-machine pc"; + } + + QTestState *qtest = qtest_initf("%s -device virtio-mouse-pci,id=dev0", + machine_addition); + + process_device_remove(qtest, "dev0"); + + qtest_quit(qtest); +} + +static void test_q35_pci_unplug_request(void) +{ + + QTestState *qtest = qtest_initf("-machine q35 " + "-device pcie-root-port,id=p1 " + "-device pcie-pci-bridge,bus=p1,id=b1 " + "-device virtio-mouse-pci,bus=b1,id=dev0"); + + process_device_remove(qtest, "dev0"); + + qtest_quit(qtest); +} + +static void test_pci_unplug_json_request(void) +{ + const char *arch = qtest_get_arch(); + const char *machine_addition = ""; + + if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { + machine_addition = "-machine pc"; + } + + QTestState *qtest = qtest_initf( + "%s -device \"{'driver': 'virtio-mouse-pci', 'id': 'dev0'}\"", + machine_addition); + + process_device_remove(qtest, "dev0"); + + qtest_quit(qtest); +} + +static void test_q35_pci_unplug_json_request(void) +{ + const char *port = "-device \"{'driver': 'pcie-root-port', " + "'id': 'p1'}\""; + + const char *bridge = "-device \"{'driver': 'pcie-pci-bridge', " + "'id': 'b1', " + "'bus': 'p1'}\""; + + const char *device = "-device \"{'driver': 'virtio-mouse-pci', " + "'bus': 'b1', " + "'id': 'dev0'}\""; + + QTestState *qtest = qtest_initf("-machine q35 %s %s %s", + port, bridge, device); + + process_device_remove(qtest, "dev0"); qtest_quit(qtest); } @@ -81,7 +135,7 @@ static void test_ccw_unplug(void) { QTestState *qtest = qtest_initf("-device virtio-balloon-ccw,id=dev0"); - device_del(qtest, "dev0"); + 
qtest_qmp_device_del_send(qtest, "dev0"); wait_device_deleted_event(qtest, "dev0"); qtest_quit(qtest); @@ -95,9 +149,7 @@ static void test_spapr_cpu_unplug_request(void) "-device power9_v2.0-spapr-cpu-core,core-id=1,id=dev0"); /* similar to test_pci_unplug_request */ - device_del(qtest, "dev0"); - system_reset(qtest); - wait_device_deleted_event(qtest, "dev0"); + process_device_remove(qtest, "dev0"); qtest_quit(qtest); } @@ -111,9 +163,7 @@ static void test_spapr_memory_unplug_request(void) "-device pc-dimm,id=dev0,memdev=mem0"); /* similar to test_pci_unplug_request */ - device_del(qtest, "dev0"); - system_reset(qtest); - wait_device_deleted_event(qtest, "dev0"); + process_device_remove(qtest, "dev0"); qtest_quit(qtest); } @@ -125,9 +175,7 @@ static void test_spapr_phb_unplug_request(void) qtest = qtest_initf("-device spapr-pci-host-bridge,index=1,id=dev0"); /* similar to test_pci_unplug_request */ - device_del(qtest, "dev0"); - system_reset(qtest); - wait_device_deleted_event(qtest, "dev0"); + process_device_remove(qtest, "dev0"); qtest_quit(qtest); } @@ -145,6 +193,8 @@ int main(int argc, char **argv) */ qtest_add_func("/device-plug/pci-unplug-request", test_pci_unplug_request); + qtest_add_func("/device-plug/pci-unplug-json-request", + test_pci_unplug_json_request); if (!strcmp(arch, "s390x")) { qtest_add_func("/device-plug/ccw-unplug", @@ -160,5 +210,12 @@ int main(int argc, char **argv) test_spapr_phb_unplug_request); } + if (!strcmp(arch, "x86_64") && qtest_has_machine("q35")) { + qtest_add_func("/device-plug/q35-pci-unplug-request", + test_q35_pci_unplug_request); + qtest_add_func("/device-plug/q35-pci-unplug-json-request", + test_q35_pci_unplug_json_request); + } + return g_test_run(); } diff --git a/tests/qtest/drive_del-test.c b/tests/qtest/drive_del-test.c index 8d08ee9995..9a750395a9 100644 --- a/tests/qtest/drive_del-test.c +++ b/tests/qtest/drive_del-test.c @@ -11,7 +11,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "libqos/virtio.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qlist.h" @@ -123,12 +123,10 @@ static const char *qvirtio_get_dev_type(void) static void device_add(QTestState *qts) { - QDict *response; - char driver[32]; - snprintf(driver, sizeof(driver), "virtio-blk-%s", - qvirtio_get_dev_type()); - - response = qtest_qmp(qts, "{'execute': 'device_add'," + g_autofree char *driver = g_strdup_printf("virtio-blk-%s", + qvirtio_get_dev_type()); + QDict *response = + qtest_qmp(qts, "{'execute': 'device_add'," " 'arguments': {" " 'driver': %s," " 'drive': 'drive0'," @@ -143,11 +141,7 @@ static void device_del(QTestState *qts, bool and_reset) { QDict *response; - response = qtest_qmp(qts, "{'execute': 'device_del'," - " 'arguments': { 'id': 'dev0' } }"); - g_assert(response); - g_assert(qdict_haskey(response, "return")); - qobject_unref(response); + qtest_qmp_device_del_send(qts, "dev0"); if (and_reset) { response = qtest_qmp(qts, "{'execute': 'system_reset' }"); @@ -233,6 +227,32 @@ static void test_drive_del_device_del(void) } static void test_cli_device_del(void) +{ + QTestState *qts; + const char *arch = qtest_get_arch(); + const char *machine_addition = ""; + + if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { + machine_addition = "-machine pc"; + } + + /* + * -drive/-device and device_del. Start with a drive used by a + * device that unplugs after reset. 
+ */ + qts = qtest_initf("%s -drive if=none,id=drive0,file=null-co://," + "file.read-zeroes=on,format=raw" + " -device virtio-blk-%s,drive=drive0,id=dev0", + machine_addition, + qvirtio_get_dev_type()); + + device_del(qts, true); + g_assert(!has_drive(qts)); + + qtest_quit(qts); +} + +static void test_cli_device_del_q35(void) { QTestState *qts; @@ -241,8 +261,10 @@ static void test_cli_device_del(void) * device that unplugs after reset. */ qts = qtest_initf("-drive if=none,id=drive0,file=null-co://," - "file.read-zeroes=on,format=raw" - " -device virtio-blk-%s,drive=drive0,id=dev0", + "file.read-zeroes=on,format=raw " + "-machine q35 -device pcie-root-port,id=p1 " + "-device pcie-pci-bridge,bus=p1,id=b1 " + "-device virtio-blk-%s,drive=drive0,bus=b1,id=dev0", qvirtio_get_dev_type()); device_del(qts, true); @@ -266,13 +288,19 @@ static void test_empty_device_del(void) static void test_device_add_and_del(void) { QTestState *qts; + const char *arch = qtest_get_arch(); + const char *machine_addition = ""; + + if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { + machine_addition = "-machine pc"; + } /* * -drive/device_add and device_del. Start with a drive used by a * device that unplugs after reset. */ - qts = qtest_init("-drive if=none,id=drive0,file=null-co://," - "file.read-zeroes=on,format=raw"); + qts = qtest_initf("%s -drive if=none,id=drive0,file=null-co://," + "file.read-zeroes=on,format=raw", machine_addition); device_add(qts); device_del(qts, true); @@ -281,11 +309,54 @@ static void test_device_add_and_del(void) qtest_quit(qts); } +static void device_add_q35(QTestState *qts) +{ + g_autofree char *driver = g_strdup_printf("virtio-blk-%s", + qvirtio_get_dev_type()); + QDict *response = + qtest_qmp(qts, "{'execute': 'device_add'," + " 'arguments': {" + " 'driver': %s," + " 'drive': 'drive0'," + " 'id': 'dev0'," + " 'bus': 'b1'" + "}}", driver); + g_assert(response); + g_assert(qdict_haskey(response, "return")); + qobject_unref(response); +} + +static void test_device_add_and_del_q35(void) +{ + QTestState *qts; + + /* + * -drive/device_add and device_del. Start with a drive used by a + * device that unplugs after reset. + */ + qts = qtest_initf("-machine q35 -device pcie-root-port,id=p1 " + "-device pcie-pci-bridge,bus=p1,id=b1 " + "-drive if=none,id=drive0,file=null-co://," + "file.read-zeroes=on,format=raw"); + + device_add_q35(qts); + device_del(qts, true); + g_assert(!has_drive(qts)); + + qtest_quit(qts); +} + static void test_drive_add_device_add_and_del(void) { QTestState *qts; + const char *arch = qtest_get_arch(); + const char *machine_addition = ""; - qts = qtest_init(""); + if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { + machine_addition = "-machine pc"; + } + + qts = qtest_init(machine_addition); /* * drive_add/device_add and device_del. The drive is used by a @@ -299,14 +370,39 @@ static void test_drive_add_device_add_and_del(void) qtest_quit(qts); } -static void test_blockdev_add_device_add_and_del(void) +static void test_drive_add_device_add_and_del_q35(void) { QTestState *qts; - qts = qtest_init(""); + qts = qtest_init("-machine q35 -device pcie-root-port,id=p1 " + "-device pcie-pci-bridge,bus=p1,id=b1"); /* - * blockdev_add/device_add and device_del. The it drive is used by a + * drive_add/device_add and device_del. The drive is used by a + * device that unplugs after reset. 
+ */ + drive_add_with_media(qts); + device_add_q35(qts); + device_del(qts, true); + g_assert(!has_drive(qts)); + + qtest_quit(qts); +} + +static void test_blockdev_add_device_add_and_del(void) +{ + QTestState *qts; + const char *arch = qtest_get_arch(); + const char *machine_addition = ""; + + if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { + machine_addition = "-machine pc"; + } + + qts = qtest_init(machine_addition); + + /* + * blockdev_add/device_add and device_del. The drive is used by a * device that unplugs after reset, but it doesn't go away. */ blockdev_add_with_media(qts); @@ -317,6 +413,25 @@ static void test_blockdev_add_device_add_and_del(void) qtest_quit(qts); } +static void test_blockdev_add_device_add_and_del_q35(void) +{ + QTestState *qts; + + qts = qtest_init("-machine q35 -device pcie-root-port,id=p1 " + "-device pcie-pci-bridge,bus=p1,id=b1"); + + /* + * blockdev_add/device_add and device_del. The drive is used by a + * device that unplugs after reset, but it doesn't go away. + */ + blockdev_add_with_media(qts); + device_add_q35(qts); + device_del(qts, true); + g_assert(has_blockdev(qts)); + + qtest_quit(qts); +} + int main(int argc, char **argv) { g_test_init(&argc, &argv, NULL); @@ -338,6 +453,17 @@ int main(int argc, char **argv) test_empty_device_del); qtest_add_func("/device_del/blockdev", test_blockdev_add_device_add_and_del); + + if (qtest_has_machine("q35")) { + qtest_add_func("/device_del/drive/cli_device_q35", + test_cli_device_del_q35); + qtest_add_func("/device_del/drive/device_add_q35", + test_device_add_and_del_q35); + qtest_add_func("/device_del/drive/drive_add_device_add_q35", + test_drive_add_device_add_and_del_q35); + qtest_add_func("/device_del/blockdev_q35", + test_blockdev_add_device_add_and_del_q35); + } } return g_test_run(); diff --git a/tests/qtest/ds1338-test.c b/tests/qtest/ds1338-test.c index c5d46bcc64..f6ade9a050 100644 --- a/tests/qtest/ds1338-test.c +++ b/tests/qtest/ds1338-test.c @@ -18,7 +18,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "libqos/i2c.h" #define DS1338_ADDR 0x68 diff --git a/tests/qtest/e1000-test.c b/tests/qtest/e1000-test.c index ea286d1793..4e0d7a5607 100644 --- a/tests/qtest/e1000-test.c +++ b/tests/qtest/e1000-test.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/module.h" #include "libqos/qgraph.h" #include "libqos/pci.h" @@ -35,7 +35,7 @@ static void *e1000_get_driver(void *obj, const char *interface) return &e1000->dev; } - fprintf(stderr, "%s not present in e1000e\n", interface); + fprintf(stderr, "%s not present in e1000\n", interface); g_assert_not_reached(); } diff --git a/tests/qtest/e1000e-test.c b/tests/qtest/e1000e-test.c index 0273fe4c15..08adc5226d 100644 --- a/tests/qtest/e1000e-test.c +++ b/tests/qtest/e1000e-test.c @@ -25,44 +25,19 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "libqtest-single.h" -#include "qemu-common.h" #include "libqos/pci-pc.h" #include "qemu/sockets.h" #include "qemu/iov.h" #include "qemu/module.h" #include "qemu/bitops.h" -#include "libqos/malloc.h" +#include "libqos/libqos-malloc.h" #include "libqos/e1000e.h" +#include "hw/net/e1000_regs.h" static void e1000e_send_verify(QE1000E *d, int *test_sockets, QGuestAllocator *alloc) { - struct { - uint64_t buffer_addr; - union { - uint32_t data; - struct { - uint16_t length; - uint8_t cso; - uint8_t cmd; - } flags; - } lower; - union { - uint32_t data; - struct { - uint8_t status; - uint8_t css; 
- uint16_t special; - } fields; - } upper; - } descr; - - static const uint32_t dtyp_data = BIT(20); - static const uint32_t dtyp_ext = BIT(29); - static const uint32_t dcmd_rs = BIT(27); - static const uint32_t dcmd_eop = BIT(24); - static const uint32_t dsta_dd = BIT(0); + struct e1000_tx_desc descr; static const int data_len = 64; char buffer[64]; int ret; @@ -75,10 +50,10 @@ static void e1000e_send_verify(QE1000E *d, int *test_sockets, QGuestAllocator *a /* Prepare TX descriptor */ memset(&descr, 0, sizeof(descr)); descr.buffer_addr = cpu_to_le64(data); - descr.lower.data = cpu_to_le32(dcmd_rs | - dcmd_eop | - dtyp_ext | - dtyp_data | + descr.lower.data = cpu_to_le32(E1000_TXD_CMD_RS | + E1000_TXD_CMD_EOP | + E1000_TXD_CMD_DEXT | + E1000_TXD_DTYP_D | data_len); /* Put descriptor to the ring */ @@ -88,12 +63,13 @@ static void e1000e_send_verify(QE1000E *d, int *test_sockets, QGuestAllocator *a e1000e_wait_isr(d, E1000E_TX0_MSG_ID); /* Check DD bit */ - g_assert_cmphex(le32_to_cpu(descr.upper.data) & dsta_dd, ==, dsta_dd); + g_assert_cmphex(le32_to_cpu(descr.upper.data) & E1000_TXD_STAT_DD, ==, + E1000_TXD_STAT_DD); /* Check data sent to the backend */ - ret = qemu_recv(test_sockets[0], &recv_len, sizeof(recv_len), 0); + ret = recv(test_sockets[0], &recv_len, sizeof(recv_len), 0); g_assert_cmpint(ret, == , sizeof(recv_len)); - ret = qemu_recv(test_sockets[0], buffer, 64, 0); + ret = recv(test_sockets[0], buffer, 64, 0); g_assert_cmpint(ret, >=, 5); g_assert_cmpstr(buffer, == , "TEST"); @@ -103,31 +79,7 @@ static void e1000e_send_verify(QE1000E *d, int *test_sockets, QGuestAllocator *a static void e1000e_receive_verify(QE1000E *d, int *test_sockets, QGuestAllocator *alloc) { - union { - struct { - uint64_t buffer_addr; - uint64_t reserved; - } read; - struct { - struct { - uint32_t mrq; - union { - uint32_t rss; - struct { - uint16_t ip_id; - uint16_t csum; - } csum_ip; - } hi_dword; - } lower; - struct { - uint32_t status_error; - uint16_t length; - uint16_t vlan; - } upper; - } wb; - } descr; - - static const uint32_t esta_dd = BIT(0); + union e1000_rx_desc_extended descr; char test[] = "TEST"; int len = htonl(sizeof(test)); @@ -164,7 +116,7 @@ static void e1000e_receive_verify(QE1000E *d, int *test_sockets, QGuestAllocator /* Check DD bit */ g_assert_cmphex(le32_to_cpu(descr.wb.upper.status_error) & - esta_dd, ==, esta_dd); + E1000_RXD_STAT_DD, ==, E1000_RXD_STAT_DD); /* Check data sent to the backend */ memread(data, buffer, sizeof(buffer)); @@ -235,6 +187,12 @@ static void test_e1000e_multiple_transfers(void *obj, void *data, static void test_e1000e_hotplug(void *obj, void *data, QGuestAllocator * alloc) { QTestState *qts = global_qtest; /* TODO: get rid of global_qtest here */ + QE1000E_PCI *dev = obj; + + if (dev->pci_dev.bus->not_hotpluggable) { + g_test_skip("pci bus does not support hotplug"); + return; + } qtest_qmp_device_add(qts, "e1000e", "e1000e_net", "{'addr': '0x06'}"); qpci_unplug_acpi_device_test(qts, "e1000e_net", 0x06); diff --git a/tests/qtest/eepro100-test.c b/tests/qtest/eepro100-test.c index d72ad099f1..8dbffff0b8 100644 --- a/tests/qtest/eepro100-test.c +++ b/tests/qtest/eepro100-test.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/module.h" #include "libqos/qgraph.h" #include "libqos/pci.h" diff --git a/tests/qtest/endianness-test.c b/tests/qtest/endianness-test.c index 09ecb531f1..222d116fae 100644 --- a/tests/qtest/endianness-test.c +++ b/tests/qtest/endianness-test.c @@ -13,7 +13,7 @@ #include 
"qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/bswap.h" typedef struct TestCase TestCase; @@ -28,6 +28,7 @@ struct TestCase { static const TestCase test_cases[] = { { "i386", "pc", -1 }, { "mips", "malta", 0x10000000, .bswap = true }, + { "mipsel", "malta", 0x10000000 }, { "mips64", "magnum", 0x90000000, .bswap = true }, { "mips64", "pica61", 0x90000000, .bswap = true }, { "mips64", "malta", 0x10000000, .bswap = true }, @@ -281,7 +282,10 @@ int main(int argc, char **argv) for (i = 0; test_cases[i].arch; i++) { gchar *path; - if (strcmp(test_cases[i].arch, arch) != 0) { + + if (!g_str_equal(test_cases[i].arch, arch) || + !qtest_has_machine(test_cases[i].machine) || + (test_cases[i].superio && !qtest_has_device(test_cases[i].superio))) { continue; } path = g_strdup_printf("endianness/%s", diff --git a/tests/qtest/erst-test.c b/tests/qtest/erst-test.c new file mode 100644 index 0000000000..4e768a126f --- /dev/null +++ b/tests/qtest/erst-test.c @@ -0,0 +1,163 @@ +/* + * QTest testcase for acpi-erst + * + * Copyright (c) 2021 Oracle + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include +#include "libqos/libqos-pc.h" +#include "libqtest.h" + +#include "hw/pci/pci.h" + +static void save_fn(QPCIDevice *dev, int devfn, void *data) +{ + QPCIDevice **pdev = (QPCIDevice **) data; + + *pdev = dev; +} + +static QPCIDevice *get_erst_device(QPCIBus *pcibus) +{ + QPCIDevice *dev; + + dev = NULL; + qpci_device_foreach(pcibus, + PCI_VENDOR_ID_REDHAT, + PCI_DEVICE_ID_REDHAT_ACPI_ERST, + save_fn, &dev); + g_assert(dev != NULL); + + return dev; +} + +typedef struct _ERSTState { + QOSState *qs; + QPCIBar reg_bar, mem_bar; + uint64_t reg_barsize, mem_barsize; + QPCIDevice *dev; +} ERSTState; + +#define ACTION 0 +#define VALUE 8 + +static const char *reg2str(unsigned reg) +{ + switch (reg) { + case 0: + return "ACTION"; + case 8: + return "VALUE"; + default: + return NULL; + } +} + +static inline uint32_t in_reg32(ERSTState *s, unsigned reg) +{ + const char *name = reg2str(reg); + uint32_t res; + + res = qpci_io_readl(s->dev, s->reg_bar, reg); + g_test_message("*%s -> %08x", name, res); + + return res; +} + +static inline uint64_t in_reg64(ERSTState *s, unsigned reg) +{ + const char *name = reg2str(reg); + uint64_t res; + + res = qpci_io_readq(s->dev, s->reg_bar, reg); + g_test_message("*%s -> %016" PRIx64, name, res); + + return res; +} + +static inline void out_reg32(ERSTState *s, unsigned reg, uint32_t v) +{ + const char *name = reg2str(reg); + + g_test_message("%08x -> *%s", v, name); + qpci_io_writel(s->dev, s->reg_bar, reg, v); +} + +static void cleanup_vm(ERSTState *s) +{ + g_free(s->dev); + qtest_shutdown(s->qs); +} + +static void setup_vm_cmd(ERSTState *s, const char *cmd) +{ + const char *arch = qtest_get_arch(); + + if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { + s->qs = qtest_pc_boot(cmd); + } else { + g_printerr("erst-test tests are only available on x86\n"); + exit(EXIT_FAILURE); + } + s->dev = get_erst_device(s->qs->pcibus); + + s->reg_bar = qpci_iomap(s->dev, 0, &s->reg_barsize); + g_assert_cmpuint(s->reg_barsize, ==, 16); + + s->mem_bar = qpci_iomap(s->dev, 1, &s->mem_barsize); + g_assert_cmpuint(s->mem_barsize, ==, 0x2000); + + qpci_device_enable(s->dev); +} + +static void test_acpi_erst_basic(void) +{ + ERSTState state; + uint64_t log_address_range; + uint64_t log_address_length; + uint32_t log_address_attr; + + 
setup_vm_cmd(&state, + "-object memory-backend-file," + "mem-path=acpi-erst.XXXXXX," + "size=64K," + "share=on," + "id=nvram " + "-device acpi-erst," + "memdev=nvram"); + + out_reg32(&state, ACTION, 0xD); + log_address_range = in_reg64(&state, VALUE); + out_reg32(&state, ACTION, 0xE); + log_address_length = in_reg64(&state, VALUE); + out_reg32(&state, ACTION, 0xF); + log_address_attr = in_reg32(&state, VALUE); + + /* Check log_address_range is not 0, ~0 or base */ + g_assert_cmpuint(log_address_range, !=, 0ULL); + g_assert_cmpuint(log_address_range, !=, ~0ULL); + g_assert_cmpuint(log_address_range, !=, state.reg_bar.addr); + g_assert_cmpuint(log_address_range, ==, state.mem_bar.addr); + + /* Check log_address_length is bar1_size */ + g_assert_cmpuint(log_address_length, ==, state.mem_barsize); + + /* Check log_address_attr is 0 */ + g_assert_cmpuint(log_address_attr, ==, 0); + + cleanup_vm(&state); +} + +int main(int argc, char **argv) +{ + int ret; + + g_test_init(&argc, &argv, NULL); + qtest_add_func("/acpi-erst/basic", test_acpi_erst_basic); + ret = g_test_run(); + return ret; +} diff --git a/tests/qtest/es1370-test.c b/tests/qtest/es1370-test.c index 2fd7fd2d3d..97ab65c435 100644 --- a/tests/qtest/es1370-test.c +++ b/tests/qtest/es1370-test.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/module.h" #include "libqos/qgraph.h" #include "libqos/pci.h" @@ -28,7 +28,7 @@ static void *es1370_get_driver(void *obj, const char *interface) return &es1370->dev; } - fprintf(stderr, "%s not present in e1000e\n", interface); + fprintf(stderr, "%s not present in es1370\n", interface); g_assert_not_reached(); } diff --git a/tests/qtest/fdc-test.c b/tests/qtest/fdc-test.c index 26b69f7c5c..1f9b99ad6d 100644 --- a/tests/qtest/fdc-test.c +++ b/tests/qtest/fdc-test.c @@ -27,11 +27,13 @@ #include "libqtest-single.h" #include "qapi/qmp/qdict.h" -#include "qemu-common.h" /* TODO actually test the results and get rid of this */ #define qmp_discard_response(...) 
qobject_unref(qmp(__VA_ARGS__)) +#define DRIVE_FLOPPY_BLANK \ + "-drive if=floppy,file=null-co://,file.read-zeroes=on,format=raw,size=1440k" + #define TEST_IMAGE_SIZE 1440 * 1024 #define FLOPPY_BASE 0x3f0 @@ -66,7 +68,7 @@ enum { DSKCHG = 0x80, }; -static char test_image[] = "/tmp/qtest.XXXXXX"; +static char *test_image; #define assert_bit_set(data, mask) g_assert_cmphex((data) & (mask), ==, (mask)) #define assert_bit_clear(data, mask) g_assert_cmphex((data) & (mask), ==, 0) @@ -546,13 +548,67 @@ static void fuzz_registers(void) } } +static bool qtest_check_clang_sanitizer(void) +{ +#ifdef QEMU_SANITIZE_ADDRESS + return true; +#else + g_test_skip("QEMU not configured using --enable-sanitizers"); + return false; +#endif +} +static void test_cve_2021_20196(void) +{ + QTestState *s; + + if (!qtest_check_clang_sanitizer()) { + return; + } + + s = qtest_initf("-nographic -m 32M -nodefaults " DRIVE_FLOPPY_BLANK); + + qtest_outw(s, 0x3f4, 0x0500); + qtest_outb(s, 0x3f5, 0x00); + qtest_outb(s, 0x3f5, 0x00); + qtest_outw(s, 0x3f4, 0x0000); + qtest_outb(s, 0x3f5, 0x00); + qtest_outw(s, 0x3f1, 0x0400); + qtest_outw(s, 0x3f4, 0x0000); + qtest_outw(s, 0x3f4, 0x0000); + qtest_outb(s, 0x3f5, 0x00); + qtest_outb(s, 0x3f5, 0x01); + qtest_outw(s, 0x3f1, 0x0500); + qtest_outb(s, 0x3f5, 0x00); + qtest_quit(s); +} + +static void test_cve_2021_3507(void) +{ + QTestState *s; + + s = qtest_initf("-nographic -m 32M -nodefaults " + "-drive file=%s,format=raw,if=floppy,snapshot=on", + test_image); + qtest_outl(s, 0x9, 0x0a0206); + qtest_outw(s, 0x3f4, 0x1600); + qtest_outw(s, 0x3f4, 0x0000); + qtest_outw(s, 0x3f4, 0x0000); + qtest_outw(s, 0x3f4, 0x0000); + qtest_outw(s, 0x3f4, 0x0200); + qtest_outw(s, 0x3f4, 0x0200); + qtest_outw(s, 0x3f4, 0x0000); + qtest_outw(s, 0x3f4, 0x0000); + qtest_outw(s, 0x3f4, 0x0000); + qtest_quit(s); +} + int main(int argc, char **argv) { int fd; int ret; /* Create a temporary raw image */ - fd = mkstemp(test_image); + fd = g_file_open_tmp("qtest.XXXXXX", &test_image, NULL); g_assert(fd >= 0); ret = ftruncate(fd, TEST_IMAGE_SIZE); g_assert(ret == 0); @@ -561,7 +617,7 @@ int main(int argc, char **argv) /* Run the tests */ g_test_init(&argc, &argv, NULL); - qtest_start("-device floppy,id=floppy0"); + qtest_start("-machine pc -device floppy,id=floppy0"); qtest_irq_intercept_in(global_qtest, "ioapic"); qtest_add_func("/fdc/cmos", test_cmos); qtest_add_func("/fdc/no_media_on_start", test_no_media_on_start); @@ -576,12 +632,15 @@ int main(int argc, char **argv) qtest_add_func("/fdc/read_no_dma_18", test_read_no_dma_18); qtest_add_func("/fdc/read_no_dma_19", test_read_no_dma_19); qtest_add_func("/fdc/fuzz-registers", fuzz_registers); + qtest_add_func("/fdc/fuzz/cve_2021_20196", test_cve_2021_20196); + qtest_add_func("/fdc/fuzz/cve_2021_3507", test_cve_2021_3507); ret = g_test_run(); /* Cleanup */ qtest_end(); unlink(test_image); + g_free(test_image); return ret; } diff --git a/tests/qtest/fuzz-e1000e-test.c b/tests/qtest/fuzz-e1000e-test.c index 66229e6096..5052883fb6 100644 --- a/tests/qtest/fuzz-e1000e-test.c +++ b/tests/qtest/fuzz-e1000e-test.c @@ -8,7 +8,7 @@ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" /* * https://bugs.launchpad.net/qemu/+bug/1879531 diff --git a/tests/qtest/fuzz-lsi53c895a-test.c b/tests/qtest/fuzz-lsi53c895a-test.c new file mode 100644 index 0000000000..9b007def26 --- /dev/null +++ b/tests/qtest/fuzz-lsi53c895a-test.c @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QTest fuzzer-generated testcase for LSI53C895A 
device + * + * Copyright (c) Red Hat + */ + +#include "qemu/osdep.h" +#include "libqtest.h" + +/* + * This used to trigger a DMA reentrancy issue + * leading to memory corruption bugs like stack + * overflow or use-after-free + * https://gitlab.com/qemu-project/qemu/-/issues/1563 + */ +static void test_lsi_dma_reentrancy(void) +{ + QTestState *s; + + s = qtest_init("-M q35 -m 512M -nodefaults " + "-blockdev driver=null-co,node-name=null0 " + "-device lsi53c810 -device scsi-cd,drive=null0"); + + qtest_outl(s, 0xcf8, 0x80000804); /* PCI Command Register */ + qtest_outw(s, 0xcfc, 0x7); /* Enables accesses */ + qtest_outl(s, 0xcf8, 0x80000814); /* Memory Bar 1 */ + qtest_outl(s, 0xcfc, 0xff100000); /* Set MMIO Address*/ + qtest_outl(s, 0xcf8, 0x80000818); /* Memory Bar 2 */ + qtest_outl(s, 0xcfc, 0xff000000); /* Set RAM Address*/ + qtest_writel(s, 0xff000000, 0xc0000024); + qtest_writel(s, 0xff000114, 0x00000080); + qtest_writel(s, 0xff00012c, 0xff000000); + qtest_writel(s, 0xff000004, 0xff000114); + qtest_writel(s, 0xff000008, 0xff100014); + qtest_writel(s, 0xff10002f, 0x000000ff); + + qtest_quit(s); +} + +/* + * This used to trigger a UAF in lsi_do_msgout() + * https://gitlab.com/qemu-project/qemu/-/issues/972 + */ +static void test_lsi_do_msgout_cancel_req(void) +{ + QTestState *s; + + if (sizeof(void *) == 4) { + g_test_skip("memory size too big for 32-bit build"); + return; + } + + s = qtest_init("-M q35 -m 2G -nodefaults " + "-device lsi53c895a,id=scsi " + "-device scsi-hd,drive=disk0 " + "-drive file=null-co://,id=disk0,if=none,format=raw"); + + qtest_outl(s, 0xcf8, 0x80000810); + qtest_outl(s, 0xcf8, 0xc000); + qtest_outl(s, 0xcf8, 0x80000810); + qtest_outw(s, 0xcfc, 0x7); + qtest_outl(s, 0xcf8, 0x80000810); + qtest_outl(s, 0xcfc, 0xc000); + qtest_outl(s, 0xcf8, 0x80000804); + qtest_outw(s, 0xcfc, 0x05); + qtest_writeb(s, 0x69736c10, 0x08); + qtest_writeb(s, 0x69736c13, 0x58); + qtest_writeb(s, 0x69736c1a, 0x01); + qtest_writeb(s, 0x69736c1b, 0x06); + qtest_writeb(s, 0x69736c22, 0x01); + qtest_writeb(s, 0x69736c23, 0x07); + qtest_writeb(s, 0x69736c2b, 0x02); + qtest_writeb(s, 0x69736c48, 0x08); + qtest_writeb(s, 0x69736c4b, 0x58); + qtest_writeb(s, 0x69736c52, 0x04); + qtest_writeb(s, 0x69736c53, 0x06); + qtest_writeb(s, 0x69736c5b, 0x02); + qtest_outl(s, 0xc02d, 0x697300); + qtest_writeb(s, 0x5a554662, 0x01); + qtest_writeb(s, 0x5a554663, 0x07); + qtest_writeb(s, 0x5a55466a, 0x10); + qtest_writeb(s, 0x5a55466b, 0x22); + qtest_writeb(s, 0x5a55466c, 0x5a); + qtest_writeb(s, 0x5a55466d, 0x5a); + qtest_writeb(s, 0x5a55466e, 0x34); + qtest_writeb(s, 0x5a55466f, 0x5a); + qtest_writeb(s, 0x5a345a5a, 0x77); + qtest_writeb(s, 0x5a345a5b, 0x55); + qtest_writeb(s, 0x5a345a5c, 0x51); + qtest_writeb(s, 0x5a345a5d, 0x27); + qtest_writeb(s, 0x27515577, 0x41); + qtest_outl(s, 0xc02d, 0x5a5500); + qtest_writeb(s, 0x364001d0, 0x08); + qtest_writeb(s, 0x364001d3, 0x58); + qtest_writeb(s, 0x364001da, 0x01); + qtest_writeb(s, 0x364001db, 0x26); + qtest_writeb(s, 0x364001dc, 0x0d); + qtest_writeb(s, 0x364001dd, 0xae); + qtest_writeb(s, 0x364001de, 0x41); + qtest_writeb(s, 0x364001df, 0x5a); + qtest_writeb(s, 0x5a41ae0d, 0xf8); + qtest_writeb(s, 0x5a41ae0e, 0x36); + qtest_writeb(s, 0x5a41ae0f, 0xd7); + qtest_writeb(s, 0x5a41ae10, 0x36); + qtest_writeb(s, 0x36d736f8, 0x0c); + qtest_writeb(s, 0x36d736f9, 0x80); + qtest_writeb(s, 0x36d736fa, 0x0d); + qtest_outl(s, 0xc02d, 0x364000); + + qtest_quit(s); +} + +/* + * This used to trigger the assert in lsi_do_dma() + * https://bugs.launchpad.net/qemu/+bug/697510 + 
* https://bugs.launchpad.net/qemu/+bug/1905521 + * https://bugs.launchpad.net/qemu/+bug/1908515 + */ +static void test_lsi_do_dma_empty_queue(void) +{ + QTestState *s; + + s = qtest_init("-M q35 -nographic -monitor none -serial none " + "-drive if=none,id=drive0," + "file=null-co://,file.read-zeroes=on,format=raw " + "-device lsi53c895a,id=scsi0 " + "-device scsi-hd,drive=drive0," + "bus=scsi0.0,channel=0,scsi-id=0,lun=0"); + qtest_outl(s, 0xcf8, 0x80001814); + qtest_outl(s, 0xcfc, 0xe1068000); + qtest_outl(s, 0xcf8, 0x80001818); + qtest_outl(s, 0xcf8, 0x80001804); + qtest_outw(s, 0xcfc, 0x7); + qtest_outl(s, 0xcf8, 0x80002010); + + qtest_writeb(s, 0xe106802e, 0xff); /* Fill DSP bits 16-23 */ + qtest_writeb(s, 0xe106802f, 0xff); /* Fill DSP bits 24-31: trigger SCRIPT */ + + qtest_quit(s); +} + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + + qtest_add_func("fuzz/lsi53c895a/lsi_do_dma_empty_queue", + test_lsi_do_dma_empty_queue); + + qtest_add_func("fuzz/lsi53c895a/lsi_do_msgout_cancel_req", + test_lsi_do_msgout_cancel_req); + + qtest_add_func("fuzz/lsi53c895a/lsi_dma_reentrancy", + test_lsi_dma_reentrancy); + + return g_test_run(); +} diff --git a/tests/qtest/fuzz-megasas-test.c b/tests/qtest/fuzz-megasas-test.c index 940a76bf25..8d7ed3723a 100644 --- a/tests/qtest/fuzz-megasas-test.c +++ b/tests/qtest/fuzz-megasas-test.c @@ -9,7 +9,7 @@ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" /* * This used to trigger the assert in scsi_dma_complete @@ -34,16 +34,42 @@ static void test_lp1878263_megasas_zero_iov_cnt(void) qtest_quit(s); } +/* + * Overflow SGL buffer. + * https://gitlab.com/qemu-project/qemu/-/issues/521 + */ +static void test_gitlab_issue521_megasas_sgl_ovf(void) +{ + QTestState *s = qtest_init("-m 32M -machine q35 " + "-nodefaults -device megasas " + "-device scsi-cd,drive=null0 " + "-blockdev " + "driver=null-co,read-zeroes=on,node-name=null0"); + qtest_outl(s, 0xcf8, 0x80000818); + qtest_outl(s, 0xcfc, 0xc000); + qtest_outl(s, 0xcf8, 0x80000804); + qtest_outw(s, 0xcfc, 0x05); + qtest_bufwrite(s, 0x0, "\x01", 0x1); + qtest_bufwrite(s, 0x7, "\x01", 0x1); + qtest_bufwrite(s, 0x10, "\x02", 0x1); + qtest_bufwrite(s, 0x16, "\x01", 0x1); + qtest_bufwrite(s, 0x28, "\x01", 0x1); + qtest_bufwrite(s, 0x33, "\x01", 0x1); + qtest_outb(s, 0xc040, 0x0); + qtest_outb(s, 0xc040, 0x20); + qtest_outl(s, 0xc040, 0x20000000); + qtest_outb(s, 0xc040, 0x20); + qtest_quit(s); +} + int main(int argc, char **argv) { - const char *arch = qtest_get_arch(); - g_test_init(&argc, &argv, NULL); - if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { - qtest_add_func("fuzz/test_lp1878263_megasas_zero_iov_cnt", - test_lp1878263_megasas_zero_iov_cnt); - } + qtest_add_func("fuzz/test_lp1878263_megasas_zero_iov_cnt", + test_lp1878263_megasas_zero_iov_cnt); + qtest_add_func("fuzz/gitlab_issue521_megasas_sgl_ovf", + test_gitlab_issue521_megasas_sgl_ovf); return g_test_run(); } diff --git a/tests/qtest/fuzz-sb16-test.c b/tests/qtest/fuzz-sb16-test.c index f47a8bcdbd..fc445b1871 100644 --- a/tests/qtest/fuzz-sb16-test.c +++ b/tests/qtest/fuzz-sb16-test.c @@ -7,7 +7,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" /* * This used to trigger the assert in audio_calloc @@ -15,7 +15,7 @@ */ static void test_fuzz_sb16_0x1c(void) { - QTestState *s = qtest_init("-M q35 -display none " + QTestState *s = qtest_init("-M q35 " "-device sb16,audiodev=snd0 " "-audiodev none,id=snd0"); qtest_outw(s, 0x22c, 0x41); @@ -27,7 +27,7 @@ 
static void test_fuzz_sb16_0x1c(void) static void test_fuzz_sb16_0x91(void) { - QTestState *s = qtest_init("-M pc -display none " + QTestState *s = qtest_init("-M pc " "-device sb16,audiodev=none " "-audiodev id=none,driver=none"); qtest_outw(s, 0x22c, 0xf141); @@ -43,7 +43,7 @@ static void test_fuzz_sb16_0x91(void) */ static void test_fuzz_sb16_0xd4(void) { - QTestState *s = qtest_init("-M pc -display none " + QTestState *s = qtest_init("-M pc " "-device sb16,audiodev=none " "-audiodev id=none,driver=none"); qtest_outb(s, 0x22c, 0x41); @@ -55,15 +55,15 @@ static void test_fuzz_sb16_0xd4(void) int main(int argc, char **argv) { - const char *arch = qtest_get_arch(); - g_test_init(&argc, &argv, NULL); - if (strcmp(arch, "i386") == 0) { + if (qtest_has_machine("q35")) { qtest_add_func("fuzz/test_fuzz_sb16/1c", test_fuzz_sb16_0x1c); + } + if (qtest_has_machine("pc")) { qtest_add_func("fuzz/test_fuzz_sb16/91", test_fuzz_sb16_0x91); qtest_add_func("fuzz/test_fuzz_sb16/d4", test_fuzz_sb16_0xd4); - } + } - return g_test_run(); + return g_test_run(); } diff --git a/tests/qtest/fuzz-sdcard-test.c b/tests/qtest/fuzz-sdcard-test.c index ae14305344..cd134cdf55 100644 --- a/tests/qtest/fuzz-sdcard-test.c +++ b/tests/qtest/fuzz-sdcard-test.c @@ -7,7 +7,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" /* * https://gitlab.com/qemu-project/qemu/-/issues/450 @@ -18,7 +18,7 @@ static void oss_fuzz_29225(void) { QTestState *s; - s = qtest_init(" -display none -m 512m -nodefaults -nographic" + s = qtest_init(" -m 512m -nodefaults -nographic" " -device sdhci-pci,sd-spec-version=3" " -device sd-card,drive=d0" " -drive if=none,index=0,file=null-co://,format=raw,id=d0"); @@ -61,7 +61,7 @@ static void oss_fuzz_36217(void) { QTestState *s; - s = qtest_init(" -display none -m 32 -nodefaults -nographic" + s = qtest_init(" -m 32 -nodefaults -nographic" " -device sdhci-pci,sd-spec-version=3 " "-device sd-card,drive=d0 " "-drive if=none,index=0,file=null-co://,format=raw,id=d0"); @@ -87,16 +87,88 @@ static void oss_fuzz_36217(void) qtest_quit(s); } +/* + * https://gitlab.com/qemu-project/qemu/-/issues/451 + * Used to trigger a heap buffer overflow. 
+ */ +static void oss_fuzz_36391(void) +{ + QTestState *s; + + s = qtest_init(" -m 512M -nodefaults -nographic" + " -device sdhci-pci,sd-spec-version=3" + " -device sd-card,drive=drv" + " -drive if=none,index=0,file=null-co://,format=raw,id=drv"); + qtest_outl(s, 0xcf8, 0x80001010); + qtest_outl(s, 0xcfc, 0xe0000000); + qtest_outl(s, 0xcf8, 0x80001004); + qtest_outw(s, 0xcfc, 0x7); + qtest_bufwrite(s, 0xe0000005, "\x73", 0x1); + qtest_bufwrite(s, 0xe0000028, "\x55", 0x1); + qtest_bufwrite(s, 0xe000002c, "\x55", 0x1); + qtest_bufwrite(s, 0x0, "\x65", 0x1); + qtest_bufwrite(s, 0x7, "\x69", 0x1); + qtest_bufwrite(s, 0x8, "\x65", 0x1); + qtest_bufwrite(s, 0xf, "\x69", 0x1); + qtest_bufwrite(s, 0x10, "\x65", 0x1); + qtest_bufwrite(s, 0x17, "\x69", 0x1); + qtest_bufwrite(s, 0x18, "\x65", 0x1); + qtest_bufwrite(s, 0x1f, "\x69", 0x1); + qtest_bufwrite(s, 0x20, "\x65", 0x1); + qtest_bufwrite(s, 0x27, "\x69", 0x1); + qtest_bufwrite(s, 0x28, "\x65", 0x1); + qtest_bufwrite(s, 0x2f, "\x69", 0x1); + qtest_bufwrite(s, 0x30, "\x65", 0x1); + qtest_bufwrite(s, 0x37, "\x69", 0x1); + qtest_bufwrite(s, 0x38, "\x65", 0x1); + qtest_bufwrite(s, 0x3f, "\x69", 0x1); + qtest_bufwrite(s, 0x40, "\x65", 0x1); + qtest_bufwrite(s, 0x47, "\x69", 0x1); + qtest_bufwrite(s, 0x48, "\x65", 0x1); + qtest_bufwrite(s, 0xe000000c, "\x55", 0x1); + qtest_bufwrite(s, 0xe000000e, "\x2c", 0x1); + qtest_bufwrite(s, 0xe000000f, "\x5b", 0x1); + qtest_bufwrite(s, 0xe0000010, "\x06\x46", 0x2); + qtest_bufwrite(s, 0x50, "\x65", 0x1); + qtest_bufwrite(s, 0x57, "\x69", 0x1); + qtest_bufwrite(s, 0x58, "\x65", 0x1); + qtest_bufwrite(s, 0x5f, "\x69", 0x1); + qtest_bufwrite(s, 0x60, "\x65", 0x1); + qtest_bufwrite(s, 0x67, "\x69", 0x1); + qtest_bufwrite(s, 0x68, "\x65", 0x1); + qtest_bufwrite(s, 0x6f, "\x69", 0x1); + qtest_bufwrite(s, 0x70, "\x65", 0x1); + qtest_bufwrite(s, 0x77, "\x69", 0x1); + qtest_bufwrite(s, 0x78, "\x65", 0x1); + qtest_bufwrite(s, 0x7f, "\x69", 0x1); + qtest_bufwrite(s, 0x80, "\x65", 0x1); + qtest_bufwrite(s, 0x87, "\x69", 0x1); + qtest_bufwrite(s, 0x88, "\x65", 0x1); + qtest_bufwrite(s, 0x8f, "\x69", 0x1); + qtest_bufwrite(s, 0x90, "\x65", 0x1); + qtest_bufwrite(s, 0x97, "\x69", 0x1); + qtest_bufwrite(s, 0x98, "\x65", 0x1); + qtest_bufwrite(s, 0xe0000026, "\x5a\x06", 0x2); + qtest_bufwrite(s, 0xe0000028, "\x46\xc0\xc9\xc9", 0x4); + qtest_bufwrite(s, 0xe0000028, "\x55", 0x1); + qtest_bufwrite(s, 0xe000002a, "\x5a", 0x1); + qtest_bufwrite(s, 0xa0, "\x65", 0x1); + qtest_bufwrite(s, 0xa5, "\xff", 0x1); + qtest_bufwrite(s, 0xa6, "\xff", 0x1); + qtest_bufwrite(s, 0xa7, "\xdf", 0x1); + qtest_bufwrite(s, 0xe000000c, "\x27", 0x1); + qtest_bufwrite(s, 0xe000000f, "\x55", 0x1); + + qtest_quit(s); +} + int main(int argc, char **argv) { - const char *arch = qtest_get_arch(); - g_test_init(&argc, &argv, NULL); - if (strcmp(arch, "i386") == 0) { - qtest_add_func("fuzz/sdcard/oss_fuzz_29225", oss_fuzz_29225); - qtest_add_func("fuzz/sdcard/oss_fuzz_36217", oss_fuzz_36217); - } + qtest_add_func("fuzz/sdcard/oss_fuzz_29225", oss_fuzz_29225); + qtest_add_func("fuzz/sdcard/oss_fuzz_36217", oss_fuzz_36217); + qtest_add_func("fuzz/sdcard/oss_fuzz_36391", oss_fuzz_36391); - return g_test_run(); + return g_test_run(); } diff --git a/tests/qtest/fuzz-virtio-scsi-test.c b/tests/qtest/fuzz-virtio-scsi-test.c index aaf6d10e18..e37b48b2cc 100644 --- a/tests/qtest/fuzz-virtio-scsi-test.c +++ b/tests/qtest/fuzz-virtio-scsi-test.c @@ -9,7 +9,7 @@ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" /* * Here a MemoryRegionCache 
pointed to an MMIO region but had a @@ -19,7 +19,7 @@ static void test_mmio_oob_from_memory_region_cache(void) { QTestState *s; - s = qtest_init("-M pc-q35-5.2 -display none -m 512M " + s = qtest_init("-M pc-q35-5.2 -m 512M " "-device virtio-scsi,num_queues=8,addr=03.0 "); qtest_outl(s, 0xcf8, 0x80001811); @@ -62,14 +62,10 @@ static void test_mmio_oob_from_memory_region_cache(void) int main(int argc, char **argv) { - const char *arch = qtest_get_arch(); - g_test_init(&argc, &argv, NULL); - if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { - qtest_add_func("fuzz/test_mmio_oob_from_memory_region_cache", - test_mmio_oob_from_memory_region_cache); - } + qtest_add_func("fuzz/test_mmio_oob_from_memory_region_cache", + test_mmio_oob_from_memory_region_cache); return g_test_run(); } diff --git a/tests/qtest/fuzz-xlnx-dp-test.c b/tests/qtest/fuzz-xlnx-dp-test.c new file mode 100644 index 0000000000..e8c483965f --- /dev/null +++ b/tests/qtest/fuzz-xlnx-dp-test.c @@ -0,0 +1,33 @@ +/* + * QTest fuzzer-generated testcase for xlnx-dp display device + * + * Copyright (c) 2021 Qiang Liu + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "libqtest.h" + +/* + * This used to trigger the out-of-bounds read in xlnx_dp_read + */ +static void test_fuzz_xlnx_dp_0x3ac(void) +{ + QTestState *s = qtest_init("-M xlnx-zcu102 "); + qtest_readl(s, 0xfd4a03ac); + qtest_quit(s); +} + +int main(int argc, char **argv) +{ + const char *arch = qtest_get_arch(); + + g_test_init(&argc, &argv, NULL); + + if (strcmp(arch, "aarch64") == 0) { + qtest_add_func("fuzz/test_fuzz_xlnx_dp/3ac", test_fuzz_xlnx_dp_0x3ac); + } + + return g_test_run(); +} diff --git a/tests/qtest/fuzz/fuzz.c b/tests/qtest/fuzz/fuzz.c index 5f77c84983..eb7520544b 100644 --- a/tests/qtest/fuzz/fuzz.c +++ b/tests/qtest/fuzz/fuzz.c @@ -15,13 +15,14 @@ #include +#include "qemu/cutils.h" #include "qemu/datadir.h" #include "sysemu/sysemu.h" #include "sysemu/qtest.h" #include "sysemu/runstate.h" #include "qemu/main-loop.h" #include "qemu/rcu.h" -#include "tests/qtest/libqos/libqtest.h" +#include "tests/qtest/libqtest.h" #include "tests/qtest/libqos/qgraph.h" #include "fuzz.h" @@ -157,8 +158,6 @@ int LLVMFuzzerInitialize(int *argc, char ***argv, char ***envp) { char *target_name; - const char *bindir; - char *datadir; GString *cmd_line; gchar *pretty_cmd_line; bool serialize = false; @@ -173,22 +172,6 @@ int LLVMFuzzerInitialize(int *argc, char ***argv, char ***envp) target_name = strstr(**argv, "-target-"); if (target_name) { /* The binary name specifies the target */ target_name += strlen("-target-"); - /* - * With oss-fuzz, the executable is kept in the root of a directory (we - * cannot assume the path). All data (including bios binaries) must be - * in the same dir, or a subdir. Thus, we cannot place the pc-bios so - * that it would be in exec_dir/../pc-bios. - * As a workaround, oss-fuzz allows us to use argv[0] to get the - * location of the executable. Using this we add exec_dir/pc-bios to - * the datadirs. 
- */ - bindir = qemu_get_exec_dir(); - datadir = g_build_filename(bindir, "pc-bios", NULL); - if (g_file_test(datadir, G_FILE_TEST_IS_DIR)) { - qemu_add_data_dir(datadir); - } else { - g_free(datadir); - } } else if (*argc > 1) { /* The target is specified as an argument */ target_name = (*argv)[1]; if (!strstr(target_name, "--fuzz-target=")) { @@ -235,7 +218,7 @@ int LLVMFuzzerInitialize(int *argc, char ***argv, char ***envp) g_free(pretty_cmd_line); } - qemu_init(result.we_wordc, result.we_wordv, NULL); + qemu_init(result.we_wordc, result.we_wordv); /* re-enable the rcu atfork, which was previously disabled in qemu_init */ rcu_enable_atfork(); diff --git a/tests/qtest/fuzz/fuzz.h b/tests/qtest/fuzz/fuzz.h index 3a8570e84c..327c1c5a55 100644 --- a/tests/qtest/fuzz/fuzz.h +++ b/tests/qtest/fuzz/fuzz.h @@ -11,13 +11,13 @@ * */ -#ifndef FUZZER_H_ -#define FUZZER_H_ +#ifndef QTEST_FUZZ_H +#define QTEST_FUZZ_H #include "qemu/units.h" #include "qapi/error.h" -#include "tests/qtest/libqos/libqtest.h" +#include "tests/qtest/libqtest.h" /** * A libfuzzer fuzzing target @@ -122,4 +122,3 @@ int LLVMFuzzerTestOneInput(const unsigned char *Data, size_t Size); int LLVMFuzzerInitialize(int *argc, char ***argv, char ***envp); #endif - diff --git a/tests/qtest/fuzz/generic_fuzz.c b/tests/qtest/fuzz/generic_fuzz.c index 6c67522717..afc1d20355 100644 --- a/tests/qtest/fuzz/generic_fuzz.c +++ b/tests/qtest/fuzz/generic_fuzz.c @@ -15,7 +15,7 @@ #include #include "hw/core/cpu.h" -#include "tests/qtest/libqos/libqtest.h" +#include "tests/qtest/libqtest.h" #include "tests/qtest/libqos/pci-pc.h" #include "fuzz.h" #include "fork_fuzz.h" @@ -144,7 +144,7 @@ static void *pattern_alloc(pattern p, size_t len) return buf; } -static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) +static int fuzz_memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) { unsigned access_size_max = mr->ops->valid.max_access_size; @@ -240,10 +240,18 @@ void fuzz_dma_read_cb(size_t addr, size_t len, MemoryRegion *mr) addr, &addr1, &l, true, MEMTXATTRS_UNSPECIFIED); - if (!(memory_region_is_ram(mr1) || - memory_region_is_romd(mr1)) && mr1 != sparse_mem_mr) { - l = memory_access_size(mr1, l, addr1); - } else { + /* + * If mr1 isn't RAM, address_space_translate doesn't update l. Use + * fuzz_memory_access_size to identify the number of bytes that it + * is safe to write without accidentally writing to another + * MemoryRegion. + */ + if (!memory_region_is_ram(mr1)) { + l = fuzz_memory_access_size(mr1, l, addr1); + } + if (memory_region_is_ram(mr1) || + memory_region_is_romd(mr1) || + mr1 == sparse_mem_mr) { /* ROM/RAM case */ if (qtest_log_enabled) { /* @@ -661,31 +669,41 @@ static void generic_fuzz(QTestState *s, const unsigned char *Data, size_t Size) uint8_t op; if (fork() == 0) { + struct sigaction sact; + struct itimerval timer; + sigset_t set; /* * Sometimes the fuzzer will find inputs that take quite a long time to * process. Often times, these inputs do not result in new coverage. * Even if these inputs might be interesting, they can slow down the - * fuzzer, overall. Set a timeout to avoid hurting performance, too much + * fuzzer, overall. 
Set a timeout for each command to avoid hurting
+ * performance too much
*/
if (timeout) {
- struct sigaction sact;
- struct itimerval timer;
sigemptyset(&sact.sa_mask);
sact.sa_flags = SA_NODEFER;
sact.sa_handler = handle_timeout;
sigaction(SIGALRM, &sact, NULL);
+ sigemptyset(&set);
+ sigaddset(&set, SIGALRM);
+ pthread_sigmask(SIG_UNBLOCK, &set, NULL);
+ memset(&timer, 0, sizeof(timer));
timer.it_value.tv_sec = timeout / USEC_IN_SEC;
timer.it_value.tv_usec = timeout % USEC_IN_SEC;
- setitimer(ITIMER_VIRTUAL, &timer, NULL);
}
op_clear_dma_patterns(s, NULL, 0);
pci_disabled = false;
while (cmd && Size) {
+ /* Reset the timeout each time we run a new command */
+ if (timeout) {
+ setitimer(ITIMER_REAL, &timer, NULL);
+ }
+
/* Get the length until the next command or end of input */
nextcmd = memmem(cmd, Size, SEPARATOR, strlen(SEPARATOR));
cmd_len = nextcmd ? nextcmd - cmd : Size;
@@ -726,14 +744,12 @@ static void usage(void)
static int locate_fuzz_memory_regions(Object *child, void *opaque)
{
- const char *name;
MemoryRegion *mr;
if (object_dynamic_cast(child, TYPE_MEMORY_REGION)) {
mr = MEMORY_REGION(child);
if ((memory_region_is_ram(mr) ||
memory_region_is_ram_device(mr) ||
memory_region_is_rom(mr)) == false) {
- name = object_get_canonical_path_component(child);
/*
* We don't want duplicate pointers to the same MemoryRegion, so
* try to remove copies of the pointer, before adding it.
@@ -746,8 +762,13 @@ static int locate_fuzz_objects(Object *child, void *opaque)
{
+ GString *type_name;
+ GString *path_name;
char *pattern = opaque;
- if (g_pattern_match_simple(pattern, object_get_typename(child))) {
+
+ type_name = g_string_new(object_get_typename(child));
+ g_string_ascii_down(type_name);
+ if (g_pattern_match_simple(pattern, type_name->str)) {
/* Find and save ptrs to any child MemoryRegions */
object_child_foreach_recursive(child, locate_fuzz_memory_regions, NULL);
@@ -764,8 +785,9 @@ static int locate_fuzz_objects(Object *child, void *opaque)
g_ptr_array_add(fuzzable_pci_devices, PCI_DEVICE(child));
}
} else if (object_dynamic_cast(OBJECT(child), TYPE_MEMORY_REGION)) {
- if (g_pattern_match_simple(pattern,
- object_get_canonical_path_component(child))) {
+ path_name = g_string_new(object_get_canonical_path_component(child));
+ g_string_ascii_down(path_name);
+ if (g_pattern_match_simple(pattern, path_name->str)) {
MemoryRegion *mr;
mr = MEMORY_REGION(child);
if ((memory_region_is_ram(mr) ||
@@ -774,7 +796,9 @@
g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
}
}
+ g_string_free(path_name, true);
}
+ g_string_free(type_name, true);
return 0;
}
@@ -802,6 +826,7 @@ static void generic_pre_fuzz(QTestState *s)
MemoryRegion *mr;
QPCIBus *pcibus;
char **result;
+ GString *name_pattern;
if (!getenv("QEMU_FUZZ_OBJECTS")) {
usage();
@@ -831,10 +856,17 @@
result = g_strsplit(getenv("QEMU_FUZZ_OBJECTS"), " ", -1);
for (int i = 0; result[i] != NULL; i++) {
+ name_pattern = g_string_new(result[i]);
+ /*
+ * Make the pattern lowercase. We do the same for all the MemoryRegion
+ * and Type names so the configs are case-insensitive.
+ */ + g_string_ascii_down(name_pattern); printf("Matching objects by name %s\n", result[i]); object_child_foreach_recursive(qdev_get_machine(), locate_fuzz_objects, - result[i]); + name_pattern->str); + g_string_free(name_pattern, true); } g_strfreev(result); printf("This process will try to fuzz the following MemoryRegions:\n"); @@ -962,16 +994,16 @@ static GString *generic_fuzz_predefined_config_cmdline(FuzzTarget *t) g_assert(t->opaque); config = t->opaque; - setenv("QEMU_AVOID_DOUBLE_FETCH", "1", 1); + g_setenv("QEMU_AVOID_DOUBLE_FETCH", "1", 1); if (config->argfunc) { args = config->argfunc(); - setenv("QEMU_FUZZ_ARGS", args, 1); + g_setenv("QEMU_FUZZ_ARGS", args, 1); g_free(args); } else { g_assert_nonnull(config->args); - setenv("QEMU_FUZZ_ARGS", config->args, 1); + g_setenv("QEMU_FUZZ_ARGS", config->args, 1); } - setenv("QEMU_FUZZ_OBJECTS", config->objects, 1); + g_setenv("QEMU_FUZZ_OBJECTS", config->objects, 1); return generic_fuzz_cmdline(t); } diff --git a/tests/qtest/fuzz/generic_fuzz_configs.h b/tests/qtest/fuzz/generic_fuzz_configs.h index 004c701915..a825b78c14 100644 --- a/tests/qtest/fuzz/generic_fuzz_configs.h +++ b/tests/qtest/fuzz/generic_fuzz_configs.h @@ -20,8 +20,8 @@ typedef struct generic_fuzz_config { } generic_fuzz_config; static inline gchar *generic_fuzzer_virtio_9p_args(void){ - char tmpdir[] = "/tmp/qemu-fuzz.XXXXXX"; - g_assert_nonnull(mkdtemp(tmpdir)); + g_autofree char *tmpdir = g_dir_make_tmp("qemu-fuzz.XXXXXX", NULL); + g_assert_nonnull(tmpdir); return g_strdup_printf("-machine q35 -nodefaults " "-device virtio-9p,fsdev=hshare,mount_tag=hshare " diff --git a/tests/qtest/fuzz/i440fx_fuzz.c b/tests/qtest/fuzz/i440fx_fuzz.c index 86796bff2b..b17fc725df 100644 --- a/tests/qtest/fuzz/i440fx_fuzz.c +++ b/tests/qtest/fuzz/i440fx_fuzz.c @@ -13,7 +13,7 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" -#include "tests/qtest/libqos/libqtest.h" +#include "tests/qtest/libqtest.h" #include "tests/qtest/libqos/pci.h" #include "tests/qtest/libqos/pci-pc.h" #include "fuzz.h" diff --git a/tests/qtest/fuzz/meson.build b/tests/qtest/fuzz/meson.build index 8af6848cd5..189901d4a2 100644 --- a/tests/qtest/fuzz/meson.build +++ b/tests/qtest/fuzz/meson.build @@ -1,3 +1,7 @@ +if not get_option('fuzzing') + subdir_done() +endif + specific_fuzz_ss.add(files('fuzz.c', 'fork_fuzz.c', 'qos_fuzz.c', 'qtest_wrappers.c'), qos) @@ -9,7 +13,7 @@ specific_fuzz_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio_blk_fuzz. 
specific_fuzz_ss.add(files('generic_fuzz.c')) fork_fuzz = declare_dependency( - link_args: config_host['FUZZ_EXE_LDFLAGS'].split() + + link_args: fuzz_exe_ldflags + ['-Wl,-wrap,qtest_inb', '-Wl,-wrap,qtest_inw', '-Wl,-wrap,qtest_inl', diff --git a/tests/qtest/fuzz/qos_fuzz.c b/tests/qtest/fuzz/qos_fuzz.c index 7a244c951e..3a3d9c16dd 100644 --- a/tests/qtest/fuzz/qos_fuzz.c +++ b/tests/qtest/fuzz/qos_fuzz.c @@ -19,12 +19,11 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qapi/error.h" -#include "qemu-common.h" #include "exec/memory.h" #include "qemu/main-loop.h" -#include "tests/qtest/libqos/libqtest.h" -#include "tests/qtest/libqos/malloc.h" +#include "tests/qtest/libqtest.h" +#include "tests/qtest/libqos/libqos-malloc.h" #include "tests/qtest/libqos/qgraph.h" #include "tests/qtest/libqos/qgraph_internal.h" #include "tests/qtest/libqos/qos_external.h" diff --git a/tests/qtest/fuzz/virtio_blk_fuzz.c b/tests/qtest/fuzz/virtio_blk_fuzz.c index 623a756fd4..a9fb9ecf6c 100644 --- a/tests/qtest/fuzz/virtio_blk_fuzz.c +++ b/tests/qtest/fuzz/virtio_blk_fuzz.c @@ -11,7 +11,7 @@ #include "qemu/osdep.h" -#include "tests/qtest/libqos/libqtest.h" +#include "tests/qtest/libqtest.h" #include "tests/qtest/libqos/virtio-blk.h" #include "tests/qtest/libqos/virtio.h" #include "tests/qtest/libqos/virtio-pci.h" @@ -181,10 +181,10 @@ static void drive_destroy(void *path) static char *drive_create(void) { int fd, ret; - char *t_path = g_strdup("/tmp/qtest.XXXXXX"); + char *t_path; /* Create a temporary raw image */ - fd = mkstemp(t_path); + fd = g_file_open_tmp("qtest.XXXXXX", &t_path, NULL); g_assert_cmpint(fd, >=, 0); ret = ftruncate(fd, TEST_IMAGE_SIZE); g_assert_cmpint(ret, ==, 0); diff --git a/tests/qtest/fuzz/virtio_net_fuzz.c b/tests/qtest/fuzz/virtio_net_fuzz.c index 0e873ab8e2..c2c15f07f0 100644 --- a/tests/qtest/fuzz/virtio_net_fuzz.c +++ b/tests/qtest/fuzz/virtio_net_fuzz.c @@ -13,7 +13,7 @@ #include "qemu/osdep.h" #include "standard-headers/linux/virtio_config.h" -#include "tests/qtest/libqos/libqtest.h" +#include "tests/qtest/libqtest.h" #include "tests/qtest/libqos/virtio-net.h" #include "fuzz.h" #include "fork_fuzz.h" @@ -151,7 +151,7 @@ static void *virtio_net_test_setup_socket(GString *cmd_line, void *arg) { int ret = socketpair(PF_UNIX, SOCK_STREAM, 0, sockfds); g_assert_cmpint(ret, !=, -1); - fcntl(sockfds[0], F_SETFL, O_NONBLOCK); + g_unix_set_fd_nonblocking(sockfds[0], true, NULL); sockfds_initialized = true; g_string_append_printf(cmd_line, " -netdev socket,fd=%d,id=hs0 ", sockfds[1]); diff --git a/tests/qtest/fuzz/virtio_scsi_fuzz.c b/tests/qtest/fuzz/virtio_scsi_fuzz.c index 6ff6fabe4a..b3220ef6cb 100644 --- a/tests/qtest/fuzz/virtio_scsi_fuzz.c +++ b/tests/qtest/fuzz/virtio_scsi_fuzz.c @@ -12,7 +12,7 @@ #include "qemu/osdep.h" -#include "tests/qtest/libqos/libqtest.h" +#include "tests/qtest/libqtest.h" #include "tests/qtest/libqos/virtio-scsi.h" #include "tests/qtest/libqos/virtio.h" #include "tests/qtest/libqos/virtio-pci.h" diff --git a/tests/qtest/fw_cfg-test.c b/tests/qtest/fw_cfg-test.c index 95b3907c18..5dc807ba23 100644 --- a/tests/qtest/fw_cfg-test.c +++ b/tests/qtest/fw_cfg-test.c @@ -12,7 +12,7 @@ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "standard-headers/linux/qemu_fw_cfg.h" #include "libqos/fw_cfg.h" #include "qemu/bswap.h" diff --git a/tests/qtest/hd-geo-test.c b/tests/qtest/hd-geo-test.c index 113126ae06..4a7628077b 100644 --- a/tests/qtest/hd-geo-test.c +++ b/tests/qtest/hd-geo-test.c @@ -16,10 +16,9 @@ */ #include 
"qemu/osdep.h" -#include "qemu-common.h" #include "qemu/bswap.h" #include "qapi/qmp/qlist.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "libqos/fw_cfg.h" #include "libqos/libqos.h" #include "standard-headers/linux/qemu_fw_cfg.h" @@ -28,16 +27,16 @@ static char *create_test_img(int secs) { - char *template = strdup("/tmp/qtest.XXXXXX"); + char *template; int fd, ret; - fd = mkstemp(template); + fd = g_file_open_tmp("qtest.XXXXXX", &template, NULL); g_assert(fd >= 0); ret = ftruncate(fd, (off_t)secs * 512); close(fd); if (ret) { - free(template); + g_free(template); template = NULL; } @@ -178,9 +177,15 @@ static int append_arg(int argc, char *argv[], int argv_sz, char *arg) static int setup_common(char *argv[], int argv_sz) { + int new_argc; memset(cur_ide, 0, sizeof(cur_ide)); - return append_arg(0, argv, argv_sz, - g_strdup("-nodefaults")); + new_argc = append_arg(0, argv, argv_sz, + g_strdup("-nodefaults")); + new_argc = append_arg(new_argc, argv, argv_sz, + g_strdup("-machine")); + new_argc = append_arg(new_argc, argv, argv_sz, + g_strdup("pc")); + return new_argc; } static void setup_mbr(int img_idx, MBRcontents mbr) @@ -417,9 +422,8 @@ static MBRpartitions empty_mbr = { {false, 0, 0, 0, 0, 0, 0, 0, 0}, static char *create_qcow2_with_mbr(MBRpartitions mbr, uint64_t sectors) { - const char *template = "/tmp/qtest.XXXXXX"; - char *raw_path = strdup(template); - char *qcow2_path = strdup(template); + g_autofree char *raw_path = NULL; + char *qcow2_path; char cmd[100 + 2 * PATH_MAX]; uint8_t buf[512] = {}; int i, ret, fd, offset; @@ -463,7 +467,7 @@ static char *create_qcow2_with_mbr(MBRpartitions mbr, uint64_t sectors) offset += 0x10; } - fd = mkstemp(raw_path); + fd = g_file_open_tmp("qtest.XXXXXX", &raw_path, NULL); g_assert(fd >= 0); close(fd); @@ -473,7 +477,7 @@ static char *create_qcow2_with_mbr(MBRpartitions mbr, uint64_t sectors) g_assert(ret == sizeof(buf)); close(fd); - fd = mkstemp(qcow2_path); + fd = g_file_open_tmp("qtest.XXXXXX", &qcow2_path, NULL); g_assert(fd >= 0); close(fd); @@ -501,7 +505,6 @@ static char *create_qcow2_with_mbr(MBRpartitions mbr, uint64_t sectors) free(qemu_img_abs_path); unlink(raw_path); - free(raw_path); return qcow2_path; } @@ -688,7 +691,8 @@ static void add_virtio_disk(TestArgs *args, args->n_virtio_disks++; } -static void test_override(TestArgs *args, CHSResult expected[]) +static void test_override(TestArgs *args, const char *arch, + CHSResult expected[]) { QTestState *qts; char *joined_args; @@ -697,7 +701,7 @@ static void test_override(TestArgs *args, CHSResult expected[]) joined_args = g_strjoinv(" ", args->argv); - qts = qtest_init(joined_args); + qts = qtest_initf("-machine %s %s", arch, joined_args); fw_cfg = pc_fw_cfg_init(qts); read_bootdevices(fw_cfg, expected); @@ -709,7 +713,7 @@ static void test_override(TestArgs *args, CHSResult expected[]) for (i = 0; i < args->n_drives; i++) { unlink(args->drives[i]); - free(args->drives[i]); + g_free(args->drives[i]); } g_free(args->drives); g_strfreev(args->argv); @@ -734,7 +738,28 @@ static void test_override_ide(void) add_ide_disk(args, 1, 0, 1, 9000, 120, 30); add_ide_disk(args, 2, 1, 0, 0, 1, 1); add_ide_disk(args, 3, 1, 1, 1, 0, 0); - test_override(args, expected); + test_override(args, "pc", expected); +} + +static void test_override_sata(void) +{ + TestArgs *args = create_args(); + CHSResult expected[] = { + {"/pci@i0cf8/pci8086,2922@1f,2/drive@0/disk@0", {10000, 120, 30} }, + {"/pci@i0cf8/pci8086,2922@1f,2/drive@1/disk@0", {9000, 120, 30} }, + 
{"/pci@i0cf8/pci8086,2922@1f,2/drive@2/disk@0", {0, 1, 1} }, + {"/pci@i0cf8/pci8086,2922@1f,2/drive@3/disk@0", {1, 0, 0} }, + {NULL, {0, 0, 0} } + }; + add_drive_with_mbr(args, empty_mbr, 1); + add_drive_with_mbr(args, empty_mbr, 1); + add_drive_with_mbr(args, empty_mbr, 1); + add_drive_with_mbr(args, empty_mbr, 1); + add_ide_disk(args, 0, 0, 0, 10000, 120, 30); + add_ide_disk(args, 1, 1, 0, 9000, 120, 30); + add_ide_disk(args, 2, 2, 0, 0, 1, 1); + add_ide_disk(args, 3, 3, 0, 1, 0, 0); + test_override(args, "q35", expected); } static void test_override_scsi(void) @@ -756,7 +781,43 @@ static void test_override_scsi(void) add_scsi_disk(args, 1, 0, 0, 1, 0, 9000, 120, 30); add_scsi_disk(args, 2, 0, 0, 2, 0, 1, 0, 0); add_scsi_disk(args, 3, 0, 0, 3, 0, 0, 1, 0); - test_override(args, expected); + test_override(args, "pc", expected); +} + +static void setup_pci_bridge(TestArgs *args, const char *id, const char *rootid) +{ + + char *root, *br; + root = g_strdup_printf("-device pcie-root-port,id=%s", rootid); + br = g_strdup_printf("-device pcie-pci-bridge,bus=%s,id=%s", rootid, id); + + args->argc = append_arg(args->argc, args->argv, ARGV_SIZE, root); + args->argc = append_arg(args->argc, args->argv, ARGV_SIZE, br); +} + +static void test_override_scsi_q35(void) +{ + TestArgs *args = create_args(); + CHSResult expected[] = { + { "/pci@i0cf8/pci-bridge@1/scsi@3/channel@0/disk@0,0", + {10000, 120, 30} + }, + {"/pci@i0cf8/pci-bridge@1/scsi@3/channel@0/disk@1,0", {9000, 120, 30} }, + {"/pci@i0cf8/pci-bridge@1/scsi@3/channel@0/disk@2,0", {1, 0, 0} }, + {"/pci@i0cf8/pci-bridge@1/scsi@3/channel@0/disk@3,0", {0, 1, 0} }, + {NULL, {0, 0, 0} } + }; + add_drive_with_mbr(args, empty_mbr, 1); + add_drive_with_mbr(args, empty_mbr, 1); + add_drive_with_mbr(args, empty_mbr, 1); + add_drive_with_mbr(args, empty_mbr, 1); + setup_pci_bridge(args, "pcie.0", "br"); + add_scsi_controller(args, "lsi53c895a", "br", 3); + add_scsi_disk(args, 0, 0, 0, 0, 0, 10000, 120, 30); + add_scsi_disk(args, 1, 0, 0, 1, 0, 9000, 120, 30); + add_scsi_disk(args, 2, 0, 0, 2, 0, 1, 0, 0); + add_scsi_disk(args, 3, 0, 0, 3, 0, 0, 1, 0); + test_override(args, "q35", expected); } static void test_override_scsi_2_controllers(void) @@ -779,7 +840,7 @@ static void test_override_scsi_2_controllers(void) add_scsi_disk(args, 1, 0, 0, 1, 0, 9000, 120, 30); add_scsi_disk(args, 2, 1, 0, 0, 1, 1, 0, 0); add_scsi_disk(args, 3, 1, 0, 1, 2, 0, 1, 0); - test_override(args, expected); + test_override(args, "pc", expected); } static void test_override_virtio_blk(void) @@ -794,7 +855,23 @@ static void test_override_virtio_blk(void) add_drive_with_mbr(args, empty_mbr, 1); add_virtio_disk(args, 0, "pci.0", 3, 10000, 120, 30); add_virtio_disk(args, 1, "pci.0", 4, 9000, 120, 30); - test_override(args, expected); + test_override(args, "pc", expected); +} + +static void test_override_virtio_blk_q35(void) +{ + TestArgs *args = create_args(); + CHSResult expected[] = { + {"/pci@i0cf8/pci-bridge@1/scsi@3/disk@0,0", {10000, 120, 30} }, + {"/pci@i0cf8/pci-bridge@1/scsi@4/disk@0,0", {9000, 120, 30} }, + {NULL, {0, 0, 0} } + }; + add_drive_with_mbr(args, empty_mbr, 1); + add_drive_with_mbr(args, empty_mbr, 1); + setup_pci_bridge(args, "pcie.0", "br"); + add_virtio_disk(args, 0, "br", 3, 10000, 120, 30); + add_virtio_disk(args, 1, "br", 4, 9000, 120, 30); + test_override(args, "q35", expected); } static void test_override_zero_chs(void) @@ -805,16 +882,65 @@ static void test_override_zero_chs(void) }; add_drive_with_mbr(args, empty_mbr, 1); add_ide_disk(args, 0, 1, 1, 0, 
0, 0);
- test_override(args, expected);
+ test_override(args, "pc", expected);
}
-static void test_override_scsi_hot_unplug(void)
+static void test_override_zero_chs_q35(void)
+{
+ TestArgs *args = create_args();
+ CHSResult expected[] = {
+ {NULL, {0, 0, 0} }
+ };
+ add_drive_with_mbr(args, empty_mbr, 1);
+ add_ide_disk(args, 0, 0, 0, 0, 0, 0);
+ test_override(args, "q35", expected);
+}
+
+static void test_override_hot_unplug(TestArgs *args, const char *devid,
+ CHSResult expected[], CHSResult expected2[])
{
QTestState *qts;
char *joined_args;
QFWCFG *fw_cfg;
QDict *response;
int i;
+
+ joined_args = g_strjoinv(" ", args->argv);
+
+ qts = qtest_initf("%s", joined_args);
+ fw_cfg = pc_fw_cfg_init(qts);
+
+ read_bootdevices(fw_cfg, expected);
+
+ /* unplug device and restart */
+ qtest_qmp_device_del_send(qts, devid);
+
+ response = qtest_qmp(qts,
+ "{ 'execute': 'system_reset', 'arguments': { }}");
+ g_assert(response);
+ g_assert(!qdict_haskey(response, "error"));
+ qobject_unref(response);
+
+ qtest_qmp_eventwait(qts, "RESET");
+
+ read_bootdevices(fw_cfg, expected2);
+
+ g_free(joined_args);
+ qtest_quit(qts);
+
+ g_free(fw_cfg);
+
+ for (i = 0; i < args->n_drives; i++) {
+ unlink(args->drives[i]);
+ g_free(args->drives[i]);
+ }
+ g_free(args->drives);
+ g_strfreev(args->argv);
+ g_free(args);
+}
+
+static void test_override_scsi_hot_unplug(void)
+{
TestArgs *args = create_args();
CHSResult expected[] = {
{"/pci@i0cf8/scsi@2/channel@0/disk@0,0", {10000, 120, 30} },
@@ -831,51 +957,50 @@ static void test_override_scsi_hot_unplug(void)
add_scsi_disk(args, 0, 0, 0, 0, 0, 10000, 120, 30);
add_scsi_disk(args, 1, 0, 0, 1, 0, 20, 20, 20);
- joined_args = g_strjoinv(" ", args->argv);
+ args->argc = append_arg(args->argc, args->argv, ARGV_SIZE,
+ g_strdup("-machine pc"));
- qts = qtest_init(joined_args);
- fw_cfg = pc_fw_cfg_init(qts);
+ test_override_hot_unplug(args, "scsi-disk0", expected, expected2);
+}
- read_bootdevices(fw_cfg, expected);
+static void test_override_scsi_hot_unplug_q35(void)
+{
+ TestArgs *args = create_args();
+ CHSResult expected[] = {
+ {
+ "/pci@i0cf8/pci-bridge@1/pci-bridge@0/scsi@2/channel@0/disk@0,0",
+ {10000, 120, 30}
+ },
+ {
+ "/pci@i0cf8/pci-bridge@1/pci-bridge@0/scsi@2/channel@0/disk@1,0",
+ {20, 20, 20}
+ },
+ {NULL, {0, 0, 0} }
+ };
+ CHSResult expected2[] = {
+ {
+ "/pci@i0cf8/pci-bridge@1/pci-bridge@0/scsi@2/channel@0/disk@1,0",
+ {20, 20, 20}
+ },
+ {NULL, {0, 0, 0} }
+ };
- /* unplug device an restart */
- response = qtest_qmp(qts,
- "{ 'execute': 'device_del',"
- " 'arguments': {'id': 'scsi-disk0' }}");
- g_assert(response);
- g_assert(!qdict_haskey(response, "error"));
- qobject_unref(response);
- response = qtest_qmp(qts,
- "{ 'execute': 'system_reset', 'arguments': { }}");
- g_assert(response);
- g_assert(!qdict_haskey(response, "error"));
- qobject_unref(response);
+ args->argc = append_arg(args->argc, args->argv, ARGV_SIZE,
+ g_strdup("-device pcie-root-port,id=p0 "
+ "-device pcie-pci-bridge,bus=p0,id=b1 "
+ "-machine q35"));
- qtest_qmp_eventwait(qts, "RESET");
+ add_drive_with_mbr(args, empty_mbr, 1);
+ add_drive_with_mbr(args, empty_mbr, 1);
+ add_scsi_controller(args, "virtio-scsi-pci", "b1", 2);
+ add_scsi_disk(args, 0, 0, 0, 0, 0, 10000, 120, 30);
+ add_scsi_disk(args, 1, 0, 0, 1, 0, 20, 20, 20);
- read_bootdevices(fw_cfg, expected2);
-
- g_free(joined_args);
- qtest_quit(qts);
-
- g_free(fw_cfg);
-
- for (i = 0; i < args->n_drives; i++) {
- unlink(args->drives[i]);
- free(args->drives[i]);
- }
- g_free(args->drives);
- g_strfreev(args->argv);
- g_free(args); + test_override_hot_unplug(args, "scsi-disk0", expected, expected2); } static void test_override_virtio_hot_unplug(void) { - QTestState *qts; - char *joined_args; - QFWCFG *fw_cfg; - QDict *response; - int i; TestArgs *args = create_args(); CHSResult expected[] = { {"/pci@i0cf8/scsi@2/disk@0,0", {10000, 120, 30} }, @@ -891,42 +1016,45 @@ static void test_override_virtio_hot_unplug(void) add_virtio_disk(args, 0, "pci.0", 2, 10000, 120, 30); add_virtio_disk(args, 1, "pci.0", 3, 20, 20, 20); - joined_args = g_strjoinv(" ", args->argv); + args->argc = append_arg(args->argc, args->argv, ARGV_SIZE, + g_strdup("-machine pc")); - qts = qtest_init(joined_args); - fw_cfg = pc_fw_cfg_init(qts); + test_override_hot_unplug(args, "virtio-disk0", expected, expected2); +} - read_bootdevices(fw_cfg, expected); +static void test_override_virtio_hot_unplug_q35(void) +{ + TestArgs *args = create_args(); + CHSResult expected[] = { + { + "/pci@i0cf8/pci-bridge@1/pci-bridge@0/scsi@2/disk@0,0", + {10000, 120, 30} + }, + { + "/pci@i0cf8/pci-bridge@1/pci-bridge@0/scsi@3/disk@0,0", + {20, 20, 20} + }, + {NULL, {0, 0, 0} } + }; + CHSResult expected2[] = { + { + "/pci@i0cf8/pci-bridge@1/pci-bridge@0/scsi@3/disk@0,0", + {20, 20, 20} + }, + {NULL, {0, 0, 0} } + }; - /* unplug device an restart */ - response = qtest_qmp(qts, - "{ 'execute': 'device_del'," - " 'arguments': {'id': 'virtio-disk0' }}"); - g_assert(response); - g_assert(!qdict_haskey(response, "error")); - qobject_unref(response); - response = qtest_qmp(qts, - "{ 'execute': 'system_reset', 'arguments': { }}"); - g_assert(response); - g_assert(!qdict_haskey(response, "error")); - qobject_unref(response); + args->argc = append_arg(args->argc, args->argv, ARGV_SIZE, + g_strdup("-device pcie-root-port,id=p0 " + "-device pcie-pci-bridge,bus=p0,id=b1 " + "-machine q35")); - qtest_qmp_eventwait(qts, "RESET"); + add_drive_with_mbr(args, empty_mbr, 1); + add_drive_with_mbr(args, empty_mbr, 1); + add_virtio_disk(args, 0, "b1", 2, 10000, 120, 30); + add_virtio_disk(args, 1, "b1", 3, 20, 20, 20); - read_bootdevices(fw_cfg, expected2); - - g_free(joined_args); - qtest_quit(qts); - - g_free(fw_cfg); - - for (i = 0; i < args->n_drives; i++) { - unlink(args->drives[i]); - free(args->drives[i]); - } - g_free(args->drives); - g_strfreev(args->argv); - g_free(args); + test_override_hot_unplug(args, "virtio-disk0", expected, expected2); } int main(int argc, char **argv) @@ -960,15 +1088,33 @@ int main(int argc, char **argv) qtest_add_func("hd-geo/ide/device/user/chst", test_ide_device_user_chst); if (have_qemu_img()) { qtest_add_func("hd-geo/override/ide", test_override_ide); - qtest_add_func("hd-geo/override/scsi", test_override_scsi); - qtest_add_func("hd-geo/override/scsi_2_controllers", - test_override_scsi_2_controllers); + if (qtest_has_device("lsi53c895a")) { + qtest_add_func("hd-geo/override/scsi", test_override_scsi); + qtest_add_func("hd-geo/override/scsi_2_controllers", + test_override_scsi_2_controllers); + } qtest_add_func("hd-geo/override/virtio_blk", test_override_virtio_blk); qtest_add_func("hd-geo/override/zero_chs", test_override_zero_chs); qtest_add_func("hd-geo/override/scsi_hot_unplug", test_override_scsi_hot_unplug); qtest_add_func("hd-geo/override/virtio_hot_unplug", test_override_virtio_hot_unplug); + + if (qtest_has_machine("q35")) { + qtest_add_func("hd-geo/override/sata", test_override_sata); + qtest_add_func("hd-geo/override/virtio_blk_q35", + test_override_virtio_blk_q35); + qtest_add_func("hd-geo/override/zero_chs_q35", + 
test_override_zero_chs_q35); + if (qtest_has_device("lsi53c895a")) { + qtest_add_func("hd-geo/override/scsi_q35", + test_override_scsi_q35); + } + qtest_add_func("hd-geo/override/scsi_hot_unplug_q35", + test_override_scsi_hot_unplug_q35); + qtest_add_func("hd-geo/override/virtio_hot_unplug_q35", + test_override_virtio_hot_unplug_q35); + } } else { g_test_message("QTEST_QEMU_IMG not set or qemu-img missing; " "skipping hd-geo/override/* tests"); @@ -980,7 +1126,7 @@ test_add_done: for (i = 0; i < backend_last; i++) { if (img_file_name[i]) { unlink(img_file_name[i]); - free(img_file_name[i]); + g_free(img_file_name[i]); } } diff --git a/tests/qtest/hexloader-test.c b/tests/qtest/hexloader-test.c index 561502052a..8b7aa2d72d 100644 --- a/tests/qtest/hexloader-test.c +++ b/tests/qtest/hexloader-test.c @@ -10,7 +10,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" /* Load 'test.hex' and verify that the in-memory contents are as expected. * 'test.hex' is a memory test pattern stored in Hexadecimal Object diff --git a/tests/qtest/i440fx-test.c b/tests/qtest/i440fx-test.c index 1f57d9684b..795fd85343 100644 --- a/tests/qtest/i440fx-test.c +++ b/tests/qtest/i440fx-test.c @@ -35,7 +35,7 @@ static QPCIBus *test_start_get_bus(const TestData *s) { char *cmdline; - cmdline = g_strdup_printf("-smp %d", s->num_cpus); + cmdline = g_strdup_printf("-machine pc -smp %d", s->num_cpus); qtest_start(cmdline); g_free(cmdline); return qpci_new_pc(global_qtest, NULL); @@ -281,51 +281,31 @@ static void test_i440fx_pam(gconstpointer opaque) #define BLOB_SIZE ((size_t)65536) #define ISA_BIOS_MAXSZ ((size_t)(128 * 1024)) -/* Create a blob file, and return its absolute pathname as a dynamically +/* + * Create a blob file, and return its absolute pathname as a dynamically * allocated string. * The file is closed before the function returns. - * In case of error, NULL is returned. The function prints the error message. + * In case of error, the function aborts and prints the error message. */ static char *create_blob_file(void) { - int ret, fd; + int i, fd; char *pathname; GError *error = NULL; + g_autofree uint8_t *buf = g_malloc(BLOB_SIZE); - ret = -1; fd = g_file_open_tmp("blob_XXXXXX", &pathname, &error); - if (fd == -1) { - fprintf(stderr, "unable to create blob file: %s\n", error->message); - g_error_free(error); - } else { - if (ftruncate(fd, BLOB_SIZE) == -1) { - fprintf(stderr, "ftruncate(\"%s\", %zu): %s\n", pathname, - BLOB_SIZE, strerror(errno)); - } else { - void *buf; + g_assert_no_error(error); + close(fd); - buf = mmap(NULL, BLOB_SIZE, PROT_WRITE, MAP_SHARED, fd, 0); - if (buf == MAP_FAILED) { - fprintf(stderr, "mmap(\"%s\", %zu): %s\n", pathname, BLOB_SIZE, - strerror(errno)); - } else { - size_t i; - - for (i = 0; i < BLOB_SIZE; ++i) { - ((uint8_t *)buf)[i] = i; - } - munmap(buf, BLOB_SIZE); - ret = 0; - } - } - close(fd); - if (ret == -1) { - unlink(pathname); - g_free(pathname); - } + for (i = 0; i < BLOB_SIZE; i++) { + buf[i] = i; } - return ret == -1 ? 
NULL : pathname; + g_file_set_contents(pathname, (char *)buf, BLOB_SIZE, &error); + g_assert_no_error(error); + + return pathname; } static void test_i440fx_firmware(FirmwareTestFixture *fixture, diff --git a/tests/qtest/ide-test.c b/tests/qtest/ide-test.c index 3f8081e77d..dbe1563b23 100644 --- a/tests/qtest/ide-test.c +++ b/tests/qtest/ide-test.c @@ -25,12 +25,11 @@ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "libqos/libqos.h" #include "libqos/pci-pc.h" #include "libqos/malloc-pc.h" #include "qapi/qmp/qdict.h" -#include "qemu-common.h" #include "qemu/bswap.h" #include "hw/pci/pci_ids.h" #include "hw/pci/pci_regs.h" @@ -91,6 +90,7 @@ enum { enum { CMD_DSM = 0x06, + CMD_DIAGNOSE = 0x90, CMD_READ_DMA = 0xc8, CMD_WRITE_DMA = 0xca, CMD_FLUSH_CACHE = 0xe7, @@ -122,16 +122,17 @@ enum { static QPCIBus *pcibus = NULL; static QGuestAllocator guest_malloc; -static char tmp_path[] = "/tmp/qtest.XXXXXX"; -static char debug_path[] = "/tmp/qtest-blkdebug.XXXXXX"; +static char *tmp_path[2]; +static char *debug_path; static QTestState *ide_test_start(const char *cmdline_fmt, ...) { QTestState *qts; + g_autofree char *full_fmt = g_strdup_printf("-machine pc %s", cmdline_fmt); va_list ap; va_start(ap, cmdline_fmt); - qts = qtest_vinitf(cmdline_fmt, ap); + qts = qtest_vinitf(full_fmt, ap); va_end(ap); pc_alloc_init(&guest_malloc, qts, 0); @@ -310,7 +311,7 @@ static QTestState *test_bmdma_setup(void) qts = ide_test_start( "-drive file=%s,if=ide,cache=writeback,format=raw " "-global ide-hd.serial=%s -global ide-hd.ver=%s", - tmp_path, "testdisk", "version"); + tmp_path[0], "testdisk", "version"); qtest_irq_intercept_in(qts, "ioapic"); return qts; @@ -574,7 +575,7 @@ static void test_identify(void) qts = ide_test_start( "-drive file=%s,if=ide,cache=writeback,format=raw " "-global ide-hd.serial=%s -global ide-hd.ver=%s", - tmp_path, "testdisk", "version"); + tmp_path[0], "testdisk", "version"); dev = get_pci_device(qts, &bmdma_bar, &ide_bar); @@ -614,6 +615,36 @@ static void test_identify(void) free_pci_device(dev); } +static void test_diagnostic(void) +{ + QTestState *qts; + QPCIDevice *dev; + QPCIBar bmdma_bar, ide_bar; + uint8_t data; + + qts = ide_test_start( + "-blockdev driver=file,node-name=hda,filename=%s " + "-blockdev driver=file,node-name=hdb,filename=%s " + "-device ide-hd,drive=hda,bus=ide.0,unit=0 " + "-device ide-hd,drive=hdb,bus=ide.0,unit=1 ", + tmp_path[0], tmp_path[1]); + + dev = get_pci_device(qts, &bmdma_bar, &ide_bar); + + /* DIAGNOSE command on device 1 */ + qpci_io_writeb(dev, ide_bar, reg_device, DEV); + data = qpci_io_readb(dev, ide_bar, reg_device); + g_assert_cmphex(data & DEV, ==, DEV); + qpci_io_writeb(dev, ide_bar, reg_command, CMD_DIAGNOSE); + + /* Verify that DEVICE is now 0 */ + data = qpci_io_readb(dev, ide_bar, reg_device); + g_assert_cmphex(data & DEV, ==, 0); + + ide_test_quit(qts); + free_pci_device(dev); +} + /* * Write sector 1 with random data to make IDE storage dirty * Needed for flush tests so that flushes actually go though the block layer @@ -662,7 +693,7 @@ static void test_flush(void) qts = ide_test_start( "-drive file=blkdebug::%s,if=ide,cache=writeback,format=raw", - tmp_path); + tmp_path[0]); dev = get_pci_device(qts, &bmdma_bar, &ide_bar); @@ -701,7 +732,7 @@ static void test_flush(void) free_pci_device(dev); } -static void test_retry_flush(const char *machine) +static void test_pci_retry_flush(void) { QTestState *qts; QPCIDevice *dev; @@ -713,7 +744,7 @@ static void test_retry_flush(const char *machine) qts = 
ide_test_start(
"-drive file=blkdebug:%s:%s,if=ide,cache=writeback,format=raw,"
"rerror=stop,werror=stop",
- debug_path, tmp_path);
+ debug_path, tmp_path[0]);
dev = get_pci_device(qts, &bmdma_bar, &ide_bar);
@@ -790,16 +821,6 @@ static void test_flush_empty_drive(void)
ide_test_quit(qts);
}
-static void test_pci_retry_flush(void)
-{
- test_retry_flush("pc");
-}
-
-static void test_isa_retry_flush(void)
-{
- test_retry_flush("isapc");
-}
-
typedef struct Read10CDB {
uint8_t opcode;
uint8_t flags;
@@ -902,14 +923,14 @@ static void cdrom_pio_impl(int nblocks)
/* Prepopulate the CDROM with an interesting pattern */
generate_pattern(pattern, patt_len, ATAPI_BLOCK_SIZE);
- fh = fopen(tmp_path, "w+");
+ fh = fopen(tmp_path[0], "wb+");
ret = fwrite(pattern, ATAPI_BLOCK_SIZE, patt_blocks, fh);
g_assert_cmpint(ret, ==, patt_blocks);
fclose(fh);
qts = ide_test_start(
"-drive if=none,file=%s,media=cdrom,format=raw,id=sr0,index=0 "
- "-device ide-cd,drive=sr0,bus=ide.0", tmp_path);
+ "-device ide-cd,drive=sr0,bus=ide.0", tmp_path[0]);
dev = get_pci_device(qts, &bmdma_bar, &ide_bar);
qtest_irq_intercept_in(qts, "ioapic");
@@ -995,7 +1016,7 @@ static void test_cdrom_dma(void)
qts = ide_test_start(
"-drive if=none,file=%s,media=cdrom,format=raw,id=sr0,index=0 "
- "-device ide-cd,drive=sr0,bus=ide.0", tmp_path);
+ "-device ide-cd,drive=sr0,bus=ide.0", tmp_path[0]);
qtest_irq_intercept_in(qts, "ioapic");
guest_buf = guest_alloc(&guest_malloc, len);
prdt[0].size = cpu_to_le32(len | PRDT_EOT);
generate_pattern(pattern, ATAPI_BLOCK_SIZE * 16, ATAPI_BLOCK_SIZE);
- fh = fopen(tmp_path, "w+");
+ fh = fopen(tmp_path[0], "wb+");
ret = fwrite(pattern, ATAPI_BLOCK_SIZE, 16, fh);
g_assert_cmpint(ret, ==, 16);
fclose(fh);
@@ -1021,26 +1042,47 @@ int main(int argc, char **argv)
{
+ const char *base;
+ int i;
int fd;
int ret;
+ /*
+ * "base" stores the starting point where we create temporary files.
+ *
+ * On Windows, this is set to the relative path of the current working
+ * directory, because the absolute path causes the blkdebug filename
+ * parser to fail to parse "blkdebug:path/to/config:path/to/image".
+ */ +#ifndef _WIN32 + base = g_get_tmp_dir(); +#else + base = "."; +#endif + /* Create temporary blkdebug instructions */ - fd = mkstemp(debug_path); + debug_path = g_strdup_printf("%s/qtest-blkdebug.XXXXXX", base); + fd = g_mkstemp(debug_path); g_assert(fd >= 0); close(fd); /* Create a temporary raw image */ - fd = mkstemp(tmp_path); - g_assert(fd >= 0); - ret = ftruncate(fd, TEST_IMAGE_SIZE); - g_assert(ret == 0); - close(fd); + for (i = 0; i < 2; ++i) { + tmp_path[i] = g_strdup_printf("%s/qtest.XXXXXX", base); + fd = g_mkstemp(tmp_path[i]); + g_assert(fd >= 0); + ret = ftruncate(fd, TEST_IMAGE_SIZE); + g_assert(ret == 0); + close(fd); + } /* Run the tests */ g_test_init(&argc, &argv, NULL); qtest_add_func("/ide/identify", test_identify); + qtest_add_func("/ide/diagnostic", test_diagnostic); + qtest_add_func("/ide/bmdma/simple_rw", test_bmdma_simple_rw); qtest_add_func("/ide/bmdma/trim", test_bmdma_trim); qtest_add_func("/ide/bmdma/various_prdts", test_bmdma_various_prdts); @@ -1050,7 +1092,6 @@ int main(int argc, char **argv) qtest_add_func("/ide/flush/nodev", test_flush_nodev); qtest_add_func("/ide/flush/empty_drive", test_flush_empty_drive); qtest_add_func("/ide/flush/retry_pci", test_pci_retry_flush); - qtest_add_func("/ide/flush/retry_isa", test_isa_retry_flush); qtest_add_func("/ide/cdrom/pio", test_cdrom_pio); qtest_add_func("/ide/cdrom/pio_large", test_cdrom_pio_large); @@ -1059,8 +1100,12 @@ int main(int argc, char **argv) ret = g_test_run(); /* Cleanup */ - unlink(tmp_path); + for (i = 0; i < 2; ++i) { + unlink(tmp_path[i]); + g_free(tmp_path[i]); + } unlink(debug_path); + g_free(debug_path); return ret; } diff --git a/tests/qtest/intel-hda-test.c b/tests/qtest/intel-hda-test.c index fc25ccc33c..d4a8db6fd6 100644 --- a/tests/qtest/intel-hda-test.c +++ b/tests/qtest/intel-hda-test.c @@ -18,7 +18,7 @@ /* Tests only initialization so far. 
TODO: Replace with functional tests */ static void ich6_test(void) { - qtest_start("-device intel-hda,id=" HDA_ID CODEC_DEVICES); + qtest_start("-machine pc -device intel-hda,id=" HDA_ID CODEC_DEVICES); qtest_end(); } @@ -29,11 +29,48 @@ static void ich9_test(void) qtest_end(); } +/* + * https://gitlab.com/qemu-project/qemu/-/issues/542 + * Used to trigger: + * AddressSanitizer: stack-overflow + */ +static void test_issue542_ich6(void) +{ + QTestState *s; + + s = qtest_init("-nographic -nodefaults -M pc-q35-6.2 " + "-device intel-hda,id=" HDA_ID CODEC_DEVICES); + + qtest_outl(s, 0xcf8, 0x80000804); + qtest_outw(s, 0xcfc, 0x06); + qtest_bufwrite(s, 0xff0d060f, "\x03", 1); + qtest_bufwrite(s, 0x0, "\x12", 1); + qtest_bufwrite(s, 0x2, "\x2a", 1); + qtest_writeb(s, 0x0, 0x12); + qtest_writeb(s, 0x2, 0x2a); + qtest_outl(s, 0xcf8, 0x80000811); + qtest_outl(s, 0xcfc, 0x006a4400); + qtest_bufwrite(s, 0x6a44005a, "\x01", 1); + qtest_bufwrite(s, 0x6a44005c, "\x02", 1); + qtest_bufwrite(s, 0x6a442050, "\x00\x00\x44\x6a", 4); + qtest_bufwrite(s, 0x6a44204a, "\x01", 1); + qtest_bufwrite(s, 0x6a44204c, "\x02", 1); + qtest_bufwrite(s, 0x6a44005c, "\x02", 1); + qtest_bufwrite(s, 0x6a442050, "\x00\x00\x44\x6a", 4); + qtest_bufwrite(s, 0x6a44204a, "\x01", 1); + qtest_bufwrite(s, 0x6a44204c, "\x02", 1); + qtest_quit(s); +} + int main(int argc, char **argv) { g_test_init(&argc, &argv, NULL); - qtest_add_func("/intel-hda/ich6", ich6_test); - qtest_add_func("/intel-hda/ich9", ich9_test); - + if (qtest_has_machine("pc")) { + qtest_add_func("/intel-hda/ich6", ich6_test); + } + if (qtest_has_machine("q35")) { + qtest_add_func("/intel-hda/ich9", ich9_test); + qtest_add_func("/intel-hda/fuzz/issue542", test_issue542_ich6); + } return g_test_run(); } diff --git a/tests/qtest/ipmi-bt-test.c b/tests/qtest/ipmi-bt-test.c index 8492f02a9c..ed431e34e6 100644 --- a/tests/qtest/ipmi-bt-test.c +++ b/tests/qtest/ipmi-bt-test.c @@ -31,7 +31,6 @@ #include "libqtest-single.h" -#include "qemu-common.h" #define IPMI_IRQ 5 @@ -378,7 +377,7 @@ static void test_enable_irq(void) */ static void open_socket(void) { - struct sockaddr_in myaddr; + struct sockaddr_in myaddr = {}; socklen_t addrlen; myaddr.sin_family = AF_INET; diff --git a/tests/qtest/ipoctal232-test.c b/tests/qtest/ipoctal232-test.c index 65ce10b81b..53a8c9b13c 100644 --- a/tests/qtest/ipoctal232-test.c +++ b/tests/qtest/ipoctal232-test.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/module.h" #include "libqos/qgraph.h" diff --git a/tests/qtest/isl_pmbus_vr-test.c b/tests/qtest/isl_pmbus_vr-test.c new file mode 100644 index 0000000000..5553ea410a --- /dev/null +++ b/tests/qtest/isl_pmbus_vr-test.c @@ -0,0 +1,474 @@ +/* + * QTests for the ISL_PMBUS digital voltage regulators + * + * Copyright 2021 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" +#include +#include "hw/i2c/pmbus_device.h" +#include "hw/sensor/isl_pmbus_vr.h" +#include "libqtest-single.h" +#include "libqos/qgraph.h" +#include "libqos/i2c.h" +#include "qapi/qmp/qdict.h" +#include "qapi/qmp/qnum.h" +#include "qemu/bitops.h" + +#define TEST_ID "isl_pmbus_vr-test" +#define TEST_ADDR (0x43) + +static uint16_t qmp_isl_pmbus_vr_get(const char *id, const char *property) +{ + QDict *response; + uint64_t ret; + + response = qmp("{ 'execute': 'qom-get', 'arguments': { 'path': %s, " + "'property': %s } }", id, property); + g_assert(qdict_haskey(response, "return")); + ret = qnum_get_uint(qobject_to(QNum, qdict_get(response, "return"))); + qobject_unref(response); + return ret; +} + +static void qmp_isl_pmbus_vr_set(const char *id, + const char *property, + uint16_t value) +{ + QDict *response; + + response = qmp("{ 'execute': 'qom-set', 'arguments': { 'path': %s, " + "'property': %s, 'value': %u } }", id, property, value); + g_assert(qdict_haskey(response, "return")); + qobject_unref(response); +} + +/* PMBus commands are little endian vs i2c_set16 in i2c.h which is big endian */ +static uint16_t isl_pmbus_vr_i2c_get16(QI2CDevice *i2cdev, uint8_t reg) +{ + uint8_t resp[2]; + i2c_read_block(i2cdev, reg, resp, sizeof(resp)); + return (resp[1] << 8) | resp[0]; +} + +/* PMBus commands are little endian vs i2c_set16 in i2c.h which is big endian */ +static void isl_pmbus_vr_i2c_set16(QI2CDevice *i2cdev, uint8_t reg, + uint16_t value) +{ + uint8_t data[2]; + + data[0] = value & 255; + data[1] = value >> 8; + i2c_write_block(i2cdev, reg, data, sizeof(data)); +} + +static void test_defaults(void *obj, void *data, QGuestAllocator *alloc) +{ + uint16_t value, i2c_value; + QI2CDevice *i2cdev = (QI2CDevice *)obj; + + value = qmp_isl_pmbus_vr_get(TEST_ID, "vout[0]"); + g_assert_cmpuint(value, ==, ISL_READ_VOUT_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_IOUT); + g_assert_cmpuint(i2c_value, ==, ISL_READ_IOUT_DEFAULT); + + value = qmp_isl_pmbus_vr_get(TEST_ID, "pout[0]"); + g_assert_cmpuint(value, ==, ISL_READ_POUT_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_VIN); + g_assert_cmpuint(i2c_value, ==, ISL_READ_VIN_DEFAULT); + + value = qmp_isl_pmbus_vr_get(TEST_ID, "iin[0]"); + g_assert_cmpuint(value, ==, ISL_READ_IIN_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_PIN); + g_assert_cmpuint(i2c_value, ==, ISL_READ_PIN_DEFAULT); + + value = qmp_isl_pmbus_vr_get(TEST_ID, "temp1[0]"); + g_assert_cmpuint(value, ==, ISL_READ_TEMP_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_TEMPERATURE_2); + g_assert_cmpuint(i2c_value, ==, ISL_READ_TEMP_DEFAULT); + + i2c_value = i2c_get8(i2cdev, PMBUS_CAPABILITY); + g_assert_cmphex(i2c_value, ==, ISL_CAPABILITY_DEFAULT); + + i2c_value = i2c_get8(i2cdev, PMBUS_OPERATION); + g_assert_cmphex(i2c_value, ==, ISL_OPERATION_DEFAULT); + + i2c_value = i2c_get8(i2cdev, PMBUS_ON_OFF_CONFIG); + g_assert_cmphex(i2c_value, ==, ISL_ON_OFF_CONFIG_DEFAULT); + + i2c_value = i2c_get8(i2cdev, PMBUS_VOUT_MODE); + g_assert_cmphex(i2c_value, ==, ISL_VOUT_MODE_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_COMMAND); + g_assert_cmphex(i2c_value, ==, ISL_VOUT_COMMAND_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_MAX); + g_assert_cmphex(i2c_value, ==, ISL_VOUT_MAX_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_MARGIN_HIGH); + g_assert_cmphex(i2c_value, ==, ISL_VOUT_MARGIN_HIGH_DEFAULT); + + i2c_value = 
isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_MARGIN_LOW); + g_assert_cmphex(i2c_value, ==, ISL_VOUT_MARGIN_LOW_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_TRANSITION_RATE); + g_assert_cmphex(i2c_value, ==, ISL_VOUT_TRANSITION_RATE_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_OV_FAULT_LIMIT); + g_assert_cmphex(i2c_value, ==, ISL_VOUT_OV_FAULT_LIMIT_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_OT_FAULT_LIMIT); + g_assert_cmphex(i2c_value, ==, ISL_OT_FAULT_LIMIT_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_OT_WARN_LIMIT); + g_assert_cmphex(i2c_value, ==, ISL_OT_WARN_LIMIT_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VIN_OV_WARN_LIMIT); + g_assert_cmphex(i2c_value, ==, ISL_VIN_OV_WARN_LIMIT_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VIN_UV_WARN_LIMIT); + g_assert_cmphex(i2c_value, ==, ISL_VIN_UV_WARN_LIMIT_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_IIN_OC_FAULT_LIMIT); + g_assert_cmphex(i2c_value, ==, ISL_IIN_OC_FAULT_LIMIT_DEFAULT); + + i2c_value = i2c_get8(i2cdev, PMBUS_REVISION); + g_assert_cmphex(i2c_value, ==, ISL_REVISION_DEFAULT); +} + +static void raa228000_test_defaults(void *obj, void *data, + QGuestAllocator *alloc) +{ + uint16_t value, i2c_value; + QI2CDevice *i2cdev = (QI2CDevice *)obj; + + value = qmp_isl_pmbus_vr_get(TEST_ID, "vout[0]"); + g_assert_cmpuint(value, ==, 0); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_IOUT); + g_assert_cmpuint(i2c_value, ==, 0); + + value = qmp_isl_pmbus_vr_get(TEST_ID, "pout[0]"); + g_assert_cmpuint(value, ==, 0); + + i2c_value = i2c_get8(i2cdev, PMBUS_CAPABILITY); + g_assert_cmphex(i2c_value, ==, ISL_CAPABILITY_DEFAULT); + + i2c_value = i2c_get8(i2cdev, PMBUS_OPERATION); + g_assert_cmphex(i2c_value, ==, ISL_OPERATION_DEFAULT); + + i2c_value = i2c_get8(i2cdev, PMBUS_ON_OFF_CONFIG); + g_assert_cmphex(i2c_value, ==, ISL_ON_OFF_CONFIG_DEFAULT); + + i2c_value = i2c_get8(i2cdev, PMBUS_VOUT_MODE); + g_assert_cmphex(i2c_value, ==, ISL_VOUT_MODE_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_COMMAND); + g_assert_cmphex(i2c_value, ==, ISL_VOUT_COMMAND_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_MAX); + g_assert_cmphex(i2c_value, ==, ISL_VOUT_MAX_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_MARGIN_HIGH); + g_assert_cmphex(i2c_value, ==, ISL_VOUT_MARGIN_HIGH_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_MARGIN_LOW); + g_assert_cmphex(i2c_value, ==, ISL_VOUT_MARGIN_LOW_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_TRANSITION_RATE); + g_assert_cmphex(i2c_value, ==, ISL_VOUT_TRANSITION_RATE_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_OV_FAULT_LIMIT); + g_assert_cmphex(i2c_value, ==, ISL_VOUT_OV_FAULT_LIMIT_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_OT_FAULT_LIMIT); + g_assert_cmphex(i2c_value, ==, ISL_OT_FAULT_LIMIT_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_OT_WARN_LIMIT); + g_assert_cmphex(i2c_value, ==, ISL_OT_WARN_LIMIT_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VIN_OV_WARN_LIMIT); + g_assert_cmphex(i2c_value, ==, ISL_VIN_OV_WARN_LIMIT_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VIN_UV_WARN_LIMIT); + g_assert_cmphex(i2c_value, ==, ISL_VIN_UV_WARN_LIMIT_DEFAULT); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_IIN_OC_FAULT_LIMIT); + g_assert_cmphex(i2c_value, ==, 
ISL_IIN_OC_FAULT_LIMIT_DEFAULT); + + i2c_value = i2c_get8(i2cdev, PMBUS_REVISION); + g_assert_cmphex(i2c_value, ==, ISL_REVISION_DEFAULT); +} + +/* test qmp access */ +static void test_tx_rx(void *obj, void *data, QGuestAllocator *alloc) +{ + uint16_t i2c_value, value; + QI2CDevice *i2cdev = (QI2CDevice *)obj; + + qmp_isl_pmbus_vr_set(TEST_ID, "vin[0]", 200); + value = qmp_isl_pmbus_vr_get(TEST_ID, "vin[0]"); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_VIN); + g_assert_cmpuint(value, ==, i2c_value); + + qmp_isl_pmbus_vr_set(TEST_ID, "vout[0]", 2500); + value = qmp_isl_pmbus_vr_get(TEST_ID, "vout[0]"); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_VOUT); + g_assert_cmpuint(value, ==, i2c_value); + + qmp_isl_pmbus_vr_set(TEST_ID, "iin[0]", 300); + value = qmp_isl_pmbus_vr_get(TEST_ID, "iin[0]"); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_IIN); + g_assert_cmpuint(value, ==, i2c_value); + + qmp_isl_pmbus_vr_set(TEST_ID, "iout[0]", 310); + value = qmp_isl_pmbus_vr_get(TEST_ID, "iout[0]"); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_IOUT); + g_assert_cmpuint(value, ==, i2c_value); + + qmp_isl_pmbus_vr_set(TEST_ID, "pin[0]", 100); + value = qmp_isl_pmbus_vr_get(TEST_ID, "pin[0]"); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_PIN); + g_assert_cmpuint(value, ==, i2c_value); + + qmp_isl_pmbus_vr_set(TEST_ID, "pout[0]", 95); + value = qmp_isl_pmbus_vr_get(TEST_ID, "pout[0]"); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_POUT); + g_assert_cmpuint(value, ==, i2c_value); + + qmp_isl_pmbus_vr_set(TEST_ID, "temp1[0]", 26); + value = qmp_isl_pmbus_vr_get(TEST_ID, "temp1[0]"); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_TEMPERATURE_1); + g_assert_cmpuint(value, ==, i2c_value); + + qmp_isl_pmbus_vr_set(TEST_ID, "temp2[0]", 27); + value = qmp_isl_pmbus_vr_get(TEST_ID, "temp2[0]"); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_TEMPERATURE_2); + g_assert_cmpuint(value, ==, i2c_value); + + qmp_isl_pmbus_vr_set(TEST_ID, "temp3[0]", 28); + value = qmp_isl_pmbus_vr_get(TEST_ID, "temp3[0]"); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_TEMPERATURE_3); + g_assert_cmpuint(value, ==, i2c_value); + +} + +/* test r/w registers */ +static void test_rw_regs(void *obj, void *data, QGuestAllocator *alloc) +{ + uint16_t i2c_value; + QI2CDevice *i2cdev = (QI2CDevice *)obj; + + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_VOUT_COMMAND, 0x1234); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_COMMAND); + g_assert_cmphex(i2c_value, ==, 0x1234); + + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_VOUT_TRIM, 0x4567); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_TRIM); + g_assert_cmphex(i2c_value, ==, 0x4567); + + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_VOUT_MAX, 0x9876); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_MAX); + g_assert_cmphex(i2c_value, ==, 0x9876); + + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_VOUT_MARGIN_HIGH, 0xABCD); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_MARGIN_HIGH); + g_assert_cmphex(i2c_value, ==, 0xABCD); + + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_VOUT_MARGIN_LOW, 0xA1B2); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_MARGIN_LOW); + g_assert_cmphex(i2c_value, ==, 0xA1B2); + + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_VOUT_TRANSITION_RATE, 0xDEF1); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_TRANSITION_RATE); + g_assert_cmphex(i2c_value, ==, 0xDEF1); + + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_VOUT_DROOP, 0x5678); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_DROOP); + 
g_assert_cmphex(i2c_value, ==, 0x5678); + + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_VOUT_MIN, 0x1234); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_MIN); + g_assert_cmphex(i2c_value, ==, 0x1234); + + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_VOUT_OV_FAULT_LIMIT, 0x2345); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_OV_FAULT_LIMIT); + g_assert_cmphex(i2c_value, ==, 0x2345); + + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_VOUT_UV_FAULT_LIMIT, 0xFA12); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VOUT_UV_FAULT_LIMIT); + g_assert_cmphex(i2c_value, ==, 0xFA12); + + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_OT_FAULT_LIMIT, 0xF077); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_OT_FAULT_LIMIT); + g_assert_cmphex(i2c_value, ==, 0xF077); + + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_OT_WARN_LIMIT, 0x7137); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_OT_WARN_LIMIT); + g_assert_cmphex(i2c_value, ==, 0x7137); + + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_VIN_OV_FAULT_LIMIT, 0x3456); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VIN_OV_FAULT_LIMIT); + g_assert_cmphex(i2c_value, ==, 0x3456); + + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_VIN_UV_FAULT_LIMIT, 0xBADA); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_VIN_UV_FAULT_LIMIT); + g_assert_cmphex(i2c_value, ==, 0xBADA); + + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_IIN_OC_FAULT_LIMIT, 0xB1B0); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_IIN_OC_FAULT_LIMIT); + g_assert_cmphex(i2c_value, ==, 0xB1B0); + + i2c_set8(i2cdev, PMBUS_OPERATION, 0xA); + i2c_value = i2c_get8(i2cdev, PMBUS_OPERATION); + g_assert_cmphex(i2c_value, ==, 0xA); + + i2c_set8(i2cdev, PMBUS_ON_OFF_CONFIG, 0x42); + i2c_value = i2c_get8(i2cdev, PMBUS_ON_OFF_CONFIG); + g_assert_cmphex(i2c_value, ==, 0x42); +} + +/* test that devices with multiple pages can switch between them */ +static void test_pages_rw(void *obj, void *data, QGuestAllocator *alloc) +{ + uint16_t i2c_value; + QI2CDevice *i2cdev = (QI2CDevice *)obj; + + i2c_set8(i2cdev, PMBUS_PAGE, 1); + i2c_value = i2c_get8(i2cdev, PMBUS_PAGE); + g_assert_cmphex(i2c_value, ==, 1); + + i2c_set8(i2cdev, PMBUS_PAGE, 0); + i2c_value = i2c_get8(i2cdev, PMBUS_PAGE); + g_assert_cmphex(i2c_value, ==, 0); +} + +/* test read-only registers */ +static void test_ro_regs(void *obj, void *data, QGuestAllocator *alloc) +{ + uint16_t i2c_init_value, i2c_value; + QI2CDevice *i2cdev = (QI2CDevice *)obj; + + i2c_init_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_VIN); + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_READ_VIN, 0xBEEF); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_VIN); + g_assert_cmphex(i2c_init_value, ==, i2c_value); + + i2c_init_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_IIN); + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_READ_IIN, 0xB00F); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_IIN); + g_assert_cmphex(i2c_init_value, ==, i2c_value); + + i2c_init_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_VOUT); + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_READ_VOUT, 0x1234); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_VOUT); + g_assert_cmphex(i2c_init_value, ==, i2c_value); + + i2c_init_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_IOUT); + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_READ_IOUT, 0x6547); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_IOUT); + g_assert_cmphex(i2c_init_value, ==, i2c_value); + + i2c_init_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_TEMPERATURE_1); + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_READ_TEMPERATURE_1, 0x1597); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, 
PMBUS_READ_TEMPERATURE_1); + g_assert_cmphex(i2c_init_value, ==, i2c_value); + + i2c_init_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_TEMPERATURE_2); + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_READ_TEMPERATURE_2, 0x1897); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_TEMPERATURE_2); + g_assert_cmphex(i2c_init_value, ==, i2c_value); + + i2c_init_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_TEMPERATURE_3); + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_READ_TEMPERATURE_3, 0x1007); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_TEMPERATURE_3); + g_assert_cmphex(i2c_init_value, ==, i2c_value); + + i2c_init_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_PIN); + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_READ_PIN, 0xDEAD); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_PIN); + g_assert_cmphex(i2c_init_value, ==, i2c_value); + + i2c_init_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_POUT); + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_READ_POUT, 0xD00D); + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_READ_POUT); + g_assert_cmphex(i2c_init_value, ==, i2c_value); +} + +/* test voltage fault handling */ +static void test_voltage_faults(void *obj, void *data, QGuestAllocator *alloc) +{ + uint16_t i2c_value; + uint8_t i2c_byte; + QI2CDevice *i2cdev = (QI2CDevice *)obj; + + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_VOUT_OV_WARN_LIMIT, 5000); + qmp_isl_pmbus_vr_set(TEST_ID, "vout[0]", 5100); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_STATUS_WORD); + i2c_byte = i2c_get8(i2cdev, PMBUS_STATUS_VOUT); + g_assert_true((i2c_value & PB_STATUS_VOUT) != 0); + g_assert_true((i2c_byte & PB_STATUS_VOUT_OV_WARN) != 0); + + qmp_isl_pmbus_vr_set(TEST_ID, "vout[0]", 4500); + i2c_set8(i2cdev, PMBUS_CLEAR_FAULTS, 0); + i2c_byte = i2c_get8(i2cdev, PMBUS_STATUS_VOUT); + g_assert_true((i2c_byte & PB_STATUS_VOUT_OV_WARN) == 0); + + isl_pmbus_vr_i2c_set16(i2cdev, PMBUS_VOUT_UV_WARN_LIMIT, 4600); + + i2c_value = isl_pmbus_vr_i2c_get16(i2cdev, PMBUS_STATUS_WORD); + i2c_byte = i2c_get8(i2cdev, PMBUS_STATUS_VOUT); + g_assert_true((i2c_value & PB_STATUS_VOUT) != 0); + g_assert_true((i2c_byte & PB_STATUS_VOUT_UV_WARN) != 0); + +} + +static void isl_pmbus_vr_register_nodes(void) +{ + QOSGraphEdgeOptions opts = { + .extra_device_opts = "id=" TEST_ID ",address=0x43" + }; + add_qi2c_address(&opts, &(QI2CAddress) { TEST_ADDR }); + + qos_node_create_driver("isl69260", i2c_device_create); + qos_node_consumes("isl69260", "i2c-bus", &opts); + + qos_add_test("test_defaults", "isl69260", test_defaults, NULL); + qos_add_test("test_tx_rx", "isl69260", test_tx_rx, NULL); + qos_add_test("test_rw_regs", "isl69260", test_rw_regs, NULL); + qos_add_test("test_pages_rw", "isl69260", test_pages_rw, NULL); + qos_add_test("test_ro_regs", "isl69260", test_ro_regs, NULL); + qos_add_test("test_ov_faults", "isl69260", test_voltage_faults, NULL); + + qos_node_create_driver("raa229004", i2c_device_create); + qos_node_consumes("raa229004", "i2c-bus", &opts); + + qos_add_test("test_tx_rx", "raa229004", test_tx_rx, NULL); + qos_add_test("test_rw_regs", "raa229004", test_rw_regs, NULL); + qos_add_test("test_pages_rw", "raa229004", test_pages_rw, NULL); + qos_add_test("test_ov_faults", "raa229004", test_voltage_faults, NULL); + + qos_node_create_driver("raa228000", i2c_device_create); + qos_node_consumes("raa228000", "i2c-bus", &opts); + + qos_add_test("test_defaults", "raa228000", raa228000_test_defaults, NULL); + qos_add_test("test_tx_rx", "raa228000", test_tx_rx, NULL); + qos_add_test("test_rw_regs", "raa228000", test_rw_regs, NULL); 
+ qos_add_test("test_ov_faults", "raa228000", test_voltage_faults, NULL); +} +libqos_init(isl_pmbus_vr_register_nodes); diff --git a/tests/qtest/ivshmem-test.c b/tests/qtest/ivshmem-test.c index dfa69424ed..cd550c8935 100644 --- a/tests/qtest/ivshmem-test.c +++ b/tests/qtest/ivshmem-test.c @@ -13,8 +13,7 @@ #include "contrib/ivshmem-server/ivshmem-server.h" #include "libqos/libqos-pc.h" #include "libqos/libqos-spapr.h" -#include "libqos/libqtest.h" -#include "qemu-common.h" +#include "libqtest.h" #define TMPSHMSIZE (1 << 20) static char *tmpshm; @@ -305,6 +304,7 @@ static void setup_vm_with_server(IVState *s, int nvectors) static void test_ivshmem_server(void) { + g_autoptr(GError) err = NULL; IVState state1, state2, *s1, *s2; ServerThread thread; IvshmemServer server; @@ -321,8 +321,8 @@ static void test_ivshmem_server(void) g_assert_cmpint(ret, ==, 0); thread.server = &server; - ret = pipe(thread.pipe); - g_assert_cmpint(ret, ==, 0); + g_unix_open_pipe(thread.pipe, FD_CLOEXEC, &err); + g_assert_no_error(err); thread.thread = g_thread_new("ivshmem-server", server_thread, &thread); g_assert(thread.thread != NULL); @@ -378,6 +378,20 @@ static void test_ivshmem_server(void) close(thread.pipe[0]); } +static void test_ivshmem_hotplug_q35(void) +{ + QTestState *qts = qtest_init("-object memory-backend-ram,size=1M,id=mb1 " + "-device pcie-root-port,id=p1 " + "-device pcie-pci-bridge,bus=p1,id=b1 " + "-machine q35"); + + qtest_qmp_device_add(qts, "ivshmem-plain", "iv1", + "{'memdev': 'mb1', 'bus': 'b1'}"); + qtest_qmp_device_del_send(qts, "iv1"); + + qtest_quit(qts); +} + #define PCI_SLOT_HP 0x06 static void test_ivshmem_hotplug(void) @@ -385,7 +399,12 @@ static void test_ivshmem_hotplug(void) QTestState *qts; const char *arch = qtest_get_arch(); - qts = qtest_init("-object memory-backend-ram,size=1M,id=mb1"); + if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { + qts = qtest_init("-object memory-backend-ram,size=1M,id=mb1" + " -machine pc"); + } else { + qts = qtest_init("-object memory-backend-ram,size=1M,id=mb1"); + } qtest_qmp_device_add(qts, "ivshmem-plain", "iv1", "{'addr': %s, 'memdev': 'mb1'}", @@ -463,8 +482,8 @@ static gchar *mktempshm(int size, int *fd) int main(int argc, char **argv) { int ret, fd; - const char *arch = qtest_get_arch(); gchar dir[] = "/tmp/ivshmem-test.XXXXXX"; + const char *arch = qtest_get_arch(); g_test_init(&argc, &argv, NULL); @@ -477,8 +496,8 @@ int main(int argc, char **argv) tmpshmem = mmap(0, TMPSHMSIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); g_assert(tmpshmem != MAP_FAILED); /* server */ - if (mkdtemp(dir) == NULL) { - g_error("mkdtemp: %s", g_strerror(errno)); + if (g_mkdtemp(dir) == NULL) { + g_error("g_mkdtemp: %s", g_strerror(errno)); } tmpdir = dir; tmpserver = g_strconcat(tmpdir, "/server", NULL); @@ -488,9 +507,10 @@ int main(int argc, char **argv) qtest_add_func("/ivshmem/memdev", test_ivshmem_memdev); if (g_test_slow()) { qtest_add_func("/ivshmem/pair", test_ivshmem_pair); - if (strcmp(arch, "ppc64") != 0) { - qtest_add_func("/ivshmem/server", test_ivshmem_server); - } + qtest_add_func("/ivshmem/server", test_ivshmem_server); + } + if (!strcmp(arch, "x86_64") && qtest_has_machine("q35")) { + qtest_add_func("/ivshmem/hotplug-q35", test_ivshmem_hotplug_q35); } out: diff --git a/tests/qtest/libqmp.c b/tests/qtest/libqmp.c new file mode 100644 index 0000000000..2b08382e5d --- /dev/null +++ b/tests/qtest/libqmp.c @@ -0,0 +1,258 @@ +/* + * QTest + * + * Copyright IBM, Corp. 2012 + * Copyright Red Hat, Inc. 
2012 + * Copyright SUSE LINUX Products GmbH 2013 + * + * Authors: + * Anthony Liguori + * Paolo Bonzini + * Andreas Färber + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" + +#include "libqmp.h" + +#ifndef _WIN32 +#include +#endif + +#include "qemu/cutils.h" +#include "qemu/sockets.h" +#include "qapi/error.h" +#include "qapi/qmp/json-parser.h" +#include "qapi/qmp/qjson.h" + +#define SOCKET_MAX_FDS 16 + +typedef struct { + JSONMessageParser parser; + QDict *response; +} QMPResponseParser; + +static void socket_send(int fd, const char *buf, size_t size) +{ + ssize_t res = qemu_send_full(fd, buf, size); + + assert(res == size); +} + +static void qmp_response(void *opaque, QObject *obj, Error *err) +{ + QMPResponseParser *qmp = opaque; + + assert(!obj != !err); + + if (err) { + error_prepend(&err, "QMP JSON response parsing failed: "); + error_report_err(err); + abort(); + } + + g_assert(!qmp->response); + qmp->response = qobject_to(QDict, obj); + g_assert(qmp->response); +} + +QDict *qmp_fd_receive(int fd) +{ + QMPResponseParser qmp; + bool log = getenv("QTEST_LOG") != NULL; + + qmp.response = NULL; + json_message_parser_init(&qmp.parser, qmp_response, &qmp, NULL); + while (!qmp.response) { + ssize_t len; + char c; + + len = recv(fd, &c, 1, 0); + if (len == -1 && errno == EINTR) { + continue; + } + + if (len == -1 || len == 0) { + fprintf(stderr, "Broken pipe\n"); + abort(); + } + + if (log) { + g_assert(write(2, &c, 1) == 1); + } + json_message_parser_feed(&qmp.parser, &c, 1); + } + if (log) { + g_assert(write(2, "\n", 1) == 1); + } + json_message_parser_destroy(&qmp.parser); + + return qmp.response; +} + +#ifndef _WIN32 +/* Sends a message and file descriptors to the socket. + * It's needed for qmp-commands like getfd/add-fd */ +static void socket_send_fds(int socket_fd, int *fds, size_t fds_num, + const char *buf, size_t buf_size) +{ + ssize_t ret; + struct msghdr msg = { 0 }; + char control[CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS)] = { 0 }; + size_t fdsize = sizeof(int) * fds_num; + struct cmsghdr *cmsg; + struct iovec iov = { .iov_base = (char *)buf, .iov_len = buf_size }; + + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + if (fds && fds_num > 0) { + g_assert_cmpuint(fds_num, <, SOCKET_MAX_FDS); + + msg.msg_control = control; + msg.msg_controllen = CMSG_SPACE(fdsize); + + cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_len = CMSG_LEN(fdsize); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; + memcpy(CMSG_DATA(cmsg), fds, fdsize); + } + + do { + ret = sendmsg(socket_fd, &msg, 0); + } while (ret < 0 && errno == EINTR); + g_assert_cmpint(ret, >, 0); +} +#endif + +/** + * Allow users to send a message without waiting for the reply, + * in the case that they choose to discard all replies up until + * a particular EVENT is received. + */ +static void +_qmp_fd_vsend_fds(int fd, int *fds, size_t fds_num, + const char *fmt, va_list ap) +{ + QObject *qobj; + +#ifdef _WIN32 + assert(fds_num == 0); +#endif + + /* Going through qobject ensures we escape strings properly */ + qobj = qobject_from_vjsonf_nofail(fmt, ap); + + /* No need to send anything for an empty QObject. */ + if (qobj) { + int log = getenv("QTEST_LOG") != NULL; + GString *str = qobject_to_json(qobj); + + /* + * BUG: QMP doesn't react to input until it sees a newline, an + * object, or an array. Work-around: give it a newline. 
+ */ + g_string_append_c(str, '\n'); + + if (log) { + fprintf(stderr, "%s", str->str); + } + +#ifndef _WIN32 + /* Send QMP request */ + if (fds && fds_num > 0) { + socket_send_fds(fd, fds, fds_num, str->str, str->len); + } else +#endif + { + socket_send(fd, str->str, str->len); + } + + g_string_free(str, true); + qobject_unref(qobj); + } +} + +#ifndef _WIN32 +void qmp_fd_vsend_fds(int fd, int *fds, size_t fds_num, + const char *fmt, va_list ap) +{ + _qmp_fd_vsend_fds(fd, fds, fds_num, fmt, ap); +} +#endif + +void qmp_fd_vsend(int fd, const char *fmt, va_list ap) +{ + _qmp_fd_vsend_fds(fd, NULL, 0, fmt, ap); +} + + +QDict *qmp_fdv(int fd, const char *fmt, va_list ap) +{ + _qmp_fd_vsend_fds(fd, NULL, 0, fmt, ap); + + return qmp_fd_receive(fd); +} + +QDict *qmp_fd(int fd, const char *fmt, ...) +{ + va_list ap; + QDict *response; + + va_start(ap, fmt); + response = qmp_fdv(fd, fmt, ap); + va_end(ap); + return response; +} + +void qmp_fd_send(int fd, const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + qmp_fd_vsend(fd, fmt, ap); + va_end(ap); +} + +void qmp_fd_vsend_raw(int fd, const char *fmt, va_list ap) +{ + bool log = getenv("QTEST_LOG") != NULL; + char *str = g_strdup_vprintf(fmt, ap); + + if (log) { + fprintf(stderr, "%s", str); + } + socket_send(fd, str, strlen(str)); + g_free(str); +} + +void qmp_fd_send_raw(int fd, const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + qmp_fd_vsend_raw(fd, fmt, ap); + va_end(ap); +} + +bool qmp_rsp_is_err(QDict *rsp) +{ + QDict *error = qdict_get_qdict(rsp, "error"); + qobject_unref(rsp); + return !!error; +} + +void qmp_expect_error_and_unref(QDict *rsp, const char *class) +{ + QDict *error = qdict_get_qdict(rsp, "error"); + + g_assert_cmpstr(qdict_get_try_str(error, "class"), ==, class); + g_assert_nonnull(qdict_get_try_str(error, "desc")); + g_assert(!qdict_haskey(rsp, "return")); + + qobject_unref(rsp); +} diff --git a/tests/qtest/libqmp.h b/tests/qtest/libqmp.h new file mode 100644 index 0000000000..3445b753ff --- /dev/null +++ b/tests/qtest/libqmp.h @@ -0,0 +1,53 @@ +/* + * libqmp test unit + * + * Copyright IBM, Corp. 2012 + * Copyright Red Hat, Inc. 2012 + * Copyright SUSE LINUX Products GmbH 2013 + * + * Authors: + * Anthony Liguori + * Paolo Bonzini + * Andreas Färber + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef LIBQMP_H +#define LIBQMP_H + +#include "qapi/qmp/qdict.h" + +QDict *qmp_fd_receive(int fd); +#ifndef _WIN32 +void qmp_fd_vsend_fds(int fd, int *fds, size_t fds_num, + const char *fmt, va_list ap) G_GNUC_PRINTF(4, 0); +#endif +void qmp_fd_vsend(int fd, const char *fmt, va_list ap) G_GNUC_PRINTF(2, 0); +void qmp_fd_send(int fd, const char *fmt, ...) G_GNUC_PRINTF(2, 3); +void qmp_fd_send_raw(int fd, const char *fmt, ...) G_GNUC_PRINTF(2, 3); +void qmp_fd_vsend_raw(int fd, const char *fmt, va_list ap) G_GNUC_PRINTF(2, 0); +QDict *qmp_fdv(int fd, const char *fmt, va_list ap) G_GNUC_PRINTF(2, 0); +QDict *qmp_fd(int fd, const char *fmt, ...) G_GNUC_PRINTF(2, 3); + +/** + * qmp_rsp_is_err: + * @rsp: QMP response to check for error + * + * Test @rsp for error and discard @rsp. + * Returns 'true' if there is error in @rsp and 'false' otherwise. + */ +bool qmp_rsp_is_err(QDict *rsp); + +/** + * qmp_expect_error_and_unref: + * @rsp: QMP response to check for error + * @class: an error class + * + * Assert the response has the given error class and discard @rsp. 
+ */ +void qmp_expect_error_and_unref(QDict *rsp, const char *class); + +#endif /* LIBQMP_H */ diff --git a/tests/qtest/libqos/aarch64-xlnx-zcu102-machine.c b/tests/qtest/libqos/aarch64-xlnx-zcu102-machine.c index 79631cc7a9..ab24add8eb 100644 --- a/tests/qtest/libqos/aarch64-xlnx-zcu102-machine.c +++ b/tests/qtest/libqos/aarch64-xlnx-zcu102-machine.c @@ -17,9 +17,9 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qemu/module.h" -#include "malloc.h" +#include "libqos-malloc.h" #include "qgraph.h" #include "sdhci.h" diff --git a/tests/qtest/libqos/ahci.c b/tests/qtest/libqos/ahci.c index fba3e7a954..f53f12aa99 100644 --- a/tests/qtest/libqos/ahci.c +++ b/tests/qtest/libqos/ahci.c @@ -24,11 +24,10 @@ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "ahci.h" #include "pci-pc.h" -#include "qemu-common.h" #include "qemu/host-utils.h" #include "hw/pci/pci_ids.h" @@ -639,8 +638,8 @@ void ahci_exec(AHCIQState *ahci, uint8_t port, AHCIOpts *opts; uint64_t buffer_in; - opts = g_memdup((opts_in == NULL ? &default_opts : opts_in), - sizeof(AHCIOpts)); + opts = g_memdup2((opts_in == NULL ? &default_opts : opts_in), + sizeof(AHCIOpts)); buffer_in = opts->buffer; @@ -860,7 +859,7 @@ AHCICommand *ahci_command_create(uint8_t command_name) g_assert(!props->ncq || props->lba48); /* Defaults and book-keeping */ - cmd->props = g_memdup(props, sizeof(AHCICommandProp)); + cmd->props = g_memdup2(props, sizeof(AHCICommandProp)); cmd->name = command_name; cmd->xbytes = props->size; cmd->prd_size = 4096; diff --git a/tests/qtest/libqos/arm-imx25-pdk-machine.c b/tests/qtest/libqos/arm-imx25-pdk-machine.c index 6692adfa4f..8fe128fae8 100644 --- a/tests/qtest/libqos/arm-imx25-pdk-machine.c +++ b/tests/qtest/libqos/arm-imx25-pdk-machine.c @@ -19,8 +19,8 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" -#include "malloc.h" +#include "../libqtest.h" +#include "libqos-malloc.h" #include "qgraph.h" #include "i2c.h" diff --git a/tests/qtest/libqos/arm-n800-machine.c b/tests/qtest/libqos/arm-n800-machine.c index ff2049c3a7..4e5afe0164 100644 --- a/tests/qtest/libqos/arm-n800-machine.c +++ b/tests/qtest/libqos/arm-n800-machine.c @@ -19,8 +19,8 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" -#include "malloc.h" +#include "../libqtest.h" +#include "libqos-malloc.h" #include "qgraph.h" #include "i2c.h" diff --git a/tests/qtest/libqos/arm-raspi2-machine.c b/tests/qtest/libqos/arm-raspi2-machine.c index 35bb4709a4..367c6c17a5 100644 --- a/tests/qtest/libqos/arm-raspi2-machine.c +++ b/tests/qtest/libqos/arm-raspi2-machine.c @@ -17,9 +17,9 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qemu/module.h" -#include "malloc.h" +#include "libqos-malloc.h" #include "qgraph.h" #include "sdhci.h" @@ -42,7 +42,7 @@ static void *raspi2_get_driver(void *object, const char *interface) return &machine->alloc; } - fprintf(stderr, "%s not present in arm/raspi2\n", interface); + fprintf(stderr, "%s not present in arm/raspi2b\n", interface); g_assert_not_reached(); } @@ -53,7 +53,7 @@ static QOSGraphObject *raspi2_get_device(void *obj, const char *device) return &machine->sdhci.obj; } - fprintf(stderr, "%s not present in arm/raspi2\n", device); + fprintf(stderr, "%s not present in arm/raspi2b\n", device); g_assert_not_reached(); } @@ -85,8 +85,8 @@ static void *qos_create_machine_arm_raspi2(QTestState *qts) static void raspi2_register_nodes(void) { - qos_node_create_machine("arm/raspi2", qos_create_machine_arm_raspi2); - 
qos_node_contains("arm/raspi2", "generic-sdhci", NULL); + qos_node_create_machine("arm/raspi2b", qos_create_machine_arm_raspi2); + qos_node_contains("arm/raspi2b", "generic-sdhci", NULL); } libqos_init(raspi2_register_nodes); diff --git a/tests/qtest/libqos/arm-sabrelite-machine.c b/tests/qtest/libqos/arm-sabrelite-machine.c index 72425f0ad4..94f6a20fc7 100644 --- a/tests/qtest/libqos/arm-sabrelite-machine.c +++ b/tests/qtest/libqos/arm-sabrelite-machine.c @@ -17,9 +17,9 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qemu/module.h" -#include "malloc.h" +#include "libqos-malloc.h" #include "qgraph.h" #include "sdhci.h" diff --git a/tests/qtest/libqos/arm-smdkc210-machine.c b/tests/qtest/libqos/arm-smdkc210-machine.c index 321b8826d4..9bbce924ea 100644 --- a/tests/qtest/libqos/arm-smdkc210-machine.c +++ b/tests/qtest/libqos/arm-smdkc210-machine.c @@ -17,9 +17,9 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qemu/module.h" -#include "malloc.h" +#include "libqos-malloc.h" #include "qgraph.h" #include "sdhci.h" diff --git a/tests/qtest/libqos/arm-virt-machine.c b/tests/qtest/libqos/arm-virt-machine.c index e0f5932284..4e87405b58 100644 --- a/tests/qtest/libqos/arm-virt-machine.c +++ b/tests/qtest/libqos/arm-virt-machine.c @@ -17,11 +17,13 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qemu/module.h" -#include "malloc.h" +#include "libqos-malloc.h" #include "qgraph.h" #include "virtio-mmio.h" +#include "generic-pcihost.h" +#include "hw/pci/pci_regs.h" #define ARM_PAGE_SIZE 4096 #define VIRTIO_MMIO_BASE_ADDR 0x0A003E00 @@ -35,6 +37,7 @@ struct QVirtMachine { QOSGraphObject obj; QGuestAllocator alloc; QVirtioMMIODevice virtio_mmio; + QGenericPCIHost bridge; }; static void virt_destructor(QOSGraphObject *obj) @@ -57,11 +60,13 @@ static void *virt_get_driver(void *object, const char *interface) static QOSGraphObject *virt_get_device(void *obj, const char *device) { QVirtMachine *machine = obj; - if (!g_strcmp0(device, "virtio-mmio")) { + if (!g_strcmp0(device, "generic-pcihost")) { + return &machine->bridge.obj; + } else if (!g_strcmp0(device, "virtio-mmio")) { return &machine->virtio_mmio.obj; } - fprintf(stderr, "%s not present in arm/virtio\n", device); + fprintf(stderr, "%s not present in arm/virt\n", device); g_assert_not_reached(); } @@ -76,16 +81,22 @@ static void *qos_create_machine_arm_virt(QTestState *qts) qvirtio_mmio_init_device(&machine->virtio_mmio, qts, VIRTIO_MMIO_BASE_ADDR, VIRTIO_MMIO_SIZE); + qos_create_generic_pcihost(&machine->bridge, qts, &machine->alloc); + machine->obj.get_device = virt_get_device; machine->obj.get_driver = virt_get_driver; machine->obj.destructor = virt_destructor; return machine; } -static void virtio_mmio_register_nodes(void) +static void virt_machine_register_nodes(void) { qos_node_create_machine("arm/virt", qos_create_machine_arm_virt); qos_node_contains("arm/virt", "virtio-mmio", NULL); + + qos_node_create_machine_args("aarch64/virt", qos_create_machine_arm_virt, + " -cpu max"); + qos_node_contains("aarch64/virt", "generic-pcihost", NULL); } -libqos_init(virtio_mmio_register_nodes); +libqos_init(virt_machine_register_nodes); diff --git a/tests/qtest/libqos/arm-xilinx-zynq-a9-machine.c b/tests/qtest/libqos/arm-xilinx-zynq-a9-machine.c index 56e53c745b..daac762a06 100644 --- a/tests/qtest/libqos/arm-xilinx-zynq-a9-machine.c +++ b/tests/qtest/libqos/arm-xilinx-zynq-a9-machine.c @@ -17,9 +17,9 @@ */ #include "qemu/osdep.h" -#include 
"libqtest.h" +#include "../libqtest.h" #include "qemu/module.h" -#include "malloc.h" +#include "libqos-malloc.h" #include "qgraph.h" #include "sdhci.h" diff --git a/tests/qtest/libqos/e1000e.c b/tests/qtest/libqos/e1000e.c index a451f6168f..80b3e3db90 100644 --- a/tests/qtest/libqos/e1000e.c +++ b/tests/qtest/libqos/e1000e.c @@ -17,59 +17,25 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "hw/net/e1000_regs.h" +#include "hw/pci/pci_ids.h" +#include "../libqtest.h" #include "pci-pc.h" #include "qemu/sockets.h" #include "qemu/iov.h" #include "qemu/module.h" #include "qemu/bitops.h" -#include "malloc.h" +#include "libqos-malloc.h" #include "qgraph.h" #include "e1000e.h" -#define E1000E_IMS (0x00d0) +#define E1000E_IVAR_TEST_CFG \ + (((E1000E_RX0_MSG_ID | E1000_IVAR_INT_ALLOC_VALID) << E1000_IVAR_RXQ0_SHIFT) | \ + ((E1000E_TX0_MSG_ID | E1000_IVAR_INT_ALLOC_VALID) << E1000_IVAR_TXQ0_SHIFT) | \ + ((E1000E_OTHER_MSG_ID | E1000_IVAR_INT_ALLOC_VALID) << E1000_IVAR_OTHER_SHIFT) | \ + E1000_IVAR_TX_INT_EVERY_WB) -#define E1000E_STATUS (0x0008) -#define E1000E_STATUS_LU BIT(1) -#define E1000E_STATUS_ASDV1000 BIT(9) - -#define E1000E_CTRL (0x0000) -#define E1000E_CTRL_RESET BIT(26) - -#define E1000E_RCTL (0x0100) -#define E1000E_RCTL_EN BIT(1) -#define E1000E_RCTL_UPE BIT(3) -#define E1000E_RCTL_MPE BIT(4) - -#define E1000E_RFCTL (0x5008) -#define E1000E_RFCTL_EXTEN BIT(15) - -#define E1000E_TCTL (0x0400) -#define E1000E_TCTL_EN BIT(1) - -#define E1000E_CTRL_EXT (0x0018) -#define E1000E_CTRL_EXT_DRV_LOAD BIT(28) -#define E1000E_CTRL_EXT_TXLSFLOW BIT(22) - -#define E1000E_IVAR (0x00E4) -#define E1000E_IVAR_TEST_CFG ((E1000E_RX0_MSG_ID << 0) | BIT(3) | \ - (E1000E_TX0_MSG_ID << 8) | BIT(11) | \ - (E1000E_OTHER_MSG_ID << 16) | BIT(19) | \ - BIT(31)) - -#define E1000E_RING_LEN (0x1000) - -#define E1000E_TDBAL (0x3800) - -#define E1000E_TDBAH (0x3804) -#define E1000E_TDH (0x3810) - -#define E1000E_RDBAL (0x2800) -#define E1000E_RDBAH (0x2804) -#define E1000E_RDH (0x2810) - -#define E1000E_TXD_LEN (16) -#define E1000E_RXD_LEN (16) +#define E1000E_RING_LEN (0x1000) static void e1000e_macreg_write(QE1000E *d, uint32_t reg, uint32_t val) { @@ -87,30 +53,34 @@ void e1000e_tx_ring_push(QE1000E *d, void *descr) { QE1000E_PCI *d_pci = container_of(d, QE1000E_PCI, e1000e); uint32_t tail = e1000e_macreg_read(d, E1000E_TDT); - uint32_t len = e1000e_macreg_read(d, E1000E_TDLEN) / E1000E_TXD_LEN; + uint32_t len = e1000e_macreg_read(d, E1000E_TDLEN) / E1000_RING_DESC_LEN; - qtest_memwrite(d_pci->pci_dev.bus->qts, d->tx_ring + tail * E1000E_TXD_LEN, - descr, E1000E_TXD_LEN); + qtest_memwrite(d_pci->pci_dev.bus->qts, + d->tx_ring + tail * E1000_RING_DESC_LEN, + descr, E1000_RING_DESC_LEN); e1000e_macreg_write(d, E1000E_TDT, (tail + 1) % len); /* Read WB data for the packet transmitted */ - qtest_memread(d_pci->pci_dev.bus->qts, d->tx_ring + tail * E1000E_TXD_LEN, - descr, E1000E_TXD_LEN); + qtest_memread(d_pci->pci_dev.bus->qts, + d->tx_ring + tail * E1000_RING_DESC_LEN, + descr, E1000_RING_DESC_LEN); } void e1000e_rx_ring_push(QE1000E *d, void *descr) { QE1000E_PCI *d_pci = container_of(d, QE1000E_PCI, e1000e); uint32_t tail = e1000e_macreg_read(d, E1000E_RDT); - uint32_t len = e1000e_macreg_read(d, E1000E_RDLEN) / E1000E_RXD_LEN; + uint32_t len = e1000e_macreg_read(d, E1000E_RDLEN) / E1000_RING_DESC_LEN; - qtest_memwrite(d_pci->pci_dev.bus->qts, d->rx_ring + tail * E1000E_RXD_LEN, - descr, E1000E_RXD_LEN); + qtest_memwrite(d_pci->pci_dev.bus->qts, + d->rx_ring + tail * E1000_RING_DESC_LEN, + descr, 
E1000_RING_DESC_LEN); e1000e_macreg_write(d, E1000E_RDT, (tail + 1) % len); /* Read WB data for the packet received */ - qtest_memread(d_pci->pci_dev.bus->qts, d->rx_ring + tail * E1000E_RXD_LEN, - descr, E1000E_RXD_LEN); + qtest_memread(d_pci->pci_dev.bus->qts, + d->rx_ring + tail * E1000_RING_DESC_LEN, + descr, E1000_RING_DESC_LEN); } static void e1000e_foreach_callback(QPCIDevice *dev, int devfn, void *data) @@ -151,53 +121,53 @@ static void e1000e_pci_start_hw(QOSGraphObject *obj) qpci_device_enable(&d->pci_dev); /* Reset the device */ - val = e1000e_macreg_read(&d->e1000e, E1000E_CTRL); - e1000e_macreg_write(&d->e1000e, E1000E_CTRL, val | E1000E_CTRL_RESET); + val = e1000e_macreg_read(&d->e1000e, E1000_CTRL); + e1000e_macreg_write(&d->e1000e, E1000_CTRL, val | E1000_CTRL_RST | E1000_CTRL_SLU); /* Enable and configure MSI-X */ qpci_msix_enable(&d->pci_dev); - e1000e_macreg_write(&d->e1000e, E1000E_IVAR, E1000E_IVAR_TEST_CFG); + e1000e_macreg_write(&d->e1000e, E1000_IVAR, E1000E_IVAR_TEST_CFG); /* Check the device status - link and speed */ - val = e1000e_macreg_read(&d->e1000e, E1000E_STATUS); - g_assert_cmphex(val & (E1000E_STATUS_LU | E1000E_STATUS_ASDV1000), - ==, E1000E_STATUS_LU | E1000E_STATUS_ASDV1000); + val = e1000e_macreg_read(&d->e1000e, E1000_STATUS); + g_assert_cmphex(val & (E1000_STATUS_LU | E1000_STATUS_ASDV_1000), + ==, E1000_STATUS_LU | E1000_STATUS_ASDV_1000); /* Initialize TX/RX logic */ - e1000e_macreg_write(&d->e1000e, E1000E_RCTL, 0); - e1000e_macreg_write(&d->e1000e, E1000E_TCTL, 0); + e1000e_macreg_write(&d->e1000e, E1000_RCTL, 0); + e1000e_macreg_write(&d->e1000e, E1000_TCTL, 0); /* Notify the device that the driver is ready */ - val = e1000e_macreg_read(&d->e1000e, E1000E_CTRL_EXT); - e1000e_macreg_write(&d->e1000e, E1000E_CTRL_EXT, - val | E1000E_CTRL_EXT_DRV_LOAD | E1000E_CTRL_EXT_TXLSFLOW); + val = e1000e_macreg_read(&d->e1000e, E1000_CTRL_EXT); + e1000e_macreg_write(&d->e1000e, E1000_CTRL_EXT, + val | E1000_CTRL_EXT_DRV_LOAD); - e1000e_macreg_write(&d->e1000e, E1000E_TDBAL, + e1000e_macreg_write(&d->e1000e, E1000_TDBAL, (uint32_t) d->e1000e.tx_ring); - e1000e_macreg_write(&d->e1000e, E1000E_TDBAH, + e1000e_macreg_write(&d->e1000e, E1000_TDBAH, (uint32_t) (d->e1000e.tx_ring >> 32)); e1000e_macreg_write(&d->e1000e, E1000E_TDLEN, E1000E_RING_LEN); e1000e_macreg_write(&d->e1000e, E1000E_TDT, 0); - e1000e_macreg_write(&d->e1000e, E1000E_TDH, 0); + e1000e_macreg_write(&d->e1000e, E1000_TDH, 0); /* Enable transmit */ - e1000e_macreg_write(&d->e1000e, E1000E_TCTL, E1000E_TCTL_EN); - e1000e_macreg_write(&d->e1000e, E1000E_RDBAL, + e1000e_macreg_write(&d->e1000e, E1000_TCTL, E1000_TCTL_EN); + e1000e_macreg_write(&d->e1000e, E1000_RDBAL, (uint32_t)d->e1000e.rx_ring); - e1000e_macreg_write(&d->e1000e, E1000E_RDBAH, + e1000e_macreg_write(&d->e1000e, E1000_RDBAH, (uint32_t)(d->e1000e.rx_ring >> 32)); e1000e_macreg_write(&d->e1000e, E1000E_RDLEN, E1000E_RING_LEN); e1000e_macreg_write(&d->e1000e, E1000E_RDT, 0); - e1000e_macreg_write(&d->e1000e, E1000E_RDH, 0); + e1000e_macreg_write(&d->e1000e, E1000_RDH, 0); /* Enable receive */ - e1000e_macreg_write(&d->e1000e, E1000E_RFCTL, E1000E_RFCTL_EXTEN); - e1000e_macreg_write(&d->e1000e, E1000E_RCTL, E1000E_RCTL_EN | - E1000E_RCTL_UPE | - E1000E_RCTL_MPE); + e1000e_macreg_write(&d->e1000e, E1000_RFCTL, E1000_RFCTL_EXTEN); + e1000e_macreg_write(&d->e1000e, E1000_RCTL, E1000_RCTL_EN | + E1000_RCTL_UPE | + E1000_RCTL_MPE); /* Enable all interrupts */ - e1000e_macreg_write(&d->e1000e, E1000E_IMS, 0xFFFFFFFF); + 
e1000e_macreg_write(&d->e1000e, E1000_IMS, 0xFFFFFFFF); } @@ -248,8 +218,8 @@ static void *e1000e_pci_create(void *pci_bus, QGuestAllocator *alloc, static void e1000e_register_nodes(void) { QPCIAddress addr = { - .vendor_id = 0x8086, - .device_id = 0x10D3, + .vendor_id = PCI_VENDOR_ID_INTEL, + .device_id = E1000_DEV_ID_82574L, }; /* FIXME: every test using this node needs to setup a -netdev socket,id=hs0 diff --git a/tests/qtest/libqos/fw_cfg.c b/tests/qtest/libqos/fw_cfg.c index 6b8e1babe5..89f053ccac 100644 --- a/tests/qtest/libqos/fw_cfg.c +++ b/tests/qtest/libqos/fw_cfg.c @@ -14,7 +14,7 @@ #include "qemu/osdep.h" #include "fw_cfg.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qemu/bswap.h" #include "hw/nvram/fw_cfg.h" diff --git a/tests/qtest/libqos/fw_cfg.h b/tests/qtest/libqos/fw_cfg.h index c6a7cf8cf0..b0456a15df 100644 --- a/tests/qtest/libqos/fw_cfg.h +++ b/tests/qtest/libqos/fw_cfg.h @@ -13,7 +13,7 @@ #ifndef LIBQOS_FW_CFG_H #define LIBQOS_FW_CFG_H -#include "libqtest.h" +#include "../libqtest.h" typedef struct QFWCFG QFWCFG; diff --git a/tests/qtest/libqos/generic-pcihost.c b/tests/qtest/libqos/generic-pcihost.c new file mode 100644 index 0000000000..3124b0e46b --- /dev/null +++ b/tests/qtest/libqos/generic-pcihost.c @@ -0,0 +1,231 @@ +/* + * libqos PCI bindings for generic PCI + * + * Copyright Red Hat Inc., 2022 + * + * Authors: + * Eric Auger + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "../libqtest.h" +#include "generic-pcihost.h" +#include "qapi/qmp/qdict.h" +#include "hw/pci/pci_regs.h" +#include "qemu/host-utils.h" + +#include "qemu/module.h" + +/* QGenericPCIHost */ + +QOSGraphObject *generic_pcihost_get_device(void *obj, const char *device) +{ + QGenericPCIHost *host = obj; + if (!g_strcmp0(device, "pci-bus-generic")) { + return &host->pci.obj; + } + fprintf(stderr, "%s not present in generic-pcihost\n", device); + g_assert_not_reached(); +} + +void qos_create_generic_pcihost(QGenericPCIHost *host, + QTestState *qts, + QGuestAllocator *alloc) +{ + host->obj.get_device = generic_pcihost_get_device; + qpci_init_generic(&host->pci, qts, alloc, false); +} + +static uint8_t qpci_generic_pio_readb(QPCIBus *bus, uint32_t addr) +{ + QGenericPCIBus *s = container_of(bus, QGenericPCIBus, bus); + + return qtest_readb(bus->qts, s->gpex_pio_base + addr); +} + +static void qpci_generic_pio_writeb(QPCIBus *bus, uint32_t addr, uint8_t val) +{ + QGenericPCIBus *s = container_of(bus, QGenericPCIBus, bus); + + qtest_writeb(bus->qts, s->gpex_pio_base + addr, val); +} + +static uint16_t qpci_generic_pio_readw(QPCIBus *bus, uint32_t addr) +{ + QGenericPCIBus *s = container_of(bus, QGenericPCIBus, bus); + + return qtest_readw(bus->qts, s->gpex_pio_base + addr); +} + +static void qpci_generic_pio_writew(QPCIBus *bus, uint32_t addr, uint16_t val) +{ + QGenericPCIBus *s = container_of(bus, QGenericPCIBus, bus); + + qtest_writew(bus->qts, s->gpex_pio_base + addr, val); +} + +static uint32_t qpci_generic_pio_readl(QPCIBus *bus, uint32_t addr) +{ + QGenericPCIBus *s = container_of(bus, QGenericPCIBus, bus); + + return qtest_readl(bus->qts, s->gpex_pio_base + addr); +} + +static void qpci_generic_pio_writel(QPCIBus *bus, uint32_t addr, uint32_t val) +{ + QGenericPCIBus *s = container_of(bus, QGenericPCIBus, bus); + + qtest_writel(bus->qts, s->gpex_pio_base + addr, val); +} + +static uint64_t qpci_generic_pio_readq(QPCIBus *bus, uint32_t addr) +{ + 
QGenericPCIBus *s = container_of(bus, QGenericPCIBus, bus); + + return qtest_readq(bus->qts, s->gpex_pio_base + addr); +} + +static void qpci_generic_pio_writeq(QPCIBus *bus, uint32_t addr, uint64_t val) +{ + QGenericPCIBus *s = container_of(bus, QGenericPCIBus, bus); + + qtest_writeq(bus->qts, s->gpex_pio_base + addr, val); +} + +static void qpci_generic_memread(QPCIBus *bus, uint32_t addr, void *buf, size_t len) +{ + qtest_memread(bus->qts, addr, buf, len); +} + +static void qpci_generic_memwrite(QPCIBus *bus, uint32_t addr, + const void *buf, size_t len) +{ + qtest_memwrite(bus->qts, addr, buf, len); +} + +static uint8_t qpci_generic_config_readb(QPCIBus *bus, int devfn, uint8_t offset) +{ + QGenericPCIBus *gbus = container_of(bus, QGenericPCIBus, bus); + uint64_t addr = gbus->ecam_alloc_ptr + ((0 << 20) | (devfn << 12) | offset); + uint8_t val; + + qtest_memread(bus->qts, addr, &val, 1); + return val; +} + +static uint16_t qpci_generic_config_readw(QPCIBus *bus, int devfn, uint8_t offset) +{ + QGenericPCIBus *gbus = container_of(bus, QGenericPCIBus, bus); + uint64_t addr = gbus->ecam_alloc_ptr + ((0 << 20) | (devfn << 12) | offset); + uint16_t val; + + qtest_memread(bus->qts, addr, &val, 2); + return le16_to_cpu(val); +} + +static uint32_t qpci_generic_config_readl(QPCIBus *bus, int devfn, uint8_t offset) +{ + QGenericPCIBus *gbus = container_of(bus, QGenericPCIBus, bus); + uint64_t addr = gbus->ecam_alloc_ptr + ((0 << 20) | (devfn << 12) | offset); + uint32_t val; + + qtest_memread(bus->qts, addr, &val, 4); + return le32_to_cpu(val); +} + +static void +qpci_generic_config_writeb(QPCIBus *bus, int devfn, uint8_t offset, uint8_t value) +{ + QGenericPCIBus *gbus = container_of(bus, QGenericPCIBus, bus); + uint64_t addr = gbus->ecam_alloc_ptr + ((0 << 20) | (devfn << 12) | offset); + + qtest_memwrite(bus->qts, addr, &value, 1); +} + +static void +qpci_generic_config_writew(QPCIBus *bus, int devfn, uint8_t offset, uint16_t value) +{ + QGenericPCIBus *gbus = container_of(bus, QGenericPCIBus, bus); + uint64_t addr = gbus->ecam_alloc_ptr + ((0 << 20) | (devfn << 12) | offset); + uint16_t val = cpu_to_le16(value); + + qtest_memwrite(bus->qts, addr, &val, 2); +} + +static void +qpci_generic_config_writel(QPCIBus *bus, int devfn, uint8_t offset, uint32_t value) +{ + QGenericPCIBus *gbus = container_of(bus, QGenericPCIBus, bus); + uint64_t addr = gbus->ecam_alloc_ptr + ((0 << 20) | (devfn << 12) | offset); + uint32_t val = cpu_to_le32(value); + + qtest_memwrite(bus->qts, addr, &val, 4); +} + +static void *qpci_generic_get_driver(void *obj, const char *interface) +{ + QGenericPCIBus *qpci = obj; + if (!g_strcmp0(interface, "pci-bus")) { + return &qpci->bus; + } + fprintf(stderr, "%s not present in pci-bus-generic\n", interface); + g_assert_not_reached(); +} + +void qpci_init_generic(QGenericPCIBus *qpci, QTestState *qts, + QGuestAllocator *alloc, bool hotpluggable) +{ + assert(qts); + + qpci->gpex_pio_base = 0x3eff0000; + qpci->bus.not_hotpluggable = !hotpluggable; + qpci->bus.has_buggy_msi = false; + + qpci->bus.pio_readb = qpci_generic_pio_readb; + qpci->bus.pio_readw = qpci_generic_pio_readw; + qpci->bus.pio_readl = qpci_generic_pio_readl; + qpci->bus.pio_readq = qpci_generic_pio_readq; + + qpci->bus.pio_writeb = qpci_generic_pio_writeb; + qpci->bus.pio_writew = qpci_generic_pio_writew; + qpci->bus.pio_writel = qpci_generic_pio_writel; + qpci->bus.pio_writeq = qpci_generic_pio_writeq; + + qpci->bus.memread = qpci_generic_memread; + qpci->bus.memwrite = qpci_generic_memwrite; + + 
qpci->bus.config_readb = qpci_generic_config_readb; + qpci->bus.config_readw = qpci_generic_config_readw; + qpci->bus.config_readl = qpci_generic_config_readl; + + qpci->bus.config_writeb = qpci_generic_config_writeb; + qpci->bus.config_writew = qpci_generic_config_writew; + qpci->bus.config_writel = qpci_generic_config_writel; + + qpci->bus.qts = qts; + qpci->bus.pio_alloc_ptr = 0x0000; + qpci->bus.pio_limit = 0x10000; + qpci->bus.mmio_alloc_ptr = 0x10000000; + qpci->bus.mmio_limit = 0x2eff0000; + qpci->ecam_alloc_ptr = 0x4010000000; + + qpci->obj.get_driver = qpci_generic_get_driver; +} + +static void qpci_generic_register_nodes(void) +{ + qos_node_create_driver("pci-bus-generic", NULL); + qos_node_produces("pci-bus-generic", "pci-bus"); +} + +static void qpci_generic_pci_register_nodes(void) +{ + qos_node_create_driver("generic-pcihost", NULL); + qos_node_contains("generic-pcihost", "pci-bus-generic", NULL); +} + +libqos_init(qpci_generic_register_nodes); +libqos_init(qpci_generic_pci_register_nodes); diff --git a/tests/qtest/libqos/generic-pcihost.h b/tests/qtest/libqos/generic-pcihost.h new file mode 100644 index 0000000000..6493a8712a --- /dev/null +++ b/tests/qtest/libqos/generic-pcihost.h @@ -0,0 +1,54 @@ +/* + * libqos Generic PCI bindings and generic pci host bridge + * + * Copyright Red Hat Inc., 2022 + * + * Authors: + * Eric Auger + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef LIBQOS_GENERIC_PCIHOST_H +#define LIBQOS_GENERIC_PCIHOST_H + +#include "pci.h" +#include "libqos-malloc.h" +#include "qgraph.h" + +typedef struct QGenericPCIBus { + QOSGraphObject obj; + QPCIBus bus; + uint64_t gpex_pio_base; + uint64_t ecam_alloc_ptr; +} QGenericPCIBus; + +/* + * qpci_init_generic(): + * @ret: A valid QGenericPCIBus * pointer + * @qts: The %QTestState + * @alloc: A previously initialized @alloc providing memory for @qts + * @bool: devices can be hotplugged on this bus + * + * This function initializes an already allocated + * QGenericPCIBus object. 
+ */ +void qpci_init_generic(QGenericPCIBus *ret, QTestState *qts, + QGuestAllocator *alloc, bool hotpluggable); + +/* QGenericPCIHost */ + +typedef struct QGenericPCIHost QGenericPCIHost; + +struct QGenericPCIHost { + QOSGraphObject obj; + QGenericPCIBus pci; +}; + +QOSGraphObject *generic_pcihost_get_device(void *obj, const char *device); +void qos_create_generic_pcihost(QGenericPCIHost *host, + QTestState *qts, + QGuestAllocator *alloc); + +#endif diff --git a/tests/qtest/libqos/i2c-imx.c b/tests/qtest/libqos/i2c-imx.c index 8f9a7e3831..710cb926d6 100644 --- a/tests/qtest/libqos/i2c-imx.c +++ b/tests/qtest/libqos/i2c-imx.c @@ -21,7 +21,7 @@ #include "i2c.h" -#include "libqtest.h" +#include "../libqtest.h" #include "hw/i2c/imx_i2c.h" diff --git a/tests/qtest/libqos/i2c-omap.c b/tests/qtest/libqos/i2c-omap.c index eb4e453485..6f98f54820 100644 --- a/tests/qtest/libqos/i2c-omap.c +++ b/tests/qtest/libqos/i2c-omap.c @@ -11,7 +11,7 @@ #include "qemu/bswap.h" -#include "libqtest.h" +#include "../libqtest.h" enum OMAPI2CRegisters { OMAP_I2C_REV = 0x00, diff --git a/tests/qtest/libqos/i2c.c b/tests/qtest/libqos/i2c.c index ade1bdb40e..1a54c004eb 100644 --- a/tests/qtest/libqos/i2c.c +++ b/tests/qtest/libqos/i2c.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" #include "i2c.h" -#include "libqtest.h" +#include "../libqtest.h" void qi2c_send(QI2CDevice *i2cdev, const uint8_t *buf, uint16_t len) { diff --git a/tests/qtest/libqos/i2c.h b/tests/qtest/libqos/i2c.h index 1341bac1c5..d032240922 100644 --- a/tests/qtest/libqos/i2c.h +++ b/tests/qtest/libqos/i2c.h @@ -9,7 +9,7 @@ #ifndef LIBQOS_I2C_H #define LIBQOS_I2C_H -#include "libqtest.h" +#include "../libqtest.h" #include "qgraph.h" typedef struct I2CAdapter I2CAdapter; diff --git a/tests/qtest/libqos/malloc.c b/tests/qtest/libqos/libqos-malloc.c similarity index 99% rename from tests/qtest/libqos/malloc.c rename to tests/qtest/libqos/libqos-malloc.c index f708b01432..d7566972c4 100644 --- a/tests/qtest/libqos/malloc.c +++ b/tests/qtest/libqos/libqos-malloc.c @@ -11,8 +11,7 @@ */ #include "qemu/osdep.h" -#include "malloc.h" -#include "qemu-common.h" +#include "libqos-malloc.h" #include "qemu/host-utils.h" typedef struct MemBlock { diff --git a/tests/qtest/libqos/malloc.h b/tests/qtest/libqos/libqos-malloc.h similarity index 97% rename from tests/qtest/libqos/malloc.h rename to tests/qtest/libqos/libqos-malloc.h index 4d1a2e2bef..bbb8c743cc 100644 --- a/tests/qtest/libqos/malloc.h +++ b/tests/qtest/libqos/libqos-malloc.h @@ -14,7 +14,7 @@ #define LIBQOS_MALLOC_H #include "qemu/queue.h" -#include "libqtest.h" +#include "../libqtest.h" typedef enum { ALLOC_NO_FLAGS = 0x00, diff --git a/tests/qtest/libqos/libqos.c b/tests/qtest/libqos/libqos.c index 2251e864ef..5ffda080ec 100644 --- a/tests/qtest/libqos/libqos.c +++ b/tests/qtest/libqos/libqos.c @@ -1,7 +1,5 @@ #include "qemu/osdep.h" -#include - -#include "libqtest.h" +#include "../libqtest.h" #include "libqos.h" #include "pci.h" #include "qapi/qmp/qdict.h" diff --git a/tests/qtest/libqos/libqos.h b/tests/qtest/libqos/libqos.h index e0b2bfe7ca..9b4dd509f0 100644 --- a/tests/qtest/libqos/libqos.h +++ b/tests/qtest/libqos/libqos.h @@ -1,9 +1,9 @@ #ifndef LIBQOS_H #define LIBQOS_H -#include "libqtest.h" +#include "../libqtest.h" #include "pci.h" -#include "malloc.h" +#include "libqos-malloc.h" typedef struct QOSState QOSState; diff --git a/tests/qtest/libqos/malloc-pc.c b/tests/qtest/libqos/malloc-pc.c index f1e3b392a5..bbd1b4827e 100644 --- a/tests/qtest/libqos/malloc-pc.c +++ 
b/tests/qtest/libqos/malloc-pc.c @@ -16,8 +16,6 @@ #include "standard-headers/linux/qemu_fw_cfg.h" -#include "qemu-common.h" - #define ALLOC_PAGE_SIZE (4096) void pc_alloc_init(QGuestAllocator *s, QTestState *qts, QAllocOpts flags) diff --git a/tests/qtest/libqos/malloc-pc.h b/tests/qtest/libqos/malloc-pc.h index d8d79853c8..e531473601 100644 --- a/tests/qtest/libqos/malloc-pc.h +++ b/tests/qtest/libqos/malloc-pc.h @@ -13,7 +13,7 @@ #ifndef LIBQOS_MALLOC_PC_H #define LIBQOS_MALLOC_PC_H -#include "malloc.h" +#include "libqos-malloc.h" void pc_alloc_init(QGuestAllocator *s, QTestState *qts, QAllocOpts flags); diff --git a/tests/qtest/libqos/malloc-spapr.c b/tests/qtest/libqos/malloc-spapr.c index 05b306c191..d90ed3c51d 100644 --- a/tests/qtest/libqos/malloc-spapr.c +++ b/tests/qtest/libqos/malloc-spapr.c @@ -8,8 +8,6 @@ #include "qemu/osdep.h" #include "malloc-spapr.h" -#include "qemu-common.h" - #define SPAPR_PAGE_SIZE 4096 /* Memory must be a multiple of 256 MB, diff --git a/tests/qtest/libqos/malloc-spapr.h b/tests/qtest/libqos/malloc-spapr.h index f99572fd71..f544c0d611 100644 --- a/tests/qtest/libqos/malloc-spapr.h +++ b/tests/qtest/libqos/malloc-spapr.h @@ -8,7 +8,7 @@ #ifndef LIBQOS_MALLOC_SPAPR_H #define LIBQOS_MALLOC_SPAPR_H -#include "malloc.h" +#include "libqos-malloc.h" void spapr_alloc_init(QGuestAllocator *s, QTestState *qts, QAllocOpts flags); diff --git a/tests/qtest/libqos/meson.build b/tests/qtest/libqos/meson.build index 1f5c8f1053..32f028872c 100644 --- a/tests/qtest/libqos/meson.build +++ b/tests/qtest/libqos/meson.build @@ -1,10 +1,14 @@ -libqos_srcs = files('../libqtest.c', +libqos_srcs = files( + '../libqtest.c', + '../libqmp.c', + 'qgraph.c', 'qos_external.c', 'pci.c', 'fw_cfg.c', - 'malloc.c', + 'libqos-malloc.c', 'libqos.c', + 'sdhci-cmd.c', # spapr 'malloc-spapr.c', @@ -29,7 +33,6 @@ libqos_srcs = files('../libqtest.c', 'sdhci.c', 'tpci200.c', 'virtio.c', - 'virtio-9p.c', 'virtio-balloon.c', 'virtio-blk.c', 'vhost-user-blk.c', @@ -40,6 +43,9 @@ libqos_srcs = files('../libqtest.c', 'virtio-rng.c', 'virtio-scsi.c', 'virtio-serial.c', + 'virtio-iommu.c', + 'virtio-gpio.c', + 'generic-pcihost.c', # qgraph machines: 'aarch64-xlnx-zcu102-machine.c', @@ -54,6 +60,10 @@ libqos_srcs = files('../libqtest.c', 'x86_64_pc-machine.c', ) +if have_virtfs + libqos_srcs += files('virtio-9p.c', 'virtio-9p-client.c') +endif + libqos = static_library('qos', libqos_srcs + genh, name_suffix: 'fa', build_by_default: false) diff --git a/tests/qtest/libqos/pci-pc.c b/tests/qtest/libqos/pci-pc.c index f97844289f..96046287ac 100644 --- a/tests/qtest/libqos/pci-pc.c +++ b/tests/qtest/libqos/pci-pc.c @@ -11,7 +11,7 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "pci-pc.h" #include "qapi/qmp/qdict.h" #include "hw/pci/pci_regs.h" @@ -150,6 +150,7 @@ void qpci_init_pc(QPCIBusPC *qpci, QTestState *qts, QGuestAllocator *alloc) qpci->bus.qts = qts; qpci->bus.pio_alloc_ptr = 0xc000; + qpci->bus.pio_limit = 0x10000; qpci->bus.mmio_alloc_ptr = 0xE0000000; qpci->bus.mmio_limit = 0x100000000ULL; @@ -178,13 +179,7 @@ void qpci_free_pc(QPCIBus *bus) void qpci_unplug_acpi_device_test(QTestState *qts, const char *id, uint8_t slot) { - QDict *response; - - response = qtest_qmp(qts, "{'execute': 'device_del'," - " 'arguments': {'id': %s}}", id); - g_assert(response); - g_assert(!qdict_haskey(response, "error")); - qobject_unref(response); + qtest_qmp_device_del_send(qts, id); qtest_outl(qts, ACPI_PCIHP_ADDR + PCI_EJ_BASE, 1 << slot); diff --git 
a/tests/qtest/libqos/pci-pc.h b/tests/qtest/libqos/pci-pc.h index 49ec9507f2..849bd493de 100644 --- a/tests/qtest/libqos/pci-pc.h +++ b/tests/qtest/libqos/pci-pc.h @@ -14,7 +14,7 @@ #define LIBQOS_PCI_PC_H #include "pci.h" -#include "malloc.h" +#include "libqos-malloc.h" #include "qgraph.h" typedef struct QPCIBusPC { diff --git a/tests/qtest/libqos/pci-spapr.c b/tests/qtest/libqos/pci-spapr.c index 262226985f..0f1023e4a7 100644 --- a/tests/qtest/libqos/pci-spapr.c +++ b/tests/qtest/libqos/pci-spapr.c @@ -6,7 +6,7 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "pci-spapr.h" #include "rtas.h" #include "qgraph.h" @@ -197,6 +197,7 @@ void qpci_init_spapr(QPCIBusSPAPR *qpci, QTestState *qts, qpci->bus.qts = qts; qpci->bus.pio_alloc_ptr = 0xc000; + qpci->bus.pio_limit = 0x10000; qpci->bus.mmio_alloc_ptr = qpci->mmio32.pci_base; qpci->bus.mmio_limit = qpci->mmio32.pci_base + qpci->mmio32.size; diff --git a/tests/qtest/libqos/pci-spapr.h b/tests/qtest/libqos/pci-spapr.h index 20a43718b7..3dbf1e58ae 100644 --- a/tests/qtest/libqos/pci-spapr.h +++ b/tests/qtest/libqos/pci-spapr.h @@ -8,7 +8,7 @@ #ifndef LIBQOS_PCI_SPAPR_H #define LIBQOS_PCI_SPAPR_H -#include "malloc.h" +#include "libqos-malloc.h" #include "pci.h" #include "qgraph.h" diff --git a/tests/qtest/libqos/pci.c b/tests/qtest/libqos/pci.c index e1e96189c8..b23d72346b 100644 --- a/tests/qtest/libqos/pci.c +++ b/tests/qtest/libqos/pci.c @@ -13,6 +13,8 @@ #include "qemu/osdep.h" #include "pci.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_bridge.h" #include "hw/pci/pci_regs.h" #include "qemu/host-utils.h" #include "qgraph.h" @@ -99,6 +101,123 @@ void qpci_device_init(QPCIDevice *dev, QPCIBus *bus, QPCIAddress *addr) g_assert(!addr->device_id || device_id == addr->device_id); } +static uint8_t qpci_find_resource_reserve_capability(QPCIDevice *dev) +{ + uint16_t device_id; + uint8_t cap = 0; + + if (qpci_config_readw(dev, PCI_VENDOR_ID) != PCI_VENDOR_ID_REDHAT) { + return 0; + } + + device_id = qpci_config_readw(dev, PCI_DEVICE_ID); + + if (device_id != PCI_DEVICE_ID_REDHAT_PCIE_RP && + device_id != PCI_DEVICE_ID_REDHAT_BRIDGE) { + return 0; + } + + do { + cap = qpci_find_capability(dev, PCI_CAP_ID_VNDR, cap); + } while (cap && + qpci_config_readb(dev, cap + REDHAT_PCI_CAP_TYPE_OFFSET) != + REDHAT_PCI_CAP_RESOURCE_RESERVE); + if (cap) { + uint8_t cap_len = qpci_config_readb(dev, cap + PCI_CAP_FLAGS); + if (cap_len < REDHAT_PCI_CAP_RES_RESERVE_CAP_SIZE) { + return 0; + } + } + return cap; +} + +static void qpci_secondary_buses_rec(QPCIBus *qbus, int bus, int *pci_bus) +{ + QPCIDevice *dev; + uint16_t class; + uint8_t pribus, secbus, subbus; + int index; + + for (index = 0; index < 32; index++) { + dev = qpci_device_find(qbus, QPCI_DEVFN(bus + index, 0)); + if (dev == NULL) { + continue; + } + class = qpci_config_readw(dev, PCI_CLASS_DEVICE); + if (class == PCI_CLASS_BRIDGE_PCI) { + qpci_config_writeb(dev, PCI_SECONDARY_BUS, 255); + qpci_config_writeb(dev, PCI_SUBORDINATE_BUS, 0); + } + g_free(dev); + } + + for (index = 0; index < 32; index++) { + dev = qpci_device_find(qbus, QPCI_DEVFN(bus + index, 0)); + if (dev == NULL) { + continue; + } + class = qpci_config_readw(dev, PCI_CLASS_DEVICE); + if (class != PCI_CLASS_BRIDGE_PCI) { + g_free(dev); + continue; + } + + pribus = qpci_config_readb(dev, PCI_PRIMARY_BUS); + if (pribus != bus) { + qpci_config_writeb(dev, PCI_PRIMARY_BUS, bus); + } + + secbus = qpci_config_readb(dev, PCI_SECONDARY_BUS); + (*pci_bus)++; + if (*pci_bus != secbus) { + secbus = 
*pci_bus; + qpci_config_writeb(dev, PCI_SECONDARY_BUS, secbus); + } + + subbus = qpci_config_readb(dev, PCI_SUBORDINATE_BUS); + qpci_config_writeb(dev, PCI_SUBORDINATE_BUS, 255); + + qpci_secondary_buses_rec(qbus, secbus << 5, pci_bus); + + if (subbus != *pci_bus) { + uint8_t res_bus = *pci_bus; + uint8_t cap = qpci_find_resource_reserve_capability(dev); + + if (cap) { + uint32_t tmp_res_bus; + + tmp_res_bus = qpci_config_readl(dev, cap + + REDHAT_PCI_CAP_RES_RESERVE_BUS_RES); + if (tmp_res_bus != (uint32_t)-1) { + res_bus = tmp_res_bus & 0xFF; + if ((uint8_t)(res_bus + secbus) < secbus || + (uint8_t)(res_bus + secbus) < res_bus) { + res_bus = 0; + } + if (secbus + res_bus > *pci_bus) { + res_bus = secbus + res_bus; + } + } + } + subbus = res_bus; + *pci_bus = res_bus; + } + + qpci_config_writeb(dev, PCI_SUBORDINATE_BUS, subbus); + g_free(dev); + } +} + +int qpci_secondary_buses_init(QPCIBus *bus) +{ + int last_bus = 0; + + qpci_secondary_buses_rec(bus, 0, &last_bus); + + return last_bus; +} + + void qpci_device_enable(QPCIDevice *dev) { uint16_t cmd; @@ -279,44 +398,56 @@ void qpci_config_writel(QPCIDevice *dev, uint8_t offset, uint32_t value) uint8_t qpci_io_readb(QPCIDevice *dev, QPCIBar token, uint64_t off) { - if (token.addr < QPCI_PIO_LIMIT) { - return dev->bus->pio_readb(dev->bus, token.addr + off); + QPCIBus *bus = dev->bus; + + if (token.is_io) { + return bus->pio_readb(bus, token.addr + off); } else { uint8_t val; - dev->bus->memread(dev->bus, token.addr + off, &val, sizeof(val)); + + bus->memread(dev->bus, token.addr + off, &val, sizeof(val)); return val; } } uint16_t qpci_io_readw(QPCIDevice *dev, QPCIBar token, uint64_t off) { - if (token.addr < QPCI_PIO_LIMIT) { - return dev->bus->pio_readw(dev->bus, token.addr + off); + QPCIBus *bus = dev->bus; + + if (token.is_io) { + return bus->pio_readw(bus, token.addr + off); } else { uint16_t val; - dev->bus->memread(dev->bus, token.addr + off, &val, sizeof(val)); + + bus->memread(bus, token.addr + off, &val, sizeof(val)); return le16_to_cpu(val); } } uint32_t qpci_io_readl(QPCIDevice *dev, QPCIBar token, uint64_t off) { - if (token.addr < QPCI_PIO_LIMIT) { - return dev->bus->pio_readl(dev->bus, token.addr + off); + QPCIBus *bus = dev->bus; + + if (token.is_io) { + return bus->pio_readl(bus, token.addr + off); } else { uint32_t val; - dev->bus->memread(dev->bus, token.addr + off, &val, sizeof(val)); + + bus->memread(dev->bus, token.addr + off, &val, sizeof(val)); return le32_to_cpu(val); } } uint64_t qpci_io_readq(QPCIDevice *dev, QPCIBar token, uint64_t off) { - if (token.addr < QPCI_PIO_LIMIT) { - return dev->bus->pio_readq(dev->bus, token.addr + off); + QPCIBus *bus = dev->bus; + + if (token.is_io) { + return bus->pio_readq(bus, token.addr + off); } else { uint64_t val; - dev->bus->memread(dev->bus, token.addr + off, &val, sizeof(val)); + + bus->memread(bus, token.addr + off, &val, sizeof(val)); return le64_to_cpu(val); } } @@ -324,57 +455,65 @@ uint64_t qpci_io_readq(QPCIDevice *dev, QPCIBar token, uint64_t off) void qpci_io_writeb(QPCIDevice *dev, QPCIBar token, uint64_t off, uint8_t value) { - if (token.addr < QPCI_PIO_LIMIT) { - dev->bus->pio_writeb(dev->bus, token.addr + off, value); + QPCIBus *bus = dev->bus; + + if (token.is_io) { + bus->pio_writeb(bus, token.addr + off, value); } else { - dev->bus->memwrite(dev->bus, token.addr + off, &value, sizeof(value)); + bus->memwrite(bus, token.addr + off, &value, sizeof(value)); } } void qpci_io_writew(QPCIDevice *dev, QPCIBar token, uint64_t off, uint16_t value) { - if (token.addr < 
QPCI_PIO_LIMIT) { - dev->bus->pio_writew(dev->bus, token.addr + off, value); + QPCIBus *bus = dev->bus; + + if (token.is_io) { + bus->pio_writew(bus, token.addr + off, value); } else { value = cpu_to_le16(value); - dev->bus->memwrite(dev->bus, token.addr + off, &value, sizeof(value)); + bus->memwrite(bus, token.addr + off, &value, sizeof(value)); } } void qpci_io_writel(QPCIDevice *dev, QPCIBar token, uint64_t off, uint32_t value) { - if (token.addr < QPCI_PIO_LIMIT) { - dev->bus->pio_writel(dev->bus, token.addr + off, value); + QPCIBus *bus = dev->bus; + + if (token.is_io) { + bus->pio_writel(bus, token.addr + off, value); } else { value = cpu_to_le32(value); - dev->bus->memwrite(dev->bus, token.addr + off, &value, sizeof(value)); + bus->memwrite(bus, token.addr + off, &value, sizeof(value)); } } void qpci_io_writeq(QPCIDevice *dev, QPCIBar token, uint64_t off, uint64_t value) { - if (token.addr < QPCI_PIO_LIMIT) { - dev->bus->pio_writeq(dev->bus, token.addr + off, value); + QPCIBus *bus = dev->bus; + + if (token.is_io) { + bus->pio_writeq(bus, token.addr + off, value); } else { value = cpu_to_le64(value); - dev->bus->memwrite(dev->bus, token.addr + off, &value, sizeof(value)); + bus->memwrite(bus, token.addr + off, &value, sizeof(value)); } } void qpci_memread(QPCIDevice *dev, QPCIBar token, uint64_t off, void *buf, size_t len) { - g_assert(token.addr >= QPCI_PIO_LIMIT); + g_assert(!token.is_io); dev->bus->memread(dev->bus, token.addr + off, buf, len); } void qpci_memwrite(QPCIDevice *dev, QPCIBar token, uint64_t off, const void *buf, size_t len) { - g_assert(token.addr >= QPCI_PIO_LIMIT); + g_assert(!token.is_io); dev->bus->memwrite(dev->bus, token.addr + off, buf, len); } @@ -415,9 +554,10 @@ QPCIBar qpci_iomap(QPCIDevice *dev, int barno, uint64_t *sizeptr) loc = QEMU_ALIGN_UP(bus->pio_alloc_ptr, size); g_assert(loc >= bus->pio_alloc_ptr); - g_assert(loc + size <= QPCI_PIO_LIMIT); /* Keep PIO below 64kiB */ + g_assert(loc + size <= bus->pio_limit); bus->pio_alloc_ptr = loc + size; + bar.is_io = true; qpci_config_writel(dev, bar_reg, loc | PCI_BASE_ADDRESS_SPACE_IO); } else { @@ -428,6 +568,7 @@ QPCIBar qpci_iomap(QPCIDevice *dev, int barno, uint64_t *sizeptr) g_assert(loc + size <= bus->mmio_limit); bus->mmio_alloc_ptr = loc + size; + bar.is_io = false; qpci_config_writel(dev, bar_reg, loc); } @@ -443,7 +584,7 @@ void qpci_iounmap(QPCIDevice *dev, QPCIBar bar) QPCIBar qpci_legacy_iomap(QPCIDevice *dev, uint16_t addr) { - QPCIBar bar = { .addr = addr }; + QPCIBar bar = { .addr = addr, .is_io = true }; return bar; } diff --git a/tests/qtest/libqos/pci.h b/tests/qtest/libqos/pci.h index ee64fdecbd..8389614523 100644 --- a/tests/qtest/libqos/pci.h +++ b/tests/qtest/libqos/pci.h @@ -13,11 +13,9 @@ #ifndef LIBQOS_PCI_H #define LIBQOS_PCI_H -#include "libqtest.h" +#include "../libqtest.h" #include "qgraph.h" -#define QPCI_PIO_LIMIT 0x10000 - #define QPCI_DEVFN(dev, fn) (((dev) << 3) | (fn)) typedef struct QPCIDevice QPCIDevice; @@ -51,14 +49,16 @@ struct QPCIBus { uint8_t offset, uint32_t value); QTestState *qts; - uint16_t pio_alloc_ptr; + uint64_t pio_alloc_ptr, pio_limit; uint64_t mmio_alloc_ptr, mmio_limit; bool has_buggy_msi; /* TRUE for spapr, FALSE for pci */ + bool not_hotpluggable; /* TRUE if devices cannot be hotplugged */ }; struct QPCIBar { uint64_t addr; + bool is_io; }; struct QPCIDevice @@ -81,6 +81,7 @@ void qpci_device_foreach(QPCIBus *bus, int vendor_id, int device_id, void *data); QPCIDevice *qpci_device_find(QPCIBus *bus, int devfn); void qpci_device_init(QPCIDevice *dev, 
QPCIBus *bus, QPCIAddress *addr); +int qpci_secondary_buses_init(QPCIBus *bus); bool qpci_has_buggy_msi(QPCIDevice *dev); bool qpci_check_buggy_msi(QPCIDevice *dev);
diff --git a/tests/qtest/libqos/ppc64_pseries-machine.c b/tests/qtest/libqos/ppc64_pseries-machine.c index 24ca179976..364e9c689b 100644 --- a/tests/qtest/libqos/ppc64_pseries-machine.c +++ b/tests/qtest/libqos/ppc64_pseries-machine.c @@ -17,7 +17,7 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qgraph.h" #include "pci-spapr.h" #include "qemu/module.h"
diff --git a/tests/qtest/libqos/qgraph.c b/tests/qtest/libqos/qgraph.c index d1dc491930..0a2dddfafa 100644 --- a/tests/qtest/libqos/qgraph.c +++ b/tests/qtest/libqos/qgraph.c @@ -17,7 +17,7 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qemu/queue.h" #include "qgraph_internal.h" #include "qgraph.h" @@ -93,7 +93,7 @@ static void add_edge(const char *source, const char *dest, edge->type = type; edge->dest = g_strdup(dest); edge->edge_name = g_strdup(opts->edge_name ?: dest); - edge->arg = g_memdup(opts->arg, opts->size_arg); + edge->arg = g_memdup2(opts->arg, opts->size_arg); edge->before_cmd_line = opts->before_cmd_line ? g_strconcat(" ", opts->before_cmd_line, NULL) : NULL;
diff --git a/tests/qtest/libqos/qgraph.h b/tests/qtest/libqos/qgraph.h index 54672350c8..287022a67c 100644 --- a/tests/qtest/libqos/qgraph.h +++ b/tests/qtest/libqos/qgraph.h @@ -21,10 +21,10 @@ #include #include "qemu/module.h" -#include "malloc.h" +#include "libqos-malloc.h" /* maximum path length */ -#define QOS_PATH_MAX_ELEMENT_SIZE 50 +#define QOS_PATH_MAX_ELEMENT_SIZE 64 typedef struct QOSGraphObject QOSGraphObject; typedef struct QOSGraphNode QOSGraphNode; @@ -252,17 +252,17 @@ void qos_node_create_driver_named(const char *name, const char *qemu_name, * This function can be useful when there are multiple devices * with the same node name contained in a machine/other node * - * For example, if ``arm/raspi2`` contains 2 ``generic-sdhci`` + * For example, if ``arm/raspi2b`` contains 2 ``generic-sdhci`` * devices, the right commands will be: * * .. code:: * * - * qos_node_create_machine("arm/raspi2"); + * qos_node_create_machine("arm/raspi2b"); * qos_node_create_driver("generic-sdhci", constructor); * // assume rest of the fields are set NULL * QOSGraphEdgeOptions op1 = { .edge_name = "emmc" }; * QOSGraphEdgeOptions op2 = { .edge_name = "sdcard" }; - * qos_node_contains("arm/raspi2", "generic-sdhci", &op1, &op2, NULL); + * qos_node_contains("arm/raspi2b", "generic-sdhci", &op1, &op2, NULL); * * Of course this also requires that the @container's get_device function * should implement a case for "emmc" and "sdcard". @@ -381,7 +381,7 @@ QOSGraphObject *qos_driver_new(QOSGraphNode *node, QOSGraphObject *parent, * mind: only tests with a path down from the actual test case node (leaf) up * to the graph's root node are actually executed by the qtest framework. And * the qtest framework uses QMP to automatically check which QEMU drivers are - * actually currently available, and accordingly qos marks certain pathes as + * actually currently available, and accordingly qos marks certain paths as * 'unavailable' in such cases (e.g. when QEMU was compiled without support for * a certain feature). 
*/ diff --git a/tests/qtest/libqos/qgraph_internal.h b/tests/qtest/libqos/qgraph_internal.h index c0025f5ab9..7d62fd17af 100644 --- a/tests/qtest/libqos/qgraph_internal.h +++ b/tests/qtest/libqos/qgraph_internal.h @@ -230,7 +230,7 @@ void qos_graph_foreach_test_path(QOSTestCallback fn); /** * qos_get_machine_type(): return QEMU machine type for a machine node. * This function requires every machine @name to be in the form - * /, like "arm/raspi2" or "x86_64/pc". + * /, like "arm/raspi2b" or "x86_64/pc". * * The function will validate the format and return a pointer to * @machine to . For example, when passed "x86_64/pc" diff --git a/tests/qtest/libqos/qos_external.c b/tests/qtest/libqos/qos_external.c index 10ee0f75b2..c6bb8bff09 100644 --- a/tests/qtest/libqos/qos_external.c +++ b/tests/qtest/libqos/qos_external.c @@ -18,13 +18,13 @@ #include "qemu/osdep.h" #include -#include "libqtest.h" +#include "../libqtest.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qbool.h" #include "qapi/qmp/qstring.h" #include "qemu/module.h" #include "qapi/qmp/qlist.h" -#include "malloc.h" +#include "libqos-malloc.h" #include "qgraph.h" #include "qgraph_internal.h" #include "qos_external.h" diff --git a/tests/qtest/libqos/qos_external.h b/tests/qtest/libqos/qos_external.h index 8446e3df0b..ea37364887 100644 --- a/tests/qtest/libqos/qos_external.h +++ b/tests/qtest/libqos/qos_external.h @@ -21,7 +21,7 @@ #include "qgraph.h" -#include "malloc.h" +#include "libqos-malloc.h" #include "qapi/qapi-types-machine.h" #include "qapi/qapi-types-qom.h" diff --git a/tests/qtest/libqos/rtas.c b/tests/qtest/libqos/rtas.c index db29d5554d..dedbfb4cb3 100644 --- a/tests/qtest/libqos/rtas.c +++ b/tests/qtest/libqos/rtas.c @@ -4,7 +4,7 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "rtas.h" static void qrtas_copy_args(QTestState *qts, uint64_t target_args, diff --git a/tests/qtest/libqos/rtas.h b/tests/qtest/libqos/rtas.h index f38f99dfab..be8353d505 100644 --- a/tests/qtest/libqos/rtas.h +++ b/tests/qtest/libqos/rtas.h @@ -5,7 +5,7 @@ #ifndef LIBQOS_RTAS_H #define LIBQOS_RTAS_H -#include "malloc.h" +#include "libqos-malloc.h" int qrtas_get_time_of_day(QTestState *qts, QGuestAllocator *alloc, struct tm *tm, uint32_t *ns); diff --git a/tests/qtest/libqos/sdhci-cmd.c b/tests/qtest/libqos/sdhci-cmd.c new file mode 100644 index 0000000000..a6f073ac1a --- /dev/null +++ b/tests/qtest/libqos/sdhci-cmd.c @@ -0,0 +1,116 @@ +/* + * MMC Host Controller Commands + * + * Copyright (c) 2021 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" +#include "sdhci-cmd.h" +#include "../libqtest.h" + +static ssize_t read_fifo(QTestState *qts, uint64_t reg, char *msg, size_t count) +{ + uint32_t mask = 0xff; + size_t index = 0; + uint32_t msg_frag; + int size; + while (index < count) { + size = count - index; + if (size > 4) { + size = 4; + } + msg_frag = qtest_readl(qts, reg); + while (size > 0) { + msg[index] = msg_frag & mask; + if (msg[index++] == 0) { + return index; + } + msg_frag >>= 8; + --size; + } + } + return index; +} + +static void write_fifo(QTestState *qts, uint64_t reg, const char *msg, + size_t count) +{ + size_t index = 0; + uint32_t msg_frag; + int size; + int frag_i; + while (index < count) { + size = count - index; + if (size > 4) { + size = 4; + } + msg_frag = 0; + frag_i = 0; + while (frag_i < size) { + msg_frag |= ((uint32_t)msg[index++]) << (frag_i * 8); + ++frag_i; + } + qtest_writel(qts, reg, msg_frag); + } +} + +static void fill_block(QTestState *qts, uint64_t reg, int count) +{ + while (--count >= 0) { + qtest_writel(qts, reg, 0); + } +} + +void sdhci_cmd_regs(QTestState *qts, uint64_t base_addr, uint16_t blksize, + uint16_t blkcnt, uint32_t argument, uint16_t trnmod, + uint16_t cmdreg) +{ + qtest_writew(qts, base_addr + SDHC_BLKSIZE, blksize); + qtest_writew(qts, base_addr + SDHC_BLKCNT, blkcnt); + qtest_writel(qts, base_addr + SDHC_ARGUMENT, argument); + qtest_writew(qts, base_addr + SDHC_TRNMOD, trnmod); + qtest_writew(qts, base_addr + SDHC_CMDREG, cmdreg); +} + +ssize_t sdhci_read_cmd(QTestState *qts, uint64_t base_addr, char *msg, + size_t count) +{ + sdhci_cmd_regs(qts, base_addr, count, 1, 0, + SDHC_TRNS_MULTI | SDHC_TRNS_READ | SDHC_TRNS_BLK_CNT_EN, + SDHC_READ_MULTIPLE_BLOCK | SDHC_CMD_DATA_PRESENT); + + /* read sd fifo_buffer */ + ssize_t bytes_read = read_fifo(qts, base_addr + SDHC_BDATA, msg, count); + + sdhci_cmd_regs(qts, base_addr, 0, 0, 0, + SDHC_TRNS_MULTI | SDHC_TRNS_READ | SDHC_TRNS_BLK_CNT_EN, + SDHC_STOP_TRANSMISSION); + + return bytes_read; +} + +void sdhci_write_cmd(QTestState *qts, uint64_t base_addr, const char *msg, + size_t count, size_t blksize) +{ + sdhci_cmd_regs(qts, base_addr, blksize, 1, 0, + SDHC_TRNS_MULTI | SDHC_TRNS_WRITE | SDHC_TRNS_BLK_CNT_EN, + SDHC_WRITE_MULTIPLE_BLOCK | SDHC_CMD_DATA_PRESENT); + + /* write to sd fifo_buffer */ + write_fifo(qts, base_addr + SDHC_BDATA, msg, count); + fill_block(qts, base_addr + SDHC_BDATA, (blksize - count) / 4); + + sdhci_cmd_regs(qts, base_addr, 0, 0, 0, + SDHC_TRNS_MULTI | SDHC_TRNS_WRITE | SDHC_TRNS_BLK_CNT_EN, + SDHC_STOP_TRANSMISSION); +} diff --git a/tests/qtest/libqos/sdhci-cmd.h b/tests/qtest/libqos/sdhci-cmd.h new file mode 100644 index 0000000000..9e61dd4944 --- /dev/null +++ b/tests/qtest/libqos/sdhci-cmd.h @@ -0,0 +1,70 @@ +/* + * MMC Host Controller Commands + * + * Copyright (c) 2021 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "../libqtest.h" + +/* more details at hw/sd/sdhci-internal.h */ +#define SDHC_BLKSIZE 0x04 +#define SDHC_BLKCNT 0x06 +#define SDHC_ARGUMENT 0x08 +#define SDHC_TRNMOD 0x0C +#define SDHC_CMDREG 0x0E +#define SDHC_BDATA 0x20 +#define SDHC_PRNSTS 0x24 +#define SDHC_BLKGAP 0x2A +#define SDHC_CLKCON 0x2C +#define SDHC_SWRST 0x2F +#define SDHC_CAPAB 0x40 +#define SDHC_MAXCURR 0x48 +#define SDHC_HCVER 0xFE + +/* TRNSMOD Reg */ +#define SDHC_TRNS_BLK_CNT_EN 0x0002 +#define SDHC_TRNS_READ 0x0010 +#define SDHC_TRNS_WRITE 0x0000 +#define SDHC_TRNS_MULTI 0x0020 + +/* CMD Reg */ +#define SDHC_CMD_DATA_PRESENT (1 << 5) +#define SDHC_ALL_SEND_CID (2 << 8) +#define SDHC_SEND_RELATIVE_ADDR (3 << 8) +#define SDHC_SELECT_DESELECT_CARD (7 << 8) +#define SDHC_SEND_CSD (9 << 8) +#define SDHC_STOP_TRANSMISSION (12 << 8) +#define SDHC_READ_MULTIPLE_BLOCK (18 << 8) +#define SDHC_WRITE_MULTIPLE_BLOCK (25 << 8) +#define SDHC_APP_CMD (55 << 8) + +/* SWRST Reg */ +#define SDHC_RESET_ALL 0x01 + +/* CLKCTRL Reg */ +#define SDHC_CLOCK_INT_EN 0x0001 +#define SDHC_CLOCK_INT_STABLE 0x0002 +#define SDHC_CLOCK_SDCLK_EN (1 << 2) + +/* Set registers needed to send commands to SD */ +void sdhci_cmd_regs(QTestState *qts, uint64_t base_addr, uint16_t blksize, + uint16_t blkcnt, uint32_t argument, uint16_t trnmod, + uint16_t cmdreg); + +/* Read at most 1 block of SD using non-DMA */ +ssize_t sdhci_read_cmd(QTestState *qts, uint64_t base_addr, char *msg, + size_t count); + +/* Write at most 1 block of SD using non-DMA */ +void sdhci_write_cmd(QTestState *qts, uint64_t base_addr, const char *msg, + size_t count, size_t blksize); diff --git a/tests/qtest/libqos/sdhci.c b/tests/qtest/libqos/sdhci.c index 65f0d07fc5..71696980f8 100644 --- a/tests/qtest/libqos/sdhci.c +++ b/tests/qtest/libqos/sdhci.c @@ -17,7 +17,7 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qgraph.h" #include "pci.h" #include "qemu/module.h" diff --git a/tests/qtest/libqos/tpci200.c b/tests/qtest/libqos/tpci200.c index 1787b1f188..8b00603247 100644 --- a/tests/qtest/libqos/tpci200.c +++ b/tests/qtest/libqos/tpci200.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qemu/module.h" #include "qgraph.h" #include "pci.h" diff --git a/tests/qtest/libqos/usb.c b/tests/qtest/libqos/usb.c index 8b45b02984..446fdb5796 100644 --- a/tests/qtest/libqos/usb.c +++ b/tests/qtest/libqos/usb.c @@ -12,7 +12,7 @@ * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "hw/usb/uhci-regs.h" #include "usb.h" diff --git a/tests/qtest/libqos/vhost-user-blk.c b/tests/qtest/libqos/vhost-user-blk.c index 568c3426ed..2f3c9cb533 100644 --- a/tests/qtest/libqos/vhost-user-blk.c +++ b/tests/qtest/libqos/vhost-user-blk.c @@ -21,7 +21,7 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qemu/module.h" #include "standard-headers/linux/virtio_blk.h" #include "vhost-user-blk.h" diff --git a/tests/qtest/libqos/virtio-9p-client.c b/tests/qtest/libqos/virtio-9p-client.c new file mode 100644 index 0000000000..e4a368e036 --- /dev/null +++ b/tests/qtest/libqos/virtio-9p-client.c @@ -0,0 +1,1049 @@ +/* + * 9P network client for VirtIO 9P test cases (based on QTest) + * + * Copyright (c) 2014 SUSE LINUX Products GmbH + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +/* + * Not so fast! 
You might want to read the 9p developer docs first: + * https://wiki.qemu.org/Documentation/9p + */ + +#include "qemu/osdep.h" +#include "virtio-9p-client.h" + +#define QVIRTIO_9P_TIMEOUT_US (10 * 1000 * 1000) +static QGuestAllocator *alloc; + +void v9fs_set_allocator(QGuestAllocator *t_alloc) +{ + alloc = t_alloc; +} + +/* + * Used to auto generate new fids. Start with arbitrary high value to avoid + * collision with hard coded fids in basic test code. + */ +static uint32_t fid_generator = 1000; + +static uint32_t genfid(void) +{ + return fid_generator++; +} + +/** + * Splits the @a in string by @a delim into individual (non empty) strings + * and outputs them to @a out. The output array @a out is NULL terminated. + * + * Output array @a out must be freed by calling split_free(). + * + * @returns number of individual elements in output array @a out (without the + * final NULL terminating element) + */ +static int split(const char *in, const char *delim, char ***out) +{ + int n = 0, i = 0; + char *tmp, *p; + + tmp = g_strdup(in); + for (p = strtok(tmp, delim); p != NULL; p = strtok(NULL, delim)) { + if (strlen(p) > 0) { + ++n; + } + } + g_free(tmp); + + *out = g_new0(char *, n + 1); /* last element NULL delimiter */ + + tmp = g_strdup(in); + for (p = strtok(tmp, delim); p != NULL; p = strtok(NULL, delim)) { + if (strlen(p) > 0) { + (*out)[i++] = g_strdup(p); + } + } + g_free(tmp); + + return n; +} + +static void split_free(char ***out) +{ + int i; + if (!*out) { + return; + } + for (i = 0; (*out)[i]; ++i) { + g_free((*out)[i]); + } + g_free(*out); + *out = NULL; +} + +void v9fs_memwrite(P9Req *req, const void *addr, size_t len) +{ + qtest_memwrite(req->qts, req->t_msg + req->t_off, addr, len); + req->t_off += len; +} + +void v9fs_memskip(P9Req *req, size_t len) +{ + req->r_off += len; +} + +void v9fs_memread(P9Req *req, void *addr, size_t len) +{ + qtest_memread(req->qts, req->r_msg + req->r_off, addr, len); + req->r_off += len; +} + +void v9fs_uint8_read(P9Req *req, uint8_t *val) +{ + v9fs_memread(req, val, 1); +} + +void v9fs_uint16_write(P9Req *req, uint16_t val) +{ + uint16_t le_val = cpu_to_le16(val); + + v9fs_memwrite(req, &le_val, 2); +} + +void v9fs_uint16_read(P9Req *req, uint16_t *val) +{ + v9fs_memread(req, val, 2); + le16_to_cpus(val); +} + +void v9fs_uint32_write(P9Req *req, uint32_t val) +{ + uint32_t le_val = cpu_to_le32(val); + + v9fs_memwrite(req, &le_val, 4); +} + +void v9fs_uint64_write(P9Req *req, uint64_t val) +{ + uint64_t le_val = cpu_to_le64(val); + + v9fs_memwrite(req, &le_val, 8); +} + +void v9fs_uint32_read(P9Req *req, uint32_t *val) +{ + v9fs_memread(req, val, 4); + le32_to_cpus(val); +} + +void v9fs_uint64_read(P9Req *req, uint64_t *val) +{ + v9fs_memread(req, val, 8); + le64_to_cpus(val); +} + +/* len[2] string[len] */ +uint16_t v9fs_string_size(const char *string) +{ + size_t len = strlen(string); + + g_assert_cmpint(len, <=, UINT16_MAX - 2); + + return 2 + len; +} + +void v9fs_string_write(P9Req *req, const char *string) +{ + int len = strlen(string); + + g_assert_cmpint(len, <=, UINT16_MAX); + + v9fs_uint16_write(req, (uint16_t) len); + v9fs_memwrite(req, string, len); +} + +void v9fs_string_read(P9Req *req, uint16_t *len, char **string) +{ + uint16_t local_len; + + v9fs_uint16_read(req, &local_len); + if (len) { + *len = local_len; + } + if (string) { + *string = g_malloc(local_len + 1); + v9fs_memread(req, *string, local_len); + (*string)[local_len] = 0; + } else { + v9fs_memskip(req, local_len); + } +} + +typedef struct { + uint32_t size; + uint8_t id; + 
uint16_t tag; +} QEMU_PACKED P9Hdr; + +P9Req *v9fs_req_init(QVirtio9P *v9p, uint32_t size, uint8_t id, + uint16_t tag) +{ + P9Req *req = g_new0(P9Req, 1); + uint32_t total_size = 7; /* 9P header has well-known size of 7 bytes */ + P9Hdr hdr = { + .id = id, + .tag = cpu_to_le16(tag) + }; + + g_assert_cmpint(total_size, <=, UINT32_MAX - size); + total_size += size; + hdr.size = cpu_to_le32(total_size); + + g_assert_cmpint(total_size, <=, P9_MAX_SIZE); + + req->qts = global_qtest; + req->v9p = v9p; + req->t_size = total_size; + req->t_msg = guest_alloc(alloc, req->t_size); + v9fs_memwrite(req, &hdr, 7); + req->tag = tag; + return req; +} + +void v9fs_req_send(P9Req *req) +{ + QVirtio9P *v9p = req->v9p; + + req->r_msg = guest_alloc(alloc, P9_MAX_SIZE); + req->free_head = qvirtqueue_add(req->qts, v9p->vq, req->t_msg, req->t_size, + false, true); + qvirtqueue_add(req->qts, v9p->vq, req->r_msg, P9_MAX_SIZE, true, false); + qvirtqueue_kick(req->qts, v9p->vdev, v9p->vq, req->free_head); + req->t_off = 0; +} + +static const char *rmessage_name(uint8_t id) +{ + return + id == P9_RLERROR ? "RLERROR" : + id == P9_RVERSION ? "RVERSION" : + id == P9_RATTACH ? "RATTACH" : + id == P9_RWALK ? "RWALK" : + id == P9_RLOPEN ? "RLOPEN" : + id == P9_RWRITE ? "RWRITE" : + id == P9_RMKDIR ? "RMKDIR" : + id == P9_RLCREATE ? "RLCREATE" : + id == P9_RSYMLINK ? "RSYMLINK" : + id == P9_RLINK ? "RLINK" : + id == P9_RUNLINKAT ? "RUNLINKAT" : + id == P9_RFLUSH ? "RFLUSH" : + id == P9_RREADDIR ? "READDIR" : + ""; +} + +void v9fs_req_wait_for_reply(P9Req *req, uint32_t *len) +{ + QVirtio9P *v9p = req->v9p; + + qvirtio_wait_used_elem(req->qts, v9p->vdev, v9p->vq, req->free_head, len, + QVIRTIO_9P_TIMEOUT_US); +} + +void v9fs_req_recv(P9Req *req, uint8_t id) +{ + P9Hdr hdr; + + v9fs_memread(req, &hdr, 7); + hdr.size = ldl_le_p(&hdr.size); + hdr.tag = lduw_le_p(&hdr.tag); + + g_assert_cmpint(hdr.size, >=, 7); + g_assert_cmpint(hdr.size, <=, P9_MAX_SIZE); + g_assert_cmpint(hdr.tag, ==, req->tag); + + if (hdr.id != id) { + g_printerr("Received response %d (%s) instead of %d (%s)\n", + hdr.id, rmessage_name(hdr.id), id, rmessage_name(id)); + + if (hdr.id == P9_RLERROR) { + uint32_t err; + v9fs_uint32_read(req, &err); + g_printerr("Rlerror has errno %d (%s)\n", err, strerror(err)); + } + } + g_assert_cmpint(hdr.id, ==, id); +} + +void v9fs_req_free(P9Req *req) +{ + guest_free(alloc, req->t_msg); + guest_free(alloc, req->r_msg); + g_free(req); +} + +/* size[4] Rlerror tag[2] ecode[4] */ +void v9fs_rlerror(P9Req *req, uint32_t *err) +{ + v9fs_req_recv(req, P9_RLERROR); + v9fs_uint32_read(req, err); + v9fs_req_free(req); +} + +/* size[4] Tversion tag[2] msize[4] version[s] */ +TVersionRes v9fs_tversion(TVersionOpt opt) +{ + P9Req *req; + uint32_t err; + uint32_t body_size = 4; + uint16_t string_size; + uint16_t server_len; + g_autofree char *server_version = NULL; + + g_assert(opt.client); + + if (!opt.msize) { + opt.msize = P9_MAX_SIZE; + } + + if (!opt.tag) { + opt.tag = P9_NOTAG; + } + + if (!opt.version) { + opt.version = "9P2000.L"; + } + + string_size = v9fs_string_size(opt.version); + g_assert_cmpint(body_size, <=, UINT32_MAX - string_size); + body_size += string_size; + req = v9fs_req_init(opt.client, body_size, P9_TVERSION, opt.tag); + + v9fs_uint32_write(req, opt.msize); + v9fs_string_write(req, opt.version); + v9fs_req_send(req); + + if (!opt.requestOnly) { + v9fs_req_wait_for_reply(req, NULL); + if (opt.expectErr) { + v9fs_rlerror(req, &err); + g_assert_cmpint(err, ==, opt.expectErr); + } else { + v9fs_rversion(req, 
&server_len, &server_version); + g_assert_cmpmem(server_version, server_len, + opt.version, strlen(opt.version)); + } + req = NULL; /* request was freed */ + } + + return (TVersionRes) { + .req = req, + }; +} + +/* size[4] Rversion tag[2] msize[4] version[s] */ +void v9fs_rversion(P9Req *req, uint16_t *len, char **version) +{ + uint32_t msize; + + v9fs_req_recv(req, P9_RVERSION); + v9fs_uint32_read(req, &msize); + + g_assert_cmpint(msize, ==, P9_MAX_SIZE); + + if (len || version) { + v9fs_string_read(req, len, version); + } + + v9fs_req_free(req); +} + +/* size[4] Tattach tag[2] fid[4] afid[4] uname[s] aname[s] n_uname[4] */ +TAttachRes v9fs_tattach(TAttachOpt opt) +{ + uint32_t err; + const char *uname = ""; /* ignored by QEMU */ + const char *aname = ""; /* ignored by QEMU */ + + g_assert(opt.client); + /* expecting either Rattach or Rlerror, but obviously not both */ + g_assert(!opt.expectErr || !opt.rattach.qid); + + if (!opt.requestOnly) { + v9fs_tversion((TVersionOpt) { .client = opt.client }); + } + + if (!opt.n_uname) { + opt.n_uname = getuid(); + } + + P9Req *req = v9fs_req_init(opt.client, 4 + 4 + 2 + 2 + 4, P9_TATTACH, + opt.tag); + + v9fs_uint32_write(req, opt.fid); + v9fs_uint32_write(req, P9_NOFID); + v9fs_string_write(req, uname); + v9fs_string_write(req, aname); + v9fs_uint32_write(req, opt.n_uname); + v9fs_req_send(req); + + if (!opt.requestOnly) { + v9fs_req_wait_for_reply(req, NULL); + if (opt.expectErr) { + v9fs_rlerror(req, &err); + g_assert_cmpint(err, ==, opt.expectErr); + } else { + v9fs_rattach(req, opt.rattach.qid); + } + req = NULL; /* request was freed */ + } + + return (TAttachRes) { + .req = req, + }; +} + +/* size[4] Rattach tag[2] qid[13] */ +void v9fs_rattach(P9Req *req, v9fs_qid *qid) +{ + v9fs_req_recv(req, P9_RATTACH); + if (qid) { + v9fs_memread(req, qid, 13); + } + v9fs_req_free(req); +} + +/* size[4] Twalk tag[2] fid[4] newfid[4] nwname[2] nwname*(wname[s]) */ +TWalkRes v9fs_twalk(TWalkOpt opt) +{ + P9Req *req; + int i; + uint32_t body_size = 4 + 4 + 2; + uint32_t err; + char **wnames = NULL; + + g_assert(opt.client); + /* expecting either high- or low-level path, both not both */ + g_assert(!opt.path || !(opt.nwname || opt.wnames)); + /* expecting either Rwalk or Rlerror, but obviously not both */ + g_assert(!opt.expectErr || !(opt.rwalk.nwqid || opt.rwalk.wqid)); + + if (!opt.newfid) { + opt.newfid = genfid(); + } + + if (opt.path) { + opt.nwname = split(opt.path, "/", &wnames); + opt.wnames = wnames; + } + + for (i = 0; i < opt.nwname; i++) { + uint16_t wname_size = v9fs_string_size(opt.wnames[i]); + + g_assert_cmpint(body_size, <=, UINT32_MAX - wname_size); + body_size += wname_size; + } + req = v9fs_req_init(opt.client, body_size, P9_TWALK, opt.tag); + v9fs_uint32_write(req, opt.fid); + v9fs_uint32_write(req, opt.newfid); + v9fs_uint16_write(req, opt.nwname); + for (i = 0; i < opt.nwname; i++) { + v9fs_string_write(req, opt.wnames[i]); + } + v9fs_req_send(req); + + if (!opt.requestOnly) { + v9fs_req_wait_for_reply(req, NULL); + if (opt.expectErr) { + v9fs_rlerror(req, &err); + g_assert_cmpint(err, ==, opt.expectErr); + } else { + v9fs_rwalk(req, opt.rwalk.nwqid, opt.rwalk.wqid); + } + req = NULL; /* request was freed */ + } + + split_free(&wnames); + + return (TWalkRes) { + .newfid = opt.newfid, + .req = req, + }; +} + +/* size[4] Rwalk tag[2] nwqid[2] nwqid*(wqid[13]) */ +void v9fs_rwalk(P9Req *req, uint16_t *nwqid, v9fs_qid **wqid) +{ + uint16_t local_nwqid; + + v9fs_req_recv(req, P9_RWALK); + v9fs_uint16_read(req, &local_nwqid); + if (nwqid) { 
+ *nwqid = local_nwqid; + } + if (wqid) { + *wqid = g_malloc(local_nwqid * 13); + v9fs_memread(req, *wqid, local_nwqid * 13); + } + v9fs_req_free(req); +} + +/* size[4] Tgetattr tag[2] fid[4] request_mask[8] */ +TGetAttrRes v9fs_tgetattr(TGetAttrOpt opt) +{ + P9Req *req; + uint32_t err; + + g_assert(opt.client); + /* expecting either Rgetattr or Rlerror, but obviously not both */ + g_assert(!opt.expectErr || !opt.rgetattr.attr); + + if (!opt.request_mask) { + opt.request_mask = P9_GETATTR_ALL; + } + + req = v9fs_req_init(opt.client, 4 + 8, P9_TGETATTR, opt.tag); + v9fs_uint32_write(req, opt.fid); + v9fs_uint64_write(req, opt.request_mask); + v9fs_req_send(req); + + if (!opt.requestOnly) { + v9fs_req_wait_for_reply(req, NULL); + if (opt.expectErr) { + v9fs_rlerror(req, &err); + g_assert_cmpint(err, ==, opt.expectErr); + } else { + v9fs_rgetattr(req, opt.rgetattr.attr); + } + req = NULL; /* request was freed */ + } + + return (TGetAttrRes) { .req = req }; +} + +/* + * size[4] Rgetattr tag[2] valid[8] qid[13] mode[4] uid[4] gid[4] nlink[8] + * rdev[8] size[8] blksize[8] blocks[8] + * atime_sec[8] atime_nsec[8] mtime_sec[8] mtime_nsec[8] + * ctime_sec[8] ctime_nsec[8] btime_sec[8] btime_nsec[8] + * gen[8] data_version[8] + */ +void v9fs_rgetattr(P9Req *req, v9fs_attr *attr) +{ + v9fs_req_recv(req, P9_RGETATTR); + + v9fs_uint64_read(req, &attr->valid); + v9fs_memread(req, &attr->qid, 13); + v9fs_uint32_read(req, &attr->mode); + v9fs_uint32_read(req, &attr->uid); + v9fs_uint32_read(req, &attr->gid); + v9fs_uint64_read(req, &attr->nlink); + v9fs_uint64_read(req, &attr->rdev); + v9fs_uint64_read(req, &attr->size); + v9fs_uint64_read(req, &attr->blksize); + v9fs_uint64_read(req, &attr->blocks); + v9fs_uint64_read(req, &attr->atime_sec); + v9fs_uint64_read(req, &attr->atime_nsec); + v9fs_uint64_read(req, &attr->mtime_sec); + v9fs_uint64_read(req, &attr->mtime_nsec); + v9fs_uint64_read(req, &attr->ctime_sec); + v9fs_uint64_read(req, &attr->ctime_nsec); + v9fs_uint64_read(req, &attr->btime_sec); + v9fs_uint64_read(req, &attr->btime_nsec); + v9fs_uint64_read(req, &attr->gen); + v9fs_uint64_read(req, &attr->data_version); + + v9fs_req_free(req); +} + +/* size[4] Treaddir tag[2] fid[4] offset[8] count[4] */ +TReadDirRes v9fs_treaddir(TReadDirOpt opt) +{ + P9Req *req; + uint32_t err; + + g_assert(opt.client); + /* expecting either Rreaddir or Rlerror, but obviously not both */ + g_assert(!opt.expectErr || !(opt.rreaddir.count || + opt.rreaddir.nentries || opt.rreaddir.entries)); + + req = v9fs_req_init(opt.client, 4 + 8 + 4, P9_TREADDIR, opt.tag); + v9fs_uint32_write(req, opt.fid); + v9fs_uint64_write(req, opt.offset); + v9fs_uint32_write(req, opt.count); + v9fs_req_send(req); + + if (!opt.requestOnly) { + v9fs_req_wait_for_reply(req, NULL); + if (opt.expectErr) { + v9fs_rlerror(req, &err); + g_assert_cmpint(err, ==, opt.expectErr); + } else { + v9fs_rreaddir(req, opt.rreaddir.count, opt.rreaddir.nentries, + opt.rreaddir.entries); + } + req = NULL; /* request was freed */ + } + + return (TReadDirRes) { .req = req }; +} + +/* size[4] Rreaddir tag[2] count[4] data[count] */ +void v9fs_rreaddir(P9Req *req, uint32_t *count, uint32_t *nentries, + struct V9fsDirent **entries) +{ + uint32_t local_count; + struct V9fsDirent *e = NULL; + uint16_t slen; + uint32_t n = 0; + + v9fs_req_recv(req, P9_RREADDIR); + v9fs_uint32_read(req, &local_count); + + if (count) { + *count = local_count; + } + + for (int32_t togo = (int32_t)local_count; + togo >= 13 + 8 + 1 + 2; + togo -= 13 + 8 + 1 + 2 + slen, ++n) + { + if (!e) { + 
e = g_new(struct V9fsDirent, 1); + if (entries) { + *entries = e; + } + } else { + e = e->next = g_new(struct V9fsDirent, 1); + } + e->next = NULL; + /* qid[13] offset[8] type[1] name[s] */ + v9fs_memread(req, &e->qid, 13); + v9fs_uint64_read(req, &e->offset); + v9fs_uint8_read(req, &e->type); + v9fs_string_read(req, &slen, &e->name); + } + + if (nentries) { + *nentries = n; + } + + v9fs_req_free(req); +} + +void v9fs_free_dirents(struct V9fsDirent *e) +{ + struct V9fsDirent *next = NULL; + + for (; e; e = next) { + next = e->next; + g_free(e->name); + g_free(e); + } +} + +/* size[4] Tlopen tag[2] fid[4] flags[4] */ +TLOpenRes v9fs_tlopen(TLOpenOpt opt) +{ + P9Req *req; + uint32_t err; + + g_assert(opt.client); + /* expecting either Rlopen or Rlerror, but obviously not both */ + g_assert(!opt.expectErr || !(opt.rlopen.qid || opt.rlopen.iounit)); + + req = v9fs_req_init(opt.client, 4 + 4, P9_TLOPEN, opt.tag); + v9fs_uint32_write(req, opt.fid); + v9fs_uint32_write(req, opt.flags); + v9fs_req_send(req); + + if (!opt.requestOnly) { + v9fs_req_wait_for_reply(req, NULL); + if (opt.expectErr) { + v9fs_rlerror(req, &err); + g_assert_cmpint(err, ==, opt.expectErr); + } else { + v9fs_rlopen(req, opt.rlopen.qid, opt.rlopen.iounit); + } + req = NULL; /* request was freed */ + } + + return (TLOpenRes) { .req = req }; +} + +/* size[4] Rlopen tag[2] qid[13] iounit[4] */ +void v9fs_rlopen(P9Req *req, v9fs_qid *qid, uint32_t *iounit) +{ + v9fs_req_recv(req, P9_RLOPEN); + if (qid) { + v9fs_memread(req, qid, 13); + } else { + v9fs_memskip(req, 13); + } + if (iounit) { + v9fs_uint32_read(req, iounit); + } + v9fs_req_free(req); +} + +/* size[4] Twrite tag[2] fid[4] offset[8] count[4] data[count] */ +TWriteRes v9fs_twrite(TWriteOpt opt) +{ + P9Req *req; + uint32_t err; + uint32_t body_size = 4 + 8 + 4; + uint32_t written = 0; + + g_assert(opt.client); + + g_assert_cmpint(body_size, <=, UINT32_MAX - opt.count); + body_size += opt.count; + req = v9fs_req_init(opt.client, body_size, P9_TWRITE, opt.tag); + v9fs_uint32_write(req, opt.fid); + v9fs_uint64_write(req, opt.offset); + v9fs_uint32_write(req, opt.count); + v9fs_memwrite(req, opt.data, opt.count); + v9fs_req_send(req); + + if (!opt.requestOnly) { + v9fs_req_wait_for_reply(req, NULL); + if (opt.expectErr) { + v9fs_rlerror(req, &err); + g_assert_cmpint(err, ==, opt.expectErr); + } else { + v9fs_rwrite(req, &written); + } + req = NULL; /* request was freed */ + } + + return (TWriteRes) { + .req = req, + .count = written + }; +} + +/* size[4] Rwrite tag[2] count[4] */ +void v9fs_rwrite(P9Req *req, uint32_t *count) +{ + v9fs_req_recv(req, P9_RWRITE); + if (count) { + v9fs_uint32_read(req, count); + } + v9fs_req_free(req); +} + +/* size[4] Tflush tag[2] oldtag[2] */ +TFlushRes v9fs_tflush(TFlushOpt opt) +{ + P9Req *req; + uint32_t err; + + g_assert(opt.client); + + req = v9fs_req_init(opt.client, 2, P9_TFLUSH, opt.tag); + v9fs_uint32_write(req, opt.oldtag); + v9fs_req_send(req); + + if (!opt.requestOnly) { + v9fs_req_wait_for_reply(req, NULL); + if (opt.expectErr) { + v9fs_rlerror(req, &err); + g_assert_cmpint(err, ==, opt.expectErr); + } else { + v9fs_rflush(req); + } + req = NULL; /* request was freed */ + } + + return (TFlushRes) { .req = req }; +} + +/* size[4] Rflush tag[2] */ +void v9fs_rflush(P9Req *req) +{ + v9fs_req_recv(req, P9_RFLUSH); + v9fs_req_free(req); +} + +/* size[4] Tmkdir tag[2] dfid[4] name[s] mode[4] gid[4] */ +TMkdirRes v9fs_tmkdir(TMkdirOpt opt) +{ + P9Req *req; + uint32_t err; + + g_assert(opt.client); + /* expecting either hi-level atPath 
or low-level dfid, but not both */ + g_assert(!opt.atPath || !opt.dfid); + /* expecting either Rmkdir or Rlerror, but obviously not both */ + g_assert(!opt.expectErr || !opt.rmkdir.qid); + + if (opt.atPath) { + opt.dfid = v9fs_twalk((TWalkOpt) { .client = opt.client, + .path = opt.atPath }).newfid; + } + + if (!opt.mode) { + opt.mode = 0750; + } + + uint32_t body_size = 4 + 4 + 4; + uint16_t string_size = v9fs_string_size(opt.name); + + g_assert_cmpint(body_size, <=, UINT32_MAX - string_size); + body_size += string_size; + + req = v9fs_req_init(opt.client, body_size, P9_TMKDIR, opt.tag); + v9fs_uint32_write(req, opt.dfid); + v9fs_string_write(req, opt.name); + v9fs_uint32_write(req, opt.mode); + v9fs_uint32_write(req, opt.gid); + v9fs_req_send(req); + + if (!opt.requestOnly) { + v9fs_req_wait_for_reply(req, NULL); + if (opt.expectErr) { + v9fs_rlerror(req, &err); + g_assert_cmpint(err, ==, opt.expectErr); + } else { + v9fs_rmkdir(req, opt.rmkdir.qid); + } + req = NULL; /* request was freed */ + } + + return (TMkdirRes) { .req = req }; +} + +/* size[4] Rmkdir tag[2] qid[13] */ +void v9fs_rmkdir(P9Req *req, v9fs_qid *qid) +{ + v9fs_req_recv(req, P9_RMKDIR); + if (qid) { + v9fs_memread(req, qid, 13); + } else { + v9fs_memskip(req, 13); + } + v9fs_req_free(req); +} + +/* size[4] Tlcreate tag[2] fid[4] name[s] flags[4] mode[4] gid[4] */ +TlcreateRes v9fs_tlcreate(TlcreateOpt opt) +{ + P9Req *req; + uint32_t err; + + g_assert(opt.client); + /* expecting either hi-level atPath or low-level fid, but not both */ + g_assert(!opt.atPath || !opt.fid); + /* expecting either Rlcreate or Rlerror, but obviously not both */ + g_assert(!opt.expectErr || !(opt.rlcreate.qid || opt.rlcreate.iounit)); + + if (opt.atPath) { + opt.fid = v9fs_twalk((TWalkOpt) { .client = opt.client, + .path = opt.atPath }).newfid; + } + + if (!opt.mode) { + opt.mode = 0750; + } + + uint32_t body_size = 4 + 4 + 4 + 4; + uint16_t string_size = v9fs_string_size(opt.name); + + g_assert_cmpint(body_size, <=, UINT32_MAX - string_size); + body_size += string_size; + + req = v9fs_req_init(opt.client, body_size, P9_TLCREATE, opt.tag); + v9fs_uint32_write(req, opt.fid); + v9fs_string_write(req, opt.name); + v9fs_uint32_write(req, opt.flags); + v9fs_uint32_write(req, opt.mode); + v9fs_uint32_write(req, opt.gid); + v9fs_req_send(req); + + if (!opt.requestOnly) { + v9fs_req_wait_for_reply(req, NULL); + if (opt.expectErr) { + v9fs_rlerror(req, &err); + g_assert_cmpint(err, ==, opt.expectErr); + } else { + v9fs_rlcreate(req, opt.rlcreate.qid, opt.rlcreate.iounit); + } + req = NULL; /* request was freed */ + } + + return (TlcreateRes) { .req = req }; +} + +/* size[4] Rlcreate tag[2] qid[13] iounit[4] */ +void v9fs_rlcreate(P9Req *req, v9fs_qid *qid, uint32_t *iounit) +{ + v9fs_req_recv(req, P9_RLCREATE); + if (qid) { + v9fs_memread(req, qid, 13); + } else { + v9fs_memskip(req, 13); + } + if (iounit) { + v9fs_uint32_read(req, iounit); + } + v9fs_req_free(req); +} + +/* size[4] Tsymlink tag[2] fid[4] name[s] symtgt[s] gid[4] */ +TsymlinkRes v9fs_tsymlink(TsymlinkOpt opt) +{ + P9Req *req; + uint32_t err; + + g_assert(opt.client); + /* expecting either hi-level atPath or low-level fid, but not both */ + g_assert(!opt.atPath || !opt.fid); + /* expecting either Rsymlink or Rlerror, but obviously not both */ + g_assert(!opt.expectErr || !opt.rsymlink.qid); + + if (opt.atPath) { + opt.fid = v9fs_twalk((TWalkOpt) { .client = opt.client, + .path = opt.atPath }).newfid; + } + + uint32_t body_size = 4 + 4; + uint16_t string_size = v9fs_string_size(opt.name) 
+ + v9fs_string_size(opt.symtgt); + + g_assert_cmpint(body_size, <=, UINT32_MAX - string_size); + body_size += string_size; + + req = v9fs_req_init(opt.client, body_size, P9_TSYMLINK, opt.tag); + v9fs_uint32_write(req, opt.fid); + v9fs_string_write(req, opt.name); + v9fs_string_write(req, opt.symtgt); + v9fs_uint32_write(req, opt.gid); + v9fs_req_send(req); + + if (!opt.requestOnly) { + v9fs_req_wait_for_reply(req, NULL); + if (opt.expectErr) { + v9fs_rlerror(req, &err); + g_assert_cmpint(err, ==, opt.expectErr); + } else { + v9fs_rsymlink(req, opt.rsymlink.qid); + } + req = NULL; /* request was freed */ + } + + return (TsymlinkRes) { .req = req }; +} + +/* size[4] Rsymlink tag[2] qid[13] */ +void v9fs_rsymlink(P9Req *req, v9fs_qid *qid) +{ + v9fs_req_recv(req, P9_RSYMLINK); + if (qid) { + v9fs_memread(req, qid, 13); + } else { + v9fs_memskip(req, 13); + } + v9fs_req_free(req); +} + +/* size[4] Tlink tag[2] dfid[4] fid[4] name[s] */ +TlinkRes v9fs_tlink(TlinkOpt opt) +{ + P9Req *req; + uint32_t err; + + g_assert(opt.client); + /* expecting either hi-level atPath or low-level dfid, but not both */ + g_assert(!opt.atPath || !opt.dfid); + /* expecting either hi-level toPath or low-level fid, but not both */ + g_assert(!opt.toPath || !opt.fid); + + if (opt.atPath) { + opt.dfid = v9fs_twalk((TWalkOpt) { .client = opt.client, + .path = opt.atPath }).newfid; + } + if (opt.toPath) { + opt.fid = v9fs_twalk((TWalkOpt) { .client = opt.client, + .path = opt.toPath }).newfid; + } + + uint32_t body_size = 4 + 4; + uint16_t string_size = v9fs_string_size(opt.name); + + g_assert_cmpint(body_size, <=, UINT32_MAX - string_size); + body_size += string_size; + + req = v9fs_req_init(opt.client, body_size, P9_TLINK, opt.tag); + v9fs_uint32_write(req, opt.dfid); + v9fs_uint32_write(req, opt.fid); + v9fs_string_write(req, opt.name); + v9fs_req_send(req); + + if (!opt.requestOnly) { + v9fs_req_wait_for_reply(req, NULL); + if (opt.expectErr) { + v9fs_rlerror(req, &err); + g_assert_cmpint(err, ==, opt.expectErr); + } else { + v9fs_rlink(req); + } + req = NULL; /* request was freed */ + } + + return (TlinkRes) { .req = req }; +} + +/* size[4] Rlink tag[2] */ +void v9fs_rlink(P9Req *req) +{ + v9fs_req_recv(req, P9_RLINK); + v9fs_req_free(req); +} + +/* size[4] Tunlinkat tag[2] dirfd[4] name[s] flags[4] */ +TunlinkatRes v9fs_tunlinkat(TunlinkatOpt opt) +{ + P9Req *req; + uint32_t err; + + g_assert(opt.client); + /* expecting either hi-level atPath or low-level dirfd, but not both */ + g_assert(!opt.atPath || !opt.dirfd); + + if (opt.atPath) { + opt.dirfd = v9fs_twalk((TWalkOpt) { .client = opt.client, + .path = opt.atPath }).newfid; + } + + uint32_t body_size = 4 + 4; + uint16_t string_size = v9fs_string_size(opt.name); + + g_assert_cmpint(body_size, <=, UINT32_MAX - string_size); + body_size += string_size; + + req = v9fs_req_init(opt.client, body_size, P9_TUNLINKAT, opt.tag); + v9fs_uint32_write(req, opt.dirfd); + v9fs_string_write(req, opt.name); + v9fs_uint32_write(req, opt.flags); + v9fs_req_send(req); + + if (!opt.requestOnly) { + v9fs_req_wait_for_reply(req, NULL); + if (opt.expectErr) { + v9fs_rlerror(req, &err); + g_assert_cmpint(err, ==, opt.expectErr); + } else { + v9fs_runlinkat(req); + } + req = NULL; /* request was freed */ + } + + return (TunlinkatRes) { .req = req }; +} + +/* size[4] Runlinkat tag[2] */ +void v9fs_runlinkat(P9Req *req) +{ + v9fs_req_recv(req, P9_RUNLINKAT); + v9fs_req_free(req); +} diff --git a/tests/qtest/libqos/virtio-9p-client.h b/tests/qtest/libqos/virtio-9p-client.h new file mode 
100644 index 0000000000..78228eb97d --- /dev/null +++ b/tests/qtest/libqos/virtio-9p-client.h @@ -0,0 +1,494 @@ +/* + * 9P network client for VirtIO 9P test cases (based on QTest) + * + * Copyright (c) 2014 SUSE LINUX Products GmbH + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +/* + * Not so fast! You might want to read the 9p developer docs first: + * https://wiki.qemu.org/Documentation/9p + */ + +#ifndef TESTS_LIBQOS_VIRTIO_9P_CLIENT_H +#define TESTS_LIBQOS_VIRTIO_9P_CLIENT_H + +#include "hw/9pfs/9p.h" +#include "hw/9pfs/9p-synth.h" +#include "virtio-9p.h" +#include "qgraph.h" +#include "tests/qtest/libqtest-single.h" + +#define P9_MAX_SIZE 4096 /* Max size of a T-message or R-message */ + +typedef struct { + QTestState *qts; + QVirtio9P *v9p; + uint16_t tag; + uint64_t t_msg; + uint32_t t_size; + uint64_t r_msg; + /* No r_size, it is hardcoded to P9_MAX_SIZE */ + size_t t_off; + size_t r_off; + uint32_t free_head; +} P9Req; + +/* type[1] version[4] path[8] */ +typedef char v9fs_qid[13]; + +typedef struct v9fs_attr { + uint64_t valid; + v9fs_qid qid; + uint32_t mode; + uint32_t uid; + uint32_t gid; + uint64_t nlink; + uint64_t rdev; + uint64_t size; + uint64_t blksize; + uint64_t blocks; + uint64_t atime_sec; + uint64_t atime_nsec; + uint64_t mtime_sec; + uint64_t mtime_nsec; + uint64_t ctime_sec; + uint64_t ctime_nsec; + uint64_t btime_sec; + uint64_t btime_nsec; + uint64_t gen; + uint64_t data_version; +} v9fs_attr; + +#define P9_GETATTR_BASIC 0x000007ffULL /* Mask for fields up to BLOCKS */ +#define P9_GETATTR_ALL 0x00003fffULL /* Mask for ALL fields */ + +struct V9fsDirent { + v9fs_qid qid; + uint64_t offset; + uint8_t type; + char *name; + struct V9fsDirent *next; +}; + +/* options for 'Twalk' 9p request */ +typedef struct TWalkOpt { + /* 9P client being used (mandatory) */ + QVirtio9P *client; + /* user supplied tag number being returned with response (optional) */ + uint16_t tag; + /* file ID of directory from where walk should start (optional) */ + uint32_t fid; + /* file ID for target directory being walked to (optional) */ + uint32_t newfid; + /* low level variant of path to walk to (optional) */ + uint16_t nwname; + char **wnames; + /* high level variant of path to walk to (optional) */ + const char *path; + /* data being received from 9p server as 'Rwalk' response (optional) */ + struct { + uint16_t *nwqid; + v9fs_qid **wqid; + } rwalk; + /* only send Twalk request but not wait for a reply? (optional) */ + bool requestOnly; + /* do we expect an Rlerror response, if yes which error code? (optional) */ + uint32_t expectErr; +} TWalkOpt; + +/* result of 'Twalk' 9p request */ +typedef struct TWalkRes { + /* file ID of target directory been walked to */ + uint32_t newfid; + /* if requestOnly was set: request object for further processing */ + P9Req *req; +} TWalkRes; + +/* options for 'Tversion' 9p request */ +typedef struct TVersionOpt { + /* 9P client being used (mandatory) */ + QVirtio9P *client; + /* user supplied tag number being returned with response (optional) */ + uint16_t tag; + /* maximum message size that can be handled by client (optional) */ + uint32_t msize; + /* protocol version (optional) */ + const char *version; + /* only send Tversion request but not wait for a reply? (optional) */ + bool requestOnly; + /* do we expect an Rlerror response, if yes which error code? 
(optional) */ + uint32_t expectErr; +} TVersionOpt; + +/* result of 'Tversion' 9p request */ +typedef struct TVersionRes { + /* if requestOnly was set: request object for further processing */ + P9Req *req; +} TVersionRes; + +/* options for 'Tattach' 9p request */ +typedef struct TAttachOpt { + /* 9P client being used (mandatory) */ + QVirtio9P *client; + /* user supplied tag number being returned with response (optional) */ + uint16_t tag; + /* file ID to be associated with root of file tree (optional) */ + uint32_t fid; + /* numerical uid of user being introduced to server (optional) */ + uint32_t n_uname; + /* data being received from 9p server as 'Rattach' response (optional) */ + struct { + /* server's idea of the root of the file tree */ + v9fs_qid *qid; + } rattach; + /* only send Tattach request but not wait for a reply? (optional) */ + bool requestOnly; + /* do we expect an Rlerror response, if yes which error code? (optional) */ + uint32_t expectErr; +} TAttachOpt; + +/* result of 'Tattach' 9p request */ +typedef struct TAttachRes { + /* if requestOnly was set: request object for further processing */ + P9Req *req; +} TAttachRes; + +/* options for 'Tgetattr' 9p request */ +typedef struct TGetAttrOpt { + /* 9P client being used (mandatory) */ + QVirtio9P *client; + /* user supplied tag number being returned with response (optional) */ + uint16_t tag; + /* file ID of file/dir whose attributes shall be retrieved (required) */ + uint32_t fid; + /* bitmask indicating attribute fields to be retrieved (optional) */ + uint64_t request_mask; + /* data being received from 9p server as 'Rgetattr' response (optional) */ + struct { + v9fs_attr *attr; + } rgetattr; + /* only send Tgetattr request but not wait for a reply? (optional) */ + bool requestOnly; + /* do we expect an Rlerror response, if yes which error code? (optional) */ + uint32_t expectErr; +} TGetAttrOpt; + +/* result of 'Tgetattr' 9p request */ +typedef struct TGetAttrRes { + /* if requestOnly was set: request object for further processing */ + P9Req *req; +} TGetAttrRes; + +/* options for 'Treaddir' 9p request */ +typedef struct TReadDirOpt { + /* 9P client being used (mandatory) */ + QVirtio9P *client; + /* user supplied tag number being returned with response (optional) */ + uint16_t tag; + /* file ID of directory whose entries shall be retrieved (required) */ + uint32_t fid; + /* offset in entries stream, i.e. for multiple requests (optional) */ + uint64_t offset; + /* maximum bytes to be returned by server (required) */ + uint32_t count; + /* data being received from 9p server as 'Rreaddir' response (optional) */ + struct { + uint32_t *count; + uint32_t *nentries; + struct V9fsDirent **entries; + } rreaddir; + /* only send Treaddir request but not wait for a reply? (optional) */ + bool requestOnly; + /* do we expect an Rlerror response, if yes which error code? 
(optional) */ + uint32_t expectErr; +} TReadDirOpt; + +/* result of 'Treaddir' 9p request */ +typedef struct TReadDirRes { + /* if requestOnly was set: request object for further processing */ + P9Req *req; +} TReadDirRes; + +/* options for 'Tlopen' 9p request */ +typedef struct TLOpenOpt { + /* 9P client being used (mandatory) */ + QVirtio9P *client; + /* user supplied tag number being returned with response (optional) */ + uint16_t tag; + /* file ID of file / directory to be opened (required) */ + uint32_t fid; + /* Linux open(2) flags such as O_RDONLY, O_RDWR, O_WRONLY (optional) */ + uint32_t flags; + /* data being received from 9p server as 'Rlopen' response (optional) */ + struct { + v9fs_qid *qid; + uint32_t *iounit; + } rlopen; + /* only send Tlopen request but not wait for a reply? (optional) */ + bool requestOnly; + /* do we expect an Rlerror response, if yes which error code? (optional) */ + uint32_t expectErr; +} TLOpenOpt; + +/* result of 'Tlopen' 9p request */ +typedef struct TLOpenRes { + /* if requestOnly was set: request object for further processing */ + P9Req *req; +} TLOpenRes; + +/* options for 'Twrite' 9p request */ +typedef struct TWriteOpt { + /* 9P client being used (mandatory) */ + QVirtio9P *client; + /* user supplied tag number being returned with response (optional) */ + uint16_t tag; + /* file ID of file to write to (required) */ + uint32_t fid; + /* start position of write from beginning of file (optional) */ + uint64_t offset; + /* how many bytes to write */ + uint32_t count; + /* data to be written */ + const void *data; + /* only send Twrite request but not wait for a reply? (optional) */ + bool requestOnly; + /* do we expect an Rlerror response, if yes which error code? (optional) */ + uint32_t expectErr; +} TWriteOpt; + +/* result of 'Twrite' 9p request */ +typedef struct TWriteRes { + /* if requestOnly was set: request object for further processing */ + P9Req *req; + /* amount of bytes written */ + uint32_t count; +} TWriteRes; + +/* options for 'Tflush' 9p request */ +typedef struct TFlushOpt { + /* 9P client being used (mandatory) */ + QVirtio9P *client; + /* user supplied tag number being returned with response (optional) */ + uint16_t tag; + /* message to flush (required) */ + uint16_t oldtag; + /* only send Tflush request but not wait for a reply? (optional) */ + bool requestOnly; + /* do we expect an Rlerror response, if yes which error code? (optional) */ + uint32_t expectErr; +} TFlushOpt; + +/* result of 'Tflush' 9p request */ +typedef struct TFlushRes { + /* if requestOnly was set: request object for further processing */ + P9Req *req; +} TFlushRes; + +/* options for 'Tmkdir' 9p request */ +typedef struct TMkdirOpt { + /* 9P client being used (mandatory) */ + QVirtio9P *client; + /* user supplied tag number being returned with response (optional) */ + uint16_t tag; + /* low level variant of directory where new one shall be created */ + uint32_t dfid; + /* high-level variant of directory where new one shall be created */ + const char *atPath; + /* New directory's name (required) */ + const char *name; + /* Linux mkdir(2) mode bits (optional) */ + uint32_t mode; + /* effective group ID of caller */ + uint32_t gid; + /* data being received from 9p server as 'Rmkdir' response (optional) */ + struct { + /* QID of newly created directory */ + v9fs_qid *qid; + } rmkdir; + /* only send Tmkdir request but not wait for a reply? (optional) */ + bool requestOnly; + /* do we expect an Rlerror response, if yes which error code? 
(optional) */ + uint32_t expectErr; +} TMkdirOpt; + +/* result of 'TMkdir' 9p request */ +typedef struct TMkdirRes { + /* if requestOnly was set: request object for further processing */ + P9Req *req; +} TMkdirRes; + +/* options for 'Tlcreate' 9p request */ +typedef struct TlcreateOpt { + /* 9P client being used (mandatory) */ + QVirtio9P *client; + /* user supplied tag number being returned with response (optional) */ + uint16_t tag; + /* low-level variant of directory where new file shall be created */ + uint32_t fid; + /* high-level variant of directory where new file shall be created */ + const char *atPath; + /* name of new file (required) */ + const char *name; + /* Linux kernel intent bits */ + uint32_t flags; + /* Linux create(2) mode bits */ + uint32_t mode; + /* effective group ID of caller */ + uint32_t gid; + /* data being received from 9p server as 'Rlcreate' response (optional) */ + struct { + v9fs_qid *qid; + uint32_t *iounit; + } rlcreate; + /* only send Tlcreate request but not wait for a reply? (optional) */ + bool requestOnly; + /* do we expect an Rlerror response, if yes which error code? (optional) */ + uint32_t expectErr; +} TlcreateOpt; + +/* result of 'Tlcreate' 9p request */ +typedef struct TlcreateRes { + /* if requestOnly was set: request object for further processing */ + P9Req *req; +} TlcreateRes; + +/* options for 'Tsymlink' 9p request */ +typedef struct TsymlinkOpt { + /* 9P client being used (mandatory) */ + QVirtio9P *client; + /* user supplied tag number being returned with response (optional) */ + uint16_t tag; + /* low-level variant of directory where symlink shall be created */ + uint32_t fid; + /* high-level variant of directory where symlink shall be created */ + const char *atPath; + /* name of symlink (required) */ + const char *name; + /* where symlink will point to (required) */ + const char *symtgt; + /* effective group ID of caller */ + uint32_t gid; + /* data being received from 9p server as 'Rsymlink' response (optional) */ + struct { + v9fs_qid *qid; + } rsymlink; + /* only send Tsymlink request but not wait for a reply? (optional) */ + bool requestOnly; + /* do we expect an Rlerror response, if yes which error code? (optional) */ + uint32_t expectErr; +} TsymlinkOpt; + +/* result of 'Tsymlink' 9p request */ +typedef struct TsymlinkRes { + /* if requestOnly was set: request object for further processing */ + P9Req *req; +} TsymlinkRes; + +/* options for 'Tlink' 9p request */ +typedef struct TlinkOpt { + /* 9P client being used (mandatory) */ + QVirtio9P *client; + /* user supplied tag number being returned with response (optional) */ + uint16_t tag; + /* low-level variant of directory where hard link shall be created */ + uint32_t dfid; + /* high-level variant of directory where hard link shall be created */ + const char *atPath; + /* low-level variant of target referenced by new hard link */ + uint32_t fid; + /* high-level variant of target referenced by new hard link */ + const char *toPath; + /* name of hard link (required) */ + const char *name; + /* only send Tlink request but not wait for a reply? (optional) */ + bool requestOnly; + /* do we expect an Rlerror response, if yes which error code? 
(optional) */ + uint32_t expectErr; +} TlinkOpt; + +/* result of 'Tlink' 9p request */ +typedef struct TlinkRes { + /* if requestOnly was set: request object for further processing */ + P9Req *req; +} TlinkRes; + +/* options for 'Tunlinkat' 9p request */ +typedef struct TunlinkatOpt { + /* 9P client being used (mandatory) */ + QVirtio9P *client; + /* user supplied tag number being returned with response (optional) */ + uint16_t tag; + /* low-level variant of directory where name shall be unlinked */ + uint32_t dirfd; + /* high-level variant of directory where name shall be unlinked */ + const char *atPath; + /* name of directory entry to be unlinked (required) */ + const char *name; + /* Linux unlinkat(2) flags */ + uint32_t flags; + /* only send Tunlinkat request but not wait for a reply? (optional) */ + bool requestOnly; + /* do we expect an Rlerror response, if yes which error code? (optional) */ + uint32_t expectErr; +} TunlinkatOpt; + +/* result of 'Tunlinkat' 9p request */ +typedef struct TunlinkatRes { + /* if requestOnly was set: request object for further processing */ + P9Req *req; +} TunlinkatRes; + +void v9fs_set_allocator(QGuestAllocator *t_alloc); +void v9fs_memwrite(P9Req *req, const void *addr, size_t len); +void v9fs_memskip(P9Req *req, size_t len); +void v9fs_memread(P9Req *req, void *addr, size_t len); +void v9fs_uint8_read(P9Req *req, uint8_t *val); +void v9fs_uint16_write(P9Req *req, uint16_t val); +void v9fs_uint16_read(P9Req *req, uint16_t *val); +void v9fs_uint32_write(P9Req *req, uint32_t val); +void v9fs_uint64_write(P9Req *req, uint64_t val); +void v9fs_uint32_read(P9Req *req, uint32_t *val); +void v9fs_uint64_read(P9Req *req, uint64_t *val); +uint16_t v9fs_string_size(const char *string); +void v9fs_string_write(P9Req *req, const char *string); +void v9fs_string_read(P9Req *req, uint16_t *len, char **string); +P9Req *v9fs_req_init(QVirtio9P *v9p, uint32_t size, uint8_t id, + uint16_t tag); +void v9fs_req_send(P9Req *req); +void v9fs_req_wait_for_reply(P9Req *req, uint32_t *len); +void v9fs_req_recv(P9Req *req, uint8_t id); +void v9fs_req_free(P9Req *req); +void v9fs_rlerror(P9Req *req, uint32_t *err); +TVersionRes v9fs_tversion(TVersionOpt); +void v9fs_rversion(P9Req *req, uint16_t *len, char **version); +TAttachRes v9fs_tattach(TAttachOpt); +void v9fs_rattach(P9Req *req, v9fs_qid *qid); +TWalkRes v9fs_twalk(TWalkOpt opt); +void v9fs_rwalk(P9Req *req, uint16_t *nwqid, v9fs_qid **wqid); +TGetAttrRes v9fs_tgetattr(TGetAttrOpt); +void v9fs_rgetattr(P9Req *req, v9fs_attr *attr); +TReadDirRes v9fs_treaddir(TReadDirOpt); +void v9fs_rreaddir(P9Req *req, uint32_t *count, uint32_t *nentries, + struct V9fsDirent **entries); +void v9fs_free_dirents(struct V9fsDirent *e); +TLOpenRes v9fs_tlopen(TLOpenOpt); +void v9fs_rlopen(P9Req *req, v9fs_qid *qid, uint32_t *iounit); +TWriteRes v9fs_twrite(TWriteOpt); +void v9fs_rwrite(P9Req *req, uint32_t *count); +TFlushRes v9fs_tflush(TFlushOpt); +void v9fs_rflush(P9Req *req); +TMkdirRes v9fs_tmkdir(TMkdirOpt); +void v9fs_rmkdir(P9Req *req, v9fs_qid *qid); +TlcreateRes v9fs_tlcreate(TlcreateOpt); +void v9fs_rlcreate(P9Req *req, v9fs_qid *qid, uint32_t *iounit); +TsymlinkRes v9fs_tsymlink(TsymlinkOpt); +void v9fs_rsymlink(P9Req *req, v9fs_qid *qid); +TlinkRes v9fs_tlink(TlinkOpt); +void v9fs_rlink(P9Req *req); +TunlinkatRes v9fs_tunlinkat(TunlinkatOpt); +void v9fs_runlinkat(P9Req *req); + +#endif diff --git a/tests/qtest/libqos/virtio-9p.c b/tests/qtest/libqos/virtio-9p.c index b4e1143288..7f21028256 100644 --- 
a/tests/qtest/libqos/virtio-9p.c +++ b/tests/qtest/libqos/virtio-9p.c @@ -22,7 +22,7 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qemu/module.h" #include "standard-headers/linux/virtio_ids.h" #include "virtio-9p.h" @@ -31,37 +31,29 @@ static QGuestAllocator *alloc; static char *local_test_path; -/* Concatenates the passed 2 pathes. Returned result must be freed. */ +/* Concatenates the passed 2 paths. Returned result must be freed. */ static char *concat_path(const char* a, const char* b) { return g_build_filename(a, b, NULL); } -static void init_local_test_path(void) -{ - char *pwd = g_get_current_dir(); - char *template = concat_path(pwd, "qtest-9p-local-XXXXXX"); - local_test_path = mkdtemp(template); - if (!local_test_path) { - g_test_message("mkdtemp('%s') failed: %s", template, strerror(errno)); - } - g_assert(local_test_path); - g_free(pwd); -} - void virtio_9p_create_local_test_dir(void) { + g_assert(local_test_path == NULL); struct stat st; - int res; + g_autofree char *pwd = g_get_current_dir(); + /* + * template gets cached into local_test_path and freed in + * virtio_9p_remove_local_test_dir(). + */ + char *template = concat_path(pwd, "qtest-9p-local-XXXXXX"); - init_local_test_path(); + local_test_path = g_mkdtemp(template); + if (!local_test_path) { + g_test_message("g_mkdtemp('%s') failed: %s", template, strerror(errno)); + } g_assert(local_test_path != NULL); - res = mkdir(local_test_path, 0777); - if (res < 0) { - g_test_message("mkdir('%s') failed: %s", local_test_path, - strerror(errno)); - } /* ensure test directory exists now ... */ g_assert(stat(local_test_path, &st) == 0); @@ -72,12 +64,13 @@ void virtio_9p_create_local_test_dir(void) void virtio_9p_remove_local_test_dir(void) { g_assert(local_test_path != NULL); - char *cmd = g_strdup_printf("rm -fr '%s'\n", local_test_path); + g_autofree char *cmd = g_strdup_printf("rm -fr '%s'\n", local_test_path); int res = system(cmd); if (res < 0) { /* ignore error, dummy check to prevent compiler error */ } - g_free(cmd); + g_free(local_test_path); + local_test_path = NULL; } char *virtio_9p_test_path(const char *path) @@ -221,8 +214,8 @@ static void *virtio_9p_pci_create(void *pci_bus, QGuestAllocator *t_alloc, static void regex_replace(GString *haystack, const char *pattern, const char *replace_fmt, ...) 
{ - GRegex *regex; - char *replace, *s; + g_autoptr(GRegex) regex = NULL; + g_autofree char *replace = NULL, *s = NULL; va_list argp; va_start(argp, replace_fmt); @@ -232,9 +225,6 @@ static void regex_replace(GString *haystack, const char *pattern, regex = g_regex_new(pattern, 0, 0, NULL); s = g_regex_replace(regex, haystack->str, -1, 0, replace, 0, NULL); g_string_assign(haystack, s); - g_free(s); - g_regex_unref(regex); - g_free(replace); } void virtio_9p_assign_local_driver(GString *cmd_line, const char *args) diff --git a/tests/qtest/libqos/virtio-balloon.c b/tests/qtest/libqos/virtio-balloon.c index a3da5c234d..29b5d17584 100644 --- a/tests/qtest/libqos/virtio-balloon.c +++ b/tests/qtest/libqos/virtio-balloon.c @@ -17,7 +17,7 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qemu/module.h" #include "qgraph.h" #include "virtio-balloon.h" diff --git a/tests/qtest/libqos/virtio-blk.c b/tests/qtest/libqos/virtio-blk.c index 5da02591bc..ee4943f32b 100644 --- a/tests/qtest/libqos/virtio-blk.c +++ b/tests/qtest/libqos/virtio-blk.c @@ -17,7 +17,7 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qemu/module.h" #include "standard-headers/linux/virtio_blk.h" #include "qgraph.h" diff --git a/tests/qtest/libqos/virtio-gpio.c b/tests/qtest/libqos/virtio-gpio.c new file mode 100644 index 0000000000..f22d7b5eb5 --- /dev/null +++ b/tests/qtest/libqos/virtio-gpio.c @@ -0,0 +1,172 @@ +/* + * virtio-gpio nodes for testing + * + * Copyright (c) 2022 Linaro Ltd + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "standard-headers/linux/virtio_config.h" +#include "../libqtest.h" +#include "qemu/module.h" +#include "qgraph.h" +#include "virtio-gpio.h" + +static QGuestAllocator *alloc; + +static void virtio_gpio_cleanup(QVhostUserGPIO *gpio) +{ + QVirtioDevice *vdev = gpio->vdev; + int i; + + for (i = 0; i < 2; i++) { + qvirtqueue_cleanup(vdev->bus, gpio->queues[i], alloc); + } + g_free(gpio->queues); +} + +/* + * This handles the VirtIO setup from the point of view of the driver + * frontend and therefor doesn't present any vhost specific features + * and in fact masks of the re-used bit. 
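To spell out what "masks off the re-used bit" means in practice (illustrative sketch, not part of the diff): every libqos front-end clears QVIRTIO_F_BAD_FEATURE before acknowledging the feature set, which the assertion added to qvirtio_set_features() later in this patch now enforces. The queue index below is arbitrary; vdev and alloc would come from the qos framework:

    /* Sketch of the common bring-up sequence used by the *_setup() helpers. */
    static QVirtQueue *bring_up_sketch(QVirtioDevice *vdev, QGuestAllocator *alloc)
    {
        uint64_t features = qvirtio_get_features(vdev);

        features &= ~QVIRTIO_F_BAD_FEATURE;   /* required: set_features() asserts this */
        qvirtio_set_features(vdev, features); /* also sets FEATURES_OK on VIRTIO 1.0 devices */

        QVirtQueue *vq = qvirtqueue_setup(vdev, alloc, 0); /* ring init now zeroes used->idx too */
        qvirtio_set_driver_ok(vdev);
        return vq;
    }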
+ */ +static void virtio_gpio_setup(QVhostUserGPIO *gpio) +{ + QVirtioDevice *vdev = gpio->vdev; + uint64_t features; + int i; + + features = qvirtio_get_features(vdev); + features &= ~QVIRTIO_F_BAD_FEATURE; + qvirtio_set_features(vdev, features); + + gpio->queues = g_new(QVirtQueue *, 2); + for (i = 0; i < 2; i++) { + gpio->queues[i] = qvirtqueue_setup(vdev, alloc, i); + } + qvirtio_set_driver_ok(vdev); +} + +static void *qvirtio_gpio_get_driver(QVhostUserGPIO *v_gpio, + const char *interface) +{ + if (!g_strcmp0(interface, "vhost-user-gpio")) { + return v_gpio; + } + if (!g_strcmp0(interface, "virtio")) { + return v_gpio->vdev; + } + + g_assert_not_reached(); +} + +static void *qvirtio_gpio_device_get_driver(void *object, + const char *interface) +{ + QVhostUserGPIODevice *v_gpio = object; + return qvirtio_gpio_get_driver(&v_gpio->gpio, interface); +} + +/* virtio-gpio (mmio) */ +static void qvirtio_gpio_device_destructor(QOSGraphObject *obj) +{ + QVhostUserGPIODevice *gpio_dev = (QVhostUserGPIODevice *) obj; + virtio_gpio_cleanup(&gpio_dev->gpio); +} + +static void qvirtio_gpio_device_start_hw(QOSGraphObject *obj) +{ + QVhostUserGPIODevice *gpio_dev = (QVhostUserGPIODevice *) obj; + virtio_gpio_setup(&gpio_dev->gpio); +} + +static void *virtio_gpio_device_create(void *virtio_dev, + QGuestAllocator *t_alloc, + void *addr) +{ + QVhostUserGPIODevice *virtio_device = g_new0(QVhostUserGPIODevice, 1); + QVhostUserGPIO *interface = &virtio_device->gpio; + + interface->vdev = virtio_dev; + alloc = t_alloc; + + virtio_device->obj.get_driver = qvirtio_gpio_device_get_driver; + virtio_device->obj.start_hw = qvirtio_gpio_device_start_hw; + virtio_device->obj.destructor = qvirtio_gpio_device_destructor; + + return &virtio_device->obj; +} + +/* virtio-gpio-pci */ +static void qvirtio_gpio_pci_destructor(QOSGraphObject *obj) +{ + QVhostUserGPIOPCI *gpio_pci = (QVhostUserGPIOPCI *) obj; + QOSGraphObject *pci_vobj = &gpio_pci->pci_vdev.obj; + + virtio_gpio_cleanup(&gpio_pci->gpio); + qvirtio_pci_destructor(pci_vobj); +} + +static void qvirtio_gpio_pci_start_hw(QOSGraphObject *obj) +{ + QVhostUserGPIOPCI *gpio_pci = (QVhostUserGPIOPCI *) obj; + QOSGraphObject *pci_vobj = &gpio_pci->pci_vdev.obj; + + qvirtio_pci_start_hw(pci_vobj); + virtio_gpio_setup(&gpio_pci->gpio); +} + +static void *qvirtio_gpio_pci_get_driver(void *object, const char *interface) +{ + QVhostUserGPIOPCI *v_gpio = object; + + if (!g_strcmp0(interface, "pci-device")) { + return v_gpio->pci_vdev.pdev; + } + return qvirtio_gpio_get_driver(&v_gpio->gpio, interface); +} + +static void *virtio_gpio_pci_create(void *pci_bus, QGuestAllocator *t_alloc, + void *addr) +{ + QVhostUserGPIOPCI *virtio_spci = g_new0(QVhostUserGPIOPCI, 1); + QVhostUserGPIO *interface = &virtio_spci->gpio; + QOSGraphObject *obj = &virtio_spci->pci_vdev.obj; + + virtio_pci_init(&virtio_spci->pci_vdev, pci_bus, addr); + interface->vdev = &virtio_spci->pci_vdev.vdev; + alloc = t_alloc; + + obj->get_driver = qvirtio_gpio_pci_get_driver; + obj->start_hw = qvirtio_gpio_pci_start_hw; + obj->destructor = qvirtio_gpio_pci_destructor; + + return obj; +} + +static void virtio_gpio_register_nodes(void) +{ + QPCIAddress addr = { + .devfn = QPCI_DEVFN(4, 0), + }; + + QOSGraphEdgeOptions edge_opts = { }; + + /* vhost-user-gpio-device */ + edge_opts.extra_device_opts = "id=gpio0,chardev=chr-vhost-user-test " + "-global virtio-mmio.force-legacy=false"; + qos_node_create_driver("vhost-user-gpio-device", + virtio_gpio_device_create); + qos_node_consumes("vhost-user-gpio-device", 
"virtio-bus", &edge_opts); + qos_node_produces("vhost-user-gpio-device", "vhost-user-gpio"); + + /* virtio-gpio-pci */ + edge_opts.extra_device_opts = "id=gpio0,addr=04.0,chardev=chr-vhost-user-test"; + add_qpci_address(&edge_opts, &addr); + qos_node_create_driver("vhost-user-gpio-pci", virtio_gpio_pci_create); + qos_node_consumes("vhost-user-gpio-pci", "pci-bus", &edge_opts); + qos_node_produces("vhost-user-gpio-pci", "vhost-user-gpio"); +} + +libqos_init(virtio_gpio_register_nodes); diff --git a/tests/qtest/libqos/virtio-gpio.h b/tests/qtest/libqos/virtio-gpio.h new file mode 100644 index 0000000000..f11d41bd19 --- /dev/null +++ b/tests/qtest/libqos/virtio-gpio.h @@ -0,0 +1,35 @@ +/* + * virtio-gpio structures + * + * Copyright (c) 2022 Linaro Ltd + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef TESTS_LIBQOS_VIRTIO_GPIO_H +#define TESTS_LIBQOS_VIRTIO_GPIO_H + +#include "qgraph.h" +#include "virtio.h" +#include "virtio-pci.h" + +typedef struct QVhostUserGPIO QVhostUserGPIO; +typedef struct QVhostUserGPIOPCI QVhostUserGPIOPCI; +typedef struct QVhostUserGPIODevice QVhostUserGPIODevice; + +struct QVhostUserGPIO { + QVirtioDevice *vdev; + QVirtQueue **queues; +}; + +struct QVhostUserGPIOPCI { + QVirtioPCIDevice pci_vdev; + QVhostUserGPIO gpio; +}; + +struct QVhostUserGPIODevice { + QOSGraphObject obj; + QVhostUserGPIO gpio; +}; + +#endif diff --git a/tests/qtest/libqos/virtio-iommu.c b/tests/qtest/libqos/virtio-iommu.c new file mode 100644 index 0000000000..afc7d14e9a --- /dev/null +++ b/tests/qtest/libqos/virtio-iommu.c @@ -0,0 +1,126 @@ +/* + * libqos driver virtio-iommu-pci framework + * + * Copyright (c) 2021 Red Hat, Inc. + * + * Authors: + * Eric Auger + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "../libqtest.h" +#include "qemu/module.h" +#include "qgraph.h" +#include "virtio-iommu.h" +#include "hw/virtio/virtio-iommu.h" + +static QGuestAllocator *alloc; + +/* virtio-iommu-device */ +static void *qvirtio_iommu_get_driver(QVirtioIOMMU *v_iommu, + const char *interface) +{ + if (!g_strcmp0(interface, "virtio-iommu")) { + return v_iommu; + } + if (!g_strcmp0(interface, "virtio")) { + return v_iommu->vdev; + } + + fprintf(stderr, "%s not present in virtio-iommu-device\n", interface); + g_assert_not_reached(); +} + +static void virtio_iommu_cleanup(QVirtioIOMMU *interface) +{ + qvirtqueue_cleanup(interface->vdev->bus, interface->vq, alloc); +} + +static void virtio_iommu_setup(QVirtioIOMMU *interface) +{ + QVirtioDevice *vdev = interface->vdev; + uint64_t features; + + features = qvirtio_get_features(vdev); + features &= ~(QVIRTIO_F_BAD_FEATURE | + (1ull << VIRTIO_RING_F_INDIRECT_DESC) | + (1ull << VIRTIO_RING_F_EVENT_IDX) | + (1ull << VIRTIO_IOMMU_F_BYPASS)); + qvirtio_set_features(vdev, features); + interface->vq = qvirtqueue_setup(interface->vdev, alloc, 0); + qvirtio_set_driver_ok(interface->vdev); +} + +/* virtio-iommu-pci */ +static void *qvirtio_iommu_pci_get_driver(void *object, const char *interface) +{ + QVirtioIOMMUPCI *v_iommu = object; + if (!g_strcmp0(interface, "pci-device")) { + return v_iommu->pci_vdev.pdev; + } + return qvirtio_iommu_get_driver(&v_iommu->iommu, interface); +} + +static void qvirtio_iommu_pci_destructor(QOSGraphObject *obj) +{ + QVirtioIOMMUPCI *iommu_pci = (QVirtioIOMMUPCI *) obj; + QVirtioIOMMU *interface = &iommu_pci->iommu; + QOSGraphObject *pci_vobj = &iommu_pci->pci_vdev.obj; + + virtio_iommu_cleanup(interface); + qvirtio_pci_destructor(pci_vobj); +} + +static void qvirtio_iommu_pci_start_hw(QOSGraphObject *obj) +{ + QVirtioIOMMUPCI *iommu_pci = (QVirtioIOMMUPCI *) obj; + QVirtioIOMMU *interface = &iommu_pci->iommu; + QOSGraphObject *pci_vobj = &iommu_pci->pci_vdev.obj; + + qvirtio_pci_start_hw(pci_vobj); + virtio_iommu_setup(interface); +} + + +static void *virtio_iommu_pci_create(void *pci_bus, QGuestAllocator *t_alloc, + void *addr) +{ + QVirtioIOMMUPCI *virtio_rpci = g_new0(QVirtioIOMMUPCI, 1); + QVirtioIOMMU *interface = &virtio_rpci->iommu; + QOSGraphObject *obj = &virtio_rpci->pci_vdev.obj; + + virtio_pci_init(&virtio_rpci->pci_vdev, pci_bus, addr); + interface->vdev = &virtio_rpci->pci_vdev.vdev; + alloc = t_alloc; + + obj->get_driver = qvirtio_iommu_pci_get_driver; + obj->start_hw = qvirtio_iommu_pci_start_hw; + obj->destructor = qvirtio_iommu_pci_destructor; + + return obj; +} + +static void virtio_iommu_register_nodes(void) +{ + QPCIAddress addr = { + .devfn = QPCI_DEVFN(4, 0), + }; + + QOSGraphEdgeOptions opts = { + .extra_device_opts = "addr=04.0", + }; + + /* virtio-iommu-pci */ + add_qpci_address(&opts, &addr); + qos_node_create_driver("virtio-iommu-pci", virtio_iommu_pci_create); + qos_node_consumes("virtio-iommu-pci", "pci-bus", &opts); + qos_node_produces("virtio-iommu-pci", "pci-device"); + qos_node_produces("virtio-iommu-pci", "virtio"); + qos_node_produces("virtio-iommu-pci", "virtio-iommu"); +} + +libqos_init(virtio_iommu_register_nodes); diff --git a/tests/qtest/libqos/virtio-iommu.h b/tests/qtest/libqos/virtio-iommu.h new file mode 100644 index 0000000000..d753761958 --- /dev/null +++ b/tests/qtest/libqos/virtio-iommu.h @@ -0,0 +1,40 @@ +/* + * libqos driver virtio-iommu-pci framework + * + * Copyright (c) 2021 Red Hat, Inc. 
+ * + * Authors: + * Eric Auger + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. + * + */ + +#ifndef TESTS_LIBQOS_VIRTIO_IOMMU_H +#define TESTS_LIBQOS_VIRTIO_IOMMU_H + +#include "qgraph.h" +#include "virtio.h" +#include "virtio-pci.h" + +typedef struct QVirtioIOMMU QVirtioIOMMU; +typedef struct QVirtioIOMMUPCI QVirtioIOMMUPCI; +typedef struct QVirtioIOMMUDevice QVirtioIOMMUDevice; + +struct QVirtioIOMMU { + QVirtioDevice *vdev; + QVirtQueue *vq; +}; + +struct QVirtioIOMMUPCI { + QVirtioPCIDevice pci_vdev; + QVirtioIOMMU iommu; +}; + +struct QVirtioIOMMUDevice { + QOSGraphObject obj; + QVirtioIOMMU iommu; +}; + +#endif diff --git a/tests/qtest/libqos/virtio-mmio.c b/tests/qtest/libqos/virtio-mmio.c index 75efda3029..bd0b1d890b 100644 --- a/tests/qtest/libqos/virtio-mmio.c +++ b/tests/qtest/libqos/virtio-mmio.c @@ -8,11 +8,11 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qemu/module.h" #include "virtio.h" #include "virtio-mmio.h" -#include "malloc.h" +#include "libqos-malloc.h" #include "qgraph.h" #include "standard-headers/linux/virtio_ring.h" diff --git a/tests/qtest/libqos/virtio-net.c b/tests/qtest/libqos/virtio-net.c index 1cae07f60d..2ac73ac0b4 100644 --- a/tests/qtest/libqos/virtio-net.c +++ b/tests/qtest/libqos/virtio-net.c @@ -17,7 +17,7 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qemu/module.h" #include "qgraph.h" #include "virtio-net.h" diff --git a/tests/qtest/libqos/virtio-pci.c b/tests/qtest/libqos/virtio-pci.c index cd3c0f5bf3..485b8f6b7e 100644 --- a/tests/qtest/libqos/virtio-pci.c +++ b/tests/qtest/libqos/virtio-pci.c @@ -8,12 +8,12 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "virtio.h" #include "virtio-pci.h" #include "pci.h" #include "pci-pc.h" -#include "malloc.h" +#include "libqos-malloc.h" #include "malloc-pc.h" #include "qgraph.h" #include "standard-headers/linux/virtio_ring.h" diff --git a/tests/qtest/libqos/virtio-rng.c b/tests/qtest/libqos/virtio-rng.c index 2e09dd7c48..078e3abaa7 100644 --- a/tests/qtest/libqos/virtio-rng.c +++ b/tests/qtest/libqos/virtio-rng.c @@ -17,7 +17,7 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qemu/module.h" #include "qgraph.h" #include "virtio-rng.h" diff --git a/tests/qtest/libqos/virtio-scsi.c b/tests/qtest/libqos/virtio-scsi.c index 5644e32fc3..c4d0461420 100644 --- a/tests/qtest/libqos/virtio-scsi.c +++ b/tests/qtest/libqos/virtio-scsi.c @@ -17,7 +17,7 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qemu/module.h" #include "standard-headers/linux/virtio_ids.h" #include "qgraph.h" diff --git a/tests/qtest/libqos/virtio-serial.c b/tests/qtest/libqos/virtio-serial.c index ee34afd95a..1d689c3e38 100644 --- a/tests/qtest/libqos/virtio-serial.c +++ b/tests/qtest/libqos/virtio-serial.c @@ -17,7 +17,7 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qemu/module.h" #include "qgraph.h" #include "virtio-serial.h" diff --git a/tests/qtest/libqos/virtio.c b/tests/qtest/libqos/virtio.c index 6fe7bf9555..410513225f 100644 --- a/tests/qtest/libqos/virtio.c +++ b/tests/qtest/libqos/virtio.c @@ -9,7 +9,7 @@ #include "qemu/osdep.h" #include "qemu/bswap.h" -#include "libqtest.h" +#include "../libqtest.h" #include "virtio.h" #include "standard-headers/linux/virtio_config.h" #include 
"standard-headers/linux/virtio_ring.h" @@ -101,6 +101,8 @@ uint64_t qvirtio_get_features(QVirtioDevice *d) void qvirtio_set_features(QVirtioDevice *d, uint64_t features) { + g_assert(!(features & QVIRTIO_F_BAD_FEATURE)); + d->features = features; d->bus->set_features(d, features); @@ -108,7 +110,7 @@ void qvirtio_set_features(QVirtioDevice *d, uint64_t features) * This could be a separate function for drivers that want to access * configuration space before setting FEATURES_OK, but no existing users * need that and it's less code for callers if this is done implicitly. - */ + */ if (features & (1ull << VIRTIO_F_VERSION_1)) { uint8_t status = d->bus->get_status(d) | VIRTIO_CONFIG_S_FEATURES_OK; @@ -260,6 +262,8 @@ void qvring_init(QTestState *qts, const QGuestAllocator *alloc, QVirtQueue *vq, /* vq->used->flags */ qvirtio_writew(vq->vdev, qts, vq->used, 0); + /* vq->used->idx */ + qvirtio_writew(vq->vdev, qts, vq->used + 2, 0); /* vq->used->avail_event */ qvirtio_writew(vq->vdev, qts, vq->used + 2 + sizeof(struct vring_used_elem) * vq->size, 0); diff --git a/tests/qtest/libqos/virtio.h b/tests/qtest/libqos/virtio.h index b8bd06e1b8..7adc7cbd10 100644 --- a/tests/qtest/libqos/virtio.h +++ b/tests/qtest/libqos/virtio.h @@ -10,7 +10,7 @@ #ifndef LIBQOS_VIRTIO_H #define LIBQOS_VIRTIO_H -#include "malloc.h" +#include "libqos-malloc.h" #include "standard-headers/linux/virtio_ring.h" #define QVIRTIO_F_BAD_FEATURE 0x40000000ull diff --git a/tests/qtest/libqos/x86_64_pc-machine.c b/tests/qtest/libqos/x86_64_pc-machine.c index ad96742a92..dce0c9463a 100644 --- a/tests/qtest/libqos/x86_64_pc-machine.c +++ b/tests/qtest/libqos/x86_64_pc-machine.c @@ -17,7 +17,7 @@ */ #include "qemu/osdep.h" -#include "libqtest.h" +#include "../libqtest.h" #include "qgraph.h" #include "pci-pc.h" #include "qemu/module.h" diff --git a/tests/qtest/libqtest-single.h b/tests/qtest/libqtest-single.h index 0d7f568678..851724cbcb 100644 --- a/tests/qtest/libqtest-single.h +++ b/tests/qtest/libqtest-single.h @@ -11,9 +11,13 @@ #ifndef LIBQTEST_SINGLE_H #define LIBQTEST_SINGLE_H -#include "libqos/libqtest.h" +#include "libqtest.h" +#ifndef _WIN32 QTestState *global_qtest __attribute__((common, weak)); +#else +__declspec(selectany) QTestState *global_qtest; +#endif /** * qtest_start: @@ -52,7 +56,7 @@ static inline void qtest_end(void) * * Sends a QMP message to QEMU and returns the response. */ -GCC_FMT_ATTR(1, 2) +G_GNUC_PRINTF(1, 2) static inline QDict *qmp(const char *fmt, ...) 
{ va_list ap; diff --git a/tests/qtest/libqtest.c b/tests/qtest/libqtest.c index 825b13a44c..2fbc3b88f3 100644 --- a/tests/qtest/libqtest.c +++ b/tests/qtest/libqtest.c @@ -16,25 +16,38 @@ #include "qemu/osdep.h" +#ifndef _WIN32 #include #include #include +#endif /* _WIN32 */ +#ifdef __linux__ +#include +#endif /* __linux__ */ -#include "libqos/libqtest.h" -#include "qemu-common.h" +#include "libqtest.h" +#include "libqmp.h" #include "qemu/ctype.h" #include "qemu/cutils.h" -#include "qapi/error.h" -#include "qapi/qmp/json-parser.h" +#include "qemu/sockets.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qjson.h" #include "qapi/qmp/qlist.h" #include "qapi/qmp/qstring.h" #define MAX_IRQ 256 -#define SOCKET_TIMEOUT 50 -#define SOCKET_MAX_FDS 16 +#ifndef _WIN32 +# define SOCKET_TIMEOUT 50 +# define CMD_EXEC "exec " +# define DEV_STDERR "/dev/fd/2" +# define DEV_NULL "/dev/null" +#else +# define SOCKET_TIMEOUT 50000 +# define CMD_EXEC "" +# define DEV_STDERR "2" +# define DEV_NULL "nul" +#endif typedef void (*QTestSendFn)(QTestState *s, const char *buf); typedef void (*ExternalSendFn)(void *s, const char *buf); @@ -58,6 +71,9 @@ struct QTestState int qmp_fd; pid_t qemu_pid; /* our child QEMU process */ int wstatus; +#ifdef _WIN32 + DWORD exit_code; +#endif int expected_status; bool big_endian; bool irq_level[MAX_IRQ]; @@ -67,7 +83,7 @@ struct QTestState }; static GHookList abrt_hooks; -static struct sigaction sigact_old; +static void (*sighandler_old)(int); static int qtest_query_target_endianness(QTestState *s); @@ -91,14 +107,22 @@ static int socket_accept(int sock) struct sockaddr_un addr; socklen_t addrlen; int ret; + /* + * timeout unit of blocking receive calls is different among platfoms. + * It's in seconds on non-Windows platforms but milliseconds on Windows. + */ +#ifndef _WIN32 struct timeval timeout = { .tv_sec = SOCKET_TIMEOUT, .tv_usec = 0 }; +#else + DWORD timeout = SOCKET_TIMEOUT; +#endif - if (qemu_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, - (void *)&timeout, sizeof(timeout))) { + if (setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, + (void *)&timeout, sizeof(timeout))) { fprintf(stderr, "%s failed to set SO_RCVTIMEO: %s\n", __func__, strerror(errno)); - close(sock); + closesocket(sock); return -1; } @@ -109,7 +133,7 @@ static int socket_accept(int sock) if (ret == -1) { fprintf(stderr, "%s failed: %s\n", __func__, strerror(errno)); } - close(sock); + closesocket(sock); return ret; } @@ -119,10 +143,18 @@ bool qtest_probe_child(QTestState *s) pid_t pid = s->qemu_pid; if (pid != -1) { +#ifndef _WIN32 pid = waitpid(pid, &s->wstatus, WNOHANG); if (pid == 0) { return true; } +#else + GetExitCodeProcess((HANDLE)pid, &s->exit_code); + if (s->exit_code == STILL_ACTIVE) { + return true; + } + CloseHandle((HANDLE)pid); +#endif s->qemu_pid = -1; } return false; @@ -133,24 +165,14 @@ void qtest_set_expected_status(QTestState *s, int status) s->expected_status = status; } -void qtest_kill_qemu(QTestState *s) +static void qtest_check_status(QTestState *s) { - pid_t pid = s->qemu_pid; - int wstatus; - - /* Skip wait if qtest_probe_child already reaped. */ - if (pid != -1) { - kill(pid, SIGTERM); - TFR(pid = waitpid(s->qemu_pid, &s->wstatus, 0)); - assert(pid == s->qemu_pid); - s->qemu_pid = -1; - } - /* * Check whether qemu exited with expected exit status; anything else is * fishy and should be logged with as much detail as possible. 
*/ - wstatus = s->wstatus; +#ifndef _WIN32 + int wstatus = s->wstatus; if (WIFEXITED(wstatus) && WEXITSTATUS(wstatus) != s->expected_status) { fprintf(stderr, "%s:%d: kill_qemu() tried to terminate QEMU " "process but encountered exit status %d (expected %d)\n", @@ -166,6 +188,50 @@ void qtest_kill_qemu(QTestState *s) __FILE__, __LINE__, sig, signame, dump); abort(); } +#else + if (s->exit_code != s->expected_status) { + fprintf(stderr, "%s:%d: kill_qemu() tried to terminate QEMU " + "process but encountered exit status %ld (expected %d)\n", + __FILE__, __LINE__, s->exit_code, s->expected_status); + abort(); + } +#endif +} + +void qtest_wait_qemu(QTestState *s) +{ +#ifndef _WIN32 + pid_t pid; + + TFR(pid = waitpid(s->qemu_pid, &s->wstatus, 0)); + assert(pid == s->qemu_pid); +#else + DWORD ret; + + ret = WaitForSingleObject((HANDLE)s->qemu_pid, INFINITE); + assert(ret == WAIT_OBJECT_0); + GetExitCodeProcess((HANDLE)s->qemu_pid, &s->exit_code); + CloseHandle((HANDLE)s->qemu_pid); +#endif + + qtest_check_status(s); +} + +void qtest_kill_qemu(QTestState *s) +{ + /* Skip wait if qtest_probe_child() already reaped */ + if (s->qemu_pid != -1) { +#ifndef _WIN32 + kill(s->qemu_pid, SIGTERM); +#else + TerminateProcess((HANDLE)s->qemu_pid, s->expected_status); +#endif + qtest_wait_qemu(s); + s->qemu_pid = -1; + return; + } + + qtest_check_status(s); } static void kill_qemu_hook_func(void *s) @@ -180,20 +246,12 @@ static void sigabrt_handler(int signo) static void setup_sigabrt_handler(void) { - struct sigaction sigact; - - /* Catch SIGABRT to clean up on g_assert() failure */ - sigact = (struct sigaction){ - .sa_handler = sigabrt_handler, - .sa_flags = SA_RESETHAND, - }; - sigemptyset(&sigact.sa_mask); - sigaction(SIGABRT, &sigact, &sigact_old); + sighandler_old = signal(SIGABRT, sigabrt_handler); } static void cleanup_sigabrt_handler(void) { - sigaction(SIGABRT, &sigact_old, NULL); + signal(SIGABRT, sighandler_old); } static bool hook_list_is_empty(GHookList *hook_list) @@ -201,11 +259,11 @@ static bool hook_list_is_empty(GHookList *hook_list) GHook *hook = g_hook_first_valid(hook_list, TRUE); if (!hook) { - return false; + return true; } g_hook_unref(hook_list, hook); - return true; + return false; } void qtest_add_abrt_handler(GHookFunc fn, const void *data) @@ -252,6 +310,38 @@ static const char *qtest_qemu_binary(void) return qemu_bin; } +#ifdef _WIN32 +static pid_t qtest_create_process(char *cmd) +{ + STARTUPINFO si; + PROCESS_INFORMATION pi; + BOOL ret; + + ZeroMemory(&si, sizeof(si)); + si.cb = sizeof(si); + ZeroMemory(&pi, sizeof(pi)); + + ret = CreateProcess(NULL, /* module name */ + cmd, /* command line */ + NULL, /* process handle not inheritable */ + NULL, /* thread handle not inheritable */ + FALSE, /* set handle inheritance to FALSE */ + 0, /* No creation flags */ + NULL, /* use parent's environment block */ + NULL, /* use parent's starting directory */ + &si, /* pointer to STARTUPINFO structure */ + &pi /* pointer to PROCESS_INFORMATION structure */ + ); + if (ret == 0) { + fprintf(stderr, "%s:%d: unable to create a new process (%s)\n", + __FILE__, __LINE__, strerror(GetLastError())); + abort(); + } + + return (pid_t)pi.hProcess; +} +#endif /* _WIN32 */ + QTestState *qtest_init_without_qmp_handshake(const char *extra_args) { QTestState *s; @@ -260,11 +350,16 @@ QTestState *qtest_init_without_qmp_handshake(const char *extra_args) gchar *qmp_socket_path; gchar *command; const char *qemu_binary = qtest_qemu_binary(); + const char *trace = g_getenv("QTEST_TRACE"); + g_autofree char 
*tracearg = trace ? + g_strdup_printf("-trace %s ", trace) : g_strdup(""); s = g_new(QTestState, 1); - socket_path = g_strdup_printf("/tmp/qtest-%d.sock", getpid()); - qmp_socket_path = g_strdup_printf("/tmp/qtest-%d.qmp", getpid()); + socket_path = g_strdup_printf("%s/qtest-%d.sock", + g_get_tmp_dir(), getpid()); + qmp_socket_path = g_strdup_printf("%s/qtest-%d.qmp", + g_get_tmp_dir(), getpid()); /* It's possible that if an earlier test run crashed it might * have left a stale unix socket lying around. Delete any @@ -274,6 +369,7 @@ QTestState *qtest_init_without_qmp_handshake(const char *extra_args) unlink(socket_path); unlink(qmp_socket_path); + socket_init(); sock = init_socket(socket_path); qmpsock = init_socket(qmp_socket_path); @@ -282,15 +378,16 @@ QTestState *qtest_init_without_qmp_handshake(const char *extra_args) qtest_add_abrt_handler(kill_qemu_hook_func, s); - command = g_strdup_printf("exec %s " + command = g_strdup_printf(CMD_EXEC "%s %s" "-qtest unix:%s " "-qtest-log %s " "-chardev socket,path=%s,id=char0 " "-mon chardev=char0,mode=control " "-display none " "%s" - " -accel qtest", qemu_binary, socket_path, - getenv("QTEST_LOG") ? "/dev/fd/2" : "/dev/null", + " -accel qtest", + qemu_binary, tracearg, socket_path, + getenv("QTEST_LOG") ? DEV_STDERR : DEV_NULL, qmp_socket_path, extra_args ?: ""); @@ -299,12 +396,32 @@ QTestState *qtest_init_without_qmp_handshake(const char *extra_args) s->pending_events = NULL; s->wstatus = 0; s->expected_status = 0; +#ifndef _WIN32 s->qemu_pid = fork(); if (s->qemu_pid == 0) { - g_setenv("QEMU_AUDIO_DRV", "none", true); +#ifdef __linux__ + /* + * Although we register a ABRT handler to kill off QEMU + * when g_assert() triggers, we want an extra safety + * net. The QEMU process might be non-functional and + * thus not have responded to SIGTERM. The test script + * might also have crashed with SEGV, in which case the + * cleanup handlers won't ever run. + * + * This PR_SET_PDEATHSIG setup will ensure any remaining + * QEMU will get terminated with SIGKILL in these cases. + */ + prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0); +#endif /* __linux__ */ + if (!g_setenv("QEMU_AUDIO_DRV", "none", true)) { + exit(1); + } execlp("/bin/sh", "sh", "-c", command, NULL); exit(1); } +#else + s->qemu_pid = qtest_create_process(command); +#endif /* _WIN32 */ g_free(command); s->fd = socket_accept(sock); @@ -323,9 +440,19 @@ QTestState *qtest_init_without_qmp_handshake(const char *extra_args) s->irq_level[i] = false; } + /* + * Stopping QEMU for debugging is not supported on Windows. + * + * Using DebugActiveProcess() API can suspend the QEMU process, + * but gdb cannot attach to the process. Using the undocumented + * NtSuspendProcess() can suspend the QEMU process and gdb can + * attach to the process, but gdb cannot resume it. + */ +#ifndef _WIN32 if (getenv("QTEST_STOP")) { kill(s->qemu_pid, SIGSTOP); } +#endif /* ask endianness of the target */ @@ -371,12 +498,15 @@ QTestState *qtest_initf(const char *fmt, ...) 
QTestState *qtest_init_with_serial(const char *extra_args, int *sock_fd) { int sock_fd_init; - char *sock_path, sock_dir[] = "/tmp/qtest-serial-XXXXXX"; + g_autofree char *sock_dir = NULL; + char *sock_path; QTestState *qts; - g_assert_true(mkdtemp(sock_dir) != NULL); + sock_dir = g_dir_make_tmp("qtest-serial-XXXXXX", NULL); + g_assert_true(sock_dir != NULL); sock_path = g_strdup_printf("%s/sock", sock_dir); + socket_init(); sock_fd_init = init_socket(sock_path); qts = qtest_initf("-chardev socket,id=s0,path=%s -serial chardev:s0 %s", @@ -398,8 +528,8 @@ void qtest_quit(QTestState *s) qtest_remove_abrt_handler(s); qtest_kill_qemu(s); - close(s->fd); - close(s->qmp_fd); + closesocket(s->fd); + closesocket(s->qmp_fd); g_string_free(s->rx, true); for (GList *it = s->pending_events; it != NULL; it = it->next) { @@ -413,21 +543,9 @@ void qtest_quit(QTestState *s) static void socket_send(int fd, const char *buf, size_t size) { - size_t offset; + ssize_t res = qemu_send_full(fd, buf, size); - offset = 0; - while (offset < size) { - ssize_t len; - - len = write(fd, buf + offset, size - offset); - if (len == -1 && errno == EINTR) { - continue; - } - - g_assert_cmpint(len, >, 0); - - offset += len; - } + assert(res == size); } static void qtest_client_socket_send(QTestState *s, const char *buf) @@ -435,7 +553,7 @@ static void qtest_client_socket_send(QTestState *s, const char *buf) socket_send(s->fd, buf, strlen(buf)); } -static void GCC_FMT_ATTR(2, 3) qtest_sendf(QTestState *s, const char *fmt, ...) +static void G_GNUC_PRINTF(2, 3) qtest_sendf(QTestState *s, const char *fmt, ...) { va_list ap; @@ -447,40 +565,6 @@ static void GCC_FMT_ATTR(2, 3) qtest_sendf(QTestState *s, const char *fmt, ...) g_free(str); } -/* Sends a message and file descriptors to the socket. 
- * It's needed for qmp-commands like getfd/add-fd */ -static void socket_send_fds(int socket_fd, int *fds, size_t fds_num, - const char *buf, size_t buf_size) -{ - ssize_t ret; - struct msghdr msg = { 0 }; - char control[CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS)] = { 0 }; - size_t fdsize = sizeof(int) * fds_num; - struct cmsghdr *cmsg; - struct iovec iov = { .iov_base = (char *)buf, .iov_len = buf_size }; - - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - if (fds && fds_num > 0) { - g_assert_cmpuint(fds_num, <, SOCKET_MAX_FDS); - - msg.msg_control = control; - msg.msg_controllen = CMSG_SPACE(fdsize); - - cmsg = CMSG_FIRSTHDR(&msg); - cmsg->cmsg_len = CMSG_LEN(fdsize); - cmsg->cmsg_level = SOL_SOCKET; - cmsg->cmsg_type = SCM_RIGHTS; - memcpy(CMSG_DATA(cmsg), fds, fdsize); - } - - do { - ret = sendmsg(socket_fd, &msg, 0); - } while (ret < 0 && errno == EINTR); - g_assert_cmpint(ret, >, 0); -} - static GString *qtest_client_socket_recv_line(QTestState *s) { GString *line; @@ -491,7 +575,7 @@ static GString *qtest_client_socket_recv_line(QTestState *s) ssize_t len; char buffer[1024]; - len = read(s->fd, buffer, sizeof(buffer)); + len = recv(s->fd, buffer, sizeof(buffer), 0); if (len == -1 && errno == EINTR) { continue; } @@ -575,59 +659,6 @@ static int qtest_query_target_endianness(QTestState *s) return big_endian; } -typedef struct { - JSONMessageParser parser; - QDict *response; -} QMPResponseParser; - -static void qmp_response(void *opaque, QObject *obj, Error *err) -{ - QMPResponseParser *qmp = opaque; - - assert(!obj != !err); - - if (err) { - error_prepend(&err, "QMP JSON response parsing failed: "); - error_report_err(err); - abort(); - } - - g_assert(!qmp->response); - qmp->response = qobject_to(QDict, obj); - g_assert(qmp->response); -} - -QDict *qmp_fd_receive(int fd) -{ - QMPResponseParser qmp; - bool log = getenv("QTEST_LOG") != NULL; - - qmp.response = NULL; - json_message_parser_init(&qmp.parser, qmp_response, &qmp, NULL); - while (!qmp.response) { - ssize_t len; - char c; - - len = read(fd, &c, 1); - if (len == -1 && errno == EINTR) { - continue; - } - - if (len == -1 || len == 0) { - fprintf(stderr, "Broken pipe\n"); - abort(); - } - - if (log) { - len = write(2, &c, 1); - } - json_message_parser_feed(&qmp.parser, &c, 1); - } - json_message_parser_destroy(&qmp.parser); - - return qmp.response; -} - QDict *qtest_qmp_receive(QTestState *s) { while (true) { @@ -668,68 +699,20 @@ int qtest_socket_server(const char *socket_path) return sock; } -/** - * Allow users to send a message without waiting for the reply, - * in the case that they choose to discard all replies up until - * a particular EVENT is received. - */ -void qmp_fd_vsend_fds(int fd, int *fds, size_t fds_num, - const char *fmt, va_list ap) -{ - QObject *qobj; - - /* Going through qobject ensures we escape strings properly */ - qobj = qobject_from_vjsonf_nofail(fmt, ap); - - /* No need to send anything for an empty QObject. */ - if (qobj) { - int log = getenv("QTEST_LOG") != NULL; - GString *str = qobject_to_json(qobj); - - /* - * BUG: QMP doesn't react to input until it sees a newline, an - * object, or an array. Work-around: give it a newline. 
- */ - g_string_append_c(str, '\n'); - - if (log) { - fprintf(stderr, "%s", str->str); - } - /* Send QMP request */ - if (fds && fds_num > 0) { - socket_send_fds(fd, fds, fds_num, str->str, str->len); - } else { - socket_send(fd, str->str, str->len); - } - - g_string_free(str, true); - qobject_unref(qobj); - } -} - -void qmp_fd_vsend(int fd, const char *fmt, va_list ap) -{ - qmp_fd_vsend_fds(fd, NULL, 0, fmt, ap); -} - +#ifndef _WIN32 void qtest_qmp_vsend_fds(QTestState *s, int *fds, size_t fds_num, const char *fmt, va_list ap) { qmp_fd_vsend_fds(s->qmp_fd, fds, fds_num, fmt, ap); } +#endif void qtest_qmp_vsend(QTestState *s, const char *fmt, va_list ap) { - qmp_fd_vsend_fds(s->qmp_fd, NULL, 0, fmt, ap); -} - -QDict *qmp_fdv(int fd, const char *fmt, va_list ap) -{ - qmp_fd_vsend_fds(fd, NULL, 0, fmt, ap); - - return qmp_fd_receive(fd); + qmp_fd_vsend(s->qmp_fd, fmt, ap); } +#ifndef _WIN32 QDict *qtest_vqmp_fds(QTestState *s, int *fds, size_t fds_num, const char *fmt, va_list ap) { @@ -738,6 +721,7 @@ QDict *qtest_vqmp_fds(QTestState *s, int *fds, size_t fds_num, /* Receive reply */ return qtest_qmp_receive(s); } +#endif QDict *qtest_vqmp(QTestState *s, const char *fmt, va_list ap) { @@ -747,26 +731,7 @@ QDict *qtest_vqmp(QTestState *s, const char *fmt, va_list ap) return qtest_qmp_receive(s); } -QDict *qmp_fd(int fd, const char *fmt, ...) -{ - va_list ap; - QDict *response; - - va_start(ap, fmt); - response = qmp_fdv(fd, fmt, ap); - va_end(ap); - return response; -} - -void qmp_fd_send(int fd, const char *fmt, ...) -{ - va_list ap; - - va_start(ap, fmt); - qmp_fd_vsend(fd, fmt, ap); - va_end(ap); -} - +#ifndef _WIN32 QDict *qtest_qmp_fds(QTestState *s, int *fds, size_t fds_num, const char *fmt, ...) { @@ -778,6 +743,7 @@ QDict *qtest_qmp_fds(QTestState *s, int *fds, size_t fds_num, va_end(ap); return response; } +#endif QDict *qtest_qmp(QTestState *s, const char *fmt, ...) { @@ -799,27 +765,6 @@ void qtest_qmp_send(QTestState *s, const char *fmt, ...) va_end(ap); } -void qmp_fd_vsend_raw(int fd, const char *fmt, va_list ap) -{ - bool log = getenv("QTEST_LOG") != NULL; - char *str = g_strdup_vprintf(fmt, ap); - - if (log) { - fprintf(stderr, "%s", str); - } - socket_send(fd, str, strlen(str)); - g_free(str); -} - -void qmp_fd_send_raw(int fd, const char *fmt, ...) -{ - va_list ap; - - va_start(ap, fmt); - qmp_fd_vsend_raw(fd, fmt, ap); - va_end(ap); -} - void qtest_qmp_send_raw(QTestState *s, const char *fmt, ...) { va_list ap; @@ -920,6 +865,33 @@ const char *qtest_get_arch(void) return end + 1; } +bool qtest_has_accel(const char *accel_name) +{ + if (g_str_equal(accel_name, "tcg")) { +#if defined(CONFIG_TCG) + return true; +#else + return false; +#endif + } else if (g_str_equal(accel_name, "kvm")) { + int i; + const char *arch = qtest_get_arch(); + const char *targets[] = { CONFIG_KVM_TARGETS }; + + for (i = 0; i < ARRAY_SIZE(targets); i++) { + if (!strncmp(targets[i], arch, strlen(arch))) { + if (!access("/dev/kvm", R_OK | W_OK)) { + return true; + } + } + } + } else { + /* not implemented */ + g_assert_not_reached(); + } + return false; +} + bool qtest_get_irq(QTestState *s, int num) { /* dummy operation in order to make sure irq is up to date */ @@ -1292,16 +1264,29 @@ static bool qtest_is_old_versioned_machine(const char *mname) return res; } -void qtest_cb_for_every_machine(void (*cb)(const char *machine), - bool skip_old_versioned) +struct MachInfo { + char *name; + char *alias; +}; + +/* + * Returns an array with pointers to the available machine names. 
+ * The terminating entry has the name set to NULL. + */ +static struct MachInfo *qtest_get_machines(void) { + static struct MachInfo *machines; QDict *response, *minfo; QList *list; const QListEntry *p; QObject *qobj; QString *qstr; - const char *mname; QTestState *qts; + int idx; + + if (machines) { + return machines; + } qts = qtest_init("-machine none"); response = qtest_qmp(qts, "{ 'execute': 'query-machines' }"); @@ -1309,25 +1294,115 @@ void qtest_cb_for_every_machine(void (*cb)(const char *machine), list = qdict_get_qlist(response, "return"); g_assert(list); - for (p = qlist_first(list); p; p = qlist_next(p)) { + machines = g_new(struct MachInfo, qlist_size(list) + 1); + + for (p = qlist_first(list), idx = 0; p; p = qlist_next(p), idx++) { minfo = qobject_to(QDict, qlist_entry_obj(p)); g_assert(minfo); + qobj = qdict_get(minfo, "name"); g_assert(qobj); qstr = qobject_to(QString, qobj); g_assert(qstr); - mname = qstring_get_str(qstr); - /* Ignore machines that cannot be used for qtests */ - if (!strncmp("xenfv", mname, 5) || g_str_equal("xenpv", mname)) { - continue; - } - if (!skip_old_versioned || !qtest_is_old_versioned_machine(mname)) { - cb(mname); + machines[idx].name = g_strdup(qstring_get_str(qstr)); + + qobj = qdict_get(minfo, "alias"); + if (qobj) { /* The alias is optional */ + qstr = qobject_to(QString, qobj); + g_assert(qstr); + machines[idx].alias = g_strdup(qstring_get_str(qstr)); + } else { + machines[idx].alias = NULL; } } qtest_quit(qts); qobject_unref(response); + + memset(&machines[idx], 0, sizeof(struct MachInfo)); /* Terminating entry */ + return machines; +} + +void qtest_cb_for_every_machine(void (*cb)(const char *machine), + bool skip_old_versioned) +{ + struct MachInfo *machines; + int i; + + machines = qtest_get_machines(); + + for (i = 0; machines[i].name != NULL; i++) { + /* Ignore machines that cannot be used for qtests */ + if (!strncmp("xenfv", machines[i].name, 5) || + g_str_equal("xenpv", machines[i].name)) { + continue; + } + if (!skip_old_versioned || + !qtest_is_old_versioned_machine(machines[i].name)) { + cb(machines[i].name); + } + } +} + +bool qtest_has_machine(const char *machine) +{ + struct MachInfo *machines; + int i; + + machines = qtest_get_machines(); + + for (i = 0; machines[i].name != NULL; i++) { + if (g_str_equal(machine, machines[i].name) || + (machines[i].alias && g_str_equal(machine, machines[i].alias))) { + return true; + } + } + + return false; +} + +bool qtest_has_device(const char *device) +{ + static QList *list; + const QListEntry *p; + QObject *qobj; + QString *qstr; + QDict *devinfo; + int idx; + + if (!list) { + QDict *resp; + QDict *args; + QTestState *qts = qtest_init("-machine none"); + + args = qdict_new(); + qdict_put_bool(args, "abstract", false); + qdict_put_str(args, "implements", "device"); + + resp = qtest_qmp(qts, "{'execute': 'qom-list-types', 'arguments': %p }", + args); + g_assert(qdict_haskey(resp, "return")); + list = qdict_get_qlist(resp, "return"); + qobject_ref(list); + qobject_unref(resp); + + qtest_quit(qts); + } + + for (p = qlist_first(list), idx = 0; p; p = qlist_next(p), idx++) { + devinfo = qobject_to(QDict, qlist_entry_obj(p)); + g_assert(devinfo); + + qobj = qdict_get(devinfo, "name"); + g_assert(qobj); + qstr = qobject_to(QString, qobj); + g_assert(qstr); + if (g_str_equal(qstring_get_str(qstr), device)) { + return true; + } + } + + return false; } /* @@ -1365,6 +1440,27 @@ void qtest_qmp_device_add(QTestState *qts, const char *driver, const char *id, qobject_unref(args); } +#ifndef _WIN32 
+void qtest_qmp_add_client(QTestState *qts, const char *protocol, int fd) +{ + QDict *resp; + + resp = qtest_qmp_fds(qts, &fd, 1, "{'execute': 'getfd'," + "'arguments': {'fdname': 'fdname'}}"); + g_assert(resp); + g_assert(!qdict_haskey(resp, "event")); /* We don't expect any events */ + g_assert(!qdict_haskey(resp, "error")); + qobject_unref(resp); + + resp = qtest_qmp( + qts, "{'execute': 'add_client'," + "'arguments': {'protocol': %s, 'fdname': 'fdname'}}", protocol); + g_assert(resp); + g_assert(!qdict_haskey(resp, "event")); /* We don't expect any events */ + g_assert(!qdict_haskey(resp, "error")); + qobject_unref(resp); +} +#endif /* * Generic hot-unplugging test via the device_del QMP command. @@ -1382,36 +1478,22 @@ void qtest_qmp_device_add(QTestState *qts, const char *driver, const char *id, * * {"return": {}} */ +void qtest_qmp_device_del_send(QTestState *qts, const char *id) +{ + QDict *rsp = qtest_qmp(qts, "{'execute': 'device_del', " + "'arguments': {'id': %s}}", id); + g_assert(rsp); + g_assert(qdict_haskey(rsp, "return")); + g_assert(!qdict_haskey(rsp, "error")); + qobject_unref(rsp); +} + void qtest_qmp_device_del(QTestState *qts, const char *id) { - QDict *rsp; - - rsp = qtest_qmp(qts, "{'execute': 'device_del', 'arguments': {'id': %s}}", - id); - - g_assert(qdict_haskey(rsp, "return")); - qobject_unref(rsp); + qtest_qmp_device_del_send(qts, id); qtest_qmp_eventwait(qts, "DEVICE_DELETED"); } -bool qmp_rsp_is_err(QDict *rsp) -{ - QDict *error = qdict_get_qdict(rsp, "error"); - qobject_unref(rsp); - return !!error; -} - -void qmp_expect_error_and_unref(QDict *rsp, const char *class) -{ - QDict *error = qdict_get_qdict(rsp, "error"); - - g_assert_cmpstr(qdict_get_try_str(error, "class"), ==, class); - g_assert_nonnull(qdict_get_try_str(error, "desc")); - g_assert(!qdict_haskey(rsp, "return")); - - qobject_unref(rsp); -} - static void qtest_client_set_tx_handler(QTestState *s, QTestSendFn send) { @@ -1465,7 +1547,7 @@ QTestState *qtest_inproc_init(QTestState **s, bool log, const char* arch, * way, qtest_get_arch works for inproc qtest. */ gchar *bin_path = g_strconcat("/qemu-system-", arch, NULL); - setenv("QTEST_QEMU_BINARY", bin_path, 0); + g_setenv("QTEST_QEMU_BINARY", bin_path, 0); g_free(bin_path); return qts; @@ -1481,3 +1563,27 @@ void qtest_client_inproc_recv(void *opaque, const char *str) g_string_append(qts->rx, str); return; } + +void qtest_qom_set_bool(QTestState *s, const char *path, const char *property, + bool value) +{ + QDict *r; + + r = qtest_qmp(s, "{ 'execute': 'qom-set', 'arguments': " + "{ 'path': %s, 'property': %s, 'value': %i } }", + path, property, value); + qobject_unref(r); +} + +bool qtest_qom_get_bool(QTestState *s, const char *path, const char *property) +{ + QDict *r; + bool b; + + r = qtest_qmp(s, "{ 'execute': 'qom-get', 'arguments': " + "{ 'path': %s, 'property': %s } }", path, property); + b = qdict_get_bool(r, "return"); + qobject_unref(r); + + return b; +} diff --git a/tests/qtest/libqos/libqtest.h b/tests/qtest/libqtest.h similarity index 88% rename from tests/qtest/libqos/libqtest.h rename to tests/qtest/libqtest.h index a68dcd79d4..fcf1c3c3b3 100644 --- a/tests/qtest/libqos/libqtest.h +++ b/tests/qtest/libqtest.h @@ -19,6 +19,7 @@ #include "qapi/qmp/qobject.h" #include "qapi/qmp/qdict.h" +#include "libqmp.h" typedef struct QTestState QTestState; @@ -31,7 +32,7 @@ typedef struct QTestState QTestState; * * Returns: #QTestState instance. */ -QTestState *qtest_initf(const char *fmt, ...) 
GCC_FMT_ATTR(1, 2); +QTestState *qtest_initf(const char *fmt, ...) G_GNUC_PRINTF(1, 2); /** * qtest_vinitf: @@ -43,7 +44,7 @@ QTestState *qtest_initf(const char *fmt, ...) GCC_FMT_ATTR(1, 2); * * Returns: #QTestState instance. */ -QTestState *qtest_vinitf(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0); +QTestState *qtest_vinitf(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0); /** * qtest_init: @@ -74,6 +75,15 @@ QTestState *qtest_init_without_qmp_handshake(const char *extra_args); */ QTestState *qtest_init_with_serial(const char *extra_args, int *sock_fd); +/** + * qtest_wait_qemu: + * @s: #QTestState instance to operate on. + * + * Wait for the QEMU process to terminate. It is safe to call this function + * multiple times. + */ +void qtest_wait_qemu(QTestState *s); + /** * qtest_kill_qemu: * @s: #QTestState instance to operate on. @@ -93,6 +103,7 @@ void qtest_kill_qemu(QTestState *s); */ void qtest_quit(QTestState *s); +#ifndef _WIN32 /** * qtest_qmp_fds: * @s: #QTestState instance to operate on. @@ -106,7 +117,8 @@ void qtest_quit(QTestState *s); */ QDict *qtest_qmp_fds(QTestState *s, int *fds, size_t fds_num, const char *fmt, ...) - GCC_FMT_ATTR(4, 5); + G_GNUC_PRINTF(4, 5); +#endif /* _WIN32 */ /** * qtest_qmp: @@ -118,7 +130,7 @@ QDict *qtest_qmp_fds(QTestState *s, int *fds, size_t fds_num, * Sends a QMP message to QEMU and returns the response. */ QDict *qtest_qmp(QTestState *s, const char *fmt, ...) - GCC_FMT_ATTR(2, 3); + G_GNUC_PRINTF(2, 3); /** * qtest_qmp_send: @@ -130,7 +142,7 @@ QDict *qtest_qmp(QTestState *s, const char *fmt, ...) * Sends a QMP message to QEMU and leaves the response in the stream. */ void qtest_qmp_send(QTestState *s, const char *fmt, ...) - GCC_FMT_ATTR(2, 3); + G_GNUC_PRINTF(2, 3); /** * qtest_qmp_send_raw: @@ -141,7 +153,7 @@ void qtest_qmp_send(QTestState *s, const char *fmt, ...) * this is useful for negative tests. */ void qtest_qmp_send_raw(QTestState *s, const char *fmt, ...) - GCC_FMT_ATTR(2, 3); + G_GNUC_PRINTF(2, 3); /** * qtest_socket_server: @@ -151,6 +163,7 @@ void qtest_qmp_send_raw(QTestState *s, const char *fmt, ...) */ int qtest_socket_server(const char *socket_path); +#ifndef _WIN32 /** * qtest_vqmp_fds: * @s: #QTestState instance to operate on. @@ -165,7 +178,8 @@ int qtest_socket_server(const char *socket_path); */ QDict *qtest_vqmp_fds(QTestState *s, int *fds, size_t fds_num, const char *fmt, va_list ap) - GCC_FMT_ATTR(4, 0); + G_GNUC_PRINTF(4, 0); +#endif /* _WIN32 */ /** * qtest_vqmp: @@ -178,8 +192,9 @@ QDict *qtest_vqmp_fds(QTestState *s, int *fds, size_t fds_num, * Sends a QMP message to QEMU and returns the response. */ QDict *qtest_vqmp(QTestState *s, const char *fmt, va_list ap) - GCC_FMT_ATTR(2, 0); + G_GNUC_PRINTF(2, 0); +#ifndef _WIN32 /** * qtest_qmp_vsend_fds: * @s: #QTestState instance to operate on. @@ -194,7 +209,8 @@ QDict *qtest_vqmp(QTestState *s, const char *fmt, va_list ap) */ void qtest_qmp_vsend_fds(QTestState *s, int *fds, size_t fds_num, const char *fmt, va_list ap) - GCC_FMT_ATTR(4, 0); + G_GNUC_PRINTF(4, 0); +#endif /* _WIN32 */ /** * qtest_qmp_vsend: @@ -207,7 +223,7 @@ void qtest_qmp_vsend_fds(QTestState *s, int *fds, size_t fds_num, * Sends a QMP message to QEMU and leaves the response in the stream. */ void qtest_qmp_vsend(QTestState *s, const char *fmt, va_list ap) - GCC_FMT_ATTR(2, 0); + G_GNUC_PRINTF(2, 0); /** * qtest_qmp_receive_dict: @@ -269,7 +285,7 @@ QDict *qtest_qmp_event_ref(QTestState *s, const char *event); * * Returns: the command's output. The caller should g_free() it. 
*/ -char *qtest_hmp(QTestState *s, const char *fmt, ...) GCC_FMT_ATTR(2, 3); +char *qtest_hmp(QTestState *s, const char *fmt, ...) G_GNUC_PRINTF(2, 3); /** * qtest_hmpv: @@ -283,7 +299,7 @@ char *qtest_hmp(QTestState *s, const char *fmt, ...) GCC_FMT_ATTR(2, 3); * Returns: the command's output. The caller should g_free() it. */ char *qtest_vhmp(QTestState *s, const char *fmt, va_list ap) - GCC_FMT_ATTR(2, 0); + G_GNUC_PRINTF(2, 0); void qtest_module_load(QTestState *s, const char *prefix, const char *libname); @@ -588,6 +604,14 @@ bool qtest_big_endian(QTestState *s); */ const char *qtest_get_arch(void); +/** + * qtest_has_accel: + * @accel_name: Accelerator name to check for. + * + * Returns: true if the accelerator is built in. + */ +bool qtest_has_accel(const char *accel_name); + /** * qtest_add_func: * @str: Test case path. @@ -680,17 +704,7 @@ void qtest_remove_abrt_handler(void *data); * the response. */ void qtest_qmp_assert_success(QTestState *qts, const char *fmt, ...) - GCC_FMT_ATTR(2, 3); - -QDict *qmp_fd_receive(int fd); -void qmp_fd_vsend_fds(int fd, int *fds, size_t fds_num, - const char *fmt, va_list ap) GCC_FMT_ATTR(4, 0); -void qmp_fd_vsend(int fd, const char *fmt, va_list ap) GCC_FMT_ATTR(2, 0); -void qmp_fd_send(int fd, const char *fmt, ...) GCC_FMT_ATTR(2, 3); -void qmp_fd_send_raw(int fd, const char *fmt, ...) GCC_FMT_ATTR(2, 3); -void qmp_fd_vsend_raw(int fd, const char *fmt, va_list ap) GCC_FMT_ATTR(2, 0); -QDict *qmp_fdv(int fd, const char *fmt, va_list ap) GCC_FMT_ATTR(2, 0); -QDict *qmp_fd(int fd, const char *fmt, ...) GCC_FMT_ATTR(2, 3); + G_GNUC_PRINTF(2, 3); /** * qtest_cb_for_every_machine: @@ -702,11 +716,27 @@ QDict *qmp_fd(int fd, const char *fmt, ...) GCC_FMT_ATTR(2, 3); void qtest_cb_for_every_machine(void (*cb)(const char *machine), bool skip_old_versioned); +/** + * qtest_has_machine: + * @machine: The machine to look for + * + * Returns: true if the machine is available in the target binary. + */ +bool qtest_has_machine(const char *machine); + +/** + * qtest_has_device: + * @device: The device to look for + * + * Returns: true if the device is available in the target binary. + */ +bool qtest_has_device(const char *device); + /** * qtest_qmp_device_add_qdict: * @qts: QTestState instance to operate on * @drv: Name of the device that should be added - * @arguments: QDict with properties for the device to intialize + * @arguments: QDict with properties for the device to initialize * * Generic hot-plugging test via the device_add QMP command with properties * supplied in form of QDict. Use NULL for empty properties list. @@ -726,7 +756,28 @@ void qtest_qmp_device_add_qdict(QTestState *qts, const char *drv, * Generic hot-plugging test via the device_add QMP command. */ void qtest_qmp_device_add(QTestState *qts, const char *driver, const char *id, - const char *fmt, ...) GCC_FMT_ATTR(4, 5); + const char *fmt, ...) G_GNUC_PRINTF(4, 5); + +#ifndef _WIN32 +/** + * qtest_qmp_add_client: + * @qts: QTestState instance to operate on + * @protocol: the protocol to add to + * @fd: the client file-descriptor + * + * Call QMP ``getfd`` followed by ``add_client`` with the given @fd. + */ +void qtest_qmp_add_client(QTestState *qts, const char *protocol, int fd); +#endif /* _WIN32 */ + +/** + * qtest_qmp_device_del_send: + * @qts: QTestState instance to operate on + * @id: Identification string + * + * Generic hot-unplugging test via the device_del QMP command. 
+ */ +void qtest_qmp_device_del_send(QTestState *qts, const char *id); /** * qtest_qmp_device_del: @@ -734,27 +785,10 @@ void qtest_qmp_device_add(QTestState *qts, const char *driver, const char *id, * @id: Identification string * * Generic hot-unplugging test via the device_del QMP command. + * Waiting for command completion event. */ void qtest_qmp_device_del(QTestState *qts, const char *id); -/** - * qmp_rsp_is_err: - * @rsp: QMP response to check for error - * - * Test @rsp for error and discard @rsp. - * Returns 'true' if there is error in @rsp and 'false' otherwise. - */ -bool qmp_rsp_is_err(QDict *rsp); - -/** - * qmp_expect_error_and_unref: - * @rsp: QMP response to check for error - * @class: an error class - * - * Assert the response has the given error class and discard @rsp. - */ -void qmp_expect_error_and_unref(QDict *rsp, const char *class); - /** * qtest_probe_child: * @s: QTestState instance to operate on. @@ -776,4 +810,26 @@ QTestState *qtest_inproc_init(QTestState **s, bool log, const char* arch, void (*send)(void*, const char*)); void qtest_client_inproc_recv(void *opaque, const char *str); + +/** + * qtest_qom_set_bool: + * @s: QTestState instance to operate on. + * @path: Path to the property being set. + * @property: Property being set. + * @value: Value to set the property. + * + * Set the property with passed in value. + */ +void qtest_qom_set_bool(QTestState *s, const char *path, const char *property, + bool value); + +/** + * qtest_qom_get_bool: + * @s: QTestState instance to operate on. + * @path: Path to the property being retrieved. + * @property: Property from where the value is being retrieved. + * + * Returns: Value retrieved from property. + */ +bool qtest_qom_get_bool(QTestState *s, const char *path, const char *property); #endif diff --git a/tests/qtest/lpc-ich9-test.c b/tests/qtest/lpc-ich9-test.c index fe0bef9980..8ac95b89f7 100644 --- a/tests/qtest/lpc-ich9-test.c +++ b/tests/qtest/lpc-ich9-test.c @@ -9,7 +9,7 @@ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" static void test_lp1878642_pci_bus_get_irq_level_assert(void) { diff --git a/tests/qtest/lsm303dlhc-mag-test.c b/tests/qtest/lsm303dlhc-mag-test.c new file mode 100644 index 0000000000..0f64e7fc67 --- /dev/null +++ b/tests/qtest/lsm303dlhc-mag-test.c @@ -0,0 +1,148 @@ +/* + * QTest testcase for the LSM303DLHC I2C magnetometer + * + * Copyright (C) 2021 Linaro Ltd. 
+ * Written by Kevin Townsend + * + * Based on: https://www.st.com/resource/en/datasheet/lsm303dlhc.pdf + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "libqtest-single.h" +#include "libqos/qgraph.h" +#include "libqos/i2c.h" +#include "qapi/qmp/qdict.h" + +#define LSM303DLHC_MAG_TEST_ID "lsm303dlhc_mag-test" +#define LSM303DLHC_MAG_REG_CRA 0x00 +#define LSM303DLHC_MAG_REG_CRB 0x01 +#define LSM303DLHC_MAG_REG_OUT_X_H 0x03 +#define LSM303DLHC_MAG_REG_OUT_Z_H 0x05 +#define LSM303DLHC_MAG_REG_OUT_Y_H 0x07 +#define LSM303DLHC_MAG_REG_IRC 0x0C +#define LSM303DLHC_MAG_REG_TEMP_OUT_H 0x31 + +static int qmp_lsm303dlhc_mag_get_property(const char *id, const char *prop) +{ + QDict *response; + int ret; + + response = qmp("{ 'execute': 'qom-get', 'arguments': { 'path': %s, " + "'property': %s } }", id, prop); + g_assert(qdict_haskey(response, "return")); + ret = qdict_get_int(response, "return"); + qobject_unref(response); + return ret; +} + +static void qmp_lsm303dlhc_mag_set_property(const char *id, const char *prop, + int value) +{ + QDict *response; + + response = qmp("{ 'execute': 'qom-set', 'arguments': { 'path': %s, " + "'property': %s, 'value': %d } }", id, prop, value); + g_assert(qdict_haskey(response, "return")); + qobject_unref(response); +} + +static void send_and_receive(void *obj, void *data, QGuestAllocator *alloc) +{ + int64_t value; + QI2CDevice *i2cdev = (QI2CDevice *)obj; + + /* Check default value for CRB */ + g_assert_cmphex(i2c_get8(i2cdev, LSM303DLHC_MAG_REG_CRB), ==, 0x20); + + /* Set x to 1.0 gauss and verify the value */ + qmp_lsm303dlhc_mag_set_property(LSM303DLHC_MAG_TEST_ID, "mag-x", 100000); + value = qmp_lsm303dlhc_mag_get_property( + LSM303DLHC_MAG_TEST_ID, "mag-x"); + g_assert_cmpint(value, ==, 100000); + + /* Set y to 1.5 gauss and verify the value */ + qmp_lsm303dlhc_mag_set_property(LSM303DLHC_MAG_TEST_ID, "mag-y", 150000); + value = qmp_lsm303dlhc_mag_get_property( + LSM303DLHC_MAG_TEST_ID, "mag-y"); + g_assert_cmpint(value, ==, 150000); + + /* Set z to 0.5 gauss and verify the value */ + qmp_lsm303dlhc_mag_set_property(LSM303DLHC_MAG_TEST_ID, "mag-z", 50000); + value = qmp_lsm303dlhc_mag_get_property( + LSM303DLHC_MAG_TEST_ID, "mag-z"); + g_assert_cmpint(value, ==, 50000); + + /* Set temperature to 23.6 C and verify the value */ + qmp_lsm303dlhc_mag_set_property(LSM303DLHC_MAG_TEST_ID, + "temperature", 23600); + value = qmp_lsm303dlhc_mag_get_property( + LSM303DLHC_MAG_TEST_ID, "temperature"); + /* Should return 23.5 C due to 0.125°C steps. 
*/ + g_assert_cmpint(value, ==, 23500); + + /* Read raw x axis registers (1 gauss = 1100 at +/-1.3 g gain) */ + value = i2c_get16(i2cdev, LSM303DLHC_MAG_REG_OUT_X_H); + g_assert_cmphex(value, ==, 1100); + + /* Read raw y axis registers (1.5 gauss = 1650 at +/- 1.3 g gain = ) */ + value = i2c_get16(i2cdev, LSM303DLHC_MAG_REG_OUT_Y_H); + g_assert_cmphex(value, ==, 1650); + + /* Read raw z axis registers (0.5 gauss = 490 at +/- 1.3 g gain = ) */ + value = i2c_get16(i2cdev, LSM303DLHC_MAG_REG_OUT_Z_H); + g_assert_cmphex(value, ==, 490); + + /* Read raw temperature registers with temp disabled (CRA = 0x10) */ + value = i2c_get16(i2cdev, LSM303DLHC_MAG_REG_TEMP_OUT_H); + g_assert_cmphex(value, ==, 0); + + /* Enable temperature reads (CRA = 0x90) */ + i2c_set8(i2cdev, LSM303DLHC_MAG_REG_CRA, 0x90); + + /* Read raw temp registers (23.5 C = 188 at 1 lsb = 0.125 C) */ + value = i2c_get16(i2cdev, LSM303DLHC_MAG_REG_TEMP_OUT_H); + g_assert_cmphex(value, ==, 188); +} + +static void reg_wraparound(void *obj, void *data, QGuestAllocator *alloc) +{ + uint8_t value[4]; + QI2CDevice *i2cdev = (QI2CDevice *)obj; + + /* Set x to 1.0 gauss, and y to 1.5 gauss for known test values */ + qmp_lsm303dlhc_mag_set_property(LSM303DLHC_MAG_TEST_ID, "mag-x", 100000); + qmp_lsm303dlhc_mag_set_property(LSM303DLHC_MAG_TEST_ID, "mag-y", 150000); + + /* Check that requesting 4 bytes starting at Y_H wraps around to X_L */ + i2c_read_block(i2cdev, LSM303DLHC_MAG_REG_OUT_Y_H, value, 4); + /* 1.5 gauss = 1650 lsb = 0x672 */ + g_assert_cmphex(value[0], ==, 0x06); + g_assert_cmphex(value[1], ==, 0x72); + /* 1.0 gauss = 1100 lsb = 0x44C */ + g_assert_cmphex(value[2], ==, 0x04); + g_assert_cmphex(value[3], ==, 0x4C); + + /* Check that requesting LSM303DLHC_MAG_REG_IRC wraps around to CRA */ + i2c_read_block(i2cdev, LSM303DLHC_MAG_REG_IRC, value, 2); + /* Default value for IRC = 0x33 */ + g_assert_cmphex(value[0], ==, 0x33); + /* Default value for CRA = 0x10 */ + g_assert_cmphex(value[1], ==, 0x10); +} + +static void lsm303dlhc_mag_register_nodes(void) +{ + QOSGraphEdgeOptions opts = { + .extra_device_opts = "id=" LSM303DLHC_MAG_TEST_ID ",address=0x1e" + }; + add_qi2c_address(&opts, &(QI2CAddress) { 0x1E }); + + qos_node_create_driver("lsm303dlhc_mag", i2c_device_create); + qos_node_consumes("lsm303dlhc_mag", "i2c-bus", &opts); + + qos_add_test("tx-rx", "lsm303dlhc_mag", send_and_receive, NULL); + qos_add_test("regwrap", "lsm303dlhc_mag", reg_wraparound, NULL); +} +libqos_init(lsm303dlhc_mag_register_nodes); diff --git a/tests/qtest/m48t59-test.c b/tests/qtest/m48t59-test.c index 6db3234100..843d2ced8e 100644 --- a/tests/qtest/m48t59-test.c +++ b/tests/qtest/m48t59-test.c @@ -14,7 +14,7 @@ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #define RTC_SECONDS 0x9 #define RTC_MINUTES 0xa @@ -137,7 +137,7 @@ static void cmos_get_date_time(QTestState *s, struct tm *date) date->tm_mday = mday; date->tm_mon = mon - 1; date->tm_year = base_year + year - 1900; -#ifndef __sun__ +#if !defined(__sun__) && !defined(_WIN32) date->tm_gmtoff = 0; #endif diff --git a/tests/qtest/machine-none-test.c b/tests/qtest/machine-none-test.c index 138101b46a..31cc0bfb01 100644 --- a/tests/qtest/machine-none-test.c +++ b/tests/qtest/machine-none-test.c @@ -12,9 +12,8 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/cutils.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qapi/qmp/qdict.h" @@ -55,6 +54,7 @@ static struct arch2cpu cpus_map[] = { { "riscv64", "rv64" }, { "riscv32", "rv32" }, { "rx", 
"rx62n" }, + { "loongarch64", "la464"}, }; static const char *get_cpu_model_by_arch(const char *arch) @@ -81,7 +81,7 @@ static void test_machine_cpu_cli(void) " add it to cpus_map\n", arch); return; /* TODO: die here to force all targets have a test */ } - qts = qtest_initf("-machine none -cpu '%s'", cpu_model); + qts = qtest_initf("-machine none -cpu \"%s\"", cpu_model); response = qtest_qmp(qts, "{ 'execute': 'quit' }"); g_assert(qdict_haskey(response, "return")); diff --git a/tests/qtest/megasas-test.c b/tests/qtest/megasas-test.c index eae70ff95f..d6796b9bd7 100644 --- a/tests/qtest/megasas-test.c +++ b/tests/qtest/megasas-test.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/bswap.h" #include "qemu/module.h" #include "libqos/qgraph.h" diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build index 2bc3efd49f..c07a5b1a5f 100644 --- a/tests/qtest/meson.build +++ b/tests/qtest/meson.build @@ -17,12 +17,7 @@ slow_qtests = { 'test-hmp' : 120, } -qtests_generic = \ - (config_all_devices.has_key('CONFIG_MEGASAS_SCSI_PCI') ? ['fuzz-megasas-test'] : []) + \ - (config_all_devices.has_key('CONFIG_VIRTIO_SCSI') ? ['fuzz-virtio-scsi-test'] : []) + \ - (config_all_devices.has_key('CONFIG_SB16') ? ['fuzz-sb16-test'] : []) + \ - (config_all_devices.has_key('CONFIG_SDHCI_PCI') ? ['fuzz-sdcard-test'] : []) + \ - [ +qtests_generic = [ 'cdrom-test', 'device-introspect-test', 'machine-none-test', @@ -31,6 +26,7 @@ qtests_generic = \ 'qom-test', 'test-hmp', 'qos-test', + 'readconfig-test', ] if config_host.has_key('CONFIG_MODULES') qtests_generic += [ 'modules-test' ] @@ -40,13 +36,20 @@ qtests_pci = \ (config_all_devices.has_key('CONFIG_VGA') ? ['display-vga-test'] : []) + \ (config_all_devices.has_key('CONFIG_IVSHMEM_DEVICE') ? ['ivshmem-test'] : []) +qtests_cxl = \ + (config_all_devices.has_key('CONFIG_CXL') ? ['cxl-test'] : []) + +qtests_filter = \ + (slirp.found() ? ['test-netfilter'] : []) + \ + (config_host.has_key('CONFIG_POSIX') ? ['test-filter-mirror'] : []) + \ + (config_host.has_key('CONFIG_POSIX') ? ['test-filter-redirector'] : []) + qtests_i386 = \ - (slirp.found() ? ['pxe-test', 'test-netfilter'] : []) + \ - (config_host.has_key('CONFIG_POSIX') ? ['test-filter-mirror'] : []) + \ + (slirp.found() ? ['pxe-test'] : []) + \ + qtests_filter + \ (have_tools ? ['ahci-test'] : []) + \ (config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) + \ (config_all_devices.has_key('CONFIG_SGA') ? ['boot-serial-test'] : []) + \ - (config_all_devices.has_key('CONFIG_RTL8139_PCI') ? ['test-filter-redirector'] : []) + \ (config_all_devices.has_key('CONFIG_ISA_IPMI_KCS') ? ['ipmi-kcs-test'] : []) + \ (config_host.has_key('CONFIG_LINUX') and \ config_all_devices.has_key('CONFIG_ISA_IPMI_BT') ? ['ipmi-bt-test'] : []) + \ @@ -67,13 +70,25 @@ qtests_i386 = \ (config_all_devices.has_key('CONFIG_TPM_TIS_ISA') ? ['tpm-tis-swtpm-test'] : []) + \ (config_all_devices.has_key('CONFIG_RTL8139_PCI') ? ['rtl8139-test'] : []) + \ (config_all_devices.has_key('CONFIG_E1000E_PCI_EXPRESS') ? ['fuzz-e1000e-test'] : []) + \ + (config_all_devices.has_key('CONFIG_MEGASAS_SCSI_PCI') ? ['fuzz-megasas-test'] : []) + \ + (config_all_devices.has_key('CONFIG_LSI_SCSI_PCI') ? ['fuzz-lsi53c895a-test'] : []) + \ + (config_all_devices.has_key('CONFIG_VIRTIO_SCSI') ? ['fuzz-virtio-scsi-test'] : []) + \ + (config_all_devices.has_key('CONFIG_SB16') ? ['fuzz-sb16-test'] : []) + \ + (config_all_devices.has_key('CONFIG_SDHCI_PCI') ? 
['fuzz-sdcard-test'] : []) + \ (config_all_devices.has_key('CONFIG_ESP_PCI') ? ['am53c974-test'] : []) + \ + (config_host.has_key('CONFIG_POSIX') and \ + config_all_devices.has_key('CONFIG_ACPI_ERST') ? ['erst-test'] : []) + \ + (config_all_devices.has_key('CONFIG_VIRTIO_NET') and \ + config_all_devices.has_key('CONFIG_Q35') and \ + config_all_devices.has_key('CONFIG_VIRTIO_PCI') and \ + slirp.found() ? ['virtio-net-failover'] : []) + \ + (unpack_edk2_blobs ? ['bios-tables-test'] : []) + \ qtests_pci + \ + qtests_cxl + \ ['fdc-test', 'ide-test', 'hd-geo-test', 'boot-order-test', - 'bios-tables-test', 'rtc-test', 'i440fx-test', 'fw_cfg-test', @@ -85,17 +100,21 @@ qtests_i386 = \ 'vmgenid-test', 'migration-test', 'test-x86-cpuid-compat', - 'numa-test'] + 'numa-test' + ] + +if dbus_display + qtests_i386 += ['dbus-display-test'] +endif dbus_daemon = find_program('dbus-daemon', required: false) -if dbus_daemon.found() and config_host.has_key('GDBUS_CODEGEN') +if dbus_daemon.found() and gdbus_codegen.found() # Temporarily disabled due to Patchew failures: #qtests_i386 += ['dbus-vmstate-test'] dbus_vmstate1 = custom_target('dbus-vmstate description', output: ['dbus-vmstate1.h', 'dbus-vmstate1.c'], - input: files('dbus-vmstate1.xml'), - command: [config_host['GDBUS_CODEGEN'], - '@INPUT@', + input: meson.project_source_root() / 'backends/dbus-vmstate1.xml', + command: [gdbus_codegen, '@INPUT@', '--interface-prefix', 'org.qemu', '--generate-c-code', '@BASENAME@']).to_list() else @@ -104,52 +123,60 @@ endif qtests_x86_64 = qtests_i386 -qtests_alpha = [ 'boot-serial-test' ] + \ +qtests_alpha = ['boot-serial-test'] + \ + qtests_filter + \ (config_all_devices.has_key('CONFIG_VGA') ? ['display-vga-test'] : []) qtests_avr = [ 'boot-serial-test' ] -qtests_hppa = [ 'boot-serial-test' ] + \ +qtests_hppa = ['boot-serial-test'] + \ + qtests_filter + \ (config_all_devices.has_key('CONFIG_VGA') ? ['display-vga-test'] : []) -qtests_m68k = [ 'boot-serial-test' ] -qtests_microblaze = [ 'boot-serial-test' ] +qtests_m68k = ['boot-serial-test'] + \ + qtests_filter + +qtests_microblaze = ['boot-serial-test'] + \ + qtests_filter + qtests_microblazeel = qtests_microblaze qtests_mips = \ + qtests_filter + \ (config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) + \ (config_all_devices.has_key('CONFIG_VGA') ? ['display-vga-test'] : []) -qtests_mips64 = \ - (config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) + \ - (config_all_devices.has_key('CONFIG_VGA') ? ['display-vga-test'] : []) - -qtests_mips64el = \ - (config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) + \ - (config_all_devices.has_key('CONFIG_VGA') ? ['display-vga-test'] : []) +qtests_mipsel = qtests_mips +qtests_mips64 = qtests_mips +qtests_mips64el = qtests_mips qtests_ppc = \ + qtests_filter + \ (config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) + \ (config_all_devices.has_key('CONFIG_M48T59') ? ['m48t59-test'] : []) + \ - ['boot-order-test', 'prom-env-test', 'boot-serial-test'] \ + (config_all_devices.has_key('CONFIG_TCG') ? ['prom-env-test'] : []) + \ + (config_all_devices.has_key('CONFIG_TCG') ? ['boot-serial-test'] : []) + \ + ['boot-order-test'] qtests_ppc64 = \ + qtests_ppc + \ (config_all_devices.has_key('CONFIG_PSERIES') ? ['device-plug-test'] : []) + \ (config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-xscom-test'] : []) + \ (config_all_devices.has_key('CONFIG_PSERIES') ? ['rtas-test'] : []) + \ - (slirp.found() ? 
['pxe-test', 'test-netfilter'] : []) + \ + (slirp.found() ? ['pxe-test'] : []) + \ (config_all_devices.has_key('CONFIG_USB_UHCI') ? ['usb-hcd-uhci-test'] : []) + \ (config_all_devices.has_key('CONFIG_USB_XHCI_NEC') ? ['usb-hcd-xhci-test'] : []) + \ - (config_host.has_key('CONFIG_POSIX') ? ['test-filter-mirror'] : []) + \ qtests_pci + ['migration-test', 'numa-test', 'cpu-plug-test', 'drive_del-test'] qtests_sh4 = (config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) qtests_sh4eb = (config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) -qtests_sparc = ['prom-env-test', 'm48t59-test', 'boot-serial-test'] +qtests_sparc = ['prom-env-test', 'm48t59-test', 'boot-serial-test'] + \ + qtests_filter qtests_sparc64 = \ (config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) + \ + qtests_filter + \ ['prom-env-test', 'boot-serial-test'] qtests_npcm7xx = \ @@ -157,13 +184,15 @@ qtests_npcm7xx = \ 'npcm7xx_gpio-test', 'npcm7xx_pwm-test', 'npcm7xx_rng-test', + 'npcm7xx_sdhci-test', 'npcm7xx_smbus-test', 'npcm7xx_timer-test', 'npcm7xx_watchdog_timer-test'] + \ (slirp.found() ? ['npcm7xx_emc-test'] : []) qtests_aspeed = \ ['aspeed_hace-test', - 'aspeed_smc-test'] + 'aspeed_smc-test', + 'aspeed_gpio-test'] qtests_arm = \ (config_all_devices.has_key('CONFIG_MPS2') ? ['sse-timer-test'] : []) + \ (config_all_devices.has_key('CONFIG_CMSDK_APB_DUALTIMER') ? ['cmsdk-apb-dualtimer-test'] : []) + \ @@ -180,14 +209,15 @@ qtests_arm = \ # TODO: once aarch64 TCG is fixed on ARM 32 bit host, make bios-tables-test unconditional qtests_aarch64 = \ - (cpu != 'arm' ? ['bios-tables-test'] : []) + \ + (cpu != 'arm' and unpack_edk2_blobs ? ['bios-tables-test'] : []) + \ (config_all_devices.has_key('CONFIG_TPM_TIS_SYSBUS') ? ['tpm-tis-device-test'] : []) + \ (config_all_devices.has_key('CONFIG_TPM_TIS_SYSBUS') ? ['tpm-tis-device-swtpm-test'] : []) + \ + (config_all_devices.has_key('CONFIG_XLNX_ZYNQMP_ARM') ? ['xlnx-can-test', 'fuzz-xlnx-dp-test'] : []) + \ ['arm-cpu-features', 'numa-test', 'boot-serial-test', - 'xlnx-can-test', - 'migration-test'] + 'migration-test', + 'bcm2835-dma-test'] qtests_s390x = \ (slirp.found() ? 
['pxe-test', 'test-netfilter'] : []) + \ @@ -206,10 +236,11 @@ qos_test_ss.add( 'adm1272-test.c', 'ds1338-test.c', 'e1000-test.c', - 'e1000e-test.c', 'eepro100-test.c', 'es1370-test.c', 'ipoctal232-test.c', + 'lsm303dlhc-mag-test.c', + 'isl_pmbus_vr-test.c', 'max34451-test.c', 'megasas-test.c', 'ne2000-test.c', @@ -229,24 +260,41 @@ qos_test_ss.add( 'virtio-rng-test.c', 'virtio-scsi-test.c', 'virtio-serial-test.c', + 'virtio-iommu-test.c', 'vmxnet3-test.c', ) +if config_host.has_key('CONFIG_POSIX') + qos_test_ss.add(files('e1000e-test.c')) +endif if have_virtfs qos_test_ss.add(files('virtio-9p-test.c')) endif -qos_test_ss.add(when: 'CONFIG_VHOST_USER', if_true: files('vhost-user-test.c')) +if have_vhost_user + qos_test_ss.add(files('vhost-user-test.c')) +endif if have_tools and have_vhost_user_blk_server qos_test_ss.add(files('vhost-user-blk-test.c')) endif tpmemu_files = ['tpm-emu.c', 'tpm-util.c', 'tpm-tests.c'] +migration_files = [files('migration-helpers.c')] +if gnutls.found() + migration_files += [files('../unit/crypto-tls-psk-helpers.c'), gnutls] + + if tasn1.found() + migration_files += [files('../unit/crypto-tls-x509-helpers.c', + '../unit/pkix_asn1_tab.c'), tasn1] + endif +endif + qtests = { 'bios-tables-test': [io, 'boot-sector.c', 'acpi-utils.c', 'tpm-emu.c'], 'cdrom-test': files('boot-sector.c'), 'dbus-vmstate-test': files('migration-helpers.c') + dbus_vmstate1, + 'erst-test': files('erst-test.c'), 'ivshmem-test': [rt, '../../contrib/ivshmem-server/ivshmem-server.c'], - 'migration-test': files('migration-helpers.c'), + 'migration-test': migration_files, 'pxe-test': files('boot-sector.c'), 'qos-test': [chardev, io, qos_test_ss.apply(config_host, strict: false).sources()], 'tpm-crb-swtpm-test': [io, tpmemu_files], @@ -258,6 +306,16 @@ qtests = { 'vmgenid-test': files('boot-sector.c', 'acpi-utils.c'), } +gvnc = dependency('gvnc-1.0', required: false) +if gvnc.found() + qtests += {'vnc-display-test': [gvnc]} + qtests_generic += [ 'vnc-display-test' ] +endif + +if dbus_display + qtests += {'dbus-display-test': [dbus_display1, gio]} +endif + qtest_executables = {} foreach dir : target_dirs if not dir.endswith('-softmmu') @@ -268,13 +326,13 @@ foreach dir : target_dirs qtest_emulator = emulators['qemu-system-' + target_base] target_qtests = get_variable('qtests_' + target_base, []) + qtests_generic - test_deps = [] + test_deps = roms qtest_env = environment() if have_tools qtest_env.set('QTEST_QEMU_IMG', './qemu-img') test_deps += [qemu_img] endif - qtest_env.set('G_TEST_DBUS_DAEMON', meson.source_root() / 'tests/dbus-vmstate-daemon.sh') + qtest_env.set('G_TEST_DBUS_DAEMON', meson.project_source_root() / 'tests/dbus-vmstate-daemon.sh') qtest_env.set('QTEST_QEMU_BINARY', './qemu-system-' + target_base) if have_tools and have_vhost_user_blk_server qtest_env.set('QTEST_QEMU_STORAGE_DAEMON_BINARY', './storage-daemon/qemu-storage-daemon') @@ -298,10 +356,9 @@ foreach dir : target_dirs test: executable(test, src, dependencies: deps) } endif - # FIXME: missing dependency on the emulator binary and qemu-img test('qtest-@0@/@1@'.format(target_base, test), qtest_executables[test], - depends: [test_deps, qtest_emulator], + depends: [test_deps, qtest_emulator, emulator_modules], env: qtest_env, args: ['--tap', '-k'], protocol: 'tap', diff --git a/tests/qtest/microbit-test.c b/tests/qtest/microbit-test.c index 2b255579df..4bc267020b 100644 --- a/tests/qtest/microbit-test.c +++ b/tests/qtest/microbit-test.c @@ -16,7 +16,7 @@ #include "qemu/osdep.h" #include "exec/hwaddr.h" -#include 
"libqos/libqtest.h" +#include "libqtest.h" #include "hw/arm/nrf51.h" #include "hw/char/nrf51_uart.h" @@ -51,7 +51,7 @@ static void uart_rw_to_rxd(QTestState *qts, int sock_fd, const char *in, { int i, in_len = strlen(in); - g_assert_true(write(sock_fd, in, in_len) == in_len); + g_assert_true(send(sock_fd, in, in_len, 0) == in_len); for (i = 0; i < in_len; i++) { g_assert_true(uart_wait_for_event(qts, NRF51_UART_BASE + A_UART_RXDRDY)); @@ -77,7 +77,7 @@ static void test_nrf51_uart(void) char s[10]; QTestState *qts = qtest_init_with_serial("-M microbit", &sock_fd); - g_assert_true(write(sock_fd, "c", 1) == 1); + g_assert_true(send(sock_fd, "c", 1, 0) == 1); g_assert_cmphex(qtest_readl(qts, NRF51_UART_BASE + A_UART_RXD), ==, 0x00); qtest_writel(qts, NRF51_UART_BASE + A_UART_ENABLE, 0x04); @@ -97,17 +97,17 @@ static void test_nrf51_uart(void) qtest_writel(qts, NRF51_UART_BASE + A_UART_STARTTX, 0x01); uart_w_to_txd(qts, "d"); - g_assert_true(read(sock_fd, s, 10) == 1); + g_assert_true(recv(sock_fd, s, 10, 0) == 1); g_assert_cmphex(s[0], ==, 'd'); qtest_writel(qts, NRF51_UART_BASE + A_UART_SUSPEND, 0x01); qtest_writel(qts, NRF51_UART_BASE + A_UART_TXD, 'h'); qtest_writel(qts, NRF51_UART_BASE + A_UART_STARTTX, 0x01); uart_w_to_txd(qts, "world"); - g_assert_true(read(sock_fd, s, 10) == 5); + g_assert_true(recv(sock_fd, s, 10, 0) == 5); g_assert_true(memcmp(s, "world", 5) == 0); - close(sock_fd); + closesocket(sock_fd); qtest_quit(qts); } @@ -447,11 +447,11 @@ static void test_nrf51_timer(void) timer_set_bitmode(qts, NRF51_TIMER_WIDTH_16); /* 16 MHz Timer */ timer_set_prescaler(qts, 0); - /* Swept over in first step */ + /* Swept over, during the first step */ timer_set_cc(qts, 0, 2); - /* Barely miss on first step */ + /* Barely miss, after the second step */ timer_set_cc(qts, 1, 162); - /* Spot on on third step */ + /* Spot on, after the third step */ timer_set_cc(qts, 2, 480); timer_assert_events(qts, 0, 0, 0, 0); diff --git a/tests/qtest/migration-helpers.c b/tests/qtest/migration-helpers.c index 4ee26014b7..f6f3c6680f 100644 --- a/tests/qtest/migration-helpers.c +++ b/tests/qtest/migration-helpers.c @@ -15,6 +15,14 @@ #include "migration-helpers.h" +/* + * Number of seconds we wait when looking for migration + * status changes, to avoid test suite hanging forever + * when things go wrong. Needs to be higher enough to + * avoid false positives on loaded hosts. + */ +#define MIGRATION_STATUS_WAIT_TIMEOUT 120 + bool got_stop; static void check_stop_event(QTestState *who) @@ -26,6 +34,7 @@ static void check_stop_event(QTestState *who) } } +#ifndef _WIN32 /* * Events can get in the way of responses we are actually waiting for. */ @@ -50,6 +59,7 @@ QDict *wait_command_fd(QTestState *who, int fd, const char *command, ...) return ret; } +#endif /* * Events can get in the way of responses we are actually waiting for. @@ -75,6 +85,28 @@ QDict *wait_command(QTestState *who, const char *command, ...) return ret; } +/* + * Execute the qmp command only + */ +QDict *qmp_command(QTestState *who, const char *command, ...) +{ + va_list ap; + QDict *resp, *ret; + + va_start(ap, command); + resp = qtest_vqmp(who, command, ap); + va_end(ap); + + g_assert(!qdict_haskey(resp, "error")); + g_assert(qdict_haskey(resp, "return")); + + ret = qdict_get_qdict(resp, "return"); + qobject_ref(ret); + qobject_unref(resp); + + return ret; +} + /* * Send QMP command "migrate". * Arguments are built from @fmt... 
(formatted like @@ -107,6 +139,19 @@ QDict *migrate_query(QTestState *who) return wait_command(who, "{ 'execute': 'query-migrate' }"); } +QDict *migrate_query_not_failed(QTestState *who) +{ + const char *status; + QDict *rsp = migrate_query(who); + status = qdict_get_str(rsp, "status"); + if (g_str_equal(status, "failed")) { + g_printerr("query-migrate shows failed migration: %s\n", + qdict_get_str(rsp, "error-desc")); + } + g_assert(!g_str_equal(status, "failed")); + return rsp; +} + /* * Note: caller is responsible to free the returned object via * g_free() after use @@ -153,8 +198,11 @@ static bool check_migration_status(QTestState *who, const char *goal, void wait_for_migration_status(QTestState *who, const char *goal, const char **ungoals) { + g_test_timer_start(); while (!check_migration_status(who, goal, ungoals)) { usleep(1000); + + g_assert(g_test_timer_elapsed() < MIGRATION_STATUS_WAIT_TIMEOUT); } } @@ -165,6 +213,7 @@ void wait_for_migration_complete(QTestState *who) void wait_for_migration_fail(QTestState *from, bool allow_active) { + g_test_timer_start(); QDict *rsp_return; char *status; bool failed; @@ -180,6 +229,8 @@ void wait_for_migration_fail(QTestState *from, bool allow_active) g_assert(result); failed = !strcmp(status, "failed"); g_free(status); + + g_assert(g_test_timer_elapsed() < MIGRATION_STATUS_WAIT_TIMEOUT); } while (!failed); /* Is the machine currently running? */ diff --git a/tests/qtest/migration-helpers.h b/tests/qtest/migration-helpers.h index d63bba9630..db0684de48 100644 --- a/tests/qtest/migration-helpers.h +++ b/tests/qtest/migration-helpers.h @@ -9,23 +9,29 @@ * See the COPYING file in the top-level directory. * */ -#ifndef MIGRATION_HELPERS_H_ -#define MIGRATION_HELPERS_H_ -#include "libqos/libqtest.h" +#ifndef MIGRATION_HELPERS_H +#define MIGRATION_HELPERS_H + +#include "libqtest.h" extern bool got_stop; -GCC_FMT_ATTR(3, 4) +#ifndef _WIN32 +G_GNUC_PRINTF(3, 4) QDict *wait_command_fd(QTestState *who, int fd, const char *command, ...); +#endif -GCC_FMT_ATTR(2, 3) +G_GNUC_PRINTF(2, 3) QDict *wait_command(QTestState *who, const char *command, ...); -GCC_FMT_ATTR(3, 4) +QDict *qmp_command(QTestState *who, const char *command, ...); + +G_GNUC_PRINTF(3, 4) void migrate_qmp(QTestState *who, const char *uri, const char *fmt, ...); QDict *migrate_query(QTestState *who); +QDict *migrate_query_not_failed(QTestState *who); void wait_for_migration_status(QTestState *who, const char *goal, const char **ungoals); @@ -34,4 +40,4 @@ void wait_for_migration_complete(QTestState *who); void wait_for_migration_fail(QTestState *from, bool allow_active); -#endif /* MIGRATION_HELPERS_H_ */ +#endif /* MIGRATION_HELPERS_H */ diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c index cc5e83d98a..dbde726adf 100644 --- a/tests/qtest/migration-test.c +++ b/tests/qtest/migration-test.c @@ -12,7 +12,7 @@ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qapi/error.h" #include "qapi/qmp/qdict.h" #include "qemu/module.h" @@ -23,9 +23,17 @@ #include "qapi/qapi-visit-sockets.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qobject-output-visitor.h" +#include "crypto/tlscredspsk.h" +#include "qapi/qmp/qlist.h" #include "migration-helpers.h" #include "tests/migration/migration-test.h" +#ifdef CONFIG_GNUTLS +# include "tests/unit/crypto-tls-psk-helpers.h" +# ifdef CONFIG_TASN1 +# include "tests/unit/crypto-tls-x509-helpers.h" +# endif /* CONFIG_TASN1 */ +#endif /* CONFIG_GNUTLS */ /* For dirty ring test; so far only x86_64 
is supported */ #if defined(__linux__) && defined(HOST_X86_64) @@ -39,8 +47,11 @@ unsigned start_address; unsigned end_address; static bool uffd_feature_thread_id; -/* A downtime where the test really should converge */ -#define CONVERGE_DOWNTIME 1000 +/* + * Dirtylimit stop working if dirty page rate error + * value less than DIRTYLIMIT_TOLERANCE_RANGE + */ +#define DIRTYLIMIT_TOLERANCE_RANGE 25 /* MB/s */ #if defined(__linux__) #include @@ -91,7 +102,7 @@ static bool ufd_version_check(void) #endif -static const char *tmpfs; +static char *tmpfs; /* The boot file modifies memory area in [start_address, end_address) * repeatedly. It outputs a 'B' at a fixed rate while it's still running. @@ -174,7 +185,7 @@ static int64_t read_ram_property_int(QTestState *who, const char *property) QDict *rsp_return, *rsp_ram; int64_t result; - rsp_return = migrate_query(who); + rsp_return = migrate_query_not_failed(who); if (!qdict_haskey(rsp_return, "ram")) { /* Still in setup */ result = 0; @@ -191,7 +202,7 @@ static int64_t read_migrate_property_int(QTestState *who, const char *property) QDict *rsp_return; int64_t result; - rsp_return = migrate_query(who); + rsp_return = migrate_query_not_failed(who); result = qdict_get_try_int(rsp_return, property, 0); qobject_unref(rsp_return); return result; @@ -206,7 +217,7 @@ static void read_blocktime(QTestState *who) { QDict *rsp_return; - rsp_return = migrate_query(who); + rsp_return = migrate_query_not_failed(who); g_assert(qdict_haskey(rsp_return, "postcopy-blocktime")); qobject_unref(rsp_return); } @@ -395,6 +406,20 @@ static void migrate_set_parameter_str(QTestState *who, const char *parameter, migrate_check_parameter_str(who, parameter, value); } +static void migrate_ensure_non_converge(QTestState *who) +{ + /* Can't converge with 1ms downtime + 30 mbs bandwidth limit */ + migrate_set_parameter_int(who, "max-bandwidth", 30 * 1000 * 1000); + migrate_set_parameter_int(who, "downtime-limit", 1); +} + +static void migrate_ensure_converge(QTestState *who) +{ + /* Should converge with 30s downtime + 1 gbs bandwidth limit */ + migrate_set_parameter_int(who, "max-bandwidth", 1 * 1000 * 1000 * 1000); + migrate_set_parameter_int(who, "downtime-limit", 30 * 1000); +} + static void migrate_pause(QTestState *who) { QDict *rsp; @@ -474,25 +499,85 @@ typedef struct { bool only_target; /* Use dirty ring if true; dirty logging otherwise */ bool use_dirty_ring; - char *opts_source; - char *opts_target; + const char *opts_source; + const char *opts_target; } MigrateStart; -static MigrateStart *migrate_start_new(void) -{ - MigrateStart *args = g_new0(MigrateStart, 1); +/* + * A hook that runs after the src and dst QEMUs have been + * created, but before the migration is started. This can + * be used to set migration parameters and capabilities. + * + * Returns: NULL, or a pointer to opaque state to be + * later passed to the TestMigrateFinishHook + */ +typedef void * (*TestMigrateStartHook)(QTestState *from, + QTestState *to); - args->opts_source = g_strdup(""); - args->opts_target = g_strdup(""); - return args; -} +/* + * A hook that runs after the migration has finished, + * regardless of whether it succeeded or failed, but + * before QEMU has terminated (unless it self-terminated + * due to migration error) + * + * @opaque is a pointer to state previously returned + * by the TestMigrateStartHook if any, or NULL. 
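+ * For example, the TLS hooks added below return a pointer to the
+ * credential state they allocate, and the matching finish hooks use
+ * that pointer to remove the generated credential files and free the
+ * struct.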
+ */ +typedef void (*TestMigrateFinishHook)(QTestState *from, + QTestState *to, + void *opaque); -static void migrate_start_destroy(MigrateStart *args) -{ - g_free(args->opts_source); - g_free(args->opts_target); - g_free(args); -} +typedef struct { + /* Optional: fine tune start parameters */ + MigrateStart start; + + /* Required: the URI for the dst QEMU to listen on */ + const char *listen_uri; + + /* + * Optional: the URI for the src QEMU to connect to + * If NULL, then it will query the dst QEMU for its actual + * listening address and use that as the connect address. + * This allows for dynamically picking a free TCP port. + */ + const char *connect_uri; + + /* Optional: callback to run at start to set migration parameters */ + TestMigrateStartHook start_hook; + /* Optional: callback to run at finish to cleanup */ + TestMigrateFinishHook finish_hook; + + /* + * Optional: normally we expect the migration process to complete. + * + * There can be a variety of reasons and stages in which failure + * can happen during tests. + * + * If a failure is expected to happen at time of establishing + * the connection, then MIG_TEST_FAIL will indicate that the dst + * QEMU is expected to stay running and accept future migration + * connections. + * + * If a failure is expected to happen while processing the + * migration stream, then MIG_TEST_FAIL_DEST_QUIT_ERR will indicate + * that the dst QEMU is expected to quit with non-zero exit status + */ + enum { + /* This test should succeed, the default */ + MIG_TEST_SUCCEED = 0, + /* This test should fail, dest qemu should keep alive */ + MIG_TEST_FAIL, + /* This test should fail, dest qemu should fail with abnormal status */ + MIG_TEST_FAIL_DEST_QUIT_ERR, + } result; + + /* Optional: set number of migration passes to wait for */ + unsigned int iterations; + + /* Postcopy specific fields */ + void *postcopy_data; + bool postcopy_preempt; +} MigrateCommon; static int test_migrate_start(QTestState **from, QTestState **to, const char *uri, MigrateStart *args) @@ -508,13 +593,11 @@ static int test_migrate_start(QTestState **from, QTestState **to, const char *arch = qtest_get_arch(); const char *machine_opts = NULL; const char *memory_size; - int ret = 0; if (args->use_shmem) { if (!g_file_test("/dev/shm", G_FILE_TEST_IS_DIR)) { g_test_skip("/dev/shm is not supported"); - ret = -1; - goto out; + return -1; } } @@ -564,7 +647,16 @@ static int test_migrate_start(QTestState **from, QTestState **to, } if (!getenv("QTEST_LOG") && args->hide_stderr) { +#ifndef _WIN32 ignore_stderr = "2>/dev/null"; +#else + /* + * On Windows the QEMU executable is created via CreateProcess() and + * IO redirection does not work, so don't bother adding IO redirection + * to the command line. + */ + ignore_stderr = ""; +#endif } else { ignore_stderr = ""; } @@ -590,7 +682,8 @@ static int test_migrate_start(QTestState **from, QTestState **to, machine_opts ? " -machine " : "", machine_opts ? machine_opts : "", memory_size, tmpfs, - arch_source, shmem_opts, args->opts_source, + arch_source, shmem_opts, + args->opts_source ? args->opts_source : "", ignore_stderr); if (!args->only_target) { *from = qtest_init(cmd_source); @@ -608,7 +701,8 @@ static int test_migrate_start(QTestState **from, QTestState **to, machine_opts ? machine_opts : "", memory_size, tmpfs, uri, arch_target, shmem_opts, - args->opts_target, ignore_stderr); + args->opts_target ? 
args->opts_target : "", + ignore_stderr); *to = qtest_init(cmd_target); /* @@ -619,9 +713,7 @@ static int test_migrate_start(QTestState **from, QTestState **to, unlink(shmem_path); } -out: - migrate_start_destroy(args); - return ret; + return 0; } static void test_migrate_end(QTestState *from, QTestState *to, bool test_dest) @@ -658,27 +750,373 @@ static void test_migrate_end(QTestState *from, QTestState *to, bool test_dest) cleanup("dest_serial"); } +#ifdef CONFIG_GNUTLS +struct TestMigrateTLSPSKData { + char *workdir; + char *workdiralt; + char *pskfile; + char *pskfilealt; +}; + +static void * +test_migrate_tls_psk_start_common(QTestState *from, + QTestState *to, + bool mismatch) +{ + struct TestMigrateTLSPSKData *data = + g_new0(struct TestMigrateTLSPSKData, 1); + QDict *rsp; + + data->workdir = g_strdup_printf("%s/tlscredspsk0", tmpfs); + data->pskfile = g_strdup_printf("%s/%s", data->workdir, + QCRYPTO_TLS_CREDS_PSKFILE); + g_mkdir_with_parents(data->workdir, 0700); + test_tls_psk_init(data->pskfile); + + if (mismatch) { + data->workdiralt = g_strdup_printf("%s/tlscredspskalt0", tmpfs); + data->pskfilealt = g_strdup_printf("%s/%s", data->workdiralt, + QCRYPTO_TLS_CREDS_PSKFILE); + g_mkdir_with_parents(data->workdiralt, 0700); + test_tls_psk_init_alt(data->pskfilealt); + } + + rsp = wait_command(from, + "{ 'execute': 'object-add'," + " 'arguments': { 'qom-type': 'tls-creds-psk'," + " 'id': 'tlscredspsk0'," + " 'endpoint': 'client'," + " 'dir': %s," + " 'username': 'qemu'} }", + data->workdir); + qobject_unref(rsp); + + rsp = wait_command(to, + "{ 'execute': 'object-add'," + " 'arguments': { 'qom-type': 'tls-creds-psk'," + " 'id': 'tlscredspsk0'," + " 'endpoint': 'server'," + " 'dir': %s } }", + mismatch ? data->workdiralt : data->workdir); + qobject_unref(rsp); + + migrate_set_parameter_str(from, "tls-creds", "tlscredspsk0"); + migrate_set_parameter_str(to, "tls-creds", "tlscredspsk0"); + + return data; +} + +static void * +test_migrate_tls_psk_start_match(QTestState *from, + QTestState *to) +{ + return test_migrate_tls_psk_start_common(from, to, false); +} + +static void * +test_migrate_tls_psk_start_mismatch(QTestState *from, + QTestState *to) +{ + return test_migrate_tls_psk_start_common(from, to, true); +} + +static void +test_migrate_tls_psk_finish(QTestState *from, + QTestState *to, + void *opaque) +{ + struct TestMigrateTLSPSKData *data = opaque; + + test_tls_psk_cleanup(data->pskfile); + if (data->pskfilealt) { + test_tls_psk_cleanup(data->pskfilealt); + } + rmdir(data->workdir); + if (data->workdiralt) { + rmdir(data->workdiralt); + } + + g_free(data->workdiralt); + g_free(data->pskfilealt); + g_free(data->workdir); + g_free(data->pskfile); + g_free(data); +} + +#ifdef CONFIG_TASN1 +typedef struct { + char *workdir; + char *keyfile; + char *cacert; + char *servercert; + char *serverkey; + char *clientcert; + char *clientkey; +} TestMigrateTLSX509Data; + +typedef struct { + bool verifyclient; + bool clientcert; + bool hostileclient; + bool authzclient; + const char *certhostname; + const char *certipaddr; +} TestMigrateTLSX509; + +static void * +test_migrate_tls_x509_start_common(QTestState *from, + QTestState *to, + TestMigrateTLSX509 *args) +{ + TestMigrateTLSX509Data *data = g_new0(TestMigrateTLSX509Data, 1); + QDict *rsp; + + data->workdir = g_strdup_printf("%s/tlscredsx5090", tmpfs); + data->keyfile = g_strdup_printf("%s/key.pem", data->workdir); + + data->cacert = g_strdup_printf("%s/ca-cert.pem", data->workdir); + data->serverkey = g_strdup_printf("%s/server-key.pem", 
data->workdir); + data->servercert = g_strdup_printf("%s/server-cert.pem", data->workdir); + if (args->clientcert) { + data->clientkey = g_strdup_printf("%s/client-key.pem", data->workdir); + data->clientcert = g_strdup_printf("%s/client-cert.pem", data->workdir); + } + + g_mkdir_with_parents(data->workdir, 0700); + + test_tls_init(data->keyfile); +#ifndef _WIN32 + g_assert(link(data->keyfile, data->serverkey) == 0); +#else + g_assert(CreateHardLink(data->serverkey, data->keyfile, NULL) != 0); +#endif + if (args->clientcert) { +#ifndef _WIN32 + g_assert(link(data->keyfile, data->clientkey) == 0); +#else + g_assert(CreateHardLink(data->clientkey, data->keyfile, NULL) != 0); +#endif + } + + TLS_ROOT_REQ_SIMPLE(cacertreq, data->cacert); + if (args->clientcert) { + TLS_CERT_REQ_SIMPLE_CLIENT(servercertreq, cacertreq, + args->hostileclient ? + QCRYPTO_TLS_TEST_CLIENT_HOSTILE_NAME : + QCRYPTO_TLS_TEST_CLIENT_NAME, + data->clientcert); + } + + TLS_CERT_REQ_SIMPLE_SERVER(clientcertreq, cacertreq, + data->servercert, + args->certhostname, + args->certipaddr); + + rsp = wait_command(from, + "{ 'execute': 'object-add'," + " 'arguments': { 'qom-type': 'tls-creds-x509'," + " 'id': 'tlscredsx509client0'," + " 'endpoint': 'client'," + " 'dir': %s," + " 'sanity-check': true," + " 'verify-peer': true} }", + data->workdir); + qobject_unref(rsp); + migrate_set_parameter_str(from, "tls-creds", "tlscredsx509client0"); + if (args->certhostname) { + migrate_set_parameter_str(from, "tls-hostname", args->certhostname); + } + + rsp = wait_command(to, + "{ 'execute': 'object-add'," + " 'arguments': { 'qom-type': 'tls-creds-x509'," + " 'id': 'tlscredsx509server0'," + " 'endpoint': 'server'," + " 'dir': %s," + " 'sanity-check': true," + " 'verify-peer': %i} }", + data->workdir, args->verifyclient); + qobject_unref(rsp); + migrate_set_parameter_str(to, "tls-creds", "tlscredsx509server0"); + + if (args->authzclient) { + rsp = wait_command(to, + "{ 'execute': 'object-add'," + " 'arguments': { 'qom-type': 'authz-simple'," + " 'id': 'tlsauthz0'," + " 'identity': %s} }", + "CN=" QCRYPTO_TLS_TEST_CLIENT_NAME); + migrate_set_parameter_str(to, "tls-authz", "tlsauthz0"); + } + + return data; +} + +/* + * The normal case: match server's cert hostname against + * whatever host we were telling QEMU to connect to (if any) + */ +static void * +test_migrate_tls_x509_start_default_host(QTestState *from, + QTestState *to) +{ + TestMigrateTLSX509 args = { + .verifyclient = true, + .clientcert = true, + .certipaddr = "127.0.0.1" + }; + return test_migrate_tls_x509_start_common(from, to, &args); +} + +/* + * The unusual case: the server's cert is different from + * the address we're telling QEMU to connect to (if any), + * so we must give QEMU an explicit hostname to validate + */ +static void * +test_migrate_tls_x509_start_override_host(QTestState *from, + QTestState *to) +{ + TestMigrateTLSX509 args = { + .verifyclient = true, + .clientcert = true, + .certhostname = "qemu.org", + }; + return test_migrate_tls_x509_start_common(from, to, &args); +} + +/* + * The unusual case: the server's cert is different from + * the address we're telling QEMU to connect to, and so we + * expect the client to reject the server + */ +static void * +test_migrate_tls_x509_start_mismatch_host(QTestState *from, + QTestState *to) +{ + TestMigrateTLSX509 args = { + .verifyclient = true, + .clientcert = true, + .certipaddr = "10.0.0.1", + }; + return test_migrate_tls_x509_start_common(from, to, &args); +} + +static void * 
+test_migrate_tls_x509_start_friendly_client(QTestState *from, + QTestState *to) +{ + TestMigrateTLSX509 args = { + .verifyclient = true, + .clientcert = true, + .authzclient = true, + .certipaddr = "127.0.0.1", + }; + return test_migrate_tls_x509_start_common(from, to, &args); +} + +static void * +test_migrate_tls_x509_start_hostile_client(QTestState *from, + QTestState *to) +{ + TestMigrateTLSX509 args = { + .verifyclient = true, + .clientcert = true, + .hostileclient = true, + .authzclient = true, + .certipaddr = "127.0.0.1", + }; + return test_migrate_tls_x509_start_common(from, to, &args); +} + +/* + * The case with no client certificate presented, + * and no server verification + */ +static void * +test_migrate_tls_x509_start_allow_anon_client(QTestState *from, + QTestState *to) +{ + TestMigrateTLSX509 args = { + .certipaddr = "127.0.0.1", + }; + return test_migrate_tls_x509_start_common(from, to, &args); +} + +/* + * The case with no client certificate presented, + * and server verification rejecting + */ +static void * +test_migrate_tls_x509_start_reject_anon_client(QTestState *from, + QTestState *to) +{ + TestMigrateTLSX509 args = { + .verifyclient = true, + .certipaddr = "127.0.0.1", + }; + return test_migrate_tls_x509_start_common(from, to, &args); +} + +static void +test_migrate_tls_x509_finish(QTestState *from, + QTestState *to, + void *opaque) +{ + TestMigrateTLSX509Data *data = opaque; + + test_tls_cleanup(data->keyfile); + g_free(data->keyfile); + + unlink(data->cacert); + g_free(data->cacert); + unlink(data->servercert); + g_free(data->servercert); + unlink(data->serverkey); + g_free(data->serverkey); + + if (data->clientcert) { + unlink(data->clientcert); + g_free(data->clientcert); + } + if (data->clientkey) { + unlink(data->clientkey); + g_free(data->clientkey); + } + + rmdir(data->workdir); + g_free(data->workdir); + + g_free(data); +} +#endif /* CONFIG_TASN1 */ +#endif /* CONFIG_GNUTLS */ + static int migrate_postcopy_prepare(QTestState **from_ptr, QTestState **to_ptr, - MigrateStart *args) + MigrateCommon *args) { g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); QTestState *from, *to; - if (test_migrate_start(&from, &to, uri, args)) { + if (test_migrate_start(&from, &to, uri, &args->start)) { return -1; } + if (args->start_hook) { + args->postcopy_data = args->start_hook(from, to); + } + migrate_set_capability(from, "postcopy-ram", true); migrate_set_capability(to, "postcopy-ram", true); migrate_set_capability(to, "postcopy-blocktime", true); - /* We want to pick a speed slow enough that the test completes - * quickly, but that it doesn't complete precopy even on a slow - * machine, so also set the downtime. 
- */ - migrate_set_parameter_int(from, "max-bandwidth", 30000000); - migrate_set_parameter_int(from, "downtime-limit", 1); + if (args->postcopy_preempt) { + migrate_set_capability(from, "postcopy-preempt", true); + migrate_set_capability(to, "postcopy-preempt", true); + } + + migrate_ensure_non_converge(from); /* Wait for the first serial output from the source */ wait_for_serial("src_serial"); @@ -693,7 +1131,8 @@ static int migrate_postcopy_prepare(QTestState **from_ptr, return 0; } -static void migrate_postcopy_complete(QTestState *from, QTestState *to) +static void migrate_postcopy_complete(QTestState *from, QTestState *to, + MigrateCommon *args) { wait_for_migration_complete(from); @@ -704,28 +1143,71 @@ static void migrate_postcopy_complete(QTestState *from, QTestState *to) read_blocktime(to); } + if (args->finish_hook) { + args->finish_hook(from, to, args->postcopy_data); + args->postcopy_data = NULL; + } + test_migrate_end(from, to, true); } -static void test_postcopy(void) +static void test_postcopy_common(MigrateCommon *args) { - MigrateStart *args = migrate_start_new(); QTestState *from, *to; if (migrate_postcopy_prepare(&from, &to, args)) { return; } migrate_postcopy_start(from, to); - migrate_postcopy_complete(from, to); + migrate_postcopy_complete(from, to, args); } -static void test_postcopy_recovery(void) +static void test_postcopy(void) +{ + MigrateCommon args = { }; + + test_postcopy_common(&args); +} + +static void test_postcopy_preempt(void) +{ + MigrateCommon args = { + .postcopy_preempt = true, + }; + + test_postcopy_common(&args); +} + +#ifdef CONFIG_GNUTLS +static void test_postcopy_tls_psk(void) +{ + MigrateCommon args = { + .start_hook = test_migrate_tls_psk_start_match, + .finish_hook = test_migrate_tls_psk_finish, + }; + + test_postcopy_common(&args); +} + +static void test_postcopy_preempt_tls_psk(void) +{ + MigrateCommon args = { + .postcopy_preempt = true, + .start_hook = test_migrate_tls_psk_start_match, + .finish_hook = test_migrate_tls_psk_finish, + }; + + test_postcopy_common(&args); +} +#endif + +static void test_postcopy_recovery_common(MigrateCommon *args) { - MigrateStart *args = migrate_start_new(); QTestState *from, *to; g_autofree char *uri = NULL; - args->hide_stderr = true; + /* Always hide errors for postcopy recover tests since they're expected */ + args->start.hide_stderr = true; if (migrate_postcopy_prepare(&from, &to, args)) { return; @@ -778,17 +1260,59 @@ static void test_postcopy_recovery(void) /* Restore the postcopy bandwidth to unlimited */ migrate_set_parameter_int(from, "max-postcopy-bandwidth", 0); - migrate_postcopy_complete(from, to); + migrate_postcopy_complete(from, to, args); } +static void test_postcopy_recovery(void) +{ + MigrateCommon args = { }; + + test_postcopy_recovery_common(&args); +} + +#ifdef CONFIG_GNUTLS +static void test_postcopy_recovery_tls_psk(void) +{ + MigrateCommon args = { + .start_hook = test_migrate_tls_psk_start_match, + .finish_hook = test_migrate_tls_psk_finish, + }; + + test_postcopy_recovery_common(&args); +} +#endif + +static void test_postcopy_preempt_recovery(void) +{ + MigrateCommon args = { + .postcopy_preempt = true, + }; + + test_postcopy_recovery_common(&args); +} + +#ifdef CONFIG_GNUTLS +/* This contains preempt+recovery+tls test altogether */ +static void test_postcopy_preempt_all(void) +{ + MigrateCommon args = { + .postcopy_preempt = true, + .start_hook = test_migrate_tls_psk_start_match, + .finish_hook = test_migrate_tls_psk_finish, + }; + + test_postcopy_recovery_common(&args); +} 
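Purely as an illustration of the pattern this refactoring establishes (not part of the patch; both function names below are hypothetical), a new postcopy variant now only has to fill in a MigrateCommon and delegate to the common driver. A start hook receives both QTestState handles and may return opaque state for a matching finish hook; returning NULL is fine when there is nothing to clean up:

    /* Hypothetical start hook: throttle postcopy bandwidth on the source.
     * It returns NULL, so a finish hook (if one were set) would see NULL. */
    static void *example_throttle_start_hook(QTestState *from, QTestState *to)
    {
        migrate_set_parameter_int(from, "max-postcopy-bandwidth", 4096);
        return NULL;
    }

    static void test_postcopy_preempt_throttled_example(void)
    {
        MigrateCommon args = {
            .postcopy_preempt = true,
            .start_hook = example_throttle_start_hook,
        };

        test_postcopy_common(&args);
    }

The same MigrateCommon structure drives the precopy scenarios through test_precopy_common(), which is what lets the TLS, multifd and fd variants further down share a single control flow.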
+#endif + static void test_baddest(void) { - MigrateStart *args = migrate_start_new(); + MigrateStart args = { + .hide_stderr = true + }; QTestState *from, *to; - args->hide_stderr = true; - - if (test_migrate_start(&from, &to, "tcp:127.0.0.1:0", args)) { + if (test_migrate_start(&from, &to, "tcp:127.0.0.1:0", &args)) { return; } migrate_qmp(from, "tcp:127.0.0.1:0", "{}"); @@ -796,60 +1320,146 @@ static void test_baddest(void) test_migrate_end(from, to, false); } -static void test_precopy_unix_common(bool dirty_ring) +static void test_precopy_common(MigrateCommon *args) { - g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); - MigrateStart *args = migrate_start_new(); QTestState *from, *to; + void *data_hook = NULL; - args->use_dirty_ring = dirty_ring; - - if (test_migrate_start(&from, &to, uri, args)) { + if (test_migrate_start(&from, &to, args->listen_uri, &args->start)) { return; } - /* We want to pick a speed slow enough that the test completes - * quickly, but that it doesn't complete precopy even on a slow - * machine, so also set the downtime. - */ - /* 1 ms should make it not converge*/ - migrate_set_parameter_int(from, "downtime-limit", 1); - /* 1GB/s */ - migrate_set_parameter_int(from, "max-bandwidth", 1000000000); + migrate_ensure_non_converge(from); - /* Wait for the first serial output from the source */ - wait_for_serial("src_serial"); - - migrate_qmp(from, uri, "{}"); - - wait_for_migration_pass(from); - - migrate_set_parameter_int(from, "downtime-limit", CONVERGE_DOWNTIME); - - if (!got_stop) { - qtest_qmp_eventwait(from, "STOP"); + if (args->start_hook) { + data_hook = args->start_hook(from, to); } - qtest_qmp_eventwait(to, "RESUME"); + /* Wait for the first serial output from the source */ + if (args->result == MIG_TEST_SUCCEED) { + wait_for_serial("src_serial"); + } - wait_for_serial("dest_serial"); - wait_for_migration_complete(from); + if (!args->connect_uri) { + g_autofree char *local_connect_uri = + migrate_get_socket_address(to, "socket-address"); + migrate_qmp(from, local_connect_uri, "{}"); + } else { + migrate_qmp(from, args->connect_uri, "{}"); + } - test_migrate_end(from, to, true); + + if (args->result != MIG_TEST_SUCCEED) { + bool allow_active = args->result == MIG_TEST_FAIL; + wait_for_migration_fail(from, allow_active); + + if (args->result == MIG_TEST_FAIL_DEST_QUIT_ERR) { + qtest_set_expected_status(to, EXIT_FAILURE); + } + } else { + if (args->iterations) { + while (args->iterations--) { + wait_for_migration_pass(from); + } + } else { + wait_for_migration_pass(from); + } + + migrate_ensure_converge(from); + + /* We do this first, as it has a timeout to stop us + * hanging forever if migration didn't converge */ + wait_for_migration_complete(from); + + if (!got_stop) { + qtest_qmp_eventwait(from, "STOP"); + } + + qtest_qmp_eventwait(to, "RESUME"); + + wait_for_serial("dest_serial"); + } + + if (args->finish_hook) { + args->finish_hook(from, to, data_hook); + } + + test_migrate_end(from, to, args->result == MIG_TEST_SUCCEED); } -static void test_precopy_unix(void) +static void test_precopy_unix_plain(void) { - /* Using default dirty logging */ - test_precopy_unix_common(false); + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .listen_uri = uri, + .connect_uri = uri, + }; + + test_precopy_common(&args); } + static void test_precopy_unix_dirty_ring(void) { - /* Using dirty ring tracking */ - test_precopy_unix_common(true); + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + 
MigrateCommon args = { + .start = { + .use_dirty_ring = true, + }, + .listen_uri = uri, + .connect_uri = uri, + }; + + test_precopy_common(&args); } +#ifdef CONFIG_GNUTLS +static void test_precopy_unix_tls_psk(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = uri, + .start_hook = test_migrate_tls_psk_start_match, + .finish_hook = test_migrate_tls_psk_finish, + }; + + test_precopy_common(&args); +} + +#ifdef CONFIG_TASN1 +static void test_precopy_unix_tls_x509_default_host(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .start = { + .hide_stderr = true, + }, + .connect_uri = uri, + .listen_uri = uri, + .start_hook = test_migrate_tls_x509_start_default_host, + .finish_hook = test_migrate_tls_x509_finish, + .result = MIG_TEST_FAIL_DEST_QUIT_ERR, + }; + + test_precopy_common(&args); +} + +static void test_precopy_unix_tls_x509_override_host(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = uri, + .start_hook = test_migrate_tls_x509_start_override_host, + .finish_hook = test_migrate_tls_x509_finish, + }; + + test_precopy_common(&args); +} +#endif /* CONFIG_TASN1 */ +#endif /* CONFIG_GNUTLS */ + #if 0 /* Currently upset on aarch64 TCG */ static void test_ignore_shared(void) @@ -887,126 +1497,168 @@ static void test_ignore_shared(void) } #endif -static void test_xbzrle(const char *uri) +static void * +test_migrate_xbzrle_start(QTestState *from, + QTestState *to) { - MigrateStart *args = migrate_start_new(); - QTestState *from, *to; - - if (test_migrate_start(&from, &to, uri, args)) { - return; - } - - /* - * We want to pick a speed slow enough that the test completes - * quickly, but that it doesn't complete precopy even on a slow - * machine, so also set the downtime. 
- */ - /* 1 ms should make it not converge*/ - migrate_set_parameter_int(from, "downtime-limit", 1); - /* 1GB/s */ - migrate_set_parameter_int(from, "max-bandwidth", 1000000000); - migrate_set_parameter_int(from, "xbzrle-cache-size", 33554432); migrate_set_capability(from, "xbzrle", true); migrate_set_capability(to, "xbzrle", true); - /* Wait for the first serial output from the source */ - wait_for_serial("src_serial"); - migrate_qmp(from, uri, "{}"); - - wait_for_migration_pass(from); - /* Make sure we have 2 passes, so the xbzrle cache gets a workout */ - wait_for_migration_pass(from); - - /* 1000ms should converge */ - migrate_set_parameter_int(from, "downtime-limit", 1000); - - if (!got_stop) { - qtest_qmp_eventwait(from, "STOP"); - } - qtest_qmp_eventwait(to, "RESUME"); - - wait_for_serial("dest_serial"); - wait_for_migration_complete(from); - - test_migrate_end(from, to, true); + return NULL; } -static void test_xbzrle_unix(void) +static void test_precopy_unix_xbzrle(void) { g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = uri, - test_xbzrle(uri); + .start_hook = test_migrate_xbzrle_start, + + .iterations = 2, + }; + + test_precopy_common(&args); } -static void test_precopy_tcp(void) +static void test_precopy_tcp_plain(void) { - MigrateStart *args = migrate_start_new(); - g_autofree char *uri = NULL; - QTestState *from, *to; + MigrateCommon args = { + .listen_uri = "tcp:127.0.0.1:0", + }; - if (test_migrate_start(&from, &to, "tcp:127.0.0.1:0", args)) { - return; - } - - /* - * We want to pick a speed slow enough that the test completes - * quickly, but that it doesn't complete precopy even on a slow - * machine, so also set the downtime. - */ - /* 1 ms should make it not converge*/ - migrate_set_parameter_int(from, "downtime-limit", 1); - /* 1GB/s */ - migrate_set_parameter_int(from, "max-bandwidth", 1000000000); - - /* Wait for the first serial output from the source */ - wait_for_serial("src_serial"); - - uri = migrate_get_socket_address(to, "socket-address"); - - migrate_qmp(from, uri, "{}"); - - wait_for_migration_pass(from); - - migrate_set_parameter_int(from, "downtime-limit", CONVERGE_DOWNTIME); - - if (!got_stop) { - qtest_qmp_eventwait(from, "STOP"); - } - qtest_qmp_eventwait(to, "RESUME"); - - wait_for_serial("dest_serial"); - wait_for_migration_complete(from); - - test_migrate_end(from, to, true); + test_precopy_common(&args); } -static void test_migrate_fd_proto(void) +#ifdef CONFIG_GNUTLS +static void test_precopy_tcp_tls_psk_match(void) { - MigrateStart *args = migrate_start_new(); - QTestState *from, *to; + MigrateCommon args = { + .listen_uri = "tcp:127.0.0.1:0", + .start_hook = test_migrate_tls_psk_start_match, + .finish_hook = test_migrate_tls_psk_finish, + }; + + test_precopy_common(&args); +} + +static void test_precopy_tcp_tls_psk_mismatch(void) +{ + MigrateCommon args = { + .start = { + .hide_stderr = true, + }, + .listen_uri = "tcp:127.0.0.1:0", + .start_hook = test_migrate_tls_psk_start_mismatch, + .finish_hook = test_migrate_tls_psk_finish, + .result = MIG_TEST_FAIL, + }; + + test_precopy_common(&args); +} + +#ifdef CONFIG_TASN1 +static void test_precopy_tcp_tls_x509_default_host(void) +{ + MigrateCommon args = { + .listen_uri = "tcp:127.0.0.1:0", + .start_hook = test_migrate_tls_x509_start_default_host, + .finish_hook = test_migrate_tls_x509_finish, + }; + + test_precopy_common(&args); +} + +static void test_precopy_tcp_tls_x509_override_host(void) +{ + MigrateCommon args = { 
+ .listen_uri = "tcp:127.0.0.1:0", + .start_hook = test_migrate_tls_x509_start_override_host, + .finish_hook = test_migrate_tls_x509_finish, + }; + + test_precopy_common(&args); +} + +static void test_precopy_tcp_tls_x509_mismatch_host(void) +{ + MigrateCommon args = { + .start = { + .hide_stderr = true, + }, + .listen_uri = "tcp:127.0.0.1:0", + .start_hook = test_migrate_tls_x509_start_mismatch_host, + .finish_hook = test_migrate_tls_x509_finish, + .result = MIG_TEST_FAIL_DEST_QUIT_ERR, + }; + + test_precopy_common(&args); +} + +static void test_precopy_tcp_tls_x509_friendly_client(void) +{ + MigrateCommon args = { + .listen_uri = "tcp:127.0.0.1:0", + .start_hook = test_migrate_tls_x509_start_friendly_client, + .finish_hook = test_migrate_tls_x509_finish, + }; + + test_precopy_common(&args); +} + +static void test_precopy_tcp_tls_x509_hostile_client(void) +{ + MigrateCommon args = { + .start = { + .hide_stderr = true, + }, + .listen_uri = "tcp:127.0.0.1:0", + .start_hook = test_migrate_tls_x509_start_hostile_client, + .finish_hook = test_migrate_tls_x509_finish, + .result = MIG_TEST_FAIL, + }; + + test_precopy_common(&args); +} + +static void test_precopy_tcp_tls_x509_allow_anon_client(void) +{ + MigrateCommon args = { + .listen_uri = "tcp:127.0.0.1:0", + .start_hook = test_migrate_tls_x509_start_allow_anon_client, + .finish_hook = test_migrate_tls_x509_finish, + }; + + test_precopy_common(&args); +} + +static void test_precopy_tcp_tls_x509_reject_anon_client(void) +{ + MigrateCommon args = { + .start = { + .hide_stderr = true, + }, + .listen_uri = "tcp:127.0.0.1:0", + .start_hook = test_migrate_tls_x509_start_reject_anon_client, + .finish_hook = test_migrate_tls_x509_finish, + .result = MIG_TEST_FAIL, + }; + + test_precopy_common(&args); +} +#endif /* CONFIG_TASN1 */ +#endif /* CONFIG_GNUTLS */ + +#ifndef _WIN32 +static void *test_migrate_fd_start_hook(QTestState *from, + QTestState *to) +{ + QDict *rsp; int ret; int pair[2]; - QDict *rsp; - const char *error_desc; - - if (test_migrate_start(&from, &to, "defer", args)) { - return; - } - - /* - * We want to pick a speed slow enough that the test completes - * quickly, but that it doesn't complete precopy even on a slow - * machine, so also set the downtime. 
- */ - /* 1 ms should make it not converge */ - migrate_set_parameter_int(from, "downtime-limit", 1); - /* 1GB/s */ - migrate_set_parameter_int(from, "max-bandwidth", 1000000000); - - /* Wait for the first serial output from the source */ - wait_for_serial("src_serial"); /* Create two connected sockets for migration */ ret = socketpair(PF_LOCAL, SOCK_STREAM, 0, pair); @@ -1031,17 +1683,15 @@ static void test_migrate_fd_proto(void) qobject_unref(rsp); close(pair[1]); - /* Start migration to the 2nd socket*/ - migrate_qmp(from, "fd:fd-mig", "{}"); + return NULL; +} - wait_for_migration_pass(from); - - migrate_set_parameter_int(from, "downtime-limit", CONVERGE_DOWNTIME); - - if (!got_stop) { - qtest_qmp_eventwait(from, "STOP"); - } - qtest_qmp_eventwait(to, "RESUME"); +static void test_migrate_fd_finish_hook(QTestState *from, + QTestState *to, + void *opaque) +{ + QDict *rsp; + const char *error_desc; /* Test closing fds */ /* We assume, that QEMU removes named fd from its list, @@ -1059,13 +1709,20 @@ static void test_migrate_fd_proto(void) error_desc = qdict_get_str(qdict_get_qdict(rsp, "error"), "desc"); g_assert_cmpstr(error_desc, ==, "File descriptor named 'fd-mig' not found"); qobject_unref(rsp); - - /* Complete migration */ - wait_for_serial("dest_serial"); - wait_for_migration_complete(from); - test_migrate_end(from, to, true); } +static void test_migrate_fd_proto(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .connect_uri = "fd:fd-mig", + .start_hook = test_migrate_fd_start_hook, + .finish_hook = test_migrate_fd_finish_hook + }; + test_precopy_common(&args); +} +#endif /* _WIN32 */ + static void do_test_validate_uuid(MigrateStart *args, bool should_fail) { g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); @@ -1089,7 +1746,7 @@ static void do_test_validate_uuid(MigrateStart *args, bool should_fail) migrate_qmp(from, uri, "{}"); if (should_fail) { - qtest_set_expected_status(to, 1); + qtest_set_expected_status(to, EXIT_FAILURE); wait_for_migration_fail(from, true); } else { wait_for_migration_complete(from); @@ -1100,53 +1757,51 @@ static void do_test_validate_uuid(MigrateStart *args, bool should_fail) static void test_validate_uuid(void) { - MigrateStart *args = migrate_start_new(); + MigrateStart args = { + .opts_source = "-uuid 11111111-1111-1111-1111-111111111111", + .opts_target = "-uuid 11111111-1111-1111-1111-111111111111", + }; - g_free(args->opts_source); - g_free(args->opts_target); - args->opts_source = g_strdup("-uuid 11111111-1111-1111-1111-111111111111"); - args->opts_target = g_strdup("-uuid 11111111-1111-1111-1111-111111111111"); - do_test_validate_uuid(args, false); + do_test_validate_uuid(&args, false); } static void test_validate_uuid_error(void) { - MigrateStart *args = migrate_start_new(); + MigrateStart args = { + .opts_source = "-uuid 11111111-1111-1111-1111-111111111111", + .opts_target = "-uuid 22222222-2222-2222-2222-222222222222", + .hide_stderr = true, + }; - g_free(args->opts_source); - g_free(args->opts_target); - args->opts_source = g_strdup("-uuid 11111111-1111-1111-1111-111111111111"); - args->opts_target = g_strdup("-uuid 22222222-2222-2222-2222-222222222222"); - args->hide_stderr = true; - do_test_validate_uuid(args, true); + do_test_validate_uuid(&args, true); } static void test_validate_uuid_src_not_set(void) { - MigrateStart *args = migrate_start_new(); + MigrateStart args = { + .opts_target = "-uuid 22222222-2222-2222-2222-222222222222", + .hide_stderr = true, + }; - g_free(args->opts_target); - args->opts_target = 
g_strdup("-uuid 22222222-2222-2222-2222-222222222222"); - args->hide_stderr = true; - do_test_validate_uuid(args, false); + do_test_validate_uuid(&args, false); } static void test_validate_uuid_dst_not_set(void) { - MigrateStart *args = migrate_start_new(); + MigrateStart args = { + .opts_source = "-uuid 11111111-1111-1111-1111-111111111111", + .hide_stderr = true, + }; - g_free(args->opts_source); - args->opts_source = g_strdup("-uuid 11111111-1111-1111-1111-111111111111"); - args->hide_stderr = true; - do_test_validate_uuid(args, false); + do_test_validate_uuid(&args, false); } static void test_migrate_auto_converge(void) { g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); - MigrateStart *args = migrate_start_new(); + MigrateStart args = {}; QTestState *from, *to; - int64_t remaining, percentage; + int64_t percentage; /* * We want the test to be stable and as fast as possible. @@ -1154,16 +1809,8 @@ static void test_migrate_auto_converge(void) * so we need to decrease a bandwidth. */ const int64_t init_pct = 5, inc_pct = 50, max_pct = 95; - const int64_t max_bandwidth = 400000000; /* ~400Mb/s */ - const int64_t downtime_limit = 250; /* 250ms */ - /* - * We migrate through unix-socket (> 500Mb/s). - * Thus, expected migration speed ~= bandwidth limit (< 500Mb/s). - * So, we can predict expected_threshold - */ - const int64_t expected_threshold = max_bandwidth * downtime_limit / 1000; - if (test_migrate_start(&from, &to, uri, args)) { + if (test_migrate_start(&from, &to, uri, &args)) { return; } @@ -1176,8 +1823,7 @@ static void test_migrate_auto_converge(void) * Set the initial parameters so that the migration could not converge * without throttling. */ - migrate_set_parameter_int(from, "downtime-limit", 1); - migrate_set_parameter_int(from, "max-bandwidth", 100000000); /* ~100Mb/s */ + migrate_ensure_non_converge(from); /* To check remaining size after precopy */ migrate_set_capability(from, "pause-before-switchover", true); @@ -1197,8 +1843,7 @@ static void test_migrate_auto_converge(void) /* The first percentage of throttling should be equal to init_pct */ g_assert_cmpint(percentage, ==, init_pct); /* Now, when we tested that throttling works, let it converge */ - migrate_set_parameter_int(from, "downtime-limit", downtime_limit); - migrate_set_parameter_int(from, "max-bandwidth", max_bandwidth); + migrate_ensure_converge(from); /* * Wait for pre-switchover status to check last throttle percentage @@ -1209,11 +1854,6 @@ static void test_migrate_auto_converge(void) /* The final percentage of throttling shouldn't be greater than max_pct */ percentage = read_migrate_property_int(from, "cpu-throttle-percentage"); g_assert_cmpint(percentage, <=, max_pct); - - remaining = read_ram_property_int(from, "remaining"); - g_assert_cmpint(remaining, <, - (expected_threshold + expected_threshold / 100)); - migrate_continue(from, "pre-switchover"); qtest_qmp_eventwait(to, "RESUME"); @@ -1221,30 +1861,15 @@ static void test_migrate_auto_converge(void) wait_for_serial("dest_serial"); wait_for_migration_complete(from); - test_migrate_end(from, to, true); } -static void test_multifd_tcp(const char *method) +static void * +test_migrate_precopy_tcp_multifd_start_common(QTestState *from, + QTestState *to, + const char *method) { - MigrateStart *args = migrate_start_new(); - QTestState *from, *to; QDict *rsp; - g_autofree char *uri = NULL; - - if (test_migrate_start(&from, &to, "defer", args)) { - return; - } - - /* - * We want to pick a speed slow enough that the test completes - * quickly, 
but that it doesn't complete precopy even on a slow - * machine, so also set the downtime. - */ - /* 1 ms should make it not converge*/ - migrate_set_parameter_int(from, "downtime-limit", 1); - /* 1GB/s */ - migrate_set_parameter_int(from, "max-bandwidth", 1000000000); migrate_set_parameter_int(from, "multifd-channels", 16); migrate_set_parameter_int(to, "multifd-channels", 16); @@ -1260,44 +1885,218 @@ static void test_multifd_tcp(const char *method) " 'arguments': { 'uri': 'tcp:127.0.0.1:0' }}"); qobject_unref(rsp); - /* Wait for the first serial output from the source */ - wait_for_serial("src_serial"); - - uri = migrate_get_socket_address(to, "socket-address"); - - migrate_qmp(from, uri, "{}"); - - wait_for_migration_pass(from); - - migrate_set_parameter_int(from, "downtime-limit", CONVERGE_DOWNTIME); - - if (!got_stop) { - qtest_qmp_eventwait(from, "STOP"); - } - qtest_qmp_eventwait(to, "RESUME"); - - wait_for_serial("dest_serial"); - wait_for_migration_complete(from); - test_migrate_end(from, to, true); + return NULL; } +static void * +test_migrate_precopy_tcp_multifd_start(QTestState *from, + QTestState *to) +{ + return test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); +} + +static void * +test_migrate_precopy_tcp_multifd_zlib_start(QTestState *from, + QTestState *to) +{ + return test_migrate_precopy_tcp_multifd_start_common(from, to, "zlib"); +} + +#ifdef CONFIG_ZSTD +static void * +test_migrate_precopy_tcp_multifd_zstd_start(QTestState *from, + QTestState *to) +{ + return test_migrate_precopy_tcp_multifd_start_common(from, to, "zstd"); +} +#endif /* CONFIG_ZSTD */ + static void test_multifd_tcp_none(void) { - test_multifd_tcp("none"); + MigrateCommon args = { + .listen_uri = "defer", + .start_hook = test_migrate_precopy_tcp_multifd_start, + }; + test_precopy_common(&args); } static void test_multifd_tcp_zlib(void) { - test_multifd_tcp("zlib"); + MigrateCommon args = { + .listen_uri = "defer", + .start_hook = test_migrate_precopy_tcp_multifd_zlib_start, + }; + test_precopy_common(&args); } #ifdef CONFIG_ZSTD static void test_multifd_tcp_zstd(void) { - test_multifd_tcp("zstd"); + MigrateCommon args = { + .listen_uri = "defer", + .start_hook = test_migrate_precopy_tcp_multifd_zstd_start, + }; + test_precopy_common(&args); } #endif +#ifdef CONFIG_GNUTLS +static void * +test_migrate_multifd_tcp_tls_psk_start_match(QTestState *from, + QTestState *to) +{ + test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); + return test_migrate_tls_psk_start_match(from, to); +} + +static void * +test_migrate_multifd_tcp_tls_psk_start_mismatch(QTestState *from, + QTestState *to) +{ + test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); + return test_migrate_tls_psk_start_mismatch(from, to); +} + +#ifdef CONFIG_TASN1 +static void * +test_migrate_multifd_tls_x509_start_default_host(QTestState *from, + QTestState *to) +{ + test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); + return test_migrate_tls_x509_start_default_host(from, to); +} + +static void * +test_migrate_multifd_tls_x509_start_override_host(QTestState *from, + QTestState *to) +{ + test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); + return test_migrate_tls_x509_start_override_host(from, to); +} + +static void * +test_migrate_multifd_tls_x509_start_mismatch_host(QTestState *from, + QTestState *to) +{ + test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); + return test_migrate_tls_x509_start_mismatch_host(from, to); +} + +static void * 
+test_migrate_multifd_tls_x509_start_allow_anon_client(QTestState *from, + QTestState *to) +{ + test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); + return test_migrate_tls_x509_start_allow_anon_client(from, to); +} + +static void * +test_migrate_multifd_tls_x509_start_reject_anon_client(QTestState *from, + QTestState *to) +{ + test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); + return test_migrate_tls_x509_start_reject_anon_client(from, to); +} +#endif /* CONFIG_TASN1 */ + +static void test_multifd_tcp_tls_psk_match(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start_hook = test_migrate_multifd_tcp_tls_psk_start_match, + .finish_hook = test_migrate_tls_psk_finish, + }; + test_precopy_common(&args); +} + +static void test_multifd_tcp_tls_psk_mismatch(void) +{ + MigrateCommon args = { + .start = { + .hide_stderr = true, + }, + .listen_uri = "defer", + .start_hook = test_migrate_multifd_tcp_tls_psk_start_mismatch, + .finish_hook = test_migrate_tls_psk_finish, + .result = MIG_TEST_FAIL, + }; + test_precopy_common(&args); +} + +#ifdef CONFIG_TASN1 +static void test_multifd_tcp_tls_x509_default_host(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start_hook = test_migrate_multifd_tls_x509_start_default_host, + .finish_hook = test_migrate_tls_x509_finish, + }; + test_precopy_common(&args); +} + +static void test_multifd_tcp_tls_x509_override_host(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start_hook = test_migrate_multifd_tls_x509_start_override_host, + .finish_hook = test_migrate_tls_x509_finish, + }; + test_precopy_common(&args); +} + +static void test_multifd_tcp_tls_x509_mismatch_host(void) +{ + /* + * This has different behaviour to the non-multifd case. + * + * In non-multifd case when client aborts due to mismatched + * cert host, the server has already started trying to load + * migration state, and so it exits with I/O failure. + * + * In multifd case when client aborts due to mismatched + * cert host, the server is still waiting for the other + * multifd connections to arrive so hasn't started trying + * to load migration state, and thus just aborts the migration + * without exiting. 
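+ *
+ * That is why this test expects MIG_TEST_FAIL (destination QEMU keeps
+ * running), whereas the non-multifd x509 mismatch-host test expects
+ * MIG_TEST_FAIL_DEST_QUIT_ERR.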
+ */ + MigrateCommon args = { + .start = { + .hide_stderr = true, + }, + .listen_uri = "defer", + .start_hook = test_migrate_multifd_tls_x509_start_mismatch_host, + .finish_hook = test_migrate_tls_x509_finish, + .result = MIG_TEST_FAIL, + }; + test_precopy_common(&args); +} + +static void test_multifd_tcp_tls_x509_allow_anon_client(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start_hook = test_migrate_multifd_tls_x509_start_allow_anon_client, + .finish_hook = test_migrate_tls_x509_finish, + }; + test_precopy_common(&args); +} + +static void test_multifd_tcp_tls_x509_reject_anon_client(void) +{ + MigrateCommon args = { + .start = { + .hide_stderr = true, + }, + .listen_uri = "defer", + .start_hook = test_migrate_multifd_tls_x509_start_reject_anon_client, + .finish_hook = test_migrate_tls_x509_finish, + .result = MIG_TEST_FAIL, + }; + test_precopy_common(&args); +} +#endif /* CONFIG_TASN1 */ +#endif /* CONFIG_GNUTLS */ + /* * This test does: * source target @@ -1311,26 +2110,18 @@ static void test_multifd_tcp_zstd(void) */ static void test_multifd_tcp_cancel(void) { - MigrateStart *args = migrate_start_new(); + MigrateStart args = { + .hide_stderr = true, + }; QTestState *from, *to, *to2; QDict *rsp; g_autofree char *uri = NULL; - args->hide_stderr = true; - - if (test_migrate_start(&from, &to, "defer", args)) { + if (test_migrate_start(&from, &to, "defer", &args)) { return; } - /* - * We want to pick a speed slow enough that the test completes - * quickly, but that it doesn't complete precopy even on a slow - * machine, so also set the downtime. - */ - /* 1 ms should make it not converge*/ - migrate_set_parameter_int(from, "downtime-limit", 1); - /* 300MB/s */ - migrate_set_parameter_int(from, "max-bandwidth", 30000000); + migrate_ensure_non_converge(from); migrate_set_parameter_int(from, "multifd-channels", 16); migrate_set_parameter_int(to, "multifd-channels", 16); @@ -1354,10 +2145,15 @@ static void test_multifd_tcp_cancel(void) migrate_cancel(from); - args = migrate_start_new(); - args->only_target = true; + /* Make sure QEMU process "to" exited */ + qtest_set_expected_status(to, EXIT_FAILURE); + qtest_wait_qemu(to); - if (test_migrate_start(&from, &to2, "defer", args)) { + args = (MigrateStart){ + .only_target = true, + }; + + if (test_migrate_start(&from, &to2, "defer", &args)) { return; } @@ -1375,10 +2171,7 @@ static void test_multifd_tcp_cancel(void) wait_for_migration_status(from, "cancelled", NULL); - /* 300ms it should converge */ - migrate_set_parameter_int(from, "downtime-limit", 300); - /* 1GB/s */ - migrate_set_parameter_int(from, "max-bandwidth", 1000000000); + migrate_ensure_converge(from); migrate_qmp(from, uri, "{}"); @@ -1394,6 +2187,253 @@ static void test_multifd_tcp_cancel(void) test_migrate_end(from, to2, true); } +static void calc_dirty_rate(QTestState *who, uint64_t calc_time) +{ + qobject_unref(qmp_command(who, + "{ 'execute': 'calc-dirty-rate'," + "'arguments': { " + "'calc-time': %" PRIu64 "," + "'mode': 'dirty-ring' }}", + calc_time)); +} + +static QDict *query_dirty_rate(QTestState *who) +{ + return qmp_command(who, "{ 'execute': 'query-dirty-rate' }"); +} + +static void dirtylimit_set_all(QTestState *who, uint64_t dirtyrate) +{ + qobject_unref(qmp_command(who, + "{ 'execute': 'set-vcpu-dirty-limit'," + "'arguments': { " + "'dirty-rate': %" PRIu64 " } }", + dirtyrate)); +} + +static void cancel_vcpu_dirty_limit(QTestState *who) +{ + qobject_unref(qmp_command(who, + "{ 'execute': 'cancel-vcpu-dirty-limit' }")); +} + +static QDict 
*query_vcpu_dirty_limit(QTestState *who) +{ + QDict *rsp; + + rsp = qtest_qmp(who, "{ 'execute': 'query-vcpu-dirty-limit' }"); + g_assert(!qdict_haskey(rsp, "error")); + g_assert(qdict_haskey(rsp, "return")); + + return rsp; +} + +static bool calc_dirtyrate_ready(QTestState *who) +{ + QDict *rsp_return; + gchar *status; + + rsp_return = query_dirty_rate(who); + g_assert(rsp_return); + + status = g_strdup(qdict_get_str(rsp_return, "status")); + g_assert(status); + + return g_strcmp0(status, "measuring"); +} + +static void wait_for_calc_dirtyrate_complete(QTestState *who, + int64_t time_s) +{ + int max_try_count = 10000; + usleep(time_s * 1000000); + + while (!calc_dirtyrate_ready(who) && max_try_count--) { + usleep(1000); + } + + /* + * Set the timeout with 10 s(max_try_count * 1000us), + * if dirtyrate measurement not complete, fail test. + */ + g_assert_cmpint(max_try_count, !=, 0); +} + +static int64_t get_dirty_rate(QTestState *who) +{ + QDict *rsp_return; + gchar *status; + QList *rates; + const QListEntry *entry; + QDict *rate; + int64_t dirtyrate; + + rsp_return = query_dirty_rate(who); + g_assert(rsp_return); + + status = g_strdup(qdict_get_str(rsp_return, "status")); + g_assert(status); + g_assert_cmpstr(status, ==, "measured"); + + rates = qdict_get_qlist(rsp_return, "vcpu-dirty-rate"); + g_assert(rates && !qlist_empty(rates)); + + entry = qlist_first(rates); + g_assert(entry); + + rate = qobject_to(QDict, qlist_entry_obj(entry)); + g_assert(rate); + + dirtyrate = qdict_get_try_int(rate, "dirty-rate", -1); + + qobject_unref(rsp_return); + return dirtyrate; +} + +static int64_t get_limit_rate(QTestState *who) +{ + QDict *rsp_return; + QList *rates; + const QListEntry *entry; + QDict *rate; + int64_t dirtyrate; + + rsp_return = query_vcpu_dirty_limit(who); + g_assert(rsp_return); + + rates = qdict_get_qlist(rsp_return, "return"); + g_assert(rates && !qlist_empty(rates)); + + entry = qlist_first(rates); + g_assert(entry); + + rate = qobject_to(QDict, qlist_entry_obj(entry)); + g_assert(rate); + + dirtyrate = qdict_get_try_int(rate, "limit-rate", -1); + + qobject_unref(rsp_return); + return dirtyrate; +} + +static QTestState *dirtylimit_start_vm(void) +{ + QTestState *vm = NULL; + g_autofree gchar *cmd = NULL; + const char *arch = qtest_get_arch(); + g_autofree char *bootpath = NULL; + + assert((strcmp(arch, "x86_64") == 0)); + bootpath = g_strdup_printf("%s/bootsect", tmpfs); + assert(sizeof(x86_bootsect) == 512); + init_bootfile(bootpath, x86_bootsect, sizeof(x86_bootsect)); + + cmd = g_strdup_printf("-accel kvm,dirty-ring-size=4096 " + "-name dirtylimit-test,debug-threads=on " + "-m 150M -smp 1 " + "-serial file:%s/vm_serial " + "-drive file=%s,format=raw ", + tmpfs, bootpath); + + vm = qtest_init(cmd); + return vm; +} + +static void dirtylimit_stop_vm(QTestState *vm) +{ + qtest_quit(vm); + cleanup("bootsect"); + cleanup("vm_serial"); +} + +static void test_vcpu_dirty_limit(void) +{ + QTestState *vm; + int64_t origin_rate; + int64_t quota_rate; + int64_t rate ; + int max_try_count = 20; + int hit = 0; + + /* Start vm for vcpu dirtylimit test */ + vm = dirtylimit_start_vm(); + + /* Wait for the first serial output from the vm*/ + wait_for_serial("vm_serial"); + + /* Do dirtyrate measurement with calc time equals 1s */ + calc_dirty_rate(vm, 1); + + /* Sleep calc time and wait for calc dirtyrate complete */ + wait_for_calc_dirtyrate_complete(vm, 1); + + /* Query original dirty page rate */ + origin_rate = get_dirty_rate(vm); + + /* VM booted from bootsect should dirty memory steadily 
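+ * (its boot code keeps rewriting a fixed RAM region in a loop), so a
+ * zero rate would point at a broken measurement rather than an idle
+ * guest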
+static void test_vcpu_dirty_limit(void)
+{
+    QTestState *vm;
+    int64_t origin_rate;
+    int64_t quota_rate;
+    int64_t rate;
+    int max_try_count = 20;
+    int hit = 0;
+
+    /* Start a VM for the vCPU dirty limit test */
+    vm = dirtylimit_start_vm();
+
+    /* Wait for the first serial output from the VM */
+    wait_for_serial("vm_serial");
+
+    /* Do a dirty rate measurement with a calc time of 1 s */
+    calc_dirty_rate(vm, 1);
+
+    /* Sleep for the calc time and wait for the measurement to complete */
+    wait_for_calc_dirtyrate_complete(vm, 1);
+
+    /* Query the original dirty page rate */
+    origin_rate = get_dirty_rate(vm);
+
+    /* A VM booted from bootsect should dirty memory steadily */
+    assert(origin_rate != 0);
+
+    /* Set the quota dirty page rate to half of the original rate */
+    quota_rate = origin_rate / 2;
+
+    /* Set the dirty limit */
+    dirtylimit_set_all(vm, quota_rate);
+
+    /*
+     * Check that set-vcpu-dirty-limit and query-vcpu-dirty-limit
+     * work as expected
+     */
+    g_assert_cmpint(quota_rate, ==, get_limit_rate(vm));
+
+    /* Sleep a bit to check whether the limit takes effect */
+    usleep(2000000);
+
+    /*
+     * Check that the dirty limit takes effect in practice. Set the
+     * timeout to 20 s (max_try_count * 1 s); if the limit has not
+     * taken effect by then, fail the test.
+     */
+    while (--max_try_count) {
+        calc_dirty_rate(vm, 1);
+        wait_for_calc_dirtyrate_complete(vm, 1);
+        rate = get_dirty_rate(vm);
+
+        /*
+         * Assume the limit is hit if the current rate is less than
+         * the quota rate (within the accepted error margin)
+         */
+        if (rate < (quota_rate + DIRTYLIMIT_TOLERANCE_RANGE)) {
+            hit = 1;
+            break;
+        }
+    }
+
+    g_assert_cmpint(hit, ==, 1);
+
+    hit = 0;
+    max_try_count = 20;
+
+    /* Check that cancelling the dirty limit takes effect */
+    cancel_vcpu_dirty_limit(vm);
+    while (--max_try_count) {
+        calc_dirty_rate(vm, 1);
+        wait_for_calc_dirtyrate_complete(vm, 1);
+        rate = get_dirty_rate(vm);
+
+        /*
+         * Assume the dirty limit has been canceled if the current rate
+         * is greater than the quota rate (within the accepted error margin)
+         */
+        if (rate > (quota_rate + DIRTYLIMIT_TOLERANCE_RANGE)) {
+            hit = 1;
+            break;
+        }
+    }
+
+    g_assert_cmpint(hit, ==, 1);
+    dirtylimit_stop_vm(vm);
+}
+
 static bool kvm_dirty_ring_supported(void)
 {
 #if defined(__linux__) && defined(HOST_X86_64)
@@ -1419,23 +2459,21 @@ static bool kvm_dirty_ring_supported(void)
 
 int main(int argc, char **argv)
 {
-    char template[] = "/tmp/migration-test-XXXXXX";
+    const bool has_kvm = qtest_has_accel("kvm");
+    const bool has_uffd = ufd_version_check();
+    const char *arch = qtest_get_arch();
+    g_autoptr(GError) err = NULL;
     int ret;
 
     g_test_init(&argc, &argv, NULL);
 
-    if (!ufd_version_check()) {
-        return g_test_run();
-    }
-
     /*
      * On ppc64, the test only works with kvm-hv, but not with kvm-pr and TCG
      * is touchy due to race conditions on dirty bits (especially on PPC for
      * some reason)
      */
-    if (g_str_equal(qtest_get_arch(), "ppc64") &&
-        (access("/sys/module/kvm_hv", F_OK) ||
-         access("/dev/kvm", R_OK | W_OK))) {
+    if (g_str_equal(arch, "ppc64") &&
+        (!has_kvm || access("/sys/module/kvm_hv", F_OK))) {
         g_test_message("Skipping test: kvm_hv not available");
         return g_test_run();
     }
@@ -1444,34 +2482,86 @@ int main(int argc, char **argv)
      * Similar to ppc64, s390x seems to be touchy with TCG, so disable it
      * there until the problems are resolved
      */
-    if (g_str_equal(qtest_get_arch(), "s390x")) {
-#if defined(HOST_S390X)
-        if (access("/dev/kvm", R_OK | W_OK)) {
-            g_test_message("Skipping test: kvm not available");
-            return g_test_run();
-        }
-#else
-        g_test_message("Skipping test: Need s390x host to work properly");
+    if (g_str_equal(arch, "s390x") && !has_kvm) {
+        g_test_message("Skipping test: s390x host with KVM is required");
         return g_test_run();
-#endif
     }
 
-    tmpfs = mkdtemp(template);
+    tmpfs = g_dir_make_tmp("migration-test-XXXXXX", &err);
     if (!tmpfs) {
-        g_test_message("mkdtemp on path (%s): %s", template, strerror(errno));
+        g_test_message("Can't create temporary directory in %s: %s",
+                       g_get_tmp_dir(), err->message);
     }
     g_assert(tmpfs);
 
     module_call_init(MODULE_INIT_QOM);
 
-    qtest_add_func("/migration/postcopy/unix", test_postcopy);
-    qtest_add_func("/migration/postcopy/recovery", test_postcopy_recovery);
+    if (has_uffd) {
+        qtest_add_func("/migration/postcopy/plain", test_postcopy);
+
qtest_add_func("/migration/postcopy/recovery/plain", + test_postcopy_recovery); + qtest_add_func("/migration/postcopy/preempt/plain", test_postcopy_preempt); + qtest_add_func("/migration/postcopy/preempt/recovery/plain", + test_postcopy_preempt_recovery); + } + qtest_add_func("/migration/bad_dest", test_baddest); - qtest_add_func("/migration/precopy/unix", test_precopy_unix); - qtest_add_func("/migration/precopy/tcp", test_precopy_tcp); + qtest_add_func("/migration/precopy/unix/plain", test_precopy_unix_plain); + qtest_add_func("/migration/precopy/unix/xbzrle", test_precopy_unix_xbzrle); +#ifdef CONFIG_GNUTLS + qtest_add_func("/migration/precopy/unix/tls/psk", + test_precopy_unix_tls_psk); + + if (has_uffd) { + /* + * NOTE: psk test is enough for postcopy, as other types of TLS + * channels are tested under precopy. Here what we want to test is the + * general postcopy path that has TLS channel enabled. + */ + qtest_add_func("/migration/postcopy/tls/psk", test_postcopy_tls_psk); + qtest_add_func("/migration/postcopy/recovery/tls/psk", + test_postcopy_recovery_tls_psk); + qtest_add_func("/migration/postcopy/preempt/tls/psk", + test_postcopy_preempt_tls_psk); + qtest_add_func("/migration/postcopy/preempt/recovery/tls/psk", + test_postcopy_preempt_all); + } +#ifdef CONFIG_TASN1 + qtest_add_func("/migration/precopy/unix/tls/x509/default-host", + test_precopy_unix_tls_x509_default_host); + qtest_add_func("/migration/precopy/unix/tls/x509/override-host", + test_precopy_unix_tls_x509_override_host); +#endif /* CONFIG_TASN1 */ +#endif /* CONFIG_GNUTLS */ + + qtest_add_func("/migration/precopy/tcp/plain", test_precopy_tcp_plain); +#ifdef CONFIG_GNUTLS + qtest_add_func("/migration/precopy/tcp/tls/psk/match", + test_precopy_tcp_tls_psk_match); + qtest_add_func("/migration/precopy/tcp/tls/psk/mismatch", + test_precopy_tcp_tls_psk_mismatch); +#ifdef CONFIG_TASN1 + qtest_add_func("/migration/precopy/tcp/tls/x509/default-host", + test_precopy_tcp_tls_x509_default_host); + qtest_add_func("/migration/precopy/tcp/tls/x509/override-host", + test_precopy_tcp_tls_x509_override_host); + qtest_add_func("/migration/precopy/tcp/tls/x509/mismatch-host", + test_precopy_tcp_tls_x509_mismatch_host); + qtest_add_func("/migration/precopy/tcp/tls/x509/friendly-client", + test_precopy_tcp_tls_x509_friendly_client); + qtest_add_func("/migration/precopy/tcp/tls/x509/hostile-client", + test_precopy_tcp_tls_x509_hostile_client); + qtest_add_func("/migration/precopy/tcp/tls/x509/allow-anon-client", + test_precopy_tcp_tls_x509_allow_anon_client); + qtest_add_func("/migration/precopy/tcp/tls/x509/reject-anon-client", + test_precopy_tcp_tls_x509_reject_anon_client); +#endif /* CONFIG_TASN1 */ +#endif /* CONFIG_GNUTLS */ + /* qtest_add_func("/migration/ignore_shared", test_ignore_shared); */ - qtest_add_func("/migration/xbzrle/unix", test_xbzrle_unix); +#ifndef _WIN32 qtest_add_func("/migration/fd_proto", test_migrate_fd_proto); +#endif qtest_add_func("/migration/validate_uuid", test_validate_uuid); qtest_add_func("/migration/validate_uuid_error", test_validate_uuid_error); qtest_add_func("/migration/validate_uuid_src_not_set", @@ -1480,16 +2570,40 @@ int main(int argc, char **argv) test_validate_uuid_dst_not_set); qtest_add_func("/migration/auto_converge", test_migrate_auto_converge); - qtest_add_func("/migration/multifd/tcp/none", test_multifd_tcp_none); - qtest_add_func("/migration/multifd/tcp/cancel", test_multifd_tcp_cancel); - qtest_add_func("/migration/multifd/tcp/zlib", test_multifd_tcp_zlib); + 
qtest_add_func("/migration/multifd/tcp/plain/none", + test_multifd_tcp_none); + qtest_add_func("/migration/multifd/tcp/plain/cancel", + test_multifd_tcp_cancel); + qtest_add_func("/migration/multifd/tcp/plain/zlib", + test_multifd_tcp_zlib); #ifdef CONFIG_ZSTD - qtest_add_func("/migration/multifd/tcp/zstd", test_multifd_tcp_zstd); + qtest_add_func("/migration/multifd/tcp/plain/zstd", + test_multifd_tcp_zstd); #endif +#ifdef CONFIG_GNUTLS + qtest_add_func("/migration/multifd/tcp/tls/psk/match", + test_multifd_tcp_tls_psk_match); + qtest_add_func("/migration/multifd/tcp/tls/psk/mismatch", + test_multifd_tcp_tls_psk_mismatch); +#ifdef CONFIG_TASN1 + qtest_add_func("/migration/multifd/tcp/tls/x509/default-host", + test_multifd_tcp_tls_x509_default_host); + qtest_add_func("/migration/multifd/tcp/tls/x509/override-host", + test_multifd_tcp_tls_x509_override_host); + qtest_add_func("/migration/multifd/tcp/tls/x509/mismatch-host", + test_multifd_tcp_tls_x509_mismatch_host); + qtest_add_func("/migration/multifd/tcp/tls/x509/allow-anon-client", + test_multifd_tcp_tls_x509_allow_anon_client); + qtest_add_func("/migration/multifd/tcp/tls/x509/reject-anon-client", + test_multifd_tcp_tls_x509_reject_anon_client); +#endif /* CONFIG_TASN1 */ +#endif /* CONFIG_GNUTLS */ - if (kvm_dirty_ring_supported()) { + if (g_str_equal(arch, "x86_64") && has_kvm && kvm_dirty_ring_supported()) { qtest_add_func("/migration/dirty_ring", test_precopy_unix_dirty_ring); + qtest_add_func("/migration/vcpu_dirty_limit", + test_vcpu_dirty_limit); } ret = g_test_run(); @@ -1501,6 +2615,7 @@ int main(int argc, char **argv) g_test_message("unable to rmdir: path (%s): %s", tmpfs, strerror(errno)); } + g_free(tmpfs); return ret; } diff --git a/tests/qtest/modules-test.c b/tests/qtest/modules-test.c index c238b3f422..be2575ae6d 100644 --- a/tests/qtest/modules-test.c +++ b/tests/qtest/modules-test.c @@ -1,5 +1,5 @@ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" const char common_args[] = "-nodefaults -machine none"; @@ -16,6 +16,9 @@ static void test_modules_load(const void *data) int main(int argc, char *argv[]) { const char *modules[] = { +#ifdef CONFIG_BLKIO + "block-", "blkio", +#endif #ifdef CONFIG_CURL "block-", "curl", #endif diff --git a/tests/qtest/ne2000-test.c b/tests/qtest/ne2000-test.c index 43cfc4535a..3fc0e555d5 100644 --- a/tests/qtest/ne2000-test.c +++ b/tests/qtest/ne2000-test.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/module.h" #include "libqos/qgraph.h" #include "libqos/pci.h" diff --git a/tests/qtest/npcm7xx_adc-test.c b/tests/qtest/npcm7xx_adc-test.c index 5ce8ce13b3..8048044d28 100644 --- a/tests/qtest/npcm7xx_adc-test.c +++ b/tests/qtest/npcm7xx_adc-test.c @@ -17,7 +17,7 @@ #include "qemu/osdep.h" #include "qemu/bitops.h" #include "qemu/timer.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qapi/qmp/qdict.h" #define REF_HZ (25000000) @@ -50,7 +50,7 @@ #define CON_INT BIT(18) #define CON_EN BIT(17) #define CON_RST BIT(16) -#define CON_CONV BIT(14) +#define CON_CONV BIT(13) #define CON_DIV(rv) extract32(rv, 1, 8) #define FST_RDST BIT(1) diff --git a/tests/qtest/npcm7xx_emc-test.c b/tests/qtest/npcm7xx_emc-test.c index 9eec71d87c..b046f1d76a 100644 --- a/tests/qtest/npcm7xx_emc-test.c +++ b/tests/qtest/npcm7xx_emc-test.c @@ -15,7 +15,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "libqos/libqos.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qnum.h" @@ -210,6 +209,7 @@ static 
int emc_module_index(const EMCModule *mod) return diff; } +#ifndef _WIN32 static void packet_test_clear(void *sockets) { int *test_sockets = sockets; @@ -244,6 +244,7 @@ static int *packet_test_init(int module_num, GString *cmd_line) g_test_queue_destroy(packet_test_clear, test_sockets); return test_sockets; } +#endif /* _WIN32 */ static uint32_t emc_read(QTestState *qts, const EMCModule *mod, NPCM7xxPWMRegister regno) @@ -251,6 +252,7 @@ static uint32_t emc_read(QTestState *qts, const EMCModule *mod, return qtest_readl(qts, mod->base_addr + regno * sizeof(uint32_t)); } +#ifndef _WIN32 static void emc_write(QTestState *qts, const EMCModule *mod, NPCM7xxPWMRegister regno, uint32_t value) { @@ -340,6 +342,7 @@ static bool emc_soft_reset(QTestState *qts, const EMCModule *mod) g_message("%s: Timeout expired", __func__); return false; } +#endif /* _WIN32 */ /* Check emc registers are reset to default value. */ static void test_init(gconstpointer test_data) @@ -378,7 +381,8 @@ static void test_init(gconstpointer test_data) #undef CHECK_REG - for (i = 0; i < NUM_CAMML_REGS; ++i) { + /* Skip over the MAC address registers, which is BASE+0 */ + for (i = 1; i < NUM_CAMML_REGS; ++i) { g_assert_cmpuint(emc_read(qts, mod, REG_CAMM_BASE + i * 2), ==, 0); g_assert_cmpuint(emc_read(qts, mod, REG_CAML_BASE + i * 2), ==, @@ -388,6 +392,7 @@ static void test_init(gconstpointer test_data) qtest_quit(qts); } +#ifndef _WIN32 static bool emc_wait_irq(QTestState *qts, const EMCModule *mod, int step, bool is_tx) { @@ -521,12 +526,12 @@ static void emc_send_verify1(QTestState *qts, const EMCModule *mod, int fd, /* Check data sent to the backend. */ recv_len = ~0; - ret = qemu_recv(fd, &recv_len, sizeof(recv_len), MSG_DONTWAIT); + ret = recv(fd, &recv_len, sizeof(recv_len), MSG_DONTWAIT); g_assert_cmpint(ret, == , sizeof(recv_len)); g_assert(wait_socket_readable(fd)); memset(buffer, 0xff, sizeof(buffer)); - ret = qemu_recv(fd, buffer, test_size, MSG_DONTWAIT); + ret = recv(fd, buffer, test_size, MSG_DONTWAIT); g_assert_cmpmem(buffer, ret, test_data, test_size); } @@ -844,6 +849,7 @@ static void test_rx(gconstpointer test_data) qtest_quit(qts); } +#endif /* _WIN32 */ static void emc_add_test(const char *name, const TestData* td, GTestDataFunc fn) @@ -866,8 +872,10 @@ int main(int argc, char **argv) td->module = &emc_module_list[i]; add_test(init, td); +#ifndef _WIN32 add_test(tx, td); add_test(rx, td); +#endif } return g_test_run(); diff --git a/tests/qtest/npcm7xx_pwm-test.c b/tests/qtest/npcm7xx_pwm-test.c index a54fd70d27..e320a625c4 100644 --- a/tests/qtest/npcm7xx_pwm-test.c +++ b/tests/qtest/npcm7xx_pwm-test.c @@ -16,7 +16,7 @@ #include "qemu/osdep.h" #include "qemu/bitops.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qnum.h" @@ -268,6 +268,9 @@ static void mft_qom_set(QTestState *qts, int index, const char *name, path, name, value); /* The qom set message returns successfully. 
*/ g_assert_true(qdict_haskey(response, "return")); + + qobject_unref(response); + g_free(path); } static uint32_t get_pll(uint32_t con) diff --git a/tests/qtest/npcm7xx_rng-test.c b/tests/qtest/npcm7xx_rng-test.c index 797f832e53..35b1c1f5f6 100644 --- a/tests/qtest/npcm7xx_rng-test.c +++ b/tests/qtest/npcm7xx_rng-test.c @@ -20,7 +20,7 @@ #include "libqtest-single.h" #include "qemu/bitops.h" -#include "qemu-common.h" +#include "qemu/cutils.h" #define RNG_BASE_ADDR 0xf000b000 diff --git a/tests/qtest/npcm7xx_sdhci-test.c b/tests/qtest/npcm7xx_sdhci-test.c new file mode 100644 index 0000000000..5d68540e52 --- /dev/null +++ b/tests/qtest/npcm7xx_sdhci-test.c @@ -0,0 +1,215 @@ +/* + * QTests for NPCM7xx SD-3.0 / MMC-4.51 Host Controller + * + * Copyright (c) 2022 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include "qemu/osdep.h" +#include "hw/sd/npcm7xx_sdhci.h" + +#include "libqtest.h" +#include "libqtest-single.h" +#include "libqos/sdhci-cmd.h" + +#define NPCM7XX_REG_SIZE 0x100 +#define NPCM7XX_MMC_BA 0xF0842000 +#define NPCM7XX_BLK_SIZE 512 +#define NPCM7XX_TEST_IMAGE_SIZE (1 << 20) + +char *sd_path; + +static QTestState *setup_sd_card(void) +{ + QTestState *qts = qtest_initf( + "-machine kudo-bmc " + "-device sd-card,drive=drive0 " + "-drive id=drive0,if=none,file=%s,format=raw,auto-read-only=off", + sd_path); + + qtest_writew(qts, NPCM7XX_MMC_BA + SDHC_SWRST, SDHC_RESET_ALL); + qtest_writew(qts, NPCM7XX_MMC_BA + SDHC_CLKCON, + SDHC_CLOCK_SDCLK_EN | SDHC_CLOCK_INT_STABLE | + SDHC_CLOCK_INT_EN); + sdhci_cmd_regs(qts, NPCM7XX_MMC_BA, 0, 0, 0, 0, SDHC_APP_CMD); + sdhci_cmd_regs(qts, NPCM7XX_MMC_BA, 0, 0, 0x41200000, 0, (41 << 8)); + sdhci_cmd_regs(qts, NPCM7XX_MMC_BA, 0, 0, 0, 0, SDHC_ALL_SEND_CID); + sdhci_cmd_regs(qts, NPCM7XX_MMC_BA, 0, 0, 0, 0, SDHC_SEND_RELATIVE_ADDR); + sdhci_cmd_regs(qts, NPCM7XX_MMC_BA, 0, 0, 0x45670000, 0, + SDHC_SELECT_DESELECT_CARD); + + return qts; +} + +static void write_sdread(QTestState *qts, const char *msg) +{ + int fd, ret; + size_t len = strlen(msg); + char *rmsg = g_malloc(len); + + /* write message to sd */ + fd = open(sd_path, O_WRONLY); + g_assert(fd >= 0); + ret = write(fd, msg, len); + close(fd); + g_assert(ret == len); + + /* read message using sdhci */ + ret = sdhci_read_cmd(qts, NPCM7XX_MMC_BA, rmsg, len); + g_assert(ret == len); + g_assert(!memcmp(rmsg, msg, len)); + + g_free(rmsg); +} + +/* Check MMC can read values from sd */ +static void test_read_sd(void) +{ + QTestState *qts = setup_sd_card(); + + write_sdread(qts, "hello world"); + write_sdread(qts, "goodbye"); + + qtest_quit(qts); +} + +static void sdwrite_read(QTestState *qts, const char *msg) +{ + int fd, ret; + size_t len = strlen(msg); + char *rmsg = g_malloc(len); + + /* write message using sdhci */ + sdhci_write_cmd(qts, NPCM7XX_MMC_BA, msg, len, NPCM7XX_BLK_SIZE); + + /* read message from sd */ + fd = open(sd_path, O_RDONLY); + g_assert(fd >= 0); + ret = read(fd, rmsg, len); + close(fd); + g_assert(ret == len); + + g_assert(!memcmp(rmsg, msg, len)); + + g_free(rmsg); +} + +/* Check MMC can write values 
to sd */ +static void test_write_sd(void) +{ + QTestState *qts = setup_sd_card(); + + sdwrite_read(qts, "hello world"); + sdwrite_read(qts, "goodbye"); + + qtest_quit(qts); +} + +/* Check SDHCI has correct default values. */ +static void test_reset(void) +{ + QTestState *qts = qtest_init("-machine kudo-bmc"); + uint64_t addr = NPCM7XX_MMC_BA; + uint64_t end_addr = addr + NPCM7XX_REG_SIZE; + uint16_t prstvals_resets[] = {NPCM7XX_PRSTVALS_0_RESET, + NPCM7XX_PRSTVALS_1_RESET, + 0, + NPCM7XX_PRSTVALS_3_RESET, + 0, + 0}; + int i; + uint32_t mask; + + while (addr < end_addr) { + switch (addr - NPCM7XX_MMC_BA) { + case SDHC_PRNSTS: + /* + * ignores bits 20 to 24: they are changed when reading registers + */ + mask = 0x1f00000; + g_assert_cmphex(qtest_readl(qts, addr) | mask, ==, + NPCM7XX_PRSNTS_RESET | mask); + addr += 4; + break; + case SDHC_BLKGAP: + g_assert_cmphex(qtest_readb(qts, addr), ==, NPCM7XX_BLKGAP_RESET); + addr += 1; + break; + case SDHC_CAPAB: + g_assert_cmphex(qtest_readq(qts, addr), ==, NPCM7XX_CAPAB_RESET); + addr += 8; + break; + case SDHC_MAXCURR: + g_assert_cmphex(qtest_readq(qts, addr), ==, NPCM7XX_MAXCURR_RESET); + addr += 8; + break; + case SDHC_HCVER: + g_assert_cmphex(qtest_readw(qts, addr), ==, NPCM7XX_HCVER_RESET); + addr += 2; + break; + case NPCM7XX_PRSTVALS: + for (i = 0; i < NPCM7XX_PRSTVALS_SIZE; ++i) { + g_assert_cmphex(qtest_readw(qts, addr + 2 * i), ==, + prstvals_resets[i]); + } + addr += NPCM7XX_PRSTVALS_SIZE * 2; + break; + default: + g_assert_cmphex(qtest_readb(qts, addr), ==, 0); + addr += 1; + } + } + + qtest_quit(qts); +} + +static void drive_destroy(void) +{ + unlink(sd_path); + g_free(sd_path); +} + +static void drive_create(void) +{ + int fd, ret; + GError *error = NULL; + + /* Create a temporary raw image */ + fd = g_file_open_tmp("sdhci_XXXXXX", &sd_path, &error); + if (fd == -1) { + fprintf(stderr, "unable to create sdhci file: %s\n", error->message); + g_error_free(error); + } + g_assert(sd_path != NULL); + + ret = ftruncate(fd, NPCM7XX_TEST_IMAGE_SIZE); + g_assert_cmpint(ret, ==, 0); + g_message("%s", sd_path); + close(fd); +} + +int main(int argc, char **argv) +{ + int ret; + + drive_create(); + + g_test_init(&argc, &argv, NULL); + + qtest_add_func("npcm7xx_sdhci/reset", test_reset); + qtest_add_func("npcm7xx_sdhci/write_sd", test_write_sd); + qtest_add_func("npcm7xx_sdhci/read_sd", test_read_sd); + + ret = g_test_run(); + drive_destroy(); + return ret; +} diff --git a/tests/qtest/npcm7xx_smbus-test.c b/tests/qtest/npcm7xx_smbus-test.c index 6b3038ac59..a878cdc001 100644 --- a/tests/qtest/npcm7xx_smbus-test.c +++ b/tests/qtest/npcm7xx_smbus-test.c @@ -17,7 +17,7 @@ #include "qemu/osdep.h" #include "qemu/bitops.h" #include "libqos/i2c.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "hw/sensor/tmp105_regs.h" #define NR_SMBUS_DEVICES 16 diff --git a/tests/qtest/npcm7xx_watchdog_timer-test.c b/tests/qtest/npcm7xx_watchdog_timer-test.c index 3aae5a0438..4773a673b2 100644 --- a/tests/qtest/npcm7xx_watchdog_timer-test.c +++ b/tests/qtest/npcm7xx_watchdog_timer-test.c @@ -17,7 +17,7 @@ #include "qemu/osdep.h" #include "qemu/timer.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qapi/qmp/qdict.h" #define WTCR_OFFSET 0x1c diff --git a/tests/qtest/numa-test.c b/tests/qtest/numa-test.c index c677cd63c4..c5eb13f349 100644 --- a/tests/qtest/numa-test.c +++ b/tests/qtest/numa-test.c @@ -10,7 +10,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qapi/qmp/qdict.h" #include 
"qapi/qmp/qlist.h" @@ -42,7 +42,8 @@ static void test_def_cpu_split(const void *data) g_autofree char *s = NULL; g_autofree char *cli = NULL; - cli = make_cli(data, "-machine smp.cpus=8 -numa node,memdev=ram -numa node"); + cli = make_cli(data, "-machine smp.cpus=8,smp.sockets=8 " + "-numa node,memdev=ram -numa node"); qts = qtest_init(cli); s = qtest_hmp(qts, "info numa"); @@ -222,17 +223,18 @@ static void aarch64_numa_cpu(const void *data) QTestState *qts; g_autofree char *cli = NULL; - cli = make_cli(data, "-machine smp.cpus=2 " + cli = make_cli(data, "-machine " + "smp.cpus=2,smp.sockets=2,smp.clusters=1,smp.cores=1,smp.threads=1 " "-numa node,nodeid=0,memdev=ram -numa node,nodeid=1 " - "-numa cpu,node-id=1,thread-id=0 " - "-numa cpu,node-id=0,thread-id=1"); + "-numa cpu,node-id=0,socket-id=1,cluster-id=0,core-id=0,thread-id=0 " + "-numa cpu,node-id=1,socket-id=0,cluster-id=0,core-id=0,thread-id=0"); qts = qtest_init(cli); cpus = get_cpus(qts, &resp); g_assert(cpus); while ((e = qlist_pop(cpus))) { QDict *cpu, *props; - int64_t thread, node; + int64_t socket, cluster, core, thread, node; cpu = qobject_to(QDict, e); g_assert(qdict_haskey(cpu, "props")); @@ -240,12 +242,18 @@ static void aarch64_numa_cpu(const void *data) g_assert(qdict_haskey(props, "node-id")); node = qdict_get_int(props, "node-id"); + g_assert(qdict_haskey(props, "socket-id")); + socket = qdict_get_int(props, "socket-id"); + g_assert(qdict_haskey(props, "cluster-id")); + cluster = qdict_get_int(props, "cluster-id"); + g_assert(qdict_haskey(props, "core-id")); + core = qdict_get_int(props, "core-id"); g_assert(qdict_haskey(props, "thread-id")); thread = qdict_get_int(props, "thread-id"); - if (thread == 0) { + if (socket == 0 && cluster == 0 && core == 0 && thread == 0) { g_assert_cmpint(node, ==, 1); - } else if (thread == 1) { + } else if (socket == 1 && cluster == 0 && core == 0 && thread == 0) { g_assert_cmpint(node, ==, 0); } else { g_assert(false); @@ -265,7 +273,8 @@ static void pc_dynamic_cpu_cfg(const void *data) QTestState *qs; g_autofree char *cli = NULL; - cli = make_cli(data, "-nodefaults --preconfig -machine smp.cpus=2"); + cli = make_cli(data, "-nodefaults --preconfig " + "-machine smp.cpus=2,smp.sockets=2"); qs = qtest_init(cli); /* create 2 numa nodes */ diff --git a/tests/qtest/nvme-test.c b/tests/qtest/nvme-test.c index f8bafb5d70..008d189b0f 100644 --- a/tests/qtest/nvme-test.c +++ b/tests/qtest/nvme-test.c @@ -10,7 +10,7 @@ #include "qemu/osdep.h" #include "qemu/module.h" #include "qemu/units.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "libqos/qgraph.h" #include "libqos/pci.h" #include "include/block/nvme.h" diff --git a/tests/qtest/pca9552-test.c b/tests/qtest/pca9552-test.c index 42a1312665..d80ed93cd3 100644 --- a/tests/qtest/pca9552-test.c +++ b/tests/qtest/pca9552-test.c @@ -9,7 +9,7 @@ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "libqos/qgraph.h" #include "libqos/i2c.h" #include "hw/misc/pca9552_regs.h" diff --git a/tests/qtest/pci-test.c b/tests/qtest/pci-test.c index e15d4d94d1..4b2092b949 100644 --- a/tests/qtest/pci-test.c +++ b/tests/qtest/pci-test.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/module.h" #include "libqos/qgraph.h" #include "libqos/pci.h" diff --git a/tests/qtest/pcnet-test.c b/tests/qtest/pcnet-test.c index 7583aeb3c3..900944fa7e 100644 --- a/tests/qtest/pcnet-test.c +++ b/tests/qtest/pcnet-test.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" 
-#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/module.h" #include "libqos/qgraph.h" #include "libqos/pci.h" diff --git a/tests/qtest/pflash-cfi02-test.c b/tests/qtest/pflash-cfi02-test.c index 6168edc821..0b52c2ca5c 100644 --- a/tests/qtest/pflash-cfi02-test.c +++ b/tests/qtest/pflash-cfi02-test.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" /* * To test the pflash_cfi02 device, we run QEMU with the musicpal machine with @@ -56,7 +56,7 @@ typedef struct { QTestState *qtest; } FlashConfig; -static char image_path[] = "/tmp/qtest.XXXXXX"; +static char *image_path; /* * The pflash implementation allows some parameters to be unspecified. We want @@ -608,6 +608,7 @@ static void test_cfi_in_autoselect(const void *opaque) static void cleanup(void *opaque) { unlink(image_path); + g_free(image_path); } /* @@ -635,16 +636,14 @@ static const FlashConfig configuration[] = { int main(int argc, char **argv) { - int fd = mkstemp(image_path); - if (fd == -1) { - g_printerr("Failed to create temporary file %s: %s\n", image_path, - strerror(errno)); - exit(EXIT_FAILURE); - } + GError *err = NULL; + int fd = g_file_open_tmp("qtest.XXXXXX", &image_path, &err); + g_assert_no_error(err); + if (ftruncate(fd, UNIFORM_FLASH_SIZE) < 0) { int error_code = errno; close(fd); - unlink(image_path); + cleanup(NULL); g_printerr("Failed to truncate file %s to %u MB: %s\n", image_path, UNIFORM_FLASH_SIZE, strerror(error_code)); exit(EXIT_FAILURE); diff --git a/tests/qtest/pnv-xscom-test.c b/tests/qtest/pnv-xscom-test.c index c8d4043362..2c46d5cf6d 100644 --- a/tests/qtest/pnv-xscom-test.c +++ b/tests/qtest/pnv-xscom-test.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" typedef enum PnvChipType { PNV_CHIP_POWER8E, /* AKA Murano (default) */ diff --git a/tests/qtest/prom-env-test.c b/tests/qtest/prom-env-test.c index f41d80154a..39ccb59797 100644 --- a/tests/qtest/prom-env-test.c +++ b/tests/qtest/prom-env-test.c @@ -20,7 +20,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "libqos/libqos-spapr.h" #define MAGIC 0xcafec0de @@ -58,8 +58,8 @@ static void test_machine(const void *machine) " -machine " PSERIES_DEFAULT_CAPABILITIES; } - qts = qtest_initf("-M %s -accel tcg %s -prom-env 'use-nvramrc?=true' " - "-prom-env 'nvramrc=%x %x l!' 
", (const char *)machine, + qts = qtest_initf("-M %s -accel tcg %s -prom-env \"use-nvramrc?=true\" " + "-prom-env \"nvramrc=%x %x l!\" ", (const char *)machine, extra_args, MAGIC, ADDRESS); check_guest_memory(qts); qtest_quit(qts); @@ -71,9 +71,11 @@ static void add_tests(const char *machines[]) char *name; for (i = 0; machines[i] != NULL; i++) { - name = g_strdup_printf("prom-env/%s", machines[i]); - qtest_add_data_func(name, machines[i], test_machine); - g_free(name); + if (qtest_has_machine(machines[i])) { + name = g_strdup_printf("prom-env/%s", machines[i]); + qtest_add_data_func(name, machines[i], test_machine); + g_free(name); + } } } diff --git a/tests/qtest/pvpanic-pci-test.c b/tests/qtest/pvpanic-pci-test.c index 2358852d35..c82c365c26 100644 --- a/tests/qtest/pvpanic-pci-test.c +++ b/tests/qtest/pvpanic-pci-test.c @@ -12,7 +12,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qapi/qmp/qdict.h" #include "libqos/pci.h" #include "libqos/pci-pc.h" diff --git a/tests/qtest/pvpanic-test.c b/tests/qtest/pvpanic-test.c index 6dcad2db49..bc7b7dfc39 100644 --- a/tests/qtest/pvpanic-test.c +++ b/tests/qtest/pvpanic-test.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qapi/qmp/qdict.h" static void test_panic_nopause(void) diff --git a/tests/qtest/pxe-test.c b/tests/qtest/pxe-test.c index 32bbae33c5..52f0b5c67c 100644 --- a/tests/qtest/pxe-test.c +++ b/tests/qtest/pxe-test.c @@ -14,8 +14,7 @@ #include "qemu/osdep.h" #include -#include "qemu-common.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "boot-sector.h" #include "libqos/libqos-spapr.h" diff --git a/tests/qtest/q35-test.c b/tests/qtest/q35-test.c index b7cf144990..c922d81bc0 100644 --- a/tests/qtest/q35-test.c +++ b/tests/qtest/q35-test.c @@ -10,7 +10,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "libqos/pci.h" #include "libqos/pci-pc.h" #include "hw/pci-host/q35.h" diff --git a/tests/qtest/qmp-cmd-test.c b/tests/qtest/qmp-cmd-test.c index c98b78d033..897e4e937b 100644 --- a/tests/qtest/qmp-cmd-test.c +++ b/tests/qtest/qmp-cmd-test.c @@ -11,7 +11,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qapi/error.h" #include "qapi/qapi-visit-introspect.h" #include "qapi/qmp/qdict.h" @@ -46,6 +46,14 @@ static int query_error_class(const char *cmd) { "query-balloon", ERROR_CLASS_DEVICE_NOT_ACTIVE }, { "query-hotpluggable-cpus", ERROR_CLASS_GENERIC_ERROR }, { "query-vm-generation-id", ERROR_CLASS_GENERIC_ERROR }, +#ifndef CONFIG_PROFILER + { "x-query-profile", ERROR_CLASS_GENERIC_ERROR }, +#endif + /* Only valid with a USB bus added */ + { "x-query-usb", ERROR_CLASS_GENERIC_ERROR }, + /* Only valid with accel=tcg */ + { "x-query-jit", ERROR_CLASS_GENERIC_ERROR }, + { "x-query-opcount", ERROR_CLASS_GENERIC_ERROR }, { NULL, -1 } }; int i; @@ -95,11 +103,16 @@ static bool query_is_ignored(const char *cmd) "query-gic-capabilities", /* arm */ /* Success depends on target-specific build configuration: */ "query-pci", /* CONFIG_PCI */ + "x-query-virtio", /* CONFIG_VIRTIO */ /* Success depends on launching SEV guest */ "query-sev-launch-measure", /* Success depends on Host or Hypervisor SEV support */ "query-sev", "query-sev-capabilities", + "query-sgx", + "query-sgx-capabilities", + /* Success depends on enabling dirty page rate limit */ + "query-vcpu-dirty-limit", NULL }; int i; diff --git a/tests/qtest/qmp-test.c b/tests/qtest/qmp-test.c 
index cd27fae3de..22957fa49c 100644 --- a/tests/qtest/qmp-test.c +++ b/tests/qtest/qmp-test.c @@ -11,7 +11,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qapi/error.h" #include "qapi/qapi-visit-control.h" #include "qapi/qmp/qdict.h" @@ -159,16 +159,19 @@ static void test_qmp_protocol(void) qtest_quit(qts); } +#ifndef _WIN32 + /* Out-of-band tests */ -char tmpdir[] = "/tmp/qmp-test-XXXXXX"; +char *tmpdir; char *fifo_name; static void setup_blocking_cmd(void) { - if (!mkdtemp(tmpdir)) { - g_error("mkdtemp: %s", strerror(errno)); - } + GError *err = NULL; + tmpdir = g_dir_make_tmp("qmp-test-XXXXXX", &err); + g_assert_no_error(err); + fifo_name = g_strdup_printf("%s/fifo", tmpdir); if (mkfifo(fifo_name, 0666)) { g_error("mkfifo: %s", strerror(errno)); @@ -179,6 +182,7 @@ static void cleanup_blocking_cmd(void) { unlink(fifo_name); rmdir(tmpdir); + g_free(tmpdir); } static void send_cmd_that_blocks(QTestState *s, const char *id) @@ -277,6 +281,8 @@ static void test_qmp_oob(void) qtest_quit(qts); } +#endif /* _WIN32 */ + /* Preconfig tests */ static void test_qmp_preconfig(void) @@ -336,7 +342,10 @@ int main(int argc, char *argv[]) g_test_init(&argc, &argv, NULL); qtest_add_func("qmp/protocol", test_qmp_protocol); +#ifndef _WIN32 + /* This case calls mkfifo() which does not exist on win32 */ qtest_add_func("qmp/oob", test_qmp_oob); +#endif qtest_add_func("qmp/preconfig", test_qmp_preconfig); qtest_add_func("qmp/missing-any-arg", test_qmp_missing_any_arg); diff --git a/tests/qtest/qom-test.c b/tests/qtest/qom-test.c index eb34af843b..13510bc349 100644 --- a/tests/qtest/qom-test.c +++ b/tests/qtest/qom-test.c @@ -9,11 +9,10 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qlist.h" #include "qemu/cutils.h" -#include "libqos/libqtest.h" +#include "libqtest.h" static void test_properties(QTestState *qts, const char *path, bool recurse) { @@ -21,6 +20,7 @@ static void test_properties(QTestState *qts, const char *path, bool recurse) QDict *response, *tuple, *tmp; QList *list; QListEntry *entry; + GSList *children = NULL, *links = NULL; g_test_message("Obtaining properties of %s", path); response = qtest_qmp(qts, "{ 'execute': 'qom-list'," @@ -42,11 +42,14 @@ static void test_properties(QTestState *qts, const char *path, bool recurse) if (is_child || is_link) { child_path = g_strdup_printf("%s/%s", path, qdict_get_str(tuple, "name")); - test_properties(qts, child_path, is_child); - g_free(child_path); + if (is_child) { + children = g_slist_prepend(children, child_path); + } else { + links = g_slist_prepend(links, child_path); + } } else { const char *prop = qdict_get_str(tuple, "name"); - g_test_message("Testing property %s.%s", path, prop); + g_test_message("-> %s", prop); tmp = qtest_qmp(qts, "{ 'execute': 'qom-get'," " 'arguments': { 'path': %s, 'property': %s } }", @@ -56,6 +59,18 @@ static void test_properties(QTestState *qts, const char *path, bool recurse) qobject_unref(tmp); } } + + while (links) { + test_properties(qts, links->data, false); + g_free(links->data); + links = g_slist_delete_link(links, links); + } + while (children) { + test_properties(qts, children->data, true); + g_free(children->data); + children = g_slist_delete_link(children, children); + } + qobject_unref(response); } diff --git a/tests/qtest/qos-test.c b/tests/qtest/qos-test.c index f97d0a08fd..5da4091ec3 100644 --- a/tests/qtest/qos-test.c +++ b/tests/qtest/qos-test.c @@ -25,7 +25,7 @@ #include "qapi/qobject-input-visitor.h" 
#include "qapi/qapi-visit-machine.h" #include "qapi/qapi-visit-qom.h" -#include "libqos/malloc.h" +#include "libqos/libqos-malloc.h" #include "libqos/qgraph.h" #include "libqos/qgraph_internal.h" #include "libqos/qos_external.h" @@ -185,7 +185,9 @@ static void run_one_test(const void *arg) static void subprocess_run_one_test(const void *arg) { const gchar *path = arg; - g_test_trap_subprocess(path, 0, 0); + g_test_trap_subprocess(path, 180 * G_USEC_PER_SEC, + G_TEST_SUBPROCESS_INHERIT_STDOUT | + G_TEST_SUBPROCESS_INHERIT_STDERR); g_test_trap_assert_passed(); } @@ -319,6 +321,11 @@ static void walk_path(QOSGraphNode *orig_path, int len) int main(int argc, char **argv, char** envp) { g_test_init(&argc, &argv, NULL); + + if (g_test_subprocess()) { + qos_printf("qos_test running single test in subprocess\n"); + } + if (g_test_verbose()) { qos_printf("ENVIRONMENT VARIABLES: {\n"); for (char **env = envp; *env != 0; env++) { diff --git a/tests/qtest/readconfig-test.c b/tests/qtest/readconfig-test.c new file mode 100644 index 0000000000..c7a9b0c7dd --- /dev/null +++ b/tests/qtest/readconfig-test.c @@ -0,0 +1,195 @@ +/* + * Validate -readconfig + * + * Copyright (c) 2022 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "libqtest.h" +#include "qapi/error.h" +#include "qapi/qapi-visit-machine.h" +#include "qapi/qapi-visit-qom.h" +#include "qapi/qapi-visit-ui.h" +#include "qapi/qmp/qdict.h" +#include "qapi/qmp/qlist.h" +#include "qapi/qobject-input-visitor.h" +#include "qapi/qmp/qstring.h" +#include "qemu/units.h" + +static QTestState *qtest_init_with_config(const char *cfgdata) +{ + GError *error = NULL; + g_autofree char *args = NULL; + int cfgfd = -1; + g_autofree char *cfgpath = NULL; + QTestState *qts; + ssize_t ret; + + cfgfd = g_file_open_tmp("readconfig-test-XXXXXX", &cfgpath, &error); + g_assert_no_error(error); + g_assert_cmpint(cfgfd, >=, 0); + + ret = qemu_write_full(cfgfd, cfgdata, strlen(cfgdata)); + close(cfgfd); + if (ret < 0) { + unlink(cfgpath); + } + g_assert_cmpint(ret, ==, strlen(cfgdata)); + + args = g_strdup_printf("-nodefaults -machine none -readconfig %s", cfgpath); + + qts = qtest_init(args); + + unlink(cfgpath); + + return qts; +} + +static void test_x86_memdev_resp(QObject *res) +{ + Visitor *v; + g_autoptr(MemdevList) memdevs = NULL; + Memdev *memdev; + + g_assert(res); + v = qobject_input_visitor_new(res); + visit_type_MemdevList(v, NULL, &memdevs, &error_abort); + + g_assert(memdevs); + g_assert(memdevs->value); + g_assert(!memdevs->next); + + memdev = memdevs->value; + g_assert_cmpstr(memdev->id, ==, "ram"); + g_assert_cmpint(memdev->size, ==, 200 * MiB); + + visit_free(v); +} + +static void test_x86_memdev(void) +{ + QDict *resp; + QTestState *qts; + const char *cfgdata = + "[memory]\n" + "size = \"200\""; + + qts = qtest_init_with_config(cfgdata); + /* Test valid command */ + resp = qtest_qmp(qts, "{ 'execute': 'query-memdev' }"); + test_x86_memdev_resp(qdict_get(resp, "return")); + qobject_unref(resp); + + qtest_quit(qts); +} + + +#ifdef CONFIG_SPICE +static void test_spice_resp(QObject *res) +{ + Visitor *v; + g_autoptr(SpiceInfo) spice = NULL; + + g_assert(res); + v = qobject_input_visitor_new(res); + visit_type_SpiceInfo(v, "spice", &spice, &error_abort); + + g_assert(spice); + g_assert(spice->enabled); + + visit_free(v); +} + +static void test_spice(void) +{ + QDict *resp; + QTestState *qts; + const char *cfgdata = + 
"[spice]\n" + "disable-ticketing = \"on\"\n" + "unix = \"on\"\n"; + + qts = qtest_init_with_config(cfgdata); + /* Test valid command */ + resp = qtest_qmp(qts, "{ 'execute': 'query-spice' }"); + test_spice_resp(qdict_get(resp, "return")); + qobject_unref(resp); + + qtest_quit(qts); +} +#endif + +static void test_object_rng_resp(QObject *res) +{ + Visitor *v; + g_autoptr(ObjectPropertyInfoList) objs = NULL; + ObjectPropertyInfoList *tmp; + ObjectPropertyInfo *obj; + bool seen_rng = false; + + g_assert(res); + v = qobject_input_visitor_new(res); + visit_type_ObjectPropertyInfoList(v, NULL, &objs, &error_abort); + + g_assert(objs); + tmp = objs; + while (tmp) { + g_assert(tmp->value); + + obj = tmp->value; + if (g_str_equal(obj->name, "rng0") && + g_str_equal(obj->type, "child")) { + seen_rng = true; + break; + } + + tmp = tmp->next; + } + + g_assert(seen_rng); + + visit_free(v); +} + +static void test_object_rng(void) +{ + QDict *resp; + QTestState *qts; + const char *cfgdata = + "[object]\n" + "qom-type = \"rng-builtin\"\n" + "id = \"rng0\"\n"; + + qts = qtest_init_with_config(cfgdata); + /* Test valid command */ + resp = qtest_qmp(qts, + "{ 'execute': 'qom-list'," + " 'arguments': {'path': '/objects' }}"); + test_object_rng_resp(qdict_get(resp, "return")); + qobject_unref(resp); + + qtest_quit(qts); +} + +int main(int argc, char *argv[]) +{ + const char *arch; + g_test_init(&argc, &argv, NULL); + + arch = qtest_get_arch(); + + if (g_str_equal(arch, "i386") || + g_str_equal(arch, "x86_64")) { + qtest_add_func("readconfig/x86/memdev", test_x86_memdev); + } +#ifdef CONFIG_SPICE + qtest_add_func("readconfig/spice", test_spice); +#endif + + qtest_add_func("readconfig/object-rng", test_object_rng); + + return g_test_run(); +} diff --git a/tests/qtest/rtas-test.c b/tests/qtest/rtas-test.c index 5f1194a6eb..50df60e5b2 100644 --- a/tests/qtest/rtas-test.c +++ b/tests/qtest/rtas-test.c @@ -1,6 +1,6 @@ #include "qemu/osdep.h" #include "qemu/cutils.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "libqos/libqos-spapr.h" #include "libqos/rtas.h" diff --git a/tests/qtest/rtc-test.c b/tests/qtest/rtc-test.c index 8126ab1bdb..02ed4e1238 100644 --- a/tests/qtest/rtc-test.c +++ b/tests/qtest/rtc-test.c @@ -111,7 +111,7 @@ static void cmos_get_date_time(struct tm *date) date->tm_mday = mday; date->tm_mon = mon - 1; date->tm_year = base_year + year - 1900; -#ifndef __sun__ +#if !defined(__sun__) && !defined(_WIN32) date->tm_gmtoff = 0; #endif diff --git a/tests/qtest/rtl8139-test.c b/tests/qtest/rtl8139-test.c index 4506049264..8fa3313cc3 100644 --- a/tests/qtest/rtl8139-test.c +++ b/tests/qtest/rtl8139-test.c @@ -11,7 +11,6 @@ #include "libqtest-single.h" #include "libqos/pci-pc.h" #include "qemu/timer.h" -#include "qemu-common.h" /* Tests only initialization so far. 
TODO: Replace with functional tests */ static void nop(void) diff --git a/tests/qtest/sdhci-test.c b/tests/qtest/sdhci-test.c index a110cfe321..6275e7626c 100644 --- a/tests/qtest/sdhci-test.c +++ b/tests/qtest/sdhci-test.c @@ -10,7 +10,7 @@ #include "qemu/osdep.h" #include "hw/registerfields.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/module.h" #include "libqos/pci-pc.h" #include "hw/pci/pci.h" diff --git a/tests/qtest/spapr-phb-test.c b/tests/qtest/spapr-phb-test.c index ea8d596507..093dc22f2f 100644 --- a/tests/qtest/spapr-phb-test.c +++ b/tests/qtest/spapr-phb-test.c @@ -9,7 +9,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/module.h" #include "libqos/qgraph.h" diff --git a/tests/qtest/tco-test.c b/tests/qtest/tco-test.c index 47bc7ad301..254f735370 100644 --- a/tests/qtest/tco-test.c +++ b/tests/qtest/tco-test.c @@ -9,7 +9,7 @@ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "libqos/pci.h" #include "libqos/pci-pc.h" #include "qapi/qmp/qdict.h" diff --git a/tests/qtest/test-filter-mirror.c b/tests/qtest/test-filter-mirror.c index bc0dee64dd..c8b0a92b53 100644 --- a/tests/qtest/test-filter-mirror.c +++ b/tests/qtest/test-filter-mirror.c @@ -9,8 +9,7 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qapi/qmp/qdict.h" #include "qemu/iov.h" #include "qemu/sockets.h" @@ -28,13 +27,8 @@ static void test_mirror(void) char *recv_buf; uint32_t size = sizeof(send_buf); size = htonl(size); - const char *devstr = "e1000"; QTestState *qts; - if (g_str_equal(qtest_get_arch(), "s390x")) { - devstr = "virtio-net-ccw"; - } - ret = socketpair(PF_UNIX, SOCK_STREAM, 0, send_sock); g_assert_cmpint(ret, !=, -1); @@ -42,11 +36,10 @@ static void test_mirror(void) g_assert_cmpint(ret, !=, -1); qts = qtest_initf( - "-netdev socket,id=qtest-bn0,fd=%d " - "-device %s,netdev=qtest-bn0,id=qtest-e0 " + "-nic socket,id=qtest-bn0,fd=%d " "-chardev socket,id=mirror0,fd=%d " "-object filter-mirror,id=qtest-f0,netdev=qtest-bn0,queue=tx,outdev=mirror0 " - , send_sock[1], devstr, recv_sock[1]); + , send_sock[1], recv_sock[1]); struct iovec iov[] = { { @@ -64,13 +57,13 @@ static void test_mirror(void) g_assert_cmpint(ret, ==, sizeof(send_buf) + sizeof(size)); close(send_sock[0]); - ret = qemu_recv(recv_sock[0], &len, sizeof(len), 0); + ret = recv(recv_sock[0], &len, sizeof(len), 0); g_assert_cmpint(ret, ==, sizeof(len)); len = ntohl(len); g_assert_cmpint(len, ==, sizeof(send_buf)); recv_buf = g_malloc(len); - ret = qemu_recv(recv_sock[0], recv_buf, len, 0); + ret = recv(recv_sock[0], recv_buf, len, 0); g_assert_cmpstr(recv_buf, ==, send_buf); g_free(recv_buf); diff --git a/tests/qtest/test-filter-redirector.c b/tests/qtest/test-filter-redirector.c index 4269b2cdd9..24ca9280f8 100644 --- a/tests/qtest/test-filter-redirector.c +++ b/tests/qtest/test-filter-redirector.c @@ -51,8 +51,7 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qapi/qmp/qdict.h" #include "qemu/iov.h" #include "qemu/sockets.h" @@ -62,16 +61,6 @@ /* TODO actually test the results and get rid of this */ #define qmp_discard_response(qs, ...) 
qobject_unref(qtest_qmp(qs, __VA_ARGS__)) -static const char *get_devstr(void) -{ - if (g_str_equal(qtest_get_arch(), "s390x")) { - return "virtio-net-ccw"; - } - - return "rtl8139"; -} - - static void test_redirector_tx(void) { int backend_sock[2], recv_sock; @@ -93,8 +82,7 @@ static void test_redirector_tx(void) g_assert_cmpint(ret, !=, -1); qts = qtest_initf( - "-netdev socket,id=qtest-bn0,fd=%d " - "-device %s,netdev=qtest-bn0,id=qtest-e0 " + "-nic socket,id=qtest-bn0,fd=%d " "-chardev socket,id=redirector0,path=%s,server=on,wait=off " "-chardev socket,id=redirector1,path=%s,server=on,wait=off " "-chardev socket,id=redirector2,path=%s " @@ -103,7 +91,7 @@ static void test_redirector_tx(void) "-object filter-redirector,id=qtest-f1,netdev=qtest-bn0," "queue=tx,indev=redirector2 " "-object filter-redirector,id=qtest-f2,netdev=qtest-bn0," - "queue=tx,outdev=redirector1 ", backend_sock[1], get_devstr(), + "queue=tx,outdev=redirector1 ", backend_sock[1], sock_path0, sock_path1, sock_path0); recv_sock = unix_connect(sock_path1, NULL); @@ -126,13 +114,13 @@ static void test_redirector_tx(void) g_assert_cmpint(ret, ==, sizeof(send_buf) + sizeof(size)); close(backend_sock[0]); - ret = qemu_recv(recv_sock, &len, sizeof(len), 0); + ret = recv(recv_sock, &len, sizeof(len), 0); g_assert_cmpint(ret, ==, sizeof(len)); len = ntohl(len); g_assert_cmpint(len, ==, sizeof(send_buf)); recv_buf = g_malloc(len); - ret = qemu_recv(recv_sock, recv_buf, len, 0); + ret = recv(recv_sock, recv_buf, len, 0); g_assert_cmpstr(recv_buf, ==, send_buf); g_free(recv_buf); @@ -163,8 +151,7 @@ static void test_redirector_rx(void) g_assert_cmpint(ret, !=, -1); qts = qtest_initf( - "-netdev socket,id=qtest-bn0,fd=%d " - "-device %s,netdev=qtest-bn0,id=qtest-e0 " + "-nic socket,id=qtest-bn0,fd=%d " "-chardev socket,id=redirector0,path=%s,server=on,wait=off " "-chardev socket,id=redirector1,path=%s,server=on,wait=off " "-chardev socket,id=redirector2,path=%s " @@ -173,7 +160,7 @@ static void test_redirector_rx(void) "-object filter-redirector,id=qtest-f1,netdev=qtest-bn0," "queue=rx,outdev=redirector2 " "-object filter-redirector,id=qtest-f2,netdev=qtest-bn0," - "queue=rx,indev=redirector1 ", backend_sock[1], get_devstr(), + "queue=rx,indev=redirector1 ", backend_sock[1], sock_path0, sock_path1, sock_path0); struct iovec iov[] = { @@ -194,13 +181,13 @@ static void test_redirector_rx(void) ret = iov_send(send_sock, iov, 2, 0, sizeof(size) + sizeof(send_buf)); g_assert_cmpint(ret, ==, sizeof(send_buf) + sizeof(size)); - ret = qemu_recv(backend_sock[0], &len, sizeof(len), 0); + ret = recv(backend_sock[0], &len, sizeof(len), 0); g_assert_cmpint(ret, ==, sizeof(len)); len = ntohl(len); g_assert_cmpint(len, ==, sizeof(send_buf)); recv_buf = g_malloc(len); - ret = qemu_recv(backend_sock[0], recv_buf, len, 0); + ret = recv(backend_sock[0], recv_buf, len, 0); g_assert_cmpstr(recv_buf, ==, send_buf); close(send_sock); diff --git a/tests/qtest/test-hmp.c b/tests/qtest/test-hmp.c index 413eb95d2a..f8b22abe4c 100644 --- a/tests/qtest/test-hmp.c +++ b/tests/qtest/test-hmp.c @@ -15,7 +15,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" static int verbose; diff --git a/tests/qtest/test-netfilter.c b/tests/qtest/test-netfilter.c index 785b6f3226..b09ef7fae9 100644 --- a/tests/qtest/test-netfilter.c +++ b/tests/qtest/test-netfilter.c @@ -178,11 +178,6 @@ int main(int argc, char **argv) { int ret; char *args; - const char *devstr = "e1000"; - - if (g_str_equal(qtest_get_arch(), "s390x")) { - devstr = 
"virtio-net-ccw"; - } g_test_init(&argc, &argv, NULL); qtest_add_func("/netfilter/addremove_one", add_one_netfilter); @@ -192,8 +187,7 @@ int main(int argc, char **argv) qtest_add_func("/netfilter/remove_netdev_multi", remove_netdev_with_multi_netfilter); - args = g_strdup_printf("-netdev user,id=qtest-bn0 " - "-device %s,netdev=qtest-bn0", devstr); + args = g_strdup_printf("-nic user,id=qtest-bn0"); qtest_start(args); ret = g_test_run(); diff --git a/tests/qtest/test-x86-cpuid-compat.c b/tests/qtest/test-x86-cpuid-compat.c index f28848e06e..b39c9055b3 100644 --- a/tests/qtest/test-x86-cpuid-compat.c +++ b/tests/qtest/test-x86-cpuid-compat.c @@ -1,5 +1,4 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qlist.h" #include "qapi/qmp/qnum.h" @@ -302,54 +301,65 @@ int main(int argc, char **argv) /* Check compatibility of old machine-types that didn't * auto-increase level/xlevel/xlevel2: */ - - add_cpuid_test("x86/cpuid/auto-level/pc-2.7", - "-machine pc-i440fx-2.7 -cpu 486,arat=on,avx512vbmi=on,xsaveopt=on", - "level", 1); - add_cpuid_test("x86/cpuid/auto-xlevel/pc-2.7", - "-machine pc-i440fx-2.7 -cpu 486,3dnow=on,sse4a=on,invtsc=on,npt=on,svm=on", - "xlevel", 0); - add_cpuid_test("x86/cpuid/auto-xlevel2/pc-2.7", - "-machine pc-i440fx-2.7 -cpu 486,xstore=on", - "xlevel2", 0); + if (qtest_has_machine("pc-i440fx-2.7")) { + add_cpuid_test("x86/cpuid/auto-level/pc-2.7", + "-machine pc-i440fx-2.7 -cpu 486,arat=on,avx512vbmi=on,xsaveopt=on", + "level", 1); + add_cpuid_test("x86/cpuid/auto-xlevel/pc-2.7", + "-machine pc-i440fx-2.7 -cpu 486,3dnow=on,sse4a=on,invtsc=on,npt=on,svm=on", + "xlevel", 0); + add_cpuid_test("x86/cpuid/auto-xlevel2/pc-2.7", + "-machine pc-i440fx-2.7 -cpu 486,xstore=on", + "xlevel2", 0); + } /* * QEMU 1.4.0 had auto-level enabled for CPUID[7], already, * and the compat code that sets default level shouldn't * disable the auto-level=7 code: */ - add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-1.4/off", - "-machine pc-i440fx-1.4 -cpu Nehalem", - "level", 2); - add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-1.5/on", - "-machine pc-i440fx-1.4 -cpu Nehalem,smap=on", - "level", 7); - add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/off", - "-machine pc-i440fx-2.3 -cpu Penryn", - "level", 4); - add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/on", - "-machine pc-i440fx-2.3 -cpu Penryn,erms=on", - "level", 7); - add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.9/off", - "-machine pc-i440fx-2.9 -cpu Conroe", - "level", 10); - add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.9/on", - "-machine pc-i440fx-2.9 -cpu Conroe,erms=on", - "level", 10); + if (qtest_has_machine("pc-i440fx-1.4")) { + add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-1.4/off", + "-machine pc-i440fx-1.4 -cpu Nehalem", + "level", 2); + add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-1.5/on", + "-machine pc-i440fx-1.4 -cpu Nehalem,smap=on", + "level", 7); + } + if (qtest_has_machine("pc-i440fx-2.3")) { + add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/off", + "-machine pc-i440fx-2.3 -cpu Penryn", + "level", 4); + add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/on", + "-machine pc-i440fx-2.3 -cpu Penryn,erms=on", + "level", 7); + } + if (qtest_has_machine("pc-i440fx-2.9")) { + add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.9/off", + "-machine pc-i440fx-2.9 -cpu Conroe", + "level", 10); + add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.9/on", + "-machine pc-i440fx-2.9 -cpu Conroe,erms=on", + "level", 10); + } /* * xlevel doesn't have any feature that 
triggers auto-level * code on old machine-types. Just check that the compat code * is working correctly: */ - add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.3", - "-machine pc-i440fx-2.3 -cpu SandyBridge", - "xlevel", 0x8000000a); - add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-off", - "-machine pc-i440fx-2.4 -cpu SandyBridge,", - "xlevel", 0x80000008); - add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-on", - "-machine pc-i440fx-2.4 -cpu SandyBridge,svm=on,npt=on", - "xlevel", 0x80000008); + if (qtest_has_machine("pc-i440fx-2.3")) { + add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.3", + "-machine pc-i440fx-2.3 -cpu SandyBridge", + "xlevel", 0x8000000a); + } + if (qtest_has_machine("pc-i440fx-2.4")) { + add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-off", + "-machine pc-i440fx-2.4 -cpu SandyBridge,", + "xlevel", 0x80000008); + add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-on", + "-machine pc-i440fx-2.4 -cpu SandyBridge,svm=on,npt=on", + "xlevel", 0x80000008); + } /* Test feature parsing */ add_feature_test("x86/cpuid/features/plus", diff --git a/tests/qtest/tpm-crb-swtpm-test.c b/tests/qtest/tpm-crb-swtpm-test.c index 1d82a48c04..40254f762f 100644 --- a/tests/qtest/tpm-crb-swtpm-test.c +++ b/tests/qtest/tpm-crb-swtpm-test.c @@ -13,9 +13,8 @@ */ #include "qemu/osdep.h" -#include -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/module.h" #include "tpm-tests.h" #include "hw/acpi/tpm.h" @@ -62,9 +61,9 @@ int main(int argc, char **argv) tpm_crb_swtpm_migration_test); ret = g_test_run(); - g_rmdir(ts.dst_tpm_path); + tpm_util_rmdir(ts.dst_tpm_path); g_free(ts.dst_tpm_path); - g_rmdir(ts.src_tpm_path); + tpm_util_rmdir(ts.src_tpm_path); g_free(ts.src_tpm_path); g_free(ts.uri); diff --git a/tests/qtest/tpm-crb-test.c b/tests/qtest/tpm-crb-test.c index ed533900d1..7b94453390 100644 --- a/tests/qtest/tpm-crb-test.c +++ b/tests/qtest/tpm-crb-test.c @@ -26,7 +26,7 @@ uint64_t tpm_tis_base_addr = TPM_TIS_ADDR_BASE; static void tpm_crb_test(const void *data) { - const TestState *s = data; + const TPMTestState *s = data; uint32_t intfid = readl(TPM_CRB_ADDR_BASE + A_CRB_INTF_ID); uint32_t csize = readl(TPM_CRB_ADDR_BASE + A_CRB_CTRL_CMD_SIZE); uint64_t caddr = readq(TPM_CRB_ADDR_BASE + A_CRB_CTRL_CMD_LADDR); @@ -145,7 +145,7 @@ int main(int argc, char **argv) int ret; char *args, *tmp_path = g_dir_make_tmp("qemu-tpm-crb-test.XXXXXX", NULL); GThread *thread; - TestState test; + TPMTestState test; module_call_init(MODULE_INIT_QOM); g_test_init(&argc, &argv, NULL); @@ -156,6 +156,7 @@ int main(int argc, char **argv) g_mutex_init(&test.data_mutex); g_cond_init(&test.data_cond); test.data_cond_signal = false; + test.tpm_version = TPM_VERSION_2_0; thread = g_thread_new(NULL, tpm_emu_ctrl_thread, &test); tpm_emu_test_wait_cond(&test); diff --git a/tests/qtest/tpm-emu.c b/tests/qtest/tpm-emu.c index 2e8eb7b94f..2994d1cf42 100644 --- a/tests/qtest/tpm-emu.c +++ b/tests/qtest/tpm-emu.c @@ -16,9 +16,11 @@ #include "backends/tpm/tpm_ioctl.h" #include "io/channel-socket.h" #include "qapi/error.h" +#include "qapi/qmp/qlist.h" +#include "qapi/qmp/qstring.h" #include "tpm-emu.h" -void tpm_emu_test_wait_cond(TestState *s) +void tpm_emu_test_wait_cond(TPMTestState *s) { gint64 end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND; @@ -36,7 +38,7 @@ void tpm_emu_test_wait_cond(TestState *s) static void *tpm_emu_tpm_thread(void *data) { - TestState *s = data; + TPMTestState *s = data; QIOChannel *ioc = s->tpm_ioc; s->tpm_msg = g_new(struct 
tpm_hdr, 1); @@ -56,9 +58,21 @@ static void *tpm_emu_tpm_thread(void *data) s->tpm_msg->code = be32_to_cpu(s->tpm_msg->code); /* reply error */ - s->tpm_msg->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS); - s->tpm_msg->len = cpu_to_be32(sizeof(struct tpm_hdr)); - s->tpm_msg->code = cpu_to_be32(TPM_RC_FAILURE); + switch (s->tpm_version) { + case TPM_VERSION_2_0: + s->tpm_msg->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS); + s->tpm_msg->len = cpu_to_be32(sizeof(struct tpm_hdr)); + s->tpm_msg->code = cpu_to_be32(TPM_RC_FAILURE); + break; + case TPM_VERSION_1_2: + s->tpm_msg->tag = cpu_to_be16(TPM_TAG_RSP_COMMAND); + s->tpm_msg->len = cpu_to_be32(sizeof(struct tpm_hdr)); + s->tpm_msg->code = cpu_to_be32(TPM_FAIL); + break; + default: + g_debug("unsupport TPM version %u", s->tpm_version); + g_assert_not_reached(); + } qio_channel_write(ioc, (char *)s->tpm_msg, be32_to_cpu(s->tpm_msg->len), &error_abort); } @@ -71,7 +85,7 @@ static void *tpm_emu_tpm_thread(void *data) void *tpm_emu_ctrl_thread(void *data) { - TestState *s = data; + TPMTestState *s = data; QIOChannelSocket *lioc = qio_channel_socket_new(); QIOChannel *ioc; @@ -180,3 +194,39 @@ void *tpm_emu_ctrl_thread(void *data) object_unref(OBJECT(lioc)); return NULL; } + +bool tpm_model_is_available(const char *args, const char *tpm_if) +{ + QTestState *qts; + QDict *rsp_tpm; + bool ret = false; + + qts = qtest_init(args); + if (!qts) { + return false; + } + + rsp_tpm = qtest_qmp(qts, "{ 'execute': 'query-tpm'}"); + if (!qdict_haskey(rsp_tpm, "error")) { + QDict *rsp_models = qtest_qmp(qts, + "{ 'execute': 'query-tpm-models'}"); + if (qdict_haskey(rsp_models, "return")) { + QList *models = qdict_get_qlist(rsp_models, "return"); + QListEntry *e; + + QLIST_FOREACH_ENTRY(models, e) { + QString *s = qobject_to(QString, qlist_entry_obj(e)); + const char *ename = qstring_get_str(s); + if (!strcmp(ename, tpm_if)) { + ret = true; + break; + } + } + } + qobject_unref(rsp_models); + } + qobject_unref(rsp_tpm); + qtest_quit(qts); + + return ret; +} diff --git a/tests/qtest/tpm-emu.h b/tests/qtest/tpm-emu.h index 73f3bed0c4..712cee9b7a 100644 --- a/tests/qtest/tpm-emu.h +++ b/tests/qtest/tpm-emu.h @@ -16,8 +16,13 @@ #define TPM_RC_FAILURE 0x101 #define TPM2_ST_NO_SESSIONS 0x8001 +#define TPM_FAIL 9 +#define TPM_TAG_RSP_COMMAND 0xc4 + #include "qemu/sockets.h" #include "io/channel.h" +#include "sysemu/tpm.h" +#include "libqtest.h" struct tpm_hdr { uint16_t tag; @@ -26,7 +31,14 @@ struct tpm_hdr { char buffer[]; } QEMU_PACKED; -typedef struct TestState { +#ifndef CONFIG_TPM +enum TPMVersion { + TPM_VERSION_1_2 = 1, + TPM_VERSION_2_0 = 2, +}; +#endif + +typedef struct TPMTestState { GMutex data_mutex; GCond data_cond; bool data_cond_signal; @@ -34,9 +46,11 @@ typedef struct TestState { QIOChannel *tpm_ioc; GThread *emu_tpm_thread; struct tpm_hdr *tpm_msg; -} TestState; + enum TPMVersion tpm_version; +} TPMTestState; -void tpm_emu_test_wait_cond(TestState *s); +void tpm_emu_test_wait_cond(TPMTestState *s); void *tpm_emu_ctrl_thread(void *data); +bool tpm_model_is_available(const char *args, const char *tpm_if); #endif /* TESTS_TPM_EMU_H */ diff --git a/tests/qtest/tpm-tis-device-swtpm-test.c b/tests/qtest/tpm-tis-device-swtpm-test.c index f7126eff9e..8c067fddd4 100644 --- a/tests/qtest/tpm-tis-device-swtpm-test.c +++ b/tests/qtest/tpm-tis-device-swtpm-test.c @@ -14,9 +14,8 @@ */ #include "qemu/osdep.h" -#include -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/module.h" #include "tpm-tests.h" #include "hw/acpi/tpm.h" @@ -66,9 +65,9 @@ int main(int argc, 
char **argv) tpm_tis_swtpm_migration_test); ret = g_test_run(); - g_rmdir(ts.dst_tpm_path); + tpm_util_rmdir(ts.dst_tpm_path); g_free(ts.dst_tpm_path); - g_rmdir(ts.src_tpm_path); + tpm_util_rmdir(ts.src_tpm_path); g_free(ts.src_tpm_path); g_free(ts.uri); diff --git a/tests/qtest/tpm-tis-device-test.c b/tests/qtest/tpm-tis-device-test.c index 63ed36440f..3ddefb51ec 100644 --- a/tests/qtest/tpm-tis-device-test.c +++ b/tests/qtest/tpm-tis-device-test.c @@ -33,7 +33,7 @@ int main(int argc, char **argv) { char *tmp_path = g_dir_make_tmp("qemu-tpm-tis-device-test.XXXXXX", NULL); GThread *thread; - TestState test; + TPMTestState test; char *args; int ret; @@ -46,6 +46,7 @@ int main(int argc, char **argv) g_mutex_init(&test.data_mutex); g_cond_init(&test.data_cond); test.data_cond_signal = false; + test.tpm_version = TPM_VERSION_2_0; thread = g_thread_new(NULL, tpm_emu_ctrl_thread, &test); tpm_emu_test_wait_cond(&test); diff --git a/tests/qtest/tpm-tis-swtpm-test.c b/tests/qtest/tpm-tis-swtpm-test.c index fa590e68f1..11539c0a52 100644 --- a/tests/qtest/tpm-tis-swtpm-test.c +++ b/tests/qtest/tpm-tis-swtpm-test.c @@ -13,9 +13,8 @@ */ #include "qemu/osdep.h" -#include -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/module.h" #include "tpm-tests.h" #include "hw/acpi/tpm.h" @@ -61,9 +60,9 @@ int main(int argc, char **argv) tpm_tis_swtpm_migration_test); ret = g_test_run(); - g_rmdir(ts.dst_tpm_path); + tpm_util_rmdir(ts.dst_tpm_path); g_free(ts.dst_tpm_path); - g_rmdir(ts.src_tpm_path); + tpm_util_rmdir(ts.src_tpm_path); g_free(ts.src_tpm_path); g_free(ts.uri); diff --git a/tests/qtest/tpm-tis-test.c b/tests/qtest/tpm-tis-test.c index 79ffbc943e..a4a25ba745 100644 --- a/tests/qtest/tpm-tis-test.c +++ b/tests/qtest/tpm-tis-test.c @@ -29,7 +29,7 @@ int main(int argc, char **argv) int ret; char *args, *tmp_path = g_dir_make_tmp("qemu-tpm-tis-test.XXXXXX", NULL); GThread *thread; - TestState test; + TPMTestState test; module_call_init(MODULE_INIT_QOM); g_test_init(&argc, &argv, NULL); @@ -40,6 +40,7 @@ int main(int argc, char **argv) g_mutex_init(&test.data_mutex); g_cond_init(&test.data_cond); test.data_cond_signal = false; + test.tpm_version = TPM_VERSION_2_0; thread = g_thread_new(NULL, tpm_emu_ctrl_thread, &test); tpm_emu_test_wait_cond(&test); diff --git a/tests/qtest/tpm-tis-util.c b/tests/qtest/tpm-tis-util.c index 9aff503fd8..939893bf01 100644 --- a/tests/qtest/tpm-tis-util.c +++ b/tests/qtest/tpm-tis-util.c @@ -373,7 +373,7 @@ void tpm_tis_test_check_access_reg_release(const void *data) */ void tpm_tis_test_check_transmit(const void *data) { - const TestState *s = data; + const TPMTestState *s = data; uint8_t access; uint32_t sts; uint16_t bcount; diff --git a/tests/qtest/tpm-util.c b/tests/qtest/tpm-util.c index 3a40ff3f96..a7efe2d0d2 100644 --- a/tests/qtest/tpm-util.c +++ b/tests/qtest/tpm-util.c @@ -13,9 +13,10 @@ */ #include "qemu/osdep.h" +#include #include "hw/acpi/tpm.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "tpm-util.h" #include "qapi/qmp/qdict.h" @@ -292,3 +293,21 @@ void tpm_util_migration_start_qemu(QTestState **src_qemu, g_free(src_qemu_args); g_free(dst_qemu_args); } + +/* Remove directory with remainders of swtpm */ +void tpm_util_rmdir(const char *path) +{ + char *filename; + int ret; + + filename = g_strdup_printf("%s/tpm2-00.permall", path); + g_unlink(filename); + g_free(filename); + + filename = g_strdup_printf("%s/.lock", path); + g_unlink(filename); + g_free(filename); + + ret = g_rmdir(path); + g_assert(!ret); +} diff --git 
a/tests/qtest/tpm-util.h b/tests/qtest/tpm-util.h index 3b97d69017..80720afac0 100644 --- a/tests/qtest/tpm-util.h +++ b/tests/qtest/tpm-util.h @@ -53,5 +53,6 @@ void tpm_util_migration_start_qemu(QTestState **src_qemu, const char *machine_options); void tpm_util_wait_for_migration_complete(QTestState *who); +void tpm_util_rmdir(const char *path); #endif /* TESTS_TPM_UTIL_H */ diff --git a/tests/qtest/tulip-test.c b/tests/qtest/tulip-test.c index da16cbfafc..2fb6c4d5a7 100644 --- a/tests/qtest/tulip-test.c +++ b/tests/qtest/tulip-test.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/module.h" #include "libqos/qgraph.h" #include "libqos/pci.h" diff --git a/tests/qtest/vhost-user-blk-test.c b/tests/qtest/vhost-user-blk-test.c index 6f108a1b62..07a4c2d500 100644 --- a/tests/qtest/vhost-user-blk-test.c +++ b/tests/qtest/vhost-user-blk-test.c @@ -37,7 +37,7 @@ typedef struct QVirtioBlkReq { uint8_t status; } QVirtioBlkReq; -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN static const bool host_is_big_endian = true; #else static const bool host_is_big_endian; /* false */ @@ -676,6 +676,11 @@ static void pci_hotplug(void *obj, void *data, QGuestAllocator *t_alloc) QVirtioPCIDevice *dev; QTestState *qts = dev1->pdev->bus->qts; + if (dev1->pdev->bus->not_hotpluggable) { + g_test_skip("pci bus does not support hotplug"); + return; + } + /* plug secondary disk */ qtest_qmp_device_add(qts, "vhost-user-blk-pci", "drv1", "{'addr': %s, 'chardev': 'char2'}", @@ -703,6 +708,11 @@ static void multiqueue(void *obj, void *data, QGuestAllocator *t_alloc) uint64_t features; uint16_t num_queues; + if (pdev1->pdev->bus->not_hotpluggable) { + g_test_skip("bus pci.0 does not support hotplug"); + return; + } + /* * The primary device has 1 queue and VIRTIO_BLK_F_MQ is not enabled. 
The * VIRTIO specification allows VIRTIO_BLK_F_MQ to be enabled when there is @@ -831,7 +841,8 @@ static char *create_listen_socket(int *fd) char *path; /* No race because our pid makes the path unique */ - path = g_strdup_printf("/tmp/qtest-%d-sock.XXXXXX", getpid()); + path = g_strdup_printf("%s/qtest-%d-sock.XXXXXX", + g_get_tmp_dir(), getpid()); tmp_fd = mkstemp(path); g_assert_cmpint(tmp_fd, >=, 0); close(tmp_fd); @@ -906,9 +917,9 @@ static void start_vhost_user_blk(GString *cmd_line, int vus_instances, img_path = drive_create(); g_string_append_printf(storage_daemon_command, "--blockdev driver=file,node-name=disk%d,filename=%s " - "--export type=vhost-user-blk,id=disk%d,addr.type=unix,addr.path=%s," + "--export type=vhost-user-blk,id=disk%d,addr.type=fd,addr.str=%d," "node-name=disk%i,writable=on,num-queues=%d ", - i, img_path, i, sock_path, i, num_queues); + i, img_path, i, fd, i, num_queues); g_string_append_printf(cmd_line, "-chardev socket,id=char%d,path=%s ", i + 1, sock_path); diff --git a/tests/qtest/vhost-user-test.c b/tests/qtest/vhost-user-test.c index 3d6337fb5c..bf9f7c4248 100644 --- a/tests/qtest/vhost-user-test.c +++ b/tests/qtest/vhost-user-test.c @@ -26,11 +26,13 @@ #include "libqos/virtio-pci.h" #include "libqos/malloc-pc.h" +#include "libqos/qgraph_internal.h" #include "hw/virtio/virtio-net.h" #include "standard-headers/linux/vhost_types.h" #include "standard-headers/linux/virtio_ids.h" #include "standard-headers/linux/virtio_net.h" +#include "standard-headers/linux/virtio_gpio.h" #ifdef CONFIG_LINUX #include @@ -42,7 +44,7 @@ #define QEMU_CMD_MEMFD " -m %d -object memory-backend-memfd,id=mem,size=%dM," \ " -numa node,memdev=mem" #define QEMU_CMD_CHR " -chardev socket,id=%s,path=%s%s" -#define QEMU_CMD_NETDEV " -netdev vhost-user,id=hs0,chardev=%s,vhostforce" +#define QEMU_CMD_NETDEV " -netdev vhost-user,id=hs0,chardev=%s,vhostforce=on" #define HUGETLBFS_MAGIC 0x958458f6 @@ -52,9 +54,12 @@ #define VHOST_MAX_VIRTQUEUES 0x100 #define VHOST_USER_F_PROTOCOL_FEATURES 30 +#define VIRTIO_F_VERSION_1 32 + #define VHOST_USER_PROTOCOL_F_MQ 0 #define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1 #define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN 6 +#define VHOST_USER_PROTOCOL_F_CONFIG 9 #define VHOST_LOG_PAGE 0x1000 @@ -78,6 +83,8 @@ typedef enum VhostUserRequest { VHOST_USER_SET_PROTOCOL_FEATURES = 16, VHOST_USER_GET_QUEUE_NUM = 17, VHOST_USER_SET_VRING_ENABLE = 18, + VHOST_USER_GET_CONFIG = 24, + VHOST_USER_SET_CONFIG = 25, VHOST_USER_MAX } VhostUserRequest; @@ -137,6 +144,7 @@ enum { enum { VHOST_USER_NET, + VHOST_USER_GPIO, }; typedef struct TestServer { @@ -168,10 +176,11 @@ struct vhost_user_ops { const char *chr_opts); /* VHOST-USER commands. */ + uint64_t (*get_features)(TestServer *s); void (*set_features)(TestServer *s, CharBackend *chr, - VhostUserMsg *msg); + VhostUserMsg *msg); void (*get_protocol_features)(TestServer *s, - CharBackend *chr, VhostUserMsg *msg); + CharBackend *chr, VhostUserMsg *msg); }; static const char *init_hugepagefs(void); @@ -194,6 +203,19 @@ static void append_vhost_net_opts(TestServer *s, GString *cmd_line, chr_opts, s->chr_name); } +/* + * For GPIO there are no other magic devices we need to add (like + * block or netdev) so all we need to worry about is the vhost-user + * chardev socket. 
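 * (Illustration only, not from the patch: QEMU_CMD_CHR is
 *  " -chardev socket,id=%s,path=%s%s", so the fragment appended here is just
 *  "-chardev socket,id=<s->chr_name>,path=<s->socket_path><chr_opts>", with
 *  the concrete name and path filled in by test_server_new() at runtime.)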
+ */ +static void append_vhost_gpio_opts(TestServer *s, GString *cmd_line, + const char *chr_opts) +{ + g_string_append_printf(cmd_line, QEMU_CMD_CHR, + s->chr_name, s->socket_path, + chr_opts); +} + static void append_mem_opts(TestServer *server, GString *cmd_line, int size, enum test_memfd memfd) { @@ -302,6 +324,7 @@ static int chr_can_read(void *opaque) static void chr_read(void *opaque, const uint8_t *buf, int size) { + g_autoptr(GError) err = NULL; TestServer *s = opaque; CharBackend *chr = &s->chr; VhostUserMsg msg; @@ -315,7 +338,7 @@ static void chr_read(void *opaque, const uint8_t *buf, int size) } if (size != VHOST_USER_HDR_SIZE) { - g_test_message("Wrong message size received %d", size); + qos_printf("%s: Wrong message size received %d\n", __func__, size); return; } @@ -326,28 +349,30 @@ static void chr_read(void *opaque, const uint8_t *buf, int size) p += VHOST_USER_HDR_SIZE; size = qemu_chr_fe_read_all(chr, p, msg.size); if (size != msg.size) { - g_test_message("Wrong message size received %d != %d", - size, msg.size); + qos_printf("%s: Wrong message size received %d != %d\n", + __func__, size, msg.size); return; } } switch (msg.request) { case VHOST_USER_GET_FEATURES: + /* Mandatory for tests to define get_features */ + g_assert(s->vu_ops->get_features); + /* send back features to qemu */ msg.flags |= VHOST_USER_REPLY_MASK; msg.size = sizeof(m.payload.u64); - msg.payload.u64 = 0x1ULL << VHOST_F_LOG_ALL | - 0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES; - if (s->queues > 1) { - msg.payload.u64 |= 0x1ULL << VIRTIO_NET_F_MQ; - } + if (s->test_flags >= TEST_FLAGS_BAD) { msg.payload.u64 = 0; s->test_flags = TEST_FLAGS_END; + } else { + msg.payload.u64 = s->vu_ops->get_features(s); } - p = (uint8_t *) &msg; - qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size); + + qemu_chr_fe_write_all(chr, (uint8_t *) &msg, + VHOST_USER_HDR_SIZE + msg.size); break; case VHOST_USER_SET_FEATURES: @@ -356,12 +381,55 @@ static void chr_read(void *opaque, const uint8_t *buf, int size) } break; + case VHOST_USER_SET_OWNER: + /* + * We don't need to do anything here, the remote is just + * letting us know it is in charge. Just log it. + */ + qos_printf("set_owner: start of session\n"); + break; + case VHOST_USER_GET_PROTOCOL_FEATURES: if (s->vu_ops->get_protocol_features) { s->vu_ops->get_protocol_features(s, chr, &msg); } break; + case VHOST_USER_GET_CONFIG: + /* + * Treat GET_CONFIG as a NOP and just reply and let the guest + * consider we have updated its memory. Tests currently don't + * require working configs. + */ + msg.flags |= VHOST_USER_REPLY_MASK; + p = (uint8_t *) &msg; + qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size); + break; + + case VHOST_USER_SET_PROTOCOL_FEATURES: + /* + * We did set VHOST_USER_F_PROTOCOL_FEATURES so its valid for + * the remote end to send this. There is no handshake reply so + * just log the details for debugging. + */ + qos_printf("set_protocol_features: 0x%"PRIx64 "\n", msg.payload.u64); + break; + + /* + * A real vhost-user backend would actually set the size and + * address of the vrings but we can simply report them. 
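 * (Illustration only, not from the patch: a real back-end would map the
 *  guest addresses it is handed and size its rings accordingly; the stub
 *  handlers below merely log the values, producing lines such as
 *  "set_vring_num: 0/256" or "set_vring_addr: 0x.../0x.../0x...", where the
 *  index, size and addresses depend on the virtio driver under test.)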
+ */ + case VHOST_USER_SET_VRING_NUM: + qos_printf("set_vring_num: %d/%d\n", + msg.payload.state.index, msg.payload.state.num); + break; + case VHOST_USER_SET_VRING_ADDR: + qos_printf("set_vring_addr: 0x%"PRIx64"/0x%"PRIx64"/0x%"PRIx64"\n", + msg.payload.addr.avail_user_addr, + msg.payload.addr.desc_user_addr, + msg.payload.addr.used_user_addr); + break; + case VHOST_USER_GET_VRING_BASE: /* send back vring base to qemu */ msg.flags |= VHOST_USER_REPLY_MASK; @@ -394,7 +462,8 @@ static void chr_read(void *opaque, const uint8_t *buf, int size) * The receive function forces it to be blocking, * so revert it back to non-blocking. */ - qemu_set_nonblock(fd); + g_unix_set_fd_nonblocking(fd, true, &err); + g_assert_no_error(err); break; case VHOST_USER_SET_LOG_BASE: @@ -425,7 +494,18 @@ static void chr_read(void *opaque, const uint8_t *buf, int size) qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size); break; + case VHOST_USER_SET_VRING_ENABLE: + /* + * Another case we ignore as we don't need to respond. With a + * fully functioning vhost-user we would enable/disable the + * vring monitoring. + */ + qos_printf("set_vring(%d)=%s\n", msg.payload.state.index, + msg.payload.state.num ? "enabled" : "disabled"); + break; + default: + qos_printf("vhost-user: un-handled message: %d\n", msg.request); break; } @@ -448,7 +528,7 @@ static const char *init_hugepagefs(void) } if (access(path, R_OK | W_OK | X_OK)) { - g_test_message("access on path (%s): %s", path, strerror(errno)); + qos_printf("access on path (%s): %s", path, strerror(errno)); g_test_fail(); return NULL; } @@ -458,13 +538,13 @@ static const char *init_hugepagefs(void) } while (ret != 0 && errno == EINTR); if (ret != 0) { - g_test_message("statfs on path (%s): %s", path, strerror(errno)); + qos_printf("statfs on path (%s): %s", path, strerror(errno)); g_test_fail(); return NULL; } if (fs.f_type != HUGETLBFS_MAGIC) { - g_test_message("Warning: path not on HugeTLBFS: %s", path); + qos_printf("Warning: path not on HugeTLBFS: %s", path); g_test_fail(); return NULL; } @@ -480,8 +560,8 @@ static TestServer *test_server_new(const gchar *name, struct vhost_user_ops *ops) { TestServer *server = g_new0(TestServer, 1); - char template[] = "/tmp/vhost-test-XXXXXX"; - const char *tmpfs; + g_autofree const char *tmpfs = NULL; + GError *err = NULL; server->context = g_main_context_new(); server->loop = g_main_loop_new(server->context, FALSE); @@ -489,9 +569,11 @@ static TestServer *test_server_new(const gchar *name, /* run the main loop thread so the chardev may operate */ server->thread = g_thread_new(NULL, thread_function, server->loop); - tmpfs = mkdtemp(template); + tmpfs = g_dir_make_tmp("vhost-test-XXXXXX", &err); if (!tmpfs) { - g_test_message("mkdtemp on path (%s): %s", template, strerror(errno)); + g_test_message("Can't create temporary directory in %s: %s", + g_get_tmp_dir(), err->message); + g_error_free(err); } g_assert(tmpfs); @@ -522,14 +604,13 @@ static void chr_event(void *opaque, QEMUChrEvent event) static void test_server_create_chr(TestServer *server, const gchar *opt) { - gchar *chr_path; + g_autofree gchar *chr_path = g_strdup_printf("unix:%s%s", + server->socket_path, opt); Chardev *chr; - chr_path = g_strdup_printf("unix:%s%s", server->socket_path, opt); chr = qemu_chr_new(server->chr_name, chr_path, server->context); - g_free(chr_path); + g_assert(chr); - g_assert_nonnull(chr); qemu_chr_fe_init(&server->chr, chr, &error_abort); qemu_chr_fe_set_handlers(&server->chr, chr_can_read, chr_read, chr_event, NULL, server, 
server->context, true); @@ -935,11 +1016,23 @@ static void test_multiqueue(void *obj, void *arg, QGuestAllocator *alloc) wait_for_rings_started(s, s->queues * 2); } -static void vu_net_set_features(TestServer *s, CharBackend *chr, - VhostUserMsg *msg) + +static uint64_t vu_net_get_features(TestServer *s) { - g_assert_cmpint(msg->payload.u64 & - (0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES), !=, 0ULL); + uint64_t features = 0x1ULL << VHOST_F_LOG_ALL | + 0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES; + + if (s->queues > 1) { + features |= 0x1ULL << VIRTIO_NET_F_MQ; + } + + return features; +} + +static void vu_net_set_features(TestServer *s, CharBackend *chr, + VhostUserMsg *msg) +{ + g_assert(msg->payload.u64 & (0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES)); if (s->test_flags == TEST_FLAGS_DISCONNECT) { qemu_chr_fe_disconnect(chr); s->test_flags = TEST_FLAGS_BAD; @@ -966,6 +1059,7 @@ static struct vhost_user_ops g_vu_net_ops = { .append_opts = append_vhost_net_opts, + .get_features = vu_net_get_features, .set_features = vu_net_set_features, .get_protocol_features = vu_net_get_protocol_features, }; @@ -995,20 +1089,17 @@ static void register_vhost_user_test(void) "virtio-net", test_migrate, &opts); - /* keeps failing on build-system since Aug 15 2017 */ - if (getenv("QTEST_VHOST_USER_FIXME")) { - opts.before = vhost_user_test_setup_reconnect; - qos_add_test("vhost-user/reconnect", "virtio-net", - test_reconnect, &opts); + opts.before = vhost_user_test_setup_reconnect; + qos_add_test("vhost-user/reconnect", "virtio-net", + test_reconnect, &opts); - opts.before = vhost_user_test_setup_connect_fail; - qos_add_test("vhost-user/connect-fail", "virtio-net", - test_vhost_user_started, &opts); + opts.before = vhost_user_test_setup_connect_fail; + qos_add_test("vhost-user/connect-fail", "virtio-net", + test_vhost_user_started, &opts); - opts.before = vhost_user_test_setup_flags_mismatch; - qos_add_test("vhost-user/flags-mismatch", "virtio-net", - test_vhost_user_started, &opts); - } + opts.before = vhost_user_test_setup_flags_mismatch; + qos_add_test("vhost-user/flags-mismatch", "virtio-net", + test_vhost_user_started, &opts); opts.before = vhost_user_test_setup_multiqueue; opts.edge.extra_device_opts = "mq=on"; @@ -1017,3 +1108,51 @@ static void register_vhost_user_test(void) test_multiqueue, &opts); } libqos_init(register_vhost_user_test); + +static uint64_t vu_gpio_get_features(TestServer *s) +{ + return 0x1ULL << VIRTIO_F_VERSION_1 | + 0x1ULL << VIRTIO_GPIO_F_IRQ | + 0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES; +} + +/* + * This stub can't handle all the message types but we should reply + * that we support VHOST_USER_PROTOCOL_F_CONFIG as gpio would use it + * talking to a read vhost-user daemon. 
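 * (Illustration only, not from the patch: VHOST_USER_PROTOCOL_F_CONFIG is
 *  defined as 9 earlier in this file, so the reply built below carries
 *  payload.u64 == (1ULL << 9) == 0x200, with VHOST_USER_REPLY_MASK set in
 *  the message flags.)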
+ */ +static void vu_gpio_get_protocol_features(TestServer *s, CharBackend *chr, + VhostUserMsg *msg) +{ + /* send back features to qemu */ + msg->flags |= VHOST_USER_REPLY_MASK; + msg->size = sizeof(m.payload.u64); + msg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_CONFIG; + + qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size); +} + +static struct vhost_user_ops g_vu_gpio_ops = { + .type = VHOST_USER_GPIO, + + .append_opts = append_vhost_gpio_opts, + + .get_features = vu_gpio_get_features, + .set_features = vu_net_set_features, + .get_protocol_features = vu_gpio_get_protocol_features, +}; + +static void register_vhost_gpio_test(void) +{ + QOSGraphTestOptions opts = { + .before = vhost_user_test_setup, + .subprocess = true, + .arg = &g_vu_gpio_ops, + }; + + qemu_add_opts(&qemu_chardev_opts); + + qos_add_test("read-guest-mem/memfile", + "vhost-user-gpio", test_read_guest_mem, &opts); +} +libqos_init(register_vhost_gpio_test); diff --git a/tests/qtest/virtio-9p-test.c b/tests/qtest/virtio-9p-test.c index 41fed41de1..65e69491e5 100644 --- a/tests/qtest/virtio-9p-test.c +++ b/tests/qtest/virtio-9p-test.c @@ -13,78 +13,29 @@ */ #include "qemu/osdep.h" -#include "libqtest-single.h" #include "qemu/module.h" -#include "hw/9pfs/9p.h" -#include "hw/9pfs/9p-synth.h" -#include "libqos/virtio-9p.h" -#include "libqos/qgraph.h" +#include "libqos/virtio-9p-client.h" -#define QVIRTIO_9P_TIMEOUT_US (10 * 1000 * 1000) -static QGuestAllocator *alloc; - -/* - * Used to auto generate new fids. Start with arbitrary high value to avoid - * collision with hard coded fids in basic test code. - */ -static uint32_t fid_generator = 1000; - -static uint32_t genfid(void) -{ - return fid_generator++; -} - -/** - * Splits the @a in string by @a delim into individual (non empty) strings - * and outputs them to @a out. The output array @a out is NULL terminated. - * - * Output array @a out must be freed by calling split_free(). - * - * @returns number of individual elements in output array @a out (without the - * final NULL terminating element) - */ -static int split(const char *in, const char *delim, char ***out) -{ - int n = 0, i = 0; - char *tmp, *p; - - tmp = g_strdup(in); - for (p = strtok(tmp, delim); p != NULL; p = strtok(NULL, delim)) { - if (strlen(p) > 0) { - ++n; - } - } - g_free(tmp); - - *out = g_new0(char *, n + 1); /* last element NULL delimiter */ - - tmp = g_strdup(in); - for (p = strtok(tmp, delim); p != NULL; p = strtok(NULL, delim)) { - if (strlen(p) > 0) { - (*out)[i++] = g_strdup(p); - } - } - g_free(tmp); - - return n; -} - -static void split_free(char ***out) -{ - int i; - for (i = 0; (*out)[i]; ++i) { - g_free((*out)[i]); - } - g_free(*out); - *out = NULL; -} +#define twalk(...) v9fs_twalk((TWalkOpt) __VA_ARGS__) +#define tversion(...) v9fs_tversion((TVersionOpt) __VA_ARGS__) +#define tattach(...) v9fs_tattach((TAttachOpt) __VA_ARGS__) +#define tgetattr(...) v9fs_tgetattr((TGetAttrOpt) __VA_ARGS__) +#define treaddir(...) v9fs_treaddir((TReadDirOpt) __VA_ARGS__) +#define tlopen(...) v9fs_tlopen((TLOpenOpt) __VA_ARGS__) +#define twrite(...) v9fs_twrite((TWriteOpt) __VA_ARGS__) +#define tflush(...) v9fs_tflush((TFlushOpt) __VA_ARGS__) +#define tmkdir(...) v9fs_tmkdir((TMkdirOpt) __VA_ARGS__) +#define tlcreate(...) v9fs_tlcreate((TlcreateOpt) __VA_ARGS__) +#define tsymlink(...) v9fs_tsymlink((TsymlinkOpt) __VA_ARGS__) +#define tlink(...) v9fs_tlink((TlinkOpt) __VA_ARGS__) +#define tunlinkat(...) 
v9fs_tunlinkat((TunlinkatOpt) __VA_ARGS__) static void pci_config(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtio9P *v9p = obj; - alloc = t_alloc; + v9fs_set_allocator(t_alloc); size_t tag_len = qvirtio_config_readw(v9p->vdev, 0); - char *tag; + g_autofree char *tag = NULL; int i; g_assert_cmpint(tag_len, ==, strlen(MOUNT_TAG)); @@ -94,569 +45,51 @@ static void pci_config(void *obj, void *data, QGuestAllocator *t_alloc) tag[i] = qvirtio_config_readb(v9p->vdev, i + 2); } g_assert_cmpmem(tag, tag_len, MOUNT_TAG, tag_len); - g_free(tag); } -#define P9_MAX_SIZE 4096 /* Max size of a T-message or R-message */ - -typedef struct { - QTestState *qts; - QVirtio9P *v9p; - uint16_t tag; - uint64_t t_msg; - uint32_t t_size; - uint64_t r_msg; - /* No r_size, it is hardcoded to P9_MAX_SIZE */ - size_t t_off; - size_t r_off; - uint32_t free_head; -} P9Req; - -static void v9fs_memwrite(P9Req *req, const void *addr, size_t len) +static inline bool is_same_qid(v9fs_qid a, v9fs_qid b) { - qtest_memwrite(req->qts, req->t_msg + req->t_off, addr, len); - req->t_off += len; -} - -static void v9fs_memskip(P9Req *req, size_t len) -{ - req->r_off += len; -} - -static void v9fs_memread(P9Req *req, void *addr, size_t len) -{ - qtest_memread(req->qts, req->r_msg + req->r_off, addr, len); - req->r_off += len; -} - -static void v9fs_uint8_read(P9Req *req, uint8_t *val) -{ - v9fs_memread(req, val, 1); -} - -static void v9fs_uint16_write(P9Req *req, uint16_t val) -{ - uint16_t le_val = cpu_to_le16(val); - - v9fs_memwrite(req, &le_val, 2); -} - -static void v9fs_uint16_read(P9Req *req, uint16_t *val) -{ - v9fs_memread(req, val, 2); - le16_to_cpus(val); -} - -static void v9fs_uint32_write(P9Req *req, uint32_t val) -{ - uint32_t le_val = cpu_to_le32(val); - - v9fs_memwrite(req, &le_val, 4); -} - -static void v9fs_uint64_write(P9Req *req, uint64_t val) -{ - uint64_t le_val = cpu_to_le64(val); - - v9fs_memwrite(req, &le_val, 8); -} - -static void v9fs_uint32_read(P9Req *req, uint32_t *val) -{ - v9fs_memread(req, val, 4); - le32_to_cpus(val); -} - -static void v9fs_uint64_read(P9Req *req, uint64_t *val) -{ - v9fs_memread(req, val, 8); - le64_to_cpus(val); -} - -/* len[2] string[len] */ -static uint16_t v9fs_string_size(const char *string) -{ - size_t len = strlen(string); - - g_assert_cmpint(len, <=, UINT16_MAX - 2); - - return 2 + len; -} - -static void v9fs_string_write(P9Req *req, const char *string) -{ - int len = strlen(string); - - g_assert_cmpint(len, <=, UINT16_MAX); - - v9fs_uint16_write(req, (uint16_t) len); - v9fs_memwrite(req, string, len); -} - -static void v9fs_string_read(P9Req *req, uint16_t *len, char **string) -{ - uint16_t local_len; - - v9fs_uint16_read(req, &local_len); - if (len) { - *len = local_len; - } - if (string) { - *string = g_malloc(local_len + 1); - v9fs_memread(req, *string, local_len); - (*string)[local_len] = 0; - } else { - v9fs_memskip(req, local_len); - } -} - - typedef struct { - uint32_t size; - uint8_t id; - uint16_t tag; -} QEMU_PACKED P9Hdr; - -static P9Req *v9fs_req_init(QVirtio9P *v9p, uint32_t size, uint8_t id, - uint16_t tag) -{ - P9Req *req = g_new0(P9Req, 1); - uint32_t total_size = 7; /* 9P header has well-known size of 7 bytes */ - P9Hdr hdr = { - .id = id, - .tag = cpu_to_le16(tag) - }; - - g_assert_cmpint(total_size, <=, UINT32_MAX - size); - total_size += size; - hdr.size = cpu_to_le32(total_size); - - g_assert_cmpint(total_size, <=, P9_MAX_SIZE); - - req->qts = global_qtest; - req->v9p = v9p; - req->t_size = total_size; - req->t_msg = guest_alloc(alloc, 
req->t_size); - v9fs_memwrite(req, &hdr, 7); - req->tag = tag; - return req; -} - -static void v9fs_req_send(P9Req *req) -{ - QVirtio9P *v9p = req->v9p; - - req->r_msg = guest_alloc(alloc, P9_MAX_SIZE); - req->free_head = qvirtqueue_add(req->qts, v9p->vq, req->t_msg, req->t_size, - false, true); - qvirtqueue_add(req->qts, v9p->vq, req->r_msg, P9_MAX_SIZE, true, false); - qvirtqueue_kick(req->qts, v9p->vdev, v9p->vq, req->free_head); - req->t_off = 0; -} - -static const char *rmessage_name(uint8_t id) -{ - return - id == P9_RLERROR ? "RLERROR" : - id == P9_RVERSION ? "RVERSION" : - id == P9_RATTACH ? "RATTACH" : - id == P9_RWALK ? "RWALK" : - id == P9_RLOPEN ? "RLOPEN" : - id == P9_RWRITE ? "RWRITE" : - id == P9_RMKDIR ? "RMKDIR" : - id == P9_RLCREATE ? "RLCREATE" : - id == P9_RSYMLINK ? "RSYMLINK" : - id == P9_RLINK ? "RLINK" : - id == P9_RUNLINKAT ? "RUNLINKAT" : - id == P9_RFLUSH ? "RFLUSH" : - id == P9_RREADDIR ? "READDIR" : - ""; -} - -static void v9fs_req_wait_for_reply(P9Req *req, uint32_t *len) -{ - QVirtio9P *v9p = req->v9p; - - qvirtio_wait_used_elem(req->qts, v9p->vdev, v9p->vq, req->free_head, len, - QVIRTIO_9P_TIMEOUT_US); -} - -static void v9fs_req_recv(P9Req *req, uint8_t id) -{ - P9Hdr hdr; - - v9fs_memread(req, &hdr, 7); - hdr.size = ldl_le_p(&hdr.size); - hdr.tag = lduw_le_p(&hdr.tag); - - g_assert_cmpint(hdr.size, >=, 7); - g_assert_cmpint(hdr.size, <=, P9_MAX_SIZE); - g_assert_cmpint(hdr.tag, ==, req->tag); - - if (hdr.id != id) { - g_printerr("Received response %d (%s) instead of %d (%s)\n", - hdr.id, rmessage_name(hdr.id), id, rmessage_name(id)); - - if (hdr.id == P9_RLERROR) { - uint32_t err; - v9fs_uint32_read(req, &err); - g_printerr("Rlerror has errno %d (%s)\n", err, strerror(err)); - } - } - g_assert_cmpint(hdr.id, ==, id); -} - -static void v9fs_req_free(P9Req *req) -{ - guest_free(alloc, req->t_msg); - guest_free(alloc, req->r_msg); - g_free(req); -} - -/* size[4] Rlerror tag[2] ecode[4] */ -static void v9fs_rlerror(P9Req *req, uint32_t *err) -{ - v9fs_req_recv(req, P9_RLERROR); - v9fs_uint32_read(req, err); - v9fs_req_free(req); -} - -/* size[4] Tversion tag[2] msize[4] version[s] */ -static P9Req *v9fs_tversion(QVirtio9P *v9p, uint32_t msize, const char *version, - uint16_t tag) -{ - P9Req *req; - uint32_t body_size = 4; - uint16_t string_size = v9fs_string_size(version); - - g_assert_cmpint(body_size, <=, UINT32_MAX - string_size); - body_size += string_size; - req = v9fs_req_init(v9p, body_size, P9_TVERSION, tag); - - v9fs_uint32_write(req, msize); - v9fs_string_write(req, version); - v9fs_req_send(req); - return req; -} - -/* size[4] Rversion tag[2] msize[4] version[s] */ -static void v9fs_rversion(P9Req *req, uint16_t *len, char **version) -{ - uint32_t msize; - - v9fs_req_recv(req, P9_RVERSION); - v9fs_uint32_read(req, &msize); - - g_assert_cmpint(msize, ==, P9_MAX_SIZE); - - if (len || version) { - v9fs_string_read(req, len, version); - } - - v9fs_req_free(req); -} - -/* size[4] Tattach tag[2] fid[4] afid[4] uname[s] aname[s] n_uname[4] */ -static P9Req *v9fs_tattach(QVirtio9P *v9p, uint32_t fid, uint32_t n_uname, - uint16_t tag) -{ - const char *uname = ""; /* ignored by QEMU */ - const char *aname = ""; /* ignored by QEMU */ - P9Req *req = v9fs_req_init(v9p, 4 + 4 + 2 + 2 + 4, P9_TATTACH, tag); - - v9fs_uint32_write(req, fid); - v9fs_uint32_write(req, P9_NOFID); - v9fs_string_write(req, uname); - v9fs_string_write(req, aname); - v9fs_uint32_write(req, n_uname); - v9fs_req_send(req); - return req; -} - -typedef char v9fs_qid[13]; - -/* size[4] Rattach 
tag[2] qid[13] */ -static void v9fs_rattach(P9Req *req, v9fs_qid *qid) -{ - v9fs_req_recv(req, P9_RATTACH); - if (qid) { - v9fs_memread(req, qid, 13); - } - v9fs_req_free(req); -} - -/* size[4] Twalk tag[2] fid[4] newfid[4] nwname[2] nwname*(wname[s]) */ -static P9Req *v9fs_twalk(QVirtio9P *v9p, uint32_t fid, uint32_t newfid, - uint16_t nwname, char *const wnames[], uint16_t tag) -{ - P9Req *req; - int i; - uint32_t body_size = 4 + 4 + 2; - - for (i = 0; i < nwname; i++) { - uint16_t wname_size = v9fs_string_size(wnames[i]); - - g_assert_cmpint(body_size, <=, UINT32_MAX - wname_size); - body_size += wname_size; - } - req = v9fs_req_init(v9p, body_size, P9_TWALK, tag); - v9fs_uint32_write(req, fid); - v9fs_uint32_write(req, newfid); - v9fs_uint16_write(req, nwname); - for (i = 0; i < nwname; i++) { - v9fs_string_write(req, wnames[i]); - } - v9fs_req_send(req); - return req; -} - -/* size[4] Rwalk tag[2] nwqid[2] nwqid*(wqid[13]) */ -static void v9fs_rwalk(P9Req *req, uint16_t *nwqid, v9fs_qid **wqid) -{ - uint16_t local_nwqid; - - v9fs_req_recv(req, P9_RWALK); - v9fs_uint16_read(req, &local_nwqid); - if (nwqid) { - *nwqid = local_nwqid; - } - if (wqid) { - *wqid = g_malloc(local_nwqid * 13); - v9fs_memread(req, *wqid, local_nwqid * 13); - } - v9fs_req_free(req); -} - -/* size[4] Treaddir tag[2] fid[4] offset[8] count[4] */ -static P9Req *v9fs_treaddir(QVirtio9P *v9p, uint32_t fid, uint64_t offset, - uint32_t count, uint16_t tag) -{ - P9Req *req; - - req = v9fs_req_init(v9p, 4 + 8 + 4, P9_TREADDIR, tag); - v9fs_uint32_write(req, fid); - v9fs_uint64_write(req, offset); - v9fs_uint32_write(req, count); - v9fs_req_send(req); - return req; -} - -struct V9fsDirent { - v9fs_qid qid; - uint64_t offset; - uint8_t type; - char *name; - struct V9fsDirent *next; -}; - -/* size[4] Rreaddir tag[2] count[4] data[count] */ -static void v9fs_rreaddir(P9Req *req, uint32_t *count, uint32_t *nentries, - struct V9fsDirent **entries) -{ - uint32_t local_count; - struct V9fsDirent *e = NULL; - uint16_t slen; - uint32_t n = 0; - - v9fs_req_recv(req, P9_RREADDIR); - v9fs_uint32_read(req, &local_count); - - if (count) { - *count = local_count; - } - - for (int32_t togo = (int32_t)local_count; - togo >= 13 + 8 + 1 + 2; - togo -= 13 + 8 + 1 + 2 + slen, ++n) - { - if (!e) { - e = g_malloc(sizeof(struct V9fsDirent)); - if (entries) { - *entries = e; - } - } else { - e = e->next = g_malloc(sizeof(struct V9fsDirent)); - } - e->next = NULL; - /* qid[13] offset[8] type[1] name[s] */ - v9fs_memread(req, &e->qid, 13); - v9fs_uint64_read(req, &e->offset); - v9fs_uint8_read(req, &e->type); - v9fs_string_read(req, &slen, &e->name); - } - - if (nentries) { - *nentries = n; - } - - v9fs_req_free(req); -} - -static void v9fs_free_dirents(struct V9fsDirent *e) -{ - struct V9fsDirent *next = NULL; - - for (; e; e = next) { - next = e->next; - g_free(e->name); - g_free(e); - } -} - -/* size[4] Tlopen tag[2] fid[4] flags[4] */ -static P9Req *v9fs_tlopen(QVirtio9P *v9p, uint32_t fid, uint32_t flags, - uint16_t tag) -{ - P9Req *req; - - req = v9fs_req_init(v9p, 4 + 4, P9_TLOPEN, tag); - v9fs_uint32_write(req, fid); - v9fs_uint32_write(req, flags); - v9fs_req_send(req); - return req; -} - -/* size[4] Rlopen tag[2] qid[13] iounit[4] */ -static void v9fs_rlopen(P9Req *req, v9fs_qid *qid, uint32_t *iounit) -{ - v9fs_req_recv(req, P9_RLOPEN); - if (qid) { - v9fs_memread(req, qid, 13); - } else { - v9fs_memskip(req, 13); - } - if (iounit) { - v9fs_uint32_read(req, iounit); - } - v9fs_req_free(req); -} - -/* size[4] Twrite tag[2] fid[4] 
offset[8] count[4] data[count] */ -static P9Req *v9fs_twrite(QVirtio9P *v9p, uint32_t fid, uint64_t offset, - uint32_t count, const void *data, uint16_t tag) -{ - P9Req *req; - uint32_t body_size = 4 + 8 + 4; - - g_assert_cmpint(body_size, <=, UINT32_MAX - count); - body_size += count; - req = v9fs_req_init(v9p, body_size, P9_TWRITE, tag); - v9fs_uint32_write(req, fid); - v9fs_uint64_write(req, offset); - v9fs_uint32_write(req, count); - v9fs_memwrite(req, data, count); - v9fs_req_send(req); - return req; -} - -/* size[4] Rwrite tag[2] count[4] */ -static void v9fs_rwrite(P9Req *req, uint32_t *count) -{ - v9fs_req_recv(req, P9_RWRITE); - if (count) { - v9fs_uint32_read(req, count); - } - v9fs_req_free(req); -} - -/* size[4] Tflush tag[2] oldtag[2] */ -static P9Req *v9fs_tflush(QVirtio9P *v9p, uint16_t oldtag, uint16_t tag) -{ - P9Req *req; - - req = v9fs_req_init(v9p, 2, P9_TFLUSH, tag); - v9fs_uint32_write(req, oldtag); - v9fs_req_send(req); - return req; -} - -/* size[4] Rflush tag[2] */ -static void v9fs_rflush(P9Req *req) -{ - v9fs_req_recv(req, P9_RFLUSH); - v9fs_req_free(req); -} - -static void do_version(QVirtio9P *v9p) -{ - const char *version = "9P2000.L"; - uint16_t server_len; - char *server_version; - P9Req *req; - - req = v9fs_tversion(v9p, P9_MAX_SIZE, version, P9_NOTAG); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rversion(req, &server_len, &server_version); - - g_assert_cmpmem(server_version, server_len, version, strlen(version)); - - g_free(server_version); -} - -/* utility function: walk to requested dir and return fid for that dir */ -static uint32_t do_walk(QVirtio9P *v9p, const char *path) -{ - char **wnames; - P9Req *req; - const uint32_t fid = genfid(); - - int nwnames = split(path, "/", &wnames); - - req = v9fs_twalk(v9p, 0, fid, nwnames, wnames, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rwalk(req, NULL, NULL); - - split_free(&wnames); - return fid; + /* don't compare QID version for checking for file ID equalness */ + return a[0] == b[0] && memcmp(&a[5], &b[5], 8) == 0; } static void fs_version(void *obj, void *data, QGuestAllocator *t_alloc) { - alloc = t_alloc; - do_version(obj); -} - -static void do_attach(QVirtio9P *v9p) -{ - P9Req *req; - - do_version(v9p); - req = v9fs_tattach(v9p, 0, getuid(), 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rattach(req, NULL); + v9fs_set_allocator(t_alloc); + tversion({ .client = obj }); } static void fs_attach(void *obj, void *data, QGuestAllocator *t_alloc) { - alloc = t_alloc; - do_attach(obj); + v9fs_set_allocator(t_alloc); + tattach({ .client = obj }); } static void fs_walk(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtio9P *v9p = obj; - alloc = t_alloc; + v9fs_set_allocator(t_alloc); char *wnames[P9_MAXWELEM]; uint16_t nwqid; - v9fs_qid *wqid; + g_autofree v9fs_qid *wqid = NULL; int i; - P9Req *req; for (i = 0; i < P9_MAXWELEM; i++) { wnames[i] = g_strdup_printf(QTEST_V9FS_SYNTH_WALK_FILE, i); } - do_attach(v9p); - req = v9fs_twalk(v9p, 0, 1, P9_MAXWELEM, wnames, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rwalk(req, &nwqid, &wqid); + tattach({ .client = v9p }); + twalk({ + .client = v9p, .fid = 0, .newfid = 1, + .nwname = P9_MAXWELEM, .wnames = wnames, + .rwalk = { .nwqid = &nwqid, .wqid = &wqid } + }); g_assert_cmpint(nwqid, ==, P9_MAXWELEM); for (i = 0; i < P9_MAXWELEM; i++) { g_free(wnames[i]); } - - g_free(wqid); } static bool fs_dirents_contain_name(struct V9fsDirent *e, const char* name) @@ -669,192 +102,37 @@ static bool fs_dirents_contain_name(struct V9fsDirent *e, const char* name) 
return false; } -/* size[4] Tmkdir tag[2] dfid[4] name[s] mode[4] gid[4] */ -static P9Req *v9fs_tmkdir(QVirtio9P *v9p, uint32_t dfid, const char *name, - uint32_t mode, uint32_t gid, uint16_t tag) -{ - P9Req *req; - - uint32_t body_size = 4 + 4 + 4; - uint16_t string_size = v9fs_string_size(name); - - g_assert_cmpint(body_size, <=, UINT32_MAX - string_size); - body_size += string_size; - - req = v9fs_req_init(v9p, body_size, P9_TMKDIR, tag); - v9fs_uint32_write(req, dfid); - v9fs_string_write(req, name); - v9fs_uint32_write(req, mode); - v9fs_uint32_write(req, gid); - v9fs_req_send(req); - return req; -} - -/* size[4] Rmkdir tag[2] qid[13] */ -static void v9fs_rmkdir(P9Req *req, v9fs_qid *qid) -{ - v9fs_req_recv(req, P9_RMKDIR); - if (qid) { - v9fs_memread(req, qid, 13); - } else { - v9fs_memskip(req, 13); - } - v9fs_req_free(req); -} - -/* size[4] Tlcreate tag[2] fid[4] name[s] flags[4] mode[4] gid[4] */ -static P9Req *v9fs_tlcreate(QVirtio9P *v9p, uint32_t fid, const char *name, - uint32_t flags, uint32_t mode, uint32_t gid, - uint16_t tag) -{ - P9Req *req; - - uint32_t body_size = 4 + 4 + 4 + 4; - uint16_t string_size = v9fs_string_size(name); - - g_assert_cmpint(body_size, <=, UINT32_MAX - string_size); - body_size += string_size; - - req = v9fs_req_init(v9p, body_size, P9_TLCREATE, tag); - v9fs_uint32_write(req, fid); - v9fs_string_write(req, name); - v9fs_uint32_write(req, flags); - v9fs_uint32_write(req, mode); - v9fs_uint32_write(req, gid); - v9fs_req_send(req); - return req; -} - -/* size[4] Rlcreate tag[2] qid[13] iounit[4] */ -static void v9fs_rlcreate(P9Req *req, v9fs_qid *qid, uint32_t *iounit) -{ - v9fs_req_recv(req, P9_RLCREATE); - if (qid) { - v9fs_memread(req, qid, 13); - } else { - v9fs_memskip(req, 13); - } - if (iounit) { - v9fs_uint32_read(req, iounit); - } - v9fs_req_free(req); -} - -/* size[4] Tsymlink tag[2] fid[4] name[s] symtgt[s] gid[4] */ -static P9Req *v9fs_tsymlink(QVirtio9P *v9p, uint32_t fid, const char *name, - const char *symtgt, uint32_t gid, uint16_t tag) -{ - P9Req *req; - - uint32_t body_size = 4 + 4; - uint16_t string_size = v9fs_string_size(name) + v9fs_string_size(symtgt); - - g_assert_cmpint(body_size, <=, UINT32_MAX - string_size); - body_size += string_size; - - req = v9fs_req_init(v9p, body_size, P9_TSYMLINK, tag); - v9fs_uint32_write(req, fid); - v9fs_string_write(req, name); - v9fs_string_write(req, symtgt); - v9fs_uint32_write(req, gid); - v9fs_req_send(req); - return req; -} - -/* size[4] Rsymlink tag[2] qid[13] */ -static void v9fs_rsymlink(P9Req *req, v9fs_qid *qid) -{ - v9fs_req_recv(req, P9_RSYMLINK); - if (qid) { - v9fs_memread(req, qid, 13); - } else { - v9fs_memskip(req, 13); - } - v9fs_req_free(req); -} - -/* size[4] Tlink tag[2] dfid[4] fid[4] name[s] */ -static P9Req *v9fs_tlink(QVirtio9P *v9p, uint32_t dfid, uint32_t fid, - const char *name, uint16_t tag) -{ - P9Req *req; - - uint32_t body_size = 4 + 4; - uint16_t string_size = v9fs_string_size(name); - - g_assert_cmpint(body_size, <=, UINT32_MAX - string_size); - body_size += string_size; - - req = v9fs_req_init(v9p, body_size, P9_TLINK, tag); - v9fs_uint32_write(req, dfid); - v9fs_uint32_write(req, fid); - v9fs_string_write(req, name); - v9fs_req_send(req); - return req; -} - -/* size[4] Rlink tag[2] */ -static void v9fs_rlink(P9Req *req) -{ - v9fs_req_recv(req, P9_RLINK); - v9fs_req_free(req); -} - -/* size[4] Tunlinkat tag[2] dirfd[4] name[s] flags[4] */ -static P9Req *v9fs_tunlinkat(QVirtio9P *v9p, uint32_t dirfd, const char *name, - uint32_t flags, uint16_t tag) -{ - P9Req 
*req; - - uint32_t body_size = 4 + 4; - uint16_t string_size = v9fs_string_size(name); - - g_assert_cmpint(body_size, <=, UINT32_MAX - string_size); - body_size += string_size; - - req = v9fs_req_init(v9p, body_size, P9_TUNLINKAT, tag); - v9fs_uint32_write(req, dirfd); - v9fs_string_write(req, name); - v9fs_uint32_write(req, flags); - v9fs_req_send(req); - return req; -} - -/* size[4] Runlinkat tag[2] */ -static void v9fs_runlinkat(P9Req *req) -{ - v9fs_req_recv(req, P9_RUNLINKAT); - v9fs_req_free(req); -} - /* basic readdir test where reply fits into a single response message */ static void fs_readdir(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtio9P *v9p = obj; - alloc = t_alloc; - char *const wnames[] = { g_strdup(QTEST_V9FS_SYNTH_READDIR_DIR) }; + v9fs_set_allocator(t_alloc); + char *wnames[] = { g_strdup(QTEST_V9FS_SYNTH_READDIR_DIR) }; uint16_t nqid; v9fs_qid qid; uint32_t count, nentries; struct V9fsDirent *entries = NULL; - P9Req *req; - do_attach(v9p); - req = v9fs_twalk(v9p, 0, 1, 1, wnames, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rwalk(req, &nqid, NULL); + tattach({ .client = v9p }); + twalk({ + .client = v9p, .fid = 0, .newfid = 1, + .nwname = 1, .wnames = wnames, .rwalk.nwqid = &nqid + }); g_assert_cmpint(nqid, ==, 1); - req = v9fs_tlopen(v9p, 1, O_DIRECTORY, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rlopen(req, &qid, NULL); + tlopen({ + .client = v9p, .fid = 1, .flags = O_DIRECTORY, .rlopen.qid = &qid + }); /* * submit count = msize - 11, because 11 is the header size of Rreaddir */ - req = v9fs_treaddir(v9p, 1, 0, P9_MAX_SIZE - 11, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rreaddir(req, &count, &nentries, &entries); + treaddir({ + .client = v9p, .fid = 1, .offset = 0, .count = P9_MAX_SIZE - 11, + .rreaddir = { + .count = &count, .nentries = &nentries, .entries = &entries + } + }); /* * Assuming msize (P9_MAX_SIZE) is large enough so we can retrieve all @@ -872,9 +150,9 @@ static void fs_readdir(void *obj, void *data, QGuestAllocator *t_alloc) g_assert_cmpint(fs_dirents_contain_name(entries, "."), ==, true); g_assert_cmpint(fs_dirents_contain_name(entries, ".."), ==, true); for (int i = 0; i < QTEST_V9FS_SYNTH_READDIR_NFILES; ++i) { - char *name = g_strdup_printf(QTEST_V9FS_SYNTH_READDIR_FILE, i); + g_autofree char *name = + g_strdup_printf(QTEST_V9FS_SYNTH_READDIR_FILE, i); g_assert_cmpint(fs_dirents_contain_name(entries, name), ==, true); - g_free(name); } v9fs_free_dirents(entries); @@ -884,16 +162,15 @@ static void fs_readdir(void *obj, void *data, QGuestAllocator *t_alloc) /* readdir test where overall request is split over several messages */ static void do_readdir_split(QVirtio9P *v9p, uint32_t count) { - char *const wnames[] = { g_strdup(QTEST_V9FS_SYNTH_READDIR_DIR) }; + char *wnames[] = { g_strdup(QTEST_V9FS_SYNTH_READDIR_DIR) }; uint16_t nqid; v9fs_qid qid; uint32_t nentries, npartialentries; struct V9fsDirent *entries, *tail, *partialentries; - P9Req *req; int fid; uint64_t offset; - do_attach(v9p); + tattach({ .client = v9p }); fid = 1; offset = 0; @@ -901,14 +178,15 @@ static void do_readdir_split(QVirtio9P *v9p, uint32_t count) nentries = 0; tail = NULL; - req = v9fs_twalk(v9p, 0, fid, 1, wnames, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rwalk(req, &nqid, NULL); + twalk({ + .client = v9p, .fid = 0, .newfid = fid, + .nwname = 1, .wnames = wnames, .rwalk.nwqid = &nqid + }); g_assert_cmpint(nqid, ==, 1); - req = v9fs_tlopen(v9p, fid, O_DIRECTORY, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rlopen(req, &qid, NULL); + 
tlopen({ + .client = v9p, .fid = fid, .flags = O_DIRECTORY, .rlopen.qid = &qid + }); /* * send as many Treaddir requests as required to get all directory @@ -918,9 +196,13 @@ static void do_readdir_split(QVirtio9P *v9p, uint32_t count) npartialentries = 0; partialentries = NULL; - req = v9fs_treaddir(v9p, fid, offset, count, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rreaddir(req, &count, &npartialentries, &partialentries); + treaddir({ + .client = v9p, .fid = fid, .offset = offset, .count = count, + .rreaddir = { + .count = &count, .nentries = &npartialentries, + .entries = &partialentries + } + }); if (npartialentries > 0 && partialentries) { if (!entries) { entries = partialentries; @@ -964,59 +246,131 @@ static void do_readdir_split(QVirtio9P *v9p, uint32_t count) static void fs_walk_no_slash(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtio9P *v9p = obj; - alloc = t_alloc; - char *const wnames[] = { g_strdup(" /") }; - P9Req *req; - uint32_t err; + v9fs_set_allocator(t_alloc); + char *wnames[] = { g_strdup(" /") }; - do_attach(v9p); - req = v9fs_twalk(v9p, 0, 1, 1, wnames, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rlerror(req, &err); - - g_assert_cmpint(err, ==, ENOENT); + tattach({ .client = v9p }); + twalk({ + .client = v9p, .fid = 0, .newfid = 1, .nwname = 1, .wnames = wnames, + .expectErr = ENOENT + }); g_free(wnames[0]); } +static void fs_walk_nonexistent(void *obj, void *data, QGuestAllocator *t_alloc) +{ + QVirtio9P *v9p = obj; + v9fs_set_allocator(t_alloc); + + tattach({ .client = v9p }); + /* + * The 9p2000 protocol spec says: "If the first element cannot be walked + * for any reason, Rerror is returned." + */ + twalk({ .client = v9p, .path = "non-existent", .expectErr = ENOENT }); +} + +static void fs_walk_2nd_nonexistent(void *obj, void *data, + QGuestAllocator *t_alloc) +{ + QVirtio9P *v9p = obj; + v9fs_set_allocator(t_alloc); + v9fs_qid root_qid; + uint16_t nwqid; + uint32_t fid; + g_autofree v9fs_qid *wqid = NULL; + g_autofree char *path = g_strdup_printf( + QTEST_V9FS_SYNTH_WALK_FILE "/non-existent", 0 + ); + + tattach({ .client = v9p, .rattach.qid = &root_qid }); + fid = twalk({ + .client = v9p, .path = path, + .rwalk = { .nwqid = &nwqid, .wqid = &wqid } + }).newfid; + /* + * The 9p2000 protocol spec says: "nwqid is therefore either nwname or the + * index of the first elementwise walk that failed." 
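 * (Illustration only, not from the patch: the path walked above has two
 *  elements; the first, QTEST_V9FS_SYNTH_WALK_FILE expanded with index 0,
 *  exists in the synth backend, while the second, "non-existent", does not.
 *  Per the spec text quoted above the server therefore replies with Rwalk
 *  rather than Rlerror, and nwqid is 1, i.e. only the qid of the first
 *  element comes back, which is what the assertion below checks.)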
+ */ + assert(nwqid == 1); + + /* returned QID wqid[0] is file ID of 1st subdir */ + g_assert(wqid && wqid[0] && !is_same_qid(root_qid, wqid[0])); + + /* expect fid being unaffected by walk above */ + tgetattr({ + .client = v9p, .fid = fid, .request_mask = P9_GETATTR_BASIC, + .expectErr = ENOENT + }); +} + +static void fs_walk_none(void *obj, void *data, QGuestAllocator *t_alloc) +{ + QVirtio9P *v9p = obj; + v9fs_set_allocator(t_alloc); + v9fs_qid root_qid; + g_autofree v9fs_qid *wqid = NULL; + struct v9fs_attr attr; + + tversion({ .client = v9p }); + tattach({ + .client = v9p, .fid = 0, .n_uname = getuid(), + .rattach.qid = &root_qid + }); + + twalk({ + .client = v9p, .fid = 0, .newfid = 1, .nwname = 0, .wnames = NULL, + .rwalk.wqid = &wqid + }); + + /* special case: no QID is returned if nwname=0 was sent */ + g_assert(wqid == NULL); + + tgetattr({ + .client = v9p, .fid = 1, .request_mask = P9_GETATTR_BASIC, + .rgetattr.attr = &attr + }); + + g_assert(is_same_qid(root_qid, attr.qid)); +} + static void fs_walk_dotdot(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtio9P *v9p = obj; - alloc = t_alloc; - char *const wnames[] = { g_strdup("..") }; - v9fs_qid root_qid, *wqid; - P9Req *req; + v9fs_set_allocator(t_alloc); + char *wnames[] = { g_strdup("..") }; + v9fs_qid root_qid; + g_autofree v9fs_qid *wqid = NULL; - do_version(v9p); - req = v9fs_tattach(v9p, 0, getuid(), 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rattach(req, &root_qid); + tversion({ .client = v9p }); + tattach({ + .client = v9p, .fid = 0, .n_uname = getuid(), + .rattach.qid = &root_qid + }); - req = v9fs_twalk(v9p, 0, 1, 1, wnames, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rwalk(req, NULL, &wqid); /* We now we'll get one qid */ + twalk({ + .client = v9p, .fid = 0, .newfid = 1, .nwname = 1, .wnames = wnames, + .rwalk.wqid = &wqid /* We now we'll get one qid */ + }); g_assert_cmpmem(&root_qid, 13, wqid[0], 13); - g_free(wqid); g_free(wnames[0]); } static void fs_lopen(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtio9P *v9p = obj; - alloc = t_alloc; - char *const wnames[] = { g_strdup(QTEST_V9FS_SYNTH_LOPEN_FILE) }; - P9Req *req; + v9fs_set_allocator(t_alloc); + char *wnames[] = { g_strdup(QTEST_V9FS_SYNTH_LOPEN_FILE) }; - do_attach(v9p); - req = v9fs_twalk(v9p, 0, 1, 1, wnames, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rwalk(req, NULL, NULL); + tattach({ .client = v9p }); + twalk({ + .client = v9p, .fid = 0, .newfid = 1, .nwname = 1, .wnames = wnames + }); - req = v9fs_tlopen(v9p, 1, O_WRONLY, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rlopen(req, NULL, NULL); + tlopen({ .client = v9p, .fid = 1, .flags = O_WRONLY }); g_free(wnames[0]); } @@ -1024,56 +378,57 @@ static void fs_lopen(void *obj, void *data, QGuestAllocator *t_alloc) static void fs_write(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtio9P *v9p = obj; - alloc = t_alloc; + v9fs_set_allocator(t_alloc); static const uint32_t write_count = P9_MAX_SIZE / 2; - char *const wnames[] = { g_strdup(QTEST_V9FS_SYNTH_WRITE_FILE) }; - char *buf = g_malloc0(write_count); + char *wnames[] = { g_strdup(QTEST_V9FS_SYNTH_WRITE_FILE) }; + g_autofree char *buf = g_malloc0(write_count); uint32_t count; - P9Req *req; - do_attach(v9p); - req = v9fs_twalk(v9p, 0, 1, 1, wnames, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rwalk(req, NULL, NULL); + tattach({ .client = v9p }); + twalk({ + .client = v9p, .fid = 0, .newfid = 1, .nwname = 1, .wnames = wnames + }); - req = v9fs_tlopen(v9p, 1, O_WRONLY, 0); - v9fs_req_wait_for_reply(req, 
NULL); - v9fs_rlopen(req, NULL, NULL); + tlopen({ .client = v9p, .fid = 1, .flags = O_WRONLY }); - req = v9fs_twrite(v9p, 1, 0, write_count, buf, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rwrite(req, &count); + count = twrite({ + .client = v9p, .fid = 1, .offset = 0, .count = write_count, + .data = buf + }).count; g_assert_cmpint(count, ==, write_count); - g_free(buf); g_free(wnames[0]); } static void fs_flush_success(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtio9P *v9p = obj; - alloc = t_alloc; - char *const wnames[] = { g_strdup(QTEST_V9FS_SYNTH_FLUSH_FILE) }; + v9fs_set_allocator(t_alloc); + char *wnames[] = { g_strdup(QTEST_V9FS_SYNTH_FLUSH_FILE) }; P9Req *req, *flush_req; uint32_t reply_len; uint8_t should_block; - do_attach(v9p); - req = v9fs_twalk(v9p, 0, 1, 1, wnames, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rwalk(req, NULL, NULL); + tattach({ .client = v9p }); + twalk({ + .client = v9p, .fid = 0, .newfid = 1, .nwname = 1, .wnames = wnames + }); - req = v9fs_tlopen(v9p, 1, O_WRONLY, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rlopen(req, NULL, NULL); + tlopen({ .client = v9p, .fid = 1, .flags = O_WRONLY }); /* This will cause the 9p server to try to write data to the backend, * until the write request gets cancelled. */ should_block = 1; - req = v9fs_twrite(v9p, 1, 0, sizeof(should_block), &should_block, 0); + req = twrite({ + .client = v9p, .fid = 1, .offset = 0, + .count = sizeof(should_block), .data = &should_block, + .requestOnly = true + }).req; - flush_req = v9fs_tflush(v9p, req->tag, 1); + flush_req = tflush({ + .client = v9p, .oldtag = req->tag, .tag = 1, .requestOnly = true + }).req; /* The write request is supposed to be flushed: the server should just * mark the write request as used and reply to the flush request. @@ -1089,28 +444,32 @@ static void fs_flush_success(void *obj, void *data, QGuestAllocator *t_alloc) static void fs_flush_ignored(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtio9P *v9p = obj; - alloc = t_alloc; - char *const wnames[] = { g_strdup(QTEST_V9FS_SYNTH_FLUSH_FILE) }; + v9fs_set_allocator(t_alloc); + char *wnames[] = { g_strdup(QTEST_V9FS_SYNTH_FLUSH_FILE) }; P9Req *req, *flush_req; uint32_t count; uint8_t should_block; - do_attach(v9p); - req = v9fs_twalk(v9p, 0, 1, 1, wnames, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rwalk(req, NULL, NULL); + tattach({ .client = v9p }); + twalk({ + .client = v9p, .fid = 0, .newfid = 1, .nwname = 1, .wnames = wnames + }); - req = v9fs_tlopen(v9p, 1, O_WRONLY, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rlopen(req, NULL, NULL); + tlopen({ .client = v9p, .fid = 1, .flags = O_WRONLY }); /* This will cause the write request to complete right away, before it * could be actually cancelled. */ should_block = 0; - req = v9fs_twrite(v9p, 1, 0, sizeof(should_block), &should_block, 0); + req = twrite({ + .client = v9p, .fid = 1, .offset = 0, + .count = sizeof(should_block), .data = &should_block, + .requestOnly = true + }).req; - flush_req = v9fs_tflush(v9p, req->tag, 1); + flush_req = tflush({ + .client = v9p, .oldtag = req->tag, .tag = 1, .requestOnly = true + }).req; /* The write request is supposed to complete. The server should * reply to the write request and the flush request. 
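For reference, the conversion pattern applied throughout this file, shown as a minimal sketch rather than part of the patch; both forms are the ones the hunks above add and remove, and req, v9p and wnames are assumed from the surrounding test:

/* old style: build the request, wait for the reply, then decode it */
req = v9fs_twalk(v9p, 0, 1, 1, wnames, 0);
v9fs_req_wait_for_reply(req, NULL);
v9fs_rwalk(req, NULL, NULL);

/* new style: one macro call; reply fields are selected via the option struct */
twalk({
    .client = v9p, .fid = 0, .newfid = 1, .nwname = 1, .wnames = wnames
});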
@@ -1123,107 +482,24 @@ static void fs_flush_ignored(void *obj, void *data, QGuestAllocator *t_alloc) g_free(wnames[0]); } -static void do_mkdir(QVirtio9P *v9p, const char *path, const char *cname) -{ - char *const name = g_strdup(cname); - uint32_t fid; - P9Req *req; - - fid = do_walk(v9p, path); - - req = v9fs_tmkdir(v9p, fid, name, 0750, 0, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rmkdir(req, NULL); - - g_free(name); -} - -/* create a regular file with Tlcreate and return file's fid */ -static uint32_t do_lcreate(QVirtio9P *v9p, const char *path, - const char *cname) -{ - char *const name = g_strdup(cname); - uint32_t fid; - P9Req *req; - - fid = do_walk(v9p, path); - - req = v9fs_tlcreate(v9p, fid, name, 0, 0750, 0, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rlcreate(req, NULL, NULL); - - g_free(name); - return fid; -} - -/* create symlink named @a clink in directory @a path pointing to @a to */ -static void do_symlink(QVirtio9P *v9p, const char *path, const char *clink, - const char *to) -{ - char *const name = g_strdup(clink); - char *const dst = g_strdup(to); - uint32_t fid; - P9Req *req; - - fid = do_walk(v9p, path); - - req = v9fs_tsymlink(v9p, fid, name, dst, 0, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rsymlink(req, NULL); - - g_free(dst); - g_free(name); -} - -/* create a hard link named @a clink in directory @a path pointing to @a to */ -static void do_hardlink(QVirtio9P *v9p, const char *path, const char *clink, - const char *to) -{ - uint32_t dfid, fid; - P9Req *req; - - dfid = do_walk(v9p, path); - fid = do_walk(v9p, to); - - req = v9fs_tlink(v9p, dfid, fid, clink, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_rlink(req); -} - -static void do_unlinkat(QVirtio9P *v9p, const char *atpath, const char *rpath, - uint32_t flags) -{ - char *const name = g_strdup(rpath); - uint32_t fid; - P9Req *req; - - fid = do_walk(v9p, atpath); - - req = v9fs_tunlinkat(v9p, fid, name, flags, 0); - v9fs_req_wait_for_reply(req, NULL); - v9fs_runlinkat(req); - - g_free(name); -} - static void fs_readdir_split_128(void *obj, void *data, QGuestAllocator *t_alloc) { - alloc = t_alloc; + v9fs_set_allocator(t_alloc); do_readdir_split(obj, 128); } static void fs_readdir_split_256(void *obj, void *data, QGuestAllocator *t_alloc) { - alloc = t_alloc; + v9fs_set_allocator(t_alloc); do_readdir_split(obj, 256); } static void fs_readdir_split_512(void *obj, void *data, QGuestAllocator *t_alloc) { - alloc = t_alloc; + v9fs_set_allocator(t_alloc); do_readdir_split(obj, 512); } @@ -1233,157 +509,153 @@ static void fs_readdir_split_512(void *obj, void *data, static void fs_create_dir(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtio9P *v9p = obj; - alloc = t_alloc; + v9fs_set_allocator(t_alloc); struct stat st; - char *root_path = virtio_9p_test_path(""); - char *new_dir = virtio_9p_test_path("01"); + g_autofree char *root_path = virtio_9p_test_path(""); + g_autofree char *new_dir = virtio_9p_test_path("01"); g_assert(root_path != NULL); - do_attach(v9p); - do_mkdir(v9p, "/", "01"); + tattach({ .client = v9p }); + tmkdir({ .client = v9p, .atPath = "/", .name = "01" }); /* check if created directory really exists now ... */ g_assert(stat(new_dir, &st) == 0); /* ... 
and is actually a directory */ g_assert((st.st_mode & S_IFMT) == S_IFDIR); - - g_free(new_dir); - g_free(root_path); } static void fs_unlinkat_dir(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtio9P *v9p = obj; - alloc = t_alloc; + v9fs_set_allocator(t_alloc); struct stat st; - char *root_path = virtio_9p_test_path(""); - char *new_dir = virtio_9p_test_path("02"); + g_autofree char *root_path = virtio_9p_test_path(""); + g_autofree char *new_dir = virtio_9p_test_path("02"); g_assert(root_path != NULL); - do_attach(v9p); - do_mkdir(v9p, "/", "02"); + tattach({ .client = v9p }); + tmkdir({ .client = v9p, .atPath = "/", .name = "02" }); /* check if created directory really exists now ... */ g_assert(stat(new_dir, &st) == 0); /* ... and is actually a directory */ g_assert((st.st_mode & S_IFMT) == S_IFDIR); - do_unlinkat(v9p, "/", "02", AT_REMOVEDIR); + tunlinkat({ + .client = v9p, .atPath = "/", .name = "02", + .flags = P9_DOTL_AT_REMOVEDIR + }); /* directory should be gone now */ g_assert(stat(new_dir, &st) != 0); - - g_free(new_dir); - g_free(root_path); } static void fs_create_file(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtio9P *v9p = obj; - alloc = t_alloc; + v9fs_set_allocator(t_alloc); struct stat st; - char *new_file = virtio_9p_test_path("03/1st_file"); + g_autofree char *new_file = virtio_9p_test_path("03/1st_file"); - do_attach(v9p); - do_mkdir(v9p, "/", "03"); - do_lcreate(v9p, "03", "1st_file"); + tattach({ .client = v9p }); + tmkdir({ .client = v9p, .atPath = "/", .name = "03" }); + tlcreate({ .client = v9p, .atPath = "03", .name = "1st_file" }); /* check if created file exists now ... */ g_assert(stat(new_file, &st) == 0); /* ... and is a regular file */ g_assert((st.st_mode & S_IFMT) == S_IFREG); - - g_free(new_file); } static void fs_unlinkat_file(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtio9P *v9p = obj; - alloc = t_alloc; + v9fs_set_allocator(t_alloc); struct stat st; - char *new_file = virtio_9p_test_path("04/doa_file"); + g_autofree char *new_file = virtio_9p_test_path("04/doa_file"); - do_attach(v9p); - do_mkdir(v9p, "/", "04"); - do_lcreate(v9p, "04", "doa_file"); + tattach({ .client = v9p }); + tmkdir({ .client = v9p, .atPath = "/", .name = "04" }); + tlcreate({ .client = v9p, .atPath = "04", .name = "doa_file" }); /* check if created file exists now ... */ g_assert(stat(new_file, &st) == 0); /* ... 
and is a regular file */ g_assert((st.st_mode & S_IFMT) == S_IFREG); - do_unlinkat(v9p, "04", "doa_file", 0); + tunlinkat({ .client = v9p, .atPath = "04", .name = "doa_file" }); /* file should be gone now */ g_assert(stat(new_file, &st) != 0); - - g_free(new_file); } static void fs_symlink_file(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtio9P *v9p = obj; - alloc = t_alloc; + v9fs_set_allocator(t_alloc); struct stat st; - char *real_file = virtio_9p_test_path("05/real_file"); - char *symlink_file = virtio_9p_test_path("05/symlink_file"); + g_autofree char *real_file = virtio_9p_test_path("05/real_file"); + g_autofree char *symlink_file = virtio_9p_test_path("05/symlink_file"); - do_attach(v9p); - do_mkdir(v9p, "/", "05"); - do_lcreate(v9p, "05", "real_file"); + tattach({ .client = v9p }); + tmkdir({ .client = v9p, .atPath = "/", .name = "05" }); + tlcreate({ .client = v9p, .atPath = "05", .name = "real_file" }); g_assert(stat(real_file, &st) == 0); g_assert((st.st_mode & S_IFMT) == S_IFREG); - do_symlink(v9p, "05", "symlink_file", "real_file"); + tsymlink({ + .client = v9p, .atPath = "05", .name = "symlink_file", + .symtgt = "real_file" + }); /* check if created link exists now */ g_assert(stat(symlink_file, &st) == 0); - - g_free(symlink_file); - g_free(real_file); } static void fs_unlinkat_symlink(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtio9P *v9p = obj; - alloc = t_alloc; + v9fs_set_allocator(t_alloc); struct stat st; - char *real_file = virtio_9p_test_path("06/real_file"); - char *symlink_file = virtio_9p_test_path("06/symlink_file"); + g_autofree char *real_file = virtio_9p_test_path("06/real_file"); + g_autofree char *symlink_file = virtio_9p_test_path("06/symlink_file"); - do_attach(v9p); - do_mkdir(v9p, "/", "06"); - do_lcreate(v9p, "06", "real_file"); + tattach({ .client = v9p }); + tmkdir({ .client = v9p, .atPath = "/", .name = "06" }); + tlcreate({ .client = v9p, .atPath = "06", .name = "real_file" }); g_assert(stat(real_file, &st) == 0); g_assert((st.st_mode & S_IFMT) == S_IFREG); - do_symlink(v9p, "06", "symlink_file", "real_file"); + tsymlink({ + .client = v9p, .atPath = "06", .name = "symlink_file", + .symtgt = "real_file" + }); g_assert(stat(symlink_file, &st) == 0); - do_unlinkat(v9p, "06", "symlink_file", 0); + tunlinkat({ .client = v9p, .atPath = "06", .name = "symlink_file" }); /* symlink should be gone now */ g_assert(stat(symlink_file, &st) != 0); - - g_free(symlink_file); - g_free(real_file); } static void fs_hardlink_file(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtio9P *v9p = obj; - alloc = t_alloc; + v9fs_set_allocator(t_alloc); struct stat st_real, st_link; - char *real_file = virtio_9p_test_path("07/real_file"); - char *hardlink_file = virtio_9p_test_path("07/hardlink_file"); + g_autofree char *real_file = virtio_9p_test_path("07/real_file"); + g_autofree char *hardlink_file = virtio_9p_test_path("07/hardlink_file"); - do_attach(v9p); - do_mkdir(v9p, "/", "07"); - do_lcreate(v9p, "07", "real_file"); + tattach({ .client = v9p }); + tmkdir({ .client = v9p, .atPath = "/", .name = "07" }); + tlcreate({ .client = v9p, .atPath = "07", .name = "real_file" }); g_assert(stat(real_file, &st_real) == 0); g_assert((st_real.st_mode & S_IFMT) == S_IFREG); - do_hardlink(v9p, "07", "hardlink_file", "07/real_file"); + tlink({ + .client = v9p, .atPath = "07", .name = "hardlink_file", + .toPath = "07/real_file" + }); /* check if link exists now ... 
*/ g_assert(stat(hardlink_file, &st_link) == 0); @@ -1391,37 +663,34 @@ static void fs_hardlink_file(void *obj, void *data, QGuestAllocator *t_alloc) g_assert((st_link.st_mode & S_IFMT) == S_IFREG); g_assert(st_link.st_dev == st_real.st_dev); g_assert(st_link.st_ino == st_real.st_ino); - - g_free(hardlink_file); - g_free(real_file); } static void fs_unlinkat_hardlink(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtio9P *v9p = obj; - alloc = t_alloc; + v9fs_set_allocator(t_alloc); struct stat st_real, st_link; - char *real_file = virtio_9p_test_path("08/real_file"); - char *hardlink_file = virtio_9p_test_path("08/hardlink_file"); + g_autofree char *real_file = virtio_9p_test_path("08/real_file"); + g_autofree char *hardlink_file = virtio_9p_test_path("08/hardlink_file"); - do_attach(v9p); - do_mkdir(v9p, "/", "08"); - do_lcreate(v9p, "08", "real_file"); + tattach({ .client = v9p }); + tmkdir({ .client = v9p, .atPath = "/", .name = "08" }); + tlcreate({ .client = v9p, .atPath = "08", .name = "real_file" }); g_assert(stat(real_file, &st_real) == 0); g_assert((st_real.st_mode & S_IFMT) == S_IFREG); - do_hardlink(v9p, "08", "hardlink_file", "08/real_file"); + tlink({ + .client = v9p, .atPath = "08", .name = "hardlink_file", + .toPath = "08/real_file" + }); g_assert(stat(hardlink_file, &st_link) == 0); - do_unlinkat(v9p, "08", "hardlink_file", 0); + tunlinkat({ .client = v9p, .atPath = "08", .name = "hardlink_file" }); /* symlink should be gone now */ g_assert(stat(hardlink_file, &st_link) != 0); /* and old file should still exist */ g_assert(stat(real_file, &st_real) == 0); - - g_free(hardlink_file); - g_free(real_file); } static void *assign_9p_local_driver(GString *cmd_line, void *arg) @@ -1443,8 +712,13 @@ static void register_virtio_9p_test(void) qos_add_test("synth/walk/basic", "virtio-9p", fs_walk, &opts); qos_add_test("synth/walk/no_slash", "virtio-9p", fs_walk_no_slash, &opts); + qos_add_test("synth/walk/none", "virtio-9p", fs_walk_none, &opts); qos_add_test("synth/walk/dotdot_from_root", "virtio-9p", fs_walk_dotdot, &opts); + qos_add_test("synth/walk/non_existent", "virtio-9p", fs_walk_nonexistent, + &opts); + qos_add_test("synth/walk/2nd_non_existent", "virtio-9p", + fs_walk_2nd_nonexistent, &opts); qos_add_test("synth/lopen/basic", "virtio-9p", fs_lopen, &opts); qos_add_test("synth/write/basic", "virtio-9p", fs_write, &opts); qos_add_test("synth/flush/success", "virtio-9p", fs_flush_success, diff --git a/tests/qtest/virtio-blk-test.c b/tests/qtest/virtio-blk-test.c index 2a23698211..19c01f808b 100644 --- a/tests/qtest/virtio-blk-test.c +++ b/tests/qtest/virtio-blk-test.c @@ -33,7 +33,7 @@ typedef struct QVirtioBlkReq { } QVirtioBlkReq; -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN const bool host_is_big_endian = true; #else const bool host_is_big_endian; /* false */ @@ -49,10 +49,10 @@ static void drive_destroy(void *path) static char *drive_create(void) { int fd, ret; - char *t_path = g_strdup("/tmp/qtest.XXXXXX"); + char *t_path; /* Create a temporary raw image */ - fd = mkstemp(t_path); + fd = g_file_open_tmp("qtest.XXXXXX", &t_path, NULL); g_assert_cmpint(fd, >=, 0); ret = ftruncate(fd, TEST_IMAGE_SIZE); g_assert_cmpint(ret, ==, 0); @@ -701,6 +701,11 @@ static void pci_hotplug(void *obj, void *data, QGuestAllocator *t_alloc) QVirtioPCIDevice *dev; QTestState *qts = dev1->pdev->bus->qts; + if (dev1->pdev->bus->not_hotpluggable) { + g_test_skip("pci bus does not support hotplug"); + return; + } + /* plug secondary disk */ qtest_qmp_device_add(qts, "virtio-blk-pci", 
"drv1", "{'addr': %s, 'drive': 'drive1'}", diff --git a/tests/qtest/virtio-iommu-test.c b/tests/qtest/virtio-iommu-test.c new file mode 100644 index 0000000000..068e7a9e6c --- /dev/null +++ b/tests/qtest/virtio-iommu-test.c @@ -0,0 +1,328 @@ +/* + * QTest testcase for VirtIO IOMMU + * + * Copyright (c) 2021 Red Hat, Inc. + * + * Authors: + * Eric Auger + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "libqtest-single.h" +#include "qemu/module.h" +#include "libqos/qgraph.h" +#include "libqos/virtio-iommu.h" +#include "hw/virtio/virtio-iommu.h" + +#define PCI_SLOT_HP 0x06 +#define QVIRTIO_IOMMU_TIMEOUT_US (30 * 1000 * 1000) + +static QGuestAllocator *alloc; + +static void pci_config(void *obj, void *data, QGuestAllocator *t_alloc) +{ + QVirtioIOMMU *v_iommu = obj; + QVirtioDevice *dev = v_iommu->vdev; + uint64_t input_range_start = qvirtio_config_readq(dev, 8); + uint64_t input_range_end = qvirtio_config_readq(dev, 16); + uint32_t domain_range_start = qvirtio_config_readl(dev, 24); + uint32_t domain_range_end = qvirtio_config_readl(dev, 28); + uint8_t bypass = qvirtio_config_readb(dev, 36); + + g_assert_cmpint(input_range_start, ==, 0); + g_assert_cmphex(input_range_end, ==, UINT64_MAX); + g_assert_cmpint(domain_range_start, ==, 0); + g_assert_cmpint(domain_range_end, ==, UINT32_MAX); + g_assert_cmpint(bypass, ==, 1); +} + +static int read_tail_status(struct virtio_iommu_req_tail *buffer) +{ + int i; + + for (i = 0; i < 3; i++) { + g_assert_cmpint(buffer->reserved[i], ==, 0); + } + return buffer->status; +} + +/** + * send_attach_detach - Send an attach/detach command to the device + * @type: VIRTIO_IOMMU_T_ATTACH/VIRTIO_IOMMU_T_DETACH + * @domain: domain the endpoint is attached to + * @ep: endpoint + */ +static int send_attach_detach(QTestState *qts, QVirtioIOMMU *v_iommu, + uint8_t type, uint32_t domain, uint32_t ep) +{ + QVirtioDevice *dev = v_iommu->vdev; + QVirtQueue *vq = v_iommu->vq; + uint64_t ro_addr, wr_addr; + uint32_t free_head; + struct virtio_iommu_req_attach req = {}; /* same layout as detach */ + size_t ro_size = sizeof(req) - sizeof(struct virtio_iommu_req_tail); + size_t wr_size = sizeof(struct virtio_iommu_req_tail); + struct virtio_iommu_req_tail buffer; + int ret; + + req.head.type = type; + req.domain = cpu_to_le32(domain); + req.endpoint = cpu_to_le32(ep); + + ro_addr = guest_alloc(alloc, ro_size); + wr_addr = guest_alloc(alloc, wr_size); + + qtest_memwrite(qts, ro_addr, &req, ro_size); + free_head = qvirtqueue_add(qts, vq, ro_addr, ro_size, false, true); + qvirtqueue_add(qts, vq, wr_addr, wr_size, true, false); + qvirtqueue_kick(qts, dev, vq, free_head); + qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL, + QVIRTIO_IOMMU_TIMEOUT_US); + qtest_memread(qts, wr_addr, &buffer, wr_size); + ret = read_tail_status(&buffer); + guest_free(alloc, ro_addr); + guest_free(alloc, wr_addr); + return ret; +} + +/** + * send_map - Send a map command to the device + * @domain: domain the new mapping is attached to + * @virt_start: iova start + * @virt_end: iova end + * @phys_start: base physical address + * @flags: mapping flags + */ +static int send_map(QTestState *qts, QVirtioIOMMU *v_iommu, + uint32_t domain, uint64_t virt_start, uint64_t virt_end, + uint64_t phys_start, uint32_t flags) +{ + QVirtioDevice *dev = v_iommu->vdev; + QVirtQueue *vq = v_iommu->vq; + uint64_t ro_addr, wr_addr; + uint32_t free_head; + struct 
virtio_iommu_req_map req;
+    size_t ro_size = sizeof(req) - sizeof(struct virtio_iommu_req_tail);
+    size_t wr_size = sizeof(struct virtio_iommu_req_tail);
+    struct virtio_iommu_req_tail buffer;
+    int ret;
+
+    req.head.type = VIRTIO_IOMMU_T_MAP;
+    req.domain = cpu_to_le32(domain);
+    req.virt_start = cpu_to_le64(virt_start);
+    req.virt_end = cpu_to_le64(virt_end);
+    req.phys_start = cpu_to_le64(phys_start);
+    req.flags = cpu_to_le32(flags);
+
+    ro_addr = guest_alloc(alloc, ro_size);
+    wr_addr = guest_alloc(alloc, wr_size);
+
+    qtest_memwrite(qts, ro_addr, &req, ro_size);
+    free_head = qvirtqueue_add(qts, vq, ro_addr, ro_size, false, true);
+    qvirtqueue_add(qts, vq, wr_addr, wr_size, true, false);
+    qvirtqueue_kick(qts, dev, vq, free_head);
+    qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
+                           QVIRTIO_IOMMU_TIMEOUT_US);
+    qtest_memread(qts, wr_addr, &buffer, wr_size);
+    ret = read_tail_status(&buffer);
+    guest_free(alloc, ro_addr);
+    guest_free(alloc, wr_addr);
+    return ret;
+}
+
+/**
+ * send_unmap - Send an unmap command to the device
+ * @domain: domain the mapping is attached to
+ * @virt_start: iova start
+ * @virt_end: iova end
+ */
+static int send_unmap(QTestState *qts, QVirtioIOMMU *v_iommu,
+                      uint32_t domain, uint64_t virt_start, uint64_t virt_end)
+{
+    QVirtioDevice *dev = v_iommu->vdev;
+    QVirtQueue *vq = v_iommu->vq;
+    uint64_t ro_addr, wr_addr;
+    uint32_t free_head;
+    struct virtio_iommu_req_unmap req;
+    size_t ro_size = sizeof(req) - sizeof(struct virtio_iommu_req_tail);
+    size_t wr_size = sizeof(struct virtio_iommu_req_tail);
+    struct virtio_iommu_req_tail buffer;
+    int ret;
+
+    req.head.type = VIRTIO_IOMMU_T_UNMAP;
+    req.domain = cpu_to_le32(domain);
+    req.virt_start = cpu_to_le64(virt_start);
+    req.virt_end = cpu_to_le64(virt_end);
+
+    ro_addr = guest_alloc(alloc, ro_size);
+    wr_addr = guest_alloc(alloc, wr_size);
+
+    qtest_memwrite(qts, ro_addr, &req, ro_size);
+    free_head = qvirtqueue_add(qts, vq, ro_addr, ro_size, false, true);
+    qvirtqueue_add(qts, vq, wr_addr, wr_size, true, false);
+    qvirtqueue_kick(qts, dev, vq, free_head);
+    qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
+                           QVIRTIO_IOMMU_TIMEOUT_US);
+    qtest_memread(qts, wr_addr, &buffer, wr_size);
+    ret = read_tail_status(&buffer);
+    guest_free(alloc, ro_addr);
+    guest_free(alloc, wr_addr);
+    return ret;
+}
+
+static void test_attach_detach(void *obj, void *data, QGuestAllocator *t_alloc)
+{
+    QVirtioIOMMU *v_iommu = obj;
+    QTestState *qts = global_qtest;
+    int ret;
+
+    alloc = t_alloc;
+
+    /* type, domain, ep */
+
+    /* attach ep0 to domain 0 */
+    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 0, 0);
+    g_assert_cmpint(ret, ==, 0);
+
+    /* attach a non-existent device */
+    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 0, 444);
+    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_NOENT);
+
+    /* detach a non-existent device (1) */
+    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_DETACH, 0, 1);
+    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_NOENT);
+
+    /* move ep0 from domain 0 to domain 1 */
+    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 1, 0);
+    g_assert_cmpint(ret, ==, 0);
+
+    /* detach ep0 from domain 0 */
+    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_DETACH, 0, 0);
+    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_INVAL);
+
+    /* detach ep0 from domain 1 */
+    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_DETACH, 1, 0);
+    g_assert_cmpint(ret, ==, 0);
+
+    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 1, 0);
+    g_assert_cmpint(ret, ==, 0);
+    ret = send_map(qts, v_iommu, 1, 0x0, 0xFFF, 0xa1000,
+                   VIRTIO_IOMMU_MAP_F_READ);
+    g_assert_cmpint(ret, ==, 0);
+    ret = send_map(qts, v_iommu, 1, 0x2000, 0x2FFF, 0xb1000,
+                   VIRTIO_IOMMU_MAP_F_READ);
+    g_assert_cmpint(ret, ==, 0);
+    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_DETACH, 1, 0);
+    g_assert_cmpint(ret, ==, 0);
+}
+
+/* Test map/unmap scenarios documented in the spec */
+static void test_map_unmap(void *obj, void *data, QGuestAllocator *t_alloc)
+{
+    QVirtioIOMMU *v_iommu = obj;
+    QTestState *qts = global_qtest;
+    int ret;
+
+    alloc = t_alloc;
+
+    /* attach ep0 to domain 1 */
+    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 1, 0);
+    g_assert_cmpint(ret, ==, 0);
+
+    ret = send_map(qts, v_iommu, 0, 0, 0xFFF, 0xa1000, VIRTIO_IOMMU_MAP_F_READ);
+    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_NOENT);
+
+    /* domain, virt start, virt end, phys start, flags */
+    ret = send_map(qts, v_iommu, 1, 0x0, 0xFFF, 0xa1000, VIRTIO_IOMMU_MAP_F_READ);
+    g_assert_cmpint(ret, ==, 0);
+
+    /* send a new mapping overlapping the previous one */
+    ret = send_map(qts, v_iommu, 1, 0, 0xFFFF, 0xb1000, VIRTIO_IOMMU_MAP_F_READ);
+    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_INVAL);
+
+    ret = send_unmap(qts, v_iommu, 4, 0x10, 0xFFF);
+    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_NOENT);
+
+    ret = send_unmap(qts, v_iommu, 1, 0x10, 0xFFF);
+    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_RANGE);
+
+    ret = send_unmap(qts, v_iommu, 1, 0, 0x1000);
+    g_assert_cmpint(ret, ==, 0); /* unmap everything */
+
+    /* Spec example sequence */
+
+    /* 1 */
+    ret = send_unmap(qts, v_iommu, 1, 0, 4);
+    g_assert_cmpint(ret, ==, 0); /* doesn't unmap anything */
+
+    /* 2 */
+    ret = send_map(qts, v_iommu, 1, 0, 9, 0xa1000, VIRTIO_IOMMU_MAP_F_READ);
+    g_assert_cmpint(ret, ==, 0);
+    ret = send_unmap(qts, v_iommu, 1, 0, 9);
+    g_assert_cmpint(ret, ==, 0); /* unmaps [0,9] */
+
+    /* 3 */
+    ret = send_map(qts, v_iommu, 1, 0, 4, 0xb1000, VIRTIO_IOMMU_MAP_F_READ);
+    g_assert_cmpint(ret, ==, 0);
+    ret = send_map(qts, v_iommu, 1, 5, 9, 0xb2000, VIRTIO_IOMMU_MAP_F_READ);
+    g_assert_cmpint(ret, ==, 0);
+    ret = send_unmap(qts, v_iommu, 1, 0, 9);
+    g_assert_cmpint(ret, ==, 0); /* unmaps [0,4] and [5,9] */
+
+    /* 4 */
+    ret = send_map(qts, v_iommu, 1, 0, 9, 0xc1000, VIRTIO_IOMMU_MAP_F_READ);
+    g_assert_cmpint(ret, ==, 0);
+
+    ret = send_unmap(qts, v_iommu, 1, 0, 4);
+    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_RANGE); /* doesn't unmap anything */
+
+    ret = send_unmap(qts, v_iommu, 1, 0, 10);
+    g_assert_cmpint(ret, ==, 0);
+
+    /* 5 */
+    ret = send_map(qts, v_iommu, 1, 0, 4, 0xd1000, VIRTIO_IOMMU_MAP_F_READ);
+    g_assert_cmpint(ret, ==, 0);
+    ret = send_map(qts, v_iommu, 1, 5, 9, 0xd2000, VIRTIO_IOMMU_MAP_F_READ);
+    g_assert_cmpint(ret, ==, 0);
+    ret = send_unmap(qts, v_iommu, 1, 0, 4);
+    g_assert_cmpint(ret, ==, 0); /* unmaps [0,4] */
+
+    ret = send_unmap(qts, v_iommu, 1, 5, 9);
+    g_assert_cmpint(ret, ==, 0);
+
+    /* 6 */
+    ret = send_map(qts, v_iommu, 1, 0, 4, 0xe2000, VIRTIO_IOMMU_MAP_F_READ);
+    g_assert_cmpint(ret, ==, 0);
+    ret = send_unmap(qts, v_iommu, 1, 0, 9);
+    g_assert_cmpint(ret, ==, 0); /* unmaps [0,4] */
+
+    /* 7 */
+    ret = send_map(qts, v_iommu, 1, 0, 4, 0xf2000, VIRTIO_IOMMU_MAP_F_READ);
+    g_assert_cmpint(ret, ==, 0);
+    ret = send_map(qts, v_iommu, 1, 10, 14, 0xf3000, VIRTIO_IOMMU_MAP_F_READ);
+    g_assert_cmpint(ret, ==, 0);
+    ret = send_unmap(qts, v_iommu, 1, 0, 14);
+    g_assert_cmpint(ret, ==, 0); /* unmaps [0,4] and [10,14] */
+
+    ret = send_map(qts, v_iommu, 1, 10, 14, 0xf3000, VIRTIO_IOMMU_MAP_F_READ);
+    g_assert_cmpint(ret, ==, 0);
+    ret
= send_map(qts, v_iommu, 1, 0, 4, 0xf2000, VIRTIO_IOMMU_MAP_F_READ); + g_assert_cmpint(ret, ==, 0); + ret = send_unmap(qts, v_iommu, 1, 0, 4); + g_assert_cmpint(ret, ==, 0); /* only unmaps [0,4] */ + ret = send_map(qts, v_iommu, 1, 10, 14, 0xf3000, VIRTIO_IOMMU_MAP_F_READ); + g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_INVAL); /* 10-14 still is mapped */ +} + +static void register_virtio_iommu_test(void) +{ + qos_add_test("config", "virtio-iommu", pci_config, NULL); + qos_add_test("attach_detach", "virtio-iommu", test_attach_detach, NULL); + qos_add_test("map_unmap", "virtio-iommu", test_map_unmap, NULL); +} + +libqos_init(register_virtio_iommu_test); diff --git a/tests/qtest/virtio-net-failover.c b/tests/qtest/virtio-net-failover.c new file mode 100644 index 0000000000..4a809590bf --- /dev/null +++ b/tests/qtest/virtio-net-failover.c @@ -0,0 +1,1903 @@ +/* + * QTest testcase for virtio-net failover + * + * See docs/system/virtio-net-failover.rst + * + * Copyright (c) 2021 Red Hat, Inc. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "libqtest.h" +#include "libqos/pci.h" +#include "libqos/pci-pc.h" +#include "qapi/qmp/qdict.h" +#include "qapi/qmp/qlist.h" +#include "qapi/qmp/qjson.h" +#include "libqos/malloc-pc.h" +#include "libqos/virtio-pci.h" +#include "hw/pci/pci.h" + +#define VIRTIO_NET_F_STANDBY 62 + +#define ACPI_PCIHP_ADDR_ICH9 0x0cc0 +#define PCI_EJ_BASE 0x0008 +#define PCI_SEL_BASE 0x0010 + +#define BASE_MACHINE "-M q35 -nodefaults " \ + "-device pcie-root-port,id=root0,addr=0x1,bus=pcie.0,chassis=1 " \ + "-device pcie-root-port,id=root1,addr=0x2,bus=pcie.0,chassis=2 " + +#define MAC_PRIMARY0 "52:54:00:11:11:11" +#define MAC_STANDBY0 "52:54:00:22:22:22" +#define MAC_PRIMARY1 "52:54:00:33:33:33" +#define MAC_STANDBY1 "52:54:00:44:44:44" + +static QGuestAllocator guest_malloc; +static QPCIBus *pcibus; + +static QTestState *machine_start(const char *args, int numbus) +{ + QTestState *qts; + QPCIDevice *dev; + int bus; + + qts = qtest_init(args); + + pc_alloc_init(&guest_malloc, qts, 0); + pcibus = qpci_new_pc(qts, &guest_malloc); + g_assert(qpci_secondary_buses_init(pcibus) == numbus); + + for (bus = 1; bus <= numbus; bus++) { + dev = qpci_device_find(pcibus, QPCI_DEVFN(bus, 0)); + g_assert_nonnull(dev); + + qpci_device_enable(dev); + qpci_iomap(dev, 4, NULL); + + g_free(dev); + } + + return qts; +} + +static void machine_stop(QTestState *qts) +{ + qpci_free_pc(pcibus); + alloc_destroy(&guest_malloc); + qtest_quit(qts); +} + +static void test_error_id(void) +{ + QTestState *qts; + QDict *resp; + QDict *err; + + qts = machine_start(BASE_MACHINE + "-device virtio-net,bus=root0,id=standby0,failover=on", + 2); + + resp = qtest_qmp(qts, "{'execute': 'device_add'," + "'arguments': {" + "'driver': 'virtio-net'," + "'bus': 'root1'," + "'failover_pair_id': 'standby0'" + "} }"); + g_assert(qdict_haskey(resp, "error")); + + err = qdict_get_qdict(resp, "error"); + g_assert(qdict_haskey(err, "desc")); + + g_assert_cmpstr(qdict_get_str(err, "desc"), ==, + "Device with failover_pair_id needs to have id"); + + qobject_unref(resp); + + machine_stop(qts); +} + +static void test_error_pcie(void) +{ + QTestState *qts; + QDict *resp; + QDict *err; + + qts = machine_start(BASE_MACHINE + "-device virtio-net,bus=root0,id=standby0,failover=on", + 2); + + resp = qtest_qmp(qts, "{'execute': 'device_add'," + "'arguments': {" + "'driver': 'virtio-net'," + "'id': 'primary0'," + "'bus': 'pcie.0'," + "'failover_pair_id': 'standby0'" + "} }"); + g_assert(qdict_haskey(resp, 
"error")); + + err = qdict_get_qdict(resp, "error"); + g_assert(qdict_haskey(err, "desc")); + + g_assert_cmpstr(qdict_get_str(err, "desc"), ==, + "Bus 'pcie.0' does not support hotplugging"); + + qobject_unref(resp); + + machine_stop(qts); +} + +static QDict *find_device(QDict *bus, const char *name) +{ + const QObject *obj; + QList *devices; + QList *list; + + devices = qdict_get_qlist(bus, "devices"); + if (devices == NULL) { + return NULL; + } + + list = qlist_copy(devices); + while ((obj = qlist_pop(list))) { + QDict *device; + + device = qobject_to(QDict, obj); + + if (qdict_haskey(device, "pci_bridge")) { + QDict *bridge; + QDict *bridge_device; + + bridge = qdict_get_qdict(device, "pci_bridge"); + + if (qdict_haskey(bridge, "devices")) { + bridge_device = find_device(bridge, name); + if (bridge_device) { + qobject_unref(device); + qobject_unref(list); + return bridge_device; + } + } + } + + if (!qdict_haskey(device, "qdev_id")) { + qobject_unref(device); + continue; + } + + if (strcmp(qdict_get_str(device, "qdev_id"), name) == 0) { + qobject_unref(list); + return device; + } + qobject_unref(device); + } + qobject_unref(list); + + return NULL; +} + +static QDict *get_bus(QTestState *qts, int num) +{ + QObject *obj; + QDict *resp; + QList *ret; + + resp = qtest_qmp(qts, "{ 'execute': 'query-pci' }"); + g_assert(qdict_haskey(resp, "return")); + + ret = qdict_get_qlist(resp, "return"); + g_assert_nonnull(ret); + + while ((obj = qlist_pop(ret))) { + QDict *bus; + + bus = qobject_to(QDict, obj); + if (!qdict_haskey(bus, "bus")) { + qobject_unref(bus); + continue; + } + if (qdict_get_int(bus, "bus") == num) { + qobject_unref(resp); + return bus; + } + qobject_ref(bus); + } + qobject_unref(resp); + + return NULL; +} + +static char *get_mac(QTestState *qts, const char *name) +{ + QDict *resp; + char *mac; + + resp = qtest_qmp(qts, "{ 'execute': 'qom-get', " + "'arguments': { " + "'path': %s, " + "'property': 'mac' } }", name); + + g_assert(qdict_haskey(resp, "return")); + + mac = g_strdup(qdict_get_str(resp, "return")); + + qobject_unref(resp); + + return mac; +} + +#define check_one_card(qts, present, id, mac) \ +do { \ + QDict *device; \ + QDict *bus; \ + char *addr; \ + bus = get_bus(qts, 0); \ + device = find_device(bus, id); \ + if (present) { \ + char *path; \ + g_assert_nonnull(device); \ + qobject_unref(device); \ + path = g_strdup_printf("/machine/peripheral/%s", id); \ + addr = get_mac(qts, path); \ + g_free(path); \ + g_assert_cmpstr(mac, ==, addr); \ + g_free(addr); \ + } else { \ + g_assert_null(device); \ + } \ + qobject_unref(bus); \ +} while (0) + +static QDict *get_failover_negociated_event(QTestState *qts) +{ + QDict *resp; + QDict *data; + + resp = qtest_qmp_eventwait_ref(qts, "FAILOVER_NEGOTIATED"); + g_assert(qdict_haskey(resp, "data")); + + data = qdict_get_qdict(resp, "data"); + g_assert(qdict_haskey(data, "device-id")); + qobject_ref(data); + qobject_unref(resp); + + return data; +} + +static QVirtioPCIDevice *start_virtio_net_internal(QTestState *qts, + int bus, int slot, + uint64_t *features) +{ + QVirtioPCIDevice *dev; + QPCIAddress addr; + + addr.devfn = QPCI_DEVFN((bus << 5) + slot, 0); + dev = virtio_pci_new(pcibus, &addr); + g_assert_nonnull(dev); + qvirtio_pci_device_enable(dev); + qvirtio_start_device(&dev->vdev); + *features &= qvirtio_get_features(&dev->vdev); + qvirtio_set_features(&dev->vdev, *features); + qvirtio_set_driver_ok(&dev->vdev); + return dev; +} + +static QVirtioPCIDevice *start_virtio_net(QTestState *qts, int bus, int slot, + const char *id, 
bool failover) +{ + QVirtioPCIDevice *dev; + uint64_t features; + + features = ~(QVIRTIO_F_BAD_FEATURE | + (1ull << VIRTIO_RING_F_INDIRECT_DESC) | + (1ull << VIRTIO_RING_F_EVENT_IDX)); + + dev = start_virtio_net_internal(qts, bus, slot, &features); + + g_assert(!!(features & (1ull << VIRTIO_NET_F_STANDBY)) == failover); + + if (failover) { + QDict *resp; + + resp = get_failover_negociated_event(qts); + g_assert_cmpstr(qdict_get_str(resp, "device-id"), ==, id); + qobject_unref(resp); + } + + return dev; +} + +static void test_on(void) +{ + QTestState *qts; + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-device virtio-net,bus=root0,id=standby0," + "failover=on,netdev=hs0,mac="MAC_STANDBY0" " + "-netdev user,id=hs1 " + "-device virtio-net,bus=root1,id=primary0," + "failover_pair_id=standby0,netdev=hs1,mac="MAC_PRIMARY0, + 2); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + machine_stop(qts); +} + +static void test_on_mismatch(void) +{ + QTestState *qts; + QVirtioPCIDevice *vdev; + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-device virtio-net,bus=root0,id=standby0," + "failover=on,netdev=hs0,mac="MAC_STANDBY0" " + "-netdev user,id=hs1 " + "-device virtio-net,bus=root1,id=primary0," + "failover_pair_id=standby1,netdev=hs1,mac="MAC_PRIMARY0, + 2); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + vdev = start_virtio_net(qts, 1, 0, "standby0", true); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + qos_object_destroy((QOSGraphObject *)vdev); + machine_stop(qts); +} + +static void test_off(void) +{ + QTestState *qts; + QVirtioPCIDevice *vdev; + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-device virtio-net,bus=root0,id=standby0," + "failover=off,netdev=hs0,mac="MAC_STANDBY0" " + "-netdev user,id=hs1 " + "-device virtio-net,bus=root1,id=primary0," + "failover_pair_id=standby0,netdev=hs1,mac="MAC_PRIMARY0, + 2); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + vdev = start_virtio_net(qts, 1, 0, "standby0", false); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + qos_object_destroy((QOSGraphObject *)vdev); + machine_stop(qts); +} + +static void test_enabled(void) +{ + QTestState *qts; + QVirtioPCIDevice *vdev; + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-device virtio-net,bus=root0,id=standby0," + "failover=on,netdev=hs0,mac="MAC_STANDBY0" " + "-netdev user,id=hs1 " + "-device virtio-net,bus=root1,id=primary0," + "failover_pair_id=standby0,netdev=hs1,mac="MAC_PRIMARY0" ", + 2); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + vdev = start_virtio_net(qts, 1, 0, "standby0", true); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + qos_object_destroy((QOSGraphObject *)vdev); + machine_stop(qts); +} + +static void test_guest_off(void) +{ + QTestState *qts; + QVirtioPCIDevice *vdev; + uint64_t features; + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-device virtio-net,bus=root0,id=standby0," + "failover=on,netdev=hs0,mac="MAC_STANDBY0" " + "-netdev user,id=hs1 " + "-device virtio-net,bus=root1,id=primary0," + 
"failover_pair_id=standby0,netdev=hs1,mac="MAC_PRIMARY0" ", + 2); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + features = ~(QVIRTIO_F_BAD_FEATURE | + (1ull << VIRTIO_RING_F_INDIRECT_DESC) | + (1ull << VIRTIO_RING_F_EVENT_IDX) | + (1ull << VIRTIO_NET_F_STANDBY)); + + vdev = start_virtio_net_internal(qts, 1, 0, &features); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qos_object_destroy((QOSGraphObject *)vdev); + machine_stop(qts); +} + +static void test_hotplug_1(void) +{ + QTestState *qts; + QVirtioPCIDevice *vdev; + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-device virtio-net,bus=root0,id=standby0," + "failover=on,netdev=hs0,mac="MAC_STANDBY0" " + "-netdev user,id=hs1 ", 2); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + vdev = start_virtio_net(qts, 1, 0, "standby0", true); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "primary0", + "{'bus': 'root1'," + "'failover_pair_id': 'standby0'," + "'netdev': 'hs1'," + "'mac': '"MAC_PRIMARY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + qos_object_destroy((QOSGraphObject *)vdev); + machine_stop(qts); +} + +static void test_hotplug_1_reverse(void) +{ + QTestState *qts; + QVirtioPCIDevice *vdev; + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-netdev user,id=hs1 " + "-device virtio-net,bus=root1,id=primary0," + "failover_pair_id=standby0,netdev=hs1,mac="MAC_PRIMARY0" ", + 2); + + check_one_card(qts, false, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "standby0", + "{'bus': 'root0'," + "'failover': 'on'," + "'netdev': 'hs0'," + "'mac': '"MAC_STANDBY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + vdev = start_virtio_net(qts, 1, 0, "standby0", true); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + qos_object_destroy((QOSGraphObject *)vdev); + machine_stop(qts); +} + +static void test_hotplug_2(void) +{ + QTestState *qts; + QVirtioPCIDevice *vdev; + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-netdev user,id=hs1 ", + 2); + + check_one_card(qts, false, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "standby0", + "{'bus': 'root0'," + "'failover': 'on'," + "'netdev': 'hs0'," + "'mac': '"MAC_STANDBY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + vdev = start_virtio_net(qts, 1, 0, "standby0", true); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "primary0", + "{'bus': 'root1'," + "'failover_pair_id': 'standby0'," + "'netdev': 'hs1'," + "'mac': '"MAC_PRIMARY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + qos_object_destroy((QOSGraphObject *)vdev); + machine_stop(qts); +} + +static void test_hotplug_2_reverse(void) +{ + QTestState *qts; + QVirtioPCIDevice 
*vdev; + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-netdev user,id=hs1 ", + 2); + + check_one_card(qts, false, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "primary0", + "{'bus': 'root1'," + "'failover_pair_id': 'standby0'," + "'netdev': 'hs1'," + "'mac': '"MAC_PRIMARY0"'}"); + + check_one_card(qts, false, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "standby0", + "{'bus': 'root0'," + "'failover': 'on'," + "'netdev': 'hs0'," + "'rombar': 0," + "'romfile': ''," + "'mac': '"MAC_STANDBY0"'}"); + + /* + * XXX: sounds like a bug: + * The primary should be hidden until the virtio-net driver + * negotiates the VIRTIO_NET_F_STANDBY feature by start_virtio_net() + */ + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + vdev = start_virtio_net(qts, 1, 0, "standby0", true); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + qos_object_destroy((QOSGraphObject *)vdev); + machine_stop(qts); +} + +#ifndef _WIN32 +static QDict *migrate_status(QTestState *qts) +{ + QDict *resp, *ret; + + resp = qtest_qmp(qts, "{ 'execute': 'query-migrate' }"); + g_assert(qdict_haskey(resp, "return")); + + ret = qdict_get_qdict(resp, "return"); + g_assert(qdict_haskey(ret, "status")); + qobject_ref(ret); + qobject_unref(resp); + + return ret; +} + +static QDict *get_unplug_primary_event(QTestState *qts) +{ + QDict *resp; + QDict *data; + + resp = qtest_qmp_eventwait_ref(qts, "UNPLUG_PRIMARY"); + g_assert(qdict_haskey(resp, "data")); + + data = qdict_get_qdict(resp, "data"); + g_assert(qdict_haskey(data, "device-id")); + qobject_ref(data); + qobject_unref(resp); + + return data; +} + +static void test_migrate_out(gconstpointer opaque) +{ + QTestState *qts; + QDict *resp, *args, *ret; + g_autofree gchar *uri = g_strdup_printf("exec: cat > %s", (gchar *)opaque); + const gchar *status; + QVirtioPCIDevice *vdev; + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-netdev user,id=hs1 ", + 2); + + check_one_card(qts, false, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "standby0", + "{'bus': 'root0'," + "'failover': 'on'," + "'netdev': 'hs0'," + "'mac': '"MAC_STANDBY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + vdev = start_virtio_net(qts, 1, 0, "standby0", true); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "primary0", + "{'bus': 'root1'," + "'failover_pair_id': 'standby0'," + "'netdev': 'hs1'," + "'rombar': 0," + "'romfile': ''," + "'mac': '"MAC_PRIMARY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + args = qdict_from_jsonf_nofail("{}"); + g_assert_nonnull(args); + qdict_put_str(args, "uri", uri); + + resp = qtest_qmp(qts, "{ 'execute': 'migrate', 'arguments': %p}", args); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + + /* the event is sent when QEMU asks the OS to unplug the card */ + resp = get_unplug_primary_event(qts); + g_assert_cmpstr(qdict_get_str(resp, "device-id"), ==, "primary0"); + qobject_unref(resp); + + /* wait the end of the migration 
setup phase */ + while (true) { + ret = migrate_status(qts); + + status = qdict_get_str(ret, "status"); + if (strcmp(status, "wait-unplug") == 0) { + qobject_unref(ret); + break; + } + + /* The migration must not start if the card is not ejected */ + g_assert_cmpstr(status, !=, "active"); + g_assert_cmpstr(status, !=, "completed"); + g_assert_cmpstr(status, !=, "failed"); + g_assert_cmpstr(status, !=, "cancelling"); + g_assert_cmpstr(status, !=, "cancelled"); + + qobject_unref(ret); + } + + if (g_test_slow()) { + /* check we stay in wait-unplug while the card is not ejected */ + for (int i = 0; i < 5; i++) { + sleep(1); + ret = migrate_status(qts); + status = qdict_get_str(ret, "status"); + g_assert_cmpstr(status, ==, "wait-unplug"); + qobject_unref(ret); + } + } + + /* OS unplugs the cards, QEMU can move from wait-unplug state */ + qtest_outl(qts, ACPI_PCIHP_ADDR_ICH9 + PCI_EJ_BASE, 1); + + while (true) { + ret = migrate_status(qts); + + status = qdict_get_str(ret, "status"); + if (strcmp(status, "completed") == 0) { + qobject_unref(ret); + break; + } + g_assert_cmpstr(status, !=, "failed"); + g_assert_cmpstr(status, !=, "cancelling"); + g_assert_cmpstr(status, !=, "cancelled"); + qobject_unref(ret); + } + + qtest_qmp_eventwait(qts, "STOP"); + + /* + * in fact, the card is ejected from the point of view of kernel + * but not really from QEMU to be able to hotplug it back if + * migration fails. So we can't check that: + * check_one_card(qts, true, "standby0", MAC_STANDBY0); + * check_one_card(qts, false, "primary0", MAC_PRIMARY0); + */ + + qos_object_destroy((QOSGraphObject *)vdev); + machine_stop(qts); +} + +static QDict *get_migration_event(QTestState *qts) +{ + QDict *resp; + QDict *data; + + resp = qtest_qmp_eventwait_ref(qts, "MIGRATION"); + g_assert(qdict_haskey(resp, "data")); + + data = qdict_get_qdict(resp, "data"); + g_assert(qdict_haskey(data, "status")); + qobject_ref(data); + qobject_unref(resp); + + return data; +} + +static void test_migrate_in(gconstpointer opaque) +{ + QTestState *qts; + QDict *resp, *args, *ret; + g_autofree gchar *uri = g_strdup_printf("exec: cat %s", (gchar *)opaque); + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-netdev user,id=hs1 " + "-incoming defer ", + 2); + + check_one_card(qts, false, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "standby0", + "{'bus': 'root0'," + "'failover': 'on'," + "'netdev': 'hs0'," + "'mac': '"MAC_STANDBY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "primary0", + "{'bus': 'root1'," + "'failover_pair_id': 'standby0'," + "'netdev': 'hs1'," + "'rombar': 0," + "'romfile': ''," + "'mac': '"MAC_PRIMARY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + args = qdict_from_jsonf_nofail("{}"); + g_assert_nonnull(args); + qdict_put_str(args, "uri", uri); + + resp = qtest_qmp(qts, "{ 'execute': 'migrate-incoming', 'arguments': %p}", + args); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + + resp = get_migration_event(qts); + g_assert_cmpstr(qdict_get_str(resp, "status"), ==, "setup"); + qobject_unref(resp); + + resp = get_failover_negociated_event(qts); + g_assert_cmpstr(qdict_get_str(resp, "device-id"), ==, "standby0"); + qobject_unref(resp); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, 
"primary0", MAC_PRIMARY0); + + qtest_qmp_eventwait(qts, "RESUME"); + + ret = migrate_status(qts); + g_assert_cmpstr(qdict_get_str(ret, "status"), ==, "completed"); + qobject_unref(ret); + + machine_stop(qts); +} + +static void test_off_migrate_out(gconstpointer opaque) +{ + QTestState *qts; + QDict *resp, *args, *ret; + g_autofree gchar *uri = g_strdup_printf("exec: cat > %s", (gchar *)opaque); + const gchar *status; + QVirtioPCIDevice *vdev; + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-netdev user,id=hs1 ", + 2); + + check_one_card(qts, false, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "standby0", + "{'bus': 'root0'," + "'failover': 'off'," + "'netdev': 'hs0'," + "'mac': '"MAC_STANDBY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "primary0", + "{'bus': 'root1'," + "'failover_pair_id': 'standby0'," + "'netdev': 'hs1'," + "'rombar': 0," + "'romfile': ''," + "'mac': '"MAC_PRIMARY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + vdev = start_virtio_net(qts, 1, 0, "standby0", false); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + args = qdict_from_jsonf_nofail("{}"); + g_assert_nonnull(args); + qdict_put_str(args, "uri", uri); + + resp = qtest_qmp(qts, "{ 'execute': 'migrate', 'arguments': %p}", args); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + + while (true) { + ret = migrate_status(qts); + + status = qdict_get_str(ret, "status"); + if (strcmp(status, "completed") == 0) { + qobject_unref(ret); + break; + } + g_assert_cmpstr(status, !=, "failed"); + g_assert_cmpstr(status, !=, "cancelling"); + g_assert_cmpstr(status, !=, "cancelled"); + qobject_unref(ret); + } + + qtest_qmp_eventwait(qts, "STOP"); + + qos_object_destroy((QOSGraphObject *)vdev); + machine_stop(qts); +} + +static void test_off_migrate_in(gconstpointer opaque) +{ + QTestState *qts; + QDict *resp, *args, *ret; + g_autofree gchar *uri = g_strdup_printf("exec: cat %s", (gchar *)opaque); + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-netdev user,id=hs1 " + "-incoming defer ", + 2); + + check_one_card(qts, false, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "standby0", + "{'bus': 'root0'," + "'failover': 'off'," + "'netdev': 'hs0'," + "'mac': '"MAC_STANDBY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "primary0", + "{'bus': 'root1'," + "'failover_pair_id': 'standby0'," + "'netdev': 'hs1'," + "'rombar': 0," + "'romfile': ''," + "'mac': '"MAC_PRIMARY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + args = qdict_from_jsonf_nofail("{}"); + g_assert_nonnull(args); + qdict_put_str(args, "uri", uri); + + resp = qtest_qmp(qts, "{ 'execute': 'migrate-incoming', 'arguments': %p}", + args); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + + resp = get_migration_event(qts); + g_assert_cmpstr(qdict_get_str(resp, "status"), ==, "setup"); + qobject_unref(resp); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", 
MAC_PRIMARY0); + + qtest_qmp_eventwait(qts, "RESUME"); + + ret = migrate_status(qts); + g_assert_cmpstr(qdict_get_str(ret, "status"), ==, "completed"); + qobject_unref(ret); + + machine_stop(qts); +} + +static void test_guest_off_migrate_out(gconstpointer opaque) +{ + QTestState *qts; + QDict *resp, *args, *ret; + g_autofree gchar *uri = g_strdup_printf("exec: cat > %s", (gchar *)opaque); + const gchar *status; + QVirtioPCIDevice *vdev; + uint64_t features; + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-netdev user,id=hs1 ", + 2); + + check_one_card(qts, false, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "standby0", + "{'bus': 'root0'," + "'failover': 'on'," + "'netdev': 'hs0'," + "'mac': '"MAC_STANDBY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "primary0", + "{'bus': 'root1'," + "'failover_pair_id': 'standby0'," + "'netdev': 'hs1'," + "'rombar': 0," + "'romfile': ''," + "'mac': '"MAC_PRIMARY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + features = ~(QVIRTIO_F_BAD_FEATURE | + (1ull << VIRTIO_RING_F_INDIRECT_DESC) | + (1ull << VIRTIO_RING_F_EVENT_IDX) | + (1ull << VIRTIO_NET_F_STANDBY)); + + vdev = start_virtio_net_internal(qts, 1, 0, &features); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + args = qdict_from_jsonf_nofail("{}"); + g_assert_nonnull(args); + qdict_put_str(args, "uri", uri); + + resp = qtest_qmp(qts, "{ 'execute': 'migrate', 'arguments': %p}", args); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + + while (true) { + ret = migrate_status(qts); + + status = qdict_get_str(ret, "status"); + if (strcmp(status, "completed") == 0) { + qobject_unref(ret); + break; + } + g_assert_cmpstr(status, !=, "failed"); + g_assert_cmpstr(status, !=, "cancelling"); + g_assert_cmpstr(status, !=, "cancelled"); + qobject_unref(ret); + } + + qtest_qmp_eventwait(qts, "STOP"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qos_object_destroy((QOSGraphObject *)vdev); + machine_stop(qts); +} + +static void test_guest_off_migrate_in(gconstpointer opaque) +{ + QTestState *qts; + QDict *resp, *args, *ret; + g_autofree gchar *uri = g_strdup_printf("exec: cat %s", (gchar *)opaque); + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-netdev user,id=hs1 " + "-incoming defer ", + 2); + + check_one_card(qts, false, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "standby0", + "{'bus': 'root0'," + "'failover': 'on'," + "'netdev': 'hs0'," + "'mac': '"MAC_STANDBY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "primary0", + "{'bus': 'root1'," + "'failover_pair_id': 'standby0'," + "'netdev': 'hs1'," + "'rombar': 0," + "'romfile': ''," + "'mac': '"MAC_PRIMARY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + args = qdict_from_jsonf_nofail("{}"); + g_assert_nonnull(args); + qdict_put_str(args, "uri", uri); + + resp = qtest_qmp(qts, "{ 'execute': 'migrate-incoming', 'arguments': %p}", + 
args); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + + resp = get_migration_event(qts); + g_assert_cmpstr(qdict_get_str(resp, "status"), ==, "setup"); + qobject_unref(resp); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_eventwait(qts, "RESUME"); + + ret = migrate_status(qts); + g_assert_cmpstr(qdict_get_str(ret, "status"), ==, "completed"); + qobject_unref(ret); + + machine_stop(qts); +} + +static void test_migrate_guest_off_abort(gconstpointer opaque) +{ + QTestState *qts; + QDict *resp, *args, *ret; + g_autofree gchar *uri = g_strdup_printf("exec: cat > %s", (gchar *)opaque); + const gchar *status; + QVirtioPCIDevice *vdev; + uint64_t features; + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-netdev user,id=hs1 ", + 2); + + check_one_card(qts, false, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "standby0", + "{'bus': 'root0'," + "'failover': 'on'," + "'netdev': 'hs0'," + "'mac': '"MAC_STANDBY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "primary0", + "{'bus': 'root1'," + "'failover_pair_id': 'standby0'," + "'netdev': 'hs1'," + "'rombar': 0," + "'romfile': ''," + "'mac': '"MAC_PRIMARY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + features = ~(QVIRTIO_F_BAD_FEATURE | + (1ull << VIRTIO_RING_F_INDIRECT_DESC) | + (1ull << VIRTIO_RING_F_EVENT_IDX) | + (1ull << VIRTIO_NET_F_STANDBY)); + + vdev = start_virtio_net_internal(qts, 1, 0, &features); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + args = qdict_from_jsonf_nofail("{}"); + g_assert_nonnull(args); + qdict_put_str(args, "uri", uri); + + resp = qtest_qmp(qts, "{ 'execute': 'migrate', 'arguments': %p}", args); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + + while (true) { + ret = migrate_status(qts); + + status = qdict_get_str(ret, "status"); + if (strcmp(status, "completed") == 0) { + g_test_skip("Failed to cancel the migration"); + qobject_unref(ret); + goto out; + } + if (strcmp(status, "active") == 0) { + qobject_unref(ret); + break; + } + g_assert_cmpstr(status, !=, "failed"); + qobject_unref(ret); + } + + resp = qtest_qmp(qts, "{ 'execute': 'migrate_cancel' }"); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + + while (true) { + ret = migrate_status(qts); + status = qdict_get_str(ret, "status"); + if (strcmp(status, "completed") == 0) { + g_test_skip("Failed to cancel the migration"); + qobject_unref(ret); + goto out; + } + if (strcmp(status, "cancelled") == 0) { + qobject_unref(ret); + break; + } + g_assert_cmpstr(status, !=, "failed"); + g_assert_cmpstr(status, !=, "active"); + qobject_unref(ret); + } + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + +out: + qos_object_destroy((QOSGraphObject *)vdev); + machine_stop(qts); +} + +static void test_migrate_abort_wait_unplug(gconstpointer opaque) +{ + QTestState *qts; + QDict *resp, *args, *ret; + g_autofree gchar *uri = g_strdup_printf("exec: cat > %s", (gchar *)opaque); + const gchar *status; + QVirtioPCIDevice *vdev; + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-netdev user,id=hs1 ", + 2); + + 
check_one_card(qts, false, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "standby0", + "{'bus': 'root0'," + "'failover': 'on'," + "'netdev': 'hs0'," + "'mac': '"MAC_STANDBY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + vdev = start_virtio_net(qts, 1, 0, "standby0", true); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "primary0", + "{'bus': 'root1'," + "'failover_pair_id': 'standby0'," + "'netdev': 'hs1'," + "'rombar': 0," + "'romfile': ''," + "'mac': '"MAC_PRIMARY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + args = qdict_from_jsonf_nofail("{}"); + g_assert_nonnull(args); + qdict_put_str(args, "uri", uri); + + resp = qtest_qmp(qts, "{ 'execute': 'migrate', 'arguments': %p}", args); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + + /* the event is sent when QEMU asks the OS to unplug the card */ + resp = get_unplug_primary_event(qts); + g_assert_cmpstr(qdict_get_str(resp, "device-id"), ==, "primary0"); + qobject_unref(resp); + + resp = qtest_qmp(qts, "{ 'execute': 'migrate_cancel' }"); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + + /* migration has been cancelled while the unplug was in progress */ + + /* while the card is not ejected, we must be in "cancelling" state */ + ret = migrate_status(qts); + + status = qdict_get_str(ret, "status"); + g_assert_cmpstr(status, ==, "cancelling"); + qobject_unref(ret); + + /* OS unplugs the cards, QEMU can move from wait-unplug state */ + qtest_outl(qts, ACPI_PCIHP_ADDR_ICH9 + PCI_EJ_BASE, 1); + + while (true) { + ret = migrate_status(qts); + + status = qdict_get_str(ret, "status"); + if (strcmp(status, "cancelled") == 0) { + qobject_unref(ret); + break; + } + g_assert_cmpstr(status, ==, "cancelling"); + qobject_unref(ret); + } + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + qos_object_destroy((QOSGraphObject *)vdev); + machine_stop(qts); +} + +static void test_migrate_abort_active(gconstpointer opaque) +{ + QTestState *qts; + QDict *resp, *args, *ret; + g_autofree gchar *uri = g_strdup_printf("exec: cat > %s", (gchar *)opaque); + const gchar *status; + QVirtioPCIDevice *vdev; + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-netdev user,id=hs1 ", + 2); + + check_one_card(qts, false, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "standby0", + "{'bus': 'root0'," + "'failover': 'on'," + "'netdev': 'hs0'," + "'mac': '"MAC_STANDBY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + vdev = start_virtio_net(qts, 1, 0, "standby0", true); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "primary0", + "{'bus': 'root1'," + "'failover_pair_id': 'standby0'," + "'netdev': 'hs1'," + "'rombar': 0," + "'romfile': ''," + "'mac': '"MAC_PRIMARY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + args = qdict_from_jsonf_nofail("{}"); + g_assert_nonnull(args); + qdict_put_str(args, 
"uri", uri); + + resp = qtest_qmp(qts, "{ 'execute': 'migrate', 'arguments': %p}", args); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + + /* the event is sent when QEMU asks the OS to unplug the card */ + resp = get_unplug_primary_event(qts); + g_assert_cmpstr(qdict_get_str(resp, "device-id"), ==, "primary0"); + qobject_unref(resp); + + /* OS unplugs the cards, QEMU can move from wait-unplug state */ + qtest_outl(qts, ACPI_PCIHP_ADDR_ICH9 + PCI_EJ_BASE, 1); + + while (true) { + ret = migrate_status(qts); + + status = qdict_get_str(ret, "status"); + g_assert_cmpstr(status, !=, "failed"); + if (strcmp(status, "wait-unplug") != 0) { + qobject_unref(ret); + break; + } + qobject_unref(ret); + } + + resp = qtest_qmp(qts, "{ 'execute': 'migrate_cancel' }"); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + + while (true) { + ret = migrate_status(qts); + + status = qdict_get_str(ret, "status"); + if (strcmp(status, "completed") == 0) { + g_test_skip("Failed to cancel the migration"); + qobject_unref(ret); + goto out; + } + if (strcmp(status, "cancelled") == 0) { + qobject_unref(ret); + break; + } + g_assert_cmpstr(status, !=, "failed"); + g_assert_cmpstr(status, !=, "active"); + qobject_unref(ret); + } + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + +out: + qos_object_destroy((QOSGraphObject *)vdev); + machine_stop(qts); +} + +static void test_migrate_off_abort(gconstpointer opaque) +{ + QTestState *qts; + QDict *resp, *args, *ret; + g_autofree gchar *uri = g_strdup_printf("exec: cat > %s", (gchar *)opaque); + const gchar *status; + QVirtioPCIDevice *vdev; + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-netdev user,id=hs1 ", + 2); + + check_one_card(qts, false, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "standby0", + "{'bus': 'root0'," + "'failover': 'off'," + "'netdev': 'hs0'," + "'mac': '"MAC_STANDBY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + vdev = start_virtio_net(qts, 1, 0, "standby0", false); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "primary0", + "{'bus': 'root1'," + "'failover_pair_id': 'standby0'," + "'netdev': 'hs1'," + "'rombar': 0," + "'romfile': ''," + "'mac': '"MAC_PRIMARY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + args = qdict_from_jsonf_nofail("{}"); + g_assert_nonnull(args); + qdict_put_str(args, "uri", uri); + + resp = qtest_qmp(qts, "{ 'execute': 'migrate', 'arguments': %p}", args); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + + while (true) { + ret = migrate_status(qts); + + status = qdict_get_str(ret, "status"); + if (strcmp(status, "active") == 0) { + qobject_unref(ret); + break; + } + g_assert_cmpstr(status, !=, "failed"); + qobject_unref(ret); + } + + resp = qtest_qmp(qts, "{ 'execute': 'migrate_cancel' }"); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + + while (true) { + ret = migrate_status(qts); + + status = qdict_get_str(ret, "status"); + if (strcmp(status, "completed") == 0) { + g_test_skip("Failed to cancel the migration"); + qobject_unref(ret); + goto out; + } + if (strcmp(status, "cancelled") == 0) { + qobject_unref(ret); + break; + } + 
g_assert_cmpstr(status, !=, "failed"); + g_assert_cmpstr(status, !=, "active"); + qobject_unref(ret); + } + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + +out: + qos_object_destroy((QOSGraphObject *)vdev); + machine_stop(qts); +} + +static void test_migrate_abort_timeout(gconstpointer opaque) +{ + QTestState *qts; + QDict *resp, *args, *ret; + g_autofree gchar *uri = g_strdup_printf("exec: cat > %s", (gchar *)opaque); + const gchar *status; + int total; + QVirtioPCIDevice *vdev; + + qts = machine_start(BASE_MACHINE + "-netdev user,id=hs0 " + "-netdev user,id=hs1 ", + 2); + + check_one_card(qts, false, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "standby0", + "{'bus': 'root0'," + "'failover': 'on'," + "'netdev': 'hs0'," + "'mac': '"MAC_STANDBY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + vdev = start_virtio_net(qts, 1, 0, "standby0", true); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + + qtest_qmp_device_add(qts, "virtio-net", "primary0", + "{'bus': 'root1'," + "'failover_pair_id': 'standby0'," + "'netdev': 'hs1'," + "'rombar': 0," + "'romfile': ''," + "'mac': '"MAC_PRIMARY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + args = qdict_from_jsonf_nofail("{}"); + g_assert_nonnull(args); + qdict_put_str(args, "uri", uri); + + resp = qtest_qmp(qts, "{ 'execute': 'migrate', 'arguments': %p}", args); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + + /* the event is sent when QEMU asks the OS to unplug the card */ + resp = get_unplug_primary_event(qts); + g_assert_cmpstr(qdict_get_str(resp, "device-id"), ==, "primary0"); + qobject_unref(resp); + + resp = qtest_qmp(qts, "{ 'execute': 'migrate_cancel' }"); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + + /* migration has been cancelled while the unplug was in progress */ + + /* while the card is not ejected, we must be in "cancelling" state */ + + total = 0; + while (true) { + ret = migrate_status(qts); + + status = qdict_get_str(ret, "status"); + if (strcmp(status, "cancelled") == 0) { + qobject_unref(ret); + break; + } + g_assert_cmpstr(status, ==, "cancelling"); + g_assert(qdict_haskey(ret, "total-time")); + total = qdict_get_int(ret, "total-time"); + qobject_unref(ret); + } + + /* + * migration timeout in this case is 30 seconds + * check we exit on the timeout (ms) + */ + g_assert_cmpint(total, >, 30000); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + + qos_object_destroy((QOSGraphObject *)vdev); + machine_stop(qts); +} + +static void test_multi_out(gconstpointer opaque) +{ + QTestState *qts; + QDict *resp, *args, *ret; + g_autofree gchar *uri = g_strdup_printf("exec: cat > %s", (gchar *)opaque); + const gchar *status, *expected; + QVirtioPCIDevice *vdev0, *vdev1; + + qts = machine_start(BASE_MACHINE + "-device pcie-root-port,id=root2,addr=0x3,bus=pcie.0,chassis=3 " + "-device pcie-root-port,id=root3,addr=0x4,bus=pcie.0,chassis=4 " + "-netdev user,id=hs0 " + "-netdev user,id=hs1 " + "-netdev user,id=hs2 " + "-netdev user,id=hs3 ", + 4); + + check_one_card(qts, false, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + check_one_card(qts, false, 
"standby1", MAC_STANDBY1); + check_one_card(qts, false, "primary1", MAC_PRIMARY1); + + qtest_qmp_device_add(qts, "virtio-net", "standby0", + "{'bus': 'root0'," + "'failover': 'on'," + "'netdev': 'hs0'," + "'mac': '"MAC_STANDBY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + check_one_card(qts, false, "standby1", MAC_STANDBY1); + check_one_card(qts, false, "primary1", MAC_PRIMARY1); + + qtest_qmp_device_add(qts, "virtio-net", "primary0", + "{'bus': 'root1'," + "'failover_pair_id': 'standby0'," + "'netdev': 'hs1'," + "'rombar': 0," + "'romfile': ''," + "'mac': '"MAC_PRIMARY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + check_one_card(qts, false, "standby1", MAC_STANDBY1); + check_one_card(qts, false, "primary1", MAC_PRIMARY1); + + vdev0 = start_virtio_net(qts, 1, 0, "standby0", true); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + check_one_card(qts, false, "standby1", MAC_STANDBY1); + check_one_card(qts, false, "primary1", MAC_PRIMARY1); + + qtest_qmp_device_add(qts, "virtio-net", "standby1", + "{'bus': 'root2'," + "'failover': 'on'," + "'netdev': 'hs2'," + "'mac': '"MAC_STANDBY1"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + check_one_card(qts, true, "standby1", MAC_STANDBY1); + check_one_card(qts, false, "primary1", MAC_PRIMARY1); + + qtest_qmp_device_add(qts, "virtio-net", "primary1", + "{'bus': 'root3'," + "'failover_pair_id': 'standby1'," + "'netdev': 'hs3'," + "'rombar': 0," + "'romfile': ''," + "'mac': '"MAC_PRIMARY1"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + check_one_card(qts, true, "standby1", MAC_STANDBY1); + check_one_card(qts, false, "primary1", MAC_PRIMARY1); + + vdev1 = start_virtio_net(qts, 3, 0, "standby1", true); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + check_one_card(qts, true, "standby1", MAC_STANDBY1); + check_one_card(qts, true, "primary1", MAC_PRIMARY1); + + args = qdict_from_jsonf_nofail("{}"); + g_assert_nonnull(args); + qdict_put_str(args, "uri", uri); + + resp = qtest_qmp(qts, "{ 'execute': 'migrate', 'arguments': %p}", args); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + + /* the event is sent when QEMU asks the OS to unplug the card */ + resp = get_unplug_primary_event(qts); + if (strcmp(qdict_get_str(resp, "device-id"), "primary0") == 0) { + expected = "primary1"; + } else if (strcmp(qdict_get_str(resp, "device-id"), "primary1") == 0) { + expected = "primary0"; + } else { + g_assert_not_reached(); + } + qobject_unref(resp); + + resp = get_unplug_primary_event(qts); + g_assert_cmpstr(qdict_get_str(resp, "device-id"), ==, expected); + qobject_unref(resp); + + /* wait the end of the migration setup phase */ + while (true) { + ret = migrate_status(qts); + + status = qdict_get_str(ret, "status"); + if (strcmp(status, "wait-unplug") == 0) { + qobject_unref(ret); + break; + } + + /* The migration must not start if the card is not ejected */ + g_assert_cmpstr(status, !=, "active"); + g_assert_cmpstr(status, !=, "completed"); + g_assert_cmpstr(status, !=, "failed"); + g_assert_cmpstr(status, !=, "cancelling"); + g_assert_cmpstr(status, !=, "cancelled"); + + qobject_unref(ret); + } + + /* OS unplugs primary1, but we 
must wait the second */ + qtest_outl(qts, ACPI_PCIHP_ADDR_ICH9 + PCI_EJ_BASE, 1); + + ret = migrate_status(qts); + status = qdict_get_str(ret, "status"); + g_assert_cmpstr(status, ==, "wait-unplug"); + qobject_unref(ret); + + if (g_test_slow()) { + /* check we stay in wait-unplug while the card is not ejected */ + for (int i = 0; i < 5; i++) { + sleep(1); + ret = migrate_status(qts); + status = qdict_get_str(ret, "status"); + g_assert_cmpstr(status, ==, "wait-unplug"); + qobject_unref(ret); + } + } + + /* OS unplugs primary0, QEMU can move from wait-unplug state */ + qtest_outl(qts, ACPI_PCIHP_ADDR_ICH9 + PCI_SEL_BASE, 2); + qtest_outl(qts, ACPI_PCIHP_ADDR_ICH9 + PCI_EJ_BASE, 1); + + while (true) { + ret = migrate_status(qts); + + status = qdict_get_str(ret, "status"); + if (strcmp(status, "completed") == 0) { + qobject_unref(ret); + break; + } + g_assert_cmpstr(status, !=, "failed"); + g_assert_cmpstr(status, !=, "cancelling"); + g_assert_cmpstr(status, !=, "cancelled"); + qobject_unref(ret); + } + + qtest_qmp_eventwait(qts, "STOP"); + + qos_object_destroy((QOSGraphObject *)vdev0); + qos_object_destroy((QOSGraphObject *)vdev1); + machine_stop(qts); +} + +static void test_multi_in(gconstpointer opaque) +{ + QTestState *qts; + QDict *resp, *args, *ret; + g_autofree gchar *uri = g_strdup_printf("exec: cat %s", (gchar *)opaque); + + qts = machine_start(BASE_MACHINE + "-device pcie-root-port,id=root2,addr=0x3,bus=pcie.0,chassis=3 " + "-device pcie-root-port,id=root3,addr=0x4,bus=pcie.0,chassis=4 " + "-netdev user,id=hs0 " + "-netdev user,id=hs1 " + "-netdev user,id=hs2 " + "-netdev user,id=hs3 " + "-incoming defer ", + 4); + + check_one_card(qts, false, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + check_one_card(qts, false, "standby1", MAC_STANDBY1); + check_one_card(qts, false, "primary1", MAC_PRIMARY1); + + qtest_qmp_device_add(qts, "virtio-net", "standby0", + "{'bus': 'root0'," + "'failover': 'on'," + "'netdev': 'hs0'," + "'mac': '"MAC_STANDBY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + check_one_card(qts, false, "standby1", MAC_STANDBY1); + check_one_card(qts, false, "primary1", MAC_PRIMARY1); + + qtest_qmp_device_add(qts, "virtio-net", "primary0", + "{'bus': 'root1'," + "'failover_pair_id': 'standby0'," + "'netdev': 'hs1'," + "'rombar': 0," + "'romfile': ''," + "'mac': '"MAC_PRIMARY0"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + check_one_card(qts, false, "standby1", MAC_STANDBY1); + check_one_card(qts, false, "primary1", MAC_PRIMARY1); + + qtest_qmp_device_add(qts, "virtio-net", "standby1", + "{'bus': 'root2'," + "'failover': 'on'," + "'netdev': 'hs2'," + "'mac': '"MAC_STANDBY1"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + check_one_card(qts, true, "standby1", MAC_STANDBY1); + check_one_card(qts, false, "primary1", MAC_PRIMARY1); + + qtest_qmp_device_add(qts, "virtio-net", "primary1", + "{'bus': 'root3'," + "'failover_pair_id': 'standby1'," + "'netdev': 'hs3'," + "'rombar': 0," + "'romfile': ''," + "'mac': '"MAC_PRIMARY1"'}"); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, false, "primary0", MAC_PRIMARY0); + check_one_card(qts, true, "standby1", MAC_STANDBY1); + check_one_card(qts, false, "primary1", MAC_PRIMARY1); + + args = qdict_from_jsonf_nofail("{}"); + g_assert_nonnull(args); + 
qdict_put_str(args, "uri", uri); + + resp = qtest_qmp(qts, "{ 'execute': 'migrate-incoming', 'arguments': %p}", + args); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + + resp = get_migration_event(qts); + g_assert_cmpstr(qdict_get_str(resp, "status"), ==, "setup"); + qobject_unref(resp); + + resp = get_failover_negociated_event(qts); + g_assert_cmpstr(qdict_get_str(resp, "device-id"), ==, "standby0"); + qobject_unref(resp); + + resp = get_failover_negociated_event(qts); + g_assert_cmpstr(qdict_get_str(resp, "device-id"), ==, "standby1"); + qobject_unref(resp); + + check_one_card(qts, true, "standby0", MAC_STANDBY0); + check_one_card(qts, true, "primary0", MAC_PRIMARY0); + check_one_card(qts, true, "standby1", MAC_STANDBY1); + check_one_card(qts, true, "primary1", MAC_PRIMARY1); + + qtest_qmp_eventwait(qts, "RESUME"); + + ret = migrate_status(qts); + g_assert_cmpstr(qdict_get_str(ret, "status"), ==, "completed"); + qobject_unref(ret); + + machine_stop(qts); +} +#endif /* _WIN32 */ + +int main(int argc, char **argv) +{ + gchar *tmpfile; + int ret; + + g_test_init(&argc, &argv, NULL); + + ret = g_file_open_tmp("failover_test_migrate-XXXXXX", &tmpfile, NULL); + g_assert_true(ret >= 0); + close(ret); + + /* parameters tests */ + qtest_add_func("failover-virtio-net/params/error/id", test_error_id); + qtest_add_func("failover-virtio-net/params/error/pcie", test_error_pcie); + qtest_add_func("failover-virtio-net/params/on", test_on); + qtest_add_func("failover-virtio-net/params/on_mismatch", + test_on_mismatch); + qtest_add_func("failover-virtio-net/params/off", test_off); + qtest_add_func("failover-virtio-net/params/enabled", test_enabled); + qtest_add_func("failover-virtio-net/params/guest_off", test_guest_off); + + /* hotplug tests */ + qtest_add_func("failover-virtio-net/hotplug/1", test_hotplug_1); + qtest_add_func("failover-virtio-net/hotplug/1_reverse", + test_hotplug_1_reverse); + qtest_add_func("failover-virtio-net/hotplug/2", test_hotplug_2); + qtest_add_func("failover-virtio-net/hotplug/2_reverse", + test_hotplug_2_reverse); + +#ifndef _WIN32 + /* + * These migration tests cases use the exec migration protocol, + * which is unsupported on Windows. 
+ */ + qtest_add_data_func("failover-virtio-net/migrate/on/out", tmpfile, + test_migrate_out); + qtest_add_data_func("failover-virtio-net/migrate/on/in", tmpfile, + test_migrate_in); + qtest_add_data_func("failover-virtio-net/migrate/off/out", tmpfile, + test_off_migrate_out); + qtest_add_data_func("failover-virtio-net/migrate/off/in", tmpfile, + test_off_migrate_in); + qtest_add_data_func("failover-virtio-net/migrate/off/abort", tmpfile, + test_migrate_off_abort); + qtest_add_data_func("failover-virtio-net/migrate/guest_off/out", tmpfile, + test_guest_off_migrate_out); + qtest_add_data_func("failover-virtio-net/migrate/guest_off/in", tmpfile, + test_guest_off_migrate_in); + qtest_add_data_func("failover-virtio-net/migrate/guest_off/abort", tmpfile, + test_migrate_guest_off_abort); + qtest_add_data_func("failover-virtio-net/migrate/abort/wait-unplug", + tmpfile, test_migrate_abort_wait_unplug); + qtest_add_data_func("failover-virtio-net/migrate/abort/active", tmpfile, + test_migrate_abort_active); + if (g_test_slow()) { + qtest_add_data_func("failover-virtio-net/migrate/abort/timeout", + tmpfile, test_migrate_abort_timeout); + } + qtest_add_data_func("failover-virtio-net/migrate/multi/out", + tmpfile, test_multi_out); + qtest_add_data_func("failover-virtio-net/migrate/multi/in", + tmpfile, test_multi_in); +#endif /* _WIN32 */ + + ret = g_test_run(); + + unlink(tmpfile); + g_free(tmpfile); + + return ret; +} diff --git a/tests/qtest/virtio-net-test.c b/tests/qtest/virtio-net-test.c index a08e2ffe12..dff43f0f60 100644 --- a/tests/qtest/virtio-net-test.c +++ b/tests/qtest/virtio-net-test.c @@ -8,7 +8,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "libqtest-single.h" #include "qemu/iov.h" #include "qemu/module.h" @@ -87,11 +86,11 @@ static void tx_test(QVirtioDevice *dev, QVIRTIO_NET_TIMEOUT_US); guest_free(alloc, req_addr); - ret = qemu_recv(socket, &len, sizeof(len), 0); + ret = recv(socket, &len, sizeof(len), 0); g_assert_cmpint(ret, ==, sizeof(len)); len = ntohl(len); - ret = qemu_recv(socket, buffer, len, 0); + ret = recv(socket, buffer, len, 0); g_assert_cmpstr(buffer, ==, "TEST"); } @@ -166,14 +165,17 @@ static void stop_cont_test(void *obj, void *data, QGuestAllocator *t_alloc) rx_stop_cont_test(dev, t_alloc, rx, sv[0]); } -#endif - static void hotplug(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtioPCIDevice *dev = obj; QTestState *qts = dev->pdev->bus->qts; const char *arch = qtest_get_arch(); + if (dev->pdev->bus->not_hotpluggable) { + g_test_skip("pci bus does not support hotplug"); + return; + } + qtest_qmp_device_add(qts, "virtio-net-pci", "net1", "{'addr': %s}", stringify(PCI_SLOT_HP)); @@ -202,11 +204,11 @@ static void announce_self(void *obj, void *data, QGuestAllocator *t_alloc) qobject_unref(rsp); /* Catch the first packet and make sure it's a RARP */ - ret = qemu_recv(sv[0], &len, sizeof(len), 0); + ret = recv(sv[0], &len, sizeof(len), 0); g_assert_cmpint(ret, ==, sizeof(len)); len = ntohl(len); - ret = qemu_recv(sv[0], buffer, len, 0); + ret = recv(sv[0], buffer, len, 0); g_assert_cmpint(*proto, ==, htons(ETH_P_RARP)); /* @@ -230,7 +232,7 @@ static void announce_self(void *obj, void *data, QGuestAllocator *t_alloc) while (true) { int saved_err; - ret = qemu_recv(sv[0], buffer, 60, MSG_DONTWAIT); + ret = recv(sv[0], buffer, 60, MSG_DONTWAIT); saved_err = errno; now = g_get_monotonic_time(); g_assert_cmpint(now, <, deadline); @@ -282,6 +284,8 @@ static void *virtio_net_test_setup(GString *cmd_line, void *arg) return sv; } +#endif /* _WIN32 */ + 
static void large_tx(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtioNet *dev = obj; @@ -315,16 +319,15 @@ static void *virtio_net_test_setup_nosocket(GString *cmd_line, void *arg) static void register_virtio_net_test(void) { - QOSGraphTestOptions opts = { - .before = virtio_net_test_setup, - }; + QOSGraphTestOptions opts = { 0 }; - qos_add_test("hotplug", "virtio-pci", hotplug, &opts); #ifndef _WIN32 + opts.before = virtio_net_test_setup; + qos_add_test("hotplug", "virtio-net-pci", hotplug, &opts); qos_add_test("basic", "virtio-net", send_recv_test, &opts); qos_add_test("rx_stop_cont", "virtio-net", stop_cont_test, &opts); -#endif qos_add_test("announce-self", "virtio-net", announce_self, &opts); +#endif /* These tests do not need a loopback backend. */ opts.before = virtio_net_test_setup_nosocket; diff --git a/tests/qtest/virtio-rng-test.c b/tests/qtest/virtio-rng-test.c index e6b8cd8e0c..64e131cd8c 100644 --- a/tests/qtest/virtio-rng-test.c +++ b/tests/qtest/virtio-rng-test.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/module.h" #include "libqos/qgraph.h" #include "libqos/virtio-rng.h" @@ -20,6 +20,11 @@ static void rng_hotplug(void *obj, void *data, QGuestAllocator *alloc) QVirtioPCIDevice *dev = obj; QTestState *qts = dev->pdev->bus->qts; + if (dev->pdev->bus->not_hotpluggable) { + g_test_skip("pci bus does not support hotplug"); + return; + } + const char *arch = qtest_get_arch(); qtest_qmp_device_add(qts, "virtio-rng-pci", "rng1", diff --git a/tests/qtest/virtio-scsi-test.c b/tests/qtest/virtio-scsi-test.c index 8ceb12aacd..ceaa7f2415 100644 --- a/tests/qtest/virtio-scsi-test.c +++ b/tests/qtest/virtio-scsi-test.c @@ -268,7 +268,7 @@ static void test_iothread_attach_node(void *obj, void *data, QVirtioSCSIPCI *scsi_pci = obj; QVirtioSCSI *scsi = &scsi_pci->scsi; QVirtioSCSIQueues *vs; - char tmp_path[] = "/tmp/qtest.XXXXXX"; + g_autofree char *tmp_path = NULL; int fd; int ret; @@ -282,7 +282,7 @@ static void test_iothread_attach_node(void *obj, void *data, vs = qvirtio_scsi_init(scsi->vdev); /* Create a temporary qcow2 overlay*/ - fd = mkstemp(tmp_path); + fd = g_file_open_tmp("qtest.XXXXXX", &tmp_path, NULL); g_assert(fd >= 0); close(fd); diff --git a/tests/qtest/virtio-test.c b/tests/qtest/virtio-test.c index 6313417630..f7c6afdcf1 100644 --- a/tests/qtest/virtio-test.c +++ b/tests/qtest/virtio-test.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/module.h" #include "libqos/qgraph.h" #include "libqos/pci.h" diff --git a/tests/qtest/vmgenid-test.c b/tests/qtest/vmgenid-test.c index 6781a51447..efba76e716 100644 --- a/tests/qtest/vmgenid-test.c +++ b/tests/qtest/vmgenid-test.c @@ -14,7 +14,7 @@ #include "hw/acpi/acpi-defs.h" #include "boot-sector.h" #include "acpi-utils.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qapi/qmp/qdict.h" #define VGID_GUID "324e6eaf-d1d1-4bf6-bf41-b9bb6c91fb87" diff --git a/tests/qtest/vmxnet3-test.c b/tests/qtest/vmxnet3-test.c index 97c23fd3a8..a81025252c 100644 --- a/tests/qtest/vmxnet3-test.c +++ b/tests/qtest/vmxnet3-test.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qemu/module.h" #include "libqos/qgraph.h" #include "libqos/pci.h" diff --git a/tests/qtest/vnc-display-test.c b/tests/qtest/vnc-display-test.c new file mode 100644 index 0000000000..e2a9d682bb --- /dev/null +++ b/tests/qtest/vnc-display-test.c @@ -0,0 +1,103 @@ +/* + * 
VNC display tests + * + * Copyright (c) 2022 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/sockets.h" +#include "libqtest.h" +#include <gio/gio.h> +#include <gvnc.h> + +typedef struct Test { + QTestState *qts; + VncConnection *conn; + GMainLoop *loop; +} Test; + +static void on_vnc_error(VncConnection* self, + const char* msg) +{ + g_error("vnc-error: %s", msg); +} + +static void on_vnc_auth_failure(VncConnection *self, + const char *msg) +{ + g_error("vnc-auth-failure: %s", msg); +} + +static bool +test_setup(Test *test) +{ +#ifdef WIN32 + g_test_skip("Not supported on Windows yet"); + return false; +#else + int pair[2]; + + test->qts = qtest_init("-vnc none -name vnc-test"); + + g_assert_cmpint(qemu_socketpair(AF_UNIX, SOCK_STREAM, 0, pair), ==, 0); + + qtest_qmp_add_client(test->qts, "vnc", pair[1]); + + test->conn = vnc_connection_new(); + g_signal_connect(test->conn, "vnc-error", + G_CALLBACK(on_vnc_error), NULL); + g_signal_connect(test->conn, "vnc-auth-failure", + G_CALLBACK(on_vnc_auth_failure), NULL); + vnc_connection_set_auth_type(test->conn, VNC_CONNECTION_AUTH_NONE); + vnc_connection_open_fd(test->conn, pair[0]); + + test->loop = g_main_loop_new(NULL, FALSE); + return true; +#endif +} + +static void +test_vnc_basic_on_vnc_initialized(VncConnection *self, + Test *test) +{ + const char *name = vnc_connection_get_name(test->conn); + + g_assert_cmpstr(name, ==, "QEMU (vnc-test)"); + g_main_loop_quit(test->loop); +} + +static void +test_vnc_basic(void) +{ + Test test; + + if (!test_setup(&test)) { + return; + } + + g_signal_connect(test.conn, "vnc-initialized", + G_CALLBACK(test_vnc_basic_on_vnc_initialized), &test); + + g_main_loop_run(test.loop); + + qtest_quit(test.qts); + g_object_unref(test.conn); + g_main_loop_unref(test.loop); +} + +int +main(int argc, char **argv) +{ + if (getenv("GTK_VNC_DEBUG")) { + vnc_util_set_debug(true); + } + + g_test_init(&argc, &argv, NULL); + + qtest_add_func("/vnc-display/basic", test_vnc_basic); + + return g_test_run(); +} diff --git a/tests/qtest/wdt_ib700-test.c b/tests/qtest/wdt_ib700-test.c index 6c36e43fb8..797288d939 100644 --- a/tests/qtest/wdt_ib700-test.c +++ b/tests/qtest/wdt_ib700-test.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" #include "qapi/qmp/qdict.h" #include "qemu/timer.h" diff --git a/tests/qtest/xlnx-can-test.c b/tests/qtest/xlnx-can-test.c index 54de71a686..89610fc499 100644 --- a/tests/qtest/xlnx-can-test.c +++ b/tests/qtest/xlnx-can-test.c @@ -25,7 +25,7 @@ */ #include "qemu/osdep.h" -#include "libqos/libqtest.h" +#include "libqtest.h" /* Base address. */ #define CAN0_BASE_ADDR 0xFF060000 diff --git a/tests/requirements.txt b/tests/requirements.txt index a21b59b443..0ba561b6bd 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -1,5 +1,6 @@ # Add Python module requirements, one per line, to be installed # in the tests/venv Python virtual environment. For more info, # refer to: https://pip.pypa.io/en/stable/user_guide/#id1 +# Note that qemu.git/python/ is always implicitly installed. 
avocado-framework==88.1 pycdlib==1.11.0 diff --git a/tests/tcg/Makefile.prereqs b/tests/tcg/Makefile.prereqs deleted file mode 100644 index 9a29604a83..0000000000 --- a/tests/tcg/Makefile.prereqs +++ /dev/null @@ -1,18 +0,0 @@ -# -*- Mode: makefile -*- -# -# TCG Compiler Probe -# -# This Makefile fragment is included multiple times in the main make -# script to probe for available compilers. This is used to build up a -# selection of required docker targets before we invoke a sub-make for -# each target. - -DOCKER_IMAGE:= - --include $(BUILD_DIR)/tests/tcg/config-$(PROBE_TARGET).mak - -ifneq ($(DOCKER_IMAGE),) -build-tcg-tests-$(PROBE_TARGET): docker-image-$(DOCKER_IMAGE) -endif -$(BUILD_DIR)/tests/tcg/config_$(PROBE_TARGET).mak: config-host.mak -config-host.mak: $(SRC_PATH)/tests/tcg/configure.sh diff --git a/tests/tcg/Makefile.qemu b/tests/tcg/Makefile.qemu deleted file mode 100644 index 84c8543878..0000000000 --- a/tests/tcg/Makefile.qemu +++ /dev/null @@ -1,121 +0,0 @@ -# -*- Mode: makefile -*- -# -# TCG tests (per-target rules) -# -# This Makefile fragment is included from the build-tcg target, once -# for each target we build. We have two options for compiling, either -# using a configured guest compiler or calling one of our docker images -# to do it for us. -# - -# The configure script fills in extra information about -# useful docker images or alternative compiler flags. - -# Usage: $(call quiet-command,command and args,"NAME","args to print") -# This will run "command and args", and either: -# if V=1 just print the whole command and args -# otherwise print the 'quiet' output in the format " NAME args to print" -# NAME should be a short name of the command, 7 letters or fewer. -# If called with only a single argument, will print nothing in quiet mode. 
-quiet-command-run = $(if $(V),,$(if $2,printf " %-7s %s\n" $2 $3 && ))$1 -quiet-@ = $(if $(V),,@) -quiet-command = $(quiet-@)$(call quiet-command-run,$1,$2,$3) - -CROSS_CC_GUEST:= -CROSS_AS_GUEST:= -CROSS_LD_GUEST:= -DOCKER_IMAGE:= - --include tests/tcg/config-$(TARGET).mak - -GUEST_BUILD= -TCG_MAKE=../Makefile.target - -# We also need the Docker make rules to depend on -SKIP_DOCKER_BUILD=1 -include $(SRC_PATH)/tests/docker/Makefile.include - -# Support installed Cross Compilers - -ifdef CROSS_CC_GUEST - -.PHONY: cross-build-guest-tests -cross-build-guest-tests: - $(call quiet-command, \ - (mkdir -p tests/tcg/$(TARGET) && cd tests/tcg/$(TARGET) && \ - $(MAKE) -f $(TCG_MAKE) TARGET="$(TARGET)" CC="$(CROSS_CC_GUEST)" \ - $(if $(CROSS_AS_GUEST),AS="$(CROSS_AS_GUEST)") \ - $(if $(CROSS_LD_GUEST),LD="$(CROSS_LD_GUEST)") \ - SRC_PATH="$(SRC_PATH)" BUILD_STATIC=$(CROSS_CC_GUEST_STATIC) \ - EXTRA_CFLAGS="$(CROSS_CC_GUEST_CFLAGS)"), \ - "BUILD","$(TARGET) guest-tests with $(CROSS_CC_GUEST)") - -GUEST_BUILD=cross-build-guest-tests - -endif - -# Support building with Docker - -ifneq ($(DOCKER_IMAGE),) - -DOCKER_COMPILE_CMD="$(DOCKER_SCRIPT) cc \ - --cc $(DOCKER_CROSS_CC_GUEST) \ - -i qemu/$(DOCKER_IMAGE) \ - -s $(SRC_PATH) -- " - -DOCKER_AS_CMD=$(if $(DOCKER_CROSS_AS_GUEST),"$(DOCKER_SCRIPT) cc \ - --cc $(DOCKER_CROSS_AS_GUEST) \ - -i qemu/$(DOCKER_IMAGE) \ - -s $(SRC_PATH) -- ") - -DOCKER_LD_CMD=$(if $(DOCKER_CROSS_LD_GUEST),"$(DOCKER_SCRIPT) cc \ - --cc $(DOCKER_CROSS_LD_GUEST) \ - -i qemu/$(DOCKER_IMAGE) \ - -s $(SRC_PATH) -- ") - - -.PHONY: docker-build-guest-tests -docker-build-guest-tests: docker-image-$(DOCKER_IMAGE) - $(call quiet-command, \ - (mkdir -p tests/tcg/$(TARGET) && cd tests/tcg/$(TARGET) && \ - $(MAKE) -f $(TCG_MAKE) TARGET="$(TARGET)" CC=$(DOCKER_COMPILE_CMD) \ - $(if $(DOCKER_AS_CMD),AS=$(DOCKER_AS_CMD)) \ - $(if $(DOCKER_LD_CMD),LD=$(DOCKER_LD_CMD)) \ - SRC_PATH="$(SRC_PATH)" BUILD_STATIC=y \ - EXTRA_CFLAGS="$(CROSS_CC_GUEST_CFLAGS)"), \ - "BUILD","$(TARGET) guest-tests with docker qemu/$(DOCKER_IMAGE)") - -GUEST_BUILD=docker-build-guest-tests - -endif - -# Final targets -all: - @echo "Do not invoke this Makefile directly"; exit 1 - -.PHONY: guest-tests - -ifneq ($(GUEST_BUILD),) -guest-tests: $(GUEST_BUILD) - -run-guest-tests: guest-tests - $(call quiet-command, \ - (cd tests/tcg/$(TARGET) && \ - $(MAKE) -f $(TCG_MAKE) TARGET="$(TARGET)" \ - SRC_PATH="$(SRC_PATH)" SPEED=$(SPEED) run), \ - "RUN", "tests for $(TARGET_NAME)") - -else -guest-tests: - $(call quiet-command, true, "BUILD", \ - "$(TARGET) guest-tests SKIPPED") - -run-guest-tests: - $(call quiet-command, true, "RUN", \ - "tests for $(TARGET) SKIPPED") -endif - -# It doesn't matter if these don't exits -.PHONY: clean-guest-tests -clean-guest-tests: - rm -rf tests/tcg/$(TARGET) diff --git a/tests/tcg/Makefile.target b/tests/tcg/Makefile.target index 63cf1b2573..14bc013181 100644 --- a/tests/tcg/Makefile.target +++ b/tests/tcg/Makefile.target @@ -30,23 +30,35 @@ # all: --include ../../../config-host.mak --include ../config-$(TARGET).mak -ifeq ($(CONFIG_USER_ONLY),y) --include $(SRC_PATH)/default-configs/targets/$(TARGET).mak +-include ../config-host.mak +-include config-target.mak + +# Get semihosting definitions for user-mode emulation +ifeq ($(filter %-softmmu, $(TARGET)),) +-include $(SRC_PATH)/configs/targets/$(TARGET).mak endif # for including , in command strings COMMA := , +NULL := +SPACE := $(NULL) # +TARGET_PREFIX=tests/tcg/$(TARGET):$(SPACE) -quiet-command = $(if $(V),$1,$(if $(2),@printf " %-7s %s\n" $2 $3 && 
$1, @$1)) +quiet-@ = $(if $(V),,@$(if $1,printf " %-7s %s\n" "$(strip $1)" "$(strip $2)" && )) +quiet-command = $(call quiet-@,$2,$3)$1 + +cc-test = $(CC) -Werror $1 -c -o /dev/null -xc /dev/null >/dev/null 2>&1 +cc-option = if $(call cc-test, $1); then \ + echo "$(TARGET_PREFIX)$1 detected" && echo "$(strip $2)=y" >&3; else \ + echo "$(TARGET_PREFIX)$1 not detected"; fi # $1 = test name, $2 = cmd, $3 = desc -ifdef CONFIG_USER_ONLY +ifeq ($(filter %-softmmu, $(TARGET)),) run-test = $(call quiet-command, timeout --foreground $(TIMEOUT) $2 > $1.out, \ - "TEST",$3) + TEST,$(or $3, $*, $<) on $(TARGET_NAME)) else -run-test = $(call quiet-command, timeout --foreground $(TIMEOUT) $2,"TEST",$3) +run-test = $(call quiet-command, timeout --foreground $(TIMEOUT) $2, \ + TEST,$(or $3, $*, $<) on $(TARGET_NAME)) endif # $1 = test name, $2 = reference @@ -54,7 +66,7 @@ endif # we know it failed and then force failure at the end. diff-out = $(call quiet-command, diff -q $1.out $2 || \ (diff -u $1.out $2 | head -n 10 && false), \ - "DIFF","$1.out with $2") + DIFF,$1.out with $2) # $1 = test name, $2 = reason skip-test = @printf " SKIPPED %s on $(TARGET_NAME) because %s\n" $1 $2 @@ -75,22 +87,20 @@ EXTRA_TESTS= # Start with a blank slate, the build targets get to add stuff first CFLAGS= -QEMU_CFLAGS= LDFLAGS= QEMU_OPTS= # If TCG debugging, or TCI is enabled things are a lot slower -ifneq ($(CONFIG_TCG_INTERPRETER),) +# ??? Makefile no longer has any indication that TCI is enabled, +# but for the record: +# 15s original default +# 60s with --enable-debug +# 90s with --enable-tcg-interpreter TIMEOUT=90 -else ifneq ($(CONFIG_DEBUG_TCG),) -TIMEOUT=60 -else -TIMEOUT=15 -endif -ifdef CONFIG_USER_ONLY +ifeq ($(filter %-softmmu, $(TARGET)),) # The order we include is important. We include multiarch first and # then the target. If there are common tests shared between # sub-targets (e.g. ARM & AArch64) then it is up to @@ -107,10 +117,13 @@ endif %: %.c $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) +%: %.S + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) else # For softmmu targets we include a different Makefile fragement as the # build options for bare programs are usually pretty different. They # are expected to provide their own build recipes. 
+EXTRA_CFLAGS += -ffreestanding -include $(SRC_PATH)/tests/tcg/minilib/Makefile.target -include $(SRC_PATH)/tests/tcg/multiarch/system/Makefile.softmmu-target -include $(SRC_PATH)/tests/tcg/$(TARGET_NAME)/Makefile.softmmu-target @@ -144,7 +157,6 @@ PLUGINS=$(patsubst %.c, lib%.so, $(notdir $(wildcard $(PLUGIN_SRC)/*.c))) $(foreach p,$(PLUGINS), \ $(foreach t,$(TESTS),\ $(eval run-plugin-$(t)-with-$(p): $t $p) \ - $(eval run-plugin-$(t)-with-$(p): TIMEOUT=60) \ $(eval RUN_TESTS+=run-plugin-$(t)-with-$(p)))) endif @@ -153,23 +165,21 @@ extract-plugin = $(wordlist 2, 2, $(subst -with-, ,$1)) RUN_TESTS+=$(EXTRA_RUNS) -ifdef CONFIG_USER_ONLY +ifeq ($(filter %-softmmu, $(TARGET)),) run-%: % - $(call run-test, $<, $(QEMU) $(QEMU_OPTS) $<, "$< on $(TARGET_NAME)") + $(call run-test, $<, $(QEMU) $(QEMU_OPTS) $<) run-plugin-%: $(call run-test, $@, $(QEMU) $(QEMU_OPTS) \ -plugin $(PLUGIN_LIB)/$(call extract-plugin,$@) \ -d plugin -D $*.pout \ - $(call strip-plugin,$<), \ - "$* on $(TARGET_NAME)") + $(call strip-plugin,$<)) else run-%: % $(call run-test, $<, \ $(QEMU) -monitor none -display none \ -chardev file$(COMMA)path=$<.out$(COMMA)id=output \ - $(QEMU_OPTS) $<, \ - "$< on $(TARGET_NAME)") + $(QEMU_OPTS) $<) run-plugin-%: $(call run-test, $@, \ @@ -177,8 +187,7 @@ run-plugin-%: -chardev file$(COMMA)path=$@.out$(COMMA)id=output \ -plugin $(PLUGIN_LIB)/$(call extract-plugin,$@) \ -d plugin -D $*.pout \ - $(QEMU_OPTS) $(call strip-plugin,$<), \ - "$* on $(TARGET_NAME)") + $(QEMU_OPTS) $(call strip-plugin,$<)) endif gdb-%: % @@ -187,4 +196,8 @@ gdb-%: % .PHONY: run run: $(RUN_TESTS) -# There is no clean target, the calling make just rm's the tests build dir +clean: + rm -f $(TESTS) *.o $(CLEANFILES) + +distclean: + rm -f config-cc.mak config-target.mak ../config-$(TARGET).mak diff --git a/tests/tcg/aarch64/Makefile.softmmu-target b/tests/tcg/aarch64/Makefile.softmmu-target index a7286ac295..a1368905f5 100644 --- a/tests/tcg/aarch64/Makefile.softmmu-target +++ b/tests/tcg/aarch64/Makefile.softmmu-target @@ -19,6 +19,11 @@ EXTRA_RUNS+=$(MULTIARCH_RUNS) CFLAGS+=-nostdlib -ggdb -O0 $(MINILIB_INC) LDFLAGS+=-static -nostdlib $(CRT_OBJS) $(MINILIB_OBJS) -lgcc +config-cc.mak: Makefile + $(quiet-@)( \ + $(call cc-option,-march=armv8.3-a, CROSS_CC_HAS_ARMV8_3)) 3> config-cc.mak +-include config-cc.mak + # building head blobs .PRECIOUS: $(CRT_OBJS) @@ -50,8 +55,7 @@ run-memory-record: memory-record memory $(QEMU) -monitor none -display none \ -chardev file$(COMMA)path=$<.out$(COMMA)id=output \ -icount shift=5$(COMMA)rr=record$(COMMA)rrfile=record.bin \ - $(QEMU_OPTS) memory, \ - "$< on $(TARGET_NAME)") + $(QEMU_OPTS) memory) .PHONY: memory-replay run-memory-replay: memory-replay run-memory-record @@ -59,12 +63,11 @@ run-memory-replay: memory-replay run-memory-record $(QEMU) -monitor none -display none \ -chardev file$(COMMA)path=$<.out$(COMMA)id=output \ -icount shift=5$(COMMA)rr=replay$(COMMA)rrfile=record.bin \ - $(QEMU_OPTS) memory, \ - "$< on $(TARGET_NAME)") + $(QEMU_OPTS) memory) EXTRA_RUNS+=run-memory-replay -ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_ARMV8_3),) +ifneq ($(CROSS_CC_HAS_ARMV8_3),) pauth-3: CFLAGS += -march=armv8.3-a else pauth-3: diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target index 2c05c90d17..fc8d90ed69 100644 --- a/tests/tcg/aarch64/Makefile.target +++ b/tests/tcg/aarch64/Makefile.target @@ -8,8 +8,8 @@ VPATH += $(ARM_SRC) AARCH64_SRC=$(SRC_PATH)/tests/tcg/aarch64 VPATH += $(AARCH64_SRC) -# Float-convert Tests -AARCH64_TESTS=fcvt +# Base architecture tests 
+AARCH64_TESTS=fcvt pcalign-a64 fcvt: LDFLAGS+=-lm @@ -17,8 +17,17 @@ run-fcvt: fcvt $(call run-test,$<,$(QEMU) $<, "$< on $(TARGET_NAME)") $(call diff-out,$<,$(AARCH64_SRC)/fcvt.ref) +config-cc.mak: Makefile + $(quiet-@)( \ + $(call cc-option,-march=armv8.1-a+sve, CROSS_CC_HAS_SVE); \ + $(call cc-option,-march=armv8.1-a+sve2, CROSS_CC_HAS_SVE2); \ + $(call cc-option,-march=armv8.3-a, CROSS_CC_HAS_ARMV8_3); \ + $(call cc-option,-mbranch-protection=standard, CROSS_CC_HAS_ARMV8_BTI); \ + $(call cc-option,-march=armv8.5-a+memtag, CROSS_CC_HAS_ARMV8_MTE)) 3> config-cc.mak +-include config-cc.mak + # Pauth Tests -ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_ARMV8_3),) +ifneq ($(CROSS_CC_HAS_ARMV8_3),) AARCH64_TESTS += pauth-1 pauth-2 pauth-4 pauth-5 pauth-%: CFLAGS += -march=armv8.3-a run-pauth-%: QEMU_OPTS += -cpu max @@ -27,21 +36,21 @@ endif # BTI Tests # bti-1 tests the elf notes, so we require special compiler support. -ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_ARMV8_BTI),) -AARCH64_TESTS += bti-1 -bti-1: CFLAGS += -mbranch-protection=standard -bti-1: LDFLAGS += -nostdlib +ifneq ($(CROSS_CC_HAS_ARMV8_BTI),) +AARCH64_TESTS += bti-1 bti-3 +bti-1 bti-3: CFLAGS += -mbranch-protection=standard +bti-1 bti-3: LDFLAGS += -nostdlib endif # bti-2 tests PROT_BTI, so no special compiler support required. AARCH64_TESTS += bti-2 # MTE Tests -ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_ARMV8_MTE),) +ifneq ($(CROSS_CC_HAS_ARMV8_MTE),) AARCH64_TESTS += mte-1 mte-2 mte-3 mte-4 mte-5 mte-6 mte-7 mte-%: CFLAGS += -march=armv8.5-a+memtag endif -ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_SVE),) +ifneq ($(CROSS_CC_HAS_SVE),) # System Registers Tests AARCH64_TESTS += sysregs sysregs: CFLAGS+=-march=armv8.1-a+sve @@ -50,6 +59,23 @@ sysregs: CFLAGS+=-march=armv8.1-a+sve AARCH64_TESTS += sve-ioctls sve-ioctls: CFLAGS+=-march=armv8.1-a+sve +# Vector SHA1 +sha1-vector: CFLAGS=-O3 +sha1-vector: sha1.c + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) +run-sha1-vector: sha1-vector run-sha1 + $(call run-test, $<, $(QEMU) $(QEMU_OPTS) $<) + $(call diff-out, sha1-vector, sha1.out) + +TESTS += sha1-vector + +# Vector versions of sha512 (-O3 triggers vectorisation) +sha512-vector: CFLAGS=-O3 +sha512-vector: sha512.c + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) + +TESTS += sha512-vector + ifneq ($(HAVE_GDB_BIN),) GDB_SCRIPT=$(SRC_PATH)/tests/guest-debug/run-test.py @@ -58,18 +84,22 @@ run-gdbstub-sysregs: sysregs --gdb $(HAVE_GDB_BIN) \ --qemu $(QEMU) --qargs "$(QEMU_OPTS)" \ --bin $< --test $(AARCH64_SRC)/gdbstub/test-sve.py, \ - "basic gdbstub SVE support") + basic gdbstub SVE support) run-gdbstub-sve-ioctls: sve-ioctls $(call run-test, $@, $(GDB_SCRIPT) \ --gdb $(HAVE_GDB_BIN) \ --qemu $(QEMU) --qargs "$(QEMU_OPTS)" \ --bin $< --test $(AARCH64_SRC)/gdbstub/test-sve-ioctl.py, \ - "basic gdbstub SVE ZLEN support") + basic gdbstub SVE ZLEN support) EXTRA_RUNS += run-gdbstub-sysregs run-gdbstub-sve-ioctls endif +endif +ifneq ($(CROSS_CC_HAS_SVE2),) +AARCH64_TESTS += test-826 +test-826: CFLAGS+=-march=armv8.1-a+sve2 endif TESTS += $(AARCH64_TESTS) diff --git a/tests/tcg/aarch64/bti-3.c b/tests/tcg/aarch64/bti-3.c new file mode 100644 index 0000000000..a852856d9a --- /dev/null +++ b/tests/tcg/aarch64/bti-3.c @@ -0,0 +1,42 @@ +/* + * BTI vs PACIASP + */ + +#include "bti-crt.inc.c" + +static void skip2_sigill(int sig, siginfo_t *info, ucontext_t *uc) +{ + uc->uc_mcontext.pc += 8; + uc->uc_mcontext.pstate = 1; +} + +#define BTYPE_1() \ + asm("mov %0,#1; adr x16, 1f; br x16; 1: hint #25; mov %0,#0" \ + : "=r"(skipped) : : "x16", "x30") + 
+#define BTYPE_2() \ + asm("mov %0,#1; adr x16, 1f; blr x16; 1: hint #25; mov %0,#0" \ + : "=r"(skipped) : : "x16", "x30") + +#define BTYPE_3() \ + asm("mov %0,#1; adr x15, 1f; br x15; 1: hint #25; mov %0,#0" \ + : "=r"(skipped) : : "x15", "x30") + +#define TEST(WHICH, EXPECT) \ + do { WHICH(); fail += skipped ^ EXPECT; } while (0) + +int main() +{ + int fail = 0; + int skipped; + + /* Signal-like with SA_SIGINFO. */ + signal_info(SIGILL, skip2_sigill); + + /* With SCTLR_EL1.BT0 set, PACIASP is not compatible with type=3. */ + TEST(BTYPE_1, 0); + TEST(BTYPE_2, 0); + TEST(BTYPE_3, 1); + + return fail; +} diff --git a/tests/tcg/aarch64/float_convd.ref b/tests/tcg/aarch64/float_convd.ref new file mode 100644 index 0000000000..cc5e2ef08b --- /dev/null +++ b/tests/tcg/aarch64/float_convd.ref @@ -0,0 +1,988 @@ +### Rounding to nearest +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-inf:0xff800000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x0.00000000000000000000p+0:0x80000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: 
f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000000000000000000p-25:0x33000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000c00000000000000p-14:0x38800006) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0a800000000000000p+1:0x402df854) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb600000000000000p+1:0x40490fdb) (INEXACT ) + to int32: 3 
(INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.00000000000000000000p+31:0x4f000000) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(inf:0x7f800000) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding upwards +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: 0 (INVALID) 
+ to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x0.00000000000000000000p+0:0x80000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000200000000000000p-25:0x33000001) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe800000000000000p-25:0x337ffff4) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) 
+ to single: f32(0x1.ff801c00000000000000p-15:0x387fc00e) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000e00000000000000p-14:0x38800007) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x1.00000000000000000000p-149:0x00000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x1.00000000000000000000p-149:0x00000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x1.00000000000000000000p-149:0x00000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0aa00000000000000p+1:0x402df855) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb600000000000000p+1:0x40490fdb) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 
131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.00000000000000000000p+31:0x4f000000) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(inf:0x7f800000) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding downwards +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-inf:0xff800000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + 
to single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x1.00000000000000000000p-149:0x80000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000000000000000000p-25:0x33000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000c00000000000000p-14:0x38800006) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: 
f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0a800000000000000p+1:0x402df854) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb400000000000000p+1:0x40490fda) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.fffffe00000000000000p+30:0x4effffff) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: 
f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding to zero +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x0.00000000000000000000p+0:0x80000000) (UNDERFLOW 
INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000000000000000000p-25:0x33000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000c00000000000000p-14:0x38800006) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0a800000000000000p+1:0x402df854) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to 
uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb400000000000000p+1:0x40490fda) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.fffffe00000000000000p+30:0x4effffff) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) diff --git a/tests/tcg/aarch64/pcalign-a64.c b/tests/tcg/aarch64/pcalign-a64.c new file 
mode 100644 index 0000000000..6b9277f919 --- /dev/null +++ b/tests/tcg/aarch64/pcalign-a64.c @@ -0,0 +1,37 @@ +/* Test PC misalignment exception */ + +#include +#include +#include +#include + +static void *expected; + +static void sigbus(int sig, siginfo_t *info, void *vuc) +{ + assert(info->si_code == BUS_ADRALN); + assert(info->si_addr == expected); + exit(EXIT_SUCCESS); +} + +int main() +{ + void *tmp; + + struct sigaction sa = { + .sa_sigaction = sigbus, + .sa_flags = SA_SIGINFO + }; + + if (sigaction(SIGBUS, &sa, NULL) < 0) { + perror("sigaction"); + return EXIT_FAILURE; + } + + asm volatile("adr %0, 1f + 1\n\t" + "str %0, %1\n\t" + "br %0\n" + "1:" + : "=&r"(tmp), "=m"(expected)); + abort(); +} diff --git a/tests/tcg/aarch64/system/pauth-3.c b/tests/tcg/aarch64/system/pauth-3.c index 42eff4d5ea..77a467277b 100644 --- a/tests/tcg/aarch64/system/pauth-3.c +++ b/tests/tcg/aarch64/system/pauth-3.c @@ -1,4 +1,4 @@ -#include +#include #include int main() diff --git a/tests/tcg/aarch64/system/semiconsole.c b/tests/tcg/aarch64/system/semiconsole.c index bfe7c9e26b..81324c639f 100644 --- a/tests/tcg/aarch64/system/semiconsole.c +++ b/tests/tcg/aarch64/system/semiconsole.c @@ -6,7 +6,7 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#include +#include #include #define SYS_READC 0x7 diff --git a/tests/tcg/aarch64/system/semiheap.c b/tests/tcg/aarch64/system/semiheap.c new file mode 100644 index 0000000000..a254bd8982 --- /dev/null +++ b/tests/tcg/aarch64/system/semiheap.c @@ -0,0 +1,93 @@ +/* + * Semihosting System HEAPINFO Test + * + * Copyright (c) 2021 Linaro Ltd + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include +#include +#include + +#define SYS_HEAPINFO 0x16 + +uintptr_t __semi_call(uintptr_t type, uintptr_t arg0) +{ + register uintptr_t t asm("x0") = type; + register uintptr_t a0 asm("x1") = arg0; + asm("hlt 0xf000" + : "=r" (t) + : "r" (t), "r" (a0) + : "memory" ); + + return t; +} + +int main(int argc, char *argv[argc]) +{ + struct { + void *heap_base; + void *heap_limit; + void *stack_base; + void *stack_limit; + } info = { }; + void *ptr_to_info = (void *) &info; + uint32_t *ptr_to_heap; + int i; + + ml_printf("Semihosting Heap Info Test\n"); + + __semi_call(SYS_HEAPINFO, (uintptr_t) &ptr_to_info); + + if (info.heap_base == NULL || info.heap_limit == NULL) { + ml_printf("null heap: %p -> %p\n", info.heap_base, info.heap_limit); + return -1; + } + + /* Error if heap base is above limit */ + if ((uintptr_t) info.heap_base >= (uintptr_t) info.heap_limit) { + ml_printf("heap base %p >= heap_limit %p\n", + info.heap_base, info.heap_limit); + return -2; + } + + if (info.stack_base == NULL) { + ml_printf("null stack: %p -> %p\n", info.stack_base, info.stack_limit); + return -3; + } + + /* + * boot.S put our stack somewhere inside the data segment of the + * ELF file, and we know that SYS_HEAPINFO won't pick a range + * that overlaps with part of a loaded ELF file. So the info + * struct (on the stack) should not be inside the reported heap. 
+ */ + if (ptr_to_info > info.heap_base && ptr_to_info < info.heap_limit) { + ml_printf("info appears to be inside the heap: %p in %p:%p\n", + ptr_to_info, info.heap_base, info.heap_limit); + return -4; + } + + ml_printf("heap: %p -> %p\n", info.heap_base, info.heap_limit); + ml_printf("stack: %p <- %p\n", info.stack_limit, info.stack_base); + + /* finally can we read/write the heap */ + ptr_to_heap = (uint32_t *) info.heap_base; + for (i = 0; i < 512; i++) { + *ptr_to_heap++ = i; + } + ptr_to_heap = (uint32_t *) info.heap_base; + for (i = 0; i < 512; i++) { + uint32_t tmp = *ptr_to_heap; + if (tmp != i) { + ml_printf("unexpected value in heap: %d @ %p", tmp, ptr_to_heap); + return -5; + } + ptr_to_heap++; + } + ml_printf("r/w to heap upto %p\n", ptr_to_heap); + + ml_printf("Passed HeapInfo checks\n"); + return 0; +} diff --git a/tests/tcg/aarch64/test-826.c b/tests/tcg/aarch64/test-826.c new file mode 100644 index 0000000000..f59740a8c5 --- /dev/null +++ b/tests/tcg/aarch64/test-826.c @@ -0,0 +1,50 @@ +#include +#include +#include +#include +#include +#include + +static void *expected; + +void sigsegv(int sig, siginfo_t *info, void *vuc) +{ + ucontext_t *uc = vuc; + + assert(info->si_addr == expected); + uc->uc_mcontext.pc += 4; +} + +int main() +{ + struct sigaction sa = { + .sa_sigaction = sigsegv, + .sa_flags = SA_SIGINFO + }; + + void *page; + long ofs; + + if (sigaction(SIGSEGV, &sa, NULL) < 0) { + perror("sigaction"); + return EXIT_FAILURE; + } + + page = mmap(0, getpagesize(), PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (page == MAP_FAILED) { + perror("mmap"); + return EXIT_FAILURE; + } + + ofs = 0x124; + expected = page + ofs; + + asm("ptrue p0.d, vl1\n\t" + "dup z0.d, %0\n\t" + "ldnt1h {z1.d}, p0/z, [z0.d, %1]\n\t" + "dup z1.d, %1\n\t" + "ldnt1h {z0.d}, p0/z, [z1.d, %0]" + : : "r"(page), "r"(ofs) : "v0", "v1"); + + return EXIT_SUCCESS; +} diff --git a/tests/tcg/arm/Makefile.softmmu-target b/tests/tcg/arm/Makefile.softmmu-target index 3fe237ba39..7df88ddea8 100644 --- a/tests/tcg/arm/Makefile.softmmu-target +++ b/tests/tcg/arm/Makefile.softmmu-target @@ -20,7 +20,7 @@ LDFLAGS+=-nostdlib -N -static # Specific Test Rules -test-armv6m-undef: EXTRA_CFLAGS+=-mcpu=cortex-m0 +test-armv6m-undef: EXTRA_CFLAGS+=-mcpu=cortex-m0 -mfloat-abi=soft run-test-armv6m-undef: QEMU_OPTS+=-semihosting -M microbit -kernel run-plugin-test-armv6m-undef-%: QEMU_OPTS+=-semihosting -M microbit -kernel diff --git a/tests/tcg/arm/Makefile.target b/tests/tcg/arm/Makefile.target index 5ab59ed6ce..b3b1504a1c 100644 --- a/tests/tcg/arm/Makefile.target +++ b/tests/tcg/arm/Makefile.target @@ -26,9 +26,13 @@ ARM_TESTS += fcvt fcvt: LDFLAGS+=-lm # fcvt: CFLAGS+=-march=armv8.2-a+fp16 -mfpu=neon-fp-armv8 run-fcvt: fcvt - $(call run-test,fcvt,$(QEMU) $<,"$< on $(TARGET_NAME)") + $(call run-test,fcvt,$(QEMU) $<) $(call diff-out,fcvt,$(ARM_SRC)/fcvt.ref) +# PC alignment test +ARM_TESTS += pcalign-a32 +pcalign-a32: CFLAGS+=-marm + ifeq ($(CONFIG_ARM_COMPATIBLE_SEMIHOSTING),y) # Semihosting smoke test for linux-user @@ -40,13 +44,12 @@ semihosting-arm: semihosting.c $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) run-semihosting-arm: semihosting-arm - $(call run-test,$<,$(QEMU) $< 2> $<.err, "$< on $(TARGET_NAME)") + $(call run-test,$<,$(QEMU) $< 2> $<.err) run-plugin-semihosting-arm-with-%: $(call run-test, $@, $(QEMU) $(QEMU_OPTS) \ -plugin $(PLUGIN_LIB)/$(call extract-plugin,$@) \ - $(call strip-plugin,$<) 2> $<.err, \ - "$< on $(TARGET_NAME) with $*") + $(call strip-plugin,$<) 2> $<.err) ARM_TESTS += 
semiconsole-arm @@ -66,6 +69,23 @@ endif ARM_TESTS += commpage +# Vector SHA1 +sha1-vector: CFLAGS=-O3 +sha1-vector: sha1.c + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) +run-sha1-vector: sha1-vector run-sha1 + $(call run-test, $<, $(QEMU) $(QEMU_OPTS) $<) + $(call diff-out, sha1-vector, sha1.out) + +ARM_TESTS += sha1-vector + +# Vector versions of sha512 (-O3 triggers vectorisation) +sha512-vector: CFLAGS=-O3 +sha512-vector: sha512.c + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) + +ARM_TESTS += sha512-vector + TESTS += $(ARM_TESTS) # On ARM Linux only supports 4k pages diff --git a/tests/tcg/arm/float_convd.ref b/tests/tcg/arm/float_convd.ref new file mode 100644 index 0000000000..5032c2ee2e --- /dev/null +++ b/tests/tcg/arm/float_convd.ref @@ -0,0 +1,988 @@ +### Rounding to nearest +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: 1 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-inf:0xff800000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (INEXACT ) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (INEXACT ) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x0.00000000000000000000p+0:0x80000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 
0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000000000000000000p-25:0x33000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000c00000000000000p-14:0x38800006) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (INEXACT ) + to uint32: 1 (OK) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (UNDERFLOW INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (UNDERFLOW INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (UNDERFLOW INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (UNDERFLOW INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (INEXACT ) + to uint32: 1 (OK) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (INEXACT ) + to uint32: 2 (OK) + to uint64: 2 (INEXACT ) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0a800000000000000p+1:0x402df854) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb600000000000000p+1:0x40490fdb) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + 
to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (INEXACT ) + to uint32: 65503 (OK) + to uint64: 65503 (INEXACT ) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (INEXACT ) + to uint32: 65504 (OK) + to uint64: 65504 (INEXACT ) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (INEXACT ) + to uint32: 65505 (OK) + to uint64: 65505 (INEXACT ) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (INEXACT ) + to uint32: 131007 (OK) + to uint64: 131007 (INEXACT ) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (INEXACT ) + to uint32: 131008 (OK) + to uint64: 131008 (INEXACT ) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (INEXACT ) + to uint32: 131009 (OK) + to uint64: 131009 (INEXACT ) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.00000000000000000000p+31:0x4f000000) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (INEXACT ) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (INEXACT ) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: -1 (INEXACT INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: -1 (INEXACT INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(inf:0x7f800000) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: -1 (INEXACT INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding upwards +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) 
(OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: 1 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (INEXACT ) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (INEXACT ) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x0.00000000000000000000p+0:0x80000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000200000000000000p-25:0x33000001) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe800000000000000p-25:0x337ffff4) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: 
f32(0x1.ff801c00000000000000p-15:0x387fc00e) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000e00000000000000p-14:0x38800007) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (INEXACT ) + to uint32: 1 (OK) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x1.00000000000000000000p-149:0x00000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x1.00000000000000000000p-149:0x00000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (UNDERFLOW INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (UNDERFLOW INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x1.00000000000000000000p-149:0x00000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (UNDERFLOW INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (UNDERFLOW INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (INEXACT ) + to uint32: 1 (OK) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (INEXACT ) + to uint32: 2 (OK) + to uint64: 2 (INEXACT ) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0aa00000000000000p+1:0x402df855) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb600000000000000p+1:0x40490fdb) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (INEXACT ) + to uint32: 65503 (OK) + to uint64: 65503 (INEXACT ) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (INEXACT ) + to uint32: 65504 (OK) + to uint64: 65504 (INEXACT ) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (INEXACT ) + to uint32: 65505 (OK) + to uint64: 65505 (INEXACT ) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (INEXACT ) + to uint32: 131007 (OK) + to uint64: 131007 (INEXACT ) +from double: 
f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (INEXACT ) + to uint32: 131008 (OK) + to uint64: 131008 (INEXACT ) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (INEXACT ) + to uint32: 131009 (OK) + to uint64: 131009 (INEXACT ) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.00000000000000000000p+31:0x4f000000) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (INEXACT ) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (INEXACT ) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: -1 (INEXACT INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: -1 (INEXACT INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(inf:0x7f800000) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: -1 (INEXACT INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding downwards +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: 1 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-inf:0xff800000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to 
uint64: 0 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (INEXACT ) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (INEXACT ) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x1.00000000000000000000p-149:0x80000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000000000000000000p-25:0x33000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000c00000000000000p-14:0x38800006) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (INEXACT ) + to uint32: 1 (OK) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) 
+ to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (UNDERFLOW INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (UNDERFLOW INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (UNDERFLOW INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (UNDERFLOW INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (INEXACT ) + to uint32: 1 (OK) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (INEXACT ) + to uint32: 2 (OK) + to uint64: 2 (INEXACT ) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0a800000000000000p+1:0x402df854) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb400000000000000p+1:0x40490fda) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (INEXACT ) + to uint32: 65503 (OK) + to uint64: 65503 (INEXACT ) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (INEXACT ) + to uint32: 65504 (OK) + to uint64: 65504 (INEXACT ) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (INEXACT ) + to uint32: 65505 (OK) + to uint64: 65505 (INEXACT ) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (INEXACT ) + to uint32: 131007 (OK) + to uint64: 131007 (INEXACT ) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (INEXACT ) + to uint32: 131008 (OK) + to uint64: 131008 (INEXACT ) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (INEXACT ) + to uint32: 131009 (OK) + to uint64: 131009 (INEXACT ) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.fffffe00000000000000p+30:0x4effffff) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (INEXACT ) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (INEXACT ) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: -1 (INEXACT INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to 
single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: -1 (INEXACT INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: -1 (INEXACT INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding to zero +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: 1 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: 1 (INEXACT INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (INEXACT ) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (INEXACT ) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from 
double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x0.00000000000000000000p+0:0x80000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000000000000000000p-25:0x33000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000c00000000000000p-14:0x38800006) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (INEXACT ) + to uint32: 1 (OK) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (UNDERFLOW INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (UNDERFLOW INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (UNDERFLOW INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (UNDERFLOW INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (INEXACT ) + to uint32: 1 (OK) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (INEXACT ) + to uint32: 2 (OK) + to 
uint64: 2 (INEXACT ) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0a800000000000000p+1:0x402df854) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb400000000000000p+1:0x40490fda) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (INEXACT ) + to uint32: 65503 (OK) + to uint64: 65503 (INEXACT ) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (INEXACT ) + to uint32: 65504 (OK) + to uint64: 65504 (INEXACT ) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (INEXACT ) + to uint32: 65505 (OK) + to uint64: 65505 (INEXACT ) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (INEXACT ) + to uint32: 131007 (OK) + to uint64: 131007 (INEXACT ) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (INEXACT ) + to uint32: 131008 (OK) + to uint64: 131008 (INEXACT ) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (INEXACT ) + to uint32: 131009 (OK) + to uint64: 131009 (INEXACT ) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.fffffe00000000000000p+30:0x4effffff) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (INEXACT ) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (INEXACT ) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: -1 (INEXACT INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: -1 (INEXACT INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: -1 (INEXACT INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) 
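(Editorial note, not part of the patch.) The reference blocks in this file record, for each rounding mode, the expected single-precision result, the integer conversions, and the IEEE exception flags QEMU's softfloat should raise for each input. The sketch below is a rough host-side analogue for one input from the "### Rounding to zero" block above (pi, 0x1.921fb54442d18p+1); the file name and output format are invented for illustration, and whether a plain C program reproduces the flags exactly depends on the host toolchain honouring the dynamic rounding mode (e.g. build with something like `gcc -O0 -frounding-math demo.c -lm`).

```c
/*
 * Illustrative host-side sketch only: convert one input from the
 * reference data above under round-toward-zero, the mode exercised
 * by the "### Rounding to zero" block. Not taken from float_convd.c.
 */
#include <fenv.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* volatile so the conversions happen at run time, not compile time */
    volatile double in = 0x1.921fb54442d18p+1; /* pi, as in the .ref data */
    float as_single;
    int32_t as_int32;

    fesetround(FE_TOWARDZERO);
    feclearexcept(FE_ALL_EXCEPT);

    as_single = (float)in;   /* reference expects 0x1.921fb4p+1 (INEXACT) */
    as_int32 = (int32_t)in;  /* reference expects 3 */

    printf("single: %a  int32: %" PRId32 "  inexact: %s\n",
           (double)as_single, as_int32,
           fetestexcept(FE_INEXACT) ? "yes" : "no");
    return 0;
}
```

The in-tree tests instead run the conversion program under QEMU for the target and diff its stdout against reference files such as this one (compare the diff-out calls in the Makefile changes earlier in this patch), so the sketch above is only meant to make the .ref format easier to read.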
+from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) diff --git a/tests/tcg/arm/pcalign-a32.c b/tests/tcg/arm/pcalign-a32.c new file mode 100644 index 0000000000..3c9c8cc97b --- /dev/null +++ b/tests/tcg/arm/pcalign-a32.c @@ -0,0 +1,46 @@ +/* Test PC misalignment exception */ + +#ifdef __thumb__ +#error "This test must be compiled for ARM" +#endif + +#include <assert.h> +#include <signal.h> +#include <stdlib.h> +#include <stdio.h> + +static void *expected; + +static void sigbus(int sig, siginfo_t *info, void *vuc) +{ + assert(info->si_code == BUS_ADRALN); + assert(info->si_addr == expected); + exit(EXIT_SUCCESS); +} + +int main() +{ + void *tmp; + + struct sigaction sa = { + .sa_sigaction = sigbus, + .sa_flags = SA_SIGINFO + }; + + if (sigaction(SIGBUS, &sa, NULL) < 0) { + perror("sigaction"); + return EXIT_FAILURE; + } + + asm volatile("adr %0, 1f + 2\n\t" + "str %0, %1\n\t" + "bx %0\n" + "1:" + : "=&r"(tmp), "=m"(expected)); + + /* + * From v8, it is CONSTRAINED UNPREDICTABLE whether BXWritePC aligns + * the address or not. If so, we can legitimately fall through. + */ + return EXIT_SUCCESS; +} diff --git a/tests/tcg/configure.sh b/tests/tcg/configure.sh deleted file mode 100755 index 1f985ccfc0..0000000000 --- a/tests/tcg/configure.sh +++ /dev/null @@ -1,330 +0,0 @@ -#! /bin/sh - -if test -z "$source_path"; then - echo Do not invoke this script directly. It is called - echo automatically by configure. - exit 1 -fi - -write_c_skeleton() { - cat > $TMPC <<EOF -int main(void) { return 0; } -EOF -} - -has() { - command -v "$1" >/dev/null 2>&1 -} - -do_compiler() { - # Run the compiler, capturing its output to the log. First argument - # is compiler binary to execute. - local compiler="$1" - shift - if test -n "$BASH_VERSION"; then eval ' - echo >>config.log " -funcs: ${FUNCNAME[*]} -lines: ${BASH_LINENO[*]}" - '; fi - echo $compiler "$@" >> config.log - $compiler "$@" >> config.log 2>&1 || return $?
-} - - -TMPDIR1="config-temp" -TMPC="${TMPDIR1}/qemu-conf.c" -TMPE="${TMPDIR1}/qemu-conf.exe" - -container="no" -if test $use_containers = "yes"; then - if has "docker" || has "podman"; then - container=$($python $source_path/tests/docker/docker.py probe) - fi -fi - -# cross compilers defaults, can be overridden with --cross-cc-ARCH -: ${cross_cc_aarch64="aarch64-linux-gnu-gcc"} -: ${cross_cc_aarch64_be="$cross_cc_aarch64"} -: ${cross_cc_cflags_aarch64_be="-mbig-endian"} -: $(cross_cc_alpha="alpha-linux-gnu-gcc") -: ${cross_cc_arm="arm-linux-gnueabihf-gcc"} -: ${cross_cc_cflags_armeb="-mbig-endian"} -: ${cross_cc_hexagon="hexagon-unknown-linux-musl-clang"} -: ${cross_cc_cflags_hexagon="-mv67 -O2 -static"} -: ${cross_cc_hppa="hppa-linux-gnu-gcc"} -: ${cross_cc_i386="i686-linux-gnu-gcc"} -: ${cross_cc_cflags_i386="-m32"} -: ${cross_cc_m68k="m68k-linux-gnu-gcc"} -: $(cross_cc_mips64el="mips64el-linux-gnuabi64-gcc") -: $(cross_cc_mips64="mips64-linux-gnuabi64-gcc") -: $(cross_cc_mipsel="mipsel-linux-gnu-gcc") -: $(cross_cc_mips="mips-linux-gnu-gcc") -: ${cross_cc_ppc="powerpc-linux-gnu-gcc"} -: ${cross_cc_cflags_ppc="-m32"} -: ${cross_cc_ppc64="powerpc64-linux-gnu-gcc"} -: ${cross_cc_ppc64le="powerpc64le-linux-gnu-gcc"} -: $(cross_cc_riscv64="riscv64-linux-gnu-gcc") -: ${cross_cc_s390x="s390x-linux-gnu-gcc"} -: $(cross_cc_sh4="sh4-linux-gnu-gcc") -: ${cross_cc_cflags_sparc="-m32 -mv8plus -mcpu=ultrasparc"} -: ${cross_cc_sparc64="sparc64-linux-gnu-gcc"} -: ${cross_cc_cflags_sparc64="-m64 -mcpu=ultrasparc"} -: ${cross_cc_x86_64="x86_64-linux-gnu-gcc"} -: ${cross_cc_cflags_x86_64="-m64"} - -# tricore is special as it doesn't have a compiler -: ${cross_as_tricore="tricore-as"} -: ${cross_ld_tricore="tricore-ld"} - -for target in $target_list; do - arch=${target%%-*} - - # reset all container fields - container_image= - container_hosts= - container_cross_cc= - container_cross_as= - container_cross_ld= - - # suppress clang - supress_clang= - - case $target in - aarch64-*) - # We don't have any bigendian build tools so we only use this for AArch64 - container_hosts="x86_64 aarch64" - container_image=debian-arm64-test-cross - container_cross_cc=aarch64-linux-gnu-gcc-10 - ;; - alpha-*) - container_hosts=x86_64 - container_image=debian-alpha-cross - container_cross_cc=alpha-linux-gnu-gcc - ;; - arm-*) - # We don't have any bigendian build tools so we only use this for ARM - container_hosts="x86_64 aarch64" - container_image=debian-armhf-cross - container_cross_cc=arm-linux-gnueabihf-gcc - ;; - cris-*) - container_hosts=x86_64 - container_image=fedora-cris-cross - container_cross_cc=cris-linux-gnu-gcc - ;; - hexagon-*) - container_hosts=x86_64 - container_image=debian-hexagon-cross - container_cross_cc=hexagon-unknown-linux-musl-clang - ;; - hppa-*) - container_hosts=x86_64 - container_image=debian-hppa-cross - container_cross_cc=hppa-linux-gnu-gcc - ;; - i386-*) - container_hosts=x86_64 - container_image=fedora-i386-cross - container_cross_cc=gcc - supress_clang=yes - ;; - m68k-*) - container_hosts=x86_64 - container_image=debian-m68k-cross - container_cross_cc=m68k-linux-gnu-gcc - ;; - mips64el-*) - container_hosts=x86_64 - container_image=debian-mips64el-cross - container_cross_cc=mips64el-linux-gnuabi64-gcc - ;; - mips64-*) - container_hosts=x86_64 - container_image=debian-mips64-cross - container_cross_cc=mips64-linux-gnuabi64-gcc - ;; - mipsel-*) - container_hosts=x86_64 - container_image=debian-mipsel-cross - container_cross_cc=mipsel-linux-gnu-gcc - ;; - mips-*) - container_hosts=x86_64 - 
container_image=debian-mips-cross - container_cross_cc=mips-linux-gnu-gcc - ;; - ppc-*|ppc64abi32-*) - container_hosts=x86_64 - container_image=debian-powerpc-test-cross - container_cross_cc=powerpc-linux-gnu-gcc-10 - ;; - ppc64-*|ppc64le-*) - container_hosts=x86_64 - container_image=debian-powerpc-test-cross - container_cross_cc=${target%%-*}-linux-gnu-gcc-10 - container_cross_cc=powerpc${container_cross_cc#ppc} - ;; - riscv64-*) - container_hosts=x86_64 - container_image=debian-riscv64-cross - container_cross_cc=riscv64-linux-gnu-gcc - ;; - s390x-*) - container_hosts=x86_64 - container_image=debian-s390x-cross - container_cross_cc=s390x-linux-gnu-gcc - ;; - sh4-*) - container_hosts=x86_64 - container_image=debian-sh4-cross - container_cross_cc=sh4-linux-gnu-gcc - ;; - sparc64-*) - container_hosts=x86_64 - container_image=debian-sparc64-cross - container_cross_cc=sparc64-linux-gnu-gcc - ;; - tricore-softmmu) - container_hosts=x86_64 - container_image=debian-tricore-cross - container_cross_as=tricore-as - container_cross_ld=tricore-ld - ;; - x86_64-*) - container_hosts="aarch64 ppc64el x86_64" - container_image=debian-amd64-cross - container_cross_cc=x86_64-linux-gnu-gcc - supress_clang=yes - ;; - xtensa*-softmmu) - container_hosts=x86_64 - container_image=debian-xtensa-cross - - # default to the dc232b cpu - container_cross_cc=/opt/2020.07/xtensa-dc232b-elf/bin/xtensa-dc232b-elf-gcc - ;; - esac - - config_target_mak=tests/tcg/config-$target.mak - - echo "# Automatically generated by configure - do not modify" > $config_target_mak - echo "TARGET_NAME=$arch" >> $config_target_mak - echo "target=$target" >> $config_target_mak - case $target in - *-linux-user | *-bsd-user) - echo "CONFIG_USER_ONLY=y" >> $config_target_mak - echo "QEMU=$PWD/qemu-$arch" >> $config_target_mak - ;; - *-softmmu) - echo "CONFIG_SOFTMMU=y" >> $config_target_mak - echo "QEMU=$PWD/qemu-system-$arch" >> $config_target_mak - ;; - esac - - eval "target_compiler_cflags=\${cross_cc_cflags_$arch}" - echo "CROSS_CC_GUEST_CFLAGS=$target_compiler_cflags" >> $config_target_mak - - got_cross_cc=no - - if eval test "x\"\${cross_cc_$arch}\"" != xyes; then - eval "target_compiler=\"\${cross_cc_$arch}\"" - - if has $target_compiler; then - if test "$supress_clang" = yes && - $target_compiler --version | grep -qi "clang"; then - got_cross_cc=no - else - write_c_skeleton - if ! do_compiler "$target_compiler" $target_compiler_cflags \ - -o $TMPE $TMPC -static ; then - # For host systems we might get away with building without -static - if do_compiler "$target_compiler" $target_compiler_cflags \ - -o $TMPE $TMPC ; then - got_cross_cc=yes - echo "CROSS_CC_GUEST_STATIC=y" >> $config_target_mak - echo "CROSS_CC_GUEST=$target_compiler" >> $config_target_mak - fi - else - got_cross_cc=yes - echo "CROSS_CC_GUEST_STATIC=y" >> $config_target_mak - echo "CROSS_CC_GUEST=$target_compiler" >> $config_target_mak - fi - fi - fi - - # Special handling for assembler only tests - eval "target_as=\"\${cross_as_$arch}\"" - eval "target_ld=\"\${cross_ld_$arch}\"" - if has $target_as && has $target_ld; then - case $target in - tricore-softmmu) - echo "CROSS_CC_GUEST=$target_as" >> $config_target_mak - echo "CROSS_AS_GUEST=$target_as" >> $config_target_mak - echo "CROSS_LD_GUEST=$target_ld" >> $config_target_mak - got_cross_cc=yes - ;; - esac - fi - fi - - if test $got_cross_cc = yes; then - # Test for compiler features for optional tests. 
We only do this - # for cross compilers because ensuring the docker containers based - # compilers is a requirememt for adding a new test that needs a - # compiler feature. - - case $target in - aarch64-*) - if do_compiler "$target_compiler" $target_compiler_cflags \ - -march=armv8.1-a+sve -o $TMPE $TMPC; then - echo "CROSS_CC_HAS_SVE=y" >> $config_target_mak - fi - if do_compiler "$target_compiler" $target_compiler_cflags \ - -march=armv8.3-a -o $TMPE $TMPC; then - echo "CROSS_CC_HAS_ARMV8_3=y" >> $config_target_mak - fi - if do_compiler "$target_compiler" $target_compiler_cflags \ - -mbranch-protection=standard -o $TMPE $TMPC; then - echo "CROSS_CC_HAS_ARMV8_BTI=y" >> $config_target_mak - fi - if do_compiler "$target_compiler" $target_compiler_cflags \ - -march=armv8.5-a+memtag -o $TMPE $TMPC; then - echo "CROSS_CC_HAS_ARMV8_MTE=y" >> $config_target_mak - fi - ;; - ppc*) - if do_compiler "$target_compiler" $target_compiler_cflags \ - -mpower8-vector -o $TMPE $TMPC; then - echo "CROSS_CC_HAS_POWER8_VECTOR=y" >> $config_target_mak - fi - if do_compiler "$target_compiler" $target_compiler_cflags \ - -mpower10 -o $TMPE $TMPC; then - echo "CROSS_CC_HAS_POWER10=y" >> $config_target_mak - fi - ;; - i386-linux-user) - if do_compiler "$target_compiler" $target_compiler_cflags \ - -Werror -fno-pie -o $TMPE $TMPC; then - echo "CROSS_CC_HAS_I386_NOPIE=y" >> $config_target_mak - fi - ;; - esac - elif test $got_cross_cc = no && test "$container" != no && \ - test -n "$container_image"; then - for host in $container_hosts; do - if test "$host" = "$ARCH"; then - echo "DOCKER_IMAGE=$container_image" >> $config_target_mak - echo "DOCKER_CROSS_CC_GUEST=$container_cross_cc" >> \ - $config_target_mak - if test -n "$container_cross_as"; then - echo "DOCKER_CROSS_AS_GUEST=$container_cross_as" >> \ - $config_target_mak - fi - if test -n "$container_cross_ld"; then - echo "DOCKER_CROSS_LD_GUEST=$container_cross_ld" >> \ - $config_target_mak - fi - fi - done - fi -done diff --git a/tests/tcg/cris/Makefile.target b/tests/tcg/cris/Makefile.target index e72d3cbdb2..372287bd03 100644 --- a/tests/tcg/cris/Makefile.target +++ b/tests/tcg/cris/Makefile.target @@ -56,4 +56,4 @@ SIMG:=cris-axis-linux-gnu-run # e.g.: make -f ../../tests/tcg/Makefile run-check_orm-on-sim run-%-on-sim: - $(call run-test, $<, $(SIMG) $<, "$< on $(TARGET_NAME) with SIM") + $(call run-test, $<, $(SIMG) $<) diff --git a/tests/tcg/hexagon/Makefile.target b/tests/tcg/hexagon/Makefile.target index 050cd61c1a..96a4d7a614 100644 --- a/tests/tcg/hexagon/Makefile.target +++ b/tests/tcg/hexagon/Makefile.target @@ -1,5 +1,5 @@ ## -## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. +## Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. 
## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by @@ -28,16 +28,25 @@ first: $(HEX_SRC)/first.S $(CC) -static -mv67 -nostdlib $^ -o $@ HEX_TESTS = first +HEX_TESTS += hex_sigsegv HEX_TESTS += misc +HEX_TESTS += usr HEX_TESTS += preg_alias HEX_TESTS += dual_stores HEX_TESTS += multi_result HEX_TESTS += mem_noshuf +HEX_TESTS += mem_noshuf_exception HEX_TESTS += circ HEX_TESTS += brev HEX_TESTS += load_unpack HEX_TESTS += load_align HEX_TESTS += atomics HEX_TESTS += fpstuff +HEX_TESTS += overflow TESTS += $(HEX_TESTS) + +# This test has to be compiled for the -mv67t target +usr: usr.c + $(CC) $(CFLAGS) -mv67t -O2 -Wno-inline-asm -Wno-expansion-to-defined $< -o $@ $(LDFLAGS) + diff --git a/tests/tcg/hexagon/circ.c b/tests/tcg/hexagon/circ.c index 67a1aa3054..354416eb6d 100644 --- a/tests/tcg/hexagon/circ.c +++ b/tests/tcg/hexagon/circ.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -415,7 +415,8 @@ static void circ_test_v3(void) { int *p = wbuf; int size = 15; - int K = 4; /* 64 bytes */ + /* set high bit in K to test unsigned extract in fcirc */ + int K = 8; /* 1024 bytes */ int element; int i; diff --git a/tests/tcg/hexagon/float_convd.ref b/tests/tcg/hexagon/float_convd.ref new file mode 100644 index 0000000000..aba1e13e35 --- /dev/null +++ b/tests/tcg/hexagon/float_convd.ref @@ -0,0 +1,988 @@ +### Rounding to nearest +from double: f64(nan:0x007ff4000000000000) + to single: f32(-nan:0xffffffff) (INVALID) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffffffff) (OK) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-inf:0xff800000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: 
-9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x0.00000000000000000000p+0:0x80000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000000000000000000p-25:0x33000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000c00000000000000p-14:0x38800006) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.3d5054450ed000000000p-1023:0x000009ea82a2287680) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.5731f750864200000000p-1023:0x00000ab98fba843210) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT 
) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0a800000000000000p+1:0x402df854) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb600000000000000p+1:0x40490fdb) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.00000000000000000000p+31:0x4f000000) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(inf:0x7f800000) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from 
double: f64(nan:0x007ff8000000000000) + to single: f32(-nan:0xffffffff) (OK) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(-nan:0xffffffff) (INVALID) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(-nan:0xffffffff) (INVALID) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +### Rounding upwards +from double: f64(nan:0x007ff4000000000000) + to single: f32(-nan:0xffffffff) (INVALID) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffffffff) (OK) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x0.00000000000000000000p+0:0x80000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: 
f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000200000000000000p-25:0x33000001) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe800000000000000p-25:0x337ffff4) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801c00000000000000p-15:0x387fc00e) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000e00000000000000p-14:0x38800007) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x1.00000000000000000000p-149:0x00000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.3d5054450ed000000000p-1023:0x000009ea82a2287680) + to single: f32(0x1.00000000000000000000p-149:0x00000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.5731f750864200000000p-1023:0x00000ab98fba843210) + to single: f32(0x1.00000000000000000000p-149:0x00000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0aa00000000000000p+1:0x402df855) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb600000000000000p+1:0x40490fdb) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to 
int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.00000000000000000000p+31:0x4f000000) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(inf:0x7f800000) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(-nan:0xffffffff) (OK) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(-nan:0xffffffff) (INVALID) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(-nan:0xffffffff) (INVALID) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +### Rounding downwards +from double: f64(nan:0x007ff4000000000000) + to single: f32(-nan:0xffffffff) (INVALID) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffffffff) (OK) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to 
int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-inf:0xff800000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x1.00000000000000000000p-149:0x80000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000000000000000000p-25:0x33000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: 
f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000c00000000000000p-14:0x38800006) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.3d5054450ed000000000p-1023:0x000009ea82a2287680) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.5731f750864200000000p-1023:0x00000ab98fba843210) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0a800000000000000p+1:0x402df854) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb400000000000000p+1:0x40490fda) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: 
f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.fffffe00000000000000p+30:0x4effffff) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(-nan:0xffffffff) (OK) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(-nan:0xffffffff) (INVALID) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(-nan:0xffffffff) (INVALID) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +### Rounding to zero +from double: f64(nan:0x007ff4000000000000) + to single: f32(-nan:0xffffffff) (INVALID) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffffffff) (OK) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) (INEXACT ) + to int32: -2147483648 
(INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x0.00000000000000000000p+0:0x80000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000000000000000000p-25:0x33000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000c00000000000000p-14:0x38800006) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.3d5054450ed000000000p-1023:0x000009ea82a2287680) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to 
int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.5731f750864200000000p-1023:0x00000ab98fba843210) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0a800000000000000p+1:0x402df854) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb400000000000000p+1:0x40490fda) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.fffffe00000000000000p+30:0x4effffff) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OVERFLOW INEXACT ) + to int32: 2147483647 
(INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(-nan:0xffffffff) (OK) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(-nan:0xffffffff) (INVALID) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(-nan:0xffffffff) (INVALID) + to int32: -1 (INVALID) + to int64: -1 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) diff --git a/tests/tcg/hexagon/float_convs.ref b/tests/tcg/hexagon/float_convs.ref index 9ec9ffc08d..a5505c337b 100644 --- a/tests/tcg/hexagon/float_convs.ref +++ b/tests/tcg/hexagon/float_convs.ref @@ -18,31 +18,31 @@ from single: f32(-inf:0xff800000) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) - to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (INEXACT ) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) to int32: -2147483648 (INVALID) to int64: -9223372036854775808 (INVALID) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) - to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (INEXACT ) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) to int32: -2147483648 (INVALID) to int64: -9223372036854775808 (INVALID) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) - to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (INEXACT ) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) to int32: -2147483648 (INVALID) to int64: -9223372036854775808 (INVALID) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) - to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (INEXACT ) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) - to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (INEXACT ) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INVALID) @@ -72,19 +72,19 @@ from single: f32(0x1.00000000000000000000p-25:0x33000000) to uint32: 0 (INEXACT ) to uint64: 0 (INEXACT ) from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) - to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (INEXACT ) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INEXACT ) to uint64: 0 (INEXACT ) from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) - to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (INEXACT ) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INEXACT ) to uint64: 0 (INEXACT ) from single: 
f32(0x1.00000c00000000000000p-14:0x38800006) - to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (INEXACT ) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INEXACT ) @@ -96,7 +96,7 @@ from single: f32(0x1.00000000000000000000p+0:0x3f800000) to uint32: 1 (OK) to uint64: 1 (OK) from single: f32(0x1.00400000000000000000p+0:0x3f802000) - to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (INEXACT ) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) to int32: 1 (INEXACT ) to int64: 1 (INEXACT ) to uint32: 1 (INEXACT ) @@ -108,61 +108,61 @@ from single: f32(0x1.00000000000000000000p+1:0x40000000) to uint32: 2 (OK) to uint64: 2 (OK) from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) - to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (INEXACT ) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) to int32: 2 (INEXACT ) to int64: 2 (INEXACT ) to uint32: 2 (INEXACT ) to uint64: 2 (INEXACT ) from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) - to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (INEXACT ) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) to int32: 3 (INEXACT ) to int64: 3 (INEXACT ) to uint32: 3 (INEXACT ) to uint64: 3 (INEXACT ) from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) - to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (INEXACT ) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) to int32: 65503 (OK) to int64: 65503 (OK) to uint32: 65503 (OK) to uint64: 65503 (OK) from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) - to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (INEXACT ) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) to int32: 65504 (OK) to int64: 65504 (OK) to uint32: 65504 (OK) to uint64: 65504 (OK) from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) - to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (INEXACT ) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) to int32: 65505 (OK) to int64: 65505 (OK) to uint32: 65505 (OK) to uint64: 65505 (OK) from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) - to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (INEXACT ) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) to int32: 131007 (OK) to int64: 131007 (OK) to uint32: 131007 (OK) to uint64: 131007 (OK) from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) - to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (INEXACT ) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) to int32: 131008 (OK) to int64: 131008 (OK) to uint32: 131008 (OK) to uint64: 131008 (OK) from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) - to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (INEXACT ) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) to int32: 131009 (OK) to int64: 131009 (OK) to uint32: 131009 (OK) to uint64: 131009 (OK) from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) - to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (INEXACT ) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) to int32: 2147483647 (INVALID) to int64: 9223372036854775807 (INVALID) to uint32: -1 (INVALID) to uint64: -1 (INVALID) from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) - to 
double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (INEXACT ) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) to int32: 2147483647 (INVALID) to int64: 9223372036854775807 (INVALID) to uint32: -1 (INVALID) @@ -205,31 +205,31 @@ from single: f32(-inf:0xff800000) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) - to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (INEXACT ) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) to int32: -2147483648 (INVALID) to int64: -9223372036854775808 (INVALID) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) - to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (INEXACT ) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) to int32: -2147483648 (INVALID) to int64: -9223372036854775808 (INVALID) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) - to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (INEXACT ) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) to int32: -2147483648 (INVALID) to int64: -9223372036854775808 (INVALID) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) - to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (INEXACT ) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) - to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (INEXACT ) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INVALID) @@ -259,19 +259,19 @@ from single: f32(0x1.00000000000000000000p-25:0x33000000) to uint32: 0 (INEXACT ) to uint64: 0 (INEXACT ) from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) - to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (INEXACT ) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INEXACT ) to uint64: 0 (INEXACT ) from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) - to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (INEXACT ) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INEXACT ) to uint64: 0 (INEXACT ) from single: f32(0x1.00000c00000000000000p-14:0x38800006) - to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (INEXACT ) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INEXACT ) @@ -283,7 +283,7 @@ from single: f32(0x1.00000000000000000000p+0:0x3f800000) to uint32: 1 (OK) to uint64: 1 (OK) from single: f32(0x1.00400000000000000000p+0:0x3f802000) - to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (INEXACT ) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) to int32: 1 (INEXACT ) to int64: 1 (INEXACT ) to uint32: 1 (INEXACT ) @@ -295,61 +295,61 @@ from single: f32(0x1.00000000000000000000p+1:0x40000000) to uint32: 2 (OK) to uint64: 2 (OK) from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) - to double: 
f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (INEXACT ) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) to int32: 2 (INEXACT ) to int64: 2 (INEXACT ) to uint32: 2 (INEXACT ) to uint64: 2 (INEXACT ) from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) - to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (INEXACT ) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) to int32: 3 (INEXACT ) to int64: 3 (INEXACT ) to uint32: 3 (INEXACT ) to uint64: 3 (INEXACT ) from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) - to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (INEXACT ) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) to int32: 65503 (OK) to int64: 65503 (OK) to uint32: 65503 (OK) to uint64: 65503 (OK) from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) - to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (INEXACT ) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) to int32: 65504 (OK) to int64: 65504 (OK) to uint32: 65504 (OK) to uint64: 65504 (OK) from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) - to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (INEXACT ) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) to int32: 65505 (OK) to int64: 65505 (OK) to uint32: 65505 (OK) to uint64: 65505 (OK) from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) - to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (INEXACT ) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) to int32: 131007 (OK) to int64: 131007 (OK) to uint32: 131007 (OK) to uint64: 131007 (OK) from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) - to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (INEXACT ) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) to int32: 131008 (OK) to int64: 131008 (OK) to uint32: 131008 (OK) to uint64: 131008 (OK) from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) - to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (INEXACT ) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) to int32: 131009 (OK) to int64: 131009 (OK) to uint32: 131009 (OK) to uint64: 131009 (OK) from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) - to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (INEXACT ) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) to int32: 2147483647 (INVALID) to int64: 9223372036854775807 (INVALID) to uint32: -1 (INVALID) to uint64: -1 (INVALID) from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) - to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (INEXACT ) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) to int32: 2147483647 (INVALID) to int64: 9223372036854775807 (INVALID) to uint32: -1 (INVALID) @@ -392,31 +392,31 @@ from single: f32(-inf:0xff800000) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) - to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (INEXACT ) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) to int32: -2147483648 (INVALID) to int64: -9223372036854775808 (INVALID) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) - to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (INEXACT ) + to double: 
f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) to int32: -2147483648 (INVALID) to int64: -9223372036854775808 (INVALID) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) - to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (INEXACT ) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) to int32: -2147483648 (INVALID) to int64: -9223372036854775808 (INVALID) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) - to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (INEXACT ) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) - to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (INEXACT ) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INVALID) @@ -446,19 +446,19 @@ from single: f32(0x1.00000000000000000000p-25:0x33000000) to uint32: 0 (INEXACT ) to uint64: 0 (INEXACT ) from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) - to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (INEXACT ) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INEXACT ) to uint64: 0 (INEXACT ) from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) - to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (INEXACT ) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INEXACT ) to uint64: 0 (INEXACT ) from single: f32(0x1.00000c00000000000000p-14:0x38800006) - to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (INEXACT ) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INEXACT ) @@ -470,7 +470,7 @@ from single: f32(0x1.00000000000000000000p+0:0x3f800000) to uint32: 1 (OK) to uint64: 1 (OK) from single: f32(0x1.00400000000000000000p+0:0x3f802000) - to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (INEXACT ) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) to int32: 1 (INEXACT ) to int64: 1 (INEXACT ) to uint32: 1 (INEXACT ) @@ -482,61 +482,61 @@ from single: f32(0x1.00000000000000000000p+1:0x40000000) to uint32: 2 (OK) to uint64: 2 (OK) from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) - to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (INEXACT ) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) to int32: 2 (INEXACT ) to int64: 2 (INEXACT ) to uint32: 2 (INEXACT ) to uint64: 2 (INEXACT ) from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) - to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (INEXACT ) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) to int32: 3 (INEXACT ) to int64: 3 (INEXACT ) to uint32: 3 (INEXACT ) to uint64: 3 (INEXACT ) from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) - to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (INEXACT ) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) to int32: 65503 (OK) to int64: 65503 (OK) to uint32: 65503 (OK) to uint64: 65503 (OK) from single: 
f32(0x1.ffc00000000000000000p+15:0x477fe000) - to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (INEXACT ) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) to int32: 65504 (OK) to int64: 65504 (OK) to uint32: 65504 (OK) to uint64: 65504 (OK) from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) - to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (INEXACT ) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) to int32: 65505 (OK) to int64: 65505 (OK) to uint32: 65505 (OK) to uint64: 65505 (OK) from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) - to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (INEXACT ) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) to int32: 131007 (OK) to int64: 131007 (OK) to uint32: 131007 (OK) to uint64: 131007 (OK) from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) - to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (INEXACT ) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) to int32: 131008 (OK) to int64: 131008 (OK) to uint32: 131008 (OK) to uint64: 131008 (OK) from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) - to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (INEXACT ) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) to int32: 131009 (OK) to int64: 131009 (OK) to uint32: 131009 (OK) to uint64: 131009 (OK) from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) - to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (INEXACT ) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) to int32: 2147483647 (INVALID) to int64: 9223372036854775807 (INVALID) to uint32: -1 (INVALID) to uint64: -1 (INVALID) from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) - to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (INEXACT ) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) to int32: 2147483647 (INVALID) to int64: 9223372036854775807 (INVALID) to uint32: -1 (INVALID) @@ -579,31 +579,31 @@ from single: f32(-inf:0xff800000) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) - to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (INEXACT ) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) to int32: -2147483648 (INVALID) to int64: -9223372036854775808 (INVALID) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) - to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (INEXACT ) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) to int32: -2147483648 (INVALID) to int64: -9223372036854775808 (INVALID) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) - to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (INEXACT ) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) to int32: -2147483648 (INVALID) to int64: -9223372036854775808 (INVALID) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) - to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (INEXACT ) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INVALID) to uint64: 0 (INVALID) from single: 
f32(-0x1.50544400000000000000p-66:0x9ea82a22) - to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (INEXACT ) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INVALID) @@ -633,19 +633,19 @@ from single: f32(0x1.00000000000000000000p-25:0x33000000) to uint32: 0 (INEXACT ) to uint64: 0 (INEXACT ) from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) - to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (INEXACT ) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INEXACT ) to uint64: 0 (INEXACT ) from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) - to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (INEXACT ) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INEXACT ) to uint64: 0 (INEXACT ) from single: f32(0x1.00000c00000000000000p-14:0x38800006) - to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (INEXACT ) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) to int32: 0 (INEXACT ) to int64: 0 (INEXACT ) to uint32: 0 (INEXACT ) @@ -657,7 +657,7 @@ from single: f32(0x1.00000000000000000000p+0:0x3f800000) to uint32: 1 (OK) to uint64: 1 (OK) from single: f32(0x1.00400000000000000000p+0:0x3f802000) - to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (INEXACT ) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) to int32: 1 (INEXACT ) to int64: 1 (INEXACT ) to uint32: 1 (INEXACT ) @@ -669,61 +669,61 @@ from single: f32(0x1.00000000000000000000p+1:0x40000000) to uint32: 2 (OK) to uint64: 2 (OK) from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) - to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (INEXACT ) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) to int32: 2 (INEXACT ) to int64: 2 (INEXACT ) to uint32: 2 (INEXACT ) to uint64: 2 (INEXACT ) from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) - to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (INEXACT ) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) to int32: 3 (INEXACT ) to int64: 3 (INEXACT ) to uint32: 3 (INEXACT ) to uint64: 3 (INEXACT ) from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) - to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (INEXACT ) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) to int32: 65503 (OK) to int64: 65503 (OK) to uint32: 65503 (OK) to uint64: 65503 (OK) from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) - to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (INEXACT ) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) to int32: 65504 (OK) to int64: 65504 (OK) to uint32: 65504 (OK) to uint64: 65504 (OK) from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) - to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (INEXACT ) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) to int32: 65505 (OK) to int64: 65505 (OK) to uint32: 65505 (OK) to uint64: 65505 (OK) from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) - to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (INEXACT ) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) to int32: 131007 (OK) to int64: 131007 (OK) to uint32: 131007 
(OK) to uint64: 131007 (OK) from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) - to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (INEXACT ) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) to int32: 131008 (OK) to int64: 131008 (OK) to uint32: 131008 (OK) to uint64: 131008 (OK) from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) - to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (INEXACT ) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) to int32: 131009 (OK) to int64: 131009 (OK) to uint32: 131009 (OK) to uint64: 131009 (OK) from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) - to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (INEXACT ) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) to int32: 2147483647 (INVALID) to int64: 9223372036854775807 (INVALID) to uint32: -1 (INVALID) to uint64: -1 (INVALID) from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) - to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (INEXACT ) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) to int32: 2147483647 (INVALID) to int64: 9223372036854775807 (INVALID) to uint32: -1 (INVALID) diff --git a/tests/tcg/hexagon/float_madds.ref b/tests/tcg/hexagon/float_madds.ref index ceed3bbbfb..a08c616057 100644 --- a/tests/tcg/hexagon/float_madds.ref +++ b/tests/tcg/hexagon/float_madds.ref @@ -44,7 +44,7 @@ res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=INEXACT (6/2) op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (7/0) op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) -res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=INEXACT (7/1) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=OK (7/1) op : f32(0x0.00000000000000000000p+0:0000000000) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (7/2) op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) @@ -90,15 +90,15 @@ res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/1) op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) res: f32(0x1.00040200000000000000p+0:0x3f800201) flags=INEXACT (14/2) op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) -res: f32(0x1.80200000000000000000p+1:0x40401000) flags=INEXACT (15/0) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/0) op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.00000000000000000000p+0:0x3f800000) -res: f32(0x1.80400000000000000000p+1:0x40402000) flags=INEXACT (15/1) +res: f32(0x1.80400000000000000000p+1:0x40402000) flags=OK (15/1) op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) -res: f32(0x1.80200000000000000000p+1:0x40401000) flags=INEXACT (15/2) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK 
(15/2) op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) -res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=INEXACT (16/0) +res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=OK (16/0) op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.00400000000000000000p+0:0x3f802000) -res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=INEXACT (16/1) +res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=OK (16/1) op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) res: f32(0x1.2e23d200000000000000p+2:0x409711e9) flags=INEXACT (16/2) op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) @@ -236,7 +236,7 @@ res: f32(-0x1.50544200000000000000p-66:0x9ea82a21) flags=INEXACT (6/2) op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) res: f32(0x1.00000000000000000000p-149:0x00000001) flags=UNDERFLOW INEXACT (7/0) op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) -res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=INEXACT (7/1) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=OK (7/1) op : f32(0x0.00000000000000000000p+0:0000000000) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (7/2) op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) @@ -282,15 +282,15 @@ res: f32(0x1.00440200000000000000p+0:0x3f802201) flags=INEXACT (14/1) op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) res: f32(0x1.00040200000000000000p+0:0x3f800201) flags=INEXACT (14/2) op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) -res: f32(0x1.80200000000000000000p+1:0x40401000) flags=INEXACT (15/0) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/0) op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.00000000000000000000p+0:0x3f800000) -res: f32(0x1.80400000000000000000p+1:0x40402000) flags=INEXACT (15/1) +res: f32(0x1.80400000000000000000p+1:0x40402000) flags=OK (15/1) op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) -res: f32(0x1.80200000000000000000p+1:0x40401000) flags=INEXACT (15/2) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/2) op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) -res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=INEXACT (16/0) +res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=OK (16/0) op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.00400000000000000000p+0:0x3f802000) -res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=INEXACT (16/1) +res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) 
flags=OK (16/1) op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) res: f32(0x1.2e23d400000000000000p+2:0x409711ea) flags=INEXACT (16/2) op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) @@ -428,7 +428,7 @@ res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=INEXACT (6/2) op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (7/0) op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) -res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=INEXACT (7/1) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=OK (7/1) op : f32(0x0.00000000000000000000p+0:0000000000) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (7/2) op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) @@ -474,15 +474,15 @@ res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/1) op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) res: f32(0x1.00040000000000000000p+0:0x3f800200) flags=INEXACT (14/2) op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) -res: f32(0x1.80200000000000000000p+1:0x40401000) flags=INEXACT (15/0) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/0) op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.00000000000000000000p+0:0x3f800000) -res: f32(0x1.80400000000000000000p+1:0x40402000) flags=INEXACT (15/1) +res: f32(0x1.80400000000000000000p+1:0x40402000) flags=OK (15/1) op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) -res: f32(0x1.80200000000000000000p+1:0x40401000) flags=INEXACT (15/2) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/2) op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) -res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=INEXACT (16/0) +res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=OK (16/0) op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.00400000000000000000p+0:0x3f802000) -res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=INEXACT (16/1) +res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=OK (16/1) op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) res: f32(0x1.2e23d200000000000000p+2:0x409711e9) flags=INEXACT (16/2) op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) @@ -620,7 +620,7 @@ res: f32(-0x1.50544200000000000000p-66:0x9ea82a21) flags=INEXACT (6/2) op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * 
f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (7/0) op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) -res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=INEXACT (7/1) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=OK (7/1) op : f32(0x0.00000000000000000000p+0:0000000000) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (7/2) op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) @@ -666,15 +666,15 @@ res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/1) op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) res: f32(0x1.00040000000000000000p+0:0x3f800200) flags=INEXACT (14/2) op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) -res: f32(0x1.80200000000000000000p+1:0x40401000) flags=INEXACT (15/0) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/0) op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.00000000000000000000p+0:0x3f800000) -res: f32(0x1.80400000000000000000p+1:0x40402000) flags=INEXACT (15/1) +res: f32(0x1.80400000000000000000p+1:0x40402000) flags=OK (15/1) op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) -res: f32(0x1.80200000000000000000p+1:0x40401000) flags=INEXACT (15/2) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/2) op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) -res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=INEXACT (16/0) +res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=OK (16/0) op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.00400000000000000000p+0:0x3f802000) -res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=INEXACT (16/1) +res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=OK (16/1) op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) res: f32(0x1.2e23d200000000000000p+2:0x409711e9) flags=INEXACT (16/2) op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) diff --git a/tests/tcg/hexagon/fpstuff.c b/tests/tcg/hexagon/fpstuff.c index 0dff429f4c..56bf562a40 100644 --- a/tests/tcg/hexagon/fpstuff.c +++ b/tests/tcg/hexagon/fpstuff.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2020-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2020-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -38,8 +38,11 @@ const int SF_NaN_special = 0x7f800001; const int SF_ANY = 0x3f800000; const int SF_HEX_NAN = 0xffffffff; const int SF_small_neg = 0xab98fba8; +const int SF_denorm = 0x00000001; +const int SF_random = 0x346001d6; -const long long DF_NaN = 0x7ff8000000000000ULL; +const long long DF_QNaN = 0x7ff8000000000000ULL; +const long long DF_SNaN = 0x7ff7000000000000ULL; const long long DF_ANY = 0x3f80000000000000ULL; const long long DF_HEX_NAN = 0xffffffffffffffffULL; const long long DF_small_neg = 0xbd731f7500000000ULL; @@ -126,7 +129,7 @@ static void check_compare_exception(void) "p0 = dfcmp.eq(%2, %3)\n\t" "%0 = p0\n\t" "%1 = usr\n\t" - : "=r"(cmp), "=r"(usr) : "r"(DF_NaN), "r"(DF_ANY) + : "=r"(cmp), "=r"(usr) : "r"(DF_QNaN), "r"(DF_ANY) : "r2", "p0", "usr"); check32(cmp, 0); check_fpstatus(usr, 0); @@ -135,7 +138,7 @@ static void check_compare_exception(void) "p0 = dfcmp.gt(%2, %3)\n\t" "%0 = p0\n\t" "%1 = usr\n\t" - : "=r"(cmp), "=r"(usr) : "r"(DF_NaN), "r"(DF_ANY) + : "=r"(cmp), "=r"(usr) : "r"(DF_QNaN), "r"(DF_ANY) : "r2", "p0", "usr"); check32(cmp, 0); check_fpstatus(usr, 0); @@ -144,7 +147,7 @@ static void check_compare_exception(void) "p0 = dfcmp.ge(%2, %3)\n\t" "%0 = p0\n\t" "%1 = usr\n\t" - : "=r"(cmp), "=r"(usr) : "r"(DF_NaN), "r"(DF_ANY) + : "=r"(cmp), "=r"(usr) : "r"(DF_QNaN), "r"(DF_ANY) : "r2", "p0", "usr"); check32(cmp, 0); check_fpstatus(usr, 0); @@ -206,7 +209,7 @@ static void check_dfminmax(void) int usr; /* - * Execute dfmin/dfmax instructions with one operand as NaN + * Execute dfmin/dfmax instructions with one operand as SNaN * Check that * Result is the other operand * Invalid bit in USR is set @@ -214,7 +217,7 @@ static void check_dfminmax(void) asm (CLEAR_FPSTATUS "%0 = dfmin(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(minmax), "=r"(usr) : "r"(DF_NaN), "r"(DF_ANY) + : "=r"(minmax), "=r"(usr) : "r"(DF_SNaN), "r"(DF_ANY) : "r2", "usr"); check64(minmax, DF_ANY); check_fpstatus(usr, FPINVF); @@ -222,13 +225,35 @@ static void check_dfminmax(void) asm (CLEAR_FPSTATUS "%0 = dfmax(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(minmax), "=r"(usr) : "r"(DF_NaN), "r"(DF_ANY) + : "=r"(minmax), "=r"(usr) : "r"(DF_SNaN), "r"(DF_ANY) : "r2", "usr"); check64(minmax, DF_ANY); check_fpstatus(usr, FPINVF); /* - * Execute dfmin/dfmax instructions with both operands NaN + * Execute dfmin/dfmax instructions with one operand as QNaN + * Check that + * Result is the other operand + * No bit in USR is set + */ + asm (CLEAR_FPSTATUS + "%0 = dfmin(%2, %3)\n\t" + "%1 = usr\n\t" + : "=r"(minmax), "=r"(usr) : "r"(DF_QNaN), "r"(DF_ANY) + : "r2", "usr"); + check64(minmax, DF_ANY); + check_fpstatus(usr, 0); + + asm (CLEAR_FPSTATUS + "%0 = dfmax(%2, %3)\n\t" + "%1 = usr\n\t" + : "=r"(minmax), "=r"(usr) : "r"(DF_QNaN), "r"(DF_ANY) + : "r2", "usr"); + check64(minmax, DF_ANY); + check_fpstatus(usr, 0); + + /* + * Execute dfmin/dfmax instructions with both operands SNaN * Check that * Result is DF_HEX_NAN * Invalid bit in USR is set @@ -236,7 +261,7 @@ static void check_dfminmax(void) asm (CLEAR_FPSTATUS "%0 = dfmin(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(minmax), "=r"(usr) : "r"(DF_NaN), "r"(DF_NaN) + : "=r"(minmax), "=r"(usr) : "r"(DF_SNaN), "r"(DF_SNaN) : "r2", "usr"); check64(minmax, DF_HEX_NAN); check_fpstatus(usr, FPINVF); @@ -244,16 +269,39 @@ static void check_dfminmax(void) asm (CLEAR_FPSTATUS "%0 = dfmax(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(minmax), "=r"(usr) : 
"r"(DF_NaN), "r"(DF_NaN) + : "=r"(minmax), "=r"(usr) : "r"(DF_SNaN), "r"(DF_SNaN) : "r2", "usr"); check64(minmax, DF_HEX_NAN); check_fpstatus(usr, FPINVF); + + /* + * Execute dfmin/dfmax instructions with both operands QNaN + * Check that + * Result is DF_HEX_NAN + * No bit in USR is set + */ + asm (CLEAR_FPSTATUS + "%0 = dfmin(%2, %3)\n\t" + "%1 = usr\n\t" + : "=r"(minmax), "=r"(usr) : "r"(DF_QNaN), "r"(DF_QNaN) + : "r2", "usr"); + check64(minmax, DF_HEX_NAN); + check_fpstatus(usr, 0); + + asm (CLEAR_FPSTATUS + "%0 = dfmax(%2, %3)\n\t" + "%1 = usr\n\t" + : "=r"(minmax), "=r"(usr) : "r"(DF_QNaN), "r"(DF_QNaN) + : "r2", "usr"); + check64(minmax, DF_HEX_NAN); + check_fpstatus(usr, 0); } -static void check_recip_exception(void) +static void check_sfrecipa(void) { int result; int usr; + int pred; /* * Check that sfrecipa doesn't set status bits when @@ -329,6 +377,17 @@ static void check_recip_exception(void) : "r2", "p0", "usr"); check32(result, 0x3f800000); check_fpstatus(usr, 0); + + /* + * Check that sfrecipa properly handles denorm + */ + asm (CLEAR_FPSTATUS + "%0,p0 = sfrecipa(%2, %3)\n\t" + "%1 = p0\n\t" + : "=r"(result), "=r"(pred) : "r"(SF_denorm), "r"(SF_random) + : "p0", "usr"); + check32(result, 0x6a920001); + check32(pred, 0x80); } static void check_canonical_NaN(void) @@ -411,7 +470,7 @@ static void check_canonical_NaN(void) asm(CLEAR_FPSTATUS "%0 = convert_df2sf(%2)\n\t" "%1 = usr\n\t" - : "=r"(sf_result), "=r"(usr) : "r"(DF_NaN) + : "=r"(sf_result), "=r"(usr) : "r"(DF_QNaN) : "r2", "usr"); check32(sf_result, SF_HEX_NAN); check_fpstatus(usr, 0); @@ -419,7 +478,7 @@ static void check_canonical_NaN(void) asm(CLEAR_FPSTATUS "%0 = dfadd(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(df_result), "=r"(usr) : "r"(DF_NaN), "r"(DF_ANY) + : "=r"(df_result), "=r"(usr) : "r"(DF_QNaN), "r"(DF_ANY) : "r2", "usr"); check64(df_result, DF_HEX_NAN); check_fpstatus(usr, 0); @@ -427,7 +486,7 @@ static void check_canonical_NaN(void) asm(CLEAR_FPSTATUS "%0 = dfsub(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(df_result), "=r"(usr) : "r"(DF_NaN), "r"(DF_ANY) + : "=r"(df_result), "=r"(usr) : "r"(DF_QNaN), "r"(DF_ANY) : "r2", "usr"); check64(df_result, DF_HEX_NAN); check_fpstatus(usr, 0); @@ -455,6 +514,28 @@ static void check_invsqrta(void) check32(predval, 0x0); } +static void check_sffixupn(void) +{ + int result; + + /* Check that sffixupn properly deals with denorm */ + asm volatile("%0 = sffixupn(%1, %2)\n\t" + : "=r"(result) + : "r"(SF_random), "r"(SF_denorm)); + check32(result, 0x246001d6); +} + +static void check_sffixupd(void) +{ + int result; + + /* Check that sffixupd properly deals with denorm */ + asm volatile("%0 = sffixupd(%1, %2)\n\t" + : "=r"(result) + : "r"(SF_denorm), "r"(SF_random)); + check32(result, 0x146001d6); +} + static void check_float2int_convs() { int res32; @@ -567,7 +648,7 @@ static void check_float2int_convs() asm(CLEAR_FPSTATUS "%0 = convert_df2w(%2)\n\t" "%1 = usr\n\t" - : "=r"(res32), "=r"(usr) : "r"(DF_NaN) + : "=r"(res32), "=r"(usr) : "r"(DF_QNaN) : "r2", "usr"); check32(res32, -1); check_fpstatus(usr, FPINVF); @@ -575,7 +656,7 @@ static void check_float2int_convs() asm(CLEAR_FPSTATUS "%0 = convert_df2w(%2):chop\n\t" "%1 = usr\n\t" - : "=r"(res32), "=r"(usr) : "r"(DF_NaN) + : "=r"(res32), "=r"(usr) : "r"(DF_QNaN) : "r2", "usr"); check32(res32, -1); check_fpstatus(usr, FPINVF); @@ -583,7 +664,7 @@ static void check_float2int_convs() asm(CLEAR_FPSTATUS "%0 = convert_df2d(%2)\n\t" "%1 = usr\n\t" - : "=r"(res64), "=r"(usr) : "r"(DF_NaN) + : "=r"(res64), "=r"(usr) : "r"(DF_QNaN) : "r2", 
"usr"); check64(res64, -1); check_fpstatus(usr, FPINVF); @@ -591,7 +672,7 @@ static void check_float2int_convs() asm(CLEAR_FPSTATUS "%0 = convert_df2d(%2):chop\n\t" "%1 = usr\n\t" - : "=r"(res64), "=r"(usr) : "r"(DF_NaN) + : "=r"(res64), "=r"(usr) : "r"(DF_QNaN) : "r2", "usr"); check64(res64, -1); check_fpstatus(usr, FPINVF); @@ -602,9 +683,11 @@ int main() check_compare_exception(); check_sfminmax(); check_dfminmax(); - check_recip_exception(); + check_sfrecipa(); check_canonical_NaN(); check_invsqrta(); + check_sffixupn(); + check_sffixupd(); check_float2int_convs(); puts(err ? "FAIL" : "PASS"); diff --git a/tests/tcg/hexagon/hex_sigsegv.c b/tests/tcg/hexagon/hex_sigsegv.c new file mode 100644 index 0000000000..dc2b349257 --- /dev/null +++ b/tests/tcg/hexagon/hex_sigsegv.c @@ -0,0 +1,106 @@ +/* + * Copyright(c) 2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +/* + * Test the VLIW semantics of two stores in a packet + * + * When a packet has 2 stores, either both commit or neither commit. + * We test this with a packet that does stores to both NULL and a global + * variable, "should_not_change". After the SIGSEGV is caught, we check + * that the "should_not_change" value is the same. + */ + +#include +#include +#include +#include +#include +#include +#include + +typedef unsigned char uint8_t; + +int err; +int segv_caught; + +#define SHOULD_NOT_CHANGE_VAL 5 +int should_not_change = SHOULD_NOT_CHANGE_VAL; + +#define BUF_SIZE 300 +unsigned char buf[BUF_SIZE]; + + +static void __check(const char *filename, int line, int x, int expect) +{ + if (x != expect) { + printf("ERROR %s:%d - %d != %d\n", + filename, line, x, expect); + err++; + } +} + +#define check(x, expect) __check(__FILE__, __LINE__, (x), (expect)) + +static void __chk_error(const char *filename, int line, int ret) +{ + if (ret < 0) { + printf("ERROR %s:%d - %d\n", filename, line, ret); + err++; + } +} + +#define chk_error(ret) __chk_error(__FILE__, __LINE__, (ret)) + +jmp_buf jmp_env; + +static void sig_segv(int sig, siginfo_t *info, void *puc) +{ + check(sig, SIGSEGV); + segv_caught = 1; + longjmp(jmp_env, 1); +} + +int main() +{ + struct sigaction act; + + /* SIGSEGV test */ + act.sa_sigaction = sig_segv; + sigemptyset(&act.sa_mask); + act.sa_flags = SA_SIGINFO; + chk_error(sigaction(SIGSEGV, &act, NULL)); + if (setjmp(jmp_env) == 0) { + asm volatile("r18 = ##should_not_change\n\t" + "r19 = #0\n\t" + "{\n\t" + " memw(r18) = #7\n\t" + " memw(r19) = #0\n\t" + "}\n\t" + : : : "r18", "r19", "memory"); + } + + act.sa_handler = SIG_DFL; + sigemptyset(&act.sa_mask); + act.sa_flags = 0; + chk_error(sigaction(SIGSEGV, &act, NULL)); + + check(segv_caught, 1); + check(should_not_change, SHOULD_NOT_CHANGE_VAL); + + puts(err ? "FAIL" : "PASS"); + return err ? 
EXIT_FAILURE : EXIT_SUCCESS; +} diff --git a/tests/tcg/hexagon/hvx_histogram.c b/tests/tcg/hexagon/hvx_histogram.c new file mode 100644 index 0000000000..43377a9abb --- /dev/null +++ b/tests/tcg/hexagon/hvx_histogram.c @@ -0,0 +1,88 @@ +/* + * Copyright(c) 2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include +#include +#include +#include "hvx_histogram_row.h" + +const int vector_len = 128; +const int width = 275; +const int height = 20; +const int stride = (width + vector_len - 1) & -vector_len; + +int err; + +static uint8_t input[height][stride] __attribute__((aligned(128))) = { +#include "hvx_histogram_input.h" +}; + +static int result[256] __attribute__((aligned(128))); +static int expect[256] __attribute__((aligned(128))); + +static void check(void) +{ + for (int i = 0; i < 256; i++) { + int res = result[i]; + int exp = expect[i]; + if (res != exp) { + printf("ERROR at %3d: 0x%04x != 0x%04x\n", + i, res, exp); + err++; + } + } +} + +static void ref_histogram(uint8_t *src, int stride, int width, int height, + int *hist) +{ + for (int i = 0; i < 256; i++) { + hist[i] = 0; + } + + for (int i = 0; i < height; i++) { + for (int j = 0; j < width; j++) { + hist[src[i * stride + j]]++; + } + } +} + +static void hvx_histogram(uint8_t *src, int stride, int width, int height, + int *hist) +{ + int n = 8192 / width; + + for (int i = 0; i < 256; i++) { + hist[i] = 0; + } + + for (int i = 0; i < height; i += n) { + int k = height - i > n ? n : height - i; + hvx_histogram_row(src, stride, width, k, hist); + src += n * stride; + } +} + +int main() +{ + ref_histogram(&input[0][0], stride, width, height, expect); + hvx_histogram(&input[0][0], stride, width, height, result); + check(); + + puts(err ? "FAIL" : "PASS"); + return err ? 1 : 0; +} diff --git a/tests/tcg/hexagon/hvx_histogram_input.h b/tests/tcg/hexagon/hvx_histogram_input.h new file mode 100644 index 0000000000..2f9109255e --- /dev/null +++ b/tests/tcg/hexagon/hvx_histogram_input.h @@ -0,0 +1,717 @@ +/* + * Copyright(c) 2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + + { 0x26, 0x32, 0x2e, 0x2e, 0x2d, 0x2c, 0x2d, 0x2d, + 0x2c, 0x2e, 0x31, 0x33, 0x36, 0x39, 0x3b, 0x3f, + 0x42, 0x46, 0x4a, 0x4c, 0x51, 0x53, 0x53, 0x54, + 0x56, 0x57, 0x58, 0x57, 0x56, 0x52, 0x51, 0x4f, + 0x4c, 0x49, 0x47, 0x42, 0x3e, 0x3b, 0x38, 0x35, + 0x33, 0x30, 0x2e, 0x2c, 0x2b, 0x2a, 0x2a, 0x28, + 0x28, 0x27, 0x27, 0x28, 0x29, 0x2a, 0x2c, 0x2e, + 0x2f, 0x33, 0x36, 0x38, 0x3c, 0x3d, 0x40, 0x42, + 0x43, 0x42, 0x43, 0x44, 0x43, 0x41, 0x40, 0x3b, + 0x3b, 0x3a, 0x38, 0x35, 0x32, 0x2f, 0x2c, 0x29, + 0x27, 0x26, 0x23, 0x21, 0x1e, 0x1c, 0x1a, 0x19, + 0x17, 0x15, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, + 0x0f, 0x0e, 0x0f, 0x0f, 0x0e, 0x0d, 0x0d, 0x0d, + 0x0c, 0x0d, 0x0e, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0d, 0x0c, 0x0f, 0x0e, 0x0f, 0x0f, + 0x0f, 0x10, 0x11, 0x12, 0x14, 0x16, 0x17, 0x19, + 0x1c, 0x1d, 0x21, 0x25, 0x27, 0x29, 0x2b, 0x2f, + 0x31, 0x33, 0x36, 0x38, 0x39, 0x3a, 0x3b, 0x3c, + 0x3c, 0x3d, 0x3e, 0x3e, 0x3c, 0x3b, 0x3a, 0x39, + 0x39, 0x3a, 0x3a, 0x3a, 0x3a, 0x3c, 0x3e, 0x43, + 0x47, 0x4a, 0x4d, 0x51, 0x51, 0x54, 0x56, 0x56, + 0x57, 0x56, 0x53, 0x4f, 0x4b, 0x47, 0x43, 0x41, + 0x3e, 0x3c, 0x3a, 0x37, 0x36, 0x33, 0x32, 0x34, + 0x34, 0x34, 0x34, 0x35, 0x36, 0x39, 0x3d, 0x3d, + 0x3f, 0x40, 0x40, 0x40, 0x40, 0x3e, 0x40, 0x40, + 0x42, 0x44, 0x47, 0x48, 0x4b, 0x4e, 0x56, 0x5c, + 0x62, 0x68, 0x6f, 0x73, 0x76, 0x79, 0x7a, 0x7c, + 0x7e, 0x7c, 0x78, 0x72, 0x6e, 0x69, 0x65, 0x60, + 0x5b, 0x56, 0x52, 0x4d, 0x4a, 0x48, 0x47, 0x46, + 0x44, 0x43, 0x42, 0x41, 0x41, 0x41, 0x40, 0x40, + 0x3f, 0x3e, 0x3d, 0x3c, 0x3b, 0x3b, 0x38, 0x37, + 0x36, 0x35, 0x36, 0x35, 0x36, 0x37, 0x38, 0x3c, + 0x3d, 0x3f, 0x42, 0x44, 0x46, 0x48, 0x4b, 0x4c, + 0x4e, 0x4e, 0x4d, 0x4c, 0x4a, 0x48, 0x49, 0x49, + 0x4b, 0x4d, 0x4e, }, + { 0x23, 0x2d, 0x29, 0x29, 0x28, 0x28, 0x29, 0x29, + 0x28, 0x2b, 0x2d, 0x2f, 0x32, 0x34, 0x36, 0x3a, + 0x3d, 0x41, 0x44, 0x47, 0x4a, 0x4c, 0x4e, 0x4e, + 0x50, 0x51, 0x51, 0x51, 0x4f, 0x4c, 0x4b, 0x48, + 0x46, 0x44, 0x40, 0x3d, 0x39, 0x36, 0x34, 0x30, + 0x2f, 0x2d, 0x2a, 0x29, 0x28, 0x27, 0x26, 0x25, + 0x25, 0x24, 0x24, 0x24, 0x26, 0x28, 0x28, 0x2a, + 0x2b, 0x2e, 0x32, 0x34, 0x37, 0x39, 0x3b, 0x3c, + 0x3d, 0x3d, 0x3e, 0x3e, 0x3e, 0x3c, 0x3b, 0x38, + 0x37, 0x35, 0x33, 0x30, 0x2e, 0x2b, 0x27, 0x25, + 0x24, 0x21, 0x20, 0x1d, 0x1b, 0x1a, 0x18, 0x16, + 0x15, 0x14, 0x13, 0x12, 0x10, 0x11, 0x10, 0x0e, + 0x0e, 0x0d, 0x0d, 0x0d, 0x0d, 0x0c, 0x0c, 0x0b, + 0x0b, 0x0b, 0x0c, 0x0b, 0x0b, 0x09, 0x0a, 0x0b, + 0x0b, 0x0a, 0x0a, 0x0c, 0x0c, 0x0c, 0x0d, 0x0e, + 0x0e, 0x0f, 0x0f, 0x11, 0x12, 0x15, 0x15, 0x17, + 0x1a, 0x1c, 0x1f, 0x22, 0x25, 0x26, 0x29, 0x2a, + 0x2d, 0x30, 0x33, 0x34, 0x35, 0x35, 0x37, 0x37, + 0x39, 0x3a, 0x39, 0x38, 0x37, 0x36, 0x36, 0x37, + 0x35, 0x36, 0x35, 0x35, 0x36, 0x37, 0x3a, 0x3e, + 0x40, 0x43, 0x48, 0x49, 0x4b, 0x4c, 0x4d, 0x4e, + 0x4f, 0x4f, 0x4c, 0x48, 0x45, 0x41, 0x3e, 0x3b, + 0x3a, 0x37, 0x36, 0x33, 0x32, 0x31, 0x30, 0x31, + 0x32, 0x31, 0x31, 0x31, 0x31, 0x34, 0x37, 0x38, + 0x3a, 0x3b, 0x3b, 0x3b, 0x3c, 0x3b, 0x3d, 0x3e, + 0x3f, 0x40, 0x43, 0x44, 0x47, 0x4b, 0x4f, 0x56, + 0x5a, 0x60, 0x66, 0x69, 0x6a, 0x6e, 0x71, 0x72, + 0x73, 0x72, 0x6d, 0x69, 0x66, 0x60, 0x5c, 0x59, + 0x54, 0x50, 0x4d, 0x48, 0x46, 0x44, 0x44, 0x43, + 0x42, 0x41, 0x41, 0x40, 0x3f, 0x3f, 0x3e, 0x3d, + 0x3d, 0x3d, 0x3c, 0x3a, 0x39, 0x38, 0x35, 0x35, + 0x34, 0x34, 0x35, 0x34, 0x35, 0x36, 0x39, 0x3c, + 0x3d, 0x3e, 0x41, 0x43, 0x44, 0x46, 0x48, 0x49, + 0x4a, 0x49, 0x48, 0x47, 0x45, 0x43, 0x43, 0x44, + 0x45, 0x47, 0x48, }, + { 0x23, 0x2d, 0x2a, 0x2a, 0x29, 0x29, 0x2a, 0x2a, + 0x29, 0x2c, 0x2d, 0x2f, 0x32, 0x34, 0x36, 
0x3a, + 0x3d, 0x40, 0x44, 0x48, 0x4a, 0x4c, 0x4e, 0x4e, + 0x50, 0x51, 0x51, 0x51, 0x4f, 0x4c, 0x4b, 0x48, + 0x46, 0x44, 0x40, 0x3d, 0x39, 0x36, 0x34, 0x30, + 0x2f, 0x2d, 0x2a, 0x29, 0x28, 0x27, 0x26, 0x25, + 0x25, 0x24, 0x24, 0x25, 0x26, 0x28, 0x29, 0x2a, + 0x2b, 0x2e, 0x31, 0x34, 0x37, 0x39, 0x3b, 0x3c, + 0x3d, 0x3e, 0x3e, 0x3d, 0x3e, 0x3c, 0x3c, 0x3a, + 0x37, 0x35, 0x33, 0x30, 0x2f, 0x2b, 0x28, 0x26, + 0x24, 0x21, 0x20, 0x1e, 0x1c, 0x1b, 0x18, 0x17, + 0x16, 0x14, 0x13, 0x12, 0x10, 0x10, 0x0f, 0x0e, + 0x0f, 0x0e, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0c, + 0x0b, 0x0b, 0x0c, 0x0c, 0x0c, 0x0b, 0x0b, 0x0c, + 0x0c, 0x0b, 0x0b, 0x0c, 0x0d, 0x0c, 0x0e, 0x0e, + 0x0e, 0x0f, 0x11, 0x11, 0x13, 0x14, 0x16, 0x18, + 0x1a, 0x1d, 0x1f, 0x22, 0x25, 0x26, 0x29, 0x2b, + 0x2d, 0x31, 0x33, 0x34, 0x36, 0x37, 0x38, 0x38, + 0x39, 0x3a, 0x39, 0x38, 0x37, 0x36, 0x37, 0x37, + 0x35, 0x36, 0x35, 0x36, 0x35, 0x38, 0x3a, 0x3e, + 0x40, 0x41, 0x45, 0x47, 0x49, 0x4a, 0x4c, 0x4d, + 0x4e, 0x4d, 0x4a, 0x47, 0x44, 0x40, 0x3d, 0x3b, + 0x39, 0x37, 0x34, 0x34, 0x32, 0x31, 0x31, 0x33, + 0x32, 0x31, 0x32, 0x33, 0x32, 0x36, 0x38, 0x39, + 0x3b, 0x3c, 0x3c, 0x3c, 0x3d, 0x3d, 0x3e, 0x3e, + 0x41, 0x42, 0x43, 0x45, 0x48, 0x4c, 0x50, 0x56, + 0x5b, 0x5f, 0x62, 0x67, 0x69, 0x6c, 0x6e, 0x6e, + 0x70, 0x6f, 0x6b, 0x67, 0x63, 0x5e, 0x5b, 0x58, + 0x54, 0x51, 0x4e, 0x4a, 0x48, 0x46, 0x46, 0x46, + 0x45, 0x46, 0x44, 0x43, 0x44, 0x43, 0x42, 0x42, + 0x41, 0x40, 0x3f, 0x3e, 0x3c, 0x3b, 0x3a, 0x39, + 0x39, 0x39, 0x38, 0x37, 0x37, 0x3a, 0x3e, 0x40, + 0x42, 0x43, 0x47, 0x47, 0x48, 0x4a, 0x4b, 0x4c, + 0x4c, 0x4b, 0x4a, 0x48, 0x46, 0x44, 0x43, 0x45, + 0x45, 0x46, 0x47, }, + { 0x21, 0x2b, 0x28, 0x28, 0x28, 0x28, 0x29, 0x29, + 0x28, 0x2a, 0x2d, 0x30, 0x32, 0x34, 0x37, 0x3a, + 0x3c, 0x40, 0x44, 0x48, 0x4a, 0x4c, 0x4e, 0x4e, + 0x50, 0x51, 0x52, 0x51, 0x4f, 0x4b, 0x4b, 0x48, + 0x45, 0x43, 0x3f, 0x3c, 0x39, 0x36, 0x33, 0x30, + 0x2f, 0x2d, 0x2b, 0x2a, 0x28, 0x27, 0x26, 0x25, + 0x24, 0x24, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2c, 0x2d, 0x31, 0x34, 0x37, 0x39, 0x3b, 0x3c, + 0x3d, 0x3e, 0x3e, 0x3e, 0x3e, 0x3d, 0x3c, 0x3a, + 0x37, 0x35, 0x33, 0x30, 0x2f, 0x2b, 0x28, 0x26, + 0x25, 0x21, 0x20, 0x1e, 0x1c, 0x19, 0x19, 0x18, + 0x17, 0x15, 0x15, 0x12, 0x11, 0x11, 0x11, 0x0f, + 0x0e, 0x0e, 0x0e, 0x0e, 0x0d, 0x0d, 0x0d, 0x0c, + 0x0c, 0x0c, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0f, 0x0f, + 0x0f, 0x10, 0x11, 0x13, 0x13, 0x15, 0x16, 0x18, + 0x1a, 0x1c, 0x1f, 0x22, 0x25, 0x28, 0x29, 0x2d, + 0x2f, 0x32, 0x34, 0x35, 0x36, 0x37, 0x38, 0x38, + 0x39, 0x3a, 0x39, 0x39, 0x37, 0x36, 0x37, 0x36, + 0x35, 0x35, 0x37, 0x35, 0x36, 0x37, 0x3a, 0x3d, + 0x3e, 0x41, 0x43, 0x46, 0x46, 0x47, 0x48, 0x49, + 0x4a, 0x49, 0x47, 0x45, 0x42, 0x3f, 0x3d, 0x3b, + 0x3a, 0x38, 0x36, 0x34, 0x32, 0x32, 0x32, 0x32, + 0x32, 0x31, 0x33, 0x32, 0x34, 0x37, 0x38, 0x38, + 0x3a, 0x3b, 0x3d, 0x3d, 0x3d, 0x3e, 0x3f, 0x41, + 0x42, 0x44, 0x44, 0x46, 0x49, 0x4d, 0x50, 0x54, + 0x58, 0x5c, 0x61, 0x63, 0x65, 0x69, 0x6a, 0x6c, + 0x6d, 0x6c, 0x68, 0x64, 0x61, 0x5c, 0x59, 0x57, + 0x53, 0x51, 0x4f, 0x4c, 0x4a, 0x48, 0x48, 0x49, + 0x49, 0x48, 0x48, 0x48, 0x47, 0x47, 0x46, 0x46, + 0x45, 0x44, 0x42, 0x41, 0x3f, 0x3e, 0x3c, 0x3c, + 0x3c, 0x3d, 0x3c, 0x3c, 0x3c, 0x3e, 0x41, 0x43, + 0x46, 0x48, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4e, + 0x4e, 0x4d, 0x4b, 0x49, 0x47, 0x44, 0x44, 0x45, + 0x45, 0x45, 0x46, }, + { 0x22, 0x2b, 0x27, 0x27, 0x27, 0x27, 0x28, 0x28, + 0x28, 0x2a, 0x2c, 0x2f, 0x30, 0x34, 0x37, 0x3b, + 0x3d, 0x41, 0x45, 0x48, 0x4a, 0x4c, 0x4e, 0x4e, + 0x50, 0x51, 0x52, 0x51, 0x4f, 0x4b, 0x4b, 
0x47, + 0x45, 0x43, 0x3f, 0x3c, 0x39, 0x36, 0x33, 0x30, + 0x2f, 0x2d, 0x2b, 0x2a, 0x27, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2c, 0x2e, 0x31, 0x34, 0x37, 0x39, 0x3a, 0x3b, + 0x3d, 0x3e, 0x3e, 0x3f, 0x3f, 0x3d, 0x3c, 0x3a, + 0x38, 0x36, 0x34, 0x31, 0x2e, 0x2c, 0x29, 0x26, + 0x25, 0x22, 0x20, 0x1e, 0x1c, 0x1a, 0x19, 0x18, + 0x16, 0x15, 0x14, 0x12, 0x10, 0x11, 0x11, 0x0f, + 0x0e, 0x0e, 0x0e, 0x0e, 0x0d, 0x0c, 0x0d, 0x0c, + 0x0c, 0x0c, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0c, 0x0c, 0x0c, 0x0d, 0x0d, 0x0e, 0x0f, 0x0f, + 0x0f, 0x10, 0x11, 0x13, 0x13, 0x15, 0x15, 0x18, + 0x19, 0x1d, 0x1f, 0x21, 0x24, 0x27, 0x2a, 0x2c, + 0x30, 0x33, 0x35, 0x36, 0x37, 0x38, 0x39, 0x39, + 0x3a, 0x3a, 0x39, 0x39, 0x37, 0x36, 0x37, 0x36, + 0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x39, 0x3a, + 0x3d, 0x3e, 0x41, 0x43, 0x43, 0x45, 0x46, 0x46, + 0x47, 0x46, 0x44, 0x42, 0x40, 0x3d, 0x3a, 0x39, + 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x32, 0x32, + 0x32, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + 0x39, 0x3c, 0x3c, 0x3e, 0x3e, 0x3e, 0x41, 0x43, + 0x44, 0x45, 0x46, 0x48, 0x49, 0x4c, 0x51, 0x54, + 0x56, 0x5a, 0x5f, 0x61, 0x63, 0x65, 0x67, 0x69, + 0x6a, 0x69, 0x67, 0x61, 0x5f, 0x5b, 0x58, 0x56, + 0x54, 0x51, 0x50, 0x4e, 0x4c, 0x4a, 0x4b, 0x4c, + 0x4c, 0x4b, 0x4b, 0x4b, 0x4b, 0x49, 0x4a, 0x49, + 0x49, 0x48, 0x46, 0x44, 0x42, 0x41, 0x40, 0x3f, + 0x3f, 0x40, 0x40, 0x40, 0x40, 0x42, 0x46, 0x49, + 0x4b, 0x4c, 0x4f, 0x4f, 0x50, 0x52, 0x51, 0x51, + 0x50, 0x4f, 0x4c, 0x4a, 0x48, 0x46, 0x45, 0x44, + 0x44, 0x45, 0x46, }, + { 0x21, 0x2a, 0x27, 0x27, 0x27, 0x27, 0x27, 0x27, + 0x27, 0x29, 0x2d, 0x2f, 0x31, 0x34, 0x37, 0x3b, + 0x3e, 0x41, 0x45, 0x48, 0x4a, 0x4c, 0x4e, 0x4e, + 0x50, 0x51, 0x52, 0x51, 0x4f, 0x4b, 0x4b, 0x48, + 0x45, 0x43, 0x3f, 0x3c, 0x39, 0x36, 0x33, 0x2f, + 0x2f, 0x2d, 0x2a, 0x2a, 0x27, 0x26, 0x25, 0x24, + 0x22, 0x24, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2c, 0x2f, 0x31, 0x34, 0x37, 0x39, 0x3a, 0x3c, + 0x3d, 0x3e, 0x3f, 0x40, 0x3f, 0x3d, 0x3d, 0x3a, + 0x38, 0x36, 0x34, 0x31, 0x2e, 0x2c, 0x29, 0x26, + 0x25, 0x22, 0x21, 0x1f, 0x1d, 0x1b, 0x19, 0x18, + 0x16, 0x14, 0x14, 0x13, 0x11, 0x11, 0x11, 0x0f, + 0x0f, 0x0f, 0x0e, 0x0e, 0x0d, 0x0d, 0x0d, 0x0d, + 0x0d, 0x0d, 0x0c, 0x0b, 0x0b, 0x0b, 0x0b, 0x0c, + 0x0c, 0x0d, 0x0d, 0x0d, 0x0e, 0x0e, 0x0f, 0x0f, + 0x0f, 0x10, 0x13, 0x13, 0x14, 0x15, 0x17, 0x19, + 0x1a, 0x1d, 0x1f, 0x22, 0x25, 0x27, 0x2a, 0x2e, + 0x31, 0x33, 0x35, 0x38, 0x39, 0x3a, 0x3b, 0x3b, + 0x3c, 0x3c, 0x3b, 0x3a, 0x39, 0x38, 0x38, 0x37, + 0x36, 0x36, 0x37, 0x36, 0x37, 0x38, 0x38, 0x3a, + 0x3b, 0x3e, 0x40, 0x40, 0x41, 0x42, 0x43, 0x42, + 0x43, 0x42, 0x40, 0x40, 0x3f, 0x3c, 0x3b, 0x39, + 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x33, + 0x32, 0x32, 0x34, 0x35, 0x35, 0x36, 0x39, 0x39, + 0x3a, 0x3c, 0x3c, 0x3f, 0x40, 0x41, 0x43, 0x45, + 0x45, 0x47, 0x48, 0x4a, 0x4b, 0x4d, 0x50, 0x53, + 0x56, 0x59, 0x5c, 0x5f, 0x60, 0x65, 0x64, 0x66, + 0x68, 0x66, 0x64, 0x61, 0x5e, 0x5a, 0x59, 0x56, + 0x54, 0x52, 0x51, 0x50, 0x4e, 0x4c, 0x4d, 0x4f, + 0x4f, 0x4f, 0x50, 0x50, 0x4f, 0x4f, 0x4e, 0x4d, + 0x4c, 0x4b, 0x49, 0x47, 0x45, 0x44, 0x43, 0x43, + 0x42, 0x43, 0x44, 0x44, 0x46, 0x47, 0x49, 0x4d, + 0x4f, 0x51, 0x53, 0x54, 0x53, 0x54, 0x54, 0x53, + 0x53, 0x51, 0x4e, 0x4b, 0x4a, 0x47, 0x45, 0x44, + 0x44, 0x45, 0x46, }, + { 0x20, 0x28, 0x26, 0x26, 0x25, 0x24, 0x27, 0x27, + 0x27, 0x29, 0x2c, 0x2e, 0x31, 0x34, 0x37, 0x3b, + 0x3e, 0x41, 0x45, 0x48, 0x4a, 0x4c, 0x4e, 0x4e, + 0x50, 0x51, 0x52, 0x51, 0x4f, 0x4b, 0x4a, 0x49, + 0x45, 0x43, 0x3f, 0x3c, 0x3a, 0x36, 0x33, 0x30, + 0x2f, 0x2d, 0x2a, 0x28, 0x27, 0x26, 0x25, 
0x24, + 0x23, 0x24, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2c, 0x2e, 0x31, 0x34, 0x37, 0x39, 0x3b, 0x3c, + 0x3d, 0x3e, 0x3f, 0x40, 0x3e, 0x3d, 0x3d, 0x3a, + 0x38, 0x36, 0x34, 0x31, 0x2f, 0x2c, 0x29, 0x27, + 0x25, 0x21, 0x21, 0x1f, 0x1c, 0x1d, 0x19, 0x18, + 0x16, 0x15, 0x15, 0x13, 0x12, 0x11, 0x11, 0x0f, + 0x0f, 0x0e, 0x0f, 0x0f, 0x0e, 0x0d, 0x0d, 0x0d, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0d, 0x0d, 0x0d, 0x0e, 0x0e, 0x0e, 0x0f, 0x10, + 0x10, 0x10, 0x12, 0x13, 0x15, 0x16, 0x18, 0x1a, + 0x1c, 0x1d, 0x20, 0x22, 0x25, 0x27, 0x2a, 0x2e, + 0x30, 0x34, 0x38, 0x39, 0x3a, 0x3b, 0x3b, 0x3b, + 0x3c, 0x3d, 0x3c, 0x3b, 0x3a, 0x39, 0x38, 0x37, + 0x36, 0x36, 0x38, 0x37, 0x37, 0x37, 0x38, 0x3a, + 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x40, 0x40, + 0x42, 0x40, 0x3f, 0x3e, 0x3d, 0x3b, 0x3a, 0x39, + 0x37, 0x36, 0x36, 0x35, 0x34, 0x34, 0x33, 0x33, + 0x33, 0x34, 0x35, 0x35, 0x35, 0x36, 0x38, 0x39, + 0x3a, 0x3b, 0x3d, 0x3f, 0x42, 0x43, 0x45, 0x45, + 0x46, 0x48, 0x49, 0x4b, 0x4b, 0x4d, 0x50, 0x53, + 0x56, 0x57, 0x5a, 0x5c, 0x5e, 0x61, 0x63, 0x65, + 0x66, 0x64, 0x62, 0x5f, 0x5c, 0x59, 0x58, 0x56, + 0x55, 0x54, 0x52, 0x51, 0x50, 0x51, 0x51, 0x52, + 0x52, 0x52, 0x52, 0x52, 0x51, 0x51, 0x51, 0x50, + 0x4f, 0x4e, 0x4c, 0x4a, 0x47, 0x46, 0x45, 0x45, + 0x45, 0x46, 0x46, 0x46, 0x4a, 0x4c, 0x4d, 0x52, + 0x54, 0x56, 0x58, 0x58, 0x56, 0x57, 0x57, 0x56, + 0x55, 0x53, 0x50, 0x4d, 0x49, 0x45, 0x44, 0x44, + 0x43, 0x44, 0x45, }, + { 0x1f, 0x27, 0x24, 0x23, 0x25, 0x24, 0x25, 0x26, + 0x26, 0x28, 0x2b, 0x2e, 0x31, 0x34, 0x37, 0x3a, + 0x3d, 0x41, 0x45, 0x48, 0x4b, 0x4d, 0x4f, 0x4e, + 0x50, 0x51, 0x52, 0x50, 0x4f, 0x4b, 0x4a, 0x49, + 0x45, 0x43, 0x3f, 0x3c, 0x3a, 0x36, 0x33, 0x30, + 0x2f, 0x2d, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, + 0x23, 0x25, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2c, 0x2f, 0x32, 0x34, 0x37, 0x39, 0x3b, 0x3c, + 0x3e, 0x3f, 0x3f, 0x40, 0x3e, 0x3d, 0x3c, 0x3a, + 0x38, 0x36, 0x34, 0x31, 0x30, 0x2c, 0x29, 0x28, + 0x25, 0x23, 0x22, 0x1f, 0x1c, 0x1c, 0x18, 0x18, + 0x16, 0x14, 0x14, 0x13, 0x11, 0x11, 0x11, 0x0f, + 0x0f, 0x0e, 0x0f, 0x0f, 0x0e, 0x0d, 0x0d, 0x0d, + 0x0c, 0x0c, 0x0b, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0d, 0x0e, 0x0e, 0x0f, 0x0d, 0x0f, 0x10, 0x10, + 0x10, 0x11, 0x13, 0x14, 0x15, 0x16, 0x19, 0x1a, + 0x1c, 0x1f, 0x20, 0x23, 0x26, 0x28, 0x2a, 0x2e, + 0x31, 0x35, 0x38, 0x39, 0x3a, 0x3c, 0x3d, 0x3d, + 0x3e, 0x3e, 0x3d, 0x3c, 0x3a, 0x3a, 0x39, 0x39, + 0x38, 0x37, 0x38, 0x38, 0x37, 0x38, 0x39, 0x3a, + 0x3c, 0x3c, 0x3d, 0x3e, 0x3f, 0x3f, 0x40, 0x3f, + 0x41, 0x40, 0x3e, 0x3e, 0x3d, 0x3b, 0x3b, 0x39, + 0x37, 0x37, 0x35, 0x36, 0x34, 0x34, 0x34, 0x35, + 0x35, 0x34, 0x34, 0x35, 0x35, 0x37, 0x38, 0x39, + 0x3a, 0x3c, 0x3f, 0x3f, 0x43, 0x43, 0x45, 0x47, + 0x48, 0x48, 0x4a, 0x4b, 0x4e, 0x4d, 0x51, 0x53, + 0x56, 0x58, 0x59, 0x5b, 0x5d, 0x60, 0x62, 0x63, + 0x64, 0x63, 0x61, 0x5e, 0x5c, 0x5a, 0x57, 0x56, + 0x55, 0x54, 0x53, 0x52, 0x51, 0x51, 0x52, 0x52, + 0x54, 0x54, 0x55, 0x55, 0x55, 0x54, 0x54, 0x53, + 0x52, 0x50, 0x4e, 0x4d, 0x4b, 0x4a, 0x48, 0x48, + 0x48, 0x48, 0x4a, 0x4b, 0x4d, 0x4f, 0x52, 0x55, + 0x58, 0x5a, 0x5b, 0x5b, 0x5b, 0x5b, 0x5a, 0x59, + 0x58, 0x55, 0x51, 0x4e, 0x4a, 0x46, 0x45, 0x44, + 0x44, 0x44, 0x44, }, + { 0x1e, 0x26, 0x23, 0x23, 0x25, 0x24, 0x25, 0x26, + 0x26, 0x28, 0x2b, 0x2e, 0x31, 0x34, 0x37, 0x3a, + 0x3e, 0x42, 0x45, 0x48, 0x4b, 0x4d, 0x4f, 0x4f, + 0x50, 0x51, 0x52, 0x50, 0x4f, 0x4b, 0x4a, 0x48, + 0x46, 0x44, 0x3f, 0x3b, 0x39, 0x36, 0x33, 0x30, + 0x2f, 0x2d, 0x2a, 0x28, 0x27, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2c, 0x2f, 0x32, 0x34, 0x37, 0x39, 0x3b, 
0x3d, + 0x3e, 0x3f, 0x41, 0x41, 0x40, 0x3e, 0x3d, 0x3b, + 0x38, 0x37, 0x34, 0x32, 0x30, 0x2c, 0x2a, 0x27, + 0x26, 0x23, 0x22, 0x20, 0x1d, 0x1b, 0x1a, 0x19, + 0x17, 0x15, 0x15, 0x13, 0x12, 0x12, 0x11, 0x0f, + 0x11, 0x0f, 0x0e, 0x0e, 0x0d, 0x0d, 0x0d, 0x0c, + 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, + 0x0e, 0x0e, 0x0e, 0x0f, 0x10, 0x10, 0x11, 0x11, + 0x11, 0x13, 0x16, 0x15, 0x15, 0x18, 0x1a, 0x1b, + 0x1d, 0x20, 0x22, 0x24, 0x27, 0x29, 0x2c, 0x30, + 0x33, 0x37, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3e, + 0x40, 0x40, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3a, + 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3b, 0x3d, + 0x3d, 0x3f, 0x40, 0x40, 0x3f, 0x41, 0x41, 0x41, + 0x41, 0x41, 0x40, 0x40, 0x3f, 0x3e, 0x3c, 0x3b, + 0x3a, 0x39, 0x37, 0x36, 0x36, 0x35, 0x35, 0x36, + 0x36, 0x35, 0x35, 0x36, 0x36, 0x38, 0x39, 0x39, + 0x3b, 0x3c, 0x3e, 0x40, 0x41, 0x43, 0x45, 0x47, + 0x48, 0x48, 0x4b, 0x4c, 0x4d, 0x4f, 0x51, 0x53, + 0x56, 0x56, 0x59, 0x5b, 0x5d, 0x5f, 0x61, 0x62, + 0x63, 0x63, 0x61, 0x5e, 0x5c, 0x5a, 0x59, 0x57, + 0x56, 0x54, 0x54, 0x53, 0x52, 0x53, 0x53, 0x55, + 0x56, 0x56, 0x57, 0x57, 0x57, 0x57, 0x56, 0x56, + 0x55, 0x53, 0x51, 0x4f, 0x4d, 0x4b, 0x49, 0x4b, + 0x4b, 0x4c, 0x4d, 0x4e, 0x51, 0x53, 0x55, 0x58, + 0x5b, 0x5c, 0x60, 0x60, 0x5f, 0x5e, 0x5d, 0x5c, + 0x5a, 0x57, 0x53, 0x4f, 0x4b, 0x46, 0x45, 0x44, + 0x44, 0x44, 0x44, }, + { 0x1d, 0x25, 0x22, 0x22, 0x23, 0x23, 0x24, 0x25, + 0x25, 0x28, 0x2b, 0x2e, 0x31, 0x34, 0x37, 0x3a, + 0x3e, 0x42, 0x45, 0x48, 0x4b, 0x4d, 0x4f, 0x4f, + 0x50, 0x51, 0x52, 0x50, 0x4f, 0x4b, 0x4a, 0x47, + 0x45, 0x43, 0x3f, 0x3c, 0x38, 0x35, 0x33, 0x30, + 0x2f, 0x2d, 0x2a, 0x28, 0x27, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2b, 0x2f, 0x32, 0x34, 0x37, 0x39, 0x3c, 0x3d, + 0x3e, 0x3f, 0x40, 0x41, 0x40, 0x3e, 0x3d, 0x3b, + 0x39, 0x36, 0x34, 0x32, 0x30, 0x2d, 0x2a, 0x26, + 0x26, 0x24, 0x22, 0x1f, 0x1d, 0x1c, 0x1a, 0x19, + 0x18, 0x16, 0x15, 0x14, 0x12, 0x12, 0x12, 0x10, + 0x10, 0x0f, 0x0e, 0x10, 0x0e, 0x0e, 0x0d, 0x0c, + 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0e, 0x0d, 0x0e, + 0x0f, 0x0f, 0x0f, 0x10, 0x11, 0x11, 0x11, 0x12, + 0x13, 0x14, 0x16, 0x16, 0x18, 0x1a, 0x1b, 0x1c, + 0x1e, 0x21, 0x23, 0x25, 0x28, 0x2a, 0x2e, 0x32, + 0x34, 0x38, 0x3a, 0x3c, 0x3d, 0x3f, 0x40, 0x42, + 0x43, 0x43, 0x43, 0x42, 0x40, 0x3e, 0x3e, 0x3c, + 0x3b, 0x3b, 0x3c, 0x3a, 0x3b, 0x3b, 0x3e, 0x3e, + 0x40, 0x3f, 0x41, 0x41, 0x41, 0x42, 0x42, 0x43, + 0x42, 0x41, 0x41, 0x41, 0x40, 0x3e, 0x3d, 0x3c, + 0x3b, 0x3a, 0x39, 0x37, 0x36, 0x35, 0x36, 0x37, + 0x35, 0x36, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, + 0x3b, 0x3d, 0x3e, 0x40, 0x41, 0x41, 0x44, 0x46, + 0x48, 0x48, 0x4a, 0x4c, 0x4d, 0x4f, 0x51, 0x53, + 0x55, 0x57, 0x59, 0x5a, 0x5b, 0x5e, 0x5f, 0x61, + 0x62, 0x61, 0x60, 0x5e, 0x5c, 0x5a, 0x59, 0x58, + 0x56, 0x55, 0x54, 0x53, 0x53, 0x54, 0x54, 0x55, + 0x57, 0x57, 0x58, 0x59, 0x5a, 0x58, 0x59, 0x58, + 0x57, 0x55, 0x53, 0x52, 0x4f, 0x4e, 0x4d, 0x4d, + 0x4d, 0x4f, 0x51, 0x50, 0x54, 0x56, 0x59, 0x5c, + 0x5f, 0x61, 0x64, 0x64, 0x63, 0x61, 0x5e, 0x5e, + 0x5c, 0x59, 0x54, 0x50, 0x4c, 0x46, 0x45, 0x44, + 0x44, 0x44, 0x44, }, + { 0x1c, 0x24, 0x21, 0x21, 0x21, 0x22, 0x23, 0x23, + 0x25, 0x27, 0x2a, 0x2e, 0x31, 0x33, 0x37, 0x3b, + 0x3e, 0x42, 0x45, 0x48, 0x4b, 0x4c, 0x50, 0x4f, + 0x50, 0x51, 0x52, 0x50, 0x4e, 0x4b, 0x4a, 0x49, + 0x45, 0x42, 0x3f, 0x3c, 0x38, 0x35, 0x33, 0x30, + 0x2f, 0x2d, 0x2a, 0x28, 0x27, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2b, 0x2f, 0x32, 0x34, 0x38, 0x39, 0x3c, 0x3d, + 0x3e, 0x3e, 0x40, 0x41, 0x40, 0x3e, 0x3c, 0x3a, + 0x39, 0x37, 0x35, 0x33, 0x30, 0x2d, 0x2b, 
0x28, + 0x26, 0x23, 0x23, 0x20, 0x1e, 0x1b, 0x19, 0x19, + 0x17, 0x16, 0x15, 0x14, 0x12, 0x12, 0x11, 0x10, + 0x0f, 0x0e, 0x0e, 0x10, 0x0e, 0x0d, 0x0c, 0x0c, + 0x0c, 0x0d, 0x0d, 0x0d, 0x0d, 0x0e, 0x0d, 0x0e, + 0x0f, 0x0f, 0x0f, 0x10, 0x11, 0x11, 0x12, 0x14, + 0x14, 0x14, 0x16, 0x18, 0x19, 0x1b, 0x1c, 0x1e, + 0x20, 0x23, 0x26, 0x27, 0x29, 0x2c, 0x2f, 0x33, + 0x36, 0x38, 0x3b, 0x3e, 0x3e, 0x42, 0x43, 0x46, + 0x46, 0x46, 0x46, 0x44, 0x42, 0x41, 0x3f, 0x3e, + 0x3d, 0x3d, 0x3e, 0x3d, 0x3d, 0x3e, 0x3e, 0x40, + 0x40, 0x40, 0x43, 0x43, 0x42, 0x43, 0x45, 0x43, + 0x43, 0x43, 0x42, 0x42, 0x41, 0x40, 0x40, 0x3e, + 0x3c, 0x3a, 0x3a, 0x38, 0x36, 0x36, 0x36, 0x36, + 0x37, 0x37, 0x36, 0x38, 0x38, 0x39, 0x3b, 0x3b, + 0x3e, 0x3e, 0x3e, 0x40, 0x41, 0x43, 0x45, 0x46, + 0x46, 0x49, 0x4c, 0x4c, 0x4d, 0x4f, 0x51, 0x54, + 0x56, 0x57, 0x58, 0x5a, 0x5c, 0x5e, 0x60, 0x60, + 0x61, 0x61, 0x60, 0x5f, 0x5c, 0x5a, 0x59, 0x58, + 0x57, 0x57, 0x55, 0x54, 0x53, 0x55, 0x55, 0x58, + 0x58, 0x59, 0x5a, 0x5a, 0x5a, 0x5b, 0x5b, 0x5b, + 0x5a, 0x59, 0x56, 0x54, 0x53, 0x4e, 0x4e, 0x50, + 0x50, 0x51, 0x52, 0x52, 0x57, 0x59, 0x5d, 0x60, + 0x63, 0x63, 0x66, 0x66, 0x66, 0x64, 0x63, 0x61, + 0x60, 0x5b, 0x55, 0x51, 0x4d, 0x48, 0x45, 0x44, + 0x43, 0x43, 0x43, }, + { 0x1b, 0x23, 0x20, 0x21, 0x22, 0x22, 0x23, 0x24, + 0x26, 0x27, 0x2a, 0x2e, 0x31, 0x33, 0x37, 0x3b, + 0x3d, 0x42, 0x46, 0x49, 0x4a, 0x4c, 0x4f, 0x4f, + 0x50, 0x50, 0x52, 0x50, 0x4e, 0x4b, 0x4b, 0x49, + 0x45, 0x42, 0x3e, 0x3c, 0x38, 0x35, 0x33, 0x30, + 0x2f, 0x2d, 0x2a, 0x28, 0x27, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2c, 0x2f, 0x32, 0x35, 0x38, 0x3a, 0x3c, 0x3d, + 0x3e, 0x3e, 0x40, 0x41, 0x40, 0x3f, 0x3d, 0x3b, + 0x3a, 0x38, 0x36, 0x33, 0x30, 0x2d, 0x2b, 0x29, + 0x27, 0x24, 0x24, 0x21, 0x1e, 0x1c, 0x1b, 0x1a, + 0x18, 0x17, 0x16, 0x15, 0x13, 0x12, 0x10, 0x0f, + 0x10, 0x0f, 0x0e, 0x0f, 0x0e, 0x0d, 0x0d, 0x0d, + 0x0d, 0x0d, 0x0e, 0x0e, 0x0e, 0x0f, 0x0e, 0x0f, + 0x10, 0x11, 0x11, 0x12, 0x13, 0x13, 0x14, 0x15, + 0x15, 0x16, 0x17, 0x1a, 0x1b, 0x1d, 0x1e, 0x20, + 0x21, 0x25, 0x27, 0x29, 0x2b, 0x2d, 0x31, 0x35, + 0x37, 0x39, 0x3c, 0x3f, 0x40, 0x43, 0x46, 0x47, + 0x4a, 0x49, 0x48, 0x46, 0x45, 0x43, 0x42, 0x41, + 0x3f, 0x40, 0x3f, 0x3f, 0x40, 0x3f, 0x41, 0x43, + 0x43, 0x43, 0x44, 0x45, 0x45, 0x45, 0x45, 0x45, + 0x45, 0x45, 0x44, 0x43, 0x43, 0x42, 0x42, 0x40, + 0x3e, 0x3d, 0x3c, 0x39, 0x38, 0x38, 0x38, 0x38, + 0x38, 0x36, 0x38, 0x39, 0x39, 0x3a, 0x3c, 0x3d, + 0x3e, 0x3e, 0x3f, 0x41, 0x42, 0x42, 0x43, 0x45, + 0x46, 0x49, 0x4b, 0x4d, 0x4f, 0x50, 0x53, 0x54, + 0x57, 0x58, 0x5a, 0x5c, 0x5b, 0x5e, 0x60, 0x61, + 0x60, 0x60, 0x5f, 0x5f, 0x5d, 0x5b, 0x5b, 0x59, + 0x58, 0x57, 0x56, 0x55, 0x55, 0x55, 0x57, 0x59, + 0x5b, 0x5b, 0x5d, 0x5c, 0x5c, 0x5e, 0x5e, 0x5e, + 0x5d, 0x5b, 0x59, 0x56, 0x54, 0x51, 0x51, 0x51, + 0x52, 0x55, 0x56, 0x56, 0x5a, 0x5d, 0x5f, 0x63, + 0x66, 0x68, 0x6b, 0x6b, 0x68, 0x67, 0x66, 0x64, + 0x61, 0x5d, 0x57, 0x52, 0x4f, 0x49, 0x46, 0x45, + 0x43, 0x43, 0x43, }, + { 0x1a, 0x22, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, + 0x26, 0x27, 0x2a, 0x2d, 0x31, 0x33, 0x37, 0x3b, + 0x3d, 0x41, 0x46, 0x49, 0x4a, 0x4d, 0x4f, 0x4f, + 0x50, 0x51, 0x52, 0x50, 0x4e, 0x4b, 0x4b, 0x48, + 0x44, 0x42, 0x3e, 0x3c, 0x39, 0x35, 0x33, 0x30, + 0x2f, 0x2d, 0x2a, 0x28, 0x27, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2d, 0x2f, 0x32, 0x35, 0x39, 0x3a, 0x3c, 0x3d, + 0x3e, 0x3f, 0x40, 0x41, 0x40, 0x3f, 0x3e, 0x3c, + 0x3a, 0x38, 0x36, 0x33, 0x31, 0x2d, 0x2c, 0x29, + 0x27, 0x26, 0x24, 0x21, 0x1f, 0x1d, 0x1c, 0x1a, + 0x19, 0x18, 0x16, 0x15, 0x14, 0x13, 0x12, 
0x10, + 0x11, 0x10, 0x0f, 0x0f, 0x0f, 0x0e, 0x0e, 0x0e, + 0x0f, 0x0f, 0x0e, 0x0e, 0x0e, 0x0f, 0x0f, 0x10, + 0x11, 0x12, 0x12, 0x13, 0x15, 0x15, 0x16, 0x16, + 0x17, 0x18, 0x1a, 0x1b, 0x1c, 0x1e, 0x1f, 0x21, + 0x22, 0x25, 0x27, 0x2a, 0x2c, 0x2e, 0x33, 0x36, + 0x39, 0x3a, 0x3d, 0x40, 0x41, 0x45, 0x47, 0x4a, + 0x4c, 0x4d, 0x4c, 0x4a, 0x48, 0x45, 0x44, 0x41, + 0x42, 0x42, 0x42, 0x42, 0x42, 0x43, 0x43, 0x44, + 0x45, 0x47, 0x47, 0x48, 0x47, 0x48, 0x47, 0x47, + 0x48, 0x48, 0x46, 0x46, 0x46, 0x43, 0x43, 0x41, + 0x3f, 0x3e, 0x3b, 0x39, 0x38, 0x37, 0x37, 0x37, + 0x38, 0x38, 0x37, 0x39, 0x39, 0x3a, 0x3c, 0x3e, + 0x3e, 0x3f, 0x3f, 0x3f, 0x42, 0x43, 0x43, 0x45, + 0x47, 0x48, 0x4b, 0x4c, 0x4e, 0x50, 0x51, 0x54, + 0x56, 0x58, 0x5a, 0x5c, 0x5c, 0x5f, 0x5f, 0x5f, + 0x61, 0x60, 0x5f, 0x5f, 0x5e, 0x5b, 0x5c, 0x5b, + 0x59, 0x59, 0x57, 0x56, 0x55, 0x56, 0x57, 0x59, + 0x5a, 0x5b, 0x5c, 0x5c, 0x5d, 0x5e, 0x5e, 0x5d, + 0x5e, 0x5c, 0x5a, 0x57, 0x55, 0x52, 0x51, 0x52, + 0x53, 0x55, 0x57, 0x58, 0x5c, 0x5e, 0x61, 0x65, + 0x69, 0x6b, 0x6c, 0x6b, 0x6a, 0x69, 0x67, 0x64, + 0x61, 0x5d, 0x59, 0x53, 0x4d, 0x48, 0x46, 0x45, + 0x44, 0x44, 0x43, }, + { 0x1a, 0x21, 0x1e, 0x1f, 0x20, 0x21, 0x23, 0x24, + 0x25, 0x28, 0x2a, 0x2e, 0x31, 0x33, 0x37, 0x3b, + 0x3e, 0x41, 0x46, 0x49, 0x4b, 0x4d, 0x4f, 0x4e, + 0x50, 0x51, 0x51, 0x50, 0x4e, 0x4b, 0x4a, 0x48, + 0x44, 0x42, 0x3e, 0x3c, 0x39, 0x35, 0x32, 0x30, + 0x2f, 0x2d, 0x29, 0x27, 0x27, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x26, 0x27, 0x29, 0x2a, + 0x2c, 0x2f, 0x32, 0x35, 0x38, 0x3b, 0x3c, 0x3e, + 0x3f, 0x3f, 0x40, 0x41, 0x40, 0x3f, 0x3e, 0x3c, + 0x3a, 0x39, 0x36, 0x34, 0x31, 0x2d, 0x2c, 0x29, + 0x27, 0x26, 0x24, 0x21, 0x1f, 0x1d, 0x1c, 0x1a, + 0x19, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x10, + 0x11, 0x10, 0x0f, 0x0f, 0x0f, 0x0e, 0x0e, 0x0e, + 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0f, 0x0f, 0x10, + 0x11, 0x13, 0x14, 0x14, 0x15, 0x16, 0x17, 0x19, + 0x19, 0x1a, 0x1c, 0x1d, 0x1e, 0x20, 0x22, 0x24, + 0x25, 0x27, 0x29, 0x2c, 0x2e, 0x31, 0x35, 0x38, + 0x3a, 0x3d, 0x41, 0x42, 0x45, 0x48, 0x4c, 0x4e, + 0x4f, 0x4f, 0x4f, 0x4d, 0x4b, 0x49, 0x47, 0x47, + 0x46, 0x45, 0x45, 0x45, 0x44, 0x44, 0x46, 0x47, + 0x48, 0x49, 0x4b, 0x4b, 0x4a, 0x4b, 0x4b, 0x4a, + 0x4b, 0x4a, 0x49, 0x49, 0x48, 0x46, 0x46, 0x44, + 0x42, 0x41, 0x3d, 0x3b, 0x3a, 0x38, 0x38, 0x38, + 0x37, 0x37, 0x39, 0x38, 0x3a, 0x3a, 0x3c, 0x3c, + 0x3e, 0x40, 0x40, 0x41, 0x43, 0x43, 0x45, 0x46, + 0x48, 0x49, 0x4b, 0x4e, 0x4f, 0x50, 0x53, 0x55, + 0x57, 0x59, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, + 0x60, 0x60, 0x5f, 0x5f, 0x5e, 0x5c, 0x5b, 0x5a, + 0x59, 0x58, 0x57, 0x57, 0x56, 0x56, 0x57, 0x58, + 0x59, 0x5a, 0x5b, 0x5c, 0x5c, 0x5d, 0x5e, 0x5d, + 0x5c, 0x5b, 0x58, 0x57, 0x54, 0x52, 0x52, 0x53, + 0x54, 0x57, 0x58, 0x58, 0x5b, 0x5e, 0x62, 0x65, + 0x69, 0x6b, 0x6d, 0x6c, 0x6a, 0x69, 0x67, 0x64, + 0x62, 0x5e, 0x59, 0x54, 0x4d, 0x48, 0x47, 0x46, + 0x45, 0x45, 0x44, }, + { 0x1a, 0x21, 0x1e, 0x1f, 0x20, 0x21, 0x23, 0x24, + 0x25, 0x28, 0x2a, 0x2e, 0x31, 0x34, 0x37, 0x3b, + 0x3e, 0x42, 0x47, 0x49, 0x4b, 0x4d, 0x4f, 0x4f, + 0x50, 0x51, 0x51, 0x50, 0x50, 0x4c, 0x4a, 0x47, + 0x44, 0x42, 0x3e, 0x3c, 0x39, 0x35, 0x32, 0x31, + 0x2f, 0x2d, 0x29, 0x27, 0x26, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x25, 0x25, 0x26, 0x27, 0x29, 0x2b, + 0x2c, 0x2f, 0x33, 0x35, 0x38, 0x3a, 0x3c, 0x3e, + 0x40, 0x40, 0x41, 0x42, 0x41, 0x3f, 0x3f, 0x3d, + 0x3b, 0x39, 0x36, 0x33, 0x32, 0x2e, 0x2d, 0x2a, + 0x27, 0x26, 0x25, 0x22, 0x1f, 0x1d, 0x1c, 0x1b, + 0x19, 0x17, 0x17, 0x16, 0x15, 0x14, 0x12, 0x11, + 0x11, 0x11, 0x10, 0x10, 0x0f, 0x0f, 0x0f, 0x0f, + 0x0f, 0x0f, 0x10, 0x11, 0x10, 0x11, 0x11, 
0x12, + 0x11, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1b, + 0x1c, 0x1c, 0x1e, 0x20, 0x21, 0x22, 0x23, 0x25, + 0x27, 0x2a, 0x2c, 0x2f, 0x31, 0x35, 0x38, 0x3b, + 0x3d, 0x40, 0x44, 0x47, 0x49, 0x4c, 0x4f, 0x51, + 0x53, 0x53, 0x53, 0x51, 0x50, 0x4e, 0x4c, 0x4b, + 0x4a, 0x49, 0x49, 0x49, 0x49, 0x4a, 0x4a, 0x4d, + 0x4e, 0x4e, 0x4f, 0x50, 0x4f, 0x50, 0x51, 0x50, + 0x50, 0x4e, 0x4d, 0x4c, 0x4b, 0x48, 0x48, 0x47, + 0x44, 0x42, 0x3f, 0x3d, 0x3b, 0x3a, 0x39, 0x39, + 0x39, 0x38, 0x39, 0x3b, 0x3a, 0x3c, 0x3e, 0x3d, + 0x40, 0x40, 0x40, 0x42, 0x42, 0x42, 0x45, 0x46, + 0x47, 0x49, 0x4c, 0x4e, 0x50, 0x50, 0x53, 0x56, + 0x58, 0x59, 0x5d, 0x5d, 0x5e, 0x60, 0x61, 0x61, + 0x62, 0x61, 0x60, 0x60, 0x5e, 0x5d, 0x5d, 0x5b, + 0x57, 0x58, 0x56, 0x55, 0x55, 0x56, 0x56, 0x59, + 0x59, 0x58, 0x5a, 0x5a, 0x5a, 0x5c, 0x5c, 0x5c, + 0x5b, 0x5b, 0x58, 0x57, 0x54, 0x53, 0x52, 0x53, + 0x54, 0x57, 0x58, 0x59, 0x5c, 0x5f, 0x63, 0x67, + 0x6b, 0x6d, 0x6e, 0x6e, 0x6b, 0x6a, 0x68, 0x64, + 0x62, 0x5e, 0x58, 0x53, 0x4f, 0x49, 0x47, 0x46, + 0x45, 0x45, 0x44, }, + { 0x19, 0x20, 0x1e, 0x1e, 0x1f, 0x20, 0x22, 0x23, + 0x25, 0x27, 0x2a, 0x2e, 0x31, 0x34, 0x37, 0x3a, + 0x3e, 0x41, 0x46, 0x49, 0x4a, 0x4d, 0x4f, 0x4e, + 0x50, 0x51, 0x51, 0x4f, 0x4f, 0x4d, 0x49, 0x47, + 0x44, 0x42, 0x3e, 0x3c, 0x39, 0x36, 0x32, 0x31, + 0x2f, 0x2d, 0x29, 0x27, 0x26, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x25, 0x25, 0x26, 0x28, 0x29, 0x2b, + 0x2c, 0x2f, 0x33, 0x35, 0x38, 0x3a, 0x3c, 0x3e, + 0x3f, 0x3f, 0x41, 0x42, 0x41, 0x3f, 0x3f, 0x3d, + 0x3c, 0x39, 0x36, 0x33, 0x32, 0x2e, 0x2d, 0x2a, + 0x27, 0x26, 0x25, 0x22, 0x1f, 0x1e, 0x1d, 0x1b, + 0x1a, 0x17, 0x17, 0x17, 0x14, 0x14, 0x12, 0x11, + 0x11, 0x12, 0x11, 0x11, 0x10, 0x10, 0x10, 0x10, + 0x10, 0x10, 0x11, 0x11, 0x11, 0x12, 0x13, 0x14, + 0x14, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1c, 0x1e, + 0x1e, 0x1f, 0x22, 0x23, 0x23, 0x24, 0x25, 0x27, + 0x2a, 0x2d, 0x2f, 0x31, 0x35, 0x38, 0x3a, 0x3e, + 0x41, 0x44, 0x48, 0x4b, 0x4d, 0x51, 0x53, 0x55, + 0x57, 0x57, 0x56, 0x55, 0x54, 0x52, 0x52, 0x50, + 0x4e, 0x50, 0x4e, 0x4d, 0x4d, 0x4d, 0x4f, 0x51, + 0x51, 0x52, 0x54, 0x55, 0x55, 0x55, 0x57, 0x55, + 0x54, 0x53, 0x52, 0x4e, 0x4d, 0x4b, 0x4a, 0x49, + 0x46, 0x44, 0x41, 0x3f, 0x3d, 0x3b, 0x3a, 0x3a, + 0x39, 0x39, 0x39, 0x39, 0x3a, 0x3b, 0x3d, 0x3e, + 0x3f, 0x40, 0x41, 0x42, 0x44, 0x44, 0x45, 0x47, + 0x49, 0x49, 0x4a, 0x4d, 0x50, 0x51, 0x53, 0x57, + 0x5a, 0x5b, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x62, + 0x63, 0x62, 0x60, 0x60, 0x5e, 0x5c, 0x5c, 0x59, + 0x58, 0x56, 0x55, 0x55, 0x55, 0x55, 0x55, 0x54, + 0x56, 0x56, 0x57, 0x58, 0x58, 0x59, 0x5a, 0x59, + 0x58, 0x57, 0x56, 0x55, 0x54, 0x52, 0x53, 0x53, + 0x53, 0x56, 0x57, 0x59, 0x5b, 0x5e, 0x62, 0x66, + 0x6a, 0x6c, 0x6d, 0x6e, 0x6b, 0x69, 0x67, 0x64, + 0x61, 0x5d, 0x58, 0x54, 0x50, 0x4a, 0x47, 0x46, + 0x45, 0x45, 0x44, }, + { 0x1a, 0x21, 0x1e, 0x1f, 0x1f, 0x20, 0x22, 0x23, + 0x25, 0x27, 0x2b, 0x2e, 0x31, 0x34, 0x37, 0x3b, + 0x3d, 0x42, 0x45, 0x49, 0x4a, 0x4d, 0x4e, 0x4e, + 0x51, 0x52, 0x50, 0x4f, 0x4f, 0x4c, 0x49, 0x48, + 0x45, 0x42, 0x3e, 0x3b, 0x39, 0x36, 0x32, 0x32, + 0x2f, 0x2c, 0x2a, 0x28, 0x26, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x25, 0x28, 0x29, 0x2b, + 0x2d, 0x2f, 0x33, 0x35, 0x38, 0x3a, 0x3c, 0x3e, + 0x3f, 0x3f, 0x41, 0x42, 0x41, 0x3f, 0x3e, 0x3c, + 0x3c, 0x3a, 0x37, 0x33, 0x32, 0x2f, 0x2d, 0x2b, + 0x28, 0x26, 0x25, 0x22, 0x20, 0x1e, 0x1d, 0x1b, + 0x1a, 0x17, 0x17, 0x16, 0x14, 0x14, 0x12, 0x11, + 0x12, 0x11, 0x11, 0x11, 0x11, 0x10, 0x10, 0x10, + 0x10, 0x11, 0x12, 0x12, 0x12, 0x13, 0x14, 0x14, + 0x16, 0x18, 0x19, 0x1a, 0x1b, 0x1d, 0x1e, 0x1f, + 0x21, 0x22, 0x23, 0x25, 0x26, 0x26, 0x28, 
0x2a, + 0x2c, 0x2e, 0x32, 0x34, 0x39, 0x39, 0x3d, 0x41, + 0x45, 0x47, 0x4c, 0x4e, 0x51, 0x54, 0x56, 0x58, + 0x5b, 0x5c, 0x5a, 0x59, 0x58, 0x56, 0x55, 0x53, + 0x53, 0x52, 0x52, 0x51, 0x52, 0x52, 0x53, 0x55, + 0x57, 0x58, 0x5a, 0x5a, 0x59, 0x5b, 0x59, 0x59, + 0x58, 0x57, 0x55, 0x53, 0x51, 0x4e, 0x4c, 0x4a, + 0x48, 0x46, 0x43, 0x40, 0x3e, 0x3c, 0x3b, 0x3b, + 0x38, 0x39, 0x38, 0x39, 0x3a, 0x3d, 0x3d, 0x3e, + 0x3f, 0x40, 0x41, 0x43, 0x44, 0x45, 0x46, 0x48, + 0x4a, 0x4b, 0x4d, 0x4e, 0x50, 0x52, 0x54, 0x56, + 0x59, 0x5c, 0x5e, 0x5f, 0x60, 0x62, 0x62, 0x63, + 0x63, 0x63, 0x61, 0x5f, 0x5e, 0x5d, 0x5c, 0x5b, + 0x59, 0x56, 0x56, 0x55, 0x54, 0x53, 0x53, 0x54, + 0x55, 0x54, 0x55, 0x55, 0x55, 0x57, 0x58, 0x57, + 0x57, 0x56, 0x55, 0x54, 0x54, 0x52, 0x52, 0x53, + 0x54, 0x55, 0x57, 0x58, 0x5b, 0x5e, 0x62, 0x65, + 0x69, 0x6b, 0x6d, 0x6e, 0x6a, 0x69, 0x67, 0x63, + 0x61, 0x5d, 0x58, 0x54, 0x4f, 0x4b, 0x48, 0x47, + 0x46, 0x45, 0x45, }, + { 0x1a, 0x21, 0x1e, 0x1f, 0x1f, 0x20, 0x22, 0x23, + 0x25, 0x27, 0x2b, 0x2d, 0x31, 0x34, 0x37, 0x3b, + 0x3d, 0x42, 0x45, 0x48, 0x4c, 0x4e, 0x4e, 0x4f, + 0x51, 0x52, 0x50, 0x50, 0x4f, 0x4c, 0x4a, 0x48, + 0x45, 0x42, 0x3f, 0x3b, 0x39, 0x36, 0x32, 0x31, + 0x2f, 0x2c, 0x2a, 0x28, 0x26, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x27, 0x28, 0x29, 0x2b, + 0x2d, 0x30, 0x33, 0x36, 0x39, 0x3b, 0x3d, 0x3f, + 0x3f, 0x40, 0x42, 0x43, 0x42, 0x40, 0x3e, 0x3c, + 0x3c, 0x3a, 0x37, 0x34, 0x32, 0x2f, 0x2d, 0x2c, + 0x2a, 0x27, 0x26, 0x23, 0x20, 0x1e, 0x1d, 0x1c, + 0x1a, 0x18, 0x18, 0x17, 0x15, 0x16, 0x14, 0x12, + 0x12, 0x12, 0x12, 0x12, 0x12, 0x11, 0x11, 0x12, + 0x12, 0x12, 0x13, 0x14, 0x14, 0x14, 0x15, 0x16, + 0x17, 0x19, 0x1b, 0x1c, 0x1e, 0x20, 0x20, 0x22, + 0x24, 0x25, 0x26, 0x27, 0x28, 0x2a, 0x2c, 0x2c, + 0x2f, 0x32, 0x35, 0x37, 0x3b, 0x3c, 0x41, 0x45, + 0x48, 0x4c, 0x50, 0x52, 0x54, 0x57, 0x5a, 0x5c, + 0x5f, 0x5f, 0x5f, 0x5d, 0x5c, 0x5b, 0x5a, 0x58, + 0x57, 0x57, 0x57, 0x56, 0x56, 0x57, 0x57, 0x5a, + 0x5c, 0x5e, 0x5f, 0x61, 0x5f, 0x5f, 0x5f, 0x5e, + 0x5d, 0x5c, 0x5a, 0x57, 0x55, 0x52, 0x4f, 0x4e, + 0x4a, 0x47, 0x46, 0x42, 0x41, 0x3e, 0x3d, 0x3c, + 0x3b, 0x3a, 0x39, 0x39, 0x3b, 0x3c, 0x3d, 0x3f, + 0x40, 0x42, 0x42, 0x44, 0x45, 0x46, 0x49, 0x49, + 0x4b, 0x4c, 0x4e, 0x4f, 0x51, 0x54, 0x57, 0x58, + 0x5b, 0x5d, 0x61, 0x61, 0x61, 0x63, 0x65, 0x65, + 0x64, 0x64, 0x62, 0x61, 0x60, 0x5e, 0x5d, 0x5c, + 0x59, 0x58, 0x56, 0x54, 0x53, 0x53, 0x53, 0x54, + 0x54, 0x53, 0x53, 0x54, 0x54, 0x54, 0x55, 0x55, + 0x56, 0x55, 0x54, 0x53, 0x53, 0x52, 0x52, 0x53, + 0x55, 0x56, 0x57, 0x58, 0x5b, 0x5e, 0x62, 0x66, + 0x69, 0x6b, 0x6d, 0x6d, 0x6b, 0x69, 0x67, 0x64, + 0x61, 0x5d, 0x58, 0x55, 0x50, 0x4b, 0x48, 0x47, + 0x46, 0x46, 0x46, }, + { 0x1a, 0x20, 0x1e, 0x1f, 0x1f, 0x21, 0x22, 0x23, + 0x25, 0x27, 0x2b, 0x2d, 0x31, 0x34, 0x37, 0x3b, + 0x3d, 0x42, 0x45, 0x48, 0x4c, 0x4e, 0x4f, 0x4f, + 0x51, 0x52, 0x51, 0x50, 0x4e, 0x4b, 0x4a, 0x48, + 0x45, 0x42, 0x3f, 0x3b, 0x38, 0x36, 0x32, 0x31, + 0x2f, 0x2c, 0x2a, 0x28, 0x26, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x27, 0x28, 0x29, 0x2b, + 0x2e, 0x30, 0x33, 0x36, 0x39, 0x3b, 0x3d, 0x3f, + 0x3f, 0x40, 0x41, 0x42, 0x41, 0x40, 0x3e, 0x3c, + 0x3c, 0x3a, 0x37, 0x34, 0x33, 0x30, 0x2e, 0x2b, + 0x29, 0x26, 0x24, 0x24, 0x20, 0x1f, 0x1d, 0x1d, + 0x1a, 0x19, 0x17, 0x16, 0x16, 0x16, 0x16, 0x14, + 0x13, 0x12, 0x13, 0x13, 0x13, 0x12, 0x12, 0x13, + 0x13, 0x14, 0x15, 0x15, 0x14, 0x15, 0x16, 0x18, + 0x19, 0x1b, 0x1c, 0x1e, 0x20, 0x21, 0x22, 0x24, + 0x27, 0x28, 0x29, 0x2a, 0x2c, 0x2c, 0x2d, 0x2f, + 0x32, 0x35, 0x37, 0x3a, 0x3c, 0x3e, 0x44, 0x48, + 0x4c, 0x50, 0x54, 0x56, 0x58, 0x5b, 0x5e, 
0x60, + 0x61, 0x63, 0x62, 0x61, 0x60, 0x5f, 0x5e, 0x5e, + 0x5c, 0x5c, 0x5b, 0x5a, 0x5a, 0x5b, 0x5c, 0x5e, + 0x60, 0x63, 0x64, 0x65, 0x63, 0x62, 0x63, 0x63, + 0x61, 0x60, 0x5e, 0x5b, 0x58, 0x55, 0x51, 0x4f, + 0x4c, 0x4a, 0x47, 0x44, 0x42, 0x41, 0x3e, 0x3c, + 0x3b, 0x3a, 0x3a, 0x3b, 0x3b, 0x3c, 0x3e, 0x3f, + 0x40, 0x42, 0x43, 0x45, 0x46, 0x47, 0x49, 0x4a, + 0x4c, 0x4c, 0x4f, 0x51, 0x52, 0x55, 0x58, 0x5b, + 0x5c, 0x5f, 0x61, 0x62, 0x63, 0x64, 0x64, 0x65, + 0x66, 0x65, 0x63, 0x62, 0x5f, 0x5e, 0x5e, 0x5c, + 0x5b, 0x58, 0x56, 0x55, 0x54, 0x53, 0x52, 0x53, + 0x52, 0x52, 0x52, 0x52, 0x52, 0x53, 0x55, 0x55, + 0x55, 0x53, 0x53, 0x53, 0x52, 0x51, 0x52, 0x52, + 0x55, 0x55, 0x58, 0x58, 0x5b, 0x5d, 0x61, 0x65, + 0x68, 0x6a, 0x6c, 0x6b, 0x69, 0x68, 0x67, 0x64, + 0x61, 0x5e, 0x58, 0x54, 0x4f, 0x4b, 0x49, 0x48, + 0x47, 0x46, 0x45, }, + { 0x19, 0x20, 0x1d, 0x1f, 0x1f, 0x20, 0x23, 0x23, + 0x25, 0x27, 0x2b, 0x2d, 0x31, 0x34, 0x37, 0x3b, + 0x3d, 0x42, 0x45, 0x48, 0x4c, 0x4e, 0x4f, 0x4f, + 0x51, 0x52, 0x51, 0x50, 0x4e, 0x4b, 0x4a, 0x48, + 0x44, 0x42, 0x3f, 0x3a, 0x38, 0x36, 0x32, 0x30, + 0x2f, 0x2c, 0x2a, 0x28, 0x26, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x26, 0x28, 0x29, 0x2b, + 0x2e, 0x30, 0x34, 0x36, 0x39, 0x3b, 0x3d, 0x3f, + 0x3f, 0x40, 0x41, 0x42, 0x41, 0x40, 0x3e, 0x3c, + 0x3c, 0x3a, 0x37, 0x34, 0x33, 0x30, 0x2e, 0x2b, + 0x29, 0x27, 0x25, 0x24, 0x21, 0x1f, 0x1e, 0x1c, + 0x1b, 0x19, 0x17, 0x16, 0x16, 0x16, 0x16, 0x14, + 0x13, 0x12, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, + 0x13, 0x14, 0x15, 0x14, 0x14, 0x14, 0x17, 0x19, + 0x1a, 0x1c, 0x1e, 0x20, 0x21, 0x23, 0x24, 0x26, + 0x29, 0x29, 0x2b, 0x2c, 0x2d, 0x2e, 0x30, 0x31, + 0x34, 0x38, 0x3b, 0x3c, 0x3f, 0x42, 0x47, 0x4c, + 0x50, 0x54, 0x57, 0x5b, 0x5c, 0x5e, 0x62, 0x63, + 0x66, 0x66, 0x66, 0x65, 0x64, 0x63, 0x61, 0x62, + 0x60, 0x60, 0x5f, 0x5e, 0x5e, 0x5f, 0x60, 0x62, + 0x65, 0x67, 0x69, 0x6a, 0x69, 0x68, 0x69, 0x67, + 0x66, 0x64, 0x62, 0x5f, 0x5c, 0x58, 0x54, 0x51, + 0x4e, 0x4b, 0x49, 0x45, 0x43, 0x41, 0x40, 0x3e, + 0x3c, 0x3a, 0x3b, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x41, 0x42, 0x44, 0x46, 0x46, 0x48, 0x49, 0x4b, + 0x4d, 0x50, 0x51, 0x53, 0x55, 0x57, 0x58, 0x5c, + 0x5f, 0x60, 0x63, 0x64, 0x64, 0x65, 0x66, 0x66, + 0x66, 0x65, 0x65, 0x63, 0x61, 0x5f, 0x5e, 0x5c, + 0x5a, 0x58, 0x56, 0x55, 0x54, 0x53, 0x52, 0x52, + 0x53, 0x52, 0x52, 0x52, 0x52, 0x53, 0x53, 0x53, + 0x54, 0x53, 0x53, 0x52, 0x53, 0x51, 0x53, 0x53, + 0x55, 0x57, 0x58, 0x59, 0x5b, 0x5d, 0x62, 0x64, + 0x68, 0x6a, 0x6c, 0x6b, 0x69, 0x68, 0x67, 0x64, + 0x61, 0x5d, 0x57, 0x54, 0x50, 0x4a, 0x48, 0x47, + 0x46, 0x45, 0x45, }, diff --git a/tests/tcg/hexagon/hvx_histogram_row.S b/tests/tcg/hexagon/hvx_histogram_row.S new file mode 100644 index 0000000000..5e42c33145 --- /dev/null +++ b/tests/tcg/hexagon/hvx_histogram_row.S @@ -0,0 +1,294 @@ +/* + * Copyright(c) 2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + + +/* + * void hvx_histogram_row(uint8_t *src, => r0 + * int stride, => r1 + * int width, => r2 + * int height, => r3 + * int *hist => r4) + */ + .text + .p2align 2 + .global hvx_histogram_row + .type hvx_histogram_row, @function +hvx_histogram_row: + { r2 = lsr(r2, #7) /* size / VLEN */ + r5 = and(r2, #127) /* size % VLEN */ + v1 = #0 + v0 = #0 + } + /* + * Step 1: Clean the whole vector register file + */ + { v3:2 = v1:0 + v5:4 = v1:0 + p0 = cmp.gt(r2, #0) /* P0 = (width / VLEN > 0) */ + p1 = cmp.eq(r5, #0) /* P1 = (width % VLEN == 0) */ + } + { q0 = vsetq(r5) + v7:6 = v1:0 + } + { v9:8 = v1:0 + v11:10 = v1:0 + } + { v13:12 = v1:0 + v15:14 = v1:0 + } + { v17:16 = v1:0 + v19:18 = v1:0 + } + { v21:20 = v1:0 + v23:22 = v1:0 + } + { v25:24 = v1:0 + v27:26 = v1:0 + } + { v29:28 = v1:0 + v31:30 = v1:0 + r10 = add(r0, r1) /* R10 = &src[2 * stride] */ + loop1(.outerloop, r3) + } + + /* + * Step 2: vhist + */ + .falign +.outerloop: + { if (!p0) jump .loopend + loop0(.innerloop, r2) + } + + .falign +.innerloop: + { v12.tmp = vmem(R0++#1) + vhist + }:endloop0 + + .falign +.loopend: + if (p1) jump .skip /* if (width % VLEN == 0) done with current row */ + { v13.tmp = vmem(r0 + #0) + vhist(q0) + } + + .falign +.skip: + { r0 = r10 /* R0 = &src[(i + 1) * stride] */ + r10 = add(r10, r1) /* R10 = &src[(i + 2) * stride] */ + }:endloop1 + + + /* + * Step 3: Sum up the data + */ + { v0.h = vshuff(v0.h) + r10 = ##0x00010001 + } + v1.h = vshuff(v1.h) + { V2.h = vshuff(v2.h) + v0.w = vdmpy(v0.h, r10.h):sat + } + { v3.h = vshuff(v3.h) + v1.w = vdmpy(v1.h, r10.h):sat + } + { v4.h = vshuff(V4.h) + v2.w = vdmpy(v2.h, r10.h):sat + } + { v5.h = vshuff(v5.h) + v3.w = vdmpy(v3.h, r10.h):sat + } + { v6.h = vshuff(v6.h) + v4.w = vdmpy(v4.h, r10.h):sat + } + { v7.h = vshuff(v7.h) + v5.w = vdmpy(v5.h, r10.h):sat + } + { v8.h = vshuff(V8.h) + v6.w = vdmpy(v6.h, r10.h):sat + } + { v9.h = vshuff(V9.h) + v7.w = vdmpy(v7.h, r10.h):sat + } + { v10.h = vshuff(v10.h) + v8.w = vdmpy(v8.h, r10.h):sat + } + { v11.h = vshuff(v11.h) + v9.w = vdmpy(v9.h, r10.h):sat + } + { v12.h = vshuff(v12.h) + v10.w = vdmpy(v10.h, r10.h):sat + } + { v13.h = vshuff(V13.h) + v11.w = vdmpy(v11.h, r10.h):sat + } + { v14.h = vshuff(v14.h) + v12.w = vdmpy(v12.h, r10.h):sat + } + { v15.h = vshuff(v15.h) + v13.w = vdmpy(v13.h, r10.h):sat + } + { v16.h = vshuff(v16.h) + v14.w = vdmpy(v14.h, r10.h):sat + } + { v17.h = vshuff(v17.h) + v15.w = vdmpy(v15.h, r10.h):sat + } + { v18.h = vshuff(v18.h) + v16.w = vdmpy(v16.h, r10.h):sat + } + { v19.h = vshuff(v19.h) + v17.w = vdmpy(v17.h, r10.h):sat + } + { v20.h = vshuff(v20.h) + v18.W = vdmpy(v18.h, r10.h):sat + } + { v21.h = vshuff(v21.h) + v19.w = vdmpy(v19.h, r10.h):sat + } + { v22.h = vshuff(v22.h) + v20.w = vdmpy(v20.h, r10.h):sat + } + { v23.h = vshuff(v23.h) + v21.w = vdmpy(v21.h, r10.h):sat + } + { v24.h = vshuff(v24.h) + v22.w = vdmpy(v22.h, r10.h):sat + } + { v25.h = vshuff(v25.h) + v23.w = vdmpy(v23.h, r10.h):sat + } + { v26.h = vshuff(v26.h) + v24.w = vdmpy(v24.h, r10.h):sat + } + { v27.h = vshuff(V27.h) + v25.w = vdmpy(v25.h, r10.h):sat + } + { v28.h = vshuff(v28.h) + v26.w = vdmpy(v26.h, r10.h):sat + } + { v29.h = vshuff(v29.h) + v27.w = vdmpy(v27.h, r10.h):sat + } + { v30.h = vshuff(v30.h) + v28.w = vdmpy(v28.h, r10.h):sat + } + { v31.h = vshuff(v31.h) + v29.w = vdmpy(v29.h, r10.h):sat + r28 = #32 + } + { vshuff(v1, v0, r28) + v30.w = vdmpy(v30.h, r10.h):sat + } + { vshuff(v3, v2, r28) + v31.w = vdmpy(v31.h, r10.h):sat + } + { vshuff(v5, v4, r28) + v0.w = vadd(v1.w, v0.w) + v2.w = vadd(v3.w, 
v2.w) + } + { vshuff(v7, v6, r28) + r7 = #64 + } + { vshuff(v9, v8, r28) + v4.w = vadd(v5.w, v4.w) + v6.w = vadd(v7.w, v6.w) + } + vshuff(v11, v10, r28) + { vshuff(v13, v12, r28) + v8.w = vadd(v9.w, v8.w) + v10.w = vadd(v11.w, v10.w) + } + vshuff(v15, v14, r28) + { vshuff(v17, v16, r28) + v12.w = vadd(v13.w, v12.w) + v14.w = vadd(v15.w, v14.w) + } + vshuff(v19, v18, r28) + { vshuff(v21, v20, r28) + v16.w = vadd(v17.w, v16.w) + v18.w = vadd(v19.w, v18.w) + } + vshuff(v23, v22, r28) + { vshuff(v25, v24, r28) + v20.w = vadd(v21.w, v20.w) + v22.w = vadd(v23.w, v22.w) + } + vshuff(v27, v26, r28) + { vshuff(v29, v28, r28) + v24.w = vadd(v25.w, v24.w) + v26.w = vadd(v27.w, v26.w) + } + vshuff(v31, v30, r28) + { v28.w = vadd(v29.w, v28.w) + vshuff(v2, v0, r7) + } + { v30.w = vadd(v31.w, v30.w) + vshuff(v6, v4, r7) + v0.w = vadd(v0.w, v2.w) + } + { vshuff(v10, v8, r7) + v1.tmp = vmem(r4 + #0) /* update hist[0-31] */ + v0.w = vadd(v0.w, v1.w) + vmem(r4++#1) = v0.new + } + { vshuff(v14, v12, r7) + v4.w = vadd(v4.w, v6.w) + v8.w = vadd(v8.w, v10.w) + } + { vshuff(v18, v16, r7) + v1.tmp = vmem(r4 + #0) /* update hist[32-63] */ + v4.w = vadd(v4.w, v1.w) + vmem(r4++#1) = v4.new + } + { vshuff(v22, v20, r7) + v12.w = vadd(v12.w, v14.w) + V16.w = vadd(v16.w, v18.w) + } + { vshuff(v26, v24, r7) + v1.tmp = vmem(r4 + #0) /* update hist[64-95] */ + v8.w = vadd(v8.w, v1.w) + vmem(r4++#1) = v8.new + } + { vshuff(v30, v28, r7) + v1.tmp = vmem(r4 + #0) /* update hist[96-127] */ + v12.w = vadd(v12.w, v1.w) + vmem(r4++#1) = v12.new + } + + { v20.w = vadd(v20.w, v22.w) + v1.tmp = vmem(r4 + #0) /* update hist[128-159] */ + v16.w = vadd(v16.w, v1.w) + vmem(r4++#1) = v16.new + } + { v24.w = vadd(v24.w, v26.w) + v1.tmp = vmem(r4 + #0) /* update hist[160-191] */ + v20.w = vadd(v20.w, v1.w) + vmem(r4++#1) = v20.new + } + { v28.w = vadd(v28.w, v30.w) + v1.tmp = vmem(r4 + #0) /* update hist[192-223] */ + v24.w = vadd(v24.w, v1.w) + vmem(r4++#1) = v24.new + } + { v1.tmp = vmem(r4 + #0) /* update hist[224-255] */ + v28.w = vadd(v28.w, v1.w) + vmem(r4++#1) = v28.new + } + jumpr r31 + .size hvx_histogram_row, .-hvx_histogram_row diff --git a/tests/tcg/hexagon/hvx_histogram_row.h b/tests/tcg/hexagon/hvx_histogram_row.h new file mode 100644 index 0000000000..6a4531a92d --- /dev/null +++ b/tests/tcg/hexagon/hvx_histogram_row.h @@ -0,0 +1,24 @@ +/* + * Copyright(c) 2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef HVX_HISTOGRAM_ROW_H +#define HVX_HISTOGRAM_ROW_H + +void hvx_histogram_row(uint8_t *src, int stride, int width, int height, + int *hist); + +#endif diff --git a/tests/tcg/hexagon/hvx_misc.c b/tests/tcg/hexagon/hvx_misc.c new file mode 100644 index 0000000000..6e2c9ab3cd --- /dev/null +++ b/tests/tcg/hexagon/hvx_misc.c @@ -0,0 +1,583 @@ +/* + * Copyright(c) 2021-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include +#include +#include +#include +#include + +int err; + +static void __check(int line, int i, int j, uint64_t result, uint64_t expect) +{ + if (result != expect) { + printf("ERROR at line %d: [%d][%d] 0x%016llx != 0x%016llx\n", + line, i, j, result, expect); + err++; + } +} + +#define check(RES, EXP) __check(__LINE__, RES, EXP) + +#define MAX_VEC_SIZE_BYTES 128 + +typedef union { + uint64_t ud[MAX_VEC_SIZE_BYTES / 8]; + int64_t d[MAX_VEC_SIZE_BYTES / 8]; + uint32_t uw[MAX_VEC_SIZE_BYTES / 4]; + int32_t w[MAX_VEC_SIZE_BYTES / 4]; + uint16_t uh[MAX_VEC_SIZE_BYTES / 2]; + int16_t h[MAX_VEC_SIZE_BYTES / 2]; + uint8_t ub[MAX_VEC_SIZE_BYTES / 1]; + int8_t b[MAX_VEC_SIZE_BYTES / 1]; +} MMVector; + +#define BUFSIZE 16 +#define OUTSIZE 16 +#define MASKMOD 3 + +MMVector buffer0[BUFSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); +MMVector buffer1[BUFSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); +MMVector mask[BUFSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); +MMVector output[OUTSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); +MMVector expect[OUTSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); + +#define CHECK_OUTPUT_FUNC(FIELD, FIELDSZ) \ +static void check_output_##FIELD(int line, size_t num_vectors) \ +{ \ + for (int i = 0; i < num_vectors; i++) { \ + for (int j = 0; j < MAX_VEC_SIZE_BYTES / FIELDSZ; j++) { \ + __check(line, i, j, output[i].FIELD[j], expect[i].FIELD[j]); \ + } \ + } \ +} + +CHECK_OUTPUT_FUNC(d, 8) +CHECK_OUTPUT_FUNC(w, 4) +CHECK_OUTPUT_FUNC(h, 2) +CHECK_OUTPUT_FUNC(b, 1) + +static void init_buffers(void) +{ + int counter0 = 0; + int counter1 = 17; + for (int i = 0; i < BUFSIZE; i++) { + for (int j = 0; j < MAX_VEC_SIZE_BYTES; j++) { + buffer0[i].b[j] = counter0++; + buffer1[i].b[j] = counter1++; + } + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { + mask[i].w[j] = (i + j % MASKMOD == 0) ? 
0 : 1; + } + } +} + +static void test_load_tmp(void) +{ + void *p0 = buffer0; + void *p1 = buffer1; + void *pout = output; + + for (int i = 0; i < BUFSIZE; i++) { + /* + * Load into v12 as .tmp, then use it in the next packet + * Should get the new value within the same packet and + * the old value in the next packet + */ + asm("v3 = vmem(%0 + #0)\n\t" + "r1 = #1\n\t" + "v12 = vsplat(r1)\n\t" + "{\n\t" + " v12.tmp = vmem(%1 + #0)\n\t" + " v4.w = vadd(v12.w, v3.w)\n\t" + "}\n\t" + "v4.w = vadd(v4.w, v12.w)\n\t" + "vmem(%2 + #0) = v4\n\t" + : : "r"(p0), "r"(p1), "r"(pout) + : "r1", "v12", "v3", "v4", "v6", "memory"); + p0 += sizeof(MMVector); + p1 += sizeof(MMVector); + pout += sizeof(MMVector); + + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { + expect[i].w[j] = buffer0[i].w[j] + buffer1[i].w[j] + 1; + } + } + + check_output_w(__LINE__, BUFSIZE); +} + +static void test_load_cur(void) +{ + void *p0 = buffer0; + void *pout = output; + + for (int i = 0; i < BUFSIZE; i++) { + asm("{\n\t" + " v2.cur = vmem(%0 + #0)\n\t" + " vmem(%1 + #0) = v2\n\t" + "}\n\t" + : : "r"(p0), "r"(pout) : "v2", "memory"); + p0 += sizeof(MMVector); + pout += sizeof(MMVector); + + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { + expect[i].uw[j] = buffer0[i].uw[j]; + } + } + + check_output_w(__LINE__, BUFSIZE); +} + +static void test_load_aligned(void) +{ + /* Aligned loads ignore the low bits of the address */ + void *p0 = buffer0; + void *pout = output; + const size_t offset = 13; + + p0 += offset; /* Create an unaligned address */ + asm("v2 = vmem(%0 + #0)\n\t" + "vmem(%1 + #0) = v2\n\t" + : : "r"(p0), "r"(pout) : "v2", "memory"); + + expect[0] = buffer0[0]; + + check_output_w(__LINE__, 1); +} + +static void test_load_unaligned(void) +{ + void *p0 = buffer0; + void *pout = output; + const size_t offset = 12; + + p0 += offset; /* Create an unaligned address */ + asm("v2 = vmemu(%0 + #0)\n\t" + "vmem(%1 + #0) = v2\n\t" + : : "r"(p0), "r"(pout) : "v2", "memory"); + + memcpy(expect, &buffer0[0].ub[offset], sizeof(MMVector)); + + check_output_w(__LINE__, 1); +} + +static void test_store_aligned(void) +{ + /* Aligned stores ignore the low bits of the address */ + void *p0 = buffer0; + void *pout = output; + const size_t offset = 13; + + pout += offset; /* Create an unaligned address */ + asm("v2 = vmem(%0 + #0)\n\t" + "vmem(%1 + #0) = v2\n\t" + : : "r"(p0), "r"(pout) : "v2", "memory"); + + expect[0] = buffer0[0]; + + check_output_w(__LINE__, 1); +} + +static void test_store_unaligned(void) +{ + void *p0 = buffer0; + void *pout = output; + const size_t offset = 12; + + pout += offset; /* Create an unaligned address */ + asm("v2 = vmem(%0 + #0)\n\t" + "vmemu(%1 + #0) = v2\n\t" + : : "r"(p0), "r"(pout) : "v2", "memory"); + + memcpy(expect, buffer0, 2 * sizeof(MMVector)); + memcpy(&expect[0].ub[offset], buffer0, sizeof(MMVector)); + + check_output_w(__LINE__, 2); +} + +static void test_masked_store(bool invert) +{ + void *p0 = buffer0; + void *pmask = mask; + void *pout = output; + + memset(expect, 0xff, sizeof(expect)); + memset(output, 0xff, sizeof(expect)); + + for (int i = 0; i < BUFSIZE; i++) { + if (invert) { + asm("r4 = #0\n\t" + "v4 = vsplat(r4)\n\t" + "v5 = vmem(%0 + #0)\n\t" + "q0 = vcmp.eq(v4.w, v5.w)\n\t" + "v5 = vmem(%1)\n\t" + "if (!q0) vmem(%2) = v5\n\t" /* Inverted test */ + : : "r"(pmask), "r"(p0), "r"(pout) + : "r4", "v4", "v5", "q0", "memory"); + } else { + asm("r4 = #0\n\t" + "v4 = vsplat(r4)\n\t" + "v5 = vmem(%0 + #0)\n\t" + "q0 = vcmp.eq(v4.w, v5.w)\n\t" + "v5 = vmem(%1)\n\t" + "if (q0) vmem(%2) 
= v5\n\t" /* Non-inverted test */ + : : "r"(pmask), "r"(p0), "r"(pout) + : "r4", "v4", "v5", "q0", "memory"); + } + p0 += sizeof(MMVector); + pmask += sizeof(MMVector); + pout += sizeof(MMVector); + + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { + if (invert) { + if (i + j % MASKMOD != 0) { + expect[i].w[j] = buffer0[i].w[j]; + } + } else { + if (i + j % MASKMOD == 0) { + expect[i].w[j] = buffer0[i].w[j]; + } + } + } + } + + check_output_w(__LINE__, BUFSIZE); +} + +static void test_new_value_store(void) +{ + void *p0 = buffer0; + void *pout = output; + + asm("{\n\t" + " v2 = vmem(%0 + #0)\n\t" + " vmem(%1 + #0) = v2.new\n\t" + "}\n\t" + : : "r"(p0), "r"(pout) : "v2", "memory"); + + expect[0] = buffer0[0]; + + check_output_w(__LINE__, 1); +} + +static void test_max_temps() +{ + void *p0 = buffer0; + void *pout = output; + + asm("v0 = vmem(%0 + #0)\n\t" + "v1 = vmem(%0 + #1)\n\t" + "v2 = vmem(%0 + #2)\n\t" + "v3 = vmem(%0 + #3)\n\t" + "v4 = vmem(%0 + #4)\n\t" + "{\n\t" + " v1:0.w = vadd(v3:2.w, v1:0.w)\n\t" + " v2.b = vshuffe(v3.b, v2.b)\n\t" + " v3.w = vadd(v1.w, v4.w)\n\t" + " v4.tmp = vmem(%0 + #5)\n\t" + "}\n\t" + "vmem(%1 + #0) = v0\n\t" + "vmem(%1 + #1) = v1\n\t" + "vmem(%1 + #2) = v2\n\t" + "vmem(%1 + #3) = v3\n\t" + "vmem(%1 + #4) = v4\n\t" + : : "r"(p0), "r"(pout) : "memory"); + + /* The first two vectors come from the vadd-pair instruction */ + for (int i = 0; i < MAX_VEC_SIZE_BYTES / 4; i++) { + expect[0].w[i] = buffer0[0].w[i] + buffer0[2].w[i]; + expect[1].w[i] = buffer0[1].w[i] + buffer0[3].w[i]; + } + /* The third vector comes from the vshuffe instruction */ + for (int i = 0; i < MAX_VEC_SIZE_BYTES / 2; i++) { + expect[2].uh[i] = (buffer0[2].uh[i] & 0xff) | + (buffer0[3].uh[i] & 0xff) << 8; + } + /* The fourth vector comes from the vadd-single instruction */ + for (int i = 0; i < MAX_VEC_SIZE_BYTES / 4; i++) { + expect[3].w[i] = buffer0[1].w[i] + buffer0[5].w[i]; + } + /* + * The fifth vector comes from the load to v4 + * make sure the .tmp is dropped + */ + expect[4] = buffer0[4]; + + check_output_b(__LINE__, 5); +} + +#define VEC_OP1(ASM, EL, IN, OUT) \ + asm("v2 = vmem(%0 + #0)\n\t" \ + "v2" #EL " = " #ASM "(v2" #EL ")\n\t" \ + "vmem(%1 + #0) = v2\n\t" \ + : : "r"(IN), "r"(OUT) : "v2", "memory") + +#define VEC_OP2(ASM, EL, IN0, IN1, OUT) \ + asm("v2 = vmem(%0 + #0)\n\t" \ + "v3 = vmem(%1 + #0)\n\t" \ + "v2" #EL " = " #ASM "(v2" #EL ", v3" #EL ")\n\t" \ + "vmem(%2 + #0) = v2\n\t" \ + : : "r"(IN0), "r"(IN1), "r"(OUT) : "v2", "v3", "memory") + +#define TEST_VEC_OP1(NAME, ASM, EL, FIELD, FIELDSZ, OP) \ +static void test_##NAME(void) \ +{ \ + void *pin = buffer0; \ + void *pout = output; \ + for (int i = 0; i < BUFSIZE; i++) { \ + VEC_OP1(ASM, EL, pin, pout); \ + pin += sizeof(MMVector); \ + pout += sizeof(MMVector); \ + } \ + for (int i = 0; i < BUFSIZE; i++) { \ + for (int j = 0; j < MAX_VEC_SIZE_BYTES / FIELDSZ; j++) { \ + expect[i].FIELD[j] = OP buffer0[i].FIELD[j]; \ + } \ + } \ + check_output_##FIELD(__LINE__, BUFSIZE); \ +} + +#define TEST_VEC_OP2(NAME, ASM, EL, FIELD, FIELDSZ, OP) \ +static void test_##NAME(void) \ +{ \ + void *p0 = buffer0; \ + void *p1 = buffer1; \ + void *pout = output; \ + for (int i = 0; i < BUFSIZE; i++) { \ + VEC_OP2(ASM, EL, p0, p1, pout); \ + p0 += sizeof(MMVector); \ + p1 += sizeof(MMVector); \ + pout += sizeof(MMVector); \ + } \ + for (int i = 0; i < BUFSIZE; i++) { \ + for (int j = 0; j < MAX_VEC_SIZE_BYTES / FIELDSZ; j++) { \ + expect[i].FIELD[j] = buffer0[i].FIELD[j] OP buffer1[i].FIELD[j]; \ + } \ + } \ + 
check_output_##FIELD(__LINE__, BUFSIZE); \ +} + +#define THRESHOLD 31 + +#define PRED_OP2(ASM, IN0, IN1, OUT, INV) \ + asm("r4 = #%3\n\t" \ + "v1.b = vsplat(r4)\n\t" \ + "v2 = vmem(%0 + #0)\n\t" \ + "q0 = vcmp.gt(v2.b, v1.b)\n\t" \ + "v3 = vmem(%1 + #0)\n\t" \ + "q1 = vcmp.gt(v3.b, v1.b)\n\t" \ + "q2 = " #ASM "(q0, " INV "q1)\n\t" \ + "r4 = #0xff\n\t" \ + "v1.b = vsplat(r4)\n\t" \ + "if (q2) vmem(%2 + #0) = v1\n\t" \ + : : "r"(IN0), "r"(IN1), "r"(OUT), "i"(THRESHOLD) \ + : "r4", "v1", "v2", "v3", "q0", "q1", "q2", "memory") + +#define TEST_PRED_OP2(NAME, ASM, OP, INV) \ +static void test_##NAME(bool invert) \ +{ \ + void *p0 = buffer0; \ + void *p1 = buffer1; \ + void *pout = output; \ + memset(output, 0, sizeof(expect)); \ + for (int i = 0; i < BUFSIZE; i++) { \ + PRED_OP2(ASM, p0, p1, pout, INV); \ + p0 += sizeof(MMVector); \ + p1 += sizeof(MMVector); \ + pout += sizeof(MMVector); \ + } \ + for (int i = 0; i < BUFSIZE; i++) { \ + for (int j = 0; j < MAX_VEC_SIZE_BYTES; j++) { \ + bool p0 = (buffer0[i].b[j] > THRESHOLD); \ + bool p1 = (buffer1[i].b[j] > THRESHOLD); \ + if (invert) { \ + expect[i].b[j] = (p0 OP !p1) ? 0xff : 0x00; \ + } else { \ + expect[i].b[j] = (p0 OP p1) ? 0xff : 0x00; \ + } \ + } \ + } \ + check_output_b(__LINE__, BUFSIZE); \ +} + +TEST_VEC_OP2(vadd_w, vadd, .w, w, 4, +) +TEST_VEC_OP2(vadd_h, vadd, .h, h, 2, +) +TEST_VEC_OP2(vadd_b, vadd, .b, b, 1, +) +TEST_VEC_OP2(vsub_w, vsub, .w, w, 4, -) +TEST_VEC_OP2(vsub_h, vsub, .h, h, 2, -) +TEST_VEC_OP2(vsub_b, vsub, .b, b, 1, -) +TEST_VEC_OP2(vxor, vxor, , d, 8, ^) +TEST_VEC_OP2(vand, vand, , d, 8, &) +TEST_VEC_OP2(vor, vor, , d, 8, |) +TEST_VEC_OP1(vnot, vnot, , d, 8, ~) + +TEST_PRED_OP2(pred_or, or, |, "") +TEST_PRED_OP2(pred_or_n, or, |, "!") +TEST_PRED_OP2(pred_and, and, &, "") +TEST_PRED_OP2(pred_and_n, and, &, "!") +TEST_PRED_OP2(pred_xor, xor, ^, "") + +static void test_vadduwsat(void) +{ + /* + * Test for saturation by adding two numbers that add to more than UINT_MAX + * and make sure the result saturates to UINT_MAX + */ + const uint32_t x = 0xffff0000; + const uint32_t y = 0x000fffff; + + memset(expect, 0x12, sizeof(MMVector)); + memset(output, 0x34, sizeof(MMVector)); + + asm volatile ("v10 = vsplat(%0)\n\t" + "v11 = vsplat(%1)\n\t" + "v21.uw = vadd(v11.uw, v10.uw):sat\n\t" + "vmem(%2+#0) = v21\n\t" + : /* no outputs */ + : "r"(x), "r"(y), "r"(output) + : "v10", "v11", "v21", "memory"); + + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { + expect[0].uw[j] = UINT_MAX; + } + + check_output_w(__LINE__, 1); +} + +static void test_vsubuwsat_dv(void) +{ + /* + * Test for saturation by subtracting two numbers where the result is + * negative and make sure the result saturates to zero + * + * vsubuwsat_dv operates on an HVX register pair, so we'll have a + * pair of subtractions + * w - x < 0 + * y - z < 0 + */ + const uint32_t w = 0x000000b7; + const uint32_t x = 0xffffff4e; + const uint32_t y = 0x31fe88e7; + const uint32_t z = 0x7fffff79; + + memset(expect, 0x12, sizeof(MMVector) * 2); + memset(output, 0x34, sizeof(MMVector) * 2); + + asm volatile ("v16 = vsplat(%0)\n\t" + "v17 = vsplat(%1)\n\t" + "v26 = vsplat(%2)\n\t" + "v27 = vsplat(%3)\n\t" + "v25:24.uw = vsub(v17:16.uw, v27:26.uw):sat\n\t" + "vmem(%4+#0) = v24\n\t" + "vmem(%4+#1) = v25\n\t" + : /* no outputs */ + : "r"(w), "r"(y), "r"(x), "r"(z), "r"(output) + : "v16", "v17", "v24", "v25", "v26", "v27", "memory"); + + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { + expect[0].uw[j] = 0x00000000; + expect[1].uw[j] = 0x00000000; + } + + 
check_output_w(__LINE__, 2); +} + +static void test_vshuff(void) +{ + /* Test that vshuff works when the two operands are the same register */ + const uint32_t splat = 0x089be55c; + const uint32_t shuff = 0x454fa926; + MMVector v0, v1; + + memset(expect, 0x12, sizeof(MMVector)); + memset(output, 0x34, sizeof(MMVector)); + + asm volatile("v25 = vsplat(%0)\n\t" + "vshuff(v25, v25, %1)\n\t" + "vmem(%2 + #0) = v25\n\t" + : /* no outputs */ + : "r"(splat), "r"(shuff), "r"(output) + : "v25", "memory"); + + /* + * The semantics of Hexagon are the operands are pass-by-value, so create + * two copies of the vsplat result. + */ + for (int i = 0; i < MAX_VEC_SIZE_BYTES / 4; i++) { + v0.uw[i] = splat; + v1.uw[i] = splat; + } + /* Do the vshuff operation */ + for (int offset = 1; offset < MAX_VEC_SIZE_BYTES; offset <<= 1) { + if (shuff & offset) { + for (int k = 0; k < MAX_VEC_SIZE_BYTES; k++) { + if (!(k & offset)) { + uint8_t tmp = v0.ub[k]; + v0.ub[k] = v1.ub[k + offset]; + v1.ub[k + offset] = tmp; + } + } + } + } + /* Put the result in the expect buffer for verification */ + expect[0] = v1; + + check_output_b(__LINE__, 1); +} + +int main() +{ + init_buffers(); + + test_load_tmp(); + test_load_cur(); + test_load_aligned(); + test_load_unaligned(); + test_store_aligned(); + test_store_unaligned(); + test_masked_store(false); + test_masked_store(true); + test_new_value_store(); + test_max_temps(); + + test_vadd_w(); + test_vadd_h(); + test_vadd_b(); + test_vsub_w(); + test_vsub_h(); + test_vsub_b(); + test_vxor(); + test_vand(); + test_vor(); + test_vnot(); + + test_pred_or(false); + test_pred_or_n(true); + test_pred_and(false); + test_pred_and_n(true); + test_pred_xor(false); + + test_vadduwsat(); + test_vsubuwsat_dv(); + + test_vshuff(); + + puts(err ? "FAIL" : "PASS"); + return err ? 1 : 0; +} diff --git a/tests/tcg/hexagon/load_unpack.c b/tests/tcg/hexagon/load_unpack.c index 3575a37a28..4aa26fc388 100644 --- a/tests/tcg/hexagon/load_unpack.c +++ b/tests/tcg/hexagon/load_unpack.c @@ -245,7 +245,7 @@ TEST_pr(loadbsw4_pr, long long, S, 4, 0x0000ff000000ff00LL, */ #define BxW_LOAD_pbr(SZ, RES, PTR) \ __asm__( \ - "r4 = #(1 << (16 - 3))\n\t" \ + "r4 = #(1 << (16 - 4))\n\t" \ "m0 = r4\n\t" \ "%0 = mem" #SZ "(%1++m0:brev)\n\t" \ : "=r"(RES), "+r"(PTR) \ @@ -273,15 +273,15 @@ void test_##NAME(void) \ } TEST_pbr(loadbzw2_pbr, int, Z, 0x00000000, - 0x00020081, 0x00060085, 0x00040083, 0x00080087) + 0x00020081, 0x000a0089, 0x00060085, 0x000e008d) TEST_pbr(loadbsw2_pbr, int, S, 0x0000ff00, - 0x00020081, 0x00060085, 0x00040083, 0x00080087) + 0x00020081, 0x000aff89, 0x0006ff85, 0x000eff8d) TEST_pbr(loadbzw4_pbr, long long, Z, 0x0000000000000000LL, - 0x0004008300020081LL, 0x0008008700060085LL, - 0x0006008500040083LL, 0x000a008900080087LL) + 0x0004008300020081LL, 0x000c008b000a0089LL, + 0x0008008700060085LL, 0x0010008f000e008dLL) TEST_pbr(loadbsw4_pbr, long long, S, 0x0000ff000000ff00LL, - 0x0004008300020081LL, 0x0008008700060085LL, - 0x0006008500040083LL, 0x000a008900080087LL) + 0x0004008300020081LL, 0x000cff8b000aff89LL, + 0x0008ff870006ff85LL, 0x0010ff8f000eff8dLL) /* **************************************************************************** diff --git a/tests/tcg/hexagon/mem_noshuf.c b/tests/tcg/hexagon/mem_noshuf.c index dd714d5e98..0f4064e700 100644 --- a/tests/tcg/hexagon/mem_noshuf.c +++ b/tests/tcg/hexagon/mem_noshuf.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -84,6 +84,70 @@ MEM_NOSHUF32(mem_noshuf_sd_luh, long long, unsigned short, memd, memuh) MEM_NOSHUF32(mem_noshuf_sd_lw, long long, signed int, memd, memw) MEM_NOSHUF64(mem_noshuf_sd_ld, long long, signed long long, memd, memd) +static inline int pred_lw_sw(int pred, int *p, int *q, int x, int y) +{ + int ret; + asm volatile("p0 = cmp.eq(%5, #0)\n\t" + "%0 = %3\n\t" + "{\n\t" + " memw(%1) = %4\n\t" + " if (!p0) %0 = memw(%2)\n\t" + "}:mem_noshuf\n" + : "=&r"(ret) + : "r"(p), "r"(q), "r"(x), "r"(y), "r"(pred) + : "p0", "memory"); + return ret; +} + +static inline int pred_lw_sw_pi(int pred, int *p, int *q, int x, int y) +{ + int ret; + asm volatile("p0 = cmp.eq(%5, #0)\n\t" + "%0 = %3\n\t" + "r7 = %2\n\t" + "{\n\t" + " memw(%1) = %4\n\t" + " if (!p0) %0 = memw(r7++#4)\n\t" + "}:mem_noshuf\n" + : "=&r"(ret) + : "r"(p), "r"(q), "r"(x), "r"(y), "r"(pred) + : "r7", "p0", "memory"); + return ret; +} + +static inline long long pred_ld_sd(int pred, long long *p, long long *q, + long long x, long long y) +{ + unsigned long long ret; + asm volatile("p0 = cmp.eq(%5, #0)\n\t" + "%0 = %3\n\t" + "{\n\t" + " memd(%1) = %4\n\t" + " if (!p0) %0 = memd(%2)\n\t" + "}:mem_noshuf\n" + : "=&r"(ret) + : "r"(p), "r"(q), "r"(x), "r"(y), "r"(pred) + : "p0", "memory"); + return ret; +} + +static inline long long pred_ld_sd_pi(int pred, long long *p, long long *q, + long long x, long long y) +{ + long long ret; + asm volatile("p0 = cmp.eq(%5, #0)\n\t" + "%0 = %3\n\t" + "r7 = %2\n\t" + "{\n\t" + " memd(%1) = %4\n\t" + " if (!p0) %0 = memd(r7++#8)\n\t" + "}:mem_noshuf\n" + : "=&r"(ret) + : "r"(p), "r"(q), "r"(x), "r"(y), "r"(pred) + : "p0", "memory"); + return ret; +} + static inline unsigned int cancel_sw_lb(int pred, int *p, signed char *q, int x) { unsigned int ret; @@ -126,18 +190,22 @@ typedef union { int err; -static void check32(int n, int expect) +#define check32(n, expect) check32_(n, expect, __LINE__) + +static void check32_(int n, int expect, int line) { if (n != expect) { - printf("ERROR: 0x%08x != 0x%08x\n", n, expect); + printf("ERROR: 0x%08x != 0x%08x, line %d\n", n, expect, line); err++; } } -static void check64(long long n, long long expect) +#define check64(n, expect) check64_(n, expect, __LINE__) + +static void check64_(long long n, long long expect, int line) { if (n != expect) { - printf("ERROR: 0x%08llx != 0x%08llx\n", n, expect); + printf("ERROR: 0x%08llx != 0x%08llx, line %d\n", n, expect, line); err++; } } @@ -323,6 +391,50 @@ int main() res64 = mem_noshuf_sd_ld(&n.d[0], &n.d[1], 0x123456789abcdef0LL); check64(res64, 0xffffffffffffffffLL); + n.w[0] = ~0; + res32 = pred_lw_sw(0, &n.w[0], &n.w[0], 0x12345678, 0xc0ffeeda); + check32(res32, 0x12345678); + check32(n.w[0], 0xc0ffeeda); + + n.w[0] = ~0; + res32 = pred_lw_sw(1, &n.w[0], &n.w[0], 0x12345678, 0xc0ffeeda); + check32(res32, 0xc0ffeeda); + check32(n.w[0], 0xc0ffeeda); + + n.w[0] = ~0; + res32 = pred_lw_sw_pi(0, &n.w[0], &n.w[0], 0x12345678, 0xc0ffeeda); + check32(res32, 0x12345678); + check32(n.w[0], 0xc0ffeeda); + + n.w[0] = ~0; + res32 = pred_lw_sw_pi(1, &n.w[0], &n.w[0], 0x12345678, 0xc0ffeeda); + check32(res32, 0xc0ffeeda); + check32(n.w[0], 0xc0ffeeda); + + n.d[0] = ~0LL; + res64 = pred_ld_sd(0, &n.d[0], &n.d[0], + 0x1234567812345678LL, 0xc0ffeedac0ffeedaLL); + check64(res64, 0x1234567812345678LL); + check64(n.d[0], 0xc0ffeedac0ffeedaLL); + + n.d[0] = ~0LL; + res64 = pred_ld_sd(1, &n.d[0], 
&n.d[0], + 0x1234567812345678LL, 0xc0ffeedac0ffeedaLL); + check64(res64, 0xc0ffeedac0ffeedaLL); + check64(n.d[0], 0xc0ffeedac0ffeedaLL); + + n.d[0] = ~0LL; + res64 = pred_ld_sd_pi(0, &n.d[0], &n.d[0], + 0x1234567812345678LL, 0xc0ffeedac0ffeedaLL); + check64(res64, 0x1234567812345678LL); + check64(n.d[0], 0xc0ffeedac0ffeedaLL); + + n.d[0] = ~0LL; + res64 = pred_ld_sd_pi(1, &n.d[0], &n.d[0], + 0x1234567812345678LL, 0xc0ffeedac0ffeedaLL); + check64(res64, 0xc0ffeedac0ffeedaLL); + check64(n.d[0], 0xc0ffeedac0ffeedaLL); + puts(err ? "FAIL" : "PASS"); return err; } diff --git a/tests/tcg/hexagon/mem_noshuf_exception.c b/tests/tcg/hexagon/mem_noshuf_exception.c new file mode 100644 index 0000000000..08660ea3e1 --- /dev/null +++ b/tests/tcg/hexagon/mem_noshuf_exception.c @@ -0,0 +1,146 @@ +/* + * Copyright(c) 2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +/* + * Test the VLIW semantics of exceptions with mem_noshuf + * + * When a packet has the :mem_noshuf attribute, the semantics dictate + * That the load will get the data from the store if the addresses overlap. + * To accomplish this, we perform the store first. However, we have to + * handle the case where the store raises an exception. In that case, the + * store should not alter the machine state. + * + * We test this with a mem_noshuf packet with a store to a global variable, + * "should_not_change" and a load from NULL. After the SIGSEGV is caught, + * we check * that the "should_not_change" value is the same. + * + * We also check that a predicated load where the predicate is false doesn't + * raise an exception and allows the store to happen. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +int err; +int segv_caught; + +#define SHOULD_NOT_CHANGE_VAL 5 +int should_not_change = SHOULD_NOT_CHANGE_VAL; + +#define OK_TO_CHANGE_VAL 13 +int ok_to_change = OK_TO_CHANGE_VAL; + +static void __check(const char *filename, int line, int x, int expect) +{ + if (x != expect) { + printf("ERROR %s:%d - %d != %d\n", + filename, line, x, expect); + err++; + } +} + +#define check(x, expect) __check(__FILE__, __LINE__, (x), (expect)) + +static void __chk_error(const char *filename, int line, int ret) +{ + if (ret < 0) { + printf("ERROR %s:%d - %d\n", filename, line, ret); + err++; + } +} + +#define chk_error(ret) __chk_error(__FILE__, __LINE__, (ret)) + +jmp_buf jmp_env; + +static void sig_segv(int sig, siginfo_t *info, void *puc) +{ + check(sig, SIGSEGV); + segv_caught = 1; + longjmp(jmp_env, 1); +} + +int main() +{ + struct sigaction act; + int dummy32; + long long dummy64; + void *p; + + /* SIGSEGV test */ + act.sa_sigaction = sig_segv; + sigemptyset(&act.sa_mask); + act.sa_flags = SA_SIGINFO; + chk_error(sigaction(SIGSEGV, &act, NULL)); + if (setjmp(jmp_env) == 0) { + asm volatile("r18 = ##should_not_change\n\t" + "r19 = #0\n\t" + "{\n\t" + " memw(r18) = #7\n\t" + " %0 = memw(r19)\n\t" + "}:mem_noshuf\n\t" + : "=r"(dummy32) : : "r18", "r19", "memory"); + } + + act.sa_handler = SIG_DFL; + sigemptyset(&act.sa_mask); + act.sa_flags = 0; + chk_error(sigaction(SIGSEGV, &act, NULL)); + + check(segv_caught, 1); + check(should_not_change, SHOULD_NOT_CHANGE_VAL); + + /* + * Check that a predicated load where the predicate is false doesn't + * raise an exception and allows the store to happen. + */ + asm volatile("r18 = ##ok_to_change\n\t" + "r19 = #0\n\t" + "p0 = cmp.gt(r0, r0)\n\t" + "{\n\t" + " memw(r18) = #7\n\t" + " if (p0) %0 = memw(r19)\n\t" + "}:mem_noshuf\n\t" + : "=r"(dummy32) : : "r18", "r19", "p0", "memory"); + + check(ok_to_change, 7); + + /* + * Also check that the post-increment doesn't happen when the + * predicate is false. + */ + ok_to_change = OK_TO_CHANGE_VAL; + p = NULL; + asm volatile("r18 = ##ok_to_change\n\t" + "p0 = cmp.gt(r0, r0)\n\t" + "{\n\t" + " memw(r18) = #9\n\t" + " if (p0) %1 = memd(%0 ++ #8)\n\t" + "}:mem_noshuf\n\t" + : "+r"(p), "=r"(dummy64) : : "r18", "p0", "memory"); + + check(ok_to_change, 9); + check((int)p, (int)NULL); + + puts(err ? "FAIL" : "PASS"); + return err ? EXIT_FAILURE : EXIT_SUCCESS; +} diff --git a/tests/tcg/hexagon/overflow.c b/tests/tcg/hexagon/overflow.c new file mode 100644 index 0000000000..94087851b0 --- /dev/null +++ b/tests/tcg/hexagon/overflow.c @@ -0,0 +1,166 @@ +/* + * Copyright(c) 2021-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include + + +int err; + +static void __check(const char *filename, int line, int x, int expect) +{ + if (x != expect) { + printf("ERROR %s:%d - %d != %d\n", + filename, line, x, expect); + err++; + } +} + +#define check(x, expect) __check(__FILE__, __LINE__, (x), (expect)) + +static int satub(int src, int *p, int *ovf_result) +{ + int result; + int usr; + + /* + * This instruction can set bit 0 (OVF/overflow) in usr + * Clear the bit first, then return that bit to the caller + * + * We also store the src into *p in the same packet, so we + * can ensure the overflow doesn't get set when an exception + * is generated. + */ + asm volatile("r2 = usr\n\t" + "r2 = clrbit(r2, #0)\n\t" /* clear overflow bit */ + "usr = r2\n\t" + "{\n\t" + " %0 = satub(%2)\n\t" + " memw(%3) = %2\n\t" + "}\n\t" + "%1 = usr\n\t" + : "=r"(result), "=r"(usr) + : "r"(src), "r"(p) + : "r2", "usr", "memory"); + *ovf_result = (usr & 1); + return result; +} + +int read_usr_overflow(void) +{ + int result; + asm volatile("%0 = usr\n\t" : "=r"(result)); + return result & 1; +} + +int get_usr_overflow(int usr) +{ + return usr & 1; +} + +int get_usr_fp_invalid(int usr) +{ + return (usr >> 1) & 1; +} + +int get_usr_lpcfg(int usr) +{ + return (usr >> 8) & 0x3; +} + +jmp_buf jmp_env; +int usr_overflow; + +static void sig_segv(int sig, siginfo_t *info, void *puc) +{ + usr_overflow = read_usr_overflow(); + longjmp(jmp_env, 1); +} + +static void test_packet(void) +{ + int convres; + int satres; + int usr; + + asm("r2 = usr\n\t" + "r2 = clrbit(r2, #0)\n\t" /* clear overflow bit */ + "r2 = clrbit(r2, #1)\n\t" /* clear FP invalid bit */ + "usr = r2\n\t" + "{\n\t" + " %0 = convert_sf2uw(%3):chop\n\t" + " %1 = satb(%4)\n\t" + "}\n\t" + "%2 = usr\n\t" + : "=r"(convres), "=r"(satres), "=r"(usr) + : "r"(0x6a051b86), "r"(0x0410eec0) + : "r2", "usr"); + + check(convres, 0xffffffff); + check(satres, 0x7f); + check(get_usr_overflow(usr), 1); + check(get_usr_fp_invalid(usr), 1); + + asm("r2 = usr\n\t" + "r2 = clrbit(r2, #0)\n\t" /* clear overflow bit */ + "usr = r2\n\t" + "%2 = r2\n\t" + "p3 = sp3loop0(1f, #1)\n\t" + "1:\n\t" + "{\n\t" + " %0 = satb(%2)\n\t" + "}:endloop0\n\t" + "%1 = usr\n\t" + : "=r"(satres), "=r"(usr) + : "r"(0x0410eec0) + : "r2", "usr", "p3", "sa0", "lc0"); + + check(satres, 0x7f); + check(get_usr_overflow(usr), 1); + check(get_usr_lpcfg(usr), 2); +} + +int main() +{ + struct sigaction act; + int ovf; + + /* SIGSEGV test */ + act.sa_sigaction = sig_segv; + sigemptyset(&act.sa_mask); + act.sa_flags = SA_SIGINFO; + sigaction(SIGSEGV, &act, NULL); + if (setjmp(jmp_env) == 0) { + satub(300, 0, &ovf); + } + + act.sa_handler = SIG_DFL; + sigemptyset(&act.sa_mask); + act.sa_flags = 0; + + check(usr_overflow, 0); + + test_packet(); + + puts(err ? "FAIL" : "PASS"); + return err ? EXIT_FAILURE : EXIT_SUCCESS; +} diff --git a/tests/tcg/hexagon/preg_alias.c b/tests/tcg/hexagon/preg_alias.c index 0cac469b78..b44a8112b4 100644 --- a/tests/tcg/hexagon/preg_alias.c +++ b/tests/tcg/hexagon/preg_alias.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -57,17 +57,15 @@ typedef union { static inline void creg_alias(int cval, PRegs *pregs) { - unsigned char val; - asm volatile("c4 = %0" : : "r"(cval)); - - asm volatile("%0 = p0" : "=r"(val)); - pregs->pregs.p0 = val; - asm volatile("%0 = p1" : "=r"(val)); - pregs->pregs.p1 = val; - asm volatile("%0 = p2" : "=r"(val)); - pregs->pregs.p2 = val; - asm volatile("%0 = p3" : "=r"(val)); - pregs->pregs.p3 = val; + asm("c4 = %4\n\t" + "%0 = p0\n\t" + "%1 = p1\n\t" + "%2 = p2\n\t" + "%3 = p3\n\t" + : "=r"(pregs->pregs.p0), "=r"(pregs->pregs.p1), + "=r"(pregs->pregs.p2), "=r"(pregs->pregs.p3) + : "r"(cval) + : "p0", "p1", "p2", "p3"); } int err; @@ -83,22 +81,58 @@ static void check(int val, int expect) static inline void creg_alias_pair(unsigned int cval, PRegs *pregs) { unsigned long long cval_pair = (0xdeadbeefULL << 32) | cval; - unsigned char val; int c5; - asm volatile("c5:4 = %0" : : "r"(cval_pair)); - asm volatile("%0 = p0" : "=r"(val)); - pregs->pregs.p0 = val; - asm volatile("%0 = p1" : "=r"(val)); - pregs->pregs.p1 = val; - asm volatile("%0 = p2" : "=r"(val)); - pregs->pregs.p2 = val; - asm volatile("%0 = p3" : "=r"(val)); - pregs->pregs.p3 = val; - asm volatile("%0 = c5" : "=r"(c5)); + asm ("c5:4 = %5\n\t" + "%0 = p0\n\t" + "%1 = p1\n\t" + "%2 = p2\n\t" + "%3 = p3\n\t" + "%4 = c5\n\t" + : "=r"(pregs->pregs.p0), "=r"(pregs->pregs.p1), + "=r"(pregs->pregs.p2), "=r"(pregs->pregs.p3), "=r"(c5) + : "r"(cval_pair) + : "p0", "p1", "p2", "p3"); + check(c5, 0xdeadbeef); } +static void test_packet(void) +{ + /* + * Test that setting c4 inside a packet doesn't impact the predicates + * that are read during the packet. + */ + + int result; + int old_val = 0x0000001c; + + /* Test a predicated register transfer */ + result = old_val; + asm ( + "c4 = %1\n\t" + "{\n\t" + " c4 = %2\n\t" + " if (!p2) %0 = %3\n\t" + "}\n\t" + : "+r"(result) + : "r"(0xffffffff), "r"(0xff00ffff), "r"(0x837ed653) + : "p0", "p1", "p2", "p3"); + check(result, old_val); + + /* Test a predicated store */ + result = 0xffffffff; + asm ("c4 = %0\n\t" + "{\n\t" + " c4 = %1\n\t" + " if (!p2) memw(%2) = #0\n\t" + "}\n\t" + : + : "r"(0), "r"(0xffffffff), "r"(&result) + : "p0", "p1", "p2", "p3", "memory"); + check(result, 0x0); +} + int main() { int c4; @@ -164,6 +198,8 @@ int main() creg_alias_pair(0xffffffff, &pregs); check(pregs.creg, 0xffffffff); + test_packet(); + puts(err ? "FAIL" : "PASS"); return err; } diff --git a/tests/tcg/hexagon/scatter_gather.c b/tests/tcg/hexagon/scatter_gather.c new file mode 100644 index 0000000000..b93eb18133 --- /dev/null +++ b/tests/tcg/hexagon/scatter_gather.c @@ -0,0 +1,1011 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +/* + * This example tests the HVX scatter/gather instructions + * + * See section 5.13 of the V68 HVX Programmer's Reference + * + * There are 3 main classes operations + * _16 16-bit elements and 16-bit offsets + * _32 32-bit elements and 32-bit offsets + * _16_32 16-bit elements and 32-bit offsets + * + * There are also masked and accumulate versions + */ + +#include +#include +#include +#include + +typedef long HVX_Vector __attribute__((__vector_size__(128))) + __attribute__((aligned(128))); +typedef long HVX_VectorPair __attribute__((__vector_size__(256))) + __attribute__((aligned(128))); +typedef long HVX_VectorPred __attribute__((__vector_size__(128))) + __attribute__((aligned(128))); + +#define VSCATTER_16(BASE, RGN, OFF, VALS) \ + __builtin_HEXAGON_V6_vscattermh_128B((int)BASE, RGN, OFF, VALS) +#define VSCATTER_16_MASKED(MASK, BASE, RGN, OFF, VALS) \ + __builtin_HEXAGON_V6_vscattermhq_128B(MASK, (int)BASE, RGN, OFF, VALS) +#define VSCATTER_32(BASE, RGN, OFF, VALS) \ + __builtin_HEXAGON_V6_vscattermw_128B((int)BASE, RGN, OFF, VALS) +#define VSCATTER_32_MASKED(MASK, BASE, RGN, OFF, VALS) \ + __builtin_HEXAGON_V6_vscattermwq_128B(MASK, (int)BASE, RGN, OFF, VALS) +#define VSCATTER_16_32(BASE, RGN, OFF, VALS) \ + __builtin_HEXAGON_V6_vscattermhw_128B((int)BASE, RGN, OFF, VALS) +#define VSCATTER_16_32_MASKED(MASK, BASE, RGN, OFF, VALS) \ + __builtin_HEXAGON_V6_vscattermhwq_128B(MASK, (int)BASE, RGN, OFF, VALS) +#define VSCATTER_16_ACC(BASE, RGN, OFF, VALS) \ + __builtin_HEXAGON_V6_vscattermh_add_128B((int)BASE, RGN, OFF, VALS) +#define VSCATTER_32_ACC(BASE, RGN, OFF, VALS) \ + __builtin_HEXAGON_V6_vscattermw_add_128B((int)BASE, RGN, OFF, VALS) +#define VSCATTER_16_32_ACC(BASE, RGN, OFF, VALS) \ + __builtin_HEXAGON_V6_vscattermhw_add_128B((int)BASE, RGN, OFF, VALS) + +#define VGATHER_16(DSTADDR, BASE, RGN, OFF) \ + __builtin_HEXAGON_V6_vgathermh_128B(DSTADDR, (int)BASE, RGN, OFF) +#define VGATHER_16_MASKED(DSTADDR, MASK, BASE, RGN, OFF) \ + __builtin_HEXAGON_V6_vgathermhq_128B(DSTADDR, MASK, (int)BASE, RGN, OFF) +#define VGATHER_32(DSTADDR, BASE, RGN, OFF) \ + __builtin_HEXAGON_V6_vgathermw_128B(DSTADDR, (int)BASE, RGN, OFF) +#define VGATHER_32_MASKED(DSTADDR, MASK, BASE, RGN, OFF) \ + __builtin_HEXAGON_V6_vgathermwq_128B(DSTADDR, MASK, (int)BASE, RGN, OFF) +#define VGATHER_16_32(DSTADDR, BASE, RGN, OFF) \ + __builtin_HEXAGON_V6_vgathermhw_128B(DSTADDR, (int)BASE, RGN, OFF) +#define VGATHER_16_32_MASKED(DSTADDR, MASK, BASE, RGN, OFF) \ + __builtin_HEXAGON_V6_vgathermhwq_128B(DSTADDR, MASK, (int)BASE, RGN, OFF) + +#define VSHUFF_H(V) \ + __builtin_HEXAGON_V6_vshuffh_128B(V) +#define VSPLAT_H(X) \ + __builtin_HEXAGON_V6_lvsplath_128B(X) +#define VAND_VAL(PRED, VAL) \ + __builtin_HEXAGON_V6_vandvrt_128B(PRED, VAL) +#define VDEAL_H(V) \ + __builtin_HEXAGON_V6_vdealh_128B(V) + +int err; + +/* define the number of rows/cols in a square matrix */ +#define MATRIX_SIZE 64 + +/* define the size of the scatter buffer */ +#define SCATTER_BUFFER_SIZE (MATRIX_SIZE * MATRIX_SIZE) + +/* fake vtcm - put buffers together and force alignment */ +static struct { + unsigned short vscatter16[SCATTER_BUFFER_SIZE]; + unsigned short vgather16[MATRIX_SIZE]; + unsigned int vscatter32[SCATTER_BUFFER_SIZE]; + unsigned int vgather32[MATRIX_SIZE]; + unsigned short vscatter16_32[SCATTER_BUFFER_SIZE]; + unsigned short vgather16_32[MATRIX_SIZE]; +} vtcm __attribute__((aligned(0x10000))); + +/* declare the arrays of reference values */ +unsigned short vscatter16_ref[SCATTER_BUFFER_SIZE]; +unsigned short 
vgather16_ref[MATRIX_SIZE]; +unsigned int vscatter32_ref[SCATTER_BUFFER_SIZE]; +unsigned int vgather32_ref[MATRIX_SIZE]; +unsigned short vscatter16_32_ref[SCATTER_BUFFER_SIZE]; +unsigned short vgather16_32_ref[MATRIX_SIZE]; + +/* declare the arrays of offsets */ +unsigned short half_offsets[MATRIX_SIZE]; +unsigned int word_offsets[MATRIX_SIZE]; + +/* declare the arrays of values */ +unsigned short half_values[MATRIX_SIZE]; +unsigned short half_values_acc[MATRIX_SIZE]; +unsigned short half_values_masked[MATRIX_SIZE]; +unsigned int word_values[MATRIX_SIZE]; +unsigned int word_values_acc[MATRIX_SIZE]; +unsigned int word_values_masked[MATRIX_SIZE]; + +/* declare the arrays of predicates */ +unsigned short half_predicates[MATRIX_SIZE]; +unsigned int word_predicates[MATRIX_SIZE]; + +/* make this big enough for all the intrinsics */ +const size_t region_len = sizeof(vtcm); + +/* optionally add sync instructions */ +#define SYNC_VECTOR 1 + +static void sync_scatter(void *addr) +{ +#if SYNC_VECTOR + /* + * Do the scatter release followed by a dummy load to complete the + * synchronization. Normally the dummy load would be deferred as + * long as possible to minimize stalls. + */ + asm volatile("vmem(%0 + #0):scatter_release\n" : : "r"(addr)); + /* use volatile to force the load */ + volatile HVX_Vector vDummy = *(HVX_Vector *)addr; vDummy = vDummy; +#endif +} + +static void sync_gather(void *addr) +{ +#if SYNC_VECTOR + /* use volatile to force the load */ + volatile HVX_Vector vDummy = *(HVX_Vector *)addr; vDummy = vDummy; +#endif +} + +/* optionally print the results */ +#define PRINT_DATA 0 + +#define FILL_CHAR '.' + +/* fill vtcm scratch with ee */ +void prefill_vtcm_scratch(void) +{ + memset(&vtcm, FILL_CHAR, sizeof(vtcm)); +} + +/* create byte offsets to be a diagonal of the matrix with 16 bit elements */ +void create_offsets_values_preds_16(void) +{ + unsigned short half_element = 0; + unsigned short half_element_masked = 0; + char letter = 'A'; + char letter_masked = '@'; + + for (int i = 0; i < MATRIX_SIZE; i++) { + half_offsets[i] = i * (2 * MATRIX_SIZE + 2); + + half_element = 0; + half_element_masked = 0; + for (int j = 0; j < 2; j++) { + half_element |= letter << j * 8; + half_element_masked |= letter_masked << j * 8; + } + + half_values[i] = half_element; + half_values_acc[i] = ((i % 10) << 8) + (i % 10); + half_values_masked[i] = half_element_masked; + + letter++; + /* reset to 'A' */ + if (letter == 'M') { + letter = 'A'; + } + + half_predicates[i] = (i % 3 == 0 || i % 5 == 0) ? ~0 : 0; + } +} + +/* create byte offsets to be a diagonal of the matrix with 32 bit elements */ +void create_offsets_values_preds_32(void) +{ + unsigned int word_element = 0; + unsigned int word_element_masked = 0; + char letter = 'A'; + char letter_masked = '&'; + + for (int i = 0; i < MATRIX_SIZE; i++) { + word_offsets[i] = i * (4 * MATRIX_SIZE + 4); + + word_element = 0; + word_element_masked = 0; + for (int j = 0; j < 4; j++) { + word_element |= letter << j * 8; + word_element_masked |= letter_masked << j * 8; + } + + word_values[i] = word_element; + word_values_acc[i] = ((i % 10) << 8) + (i % 10); + word_values_masked[i] = word_element_masked; + + letter++; + /* reset to 'A' */ + if (letter == 'M') { + letter = 'A'; + } + + word_predicates[i] = (i % 4 == 0 || i % 7 == 0) ? 
~0 : 0; + } +} + +/* + * create byte offsets to be a diagonal of the matrix with 16 bit elements + * and 32 bit offsets + */ +void create_offsets_values_preds_16_32(void) +{ + unsigned short half_element = 0; + unsigned short half_element_masked = 0; + char letter = 'D'; + char letter_masked = '$'; + + for (int i = 0; i < MATRIX_SIZE; i++) { + word_offsets[i] = i * (2 * MATRIX_SIZE + 2); + + half_element = 0; + half_element_masked = 0; + for (int j = 0; j < 2; j++) { + half_element |= letter << j * 8; + half_element_masked |= letter_masked << j * 8; + } + + half_values[i] = half_element; + half_values_acc[i] = ((i % 10) << 8) + (i % 10); + half_values_masked[i] = half_element_masked; + + letter++; + /* reset to 'A' */ + if (letter == 'P') { + letter = 'D'; + } + + half_predicates[i] = (i % 2 == 0 || i % 13 == 0) ? ~0 : 0; + } +} + +/* scatter the 16 bit elements using intrinsics */ +void vector_scatter_16(void) +{ + /* copy the offsets and values to vectors */ + HVX_Vector offsets = *(HVX_Vector *)half_offsets; + HVX_Vector values = *(HVX_Vector *)half_values; + + VSCATTER_16(&vtcm.vscatter16, region_len, offsets, values); + + sync_scatter(vtcm.vscatter16); +} + +/* scatter-accumulate the 16 bit elements using intrinsics */ +void vector_scatter_16_acc(void) +{ + /* copy the offsets and values to vectors */ + HVX_Vector offsets = *(HVX_Vector *)half_offsets; + HVX_Vector values = *(HVX_Vector *)half_values_acc; + + VSCATTER_16_ACC(&vtcm.vscatter16, region_len, offsets, values); + + sync_scatter(vtcm.vscatter16); +} + +/* scatter the 16 bit elements using intrinsics */ +void vector_scatter_16_masked(void) +{ + /* copy the offsets and values to vectors */ + HVX_Vector offsets = *(HVX_Vector *)half_offsets; + HVX_Vector values = *(HVX_Vector *)half_values_masked; + HVX_Vector pred_reg = *(HVX_Vector *)half_predicates; + HVX_VectorPred preds = VAND_VAL(pred_reg, ~0); + + VSCATTER_16_MASKED(preds, &vtcm.vscatter16, region_len, offsets, values); + + sync_scatter(vtcm.vscatter16); +} + +/* scatter the 32 bit elements using intrinsics */ +void vector_scatter_32(void) +{ + /* copy the offsets and values to vectors */ + HVX_Vector offsetslo = *(HVX_Vector *)word_offsets; + HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; + HVX_Vector valueslo = *(HVX_Vector *)word_values; + HVX_Vector valueshi = *(HVX_Vector *)&word_values[MATRIX_SIZE / 2]; + + VSCATTER_32(&vtcm.vscatter32, region_len, offsetslo, valueslo); + VSCATTER_32(&vtcm.vscatter32, region_len, offsetshi, valueshi); + + sync_scatter(vtcm.vscatter32); +} + +/* scatter-acc the 32 bit elements using intrinsics */ +void vector_scatter_32_acc(void) +{ + /* copy the offsets and values to vectors */ + HVX_Vector offsetslo = *(HVX_Vector *)word_offsets; + HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; + HVX_Vector valueslo = *(HVX_Vector *)word_values_acc; + HVX_Vector valueshi = *(HVX_Vector *)&word_values_acc[MATRIX_SIZE / 2]; + + VSCATTER_32_ACC(&vtcm.vscatter32, region_len, offsetslo, valueslo); + VSCATTER_32_ACC(&vtcm.vscatter32, region_len, offsetshi, valueshi); + + sync_scatter(vtcm.vscatter32); +} + +/* scatter the 32 bit elements using intrinsics */ +void vector_scatter_32_masked(void) +{ + /* copy the offsets and values to vectors */ + HVX_Vector offsetslo = *(HVX_Vector *)word_offsets; + HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; + HVX_Vector valueslo = *(HVX_Vector *)word_values_masked; + HVX_Vector valueshi = *(HVX_Vector *)&word_values_masked[MATRIX_SIZE / 2]; + 
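+    /*
+     * With MATRIX_SIZE = 64, the 32-bit offsets, values and predicates each
+     * occupy two 128-byte HVX vectors, so the masked scatter is issued as a
+     * low half and a high half, with each predicate vector first converted
+     * to a vector predicate via VAND_VAL (vandvrt).
+     */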
HVX_Vector pred_reglo = *(HVX_Vector *)word_predicates; + HVX_Vector pred_reghi = *(HVX_Vector *)&word_predicates[MATRIX_SIZE / 2]; + HVX_VectorPred predslo = VAND_VAL(pred_reglo, ~0); + HVX_VectorPred predshi = VAND_VAL(pred_reghi, ~0); + + VSCATTER_32_MASKED(predslo, &vtcm.vscatter32, region_len, offsetslo, + valueslo); + VSCATTER_32_MASKED(predshi, &vtcm.vscatter32, region_len, offsetshi, + valueshi); + + sync_scatter(vtcm.vscatter16); +} + +/* scatter the 16 bit elements with 32 bit offsets using intrinsics */ +void vector_scatter_16_32(void) +{ + HVX_VectorPair offsets; + HVX_Vector values; + + /* get the word offsets in a vector pair */ + offsets = *(HVX_VectorPair *)word_offsets; + + /* these values need to be shuffled for the scatter */ + values = *(HVX_Vector *)half_values; + values = VSHUFF_H(values); + + VSCATTER_16_32(&vtcm.vscatter16_32, region_len, offsets, values); + + sync_scatter(vtcm.vscatter16_32); +} + +/* scatter-acc the 16 bit elements with 32 bit offsets using intrinsics */ +void vector_scatter_16_32_acc(void) +{ + HVX_VectorPair offsets; + HVX_Vector values; + + /* get the word offsets in a vector pair */ + offsets = *(HVX_VectorPair *)word_offsets; + + /* these values need to be shuffled for the scatter */ + values = *(HVX_Vector *)half_values_acc; + values = VSHUFF_H(values); + + VSCATTER_16_32_ACC(&vtcm.vscatter16_32, region_len, offsets, values); + + sync_scatter(vtcm.vscatter16_32); +} + +/* masked scatter the 16 bit elements with 32 bit offsets using intrinsics */ +void vector_scatter_16_32_masked(void) +{ + HVX_VectorPair offsets; + HVX_Vector values; + HVX_Vector pred_reg; + + /* get the word offsets in a vector pair */ + offsets = *(HVX_VectorPair *)word_offsets; + + /* these values need to be shuffled for the scatter */ + values = *(HVX_Vector *)half_values_masked; + values = VSHUFF_H(values); + + pred_reg = *(HVX_Vector *)half_predicates; + pred_reg = VSHUFF_H(pred_reg); + HVX_VectorPred preds = VAND_VAL(pred_reg, ~0); + + VSCATTER_16_32_MASKED(preds, &vtcm.vscatter16_32, region_len, offsets, + values); + + sync_scatter(vtcm.vscatter16_32); +} + +/* gather the elements from the scatter16 buffer */ +void vector_gather_16(void) +{ + HVX_Vector *vgather = (HVX_Vector *)&vtcm.vgather16; + HVX_Vector offsets = *(HVX_Vector *)half_offsets; + + VGATHER_16(vgather, &vtcm.vscatter16, region_len, offsets); + + sync_gather(vgather); +} + +static unsigned short gather_16_masked_init(void) +{ + char letter = '?'; + return letter | (letter << 8); +} + +void vector_gather_16_masked(void) +{ + HVX_Vector *vgather = (HVX_Vector *)&vtcm.vgather16; + HVX_Vector offsets = *(HVX_Vector *)half_offsets; + HVX_Vector pred_reg = *(HVX_Vector *)half_predicates; + HVX_VectorPred preds = VAND_VAL(pred_reg, ~0); + + *vgather = VSPLAT_H(gather_16_masked_init()); + VGATHER_16_MASKED(vgather, preds, &vtcm.vscatter16, region_len, offsets); + + sync_gather(vgather); +} + +/* gather the elements from the scatter32 buffer */ +void vector_gather_32(void) +{ + HVX_Vector *vgatherlo = (HVX_Vector *)&vtcm.vgather32; + HVX_Vector *vgatherhi = + (HVX_Vector *)((int)&vtcm.vgather32 + (MATRIX_SIZE * 2)); + HVX_Vector offsetslo = *(HVX_Vector *)word_offsets; + HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; + + VGATHER_32(vgatherlo, &vtcm.vscatter32, region_len, offsetslo); + VGATHER_32(vgatherhi, &vtcm.vscatter32, region_len, offsetshi); + + sync_gather(vgatherhi); +} + +static unsigned int gather_32_masked_init(void) +{ + char letter = '?'; + return letter | (letter << 8) | 
(letter << 16) | (letter << 24); +} + +void vector_gather_32_masked(void) +{ + HVX_Vector *vgatherlo = (HVX_Vector *)&vtcm.vgather32; + HVX_Vector *vgatherhi = + (HVX_Vector *)((int)&vtcm.vgather32 + (MATRIX_SIZE * 2)); + HVX_Vector offsetslo = *(HVX_Vector *)word_offsets; + HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; + HVX_Vector pred_reglo = *(HVX_Vector *)word_predicates; + HVX_VectorPred predslo = VAND_VAL(pred_reglo, ~0); + HVX_Vector pred_reghi = *(HVX_Vector *)&word_predicates[MATRIX_SIZE / 2]; + HVX_VectorPred predshi = VAND_VAL(pred_reghi, ~0); + + *vgatherlo = VSPLAT_H(gather_32_masked_init()); + *vgatherhi = VSPLAT_H(gather_32_masked_init()); + VGATHER_32_MASKED(vgatherlo, predslo, &vtcm.vscatter32, region_len, + offsetslo); + VGATHER_32_MASKED(vgatherhi, predshi, &vtcm.vscatter32, region_len, + offsetshi); + + sync_gather(vgatherlo); + sync_gather(vgatherhi); +} + +/* gather the elements from the scatter16_32 buffer */ +void vector_gather_16_32(void) +{ + HVX_Vector *vgather; + HVX_VectorPair offsets; + HVX_Vector values; + + /* get the vtcm address to gather from */ + vgather = (HVX_Vector *)&vtcm.vgather16_32; + + /* get the word offsets in a vector pair */ + offsets = *(HVX_VectorPair *)word_offsets; + + VGATHER_16_32(vgather, &vtcm.vscatter16_32, region_len, offsets); + + /* deal the elements to get the order back */ + values = *(HVX_Vector *)vgather; + values = VDEAL_H(values); + + /* write it back to vtcm address */ + *(HVX_Vector *)vgather = values; +} + +void vector_gather_16_32_masked(void) +{ + HVX_Vector *vgather; + HVX_VectorPair offsets; + HVX_Vector pred_reg; + HVX_VectorPred preds; + HVX_Vector values; + + /* get the vtcm address to gather from */ + vgather = (HVX_Vector *)&vtcm.vgather16_32; + + /* get the word offsets in a vector pair */ + offsets = *(HVX_VectorPair *)word_offsets; + pred_reg = *(HVX_Vector *)half_predicates; + pred_reg = VSHUFF_H(pred_reg); + preds = VAND_VAL(pred_reg, ~0); + + *vgather = VSPLAT_H(gather_16_masked_init()); + VGATHER_16_32_MASKED(vgather, preds, &vtcm.vscatter16_32, region_len, + offsets); + + /* deal the elements to get the order back */ + values = *(HVX_Vector *)vgather; + values = VDEAL_H(values); + + /* write it back to vtcm address */ + *(HVX_Vector *)vgather = values; +} + +static void check_buffer(const char *name, void *c, void *r, size_t size) +{ + char *check = (char *)c; + char *ref = (char *)r; + for (int i = 0; i < size; i++) { + if (check[i] != ref[i]) { + printf("ERROR %s [%d]: 0x%x (%c) != 0x%x (%c)\n", name, i, + check[i], check[i], ref[i], ref[i]); + err++; + } + } +} + +/* + * These scalar functions are the C equivalents of the vector functions that + * use HVX + */ + +/* scatter the 16 bit elements using C */ +void scalar_scatter_16(unsigned short *vscatter16) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + vscatter16[half_offsets[i] / 2] = half_values[i]; + } +} + +void check_scatter_16() +{ + memset(vscatter16_ref, FILL_CHAR, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); + scalar_scatter_16(vscatter16_ref); + check_buffer(__func__, vtcm.vscatter16, vscatter16_ref, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); +} + +/* scatter the 16 bit elements using C */ +void scalar_scatter_16_acc(unsigned short *vscatter16) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + vscatter16[half_offsets[i] / 2] += half_values_acc[i]; + } +} + +void check_scatter_16_acc() +{ + memset(vscatter16_ref, FILL_CHAR, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); + scalar_scatter_16(vscatter16_ref); + 
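+    /*
+     * The reference buffer replays every scatter that has touched
+     * vtcm.vscatter16 so far: the plain scatter first, then the
+     * accumulating scatter, matching the order of the vector operations
+     * performed in main().
+     */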
scalar_scatter_16_acc(vscatter16_ref); + check_buffer(__func__, vtcm.vscatter16, vscatter16_ref, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); +} + +/* scatter the 16 bit elements using C */ +void scalar_scatter_16_masked(unsigned short *vscatter16) +{ + for (int i = 0; i < MATRIX_SIZE; i++) { + if (half_predicates[i]) { + vscatter16[half_offsets[i] / 2] = half_values_masked[i]; + } + } + +} + +void check_scatter_16_masked() +{ + memset(vscatter16_ref, FILL_CHAR, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); + scalar_scatter_16(vscatter16_ref); + scalar_scatter_16_acc(vscatter16_ref); + scalar_scatter_16_masked(vscatter16_ref); + check_buffer(__func__, vtcm.vscatter16, vscatter16_ref, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); +} + +/* scatter the 32 bit elements using C */ +void scalar_scatter_32(unsigned int *vscatter32) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + vscatter32[word_offsets[i] / 4] = word_values[i]; + } +} + +void check_scatter_32() +{ + memset(vscatter32_ref, FILL_CHAR, + SCATTER_BUFFER_SIZE * sizeof(unsigned int)); + scalar_scatter_32(vscatter32_ref); + check_buffer(__func__, vtcm.vscatter32, vscatter32_ref, + SCATTER_BUFFER_SIZE * sizeof(unsigned int)); +} + +/* scatter the 32 bit elements using C */ +void scalar_scatter_32_acc(unsigned int *vscatter32) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + vscatter32[word_offsets[i] / 4] += word_values_acc[i]; + } +} + +void check_scatter_32_acc() +{ + memset(vscatter32_ref, FILL_CHAR, + SCATTER_BUFFER_SIZE * sizeof(unsigned int)); + scalar_scatter_32(vscatter32_ref); + scalar_scatter_32_acc(vscatter32_ref); + check_buffer(__func__, vtcm.vscatter32, vscatter32_ref, + SCATTER_BUFFER_SIZE * sizeof(unsigned int)); +} + +/* scatter the 32 bit elements using C */ +void scalar_scatter_32_masked(unsigned int *vscatter32) +{ + for (int i = 0; i < MATRIX_SIZE; i++) { + if (word_predicates[i]) { + vscatter32[word_offsets[i] / 4] = word_values_masked[i]; + } + } +} + +void check_scatter_32_masked() +{ + memset(vscatter32_ref, FILL_CHAR, + SCATTER_BUFFER_SIZE * sizeof(unsigned int)); + scalar_scatter_32(vscatter32_ref); + scalar_scatter_32_acc(vscatter32_ref); + scalar_scatter_32_masked(vscatter32_ref); + check_buffer(__func__, vtcm.vscatter32, vscatter32_ref, + SCATTER_BUFFER_SIZE * sizeof(unsigned int)); +} + +/* scatter the 32 bit elements using C */ +void scalar_scatter_16_32(unsigned short *vscatter16_32) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + vscatter16_32[word_offsets[i] / 2] = half_values[i]; + } +} + +void check_scatter_16_32() +{ + memset(vscatter16_32_ref, FILL_CHAR, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); + scalar_scatter_16_32(vscatter16_32_ref); + check_buffer(__func__, vtcm.vscatter16_32, vscatter16_32_ref, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); +} + +/* scatter the 32 bit elements using C */ +void scalar_scatter_16_32_acc(unsigned short *vscatter16_32) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + vscatter16_32[word_offsets[i] / 2] += half_values_acc[i]; + } +} + +void check_scatter_16_32_acc() +{ + memset(vscatter16_32_ref, FILL_CHAR, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); + scalar_scatter_16_32(vscatter16_32_ref); + scalar_scatter_16_32_acc(vscatter16_32_ref); + check_buffer(__func__, vtcm.vscatter16_32, vscatter16_32_ref, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); +} + +void scalar_scatter_16_32_masked(unsigned short *vscatter16_32) +{ + for (int i = 0; i < MATRIX_SIZE; i++) { + if (half_predicates[i]) { + vscatter16_32[word_offsets[i] / 2] = 
half_values_masked[i]; + } + } +} + +void check_scatter_16_32_masked() +{ + memset(vscatter16_32_ref, FILL_CHAR, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); + scalar_scatter_16_32(vscatter16_32_ref); + scalar_scatter_16_32_acc(vscatter16_32_ref); + scalar_scatter_16_32_masked(vscatter16_32_ref); + check_buffer(__func__, vtcm.vscatter16_32, vscatter16_32_ref, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); +} + +/* gather the elements from the scatter buffer using C */ +void scalar_gather_16(unsigned short *vgather16) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + vgather16[i] = vtcm.vscatter16[half_offsets[i] / 2]; + } +} + +void check_gather_16() +{ + memset(vgather16_ref, 0, MATRIX_SIZE * sizeof(unsigned short)); + scalar_gather_16(vgather16_ref); + check_buffer(__func__, vtcm.vgather16, vgather16_ref, + MATRIX_SIZE * sizeof(unsigned short)); +} + +void scalar_gather_16_masked(unsigned short *vgather16) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + if (half_predicates[i]) { + vgather16[i] = vtcm.vscatter16[half_offsets[i] / 2]; + } + } +} + +void check_gather_16_masked() +{ + memset(vgather16_ref, gather_16_masked_init(), + MATRIX_SIZE * sizeof(unsigned short)); + scalar_gather_16_masked(vgather16_ref); + check_buffer(__func__, vtcm.vgather16, vgather16_ref, + MATRIX_SIZE * sizeof(unsigned short)); +} + +/* gather the elements from the scatter buffer using C */ +void scalar_gather_32(unsigned int *vgather32) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + vgather32[i] = vtcm.vscatter32[word_offsets[i] / 4]; + } +} + +void check_gather_32(void) +{ + memset(vgather32_ref, 0, MATRIX_SIZE * sizeof(unsigned int)); + scalar_gather_32(vgather32_ref); + check_buffer(__func__, vtcm.vgather32, vgather32_ref, + MATRIX_SIZE * sizeof(unsigned int)); +} + +void scalar_gather_32_masked(unsigned int *vgather32) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + if (word_predicates[i]) { + vgather32[i] = vtcm.vscatter32[word_offsets[i] / 4]; + } + } +} + + +void check_gather_32_masked(void) +{ + memset(vgather32_ref, gather_32_masked_init(), + MATRIX_SIZE * sizeof(unsigned int)); + scalar_gather_32_masked(vgather32_ref); + check_buffer(__func__, vtcm.vgather32, + vgather32_ref, MATRIX_SIZE * sizeof(unsigned int)); +} + +/* gather the elements from the scatter buffer using C */ +void scalar_gather_16_32(unsigned short *vgather16_32) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + vgather16_32[i] = vtcm.vscatter16_32[word_offsets[i] / 2]; + } +} + +void check_gather_16_32(void) +{ + memset(vgather16_32_ref, 0, MATRIX_SIZE * sizeof(unsigned short)); + scalar_gather_16_32(vgather16_32_ref); + check_buffer(__func__, vtcm.vgather16_32, vgather16_32_ref, + MATRIX_SIZE * sizeof(unsigned short)); +} + +void scalar_gather_16_32_masked(unsigned short *vgather16_32) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + if (half_predicates[i]) { + vgather16_32[i] = vtcm.vscatter16_32[word_offsets[i] / 2]; + } + } + +} + +void check_gather_16_32_masked(void) +{ + memset(vgather16_32_ref, gather_16_masked_init(), + MATRIX_SIZE * sizeof(unsigned short)); + scalar_gather_16_32_masked(vgather16_32_ref); + check_buffer(__func__, vtcm.vgather16_32, vgather16_32_ref, + MATRIX_SIZE * sizeof(unsigned short)); +} + +/* print scatter16 buffer */ +void print_scatter16_buffer(void) +{ + if (PRINT_DATA) { + printf("\n\nPrinting the 16 bit scatter buffer"); + + for (int i = 0; i < SCATTER_BUFFER_SIZE; i++) { + if ((i % MATRIX_SIZE) == 0) { + printf("\n"); + } + for (int j = 0; j < 2; j++) { + printf("%c", (char)((vtcm.vscatter16[i] >> j * 
8) & 0xff)); + } + printf(" "); + } + printf("\n"); + } +} + +/* print the gather 16 buffer */ +void print_gather_result_16(void) +{ + if (PRINT_DATA) { + printf("\n\nPrinting the 16 bit gather result\n"); + + for (int i = 0; i < MATRIX_SIZE; i++) { + for (int j = 0; j < 2; j++) { + printf("%c", (char)((vtcm.vgather16[i] >> j * 8) & 0xff)); + } + printf(" "); + } + printf("\n"); + } +} + +/* print the scatter32 buffer */ +void print_scatter32_buffer(void) +{ + if (PRINT_DATA) { + printf("\n\nPrinting the 32 bit scatter buffer"); + + for (int i = 0; i < SCATTER_BUFFER_SIZE; i++) { + if ((i % MATRIX_SIZE) == 0) { + printf("\n"); + } + for (int j = 0; j < 4; j++) { + printf("%c", (char)((vtcm.vscatter32[i] >> j * 8) & 0xff)); + } + printf(" "); + } + printf("\n"); + } +} + +/* print the gather 32 buffer */ +void print_gather_result_32(void) +{ + if (PRINT_DATA) { + printf("\n\nPrinting the 32 bit gather result\n"); + + for (int i = 0; i < MATRIX_SIZE; i++) { + for (int j = 0; j < 4; j++) { + printf("%c", (char)((vtcm.vgather32[i] >> j * 8) & 0xff)); + } + printf(" "); + } + printf("\n"); + } +} + +/* print the scatter16_32 buffer */ +void print_scatter16_32_buffer(void) +{ + if (PRINT_DATA) { + printf("\n\nPrinting the 16_32 bit scatter buffer"); + + for (int i = 0; i < SCATTER_BUFFER_SIZE; i++) { + if ((i % MATRIX_SIZE) == 0) { + printf("\n"); + } + for (int j = 0; j < 2; j++) { + printf("%c", + (unsigned char)((vtcm.vscatter16_32[i] >> j * 8) & 0xff)); + } + printf(" "); + } + printf("\n"); + } +} + +/* print the gather 16_32 buffer */ +void print_gather_result_16_32(void) +{ + if (PRINT_DATA) { + printf("\n\nPrinting the 16_32 bit gather result\n"); + + for (int i = 0; i < MATRIX_SIZE; i++) { + for (int j = 0; j < 2; j++) { + printf("%c", + (unsigned char)((vtcm.vgather16_32[i] >> j * 8) & 0xff)); + } + printf(" "); + } + printf("\n"); + } +} + +int main() +{ + prefill_vtcm_scratch(); + + /* 16 bit elements with 16 bit offsets */ + create_offsets_values_preds_16(); + + vector_scatter_16(); + print_scatter16_buffer(); + check_scatter_16(); + + vector_gather_16(); + print_gather_result_16(); + check_gather_16(); + + vector_gather_16_masked(); + print_gather_result_16(); + check_gather_16_masked(); + + vector_scatter_16_acc(); + print_scatter16_buffer(); + check_scatter_16_acc(); + + vector_scatter_16_masked(); + print_scatter16_buffer(); + check_scatter_16_masked(); + + /* 32 bit elements with 32 bit offsets */ + create_offsets_values_preds_32(); + + vector_scatter_32(); + print_scatter32_buffer(); + check_scatter_32(); + + vector_gather_32(); + print_gather_result_32(); + check_gather_32(); + + vector_gather_32_masked(); + print_gather_result_32(); + check_gather_32_masked(); + + vector_scatter_32_acc(); + print_scatter32_buffer(); + check_scatter_32_acc(); + + vector_scatter_32_masked(); + print_scatter32_buffer(); + check_scatter_32_masked(); + + /* 16 bit elements with 32 bit offsets */ + create_offsets_values_preds_16_32(); + + vector_scatter_16_32(); + print_scatter16_32_buffer(); + check_scatter_16_32(); + + vector_gather_16_32(); + print_gather_result_16_32(); + check_gather_16_32(); + + vector_gather_16_32_masked(); + print_gather_result_16_32(); + check_gather_16_32_masked(); + + vector_scatter_16_32_acc(); + print_scatter16_32_buffer(); + check_scatter_16_32_acc(); + + vector_scatter_16_32_masked(); + print_scatter16_32_buffer(); + check_scatter_16_32_masked(); + + puts(err ? 
"FAIL" : "PASS"); + return err; +} diff --git a/tests/tcg/hexagon/usr.c b/tests/tcg/hexagon/usr.c new file mode 100644 index 0000000000..fb4514989c --- /dev/null +++ b/tests/tcg/hexagon/usr.c @@ -0,0 +1,1151 @@ +/* + * Copyright(c) 2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +/* + * Test instructions that might set bits in user status register (USR) + */ + +#include +#include + +int err; + +static void __check(int line, uint32_t val, uint32_t expect) +{ + if (val != expect) { + printf("ERROR at line %d: %d != %d\n", line, val, expect); + err++; + } +} + +#define check(RES, EXP) __check(__LINE__, RES, EXP) + +static void __check32(int line, uint32_t val, uint32_t expect) +{ + if (val != expect) { + printf("ERROR at line %d: 0x%08x != 0x%08x\n", line, val, expect); + err++; + } +} + +#define check32(RES, EXP) __check32(__LINE__, RES, EXP) + +static void __check64(int line, uint64_t val, uint64_t expect) +{ + if (val != expect) { + printf("ERROR at line %d: 0x%016llx != 0x%016llx\n", line, val, expect); + err++; + } +} + +#define check64(RES, EXP) __check64(__LINE__, RES, EXP) + +/* + * Some of the instructions tested are only available on certain versions + * of the Hexagon core + */ +#define CORE_HAS_AUDIO (__HEXAGON_ARCH__ >= 67 && defined(__HEXAGON_AUDIO__)) +#define CORE_IS_V67 (__HEXAGON_ARCH__ >= 67) + +/* Define the bits in Hexagon USR register */ +#define USR_OVF_BIT 0 /* Sticky saturation overflow */ +#define USR_FPINVF_BIT 1 /* IEEE FP invalid sticky flag */ +#define USR_FPDBZF_BIT 2 /* IEEE FP divide-by-zero sticky flag */ +#define USR_FPOVFF_BIT 3 /* IEEE FP overflow sticky flag */ +#define USR_FPUNFF_BIT 4 /* IEEE FP underflow sticky flag */ +#define USR_FPINPF_BIT 5 /* IEEE FP inexact sticky flag */ + +/* Corresponding values in USR */ +#define USR_CLEAR 0 +#define USR_OVF (1 << USR_OVF_BIT) +#define USR_FPINVF (1 << USR_FPINVF_BIT) +#define USR_FPDBZF (1 << USR_FPDBZF_BIT) +#define USR_FPOVFF (1 << USR_FPOVFF_BIT) +#define USR_FPUNFF (1 << USR_FPUNFF_BIT) +#define USR_FPINPF (1 << USR_FPINPF_BIT) + +/* Some useful floating point values */ +const uint32_t SF_INF = 0x7f800000; +const uint32_t SF_QNaN = 0x7fc00000; +const uint32_t SF_SNaN = 0x7fb00000; +const uint32_t SF_QNaN_neg = 0xffc00000; +const uint32_t SF_SNaN_neg = 0xffb00000; +const uint32_t SF_HEX_NaN = 0xffffffff; +const uint32_t SF_zero = 0x00000000; +const uint32_t SF_zero_neg = 0x80000000; +const uint32_t SF_one = 0x3f800000; +const uint32_t SF_one_recip = 0x3f7f0001; /* 0.9960... 
*/ +const uint32_t SF_one_invsqrta = 0x3f7f0000; /* 0.99609375 */ +const uint32_t SF_two = 0x40000000; +const uint32_t SF_four = 0x40800000; +const uint32_t SF_small_neg = 0xab98fba8; +const uint32_t SF_large_pos = 0x5afa572e; + +const uint64_t DF_QNaN = 0x7ff8000000000000ULL; +const uint64_t DF_SNaN = 0x7ff7000000000000ULL; +const uint64_t DF_QNaN_neg = 0xfff8000000000000ULL; +const uint64_t DF_SNaN_neg = 0xfff7000000000000ULL; +const uint64_t DF_HEX_NaN = 0xffffffffffffffffULL; +const uint64_t DF_zero = 0x0000000000000000ULL; +const uint64_t DF_zero_neg = 0x8000000000000000ULL; +const uint64_t DF_any = 0x3f80000000000000ULL; +const uint64_t DF_one = 0x3ff0000000000000ULL; +const uint64_t DF_one_hh = 0x3ff001ff80000000ULL; /* 1.00048... */ +const uint64_t DF_small_neg = 0xbd731f7500000000ULL; +const uint64_t DF_large_pos = 0x7f80000000000001ULL; + +/* + * Templates for functions to execute an instruction + * + * The templates vary by the number of arguments and the types of the args + * and result. We use one letter in the macro name for the result and each + * argument: + * x unknown (specified in a subsequent template) or don't care + * R register (32 bits) + * P pair (64 bits) + * p predicate + * I immediate + * Xx read/write + */ + +/* Clear bits 0-5 in USR */ +#define CLEAR_USRBITS \ + "r2 = usr\n\t" \ + "r2 = and(r2, #0xffffffc0)\n\t" \ + "usr = r2\n\t" + +/* Template for instructions with one register operand */ +#define FUNC_x_OP_x(RESTYPE, SRCTYPE, NAME, INSN) \ +static RESTYPE NAME(SRCTYPE src, uint32_t *usr_result) \ +{ \ + RESTYPE result; \ + uint32_t usr; \ + asm(CLEAR_USRBITS \ + INSN "\n\t" \ + "%1 = usr\n\t" \ + : "=r"(result), "=r"(usr) \ + : "r"(src) \ + : "r2", "usr"); \ + *usr_result = usr & 0x3f; \ + return result; \ +} + +#define FUNC_R_OP_R(NAME, INSN) \ +FUNC_x_OP_x(uint32_t, uint32_t, NAME, INSN) + +#define FUNC_R_OP_P(NAME, INSN) \ +FUNC_x_OP_x(uint32_t, uint64_t, NAME, INSN) + +#define FUNC_P_OP_P(NAME, INSN) \ +FUNC_x_OP_x(uint64_t, uint64_t, NAME, INSN) + +#define FUNC_P_OP_R(NAME, INSN) \ +FUNC_x_OP_x(uint64_t, uint32_t, NAME, INSN) + +/* + * Template for instructions with a register and predicate result + * and one register operand + */ +#define FUNC_xp_OP_x(RESTYPE, SRCTYPE, NAME, INSN) \ +static RESTYPE NAME(SRCTYPE src, uint8_t *pred_result, uint32_t *usr_result) \ +{ \ + RESTYPE result; \ + uint8_t pred; \ + uint32_t usr; \ + asm(CLEAR_USRBITS \ + INSN "\n\t" \ + "%1 = p2\n\t" \ + "%2 = usr\n\t" \ + : "=r"(result), "=r"(pred), "=r"(usr) \ + : "r"(src) \ + : "r2", "p2", "usr"); \ + *pred_result = pred; \ + *usr_result = usr & 0x3f; \ + return result; \ +} + +#define FUNC_Rp_OP_R(NAME, INSN) \ +FUNC_xp_OP_x(uint32_t, uint32_t, NAME, INSN) + +/* Template for instructions with two register operands */ +#define FUNC_x_OP_xx(RESTYPE, SRC1TYPE, SRC2TYPE, NAME, INSN) \ +static RESTYPE NAME(SRC1TYPE src1, SRC2TYPE src2, uint32_t *usr_result) \ +{ \ + RESTYPE result; \ + uint32_t usr; \ + asm(CLEAR_USRBITS \ + INSN "\n\t" \ + "%1 = usr\n\t" \ + : "=r"(result), "=r"(usr) \ + : "r"(src1), "r"(src2) \ + : "r2", "usr"); \ + *usr_result = usr & 0x3f; \ + return result; \ +} + +#define FUNC_P_OP_PP(NAME, INSN) \ +FUNC_x_OP_xx(uint64_t, uint64_t, uint64_t, NAME, INSN) + +#define FUNC_R_OP_PP(NAME, INSN) \ +FUNC_x_OP_xx(uint32_t, uint64_t, uint64_t, NAME, INSN) + +#define FUNC_P_OP_RR(NAME, INSN) \ +FUNC_x_OP_xx(uint64_t, uint32_t, uint32_t, NAME, INSN) + +#define FUNC_R_OP_RR(NAME, INSN) \ +FUNC_x_OP_xx(uint32_t, uint32_t, uint32_t, NAME, INSN) + +#define 
FUNC_R_OP_PR(NAME, INSN) \ +FUNC_x_OP_xx(uint32_t, uint64_t, uint32_t, NAME, INSN) + +#define FUNC_P_OP_PR(NAME, INSN) \ +FUNC_x_OP_xx(uint64_t, uint64_t, uint32_t, NAME, INSN) + +/* + * Template for instructions with a register and predicate result + * and two register operands + */ +#define FUNC_xp_OP_xx(RESTYPE, SRC1TYPE, SRC2TYPE, NAME, INSN) \ +static RESTYPE NAME(SRC1TYPE src1, SRC2TYPE src2, \ + uint8_t *pred_result, uint32_t *usr_result) \ +{ \ + RESTYPE result; \ + uint8_t pred; \ + uint32_t usr; \ + asm(CLEAR_USRBITS \ + INSN "\n\t" \ + "%1 = p2\n\t" \ + "%2 = usr\n\t" \ + : "=r"(result), "=r"(pred), "=r"(usr) \ + : "r"(src1), "r"(src2) \ + : "r2", "p2", "usr"); \ + *pred_result = pred; \ + *usr_result = usr & 0x3f; \ + return result; \ +} + +#define FUNC_Rp_OP_RR(NAME, INSN) \ +FUNC_xp_OP_xx(uint32_t, uint32_t, uint32_t, NAME, INSN) + +/* Template for instructions with one register and one immediate */ +#define FUNC_x_OP_xI(RESTYPE, SRC1TYPE, NAME, INSN) \ +static RESTYPE NAME(SRC1TYPE src1, int32_t src2, uint32_t *usr_result) \ +{ \ + RESTYPE result; \ + uint32_t usr; \ + asm(CLEAR_USRBITS \ + INSN "\n\t" \ + "%1 = usr\n\t" \ + : "=r"(result), "=r"(usr) \ + : "r"(src1), "i"(src2) \ + : "r2", "usr"); \ + *usr_result = usr & 0x3f; \ + return result; \ +} + +#define FUNC_R_OP_RI(NAME, INSN) \ +FUNC_x_OP_xI(uint32_t, uint32_t, NAME, INSN) + +#define FUNC_R_OP_PI(NAME, INSN) \ +FUNC_x_OP_xI(uint32_t, uint64_t, NAME, INSN) + +/* + * Template for instructions with a read/write result + * and two register operands + */ +#define FUNC_Xx_OP_xx(RESTYPE, SRC1TYPE, SRC2TYPE, NAME, INSN) \ +static RESTYPE NAME(RESTYPE result, SRC1TYPE src1, SRC2TYPE src2, \ + uint32_t *usr_result) \ +{ \ + uint32_t usr; \ + asm(CLEAR_USRBITS \ + INSN "\n\t" \ + "%1 = usr\n\t" \ + : "+r"(result), "=r"(usr) \ + : "r"(src1), "r"(src2) \ + : "r2", "usr"); \ + *usr_result = usr & 0x3f; \ + return result; \ +} + +#define FUNC_XR_OP_RR(NAME, INSN) \ +FUNC_Xx_OP_xx(uint32_t, uint32_t, uint32_t, NAME, INSN) + +#define FUNC_XP_OP_PP(NAME, INSN) \ +FUNC_Xx_OP_xx(uint64_t, uint64_t, uint64_t, NAME, INSN) + +#define FUNC_XP_OP_RR(NAME, INSN) \ +FUNC_Xx_OP_xx(uint64_t, uint32_t, uint32_t, NAME, INSN) + +/* + * Template for instructions with a read/write result + * and two register operands + */ +#define FUNC_Xxp_OP_xx(RESTYPE, SRC1TYPE, SRC2TYPE, NAME, INSN) \ +static RESTYPE NAME(RESTYPE result, SRC1TYPE src1, SRC2TYPE src2, \ + uint8_t *pred_result, uint32_t *usr_result) \ +{ \ + uint32_t usr; \ + uint8_t pred; \ + asm(CLEAR_USRBITS \ + INSN "\n\t" \ + "%1 = p2\n\t" \ + "%2 = usr\n\t" \ + : "+r"(result), "=r"(pred), "=r"(usr) \ + : "r"(src1), "r"(src2) \ + : "r2", "usr"); \ + *pred_result = pred; \ + *usr_result = usr & 0x3f; \ + return result; \ +} + +#define FUNC_XPp_OP_PP(NAME, INSN) \ +FUNC_Xxp_OP_xx(uint64_t, uint64_t, uint64_t, NAME, INSN) + +/* + * Template for instructions with a read/write result and + * two register and one predicate operands + */ +#define FUNC_Xx_OP_xxp(RESTYPE, SRC1TYPE, SRC2TYPE, NAME, INSN) \ +static RESTYPE NAME(RESTYPE result, SRC1TYPE src1, SRC2TYPE src2, uint8_t pred,\ + uint32_t *usr_result) \ +{ \ + uint32_t usr; \ + asm(CLEAR_USRBITS \ + "p2 = %4\n\t" \ + INSN "\n\t" \ + "%1 = usr\n\t" \ + : "+r"(result), "=r"(usr) \ + : "r"(src1), "r"(src2), "r"(pred) \ + : "r2", "p2", "usr"); \ + *usr_result = usr & 0x3f; \ + return result; \ +} + +#define FUNC_XR_OP_RRp(NAME, INSN) \ +FUNC_Xx_OP_xxp(uint32_t, uint32_t, uint32_t, NAME, INSN) + +/* Template for compare instructions with two 
register operands */ +#define FUNC_CMP_xx(SRC1TYPE, SRC2TYPE, NAME, INSN) \ +static uint32_t NAME(SRC1TYPE src1, SRC2TYPE src2, uint32_t *usr_result) \ +{ \ + uint32_t result; \ + uint32_t usr; \ + asm(CLEAR_USRBITS \ + INSN "\n\t" \ + "%0 = p1\n\t" \ + "%1 = usr\n\t" \ + : "=r"(result), "=r"(usr) \ + : "r"(src1), "r"(src2) \ + : "p1", "r2", "usr"); \ + *usr_result = usr & 0x3f; \ + return result; \ +} + +#define FUNC_CMP_RR(NAME, INSN) \ +FUNC_CMP_xx(uint32_t, uint32_t, NAME, INSN) + +#define FUNC_CMP_PP(NAME, INSN) \ +FUNC_CMP_xx(uint64_t, uint64_t, NAME, INSN) + +/* + * Function declarations using the templates + */ +FUNC_R_OP_R(satub, "%0 = satub(%2)") +FUNC_P_OP_PP(vaddubs, "%0 = vaddub(%2, %3):sat") +FUNC_P_OP_PP(vadduhs, "%0 = vadduh(%2, %3):sat") +FUNC_P_OP_PP(vsububs, "%0 = vsubub(%2, %3):sat") +FUNC_P_OP_PP(vsubuhs, "%0 = vsubuh(%2, %3):sat") + +/* Add vector of half integers with saturation and pack to unsigned bytes */ +FUNC_R_OP_PP(vaddhubs, "%0 = vaddhub(%2, %3):sat") + +/* Vector saturate half to unsigned byte */ +FUNC_R_OP_P(vsathub, "%0 = vsathub(%2)") + +/* Similar to above but takes a 32-bit argument */ +FUNC_R_OP_R(svsathub, "%0 = vsathub(%2)") + +/* Vector saturate word to unsigned half */ +FUNC_P_OP_P(vsatwuh_nopack, "%0 = vsatwuh(%2)") + +/* Similar to above but returns a 32-bit result */ +FUNC_R_OP_P(vsatwuh, "%0 = vsatwuh(%2)") + +/* Vector arithmetic shift halfwords with saturate and pack */ +FUNC_R_OP_PI(asrhub_sat, "%0 = vasrhub(%2, #%3):sat") + +/* Vector arithmetic shift halfwords with round, saturate and pack */ +FUNC_R_OP_PI(asrhub_rnd_sat, "%0 = vasrhub(%2, #%3):raw") + +FUNC_R_OP_RR(addsat, "%0 = add(%2, %3):sat") +/* Similar to above but with register pairs */ +FUNC_P_OP_PP(addpsat, "%0 = add(%2, %3):sat") + +FUNC_XR_OP_RR(mpy_acc_sat_hh_s0, "%0 += mpy(%2.H, %3.H):sat") +FUNC_R_OP_RR(mpy_sat_hh_s1, "%0 = mpy(%2.H, %3.H):<<1:sat") +FUNC_R_OP_RR(mpy_sat_rnd_hh_s1, "%0 = mpy(%2.H, %3.H):<<1:rnd:sat") +FUNC_R_OP_RR(mpy_up_s1_sat, "%0 = mpy(%2, %3):<<1:sat") +FUNC_P_OP_RR(vmpy2s_s1, "%0 = vmpyh(%2, %3):<<1:sat") +FUNC_P_OP_RR(vmpy2su_s1, "%0 = vmpyhsu(%2, %3):<<1:sat") +FUNC_R_OP_RR(vmpy2s_s1pack, "%0 = vmpyh(%2, %3):<<1:rnd:sat") +FUNC_P_OP_PP(vmpy2es_s1, "%0 = vmpyeh(%2, %3):<<1:sat") +FUNC_R_OP_PP(vdmpyrs_s1, "%0 = vdmpy(%2, %3):<<1:rnd:sat") +FUNC_XP_OP_PP(vdmacs_s0, "%0 += vdmpy(%2, %3):sat") +FUNC_R_OP_RR(cmpyrs_s0, "%0 = cmpy(%2, %3):rnd:sat") +FUNC_XP_OP_RR(cmacs_s0, "%0 += cmpy(%2, %3):sat") +FUNC_XP_OP_RR(cnacs_s0, "%0 -= cmpy(%2, %3):sat") +FUNC_P_OP_PP(vrcmpys_s1_h, "%0 = vrcmpys(%2, %3):<<1:sat:raw:hi") +FUNC_XP_OP_PP(mmacls_s0, "%0 += vmpyweh(%2, %3):sat") +FUNC_R_OP_RR(hmmpyl_rs1, "%0 = mpy(%2, %3.L):<<1:rnd:sat") +FUNC_XP_OP_PP(mmaculs_s0, "%0 += vmpyweuh(%2, %3):sat") +FUNC_R_OP_PR(cmpyi_wh, "%0 = cmpyiwh(%2, %3):<<1:rnd:sat") +FUNC_P_OP_PP(vcmpy_s0_sat_i, "%0 = vcmpyi(%2, %3):sat") +FUNC_P_OP_PR(vcrotate, "%0 = vcrotate(%2, %3)") +FUNC_P_OP_PR(vcnegh, "%0 = vcnegh(%2, %3)") + +#if CORE_HAS_AUDIO +FUNC_R_OP_PP(wcmpyrw, "%0 = cmpyrw(%2, %3):<<1:sat") +#endif + +FUNC_R_OP_RR(addh_l16_sat_ll, "%0 = add(%2.L, %3.L):sat") +FUNC_P_OP_P(vconj, "%0 = vconj(%2):sat") +FUNC_P_OP_PP(vxaddsubw, "%0 = vxaddsubw(%2, %3):sat") +FUNC_P_OP_P(vabshsat, "%0 = vabsh(%2):sat") +FUNC_P_OP_PP(vnavgwr, "%0 = vnavgw(%2, %3):rnd:sat") +FUNC_R_OP_RI(round_ri_sat, "%0 = round(%2, #%3):sat") +FUNC_R_OP_RR(asr_r_r_sat, "%0 = asr(%2, %3):sat") + +FUNC_XPp_OP_PP(ACS, "%0, p2 = vacsh(%3, %4)") + +/* Floating point */ +FUNC_R_OP_RR(sfmin, "%0 = sfmin(%2, %3)") 
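+/*
+ * For readers following the templates above, a rough sketch of what
+ * FUNC_R_OP_RR(sfmin, "%0 = sfmin(%2, %3)") expands to is shown below.
+ * The name "sfmin_expanded" is used only for illustration and is not part
+ * of the patch; the function actually generated by the macro is sfmin.
+ */
+static uint32_t __attribute__((unused))
+sfmin_expanded(uint32_t src1, uint32_t src2, uint32_t *usr_result)
+{
+    uint32_t result;
+    uint32_t usr;
+    asm(CLEAR_USRBITS               /* clear USR bits 0-5 before the test */
+        "%0 = sfmin(%2, %3)\n\t"
+        "%1 = usr\n\t"
+        : "=r"(result), "=r"(usr)
+        : "r"(src1), "r"(src2)
+        : "r2", "usr");
+    *usr_result = usr & 0x3f;       /* only the six sticky bits are checked */
+    return result;
+}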
+FUNC_R_OP_RR(sfmax, "%0 = sfmax(%2, %3)") +FUNC_R_OP_RR(sfadd, "%0 = sfadd(%2, %3)") +FUNC_R_OP_RR(sfsub, "%0 = sfsub(%2, %3)") +FUNC_R_OP_RR(sfmpy, "%0 = sfmpy(%2, %3)") +FUNC_XR_OP_RR(sffma, "%0 += sfmpy(%2, %3)") +FUNC_XR_OP_RR(sffms, "%0 -= sfmpy(%2, %3)") +FUNC_CMP_RR(sfcmpuo, "p1 = sfcmp.uo(%2, %3)") +FUNC_CMP_RR(sfcmpeq, "p1 = sfcmp.eq(%2, %3)") +FUNC_CMP_RR(sfcmpgt, "p1 = sfcmp.gt(%2, %3)") +FUNC_CMP_RR(sfcmpge, "p1 = sfcmp.ge(%2, %3)") + +FUNC_P_OP_PP(dfadd, "%0 = dfadd(%2, %3)") +FUNC_P_OP_PP(dfsub, "%0 = dfsub(%2, %3)") + +#if CORE_IS_V67 +FUNC_P_OP_PP(dfmin, "%0 = dfmin(%2, %3)") +FUNC_P_OP_PP(dfmax, "%0 = dfmax(%2, %3)") +FUNC_XP_OP_PP(dfmpyhh, "%0 += dfmpyhh(%2, %3)") +#endif + +FUNC_CMP_PP(dfcmpuo, "p1 = dfcmp.uo(%2, %3)") +FUNC_CMP_PP(dfcmpeq, "p1 = dfcmp.eq(%2, %3)") +FUNC_CMP_PP(dfcmpgt, "p1 = dfcmp.gt(%2, %3)") +FUNC_CMP_PP(dfcmpge, "p1 = dfcmp.ge(%2, %3)") + +/* Conversions from sf */ +FUNC_P_OP_R(conv_sf2df, "%0 = convert_sf2df(%2)") +FUNC_R_OP_R(conv_sf2uw, "%0 = convert_sf2uw(%2)") +FUNC_R_OP_R(conv_sf2w, "%0 = convert_sf2w(%2)") +FUNC_P_OP_R(conv_sf2ud, "%0 = convert_sf2ud(%2)") +FUNC_P_OP_R(conv_sf2d, "%0 = convert_sf2d(%2)") +FUNC_R_OP_R(conv_sf2uw_chop, "%0 = convert_sf2uw(%2):chop") +FUNC_R_OP_R(conv_sf2w_chop, "%0 = convert_sf2w(%2):chop") +FUNC_P_OP_R(conv_sf2ud_chop, "%0 = convert_sf2ud(%2):chop") +FUNC_P_OP_R(conv_sf2d_chop, "%0 = convert_sf2d(%2):chop") + +/* Conversions from df */ +FUNC_R_OP_P(conv_df2sf, "%0 = convert_df2sf(%2)") +FUNC_R_OP_P(conv_df2uw, "%0 = convert_df2uw(%2)") +FUNC_R_OP_P(conv_df2w, "%0 = convert_df2w(%2)") +FUNC_P_OP_P(conv_df2ud, "%0 = convert_df2ud(%2)") +FUNC_P_OP_P(conv_df2d, "%0 = convert_df2d(%2)") +FUNC_R_OP_P(conv_df2uw_chop, "%0 = convert_df2uw(%2):chop") +FUNC_R_OP_P(conv_df2w_chop, "%0 = convert_df2w(%2):chop") +FUNC_P_OP_P(conv_df2ud_chop, "%0 = convert_df2ud(%2):chop") +FUNC_P_OP_P(conv_df2d_chop, "%0 = convert_df2d(%2):chop") + +/* Integer to float conversions */ +FUNC_R_OP_R(conv_uw2sf, "%0 = convert_uw2sf(%2)") +FUNC_R_OP_R(conv_w2sf, "%0 = convert_w2sf(%2)") +FUNC_R_OP_P(conv_ud2sf, "%0 = convert_ud2sf(%2)") +FUNC_R_OP_P(conv_d2sf, "%0 = convert_d2sf(%2)") + +/* Special purpose floating point instructions */ +FUNC_XR_OP_RRp(sffma_sc, "%0 += sfmpy(%2, %3, p2):scale") +FUNC_Rp_OP_RR(sfrecipa, "%0, p2 = sfrecipa(%3, %4)") +FUNC_R_OP_RR(sffixupn, "%0 = sffixupn(%2, %3)") +FUNC_R_OP_RR(sffixupd, "%0 = sffixupd(%2, %3)") +FUNC_R_OP_R(sffixupr, "%0 = sffixupr(%2)") +FUNC_Rp_OP_R(sfinvsqrta, "%0, p2 = sfinvsqrta(%3)") + +/* + * Templates for test cases + * + * Same naming convention as the function templates + */ +#define TEST_x_OP_x(RESTYPE, CHECKFN, SRCTYPE, FUNC, SRC, RES, USR_RES) \ + do { \ + RESTYPE result; \ + SRCTYPE src = SRC; \ + uint32_t usr_result; \ + result = FUNC(src, &usr_result); \ + CHECKFN(result, RES); \ + check(usr_result, USR_RES); \ + } while (0) + +#define TEST_R_OP_R(FUNC, SRC, RES, USR_RES) \ +TEST_x_OP_x(uint32_t, check32, uint32_t, FUNC, SRC, RES, USR_RES) + +#define TEST_R_OP_P(FUNC, SRC, RES, USR_RES) \ +TEST_x_OP_x(uint32_t, check32, uint64_t, FUNC, SRC, RES, USR_RES) + +#define TEST_P_OP_P(FUNC, SRC, RES, USR_RES) \ +TEST_x_OP_x(uint64_t, check64, uint64_t, FUNC, SRC, RES, USR_RES) + +#define TEST_P_OP_R(FUNC, SRC, RES, USR_RES) \ +TEST_x_OP_x(uint64_t, check64, uint32_t, FUNC, SRC, RES, USR_RES) + +#define TEST_xp_OP_x(RESTYPE, CHECKFN, SRCTYPE, FUNC, SRC, \ + RES, PRED_RES, USR_RES) \ + do { \ + RESTYPE result; \ + SRCTYPE src = SRC; \ + uint8_t pred_result; \ + uint32_t usr_result; \ + 
result = FUNC(src, &pred_result, &usr_result); \ + CHECKFN(result, RES); \ + check(pred_result, PRED_RES); \ + check(usr_result, USR_RES); \ + } while (0) + +#define TEST_Rp_OP_R(FUNC, SRC, RES, PRED_RES, USR_RES) \ +TEST_xp_OP_x(uint32_t, check32, uint32_t, FUNC, SRC, RES, PRED_RES, USR_RES) + +#define TEST_x_OP_xx(RESTYPE, CHECKFN, SRC1TYPE, SRC2TYPE, \ + FUNC, SRC1, SRC2, RES, USR_RES) \ + do { \ + RESTYPE result; \ + SRC1TYPE src1 = SRC1; \ + SRC2TYPE src2 = SRC2; \ + uint32_t usr_result; \ + result = FUNC(src1, src2, &usr_result); \ + CHECKFN(result, RES); \ + check(usr_result, USR_RES); \ + } while (0) + +#define TEST_P_OP_PP(FUNC, SRC1, SRC2, RES, USR_RES) \ +TEST_x_OP_xx(uint64_t, check64, uint64_t, uint64_t, \ + FUNC, SRC1, SRC2, RES, USR_RES) + +#define TEST_R_OP_PP(FUNC, SRC1, SRC2, RES, USR_RES) \ +TEST_x_OP_xx(uint32_t, check32, uint64_t, uint64_t, \ + FUNC, SRC1, SRC2, RES, USR_RES) + +#define TEST_P_OP_RR(FUNC, SRC1, SRC2, RES, USR_RES) \ +TEST_x_OP_xx(uint64_t, check64, uint32_t, uint32_t, \ + FUNC, SRC1, SRC2, RES, USR_RES) + +#define TEST_R_OP_RR(FUNC, SRC1, SRC2, RES, USR_RES) \ +TEST_x_OP_xx(uint32_t, check32, uint32_t, uint32_t, \ + FUNC, SRC1, SRC2, RES, USR_RES) + +#define TEST_R_OP_PR(FUNC, SRC1, SRC2, RES, USR_RES) \ +TEST_x_OP_xx(uint32_t, check32, uint64_t, uint32_t, \ + FUNC, SRC1, SRC2, RES, USR_RES) + +#define TEST_P_OP_PR(FUNC, SRC1, SRC2, RES, USR_RES) \ +TEST_x_OP_xx(uint64_t, check64, uint64_t, uint32_t, \ + FUNC, SRC1, SRC2, RES, USR_RES) + +#define TEST_xp_OP_xx(RESTYPE, CHECKFN, SRC1TYPE, SRC2TYPE, FUNC, SRC1, SRC2, \ + RES, PRED_RES, USR_RES) \ + do { \ + RESTYPE result; \ + SRC1TYPE src1 = SRC1; \ + SRC2TYPE src2 = SRC2; \ + uint8_t pred_result; \ + uint32_t usr_result; \ + result = FUNC(src1, src2, &pred_result, &usr_result); \ + CHECKFN(result, RES); \ + check(pred_result, PRED_RES); \ + check(usr_result, USR_RES); \ + } while (0) + +#define TEST_Rp_OP_RR(FUNC, SRC1, SRC2, RES, PRED_RES, USR_RES) \ +TEST_xp_OP_xx(uint32_t, check32, uint32_t, uint32_t, FUNC, SRC1, SRC2, \ + RES, PRED_RES, USR_RES) + +#define TEST_x_OP_xI(RESTYPE, CHECKFN, SRC1TYPE, \ + FUNC, SRC1, SRC2, RES, USR_RES) \ + do { \ + RESTYPE result; \ + SRC1TYPE src1 = SRC1; \ + uint32_t src2 = SRC2; \ + uint32_t usr_result; \ + result = FUNC(src1, src2, &usr_result); \ + CHECKFN(result, RES); \ + check(usr_result, USR_RES); \ + } while (0) + +#define TEST_R_OP_RI(FUNC, SRC1, SRC2, RES, USR_RES) \ +TEST_x_OP_xI(uint32_t, check32, uint32_t, \ + FUNC, SRC1, SRC2, RES, USR_RES) + +#define TEST_R_OP_PI(FUNC, SRC1, SRC2, RES, USR_RES) \ +TEST_x_OP_xI(uint32_t, check64, uint64_t, \ + FUNC, SRC1, SRC2, RES, USR_RES) + +#define TEST_Xx_OP_xx(RESTYPE, CHECKFN, SRC1TYPE, SRC2TYPE, \ + FUNC, RESIN, SRC1, SRC2, RES, USR_RES) \ + do { \ + RESTYPE result = RESIN; \ + SRC1TYPE src1 = SRC1; \ + SRC2TYPE src2 = SRC2; \ + uint32_t usr_result; \ + result = FUNC(result, src1, src2, &usr_result); \ + CHECKFN(result, RES); \ + check(usr_result, USR_RES); \ + } while (0) + +#define TEST_XR_OP_RR(FUNC, RESIN, SRC1, SRC2, RES, USR_RES) \ +TEST_Xx_OP_xx(uint32_t, check32, uint32_t, uint32_t, \ + FUNC, RESIN, SRC1, SRC2, RES, USR_RES) + +#define TEST_XP_OP_PP(FUNC, RESIN, SRC1, SRC2, RES, USR_RES) \ +TEST_Xx_OP_xx(uint64_t, check64, uint64_t, uint64_t, \ + FUNC, RESIN, SRC1, SRC2, RES, USR_RES) + +#define TEST_XP_OP_RR(FUNC, RESIN, SRC1, SRC2, RES, USR_RES) \ +TEST_Xx_OP_xx(uint64_t, check64, uint32_t, uint32_t, \ + FUNC, RESIN, SRC1, SRC2, RES, USR_RES) + +#define TEST_Xxp_OP_xx(RESTYPE, CHECKFN, SRC1TYPE, 
SRC2TYPE, \ + FUNC, RESIN, SRC1, SRC2, RES, PRED_RES, USR_RES) \ + do { \ + RESTYPE result = RESIN; \ + SRC1TYPE src1 = SRC1; \ + SRC2TYPE src2 = SRC2; \ + uint8_t pred_res; \ + uint32_t usr_result; \ + result = FUNC(result, src1, src2, &pred_res, &usr_result); \ + CHECKFN(result, RES); \ + check(usr_result, USR_RES); \ + } while (0) + +#define TEST_XPp_OP_PP(FUNC, RESIN, SRC1, SRC2, RES, PRED_RES, USR_RES) \ +TEST_Xxp_OP_xx(uint64_t, check64, uint64_t, uint64_t, FUNC, RESIN, SRC1, SRC2, \ + RES, PRED_RES, USR_RES) + +#define TEST_Xx_OP_xxp(RESTYPE, CHECKFN, SRC1TYPE, SRC2TYPE, \ + FUNC, RESIN, SRC1, SRC2, PRED, RES, USR_RES) \ + do { \ + RESTYPE result = RESIN; \ + SRC1TYPE src1 = SRC1; \ + SRC2TYPE src2 = SRC2; \ + uint8_t pred = PRED; \ + uint32_t usr_result; \ + result = FUNC(result, src1, src2, pred, &usr_result); \ + CHECKFN(result, RES); \ + check(usr_result, USR_RES); \ + } while (0) + +#define TEST_XR_OP_RRp(FUNC, RESIN, SRC1, SRC2, PRED, RES, USR_RES) \ +TEST_Xx_OP_xxp(uint32_t, check32, uint32_t, uint32_t, \ + FUNC, RESIN, SRC1, SRC2, PRED, RES, USR_RES) + +#define TEST_CMP_xx(SRC1TYPE, SRC2TYPE, \ + FUNC, SRC1, SRC2, RES, USR_RES) \ + do { \ + uint32_t result; \ + SRC1TYPE src1 = SRC1; \ + SRC2TYPE src2 = SRC2; \ + uint32_t usr_result; \ + result = FUNC(src1, src2, &usr_result); \ + check(result, RES); \ + check(usr_result, USR_RES); \ + } while (0) + +#define TEST_CMP_RR(FUNC, SRC1, SRC2, RES, USR_RES) \ +TEST_CMP_xx(uint32_t, uint32_t, FUNC, SRC1, SRC2, RES, USR_RES) + +#define TEST_CMP_PP(FUNC, SRC1, SRC2, RES, USR_RES) \ +TEST_CMP_xx(uint64_t, uint64_t, FUNC, SRC1, SRC2, RES, USR_RES) + +int main() +{ + TEST_R_OP_R(satub, 0, 0, USR_CLEAR); + TEST_R_OP_R(satub, 0xff, 0xff, USR_CLEAR); + TEST_R_OP_R(satub, 0xfff, 0xff, USR_OVF); + TEST_R_OP_R(satub, -1, 0, USR_OVF); + + TEST_P_OP_PP(vaddubs, 0xfeLL, 0x01LL, 0xffLL, USR_CLEAR); + TEST_P_OP_PP(vaddubs, 0xffLL, 0xffLL, 0xffLL, USR_OVF); + + TEST_P_OP_PP(vadduhs, 0xfffeLL, 0x1LL, 0xffffLL, USR_CLEAR); + TEST_P_OP_PP(vadduhs, 0xffffLL, 0x1LL, 0xffffLL, USR_OVF); + + TEST_P_OP_PP(vsububs, 0x0807060504030201LL, 0x0101010101010101LL, + 0x0706050403020100LL, USR_CLEAR); + TEST_P_OP_PP(vsububs, 0x0807060504030201LL, 0x0202020202020202LL, + 0x0605040302010000LL, USR_OVF); + + TEST_P_OP_PP(vsubuhs, 0x0004000300020001LL, 0x0001000100010001LL, + 0x0003000200010000LL, USR_CLEAR); + TEST_P_OP_PP(vsubuhs, 0x0004000300020001LL, 0x0002000200020002LL, + 0x0002000100000000LL, USR_OVF); + + TEST_R_OP_PP(vaddhubs, 0x0004000300020001LL, 0x0001000100010001LL, + 0x05040302, USR_CLEAR); + TEST_R_OP_PP(vaddhubs, 0x7fff000300020001LL, 0x0002000200020002LL, + 0xff050403, USR_OVF); + + TEST_R_OP_P(vsathub, 0x0001000300020001LL, 0x01030201, USR_CLEAR); + TEST_R_OP_P(vsathub, 0x010000700080ffffLL, 0xff708000, USR_OVF); + + TEST_R_OP_P(vsatwuh, 0x0000ffff00000001LL, 0xffff0001, USR_CLEAR); + TEST_R_OP_P(vsatwuh, 0x800000000000ffffLL, 0x0000ffff, USR_OVF); + + TEST_P_OP_P(vsatwuh_nopack, 0x0000ffff00000001LL, 0x0000ffff00000001LL, + USR_CLEAR); + TEST_P_OP_P(vsatwuh_nopack, 0x800000000000ffffLL, 0x000000000000ffffLL, + USR_OVF); + + TEST_R_OP_R(svsathub, 0x00020001, 0x0201, USR_CLEAR); + TEST_R_OP_R(svsathub, 0x0080ffff, 0x8000, USR_OVF); + + TEST_R_OP_PI(asrhub_sat, 0x004f003f002f001fLL, 3, 0x09070503, + USR_CLEAR); + TEST_R_OP_PI(asrhub_sat, 0x004fffff8fff001fLL, 3, 0x09000003, + USR_OVF); + + TEST_R_OP_PI(asrhub_rnd_sat, 0x004f003f002f001fLL, 2, 0x0a080604, + USR_CLEAR); + TEST_R_OP_PI(asrhub_rnd_sat, 0x004fffff8fff001fLL, 2, 0x0a000004, + USR_OVF); + + 
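+    /*
+     * Each TEST_* invocation checks both the returned value and the low six
+     * sticky USR bits left after CLEAR_USRBITS. For instance, the saturating
+     * add below of 0x7fffffff and 0x00000010 exceeds INT32_MAX, so the
+     * result clamps to 0x7fffffff and the sticky OVF bit is expected.
+     */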
TEST_R_OP_RR(addsat, 1, 2, 3, + USR_CLEAR); + TEST_R_OP_RR(addsat, 0x7fffffff, 0x00000010, 0x7fffffff, + USR_OVF); + TEST_R_OP_RR(addsat, 0x80000000, 0x80000006, 0x80000000, + USR_OVF); + + TEST_P_OP_PP(addpsat, 1LL, 2LL, 3LL, USR_CLEAR); + /* overflow to max positive */ + TEST_P_OP_PP(addpsat, 0x7ffffffffffffff0LL, 0x0000000000000010LL, + 0x7fffffffffffffffLL, USR_OVF); + /* overflow to min negative */ + TEST_P_OP_PP(addpsat, 0x8000000000000003LL, 0x8000000000000006LL, + 0x8000000000000000LL, USR_OVF); + + TEST_XR_OP_RR(mpy_acc_sat_hh_s0, 0x7fffffff, 0xffff0000, 0x11110000, + 0x7fffeeee, USR_CLEAR); + TEST_XR_OP_RR(mpy_acc_sat_hh_s0, 0x7fffffff, 0x7fff0000, 0x7fff0000, + 0x7fffffff, USR_OVF); + + TEST_R_OP_RR(mpy_sat_hh_s1, 0xffff0000, 0x11110000, 0xffffddde, + USR_CLEAR); + TEST_R_OP_RR(mpy_sat_hh_s1, 0x7fff0000, 0x7fff0000, 0x7ffe0002, + USR_CLEAR); + TEST_R_OP_RR(mpy_sat_hh_s1, 0x80000000, 0x80000000, 0x7fffffff, + USR_OVF); + + TEST_R_OP_RR(mpy_sat_rnd_hh_s1, 0xffff0000, 0x11110000, 0x00005dde, + USR_CLEAR); + TEST_R_OP_RR(mpy_sat_rnd_hh_s1, 0x7fff0000, 0x7fff0000, 0x7ffe8002, + USR_CLEAR); + TEST_R_OP_RR(mpy_sat_rnd_hh_s1, 0x80000000, 0x80000000, 0x7fffffff, + USR_OVF); + + TEST_R_OP_RR(mpy_up_s1_sat, 0xffff0000, 0x11110000, 0xffffddde, + USR_CLEAR); + TEST_R_OP_RR(mpy_up_s1_sat, 0x7fff0000, 0x7fff0000, 0x7ffe0002, + USR_CLEAR); + TEST_R_OP_RR(mpy_up_s1_sat, 0x80000000, 0x80000000, 0x7fffffff, + USR_OVF); + + TEST_P_OP_RR(vmpy2s_s1, 0x7fff0000, 0x7fff0000, 0x7ffe000200000000LL, + USR_CLEAR); + TEST_P_OP_RR(vmpy2s_s1, 0x80000000, 0x80000000, 0x7fffffff00000000LL, + USR_OVF); + + TEST_P_OP_RR(vmpy2su_s1, 0x7fff0000, 0x7fff0000, 0x7ffe000200000000LL, + USR_CLEAR); + TEST_P_OP_RR(vmpy2su_s1, 0xffffbd97, 0xffffffff, 0xfffe000280000000LL, + USR_OVF); + + TEST_R_OP_RR(vmpy2s_s1pack, 0x7fff0000, 0x7fff0000, 0x7ffe0000, + USR_CLEAR); + TEST_R_OP_RR(vmpy2s_s1pack, 0x80008000, 0x80008000, 0x7fff7fff, + USR_OVF); + + TEST_P_OP_PP(vmpy2es_s1, 0x7fff7fff7fff7fffLL, 0x1fff1fff1fff1fffLL, + 0x1ffec0021ffec002LL, USR_CLEAR); + TEST_P_OP_PP(vmpy2es_s1, 0x8000800080008000LL, 0x8000800080008000LL, + 0x7fffffff7fffffffLL, USR_OVF); + + TEST_R_OP_PP(vdmpyrs_s1, 0x7fff7fff7fff7fffLL, 0x1fff1fff1fff1fffLL, + 0x3ffe3ffe, USR_CLEAR); + TEST_R_OP_PP(vdmpyrs_s1, 0x8000800080008000LL, 0x8000800080008000LL, + 0x7fff7fffLL, USR_OVF); + + TEST_XP_OP_PP(vdmacs_s0, 0x0fffffffULL, 0x00ff00ff00ff00ffLL, + 0x00ff00ff00ff00ffLL, 0x0001fc021001fc01LL, USR_CLEAR); + TEST_XP_OP_PP(vdmacs_s0, 0x01111111ULL, 0x8000800080001000LL, + 0x8000800080008000LL, 0x7fffffff39111111LL, USR_OVF); + + TEST_R_OP_RR(cmpyrs_s0, 0x7fff0000, 0x7fff0000, 0x0000c001, + USR_CLEAR); + TEST_R_OP_RR(cmpyrs_s0, 0x80008000, 0x80008000, 0x7fff0000, + USR_OVF); + + TEST_XP_OP_RR(cmacs_s0, 0x0fffffff, 0x7fff0000, 0x7fff0000, + 0x00000000d000fffeLL, USR_CLEAR); + TEST_XP_OP_RR(cmacs_s0, 0x0fff1111, 0x80008000, 0x80008000, + 0x7fffffff0fff1111LL, USR_OVF); + + TEST_XP_OP_RR(cnacs_s0, 0x000000108fffffffULL, 0x7fff0000, 0x7fff0000, + 0x00000010cfff0000ULL, USR_CLEAR); + TEST_XP_OP_RR(cnacs_s0, 0x000000108ff1111fULL, 0x00002001, 0x00007ffd, + 0x0000001080000000ULL, USR_OVF); + + TEST_P_OP_PP(vrcmpys_s1_h, 0x00ff00ff00ff00ffLL, 0x00ff00ff00ff00ffLL, + 0x0003f8040003f804LL, USR_CLEAR); + TEST_P_OP_PP(vrcmpys_s1_h, 0x8000800080008000LL, 0x8000800080008000LL, + 0x7fffffff7fffffffLL, USR_OVF); + + TEST_XP_OP_PP(mmacls_s0, 0x6fffffff, 0x00ff00ff00ff00ffLL, + 0x00ff00ff00ff00ffLL, 0x0000fe017000fe00LL, USR_CLEAR); + TEST_XP_OP_PP(mmacls_s0, 0x6f1111ff, 
0x8000800080008000LL, + 0x1000100080008000LL, 0xf80008007fffffffLL, USR_OVF); + + TEST_R_OP_RR(hmmpyl_rs1, 0x7fff0000, 0x7fff0001, 0x0000fffe, + USR_CLEAR); + TEST_R_OP_RR(hmmpyl_rs1, 0x80000000, 0x80008000, 0x7fffffff, + USR_OVF); + + TEST_XP_OP_PP(mmaculs_s0, 0x000000007fffffffULL, 0xffff800080008000LL, + 0xffff800080008000LL, 0xffffc00040003fffLL, USR_CLEAR); + TEST_XP_OP_PP(mmaculs_s0, 0x000011107fffffffULL, 0x00ff00ff00ff00ffLL, + 0x00ff00ff001100ffLL, 0x00010f117fffffffLL, USR_OVF); + + TEST_R_OP_PR(cmpyi_wh, 0x7fff000000000000LL, 0x7fff0001, 0x0000fffe, + USR_CLEAR); + TEST_R_OP_PR(cmpyi_wh, 0x8000000000000000LL, 0x80008000, 0x7fffffff, + USR_OVF); + + TEST_P_OP_PP(vcmpy_s0_sat_i, 0x00ff00ff00ff00ffLL, 0x00ff00ff00ff00ffLL, + 0x0001fc020001fc02LL, USR_CLEAR); + TEST_P_OP_PP(vcmpy_s0_sat_i, 0x8000800080008000LL, 0x8000800080008000LL, + 0x7fffffff7fffffffLL, USR_OVF); + + TEST_P_OP_PR(vcrotate, 0x8000000000000000LL, 0x00000002, + 0x8000000000000000LL, USR_CLEAR); + TEST_P_OP_PR(vcrotate, 0x7fff80007fff8000LL, 0x00000001, + 0x7fff80007fff7fffLL, USR_OVF); + + TEST_P_OP_PR(vcnegh, 0x8000000000000000LL, 0x00000002, + 0x8000000000000000LL, USR_CLEAR); + TEST_P_OP_PR(vcnegh, 0x7fff80007fff8000LL, 0x00000001, + 0x7fff80007fff7fffLL, USR_OVF); + +#if CORE_HAS_AUDIO + TEST_R_OP_PP(wcmpyrw, 0x8765432101234567LL, 0x00000002ffffffffLL, + 0x00000001, USR_CLEAR); + TEST_R_OP_PP(wcmpyrw, 0x800000007fffffffLL, 0x000000ff7fffffffLL, + 0x7fffffff, USR_OVF); + TEST_R_OP_PP(wcmpyrw, 0x7fffffff80000000LL, 0x7fffffff000000ffLL, + 0x80000000, USR_OVF); +#else + printf("Audio instructions skipped\n"); +#endif + + TEST_R_OP_RR(addh_l16_sat_ll, 0x0000ffff, 0x00000002, 0x00000001, + USR_CLEAR); + TEST_R_OP_RR(addh_l16_sat_ll, 0x00007fff, 0x00000005, 0x00007fff, + USR_OVF); + TEST_R_OP_RR(addh_l16_sat_ll, 0x00008000, 0x00008000, 0xffff8000, + USR_OVF); + + TEST_P_OP_P(vconj, 0x0000ffff00000001LL, 0x0000ffff00000001LL, USR_CLEAR); + TEST_P_OP_P(vconj, 0x800000000000ffffLL, 0x7fff00000000ffffLL, USR_OVF); + + TEST_P_OP_PP(vxaddsubw, 0x8765432101234567LL, 0x00000002ffffffffLL, + 0x8765432201234569LL, USR_CLEAR); + TEST_P_OP_PP(vxaddsubw, 0x7fffffff7fffffffLL, 0xffffffffffffffffLL, + 0x7fffffff7ffffffeLL, USR_OVF); + TEST_P_OP_PP(vxaddsubw, 0x800000000fffffffLL, 0x0000000a00000008LL, + 0x8000000010000009LL, USR_OVF); + + TEST_P_OP_P(vabshsat, 0x0001000afffff800LL, 0x0001000a00010800LL, + USR_CLEAR); + TEST_P_OP_P(vabshsat, 0x8000000b000c000aLL, 0x7fff000b000c000aLL, + USR_OVF); + + TEST_P_OP_PP(vnavgwr, 0x8765432101234567LL, 0x00000002ffffffffLL, + 0xc3b2a1900091a2b4LL, USR_CLEAR); + TEST_P_OP_PP(vnavgwr, 0x7fffffff8000000aLL, 0x80000000ffffffffLL, + 0x7fffffffc0000006LL, USR_OVF); + + TEST_R_OP_RI(round_ri_sat, 0x0000ffff, 2, 0x00004000, USR_CLEAR); + TEST_R_OP_RI(round_ri_sat, 0x7fffffff, 2, 0x1fffffff, USR_OVF); + + TEST_R_OP_RR(asr_r_r_sat, 0x0000ffff, 0x00000002, 0x00003fff, + USR_CLEAR); + TEST_R_OP_RR(asr_r_r_sat, 0x00ffffff, 0xfffffff5, 0x7fffffff, + USR_OVF); + TEST_R_OP_RR(asr_r_r_sat, 0x80000000, 0xfffffff5, 0x80000000, + USR_OVF); + + TEST_XPp_OP_PP(ACS, 0x0004000300020001ULL, 0x0001000200030004ULL, + 0x0000000000000000ULL, 0x0004000300030004ULL, 0xf0, + USR_CLEAR); + TEST_XPp_OP_PP(ACS, 0x0004000300020001ULL, 0x0001000200030004ULL, + 0x000affff000d0000ULL, 0x000e0003000f0004ULL, 0xcc, + USR_CLEAR); + TEST_XPp_OP_PP(ACS, 0x00047fff00020001ULL, 0x00017fff00030004ULL, + 0x000a0fff000d0000ULL, 0x000e7fff000f0004ULL, 0xfc, + USR_OVF); + TEST_XPp_OP_PP(ACS, 0x00047fff00020001ULL, 0x00017fff00030004ULL, + 
0x000a0fff000d0000ULL, 0x000e7fff000f0004ULL, 0xf0, + USR_OVF); + + /* Floating point */ + TEST_R_OP_RR(sfmin, SF_one, SF_small_neg, SF_small_neg, USR_CLEAR); + TEST_R_OP_RR(sfmin, SF_one, SF_SNaN, SF_one, USR_FPINVF); + TEST_R_OP_RR(sfmin, SF_SNaN, SF_one, SF_one, USR_FPINVF); + TEST_R_OP_RR(sfmin, SF_one, SF_QNaN, SF_one, USR_CLEAR); + TEST_R_OP_RR(sfmin, SF_QNaN, SF_one, SF_one, USR_CLEAR); + TEST_R_OP_RR(sfmin, SF_SNaN, SF_QNaN, SF_HEX_NaN, USR_FPINVF); + TEST_R_OP_RR(sfmin, SF_QNaN, SF_SNaN, SF_HEX_NaN, USR_FPINVF); + TEST_R_OP_RR(sfmin, SF_zero, SF_zero_neg, SF_zero_neg, USR_CLEAR); + TEST_R_OP_RR(sfmin, SF_zero_neg, SF_zero, SF_zero_neg, USR_CLEAR); + + TEST_R_OP_RR(sfmax, SF_one, SF_small_neg, SF_one, USR_CLEAR); + TEST_R_OP_RR(sfmax, SF_one, SF_SNaN, SF_one, USR_FPINVF); + TEST_R_OP_RR(sfmax, SF_SNaN, SF_one, SF_one, USR_FPINVF); + TEST_R_OP_RR(sfmax, SF_one, SF_QNaN, SF_one, USR_CLEAR); + TEST_R_OP_RR(sfmax, SF_QNaN, SF_one, SF_one, USR_CLEAR); + TEST_R_OP_RR(sfmax, SF_SNaN, SF_QNaN, SF_HEX_NaN, USR_FPINVF); + TEST_R_OP_RR(sfmax, SF_QNaN, SF_SNaN, SF_HEX_NaN, USR_FPINVF); + TEST_R_OP_RR(sfmax, SF_zero, SF_zero_neg, SF_zero, USR_CLEAR); + TEST_R_OP_RR(sfmax, SF_zero_neg, SF_zero, SF_zero, USR_CLEAR); + + TEST_R_OP_RR(sfadd, SF_one, SF_QNaN, SF_HEX_NaN, USR_CLEAR); + TEST_R_OP_RR(sfadd, SF_one, SF_SNaN, SF_HEX_NaN, USR_FPINVF); + TEST_R_OP_RR(sfadd, SF_QNaN, SF_SNaN, SF_HEX_NaN, USR_FPINVF); + TEST_R_OP_RR(sfadd, SF_SNaN, SF_QNaN, SF_HEX_NaN, USR_FPINVF); + + TEST_R_OP_RR(sfsub, SF_one, SF_QNaN, SF_HEX_NaN, USR_CLEAR); + TEST_R_OP_RR(sfsub, SF_one, SF_SNaN, SF_HEX_NaN, USR_FPINVF); + TEST_R_OP_RR(sfsub, SF_QNaN, SF_SNaN, SF_HEX_NaN, USR_FPINVF); + TEST_R_OP_RR(sfsub, SF_SNaN, SF_QNaN, SF_HEX_NaN, USR_FPINVF); + + TEST_R_OP_RR(sfmpy, SF_one, SF_QNaN, SF_HEX_NaN, USR_CLEAR); + TEST_R_OP_RR(sfmpy, SF_one, SF_SNaN, SF_HEX_NaN, USR_FPINVF); + TEST_R_OP_RR(sfmpy, SF_QNaN, SF_SNaN, SF_HEX_NaN, USR_FPINVF); + TEST_R_OP_RR(sfmpy, SF_SNaN, SF_QNaN, SF_HEX_NaN, USR_FPINVF); + + TEST_XR_OP_RR(sffma, SF_one, SF_one, SF_one, SF_two, USR_CLEAR); + TEST_XR_OP_RR(sffma, SF_zero, SF_one, SF_QNaN, SF_HEX_NaN, USR_CLEAR); + TEST_XR_OP_RR(sffma, SF_zero, SF_one, SF_SNaN, SF_HEX_NaN, USR_FPINVF); + TEST_XR_OP_RR(sffma, SF_zero, SF_QNaN, SF_SNaN, SF_HEX_NaN, USR_FPINVF); + TEST_XR_OP_RR(sffma, SF_zero, SF_SNaN, SF_QNaN, SF_HEX_NaN, USR_FPINVF); + + TEST_XR_OP_RR(sffms, SF_one, SF_one, SF_one, SF_zero, USR_CLEAR); + TEST_XR_OP_RR(sffms, SF_zero, SF_one, SF_QNaN, SF_HEX_NaN, USR_CLEAR); + TEST_XR_OP_RR(sffms, SF_zero, SF_one, SF_SNaN, SF_HEX_NaN, USR_FPINVF); + TEST_XR_OP_RR(sffms, SF_zero, SF_QNaN, SF_SNaN, SF_HEX_NaN, USR_FPINVF); + TEST_XR_OP_RR(sffms, SF_zero, SF_SNaN, SF_QNaN, SF_HEX_NaN, USR_FPINVF); + + TEST_CMP_RR(sfcmpuo, SF_one, SF_large_pos, 0x00, USR_CLEAR); + TEST_CMP_RR(sfcmpuo, SF_INF, SF_large_pos, 0x00, USR_CLEAR); + TEST_CMP_RR(sfcmpuo, SF_QNaN, SF_large_pos, 0xff, USR_CLEAR); + TEST_CMP_RR(sfcmpuo, SF_QNaN_neg, SF_large_pos, 0xff, USR_CLEAR); + TEST_CMP_RR(sfcmpuo, SF_SNaN, SF_large_pos, 0xff, USR_FPINVF); + TEST_CMP_RR(sfcmpuo, SF_SNaN_neg, SF_large_pos, 0xff, USR_FPINVF); + TEST_CMP_RR(sfcmpuo, SF_QNaN, SF_QNaN, 0xff, USR_CLEAR); + TEST_CMP_RR(sfcmpuo, SF_QNaN, SF_SNaN, 0xff, USR_FPINVF); + + TEST_CMP_RR(sfcmpeq, SF_one, SF_QNaN, 0x00, USR_CLEAR); + TEST_CMP_RR(sfcmpeq, SF_one, SF_SNaN, 0x00, USR_FPINVF); + TEST_CMP_RR(sfcmpgt, SF_one, SF_QNaN, 0x00, USR_CLEAR); + TEST_CMP_RR(sfcmpgt, SF_one, SF_SNaN, 0x00, USR_FPINVF); + TEST_CMP_RR(sfcmpge, SF_one, SF_QNaN, 0x00, USR_CLEAR); + 
TEST_CMP_RR(sfcmpge, SF_one, SF_SNaN, 0x00, USR_FPINVF); + + TEST_P_OP_PP(dfadd, DF_any, DF_QNaN, DF_HEX_NaN, USR_CLEAR); + TEST_P_OP_PP(dfadd, DF_any, DF_SNaN, DF_HEX_NaN, USR_FPINVF); + TEST_P_OP_PP(dfadd, DF_QNaN, DF_SNaN, DF_HEX_NaN, USR_FPINVF); + TEST_P_OP_PP(dfadd, DF_SNaN, DF_QNaN, DF_HEX_NaN, USR_FPINVF); + + TEST_P_OP_PP(dfsub, DF_any, DF_QNaN, DF_HEX_NaN, USR_CLEAR); + TEST_P_OP_PP(dfsub, DF_any, DF_SNaN, DF_HEX_NaN, USR_FPINVF); + TEST_P_OP_PP(dfsub, DF_QNaN, DF_SNaN, DF_HEX_NaN, USR_FPINVF); + TEST_P_OP_PP(dfsub, DF_SNaN, DF_QNaN, DF_HEX_NaN, USR_FPINVF); + +#if CORE_IS_V67 + TEST_P_OP_PP(dfmin, DF_any, DF_small_neg, DF_small_neg, USR_CLEAR); + TEST_P_OP_PP(dfmin, DF_any, DF_SNaN, DF_any, USR_FPINVF); + TEST_P_OP_PP(dfmin, DF_SNaN, DF_any, DF_any, USR_FPINVF); + TEST_P_OP_PP(dfmin, DF_any, DF_QNaN, DF_any, USR_CLEAR); + TEST_P_OP_PP(dfmin, DF_QNaN, DF_any, DF_any, USR_CLEAR); + TEST_P_OP_PP(dfmin, DF_SNaN, DF_QNaN, DF_HEX_NaN, USR_FPINVF); + TEST_P_OP_PP(dfmin, DF_QNaN, DF_SNaN, DF_HEX_NaN, USR_FPINVF); + TEST_P_OP_PP(dfmin, DF_zero, DF_zero_neg, DF_zero_neg, USR_CLEAR); + TEST_P_OP_PP(dfmin, DF_zero_neg, DF_zero, DF_zero_neg, USR_CLEAR); + + TEST_P_OP_PP(dfmax, DF_any, DF_small_neg, DF_any, USR_CLEAR); + TEST_P_OP_PP(dfmax, DF_any, DF_SNaN, DF_any, USR_FPINVF); + TEST_P_OP_PP(dfmax, DF_SNaN, DF_any, DF_any, USR_FPINVF); + TEST_P_OP_PP(dfmax, DF_any, DF_QNaN, DF_any, USR_CLEAR); + TEST_P_OP_PP(dfmax, DF_QNaN, DF_any, DF_any, USR_CLEAR); + TEST_P_OP_PP(dfmax, DF_SNaN, DF_QNaN, DF_HEX_NaN, USR_FPINVF); + TEST_P_OP_PP(dfmax, DF_QNaN, DF_SNaN, DF_HEX_NaN, USR_FPINVF); + TEST_P_OP_PP(dfmax, DF_zero, DF_zero_neg, DF_zero, USR_CLEAR); + TEST_P_OP_PP(dfmax, DF_zero_neg, DF_zero, DF_zero, USR_CLEAR); + + TEST_XP_OP_PP(dfmpyhh, DF_one, DF_one, DF_one, DF_one_hh, USR_CLEAR); + TEST_XP_OP_PP(dfmpyhh, DF_zero, DF_any, DF_QNaN, DF_HEX_NaN, USR_CLEAR); + TEST_XP_OP_PP(dfmpyhh, DF_zero, DF_any, DF_SNaN, DF_HEX_NaN, USR_FPINVF); + TEST_XP_OP_PP(dfmpyhh, DF_zero, DF_QNaN, DF_SNaN, DF_HEX_NaN, USR_FPINVF); + TEST_XP_OP_PP(dfmpyhh, DF_zero, DF_SNaN, DF_QNaN, DF_HEX_NaN, USR_FPINVF); +#else + printf("v67 instructions skipped\n"); +#endif + + TEST_CMP_PP(dfcmpuo, DF_small_neg, DF_any, 0x00, USR_CLEAR); + TEST_CMP_PP(dfcmpuo, DF_large_pos, DF_any, 0x00, USR_CLEAR); + TEST_CMP_PP(dfcmpuo, DF_QNaN, DF_any, 0xff, USR_CLEAR); + TEST_CMP_PP(dfcmpuo, DF_QNaN_neg, DF_any, 0xff, USR_CLEAR); + TEST_CMP_PP(dfcmpuo, DF_SNaN, DF_any, 0xff, USR_FPINVF); + TEST_CMP_PP(dfcmpuo, DF_SNaN_neg, DF_any, 0xff, USR_FPINVF); + TEST_CMP_PP(dfcmpuo, DF_QNaN, DF_QNaN, 0xff, USR_CLEAR); + TEST_CMP_PP(dfcmpuo, DF_QNaN, DF_SNaN, 0xff, USR_FPINVF); + + TEST_CMP_PP(dfcmpeq, DF_any, DF_QNaN, 0x00, USR_CLEAR); + TEST_CMP_PP(dfcmpeq, DF_any, DF_SNaN, 0x00, USR_FPINVF); + TEST_CMP_PP(dfcmpgt, DF_any, DF_QNaN, 0x00, USR_CLEAR); + TEST_CMP_PP(dfcmpgt, DF_any, DF_SNaN, 0x00, USR_FPINVF); + TEST_CMP_PP(dfcmpge, DF_any, DF_QNaN, 0x00, USR_CLEAR); + TEST_CMP_PP(dfcmpge, DF_any, DF_SNaN, 0x00, USR_FPINVF); + + TEST_P_OP_R(conv_sf2df, SF_QNaN, DF_HEX_NaN, USR_CLEAR); + TEST_P_OP_R(conv_sf2df, SF_SNaN, DF_HEX_NaN, USR_FPINVF); + TEST_R_OP_R(conv_sf2uw, SF_QNaN, 0xffffffff, USR_FPINVF); + TEST_R_OP_R(conv_sf2uw, SF_SNaN, 0xffffffff, USR_FPINVF); + TEST_R_OP_R(conv_sf2w, SF_QNaN, 0xffffffff, USR_FPINVF); + TEST_R_OP_R(conv_sf2w, SF_SNaN, 0xffffffff, USR_FPINVF); + TEST_P_OP_R(conv_sf2ud, SF_QNaN, 0xffffffffffffffffULL, USR_FPINVF); + TEST_P_OP_R(conv_sf2ud, SF_SNaN, 0xffffffffffffffffULL, USR_FPINVF); + TEST_P_OP_R(conv_sf2d, SF_QNaN, 
0xffffffffffffffffULL, USR_FPINVF); + TEST_P_OP_R(conv_sf2d, SF_SNaN, 0xffffffffffffffffULL, USR_FPINVF); + TEST_R_OP_R(conv_sf2uw_chop, SF_QNaN, 0xffffffff, USR_FPINVF); + TEST_R_OP_R(conv_sf2uw_chop, SF_SNaN, 0xffffffff, USR_FPINVF); + TEST_R_OP_R(conv_sf2w_chop, SF_QNaN, 0xffffffff, USR_FPINVF); + TEST_R_OP_R(conv_sf2w_chop, SF_SNaN, 0xffffffff, USR_FPINVF); + TEST_P_OP_R(conv_sf2ud_chop, SF_QNaN, 0xffffffffffffffffULL, USR_FPINVF); + TEST_P_OP_R(conv_sf2ud_chop, SF_SNaN, 0xffffffffffffffffULL, USR_FPINVF); + TEST_P_OP_R(conv_sf2d_chop, SF_QNaN, 0xffffffffffffffffULL, USR_FPINVF); + TEST_P_OP_R(conv_sf2d_chop, SF_SNaN, 0xffffffffffffffffULL, USR_FPINVF); + + TEST_R_OP_P(conv_df2sf, DF_QNaN, SF_HEX_NaN, USR_CLEAR); + TEST_R_OP_P(conv_df2sf, DF_SNaN, SF_HEX_NaN, USR_FPINVF); + TEST_R_OP_P(conv_df2uw, DF_QNaN, 0xffffffff, USR_FPINVF); + TEST_R_OP_P(conv_df2uw, DF_SNaN, 0xffffffff, USR_FPINVF); + TEST_R_OP_P(conv_df2w, DF_QNaN, 0xffffffff, USR_FPINVF); + TEST_R_OP_P(conv_df2w, DF_SNaN, 0xffffffff, USR_FPINVF); + TEST_P_OP_P(conv_df2ud, DF_QNaN, 0xffffffffffffffffULL, USR_FPINVF); + TEST_P_OP_P(conv_df2ud, DF_SNaN, 0xffffffffffffffffULL, USR_FPINVF); + TEST_P_OP_P(conv_df2d, DF_QNaN, 0xffffffffffffffffULL, USR_FPINVF); + TEST_P_OP_P(conv_df2d, DF_SNaN, 0xffffffffffffffffULL, USR_FPINVF); + TEST_R_OP_P(conv_df2uw_chop, DF_QNaN, 0xffffffff, USR_FPINVF); + TEST_R_OP_P(conv_df2uw_chop, DF_SNaN, 0xffffffff, USR_FPINVF); + + /* Test for typo in HELPER(conv_df2uw_chop) */ + TEST_R_OP_P(conv_df2uw_chop, 0xffffff7f00000001ULL, 0xffffffff, USR_FPINVF); + + TEST_R_OP_P(conv_df2w_chop, DF_QNaN, 0xffffffff, USR_FPINVF); + TEST_R_OP_P(conv_df2w_chop, DF_SNaN, 0xffffffff, USR_FPINVF); + TEST_P_OP_P(conv_df2ud_chop, DF_QNaN, 0xffffffffffffffffULL, USR_FPINVF); + TEST_P_OP_P(conv_df2ud_chop, DF_SNaN, 0xffffffffffffffffULL, USR_FPINVF); + TEST_P_OP_P(conv_df2d_chop, DF_QNaN, 0xffffffffffffffffULL, USR_FPINVF); + TEST_P_OP_P(conv_df2d_chop, DF_SNaN, 0xffffffffffffffffULL, USR_FPINVF); + + TEST_R_OP_R(conv_uw2sf, 0x00000001, SF_one, USR_CLEAR); + TEST_R_OP_R(conv_uw2sf, 0x010020a5, 0x4b801052, USR_FPINPF); + TEST_R_OP_R(conv_w2sf, 0x00000001, SF_one, USR_CLEAR); + TEST_R_OP_R(conv_w2sf, 0x010020a5, 0x4b801052, USR_FPINPF); + TEST_R_OP_P(conv_ud2sf, 0x0000000000000001ULL, SF_one, USR_CLEAR); + TEST_R_OP_P(conv_ud2sf, 0x00000000010020a5ULL, 0x4b801052, USR_FPINPF); + TEST_R_OP_P(conv_d2sf, 0x0000000000000001ULL, SF_one, USR_CLEAR); + TEST_R_OP_P(conv_d2sf, 0x00000000010020a5ULL, 0x4b801052, USR_FPINPF); + + TEST_XR_OP_RRp(sffma_sc, SF_one, SF_one, SF_one, 1, SF_four, + USR_CLEAR); + TEST_XR_OP_RRp(sffma_sc, SF_QNaN, SF_one, SF_one, 1, SF_HEX_NaN, + USR_CLEAR); + TEST_XR_OP_RRp(sffma_sc, SF_one, SF_QNaN, SF_one, 1, SF_HEX_NaN, + USR_CLEAR); + TEST_XR_OP_RRp(sffma_sc, SF_one, SF_one, SF_QNaN, 1, SF_HEX_NaN, + USR_CLEAR); + TEST_XR_OP_RRp(sffma_sc, SF_SNaN, SF_one, SF_one, 1, SF_HEX_NaN, + USR_FPINVF); + TEST_XR_OP_RRp(sffma_sc, SF_one, SF_SNaN, SF_one, 1, SF_HEX_NaN, + USR_FPINVF); + TEST_XR_OP_RRp(sffma_sc, SF_one, SF_one, SF_SNaN, 1, SF_HEX_NaN, + USR_FPINVF); + + TEST_Rp_OP_RR(sfrecipa, SF_one, SF_one, SF_one_recip, 0x00, + USR_CLEAR); + TEST_Rp_OP_RR(sfrecipa, SF_QNaN, SF_one, SF_HEX_NaN, 0x00, + USR_CLEAR); + TEST_Rp_OP_RR(sfrecipa, SF_one, SF_QNaN, SF_HEX_NaN, 0x00, + USR_CLEAR); + TEST_Rp_OP_RR(sfrecipa, SF_one, SF_SNaN, SF_HEX_NaN, 0x00, + USR_FPINVF); + TEST_Rp_OP_RR(sfrecipa, SF_SNaN, SF_one, SF_HEX_NaN, 0x00, + USR_FPINVF); + + TEST_R_OP_RR(sffixupn, SF_one, SF_one, SF_one, USR_CLEAR); + 
TEST_R_OP_RR(sffixupn, SF_QNaN, SF_one, SF_HEX_NaN, USR_CLEAR);
+    TEST_R_OP_RR(sffixupn, SF_one, SF_QNaN, SF_HEX_NaN, USR_CLEAR);
+    TEST_R_OP_RR(sffixupn, SF_SNaN, SF_one, SF_HEX_NaN, USR_FPINVF);
+    TEST_R_OP_RR(sffixupn, SF_one, SF_SNaN, SF_HEX_NaN, USR_FPINVF);
+
+    TEST_R_OP_RR(sffixupd, SF_one, SF_one, SF_one, USR_CLEAR);
+    TEST_R_OP_RR(sffixupd, SF_QNaN, SF_one, SF_HEX_NaN, USR_CLEAR);
+    TEST_R_OP_RR(sffixupd, SF_one, SF_QNaN, SF_HEX_NaN, USR_CLEAR);
+    TEST_R_OP_RR(sffixupd, SF_SNaN, SF_one, SF_HEX_NaN, USR_FPINVF);
+    TEST_R_OP_RR(sffixupd, SF_one, SF_SNaN, SF_HEX_NaN, USR_FPINVF);
+
+    TEST_R_OP_R(sffixupr, SF_one, SF_one, USR_CLEAR);
+    TEST_R_OP_R(sffixupr, SF_QNaN, SF_HEX_NaN, USR_CLEAR);
+    TEST_R_OP_R(sffixupr, SF_SNaN, SF_HEX_NaN, USR_FPINVF);
+
+    TEST_Rp_OP_R(sfinvsqrta, SF_one, SF_one_invsqrta, 0x00, USR_CLEAR);
+    TEST_Rp_OP_R(sfinvsqrta, SF_zero, SF_one, 0x00, USR_CLEAR);
+    TEST_Rp_OP_R(sfinvsqrta, SF_QNaN, SF_HEX_NaN, 0x00, USR_CLEAR);
+    TEST_Rp_OP_R(sfinvsqrta, SF_small_neg, SF_HEX_NaN, 0x00, USR_FPINVF);
+    TEST_Rp_OP_R(sfinvsqrta, SF_SNaN, SF_HEX_NaN, 0x00, USR_FPINVF);
+
+    puts(err ? "FAIL" : "PASS");
+    return err;
+}
diff --git a/tests/tcg/hexagon/vector_add_int.c b/tests/tcg/hexagon/vector_add_int.c
new file mode 100644
index 0000000000..d6010ea14b
--- /dev/null
+++ b/tests/tcg/hexagon/vector_add_int.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdio.h>
+
+int gA[401];
+int gB[401];
+int gC[401];
+
+void vector_add_int()
+{
+    int i;
+    for (i = 0; i < 400; i++) {
+        gA[i] = gB[i] + gC[i];
+    }
+}
+
+int main()
+{
+    int error = 0;
+    int i;
+    for (i = 0; i < 400; i++) {
+        gB[i] = i * 2;
+        gC[i] = i * 3;
+    }
+    gA[400] = 17;
+    vector_add_int();
+    for (i = 0; i < 400; i++) {
+        if (gA[i] != i * 5) {
+            error++;
+            printf("ERROR: gB[%d] = %d\t", i, gB[i]);
+            printf("gC[%d] = %d\t", i, gC[i]);
+            printf("gA[%d] = %d\n", i, gA[i]);
+        }
+    }
+    if (gA[400] != 17) {
+        error++;
+        printf("ERROR: Overran the buffer\n");
+    }
+    if (!error) {
+        printf("PASS\n");
+        return 0;
+    } else {
+        printf("FAIL\n");
+        return 1;
+    }
+}
diff --git a/tests/tcg/hppa/Makefile.target b/tests/tcg/hppa/Makefile.target
index 473864d1d4..b78e6b4849 100644
--- a/tests/tcg/hppa/Makefile.target
+++ b/tests/tcg/hppa/Makefile.target
@@ -5,3 +5,15 @@
 # On parisc Linux supports 4K/16K/64K (but currently only 4k works)
 EXTRA_RUNS+=run-test-mmap-4096 # run-test-mmap-16384 run-test-mmap-65536
+# This triggers failures for hppa-linux about 1% of the time
+# HPPA is the odd target that can't use the sigtramp page;
+# it requires the full vdso with dwarf2 unwind info.
+run-signals: signals
+	$(call skip-test, $<, "BROKEN awaiting vdso support")
+run-plugin-signals-with-%:
+	$(call skip-test, $<, "BROKEN awaiting vdso support")
+
+VPATH += $(SRC_PATH)/tests/tcg/hppa
+TESTS += stby
+
+stby: CFLAGS += -pthread
diff --git a/tests/tcg/hppa/stby.c b/tests/tcg/hppa/stby.c
new file mode 100644
index 0000000000..36bd5f723c
--- /dev/null
+++ b/tests/tcg/hppa/stby.c
@@ -0,0 +1,87 @@
+/* Test STBY */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+
+
+struct S {
+    unsigned a;
+    unsigned b;
+    unsigned c;
+};
+
+static void check(const struct S *s, unsigned e,
+                  const char *which, const char *insn, int ofs)
+{
+    int err = 0;
+
+    if (s->a != 0) {
+        fprintf(stderr, "%s %s %d: garbage before word 0x%08x\n",
+                which, insn, ofs, s->a);
+        err = 1;
+    }
+    if (s->c != 0) {
+        fprintf(stderr, "%s %s %d: garbage after word 0x%08x\n",
+                which, insn, ofs, s->c);
+        err = 1;
+    }
+    if (s->b != e) {
+        fprintf(stderr, "%s %s %d: 0x%08x != 0x%08x\n",
+                which, insn, ofs, s->b, e);
+        err = 1;
+    }
+
+    if (err) {
+        exit(1);
+    }
+}
+
+#define TEST(INSN, OFS, E) \
+    do { \
+        s.b = 0; \
+        asm volatile(INSN " %1, " #OFS "(%0)" \
+                     : : "r"(&s.b), "r" (0x11223344) : "memory"); \
+        check(&s, E, which, INSN, OFS); \
+    } while (0)
+
+static void test(const char *which)
+{
+    struct S s = { };
+
+    TEST("stby,b", 0, 0x11223344);
+    TEST("stby,b", 1, 0x00223344);
+    TEST("stby,b", 2, 0x00003344);
+    TEST("stby,b", 3, 0x00000044);
+
+    TEST("stby,e", 0, 0x00000000);
+    TEST("stby,e", 1, 0x11000000);
+    TEST("stby,e", 2, 0x11220000);
+    TEST("stby,e", 3, 0x11223300);
+}
+
+static void *child(void *x)
+{
+    return NULL;
+}
+
+int main()
+{
+    int err;
+    pthread_t thr;
+
+    /* Run test in serial mode */
+    test("serial");
+
+    /* Create a dummy thread to start parallel mode. */
+    err = pthread_create(&thr, NULL, child, NULL);
+    if (err != 0) {
+        fprintf(stderr, "pthread_create: %s\n", strerror(err));
+        return 2;
+    }
+
+    /* Run test in parallel mode */
+    test("parallel");
+    return 0;
+}
diff --git a/tests/tcg/i386/Makefile.softmmu-target b/tests/tcg/i386/Makefile.softmmu-target
index fa9b1b9f90..ed922d59c8 100644
--- a/tests/tcg/i386/Makefile.softmmu-target
+++ b/tests/tcg/i386/Makefile.softmmu-target
@@ -38,10 +38,9 @@ run-plugin-%-with-libinsn.so:
 	$(call run-test, $@, \
 	  $(QEMU) -monitor none -display none \
 	    -chardev file$(COMMA)path=$@.out$(COMMA)id=output \
-	    -plugin ../../plugin/libinsn.so$(COMMA)arg=inline \
+	    -plugin ../../plugin/libinsn.so$(COMMA)inline=on \
 	    -d plugin -D $*-with-libinsn.so.pout \
-	    $(QEMU_OPTS) $*, \
-	  "$* on $(TARGET_NAME)")
+	    $(QEMU_OPTS) $*)
 # Running
 QEMU_OPTS+=-device isa-debugcon,chardev=output -device isa-debug-exit,iobase=0xf4,iosize=0x4 -kernel
diff --git a/tests/tcg/i386/Makefile.target b/tests/tcg/i386/Makefile.target
index b0a2128980..bafd8c2180 100644
--- a/tests/tcg/i386/Makefile.target
+++ b/tests/tcg/i386/Makefile.target
@@ -5,10 +5,16 @@
 I386_SRC=$(SRC_PATH)/tests/tcg/i386
 # Set search path for all sources
 VPATH += $(I386_SRC)
+config-cc.mak: Makefile
+	$(quiet-@)( \
+	    $(call cc-option,-fno-pie, CROSS_CC_HAS_I386_NOPIE)) 3> config-cc.mak
+
+-include config-cc.mak
+
 I386_SRCS=$(notdir $(wildcard $(I386_SRC)/*.c))
 ALL_X86_TESTS=$(I386_SRCS:.c=)
-SKIP_I386_TESTS=test-i386-ssse3
-X86_64_TESTS:=$(filter test-i386-ssse3, $(ALL_X86_TESTS))
+SKIP_I386_TESTS=test-i386-ssse3 test-avx test-3dnow test-mmx
+X86_64_TESTS:=$(filter test-i386-adcox test-i386-bmi2 $(SKIP_I386_TESTS), $(ALL_X86_TESTS))
 test-i386-sse-exceptions: CFLAGS += -msse4.1 -mfpmath=sse
 run-test-i386-sse-exceptions: QEMU_OPTS +=
-cpu max @@ -18,9 +24,14 @@ test-i386-pcmpistri: CFLAGS += -msse4.2 run-test-i386-pcmpistri: QEMU_OPTS += -cpu max run-plugin-test-i386-pcmpistri-%: QEMU_OPTS += -cpu max +test-i386-bmi2: CFLAGS=-O2 run-test-i386-bmi2: QEMU_OPTS += -cpu max run-plugin-test-i386-bmi2-%: QEMU_OPTS += -cpu max +test-i386-adcox: CFLAGS=-O2 +run-test-i386-adcox: QEMU_OPTS += -cpu max +run-plugin-test-i386-adcox-%: QEMU_OPTS += -cpu max + # # hello-i386 is a barebones app # @@ -30,7 +41,7 @@ hello-i386: LDFLAGS+=-nostdlib # test-386 includes a couple of additional objects that need to be # linked together, we also need a no-pie capable compiler due to the # non-pic calls into 16-bit mode -ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_I386_NOPIE),) +ifneq ($(CROSS_CC_HAS_I386_NOPIE),) test-i386: CFLAGS += -fno-pie test-i386: test-i386.c test-i386-code16.S test-i386-vm86.S test-i386.h test-i386-shift.h test-i386-muldiv.h @@ -52,7 +63,7 @@ test-i386-fprem.ref: test-i386-fprem run-test-i386-fprem: TIMEOUT=60 run-test-i386-fprem: test-i386-fprem test-i386-fprem.ref - $(call run-test,test-i386-fprem, $(QEMU) $<,"$< on $(TARGET_NAME)") + $(call run-test,test-i386-fprem, $(QEMU) $<) $(call diff-out,test-i386-fprem, test-i386-fprem.ref) else SKIP_I386_TESTS+=test-i386-fprem @@ -61,12 +72,8 @@ endif # non-inline runs will trigger the duplicate instruction heuristics in libinsn.so run-plugin-%-with-libinsn.so: $(call run-test, $@, $(QEMU) $(QEMU_OPTS) \ - -plugin ../../plugin/libinsn.so$(COMMA)arg=inline \ - -d plugin -D $*-with-libinsn.so.pout $*, \ - "$* (inline) on $(TARGET_NAME)") - -run-plugin-signals-with-libinsn.so: - $(call skip-test, $<, "BROKEN awaiting sigframe clean-ups and vdso support") + -plugin ../../plugin/libinsn.so$(COMMA)inline=on \ + -d plugin -D $*-with-libinsn.so.pout $*) # Update TESTS I386_TESTS:=$(filter-out $(SKIP_I386_TESTS), $(ALL_X86_TESTS)) @@ -74,3 +81,37 @@ TESTS=$(MULTIARCH_TESTS) $(I386_TESTS) # On i386 and x86_64 Linux only supports 4k pages (large pages are a different hack) EXTRA_RUNS+=run-test-mmap-4096 + +sha512-sse: CFLAGS=-msse4.1 -O3 +sha512-sse: sha512.c + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) + +run-sha512-sse: QEMU_OPTS+=-cpu max +run-plugin-sha512-sse-with-%: QEMU_OPTS+=-cpu max + +TESTS+=sha512-sse + +CLEANFILES += test-avx.h test-mmx.h test-3dnow.h +test-3dnow.h: test-mmx.py x86.csv + $(PYTHON) $(I386_SRC)/test-mmx.py $(I386_SRC)/x86.csv $@ 3DNOW + +test-mmx.h: test-mmx.py x86.csv + $(PYTHON) $(I386_SRC)/test-mmx.py $(I386_SRC)/x86.csv $@ MMX SSE SSE2 SSE3 SSSE3 + +test-avx.h: test-avx.py x86.csv + $(PYTHON) $(I386_SRC)/test-avx.py $(I386_SRC)/x86.csv $@ + +test-3dnow: CFLAGS += -masm=intel -O -I. +run-test-3dnow: QEMU_OPTS += -cpu max +run-plugin-test-3dnow: QEMU_OPTS += -cpu max +test-3dnow: test-3dnow.h + +test-mmx: CFLAGS += -masm=intel -O -I. +run-test-mmx: QEMU_OPTS += -cpu max +run-plugin-test-mmx: QEMU_OPTS += -cpu max +test-mmx: test-mmx.h + +test-avx: CFLAGS += -mavx -masm=intel -O -I. +run-test-avx: QEMU_OPTS += -cpu max +run-plugin-test-avx: QEMU_OPTS += -cpu max +test-avx: test-avx.h diff --git a/tests/tcg/i386/README b/tests/tcg/i386/README index 09e88f30dc..403d10dad8 100644 --- a/tests/tcg/i386/README +++ b/tests/tcg/i386/README @@ -15,6 +15,15 @@ The Linux system call vm86() is used to test vm86 emulation. Various exceptions are raised to test most of the x86 user space exception reporting. 
+test-avx +-------- + +This program executes most SSE/AVX instructions and generates a text output, +for comparison with the output obtained with a real CPU or another emulator. + +test-avx.h is generate from x86.csv by test-avx.py +x86.csv comes from https://github.com/quasilyte/avx512test + linux-test ---------- diff --git a/tests/tcg/i386/float_convd.conf b/tests/tcg/i386/float_convd.conf new file mode 100644 index 0000000000..7f4040cef8 --- /dev/null +++ b/tests/tcg/i386/float_convd.conf @@ -0,0 +1,988 @@ +### Rounding to nearest +from single: f32(nan:0x7fe00000) + to single: f64(nan:0x007ffc000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-nan:0xffc00000) + to single: f64(-nan:0x00fff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-inf:0xff800000) + to single: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-inf:0xff800000) + to single: f64(-inf:0x00fff0000000000000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to single: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to single: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to single: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to single: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.00000000000000000000p+1:0xc0000000) + to single: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: -2 (OK) + to uint64: -2 (OK) +from single: f32(-0x1.00000000000000000000p+0:0xbf800000) + to single: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: -1 (OK) + to uint64: -1 (OK) +from single: f32(-0x0.00000000000000000000p+0:0x80000000) + to single: f64(-0x0.00000000000000000000p+0:0x008000000000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to single: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: 
f32(0x0.00000000000000000000p+0:0000000000) + to single: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to single: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to single: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to single: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to single: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to single: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to single: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to single: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to single: f64(0x0.00000000000000000000p+0:00000000000000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to single: f64(0x0.00000000000000000000p+0:00000000000000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to single: f64(0x0.00000000000000000000p+0:00000000000000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to single: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to single: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to single: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to single: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to single: 
f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to single: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to single: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to single: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to single: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to single: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.00000000000000000000p+31:0x4f000000) + to single: f64(0x1.00000000000000000000p+31:0x0041e0000000000000) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to single: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to single: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to single: f64(inf:0x007ff0000000000000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to single: f64(inf:0x007ff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fc00000) + to single: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(nan:0x7fc00000) + to single: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(nan:0x7fe00000) + to single: f64(nan:0x007ffc000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +### Rounding upwards +from single: f32(nan:0x7fe00000) + to single: f64(nan:0x007ffc000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-nan:0xffc00000) + to single: f64(-nan:0x00fff8000000000000) (OK) + to 
int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-inf:0xff800000) + to single: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to single: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to single: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to single: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) + to single: f64(-0x1.1874b000000000000000p+103:0x00c661874b00000000) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) + to single: f64(-0x1.c0bab400000000000000p+99:0x00c62c0bab40000000) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.00000000000000000000p+1:0xc0000000) + to single: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: -2 (OK) + to uint64: -2 (OK) +from single: f32(-0x1.00000000000000000000p+0:0xbf800000) + to single: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: -1 (OK) + to uint64: -1 (OK) +from single: f32(-0x0.00000000000000000000p+0:0x80000000) + to single: f64(-0x0.00000000000000000000p+0:0x008000000000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to single: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to single: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to single: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000200000000000000p-25:0x33000001) + to single: f64(0x1.00000200000000000000p-25:0x003e60000020000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe800000000000000p-25:0x337ffff4) + to single: f64(0x1.ffffe800000000000000p-25:0x003e6ffffe80000000) (INEXACT ) + to 
int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801c00000000000000p-15:0x387fc00e) + to single: f64(0x1.ff801c00000000000000p-15:0x003f0ff801c0000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000e00000000000000p-14:0x38800007) + to single: f64(0x1.00000e00000000000000p-14:0x003f100000e0000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to single: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to single: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p-149:0x00000001) + to single: f64(0x1.00000000000000000000p-149:0x0036a0000000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-149:0x00000001) + to single: f64(0x1.00000000000000000000p-149:0x0036a0000000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-149:0x00000001) + to single: f64(0x1.00000000000000000000p-149:0x0036a0000000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to single: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to single: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0aa00000000000000p+1:0x402df855) + to single: f64(0x1.5bf0aa00000000000000p+1:0x004005bf0aa0000000) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to single: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to single: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to single: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to single: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to single: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to 
uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to single: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to single: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.00000000000000000000p+31:0x4f000000) + to single: f64(0x1.00000000000000000000p+31:0x0041e0000000000000) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to single: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to single: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to single: f64(inf:0x007ff0000000000000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to single: f64(inf:0x007ff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fc00000) + to single: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(nan:0x7fc00000) + to single: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(nan:0x7fe00000) + to single: f64(nan:0x007ffc000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +### Rounding downwards +from single: f32(nan:0x7fe00000) + to single: f64(nan:0x007ffc000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-nan:0xffc00000) + to single: f64(-nan:0x00fff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-inf:0xff800000) + to single: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-inf:0xff800000) + to single: f64(-inf:0x00fff0000000000000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to single: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 
(INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to single: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to single: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to single: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.00000000000000000000p+1:0xc0000000) + to single: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: -2 (OK) + to uint64: -2 (OK) +from single: f32(-0x1.00000000000000000000p+0:0xbf800000) + to single: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: -1 (OK) + to uint64: -1 (OK) +from single: f32(-0x1.00000000000000000000p-149:0x80000001) + to single: f64(-0x1.00000000000000000000p-149:0x00b6a0000000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to single: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to single: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to single: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to single: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to single: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to single: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to single: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to single: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to single: 
f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to single: f64(0x0.00000000000000000000p+0:00000000000000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to single: f64(0x0.00000000000000000000p+0:00000000000000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to single: f64(0x0.00000000000000000000p+0:00000000000000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to single: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to single: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to single: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb400000000000000p+1:0x40490fda) + to single: f64(0x1.921fb400000000000000p+1:0x00400921fb40000000) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to single: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to single: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to single: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to single: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to single: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to single: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.fffffe00000000000000p+30:0x4effffff) + to single: f64(0x1.fffffe00000000000000p+30:0x0041dfffffe0000000) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to single: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + 
to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to single: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to single: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to single: f64(inf:0x007ff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fc00000) + to single: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(nan:0x7fc00000) + to single: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(nan:0x7fe00000) + to single: f64(nan:0x007ffc000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +### Rounding to zero +from single: f32(nan:0x7fe00000) + to single: f64(nan:0x007ffc000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-nan:0xffc00000) + to single: f64(-nan:0x00fff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-inf:0xff800000) + to single: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to single: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to single: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to single: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) + to single: f64(-0x1.1874b000000000000000p+103:0x00c661874b00000000) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) + to single: 
f64(-0x1.c0bab400000000000000p+99:0x00c62c0bab40000000) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.00000000000000000000p+1:0xc0000000) + to single: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: -2 (OK) + to uint64: -2 (OK) +from single: f32(-0x1.00000000000000000000p+0:0xbf800000) + to single: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: -1 (OK) + to uint64: -1 (OK) +from single: f32(-0x0.00000000000000000000p+0:0x80000000) + to single: f64(-0x0.00000000000000000000p+0:0x008000000000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to single: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to single: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to single: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to single: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to single: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to single: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to single: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to single: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to single: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to single: f64(0x0.00000000000000000000p+0:00000000000000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to single: f64(0x0.00000000000000000000p+0:00000000000000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to single: 
f64(0x0.00000000000000000000p+0:00000000000000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to single: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to single: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to single: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb400000000000000p+1:0x40490fda) + to single: f64(0x1.921fb400000000000000p+1:0x00400921fb40000000) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to single: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to single: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to single: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to single: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to single: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to single: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.fffffe00000000000000p+30:0x4effffff) + to single: f64(0x1.fffffe00000000000000p+30:0x0041dfffffe0000000) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to single: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to single: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to single: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to 
single: f64(inf:0x007ff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fc00000) + to single: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(nan:0x7fc00000) + to single: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(nan:0x7fe00000) + to single: f64(nan:0x007ffc000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) diff --git a/tests/tcg/i386/float_convs.ref b/tests/tcg/i386/float_convs.ref new file mode 100644 index 0000000000..9de86910a8 --- /dev/null +++ b/tests/tcg/i386/float_convs.ref @@ -0,0 +1,748 @@ +### Rounding to nearest +from single: f32(-nan:0xffe00000) + to double: f64(-nan:0x00fffc000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to double: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: 
f32(0x1.00000000000000000000p-126:0x00800000) + to double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 
131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(nan:0x7fe00000) + to double: f64(nan:0x007ffc000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +### Rounding upwards +from single: f32(-nan:0xffe00000) + to double: f64(-nan:0x00fffc000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from 
single: f32(0x0.00000000000000000000p+0:0000000000) + to double: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to 
int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(nan:0x7fe00000) + to double: f64(nan:0x007ffc000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +### Rounding downwards +from single: f32(-nan:0xffe00000) + to double: f64(-nan:0x00fffc000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: 
f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to double: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to 
int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(nan:0x7fe00000) + to double: f64(nan:0x007ffc000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +### Rounding to zero +from single: f32(-nan:0xffe00000) + to double: f64(-nan:0x00fffc000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: 
f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to double: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 
(OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(nan:0x7fe00000) + to double: f64(nan:0x007ffc000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) diff --git a/tests/tcg/i386/test-3dnow.c b/tests/tcg/i386/test-3dnow.c new file mode 100644 index 0000000000..67abc68677 --- /dev/null +++ b/tests/tcg/i386/test-3dnow.c @@ -0,0 +1,3 @@ +#define EMMS "femms" +#define TEST_FILE "test-3dnow.h" +#include "test-mmx.c" diff --git a/tests/tcg/i386/test-avx.c b/tests/tcg/i386/test-avx.c new file mode 100644 index 0000000000..c39c0e5bce --- /dev/null +++ b/tests/tcg/i386/test-avx.c @@ -0,0 +1,364 @@ +#include +#include +#include +#include + +typedef void (*testfn)(void); + +typedef struct { + uint64_t q0, q1, q2, q3; +} __attribute__((aligned(32))) v4di; + +typedef struct { + uint64_t mm[8]; + v4di ymm[16]; + uint64_t r[16]; + uint64_t flags; + uint32_t ff; + uint64_t pad; + v4di mem[4]; + v4di mem0[4]; +} reg_state; + +typedef struct { + int n; + testfn fn; + const char *s; + reg_state *init; +} TestDef; + +reg_state initI; +reg_state initF16; +reg_state initF32; +reg_state initF64; + +static void dump_ymm(const char *name, int n, const v4di *r, int ff) +{ + printf("%s%d = %016lx %016lx %016lx %016lx\n", + name, n, r->q3, r->q2, r->q1, r->q0); + if (ff == 64) { + double v[4]; + memcpy(v, r, sizeof(v)); + printf(" %16g %16g %16g %16g\n", + v[3], v[2], v[1], v[0]); + } else if (ff == 32) { + float v[8]; + memcpy(v, r, sizeof(v)); + printf(" %8g %8g %8g %8g %8g %8g %8g %8g\n", + v[7], v[6], v[5], v[4], v[3], v[2], v[1], v[0]); + } +} + +static void dump_regs(reg_state *s) +{ + int i; + + for (i = 0; i < 16; i++) { + dump_ymm("ymm", i, &s->ymm[i], 0); + } + for (i = 0; i < 4; i++) { + dump_ymm("mem", i, &s->mem0[i], 0); + } +} + 
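/*
 * Editorial aside (not part of the upstream patch): the test-avx.c harness
 * above snapshots the CPU state into a reg_state before running each
 * generated instruction and again afterwards, then reports only the fields
 * that changed (see compare_state just below).  The guarded sketch here
 * illustrates that compare-and-print idea in isolation; mini_state and
 * mini_compare are simplified stand-ins invented for illustration, not
 * names used by the real harness.
 */
#if 0   /* illustrative sketch only, kept out of the build */
#include <stdio.h>
#include <inttypes.h>

typedef struct {
    uint64_t gpr[4];    /* a few general-purpose registers */
    uint64_t flags;     /* condition flags */
} mini_state;

/* Print every field of 'after' that differs from 'before'. */
static void mini_compare(const mini_state *before, const mini_state *after)
{
    for (int i = 0; i < 4; i++) {
        if (before->gpr[i] != after->gpr[i]) {
            printf("r%d = %016" PRIx64 "\n", i, after->gpr[i]);
        }
    }
    if (before->flags != after->flags) {
        printf("FLAGS = %016" PRIx64 "\n", after->flags);
    }
}
#endif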
+static void compare_state(const reg_state *a, const reg_state *b) +{ + int i; + for (i = 0; i < 8; i++) { + if (a->mm[i] != b->mm[i]) { + printf("MM%d = %016lx\n", i, b->mm[i]); + } + } + for (i = 0; i < 16; i++) { + if (a->r[i] != b->r[i]) { + printf("r%d = %016lx\n", i, b->r[i]); + } + } + for (i = 0; i < 16; i++) { + if (memcmp(&a->ymm[i], &b->ymm[i], 32)) { + dump_ymm("ymm", i, &b->ymm[i], a->ff); + } + } + for (i = 0; i < 4; i++) { + if (memcmp(&a->mem0[i], &a->mem[i], 32)) { + dump_ymm("mem", i, &a->mem[i], a->ff); + } + } + if (a->flags != b->flags) { + printf("FLAGS = %016lx\n", b->flags); + } +} + +#define LOADMM(r, o) "movq " #r ", " #o "[%0]\n\t" +#define LOADYMM(r, o) "vmovdqa " #r ", " #o "[%0]\n\t" +#define STOREMM(r, o) "movq " #o "[%1], " #r "\n\t" +#define STOREYMM(r, o) "vmovdqa " #o "[%1], " #r "\n\t" +#define MMREG(F) \ + F(mm0, 0x00) \ + F(mm1, 0x08) \ + F(mm2, 0x10) \ + F(mm3, 0x18) \ + F(mm4, 0x20) \ + F(mm5, 0x28) \ + F(mm6, 0x30) \ + F(mm7, 0x38) +#define YMMREG(F) \ + F(ymm0, 0x040) \ + F(ymm1, 0x060) \ + F(ymm2, 0x080) \ + F(ymm3, 0x0a0) \ + F(ymm4, 0x0c0) \ + F(ymm5, 0x0e0) \ + F(ymm6, 0x100) \ + F(ymm7, 0x120) \ + F(ymm8, 0x140) \ + F(ymm9, 0x160) \ + F(ymm10, 0x180) \ + F(ymm11, 0x1a0) \ + F(ymm12, 0x1c0) \ + F(ymm13, 0x1e0) \ + F(ymm14, 0x200) \ + F(ymm15, 0x220) +#define LOADREG(r, o) "mov " #r ", " #o "[rax]\n\t" +#define STOREREG(r, o) "mov " #o "[rax], " #r "\n\t" +#define REG(F) \ + F(rbx, 0x248) \ + F(rcx, 0x250) \ + F(rdx, 0x258) \ + F(rsi, 0x260) \ + F(rdi, 0x268) \ + F(r8, 0x280) \ + F(r9, 0x288) \ + F(r10, 0x290) \ + F(r11, 0x298) \ + F(r12, 0x2a0) \ + F(r13, 0x2a8) \ + F(r14, 0x2b0) \ + F(r15, 0x2b8) \ + +static void run_test(const TestDef *t) +{ + reg_state result; + reg_state *init = t->init; + memcpy(init->mem, init->mem0, sizeof(init->mem)); + printf("%5d %s\n", t->n, t->s); + asm volatile( + MMREG(LOADMM) + YMMREG(LOADYMM) + "sub rsp, 128\n\t" + "push rax\n\t" + "push rbx\n\t" + "push rcx\n\t" + "push rdx\n\t" + "push %1\n\t" + "push %2\n\t" + "mov rax, %0\n\t" + "pushf\n\t" + "pop rbx\n\t" + "shr rbx, 8\n\t" + "shl rbx, 8\n\t" + "mov rcx, 0x2c0[rax]\n\t" + "and rcx, 0xff\n\t" + "or rbx, rcx\n\t" + "push rbx\n\t" + "popf\n\t" + REG(LOADREG) + "mov rax, 0x240[rax]\n\t" + "call [rsp]\n\t" + "mov [rsp], rax\n\t" + "mov rax, 8[rsp]\n\t" + REG(STOREREG) + "mov rbx, [rsp]\n\t" + "mov 0x240[rax], rbx\n\t" + "mov rbx, 0\n\t" + "mov 0x270[rax], rbx\n\t" + "mov 0x278[rax], rbx\n\t" + "pushf\n\t" + "pop rbx\n\t" + "and rbx, 0xff\n\t" + "mov 0x2c0[rax], rbx\n\t" + "add rsp, 16\n\t" + "pop rdx\n\t" + "pop rcx\n\t" + "pop rbx\n\t" + "pop rax\n\t" + "add rsp, 128\n\t" + MMREG(STOREMM) + YMMREG(STOREYMM) + : : "r"(init), "r"(&result), "r"(t->fn) + : "memory", "cc", + "rsi", "rdi", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", + "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", + "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", + "ymm6", "ymm7", "ymm8", "ymm9", "ymm10", "ymm11", + "ymm12", "ymm13", "ymm14", "ymm15" + ); + compare_state(init, &result); +} + +#define TEST(n, cmd, type) \ +static void __attribute__((naked)) test_##n(void) \ +{ \ + asm volatile(cmd); \ + asm volatile("ret"); \ +} +#include "test-avx.h" + + +static const TestDef test_table[] = { +#define TEST(n, cmd, type) {n, test_##n, cmd, &init##type}, +#include "test-avx.h" + {-1, NULL, "", NULL} +}; + +static void run_all(void) +{ + const TestDef *t; + for (t = test_table; t->fn; t++) { + run_test(t); + } +} + +#define ARRAY_LEN(x) (sizeof(x) / sizeof(x[0])) + +uint16_t 
val_f16[] = { 0x4000, 0xbc00, 0x44cd, 0x3a66, 0x4200, 0x7a1a, 0x4780, 0x4826 }; +float val_f32[] = {2.0, -1.0, 4.8, 0.8, 3, -42.0, 5e6, 7.5, 8.3}; +double val_f64[] = {2.0, -1.0, 4.8, 0.8, 3, -42.0, 5e6, 7.5}; +v4di val_i64[] = { + {0x3d6b3b6a9e4118f2lu, 0x355ae76d2774d78clu, + 0xac3ff76c4daa4b28lu, 0xe7fabd204cb54083lu}, + {0xd851c54a56bf1f29lu, 0x4a84d1d50bf4c4fflu, + 0x56621e553d52b56clu, 0xd0069553da8f584alu}, + {0x5826475e2c5fd799lu, 0xfd32edc01243f5e9lu, + 0x738ba2c66d3fe126lu, 0x5707219c6e6c26b4lu}, +}; + +v4di deadbeef = {0xa5a5a5a5deadbeefull, 0xa5a5a5a5deadbeefull, + 0xa5a5a5a5deadbeefull, 0xa5a5a5a5deadbeefull}; +v4di indexq = {0x000000000000001full, 0x000000000000008full, + 0xffffffffffffffffull, 0xffffffffffffff5full}; +v4di indexd = {0x00000002000000efull, 0xfffffff500000010ull, + 0x0000000afffffff0ull, 0x000000000000000eull}; + +v4di gather_mem[0x20]; + +void init_f16reg(v4di *r) +{ + memset(r, 0, sizeof(*r)); + memcpy(r, val_f16, sizeof(val_f16)); +} + +void init_f32reg(v4di *r) +{ + static int n; + float v[8]; + int i; + for (i = 0; i < 8; i++) { + v[i] = val_f32[n++]; + if (n == ARRAY_LEN(val_f32)) { + n = 0; + } + } + memcpy(r, v, sizeof(*r)); +} + +void init_f64reg(v4di *r) +{ + static int n; + double v[4]; + int i; + for (i = 0; i < 4; i++) { + v[i] = val_f64[n++]; + if (n == ARRAY_LEN(val_f64)) { + n = 0; + } + } + memcpy(r, v, sizeof(*r)); +} + +void init_intreg(v4di *r) +{ + static uint64_t mask; + static int n; + + r->q0 = val_i64[n].q0 ^ mask; + r->q1 = val_i64[n].q1 ^ mask; + r->q2 = val_i64[n].q2 ^ mask; + r->q3 = val_i64[n].q3 ^ mask; + n++; + if (n == ARRAY_LEN(val_i64)) { + n = 0; + mask *= 0x104C11DB7; + } +} + +static void init_all(reg_state *s) +{ + int i; + + s->r[3] = (uint64_t)&s->mem[0]; /* rdx */ + s->r[4] = (uint64_t)&gather_mem[ARRAY_LEN(gather_mem) / 2]; /* rsi */ + s->r[5] = (uint64_t)&s->mem[2]; /* rdi */ + s->flags = 2; + for (i = 0; i < 16; i++) { + s->ymm[i] = deadbeef; + } + s->ymm[13] = indexd; + s->ymm[14] = indexq; + for (i = 0; i < 4; i++) { + s->mem0[i] = deadbeef; + } +} + +int main(int argc, char *argv[]) +{ + int i; + + init_all(&initI); + init_intreg(&initI.ymm[10]); + init_intreg(&initI.ymm[11]); + init_intreg(&initI.ymm[12]); + init_intreg(&initI.mem0[1]); + printf("Int:\n"); + dump_regs(&initI); + + init_all(&initF16); + init_f16reg(&initF16.ymm[10]); + init_f16reg(&initF16.ymm[11]); + init_f16reg(&initF16.ymm[12]); + init_f16reg(&initF16.mem0[1]); + initF16.ff = 16; + printf("F16:\n"); + dump_regs(&initF16); + + init_all(&initF32); + init_f32reg(&initF32.ymm[10]); + init_f32reg(&initF32.ymm[11]); + init_f32reg(&initF32.ymm[12]); + init_f32reg(&initF32.mem0[1]); + initF32.ff = 32; + printf("F32:\n"); + dump_regs(&initF32); + + init_all(&initF64); + init_f64reg(&initF64.ymm[10]); + init_f64reg(&initF64.ymm[11]); + init_f64reg(&initF64.ymm[12]); + init_f64reg(&initF64.mem0[1]); + initF64.ff = 64; + printf("F64:\n"); + dump_regs(&initF64); + + for (i = 0; i < ARRAY_LEN(gather_mem); i++) { + init_intreg(&gather_mem[i]); + } + + if (argc > 1) { + int n = atoi(argv[1]); + run_test(&test_table[n]); + } else { + run_all(); + } + return 0; +} diff --git a/tests/tcg/i386/test-avx.py b/tests/tcg/i386/test-avx.py new file mode 100755 index 0000000000..d9ca00a49e --- /dev/null +++ b/tests/tcg/i386/test-avx.py @@ -0,0 +1,375 @@ +#! 
/usr/bin/env python3 + +# Generate test-avx.h from x86.csv + +import csv +import sys +from fnmatch import fnmatch + +archs = [ + "SSE", "SSE2", "SSE3", "SSSE3", "SSE4_1", "SSE4_2", + "AES", "AVX", "AVX2", "AES+AVX", "VAES+AVX", + "F16C", "FMA", +] + +ignore = set(["FISTTP", + "LDMXCSR", "VLDMXCSR", "STMXCSR", "VSTMXCSR"]) + +imask = { + 'vBLENDPD': 0xff, + 'vBLENDPS': 0x0f, + 'CMP[PS][SD]': 0x07, + 'VCMP[PS][SD]': 0x1f, + 'vCVTPS2PH': 0x7, + 'vDPPD': 0x33, + 'vDPPS': 0xff, + 'vEXTRACTPS': 0x03, + 'vINSERTPS': 0xff, + 'MPSADBW': 0x7, + 'VMPSADBW': 0x3f, + 'vPALIGNR': 0x3f, + 'vPBLENDW': 0xff, + 'vPCMP[EI]STR*': 0x0f, + 'vPEXTRB': 0x0f, + 'vPEXTRW': 0x07, + 'vPEXTRD': 0x03, + 'vPEXTRQ': 0x01, + 'vPINSRB': 0x0f, + 'vPINSRW': 0x07, + 'vPINSRD': 0x03, + 'vPINSRQ': 0x01, + 'vPSHUF[DW]': 0xff, + 'vPSHUF[LH]W': 0xff, + 'vPS[LR][AL][WDQ]': 0x3f, + 'vPS[RL]LDQ': 0x1f, + 'vROUND[PS][SD]': 0x7, + 'vSHUFPD': 0x0f, + 'vSHUFPS': 0xff, + 'vAESKEYGENASSIST': 0xff, + 'VEXTRACT[FI]128': 0x01, + 'VINSERT[FI]128': 0x01, + 'VPBLENDD': 0xff, + 'VPERM2[FI]128': 0x33, + 'VPERMPD': 0xff, + 'VPERMQ': 0xff, + 'VPERMILPS': 0xff, + 'VPERMILPD': 0x0f, + } + +def strip_comments(x): + for l in x: + if l != '' and l[0] != '#': + yield l + +def reg_w(w): + if w == 8: + return 'al' + elif w == 16: + return 'ax' + elif w == 32: + return 'eax' + elif w == 64: + return 'rax' + raise Exception("bad reg_w %d" % w) + +def mem_w(w): + if w == 8: + t = "BYTE" + elif w == 16: + t = "WORD" + elif w == 32: + t = "DWORD" + elif w == 64: + t = "QWORD" + elif w == 128: + t = "XMMWORD" + elif w == 256: + t = "YMMWORD" + else: + raise Exception() + + return t + " PTR 32[rdx]" + +class XMMArg(): + isxmm = True + def __init__(self, reg, mw): + if mw not in [0, 8, 16, 32, 64, 128, 256]: + raise Exception("Bad /m width: %s" % w) + self.reg = reg + self.mw = mw + self.ismem = mw != 0 + def regstr(self, n): + if n < 0: + return mem_w(self.mw) + else: + return "%smm%d" % (self.reg, n) + +class MMArg(): + isxmm = True + def __init__(self, mw): + if mw not in [0, 32, 64]: + raise Exception("Bad mem width: %s" % mw) + self.mw = mw + self.ismem = mw != 0 + def regstr(self, n): + return "mm%d" % (n & 7) + +def match(op, pattern): + if pattern[0] == 'v': + return fnmatch(op, pattern[1:]) or fnmatch(op, 'V'+pattern[1:]) + return fnmatch(op, pattern) + +class ArgVSIB(): + isxmm = True + ismem = False + def __init__(self, reg, w): + if w not in [32, 64]: + raise Exception("Bad vsib width: %s" % w) + self.w = w + self.reg = reg + def regstr(self, n): + reg = "%smm%d" % (self.reg, n >> 2) + return "[rsi + %s * %d]" % (reg, 1 << (n & 3)) + +class ArgImm8u(): + isxmm = False + ismem = False + def __init__(self, op): + for k, v in imask.items(): + if match(op, k): + self.mask = imask[k]; + return + raise Exception("Unknown immediate") + def vals(self): + mask = self.mask + yield 0 + n = 0 + while n != mask: + n += 1 + while (n & ~mask) != 0: + n += (n & ~mask) + yield n + +class ArgRM(): + isxmm = False + def __init__(self, rw, mw): + if rw not in [8, 16, 32, 64]: + raise Exception("Bad r/w width: %s" % w) + if mw not in [0, 8, 16, 32, 64]: + raise Exception("Bad r/w width: %s" % w) + self.rw = rw + self.mw = mw + self.ismem = mw != 0 + def regstr(self, n): + if n < 0: + return mem_w(self.mw) + else: + return reg_w(self.rw) + +class ArgMem(): + isxmm = False + ismem = True + def __init__(self, w): + if w not in [8, 16, 32, 64, 128, 256]: + raise Exception("Bad mem width: %s" % w) + self.w = w + def regstr(self, n): + return mem_w(self.w) + +class 
SkipInstruction(Exception): + pass + +def ArgGenerator(arg, op): + if arg[:3] == 'xmm' or arg[:3] == "ymm": + if "/" in arg: + r, m = arg.split('/') + if (m[0] != 'm'): + raise Exception("Expected /m: %s", arg) + return XMMArg(arg[0], int(m[1:])); + else: + return XMMArg(arg[0], 0); + elif arg[:2] == 'mm': + if "/" in arg: + r, m = arg.split('/') + if (m[0] != 'm'): + raise Exception("Expected /m: %s", arg) + return MMArg(int(m[1:])); + else: + return MMArg(0); + elif arg[:4] == 'imm8': + return ArgImm8u(op); + elif arg == '': + return None + elif arg[0] == 'r': + if '/m' in arg: + r, m = arg.split('/') + if (m[0] != 'm'): + raise Exception("Expected /m: %s", arg) + mw = int(m[1:]) + if r == 'r': + rw = mw + else: + rw = int(r[1:]) + return ArgRM(rw, mw) + + return ArgRM(int(arg[1:]), 0); + elif arg[0] == 'm': + return ArgMem(int(arg[1:])) + elif arg[:2] == 'vm': + return ArgVSIB(arg[-1], int(arg[2:-1])) + else: + raise Exception("Unrecognised arg: %s", arg) + +class InsnGenerator: + def __init__(self, op, args): + self.op = op + if op[-2:] in ["PH", "PS", "PD", "SS", "SD"]: + if op[-1] == 'H': + self.optype = 'F16' + elif op[-1] == 'S': + self.optype = 'F32' + else: + self.optype = 'F64' + else: + self.optype = 'I' + + try: + self.args = list(ArgGenerator(a, op) for a in args) + if not any((x.isxmm for x in self.args)): + raise SkipInstruction + if len(self.args) > 0 and self.args[-1] is None: + self.args = self.args[:-1] + except SkipInstruction: + raise + except Exception as e: + raise Exception("Bad arg %s: %s" % (op, e)) + + def gen(self): + regs = (10, 11, 12) + dest = 9 + + nreg = len(self.args) + if nreg == 0: + yield self.op + return + if isinstance(self.args[-1], ArgImm8u): + nreg -= 1 + immarg = self.args[-1] + else: + immarg = None + memarg = -1 + for n, arg in enumerate(self.args): + if arg.ismem: + memarg = n + + if (self.op.startswith("VGATHER") or self.op.startswith("VPGATHER")): + if "GATHERD" in self.op: + ireg = 13 << 2 + else: + ireg = 14 << 2 + regset = [ + (dest, ireg | 0, regs[0]), + (dest, ireg | 1, regs[0]), + (dest, ireg | 2, regs[0]), + (dest, ireg | 3, regs[0]), + ] + if memarg >= 0: + raise Exception("vsib with memory: %s" % self.op) + elif nreg == 1: + regset = [(regs[0],)] + if memarg == 0: + regset += [(-1,)] + elif nreg == 2: + regset = [ + (regs[0], regs[1]), + (regs[0], regs[0]), + ] + if memarg == 0: + regset += [(-1, regs[0])] + elif memarg == 1: + regset += [(dest, -1)] + elif nreg == 3: + regset = [ + (dest, regs[0], regs[1]), + (dest, regs[0], regs[0]), + (regs[0], regs[0], regs[1]), + (regs[0], regs[1], regs[0]), + (regs[0], regs[0], regs[0]), + ] + if memarg == 2: + regset += [ + (dest, regs[0], -1), + (regs[0], regs[0], -1), + ] + elif memarg > 0: + raise Exception("Memarg %d" % memarg) + elif nreg == 4: + regset = [ + (dest, regs[0], regs[1], regs[2]), + (dest, regs[0], regs[0], regs[1]), + (dest, regs[0], regs[1], regs[0]), + (dest, regs[1], regs[0], regs[0]), + (dest, regs[0], regs[0], regs[0]), + (regs[0], regs[0], regs[1], regs[2]), + (regs[0], regs[1], regs[0], regs[2]), + (regs[0], regs[1], regs[2], regs[0]), + (regs[0], regs[0], regs[0], regs[1]), + (regs[0], regs[0], regs[1], regs[0]), + (regs[0], regs[1], regs[0], regs[0]), + (regs[0], regs[0], regs[0], regs[0]), + ] + if memarg == 2: + regset += [ + (dest, regs[0], -1, regs[1]), + (dest, regs[0], -1, regs[0]), + (regs[0], regs[0], -1, regs[1]), + (regs[0], regs[1], -1, regs[0]), + (regs[0], regs[0], -1, regs[0]), + ] + elif memarg > 0: + raise Exception("Memarg4 %d" % memarg) + else: + 
raise Exception("Too many regs: %s(%d)" % (self.op, nreg)) + + for regv in regset: + argstr = [] + for i in range(nreg): + arg = self.args[i] + argstr.append(arg.regstr(regv[i])) + if immarg is None: + yield self.op + ' ' + ','.join(argstr) + else: + for immval in immarg.vals(): + yield self.op + ' ' + ','.join(argstr) + ',' + str(immval) + +def split0(s): + if s == '': + return [] + return s.split(',') + +def main(): + n = 0 + if len(sys.argv) != 3: + print("Usage: test-avx.py x86.csv test-avx.h") + exit(1) + csvfile = open(sys.argv[1], 'r', newline='') + with open(sys.argv[2], "w") as outf: + outf.write("// Generated by test-avx.py. Do not edit.\n") + for row in csv.reader(strip_comments(csvfile)): + insn = row[0].replace(',', '').split() + if insn[0] in ignore: + continue + cpuid = row[6] + if cpuid in archs: + try: + g = InsnGenerator(insn[0], insn[1:]) + for insn in g.gen(): + outf.write('TEST(%d, "%s", %s)\n' % (n, insn, g.optype)) + n += 1 + except SkipInstruction: + pass + outf.write("#undef TEST\n") + csvfile.close() + +if __name__ == "__main__": + main() diff --git a/tests/tcg/i386/test-i386-adcox.c b/tests/tcg/i386/test-i386-adcox.c new file mode 100644 index 0000000000..16169efff8 --- /dev/null +++ b/tests/tcg/i386/test-i386-adcox.c @@ -0,0 +1,75 @@ +/* See if various BMI2 instructions give expected results */ +#include +#include +#include + +#define CC_C 1 +#define CC_O (1 << 11) + +#ifdef __x86_64__ +#define REG uint64_t +#else +#define REG uint32_t +#endif + +void test_adox_adcx(uint32_t in_c, uint32_t in_o, REG adcx_operand, REG adox_operand) +{ + REG flags; + REG out_adcx, out_adox; + + asm("pushf; pop %0" : "=r"(flags)); + flags &= ~(CC_C | CC_O); + flags |= (in_c ? CC_C : 0); + flags |= (in_o ? CC_O : 0); + + out_adcx = adcx_operand; + out_adox = adox_operand; + asm("push %0; popf;" + "adox %3, %2;" + "adcx %3, %1;" + "pushf; pop %0" + : "+r" (flags), "+r" (out_adcx), "+r" (out_adox) + : "r" ((REG)-1), "0" (flags), "1" (out_adcx), "2" (out_adox)); + + assert(out_adcx == in_c + adcx_operand - 1); + assert(out_adox == in_o + adox_operand - 1); + assert(!!(flags & CC_C) == (in_c || adcx_operand)); + assert(!!(flags & CC_O) == (in_o || adox_operand)); +} + +void test_adcx_adox(uint32_t in_c, uint32_t in_o, REG adcx_operand, REG adox_operand) +{ + REG flags; + REG out_adcx, out_adox; + + asm("pushf; pop %0" : "=r"(flags)); + flags &= ~(CC_C | CC_O); + flags |= (in_c ? CC_C : 0); + flags |= (in_o ? 
CC_O : 0); + + out_adcx = adcx_operand; + out_adox = adox_operand; + asm("push %0; popf;" + "adcx %3, %1;" + "adox %3, %2;" + "pushf; pop %0" + : "+r" (flags), "+r" (out_adcx), "+r" (out_adox) + : "r" ((REG)-1), "0" (flags), "1" (out_adcx), "2" (out_adox)); + + assert(out_adcx == in_c + adcx_operand - 1); + assert(out_adox == in_o + adox_operand - 1); + assert(!!(flags & CC_C) == (in_c || adcx_operand)); + assert(!!(flags & CC_O) == (in_o || adox_operand)); +} + +int main(int argc, char *argv[]) { + /* try all combinations of input CF, input OF, CF from op1+op2, OF from op2+op1 */ + int i; + for (i = 0; i <= 15; i++) { + printf("%d\n", i); + test_adcx_adox(!!(i & 1), !!(i & 2), !!(i & 4), !!(i & 8)); + test_adox_adcx(!!(i & 1), !!(i & 2), !!(i & 4), !!(i & 8)); + } + return 0; +} + diff --git a/tests/tcg/i386/test-i386-bmi2.c b/tests/tcg/i386/test-i386-bmi2.c index 935a4d2a73..0244df7987 100644 --- a/tests/tcg/i386/test-i386-bmi2.c +++ b/tests/tcg/i386/test-i386-bmi2.c @@ -1,41 +1,213 @@ /* See if various BMI2 instructions give expected results */ #include #include +#include + +#ifdef __x86_64 +typedef uint64_t reg_t; +#else +typedef uint32_t reg_t; +#endif + +#define insn1q(name, arg0) \ +static inline reg_t name##q(reg_t arg0) \ +{ \ + reg_t result64; \ + asm volatile (#name "q %1, %0" : "=r"(result64) : "rm"(arg0)); \ + return result64; \ +} + +#define insn1l(name, arg0) \ +static inline reg_t name##l(reg_t arg0) \ +{ \ + reg_t result32; \ + asm volatile (#name "l %k1, %k0" : "=r"(result32) : "rm"(arg0)); \ + return result32; \ +} + +#define insn2q(name, arg0, c0, arg1, c1) \ +static inline reg_t name##q(reg_t arg0, reg_t arg1) \ +{ \ + reg_t result64; \ + asm volatile (#name "q %2, %1, %0" : "=r"(result64) : c0(arg0), c1(arg1)); \ + return result64; \ +} + +#define insn2l(name, arg0, c0, arg1, c1) \ +static inline reg_t name##l(reg_t arg0, reg_t arg1) \ +{ \ + reg_t result32; \ + asm volatile (#name "l %k2, %k1, %k0" : "=r"(result32) : c0(arg0), c1(arg1)); \ + return result32; \ +} + +#ifdef __x86_64 +insn2q(pext, src, "r", mask, "rm") +insn2q(pdep, src, "r", mask, "rm") +insn2q(andn, clear, "rm", val, "r") +insn2q(bextr, range, "rm", val, "r") +insn2q(bzhi, pos, "rm", val, "r") +insn2q(rorx, val, "r", n, "i") +insn2q(sarx, val, "rm", n, "r") +insn2q(shlx, val, "rm", n, "r") +insn2q(shrx, val, "rm", n, "r") +insn1q(blsi, src) +insn1q(blsmsk, src) +insn1q(blsr, src) +#endif +insn2l(pext, src, "r", mask, "rm") +insn2l(pdep, src, "r", mask, "rm") +insn2l(andn, clear, "rm", val, "r") +insn2l(bextr, range, "rm", val, "r") +insn2l(bzhi, pos, "rm", val, "r") +insn2l(rorx, val, "r", n, "i") +insn2l(sarx, val, "rm", n, "r") +insn2l(shlx, val, "rm", n, "r") +insn2l(shrx, val, "rm", n, "r") +insn1l(blsi, src) +insn1l(blsmsk, src) +insn1l(blsr, src) int main(int argc, char *argv[]) { uint64_t ehlo = 0x202020204f4c4845ull; uint64_t mask = 0xa080800302020001ull; - uint32_t result32; + reg_t result; #ifdef __x86_64 - uint64_t result64; - /* 64 bits */ - asm volatile ("pextq %2, %1, %0" : "=r"(result64) : "r"(ehlo), "m"(mask)); - assert(result64 == 133); + result = andnq(mask, ehlo); + assert(result == 0x002020204d4c4844); - asm volatile ("pdepq %2, %1, %0" : "=r"(result64) : "r"(result64), "m"(mask)); - assert(result64 == (ehlo & mask)); + result = pextq(ehlo, mask); + assert(result == 133); - asm volatile ("pextq %2, %1, %0" : "=r"(result64) : "r"(-1ull), "m"(mask)); - assert(result64 == 511); /* mask has 9 bits set */ + result = pdepq(result, mask); + assert(result == (ehlo & mask)); - asm volatile 
("pdepq %2, %1, %0" : "=r"(result64) : "r"(-1ull), "m"(mask)); - assert(result64 == mask); + result = pextq(-1ull, mask); + assert(result == 511); /* mask has 9 bits set */ + + result = pdepq(-1ull, mask); + assert(result == mask); + + result = bextrq(mask, 0x3f00); + assert(result == (mask & ~INT64_MIN)); + + result = bextrq(mask, 0x1038); + assert(result == 0xa0); + + result = bextrq(mask, 0x10f8); + assert(result == 0); + + result = bextrq(0xfedcba9876543210ull, 0x7f00); + assert(result == 0xfedcba9876543210ull); + + result = blsiq(0x30); + assert(result == 0x10); + + result = blsiq(0x30ull << 32); + assert(result == 0x10ull << 32); + + result = blsmskq(0x30); + assert(result == 0x1f); + + result = blsrq(0x30); + assert(result == 0x20); + + result = blsrq(0x30ull << 32); + assert(result == 0x20ull << 32); + + result = bzhiq(mask, 0x3f); + assert(result == (mask & ~INT64_MIN)); + + result = bzhiq(mask, 0x1f); + assert(result == (mask & ~(-1 << 30))); + + result = bzhiq(mask, 0x40); + assert(result == mask); + + result = rorxq(0x2132435465768798, 8); + assert(result == 0x9821324354657687); + + result = sarxq(0xffeeddccbbaa9988, 8); + assert(result == 0xffffeeddccbbaa99); + + result = sarxq(0x77eeddccbbaa9988, 8 | 64); + assert(result == 0x0077eeddccbbaa99); + + result = shrxq(0xffeeddccbbaa9988, 8); + assert(result == 0x00ffeeddccbbaa99); + + result = shrxq(0x77eeddccbbaa9988, 8 | 192); + assert(result == 0x0077eeddccbbaa99); + + result = shlxq(0xffeeddccbbaa9988, 8); + assert(result == 0xeeddccbbaa998800); #endif /* 32 bits */ - asm volatile ("pextl %2, %k1, %k0" : "=r"(result32) : "r"((uint32_t) ehlo), "m"(mask)); - assert(result32 == 5); + result = andnl(mask, ehlo); + assert(result == 0x04d4c4844); - asm volatile ("pdepl %2, %k1, %k0" : "=r"(result32) : "r"(result32), "m"(mask)); - assert(result32 == (uint32_t)(ehlo & mask)); + result = pextl((uint32_t) ehlo, mask); + assert(result == 5); - asm volatile ("pextl %2, %k1, %k0" : "=r"(result32) : "r"(-1ull), "m"(mask)); - assert(result32 == 7); /* mask has 3 bits set */ + result = pdepl(result, mask); + assert(result == (uint32_t)(ehlo & mask)); - asm volatile ("pdepl %2, %k1, %k0" : "=r"(result32) : "r"(-1ull), "m"(mask)); - assert(result32 == (uint32_t)mask); + result = pextl(-1u, mask); + assert(result == 7); /* mask has 3 bits set */ + + result = pdepl(-1u, mask); + assert(result == (uint32_t)mask); + + result = bextrl(mask, 0x1f00); + assert(result == (mask & ~INT32_MIN)); + + result = bextrl(ehlo, 0x1018); + assert(result == 0x4f); + + result = bextrl(mask, 0x1038); + assert(result == 0); + + result = bextrl((reg_t)0x8f635a775ad3b9b4ull, 0x3018); + assert(result == 0x5a); + + result = bextrl((reg_t)0xfedcba9876543210ull, 0x7f00); + assert(result == 0x76543210u); + + result = bextrl(-1, 0); + assert(result == 0); + + result = blsil(0xffff); + assert(result == 1); + + result = blsmskl(0x300); + assert(result == 0x1ff); + + result = blsrl(0xffc); + assert(result == 0xff8); + + result = bzhil(mask, 0xf); + assert(result == 1); + + result = rorxl(0x65768798, 8); + assert(result == 0x98657687); + + result = sarxl(0xffeeddcc, 8); + assert(result == 0xffffeedd); + + result = sarxl(0x77eeddcc, 8 | 32); + assert(result == 0x0077eedd); + + result = shrxl(0xffeeddcc, 8); + assert(result == 0x00ffeedd); + + result = shrxl(0x77eeddcc, 8 | 128); + assert(result == 0x0077eedd); + + result = shlxl(0xffeeddcc, 8); + assert(result == 0xeeddcc00); return 0; } diff --git a/tests/tcg/i386/test-i386-fp-exceptions.c 
b/tests/tcg/i386/test-i386-fp-exceptions.c index dfb7117c17..d445f13c33 100644 --- a/tests/tcg/i386/test-i386-fp-exceptions.c +++ b/tests/tcg/i386/test-i386-fp-exceptions.c @@ -423,35 +423,35 @@ int main(void) } __asm__ volatile ("fnclex"); - __asm__ volatile ("fistp %0" : "=m" (res_16) : "t" (1.5L) : "st"); + __asm__ volatile ("fistps %0" : "=m" (res_16) : "t" (1.5L) : "st"); __asm__ volatile ("fnstsw" : "=a" (sw)); if ((sw & EXC) != PE) { printf("FAIL: fistp inexact\n"); ret = 1; } __asm__ volatile ("fnclex"); - __asm__ volatile ("fistp %0" : "=m" (res_16) : "t" (32767.5L) : "st"); + __asm__ volatile ("fistps %0" : "=m" (res_16) : "t" (32767.5L) : "st"); __asm__ volatile ("fnstsw" : "=a" (sw)); if ((sw & EXC) != IE) { printf("FAIL: fistp 32767.5\n"); ret = 1; } __asm__ volatile ("fnclex"); - __asm__ volatile ("fistp %0" : "=m" (res_16) : "t" (-32768.51L) : "st"); + __asm__ volatile ("fistps %0" : "=m" (res_16) : "t" (-32768.51L) : "st"); __asm__ volatile ("fnstsw" : "=a" (sw)); if ((sw & EXC) != IE) { printf("FAIL: fistp -32768.51\n"); ret = 1; } __asm__ volatile ("fnclex"); - __asm__ volatile ("fistp %0" : "=m" (res_16) : "t" (ld_nan) : "st"); + __asm__ volatile ("fistps %0" : "=m" (res_16) : "t" (ld_nan) : "st"); __asm__ volatile ("fnstsw" : "=a" (sw)); if ((sw & EXC) != IE) { printf("FAIL: fistp nan\n"); ret = 1; } __asm__ volatile ("fnclex"); - __asm__ volatile ("fistp %0" : "=m" (res_16) : "t" (ld_invalid_1.ld) : + __asm__ volatile ("fistps %0" : "=m" (res_16) : "t" (ld_invalid_1.ld) : "st"); __asm__ volatile ("fnstsw" : "=a" (sw)); if ((sw & EXC) != IE) { @@ -538,49 +538,49 @@ int main(void) } __asm__ volatile ("fnclex"); - __asm__ volatile ("fisttp %0" : "=m" (res_16) : "t" (1.5L) : "st"); + __asm__ volatile ("fisttps %0" : "=m" (res_16) : "t" (1.5L) : "st"); __asm__ volatile ("fnstsw" : "=a" (sw)); if ((sw & EXC) != PE) { printf("FAIL: fisttp inexact\n"); ret = 1; } __asm__ volatile ("fnclex"); - __asm__ volatile ("fisttp %0" : "=m" (res_16) : "t" (32768.0L) : "st"); + __asm__ volatile ("fisttps %0" : "=m" (res_16) : "t" (32768.0L) : "st"); __asm__ volatile ("fnstsw" : "=a" (sw)); if ((sw & EXC) != IE) { printf("FAIL: fisttp 32768\n"); ret = 1; } __asm__ volatile ("fnclex"); - __asm__ volatile ("fisttp %0" : "=m" (res_16) : "t" (32768.5L) : "st"); + __asm__ volatile ("fisttps %0" : "=m" (res_16) : "t" (32768.5L) : "st"); __asm__ volatile ("fnstsw" : "=a" (sw)); if ((sw & EXC) != IE) { printf("FAIL: fisttp 32768.5\n"); ret = 1; } __asm__ volatile ("fnclex"); - __asm__ volatile ("fisttp %0" : "=m" (res_16) : "t" (-32769.0L) : "st"); + __asm__ volatile ("fisttps %0" : "=m" (res_16) : "t" (-32769.0L) : "st"); __asm__ volatile ("fnstsw" : "=a" (sw)); if ((sw & EXC) != IE) { printf("FAIL: fisttp -32769\n"); ret = 1; } __asm__ volatile ("fnclex"); - __asm__ volatile ("fisttp %0" : "=m" (res_16) : "t" (-32769.5L) : "st"); + __asm__ volatile ("fisttps %0" : "=m" (res_16) : "t" (-32769.5L) : "st"); __asm__ volatile ("fnstsw" : "=a" (sw)); if ((sw & EXC) != IE) { printf("FAIL: fisttp -32769.5\n"); ret = 1; } __asm__ volatile ("fnclex"); - __asm__ volatile ("fisttp %0" : "=m" (res_16) : "t" (ld_nan) : "st"); + __asm__ volatile ("fisttps %0" : "=m" (res_16) : "t" (ld_nan) : "st"); __asm__ volatile ("fnstsw" : "=a" (sw)); if ((sw & EXC) != IE) { printf("FAIL: fisttp nan\n"); ret = 1; } __asm__ volatile ("fnclex"); - __asm__ volatile ("fisttp %0" : "=m" (res_16) : "t" (ld_invalid_1.ld) : + __asm__ volatile ("fisttps %0" : "=m" (res_16) : "t" (ld_invalid_1.ld) : "st"); __asm__ volatile ("fnstsw" 
: "=a" (sw)); if ((sw & EXC) != IE) { diff --git a/tests/tcg/i386/test-i386.c b/tests/tcg/i386/test-i386.c index 18d5609665..864c4e620d 100644 --- a/tests/tcg/i386/test-i386.c +++ b/tests/tcg/i386/test-i386.c @@ -34,15 +34,8 @@ #endif //#define LINUX_VM86_IOPL_FIX //#define TEST_P4_FLAGS -#ifdef __SSE__ -#define TEST_SSE #define TEST_CMOV 1 #define TEST_FCOMI 1 -#else -#undef TEST_SSE -#define TEST_CMOV 1 -#define TEST_FCOMI 1 -#endif #if defined(__x86_64__) #define FMT64X "%016lx" @@ -866,7 +859,7 @@ void test_fcvt(double a) uint16_t val16; val16 = (fpuc & ~0x0c00) | (i << 10); asm volatile ("fldcw %0" : : "m" (val16)); - asm volatile ("fist %0" : "=m" (wa) : "t" (a)); + asm volatile ("fists %0" : "=m" (wa) : "t" (a)); asm volatile ("fistl %0" : "=m" (ia) : "t" (a)); asm volatile ("fistpll %0" : "=m" (lla) : "t" (a) : "st"); asm volatile ("frndint ; fstl %0" : "=m" (ra) : "t" (a)); @@ -1998,7 +1991,7 @@ uint8_t code[] = { 0xc3, /* ret */ }; -asm(".section \".data\"\n" +asm(".section \".data_x\",\"awx\"\n" "smc_code2:\n" "movl 4(%esp), %eax\n" "movl %eax, smc_patch_addr2 + 1\n" @@ -2104,568 +2097,6 @@ static void test_enter(void) TEST_ENTER("w", uint16_t, 31); } -#ifdef TEST_SSE - -typedef int __m64 __attribute__ ((vector_size(8))); -typedef float __m128 __attribute__ ((vector_size(16))); - -typedef union { - double d[2]; - float s[4]; - uint32_t l[4]; - uint64_t q[2]; - __m128 dq; -} XMMReg; - -static uint64_t __attribute__((aligned(16))) test_values[4][2] = { - { 0x456723c698694873, 0xdc515cff944a58ec }, - { 0x1f297ccd58bad7ab, 0x41f21efba9e3e146 }, - { 0x007c62c2085427f8, 0x231be9e8cde7438d }, - { 0x0f76255a085427f8, 0xc233e9e8c4c9439a }, -}; - -#define SSE_OP(op)\ -{\ - asm volatile (#op " %2, %0" : "=x" (r.dq) : "0" (a.dq), "x" (b.dq));\ - printf("%-9s: a=" FMT64X "" FMT64X " b=" FMT64X "" FMT64X " r=" FMT64X "" FMT64X "\n",\ - #op,\ - a.q[1], a.q[0],\ - b.q[1], b.q[0],\ - r.q[1], r.q[0]);\ -} - -#define SSE_OP2(op)\ -{\ - int i;\ - for(i=0;i<2;i++) {\ - a.q[0] = test_values[2*i][0];\ - a.q[1] = test_values[2*i][1];\ - b.q[0] = test_values[2*i+1][0];\ - b.q[1] = test_values[2*i+1][1];\ - SSE_OP(op);\ - }\ -} - -#define MMX_OP2(op)\ -{\ - int i;\ - for(i=0;i<2;i++) {\ - a.q[0] = test_values[2*i][0];\ - b.q[0] = test_values[2*i+1][0];\ - asm volatile (#op " %2, %0" : "=y" (r.q[0]) : "0" (a.q[0]), "y" (b.q[0]));\ - printf("%-9s: a=" FMT64X " b=" FMT64X " r=" FMT64X "\n",\ - #op,\ - a.q[0],\ - b.q[0],\ - r.q[0]);\ - }\ - SSE_OP2(op);\ -} - -#define SHUF_OP(op, ib)\ -{\ - a.q[0] = test_values[0][0];\ - a.q[1] = test_values[0][1];\ - b.q[0] = test_values[1][0];\ - b.q[1] = test_values[1][1];\ - asm volatile (#op " $" #ib ", %2, %0" : "=x" (r.dq) : "0" (a.dq), "x" (b.dq));\ - printf("%-9s: a=" FMT64X "" FMT64X " b=" FMT64X "" FMT64X " ib=%02x r=" FMT64X "" FMT64X "\n",\ - #op,\ - a.q[1], a.q[0],\ - b.q[1], b.q[0],\ - ib,\ - r.q[1], r.q[0]);\ -} - -#define PSHUF_OP(op, ib)\ -{\ - int i;\ - for(i=0;i<2;i++) {\ - a.q[0] = test_values[2*i][0];\ - a.q[1] = test_values[2*i][1];\ - asm volatile (#op " $" #ib ", %1, %0" : "=x" (r.dq) : "x" (a.dq));\ - printf("%-9s: a=" FMT64X "" FMT64X " ib=%02x r=" FMT64X "" FMT64X "\n",\ - #op,\ - a.q[1], a.q[0],\ - ib,\ - r.q[1], r.q[0]);\ - }\ -} - -#define SHIFT_IM(op, ib)\ -{\ - int i;\ - for(i=0;i<2;i++) {\ - a.q[0] = test_values[2*i][0];\ - a.q[1] = test_values[2*i][1];\ - asm volatile (#op " $" #ib ", %0" : "=x" (r.dq) : "0" (a.dq));\ - printf("%-9s: a=" FMT64X "" FMT64X " ib=%02x r=" FMT64X "" FMT64X "\n",\ - #op,\ - a.q[1], a.q[0],\ - ib,\ - r.q[1], 
r.q[0]);\ - }\ -} - -#define SHIFT_OP(op, ib)\ -{\ - int i;\ - SHIFT_IM(op, ib);\ - for(i=0;i<2;i++) {\ - a.q[0] = test_values[2*i][0];\ - a.q[1] = test_values[2*i][1];\ - b.q[0] = ib;\ - b.q[1] = 0;\ - asm volatile (#op " %2, %0" : "=x" (r.dq) : "0" (a.dq), "x" (b.dq));\ - printf("%-9s: a=" FMT64X "" FMT64X " b=" FMT64X "" FMT64X " r=" FMT64X "" FMT64X "\n",\ - #op,\ - a.q[1], a.q[0],\ - b.q[1], b.q[0],\ - r.q[1], r.q[0]);\ - }\ -} - -#define MOVMSK(op)\ -{\ - int i, reg;\ - for(i=0;i<2;i++) {\ - a.q[0] = test_values[2*i][0];\ - a.q[1] = test_values[2*i][1];\ - asm volatile (#op " %1, %0" : "=r" (reg) : "x" (a.dq));\ - printf("%-9s: a=" FMT64X "" FMT64X " r=%08x\n",\ - #op,\ - a.q[1], a.q[0],\ - reg);\ - }\ -} - -#define SSE_OPS(a) \ -SSE_OP(a ## ps);\ -SSE_OP(a ## ss); - -#define SSE_OPD(a) \ -SSE_OP(a ## pd);\ -SSE_OP(a ## sd); - -#define SSE_COMI(op, field)\ -{\ - unsigned long eflags;\ - XMMReg a, b;\ - a.field[0] = a1;\ - b.field[0] = b1;\ - asm volatile (#op " %2, %1\n"\ - "pushf\n"\ - "pop %0\n"\ - : "=rm" (eflags)\ - : "x" (a.dq), "x" (b.dq));\ - printf("%-9s: a=%f b=%f cc=%04lx\n",\ - #op, a1, b1,\ - eflags & (CC_C | CC_P | CC_Z | CC_S | CC_O | CC_A));\ -} - -void test_sse_comi(double a1, double b1) -{ - SSE_COMI(ucomiss, s); - SSE_COMI(ucomisd, d); - SSE_COMI(comiss, s); - SSE_COMI(comisd, d); -} - -#define CVT_OP_XMM(op)\ -{\ - asm volatile (#op " %1, %0" : "=x" (r.dq) : "x" (a.dq));\ - printf("%-9s: a=" FMT64X "" FMT64X " r=" FMT64X "" FMT64X "\n",\ - #op,\ - a.q[1], a.q[0],\ - r.q[1], r.q[0]);\ -} - -/* Force %xmm0 usage to avoid the case where both register index are 0 - to test instruction decoding more extensively */ -#define CVT_OP_XMM2MMX(op)\ -{\ - asm volatile (#op " %1, %0" : "=y" (r.q[0]) : "x" (a.dq) \ - : "%xmm0"); \ - asm volatile("emms\n"); \ - printf("%-9s: a=" FMT64X "" FMT64X " r=" FMT64X "\n",\ - #op,\ - a.q[1], a.q[0],\ - r.q[0]);\ -} - -#define CVT_OP_MMX2XMM(op)\ -{\ - asm volatile (#op " %1, %0" : "=x" (r.dq) : "y" (a.q[0]));\ - asm volatile("emms\n"); \ - printf("%-9s: a=" FMT64X " r=" FMT64X "" FMT64X "\n",\ - #op,\ - a.q[0],\ - r.q[1], r.q[0]);\ -} - -#define CVT_OP_REG2XMM(op)\ -{\ - asm volatile (#op " %1, %0" : "=x" (r.dq) : "r" (a.l[0]));\ - printf("%-9s: a=%08x r=" FMT64X "" FMT64X "\n",\ - #op,\ - a.l[0],\ - r.q[1], r.q[0]);\ -} - -#define CVT_OP_XMM2REG(op)\ -{\ - asm volatile (#op " %1, %0" : "=r" (r.l[0]) : "x" (a.dq));\ - printf("%-9s: a=" FMT64X "" FMT64X " r=%08x\n",\ - #op,\ - a.q[1], a.q[0],\ - r.l[0]);\ -} - -struct fpxstate { - uint16_t fpuc; - uint16_t fpus; - uint16_t fptag; - uint16_t fop; - uint32_t fpuip; - uint16_t cs_sel; - uint16_t dummy0; - uint32_t fpudp; - uint16_t ds_sel; - uint16_t dummy1; - uint32_t mxcsr; - uint32_t mxcsr_mask; - uint8_t fpregs1[8 * 16]; - uint8_t xmm_regs[8 * 16]; - uint8_t dummy2[224]; -}; - -static struct fpxstate fpx_state __attribute__((aligned(16))); -static struct fpxstate fpx_state2 __attribute__((aligned(16))); - -void test_fxsave(void) -{ - struct fpxstate *fp = &fpx_state; - struct fpxstate *fp2 = &fpx_state2; - int i, nb_xmm; - XMMReg a, b; - a.q[0] = test_values[0][0]; - a.q[1] = test_values[0][1]; - b.q[0] = test_values[1][0]; - b.q[1] = test_values[1][1]; - - asm("movdqa %2, %%xmm0\n" - "movdqa %3, %%xmm7\n" -#if defined(__x86_64__) - "movdqa %2, %%xmm15\n" -#endif - " fld1\n" - " fldpi\n" - " fldln2\n" - " fxsave %0\n" - " fxrstor %0\n" - " fxsave %1\n" - " fninit\n" - : "=m" (*(uint32_t *)fp2), "=m" (*(uint32_t *)fp) - : "m" (a), "m" (b)); - printf("fpuc=%04x\n", fp->fpuc); - 
printf("fpus=%04x\n", fp->fpus); - printf("fptag=%04x\n", fp->fptag); - for(i = 0; i < 3; i++) { - printf("ST%d: " FMT64X " %04x\n", - i, - *(uint64_t *)&fp->fpregs1[i * 16], - *(uint16_t *)&fp->fpregs1[i * 16 + 8]); - } - printf("mxcsr=%08x\n", fp->mxcsr & 0x1f80); -#if defined(__x86_64__) - nb_xmm = 16; -#else - nb_xmm = 8; -#endif - for(i = 0; i < nb_xmm; i++) { - printf("xmm%d: " FMT64X "" FMT64X "\n", - i, - *(uint64_t *)&fp->xmm_regs[i * 16], - *(uint64_t *)&fp->xmm_regs[i * 16 + 8]); - } -} - -void test_sse(void) -{ - XMMReg r, a, b; - int i; - - MMX_OP2(punpcklbw); - MMX_OP2(punpcklwd); - MMX_OP2(punpckldq); - MMX_OP2(packsswb); - MMX_OP2(pcmpgtb); - MMX_OP2(pcmpgtw); - MMX_OP2(pcmpgtd); - MMX_OP2(packuswb); - MMX_OP2(punpckhbw); - MMX_OP2(punpckhwd); - MMX_OP2(punpckhdq); - MMX_OP2(packssdw); - MMX_OP2(pcmpeqb); - MMX_OP2(pcmpeqw); - MMX_OP2(pcmpeqd); - - MMX_OP2(paddq); - MMX_OP2(pmullw); - MMX_OP2(psubusb); - MMX_OP2(psubusw); - MMX_OP2(pminub); - MMX_OP2(pand); - MMX_OP2(paddusb); - MMX_OP2(paddusw); - MMX_OP2(pmaxub); - MMX_OP2(pandn); - - MMX_OP2(pmulhuw); - MMX_OP2(pmulhw); - - MMX_OP2(psubsb); - MMX_OP2(psubsw); - MMX_OP2(pminsw); - MMX_OP2(por); - MMX_OP2(paddsb); - MMX_OP2(paddsw); - MMX_OP2(pmaxsw); - MMX_OP2(pxor); - MMX_OP2(pmuludq); - MMX_OP2(pmaddwd); - MMX_OP2(psadbw); - MMX_OP2(psubb); - MMX_OP2(psubw); - MMX_OP2(psubd); - MMX_OP2(psubq); - MMX_OP2(paddb); - MMX_OP2(paddw); - MMX_OP2(paddd); - - MMX_OP2(pavgb); - MMX_OP2(pavgw); - - asm volatile ("pinsrw $1, %1, %0" : "=y" (r.q[0]) : "r" (0x12345678)); - printf("%-9s: r=" FMT64X "\n", "pinsrw", r.q[0]); - - asm volatile ("pinsrw $5, %1, %0" : "=x" (r.dq) : "r" (0x12345678)); - printf("%-9s: r=" FMT64X "" FMT64X "\n", "pinsrw", r.q[1], r.q[0]); - - a.q[0] = test_values[0][0]; - a.q[1] = test_values[0][1]; - asm volatile ("pextrw $1, %1, %0" : "=r" (r.l[0]) : "y" (a.q[0])); - printf("%-9s: r=%08x\n", "pextrw", r.l[0]); - - asm volatile ("pextrw $5, %1, %0" : "=r" (r.l[0]) : "x" (a.dq)); - printf("%-9s: r=%08x\n", "pextrw", r.l[0]); - - asm volatile ("pmovmskb %1, %0" : "=r" (r.l[0]) : "y" (a.q[0])); - printf("%-9s: r=%08x\n", "pmovmskb", r.l[0]); - - asm volatile ("pmovmskb %1, %0" : "=r" (r.l[0]) : "x" (a.dq)); - printf("%-9s: r=%08x\n", "pmovmskb", r.l[0]); - - { - r.q[0] = -1; - r.q[1] = -1; - - a.q[0] = test_values[0][0]; - a.q[1] = test_values[0][1]; - b.q[0] = test_values[1][0]; - b.q[1] = test_values[1][1]; - asm volatile("maskmovq %1, %0" : - : "y" (a.q[0]), "y" (b.q[0]), "D" (&r) - : "memory"); - printf("%-9s: r=" FMT64X " a=" FMT64X " b=" FMT64X "\n", - "maskmov", - r.q[0], - a.q[0], - b.q[0]); - asm volatile("maskmovdqu %1, %0" : - : "x" (a.dq), "x" (b.dq), "D" (&r) - : "memory"); - printf("%-9s: r=" FMT64X "" FMT64X " a=" FMT64X "" FMT64X " b=" FMT64X "" FMT64X "\n", - "maskmov", - r.q[1], r.q[0], - a.q[1], a.q[0], - b.q[1], b.q[0]); - } - - asm volatile ("emms"); - - SSE_OP2(punpcklqdq); - SSE_OP2(punpckhqdq); - SSE_OP2(andps); - SSE_OP2(andpd); - SSE_OP2(andnps); - SSE_OP2(andnpd); - SSE_OP2(orps); - SSE_OP2(orpd); - SSE_OP2(xorps); - SSE_OP2(xorpd); - - SSE_OP2(unpcklps); - SSE_OP2(unpcklpd); - SSE_OP2(unpckhps); - SSE_OP2(unpckhpd); - - SHUF_OP(shufps, 0x78); - SHUF_OP(shufpd, 0x02); - - PSHUF_OP(pshufd, 0x78); - PSHUF_OP(pshuflw, 0x78); - PSHUF_OP(pshufhw, 0x78); - - SHIFT_OP(psrlw, 7); - SHIFT_OP(psrlw, 16); - SHIFT_OP(psraw, 7); - SHIFT_OP(psraw, 16); - SHIFT_OP(psllw, 7); - SHIFT_OP(psllw, 16); - - SHIFT_OP(psrld, 7); - SHIFT_OP(psrld, 32); - SHIFT_OP(psrad, 7); - SHIFT_OP(psrad, 32); - 
SHIFT_OP(pslld, 7); - SHIFT_OP(pslld, 32); - - SHIFT_OP(psrlq, 7); - SHIFT_OP(psrlq, 32); - SHIFT_OP(psllq, 7); - SHIFT_OP(psllq, 32); - - SHIFT_IM(psrldq, 16); - SHIFT_IM(psrldq, 7); - SHIFT_IM(pslldq, 16); - SHIFT_IM(pslldq, 7); - - MOVMSK(movmskps); - MOVMSK(movmskpd); - - /* FPU specific ops */ - - { - uint32_t mxcsr; - asm volatile("stmxcsr %0" : "=m" (mxcsr)); - printf("mxcsr=%08x\n", mxcsr & 0x1f80); - asm volatile("ldmxcsr %0" : : "m" (mxcsr)); - } - - test_sse_comi(2, -1); - test_sse_comi(2, 2); - test_sse_comi(2, 3); - test_sse_comi(2, q_nan.d); - test_sse_comi(q_nan.d, -1); - - for(i = 0; i < 2; i++) { - a.s[0] = 2.7; - a.s[1] = 3.4; - a.s[2] = 4; - a.s[3] = -6.3; - b.s[0] = 45.7; - b.s[1] = 353.4; - b.s[2] = 4; - b.s[3] = 56.3; - if (i == 1) { - a.s[0] = q_nan.d; - b.s[3] = q_nan.d; - } - - SSE_OPS(add); - SSE_OPS(mul); - SSE_OPS(sub); - SSE_OPS(min); - SSE_OPS(div); - SSE_OPS(max); - SSE_OPS(sqrt); - SSE_OPS(cmpeq); - SSE_OPS(cmplt); - SSE_OPS(cmple); - SSE_OPS(cmpunord); - SSE_OPS(cmpneq); - SSE_OPS(cmpnlt); - SSE_OPS(cmpnle); - SSE_OPS(cmpord); - - - a.d[0] = 2.7; - a.d[1] = -3.4; - b.d[0] = 45.7; - b.d[1] = -53.4; - if (i == 1) { - a.d[0] = q_nan.d; - b.d[1] = q_nan.d; - } - SSE_OPD(add); - SSE_OPD(mul); - SSE_OPD(sub); - SSE_OPD(min); - SSE_OPD(div); - SSE_OPD(max); - SSE_OPD(sqrt); - SSE_OPD(cmpeq); - SSE_OPD(cmplt); - SSE_OPD(cmple); - SSE_OPD(cmpunord); - SSE_OPD(cmpneq); - SSE_OPD(cmpnlt); - SSE_OPD(cmpnle); - SSE_OPD(cmpord); - } - - /* float to float/int */ - a.s[0] = 2.7; - a.s[1] = 3.4; - a.s[2] = 4; - a.s[3] = -6.3; - CVT_OP_XMM(cvtps2pd); - CVT_OP_XMM(cvtss2sd); - CVT_OP_XMM2MMX(cvtps2pi); - CVT_OP_XMM2MMX(cvttps2pi); - CVT_OP_XMM2REG(cvtss2si); - CVT_OP_XMM2REG(cvttss2si); - CVT_OP_XMM(cvtps2dq); - CVT_OP_XMM(cvttps2dq); - - a.d[0] = 2.6; - a.d[1] = -3.4; - CVT_OP_XMM(cvtpd2ps); - CVT_OP_XMM(cvtsd2ss); - CVT_OP_XMM2MMX(cvtpd2pi); - CVT_OP_XMM2MMX(cvttpd2pi); - CVT_OP_XMM2REG(cvtsd2si); - CVT_OP_XMM2REG(cvttsd2si); - CVT_OP_XMM(cvtpd2dq); - CVT_OP_XMM(cvttpd2dq); - - /* sse/mmx moves */ - CVT_OP_XMM2MMX(movdq2q); - CVT_OP_MMX2XMM(movq2dq); - - /* int to float */ - a.l[0] = -6; - a.l[1] = 2; - a.l[2] = 100; - a.l[3] = -60000; - CVT_OP_MMX2XMM(cvtpi2ps); - CVT_OP_MMX2XMM(cvtpi2pd); - CVT_OP_REG2XMM(cvtsi2ss); - CVT_OP_REG2XMM(cvtsi2sd); - CVT_OP_XMM(cvtdq2ps); - CVT_OP_XMM(cvtdq2pd); - - /* XXX: test PNI insns */ -#if 0 - SSE_OP2(movshdup); -#endif - asm volatile ("emms"); -} - -#endif - #define TEST_CONV_RAX(op)\ {\ unsigned long a, r;\ @@ -2756,9 +2187,5 @@ int main(int argc, char **argv) #endif test_enter(); test_conv(); -#ifdef TEST_SSE - test_sse(); - test_fxsave(); -#endif return 0; } diff --git a/tests/tcg/i386/test-mmx.c b/tests/tcg/i386/test-mmx.c new file mode 100644 index 0000000000..09e5d583ff --- /dev/null +++ b/tests/tcg/i386/test-mmx.c @@ -0,0 +1,315 @@ +#include +#include +#include +#include + +#ifndef TEST_FILE +#define TEST_FILE "test-mmx.h" +#endif +#ifndef EMMS +#define EMMS "emms" +#endif + +typedef void (*testfn)(void); + +typedef struct { + uint64_t q0, q1; +} __attribute__((aligned(16))) v2di; + +typedef struct { + uint64_t mm[8]; + v2di xmm[8]; + uint64_t r[16]; + uint64_t flags; + uint32_t ff; + uint64_t pad; + v2di mem[4]; + v2di mem0[4]; +} reg_state; + +typedef struct { + int n; + testfn fn; + const char *s; + reg_state *init; +} TestDef; + +reg_state initI; +reg_state initF32; +reg_state initF64; + +static void dump_mmx(int n, const uint64_t *r, int ff) +{ + if (ff == 32) { + float v[2]; + memcpy(v, r, sizeof(v)); + printf("MM%d = 
%016lx %8g %8g\n", n, *r, v[1], v[0]); + } else { + printf("MM%d = %016lx\n", n, *r); + } +} + +static void dump_xmm(const char *name, int n, const v2di *r, int ff) +{ + printf("%s%d = %016lx %016lx\n", + name, n, r->q1, r->q0); + if (ff == 32) { + float v[4]; + memcpy(v, r, sizeof(v)); + printf(" %8g %8g %8g %8g\n", + v[3], v[2], v[1], v[0]); + } +} + +static void dump_regs(reg_state *s, int ff) +{ + int i; + + for (i = 0; i < 8; i++) { + dump_mmx(i, &s->mm[i], ff); + } + for (i = 0; i < 4; i++) { + dump_xmm("mem", i, &s->mem0[i], 0); + } +} + +static void compare_state(const reg_state *a, const reg_state *b) +{ + int i; + for (i = 0; i < 8; i++) { + if (a->mm[i] != b->mm[i]) { + printf("MM%d = %016lx\n", i, b->mm[i]); + } + } + for (i = 0; i < 16; i++) { + if (a->r[i] != b->r[i]) { + printf("r%d = %016lx\n", i, b->r[i]); + } + } + for (i = 0; i < 8; i++) { + if (memcmp(&a->xmm[i], &b->xmm[i], 8)) { + dump_xmm("xmm", i, &b->xmm[i], a->ff); + } + } + for (i = 0; i < 4; i++) { + if (memcmp(&a->mem0[i], &a->mem[i], 16)) { + dump_xmm("mem", i, &a->mem[i], a->ff); + } + } + if (a->flags != b->flags) { + printf("FLAGS = %016lx\n", b->flags); + } +} + +#define LOADMM(r, o) "movq " #r ", " #o "[%0]\n\t" +#define LOADXMM(r, o) "movdqa " #r ", " #o "[%0]\n\t" +#define STOREMM(r, o) "movq " #o "[%1], " #r "\n\t" +#define STOREXMM(r, o) "movdqa " #o "[%1], " #r "\n\t" +#define MMREG(F) \ + F(mm0, 0x00) \ + F(mm1, 0x08) \ + F(mm2, 0x10) \ + F(mm3, 0x18) \ + F(mm4, 0x20) \ + F(mm5, 0x28) \ + F(mm6, 0x30) \ + F(mm7, 0x38) +#define XMMREG(F) \ + F(xmm0, 0x040) \ + F(xmm1, 0x050) \ + F(xmm2, 0x060) \ + F(xmm3, 0x070) \ + F(xmm4, 0x080) \ + F(xmm5, 0x090) \ + F(xmm6, 0x0a0) \ + F(xmm7, 0x0b0) +#define LOADREG(r, o) "mov " #r ", " #o "[rax]\n\t" +#define STOREREG(r, o) "mov " #o "[rax], " #r "\n\t" +#define REG(F) \ + F(rbx, 0xc8) \ + F(rcx, 0xd0) \ + F(rdx, 0xd8) \ + F(rsi, 0xe0) \ + F(rdi, 0xe8) \ + F(r8, 0x100) \ + F(r9, 0x108) \ + F(r10, 0x110) \ + F(r11, 0x118) \ + F(r12, 0x120) \ + F(r13, 0x128) \ + F(r14, 0x130) \ + F(r15, 0x138) \ + +static void run_test(const TestDef *t) +{ + reg_state result; + reg_state *init = t->init; + memcpy(init->mem, init->mem0, sizeof(init->mem)); + printf("%5d %s\n", t->n, t->s); + asm volatile( + MMREG(LOADMM) + XMMREG(LOADXMM) + "sub rsp, 128\n\t" + "push rax\n\t" + "push rbx\n\t" + "push rcx\n\t" + "push rdx\n\t" + "push %1\n\t" + "push %2\n\t" + "mov rax, %0\n\t" + "pushf\n\t" + "pop rbx\n\t" + "shr rbx, 8\n\t" + "shl rbx, 8\n\t" + "mov rcx, 0x140[rax]\n\t" + "and rcx, 0xff\n\t" + "or rbx, rcx\n\t" + "push rbx\n\t" + "popf\n\t" + REG(LOADREG) + "mov rax, 0xc0[rax]\n\t" + "call [rsp]\n\t" + "mov [rsp], rax\n\t" + "mov rax, 8[rsp]\n\t" + REG(STOREREG) + "mov rbx, [rsp]\n\t" + "mov 0xc0[rax], rbx\n\t" + "mov rbx, 0\n\t" + "mov 0xf0[rax], rbx\n\t" + "mov 0xf8[rax], rbx\n\t" + "pushf\n\t" + "pop rbx\n\t" + "and rbx, 0xff\n\t" + "mov 0x140[rax], rbx\n\t" + "add rsp, 16\n\t" + "pop rdx\n\t" + "pop rcx\n\t" + "pop rbx\n\t" + "pop rax\n\t" + "add rsp, 128\n\t" + MMREG(STOREMM) + EMMS "\n\t" + XMMREG(STOREXMM) + : : "r"(init), "r"(&result), "r"(t->fn) + : "memory", "cc", + "rsi", "rdi", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", + "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", + "xmm6", "xmm7", "xmm8", "xmm9", "xmm10", "xmm11", + "xmm12", "xmm13", "xmm14", "xmm15" + ); + compare_state(init, &result); +} + +#define TEST(n, cmd, type) \ +static void __attribute__((naked)) test_##n(void) \ +{ \ + asm 
volatile(cmd); \ + asm volatile("ret"); \ +} +#include TEST_FILE + + +static const TestDef test_table[] = { +#define TEST(n, cmd, type) {n, test_##n, cmd, &init##type}, +#include TEST_FILE + {-1, NULL, "", NULL} +}; + +static void run_all(void) +{ + const TestDef *t; + for (t = test_table; t->fn; t++) { + run_test(t); + } +} + +#define ARRAY_LEN(x) (sizeof(x) / sizeof(x[0])) + +float val_f32[] = {2.0, -1.0, 4.8, 0.8, 3, -42.0, 5e6, 7.5, 8.3}; +uint64_t val_i64[] = { + 0x3d6b3b6a9e4118f2lu, 0x355ae76d2774d78clu, + 0xd851c54a56bf1f29lu, 0x4a84d1d50bf4c4fflu, + 0x5826475e2c5fd799lu, 0xfd32edc01243f5e9lu, +}; + +v2di deadbeef = {0xa5a5a5a5deadbeefull, 0xa5a5a5a5deadbeefull}; + +void init_f32reg(uint64_t *r) +{ + static int n; + float v[2]; + int i; + for (i = 0; i < 2; i++) { + v[i] = val_f32[n++]; + if (n == ARRAY_LEN(val_f32)) { + n = 0; + } + } + memcpy(r, v, sizeof(*r)); +} + +void init_intreg(uint64_t *r) +{ + static uint64_t mask; + static int n; + + *r = val_i64[n] ^ mask; + n++; + if (n == ARRAY_LEN(val_i64)) { + n = 0; + mask *= 0x104C11DB7; + } +} + +static void init_all(reg_state *s) +{ + int i; + + for (i = 0; i < 16; i++) { + init_intreg(&s->r[i]); + } + s->r[3] = (uint64_t)&s->mem[0]; /* rdx */ + s->r[5] = (uint64_t)&s->mem[2]; /* rdi */ + s->r[6] = 0; + s->r[7] = 0; + s->flags = 2; + for (i = 0; i < 8; i++) { + s->xmm[i] = deadbeef; + memcpy(&s->mm[i], &s->xmm[i], sizeof(s->mm[i])); + } + for (i = 0; i < 2; i++) { + s->mem0[i] = deadbeef; + } +} + +int main(int argc, char *argv[]) +{ + init_all(&initI); + init_intreg(&initI.mm[5]); + init_intreg(&initI.mm[6]); + init_intreg(&initI.mm[7]); + init_intreg(&initI.mem0[1].q0); + init_intreg(&initI.mem0[1].q1); + printf("Int:\n"); + dump_regs(&initI, 0); + + init_all(&initF32); + init_f32reg(&initF32.mm[5]); + init_f32reg(&initF32.mm[6]); + init_f32reg(&initF32.mm[7]); + init_f32reg(&initF32.mem0[1].q0); + init_f32reg(&initF32.mem0[1].q1); + initF32.ff = 32; + printf("F32:\n"); + dump_regs(&initF32, 32); + + if (argc > 1) { + int n = atoi(argv[1]); + run_test(&test_table[n]); + } else { + run_all(); + } + return 0; +} diff --git a/tests/tcg/i386/test-mmx.py b/tests/tcg/i386/test-mmx.py new file mode 100755 index 0000000000..392315e176 --- /dev/null +++ b/tests/tcg/i386/test-mmx.py @@ -0,0 +1,244 @@ +#! 
/usr/bin/env python3 + +# Generate test-avx.h from x86.csv + +import csv +import sys +from fnmatch import fnmatch + +ignore = set(["EMMS", "FEMMS", "FISTTP", + "LDMXCSR", "VLDMXCSR", "STMXCSR", "VSTMXCSR"]) + +imask = { + 'PALIGNR': 0x3f, + 'PEXTRB': 0x0f, + 'PEXTRW': 0x07, + 'PEXTRD': 0x03, + 'PEXTRQ': 0x01, + 'PINSRB': 0x0f, + 'PINSRW': 0x07, + 'PINSRD': 0x03, + 'PINSRQ': 0x01, + 'PSHUF[DW]': 0xff, + 'PSHUF[LH]W': 0xff, + 'PS[LR][AL][WDQ]': 0x3f, +} + +def strip_comments(x): + for l in x: + if l != '' and l[0] != '#': + yield l + +def reg_w(w): + if w == 8: + return 'al' + elif w == 16: + return 'ax' + elif w == 32: + return 'eax' + elif w == 64: + return 'rax' + raise Exception("bad reg_w %d" % w) + +def mem_w(w): + if w == 8: + t = "BYTE" + elif w == 16: + t = "WORD" + elif w == 32: + t = "DWORD" + elif w == 64: + t = "QWORD" + else: + raise Exception() + + return t + " PTR 32[rdx]" + +class MMArg(): + isxmm = True + + def __init__(self, mw): + if mw not in [0, 32, 64]: + raise Exception("Bad /m width: %s" % w) + self.mw = mw + self.ismem = mw != 0 + def regstr(self, n): + if n < 0: + return mem_w(self.mw) + else: + return "mm%d" % (n, ) + +def match(op, pattern): + return fnmatch(op, pattern) + +class ArgImm8u(): + isxmm = False + ismem = False + def __init__(self, op): + for k, v in imask.items(): + if match(op, k): + self.mask = imask[k]; + return + raise Exception("Unknown immediate") + def vals(self): + mask = self.mask + yield 0 + n = 0 + while n != mask: + n += 1 + while (n & ~mask) != 0: + n += (n & ~mask) + yield n + +class ArgRM(): + isxmm = False + def __init__(self, rw, mw): + if rw not in [8, 16, 32, 64]: + raise Exception("Bad r/w width: %s" % w) + if mw not in [0, 8, 16, 32, 64]: + raise Exception("Bad r/w width: %s" % w) + self.rw = rw + self.mw = mw + self.ismem = mw != 0 + def regstr(self, n): + if n < 0: + return mem_w(self.mw) + else: + return reg_w(self.rw) + +class ArgMem(): + isxmm = False + ismem = True + def __init__(self, w): + if w not in [8, 16, 32, 64, 128, 256]: + raise Exception("Bad mem width: %s" % w) + self.w = w + def regstr(self, n): + return mem_w(self.w) + +class SkipInstruction(Exception): + pass + +def ArgGenerator(arg, op): + if arg[:2] == 'mm': + if "/" in arg: + r, m = arg.split('/') + if (m[0] != 'm'): + raise Exception("Expected /m: %s", arg) + return MMArg(int(m[1:])); + else: + return MMArg(0); + elif arg[:4] == 'imm8': + return ArgImm8u(op); + elif arg[0] == 'r': + if '/m' in arg: + r, m = arg.split('/') + if (m[0] != 'm'): + raise Exception("Expected /m: %s", arg) + mw = int(m[1:]) + if r == 'r': + rw = mw + else: + rw = int(r[1:]) + return ArgRM(rw, mw) + + return ArgRM(int(arg[1:]), 0); + elif arg[0] == 'm': + return ArgMem(int(arg[1:])) + else: + raise SkipInstruction + +class InsnGenerator: + def __init__(self, op, args): + self.op = op + if op[0:2] == "PF": + self.optype = 'F32' + else: + self.optype = 'I' + + try: + self.args = list(ArgGenerator(a, op) for a in args) + if len(self.args) > 0 and self.args[-1] is None: + self.args = self.args[:-1] + except SkipInstruction: + raise + except Exception as e: + raise Exception("Bad arg %s: %s" % (op, e)) + + def gen(self): + regs = (5, 6, 7) + dest = 4 + + nreg = len(self.args) + if nreg == 0: + yield self.op + return + if isinstance(self.args[-1], ArgImm8u): + nreg -= 1 + immarg = self.args[-1] + else: + immarg = None + memarg = -1 + for n, arg in enumerate(self.args): + if arg.ismem: + memarg = n + + if nreg == 1: + regset = [(regs[0],)] + if memarg == 0: + regset += [(-1,)] + elif 
nreg == 2: + regset = [ + (regs[0], regs[1]), + (regs[0], regs[0]), + ] + if memarg == 0: + regset += [(-1, regs[0])] + elif memarg == 1: + regset += [(dest, -1)] + else: + raise Exception("Too many regs: %s(%d)" % (self.op, nreg)) + + for regv in regset: + argstr = [] + for i in range(nreg): + arg = self.args[i] + argstr.append(arg.regstr(regv[i])) + if immarg is None: + yield self.op + ' ' + ','.join(argstr) + else: + for immval in immarg.vals(): + yield self.op + ' ' + ','.join(argstr) + ',' + str(immval) + +def split0(s): + if s == '': + return [] + return s.split(',') + +def main(): + n = 0 + if len(sys.argv) <= 3: + print("Usage: test-mmx.py x86.csv test-mmx.h CPUID...") + exit(1) + csvfile = open(sys.argv[1], 'r', newline='') + archs = sys.argv[3:] + with open(sys.argv[2], "w") as outf: + outf.write("// Generated by test-mmx.py. Do not edit.\n") + for row in csv.reader(strip_comments(csvfile)): + insn = row[0].replace(',', '').split() + if insn[0] in ignore: + continue + cpuid = row[6] + if cpuid in archs: + try: + g = InsnGenerator(insn[0], insn[1:]) + for insn in g.gen(): + outf.write('TEST(%d, "%s", %s)\n' % (n, insn, g.optype)) + n += 1 + except SkipInstruction: + pass + outf.write("#undef TEST\n") + csvfile.close() + +if __name__ == "__main__": + main() diff --git a/tests/tcg/i386/x86.csv b/tests/tcg/i386/x86.csv new file mode 100644 index 0000000000..c43bf42dd3 --- /dev/null +++ b/tests/tcg/i386/x86.csv @@ -0,0 +1,4658 @@ +# x86 instruction set description version 0.2x, 2018-05-08 +# +# https://golang.org/x/arch/x86 +# +# The latest version of the CSV file is +# available online at https://golang.org/s/x86.csv. +# +# This file contains a block of comment lines, each beginning with #, +# followed by entries in CSV format. All the # comments are at the top +# of the file, so a reader can skip past the comments and hand the +# rest of the file to a standard CSV reader. +# Each CSV line contains these fields: +# +# 1. The Intel manual instruction mnemonic. For example, "SHR r/m32, imm8". +# +# 2. The Go assembler instruction mnemonic. For example, "SHRL imm8, r/m32". +# +# 3. The GNU binutils instruction mnemonic. For example, "shrl imm8, r/m32". +# +# 4. The instruction encoding. For example, "C1 /4 ib". +# +# 5. The validity of the instruction in 32-bit (aka compatiblity, legacy) mode. +# +# 6. The validity of the instruction in 64-bit mode. +# +# 7. The CPUID feature flags that signal support for the instruction. +# +# 8. Additional comma-separated tags containing hints about the instruction. +# +# 9. The read/write actions of the instruction on the arguments used in +# the Intel mnemonic. For example, "rw,r" to denote that "SHR r/m32, imm8" +# reads and writes its first argument but only reads its second argument. +# +# 10. Whether the opcode used in the Intel mnemonic has encoding forms +# distinguished only by operand size, like most arithmetic instructions. +# The string "Y" indicates yes, the string "" indicates no. +# +# 11. The data size of the operation in bits. In general this is the size corresponding +# to the Go and GNU assembler opcode suffix. +# Mnemonics (the opcode string) +# +# The instruction mnemonics are as used in the Intel manual, with a few exceptions. +# +# Mnemonics claiming general memory forms but that really require fixed addressing modes +# are omitted in favor of their equivalents with implicit arguments.. +# For example, "CMPS m16, m16" (really CMPS [SI], [DI]) is omitted in favor of "CMPSW". 
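+#
+# As a worked example of the fields above, the entry
+#   "ADC r/m32, imm8","ADCL imm8, r/m32","adcl imm8, r/m32","83 /2 ib","V","V","","operand32","rw,r","Y","32"
+# (which appears below) reads as: Intel, Go, and GNU mnemonics; encoding
+# "83 /2 ib"; valid in both 32-bit and 64-bit mode; no CPUID feature flag
+# required; tagged operand32; first operand read-write, second read-only;
+# "Y" because the opcode has forms distinguished only by operand size; and
+# a 32-bit data size.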
+# +# Instruction forms with an explicit REP, REPE, or REPNE prefix are also omitted. +# Encoders and decoders are expected to handle those prefixes separately. +# +# Perhaps most significantly, the argument syntaxes used in the mnemonic indicate +# exactly how to derive the argument from the instruction encoding, or vice versa. +# +# Immediate values: imm8, imm8u, imm16, imm16u, imm32, imm64. +# Immediates are signed by default; the u suffixes indicates an unsigned value. +# Immediates may have bitfield-like modifier that specifies how much bits +# are used. For example, imm8u:4 is encoded like 8bit immediate, +# but only 4bits are meaningful while the others are ignored or must be 0. +# +# Memory operands. The forms m, m128, m14/28byte, m16, m16&16, m16&32, m16&64, m16:16, m16:32, +# m16:64, m16int, m256, m2byte, m32, m32&32, m32fp, m32int, m512byte, m64, m64fp, m64int, +# m8, m80bcd, m80dec, m80fp, m94/108byte. These operands always correspond to the +# memory address specified by the r/m half of the modrm encoding. +# +# Integer registers. +# The forms r8, r16, r32, r64 indicate a register selected by the modrm reg encoding. +# The forms rmr16, rmr32, rmr64 indicate a register (never memory) selected by the modrm r/m encoding. +# The forms r/m8, r/m16, r/m32, and r/m64 indicate a register or memory selected by the modrm r/m encoding. +# Forms with two sizes, like r32/m16 also indicate a register or memory selected by the modrm r/m encodng, +# but the size for a register argument differs from the size of a memory argument. +# The forms r8V, r16V, r32V, r64V indicate a register selected by the VEX.vvvv bits. +# +# Multimedia registers. +# The forms mm1, xmm1, and ymm1 indicate a multimedia register selected by the +# modrm reg encoding. +# The forms mm2, xmm2, and ymm2 indicate a register (never memory) selected by +# the modrm r/m encoding. +# The forms mm2/m64, xmm2/m128, and so on indicate a register or memory +# selected by the modrm r/m encoding. +# The forms xmmV and ymmV indicate a register selected by the VEX.vvvv bits. +# The forms xmmI and ymmI indicate a register selected by the top four bits of an /is4 immediate byte. +# +# Bound registers. +# The form bnd1 indicates a bound register selected by the modrm reg encoding. +# The form bnd2 indicates a bound register (never memory) selected by the modrm r/m encoding. +# The forms bnd2/m64 and bnd2/m128 indicate a register or memorys selected by the modrm r/m encoding. +# TODO: Describe mib. +# +# One-of-a-kind operands: rel8, rel16, rel32, ptr16:16, ptr16:32, +# moffs8, moffs16, moffs32, moffs64, vm32x, vm32y, vm64x, and vm64y +# are all as in the Intel manual. +# +# Encodings +# +# The encodings are also as used in the Intel manual, with automated corrections. +# For example, the Intel manual sometimes omits the modrm /r indicator or other trailing bytes, +# and it also contains typographical errors. +# These problems are corrected so that the CSV data may be used to generate +# tools for processing x86 machine code. +# See https://golang.org/x/arch/x86/x86map for one such generator. +# +# Valid32 and Valid64 +# +# These columns hold validity abbreviations as defined in the Intel manual: +# V, I, N.E., N.P., N.S., or N.I. +# Tools processing the data are typically only concerned with whether the +# column is "V" (valid) or not. +# This data is also corrected compared to the manual. +# For example, the manual lists many instruction forms using REX bytes +# with an incorrect "V" in the Valid32 column. 
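+#
+# To illustrate the register-form naming above: in the entry
+#   "BEXTR r32, r/m32, r32V","BEXTRL r32V, r/m32, r32","bextrl r32V, r/m32, r32","VEX.NDS.128.0F38.W0 F7 /r","V","V","BMI1","","w,r,r","Y","32"
+# the destination r32 is selected by the modrm reg field, the source r/m32 by
+# the modrm r/m field (register or memory), and the third operand r32V by the
+# VEX.vvvv bits; the BMI1 value in the CPUID column says the instruction
+# requires that feature flag.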
+# +# CPUID Feature Flags +# +# This column specifies CPUID feature flags that must be present in order +# to use the instruction. If multiple flags are required, +# they are listed separated by plus signs, as in PCLMULQDQ+AVX. +# The column can also list one of the values 486, Pentium, PentiumII, and P6, +# indicating that the instruction was introduced on that architecture version. +# +# Tags +# +# The tag column does not correspond to a traditional column in the Intel manual tables. +# Instead, it is itself a comma-separated list of tags or hints derived by analysis +# of the instruction set or the instruction encodings. +# +# The tags address16, address32, and address64 indicate that the instruction form +# applies when using the specified addressing size. It may therefore be necessary to use an +# address size prefix byte to access the instruction. +# If two address tags are listed, the instruction can be used with either of those +# address sizes. An instruction will never list all three address sizes. +# (In fact, today, no instruction lists two address sizes, but that may change.) +# +# The tags operand16, operand32, and operand64 indicate that the instruction form +# applies when using the specified operand size. It may therefore be necessary to use an +# operand size prefix byte to access the instruction. +# If two operand tags are listed, the instruction can be used with either of those +# operand sizes. An instruction will never list all three operand sizes. +# For some instructions, default64 is used instead of operand64, +# which specifies data promotion to 64-bit. +# For instructions with different possible data sizes, +# it also describes that default data size is 64-bit instead of 32-bit. +# Using refining prefix like 0x66 will lead to 32-bit operation (if supported). +# +# The tags modrm_regonly or modrm_memonly indicate that the modrm byte's +# r/m encoding must specify a register or memory, respectively. +# Especially in newer instructions, the modrm constraint may be the only way +# to distinguish two instruction forms. For example the MOVHLPS and MOVLPS +# instructions share the same encoding, except that the former requires the +# modrm byte's r/m to indicate a register, while the latter requires it to indicate memory. +# +# The tags pseudo and pseudo64 indicate that this instruction form is redundant +# with others listed in the table and should be ignored when generating disassembly +# or instruction scanning programs. The pseudo64 tag is reserved for the case where +# the manual lists an instruction twice, once with the optional 64-bit mode REX byte. +# Since most decoders will handle the REX byte separately, the form with the +# unnecessary REX is tagged pseudo64. +# +# The amd tag marks AMD-specific instructions. +# As an example, all instructions of SSE4a have such tag. +# +# The AVX512-specific tags: scaleX and bscaleX. +# scale1, scale2, scale4, scale8, scale16, scale32, scale64 specify +# the compressed displacement multiplier (scaling). +# For example, if displacement is 128 and scale32 is set, +# disp8 value should be calculated as 128/32. +# bscale4 and bscale8 have the same meaning, but are used +# when instruction uses embedded broadcast feature. +# If instruction does not have bscaleX tag, it does not support EVEX broadcasting. 
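+#
+# As a rough sketch of how this table is typically consumed (the in-tree
+# generator test-mmx.py works along these lines), the comment block is
+# skipped and rows are filtered on the CPUID column; the Python snippet
+# below is illustrative only, and the "BMI2" filter is just an example value:
+#
+#   import csv
+#
+#   with open("x86.csv", newline="") as f:
+#       # Drop blank lines and the leading "#" comment block.
+#       data_lines = (l for l in f if l.strip() and not l.startswith("#"))
+#       for row in csv.reader(data_lines):
+#           # Field 1 = Intel mnemonic, field 4 = encoding, field 7 = CPUID.
+#           intel_mnemonic, encoding, cpuid = row[0], row[3], row[6]
+#           if cpuid == "BMI2":
+#               print(intel_mnemonic, "=>", encoding)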
+# +# Related packages (can be a good source of additional documentation): +# x86csv - read and manipulate x86.csv +# x86spec - x86.csv generator +# x86map - x86asm table generator based on x86.csv +# x86avxgen - cmd/internal/obj/x86 optab generator based x86.csv +# All listed packages are located at golang.org/x/arch/x86/. +"PUSH imm32","-/PUSHL/PUSHQ imm32","-/pushl/pushq imm32","68 id","V","N.S.","","operand32","r","Y","" +"PUSH imm32","-/PUSHL/PUSHQ imm32","-/pushl/pushq imm32","68 id","N.S.","V","","default64","r","Y","" +"AAA","AAA","aaa","37","V","N.S.","","","","","" +"AAD","AAD","aad","D5 0A","V","I","","pseudo","","","" +"AAD imm8u","AAD imm8u","aad imm8u","D5 ib","V","N.S.","","","r","","" +"AAM","AAM","aam","D4 0A","V","I","","pseudo","","","" +"AAM imm8u","AAM imm8u","aam imm8u","D4 ib","V","N.S.","","","r","","" +"AAS","AAS","aas","3F","V","N.S.","","","","","" +"ADC AL, imm8","ADCB imm8, AL","adcb imm8, AL","14 ib","V","V","","","rw,r","Y","8" +"ADC r/m8, imm8","ADCB imm8, r/m8","adcb imm8, r/m8","80 /2 ib","V","V","","","rw,r","Y","8" +"ADC r/m8, imm8","ADCB imm8, r/m8","adcb imm8, r/m8","82 /2 ib","V","N.S.","","","rw,r","Y","8" +"ADC r/m8, imm8","ADCB imm8, r/m8","adcb imm8, r/m8","REX 80 /2 ib","N.E.","V","","pseudo64","rw,r","Y","8" +"ADC r8, r/m8","ADCB r/m8, r8","adcb r/m8, r8","12 /r","V","V","","","rw,r","Y","8" +"ADC r8, r/m8","ADCB r/m8, r8","adcb r/m8, r8","REX 12 /r","N.E.","V","","pseudo64","rw,r","Y","8" +"ADC r/m8, r8","ADCB r8, r/m8","adcb r8, r/m8","10 /r","V","V","","","rw,r","Y","8" +"ADC r/m8, r8","ADCB r8, r/m8","adcb r8, r/m8","REX 10 /r","N.E.","V","","pseudo64","rw,r","Y","8" +"ADC EAX, imm32","ADCL imm32, EAX","adcl imm32, EAX","15 id","V","V","","operand32","rw,r","Y","32" +"ADC r/m32, imm32","ADCL imm32, r/m32","adcl imm32, r/m32","81 /2 id","V","V","","operand32","rw,r","Y","32" +"ADC r/m32, imm8","ADCL imm8, r/m32","adcl imm8, r/m32","83 /2 ib","V","V","","operand32","rw,r","Y","32" +"ADC r32, r/m32","ADCL r/m32, r32","adcl r/m32, r32","13 /r","V","V","","operand32","rw,r","Y","32" +"ADC r/m32, r32","ADCL r32, r/m32","adcl r32, r/m32","11 /r","V","V","","operand32","rw,r","Y","32" +"ADC RAX, imm32","ADCQ imm32, RAX","adcq imm32, RAX","REX.W 15 id","N.S.","V","","","rw,r","Y","64" +"ADC r/m64, imm32","ADCQ imm32, r/m64","adcq imm32, r/m64","REX.W 81 /2 id","N.S.","V","","","rw,r","Y","64" +"ADC r/m64, imm8","ADCQ imm8, r/m64","adcq imm8, r/m64","REX.W 83 /2 ib","N.S.","V","","","rw,r","Y","64" +"ADC r64, r/m64","ADCQ r/m64, r64","adcq r/m64, r64","REX.W 13 /r","N.S.","V","","","rw,r","Y","64" +"ADC r/m64, r64","ADCQ r64, r/m64","adcq r64, r/m64","REX.W 11 /r","N.S.","V","","","rw,r","Y","64" +"ADC AX, imm16","ADCW imm16, AX","adcw imm16, AX","15 iw","V","V","","operand16","rw,r","Y","16" +"ADC r/m16, imm16","ADCW imm16, r/m16","adcw imm16, r/m16","81 /2 iw","V","V","","operand16","rw,r","Y","16" +"ADC r/m16, imm8","ADCW imm8, r/m16","adcw imm8, r/m16","83 /2 ib","V","V","","operand16","rw,r","Y","16" +"ADC r16, r/m16","ADCW r/m16, r16","adcw r/m16, r16","13 /r","V","V","","operand16","rw,r","Y","16" +"ADC r/m16, r16","ADCW r16, r/m16","adcw r16, r/m16","11 /r","V","V","","operand16","rw,r","Y","16" +"ADCX r32, r/m32","ADCXL r/m32, r32","adcxl r/m32, r32","66 0F 38 F6 /r","V","V","ADX","operand16,operand32","rw,r","Y","32" +"ADCX r64, r/m64","ADCXQ r/m64, r64","adcxq r/m64, r64","66 REX.W 0F 38 F6 /r","N.S.","V","ADX","","rw,r","Y","64" +"ADD AL, imm8","ADDB imm8, AL","addb imm8, AL","04 ib","V","V","","","rw,r","Y","8" +"ADD r/m8, imm8","ADDB 
imm8, r/m8","addb imm8, r/m8","80 /0 ib","V","V","","","rw,r","Y","8" +"ADD r/m8, imm8","ADDB imm8, r/m8","addb imm8, r/m8","82 /0 ib","V","N.S.","","","rw,r","Y","8" +"ADD r/m8, imm8","ADDB imm8, r/m8","addb imm8, r/m8","REX 80 /0 ib","N.E.","V","","pseudo64","rw,r","Y","8" +"ADD r8, r/m8","ADDB r/m8, r8","addb r/m8, r8","02 /r","V","V","","","rw,r","Y","8" +"ADD r8, r/m8","ADDB r/m8, r8","addb r/m8, r8","REX 02 /r","N.E.","V","","pseudo64","rw,r","Y","8" +"ADD r/m8, r8","ADDB r8, r/m8","addb r8, r/m8","00 /r","V","V","","","rw,r","Y","8" +"ADD r/m8, r8","ADDB r8, r/m8","addb r8, r/m8","REX 00 /r","N.E.","V","","pseudo64","rw,r","Y","8" +"ADD EAX, imm32","ADDL imm32, EAX","addl imm32, EAX","05 id","V","V","","operand32","rw,r","Y","32" +"ADD r/m32, imm32","ADDL imm32, r/m32","addl imm32, r/m32","81 /0 id","V","V","","operand32","rw,r","Y","32" +"ADD r/m32, imm8","ADDL imm8, r/m32","addl imm8, r/m32","83 /0 ib","V","V","","operand32","rw,r","Y","32" +"ADD r32, r/m32","ADDL r/m32, r32","addl r/m32, r32","03 /r","V","V","","operand32","rw,r","Y","32" +"ADD r/m32, r32","ADDL r32, r/m32","addl r32, r/m32","01 /r","V","V","","operand32","rw,r","Y","32" +"ADDPD xmm1, xmm2/m128","ADDPD xmm2/m128, xmm1","addpd xmm2/m128, xmm1","66 0F 58 /r","V","V","SSE2","","rw,r","","" +"ADDPS xmm1, xmm2/m128","ADDPS xmm2/m128, xmm1","addps xmm2/m128, xmm1","0F 58 /r","V","V","SSE","","rw,r","","" +"ADD RAX, imm32","ADDQ imm32, RAX","addq imm32, RAX","REX.W 05 id","N.S.","V","","","rw,r","Y","64" +"ADD r/m64, imm32","ADDQ imm32, r/m64","addq imm32, r/m64","REX.W 81 /0 id","N.S.","V","","","rw,r","Y","64" +"ADD r/m64, imm8","ADDQ imm8, r/m64","addq imm8, r/m64","REX.W 83 /0 ib","N.S.","V","","","rw,r","Y","64" +"ADD r64, r/m64","ADDQ r/m64, r64","addq r/m64, r64","REX.W 03 /r","N.S.","V","","","rw,r","Y","64" +"ADD r/m64, r64","ADDQ r64, r/m64","addq r64, r/m64","REX.W 01 /r","N.S.","V","","","rw,r","Y","64" +"ADDSD xmm1, xmm2/m64","ADDSD xmm2/m64, xmm1","addsd xmm2/m64, xmm1","F2 0F 58 /r","V","V","SSE2","","rw,r","","" +"ADDSS xmm1, xmm2/m32","ADDSS xmm2/m32, xmm1","addss xmm2/m32, xmm1","F3 0F 58 /r","V","V","SSE","","rw,r","","" +"ADDSUBPD xmm1, xmm2/m128","ADDSUBPD xmm2/m128, xmm1","addsubpd xmm2/m128, xmm1","66 0F D0 /r","V","V","SSE3","","rw,r","","" +"ADDSUBPS xmm1, xmm2/m128","ADDSUBPS xmm2/m128, xmm1","addsubps xmm2/m128, xmm1","F2 0F D0 /r","V","V","SSE3","","rw,r","","" +"ADD AX, imm16","ADDW imm16, AX","addw imm16, AX","05 iw","V","V","","operand16","rw,r","Y","16" +"ADD r/m16, imm16","ADDW imm16, r/m16","addw imm16, r/m16","81 /0 iw","V","V","","operand16","rw,r","Y","16" +"ADD r/m16, imm8","ADDW imm8, r/m16","addw imm8, r/m16","83 /0 ib","V","V","","operand16","rw,r","Y","16" +"ADD r16, r/m16","ADDW r/m16, r16","addw r/m16, r16","03 /r","V","V","","operand16","rw,r","Y","16" +"ADD r/m16, r16","ADDW r16, r/m16","addw r16, r/m16","01 /r","V","V","","operand16","rw,r","Y","16" +"ADOX r32, r/m32","ADOXL r/m32, r32","adoxl r/m32, r32","F3 0F 38 F6 /r","V","V","ADX","operand16,operand32","rw,r","Y","32" +"ADOX r64, r/m64","ADOXQ r/m64, r64","adoxq r/m64, r64","F3 REX.W 0F 38 F6 /r","N.S.","V","ADX","","rw,r","Y","64" +"AESDEC xmm1, xmm2/m128","AESDEC xmm2/m128, xmm1","aesdec xmm2/m128, xmm1","66 0F 38 DE /r","V","V","AES","","rw,r","","" +"AESDECLAST xmm1, xmm2/m128","AESDECLAST xmm2/m128, xmm1","aesdeclast xmm2/m128, xmm1","66 0F 38 DF /r","V","V","AES","","rw,r","","" +"AESENC xmm1, xmm2/m128","AESENC xmm2/m128, xmm1","aesenc xmm2/m128, xmm1","66 0F 38 DC /r","V","V","AES","","rw,r","","" +"AESENCLAST 
xmm1, xmm2/m128","AESENCLAST xmm2/m128, xmm1","aesenclast xmm2/m128, xmm1","66 0F 38 DD /r","V","V","AES","","rw,r","","" +"AESIMC xmm1, xmm2/m128","AESIMC xmm2/m128, xmm1","aesimc xmm2/m128, xmm1","66 0F 38 DB /r","V","V","AES","","w,r","","" +"AESKEYGENASSIST xmm1, xmm2/m128, imm8u","AESKEYGENASSIST imm8u, xmm2/m128, xmm1","aeskeygenassist imm8u, xmm2/m128, xmm1","66 0F 3A DF /r ib","V","V","AES","","w,r,r","","" +"AND AL, imm8","ANDB imm8, AL","andb imm8, AL","24 ib","V","V","","","rw,r","Y","8" +"AND r/m8, imm8","ANDB imm8, r/m8","andb imm8, r/m8","REX 80 /4 ib","N.E.","V","","pseudo64","rw,r","Y","8" +"AND r/m8, imm8u","ANDB imm8u, r/m8","andb imm8u, r/m8","80 /4 ib","V","V","","","rw,r","Y","8" +"AND r/m8, imm8u","ANDB imm8u, r/m8","andb imm8u, r/m8","82 /4 ib","V","N.S.","","","rw,r","Y","8" +"AND r8, r/m8","ANDB r/m8, r8","andb r/m8, r8","22 /r","V","V","","","rw,r","Y","8" +"AND r8, r/m8","ANDB r/m8, r8","andb r/m8, r8","REX 22 /r","N.E.","V","","pseudo64","rw,r","Y","8" +"AND r/m8, r8","ANDB r8, r/m8","andb r8, r/m8","20 /r","V","V","","","rw,r","Y","8" +"AND r/m8, r8","ANDB r8, r/m8","andb r8, r/m8","REX 20 /r","N.E.","V","","pseudo64","rw,r","Y","8" +"AND EAX, imm32","ANDL imm32, EAX","andl imm32, EAX","25 id","V","V","","operand32","rw,r","Y","32" +"AND r/m32, imm32","ANDL imm32, r/m32","andl imm32, r/m32","81 /4 id","V","V","","operand32","rw,r","Y","32" +"AND r/m32, imm8","ANDL imm8, r/m32","andl imm8, r/m32","83 /4 ib","V","V","","operand32","rw,r","Y","32" +"AND r32, r/m32","ANDL r/m32, r32","andl r/m32, r32","23 /r","V","V","","operand32","rw,r","Y","32" +"AND r/m32, r32","ANDL r32, r/m32","andl r32, r/m32","21 /r","V","V","","operand32","rw,r","Y","32" +"ANDN r32, r32V, r/m32","ANDNL r/m32, r32V, r32","andnl r/m32, r32V, r32","VEX.DDS.128.0F38.W0 F2 /r","V","V","BMI1","","rw,r,r","Y","32" +"ANDNPD xmm1, xmm2/m128","ANDNPD xmm2/m128, xmm1","andnpd xmm2/m128, xmm1","66 0F 55 /r","V","V","SSE2","","rw,r","","" +"ANDNPS xmm1, xmm2/m128","ANDNPS xmm2/m128, xmm1","andnps xmm2/m128, xmm1","0F 55 /r","V","V","SSE","","rw,r","","" +"ANDN r64, r64V, r/m64","ANDNQ r/m64, r64V, r64","andnq r/m64, r64V, r64","VEX.DDS.128.0F38.W1 F2 /r","N.S.","V","BMI1","","rw,r,r","Y","64" +"ANDPD xmm1, xmm2/m128","ANDPD xmm2/m128, xmm1","andpd xmm2/m128, xmm1","66 0F 54 /r","V","V","SSE2","","rw,r","","" +"ANDPS xmm1, xmm2/m128","ANDPS xmm2/m128, xmm1","andps xmm2/m128, xmm1","0F 54 /r","V","V","SSE","","rw,r","","" +"AND RAX, imm32","ANDQ imm32, RAX","andq imm32, RAX","REX.W 25 id","N.S.","V","","","rw,r","Y","64" +"AND r/m64, imm32","ANDQ imm32, r/m64","andq imm32, r/m64","REX.W 81 /4 id","N.S.","V","","","rw,r","Y","64" +"AND r/m64, imm8","ANDQ imm8, r/m64","andq imm8, r/m64","REX.W 83 /4 ib","N.S.","V","","","rw,r","Y","64" +"AND r64, r/m64","ANDQ r/m64, r64","andq r/m64, r64","REX.W 23 /r","N.S.","V","","","rw,r","Y","64" +"AND r/m64, r64","ANDQ r64, r/m64","andq r64, r/m64","REX.W 21 /r","N.S.","V","","","rw,r","Y","64" +"AND AX, imm16","ANDW imm16, AX","andw imm16, AX","25 iw","V","V","","operand16","rw,r","Y","16" +"AND r/m16, imm16","ANDW imm16, r/m16","andw imm16, r/m16","81 /4 iw","V","V","","operand16","rw,r","Y","16" +"AND r/m16, imm8","ANDW imm8, r/m16","andw imm8, r/m16","83 /4 ib","V","V","","operand16","rw,r","Y","16" +"AND r16, r/m16","ANDW r/m16, r16","andw r/m16, r16","23 /r","V","V","","operand16","rw,r","Y","16" +"AND r/m16, r16","ANDW r16, r/m16","andw r16, r/m16","21 /r","V","V","","operand16","rw,r","Y","16" +"ARPL r/m16, r16","ARPL r16, r/m16","arpl r16, r/m16","63 
/r","V","N.S.","","","rw,r","","" +"BEXTR r32, r/m32, r32V","BEXTRL r32V, r/m32, r32","bextrl r32V, r/m32, r32","VEX.NDS.128.0F38.W0 F7 /r","V","V","BMI1","","w,r,r","Y","32" +"BEXTR r64, r/m64, r64V","BEXTRQ r64V, r/m64, r64","bextrq r64V, r/m64, r64","VEX.NDS.128.0F38.W1 F7 /r","N.S.","V","BMI1","","w,r,r","Y","64" +"BEXTR_XOP r32, r/m32, imm32u","BEXTR_XOPL imm32u, r/m32, r32","bextr_xopl imm32u, r/m32, r32","XOP.128.0A.WIG 10 /r","V","V","TBM","amd,operand16,operand32","w,r,r","Y","32" +"BEXTR_XOP r64, r/m64, imm32u","BEXTR_XOPQ imm32u, r/m64, r64","bextr_xopq imm32u, r/m64, r64","XOP.128.0A.WIG 10 /r","N.S.","V","TBM","amd,operand64","w,r,r","Y","64" +"BLCFILL r32V, r/m32","BLCFILLL r/m32, r32V","blcfill r/m32, r32V","XOP.NDD.128.09.WIG 01 /1","V","V","TBM","amd,operand16,operand32","w,r","Y","32" +"BLCFILL r64V, r/m64","BLCFILLQ r/m64, r64V","blcfill r/m64, r64V","XOP.NDD.128.09.W1 01 /1","N.S.","V","TBM","amd,operand64","w,r","Y","64" +"BLCIC r32V, r/m32","BLCICL r/m32, r32V","blcicl r/m32, r32V","XOP.NDD.128.09.WIG 01 /5","V","V","TBM","amd,operand16,operand32","w,r","Y","32" +"BLCIC r64V, r/m64","BLCICQ r/m64, r64V","blcicq r/m64, r64V","XOP.NDD.128.09.WIG 01 /5","N.S.","V","TBM","amd,operand64","w,r","Y","64" +"BLCI r32V, r/m32","BLCIL r/m32, r32V","blcil r/m32, r32V","XOP.NDD.128.09.WIG 02 /6","V","V","TBM","amd,operand16,operand32","w,r","Y","32" +"BLCI r64V, r/m64","BLCIQ r/m64, r64V","blciq r/m64, r64V","XOP.NDD.128.09.WIG 02 /6","N.S.","V","TBM","amd,operand64","w,r","Y","64" +"BLCMSK r32V, r/m32","BLCMSKL r/m32, r32V","blcmskl r/m32, r32V","XOP.NDD.128.09.WIG 02 /1","V","V","TBM","amd,operand16,operand32","w,r","Y","32" +"BLCMSK r64V, r/m64","BLCMSKQ r/m64, r64V","blcmskq r/m64, r64V","XOP.NDD.128.09.WIG 02 /1","N.S.","V","TBM","amd,operand64","w,r","Y","64" +"BLCS r32V, r/m32","BLCSL r/m32, r32V","blcsl r/m32, r32V","XOP.NDD.128.09.WIG 01 /3","V","V","TBM","amd,operand16,operand32","w,r","Y","32" +"BLCS r64V, r/m64","BLCSQ r/m64, r64V","blcsq r/m64, r64V","XOP.NDD.128.09.WIG 01 /3","N.S.","V","TBM","amd,operand64","w,r","Y","64" +"BLENDPD xmm1, xmm2/m128, imm8u","BLENDPD imm8u, xmm2/m128, xmm1","blendpd imm8u, xmm2/m128, xmm1","66 0F 3A 0D /r ib","V","V","SSE4_1","","rw,r,r","","" +"BLENDPS xmm1, xmm2/m128, imm8u","BLENDPS imm8u, xmm2/m128, xmm1","blendps imm8u, xmm2/m128, xmm1","66 0F 3A 0C /r ib","V","V","SSE4_1","","rw,r,r","","" +"BLENDVPD xmm1, xmm2/m128, ","BLENDVPD , xmm2/m128, xmm1","blendvpd , xmm2/m128, xmm1","66 0F 38 15 /r","V","V","SSE4_1","","rw,r,r","","" +"BLENDVPS xmm1, xmm2/m128, ","BLENDVPS , xmm2/m128, xmm1","blendvps , xmm2/m128, xmm1","66 0F 38 14 /r","V","V","SSE4_1","","rw,r,r","","" +"BLSFILL r32V, r/m32","BLSFILLL r/m32, r32V","blsfill r/m32, r32V","XOP.NDD.128.09.WIG 01 /2","V","V","TBM","amd,operand16,operand32","w,r","Y","32" +"BLSFILL r64V, r/m64","BLSFILLQ r/m64, r64V","blsfill r/m64, r64V","XOP.NDD.128.09.W1 01 /2","N.S.","V","TBM","amd,operand64","w,r","Y","64" +"BLSIC r32V, r/m32","BLSICL r/m32, r32V","blsicl r/m32, r32V","XOP.NDD.128.09.WIG 01 /6","V","V","TBM","amd,operand16,operand32","w,r","Y","32" +"BLSIC r64V, r/m64","BLSICQ r/m64, r64V","blsicq r/m64, r64V","XOP.NDD.128.09.WIG 01 /6","N.S.","V","TBM","amd,operand64","w,r","Y","64" +"BLSI r32V, r/m32","BLSIL r/m32, r32V","blsil r/m32, r32V","VEX.NDD.128.0F38.W0 F3 /3","V","V","BMI1","","w,r","Y","32" +"BLSI r64V, r/m64","BLSIQ r/m64, r64V","blsiq r/m64, r64V","VEX.NDD.128.0F38.W1 F3 /3","N.S.","V","BMI1","","w,r","Y","64" +"BLSMSK r32V, r/m32","BLSMSKL r/m32, r32V","blsmskl r/m32, 
r32V","VEX.NDD.128.0F38.W0 F3 /2","V","V","BMI1","","w,r","Y","32" +"BLSMSK r64V, r/m64","BLSMSKQ r/m64, r64V","blsmskq r/m64, r64V","VEX.NDD.128.0F38.W1 F3 /2","N.S.","V","BMI1","","w,r","Y","64" +"BLSR r32V, r/m32","BLSRL r/m32, r32V","blsrl r/m32, r32V","VEX.NDD.128.0F38.W0 F3 /1","V","V","BMI1","","w,r","Y","32" +"BLSR r64V, r/m64","BLSRQ r/m64, r64V","blsrq r/m64, r64V","VEX.NDD.128.0F38.W1 F3 /1","N.S.","V","BMI1","","w,r","Y","64" +"BNDCL bnd1, r/m32","BNDCL r/m32, bnd1","bndcl r/m32, bnd1","F3 0F 1A /r","V","N.S.","MPX","","r,r","","" +"BNDCL bnd1, r/m64","BNDCL r/m64, bnd1","bndcl r/m64, bnd1","F3 0F 1A /r","N.S.","V","MPX","","r,r","","" +"BNDCN bnd1, r/m32","BNDCN r/m32, bnd1","bndcn r/m32, bnd1","F2 0F 1B /r","V","N.S.","MPX","","r,r","","" +"BNDCN bnd1, r/m64","BNDCN r/m64, bnd1","bndcn r/m64, bnd1","F2 0F 1B /r","N.S.","V","MPX","","r,r","","" +"BNDCU bnd1, r/m32","BNDCU r/m32, bnd1","bndcu r/m32, bnd1","F2 0F 1A /r","V","N.S.","MPX","","r,r","","" +"BNDCU bnd1, r/m64","BNDCU r/m64, bnd1","bndcu r/m64, bnd1","F2 0F 1A /r","N.S.","V","MPX","","r,r","","" +"BNDLDX bnd1, mib","BNDLDX mib, bnd1","bndldx mib, bnd1","0F 1A /r","V","V","MPX","modrm_memonly","w,r","","" +"BNDMK bnd1, m32","BNDMK m32, bnd1","bndmk m32, bnd1","F3 0F 1B /r","V","N.S.","MPX","modrm_memonly","w,r","","" +"BNDMK bnd1, m64","BNDMK m64, bnd1","bndmk m64, bnd1","F3 0F 1B /r","N.S.","V","MPX","modrm_memonly","w,r","","" +"BNDMOV bnd2/m128, bnd1","BNDMOV bnd1, bnd2/m128","bndmov bnd1, bnd2/m128","66 0F 1B /r","N.S.","V","MPX","","w,r","","" +"BNDMOV bnd2/m64, bnd1","BNDMOV bnd1, bnd2/m64","bndmov bnd1, bnd2/m64","66 0F 1B /r","V","N.S.","MPX","","w,r","","" +"BNDMOV bnd1, bnd2/m128","BNDMOV bnd2/m128, bnd1","bndmov bnd2/m128, bnd1","66 0F 1A /r","N.S.","V","MPX","","w,r","","" +"BNDMOV bnd1, bnd2/m64","BNDMOV bnd2/m64, bnd1","bndmov bnd2/m64, bnd1","66 0F 1A /r","V","N.S.","MPX","","w,r","","" +"BNDSTX mib, bnd1","BNDSTX bnd1, mib","bndstx bnd1, mib","0F 1B /r","V","V","MPX","modrm_memonly","w,r","","" +"BOUND r32, m32&32","BOUNDL m32&32, r32","boundl r32, m32&32","62 /r","V","N.S.","","modrm_memonly,operand32","r,r","Y","32" +"BOUND r16, m16&16","BOUNDW m16&16, r16","boundw r16, m16&16","62 /r","V","N.S.","","modrm_memonly,operand16","r,r","Y","16" +"BSF r32, r/m32","BSFL r/m32, r32","bsfl r/m32, r32","0F BC /r","V","V","","operand32","rw,r","Y","32" +"BSF r32, r/m32","BSFL r/m32, r32","bsfl r/m32, r32","F3 0F BC /r","V","V","","operand32","rw,r","Y","32" +"BSF r64, r/m64","BSFQ r/m64, r64","bsfq r/m64, r64","F3 REX.W 0F BC /r","N.S.","V","","","rw,r","Y","64" +"BSF r64, r/m64","BSFQ r/m64, r64","bsfq r/m64, r64","REX.W 0F BC /r","N.S.","V","","","rw,r","Y","64" +"BSF r16, r/m16","BSFW r/m16, r16","bsfw r/m16, r16","0F BC /r","V","V","","operand16","rw,r","Y","16" +"BSF r16, r/m16","BSFW r/m16, r16","bsfw r/m16, r16","F3 0F BC /r","V","V","","operand16","rw,r","Y","16" +"BSR r32, r/m32","BSRL r/m32, r32","bsrl r/m32, r32","0F BD /r","V","V","","operand32","rw,r","Y","32" +"BSR r32, r/m32","BSRL r/m32, r32","bsrl r/m32, r32","F3 0F BD /r","V","V","","operand32","rw,r","Y","32" +"BSR r64, r/m64","BSRQ r/m64, r64","bsrq r/m64, r64","F3 REX.W 0F BD /r","N.S.","V","","","rw,r","Y","64" +"BSR r64, r/m64","BSRQ r/m64, r64","bsrq r/m64, r64","REX.W 0F BD /r","N.S.","V","","","rw,r","Y","64" +"BSR r16, r/m16","BSRW r/m16, r16","bsrw r/m16, r16","0F BD /r","V","V","","operand16","rw,r","Y","16" +"BSR r16, r/m16","BSRW r/m16, r16","bsrw r/m16, r16","F3 0F BD /r","V","V","","operand16","rw,r","Y","16" +"BSWAP 
r32op","BSWAPL r32op","bswap r32op","0F C8+rd","V","V","486","operand32","rw","Y","32" +"BSWAP r64op","BSWAPQ r64op","bswap r64op","REX.W 0F C8+ro","N.S.","V","486","","rw","Y","64" +"BSWAP r16op","BSWAPW r16op","bswap r16op","0F C8+rw","V","V","486","operand16","rw","Y","16" +"BTC r/m32, imm8u","BTCL imm8u, r/m32","btcl imm8u, r/m32","0F BA /7 ib","V","V","","operand32","rw,r","Y","32" +"BTC r/m32, r32","BTCL r32, r/m32","btcl r32, r/m32","0F BB /r","V","V","","operand32","rw,r","Y","32" +"BTC r/m64, imm8u","BTCQ imm8u, r/m64","btcq imm8u, r/m64","REX.W 0F BA /7 ib","N.S.","V","","","rw,r","Y","64" +"BTC r/m64, r64","BTCQ r64, r/m64","btcq r64, r/m64","REX.W 0F BB /r","N.S.","V","","","rw,r","Y","64" +"BTC r/m16, imm8u","BTCW imm8u, r/m16","btcw imm8u, r/m16","0F BA /7 ib","V","V","","operand16","rw,r","Y","16" +"BTC r/m16, r16","BTCW r16, r/m16","btcw r16, r/m16","0F BB /r","V","V","","operand16","rw,r","Y","16" +"BT r/m32, imm8u","BTL imm8u, r/m32","btl imm8u, r/m32","0F BA /4 ib","V","V","","operand32","r,r","Y","32" +"BT r/m32, r32","BTL r32, r/m32","btl r32, r/m32","0F A3 /r","V","V","","operand32","r,r","Y","32" +"BT r/m64, imm8u","BTQ imm8u, r/m64","btq imm8u, r/m64","REX.W 0F BA /4 ib","N.S.","V","","","r,r","Y","64" +"BT r/m64, r64","BTQ r64, r/m64","btq r64, r/m64","REX.W 0F A3 /r","N.S.","V","","","r,r","Y","64" +"BTR r/m32, imm8u","BTRL imm8u, r/m32","btrl imm8u, r/m32","0F BA /6 ib","V","V","","operand32","rw,r","Y","32" +"BTR r/m32, r32","BTRL r32, r/m32","btrl r32, r/m32","0F B3 /r","V","V","","operand32","rw,r","Y","32" +"BTR r/m64, imm8u","BTRQ imm8u, r/m64","btrq imm8u, r/m64","REX.W 0F BA /6 ib","N.S.","V","","","rw,r","Y","64" +"BTR r/m64, r64","BTRQ r64, r/m64","btrq r64, r/m64","REX.W 0F B3 /r","N.S.","V","","","rw,r","Y","64" +"BTR r/m16, imm8u","BTRW imm8u, r/m16","btrw imm8u, r/m16","0F BA /6 ib","V","V","","operand16","rw,r","Y","16" +"BTR r/m16, r16","BTRW r16, r/m16","btrw r16, r/m16","0F B3 /r","V","V","","operand16","rw,r","Y","16" +"BTS r/m32, imm8u","BTSL imm8u, r/m32","btsl imm8u, r/m32","0F BA /5 ib","V","V","","operand32","rw,r","Y","32" +"BTS r/m32, r32","BTSL r32, r/m32","btsl r32, r/m32","0F AB /r","V","V","","operand32","rw,r","Y","32" +"BTS r/m64, imm8u","BTSQ imm8u, r/m64","btsq imm8u, r/m64","REX.W 0F BA /5 ib","N.S.","V","","","rw,r","Y","64" +"BTS r/m64, r64","BTSQ r64, r/m64","btsq r64, r/m64","REX.W 0F AB /r","N.S.","V","","","rw,r","Y","64" +"BTS r/m16, imm8u","BTSW imm8u, r/m16","btsw imm8u, r/m16","0F BA /5 ib","V","V","","operand16","rw,r","Y","16" +"BTS r/m16, r16","BTSW r16, r/m16","btsw r16, r/m16","0F AB /r","V","V","","operand16","rw,r","Y","16" +"BT r/m16, imm8u","BTW imm8u, r/m16","btw imm8u, r/m16","0F BA /4 ib","V","V","","operand16","r,r","Y","16" +"BT r/m16, r16","BTW r16, r/m16","btw r16, r/m16","0F A3 /r","V","V","","operand16","r,r","Y","16" +"BZHI r32, r/m32, r32V","BZHIL r32V, r/m32, r32","bzhil r32V, r/m32, r32","VEX.NDS.128.0F38.W0 F5 /r","V","V","BMI2","","w,r,r","Y","32" +"BZHI r64, r/m64, r64V","BZHIQ r64V, r/m64, r64","bzhiq r64V, r/m64, r64","VEX.NDS.128.0F38.W1 F5 /r","N.S.","V","BMI2","","w,r,r","Y","64" +"CALL rel16","CALL rel16","call rel16","E8 cw","V","N.S.","","operand16","r","Y","" +"CALL rel32","CALL rel32","call rel32","E8 cd","V","N.S.","","operand32","r","Y","" +"CALL rel32","CALL rel32","call rel32","E8 cd","N.S.","V","","default64","r","Y","" +"CALL r/m32","CALLL* r/m32","calll* r/m32","FF /2","V","N.S.","","operand32","r","Y","32" +"CALL r/m64","CALLQ* r/m64","callq* r/m64","FF 
/2","N.S.","V","","default64","r","Y","64" +"CALL r/m16","CALLW* r/m16","callw* r/m16","FF /2","V","N.S.","","operand16","r","Y","16" +"CBW","CBW","cbtw","98","V","V","","operand16","","","" +"CDQ","CDQ","cltd","99","V","V","","operand32","","","" +"CDQE","CDQE","cltq","REX.W 98","N.S.","V","","","","","" +"CLAC","CLAC","clac","0F 01 CA","V","V","","","","","" +"CLC","CLC","clc","F8","V","V","","","","","" +"CLD","CLD","cld","FC","V","V","","","","","" +"CLFLUSH m8","CLFLUSH m8","clflush m8","0F AE /7","V","V","","modrm_memonly","r","","" +"CLFLUSHOPT m8","CLFLUSHOPT m8","clflushopt m8","66 0F AE /7","V","V","","modrm_memonly","r","","" +"CLGI","CLGI","clgi","0F 01 DD","V","V","SVM","amd","","","" +"CLI","CLI","cli","FA","V","V","","","","","" +"CLRSSBSY m64","CLRSSBSY m64","clrssbsy m64","F3 0F AE /6","V","V","CET","modrm_memonly","w","","" +"CLTS","CLTS","clts","0F 06","V","V","","","","","" +"CLWB m8","CLWB m8","clwb m8","66 0F AE /6","V","V","CLWB","modrm_memonly","r","","" +"CLZERO EAX","CLZEROL EAX","clzerol EAX","0F 01 FC","V","V","CLZERO","amd,modrm_regonly,operand32","r","Y","32" +"CLZERO RAX","CLZEROQ RAX","clzeroq RAX","REX.W 0F 01 FC","N.S.","V","CLZERO","amd,modrm_regonly","r","Y","64" +"CLZERO AX","CLZEROW AX","clzerow AX","0F 01 FC","V","V","CLZERO","amd,modrm_regonly,operand16","r","Y","16" +"CMC","CMC","cmc","F5","V","V","","","","","" +"CMOVC r16, r/m16","CMOVC r/m16, r16","cmovc r/m16, r16","0F 42 /r","V","V","","P6,operand16,pseudo","rw,r","","" +"CMOVC r32, r/m32","CMOVC r/m32, r32","cmovc r/m32, r32","0F 42 /r","V","V","","P6,operand32,pseudo","rw,r","","" +"CMOVC r64, r/m64","CMOVC r/m64, r64","cmovc r/m64, r64","REX.W 0F 42 /r","N.E.","V","","pseudo","rw,r","","" +"CMOVAE r32, r/m32","CMOVLCC r/m32, r32","cmovael r/m32, r32","0F 43 /r","V","V","","P6,operand32","rw,r","Y","32" +"CMOVB r32, r/m32","CMOVLCS r/m32, r32","cmovbl r/m32, r32","0F 42 /r","V","V","","P6,operand32","rw,r","Y","32" +"CMOVE r32, r/m32","CMOVLEQ r/m32, r32","cmovel r/m32, r32","0F 44 /r","V","V","","P6,operand32","rw,r","Y","32" +"CMOVGE r32, r/m32","CMOVLGE r/m32, r32","cmovgel r/m32, r32","0F 4D /r","V","V","","P6,operand32","rw,r","Y","32" +"CMOVG r32, r/m32","CMOVLGT r/m32, r32","cmovgl r/m32, r32","0F 4F /r","V","V","","P6,operand32","rw,r","Y","32" +"CMOVA r32, r/m32","CMOVLHI r/m32, r32","cmoval r/m32, r32","0F 47 /r","V","V","","P6,operand32","rw,r","Y","32" +"CMOVLE r32, r/m32","CMOVLLE r/m32, r32","cmovlel r/m32, r32","0F 4E /r","V","V","","P6,operand32","rw,r","Y","32" +"CMOVBE r32, r/m32","CMOVLLS r/m32, r32","cmovbel r/m32, r32","0F 46 /r","V","V","","P6,operand32","rw,r","Y","32" +"CMOVL r32, r/m32","CMOVLLT r/m32, r32","cmovll r/m32, r32","0F 4C /r","V","V","","P6,operand32","rw,r","Y","32" +"CMOVS r32, r/m32","CMOVLMI r/m32, r32","cmovsl r/m32, r32","0F 48 /r","V","V","","P6,operand32","rw,r","Y","32" +"CMOVNE r32, r/m32","CMOVLNE r/m32, r32","cmovnel r/m32, r32","0F 45 /r","V","V","","P6,operand32","rw,r","Y","32" +"CMOVNO r32, r/m32","CMOVLOC r/m32, r32","cmovnol r/m32, r32","0F 41 /r","V","V","","P6,operand32","rw,r","Y","32" +"CMOVO r32, r/m32","CMOVLOS r/m32, r32","cmovol r/m32, r32","0F 40 /r","V","V","","P6,operand32","rw,r","Y","32" +"CMOVNP r32, r/m32","CMOVLPC r/m32, r32","cmovnpl r/m32, r32","0F 4B /r","V","V","","P6,operand32","rw,r","Y","32" +"CMOVNS r32, r/m32","CMOVLPL r/m32, r32","cmovnsl r/m32, r32","0F 49 /r","V","V","","P6,operand32","rw,r","Y","32" +"CMOVP r32, r/m32","CMOVLPS r/m32, r32","cmovpl r/m32, r32","0F 4A 
/r","V","V","","P6,operand32","rw,r","Y","32" +"CMOVNA r16, r/m16","CMOVNA r/m16, r16","cmovna r/m16, r16","0F 46 /r","V","V","","P6,operand16,pseudo","rw,r","","" +"CMOVNA r32, r/m32","CMOVNA r/m32, r32","cmovna r/m32, r32","0F 46 /r","V","V","","P6,operand32,pseudo","rw,r","","" +"CMOVNA r64, r/m64","CMOVNA r/m64, r64","cmovna r/m64, r64","REX.W 0F 46 /r","N.E.","V","","pseudo","rw,r","","" +"CMOVNAE r16, r/m16","CMOVNAE r/m16, r16","cmovnae r/m16, r16","0F 42 /r","V","V","","P6,operand16,pseudo","rw,r","","" +"CMOVNAE r32, r/m32","CMOVNAE r/m32, r32","cmovnae r/m32, r32","0F 42 /r","V","V","","P6,operand32,pseudo","rw,r","","" +"CMOVNAE r64, r/m64","CMOVNAE r/m64, r64","cmovnae r/m64, r64","REX.W 0F 42 /r","N.E.","V","","pseudo","rw,r","","" +"CMOVNB r16, r/m16","CMOVNB r/m16, r16","cmovnb r/m16, r16","0F 43 /r","V","V","","P6,operand16,pseudo","rw,r","","" +"CMOVNB r32, r/m32","CMOVNB r/m32, r32","cmovnb r/m32, r32","0F 43 /r","V","V","","P6,operand32,pseudo","rw,r","","" +"CMOVNB r64, r/m64","CMOVNB r/m64, r64","cmovnb r/m64, r64","REX.W 0F 43 /r","N.E.","V","","pseudo","rw,r","","" +"CMOVNBE r16, r/m16","CMOVNBE r/m16, r16","cmovnbe r/m16, r16","0F 47 /r","V","V","","P6,operand16,pseudo","rw,r","","" +"CMOVNBE r32, r/m32","CMOVNBE r/m32, r32","cmovnbe r/m32, r32","0F 47 /r","V","V","","P6,operand32,pseudo","rw,r","","" +"CMOVNBE r64, r/m64","CMOVNBE r/m64, r64","cmovnbe r/m64, r64","REX.W 0F 47 /r","N.E.","V","","pseudo","rw,r","","" +"CMOVNC r16, r/m16","CMOVNC r/m16, r16","cmovnc r/m16, r16","0F 43 /r","V","V","","P6,operand16,pseudo","rw,r","","" +"CMOVNC r32, r/m32","CMOVNC r/m32, r32","cmovnc r/m32, r32","0F 43 /r","V","V","","P6,operand32,pseudo","rw,r","","" +"CMOVNC r64, r/m64","CMOVNC r/m64, r64","cmovnc r/m64, r64","REX.W 0F 43 /r","N.E.","V","","pseudo","rw,r","","" +"CMOVNG r16, r/m16","CMOVNG r/m16, r16","cmovng r/m16, r16","0F 4E /r","V","V","","P6,operand16,pseudo","rw,r","","" +"CMOVNG r32, r/m32","CMOVNG r/m32, r32","cmovng r/m32, r32","0F 4E /r","V","V","","P6,operand32,pseudo","rw,r","","" +"CMOVNG r64, r/m64","CMOVNG r/m64, r64","cmovng r/m64, r64","REX.W 0F 4E /r","N.E.","V","","pseudo","rw,r","","" +"CMOVNGE r16, r/m16","CMOVNGE r/m16, r16","cmovnge r/m16, r16","0F 4C /r","V","V","","P6,operand16,pseudo","rw,r","","" +"CMOVNGE r32, r/m32","CMOVNGE r/m32, r32","cmovnge r/m32, r32","0F 4C /r","V","V","","P6,operand32,pseudo","rw,r","","" +"CMOVNGE r64, r/m64","CMOVNGE r/m64, r64","cmovnge r/m64, r64","REX.W 0F 4C /r","N.E.","V","","pseudo","rw,r","","" +"CMOVNL r16, r/m16","CMOVNL r/m16, r16","cmovnl r/m16, r16","0F 4D /r","V","V","","P6,operand16,pseudo","rw,r","","" +"CMOVNL r32, r/m32","CMOVNL r/m32, r32","cmovnl r/m32, r32","0F 4D /r","V","V","","P6,operand32,pseudo","rw,r","","" +"CMOVNL r64, r/m64","CMOVNL r/m64, r64","cmovnl r/m64, r64","REX.W 0F 4D /r","N.E.","V","","pseudo","rw,r","","" +"CMOVNLE r16, r/m16","CMOVNLE r/m16, r16","cmovnle r/m16, r16","0F 4F /r","V","V","","P6,operand16,pseudo","rw,r","","" +"CMOVNLE r32, r/m32","CMOVNLE r/m32, r32","cmovnle r/m32, r32","0F 4F /r","V","V","","P6,operand32,pseudo","rw,r","","" +"CMOVNLE r64, r/m64","CMOVNLE r/m64, r64","cmovnle r/m64, r64","REX.W 0F 4F /r","N.E.","V","","pseudo","rw,r","","" +"CMOVNZ r16, r/m16","CMOVNZ r/m16, r16","cmovnz r/m16, r16","0F 45 /r","V","V","","P6,operand16,pseudo","rw,r","","" +"CMOVNZ r32, r/m32","CMOVNZ r/m32, r32","cmovnz r/m32, r32","0F 45 /r","V","V","","P6,operand32,pseudo","rw,r","","" +"CMOVNZ r64, r/m64","CMOVNZ r/m64, r64","cmovnz r/m64, r64","REX.W 0F 45 
/r","N.E.","V","","pseudo","rw,r","","" +"CMOVPE r16, r/m16","CMOVPE r/m16, r16","cmovpe r/m16, r16","0F 4A /r","V","V","","P6,operand16,pseudo","rw,r","","" +"CMOVPE r32, r/m32","CMOVPE r/m32, r32","cmovpe r/m32, r32","0F 4A /r","V","V","","P6,operand32,pseudo","rw,r","","" +"CMOVPE r64, r/m64","CMOVPE r/m64, r64","cmovpe r/m64, r64","REX.W 0F 4A /r","N.E.","V","","pseudo","rw,r","","" +"CMOVPO r16, r/m16","CMOVPO r/m16, r16","cmovpo r/m16, r16","0F 4B /r","V","V","","P6,operand16,pseudo","rw,r","","" +"CMOVPO r32, r/m32","CMOVPO r/m32, r32","cmovpo r/m32, r32","0F 4B /r","V","V","","P6,operand32,pseudo","rw,r","","" +"CMOVPO r64, r/m64","CMOVPO r/m64, r64","cmovpo r/m64, r64","REX.W 0F 4B /r","N.E.","V","","pseudo","rw,r","","" +"CMOVAE r64, r/m64","CMOVQCC r/m64, r64","cmovaeq r/m64, r64","REX.W 0F 43 /r","N.S.","V","","","rw,r","Y","64" +"CMOVB r64, r/m64","CMOVQCS r/m64, r64","cmovbq r/m64, r64","REX.W 0F 42 /r","N.S.","V","","","rw,r","Y","64" +"CMOVE r64, r/m64","CMOVQEQ r/m64, r64","cmoveq r/m64, r64","REX.W 0F 44 /r","N.S.","V","","","rw,r","Y","64" +"CMOVGE r64, r/m64","CMOVQGE r/m64, r64","cmovgeq r/m64, r64","REX.W 0F 4D /r","N.S.","V","","","rw,r","Y","64" +"CMOVG r64, r/m64","CMOVQGT r/m64, r64","cmovgq r/m64, r64","REX.W 0F 4F /r","N.S.","V","","","rw,r","Y","64" +"CMOVA r64, r/m64","CMOVQHI r/m64, r64","cmovaq r/m64, r64","REX.W 0F 47 /r","N.S.","V","","","rw,r","Y","64" +"CMOVLE r64, r/m64","CMOVQLE r/m64, r64","cmovleq r/m64, r64","REX.W 0F 4E /r","N.S.","V","","","rw,r","Y","64" +"CMOVBE r64, r/m64","CMOVQLS r/m64, r64","cmovbeq r/m64, r64","REX.W 0F 46 /r","N.S.","V","","","rw,r","Y","64" +"CMOVL r64, r/m64","CMOVQLT r/m64, r64","cmovlq r/m64, r64","REX.W 0F 4C /r","N.S.","V","","","rw,r","Y","64" +"CMOVS r64, r/m64","CMOVQMI r/m64, r64","cmovsq r/m64, r64","REX.W 0F 48 /r","N.S.","V","","","rw,r","Y","64" +"CMOVNE r64, r/m64","CMOVQNE r/m64, r64","cmovneq r/m64, r64","REX.W 0F 45 /r","N.S.","V","","","rw,r","Y","64" +"CMOVNO r64, r/m64","CMOVQOC r/m64, r64","cmovnoq r/m64, r64","REX.W 0F 41 /r","N.S.","V","","","rw,r","Y","64" +"CMOVO r64, r/m64","CMOVQOS r/m64, r64","cmovoq r/m64, r64","REX.W 0F 40 /r","N.S.","V","","","rw,r","Y","64" +"CMOVNP r64, r/m64","CMOVQPC r/m64, r64","cmovnpq r/m64, r64","REX.W 0F 4B /r","N.S.","V","","","rw,r","Y","64" +"CMOVNS r64, r/m64","CMOVQPL r/m64, r64","cmovnsq r/m64, r64","REX.W 0F 49 /r","N.S.","V","","","rw,r","Y","64" +"CMOVP r64, r/m64","CMOVQPS r/m64, r64","cmovpq r/m64, r64","REX.W 0F 4A /r","N.S.","V","","","rw,r","Y","64" +"CMOVAE r16, r/m16","CMOVWCC r/m16, r16","cmovaew r/m16, r16","0F 43 /r","V","V","","P6,operand16","rw,r","Y","16" +"CMOVB r16, r/m16","CMOVWCS r/m16, r16","cmovbw r/m16, r16","0F 42 /r","V","V","","P6,operand16","rw,r","Y","16" +"CMOVE r16, r/m16","CMOVWEQ r/m16, r16","cmovew r/m16, r16","0F 44 /r","V","V","","P6,operand16","rw,r","Y","16" +"CMOVGE r16, r/m16","CMOVWGE r/m16, r16","cmovgew r/m16, r16","0F 4D /r","V","V","","P6,operand16","rw,r","Y","16" +"CMOVG r16, r/m16","CMOVWGT r/m16, r16","cmovgw r/m16, r16","0F 4F /r","V","V","","P6,operand16","rw,r","Y","16" +"CMOVA r16, r/m16","CMOVWHI r/m16, r16","cmovaw r/m16, r16","0F 47 /r","V","V","","P6,operand16","rw,r","Y","16" +"CMOVLE r16, r/m16","CMOVWLE r/m16, r16","cmovlew r/m16, r16","0F 4E /r","V","V","","P6,operand16","rw,r","Y","16" +"CMOVBE r16, r/m16","CMOVWLS r/m16, r16","cmovbew r/m16, r16","0F 46 /r","V","V","","P6,operand16","rw,r","Y","16" +"CMOVL r16, r/m16","CMOVWLT r/m16, r16","cmovlw r/m16, r16","0F 4C 
/r","V","V","","P6,operand16","rw,r","Y","16" +"CMOVS r16, r/m16","CMOVWMI r/m16, r16","cmovsw r/m16, r16","0F 48 /r","V","V","","P6,operand16","rw,r","Y","16" +"CMOVNE r16, r/m16","CMOVWNE r/m16, r16","cmovnew r/m16, r16","0F 45 /r","V","V","","P6,operand16","rw,r","Y","16" +"CMOVNO r16, r/m16","CMOVWOC r/m16, r16","cmovnow r/m16, r16","0F 41 /r","V","V","","P6,operand16","rw,r","Y","16" +"CMOVO r16, r/m16","CMOVWOS r/m16, r16","cmovow r/m16, r16","0F 40 /r","V","V","","P6,operand16","rw,r","Y","16" +"CMOVNP r16, r/m16","CMOVWPC r/m16, r16","cmovnpw r/m16, r16","0F 4B /r","V","V","","P6,operand16","rw,r","Y","16" +"CMOVNS r16, r/m16","CMOVWPL r/m16, r16","cmovnsw r/m16, r16","0F 49 /r","V","V","","P6,operand16","rw,r","Y","16" +"CMOVP r16, r/m16","CMOVWPS r/m16, r16","cmovpw r/m16, r16","0F 4A /r","V","V","","P6,operand16","rw,r","Y","16" +"CMOVZ r16, r/m16","CMOVZ r/m16, r16","cmovz r/m16, r16","0F 44 /r","V","V","","P6,operand16,pseudo","rw,r","","" +"CMOVZ r32, r/m32","CMOVZ r/m32, r32","cmovz r/m32, r32","0F 44 /r","V","V","","P6,operand32,pseudo","rw,r","","" +"CMOVZ r64, r/m64","CMOVZ r/m64, r64","cmovz r/m64, r64","REX.W 0F 44 /r","N.E.","V","","pseudo","rw,r","","" +"CMP AL, imm8","CMPB AL, imm8","cmpb imm8, AL","3C ib","V","V","","","r,r","Y","8" +"CMP r/m8, imm8","CMPB r/m8, imm8","cmpb imm8, r/m8","80 /7 ib","V","V","","","r,r","Y","8" +"CMP r/m8, imm8","CMPB r/m8, imm8","cmpb imm8, r/m8","82 /7 ib","V","N.S.","","","r,r","Y","8" +"CMP r/m8, imm8","CMPB r/m8, imm8","cmpb imm8, r/m8","REX 80 /7 ib","N.E.","V","","pseudo64","r,r","Y","8" +"CMP r/m8, r8","CMPB r/m8, r8","cmpb r8, r/m8","38 /r","V","V","","","r,r","Y","8" +"CMP r/m8, r8","CMPB r/m8, r8","cmpb r8, r/m8","REX 38 /r","N.E.","V","","pseudo64","r,r","Y","8" +"CMP r8, r/m8","CMPB r8, r/m8","cmpb r/m8, r8","3A /r","V","V","","","r,r","Y","8" +"CMP r8, r/m8","CMPB r8, r/m8","cmpb r/m8, r8","REX 3A /r","N.E.","V","","pseudo64","r,r","Y","8" +"CMP EAX, imm32","CMPL EAX, imm32","cmpl imm32, EAX","3D id","V","V","","operand32","r,r","Y","32" +"CMP r/m32, imm32","CMPL r/m32, imm32","cmpl imm32, r/m32","81 /7 id","V","V","","operand32","r,r","Y","32" +"CMP r/m32, imm8","CMPL r/m32, imm8","cmpl imm8, r/m32","83 /7 ib","V","V","","operand32","r,r","Y","32" +"CMP r/m32, r32","CMPL r/m32, r32","cmpl r32, r/m32","39 /r","V","V","","operand32","r,r","Y","32" +"CMP r32, r/m32","CMPL r32, r/m32","cmpl r/m32, r32","3B /r","V","V","","operand32","r,r","Y","32" +"CMPPD xmm1, xmm2/m128, imm8u","CMPPD imm8u, xmm1, xmm2/m128","cmppd imm8u, xmm2/m128, xmm1","66 0F C2 /r ib","V","V","SSE2","","rw,r,r","","" +"CMPPS xmm1, xmm2/m128, imm8u","CMPPS imm8u, xmm1, xmm2/m128","cmpps imm8u, xmm2/m128, xmm1","0F C2 /r ib","V","V","SSE","","rw,r,r","","" +"CMP RAX, imm32","CMPQ RAX, imm32","cmpq imm32, RAX","REX.W 3D id","N.S.","V","","","r,r","Y","64" +"CMP r/m64, imm32","CMPQ r/m64, imm32","cmpq imm32, r/m64","REX.W 81 /7 id","N.S.","V","","","r,r","Y","64" +"CMP r/m64, imm8","CMPQ r/m64, imm8","cmpq imm8, r/m64","REX.W 83 /7 ib","N.S.","V","","","r,r","Y","64" +"CMP r/m64, r64","CMPQ r/m64, r64","cmpq r64, r/m64","REX.W 39 /r","N.S.","V","","","r,r","Y","64" +"CMP r64, r/m64","CMPQ r64, r/m64","cmpq r/m64, r64","REX.W 3B /r","N.S.","V","","","r,r","Y","64" +"CMPSB","CMPSB","cmpsb","A6","V","V","","","","","" +"CMPSD xmm1, xmm2/m64, imm8u","CMPSD imm8u, xmm1, xmm2/m64","cmpsd imm8u, xmm2/m64, xmm1","F2 0F C2 /r ib","V","V","SSE2","","rw,r,r","","" +"CMPSD","CMPSL","cmpsl","A7","V","V","","operand32","","","" +"CMPSQ","CMPSQ","cmpsq","REX.W 
A7","N.S.","V","","","","","" +"CMPSS xmm1, xmm2/m32, imm8u","CMPSS imm8u, xmm1, xmm2/m32","cmpss imm8u, xmm2/m32, xmm1","F3 0F C2 /r ib","V","V","SSE","","rw,r,r","","" +"CMPSW","CMPSW","cmpsw","A7","V","V","","operand16","","","" +"CMP AX, imm16","CMPW AX, imm16","cmpw imm16, AX","3D iw","V","V","","operand16","r,r","Y","16" +"CMP r/m16, imm16","CMPW r/m16, imm16","cmpw imm16, r/m16","81 /7 iw","V","V","","operand16","r,r","Y","16" +"CMP r/m16, imm8","CMPW r/m16, imm8","cmpw imm8, r/m16","83 /7 ib","V","V","","operand16","r,r","Y","16" +"CMP r/m16, r16","CMPW r/m16, r16","cmpw r16, r/m16","39 /r","V","V","","operand16","r,r","Y","16" +"CMP r16, r/m16","CMPW r16, r/m16","cmpw r/m16, r16","3B /r","V","V","","operand16","r,r","Y","16" +"CMPXCHG16B m128","CMPXCHG16B m128","cmpxchg16b m128","REX.W 0F C7 /1","N.S.","V","","modrm_memonly","rw","","" +"CMPXCHG8B m64","CMPXCHG8B m64","cmpxchg8b m64","0F C7 /1","V","V","Pentium","modrm_memonly,operand16,operand32","rw","","" +"CMPXCHG r/m8, r8","CMPXCHGB r8, r/m8","cmpxchgb r8, r/m8","0F B0 /r","V","V","486","","rw,r","Y","8" +"CMPXCHG r/m8, r8","CMPXCHGB r8, r/m8","cmpxchgb r8, r/m8","REX 0F B0 /r","N.E.","V","","pseudo64","rw,r","Y","8" +"CMPXCHG r/m32, r32","CMPXCHGL r32, r/m32","cmpxchgl r32, r/m32","0F B1 /r","V","V","486","operand32","rw,r","Y","32" +"CMPXCHG r/m64, r64","CMPXCHGQ r64, r/m64","cmpxchgq r64, r/m64","REX.W 0F B1 /r","N.S.","V","486","","rw,r","Y","64" +"CMPXCHG r/m16, r16","CMPXCHGW r16, r/m16","cmpxchgw r16, r/m16","0F B1 /r","V","V","486","operand16","rw,r","Y","16" +"COMISD xmm1, xmm2/m64","COMISD xmm2/m64, xmm1","comisd xmm2/m64, xmm1","66 0F 2F /r","V","V","SSE2","","r,r","","" +"COMISS xmm1, xmm2/m32","COMISS xmm2/m32, xmm1","comiss xmm2/m32, xmm1","0F 2F /r","V","V","SSE","","r,r","","" +"CPUID","CPUID","cpuid","0F A2","V","V","486","","","","" +"CQO","CQO","cqto","REX.W 99","N.S.","V","","","","","" +"CRC32 r32, r/m8","CRC32B r/m8, r32","crc32b r/m8, r32","F2 0F 38 F0 /r","V","V","SSE4_2","operand16,operand32","rw,r","Y","8" +"CRC32 r32, r/m8","CRC32B r/m8, r32","crc32b r/m8, r32","F2 REX 0F 38 F0 /r","N.E.","V","","pseudo64","rw,r","Y","8" +"CRC32 r64, r/m8","CRC32B r/m8, r64","crc32b r/m8, r64","F2 REX.W 0F 38 F0 /r","N.S.","V","SSE4_2","","rw,r","Y","8" +"CRC32 r32, r/m32","CRC32L r/m32, r32","crc32l r/m32, r32","F2 0F 38 F1 /r","V","V","SSE4_2","operand32","rw,r","Y","32" +"CRC32 r64, r/m64","CRC32Q r/m64, r64","crc32q r/m64, r64","F2 REX.W 0F 38 F1 /r","N.S.","V","SSE4_2","","rw,r","Y","64" +"CRC32 r32, r/m16","CRC32W r/m16, r32","crc32w r/m16, r32","F2 0F 38 F1 /r","V","V","SSE4_2","operand16","rw,r","Y","16" +"CVTPD2PI mm1, xmm2/m128","CVTPD2PI xmm2/m128, mm1","cvtpd2pi xmm2/m128, mm1","66 0F 2D /r","V","V","SSE2","","w,r","","" +"CVTPD2DQ xmm1, xmm2/m128","CVTPD2PL xmm2/m128, xmm1","cvtpd2dq xmm2/m128, xmm1","F2 0F E6 /r","V","V","SSE2","","w,r","","" +"CVTPD2PS xmm1, xmm2/m128","CVTPD2PS xmm2/m128, xmm1","cvtpd2ps xmm2/m128, xmm1","66 0F 5A /r","V","V","SSE2","","w,r","","" +"CVTPI2PD xmm1, mm2/m64","CVTPI2PD mm2/m64, xmm1","cvtpi2pd mm2/m64, xmm1","66 0F 2A /r","V","V","SSE2","","w,r","","" +"CVTPI2PS xmm1, mm2/m64","CVTPI2PS mm2/m64, xmm1","cvtpi2ps mm2/m64, xmm1","0F 2A /r","V","V","SSE","","w,r","","" +"CVTDQ2PD xmm1, xmm2/m64","CVTPL2PD xmm2/m64, xmm1","cvtdq2pd xmm2/m64, xmm1","F3 0F E6 /r","V","V","SSE2","","w,r","","" +"CVTDQ2PS xmm1, xmm2/m128","CVTPL2PS xmm2/m128, xmm1","cvtdq2ps xmm2/m128, xmm1","0F 5B /r","V","V","SSE2","","w,r","","" +"CVTPS2PD xmm1, xmm2/m64","CVTPS2PD xmm2/m64, xmm1","cvtps2pd 
xmm2/m64, xmm1","0F 5A /r","V","V","SSE2","","w,r","","" +"CVTPS2PI mm1, xmm2/m64","CVTPS2PI xmm2/m64, mm1","cvtps2pi xmm2/m64, mm1","0F 2D /r","V","V","SSE","","w,r","","" +"CVTPS2DQ xmm1, xmm2/m128","CVTPS2PL xmm2/m128, xmm1","cvtps2dq xmm2/m128, xmm1","66 0F 5B /r","V","V","SSE2","","w,r","","" +"CVTSD2SI r32, xmm2/m64","CVTSD2SL xmm2/m64, r32","cvtsd2si xmm2/m64, r32","F2 0F 2D /r","V","V","SSE2","operand16,operand32","w,r","Y","32" +"CVTSD2SI r64, xmm2/m64","CVTSD2SL xmm2/m64, r64","cvtsd2siq xmm2/m64, r64","F2 REX.W 0F 2D /r","N.S.","V","SSE2","","w,r","Y","64" +"CVTSD2SS xmm1, xmm2/m64","CVTSD2SS xmm2/m64, xmm1","cvtsd2ss xmm2/m64, xmm1","F2 0F 5A /r","V","V","SSE2","","w,r","","" +"CVTSI2SD xmm1, r/m32","CVTSL2SD r/m32, xmm1","cvtsi2sdl r/m32, xmm1","F2 0F 2A /r","V","V","SSE2","operand16,operand32","w,r","Y","32" +"CVTSI2SS xmm1, r/m32","CVTSL2SS r/m32, xmm1","cvtsi2ssl r/m32, xmm1","F3 0F 2A /r","V","V","SSE","operand16,operand32","w,r","Y","32" +"CVTSI2SD xmm1, r/m64","CVTSQ2SD r/m64, xmm1","cvtsi2sdq r/m64, xmm1","F2 REX.W 0F 2A /r","N.S.","V","SSE2","","w,r","Y","64" +"CVTSI2SS xmm1, r/m64","CVTSQ2SS r/m64, xmm1","cvtsi2ssq r/m64, xmm1","F3 REX.W 0F 2A /r","N.S.","V","SSE","","w,r","Y","64" +"CVTSS2SD xmm1, xmm2/m32","CVTSS2SD xmm2/m32, xmm1","cvtss2sd xmm2/m32, xmm1","F3 0F 5A /r","V","V","SSE2","","w,r","","" +"CVTSS2SI r32, xmm2/m32","CVTSS2SL xmm2/m32, r32","cvtss2si xmm2/m32, r32","F3 0F 2D /r","V","V","SSE","operand16,operand32","w,r","Y","32" +"CVTSS2SI r64, xmm2/m32","CVTSS2SL xmm2/m32, r64","cvtss2siq xmm2/m32, r64","F3 REX.W 0F 2D /r","N.S.","V","SSE","","w,r","Y","64" +"CVTTPD2PI mm1, xmm2/m128","CVTTPD2PI xmm2/m128, mm1","cvttpd2pi xmm2/m128, mm1","66 0F 2C /r","V","V","SSE2","","w,r","","" +"CVTTPD2DQ xmm1, xmm2/m128","CVTTPD2PL xmm2/m128, xmm1","cvttpd2dq xmm2/m128, xmm1","66 0F E6 /r","V","V","SSE2","","w,r","","" +"CVTTPS2PI mm1, xmm2/m64","CVTTPS2PI xmm2/m64, mm1","cvttps2pi xmm2/m64, mm1","0F 2C /r","V","V","SSE","","w,r","","" +"CVTTPS2DQ xmm1, xmm2/m128","CVTTPS2PL xmm2/m128, xmm1","cvttps2dq xmm2/m128, xmm1","F3 0F 5B /r","V","V","SSE2","","w,r","","" +"CVTTSD2SI r32, xmm2/m64","CVTTSD2SL xmm2/m64, r32","cvttsd2si xmm2/m64, r32","F2 0F 2C /r","V","V","SSE2","operand16,operand32","w,r","Y","32" +"CVTTSD2SI r64, xmm2/m64","CVTTSD2SL xmm2/m64, r64","cvttsd2siq xmm2/m64, r64","F2 REX.W 0F 2C /r","N.S.","V","SSE2","","w,r","Y","64" +"CVTTSS2SI r32, xmm2/m32","CVTTSS2SL xmm2/m32, r32","cvttss2si xmm2/m32, r32","F3 0F 2C /r","V","V","SSE","operand16,operand32","w,r","Y","32" +"CVTTSS2SI r64, xmm2/m32","CVTTSS2SL xmm2/m32, r64","cvttss2siq xmm2/m32, r64","F3 REX.W 0F 2C /r","N.S.","V","SSE","","w,r","Y","64" +"CWD","CWD","cwtd","99","V","V","","operand16","","","" +"CWDE","CWDE","cwtl","98","V","V","","operand32","","","" +"DAA","DAA","daa","27","V","N.S.","","","","","" +"DAS","DAS","das","2F","V","N.S.","","","","","" +"DEC r/m8","DECB r/m8","decb r/m8","FE /1","V","V","","","rw","Y","8" +"DEC r/m8","DECB r/m8","decb r/m8","REX FE /1","N.E.","V","","pseudo64","rw","Y","8" +"DEC r/m32","DECL r/m32","decl r/m32","FF /1","V","V","","operand32","rw","Y","32" +"DEC r32op","DECL r32op","decl r32op","48+rd","V","N.S.","","operand32","rw","Y","32" +"DEC r/m64","DECQ r/m64","decq r/m64","REX.W FF /1","N.S.","V","","","rw","Y","64" +"DEC r/m16","DECW r/m16","decw r/m16","FF /1","V","V","","operand16","rw","Y","16" +"DEC r16op","DECW r16op","decw r16op","48+rw","V","N.S.","","operand16","rw","Y","16" +"DIV r/m8","DIVB r/m8","divb r/m8","F6 /6","V","V","","","r","Y","8" 
+"DIV r/m8","DIVB r/m8","divb r/m8","REX F6 /6","N.E.","V","","pseudo64","w","Y","8" +"DIV r/m32","DIVL r/m32","divl r/m32","F7 /6","V","V","","operand32","r","Y","32" +"DIVPD xmm1, xmm2/m128","DIVPD xmm2/m128, xmm1","divpd xmm2/m128, xmm1","66 0F 5E /r","V","V","SSE2","","rw,r","","" +"DIVPS xmm1, xmm2/m128","DIVPS xmm2/m128, xmm1","divps xmm2/m128, xmm1","0F 5E /r","V","V","SSE","","rw,r","","" +"DIV r/m64","DIVQ r/m64","divq r/m64","REX.W F7 /6","N.S.","V","","","r","Y","64" +"DIVSD xmm1, xmm2/m64","DIVSD xmm2/m64, xmm1","divsd xmm2/m64, xmm1","F2 0F 5E /r","V","V","SSE2","","rw,r","","" +"DIVSS xmm1, xmm2/m32","DIVSS xmm2/m32, xmm1","divss xmm2/m32, xmm1","F3 0F 5E /r","V","V","SSE","","rw,r","","" +"DIV r/m16","DIVW r/m16","divw r/m16","F7 /6","V","V","","operand16","r","Y","16" +"DPPD xmm1, xmm2/m128, imm8u","DPPD imm8u, xmm2/m128, xmm1","dppd imm8u, xmm2/m128, xmm1","66 0F 3A 41 /r ib","V","V","SSE4_1","","rw,r,r","","" +"DPPS xmm1, xmm2/m128, imm8u","DPPS imm8u, xmm2/m128, xmm1","dpps imm8u, xmm2/m128, xmm1","66 0F 3A 40 /r ib","V","V","SSE4_1","","rw,r,r","","" +"EMMS","EMMS","emms","0F 77","V","V","MMX","","","","" +"ENCLS","ENCLS","encls","0F 01 CF","V","V","","","","","" +"ENCLU","ENCLU","enclu","0F 01 D7","V","V","","","","","" +"ENDBR32","ENDBR32","endbr32","F3 0F 1E FB","V","V","CET","","","","" +"ENDBR64","ENDBR64","endbr64","F3 0F 1E FA","V","V","CET","","","Y","" +"ENTER imm16, 0","ENTER 0, imm16","enter imm16, 0","C8 iw 00","V","V","","pseudo","r,r","","" +"ENTER imm16, 1","ENTER 1, imm16","enter imm16, 1","C8 iw 01","V","V","","pseudo","r,r","","" +"ENTER imm16, imm8b","ENTERW/ENTERL/ENTERQ imm8b, imm16","enterw/enterl/enterq imm16, imm8b","C8 iw ib","V","V","","","r,r","","" +"EXTRACTPS r/m32, xmm1, imm8u:2","EXTRACTPS imm8u:2, xmm1, r/m32","extractps imm8u:2, xmm1, r/m32","66 0F 3A 17 /r ib","V","V","SSE4_1","","w,r,r","","" +"EXTRQ xmm1, imm8u, imm8u","EXTRQ imm8u, imm8u, xmm1","extrq imm8u, imm8u, xmm1","66 0F 78 /0 ib ib","V","V","SSE4a","amd,modrm_regonly","w,r,r","","" +"EXTRQ xmm1, xmm2","EXTRQ xmm2, xmm1","extrq xmm2, xmm1","66 0F 79 /r","V","V","SSE4a","amd,modrm_regonly","w,r","","" +"F2XM1","F2XM1","f2xm1","D9 F0","V","V","","","","","" +"FABS","FABS","fabs","D9 E1","V","V","","","","","" +"FADD ST(i), ST(0)","FADDD ST(0), ST(i)","fadd ST(0), ST(i)","DC C0+i","V","V","","","rw,r","Y","" +"FADD ST(0), ST(i)","FADDD ST(i), ST(0)","fadd ST(i), ST(0)","D8 C0+i","V","V","","","rw,r","Y","" +"FADD ST(0), m32fp","FADDD m32fp, ST(0)","fadds m32fp, ST(0)","D8 /0","V","V","","","rw,r","Y","32" +"FADD ST(0), m64fp","FADDD m64fp, ST(0)","faddl m64fp, ST(0)","DC /0","V","V","","","rw,r","Y","64" +"FADDP","FADDDP","faddp","DE C1","V","V","","pseudo","","","" +"FADDP ST(i), ST(0)","FADDDP ST(0), ST(i)","faddp ST(0), ST(i)","DE C0+i","V","V","","","rw,r","","" +"FBLD ST(0), m80dec","FBLD m80dec, ST(0)","fbld m80dec, ST(0)","DF /4","V","V","","","w,r","","" +"FBSTP m80dec, ST(0)","FBSTP ST(0), m80dec","fbstp ST(0), m80dec","DF /6","V","V","","","w,r","","" +"FCHS","FCHS","fchs","D9 E0","V","V","","","","","" +"FCLEX","FCLEX","fclex","9B DB E2","V","V","","pseudo","","","" +"FCMOVB ST(0), ST(i)","FCMOVB ST(i), ST(0)","fcmovb ST(i), ST(0)","DA C0+i","V","V","","P6","rw,r","","" +"FCMOVBE ST(0), ST(i)","FCMOVBE ST(i), ST(0)","fcmovbe ST(i), ST(0)","DA D0+i","V","V","","P6","rw,r","","" +"FCMOVE ST(0), ST(i)","FCMOVE ST(i), ST(0)","fcmove ST(i), ST(0)","DA C8+i","V","V","","P6","rw,r","","" +"FCMOVNB ST(0), ST(i)","FCMOVNB ST(i), ST(0)","fcmovnb ST(i), ST(0)","DB 
C0+i","V","V","","P6","rw,r","","" +"FCMOVNBE ST(0), ST(i)","FCMOVNBE ST(i), ST(0)","fcmovnbe ST(i), ST(0)","DB D0+i","V","V","","P6","rw,r","","" +"FCMOVNE ST(0), ST(i)","FCMOVNE ST(i), ST(0)","fcmovne ST(i), ST(0)","DB C8+i","V","V","","P6","rw,r","","" +"FCMOVNU ST(0), ST(i)","FCMOVNU ST(i), ST(0)","fcmovnu ST(i), ST(0)","DB D8+i","V","V","","P6","rw,r","","" +"FCMOVU ST(0), ST(i)","FCMOVU ST(i), ST(0)","fcmovu ST(i), ST(0)","DA D8+i","V","V","","P6","rw,r","","" +"FCOM","FCOMD","fcom","D8 D1","V","V","","pseudo","","Y","" +"FCOM ST(0), ST(i)","FCOMD ST(i), ST(0)","fcom ST(i), ST(0)","D8 D0+i","V","V","","","r,r","Y","" +"FCOM ST(0), ST(i)","FCOMD ST(i), ST(0)","fcom ST(i), ST(0)","DC D0+i","V","V","","","r,r","Y","" +"FCOM ST(0), m32fp","FCOMD m32fp, ST(0)","fcoms m32fp, ST(0)","D8 /2","V","V","","","r,r","Y","32" +"FCOM ST(0), m64fp","FCOMD m64fp, ST(0)","fcoml m64fp, ST(0)","DC /2","V","V","","","r,r","Y","64" +"FCOMP ST(0), m32fp","FCOMFP m32fp, ST(0)","fcomps m32fp, ST(0)","D8 /3","V","V","","","r,r","Y","32" +"FCOMI ST(0), ST(i)","FCOMI ST(i), ST(0)","fcomi ST(i), ST(0)","DB F0+i","V","V","PPRO","P6","r,r","","" +"FCOMIP ST(0), ST(i)","FCOMIP ST(i), ST(0)","fcomip ST(i), ST(0)","DF F0+i","V","V","PPRO","P6","r,r","","" +"FCOMP","FCOMP","fcomp","D8 D9","V","V","","pseudo","","Y","" +"FCOMP ST(0), ST(i)","FCOMP ST(i), ST(0)","fcomp ST(i), ST(0)","D8 D8+i","V","V","","","r,r","Y","" +"FCOMP ST(0), ST(i)","FCOMP ST(i), ST(0)","fcomp ST(i), ST(0)","DC D8+i","V","V","","","r,r","Y","" +"FCOMP ST(0), ST(i)","FCOMP ST(i), ST(0)","fcomp ST(i), ST(0)","DE D0+i","V","V","","","r,r","Y","" +"FCOMP ST(0), m64fp","FCOMPL m64fp, ST(0)","fcompl m64fp, ST(0)","DC /3","V","V","","","r,r","Y","64" +"FCOMPP","FCOMPP","fcompp","DE D9","V","V","","","","","" +"FCOS","FCOS","fcos","D9 FF","V","V","","","","","" +"FDECSTP","FDECSTP","fdecstp","D9 F6","V","V","","","","","" +"FDISI8087_NOP","FDISI8087_NOP","fdisi8087_nop","DB E1","V","V","","","","","" +"FDIVR ST(i), ST(0)","FDIVD ST(0), ST(i)","fdiv ST(0), ST(i)","DC F0+i","V","V","","","rw,r","Y","" +"FDIV ST(i), ST(0)","FDIVD ST(0), ST(i)","fdivr ST(0), ST(i)","DC F8+i","V","V","","","rw,r","Y","" +"FDIV ST(0), ST(i)","FDIVD ST(i), ST(0)","fdiv ST(i), ST(0)","D8 F0+i","V","V","","","rw,r","Y","" +"FDIV ST(0), m32fp","FDIVD m32fp, ST(0)","fdivs m32fp, ST(0)","D8 /6","V","V","","","rw,r","Y","32" +"FDIV ST(0), m64fp","FDIVD m64fp, ST(0)","fdivl m64fp, ST(0)","DC /6","V","V","","","rw,r","Y","64" +"FDIVR ST(0), m32fp","FDIVFR m32fp, ST(0)","fdivrs m32fp, ST(0)","D8 /7","V","V","","","rw,r","Y","32" +"FDIVP","FDIVP","fdivp","DE F9","V","V","","pseudo","","","" +"FDIVRP ST(i), ST(0)","FDIVP ST(0), ST(i)","fdivp ST(0), ST(i)","DE F0+i","V","V","","","rw,r","","" +"FDIVR ST(0), ST(i)","FDIVR ST(i), ST(0)","fdivr ST(i), ST(0)","D8 F8+i","V","V","","","rw,r","Y","" +"FDIVR ST(0), m64fp","FDIVRL m64fp, ST(0)","fdivrl m64fp, ST(0)","DC /7","V","V","","","rw,r","Y","64" +"FDIVRP","FDIVRP","fdivrp","DE F1","V","V","","pseudo","","","" +"FDIVP ST(i), ST(0)","FDIVRP ST(0), ST(i)","fdivrp ST(0), ST(i)","DE F8+i","V","V","","","rw,r","","" +"FEMMS","FEMMS","femms","0F 0E","V","V","3DNOW","amd","","","" +"FENI8087_NOP","FENI8087_NOP","feni8087_nop","DB E0","V","V","","","","","" +"FFREE ST(i)","FFREE ST(i)","ffree ST(i)","DD C0+i","V","V","","","r","","" +"FFREEP ST(i)","FFREEP ST(i)","ffreep ST(i)","DF C0+i","V","V","","","r","","" +"FIADD ST(0), m16int","FIADD m16int, ST(0)","fiadd m16int, ST(0)","DE /0","V","V","","","rw,r","Y","" +"FIADD ST(0), m32int","FIADDL 
m32int, ST(0)","fiaddl m32int, ST(0)","DA /0","V","V","","","rw,r","Y","32" +"FICOM ST(0), m16int","FICOM m16int, ST(0)","ficom m16int, ST(0)","DE /2","V","V","","","r,r","Y","" +"FICOM ST(0), m32int","FICOML m32int, ST(0)","ficoml m32int, ST(0)","DA /2","V","V","","","r,r","Y","32" +"FICOMP ST(0), m16int","FICOMP m16int, ST(0)","ficomp m16int, ST(0)","DE /3","V","V","","","r,r","Y","" +"FICOMP ST(0), m32int","FICOMPL m32int, ST(0)","ficompl m32int, ST(0)","DA /3","V","V","","","r,r","Y","32" +"FIDIV ST(0), m16int","FIDIV m16int, ST(0)","fidiv m16int, ST(0)","DE /6","V","V","","","rw,r","Y","" +"FIDIV ST(0), m32int","FIDIVL m32int, ST(0)","fidivl m32int, ST(0)","DA /6","V","V","","","rw,r","Y","32" +"FIDIVR ST(0), m16int","FIDIVR m16int, ST(0)","fidivr m16int, ST(0)","DE /7","V","V","","","rw,r","Y","" +"FIDIVR ST(0), m32int","FIDIVRL m32int, ST(0)","fidivrl m32int, ST(0)","DA /7","V","V","","","rw,r","Y","32" +"FILD ST(0), m16int","FILD m16int, ST(0)","fild m16int, ST(0)","DF /0","V","V","","","w,r","Y","" +"FILD ST(0), m32int","FILDL m32int, ST(0)","fildl m32int, ST(0)","DB /0","V","V","","","w,r","Y","32" +"FILD ST(0), m64int","FILDLL m64int, ST(0)","fildll m64int, ST(0)","DF /5","V","V","","","w,r","Y","64" +"FIMUL ST(0), m16int","FIMUL m16int, ST(0)","fimul m16int, ST(0)","DE /1","V","V","","","rw,r","Y","" +"FIMUL ST(0), m32int","FIMULL m32int, ST(0)","fimull m32int, ST(0)","DA /1","V","V","","","rw,r","Y","32" +"FINCSTP","FINCSTP","fincstp","D9 F7","V","V","","","","","" +"FINIT","FINIT","finit","9B DB E3","V","V","","pseudo","","","" +"FIST m16int, ST(0)","FIST ST(0), m16int","fist ST(0), m16int","DF /2","V","V","","","w,r","Y","" +"FIST m32int, ST(0)","FISTL ST(0), m32int","fistl ST(0), m32int","DB /2","V","V","","","w,r","Y","32" +"FISTP m16int, ST(0)","FISTP ST(0), m16int","fistp ST(0), m16int","DF /3","V","V","","","w,r","Y","" +"FISTP m32int, ST(0)","FISTPL ST(0), m32int","fistpl ST(0), m32int","DB /3","V","V","","","w,r","Y","32" +"FISTP m64int, ST(0)","FISTPLL ST(0), m64int","fistpll ST(0), m64int","DF /7","V","V","","","w,r","Y","64" +"FISTTP m16int, ST(0)","FISTTP ST(0), m16int","fisttp ST(0), m16int","DF /1","V","V","SSE3","modrm_memonly","w,r","Y","" +"FISTTP m32int, ST(0)","FISTTPL ST(0), m32int","fisttpl ST(0), m32int","DB /1","V","V","SSE3","modrm_memonly","w,r","Y","32" +"FISTTP m64int, ST(0)","FISTTPLL ST(0), m64int","fisttpll ST(0), m64int","DD /1","V","V","SSE3","modrm_memonly","w,r","Y","64" +"FISUB ST(0), m16int","FISUB m16int, ST(0)","fisub m16int, ST(0)","DE /4","V","V","","","rw,r","Y","" +"FISUB ST(0), m32int","FISUBL m32int, ST(0)","fisubl m32int, ST(0)","DA /4","V","V","","","rw,r","Y","32" +"FISUBR ST(0), m16int","FISUBR m16int, ST(0)","fisubr m16int, ST(0)","DE /5","V","V","","","rw,r","Y","" +"FISUBR ST(0), m32int","FISUBRL m32int, ST(0)","fisubrl m32int, ST(0)","DA /5","V","V","","","rw,r","Y","32" +"FLD ST(0), ST(i)","FLD ST(i), ST(0)","fld ST(i), ST(0)","D9 C0+i","V","V","","","w,r","Y","" +"FLD1","FLD1","fld1","D9 E8","V","V","","","","","" +"FLDCW m2byte","FLDCW m2byte","fldcw m2byte","D9 /5","V","V","","","r","","" +"FLDENV m28byte","FLDENV m28byte","fldenv m28byte","D9 /4","V","V","","operand32,operand64","r","","" +"FLDENV m14byte","FLDENVS m14byte","fldenv m14byte","D9 /4","V","V","","operand16","r","","" +"FLD ST(0), m64fp","FLDL m64fp, ST(0)","fldl m64fp, ST(0)","DD /0","V","V","","","w,r","Y","64" +"FLDL2E","FLDL2E","fldl2e","D9 EA","V","V","","","","","" +"FLDL2T","FLDL2T","fldl2t","D9 E9","V","V","","","","","" 
+"FLDLG2","FLDLG2","fldlg2","D9 EC","V","V","","","","","" +"FLDLN2","FLDLN2","fldln2","D9 ED","V","V","","","","","" +"FLDPI","FLDPI","fldpi","D9 EB","V","V","","","","","" +"FLD ST(0), m32fp","FLDS m32fp, ST(0)","flds m32fp, ST(0)","D9 /0","V","V","","","w,r","Y","32" +"FLD ST(0), m80fp","FLDT m80fp, ST(0)","fldt m80fp, ST(0)","DB /5","V","V","","","w,r","Y","80" +"FLDZ","FLDZ","fldz","D9 EE","V","V","","","","","" +"FMUL ST(i), ST(0)","FMUL ST(0), ST(i)","fmul ST(0), ST(i)","DC C8+i","V","V","","","rw,r","Y","" +"FMUL ST(0), ST(i)","FMUL ST(i), ST(0)","fmul ST(i), ST(0)","D8 C8+i","V","V","","","rw,r","Y","" +"FMUL ST(0), m64fp","FMULL m64fp, ST(0)","fmull m64fp, ST(0)","DC /1","V","V","","","rw,r","Y","64" +"FMULP","FMULP","fmulp","DE C9","V","V","","pseudo","","","" +"FMULP ST(i), ST(0)","FMULP ST(0), ST(i)","fmulp ST(0), ST(i)","DE C8+i","V","V","","","rw,r","","" +"FMUL ST(0), m32fp","FMULS m32fp, ST(0)","fmuls m32fp, ST(0)","D8 /1","V","V","","","rw,r","Y","32" +"FNCLEX","FNCLEX","fnclex","DB E2","V","V","","","","","" +"FNINIT","FNINIT","fninit","DB E3","V","V","","","","","" +"FNOP","FNOP","fnop","D9 D0","V","V","","","","","" +"FNSAVE m108byte","FNSAVE m108byte","fnsave m108byte","DD /6","V","V","","operand32,operand64","w","","" +"FNSAVE m94byte","FNSAVES m94byte","fnsave m94byte","DD /6","V","V","","operand16","w","","" +"FNSTCW m2byte","FNSTCW m2byte","fnstcw m2byte","D9 /7","V","V","","","w","","" +"FNSTENV m28byte","FNSTENV m28byte","fnstenv m28byte","D9 /6","V","V","","operand32,operand64","w","","" +"FNSTENV m14byte","FNSTENVS m14byte","fnstenv m14byte","D9 /6","V","V","","operand16","w","","" +"FNSTSW AX","FNSTSW AX","fnstsw AX","DF E0","V","V","","","w","","" +"FNSTSW m2byte","FNSTSW m2byte","fnstsw m2byte","DD /7","V","V","","","w","","" +"FPATAN","FPATAN","fpatan","D9 F3","V","V","","","","","" +"FPREM","FPREM","fprem","D9 F8","V","V","","","","","" +"FPREM1","FPREM1","fprem1","D9 F5","V","V","","","","","" +"FPTAN","FPTAN","fptan","D9 F2","V","V","","","","","" +"FRNDINT","FRNDINT","frndint","D9 FC","V","V","","","","","" +"FRSTOR m108byte","FRSTOR m108byte","frstor m108byte","DD /4","V","V","","operand32,operand64","r","","" +"FRSTOR m94byte","FRSTORS m94byte","frstor m94byte","DD /4","V","V","","operand16","r","","" +"FSAVE m94/108byte","FSAVE m94/108byte","fsave m94/108byte","9B DD /6","V","V","","pseudo","w","","" +"FSCALE","FSCALE","fscale","D9 FD","V","V","","","","","" +"FSETPM287_NOP","FSETPM287_NOP","fsetpm287_nop","DB E4","V","V","","","","","" +"FSIN","FSIN","fsin","D9 FE","V","V","","","","","" +"FSINCOS","FSINCOS","fsincos","D9 FB","V","V","","","","","" +"FSQRT","FSQRT","fsqrt","D9 FA","V","V","","","","","" +"FST ST(i), ST(0)","FST ST(0), ST(i)","fst ST(0), ST(i)","DD D0+i","V","V","","","w,r","Y","" +"FSTCW m2byte","FSTCW m2byte","fstcw m2byte","9B D9 /7","V","V","","pseudo","w","","" +"FSTENV m14/28byte","FSTENV m14/28byte","fstenv m14/28byte","9B D9 /6","V","V","","pseudo","w","","" +"FST m64fp, ST(0)","FSTL ST(0), m64fp","fstl ST(0), m64fp","DD /2","V","V","","","w,r","Y","64" +"FSTP ST(i), ST(0)","FSTP ST(0), ST(i)","fstp ST(0), ST(i)","DD D8+i","V","V","","","w,r","Y","" +"FSTP ST(i), ST(0)","FSTP ST(0), ST(i)","fstp ST(0), ST(i)","DF D0+i","V","V","","","w,r","Y","" +"FSTP ST(i), ST(0)","FSTP ST(0), ST(i)","fstp ST(0), ST(i)","DF D8+i","V","V","","","w,r","Y","" +"FSTP m64fp, ST(0)","FSTPL ST(0), m64fp","fstpl ST(0), m64fp","DD /3","V","V","","","w,r","Y","64" +"FSTPNCE ST(i), ST(0)","FSTPNCE ST(0), ST(i)","fstpnce ST(0), ST(i)","D9 
D8+i","V","V","","","w,r","","" +"FSTP m32fp, ST(0)","FSTPS ST(0), m32fp","fstps ST(0), m32fp","D9 /3","V","V","","","w,r","Y","32" +"FSTP m80fp, ST(0)","FSTPT ST(0), m80fp","fstpt ST(0), m80fp","DB /7","V","V","","","w,r","Y","80" +"FST m32fp, ST(0)","FSTS ST(0), m32fp","fsts ST(0), m32fp","D9 /2","V","V","","","w,r","Y","32" +"FSTSW AX","FSTSW AX","fstsw AX","9B DF E0","V","V","","pseudo","w","","" +"FSTSW m2byte","FSTSW m2byte","fstsw m2byte","9B DD /7","V","V","","pseudo","w","","" +"FSUBR ST(i), ST(0)","FSUB ST(0), ST(i)","fsub ST(0), ST(i)","DC E0+i","V","V","","","rw,r","Y","" +"FSUB ST(0), ST(i)","FSUB ST(i), ST(0)","fsub ST(i), ST(0)","D8 E0+i","V","V","","","rw,r","Y","" +"FSUB ST(0), m64fp","FSUBL m64fp, ST(0)","fsubl m64fp, ST(0)","DC /4","V","V","","","rw,r","Y","64" +"FSUBP","FSUBP","fsubp","DE E9","V","V","","pseudo","","","" +"FSUBRP ST(i), ST(0)","FSUBP ST(0), ST(i)","fsubp ST(0), ST(i)","DE E0+i","V","V","","","rw,r","","" +"FSUB ST(i), ST(0)","FSUBR ST(0), ST(i)","fsubr ST(0), ST(i)","DC E8+i","V","V","","","rw,r","Y","" +"FSUBR ST(0), ST(i)","FSUBR ST(i), ST(0)","fsubr ST(i), ST(0)","D8 E8+i","V","V","","","rw,r","Y","" +"FSUBR ST(0), m64fp","FSUBRL m64fp, ST(0)","fsubrl m64fp, ST(0)","DC /5","V","V","","","rw,r","Y","64" +"FSUBRP","FSUBRP","fsubrp","DE E1","V","V","","pseudo","","","" +"FSUBP ST(i), ST(0)","FSUBRP ST(0), ST(i)","fsubrp ST(0), ST(i)","DE E8+i","V","V","","","rw,r","","" +"FSUBR ST(0), m32fp","FSUBRS m32fp, ST(0)","fsubrs m32fp, ST(0)","D8 /5","V","V","","","rw,r","Y","32" +"FSUB ST(0), m32fp","FSUBS m32fp, ST(0)","fsubs m32fp, ST(0)","D8 /4","V","V","","","rw,r","Y","32" +"FTST","FTST","ftst","D9 E4","V","V","","","","","" +"FUCOM","FUCOM","fucom","DD E1","V","V","","pseudo","","","" +"FUCOM ST(0), ST(i)","FUCOM ST(i), ST(0)","fucom ST(i), ST(0)","DD E0+i","V","V","","","r,r","","" +"FUCOMI ST(0), ST(i)","FUCOMI ST(i), ST(0)","fucomi ST(i), ST(0)","DB E8+i","V","V","PPRO","P6","r,r","","" +"FUCOMIP ST(0), ST(i)","FUCOMIP ST(i), ST(0)","fucomip ST(i), ST(0)","DF E8+i","V","V","PPRO","P6","r,r","","" +"FUCOMP","FUCOMP","fucomp","DD E9","V","V","","pseudo","","","" +"FUCOMP ST(0), ST(i)","FUCOMP ST(i), ST(0)","fucomp ST(i), ST(0)","DD E8+i","V","V","","","r,r","","" +"FUCOMPP","FUCOMPP","fucompp","DA E9","V","V","","","","","" +"FWAIT","FWAIT","fwait","9B","V","V","","","","","" +"FXAM","FXAM","fxam","D9 E5","V","V","","","","","" +"FXCH","FXCH","fxch","D9 C9","V","V","","pseudo","","","" +"FXCH ST(0), ST(i)","FXCH ST(i), ST(0)","fxch ST(i), ST(0)","D9 C8+i","V","V","","","rw,rw","","" +"FXCH_ALIAS1 ST(0), ST(i)","FXCH_ALIAS1 ST(i), ST(0)","fxch_alias1 ST(i), ST(0)","DD C8+i","V","V","","","rw,rw","","" +"FXCH_ALIAS2 ST(0), ST(i)","FXCH_ALIAS2 ST(i), ST(0)","fxch_alias2 ST(i), ST(0)","DF C8+i","V","V","","","rw,rw","","" +"FXRSTOR m512byte","FXRSTOR m512byte","fxrstor m512byte","0F AE /1","V","V","","modrm_memonly,operand16,operand32","r","","" +"FXRSTOR64 m512byte","FXRSTOR64 m512byte","fxrstor64 m512byte","REX.W 0F AE /1","N.S.","V","","modrm_memonly","r","","" +"FXSAVE m512byte","FXSAVE m512byte","fxsave m512byte","0F AE /0","V","V","","modrm_memonly,operand16,operand32","w","","" +"FXSAVE64 m512byte","FXSAVE64 m512byte","fxsave64 m512byte","REX.W 0F AE /0","N.S.","V","","modrm_memonly","w","","" +"FXTRACT","FXTRACT","fxtract","D9 F4","V","V","","","","","" +"FYL2X","FYL2X","fyl2x","D9 F1","V","V","","","","","" +"FYL2XP1","FYL2XP1","fyl2xp1","D9 F9","V","V","","","","","" +"GETSEC","GETSEC","getsec","0F 37","V","V","SMX","","","","" +"GF2P8AFFINEINVQB 
xmm1, xmm2/m128, imm8u","GF2P8AFFINEINVQB imm8u, xmm2/m128, xmm1","gf2p8affineinvqb imm8u, xmm2/m128, xmm1","66 0F 3A CF /r ib","V","V","GFNI","","rw,r,r","","" +"GF2P8AFFINEQB xmm1, xmm2/m128, imm8u","GF2P8AFFINEQB imm8u, xmm2/m128, xmm1","gf2p8affineqb imm8u, xmm2/m128, xmm1","66 0F 3A CE /r ib","V","V","GFNI","","rw,r,r","","" +"GF2P8MULB xmm1, xmm2/m128","GF2P8MULB xmm2/m128, xmm1","gf2p8mulb xmm2/m128, xmm1","66 0F 38 CF /r","V","V","GFNI","","rw,r","","" +"HADDPD xmm1, xmm2/m128","HADDPD xmm2/m128, xmm1","haddpd xmm2/m128, xmm1","66 0F 7C /r","V","V","SSE3","","rw,r","","" +"HADDPS xmm1, xmm2/m128","HADDPS xmm2/m128, xmm1","haddps xmm2/m128, xmm1","F2 0F 7C /r","V","V","SSE3","","rw,r","","" +"HLT","HLT","hlt","F4","V","V","","","","","" +"HSUBPD xmm1, xmm2/m128","HSUBPD xmm2/m128, xmm1","hsubpd xmm2/m128, xmm1","66 0F 7D /r","V","V","SSE3","","rw,r","","" +"HSUBPS xmm1, xmm2/m128","HSUBPS xmm2/m128, xmm1","hsubps xmm2/m128, xmm1","F2 0F 7D /r","V","V","SSE3","","rw,r","","" +"ICEBP","ICEBP","icebp","F1","V","V","","","","","" +"IDIV r/m8","IDIVB r/m8","idivb r/m8","F6 /7","V","V","","","r","Y","8" +"IDIV r/m8","IDIVB r/m8","idivb r/m8","REX F6 /7","N.E.","V","","pseudo64","r","Y","8" +"IDIV r/m32","IDIVL r/m32","idivl r/m32","F7 /7","V","V","","operand32","r","Y","32" +"IDIV r/m64","IDIVQ r/m64","idivq r/m64","REX.W F7 /7","N.S.","V","","","r","Y","64" +"IDIV r/m16","IDIVW r/m16","idivw r/m16","F7 /7","V","V","","operand16","r","Y","16" +"IMUL r32, r/m32, imm32","IMUL3 imm32, r/m32, r32","imull imm32, r/m32, r32","69 /r id","V","V","","operand32","w,r,r","Y","32" +"IMUL r64, r/m64, imm32","IMUL3 imm32, r/m64, r64","imulq imm32, r/m64, r64","REX.W 69 /r id","N.S.","V","","","w,r,r","Y","64" +"IMUL r16, r/m16, imm8","IMUL3 imm8, r/m16, r16","imulw imm8, r/m16, r16","6B /r ib","V","V","","operand16","w,r,r","Y","16" +"IMUL r32, r/m32, imm8","IMUL3 imm8, r/m32, r32","imull imm8, r/m32, r32","6B /r ib","V","V","","operand32","w,r,r","Y","32" +"IMUL r64, r/m64, imm8","IMUL3 imm8, r/m64, r64","imulq imm8, r/m64, r64","REX.W 6B /r ib","N.S.","V","","","w,r,r","Y","64" +"IMUL r/m8","IMULB r/m8","imulb r/m8","F6 /5","V","V","","","r","Y","8" +"IMUL r/m32","IMULL r/m32","imull r/m32","F7 /5","V","V","","operand32","r","Y","32" +"IMUL r32, r/m32","IMULL r/m32, r32","imull r/m32, r32","0F AF /r","V","V","","operand32","rw,r","Y","32" +"IMUL r/m64","IMULQ r/m64","imulq r/m64","REX.W F7 /5","N.S.","V","","","r","Y","64" +"IMUL r64, r/m64","IMULQ r/m64, r64","imulq r/m64, r64","REX.W 0F AF /r","N.S.","V","","","rw,r","Y","64" +"IMUL r16, r/m16, imm16","IMULW imm16, r/m16, r16","imulw imm16, r/m16, r16","69 /r iw","V","V","","operand16","w,r,r","Y","16" +"IMUL r/m16","IMULW r/m16","imulw r/m16","F7 /5","V","V","","operand16","r","Y","16" +"IMUL r16, r/m16","IMULW r/m16, r16","imulw r/m16, r16","0F AF /r","V","V","","operand16","rw,r","Y","16" +"IN AL, DX","INB DX, AL","inb DX, AL","EC","V","V","","","w,r","Y","8" +"IN AL, imm8u","INB imm8u, AL","inb imm8u, AL","E4 ib","V","V","","","w,r","Y","8" +"INC r/m8","INCB r/m8","incb r/m8","FE /0","V","V","","","rw","Y","8" +"INC r/m8","INCB r/m8","incb r/m8","REX FE /0","N.E.","V","","pseudo64","rw","Y","8" +"INC r/m32","INCL r/m32","incl r/m32","FF /0","V","V","","operand32","rw","Y","32" +"INC r32op","INCL r32op","incl r32op","40+rd","V","N.S.","","operand32","rw","Y","32" +"INC r/m64","INCQ r/m64","incq r/m64","REX.W FF /0","N.S.","V","","","rw","Y","64" +"INCSSPD rmr32","INCSSPD rmr32","incsspd rmr32","F3 0F AE 
/5","V","V","CET","modrm_regonly,operand16,operand32","r","","" +"INCSSPQ rmr64","INCSSPQ rmr64","incsspq rmr64","F3 REX.W 0F AE /5","N.S.","V","CET","modrm_regonly","r","","" +"INC r/m16","INCW r/m16","incw r/m16","FF /0","V","V","","operand16","rw","Y","16" +"INC r16op","INCW r16op","incw r16op","40+rw","V","N.S.","","operand16","rw","Y","16" +"IN EAX, DX","INL DX, EAX","inl DX, EAX","ED","V","V","","operand32,operand64","w,r","Y","32" +"IN EAX, imm8u","INL imm8u, EAX","inl imm8u, EAX","E5 ib","V","V","","operand32,operand64","w,r","Y","32" +"INSB","INSB","insb","6C","V","V","","","","","" +"INSERTPS xmm1, xmm2/m32, imm8u","INSERTPS imm8u, xmm2/m32, xmm1","insertps imm8u, xmm2/m32, xmm1","66 0F 3A 21 /r ib","V","V","SSE4_1","","rw,r,r","","" +"INSERTQ xmm1, xmm2, imm8u, imm8u","INSERTQ imm8u, imm8u, xmm2, xmm1","insertq imm8u, imm8u, xmm2, xmm1","F2 0F 78 /r ib ib","V","V","SSE4a","amd,modrm_regonly","w,r,r,r","","" +"INSERTQ xmm1, xmm2","INSERTQ xmm2, xmm1","insertq xmm2, xmm1","F2 0F 79 /r","V","V","SSE4a","amd,modrm_regonly","w,r","","" +"INSD","INSL","insl","6D","V","V","","operand32,operand64","","","" +"INSW","INSW","insw","6D","V","V","","operand16","","","" +"INT 3","INT 3","int 3","CC","V","V","","","r","","" +"INT imm8u","INT imm8u","int imm8u","CD ib","V","V","","","r","","" +"INTO","INTO","into","CE","V","N.S.","","","","","" +"INVD","INVD","invd","0F 08","V","V","486","","","","" +"INVEPT r32, m128","INVEPT m128, r32","invept m128, r32","66 0F 38 80 /r","V","N.S.","VTX","modrm_memonly","r,r","","" +"INVEPT r64, m128","INVEPT m128, r64","invept m128, r64","66 0F 38 80 /r","N.S.","V","VTX","default64,modrm_memonly","r,r","","" +"INVLPG m","INVLPG m","invlpg m","0F 01 /7","V","V","486","modrm_memonly","r","","" +"INVLPGA EAX, ECX","INVLPGAL ECX, EAX","invlpgal ECX, EAX","0F 01 DF","V","V","SVM","amd,modrm_regonly,operand32","r,r","Y","32" +"INVLPGA RAX, ECX","INVLPGAQ ECX, RAX","invlpgaq ECX, RAX","REX.W 0F 01 DF","N.S.","V","SVM","amd,modrm_regonly","r,r","Y","64" +"INVLPGA AX, ECX","INVLPGAW ECX, AX","invlpgaw ECX, AX","0F 01 DF","V","V","SVM","amd,modrm_regonly,operand16","r,r","Y","16" +"INVPCID r32, m128","INVPCID m128, r32","invpcid m128, r32","66 0F 38 82 /r","V","N.S.","INVPCID","modrm_memonly","r,r","","" +"INVPCID r64, m128","INVPCID m128, r64","invpcid m128, r64","66 0F 38 82 /r","N.S.","V","INVPCID","default64,modrm_memonly","r,r","","" +"INVVPID r32, m128","INVVPID m128, r32","invvpid m128, r32","66 0F 38 81 /r","V","N.S.","VTX","modrm_memonly","r,r","","" +"INVVPID r64, m128","INVVPID m128, r64","invvpid m128, r64","66 0F 38 81 /r","N.S.","V","VTX","default64,modrm_memonly","r,r","","" +"IN AX, DX","INW DX, AX","inw DX, AX","ED","V","V","","operand16","w,r","Y","16" +"IN AX, imm8u","INW imm8u, AX","inw imm8u, AX","E5 ib","V","V","","operand16","w,r","Y","16" +"IRETD","IRETL","iretl","CF","V","V","","operand32","","","" +"IRETQ","IRETQ","iretq","REX.W CF","N.S.","V","","","","","" +"IRET","IRETW","iretw","CF","V","V","","operand16","","","" +"JA rel16","JA rel16","ja rel16","0F 87 cw","V","N.S.","","operand16","r","","" +"JA rel32","JA rel32","ja rel32","0F 87 cd","V","N.S.","","operand32","r","","" +"JA rel32","JA rel32","ja rel32","0F 87 cd","N.S.","V","","default64","r","","" +"JA rel8","JA rel8","ja rel8","77 cb","N.S.","V","","default64","r","","" +"JA rel8","JA rel8","ja rel8","77 cb","V","N.S.","","","r","","" +"JAE rel16","JAE rel16","jae rel16","0F 83 cw","V","N.S.","","operand16","r","","" +"JAE rel32","JAE rel32","jae rel32","0F 83 
cd","N.S.","V","","default64","r","","" +"JAE rel32","JAE rel32","jae rel32","0F 83 cd","V","N.S.","","operand32","r","","" +"JAE rel8","JAE rel8","jae rel8","73 cb","V","N.S.","","","r","","" +"JAE rel8","JAE rel8","jae rel8","73 cb","N.S.","V","","default64","r","","" +"JB rel16","JB rel16","jb rel16","0F 82 cw","V","N.S.","","operand16","r","","" +"JB rel32","JB rel32","jb rel32","0F 82 cd","V","N.S.","","operand32","r","","" +"JB rel32","JB rel32","jb rel32","0F 82 cd","N.S.","V","","default64","r","","" +"JB rel8","JB rel8","jb rel8","72 cb","N.S.","V","","default64","r","","" +"JB rel8","JB rel8","jb rel8","72 cb","V","N.S.","","","r","","" +"JBE rel16","JBE rel16","jbe rel16","0F 86 cw","V","N.S.","","operand16","r","","" +"JBE rel32","JBE rel32","jbe rel32","0F 86 cd","V","N.S.","","operand32","r","","" +"JBE rel32","JBE rel32","jbe rel32","0F 86 cd","N.S.","V","","default64","r","","" +"JBE rel8","JBE rel8","jbe rel8","76 cb","V","N.S.","","","r","","" +"JBE rel8","JBE rel8","jbe rel8","76 cb","N.S.","V","","default64","r","","" +"JC rel16","JC rel16","jc rel16","0F 82 cw","V","N.S.","","pseudo","r","","" +"JC rel32","JC rel32","jc rel32","0F 82 cd","V","V","","pseudo","r","","" +"JC rel8","JC rel8","jc rel8","72 cb","V","V","","pseudo","r","","" +"JCXZ rel8","JCXZ rel8","jcxz rel8","E3 cb","V","N.S.","","address16","r","","" +"JE rel16","JE rel16","je rel16","0F 84 cw","V","N.S.","","operand16","r","","" +"JE rel32","JE rel32","je rel32","0F 84 cd","V","N.S.","","operand32","r","","" +"JE rel32","JE rel32","je rel32","0F 84 cd","N.S.","V","","default64","r","","" +"JE rel8","JE rel8","je rel8","74 cb","N.S.","V","","default64","r","","" +"JE rel8","JE rel8","je rel8","74 cb","V","N.S.","","","r","","" +"JECXZ rel8","JECXZ rel8","jecxz rel8","E3 cb","V","V","","address32","r","","" +"JG rel16","JG rel16","jg rel16","0F 8F cw","V","N.S.","","operand16","r","","" +"JG rel32","JG rel32","jg rel32","0F 8F cd","N.S.","V","","default64","r","","" +"JG rel32","JG rel32","jg rel32","0F 8F cd","V","N.S.","","operand32","r","","" +"JG rel8","JG rel8","jg rel8","7F cb","V","N.S.","","","r","","" +"JG rel8","JG rel8","jg rel8","7F cb","N.S.","V","","default64","r","","" +"JGE rel16","JGE rel16","jge rel16","0F 8D cw","V","N.S.","","operand16","r","","" +"JGE rel32","JGE rel32","jge rel32","0F 8D cd","V","N.S.","","operand32","r","","" +"JGE rel32","JGE rel32","jge rel32","0F 8D cd","N.S.","V","","default64","r","","" +"JGE rel8","JGE rel8","jge rel8","7D cb","N.S.","V","","default64","r","","" +"JGE rel8","JGE rel8","jge rel8","7D cb","V","N.S.","","","r","","" +"JL rel16","JL rel16","jl rel16","0F 8C cw","V","N.S.","","operand16","r","","" +"JL rel32","JL rel32","jl rel32","0F 8C cd","V","N.S.","","operand32","r","","" +"JL rel32","JL rel32","jl rel32","0F 8C cd","N.S.","V","","default64","r","","" +"JL rel8","JL rel8","jl rel8","7C cb","V","N.S.","","","r","","" +"JL rel8","JL rel8","jl rel8","7C cb","N.S.","V","","default64","r","","" +"JLE rel16","JLE rel16","jle rel16","0F 8E cw","V","N.S.","","operand16","r","","" +"JLE rel32","JLE rel32","jle rel32","0F 8E cd","V","N.S.","","operand32","r","","" +"JLE rel32","JLE rel32","jle rel32","0F 8E cd","N.S.","V","","default64","r","","" +"JLE rel8","JLE rel8","jle rel8","7E cb","N.S.","V","","default64","r","","" +"JLE rel8","JLE rel8","jle rel8","7E cb","V","N.S.","","","r","","" +"JMP rel16","JMP rel16","jmp rel16","E9 cw","V","N.S.","","operand16","r","Y","" +"JMP rel32","JMP rel32","jmp rel32","E9 cd","N.S.","V","","default64","r","Y","" 
+"JMP rel32","JMP rel32","jmp rel32","E9 cd","V","N.S.","","operand32","r","Y","" +"JMP rel8","JMP rel8","jmp rel8","EB cb","N.S.","V","","default64","r","Y","" +"JMP rel8","JMP rel8","jmp rel8","EB cb","V","N.S.","","","r","Y","" +"JMP r/m32","JMPL* r/m32","jmpl* r/m32","FF /4","V","N.S.","","operand32","r","Y","32" +"JMP r/m64","JMPQ* r/m64","jmpq* r/m64","FF /4","N.S.","V","","","r","Y","64" +"JMP r/m16","JMPW* r/m16","jmpw* r/m16","FF /4","V","N.S.","","operand16","r","Y","16" +"JNA rel16","JNA rel16","jna rel16","0F 86 cw","V","N.S.","","pseudo","r","","" +"JNA rel32","JNA rel32","jna rel32","0F 86 cd","V","V","","pseudo","r","","" +"JNA rel8","JNA rel8","jna rel8","76 cb","V","V","","pseudo","r","","" +"JNAE rel16","JNAE rel16","jnae rel16","0F 82 cw","V","N.S.","","pseudo","r","","" +"JNAE rel32","JNAE rel32","jnae rel32","0F 82 cd","V","V","","pseudo","r","","" +"JNAE rel8","JNAE rel8","jnae rel8","72 cb","V","V","","pseudo","r","","" +"JNB rel16","JNB rel16","jnb rel16","0F 83 cw","V","N.S.","","pseudo","r","","" +"JNB rel32","JNB rel32","jnb rel32","0F 83 cd","V","V","","pseudo","r","","" +"JNB rel8","JNB rel8","jnb rel8","73 cb","V","V","","pseudo","r","","" +"JNBE rel16","JNBE rel16","jnbe rel16","0F 87 cw","V","N.S.","","pseudo","r","","" +"JNBE rel32","JNBE rel32","jnbe rel32","0F 87 cd","V","V","","pseudo","r","","" +"JNBE rel8","JNBE rel8","jnbe rel8","77 cb","V","V","","pseudo","r","","" +"JNC rel16","JNC rel16","jnc rel16","0F 83 cw","V","N.S.","","pseudo","r","","" +"JNC rel32","JNC rel32","jnc rel32","0F 83 cd","V","V","","pseudo","r","","" +"JNC rel8","JNC rel8","jnc rel8","73 cb","V","V","","pseudo","r","","" +"JNE rel16","JNE rel16","jne rel16","0F 85 cw","V","N.S.","","operand16","r","","" +"JNE rel32","JNE rel32","jne rel32","0F 85 cd","N.S.","V","","default64","r","","" +"JNE rel32","JNE rel32","jne rel32","0F 85 cd","V","N.S.","","operand32","r","","" +"JNE rel8","JNE rel8","jne rel8","75 cb","V","N.S.","","","r","","" +"JNE rel8","JNE rel8","jne rel8","75 cb","N.S.","V","","default64","r","","" +"JNG rel16","JNG rel16","jng rel16","0F 8E cw","V","N.S.","","pseudo","r","","" +"JNG rel32","JNG rel32","jng rel32","0F 8E cd","V","V","","pseudo","r","","" +"JNG rel8","JNG rel8","jng rel8","7E cb","V","V","","pseudo","r","","" +"JNGE rel16","JNGE rel16","jnge rel16","0F 8C cw","V","N.S.","","pseudo","r","","" +"JNGE rel32","JNGE rel32","jnge rel32","0F 8C cd","V","V","","pseudo","r","","" +"JNGE rel8","JNGE rel8","jnge rel8","7C cb","V","V","","pseudo","r","","" +"JNL rel16","JNL rel16","jnl rel16","0F 8D cw","V","N.S.","","pseudo","r","","" +"JNL rel32","JNL rel32","jnl rel32","0F 8D cd","V","V","","pseudo","r","","" +"JNL rel8","JNL rel8","jnl rel8","7D cb","V","V","","pseudo","r","","" +"JNLE rel16","JNLE rel16","jnle rel16","0F 8F cw","V","N.S.","","pseudo","r","","" +"JNLE rel32","JNLE rel32","jnle rel32","0F 8F cd","V","V","","pseudo","r","","" +"JNLE rel8","JNLE rel8","jnle rel8","7F cb","V","V","","pseudo","r","","" +"JNO rel16","JNO rel16","jno rel16","0F 81 cw","V","N.S.","","operand16","r","","" +"JNO rel32","JNO rel32","jno rel32","0F 81 cd","V","N.S.","","operand32","r","","" +"JNO rel32","JNO rel32","jno rel32","0F 81 cd","N.S.","V","","default64","r","","" +"JNO rel8","JNO rel8","jno rel8","71 cb","V","N.S.","","","r","","" +"JNO rel8","JNO rel8","jno rel8","71 cb","N.S.","V","","default64","r","","" +"JNP rel16","JNP rel16","jnp rel16","0F 8B cw","V","N.S.","","operand16","r","","" +"JNP rel32","JNP rel32","jnp rel32","0F 8B 
cd","V","N.S.","","operand32","r","","" +"JNP rel32","JNP rel32","jnp rel32","0F 8B cd","N.S.","V","","default64","r","","" +"JNP rel8","JNP rel8","jnp rel8","7B cb","N.S.","V","","default64","r","","" +"JNP rel8","JNP rel8","jnp rel8","7B cb","V","N.S.","","","r","","" +"JNS rel16","JNS rel16","jns rel16","0F 89 cw","V","N.S.","","operand16","r","","" +"JNS rel32","JNS rel32","jns rel32","0F 89 cd","N.S.","V","","default64","r","","" +"JNS rel32","JNS rel32","jns rel32","0F 89 cd","V","N.S.","","operand32","r","","" +"JNS rel8","JNS rel8","jns rel8","79 cb","V","N.S.","","","r","","" +"JNS rel8","JNS rel8","jns rel8","79 cb","N.S.","V","","default64","r","","" +"JNZ rel16","JNZ rel16","jnz rel16","0F 85 cw","V","N.S.","","pseudo","r","","" +"JNZ rel32","JNZ rel32","jnz rel32","0F 85 cd","V","V","","pseudo","r","","" +"JNZ rel8","JNZ rel8","jnz rel8","75 cb","V","V","","pseudo","r","","" +"JO rel16","JO rel16","jo rel16","0F 80 cw","V","N.S.","","operand16","r","","" +"JO rel32","JO rel32","jo rel32","0F 80 cd","V","N.S.","","operand32","r","","" +"JO rel32","JO rel32","jo rel32","0F 80 cd","N.S.","V","","default64","r","","" +"JO rel8","JO rel8","jo rel8","70 cb","V","N.S.","","","r","","" +"JO rel8","JO rel8","jo rel8","70 cb","N.S.","V","","default64","r","","" +"JP rel16","JP rel16","jp rel16","0F 8A cw","V","N.S.","","operand16","r","","" +"JP rel32","JP rel32","jp rel32","0F 8A cd","N.S.","V","","default64","r","","" +"JP rel32","JP rel32","jp rel32","0F 8A cd","V","N.S.","","operand32","r","","" +"JP rel8","JP rel8","jp rel8","7A cb","N.S.","V","","default64","r","","" +"JP rel8","JP rel8","jp rel8","7A cb","V","N.S.","","","r","","" +"JPE rel16","JPE rel16","jpe rel16","0F 8A cw","V","N.S.","","pseudo","r","","" +"JPE rel32","JPE rel32","jpe rel32","0F 8A cd","V","V","","pseudo","r","","" +"JPE rel8","JPE rel8","jpe rel8","7A cb","V","V","","pseudo","r","","" +"JPO rel16","JPO rel16","jpo rel16","0F 8B cw","V","N.S.","","pseudo","r","","" +"JPO rel32","JPO rel32","jpo rel32","0F 8B cd","V","V","","pseudo","r","","" +"JPO rel8","JPO rel8","jpo rel8","7B cb","V","V","","pseudo","r","","" +"JRCXZ rel8","JRCXZ rel8","jrcxz rel8","E3 cb","N.S.","V","","address64","r","","" +"JS rel16","JS rel16","js rel16","0F 88 cw","V","N.S.","","operand16","r","","" +"JS rel32","JS rel32","js rel32","0F 88 cd","V","N.S.","","operand32","r","","" +"JS rel32","JS rel32","js rel32","0F 88 cd","N.S.","V","","default64","r","","" +"JS rel8","JS rel8","js rel8","78 cb","V","N.S.","","","r","","" +"JS rel8","JS rel8","js rel8","78 cb","N.S.","V","","default64","r","","" +"JZ rel16","JZ rel16","jz rel16","0F 84 cw","V","N.S.","","operand16,pseudo","r","","" +"JZ rel32","JZ rel32","jz rel32","0F 84 cd","V","V","","operand32,pseudo","r","","" +"JZ rel8","JZ rel8","jz rel8","74 cb","V","V","","pseudo","r","","" +"KADDB k1, kV, k2","KADDB k2, kV, k1","kaddb k2, kV, k1","VEX.NDS.256.66.0F.W0 4A /r","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"KADDD k1, kV, k2","KADDD k2, kV, k1","kaddd k2, kV, k1","VEX.NDS.256.66.0F.W1 4A /r","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"KADDQ k1, kV, k2","KADDQ k2, kV, k1","kaddq k2, kV, k1","VEX.NDS.256.0F.W1 4A /r","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"KADDW k1, kV, k2","KADDW k2, kV, k1","kaddw k2, kV, k1","VEX.NDS.256.0F.W0 4A /r","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"KANDB k1, kV, k2","KANDB k2, kV, k1","kandb k2, kV, k1","VEX.NDS.256.66.0F.W0 41 /r","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"KANDD k1, kV, k2","KANDD k2, kV, 
k1","kandd k2, kV, k1","VEX.NDS.256.66.0F.W1 41 /r","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"KANDNB k1, kV, k2","KANDNB k2, kV, k1","kandnb k2, kV, k1","VEX.NDS.256.66.0F.W0 42 /r","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"KANDND k1, kV, k2","KANDND k2, kV, k1","kandnd k2, kV, k1","VEX.NDS.256.66.0F.W1 42 /r","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"KANDNQ k1, kV, k2","KANDNQ k2, kV, k1","kandnq k2, kV, k1","VEX.NDS.256.0F.W1 42 /r","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"KANDNW k1, kV, k2","KANDNW k2, kV, k1","kandnw k2, kV, k1","VEX.NDS.256.0F.W0 42 /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"KANDQ k1, kV, k2","KANDQ k2, kV, k1","kandq k2, kV, k1","VEX.NDS.256.0F.W1 41 /r","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"KANDW k1, kV, k2","KANDW k2, kV, k1","kandw k2, kV, k1","VEX.NDS.256.0F.W0 41 /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"KMOVB m8, k1","KMOVB k1, m8","kmovb k1, m8","VEX.128.66.0F.W0 91 /r","V","V","AVX512DQ","modrm_memonly","w,r","","" +"KMOVB r32, k2","KMOVB k2, r32","kmovb k2, r32","VEX.128.66.0F.W0 93 /r","V","V","AVX512DQ","modrm_regonly","w,r","","" +"KMOVB k1, k2/m8","KMOVB k2/m8, k1","kmovb k2/m8, k1","VEX.128.66.0F.W0 90 /r","V","V","AVX512DQ","","w,r","","" +"KMOVB k1, rmr32","KMOVB rmr32, k1","kmovb rmr32, k1","VEX.128.66.0F.W0 92 /r","V","V","AVX512DQ","modrm_regonly","w,r","","" +"KMOVD m32, k1","KMOVD k1, m32","kmovd k1, m32","VEX.128.66.0F.W1 91 /r","V","V","AVX512BW","modrm_memonly","w,r","","" +"KMOVD r32, k2","KMOVD k2, r32","kmovd k2, r32","VEX.128.F2.0F.W0 93 /r","V","V","AVX512BW","modrm_regonly","w,r","","" +"KMOVD k1, k2/m32","KMOVD k2/m32, k1","kmovd k2/m32, k1","VEX.128.66.0F.W1 90 /r","V","V","AVX512BW","","w,r","","" +"KMOVD k1, rmr32","KMOVD rmr32, k1","kmovd rmr32, k1","VEX.128.F2.0F.W0 92 /r","V","V","AVX512BW","modrm_regonly","w,r","","" +"KMOVQ m64, k1","KMOVQ k1, m64","kmovq k1, m64","VEX.128.0F.W1 91 /r","V","V","AVX512BW","modrm_memonly","w,r","","" +"KMOVQ r64, k2","KMOVQ k2, r64","kmovq k2, r64","VEX.128.F2.0F.W1 93 /r","N.S.","V","AVX512BW","modrm_regonly","w,r","","" +"KMOVQ k1, k2/m64","KMOVQ k2/m64, k1","kmovq k2/m64, k1","VEX.128.0F.W1 90 /r","V","V","AVX512BW","","w,r","","" +"KMOVQ k1, rmr64","KMOVQ rmr64, k1","kmovq rmr64, k1","VEX.128.F2.0F.W1 92 /r","N.S.","V","AVX512BW","modrm_regonly","w,r","","" +"KMOVW m16, k1","KMOVW k1, m16","kmovw k1, m16","VEX.128.0F.W0 91 /r","V","V","AVX512F","modrm_memonly","w,r","","" +"KMOVW r32, k2","KMOVW k2, r32","kmovw k2, r32","VEX.128.0F.W0 93 /r","V","V","AVX512F","modrm_regonly","w,r","","" +"KMOVW k1, k2/m16","KMOVW k2/m16, k1","kmovw k2/m16, k1","VEX.128.0F.W0 90 /r","V","V","AVX512F","","w,r","","" +"KMOVW k1, rmr32","KMOVW rmr32, k1","kmovw rmr32, k1","VEX.128.0F.W0 92 /r","V","V","AVX512F","modrm_regonly","w,r","","" +"KNOTB k1, k2","KNOTB k2, k1","knotb k2, k1","VEX.128.66.0F.W0 44 /r","V","V","AVX512DQ","modrm_regonly","w,r","","" +"KNOTD k1, k2","KNOTD k2, k1","knotd k2, k1","VEX.128.66.0F.W1 44 /r","V","V","AVX512BW","modrm_regonly","w,r","","" +"KNOTQ k1, k2","KNOTQ k2, k1","knotq k2, k1","VEX.128.0F.W1 44 /r","V","V","AVX512BW","modrm_regonly","w,r","","" +"KNOTW k1, k2","KNOTW k2, k1","knotw k2, k1","VEX.128.0F.W0 44 /r","V","V","AVX512F","modrm_regonly","w,r","","" +"KORB k1, kV, k2","KORB k2, kV, k1","korb k2, kV, k1","VEX.NDS.256.66.0F.W0 45 /r","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"KORD k1, kV, k2","KORD k2, kV, k1","kord k2, kV, k1","VEX.NDS.256.66.0F.W1 45 
/r","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"KORQ k1, kV, k2","KORQ k2, kV, k1","korq k2, kV, k1","VEX.NDS.256.0F.W1 45 /r","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"KORTESTB k1, k2","KORTESTB k2, k1","kortestb k2, k1","VEX.128.66.0F.W0 98 /r","V","V","AVX512DQ","modrm_regonly","r,r","","" +"KORTESTD k1, k2","KORTESTD k2, k1","kortestd k2, k1","VEX.128.66.0F.W1 98 /r","V","V","AVX512BW","modrm_regonly","r,r","","" +"KORTESTQ k1, k2","KORTESTQ k2, k1","kortestq k2, k1","VEX.128.0F.W1 98 /r","V","V","AVX512BW","modrm_regonly","r,r","","" +"KORTESTW k1, k2","KORTESTW k2, k1","kortestw k2, k1","VEX.128.0F.W0 98 /r","V","V","AVX512F","modrm_regonly","r,r","","" +"KORW k1, kV, k2","KORW k2, kV, k1","korw k2, kV, k1","VEX.NDS.256.0F.W0 45 /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"KSHIFTLB k1, k2, imm8u","KSHIFTLB imm8u, k2, k1","kshiftlb imm8u, k2, k1","VEX.128.66.0F3A.W0 32 /r ib","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"KSHIFTLD k1, k2, imm8u","KSHIFTLD imm8u, k2, k1","kshiftld imm8u, k2, k1","VEX.128.66.0F3A.W0 33 /r ib","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"KSHIFTLQ k1, k2, imm8u","KSHIFTLQ imm8u, k2, k1","kshiftlq imm8u, k2, k1","VEX.128.66.0F3A.W1 33 /r ib","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"KSHIFTLW k1, k2, imm8u","KSHIFTLW imm8u, k2, k1","kshiftlw imm8u, k2, k1","VEX.128.66.0F3A.W1 32 /r ib","V","V","AVX512F","modrm_regonly","w,r,r","","" +"KSHIFTRB k1, k2, imm8u","KSHIFTRB imm8u, k2, k1","kshiftrb imm8u, k2, k1","VEX.128.66.0F3A.W0 30 /r ib","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"KSHIFTRD k1, k2, imm8u","KSHIFTRD imm8u, k2, k1","kshiftrd imm8u, k2, k1","VEX.128.66.0F3A.W0 31 /r ib","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"KSHIFTRQ k1, k2, imm8u","KSHIFTRQ imm8u, k2, k1","kshiftrq imm8u, k2, k1","VEX.128.66.0F3A.W1 31 /r ib","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"KSHIFTRW k1, k2, imm8u","KSHIFTRW imm8u, k2, k1","kshiftrw imm8u, k2, k1","VEX.128.66.0F3A.W1 30 /r ib","V","V","AVX512F","modrm_regonly","w,r,r","","" +"KTESTB k1, k2","KTESTB k2, k1","ktestb k2, k1","VEX.128.66.0F.W0 99 /r","V","V","AVX512DQ","modrm_regonly","r,r","","" +"KTESTD k1, k2","KTESTD k2, k1","ktestd k2, k1","VEX.128.66.0F.W1 99 /r","V","V","AVX512BW","modrm_regonly","r,r","","" +"KTESTQ k1, k2","KTESTQ k2, k1","ktestq k2, k1","VEX.128.0F.W1 99 /r","V","V","AVX512BW","modrm_regonly","r,r","","" +"KTESTW k1, k2","KTESTW k2, k1","ktestw k2, k1","VEX.128.0F.W0 99 /r","V","V","AVX512DQ","modrm_regonly","r,r","","" +"KUNPCKBW k1, kV, k2","KUNPCKBW k2, kV, k1","kunpckbw k2, kV, k1","VEX.NDS.256.66.0F.W0 4B /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"KUNPCKDQ k1, kV, k2","KUNPCKDQ k2, kV, k1","kunpckdq k2, kV, k1","VEX.NDS.256.0F.W1 4B /r","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"KUNPCKWD k1, kV, k2","KUNPCKWD k2, kV, k1","kunpckwd k2, kV, k1","VEX.NDS.256.0F.W0 4B /r","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"KXNORB k1, kV, k2","KXNORB k2, kV, k1","kxnorb k2, kV, k1","VEX.NDS.256.66.0F.W0 46 /r","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"KXNORD k1, kV, k2","KXNORD k2, kV, k1","kxnord k2, kV, k1","VEX.NDS.256.66.0F.W1 46 /r","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"KXNORQ k1, kV, k2","KXNORQ k2, kV, k1","kxnorq k2, kV, k1","VEX.NDS.256.0F.W1 46 /r","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"KXNORW k1, kV, k2","KXNORW k2, kV, k1","kxnorw k2, kV, k1","VEX.NDS.256.0F.W0 46 /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"KXORB k1, kV, k2","KXORB k2, kV, k1","kxorb 
k2, kV, k1","VEX.NDS.256.66.0F.W0 47 /r","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"KXORD k1, kV, k2","KXORD k2, kV, k1","kxord k2, kV, k1","VEX.NDS.256.66.0F.W1 47 /r","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"KXORQ k1, kV, k2","KXORQ k2, kV, k1","kxorq k2, kV, k1","VEX.NDS.256.0F.W1 47 /r","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"KXORW k1, kV, k2","KXORW k2, kV, k1","kxorw k2, kV, k1","VEX.NDS.256.0F.W0 47 /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"LAHF","LAHF","lahf","9F","V","V","LAHFSAHF","","","","" +"LAR r32, r32/m16","LARL r32/m16, r32","larl r32/m16, r32","0F 02 /r","V","V","","operand32","rw,r","Y","32" +"LAR r64, r64/m16","LARQ r64/m16, r64","larq r64/m16, r64","REX.W 0F 02 /r","N.S.","V","","","rw,r","Y","64" +"LAR r16, r/m16","LARW r/m16, r16","larw r/m16, r16","0F 02 /r","V","V","","operand16","rw,r","Y","16" +"CALL_FAR ptr16:32","LCALLL ptr16:32","lcalll ptr16:32","9A cd iw","V","N.S.","","operand32","r","Y","" +"CALL_FAR m16:32","LCALLL* m16:32","lcalll* m16:32","FF /3","V","V","","modrm_memonly,operand32","r","Y","" +"CALL_FAR m16:64","LCALLQ* m16:64","lcallq* m16:64","REX.W FF /3","N.S.","V","","modrm_memonly","r","Y","" +"CALL_FAR ptr16:16","LCALLW ptr16:16","lcallw ptr16:16","9A cw iw","V","N.S.","","operand16","r","Y","" +"CALL_FAR m16:16","LCALLW* m16:16","lcallw* m16:16","FF /3","V","V","","modrm_memonly,operand16","r","Y","" +"LDDQU xmm1, m128","LDDQU m128, xmm1","lddqu m128, xmm1","F2 0F F0 /r","V","V","SSE3","modrm_memonly","w,r","","" +"LDMXCSR m32","LDMXCSR m32","ldmxcsr m32","0F AE /2","V","V","SSE","modrm_memonly","r","","" +"LDS r32, m16:32","LDSL m16:32, r32","ldsl m16:32, r32","C5 /r","V","N.S.","","modrm_memonly,operand32","w,r","Y","32" +"LDS r16, m16:16","LDSW m16:16, r16","ldsw m16:16, r16","C5 /r","V","N.S.","","modrm_memonly,operand16","w,r","Y","16" +"LEA r32, m","LEAL m, r32","leal m, r32","8D /r","V","V","","modrm_memonly,operand32","w,r","Y","32" +"LEA r64, m","LEAQ m, r64","leaq m, r64","REX.W 8D /r","N.S.","V","","modrm_memonly","w,r","Y","64" +"LEAVE","LEAVEW/LEAVEL/LEAVEQ","leavew/leavel/leaveq","C9","N.S.","V","","default64","","Y","" +"LEAVE","LEAVEW/LEAVEL/LEAVEQ","leavew/leavel/leaveq","C9","V","N.S.","","operand32","","Y","" +"LEAVE","LEAVEW/LEAVEL/LEAVEQ","leavew/leavel/leaveq","C9","V","V","","operand16","","Y","" +"LEA r16, m","LEAW m, r16","leaw m, r16","8D /r","V","V","","modrm_memonly,operand16","w,r","Y","16" +"LES r32, m16:32","LESL m16:32, r32","lesl m16:32, r32","C4 /r","V","N.S.","","modrm_memonly,operand32","w,r","Y","32" +"LES r16, m16:16","LESW m16:16, r16","lesw m16:16, r16","C4 /r","V","N.S.","","modrm_memonly,operand16","w,r","Y","16" +"LFENCE","LFENCE","lfence","0F AE /5","V","V","SSE2","","","","" +"LFS r32, m16:32","LFSL m16:32, r32","lfsl m16:32, r32","0F B4 /r","V","V","","modrm_memonly,operand32","w,r","Y","32" +"LFS r64, m16:64","LFSQ m16:64, r64","lfsq m16:64, r64","REX.W 0F B4 /r","N.S.","V","","modrm_memonly","w,r","Y","64" +"LFS r16, m16:16","LFSW m16:16, r16","lfsw m16:16, r16","0F B4 /r","V","V","","modrm_memonly,operand16","w,r","Y","16" +"LGDT m16&64","LGDT m16&64","lgdt m16&64","0F 01 /2","N.S.","V","","default64,modrm_memonly","r","","" +"LGDT m16&32","LGDTW/LGDTL m16&32","lgdtw/lgdtl m16&32","0F 01 /2","V","N.S.","","modrm_memonly","r","","" +"LGS r32, m16:32","LGSL m16:32, r32","lgsl m16:32, r32","0F B5 /r","V","V","","modrm_memonly,operand32","w,r","Y","32" +"LGS r64, m16:64","LGSQ m16:64, r64","lgsq m16:64, r64","REX.W 0F B5 
/r","N.S.","V","","modrm_memonly","w,r","Y","64" +"LGS r16, m16:16","LGSW m16:16, r16","lgsw m16:16, r16","0F B5 /r","V","V","","modrm_memonly,operand16","w,r","Y","16" +"LIDT m16&64","LIDT m16&64","lidt m16&64","0F 01 /3","N.S.","V","","default64,modrm_memonly","r","","" +"LIDT m16&32","LIDTW/LIDTL m16&32","lidtw/lidtl m16&32","0F 01 /3","V","N.S.","","modrm_memonly","r","","" +"JMP_FAR ptr16:32","LJMPL ptr16:32","ljmpl ptr16:32","EA cd iw","V","N.S.","","operand32","r","Y","" +"JMP_FAR m16:32","LJMPL* m16:32","ljmpl* m16:32","FF /5","V","V","","modrm_memonly,operand32","r","Y","" +"JMP_FAR m16:64","LJMPQ* m16:64","ljmpq* m16:64","REX.W FF /5","N.S.","V","","modrm_memonly","r","Y","" +"JMP_FAR ptr16:16","LJMPW ptr16:16","ljmpw ptr16:16","EA cw iw","V","N.S.","","operand16","r","Y","" +"JMP_FAR m16:16","LJMPW* m16:16","ljmpw* m16:16","FF /5","V","V","","modrm_memonly,operand16","r","Y","" +"LLDT r/m16","LLDT r/m16","lldt r/m16","0F 00 /2","V","V","","","r","","" +"LLWPCB rmr32","LLWPCBL rmr32","llwpcbl rmr32","XOP.128.09.W0 12 /0","V","V","XOP","amd,modrm_regonly,operand16,operand32","w","Y","32" +"LLWPCB rmr64","LLWPCBQ rmr64","llwpcbq rmr64","XOP.128.09.W0 12 /0","N.S.","V","XOP","amd,modrm_regonly,operand64","w","Y","64" +"LMSW r/m16","LMSW r/m16","lmsw r/m16","0F 01 /6","V","V","","","r","","" +"LOCK","LOCK","lock","F0","V","V","","pseudo","","","" +"LODSB","LODSB","lodsb","AC","V","V","","","","","" +"LODSD","LODSL","lodsl","AD","V","V","","operand32","","","" +"LODSQ","LODSQ","lodsq","REX.W AD","N.S.","V","","","","","" +"LODSW","LODSW","lodsw","AD","V","V","","operand16","","","" +"LOOP rel8","LOOP rel8","loop rel8","E2 cb","V","V","","","r","","" +"LOOPE rel8","LOOPEQ rel8","loope rel8","E1 cb","V","V","","","r","","" +"LOOPNE rel8","LOOPNE rel8","loopne rel8","E0 cb","V","V","","","r","","" +"LSL r32, r32/m16","LSLL r32/m16, r32","lsll r32/m16, r32","0F 03 /r","V","V","","operand32","rw,r","Y","32" +"LSL r64, r32/m16","LSLQ r32/m16, r64","lslq r32/m16, r64","REX.W 0F 03 /r","N.S.","V","","","rw,r","Y","64" +"LSL r16, r/m16","LSLW r/m16, r16","lslw r/m16, r16","0F 03 /r","V","V","","operand16","rw,r","Y","16" +"LSS r32, m16:32","LSSL m16:32, r32","lssl m16:32, r32","0F B2 /r","V","V","","modrm_memonly,operand32","w,r","Y","32" +"LSS r64, m16:64","LSSQ m16:64, r64","lssq m16:64, r64","REX.W 0F B2 /r","N.S.","V","","modrm_memonly","w,r","Y","64" +"LSS r16, m16:16","LSSW m16:16, r16","lssw m16:16, r16","0F B2 /r","V","V","","modrm_memonly,operand16","w,r","Y","16" +"LTR r/m16","LTR r/m16","ltr r/m16","0F 00 /3","V","V","","","r","","" +"LWPINS r32V, r/m32, imm32u","LWPINS imm32u, r/m32, r32V","lwpins imm32u, r/m32, r32V","XOP.NDD.128.0A.W0 12 /0","V","V","XOP","amd,operand16,operand32","w,r,r","","" +"LWPINS r64V, r64/m32, imm32u","LWPINS imm32u, r64/m32, r64V","lwpins imm32u, r64/m32, r64V","XOP.NDD.128.0A.W0 12 /0","N.S.","V","XOP","amd,operand64","w,r,r","","" +"LWPVAL r32V, r/m32, imm32u","LWPVAL imm32u, r/m32, r32V","lwpval imm32u, r/m32, r32V","XOP.NDD.128.0A.W0 12 /1","V","V","XOP","amd,operand16,operand32","w,r,r","","" +"LWPVAL r64V, r64/m32, imm32u","LWPVAL imm32u, r64/m32, r64V","lwpval imm32u, r64/m32, r64V","XOP.NDD.128.0A.W0 12 /1","N.S.","V","XOP","amd,operand64","w,r,r","","" +"LZCNT r32, r/m32","LZCNTL r/m32, r32","lzcntl r/m32, r32","F3 0F BD /r","V","V","LZCNT","operand32","w,r","Y","32" +"LZCNT r32, r/m32","LZCNTL r/m32, r32","lzcntl r/m32, r32","F3 0F BD /r","V","V","AMD","amd,operand32","w,r","Y","32" +"LZCNT r64, r/m64","LZCNTQ r/m64, r64","lzcntq r/m64, 
r64","F3 REX.W 0F BD /r","N.S.","V","AMD","amd","w,r","Y","64" +"LZCNT r64, r/m64","LZCNTQ r/m64, r64","lzcntq r/m64, r64","F3 REX.W 0F BD /r","N.S.","V","LZCNT","","w,r","Y","64" +"LZCNT r16, r/m16","LZCNTW r/m16, r16","lzcntw r/m16, r16","F3 0F BD /r","V","V","AMD","amd,operand16","w,r","Y","16" +"LZCNT r16, r/m16","LZCNTW r/m16, r16","lzcntw r/m16, r16","F3 0F BD /r","V","V","LZCNT","operand16","w,r","Y","16" +"MASKMOVDQU xmm1, xmm2","MASKMOVOU xmm2, xmm1","maskmovdqu xmm2, xmm1","66 0F F7 /r","V","V","SSE2","modrm_regonly","r,r","","" +"MASKMOVQ mm1, mm2","MASKMOVQ mm2, mm1","maskmovq mm2, mm1","0F F7 /r","V","V","MMX","modrm_regonly","r,r","","" +"MAXPD xmm1, xmm2/m128","MAXPD xmm2/m128, xmm1","maxpd xmm2/m128, xmm1","66 0F 5F /r","V","V","SSE2","","rw,r","","" +"MAXPS xmm1, xmm2/m128","MAXPS xmm2/m128, xmm1","maxps xmm2/m128, xmm1","0F 5F /r","V","V","SSE","","rw,r","","" +"MAXSD xmm1, xmm2/m64","MAXSD xmm2/m64, xmm1","maxsd xmm2/m64, xmm1","F2 0F 5F /r","V","V","SSE2","","rw,r","","" +"MAXSS xmm1, xmm2/m32","MAXSS xmm2/m32, xmm1","maxss xmm2/m32, xmm1","F3 0F 5F /r","V","V","SSE","","rw,r","","" +"MFENCE","MFENCE","mfence","0F AE /6","V","V","SSE2","","","","" +"MINPD xmm1, xmm2/m128","MINPD xmm2/m128, xmm1","minpd xmm2/m128, xmm1","66 0F 5D /r","V","V","SSE2","","rw,r","","" +"MINPS xmm1, xmm2/m128","MINPS xmm2/m128, xmm1","minps xmm2/m128, xmm1","0F 5D /r","V","V","SSE","","rw,r","","" +"MINSD xmm1, xmm2/m64","MINSD xmm2/m64, xmm1","minsd xmm2/m64, xmm1","F2 0F 5D /r","V","V","SSE2","","rw,r","","" +"MINSS xmm1, xmm2/m32","MINSS xmm2/m32, xmm1","minss xmm2/m32, xmm1","F3 0F 5D /r","V","V","SSE","","rw,r","","" +"MONITOR","MONITOR","monitor","0F 01 C8","V","V","MONITOR","","","","" +"MOVAPD xmm2/m128, xmm1","MOVAPD xmm1, xmm2/m128","movapd xmm1, xmm2/m128","66 0F 29 /r","V","V","SSE2","","w,r","","" +"MOVAPD xmm1, xmm2/m128","MOVAPD xmm2/m128, xmm1","movapd xmm2/m128, xmm1","66 0F 28 /r","V","V","SSE2","","w,r","","" +"MOVAPS xmm2/m128, xmm1","MOVAPS xmm1, xmm2/m128","movaps xmm1, xmm2/m128","0F 29 /r","V","V","SSE","","w,r","","" +"MOVAPS xmm1, xmm2/m128","MOVAPS xmm2/m128, xmm1","movaps xmm2/m128, xmm1","0F 28 /r","V","V","SSE","","w,r","","" +"MOV r/m8, imm8u","MOVB imm8u, r/m8","movb imm8u, r/m8","C6 /0 ib","V","V","","","w,r","Y","8" +"MOV r/m8, imm8u","MOVB imm8u, r/m8","movb imm8u, r/m8","REX C6 /0 ib","N.E.","V","","pseudo64","w,r","Y","8" +"MOV r8op, imm8u","MOVB imm8u, r8op","movb imm8u, r8op","B0+rb ib","V","V","","","w,r","Y","8" +"MOV r8op, imm8u","MOVB imm8u, r8op","movb imm8u, r8op","REX B0+rb ib","N.E.","V","","pseudo64","w,r","Y","8" +"MOV r8, r/m8","MOVB r/m8, r8","movb r/m8, r8","8A /r","V","V","","","w,r","Y","8" +"MOV r8, r/m8","MOVB r/m8, r8","movb r/m8, r8","REX 8A /r","N.E.","V","","pseudo64","w,r","Y","8" +"MOV r/m8, r8","MOVB r8, r/m8","movb r8, r/m8","88 /r","V","V","","","w,r","Y","8" +"MOV r/m8, r8","MOVB r8, r/m8","movb r8, r/m8","REX 88 /r","N.E.","V","","pseudo64","w,r","Y","8" +"MOV moffs8, AL","MOVB/MOVB/MOVABSB AL, moffs8","movb/movb/movabsb AL, moffs8","A2 cm","V","V","","","w,r","Y","8" +"MOV moffs8, AL","MOVB/MOVB/MOVABSB AL, moffs8","movb/movb/movabsb AL, moffs8","REX.W A2 cm","N.E.","V","","pseudo","w,r","Y","8" +"MOV AL, moffs8","MOVB/MOVB/MOVABSB moffs8, AL","movb/movb/movabsb moffs8, AL","A0 cm","V","V","","","w,r","Y","8" +"MOV AL, moffs8","MOVB/MOVB/MOVABSB moffs8, AL","movb/movb/movabsb moffs8, AL","REX.W A0 cm","N.E.","V","","pseudo","w,r","Y","8" +"MOVBE r32, m32","MOVBELL m32, r32","movbell m32, r32","0F 38 F0 
/r","V","V","MOVBE","modrm_memonly,operand32","w,r","Y","32" +"MOVBE m32, r32","MOVBELL r32, m32","movbell r32, m32","0F 38 F1 /r","V","V","MOVBE","modrm_memonly,operand32","w,r","Y","32" +"MOVBE r64, m64","MOVBEQQ m64, r64","movbeqq m64, r64","REX.W 0F 38 F0 /r","N.S.","V","MOVBE","modrm_memonly","w,r","Y","64" +"MOVBE m64, r64","MOVBEQQ r64, m64","movbeqq r64, m64","REX.W 0F 38 F1 /r","N.S.","V","MOVBE","modrm_memonly","w,r","Y","64" +"MOVBE r16, m16","MOVBEWW m16, r16","movbeww m16, r16","0F 38 F0 /r","V","V","MOVBE","modrm_memonly,operand16","w,r","Y","16" +"MOVBE m16, r16","MOVBEWW r16, m16","movbeww r16, m16","0F 38 F1 /r","V","V","MOVBE","modrm_memonly,operand16","w,r","Y","16" +"MOVSX r32, r/m8","MOVBLSX r/m8, r32","movsbl r/m8, r32","0F BE /r","V","V","","operand32","w,r","Y","32" +"MOVZX r32, r/m8","MOVBLZX r/m8, r32","movzbl r/m8, r32","0F B6 /r","V","V","","operand32","w,r","Y","32" +"MOVSX r64, r/m8","MOVBQSX r/m8, r64","movsbq r/m8, r64","REX.W 0F BE /r","N.S.","V","","","w,r","Y","64" +"MOVZX r64, r/m8","MOVBQZX r/m8, r64","movzbq r/m8, r64","REX.W 0F B6 /r","N.S.","V","","","w,r","Y","64" +"MOVSX r16, r/m8","MOVBWSX r/m8, r16","movsbw r/m8, r16","0F BE /r","V","V","","operand16","w,r","Y","16" +"MOVZX r16, r/m8","MOVBWZX r/m8, r16","movzbw r/m8, r16","0F B6 /r","V","V","","operand16","w,r","Y","16" +"MOVD r/m32, mm1","MOVD mm1, r/m32","movd mm1, r/m32","0F 7E /r","V","V","MMX","operand16,operand32","w,r","","" +"MOVD mm1, r/m32","MOVD r/m32, mm1","movd r/m32, mm1","0F 6E /r","V","V","MMX","operand16,operand32","w,r","","" +"MOVD xmm1, r/m32","MOVD r/m32, xmm1","movd r/m32, xmm1","66 0F 6E /r","V","V","SSE2","operand16,operand32","w,r","","" +"MOVD r/m32, xmm1","MOVD xmm1, r/m32","movd xmm1, r/m32","66 0F 7E /r","V","V","SSE2","operand16,operand32","w,r","","" +"MOVDDUP xmm1, xmm2/m64","MOVDDUP xmm2/m64, xmm1","movddup xmm2/m64, xmm1","F2 0F 12 /r","V","V","SSE3","","w,r","","" +"MOVHLPS xmm1, xmm2","MOVHLPS xmm2, xmm1","movhlps xmm2, xmm1","0F 12 /r","V","V","SSE","modrm_regonly","w,r","","" +"MOVHPD xmm1, m64","MOVHPD m64, xmm1","movhpd m64, xmm1","66 0F 16 /r","V","V","SSE2","modrm_memonly","w,r","","" +"MOVHPD m64, xmm1","MOVHPD xmm1, m64","movhpd xmm1, m64","66 0F 17 /r","V","V","SSE2","modrm_memonly","w,r","","" +"MOVHPS xmm1, m64","MOVHPS m64, xmm1","movhps m64, xmm1","0F 16 /r","V","V","SSE","modrm_memonly","w,r","","" +"MOVHPS m64, xmm1","MOVHPS xmm1, m64","movhps xmm1, m64","0F 17 /r","V","V","SSE","modrm_memonly","w,r","","" +"MOV rmr32, CR0-CR7","MOVL CR0-CR7, rmr32","movl CR0-CR7, rmr32","0F 20 /r","V","N.S.","","","w,r","Y","32" +"MOV rmr32, DR0-DR7","MOVL DR0-DR7, rmr32","movl DR0-DR7, rmr32","0F 21 /r","V","N.S.","","","w,r","Y","32" +"MOV moffs32, EAX","MOVL EAX, moffs32","movl EAX, moffs32","A3 cm","V","V","","operand32","w,r","Y","32" +"MOV r/m32, imm32","MOVL imm32, r/m32","movl imm32, r/m32","C7 /0 id","V","V","","operand32","w,r","Y","32" +"MOV r32op, imm32u","MOVL imm32u, r32op","movl imm32u, r32op","B8+rd id","V","V","","operand32","w,r","Y","32" +"MOV EAX, moffs32","MOVL moffs32, EAX","movl moffs32, EAX","A1 cm","V","V","","operand32","w,r","Y","32" +"MOV r32, r/m32","MOVL r/m32, r32","movl r/m32, r32","8B /r","V","V","","operand32","w,r","Y","32" +"MOV r/m32, r32","MOVL r32, r/m32","movl r32, r/m32","89 /r","V","V","","operand32","w,r","Y","32" +"MOV CR0-CR7, rmr32","MOVL rmr32, CR0-CR7","movl rmr32, CR0-CR7","0F 22 /r","V","N.S.","","","w,r","Y","32" +"MOV DR0-DR7, rmr32","MOVL rmr32, DR0-DR7","movl rmr32, DR0-DR7","0F 23 
/r","V","N.S.","","","w,r","Y","32" +"MOVLHPS xmm1, xmm2","MOVLHPS xmm2, xmm1","movlhps xmm2, xmm1","0F 16 /r","V","V","SSE","modrm_regonly","w,r","","" +"MOVLPD xmm1, m64","MOVLPD m64, xmm1","movlpd m64, xmm1","66 0F 12 /r","V","V","SSE2","modrm_memonly","w,r","","" +"MOVLPD m64, xmm1","MOVLPD xmm1, m64","movlpd xmm1, m64","66 0F 13 /r","V","V","SSE2","modrm_memonly","w,r","","" +"MOVLPS xmm1, m64","MOVLPS m64, xmm1","movlps m64, xmm1","0F 12 /r","V","V","SSE","modrm_memonly","w,r","","" +"MOVLPS m64, xmm1","MOVLPS xmm1, m64","movlps xmm1, m64","0F 13 /r","V","V","SSE","modrm_memonly","w,r","","" +"MOVSXD r32, r/m32","MOVLQSX r/m32, r32","movsxdl r/m32, r32","63 /r","N.S.","V","","operand32","w,r","Y","32" +"MOVSXD r64, r/m32","MOVLQSX r/m32, r64","movslq r/m32, r64","REX.W 63 /r","N.S.","V","","","w,r","Y","64" +"MOVMSKPD r32, xmm2","MOVMSKPD xmm2, r32","movmskpd xmm2, r32","66 0F 50 /r","V","V","SSE2","modrm_regonly","w,r","","" +"MOVMSKPS r32, xmm2","MOVMSKPS xmm2, r32","movmskps xmm2, r32","0F 50 /r","V","V","SSE","modrm_regonly","w,r","","" +"MOVNTDQA xmm1, m128","MOVNTDQA m128, xmm1","movntdqa m128, xmm1","66 0F 38 2A /r","V","V","SSE4_1","modrm_memonly","w,r","","" +"MOVNTI m32, r32","MOVNTIL r32, m32","movntil r32, m32","0F C3 /r","V","V","SSE2","modrm_memonly,operand16,operand32","w,r","Y","32" +"MOVNTI m64, r64","MOVNTIQ r64, m64","movntiq r64, m64","REX.W 0F C3 /r","N.S.","V","SSE2","modrm_memonly","w,r","Y","64" +"MOVNTDQ m128, xmm1","MOVNTO xmm1, m128","movntdq xmm1, m128","66 0F E7 /r","V","V","SSE2","modrm_memonly","w,r","","" +"MOVNTPD m128, xmm1","MOVNTPD xmm1, m128","movntpd xmm1, m128","66 0F 2B /r","V","V","SSE2","modrm_memonly","w,r","","" +"MOVNTPS m128, xmm1","MOVNTPS xmm1, m128","movntps xmm1, m128","0F 2B /r","V","V","SSE","modrm_memonly","w,r","","" +"MOVNTQ m64, mm1","MOVNTQ mm1, m64","movntq mm1, m64","0F E7 /r","V","V","MMX","modrm_memonly","w,r","","" +"MOVNTSD m64, xmm1","MOVNTSD xmm1, m64","movntsd xmm1, m64","F2 0F 2B /r","V","V","SSE4a","amd,modrm_memonly","w,r","","" +"MOVNTSS m32, xmm1","MOVNTSS xmm1, m32","movntss xmm1, m32","F3 0F 2B /r","V","V","SSE4a","amd,modrm_memonly","w,r","","" +"MOVDQA xmm2/m128, xmm1","MOVO xmm1, xmm2/m128","movdqa xmm1, xmm2/m128","66 0F 7F /r","V","V","SSE2","","w,r","","" +"MOVDQA xmm1, xmm2/m128","MOVO xmm2/m128, xmm1","movdqa xmm2/m128, xmm1","66 0F 6F /r","V","V","SSE2","","w,r","","" +"MOVDQU xmm2/m128, xmm1","MOVOU xmm1, xmm2/m128","movdqu xmm1, xmm2/m128","F3 0F 7F /r","V","V","SSE2","","w,r","","" +"MOVDQU xmm1, xmm2/m128","MOVOU xmm2/m128, xmm1","movdqu xmm2/m128, xmm1","F3 0F 6F /r","V","V","SSE2","","w,r","","" +"MOV rmr64, CR0-CR7","MOVQ CR0-CR7, rmr64","movq CR0-CR7, rmr64","0F 20 /r","N.S.","V","","default64","w,r","Y","64" +"MOV rmr64, CR8","MOVQ CR8, rmr64","movq CR8, rmr64","REX.R + 0F 20 /0","N.E.","V","","modrm_regonly,pseudo","w,r","Y","64" +"MOV rmr64, DR0-DR7","MOVQ DR0-DR7, rmr64","movq DR0-DR7, rmr64","0F 21 /r","N.S.","V","","default64","w,r","Y","64" +"MOV moffs64, RAX","MOVQ RAX, moffs64","movabsq RAX, moffs64","REX.W A3 cm","N.S.","V","","","w,r","Y","64" +"MOV r/m64, imm32","MOVQ imm32, r/m64","movq imm32, r/m64","REX.W C7 /0 id","N.S.","V","","","w,r","Y","64" +"MOV r64op, imm64u","MOVQ imm64u, r64op","movq imm64u, r64op","REX.W B8+ro io","N.S.","V","","","w,r","Y","64" +"MOVQ mm2/m64, mm1","MOVQ mm1, mm2/m64","movq mm1, mm2/m64","0F 7F /r","V","V","MMX","","w,r","","" +"MOVQ r/m64, mm1","MOVQ mm1, r/m64","movq mm1, r/m64","REX.W 0F 7E /r","N.S.","V","MMX","","w,r","","" +"MOVQ mm1, 
mm2/m64","MOVQ mm2/m64, mm1","movq mm2/m64, mm1","0F 6F /r","V","V","MMX","","w,r","","" +"MOV RAX, moffs64","MOVQ moffs64, RAX","movabsq moffs64, RAX","REX.W A1 cm","N.S.","V","","","w,r","Y","64" +"MOVQ mm1, r/m64","MOVQ r/m64, mm1","movq r/m64, mm1","REX.W 0F 6E /r","N.S.","V","MMX","","w,r","","" +"MOV r64, r/m64","MOVQ r/m64, r64","movq r/m64, r64","REX.W 8B /r","N.S.","V","","","w,r","Y","64" +"MOVQ xmm1, r/m64","MOVQ r/m64, xmm1","movq r/m64, xmm1","66 REX.W 0F 6E /r","N.S.","V","SSE2","","w,r","","" +"MOV r/m64, r64","MOVQ r64, r/m64","movq r64, r/m64","REX.W 89 /r","N.S.","V","","","w,r","Y","64" +"MOV CR0-CR7, rmr64","MOVQ rmr64, CR0-CR7","movq rmr64, CR0-CR7","0F 22 /r","N.S.","V","","default64","w,r","Y","64" +"MOV CR8, rmr64","MOVQ rmr64, CR8","movq rmr64, CR8","REX.R + 0F 22 /0","N.E.","V","","modrm_regonly,pseudo","w,r","Y","64" +"MOV DR0-DR7, rmr64","MOVQ rmr64, DR0-DR7","movq rmr64, DR0-DR7","0F 23 /r","N.S.","V","","default64","w,r","Y","64" +"MOVQ r/m64, xmm1","MOVQ xmm1, r/m64","movq xmm1, r/m64","66 REX.W 0F 7E /r","N.S.","V","SSE2","","w,r","","" +"MOVQ xmm2/m64, xmm1","MOVQ xmm1, xmm2/m64","movq xmm1, xmm2/m64","66 0F D6 /r","V","V","SSE2","","w,r","","" +"MOVDQ2Q mm1, xmm2","MOVQ xmm2, mm1","movdq2q xmm2, mm1","F2 0F D6 /r","V","V","SSE2","modrm_regonly","w,r","","" +"MOVQ xmm1, xmm2/m64","MOVQ xmm2/m64, xmm1","movq xmm2/m64, xmm1","F3 0F 7E /r","V","V","SSE2","","w,r","","" +"MOVQ2DQ xmm1, mm2","MOVQOZX mm2, xmm1","movq2dq mm2, xmm1","F3 0F D6 /r","V","V","SSE2","modrm_regonly","w,r","","" +"MOVSB","MOVSB","movsb","A4","V","V","","","","","" +"MOVSD xmm2/m64, xmm1","MOVSD xmm1, xmm2/m64","movsd xmm1, xmm2/m64","F2 0F 11 /r","V","V","SSE2","","w,r","","" +"MOVSD xmm1, xmm2/m64","MOVSD xmm2/m64, xmm1","movsd xmm2/m64, xmm1","F2 0F 10 /r","V","V","SSE2","","w,r","","" +"MOVSHDUP xmm1, xmm2/m128","MOVSHDUP xmm2/m128, xmm1","movshdup xmm2/m128, xmm1","F3 0F 16 /r","V","V","SSE3","","w,r","","" +"MOVSD","MOVSL","movsl","A5","V","V","","operand32","","","" +"MOVSLDUP xmm1, xmm2/m128","MOVSLDUP xmm2/m128, xmm1","movsldup xmm2/m128, xmm1","F3 0F 12 /r","V","V","SSE3","","w,r","","" +"MOVSQ","MOVSQ","movsq","REX.W A5","N.S.","V","","","","","" +"MOVSS xmm2/m32, xmm1","MOVSS xmm1, xmm2/m32","movss xmm1, xmm2/m32","F3 0F 11 /r","V","V","SSE","","w,r","","" +"MOVSS xmm1, xmm2/m32","MOVSS xmm2/m32, xmm1","movss xmm2/m32, xmm1","F3 0F 10 /r","V","V","SSE","","w,r","","" +"MOVSW","MOVSW","movsw","A5","V","V","","operand16","","","" +"MOVSX r16, r/m16","MOVSWW r/m16, r16","movsww r/m16, r16","0F BF /r","V","V","","operand16","w,r","Y","16" +"MOVUPD xmm2/m128, xmm1","MOVUPD xmm1, xmm2/m128","movupd xmm1, xmm2/m128","66 0F 11 /r","V","V","SSE2","","w,r","","" +"MOVUPD xmm1, xmm2/m128","MOVUPD xmm2/m128, xmm1","movupd xmm2/m128, xmm1","66 0F 10 /r","V","V","SSE2","","w,r","","" +"MOVUPS xmm2/m128, xmm1","MOVUPS xmm1, xmm2/m128","movups xmm1, xmm2/m128","0F 11 /r","V","V","SSE","","w,r","","" +"MOVUPS xmm1, xmm2/m128","MOVUPS xmm2/m128, xmm1","movups xmm2/m128, xmm1","0F 10 /r","V","V","SSE","","w,r","","" +"MOV moffs16, AX","MOVW AX, moffs16","movw AX, moffs16","A3 cm","V","V","","operand16","w,r","Y","16" +"MOV r/m16, Sreg","MOVW Sreg, r/m16","movw Sreg, r/m16","8C /r","V","V","","operand16","w,r","Y","16" +"MOV r/m16, imm16","MOVW imm16, r/m16","movw imm16, r/m16","C7 /0 iw","V","V","","operand16","w,r","Y","16" +"MOV r16op, imm16u","MOVW imm16u, r16op","movw imm16u, r16op","B8+rw iw","V","V","","operand16","w,r","Y","16" +"MOV AX, moffs16","MOVW moffs16, AX","movw moffs16, AX","A1 
cm","V","V","","operand16","w,r","Y","16" +"MOV Sreg, r/m16","MOVW r/m16, Sreg","movw r/m16, Sreg","8E /r","V","V","","","w,r","Y","16" +"MOV r16, r/m16","MOVW r/m16, r16","movw r/m16, r16","8B /r","V","V","","operand16","w,r","Y","16" +"MOV r/m16, r16","MOVW r16, r/m16","movw r16, r/m16","89 /r","V","V","","operand16","w,r","Y","16" +"MOVSX r32, r/m16","MOVWLSX r/m16, r32","movswl r/m16, r32","0F BF /r","V","V","","operand32","w,r","Y","32" +"MOVZX r32, r/m16","MOVWLZX r/m16, r32","movzwl r/m16, r32","0F B7 /r","V","V","","operand32","w,r","Y","32" +"MOVSX r64, r/m16","MOVWQSX r/m16, r64","movswq r/m16, r64","REX.W 0F BF /r","N.S.","V","","","w,r","Y","64" +"MOVSXD r16, r/m32","MOVWQSX r/m32, r16","movsxdw r/m32, r16","63 /r","N.S.","V","","operand16","w,r","Y","16" +"MOVZX r64, r/m16","MOVWQZX r/m16, r64","movzwq r/m16, r64","REX.W 0F B7 /r","N.S.","V","","","w,r","Y","64" +"MOVZX r16, r/m16","MOVZWW r/m16, r16","movzww r/m16, r16","0F B7 /r","V","V","","operand16","w,r","Y","16" +"MOV r32/m16, Sreg","MOV{L/W} Sreg, r32/m16","mov{l/w} Sreg, r32/m16","8C /r","V","V","","operand32","w,r","Y","" +"MOV r64/m16, Sreg","MOV{Q/W} Sreg, r64/m16","mov{q/w} Sreg, r64/m16","REX.W 8C /r","N.S.","V","","","w,r","Y","" +"MPSADBW xmm1, xmm2/m128, imm8u","MPSADBW imm8u, xmm2/m128, xmm1","mpsadbw imm8u, xmm2/m128, xmm1","66 0F 3A 42 /r ib","V","V","SSE4_1","","rw,r,r","","" +"MUL r/m8","MULB r/m8","mulb r/m8","F6 /4","V","V","","","r","Y","8" +"MUL r/m8","MULB r/m8","mulb r/m8","REX F6 /4","N.E.","V","","pseudo64","r","Y","8" +"MUL r/m32","MULL r/m32","mull r/m32","F7 /4","V","V","","operand32","r","Y","32" +"MULPD xmm1, xmm2/m128","MULPD xmm2/m128, xmm1","mulpd xmm2/m128, xmm1","66 0F 59 /r","V","V","SSE2","","rw,r","","" +"MULPS xmm1, xmm2/m128","MULPS xmm2/m128, xmm1","mulps xmm2/m128, xmm1","0F 59 /r","V","V","SSE","","rw,r","","" +"MUL r/m64","MULQ r/m64","mulq r/m64","REX.W F7 /4","N.S.","V","","","r","Y","64" +"MULSD xmm1, xmm2/m64","MULSD xmm2/m64, xmm1","mulsd xmm2/m64, xmm1","F2 0F 59 /r","V","V","SSE2","","rw,r","","" +"MULSS xmm1, xmm2/m32","MULSS xmm2/m32, xmm1","mulss xmm2/m32, xmm1","F3 0F 59 /r","V","V","SSE","","rw,r","","" +"MUL r/m16","MULW r/m16","mulw r/m16","F7 /4","V","V","","operand16","r","Y","16" +"MULX r32, r32V, r/m32","MULXL r/m32, r32V, r32","mulxl r/m32, r32V, r32","VEX.NDD.128.F2.0F38.W0 F6 /r","V","V","BMI2","","w,w,r","Y","32" +"MULX r64, r64V, r/m64","MULXQ r/m64, r64V, r64","mulxq r/m64, r64V, r64","VEX.NDD.128.F2.0F38.W1 F6 /r","N.S.","V","BMI2","","w,w,r","Y","64" +"MWAIT","MWAIT","mwait","0F 01 C9","V","V","MONITOR","","","","" +"NEG r/m8","NEGB r/m8","negb r/m8","F6 /3","V","V","","","rw","Y","8" +"NEG r/m8","NEGB r/m8","negb r/m8","REX F6 /3","N.E.","V","","pseudo64","rw","Y","8" +"NEG r/m32","NEGL r/m32","negl r/m32","F7 /3","V","V","","operand32","rw","Y","32" +"NEG r/m64","NEGQ r/m64","negq r/m64","REX.W F7 /3","N.S.","V","","","rw","Y","64" +"NEG r/m16","NEGW r/m16","negw r/m16","F7 /3","V","V","","operand16","rw","Y","16" +"NOP","NOP","nop","90","V","V","","pseudo","","Y","" +"NOP","NOP","nop","90+rd","V","V","","operand32,operand64","","Y","" +"NOP","NOP","nop","90+rw","V","V","","operand16,operand64","","Y","" +"NOP","NOP","nop","F3 90+rd","V","V","","operand32","","Y","" +"NOP","NOP","nop","F3 90+rw","V","V","","operand16","","Y","" +"NOP r/m32","NOPL r/m32","nopl r/m32","0F 18 /4","V","V","","operand32","r","Y","32" +"NOP r/m32","NOPL r/m32","nopl r/m32","0F 18 /5","V","V","","operand32","r","Y","32" +"NOP r/m32","NOPL r/m32","nopl r/m32","0F 18 
/6","V","V","","operand32","r","Y","32" +"NOP r/m32","NOPL r/m32","nopl r/m32","0F 18 /7","V","V","","operand32","r","Y","32" +"NOP r/m32, r32","NOPL r32, r/m32","nopl r32, r/m32","0F 19 /r","V","V","","operand32","r,r","Y","32" +"NOP r/m32, r32","NOPL r32, r/m32","nopl r32, r/m32","0F 1A /r","V","V","","operand32","r,r","Y","32" +"NOP r/m32, r32","NOPL r32, r/m32","nopl r32, r/m32","0F 1B /r","V","V","","operand32","r,r","Y","32" +"NOP r/m32, r32","NOPL r32, r/m32","nopl r32, r/m32","0F 1C /r","V","V","","operand32","r,r","Y","32" +"NOP r/m32, r32","NOPL r32, r/m32","nopl r32, r/m32","0F 1D /r","V","V","","operand32","r,r","Y","32" +"NOP r/m32, r32","NOPL r32, r/m32","nopl r32, r/m32","0F 1E /r","V","V","PPRO","operand32","r,r","Y","32" +"NOP r/m32, r32","NOPL r32, r/m32","nopl r32, r/m32","0F 1E /r","V","V","","operand32","r,r","Y","32" +"NOP r/m32, r32","NOPL r32, r/m32","nopl r32, r/m32","0F 1F /r","V","V","","operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","0F 0D /r","V","V","PRFCHW","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","0F 1A /r","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","0F 1B /r","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","66 0F 1E /r","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","F2 0F 1E /r","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","F3 0F 1B /r","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","F3 0F 1E /0","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","F3 0F 1E /1","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","F3 0F 1E /2","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","F3 0F 1E /3","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","F3 0F 1E /4","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","F3 0F 1E /5","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","F3 0F 1E /6","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","F3 0F 1E F8","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","F3 0F 1E F9","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","F3 0F 1E FA","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","F3 0F 1E FB","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","F3 0F 1E FC","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","F3 0F 1E FD","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","F3 0F 1E FE","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" +"NOP rmr32, r32","NOPL r32, rmr32","nopl r32, rmr32","F3 0F 1E FF","V","V","PPRO","modrm_regonly,operand32","r,r","Y","32" 
+"NOP rmr32","NOPL rmr32","nopl rmr32","0F 18 /0","V","V","","modrm_regonly,operand32","r","Y","32" +"NOP rmr32","NOPL rmr32","nopl rmr32","0F 18 /1","V","V","","modrm_regonly,operand32","r","Y","32" +"NOP rmr32","NOPL rmr32","nopl rmr32","0F 18 /2","V","V","","modrm_regonly,operand32","r","Y","32" +"NOP rmr32","NOPL rmr32","nopl rmr32","0F 18 /3","V","V","","modrm_regonly,operand32","r","Y","32" +"NOP r/m64","NOPQ r/m64","nopq r/m64","REX.W 0F 18 /4","N.S.","V","","","r","Y","64" +"NOP r/m64","NOPQ r/m64","nopq r/m64","REX.W 0F 18 /5","N.S.","V","","","r","Y","64" +"NOP r/m64","NOPQ r/m64","nopq r/m64","REX.W 0F 18 /6","N.S.","V","","","r","Y","64" +"NOP r/m64","NOPQ r/m64","nopq r/m64","REX.W 0F 18 /7","N.S.","V","","","r","Y","64" +"NOP r/m64, r64","NOPQ r64, r/m64","nopq r64, r/m64","REX.W 0F 19 /r","N.S.","V","","","r,r","Y","64" +"NOP r/m64, r64","NOPQ r64, r/m64","nopq r64, r/m64","REX.W 0F 1A /r","N.S.","V","","","r,r","Y","64" +"NOP r/m64, r64","NOPQ r64, r/m64","nopq r64, r/m64","REX.W 0F 1B /r","N.S.","V","","","r,r","Y","64" +"NOP r/m64, r64","NOPQ r64, r/m64","nopq r64, r/m64","REX.W 0F 1C /r","N.S.","V","","","r,r","Y","64" +"NOP r/m64, r64","NOPQ r64, r/m64","nopq r64, r/m64","REX.W 0F 1D /r","N.S.","V","","","r,r","Y","64" +"NOP r/m64, r64","NOPQ r64, r/m64","nopq r64, r/m64","REX.W 0F 1E /r","N.S.","V","","","r,r","Y","64" +"NOP r/m64, r64","NOPQ r64, r/m64","nopq r64, r/m64","REX.W 0F 1E /r","N.S.","V","PPRO","","r,r","Y","64" +"NOP r/m64, r64","NOPQ r64, r/m64","nopq r64, r/m64","REX.W 0F 1F /r","N.S.","V","","","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","66 REX.W 0F 1E /r","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","F2 REX.W 0F 1E /r","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","F3 REX.W 0F 1B /r","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","F3 REX.W 0F 1E /0","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","F3 REX.W 0F 1E /1","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","F3 REX.W 0F 1E /2","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","F3 REX.W 0F 1E /3","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","F3 REX.W 0F 1E /4","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","F3 REX.W 0F 1E /5","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","F3 REX.W 0F 1E /6","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","F3 REX.W 0F 1E F8","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","F3 REX.W 0F 1E F9","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","F3 REX.W 0F 1E FA","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","F3 REX.W 0F 1E FB","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","F3 REX.W 0F 1E FC","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","F3 REX.W 0F 1E FD","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, 
r64","NOPQ r64, rmr64","nopq r64, rmr64","F3 REX.W 0F 1E FE","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","F3 REX.W 0F 1E FF","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","REX.W 0F 0D /r","N.S.","V","PRFCHW","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","REX.W 0F 1A /r","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64, r64","NOPQ r64, rmr64","nopq r64, rmr64","REX.W 0F 1B /r","N.S.","V","PPRO","modrm_regonly","r,r","Y","64" +"NOP rmr64","NOPQ rmr64","nopq rmr64","REX.W 0F 18 /0","N.S.","V","","modrm_regonly","r","Y","64" +"NOP rmr64","NOPQ rmr64","nopq rmr64","REX.W 0F 18 /1","N.S.","V","","modrm_regonly","r","Y","64" +"NOP rmr64","NOPQ rmr64","nopq rmr64","REX.W 0F 18 /2","N.S.","V","","modrm_regonly","r","Y","64" +"NOP rmr64","NOPQ rmr64","nopq rmr64","REX.W 0F 18 /3","N.S.","V","","modrm_regonly","r","Y","64" +"NOP r/m16","NOPW r/m16","nopw r/m16","0F 18 /4","V","V","","operand16","r","Y","16" +"NOP r/m16","NOPW r/m16","nopw r/m16","0F 18 /5","V","V","","operand16","r","Y","16" +"NOP r/m16","NOPW r/m16","nopw r/m16","0F 18 /6","V","V","","operand16","r","Y","16" +"NOP r/m16","NOPW r/m16","nopw r/m16","0F 18 /7","V","V","","operand16","r","Y","16" +"NOP r/m16, r16","NOPW r16, r/m16","nopw r16, r/m16","0F 19 /r","V","V","","operand16","r,r","Y","16" +"NOP r/m16, r16","NOPW r16, r/m16","nopw r16, r/m16","0F 1A /r","V","V","","operand16","r,r","Y","16" +"NOP r/m16, r16","NOPW r16, r/m16","nopw r16, r/m16","0F 1B /r","V","V","","operand16","r,r","Y","16" +"NOP r/m16, r16","NOPW r16, r/m16","nopw r16, r/m16","0F 1C /r","V","V","","operand16","r,r","Y","16" +"NOP r/m16, r16","NOPW r16, r/m16","nopw r16, r/m16","0F 1D /r","V","V","","operand16","r,r","Y","16" +"NOP r/m16, r16","NOPW r16, r/m16","nopw r16, r/m16","0F 1E /r","V","V","","operand16","r,r","Y","16" +"NOP r/m16, r16","NOPW r16, r/m16","nopw r16, r/m16","0F 1E /r","V","V","PPRO","operand16","r,r","Y","16" +"NOP r/m16, r16","NOPW r16, r/m16","nopw r16, r/m16","0F 1F /r","V","V","","operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","0F 0D /r","V","V","PRFCHW","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","0F 1A /r","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","0F 1B /r","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","66 0F 1E /r","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","F2 0F 1E /r","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","F3 0F 1B /r","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","F3 0F 1E /0","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","F3 0F 1E /1","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","F3 0F 1E /2","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","F3 0F 1E /3","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","F3 0F 1E /4","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, 
rmr16","nopw r16, rmr16","F3 0F 1E /5","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","F3 0F 1E /6","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","F3 0F 1E F8","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","F3 0F 1E F9","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","F3 0F 1E FA","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","F3 0F 1E FB","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","F3 0F 1E FC","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","F3 0F 1E FD","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","F3 0F 1E FE","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16, r16","NOPW r16, rmr16","nopw r16, rmr16","F3 0F 1E FF","V","V","PPRO","modrm_regonly,operand16","r,r","Y","16" +"NOP rmr16","NOPW rmr16","nopw rmr16","0F 18 /0","V","V","","modrm_regonly,operand16","r","Y","16" +"NOP rmr16","NOPW rmr16","nopw rmr16","0F 18 /1","V","V","","modrm_regonly,operand16","r","Y","16" +"NOP rmr16","NOPW rmr16","nopw rmr16","0F 18 /2","V","V","","modrm_regonly,operand16","r","Y","16" +"NOP rmr16","NOPW rmr16","nopw rmr16","0F 18 /3","V","V","","modrm_regonly,operand16","r","Y","16" +"NOT r/m8","NOTB r/m8","notb r/m8","F6 /2","V","V","","","rw","Y","8" +"NOT r/m8","NOTB r/m8","notb r/m8","REX F6 /2","N.E.","V","","pseudo64","rw","Y","8" +"NOT r/m32","NOTL r/m32","notl r/m32","F7 /2","V","V","","operand32","rw","Y","32" +"NOT r/m64","NOTQ r/m64","notq r/m64","REX.W F7 /2","N.S.","V","","","rw","Y","64" +"NOT r/m16","NOTW r/m16","notw r/m16","F7 /2","V","V","","operand16","rw","Y","16" +"OR r/m8, imm8","ORB imm8, r/m8","orb imm8, r/m8","80 /1 ib","V","V","","","rw,r","Y","8" +"OR r/m8, imm8","ORB imm8, r/m8","orb imm8, r/m8","82 /1 ib","V","N.S.","","","rw,r","Y","8" +"OR r/m8, imm8","ORB imm8, r/m8","orb imm8, r/m8","REX 80 /1 ib","N.E.","V","","pseudo64","rw,r","Y","8" +"OR AL, imm8u","ORB imm8u, AL","orb imm8u, AL","0C ib","V","V","","","rw,r","Y","8" +"OR r8, r/m8","ORB r/m8, r8","orb r/m8, r8","0A /r","V","V","","","rw,r","Y","8" +"OR r8, r/m8","ORB r/m8, r8","orb r/m8, r8","REX 0A /r","N.E.","V","","pseudo64","rw,r","Y","8" +"OR r/m8, r8","ORB r8, r/m8","orb r8, r/m8","08 /r","V","V","","","rw,r","Y","8" +"OR r/m8, r8","ORB r8, r/m8","orb r8, r/m8","REX 08 /r","N.E.","V","","pseudo64","rw,r","Y","8" +"OR EAX, imm32","ORL imm32, EAX","orl imm32, EAX","0D id","V","V","","operand32","rw,r","Y","32" +"OR r/m32, imm32","ORL imm32, r/m32","orl imm32, r/m32","81 /1 id","V","V","","operand32","rw,r","Y","32" +"OR r/m32, imm8","ORL imm8, r/m32","orl imm8, r/m32","83 /1 ib","V","V","","operand32","rw,r","Y","32" +"OR r32, r/m32","ORL r/m32, r32","orl r/m32, r32","0B /r","V","V","","operand32","rw,r","Y","32" +"OR r/m32, r32","ORL r32, r/m32","orl r32, r/m32","09 /r","V","V","","operand32","rw,r","Y","32" +"ORPD xmm1, xmm2/m128","ORPD xmm2/m128, xmm1","orpd xmm2/m128, xmm1","66 0F 56 /r","V","V","SSE2","","rw,r","","" +"ORPS xmm1, xmm2/m128","ORPS xmm2/m128, xmm1","orps xmm2/m128, xmm1","0F 56 /r","V","V","SSE","","rw,r","","" +"OR RAX, imm32","ORQ imm32, RAX","orq imm32, RAX","REX.W 0D 
id","N.S.","V","","","rw,r","Y","64" +"OR r/m64, imm32","ORQ imm32, r/m64","orq imm32, r/m64","REX.W 81 /1 id","N.S.","V","","","rw,r","Y","64" +"OR r/m64, imm8","ORQ imm8, r/m64","orq imm8, r/m64","REX.W 83 /1 ib","N.S.","V","","","rw,r","Y","64" +"OR r64, r/m64","ORQ r/m64, r64","orq r/m64, r64","REX.W 0B /r","N.S.","V","","","rw,r","Y","64" +"OR r/m64, r64","ORQ r64, r/m64","orq r64, r/m64","REX.W 09 /r","N.S.","V","","","rw,r","Y","64" +"OR AX, imm16","ORW imm16, AX","orw imm16, AX","0D iw","V","V","","operand16","rw,r","Y","16" +"OR r/m16, imm16","ORW imm16, r/m16","orw imm16, r/m16","81 /1 iw","V","V","","operand16","rw,r","Y","16" +"OR r/m16, imm8","ORW imm8, r/m16","orw imm8, r/m16","83 /1 ib","V","V","","operand16","rw,r","Y","16" +"OR r16, r/m16","ORW r/m16, r16","orw r/m16, r16","0B /r","V","V","","operand16","rw,r","Y","16" +"OR r/m16, r16","ORW r16, r/m16","orw r16, r/m16","09 /r","V","V","","operand16","rw,r","Y","16" +"OUT DX, AL","OUTB AL, DX","outb AL, DX","EE","V","V","","","r,r","Y","8" +"OUT imm8u, AL","OUTB AL, imm8u","outb AL, imm8u","E6 ib","V","V","","","r,r","Y","8" +"OUT DX, EAX","OUTL EAX, DX","outl EAX, DX","EF","V","V","","operand32,operand64","r,r","Y","32" +"OUT imm8u, EAX","OUTL EAX, imm8u","outl EAX, imm8u","E7 ib","V","V","","operand32,operand64","r,r","Y","32" +"OUTSB","OUTSB","outsb","6E","V","V","","","","","" +"OUTSD","OUTSL","outsl","6F","V","V","","operand32,operand64","","","" +"OUTSW","OUTSW","outsw","6F","V","V","","operand16","","","" +"OUT DX, AX","OUTW AX, DX","outw AX, DX","EF","V","V","","operand16","r,r","Y","16" +"OUT imm8u, AX","OUTW AX, imm8u","outw AX, imm8u","E7 ib","V","V","","operand16","r,r","Y","16" +"PABSB mm1, mm2/m64","PABSB mm2/m64, mm1","pabsb mm2/m64, mm1","0F 38 1C /r","V","V","SSSE3","","w,r","","" +"PABSB xmm1, xmm2/m128","PABSB xmm2/m128, xmm1","pabsb xmm2/m128, xmm1","66 0F 38 1C /r","V","V","SSSE3","","w,r","","" +"PABSD mm1, mm2/m64","PABSD mm2/m64, mm1","pabsd mm2/m64, mm1","0F 38 1E /r","V","V","SSSE3","","w,r","","" +"PABSD xmm1, xmm2/m128","PABSD xmm2/m128, xmm1","pabsd xmm2/m128, xmm1","66 0F 38 1E /r","V","V","SSSE3","","w,r","","" +"PABSW mm1, mm2/m64","PABSW mm2/m64, mm1","pabsw mm2/m64, mm1","0F 38 1D /r","V","V","SSSE3","","w,r","","" +"PABSW xmm1, xmm2/m128","PABSW xmm2/m128, xmm1","pabsw xmm2/m128, xmm1","66 0F 38 1D /r","V","V","SSSE3","","w,r","","" +"PACKSSDW mm1, mm2/m64","PACKSSLW mm2/m64, mm1","packssdw mm2/m64, mm1","0F 6B /r","V","V","MMX","","rw,r","","" +"PACKSSDW xmm1, xmm2/m128","PACKSSLW xmm2/m128, xmm1","packssdw xmm2/m128, xmm1","66 0F 6B /r","V","V","SSE2","","rw,r","","" +"PACKSSWB mm1, mm2/m64","PACKSSWB mm2/m64, mm1","packsswb mm2/m64, mm1","0F 63 /r","V","V","MMX","","rw,r","","" +"PACKSSWB xmm1, xmm2/m128","PACKSSWB xmm2/m128, xmm1","packsswb xmm2/m128, xmm1","66 0F 63 /r","V","V","SSE2","","rw,r","","" +"PACKUSDW xmm1, xmm2/m128","PACKUSDW xmm2/m128, xmm1","packusdw xmm2/m128, xmm1","66 0F 38 2B /r","V","V","SSE4_1","","rw,r","","" +"PACKUSWB mm1, mm2/m64","PACKUSWB mm2/m64, mm1","packuswb mm2/m64, mm1","0F 67 /r","V","V","MMX","","rw,r","","" +"PACKUSWB xmm1, xmm2/m128","PACKUSWB xmm2/m128, xmm1","packuswb xmm2/m128, xmm1","66 0F 67 /r","V","V","SSE2","","rw,r","","" +"PADDB mm1, mm2/m64","PADDB mm2/m64, mm1","paddb mm2/m64, mm1","0F FC /r","V","V","MMX","","rw,r","","" +"PADDB xmm1, xmm2/m128","PADDB xmm2/m128, xmm1","paddb xmm2/m128, xmm1","66 0F FC /r","V","V","SSE2","","rw,r","","" +"PADDD mm1, mm2/m64","PADDL mm2/m64, mm1","paddd mm2/m64, mm1","0F FE 
/r","V","V","MMX","","rw,r","","" +"PADDD xmm1, xmm2/m128","PADDL xmm2/m128, xmm1","paddd xmm2/m128, xmm1","66 0F FE /r","V","V","SSE2","","rw,r","","" +"PADDQ mm1, mm2/m64","PADDQ mm2/m64, mm1","paddq mm2/m64, mm1","0F D4 /r","V","V","SSE2","","rw,r","","" +"PADDQ xmm1, xmm2/m128","PADDQ xmm2/m128, xmm1","paddq xmm2/m128, xmm1","66 0F D4 /r","V","V","SSE2","","rw,r","","" +"PADDSB mm1, mm2/m64","PADDSB mm2/m64, mm1","paddsb mm2/m64, mm1","0F EC /r","V","V","MMX","","rw,r","","" +"PADDSB xmm1, xmm2/m128","PADDSB xmm2/m128, xmm1","paddsb xmm2/m128, xmm1","66 0F EC /r","V","V","SSE2","","rw,r","","" +"PADDSW mm1, mm2/m64","PADDSW mm2/m64, mm1","paddsw mm2/m64, mm1","0F ED /r","V","V","MMX","","rw,r","","" +"PADDSW xmm1, xmm2/m128","PADDSW xmm2/m128, xmm1","paddsw xmm2/m128, xmm1","66 0F ED /r","V","V","SSE2","","rw,r","","" +"PADDUSB mm1, mm2/m64","PADDUSB mm2/m64, mm1","paddusb mm2/m64, mm1","0F DC /r","V","V","MMX","","rw,r","","" +"PADDUSB xmm1, xmm2/m128","PADDUSB xmm2/m128, xmm1","paddusb xmm2/m128, xmm1","66 0F DC /r","V","V","SSE2","","rw,r","","" +"PADDUSW mm1, mm2/m64","PADDUSW mm2/m64, mm1","paddusw mm2/m64, mm1","0F DD /r","V","V","MMX","","rw,r","","" +"PADDUSW xmm1, xmm2/m128","PADDUSW xmm2/m128, xmm1","paddusw xmm2/m128, xmm1","66 0F DD /r","V","V","SSE2","","rw,r","","" +"PADDW mm1, mm2/m64","PADDW mm2/m64, mm1","paddw mm2/m64, mm1","0F FD /r","V","V","MMX","","rw,r","","" +"PADDW xmm1, xmm2/m128","PADDW xmm2/m128, xmm1","paddw xmm2/m128, xmm1","66 0F FD /r","V","V","SSE2","","rw,r","","" +"PALIGNR mm1, mm2/m64, imm8u","PALIGNR imm8u, mm2/m64, mm1","palignr imm8u, mm2/m64, mm1","0F 3A 0F /r ib","V","V","SSSE3","","rw,r,r","","" +"PALIGNR xmm1, xmm2/m128, imm8u","PALIGNR imm8u, xmm2/m128, xmm1","palignr imm8u, xmm2/m128, xmm1","66 0F 3A 0F /r ib","V","V","SSSE3","","rw,r,r","","" +"PAND mm1, mm2/m64","PAND mm2/m64, mm1","pand mm2/m64, mm1","0F DB /r","V","V","MMX","","rw,r","","" +"PAND xmm1, xmm2/m128","PAND xmm2/m128, xmm1","pand xmm2/m128, xmm1","66 0F DB /r","V","V","SSE2","","rw,r","","" +"PANDN mm1, mm2/m64","PANDN mm2/m64, mm1","pandn mm2/m64, mm1","0F DF /r","V","V","MMX","","rw,r","","" +"PANDN xmm1, xmm2/m128","PANDN xmm2/m128, xmm1","pandn xmm2/m128, xmm1","66 0F DF /r","V","V","SSE2","","rw,r","","" +"PAUSE","PAUSE","pause","F3 90","V","V","","pseudo","","","" +"PAUSE","PAUSE","pause","F3 90+rd","V","V","","operand32","","Y","" +"PAUSE","PAUSE","pause","F3 90+rw","V","V","","operand16,operand64","","Y","" +"PAVGB mm1, mm2/m64","PAVGB mm2/m64, mm1","pavgb mm2/m64, mm1","0F E0 /r","V","V","MMX","","rw,r","","" +"PAVGB xmm1, xmm2/m128","PAVGB xmm2/m128, xmm1","pavgb xmm2/m128, xmm1","66 0F E0 /r","V","V","SSE2","","rw,r","","" +"PAVGUSB mm1, mm2/m64","PAVGUSB mm2/m64, mm1","pavgusb mm2/m64, mm1","0F 0F BF /r","V","V","3DNOW","amd","rw,r","","" +"PAVGW mm1, mm2/m64","PAVGW mm2/m64, mm1","pavgw mm2/m64, mm1","0F E3 /r","V","V","MMX","","rw,r","","" +"PAVGW xmm1, xmm2/m128","PAVGW xmm2/m128, xmm1","pavgw xmm2/m128, xmm1","66 0F E3 /r","V","V","SSE2","","rw,r","","" +"PBLENDVB xmm1, xmm2/m128, ","PBLENDVB , xmm2/m128, xmm1","pblendvb , xmm2/m128, xmm1","66 0F 38 10 /r","V","V","SSE4_1","","rw,r,r","","" +"PBLENDW xmm1, xmm2/m128, imm8u","PBLENDW imm8u, xmm2/m128, xmm1","pblendw imm8u, xmm2/m128, xmm1","66 0F 3A 0E /r ib","V","V","SSE4_1","","rw,r,r","","" +"PCLMULQDQ xmm1, xmm2/m128, imm8u","PCLMULQDQ imm8u, xmm2/m128, xmm1","pclmulqdq imm8u, xmm2/m128, xmm1","66 0F 3A 44 /r ib","V","V","PCLMULQDQ","","rw,r,r","","" +"PCMPEQB mm1, mm2/m64","PCMPEQB mm2/m64, mm1","pcmpeqb 
mm2/m64, mm1","0F 74 /r","V","V","MMX","","rw,r","","" +"PCMPEQB xmm1, xmm2/m128","PCMPEQB xmm2/m128, xmm1","pcmpeqb xmm2/m128, xmm1","66 0F 74 /r","V","V","SSE2","","rw,r","","" +"PCMPEQD mm1, mm2/m64","PCMPEQL mm2/m64, mm1","pcmpeqd mm2/m64, mm1","0F 76 /r","V","V","MMX","","rw,r","","" +"PCMPEQD xmm1, xmm2/m128","PCMPEQL xmm2/m128, xmm1","pcmpeqd xmm2/m128, xmm1","66 0F 76 /r","V","V","SSE2","","rw,r","","" +"PCMPEQQ xmm1, xmm2/m128","PCMPEQQ xmm2/m128, xmm1","pcmpeqq xmm2/m128, xmm1","66 0F 38 29 /r","V","V","SSE4_1","","rw,r","","" +"PCMPEQW mm1, mm2/m64","PCMPEQW mm2/m64, mm1","pcmpeqw mm2/m64, mm1","0F 75 /r","V","V","MMX","","rw,r","","" +"PCMPEQW xmm1, xmm2/m128","PCMPEQW xmm2/m128, xmm1","pcmpeqw xmm2/m128, xmm1","66 0F 75 /r","V","V","SSE2","","rw,r","","" +"PCMPESTRI xmm1, xmm2/m128, imm8u","PCMPESTRI imm8u, xmm2/m128, xmm1","pcmpestri imm8u, xmm2/m128, xmm1","66 0F 3A 61 /r ib","V","V","SSE4_2","","r,r,r","","" +"PCMPESTRM xmm1, xmm2/m128, imm8u","PCMPESTRM imm8u, xmm2/m128, xmm1","pcmpestrm imm8u, xmm2/m128, xmm1","66 0F 3A 60 /r ib","V","V","SSE4_2","","r,r,r","","" +"PCMPGTB mm1, mm2/m64","PCMPGTB mm2/m64, mm1","pcmpgtb mm2/m64, mm1","0F 64 /r","V","V","MMX","","rw,r","","" +"PCMPGTB xmm1, xmm2/m128","PCMPGTB xmm2/m128, xmm1","pcmpgtb xmm2/m128, xmm1","66 0F 64 /r","V","V","SSE2","","rw,r","","" +"PCMPGTD mm1, mm2/m64","PCMPGTL mm2/m64, mm1","pcmpgtd mm2/m64, mm1","0F 66 /r","V","V","MMX","","rw,r","","" +"PCMPGTD xmm1, xmm2/m128","PCMPGTL xmm2/m128, xmm1","pcmpgtd xmm2/m128, xmm1","66 0F 66 /r","V","V","SSE2","","rw,r","","" +"PCMPGTQ xmm1, xmm2/m128","PCMPGTQ xmm2/m128, xmm1","pcmpgtq xmm2/m128, xmm1","66 0F 38 37 /r","V","V","SSE4_2","","rw,r","","" +"PCMPGTW mm1, mm2/m64","PCMPGTW mm2/m64, mm1","pcmpgtw mm2/m64, mm1","0F 65 /r","V","V","MMX","","rw,r","","" +"PCMPGTW xmm1, xmm2/m128","PCMPGTW xmm2/m128, xmm1","pcmpgtw xmm2/m128, xmm1","66 0F 65 /r","V","V","SSE2","","rw,r","","" +"PCMPISTRI xmm1, xmm2/m128, imm8u","PCMPISTRI imm8u, xmm2/m128, xmm1","pcmpistri imm8u, xmm2/m128, xmm1","66 0F 3A 63 /r ib","V","V","SSE4_2","","r,r,r","","" +"PCMPISTRM xmm1, xmm2/m128, imm8u","PCMPISTRM imm8u, xmm2/m128, xmm1","pcmpistrm imm8u, xmm2/m128, xmm1","66 0F 3A 62 /r ib","V","V","SSE4_2","","r,r,r","","" +"PDEP r32, r32V, r/m32","PDEPL r/m32, r32V, r32","pdepl r/m32, r32V, r32","VEX.DDS.128.F2.0F38.W0 F5 /r","V","V","BMI2","","rw,r,r","Y","32" +"PDEP r64, r64V, r/m64","PDEPQ r/m64, r64V, r64","pdepq r/m64, r64V, r64","VEX.DDS.128.F2.0F38.W1 F5 /r","N.S.","V","BMI2","","rw,r,r","Y","64" +"PEXT r32, r32V, r/m32","PEXTL r/m32, r32V, r32","pextl r/m32, r32V, r32","VEX.DDS.128.F3.0F38.W0 F5 /r","V","V","BMI2","","rw,r,r","Y","32" +"PEXT r64, r64V, r/m64","PEXTQ r/m64, r64V, r64","pextq r/m64, r64V, r64","VEX.DDS.128.F3.0F38.W1 F5 /r","N.S.","V","BMI2","","rw,r,r","Y","64" +"PEXTRB r32/m8, xmm1, imm8u","PEXTRB imm8u, xmm1, r32/m8","pextrb imm8u, xmm1, r32/m8","66 0F 3A 14 /r ib","V","V","SSE4_1","","w,r,r","","" +"PEXTRD r/m32, xmm1, imm8u","PEXTRD imm8u, xmm1, r/m32","pextrd imm8u, xmm1, r/m32","66 0F 3A 16 /r ib","V","V","SSE4_1","operand16,operand32","w,r,r","","" +"PEXTRQ r/m64, xmm1, imm8u","PEXTRQ imm8u, xmm1, r/m64","pextrq imm8u, xmm1, r/m64","66 REX.W 0F 3A 16 /r ib","N.S.","V","SSE4_1","","w,r,r","","" +"PEXTRW r32, mm2, imm8u","PEXTRW imm8u, mm2, r32","pextrw imm8u, mm2, r32","0F C5 /r ib","V","V","MMX","modrm_regonly","w,r,r","","" +"PEXTRW r32/m16, xmm1, imm8u","PEXTRW imm8u, xmm1, r32/m16","pextrw imm8u, xmm1, r32/m16","66 0F 3A 15 /r ib","V","V","SSE4_1","","w,r,r","","" 
+"PEXTRW r32, xmm2, imm8u","PEXTRW imm8u, xmm2, r32","pextrw imm8u, xmm2, r32","66 0F C5 /r ib","V","V","SSE2","modrm_regonly","w,r,r","","" +"PF2ID mm1, mm2/m64","PF2ID mm2/m64, mm1","pf2id mm2/m64, mm1","0F 0F 1D /r","V","V","3DNOW","amd","rw,r","","" +"PF2IW mm1, mm2/m64","PF2IW mm2/m64, mm1","pf2iw mm2/m64, mm1","0F 0F 1C /r","V","V","3DNOW","amd","rw,r","","" +"PFACC mm1, mm2/m64","PFACC mm2/m64, mm1","pfacc mm2/m64, mm1","0F 0F AE /r","V","V","3DNOW","amd","rw,r","","" +"PFADD mm1, mm2/m64","PFADD mm2/m64, mm1","pfadd mm2/m64, mm1","0F 0F 9E /r","V","V","3DNOW","amd","rw,r","","" +"PFCMPEQ mm1, mm2/m64","PFCMPEQ mm2/m64, mm1","pfcmpeq mm2/m64, mm1","0F 0F B0 /r","V","V","3DNOW","amd","rw,r","","" +"PFCMPGE mm1, mm2/m64","PFCMPGE mm2/m64, mm1","pfcmpge mm2/m64, mm1","0F 0F 90 /r","V","V","3DNOW","amd","rw,r","","" +"PFCMPGT mm1, mm2/m64","PFCMPGT mm2/m64, mm1","pfcmpgt mm2/m64, mm1","0F 0F A0 /r","V","V","3DNOW","amd","rw,r","","" +"PFMAX mm1, mm2/m64","PFMAX mm2/m64, mm1","pfmax mm2/m64, mm1","0F 0F A4 /r","V","V","3DNOW","amd","rw,r","","" +"PFMIN mm1, mm2/m64","PFMIN mm2/m64, mm1","pfmin mm2/m64, mm1","0F 0F 94 /r","V","V","3DNOW","amd","rw,r","","" +"PFMUL mm1, mm2/m64","PFMUL mm2/m64, mm1","pfmul mm2/m64, mm1","0F 0F B4 /r","V","V","3DNOW","amd","rw,r","","" +"PFNACC mm1, mm2/m64","PFNACC mm2/m64, mm1","pfnacc mm2/m64, mm1","0F 0F 8A /r","V","V","3DNOW","amd","rw,r","","" +"PFPNACC mm1, mm2/m64","PFPNACC mm2/m64, mm1","pfpnacc mm2/m64, mm1","0F 0F 8E /r","V","V","3DNOW","amd","rw,r","","" +"PFRCP mm1, mm2/m64","PFRCP mm2/m64, mm1","pfrcp mm2/m64, mm1","0F 0F 96 /r","V","V","3DNOW","amd","rw,r","","" +"PFRCPIT1 mm1, mm2/m64","PFRCPIT1 mm2/m64, mm1","pfrcpit1 mm2/m64, mm1","0F 0F A6 /r","V","V","3DNOW","amd","rw,r","","" +"PFRCPIT2 mm1, mm2/m64","PFRCPIT2 mm2/m64, mm1","pfrcpit2 mm2/m64, mm1","0F 0F B6 /r","V","V","3DNOW","amd","rw,r","","" +"PFRSQIT1 mm1, mm2/m64","PFRSQIT1 mm2/m64, mm1","pfrsqit1 mm2/m64, mm1","0F 0F A7 /r","V","V","3DNOW","amd","rw,r","","" +"PFRSQRT mm1, mm2/m64","PFRSQRT mm2/m64, mm1","pfrsqrt mm2/m64, mm1","0F 0F 97 /r","V","V","3DNOW","amd","rw,r","","" +"PFSUB mm1, mm2/m64","PFSUB mm2/m64, mm1","pfsub mm2/m64, mm1","0F 0F 9A /r","V","V","3DNOW","amd","rw,r","","" +"PFSUBR mm1, mm2/m64","PFSUBR mm2/m64, mm1","pfsubr mm2/m64, mm1","0F 0F AA /r","V","V","3DNOW","amd","rw,r","","" +"PHADDD mm1, mm2/m64","PHADDD mm2/m64, mm1","phaddd mm2/m64, mm1","0F 38 02 /r","V","V","SSSE3","","rw,r","","" +"PHADDD xmm1, xmm2/m128","PHADDD xmm2/m128, xmm1","phaddd xmm2/m128, xmm1","66 0F 38 02 /r","V","V","SSSE3","","rw,r","","" +"PHADDSW mm1, mm2/m64","PHADDSW mm2/m64, mm1","phaddsw mm2/m64, mm1","0F 38 03 /r","V","V","SSSE3","","rw,r","","" +"PHADDSW xmm1, xmm2/m128","PHADDSW xmm2/m128, xmm1","phaddsw xmm2/m128, xmm1","66 0F 38 03 /r","V","V","SSSE3","","rw,r","","" +"PHADDW mm1, mm2/m64","PHADDW mm2/m64, mm1","phaddw mm2/m64, mm1","0F 38 01 /r","V","V","SSSE3","","rw,r","","" +"PHADDW xmm1, xmm2/m128","PHADDW xmm2/m128, xmm1","phaddw xmm2/m128, xmm1","66 0F 38 01 /r","V","V","SSSE3","","rw,r","","" +"PHMINPOSUW xmm1, xmm2/m128","PHMINPOSUW xmm2/m128, xmm1","phminposuw xmm2/m128, xmm1","66 0F 38 41 /r","V","V","SSE4_1","","w,r","","" +"PHSUBD mm1, mm2/m64","PHSUBD mm2/m64, mm1","phsubd mm2/m64, mm1","0F 38 06 /r","V","V","SSSE3","","rw,r","","" +"PHSUBD xmm1, xmm2/m128","PHSUBD xmm2/m128, xmm1","phsubd xmm2/m128, xmm1","66 0F 38 06 /r","V","V","SSSE3","","rw,r","","" +"PHSUBSW mm1, mm2/m64","PHSUBSW mm2/m64, mm1","phsubsw mm2/m64, mm1","0F 38 07 
/r","V","V","SSSE3","","rw,r","","" +"PHSUBSW xmm1, xmm2/m128","PHSUBSW xmm2/m128, xmm1","phsubsw xmm2/m128, xmm1","66 0F 38 07 /r","V","V","SSSE3","","rw,r","","" +"PHSUBW mm1, mm2/m64","PHSUBW mm2/m64, mm1","phsubw mm2/m64, mm1","0F 38 05 /r","V","V","SSSE3","","rw,r","","" +"PHSUBW xmm1, xmm2/m128","PHSUBW xmm2/m128, xmm1","phsubw xmm2/m128, xmm1","66 0F 38 05 /r","V","V","SSSE3","","rw,r","","" +"PI2FD mm1, mm2/m64","PI2FD mm2/m64, mm1","pi2fd mm2/m64, mm1","0F 0F 0D /r","V","V","3DNOW","amd","rw,r","","" +"PI2FW mm1, mm2/m64","PI2FW mm2/m64, mm1","pi2fw mm2/m64, mm1","0F 0F 0C /r","V","V","3DNOW","amd","rw,r","","" +"PINSRB xmm1, r32/m8, imm8u","PINSRB imm8u, r32/m8, xmm1","pinsrb imm8u, r32/m8, xmm1","66 0F 3A 20 /r ib","V","V","SSE4_1","","rw,r,r","","" +"PINSRD xmm1, r/m32, imm8u","PINSRD imm8u, r/m32, xmm1","pinsrd imm8u, r/m32, xmm1","66 0F 3A 22 /r ib","V","V","SSE4_1","operand16,operand32","rw,r,r","","" +"PINSRQ xmm1, r/m64, imm8u","PINSRQ imm8u, r/m64, xmm1","pinsrq imm8u, r/m64, xmm1","66 REX.W 0F 3A 22 /r ib","N.S.","V","SSE4_1","","rw,r,r","","" +"PINSRW mm1, r32/m16, imm8u","PINSRW imm8u, r32/m16, mm1","pinsrw imm8u, r32/m16, mm1","0F C4 /r ib","V","V","MMX","","rw,r,r","","" +"PINSRW xmm1, r32/m16, imm8u","PINSRW imm8u, r32/m16, xmm1","pinsrw imm8u, r32/m16, xmm1","66 0F C4 /r ib","V","V","SSE2","","rw,r,r","","" +"PMADDUBSW mm1, mm2/m64","PMADDUBSW mm2/m64, mm1","pmaddubsw mm2/m64, mm1","0F 38 04 /r","V","V","SSSE3","","rw,r","","" +"PMADDUBSW xmm1, xmm2/m128","PMADDUBSW xmm2/m128, xmm1","pmaddubsw xmm2/m128, xmm1","66 0F 38 04 /r","V","V","SSSE3","","rw,r","","" +"PMADDWD mm1, mm2/m64","PMADDWL mm2/m64, mm1","pmaddwd mm2/m64, mm1","0F F5 /r","V","V","MMX","","rw,r","","" +"PMADDWD xmm1, xmm2/m128","PMADDWL xmm2/m128, xmm1","pmaddwd xmm2/m128, xmm1","66 0F F5 /r","V","V","SSE2","","rw,r","","" +"PMAXSB xmm1, xmm2/m128","PMAXSB xmm2/m128, xmm1","pmaxsb xmm2/m128, xmm1","66 0F 38 3C /r","V","V","SSE4_1","","rw,r","","" +"PMAXSD xmm1, xmm2/m128","PMAXSD xmm2/m128, xmm1","pmaxsd xmm2/m128, xmm1","66 0F 38 3D /r","V","V","SSE4_1","","rw,r","","" +"PMAXSW mm1, mm2/m64","PMAXSW mm2/m64, mm1","pmaxsw mm2/m64, mm1","0F EE /r","V","V","MMX","","rw,r","","" +"PMAXSW xmm1, xmm2/m128","PMAXSW xmm2/m128, xmm1","pmaxsw xmm2/m128, xmm1","66 0F EE /r","V","V","SSE2","","rw,r","","" +"PMAXUB mm1, mm2/m64","PMAXUB mm2/m64, mm1","pmaxub mm2/m64, mm1","0F DE /r","V","V","MMX","","rw,r","","" +"PMAXUB xmm1, xmm2/m128","PMAXUB xmm2/m128, xmm1","pmaxub xmm2/m128, xmm1","66 0F DE /r","V","V","SSE2","","rw,r","","" +"PMAXUD xmm1, xmm2/m128","PMAXUD xmm2/m128, xmm1","pmaxud xmm2/m128, xmm1","66 0F 38 3F /r","V","V","SSE4_1","","rw,r","","" +"PMAXUW xmm1, xmm2/m128","PMAXUW xmm2/m128, xmm1","pmaxuw xmm2/m128, xmm1","66 0F 38 3E /r","V","V","SSE4_1","","rw,r","","" +"PMINSB xmm1, xmm2/m128","PMINSB xmm2/m128, xmm1","pminsb xmm2/m128, xmm1","66 0F 38 38 /r","V","V","SSE4_1","","rw,r","","" +"PMINSD xmm1, xmm2/m128","PMINSD xmm2/m128, xmm1","pminsd xmm2/m128, xmm1","66 0F 38 39 /r","V","V","SSE4_1","","rw,r","","" +"PMINSW mm1, mm2/m64","PMINSW mm2/m64, mm1","pminsw mm2/m64, mm1","0F EA /r","V","V","MMX","","rw,r","","" +"PMINSW xmm1, xmm2/m128","PMINSW xmm2/m128, xmm1","pminsw xmm2/m128, xmm1","66 0F EA /r","V","V","SSE2","","rw,r","","" +"PMINUB mm1, mm2/m64","PMINUB mm2/m64, mm1","pminub mm2/m64, mm1","0F DA /r","V","V","MMX","","rw,r","","" +"PMINUB xmm1, xmm2/m128","PMINUB xmm2/m128, xmm1","pminub xmm2/m128, xmm1","66 0F DA /r","V","V","SSE2","","rw,r","","" +"PMINUD xmm1, xmm2/m128","PMINUD 
xmm2/m128, xmm1","pminud xmm2/m128, xmm1","66 0F 38 3B /r","V","V","SSE4_1","","rw,r","","" +"PMINUW xmm1, xmm2/m128","PMINUW xmm2/m128, xmm1","pminuw xmm2/m128, xmm1","66 0F 38 3A /r","V","V","SSE4_1","","rw,r","","" +"PMOVMSKB r32, mm2","PMOVMSKB mm2, r32","pmovmskb mm2, r32","0F D7 /r","V","V","SSE","modrm_regonly","w,r","","" +"PMOVMSKB r32, xmm2","PMOVMSKB xmm2, r32","pmovmskb xmm2, r32","66 0F D7 /r","V","V","SSE2","modrm_regonly","w,r","","" +"PMOVSXBD xmm1, xmm2/m32","PMOVSXBD xmm2/m32, xmm1","pmovsxbd xmm2/m32, xmm1","66 0F 38 21 /r","V","V","SSE4_1","","w,r","","" +"PMOVSXBQ xmm1, xmm2/m16","PMOVSXBQ xmm2/m16, xmm1","pmovsxbq xmm2/m16, xmm1","66 0F 38 22 /r","V","V","SSE4_1","","w,r","","" +"PMOVSXBW xmm1, xmm2/m64","PMOVSXBW xmm2/m64, xmm1","pmovsxbw xmm2/m64, xmm1","66 0F 38 20 /r","V","V","SSE4_1","","w,r","","" +"PMOVSXDQ xmm1, xmm2/m64","PMOVSXDQ xmm2/m64, xmm1","pmovsxdq xmm2/m64, xmm1","66 0F 38 25 /r","V","V","SSE4_1","","w,r","","" +"PMOVSXWD xmm1, xmm2/m64","PMOVSXWD xmm2/m64, xmm1","pmovsxwd xmm2/m64, xmm1","66 0F 38 23 /r","V","V","SSE4_1","","w,r","","" +"PMOVSXWQ xmm1, xmm2/m32","PMOVSXWQ xmm2/m32, xmm1","pmovsxwq xmm2/m32, xmm1","66 0F 38 24 /r","V","V","SSE4_1","","w,r","","" +"PMOVZXBD xmm1, xmm2/m32","PMOVZXBD xmm2/m32, xmm1","pmovzxbd xmm2/m32, xmm1","66 0F 38 31 /r","V","V","SSE4_1","","w,r","","" +"PMOVZXBQ xmm1, xmm2/m16","PMOVZXBQ xmm2/m16, xmm1","pmovzxbq xmm2/m16, xmm1","66 0F 38 32 /r","V","V","SSE4_1","","w,r","","" +"PMOVZXBW xmm1, xmm2/m64","PMOVZXBW xmm2/m64, xmm1","pmovzxbw xmm2/m64, xmm1","66 0F 38 30 /r","V","V","SSE4_1","","w,r","","" +"PMOVZXDQ xmm1, xmm2/m64","PMOVZXDQ xmm2/m64, xmm1","pmovzxdq xmm2/m64, xmm1","66 0F 38 35 /r","V","V","SSE4_1","","w,r","","" +"PMOVZXWD xmm1, xmm2/m64","PMOVZXWD xmm2/m64, xmm1","pmovzxwd xmm2/m64, xmm1","66 0F 38 33 /r","V","V","SSE4_1","","w,r","","" +"PMOVZXWQ xmm1, xmm2/m32","PMOVZXWQ xmm2/m32, xmm1","pmovzxwq xmm2/m32, xmm1","66 0F 38 34 /r","V","V","SSE4_1","","w,r","","" +"PMULDQ xmm1, xmm2/m128","PMULDQ xmm2/m128, xmm1","pmuldq xmm2/m128, xmm1","66 0F 38 28 /r","V","V","SSE4_1","","rw,r","","" +"PMULHRSW mm1, mm2/m64","PMULHRSW mm2/m64, mm1","pmulhrsw mm2/m64, mm1","0F 38 0B /r","V","V","SSSE3","","rw,r","","" +"PMULHRSW xmm1, xmm2/m128","PMULHRSW xmm2/m128, xmm1","pmulhrsw xmm2/m128, xmm1","66 0F 38 0B /r","V","V","SSSE3","","rw,r","","" +"PMULHRW mm1, mm2/m64","PMULHRW mm2/m64, mm1","pmulhrw mm2/m64, mm1","0F 0F B7 /r","V","V","3DNOW","amd","rw,r","","" +"PMULHUW mm1, mm2/m64","PMULHUW mm2/m64, mm1","pmulhuw mm2/m64, mm1","0F E4 /r","V","V","MMX","","rw,r","","" +"PMULHUW xmm1, xmm2/m128","PMULHUW xmm2/m128, xmm1","pmulhuw xmm2/m128, xmm1","66 0F E4 /r","V","V","SSE2","","rw,r","","" +"PMULHW mm1, mm2/m64","PMULHW mm2/m64, mm1","pmulhw mm2/m64, mm1","0F E5 /r","V","V","MMX","","rw,r","","" +"PMULHW xmm1, xmm2/m128","PMULHW xmm2/m128, xmm1","pmulhw xmm2/m128, xmm1","66 0F E5 /r","V","V","SSE2","","rw,r","","" +"PMULLD xmm1, xmm2/m128","PMULLD xmm2/m128, xmm1","pmulld xmm2/m128, xmm1","66 0F 38 40 /r","V","V","SSE4_1","","rw,r","","" +"PMULLW mm1, mm2/m64","PMULLW mm2/m64, mm1","pmullw mm2/m64, mm1","0F D5 /r","V","V","MMX","","rw,r","","" +"PMULLW xmm1, xmm2/m128","PMULLW xmm2/m128, xmm1","pmullw xmm2/m128, xmm1","66 0F D5 /r","V","V","SSE2","","rw,r","","" +"PMULUDQ mm1, mm2/m64","PMULULQ mm2/m64, mm1","pmuludq mm2/m64, mm1","0F F4 /r","V","V","SSE2","","rw,r","","" +"PMULUDQ xmm1, xmm2/m128","PMULULQ xmm2/m128, xmm1","pmuludq xmm2/m128, xmm1","66 0F F4 /r","V","V","SSE2","","rw,r","","" 
+"POPAD","POPAL","popal","61","V","N.S.","","operand32","","","" +"POPA","POPAW","popaw","61","V","N.S.","","operand16","","","" +"POPCNT r32, r/m32","POPCNTL r/m32, r32","popcntl r/m32, r32","F3 0F B8 /r","V","V","POPCNT","operand32","w,r","Y","32" +"POPCNT r64, r/m64","POPCNTQ r/m64, r64","popcntq r/m64, r64","F3 REX.W 0F B8 /r","N.S.","V","POPCNT","","w,r","Y","64" +"POPCNT r16, r/m16","POPCNTW r/m16, r16","popcntw r/m16, r16","F3 0F B8 /r","V","V","POPCNT","operand16","w,r","Y","16" +"POPFD","POPFL","popfl","9D","V","N.S.","","operand32","","","" +"POPFQ","POPFQ","popfq","9D","N.S.","V","","default64","","","" +"POPF","POPFW","popfw","9D","V","V","","operand16","","","" +"POP r/m32","POPL r/m32","popl r/m32","8F /0","V","N.S.","","operand32","w","Y","32" +"POP r32op","POPL r32op","popl r32op","58+rd","V","N.S.","","operand32","w","Y","32" +"POP r/m64","POPQ r/m64","popq r/m64","8F /0","N.S.","V","","default64","w","Y","64" +"POP r64op","POPQ r64op","popq r64op","58+ro","N.S.","V","","default64","w","Y","64" +"POP r/m16","POPW r/m16","popw r/m16","8F /0","V","V","","operand16","w","Y","16" +"POP r16op","POPW r16op","popw r16op","58+rw","V","V","","operand16","w","Y","16" +"POP DS","POPW/POPL/POPQ DS","popw/popl/popq DS","1F","V","N.S.","","","w","Y","" +"POP ES","POPW/POPL/POPQ ES","popw/popl/popq ES","07","V","N.S.","","","w","Y","" +"POP FS","POPW/POPL/POPQ FS","popw/popl/popq FS","0F A1","N.S.","V","","default64","w","Y","" +"POP FS","POPW/POPL/POPQ FS","popw/popl/popq FS","0F A1","V","N.S.","","operand32","w","Y","" +"POP FS","POPW/POPL/POPQ FS","popw/popl/popq FS","0F A1","V","V","","operand16","w","Y","" +"POP GS","POPW/POPL/POPQ GS","popw/popl/popq GS","0F A9","N.S.","V","","default64","w","Y","" +"POP GS","POPW/POPL/POPQ GS","popw/popl/popq GS","0F A9","V","V","","operand16","w","Y","" +"POP GS","POPW/POPL/POPQ GS","popw/popl/popq GS","0F A9","V","N.S.","","operand32","w","Y","" +"POP SS","POPW/POPL/POPQ SS","popw/popl/popq SS","17","V","N.S.","","","w","Y","" +"POR mm1, mm2/m64","POR mm2/m64, mm1","por mm2/m64, mm1","0F EB /r","V","V","MMX","","rw,r","","" +"POR xmm1, xmm2/m128","POR xmm2/m128, xmm1","por xmm2/m128, xmm1","66 0F EB /r","V","V","SSE2","","rw,r","","" +"PREFETCHNTA m8","PREFETCHNTA m8","prefetchnta m8","0F 18 /0","V","V","","modrm_memonly","r","","" +"PREFETCHT0 m8","PREFETCHT0 m8","prefetcht0 m8","0F 18 /1","V","V","","modrm_memonly","r","","" +"PREFETCHT1 m8","PREFETCHT1 m8","prefetcht1 m8","0F 18 /2","V","V","","modrm_memonly","r","","" +"PREFETCHT2 m8","PREFETCHT2 m8","prefetcht2 m8","0F 18 /3","V","V","","modrm_memonly","r","","" +"PREFETCHW m8","PREFETCHW m8","prefetchw m8","0F 0D /1","V","V","PRFCHW","modrm_memonly","r","","" +"PREFETCHWT1 m8","PREFETCHWT1 m8","prefetchwt1 m8","0F 0D /2","V","V","PREFETCHWT1","modrm_memonly","r","","" +"PREFETCHW_ALIAS m8","PREFETCHW_ALIAS m8","prefetchw_alias m8","0F 0D /3","V","V","PRFCHW","modrm_memonly","r","","" +"PREFETCH_EXCLUSIVE m8","PREFETCH_EXCLUSIVE m8","prefetch_exclusive m8","0F 0D /0","V","V","PRFCHW","modrm_memonly","r","","" +"PREFETCH_RESERVED m8","PREFETCH_RESERVED m8","prefetch_reserved m8","0F 0D /2","V","V","PRFCHW","modrm_memonly","r","Y","" +"PREFETCH_RESERVED m8","PREFETCH_RESERVED m8","prefetch_reserved m8","0F 0D /4","V","V","PRFCHW","modrm_memonly","r","Y","" +"PREFETCH_RESERVED m8","PREFETCH_RESERVED m8","prefetch_reserved m8","0F 0D /5","V","V","PRFCHW","modrm_memonly","r","Y","" +"PREFETCH_RESERVED m8","PREFETCH_RESERVED m8","prefetch_reserved m8","0F 0D 
/6","V","V","PRFCHW","modrm_memonly","r","Y","" +"PREFETCH_RESERVED m8","PREFETCH_RESERVED m8","prefetch_reserved m8","0F 0D /7","V","V","PRFCHW","modrm_memonly","r","Y","" +"PSADBW mm1, mm2/m64","PSADBW mm2/m64, mm1","psadbw mm2/m64, mm1","0F F6 /r","V","V","MMX","","rw,r","","" +"PSADBW xmm1, xmm2/m128","PSADBW xmm2/m128, xmm1","psadbw xmm2/m128, xmm1","66 0F F6 /r","V","V","SSE2","","rw,r","","" +"PSHUFB mm1, mm2/m64","PSHUFB mm2/m64, mm1","pshufb mm2/m64, mm1","0F 38 00 /r","V","V","SSSE3","","rw,r","","" +"PSHUFB xmm1, xmm2/m128","PSHUFB xmm2/m128, xmm1","pshufb xmm2/m128, xmm1","66 0F 38 00 /r","V","V","SSSE3","","rw,r","","" +"PSHUFD xmm1, xmm2/m128, imm8u","PSHUFD imm8u, xmm2/m128, xmm1","pshufd imm8u, xmm2/m128, xmm1","66 0F 70 /r ib","V","V","SSE2","","w,r,r","","" +"PSHUFHW xmm1, xmm2/m128, imm8u","PSHUFHW imm8u, xmm2/m128, xmm1","pshufhw imm8u, xmm2/m128, xmm1","F3 0F 70 /r ib","V","V","SSE2","","w,r,r","","" +"PSHUFLW xmm1, xmm2/m128, imm8u","PSHUFLW imm8u, xmm2/m128, xmm1","pshuflw imm8u, xmm2/m128, xmm1","F2 0F 70 /r ib","V","V","SSE2","","w,r,r","","" +"PSHUFW mm1, mm2/m64, imm8u","PSHUFW imm8u, mm2/m64, mm1","pshufw imm8u, mm2/m64, mm1","0F 70 /r ib","V","V","MMX","","w,r,r","","" +"PSIGNB mm1, mm2/m64","PSIGNB mm2/m64, mm1","psignb mm2/m64, mm1","0F 38 08 /r","V","V","SSSE3","","rw,r","","" +"PSIGNB xmm1, xmm2/m128","PSIGNB xmm2/m128, xmm1","psignb xmm2/m128, xmm1","66 0F 38 08 /r","V","V","SSSE3","","rw,r","","" +"PSIGND mm1, mm2/m64","PSIGND mm2/m64, mm1","psignd mm2/m64, mm1","0F 38 0A /r","V","V","SSSE3","","rw,r","","" +"PSIGND xmm1, xmm2/m128","PSIGND xmm2/m128, xmm1","psignd xmm2/m128, xmm1","66 0F 38 0A /r","V","V","SSSE3","","rw,r","","" +"PSIGNW mm1, mm2/m64","PSIGNW mm2/m64, mm1","psignw mm2/m64, mm1","0F 38 09 /r","V","V","SSSE3","","rw,r","","" +"PSIGNW xmm1, xmm2/m128","PSIGNW xmm2/m128, xmm1","psignw xmm2/m128, xmm1","66 0F 38 09 /r","V","V","SSSE3","","rw,r","","" +"PSLLD mm2, imm8u","PSLLL imm8u, mm2","pslld imm8u, mm2","0F 72 /6 ib","V","V","MMX","modrm_regonly","rw,r","","" +"PSLLD xmm2, imm8u","PSLLL imm8u, xmm2","pslld imm8u, xmm2","66 0F 72 /6 ib","V","V","SSE2","modrm_regonly","rw,r","","" +"PSLLD mm1, mm2/m64","PSLLL mm2/m64, mm1","pslld mm2/m64, mm1","0F F2 /r","V","V","MMX","","rw,r","","" +"PSLLD xmm1, xmm2/m128","PSLLL xmm2/m128, xmm1","pslld xmm2/m128, xmm1","66 0F F2 /r","V","V","SSE2","","rw,r","","" +"PSLLDQ xmm2, imm8u","PSLLO imm8u, xmm2","pslldq imm8u, xmm2","66 0F 73 /7 ib","V","V","SSE2","modrm_regonly","rw,r","","" +"PSLLQ mm2, imm8u","PSLLQ imm8u, mm2","psllq imm8u, mm2","0F 73 /6 ib","V","V","MMX","modrm_regonly","rw,r","","" +"PSLLQ xmm2, imm8u","PSLLQ imm8u, xmm2","psllq imm8u, xmm2","66 0F 73 /6 ib","V","V","SSE2","modrm_regonly","rw,r","","" +"PSLLQ mm1, mm2/m64","PSLLQ mm2/m64, mm1","psllq mm2/m64, mm1","0F F3 /r","V","V","MMX","","rw,r","","" +"PSLLQ xmm1, xmm2/m128","PSLLQ xmm2/m128, xmm1","psllq xmm2/m128, xmm1","66 0F F3 /r","V","V","SSE2","","rw,r","","" +"PSLLW mm2, imm8u","PSLLW imm8u, mm2","psllw imm8u, mm2","0F 71 /6 ib","V","V","MMX","modrm_regonly","rw,r","","" +"PSLLW xmm2, imm8u","PSLLW imm8u, xmm2","psllw imm8u, xmm2","66 0F 71 /6 ib","V","V","SSE2","modrm_regonly","rw,r","","" +"PSLLW mm1, mm2/m64","PSLLW mm2/m64, mm1","psllw mm2/m64, mm1","0F F1 /r","V","V","MMX","","rw,r","","" +"PSLLW xmm1, xmm2/m128","PSLLW xmm2/m128, xmm1","psllw xmm2/m128, xmm1","66 0F F1 /r","V","V","SSE2","","rw,r","","" +"PSRAD mm2, imm8u","PSRAL imm8u, mm2","psrad imm8u, mm2","0F 72 /4 ib","V","V","MMX","modrm_regonly","rw,r","","" 
+"PSRAD xmm2, imm8u","PSRAL imm8u, xmm2","psrad imm8u, xmm2","66 0F 72 /4 ib","V","V","SSE2","modrm_regonly","rw,r","","" +"PSRAD mm1, mm2/m64","PSRAL mm2/m64, mm1","psrad mm2/m64, mm1","0F E2 /r","V","V","MMX","","rw,r","","" +"PSRAD xmm1, xmm2/m128","PSRAL xmm2/m128, xmm1","psrad xmm2/m128, xmm1","66 0F E2 /r","V","V","SSE2","","rw,r","","" +"PSRAW mm2, imm8u","PSRAW imm8u, mm2","psraw imm8u, mm2","0F 71 /4 ib","V","V","MMX","modrm_regonly","rw,r","","" +"PSRAW xmm2, imm8u","PSRAW imm8u, xmm2","psraw imm8u, xmm2","66 0F 71 /4 ib","V","V","SSE2","modrm_regonly","rw,r","","" +"PSRAW mm1, mm2/m64","PSRAW mm2/m64, mm1","psraw mm2/m64, mm1","0F E1 /r","V","V","MMX","","rw,r","","" +"PSRAW xmm1, xmm2/m128","PSRAW xmm2/m128, xmm1","psraw xmm2/m128, xmm1","66 0F E1 /r","V","V","SSE2","","rw,r","","" +"PSRLD mm2, imm8u","PSRLL imm8u, mm2","psrld imm8u, mm2","0F 72 /2 ib","V","V","MMX","modrm_regonly","rw,r","","" +"PSRLD xmm2, imm8u","PSRLL imm8u, xmm2","psrld imm8u, xmm2","66 0F 72 /2 ib","V","V","SSE2","modrm_regonly","rw,r","","" +"PSRLD mm1, mm2/m64","PSRLL mm2/m64, mm1","psrld mm2/m64, mm1","0F D2 /r","V","V","MMX","","rw,r","","" +"PSRLD xmm1, xmm2/m128","PSRLL xmm2/m128, xmm1","psrld xmm2/m128, xmm1","66 0F D2 /r","V","V","SSE2","","rw,r","","" +"PSRLDQ xmm2, imm8u","PSRLO imm8u, xmm2","psrldq imm8u, xmm2","66 0F 73 /3 ib","V","V","SSE2","modrm_regonly","rw,r","","" +"PSRLQ mm2, imm8u","PSRLQ imm8u, mm2","psrlq imm8u, mm2","0F 73 /2 ib","V","V","MMX","modrm_regonly","rw,r","","" +"PSRLQ xmm2, imm8u","PSRLQ imm8u, xmm2","psrlq imm8u, xmm2","66 0F 73 /2 ib","V","V","SSE2","modrm_regonly","rw,r","","" +"PSRLQ mm1, mm2/m64","PSRLQ mm2/m64, mm1","psrlq mm2/m64, mm1","0F D3 /r","V","V","MMX","","rw,r","","" +"PSRLQ xmm1, xmm2/m128","PSRLQ xmm2/m128, xmm1","psrlq xmm2/m128, xmm1","66 0F D3 /r","V","V","SSE2","","rw,r","","" +"PSRLW mm2, imm8u","PSRLW imm8u, mm2","psrlw imm8u, mm2","0F 71 /2 ib","V","V","MMX","modrm_regonly","rw,r","","" +"PSRLW xmm2, imm8u","PSRLW imm8u, xmm2","psrlw imm8u, xmm2","66 0F 71 /2 ib","V","V","SSE2","modrm_regonly","rw,r","","" +"PSRLW mm1, mm2/m64","PSRLW mm2/m64, mm1","psrlw mm2/m64, mm1","0F D1 /r","V","V","MMX","","rw,r","","" +"PSRLW xmm1, xmm2/m128","PSRLW xmm2/m128, xmm1","psrlw xmm2/m128, xmm1","66 0F D1 /r","V","V","SSE2","","rw,r","","" +"PSUBB mm1, mm2/m64","PSUBB mm2/m64, mm1","psubb mm2/m64, mm1","0F F8 /r","V","V","MMX","","rw,r","","" +"PSUBB xmm1, xmm2/m128","PSUBB xmm2/m128, xmm1","psubb xmm2/m128, xmm1","66 0F F8 /r","V","V","SSE2","","rw,r","","" +"PSUBD mm1, mm2/m64","PSUBL mm2/m64, mm1","psubd mm2/m64, mm1","0F FA /r","V","V","MMX","","rw,r","","" +"PSUBD xmm1, xmm2/m128","PSUBL xmm2/m128, xmm1","psubd xmm2/m128, xmm1","66 0F FA /r","V","V","SSE2","","rw,r","","" +"PSUBQ mm1, mm2/m64","PSUBQ mm2/m64, mm1","psubq mm2/m64, mm1","0F FB /r","V","V","SSE2","","rw,r","","" +"PSUBQ xmm1, xmm2/m128","PSUBQ xmm2/m128, xmm1","psubq xmm2/m128, xmm1","66 0F FB /r","V","V","SSE2","","rw,r","","" +"PSUBSB mm1, mm2/m64","PSUBSB mm2/m64, mm1","psubsb mm2/m64, mm1","0F E8 /r","V","V","MMX","","rw,r","","" +"PSUBSB xmm1, xmm2/m128","PSUBSB xmm2/m128, xmm1","psubsb xmm2/m128, xmm1","66 0F E8 /r","V","V","SSE2","","rw,r","","" +"PSUBSW mm1, mm2/m64","PSUBSW mm2/m64, mm1","psubsw mm2/m64, mm1","0F E9 /r","V","V","MMX","","rw,r","","" +"PSUBSW xmm1, xmm2/m128","PSUBSW xmm2/m128, xmm1","psubsw xmm2/m128, xmm1","66 0F E9 /r","V","V","SSE2","","rw,r","","" +"PSUBUSB mm1, mm2/m64","PSUBUSB mm2/m64, mm1","psubusb mm2/m64, mm1","0F D8 /r","V","V","MMX","","rw,r","","" 
+"PSUBUSB xmm1, xmm2/m128","PSUBUSB xmm2/m128, xmm1","psubusb xmm2/m128, xmm1","66 0F D8 /r","V","V","SSE2","","rw,r","","" +"PSUBUSW mm1, mm2/m64","PSUBUSW mm2/m64, mm1","psubusw mm2/m64, mm1","0F D9 /r","V","V","MMX","","rw,r","","" +"PSUBUSW xmm1, xmm2/m128","PSUBUSW xmm2/m128, xmm1","psubusw xmm2/m128, xmm1","66 0F D9 /r","V","V","SSE2","","rw,r","","" +"PSUBW mm1, mm2/m64","PSUBW mm2/m64, mm1","psubw mm2/m64, mm1","0F F9 /r","V","V","MMX","","rw,r","","" +"PSUBW xmm1, xmm2/m128","PSUBW xmm2/m128, xmm1","psubw xmm2/m128, xmm1","66 0F F9 /r","V","V","SSE2","","rw,r","","" +"PSWAPD mm1, mm2/m64","PSWAPD mm2/m64, mm1","pswapd mm2/m64, mm1","0F 0F BB /r","V","V","3DNOW","amd","rw,r","","" +"PTEST xmm1, xmm2/m128","PTEST xmm2/m128, xmm1","ptest xmm2/m128, xmm1","66 0F 38 17 /r","V","V","SSE4_1","","r,r","","" +"PTWRITE r/m32","PTWRITEL r/m32","ptwritel r/m32","F3 0F AE /4","V","V","","operand16,operand32","r","Y","32" +"PTWRITE r/m64","PTWRITEQ r/m64","ptwriteq r/m64","F3 REX.W 0F AE /4","N.S.","V","","","r","Y","64" +"PUNPCKHBW mm1, mm2/m64","PUNPCKHBW mm2/m64, mm1","punpckhbw mm2/m64, mm1","0F 68 /r","V","V","MMX","","rw,r","","" +"PUNPCKHBW xmm1, xmm2/m128","PUNPCKHBW xmm2/m128, xmm1","punpckhbw xmm2/m128, xmm1","66 0F 68 /r","V","V","SSE2","","rw,r","","" +"PUNPCKHDQ mm1, mm2/m64","PUNPCKHLQ mm2/m64, mm1","punpckhdq mm2/m64, mm1","0F 6A /r","V","V","MMX","","rw,r","","" +"PUNPCKHDQ xmm1, xmm2/m128","PUNPCKHLQ xmm2/m128, xmm1","punpckhdq xmm2/m128, xmm1","66 0F 6A /r","V","V","SSE2","","rw,r","","" +"PUNPCKHQDQ xmm1, xmm2/m128","PUNPCKHQDQ xmm2/m128, xmm1","punpckhqdq xmm2/m128, xmm1","66 0F 6D /r","V","V","SSE2","","rw,r","","" +"PUNPCKHWD mm1, mm2/m64","PUNPCKHWL mm2/m64, mm1","punpckhwd mm2/m64, mm1","0F 69 /r","V","V","MMX","","rw,r","","" +"PUNPCKHWD xmm1, xmm2/m128","PUNPCKHWL xmm2/m128, xmm1","punpckhwd xmm2/m128, xmm1","66 0F 69 /r","V","V","SSE2","","rw,r","","" +"PUNPCKLBW mm1, mm2/m32","PUNPCKLBW mm2/m32, mm1","punpcklbw mm2/m32, mm1","0F 60 /r","V","V","MMX","","rw,r","","" +"PUNPCKLBW xmm1, xmm2/m128","PUNPCKLBW xmm2/m128, xmm1","punpcklbw xmm2/m128, xmm1","66 0F 60 /r","V","V","SSE2","","rw,r","","" +"PUNPCKLDQ mm1, mm2/m32","PUNPCKLLQ mm2/m32, mm1","punpckldq mm2/m32, mm1","0F 62 /r","V","V","MMX","","rw,r","","" +"PUNPCKLDQ xmm1, xmm2/m128","PUNPCKLLQ xmm2/m128, xmm1","punpckldq xmm2/m128, xmm1","66 0F 62 /r","V","V","SSE2","","rw,r","","" +"PUNPCKLQDQ xmm1, xmm2/m128","PUNPCKLQDQ xmm2/m128, xmm1","punpcklqdq xmm2/m128, xmm1","66 0F 6C /r","V","V","SSE2","","rw,r","","" +"PUNPCKLWD mm1, mm2/m32","PUNPCKLWL mm2/m32, mm1","punpcklwd mm2/m32, mm1","0F 61 /r","V","V","MMX","","rw,r","","" +"PUNPCKLWD xmm1, xmm2/m128","PUNPCKLWL xmm2/m128, xmm1","punpcklwd xmm2/m128, xmm1","66 0F 61 /r","V","V","SSE2","","rw,r","","" +"PUSHAD","PUSHAL","pushal","60","V","N.S.","","operand32","","","" +"PUSHA","PUSHAW","pushaw","60","V","N.S.","","operand16","","","" +"PUSHFD","PUSHFL","pushfl","9C","V","N.S.","","operand32","","","" +"PUSHFQ","PUSHFQ","pushfq","9C","N.S.","V","","default64","","","" +"PUSHF","PUSHFW","pushfw","9C","V","V","","operand16","","","" +"PUSH r/m32","PUSHL r/m32","pushl r/m32","FF /6","V","N.S.","","operand32","r","Y","32" +"PUSH r32op","PUSHL r32op","pushl r32op","50+rd","V","N.S.","","operand32","r","Y","32" +"PUSH r/m64","PUSHQ r/m64","pushq r/m64","FF /6","N.S.","V","","default64","r","Y","64" +"PUSH r64op","PUSHQ r64op","pushq r64op","50+ro","N.S.","V","","default64","r","Y","64" +"PUSH imm16","PUSHW imm16","pushw imm16","68 iw","V","V","","operand16","r","Y","" 
+"PUSH r/m16","PUSHW r/m16","pushw r/m16","FF /6","V","V","","operand16","r","Y","16" +"PUSH r16op","PUSHW r16op","pushw r16op","50+rw","V","V","","operand16","r","Y","16" +"PUSH CS","PUSHW/PUSHL/PUSHQ CS","pushw/pushl/pushq CS","0E","V","N.S.","","","r","Y","" +"PUSH DS","PUSHW/PUSHL/PUSHQ DS","pushw/pushl/pushq DS","1E","V","N.S.","","","r","Y","" +"PUSH ES","PUSHW/PUSHL/PUSHQ ES","pushw/pushl/pushq ES","06","V","N.S.","","","r","Y","" +"PUSH FS","PUSHW/PUSHL/PUSHQ FS","pushw/pushl/pushq FS","0F A0","V","V","","operand16","r","Y","" +"PUSH FS","PUSHW/PUSHL/PUSHQ FS","pushw/pushl/pushq FS","0F A0","N.S.","V","","default64","r","Y","" +"PUSH FS","PUSHW/PUSHL/PUSHQ FS","pushw/pushl/pushq FS","0F A0","V","N.S.","","operand32","r","Y","" +"PUSH GS","PUSHW/PUSHL/PUSHQ GS","pushw/pushl/pushq GS","0F A8","N.S.","V","","default64","r","Y","" +"PUSH GS","PUSHW/PUSHL/PUSHQ GS","pushw/pushl/pushq GS","0F A8","V","N.S.","","operand32","r","Y","" +"PUSH GS","PUSHW/PUSHL/PUSHQ GS","pushw/pushl/pushq GS","0F A8","V","V","","operand16","r","Y","" +"PUSH SS","PUSHW/PUSHL/PUSHQ SS","pushw/pushl/pushq SS","16","V","N.S.","","","r","Y","" +"PUSH imm8","PUSHW/PUSHL/PUSHQ imm8","pushw/pushl/pushq imm8","6A ib","V","N.S.","","operand32","r","Y","" +"PUSH imm8","PUSHW/PUSHL/PUSHQ imm8","pushw/pushl/pushq imm8","6A ib","N.S.","V","","default64","r","Y","" +"PUSH imm8","PUSHW/PUSHL/PUSHQ imm8","pushw/pushl/pushq imm8","6A ib","V","V","","operand16","r","Y","" +"PXOR mm1, mm2/m64","PXOR mm2/m64, mm1","pxor mm2/m64, mm1","0F EF /r","V","V","MMX","","rw,r","","" +"PXOR xmm1, xmm2/m128","PXOR xmm2/m128, xmm1","pxor xmm2/m128, xmm1","66 0F EF /r","V","V","SSE2","","rw,r","","" +"RCL r/m8, 1","RCLB 1, r/m8","rclb 1, r/m8","D0 /2","V","V","","","rw,r","Y","8" +"RCL r/m8, 1","RCLB 1, r/m8","rclb 1, r/m8","REX D0 /2","N.E.","V","","pseudo64","w,r","Y","8" +"RCL r/m8, CL","RCLB CL, r/m8","rclb CL, r/m8","D2 /2","V","V","","","rw,r","Y","8" +"RCL r/m8, CL","RCLB CL, r/m8","rclb CL, r/m8","REX D2 /2","N.E.","V","","pseudo64","w,r","Y","8" +"RCL r/m8, imm8","RCLB imm8, r/m8","rclb imm8, r/m8","REX C0 /2 ib","N.E.","V","","pseudo64","w,r","Y","8" +"RCL r/m8, imm8u","RCLB imm8u, r/m8","rclb imm8u, r/m8","C0 /2 ib","V","V","","","rw,r","Y","8" +"RCL r/m32, 1","RCLL 1, r/m32","rcll 1, r/m32","D1 /2","V","V","","operand32","rw,r","Y","32" +"RCL r/m32, CL","RCLL CL, r/m32","rcll CL, r/m32","D3 /2","V","V","","operand32","rw,r","Y","32" +"RCL r/m32, imm8u","RCLL imm8u, r/m32","rcll imm8u, r/m32","C1 /2 ib","V","V","","operand32","rw,r","Y","32" +"RCL r/m64, 1","RCLQ 1, r/m64","rclq 1, r/m64","REX.W D1 /2","N.S.","V","","","rw,r","Y","64" +"RCL r/m64, CL","RCLQ CL, r/m64","rclq CL, r/m64","REX.W D3 /2","N.S.","V","","","rw,r","Y","64" +"RCL r/m64, imm8u","RCLQ imm8u, r/m64","rclq imm8u, r/m64","REX.W C1 /2 ib","N.S.","V","","","rw,r","Y","64" +"RCL r/m16, 1","RCLW 1, r/m16","rclw 1, r/m16","D1 /2","V","V","","operand16","rw,r","Y","16" +"RCL r/m16, CL","RCLW CL, r/m16","rclw CL, r/m16","D3 /2","V","V","","operand16","rw,r","Y","16" +"RCL r/m16, imm8u","RCLW imm8u, r/m16","rclw imm8u, r/m16","C1 /2 ib","V","V","","operand16","rw,r","Y","16" +"RCPPS xmm1, xmm2/m128","RCPPS xmm2/m128, xmm1","rcpps xmm2/m128, xmm1","0F 53 /r","V","V","SSE","","w,r","","" +"RCPSS xmm1, xmm2/m32","RCPSS xmm2/m32, xmm1","rcpss xmm2/m32, xmm1","F3 0F 53 /r","V","V","SSE","","w,r","","" +"RCR r/m8, 1","RCRB 1, r/m8","rcrb 1, r/m8","D0 /3","V","V","","","rw,r","Y","8" +"RCR r/m8, 1","RCRB 1, r/m8","rcrb 1, r/m8","REX D0 
/3","N.E.","V","","pseudo64","w,r","Y","8" +"RCR r/m8, CL","RCRB CL, r/m8","rcrb CL, r/m8","D2 /3","V","V","","","rw,r","Y","8" +"RCR r/m8, CL","RCRB CL, r/m8","rcrb CL, r/m8","REX D2 /3","N.E.","V","","pseudo64","w,r","Y","8" +"RCR r/m8, imm8","RCRB imm8, r/m8","rcrb imm8, r/m8","REX C0 /3 ib","N.E.","V","","pseudo64","w,r","Y","8" +"RCR r/m8, imm8u","RCRB imm8u, r/m8","rcrb imm8u, r/m8","C0 /3 ib","V","V","","","rw,r","Y","8" +"RCR r/m32, 1","RCRL 1, r/m32","rcrl 1, r/m32","D1 /3","V","V","","operand32","rw,r","Y","32" +"RCR r/m32, CL","RCRL CL, r/m32","rcrl CL, r/m32","D3 /3","V","V","","operand32","rw,r","Y","32" +"RCR r/m32, imm8u","RCRL imm8u, r/m32","rcrl imm8u, r/m32","C1 /3 ib","V","V","","operand32","rw,r","Y","32" +"RCR r/m64, 1","RCRQ 1, r/m64","rcrq 1, r/m64","REX.W D1 /3","N.S.","V","","","rw,r","Y","64" +"RCR r/m64, CL","RCRQ CL, r/m64","rcrq CL, r/m64","REX.W D3 /3","N.S.","V","","","rw,r","Y","64" +"RCR r/m64, imm8u","RCRQ imm8u, r/m64","rcrq imm8u, r/m64","REX.W C1 /3 ib","N.S.","V","","","rw,r","Y","64" +"RCR r/m16, 1","RCRW 1, r/m16","rcrw 1, r/m16","D1 /3","V","V","","operand16","rw,r","Y","16" +"RCR r/m16, CL","RCRW CL, r/m16","rcrw CL, r/m16","D3 /3","V","V","","operand16","rw,r","Y","16" +"RCR r/m16, imm8u","RCRW imm8u, r/m16","rcrw imm8u, r/m16","C1 /3 ib","V","V","","operand16","rw,r","Y","16" +"RDFSBASE rmr32","RDFSBASEL rmr32","rdfsbase rmr32","F3 0F AE /0","N.S.","V","FSGSBASE","modrm_regonly,operand16,operand32","w","Y","32" +"RDFSBASE rmr64","RDFSBASEQ rmr64","rdfsbase rmr64","F3 REX.W 0F AE /0","N.S.","V","FSGSBASE","modrm_regonly","w","Y","64" +"RDGSBASE rmr32","RDGSBASEL rmr32","rdgsbase rmr32","F3 0F AE /1","N.S.","V","FSGSBASE","modrm_regonly,operand16,operand32","w","Y","32" +"RDGSBASE rmr64","RDGSBASEQ rmr64","rdgsbase rmr64","F3 REX.W 0F AE /1","N.S.","V","FSGSBASE","modrm_regonly","w","Y","64" +"RDMSR","RDMSR","rdmsr","0F 32","V","V","Pentium","","","","" +"RDPKRU","RDPKRU","rdpkru","0F 01 EE","V","V","PKU","","","","" +"RDPMC","RDPMC","rdpmc","0F 33","V","V","","","","","" +"RDRAND rmr32","RDRANDL rmr32","rdrand rmr32","0F C7 /6","V","V","RDRAND","modrm_regonly,operand32","w","Y","32" +"RDRAND rmr64","RDRANDQ rmr64","rdrand rmr64","REX.W 0F C7 /6","N.S.","V","RDRAND","modrm_regonly","w","Y","64" +"RDRAND rmr16","RDRANDW rmr16","rdrand rmr16","0F C7 /6","V","V","RDRAND","modrm_regonly,operand16","w","Y","16" +"RDSEED rmr32","RDSEEDL rmr32","rdseed rmr32","0F C7 /7","V","V","RDSEED","modrm_regonly,operand32","w","Y","32" +"RDSEED rmr64","RDSEEDQ rmr64","rdseed rmr64","REX.W 0F C7 /7","N.S.","V","RDSEED","modrm_regonly","w","Y","64" +"RDSEED rmr16","RDSEEDW rmr16","rdseed rmr16","0F C7 /7","V","V","RDSEED","modrm_regonly,operand16","w","Y","16" +"RDSSPD rmr32","RDSSPD rmr32","rdsspd rmr32","F3 0F 1E /1","V","V","CET","modrm_regonly,operand16,operand32","w","","" +"RDSSPQ rmr64","RDSSPQ rmr64","rdsspq rmr64","F3 REX.W 0F 1E /1","N.S.","V","CET","modrm_regonly","w","","" +"RDTSC","RDTSC","rdtsc","0F 31","V","V","Pentium","","","","" +"RDTSCP","RDTSCP","rdtscp","0F 01 F9","V","V","RDTSCP","","","","" +"RET_FAR","RETFW/RETFL/RETFQ","lretw/lretl/lretl","CB","V","V","","","","","" +"RET_FAR imm16u","RETFW/RETFL/RETFQ imm16u","lretw/lretl/lretl imm16u","CA iw","V","V","","","r","","" +"RET","RETW/RETL/RETQ","retw/retl/retq","C3","N.S.","V","","default64","","","" +"RET","RETW/RETL/RETQ","retw/retl/retq","C3","V","N.S.","","","","","" +"RET imm16u","RETW/RETL/RETQ imm16u","retw/retl/retq imm16u","C2 iw","N.S.","V","","default64","r","","" +"RET 
imm16u","RETW/RETL/RETQ imm16u","retw/retl/retq imm16u","C2 iw","V","N.S.","","","r","","" +"ROL r/m8, 1","ROLB 1, r/m8","rolb 1, r/m8","D0 /0","V","V","","","rw,r","Y","8" +"ROL r/m8, 1","ROLB 1, r/m8","rolb 1, r/m8","REX D0 /0","N.E.","V","","pseudo64","w,r","Y","8" +"ROL r/m8, CL","ROLB CL, r/m8","rolb CL, r/m8","D2 /0","V","V","","","rw,r","Y","8" +"ROL r/m8, CL","ROLB CL, r/m8","rolb CL, r/m8","REX D2 /0","N.E.","V","","pseudo64","w,r","Y","8" +"ROL r/m8, imm8","ROLB imm8, r/m8","rolb imm8, r/m8","REX C0 /0 ib","N.E.","V","","pseudo64","w,r","Y","8" +"ROL r/m8, imm8u","ROLB imm8u, r/m8","rolb imm8u, r/m8","C0 /0 ib","V","V","","","rw,r","Y","8" +"ROL r/m32, 1","ROLL 1, r/m32","roll 1, r/m32","D1 /0","V","V","","operand32","rw,r","Y","32" +"ROL r/m32, CL","ROLL CL, r/m32","roll CL, r/m32","D3 /0","V","V","","operand32","rw,r","Y","32" +"ROL r/m32, imm8u","ROLL imm8u, r/m32","roll imm8u, r/m32","C1 /0 ib","V","V","","operand32","rw,r","Y","32" +"ROL r/m64, 1","ROLQ 1, r/m64","rolq 1, r/m64","REX.W D1 /0","N.S.","V","","","rw,r","Y","64" +"ROL r/m64, CL","ROLQ CL, r/m64","rolq CL, r/m64","REX.W D3 /0","N.S.","V","","","rw,r","Y","64" +"ROL r/m64, imm8u","ROLQ imm8u, r/m64","rolq imm8u, r/m64","REX.W C1 /0 ib","N.S.","V","","","rw,r","Y","64" +"ROL r/m16, 1","ROLW 1, r/m16","rolw 1, r/m16","D1 /0","V","V","","operand16","rw,r","Y","16" +"ROL r/m16, CL","ROLW CL, r/m16","rolw CL, r/m16","D3 /0","V","V","","operand16","rw,r","Y","16" +"ROL r/m16, imm8u","ROLW imm8u, r/m16","rolw imm8u, r/m16","C1 /0 ib","V","V","","operand16","rw,r","Y","16" +"ROR r/m8, 1","RORB 1, r/m8","rorb 1, r/m8","D0 /1","V","V","","","rw,r","Y","8" +"ROR r/m8, 1","RORB 1, r/m8","rorb 1, r/m8","REX D0 /1","N.E.","V","","pseudo64","w,r","Y","8" +"ROR r/m8, CL","RORB CL, r/m8","rorb CL, r/m8","D2 /1","V","V","","","rw,r","Y","8" +"ROR r/m8, CL","RORB CL, r/m8","rorb CL, r/m8","REX D2 /1","N.E.","V","","pseudo64","w,r","Y","8" +"ROR r/m8, imm8","RORB imm8, r/m8","rorb imm8, r/m8","REX C0 /1 ib","N.E.","V","","pseudo64","w,r","Y","8" +"ROR r/m8, imm8u","RORB imm8u, r/m8","rorb imm8u, r/m8","C0 /1 ib","V","V","","","rw,r","Y","8" +"ROR r/m32, 1","RORL 1, r/m32","rorl 1, r/m32","D1 /1","V","V","","operand32","rw,r","Y","32" +"ROR r/m32, CL","RORL CL, r/m32","rorl CL, r/m32","D3 /1","V","V","","operand32","rw,r","Y","32" +"ROR r/m32, imm8u","RORL imm8u, r/m32","rorl imm8u, r/m32","C1 /1 ib","V","V","","operand32","rw,r","Y","32" +"ROR r/m64, 1","RORQ 1, r/m64","rorq 1, r/m64","REX.W D1 /1","N.S.","V","","","rw,r","Y","64" +"ROR r/m64, CL","RORQ CL, r/m64","rorq CL, r/m64","REX.W D3 /1","N.S.","V","","","rw,r","Y","64" +"ROR r/m64, imm8u","RORQ imm8u, r/m64","rorq imm8u, r/m64","REX.W C1 /1 ib","N.S.","V","","","rw,r","Y","64" +"ROR r/m16, 1","RORW 1, r/m16","rorw 1, r/m16","D1 /1","V","V","","operand16","rw,r","Y","16" +"ROR r/m16, CL","RORW CL, r/m16","rorw CL, r/m16","D3 /1","V","V","","operand16","rw,r","Y","16" +"ROR r/m16, imm8u","RORW imm8u, r/m16","rorw imm8u, r/m16","C1 /1 ib","V","V","","operand16","rw,r","Y","16" +"RORX r32, r/m32, imm8u","RORXL imm8u, r/m32, r32","rorxl imm8u, r/m32, r32","VEX.128.F2.0F3A.W0 F0 /r ib","V","V","BMI2","","w,r,r","Y","32" +"RORX r64, r/m64, imm8u","RORXQ imm8u, r/m64, r64","rorxq imm8u, r/m64, r64","VEX.128.F2.0F3A.W1 F0 /r ib","N.S.","V","BMI2","","w,r,r","Y","64" +"ROUNDPD xmm1, xmm2/m128, imm8u","ROUNDPD imm8u, xmm2/m128, xmm1","roundpd imm8u, xmm2/m128, xmm1","66 0F 3A 09 /r ib","V","V","SSE4_1","","w,r,r","","" +"ROUNDPS xmm1, xmm2/m128, imm8u","ROUNDPS imm8u, xmm2/m128, 
xmm1","roundps imm8u, xmm2/m128, xmm1","66 0F 3A 08 /r ib","V","V","SSE4_1","","w,r,r","","" +"ROUNDSD xmm1, xmm2/m64, imm8u","ROUNDSD imm8u, xmm2/m64, xmm1","roundsd imm8u, xmm2/m64, xmm1","66 0F 3A 0B /r ib","V","V","SSE4_1","","w,r,r","","" +"ROUNDSS xmm1, xmm2/m32, imm8u","ROUNDSS imm8u, xmm2/m32, xmm1","roundss imm8u, xmm2/m32, xmm1","66 0F 3A 0A /r ib","V","V","SSE4_1","","w,r,r","","" +"RSM","RSM","rsm","0F AA","V","V","","","","","" +"RSQRTPS xmm1, xmm2/m128","RSQRTPS xmm2/m128, xmm1","rsqrtps xmm2/m128, xmm1","0F 52 /r","V","V","SSE","","w,r","","" +"RSQRTSS xmm1, xmm2/m32","RSQRTSS xmm2/m32, xmm1","rsqrtss xmm2/m32, xmm1","F3 0F 52 /r","V","V","SSE","","w,r","","" +"RSTORSSP m64","RSTORSSP m64","rstorssp m64","F3 0F 01 /5","V","V","CET","modrm_memonly","rw","","" +"SAHF","SAHF","sahf","9E","V","V","LAHFSAHF","","","","" +"SAL r/m8, 1","SALB 1, r/m8","salb 1, r/m8","D0 /4","V","V","","pseudo","rw,r","Y","8" +"SAL r/m8, 1","SALB 1, r/m8","salb 1, r/m8","REX D0 /4","N.E.","V","","pseudo","rw,r","Y","8" +"SAL r/m8, CL","SALB CL, r/m8","salb CL, r/m8","D2 /4","V","V","","pseudo","rw,r","Y","8" +"SAL r/m8, CL","SALB CL, r/m8","salb CL, r/m8","REX D2 /4","N.E.","V","","pseudo","rw,r","Y","8" +"SAL r/m8, imm8","SALB imm8, r/m8","salb imm8, r/m8","C0 /4 ib","V","V","","pseudo","rw,r","Y","8" +"SAL r/m8, imm8","SALB imm8, r/m8","salb imm8, r/m8","REX C0 /4 ib","N.E.","V","","pseudo","rw,r","Y","8" +"SALC","SALC","salc","D6","V","N.S.","","","","","" +"SAL r/m32, 1","SALL 1, r/m32","sall 1, r/m32","D1 /4","V","V","","operand32,pseudo","rw,r","Y","32" +"SAL r/m32, CL","SALL CL, r/m32","sall CL, r/m32","D3 /4","V","V","","operand32,pseudo","rw,r","Y","32" +"SAL r/m32, imm8","SALL imm8, r/m32","sall imm8, r/m32","C1 /4 ib","V","V","","operand32,pseudo","rw,r","Y","32" +"SAL r/m64, 1","SALQ 1, r/m64","salq 1, r/m64","REX.W D1 /4","N.E.","V","","pseudo","rw,r","Y","64" +"SAL r/m64, CL","SALQ CL, r/m64","salq CL, r/m64","REX.W D3 /4","N.E.","V","","pseudo","rw,r","Y","64" +"SAL r/m64, imm8","SALQ imm8, r/m64","salq imm8, r/m64","REX.W C1 /4 ib","N.E.","V","","pseudo","rw,r","Y","64" +"SAL r/m16, 1","SALW 1, r/m16","salw 1, r/m16","D1 /4","V","V","","operand16,pseudo","rw,r","Y","16" +"SAL r/m16, CL","SALW CL, r/m16","salw CL, r/m16","D3 /4","V","V","","operand16,pseudo","rw,r","Y","16" +"SAL r/m16, imm8","SALW imm8, r/m16","salw imm8, r/m16","C1 /4 ib","V","V","","operand16,pseudo","rw,r","Y","16" +"SAR r/m8, 1","SARB 1, r/m8","sarb 1, r/m8","D0 /7","V","V","","","rw,r","Y","8" +"SAR r/m8, 1","SARB 1, r/m8","sarb 1, r/m8","REX D0 /7","N.E.","V","","pseudo64","rw,r","Y","8" +"SAR r/m8, CL","SARB CL, r/m8","sarb CL, r/m8","D2 /7","V","V","","","rw,r","Y","8" +"SAR r/m8, CL","SARB CL, r/m8","sarb CL, r/m8","REX D2 /7","N.E.","V","","pseudo64","rw,r","Y","8" +"SAR r/m8, imm8","SARB imm8, r/m8","sarb imm8, r/m8","REX C0 /7 ib","N.E.","V","","pseudo64","rw,r","Y","8" +"SAR r/m8, imm8u","SARB imm8u, r/m8","sarb imm8u, r/m8","C0 /7 ib","V","V","","","rw,r","Y","8" +"SAR r/m32, 1","SARL 1, r/m32","sarl 1, r/m32","D1 /7","V","V","","operand32","rw,r","Y","32" +"SAR r/m32, CL","SARL CL, r/m32","sarl CL, r/m32","D3 /7","V","V","","operand32","rw,r","Y","32" +"SAR r/m32, imm8u","SARL imm8u, r/m32","sarl imm8u, r/m32","C1 /7 ib","V","V","","operand32","rw,r","Y","32" +"SAR r/m64, 1","SARQ 1, r/m64","sarq 1, r/m64","REX.W D1 /7","N.S.","V","","","rw,r","Y","64" +"SAR r/m64, CL","SARQ CL, r/m64","sarq CL, r/m64","REX.W D3 /7","N.S.","V","","","rw,r","Y","64" +"SAR r/m64, imm8u","SARQ imm8u, r/m64","sarq imm8u, 
r/m64","REX.W C1 /7 ib","N.S.","V","","","rw,r","Y","64" +"SAR r/m16, 1","SARW 1, r/m16","sarw 1, r/m16","D1 /7","V","V","","operand16","rw,r","Y","16" +"SAR r/m16, CL","SARW CL, r/m16","sarw CL, r/m16","D3 /7","V","V","","operand16","rw,r","Y","16" +"SAR r/m16, imm8u","SARW imm8u, r/m16","sarw imm8u, r/m16","C1 /7 ib","V","V","","operand16","rw,r","Y","16" +"SARX r32, r/m32, r32V","SARXL r32V, r/m32, r32","sarxl r32V, r/m32, r32","VEX.NDS.128.F3.0F38.W0 F7 /r","V","V","BMI2","","w,r,r","Y","32" +"SARX r64, r/m64, r64V","SARXQ r64V, r/m64, r64","sarxq r64V, r/m64, r64","VEX.NDS.128.F3.0F38.W1 F7 /r","N.S.","V","BMI2","","w,r,r","Y","64" +"SAVESSP","SAVESSP","savessp","F3 0F 01 EA","V","V","CET","","","","" +"SBB AL, imm8","SBBB imm8, AL","sbbb imm8, AL","1C ib","V","V","","","rw,r","Y","8" +"SBB r/m8, imm8","SBBB imm8, r/m8","sbbb imm8, r/m8","80 /3 ib","V","V","","","rw,r","Y","8" +"SBB r/m8, imm8","SBBB imm8, r/m8","sbbb imm8, r/m8","82 /3 ib","V","N.S.","","","rw,r","Y","8" +"SBB r/m8, imm8","SBBB imm8, r/m8","sbbb imm8, r/m8","REX 80 /3 ib","N.E.","V","","pseudo64","w,r","Y","8" +"SBB r8, r/m8","SBBB r/m8, r8","sbbb r/m8, r8","1A /r","V","V","","","rw,r","Y","8" +"SBB r8, r/m8","SBBB r/m8, r8","sbbb r/m8, r8","REX 1A /r","N.E.","V","","pseudo64","w,r","Y","8" +"SBB r/m8, r8","SBBB r8, r/m8","sbbb r8, r/m8","18 /r","V","V","","","rw,r","Y","8" +"SBB r/m8, r8","SBBB r8, r/m8","sbbb r8, r/m8","REX 18 /r","N.E.","V","","pseudo64","w,r","Y","8" +"SBB EAX, imm32","SBBL imm32, EAX","sbbl imm32, EAX","1D id","V","V","","operand32","rw,r","Y","32" +"SBB r/m32, imm32","SBBL imm32, r/m32","sbbl imm32, r/m32","81 /3 id","V","V","","operand32","rw,r","Y","32" +"SBB r/m32, imm8","SBBL imm8, r/m32","sbbl imm8, r/m32","83 /3 ib","V","V","","operand32","rw,r","Y","32" +"SBB r32, r/m32","SBBL r/m32, r32","sbbl r/m32, r32","1B /r","V","V","","operand32","rw,r","Y","32" +"SBB r/m32, r32","SBBL r32, r/m32","sbbl r32, r/m32","19 /r","V","V","","operand32","rw,r","Y","32" +"SBB RAX, imm32","SBBQ imm32, RAX","sbbq imm32, RAX","REX.W 1D id","N.S.","V","","","rw,r","Y","64" +"SBB r/m64, imm32","SBBQ imm32, r/m64","sbbq imm32, r/m64","REX.W 81 /3 id","N.S.","V","","","rw,r","Y","64" +"SBB r/m64, imm8","SBBQ imm8, r/m64","sbbq imm8, r/m64","REX.W 83 /3 ib","N.S.","V","","","rw,r","Y","64" +"SBB r64, r/m64","SBBQ r/m64, r64","sbbq r/m64, r64","REX.W 1B /r","N.S.","V","","","rw,r","Y","64" +"SBB r/m64, r64","SBBQ r64, r/m64","sbbq r64, r/m64","REX.W 19 /r","N.S.","V","","","rw,r","Y","64" +"SBB AX, imm16","SBBW imm16, AX","sbbw imm16, AX","1D iw","V","V","","operand16","rw,r","Y","16" +"SBB r/m16, imm16","SBBW imm16, r/m16","sbbw imm16, r/m16","81 /3 iw","V","V","","operand16","rw,r","Y","16" +"SBB r/m16, imm8","SBBW imm8, r/m16","sbbw imm8, r/m16","83 /3 ib","V","V","","operand16","rw,r","Y","16" +"SBB r16, r/m16","SBBW r/m16, r16","sbbw r/m16, r16","1B /r","V","V","","operand16","rw,r","Y","16" +"SBB r/m16, r16","SBBW r16, r/m16","sbbw r16, r/m16","19 /r","V","V","","operand16","rw,r","Y","16" +"SCASB","SCASB","scasb","AE","V","V","","","","","" +"SCASD","SCASL","scasl","AF","V","V","","operand32","","","" +"SCASQ","SCASQ","scasq","REX.W AF","N.S.","V","","","","","" +"SCASW","SCASW","scasw","AF","V","V","","operand16","","","" +"SETAE r/m8","SETCC r/m8","setae r/m8","0F 93 /r","V","V","","","w","","" +"SETNB r/m8","SETCC r/m8","setnb r/m8","0F 93 /r","V","V","","pseudo","r","","" +"SETNC r/m8","SETCC r/m8","setnc r/m8","0F 93 /r","V","V","","pseudo","r","","" +"SETAE r/m8","SETCC r/m8","setae r/m8","REX 0F 93 
/r","N.E.","V","","pseudo64","r","","" +"SETNB r/m8","SETCC r/m8","setnb r/m8","REX 0F 93 /r","N.E.","V","","pseudo","r","","" +"SETNC r/m8","SETCC r/m8","setnc r/m8","REX 0F 93 /r","N.E.","V","","pseudo","r","","" +"SETB r/m8","SETCS r/m8","setb r/m8","0F 92 /r","V","V","","","w","","" +"SETC r/m8","SETCS r/m8","setc r/m8","0F 92 /r","V","V","","pseudo","r","","" +"SETNAE r/m8","SETCS r/m8","setnae r/m8","0F 92 /r","V","V","","pseudo","r","","" +"SETB r/m8","SETCS r/m8","setb r/m8","REX 0F 92 /r","N.E.","V","","pseudo64","r","","" +"SETC r/m8","SETCS r/m8","setc r/m8","REX 0F 92 /r","N.E.","V","","pseudo","r","","" +"SETNAE r/m8","SETCS r/m8","setnae r/m8","REX 0F 92 /r","N.E.","V","","pseudo","r","","" +"SETE r/m8","SETEQ r/m8","sete r/m8","0F 94 /r","V","V","","","w","","" +"SETZ r/m8","SETEQ r/m8","setz r/m8","0F 94 /r","V","V","","pseudo","r","","" +"SETE r/m8","SETEQ r/m8","sete r/m8","REX 0F 94 /r","N.E.","V","","pseudo64","r","","" +"SETZ r/m8","SETEQ r/m8","setz r/m8","REX 0F 94 /r","N.E.","V","","pseudo","r","","" +"SETGE r/m8","SETGE r/m8","setge r/m8","0F 9D /r","V","V","","","w","","" +"SETNL r/m8","SETGE r/m8","setnl r/m8","0F 9D /r","V","V","","pseudo","r","","" +"SETGE r/m8","SETGE r/m8","setge r/m8","REX 0F 9D /r","N.E.","V","","pseudo64","r","","" +"SETNL r/m8","SETGE r/m8","setnl r/m8","REX 0F 9D /r","N.E.","V","","pseudo","r","","" +"SETG r/m8","SETGT r/m8","setg r/m8","0F 9F /r","V","V","","","w","","" +"SETNLE r/m8","SETGT r/m8","setnle r/m8","0F 9F /r","V","V","","pseudo","r","","" +"SETG r/m8","SETGT r/m8","setg r/m8","REX 0F 9F /r","N.E.","V","","pseudo64","r","","" +"SETNLE r/m8","SETGT r/m8","setnle r/m8","REX 0F 9F /r","N.E.","V","","pseudo","r","","" +"SETA r/m8","SETHI r/m8","seta r/m8","0F 97 /r","V","V","","","w","","" +"SETNBE r/m8","SETHI r/m8","setnbe r/m8","0F 97 /r","V","V","","pseudo","r","","" +"SETA r/m8","SETHI r/m8","seta r/m8","REX 0F 97 /r","N.E.","V","","pseudo64","r","","" +"SETNBE r/m8","SETHI r/m8","setnbe r/m8","REX 0F 97 /r","N.E.","V","","pseudo","r","","" +"SETLE r/m8","SETLE r/m8","setle r/m8","0F 9E /r","V","V","","","w","","" +"SETNG r/m8","SETLE r/m8","setng r/m8","0F 9E /r","V","V","","pseudo","r","","" +"SETLE r/m8","SETLE r/m8","setle r/m8","REX 0F 9E /r","N.E.","V","","pseudo64","r","","" +"SETNG r/m8","SETLE r/m8","setng r/m8","REX 0F 9E /r","N.E.","V","","pseudo","r","","" +"SETBE r/m8","SETLS r/m8","setbe r/m8","0F 96 /r","V","V","","","w","","" +"SETNA r/m8","SETLS r/m8","setna r/m8","0F 96 /r","V","V","","pseudo","r","","" +"SETBE r/m8","SETLS r/m8","setbe r/m8","REX 0F 96 /r","N.E.","V","","pseudo64","r","","" +"SETNA r/m8","SETLS r/m8","setna r/m8","REX 0F 96 /r","N.E.","V","","pseudo","r","","" +"SETL r/m8","SETLT r/m8","setl r/m8","0F 9C /r","V","V","","","w","","" +"SETNGE r/m8","SETLT r/m8","setnge r/m8","0F 9C /r","V","V","","pseudo","r","","" +"SETL r/m8","SETLT r/m8","setl r/m8","REX 0F 9C /r","N.E.","V","","pseudo64","r","","" +"SETNGE r/m8","SETLT r/m8","setnge r/m8","REX 0F 9C /r","N.E.","V","","pseudo","r","","" +"SETS r/m8","SETMI r/m8","sets r/m8","0F 98 /r","V","V","","","w","","" +"SETS r/m8","SETMI r/m8","sets r/m8","REX 0F 98 /r","N.E.","V","","pseudo64","r","","" +"SETNE r/m8","SETNE r/m8","setne r/m8","0F 95 /r","V","V","","","w","","" +"SETNZ r/m8","SETNE r/m8","setnz r/m8","0F 95 /r","V","V","","pseudo","r","","" +"SETNE r/m8","SETNE r/m8","setne r/m8","REX 0F 95 /r","N.E.","V","","pseudo64","r","","" +"SETNZ r/m8","SETNE r/m8","setnz r/m8","REX 0F 95 /r","N.E.","V","","pseudo","r","","" +"SETNO 
r/m8","SETOC r/m8","setno r/m8","0F 91 /r","V","V","","","w","","" +"SETNO r/m8","SETOC r/m8","setno r/m8","REX 0F 91 /r","N.E.","V","","pseudo64","r","","" +"SETO r/m8","SETOS r/m8","seto r/m8","0F 90 /r","V","V","","","w","","" +"SETO r/m8","SETOS r/m8","seto r/m8","REX 0F 90 /r","N.E.","V","","pseudo64","r","","" +"SETNP r/m8","SETPC r/m8","setnp r/m8","0F 9B /r","V","V","","","w","","" +"SETPO r/m8","SETPC r/m8","setpo r/m8","0F 9B /r","V","V","","pseudo","r","","" +"SETNP r/m8","SETPC r/m8","setnp r/m8","REX 0F 9B /r","N.E.","V","","pseudo64","r","","" +"SETPO r/m8","SETPC r/m8","setpo r/m8","REX 0F 9B /r","N.E.","V","","pseudo","r","","" +"SETNS r/m8","SETPL r/m8","setns r/m8","0F 99 /r","V","V","","","w","","" +"SETNS r/m8","SETPL r/m8","setns r/m8","REX 0F 99 /r","N.E.","V","","pseudo64","r","","" +"SETP r/m8","SETPS r/m8","setp r/m8","0F 9A /r","V","V","","","w","","" +"SETPE r/m8","SETPS r/m8","setpe r/m8","0F 9A /r","V","V","","pseudo","r","","" +"SETP r/m8","SETPS r/m8","setp r/m8","REX 0F 9A /r","N.E.","V","","pseudo64","r","","" +"SETPE r/m8","SETPS r/m8","setpe r/m8","REX 0F 9A /r","N.E.","V","","pseudo","r","","" +"SETSSBSY","SETSSBSY","setssbsy","F3 0F 01 E8","V","V","CET","","","","" +"SFENCE","SFENCE","sfence","0F AE /7","V","V","SSE","","","","" +"SGDT m16&32","SGDT m16&32","sgdt m16&32","0F 01 /0","V","N.S.","","modrm_memonly","w","","" +"SGDT m16&64","SGDT m16&64","sgdt m16&64","0F 01 /0","N.S.","V","","default64,modrm_memonly","w","","" +"SHA1MSG1 xmm1, xmm2/m128","SHA1MSG1 xmm2/m128, xmm1","sha1msg1 xmm2/m128, xmm1","0F 38 C9 /r","V","V","SHA","","rw,r","","" +"SHA1MSG2 xmm1, xmm2/m128","SHA1MSG2 xmm2/m128, xmm1","sha1msg2 xmm2/m128, xmm1","0F 38 CA /r","V","V","SHA","","rw,r","","" +"SHA1NEXTE xmm1, xmm2/m128","SHA1NEXTE xmm2/m128, xmm1","sha1nexte xmm2/m128, xmm1","0F 38 C8 /r","V","V","SHA","","rw,r","","" +"SHA1RNDS4 xmm1, xmm2/m128, imm8u:2","SHA1RNDS4 imm8u:2, xmm2/m128, xmm1","sha1rnds4 imm8u:2, xmm2/m128, xmm1","0F 3A CC /r ib","V","V","SHA","","rw,r,r","","" +"SHA256MSG1 xmm1, xmm2/m128","SHA256MSG1 xmm2/m128, xmm1","sha256msg1 xmm2/m128, xmm1","0F 38 CC /r","V","V","SHA","","rw,r","","" +"SHA256MSG2 xmm1, xmm2/m128","SHA256MSG2 xmm2/m128, xmm1","sha256msg2 xmm2/m128, xmm1","0F 38 CD /r","V","V","SHA","","rw,r","","" +"SHA256RNDS2 xmm1, xmm2/m128, ","SHA256RNDS2 , xmm2/m128, xmm1","sha256rnds2 , xmm2/m128, xmm1","0F 38 CB /r","V","V","SHA","","rw,r,r","","" +"SHL r/m8, 1","SHLB 1, r/m8","shlb 1, r/m8","D0 /4","V","V","","","rw,r","Y","8" +"SHL r/m8, 1","SHLB 1, r/m8","shlb 1, r/m8","D0 /6","V","V","","","rw,r","Y","8" +"SHL r/m8, 1","SHLB 1, r/m8","shlb 1, r/m8","REX D0 /4","N.E.","V","","pseudo64","rw,r","Y","8" +"SHL r/m8, CL","SHLB CL, r/m8","shlb CL, r/m8","D2 /4","V","V","","","rw,r","Y","8" +"SHL r/m8, CL","SHLB CL, r/m8","shlb CL, r/m8","D2 /6","V","V","","","rw,r","Y","8" +"SHL r/m8, CL","SHLB CL, r/m8","shlb CL, r/m8","REX D2 /4","N.E.","V","","pseudo64","rw,r","Y","8" +"SHL r/m8, imm8","SHLB imm8, r/m8","shlb imm8, r/m8","REX C0 /4 ib","N.E.","V","","pseudo64","rw,r","Y","8" +"SHL r/m8, imm8u","SHLB imm8u, r/m8","shlb imm8u, r/m8","C0 /4 ib","V","V","","","rw,r","Y","8" +"SHL r/m8, imm8u","SHLB imm8u, r/m8","shlb imm8u, r/m8","C0 /6 ib","V","V","","","rw,r","Y","8" +"SHL r/m32, 1","SHLL 1, r/m32","shll 1, r/m32","D1 /4","V","V","","operand32","rw,r","Y","32" +"SHL r/m32, 1","SHLL 1, r/m32","shll 1, r/m32","D1 /6","V","V","","operand32","rw,r","Y","32" +"SHL r/m32, CL","SHLL CL, r/m32","shll CL, r/m32","D3 /4","V","V","","operand32","rw,r","Y","32" 
+"SHL r/m32, CL","SHLL CL, r/m32","shll CL, r/m32","D3 /6","V","V","","operand32","rw,r","Y","32" +"SHLD r/m32, r32, CL","SHLL CL, r32, r/m32","shldl CL, r32, r/m32","0F A5 /r","V","V","","operand32","rw,r,r","Y","32" +"SHL r/m32, imm8u","SHLL imm8u, r/m32","shll imm8u, r/m32","C1 /4 ib","V","V","","operand32","rw,r","Y","32" +"SHL r/m32, imm8u","SHLL imm8u, r/m32","shll imm8u, r/m32","C1 /6 ib","V","V","","operand32","rw,r","Y","32" +"SHLD r/m32, r32, imm8u","SHLL imm8u, r32, r/m32","shldl imm8u, r32, r/m32","0F A4 /r ib","V","V","","operand32","rw,r,r","Y","32" +"SHL r/m64, 1","SHLQ 1, r/m64","shlq 1, r/m64","REX.W D1 /4","N.S.","V","","","rw,r","Y","64" +"SHL r/m64, 1","SHLQ 1, r/m64","shlq 1, r/m64","REX.W D1 /6","N.S.","V","","","rw,r","Y","64" +"SHL r/m64, CL","SHLQ CL, r/m64","shlq CL, r/m64","REX.W D3 /4","N.S.","V","","","rw,r","Y","64" +"SHL r/m64, CL","SHLQ CL, r/m64","shlq CL, r/m64","REX.W D3 /6","N.S.","V","","","rw,r","Y","64" +"SHLD r/m64, r64, CL","SHLQ CL, r64, r/m64","shldq CL, r64, r/m64","REX.W 0F A5 /r","N.S.","V","","","rw,r,r","Y","64" +"SHL r/m64, imm8u","SHLQ imm8u, r/m64","shlq imm8u, r/m64","REX.W C1 /4 ib","N.S.","V","","","rw,r","Y","64" +"SHL r/m64, imm8u","SHLQ imm8u, r/m64","shlq imm8u, r/m64","REX.W C1 /6 ib","N.S.","V","","","rw,r","Y","64" +"SHLD r/m64, r64, imm8u","SHLQ imm8u, r64, r/m64","shldq imm8u, r64, r/m64","REX.W 0F A4 /r ib","N.S.","V","","","rw,r,r","Y","64" +"SHL r/m16, 1","SHLW 1, r/m16","shlw 1, r/m16","D1 /4","V","V","","operand16","rw,r","Y","16" +"SHL r/m16, 1","SHLW 1, r/m16","shlw 1, r/m16","D1 /6","V","V","","operand16","rw,r","Y","16" +"SHL r/m16, CL","SHLW CL, r/m16","shlw CL, r/m16","D3 /4","V","V","","operand16","rw,r","Y","16" +"SHL r/m16, CL","SHLW CL, r/m16","shlw CL, r/m16","D3 /6","V","V","","operand16","rw,r","Y","16" +"SHLD r/m16, r16, CL","SHLW CL, r16, r/m16","shldw CL, r16, r/m16","0F A5 /r","V","V","","operand16","rw,r,r","Y","16" +"SHL r/m16, imm8u","SHLW imm8u, r/m16","shlw imm8u, r/m16","C1 /4 ib","V","V","","operand16","rw,r","Y","16" +"SHL r/m16, imm8u","SHLW imm8u, r/m16","shlw imm8u, r/m16","C1 /6 ib","V","V","","operand16","rw,r","Y","16" +"SHLD r/m16, r16, imm8u","SHLW imm8u, r16, r/m16","shldw imm8u, r16, r/m16","0F A4 /r ib","V","V","","operand16","rw,r,r","Y","16" +"SHLX r32, r/m32, r32V","SHLXL r32V, r/m32, r32","shlxl r32V, r/m32, r32","VEX.NDS.128.66.0F38.W0 F7 /r","V","V","BMI2","","w,r,r","Y","32" +"SHLX r64, r/m64, r64V","SHLXQ r64V, r/m64, r64","shlxq r64V, r/m64, r64","VEX.NDS.128.66.0F38.W1 F7 /r","N.S.","V","BMI2","","w,r,r","Y","64" +"SHR r/m8, 1","SHRB 1, r/m8","shrb 1, r/m8","D0 /5","V","V","","","rw,r","Y","8" +"SHR r/m8, 1","SHRB 1, r/m8","shrb 1, r/m8","REX D0 /5","N.E.","V","","pseudo64","rw,r","Y","8" +"SHR r/m8, CL","SHRB CL, r/m8","shrb CL, r/m8","D2 /5","V","V","","","rw,r","Y","8" +"SHR r/m8, CL","SHRB CL, r/m8","shrb CL, r/m8","REX D2 /5","N.E.","V","","pseudo64","rw,r","Y","8" +"SHR r/m8, imm8","SHRB imm8, r/m8","shrb imm8, r/m8","REX C0 /5 ib","N.E.","V","","pseudo64","rw,r","Y","8" +"SHR r/m8, imm8u","SHRB imm8u, r/m8","shrb imm8u, r/m8","C0 /5 ib","V","V","","","rw,r","Y","8" +"SHR r/m32, 1","SHRL 1, r/m32","shrl 1, r/m32","D1 /5","V","V","","operand32","rw,r","Y","32" +"SHR r/m32, CL","SHRL CL, r/m32","shrl CL, r/m32","D3 /5","V","V","","operand32","rw,r","Y","32" +"SHRD r/m32, r32, CL","SHRL CL, r32, r/m32","shrdl CL, r32, r/m32","0F AD /r","V","V","","operand32","rw,r,r","Y","32" +"SHR r/m32, imm8u","SHRL imm8u, r/m32","shrl imm8u, r/m32","C1 /5 
ib","V","V","","operand32","rw,r","Y","32" +"SHRD r/m32, r32, imm8u","SHRL imm8u, r32, r/m32","shrdl imm8u, r32, r/m32","0F AC /r ib","V","V","","operand32","rw,r,r","Y","32" +"SHR r/m64, 1","SHRQ 1, r/m64","shrq 1, r/m64","REX.W D1 /5","N.S.","V","","","rw,r","Y","64" +"SHR r/m64, CL","SHRQ CL, r/m64","shrq CL, r/m64","REX.W D3 /5","N.S.","V","","","rw,r","Y","64" +"SHRD r/m64, r64, CL","SHRQ CL, r64, r/m64","shrdq CL, r64, r/m64","REX.W 0F AD /r","N.S.","V","","","rw,r,r","Y","64" +"SHR r/m64, imm8u","SHRQ imm8u, r/m64","shrq imm8u, r/m64","REX.W C1 /5 ib","N.S.","V","","","rw,r","Y","64" +"SHRD r/m64, r64, imm8u","SHRQ imm8u, r64, r/m64","shrdq imm8u, r64, r/m64","REX.W 0F AC /r ib","N.S.","V","","","rw,r,r","Y","64" +"SHR r/m16, 1","SHRW 1, r/m16","shrw 1, r/m16","D1 /5","V","V","","operand16","rw,r","Y","16" +"SHR r/m16, CL","SHRW CL, r/m16","shrw CL, r/m16","D3 /5","V","V","","operand16","rw,r","Y","16" +"SHRD r/m16, r16, CL","SHRW CL, r16, r/m16","shrdw CL, r16, r/m16","0F AD /r","V","V","","operand16","rw,r,r","Y","16" +"SHR r/m16, imm8u","SHRW imm8u, r/m16","shrw imm8u, r/m16","C1 /5 ib","V","V","","operand16","rw,r","Y","16" +"SHRD r/m16, r16, imm8u","SHRW imm8u, r16, r/m16","shrdw imm8u, r16, r/m16","0F AC /r ib","V","V","","operand16","rw,r,r","Y","16" +"SHRX r32, r/m32, r32V","SHRXL r32V, r/m32, r32","shrxl r32V, r/m32, r32","VEX.NDS.128.F2.0F38.W0 F7 /r","V","V","BMI2","","w,r,r","Y","32" +"SHRX r64, r/m64, r64V","SHRXQ r64V, r/m64, r64","shrxq r64V, r/m64, r64","VEX.NDS.128.F2.0F38.W1 F7 /r","N.S.","V","BMI2","","w,r,r","Y","64" +"SHUFPD xmm1, xmm2/m128, imm8u","SHUFPD imm8u, xmm2/m128, xmm1","shufpd imm8u, xmm2/m128, xmm1","66 0F C6 /r ib","V","V","SSE2","","rw,r,r","","" +"SHUFPS xmm1, xmm2/m128, imm8u","SHUFPS imm8u, xmm2/m128, xmm1","shufps imm8u, xmm2/m128, xmm1","0F C6 /r ib","V","V","SSE","","rw,r,r","","" +"SIDT m16&32","SIDT m16&32","sidt m16&32","0F 01 /1","V","N.S.","","modrm_memonly","w","","" +"SIDT m16&64","SIDT m16&64","sidt m16&64","0F 01 /1","N.S.","V","","default64,modrm_memonly","w","","" +"SKINIT EAX","SKINIT EAX","skinit EAX","0F 01 DE","V","V","SVM","amd,modrm_regonly","r","","" +"SLDT r/m16","SLDTW r/m16","sldtw r/m16","0F 00 /0","V","V","","operand16","w","Y","16" +"SLDT r32/m16","SLDT{L/W} r32/m16","sldt{l/w} r32/m16","0F 00 /0","V","V","","operand32","w","Y","" +"SLDT r64/m16","SLDT{Q/W} r64/m16","sldt{q/w} r64/m16","REX.W 0F 00 /0","N.S.","V","","","w","Y","" +"SLWPCB rmr32","SLWPCBL rmr32","slwpcbl rmr32","XOP.128.09.W0 12 /1","V","V","XOP","amd,modrm_regonly,operand16,operand32","w","Y","32" +"SLWPCB rmr64","SLWPCBQ rmr64","slwpcbq rmr64","XOP.128.09.W0 12 /1","N.S.","V","XOP","amd,modrm_regonly,operand64","w","Y","64" +"SMSW r/m16","SMSWW r/m16","smsww r/m16","0F 01 /4","V","V","","operand16","w","Y","16" +"SMSW r32/m16","SMSW{L/W} r32/m16","smsw{l/w} r32/m16","0F 01 /4","V","V","","operand32","w","Y","" +"SMSW r64/m16","SMSW{Q/W} r64/m16","smsw{q/w} r64/m16","REX.W 0F 01 /4","N.S.","V","","","w","Y","" +"SQRTPD xmm1, xmm2/m128","SQRTPD xmm2/m128, xmm1","sqrtpd xmm2/m128, xmm1","66 0F 51 /r","V","V","SSE2","","w,r","","" +"SQRTPS xmm1, xmm2/m128","SQRTPS xmm2/m128, xmm1","sqrtps xmm2/m128, xmm1","0F 51 /r","V","V","SSE","","w,r","","" +"SQRTSD xmm1, xmm2/m64","SQRTSD xmm2/m64, xmm1","sqrtsd xmm2/m64, xmm1","F2 0F 51 /r","V","V","SSE2","","w,r","","" +"SQRTSS xmm1, xmm2/m32","SQRTSS xmm2/m32, xmm1","sqrtss xmm2/m32, xmm1","F3 0F 51 /r","V","V","SSE","","w,r","","" +"STAC","STAC","stac","0F 01 CB","V","V","","","","","" 
+"STC","STC","stc","F9","V","V","","","","","" +"STD","STD","std","FD","V","V","","","","","" +"STGI","STGI","stgi","0F 01 DC","V","V","SVM","amd","","","" +"STI","STI","sti","FB","V","V","","","","","" +"STMXCSR m32","STMXCSR m32","stmxcsr m32","0F AE /3","V","V","SSE","modrm_memonly","w","","" +"STOSB","STOSB","stosb","AA","V","V","","","","","" +"STOSD","STOSL","stosl","AB","V","V","","operand32","","","" +"STOSQ","STOSQ","stosq","REX.W AB","N.S.","V","","","","","" +"STOSW","STOSW","stosw","AB","V","V","","operand16","","","" +"STR r/m16","STRW r/m16","strw r/m16","0F 00 /1","V","V","","operand16","w","Y","16" +"STR r32/m16","STR{L/W} r32/m16","str{l/w} r32/m16","0F 00 /1","V","V","","operand32","w","Y","" +"STR r64/m16","STR{Q/W} r64/m16","str{q/w} r64/m16","REX.W 0F 00 /1","N.S.","V","","","w","Y","" +"SUB AL, imm8","SUBB imm8, AL","subb imm8, AL","2C ib","V","V","","","rw,r","Y","8" +"SUB r/m8, imm8","SUBB imm8, r/m8","subb imm8, r/m8","80 /5 ib","V","V","","","rw,r","Y","8" +"SUB r/m8, imm8","SUBB imm8, r/m8","subb imm8, r/m8","82 /5 ib","V","N.S.","","","rw,r","Y","8" +"SUB r/m8, imm8","SUBB imm8, r/m8","subb imm8, r/m8","REX 80 /5 ib","N.E.","V","","pseudo64","rw,r","Y","8" +"SUB r8, r/m8","SUBB r/m8, r8","subb r/m8, r8","2A /r","V","V","","","rw,r","Y","8" +"SUB r8, r/m8","SUBB r/m8, r8","subb r/m8, r8","REX 2A /r","N.E.","V","","pseudo64","rw,r","Y","8" +"SUB r/m8, r8","SUBB r8, r/m8","subb r8, r/m8","28 /r","V","V","","","rw,r","Y","8" +"SUB r/m8, r8","SUBB r8, r/m8","subb r8, r/m8","REX 28 /r","N.E.","V","","pseudo64","rw,r","Y","8" +"SUB EAX, imm32","SUBL imm32, EAX","subl imm32, EAX","2D id","V","V","","operand32","rw,r","Y","32" +"SUB r/m32, imm32","SUBL imm32, r/m32","subl imm32, r/m32","81 /5 id","V","V","","operand32","rw,r","Y","32" +"SUB r/m32, imm8","SUBL imm8, r/m32","subl imm8, r/m32","83 /5 ib","V","V","","operand32","rw,r","Y","32" +"SUB r32, r/m32","SUBL r/m32, r32","subl r/m32, r32","2B /r","V","V","","operand32","rw,r","Y","32" +"SUB r/m32, r32","SUBL r32, r/m32","subl r32, r/m32","29 /r","V","V","","operand32","rw,r","Y","32" +"SUBPD xmm1, xmm2/m128","SUBPD xmm2/m128, xmm1","subpd xmm2/m128, xmm1","66 0F 5C /r","V","V","SSE2","","rw,r","","" +"SUBPS xmm1, xmm2/m128","SUBPS xmm2/m128, xmm1","subps xmm2/m128, xmm1","0F 5C /r","V","V","SSE","","rw,r","","" +"SUB RAX, imm32","SUBQ imm32, RAX","subq imm32, RAX","REX.W 2D id","N.S.","V","","","rw,r","Y","64" +"SUB r/m64, imm32","SUBQ imm32, r/m64","subq imm32, r/m64","REX.W 81 /5 id","N.S.","V","","","rw,r","Y","64" +"SUB r/m64, imm8","SUBQ imm8, r/m64","subq imm8, r/m64","REX.W 83 /5 ib","N.S.","V","","","rw,r","Y","64" +"SUB r64, r/m64","SUBQ r/m64, r64","subq r/m64, r64","REX.W 2B /r","N.S.","V","","","rw,r","Y","64" +"SUB r/m64, r64","SUBQ r64, r/m64","subq r64, r/m64","REX.W 29 /r","N.S.","V","","","rw,r","Y","64" +"SUBSD xmm1, xmm2/m64","SUBSD xmm2/m64, xmm1","subsd xmm2/m64, xmm1","F2 0F 5C /r","V","V","SSE2","","rw,r","","" +"SUBSS xmm1, xmm2/m32","SUBSS xmm2/m32, xmm1","subss xmm2/m32, xmm1","F3 0F 5C /r","V","V","SSE","","rw,r","","" +"SUB AX, imm16","SUBW imm16, AX","subw imm16, AX","2D iw","V","V","","operand16","rw,r","Y","16" +"SUB r/m16, imm16","SUBW imm16, r/m16","subw imm16, r/m16","81 /5 iw","V","V","","operand16","rw,r","Y","16" +"SUB r/m16, imm8","SUBW imm8, r/m16","subw imm8, r/m16","83 /5 ib","V","V","","operand16","rw,r","Y","16" +"SUB r16, r/m16","SUBW r/m16, r16","subw r/m16, r16","2B /r","V","V","","operand16","rw,r","Y","16" +"SUB r/m16, r16","SUBW r16, r/m16","subw r16, r/m16","29 
/r","V","V","","operand16","rw,r","Y","16" +"SWAPGS","SWAPGS","swapgs","0F 01 F8","N.S.","V","","","","","" +"SYSCALL","SYSCALL","syscall","0F 05","N.S.","V","","default64","","","" +"SYSCALL","SYSCALL","syscall","0F 05","V","N.S.","AMD","amd","","","" +"SYSENTER","SYSENTER","sysenter","0F 34","V","V","PPRO","","","","" +"SYSEXIT","SYSEXIT","sysexit","0F 35","V","V","PPRO","","","","" +"SYSEXIT","SYSEXIT","sysexit","REX.W 0F 35","N.E.","V","","pseudo","","","" +"SYSRET","SYSRET","sysretw/sysretl/sysretl","0F 07","V","N.S.","AMD","amd","","","" +"SYSRET","SYSRET","sysretw/sysretl/sysretl","0F 07","N.S.","V","","operand32,operand64","","","" +"SYSRET","SYSRET","sysretw/sysretl/sysretl","REX.W 0F 07","I","V","","pseudo","","","" +"T1MSKC r32V, r/m32","T1MSKCL r/m32, r32V","t1mskcl r/m32, r32V","XOP.NDD.128.09.WIG 01 /7","V","V","TBM","amd,operand16,operand32","w,r","Y","32" +"T1MSKC r64V, r/m64","T1MSKCQ r/m64, r64V","t1mskcq r/m64, r64V","XOP.NDD.128.09.WIG 01 /7","N.S.","V","TBM","amd,operand64","w,r","Y","64" +"TEST AL, imm8","TESTB imm8, AL","testb imm8, AL","A8 ib","V","V","","","r,r","Y","8" +"TEST r/m8, imm8","TESTB imm8, r/m8","testb imm8, r/m8","F6 /0 ib","V","V","","","r,r","Y","8" +"TEST r/m8, imm8","TESTB imm8, r/m8","testb imm8, r/m8","F6 /1 ib","V","V","","","r,r","Y","8" +"TEST r/m8, imm8","TESTB imm8, r/m8","testb imm8, r/m8","REX F6 /0 ib","N.E.","V","","pseudo64","r,r","Y","8" +"TEST r/m8, r8","TESTB r8, r/m8","testb r8, r/m8","84 /r","V","V","","","r,r","Y","8" +"TEST r/m8, r8","TESTB r8, r/m8","testb r8, r/m8","REX 84 /r","N.E.","V","","pseudo64","r,r","Y","8" +"TEST EAX, imm32","TESTL imm32, EAX","testl imm32, EAX","A9 id","V","V","","operand32","r,r","Y","32" +"TEST r/m32, imm32","TESTL imm32, r/m32","testl imm32, r/m32","F7 /0 id","V","V","","operand32","r,r","Y","32" +"TEST r/m32, imm32","TESTL imm32, r/m32","testl imm32, r/m32","F7 /1 id","V","V","","operand32","r,r","Y","32" +"TEST r/m32, r32","TESTL r32, r/m32","testl r32, r/m32","85 /r","V","V","","operand32","r,r","Y","32" +"TEST RAX, imm32","TESTQ imm32, RAX","testq imm32, RAX","REX.W A9 id","N.S.","V","","","r,r","Y","64" +"TEST r/m64, imm32","TESTQ imm32, r/m64","testq imm32, r/m64","REX.W F7 /0 id","N.S.","V","","","r,r","Y","64" +"TEST r/m64, imm32","TESTQ imm32, r/m64","testq imm32, r/m64","REX.W F7 /1 id","N.S.","V","","","r,r","Y","64" +"TEST r/m64, r64","TESTQ r64, r/m64","testq r64, r/m64","REX.W 85 /r","N.S.","V","","","r,r","Y","64" +"TEST AX, imm16","TESTW imm16, AX","testw imm16, AX","A9 iw","V","V","","operand16","r,r","Y","16" +"TEST r/m16, imm16","TESTW imm16, r/m16","testw imm16, r/m16","F7 /0 iw","V","V","","operand16","r,r","Y","16" +"TEST r/m16, imm16","TESTW imm16, r/m16","testw imm16, r/m16","F7 /1 iw","V","V","","operand16","r,r","Y","16" +"TEST r/m16, r16","TESTW r16, r/m16","testw r16, r/m16","85 /r","V","V","","operand16","r,r","Y","16" +"TZCNT r32, r/m32","TZCNTL r/m32, r32","tzcntl r/m32, r32","F3 0F BC /r","V","V","BMI1","operand32","w,r","Y","32" +"TZCNT r64, r/m64","TZCNTQ r/m64, r64","tzcntq r/m64, r64","F3 REX.W 0F BC /r","N.S.","V","BMI1","","w,r","Y","64" +"TZCNT r16, r/m16","TZCNTW r/m16, r16","tzcntw r/m16, r16","F3 0F BC /r","V","V","BMI1","operand16","w,r","Y","16" +"TZMSK r32V, r/m32","TZMSKL r/m32, r32V","tzmskl r/m32, r32V","XOP.NDD.128.09.WIG 01 /4","V","V","TBM","amd,operand16,operand32","w,r","Y","32" +"TZMSK r64V, r/m64","TZMSKQ r/m64, r64V","tzmskq r/m64, r64V","XOP.NDD.128.09.WIG 01 /4","N.S.","V","TBM","amd,operand64","w,r","Y","64" +"UCOMISD xmm1, 
xmm2/m64","UCOMISD xmm2/m64, xmm1","ucomisd xmm2/m64, xmm1","66 0F 2E /r","V","V","SSE2","","r,r","","" +"UCOMISS xmm1, xmm2/m32","UCOMISS xmm2/m32, xmm1","ucomiss xmm2/m32, xmm1","0F 2E /r","V","V","SSE","","r,r","","" +"UD0 r32, r/m32","UD0 r/m32, r32","ud0 r/m32, r32","0F FF /r","V","V","PPRO","","r,r","","" +"UD1 r32, r/m32","UD1 r/m32, r32","ud1 r/m32, r32","0F B9 /r","V","V","PPRO","","r,r","","" +"UD2","UD2","ud2","0F 0B","V","V","PPRO","","","","" +"UNPCKHPD xmm1, xmm2/m128","UNPCKHPD xmm2/m128, xmm1","unpckhpd xmm2/m128, xmm1","66 0F 15 /r","V","V","SSE2","","rw,r","","" +"UNPCKHPS xmm1, xmm2/m128","UNPCKHPS xmm2/m128, xmm1","unpckhps xmm2/m128, xmm1","0F 15 /r","V","V","SSE","","rw,r","","" +"UNPCKLPD xmm1, xmm2/m128","UNPCKLPD xmm2/m128, xmm1","unpcklpd xmm2/m128, xmm1","66 0F 14 /r","V","V","SSE2","","rw,r","","" +"UNPCKLPS xmm1, xmm2/m128","UNPCKLPS xmm2/m128, xmm1","unpcklps xmm2/m128, xmm1","0F 14 /r","V","V","SSE","","rw,r","","" +"V4FMADDPS zmm1, {k}{z}, zmmV+3, m128","V4FMADDPS m128, zmmV+3, {k}{z}, zmm1","v4fmaddps m128, zmmV+3, {k}{z}, zmm1","EVEX.DDS.512.F2.0F38.W0 9A /r","V","V","AVX512_4FMAPS","modrm_memonly,scale16","rw,r,r,r","","" +"V4FMADDSS xmm1, {k}{z}, xmmV+3, m128","V4FMADDSS m128, xmmV+3, {k}{z}, xmm1","v4fmaddss m128, xmmV+3, {k}{z}, xmm1","EVEX.DDS.LIG.F2.0F38.W0 9B /r","V","V","AVX512_4FMAPS","modrm_memonly,scale16","rw,r,r,r","","" +"V4FNMADDPS zmm1, {k}{z}, zmmV+3, m128","V4FNMADDPS m128, zmmV+3, {k}{z}, zmm1","v4fnmaddps m128, zmmV+3, {k}{z}, zmm1","EVEX.DDS.512.F2.0F38.W0 AA /r","V","V","AVX512_4FMAPS","modrm_memonly,scale16","rw,r,r,r","","" +"V4FNMADDSS xmm1, {k}{z}, xmmV+3, m128","V4FNMADDSS m128, xmmV+3, {k}{z}, xmm1","v4fnmaddss m128, xmmV+3, {k}{z}, xmm1","EVEX.DDS.LIG.F2.0F38.W0 AB /r","V","V","AVX512_4FMAPS","modrm_memonly,scale16","rw,r,r,r","","" +"VADDPD xmm1, xmmV, xmm2/m128","VADDPD xmm2/m128, xmmV, xmm1","vaddpd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 58 /r","V","V","AVX","","w,r,r","","" +"VADDPD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VADDPD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vaddpd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 58 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VADDPD ymm1, ymmV, ymm2/m256","VADDPD ymm2/m256, ymmV, ymm1","vaddpd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 58 /r","V","V","AVX","","w,r,r","","" +"VADDPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VADDPD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vaddpd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 58 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VADDPD zmm1{er}, {k}{z}, zmmV, zmm2","VADDPD zmm2, zmmV, {k}{z}, zmm1{er}","vaddpd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.NDS.512.66.0F.W1 58 /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VADDPD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VADDPD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vaddpd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 58 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VADDPS xmm1, xmmV, xmm2/m128","VADDPS xmm2/m128, xmmV, xmm1","vaddps xmm2/m128, xmmV, xmm1","VEX.NDS.128.0F.WIG 58 /r","V","V","AVX","","w,r,r","","" +"VADDPS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VADDPS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vaddps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.0F.W0 58 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VADDPS ymm1, ymmV, ymm2/m256","VADDPS ymm2/m256, ymmV, ymm1","vaddps ymm2/m256, ymmV, ymm1","VEX.NDS.256.0F.WIG 58 /r","V","V","AVX","","w,r,r","","" 
+"VADDPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VADDPS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vaddps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.0F.W0 58 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VADDPS zmm1{er}, {k}{z}, zmmV, zmm2","VADDPS zmm2, zmmV, {k}{z}, zmm1{er}","vaddps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.NDS.512.0F.W0 58 /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VADDPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VADDPS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vaddps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.0F.W0 58 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VADDSD xmm1{er}, {k}{z}, xmmV, xmm2","VADDSD xmm2, xmmV, {k}{z}, xmm1{er}","vaddsd xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.NDS.128.F2.0F.W1 58 /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VADDSD xmm1, xmmV, xmm2/m64","VADDSD xmm2/m64, xmmV, xmm1","vaddsd xmm2/m64, xmmV, xmm1","VEX.NDS.LIG.F2.0F.WIG 58 /r","V","V","AVX","","w,r,r","","" +"VADDSD xmm1, {k}{z}, xmmV, xmm2/m64","VADDSD xmm2/m64, xmmV, {k}{z}, xmm1","vaddsd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.F2.0F.W1 58 /r","V","V","AVX512F","scale8","w,r,r,r","","" +"VADDSS xmm1{er}, {k}{z}, xmmV, xmm2","VADDSS xmm2, xmmV, {k}{z}, xmm1{er}","vaddss xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.NDS.128.F3.0F.W0 58 /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VADDSS xmm1, xmmV, xmm2/m32","VADDSS xmm2/m32, xmmV, xmm1","vaddss xmm2/m32, xmmV, xmm1","VEX.NDS.LIG.F3.0F.WIG 58 /r","V","V","AVX","","w,r,r","","" +"VADDSS xmm1, {k}{z}, xmmV, xmm2/m32","VADDSS xmm2/m32, xmmV, {k}{z}, xmm1","vaddss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.F3.0F.W0 58 /r","V","V","AVX512F","scale4","w,r,r,r","","" +"VADDSUBPD xmm1, xmmV, xmm2/m128","VADDSUBPD xmm2/m128, xmmV, xmm1","vaddsubpd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG D0 /r","V","V","AVX","","w,r,r","","" +"VADDSUBPD ymm1, ymmV, ymm2/m256","VADDSUBPD ymm2/m256, ymmV, ymm1","vaddsubpd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG D0 /r","V","V","AVX","","w,r,r","","" +"VADDSUBPS xmm1, xmmV, xmm2/m128","VADDSUBPS xmm2/m128, xmmV, xmm1","vaddsubps xmm2/m128, xmmV, xmm1","VEX.NDS.128.F2.0F.WIG D0 /r","V","V","AVX","","w,r,r","","" +"VADDSUBPS ymm1, ymmV, ymm2/m256","VADDSUBPS ymm2/m256, ymmV, ymm1","vaddsubps ymm2/m256, ymmV, ymm1","VEX.NDS.256.F2.0F.WIG D0 /r","V","V","AVX","","w,r,r","","" +"VAESDEC xmm1, xmmV, xmm2/m128","VAESDEC xmm2/m128, xmmV, xmm1","vaesdec xmm2/m128, xmmV, xmm1","EVEX.NDS.128.66.0F38.WIG DE /r","V","V","AES+AVX512VL","scale16","w,r,r","","" +"VAESDEC xmm1, xmmV, xmm2/m128","VAESDEC xmm2/m128, xmmV, xmm1","vaesdec xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG DE /r","V","V","AES+AVX","","w,r,r","","" +"VAESDEC ymm1, ymmV, ymm2/m256","VAESDEC ymm2/m256, ymmV, ymm1","vaesdec ymm2/m256, ymmV, ymm1","EVEX.NDS.256.66.0F38.WIG DE /r","V","V","AES+AVX512VL","scale32","w,r,r","","" +"VAESDEC ymm1, ymmV, ymm2/m256","VAESDEC ymm2/m256, ymmV, ymm1","vaesdec ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG DE /r","V","V","VAES+AVX","","w,r,r","","" +"VAESDEC zmm1, zmmV, zmm2/m512","VAESDEC zmm2/m512, zmmV, zmm1","vaesdec zmm2/m512, zmmV, zmm1","EVEX.NDS.512.66.0F38.WIG DE /r","V","V","AES+AVX512F","scale64","w,r,r","","" +"VAESDECLAST xmm1, xmmV, xmm2/m128","VAESDECLAST xmm2/m128, xmmV, xmm1","vaesdeclast xmm2/m128, xmmV, xmm1","EVEX.NDS.128.66.0F38.WIG DF /r","V","V","AES+AVX512VL","scale16","w,r,r","","" +"VAESDECLAST xmm1, xmmV, xmm2/m128","VAESDECLAST xmm2/m128, xmmV, xmm1","vaesdeclast xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG DF 
/r","V","V","AES+AVX","","w,r,r","","" +"VAESDECLAST ymm1, ymmV, ymm2/m256","VAESDECLAST ymm2/m256, ymmV, ymm1","vaesdeclast ymm2/m256, ymmV, ymm1","EVEX.NDS.256.66.0F38.WIG DF /r","V","V","AES+AVX512VL","scale32","w,r,r","","" +"VAESDECLAST ymm1, ymmV, ymm2/m256","VAESDECLAST ymm2/m256, ymmV, ymm1","vaesdeclast ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG DF /r","V","V","VAES+AVX","","w,r,r","","" +"VAESDECLAST zmm1, zmmV, zmm2/m512","VAESDECLAST zmm2/m512, zmmV, zmm1","vaesdeclast zmm2/m512, zmmV, zmm1","EVEX.NDS.512.66.0F38.WIG DF /r","V","V","AES+AVX512F","scale64","w,r,r","","" +"VAESENC xmm1, xmmV, xmm2/m128","VAESENC xmm2/m128, xmmV, xmm1","vaesenc xmm2/m128, xmmV, xmm1","EVEX.NDS.128.66.0F38.WIG DC /r","V","V","AES+AVX512VL","scale16","w,r,r","","" +"VAESENC xmm1, xmmV, xmm2/m128","VAESENC xmm2/m128, xmmV, xmm1","vaesenc xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG DC /r","V","V","AES+AVX","","w,r,r","","" +"VAESENC ymm1, ymmV, ymm2/m256","VAESENC ymm2/m256, ymmV, ymm1","vaesenc ymm2/m256, ymmV, ymm1","EVEX.NDS.256.66.0F38.WIG DC /r","V","V","AES+AVX512VL","scale32","w,r,r","","" +"VAESENC ymm1, ymmV, ymm2/m256","VAESENC ymm2/m256, ymmV, ymm1","vaesenc ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG DC /r","V","V","VAES+AVX","","w,r,r","","" +"VAESENC zmm1, zmmV, zmm2/m512","VAESENC zmm2/m512, zmmV, zmm1","vaesenc zmm2/m512, zmmV, zmm1","EVEX.NDS.512.66.0F38.WIG DC /r","V","V","AES+AVX512F","scale64","w,r,r","","" +"VAESENCLAST xmm1, xmmV, xmm2/m128","VAESENCLAST xmm2/m128, xmmV, xmm1","vaesenclast xmm2/m128, xmmV, xmm1","EVEX.NDS.128.66.0F38.WIG DD /r","V","V","AES+AVX512VL","scale16","w,r,r","","" +"VAESENCLAST xmm1, xmmV, xmm2/m128","VAESENCLAST xmm2/m128, xmmV, xmm1","vaesenclast xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG DD /r","V","V","AES+AVX","","w,r,r","","" +"VAESENCLAST ymm1, ymmV, ymm2/m256","VAESENCLAST ymm2/m256, ymmV, ymm1","vaesenclast ymm2/m256, ymmV, ymm1","EVEX.NDS.256.66.0F38.WIG DD /r","V","V","AES+AVX512VL","scale32","w,r,r","","" +"VAESENCLAST ymm1, ymmV, ymm2/m256","VAESENCLAST ymm2/m256, ymmV, ymm1","vaesenclast ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG DD /r","V","V","VAES+AVX","","w,r,r","","" +"VAESENCLAST zmm1, zmmV, zmm2/m512","VAESENCLAST zmm2/m512, zmmV, zmm1","vaesenclast zmm2/m512, zmmV, zmm1","EVEX.NDS.512.66.0F38.WIG DD /r","V","V","AES+AVX512F","scale64","w,r,r","","" +"VAESIMC xmm1, xmm2/m128","VAESIMC xmm2/m128, xmm1","vaesimc xmm2/m128, xmm1","VEX.128.66.0F38.WIG DB /r","V","V","AES+AVX","","w,r","","" +"VAESKEYGENASSIST xmm1, xmm2/m128, imm8u","VAESKEYGENASSIST imm8u, xmm2/m128, xmm1","vaeskeygenassist imm8u, xmm2/m128, xmm1","VEX.128.66.0F3A.WIG DF /r ib","V","V","AES+AVX","","w,r,r","","" +"VALIGND xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst, imm8u","VALIGND imm8u, xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","valignd imm8u, xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F3A.W0 03 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r,r","","" +"VALIGND ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst, imm8u","VALIGND imm8u, ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","valignd imm8u, ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W0 03 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r,r","","" +"VALIGND zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst, imm8u","VALIGND imm8u, zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","valignd imm8u, zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W0 03 /r ib","V","V","AVX512F","bscale4,scale64","w,r,r,r,r","","" +"VALIGNQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst, imm8u","VALIGNQ 
imm8u, xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","valignq imm8u, xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F3A.W1 03 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r,r","","" +"VALIGNQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst, imm8u","VALIGNQ imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","valignq imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W1 03 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r,r","","" +"VALIGNQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst, imm8u","VALIGNQ imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","valignq imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W1 03 /r ib","V","V","AVX512F","bscale8,scale64","w,r,r,r,r","","" +"VANDNPD xmm1, xmmV, xmm2/m128","VANDNPD xmm2/m128, xmmV, xmm1","vandnpd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 55 /r","V","V","AVX","","w,r,r","","" +"VANDNPD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VANDNPD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vandnpd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 55 /r","V","V","AVX512DQ+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VANDNPD ymm1, ymmV, ymm2/m256","VANDNPD ymm2/m256, ymmV, ymm1","vandnpd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 55 /r","V","V","AVX","","w,r,r","","" +"VANDNPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VANDNPD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vandnpd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 55 /r","V","V","AVX512DQ+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VANDNPD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VANDNPD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vandnpd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 55 /r","V","V","AVX512DQ","bscale8,scale64","w,r,r,r","","" +"VANDNPS xmm1, xmmV, xmm2/m128","VANDNPS xmm2/m128, xmmV, xmm1","vandnps xmm2/m128, xmmV, xmm1","VEX.NDS.128.0F.WIG 55 /r","V","V","AVX","","w,r,r","","" +"VANDNPS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VANDNPS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vandnps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.0F.W0 55 /r","V","V","AVX512DQ+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VANDNPS ymm1, ymmV, ymm2/m256","VANDNPS ymm2/m256, ymmV, ymm1","vandnps ymm2/m256, ymmV, ymm1","VEX.NDS.256.0F.WIG 55 /r","V","V","AVX","","w,r,r","","" +"VANDNPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VANDNPS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vandnps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.0F.W0 55 /r","V","V","AVX512DQ+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VANDNPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VANDNPS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vandnps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.0F.W0 55 /r","V","V","AVX512DQ","bscale4,scale64","w,r,r,r","","" +"VANDPD xmm1, xmmV, xmm2/m128","VANDPD xmm2/m128, xmmV, xmm1","vandpd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 54 /r","V","V","AVX","","w,r,r","","" +"VANDPD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VANDPD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vandpd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 54 /r","V","V","AVX512DQ+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VANDPD ymm1, ymmV, ymm2/m256","VANDPD ymm2/m256, ymmV, ymm1","vandpd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 54 /r","V","V","AVX","","w,r,r","","" +"VANDPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VANDPD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vandpd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 54 /r","V","V","AVX512DQ+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VANDPD zmm1, {k}{z}, zmmV, 
zmm2/m512/m64bcst","VANDPD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vandpd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 54 /r","V","V","AVX512DQ","bscale8,scale64","w,r,r,r","","" +"VANDPS xmm1, xmmV, xmm2/m128","VANDPS xmm2/m128, xmmV, xmm1","vandps xmm2/m128, xmmV, xmm1","VEX.NDS.128.0F.WIG 54 /r","V","V","AVX","","w,r,r","","" +"VANDPS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VANDPS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vandps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.0F.W0 54 /r","V","V","AVX512DQ+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VANDPS ymm1, ymmV, ymm2/m256","VANDPS ymm2/m256, ymmV, ymm1","vandps ymm2/m256, ymmV, ymm1","VEX.NDS.256.0F.WIG 54 /r","V","V","AVX","","w,r,r","","" +"VANDPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VANDPS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vandps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.0F.W0 54 /r","V","V","AVX512DQ+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VANDPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VANDPS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vandps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.0F.W0 54 /r","V","V","AVX512DQ","bscale4,scale64","w,r,r,r","","" +"VBLENDMPD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VBLENDMPD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vblendmpd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 65 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VBLENDMPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VBLENDMPD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vblendmpd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 65 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VBLENDMPD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VBLENDMPD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vblendmpd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 65 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VBLENDMPS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VBLENDMPS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vblendmps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W0 65 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VBLENDMPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VBLENDMPS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vblendmps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 65 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VBLENDMPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VBLENDMPS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vblendmps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 65 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VBLENDPD xmm1, xmmV, xmm2/m128, imm8u","VBLENDPD imm8u, xmm2/m128, xmmV, xmm1","vblendpd imm8u, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.WIG 0D /r ib","V","V","AVX","","w,r,r,r","","" +"VBLENDPD ymm1, ymmV, ymm2/m256, imm8u","VBLENDPD imm8u, ymm2/m256, ymmV, ymm1","vblendpd imm8u, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.WIG 0D /r ib","V","V","AVX","","w,r,r,r","","" +"VBLENDPS xmm1, xmmV, xmm2/m128, imm8u","VBLENDPS imm8u, xmm2/m128, xmmV, xmm1","vblendps imm8u, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.WIG 0C /r ib","V","V","AVX","","w,r,r,r","","" +"VBLENDPS ymm1, ymmV, ymm2/m256, imm8u","VBLENDPS imm8u, ymm2/m256, ymmV, ymm1","vblendps imm8u, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.WIG 0C /r ib","V","V","AVX","","w,r,r,r","","" +"VBLENDVPD xmm1, xmmV, xmm2/m128, xmmIH","VBLENDVPD xmmIH, xmm2/m128, xmmV, xmm1","vblendvpd xmmIH, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W0 4B /r 
/is4","V","V","AVX","","w,r,r,r","","" +"VBLENDVPD ymm1, ymmV, ymm2/m256, ymmIH","VBLENDVPD ymmIH, ymm2/m256, ymmV, ymm1","vblendvpd ymmIH, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 4B /r /is4","V","V","AVX","","w,r,r,r","","" +"VBLENDVPS xmm1, xmmV, xmm2/m128, xmmIH","VBLENDVPS xmmIH, xmm2/m128, xmmV, xmm1","vblendvps xmmIH, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W0 4A /r /is4","V","V","AVX","","w,r,r,r","","" +"VBLENDVPS ymm1, ymmV, ymm2/m256, ymmIH","VBLENDVPS ymmIH, ymm2/m256, ymmV, ymm1","vblendvps ymmIH, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 4A /r /is4","V","V","AVX","","w,r,r,r","","" +"VBROADCASTF128 ymm1, m128","VBROADCASTF128 m128, ymm1","vbroadcastf128 m128, ymm1","VEX.256.66.0F38.W0 1A /r","V","V","AVX","modrm_memonly","w,r","","" +"VBROADCASTF32X2 ymm1, {k}{z}, xmm2/m64","VBROADCASTF32X2 xmm2/m64, {k}{z}, ymm1","vbroadcastf32x2 xmm2/m64, {k}{z}, ymm1","EVEX.256.66.0F38.W0 19 /r","V","V","AVX512DQ+AVX512VL","scale8","w,r,r","","" +"VBROADCASTF32X2 zmm1, {k}{z}, xmm2/m64","VBROADCASTF32X2 xmm2/m64, {k}{z}, zmm1","vbroadcastf32x2 xmm2/m64, {k}{z}, zmm1","EVEX.512.66.0F38.W0 19 /r","V","V","AVX512DQ","scale8","w,r,r","","" +"VBROADCASTF32X4 ymm1, {k}{z}, m128","VBROADCASTF32X4 m128, {k}{z}, ymm1","vbroadcastf32x4 m128, {k}{z}, ymm1","EVEX.256.66.0F38.W0 1A /r","V","V","AVX512F+AVX512VL","modrm_memonly,scale16","w,r,r","","" +"VBROADCASTF32X4 zmm1, {k}{z}, m128","VBROADCASTF32X4 m128, {k}{z}, zmm1","vbroadcastf32x4 m128, {k}{z}, zmm1","EVEX.512.66.0F38.W0 1A /r","V","V","AVX512F","modrm_memonly,scale16","w,r,r","","" +"VBROADCASTF32X8 zmm1, {k}{z}, m256","VBROADCASTF32X8 m256, {k}{z}, zmm1","vbroadcastf32x8 m256, {k}{z}, zmm1","EVEX.512.66.0F38.W0 1B /r","V","V","AVX512DQ","modrm_memonly,scale32","w,r,r","","" +"VBROADCASTF64X2 ymm1, {k}{z}, m128","VBROADCASTF64X2 m128, {k}{z}, ymm1","vbroadcastf64x2 m128, {k}{z}, ymm1","EVEX.256.66.0F38.W1 1A /r","V","V","AVX512DQ+AVX512VL","modrm_memonly,scale16","w,r,r","","" +"VBROADCASTF64X2 zmm1, {k}{z}, m128","VBROADCASTF64X2 m128, {k}{z}, zmm1","vbroadcastf64x2 m128, {k}{z}, zmm1","EVEX.512.66.0F38.W1 1A /r","V","V","AVX512DQ","modrm_memonly,scale16","w,r,r","","" +"VBROADCASTF64X4 zmm1, {k}{z}, m256","VBROADCASTF64X4 m256, {k}{z}, zmm1","vbroadcastf64x4 m256, {k}{z}, zmm1","EVEX.512.66.0F38.W1 1B /r","V","V","AVX512F","modrm_memonly,scale32","w,r,r","","" +"VBROADCASTI128 ymm1, m128","VBROADCASTI128 m128, ymm1","vbroadcasti128 m128, ymm1","VEX.256.66.0F38.W0 5A /r","V","V","AVX2","modrm_memonly","w,r","","" +"VBROADCASTI32X2 xmm1, {k}{z}, xmm2/m64","VBROADCASTI32X2 xmm2/m64, {k}{z}, xmm1","vbroadcasti32x2 xmm2/m64, {k}{z}, xmm1","EVEX.128.66.0F38.W0 59 /r","V","V","AVX512DQ+AVX512VL","scale8","w,r,r","","" +"VBROADCASTI32X2 ymm1, {k}{z}, xmm2/m64","VBROADCASTI32X2 xmm2/m64, {k}{z}, ymm1","vbroadcasti32x2 xmm2/m64, {k}{z}, ymm1","EVEX.256.66.0F38.W0 59 /r","V","V","AVX512DQ+AVX512VL","scale8","w,r,r","","" +"VBROADCASTI32X2 zmm1, {k}{z}, xmm2/m64","VBROADCASTI32X2 xmm2/m64, {k}{z}, zmm1","vbroadcasti32x2 xmm2/m64, {k}{z}, zmm1","EVEX.512.66.0F38.W0 59 /r","V","V","AVX512DQ","scale8","w,r,r","","" +"VBROADCASTI32X4 ymm1, {k}{z}, m128","VBROADCASTI32X4 m128, {k}{z}, ymm1","vbroadcasti32x4 m128, {k}{z}, ymm1","EVEX.256.66.0F38.W0 5A /r","V","V","AVX512F+AVX512VL","modrm_memonly,scale16","w,r,r","","" +"VBROADCASTI32X4 zmm1, {k}{z}, m128","VBROADCASTI32X4 m128, {k}{z}, zmm1","vbroadcasti32x4 m128, {k}{z}, zmm1","EVEX.512.66.0F38.W0 5A /r","V","V","AVX512F","modrm_memonly,scale16","w,r,r","","" +"VBROADCASTI32X8 zmm1, {k}{z}, 
m256","VBROADCASTI32X8 m256, {k}{z}, zmm1","vbroadcasti32x8 m256, {k}{z}, zmm1","EVEX.512.66.0F38.W0 5B /r","V","V","AVX512DQ","modrm_memonly,scale32","w,r,r","","" +"VBROADCASTI64X2 ymm1, {k}{z}, m128","VBROADCASTI64X2 m128, {k}{z}, ymm1","vbroadcasti64x2 m128, {k}{z}, ymm1","EVEX.256.66.0F38.W1 5A /r","V","V","AVX512DQ+AVX512VL","modrm_memonly,scale16","w,r,r","","" +"VBROADCASTI64X2 zmm1, {k}{z}, m128","VBROADCASTI64X2 m128, {k}{z}, zmm1","vbroadcasti64x2 m128, {k}{z}, zmm1","EVEX.512.66.0F38.W1 5A /r","V","V","AVX512DQ","modrm_memonly,scale16","w,r,r","","" +"VBROADCASTI64X4 zmm1, {k}{z}, m256","VBROADCASTI64X4 m256, {k}{z}, zmm1","vbroadcasti64x4 m256, {k}{z}, zmm1","EVEX.512.66.0F38.W1 5B /r","V","V","AVX512F","modrm_memonly,scale32","w,r,r","","" +"VBROADCASTSD ymm1, m64","VBROADCASTSD m64, ymm1","vbroadcastsd m64, ymm1","VEX.256.66.0F38.W0 19 /r","V","V","AVX","modrm_memonly","w,r","","" +"VBROADCASTSD ymm1, xmm2","VBROADCASTSD xmm2, ymm1","vbroadcastsd xmm2, ymm1","VEX.256.66.0F38.W0 19 /r","V","V","AVX2","modrm_regonly","w,r","","" +"VBROADCASTSD ymm1, {k}{z}, xmm2/m64","VBROADCASTSD xmm2/m64, {k}{z}, ymm1","vbroadcastsd xmm2/m64, {k}{z}, ymm1","EVEX.256.66.0F38.W1 19 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VBROADCASTSD zmm1, {k}{z}, xmm2/m64","VBROADCASTSD xmm2/m64, {k}{z}, zmm1","vbroadcastsd xmm2/m64, {k}{z}, zmm1","EVEX.512.66.0F38.W1 19 /r","V","V","AVX512F","scale8","w,r,r","","" +"VBROADCASTSS xmm1, m32","VBROADCASTSS m32, xmm1","vbroadcastss m32, xmm1","VEX.128.66.0F38.W0 18 /r","V","V","AVX","modrm_memonly","w,r","","" +"VBROADCASTSS ymm1, m32","VBROADCASTSS m32, ymm1","vbroadcastss m32, ymm1","VEX.256.66.0F38.W0 18 /r","V","V","AVX","modrm_memonly","w,r","","" +"VBROADCASTSS xmm1, xmm2","VBROADCASTSS xmm2, xmm1","vbroadcastss xmm2, xmm1","VEX.128.66.0F38.W0 18 /r","V","V","AVX2","modrm_regonly","w,r","","" +"VBROADCASTSS ymm1, xmm2","VBROADCASTSS xmm2, ymm1","vbroadcastss xmm2, ymm1","VEX.256.66.0F38.W0 18 /r","V","V","AVX2","modrm_regonly","w,r","","" +"VBROADCASTSS xmm1, {k}{z}, xmm2/m32","VBROADCASTSS xmm2/m32, {k}{z}, xmm1","vbroadcastss xmm2/m32, {k}{z}, xmm1","EVEX.128.66.0F38.W0 18 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VBROADCASTSS ymm1, {k}{z}, xmm2/m32","VBROADCASTSS xmm2/m32, {k}{z}, ymm1","vbroadcastss xmm2/m32, {k}{z}, ymm1","EVEX.256.66.0F38.W0 18 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VBROADCASTSS zmm1, {k}{z}, xmm2/m32","VBROADCASTSS xmm2/m32, {k}{z}, zmm1","vbroadcastss xmm2/m32, {k}{z}, zmm1","EVEX.512.66.0F38.W0 18 /r","V","V","AVX512F","scale4","w,r,r","","" +"VCMPPD xmm1, xmmV, xmm2/m128, imm8u","VCMPPD imm8u, xmm2/m128, xmmV, xmm1","vcmppd imm8u, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG C2 /r ib","V","V","AVX","","w,r,r,r","","" +"VCMPPD k1, {k}, xmmV, xmm2/m128/m64bcst, imm8u","VCMPPD imm8u, xmm2/m128/m64bcst, xmmV, {k}, k1","vcmppd imm8u, xmm2/m128/m64bcst, xmmV, {k}, k1","EVEX.NDS.128.66.0F.W1 C2 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r,r","","" +"VCMPPD ymm1, ymmV, ymm2/m256, imm8u","VCMPPD imm8u, ymm2/m256, ymmV, ymm1","vcmppd imm8u, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG C2 /r ib","V","V","AVX","","w,r,r,r","","" +"VCMPPD k1, {k}, ymmV, ymm2/m256/m64bcst, imm8u","VCMPPD imm8u, ymm2/m256/m64bcst, ymmV, {k}, k1","vcmppd imm8u, ymm2/m256/m64bcst, ymmV, {k}, k1","EVEX.NDS.256.66.0F.W1 C2 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r,r","","" +"VCMPPD k1{sae}, {k}, zmmV, zmm2, imm8u","VCMPPD imm8u, zmm2, zmmV, {k}, k1{sae}","vcmppd imm8u, zmm2, zmmV, 
{k}, k1{sae}","EVEX.NDS.512.66.0F.W1 C2 /r ib","V","V","AVX512F","modrm_regonly","w,r,r,r,r","","" +"VCMPPD k1, {k}, zmmV, zmm2/m512/m64bcst, imm8u","VCMPPD imm8u, zmm2/m512/m64bcst, zmmV, {k}, k1","vcmppd imm8u, zmm2/m512/m64bcst, zmmV, {k}, k1","EVEX.NDS.512.66.0F.W1 C2 /r ib","V","V","AVX512F","bscale8,scale64","w,r,r,r,r","","" +"VCMPPS xmm1, xmmV, xmm2/m128, imm8u","VCMPPS imm8u, xmm2/m128, xmmV, xmm1","vcmpps imm8u, xmm2/m128, xmmV, xmm1","VEX.NDS.128.0F.WIG C2 /r ib","V","V","AVX","","w,r,r,r","","" +"VCMPPS k1, {k}, xmmV, xmm2/m128/m32bcst, imm8u","VCMPPS imm8u, xmm2/m128/m32bcst, xmmV, {k}, k1","vcmpps imm8u, xmm2/m128/m32bcst, xmmV, {k}, k1","EVEX.NDS.128.0F.W0 C2 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r,r","","" +"VCMPPS ymm1, ymmV, ymm2/m256, imm8u","VCMPPS imm8u, ymm2/m256, ymmV, ymm1","vcmpps imm8u, ymm2/m256, ymmV, ymm1","VEX.NDS.256.0F.WIG C2 /r ib","V","V","AVX","","w,r,r,r","","" +"VCMPPS k1, {k}, ymmV, ymm2/m256/m32bcst, imm8u","VCMPPS imm8u, ymm2/m256/m32bcst, ymmV, {k}, k1","vcmpps imm8u, ymm2/m256/m32bcst, ymmV, {k}, k1","EVEX.NDS.256.0F.W0 C2 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r,r","","" +"VCMPPS k1{sae}, {k}, zmmV, zmm2, imm8u","VCMPPS imm8u, zmm2, zmmV, {k}, k1{sae}","vcmpps imm8u, zmm2, zmmV, {k}, k1{sae}","EVEX.NDS.512.0F.W0 C2 /r ib","V","V","AVX512F","modrm_regonly","w,r,r,r,r","","" +"VCMPPS k1, {k}, zmmV, zmm2/m512/m32bcst, imm8u","VCMPPS imm8u, zmm2/m512/m32bcst, zmmV, {k}, k1","vcmpps imm8u, zmm2/m512/m32bcst, zmmV, {k}, k1","EVEX.NDS.512.0F.W0 C2 /r ib","V","V","AVX512F","bscale4,scale64","w,r,r,r,r","","" +"VCMPSD k1{sae}, {k}, xmmV, xmm2, imm8u","VCMPSD imm8u, xmm2, xmmV, {k}, k1{sae}","vcmpsd imm8u, xmm2, xmmV, {k}, k1{sae}","EVEX.NDS.128.F2.0F.W1 C2 /r ib","V","V","AVX512F","modrm_regonly","w,r,r,r,r","","" +"VCMPSD xmm1, xmmV, xmm2/m64, imm8u","VCMPSD imm8u, xmm2/m64, xmmV, xmm1","vcmpsd imm8u, xmm2/m64, xmmV, xmm1","VEX.NDS.LIG.F2.0F.WIG C2 /r ib","V","V","AVX","","w,r,r,r","","" +"VCMPSD k1, {k}, xmmV, xmm2/m64, imm8u","VCMPSD imm8u, xmm2/m64, xmmV, {k}, k1","vcmpsd imm8u, xmm2/m64, xmmV, {k}, k1","EVEX.NDS.LIG.F2.0F.W1 C2 /r ib","V","V","AVX512F","scale8","w,r,r,r,r","","" +"VCMPSS k1{sae}, {k}, xmmV, xmm2, imm8u","VCMPSS imm8u, xmm2, xmmV, {k}, k1{sae}","vcmpss imm8u, xmm2, xmmV, {k}, k1{sae}","EVEX.NDS.128.F3.0F.W0 C2 /r ib","V","V","AVX512F","modrm_regonly","w,r,r,r,r","","" +"VCMPSS xmm1, xmmV, xmm2/m32, imm8u","VCMPSS imm8u, xmm2/m32, xmmV, xmm1","vcmpss imm8u, xmm2/m32, xmmV, xmm1","VEX.NDS.LIG.F3.0F.WIG C2 /r ib","V","V","AVX","","w,r,r,r","","" +"VCMPSS k1, {k}, xmmV, xmm2/m32, imm8u","VCMPSS imm8u, xmm2/m32, xmmV, {k}, k1","vcmpss imm8u, xmm2/m32, xmmV, {k}, k1","EVEX.NDS.LIG.F3.0F.W0 C2 /r ib","V","V","AVX512F","scale4","w,r,r,r,r","","" +"VCOMISD xmm1{sae}, xmm2","VCOMISD xmm2, xmm1{sae}","vcomisd xmm2, xmm1{sae}","EVEX.128.66.0F.W1 2F /r","V","V","AVX512F","modrm_regonly","r,r","","" +"VCOMISD xmm1, xmm2/m64","VCOMISD xmm2/m64, xmm1","vcomisd xmm2/m64, xmm1","EVEX.LIG.66.0F.W1 2F /r","V","V","AVX512F","scale8","r,r","","" +"VCOMISD xmm1, xmm2/m64","VCOMISD xmm2/m64, xmm1","vcomisd xmm2/m64, xmm1","VEX.LIG.66.0F.WIG 2F /r","V","V","AVX","","r,r","","" +"VCOMISS xmm1{sae}, xmm2","VCOMISS xmm2, xmm1{sae}","vcomiss xmm2, xmm1{sae}","EVEX.128.0F.W0 2F /r","V","V","AVX512F","modrm_regonly","r,r","","" +"VCOMISS xmm1, xmm2/m32","VCOMISS xmm2/m32, xmm1","vcomiss xmm2/m32, xmm1","EVEX.LIG.0F.W0 2F /r","V","V","AVX512F","scale4","r,r","","" +"VCOMISS xmm1, xmm2/m32","VCOMISS xmm2/m32, xmm1","vcomiss 
xmm2/m32, xmm1","VEX.LIG.0F.WIG 2F /r","V","V","AVX","","r,r","","" +"VCOMPRESSPD xmm2/m128, {k}{z}, xmm1","VCOMPRESSPD xmm1, {k}{z}, xmm2/m128","vcompresspd xmm1, {k}{z}, xmm2/m128","EVEX.128.66.0F38.W1 8A /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VCOMPRESSPD ymm2/m256, {k}{z}, ymm1","VCOMPRESSPD ymm1, {k}{z}, ymm2/m256","vcompresspd ymm1, {k}{z}, ymm2/m256","EVEX.256.66.0F38.W1 8A /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VCOMPRESSPD zmm2/m512, {k}{z}, zmm1","VCOMPRESSPD zmm1, {k}{z}, zmm2/m512","vcompresspd zmm1, {k}{z}, zmm2/m512","EVEX.512.66.0F38.W1 8A /r","V","V","AVX512F","scale8","w,r,r","","" +"VCOMPRESSPS xmm2/m128, {k}{z}, xmm1","VCOMPRESSPS xmm1, {k}{z}, xmm2/m128","vcompressps xmm1, {k}{z}, xmm2/m128","EVEX.128.66.0F38.W0 8A /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VCOMPRESSPS ymm2/m256, {k}{z}, ymm1","VCOMPRESSPS ymm1, {k}{z}, ymm2/m256","vcompressps ymm1, {k}{z}, ymm2/m256","EVEX.256.66.0F38.W0 8A /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VCOMPRESSPS zmm2/m512, {k}{z}, zmm1","VCOMPRESSPS zmm1, {k}{z}, zmm2/m512","vcompressps zmm1, {k}{z}, zmm2/m512","EVEX.512.66.0F38.W0 8A /r","V","V","AVX512F","scale4","w,r,r","","" +"VCVTDQ2PD ymm1, xmm2/m128","VCVTDQ2PD xmm2/m128, ymm1","vcvtdq2pd xmm2/m128, ymm1","VEX.256.F3.0F.WIG E6 /r","V","V","AVX","","w,r","","" +"VCVTDQ2PD xmm1, {k}{z}, xmm2/m128/m32bcst","VCVTDQ2PD xmm2/m128/m32bcst, {k}{z}, xmm1","vcvtdq2pd xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.F3.0F.W0 E6 /r","V","V","AVX512F+AVX512VL","bscale4,scale8","w,r,r","","" +"VCVTDQ2PD ymm1, {k}{z}, xmm2/m256/m32bcst","VCVTDQ2PD xmm2/m256/m32bcst, {k}{z}, ymm1","vcvtdq2pd xmm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.F3.0F.W0 E6 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r","","" +"VCVTDQ2PD xmm1, xmm2/m64","VCVTDQ2PD xmm2/m64, xmm1","vcvtdq2pd xmm2/m64, xmm1","VEX.128.F3.0F.WIG E6 /r","V","V","AVX","","w,r","","" +"VCVTDQ2PD zmm1, {k}{z}, ymm2/m512/m32bcst","VCVTDQ2PD ymm2/m512/m32bcst, {k}{z}, zmm1","vcvtdq2pd ymm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.F3.0F.W0 E6 /r","V","V","AVX512F","bscale4,scale32","w,r,r","","" +"VCVTDQ2PS xmm1, xmm2/m128","VCVTDQ2PS xmm2/m128, xmm1","vcvtdq2ps xmm2/m128, xmm1","VEX.128.0F.WIG 5B /r","V","V","AVX","","w,r","","" +"VCVTDQ2PS xmm1, {k}{z}, xmm2/m128/m32bcst","VCVTDQ2PS xmm2/m128/m32bcst, {k}{z}, xmm1","vcvtdq2ps xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.0F.W0 5B /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r","","" +"VCVTDQ2PS ymm1, ymm2/m256","VCVTDQ2PS ymm2/m256, ymm1","vcvtdq2ps ymm2/m256, ymm1","VEX.256.0F.WIG 5B /r","V","V","AVX","","w,r","","" +"VCVTDQ2PS ymm1, {k}{z}, ymm2/m256/m32bcst","VCVTDQ2PS ymm2/m256/m32bcst, {k}{z}, ymm1","vcvtdq2ps ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.0F.W0 5B /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r","","" +"VCVTDQ2PS zmm1{er}, {k}{z}, zmm2","VCVTDQ2PS zmm2, {k}{z}, zmm1{er}","vcvtdq2ps zmm2, {k}{z}, zmm1{er}","EVEX.512.0F.W0 5B /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"VCVTDQ2PS zmm1, {k}{z}, zmm2/m512/m32bcst","VCVTDQ2PS zmm2/m512/m32bcst, {k}{z}, zmm1","vcvtdq2ps zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.0F.W0 5B /r","V","V","AVX512F","bscale4,scale64","w,r,r","","" +"VCVTPD2DQ ymm1{er}, {k}{z}, zmm2","VCVTPD2DQ zmm2, {k}{z}, ymm1{er}","vcvtpd2dq zmm2, {k}{z}, ymm1{er}","EVEX.512.F2.0F.W1 E6 /r","V","V","AVX512F","modrm_regonly","w,r,r","Y","" +"VCVTPD2DQ ymm1, {k}{z}, zmm2/m512/m64bcst","VCVTPD2DQ zmm2/m512/m64bcst, {k}{z}, ymm1","vcvtpd2dq zmm2/m512/m64bcst, {k}{z}, ymm1","EVEX.512.F2.0F.W1 E6 
/r","V","V","AVX512F","bscale8,scale64","w,r,r","Y","512" +"VCVTPD2DQ xmm1, xmm2/m128","VCVTPD2DQX xmm2/m128, xmm1","vcvtpd2dqx xmm2/m128, xmm1","VEX.128.F2.0F.WIG E6 /r","V","V","AVX","","w,r","Y","128" +"VCVTPD2DQ xmm1, {k}{z}, xmm2/m128/m64bcst","VCVTPD2DQX xmm2/m128/m64bcst, {k}{z}, xmm1","vcvtpd2dqx xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.F2.0F.W1 E6 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r","Y","128" +"VCVTPD2DQ xmm1, ymm2/m256","VCVTPD2DQY ymm2/m256, xmm1","vcvtpd2dqy ymm2/m256, xmm1","VEX.256.F2.0F.WIG E6 /r","V","V","AVX","","w,r","Y","256" +"VCVTPD2DQ xmm1, {k}{z}, ymm2/m256/m64bcst","VCVTPD2DQY ymm2/m256/m64bcst, {k}{z}, xmm1","vcvtpd2dqy ymm2/m256/m64bcst, {k}{z}, xmm1","EVEX.256.F2.0F.W1 E6 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r","Y","256" +"VCVTPD2PS ymm1{er}, {k}{z}, zmm2","VCVTPD2PS zmm2, {k}{z}, ymm1{er}","vcvtpd2ps zmm2, {k}{z}, ymm1{er}","EVEX.512.66.0F.W1 5A /r","V","V","AVX512F","modrm_regonly","w,r,r","Y","" +"VCVTPD2PS ymm1, {k}{z}, zmm2/m512/m64bcst","VCVTPD2PS zmm2/m512/m64bcst, {k}{z}, ymm1","vcvtpd2ps zmm2/m512/m64bcst, {k}{z}, ymm1","EVEX.512.66.0F.W1 5A /r","V","V","AVX512F","bscale8,scale64","w,r,r","Y","512" +"VCVTPD2PS xmm1, xmm2/m128","VCVTPD2PSX xmm2/m128, xmm1","vcvtpd2psx xmm2/m128, xmm1","VEX.128.66.0F.WIG 5A /r","V","V","AVX","","w,r","Y","128" +"VCVTPD2PS xmm1, {k}{z}, xmm2/m128/m64bcst","VCVTPD2PSX xmm2/m128/m64bcst, {k}{z}, xmm1","vcvtpd2psx xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.66.0F.W1 5A /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r","Y","128" +"VCVTPD2PS xmm1, ymm2/m256","VCVTPD2PSY ymm2/m256, xmm1","vcvtpd2psy ymm2/m256, xmm1","VEX.256.66.0F.WIG 5A /r","V","V","AVX","","w,r","Y","256" +"VCVTPD2PS xmm1, {k}{z}, ymm2/m256/m64bcst","VCVTPD2PSY ymm2/m256/m64bcst, {k}{z}, xmm1","vcvtpd2psy ymm2/m256/m64bcst, {k}{z}, xmm1","EVEX.256.66.0F.W1 5A /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r","Y","256" +"VCVTPD2QQ xmm1, {k}{z}, xmm2/m128/m64bcst","VCVTPD2QQ xmm2/m128/m64bcst, {k}{z}, xmm1","vcvtpd2qq xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.66.0F.W1 7B /r","V","V","AVX512DQ+AVX512VL","bscale8,scale16","w,r,r","","" +"VCVTPD2QQ ymm1, {k}{z}, ymm2/m256/m64bcst","VCVTPD2QQ ymm2/m256/m64bcst, {k}{z}, ymm1","vcvtpd2qq ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.66.0F.W1 7B /r","V","V","AVX512DQ+AVX512VL","bscale8,scale32","w,r,r","","" +"VCVTPD2QQ zmm1{er}, {k}{z}, zmm2","VCVTPD2QQ zmm2, {k}{z}, zmm1{er}","vcvtpd2qq zmm2, {k}{z}, zmm1{er}","EVEX.512.66.0F.W1 7B /r","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"VCVTPD2QQ zmm1, {k}{z}, zmm2/m512/m64bcst","VCVTPD2QQ zmm2/m512/m64bcst, {k}{z}, zmm1","vcvtpd2qq zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F.W1 7B /r","V","V","AVX512DQ","bscale8,scale64","w,r,r","","" +"VCVTPD2UDQ ymm1{er}, {k}{z}, zmm2","VCVTPD2UDQ zmm2, {k}{z}, ymm1{er}","vcvtpd2udq zmm2, {k}{z}, ymm1{er}","EVEX.512.0F.W1 79 /r","V","V","AVX512F","modrm_regonly","w,r,r","Y","" +"VCVTPD2UDQ ymm1, {k}{z}, zmm2/m512/m64bcst","VCVTPD2UDQ zmm2/m512/m64bcst, {k}{z}, ymm1","vcvtpd2udq zmm2/m512/m64bcst, {k}{z}, ymm1","EVEX.512.0F.W1 79 /r","V","V","AVX512F","bscale8,scale64","w,r,r","Y","512" +"VCVTPD2UDQ xmm1, {k}{z}, xmm2/m128/m64bcst","VCVTPD2UDQX xmm2/m128/m64bcst, {k}{z}, xmm1","vcvtpd2udqx xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.0F.W1 79 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r","Y","128" +"VCVTPD2UDQ xmm1, {k}{z}, ymm2/m256/m64bcst","VCVTPD2UDQY ymm2/m256/m64bcst, {k}{z}, xmm1","vcvtpd2udqy ymm2/m256/m64bcst, {k}{z}, xmm1","EVEX.256.0F.W1 79 
/r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r","Y","256" +"VCVTPD2UQQ xmm1, {k}{z}, xmm2/m128/m64bcst","VCVTPD2UQQ xmm2/m128/m64bcst, {k}{z}, xmm1","vcvtpd2uqq xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.66.0F.W1 79 /r","V","V","AVX512DQ+AVX512VL","bscale8,scale16","w,r,r","","" +"VCVTPD2UQQ ymm1, {k}{z}, ymm2/m256/m64bcst","VCVTPD2UQQ ymm2/m256/m64bcst, {k}{z}, ymm1","vcvtpd2uqq ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.66.0F.W1 79 /r","V","V","AVX512DQ+AVX512VL","bscale8,scale32","w,r,r","","" +"VCVTPD2UQQ zmm1{er}, {k}{z}, zmm2","VCVTPD2UQQ zmm2, {k}{z}, zmm1{er}","vcvtpd2uqq zmm2, {k}{z}, zmm1{er}","EVEX.512.66.0F.W1 79 /r","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"VCVTPD2UQQ zmm1, {k}{z}, zmm2/m512/m64bcst","VCVTPD2UQQ zmm2/m512/m64bcst, {k}{z}, zmm1","vcvtpd2uqq zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F.W1 79 /r","V","V","AVX512DQ","bscale8,scale64","w,r,r","","" +"VCVTPH2PS ymm1, xmm2/m128","VCVTPH2PS xmm2/m128, ymm1","vcvtph2ps xmm2/m128, ymm1","VEX.256.66.0F38.W0 13 /r","V","V","F16C","","w,r","","" +"VCVTPH2PS ymm1, {k}{z}, xmm2/m128","VCVTPH2PS xmm2/m128, {k}{z}, ymm1","vcvtph2ps xmm2/m128, {k}{z}, ymm1","EVEX.256.66.0F38.W0 13 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VCVTPH2PS xmm1, xmm2/m64","VCVTPH2PS xmm2/m64, xmm1","vcvtph2ps xmm2/m64, xmm1","VEX.128.66.0F38.W0 13 /r","V","V","F16C","","w,r","","" +"VCVTPH2PS xmm1, {k}{z}, xmm2/m64","VCVTPH2PS xmm2/m64, {k}{z}, xmm1","vcvtph2ps xmm2/m64, {k}{z}, xmm1","EVEX.128.66.0F38.W0 13 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VCVTPH2PS zmm1{sae}, {k}{z}, ymm2","VCVTPH2PS ymm2, {k}{z}, zmm1{sae}","vcvtph2ps ymm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F38.W0 13 /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"VCVTPH2PS zmm1, {k}{z}, ymm2/m256","VCVTPH2PS ymm2/m256, {k}{z}, zmm1","vcvtph2ps ymm2/m256, {k}{z}, zmm1","EVEX.512.66.0F38.W0 13 /r","V","V","AVX512F","scale32","w,r,r","","" +"VCVTPS2DQ xmm1, xmm2/m128","VCVTPS2DQ xmm2/m128, xmm1","vcvtps2dq xmm2/m128, xmm1","VEX.128.66.0F.WIG 5B /r","V","V","AVX","","w,r","","" +"VCVTPS2DQ xmm1, {k}{z}, xmm2/m128/m32bcst","VCVTPS2DQ xmm2/m128/m32bcst, {k}{z}, xmm1","vcvtps2dq xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.66.0F.W0 5B /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r","","" +"VCVTPS2DQ ymm1, ymm2/m256","VCVTPS2DQ ymm2/m256, ymm1","vcvtps2dq ymm2/m256, ymm1","VEX.256.66.0F.WIG 5B /r","V","V","AVX","","w,r","","" +"VCVTPS2DQ ymm1, {k}{z}, ymm2/m256/m32bcst","VCVTPS2DQ ymm2/m256/m32bcst, {k}{z}, ymm1","vcvtps2dq ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.66.0F.W0 5B /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r","","" +"VCVTPS2DQ zmm1{er}, {k}{z}, zmm2","VCVTPS2DQ zmm2, {k}{z}, zmm1{er}","vcvtps2dq zmm2, {k}{z}, zmm1{er}","EVEX.512.66.0F.W0 5B /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"VCVTPS2DQ zmm1, {k}{z}, zmm2/m512/m32bcst","VCVTPS2DQ zmm2/m512/m32bcst, {k}{z}, zmm1","vcvtps2dq zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F.W0 5B /r","V","V","AVX512F","bscale4,scale64","w,r,r","","" +"VCVTPS2PD ymm1, xmm2/m128","VCVTPS2PD xmm2/m128, ymm1","vcvtps2pd xmm2/m128, ymm1","VEX.256.0F.WIG 5A /r","V","V","AVX","","w,r","","" +"VCVTPS2PD xmm1, {k}{z}, xmm2/m128/m32bcst","VCVTPS2PD xmm2/m128/m32bcst, {k}{z}, xmm1","vcvtps2pd xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.0F.W0 5A /r","V","V","AVX512F+AVX512VL","bscale4,scale8","w,r,r","","" +"VCVTPS2PD ymm1, {k}{z}, xmm2/m256/m32bcst","VCVTPS2PD xmm2/m256/m32bcst, {k}{z}, ymm1","vcvtps2pd xmm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.0F.W0 5A 
/r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r","","" +"VCVTPS2PD xmm1, xmm2/m64","VCVTPS2PD xmm2/m64, xmm1","vcvtps2pd xmm2/m64, xmm1","VEX.128.0F.WIG 5A /r","V","V","AVX","","w,r","","" +"VCVTPS2PD zmm1{sae}, {k}{z}, ymm2","VCVTPS2PD ymm2, {k}{z}, zmm1{sae}","vcvtps2pd ymm2, {k}{z}, zmm1{sae}","EVEX.512.0F.W0 5A /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"VCVTPS2PD zmm1, {k}{z}, ymm2/m512/m32bcst","VCVTPS2PD ymm2/m512/m32bcst, {k}{z}, zmm1","vcvtps2pd ymm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.0F.W0 5A /r","V","V","AVX512F","bscale4,scale32","w,r,r","","" +"VCVTPS2PH xmm2/m64, xmm1, imm8u","VCVTPS2PH imm8u, xmm1, xmm2/m64","vcvtps2ph imm8u, xmm1, xmm2/m64","VEX.128.66.0F3A.W0 1D /r ib","V","V","F16C","","w,r,r","","" +"VCVTPS2PH xmm2/m64, {k}{z}, xmm1, imm8u","VCVTPS2PH imm8u, xmm1, {k}{z}, xmm2/m64","vcvtps2ph imm8u, xmm1, {k}{z}, xmm2/m64","EVEX.128.66.0F3A.W0 1D /r ib","V","V","AVX512F+AVX512VL","scale8","w,r,r,r","","" +"VCVTPS2PH xmm2/m128, ymm1, imm8u","VCVTPS2PH imm8u, ymm1, xmm2/m128","vcvtps2ph imm8u, ymm1, xmm2/m128","VEX.256.66.0F3A.W0 1D /r ib","V","V","F16C","","w,r,r","","" +"VCVTPS2PH xmm2/m128, {k}{z}, ymm1, imm8u","VCVTPS2PH imm8u, ymm1, {k}{z}, xmm2/m128","vcvtps2ph imm8u, ymm1, {k}{z}, xmm2/m128","EVEX.256.66.0F3A.W0 1D /r ib","V","V","AVX512F+AVX512VL","scale16","w,r,r,r","","" +"VCVTPS2PH ymm2/m256, {k}{z}, zmm1, imm8u","VCVTPS2PH imm8u, zmm1, {k}{z}, ymm2/m256","vcvtps2ph imm8u, zmm1, {k}{z}, ymm2/m256","EVEX.512.66.0F3A.W0 1D /r ib","V","V","AVX512F","scale32","w,r,r,r","","" +"VCVTPS2PH ymm2{sae}, {k}{z}, zmm1, imm8u","VCVTPS2PH imm8u, zmm1, {k}{z}, ymm2{sae}","vcvtps2ph imm8u, zmm1, {k}{z}, ymm2{sae}","EVEX.512.66.0F3A.W0 1D /r ib","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VCVTPS2QQ xmm1, {k}{z}, xmm2/m128/m32bcst","VCVTPS2QQ xmm2/m128/m32bcst, {k}{z}, xmm1","vcvtps2qq xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.66.0F.W0 7B /r","V","V","AVX512DQ+AVX512VL","bscale4,scale8","w,r,r","","" +"VCVTPS2QQ ymm1, {k}{z}, xmm2/m256/m32bcst","VCVTPS2QQ xmm2/m256/m32bcst, {k}{z}, ymm1","vcvtps2qq xmm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.66.0F.W0 7B /r","V","V","AVX512DQ+AVX512VL","bscale4,scale16","w,r,r","","" +"VCVTPS2QQ zmm1{er}, {k}{z}, ymm2","VCVTPS2QQ ymm2, {k}{z}, zmm1{er}","vcvtps2qq ymm2, {k}{z}, zmm1{er}","EVEX.512.66.0F.W0 7B /r","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"VCVTPS2QQ zmm1, {k}{z}, ymm2/m512/m32bcst","VCVTPS2QQ ymm2/m512/m32bcst, {k}{z}, zmm1","vcvtps2qq ymm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F.W0 7B /r","V","V","AVX512DQ","bscale4,scale32","w,r,r","","" +"VCVTPS2UDQ xmm1, {k}{z}, xmm2/m128/m32bcst","VCVTPS2UDQ xmm2/m128/m32bcst, {k}{z}, xmm1","vcvtps2udq xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.0F.W0 79 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r","","" +"VCVTPS2UDQ ymm1, {k}{z}, ymm2/m256/m32bcst","VCVTPS2UDQ ymm2/m256/m32bcst, {k}{z}, ymm1","vcvtps2udq ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.0F.W0 79 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r","","" +"VCVTPS2UDQ zmm1{er}, {k}{z}, zmm2","VCVTPS2UDQ zmm2, {k}{z}, zmm1{er}","vcvtps2udq zmm2, {k}{z}, zmm1{er}","EVEX.512.0F.W0 79 /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"VCVTPS2UDQ zmm1, {k}{z}, zmm2/m512/m32bcst","VCVTPS2UDQ zmm2/m512/m32bcst, {k}{z}, zmm1","vcvtps2udq zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.0F.W0 79 /r","V","V","AVX512F","bscale4,scale64","w,r,r","","" +"VCVTPS2UQQ xmm1, {k}{z}, xmm2/m128/m32bcst","VCVTPS2UQQ xmm2/m128/m32bcst, {k}{z}, xmm1","vcvtps2uqq xmm2/m128/m32bcst, {k}{z}, 
xmm1","EVEX.128.66.0F.W0 79 /r","V","V","AVX512DQ+AVX512VL","bscale4,scale8","w,r,r","","" +"VCVTPS2UQQ ymm1, {k}{z}, xmm2/m256/m32bcst","VCVTPS2UQQ xmm2/m256/m32bcst, {k}{z}, ymm1","vcvtps2uqq xmm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.66.0F.W0 79 /r","V","V","AVX512DQ+AVX512VL","bscale4,scale16","w,r,r","","" +"VCVTPS2UQQ zmm1{er}, {k}{z}, ymm2","VCVTPS2UQQ ymm2, {k}{z}, zmm1{er}","vcvtps2uqq ymm2, {k}{z}, zmm1{er}","EVEX.512.66.0F.W0 79 /r","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"VCVTPS2UQQ zmm1, {k}{z}, ymm2/m512/m32bcst","VCVTPS2UQQ ymm2/m512/m32bcst, {k}{z}, zmm1","vcvtps2uqq ymm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F.W0 79 /r","V","V","AVX512DQ","bscale4,scale32","w,r,r","","" +"VCVTQQ2PD xmm1, {k}{z}, xmm2/m128/m64bcst","VCVTQQ2PD xmm2/m128/m64bcst, {k}{z}, xmm1","vcvtqq2pd xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.F3.0F.W1 E6 /r","V","V","AVX512DQ+AVX512VL","bscale8,scale16","w,r,r","","" +"VCVTQQ2PD ymm1, {k}{z}, ymm2/m256/m64bcst","VCVTQQ2PD ymm2/m256/m64bcst, {k}{z}, ymm1","vcvtqq2pd ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.F3.0F.W1 E6 /r","V","V","AVX512DQ+AVX512VL","bscale8,scale32","w,r,r","","" +"VCVTQQ2PD zmm1{er}, {k}{z}, zmm2","VCVTQQ2PD zmm2, {k}{z}, zmm1{er}","vcvtqq2pd zmm2, {k}{z}, zmm1{er}","EVEX.512.F3.0F.W1 E6 /r","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"VCVTQQ2PD zmm1, {k}{z}, zmm2/m512/m64bcst","VCVTQQ2PD zmm2/m512/m64bcst, {k}{z}, zmm1","vcvtqq2pd zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.F3.0F.W1 E6 /r","V","V","AVX512DQ","bscale8,scale64","w,r,r","","" +"VCVTQQ2PS ymm1{er}, {k}{z}, zmm2","VCVTQQ2PS zmm2, {k}{z}, ymm1{er}","vcvtqq2ps zmm2, {k}{z}, ymm1{er}","EVEX.512.0F.W1 5B /r","V","V","AVX512DQ","modrm_regonly","w,r,r","Y","" +"VCVTQQ2PS ymm1, {k}{z}, zmm2/m512/m64bcst","VCVTQQ2PS zmm2/m512/m64bcst, {k}{z}, ymm1","vcvtqq2ps zmm2/m512/m64bcst, {k}{z}, ymm1","EVEX.512.0F.W1 5B /r","V","V","AVX512DQ","bscale8,scale64","w,r,r","Y","512" +"VCVTQQ2PS xmm1, {k}{z}, xmm2/m128/m64bcst","VCVTQQ2PSX xmm2/m128/m64bcst, {k}{z}, xmm1","vcvtqq2psx xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.0F.W1 5B /r","V","V","AVX512DQ+AVX512VL","bscale8,scale16","w,r,r","Y","128" +"VCVTQQ2PS xmm1, {k}{z}, ymm2/m256/m64bcst","VCVTQQ2PSY ymm2/m256/m64bcst, {k}{z}, xmm1","vcvtqq2psy ymm2/m256/m64bcst, {k}{z}, xmm1","EVEX.256.0F.W1 5B /r","V","V","AVX512DQ+AVX512VL","bscale8,scale32","w,r,r","Y","256" +"VCVTSD2SI r32{er}, xmm2","VCVTSD2SI xmm2, r32{er}","vcvtsd2si xmm2, r32{er}","EVEX.128.F2.0F.W0 2D /r","V","V","AVX512F","modrm_regonly","w,r","Y","32" +"VCVTSD2SI r32, xmm2/m64","VCVTSD2SI xmm2/m64, r32","vcvtsd2si xmm2/m64, r32","EVEX.LIG.F2.0F.W0 2D /r","V","V","AVX512F","scale8","w,r","Y","32" +"VCVTSD2SI r32, xmm2/m64","VCVTSD2SI xmm2/m64, r32","vcvtsd2si xmm2/m64, r32","VEX.LIG.F2.0F.W0 2D /r","V","V","AVX","","w,r","Y","32" +"VCVTSD2SI r64{er}, xmm2","VCVTSD2SIQ xmm2, r64{er}","vcvtsd2siq xmm2, r64{er}","EVEX.128.F2.0F.W1 2D /r","N.S.","V","AVX512F","modrm_regonly","w,r","Y","64" +"VCVTSD2SI r64, xmm2/m64","VCVTSD2SIQ xmm2/m64, r64","vcvtsd2siq xmm2/m64, r64","EVEX.LIG.F2.0F.W1 2D /r","N.S.","V","AVX512F","scale8","w,r","Y","64" +"VCVTSD2SI r64, xmm2/m64","VCVTSD2SIQ xmm2/m64, r64","vcvtsd2siq xmm2/m64, r64","VEX.LIG.F2.0F.W1 2D /r","N.S.","V","AVX","","w,r","Y","64" +"VCVTSD2SS xmm1{er}, {k}{z}, xmmV, xmm2","VCVTSD2SS xmm2, xmmV, {k}{z}, xmm1{er}","vcvtsd2ss xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.NDS.128.F2.0F.W1 5A /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VCVTSD2SS xmm1, xmmV, xmm2/m64","VCVTSD2SS xmm2/m64, xmmV, xmm1","vcvtsd2ss xmm2/m64, 
xmmV, xmm1","VEX.NDS.LIG.F2.0F.WIG 5A /r","V","V","AVX","","w,r,r","","" +"VCVTSD2SS xmm1, {k}{z}, xmmV, xmm2/m64","VCVTSD2SS xmm2/m64, xmmV, {k}{z}, xmm1","vcvtsd2ss xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.F2.0F.W1 5A /r","V","V","AVX512F","scale8","w,r,r,r","","" +"VCVTSD2USI r32{er}, xmm2","VCVTSD2USIL xmm2, r32{er}","vcvtsd2usi xmm2, r32{er}","EVEX.128.F2.0F.W0 79 /r","V","V","AVX512F","modrm_regonly","w,r","Y","32" +"VCVTSD2USI r32, xmm2/m64","VCVTSD2USIL xmm2/m64, r32","vcvtsd2usi xmm2/m64, r32","EVEX.LIG.F2.0F.W0 79 /r","V","V","AVX512F","scale8","w,r","Y","32" +"VCVTSD2USI r64{er}, xmm2","VCVTSD2USIQ xmm2, r64{er}","vcvtsd2usi xmm2, r64{er}","EVEX.128.F2.0F.W1 79 /r","N.S.","V","AVX512F","modrm_regonly","w,r","Y","64" +"VCVTSD2USI r64, xmm2/m64","VCVTSD2USIQ xmm2/m64, r64","vcvtsd2usi xmm2/m64, r64","EVEX.LIG.F2.0F.W1 79 /r","N.S.","V","AVX512F","scale8","w,r","Y","64" +"VCVTSI2SD xmm1, xmmV, r/m32","VCVTSI2SDL r/m32, xmmV, xmm1","vcvtsi2sdl r/m32, xmmV, xmm1","EVEX.NDS.LIG.F2.0F.W0 2A /r","V","V","AVX512F","scale4","w,r,r","Y","32" +"VCVTSI2SD xmm1, xmmV, r/m32","VCVTSI2SDL r/m32, xmmV, xmm1","vcvtsi2sdl r/m32, xmmV, xmm1","VEX.NDS.LIG.F2.0F.W0 2A /r","V","V","AVX","","w,r,r","Y","32" +"VCVTSI2SD xmm1, xmmV, r/m64","VCVTSI2SDQ r/m64, xmmV, xmm1","vcvtsi2sdq r/m64, xmmV, xmm1","EVEX.NDS.LIG.F2.0F.W1 2A /r","N.S.","V","AVX512F","scale8","w,r,r","Y","64" +"VCVTSI2SD xmm1, xmmV, r/m64","VCVTSI2SDQ r/m64, xmmV, xmm1","vcvtsi2sdq r/m64, xmmV, xmm1","VEX.NDS.LIG.F2.0F.W1 2A /r","N.S.","V","AVX","","w,r,r","Y","64" +"VCVTSI2SD xmm1{er}, xmmV, rmr64","VCVTSI2SDQ rmr64, xmmV, xmm1{er}","vcvtsi2sdq rmr64, xmmV, xmm1{er}","EVEX.NDS.128.F2.0F.W1 2A /r","N.S.","V","AVX512F","modrm_regonly","w,r,r","Y","64" +"VCVTSI2SS xmm1, xmmV, r/m32","VCVTSI2SSL r/m32, xmmV, xmm1","vcvtsi2ssl r/m32, xmmV, xmm1","EVEX.NDS.LIG.F3.0F.W0 2A /r","V","V","AVX512F","scale4","w,r,r","Y","32" +"VCVTSI2SS xmm1, xmmV, r/m32","VCVTSI2SSL r/m32, xmmV, xmm1","vcvtsi2ssl r/m32, xmmV, xmm1","VEX.NDS.LIG.F3.0F.W0 2A /r","V","V","AVX","","w,r,r","Y","32" +"VCVTSI2SS xmm1{er}, xmmV, rmr32","VCVTSI2SSL rmr32, xmmV, xmm1{er}","vcvtsi2ssl rmr32, xmmV, xmm1{er}","EVEX.NDS.128.F3.0F.W0 2A /r","V","V","AVX512F","modrm_regonly","w,r,r","Y","32" +"VCVTSI2SS xmm1, xmmV, r/m64","VCVTSI2SSQ r/m64, xmmV, xmm1","vcvtsi2ssq r/m64, xmmV, xmm1","EVEX.NDS.LIG.F3.0F.W1 2A /r","N.S.","V","AVX512F","scale8","w,r,r","Y","64" +"VCVTSI2SS xmm1, xmmV, r/m64","VCVTSI2SSQ r/m64, xmmV, xmm1","vcvtsi2ssq r/m64, xmmV, xmm1","VEX.NDS.LIG.F3.0F.W1 2A /r","N.S.","V","AVX","","w,r,r","Y","64" +"VCVTSI2SS xmm1{er}, xmmV, rmr64","VCVTSI2SSQ rmr64, xmmV, xmm1{er}","vcvtsi2ssq rmr64, xmmV, xmm1{er}","EVEX.NDS.128.F3.0F.W1 2A /r","N.S.","V","AVX512F","modrm_regonly","w,r,r","Y","64" +"VCVTSS2SD xmm1{sae}, {k}{z}, xmmV, xmm2","VCVTSS2SD xmm2, xmmV, {k}{z}, xmm1{sae}","vcvtss2sd xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.F3.0F.W0 5A /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VCVTSS2SD xmm1, xmmV, xmm2/m32","VCVTSS2SD xmm2/m32, xmmV, xmm1","vcvtss2sd xmm2/m32, xmmV, xmm1","VEX.NDS.LIG.F3.0F.WIG 5A /r","V","V","AVX","","w,r,r","","" +"VCVTSS2SD xmm1, {k}{z}, xmmV, xmm2/m32","VCVTSS2SD xmm2/m32, xmmV, {k}{z}, xmm1","vcvtss2sd xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.F3.0F.W0 5A /r","V","V","AVX512F","scale4","w,r,r,r","","" +"VCVTSS2SI r32{er}, xmm2","VCVTSS2SI xmm2, r32{er}","vcvtss2si xmm2, r32{er}","EVEX.128.F3.0F.W0 2D /r","V","V","AVX512F","modrm_regonly","w,r","Y","32" +"VCVTSS2SI r32, xmm2/m32","VCVTSS2SI xmm2/m32, r32","vcvtss2si xmm2/m32, 
r32","EVEX.LIG.F3.0F.W0 2D /r","V","V","AVX512F","scale4","w,r","Y","32" +"VCVTSS2SI r32, xmm2/m32","VCVTSS2SI xmm2/m32, r32","vcvtss2si xmm2/m32, r32","VEX.LIG.F3.0F.W0 2D /r","V","V","AVX","","w,r","Y","32" +"VCVTSS2SI r64{er}, xmm2","VCVTSS2SIQ xmm2, r64{er}","vcvtss2siq xmm2, r64{er}","EVEX.128.F3.0F.W1 2D /r","N.S.","V","AVX512F","modrm_regonly","w,r","Y","64" +"VCVTSS2SI r64, xmm2/m32","VCVTSS2SIQ xmm2/m32, r64","vcvtss2siq xmm2/m32, r64","EVEX.LIG.F3.0F.W1 2D /r","N.S.","V","AVX512F","scale4","w,r","Y","64" +"VCVTSS2SI r64, xmm2/m32","VCVTSS2SIQ xmm2/m32, r64","vcvtss2siq xmm2/m32, r64","VEX.LIG.F3.0F.W1 2D /r","N.S.","V","AVX","","w,r","Y","64" +"VCVTSS2USI r32{er}, xmm2","VCVTSS2USIL xmm2, r32{er}","vcvtss2usil xmm2, r32{er}","EVEX.128.F3.0F.W0 79 /r","V","V","AVX512F","modrm_regonly","w,r","Y","32" +"VCVTSS2USI r32, xmm2/m32","VCVTSS2USIL xmm2/m32, r32","vcvtss2usil xmm2/m32, r32","EVEX.LIG.F3.0F.W0 79 /r","V","V","AVX512F","scale4","w,r","Y","32" +"VCVTSS2USI r64{er}, xmm2","VCVTSS2USIQ xmm2, r64{er}","vcvtss2usiq xmm2, r64{er}","EVEX.128.F3.0F.W1 79 /r","N.S.","V","AVX512F","modrm_regonly","w,r","Y","64" +"VCVTSS2USI r64, xmm2/m32","VCVTSS2USIQ xmm2/m32, r64","vcvtss2usiq xmm2/m32, r64","EVEX.LIG.F3.0F.W1 79 /r","N.S.","V","AVX512F","scale4","w,r","Y","64" +"VCVTTPD2DQ ymm1{sae}, {k}{z}, zmm2","VCVTTPD2DQ zmm2, {k}{z}, ymm1{sae}","vcvttpd2dq zmm2, {k}{z}, ymm1{sae}","EVEX.512.66.0F.W1 E6 /r","V","V","AVX512F","modrm_regonly","w,r,r","Y","" +"VCVTTPD2DQ ymm1, {k}{z}, zmm2/m512/m64bcst","VCVTTPD2DQ zmm2/m512/m64bcst, {k}{z}, ymm1","vcvttpd2dq zmm2/m512/m64bcst, {k}{z}, ymm1","EVEX.512.66.0F.W1 E6 /r","V","V","AVX512F","bscale8,scale64","w,r,r","Y","512" +"VCVTTPD2DQ xmm1, xmm2/m128","VCVTTPD2DQX xmm2/m128, xmm1","vcvttpd2dqx xmm2/m128, xmm1","VEX.128.66.0F.WIG E6 /r","V","V","AVX","","w,r","Y","128" +"VCVTTPD2DQ xmm1, {k}{z}, xmm2/m128/m64bcst","VCVTTPD2DQX xmm2/m128/m64bcst, {k}{z}, xmm1","vcvttpd2dqx xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.66.0F.W1 E6 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r","Y","128" +"VCVTTPD2DQ xmm1, ymm2/m256","VCVTTPD2DQY ymm2/m256, xmm1","vcvttpd2dqy ymm2/m256, xmm1","VEX.256.66.0F.WIG E6 /r","V","V","AVX","","w,r","Y","256" +"VCVTTPD2DQ xmm1, {k}{z}, ymm2/m256/m64bcst","VCVTTPD2DQY ymm2/m256/m64bcst, {k}{z}, xmm1","vcvttpd2dqy ymm2/m256/m64bcst, {k}{z}, xmm1","EVEX.256.66.0F.W1 E6 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r","Y","256" +"VCVTTPD2QQ xmm1, {k}{z}, xmm2/m128/m64bcst","VCVTTPD2QQ xmm2/m128/m64bcst, {k}{z}, xmm1","vcvttpd2qq xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.66.0F.W1 7A /r","V","V","AVX512DQ+AVX512VL","bscale8,scale16","w,r,r","","" +"VCVTTPD2QQ ymm1, {k}{z}, ymm2/m256/m64bcst","VCVTTPD2QQ ymm2/m256/m64bcst, {k}{z}, ymm1","vcvttpd2qq ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.66.0F.W1 7A /r","V","V","AVX512DQ+AVX512VL","bscale8,scale32","w,r,r","","" +"VCVTTPD2QQ zmm1{sae}, {k}{z}, zmm2","VCVTTPD2QQ zmm2, {k}{z}, zmm1{sae}","vcvttpd2qq zmm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F.W1 7A /r","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"VCVTTPD2QQ zmm1, {k}{z}, zmm2/m512/m64bcst","VCVTTPD2QQ zmm2/m512/m64bcst, {k}{z}, zmm1","vcvttpd2qq zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F.W1 7A /r","V","V","AVX512DQ","bscale8,scale64","w,r,r","","" +"VCVTTPD2UDQ ymm1{sae}, {k}{z}, zmm2","VCVTTPD2UDQ zmm2, {k}{z}, ymm1{sae}","vcvttpd2udq zmm2, {k}{z}, ymm1{sae}","EVEX.512.0F.W1 78 /r","V","V","AVX512F","modrm_regonly","w,r,r","Y","" +"VCVTTPD2UDQ ymm1, {k}{z}, zmm2/m512/m64bcst","VCVTTPD2UDQ zmm2/m512/m64bcst, 
{k}{z}, ymm1","vcvttpd2udq zmm2/m512/m64bcst, {k}{z}, ymm1","EVEX.512.0F.W1 78 /r","V","V","AVX512F","bscale8,scale64","w,r,r","Y","512" +"VCVTTPD2UDQ xmm1, {k}{z}, xmm2/m128/m64bcst","VCVTTPD2UDQX xmm2/m128/m64bcst, {k}{z}, xmm1","vcvttpd2udqx xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.0F.W1 78 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r","Y","128" +"VCVTTPD2UDQ xmm1, {k}{z}, ymm2/m256/m64bcst","VCVTTPD2UDQY ymm2/m256/m64bcst, {k}{z}, xmm1","vcvttpd2udqy ymm2/m256/m64bcst, {k}{z}, xmm1","EVEX.256.0F.W1 78 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r","Y","256" +"VCVTTPD2UQQ xmm1, {k}{z}, xmm2/m128/m64bcst","VCVTTPD2UQQ xmm2/m128/m64bcst, {k}{z}, xmm1","vcvttpd2uqq xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.66.0F.W1 78 /r","V","V","AVX512DQ+AVX512VL","bscale8,scale16","w,r,r","","" +"VCVTTPD2UQQ ymm1, {k}{z}, ymm2/m256/m64bcst","VCVTTPD2UQQ ymm2/m256/m64bcst, {k}{z}, ymm1","vcvttpd2uqq ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.66.0F.W1 78 /r","V","V","AVX512DQ+AVX512VL","bscale8,scale32","w,r,r","","" +"VCVTTPD2UQQ zmm1{sae}, {k}{z}, zmm2","VCVTTPD2UQQ zmm2, {k}{z}, zmm1{sae}","vcvttpd2uqq zmm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F.W1 78 /r","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"VCVTTPD2UQQ zmm1, {k}{z}, zmm2/m512/m64bcst","VCVTTPD2UQQ zmm2/m512/m64bcst, {k}{z}, zmm1","vcvttpd2uqq zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F.W1 78 /r","V","V","AVX512DQ","bscale8,scale64","w,r,r","","" +"VCVTTPS2DQ xmm1, xmm2/m128","VCVTTPS2DQ xmm2/m128, xmm1","vcvttps2dq xmm2/m128, xmm1","VEX.128.F3.0F.WIG 5B /r","V","V","AVX","","w,r","","" +"VCVTTPS2DQ xmm1, {k}{z}, xmm2/m128/m32bcst","VCVTTPS2DQ xmm2/m128/m32bcst, {k}{z}, xmm1","vcvttps2dq xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.F3.0F.W0 5B /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r","","" +"VCVTTPS2DQ ymm1, ymm2/m256","VCVTTPS2DQ ymm2/m256, ymm1","vcvttps2dq ymm2/m256, ymm1","VEX.256.F3.0F.WIG 5B /r","V","V","AVX","","w,r","","" +"VCVTTPS2DQ ymm1, {k}{z}, ymm2/m256/m32bcst","VCVTTPS2DQ ymm2/m256/m32bcst, {k}{z}, ymm1","vcvttps2dq ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.F3.0F.W0 5B /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r","","" +"VCVTTPS2DQ zmm1{sae}, {k}{z}, zmm2","VCVTTPS2DQ zmm2, {k}{z}, zmm1{sae}","vcvttps2dq zmm2, {k}{z}, zmm1{sae}","EVEX.512.F3.0F.W0 5B /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"VCVTTPS2DQ zmm1, {k}{z}, zmm2/m512/m32bcst","VCVTTPS2DQ zmm2/m512/m32bcst, {k}{z}, zmm1","vcvttps2dq zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.F3.0F.W0 5B /r","V","V","AVX512F","bscale4,scale64","w,r,r","","" +"VCVTTPS2QQ xmm1, {k}{z}, xmm2/m128/m32bcst","VCVTTPS2QQ xmm2/m128/m32bcst, {k}{z}, xmm1","vcvttps2qq xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.66.0F.W0 7A /r","V","V","AVX512DQ+AVX512VL","bscale4,scale8","w,r,r","","" +"VCVTTPS2QQ ymm1, {k}{z}, xmm2/m256/m32bcst","VCVTTPS2QQ xmm2/m256/m32bcst, {k}{z}, ymm1","vcvttps2qq xmm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.66.0F.W0 7A /r","V","V","AVX512DQ+AVX512VL","bscale4,scale16","w,r,r","","" +"VCVTTPS2QQ zmm1{sae}, {k}{z}, ymm2","VCVTTPS2QQ ymm2, {k}{z}, zmm1{sae}","vcvttps2qq ymm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F.W0 7A /r","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"VCVTTPS2QQ zmm1, {k}{z}, ymm2/m512/m32bcst","VCVTTPS2QQ ymm2/m512/m32bcst, {k}{z}, zmm1","vcvttps2qq ymm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F.W0 7A /r","V","V","AVX512DQ","bscale4,scale32","w,r,r","","" +"VCVTTPS2UDQ xmm1, {k}{z}, xmm2/m128/m32bcst","VCVTTPS2UDQ xmm2/m128/m32bcst, {k}{z}, xmm1","vcvttps2udq xmm2/m128/m32bcst, {k}{z}, 
xmm1","EVEX.128.0F.W0 78 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r","","" +"VCVTTPS2UDQ ymm1, {k}{z}, ymm2/m256/m32bcst","VCVTTPS2UDQ ymm2/m256/m32bcst, {k}{z}, ymm1","vcvttps2udq ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.0F.W0 78 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r","","" +"VCVTTPS2UDQ zmm1{sae}, {k}{z}, zmm2","VCVTTPS2UDQ zmm2, {k}{z}, zmm1{sae}","vcvttps2udq zmm2, {k}{z}, zmm1{sae}","EVEX.512.0F.W0 78 /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"VCVTTPS2UDQ zmm1, {k}{z}, zmm2/m512/m32bcst","VCVTTPS2UDQ zmm2/m512/m32bcst, {k}{z}, zmm1","vcvttps2udq zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.0F.W0 78 /r","V","V","AVX512F","bscale4,scale64","w,r,r","","" +"VCVTTPS2UQQ xmm1, {k}{z}, xmm2/m128/m32bcst","VCVTTPS2UQQ xmm2/m128/m32bcst, {k}{z}, xmm1","vcvttps2uqq xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.66.0F.W0 78 /r","V","V","AVX512DQ+AVX512VL","bscale4,scale8","w,r,r","","" +"VCVTTPS2UQQ ymm1, {k}{z}, xmm2/m256/m32bcst","VCVTTPS2UQQ xmm2/m256/m32bcst, {k}{z}, ymm1","vcvttps2uqq xmm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.66.0F.W0 78 /r","V","V","AVX512DQ+AVX512VL","bscale4,scale16","w,r,r","","" +"VCVTTPS2UQQ zmm1{sae}, {k}{z}, ymm2","VCVTTPS2UQQ ymm2, {k}{z}, zmm1{sae}","vcvttps2uqq ymm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F.W0 78 /r","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"VCVTTPS2UQQ zmm1, {k}{z}, ymm2/m512/m32bcst","VCVTTPS2UQQ ymm2/m512/m32bcst, {k}{z}, zmm1","vcvttps2uqq ymm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F.W0 78 /r","V","V","AVX512DQ","bscale4,scale32","w,r,r","","" +"VCVTTSD2SI r32{sae}, xmm2","VCVTTSD2SI xmm2, r32{sae}","vcvttsd2si xmm2, r32{sae}","EVEX.128.F2.0F.W0 2C /r","V","V","AVX512F","modrm_regonly","w,r","Y","32" +"VCVTTSD2SI r32, xmm2/m64","VCVTTSD2SI xmm2/m64, r32","vcvttsd2si xmm2/m64, r32","EVEX.LIG.F2.0F.W0 2C /r","V","V","AVX512F","scale8","w,r","Y","32" +"VCVTTSD2SI r32, xmm2/m64","VCVTTSD2SI xmm2/m64, r32","vcvttsd2si xmm2/m64, r32","VEX.LIG.F2.0F.W0 2C /r","V","V","AVX","","w,r","Y","32" +"VCVTTSD2SI r64{sae}, xmm2","VCVTTSD2SIQ xmm2, r64{sae}","vcvttsd2siq xmm2, r64{sae}","EVEX.128.F2.0F.W1 2C /r","N.S.","V","AVX512F","modrm_regonly","w,r","Y","64" +"VCVTTSD2SI r64, xmm2/m64","VCVTTSD2SIQ xmm2/m64, r64","vcvttsd2siq xmm2/m64, r64","EVEX.LIG.F2.0F.W1 2C /r","N.S.","V","AVX512F","scale8","w,r","Y","64" +"VCVTTSD2SI r64, xmm2/m64","VCVTTSD2SIQ xmm2/m64, r64","vcvttsd2siq xmm2/m64, r64","VEX.LIG.F2.0F.W1 2C /r","N.S.","V","AVX","","w,r","Y","64" +"VCVTTSD2USI r32{sae}, xmm2","VCVTTSD2USIL xmm2, r32{sae}","vcvttsd2usil xmm2, r32{sae}","EVEX.128.F2.0F.W0 78 /r","V","V","AVX512F","modrm_regonly","w,r","Y","32" +"VCVTTSD2USI r32, xmm2/m64","VCVTTSD2USIL xmm2/m64, r32","vcvttsd2usil xmm2/m64, r32","EVEX.LIG.F2.0F.W0 78 /r","V","V","AVX512F","scale8","w,r","Y","32" +"VCVTTSD2USI r64{sae}, xmm2","VCVTTSD2USIQ xmm2, r64{sae}","vcvttsd2usiq xmm2, r64{sae}","EVEX.128.F2.0F.W1 78 /r","N.S.","V","AVX512F","modrm_regonly","w,r","Y","64" +"VCVTTSD2USI r64, xmm2/m64","VCVTTSD2USIQ xmm2/m64, r64","vcvttsd2usiq xmm2/m64, r64","EVEX.LIG.F2.0F.W1 78 /r","N.S.","V","AVX512F","scale8","w,r","Y","64" +"VCVTTSS2SI r32{sae}, xmm2","VCVTTSS2SI xmm2, r32{sae}","vcvttss2si xmm2, r32{sae}","EVEX.128.F3.0F.W0 2C /r","V","V","AVX512F","modrm_regonly","w,r","Y","32" +"VCVTTSS2SI r32, xmm2/m32","VCVTTSS2SI xmm2/m32, r32","vcvttss2si xmm2/m32, r32","EVEX.LIG.F3.0F.W0 2C /r","V","V","AVX512F","scale4","w,r","Y","32" +"VCVTTSS2SI r32, xmm2/m32","VCVTTSS2SI xmm2/m32, r32","vcvttss2si xmm2/m32, r32","VEX.LIG.F3.0F.W0 2C 
/r","V","V","AVX","","w,r","Y","32" +"VCVTTSS2SI r64{sae}, xmm2","VCVTTSS2SIQ xmm2, r64{sae}","vcvttss2siq xmm2, r64{sae}","EVEX.128.F3.0F.W1 2C /r","N.S.","V","AVX512F","modrm_regonly","w,r","Y","64" +"VCVTTSS2SI r64, xmm2/m32","VCVTTSS2SIQ xmm2/m32, r64","vcvttss2siq xmm2/m32, r64","EVEX.LIG.F3.0F.W1 2C /r","N.S.","V","AVX512F","scale4","w,r","Y","64" +"VCVTTSS2SI r64, xmm2/m32","VCVTTSS2SIQ xmm2/m32, r64","vcvttss2siq xmm2/m32, r64","VEX.LIG.F3.0F.W1 2C /r","N.S.","V","AVX","","w,r","Y","64" +"VCVTTSS2USI r32{sae}, xmm2","VCVTTSS2USIL xmm2, r32{sae}","vcvttss2usil xmm2, r32{sae}","EVEX.128.F3.0F.W0 78 /r","V","V","AVX512F","modrm_regonly","w,r","Y","32" +"VCVTTSS2USI r32, xmm2/m32","VCVTTSS2USIL xmm2/m32, r32","vcvttss2usil xmm2/m32, r32","EVEX.LIG.F3.0F.W0 78 /r","V","V","AVX512F","scale4","w,r","Y","32" +"VCVTTSS2USI r64{sae}, xmm2","VCVTTSS2USIQ xmm2, r64{sae}","vcvttss2usiq xmm2, r64{sae}","EVEX.128.F3.0F.W1 78 /r","N.S.","V","AVX512F","modrm_regonly","w,r","Y","64" +"VCVTTSS2USI r64, xmm2/m32","VCVTTSS2USIQ xmm2/m32, r64","vcvttss2usiq xmm2/m32, r64","EVEX.LIG.F3.0F.W1 78 /r","N.S.","V","AVX512F","scale4","w,r","Y","64" +"VCVTUDQ2PD xmm1, {k}{z}, xmm2/m128/m32bcst","VCVTUDQ2PD xmm2/m128/m32bcst, {k}{z}, xmm1","vcvtudq2pd xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.F3.0F.W0 7A /r","V","V","AVX512F+AVX512VL","bscale4,scale8","w,r,r","","" +"VCVTUDQ2PD ymm1, {k}{z}, xmm2/m256/m32bcst","VCVTUDQ2PD xmm2/m256/m32bcst, {k}{z}, ymm1","vcvtudq2pd xmm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.F3.0F.W0 7A /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r","","" +"VCVTUDQ2PD zmm1, {k}{z}, ymm2/m512/m32bcst","VCVTUDQ2PD ymm2/m512/m32bcst, {k}{z}, zmm1","vcvtudq2pd ymm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.F3.0F.W0 7A /r","V","V","AVX512F","bscale4,scale32","w,r,r","","" +"VCVTUDQ2PS xmm1, {k}{z}, xmm2/m128/m32bcst","VCVTUDQ2PS xmm2/m128/m32bcst, {k}{z}, xmm1","vcvtudq2ps xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.F2.0F.W0 7A /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r","","" +"VCVTUDQ2PS ymm1, {k}{z}, ymm2/m256/m32bcst","VCVTUDQ2PS ymm2/m256/m32bcst, {k}{z}, ymm1","vcvtudq2ps ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.F2.0F.W0 7A /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r","","" +"VCVTUDQ2PS zmm1{er}, {k}{z}, zmm2","VCVTUDQ2PS zmm2, {k}{z}, zmm1{er}","vcvtudq2ps zmm2, {k}{z}, zmm1{er}","EVEX.512.F2.0F.W0 7A /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"VCVTUDQ2PS zmm1, {k}{z}, zmm2/m512/m32bcst","VCVTUDQ2PS zmm2/m512/m32bcst, {k}{z}, zmm1","vcvtudq2ps zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.F2.0F.W0 7A /r","V","V","AVX512F","bscale4,scale64","w,r,r","","" +"VCVTUQQ2PD xmm1, {k}{z}, xmm2/m128/m64bcst","VCVTUQQ2PD xmm2/m128/m64bcst, {k}{z}, xmm1","vcvtuqq2pd xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.F3.0F.W1 7A /r","V","V","AVX512DQ+AVX512VL","bscale8,scale16","w,r,r","","" +"VCVTUQQ2PD ymm1, {k}{z}, ymm2/m256/m64bcst","VCVTUQQ2PD ymm2/m256/m64bcst, {k}{z}, ymm1","vcvtuqq2pd ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.F3.0F.W1 7A /r","V","V","AVX512DQ+AVX512VL","bscale8,scale32","w,r,r","","" +"VCVTUQQ2PD zmm1{er}, {k}{z}, zmm2","VCVTUQQ2PD zmm2, {k}{z}, zmm1{er}","vcvtuqq2pd zmm2, {k}{z}, zmm1{er}","EVEX.512.F3.0F.W1 7A /r","V","V","AVX512DQ","modrm_regonly","w,r,r","","" +"VCVTUQQ2PD zmm1, {k}{z}, zmm2/m512/m64bcst","VCVTUQQ2PD zmm2/m512/m64bcst, {k}{z}, zmm1","vcvtuqq2pd zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.F3.0F.W1 7A /r","V","V","AVX512DQ","bscale8,scale64","w,r,r","","" +"VCVTUQQ2PS ymm1{er}, {k}{z}, zmm2","VCVTUQQ2PS zmm2, {k}{z}, 
ymm1{er}","vcvtuqq2ps zmm2, {k}{z}, ymm1{er}","EVEX.512.F2.0F.W1 7A /r","V","V","AVX512DQ","modrm_regonly","w,r,r","Y","" +"VCVTUQQ2PS ymm1, {k}{z}, zmm2/m512/m64bcst","VCVTUQQ2PS zmm2/m512/m64bcst, {k}{z}, ymm1","vcvtuqq2ps zmm2/m512/m64bcst, {k}{z}, ymm1","EVEX.512.F2.0F.W1 7A /r","V","V","AVX512DQ","bscale8,scale64","w,r,r","Y","512" +"VCVTUQQ2PS xmm1, {k}{z}, xmm2/m128/m64bcst","VCVTUQQ2PSX xmm2/m128/m64bcst, {k}{z}, xmm1","vcvtuqq2psx xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.F2.0F.W1 7A /r","V","V","AVX512DQ+AVX512VL","bscale8,scale16","w,r,r","Y","128" +"VCVTUQQ2PS xmm1, {k}{z}, ymm2/m256/m64bcst","VCVTUQQ2PSY ymm2/m256/m64bcst, {k}{z}, xmm1","vcvtuqq2psy ymm2/m256/m64bcst, {k}{z}, xmm1","EVEX.256.F2.0F.W1 7A /r","V","V","AVX512DQ+AVX512VL","bscale8,scale32","w,r,r","Y","256" +"VCVTUSI2SD xmm1, xmmV, r/m32","VCVTUSI2SDL r/m32, xmmV, xmm1","vcvtusi2sd r/m32, xmmV, xmm1","EVEX.NDS.LIG.F2.0F.W0 7B /r","V","V","AVX512F","scale4","w,r,r","Y","32" +"VCVTUSI2SD xmm1, xmmV, r/m64","VCVTUSI2SDQ r/m64, xmmV, xmm1","vcvtusi2sd r/m64, xmmV, xmm1","EVEX.NDS.LIG.F2.0F.W1 7B /r","N.S.","V","AVX512F","scale8","w,r,r","Y","64" +"VCVTUSI2SD xmm1{er}, xmmV, rmr64","VCVTUSI2SDQ rmr64, xmmV, xmm1{er}","vcvtusi2sd rmr64, xmmV, xmm1{er}","EVEX.NDS.128.F2.0F.W1 7B /r","N.S.","V","AVX512F","modrm_regonly","w,r,r","Y","64" +"VCVTUSI2SS xmm1, xmmV, r/m32","VCVTUSI2SSL r/m32, xmmV, xmm1","vcvtusi2ssl r/m32, xmmV, xmm1","EVEX.NDS.LIG.F3.0F.W0 7B /r","V","V","AVX512F","scale4","w,r,r","Y","32" +"VCVTUSI2SS xmm1{er}, xmmV, rmr32","VCVTUSI2SSL rmr32, xmmV, xmm1{er}","vcvtusi2ssl rmr32, xmmV, xmm1{er}","EVEX.NDS.128.F3.0F.W0 7B /r","V","V","AVX512F","modrm_regonly","w,r,r","Y","32" +"VCVTUSI2SS xmm1, xmmV, r/m64","VCVTUSI2SSQ r/m64, xmmV, xmm1","vcvtusi2ssq r/m64, xmmV, xmm1","EVEX.NDS.LIG.F3.0F.W1 7B /r","N.S.","V","AVX512F","scale8","w,r,r","Y","64" +"VCVTUSI2SS xmm1{er}, xmmV, rmr64","VCVTUSI2SSQ rmr64, xmmV, xmm1{er}","vcvtusi2ssq rmr64, xmmV, xmm1{er}","EVEX.NDS.128.F3.0F.W1 7B /r","N.S.","V","AVX512F","modrm_regonly","w,r,r","Y","64" +"VDBPSADBW xmm1, {k}{z}, xmmV, xmm2/m128, imm8u","VDBPSADBW imm8u, xmm2/m128, xmmV, {k}{z}, xmm1","vdbpsadbw imm8u, xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F3A.W0 42 /r ib","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r,r","","" +"VDBPSADBW ymm1, {k}{z}, ymmV, ymm2/m256, imm8u","VDBPSADBW imm8u, ymm2/m256, ymmV, {k}{z}, ymm1","vdbpsadbw imm8u, ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W0 42 /r ib","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r,r","","" +"VDBPSADBW zmm1, {k}{z}, zmmV, zmm2/m512, imm8u","VDBPSADBW imm8u, zmm2/m512, zmmV, {k}{z}, zmm1","vdbpsadbw imm8u, zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W0 42 /r ib","V","V","AVX512BW","scale64","w,r,r,r,r","","" +"VDIVPD xmm1, xmmV, xmm2/m128","VDIVPD xmm2/m128, xmmV, xmm1","vdivpd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 5E /r","V","V","AVX","","w,r,r","","" +"VDIVPD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VDIVPD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vdivpd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 5E /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VDIVPD ymm1, ymmV, ymm2/m256","VDIVPD ymm2/m256, ymmV, ymm1","vdivpd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 5E /r","V","V","AVX","","w,r,r","","" +"VDIVPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VDIVPD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vdivpd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 5E /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VDIVPD zmm1{er}, 
{k}{z}, zmmV, zmm2","VDIVPD zmm2, zmmV, {k}{z}, zmm1{er}","vdivpd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.NDS.512.66.0F.W1 5E /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VDIVPD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VDIVPD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vdivpd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 5E /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VDIVPS xmm1, xmmV, xmm2/m128","VDIVPS xmm2/m128, xmmV, xmm1","vdivps xmm2/m128, xmmV, xmm1","VEX.NDS.128.0F.WIG 5E /r","V","V","AVX","","w,r,r","","" +"VDIVPS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VDIVPS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vdivps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.0F.W0 5E /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VDIVPS ymm1, ymmV, ymm2/m256","VDIVPS ymm2/m256, ymmV, ymm1","vdivps ymm2/m256, ymmV, ymm1","VEX.NDS.256.0F.WIG 5E /r","V","V","AVX","","w,r,r","","" +"VDIVPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VDIVPS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vdivps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.0F.W0 5E /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VDIVPS zmm1{er}, {k}{z}, zmmV, zmm2","VDIVPS zmm2, zmmV, {k}{z}, zmm1{er}","vdivps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.NDS.512.0F.W0 5E /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VDIVPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VDIVPS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vdivps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.0F.W0 5E /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VDIVSD xmm1{er}, {k}{z}, xmmV, xmm2","VDIVSD xmm2, xmmV, {k}{z}, xmm1{er}","vdivsd xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.NDS.128.F2.0F.W1 5E /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VDIVSD xmm1, xmmV, xmm2/m64","VDIVSD xmm2/m64, xmmV, xmm1","vdivsd xmm2/m64, xmmV, xmm1","VEX.NDS.LIG.F2.0F.WIG 5E /r","V","V","AVX","","w,r,r","","" +"VDIVSD xmm1, {k}{z}, xmmV, xmm2/m64","VDIVSD xmm2/m64, xmmV, {k}{z}, xmm1","vdivsd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.F2.0F.W1 5E /r","V","V","AVX512F","scale8","w,r,r,r","","" +"VDIVSS xmm1{er}, {k}{z}, xmmV, xmm2","VDIVSS xmm2, xmmV, {k}{z}, xmm1{er}","vdivss xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.NDS.128.F3.0F.W0 5E /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VDIVSS xmm1, xmmV, xmm2/m32","VDIVSS xmm2/m32, xmmV, xmm1","vdivss xmm2/m32, xmmV, xmm1","VEX.NDS.LIG.F3.0F.WIG 5E /r","V","V","AVX","","w,r,r","","" +"VDIVSS xmm1, {k}{z}, xmmV, xmm2/m32","VDIVSS xmm2/m32, xmmV, {k}{z}, xmm1","vdivss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.F3.0F.W0 5E /r","V","V","AVX512F","scale4","w,r,r,r","","" +"VDPPD xmm1, xmmV, xmm2/m128, imm8u","VDPPD imm8u, xmm2/m128, xmmV, xmm1","vdppd imm8u, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.WIG 41 /r ib","V","V","AVX","","w,r,r,r","","" +"VDPPS xmm1, xmmV, xmm2/m128, imm8u","VDPPS imm8u, xmm2/m128, xmmV, xmm1","vdpps imm8u, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.WIG 40 /r ib","V","V","AVX","","w,r,r,r","","" +"VDPPS ymm1, ymmV, ymm2/m256, imm8u","VDPPS imm8u, ymm2/m256, ymmV, ymm1","vdpps imm8u, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.WIG 40 /r ib","V","V","AVX","","w,r,r,r","","" +"VERR r/m16","VERR r/m16","verr r/m16","0F 00 /4","V","V","","","r","","" +"VERW r/m16","VERW r/m16","verw r/m16","0F 00 /5","V","V","","","r","","" +"VEXP2PD zmm1{sae}, {k}{z}, zmm2","VEXP2PD zmm2, {k}{z}, zmm1{sae}","vexp2pd zmm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F38.W1 C8 /r","V","V","AVX512ER","modrm_regonly","w,r,r","","" +"VEXP2PD zmm1, {k}{z}, 
zmm2/m512/m64bcst","VEXP2PD zmm2/m512/m64bcst, {k}{z}, zmm1","vexp2pd zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W1 C8 /r","V","V","AVX512ER","bscale8,scale64","w,r,r","","" +"VEXP2PS zmm1{sae}, {k}{z}, zmm2","VEXP2PS zmm2, {k}{z}, zmm1{sae}","vexp2ps zmm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F38.W0 C8 /r","V","V","AVX512ER","modrm_regonly","w,r,r","","" +"VEXP2PS zmm1, {k}{z}, zmm2/m512/m32bcst","VEXP2PS zmm2/m512/m32bcst, {k}{z}, zmm1","vexp2ps zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W0 C8 /r","V","V","AVX512ER","bscale4,scale64","w,r,r","","" +"VEXPANDPD xmm1, {k}{z}, xmm2/m128","VEXPANDPD xmm2/m128, {k}{z}, xmm1","vexpandpd xmm2/m128, {k}{z}, xmm1","EVEX.128.66.0F38.W1 88 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VEXPANDPD ymm1, {k}{z}, ymm2/m256","VEXPANDPD ymm2/m256, {k}{z}, ymm1","vexpandpd ymm2/m256, {k}{z}, ymm1","EVEX.256.66.0F38.W1 88 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VEXPANDPD zmm1, {k}{z}, zmm2/m512","VEXPANDPD zmm2/m512, {k}{z}, zmm1","vexpandpd zmm2/m512, {k}{z}, zmm1","EVEX.512.66.0F38.W1 88 /r","V","V","AVX512F","scale8","w,r,r","","" +"VEXPANDPS xmm1, {k}{z}, xmm2/m128","VEXPANDPS xmm2/m128, {k}{z}, xmm1","vexpandps xmm2/m128, {k}{z}, xmm1","EVEX.128.66.0F38.W0 88 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VEXPANDPS ymm1, {k}{z}, ymm2/m256","VEXPANDPS ymm2/m256, {k}{z}, ymm1","vexpandps ymm2/m256, {k}{z}, ymm1","EVEX.256.66.0F38.W0 88 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VEXPANDPS zmm1, {k}{z}, zmm2/m512","VEXPANDPS zmm2/m512, {k}{z}, zmm1","vexpandps zmm2/m512, {k}{z}, zmm1","EVEX.512.66.0F38.W0 88 /r","V","V","AVX512F","scale4","w,r,r","","" +"VEXTRACTF128 xmm2/m128, ymm1, imm8u:1","VEXTRACTF128 imm8u:1, ymm1, xmm2/m128","vextractf128 imm8u:1, ymm1, xmm2/m128","VEX.256.66.0F3A.W0 19 /r ib","V","V","AVX","","w,r,r","","" +"VEXTRACTF32X4 xmm2/m128, {k}{z}, ymm1, imm8u:1","VEXTRACTF32X4 imm8u:1, ymm1, {k}{z}, xmm2/m128","vextractf32x4 imm8u:1, ymm1, {k}{z}, xmm2/m128","EVEX.256.66.0F3A.W0 19 /r ib","V","V","AVX512F+AVX512VL","scale16","w,r,r,r","","" +"VEXTRACTF32X4 xmm2/m128, {k}{z}, zmm1, imm8u:2","VEXTRACTF32X4 imm8u:2, zmm1, {k}{z}, xmm2/m128","vextractf32x4 imm8u:2, zmm1, {k}{z}, xmm2/m128","EVEX.512.66.0F3A.W0 19 /r ib","V","V","AVX512F","scale16","w,r,r,r","","" +"VEXTRACTF32X8 ymm2/m256, {k}{z}, zmm1, imm8u:1","VEXTRACTF32X8 imm8u:1, zmm1, {k}{z}, ymm2/m256","vextractf32x8 imm8u:1, zmm1, {k}{z}, ymm2/m256","EVEX.512.66.0F3A.W0 1B /r ib","V","V","AVX512DQ","scale32","w,r,r,r","","" +"VEXTRACTF64X2 xmm2/m128, {k}{z}, ymm1, imm8u:1","VEXTRACTF64X2 imm8u:1, ymm1, {k}{z}, xmm2/m128","vextractf64x2 imm8u:1, ymm1, {k}{z}, xmm2/m128","EVEX.256.66.0F3A.W1 19 /r ib","V","V","AVX512DQ+AVX512VL","scale16","w,r,r,r","","" +"VEXTRACTF64X2 xmm2/m128, {k}{z}, zmm1, imm8u:2","VEXTRACTF64X2 imm8u:2, zmm1, {k}{z}, xmm2/m128","vextractf64x2 imm8u:2, zmm1, {k}{z}, xmm2/m128","EVEX.512.66.0F3A.W1 19 /r ib","V","V","AVX512DQ","scale16","w,r,r,r","","" +"VEXTRACTF64X4 ymm2/m256, {k}{z}, zmm1, imm8u","VEXTRACTF64X4 imm8u, zmm1, {k}{z}, ymm2/m256","vextractf64x4 imm8u, zmm1, {k}{z}, ymm2/m256","EVEX.512.66.0F3A.W1 1B /r ib","V","V","AVX512F","scale32","w,r,r,r","","" +"VEXTRACTI128 xmm2/m128, ymm1, imm8u:1","VEXTRACTI128 imm8u:1, ymm1, xmm2/m128","vextracti128 imm8u:1, ymm1, xmm2/m128","VEX.256.66.0F3A.W0 39 /r ib","V","V","AVX2","","w,r,r","","" +"VEXTRACTI32X4 xmm2/m128, {k}{z}, ymm1, imm8u:1","VEXTRACTI32X4 imm8u:1, ymm1, {k}{z}, xmm2/m128","vextracti32x4 imm8u:1, ymm1, {k}{z}, 
xmm2/m128","EVEX.256.66.0F3A.W0 39 /r ib","V","V","AVX512F+AVX512VL","scale16","w,r,r,r","","" +"VEXTRACTI32X4 xmm2/m128, {k}{z}, zmm1, imm8u:2","VEXTRACTI32X4 imm8u:2, zmm1, {k}{z}, xmm2/m128","vextracti32x4 imm8u:2, zmm1, {k}{z}, xmm2/m128","EVEX.512.66.0F3A.W0 39 /r ib","V","V","AVX512F","scale16","w,r,r,r","","" +"VEXTRACTI32X8 ymm2/m256, {k}{z}, zmm1, imm8u:1","VEXTRACTI32X8 imm8u:1, zmm1, {k}{z}, ymm2/m256","vextracti32x8 imm8u:1, zmm1, {k}{z}, ymm2/m256","EVEX.512.66.0F3A.W0 3B /r ib","V","V","AVX512DQ","scale32","w,r,r,r","","" +"VEXTRACTI64X2 xmm2/m128, {k}{z}, ymm1, imm8u:1","VEXTRACTI64X2 imm8u:1, ymm1, {k}{z}, xmm2/m128","vextracti64x2 imm8u:1, ymm1, {k}{z}, xmm2/m128","EVEX.256.66.0F3A.W1 39 /r ib","V","V","AVX512DQ+AVX512VL","scale16","w,r,r,r","","" +"VEXTRACTI64X2 xmm2/m128, {k}{z}, zmm1, imm8u:2","VEXTRACTI64X2 imm8u:2, zmm1, {k}{z}, xmm2/m128","vextracti64x2 imm8u:2, zmm1, {k}{z}, xmm2/m128","EVEX.512.66.0F3A.W1 39 /r ib","V","V","AVX512DQ","scale16","w,r,r,r","","" +"VEXTRACTI64X4 ymm2/m256, {k}{z}, zmm1, imm8u:1","VEXTRACTI64X4 imm8u:1, zmm1, {k}{z}, ymm2/m256","vextracti64x4 imm8u:1, zmm1, {k}{z}, ymm2/m256","EVEX.512.66.0F3A.W1 3B /r ib","V","V","AVX512F","scale32","w,r,r,r","","" +"VEXTRACTPS r/m32, xmm1, imm8u:2","VEXTRACTPS imm8u:2, xmm1, r/m32","vextractps imm8u:2, xmm1, r/m32","EVEX.128.66.0F3A.WIG 17 /r ib","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VEXTRACTPS r/m32, xmm1, imm8u:2","VEXTRACTPS imm8u:2, xmm1, r/m32","vextractps imm8u:2, xmm1, r/m32","VEX.128.66.0F3A.WIG 17 /r ib","V","V","AVX","","w,r,r","","" +"VFIXUPIMMPD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst, imm8u","VFIXUPIMMPD imm8u, xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vfixupimmpd imm8u, xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F3A.W1 54 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r,r","","" +"VFIXUPIMMPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst, imm8u","VFIXUPIMMPD imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfixupimmpd imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F3A.W1 54 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r,r","","" +"VFIXUPIMMPD zmm1{sae}, {k}{z}, zmmV, zmm2, imm8u","VFIXUPIMMPD imm8u, zmm2, zmmV, {k}{z}, zmm1{sae}","vfixupimmpd imm8u, zmm2, zmmV, {k}{z}, zmm1{sae}","EVEX.DDS.512.66.0F3A.W1 54 /r ib","V","V","AVX512F","modrm_regonly","rw,r,r,r,r","","" +"VFIXUPIMMPD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst, imm8u","VFIXUPIMMPD imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfixupimmpd imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F3A.W1 54 /r ib","V","V","AVX512F","bscale8,scale64","rw,r,r,r,r","","" +"VFIXUPIMMPS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst, imm8u","VFIXUPIMMPS imm8u, xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfixupimmps imm8u, xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F3A.W0 54 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r,r","","" +"VFIXUPIMMPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst, imm8u","VFIXUPIMMPS imm8u, ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vfixupimmps imm8u, ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F3A.W0 54 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r,r","","" +"VFIXUPIMMPS zmm1{sae}, {k}{z}, zmmV, zmm2, imm8u","VFIXUPIMMPS imm8u, zmm2, zmmV, {k}{z}, zmm1{sae}","vfixupimmps imm8u, zmm2, zmmV, {k}{z}, zmm1{sae}","EVEX.DDS.512.66.0F3A.W0 54 /r ib","V","V","AVX512F","modrm_regonly","rw,r,r,r,r","","" +"VFIXUPIMMPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst, imm8u","VFIXUPIMMPS imm8u, zmm2/m512/m32bcst, zmmV, {k}{z}, 
zmm1","vfixupimmps imm8u, zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F3A.W0 54 /r ib","V","V","AVX512F","bscale4,scale64","rw,r,r,r,r","","" +"VFIXUPIMMSD xmm1{sae}, {k}{z}, xmmV, xmm2, imm8u","VFIXUPIMMSD imm8u, xmm2, xmmV, {k}{z}, xmm1{sae}","vfixupimmsd imm8u, xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.DDS.128.66.0F3A.W1 55 /r ib","V","V","AVX512F","modrm_regonly","rw,r,r,r,r","","" +"VFIXUPIMMSD xmm1, {k}{z}, xmmV, xmm2/m64, imm8u","VFIXUPIMMSD imm8u, xmm2/m64, xmmV, {k}{z}, xmm1","vfixupimmsd imm8u, xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F3A.W1 55 /r ib","V","V","AVX512F","scale8","rw,r,r,r,r","","" +"VFIXUPIMMSS xmm1{sae}, {k}{z}, xmmV, xmm2, imm8u","VFIXUPIMMSS imm8u, xmm2, xmmV, {k}{z}, xmm1{sae}","vfixupimmss imm8u, xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.DDS.128.66.0F3A.W0 55 /r ib","V","V","AVX512F","modrm_regonly","rw,r,r,r,r","","" +"VFIXUPIMMSS xmm1, {k}{z}, xmmV, xmm2/m32, imm8u","VFIXUPIMMSS imm8u, xmm2/m32, xmmV, {k}{z}, xmm1","vfixupimmss imm8u, xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F3A.W0 55 /r ib","V","V","AVX512F","scale4","rw,r,r,r,r","","" +"VFMADD132PD xmm1, xmmV, xmm2/m128","VFMADD132PD xmm2/m128, xmmV, xmm1","vfmadd132pd xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W1 98 /r","V","V","FMA","","rw,r,r","","" +"VFMADD132PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VFMADD132PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vfmadd132pd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 98 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VFMADD132PD ymm1, ymmV, ymm2/m256","VFMADD132PD ymm2/m256, ymmV, ymm1","vfmadd132pd ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W1 98 /r","V","V","FMA","","rw,r,r","","" +"VFMADD132PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VFMADD132PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfmadd132pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 98 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VFMADD132PD zmm1{er}, {k}{z}, zmmV, zmm2","VFMADD132PD zmm2, zmmV, {k}{z}, zmm1{er}","vfmadd132pd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W1 98 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMADD132PD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VFMADD132PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfmadd132pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 98 /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VFMADD132PS xmm1, xmmV, xmm2/m128","VFMADD132PS xmm2/m128, xmmV, xmm1","vfmadd132ps xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W0 98 /r","V","V","FMA","","rw,r,r","","" +"VFMADD132PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VFMADD132PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfmadd132ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 98 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VFMADD132PS ymm1, ymmV, ymm2/m256","VFMADD132PS ymm2/m256, ymmV, ymm1","vfmadd132ps ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W0 98 /r","V","V","FMA","","rw,r,r","","" +"VFMADD132PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VFMADD132PS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vfmadd132ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 98 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VFMADD132PS zmm1{er}, {k}{z}, zmmV, zmm2","VFMADD132PS zmm2, zmmV, {k}{z}, zmm1{er}","vfmadd132ps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W0 98 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMADD132PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VFMADD132PS zmm2/m512/m32bcst, zmmV, 
{k}{z}, zmm1","vfmadd132ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 98 /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VFMADD132SD xmm1{er}, {k}{z}, xmmV, xmm2","VFMADD132SD xmm2, xmmV, {k}{z}, xmm1{er}","vfmadd132sd xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W1 99 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMADD132SD xmm1, xmmV, xmm2/m64","VFMADD132SD xmm2/m64, xmmV, xmm1","vfmadd132sd xmm2/m64, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W1 99 /r","V","V","FMA","","rw,r,r","","" +"VFMADD132SD xmm1, {k}{z}, xmmV, xmm2/m64","VFMADD132SD xmm2/m64, xmmV, {k}{z}, xmm1","vfmadd132sd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W1 99 /r","V","V","AVX512F","scale8","rw,r,r,r","","" +"VFMADD132SS xmm1{er}, {k}{z}, xmmV, xmm2","VFMADD132SS xmm2, xmmV, {k}{z}, xmm1{er}","vfmadd132ss xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W0 99 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMADD132SS xmm1, xmmV, xmm2/m32","VFMADD132SS xmm2/m32, xmmV, xmm1","vfmadd132ss xmm2/m32, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W0 99 /r","V","V","FMA","","rw,r,r","","" +"VFMADD132SS xmm1, {k}{z}, xmmV, xmm2/m32","VFMADD132SS xmm2/m32, xmmV, {k}{z}, xmm1","vfmadd132ss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W0 99 /r","V","V","AVX512F","scale4","rw,r,r,r","","" +"VFMADD213PD xmm1, xmmV, xmm2/m128","VFMADD213PD xmm2/m128, xmmV, xmm1","vfmadd213pd xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W1 A8 /r","V","V","FMA","","rw,r,r","","" +"VFMADD213PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VFMADD213PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vfmadd213pd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 A8 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VFMADD213PD ymm1, ymmV, ymm2/m256","VFMADD213PD ymm2/m256, ymmV, ymm1","vfmadd213pd ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W1 A8 /r","V","V","FMA","","rw,r,r","","" +"VFMADD213PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VFMADD213PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfmadd213pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 A8 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VFMADD213PD zmm1{er}, {k}{z}, zmmV, zmm2","VFMADD213PD zmm2, zmmV, {k}{z}, zmm1{er}","vfmadd213pd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W1 A8 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMADD213PD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VFMADD213PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfmadd213pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 A8 /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VFMADD213PS xmm1, xmmV, xmm2/m128","VFMADD213PS xmm2/m128, xmmV, xmm1","vfmadd213ps xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W0 A8 /r","V","V","FMA","","rw,r,r","","" +"VFMADD213PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VFMADD213PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfmadd213ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 A8 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VFMADD213PS ymm1, ymmV, ymm2/m256","VFMADD213PS ymm2/m256, ymmV, ymm1","vfmadd213ps ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W0 A8 /r","V","V","FMA","","rw,r,r","","" +"VFMADD213PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VFMADD213PS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vfmadd213ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 A8 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VFMADD213PS zmm1{er}, {k}{z}, zmmV, zmm2","VFMADD213PS zmm2, zmmV, {k}{z}, 
zmm1{er}","vfmadd213ps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W0 A8 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMADD213PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VFMADD213PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vfmadd213ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 A8 /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VFMADD213SD xmm1{er}, {k}{z}, xmmV, xmm2","VFMADD213SD xmm2, xmmV, {k}{z}, xmm1{er}","vfmadd213sd xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W1 A9 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMADD213SD xmm1, xmmV, xmm2/m64","VFMADD213SD xmm2/m64, xmmV, xmm1","vfmadd213sd xmm2/m64, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W1 A9 /r","V","V","FMA","","rw,r,r","","" +"VFMADD213SD xmm1, {k}{z}, xmmV, xmm2/m64","VFMADD213SD xmm2/m64, xmmV, {k}{z}, xmm1","vfmadd213sd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W1 A9 /r","V","V","AVX512F","scale8","rw,r,r,r","","" +"VFMADD213SS xmm1{er}, {k}{z}, xmmV, xmm2","VFMADD213SS xmm2, xmmV, {k}{z}, xmm1{er}","vfmadd213ss xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W0 A9 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMADD213SS xmm1, xmmV, xmm2/m32","VFMADD213SS xmm2/m32, xmmV, xmm1","vfmadd213ss xmm2/m32, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W0 A9 /r","V","V","FMA","","rw,r,r","","" +"VFMADD213SS xmm1, {k}{z}, xmmV, xmm2/m32","VFMADD213SS xmm2/m32, xmmV, {k}{z}, xmm1","vfmadd213ss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W0 A9 /r","V","V","AVX512F","scale4","rw,r,r,r","","" +"VFMADD231PD xmm1, xmmV, xmm2/m128","VFMADD231PD xmm2/m128, xmmV, xmm1","vfmadd231pd xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W1 B8 /r","V","V","FMA","","rw,r,r","","" +"VFMADD231PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VFMADD231PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vfmadd231pd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 B8 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VFMADD231PD ymm1, ymmV, ymm2/m256","VFMADD231PD ymm2/m256, ymmV, ymm1","vfmadd231pd ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W1 B8 /r","V","V","FMA","","rw,r,r","","" +"VFMADD231PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VFMADD231PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfmadd231pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 B8 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VFMADD231PD zmm1{er}, {k}{z}, zmmV, zmm2","VFMADD231PD zmm2, zmmV, {k}{z}, zmm1{er}","vfmadd231pd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W1 B8 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMADD231PD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VFMADD231PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfmadd231pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 B8 /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VFMADD231PS xmm1, xmmV, xmm2/m128","VFMADD231PS xmm2/m128, xmmV, xmm1","vfmadd231ps xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W0 B8 /r","V","V","FMA","","rw,r,r","","" +"VFMADD231PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VFMADD231PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfmadd231ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 B8 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VFMADD231PS ymm1, ymmV, ymm2/m256","VFMADD231PS ymm2/m256, ymmV, ymm1","vfmadd231ps ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W0 B8 /r","V","V","FMA","","rw,r,r","","" +"VFMADD231PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VFMADD231PS ymm2/m256/m32bcst, ymmV, {k}{z}, 
ymm1","vfmadd231ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 B8 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VFMADD231PS zmm1{er}, {k}{z}, zmmV, zmm2","VFMADD231PS zmm2, zmmV, {k}{z}, zmm1{er}","vfmadd231ps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W0 B8 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMADD231PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VFMADD231PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vfmadd231ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 B8 /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VFMADD231SD xmm1{er}, {k}{z}, xmmV, xmm2","VFMADD231SD xmm2, xmmV, {k}{z}, xmm1{er}","vfmadd231sd xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W1 B9 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMADD231SD xmm1, xmmV, xmm2/m64","VFMADD231SD xmm2/m64, xmmV, xmm1","vfmadd231sd xmm2/m64, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W1 B9 /r","V","V","FMA","","rw,r,r","","" +"VFMADD231SD xmm1, {k}{z}, xmmV, xmm2/m64","VFMADD231SD xmm2/m64, xmmV, {k}{z}, xmm1","vfmadd231sd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W1 B9 /r","V","V","AVX512F","scale8","rw,r,r,r","","" +"VFMADD231SS xmm1{er}, {k}{z}, xmmV, xmm2","VFMADD231SS xmm2, xmmV, {k}{z}, xmm1{er}","vfmadd231ss xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W0 B9 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMADD231SS xmm1, xmmV, xmm2/m32","VFMADD231SS xmm2/m32, xmmV, xmm1","vfmadd231ss xmm2/m32, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W0 B9 /r","V","V","FMA","","rw,r,r","","" +"VFMADD231SS xmm1, {k}{z}, xmmV, xmm2/m32","VFMADD231SS xmm2/m32, xmmV, {k}{z}, xmm1","vfmadd231ss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W0 B9 /r","V","V","AVX512F","scale4","rw,r,r,r","","" +"VFMADDPD xmm1, xmmV, xmmIH, xmm2/m128","VFMADDPD xmm2/m128, xmmIH, xmmV, xmm1","vfmaddpd xmm2/m128, xmmIH, xmmV, xmm1","VEX.NDS.128.66.0F3A.W1 69 /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDPD xmm1, xmmV, xmm2/m128, xmmIH","VFMADDPD xmmIH, xmm2/m128, xmmV, xmm1","vfmaddpd xmmIH, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W0 69 /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDPD ymm1, ymmV, ymmIH, ymm2/m256","VFMADDPD ymm2/m256, ymmIH, ymmV, ymm1","vfmaddpd ymm2/m256, ymmIH, ymmV, ymm1","VEX.NDS.256.66.0F3A.W1 69 /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDPD ymm1, ymmV, ymm2/m256, ymmIH","VFMADDPD ymmIH, ymm2/m256, ymmV, ymm1","vfmaddpd ymmIH, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 69 /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDPS xmm1, xmmV, xmmIH, xmm2/m128","VFMADDPS xmm2/m128, xmmIH, xmmV, xmm1","vfmaddps xmm2/m128, xmmIH, xmmV, xmm1","VEX.NDS.128.66.0F3A.W1 68 /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDPS xmm1, xmmV, xmm2/m128, xmmIH","VFMADDPS xmmIH, xmm2/m128, xmmV, xmm1","vfmaddps xmmIH, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W0 68 /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDPS ymm1, ymmV, ymmIH, ymm2/m256","VFMADDPS ymm2/m256, ymmIH, ymmV, ymm1","vfmaddps ymm2/m256, ymmIH, ymmV, ymm1","VEX.NDS.256.66.0F3A.W1 68 /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDPS ymm1, ymmV, ymm2/m256, ymmIH","VFMADDPS ymmIH, ymm2/m256, ymmV, ymm1","vfmaddps ymmIH, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 68 /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDSD xmm1, xmmV, xmmIH, xmm2/m64","VFMADDSD xmm2/m64, xmmIH, xmmV, xmm1","vfmaddsd xmm2/m64, xmmIH, xmmV, xmm1","VEX.NDS.LIG.66.0F3A.W1 6B /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDSD xmm1, xmmV, 
xmm2/m64, xmmIH","VFMADDSD xmmIH, xmm2/m64, xmmV, xmm1","vfmaddsd xmmIH, xmm2/m64, xmmV, xmm1","VEX.NDS.LIG.66.0F3A.W0 6B /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDSS xmm1, xmmV, xmmIH, xmm2/m32","VFMADDSS xmm2/m32, xmmIH, xmmV, xmm1","vfmaddss xmm2/m32, xmmIH, xmmV, xmm1","VEX.NDS.LIG.66.0F3A.W1 6A /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDSS xmm1, xmmV, xmm2/m32, xmmIH","VFMADDSS xmmIH, xmm2/m32, xmmV, xmm1","vfmaddss xmmIH, xmm2/m32, xmmV, xmm1","VEX.NDS.LIG.66.0F3A.W0 6A /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDSUB132PD xmm1, xmmV, xmm2/m128","VFMADDSUB132PD xmm2/m128, xmmV, xmm1","vfmaddsub132pd xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W1 96 /r","V","V","FMA","","rw,r,r","","" +"VFMADDSUB132PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VFMADDSUB132PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vfmaddsub132pd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 96 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VFMADDSUB132PD ymm1, ymmV, ymm2/m256","VFMADDSUB132PD ymm2/m256, ymmV, ymm1","vfmaddsub132pd ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W1 96 /r","V","V","FMA","","rw,r,r","","" +"VFMADDSUB132PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VFMADDSUB132PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfmaddsub132pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 96 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VFMADDSUB132PD zmm1{er}, {k}{z}, zmmV, zmm2","VFMADDSUB132PD zmm2, zmmV, {k}{z}, zmm1{er}","vfmaddsub132pd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W1 96 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMADDSUB132PD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VFMADDSUB132PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfmaddsub132pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 96 /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VFMADDSUB132PS xmm1, xmmV, xmm2/m128","VFMADDSUB132PS xmm2/m128, xmmV, xmm1","vfmaddsub132ps xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W0 96 /r","V","V","FMA","","rw,r,r","","" +"VFMADDSUB132PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VFMADDSUB132PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfmaddsub132ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 96 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VFMADDSUB132PS ymm1, ymmV, ymm2/m256","VFMADDSUB132PS ymm2/m256, ymmV, ymm1","vfmaddsub132ps ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W0 96 /r","V","V","FMA","","rw,r,r","","" +"VFMADDSUB132PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VFMADDSUB132PS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vfmaddsub132ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 96 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VFMADDSUB132PS zmm1{er}, {k}{z}, zmmV, zmm2","VFMADDSUB132PS zmm2, zmmV, {k}{z}, zmm1{er}","vfmaddsub132ps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W0 96 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMADDSUB132PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VFMADDSUB132PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vfmaddsub132ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 96 /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VFMADDSUB213PD xmm1, xmmV, xmm2/m128","VFMADDSUB213PD xmm2/m128, xmmV, xmm1","vfmaddsub213pd xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W1 A6 /r","V","V","FMA","","rw,r,r","","" +"VFMADDSUB213PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VFMADDSUB213PD xmm2/m128/m64bcst, xmmV, {k}{z}, 
xmm1","vfmaddsub213pd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 A6 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VFMADDSUB213PD ymm1, ymmV, ymm2/m256","VFMADDSUB213PD ymm2/m256, ymmV, ymm1","vfmaddsub213pd ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W1 A6 /r","V","V","FMA","","rw,r,r","","" +"VFMADDSUB213PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VFMADDSUB213PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfmaddsub213pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 A6 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VFMADDSUB213PD zmm1{er}, {k}{z}, zmmV, zmm2","VFMADDSUB213PD zmm2, zmmV, {k}{z}, zmm1{er}","vfmaddsub213pd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W1 A6 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMADDSUB213PD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VFMADDSUB213PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfmaddsub213pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 A6 /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VFMADDSUB213PS xmm1, xmmV, xmm2/m128","VFMADDSUB213PS xmm2/m128, xmmV, xmm1","vfmaddsub213ps xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W0 A6 /r","V","V","FMA","","rw,r,r","","" +"VFMADDSUB213PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VFMADDSUB213PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfmaddsub213ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 A6 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VFMADDSUB213PS ymm1, ymmV, ymm2/m256","VFMADDSUB213PS ymm2/m256, ymmV, ymm1","vfmaddsub213ps ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W0 A6 /r","V","V","FMA","","rw,r,r","","" +"VFMADDSUB213PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VFMADDSUB213PS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vfmaddsub213ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 A6 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VFMADDSUB213PS zmm1{er}, {k}{z}, zmmV, zmm2","VFMADDSUB213PS zmm2, zmmV, {k}{z}, zmm1{er}","vfmaddsub213ps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W0 A6 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMADDSUB213PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VFMADDSUB213PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vfmaddsub213ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 A6 /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VFMADDSUB231PD xmm1, xmmV, xmm2/m128","VFMADDSUB231PD xmm2/m128, xmmV, xmm1","vfmaddsub231pd xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W1 B6 /r","V","V","FMA","","rw,r,r","","" +"VFMADDSUB231PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VFMADDSUB231PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vfmaddsub231pd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 B6 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VFMADDSUB231PD ymm1, ymmV, ymm2/m256","VFMADDSUB231PD ymm2/m256, ymmV, ymm1","vfmaddsub231pd ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W1 B6 /r","V","V","FMA","","rw,r,r","","" +"VFMADDSUB231PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VFMADDSUB231PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfmaddsub231pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 B6 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VFMADDSUB231PD zmm1{er}, {k}{z}, zmmV, zmm2","VFMADDSUB231PD zmm2, zmmV, {k}{z}, zmm1{er}","vfmaddsub231pd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W1 B6 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMADDSUB231PD zmm1, 
{k}{z}, zmmV, zmm2/m512/m64bcst","VFMADDSUB231PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfmaddsub231pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 B6 /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VFMADDSUB231PS xmm1, xmmV, xmm2/m128","VFMADDSUB231PS xmm2/m128, xmmV, xmm1","vfmaddsub231ps xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W0 B6 /r","V","V","FMA","","rw,r,r","","" +"VFMADDSUB231PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VFMADDSUB231PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfmaddsub231ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 B6 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VFMADDSUB231PS ymm1, ymmV, ymm2/m256","VFMADDSUB231PS ymm2/m256, ymmV, ymm1","vfmaddsub231ps ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W0 B6 /r","V","V","FMA","","rw,r,r","","" +"VFMADDSUB231PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VFMADDSUB231PS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vfmaddsub231ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 B6 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VFMADDSUB231PS zmm1{er}, {k}{z}, zmmV, zmm2","VFMADDSUB231PS zmm2, zmmV, {k}{z}, zmm1{er}","vfmaddsub231ps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W0 B6 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMADDSUB231PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VFMADDSUB231PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vfmaddsub231ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 B6 /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VFMADDSUBPD xmm1, xmmV, xmmIH, xmm2/m128","VFMADDSUBPD xmm2/m128, xmmIH, xmmV, xmm1","vfmaddsubpd xmm2/m128, xmmIH, xmmV, xmm1","VEX.NDS.128.66.0F3A.W1 5D /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDSUBPD xmm1, xmmV, xmm2/m128, xmmIH","VFMADDSUBPD xmmIH, xmm2/m128, xmmV, xmm1","vfmaddsubpd xmmIH, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W0 5D /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDSUBPD ymm1, ymmV, ymmIH, ymm2/m256","VFMADDSUBPD ymm2/m256, ymmIH, ymmV, ymm1","vfmaddsubpd ymm2/m256, ymmIH, ymmV, ymm1","VEX.NDS.256.66.0F3A.W1 5D /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDSUBPD ymm1, ymmV, ymm2/m256, ymmIH","VFMADDSUBPD ymmIH, ymm2/m256, ymmV, ymm1","vfmaddsubpd ymmIH, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 5D /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDSUBPS xmm1, xmmV, xmmIH, xmm2/m128","VFMADDSUBPS xmm2/m128, xmmIH, xmmV, xmm1","vfmaddsubps xmm2/m128, xmmIH, xmmV, xmm1","VEX.NDS.128.66.0F3A.W1 5C /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDSUBPS xmm1, xmmV, xmm2/m128, xmmIH","VFMADDSUBPS xmmIH, xmm2/m128, xmmV, xmm1","vfmaddsubps xmmIH, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W0 5C /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDSUBPS ymm1, ymmV, ymmIH, ymm2/m256","VFMADDSUBPS ymm2/m256, ymmIH, ymmV, ymm1","vfmaddsubps ymm2/m256, ymmIH, ymmV, ymm1","VEX.NDS.256.66.0F3A.W1 5C /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMADDSUBPS ymm1, ymmV, ymm2/m256, ymmIH","VFMADDSUBPS ymmIH, ymm2/m256, ymmV, ymm1","vfmaddsubps ymmIH, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 5C /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUB132PD xmm1, xmmV, xmm2/m128","VFMSUB132PD xmm2/m128, xmmV, xmm1","vfmsub132pd xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W1 9A /r","V","V","FMA","","rw,r,r","","" +"VFMSUB132PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VFMSUB132PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vfmsub132pd xmm2/m128/m64bcst, xmmV, {k}{z}, 
xmm1","EVEX.DDS.128.66.0F38.W1 9A /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VFMSUB132PD ymm1, ymmV, ymm2/m256","VFMSUB132PD ymm2/m256, ymmV, ymm1","vfmsub132pd ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W1 9A /r","V","V","FMA","","rw,r,r","","" +"VFMSUB132PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VFMSUB132PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfmsub132pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 9A /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VFMSUB132PD zmm1{er}, {k}{z}, zmmV, zmm2","VFMSUB132PD zmm2, zmmV, {k}{z}, zmm1{er}","vfmsub132pd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W1 9A /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMSUB132PD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VFMSUB132PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfmsub132pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 9A /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VFMSUB132PS xmm1, xmmV, xmm2/m128","VFMSUB132PS xmm2/m128, xmmV, xmm1","vfmsub132ps xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W0 9A /r","V","V","FMA","","rw,r,r","","" +"VFMSUB132PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VFMSUB132PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfmsub132ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 9A /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VFMSUB132PS ymm1, ymmV, ymm2/m256","VFMSUB132PS ymm2/m256, ymmV, ymm1","vfmsub132ps ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W0 9A /r","V","V","FMA","","rw,r,r","","" +"VFMSUB132PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VFMSUB132PS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vfmsub132ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 9A /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VFMSUB132PS zmm1{er}, {k}{z}, zmmV, zmm2","VFMSUB132PS zmm2, zmmV, {k}{z}, zmm1{er}","vfmsub132ps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W0 9A /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMSUB132PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VFMSUB132PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vfmsub132ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 9A /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VFMSUB132SD xmm1{er}, {k}{z}, xmmV, xmm2","VFMSUB132SD xmm2, xmmV, {k}{z}, xmm1{er}","vfmsub132sd xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W1 9B /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMSUB132SD xmm1, xmmV, xmm2/m64","VFMSUB132SD xmm2/m64, xmmV, xmm1","vfmsub132sd xmm2/m64, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W1 9B /r","V","V","FMA","","rw,r,r","","" +"VFMSUB132SD xmm1, {k}{z}, xmmV, xmm2/m64","VFMSUB132SD xmm2/m64, xmmV, {k}{z}, xmm1","vfmsub132sd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W1 9B /r","V","V","AVX512F","scale8","rw,r,r,r","","" +"VFMSUB132SS xmm1{er}, {k}{z}, xmmV, xmm2","VFMSUB132SS xmm2, xmmV, {k}{z}, xmm1{er}","vfmsub132ss xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W0 9B /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMSUB132SS xmm1, xmmV, xmm2/m32","VFMSUB132SS xmm2/m32, xmmV, xmm1","vfmsub132ss xmm2/m32, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W0 9B /r","V","V","FMA","","rw,r,r","","" +"VFMSUB132SS xmm1, {k}{z}, xmmV, xmm2/m32","VFMSUB132SS xmm2/m32, xmmV, {k}{z}, xmm1","vfmsub132ss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W0 9B /r","V","V","AVX512F","scale4","rw,r,r,r","","" +"VFMSUB213PD xmm1, xmmV, xmm2/m128","VFMSUB213PD xmm2/m128, xmmV, xmm1","vfmsub213pd xmm2/m128, 
xmmV, xmm1","VEX.DDS.128.66.0F38.W1 AA /r","V","V","FMA","","rw,r,r","","" +"VFMSUB213PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VFMSUB213PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vfmsub213pd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 AA /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VFMSUB213PD ymm1, ymmV, ymm2/m256","VFMSUB213PD ymm2/m256, ymmV, ymm1","vfmsub213pd ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W1 AA /r","V","V","FMA","","rw,r,r","","" +"VFMSUB213PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VFMSUB213PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfmsub213pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 AA /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VFMSUB213PD zmm1{er}, {k}{z}, zmmV, zmm2","VFMSUB213PD zmm2, zmmV, {k}{z}, zmm1{er}","vfmsub213pd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W1 AA /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMSUB213PD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VFMSUB213PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfmsub213pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 AA /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VFMSUB213PS xmm1, xmmV, xmm2/m128","VFMSUB213PS xmm2/m128, xmmV, xmm1","vfmsub213ps xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W0 AA /r","V","V","FMA","","rw,r,r","","" +"VFMSUB213PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VFMSUB213PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfmsub213ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 AA /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VFMSUB213PS ymm1, ymmV, ymm2/m256","VFMSUB213PS ymm2/m256, ymmV, ymm1","vfmsub213ps ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W0 AA /r","V","V","FMA","","rw,r,r","","" +"VFMSUB213PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VFMSUB213PS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vfmsub213ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 AA /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VFMSUB213PS zmm1{er}, {k}{z}, zmmV, zmm2","VFMSUB213PS zmm2, zmmV, {k}{z}, zmm1{er}","vfmsub213ps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W0 AA /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMSUB213PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VFMSUB213PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vfmsub213ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 AA /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VFMSUB213SD xmm1{er}, {k}{z}, xmmV, xmm2","VFMSUB213SD xmm2, xmmV, {k}{z}, xmm1{er}","vfmsub213sd xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W1 AB /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMSUB213SD xmm1, xmmV, xmm2/m64","VFMSUB213SD xmm2/m64, xmmV, xmm1","vfmsub213sd xmm2/m64, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W1 AB /r","V","V","FMA","","rw,r,r","","" +"VFMSUB213SD xmm1, {k}{z}, xmmV, xmm2/m64","VFMSUB213SD xmm2/m64, xmmV, {k}{z}, xmm1","vfmsub213sd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W1 AB /r","V","V","AVX512F","scale8","rw,r,r,r","","" +"VFMSUB213SS xmm1{er}, {k}{z}, xmmV, xmm2","VFMSUB213SS xmm2, xmmV, {k}{z}, xmm1{er}","vfmsub213ss xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W0 AB /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMSUB213SS xmm1, xmmV, xmm2/m32","VFMSUB213SS xmm2/m32, xmmV, xmm1","vfmsub213ss xmm2/m32, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W0 AB /r","V","V","FMA","","rw,r,r","","" +"VFMSUB213SS xmm1, {k}{z}, xmmV, xmm2/m32","VFMSUB213SS xmm2/m32, xmmV, 
{k}{z}, xmm1","vfmsub213ss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W0 AB /r","V","V","AVX512F","scale4","rw,r,r,r","","" +"VFMSUB231PD xmm1, xmmV, xmm2/m128","VFMSUB231PD xmm2/m128, xmmV, xmm1","vfmsub231pd xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W1 BA /r","V","V","FMA","","rw,r,r","","" +"VFMSUB231PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VFMSUB231PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vfmsub231pd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 BA /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VFMSUB231PD ymm1, ymmV, ymm2/m256","VFMSUB231PD ymm2/m256, ymmV, ymm1","vfmsub231pd ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W1 BA /r","V","V","FMA","","rw,r,r","","" +"VFMSUB231PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VFMSUB231PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfmsub231pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 BA /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VFMSUB231PD zmm1{er}, {k}{z}, zmmV, zmm2","VFMSUB231PD zmm2, zmmV, {k}{z}, zmm1{er}","vfmsub231pd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W1 BA /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMSUB231PD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VFMSUB231PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfmsub231pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 BA /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VFMSUB231PS xmm1, xmmV, xmm2/m128","VFMSUB231PS xmm2/m128, xmmV, xmm1","vfmsub231ps xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W0 BA /r","V","V","FMA","","rw,r,r","","" +"VFMSUB231PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VFMSUB231PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfmsub231ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 BA /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VFMSUB231PS ymm1, ymmV, ymm2/m256","VFMSUB231PS ymm2/m256, ymmV, ymm1","vfmsub231ps ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W0 BA /r","V","V","FMA","","rw,r,r","","" +"VFMSUB231PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VFMSUB231PS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vfmsub231ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 BA /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VFMSUB231PS zmm1{er}, {k}{z}, zmmV, zmm2","VFMSUB231PS zmm2, zmmV, {k}{z}, zmm1{er}","vfmsub231ps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W0 BA /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMSUB231PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VFMSUB231PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vfmsub231ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 BA /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VFMSUB231SD xmm1{er}, {k}{z}, xmmV, xmm2","VFMSUB231SD xmm2, xmmV, {k}{z}, xmm1{er}","vfmsub231sd xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W1 BB /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMSUB231SD xmm1, xmmV, xmm2/m64","VFMSUB231SD xmm2/m64, xmmV, xmm1","vfmsub231sd xmm2/m64, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W1 BB /r","V","V","FMA","","rw,r,r","","" +"VFMSUB231SD xmm1, {k}{z}, xmmV, xmm2/m64","VFMSUB231SD xmm2/m64, xmmV, {k}{z}, xmm1","vfmsub231sd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W1 BB /r","V","V","AVX512F","scale8","rw,r,r,r","","" +"VFMSUB231SS xmm1{er}, {k}{z}, xmmV, xmm2","VFMSUB231SS xmm2, xmmV, {k}{z}, xmm1{er}","vfmsub231ss xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W0 BB /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMSUB231SS 
xmm1, xmmV, xmm2/m32","VFMSUB231SS xmm2/m32, xmmV, xmm1","vfmsub231ss xmm2/m32, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W0 BB /r","V","V","FMA","","rw,r,r","","" +"VFMSUB231SS xmm1, {k}{z}, xmmV, xmm2/m32","VFMSUB231SS xmm2/m32, xmmV, {k}{z}, xmm1","vfmsub231ss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W0 BB /r","V","V","AVX512F","scale4","rw,r,r,r","","" +"VFMSUBADD132PD xmm1, xmmV, xmm2/m128","VFMSUBADD132PD xmm2/m128, xmmV, xmm1","vfmsubadd132pd xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W1 97 /r","V","V","FMA","","rw,r,r","","" +"VFMSUBADD132PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VFMSUBADD132PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vfmsubadd132pd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 97 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VFMSUBADD132PD ymm1, ymmV, ymm2/m256","VFMSUBADD132PD ymm2/m256, ymmV, ymm1","vfmsubadd132pd ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W1 97 /r","V","V","FMA","","rw,r,r","","" +"VFMSUBADD132PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VFMSUBADD132PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfmsubadd132pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 97 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VFMSUBADD132PD zmm1{er}, {k}{z}, zmmV, zmm2","VFMSUBADD132PD zmm2, zmmV, {k}{z}, zmm1{er}","vfmsubadd132pd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W1 97 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMSUBADD132PD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VFMSUBADD132PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfmsubadd132pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 97 /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VFMSUBADD132PS xmm1, xmmV, xmm2/m128","VFMSUBADD132PS xmm2/m128, xmmV, xmm1","vfmsubadd132ps xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W0 97 /r","V","V","FMA","","rw,r,r","","" +"VFMSUBADD132PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VFMSUBADD132PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfmsubadd132ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 97 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VFMSUBADD132PS ymm1, ymmV, ymm2/m256","VFMSUBADD132PS ymm2/m256, ymmV, ymm1","vfmsubadd132ps ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W0 97 /r","V","V","FMA","","rw,r,r","","" +"VFMSUBADD132PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VFMSUBADD132PS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vfmsubadd132ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 97 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VFMSUBADD132PS zmm1{er}, {k}{z}, zmmV, zmm2","VFMSUBADD132PS zmm2, zmmV, {k}{z}, zmm1{er}","vfmsubadd132ps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W0 97 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMSUBADD132PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VFMSUBADD132PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vfmsubadd132ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 97 /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VFMSUBADD213PD xmm1, xmmV, xmm2/m128","VFMSUBADD213PD xmm2/m128, xmmV, xmm1","vfmsubadd213pd xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W1 A7 /r","V","V","FMA","","rw,r,r","","" +"VFMSUBADD213PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VFMSUBADD213PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vfmsubadd213pd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 A7 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VFMSUBADD213PD ymm1, ymmV, 
ymm2/m256","VFMSUBADD213PD ymm2/m256, ymmV, ymm1","vfmsubadd213pd ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W1 A7 /r","V","V","FMA","","rw,r,r","","" +"VFMSUBADD213PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VFMSUBADD213PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfmsubadd213pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 A7 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VFMSUBADD213PD zmm1{er}, {k}{z}, zmmV, zmm2","VFMSUBADD213PD zmm2, zmmV, {k}{z}, zmm1{er}","vfmsubadd213pd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W1 A7 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMSUBADD213PD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VFMSUBADD213PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfmsubadd213pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 A7 /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VFMSUBADD213PS xmm1, xmmV, xmm2/m128","VFMSUBADD213PS xmm2/m128, xmmV, xmm1","vfmsubadd213ps xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W0 A7 /r","V","V","FMA","","rw,r,r","","" +"VFMSUBADD213PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VFMSUBADD213PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfmsubadd213ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 A7 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VFMSUBADD213PS ymm1, ymmV, ymm2/m256","VFMSUBADD213PS ymm2/m256, ymmV, ymm1","vfmsubadd213ps ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W0 A7 /r","V","V","FMA","","rw,r,r","","" +"VFMSUBADD213PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VFMSUBADD213PS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vfmsubadd213ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 A7 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VFMSUBADD213PS zmm1{er}, {k}{z}, zmmV, zmm2","VFMSUBADD213PS zmm2, zmmV, {k}{z}, zmm1{er}","vfmsubadd213ps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W0 A7 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMSUBADD213PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VFMSUBADD213PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vfmsubadd213ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 A7 /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VFMSUBADD231PD xmm1, xmmV, xmm2/m128","VFMSUBADD231PD xmm2/m128, xmmV, xmm1","vfmsubadd231pd xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W1 B7 /r","V","V","FMA","","rw,r,r","","" +"VFMSUBADD231PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VFMSUBADD231PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vfmsubadd231pd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 B7 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VFMSUBADD231PD ymm1, ymmV, ymm2/m256","VFMSUBADD231PD ymm2/m256, ymmV, ymm1","vfmsubadd231pd ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W1 B7 /r","V","V","FMA","","rw,r,r","","" +"VFMSUBADD231PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VFMSUBADD231PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfmsubadd231pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 B7 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VFMSUBADD231PD zmm1{er}, {k}{z}, zmmV, zmm2","VFMSUBADD231PD zmm2, zmmV, {k}{z}, zmm1{er}","vfmsubadd231pd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W1 B7 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMSUBADD231PD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VFMSUBADD231PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfmsubadd231pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 B7 
/r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VFMSUBADD231PS xmm1, xmmV, xmm2/m128","VFMSUBADD231PS xmm2/m128, xmmV, xmm1","vfmsubadd231ps xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W0 B7 /r","V","V","FMA","","rw,r,r","","" +"VFMSUBADD231PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VFMSUBADD231PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfmsubadd231ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 B7 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VFMSUBADD231PS ymm1, ymmV, ymm2/m256","VFMSUBADD231PS ymm2/m256, ymmV, ymm1","vfmsubadd231ps ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W0 B7 /r","V","V","FMA","","rw,r,r","","" +"VFMSUBADD231PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VFMSUBADD231PS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vfmsubadd231ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 B7 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VFMSUBADD231PS zmm1{er}, {k}{z}, zmmV, zmm2","VFMSUBADD231PS zmm2, zmmV, {k}{z}, zmm1{er}","vfmsubadd231ps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W0 B7 /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFMSUBADD231PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VFMSUBADD231PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vfmsubadd231ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 B7 /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VFMSUBADDPD xmm1, xmmV, xmmIH, xmm2/m128","VFMSUBADDPD xmm2/m128, xmmIH, xmmV, xmm1","vfmsubaddpd xmm2/m128, xmmIH, xmmV, xmm1","VEX.NDS.128.66.0F3A.W1 5F /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBADDPD xmm1, xmmV, xmm2/m128, xmmIH","VFMSUBADDPD xmmIH, xmm2/m128, xmmV, xmm1","vfmsubaddpd xmmIH, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W0 5F /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBADDPD ymm1, ymmV, ymmIH, ymm2/m256","VFMSUBADDPD ymm2/m256, ymmIH, ymmV, ymm1","vfmsubaddpd ymm2/m256, ymmIH, ymmV, ymm1","VEX.NDS.256.66.0F3A.W1 5F /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBADDPD ymm1, ymmV, ymm2/m256, ymmIH","VFMSUBADDPD ymmIH, ymm2/m256, ymmV, ymm1","vfmsubaddpd ymmIH, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 5F /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBADDPS xmm1, xmmV, xmmIH, xmm2/m128","VFMSUBADDPS xmm2/m128, xmmIH, xmmV, xmm1","vfmsubaddps xmm2/m128, xmmIH, xmmV, xmm1","VEX.NDS.128.66.0F3A.W1 5E /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBADDPS xmm1, xmmV, xmm2/m128, xmmIH","VFMSUBADDPS xmmIH, xmm2/m128, xmmV, xmm1","vfmsubaddps xmmIH, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W0 5E /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBADDPS ymm1, ymmV, ymmIH, ymm2/m256","VFMSUBADDPS ymm2/m256, ymmIH, ymmV, ymm1","vfmsubaddps ymm2/m256, ymmIH, ymmV, ymm1","VEX.NDS.256.66.0F3A.W1 5E /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBADDPS ymm1, ymmV, ymm2/m256, ymmIH","VFMSUBADDPS ymmIH, ymm2/m256, ymmV, ymm1","vfmsubaddps ymmIH, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 5E /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBPD xmm1, xmmV, xmmIH, xmm2/m128","VFMSUBPD xmm2/m128, xmmIH, xmmV, xmm1","vfmsubpd xmm2/m128, xmmIH, xmmV, xmm1","VEX.NDS.128.66.0F3A.W1 6D /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBPD xmm1, xmmV, xmm2/m128, xmmIH","VFMSUBPD xmmIH, xmm2/m128, xmmV, xmm1","vfmsubpd xmmIH, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W0 6D /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBPD ymm1, ymmV, ymmIH, ymm2/m256","VFMSUBPD ymm2/m256, ymmIH, ymmV, ymm1","vfmsubpd ymm2/m256, ymmIH, ymmV, 
ymm1","VEX.NDS.256.66.0F3A.W1 6D /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBPD ymm1, ymmV, ymm2/m256, ymmIH","VFMSUBPD ymmIH, ymm2/m256, ymmV, ymm1","vfmsubpd ymmIH, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 6D /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBPS xmm1, xmmV, xmmIH, xmm2/m128","VFMSUBPS xmm2/m128, xmmIH, xmmV, xmm1","vfmsubps xmm2/m128, xmmIH, xmmV, xmm1","VEX.NDS.128.66.0F3A.W1 6C /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBPS xmm1, xmmV, xmm2/m128, xmmIH","VFMSUBPS xmmIH, xmm2/m128, xmmV, xmm1","vfmsubps xmmIH, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W0 6C /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBPS ymm1, ymmV, ymmIH, ymm2/m256","VFMSUBPS ymm2/m256, ymmIH, ymmV, ymm1","vfmsubps ymm2/m256, ymmIH, ymmV, ymm1","VEX.NDS.256.66.0F3A.W1 6C /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBPS ymm1, ymmV, ymm2/m256, ymmIH","VFMSUBPS ymmIH, ymm2/m256, ymmV, ymm1","vfmsubps ymmIH, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 6C /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBSD xmm1, xmmV, xmmIH, xmm2/m64","VFMSUBSD xmm2/m64, xmmIH, xmmV, xmm1","vfmsubsd xmm2/m64, xmmIH, xmmV, xmm1","VEX.NDS.LIG.66.0F3A.W1 6F /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBSD xmm1, xmmV, xmm2/m64, xmmIH","VFMSUBSD xmmIH, xmm2/m64, xmmV, xmm1","vfmsubsd xmmIH, xmm2/m64, xmmV, xmm1","VEX.NDS.LIG.66.0F3A.W0 6F /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBSS xmm1, xmmV, xmmIH, xmm2/m32","VFMSUBSS xmm2/m32, xmmIH, xmmV, xmm1","vfmsubss xmm2/m32, xmmIH, xmmV, xmm1","VEX.NDS.LIG.66.0F3A.W1 6E /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFMSUBSS xmm1, xmmV, xmm2/m32, xmmIH","VFMSUBSS xmmIH, xmm2/m32, xmmV, xmm1","vfmsubss xmmIH, xmm2/m32, xmmV, xmm1","VEX.NDS.LIG.66.0F3A.W0 6E /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMADD132PD xmm1, xmmV, xmm2/m128","VFNMADD132PD xmm2/m128, xmmV, xmm1","vfnmadd132pd xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W1 9C /r","V","V","FMA","","rw,r,r","","" +"VFNMADD132PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VFNMADD132PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vfnmadd132pd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 9C /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VFNMADD132PD ymm1, ymmV, ymm2/m256","VFNMADD132PD ymm2/m256, ymmV, ymm1","vfnmadd132pd ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W1 9C /r","V","V","FMA","","rw,r,r","","" +"VFNMADD132PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VFNMADD132PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfnmadd132pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 9C /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VFNMADD132PD zmm1{er}, {k}{z}, zmmV, zmm2","VFNMADD132PD zmm2, zmmV, {k}{z}, zmm1{er}","vfnmadd132pd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W1 9C /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMADD132PD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VFNMADD132PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfnmadd132pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 9C /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VFNMADD132PS xmm1, xmmV, xmm2/m128","VFNMADD132PS xmm2/m128, xmmV, xmm1","vfnmadd132ps xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W0 9C /r","V","V","FMA","","rw,r,r","","" +"VFNMADD132PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VFNMADD132PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfnmadd132ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 9C 
/r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VFNMADD132PS ymm1, ymmV, ymm2/m256","VFNMADD132PS ymm2/m256, ymmV, ymm1","vfnmadd132ps ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W0 9C /r","V","V","FMA","","rw,r,r","","" +"VFNMADD132PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VFNMADD132PS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vfnmadd132ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 9C /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VFNMADD132PS zmm1{er}, {k}{z}, zmmV, zmm2","VFNMADD132PS zmm2, zmmV, {k}{z}, zmm1{er}","vfnmadd132ps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W0 9C /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMADD132PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VFNMADD132PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vfnmadd132ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 9C /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VFNMADD132SD xmm1{er}, {k}{z}, xmmV, xmm2","VFNMADD132SD xmm2, xmmV, {k}{z}, xmm1{er}","vfnmadd132sd xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W1 9D /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMADD132SD xmm1, xmmV, xmm2/m64","VFNMADD132SD xmm2/m64, xmmV, xmm1","vfnmadd132sd xmm2/m64, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W1 9D /r","V","V","FMA","","rw,r,r","","" +"VFNMADD132SD xmm1, {k}{z}, xmmV, xmm2/m64","VFNMADD132SD xmm2/m64, xmmV, {k}{z}, xmm1","vfnmadd132sd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W1 9D /r","V","V","AVX512F","scale8","rw,r,r,r","","" +"VFNMADD132SS xmm1{er}, {k}{z}, xmmV, xmm2","VFNMADD132SS xmm2, xmmV, {k}{z}, xmm1{er}","vfnmadd132ss xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W0 9D /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMADD132SS xmm1, xmmV, xmm2/m32","VFNMADD132SS xmm2/m32, xmmV, xmm1","vfnmadd132ss xmm2/m32, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W0 9D /r","V","V","FMA","","rw,r,r","","" +"VFNMADD132SS xmm1, {k}{z}, xmmV, xmm2/m32","VFNMADD132SS xmm2/m32, xmmV, {k}{z}, xmm1","vfnmadd132ss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W0 9D /r","V","V","AVX512F","scale4","rw,r,r,r","","" +"VFNMADD213PD xmm1, xmmV, xmm2/m128","VFNMADD213PD xmm2/m128, xmmV, xmm1","vfnmadd213pd xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W1 AC /r","V","V","FMA","","rw,r,r","","" +"VFNMADD213PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VFNMADD213PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vfnmadd213pd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 AC /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VFNMADD213PD ymm1, ymmV, ymm2/m256","VFNMADD213PD ymm2/m256, ymmV, ymm1","vfnmadd213pd ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W1 AC /r","V","V","FMA","","rw,r,r","","" +"VFNMADD213PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VFNMADD213PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfnmadd213pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 AC /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VFNMADD213PD zmm1{er}, {k}{z}, zmmV, zmm2","VFNMADD213PD zmm2, zmmV, {k}{z}, zmm1{er}","vfnmadd213pd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W1 AC /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMADD213PD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VFNMADD213PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfnmadd213pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 AC /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VFNMADD213PS xmm1, xmmV, xmm2/m128","VFNMADD213PS xmm2/m128, xmmV, 
xmm1","vfnmadd213ps xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W0 AC /r","V","V","FMA","","rw,r,r","","" +"VFNMADD213PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VFNMADD213PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfnmadd213ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 AC /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VFNMADD213PS ymm1, ymmV, ymm2/m256","VFNMADD213PS ymm2/m256, ymmV, ymm1","vfnmadd213ps ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W0 AC /r","V","V","FMA","","rw,r,r","","" +"VFNMADD213PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VFNMADD213PS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vfnmadd213ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 AC /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VFNMADD213PS zmm1{er}, {k}{z}, zmmV, zmm2","VFNMADD213PS zmm2, zmmV, {k}{z}, zmm1{er}","vfnmadd213ps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W0 AC /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMADD213PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VFNMADD213PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vfnmadd213ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 AC /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VFNMADD213SD xmm1{er}, {k}{z}, xmmV, xmm2","VFNMADD213SD xmm2, xmmV, {k}{z}, xmm1{er}","vfnmadd213sd xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W1 AD /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMADD213SD xmm1, xmmV, xmm2/m64","VFNMADD213SD xmm2/m64, xmmV, xmm1","vfnmadd213sd xmm2/m64, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W1 AD /r","V","V","FMA","","rw,r,r","","" +"VFNMADD213SD xmm1, {k}{z}, xmmV, xmm2/m64","VFNMADD213SD xmm2/m64, xmmV, {k}{z}, xmm1","vfnmadd213sd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W1 AD /r","V","V","AVX512F","scale8","rw,r,r,r","","" +"VFNMADD213SS xmm1{er}, {k}{z}, xmmV, xmm2","VFNMADD213SS xmm2, xmmV, {k}{z}, xmm1{er}","vfnmadd213ss xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W0 AD /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMADD213SS xmm1, xmmV, xmm2/m32","VFNMADD213SS xmm2/m32, xmmV, xmm1","vfnmadd213ss xmm2/m32, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W0 AD /r","V","V","FMA","","rw,r,r","","" +"VFNMADD213SS xmm1, {k}{z}, xmmV, xmm2/m32","VFNMADD213SS xmm2/m32, xmmV, {k}{z}, xmm1","vfnmadd213ss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W0 AD /r","V","V","AVX512F","scale4","rw,r,r,r","","" +"VFNMADD231PD xmm1, xmmV, xmm2/m128","VFNMADD231PD xmm2/m128, xmmV, xmm1","vfnmadd231pd xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W1 BC /r","V","V","FMA","","rw,r,r","","" +"VFNMADD231PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VFNMADD231PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vfnmadd231pd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 BC /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VFNMADD231PD ymm1, ymmV, ymm2/m256","VFNMADD231PD ymm2/m256, ymmV, ymm1","vfnmadd231pd ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W1 BC /r","V","V","FMA","","rw,r,r","","" +"VFNMADD231PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VFNMADD231PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfnmadd231pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 BC /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VFNMADD231PD zmm1{er}, {k}{z}, zmmV, zmm2","VFNMADD231PD zmm2, zmmV, {k}{z}, zmm1{er}","vfnmadd231pd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W1 BC /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMADD231PD zmm1, {k}{z}, zmmV, 
zmm2/m512/m64bcst","VFNMADD231PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfnmadd231pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 BC /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VFNMADD231PS xmm1, xmmV, xmm2/m128","VFNMADD231PS xmm2/m128, xmmV, xmm1","vfnmadd231ps xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W0 BC /r","V","V","FMA","","rw,r,r","","" +"VFNMADD231PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VFNMADD231PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfnmadd231ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 BC /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VFNMADD231PS ymm1, ymmV, ymm2/m256","VFNMADD231PS ymm2/m256, ymmV, ymm1","vfnmadd231ps ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W0 BC /r","V","V","FMA","","rw,r,r","","" +"VFNMADD231PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VFNMADD231PS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vfnmadd231ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 BC /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VFNMADD231PS zmm1{er}, {k}{z}, zmmV, zmm2","VFNMADD231PS zmm2, zmmV, {k}{z}, zmm1{er}","vfnmadd231ps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W0 BC /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMADD231PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VFNMADD231PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vfnmadd231ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 BC /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VFNMADD231SD xmm1{er}, {k}{z}, xmmV, xmm2","VFNMADD231SD xmm2, xmmV, {k}{z}, xmm1{er}","vfnmadd231sd xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W1 BD /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMADD231SD xmm1, xmmV, xmm2/m64","VFNMADD231SD xmm2/m64, xmmV, xmm1","vfnmadd231sd xmm2/m64, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W1 BD /r","V","V","FMA","","rw,r,r","","" +"VFNMADD231SD xmm1, {k}{z}, xmmV, xmm2/m64","VFNMADD231SD xmm2/m64, xmmV, {k}{z}, xmm1","vfnmadd231sd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W1 BD /r","V","V","AVX512F","scale8","rw,r,r,r","","" +"VFNMADD231SS xmm1{er}, {k}{z}, xmmV, xmm2","VFNMADD231SS xmm2, xmmV, {k}{z}, xmm1{er}","vfnmadd231ss xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W0 BD /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMADD231SS xmm1, xmmV, xmm2/m32","VFNMADD231SS xmm2/m32, xmmV, xmm1","vfnmadd231ss xmm2/m32, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W0 BD /r","V","V","FMA","","rw,r,r","","" +"VFNMADD231SS xmm1, {k}{z}, xmmV, xmm2/m32","VFNMADD231SS xmm2/m32, xmmV, {k}{z}, xmm1","vfnmadd231ss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W0 BD /r","V","V","AVX512F","scale4","rw,r,r,r","","" +"VFNMADDPD xmm1, xmmV, xmmIH, xmm2/m128","VFNMADDPD xmm2/m128, xmmIH, xmmV, xmm1","vfnmaddpd xmm2/m128, xmmIH, xmmV, xmm1","VEX.NDS.128.66.0F3A.W1 79 /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMADDPD xmm1, xmmV, xmm2/m128, xmmIH","VFNMADDPD xmmIH, xmm2/m128, xmmV, xmm1","vfnmaddpd xmmIH, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W0 79 /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMADDPD ymm1, ymmV, ymmIH, ymm2/m256","VFNMADDPD ymm2/m256, ymmIH, ymmV, ymm1","vfnmaddpd ymm2/m256, ymmIH, ymmV, ymm1","VEX.NDS.256.66.0F3A.W1 79 /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMADDPD ymm1, ymmV, ymm2/m256, ymmIH","VFNMADDPD ymmIH, ymm2/m256, ymmV, ymm1","vfnmaddpd ymmIH, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 79 /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMADDPS xmm1, xmmV, xmmIH, 
xmm2/m128","VFNMADDPS xmm2/m128, xmmIH, xmmV, xmm1","vfnmaddps xmm2/m128, xmmIH, xmmV, xmm1","VEX.NDS.128.66.0F3A.W1 78 /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMADDPS xmm1, xmmV, xmm2/m128, xmmIH","VFNMADDPS xmmIH, xmm2/m128, xmmV, xmm1","vfnmaddps xmmIH, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W0 78 /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMADDPS ymm1, ymmV, ymmIH, ymm2/m256","VFNMADDPS ymm2/m256, ymmIH, ymmV, ymm1","vfnmaddps ymm2/m256, ymmIH, ymmV, ymm1","VEX.NDS.256.66.0F3A.W1 78 /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMADDPS ymm1, ymmV, ymm2/m256, ymmIH","VFNMADDPS ymmIH, ymm2/m256, ymmV, ymm1","vfnmaddps ymmIH, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 78 /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMADDSD xmm1, xmmV, xmmIH, xmm2/m64","VFNMADDSD xmm2/m64, xmmIH, xmmV, xmm1","vfnmaddsd xmm2/m64, xmmIH, xmmV, xmm1","VEX.NDS.LIG.66.0F3A.W1 7B /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMADDSD xmm1, xmmV, xmm2/m64, xmmIH","VFNMADDSD xmmIH, xmm2/m64, xmmV, xmm1","vfnmaddsd xmmIH, xmm2/m64, xmmV, xmm1","VEX.NDS.LIG.66.0F3A.W0 7B /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMADDSS xmm1, xmmV, xmmIH, xmm2/m32","VFNMADDSS xmm2/m32, xmmIH, xmmV, xmm1","vfnmaddss xmm2/m32, xmmIH, xmmV, xmm1","VEX.NDS.LIG.66.0F3A.W1 7A /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMADDSS xmm1, xmmV, xmm2/m32, xmmIH","VFNMADDSS xmmIH, xmm2/m32, xmmV, xmm1","vfnmaddss xmmIH, xmm2/m32, xmmV, xmm1","VEX.NDS.LIG.66.0F3A.W0 7A /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMSUB132PD xmm1, xmmV, xmm2/m128","VFNMSUB132PD xmm2/m128, xmmV, xmm1","vfnmsub132pd xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W1 9E /r","V","V","FMA","","rw,r,r","","" +"VFNMSUB132PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VFNMSUB132PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vfnmsub132pd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 9E /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VFNMSUB132PD ymm1, ymmV, ymm2/m256","VFNMSUB132PD ymm2/m256, ymmV, ymm1","vfnmsub132pd ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W1 9E /r","V","V","FMA","","rw,r,r","","" +"VFNMSUB132PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VFNMSUB132PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfnmsub132pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 9E /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VFNMSUB132PD zmm1{er}, {k}{z}, zmmV, zmm2","VFNMSUB132PD zmm2, zmmV, {k}{z}, zmm1{er}","vfnmsub132pd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W1 9E /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMSUB132PD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VFNMSUB132PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfnmsub132pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 9E /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VFNMSUB132PS xmm1, xmmV, xmm2/m128","VFNMSUB132PS xmm2/m128, xmmV, xmm1","vfnmsub132ps xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W0 9E /r","V","V","FMA","","rw,r,r","","" +"VFNMSUB132PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VFNMSUB132PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfnmsub132ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 9E /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VFNMSUB132PS ymm1, ymmV, ymm2/m256","VFNMSUB132PS ymm2/m256, ymmV, ymm1","vfnmsub132ps ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W0 9E /r","V","V","FMA","","rw,r,r","","" +"VFNMSUB132PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VFNMSUB132PS ymm2/m256/m32bcst, ymmV, 
{k}{z}, ymm1","vfnmsub132ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 9E /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VFNMSUB132PS zmm1{er}, {k}{z}, zmmV, zmm2","VFNMSUB132PS zmm2, zmmV, {k}{z}, zmm1{er}","vfnmsub132ps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W0 9E /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMSUB132PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VFNMSUB132PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vfnmsub132ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 9E /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VFNMSUB132SD xmm1{er}, {k}{z}, xmmV, xmm2","VFNMSUB132SD xmm2, xmmV, {k}{z}, xmm1{er}","vfnmsub132sd xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W1 9F /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMSUB132SD xmm1, xmmV, xmm2/m64","VFNMSUB132SD xmm2/m64, xmmV, xmm1","vfnmsub132sd xmm2/m64, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W1 9F /r","V","V","FMA","","rw,r,r","","" +"VFNMSUB132SD xmm1, {k}{z}, xmmV, xmm2/m64","VFNMSUB132SD xmm2/m64, xmmV, {k}{z}, xmm1","vfnmsub132sd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W1 9F /r","V","V","AVX512F","scale8","rw,r,r,r","","" +"VFNMSUB132SS xmm1{er}, {k}{z}, xmmV, xmm2","VFNMSUB132SS xmm2, xmmV, {k}{z}, xmm1{er}","vfnmsub132ss xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W0 9F /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMSUB132SS xmm1, xmmV, xmm2/m32","VFNMSUB132SS xmm2/m32, xmmV, xmm1","vfnmsub132ss xmm2/m32, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W0 9F /r","V","V","FMA","","rw,r,r","","" +"VFNMSUB132SS xmm1, {k}{z}, xmmV, xmm2/m32","VFNMSUB132SS xmm2/m32, xmmV, {k}{z}, xmm1","vfnmsub132ss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W0 9F /r","V","V","AVX512F","scale4","rw,r,r,r","","" +"VFNMSUB213PD xmm1, xmmV, xmm2/m128","VFNMSUB213PD xmm2/m128, xmmV, xmm1","vfnmsub213pd xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W1 AE /r","V","V","FMA","","rw,r,r","","" +"VFNMSUB213PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VFNMSUB213PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vfnmsub213pd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 AE /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VFNMSUB213PD ymm1, ymmV, ymm2/m256","VFNMSUB213PD ymm2/m256, ymmV, ymm1","vfnmsub213pd ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W1 AE /r","V","V","FMA","","rw,r,r","","" +"VFNMSUB213PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VFNMSUB213PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfnmsub213pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 AE /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VFNMSUB213PD zmm1{er}, {k}{z}, zmmV, zmm2","VFNMSUB213PD zmm2, zmmV, {k}{z}, zmm1{er}","vfnmsub213pd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W1 AE /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMSUB213PD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VFNMSUB213PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfnmsub213pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 AE /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VFNMSUB213PS xmm1, xmmV, xmm2/m128","VFNMSUB213PS xmm2/m128, xmmV, xmm1","vfnmsub213ps xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W0 AE /r","V","V","FMA","","rw,r,r","","" +"VFNMSUB213PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VFNMSUB213PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfnmsub213ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 AE 
/r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VFNMSUB213PS ymm1, ymmV, ymm2/m256","VFNMSUB213PS ymm2/m256, ymmV, ymm1","vfnmsub213ps ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W0 AE /r","V","V","FMA","","rw,r,r","","" +"VFNMSUB213PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VFNMSUB213PS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vfnmsub213ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 AE /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VFNMSUB213PS zmm1{er}, {k}{z}, zmmV, zmm2","VFNMSUB213PS zmm2, zmmV, {k}{z}, zmm1{er}","vfnmsub213ps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W0 AE /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMSUB213PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VFNMSUB213PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vfnmsub213ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 AE /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VFNMSUB213SD xmm1{er}, {k}{z}, xmmV, xmm2","VFNMSUB213SD xmm2, xmmV, {k}{z}, xmm1{er}","vfnmsub213sd xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W1 AF /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMSUB213SD xmm1, xmmV, xmm2/m64","VFNMSUB213SD xmm2/m64, xmmV, xmm1","vfnmsub213sd xmm2/m64, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W1 AF /r","V","V","FMA","","rw,r,r","","" +"VFNMSUB213SD xmm1, {k}{z}, xmmV, xmm2/m64","VFNMSUB213SD xmm2/m64, xmmV, {k}{z}, xmm1","vfnmsub213sd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W1 AF /r","V","V","AVX512F","scale8","rw,r,r,r","","" +"VFNMSUB213SS xmm1{er}, {k}{z}, xmmV, xmm2","VFNMSUB213SS xmm2, xmmV, {k}{z}, xmm1{er}","vfnmsub213ss xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W0 AF /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMSUB213SS xmm1, xmmV, xmm2/m32","VFNMSUB213SS xmm2/m32, xmmV, xmm1","vfnmsub213ss xmm2/m32, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W0 AF /r","V","V","FMA","","rw,r,r","","" +"VFNMSUB213SS xmm1, {k}{z}, xmmV, xmm2/m32","VFNMSUB213SS xmm2/m32, xmmV, {k}{z}, xmm1","vfnmsub213ss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W0 AF /r","V","V","AVX512F","scale4","rw,r,r,r","","" +"VFNMSUB231PD xmm1, xmmV, xmm2/m128","VFNMSUB231PD xmm2/m128, xmmV, xmm1","vfnmsub231pd xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W1 BE /r","V","V","FMA","","rw,r,r","","" +"VFNMSUB231PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VFNMSUB231PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vfnmsub231pd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 BE /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VFNMSUB231PD ymm1, ymmV, ymm2/m256","VFNMSUB231PD ymm2/m256, ymmV, ymm1","vfnmsub231pd ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W1 BE /r","V","V","FMA","","rw,r,r","","" +"VFNMSUB231PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VFNMSUB231PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vfnmsub231pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 BE /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VFNMSUB231PD zmm1{er}, {k}{z}, zmmV, zmm2","VFNMSUB231PD zmm2, zmmV, {k}{z}, zmm1{er}","vfnmsub231pd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W1 BE /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMSUB231PD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VFNMSUB231PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vfnmsub231pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 BE /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VFNMSUB231PS xmm1, xmmV, xmm2/m128","VFNMSUB231PS xmm2/m128, xmmV, 
xmm1","vfnmsub231ps xmm2/m128, xmmV, xmm1","VEX.DDS.128.66.0F38.W0 BE /r","V","V","FMA","","rw,r,r","","" +"VFNMSUB231PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VFNMSUB231PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vfnmsub231ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 BE /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VFNMSUB231PS ymm1, ymmV, ymm2/m256","VFNMSUB231PS ymm2/m256, ymmV, ymm1","vfnmsub231ps ymm2/m256, ymmV, ymm1","VEX.DDS.256.66.0F38.W0 BE /r","V","V","FMA","","rw,r,r","","" +"VFNMSUB231PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VFNMSUB231PS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vfnmsub231ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 BE /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VFNMSUB231PS zmm1{er}, {k}{z}, zmmV, zmm2","VFNMSUB231PS zmm2, zmmV, {k}{z}, zmm1{er}","vfnmsub231ps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.DDS.512.66.0F38.W0 BE /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMSUB231PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VFNMSUB231PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vfnmsub231ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 BE /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VFNMSUB231SD xmm1{er}, {k}{z}, xmmV, xmm2","VFNMSUB231SD xmm2, xmmV, {k}{z}, xmm1{er}","vfnmsub231sd xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W1 BF /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMSUB231SD xmm1, xmmV, xmm2/m64","VFNMSUB231SD xmm2/m64, xmmV, xmm1","vfnmsub231sd xmm2/m64, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W1 BF /r","V","V","FMA","","rw,r,r","","" +"VFNMSUB231SD xmm1, {k}{z}, xmmV, xmm2/m64","VFNMSUB231SD xmm2/m64, xmmV, {k}{z}, xmm1","vfnmsub231sd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W1 BF /r","V","V","AVX512F","scale8","rw,r,r,r","","" +"VFNMSUB231SS xmm1{er}, {k}{z}, xmmV, xmm2","VFNMSUB231SS xmm2, xmmV, {k}{z}, xmm1{er}","vfnmsub231ss xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.DDS.128.66.0F38.W0 BF /r","V","V","AVX512F","modrm_regonly","rw,r,r,r","","" +"VFNMSUB231SS xmm1, xmmV, xmm2/m32","VFNMSUB231SS xmm2/m32, xmmV, xmm1","vfnmsub231ss xmm2/m32, xmmV, xmm1","VEX.DDS.LIG.66.0F38.W0 BF /r","V","V","FMA","","rw,r,r","","" +"VFNMSUB231SS xmm1, {k}{z}, xmmV, xmm2/m32","VFNMSUB231SS xmm2/m32, xmmV, {k}{z}, xmm1","vfnmsub231ss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.DDS.LIG.66.0F38.W0 BF /r","V","V","AVX512F","scale4","rw,r,r,r","","" +"VFNMSUBPD xmm1, xmmV, xmmIH, xmm2/m128","VFNMSUBPD xmm2/m128, xmmIH, xmmV, xmm1","vfnmsubpd xmm2/m128, xmmIH, xmmV, xmm1","VEX.NDS.128.66.0F3A.W1 7D /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMSUBPD xmm1, xmmV, xmm2/m128, xmmIH","VFNMSUBPD xmmIH, xmm2/m128, xmmV, xmm1","vfnmsubpd xmmIH, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W0 7D /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMSUBPD ymm1, ymmV, ymmIH, ymm2/m256","VFNMSUBPD ymm2/m256, ymmIH, ymmV, ymm1","vfnmsubpd ymm2/m256, ymmIH, ymmV, ymm1","VEX.NDS.256.66.0F3A.W1 7D /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMSUBPD ymm1, ymmV, ymm2/m256, ymmIH","VFNMSUBPD ymmIH, ymm2/m256, ymmV, ymm1","vfnmsubpd ymmIH, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 7D /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMSUBPS xmm1, xmmV, xmmIH, xmm2/m128","VFNMSUBPS xmm2/m128, xmmIH, xmmV, xmm1","vfnmsubps xmm2/m128, xmmIH, xmmV, xmm1","VEX.NDS.128.66.0F3A.W1 7C /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMSUBPS xmm1, xmmV, xmm2/m128, xmmIH","VFNMSUBPS xmmIH, xmm2/m128, xmmV, xmm1","vfnmsubps xmmIH, xmm2/m128, xmmV, 
xmm1","VEX.NDS.128.66.0F3A.W0 7C /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMSUBPS ymm1, ymmV, ymmIH, ymm2/m256","VFNMSUBPS ymm2/m256, ymmIH, ymmV, ymm1","vfnmsubps ymm2/m256, ymmIH, ymmV, ymm1","VEX.NDS.256.66.0F3A.W1 7C /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMSUBPS ymm1, ymmV, ymm2/m256, ymmIH","VFNMSUBPS ymmIH, ymm2/m256, ymmV, ymm1","vfnmsubps ymmIH, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 7C /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMSUBSD xmm1, xmmV, xmmIH, xmm2/m64","VFNMSUBSD xmm2/m64, xmmIH, xmmV, xmm1","vfnmsubsd xmm2/m64, xmmIH, xmmV, xmm1","VEX.NDS.LIG.66.0F3A.W1 7F /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMSUBSD xmm1, xmmV, xmm2/m64, xmmIH","VFNMSUBSD xmmIH, xmm2/m64, xmmV, xmm1","vfnmsubsd xmmIH, xmm2/m64, xmmV, xmm1","VEX.NDS.LIG.66.0F3A.W0 7F /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMSUBSS xmm1, xmmV, xmmIH, xmm2/m32","VFNMSUBSS xmm2/m32, xmmIH, xmmV, xmm1","vfnmsubss xmm2/m32, xmmIH, xmmV, xmm1","VEX.NDS.LIG.66.0F3A.W1 7E /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFNMSUBSS xmm1, xmmV, xmm2/m32, xmmIH","VFNMSUBSS xmmIH, xmm2/m32, xmmV, xmm1","vfnmsubss xmmIH, xmm2/m32, xmmV, xmm1","VEX.NDS.LIG.66.0F3A.W0 7E /r /is4","V","V","FMA4","amd","w,r,r,r","","" +"VFPCLASSPD k1, {k}, xmm2/m128/m64bcst, imm8u","VFPCLASSPDX imm8u, xmm2/m128/m64bcst, {k}, k1","vfpclasspdx imm8u, xmm2/m128/m64bcst, {k}, k1","EVEX.128.66.0F3A.W1 66 /r ib","V","V","AVX512DQ+AVX512VL","bscale8,scale16","w,r,r,r","Y","128" +"VFPCLASSPD k1, {k}, ymm2/m256/m64bcst, imm8u","VFPCLASSPDY imm8u, ymm2/m256/m64bcst, {k}, k1","vfpclasspdy imm8u, ymm2/m256/m64bcst, {k}, k1","EVEX.256.66.0F3A.W1 66 /r ib","V","V","AVX512DQ+AVX512VL","bscale8,scale32","w,r,r,r","Y","256" +"VFPCLASSPD k1, {k}, zmm2/m512/m64bcst, imm8u","VFPCLASSPDZ imm8u, zmm2/m512/m64bcst, {k}, k1","vfpclasspdz imm8u, zmm2/m512/m64bcst, {k}, k1","EVEX.512.66.0F3A.W1 66 /r ib","V","V","AVX512DQ","bscale8,scale64","w,r,r,r","Y","512" +"VFPCLASSPS k1, {k}, xmm2/m128/m32bcst, imm8u","VFPCLASSPSX imm8u, xmm2/m128/m32bcst, {k}, k1","vfpclasspsx imm8u, xmm2/m128/m32bcst, {k}, k1","EVEX.128.66.0F3A.W0 66 /r ib","V","V","AVX512DQ+AVX512VL","bscale4,scale16","w,r,r,r","Y","128" +"VFPCLASSPS k1, {k}, ymm2/m256/m32bcst, imm8u","VFPCLASSPSY imm8u, ymm2/m256/m32bcst, {k}, k1","vfpclasspsy imm8u, ymm2/m256/m32bcst, {k}, k1","EVEX.256.66.0F3A.W0 66 /r ib","V","V","AVX512DQ+AVX512VL","bscale4,scale32","w,r,r,r","Y","256" +"VFPCLASSPS k1, {k}, zmm2/m512/m32bcst, imm8u","VFPCLASSPSZ imm8u, zmm2/m512/m32bcst, {k}, k1","vfpclasspsz imm8u, zmm2/m512/m32bcst, {k}, k1","EVEX.512.66.0F3A.W0 66 /r ib","V","V","AVX512DQ","bscale4,scale64","w,r,r,r","Y","512" +"VFPCLASSSD k1, {k}, xmm2/m64, imm8u","VFPCLASSSD imm8u, xmm2/m64, {k}, k1","vfpclasssd imm8u, xmm2/m64, {k}, k1","EVEX.LIG.66.0F3A.W1 67 /r ib","V","V","AVX512DQ","scale8","w,r,r,r","","" +"VFPCLASSSS k1, {k}, xmm2/m32, imm8u","VFPCLASSSS imm8u, xmm2/m32, {k}, k1","vfpclassss imm8u, xmm2/m32, {k}, k1","EVEX.LIG.66.0F3A.W0 67 /r ib","V","V","AVX512DQ","scale4","w,r,r,r","","" +"VFRCZPD xmm1, xmm2/m128","VFRCZPD xmm2/m128, xmm1","vfrczpd xmm2/m128, xmm1","XOP.128.09.W0 81 /r","V","V","XOP","amd","w,r","","" +"VFRCZPD ymm1, ymm2/m256","VFRCZPD ymm2/m256, ymm1","vfrczpd ymm2/m256, ymm1","XOP.256.09.W0 81 /r","V","V","XOP","amd","w,r","","" +"VFRCZPS xmm1, xmm2/m128","VFRCZPS xmm2/m128, xmm1","vfrczps xmm2/m128, xmm1","XOP.128.09.W0 80 /r","V","V","XOP","amd","w,r","","" +"VFRCZPS ymm1, ymm2/m256","VFRCZPS ymm2/m256, ymm1","vfrczps ymm2/m256, ymm1","XOP.256.09.W0 80 
/r","V","V","XOP","amd","w,r","","" +"VFRCZSD xmm1, xmm2/m64","VFRCZSD xmm2/m64, xmm1","vfrczsd xmm2/m64, xmm1","XOP.128.09.W0 83 /r","V","V","XOP","amd","w,r","","" +"VFRCZSS xmm1, xmm2/m32","VFRCZSS xmm2/m32, xmm1","vfrczss xmm2/m32, xmm1","XOP.128.09.W0 82 /r","V","V","XOP","amd","w,r","","" +"VGATHERDPD xmm1, {k1-k7}, vm32x","VGATHERDPD vm32x, {k1-k7}, xmm1","vgatherdpd vm32x, {k1-k7}, xmm1","EVEX.128.66.0F38.W1 92 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,rw,r","","" +"VGATHERDPD ymm1, {k1-k7}, vm32x","VGATHERDPD vm32x, {k1-k7}, ymm1","vgatherdpd vm32x, {k1-k7}, ymm1","EVEX.256.66.0F38.W1 92 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,rw,r","","" +"VGATHERDPD zmm1, {k1-k7}, vm32y","VGATHERDPD vm32y, {k1-k7}, zmm1","vgatherdpd vm32y, {k1-k7}, zmm1","EVEX.512.66.0F38.W1 92 /vsib","V","V","AVX512F","modrm_memonly,scale8","w,rw,r","","" +"VGATHERDPD xmm1, vm32x, xmmV","VGATHERDPD xmmV, vm32x, xmm1","vgatherdpd xmmV, vm32x, xmm1","VEX.DDS.128.66.0F38.W1 92 /r","V","V","AVX2","modrm_memonly","rw,r,rw","","" +"VGATHERDPD ymm1, vm32x, ymmV","VGATHERDPD ymmV, vm32x, ymm1","vgatherdpd ymmV, vm32x, ymm1","VEX.DDS.256.66.0F38.W1 92 /r","V","V","AVX2","modrm_memonly","rw,r,rw","","" +"VGATHERDPS xmm1, {k1-k7}, vm32x","VGATHERDPS vm32x, {k1-k7}, xmm1","vgatherdps vm32x, {k1-k7}, xmm1","EVEX.128.66.0F38.W0 92 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale4","w,rw,r","","" +"VGATHERDPS ymm1, {k1-k7}, vm32y","VGATHERDPS vm32y, {k1-k7}, ymm1","vgatherdps vm32y, {k1-k7}, ymm1","EVEX.256.66.0F38.W0 92 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale4","w,rw,r","","" +"VGATHERDPS zmm1, {k1-k7}, vm32z","VGATHERDPS vm32z, {k1-k7}, zmm1","vgatherdps vm32z, {k1-k7}, zmm1","EVEX.512.66.0F38.W0 92 /vsib","V","V","AVX512F","modrm_memonly,scale4","w,rw,r","","" +"VGATHERDPS xmm1, vm32x, xmmV","VGATHERDPS xmmV, vm32x, xmm1","vgatherdps xmmV, vm32x, xmm1","VEX.DDS.128.66.0F38.W0 92 /r","V","V","AVX2","modrm_memonly","rw,r,rw","","" +"VGATHERDPS ymm1, vm32y, ymmV","VGATHERDPS ymmV, vm32y, ymm1","vgatherdps ymmV, vm32y, ymm1","VEX.DDS.256.66.0F38.W0 92 /r","V","V","AVX2","modrm_memonly","rw,r,rw","","" +"VGATHERPF0DPD vm32y, {k1-k7}","VGATHERPF0DPD {k1-k7}, vm32y","vgatherpf0dpd {k1-k7}, vm32y","EVEX.512.66.0F38.W1 C6 /1","V","V","AVX512PF","modrm_memonly,scale8","r,rw","","" +"VGATHERPF0DPS vm32z, {k1-k7}","VGATHERPF0DPS {k1-k7}, vm32z","vgatherpf0dps {k1-k7}, vm32z","EVEX.512.66.0F38.W0 C6 /1","V","V","AVX512PF","modrm_memonly,scale4","r,rw","","" +"VGATHERPF0QPD vm64z, {k1-k7}","VGATHERPF0QPD {k1-k7}, vm64z","vgatherpf0qpd {k1-k7}, vm64z","EVEX.512.66.0F38.W1 C7 /1","V","V","AVX512PF","modrm_memonly,scale8","r,rw","","" +"VGATHERPF0QPS vm64z, {k1-k7}","VGATHERPF0QPS {k1-k7}, vm64z","vgatherpf0qps {k1-k7}, vm64z","EVEX.512.66.0F38.W0 C7 /1","V","V","AVX512PF","modrm_memonly,scale4","r,rw","","" +"VGATHERPF1DPD vm32y, {k1-k7}","VGATHERPF1DPD {k1-k7}, vm32y","vgatherpf1dpd {k1-k7}, vm32y","EVEX.512.66.0F38.W1 C6 /2","V","V","AVX512PF","modrm_memonly,scale8","r,rw","","" +"VGATHERPF1DPS vm32z, {k1-k7}","VGATHERPF1DPS {k1-k7}, vm32z","vgatherpf1dps {k1-k7}, vm32z","EVEX.512.66.0F38.W0 C6 /2","V","V","AVX512PF","modrm_memonly,scale4","r,rw","","" +"VGATHERPF1QPD vm64z, {k1-k7}","VGATHERPF1QPD {k1-k7}, vm64z","vgatherpf1qpd {k1-k7}, vm64z","EVEX.512.66.0F38.W1 C7 /2","V","V","AVX512PF","modrm_memonly,scale8","r,rw","","" +"VGATHERPF1QPS vm64z, {k1-k7}","VGATHERPF1QPS {k1-k7}, vm64z","vgatherpf1qps {k1-k7}, vm64z","EVEX.512.66.0F38.W0 C7 
/2","V","V","AVX512PF","modrm_memonly,scale4","r,rw","","" +"VGATHERQPD xmm1, {k1-k7}, vm64x","VGATHERQPD vm64x, {k1-k7}, xmm1","vgatherqpd vm64x, {k1-k7}, xmm1","EVEX.128.66.0F38.W1 93 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,rw,r","","" +"VGATHERQPD ymm1, {k1-k7}, vm64y","VGATHERQPD vm64y, {k1-k7}, ymm1","vgatherqpd vm64y, {k1-k7}, ymm1","EVEX.256.66.0F38.W1 93 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,rw,r","","" +"VGATHERQPD zmm1, {k1-k7}, vm64z","VGATHERQPD vm64z, {k1-k7}, zmm1","vgatherqpd vm64z, {k1-k7}, zmm1","EVEX.512.66.0F38.W1 93 /vsib","V","V","AVX512F","modrm_memonly,scale8","w,rw,r","","" +"VGATHERQPD xmm1, vm64x, xmmV","VGATHERQPD xmmV, vm64x, xmm1","vgatherqpd xmmV, vm64x, xmm1","VEX.DDS.128.66.0F38.W1 93 /r","V","V","AVX2","modrm_memonly","rw,r,rw","","" +"VGATHERQPD ymm1, vm64y, ymmV","VGATHERQPD ymmV, vm64y, ymm1","vgatherqpd ymmV, vm64y, ymm1","VEX.DDS.256.66.0F38.W1 93 /r","V","V","AVX2","modrm_memonly","rw,r,rw","","" +"VGATHERQPS xmm1, {k1-k7}, vm64x","VGATHERQPS vm64x, {k1-k7}, xmm1","vgatherqps vm64x, {k1-k7}, xmm1","EVEX.128.66.0F38.W0 93 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale4","w,rw,r","","" +"VGATHERQPS xmm1, {k1-k7}, vm64y","VGATHERQPS vm64y, {k1-k7}, xmm1","vgatherqps vm64y, {k1-k7}, xmm1","EVEX.256.66.0F38.W0 93 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale4","w,rw,r","","" +"VGATHERQPS ymm1, {k1-k7}, vm64z","VGATHERQPS vm64z, {k1-k7}, ymm1","vgatherqps vm64z, {k1-k7}, ymm1","EVEX.512.66.0F38.W0 93 /vsib","V","V","AVX512F","modrm_memonly,scale4","w,rw,r","","" +"VGATHERQPS xmm1, vm64x, xmmV","VGATHERQPS xmmV, vm64x, xmm1","vgatherqps xmmV, vm64x, xmm1","VEX.DDS.128.66.0F38.W0 93 /r","V","V","AVX2","modrm_memonly","rw,r,rw","","" +"VGATHERQPS xmm1, vm64y, xmmV","VGATHERQPS xmmV, vm64y, xmm1","vgatherqps xmmV, vm64y, xmm1","VEX.DDS.256.66.0F38.W0 93 /r","V","V","AVX2","modrm_memonly","rw,r,rw","","" +"VGETEXPPD xmm1, {k}{z}, xmm2/m128/m64bcst","VGETEXPPD xmm2/m128/m64bcst, {k}{z}, xmm1","vgetexppd xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.66.0F38.W1 42 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r","","" +"VGETEXPPD ymm1, {k}{z}, ymm2/m256/m64bcst","VGETEXPPD ymm2/m256/m64bcst, {k}{z}, ymm1","vgetexppd ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.66.0F38.W1 42 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r","","" +"VGETEXPPD zmm1{sae}, {k}{z}, zmm2","VGETEXPPD zmm2, {k}{z}, zmm1{sae}","vgetexppd zmm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F38.W1 42 /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"VGETEXPPD zmm1, {k}{z}, zmm2/m512/m64bcst","VGETEXPPD zmm2/m512/m64bcst, {k}{z}, zmm1","vgetexppd zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W1 42 /r","V","V","AVX512F","bscale8,scale64","w,r,r","","" +"VGETEXPPS xmm1, {k}{z}, xmm2/m128/m32bcst","VGETEXPPS xmm2/m128/m32bcst, {k}{z}, xmm1","vgetexpps xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.66.0F38.W0 42 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r","","" +"VGETEXPPS ymm1, {k}{z}, ymm2/m256/m32bcst","VGETEXPPS ymm2/m256/m32bcst, {k}{z}, ymm1","vgetexpps ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.66.0F38.W0 42 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r","","" +"VGETEXPPS zmm1{sae}, {k}{z}, zmm2","VGETEXPPS zmm2, {k}{z}, zmm1{sae}","vgetexpps zmm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F38.W0 42 /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"VGETEXPPS zmm1, {k}{z}, zmm2/m512/m32bcst","VGETEXPPS zmm2/m512/m32bcst, {k}{z}, zmm1","vgetexpps zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W0 42 
/r","V","V","AVX512F","bscale4,scale64","w,r,r","","" +"VGETEXPSD xmm1{sae}, {k}{z}, xmmV, xmm2","VGETEXPSD xmm2, xmmV, {k}{z}, xmm1{sae}","vgetexpsd xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.66.0F38.W1 43 /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VGETEXPSD xmm1, {k}{z}, xmmV, xmm2/m64","VGETEXPSD xmm2/m64, xmmV, {k}{z}, xmm1","vgetexpsd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F38.W1 43 /r","V","V","AVX512F","scale8","w,r,r,r","","" +"VGETEXPSS xmm1{sae}, {k}{z}, xmmV, xmm2","VGETEXPSS xmm2, xmmV, {k}{z}, xmm1{sae}","vgetexpss xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.66.0F38.W0 43 /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VGETEXPSS xmm1, {k}{z}, xmmV, xmm2/m32","VGETEXPSS xmm2/m32, xmmV, {k}{z}, xmm1","vgetexpss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F38.W0 43 /r","V","V","AVX512F","scale4","w,r,r,r","","" +"VGETMANTPD xmm1, {k}{z}, xmm2/m128/m64bcst, imm8u:4","VGETMANTPD imm8u:4, xmm2/m128/m64bcst, {k}{z}, xmm1","vgetmantpd imm8u:4, xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.66.0F3A.W1 26 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VGETMANTPD ymm1, {k}{z}, ymm2/m256/m64bcst, imm8u:4","VGETMANTPD imm8u:4, ymm2/m256/m64bcst, {k}{z}, ymm1","vgetmantpd imm8u:4, ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.66.0F3A.W1 26 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VGETMANTPD zmm1{sae}, {k}{z}, zmm2, imm8u:4","VGETMANTPD imm8u:4, zmm2, {k}{z}, zmm1{sae}","vgetmantpd imm8u:4, zmm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F3A.W1 26 /r ib","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VGETMANTPD zmm1, {k}{z}, zmm2/m512/m64bcst, imm8u:4","VGETMANTPD imm8u:4, zmm2/m512/m64bcst, {k}{z}, zmm1","vgetmantpd imm8u:4, zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F3A.W1 26 /r ib","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VGETMANTPS xmm1, {k}{z}, xmm2/m128/m32bcst, imm8u:4","VGETMANTPS imm8u:4, xmm2/m128/m32bcst, {k}{z}, xmm1","vgetmantps imm8u:4, xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.66.0F3A.W0 26 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VGETMANTPS ymm1, {k}{z}, ymm2/m256/m32bcst, imm8u:4","VGETMANTPS imm8u:4, ymm2/m256/m32bcst, {k}{z}, ymm1","vgetmantps imm8u:4, ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.66.0F3A.W0 26 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VGETMANTPS zmm1{sae}, {k}{z}, zmm2, imm8u:4","VGETMANTPS imm8u:4, zmm2, {k}{z}, zmm1{sae}","vgetmantps imm8u:4, zmm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F3A.W0 26 /r ib","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VGETMANTPS zmm1, {k}{z}, zmm2/m512/m32bcst, imm8u:4","VGETMANTPS imm8u:4, zmm2/m512/m32bcst, {k}{z}, zmm1","vgetmantps imm8u:4, zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F3A.W0 26 /r ib","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VGETMANTSD xmm1{sae}, {k}{z}, xmmV, xmm2, imm8u:4","VGETMANTSD imm8u:4, xmm2, xmmV, {k}{z}, xmm1{sae}","vgetmantsd imm8u:4, xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.66.0F3A.W1 27 /r ib","V","V","AVX512F","modrm_regonly","w,r,r,r,r","","" +"VGETMANTSD xmm1, {k}{z}, xmmV, xmm2/m64, imm8u:4","VGETMANTSD imm8u:4, xmm2/m64, xmmV, {k}{z}, xmm1","vgetmantsd imm8u:4, xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F3A.W1 27 /r ib","V","V","AVX512F","scale8","w,r,r,r,r","","" +"VGETMANTSS xmm1{sae}, {k}{z}, xmmV, xmm2, imm8u:4","VGETMANTSS imm8u:4, xmm2, xmmV, {k}{z}, xmm1{sae}","vgetmantss imm8u:4, xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.66.0F3A.W0 27 /r ib","V","V","AVX512F","modrm_regonly","w,r,r,r,r","","" 
+"VGETMANTSS xmm1, {k}{z}, xmmV, xmm2/m32, imm8u:4","VGETMANTSS imm8u:4, xmm2/m32, xmmV, {k}{z}, xmm1","vgetmantss imm8u:4, xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F3A.W0 27 /r ib","V","V","AVX512F","scale4","w,r,r,r,r","","" +"VGF2P8AFFINEINVQB xmm1, xmmV, xmm2/m128, imm8u","VGF2P8AFFINEINVQB imm8u, xmm2/m128, xmmV, xmm1","vgf2p8affineinvqb imm8u, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W1 CF /r ib","V","V","GFNI+AVX","","w,r,r,r","","" +"VGF2P8AFFINEINVQB xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst, imm8u","VGF2P8AFFINEINVQB imm8u, xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vgf2p8affineinvqb imm8u, xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F3A.W1 CF /r ib","V","V","GFNI+AVX512VL","bscale8,scale16","w,r,r,r,r","","" +"VGF2P8AFFINEINVQB ymm1, ymmV, ymm2/m256, imm8u","VGF2P8AFFINEINVQB imm8u, ymm2/m256, ymmV, ymm1","vgf2p8affineinvqb imm8u, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W1 CF /r ib","V","V","GFNI+AVX","","w,r,r,r","","" +"VGF2P8AFFINEINVQB ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst, imm8u","VGF2P8AFFINEINVQB imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vgf2p8affineinvqb imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W1 CF /r ib","V","V","GFNI+AVX512VL","bscale8,scale32","w,r,r,r,r","","" +"VGF2P8AFFINEINVQB zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst, imm8u","VGF2P8AFFINEINVQB imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vgf2p8affineinvqb imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W1 CF /r ib","V","V","GFNI+AVX512F","bscale8,scale64","w,r,r,r,r","","" +"VGF2P8AFFINEQB xmm1, xmmV, xmm2/m128, imm8u","VGF2P8AFFINEQB imm8u, xmm2/m128, xmmV, xmm1","vgf2p8affineqb imm8u, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W1 CE /r ib","V","V","GFNI+AVX","","w,r,r,r","","" +"VGF2P8AFFINEQB xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst, imm8u","VGF2P8AFFINEQB imm8u, xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vgf2p8affineqb imm8u, xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F3A.W1 CE /r ib","V","V","GFNI+AVX512VL","bscale8,scale16","w,r,r,r,r","","" +"VGF2P8AFFINEQB ymm1, ymmV, ymm2/m256, imm8u","VGF2P8AFFINEQB imm8u, ymm2/m256, ymmV, ymm1","vgf2p8affineqb imm8u, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W1 CE /r ib","V","V","GFNI+AVX","","w,r,r,r","","" +"VGF2P8AFFINEQB ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst, imm8u","VGF2P8AFFINEQB imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vgf2p8affineqb imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W1 CE /r ib","V","V","GFNI+AVX512VL","bscale8,scale32","w,r,r,r,r","","" +"VGF2P8AFFINEQB zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst, imm8u","VGF2P8AFFINEQB imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vgf2p8affineqb imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W1 CE /r ib","V","V","GFNI+AVX512F","bscale8,scale64","w,r,r,r,r","","" +"VGF2P8MULB xmm1, xmmV, xmm2/m128","VGF2P8MULB xmm2/m128, xmmV, xmm1","vgf2p8mulb xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.W0 CF /r","V","V","GFNI+AVX","","w,r,r","","" +"VGF2P8MULB xmm1, {k}{z}, xmmV, xmm2/m128","VGF2P8MULB xmm2/m128, xmmV, {k}{z}, xmm1","vgf2p8mulb xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W0 CF /r","V","V","GFNI+AVX512VL","scale16","w,r,r,r","","" +"VGF2P8MULB ymm1, ymmV, ymm2/m256","VGF2P8MULB ymm2/m256, ymmV, ymm1","vgf2p8mulb ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.W0 CF /r","V","V","GFNI+AVX","","w,r,r","","" +"VGF2P8MULB ymm1, {k}{z}, ymmV, ymm2/m256","VGF2P8MULB ymm2/m256, ymmV, {k}{z}, ymm1","vgf2p8mulb ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 CF 
/r","V","V","GFNI+AVX512VL","scale32","w,r,r,r","","" +"VGF2P8MULB zmm1, {k}{z}, zmmV, zmm2/m512","VGF2P8MULB zmm2/m512, zmmV, {k}{z}, zmm1","vgf2p8mulb zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 CF /r","V","V","GFNI+AVX512F","scale64","w,r,r,r","","" +"VHADDPD xmm1, xmmV, xmm2/m128","VHADDPD xmm2/m128, xmmV, xmm1","vhaddpd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 7C /r","V","V","AVX","","w,r,r","","" +"VHADDPD ymm1, ymmV, ymm2/m256","VHADDPD ymm2/m256, ymmV, ymm1","vhaddpd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 7C /r","V","V","AVX","","w,r,r","","" +"VHADDPS xmm1, xmmV, xmm2/m128","VHADDPS xmm2/m128, xmmV, xmm1","vhaddps xmm2/m128, xmmV, xmm1","VEX.NDS.128.F2.0F.WIG 7C /r","V","V","AVX","","w,r,r","","" +"VHADDPS ymm1, ymmV, ymm2/m256","VHADDPS ymm2/m256, ymmV, ymm1","vhaddps ymm2/m256, ymmV, ymm1","VEX.NDS.256.F2.0F.WIG 7C /r","V","V","AVX","","w,r,r","","" +"VHSUBPD xmm1, xmmV, xmm2/m128","VHSUBPD xmm2/m128, xmmV, xmm1","vhsubpd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 7D /r","V","V","AVX","","w,r,r","","" +"VHSUBPD ymm1, ymmV, ymm2/m256","VHSUBPD ymm2/m256, ymmV, ymm1","vhsubpd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 7D /r","V","V","AVX","","w,r,r","","" +"VHSUBPS xmm1, xmmV, xmm2/m128","VHSUBPS xmm2/m128, xmmV, xmm1","vhsubps xmm2/m128, xmmV, xmm1","VEX.NDS.128.F2.0F.WIG 7D /r","V","V","AVX","","w,r,r","","" +"VHSUBPS ymm1, ymmV, ymm2/m256","VHSUBPS ymm2/m256, ymmV, ymm1","vhsubps ymm2/m256, ymmV, ymm1","VEX.NDS.256.F2.0F.WIG 7D /r","V","V","AVX","","w,r,r","","" +"VINSERTF128 ymm1, ymmV, xmm2/m128, imm8u:1","VINSERTF128 imm8u:1, xmm2/m128, ymmV, ymm1","vinsertf128 imm8u:1, xmm2/m128, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 18 /r ib","V","V","AVX","","w,r,r,r","","" +"VINSERTF32X4 ymm1, {k}{z}, ymmV, xmm2/m128, imm8u:1","VINSERTF32X4 imm8u:1, xmm2/m128, ymmV, {k}{z}, ymm1","vinsertf32x4 imm8u:1, xmm2/m128, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W0 18 /r ib","V","V","AVX512F+AVX512VL","scale16","w,r,r,r,r","","" +"VINSERTF32X4 zmm1, {k}{z}, zmmV, xmm2/m128, imm8u:2","VINSERTF32X4 imm8u:2, xmm2/m128, zmmV, {k}{z}, zmm1","vinsertf32x4 imm8u:2, xmm2/m128, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W0 18 /r ib","V","V","AVX512F","scale16","w,r,r,r,r","","" +"VINSERTF32X8 zmm1, {k}{z}, zmmV, ymm2/m256, imm8u:1","VINSERTF32X8 imm8u:1, ymm2/m256, zmmV, {k}{z}, zmm1","vinsertf32x8 imm8u:1, ymm2/m256, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W0 1A /r ib","V","V","AVX512DQ","scale32","w,r,r,r,r","","" +"VINSERTF64X2 ymm1, {k}{z}, ymmV, xmm2/m128, imm8u:1","VINSERTF64X2 imm8u:1, xmm2/m128, ymmV, {k}{z}, ymm1","vinsertf64x2 imm8u:1, xmm2/m128, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W1 18 /r ib","V","V","AVX512DQ+AVX512VL","scale16","w,r,r,r,r","","" +"VINSERTF64X2 zmm1, {k}{z}, zmmV, xmm2/m128, imm8u:2","VINSERTF64X2 imm8u:2, xmm2/m128, zmmV, {k}{z}, zmm1","vinsertf64x2 imm8u:2, xmm2/m128, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W1 18 /r ib","V","V","AVX512DQ","scale16","w,r,r,r,r","","" +"VINSERTF64X4 zmm1, {k}{z}, zmmV, ymm2/m256, imm8u:1","VINSERTF64X4 imm8u:1, ymm2/m256, zmmV, {k}{z}, zmm1","vinsertf64x4 imm8u:1, ymm2/m256, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W1 1A /r ib","V","V","AVX512F","scale32","w,r,r,r,r","","" +"VINSERTI128 ymm1, ymmV, xmm2/m128, imm8u:1","VINSERTI128 imm8u:1, xmm2/m128, ymmV, ymm1","vinserti128 imm8u:1, xmm2/m128, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 38 /r ib","V","V","AVX2","","w,r,r,r","","" +"VINSERTI32X4 ymm1, {k}{z}, ymmV, xmm2/m128, imm8u:1","VINSERTI32X4 imm8u:1, xmm2/m128, ymmV, {k}{z}, ymm1","vinserti32x4 imm8u:1, 
xmm2/m128, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W0 38 /r ib","V","V","AVX512F+AVX512VL","scale16","w,r,r,r,r","","" +"VINSERTI32X4 zmm1, {k}{z}, zmmV, xmm2/m128, imm8u:2","VINSERTI32X4 imm8u:2, xmm2/m128, zmmV, {k}{z}, zmm1","vinserti32x4 imm8u:2, xmm2/m128, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W0 38 /r ib","V","V","AVX512F","scale16","w,r,r,r,r","","" +"VINSERTI32X8 zmm1, {k}{z}, zmmV, ymm2/m256, imm8u:1","VINSERTI32X8 imm8u:1, ymm2/m256, zmmV, {k}{z}, zmm1","vinserti32x8 imm8u:1, ymm2/m256, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W0 3A /r ib","V","V","AVX512DQ","scale32","w,r,r,r,r","","" +"VINSERTI64X2 ymm1, {k}{z}, ymmV, xmm2/m128, imm8u:1","VINSERTI64X2 imm8u:1, xmm2/m128, ymmV, {k}{z}, ymm1","vinserti64x2 imm8u:1, xmm2/m128, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W1 38 /r ib","V","V","AVX512DQ+AVX512VL","scale16","w,r,r,r,r","","" +"VINSERTI64X2 zmm1, {k}{z}, zmmV, xmm2/m128, imm8u:2","VINSERTI64X2 imm8u:2, xmm2/m128, zmmV, {k}{z}, zmm1","vinserti64x2 imm8u:2, xmm2/m128, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W1 38 /r ib","V","V","AVX512DQ","scale16","w,r,r,r,r","","" +"VINSERTI64X4 zmm1, {k}{z}, zmmV, ymm2/m256, imm8u:1","VINSERTI64X4 imm8u:1, ymm2/m256, zmmV, {k}{z}, zmm1","vinserti64x4 imm8u:1, ymm2/m256, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W1 3A /r ib","V","V","AVX512F","scale32","w,r,r,r,r","","" +"VINSERTPS xmm1, xmmV, xmm2/m32, imm8u","VINSERTPS imm8u, xmm2/m32, xmmV, xmm1","vinsertps imm8u, xmm2/m32, xmmV, xmm1","EVEX.NDS.128.66.0F3A.W0 21 /r ib","V","V","AVX512F+AVX512VL","scale4","w,r,r,r","","" +"VINSERTPS xmm1, xmmV, xmm2/m32, imm8u","VINSERTPS imm8u, xmm2/m32, xmmV, xmm1","vinsertps imm8u, xmm2/m32, xmmV, xmm1","VEX.NDS.128.66.0F3A.WIG 21 /r ib","V","V","AVX","","w,r,r,r","","" +"VLDDQU xmm1, m128","VLDDQU m128, xmm1","vlddqu m128, xmm1","VEX.128.F2.0F.WIG F0 /r","V","V","AVX","modrm_memonly","w,r","","" +"VLDDQU ymm1, m256","VLDDQU m256, ymm1","vlddqu m256, ymm1","VEX.256.F2.0F.WIG F0 /r","V","V","AVX","modrm_memonly","w,r","","" +"VLDMXCSR m32","VLDMXCSR m32","vldmxcsr m32","VEX.128.0F.WIG AE /2","V","V","AVX","modrm_memonly","r","","" +"VMASKMOVDQU xmm1, xmm2","VMASKMOVDQU xmm2, xmm1","vmaskmovdqu xmm2, xmm1","VEX.128.66.0F.WIG F7 /r","V","V","AVX","modrm_regonly","r,r","","" +"VMASKMOVPD xmm1, xmmV, m128","VMASKMOVPD m128, xmmV, xmm1","vmaskmovpd m128, xmmV, xmm1","VEX.NDS.128.66.0F38.W0 2D /r","V","V","AVX","modrm_memonly","w,r,r","","" +"VMASKMOVPD ymm1, ymmV, m256","VMASKMOVPD m256, ymmV, ymm1","vmaskmovpd m256, ymmV, ymm1","VEX.NDS.256.66.0F38.W0 2D /r","V","V","AVX","modrm_memonly","w,r,r","","" +"VMASKMOVPD m128, xmmV, xmm1","VMASKMOVPD xmm1, xmmV, m128","vmaskmovpd xmm1, xmmV, m128","VEX.NDS.128.66.0F38.W0 2F /r","V","V","AVX","modrm_memonly","w,r,r","","" +"VMASKMOVPD m256, ymmV, ymm1","VMASKMOVPD ymm1, ymmV, m256","vmaskmovpd ymm1, ymmV, m256","VEX.NDS.256.66.0F38.W0 2F /r","V","V","AVX","modrm_memonly","w,r,r","","" +"VMASKMOVPS xmm1, xmmV, m128","VMASKMOVPS m128, xmmV, xmm1","vmaskmovps m128, xmmV, xmm1","VEX.NDS.128.66.0F38.W0 2C /r","V","V","AVX","modrm_memonly","w,r,r","","" +"VMASKMOVPS ymm1, ymmV, m256","VMASKMOVPS m256, ymmV, ymm1","vmaskmovps m256, ymmV, ymm1","VEX.NDS.256.66.0F38.W0 2C /r","V","V","AVX","modrm_memonly","w,r,r","","" +"VMASKMOVPS m128, xmmV, xmm1","VMASKMOVPS xmm1, xmmV, m128","vmaskmovps xmm1, xmmV, m128","VEX.NDS.128.66.0F38.W0 2E /r","V","V","AVX","modrm_memonly","w,r,r","","" +"VMASKMOVPS m256, ymmV, ymm1","VMASKMOVPS ymm1, ymmV, m256","vmaskmovps ymm1, ymmV, m256","VEX.NDS.256.66.0F38.W0 2E 
/r","V","V","AVX","modrm_memonly","w,r,r","","" +"VMAXPD xmm1, xmmV, xmm2/m128","VMAXPD xmm2/m128, xmmV, xmm1","vmaxpd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 5F /r","V","V","AVX","","w,r,r","","" +"VMAXPD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VMAXPD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vmaxpd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 5F /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VMAXPD ymm1, ymmV, ymm2/m256","VMAXPD ymm2/m256, ymmV, ymm1","vmaxpd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 5F /r","V","V","AVX","","w,r,r","","" +"VMAXPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VMAXPD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vmaxpd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 5F /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VMAXPD zmm1{sae}, {k}{z}, zmmV, zmm2","VMAXPD zmm2, zmmV, {k}{z}, zmm1{sae}","vmaxpd zmm2, zmmV, {k}{z}, zmm1{sae}","EVEX.NDS.512.66.0F.W1 5F /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VMAXPD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VMAXPD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vmaxpd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 5F /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VMAXPS xmm1, xmmV, xmm2/m128","VMAXPS xmm2/m128, xmmV, xmm1","vmaxps xmm2/m128, xmmV, xmm1","VEX.NDS.128.0F.WIG 5F /r","V","V","AVX","","w,r,r","","" +"VMAXPS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VMAXPS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vmaxps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.0F.W0 5F /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VMAXPS ymm1, ymmV, ymm2/m256","VMAXPS ymm2/m256, ymmV, ymm1","vmaxps ymm2/m256, ymmV, ymm1","VEX.NDS.256.0F.WIG 5F /r","V","V","AVX","","w,r,r","","" +"VMAXPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VMAXPS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vmaxps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.0F.W0 5F /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VMAXPS zmm1{sae}, {k}{z}, zmmV, zmm2","VMAXPS zmm2, zmmV, {k}{z}, zmm1{sae}","vmaxps zmm2, zmmV, {k}{z}, zmm1{sae}","EVEX.NDS.512.0F.W0 5F /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VMAXPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VMAXPS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vmaxps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.0F.W0 5F /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VMAXSD xmm1{sae}, {k}{z}, xmmV, xmm2","VMAXSD xmm2, xmmV, {k}{z}, xmm1{sae}","vmaxsd xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.F2.0F.W1 5F /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VMAXSD xmm1, xmmV, xmm2/m64","VMAXSD xmm2/m64, xmmV, xmm1","vmaxsd xmm2/m64, xmmV, xmm1","VEX.NDS.LIG.F2.0F.WIG 5F /r","V","V","AVX","","w,r,r","","" +"VMAXSD xmm1, {k}{z}, xmmV, xmm2/m64","VMAXSD xmm2/m64, xmmV, {k}{z}, xmm1","vmaxsd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.F2.0F.W1 5F /r","V","V","AVX512F","scale8","w,r,r,r","","" +"VMAXSS xmm1{sae}, {k}{z}, xmmV, xmm2","VMAXSS xmm2, xmmV, {k}{z}, xmm1{sae}","vmaxss xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.F3.0F.W0 5F /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VMAXSS xmm1, xmmV, xmm2/m32","VMAXSS xmm2/m32, xmmV, xmm1","vmaxss xmm2/m32, xmmV, xmm1","VEX.NDS.LIG.F3.0F.WIG 5F /r","V","V","AVX","","w,r,r","","" +"VMAXSS xmm1, {k}{z}, xmmV, xmm2/m32","VMAXSS xmm2/m32, xmmV, {k}{z}, xmm1","vmaxss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.F3.0F.W0 5F /r","V","V","AVX512F","scale4","w,r,r,r","","" +"VMCALL","VMCALL","vmcall","0F 01 
C1","V","V","VTX","","","","" +"VMCLEAR m64","VMCLEAR m64","vmclear m64","66 0F C7 /6","V","V","VTX","modrm_memonly","r","","" +"VMFUNC","VMFUNC","vmfunc","0F 01 D4","V","V","","","","","" +"VMINPD xmm1, xmmV, xmm2/m128","VMINPD xmm2/m128, xmmV, xmm1","vminpd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 5D /r","V","V","AVX","","w,r,r","","" +"VMINPD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VMINPD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vminpd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 5D /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VMINPD ymm1, ymmV, ymm2/m256","VMINPD ymm2/m256, ymmV, ymm1","vminpd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 5D /r","V","V","AVX","","w,r,r","","" +"VMINPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VMINPD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vminpd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 5D /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VMINPD zmm1{sae}, {k}{z}, zmmV, zmm2","VMINPD zmm2, zmmV, {k}{z}, zmm1{sae}","vminpd zmm2, zmmV, {k}{z}, zmm1{sae}","EVEX.NDS.512.66.0F.W1 5D /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VMINPD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VMINPD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vminpd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 5D /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VMINPS xmm1, xmmV, xmm2/m128","VMINPS xmm2/m128, xmmV, xmm1","vminps xmm2/m128, xmmV, xmm1","VEX.NDS.128.0F.WIG 5D /r","V","V","AVX","","w,r,r","","" +"VMINPS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VMINPS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vminps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.0F.W0 5D /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VMINPS ymm1, ymmV, ymm2/m256","VMINPS ymm2/m256, ymmV, ymm1","vminps ymm2/m256, ymmV, ymm1","VEX.NDS.256.0F.WIG 5D /r","V","V","AVX","","w,r,r","","" +"VMINPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VMINPS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vminps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.0F.W0 5D /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VMINPS zmm1{sae}, {k}{z}, zmmV, zmm2","VMINPS zmm2, zmmV, {k}{z}, zmm1{sae}","vminps zmm2, zmmV, {k}{z}, zmm1{sae}","EVEX.NDS.512.0F.W0 5D /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VMINPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VMINPS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vminps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.0F.W0 5D /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VMINSD xmm1{sae}, {k}{z}, xmmV, xmm2","VMINSD xmm2, xmmV, {k}{z}, xmm1{sae}","vminsd xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.F2.0F.W1 5D /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VMINSD xmm1, xmmV, xmm2/m64","VMINSD xmm2/m64, xmmV, xmm1","vminsd xmm2/m64, xmmV, xmm1","VEX.NDS.LIG.F2.0F.WIG 5D /r","V","V","AVX","","w,r,r","","" +"VMINSD xmm1, {k}{z}, xmmV, xmm2/m64","VMINSD xmm2/m64, xmmV, {k}{z}, xmm1","vminsd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.F2.0F.W1 5D /r","V","V","AVX512F","scale8","w,r,r,r","","" +"VMINSS xmm1{sae}, {k}{z}, xmmV, xmm2","VMINSS xmm2, xmmV, {k}{z}, xmm1{sae}","vminss xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.F3.0F.W0 5D /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VMINSS xmm1, xmmV, xmm2/m32","VMINSS xmm2/m32, xmmV, xmm1","vminss xmm2/m32, xmmV, xmm1","VEX.NDS.LIG.F3.0F.WIG 5D /r","V","V","AVX","","w,r,r","","" +"VMINSS xmm1, {k}{z}, xmmV, xmm2/m32","VMINSS xmm2/m32, xmmV, {k}{z}, xmm1","vminss xmm2/m32, 
xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.F3.0F.W0 5D /r","V","V","AVX512F","scale4","w,r,r,r","","" +"VMLAUNCH","VMLAUNCH","vmlaunch","0F 01 C2","V","V","VTX","","","","" +"VMLOAD EAX","VMLOADL EAX","vmloadl EAX","0F 01 DA","V","V","SVM","amd,modrm_regonly,operand32","r","Y","32" +"VMLOAD RAX","VMLOADQ RAX","vmloadq RAX","REX.W 0F 01 DA","N.S.","V","SVM","amd,modrm_regonly","r","Y","64" +"VMLOAD AX","VMLOADW AX","vmloadw AX","0F 01 DA","V","V","SVM","amd,modrm_regonly,operand16","r","Y","16" +"VMMCALL","VMMCALL","vmmcall","0F 01 D9","V","V","SVM","amd","","","" +"VMOVAPD xmm2/m128, xmm1","VMOVAPD xmm1, xmm2/m128","vmovapd xmm1, xmm2/m128","VEX.128.66.0F.WIG 29 /r","V","V","AVX","","w,r","","" +"VMOVAPD xmm2/m128, {k}{z}, xmm1","VMOVAPD xmm1, {k}{z}, xmm2/m128","vmovapd xmm1, {k}{z}, xmm2/m128","EVEX.128.66.0F.W1 29 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VMOVAPD xmm1, xmm2/m128","VMOVAPD xmm2/m128, xmm1","vmovapd xmm2/m128, xmm1","VEX.128.66.0F.WIG 28 /r","V","V","AVX","","w,r","","" +"VMOVAPD xmm1, {k}{z}, xmm2/m128","VMOVAPD xmm2/m128, {k}{z}, xmm1","vmovapd xmm2/m128, {k}{z}, xmm1","EVEX.128.66.0F.W1 28 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VMOVAPD ymm2/m256, ymm1","VMOVAPD ymm1, ymm2/m256","vmovapd ymm1, ymm2/m256","VEX.256.66.0F.WIG 29 /r","V","V","AVX","","w,r","","" +"VMOVAPD ymm2/m256, {k}{z}, ymm1","VMOVAPD ymm1, {k}{z}, ymm2/m256","vmovapd ymm1, {k}{z}, ymm2/m256","EVEX.256.66.0F.W1 29 /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","","" +"VMOVAPD ymm1, ymm2/m256","VMOVAPD ymm2/m256, ymm1","vmovapd ymm2/m256, ymm1","VEX.256.66.0F.WIG 28 /r","V","V","AVX","","w,r","","" +"VMOVAPD ymm1, {k}{z}, ymm2/m256","VMOVAPD ymm2/m256, {k}{z}, ymm1","vmovapd ymm2/m256, {k}{z}, ymm1","EVEX.256.66.0F.W1 28 /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","","" +"VMOVAPD zmm2/m512, {k}{z}, zmm1","VMOVAPD zmm1, {k}{z}, zmm2/m512","vmovapd zmm1, {k}{z}, zmm2/m512","EVEX.512.66.0F.W1 29 /r","V","V","AVX512F","scale64","w,r,r","","" +"VMOVAPD zmm1, {k}{z}, zmm2/m512","VMOVAPD zmm2/m512, {k}{z}, zmm1","vmovapd zmm2/m512, {k}{z}, zmm1","EVEX.512.66.0F.W1 28 /r","V","V","AVX512F","scale64","w,r,r","","" +"VMOVAPS xmm2/m128, xmm1","VMOVAPS xmm1, xmm2/m128","vmovaps xmm1, xmm2/m128","VEX.128.0F.WIG 29 /r","V","V","AVX","","w,r","","" +"VMOVAPS xmm2/m128, {k}{z}, xmm1","VMOVAPS xmm1, {k}{z}, xmm2/m128","vmovaps xmm1, {k}{z}, xmm2/m128","EVEX.128.0F.W0 29 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VMOVAPS xmm1, xmm2/m128","VMOVAPS xmm2/m128, xmm1","vmovaps xmm2/m128, xmm1","VEX.128.0F.WIG 28 /r","V","V","AVX","","w,r","","" +"VMOVAPS xmm1, {k}{z}, xmm2/m128","VMOVAPS xmm2/m128, {k}{z}, xmm1","vmovaps xmm2/m128, {k}{z}, xmm1","EVEX.128.0F.W0 28 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VMOVAPS ymm2/m256, ymm1","VMOVAPS ymm1, ymm2/m256","vmovaps ymm1, ymm2/m256","VEX.256.0F.WIG 29 /r","V","V","AVX","","w,r","","" +"VMOVAPS ymm2/m256, {k}{z}, ymm1","VMOVAPS ymm1, {k}{z}, ymm2/m256","vmovaps ymm1, {k}{z}, ymm2/m256","EVEX.256.0F.W0 29 /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","","" +"VMOVAPS ymm1, ymm2/m256","VMOVAPS ymm2/m256, ymm1","vmovaps ymm2/m256, ymm1","VEX.256.0F.WIG 28 /r","V","V","AVX","","w,r","","" +"VMOVAPS ymm1, {k}{z}, ymm2/m256","VMOVAPS ymm2/m256, {k}{z}, ymm1","vmovaps ymm2/m256, {k}{z}, ymm1","EVEX.256.0F.W0 28 /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","","" +"VMOVAPS zmm2/m512, {k}{z}, zmm1","VMOVAPS zmm1, {k}{z}, zmm2/m512","vmovaps zmm1, {k}{z}, zmm2/m512","EVEX.512.0F.W0 29 
/r","V","V","AVX512F","scale64","w,r,r","","" +"VMOVAPS zmm1, {k}{z}, zmm2/m512","VMOVAPS zmm2/m512, {k}{z}, zmm1","vmovaps zmm2/m512, {k}{z}, zmm1","EVEX.512.0F.W0 28 /r","V","V","AVX512F","scale64","w,r,r","","" +"VMOVD xmm1, r/m32","VMOVD r/m32, xmm1","vmovd r/m32, xmm1","EVEX.128.66.0F.W0 6E /r","V","V","AVX512F+AVX512VL","scale4","w,r","","" +"VMOVD xmm1, r/m32","VMOVD r/m32, xmm1","vmovd r/m32, xmm1","VEX.128.66.0F.W0 6E /r","V","V","AVX","","w,r","","" +"VMOVD r/m32, xmm1","VMOVD xmm1, r/m32","vmovd xmm1, r/m32","EVEX.128.66.0F.W0 7E /r","V","V","AVX512F+AVX512VL","scale4","w,r","","" +"VMOVD r/m32, xmm1","VMOVD xmm1, r/m32","vmovd xmm1, r/m32","VEX.128.66.0F.W0 7E /r","V","V","AVX","","w,r","","" +"VMOVDDUP xmm1, xmm2/m64","VMOVDDUP xmm2/m64, xmm1","vmovddup xmm2/m64, xmm1","VEX.128.F2.0F.WIG 12 /r","V","V","AVX","","w,r","","" +"VMOVDDUP xmm1, {k}{z}, xmm2/m64","VMOVDDUP xmm2/m64, {k}{z}, xmm1","vmovddup xmm2/m64, {k}{z}, xmm1","EVEX.128.F2.0F.W1 12 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VMOVDDUP ymm1, ymm2/m256","VMOVDDUP ymm2/m256, ymm1","vmovddup ymm2/m256, ymm1","VEX.256.F2.0F.WIG 12 /r","V","V","AVX","","w,r","","" +"VMOVDDUP ymm1, {k}{z}, ymm2/m256","VMOVDDUP ymm2/m256, {k}{z}, ymm1","vmovddup ymm2/m256, {k}{z}, ymm1","EVEX.256.F2.0F.W1 12 /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","","" +"VMOVDDUP zmm1, {k}{z}, zmm2/m512","VMOVDDUP zmm2/m512, {k}{z}, zmm1","vmovddup zmm2/m512, {k}{z}, zmm1","EVEX.512.F2.0F.W1 12 /r","V","V","AVX512F","scale64","w,r,r","","" +"VMOVDQA xmm2/m128, xmm1","VMOVDQA xmm1, xmm2/m128","vmovdqa xmm1, xmm2/m128","VEX.128.66.0F.WIG 7F /r","V","V","AVX","","w,r","","" +"VMOVDQA xmm1, xmm2/m128","VMOVDQA xmm2/m128, xmm1","vmovdqa xmm2/m128, xmm1","VEX.128.66.0F.WIG 6F /r","V","V","AVX","","w,r","","" +"VMOVDQA ymm2/m256, ymm1","VMOVDQA ymm1, ymm2/m256","vmovdqa ymm1, ymm2/m256","VEX.256.66.0F.WIG 7F /r","V","V","AVX","","w,r","","" +"VMOVDQA ymm1, ymm2/m256","VMOVDQA ymm2/m256, ymm1","vmovdqa ymm2/m256, ymm1","VEX.256.66.0F.WIG 6F /r","V","V","AVX","","w,r","","" +"VMOVDQA32 xmm2/m128, {k}{z}, xmm1","VMOVDQA32 xmm1, {k}{z}, xmm2/m128","vmovdqa32 xmm1, {k}{z}, xmm2/m128","EVEX.128.66.0F.W0 7F /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VMOVDQA32 xmm1, {k}{z}, xmm2/m128","VMOVDQA32 xmm2/m128, {k}{z}, xmm1","vmovdqa32 xmm2/m128, {k}{z}, xmm1","EVEX.128.66.0F.W0 6F /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VMOVDQA32 ymm2/m256, {k}{z}, ymm1","VMOVDQA32 ymm1, {k}{z}, ymm2/m256","vmovdqa32 ymm1, {k}{z}, ymm2/m256","EVEX.256.66.0F.W0 7F /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","","" +"VMOVDQA32 ymm1, {k}{z}, ymm2/m256","VMOVDQA32 ymm2/m256, {k}{z}, ymm1","vmovdqa32 ymm2/m256, {k}{z}, ymm1","EVEX.256.66.0F.W0 6F /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","","" +"VMOVDQA32 zmm2/m512, {k}{z}, zmm1","VMOVDQA32 zmm1, {k}{z}, zmm2/m512","vmovdqa32 zmm1, {k}{z}, zmm2/m512","EVEX.512.66.0F.W0 7F /r","V","V","AVX512F","scale64","w,r,r","","" +"VMOVDQA32 zmm1, {k}{z}, zmm2/m512","VMOVDQA32 zmm2/m512, {k}{z}, zmm1","vmovdqa32 zmm2/m512, {k}{z}, zmm1","EVEX.512.66.0F.W0 6F /r","V","V","AVX512F","scale64","w,r,r","","" +"VMOVDQA64 xmm2/m128, {k}{z}, xmm1","VMOVDQA64 xmm1, {k}{z}, xmm2/m128","vmovdqa64 xmm1, {k}{z}, xmm2/m128","EVEX.128.66.0F.W1 7F /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","Y","128" +"VMOVDQA64 xmm1, {k}{z}, xmm2/m128","VMOVDQA64 xmm2/m128, {k}{z}, xmm1","vmovdqa64 xmm2/m128, {k}{z}, xmm1","EVEX.128.66.0F.W1 6F /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","Y","128" +"VMOVDQA64 
ymm2/m256, {k}{z}, ymm1","VMOVDQA64 ymm1, {k}{z}, ymm2/m256","vmovdqa64 ymm1, {k}{z}, ymm2/m256","EVEX.256.66.0F.W1 7F /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","Y","256" +"VMOVDQA64 ymm1, {k}{z}, ymm2/m256","VMOVDQA64 ymm2/m256, {k}{z}, ymm1","vmovdqa64 ymm2/m256, {k}{z}, ymm1","EVEX.256.66.0F.W1 6F /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","Y","256" +"VMOVDQA64 zmm2/m512, {k}{z}, zmm1","VMOVDQA64 zmm1, {k}{z}, zmm2/m512","vmovdqa64 zmm1, {k}{z}, zmm2/m512","EVEX.512.66.0F.W1 7F /r","V","V","AVX512F","scale64","w,r,r","Y","512" +"VMOVDQA64 zmm1, {k}{z}, zmm2/m512","VMOVDQA64 zmm2/m512, {k}{z}, zmm1","vmovdqa64 zmm2/m512, {k}{z}, zmm1","EVEX.512.66.0F.W1 6F /r","V","V","AVX512F","scale64","w,r,r","Y","512" +"VMOVDQU xmm2/m128, xmm1","VMOVDQU xmm1, xmm2/m128","vmovdqu xmm1, xmm2/m128","VEX.128.F3.0F.WIG 7F /r","V","V","AVX","","w,r","","" +"VMOVDQU xmm1, xmm2/m128","VMOVDQU xmm2/m128, xmm1","vmovdqu xmm2/m128, xmm1","VEX.128.F3.0F.WIG 6F /r","V","V","AVX","","w,r","","" +"VMOVDQU ymm2/m256, ymm1","VMOVDQU ymm1, ymm2/m256","vmovdqu ymm1, ymm2/m256","VEX.256.F3.0F.WIG 7F /r","V","V","AVX","","w,r","","" +"VMOVDQU ymm1, ymm2/m256","VMOVDQU ymm2/m256, ymm1","vmovdqu ymm2/m256, ymm1","VEX.256.F3.0F.WIG 6F /r","V","V","AVX","","w,r","","" +"VMOVDQU16 xmm2/m128, {k}{z}, xmm1","VMOVDQU16 xmm1, {k}{z}, xmm2/m128","vmovdqu16 xmm1, {k}{z}, xmm2/m128","EVEX.128.F2.0F.W1 7F /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r","","" +"VMOVDQU16 xmm1, {k}{z}, xmm2/m128","VMOVDQU16 xmm2/m128, {k}{z}, xmm1","vmovdqu16 xmm2/m128, {k}{z}, xmm1","EVEX.128.F2.0F.W1 6F /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r","","" +"VMOVDQU16 ymm2/m256, {k}{z}, ymm1","VMOVDQU16 ymm1, {k}{z}, ymm2/m256","vmovdqu16 ymm1, {k}{z}, ymm2/m256","EVEX.256.F2.0F.W1 7F /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r","","" +"VMOVDQU16 ymm1, {k}{z}, ymm2/m256","VMOVDQU16 ymm2/m256, {k}{z}, ymm1","vmovdqu16 ymm2/m256, {k}{z}, ymm1","EVEX.256.F2.0F.W1 6F /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r","","" +"VMOVDQU16 zmm2/m512, {k}{z}, zmm1","VMOVDQU16 zmm1, {k}{z}, zmm2/m512","vmovdqu16 zmm1, {k}{z}, zmm2/m512","EVEX.512.F2.0F.W1 7F /r","V","V","AVX512BW","scale64","w,r,r","","" +"VMOVDQU16 zmm1, {k}{z}, zmm2/m512","VMOVDQU16 zmm2/m512, {k}{z}, zmm1","vmovdqu16 zmm2/m512, {k}{z}, zmm1","EVEX.512.F2.0F.W1 6F /r","V","V","AVX512BW","scale64","w,r,r","","" +"VMOVDQU32 xmm2/m128, {k}{z}, xmm1","VMOVDQU32 xmm1, {k}{z}, xmm2/m128","vmovdqu32 xmm1, {k}{z}, xmm2/m128","EVEX.128.F3.0F.W0 7F /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","Y","128" +"VMOVDQU32 xmm1, {k}{z}, xmm2/m128","VMOVDQU32 xmm2/m128, {k}{z}, xmm1","vmovdqu32 xmm2/m128, {k}{z}, xmm1","EVEX.128.F3.0F.W0 6F /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","Y","128" +"VMOVDQU32 ymm2/m256, {k}{z}, ymm1","VMOVDQU32 ymm1, {k}{z}, ymm2/m256","vmovdqu32 ymm1, {k}{z}, ymm2/m256","EVEX.256.F3.0F.W0 7F /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","Y","256" +"VMOVDQU32 ymm1, {k}{z}, ymm2/m256","VMOVDQU32 ymm2/m256, {k}{z}, ymm1","vmovdqu32 ymm2/m256, {k}{z}, ymm1","EVEX.256.F3.0F.W0 6F /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","Y","256" +"VMOVDQU32 zmm2/m512, {k}{z}, zmm1","VMOVDQU32 zmm1, {k}{z}, zmm2/m512","vmovdqu32 zmm1, {k}{z}, zmm2/m512","EVEX.512.F3.0F.W0 7F /r","V","V","AVX512F","scale64","w,r,r","Y","512" +"VMOVDQU32 zmm1, {k}{z}, zmm2/m512","VMOVDQU32 zmm2/m512, {k}{z}, zmm1","vmovdqu32 zmm2/m512, {k}{z}, zmm1","EVEX.512.F3.0F.W0 6F /r","V","V","AVX512F","scale64","w,r,r","Y","512" +"VMOVDQU64 xmm2/m128, {k}{z}, xmm1","VMOVDQU64 xmm1, {k}{z}, 
xmm2/m128","vmovdqu64 xmm1, {k}{z}, xmm2/m128","EVEX.128.F3.0F.W1 7F /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","Y","128" +"VMOVDQU64 xmm1, {k}{z}, xmm2/m128","VMOVDQU64 xmm2/m128, {k}{z}, xmm1","vmovdqu64 xmm2/m128, {k}{z}, xmm1","EVEX.128.F3.0F.W1 6F /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","Y","128" +"VMOVDQU64 ymm2/m256, {k}{z}, ymm1","VMOVDQU64 ymm1, {k}{z}, ymm2/m256","vmovdqu64 ymm1, {k}{z}, ymm2/m256","EVEX.256.F3.0F.W1 7F /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","Y","256" +"VMOVDQU64 ymm1, {k}{z}, ymm2/m256","VMOVDQU64 ymm2/m256, {k}{z}, ymm1","vmovdqu64 ymm2/m256, {k}{z}, ymm1","EVEX.256.F3.0F.W1 6F /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","Y","256" +"VMOVDQU64 zmm2/m512, {k}{z}, zmm1","VMOVDQU64 zmm1, {k}{z}, zmm2/m512","vmovdqu64 zmm1, {k}{z}, zmm2/m512","EVEX.512.F3.0F.W1 7F /r","V","V","AVX512F","scale64","w,r,r","Y","512" +"VMOVDQU64 zmm1, {k}{z}, zmm2/m512","VMOVDQU64 zmm2/m512, {k}{z}, zmm1","vmovdqu64 zmm2/m512, {k}{z}, zmm1","EVEX.512.F3.0F.W1 6F /r","V","V","AVX512F","scale64","w,r,r","Y","512" +"VMOVDQU8 xmm2/m128, {k}{z}, xmm1","VMOVDQU8 xmm1, {k}{z}, xmm2/m128","vmovdqu8 xmm1, {k}{z}, xmm2/m128","EVEX.128.F2.0F.W0 7F /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r","Y","128" +"VMOVDQU8 xmm1, {k}{z}, xmm2/m128","VMOVDQU8 xmm2/m128, {k}{z}, xmm1","vmovdqu8 xmm2/m128, {k}{z}, xmm1","EVEX.128.F2.0F.W0 6F /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r","Y","128" +"VMOVDQU8 ymm2/m256, {k}{z}, ymm1","VMOVDQU8 ymm1, {k}{z}, ymm2/m256","vmovdqu8 ymm1, {k}{z}, ymm2/m256","EVEX.256.F2.0F.W0 7F /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r","Y","256" +"VMOVDQU8 ymm1, {k}{z}, ymm2/m256","VMOVDQU8 ymm2/m256, {k}{z}, ymm1","vmovdqu8 ymm2/m256, {k}{z}, ymm1","EVEX.256.F2.0F.W0 6F /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r","Y","256" +"VMOVDQU8 zmm2/m512, {k}{z}, zmm1","VMOVDQU8 zmm1, {k}{z}, zmm2/m512","vmovdqu8 zmm1, {k}{z}, zmm2/m512","EVEX.512.F2.0F.W0 7F /r","V","V","AVX512BW","scale64","w,r,r","Y","512" +"VMOVDQU8 zmm1, {k}{z}, zmm2/m512","VMOVDQU8 zmm2/m512, {k}{z}, zmm1","vmovdqu8 zmm2/m512, {k}{z}, zmm1","EVEX.512.F2.0F.W0 6F /r","V","V","AVX512BW","scale64","w,r,r","Y","512" +"VMOVHLPS xmm1, xmmV, xmm2","VMOVHLPS xmm2, xmmV, xmm1","vmovhlps xmm2, xmmV, xmm1","EVEX.NDS.128.0F.W0 12 /r","V","V","AVX512F+AVX512VL","modrm_regonly","w,r,r","","" +"VMOVHLPS xmm1, xmmV, xmm2","VMOVHLPS xmm2, xmmV, xmm1","vmovhlps xmm2, xmmV, xmm1","VEX.NDS.128.0F.WIG 12 /r","V","V","AVX","modrm_regonly","w,r,r","","" +"VMOVHPD xmm1, xmmV, m64","VMOVHPD m64, xmmV, xmm1","vmovhpd m64, xmmV, xmm1","EVEX.NDS.LIG.66.0F.W1 16 /r","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,r,r","","" +"VMOVHPD xmm1, xmmV, m64","VMOVHPD m64, xmmV, xmm1","vmovhpd m64, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 16 /r","V","V","AVX","modrm_memonly","w,r,r","","" +"VMOVHPD m64, xmm1","VMOVHPD xmm1, m64","vmovhpd xmm1, m64","EVEX.LIG.66.0F.W1 17 /r","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,r","","" +"VMOVHPD m64, xmm1","VMOVHPD xmm1, m64","vmovhpd xmm1, m64","VEX.128.66.0F.WIG 17 /r","V","V","AVX","modrm_memonly","w,r","","" +"VMOVHPS xmm1, xmmV, m64","VMOVHPS m64, xmmV, xmm1","vmovhps m64, xmmV, xmm1","EVEX.NDS.128.0F.W0 16 /r","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,r,r","","" +"VMOVHPS xmm1, xmmV, m64","VMOVHPS m64, xmmV, xmm1","vmovhps m64, xmmV, xmm1","VEX.NDS.128.0F.WIG 16 /r","V","V","AVX","modrm_memonly","w,r,r","","" +"VMOVHPS m64, xmm1","VMOVHPS xmm1, m64","vmovhps xmm1, m64","EVEX.128.0F.W0 17 
/r","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,r","","" +"VMOVHPS m64, xmm1","VMOVHPS xmm1, m64","vmovhps xmm1, m64","VEX.128.0F.WIG 17 /r","V","V","AVX","modrm_memonly","w,r","","" +"VMOVLHPS xmm1, xmmV, xmm2","VMOVLHPS xmm2, xmmV, xmm1","vmovlhps xmm2, xmmV, xmm1","EVEX.NDS.128.0F.W0 16 /r","V","V","AVX512F+AVX512VL","modrm_regonly","w,r,r","","" +"VMOVLHPS xmm1, xmmV, xmm2","VMOVLHPS xmm2, xmmV, xmm1","vmovlhps xmm2, xmmV, xmm1","VEX.NDS.128.0F.WIG 16 /r","V","V","AVX","modrm_regonly","w,r,r","","" +"VMOVLPD xmm1, xmmV, m64","VMOVLPD m64, xmmV, xmm1","vmovlpd m64, xmmV, xmm1","EVEX.NDS.LIG.66.0F.W1 12 /r","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,r,r","","" +"VMOVLPD xmm1, xmmV, m64","VMOVLPD m64, xmmV, xmm1","vmovlpd m64, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 12 /r","V","V","AVX","modrm_memonly","w,r,r","","" +"VMOVLPD m64, xmm1","VMOVLPD xmm1, m64","vmovlpd xmm1, m64","EVEX.LIG.66.0F.W1 13 /r","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,r","","" +"VMOVLPD m64, xmm1","VMOVLPD xmm1, m64","vmovlpd xmm1, m64","VEX.128.66.0F.WIG 13 /r","V","V","AVX","modrm_memonly","w,r","","" +"VMOVLPS xmm1, xmmV, m64","VMOVLPS m64, xmmV, xmm1","vmovlps m64, xmmV, xmm1","EVEX.NDS.128.0F.W0 12 /r","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,r,r","","" +"VMOVLPS xmm1, xmmV, m64","VMOVLPS m64, xmmV, xmm1","vmovlps m64, xmmV, xmm1","VEX.NDS.128.0F.WIG 12 /r","V","V","AVX","modrm_memonly","w,r,r","","" +"VMOVLPS m64, xmm1","VMOVLPS xmm1, m64","vmovlps xmm1, m64","EVEX.128.0F.W0 13 /r","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,r","","" +"VMOVLPS m64, xmm1","VMOVLPS xmm1, m64","vmovlps xmm1, m64","VEX.128.0F.WIG 13 /r","V","V","AVX","modrm_memonly","w,r","","" +"VMOVMSKPD r32, xmm2","VMOVMSKPD xmm2, r32","vmovmskpd xmm2, r32","VEX.128.66.0F.WIG 50 /r","V","V","AVX","modrm_regonly","w,r","","" +"VMOVMSKPD r32, ymm2","VMOVMSKPD ymm2, r32","vmovmskpd ymm2, r32","VEX.256.66.0F.WIG 50 /r","V","V","AVX","modrm_regonly","w,r","","" +"VMOVMSKPS r32, xmm2","VMOVMSKPS xmm2, r32","vmovmskps xmm2, r32","VEX.128.0F.WIG 50 /r","V","V","AVX","modrm_regonly","w,r","","" +"VMOVMSKPS r32, ymm2","VMOVMSKPS ymm2, r32","vmovmskps ymm2, r32","VEX.256.0F.WIG 50 /r","V","V","AVX","modrm_regonly","w,r","","" +"VMOVNTDQ m128, xmm1","VMOVNTDQ xmm1, m128","vmovntdq xmm1, m128","EVEX.128.66.0F.W0 E7 /r","V","V","AVX512F+AVX512VL","modrm_memonly,scale16","w,r","","" +"VMOVNTDQ m128, xmm1","VMOVNTDQ xmm1, m128","vmovntdq xmm1, m128","VEX.128.66.0F.WIG E7 /r","V","V","AVX","modrm_memonly","w,r","","" +"VMOVNTDQ m256, ymm1","VMOVNTDQ ymm1, m256","vmovntdq ymm1, m256","EVEX.256.66.0F.W0 E7 /r","V","V","AVX512F+AVX512VL","modrm_memonly,scale32","w,r","","" +"VMOVNTDQ m256, ymm1","VMOVNTDQ ymm1, m256","vmovntdq ymm1, m256","VEX.256.66.0F.WIG E7 /r","V","V","AVX","modrm_memonly","w,r","","" +"VMOVNTDQ m512, zmm1","VMOVNTDQ zmm1, m512","vmovntdq zmm1, m512","EVEX.512.66.0F.W0 E7 /r","V","V","AVX512F","modrm_memonly,scale64","w,r","","" +"VMOVNTDQA xmm1, m128","VMOVNTDQA m128, xmm1","vmovntdqa m128, xmm1","EVEX.128.66.0F38.W0 2A /r","V","V","AVX512F+AVX512VL","modrm_memonly,scale16","w,r","","" +"VMOVNTDQA xmm1, m128","VMOVNTDQA m128, xmm1","vmovntdqa m128, xmm1","VEX.128.66.0F38.WIG 2A /r","V","V","AVX","modrm_memonly","w,r","","" +"VMOVNTDQA ymm1, m256","VMOVNTDQA m256, ymm1","vmovntdqa m256, ymm1","EVEX.256.66.0F38.W0 2A /r","V","V","AVX512F+AVX512VL","modrm_memonly,scale32","w,r","","" +"VMOVNTDQA ymm1, m256","VMOVNTDQA m256, ymm1","vmovntdqa m256, ymm1","VEX.256.66.0F38.WIG 2A 
/r","V","V","AVX2","modrm_memonly","w,r","","" +"VMOVNTDQA zmm1, m512","VMOVNTDQA m512, zmm1","vmovntdqa m512, zmm1","EVEX.512.66.0F38.W0 2A /r","V","V","AVX512F","modrm_memonly,scale64","w,r","","" +"VMOVNTPD m128, xmm1","VMOVNTPD xmm1, m128","vmovntpd xmm1, m128","EVEX.128.66.0F.W1 2B /r","V","V","AVX512F+AVX512VL","modrm_memonly,scale16","w,r","","" +"VMOVNTPD m128, xmm1","VMOVNTPD xmm1, m128","vmovntpd xmm1, m128","VEX.128.66.0F.WIG 2B /r","V","V","AVX","modrm_memonly","w,r","","" +"VMOVNTPD m256, ymm1","VMOVNTPD ymm1, m256","vmovntpd ymm1, m256","EVEX.256.66.0F.W1 2B /r","V","V","AVX512F+AVX512VL","modrm_memonly,scale32","w,r","","" +"VMOVNTPD m256, ymm1","VMOVNTPD ymm1, m256","vmovntpd ymm1, m256","VEX.256.66.0F.WIG 2B /r","V","V","AVX","modrm_memonly","w,r","","" +"VMOVNTPD m512, zmm1","VMOVNTPD zmm1, m512","vmovntpd zmm1, m512","EVEX.512.66.0F.W1 2B /r","V","V","AVX512F","modrm_memonly,scale64","w,r","","" +"VMOVNTPS m128, xmm1","VMOVNTPS xmm1, m128","vmovntps xmm1, m128","EVEX.128.0F.W0 2B /r","V","V","AVX512F+AVX512VL","modrm_memonly,scale16","w,r","","" +"VMOVNTPS m128, xmm1","VMOVNTPS xmm1, m128","vmovntps xmm1, m128","VEX.128.0F.WIG 2B /r","V","V","AVX","modrm_memonly","w,r","","" +"VMOVNTPS m256, ymm1","VMOVNTPS ymm1, m256","vmovntps ymm1, m256","EVEX.256.0F.W0 2B /r","V","V","AVX512F+AVX512VL","modrm_memonly,scale32","w,r","","" +"VMOVNTPS m256, ymm1","VMOVNTPS ymm1, m256","vmovntps ymm1, m256","VEX.256.0F.WIG 2B /r","V","V","AVX","modrm_memonly","w,r","","" +"VMOVNTPS m512, zmm1","VMOVNTPS zmm1, m512","vmovntps zmm1, m512","EVEX.512.0F.W0 2B /r","V","V","AVX512F","modrm_memonly,scale64","w,r","","" +"VMOVQ xmm1, r/m64","VMOVQ r/m64, xmm1","vmovq r/m64, xmm1","EVEX.128.66.0F.W1 6E /r","N.S.","V","AVX512F+AVX512VL","scale8","w,r","","" +"VMOVQ xmm1, r/m64","VMOVQ r/m64, xmm1","vmovq r/m64, xmm1","VEX.128.66.0F.W1 6E /r","N.S.","V","AVX","","w,r","","" +"VMOVQ r/m64, xmm1","VMOVQ xmm1, r/m64","vmovq xmm1, r/m64","EVEX.128.66.0F.W1 7E /r","N.S.","V","AVX512F+AVX512VL","scale8","w,r","","" +"VMOVQ r/m64, xmm1","VMOVQ xmm1, r/m64","vmovq xmm1, r/m64","VEX.128.66.0F.W1 7E /r","N.S.","V","AVX","","w,r","","" +"VMOVQ xmm2/m64, xmm1","VMOVQ xmm1, xmm2/m64","vmovq xmm1, xmm2/m64","EVEX.LIG.66.0F.W1 D6 /r","V","V","AVX512F+AVX512VL","scale8","w,r","","" +"VMOVQ xmm2/m64, xmm1","VMOVQ xmm1, xmm2/m64","vmovq xmm1, xmm2/m64","VEX.128.66.0F.WIG D6 /r","V","V","AVX","","w,r","","" +"VMOVQ xmm1, xmm2/m64","VMOVQ xmm2/m64, xmm1","vmovq xmm2/m64, xmm1","EVEX.LIG.F3.0F.W1 7E /r","V","V","AVX512F+AVX512VL","scale8","w,r","","" +"VMOVQ xmm1, xmm2/m64","VMOVQ xmm2/m64, xmm1","vmovq xmm2/m64, xmm1","VEX.128.F3.0F.WIG 7E /r","V","V","AVX","","w,r","","" +"VMOVSD xmm1, m64","VMOVSD m64, xmm1","vmovsd m64, xmm1","VEX.LIG.F2.0F.WIG 10 /r","V","V","AVX","modrm_memonly","w,r","","" +"VMOVSD xmm1, {k}{z}, m64","VMOVSD m64, {k}{z}, xmm1","vmovsd m64, {k}{z}, xmm1","EVEX.LIG.F2.0F.W1 10 /r","V","V","AVX512F","modrm_memonly,scale8","w,r,r","","" +"VMOVSD m64, xmm1","VMOVSD xmm1, m64","vmovsd xmm1, m64","VEX.LIG.F2.0F.WIG 11 /r","V","V","AVX","modrm_memonly","w,r","","" +"VMOVSD xmm2, xmmV, xmm1","VMOVSD xmm1, xmmV, xmm2","vmovsd xmm1, xmmV, xmm2","VEX.NDS.LIG.F2.0F.WIG 11 /r","V","V","AVX","modrm_regonly","w,r,r","","" +"VMOVSD xmm2, {k}{z}, xmmV, xmm1","VMOVSD xmm1, xmmV, {k}{z}, xmm2","vmovsd xmm1, xmmV, {k}{z}, xmm2","EVEX.NDS.LIG.F2.0F.W1 11 /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VMOVSD m64, {k}, xmm1","VMOVSD xmm1, {k}, m64","vmovsd xmm1, {k}, m64","EVEX.LIG.F2.0F.W1 11 
/r","V","V","AVX512F","modrm_memonly,scale8","w,r,r","","" +"VMOVSD xmm1, xmmV, xmm2","VMOVSD xmm2, xmmV, xmm1","vmovsd xmm2, xmmV, xmm1","VEX.NDS.LIG.F2.0F.WIG 10 /r","V","V","AVX","modrm_regonly","w,r,r","","" +"VMOVSD xmm1, {k}{z}, xmmV, xmm2","VMOVSD xmm2, xmmV, {k}{z}, xmm1","vmovsd xmm2, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.F2.0F.W1 10 /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VMOVSHDUP xmm1, xmm2/m128","VMOVSHDUP xmm2/m128, xmm1","vmovshdup xmm2/m128, xmm1","VEX.128.F3.0F.WIG 16 /r","V","V","AVX","","w,r","","" +"VMOVSHDUP xmm1, {k}{z}, xmm2/m128","VMOVSHDUP xmm2/m128, {k}{z}, xmm1","vmovshdup xmm2/m128, {k}{z}, xmm1","EVEX.128.F3.0F.W0 16 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VMOVSHDUP ymm1, ymm2/m256","VMOVSHDUP ymm2/m256, ymm1","vmovshdup ymm2/m256, ymm1","VEX.256.F3.0F.WIG 16 /r","V","V","AVX","","w,r","","" +"VMOVSHDUP ymm1, {k}{z}, ymm2/m256","VMOVSHDUP ymm2/m256, {k}{z}, ymm1","vmovshdup ymm2/m256, {k}{z}, ymm1","EVEX.256.F3.0F.W0 16 /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","","" +"VMOVSHDUP zmm1, {k}{z}, zmm2/m512","VMOVSHDUP zmm2/m512, {k}{z}, zmm1","vmovshdup zmm2/m512, {k}{z}, zmm1","EVEX.512.F3.0F.W0 16 /r","V","V","AVX512F","scale64","w,r,r","","" +"VMOVSLDUP xmm1, xmm2/m128","VMOVSLDUP xmm2/m128, xmm1","vmovsldup xmm2/m128, xmm1","VEX.128.F3.0F.WIG 12 /r","V","V","AVX","","w,r","","" +"VMOVSLDUP xmm1, {k}{z}, xmm2/m128","VMOVSLDUP xmm2/m128, {k}{z}, xmm1","vmovsldup xmm2/m128, {k}{z}, xmm1","EVEX.128.F3.0F.W0 12 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VMOVSLDUP ymm1, ymm2/m256","VMOVSLDUP ymm2/m256, ymm1","vmovsldup ymm2/m256, ymm1","VEX.256.F3.0F.WIG 12 /r","V","V","AVX","","w,r","","" +"VMOVSLDUP ymm1, {k}{z}, ymm2/m256","VMOVSLDUP ymm2/m256, {k}{z}, ymm1","vmovsldup ymm2/m256, {k}{z}, ymm1","EVEX.256.F3.0F.W0 12 /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","","" +"VMOVSLDUP zmm1, {k}{z}, zmm2/m512","VMOVSLDUP zmm2/m512, {k}{z}, zmm1","vmovsldup zmm2/m512, {k}{z}, zmm1","EVEX.512.F3.0F.W0 12 /r","V","V","AVX512F","scale64","w,r,r","","" +"VMOVSS xmm1, m32","VMOVSS m32, xmm1","vmovss m32, xmm1","VEX.LIG.F3.0F.WIG 10 /r","V","V","AVX","modrm_memonly","w,r","","" +"VMOVSS xmm1, {k}{z}, m32","VMOVSS m32, {k}{z}, xmm1","vmovss m32, {k}{z}, xmm1","EVEX.LIG.F3.0F.W0 10 /r","V","V","AVX512F","modrm_memonly,scale4","w,r,r","","" +"VMOVSS m32, xmm1","VMOVSS xmm1, m32","vmovss xmm1, m32","VEX.LIG.F3.0F.WIG 11 /r","V","V","AVX","modrm_memonly","w,r","","" +"VMOVSS xmm2, xmmV, xmm1","VMOVSS xmm1, xmmV, xmm2","vmovss xmm1, xmmV, xmm2","VEX.NDS.LIG.F3.0F.WIG 11 /r","V","V","AVX","modrm_regonly","w,r,r","","" +"VMOVSS xmm2, {k}{z}, xmmV, xmm1","VMOVSS xmm1, xmmV, {k}{z}, xmm2","vmovss xmm1, xmmV, {k}{z}, xmm2","EVEX.NDS.LIG.F3.0F.W0 11 /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VMOVSS m32, {k}, xmm1","VMOVSS xmm1, {k}, m32","vmovss xmm1, {k}, m32","EVEX.LIG.F3.0F.W0 11 /r","V","V","AVX512F","modrm_memonly,scale4","w,r,r","","" +"VMOVSS xmm1, xmmV, xmm2","VMOVSS xmm2, xmmV, xmm1","vmovss xmm2, xmmV, xmm1","VEX.NDS.LIG.F3.0F.WIG 10 /r","V","V","AVX","modrm_regonly","w,r,r","","" +"VMOVSS xmm1, {k}{z}, xmmV, xmm2","VMOVSS xmm2, xmmV, {k}{z}, xmm1","vmovss xmm2, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.F3.0F.W0 10 /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VMOVUPD xmm2/m128, xmm1","VMOVUPD xmm1, xmm2/m128","vmovupd xmm1, xmm2/m128","VEX.128.66.0F.WIG 11 /r","V","V","AVX","","w,r","","" +"VMOVUPD xmm2/m128, {k}{z}, xmm1","VMOVUPD xmm1, {k}{z}, xmm2/m128","vmovupd xmm1, {k}{z}, xmm2/m128","EVEX.128.66.0F.W1 11 
/r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VMOVUPD xmm1, xmm2/m128","VMOVUPD xmm2/m128, xmm1","vmovupd xmm2/m128, xmm1","VEX.128.66.0F.WIG 10 /r","V","V","AVX","","w,r","","" +"VMOVUPD xmm1, {k}{z}, xmm2/m128","VMOVUPD xmm2/m128, {k}{z}, xmm1","vmovupd xmm2/m128, {k}{z}, xmm1","EVEX.128.66.0F.W1 10 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VMOVUPD ymm2/m256, ymm1","VMOVUPD ymm1, ymm2/m256","vmovupd ymm1, ymm2/m256","VEX.256.66.0F.WIG 11 /r","V","V","AVX","","w,r","","" +"VMOVUPD ymm2/m256, {k}{z}, ymm1","VMOVUPD ymm1, {k}{z}, ymm2/m256","vmovupd ymm1, {k}{z}, ymm2/m256","EVEX.256.66.0F.W1 11 /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","","" +"VMOVUPD ymm1, ymm2/m256","VMOVUPD ymm2/m256, ymm1","vmovupd ymm2/m256, ymm1","VEX.256.66.0F.WIG 10 /r","V","V","AVX","","w,r","","" +"VMOVUPD ymm1, {k}{z}, ymm2/m256","VMOVUPD ymm2/m256, {k}{z}, ymm1","vmovupd ymm2/m256, {k}{z}, ymm1","EVEX.256.66.0F.W1 10 /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","","" +"VMOVUPD zmm2/m512, {k}{z}, zmm1","VMOVUPD zmm1, {k}{z}, zmm2/m512","vmovupd zmm1, {k}{z}, zmm2/m512","EVEX.512.66.0F.W1 11 /r","V","V","AVX512F","scale64","w,r,r","","" +"VMOVUPD zmm1, {k}{z}, zmm2/m512","VMOVUPD zmm2/m512, {k}{z}, zmm1","vmovupd zmm2/m512, {k}{z}, zmm1","EVEX.512.66.0F.W1 10 /r","V","V","AVX512F","scale64","w,r,r","","" +"VMOVUPS xmm2/m128, xmm1","VMOVUPS xmm1, xmm2/m128","vmovups xmm1, xmm2/m128","VEX.128.0F.WIG 11 /r","V","V","AVX","","w,r","","" +"VMOVUPS xmm2/m128, {k}{z}, xmm1","VMOVUPS xmm1, {k}{z}, xmm2/m128","vmovups xmm1, {k}{z}, xmm2/m128","EVEX.128.0F.W0 11 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VMOVUPS xmm1, xmm2/m128","VMOVUPS xmm2/m128, xmm1","vmovups xmm2/m128, xmm1","VEX.128.0F.WIG 10 /r","V","V","AVX","","w,r","","" +"VMOVUPS xmm1, {k}{z}, xmm2/m128","VMOVUPS xmm2/m128, {k}{z}, xmm1","vmovups xmm2/m128, {k}{z}, xmm1","EVEX.128.0F.W0 10 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VMOVUPS ymm2/m256, ymm1","VMOVUPS ymm1, ymm2/m256","vmovups ymm1, ymm2/m256","VEX.256.0F.WIG 11 /r","V","V","AVX","","w,r","","" +"VMOVUPS ymm2/m256, {k}{z}, ymm1","VMOVUPS ymm1, {k}{z}, ymm2/m256","vmovups ymm1, {k}{z}, ymm2/m256","EVEX.256.0F.W0 11 /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","","" +"VMOVUPS ymm1, ymm2/m256","VMOVUPS ymm2/m256, ymm1","vmovups ymm2/m256, ymm1","VEX.256.0F.WIG 10 /r","V","V","AVX","","w,r","","" +"VMOVUPS ymm1, {k}{z}, ymm2/m256","VMOVUPS ymm2/m256, {k}{z}, ymm1","vmovups ymm2/m256, {k}{z}, ymm1","EVEX.256.0F.W0 10 /r","V","V","AVX512F+AVX512VL","scale32","w,r,r","","" +"VMOVUPS zmm2/m512, {k}{z}, zmm1","VMOVUPS zmm1, {k}{z}, zmm2/m512","vmovups zmm1, {k}{z}, zmm2/m512","EVEX.512.0F.W0 11 /r","V","V","AVX512F","scale64","w,r,r","","" +"VMOVUPS zmm1, {k}{z}, zmm2/m512","VMOVUPS zmm2/m512, {k}{z}, zmm1","vmovups zmm2/m512, {k}{z}, zmm1","EVEX.512.0F.W0 10 /r","V","V","AVX512F","scale64","w,r,r","","" +"VMPSADBW xmm1, xmmV, xmm2/m128, imm8u","VMPSADBW imm8u, xmm2/m128, xmmV, xmm1","vmpsadbw imm8u, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.WIG 42 /r ib","V","V","AVX","","w,r,r,r","","" +"VMPSADBW ymm1, ymmV, ymm2/m256, imm8u","VMPSADBW imm8u, ymm2/m256, ymmV, ymm1","vmpsadbw imm8u, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.WIG 42 /r ib","V","V","AVX2","","w,r,r,r","","" +"VMPTRLD m64","VMPTRLD m64","vmptrld m64","0F C7 /6","V","V","VTX","modrm_memonly","r","","" +"VMPTRST m64","VMPTRST m64","vmptrst m64","0F C7 /7","V","V","VTX","modrm_memonly","w","","" +"VMREAD r/m32, r32","VMREAD r32, r/m32","vmread r32, r/m32","0F 78 
/r","V","N.S.","VTX","","rw,r","","" +"VMREAD r/m64, r64","VMREAD r64, r/m64","vmread r64, r/m64","0F 78 /r","N.S.","V","VTX","default64","rw,r","","" +"VMRESUME","VMRESUME","vmresume","0F 01 C3","V","V","VTX","","","","" +"VMRUN EAX","VMRUNL EAX","vmrunl EAX","0F 01 D8","V","V","SVM","amd,modrm_regonly,operand32","r","Y","32" +"VMRUN RAX","VMRUNQ RAX","vmrunq RAX","REX.W 0F 01 D8","N.S.","V","SVM","amd,modrm_regonly","r","Y","64" +"VMRUN AX","VMRUNW AX","vmrunw AX","0F 01 D8","V","V","SVM","amd,modrm_regonly,operand16","r","Y","16" +"VMSAVE","VMSAVE","vmsave","0F 01 DB","V","V","SVM","amd","","","" +"VMULPD xmm1, xmmV, xmm2/m128","VMULPD xmm2/m128, xmmV, xmm1","vmulpd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 59 /r","V","V","AVX","","w,r,r","","" +"VMULPD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VMULPD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vmulpd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 59 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VMULPD ymm1, ymmV, ymm2/m256","VMULPD ymm2/m256, ymmV, ymm1","vmulpd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 59 /r","V","V","AVX","","w,r,r","","" +"VMULPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VMULPD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vmulpd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 59 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VMULPD zmm1{er}, {k}{z}, zmmV, zmm2","VMULPD zmm2, zmmV, {k}{z}, zmm1{er}","vmulpd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.NDS.512.66.0F.W1 59 /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VMULPD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VMULPD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vmulpd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 59 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VMULPS xmm1, xmmV, xmm2/m128","VMULPS xmm2/m128, xmmV, xmm1","vmulps xmm2/m128, xmmV, xmm1","VEX.NDS.128.0F.WIG 59 /r","V","V","AVX","","w,r,r","","" +"VMULPS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VMULPS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vmulps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.0F.W0 59 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VMULPS ymm1, ymmV, ymm2/m256","VMULPS ymm2/m256, ymmV, ymm1","vmulps ymm2/m256, ymmV, ymm1","VEX.NDS.256.0F.WIG 59 /r","V","V","AVX","","w,r,r","","" +"VMULPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VMULPS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vmulps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.0F.W0 59 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VMULPS zmm1{er}, {k}{z}, zmmV, zmm2","VMULPS zmm2, zmmV, {k}{z}, zmm1{er}","vmulps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.NDS.512.0F.W0 59 /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VMULPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VMULPS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vmulps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.0F.W0 59 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VMULSD xmm1{er}, {k}{z}, xmmV, xmm2","VMULSD xmm2, xmmV, {k}{z}, xmm1{er}","vmulsd xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.NDS.128.F2.0F.W1 59 /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VMULSD xmm1, xmmV, xmm2/m64","VMULSD xmm2/m64, xmmV, xmm1","vmulsd xmm2/m64, xmmV, xmm1","VEX.NDS.LIG.F2.0F.WIG 59 /r","V","V","AVX","","w,r,r","","" +"VMULSD xmm1, {k}{z}, xmmV, xmm2/m64","VMULSD xmm2/m64, xmmV, {k}{z}, xmm1","vmulsd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.F2.0F.W1 59 /r","V","V","AVX512F","scale8","w,r,r,r","","" +"VMULSS xmm1{er}, {k}{z}, xmmV, 
xmm2","VMULSS xmm2, xmmV, {k}{z}, xmm1{er}","vmulss xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.NDS.128.F3.0F.W0 59 /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VMULSS xmm1, xmmV, xmm2/m32","VMULSS xmm2/m32, xmmV, xmm1","vmulss xmm2/m32, xmmV, xmm1","VEX.NDS.LIG.F3.0F.WIG 59 /r","V","V","AVX","","w,r,r","","" +"VMULSS xmm1, {k}{z}, xmmV, xmm2/m32","VMULSS xmm2/m32, xmmV, {k}{z}, xmm1","vmulss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.F3.0F.W0 59 /r","V","V","AVX512F","scale4","w,r,r,r","","" +"VMWRITE r32, r/m32","VMWRITE r/m32, r32","vmwrite r/m32, r32","0F 79 /r","V","N.S.","VTX","","r,r","","" +"VMWRITE r64, r/m64","VMWRITE r/m64, r64","vmwrite r/m64, r64","0F 79 /r","N.S.","V","VTX","default64","r,r","","" +"VMXOFF","VMXOFF","vmxoff","0F 01 C4","V","V","VTX","","","","" +"VMXON m64","VMXON m64","vmxon m64","F3 0F C7 /6","V","V","VTX","modrm_memonly","r","","" +"VORPD xmm1, xmmV, xmm2/m128","VORPD xmm2/m128, xmmV, xmm1","vorpd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 56 /r","V","V","AVX","","w,r,r","","" +"VORPD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VORPD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vorpd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 56 /r","V","V","AVX512DQ+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VORPD ymm1, ymmV, ymm2/m256","VORPD ymm2/m256, ymmV, ymm1","vorpd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 56 /r","V","V","AVX","","w,r,r","","" +"VORPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VORPD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vorpd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 56 /r","V","V","AVX512DQ+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VORPD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VORPD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vorpd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 56 /r","V","V","AVX512DQ","bscale8,scale64","w,r,r,r","","" +"VORPS xmm1, xmmV, xmm2/m128","VORPS xmm2/m128, xmmV, xmm1","vorps xmm2/m128, xmmV, xmm1","VEX.NDS.128.0F.WIG 56 /r","V","V","AVX","","w,r,r","","" +"VORPS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VORPS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vorps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.0F.W0 56 /r","V","V","AVX512DQ+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VORPS ymm1, ymmV, ymm2/m256","VORPS ymm2/m256, ymmV, ymm1","vorps ymm2/m256, ymmV, ymm1","VEX.NDS.256.0F.WIG 56 /r","V","V","AVX","","w,r,r","","" +"VORPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VORPS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vorps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.0F.W0 56 /r","V","V","AVX512DQ+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VORPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VORPS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vorps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.0F.W0 56 /r","V","V","AVX512DQ","bscale4,scale64","w,r,r,r","","" +"VP4DPWSSD zmm1, {k}{z}, zmmV+3, m128","VP4DPWSSD m128, zmmV+3, {k}{z}, zmm1","vp4dpwssd m128, zmmV+3, {k}{z}, zmm1","EVEX.DDS.512.F2.0F38.W0 52 /r","V","V","AVX512_4VNNIW","modrm_memonly,scale16","rw,r,r,r","","" +"VP4DPWSSDS zmm1, {k}{z}, zmmV+3, m128","VP4DPWSSDS m128, zmmV+3, {k}{z}, zmm1","vp4dpwssds m128, zmmV+3, {k}{z}, zmm1","EVEX.DDS.512.F2.0F38.W0 53 /r","V","V","AVX512_4VNNIW","modrm_memonly,scale16","rw,r,r,r","","" +"VPABSB xmm1, xmm2/m128","VPABSB xmm2/m128, xmm1","vpabsb xmm2/m128, xmm1","VEX.128.66.0F38.WIG 1C /r","V","V","AVX","","w,r","","" +"VPABSB xmm1, {k}{z}, xmm2/m128","VPABSB xmm2/m128, {k}{z}, xmm1","vpabsb xmm2/m128, {k}{z}, xmm1","EVEX.128.66.0F38.WIG 1C 
/r","V","V","AVX512BW+AVX512VL","scale16","w,r,r","","" +"VPABSB ymm1, ymm2/m256","VPABSB ymm2/m256, ymm1","vpabsb ymm2/m256, ymm1","VEX.256.66.0F38.WIG 1C /r","V","V","AVX2","","w,r","","" +"VPABSB ymm1, {k}{z}, ymm2/m256","VPABSB ymm2/m256, {k}{z}, ymm1","vpabsb ymm2/m256, {k}{z}, ymm1","EVEX.256.66.0F38.WIG 1C /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r","","" +"VPABSB zmm1, {k}{z}, zmm2/m512","VPABSB zmm2/m512, {k}{z}, zmm1","vpabsb zmm2/m512, {k}{z}, zmm1","EVEX.512.66.0F38.WIG 1C /r","V","V","AVX512BW","scale64","w,r,r","","" +"VPABSD xmm1, xmm2/m128","VPABSD xmm2/m128, xmm1","vpabsd xmm2/m128, xmm1","VEX.128.66.0F38.WIG 1E /r","V","V","AVX","","w,r","","" +"VPABSD xmm1, {k}{z}, xmm2/m128/m32bcst","VPABSD xmm2/m128/m32bcst, {k}{z}, xmm1","vpabsd xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.66.0F38.W0 1E /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r","","" +"VPABSD ymm1, ymm2/m256","VPABSD ymm2/m256, ymm1","vpabsd ymm2/m256, ymm1","VEX.256.66.0F38.WIG 1E /r","V","V","AVX2","","w,r","","" +"VPABSD ymm1, {k}{z}, ymm2/m256/m32bcst","VPABSD ymm2/m256/m32bcst, {k}{z}, ymm1","vpabsd ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.66.0F38.W0 1E /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r","","" +"VPABSD zmm1, {k}{z}, zmm2/m512/m32bcst","VPABSD zmm2/m512/m32bcst, {k}{z}, zmm1","vpabsd zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W0 1E /r","V","V","AVX512F","bscale4,scale64","w,r,r","","" +"VPABSQ xmm1, {k}{z}, xmm2/m128/m64bcst","VPABSQ xmm2/m128/m64bcst, {k}{z}, xmm1","vpabsq xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.66.0F38.W1 1F /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r","","" +"VPABSQ ymm1, {k}{z}, ymm2/m256/m64bcst","VPABSQ ymm2/m256/m64bcst, {k}{z}, ymm1","vpabsq ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.66.0F38.W1 1F /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r","","" +"VPABSQ zmm1, {k}{z}, zmm2/m512/m64bcst","VPABSQ zmm2/m512/m64bcst, {k}{z}, zmm1","vpabsq zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W1 1F /r","V","V","AVX512F","bscale8,scale64","w,r,r","","" +"VPABSW xmm1, xmm2/m128","VPABSW xmm2/m128, xmm1","vpabsw xmm2/m128, xmm1","VEX.128.66.0F38.WIG 1D /r","V","V","AVX","","w,r","","" +"VPABSW xmm1, {k}{z}, xmm2/m128","VPABSW xmm2/m128, {k}{z}, xmm1","vpabsw xmm2/m128, {k}{z}, xmm1","EVEX.128.66.0F38.WIG 1D /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r","","" +"VPABSW ymm1, ymm2/m256","VPABSW ymm2/m256, ymm1","vpabsw ymm2/m256, ymm1","VEX.256.66.0F38.WIG 1D /r","V","V","AVX2","","w,r","","" +"VPABSW ymm1, {k}{z}, ymm2/m256","VPABSW ymm2/m256, {k}{z}, ymm1","vpabsw ymm2/m256, {k}{z}, ymm1","EVEX.256.66.0F38.WIG 1D /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r","","" +"VPABSW zmm1, {k}{z}, zmm2/m512","VPABSW zmm2/m512, {k}{z}, zmm1","vpabsw zmm2/m512, {k}{z}, zmm1","EVEX.512.66.0F38.WIG 1D /r","V","V","AVX512BW","scale64","w,r,r","","" +"VPACKSSDW xmm1, xmmV, xmm2/m128","VPACKSSDW xmm2/m128, xmmV, xmm1","vpackssdw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 6B /r","V","V","AVX","","w,r,r","","" +"VPACKSSDW xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPACKSSDW xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpackssdw xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W0 6B /r","V","V","AVX512BW+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPACKSSDW ymm1, ymmV, ymm2/m256","VPACKSSDW ymm2/m256, ymmV, ymm1","vpackssdw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 6B /r","V","V","AVX2","","w,r,r","","" +"VPACKSSDW ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPACKSSDW ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpackssdw 
ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W0 6B /r","V","V","AVX512BW+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPACKSSDW zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPACKSSDW zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpackssdw zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W0 6B /r","V","V","AVX512BW","bscale4,scale64","w,r,r,r","","" +"VPACKSSWB xmm1, xmmV, xmm2/m128","VPACKSSWB xmm2/m128, xmmV, xmm1","vpacksswb xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 63 /r","V","V","AVX","","w,r,r","","" +"VPACKSSWB xmm1, {k}{z}, xmmV, xmm2/m128","VPACKSSWB xmm2/m128, xmmV, {k}{z}, xmm1","vpacksswb xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG 63 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPACKSSWB ymm1, ymmV, ymm2/m256","VPACKSSWB ymm2/m256, ymmV, ymm1","vpacksswb ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 63 /r","V","V","AVX2","","w,r,r","","" +"VPACKSSWB ymm1, {k}{z}, ymmV, ymm2/m256","VPACKSSWB ymm2/m256, ymmV, {k}{z}, ymm1","vpacksswb ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG 63 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPACKSSWB zmm1, {k}{z}, zmmV, zmm2/m512","VPACKSSWB zmm2/m512, zmmV, {k}{z}, zmm1","vpacksswb zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG 63 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPACKUSDW xmm1, xmmV, xmm2/m128","VPACKUSDW xmm2/m128, xmmV, xmm1","vpackusdw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 2B /r","V","V","AVX","","w,r,r","","" +"VPACKUSDW xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPACKUSDW xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpackusdw xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W0 2B /r","V","V","AVX512BW+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPACKUSDW ymm1, ymmV, ymm2/m256","VPACKUSDW ymm2/m256, ymmV, ymm1","vpackusdw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 2B /r","V","V","AVX2","","w,r,r","","" +"VPACKUSDW ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPACKUSDW ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpackusdw ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 2B /r","V","V","AVX512BW+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPACKUSDW zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPACKUSDW zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpackusdw zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 2B /r","V","V","AVX512BW","bscale4,scale64","w,r,r,r","","" +"VPACKUSWB xmm1, xmmV, xmm2/m128","VPACKUSWB xmm2/m128, xmmV, xmm1","vpackuswb xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 67 /r","V","V","AVX","","w,r,r","","" +"VPACKUSWB xmm1, {k}{z}, xmmV, xmm2/m128","VPACKUSWB xmm2/m128, xmmV, {k}{z}, xmm1","vpackuswb xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG 67 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPACKUSWB ymm1, ymmV, ymm2/m256","VPACKUSWB ymm2/m256, ymmV, ymm1","vpackuswb ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 67 /r","V","V","AVX2","","w,r,r","","" +"VPACKUSWB ymm1, {k}{z}, ymmV, ymm2/m256","VPACKUSWB ymm2/m256, ymmV, {k}{z}, ymm1","vpackuswb ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG 67 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPACKUSWB zmm1, {k}{z}, zmmV, zmm2/m512","VPACKUSWB zmm2/m512, zmmV, {k}{z}, zmm1","vpackuswb zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG 67 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPADDB xmm1, xmmV, xmm2/m128","VPADDB xmm2/m128, xmmV, xmm1","vpaddb xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG FC /r","V","V","AVX","","w,r,r","","" +"VPADDB xmm1, {k}{z}, xmmV, xmm2/m128","VPADDB xmm2/m128, 
xmmV, {k}{z}, xmm1","vpaddb xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG FC /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPADDB ymm1, ymmV, ymm2/m256","VPADDB ymm2/m256, ymmV, ymm1","vpaddb ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG FC /r","V","V","AVX2","","w,r,r","","" +"VPADDB ymm1, {k}{z}, ymmV, ymm2/m256","VPADDB ymm2/m256, ymmV, {k}{z}, ymm1","vpaddb ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG FC /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPADDB zmm1, {k}{z}, zmmV, zmm2/m512","VPADDB zmm2/m512, zmmV, {k}{z}, zmm1","vpaddb zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG FC /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPADDD xmm1, xmmV, xmm2/m128","VPADDD xmm2/m128, xmmV, xmm1","vpaddd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG FE /r","V","V","AVX","","w,r,r","","" +"VPADDD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPADDD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpaddd xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W0 FE /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPADDD ymm1, ymmV, ymm2/m256","VPADDD ymm2/m256, ymmV, ymm1","vpaddd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG FE /r","V","V","AVX2","","w,r,r","","" +"VPADDD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPADDD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpaddd ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W0 FE /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPADDD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPADDD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpaddd zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W0 FE /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPADDQ xmm1, xmmV, xmm2/m128","VPADDQ xmm2/m128, xmmV, xmm1","vpaddq xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG D4 /r","V","V","AVX","","w,r,r","","" +"VPADDQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPADDQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpaddq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 D4 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPADDQ ymm1, ymmV, ymm2/m256","VPADDQ ymm2/m256, ymmV, ymm1","vpaddq ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG D4 /r","V","V","AVX2","","w,r,r","","" +"VPADDQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPADDQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpaddq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 D4 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPADDQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPADDQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpaddq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 D4 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPADDSB xmm1, xmmV, xmm2/m128","VPADDSB xmm2/m128, xmmV, xmm1","vpaddsb xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG EC /r","V","V","AVX","","w,r,r","","" +"VPADDSB xmm1, {k}{z}, xmmV, xmm2/m128","VPADDSB xmm2/m128, xmmV, {k}{z}, xmm1","vpaddsb xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG EC /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPADDSB ymm1, ymmV, ymm2/m256","VPADDSB ymm2/m256, ymmV, ymm1","vpaddsb ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG EC /r","V","V","AVX2","","w,r,r","","" +"VPADDSB ymm1, {k}{z}, ymmV, ymm2/m256","VPADDSB ymm2/m256, ymmV, {k}{z}, ymm1","vpaddsb ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG EC /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPADDSB zmm1, {k}{z}, zmmV, zmm2/m512","VPADDSB zmm2/m512, zmmV, {k}{z}, zmm1","vpaddsb zmm2/m512, zmmV, {k}{z}, 
zmm1","EVEX.NDS.512.66.0F.WIG EC /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPADDSW xmm1, xmmV, xmm2/m128","VPADDSW xmm2/m128, xmmV, xmm1","vpaddsw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG ED /r","V","V","AVX","","w,r,r","","" +"VPADDSW xmm1, {k}{z}, xmmV, xmm2/m128","VPADDSW xmm2/m128, xmmV, {k}{z}, xmm1","vpaddsw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG ED /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPADDSW ymm1, ymmV, ymm2/m256","VPADDSW ymm2/m256, ymmV, ymm1","vpaddsw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG ED /r","V","V","AVX2","","w,r,r","","" +"VPADDSW ymm1, {k}{z}, ymmV, ymm2/m256","VPADDSW ymm2/m256, ymmV, {k}{z}, ymm1","vpaddsw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG ED /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPADDSW zmm1, {k}{z}, zmmV, zmm2/m512","VPADDSW zmm2/m512, zmmV, {k}{z}, zmm1","vpaddsw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG ED /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPADDUSB xmm1, xmmV, xmm2/m128","VPADDUSB xmm2/m128, xmmV, xmm1","vpaddusb xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG DC /r","V","V","AVX","","w,r,r","","" +"VPADDUSB xmm1, {k}{z}, xmmV, xmm2/m128","VPADDUSB xmm2/m128, xmmV, {k}{z}, xmm1","vpaddusb xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG DC /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPADDUSB ymm1, ymmV, ymm2/m256","VPADDUSB ymm2/m256, ymmV, ymm1","vpaddusb ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG DC /r","V","V","AVX2","","w,r,r","","" +"VPADDUSB ymm1, {k}{z}, ymmV, ymm2/m256","VPADDUSB ymm2/m256, ymmV, {k}{z}, ymm1","vpaddusb ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG DC /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPADDUSB zmm1, {k}{z}, zmmV, zmm2/m512","VPADDUSB zmm2/m512, zmmV, {k}{z}, zmm1","vpaddusb zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG DC /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPADDUSW xmm1, xmmV, xmm2/m128","VPADDUSW xmm2/m128, xmmV, xmm1","vpaddusw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG DD /r","V","V","AVX","","w,r,r","","" +"VPADDUSW xmm1, {k}{z}, xmmV, xmm2/m128","VPADDUSW xmm2/m128, xmmV, {k}{z}, xmm1","vpaddusw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG DD /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPADDUSW ymm1, ymmV, ymm2/m256","VPADDUSW ymm2/m256, ymmV, ymm1","vpaddusw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG DD /r","V","V","AVX2","","w,r,r","","" +"VPADDUSW ymm1, {k}{z}, ymmV, ymm2/m256","VPADDUSW ymm2/m256, ymmV, {k}{z}, ymm1","vpaddusw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG DD /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPADDUSW zmm1, {k}{z}, zmmV, zmm2/m512","VPADDUSW zmm2/m512, zmmV, {k}{z}, zmm1","vpaddusw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG DD /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPADDW xmm1, xmmV, xmm2/m128","VPADDW xmm2/m128, xmmV, xmm1","vpaddw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG FD /r","V","V","AVX","","w,r,r","","" +"VPADDW xmm1, {k}{z}, xmmV, xmm2/m128","VPADDW xmm2/m128, xmmV, {k}{z}, xmm1","vpaddw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG FD /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPADDW ymm1, ymmV, ymm2/m256","VPADDW ymm2/m256, ymmV, ymm1","vpaddw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG FD /r","V","V","AVX2","","w,r,r","","" +"VPADDW ymm1, {k}{z}, ymmV, ymm2/m256","VPADDW ymm2/m256, ymmV, {k}{z}, ymm1","vpaddw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG FD 
/r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPADDW zmm1, {k}{z}, zmmV, zmm2/m512","VPADDW zmm2/m512, zmmV, {k}{z}, zmm1","vpaddw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG FD /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPALIGNR xmm1, xmmV, xmm2/m128, imm8u","VPALIGNR imm8u, xmm2/m128, xmmV, xmm1","vpalignr imm8u, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.WIG 0F /r ib","V","V","AVX","","w,r,r,r","","" +"VPALIGNR xmm1, {k}{z}, xmmV, xmm2/m128, imm8u","VPALIGNR imm8u, xmm2/m128, xmmV, {k}{z}, xmm1","vpalignr imm8u, xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F3A.WIG 0F /r ib","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r,r","","" +"VPALIGNR ymm1, ymmV, ymm2/m256, imm8u","VPALIGNR imm8u, ymm2/m256, ymmV, ymm1","vpalignr imm8u, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.WIG 0F /r ib","V","V","AVX2","","w,r,r,r","","" +"VPALIGNR ymm1, {k}{z}, ymmV, ymm2/m256, imm8u","VPALIGNR imm8u, ymm2/m256, ymmV, {k}{z}, ymm1","vpalignr imm8u, ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.WIG 0F /r ib","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r,r","","" +"VPALIGNR zmm1, {k}{z}, zmmV, zmm2/m512, imm8u","VPALIGNR imm8u, zmm2/m512, zmmV, {k}{z}, zmm1","vpalignr imm8u, zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.WIG 0F /r ib","V","V","AVX512BW","scale64","w,r,r,r,r","","" +"VPAND xmm1, xmmV, xmm2/m128","VPAND xmm2/m128, xmmV, xmm1","vpand xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG DB /r","V","V","AVX","","w,r,r","","" +"VPAND ymm1, ymmV, ymm2/m256","VPAND ymm2/m256, ymmV, ymm1","vpand ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG DB /r","V","V","AVX2","","w,r,r","","" +"VPANDD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPANDD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpandd xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W0 DB /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPANDD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPANDD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpandd ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W0 DB /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPANDD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPANDD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpandd zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W0 DB /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPANDN xmm1, xmmV, xmm2/m128","VPANDN xmm2/m128, xmmV, xmm1","vpandn xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG DF /r","V","V","AVX","","w,r,r","","" +"VPANDN ymm1, ymmV, ymm2/m256","VPANDN ymm2/m256, ymmV, ymm1","vpandn ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG DF /r","V","V","AVX2","","w,r,r","","" +"VPANDND xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPANDND xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpandnd xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W0 DF /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPANDND ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPANDND ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpandnd ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W0 DF /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPANDND zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPANDND zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpandnd zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W0 DF /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPANDNQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPANDNQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpandnq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 DF 
/r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPANDNQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPANDNQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpandnq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 DF /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPANDNQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPANDNQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpandnq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 DF /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPANDQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPANDQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpandq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 DB /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPANDQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPANDQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpandq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 DB /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPANDQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPANDQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpandq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 DB /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPAVGB xmm1, xmmV, xmm2/m128","VPAVGB xmm2/m128, xmmV, xmm1","vpavgb xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG E0 /r","V","V","AVX","","w,r,r","","" +"VPAVGB xmm1, {k}{z}, xmmV, xmm2/m128","VPAVGB xmm2/m128, xmmV, {k}{z}, xmm1","vpavgb xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG E0 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPAVGB ymm1, ymmV, ymm2/m256","VPAVGB ymm2/m256, ymmV, ymm1","vpavgb ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG E0 /r","V","V","AVX2","","w,r,r","","" +"VPAVGB ymm1, {k}{z}, ymmV, ymm2/m256","VPAVGB ymm2/m256, ymmV, {k}{z}, ymm1","vpavgb ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG E0 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPAVGB zmm1, {k}{z}, zmmV, zmm2/m512","VPAVGB zmm2/m512, zmmV, {k}{z}, zmm1","vpavgb zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG E0 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPAVGW xmm1, xmmV, xmm2/m128","VPAVGW xmm2/m128, xmmV, xmm1","vpavgw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG E3 /r","V","V","AVX","","w,r,r","","" +"VPAVGW xmm1, {k}{z}, xmmV, xmm2/m128","VPAVGW xmm2/m128, xmmV, {k}{z}, xmm1","vpavgw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG E3 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPAVGW ymm1, ymmV, ymm2/m256","VPAVGW ymm2/m256, ymmV, ymm1","vpavgw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG E3 /r","V","V","AVX2","","w,r,r","","" +"VPAVGW ymm1, {k}{z}, ymmV, ymm2/m256","VPAVGW ymm2/m256, ymmV, {k}{z}, ymm1","vpavgw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG E3 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPAVGW zmm1, {k}{z}, zmmV, zmm2/m512","VPAVGW zmm2/m512, zmmV, {k}{z}, zmm1","vpavgw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG E3 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPBLENDD xmm1, xmmV, xmm2/m128, imm8u","VPBLENDD imm8u, xmm2/m128, xmmV, xmm1","vpblendd imm8u, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W0 02 /r ib","V","V","AVX2","","w,r,r,r","","" +"VPBLENDD ymm1, ymmV, ymm2/m256, imm8u","VPBLENDD imm8u, ymm2/m256, ymmV, ymm1","vpblendd imm8u, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 02 /r ib","V","V","AVX2","","w,r,r,r","","" +"VPBLENDMB xmm1, {k}{z}, xmmV, xmm2/m128","VPBLENDMB xmm2/m128, xmmV, {k}{z}, xmm1","vpblendmb xmm2/m128, xmmV, {k}{z}, 
xmm1","EVEX.NDS.128.66.0F38.W0 66 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPBLENDMB ymm1, {k}{z}, ymmV, ymm2/m256","VPBLENDMB ymm2/m256, ymmV, {k}{z}, ymm1","vpblendmb ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 66 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPBLENDMB zmm1, {k}{z}, zmmV, zmm2/m512","VPBLENDMB zmm2/m512, zmmV, {k}{z}, zmm1","vpblendmb zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 66 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPBLENDMD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPBLENDMD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpblendmd xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W0 64 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPBLENDMD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPBLENDMD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpblendmd ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 64 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPBLENDMD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPBLENDMD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpblendmd zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 64 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPBLENDMQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPBLENDMQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpblendmq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 64 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPBLENDMQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPBLENDMQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpblendmq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 64 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPBLENDMQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPBLENDMQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpblendmq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 64 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPBLENDMW xmm1, {k}{z}, xmmV, xmm2/m128","VPBLENDMW xmm2/m128, xmmV, {k}{z}, xmm1","vpblendmw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 66 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPBLENDMW ymm1, {k}{z}, ymmV, ymm2/m256","VPBLENDMW ymm2/m256, ymmV, {k}{z}, ymm1","vpblendmw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 66 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPBLENDMW zmm1, {k}{z}, zmmV, zmm2/m512","VPBLENDMW zmm2/m512, zmmV, {k}{z}, zmm1","vpblendmw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 66 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPBLENDVB xmm1, xmmV, xmm2/m128, xmmIH","VPBLENDVB xmmIH, xmm2/m128, xmmV, xmm1","vpblendvb xmmIH, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W0 4C /r /is4","V","V","AVX","","w,r,r,r","","" +"VPBLENDVB ymm1, ymmV, ymm2/m256, ymmIH","VPBLENDVB ymmIH, ymm2/m256, ymmV, ymm1","vpblendvb ymmIH, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 4C /r /is4","V","V","AVX2","","w,r,r,r","","" +"VPBLENDW xmm1, xmmV, xmm2/m128, imm8u","VPBLENDW imm8u, xmm2/m128, xmmV, xmm1","vpblendw imm8u, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.WIG 0E /r ib","V","V","AVX","","w,r,r,r","","" +"VPBLENDW ymm1, ymmV, ymm2/m256, imm8u","VPBLENDW imm8u, ymm2/m256, ymmV, ymm1","vpblendw imm8u, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.WIG 0E /r ib","V","V","AVX2","","w,r,r,r","","" +"VPBROADCASTB xmm1, {k}{z}, rmr32","VPBROADCASTB rmr32, {k}{z}, xmm1","vpbroadcastb rmr32, {k}{z}, xmm1","EVEX.128.66.0F38.W0 7A /r","V","V","AVX512BW+AVX512VL","modrm_regonly","w,r,r","","" 
+"VPBROADCASTB ymm1, {k}{z}, rmr32","VPBROADCASTB rmr32, {k}{z}, ymm1","vpbroadcastb rmr32, {k}{z}, ymm1","EVEX.256.66.0F38.W0 7A /r","V","V","AVX512BW+AVX512VL","modrm_regonly","w,r,r","","" +"VPBROADCASTB zmm1, {k}{z}, rmr32","VPBROADCASTB rmr32, {k}{z}, zmm1","vpbroadcastb rmr32, {k}{z}, zmm1","EVEX.512.66.0F38.W0 7A /r","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"VPBROADCASTB xmm1, xmm2/m8","VPBROADCASTB xmm2/m8, xmm1","vpbroadcastb xmm2/m8, xmm1","VEX.128.66.0F38.W0 78 /r","V","V","AVX2","","w,r","","" +"VPBROADCASTB ymm1, xmm2/m8","VPBROADCASTB xmm2/m8, ymm1","vpbroadcastb xmm2/m8, ymm1","VEX.256.66.0F38.W0 78 /r","V","V","AVX2","","w,r","","" +"VPBROADCASTB xmm1, {k}{z}, xmm2/m8","VPBROADCASTB xmm2/m8, {k}{z}, xmm1","vpbroadcastb xmm2/m8, {k}{z}, xmm1","EVEX.128.66.0F38.W0 78 /r","V","V","AVX512BW+AVX512VL","scale1","w,r,r","","" +"VPBROADCASTB ymm1, {k}{z}, xmm2/m8","VPBROADCASTB xmm2/m8, {k}{z}, ymm1","vpbroadcastb xmm2/m8, {k}{z}, ymm1","EVEX.256.66.0F38.W0 78 /r","V","V","AVX512BW+AVX512VL","scale1","w,r,r","","" +"VPBROADCASTB zmm1, {k}{z}, xmm2/m8","VPBROADCASTB xmm2/m8, {k}{z}, zmm1","vpbroadcastb xmm2/m8, {k}{z}, zmm1","EVEX.512.66.0F38.W0 78 /r","V","V","AVX512BW","scale1","w,r,r","","" +"VPBROADCASTD xmm1, {k}{z}, rmr32","VPBROADCASTD rmr32, {k}{z}, xmm1","vpbroadcastd rmr32, {k}{z}, xmm1","EVEX.128.66.0F38.W0 7C /r","V","V","AVX512F+AVX512VL","modrm_regonly","w,r,r","","" +"VPBROADCASTD ymm1, {k}{z}, rmr32","VPBROADCASTD rmr32, {k}{z}, ymm1","vpbroadcastd rmr32, {k}{z}, ymm1","EVEX.256.66.0F38.W0 7C /r","V","V","AVX512F+AVX512VL","modrm_regonly","w,r,r","","" +"VPBROADCASTD zmm1, {k}{z}, rmr32","VPBROADCASTD rmr32, {k}{z}, zmm1","vpbroadcastd rmr32, {k}{z}, zmm1","EVEX.512.66.0F38.W0 7C /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"VPBROADCASTD xmm1, xmm2/m32","VPBROADCASTD xmm2/m32, xmm1","vpbroadcastd xmm2/m32, xmm1","VEX.128.66.0F38.W0 58 /r","V","V","AVX2","","w,r","","" +"VPBROADCASTD ymm1, xmm2/m32","VPBROADCASTD xmm2/m32, ymm1","vpbroadcastd xmm2/m32, ymm1","VEX.256.66.0F38.W0 58 /r","V","V","AVX2","","w,r","","" +"VPBROADCASTD xmm1, {k}{z}, xmm2/m32","VPBROADCASTD xmm2/m32, {k}{z}, xmm1","vpbroadcastd xmm2/m32, {k}{z}, xmm1","EVEX.128.66.0F38.W0 58 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPBROADCASTD ymm1, {k}{z}, xmm2/m32","VPBROADCASTD xmm2/m32, {k}{z}, ymm1","vpbroadcastd xmm2/m32, {k}{z}, ymm1","EVEX.256.66.0F38.W0 58 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPBROADCASTD zmm1, {k}{z}, xmm2/m32","VPBROADCASTD xmm2/m32, {k}{z}, zmm1","vpbroadcastd xmm2/m32, {k}{z}, zmm1","EVEX.512.66.0F38.W0 58 /r","V","V","AVX512F","scale4","w,r,r","","" +"VPBROADCASTMB2Q xmm1, k2","VPBROADCASTMB2Q k2, xmm1","vpbroadcastmb2q k2, xmm1","EVEX.128.F3.0F38.W1 2A /r","V","V","AVX512CD+AVX512VL","modrm_regonly","w,r","","" +"VPBROADCASTMB2Q ymm1, k2","VPBROADCASTMB2Q k2, ymm1","vpbroadcastmb2q k2, ymm1","EVEX.256.F3.0F38.W1 2A /r","V","V","AVX512CD+AVX512VL","modrm_regonly","w,r","","" +"VPBROADCASTMB2Q zmm1, k2","VPBROADCASTMB2Q k2, zmm1","vpbroadcastmb2q k2, zmm1","EVEX.512.F3.0F38.W1 2A /r","V","V","AVX512CD","modrm_regonly","w,r","","" +"VPBROADCASTMW2D xmm1, k2","VPBROADCASTMW2D k2, xmm1","vpbroadcastmw2d k2, xmm1","EVEX.128.F3.0F38.W0 3A /r","V","V","AVX512CD+AVX512VL","modrm_regonly","w,r","","" +"VPBROADCASTMW2D ymm1, k2","VPBROADCASTMW2D k2, ymm1","vpbroadcastmw2d k2, ymm1","EVEX.256.F3.0F38.W0 3A /r","V","V","AVX512CD+AVX512VL","modrm_regonly","w,r","","" +"VPBROADCASTMW2D zmm1, k2","VPBROADCASTMW2D k2, zmm1","vpbroadcastmw2d 
k2, zmm1","EVEX.512.F3.0F38.W0 3A /r","V","V","AVX512CD","modrm_regonly","w,r","","" +"VPBROADCASTQ xmm1, {k}{z}, rmr64","VPBROADCASTQ rmr64, {k}{z}, xmm1","vpbroadcastq rmr64, {k}{z}, xmm1","EVEX.128.66.0F38.W1 7C /r","N.S.","V","AVX512F+AVX512VL","modrm_regonly","w,r,r","","" +"VPBROADCASTQ ymm1, {k}{z}, rmr64","VPBROADCASTQ rmr64, {k}{z}, ymm1","vpbroadcastq rmr64, {k}{z}, ymm1","EVEX.256.66.0F38.W1 7C /r","N.S.","V","AVX512F+AVX512VL","modrm_regonly","w,r,r","","" +"VPBROADCASTQ zmm1, {k}{z}, rmr64","VPBROADCASTQ rmr64, {k}{z}, zmm1","vpbroadcastq rmr64, {k}{z}, zmm1","EVEX.512.66.0F38.W1 7C /r","N.S.","V","AVX512F","modrm_regonly","w,r,r","","" +"VPBROADCASTQ xmm1, xmm2/m64","VPBROADCASTQ xmm2/m64, xmm1","vpbroadcastq xmm2/m64, xmm1","VEX.128.66.0F38.W0 59 /r","V","V","AVX2","","w,r","","" +"VPBROADCASTQ ymm1, xmm2/m64","VPBROADCASTQ xmm2/m64, ymm1","vpbroadcastq xmm2/m64, ymm1","VEX.256.66.0F38.W0 59 /r","V","V","AVX2","","w,r","","" +"VPBROADCASTQ xmm1, {k}{z}, xmm2/m64","VPBROADCASTQ xmm2/m64, {k}{z}, xmm1","vpbroadcastq xmm2/m64, {k}{z}, xmm1","EVEX.128.66.0F38.W1 59 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPBROADCASTQ ymm1, {k}{z}, xmm2/m64","VPBROADCASTQ xmm2/m64, {k}{z}, ymm1","vpbroadcastq xmm2/m64, {k}{z}, ymm1","EVEX.256.66.0F38.W1 59 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPBROADCASTQ zmm1, {k}{z}, xmm2/m64","VPBROADCASTQ xmm2/m64, {k}{z}, zmm1","vpbroadcastq xmm2/m64, {k}{z}, zmm1","EVEX.512.66.0F38.W1 59 /r","V","V","AVX512F","scale8","w,r,r","","" +"VPBROADCASTW xmm1, {k}{z}, rmr32","VPBROADCASTW rmr32, {k}{z}, xmm1","vpbroadcastw rmr32, {k}{z}, xmm1","EVEX.128.66.0F38.W0 7B /r","V","V","AVX512BW+AVX512VL","modrm_regonly","w,r,r","","" +"VPBROADCASTW ymm1, {k}{z}, rmr32","VPBROADCASTW rmr32, {k}{z}, ymm1","vpbroadcastw rmr32, {k}{z}, ymm1","EVEX.256.66.0F38.W0 7B /r","V","V","AVX512BW+AVX512VL","modrm_regonly","w,r,r","","" +"VPBROADCASTW zmm1, {k}{z}, rmr32","VPBROADCASTW rmr32, {k}{z}, zmm1","vpbroadcastw rmr32, {k}{z}, zmm1","EVEX.512.66.0F38.W0 7B /r","V","V","AVX512BW","modrm_regonly","w,r,r","","" +"VPBROADCASTW xmm1, xmm2/m16","VPBROADCASTW xmm2/m16, xmm1","vpbroadcastw xmm2/m16, xmm1","VEX.128.66.0F38.W0 79 /r","V","V","AVX2","","w,r","","" +"VPBROADCASTW ymm1, xmm2/m16","VPBROADCASTW xmm2/m16, ymm1","vpbroadcastw xmm2/m16, ymm1","VEX.256.66.0F38.W0 79 /r","V","V","AVX2","","w,r","","" +"VPBROADCASTW xmm1, {k}{z}, xmm2/m16","VPBROADCASTW xmm2/m16, {k}{z}, xmm1","vpbroadcastw xmm2/m16, {k}{z}, xmm1","EVEX.128.66.0F38.W0 79 /r","V","V","AVX512BW+AVX512VL","scale2","w,r,r","","" +"VPBROADCASTW ymm1, {k}{z}, xmm2/m16","VPBROADCASTW xmm2/m16, {k}{z}, ymm1","vpbroadcastw xmm2/m16, {k}{z}, ymm1","EVEX.256.66.0F38.W0 79 /r","V","V","AVX512BW+AVX512VL","scale2","w,r,r","","" +"VPBROADCASTW zmm1, {k}{z}, xmm2/m16","VPBROADCASTW xmm2/m16, {k}{z}, zmm1","vpbroadcastw xmm2/m16, {k}{z}, zmm1","EVEX.512.66.0F38.W0 79 /r","V","V","AVX512BW","scale2","w,r,r","","" +"VPCLMULQDQ xmm1, xmmV, xmm2/m128, imm8u","VPCLMULQDQ imm8u, xmm2/m128, xmmV, xmm1","vpclmulqdq imm8u, xmm2/m128, xmmV, xmm1","EVEX.NDS.128.66.0F3A.WIG 44 /r ib","V","V","VPCLMULQDQ+AVX512VL","scale16","w,r,r,r","","" +"VPCLMULQDQ xmm1, xmmV, xmm2/m128, imm8u","VPCLMULQDQ imm8u, xmm2/m128, xmmV, xmm1","vpclmulqdq imm8u, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.WIG 44 /r ib","V","V","PCLMULQDQ+AVX","","w,r,r,r","","" +"VPCLMULQDQ ymm1, ymmV, ymm2/m256, imm8u","VPCLMULQDQ imm8u, ymm2/m256, ymmV, ymm1","vpclmulqdq imm8u, ymm2/m256, ymmV, ymm1","EVEX.NDS.256.66.0F3A.WIG 44 /r 
ib","V","V","VPCLMULQDQ+AVX512VL","scale32","w,r,r,r","","" +"VPCLMULQDQ ymm1, ymmV, ymm2/m256, imm8u","VPCLMULQDQ imm8u, ymm2/m256, ymmV, ymm1","vpclmulqdq imm8u, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.WIG 44 /r ib","V","V","VPCLMULQDQ","","w,r,r,r","","" +"VPCLMULQDQ zmm1, zmmV, zmm2/m512, imm8u","VPCLMULQDQ imm8u, zmm2/m512, zmmV, zmm1","vpclmulqdq imm8u, zmm2/m512, zmmV, zmm1","EVEX.NDS.512.66.0F3A.WIG 44 /r ib","V","V","VPCLMULQDQ+AVX512F","scale64","w,r,r,r","","" +"VPCMOV xmm1, xmmV, xmmIH, xmm2/m128","VPCMOV xmm2/m128, xmmIH, xmmV, xmm1","vpcmov xmm2/m128, xmmIH, xmmV, xmm1","XOP.NDS.128.08.W1 A2 /r /is4","V","V","XOP","amd","w,r,r,r","","" +"VPCMOV xmm1, xmmV, xmm2/m128, xmmIH","VPCMOV xmmIH, xmm2/m128, xmmV, xmm1","vpcmov xmmIH, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 A2 /r /is4","V","V","XOP","amd","w,r,r,r","","" +"VPCMOV ymm1, ymmV, ymmIH, ymm2/m256","VPCMOV ymm2/m256, ymmIH, ymmV, ymm1","vpcmov ymm2/m256, ymmIH, ymmV, ymm1","XOP.NDS.256.08.W1 A2 /r /is4","V","V","XOP","amd","w,r,r,r","","" +"VPCMOV ymm1, ymmV, ymm2/m256, ymmIH","VPCMOV ymmIH, ymm2/m256, ymmV, ymm1","vpcmov ymmIH, ymm2/m256, ymmV, ymm1","XOP.NDS.256.08.W0 A2 /r /is4","V","V","XOP","amd","w,r,r,r","","" +"VPCMPB k1, {k}, xmmV, xmm2/m128, imm8u","VPCMPB imm8u, xmm2/m128, xmmV, {k}, k1","vpcmpb imm8u, xmm2/m128, xmmV, {k}, k1","EVEX.NDS.128.66.0F3A.W0 3F /r ib","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r,r","","" +"VPCMPB k1, {k}, ymmV, ymm2/m256, imm8u","VPCMPB imm8u, ymm2/m256, ymmV, {k}, k1","vpcmpb imm8u, ymm2/m256, ymmV, {k}, k1","EVEX.NDS.256.66.0F3A.W0 3F /r ib","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r,r","","" +"VPCMPB k1, {k}, zmmV, zmm2/m512, imm8u","VPCMPB imm8u, zmm2/m512, zmmV, {k}, k1","vpcmpb imm8u, zmm2/m512, zmmV, {k}, k1","EVEX.NDS.512.66.0F3A.W0 3F /r ib","V","V","AVX512BW","scale64","w,r,r,r,r","","" +"VPCMPD k1, {k}, xmmV, xmm2/m128/m32bcst, imm8u","VPCMPD imm8u, xmm2/m128/m32bcst, xmmV, {k}, k1","vpcmpd imm8u, xmm2/m128/m32bcst, xmmV, {k}, k1","EVEX.NDS.128.66.0F3A.W0 1F /r ib","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r,r","","" +"VPCMPD k1, {k}, ymmV, ymm2/m256/m32bcst, imm8u","VPCMPD imm8u, ymm2/m256/m32bcst, ymmV, {k}, k1","vpcmpd imm8u, ymm2/m256/m32bcst, ymmV, {k}, k1","EVEX.NDS.256.66.0F3A.W0 1F /r ib","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r,r","","" +"VPCMPD k1, {k}, zmmV, zmm2/m512/m32bcst, imm8u","VPCMPD imm8u, zmm2/m512/m32bcst, zmmV, {k}, k1","vpcmpd imm8u, zmm2/m512/m32bcst, zmmV, {k}, k1","EVEX.NDS.512.66.0F3A.W0 1F /r ib","V","V","AVX512F","bscale4,scale64","w,r,r,r,r","","" +"VPCMPEQB xmm1, xmmV, xmm2/m128","VPCMPEQB xmm2/m128, xmmV, xmm1","vpcmpeqb xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 74 /r","V","V","AVX","","w,r,r","","" +"VPCMPEQB k1, {k}, xmmV, xmm2/m128","VPCMPEQB xmm2/m128, xmmV, {k}, k1","vpcmpeqb xmm2/m128, xmmV, {k}, k1","EVEX.NDS.128.66.0F.WIG 74 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPCMPEQB ymm1, ymmV, ymm2/m256","VPCMPEQB ymm2/m256, ymmV, ymm1","vpcmpeqb ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 74 /r","V","V","AVX2","","w,r,r","","" +"VPCMPEQB k1, {k}, ymmV, ymm2/m256","VPCMPEQB ymm2/m256, ymmV, {k}, k1","vpcmpeqb ymm2/m256, ymmV, {k}, k1","EVEX.NDS.256.66.0F.WIG 74 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPCMPEQB k1, {k}, zmmV, zmm2/m512","VPCMPEQB zmm2/m512, zmmV, {k}, k1","vpcmpeqb zmm2/m512, zmmV, {k}, k1","EVEX.NDS.512.66.0F.WIG 74 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPCMPEQD xmm1, xmmV, xmm2/m128","VPCMPEQD xmm2/m128, xmmV, xmm1","vpcmpeqd 
xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 76 /r","V","V","AVX","","w,r,r","","" +"VPCMPEQD k1, {k}, xmmV, xmm2/m128/m32bcst","VPCMPEQD xmm2/m128/m32bcst, xmmV, {k}, k1","vpcmpeqd xmm2/m128/m32bcst, xmmV, {k}, k1","EVEX.NDS.128.66.0F.W0 76 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPCMPEQD ymm1, ymmV, ymm2/m256","VPCMPEQD ymm2/m256, ymmV, ymm1","vpcmpeqd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 76 /r","V","V","AVX2","","w,r,r","","" +"VPCMPEQD k1, {k}, ymmV, ymm2/m256/m32bcst","VPCMPEQD ymm2/m256/m32bcst, ymmV, {k}, k1","vpcmpeqd ymm2/m256/m32bcst, ymmV, {k}, k1","EVEX.NDS.256.66.0F.W0 76 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPCMPEQD k1, {k}, zmmV, zmm2/m512/m32bcst","VPCMPEQD zmm2/m512/m32bcst, zmmV, {k}, k1","vpcmpeqd zmm2/m512/m32bcst, zmmV, {k}, k1","EVEX.NDS.512.66.0F.W0 76 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPCMPEQQ xmm1, xmmV, xmm2/m128","VPCMPEQQ xmm2/m128, xmmV, xmm1","vpcmpeqq xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 29 /r","V","V","AVX","","w,r,r","","" +"VPCMPEQQ k1, {k}, xmmV, xmm2/m128/m64bcst","VPCMPEQQ xmm2/m128/m64bcst, xmmV, {k}, k1","vpcmpeqq xmm2/m128/m64bcst, xmmV, {k}, k1","EVEX.NDS.128.66.0F38.W1 29 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPCMPEQQ ymm1, ymmV, ymm2/m256","VPCMPEQQ ymm2/m256, ymmV, ymm1","vpcmpeqq ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 29 /r","V","V","AVX2","","w,r,r","","" +"VPCMPEQQ k1, {k}, ymmV, ymm2/m256/m64bcst","VPCMPEQQ ymm2/m256/m64bcst, ymmV, {k}, k1","vpcmpeqq ymm2/m256/m64bcst, ymmV, {k}, k1","EVEX.NDS.256.66.0F38.W1 29 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPCMPEQQ k1, {k}, zmmV, zmm2/m512/m64bcst","VPCMPEQQ zmm2/m512/m64bcst, zmmV, {k}, k1","vpcmpeqq zmm2/m512/m64bcst, zmmV, {k}, k1","EVEX.NDS.512.66.0F38.W1 29 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPCMPEQW xmm1, xmmV, xmm2/m128","VPCMPEQW xmm2/m128, xmmV, xmm1","vpcmpeqw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 75 /r","V","V","AVX","","w,r,r","","" +"VPCMPEQW k1, {k}, xmmV, xmm2/m128","VPCMPEQW xmm2/m128, xmmV, {k}, k1","vpcmpeqw xmm2/m128, xmmV, {k}, k1","EVEX.NDS.128.66.0F.WIG 75 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPCMPEQW ymm1, ymmV, ymm2/m256","VPCMPEQW ymm2/m256, ymmV, ymm1","vpcmpeqw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 75 /r","V","V","AVX2","","w,r,r","","" +"VPCMPEQW k1, {k}, ymmV, ymm2/m256","VPCMPEQW ymm2/m256, ymmV, {k}, k1","vpcmpeqw ymm2/m256, ymmV, {k}, k1","EVEX.NDS.256.66.0F.WIG 75 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPCMPEQW k1, {k}, zmmV, zmm2/m512","VPCMPEQW zmm2/m512, zmmV, {k}, k1","vpcmpeqw zmm2/m512, zmmV, {k}, k1","EVEX.NDS.512.66.0F.WIG 75 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPCMPESTRI xmm1, xmm2/m128, imm8u","VPCMPESTRI imm8u, xmm2/m128, xmm1","vpcmpestri imm8u, xmm2/m128, xmm1","VEX.128.66.0F3A.WIG 61 /r ib","V","V","AVX","","r,r,r","","" +"VPCMPESTRM xmm1, xmm2/m128, imm8u","VPCMPESTRM imm8u, xmm2/m128, xmm1","vpcmpestrm imm8u, xmm2/m128, xmm1","VEX.128.66.0F3A.WIG 60 /r ib","V","V","AVX","","r,r,r","","" +"VPCMPGTB xmm1, xmmV, xmm2/m128","VPCMPGTB xmm2/m128, xmmV, xmm1","vpcmpgtb xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 64 /r","V","V","AVX","","w,r,r","","" +"VPCMPGTB k1, {k}, xmmV, xmm2/m128","VPCMPGTB xmm2/m128, xmmV, {k}, k1","vpcmpgtb xmm2/m128, xmmV, {k}, k1","EVEX.NDS.128.66.0F.WIG 64 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPCMPGTB ymm1, ymmV, ymm2/m256","VPCMPGTB ymm2/m256, 
ymmV, ymm1","vpcmpgtb ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 64 /r","V","V","AVX2","","w,r,r","","" +"VPCMPGTB k1, {k}, ymmV, ymm2/m256","VPCMPGTB ymm2/m256, ymmV, {k}, k1","vpcmpgtb ymm2/m256, ymmV, {k}, k1","EVEX.NDS.256.66.0F.WIG 64 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPCMPGTB k1, {k}, zmmV, zmm2/m512","VPCMPGTB zmm2/m512, zmmV, {k}, k1","vpcmpgtb zmm2/m512, zmmV, {k}, k1","EVEX.NDS.512.66.0F.WIG 64 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPCMPGTD xmm1, xmmV, xmm2/m128","VPCMPGTD xmm2/m128, xmmV, xmm1","vpcmpgtd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 66 /r","V","V","AVX","","w,r,r","","" +"VPCMPGTD k1, {k}, xmmV, xmm2/m128/m32bcst","VPCMPGTD xmm2/m128/m32bcst, xmmV, {k}, k1","vpcmpgtd xmm2/m128/m32bcst, xmmV, {k}, k1","EVEX.NDS.128.66.0F.W0 66 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPCMPGTD ymm1, ymmV, ymm2/m256","VPCMPGTD ymm2/m256, ymmV, ymm1","vpcmpgtd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 66 /r","V","V","AVX2","","w,r,r","","" +"VPCMPGTD k1, {k}, ymmV, ymm2/m256/m32bcst","VPCMPGTD ymm2/m256/m32bcst, ymmV, {k}, k1","vpcmpgtd ymm2/m256/m32bcst, ymmV, {k}, k1","EVEX.NDS.256.66.0F.W0 66 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPCMPGTD k1, {k}, zmmV, zmm2/m512/m32bcst","VPCMPGTD zmm2/m512/m32bcst, zmmV, {k}, k1","vpcmpgtd zmm2/m512/m32bcst, zmmV, {k}, k1","EVEX.NDS.512.66.0F.W0 66 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPCMPGTQ xmm1, xmmV, xmm2/m128","VPCMPGTQ xmm2/m128, xmmV, xmm1","vpcmpgtq xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 37 /r","V","V","AVX","","w,r,r","","" +"VPCMPGTQ k1, {k}, xmmV, xmm2/m128/m64bcst","VPCMPGTQ xmm2/m128/m64bcst, xmmV, {k}, k1","vpcmpgtq xmm2/m128/m64bcst, xmmV, {k}, k1","EVEX.NDS.128.66.0F38.W1 37 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPCMPGTQ ymm1, ymmV, ymm2/m256","VPCMPGTQ ymm2/m256, ymmV, ymm1","vpcmpgtq ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 37 /r","V","V","AVX2","","w,r,r","","" +"VPCMPGTQ k1, {k}, ymmV, ymm2/m256/m64bcst","VPCMPGTQ ymm2/m256/m64bcst, ymmV, {k}, k1","vpcmpgtq ymm2/m256/m64bcst, ymmV, {k}, k1","EVEX.NDS.256.66.0F38.W1 37 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPCMPGTQ k1, {k}, zmmV, zmm2/m512/m64bcst","VPCMPGTQ zmm2/m512/m64bcst, zmmV, {k}, k1","vpcmpgtq zmm2/m512/m64bcst, zmmV, {k}, k1","EVEX.NDS.512.66.0F38.W1 37 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPCMPGTW xmm1, xmmV, xmm2/m128","VPCMPGTW xmm2/m128, xmmV, xmm1","vpcmpgtw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 65 /r","V","V","AVX","","w,r,r","","" +"VPCMPGTW k1, {k}, xmmV, xmm2/m128","VPCMPGTW xmm2/m128, xmmV, {k}, k1","vpcmpgtw xmm2/m128, xmmV, {k}, k1","EVEX.NDS.128.66.0F.WIG 65 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPCMPGTW ymm1, ymmV, ymm2/m256","VPCMPGTW ymm2/m256, ymmV, ymm1","vpcmpgtw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 65 /r","V","V","AVX2","","w,r,r","","" +"VPCMPGTW k1, {k}, ymmV, ymm2/m256","VPCMPGTW ymm2/m256, ymmV, {k}, k1","vpcmpgtw ymm2/m256, ymmV, {k}, k1","EVEX.NDS.256.66.0F.WIG 65 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPCMPGTW k1, {k}, zmmV, zmm2/m512","VPCMPGTW zmm2/m512, zmmV, {k}, k1","vpcmpgtw zmm2/m512, zmmV, {k}, k1","EVEX.NDS.512.66.0F.WIG 65 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPCMPISTRI xmm1, xmm2/m128, imm8u","VPCMPISTRI imm8u, xmm2/m128, xmm1","vpcmpistri imm8u, xmm2/m128, xmm1","VEX.128.66.0F3A.WIG 63 /r ib","V","V","AVX","","r,r,r","","" +"VPCMPISTRM xmm1, 
xmm2/m128, imm8u","VPCMPISTRM imm8u, xmm2/m128, xmm1","vpcmpistrm imm8u, xmm2/m128, xmm1","VEX.128.66.0F3A.WIG 62 /r ib","V","V","AVX","","r,r,r","","" +"VPCMPQ k1, {k}, xmmV, xmm2/m128/m64bcst, imm8u","VPCMPQ imm8u, xmm2/m128/m64bcst, xmmV, {k}, k1","vpcmpq imm8u, xmm2/m128/m64bcst, xmmV, {k}, k1","EVEX.NDS.128.66.0F3A.W1 1F /r ib","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r,r","","" +"VPCMPQ k1, {k}, ymmV, ymm2/m256/m64bcst, imm8u","VPCMPQ imm8u, ymm2/m256/m64bcst, ymmV, {k}, k1","vpcmpq imm8u, ymm2/m256/m64bcst, ymmV, {k}, k1","EVEX.NDS.256.66.0F3A.W1 1F /r ib","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r,r","","" +"VPCMPQ k1, {k}, zmmV, zmm2/m512/m64bcst, imm8u","VPCMPQ imm8u, zmm2/m512/m64bcst, zmmV, {k}, k1","vpcmpq imm8u, zmm2/m512/m64bcst, zmmV, {k}, k1","EVEX.NDS.512.66.0F3A.W1 1F /r ib","V","V","AVX512F","bscale8,scale64","w,r,r,r,r","","" +"VPCMPUB k1, {k}, xmmV, xmm2/m128, imm8u","VPCMPUB imm8u, xmm2/m128, xmmV, {k}, k1","vpcmpub imm8u, xmm2/m128, xmmV, {k}, k1","EVEX.NDS.128.66.0F3A.W0 3E /r ib","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r,r","","" +"VPCMPUB k1, {k}, ymmV, ymm2/m256, imm8u","VPCMPUB imm8u, ymm2/m256, ymmV, {k}, k1","vpcmpub imm8u, ymm2/m256, ymmV, {k}, k1","EVEX.NDS.256.66.0F3A.W0 3E /r ib","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r,r","","" +"VPCMPUB k1, {k}, zmmV, zmm2/m512, imm8u","VPCMPUB imm8u, zmm2/m512, zmmV, {k}, k1","vpcmpub imm8u, zmm2/m512, zmmV, {k}, k1","EVEX.NDS.512.66.0F3A.W0 3E /r ib","V","V","AVX512BW","scale64","w,r,r,r,r","","" +"VPCMPUD k1, {k}, xmmV, xmm2/m128/m32bcst, imm8u","VPCMPUD imm8u, xmm2/m128/m32bcst, xmmV, {k}, k1","vpcmpud imm8u, xmm2/m128/m32bcst, xmmV, {k}, k1","EVEX.NDS.128.66.0F3A.W0 1E /r ib","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r,r","","" +"VPCMPUD k1, {k}, ymmV, ymm2/m256/m32bcst, imm8u","VPCMPUD imm8u, ymm2/m256/m32bcst, ymmV, {k}, k1","vpcmpud imm8u, ymm2/m256/m32bcst, ymmV, {k}, k1","EVEX.NDS.256.66.0F3A.W0 1E /r ib","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r,r","","" +"VPCMPUD k1, {k}, zmmV, zmm2/m512/m32bcst, imm8u","VPCMPUD imm8u, zmm2/m512/m32bcst, zmmV, {k}, k1","vpcmpud imm8u, zmm2/m512/m32bcst, zmmV, {k}, k1","EVEX.NDS.512.66.0F3A.W0 1E /r ib","V","V","AVX512F","bscale4,scale64","w,r,r,r,r","","" +"VPCMPUQ k1, {k}, xmmV, xmm2/m128/m64bcst, imm8u","VPCMPUQ imm8u, xmm2/m128/m64bcst, xmmV, {k}, k1","vpcmpuq imm8u, xmm2/m128/m64bcst, xmmV, {k}, k1","EVEX.NDS.128.66.0F3A.W1 1E /r ib","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r,r","","" +"VPCMPUQ k1, {k}, ymmV, ymm2/m256/m64bcst, imm8u","VPCMPUQ imm8u, ymm2/m256/m64bcst, ymmV, {k}, k1","vpcmpuq imm8u, ymm2/m256/m64bcst, ymmV, {k}, k1","EVEX.NDS.256.66.0F3A.W1 1E /r ib","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r,r","","" +"VPCMPUQ k1, {k}, zmmV, zmm2/m512/m64bcst, imm8u","VPCMPUQ imm8u, zmm2/m512/m64bcst, zmmV, {k}, k1","vpcmpuq imm8u, zmm2/m512/m64bcst, zmmV, {k}, k1","EVEX.NDS.512.66.0F3A.W1 1E /r ib","V","V","AVX512F","bscale8,scale64","w,r,r,r,r","","" +"VPCMPUW k1, {k}, xmmV, xmm2/m128, imm8u","VPCMPUW imm8u, xmm2/m128, xmmV, {k}, k1","vpcmpuw imm8u, xmm2/m128, xmmV, {k}, k1","EVEX.NDS.128.66.0F3A.W1 3E /r ib","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r,r","","" +"VPCMPUW k1, {k}, ymmV, ymm2/m256, imm8u","VPCMPUW imm8u, ymm2/m256, ymmV, {k}, k1","vpcmpuw imm8u, ymm2/m256, ymmV, {k}, k1","EVEX.NDS.256.66.0F3A.W1 3E /r ib","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r,r","","" +"VPCMPUW k1, {k}, zmmV, zmm2/m512, imm8u","VPCMPUW imm8u, zmm2/m512, zmmV, {k}, k1","vpcmpuw imm8u, zmm2/m512, 
zmmV, {k}, k1","EVEX.NDS.512.66.0F3A.W1 3E /r ib","V","V","AVX512BW","scale64","w,r,r,r,r","","" +"VPCMPW k1, {k}, xmmV, xmm2/m128, imm8u","VPCMPW imm8u, xmm2/m128, xmmV, {k}, k1","vpcmpw imm8u, xmm2/m128, xmmV, {k}, k1","EVEX.NDS.128.66.0F3A.W1 3F /r ib","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r,r","","" +"VPCMPW k1, {k}, ymmV, ymm2/m256, imm8u","VPCMPW imm8u, ymm2/m256, ymmV, {k}, k1","vpcmpw imm8u, ymm2/m256, ymmV, {k}, k1","EVEX.NDS.256.66.0F3A.W1 3F /r ib","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r,r","","" +"VPCMPW k1, {k}, zmmV, zmm2/m512, imm8u","VPCMPW imm8u, zmm2/m512, zmmV, {k}, k1","vpcmpw imm8u, zmm2/m512, zmmV, {k}, k1","EVEX.NDS.512.66.0F3A.W1 3F /r ib","V","V","AVX512BW","scale64","w,r,r,r,r","","" +"VPCOMB xmm1, xmmV, xmm2/m128, imm8u","VPCOMB imm8u, xmm2/m128, xmmV, xmm1","vpcomb imm8u, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 CC /r ib","V","V","XOP","amd","w,r,r,r","","" +"VPCOMD xmm1, xmmV, xmm2/m128, imm8u","VPCOMD imm8u, xmm2/m128, xmmV, xmm1","vpcomd imm8u, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 CE /r ib","V","V","XOP","amd","w,r,r,r","","" +"VPCOMPRESSB xmm2/m128, {k}{z}, xmm1","VPCOMPRESSB xmm1, {k}{z}, xmm2/m128","vpcompressb xmm1, {k}{z}, xmm2/m128","EVEX.128.66.0F38.W0 63 /r","V","V","AVX512_VBMI2+AVX512VL","scale1","w,r,r","","" +"VPCOMPRESSB ymm2/m256, {k}{z}, ymm1","VPCOMPRESSB ymm1, {k}{z}, ymm2/m256","vpcompressb ymm1, {k}{z}, ymm2/m256","EVEX.256.66.0F38.W0 63 /r","V","V","AVX512_VBMI2+AVX512VL","scale1","w,r,r","","" +"VPCOMPRESSB zmm2/m512, {k}{z}, zmm1","VPCOMPRESSB zmm1, {k}{z}, zmm2/m512","vpcompressb zmm1, {k}{z}, zmm2/m512","EVEX.512.66.0F38.W0 63 /r","V","V","AVX512_VBMI2","scale1","w,r,r","","" +"VPCOMPRESSD xmm2/m128, {k}{z}, xmm1","VPCOMPRESSD xmm1, {k}{z}, xmm2/m128","vpcompressd xmm1, {k}{z}, xmm2/m128","EVEX.128.66.0F38.W0 8B /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPCOMPRESSD ymm2/m256, {k}{z}, ymm1","VPCOMPRESSD ymm1, {k}{z}, ymm2/m256","vpcompressd ymm1, {k}{z}, ymm2/m256","EVEX.256.66.0F38.W0 8B /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPCOMPRESSD zmm2/m512, {k}{z}, zmm1","VPCOMPRESSD zmm1, {k}{z}, zmm2/m512","vpcompressd zmm1, {k}{z}, zmm2/m512","EVEX.512.66.0F38.W0 8B /r","V","V","AVX512F","scale4","w,r,r","","" +"VPCOMPRESSQ xmm2/m128, {k}{z}, xmm1","VPCOMPRESSQ xmm1, {k}{z}, xmm2/m128","vpcompressq xmm1, {k}{z}, xmm2/m128","EVEX.128.66.0F38.W1 8B /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPCOMPRESSQ ymm2/m256, {k}{z}, ymm1","VPCOMPRESSQ ymm1, {k}{z}, ymm2/m256","vpcompressq ymm1, {k}{z}, ymm2/m256","EVEX.256.66.0F38.W1 8B /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPCOMPRESSQ zmm2/m512, {k}{z}, zmm1","VPCOMPRESSQ zmm1, {k}{z}, zmm2/m512","vpcompressq zmm1, {k}{z}, zmm2/m512","EVEX.512.66.0F38.W1 8B /r","V","V","AVX512F","scale8","w,r,r","","" +"VPCOMPRESSW xmm2/m128, {k}{z}, xmm1","VPCOMPRESSW xmm1, {k}{z}, xmm2/m128","vpcompressw xmm1, {k}{z}, xmm2/m128","EVEX.128.66.0F38.W1 63 /r","V","V","AVX512_VBMI2+AVX512VL","scale2","w,r,r","","" +"VPCOMPRESSW ymm2/m256, {k}{z}, ymm1","VPCOMPRESSW ymm1, {k}{z}, ymm2/m256","vpcompressw ymm1, {k}{z}, ymm2/m256","EVEX.256.66.0F38.W1 63 /r","V","V","AVX512_VBMI2+AVX512VL","scale2","w,r,r","","" +"VPCOMPRESSW zmm2/m512, {k}{z}, zmm1","VPCOMPRESSW zmm1, {k}{z}, zmm2/m512","vpcompressw zmm1, {k}{z}, zmm2/m512","EVEX.512.66.0F38.W1 63 /r","V","V","AVX512_VBMI2","scale2","w,r,r","","" +"VPCOMQ xmm1, xmmV, xmm2/m128, imm8u","VPCOMQ imm8u, xmm2/m128, xmmV, xmm1","vpcomq imm8u, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 CF /r 
ib","V","V","XOP","amd","w,r,r,r","","" +"VPCOMUB xmm1, xmmV, xmm2/m128, imm8u","VPCOMUB imm8u, xmm2/m128, xmmV, xmm1","vpcomub imm8u, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 EC /r ib","V","V","XOP","amd","w,r,r,r","","" +"VPCOMUD xmm1, xmmV, xmm2/m128, imm8u","VPCOMUD imm8u, xmm2/m128, xmmV, xmm1","vpcomud imm8u, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 EE /r ib","V","V","XOP","amd","w,r,r,r","","" +"VPCOMUQ xmm1, xmmV, xmm2/m128, imm8u","VPCOMUQ imm8u, xmm2/m128, xmmV, xmm1","vpcomuq imm8u, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 EF /r ib","V","V","XOP","amd","w,r,r,r","","" +"VPCOMUW xmm1, xmmV, xmm2/m128, imm8u","VPCOMUW imm8u, xmm2/m128, xmmV, xmm1","vpcomuw imm8u, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 ED /r ib","V","V","XOP","amd","w,r,r,r","","" +"VPCOMW xmm1, xmmV, xmm2/m128, imm8u","VPCOMW imm8u, xmm2/m128, xmmV, xmm1","vpcomw imm8u, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 CD /r ib","V","V","XOP","amd","w,r,r,r","","" +"VPCONFLICTD xmm1, {k}{z}, xmm2/m128/m32bcst","VPCONFLICTD xmm2/m128/m32bcst, {k}{z}, xmm1","vpconflictd xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.66.0F38.W0 C4 /r","V","V","AVX512CD+AVX512VL","bscale4,scale16","w,r,r","","" +"VPCONFLICTD ymm1, {k}{z}, ymm2/m256/m32bcst","VPCONFLICTD ymm2/m256/m32bcst, {k}{z}, ymm1","vpconflictd ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.66.0F38.W0 C4 /r","V","V","AVX512CD+AVX512VL","bscale4,scale32","w,r,r","","" +"VPCONFLICTD zmm1, {k}{z}, zmm2/m512/m32bcst","VPCONFLICTD zmm2/m512/m32bcst, {k}{z}, zmm1","vpconflictd zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W0 C4 /r","V","V","AVX512CD","bscale4,scale64","w,r,r","","" +"VPCONFLICTQ xmm1, {k}{z}, xmm2/m128/m64bcst","VPCONFLICTQ xmm2/m128/m64bcst, {k}{z}, xmm1","vpconflictq xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.66.0F38.W1 C4 /r","V","V","AVX512CD+AVX512VL","bscale8,scale16","w,r,r","","" +"VPCONFLICTQ ymm1, {k}{z}, ymm2/m256/m64bcst","VPCONFLICTQ ymm2/m256/m64bcst, {k}{z}, ymm1","vpconflictq ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.66.0F38.W1 C4 /r","V","V","AVX512CD+AVX512VL","bscale8,scale32","w,r,r","","" +"VPCONFLICTQ zmm1, {k}{z}, zmm2/m512/m64bcst","VPCONFLICTQ zmm2/m512/m64bcst, {k}{z}, zmm1","vpconflictq zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W1 C4 /r","V","V","AVX512CD","bscale8,scale64","w,r,r","","" +"VPDPBUSD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPDPBUSD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpdpbusd xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 50 /r","V","V","AVX512_VNNI+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VPDPBUSD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPDPBUSD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpdpbusd ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 50 /r","V","V","AVX512_VNNI+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VPDPBUSD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPDPBUSD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpdpbusd zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 50 /r","V","V","AVX512_VNNI","bscale4,scale64","rw,r,r,r","","" +"VPDPBUSDS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPDPBUSDS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpdpbusds xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 51 /r","V","V","AVX512_VNNI+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VPDPBUSDS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPDPBUSDS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpdpbusds ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 51 /r","V","V","AVX512_VNNI+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VPDPBUSDS zmm1, {k}{z}, zmmV, 
zmm2/m512/m32bcst","VPDPBUSDS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpdpbusds zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 51 /r","V","V","AVX512_VNNI","bscale4,scale64","rw,r,r,r","","" +"VPDPWSSD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPDPWSSD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpdpwssd xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 52 /r","V","V","AVX512_VNNI+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VPDPWSSD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPDPWSSD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpdpwssd ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 52 /r","V","V","AVX512_VNNI+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VPDPWSSD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPDPWSSD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpdpwssd zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 52 /r","V","V","AVX512_VNNI","bscale4,scale64","rw,r,r,r","","" +"VPDPWSSDS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPDPWSSDS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpdpwssds xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 53 /r","V","V","AVX512_VNNI+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VPDPWSSDS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPDPWSSDS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpdpwssds ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 53 /r","V","V","AVX512_VNNI+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VPDPWSSDS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPDPWSSDS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpdpwssds zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 53 /r","V","V","AVX512_VNNI","bscale4,scale64","rw,r,r,r","","" +"VPERM2F128 ymm1, ymmV, ymm2/m256, imm8u","VPERM2F128 imm8u, ymm2/m256, ymmV, ymm1","vperm2f128 imm8u, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 06 /r ib","V","V","AVX","","w,r,r,r","","" +"VPERM2I128 ymm1, ymmV, ymm2/m256, imm8u","VPERM2I128 imm8u, ymm2/m256, ymmV, ymm1","vperm2i128 imm8u, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 46 /r ib","V","V","AVX2","","w,r,r,r","","" +"VPERMB xmm1, {k}{z}, xmmV, xmm2/m128","VPERMB xmm2/m128, xmmV, {k}{z}, xmm1","vpermb xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W0 8D /r","V","V","AVX512_VBMI+AVX512VL","scale16","w,r,r,r","","" +"VPERMB ymm1, {k}{z}, ymmV, ymm2/m256","VPERMB ymm2/m256, ymmV, {k}{z}, ymm1","vpermb ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 8D /r","V","V","AVX512_VBMI+AVX512VL","scale32","w,r,r,r","","" +"VPERMB zmm1, {k}{z}, zmmV, zmm2/m512","VPERMB zmm2/m512, zmmV, {k}{z}, zmm1","vpermb zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 8D /r","V","V","AVX512_VBMI","scale64","w,r,r,r","","" +"VPERMD ymm1, ymmV, ymm2/m256","VPERMD ymm2/m256, ymmV, ymm1","vpermd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.W0 36 /r","V","V","AVX2","","w,r,r","","" +"VPERMD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPERMD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpermd ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 36 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPERMD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPERMD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpermd zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 36 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPERMI2B xmm1, {k}{z}, xmmV, xmm2/m128","VPERMI2B xmm2/m128, xmmV, {k}{z}, xmm1","vpermi2b xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 75 /r","V","V","AVX512_VBMI+AVX512VL","scale16","rw,r,r,r","","" +"VPERMI2B ymm1, {k}{z}, ymmV, 
ymm2/m256","VPERMI2B ymm2/m256, ymmV, {k}{z}, ymm1","vpermi2b ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 75 /r","V","V","AVX512_VBMI+AVX512VL","scale32","rw,r,r,r","","" +"VPERMI2B zmm1, {k}{z}, zmmV, zmm2/m512","VPERMI2B zmm2/m512, zmmV, {k}{z}, zmm1","vpermi2b zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 75 /r","V","V","AVX512_VBMI","scale64","rw,r,r,r","","" +"VPERMI2D xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPERMI2D xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpermi2d xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 76 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VPERMI2D ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPERMI2D ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpermi2d ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 76 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VPERMI2D zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPERMI2D zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpermi2d zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 76 /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VPERMI2PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPERMI2PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpermi2pd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 77 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VPERMI2PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPERMI2PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpermi2pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 77 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VPERMI2PD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPERMI2PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpermi2pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 77 /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VPERMI2PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPERMI2PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpermi2ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 77 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VPERMI2PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPERMI2PS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpermi2ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 77 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VPERMI2PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPERMI2PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpermi2ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 77 /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VPERMI2Q xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPERMI2Q xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpermi2q xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 76 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VPERMI2Q ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPERMI2Q ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpermi2q ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 76 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VPERMI2Q zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPERMI2Q zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpermi2q zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 76 /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VPERMI2W xmm1, {k}{z}, xmmV, xmm2/m128","VPERMI2W xmm2/m128, xmmV, {k}{z}, xmm1","vpermi2w xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 75 /r","V","V","AVX512BW+AVX512VL","scale16","rw,r,r,r","","" +"VPERMI2W ymm1, {k}{z}, ymmV, ymm2/m256","VPERMI2W ymm2/m256, ymmV, 
{k}{z}, ymm1","vpermi2w ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 75 /r","V","V","AVX512BW+AVX512VL","scale32","rw,r,r,r","","" +"VPERMI2W zmm1, {k}{z}, zmmV, zmm2/m512","VPERMI2W zmm2/m512, zmmV, {k}{z}, zmm1","vpermi2w zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 75 /r","V","V","AVX512BW","scale64","rw,r,r,r","","" +"VPERMIL2PD xmm1, xmmV, xmmIH, xmm2/m128, imm8u","VPERMIL2PD imm8u, xmm2/m128, xmmIH, xmmV, xmm1","vpermil2pd imm8u, xmm2/m128, xmmIH, xmmV, xmm1","VEX.NDS.128.66.0F3A.W1 49 /r /is4","V","V","XOP","amd","w,r,r,r,r","","" +"VPERMIL2PD xmm1, xmmV, xmm2/m128, xmmIH, imm8u","VPERMIL2PD imm8u, xmmIH, xmm2/m128, xmmV, xmm1","vpermil2pd imm8u, xmmIH, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W0 49 /r /is4","V","V","XOP","amd","w,r,r,r,r","","" +"VPERMIL2PD ymm1, ymmV, ymmIH, ymm2/m256, imm8u","VPERMIL2PD imm8u, ymm2/m256, ymmIH, ymmV, ymm1","vpermil2pd imm8u, ymm2/m256, ymmIH, ymmV, ymm1","VEX.NDS.256.66.0F3A.W1 49 /r /is4","V","V","XOP","amd","w,r,r,r,r","","" +"VPERMIL2PD ymm1, ymmV, ymm2/m256, ymmIH, imm8u","VPERMIL2PD imm8u, ymmIH, ymm2/m256, ymmV, ymm1","vpermil2pd imm8u, ymmIH, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 49 /r /is4","V","V","XOP","amd","w,r,r,r,r","","" +"VPERMIL2PS xmm1, xmmV, xmmIH, xmm2/m128, imm8u","VPERMIL2PS imm8u, xmm2/m128, xmmIH, xmmV, xmm1","vpermil2ps imm8u, xmm2/m128, xmmIH, xmmV, xmm1","VEX.NDS.128.66.0F3A.W1 48 /r /is4","V","V","XOP","amd","w,r,r,r,r","","" +"VPERMIL2PS xmm1, xmmV, xmm2/m128, xmmIH, imm8u","VPERMIL2PS imm8u, xmmIH, xmm2/m128, xmmV, xmm1","vpermil2ps imm8u, xmmIH, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F3A.W0 48 /r /is4","V","V","XOP","amd","w,r,r,r,r","","" +"VPERMIL2PS ymm1, ymmV, ymmIH, ymm2/m256, imm8u","VPERMIL2PS imm8u, ymm2/m256, ymmIH, ymmV, ymm1","vpermil2ps imm8u, ymm2/m256, ymmIH, ymmV, ymm1","VEX.NDS.256.66.0F3A.W1 48 /r /is4","V","V","XOP","amd","w,r,r,r,r","","" +"VPERMIL2PS ymm1, ymmV, ymm2/m256, ymmIH, imm8u","VPERMIL2PS imm8u, ymmIH, ymm2/m256, ymmV, ymm1","vpermil2ps imm8u, ymmIH, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F3A.W0 48 /r /is4","V","V","XOP","amd","w,r,r,r,r","","" +"VPERMILPD xmm1, xmm2/m128, imm8u","VPERMILPD imm8u, xmm2/m128, xmm1","vpermilpd imm8u, xmm2/m128, xmm1","VEX.128.66.0F3A.W0 05 /r ib","V","V","AVX","","w,r,r","","" +"VPERMILPD xmm1, {k}{z}, xmm2/m128/m64bcst, imm8u","VPERMILPD imm8u, xmm2/m128/m64bcst, {k}{z}, xmm1","vpermilpd imm8u, xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.66.0F3A.W1 05 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPERMILPD ymm1, ymm2/m256, imm8u","VPERMILPD imm8u, ymm2/m256, ymm1","vpermilpd imm8u, ymm2/m256, ymm1","VEX.256.66.0F3A.W0 05 /r ib","V","V","AVX","","w,r,r","","" +"VPERMILPD ymm1, {k}{z}, ymm2/m256/m64bcst, imm8u","VPERMILPD imm8u, ymm2/m256/m64bcst, {k}{z}, ymm1","vpermilpd imm8u, ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.66.0F3A.W1 05 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPERMILPD zmm1, {k}{z}, zmm2/m512/m64bcst, imm8u","VPERMILPD imm8u, zmm2/m512/m64bcst, {k}{z}, zmm1","vpermilpd imm8u, zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F3A.W1 05 /r ib","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPERMILPD xmm1, xmmV, xmm2/m128","VPERMILPD xmm2/m128, xmmV, xmm1","vpermilpd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.W0 0D /r","V","V","AVX","","w,r,r","","" +"VPERMILPD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPERMILPD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpermilpd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 0D 
/r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPERMILPD ymm1, ymmV, ymm2/m256","VPERMILPD ymm2/m256, ymmV, ymm1","vpermilpd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.W0 0D /r","V","V","AVX","","w,r,r","","" +"VPERMILPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPERMILPD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpermilpd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 0D /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPERMILPD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPERMILPD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpermilpd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 0D /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPERMILPS xmm1, xmm2/m128, imm8u","VPERMILPS imm8u, xmm2/m128, xmm1","vpermilps imm8u, xmm2/m128, xmm1","VEX.128.66.0F3A.W0 04 /r ib","V","V","AVX","","w,r,r","","" +"VPERMILPS xmm1, {k}{z}, xmm2/m128/m32bcst, imm8u","VPERMILPS imm8u, xmm2/m128/m32bcst, {k}{z}, xmm1","vpermilps imm8u, xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.66.0F3A.W0 04 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPERMILPS ymm1, ymm2/m256, imm8u","VPERMILPS imm8u, ymm2/m256, ymm1","vpermilps imm8u, ymm2/m256, ymm1","VEX.256.66.0F3A.W0 04 /r ib","V","V","AVX","","w,r,r","","" +"VPERMILPS ymm1, {k}{z}, ymm2/m256/m32bcst, imm8u","VPERMILPS imm8u, ymm2/m256/m32bcst, {k}{z}, ymm1","vpermilps imm8u, ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.66.0F3A.W0 04 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPERMILPS zmm1, {k}{z}, zmm2/m512/m32bcst, imm8u","VPERMILPS imm8u, zmm2/m512/m32bcst, {k}{z}, zmm1","vpermilps imm8u, zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F3A.W0 04 /r ib","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPERMILPS xmm1, xmmV, xmm2/m128","VPERMILPS xmm2/m128, xmmV, xmm1","vpermilps xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.W0 0C /r","V","V","AVX","","w,r,r","","" +"VPERMILPS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPERMILPS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpermilps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W0 0C /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPERMILPS ymm1, ymmV, ymm2/m256","VPERMILPS ymm2/m256, ymmV, ymm1","vpermilps ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.W0 0C /r","V","V","AVX","","w,r,r","","" +"VPERMILPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPERMILPS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpermilps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 0C /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPERMILPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPERMILPS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpermilps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 0C /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPERMPD ymm1, ymm2/m256, imm8u","VPERMPD imm8u, ymm2/m256, ymm1","vpermpd imm8u, ymm2/m256, ymm1","VEX.256.66.0F3A.W1 01 /r ib","V","V","AVX2","","w,r,r","","" +"VPERMPD ymm1, {k}{z}, ymm2/m256/m64bcst, imm8u","VPERMPD imm8u, ymm2/m256/m64bcst, {k}{z}, ymm1","vpermpd imm8u, ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.66.0F3A.W1 01 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPERMPD zmm1, {k}{z}, zmm2/m512/m64bcst, imm8u","VPERMPD imm8u, zmm2/m512/m64bcst, {k}{z}, zmm1","vpermpd imm8u, zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F3A.W1 01 /r ib","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPERMPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPERMPD ymm2/m256/m64bcst, ymmV, {k}{z}, 
ymm1","vpermpd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 16 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPERMPD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPERMPD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpermpd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 16 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPERMPS ymm1, ymmV, ymm2/m256","VPERMPS ymm2/m256, ymmV, ymm1","vpermps ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.W0 16 /r","V","V","AVX2","","w,r,r","","" +"VPERMPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPERMPS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpermps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 16 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPERMPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPERMPS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpermps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 16 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPERMQ ymm1, ymm2/m256, imm8u","VPERMQ imm8u, ymm2/m256, ymm1","vpermq imm8u, ymm2/m256, ymm1","VEX.256.66.0F3A.W1 00 /r ib","V","V","AVX2","","w,r,r","","" +"VPERMQ ymm1, {k}{z}, ymm2/m256/m64bcst, imm8u","VPERMQ imm8u, ymm2/m256/m64bcst, {k}{z}, ymm1","vpermq imm8u, ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.66.0F3A.W1 00 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPERMQ zmm1, {k}{z}, zmm2/m512/m64bcst, imm8u","VPERMQ imm8u, zmm2/m512/m64bcst, {k}{z}, zmm1","vpermq imm8u, zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F3A.W1 00 /r ib","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPERMQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPERMQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpermq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 36 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPERMQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPERMQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpermq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 36 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPERMT2B xmm1, {k}{z}, xmmV, xmm2/m128","VPERMT2B xmm2/m128, xmmV, {k}{z}, xmm1","vpermt2b xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 7D /r","V","V","AVX512_VBMI+AVX512VL","scale16","rw,r,r,r","","" +"VPERMT2B ymm1, {k}{z}, ymmV, ymm2/m256","VPERMT2B ymm2/m256, ymmV, {k}{z}, ymm1","vpermt2b ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 7D /r","V","V","AVX512_VBMI+AVX512VL","scale32","rw,r,r,r","","" +"VPERMT2B zmm1, {k}{z}, zmmV, zmm2/m512","VPERMT2B zmm2/m512, zmmV, {k}{z}, zmm1","vpermt2b zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 7D /r","V","V","AVX512_VBMI","scale64","rw,r,r,r","","" +"VPERMT2D xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPERMT2D xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpermt2d xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 7E /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VPERMT2D ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPERMT2D ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpermt2d ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 7E /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VPERMT2D zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPERMT2D zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpermt2d zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 7E /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VPERMT2PD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPERMT2PD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpermt2pd 
xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 7F /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VPERMT2PD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPERMT2PD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpermt2pd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 7F /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VPERMT2PD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPERMT2PD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpermt2pd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 7F /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VPERMT2PS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPERMT2PS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpermt2ps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 7F /r","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VPERMT2PS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPERMT2PS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpermt2ps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 7F /r","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VPERMT2PS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPERMT2PS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpermt2ps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 7F /r","V","V","AVX512F","bscale4,scale64","rw,r,r,r","","" +"VPERMT2Q xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPERMT2Q xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpermt2q xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 7E /r","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VPERMT2Q ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPERMT2Q ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpermt2q ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 7E /r","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VPERMT2Q zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPERMT2Q zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpermt2q zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 7E /r","V","V","AVX512F","bscale8,scale64","rw,r,r,r","","" +"VPERMT2W xmm1, {k}{z}, xmmV, xmm2/m128","VPERMT2W xmm2/m128, xmmV, {k}{z}, xmm1","vpermt2w xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 7D /r","V","V","AVX512BW+AVX512VL","scale16","rw,r,r,r","","" +"VPERMT2W ymm1, {k}{z}, ymmV, ymm2/m256","VPERMT2W ymm2/m256, ymmV, {k}{z}, ymm1","vpermt2w ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 7D /r","V","V","AVX512BW+AVX512VL","scale32","rw,r,r,r","","" +"VPERMT2W zmm1, {k}{z}, zmmV, zmm2/m512","VPERMT2W zmm2/m512, zmmV, {k}{z}, zmm1","vpermt2w zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 7D /r","V","V","AVX512BW","scale64","rw,r,r,r","","" +"VPERMW xmm1, {k}{z}, xmmV, xmm2/m128","VPERMW xmm2/m128, xmmV, {k}{z}, xmm1","vpermw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 8D /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPERMW ymm1, {k}{z}, ymmV, ymm2/m256","VPERMW ymm2/m256, ymmV, {k}{z}, ymm1","vpermw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 8D /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPERMW zmm1, {k}{z}, zmmV, zmm2/m512","VPERMW zmm2/m512, zmmV, {k}{z}, zmm1","vpermw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 8D /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPEXPANDB xmm1, {k}{z}, xmm2/m128","VPEXPANDB xmm2/m128, {k}{z}, xmm1","vpexpandb xmm2/m128, {k}{z}, xmm1","EVEX.128.66.0F38.W0 62 /r","V","V","AVX512_VBMI2+AVX512VL","scale1","w,r,r","","" +"VPEXPANDB ymm1, {k}{z}, ymm2/m256","VPEXPANDB ymm2/m256, {k}{z}, 
ymm1","vpexpandb ymm2/m256, {k}{z}, ymm1","EVEX.256.66.0F38.W0 62 /r","V","V","AVX512_VBMI2+AVX512VL","scale1","w,r,r","","" +"VPEXPANDB zmm1, {k}{z}, zmm2/m512","VPEXPANDB zmm2/m512, {k}{z}, zmm1","vpexpandb zmm2/m512, {k}{z}, zmm1","EVEX.512.66.0F38.W0 62 /r","V","V","AVX512_VBMI2","scale1","w,r,r","","" +"VPEXPANDD xmm1, {k}{z}, xmm2/m128","VPEXPANDD xmm2/m128, {k}{z}, xmm1","vpexpandd xmm2/m128, {k}{z}, xmm1","EVEX.128.66.0F38.W0 89 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPEXPANDD ymm1, {k}{z}, ymm2/m256","VPEXPANDD ymm2/m256, {k}{z}, ymm1","vpexpandd ymm2/m256, {k}{z}, ymm1","EVEX.256.66.0F38.W0 89 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPEXPANDD zmm1, {k}{z}, zmm2/m512","VPEXPANDD zmm2/m512, {k}{z}, zmm1","vpexpandd zmm2/m512, {k}{z}, zmm1","EVEX.512.66.0F38.W0 89 /r","V","V","AVX512F","scale4","w,r,r","","" +"VPEXPANDQ xmm1, {k}{z}, xmm2/m128","VPEXPANDQ xmm2/m128, {k}{z}, xmm1","vpexpandq xmm2/m128, {k}{z}, xmm1","EVEX.128.66.0F38.W1 89 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPEXPANDQ ymm1, {k}{z}, ymm2/m256","VPEXPANDQ ymm2/m256, {k}{z}, ymm1","vpexpandq ymm2/m256, {k}{z}, ymm1","EVEX.256.66.0F38.W1 89 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPEXPANDQ zmm1, {k}{z}, zmm2/m512","VPEXPANDQ zmm2/m512, {k}{z}, zmm1","vpexpandq zmm2/m512, {k}{z}, zmm1","EVEX.512.66.0F38.W1 89 /r","V","V","AVX512F","scale8","w,r,r","","" +"VPEXPANDW xmm1, {k}{z}, xmm2/m128","VPEXPANDW xmm2/m128, {k}{z}, xmm1","vpexpandw xmm2/m128, {k}{z}, xmm1","EVEX.128.66.0F38.W1 62 /r","V","V","AVX512_VBMI2+AVX512VL","scale2","w,r,r","","" +"VPEXPANDW ymm1, {k}{z}, ymm2/m256","VPEXPANDW ymm2/m256, {k}{z}, ymm1","vpexpandw ymm2/m256, {k}{z}, ymm1","EVEX.256.66.0F38.W1 62 /r","V","V","AVX512_VBMI2+AVX512VL","scale2","w,r,r","","" +"VPEXPANDW zmm1, {k}{z}, zmm2/m512","VPEXPANDW zmm2/m512, {k}{z}, zmm1","vpexpandw zmm2/m512, {k}{z}, zmm1","EVEX.512.66.0F38.W1 62 /r","V","V","AVX512_VBMI2","scale2","w,r,r","","" +"VPEXTRB r32/m8, xmm1, imm8u","VPEXTRB imm8u, xmm1, r32/m8","vpextrb imm8u, xmm1, r32/m8","EVEX.128.66.0F3A.WIG 14 /r ib","V","V","AVX512BW+AVX512VL","scale1","w,r,r","","" +"VPEXTRB r32/m8, xmm1, imm8u","VPEXTRB imm8u, xmm1, r32/m8","vpextrb imm8u, xmm1, r32/m8","VEX.128.66.0F3A.WIG 14 /r ib","V","V","AVX","","w,r,r","","" +"VPEXTRD r/m32, xmm1, imm8u","VPEXTRD imm8u, xmm1, r/m32","vpextrd imm8u, xmm1, r/m32","EVEX.128.66.0F3A.W0 16 /r ib","V","V","AVX512DQ+AVX512VL","scale4","w,r,r","","" +"VPEXTRD r/m32, xmm1, imm8u","VPEXTRD imm8u, xmm1, r/m32","vpextrd imm8u, xmm1, r/m32","VEX.128.66.0F3A.W0 16 /r ib","V","V","AVX","","w,r,r","","" +"VPEXTRQ r/m64, xmm1, imm8u","VPEXTRQ imm8u, xmm1, r/m64","vpextrq imm8u, xmm1, r/m64","EVEX.128.66.0F3A.W1 16 /r ib","N.S.","V","AVX512DQ+AVX512VL","scale8","w,r,r","","" +"VPEXTRQ r/m64, xmm1, imm8u","VPEXTRQ imm8u, xmm1, r/m64","vpextrq imm8u, xmm1, r/m64","VEX.128.66.0F3A.W1 16 /r ib","N.S.","V","AVX","","w,r,r","","" +"VPEXTRW r32/m16, xmm1, imm8u","VPEXTRW imm8u, xmm1, r32/m16","vpextrw imm8u, xmm1, r32/m16","EVEX.128.66.0F3A.WIG 15 /r ib","V","V","AVX512BW+AVX512VL","scale2","w,r,r","","" +"VPEXTRW r32/m16, xmm1, imm8u","VPEXTRW imm8u, xmm1, r32/m16","vpextrw imm8u, xmm1, r32/m16","VEX.128.66.0F3A.WIG 15 /r ib","V","V","AVX","","w,r,r","","" +"VPEXTRW r32, xmm2, imm8u","VPEXTRW imm8u, xmm2, r32","vpextrw imm8u, xmm2, r32","EVEX.128.66.0F.WIG C5 /r ib","V","V","AVX512BW+AVX512VL","modrm_regonly","w,r,r","","" +"VPEXTRW r32, xmm2, imm8u","VPEXTRW imm8u, xmm2, r32","vpextrw imm8u, xmm2, r32","VEX.128.66.0F.WIG 
C5 /r ib","V","V","AVX","modrm_regonly","w,r,r","","" +"VPGATHERDD xmm1, {k1-k7}, vm32x","VPGATHERDD vm32x, {k1-k7}, xmm1","vpgatherdd vm32x, {k1-k7}, xmm1","EVEX.128.66.0F38.W0 90 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale4","w,rw,r","","" +"VPGATHERDD ymm1, {k1-k7}, vm32y","VPGATHERDD vm32y, {k1-k7}, ymm1","vpgatherdd vm32y, {k1-k7}, ymm1","EVEX.256.66.0F38.W0 90 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale4","w,rw,r","","" +"VPGATHERDD zmm1, {k1-k7}, vm32z","VPGATHERDD vm32z, {k1-k7}, zmm1","vpgatherdd vm32z, {k1-k7}, zmm1","EVEX.512.66.0F38.W0 90 /vsib","V","V","AVX512F","modrm_memonly,scale4","w,rw,r","","" +"VPGATHERDD xmm1, vm32x, xmmV","VPGATHERDD xmmV, vm32x, xmm1","vpgatherdd xmmV, vm32x, xmm1","VEX.DDS.128.66.0F38.W0 90 /r","V","V","AVX2","modrm_memonly","rw,r,rw","","" +"VPGATHERDD ymm1, vm32y, ymmV","VPGATHERDD ymmV, vm32y, ymm1","vpgatherdd ymmV, vm32y, ymm1","VEX.DDS.256.66.0F38.W0 90 /r","V","V","AVX2","modrm_memonly","rw,r,rw","","" +"VPGATHERDQ xmm1, {k1-k7}, vm32x","VPGATHERDQ vm32x, {k1-k7}, xmm1","vpgatherdq vm32x, {k1-k7}, xmm1","EVEX.128.66.0F38.W1 90 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,rw,r","","" +"VPGATHERDQ ymm1, {k1-k7}, vm32x","VPGATHERDQ vm32x, {k1-k7}, ymm1","vpgatherdq vm32x, {k1-k7}, ymm1","EVEX.256.66.0F38.W1 90 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,rw,r","","" +"VPGATHERDQ zmm1, {k1-k7}, vm32y","VPGATHERDQ vm32y, {k1-k7}, zmm1","vpgatherdq vm32y, {k1-k7}, zmm1","EVEX.512.66.0F38.W1 90 /vsib","V","V","AVX512F","modrm_memonly,scale8","w,rw,r","","" +"VPGATHERDQ xmm1, vm32x, xmmV","VPGATHERDQ xmmV, vm32x, xmm1","vpgatherdq xmmV, vm32x, xmm1","VEX.DDS.128.66.0F38.W1 90 /r","V","V","AVX2","modrm_memonly","rw,r,rw","","" +"VPGATHERDQ ymm1, vm32x, ymmV","VPGATHERDQ ymmV, vm32x, ymm1","vpgatherdq ymmV, vm32x, ymm1","VEX.DDS.256.66.0F38.W1 90 /r","V","V","AVX2","modrm_memonly","rw,r,rw","","" +"VPGATHERQD xmm1, {k1-k7}, vm64x","VPGATHERQD vm64x, {k1-k7}, xmm1","vpgatherqd vm64x, {k1-k7}, xmm1","EVEX.128.66.0F38.W0 91 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale4","w,rw,r","","" +"VPGATHERQD xmm1, {k1-k7}, vm64y","VPGATHERQD vm64y, {k1-k7}, xmm1","vpgatherqd vm64y, {k1-k7}, xmm1","EVEX.256.66.0F38.W0 91 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale4","w,rw,r","","" +"VPGATHERQD ymm1, {k1-k7}, vm64z","VPGATHERQD vm64z, {k1-k7}, ymm1","vpgatherqd vm64z, {k1-k7}, ymm1","EVEX.512.66.0F38.W0 91 /vsib","V","V","AVX512F","modrm_memonly,scale4","w,rw,r","","" +"VPGATHERQD xmm1, vm64x, xmmV","VPGATHERQD xmmV, vm64x, xmm1","vpgatherqd xmmV, vm64x, xmm1","VEX.DDS.128.66.0F38.W0 91 /r","V","V","AVX2","modrm_memonly","rw,r,rw","","" +"VPGATHERQD xmm1, vm64y, xmmV","VPGATHERQD xmmV, vm64y, xmm1","vpgatherqd xmmV, vm64y, xmm1","VEX.DDS.256.66.0F38.W0 91 /r","V","V","AVX2","modrm_memonly","rw,r,rw","","" +"VPGATHERQQ xmm1, {k1-k7}, vm64x","VPGATHERQQ vm64x, {k1-k7}, xmm1","vpgatherqq vm64x, {k1-k7}, xmm1","EVEX.128.66.0F38.W1 91 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,rw,r","","" +"VPGATHERQQ ymm1, {k1-k7}, vm64y","VPGATHERQQ vm64y, {k1-k7}, ymm1","vpgatherqq vm64y, {k1-k7}, ymm1","EVEX.256.66.0F38.W1 91 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,rw,r","","" +"VPGATHERQQ zmm1, {k1-k7}, vm64z","VPGATHERQQ vm64z, {k1-k7}, zmm1","vpgatherqq vm64z, {k1-k7}, zmm1","EVEX.512.66.0F38.W1 91 /vsib","V","V","AVX512F","modrm_memonly,scale8","w,rw,r","","" +"VPGATHERQQ xmm1, vm64x, xmmV","VPGATHERQQ xmmV, vm64x, xmm1","vpgatherqq xmmV, vm64x, 
xmm1","VEX.DDS.128.66.0F38.W1 91 /r","V","V","AVX2","modrm_memonly","rw,r,rw","","" +"VPGATHERQQ ymm1, vm64y, ymmV","VPGATHERQQ ymmV, vm64y, ymm1","vpgatherqq ymmV, vm64y, ymm1","VEX.DDS.256.66.0F38.W1 91 /r","V","V","AVX2","modrm_memonly","rw,r,rw","","" +"VPHADDBD xmm1, xmm2/m128","VPHADDBD xmm2/m128, xmm1","vphaddbd xmm2/m128, xmm1","XOP.128.09.W0 C2 /r","V","V","XOP","amd","w,r","","" +"VPHADDBQ xmm1, xmm2/m128","VPHADDBQ xmm2/m128, xmm1","vphaddbq xmm2/m128, xmm1","XOP.128.09.W0 C3 /r","V","V","XOP","amd","w,r","","" +"VPHADDBW xmm1, xmm2/m128","VPHADDBW xmm2/m128, xmm1","vphaddbw xmm2/m128, xmm1","XOP.128.09.W0 C1 /r","V","V","XOP","amd","w,r","","" +"VPHADDD xmm1, xmmV, xmm2/m128","VPHADDD xmm2/m128, xmmV, xmm1","vphaddd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 02 /r","V","V","AVX","","w,r,r","","" +"VPHADDD ymm1, ymmV, ymm2/m256","VPHADDD ymm2/m256, ymmV, ymm1","vphaddd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 02 /r","V","V","AVX2","","w,r,r","","" +"VPHADDDQ xmm1, xmm2/m128","VPHADDDQ xmm2/m128, xmm1","vphadddq xmm2/m128, xmm1","XOP.128.09.W0 CB /r","V","V","XOP","amd","w,r","","" +"VPHADDSW xmm1, xmmV, xmm2/m128","VPHADDSW xmm2/m128, xmmV, xmm1","vphaddsw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 03 /r","V","V","AVX","","w,r,r","","" +"VPHADDSW ymm1, ymmV, ymm2/m256","VPHADDSW ymm2/m256, ymmV, ymm1","vphaddsw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 03 /r","V","V","AVX2","","w,r,r","","" +"VPHADDUBD xmm1, xmm2/m128","VPHADDUBD xmm2/m128, xmm1","vphaddubd xmm2/m128, xmm1","XOP.128.09.W0 D2 /r","V","V","XOP","amd","w,r","","" +"VPHADDUBQ xmm1, xmm2/m128","VPHADDUBQ xmm2/m128, xmm1","vphaddubq xmm2/m128, xmm1","XOP.128.09.W0 D3 /r","V","V","XOP","amd","w,r","","" +"VPHADDUBW xmm1, xmm2/m128","VPHADDUBW xmm2/m128, xmm1","vphaddubw xmm2/m128, xmm1","XOP.128.09.W0 D1 /r","V","V","XOP","amd","w,r","","" +"VPHADDUDQ xmm1, xmm2/m128","VPHADDUDQ xmm2/m128, xmm1","vphaddudq xmm2/m128, xmm1","XOP.128.09.W0 DB /r","V","V","XOP","amd","w,r","","" +"VPHADDUWD xmm1, xmm2/m128","VPHADDUWD xmm2/m128, xmm1","vphadduwd xmm2/m128, xmm1","XOP.128.09.W0 D6 /r","V","V","XOP","amd","w,r","","" +"VPHADDUWQ xmm1, xmm2/m128","VPHADDUWQ xmm2/m128, xmm1","vphadduwq xmm2/m128, xmm1","XOP.128.09.W0 D7 /r","V","V","XOP","amd","w,r","","" +"VPHADDW xmm1, xmmV, xmm2/m128","VPHADDW xmm2/m128, xmmV, xmm1","vphaddw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 01 /r","V","V","AVX","","w,r,r","","" +"VPHADDW ymm1, ymmV, ymm2/m256","VPHADDW ymm2/m256, ymmV, ymm1","vphaddw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 01 /r","V","V","AVX2","","w,r,r","","" +"VPHADDWD xmm1, xmm2/m128","VPHADDWD xmm2/m128, xmm1","vphaddwd xmm2/m128, xmm1","XOP.128.09.W0 C6 /r","V","V","XOP","amd","w,r","","" +"VPHADDWQ xmm1, xmm2/m128","VPHADDWQ xmm2/m128, xmm1","vphaddwq xmm2/m128, xmm1","XOP.128.09.W0 C7 /r","V","V","XOP","amd","w,r","","" +"VPHMINPOSUW xmm1, xmm2/m128","VPHMINPOSUW xmm2/m128, xmm1","vphminposuw xmm2/m128, xmm1","VEX.128.66.0F38.WIG 41 /r","V","V","AVX","","w,r","","" +"VPHSUBBW xmm1, xmm2/m128","VPHSUBBW xmm2/m128, xmm1","vphsubbw xmm2/m128, xmm1","XOP.128.09.W0 E1 /r","V","V","XOP","amd","w,r","","" +"VPHSUBD xmm1, xmmV, xmm2/m128","VPHSUBD xmm2/m128, xmmV, xmm1","vphsubd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 06 /r","V","V","AVX","","w,r,r","","" +"VPHSUBD ymm1, ymmV, ymm2/m256","VPHSUBD ymm2/m256, ymmV, ymm1","vphsubd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 06 /r","V","V","AVX2","","w,r,r","","" +"VPHSUBDQ xmm1, xmm2/m128","VPHSUBDQ xmm2/m128, xmm1","vphsubdq 
xmm2/m128, xmm1","XOP.128.09.W0 E3 /r","V","V","XOP","amd","w,r","","" +"VPHSUBSW xmm1, xmmV, xmm2/m128","VPHSUBSW xmm2/m128, xmmV, xmm1","vphsubsw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 07 /r","V","V","AVX","","w,r,r","","" +"VPHSUBSW ymm1, ymmV, ymm2/m256","VPHSUBSW ymm2/m256, ymmV, ymm1","vphsubsw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 07 /r","V","V","AVX2","","w,r,r","","" +"VPHSUBW xmm1, xmmV, xmm2/m128","VPHSUBW xmm2/m128, xmmV, xmm1","vphsubw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 05 /r","V","V","AVX","","w,r,r","","" +"VPHSUBW ymm1, ymmV, ymm2/m256","VPHSUBW ymm2/m256, ymmV, ymm1","vphsubw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 05 /r","V","V","AVX2","","w,r,r","","" +"VPHSUBWD xmm1, xmm2/m128","VPHSUBWD xmm2/m128, xmm1","vphsubwd xmm2/m128, xmm1","XOP.128.09.W0 E2 /r","V","V","XOP","amd","w,r","","" +"VPINSRB xmm1, xmmV, r32/m8, imm8u","VPINSRB imm8u, r32/m8, xmmV, xmm1","vpinsrb imm8u, r32/m8, xmmV, xmm1","EVEX.NDS.128.66.0F3A.WIG 20 /r ib","V","V","AVX512BW+AVX512VL","scale1","w,r,r,r","","" +"VPINSRB xmm1, xmmV, r32/m8, imm8u","VPINSRB imm8u, r32/m8, xmmV, xmm1","vpinsrb imm8u, r32/m8, xmmV, xmm1","VEX.NDS.128.66.0F3A.WIG 20 /r ib","V","V","AVX","","w,r,r,r","","" +"VPINSRD xmm1, xmmV, r/m32, imm8u","VPINSRD imm8u, r/m32, xmmV, xmm1","vpinsrd imm8u, r/m32, xmmV, xmm1","EVEX.NDS.128.66.0F3A.W0 22 /r ib","V","V","AVX512DQ+AVX512VL","scale4","w,r,r,r","","" +"VPINSRD xmm1, xmmV, r/m32, imm8u","VPINSRD imm8u, r/m32, xmmV, xmm1","vpinsrd imm8u, r/m32, xmmV, xmm1","VEX.NDS.128.66.0F3A.W0 22 /r ib","V","V","AVX","","w,r,r,r","","" +"VPINSRQ xmm1, xmmV, r/m64, imm8u","VPINSRQ imm8u, r/m64, xmmV, xmm1","vpinsrq imm8u, r/m64, xmmV, xmm1","EVEX.NDS.128.66.0F3A.W1 22 /r ib","N.S.","V","AVX512DQ+AVX512VL","scale8","w,r,r,r","","" +"VPINSRQ xmm1, xmmV, r/m64, imm8u","VPINSRQ imm8u, r/m64, xmmV, xmm1","vpinsrq imm8u, r/m64, xmmV, xmm1","VEX.NDS.128.66.0F3A.W1 22 /r ib","N.S.","V","AVX","","w,r,r,r","","" +"VPINSRW xmm1, xmmV, r32/m16, imm8u","VPINSRW imm8u, r32/m16, xmmV, xmm1","vpinsrw imm8u, r32/m16, xmmV, xmm1","EVEX.NDS.128.66.0F.WIG C4 /r ib","V","V","AVX512BW+AVX512VL","scale2","w,r,r,r","","" +"VPINSRW xmm1, xmmV, r32/m16, imm8u","VPINSRW imm8u, r32/m16, xmmV, xmm1","vpinsrw imm8u, r32/m16, xmmV, xmm1","VEX.NDS.128.66.0F.WIG C4 /r ib","V","V","AVX","","w,r,r,r","","" +"VPLZCNTD xmm1, {k}{z}, xmm2/m128/m32bcst","VPLZCNTD xmm2/m128/m32bcst, {k}{z}, xmm1","vplzcntd xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.66.0F38.W0 44 /r","V","V","AVX512CD+AVX512VL","bscale4,scale16","w,r,r","","" +"VPLZCNTD ymm1, {k}{z}, ymm2/m256/m32bcst","VPLZCNTD ymm2/m256/m32bcst, {k}{z}, ymm1","vplzcntd ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.66.0F38.W0 44 /r","V","V","AVX512CD+AVX512VL","bscale4,scale32","w,r,r","","" +"VPLZCNTD zmm1, {k}{z}, zmm2/m512/m32bcst","VPLZCNTD zmm2/m512/m32bcst, {k}{z}, zmm1","vplzcntd zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W0 44 /r","V","V","AVX512CD","bscale4,scale64","w,r,r","","" +"VPLZCNTQ xmm1, {k}{z}, xmm2/m128/m64bcst","VPLZCNTQ xmm2/m128/m64bcst, {k}{z}, xmm1","vplzcntq xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.66.0F38.W1 44 /r","V","V","AVX512CD+AVX512VL","bscale8,scale16","w,r,r","","" +"VPLZCNTQ ymm1, {k}{z}, ymm2/m256/m64bcst","VPLZCNTQ ymm2/m256/m64bcst, {k}{z}, ymm1","vplzcntq ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.66.0F38.W1 44 /r","V","V","AVX512CD+AVX512VL","bscale8,scale32","w,r,r","","" +"VPLZCNTQ zmm1, {k}{z}, zmm2/m512/m64bcst","VPLZCNTQ zmm2/m512/m64bcst, {k}{z}, zmm1","vplzcntq zmm2/m512/m64bcst, {k}{z}, 
zmm1","EVEX.512.66.0F38.W1 44 /r","V","V","AVX512CD","bscale8,scale64","w,r,r","","" +"VPMACSDD xmm1, xmmV, xmm2/m128, xmmIH","VPMACSDD xmmIH, xmm2/m128, xmmV, xmm1","vpmacsdd xmmIH, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 9E /r /is4","V","V","XOP","amd","w,r,r,r","","" +"VPMACSDQH xmm1, xmmV, xmm2/m128, xmmIH","VPMACSDQH xmmIH, xmm2/m128, xmmV, xmm1","vpmacsdqh xmmIH, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 9F /r /is4","V","V","XOP","amd","w,r,r,r","","" +"VPMACSDQL xmm1, xmmV, xmm2/m128, xmmIH","VPMACSDQL xmmIH, xmm2/m128, xmmV, xmm1","vpmacsdql xmmIH, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 97 /r /is4","V","V","XOP","amd","w,r,r,r","","" +"VPMACSSDD xmm1, xmmV, xmm2/m128, xmmIH","VPMACSSDD xmmIH, xmm2/m128, xmmV, xmm1","vpmacssdd xmmIH, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 8E /r /is4","V","V","XOP","amd","w,r,r,r","","" +"VPMACSSDQH xmm1, xmmV, xmm2/m128, xmmIH","VPMACSSDQH xmmIH, xmm2/m128, xmmV, xmm1","vpmacssdqh xmmIH, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 8F /r /is4","V","V","XOP","amd","w,r,r,r","","" +"VPMACSSDQL xmm1, xmmV, xmm2/m128, xmmIH","VPMACSSDQL xmmIH, xmm2/m128, xmmV, xmm1","vpmacssdql xmmIH, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 87 /r /is4","V","V","XOP","amd","w,r,r,r","","" +"VPMACSSWD xmm1, xmmV, xmm2/m128, xmmIH","VPMACSSWD xmmIH, xmm2/m128, xmmV, xmm1","vpmacsswd xmmIH, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 86 /r /is4","V","V","XOP","amd","w,r,r,r","","" +"VPMACSSWW xmm1, xmmV, xmm2/m128, xmmIH","VPMACSSWW xmmIH, xmm2/m128, xmmV, xmm1","vpmacssww xmmIH, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 85 /r /is4","V","V","XOP","amd","w,r,r,r","","" +"VPMACSWD xmm1, xmmV, xmm2/m128, xmmIH","VPMACSWD xmmIH, xmm2/m128, xmmV, xmm1","vpmacswd xmmIH, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 96 /r /is4","V","V","XOP","amd","w,r,r,r","","" +"VPMACSWW xmm1, xmmV, xmm2/m128, xmmIH","VPMACSWW xmmIH, xmm2/m128, xmmV, xmm1","vpmacsww xmmIH, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 95 /r /is4","V","V","XOP","amd","w,r,r,r","","" +"VPMADCSSWD xmm1, xmmV, xmm2/m128, xmmIH","VPMADCSSWD xmmIH, xmm2/m128, xmmV, xmm1","vpmadcsswd xmmIH, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 A6 /r /is4","V","V","XOP","amd","w,r,r,r","","" +"VPMADCSWD xmm1, xmmV, xmm2/m128, xmmIH","VPMADCSWD xmmIH, xmm2/m128, xmmV, xmm1","vpmadcswd xmmIH, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 B6 /r /is4","V","V","XOP","amd","w,r,r,r","","" +"VPMADD52HUQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPMADD52HUQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpmadd52huq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 B5 /r","V","V","AVX512_IFMA+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VPMADD52HUQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPMADD52HUQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpmadd52huq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 B5 /r","V","V","AVX512_IFMA+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VPMADD52HUQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPMADD52HUQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpmadd52huq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 B5 /r","V","V","AVX512_IFMA","bscale8,scale64","rw,r,r,r","","" +"VPMADD52LUQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPMADD52LUQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpmadd52luq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 B4 /r","V","V","AVX512_IFMA+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VPMADD52LUQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPMADD52LUQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpmadd52luq ymm2/m256/m64bcst, ymmV, {k}{z}, 
ymm1","EVEX.DDS.256.66.0F38.W1 B4 /r","V","V","AVX512_IFMA+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VPMADD52LUQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPMADD52LUQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpmadd52luq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 B4 /r","V","V","AVX512_IFMA","bscale8,scale64","rw,r,r,r","","" +"VPMADDUBSW xmm1, xmmV, xmm2/m128","VPMADDUBSW xmm2/m128, xmmV, xmm1","vpmaddubsw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 04 /r","V","V","AVX","","w,r,r","","" +"VPMADDUBSW xmm1, {k}{z}, xmmV, xmm2/m128","VPMADDUBSW xmm2/m128, xmmV, {k}{z}, xmm1","vpmaddubsw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.WIG 04 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPMADDUBSW ymm1, ymmV, ymm2/m256","VPMADDUBSW ymm2/m256, ymmV, ymm1","vpmaddubsw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 04 /r","V","V","AVX2","","w,r,r","","" +"VPMADDUBSW ymm1, {k}{z}, ymmV, ymm2/m256","VPMADDUBSW ymm2/m256, ymmV, {k}{z}, ymm1","vpmaddubsw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.WIG 04 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPMADDUBSW zmm1, {k}{z}, zmmV, zmm2/m512","VPMADDUBSW zmm2/m512, zmmV, {k}{z}, zmm1","vpmaddubsw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.WIG 04 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPMADDWD xmm1, xmmV, xmm2/m128","VPMADDWD xmm2/m128, xmmV, xmm1","vpmaddwd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG F5 /r","V","V","AVX","","w,r,r","","" +"VPMADDWD xmm1, {k}{z}, xmmV, xmm2/m128","VPMADDWD xmm2/m128, xmmV, {k}{z}, xmm1","vpmaddwd xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG F5 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPMADDWD ymm1, ymmV, ymm2/m256","VPMADDWD ymm2/m256, ymmV, ymm1","vpmaddwd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG F5 /r","V","V","AVX2","","w,r,r","","" +"VPMADDWD ymm1, {k}{z}, ymmV, ymm2/m256","VPMADDWD ymm2/m256, ymmV, {k}{z}, ymm1","vpmaddwd ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG F5 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPMADDWD zmm1, {k}{z}, zmmV, zmm2/m512","VPMADDWD zmm2/m512, zmmV, {k}{z}, zmm1","vpmaddwd zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG F5 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPMASKMOVD xmm1, xmmV, m128","VPMASKMOVD m128, xmmV, xmm1","vpmaskmovd m128, xmmV, xmm1","VEX.NDS.128.66.0F38.W0 8C /r","V","V","AVX2","modrm_memonly","w,r,r","","" +"VPMASKMOVD ymm1, ymmV, m256","VPMASKMOVD m256, ymmV, ymm1","vpmaskmovd m256, ymmV, ymm1","VEX.NDS.256.66.0F38.W0 8C /r","V","V","AVX2","modrm_memonly","w,r,r","","" +"VPMASKMOVD m128, xmmV, xmm1","VPMASKMOVD xmm1, xmmV, m128","vpmaskmovd xmm1, xmmV, m128","VEX.NDS.128.66.0F38.W0 8E /r","V","V","AVX2","modrm_memonly","w,r,r","","" +"VPMASKMOVD m256, ymmV, ymm1","VPMASKMOVD ymm1, ymmV, m256","vpmaskmovd ymm1, ymmV, m256","VEX.NDS.256.66.0F38.W0 8E /r","V","V","AVX2","modrm_memonly","w,r,r","","" +"VPMASKMOVQ xmm1, xmmV, m128","VPMASKMOVQ m128, xmmV, xmm1","vpmaskmovq m128, xmmV, xmm1","VEX.NDS.128.66.0F38.W1 8C /r","V","V","AVX2","modrm_memonly","w,r,r","","" +"VPMASKMOVQ ymm1, ymmV, m256","VPMASKMOVQ m256, ymmV, ymm1","vpmaskmovq m256, ymmV, ymm1","VEX.NDS.256.66.0F38.W1 8C /r","V","V","AVX2","modrm_memonly","w,r,r","","" +"VPMASKMOVQ m128, xmmV, xmm1","VPMASKMOVQ xmm1, xmmV, m128","vpmaskmovq xmm1, xmmV, m128","VEX.NDS.128.66.0F38.W1 8E /r","V","V","AVX2","modrm_memonly","w,r,r","","" +"VPMASKMOVQ m256, ymmV, ymm1","VPMASKMOVQ ymm1, ymmV, m256","vpmaskmovq ymm1, ymmV, 
m256","VEX.NDS.256.66.0F38.W1 8E /r","V","V","AVX2","modrm_memonly","w,r,r","","" +"VPMAXSB xmm1, xmmV, xmm2/m128","VPMAXSB xmm2/m128, xmmV, xmm1","vpmaxsb xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 3C /r","V","V","AVX","","w,r,r","","" +"VPMAXSB xmm1, {k}{z}, xmmV, xmm2/m128","VPMAXSB xmm2/m128, xmmV, {k}{z}, xmm1","vpmaxsb xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.WIG 3C /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPMAXSB ymm1, ymmV, ymm2/m256","VPMAXSB ymm2/m256, ymmV, ymm1","vpmaxsb ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 3C /r","V","V","AVX2","","w,r,r","","" +"VPMAXSB ymm1, {k}{z}, ymmV, ymm2/m256","VPMAXSB ymm2/m256, ymmV, {k}{z}, ymm1","vpmaxsb ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.WIG 3C /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPMAXSB zmm1, {k}{z}, zmmV, zmm2/m512","VPMAXSB zmm2/m512, zmmV, {k}{z}, zmm1","vpmaxsb zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.WIG 3C /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPMAXSD xmm1, xmmV, xmm2/m128","VPMAXSD xmm2/m128, xmmV, xmm1","vpmaxsd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 3D /r","V","V","AVX","","w,r,r","","" +"VPMAXSD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPMAXSD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpmaxsd xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W0 3D /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPMAXSD ymm1, ymmV, ymm2/m256","VPMAXSD ymm2/m256, ymmV, ymm1","vpmaxsd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 3D /r","V","V","AVX2","","w,r,r","","" +"VPMAXSD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPMAXSD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpmaxsd ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 3D /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPMAXSD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPMAXSD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpmaxsd zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 3D /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPMAXSQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPMAXSQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpmaxsq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 3D /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPMAXSQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPMAXSQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpmaxsq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 3D /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPMAXSQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPMAXSQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpmaxsq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 3D /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPMAXSW xmm1, xmmV, xmm2/m128","VPMAXSW xmm2/m128, xmmV, xmm1","vpmaxsw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG EE /r","V","V","AVX","","w,r,r","","" +"VPMAXSW xmm1, {k}{z}, xmmV, xmm2/m128","VPMAXSW xmm2/m128, xmmV, {k}{z}, xmm1","vpmaxsw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG EE /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPMAXSW ymm1, ymmV, ymm2/m256","VPMAXSW ymm2/m256, ymmV, ymm1","vpmaxsw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG EE /r","V","V","AVX2","","w,r,r","","" +"VPMAXSW ymm1, {k}{z}, ymmV, ymm2/m256","VPMAXSW ymm2/m256, ymmV, {k}{z}, ymm1","vpmaxsw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG EE /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPMAXSW zmm1, {k}{z}, zmmV, zmm2/m512","VPMAXSW zmm2/m512, zmmV, {k}{z}, 
zmm1","vpmaxsw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG EE /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPMAXUB xmm1, xmmV, xmm2/m128","VPMAXUB xmm2/m128, xmmV, xmm1","vpmaxub xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG DE /r","V","V","AVX","","w,r,r","","" +"VPMAXUB xmm1, {k}{z}, xmmV, xmm2/m128","VPMAXUB xmm2/m128, xmmV, {k}{z}, xmm1","vpmaxub xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG DE /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPMAXUB ymm1, ymmV, ymm2/m256","VPMAXUB ymm2/m256, ymmV, ymm1","vpmaxub ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG DE /r","V","V","AVX2","","w,r,r","","" +"VPMAXUB ymm1, {k}{z}, ymmV, ymm2/m256","VPMAXUB ymm2/m256, ymmV, {k}{z}, ymm1","vpmaxub ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG DE /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPMAXUB zmm1, {k}{z}, zmmV, zmm2/m512","VPMAXUB zmm2/m512, zmmV, {k}{z}, zmm1","vpmaxub zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG DE /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPMAXUD xmm1, xmmV, xmm2/m128","VPMAXUD xmm2/m128, xmmV, xmm1","vpmaxud xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 3F /r","V","V","AVX","","w,r,r","","" +"VPMAXUD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPMAXUD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpmaxud xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W0 3F /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPMAXUD ymm1, ymmV, ymm2/m256","VPMAXUD ymm2/m256, ymmV, ymm1","vpmaxud ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 3F /r","V","V","AVX2","","w,r,r","","" +"VPMAXUD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPMAXUD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpmaxud ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 3F /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPMAXUD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPMAXUD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpmaxud zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 3F /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPMAXUQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPMAXUQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpmaxuq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 3F /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPMAXUQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPMAXUQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpmaxuq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 3F /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPMAXUQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPMAXUQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpmaxuq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 3F /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPMAXUW xmm1, xmmV, xmm2/m128","VPMAXUW xmm2/m128, xmmV, xmm1","vpmaxuw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 3E /r","V","V","AVX","","w,r,r","","" +"VPMAXUW xmm1, {k}{z}, xmmV, xmm2/m128","VPMAXUW xmm2/m128, xmmV, {k}{z}, xmm1","vpmaxuw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.WIG 3E /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPMAXUW ymm1, ymmV, ymm2/m256","VPMAXUW ymm2/m256, ymmV, ymm1","vpmaxuw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 3E /r","V","V","AVX2","","w,r,r","","" +"VPMAXUW ymm1, {k}{z}, ymmV, ymm2/m256","VPMAXUW ymm2/m256, ymmV, {k}{z}, ymm1","vpmaxuw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.WIG 3E /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPMAXUW zmm1, {k}{z}, zmmV, 
zmm2/m512","VPMAXUW zmm2/m512, zmmV, {k}{z}, zmm1","vpmaxuw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.WIG 3E /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPMINSB xmm1, xmmV, xmm2/m128","VPMINSB xmm2/m128, xmmV, xmm1","vpminsb xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 38 /r","V","V","AVX","","w,r,r","","" +"VPMINSB xmm1, {k}{z}, xmmV, xmm2/m128","VPMINSB xmm2/m128, xmmV, {k}{z}, xmm1","vpminsb xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.WIG 38 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPMINSB ymm1, ymmV, ymm2/m256","VPMINSB ymm2/m256, ymmV, ymm1","vpminsb ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 38 /r","V","V","AVX2","","w,r,r","","" +"VPMINSB ymm1, {k}{z}, ymmV, ymm2/m256","VPMINSB ymm2/m256, ymmV, {k}{z}, ymm1","vpminsb ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.WIG 38 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPMINSB zmm1, {k}{z}, zmmV, zmm2/m512","VPMINSB zmm2/m512, zmmV, {k}{z}, zmm1","vpminsb zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.WIG 38 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPMINSD xmm1, xmmV, xmm2/m128","VPMINSD xmm2/m128, xmmV, xmm1","vpminsd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 39 /r","V","V","AVX","","w,r,r","","" +"VPMINSD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPMINSD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpminsd xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W0 39 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPMINSD ymm1, ymmV, ymm2/m256","VPMINSD ymm2/m256, ymmV, ymm1","vpminsd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 39 /r","V","V","AVX2","","w,r,r","","" +"VPMINSD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPMINSD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpminsd ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 39 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPMINSD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPMINSD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpminsd zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 39 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPMINSQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPMINSQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpminsq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 39 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPMINSQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPMINSQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpminsq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 39 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPMINSQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPMINSQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpminsq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 39 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPMINSW xmm1, xmmV, xmm2/m128","VPMINSW xmm2/m128, xmmV, xmm1","vpminsw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG EA /r","V","V","AVX","","w,r,r","","" +"VPMINSW xmm1, {k}{z}, xmmV, xmm2/m128","VPMINSW xmm2/m128, xmmV, {k}{z}, xmm1","vpminsw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG EA /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPMINSW ymm1, ymmV, ymm2/m256","VPMINSW ymm2/m256, ymmV, ymm1","vpminsw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG EA /r","V","V","AVX2","","w,r,r","","" +"VPMINSW ymm1, {k}{z}, ymmV, ymm2/m256","VPMINSW ymm2/m256, ymmV, {k}{z}, ymm1","vpminsw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG EA 
/r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPMINSW zmm1, {k}{z}, zmmV, zmm2/m512","VPMINSW zmm2/m512, zmmV, {k}{z}, zmm1","vpminsw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG EA /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPMINUB xmm1, xmmV, xmm2/m128","VPMINUB xmm2/m128, xmmV, xmm1","vpminub xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG DA /r","V","V","AVX","","w,r,r","","" +"VPMINUB xmm1, {k}{z}, xmmV, xmm2/m128","VPMINUB xmm2/m128, xmmV, {k}{z}, xmm1","vpminub xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG DA /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPMINUB ymm1, ymmV, ymm2/m256","VPMINUB ymm2/m256, ymmV, ymm1","vpminub ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG DA /r","V","V","AVX2","","w,r,r","","" +"VPMINUB ymm1, {k}{z}, ymmV, ymm2/m256","VPMINUB ymm2/m256, ymmV, {k}{z}, ymm1","vpminub ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG DA /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPMINUB zmm1, {k}{z}, zmmV, zmm2/m512","VPMINUB zmm2/m512, zmmV, {k}{z}, zmm1","vpminub zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG DA /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPMINUD xmm1, xmmV, xmm2/m128","VPMINUD xmm2/m128, xmmV, xmm1","vpminud xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 3B /r","V","V","AVX","","w,r,r","","" +"VPMINUD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPMINUD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpminud xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W0 3B /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPMINUD ymm1, ymmV, ymm2/m256","VPMINUD ymm2/m256, ymmV, ymm1","vpminud ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 3B /r","V","V","AVX2","","w,r,r","","" +"VPMINUD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPMINUD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpminud ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 3B /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPMINUD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPMINUD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpminud zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 3B /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPMINUQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPMINUQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpminuq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 3B /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPMINUQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPMINUQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpminuq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 3B /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPMINUQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPMINUQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpminuq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 3B /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPMINUW xmm1, xmmV, xmm2/m128","VPMINUW xmm2/m128, xmmV, xmm1","vpminuw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 3A /r","V","V","AVX","","w,r,r","","" +"VPMINUW xmm1, {k}{z}, xmmV, xmm2/m128","VPMINUW xmm2/m128, xmmV, {k}{z}, xmm1","vpminuw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.WIG 3A /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPMINUW ymm1, ymmV, ymm2/m256","VPMINUW ymm2/m256, ymmV, ymm1","vpminuw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 3A /r","V","V","AVX2","","w,r,r","","" +"VPMINUW ymm1, {k}{z}, ymmV, ymm2/m256","VPMINUW ymm2/m256, ymmV, {k}{z}, ymm1","vpminuw ymm2/m256, ymmV, 
{k}{z}, ymm1","EVEX.NDS.256.66.0F38.WIG 3A /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPMINUW zmm1, {k}{z}, zmmV, zmm2/m512","VPMINUW zmm2/m512, zmmV, {k}{z}, zmm1","vpminuw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.WIG 3A /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPMOVB2M k1, xmm2","VPMOVB2M xmm2, k1","vpmovb2m xmm2, k1","EVEX.128.F3.0F38.W0 29 /r","V","V","AVX512BW+AVX512VL","modrm_regonly","w,r","","" +"VPMOVB2M k1, ymm2","VPMOVB2M ymm2, k1","vpmovb2m ymm2, k1","EVEX.256.F3.0F38.W0 29 /r","V","V","AVX512BW+AVX512VL","modrm_regonly","w,r","","" +"VPMOVB2M k1, zmm2","VPMOVB2M zmm2, k1","vpmovb2m zmm2, k1","EVEX.512.F3.0F38.W0 29 /r","V","V","AVX512BW","modrm_regonly","w,r","","" +"VPMOVD2M k1, xmm2","VPMOVD2M xmm2, k1","vpmovd2m xmm2, k1","EVEX.128.F3.0F38.W0 39 /r","V","V","AVX512DQ+AVX512VL","modrm_regonly","w,r","","" +"VPMOVD2M k1, ymm2","VPMOVD2M ymm2, k1","vpmovd2m ymm2, k1","EVEX.256.F3.0F38.W0 39 /r","V","V","AVX512DQ+AVX512VL","modrm_regonly","w,r","","" +"VPMOVD2M k1, zmm2","VPMOVD2M zmm2, k1","vpmovd2m zmm2, k1","EVEX.512.F3.0F38.W0 39 /r","V","V","AVX512DQ","modrm_regonly","w,r","","" +"VPMOVDB xmm2/m32, {k}{z}, xmm1","VPMOVDB xmm1, {k}{z}, xmm2/m32","vpmovdb xmm1, {k}{z}, xmm2/m32","EVEX.128.F3.0F38.W0 31 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPMOVDB xmm2/m64, {k}{z}, ymm1","VPMOVDB ymm1, {k}{z}, xmm2/m64","vpmovdb ymm1, {k}{z}, xmm2/m64","EVEX.256.F3.0F38.W0 31 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVDB xmm2/m128, {k}{z}, zmm1","VPMOVDB zmm1, {k}{z}, xmm2/m128","vpmovdb zmm1, {k}{z}, xmm2/m128","EVEX.512.F3.0F38.W0 31 /r","V","V","AVX512F","scale16","w,r,r","","" +"VPMOVDW xmm2/m64, {k}{z}, xmm1","VPMOVDW xmm1, {k}{z}, xmm2/m64","vpmovdw xmm1, {k}{z}, xmm2/m64","EVEX.128.F3.0F38.W0 33 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVDW xmm2/m128, {k}{z}, ymm1","VPMOVDW ymm1, {k}{z}, xmm2/m128","vpmovdw ymm1, {k}{z}, xmm2/m128","EVEX.256.F3.0F38.W0 33 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VPMOVDW ymm2/m256, {k}{z}, zmm1","VPMOVDW zmm1, {k}{z}, ymm2/m256","vpmovdw zmm1, {k}{z}, ymm2/m256","EVEX.512.F3.0F38.W0 33 /r","V","V","AVX512F","scale32","w,r,r","","" +"VPMOVM2B xmm1, k2","VPMOVM2B k2, xmm1","vpmovm2b k2, xmm1","EVEX.128.F3.0F38.W0 28 /r","V","V","AVX512BW+AVX512VL","modrm_regonly","w,r","","" +"VPMOVM2B ymm1, k2","VPMOVM2B k2, ymm1","vpmovm2b k2, ymm1","EVEX.256.F3.0F38.W0 28 /r","V","V","AVX512BW+AVX512VL","modrm_regonly","w,r","","" +"VPMOVM2B zmm1, k2","VPMOVM2B k2, zmm1","vpmovm2b k2, zmm1","EVEX.512.F3.0F38.W0 28 /r","V","V","AVX512BW","modrm_regonly","w,r","","" +"VPMOVM2D xmm1, k2","VPMOVM2D k2, xmm1","vpmovm2d k2, xmm1","EVEX.128.F3.0F38.W0 38 /r","V","V","AVX512DQ+AVX512VL","modrm_regonly","w,r","","" +"VPMOVM2D ymm1, k2","VPMOVM2D k2, ymm1","vpmovm2d k2, ymm1","EVEX.256.F3.0F38.W0 38 /r","V","V","AVX512DQ+AVX512VL","modrm_regonly","w,r","","" +"VPMOVM2D zmm1, k2","VPMOVM2D k2, zmm1","vpmovm2d k2, zmm1","EVEX.512.F3.0F38.W0 38 /r","V","V","AVX512DQ","modrm_regonly","w,r","","" +"VPMOVM2Q xmm1, k2","VPMOVM2Q k2, xmm1","vpmovm2q k2, xmm1","EVEX.128.F3.0F38.W1 38 /r","V","V","AVX512DQ+AVX512VL","modrm_regonly","w,r","","" +"VPMOVM2Q ymm1, k2","VPMOVM2Q k2, ymm1","vpmovm2q k2, ymm1","EVEX.256.F3.0F38.W1 38 /r","V","V","AVX512DQ+AVX512VL","modrm_regonly","w,r","","" +"VPMOVM2Q zmm1, k2","VPMOVM2Q k2, zmm1","vpmovm2q k2, zmm1","EVEX.512.F3.0F38.W1 38 /r","V","V","AVX512DQ","modrm_regonly","w,r","","" +"VPMOVM2W xmm1, k2","VPMOVM2W k2, xmm1","vpmovm2w k2, 
xmm1","EVEX.128.F3.0F38.W1 28 /r","V","V","AVX512BW+AVX512VL","modrm_regonly","w,r","","" +"VPMOVM2W ymm1, k2","VPMOVM2W k2, ymm1","vpmovm2w k2, ymm1","EVEX.256.F3.0F38.W1 28 /r","V","V","AVX512BW+AVX512VL","modrm_regonly","w,r","","" +"VPMOVM2W zmm1, k2","VPMOVM2W k2, zmm1","vpmovm2w k2, zmm1","EVEX.512.F3.0F38.W1 28 /r","V","V","AVX512BW","modrm_regonly","w,r","","" +"VPMOVMSKB r32, xmm2","VPMOVMSKB xmm2, r32","vpmovmskb xmm2, r32","VEX.128.66.0F.WIG D7 /r","V","V","AVX","modrm_regonly","w,r","","" +"VPMOVMSKB r32, ymm2","VPMOVMSKB ymm2, r32","vpmovmskb ymm2, r32","VEX.256.66.0F.WIG D7 /r","V","V","AVX2","modrm_regonly","w,r","","" +"VPMOVQ2M k1, xmm2","VPMOVQ2M xmm2, k1","vpmovq2m xmm2, k1","EVEX.128.F3.0F38.W1 39 /r","V","V","AVX512DQ+AVX512VL","modrm_regonly","w,r","","" +"VPMOVQ2M k1, ymm2","VPMOVQ2M ymm2, k1","vpmovq2m ymm2, k1","EVEX.256.F3.0F38.W1 39 /r","V","V","AVX512DQ+AVX512VL","modrm_regonly","w,r","","" +"VPMOVQ2M k1, zmm2","VPMOVQ2M zmm2, k1","vpmovq2m zmm2, k1","EVEX.512.F3.0F38.W1 39 /r","V","V","AVX512DQ","modrm_regonly","w,r","","" +"VPMOVQB xmm2/m16, {k}{z}, xmm1","VPMOVQB xmm1, {k}{z}, xmm2/m16","vpmovqb xmm1, {k}{z}, xmm2/m16","EVEX.128.F3.0F38.W0 32 /r","V","V","AVX512F+AVX512VL","scale2","w,r,r","","" +"VPMOVQB xmm2/m32, {k}{z}, ymm1","VPMOVQB ymm1, {k}{z}, xmm2/m32","vpmovqb ymm1, {k}{z}, xmm2/m32","EVEX.256.F3.0F38.W0 32 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPMOVQB xmm2/m64, {k}{z}, zmm1","VPMOVQB zmm1, {k}{z}, xmm2/m64","vpmovqb zmm1, {k}{z}, xmm2/m64","EVEX.512.F3.0F38.W0 32 /r","V","V","AVX512F","scale8","w,r,r","","" +"VPMOVQD xmm2/m64, {k}{z}, xmm1","VPMOVQD xmm1, {k}{z}, xmm2/m64","vpmovqd xmm1, {k}{z}, xmm2/m64","EVEX.128.F3.0F38.W0 35 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVQD xmm2/m128, {k}{z}, ymm1","VPMOVQD ymm1, {k}{z}, xmm2/m128","vpmovqd ymm1, {k}{z}, xmm2/m128","EVEX.256.F3.0F38.W0 35 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VPMOVQD ymm2/m256, {k}{z}, zmm1","VPMOVQD zmm1, {k}{z}, ymm2/m256","vpmovqd zmm1, {k}{z}, ymm2/m256","EVEX.512.F3.0F38.W0 35 /r","V","V","AVX512F","scale32","w,r,r","","" +"VPMOVQW xmm2/m32, {k}{z}, xmm1","VPMOVQW xmm1, {k}{z}, xmm2/m32","vpmovqw xmm1, {k}{z}, xmm2/m32","EVEX.128.F3.0F38.W0 34 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPMOVQW xmm2/m64, {k}{z}, ymm1","VPMOVQW ymm1, {k}{z}, xmm2/m64","vpmovqw ymm1, {k}{z}, xmm2/m64","EVEX.256.F3.0F38.W0 34 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVQW xmm2/m128, {k}{z}, zmm1","VPMOVQW zmm1, {k}{z}, xmm2/m128","vpmovqw zmm1, {k}{z}, xmm2/m128","EVEX.512.F3.0F38.W0 34 /r","V","V","AVX512F","scale16","w,r,r","","" +"VPMOVSDB xmm2/m32, {k}{z}, xmm1","VPMOVSDB xmm1, {k}{z}, xmm2/m32","vpmovsdb xmm1, {k}{z}, xmm2/m32","EVEX.128.F3.0F38.W0 21 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPMOVSDB xmm2/m64, {k}{z}, ymm1","VPMOVSDB ymm1, {k}{z}, xmm2/m64","vpmovsdb ymm1, {k}{z}, xmm2/m64","EVEX.256.F3.0F38.W0 21 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVSDB xmm2/m128, {k}{z}, zmm1","VPMOVSDB zmm1, {k}{z}, xmm2/m128","vpmovsdb zmm1, {k}{z}, xmm2/m128","EVEX.512.F3.0F38.W0 21 /r","V","V","AVX512F","scale16","w,r,r","","" +"VPMOVSDW xmm2/m64, {k}{z}, xmm1","VPMOVSDW xmm1, {k}{z}, xmm2/m64","vpmovsdw xmm1, {k}{z}, xmm2/m64","EVEX.128.F3.0F38.W0 23 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVSDW xmm2/m128, {k}{z}, ymm1","VPMOVSDW ymm1, {k}{z}, xmm2/m128","vpmovsdw ymm1, {k}{z}, xmm2/m128","EVEX.256.F3.0F38.W0 23 
/r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VPMOVSDW ymm2/m256, {k}{z}, zmm1","VPMOVSDW zmm1, {k}{z}, ymm2/m256","vpmovsdw zmm1, {k}{z}, ymm2/m256","EVEX.512.F3.0F38.W0 23 /r","V","V","AVX512F","scale32","w,r,r","","" +"VPMOVSQB xmm2/m16, {k}{z}, xmm1","VPMOVSQB xmm1, {k}{z}, xmm2/m16","vpmovsqb xmm1, {k}{z}, xmm2/m16","EVEX.128.F3.0F38.W0 22 /r","V","V","AVX512F+AVX512VL","scale2","w,r,r","","" +"VPMOVSQB xmm2/m32, {k}{z}, ymm1","VPMOVSQB ymm1, {k}{z}, xmm2/m32","vpmovsqb ymm1, {k}{z}, xmm2/m32","EVEX.256.F3.0F38.W0 22 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPMOVSQB xmm2/m64, {k}{z}, zmm1","VPMOVSQB zmm1, {k}{z}, xmm2/m64","vpmovsqb zmm1, {k}{z}, xmm2/m64","EVEX.512.F3.0F38.W0 22 /r","V","V","AVX512F","scale8","w,r,r","","" +"VPMOVSQD xmm2/m64, {k}{z}, xmm1","VPMOVSQD xmm1, {k}{z}, xmm2/m64","vpmovsqd xmm1, {k}{z}, xmm2/m64","EVEX.128.F3.0F38.W0 25 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVSQD xmm2/m128, {k}{z}, ymm1","VPMOVSQD ymm1, {k}{z}, xmm2/m128","vpmovsqd ymm1, {k}{z}, xmm2/m128","EVEX.256.F3.0F38.W0 25 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VPMOVSQD ymm2/m256, {k}{z}, zmm1","VPMOVSQD zmm1, {k}{z}, ymm2/m256","vpmovsqd zmm1, {k}{z}, ymm2/m256","EVEX.512.F3.0F38.W0 25 /r","V","V","AVX512F","scale32","w,r,r","","" +"VPMOVSQW xmm2/m32, {k}{z}, xmm1","VPMOVSQW xmm1, {k}{z}, xmm2/m32","vpmovsqw xmm1, {k}{z}, xmm2/m32","EVEX.128.F3.0F38.W0 24 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPMOVSQW xmm2/m64, {k}{z}, ymm1","VPMOVSQW ymm1, {k}{z}, xmm2/m64","vpmovsqw ymm1, {k}{z}, xmm2/m64","EVEX.256.F3.0F38.W0 24 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVSQW xmm2/m128, {k}{z}, zmm1","VPMOVSQW zmm1, {k}{z}, xmm2/m128","vpmovsqw zmm1, {k}{z}, xmm2/m128","EVEX.512.F3.0F38.W0 24 /r","V","V","AVX512F","scale16","w,r,r","","" +"VPMOVSWB xmm2/m64, {k}{z}, xmm1","VPMOVSWB xmm1, {k}{z}, xmm2/m64","vpmovswb xmm1, {k}{z}, xmm2/m64","EVEX.128.F3.0F38.W0 20 /r","V","V","AVX512BW+AVX512VL","scale8","w,r,r","","" +"VPMOVSWB xmm2/m128, {k}{z}, ymm1","VPMOVSWB ymm1, {k}{z}, xmm2/m128","vpmovswb ymm1, {k}{z}, xmm2/m128","EVEX.256.F3.0F38.W0 20 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r","","" +"VPMOVSWB ymm2/m256, {k}{z}, zmm1","VPMOVSWB zmm1, {k}{z}, ymm2/m256","vpmovswb zmm1, {k}{z}, ymm2/m256","EVEX.512.F3.0F38.W0 20 /r","V","V","AVX512BW","scale32","w,r,r","","" +"VPMOVSXBD zmm1, {k}{z}, xmm2/m128","VPMOVSXBD xmm2/m128, {k}{z}, zmm1","vpmovsxbd xmm2/m128, {k}{z}, zmm1","EVEX.512.66.0F38.WIG 21 /r","V","V","AVX512F","scale16","w,r,r","","" +"VPMOVSXBD xmm1, xmm2/m32","VPMOVSXBD xmm2/m32, xmm1","vpmovsxbd xmm2/m32, xmm1","VEX.128.66.0F38.WIG 21 /r","V","V","AVX","","w,r","","" +"VPMOVSXBD xmm1, {k}{z}, xmm2/m32","VPMOVSXBD xmm2/m32, {k}{z}, xmm1","vpmovsxbd xmm2/m32, {k}{z}, xmm1","EVEX.128.66.0F38.WIG 21 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPMOVSXBD ymm1, xmm2/m64","VPMOVSXBD xmm2/m64, ymm1","vpmovsxbd xmm2/m64, ymm1","VEX.256.66.0F38.WIG 21 /r","V","V","AVX2","","w,r","","" +"VPMOVSXBD ymm1, {k}{z}, xmm2/m64","VPMOVSXBD xmm2/m64, {k}{z}, ymm1","vpmovsxbd xmm2/m64, {k}{z}, ymm1","EVEX.256.66.0F38.WIG 21 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVSXBQ xmm1, xmm2/m16","VPMOVSXBQ xmm2/m16, xmm1","vpmovsxbq xmm2/m16, xmm1","VEX.128.66.0F38.WIG 22 /r","V","V","AVX","","w,r","","" +"VPMOVSXBQ xmm1, {k}{z}, xmm2/m16","VPMOVSXBQ xmm2/m16, {k}{z}, xmm1","vpmovsxbq xmm2/m16, {k}{z}, xmm1","EVEX.128.66.0F38.WIG 22 /r","V","V","AVX512F+AVX512VL","scale2","w,r,r","","" +"VPMOVSXBQ 
ymm1, xmm2/m32","VPMOVSXBQ xmm2/m32, ymm1","vpmovsxbq xmm2/m32, ymm1","VEX.256.66.0F38.WIG 22 /r","V","V","AVX2","","w,r","","" +"VPMOVSXBQ ymm1, {k}{z}, xmm2/m32","VPMOVSXBQ xmm2/m32, {k}{z}, ymm1","vpmovsxbq xmm2/m32, {k}{z}, ymm1","EVEX.256.66.0F38.WIG 22 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPMOVSXBQ zmm1, {k}{z}, xmm2/m64","VPMOVSXBQ xmm2/m64, {k}{z}, zmm1","vpmovsxbq xmm2/m64, {k}{z}, zmm1","EVEX.512.66.0F38.WIG 22 /r","V","V","AVX512F","scale8","w,r,r","","" +"VPMOVSXBW ymm1, xmm2/m128","VPMOVSXBW xmm2/m128, ymm1","vpmovsxbw xmm2/m128, ymm1","VEX.256.66.0F38.WIG 20 /r","V","V","AVX2","","w,r","","" +"VPMOVSXBW ymm1, {k}{z}, xmm2/m128","VPMOVSXBW xmm2/m128, {k}{z}, ymm1","vpmovsxbw xmm2/m128, {k}{z}, ymm1","EVEX.256.66.0F38.WIG 20 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r","","" +"VPMOVSXBW xmm1, xmm2/m64","VPMOVSXBW xmm2/m64, xmm1","vpmovsxbw xmm2/m64, xmm1","VEX.128.66.0F38.WIG 20 /r","V","V","AVX","","w,r","","" +"VPMOVSXBW xmm1, {k}{z}, xmm2/m64","VPMOVSXBW xmm2/m64, {k}{z}, xmm1","vpmovsxbw xmm2/m64, {k}{z}, xmm1","EVEX.128.66.0F38.WIG 20 /r","V","V","AVX512BW+AVX512VL","scale8","w,r,r","","" +"VPMOVSXBW zmm1, {k}{z}, ymm2/m256","VPMOVSXBW ymm2/m256, {k}{z}, zmm1","vpmovsxbw ymm2/m256, {k}{z}, zmm1","EVEX.512.66.0F38.WIG 20 /r","V","V","AVX512BW","scale32","w,r,r","","" +"VPMOVSXDQ ymm1, xmm2/m128","VPMOVSXDQ xmm2/m128, ymm1","vpmovsxdq xmm2/m128, ymm1","VEX.256.66.0F38.WIG 25 /r","V","V","AVX2","","w,r","","" +"VPMOVSXDQ ymm1, {k}{z}, xmm2/m128","VPMOVSXDQ xmm2/m128, {k}{z}, ymm1","vpmovsxdq xmm2/m128, {k}{z}, ymm1","EVEX.256.66.0F38.W0 25 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VPMOVSXDQ xmm1, xmm2/m64","VPMOVSXDQ xmm2/m64, xmm1","vpmovsxdq xmm2/m64, xmm1","VEX.128.66.0F38.WIG 25 /r","V","V","AVX","","w,r","","" +"VPMOVSXDQ xmm1, {k}{z}, xmm2/m64","VPMOVSXDQ xmm2/m64, {k}{z}, xmm1","vpmovsxdq xmm2/m64, {k}{z}, xmm1","EVEX.128.66.0F38.W0 25 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVSXDQ zmm1, {k}{z}, ymm2/m256","VPMOVSXDQ ymm2/m256, {k}{z}, zmm1","vpmovsxdq ymm2/m256, {k}{z}, zmm1","EVEX.512.66.0F38.W0 25 /r","V","V","AVX512F","scale32","w,r,r","","" +"VPMOVSXWD ymm1, xmm2/m128","VPMOVSXWD xmm2/m128, ymm1","vpmovsxwd xmm2/m128, ymm1","VEX.256.66.0F38.WIG 23 /r","V","V","AVX2","","w,r","","" +"VPMOVSXWD ymm1, {k}{z}, xmm2/m128","VPMOVSXWD xmm2/m128, {k}{z}, ymm1","vpmovsxwd xmm2/m128, {k}{z}, ymm1","EVEX.256.66.0F38.WIG 23 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VPMOVSXWD xmm1, xmm2/m64","VPMOVSXWD xmm2/m64, xmm1","vpmovsxwd xmm2/m64, xmm1","VEX.128.66.0F38.WIG 23 /r","V","V","AVX","","w,r","","" +"VPMOVSXWD xmm1, {k}{z}, xmm2/m64","VPMOVSXWD xmm2/m64, {k}{z}, xmm1","vpmovsxwd xmm2/m64, {k}{z}, xmm1","EVEX.128.66.0F38.WIG 23 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVSXWD zmm1, {k}{z}, ymm2/m256","VPMOVSXWD ymm2/m256, {k}{z}, zmm1","vpmovsxwd ymm2/m256, {k}{z}, zmm1","EVEX.512.66.0F38.WIG 23 /r","V","V","AVX512F","scale32","w,r,r","","" +"VPMOVSXWQ zmm1, {k}{z}, xmm2/m128","VPMOVSXWQ xmm2/m128, {k}{z}, zmm1","vpmovsxwq xmm2/m128, {k}{z}, zmm1","EVEX.512.66.0F38.WIG 24 /r","V","V","AVX512F","scale16","w,r,r","","" +"VPMOVSXWQ xmm1, xmm2/m32","VPMOVSXWQ xmm2/m32, xmm1","vpmovsxwq xmm2/m32, xmm1","VEX.128.66.0F38.WIG 24 /r","V","V","AVX","","w,r","","" +"VPMOVSXWQ xmm1, {k}{z}, xmm2/m32","VPMOVSXWQ xmm2/m32, {k}{z}, xmm1","vpmovsxwq xmm2/m32, {k}{z}, xmm1","EVEX.128.66.0F38.WIG 24 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPMOVSXWQ ymm1, xmm2/m64","VPMOVSXWQ xmm2/m64, 
ymm1","vpmovsxwq xmm2/m64, ymm1","VEX.256.66.0F38.WIG 24 /r","V","V","AVX2","","w,r","","" +"VPMOVSXWQ ymm1, {k}{z}, xmm2/m64","VPMOVSXWQ xmm2/m64, {k}{z}, ymm1","vpmovsxwq xmm2/m64, {k}{z}, ymm1","EVEX.256.66.0F38.WIG 24 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVUSDB xmm2/m32, {k}{z}, xmm1","VPMOVUSDB xmm1, {k}{z}, xmm2/m32","vpmovusdb xmm1, {k}{z}, xmm2/m32","EVEX.128.F3.0F38.W0 11 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPMOVUSDB xmm2/m64, {k}{z}, ymm1","VPMOVUSDB ymm1, {k}{z}, xmm2/m64","vpmovusdb ymm1, {k}{z}, xmm2/m64","EVEX.256.F3.0F38.W0 11 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVUSDB xmm2/m128, {k}{z}, zmm1","VPMOVUSDB zmm1, {k}{z}, xmm2/m128","vpmovusdb zmm1, {k}{z}, xmm2/m128","EVEX.512.F3.0F38.W0 11 /r","V","V","AVX512F","scale16","w,r,r","","" +"VPMOVUSDW xmm2/m64, {k}{z}, xmm1","VPMOVUSDW xmm1, {k}{z}, xmm2/m64","vpmovusdw xmm1, {k}{z}, xmm2/m64","EVEX.128.F3.0F38.W0 13 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVUSDW xmm2/m128, {k}{z}, ymm1","VPMOVUSDW ymm1, {k}{z}, xmm2/m128","vpmovusdw ymm1, {k}{z}, xmm2/m128","EVEX.256.F3.0F38.W0 13 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VPMOVUSDW ymm2/m256, {k}{z}, zmm1","VPMOVUSDW zmm1, {k}{z}, ymm2/m256","vpmovusdw zmm1, {k}{z}, ymm2/m256","EVEX.512.F3.0F38.W0 13 /r","V","V","AVX512F","scale32","w,r,r","","" +"VPMOVUSQB xmm2/m16, {k}{z}, xmm1","VPMOVUSQB xmm1, {k}{z}, xmm2/m16","vpmovusqb xmm1, {k}{z}, xmm2/m16","EVEX.128.F3.0F38.W0 12 /r","V","V","AVX512F+AVX512VL","scale2","w,r,r","","" +"VPMOVUSQB xmm2/m32, {k}{z}, ymm1","VPMOVUSQB ymm1, {k}{z}, xmm2/m32","vpmovusqb ymm1, {k}{z}, xmm2/m32","EVEX.256.F3.0F38.W0 12 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPMOVUSQB xmm2/m64, {k}{z}, zmm1","VPMOVUSQB zmm1, {k}{z}, xmm2/m64","vpmovusqb zmm1, {k}{z}, xmm2/m64","EVEX.512.F3.0F38.W0 12 /r","V","V","AVX512F","scale8","w,r,r","","" +"VPMOVUSQD xmm2/m64, {k}{z}, xmm1","VPMOVUSQD xmm1, {k}{z}, xmm2/m64","vpmovusqd xmm1, {k}{z}, xmm2/m64","EVEX.128.F3.0F38.W0 15 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVUSQD xmm2/m128, {k}{z}, ymm1","VPMOVUSQD ymm1, {k}{z}, xmm2/m128","vpmovusqd ymm1, {k}{z}, xmm2/m128","EVEX.256.F3.0F38.W0 15 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VPMOVUSQD ymm2/m256, {k}{z}, zmm1","VPMOVUSQD zmm1, {k}{z}, ymm2/m256","vpmovusqd zmm1, {k}{z}, ymm2/m256","EVEX.512.F3.0F38.W0 15 /r","V","V","AVX512F","scale32","w,r,r","","" +"VPMOVUSQW xmm2/m32, {k}{z}, xmm1","VPMOVUSQW xmm1, {k}{z}, xmm2/m32","vpmovusqw xmm1, {k}{z}, xmm2/m32","EVEX.128.F3.0F38.W0 14 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPMOVUSQW xmm2/m64, {k}{z}, ymm1","VPMOVUSQW ymm1, {k}{z}, xmm2/m64","vpmovusqw ymm1, {k}{z}, xmm2/m64","EVEX.256.F3.0F38.W0 14 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVUSQW xmm2/m128, {k}{z}, zmm1","VPMOVUSQW zmm1, {k}{z}, xmm2/m128","vpmovusqw zmm1, {k}{z}, xmm2/m128","EVEX.512.F3.0F38.W0 14 /r","V","V","AVX512F","scale16","w,r,r","","" +"VPMOVUSWB xmm2/m64, {k}{z}, xmm1","VPMOVUSWB xmm1, {k}{z}, xmm2/m64","vpmovuswb xmm1, {k}{z}, xmm2/m64","EVEX.128.F3.0F38.W0 10 /r","V","V","AVX512BW+AVX512VL","scale8","w,r,r","","" +"VPMOVUSWB xmm2/m128, {k}{z}, ymm1","VPMOVUSWB ymm1, {k}{z}, xmm2/m128","vpmovuswb ymm1, {k}{z}, xmm2/m128","EVEX.256.F3.0F38.W0 10 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r","","" +"VPMOVUSWB ymm2/m256, {k}{z}, zmm1","VPMOVUSWB zmm1, {k}{z}, ymm2/m256","vpmovuswb zmm1, {k}{z}, ymm2/m256","EVEX.512.F3.0F38.W0 10 
/r","V","V","AVX512BW","scale32","w,r,r","","" +"VPMOVW2M k1, xmm2","VPMOVW2M xmm2, k1","vpmovw2m xmm2, k1","EVEX.128.F3.0F38.W1 29 /r","V","V","AVX512BW+AVX512VL","modrm_regonly","w,r","","" +"VPMOVW2M k1, ymm2","VPMOVW2M ymm2, k1","vpmovw2m ymm2, k1","EVEX.256.F3.0F38.W1 29 /r","V","V","AVX512BW+AVX512VL","modrm_regonly","w,r","","" +"VPMOVW2M k1, zmm2","VPMOVW2M zmm2, k1","vpmovw2m zmm2, k1","EVEX.512.F3.0F38.W1 29 /r","V","V","AVX512BW","modrm_regonly","w,r","","" +"VPMOVWB xmm2/m64, {k}{z}, xmm1","VPMOVWB xmm1, {k}{z}, xmm2/m64","vpmovwb xmm1, {k}{z}, xmm2/m64","EVEX.128.F3.0F38.W0 30 /r","V","V","AVX512BW+AVX512VL","scale8","w,r,r","","" +"VPMOVWB xmm2/m128, {k}{z}, ymm1","VPMOVWB ymm1, {k}{z}, xmm2/m128","vpmovwb ymm1, {k}{z}, xmm2/m128","EVEX.256.F3.0F38.W0 30 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r","","" +"VPMOVWB ymm2/m256, {k}{z}, zmm1","VPMOVWB zmm1, {k}{z}, ymm2/m256","vpmovwb zmm1, {k}{z}, ymm2/m256","EVEX.512.F3.0F38.W0 30 /r","V","V","AVX512BW","scale32","w,r,r","","" +"VPMOVZXBD zmm1, {k}{z}, xmm2/m128","VPMOVZXBD xmm2/m128, {k}{z}, zmm1","vpmovzxbd xmm2/m128, {k}{z}, zmm1","EVEX.512.66.0F38.WIG 31 /r","V","V","AVX512F","scale16","w,r,r","","" +"VPMOVZXBD xmm1, xmm2/m32","VPMOVZXBD xmm2/m32, xmm1","vpmovzxbd xmm2/m32, xmm1","VEX.128.66.0F38.WIG 31 /r","V","V","AVX","","w,r","","" +"VPMOVZXBD xmm1, {k}{z}, xmm2/m32","VPMOVZXBD xmm2/m32, {k}{z}, xmm1","vpmovzxbd xmm2/m32, {k}{z}, xmm1","EVEX.128.66.0F38.WIG 31 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPMOVZXBD ymm1, xmm2/m64","VPMOVZXBD xmm2/m64, ymm1","vpmovzxbd xmm2/m64, ymm1","VEX.256.66.0F38.WIG 31 /r","V","V","AVX2","","w,r","","" +"VPMOVZXBD ymm1, {k}{z}, xmm2/m64","VPMOVZXBD xmm2/m64, {k}{z}, ymm1","vpmovzxbd xmm2/m64, {k}{z}, ymm1","EVEX.256.66.0F38.WIG 31 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVZXBQ xmm1, xmm2/m16","VPMOVZXBQ xmm2/m16, xmm1","vpmovzxbq xmm2/m16, xmm1","VEX.128.66.0F38.WIG 32 /r","V","V","AVX","","w,r","","" +"VPMOVZXBQ xmm1, {k}{z}, xmm2/m16","VPMOVZXBQ xmm2/m16, {k}{z}, xmm1","vpmovzxbq xmm2/m16, {k}{z}, xmm1","EVEX.128.66.0F38.WIG 32 /r","V","V","AVX512F+AVX512VL","scale2","w,r,r","","" +"VPMOVZXBQ ymm1, xmm2/m32","VPMOVZXBQ xmm2/m32, ymm1","vpmovzxbq xmm2/m32, ymm1","VEX.256.66.0F38.WIG 32 /r","V","V","AVX2","","w,r","","" +"VPMOVZXBQ ymm1, {k}{z}, xmm2/m32","VPMOVZXBQ xmm2/m32, {k}{z}, ymm1","vpmovzxbq xmm2/m32, {k}{z}, ymm1","EVEX.256.66.0F38.WIG 32 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPMOVZXBQ zmm1, {k}{z}, xmm2/m64","VPMOVZXBQ xmm2/m64, {k}{z}, zmm1","vpmovzxbq xmm2/m64, {k}{z}, zmm1","EVEX.512.66.0F38.WIG 32 /r","V","V","AVX512F","scale8","w,r,r","","" +"VPMOVZXBW ymm1, xmm2/m128","VPMOVZXBW xmm2/m128, ymm1","vpmovzxbw xmm2/m128, ymm1","VEX.256.66.0F38.WIG 30 /r","V","V","AVX2","","w,r","","" +"VPMOVZXBW ymm1, {k}{z}, xmm2/m128","VPMOVZXBW xmm2/m128, {k}{z}, ymm1","vpmovzxbw xmm2/m128, {k}{z}, ymm1","EVEX.256.66.0F38.WIG 30 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r","","" +"VPMOVZXBW xmm1, xmm2/m64","VPMOVZXBW xmm2/m64, xmm1","vpmovzxbw xmm2/m64, xmm1","VEX.128.66.0F38.WIG 30 /r","V","V","AVX","","w,r","","" +"VPMOVZXBW xmm1, {k}{z}, xmm2/m64","VPMOVZXBW xmm2/m64, {k}{z}, xmm1","vpmovzxbw xmm2/m64, {k}{z}, xmm1","EVEX.128.66.0F38.WIG 30 /r","V","V","AVX512BW+AVX512VL","scale8","w,r,r","","" +"VPMOVZXBW zmm1, {k}{z}, ymm2/m256","VPMOVZXBW ymm2/m256, {k}{z}, zmm1","vpmovzxbw ymm2/m256, {k}{z}, zmm1","EVEX.512.66.0F38.WIG 30 /r","V","V","AVX512BW","scale32","w,r,r","","" +"VPMOVZXDQ ymm1, xmm2/m128","VPMOVZXDQ xmm2/m128, 
ymm1","vpmovzxdq xmm2/m128, ymm1","VEX.256.66.0F38.WIG 35 /r","V","V","AVX2","","w,r","","" +"VPMOVZXDQ ymm1, {k}{z}, xmm2/m128","VPMOVZXDQ xmm2/m128, {k}{z}, ymm1","vpmovzxdq xmm2/m128, {k}{z}, ymm1","EVEX.256.66.0F38.W0 35 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VPMOVZXDQ xmm1, xmm2/m64","VPMOVZXDQ xmm2/m64, xmm1","vpmovzxdq xmm2/m64, xmm1","VEX.128.66.0F38.WIG 35 /r","V","V","AVX","","w,r","","" +"VPMOVZXDQ xmm1, {k}{z}, xmm2/m64","VPMOVZXDQ xmm2/m64, {k}{z}, xmm1","vpmovzxdq xmm2/m64, {k}{z}, xmm1","EVEX.128.66.0F38.W0 35 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVZXDQ zmm1, {k}{z}, ymm2/m256","VPMOVZXDQ ymm2/m256, {k}{z}, zmm1","vpmovzxdq ymm2/m256, {k}{z}, zmm1","EVEX.512.66.0F38.W0 35 /r","V","V","AVX512F","scale32","w,r,r","","" +"VPMOVZXWD ymm1, xmm2/m128","VPMOVZXWD xmm2/m128, ymm1","vpmovzxwd xmm2/m128, ymm1","VEX.256.66.0F38.WIG 33 /r","V","V","AVX2","","w,r","","" +"VPMOVZXWD ymm1, {k}{z}, xmm2/m128","VPMOVZXWD xmm2/m128, {k}{z}, ymm1","vpmovzxwd xmm2/m128, {k}{z}, ymm1","EVEX.256.66.0F38.WIG 33 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r","","" +"VPMOVZXWD xmm1, xmm2/m64","VPMOVZXWD xmm2/m64, xmm1","vpmovzxwd xmm2/m64, xmm1","VEX.128.66.0F38.WIG 33 /r","V","V","AVX","","w,r","","" +"VPMOVZXWD xmm1, {k}{z}, xmm2/m64","VPMOVZXWD xmm2/m64, {k}{z}, xmm1","vpmovzxwd xmm2/m64, {k}{z}, xmm1","EVEX.128.66.0F38.WIG 33 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMOVZXWD zmm1, {k}{z}, ymm2/m256","VPMOVZXWD ymm2/m256, {k}{z}, zmm1","vpmovzxwd ymm2/m256, {k}{z}, zmm1","EVEX.512.66.0F38.WIG 33 /r","V","V","AVX512F","scale32","w,r,r","","" +"VPMOVZXWQ zmm1, {k}{z}, xmm2/m128","VPMOVZXWQ xmm2/m128, {k}{z}, zmm1","vpmovzxwq xmm2/m128, {k}{z}, zmm1","EVEX.512.66.0F38.WIG 34 /r","V","V","AVX512F","scale16","w,r,r","","" +"VPMOVZXWQ xmm1, xmm2/m32","VPMOVZXWQ xmm2/m32, xmm1","vpmovzxwq xmm2/m32, xmm1","VEX.128.66.0F38.WIG 34 /r","V","V","AVX","","w,r","","" +"VPMOVZXWQ xmm1, {k}{z}, xmm2/m32","VPMOVZXWQ xmm2/m32, {k}{z}, xmm1","vpmovzxwq xmm2/m32, {k}{z}, xmm1","EVEX.128.66.0F38.WIG 34 /r","V","V","AVX512F+AVX512VL","scale4","w,r,r","","" +"VPMOVZXWQ ymm1, xmm2/m64","VPMOVZXWQ xmm2/m64, ymm1","vpmovzxwq xmm2/m64, ymm1","VEX.256.66.0F38.WIG 34 /r","V","V","AVX2","","w,r","","" +"VPMOVZXWQ ymm1, {k}{z}, xmm2/m64","VPMOVZXWQ xmm2/m64, {k}{z}, ymm1","vpmovzxwq xmm2/m64, {k}{z}, ymm1","EVEX.256.66.0F38.WIG 34 /r","V","V","AVX512F+AVX512VL","scale8","w,r,r","","" +"VPMULDQ xmm1, xmmV, xmm2/m128","VPMULDQ xmm2/m128, xmmV, xmm1","vpmuldq xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 28 /r","V","V","AVX","","w,r,r","","" +"VPMULDQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPMULDQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpmuldq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 28 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPMULDQ ymm1, ymmV, ymm2/m256","VPMULDQ ymm2/m256, ymmV, ymm1","vpmuldq ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 28 /r","V","V","AVX2","","w,r,r","","" +"VPMULDQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPMULDQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpmuldq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 28 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPMULDQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPMULDQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpmuldq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 28 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPMULHRSW xmm1, xmmV, xmm2/m128","VPMULHRSW xmm2/m128, xmmV, xmm1","vpmulhrsw 
xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 0B /r","V","V","AVX","","w,r,r","","" +"VPMULHRSW xmm1, {k}{z}, xmmV, xmm2/m128","VPMULHRSW xmm2/m128, xmmV, {k}{z}, xmm1","vpmulhrsw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.WIG 0B /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPMULHRSW ymm1, ymmV, ymm2/m256","VPMULHRSW ymm2/m256, ymmV, ymm1","vpmulhrsw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 0B /r","V","V","AVX2","","w,r,r","","" +"VPMULHRSW ymm1, {k}{z}, ymmV, ymm2/m256","VPMULHRSW ymm2/m256, ymmV, {k}{z}, ymm1","vpmulhrsw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.WIG 0B /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPMULHRSW zmm1, {k}{z}, zmmV, zmm2/m512","VPMULHRSW zmm2/m512, zmmV, {k}{z}, zmm1","vpmulhrsw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.WIG 0B /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPMULHUW xmm1, xmmV, xmm2/m128","VPMULHUW xmm2/m128, xmmV, xmm1","vpmulhuw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG E4 /r","V","V","AVX","","w,r,r","","" +"VPMULHUW xmm1, {k}{z}, xmmV, xmm2/m128","VPMULHUW xmm2/m128, xmmV, {k}{z}, xmm1","vpmulhuw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG E4 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPMULHUW ymm1, ymmV, ymm2/m256","VPMULHUW ymm2/m256, ymmV, ymm1","vpmulhuw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG E4 /r","V","V","AVX2","","w,r,r","","" +"VPMULHUW ymm1, {k}{z}, ymmV, ymm2/m256","VPMULHUW ymm2/m256, ymmV, {k}{z}, ymm1","vpmulhuw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG E4 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPMULHUW zmm1, {k}{z}, zmmV, zmm2/m512","VPMULHUW zmm2/m512, zmmV, {k}{z}, zmm1","vpmulhuw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG E4 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPMULHW xmm1, xmmV, xmm2/m128","VPMULHW xmm2/m128, xmmV, xmm1","vpmulhw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG E5 /r","V","V","AVX","","w,r,r","","" +"VPMULHW xmm1, {k}{z}, xmmV, xmm2/m128","VPMULHW xmm2/m128, xmmV, {k}{z}, xmm1","vpmulhw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG E5 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPMULHW ymm1, ymmV, ymm2/m256","VPMULHW ymm2/m256, ymmV, ymm1","vpmulhw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG E5 /r","V","V","AVX2","","w,r,r","","" +"VPMULHW ymm1, {k}{z}, ymmV, ymm2/m256","VPMULHW ymm2/m256, ymmV, {k}{z}, ymm1","vpmulhw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG E5 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPMULHW zmm1, {k}{z}, zmmV, zmm2/m512","VPMULHW zmm2/m512, zmmV, {k}{z}, zmm1","vpmulhw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG E5 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPMULLD xmm1, xmmV, xmm2/m128","VPMULLD xmm2/m128, xmmV, xmm1","vpmulld xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 40 /r","V","V","AVX","","w,r,r","","" +"VPMULLD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPMULLD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpmulld xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W0 40 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPMULLD ymm1, ymmV, ymm2/m256","VPMULLD ymm2/m256, ymmV, ymm1","vpmulld ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 40 /r","V","V","AVX2","","w,r,r","","" +"VPMULLD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPMULLD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpmulld ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 40 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" 
+"VPMULLD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPMULLD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpmulld zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 40 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPMULLQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPMULLQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpmullq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 40 /r","V","V","AVX512DQ+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPMULLQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPMULLQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpmullq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 40 /r","V","V","AVX512DQ+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPMULLQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPMULLQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpmullq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 40 /r","V","V","AVX512DQ","bscale8,scale64","w,r,r,r","","" +"VPMULLW xmm1, xmmV, xmm2/m128","VPMULLW xmm2/m128, xmmV, xmm1","vpmullw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG D5 /r","V","V","AVX","","w,r,r","","" +"VPMULLW xmm1, {k}{z}, xmmV, xmm2/m128","VPMULLW xmm2/m128, xmmV, {k}{z}, xmm1","vpmullw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG D5 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPMULLW ymm1, ymmV, ymm2/m256","VPMULLW ymm2/m256, ymmV, ymm1","vpmullw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG D5 /r","V","V","AVX2","","w,r,r","","" +"VPMULLW ymm1, {k}{z}, ymmV, ymm2/m256","VPMULLW ymm2/m256, ymmV, {k}{z}, ymm1","vpmullw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG D5 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPMULLW zmm1, {k}{z}, zmmV, zmm2/m512","VPMULLW zmm2/m512, zmmV, {k}{z}, zmm1","vpmullw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG D5 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPMULTISHIFTQB xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPMULTISHIFTQB xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpmultishiftqb xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 83 /r","V","V","AVX512_VBMI+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPMULTISHIFTQB ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPMULTISHIFTQB ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpmultishiftqb ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 83 /r","V","V","AVX512_VBMI+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPMULTISHIFTQB zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPMULTISHIFTQB zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpmultishiftqb zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 83 /r","V","V","AVX512_VBMI","bscale8,scale64","w,r,r,r","","" +"VPMULUDQ xmm1, xmmV, xmm2/m128","VPMULUDQ xmm2/m128, xmmV, xmm1","vpmuludq xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG F4 /r","V","V","AVX","","w,r,r","","" +"VPMULUDQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPMULUDQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpmuludq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 F4 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPMULUDQ ymm1, ymmV, ymm2/m256","VPMULUDQ ymm2/m256, ymmV, ymm1","vpmuludq ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG F4 /r","V","V","AVX2","","w,r,r","","" +"VPMULUDQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPMULUDQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpmuludq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 F4 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPMULUDQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPMULUDQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpmuludq 
zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 F4 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPOPCNTB xmm1, {k}{z}, xmm2/m128","VPOPCNTB xmm2/m128, {k}{z}, xmm1","vpopcntb xmm2/m128, {k}{z}, xmm1","EVEX.128.66.0F38.W0 54 /r","V","V","AVX512_BITALG+AVX512VL","scale16","w,r,r","","" +"VPOPCNTB ymm1, {k}{z}, ymm2/m256","VPOPCNTB ymm2/m256, {k}{z}, ymm1","vpopcntb ymm2/m256, {k}{z}, ymm1","EVEX.256.66.0F38.W0 54 /r","V","V","AVX512_BITALG+AVX512VL","scale32","w,r,r","","" +"VPOPCNTB zmm1, {k}{z}, zmm2/m512","VPOPCNTB zmm2/m512, {k}{z}, zmm1","vpopcntb zmm2/m512, {k}{z}, zmm1","EVEX.512.66.0F38.W0 54 /r","V","V","AVX512_BITALG","scale64","w,r,r","","" +"VPOPCNTD xmm1, {k}{z}, xmm2/m128/m32bcst","VPOPCNTD xmm2/m128/m32bcst, {k}{z}, xmm1","vpopcntd xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.66.0F38.W0 55 /r","V","V","AVX512_VPOPCNTDQ+AVX512VL","bscale4,scale16","w,r,r","","" +"VPOPCNTD ymm1, {k}{z}, ymm2/m256/m32bcst","VPOPCNTD ymm2/m256/m32bcst, {k}{z}, ymm1","vpopcntd ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.66.0F38.W0 55 /r","V","V","AVX512_VPOPCNTDQ+AVX512VL","bscale4,scale32","w,r,r","","" +"VPOPCNTD zmm1, {k}{z}, zmm2/m512/m32bcst","VPOPCNTD zmm2/m512/m32bcst, {k}{z}, zmm1","vpopcntd zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W0 55 /r","V","V","AVX512_VPOPCNTDQ","bscale4,scale64","w,r,r","","" +"VPOPCNTQ xmm1, {k}{z}, xmm2/m128/m64bcst","VPOPCNTQ xmm2/m128/m64bcst, {k}{z}, xmm1","vpopcntq xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.66.0F38.W1 55 /r","V","V","AVX512_VPOPCNTDQ+AVX512VL","bscale8,scale16","w,r,r","","" +"VPOPCNTQ ymm1, {k}{z}, ymm2/m256/m64bcst","VPOPCNTQ ymm2/m256/m64bcst, {k}{z}, ymm1","vpopcntq ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.66.0F38.W1 55 /r","V","V","AVX512_VPOPCNTDQ+AVX512VL","bscale8,scale32","w,r,r","","" +"VPOPCNTQ zmm1, {k}{z}, zmm2/m512/m64bcst","VPOPCNTQ zmm2/m512/m64bcst, {k}{z}, zmm1","vpopcntq zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W1 55 /r","V","V","AVX512_VPOPCNTDQ","bscale8,scale64","w,r,r","","" +"VPOPCNTW xmm1, {k}{z}, xmm2/m128","VPOPCNTW xmm2/m128, {k}{z}, xmm1","vpopcntw xmm2/m128, {k}{z}, xmm1","EVEX.128.66.0F38.W1 54 /r","V","V","AVX512_BITALG+AVX512VL","scale16","w,r,r","","" +"VPOPCNTW ymm1, {k}{z}, ymm2/m256","VPOPCNTW ymm2/m256, {k}{z}, ymm1","vpopcntw ymm2/m256, {k}{z}, ymm1","EVEX.256.66.0F38.W1 54 /r","V","V","AVX512_BITALG+AVX512VL","scale32","w,r,r","","" +"VPOPCNTW zmm1, {k}{z}, zmm2/m512","VPOPCNTW zmm2/m512, {k}{z}, zmm1","vpopcntw zmm2/m512, {k}{z}, zmm1","EVEX.512.66.0F38.W1 54 /r","V","V","AVX512_BITALG","scale64","w,r,r","","" +"VPOR xmm1, xmmV, xmm2/m128","VPOR xmm2/m128, xmmV, xmm1","vpor xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG EB /r","V","V","AVX","","w,r,r","","" +"VPOR ymm1, ymmV, ymm2/m256","VPOR ymm2/m256, ymmV, ymm1","vpor ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG EB /r","V","V","AVX2","","w,r,r","","" +"VPORD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPORD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpord xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W0 EB /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPORD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPORD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpord ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W0 EB /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPORD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPORD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpord zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W0 EB 
/r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPORQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPORQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vporq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 EB /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPORQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPORQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vporq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 EB /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPORQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPORQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vporq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 EB /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPPERM xmm1, xmmV, xmmIH, xmm2/m128","VPPERM xmm2/m128, xmmIH, xmmV, xmm1","vpperm xmm2/m128, xmmIH, xmmV, xmm1","XOP.NDS.128.08.W1 A3 /r /is4","V","V","XOP","amd","w,r,r,r","","" +"VPPERM xmm1, xmmV, xmm2/m128, xmmIH","VPPERM xmmIH, xmm2/m128, xmmV, xmm1","vpperm xmmIH, xmm2/m128, xmmV, xmm1","XOP.NDS.128.08.W0 A3 /r /is4","V","V","XOP","amd","w,r,r,r","","" +"VPROLD xmmV, {k}{z}, xmm2/m128/m32bcst, imm8u","VPROLD imm8u, xmm2/m128/m32bcst, {k}{z}, xmmV","vprold imm8u, xmm2/m128/m32bcst, {k}{z}, xmmV","EVEX.NDD.128.66.0F.W0 72 /1 ib","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPROLD ymmV, {k}{z}, ymm2/m256/m32bcst, imm8u","VPROLD imm8u, ymm2/m256/m32bcst, {k}{z}, ymmV","vprold imm8u, ymm2/m256/m32bcst, {k}{z}, ymmV","EVEX.NDD.256.66.0F.W0 72 /1 ib","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPROLD zmmV, {k}{z}, zmm2/m512/m32bcst, imm8u","VPROLD imm8u, zmm2/m512/m32bcst, {k}{z}, zmmV","vprold imm8u, zmm2/m512/m32bcst, {k}{z}, zmmV","EVEX.NDD.512.66.0F.W0 72 /1 ib","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPROLQ xmmV, {k}{z}, xmm2/m128/m64bcst, imm8u","VPROLQ imm8u, xmm2/m128/m64bcst, {k}{z}, xmmV","vprolq imm8u, xmm2/m128/m64bcst, {k}{z}, xmmV","EVEX.NDD.128.66.0F.W1 72 /1 ib","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPROLQ ymmV, {k}{z}, ymm2/m256/m64bcst, imm8u","VPROLQ imm8u, ymm2/m256/m64bcst, {k}{z}, ymmV","vprolq imm8u, ymm2/m256/m64bcst, {k}{z}, ymmV","EVEX.NDD.256.66.0F.W1 72 /1 ib","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPROLQ zmmV, {k}{z}, zmm2/m512/m64bcst, imm8u","VPROLQ imm8u, zmm2/m512/m64bcst, {k}{z}, zmmV","vprolq imm8u, zmm2/m512/m64bcst, {k}{z}, zmmV","EVEX.NDD.512.66.0F.W1 72 /1 ib","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPROLVD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPROLVD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vprolvd xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W0 15 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPROLVD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPROLVD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vprolvd ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 15 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPROLVD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPROLVD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vprolvd zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 15 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPROLVQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPROLVQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vprolvq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 15 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPROLVQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPROLVQ ymm2/m256/m64bcst, ymmV, {k}{z}, 
ymm1","vprolvq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 15 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPROLVQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPROLVQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vprolvq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 15 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPRORD xmmV, {k}{z}, xmm2/m128/m32bcst, imm8u","VPRORD imm8u, xmm2/m128/m32bcst, {k}{z}, xmmV","vprord imm8u, xmm2/m128/m32bcst, {k}{z}, xmmV","EVEX.NDD.128.66.0F.W0 72 /0 ib","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPRORD ymmV, {k}{z}, ymm2/m256/m32bcst, imm8u","VPRORD imm8u, ymm2/m256/m32bcst, {k}{z}, ymmV","vprord imm8u, ymm2/m256/m32bcst, {k}{z}, ymmV","EVEX.NDD.256.66.0F.W0 72 /0 ib","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPRORD zmmV, {k}{z}, zmm2/m512/m32bcst, imm8u","VPRORD imm8u, zmm2/m512/m32bcst, {k}{z}, zmmV","vprord imm8u, zmm2/m512/m32bcst, {k}{z}, zmmV","EVEX.NDD.512.66.0F.W0 72 /0 ib","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPRORQ xmmV, {k}{z}, xmm2/m128/m64bcst, imm8u","VPRORQ imm8u, xmm2/m128/m64bcst, {k}{z}, xmmV","vprorq imm8u, xmm2/m128/m64bcst, {k}{z}, xmmV","EVEX.NDD.128.66.0F.W1 72 /0 ib","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPRORQ ymmV, {k}{z}, ymm2/m256/m64bcst, imm8u","VPRORQ imm8u, ymm2/m256/m64bcst, {k}{z}, ymmV","vprorq imm8u, ymm2/m256/m64bcst, {k}{z}, ymmV","EVEX.NDD.256.66.0F.W1 72 /0 ib","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPRORQ zmmV, {k}{z}, zmm2/m512/m64bcst, imm8u","VPRORQ imm8u, zmm2/m512/m64bcst, {k}{z}, zmmV","vprorq imm8u, zmm2/m512/m64bcst, {k}{z}, zmmV","EVEX.NDD.512.66.0F.W1 72 /0 ib","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPRORVD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPRORVD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vprorvd xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W0 14 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPRORVD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPRORVD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vprorvd ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 14 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPRORVD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPRORVD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vprorvd zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 14 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPRORVQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPRORVQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vprorvq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 14 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPRORVQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPRORVQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vprorvq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 14 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPRORVQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPRORVQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vprorvq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 14 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPROTB xmm1, xmm2/m128, imm8u","VPROTB imm8u, xmm2/m128, xmm1","vprotb imm8u, xmm2/m128, xmm1","XOP.128.08.W0 C0 /r ib","V","V","XOP","amd","w,r,r","","" +"VPROTB xmm1, xmmV, xmm2/m128","VPROTB xmm2/m128, xmmV, xmm1","vprotb xmm2/m128, xmmV, xmm1","XOP.NDS.128.09.W1 90 /r","V","V","XOP","amd","w,r,r","","" +"VPROTB xmm1, xmm2/m128, xmmV","VPROTB 
xmmV, xmm2/m128, xmm1","vprotb xmmV, xmm2/m128, xmm1","XOP.NDS.128.09.W0 90 /r","V","V","XOP","amd","w,r,r","","" +"VPROTD xmm1, xmm2/m128, imm8u","VPROTD imm8u, xmm2/m128, xmm1","vprotd imm8u, xmm2/m128, xmm1","XOP.128.08.W0 C2 /r ib","V","V","XOP","amd","w,r,r","","" +"VPROTD xmm1, xmmV, xmm2/m128","VPROTD xmm2/m128, xmmV, xmm1","vprotd xmm2/m128, xmmV, xmm1","XOP.NDS.128.09.W1 92 /r","V","V","XOP","amd","w,r,r","","" +"VPROTD xmm1, xmm2/m128, xmmV","VPROTD xmmV, xmm2/m128, xmm1","vprotd xmmV, xmm2/m128, xmm1","XOP.NDS.128.09.W0 92 /r","V","V","XOP","amd","w,r,r","","" +"VPROTQ xmm1, xmm2/m128, imm8u","VPROTQ imm8u, xmm2/m128, xmm1","vprotq imm8u, xmm2/m128, xmm1","XOP.128.08.W0 C3 /r ib","V","V","XOP","amd","w,r,r","","" +"VPROTQ xmm1, xmmV, xmm2/m128","VPROTQ xmm2/m128, xmmV, xmm1","vprotq xmm2/m128, xmmV, xmm1","XOP.NDS.128.09.W1 93 /r","V","V","XOP","amd","w,r,r","","" +"VPROTQ xmm1, xmm2/m128, xmmV","VPROTQ xmmV, xmm2/m128, xmm1","vprotq xmmV, xmm2/m128, xmm1","XOP.NDS.128.09.W0 93 /r","V","V","XOP","amd","w,r,r","","" +"VPROTW xmm1, xmm2/m128, imm8u","VPROTW imm8u, xmm2/m128, xmm1","vprotw imm8u, xmm2/m128, xmm1","XOP.128.08.W0 C1 /r ib","V","V","XOP","amd","w,r,r","","" +"VPROTW xmm1, xmmV, xmm2/m128","VPROTW xmm2/m128, xmmV, xmm1","vprotw xmm2/m128, xmmV, xmm1","XOP.NDS.128.09.W1 91 /r","V","V","XOP","amd","w,r,r","","" +"VPROTW xmm1, xmm2/m128, xmmV","VPROTW xmmV, xmm2/m128, xmm1","vprotw xmmV, xmm2/m128, xmm1","XOP.NDS.128.09.W0 91 /r","V","V","XOP","amd","w,r,r","","" +"VPSADBW xmm1, xmmV, xmm2/m128","VPSADBW xmm2/m128, xmmV, xmm1","vpsadbw xmm2/m128, xmmV, xmm1","EVEX.NDS.128.66.0F.WIG F6 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r","","" +"VPSADBW xmm1, xmmV, xmm2/m128","VPSADBW xmm2/m128, xmmV, xmm1","vpsadbw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG F6 /r","V","V","AVX","","w,r,r","","" +"VPSADBW ymm1, ymmV, ymm2/m256","VPSADBW ymm2/m256, ymmV, ymm1","vpsadbw ymm2/m256, ymmV, ymm1","EVEX.NDS.256.66.0F.WIG F6 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r","","" +"VPSADBW ymm1, ymmV, ymm2/m256","VPSADBW ymm2/m256, ymmV, ymm1","vpsadbw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG F6 /r","V","V","AVX2","","w,r,r","","" +"VPSADBW zmm1, zmmV, zmm2/m512","VPSADBW zmm2/m512, zmmV, zmm1","vpsadbw zmm2/m512, zmmV, zmm1","EVEX.NDS.512.66.0F.WIG F6 /r","V","V","AVX512BW","scale64","w,r,r","","" +"VPSCATTERDD vm32x, {k1-k7}, xmm1","VPSCATTERDD xmm1, {k1-k7}, vm32x","vpscatterdd xmm1, {k1-k7}, vm32x","EVEX.128.66.0F38.W0 A0 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale4","w,rw,r","","" +"VPSCATTERDD vm32y, {k1-k7}, ymm1","VPSCATTERDD ymm1, {k1-k7}, vm32y","vpscatterdd ymm1, {k1-k7}, vm32y","EVEX.256.66.0F38.W0 A0 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale4","w,rw,r","","" +"VPSCATTERDD vm32z, {k1-k7}, zmm1","VPSCATTERDD zmm1, {k1-k7}, vm32z","vpscatterdd zmm1, {k1-k7}, vm32z","EVEX.512.66.0F38.W0 A0 /vsib","V","V","AVX512F","modrm_memonly,scale4","w,rw,r","","" +"VPSCATTERDQ vm32x, {k1-k7}, xmm1","VPSCATTERDQ xmm1, {k1-k7}, vm32x","vpscatterdq xmm1, {k1-k7}, vm32x","EVEX.128.66.0F38.W1 A0 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,rw,r","","" +"VPSCATTERDQ vm32x, {k1-k7}, ymm1","VPSCATTERDQ ymm1, {k1-k7}, vm32x","vpscatterdq ymm1, {k1-k7}, vm32x","EVEX.256.66.0F38.W1 A0 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,rw,r","","" +"VPSCATTERDQ vm32y, {k1-k7}, zmm1","VPSCATTERDQ zmm1, {k1-k7}, vm32y","vpscatterdq zmm1, {k1-k7}, vm32y","EVEX.512.66.0F38.W1 A0 /vsib","V","V","AVX512F","modrm_memonly,scale8","w,rw,r","","" 
+"VPSCATTERQD vm64x, {k1-k7}, xmm1","VPSCATTERQD xmm1, {k1-k7}, vm64x","vpscatterqd xmm1, {k1-k7}, vm64x","EVEX.128.66.0F38.W0 A1 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale4","w,rw,r","","" +"VPSCATTERQD vm64y, {k1-k7}, xmm1","VPSCATTERQD xmm1, {k1-k7}, vm64y","vpscatterqd xmm1, {k1-k7}, vm64y","EVEX.256.66.0F38.W0 A1 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale4","w,rw,r","","" +"VPSCATTERQD vm64z, {k1-k7}, ymm1","VPSCATTERQD ymm1, {k1-k7}, vm64z","vpscatterqd ymm1, {k1-k7}, vm64z","EVEX.512.66.0F38.W0 A1 /vsib","V","V","AVX512F","modrm_memonly,scale4","w,rw,r","","" +"VPSCATTERQQ vm64x, {k1-k7}, xmm1","VPSCATTERQQ xmm1, {k1-k7}, vm64x","vpscatterqq xmm1, {k1-k7}, vm64x","EVEX.128.66.0F38.W1 A1 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,rw,r","","" +"VPSCATTERQQ vm64y, {k1-k7}, ymm1","VPSCATTERQQ ymm1, {k1-k7}, vm64y","vpscatterqq ymm1, {k1-k7}, vm64y","EVEX.256.66.0F38.W1 A1 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,rw,r","","" +"VPSCATTERQQ vm64z, {k1-k7}, zmm1","VPSCATTERQQ zmm1, {k1-k7}, vm64z","vpscatterqq zmm1, {k1-k7}, vm64z","EVEX.512.66.0F38.W1 A1 /vsib","V","V","AVX512F","modrm_memonly,scale8","w,rw,r","","" +"VPSHAB xmm1, xmmV, xmm2/m128","VPSHAB xmm2/m128, xmmV, xmm1","vpshab xmm2/m128, xmmV, xmm1","XOP.NDS.128.09.W1 98 /r","V","V","XOP","amd","w,r,r","","" +"VPSHAB xmm1, xmm2/m128, xmmV","VPSHAB xmmV, xmm2/m128, xmm1","vpshab xmmV, xmm2/m128, xmm1","XOP.NDS.128.09.W0 98 /r","V","V","XOP","amd","w,r,r","","" +"VPSHAD xmm1, xmmV, xmm2/m128","VPSHAD xmm2/m128, xmmV, xmm1","vpshad xmm2/m128, xmmV, xmm1","XOP.NDS.128.09.W1 9A /r","V","V","XOP","amd","w,r,r","","" +"VPSHAD xmm1, xmm2/m128, xmmV","VPSHAD xmmV, xmm2/m128, xmm1","vpshad xmmV, xmm2/m128, xmm1","XOP.NDS.128.09.W0 9A /r","V","V","XOP","amd","w,r,r","","" +"VPSHAQ xmm1, xmmV, xmm2/m128","VPSHAQ xmm2/m128, xmmV, xmm1","vpshaq xmm2/m128, xmmV, xmm1","XOP.NDS.128.09.W1 9B /r","V","V","XOP","amd","w,r,r","","" +"VPSHAQ xmm1, xmm2/m128, xmmV","VPSHAQ xmmV, xmm2/m128, xmm1","vpshaq xmmV, xmm2/m128, xmm1","XOP.NDS.128.09.W0 9B /r","V","V","XOP","amd","w,r,r","","" +"VPSHAW xmm1, xmmV, xmm2/m128","VPSHAW xmm2/m128, xmmV, xmm1","vpshaw xmm2/m128, xmmV, xmm1","XOP.NDS.128.09.W1 99 /r","V","V","XOP","amd","w,r,r","","" +"VPSHAW xmm1, xmm2/m128, xmmV","VPSHAW xmmV, xmm2/m128, xmm1","vpshaw xmmV, xmm2/m128, xmm1","XOP.NDS.128.09.W0 99 /r","V","V","XOP","amd","w,r,r","","" +"VPSHLB xmm1, xmmV, xmm2/m128","VPSHLB xmm2/m128, xmmV, xmm1","vpshlb xmm2/m128, xmmV, xmm1","XOP.NDS.128.09.W1 94 /r","V","V","XOP","amd","w,r,r","","" +"VPSHLB xmm1, xmm2/m128, xmmV","VPSHLB xmmV, xmm2/m128, xmm1","vpshlb xmmV, xmm2/m128, xmm1","XOP.NDS.128.09.W0 94 /r","V","V","XOP","amd","w,r,r","","" +"VPSHLD xmm1, xmmV, xmm2/m128","VPSHLD xmm2/m128, xmmV, xmm1","vpshld xmm2/m128, xmmV, xmm1","XOP.NDS.128.09.W1 96 /r","V","V","XOP","amd","w,r,r","","" +"VPSHLD xmm1, xmm2/m128, xmmV","VPSHLD xmmV, xmm2/m128, xmm1","vpshld xmmV, xmm2/m128, xmm1","XOP.NDS.128.09.W0 96 /r","V","V","XOP","amd","w,r,r","","" +"VPSHLDD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst, imm8u","VPSHLDD imm8u, xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpshldd imm8u, xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F3A.W0 71 /r ib","V","V","AVX512_VBMI2+AVX512VL","bscale4,scale16","w,r,r,r,r","","" +"VPSHLDD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst, imm8u","VPSHLDD imm8u, ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpshldd imm8u, ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W0 71 /r 
ib","V","V","AVX512_VBMI2+AVX512VL","bscale4,scale32","w,r,r,r,r","","" +"VPSHLDD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst, imm8u","VPSHLDD imm8u, zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpshldd imm8u, zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W0 71 /r ib","V","V","AVX512_VBMI2","bscale4,scale64","w,r,r,r,r","","" +"VPSHLDQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst, imm8u","VPSHLDQ imm8u, xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpshldq imm8u, xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F3A.W1 71 /r ib","V","V","AVX512_VBMI2+AVX512VL","bscale8,scale16","w,r,r,r,r","","" +"VPSHLDQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst, imm8u","VPSHLDQ imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpshldq imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W1 71 /r ib","V","V","AVX512_VBMI2+AVX512VL","bscale8,scale32","w,r,r,r,r","","" +"VPSHLDQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst, imm8u","VPSHLDQ imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpshldq imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W1 71 /r ib","V","V","AVX512_VBMI2","bscale8,scale64","w,r,r,r,r","","" +"VPSHLDVD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPSHLDVD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpshldvd xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 71 /r","V","V","AVX512_VBMI2+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VPSHLDVD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPSHLDVD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpshldvd ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 71 /r","V","V","AVX512_VBMI2+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VPSHLDVD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPSHLDVD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpshldvd zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 71 /r","V","V","AVX512_VBMI2","bscale4,scale64","rw,r,r,r","","" +"VPSHLDVQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPSHLDVQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpshldvq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 71 /r","V","V","AVX512_VBMI2+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VPSHLDVQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPSHLDVQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpshldvq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 71 /r","V","V","AVX512_VBMI2+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VPSHLDVQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPSHLDVQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpshldvq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 71 /r","V","V","AVX512_VBMI2","bscale8,scale64","rw,r,r,r","","" +"VPSHLDVW xmm1, {k}{z}, xmmV, xmm2/m128","VPSHLDVW xmm2/m128, xmmV, {k}{z}, xmm1","vpshldvw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 70 /r","V","V","AVX512_VBMI2+AVX512VL","scale16","rw,r,r,r","","" +"VPSHLDVW ymm1, {k}{z}, ymmV, ymm2/m256","VPSHLDVW ymm2/m256, ymmV, {k}{z}, ymm1","vpshldvw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 70 /r","V","V","AVX512_VBMI2+AVX512VL","scale32","rw,r,r,r","","" +"VPSHLDVW zmm1, {k}{z}, zmmV, zmm2/m512","VPSHLDVW zmm2/m512, zmmV, {k}{z}, zmm1","vpshldvw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 70 /r","V","V","AVX512_VBMI2","scale64","rw,r,r,r","","" +"VPSHLDW xmm1, {k}{z}, xmmV, xmm2/m128, imm8u","VPSHLDW imm8u, xmm2/m128, xmmV, {k}{z}, xmm1","vpshldw imm8u, xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F3A.W1 70 /r ib","V","V","AVX512_VBMI2+AVX512VL","scale16","w,r,r,r,r","","" +"VPSHLDW ymm1, {k}{z}, ymmV, ymm2/m256, imm8u","VPSHLDW imm8u, ymm2/m256, ymmV, 
{k}{z}, ymm1","vpshldw imm8u, ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W1 70 /r ib","V","V","AVX512_VBMI2+AVX512VL","scale32","w,r,r,r,r","","" +"VPSHLDW zmm1, {k}{z}, zmmV, zmm2/m512, imm8u","VPSHLDW imm8u, zmm2/m512, zmmV, {k}{z}, zmm1","vpshldw imm8u, zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W1 70 /r ib","V","V","AVX512_VBMI2","scale64","w,r,r,r,r","","" +"VPSHLQ xmm1, xmmV, xmm2/m128","VPSHLQ xmm2/m128, xmmV, xmm1","vpshlq xmm2/m128, xmmV, xmm1","XOP.NDS.128.09.W1 97 /r","V","V","XOP","amd","w,r,r","","" +"VPSHLQ xmm1, xmm2/m128, xmmV","VPSHLQ xmmV, xmm2/m128, xmm1","vpshlq xmmV, xmm2/m128, xmm1","XOP.NDS.128.09.W0 97 /r","V","V","XOP","amd","w,r,r","","" +"VPSHLW xmm1, xmmV, xmm2/m128","VPSHLW xmm2/m128, xmmV, xmm1","vpshlw xmm2/m128, xmmV, xmm1","XOP.NDS.128.09.W1 95 /r","V","V","XOP","amd","w,r,r","","" +"VPSHLW xmm1, xmm2/m128, xmmV","VPSHLW xmmV, xmm2/m128, xmm1","vpshlw xmmV, xmm2/m128, xmm1","XOP.NDS.128.09.W0 95 /r","V","V","XOP","amd","w,r,r","","" +"VPSHRDD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst, imm8u","VPSHRDD imm8u, xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpshrdd imm8u, xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F3A.W0 73 /r ib","V","V","AVX512_VBMI2+AVX512VL","bscale4,scale16","w,r,r,r,r","","" +"VPSHRDD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst, imm8u","VPSHRDD imm8u, ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpshrdd imm8u, ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W0 73 /r ib","V","V","AVX512_VBMI2+AVX512VL","bscale4,scale32","w,r,r,r,r","","" +"VPSHRDD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst, imm8u","VPSHRDD imm8u, zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpshrdd imm8u, zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W0 73 /r ib","V","V","AVX512_VBMI2","bscale4,scale64","w,r,r,r,r","","" +"VPSHRDQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst, imm8u","VPSHRDQ imm8u, xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpshrdq imm8u, xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F3A.W1 73 /r ib","V","V","AVX512_VBMI2+AVX512VL","bscale8,scale16","w,r,r,r,r","","" +"VPSHRDQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst, imm8u","VPSHRDQ imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpshrdq imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W1 73 /r ib","V","V","AVX512_VBMI2+AVX512VL","bscale8,scale32","w,r,r,r,r","","" +"VPSHRDQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst, imm8u","VPSHRDQ imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpshrdq imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W1 73 /r ib","V","V","AVX512_VBMI2","bscale8,scale64","w,r,r,r,r","","" +"VPSHRDVD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPSHRDVD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpshrdvd xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W0 73 /r","V","V","AVX512_VBMI2+AVX512VL","bscale4,scale16","rw,r,r,r","","" +"VPSHRDVD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPSHRDVD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpshrdvd ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W0 73 /r","V","V","AVX512_VBMI2+AVX512VL","bscale4,scale32","rw,r,r,r","","" +"VPSHRDVD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPSHRDVD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpshrdvd zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W0 73 /r","V","V","AVX512_VBMI2","bscale4,scale64","rw,r,r,r","","" +"VPSHRDVQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPSHRDVQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpshrdvq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 73 
/r","V","V","AVX512_VBMI2+AVX512VL","bscale8,scale16","rw,r,r,r","","" +"VPSHRDVQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPSHRDVQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpshrdvq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 73 /r","V","V","AVX512_VBMI2+AVX512VL","bscale8,scale32","rw,r,r,r","","" +"VPSHRDVQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPSHRDVQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpshrdvq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 73 /r","V","V","AVX512_VBMI2","bscale8,scale64","rw,r,r,r","","" +"VPSHRDVW xmm1, {k}{z}, xmmV, xmm2/m128","VPSHRDVW xmm2/m128, xmmV, {k}{z}, xmm1","vpshrdvw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F38.W1 72 /r","V","V","AVX512_VBMI2+AVX512VL","scale16","rw,r,r,r","","" +"VPSHRDVW ymm1, {k}{z}, ymmV, ymm2/m256","VPSHRDVW ymm2/m256, ymmV, {k}{z}, ymm1","vpshrdvw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F38.W1 72 /r","V","V","AVX512_VBMI2+AVX512VL","scale32","rw,r,r,r","","" +"VPSHRDVW zmm1, {k}{z}, zmmV, zmm2/m512","VPSHRDVW zmm2/m512, zmmV, {k}{z}, zmm1","vpshrdvw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F38.W1 72 /r","V","V","AVX512_VBMI2","scale64","rw,r,r,r","","" +"VPSHRDW xmm1, {k}{z}, xmmV, xmm2/m128, imm8u","VPSHRDW imm8u, xmm2/m128, xmmV, {k}{z}, xmm1","vpshrdw imm8u, xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F3A.W1 72 /r ib","V","V","AVX512_VBMI2+AVX512VL","scale16","w,r,r,r,r","","" +"VPSHRDW ymm1, {k}{z}, ymmV, ymm2/m256, imm8u","VPSHRDW imm8u, ymm2/m256, ymmV, {k}{z}, ymm1","vpshrdw imm8u, ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W1 72 /r ib","V","V","AVX512_VBMI2+AVX512VL","scale32","w,r,r,r,r","","" +"VPSHRDW zmm1, {k}{z}, zmmV, zmm2/m512, imm8u","VPSHRDW imm8u, zmm2/m512, zmmV, {k}{z}, zmm1","vpshrdw imm8u, zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W1 72 /r ib","V","V","AVX512_VBMI2","scale64","w,r,r,r,r","","" +"VPSHUFB xmm1, xmmV, xmm2/m128","VPSHUFB xmm2/m128, xmmV, xmm1","vpshufb xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 00 /r","V","V","AVX","","w,r,r","","" +"VPSHUFB xmm1, {k}{z}, xmmV, xmm2/m128","VPSHUFB xmm2/m128, xmmV, {k}{z}, xmm1","vpshufb xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.WIG 00 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSHUFB ymm1, ymmV, ymm2/m256","VPSHUFB ymm2/m256, ymmV, ymm1","vpshufb ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 00 /r","V","V","AVX2","","w,r,r","","" +"VPSHUFB ymm1, {k}{z}, ymmV, ymm2/m256","VPSHUFB ymm2/m256, ymmV, {k}{z}, ymm1","vpshufb ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.WIG 00 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPSHUFB zmm1, {k}{z}, zmmV, zmm2/m512","VPSHUFB zmm2/m512, zmmV, {k}{z}, zmm1","vpshufb zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.WIG 00 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPSHUFBITQMB k1, {k}, xmmV, xmm2/m128","VPSHUFBITQMB xmm2/m128, xmmV, {k}, k1","vpshufbitqmb xmm2/m128, xmmV, {k}, k1","EVEX.NDS.128.66.0F38.W0 8F /r","V","V","AVX512_BITALG+AVX512VL","scale16","w,r,r,r","","" +"VPSHUFBITQMB k1, {k}, ymmV, ymm2/m256","VPSHUFBITQMB ymm2/m256, ymmV, {k}, k1","vpshufbitqmb ymm2/m256, ymmV, {k}, k1","EVEX.NDS.256.66.0F38.W0 8F /r","V","V","AVX512_BITALG+AVX512VL","scale32","w,r,r,r","","" +"VPSHUFBITQMB k1, {k}, zmmV, zmm2/m512","VPSHUFBITQMB zmm2/m512, zmmV, {k}, k1","vpshufbitqmb zmm2/m512, zmmV, {k}, k1","EVEX.NDS.512.66.0F38.W0 8F /r","V","V","AVX512_BITALG","scale64","w,r,r,r","","" +"VPSHUFD xmm1, xmm2/m128, imm8u","VPSHUFD imm8u, xmm2/m128, xmm1","vpshufd imm8u, xmm2/m128, 
xmm1","VEX.128.66.0F.WIG 70 /r ib","V","V","AVX","","w,r,r","","" +"VPSHUFD xmm1, {k}{z}, xmm2/m128/m32bcst, imm8u","VPSHUFD imm8u, xmm2/m128/m32bcst, {k}{z}, xmm1","vpshufd imm8u, xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.66.0F.W0 70 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPSHUFD ymm1, ymm2/m256, imm8u","VPSHUFD imm8u, ymm2/m256, ymm1","vpshufd imm8u, ymm2/m256, ymm1","VEX.256.66.0F.WIG 70 /r ib","V","V","AVX2","","w,r,r","","" +"VPSHUFD ymm1, {k}{z}, ymm2/m256/m32bcst, imm8u","VPSHUFD imm8u, ymm2/m256/m32bcst, {k}{z}, ymm1","vpshufd imm8u, ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.66.0F.W0 70 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPSHUFD zmm1, {k}{z}, zmm2/m512/m32bcst, imm8u","VPSHUFD imm8u, zmm2/m512/m32bcst, {k}{z}, zmm1","vpshufd imm8u, zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F.W0 70 /r ib","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPSHUFHW xmm1, xmm2/m128, imm8u","VPSHUFHW imm8u, xmm2/m128, xmm1","vpshufhw imm8u, xmm2/m128, xmm1","VEX.128.F3.0F.WIG 70 /r ib","V","V","AVX","","w,r,r","","" +"VPSHUFHW xmm1, {k}{z}, xmm2/m128, imm8u","VPSHUFHW imm8u, xmm2/m128, {k}{z}, xmm1","vpshufhw imm8u, xmm2/m128, {k}{z}, xmm1","EVEX.128.F3.0F.WIG 70 /r ib","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSHUFHW ymm1, ymm2/m256, imm8u","VPSHUFHW imm8u, ymm2/m256, ymm1","vpshufhw imm8u, ymm2/m256, ymm1","VEX.256.F3.0F.WIG 70 /r ib","V","V","AVX2","","w,r,r","","" +"VPSHUFHW ymm1, {k}{z}, ymm2/m256, imm8u","VPSHUFHW imm8u, ymm2/m256, {k}{z}, ymm1","vpshufhw imm8u, ymm2/m256, {k}{z}, ymm1","EVEX.256.F3.0F.WIG 70 /r ib","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPSHUFHW zmm1, {k}{z}, zmm2/m512, imm8u","VPSHUFHW imm8u, zmm2/m512, {k}{z}, zmm1","vpshufhw imm8u, zmm2/m512, {k}{z}, zmm1","EVEX.512.F3.0F.WIG 70 /r ib","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPSHUFLW xmm1, xmm2/m128, imm8u","VPSHUFLW imm8u, xmm2/m128, xmm1","vpshuflw imm8u, xmm2/m128, xmm1","VEX.128.F2.0F.WIG 70 /r ib","V","V","AVX","","w,r,r","","" +"VPSHUFLW xmm1, {k}{z}, xmm2/m128, imm8u","VPSHUFLW imm8u, xmm2/m128, {k}{z}, xmm1","vpshuflw imm8u, xmm2/m128, {k}{z}, xmm1","EVEX.128.F2.0F.WIG 70 /r ib","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSHUFLW ymm1, ymm2/m256, imm8u","VPSHUFLW imm8u, ymm2/m256, ymm1","vpshuflw imm8u, ymm2/m256, ymm1","VEX.256.F2.0F.WIG 70 /r ib","V","V","AVX2","","w,r,r","","" +"VPSHUFLW ymm1, {k}{z}, ymm2/m256, imm8u","VPSHUFLW imm8u, ymm2/m256, {k}{z}, ymm1","vpshuflw imm8u, ymm2/m256, {k}{z}, ymm1","EVEX.256.F2.0F.WIG 70 /r ib","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPSHUFLW zmm1, {k}{z}, zmm2/m512, imm8u","VPSHUFLW imm8u, zmm2/m512, {k}{z}, zmm1","vpshuflw imm8u, zmm2/m512, {k}{z}, zmm1","EVEX.512.F2.0F.WIG 70 /r ib","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPSIGNB xmm1, xmmV, xmm2/m128","VPSIGNB xmm2/m128, xmmV, xmm1","vpsignb xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 08 /r","V","V","AVX","","w,r,r","","" +"VPSIGNB ymm1, ymmV, ymm2/m256","VPSIGNB ymm2/m256, ymmV, ymm1","vpsignb ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 08 /r","V","V","AVX2","","w,r,r","","" +"VPSIGND xmm1, xmmV, xmm2/m128","VPSIGND xmm2/m128, xmmV, xmm1","vpsignd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 0A /r","V","V","AVX","","w,r,r","","" +"VPSIGND ymm1, ymmV, ymm2/m256","VPSIGND ymm2/m256, ymmV, ymm1","vpsignd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 0A /r","V","V","AVX2","","w,r,r","","" +"VPSIGNW xmm1, xmmV, xmm2/m128","VPSIGNW xmm2/m128, xmmV, xmm1","vpsignw 
xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.WIG 09 /r","V","V","AVX","","w,r,r","","" +"VPSIGNW ymm1, ymmV, ymm2/m256","VPSIGNW ymm2/m256, ymmV, ymm1","vpsignw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.WIG 09 /r","V","V","AVX2","","w,r,r","","" +"VPSLLD xmmV, xmm2, imm8u","VPSLLD imm8u, xmm2, xmmV","vpslld imm8u, xmm2, xmmV","VEX.NDD.128.66.0F.WIG 72 /6 ib","V","V","AVX","modrm_regonly","w,r,r","","" +"VPSLLD xmmV, {k}{z}, xmm2/m128/m32bcst, imm8u","VPSLLD imm8u, xmm2/m128/m32bcst, {k}{z}, xmmV","vpslld imm8u, xmm2/m128/m32bcst, {k}{z}, xmmV","EVEX.NDD.128.66.0F.W0 72 /6 ib","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPSLLD ymmV, ymm2, imm8u","VPSLLD imm8u, ymm2, ymmV","vpslld imm8u, ymm2, ymmV","VEX.NDD.256.66.0F.WIG 72 /6 ib","V","V","AVX2","modrm_regonly","w,r,r","","" +"VPSLLD ymmV, {k}{z}, ymm2/m256/m32bcst, imm8u","VPSLLD imm8u, ymm2/m256/m32bcst, {k}{z}, ymmV","vpslld imm8u, ymm2/m256/m32bcst, {k}{z}, ymmV","EVEX.NDD.256.66.0F.W0 72 /6 ib","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPSLLD zmmV, {k}{z}, zmm2/m512/m32bcst, imm8u","VPSLLD imm8u, zmm2/m512/m32bcst, {k}{z}, zmmV","vpslld imm8u, zmm2/m512/m32bcst, {k}{z}, zmmV","EVEX.NDD.512.66.0F.W0 72 /6 ib","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPSLLD xmm1, xmmV, xmm2/m128","VPSLLD xmm2/m128, xmmV, xmm1","vpslld xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG F2 /r","V","V","AVX","","w,r,r","","" +"VPSLLD xmm1, {k}{z}, xmmV, xmm2/m128","VPSLLD xmm2/m128, xmmV, {k}{z}, xmm1","vpslld xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W0 F2 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r,r","","" +"VPSLLD ymm1, ymmV, xmm2/m128","VPSLLD xmm2/m128, ymmV, ymm1","vpslld xmm2/m128, ymmV, ymm1","VEX.NDS.256.66.0F.WIG F2 /r","V","V","AVX2","","w,r,r","","" +"VPSLLD ymm1, {k}{z}, ymmV, xmm2/m128","VPSLLD xmm2/m128, ymmV, {k}{z}, ymm1","vpslld xmm2/m128, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W0 F2 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r,r","","" +"VPSLLD zmm1, {k}{z}, zmmV, xmm2/m128","VPSLLD xmm2/m128, zmmV, {k}{z}, zmm1","vpslld xmm2/m128, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W0 F2 /r","V","V","AVX512F","scale16","w,r,r,r","","" +"VPSLLDQ xmmV, xmm2, imm8u","VPSLLDQ imm8u, xmm2, xmmV","vpslldq imm8u, xmm2, xmmV","VEX.NDD.128.66.0F.WIG 73 /7 ib","V","V","AVX","modrm_regonly","w,r,r","","" +"VPSLLDQ xmmV, xmm2/m128, imm8u","VPSLLDQ imm8u, xmm2/m128, xmmV","vpslldq imm8u, xmm2/m128, xmmV","EVEX.NDD.128.66.0F.WIG 73 /7 ib","V","V","AVX512BW+AVX512VL","scale16","w,r,r","","" +"VPSLLDQ ymmV, ymm2, imm8u","VPSLLDQ imm8u, ymm2, ymmV","vpslldq imm8u, ymm2, ymmV","VEX.NDD.256.66.0F.WIG 73 /7 ib","V","V","AVX2","modrm_regonly","w,r,r","","" +"VPSLLDQ ymmV, ymm2/m256, imm8u","VPSLLDQ imm8u, ymm2/m256, ymmV","vpslldq imm8u, ymm2/m256, ymmV","EVEX.NDD.256.66.0F.WIG 73 /7 ib","V","V","AVX512BW+AVX512VL","scale32","w,r,r","","" +"VPSLLDQ zmmV, zmm2/m512, imm8u","VPSLLDQ imm8u, zmm2/m512, zmmV","vpslldq imm8u, zmm2/m512, zmmV","EVEX.NDD.512.66.0F.WIG 73 /7 ib","V","V","AVX512BW","scale64","w,r,r","","" +"VPSLLQ xmmV, xmm2, imm8u","VPSLLQ imm8u, xmm2, xmmV","vpsllq imm8u, xmm2, xmmV","VEX.NDD.128.66.0F.WIG 73 /6 ib","V","V","AVX","modrm_regonly","w,r,r","","" +"VPSLLQ xmmV, {k}{z}, xmm2/m128/m64bcst, imm8u","VPSLLQ imm8u, xmm2/m128/m64bcst, {k}{z}, xmmV","vpsllq imm8u, xmm2/m128/m64bcst, {k}{z}, xmmV","EVEX.NDD.128.66.0F.W1 73 /6 ib","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPSLLQ ymmV, ymm2, imm8u","VPSLLQ imm8u, ymm2, ymmV","vpsllq imm8u, ymm2, 
ymmV","VEX.NDD.256.66.0F.WIG 73 /6 ib","V","V","AVX2","modrm_regonly","w,r,r","","" +"VPSLLQ ymmV, {k}{z}, ymm2/m256/m64bcst, imm8u","VPSLLQ imm8u, ymm2/m256/m64bcst, {k}{z}, ymmV","vpsllq imm8u, ymm2/m256/m64bcst, {k}{z}, ymmV","EVEX.NDD.256.66.0F.W1 73 /6 ib","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPSLLQ zmmV, {k}{z}, zmm2/m512/m64bcst, imm8u","VPSLLQ imm8u, zmm2/m512/m64bcst, {k}{z}, zmmV","vpsllq imm8u, zmm2/m512/m64bcst, {k}{z}, zmmV","EVEX.NDD.512.66.0F.W1 73 /6 ib","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPSLLQ xmm1, xmmV, xmm2/m128","VPSLLQ xmm2/m128, xmmV, xmm1","vpsllq xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG F3 /r","V","V","AVX","","w,r,r","","" +"VPSLLQ xmm1, {k}{z}, xmmV, xmm2/m128","VPSLLQ xmm2/m128, xmmV, {k}{z}, xmm1","vpsllq xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 F3 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r,r","","" +"VPSLLQ ymm1, ymmV, xmm2/m128","VPSLLQ xmm2/m128, ymmV, ymm1","vpsllq xmm2/m128, ymmV, ymm1","VEX.NDS.256.66.0F.WIG F3 /r","V","V","AVX2","","w,r,r","","" +"VPSLLQ ymm1, {k}{z}, ymmV, xmm2/m128","VPSLLQ xmm2/m128, ymmV, {k}{z}, ymm1","vpsllq xmm2/m128, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 F3 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r,r","","" +"VPSLLQ zmm1, {k}{z}, zmmV, xmm2/m128","VPSLLQ xmm2/m128, zmmV, {k}{z}, zmm1","vpsllq xmm2/m128, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 F3 /r","V","V","AVX512F","scale16","w,r,r,r","","" +"VPSLLVD xmm1, xmmV, xmm2/m128","VPSLLVD xmm2/m128, xmmV, xmm1","vpsllvd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.W0 47 /r","V","V","AVX2","","w,r,r","","" +"VPSLLVD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPSLLVD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpsllvd xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W0 47 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPSLLVD ymm1, ymmV, ymm2/m256","VPSLLVD ymm2/m256, ymmV, ymm1","vpsllvd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.W0 47 /r","V","V","AVX2","","w,r,r","","" +"VPSLLVD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPSLLVD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpsllvd ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 47 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPSLLVD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPSLLVD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpsllvd zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 47 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPSLLVQ xmm1, xmmV, xmm2/m128","VPSLLVQ xmm2/m128, xmmV, xmm1","vpsllvq xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.W1 47 /r","V","V","AVX2","","w,r,r","","" +"VPSLLVQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPSLLVQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpsllvq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 47 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPSLLVQ ymm1, ymmV, ymm2/m256","VPSLLVQ ymm2/m256, ymmV, ymm1","vpsllvq ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.W1 47 /r","V","V","AVX2","","w,r,r","","" +"VPSLLVQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPSLLVQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpsllvq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 47 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPSLLVQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPSLLVQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpsllvq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 47 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPSLLVW xmm1, {k}{z}, xmmV, 
xmm2/m128","VPSLLVW xmm2/m128, xmmV, {k}{z}, xmm1","vpsllvw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 12 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSLLVW ymm1, {k}{z}, ymmV, ymm2/m256","VPSLLVW ymm2/m256, ymmV, {k}{z}, ymm1","vpsllvw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 12 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPSLLVW zmm1, {k}{z}, zmmV, zmm2/m512","VPSLLVW zmm2/m512, zmmV, {k}{z}, zmm1","vpsllvw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 12 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPSLLW xmmV, xmm2, imm8u","VPSLLW imm8u, xmm2, xmmV","vpsllw imm8u, xmm2, xmmV","VEX.NDD.128.66.0F.WIG 71 /6 ib","V","V","AVX","modrm_regonly","w,r,r","","" +"VPSLLW xmmV, {k}{z}, xmm2/m128, imm8u","VPSLLW imm8u, xmm2/m128, {k}{z}, xmmV","vpsllw imm8u, xmm2/m128, {k}{z}, xmmV","EVEX.NDD.128.66.0F.WIG 71 /6 ib","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSLLW ymmV, ymm2, imm8u","VPSLLW imm8u, ymm2, ymmV","vpsllw imm8u, ymm2, ymmV","VEX.NDD.256.66.0F.WIG 71 /6 ib","V","V","AVX2","modrm_regonly","w,r,r","","" +"VPSLLW ymmV, {k}{z}, ymm2/m256, imm8u","VPSLLW imm8u, ymm2/m256, {k}{z}, ymmV","vpsllw imm8u, ymm2/m256, {k}{z}, ymmV","EVEX.NDD.256.66.0F.WIG 71 /6 ib","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPSLLW zmmV, {k}{z}, zmm2/m512, imm8u","VPSLLW imm8u, zmm2/m512, {k}{z}, zmmV","vpsllw imm8u, zmm2/m512, {k}{z}, zmmV","EVEX.NDD.512.66.0F.WIG 71 /6 ib","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPSLLW xmm1, xmmV, xmm2/m128","VPSLLW xmm2/m128, xmmV, xmm1","vpsllw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG F1 /r","V","V","AVX","","w,r,r","","" +"VPSLLW xmm1, {k}{z}, xmmV, xmm2/m128","VPSLLW xmm2/m128, xmmV, {k}{z}, xmm1","vpsllw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG F1 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSLLW ymm1, ymmV, xmm2/m128","VPSLLW xmm2/m128, ymmV, ymm1","vpsllw xmm2/m128, ymmV, ymm1","VEX.NDS.256.66.0F.WIG F1 /r","V","V","AVX2","","w,r,r","","" +"VPSLLW ymm1, {k}{z}, ymmV, xmm2/m128","VPSLLW xmm2/m128, ymmV, {k}{z}, ymm1","vpsllw xmm2/m128, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG F1 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSLLW zmm1, {k}{z}, zmmV, xmm2/m128","VPSLLW xmm2/m128, zmmV, {k}{z}, zmm1","vpsllw xmm2/m128, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG F1 /r","V","V","AVX512BW","scale16","w,r,r,r","","" +"VPSRAD xmmV, xmm2, imm8u","VPSRAD imm8u, xmm2, xmmV","vpsrad imm8u, xmm2, xmmV","VEX.NDD.128.66.0F.WIG 72 /4 ib","V","V","AVX","modrm_regonly","w,r,r","","" +"VPSRAD xmmV, {k}{z}, xmm2/m128/m32bcst, imm8u","VPSRAD imm8u, xmm2/m128/m32bcst, {k}{z}, xmmV","vpsrad imm8u, xmm2/m128/m32bcst, {k}{z}, xmmV","EVEX.NDD.128.66.0F.W0 72 /4 ib","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPSRAD ymmV, ymm2, imm8u","VPSRAD imm8u, ymm2, ymmV","vpsrad imm8u, ymm2, ymmV","VEX.NDD.256.66.0F.WIG 72 /4 ib","V","V","AVX2","modrm_regonly","w,r,r","","" +"VPSRAD ymmV, {k}{z}, ymm2/m256/m32bcst, imm8u","VPSRAD imm8u, ymm2/m256/m32bcst, {k}{z}, ymmV","vpsrad imm8u, ymm2/m256/m32bcst, {k}{z}, ymmV","EVEX.NDD.256.66.0F.W0 72 /4 ib","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPSRAD zmmV, {k}{z}, zmm2/m512/m32bcst, imm8u","VPSRAD imm8u, zmm2/m512/m32bcst, {k}{z}, zmmV","vpsrad imm8u, zmm2/m512/m32bcst, {k}{z}, zmmV","EVEX.NDD.512.66.0F.W0 72 /4 ib","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPSRAD xmm1, xmmV, xmm2/m128","VPSRAD xmm2/m128, xmmV, xmm1","vpsrad xmm2/m128, xmmV, 
xmm1","VEX.NDS.128.66.0F.WIG E2 /r","V","V","AVX","","w,r,r","","" +"VPSRAD xmm1, {k}{z}, xmmV, xmm2/m128","VPSRAD xmm2/m128, xmmV, {k}{z}, xmm1","vpsrad xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W0 E2 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r,r","","" +"VPSRAD ymm1, ymmV, xmm2/m128","VPSRAD xmm2/m128, ymmV, ymm1","vpsrad xmm2/m128, ymmV, ymm1","VEX.NDS.256.66.0F.WIG E2 /r","V","V","AVX2","","w,r,r","","" +"VPSRAD ymm1, {k}{z}, ymmV, xmm2/m128","VPSRAD xmm2/m128, ymmV, {k}{z}, ymm1","vpsrad xmm2/m128, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W0 E2 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r,r","","" +"VPSRAD zmm1, {k}{z}, zmmV, xmm2/m128","VPSRAD xmm2/m128, zmmV, {k}{z}, zmm1","vpsrad xmm2/m128, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W0 E2 /r","V","V","AVX512F","scale16","w,r,r,r","","" +"VPSRAQ xmmV, {k}{z}, xmm2/m128/m64bcst, imm8u","VPSRAQ imm8u, xmm2/m128/m64bcst, {k}{z}, xmmV","vpsraq imm8u, xmm2/m128/m64bcst, {k}{z}, xmmV","EVEX.NDD.128.66.0F.W1 72 /4 ib","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPSRAQ ymmV, {k}{z}, ymm2/m256/m64bcst, imm8u","VPSRAQ imm8u, ymm2/m256/m64bcst, {k}{z}, ymmV","vpsraq imm8u, ymm2/m256/m64bcst, {k}{z}, ymmV","EVEX.NDD.256.66.0F.W1 72 /4 ib","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPSRAQ zmmV, {k}{z}, zmm2/m512/m64bcst, imm8u","VPSRAQ imm8u, zmm2/m512/m64bcst, {k}{z}, zmmV","vpsraq imm8u, zmm2/m512/m64bcst, {k}{z}, zmmV","EVEX.NDD.512.66.0F.W1 72 /4 ib","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPSRAQ xmm1, {k}{z}, xmmV, xmm2/m128","VPSRAQ xmm2/m128, xmmV, {k}{z}, xmm1","vpsraq xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 E2 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r,r","","" +"VPSRAQ ymm1, {k}{z}, ymmV, xmm2/m128","VPSRAQ xmm2/m128, ymmV, {k}{z}, ymm1","vpsraq xmm2/m128, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 E2 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r,r","","" +"VPSRAQ zmm1, {k}{z}, zmmV, xmm2/m128","VPSRAQ xmm2/m128, zmmV, {k}{z}, zmm1","vpsraq xmm2/m128, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 E2 /r","V","V","AVX512F","scale16","w,r,r,r","","" +"VPSRAVD xmm1, xmmV, xmm2/m128","VPSRAVD xmm2/m128, xmmV, xmm1","vpsravd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.W0 46 /r","V","V","AVX2","","w,r,r","","" +"VPSRAVD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPSRAVD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpsravd xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W0 46 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPSRAVD ymm1, ymmV, ymm2/m256","VPSRAVD ymm2/m256, ymmV, ymm1","vpsravd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.W0 46 /r","V","V","AVX2","","w,r,r","","" +"VPSRAVD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPSRAVD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpsravd ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 46 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPSRAVD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPSRAVD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpsravd zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 46 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPSRAVQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPSRAVQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpsravq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 46 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPSRAVQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPSRAVQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpsravq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 46 
/r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPSRAVQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPSRAVQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpsravq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 46 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPSRAVW xmm1, {k}{z}, xmmV, xmm2/m128","VPSRAVW xmm2/m128, xmmV, {k}{z}, xmm1","vpsravw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 11 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSRAVW ymm1, {k}{z}, ymmV, ymm2/m256","VPSRAVW ymm2/m256, ymmV, {k}{z}, ymm1","vpsravw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 11 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPSRAVW zmm1, {k}{z}, zmmV, zmm2/m512","VPSRAVW zmm2/m512, zmmV, {k}{z}, zmm1","vpsravw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 11 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPSRAW xmmV, xmm2, imm8u","VPSRAW imm8u, xmm2, xmmV","vpsraw imm8u, xmm2, xmmV","VEX.NDD.128.66.0F.WIG 71 /4 ib","V","V","AVX","modrm_regonly","w,r,r","","" +"VPSRAW xmmV, {k}{z}, xmm2/m128, imm8u","VPSRAW imm8u, xmm2/m128, {k}{z}, xmmV","vpsraw imm8u, xmm2/m128, {k}{z}, xmmV","EVEX.NDD.128.66.0F.WIG 71 /4 ib","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSRAW ymmV, ymm2, imm8u","VPSRAW imm8u, ymm2, ymmV","vpsraw imm8u, ymm2, ymmV","VEX.NDD.256.66.0F.WIG 71 /4 ib","V","V","AVX2","modrm_regonly","w,r,r","","" +"VPSRAW ymmV, {k}{z}, ymm2/m256, imm8u","VPSRAW imm8u, ymm2/m256, {k}{z}, ymmV","vpsraw imm8u, ymm2/m256, {k}{z}, ymmV","EVEX.NDD.256.66.0F.WIG 71 /4 ib","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPSRAW zmmV, {k}{z}, zmm2/m512, imm8u","VPSRAW imm8u, zmm2/m512, {k}{z}, zmmV","vpsraw imm8u, zmm2/m512, {k}{z}, zmmV","EVEX.NDD.512.66.0F.WIG 71 /4 ib","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPSRAW xmm1, xmmV, xmm2/m128","VPSRAW xmm2/m128, xmmV, xmm1","vpsraw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG E1 /r","V","V","AVX","","w,r,r","","" +"VPSRAW xmm1, {k}{z}, xmmV, xmm2/m128","VPSRAW xmm2/m128, xmmV, {k}{z}, xmm1","vpsraw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG E1 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSRAW ymm1, ymmV, xmm2/m128","VPSRAW xmm2/m128, ymmV, ymm1","vpsraw xmm2/m128, ymmV, ymm1","VEX.NDS.256.66.0F.WIG E1 /r","V","V","AVX2","","w,r,r","","" +"VPSRAW ymm1, {k}{z}, ymmV, xmm2/m128","VPSRAW xmm2/m128, ymmV, {k}{z}, ymm1","vpsraw xmm2/m128, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG E1 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSRAW zmm1, {k}{z}, zmmV, xmm2/m128","VPSRAW xmm2/m128, zmmV, {k}{z}, zmm1","vpsraw xmm2/m128, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG E1 /r","V","V","AVX512BW","scale16","w,r,r,r","","" +"VPSRLD xmmV, xmm2, imm8u","VPSRLD imm8u, xmm2, xmmV","vpsrld imm8u, xmm2, xmmV","VEX.NDD.128.66.0F.WIG 72 /2 ib","V","V","AVX","modrm_regonly","w,r,r","","" +"VPSRLD xmmV, {k}{z}, xmm2/m128/m32bcst, imm8u","VPSRLD imm8u, xmm2/m128/m32bcst, {k}{z}, xmmV","vpsrld imm8u, xmm2/m128/m32bcst, {k}{z}, xmmV","EVEX.NDD.128.66.0F.W0 72 /2 ib","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPSRLD ymmV, ymm2, imm8u","VPSRLD imm8u, ymm2, ymmV","vpsrld imm8u, ymm2, ymmV","VEX.NDD.256.66.0F.WIG 72 /2 ib","V","V","AVX2","modrm_regonly","w,r,r","","" +"VPSRLD ymmV, {k}{z}, ymm2/m256/m32bcst, imm8u","VPSRLD imm8u, ymm2/m256/m32bcst, {k}{z}, ymmV","vpsrld imm8u, ymm2/m256/m32bcst, {k}{z}, ymmV","EVEX.NDD.256.66.0F.W0 72 /2 ib","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" 
+"VPSRLD zmmV, {k}{z}, zmm2/m512/m32bcst, imm8u","VPSRLD imm8u, zmm2/m512/m32bcst, {k}{z}, zmmV","vpsrld imm8u, zmm2/m512/m32bcst, {k}{z}, zmmV","EVEX.NDD.512.66.0F.W0 72 /2 ib","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPSRLD xmm1, xmmV, xmm2/m128","VPSRLD xmm2/m128, xmmV, xmm1","vpsrld xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG D2 /r","V","V","AVX","","w,r,r","","" +"VPSRLD xmm1, {k}{z}, xmmV, xmm2/m128","VPSRLD xmm2/m128, xmmV, {k}{z}, xmm1","vpsrld xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W0 D2 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r,r","","" +"VPSRLD ymm1, ymmV, xmm2/m128","VPSRLD xmm2/m128, ymmV, ymm1","vpsrld xmm2/m128, ymmV, ymm1","VEX.NDS.256.66.0F.WIG D2 /r","V","V","AVX2","","w,r,r","","" +"VPSRLD ymm1, {k}{z}, ymmV, xmm2/m128","VPSRLD xmm2/m128, ymmV, {k}{z}, ymm1","vpsrld xmm2/m128, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W0 D2 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r,r","","" +"VPSRLD zmm1, {k}{z}, zmmV, xmm2/m128","VPSRLD xmm2/m128, zmmV, {k}{z}, zmm1","vpsrld xmm2/m128, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W0 D2 /r","V","V","AVX512F","scale16","w,r,r,r","","" +"VPSRLDQ xmmV, xmm2, imm8u","VPSRLDQ imm8u, xmm2, xmmV","vpsrldq imm8u, xmm2, xmmV","VEX.NDD.128.66.0F.WIG 73 /3 ib","V","V","AVX","modrm_regonly","w,r,r","","" +"VPSRLDQ xmmV, xmm2/m128, imm8u","VPSRLDQ imm8u, xmm2/m128, xmmV","vpsrldq imm8u, xmm2/m128, xmmV","EVEX.NDD.128.66.0F.WIG 73 /3 ib","V","V","AVX512BW+AVX512VL","scale16","w,r,r","","" +"VPSRLDQ ymmV, ymm2, imm8u","VPSRLDQ imm8u, ymm2, ymmV","vpsrldq imm8u, ymm2, ymmV","VEX.NDD.256.66.0F.WIG 73 /3 ib","V","V","AVX2","modrm_regonly","w,r,r","","" +"VPSRLDQ ymmV, ymm2/m256, imm8u","VPSRLDQ imm8u, ymm2/m256, ymmV","vpsrldq imm8u, ymm2/m256, ymmV","EVEX.NDD.256.66.0F.WIG 73 /3 ib","V","V","AVX512BW+AVX512VL","scale32","w,r,r","","" +"VPSRLDQ zmmV, zmm2/m512, imm8u","VPSRLDQ imm8u, zmm2/m512, zmmV","vpsrldq imm8u, zmm2/m512, zmmV","EVEX.NDD.512.66.0F.WIG 73 /3 ib","V","V","AVX512BW","scale64","w,r,r","","" +"VPSRLQ xmmV, xmm2, imm8u","VPSRLQ imm8u, xmm2, xmmV","vpsrlq imm8u, xmm2, xmmV","VEX.NDD.128.66.0F.WIG 73 /2 ib","V","V","AVX","modrm_regonly","w,r,r","","" +"VPSRLQ xmmV, {k}{z}, xmm2/m128/m64bcst, imm8u","VPSRLQ imm8u, xmm2/m128/m64bcst, {k}{z}, xmmV","vpsrlq imm8u, xmm2/m128/m64bcst, {k}{z}, xmmV","EVEX.NDD.128.66.0F.W1 73 /2 ib","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPSRLQ ymmV, ymm2, imm8u","VPSRLQ imm8u, ymm2, ymmV","vpsrlq imm8u, ymm2, ymmV","VEX.NDD.256.66.0F.WIG 73 /2 ib","V","V","AVX2","modrm_regonly","w,r,r","","" +"VPSRLQ ymmV, {k}{z}, ymm2/m256/m64bcst, imm8u","VPSRLQ imm8u, ymm2/m256/m64bcst, {k}{z}, ymmV","vpsrlq imm8u, ymm2/m256/m64bcst, {k}{z}, ymmV","EVEX.NDD.256.66.0F.W1 73 /2 ib","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPSRLQ zmmV, {k}{z}, zmm2/m512/m64bcst, imm8u","VPSRLQ imm8u, zmm2/m512/m64bcst, {k}{z}, zmmV","vpsrlq imm8u, zmm2/m512/m64bcst, {k}{z}, zmmV","EVEX.NDD.512.66.0F.W1 73 /2 ib","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPSRLQ xmm1, xmmV, xmm2/m128","VPSRLQ xmm2/m128, xmmV, xmm1","vpsrlq xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG D3 /r","V","V","AVX","","w,r,r","","" +"VPSRLQ xmm1, {k}{z}, xmmV, xmm2/m128","VPSRLQ xmm2/m128, xmmV, {k}{z}, xmm1","vpsrlq xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 D3 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r,r","","" +"VPSRLQ ymm1, ymmV, xmm2/m128","VPSRLQ xmm2/m128, ymmV, ymm1","vpsrlq xmm2/m128, ymmV, ymm1","VEX.NDS.256.66.0F.WIG D3 /r","V","V","AVX2","","w,r,r","","" 
+"VPSRLQ ymm1, {k}{z}, ymmV, xmm2/m128","VPSRLQ xmm2/m128, ymmV, {k}{z}, ymm1","vpsrlq xmm2/m128, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 D3 /r","V","V","AVX512F+AVX512VL","scale16","w,r,r,r","","" +"VPSRLQ zmm1, {k}{z}, zmmV, xmm2/m128","VPSRLQ xmm2/m128, zmmV, {k}{z}, zmm1","vpsrlq xmm2/m128, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 D3 /r","V","V","AVX512F","scale16","w,r,r,r","","" +"VPSRLVD xmm1, xmmV, xmm2/m128","VPSRLVD xmm2/m128, xmmV, xmm1","vpsrlvd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.W0 45 /r","V","V","AVX2","","w,r,r","","" +"VPSRLVD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPSRLVD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpsrlvd xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W0 45 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPSRLVD ymm1, ymmV, ymm2/m256","VPSRLVD ymm2/m256, ymmV, ymm1","vpsrlvd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.W0 45 /r","V","V","AVX2","","w,r,r","","" +"VPSRLVD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPSRLVD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpsrlvd ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 45 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPSRLVD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPSRLVD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpsrlvd zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 45 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPSRLVQ xmm1, xmmV, xmm2/m128","VPSRLVQ xmm2/m128, xmmV, xmm1","vpsrlvq xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F38.W1 45 /r","V","V","AVX2","","w,r,r","","" +"VPSRLVQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPSRLVQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpsrlvq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 45 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPSRLVQ ymm1, ymmV, ymm2/m256","VPSRLVQ ymm2/m256, ymmV, ymm1","vpsrlvq ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F38.W1 45 /r","V","V","AVX2","","w,r,r","","" +"VPSRLVQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPSRLVQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpsrlvq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 45 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPSRLVQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPSRLVQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpsrlvq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 45 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPSRLVW xmm1, {k}{z}, xmmV, xmm2/m128","VPSRLVW xmm2/m128, xmmV, {k}{z}, xmm1","vpsrlvw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 10 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSRLVW ymm1, {k}{z}, ymmV, ymm2/m256","VPSRLVW ymm2/m256, ymmV, {k}{z}, ymm1","vpsrlvw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 10 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPSRLVW zmm1, {k}{z}, zmmV, zmm2/m512","VPSRLVW zmm2/m512, zmmV, {k}{z}, zmm1","vpsrlvw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 10 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPSRLW xmmV, xmm2, imm8u","VPSRLW imm8u, xmm2, xmmV","vpsrlw imm8u, xmm2, xmmV","VEX.NDD.128.66.0F.WIG 71 /2 ib","V","V","AVX","modrm_regonly","w,r,r","","" +"VPSRLW xmmV, {k}{z}, xmm2/m128, imm8u","VPSRLW imm8u, xmm2/m128, {k}{z}, xmmV","vpsrlw imm8u, xmm2/m128, {k}{z}, xmmV","EVEX.NDD.128.66.0F.WIG 71 /2 ib","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSRLW ymmV, ymm2, imm8u","VPSRLW imm8u, ymm2, ymmV","vpsrlw imm8u, ymm2, ymmV","VEX.NDD.256.66.0F.WIG 71 /2 
ib","V","V","AVX2","modrm_regonly","w,r,r","","" +"VPSRLW ymmV, {k}{z}, ymm2/m256, imm8u","VPSRLW imm8u, ymm2/m256, {k}{z}, ymmV","vpsrlw imm8u, ymm2/m256, {k}{z}, ymmV","EVEX.NDD.256.66.0F.WIG 71 /2 ib","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPSRLW zmmV, {k}{z}, zmm2/m512, imm8u","VPSRLW imm8u, zmm2/m512, {k}{z}, zmmV","vpsrlw imm8u, zmm2/m512, {k}{z}, zmmV","EVEX.NDD.512.66.0F.WIG 71 /2 ib","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPSRLW xmm1, xmmV, xmm2/m128","VPSRLW xmm2/m128, xmmV, xmm1","vpsrlw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG D1 /r","V","V","AVX","","w,r,r","","" +"VPSRLW xmm1, {k}{z}, xmmV, xmm2/m128","VPSRLW xmm2/m128, xmmV, {k}{z}, xmm1","vpsrlw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG D1 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSRLW ymm1, ymmV, xmm2/m128","VPSRLW xmm2/m128, ymmV, ymm1","vpsrlw xmm2/m128, ymmV, ymm1","VEX.NDS.256.66.0F.WIG D1 /r","V","V","AVX2","","w,r,r","","" +"VPSRLW ymm1, {k}{z}, ymmV, xmm2/m128","VPSRLW xmm2/m128, ymmV, {k}{z}, ymm1","vpsrlw xmm2/m128, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG D1 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSRLW zmm1, {k}{z}, zmmV, xmm2/m128","VPSRLW xmm2/m128, zmmV, {k}{z}, zmm1","vpsrlw xmm2/m128, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG D1 /r","V","V","AVX512BW","scale16","w,r,r,r","","" +"VPSUBB xmm1, xmmV, xmm2/m128","VPSUBB xmm2/m128, xmmV, xmm1","vpsubb xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG F8 /r","V","V","AVX","","w,r,r","","" +"VPSUBB xmm1, {k}{z}, xmmV, xmm2/m128","VPSUBB xmm2/m128, xmmV, {k}{z}, xmm1","vpsubb xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG F8 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSUBB ymm1, ymmV, ymm2/m256","VPSUBB ymm2/m256, ymmV, ymm1","vpsubb ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG F8 /r","V","V","AVX2","","w,r,r","","" +"VPSUBB ymm1, {k}{z}, ymmV, ymm2/m256","VPSUBB ymm2/m256, ymmV, {k}{z}, ymm1","vpsubb ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG F8 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPSUBB zmm1, {k}{z}, zmmV, zmm2/m512","VPSUBB zmm2/m512, zmmV, {k}{z}, zmm1","vpsubb zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG F8 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPSUBD xmm1, xmmV, xmm2/m128","VPSUBD xmm2/m128, xmmV, xmm1","vpsubd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG FA /r","V","V","AVX","","w,r,r","","" +"VPSUBD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPSUBD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpsubd xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W0 FA /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPSUBD ymm1, ymmV, ymm2/m256","VPSUBD ymm2/m256, ymmV, ymm1","vpsubd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG FA /r","V","V","AVX2","","w,r,r","","" +"VPSUBD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPSUBD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpsubd ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W0 FA /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPSUBD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPSUBD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpsubd zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W0 FA /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPSUBQ xmm1, xmmV, xmm2/m128","VPSUBQ xmm2/m128, xmmV, xmm1","vpsubq xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG FB /r","V","V","AVX","","w,r,r","","" +"VPSUBQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPSUBQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpsubq 
xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 FB /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPSUBQ ymm1, ymmV, ymm2/m256","VPSUBQ ymm2/m256, ymmV, ymm1","vpsubq ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG FB /r","V","V","AVX2","","w,r,r","","" +"VPSUBQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPSUBQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpsubq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 FB /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPSUBQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPSUBQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpsubq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 FB /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPSUBSB xmm1, xmmV, xmm2/m128","VPSUBSB xmm2/m128, xmmV, xmm1","vpsubsb xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG E8 /r","V","V","AVX","","w,r,r","","" +"VPSUBSB xmm1, {k}{z}, xmmV, xmm2/m128","VPSUBSB xmm2/m128, xmmV, {k}{z}, xmm1","vpsubsb xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG E8 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSUBSB ymm1, ymmV, ymm2/m256","VPSUBSB ymm2/m256, ymmV, ymm1","vpsubsb ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG E8 /r","V","V","AVX2","","w,r,r","","" +"VPSUBSB ymm1, {k}{z}, ymmV, ymm2/m256","VPSUBSB ymm2/m256, ymmV, {k}{z}, ymm1","vpsubsb ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG E8 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPSUBSB zmm1, {k}{z}, zmmV, zmm2/m512","VPSUBSB zmm2/m512, zmmV, {k}{z}, zmm1","vpsubsb zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG E8 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPSUBSW xmm1, xmmV, xmm2/m128","VPSUBSW xmm2/m128, xmmV, xmm1","vpsubsw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG E9 /r","V","V","AVX","","w,r,r","","" +"VPSUBSW xmm1, {k}{z}, xmmV, xmm2/m128","VPSUBSW xmm2/m128, xmmV, {k}{z}, xmm1","vpsubsw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG E9 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSUBSW ymm1, ymmV, ymm2/m256","VPSUBSW ymm2/m256, ymmV, ymm1","vpsubsw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG E9 /r","V","V","AVX2","","w,r,r","","" +"VPSUBSW ymm1, {k}{z}, ymmV, ymm2/m256","VPSUBSW ymm2/m256, ymmV, {k}{z}, ymm1","vpsubsw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG E9 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPSUBSW zmm1, {k}{z}, zmmV, zmm2/m512","VPSUBSW zmm2/m512, zmmV, {k}{z}, zmm1","vpsubsw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG E9 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPSUBUSB xmm1, xmmV, xmm2/m128","VPSUBUSB xmm2/m128, xmmV, xmm1","vpsubusb xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG D8 /r","V","V","AVX","","w,r,r","","" +"VPSUBUSB xmm1, {k}{z}, xmmV, xmm2/m128","VPSUBUSB xmm2/m128, xmmV, {k}{z}, xmm1","vpsubusb xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG D8 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSUBUSB ymm1, ymmV, ymm2/m256","VPSUBUSB ymm2/m256, ymmV, ymm1","vpsubusb ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG D8 /r","V","V","AVX2","","w,r,r","","" +"VPSUBUSB ymm1, {k}{z}, ymmV, ymm2/m256","VPSUBUSB ymm2/m256, ymmV, {k}{z}, ymm1","vpsubusb ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG D8 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPSUBUSB zmm1, {k}{z}, zmmV, zmm2/m512","VPSUBUSB zmm2/m512, zmmV, {k}{z}, zmm1","vpsubusb zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG D8 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPSUBUSW xmm1, 
xmmV, xmm2/m128","VPSUBUSW xmm2/m128, xmmV, xmm1","vpsubusw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG D9 /r","V","V","AVX","","w,r,r","","" +"VPSUBUSW xmm1, {k}{z}, xmmV, xmm2/m128","VPSUBUSW xmm2/m128, xmmV, {k}{z}, xmm1","vpsubusw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG D9 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSUBUSW ymm1, ymmV, ymm2/m256","VPSUBUSW ymm2/m256, ymmV, ymm1","vpsubusw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG D9 /r","V","V","AVX2","","w,r,r","","" +"VPSUBUSW ymm1, {k}{z}, ymmV, ymm2/m256","VPSUBUSW ymm2/m256, ymmV, {k}{z}, ymm1","vpsubusw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG D9 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPSUBUSW zmm1, {k}{z}, zmmV, zmm2/m512","VPSUBUSW zmm2/m512, zmmV, {k}{z}, zmm1","vpsubusw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG D9 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPSUBW xmm1, xmmV, xmm2/m128","VPSUBW xmm2/m128, xmmV, xmm1","vpsubw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG F9 /r","V","V","AVX","","w,r,r","","" +"VPSUBW xmm1, {k}{z}, xmmV, xmm2/m128","VPSUBW xmm2/m128, xmmV, {k}{z}, xmm1","vpsubw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG F9 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPSUBW ymm1, ymmV, ymm2/m256","VPSUBW ymm2/m256, ymmV, ymm1","vpsubw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG F9 /r","V","V","AVX2","","w,r,r","","" +"VPSUBW ymm1, {k}{z}, ymmV, ymm2/m256","VPSUBW ymm2/m256, ymmV, {k}{z}, ymm1","vpsubw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG F9 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPSUBW zmm1, {k}{z}, zmmV, zmm2/m512","VPSUBW zmm2/m512, zmmV, {k}{z}, zmm1","vpsubw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG F9 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPTERNLOGD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst, imm8u","VPTERNLOGD imm8u, xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpternlogd imm8u, xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F3A.W0 25 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale16","rw,r,r,r,r","","" +"VPTERNLOGD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst, imm8u","VPTERNLOGD imm8u, ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpternlogd imm8u, ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F3A.W0 25 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale32","rw,r,r,r,r","","" +"VPTERNLOGD zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst, imm8u","VPTERNLOGD imm8u, zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpternlogd imm8u, zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F3A.W0 25 /r ib","V","V","AVX512F","bscale4,scale64","rw,r,r,r,r","","" +"VPTERNLOGQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst, imm8u","VPTERNLOGQ imm8u, xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpternlogq imm8u, xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.DDS.128.66.0F3A.W1 25 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale16","rw,r,r,r,r","","" +"VPTERNLOGQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst, imm8u","VPTERNLOGQ imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpternlogq imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.DDS.256.66.0F3A.W1 25 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale32","rw,r,r,r,r","","" +"VPTERNLOGQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst, imm8u","VPTERNLOGQ imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpternlogq imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.DDS.512.66.0F3A.W1 25 /r ib","V","V","AVX512F","bscale8,scale64","rw,r,r,r,r","","" +"VPTEST xmm1, xmm2/m128","VPTEST xmm2/m128, xmm1","vptest xmm2/m128, 
xmm1","VEX.128.66.0F38.WIG 17 /r","V","V","AVX","","r,r","","" +"VPTEST ymm1, ymm2/m256","VPTEST ymm2/m256, ymm1","vptest ymm2/m256, ymm1","VEX.256.66.0F38.WIG 17 /r","V","V","AVX","","r,r","","" +"VPTESTMB k1, {k}, xmmV, xmm2/m128","VPTESTMB xmm2/m128, xmmV, {k}, k1","vptestmb xmm2/m128, xmmV, {k}, k1","EVEX.NDS.128.66.0F38.W0 26 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPTESTMB k1, {k}, ymmV, ymm2/m256","VPTESTMB ymm2/m256, ymmV, {k}, k1","vptestmb ymm2/m256, ymmV, {k}, k1","EVEX.NDS.256.66.0F38.W0 26 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPTESTMB k1, {k}, zmmV, zmm2/m512","VPTESTMB zmm2/m512, zmmV, {k}, k1","vptestmb zmm2/m512, zmmV, {k}, k1","EVEX.NDS.512.66.0F38.W0 26 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPTESTMD k1, {k}, xmmV, xmm2/m128/m32bcst","VPTESTMD xmm2/m128/m32bcst, xmmV, {k}, k1","vptestmd xmm2/m128/m32bcst, xmmV, {k}, k1","EVEX.NDS.128.66.0F38.W0 27 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPTESTMD k1, {k}, ymmV, ymm2/m256/m32bcst","VPTESTMD ymm2/m256/m32bcst, ymmV, {k}, k1","vptestmd ymm2/m256/m32bcst, ymmV, {k}, k1","EVEX.NDS.256.66.0F38.W0 27 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPTESTMD k1, {k}, zmmV, zmm2/m512/m32bcst","VPTESTMD zmm2/m512/m32bcst, zmmV, {k}, k1","vptestmd zmm2/m512/m32bcst, zmmV, {k}, k1","EVEX.NDS.512.66.0F38.W0 27 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPTESTMQ k1, {k}, xmmV, xmm2/m128/m64bcst","VPTESTMQ xmm2/m128/m64bcst, xmmV, {k}, k1","vptestmq xmm2/m128/m64bcst, xmmV, {k}, k1","EVEX.NDS.128.66.0F38.W1 27 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPTESTMQ k1, {k}, ymmV, ymm2/m256/m64bcst","VPTESTMQ ymm2/m256/m64bcst, ymmV, {k}, k1","vptestmq ymm2/m256/m64bcst, ymmV, {k}, k1","EVEX.NDS.256.66.0F38.W1 27 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPTESTMQ k1, {k}, zmmV, zmm2/m512/m64bcst","VPTESTMQ zmm2/m512/m64bcst, zmmV, {k}, k1","vptestmq zmm2/m512/m64bcst, zmmV, {k}, k1","EVEX.NDS.512.66.0F38.W1 27 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPTESTMW k1, {k}, xmmV, xmm2/m128","VPTESTMW xmm2/m128, xmmV, {k}, k1","vptestmw xmm2/m128, xmmV, {k}, k1","EVEX.NDS.128.66.0F38.W1 26 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPTESTMW k1, {k}, ymmV, ymm2/m256","VPTESTMW ymm2/m256, ymmV, {k}, k1","vptestmw ymm2/m256, ymmV, {k}, k1","EVEX.NDS.256.66.0F38.W1 26 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPTESTMW k1, {k}, zmmV, zmm2/m512","VPTESTMW zmm2/m512, zmmV, {k}, k1","vptestmw zmm2/m512, zmmV, {k}, k1","EVEX.NDS.512.66.0F38.W1 26 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPTESTNMB k1, {k}, xmmV, xmm2/m128","VPTESTNMB xmm2/m128, xmmV, {k}, k1","vptestnmb xmm2/m128, xmmV, {k}, k1","EVEX.NDS.128.F3.0F38.W0 26 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPTESTNMB k1, {k}, ymmV, ymm2/m256","VPTESTNMB ymm2/m256, ymmV, {k}, k1","vptestnmb ymm2/m256, ymmV, {k}, k1","EVEX.NDS.256.F3.0F38.W0 26 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPTESTNMB k1, {k}, zmmV, zmm2/m512","VPTESTNMB zmm2/m512, zmmV, {k}, k1","vptestnmb zmm2/m512, zmmV, {k}, k1","EVEX.NDS.512.F3.0F38.W0 26 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPTESTNMD k1, {k}, xmmV, xmm2/m128/m32bcst","VPTESTNMD xmm2/m128/m32bcst, xmmV, {k}, k1","vptestnmd xmm2/m128/m32bcst, xmmV, {k}, k1","EVEX.NDS.128.F3.0F38.W0 27 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPTESTNMD k1, {k}, ymmV, 
ymm2/m256/m32bcst","VPTESTNMD ymm2/m256/m32bcst, ymmV, {k}, k1","vptestnmd ymm2/m256/m32bcst, ymmV, {k}, k1","EVEX.NDS.256.F3.0F38.W0 27 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPTESTNMD k1, {k}, zmmV, zmm2/m512/m32bcst","VPTESTNMD zmm2/m512/m32bcst, zmmV, {k}, k1","vptestnmd zmm2/m512/m32bcst, zmmV, {k}, k1","EVEX.NDS.512.F3.0F38.W0 27 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPTESTNMQ k1, {k}, xmmV, xmm2/m128/m64bcst","VPTESTNMQ xmm2/m128/m64bcst, xmmV, {k}, k1","vptestnmq xmm2/m128/m64bcst, xmmV, {k}, k1","EVEX.NDS.128.F3.0F38.W1 27 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPTESTNMQ k1, {k}, ymmV, ymm2/m256/m64bcst","VPTESTNMQ ymm2/m256/m64bcst, ymmV, {k}, k1","vptestnmq ymm2/m256/m64bcst, ymmV, {k}, k1","EVEX.NDS.256.F3.0F38.W1 27 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPTESTNMQ k1, {k}, zmmV, zmm2/m512/m64bcst","VPTESTNMQ zmm2/m512/m64bcst, zmmV, {k}, k1","vptestnmq zmm2/m512/m64bcst, zmmV, {k}, k1","EVEX.NDS.512.F3.0F38.W1 27 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPTESTNMW k1, {k}, xmmV, xmm2/m128","VPTESTNMW xmm2/m128, xmmV, {k}, k1","vptestnmw xmm2/m128, xmmV, {k}, k1","EVEX.NDS.128.F3.0F38.W1 26 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPTESTNMW k1, {k}, ymmV, ymm2/m256","VPTESTNMW ymm2/m256, ymmV, {k}, k1","vptestnmw ymm2/m256, ymmV, {k}, k1","EVEX.NDS.256.F3.0F38.W1 26 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPTESTNMW k1, {k}, zmmV, zmm2/m512","VPTESTNMW zmm2/m512, zmmV, {k}, k1","vptestnmw zmm2/m512, zmmV, {k}, k1","EVEX.NDS.512.F3.0F38.W1 26 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPUNPCKHBW xmm1, xmmV, xmm2/m128","VPUNPCKHBW xmm2/m128, xmmV, xmm1","vpunpckhbw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 68 /r","V","V","AVX","","w,r,r","","" +"VPUNPCKHBW xmm1, {k}{z}, xmmV, xmm2/m128","VPUNPCKHBW xmm2/m128, xmmV, {k}{z}, xmm1","vpunpckhbw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG 68 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPUNPCKHBW ymm1, ymmV, ymm2/m256","VPUNPCKHBW ymm2/m256, ymmV, ymm1","vpunpckhbw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 68 /r","V","V","AVX2","","w,r,r","","" +"VPUNPCKHBW ymm1, {k}{z}, ymmV, ymm2/m256","VPUNPCKHBW ymm2/m256, ymmV, {k}{z}, ymm1","vpunpckhbw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG 68 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPUNPCKHBW zmm1, {k}{z}, zmmV, zmm2/m512","VPUNPCKHBW zmm2/m512, zmmV, {k}{z}, zmm1","vpunpckhbw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG 68 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPUNPCKHDQ xmm1, xmmV, xmm2/m128","VPUNPCKHDQ xmm2/m128, xmmV, xmm1","vpunpckhdq xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 6A /r","V","V","AVX","","w,r,r","","" +"VPUNPCKHDQ xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPUNPCKHDQ xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpunpckhdq xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W0 6A /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPUNPCKHDQ ymm1, ymmV, ymm2/m256","VPUNPCKHDQ ymm2/m256, ymmV, ymm1","vpunpckhdq ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 6A /r","V","V","AVX2","","w,r,r","","" +"VPUNPCKHDQ ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPUNPCKHDQ ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpunpckhdq ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W0 6A /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPUNPCKHDQ zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPUNPCKHDQ 
zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpunpckhdq zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W0 6A /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPUNPCKHQDQ xmm1, xmmV, xmm2/m128","VPUNPCKHQDQ xmm2/m128, xmmV, xmm1","vpunpckhqdq xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 6D /r","V","V","AVX","","w,r,r","","" +"VPUNPCKHQDQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPUNPCKHQDQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpunpckhqdq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 6D /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPUNPCKHQDQ ymm1, ymmV, ymm2/m256","VPUNPCKHQDQ ymm2/m256, ymmV, ymm1","vpunpckhqdq ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 6D /r","V","V","AVX2","","w,r,r","","" +"VPUNPCKHQDQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPUNPCKHQDQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpunpckhqdq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 6D /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPUNPCKHQDQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPUNPCKHQDQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpunpckhqdq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 6D /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPUNPCKHWD xmm1, xmmV, xmm2/m128","VPUNPCKHWD xmm2/m128, xmmV, xmm1","vpunpckhwd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 69 /r","V","V","AVX","","w,r,r","","" +"VPUNPCKHWD xmm1, {k}{z}, xmmV, xmm2/m128","VPUNPCKHWD xmm2/m128, xmmV, {k}{z}, xmm1","vpunpckhwd xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG 69 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPUNPCKHWD ymm1, ymmV, ymm2/m256","VPUNPCKHWD ymm2/m256, ymmV, ymm1","vpunpckhwd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 69 /r","V","V","AVX2","","w,r,r","","" +"VPUNPCKHWD ymm1, {k}{z}, ymmV, ymm2/m256","VPUNPCKHWD ymm2/m256, ymmV, {k}{z}, ymm1","vpunpckhwd ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG 69 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPUNPCKHWD zmm1, {k}{z}, zmmV, zmm2/m512","VPUNPCKHWD zmm2/m512, zmmV, {k}{z}, zmm1","vpunpckhwd zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG 69 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPUNPCKLBW xmm1, xmmV, xmm2/m128","VPUNPCKLBW xmm2/m128, xmmV, xmm1","vpunpcklbw xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 60 /r","V","V","AVX","","w,r,r","","" +"VPUNPCKLBW xmm1, {k}{z}, xmmV, xmm2/m128","VPUNPCKLBW xmm2/m128, xmmV, {k}{z}, xmm1","vpunpcklbw xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG 60 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPUNPCKLBW ymm1, ymmV, ymm2/m256","VPUNPCKLBW ymm2/m256, ymmV, ymm1","vpunpcklbw ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 60 /r","V","V","AVX2","","w,r,r","","" +"VPUNPCKLBW ymm1, {k}{z}, ymmV, ymm2/m256","VPUNPCKLBW ymm2/m256, ymmV, {k}{z}, ymm1","vpunpcklbw ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG 60 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPUNPCKLBW zmm1, {k}{z}, zmmV, zmm2/m512","VPUNPCKLBW zmm2/m512, zmmV, {k}{z}, zmm1","vpunpcklbw zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG 60 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPUNPCKLDQ xmm1, xmmV, xmm2/m128","VPUNPCKLDQ xmm2/m128, xmmV, xmm1","vpunpckldq xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 62 /r","V","V","AVX","","w,r,r","","" +"VPUNPCKLDQ xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPUNPCKLDQ xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpunpckldq xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W0 62 
/r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPUNPCKLDQ ymm1, ymmV, ymm2/m256","VPUNPCKLDQ ymm2/m256, ymmV, ymm1","vpunpckldq ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 62 /r","V","V","AVX2","","w,r,r","","" +"VPUNPCKLDQ ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPUNPCKLDQ ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpunpckldq ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W0 62 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPUNPCKLDQ zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VPUNPCKLDQ zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpunpckldq zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W0 62 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPUNPCKLQDQ xmm1, xmmV, xmm2/m128","VPUNPCKLQDQ xmm2/m128, xmmV, xmm1","vpunpcklqdq xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 6C /r","V","V","AVX","","w,r,r","","" +"VPUNPCKLQDQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPUNPCKLQDQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpunpcklqdq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 6C /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPUNPCKLQDQ ymm1, ymmV, ymm2/m256","VPUNPCKLQDQ ymm2/m256, ymmV, ymm1","vpunpcklqdq ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 6C /r","V","V","AVX2","","w,r,r","","" +"VPUNPCKLQDQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPUNPCKLQDQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpunpcklqdq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 6C /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPUNPCKLQDQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPUNPCKLQDQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpunpcklqdq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 6C /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VPUNPCKLWD xmm1, xmmV, xmm2/m128","VPUNPCKLWD xmm2/m128, xmmV, xmm1","vpunpcklwd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 61 /r","V","V","AVX","","w,r,r","","" +"VPUNPCKLWD xmm1, {k}{z}, xmmV, xmm2/m128","VPUNPCKLWD xmm2/m128, xmmV, {k}{z}, xmm1","vpunpcklwd xmm2/m128, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.WIG 61 /r","V","V","AVX512BW+AVX512VL","scale16","w,r,r,r","","" +"VPUNPCKLWD ymm1, ymmV, ymm2/m256","VPUNPCKLWD ymm2/m256, ymmV, ymm1","vpunpcklwd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 61 /r","V","V","AVX2","","w,r,r","","" +"VPUNPCKLWD ymm1, {k}{z}, ymmV, ymm2/m256","VPUNPCKLWD ymm2/m256, ymmV, {k}{z}, ymm1","vpunpcklwd ymm2/m256, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.WIG 61 /r","V","V","AVX512BW+AVX512VL","scale32","w,r,r,r","","" +"VPUNPCKLWD zmm1, {k}{z}, zmmV, zmm2/m512","VPUNPCKLWD zmm2/m512, zmmV, {k}{z}, zmm1","vpunpcklwd zmm2/m512, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.WIG 61 /r","V","V","AVX512BW","scale64","w,r,r,r","","" +"VPXOR xmm1, xmmV, xmm2/m128","VPXOR xmm2/m128, xmmV, xmm1","vpxor xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG EF /r","V","V","AVX","","w,r,r","","" +"VPXOR ymm1, ymmV, ymm2/m256","VPXOR ymm2/m256, ymmV, ymm1","vpxor ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG EF /r","V","V","AVX2","","w,r,r","","" +"VPXORD xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VPXORD xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vpxord xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W0 EF /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VPXORD ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VPXORD ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vpxord ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W0 EF /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VPXORD zmm1, {k}{z}, 
zmmV, zmm2/m512/m32bcst","VPXORD zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vpxord zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W0 EF /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VPXORQ xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VPXORQ xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vpxorq xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 EF /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VPXORQ ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VPXORQ ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vpxorq ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 EF /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VPXORQ zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VPXORQ zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vpxorq zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 EF /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VRANGEPD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst, imm8u:4","VRANGEPD imm8u:4, xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vrangepd imm8u:4, xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F3A.W1 50 /r ib","V","V","AVX512DQ+AVX512VL","bscale8,scale16","w,r,r,r,r","","" +"VRANGEPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst, imm8u:4","VRANGEPD imm8u:4, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vrangepd imm8u:4, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W1 50 /r ib","V","V","AVX512DQ+AVX512VL","bscale8,scale32","w,r,r,r,r","","" +"VRANGEPD zmm1{sae}, {k}{z}, zmmV, zmm2, imm8u:4","VRANGEPD imm8u:4, zmm2, zmmV, {k}{z}, zmm1{sae}","vrangepd imm8u:4, zmm2, zmmV, {k}{z}, zmm1{sae}","EVEX.NDS.512.66.0F3A.W1 50 /r ib","V","V","AVX512DQ","modrm_regonly","w,r,r,r,r","","" +"VRANGEPD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst, imm8u:4","VRANGEPD imm8u:4, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vrangepd imm8u:4, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W1 50 /r ib","V","V","AVX512DQ","bscale8,scale64","w,r,r,r,r","","" +"VRANGEPS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst, imm8u:4","VRANGEPS imm8u:4, xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vrangeps imm8u:4, xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F3A.W0 50 /r ib","V","V","AVX512DQ+AVX512VL","bscale4,scale16","w,r,r,r,r","","" +"VRANGEPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst, imm8u:4","VRANGEPS imm8u:4, ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vrangeps imm8u:4, ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W0 50 /r ib","V","V","AVX512DQ+AVX512VL","bscale4,scale32","w,r,r,r,r","","" +"VRANGEPS zmm1{sae}, {k}{z}, zmmV, zmm2, imm8u:4","VRANGEPS imm8u:4, zmm2, zmmV, {k}{z}, zmm1{sae}","vrangeps imm8u:4, zmm2, zmmV, {k}{z}, zmm1{sae}","EVEX.NDS.512.66.0F3A.W0 50 /r ib","V","V","AVX512DQ","modrm_regonly","w,r,r,r,r","","" +"VRANGEPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst, imm8u:4","VRANGEPS imm8u:4, zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vrangeps imm8u:4, zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W0 50 /r ib","V","V","AVX512DQ","bscale4,scale64","w,r,r,r,r","","" +"VRANGESD xmm1{sae}, {k}{z}, xmmV, xmm2, imm8u:4","VRANGESD imm8u:4, xmm2, xmmV, {k}{z}, xmm1{sae}","vrangesd imm8u:4, xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.66.0F3A.W1 51 /r ib","V","V","AVX512DQ","modrm_regonly","w,r,r,r,r","","" +"VRANGESD xmm1, {k}{z}, xmmV, xmm2/m64, imm8u:4","VRANGESD imm8u:4, xmm2/m64, xmmV, {k}{z}, xmm1","vrangesd imm8u:4, xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F3A.W1 51 /r ib","V","V","AVX512DQ","scale8","w,r,r,r,r","","" +"VRANGESS xmm1{sae}, {k}{z}, xmmV, xmm2, imm8u:4","VRANGESS imm8u:4, xmm2, xmmV, {k}{z}, 
xmm1{sae}","vrangess imm8u:4, xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.66.0F3A.W0 51 /r ib","V","V","AVX512DQ","modrm_regonly","w,r,r,r,r","","" +"VRANGESS xmm1, {k}{z}, xmmV, xmm2/m32, imm8u:4","VRANGESS imm8u:4, xmm2/m32, xmmV, {k}{z}, xmm1","vrangess imm8u:4, xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F3A.W0 51 /r ib","V","V","AVX512DQ","scale4","w,r,r,r,r","","" +"VRCP14PD xmm1, {k}{z}, xmm2/m128/m64bcst","VRCP14PD xmm2/m128/m64bcst, {k}{z}, xmm1","vrcp14pd xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.66.0F38.W1 4C /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r","","" +"VRCP14PD ymm1, {k}{z}, ymm2/m256/m64bcst","VRCP14PD ymm2/m256/m64bcst, {k}{z}, ymm1","vrcp14pd ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.66.0F38.W1 4C /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r","","" +"VRCP14PD zmm1, {k}{z}, zmm2/m512/m64bcst","VRCP14PD zmm2/m512/m64bcst, {k}{z}, zmm1","vrcp14pd zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W1 4C /r","V","V","AVX512F","bscale8,scale64","w,r,r","","" +"VRCP14PS xmm1, {k}{z}, xmm2/m128/m32bcst","VRCP14PS xmm2/m128/m32bcst, {k}{z}, xmm1","vrcp14ps xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.66.0F38.W0 4C /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r","","" +"VRCP14PS ymm1, {k}{z}, ymm2/m256/m32bcst","VRCP14PS ymm2/m256/m32bcst, {k}{z}, ymm1","vrcp14ps ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.66.0F38.W0 4C /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r","","" +"VRCP14PS zmm1, {k}{z}, zmm2/m512/m32bcst","VRCP14PS zmm2/m512/m32bcst, {k}{z}, zmm1","vrcp14ps zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W0 4C /r","V","V","AVX512F","bscale4,scale64","w,r,r","","" +"VRCP14SD xmm1, {k}{z}, xmmV, xmm2/m64","VRCP14SD xmm2/m64, xmmV, {k}{z}, xmm1","vrcp14sd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F38.W1 4D /r","V","V","AVX512F","scale8","w,r,r,r","","" +"VRCP14SS xmm1, {k}{z}, xmmV, xmm2/m32","VRCP14SS xmm2/m32, xmmV, {k}{z}, xmm1","vrcp14ss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F38.W0 4D /r","V","V","AVX512F","scale4","w,r,r,r","","" +"VRCP28PD zmm1{sae}, {k}{z}, zmm2","VRCP28PD zmm2, {k}{z}, zmm1{sae}","vrcp28pd zmm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F38.W1 CA /r","V","V","AVX512ER","modrm_regonly","w,r,r","","" +"VRCP28PD zmm1, {k}{z}, zmm2/m512/m64bcst","VRCP28PD zmm2/m512/m64bcst, {k}{z}, zmm1","vrcp28pd zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W1 CA /r","V","V","AVX512ER","bscale8,scale64","w,r,r","","" +"VRCP28PS zmm1{sae}, {k}{z}, zmm2","VRCP28PS zmm2, {k}{z}, zmm1{sae}","vrcp28ps zmm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F38.W0 CA /r","V","V","AVX512ER","modrm_regonly","w,r,r","","" +"VRCP28PS zmm1, {k}{z}, zmm2/m512/m32bcst","VRCP28PS zmm2/m512/m32bcst, {k}{z}, zmm1","vrcp28ps zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W0 CA /r","V","V","AVX512ER","bscale4,scale64","w,r,r","","" +"VRCP28SD xmm1{sae}, {k}{z}, xmmV, xmm2","VRCP28SD xmm2, xmmV, {k}{z}, xmm1{sae}","vrcp28sd xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.66.0F38.W1 CB /r","V","V","AVX512ER","modrm_regonly","w,r,r,r","","" +"VRCP28SD xmm1, {k}{z}, xmmV, xmm2/m64","VRCP28SD xmm2/m64, xmmV, {k}{z}, xmm1","vrcp28sd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F38.W1 CB /r","V","V","AVX512ER","scale8","w,r,r,r","","" +"VRCP28SS xmm1{sae}, {k}{z}, xmmV, xmm2","VRCP28SS xmm2, xmmV, {k}{z}, xmm1{sae}","vrcp28ss xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.66.0F38.W0 CB /r","V","V","AVX512ER","modrm_regonly","w,r,r,r","","" +"VRCP28SS xmm1, {k}{z}, xmmV, xmm2/m32","VRCP28SS xmm2/m32, xmmV, {k}{z}, xmm1","vrcp28ss xmm2/m32, 
xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F38.W0 CB /r","V","V","AVX512ER","scale4","w,r,r,r","","" +"VRCPPS xmm1, xmm2/m128","VRCPPS xmm2/m128, xmm1","vrcpps xmm2/m128, xmm1","VEX.128.0F.WIG 53 /r","V","V","AVX","","w,r","","" +"VRCPPS ymm1, ymm2/m256","VRCPPS ymm2/m256, ymm1","vrcpps ymm2/m256, ymm1","VEX.256.0F.WIG 53 /r","V","V","AVX","","w,r","","" +"VRCPSS xmm1, xmmV, xmm2/m32","VRCPSS xmm2/m32, xmmV, xmm1","vrcpss xmm2/m32, xmmV, xmm1","VEX.NDS.LIG.F3.0F.WIG 53 /r","V","V","AVX","","w,r,r","","" +"VREDUCEPD xmm1, {k}{z}, xmm2/m128/m64bcst, imm8u","VREDUCEPD imm8u, xmm2/m128/m64bcst, {k}{z}, xmm1","vreducepd imm8u, xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.66.0F3A.W1 56 /r ib","V","V","AVX512DQ+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VREDUCEPD ymm1, {k}{z}, ymm2/m256/m64bcst, imm8u","VREDUCEPD imm8u, ymm2/m256/m64bcst, {k}{z}, ymm1","vreducepd imm8u, ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.66.0F3A.W1 56 /r ib","V","V","AVX512DQ+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VREDUCEPD zmm1{sae}, {k}{z}, zmm2, imm8u","VREDUCEPD imm8u, zmm2, {k}{z}, zmm1{sae}","vreducepd imm8u, zmm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F3A.W1 56 /r ib","V","V","AVX512DQ","modrm_regonly","w,r,r,r","","" +"VREDUCEPD zmm1, {k}{z}, zmm2/m512/m64bcst, imm8u","VREDUCEPD imm8u, zmm2/m512/m64bcst, {k}{z}, zmm1","vreducepd imm8u, zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F3A.W1 56 /r ib","V","V","AVX512DQ","bscale8,scale64","w,r,r,r","","" +"VREDUCEPS xmm1, {k}{z}, xmm2/m128/m32bcst, imm8u","VREDUCEPS imm8u, xmm2/m128/m32bcst, {k}{z}, xmm1","vreduceps imm8u, xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.66.0F3A.W0 56 /r ib","V","V","AVX512DQ+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VREDUCEPS ymm1, {k}{z}, ymm2/m256/m32bcst, imm8u","VREDUCEPS imm8u, ymm2/m256/m32bcst, {k}{z}, ymm1","vreduceps imm8u, ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.66.0F3A.W0 56 /r ib","V","V","AVX512DQ+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VREDUCEPS zmm1{sae}, {k}{z}, zmm2, imm8u","VREDUCEPS imm8u, zmm2, {k}{z}, zmm1{sae}","vreduceps imm8u, zmm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F3A.W0 56 /r ib","V","V","AVX512DQ","modrm_regonly","w,r,r,r","","" +"VREDUCEPS zmm1, {k}{z}, zmm2/m512/m32bcst, imm8u","VREDUCEPS imm8u, zmm2/m512/m32bcst, {k}{z}, zmm1","vreduceps imm8u, zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F3A.W0 56 /r ib","V","V","AVX512DQ","bscale4,scale64","w,r,r,r","","" +"VREDUCESD xmm1{sae}, {k}{z}, xmmV, xmm2, imm8u","VREDUCESD imm8u, xmm2, xmmV, {k}{z}, xmm1{sae}","vreducesd imm8u, xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.66.0F3A.W1 57 /r ib","V","V","AVX512DQ","modrm_regonly","w,r,r,r,r","","" +"VREDUCESD xmm1, {k}{z}, xmmV, xmm2/m64, imm8u","VREDUCESD imm8u, xmm2/m64, xmmV, {k}{z}, xmm1","vreducesd imm8u, xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F3A.W1 57 /r ib","V","V","AVX512DQ","scale8","w,r,r,r,r","","" +"VREDUCESS xmm1{sae}, {k}{z}, xmmV, xmm2, imm8u","VREDUCESS imm8u, xmm2, xmmV, {k}{z}, xmm1{sae}","vreducess imm8u, xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.66.0F3A.W0 57 /r ib","V","V","AVX512DQ","modrm_regonly","w,r,r,r,r","","" +"VREDUCESS xmm1, {k}{z}, xmmV, xmm2/m32, imm8u","VREDUCESS imm8u, xmm2/m32, xmmV, {k}{z}, xmm1","vreducess imm8u, xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F3A.W0 57 /r ib","V","V","AVX512DQ","scale4","w,r,r,r,r","","" +"VRNDSCALEPD xmm1, {k}{z}, xmm2/m128/m64bcst, imm8u","VRNDSCALEPD imm8u, xmm2/m128/m64bcst, {k}{z}, xmm1","vrndscalepd imm8u, xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.66.0F3A.W1 09 /r 
ib","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VRNDSCALEPD ymm1, {k}{z}, ymm2/m256/m64bcst, imm8u","VRNDSCALEPD imm8u, ymm2/m256/m64bcst, {k}{z}, ymm1","vrndscalepd imm8u, ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.66.0F3A.W1 09 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VRNDSCALEPD zmm1{sae}, {k}{z}, zmm2, imm8u","VRNDSCALEPD imm8u, zmm2, {k}{z}, zmm1{sae}","vrndscalepd imm8u, zmm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F3A.W1 09 /r ib","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VRNDSCALEPD zmm1, {k}{z}, zmm2/m512/m64bcst, imm8u","VRNDSCALEPD imm8u, zmm2/m512/m64bcst, {k}{z}, zmm1","vrndscalepd imm8u, zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F3A.W1 09 /r ib","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VRNDSCALEPS xmm1, {k}{z}, xmm2/m128/m32bcst, imm8u","VRNDSCALEPS imm8u, xmm2/m128/m32bcst, {k}{z}, xmm1","vrndscaleps imm8u, xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.66.0F3A.W0 08 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VRNDSCALEPS ymm1, {k}{z}, ymm2/m256/m32bcst, imm8u","VRNDSCALEPS imm8u, ymm2/m256/m32bcst, {k}{z}, ymm1","vrndscaleps imm8u, ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.66.0F3A.W0 08 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VRNDSCALEPS zmm1{sae}, {k}{z}, zmm2, imm8u","VRNDSCALEPS imm8u, zmm2, {k}{z}, zmm1{sae}","vrndscaleps imm8u, zmm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F3A.W0 08 /r ib","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VRNDSCALEPS zmm1, {k}{z}, zmm2/m512/m32bcst, imm8u","VRNDSCALEPS imm8u, zmm2/m512/m32bcst, {k}{z}, zmm1","vrndscaleps imm8u, zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F3A.W0 08 /r ib","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VRNDSCALESD xmm1{sae}, {k}{z}, xmmV, xmm2, imm8u","VRNDSCALESD imm8u, xmm2, xmmV, {k}{z}, xmm1{sae}","vrndscalesd imm8u, xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.66.0F3A.W1 0B /r ib","V","V","AVX512F","modrm_regonly","w,r,r,r,r","","" +"VRNDSCALESD xmm1, {k}{z}, xmmV, xmm2/m64, imm8u","VRNDSCALESD imm8u, xmm2/m64, xmmV, {k}{z}, xmm1","vrndscalesd imm8u, xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F3A.W1 0B /r ib","V","V","AVX512F","scale8","w,r,r,r,r","","" +"VRNDSCALESS xmm1{sae}, {k}{z}, xmmV, xmm2, imm8u","VRNDSCALESS imm8u, xmm2, xmmV, {k}{z}, xmm1{sae}","vrndscaless imm8u, xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.66.0F3A.W0 0A /r ib","V","V","AVX512F","modrm_regonly","w,r,r,r,r","","" +"VRNDSCALESS xmm1, {k}{z}, xmmV, xmm2/m32, imm8u","VRNDSCALESS imm8u, xmm2/m32, xmmV, {k}{z}, xmm1","vrndscaless imm8u, xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F3A.W0 0A /r ib","V","V","AVX512F","scale4","w,r,r,r,r","","" +"VROUNDPD xmm1, xmm2/m128, imm8u","VROUNDPD imm8u, xmm2/m128, xmm1","vroundpd imm8u, xmm2/m128, xmm1","VEX.128.66.0F3A.WIG 09 /r ib","V","V","AVX","","w,r,r","","" +"VROUNDPD ymm1, ymm2/m256, imm8u","VROUNDPD imm8u, ymm2/m256, ymm1","vroundpd imm8u, ymm2/m256, ymm1","VEX.256.66.0F3A.WIG 09 /r ib","V","V","AVX","","w,r,r","","" +"VROUNDPS xmm1, xmm2/m128, imm8u","VROUNDPS imm8u, xmm2/m128, xmm1","vroundps imm8u, xmm2/m128, xmm1","VEX.128.66.0F3A.WIG 08 /r ib","V","V","AVX","","w,r,r","","" +"VROUNDPS ymm1, ymm2/m256, imm8u","VROUNDPS imm8u, ymm2/m256, ymm1","vroundps imm8u, ymm2/m256, ymm1","VEX.256.66.0F3A.WIG 08 /r ib","V","V","AVX","","w,r,r","","" +"VROUNDSD xmm1, xmmV, xmm2/m64, imm8u","VROUNDSD imm8u, xmm2/m64, xmmV, xmm1","vroundsd imm8u, xmm2/m64, xmmV, xmm1","VEX.NDS.LIG.66.0F3A.WIG 0B /r ib","V","V","AVX","","w,r,r,r","","" +"VROUNDSS xmm1, xmmV, 
xmm2/m32, imm8u","VROUNDSS imm8u, xmm2/m32, xmmV, xmm1","vroundss imm8u, xmm2/m32, xmmV, xmm1","VEX.NDS.LIG.66.0F3A.WIG 0A /r ib","V","V","AVX","","w,r,r,r","","" +"VRSQRT14PD xmm1, {k}{z}, xmm2/m128/m64bcst","VRSQRT14PD xmm2/m128/m64bcst, {k}{z}, xmm1","vrsqrt14pd xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.66.0F38.W1 4E /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r","","" +"VRSQRT14PD ymm1, {k}{z}, ymm2/m256/m64bcst","VRSQRT14PD ymm2/m256/m64bcst, {k}{z}, ymm1","vrsqrt14pd ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.66.0F38.W1 4E /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r","","" +"VRSQRT14PD zmm1, {k}{z}, zmm2/m512/m64bcst","VRSQRT14PD zmm2/m512/m64bcst, {k}{z}, zmm1","vrsqrt14pd zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W1 4E /r","V","V","AVX512F","bscale8,scale64","w,r,r","","" +"VRSQRT14PS xmm1, {k}{z}, xmm2/m128/m32bcst","VRSQRT14PS xmm2/m128/m32bcst, {k}{z}, xmm1","vrsqrt14ps xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.66.0F38.W0 4E /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r","","" +"VRSQRT14PS ymm1, {k}{z}, ymm2/m256/m32bcst","VRSQRT14PS ymm2/m256/m32bcst, {k}{z}, ymm1","vrsqrt14ps ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.66.0F38.W0 4E /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r","","" +"VRSQRT14PS zmm1, {k}{z}, zmm2/m512/m32bcst","VRSQRT14PS zmm2/m512/m32bcst, {k}{z}, zmm1","vrsqrt14ps zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W0 4E /r","V","V","AVX512F","bscale4,scale64","w,r,r","","" +"VRSQRT14SD xmm1, {k}{z}, xmmV, xmm2/m64","VRSQRT14SD xmm2/m64, xmmV, {k}{z}, xmm1","vrsqrt14sd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F38.W1 4F /r","V","V","AVX512F","scale8","w,r,r,r","","" +"VRSQRT14SS xmm1, {k}{z}, xmmV, xmm2/m32","VRSQRT14SS xmm2/m32, xmmV, {k}{z}, xmm1","vrsqrt14ss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F38.W0 4F /r","V","V","AVX512F","scale4","w,r,r,r","","" +"VRSQRT28PD zmm1{sae}, {k}{z}, zmm2","VRSQRT28PD zmm2, {k}{z}, zmm1{sae}","vrsqrt28pd zmm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F38.W1 CC /r","V","V","AVX512ER","modrm_regonly","w,r,r","","" +"VRSQRT28PD zmm1, {k}{z}, zmm2/m512/m64bcst","VRSQRT28PD zmm2/m512/m64bcst, {k}{z}, zmm1","vrsqrt28pd zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W1 CC /r","V","V","AVX512ER","bscale8,scale64","w,r,r","","" +"VRSQRT28PS zmm1{sae}, {k}{z}, zmm2","VRSQRT28PS zmm2, {k}{z}, zmm1{sae}","vrsqrt28ps zmm2, {k}{z}, zmm1{sae}","EVEX.512.66.0F38.W0 CC /r","V","V","AVX512ER","modrm_regonly","w,r,r","","" +"VRSQRT28PS zmm1, {k}{z}, zmm2/m512/m32bcst","VRSQRT28PS zmm2/m512/m32bcst, {k}{z}, zmm1","vrsqrt28ps zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.66.0F38.W0 CC /r","V","V","AVX512ER","bscale4,scale64","w,r,r","","" +"VRSQRT28SD xmm1{sae}, {k}{z}, xmmV, xmm2","VRSQRT28SD xmm2, xmmV, {k}{z}, xmm1{sae}","vrsqrt28sd xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.66.0F38.W1 CD /r","V","V","AVX512ER","modrm_regonly","w,r,r,r","","" +"VRSQRT28SD xmm1, {k}{z}, xmmV, xmm2/m64","VRSQRT28SD xmm2/m64, xmmV, {k}{z}, xmm1","vrsqrt28sd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F38.W1 CD /r","V","V","AVX512ER","scale8","w,r,r,r","","" +"VRSQRT28SS xmm1{sae}, {k}{z}, xmmV, xmm2","VRSQRT28SS xmm2, xmmV, {k}{z}, xmm1{sae}","vrsqrt28ss xmm2, xmmV, {k}{z}, xmm1{sae}","EVEX.NDS.128.66.0F38.W0 CD /r","V","V","AVX512ER","modrm_regonly","w,r,r,r","","" +"VRSQRT28SS xmm1, {k}{z}, xmmV, xmm2/m32","VRSQRT28SS xmm2/m32, xmmV, {k}{z}, xmm1","vrsqrt28ss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F38.W0 CD /r","V","V","AVX512ER","scale4","w,r,r,r","","" +"VRSQRTPS xmm1, 
xmm2/m128","VRSQRTPS xmm2/m128, xmm1","vrsqrtps xmm2/m128, xmm1","VEX.128.0F.WIG 52 /r","V","V","AVX","","w,r","","" +"VRSQRTPS ymm1, ymm2/m256","VRSQRTPS ymm2/m256, ymm1","vrsqrtps ymm2/m256, ymm1","VEX.256.0F.WIG 52 /r","V","V","AVX","","w,r","","" +"VRSQRTSS xmm1, xmmV, xmm2/m32","VRSQRTSS xmm2/m32, xmmV, xmm1","vrsqrtss xmm2/m32, xmmV, xmm1","VEX.NDS.LIG.F3.0F.WIG 52 /r","V","V","AVX","","w,r,r","","" +"VSCALEFPD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VSCALEFPD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vscalefpd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W1 2C /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VSCALEFPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VSCALEFPD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vscalefpd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W1 2C /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VSCALEFPD zmm1{er}, {k}{z}, zmmV, zmm2","VSCALEFPD zmm2, zmmV, {k}{z}, zmm1{er}","vscalefpd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.NDS.512.66.0F38.W1 2C /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VSCALEFPD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VSCALEFPD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vscalefpd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W1 2C /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VSCALEFPS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VSCALEFPS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vscalefps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F38.W0 2C /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VSCALEFPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VSCALEFPS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vscalefps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F38.W0 2C /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VSCALEFPS zmm1{er}, {k}{z}, zmmV, zmm2","VSCALEFPS zmm2, zmmV, {k}{z}, zmm1{er}","vscalefps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.NDS.512.66.0F38.W0 2C /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VSCALEFPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VSCALEFPS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vscalefps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F38.W0 2C /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VSCALEFSD xmm1{er}, {k}{z}, xmmV, xmm2","VSCALEFSD xmm2, xmmV, {k}{z}, xmm1{er}","vscalefsd xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.NDS.128.66.0F38.W1 2D /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VSCALEFSD xmm1, {k}{z}, xmmV, xmm2/m64","VSCALEFSD xmm2/m64, xmmV, {k}{z}, xmm1","vscalefsd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F38.W1 2D /r","V","V","AVX512F","scale8","w,r,r,r","","" +"VSCALEFSS xmm1{er}, {k}{z}, xmmV, xmm2","VSCALEFSS xmm2, xmmV, {k}{z}, xmm1{er}","vscalefss xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.NDS.128.66.0F38.W0 2D /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VSCALEFSS xmm1, {k}{z}, xmmV, xmm2/m32","VSCALEFSS xmm2/m32, xmmV, {k}{z}, xmm1","vscalefss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.66.0F38.W0 2D /r","V","V","AVX512F","scale4","w,r,r,r","","" +"VSCATTERDPD vm32x, {k1-k7}, xmm1","VSCATTERDPD xmm1, {k1-k7}, vm32x","vscatterdpd xmm1, {k1-k7}, vm32x","EVEX.128.66.0F38.W1 A2 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,rw,r","","" +"VSCATTERDPD vm32x, {k1-k7}, ymm1","VSCATTERDPD ymm1, {k1-k7}, vm32x","vscatterdpd ymm1, {k1-k7}, vm32x","EVEX.256.66.0F38.W1 A2 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,rw,r","","" +"VSCATTERDPD vm32y, {k1-k7}, zmm1","VSCATTERDPD zmm1, {k1-k7}, 
vm32y","vscatterdpd zmm1, {k1-k7}, vm32y","EVEX.512.66.0F38.W1 A2 /vsib","V","V","AVX512F","modrm_memonly,scale8","w,rw,r","","" +"VSCATTERDPS vm32x, {k1-k7}, xmm1","VSCATTERDPS xmm1, {k1-k7}, vm32x","vscatterdps xmm1, {k1-k7}, vm32x","EVEX.128.66.0F38.W0 A2 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale4","w,rw,r","","" +"VSCATTERDPS vm32y, {k1-k7}, ymm1","VSCATTERDPS ymm1, {k1-k7}, vm32y","vscatterdps ymm1, {k1-k7}, vm32y","EVEX.256.66.0F38.W0 A2 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale4","w,rw,r","","" +"VSCATTERDPS vm32z, {k1-k7}, zmm1","VSCATTERDPS zmm1, {k1-k7}, vm32z","vscatterdps zmm1, {k1-k7}, vm32z","EVEX.512.66.0F38.W0 A2 /vsib","V","V","AVX512F","modrm_memonly,scale4","w,rw,r","","" +"VSCATTERPF0DPD vm32y, {k1-k7}","VSCATTERPF0DPD {k1-k7}, vm32y","vscatterpf0dpd {k1-k7}, vm32y","EVEX.512.66.0F38.W1 C6 /5","V","V","AVX512PF","modrm_memonly,scale8","r,rw","","" +"VSCATTERPF0DPS vm32z, {k1-k7}","VSCATTERPF0DPS {k1-k7}, vm32z","vscatterpf0dps {k1-k7}, vm32z","EVEX.512.66.0F38.W0 C6 /5","V","V","AVX512PF","modrm_memonly,scale4","r,rw","","" +"VSCATTERPF0QPD vm64z, {k1-k7}","VSCATTERPF0QPD {k1-k7}, vm64z","vscatterpf0qpd {k1-k7}, vm64z","EVEX.512.66.0F38.W1 C7 /5","V","V","AVX512PF","modrm_memonly,scale8","r,rw","","" +"VSCATTERPF0QPS vm64z, {k1-k7}","VSCATTERPF0QPS {k1-k7}, vm64z","vscatterpf0qps {k1-k7}, vm64z","EVEX.512.66.0F38.W0 C7 /5","V","V","AVX512PF","modrm_memonly,scale4","r,rw","","" +"VSCATTERPF1DPD vm32y, {k1-k7}","VSCATTERPF1DPD {k1-k7}, vm32y","vscatterpf1dpd {k1-k7}, vm32y","EVEX.512.66.0F38.W1 C6 /6","V","V","AVX512PF","modrm_memonly,scale8","r,rw","","" +"VSCATTERPF1DPS vm32z, {k1-k7}","VSCATTERPF1DPS {k1-k7}, vm32z","vscatterpf1dps {k1-k7}, vm32z","EVEX.512.66.0F38.W0 C6 /6","V","V","AVX512PF","modrm_memonly,scale4","r,rw","","" +"VSCATTERPF1QPD vm64z, {k1-k7}","VSCATTERPF1QPD {k1-k7}, vm64z","vscatterpf1qpd {k1-k7}, vm64z","EVEX.512.66.0F38.W1 C7 /6","V","V","AVX512PF","modrm_memonly,scale8","r,rw","","" +"VSCATTERPF1QPS vm64z, {k1-k7}","VSCATTERPF1QPS {k1-k7}, vm64z","vscatterpf1qps {k1-k7}, vm64z","EVEX.512.66.0F38.W0 C7 /6","V","V","AVX512PF","modrm_memonly,scale4","r,rw","","" +"VSCATTERQPD vm64x, {k1-k7}, xmm1","VSCATTERQPD xmm1, {k1-k7}, vm64x","vscatterqpd xmm1, {k1-k7}, vm64x","EVEX.128.66.0F38.W1 A3 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,rw,r","","" +"VSCATTERQPD vm64y, {k1-k7}, ymm1","VSCATTERQPD ymm1, {k1-k7}, vm64y","vscatterqpd ymm1, {k1-k7}, vm64y","EVEX.256.66.0F38.W1 A3 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale8","w,rw,r","","" +"VSCATTERQPD vm64z, {k1-k7}, zmm1","VSCATTERQPD zmm1, {k1-k7}, vm64z","vscatterqpd zmm1, {k1-k7}, vm64z","EVEX.512.66.0F38.W1 A3 /vsib","V","V","AVX512F","modrm_memonly,scale8","w,rw,r","","" +"VSCATTERQPS vm64x, {k1-k7}, xmm1","VSCATTERQPS xmm1, {k1-k7}, vm64x","vscatterqps xmm1, {k1-k7}, vm64x","EVEX.128.66.0F38.W0 A3 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale4","w,rw,r","","" +"VSCATTERQPS vm64y, {k1-k7}, xmm1","VSCATTERQPS xmm1, {k1-k7}, vm64y","vscatterqps xmm1, {k1-k7}, vm64y","EVEX.256.66.0F38.W0 A3 /vsib","V","V","AVX512F+AVX512VL","modrm_memonly,scale4","w,rw,r","","" +"VSCATTERQPS vm64z, {k1-k7}, ymm1","VSCATTERQPS ymm1, {k1-k7}, vm64z","vscatterqps ymm1, {k1-k7}, vm64z","EVEX.512.66.0F38.W0 A3 /vsib","V","V","AVX512F","modrm_memonly,scale4","w,rw,r","","" +"VSHUFF32X4 ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst, imm8u","VSHUFF32X4 imm8u, ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vshuff32x4 imm8u, ymm2/m256/m32bcst, ymmV, {k}{z}, 
ymm1","EVEX.NDS.256.66.0F3A.W0 23 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r,r","","" +"VSHUFF32X4 zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst, imm8u","VSHUFF32X4 imm8u, zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vshuff32x4 imm8u, zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W0 23 /r ib","V","V","AVX512F","bscale4,scale64","w,r,r,r,r","","" +"VSHUFF64X2 ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst, imm8u","VSHUFF64X2 imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vshuff64x2 imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W1 23 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r,r","","" +"VSHUFF64X2 zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst, imm8u","VSHUFF64X2 imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vshuff64x2 imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W1 23 /r ib","V","V","AVX512F","bscale8,scale64","w,r,r,r,r","","" +"VSHUFI32X4 ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst, imm8u","VSHUFI32X4 imm8u, ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vshufi32x4 imm8u, ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W0 43 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r,r","","" +"VSHUFI32X4 zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst, imm8u","VSHUFI32X4 imm8u, zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vshufi32x4 imm8u, zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W0 43 /r ib","V","V","AVX512F","bscale4,scale64","w,r,r,r,r","","" +"VSHUFI64X2 ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst, imm8u","VSHUFI64X2 imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vshufi64x2 imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F3A.W1 43 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r,r","","" +"VSHUFI64X2 zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst, imm8u","VSHUFI64X2 imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vshufi64x2 imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F3A.W1 43 /r ib","V","V","AVX512F","bscale8,scale64","w,r,r,r,r","","" +"VSHUFPD xmm1, xmmV, xmm2/m128, imm8u","VSHUFPD imm8u, xmm2/m128, xmmV, xmm1","vshufpd imm8u, xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG C6 /r ib","V","V","AVX","","w,r,r,r","","" +"VSHUFPD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst, imm8u","VSHUFPD imm8u, xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vshufpd imm8u, xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 C6 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r,r","","" +"VSHUFPD ymm1, ymmV, ymm2/m256, imm8u","VSHUFPD imm8u, ymm2/m256, ymmV, ymm1","vshufpd imm8u, ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG C6 /r ib","V","V","AVX","","w,r,r,r","","" +"VSHUFPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst, imm8u","VSHUFPD imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vshufpd imm8u, ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 C6 /r ib","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r,r","","" +"VSHUFPD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst, imm8u","VSHUFPD imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vshufpd imm8u, zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 C6 /r ib","V","V","AVX512F","bscale8,scale64","w,r,r,r,r","","" +"VSHUFPS xmm1, xmmV, xmm2/m128, imm8u","VSHUFPS imm8u, xmm2/m128, xmmV, xmm1","vshufps imm8u, xmm2/m128, xmmV, xmm1","VEX.NDS.128.0F.WIG C6 /r ib","V","V","AVX","","w,r,r,r","","" +"VSHUFPS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst, imm8u","VSHUFPS imm8u, xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vshufps imm8u, xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.0F.W0 C6 /r 
ib","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r,r","","" +"VSHUFPS ymm1, ymmV, ymm2/m256, imm8u","VSHUFPS imm8u, ymm2/m256, ymmV, ymm1","vshufps imm8u, ymm2/m256, ymmV, ymm1","VEX.NDS.256.0F.WIG C6 /r ib","V","V","AVX","","w,r,r,r","","" +"VSHUFPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst, imm8u","VSHUFPS imm8u, ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vshufps imm8u, ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.0F.W0 C6 /r ib","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r,r","","" +"VSHUFPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst, imm8u","VSHUFPS imm8u, zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vshufps imm8u, zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.0F.W0 C6 /r ib","V","V","AVX512F","bscale4,scale64","w,r,r,r,r","","" +"VSQRTPD xmm1, xmm2/m128","VSQRTPD xmm2/m128, xmm1","vsqrtpd xmm2/m128, xmm1","VEX.128.66.0F.WIG 51 /r","V","V","AVX","","w,r","","" +"VSQRTPD xmm1, {k}{z}, xmm2/m128/m64bcst","VSQRTPD xmm2/m128/m64bcst, {k}{z}, xmm1","vsqrtpd xmm2/m128/m64bcst, {k}{z}, xmm1","EVEX.128.66.0F.W1 51 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r","","" +"VSQRTPD ymm1, ymm2/m256","VSQRTPD ymm2/m256, ymm1","vsqrtpd ymm2/m256, ymm1","VEX.256.66.0F.WIG 51 /r","V","V","AVX","","w,r","","" +"VSQRTPD ymm1, {k}{z}, ymm2/m256/m64bcst","VSQRTPD ymm2/m256/m64bcst, {k}{z}, ymm1","vsqrtpd ymm2/m256/m64bcst, {k}{z}, ymm1","EVEX.256.66.0F.W1 51 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r","","" +"VSQRTPD zmm1{er}, {k}{z}, zmm2","VSQRTPD zmm2, {k}{z}, zmm1{er}","vsqrtpd zmm2, {k}{z}, zmm1{er}","EVEX.512.66.0F.W1 51 /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"VSQRTPD zmm1, {k}{z}, zmm2/m512/m64bcst","VSQRTPD zmm2/m512/m64bcst, {k}{z}, zmm1","vsqrtpd zmm2/m512/m64bcst, {k}{z}, zmm1","EVEX.512.66.0F.W1 51 /r","V","V","AVX512F","bscale8,scale64","w,r,r","","" +"VSQRTPS xmm1, xmm2/m128","VSQRTPS xmm2/m128, xmm1","vsqrtps xmm2/m128, xmm1","VEX.128.0F.WIG 51 /r","V","V","AVX","","w,r","","" +"VSQRTPS xmm1, {k}{z}, xmm2/m128/m32bcst","VSQRTPS xmm2/m128/m32bcst, {k}{z}, xmm1","vsqrtps xmm2/m128/m32bcst, {k}{z}, xmm1","EVEX.128.0F.W0 51 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r","","" +"VSQRTPS ymm1, ymm2/m256","VSQRTPS ymm2/m256, ymm1","vsqrtps ymm2/m256, ymm1","VEX.256.0F.WIG 51 /r","V","V","AVX","","w,r","","" +"VSQRTPS ymm1, {k}{z}, ymm2/m256/m32bcst","VSQRTPS ymm2/m256/m32bcst, {k}{z}, ymm1","vsqrtps ymm2/m256/m32bcst, {k}{z}, ymm1","EVEX.256.0F.W0 51 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r","","" +"VSQRTPS zmm1{er}, {k}{z}, zmm2","VSQRTPS zmm2, {k}{z}, zmm1{er}","vsqrtps zmm2, {k}{z}, zmm1{er}","EVEX.512.0F.W0 51 /r","V","V","AVX512F","modrm_regonly","w,r,r","","" +"VSQRTPS zmm1, {k}{z}, zmm2/m512/m32bcst","VSQRTPS zmm2/m512/m32bcst, {k}{z}, zmm1","vsqrtps zmm2/m512/m32bcst, {k}{z}, zmm1","EVEX.512.0F.W0 51 /r","V","V","AVX512F","bscale4,scale64","w,r,r","","" +"VSQRTSD xmm1{er}, {k}{z}, xmmV, xmm2","VSQRTSD xmm2, xmmV, {k}{z}, xmm1{er}","vsqrtsd xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.NDS.128.F2.0F.W1 51 /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VSQRTSD xmm1, xmmV, xmm2/m64","VSQRTSD xmm2/m64, xmmV, xmm1","vsqrtsd xmm2/m64, xmmV, xmm1","VEX.NDS.LIG.F2.0F.WIG 51 /r","V","V","AVX","","w,r,r","","" +"VSQRTSD xmm1, {k}{z}, xmmV, xmm2/m64","VSQRTSD xmm2/m64, xmmV, {k}{z}, xmm1","vsqrtsd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.F2.0F.W1 51 /r","V","V","AVX512F","scale8","w,r,r,r","","" +"VSQRTSS xmm1{er}, {k}{z}, xmmV, xmm2","VSQRTSS xmm2, xmmV, {k}{z}, xmm1{er}","vsqrtss xmm2, xmmV, {k}{z}, 
xmm1{er}","EVEX.NDS.128.F3.0F.W0 51 /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VSQRTSS xmm1, xmmV, xmm2/m32","VSQRTSS xmm2/m32, xmmV, xmm1","vsqrtss xmm2/m32, xmmV, xmm1","VEX.NDS.LIG.F3.0F.WIG 51 /r","V","V","AVX","","w,r,r","","" +"VSQRTSS xmm1, {k}{z}, xmmV, xmm2/m32","VSQRTSS xmm2/m32, xmmV, {k}{z}, xmm1","vsqrtss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.F3.0F.W0 51 /r","V","V","AVX512F","scale4","w,r,r,r","","" +"VSTMXCSR m32","VSTMXCSR m32","vstmxcsr m32","VEX.128.0F.WIG AE /3","V","V","AVX","modrm_memonly","w","","" +"VSUBPD xmm1, xmmV, xmm2/m128","VSUBPD xmm2/m128, xmmV, xmm1","vsubpd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 5C /r","V","V","AVX","","w,r,r","","" +"VSUBPD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VSUBPD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vsubpd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 5C /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VSUBPD ymm1, ymmV, ymm2/m256","VSUBPD ymm2/m256, ymmV, ymm1","vsubpd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 5C /r","V","V","AVX","","w,r,r","","" +"VSUBPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VSUBPD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vsubpd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 5C /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VSUBPD zmm1{er}, {k}{z}, zmmV, zmm2","VSUBPD zmm2, zmmV, {k}{z}, zmm1{er}","vsubpd zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.NDS.512.66.0F.W1 5C /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VSUBPD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VSUBPD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vsubpd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 5C /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VSUBPS xmm1, xmmV, xmm2/m128","VSUBPS xmm2/m128, xmmV, xmm1","vsubps xmm2/m128, xmmV, xmm1","VEX.NDS.128.0F.WIG 5C /r","V","V","AVX","","w,r,r","","" +"VSUBPS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VSUBPS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vsubps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.0F.W0 5C /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VSUBPS ymm1, ymmV, ymm2/m256","VSUBPS ymm2/m256, ymmV, ymm1","vsubps ymm2/m256, ymmV, ymm1","VEX.NDS.256.0F.WIG 5C /r","V","V","AVX","","w,r,r","","" +"VSUBPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VSUBPS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vsubps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.0F.W0 5C /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VSUBPS zmm1{er}, {k}{z}, zmmV, zmm2","VSUBPS zmm2, zmmV, {k}{z}, zmm1{er}","vsubps zmm2, zmmV, {k}{z}, zmm1{er}","EVEX.NDS.512.0F.W0 5C /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VSUBPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VSUBPS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vsubps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.0F.W0 5C /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VSUBSD xmm1{er}, {k}{z}, xmmV, xmm2","VSUBSD xmm2, xmmV, {k}{z}, xmm1{er}","vsubsd xmm2, xmmV, {k}{z}, xmm1{er}","EVEX.NDS.128.F2.0F.W1 5C /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VSUBSD xmm1, xmmV, xmm2/m64","VSUBSD xmm2/m64, xmmV, xmm1","vsubsd xmm2/m64, xmmV, xmm1","VEX.NDS.LIG.F2.0F.WIG 5C /r","V","V","AVX","","w,r,r","","" +"VSUBSD xmm1, {k}{z}, xmmV, xmm2/m64","VSUBSD xmm2/m64, xmmV, {k}{z}, xmm1","vsubsd xmm2/m64, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.F2.0F.W1 5C /r","V","V","AVX512F","scale8","w,r,r,r","","" +"VSUBSS xmm1{er}, {k}{z}, xmmV, xmm2","VSUBSS xmm2, xmmV, {k}{z}, xmm1{er}","vsubss xmm2, xmmV, 
{k}{z}, xmm1{er}","EVEX.NDS.128.F3.0F.W0 5C /r","V","V","AVX512F","modrm_regonly","w,r,r,r","","" +"VSUBSS xmm1, xmmV, xmm2/m32","VSUBSS xmm2/m32, xmmV, xmm1","vsubss xmm2/m32, xmmV, xmm1","VEX.NDS.LIG.F3.0F.WIG 5C /r","V","V","AVX","","w,r,r","","" +"VSUBSS xmm1, {k}{z}, xmmV, xmm2/m32","VSUBSS xmm2/m32, xmmV, {k}{z}, xmm1","vsubss xmm2/m32, xmmV, {k}{z}, xmm1","EVEX.NDS.LIG.F3.0F.W0 5C /r","V","V","AVX512F","scale4","w,r,r,r","","" +"VTESTPD xmm1, xmm2/m128","VTESTPD xmm2/m128, xmm1","vtestpd xmm2/m128, xmm1","VEX.128.66.0F38.W0 0F /r","V","V","AVX","","r,r","","" +"VTESTPD ymm1, ymm2/m256","VTESTPD ymm2/m256, ymm1","vtestpd ymm2/m256, ymm1","VEX.256.66.0F38.W0 0F /r","V","V","AVX","","r,r","","" +"VTESTPS xmm1, xmm2/m128","VTESTPS xmm2/m128, xmm1","vtestps xmm2/m128, xmm1","VEX.128.66.0F38.W0 0E /r","V","V","AVX","","r,r","","" +"VTESTPS ymm1, ymm2/m256","VTESTPS ymm2/m256, ymm1","vtestps ymm2/m256, ymm1","VEX.256.66.0F38.W0 0E /r","V","V","AVX","","r,r","","" +"VUCOMISD xmm1{sae}, xmm2","VUCOMISD xmm2, xmm1{sae}","vucomisd xmm2, xmm1{sae}","EVEX.128.66.0F.W1 2E /r","V","V","AVX512F","modrm_regonly","r,r","","" +"VUCOMISD xmm1, xmm2/m64","VUCOMISD xmm2/m64, xmm1","vucomisd xmm2/m64, xmm1","EVEX.LIG.66.0F.W1 2E /r","V","V","AVX512F","scale8","r,r","","" +"VUCOMISD xmm1, xmm2/m64","VUCOMISD xmm2/m64, xmm1","vucomisd xmm2/m64, xmm1","VEX.LIG.66.0F.WIG 2E /r","V","V","AVX","","r,r","","" +"VUCOMISS xmm1{sae}, xmm2","VUCOMISS xmm2, xmm1{sae}","vucomiss xmm2, xmm1{sae}","EVEX.128.0F.W0 2E /r","V","V","AVX512F","modrm_regonly","r,r","","" +"VUCOMISS xmm1, xmm2/m32","VUCOMISS xmm2/m32, xmm1","vucomiss xmm2/m32, xmm1","EVEX.LIG.0F.W0 2E /r","V","V","AVX512F","scale4","r,r","","" +"VUCOMISS xmm1, xmm2/m32","VUCOMISS xmm2/m32, xmm1","vucomiss xmm2/m32, xmm1","VEX.LIG.0F.WIG 2E /r","V","V","AVX","","r,r","","" +"VUNPCKHPD xmm1, xmmV, xmm2/m128","VUNPCKHPD xmm2/m128, xmmV, xmm1","vunpckhpd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 15 /r","V","V","AVX","","w,r,r","","" +"VUNPCKHPD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VUNPCKHPD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vunpckhpd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 15 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VUNPCKHPD ymm1, ymmV, ymm2/m256","VUNPCKHPD ymm2/m256, ymmV, ymm1","vunpckhpd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 15 /r","V","V","AVX","","w,r,r","","" +"VUNPCKHPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VUNPCKHPD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vunpckhpd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 15 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VUNPCKHPD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VUNPCKHPD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vunpckhpd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 15 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VUNPCKHPS xmm1, xmmV, xmm2/m128","VUNPCKHPS xmm2/m128, xmmV, xmm1","vunpckhps xmm2/m128, xmmV, xmm1","VEX.NDS.128.0F.WIG 15 /r","V","V","AVX","","w,r,r","","" +"VUNPCKHPS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VUNPCKHPS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vunpckhps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.0F.W0 15 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VUNPCKHPS ymm1, ymmV, ymm2/m256","VUNPCKHPS ymm2/m256, ymmV, ymm1","vunpckhps ymm2/m256, ymmV, ymm1","VEX.NDS.256.0F.WIG 15 /r","V","V","AVX","","w,r,r","","" +"VUNPCKHPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VUNPCKHPS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vunpckhps 
ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.0F.W0 15 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VUNPCKHPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VUNPCKHPS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vunpckhps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.0F.W0 15 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VUNPCKLPD xmm1, xmmV, xmm2/m128","VUNPCKLPD xmm2/m128, xmmV, xmm1","vunpcklpd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 14 /r","V","V","AVX","","w,r,r","","" +"VUNPCKLPD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VUNPCKLPD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vunpcklpd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 14 /r","V","V","AVX512F+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VUNPCKLPD ymm1, ymmV, ymm2/m256","VUNPCKLPD ymm2/m256, ymmV, ymm1","vunpcklpd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 14 /r","V","V","AVX","","w,r,r","","" +"VUNPCKLPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VUNPCKLPD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vunpcklpd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 14 /r","V","V","AVX512F+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VUNPCKLPD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VUNPCKLPD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vunpcklpd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 14 /r","V","V","AVX512F","bscale8,scale64","w,r,r,r","","" +"VUNPCKLPS xmm1, xmmV, xmm2/m128","VUNPCKLPS xmm2/m128, xmmV, xmm1","vunpcklps xmm2/m128, xmmV, xmm1","VEX.NDS.128.0F.WIG 14 /r","V","V","AVX","","w,r,r","","" +"VUNPCKLPS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VUNPCKLPS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vunpcklps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.0F.W0 14 /r","V","V","AVX512F+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VUNPCKLPS ymm1, ymmV, ymm2/m256","VUNPCKLPS ymm2/m256, ymmV, ymm1","vunpcklps ymm2/m256, ymmV, ymm1","VEX.NDS.256.0F.WIG 14 /r","V","V","AVX","","w,r,r","","" +"VUNPCKLPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VUNPCKLPS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vunpcklps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.0F.W0 14 /r","V","V","AVX512F+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VUNPCKLPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VUNPCKLPS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vunpcklps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.0F.W0 14 /r","V","V","AVX512F","bscale4,scale64","w,r,r,r","","" +"VXORPD xmm1, xmmV, xmm2/m128","VXORPD xmm2/m128, xmmV, xmm1","vxorpd xmm2/m128, xmmV, xmm1","VEX.NDS.128.66.0F.WIG 57 /r","V","V","AVX","","w,r,r","","" +"VXORPD xmm1, {k}{z}, xmmV, xmm2/m128/m64bcst","VXORPD xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","vxorpd xmm2/m128/m64bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.66.0F.W1 57 /r","V","V","AVX512DQ+AVX512VL","bscale8,scale16","w,r,r,r","","" +"VXORPD ymm1, ymmV, ymm2/m256","VXORPD ymm2/m256, ymmV, ymm1","vxorpd ymm2/m256, ymmV, ymm1","VEX.NDS.256.66.0F.WIG 57 /r","V","V","AVX","","w,r,r","","" +"VXORPD ymm1, {k}{z}, ymmV, ymm2/m256/m64bcst","VXORPD ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","vxorpd ymm2/m256/m64bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.66.0F.W1 57 /r","V","V","AVX512DQ+AVX512VL","bscale8,scale32","w,r,r,r","","" +"VXORPD zmm1, {k}{z}, zmmV, zmm2/m512/m64bcst","VXORPD zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","vxorpd zmm2/m512/m64bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.66.0F.W1 57 /r","V","V","AVX512DQ","bscale8,scale64","w,r,r,r","","" +"VXORPS xmm1, xmmV, xmm2/m128","VXORPS xmm2/m128, xmmV, xmm1","vxorps xmm2/m128, xmmV, xmm1","VEX.NDS.128.0F.WIG 57 
/r","V","V","AVX","","w,r,r","","" +"VXORPS xmm1, {k}{z}, xmmV, xmm2/m128/m32bcst","VXORPS xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","vxorps xmm2/m128/m32bcst, xmmV, {k}{z}, xmm1","EVEX.NDS.128.0F.W0 57 /r","V","V","AVX512DQ+AVX512VL","bscale4,scale16","w,r,r,r","","" +"VXORPS ymm1, ymmV, ymm2/m256","VXORPS ymm2/m256, ymmV, ymm1","vxorps ymm2/m256, ymmV, ymm1","VEX.NDS.256.0F.WIG 57 /r","V","V","AVX","","w,r,r","","" +"VXORPS ymm1, {k}{z}, ymmV, ymm2/m256/m32bcst","VXORPS ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","vxorps ymm2/m256/m32bcst, ymmV, {k}{z}, ymm1","EVEX.NDS.256.0F.W0 57 /r","V","V","AVX512DQ+AVX512VL","bscale4,scale32","w,r,r,r","","" +"VXORPS zmm1, {k}{z}, zmmV, zmm2/m512/m32bcst","VXORPS zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","vxorps zmm2/m512/m32bcst, zmmV, {k}{z}, zmm1","EVEX.NDS.512.0F.W0 57 /r","V","V","AVX512DQ","bscale4,scale64","w,r,r,r","","" +"VZEROALL","VZEROALL","vzeroall","VEX.256.0F.WIG 77","V","V","AVX","","","","" +"VZEROUPPER","VZEROUPPER","vzeroupper","VEX.128.0F.WIG 77","V","V","AVX","","","","" +"WAIT","WAIT","wait","9B","V","V","","pseudo","","","" +"WBINVD","WBINVD","wbinvd","0F 09","V","V","486","","","","" +"WRFSBASE rmr32","WRFSBASE rmr32","wrfsbase rmr32","F3 0F AE /2","N.S.","V","FSGSBASE","modrm_regonly,operand16,operand32","r","Y","32" +"WRFSBASE rmr64","WRFSBASE rmr64","wrfsbase rmr64","F3 REX.W 0F AE /2","N.S.","V","FSGSBASE","modrm_regonly","r","Y","64" +"WRGSBASE rmr32","WRGSBASE rmr32","wrgsbase rmr32","F3 0F AE /3","N.S.","V","FSGSBASE","modrm_regonly,operand16,operand32","r","Y","32" +"WRGSBASE rmr64","WRGSBASE rmr64","wrgsbase rmr64","F3 REX.W 0F AE /3","N.S.","V","FSGSBASE","modrm_regonly","r","Y","64" +"WRMSR","WRMSR","wrmsr","0F 30","V","V","Pentium","","","","" +"WRPKRU","WRPKRU","wrpkru","0F 01 EF","V","V","PKU","","","","" +"WRSSD m32, r32","WRSSD r32, m32","wrssd r32, m32","0F 38 F6 /r","V","V","CET","modrm_memonly,operand16,operand32","w,r","","" +"WRSSQ m64, r64","WRSSQ r64, m64","wrssq r64, m64","REX.W 0F 38 F6 /r","N.S.","V","CET","modrm_memonly","w,r","","" +"WRUSSD m32, r32","WRUSSD r32, m32","wrussd r32, m32","66 0F 38 F5 /r","V","V","CET","modrm_memonly,operand16,operand32","w,r","","" +"WRUSSQ m64, r64","WRUSSQ r64, m64","wrussq r64, m64","66 REX.W 0F 38 F5 /r","N.S.","V","CET","modrm_memonly","w,r","","" +"XABORT imm8u","XABORT imm8u","xabort imm8u","C6 F8 ib","V","V","RTM","modrm_regonly","r","","" +"XACQUIRE","XACQUIRE","xacquire","F2","V","V","HLE","pseudo","","","" +"XADD r/m8, r8","XADDB r8, r/m8","xaddb r8, r/m8","0F C0 /r","V","V","486","","rw,rw","Y","8" +"XADD r/m8, r8","XADDB r8, r/m8","xaddb r8, r/m8","REX 0F C0 /r","N.E.","V","","pseudo64","rw,w","Y","8" +"XADD r/m32, r32","XADDL r32, r/m32","xaddl r32, r/m32","0F C1 /r","V","V","486","operand32","rw,rw","Y","32" +"XADD r/m64, r64","XADDQ r64, r/m64","xaddq r64, r/m64","REX.W 0F C1 /r","N.S.","V","486","","rw,rw","Y","64" +"XADD r/m16, r16","XADDW r16, r/m16","xaddw r16, r/m16","0F C1 /r","V","V","486","operand16","rw,rw","Y","16" +"XBEGIN rel16","XBEGIN rel16","xbegin rel16","C7 F8 cw","V","V","RTM","modrm_regonly,operand16","r","","" +"XBEGIN rel32","XBEGIN rel32","xbegin rel32","C7 F8 cd","V","V","RTM","modrm_regonly,operand32,operand64","r","","" +"XCHG r8, r/m8","XCHGB r/m8, r8","xchgb r/m8, r8","86 /r","V","V","","pseudo","w,r","Y","8" +"XCHG r8, r/m8","XCHGB r/m8, r8","xchgb r/m8, r8","REX 86 /r","N.E.","V","","pseudo","w,r","Y","8" +"XCHG r/m8, r8","XCHGB r8, r/m8","xchgb r8, r/m8","86 /r","V","V","","","rw,rw","Y","8" +"XCHG r/m8, r8","XCHGB r8, r/m8","xchgb 
r8, r/m8","REX 86 /r","N.E.","V","","pseudo64","rw,r","Y","8" +"XCHG r32op, EAX","XCHGL EAX, r32op","xchgl EAX, r32op","90+rd","V","V","","operand32","rw,rw","Y","32" +"XCHG r32, r/m32","XCHGL r/m32, r32","xchgl r/m32, r32","87 /r","V","V","","operand32,pseudo","w,r","Y","32" +"XCHG r/m32, r32","XCHGL r32, r/m32","xchgl r32, r/m32","87 /r","V","V","","operand32","rw,rw","Y","32" +"XCHG EAX, r32op","XCHGL r32op, EAX","xchgl r32op, EAX","90+rd","V","V","","operand32,pseudo","rw,rw","Y","32" +"XCHG r64op, RAX","XCHGQ RAX, r64op","xchgq RAX, r64op","REX.W 90+ro","N.S.","V","","","rw,rw","Y","64" +"XCHG r64, r/m64","XCHGQ r/m64, r64","xchgq r/m64, r64","REX.W 87 /r","N.E.","V","","pseudo","w,r","Y","64" +"XCHG r/m64, r64","XCHGQ r64, r/m64","xchgq r64, r/m64","REX.W 87 /r","N.S.","V","","","rw,rw","Y","64" +"XCHG RAX, r64op","XCHGQ r64op, RAX","xchgq r64op, RAX","REX.W 90+rd","N.E.","V","","pseudo","rw,rw","Y","64" +"XCHG r16op, AX","XCHGW AX, r16op","xchgw AX, r16op","90+rw","V","V","","operand16","rw,rw","Y","16" +"XCHG r16, r/m16","XCHGW r/m16, r16","xchgw r/m16, r16","87 /r","V","V","","operand16,pseudo","w,r","Y","16" +"XCHG r/m16, r16","XCHGW r16, r/m16","xchgw r16, r/m16","87 /r","V","V","","operand16","rw,rw","Y","16" +"XCHG AX, r16op","XCHGW r16op, AX","xchgw r16op, AX","90+rw","V","V","","operand16,pseudo","rw,rw","Y","16" +"XEND","XEND","xend","0F 01 D5","V","V","RTM","","","","" +"XGETBV","XGETBV","xgetbv","0F 01 D0","V","V","XSAVE","","","","" +"XLATB","XLAT","xlat","D7","V","V","","","","","" +"XLATB","XLAT","xlat","REX.W D7","N.E.","V","","pseudo","","","" +"XOR r/m8, imm8","XORB imm8, r/m8","xorb imm8, r/m8","REX 80 /6 ib","N.E.","V","","pseudo64","rw,r","Y","8" +"XOR AL, imm8u","XORB imm8u, AL","xorb imm8u, AL","34 ib","V","V","","","rw,r","Y","8" +"XOR r/m8, imm8u","XORB imm8u, r/m8","xorb imm8u, r/m8","80 /6 ib","V","V","","","rw,r","Y","8" +"XOR r/m8, imm8u","XORB imm8u, r/m8","xorb imm8u, r/m8","82 /6 ib","V","N.S.","","","rw,r","Y","8" +"XOR r8, r/m8","XORB r/m8, r8","xorb r/m8, r8","32 /r","V","V","","","rw,r","Y","8" +"XOR r8, r/m8","XORB r/m8, r8","xorb r/m8, r8","REX 32 /r","N.E.","V","","pseudo64","rw,r","Y","8" +"XOR r/m8, r8","XORB r8, r/m8","xorb r8, r/m8","30 /r","V","V","","","rw,r","Y","8" +"XOR r/m8, r8","XORB r8, r/m8","xorb r8, r/m8","REX 30 /r","N.E.","V","","pseudo64","rw,r","Y","8" +"XOR EAX, imm32","XORL imm32, EAX","xorl imm32, EAX","35 id","V","V","","operand32","rw,r","Y","32" +"XOR r/m32, imm32","XORL imm32, r/m32","xorl imm32, r/m32","81 /6 id","V","V","","operand32","rw,r","Y","32" +"XOR r/m32, imm8","XORL imm8, r/m32","xorl imm8, r/m32","83 /6 ib","V","V","","operand32","rw,r","Y","32" +"XOR r32, r/m32","XORL r/m32, r32","xorl r/m32, r32","33 /r","V","V","","operand32","rw,r","Y","32" +"XOR r/m32, r32","XORL r32, r/m32","xorl r32, r/m32","31 /r","V","V","","operand32","rw,r","Y","32" +"XORPD xmm1, xmm2/m128","XORPD xmm2/m128, xmm1","xorpd xmm2/m128, xmm1","66 0F 57 /r","V","V","SSE2","","rw,r","","" +"XORPS xmm1, xmm2/m128","XORPS xmm2/m128, xmm1","xorps xmm2/m128, xmm1","0F 57 /r","V","V","SSE","","rw,r","","" +"XOR RAX, imm32","XORQ imm32, RAX","xorq imm32, RAX","REX.W 35 id","N.S.","V","","","rw,r","Y","64" +"XOR r/m64, imm32","XORQ imm32, r/m64","xorq imm32, r/m64","REX.W 81 /6 id","N.S.","V","","","rw,r","Y","64" +"XOR r/m64, imm8","XORQ imm8, r/m64","xorq imm8, r/m64","REX.W 83 /6 ib","N.S.","V","","","rw,r","Y","64" +"XOR r64, r/m64","XORQ r/m64, r64","xorq r/m64, r64","REX.W 33 /r","N.S.","V","","","rw,r","Y","64" +"XOR r/m64, r64","XORQ 
r64, r/m64","xorq r64, r/m64","REX.W 31 /r","N.S.","V","","","rw,r","Y","64" +"XOR AX, imm16","XORW imm16, AX","xorw imm16, AX","35 iw","V","V","","operand16","rw,r","Y","16" +"XOR r/m16, imm16","XORW imm16, r/m16","xorw imm16, r/m16","81 /6 iw","V","V","","operand16","rw,r","Y","16" +"XOR r/m16, imm8","XORW imm8, r/m16","xorw imm8, r/m16","83 /6 ib","V","V","","operand16","rw,r","Y","16" +"XOR r16, r/m16","XORW r/m16, r16","xorw r/m16, r16","33 /r","V","V","","operand16","rw,r","Y","16" +"XOR r/m16, r16","XORW r16, r/m16","xorw r16, r/m16","31 /r","V","V","","operand16","rw,r","Y","16" +"XRELEASE","XRELEASE","xrelease","F3","V","V","HLE","pseudo","","","" +"XRSTOR mem","XRSTOR mem","xrstor mem","0F AE /5","V","V","XSAVE","modrm_memonly,operand16,operand32","r","","" +"XRSTOR64 mem","XRSTOR64 mem","xrstor64 mem","REX.W 0F AE /5","N.S.","V","XSAVE","modrm_memonly","r","","" +"XRSTORS mem","XRSTORS mem","xrstors mem","0F C7 /3","V","V","XSAVES","modrm_memonly,operand16,operand32","r","","" +"XRSTORS64 mem","XRSTORS64 mem","xrstors64 mem","REX.W 0F C7 /3","N.S.","V","XSAVES","modrm_memonly","r","","" +"XSAVE mem","XSAVE mem","xsave mem","0F AE /4","V","V","XSAVE","modrm_memonly,operand16,operand32","w","","" +"XSAVE64 mem","XSAVE64 mem","xsave64 mem","REX.W 0F AE /4","N.S.","V","XSAVE","modrm_memonly","w","","" +"XSAVEC mem","XSAVEC mem","xsavec mem","0F C7 /4","V","V","XSAVEC","modrm_memonly,operand16,operand32","w","","" +"XSAVEC64 mem","XSAVEC64 mem","xsavec64 mem","REX.W 0F C7 /4","N.S.","V","XSAVEC","modrm_memonly","w","","" +"XSAVEOPT mem","XSAVEOPT mem","xsaveopt mem","0F AE /6","V","V","XSAVEOPT","modrm_memonly,operand16,operand32","w","","" +"XSAVEOPT64 mem","XSAVEOPT64 mem","xsaveopt64 mem","REX.W 0F AE /6","N.S.","V","XSAVEOPT","modrm_memonly","w","","" +"XSAVES mem","XSAVES mem","xsaves mem","0F C7 /5","V","V","XSAVES","modrm_memonly,operand16,operand32","w","","" +"XSAVES64 mem","XSAVES64 mem","xsaves64 mem","REX.W 0F C7 /5","N.S.","V","XSAVES","modrm_memonly","w","","" +"XSETBV","XSETBV","xsetbv","0F 01 D1","V","V","XSAVE","","","","" +"XTEST","XTEST","xtest","0F 01 D6","V","V","HLE or RTM","","","","" diff --git a/tests/tcg/loongarch64/Makefile.softmmu-target b/tests/tcg/loongarch64/Makefile.softmmu-target new file mode 100644 index 0000000000..908f3a8c0f --- /dev/null +++ b/tests/tcg/loongarch64/Makefile.softmmu-target @@ -0,0 +1,33 @@ +# +# Loongarch64 system tests +# + +LOONGARCH64_SYSTEM_SRC=$(SRC_PATH)/tests/tcg/loongarch64/system +VPATH+=$(LOONGARCH64_SYSTEM_SRC) + +# These objects provide the basic boot code and helper functions for all tests +CRT_OBJS=boot.o + +LOONGARCH64_TEST_SRCS=$(wildcard $(LOONGARCH64_SYSTEM_SRC)/*.c) +LOONGARCH64_TESTS = $(patsubst $(LOONGARCH64_SYSTEM_SRC)/%.c, %, $(LOONGARCH64_TEST_SRCS)) + +CRT_PATH=$(LOONGARCH64_SYSTEM_SRC) +LINK_SCRIPT=$(LOONGARCH64_SYSTEM_SRC)/kernel.ld +LDFLAGS=-Wl,-T$(LINK_SCRIPT) +TESTS+=$(LOONGARCH64_TESTS) $(MULTIARCH_TESTS) +CFLAGS+=-nostdlib -g -O1 -march=loongarch64 -mabi=lp64d $(MINILIB_INC) +LDFLAGS+=-static -nostdlib $(CRT_OBJS) $(MINILIB_OBJS) -lgcc + +# building head blobs +.PRECIOUS: $(CRT_OBJS) + +%.o: $(CRT_PATH)/%.S + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -x assembler-with-cpp -c $< -o $@ + +# Build and link the tests +%: %.c $(LINK_SCRIPT) $(CRT_OBJS) $(MINILIB_OBJS) + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) + +memory: CFLAGS+=-DCHECK_UNALIGNED=0 +# Running +QEMU_OPTS+=-serial chardev:output -kernel diff --git a/tests/tcg/loongarch64/Makefile.target b/tests/tcg/loongarch64/Makefile.target new file 
mode 100644 index 0000000000..00030a1026 --- /dev/null +++ b/tests/tcg/loongarch64/Makefile.target @@ -0,0 +1,20 @@ +# -*- Mode: makefile -*- +# +# LoongArch64 specific tweaks + +# Loongarch64 doesn't support gdb, so skip the EXTRA_RUNS +EXTRA_RUNS = + +LOONGARCH64_SRC=$(SRC_PATH)/tests/tcg/loongarch64 +VPATH += $(LOONGARCH64_SRC) + +LDFLAGS+=-lm + +LOONGARCH64_TESTS = test_bit +LOONGARCH64_TESTS += test_div +LOONGARCH64_TESTS += test_fclass +LOONGARCH64_TESTS += test_fpcom +LOONGARCH64_TESTS += test_pcadd +LOONGARCH64_TESTS += test_fcsr + +TESTS += $(LOONGARCH64_TESTS) diff --git a/tests/tcg/loongarch64/float_convd.ref b/tests/tcg/loongarch64/float_convd.ref new file mode 100644 index 0000000000..08d3dfa2fe --- /dev/null +++ b/tests/tcg/loongarch64/float_convd.ref @@ -0,0 +1,988 @@ +### Rounding to nearest +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-inf:0xff800000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: -2 (OK) + to uint64: -2 (OK) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: -1 (OK) + to uint64: -1 (OK) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x0.00000000000000000000p+0:0x80000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 
0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000000000000000000p-25:0x33000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000c00000000000000p-14:0x38800006) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0a800000000000000p+1:0x402df854) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: 
f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb600000000000000p+1:0x40490fdb) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.00000000000000000000p+31:0x4f000000) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(inf:0x7f800000) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding upwards +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 
(INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: -2 (OK) + to uint64: -2 (OK) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: -1 (OK) + to uint64: -1 (OK) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x0.00000000000000000000p+0:0x80000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000200000000000000p-25:0x33000001) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to 
uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe800000000000000p-25:0x337ffff4) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801c00000000000000p-15:0x387fc00e) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000e00000000000000p-14:0x38800007) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x1.00000000000000000000p-149:0x00000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x1.00000000000000000000p-149:0x00000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x1.00000000000000000000p-149:0x00000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0aa00000000000000p+1:0x402df855) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb600000000000000p+1:0x40490fdb) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: 
f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.00000000000000000000p+31:0x4f000000) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(inf:0x7f800000) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding downwards +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-inf:0xff800000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) 
(OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: -2 (OK) + to uint64: -2 (OK) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: -1 (OK) + to uint64: -1 (OK) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x1.00000000000000000000p-149:0x80000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000000000000000000p-25:0x33000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000c00000000000000p-14:0x38800006) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 
(OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0a800000000000000p+1:0x402df854) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb400000000000000p+1:0x40490fda) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.fffffe00000000000000p+30:0x4effffff) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: 
f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding to zero +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) (INEXACT ) + 
to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: -2 (OK) + to uint64: -2 (OK) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: -1 (OK) + to uint64: -1 (OK) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x0.00000000000000000000p+0:0x80000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000000000000000000p-25:0x33000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000c00000000000000p-14:0x38800006) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x0.00000000000000000000p+0:0000000000) 
(UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0a800000000000000p+1:0x402df854) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb400000000000000p+1:0x40490fda) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.fffffe00000000000000p+30:0x4effffff) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: 
f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) diff --git a/tests/tcg/loongarch64/float_convs.ref b/tests/tcg/loongarch64/float_convs.ref new file mode 100644 index 0000000000..66c7679dec --- /dev/null +++ b/tests/tcg/loongarch64/float_convs.ref @@ -0,0 +1,748 @@ +### Rounding to nearest +from single: f32(-nan:0xffa00000) + to double: f64(-nan:0x00fffc000000000000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to double: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) 
+ to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: 
f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fa00000) + to double: f64(nan:0x007ffc000000000000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding upwards +from single: f32(-nan:0xffa00000) + to double: f64(-nan:0x00fffc000000000000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to double: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to double: 
f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) 
+ to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fa00000) + to double: f64(nan:0x007ffc000000000000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding downwards +from single: f32(-nan:0xffa00000) + to double: f64(-nan:0x00fffc000000000000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to double: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 
(OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: 
f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fa00000) + to double: f64(nan:0x007ffc000000000000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding to zero +from single: f32(-nan:0xffa00000) + to double: f64(-nan:0x00fffc000000000000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: 
f32(0x0.00000000000000000000p+0:0000000000) + to double: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 
131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fa00000) + to double: f64(nan:0x007ffc000000000000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) diff --git a/tests/tcg/loongarch64/float_madds.ref b/tests/tcg/loongarch64/float_madds.ref new file mode 100644 index 0000000000..21c0539887 --- /dev/null +++ b/tests/tcg/loongarch64/float_madds.ref @@ -0,0 +1,768 @@ +### Rounding to nearest +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffe00000) flags=INVALID (0/0) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (0/1) +op : f32(-inf:0xff800000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(-nan:0xffe00000) flags=INVALID (0/2) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(-nan:0xffc00000) flags=OK (1/0) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-nan:0xffc00000) +res: f32(-nan:0xffc00000) flags=OK (1/1) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffc00000) flags=OK (1/2) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(inf:0x7f800000) flags=OK (2/0) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-inf:0xff800000) +res: f32(-inf:0xff800000) flags=OK (2/1) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(inf:0x7f800000) flags=OK (2/2) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (3/0) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (3/1) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT 
(3/2) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (4/0) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) flags=INEXACT (4/1) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) flags=INEXACT (4/2) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(0x1.0c27fa00000000000000p+60:0x5d8613fd) flags=INEXACT (5/0) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) flags=INEXACT (5/1) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(0x1.26c46200000000000000p+34:0x50936231) flags=INEXACT (5/2) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(0x1.91f94000000000000000p-106:0x0ac8fca0) flags=INEXACT (6/0) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(-0x1.31f75000000000000000p-40:0xab98fba8) flags=INEXACT (6/1) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=INEXACT (6/2) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (7/0) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=OK (7/1) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (7/2) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (8/0) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (8/1) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(-0x0.00000000000000000000p+0:0x80000000) flags=UNDERFLOW INEXACT (8/2) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=OK (9/0) +op : f32(0x1.00000000000000000000p-126:0x00800000) * 
f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (9/1) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (9/2) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.ffffe600000000000000p-25:0x337ffff3) flags=INEXACT (10/0) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.ffffe600000000000000p-50:0x26fffff3) flags=INEXACT (10/1) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=INEXACT (10/2) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801a00000000000000p-15:0x387fc00d) flags=INEXACT (11/0) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.0007fe00000000000000p-25:0x330003ff) flags=INEXACT (11/1) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0001f200000000000000p-24:0x338000f9) flags=INEXACT (11/2) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00000c00000000000000p-14:0x38800006) flags=INEXACT (12/0) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0ffbf400000000000000p-24:0x3387fdfa) flags=INEXACT (12/1) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801c00000000000000p-15:0x387fc00e) flags=INEXACT (12/2) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00000000000000000000p+0:0x3f800000) flags=INEXACT (13/0) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ffc01800000000000000p-14:0x38ffe00c) flags=INEXACT (13/1) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.ffc01800000000000000p-14:0x38ffe00c) flags=INEXACT (13/2) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/0) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/1) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000c00000000000000p-14:0x38800006) + 
f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00040200000000000000p+0:0x3f800201) flags=INEXACT (14/2) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/0) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.80400000000000000000p+1:0x40402000) flags=OK (15/1) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/2) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=OK (16/0) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=OK (16/1) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.2e23d200000000000000p+2:0x409711e9) flags=INEXACT (16/2) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.12804200000000000000p+3:0x41094021) flags=INEXACT (17/0) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.51458000000000000000p+3:0x4128a2c0) flags=INEXACT (17/1) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.200c0400000000000000p+3:0x41100602) flags=INEXACT (17/2) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ffcf1400000000000000p+15:0x477fe78a) flags=INEXACT (18/0) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.91ed3c00000000000000p+17:0x4848f69e) flags=INEXACT (18/1) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.5bc56000000000000000p+17:0x482de2b0) flags=INEXACT (18/2) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.08edf000000000000000p+18:0x488476f8) flags=INEXACT (19/0) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.ff7e0800000000000000p+31:0x4f7fbf04) flags=INEXACT (19/1) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.08ee7a00000000000000p+18:0x4884773d) flags=INEXACT (19/2) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800800000000000000p+31:0x4f7fc004) flags=INEXACT (20/0) +op : 
f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ff840800000000000000p+31:0x4f7fc204) flags=INEXACT (20/1) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820800000000000000p+31:0x4f7fc104) flags=INEXACT (20/2) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff860800000000000000p+31:0x4f7fc304) flags=INEXACT (21/0) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820800000000000000p+32:0x4fffc104) flags=INEXACT (21/1) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800800000000000000p+32:0x4fffc004) flags=INEXACT (21/2) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff830800000000000000p+32:0x4fffc184) flags=INEXACT (22/0) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff7f8800000000000000p+33:0x507fbfc4) flags=INEXACT (22/1) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff840800000000000000p+32:0x4fffc204) flags=INEXACT (22/2) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.ff800800000000000000p+33:0x507fc004) flags=INEXACT (23/0) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff820800000000000000p+33:0x507fc104) flags=INEXACT (23/1) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff810800000000000000p+33:0x507fc084) flags=INEXACT (23/2) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(0x1.c0bab600000000000000p+99:0x71605d5b) flags=INEXACT (24/0) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.c0838000000000000000p+116:0x79e041c0) flags=INEXACT (24/1) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.c0829e00000000000000p+116:0x79e0414f) flags=INEXACT (24/2) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (25/0) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (25/1) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + 
f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (25/2) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(inf:0x7f800000) flags=OK (26/0) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(inf:0x7f800000) flags=OK (26/1) +op : f32(inf:0x7f800000) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(inf:0x7f800000) flags=OK (26/2) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fc00000) flags=OK (27/0) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(nan:0x7fc00000) flags=OK (27/1) +op : f32(nan:0x7fc00000) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(nan:0x7fc00000) flags=OK (27/2) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/0) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(inf:0x7f800000) +res: f32(nan:0x7fe00000) flags=INVALID (28/1) +op : f32(nan:0x7fa00000) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/2) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (29/0) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (29/1) +op : f32(-nan:0xffa00000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (29/2) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/0) +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/1) +op : f32(-nan:0xffc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (30/2) +# LP184149 +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-1:0x3f000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=OK (31/0) +op : f32(0x1.00000000000000000000p-149:0x00000001) * f32(0x1.00000000000000000000p-149:0x00000001) + f32(0x1.00000000000000000000p-149:0x00000001) +res: f32(0x1.00000000000000000000p-149:0x00000001) flags=UNDERFLOW INEXACT (32/0) +### Rounding upwards +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffe00000) flags=INVALID (0/0) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (0/1) +op : f32(-inf:0xff800000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(-nan:0xffe00000) flags=INVALID (0/2) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(-nan:0xffc00000) flags=OK (1/0) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-nan:0xffc00000) +res: f32(-nan:0xffc00000) flags=OK (1/1) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffc00000) flags=OK (1/2) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(inf:0x7f800000) flags=OK (2/0) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * 
f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-inf:0xff800000) +res: f32(-inf:0xff800000) flags=OK (2/1) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(inf:0x7f800000) flags=OK (2/2) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (3/0) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (3/1) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (3/2) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (4/0) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) flags=INEXACT (4/1) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) flags=INEXACT (4/2) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(0x1.0c27fa00000000000000p+60:0x5d8613fd) flags=INEXACT (5/0) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) flags=INEXACT (5/1) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(0x1.26c46200000000000000p+34:0x50936231) flags=INEXACT (5/2) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(0x1.91f94000000000000000p-106:0x0ac8fca0) flags=INEXACT (6/0) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(-0x1.31f74e00000000000000p-40:0xab98fba7) flags=INEXACT (6/1) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544200000000000000p-66:0x9ea82a21) flags=INEXACT (6/2) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x1.00000000000000000000p-149:0x00000001) flags=UNDERFLOW INEXACT (7/0) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=OK (7/1) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (7/2) +op : 
f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (8/0) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (8/1) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(-0x0.00000000000000000000p+0:0x80000000) flags=UNDERFLOW INEXACT (8/2) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=OK (9/0) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x1.00000000000000000000p-149:0x00000001) flags=UNDERFLOW INEXACT (9/1) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (9/2) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.ffffe800000000000000p-25:0x337ffff4) flags=INEXACT (10/0) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.ffffe800000000000000p-50:0x26fffff4) flags=INEXACT (10/1) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000200000000000000p-25:0x33000001) flags=INEXACT (10/2) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801c00000000000000p-15:0x387fc00e) flags=INEXACT (11/0) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00080000000000000000p-25:0x33000400) flags=INEXACT (11/1) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0001f400000000000000p-24:0x338000fa) flags=INEXACT (11/2) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00000e00000000000000p-14:0x38800007) flags=INEXACT (12/0) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0ffbf600000000000000p-24:0x3387fdfb) flags=INEXACT (12/1) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801c00000000000000p-15:0x387fc00e) flags=INEXACT (12/2) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00000200000000000000p+0:0x3f800001) flags=INEXACT (13/0) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * 
f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ffc01a00000000000000p-14:0x38ffe00d) flags=INEXACT (13/1) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.ffc01a00000000000000p-14:0x38ffe00d) flags=INEXACT (13/2) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.00440200000000000000p+0:0x3f802201) flags=INEXACT (14/0) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00440200000000000000p+0:0x3f802201) flags=INEXACT (14/1) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00040200000000000000p+0:0x3f800201) flags=INEXACT (14/2) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/0) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.80400000000000000000p+1:0x40402000) flags=OK (15/1) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/2) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=OK (16/0) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=OK (16/1) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.2e23d400000000000000p+2:0x409711ea) flags=INEXACT (16/2) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.12804200000000000000p+3:0x41094021) flags=INEXACT (17/0) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.51458200000000000000p+3:0x4128a2c1) flags=INEXACT (17/1) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.200c0600000000000000p+3:0x41100603) flags=INEXACT (17/2) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ffcf1600000000000000p+15:0x477fe78b) flags=INEXACT (18/0) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.91ed3c00000000000000p+17:0x4848f69e) flags=INEXACT (18/1) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.5bc56200000000000000p+17:0x482de2b1) flags=INEXACT 
(18/2) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.08edf000000000000000p+18:0x488476f8) flags=INEXACT (19/0) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.ff7e0a00000000000000p+31:0x4f7fbf05) flags=INEXACT (19/1) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.08ee7a00000000000000p+18:0x4884773d) flags=INEXACT (19/2) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800a00000000000000p+31:0x4f7fc005) flags=INEXACT (20/0) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ff840800000000000000p+31:0x4f7fc204) flags=INEXACT (20/1) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820800000000000000p+31:0x4f7fc104) flags=INEXACT (20/2) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff860800000000000000p+31:0x4f7fc304) flags=INEXACT (21/0) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820800000000000000p+32:0x4fffc104) flags=INEXACT (21/1) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800a00000000000000p+32:0x4fffc005) flags=INEXACT (21/2) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff830800000000000000p+32:0x4fffc184) flags=INEXACT (22/0) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff7f8a00000000000000p+33:0x507fbfc5) flags=INEXACT (22/1) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff840800000000000000p+32:0x4fffc204) flags=INEXACT (22/2) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.ff800a00000000000000p+33:0x507fc005) flags=INEXACT (23/0) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff820800000000000000p+33:0x507fc104) flags=INEXACT (23/1) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff810800000000000000p+33:0x507fc084) flags=INEXACT (23/2) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(0x1.c0bab800000000000000p+99:0x71605d5c) flags=INEXACT (24/0) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * 
f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.c0838000000000000000p+116:0x79e041c0) flags=INEXACT (24/1) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.c082a000000000000000p+116:0x79e04150) flags=INEXACT (24/2) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (25/0) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (25/1) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (25/2) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(inf:0x7f800000) flags=OK (26/0) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(inf:0x7f800000) flags=OK (26/1) +op : f32(inf:0x7f800000) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(inf:0x7f800000) flags=OK (26/2) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fc00000) flags=OK (27/0) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(nan:0x7fc00000) flags=OK (27/1) +op : f32(nan:0x7fc00000) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(nan:0x7fc00000) flags=OK (27/2) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/0) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(inf:0x7f800000) +res: f32(nan:0x7fe00000) flags=INVALID (28/1) +op : f32(nan:0x7fa00000) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/2) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (29/0) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (29/1) +op : f32(-nan:0xffa00000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (29/2) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/0) +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/1) +op : f32(-nan:0xffc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (30/2) +# LP184149 +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-1:0x3f000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=OK (31/0) +op : f32(0x1.00000000000000000000p-149:0x00000001) * f32(0x1.00000000000000000000p-149:0x00000001) + f32(0x1.00000000000000000000p-149:0x00000001) +res: f32(0x1.00000000000000000000p-148:0x00000002) flags=UNDERFLOW INEXACT (32/0) +### Rounding downwards +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffe00000) flags=INVALID (0/0) +op 
: f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (0/1) +op : f32(-inf:0xff800000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(-nan:0xffe00000) flags=INVALID (0/2) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(-nan:0xffc00000) flags=OK (1/0) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-nan:0xffc00000) +res: f32(-nan:0xffc00000) flags=OK (1/1) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffc00000) flags=OK (1/2) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(inf:0x7f800000) flags=OK (2/0) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-inf:0xff800000) +res: f32(-inf:0xff800000) flags=OK (2/1) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(inf:0x7f800000) flags=OK (2/2) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (3/0) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (3/1) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (3/2) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (4/0) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) flags=INEXACT (4/1) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) flags=INEXACT (4/2) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(0x1.0c27f800000000000000p+60:0x5d8613fc) flags=INEXACT (5/0) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) flags=INEXACT (5/1) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(0x1.26c46000000000000000p+34:0x50936230) flags=INEXACT (5/2) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(0x1.91f93e00000000000000p-106:0x0ac8fc9f) flags=INEXACT (6/0) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) 
+res: f32(-0x1.31f75000000000000000p-40:0xab98fba8) flags=INEXACT (6/1) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=INEXACT (6/2) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (7/0) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=OK (7/1) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (7/2) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (8/0) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (8/1) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(-0x1.00000000000000000000p-149:0x80000001) flags=UNDERFLOW INEXACT (8/2) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=OK (9/0) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (9/1) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (9/2) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.ffffe600000000000000p-25:0x337ffff3) flags=INEXACT (10/0) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.ffffe600000000000000p-50:0x26fffff3) flags=INEXACT (10/1) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=INEXACT (10/2) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801a00000000000000p-15:0x387fc00d) flags=INEXACT (11/0) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.0007fe00000000000000p-25:0x330003ff) flags=INEXACT (11/1) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0001f200000000000000p-24:0x338000f9) flags=INEXACT 
(11/2) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00000c00000000000000p-14:0x38800006) flags=INEXACT (12/0) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0ffbf400000000000000p-24:0x3387fdfa) flags=INEXACT (12/1) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801a00000000000000p-15:0x387fc00d) flags=INEXACT (12/2) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00000000000000000000p+0:0x3f800000) flags=INEXACT (13/0) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ffc01800000000000000p-14:0x38ffe00c) flags=INEXACT (13/1) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.ffc01800000000000000p-14:0x38ffe00c) flags=INEXACT (13/2) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/0) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/1) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00040000000000000000p+0:0x3f800200) flags=INEXACT (14/2) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/0) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.80400000000000000000p+1:0x40402000) flags=OK (15/1) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/2) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=OK (16/0) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=OK (16/1) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.2e23d200000000000000p+2:0x409711e9) flags=INEXACT (16/2) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.12804000000000000000p+3:0x41094020) flags=INEXACT (17/0) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.00000000000000000000p+1:0x40000000) 
+res: f32(0x1.51458000000000000000p+3:0x4128a2c0) flags=INEXACT (17/1) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.200c0400000000000000p+3:0x41100602) flags=INEXACT (17/2) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ffcf1400000000000000p+15:0x477fe78a) flags=INEXACT (18/0) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.91ed3a00000000000000p+17:0x4848f69d) flags=INEXACT (18/1) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.5bc56000000000000000p+17:0x482de2b0) flags=INEXACT (18/2) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.08edee00000000000000p+18:0x488476f7) flags=INEXACT (19/0) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.ff7e0800000000000000p+31:0x4f7fbf04) flags=INEXACT (19/1) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.08ee7800000000000000p+18:0x4884773c) flags=INEXACT (19/2) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800800000000000000p+31:0x4f7fc004) flags=INEXACT (20/0) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ff840600000000000000p+31:0x4f7fc203) flags=INEXACT (20/1) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820600000000000000p+31:0x4f7fc103) flags=INEXACT (20/2) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff860600000000000000p+31:0x4f7fc303) flags=INEXACT (21/0) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820600000000000000p+32:0x4fffc103) flags=INEXACT (21/1) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800800000000000000p+32:0x4fffc004) flags=INEXACT (21/2) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff830600000000000000p+32:0x4fffc183) flags=INEXACT (22/0) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff7f8800000000000000p+33:0x507fbfc4) flags=INEXACT (22/1) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff840600000000000000p+32:0x4fffc203) flags=INEXACT (22/2) +op : 
f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.ff800800000000000000p+33:0x507fc004) flags=INEXACT (23/0) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff820600000000000000p+33:0x507fc103) flags=INEXACT (23/1) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff810600000000000000p+33:0x507fc083) flags=INEXACT (23/2) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(0x1.c0bab600000000000000p+99:0x71605d5b) flags=INEXACT (24/0) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.c0837e00000000000000p+116:0x79e041bf) flags=INEXACT (24/1) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.c0829e00000000000000p+116:0x79e0414f) flags=INEXACT (24/2) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (25/0) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (25/1) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (25/2) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(inf:0x7f800000) flags=OK (26/0) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(inf:0x7f800000) flags=OK (26/1) +op : f32(inf:0x7f800000) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(inf:0x7f800000) flags=OK (26/2) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fc00000) flags=OK (27/0) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(nan:0x7fc00000) flags=OK (27/1) +op : f32(nan:0x7fc00000) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(nan:0x7fc00000) flags=OK (27/2) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/0) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(inf:0x7f800000) +res: f32(nan:0x7fe00000) flags=INVALID (28/1) +op : f32(nan:0x7fa00000) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/2) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (29/0) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (29/1) +op : f32(-nan:0xffa00000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: 
f32(nan:0x7fe00000) flags=INVALID (29/2) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/0) +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/1) +op : f32(-nan:0xffc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (30/2) +# LP184149 +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-1:0x3f000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=OK (31/0) +op : f32(0x1.00000000000000000000p-149:0x00000001) * f32(0x1.00000000000000000000p-149:0x00000001) + f32(0x1.00000000000000000000p-149:0x00000001) +res: f32(0x1.00000000000000000000p-149:0x00000001) flags=UNDERFLOW INEXACT (32/0) +### Rounding to zero +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffe00000) flags=INVALID (0/0) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (0/1) +op : f32(-inf:0xff800000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(-nan:0xffe00000) flags=INVALID (0/2) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(-nan:0xffc00000) flags=OK (1/0) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-nan:0xffc00000) +res: f32(-nan:0xffc00000) flags=OK (1/1) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffc00000) flags=OK (1/2) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(inf:0x7f800000) flags=OK (2/0) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-inf:0xff800000) +res: f32(-inf:0xff800000) flags=OK (2/1) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(inf:0x7f800000) flags=OK (2/2) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (3/0) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (3/1) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (3/2) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (4/0) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) flags=INEXACT (4/1) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) flags=INEXACT (4/2) +op : 
f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(0x1.0c27f800000000000000p+60:0x5d8613fc) flags=INEXACT (5/0) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) flags=INEXACT (5/1) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(0x1.26c46000000000000000p+34:0x50936230) flags=INEXACT (5/2) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(0x1.91f93e00000000000000p-106:0x0ac8fc9f) flags=INEXACT (6/0) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(-0x1.31f74e00000000000000p-40:0xab98fba7) flags=INEXACT (6/1) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544200000000000000p-66:0x9ea82a21) flags=INEXACT (6/2) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (7/0) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=OK (7/1) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (7/2) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (8/0) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (8/1) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(-0x0.00000000000000000000p+0:0x80000000) flags=UNDERFLOW INEXACT (8/2) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=OK (9/0) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (9/1) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (9/2) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.ffffe600000000000000p-25:0x337ffff3) flags=INEXACT (10/0) +op : f32(0x1.00000000000000000000p-25:0x33000000) * 
f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.ffffe600000000000000p-50:0x26fffff3) flags=INEXACT (10/1) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=INEXACT (10/2) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801a00000000000000p-15:0x387fc00d) flags=INEXACT (11/0) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.0007fe00000000000000p-25:0x330003ff) flags=INEXACT (11/1) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0001f200000000000000p-24:0x338000f9) flags=INEXACT (11/2) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00000c00000000000000p-14:0x38800006) flags=INEXACT (12/0) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0ffbf400000000000000p-24:0x3387fdfa) flags=INEXACT (12/1) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801a00000000000000p-15:0x387fc00d) flags=INEXACT (12/2) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00000000000000000000p+0:0x3f800000) flags=INEXACT (13/0) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ffc01800000000000000p-14:0x38ffe00c) flags=INEXACT (13/1) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.ffc01800000000000000p-14:0x38ffe00c) flags=INEXACT (13/2) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/0) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/1) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00040000000000000000p+0:0x3f800200) flags=INEXACT (14/2) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/0) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.80400000000000000000p+1:0x40402000) flags=OK (15/1) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: 
f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/2) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=OK (16/0) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=OK (16/1) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.2e23d200000000000000p+2:0x409711e9) flags=INEXACT (16/2) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.12804000000000000000p+3:0x41094020) flags=INEXACT (17/0) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.51458000000000000000p+3:0x4128a2c0) flags=INEXACT (17/1) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.200c0400000000000000p+3:0x41100602) flags=INEXACT (17/2) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ffcf1400000000000000p+15:0x477fe78a) flags=INEXACT (18/0) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.91ed3a00000000000000p+17:0x4848f69d) flags=INEXACT (18/1) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.5bc56000000000000000p+17:0x482de2b0) flags=INEXACT (18/2) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.08edee00000000000000p+18:0x488476f7) flags=INEXACT (19/0) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.ff7e0800000000000000p+31:0x4f7fbf04) flags=INEXACT (19/1) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.08ee7800000000000000p+18:0x4884773c) flags=INEXACT (19/2) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800800000000000000p+31:0x4f7fc004) flags=INEXACT (20/0) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ff840600000000000000p+31:0x4f7fc203) flags=INEXACT (20/1) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820600000000000000p+31:0x4f7fc103) flags=INEXACT (20/2) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff860600000000000000p+31:0x4f7fc303) flags=INEXACT (21/0) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * 
f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820600000000000000p+32:0x4fffc103) flags=INEXACT (21/1) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800800000000000000p+32:0x4fffc004) flags=INEXACT (21/2) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff830600000000000000p+32:0x4fffc183) flags=INEXACT (22/0) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff7f8800000000000000p+33:0x507fbfc4) flags=INEXACT (22/1) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff840600000000000000p+32:0x4fffc203) flags=INEXACT (22/2) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.ff800800000000000000p+33:0x507fc004) flags=INEXACT (23/0) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff820600000000000000p+33:0x507fc103) flags=INEXACT (23/1) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff810600000000000000p+33:0x507fc083) flags=INEXACT (23/2) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(0x1.c0bab600000000000000p+99:0x71605d5b) flags=INEXACT (24/0) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.c0837e00000000000000p+116:0x79e041bf) flags=INEXACT (24/1) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.c0829e00000000000000p+116:0x79e0414f) flags=INEXACT (24/2) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (25/0) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (25/1) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (25/2) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(inf:0x7f800000) flags=OK (26/0) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(inf:0x7f800000) flags=OK (26/1) +op : f32(inf:0x7f800000) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(inf:0x7f800000) flags=OK (26/2) +op : 
f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fc00000) flags=OK (27/0) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(nan:0x7fc00000) flags=OK (27/1) +op : f32(nan:0x7fc00000) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(nan:0x7fc00000) flags=OK (27/2) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/0) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(inf:0x7f800000) +res: f32(nan:0x7fe00000) flags=INVALID (28/1) +op : f32(nan:0x7fa00000) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/2) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (29/0) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (29/1) +op : f32(-nan:0xffa00000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (29/2) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/0) +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/1) +op : f32(-nan:0xffc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (30/2) +# LP184149 +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-1:0x3f000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=OK (31/0) +op : f32(0x1.00000000000000000000p-149:0x00000001) * f32(0x1.00000000000000000000p-149:0x00000001) + f32(0x1.00000000000000000000p-149:0x00000001) +res: f32(0x1.00000000000000000000p-149:0x00000001) flags=UNDERFLOW INEXACT (32/0) diff --git a/tests/tcg/loongarch64/system/boot.S b/tests/tcg/loongarch64/system/boot.S new file mode 100644 index 0000000000..67eb1c04ce --- /dev/null +++ b/tests/tcg/loongarch64/system/boot.S @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Minimal LoongArch system boot code. + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#include "regdef.h" + + .global _start + .align 16 +_start: + la.local t0, stack_end + move sp, t0 + bl main + + .type _start 2 + .size _start, .-_start + + .global _exit + .align 16 +_exit: +2: /* QEMU ACPI poweroff */ + li.w t0, 0xff + li.w t1, 0x10080010 + st.w t0, t1, 0 + idle 0 + bl 2b + + .type _exit 2 + .size _exit, .-_exit + + .global __sys_outc +__sys_outc: + li.d t1, 1000000 +loop: + lu12i.w t2, 0x1fe00 + ori t0, t2, 0x1e5 + ld.bu t0, t0, 0 + andi t0, t0, 0x20 + ext.w.b t0, t0 + bnez t0, in + addi.w t1, t1, -1 + bnez t1, loop +in: + ext.w.b a0, a0 + lu12i.w t0, 0x1fe00 + ori t0, t0, 0x1e0 + st.b a0, t0, 0 + jirl $r0, ra, 0 + + .data + .align 4 +stack: + .space 65536 +stack_end: diff --git a/tests/tcg/loongarch64/system/kernel.ld b/tests/tcg/loongarch64/system/kernel.ld new file mode 100644 index 0000000000..f1a7c0168c --- /dev/null +++ b/tests/tcg/loongarch64/system/kernel.ld @@ -0,0 +1,30 @@ +ENTRY(_start) + +SECTIONS +{ + /* Linux kernel legacy start address. */ + . = 0x9000000000200000; + _text = .; + .text : { + *(.text) + } + .rodata : { + *(.rodata) + } + _etext = .; + + . 
= ALIGN(8192); + _data = .; + .got : { + *(.got) + } + .data : { + *(.sdata) + *(.data) + } + _edata = .; + .bss : { + *(.bss) + } + _end = .; +} diff --git a/tests/tcg/loongarch64/system/regdef.h b/tests/tcg/loongarch64/system/regdef.h new file mode 100644 index 0000000000..faa09b2377 --- /dev/null +++ b/tests/tcg/loongarch64/system/regdef.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ +#ifndef _ASM_REGDEF_H +#define _ASM_REGDEF_H + +#define zero $r0 /* wired zero */ +#define ra $r1 /* return address */ +#define tp $r2 +#define sp $r3 /* stack pointer */ +#define v0 $r4 /* return value - caller saved */ +#define v1 $r5 +#define a0 $r4 /* argument registers */ +#define a1 $r5 +#define a2 $r6 +#define a3 $r7 +#define a4 $r8 +#define a5 $r9 +#define a6 $r10 +#define a7 $r11 +#define t0 $r12 /* caller saved */ +#define t1 $r13 +#define t2 $r14 +#define t3 $r15 +#define t4 $r16 +#define t5 $r17 +#define t6 $r18 +#define t7 $r19 +#define t8 $r20 + /* $r21: Temporarily reserved */ +#define fp $r22 /* frame pointer */ +#define s0 $r23 /* callee saved */ +#define s1 $r24 +#define s2 $r25 +#define s3 $r26 +#define s4 $r27 +#define s5 $r28 +#define s6 $r29 +#define s7 $r30 +#define s8 $r31 + +#define gr0 $r0 +#define gr1 $r1 +#define gr2 $r2 +#define gr3 $r3 +#define gr4 $r4 +#define gr5 $r5 +#define gr6 $r6 +#define gr7 $r7 +#define gr8 $r8 +#define gr9 $r9 +#define gr10 $r10 +#define gr11 $r11 +#define gr12 $r12 +#define gr13 $r13 +#define gr14 $r14 +#define gr15 $r15 +#define gr16 $r16 +#define gr17 $r17 +#define gr18 $r18 +#define gr19 $r19 +#define gr20 $r20 +#define gr21 $r21 +#define gr22 $r22 +#define gr23 $r23 +#define gr24 $r24 +#define gr25 $r25 +#define gr26 $r26 +#define gr27 $r27 +#define gr28 $r28 +#define gr29 $r29 +#define gr30 $r30 +#define gr31 $r31 + +#define STT_NOTYPE 0 +#define STT_OBJECT 1 +#define STT_FUNC 2 +#define STT_SECTION 3 +#define STT_FILE 4 +#define STT_COMMON 5 +#define STT_TLS 6 + +#define ASM_NL ; + +#endif /* _ASM_REGDEF_H */ diff --git a/tests/tcg/loongarch64/test_bit.c b/tests/tcg/loongarch64/test_bit.c new file mode 100644 index 0000000000..a6d9904909 --- /dev/null +++ b/tests/tcg/loongarch64/test_bit.c @@ -0,0 +1,88 @@ +#include +#include + +#define ARRAY_SIZE(X) (sizeof(X) / sizeof(*(X))) +#define TEST_CLO(N) \ +static uint64_t test_clo_##N(uint64_t rj) \ +{ \ + uint64_t rd = 0; \ + \ + asm volatile("clo."#N" %0, %1\n\t" \ + : "=r"(rd) \ + : "r"(rj) \ + : ); \ + return rd; \ +} + +#define TEST_CLZ(N) \ +static uint64_t test_clz_##N(uint64_t rj) \ +{ \ + uint64_t rd = 0; \ + \ + asm volatile("clz."#N" %0, %1\n\t" \ + : "=r"(rd) \ + : "r"(rj) \ + : ); \ + return rd; \ +} + +#define TEST_CTO(N) \ +static uint64_t test_cto_##N(uint64_t rj) \ +{ \ + uint64_t rd = 0; \ + \ + asm volatile("cto."#N" %0, %1\n\t" \ + : "=r"(rd) \ + : "r"(rj) \ + : ); \ + return rd; \ +} + +#define TEST_CTZ(N) \ +static uint64_t test_ctz_##N(uint64_t rj) \ +{ \ + uint64_t rd = 0; \ + \ + asm volatile("ctz."#N" %0, %1\n\t" \ + : "=r"(rd) \ + : "r"(rj) \ + : ); \ + return rd; \ +} + +TEST_CLO(w) +TEST_CLO(d) +TEST_CLZ(w) +TEST_CLZ(d) +TEST_CTO(w) +TEST_CTO(d) +TEST_CTZ(w) +TEST_CTZ(d) + +struct vector { + uint64_t (*func)(uint64_t); + uint64_t u; + uint64_t r; +}; + +static struct vector vectors[] = { + {test_clo_w, 0xfff11fff392476ab, 0}, + {test_clo_d, 0xabd28a64000000, 0}, + {test_clz_w, 0xfaffff42392476ab, 2}, + {test_clz_d, 0xabd28a64000000, 8}, + {test_cto_w, 0xfff11fff392476ab, 2}, + {test_cto_d, 
0xabd28a64000000, 0}, + {test_ctz_w, 0xfaffff42392476ab, 0}, + {test_ctz_d, 0xabd28a64000000, 26}, +}; + +int main() +{ + int i; + + for (i = 0; i < ARRAY_SIZE(vectors); i++) { + assert((*vectors[i].func)(vectors[i].u) == vectors[i].r); + } + + return 0; +} diff --git a/tests/tcg/loongarch64/test_div.c b/tests/tcg/loongarch64/test_div.c new file mode 100644 index 0000000000..6c31fe97ae --- /dev/null +++ b/tests/tcg/loongarch64/test_div.c @@ -0,0 +1,54 @@ +#include +#include +#include + +#define TEST_DIV(N, M) \ +static void test_div_ ##N(uint ## M ## _t rj, \ + uint ## M ## _t rk, \ + uint64_t rm) \ +{ \ + uint64_t rd = 0; \ + \ + asm volatile("div."#N" %0,%1,%2\n\t" \ + : "=r"(rd) \ + : "r"(rj), "r"(rk) \ + : ); \ + assert(rd == rm); \ +} + +#define TEST_MOD(N, M) \ +static void test_mod_ ##N(uint ## M ## _t rj, \ + uint ## M ## _t rk, \ + uint64_t rm) \ +{ \ + uint64_t rd = 0; \ + \ + asm volatile("mod."#N" %0,%1,%2\n\t" \ + : "=r"(rd) \ + : "r"(rj), "r"(rk) \ + : ); \ + assert(rd == rm); \ +} + +TEST_DIV(w, 32) +TEST_DIV(wu, 32) +TEST_DIV(d, 64) +TEST_DIV(du, 64) +TEST_MOD(w, 32) +TEST_MOD(wu, 32) +TEST_MOD(d, 64) +TEST_MOD(du, 64) + +int main(void) +{ + test_div_w(0xffaced97, 0xc36abcde, 0x0); + test_div_wu(0xffaced97, 0xc36abcde, 0x1); + test_div_d(0xffaced973582005f, 0xef56832a358b, 0xffffffffffffffa8); + test_div_du(0xffaced973582005f, 0xef56832a358b, 0x11179); + test_mod_w(0x7cf18c32, 0xa04da650, 0x1d3f3282); + test_mod_wu(0x7cf18c32, 0xc04da650, 0x7cf18c32); + test_mod_d(0x7cf18c3200000000, 0xa04da65000000000, 0x1d3f328200000000); + test_mod_du(0x7cf18c3200000000, 0xc04da65000000000, 0x7cf18c3200000000); + + return 0; +} diff --git a/tests/tcg/loongarch64/test_fclass.c b/tests/tcg/loongarch64/test_fclass.c new file mode 100644 index 0000000000..7ba1d2c151 --- /dev/null +++ b/tests/tcg/loongarch64/test_fclass.c @@ -0,0 +1,130 @@ +#include + +/* float class */ +#define FLOAT_CLASS_SIGNALING_NAN 0x001 +#define FLOAT_CLASS_QUIET_NAN 0x002 +#define FLOAT_CLASS_NEGATIVE_INFINITY 0x004 +#define FLOAT_CLASS_NEGATIVE_NORMAL 0x008 +#define FLOAT_CLASS_NEGATIVE_SUBNORMAL 0x010 +#define FLOAT_CLASS_NEGATIVE_ZERO 0x020 +#define FLOAT_CLASS_POSITIVE_INFINITY 0x040 +#define FLOAT_CLASS_POSITIVE_NORMAL 0x080 +#define FLOAT_CLASS_POSITIVE_SUBNORMAL 0x100 +#define FLOAT_CLASS_POSITIVE_ZERO 0x200 + +#define TEST_FCLASS(N) \ +void test_fclass_##N(long s) \ +{ \ + double fd; \ + long rd; \ + \ + asm volatile("fclass."#N" %0, %2\n\t" \ + "movfr2gr."#N" %1, %2\n\t" \ + : "=f"(fd), "=r"(rd) \ + : "f"(s) \ + : ); \ + switch (rd) { \ + case FLOAT_CLASS_SIGNALING_NAN: \ + case FLOAT_CLASS_QUIET_NAN: \ + case FLOAT_CLASS_NEGATIVE_INFINITY: \ + case FLOAT_CLASS_NEGATIVE_NORMAL: \ + case FLOAT_CLASS_NEGATIVE_SUBNORMAL: \ + case FLOAT_CLASS_NEGATIVE_ZERO: \ + case FLOAT_CLASS_POSITIVE_INFINITY: \ + case FLOAT_CLASS_POSITIVE_NORMAL: \ + case FLOAT_CLASS_POSITIVE_SUBNORMAL: \ + case FLOAT_CLASS_POSITIVE_ZERO: \ + break; \ + default: \ + printf("fclass."#N" test failed.\n"); \ + break; \ + } \ +} + +/* + * float format + * type | S | Exponent | Fraction | example value + * 31 | 30 --23 | 22 | 21 --0 | + * | bit | + * SNAN 0/1 | 0xFF | 0 | !=0 | 0x7FBFFFFF + * QNAN 0/1 | 0xFF | 1 | | 0x7FCFFFFF + * -infinity 1 | 0xFF | 0 | 0xFF800000 + * -normal 1 | [1, 0xFE] | [0, 0x7FFFFF]| 0xFF7FFFFF + * -subnormal 1 | 0 | !=0 | 0x807FFFFF + * -0 1 | 0 | 0 | 0x80000000 + * +infinity 0 | 0xFF | 0 | 0x7F800000 + * +normal 0 | [1, 0xFE] | [0, 0x7FFFFF]| 0x7F7FFFFF + * +subnormal 0 | 0 | !=0 | 0x007FFFFF + * +0 0 | 0 | 0 | 
0x00000000 + */ + +long float_snan = 0x7FBFFFFF; +long float_qnan = 0x7FCFFFFF; +long float_neg_infinity = 0xFF800000; +long float_neg_normal = 0xFF7FFFFF; +long float_neg_subnormal = 0x807FFFFF; +long float_neg_zero = 0x80000000; +long float_post_infinity = 0x7F800000; +long float_post_normal = 0x7F7FFFFF; +long float_post_subnormal = 0x007FFFFF; +long float_post_zero = 0x00000000; + +/* + * double format + * type | S | Exponent | Fraction | example value + * 63 | 62 -- 52 | 51 | 50 -- 0 | + * | bit | + * SNAN 0/1 | 0x7FF | 0 | !=0 | 0x7FF7FFFFFFFFFFFF + * QNAN 0/1 | 0x7FF | 1 | | 0x7FFFFFFFFFFFFFFF + * -infinity 1 | 0x7FF | 0 | 0xFFF0000000000000 + * -normal 1 |[1, 0x7FE] | | 0xFFEFFFFFFFFFFFFF + * -subnormal 1 | 0 | !=0 | 0x8007FFFFFFFFFFFF + * -0 1 | 0 | 0 | 0x8000000000000000 + * +infinity 0 | 0x7FF | 0 | 0x7FF0000000000000 + * +normal 0 |[1, 0x7FE] | | 0x7FEFFFFFFFFFFFFF + * +subnormal 0 | 0 | !=0 | 0x000FFFFFFFFFFFFF + * +0 0 | 0 | 0 | 0x0000000000000000 + */ + +long double_snan = 0x7FF7FFFFFFFFFFFF; +long double_qnan = 0x7FFFFFFFFFFFFFFF; +long double_neg_infinity = 0xFFF0000000000000; +long double_neg_normal = 0xFFEFFFFFFFFFFFFF; +long double_neg_subnormal = 0x8007FFFFFFFFFFFF; +long double_neg_zero = 0x8000000000000000; +long double_post_infinity = 0x7FF0000000000000; +long double_post_normal = 0x7FEFFFFFFFFFFFFF; +long double_post_subnormal = 0x000FFFFFFFFFFFFF; +long double_post_zero = 0x0000000000000000; + +TEST_FCLASS(s) +TEST_FCLASS(d) + +int main() +{ + /* fclass.s */ + test_fclass_s(float_snan); + test_fclass_s(float_qnan); + test_fclass_s(float_neg_infinity); + test_fclass_s(float_neg_normal); + test_fclass_s(float_neg_subnormal); + test_fclass_s(float_neg_zero); + test_fclass_s(float_post_infinity); + test_fclass_s(float_post_normal); + test_fclass_s(float_post_subnormal); + test_fclass_s(float_post_zero); + + /* fclass.d */ + test_fclass_d(double_snan); + test_fclass_d(double_qnan); + test_fclass_d(double_neg_infinity); + test_fclass_d(double_neg_normal); + test_fclass_d(double_neg_subnormal); + test_fclass_d(double_neg_zero); + test_fclass_d(double_post_infinity); + test_fclass_d(double_post_normal); + test_fclass_d(double_post_subnormal); + test_fclass_d(double_post_zero); + + return 0; +} diff --git a/tests/tcg/loongarch64/test_fcsr.c b/tests/tcg/loongarch64/test_fcsr.c new file mode 100644 index 0000000000..ad3609eb99 --- /dev/null +++ b/tests/tcg/loongarch64/test_fcsr.c @@ -0,0 +1,15 @@ +#include + +int main() +{ + unsigned fcsr; + + asm("movgr2fcsr $r0,$r0\n\t" + "movgr2fr.d $f0,$r0\n\t" + "fdiv.d $f0,$f0,$f0\n\t" + "movfcsr2gr %0,$r0" + : "=r"(fcsr) : : "f0"); + + assert(fcsr & (16 << 16)); /* Invalid */ + return 0; +} diff --git a/tests/tcg/loongarch64/test_fpcom.c b/tests/tcg/loongarch64/test_fpcom.c new file mode 100644 index 0000000000..9e81f767f9 --- /dev/null +++ b/tests/tcg/loongarch64/test_fpcom.c @@ -0,0 +1,37 @@ +#include + +#define TEST_COMP(N) \ +void test_##N(float fj, float fk) \ +{ \ + int rd = 0; \ + \ + asm volatile("fcmp."#N".s $fcc6,%1,%2\n" \ + "movcf2gr %0, $fcc6\n" \ + : "=r"(rd) \ + : "f"(fj), "f"(fk) \ + : ); \ + assert(rd == 1); \ +} + +TEST_COMP(ceq) +TEST_COMP(clt) +TEST_COMP(cle) +TEST_COMP(cne) +TEST_COMP(seq) +TEST_COMP(slt) +TEST_COMP(sle) +TEST_COMP(sne) + +int main() +{ + test_ceq(0xff700102, 0xff700102); + test_clt(0x00730007, 0xff730007); + test_cle(0xff70130a, 0xff70130b); + test_cne(0x1238acde, 0xff71111f); + test_seq(0xff766618, 0xff766619); + test_slt(0xff78881c, 0xff78901d); + test_sle(0xff780b22, 0xff790b22); + 
test_sne(0xff7bcd25, 0xff7a26cf); + + return 0; +} diff --git a/tests/tcg/loongarch64/test_pcadd.c b/tests/tcg/loongarch64/test_pcadd.c new file mode 100644 index 0000000000..da2a64db82 --- /dev/null +++ b/tests/tcg/loongarch64/test_pcadd.c @@ -0,0 +1,38 @@ +#include +#include +#include + +#define TEST_PCADDU(N) \ +void test_##N(int a) \ +{ \ + uint64_t rd1 = 0; \ + uint64_t rd2 = 0; \ + uint64_t rm, rn; \ + \ + asm volatile(""#N" %0, 0x104\n\t" \ + ""#N" %1, 0x12345\n\t" \ + : "=r"(rd1), "=r"(rd2) \ + : ); \ + rm = rd2 - rd1; \ + if (!strcmp(#N, "pcalau12i")) { \ + rn = ((0x12345UL - 0x104) << a) & ~0xfff; \ + } else { \ + rn = ((0x12345UL - 0x104) << a) + 4; \ + } \ + assert(rm == rn); \ +} + +TEST_PCADDU(pcaddi) +TEST_PCADDU(pcaddu12i) +TEST_PCADDU(pcaddu18i) +TEST_PCADDU(pcalau12i) + +int main() +{ + test_pcaddi(2); + test_pcaddu12i(12); + test_pcaddu18i(18); + test_pcalau12i(12); + + return 0; +} diff --git a/tests/tcg/m68k/Makefile.target b/tests/tcg/m68k/Makefile.target index 62f109eef4..1163c7ef03 100644 --- a/tests/tcg/m68k/Makefile.target +++ b/tests/tcg/m68k/Makefile.target @@ -3,5 +3,8 @@ # m68k specific tweaks - specifically masking out broken tests # +VPATH += $(SRC_PATH)/tests/tcg/m68k +TESTS += trap + # On m68k Linux supports 4k and 8k pages (but 8k is currently broken) EXTRA_RUNS+=run-test-mmap-4096 # run-test-mmap-8192 diff --git a/tests/tcg/m68k/trap.c b/tests/tcg/m68k/trap.c new file mode 100644 index 0000000000..96cac18d4d --- /dev/null +++ b/tests/tcg/m68k/trap.c @@ -0,0 +1,129 @@ +/* + * Test m68k trap addresses. + */ + +#define _GNU_SOURCE 1 +#include +#include +#include + +static int expect_sig; +static int expect_si_code; +static void *expect_si_addr; +static greg_t expect_mc_pc; +static volatile int got_signal; + +static void sig_handler(int sig, siginfo_t *si, void *puc) +{ + ucontext_t *uc = puc; + mcontext_t *mc = &uc->uc_mcontext; + + assert(sig == expect_sig); + assert(si->si_code == expect_si_code); + assert(si->si_addr == expect_si_addr); + assert(mc->gregs[R_PC] == expect_mc_pc); + + got_signal = 1; +} + +#define FMT_INS [ad] "a"(&expect_si_addr), [pc] "a"(&expect_mc_pc) +#define FMT0_STR(S) \ + "move.l #1f, (%[ad])\n\tmove.l #1f, (%[pc])\n" S "\n1:\n" +#define FMT2_STR(S) \ + "move.l #0f, (%[ad])\n\tmove.l #1f, (%[pc])\n" S "\n1:\n" + +#define CHECK_SIG do { assert(got_signal); got_signal = 0; } while (0) + +int main(int argc, char **argv) +{ + struct sigaction act = { + .sa_sigaction = sig_handler, + .sa_flags = SA_SIGINFO + }; + int t0, t1; + + sigaction(SIGILL, &act, NULL); + sigaction(SIGTRAP, &act, NULL); + sigaction(SIGFPE, &act, NULL); + + expect_sig = SIGFPE; + expect_si_code = FPE_INTOVF; + asm volatile(FMT2_STR("0:\tchk %0, %1") : : "d"(0), "d"(-1), FMT_INS); + CHECK_SIG; + +#if 0 + /* FIXME: chk2 not correctly translated. 
*/ + int bounds[2] = { 0, 1 }; + asm volatile(FMT2_STR("0:\tchk2.l %0, %1") + : : "m"(bounds), "d"(2), FMT_INS); + CHECK_SIG; +#endif + + asm volatile(FMT2_STR("cmp.l %0, %1\n0:\ttrapv") + : : "d"(INT_MIN), "d"(1), FMT_INS); + CHECK_SIG; + + asm volatile(FMT2_STR("cmp.l %0, %0\n0:\ttrapeq") + : : "d"(0), FMT_INS); + CHECK_SIG; + + asm volatile(FMT2_STR("cmp.l %0, %0\n0:\ttrapeq.w #0x1234") + : : "d"(0), FMT_INS); + CHECK_SIG; + + asm volatile(FMT2_STR("cmp.l %0, %0\n0:\ttrapeq.l #0x12345678") + : : "d"(0), FMT_INS); + CHECK_SIG; + + asm volatile(FMT2_STR("fcmp.x %0, %0\n0:\tftrapeq") + : : "f"(0.0L), FMT_INS); + CHECK_SIG; + + expect_si_code = FPE_INTDIV; + + asm volatile(FMT2_STR("0:\tdivs.w %1, %0") + : "=d"(t0) : "d"(0), "0"(1), FMT_INS); + CHECK_SIG; + + asm volatile(FMT2_STR("0:\tdivsl.l %2, %1:%0") + : "=d"(t0), "=d"(t1) : "d"(0), "0"(1), FMT_INS); + CHECK_SIG; + + expect_sig = SIGILL; + expect_si_code = ILL_ILLTRP; + asm volatile(FMT0_STR("trap #1") : : FMT_INS); + CHECK_SIG; + asm volatile(FMT0_STR("trap #2") : : FMT_INS); + CHECK_SIG; + asm volatile(FMT0_STR("trap #3") : : FMT_INS); + CHECK_SIG; + asm volatile(FMT0_STR("trap #4") : : FMT_INS); + CHECK_SIG; + asm volatile(FMT0_STR("trap #5") : : FMT_INS); + CHECK_SIG; + asm volatile(FMT0_STR("trap #6") : : FMT_INS); + CHECK_SIG; + asm volatile(FMT0_STR("trap #7") : : FMT_INS); + CHECK_SIG; + asm volatile(FMT0_STR("trap #8") : : FMT_INS); + CHECK_SIG; + asm volatile(FMT0_STR("trap #9") : : FMT_INS); + CHECK_SIG; + asm volatile(FMT0_STR("trap #10") : : FMT_INS); + CHECK_SIG; + asm volatile(FMT0_STR("trap #11") : : FMT_INS); + CHECK_SIG; + asm volatile(FMT0_STR("trap #12") : : FMT_INS); + CHECK_SIG; + asm volatile(FMT0_STR("trap #13") : : FMT_INS); + CHECK_SIG; + asm volatile(FMT0_STR("trap #14") : : FMT_INS); + CHECK_SIG; + + expect_sig = SIGTRAP; + expect_si_code = TRAP_BRKPT; + asm volatile(FMT0_STR("trap #15") : : FMT_INS); + CHECK_SIG; + + return 0; +} diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target index 85a6fb7a2e..5f0fee1aad 100644 --- a/tests/tcg/multiarch/Makefile.target +++ b/tests/tcg/multiarch/Makefile.target @@ -8,9 +8,13 @@ MULTIARCH_SRC=$(SRC_PATH)/tests/tcg/multiarch # Set search path for all sources -VPATH += $(MULTIARCH_SRC) -MULTIARCH_SRCS =$(notdir $(wildcard $(MULTIARCH_SRC)/*.c)) -MULTIARCH_TESTS =$(filter-out float_helpers, $(MULTIARCH_SRCS:.c=)) +VPATH += $(MULTIARCH_SRC) +MULTIARCH_SRCS = $(notdir $(wildcard $(MULTIARCH_SRC)/*.c)) +ifeq ($(filter %-linux-user, $(TARGET)),$(TARGET)) +VPATH += $(MULTIARCH_SRC)/linux +MULTIARCH_SRCS += $(notdir $(wildcard $(MULTIARCH_SRC)/linux/*.c)) +endif +MULTIARCH_TESTS = $(MULTIARCH_SRCS:.c=) # # The following are any additional rules needed to build things @@ -18,11 +22,11 @@ MULTIARCH_TESTS =$(filter-out float_helpers, $(MULTIARCH_SRCS:.c=)) float_%: LDFLAGS+=-lm -float_%: float_%.c float_helpers.c - $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< $(MULTIARCH_SRC)/float_helpers.c -o $@ $(LDFLAGS) +float_%: float_%.c libs/float_helpers.c + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< $(MULTIARCH_SRC)/libs/float_helpers.c -o $@ $(LDFLAGS) run-float_%: float_% - $(call run-test,$<, $(QEMU) $(QEMU_OPTS) $<,"$< on $(TARGET_NAME)") + $(call run-test,$<, $(QEMU) $(QEMU_OPTS) $<) $(call conditional-diff-out,$<,$(SRC_PATH)/tests/tcg/$(TARGET_NAME)/$<.ref) @@ -32,13 +36,8 @@ threadcount: LDFLAGS+=-lpthread signals: LDFLAGS+=-lrt -lpthread -# This triggers failures on s390x hosts about 4% of the time -# This triggers failures for hppa-linux about 1% of the time 
-run-signals: signals - $(call skip-test, $<, "BROKEN awaiting sigframe clean-ups and vdso support") - -run-plugin-signals-with-%: - $(call skip-test, $<, "BROKEN awaiting sigframe clean-ups and vdso support") +munmap-pthread: CFLAGS+=-pthread +munmap-pthread: LDFLAGS+=-pthread # We define the runner for test-mmap after the individual # architectures have defined their supported pages sizes. If no @@ -46,13 +45,11 @@ run-plugin-signals-with-%: # default case (host page size) run-test-mmap: test-mmap - $(call run-test, test-mmap, $(QEMU) $<, \ - "$< (default) on $(TARGET_NAME)") + $(call run-test, test-mmap, $(QEMU) $<, $< (default)) # additional page sizes (defined by each architecture adding to EXTRA_RUNS) run-test-mmap-%: test-mmap - $(call run-test, test-mmap-$*, $(QEMU) -p $* $<,\ - "$< ($* byte pages) on $(TARGET_NAME)") + $(call run-test, test-mmap-$*, $(QEMU) -p $* $<, $< ($* byte pages)) ifneq ($(HAVE_GDB_BIN),) GDB_SCRIPT=$(SRC_PATH)/tests/guest-debug/run-test.py @@ -62,22 +59,28 @@ run-gdbstub-sha1: sha1 --gdb $(HAVE_GDB_BIN) \ --qemu $(QEMU) --qargs "$(QEMU_OPTS)" \ --bin $< --test $(MULTIARCH_SRC)/gdbstub/sha1.py, \ - "basic gdbstub support") - -EXTRA_RUNS += run-gdbstub-sha1 + basic gdbstub support) run-gdbstub-qxfer-auxv-read: sha1 $(call run-test, $@, $(GDB_SCRIPT) \ --gdb $(HAVE_GDB_BIN) \ --qemu $(QEMU) --qargs "$(QEMU_OPTS)" \ --bin $< --test $(MULTIARCH_SRC)/gdbstub/test-qxfer-auxv-read.py, \ - "basic gdbstub qXfer:auxv:read support") + basic gdbstub qXfer:auxv:read support) + +run-gdbstub-thread-breakpoint: testthread + $(call run-test, $@, $(GDB_SCRIPT) \ + --gdb $(HAVE_GDB_BIN) \ + --qemu $(QEMU) --qargs "$(QEMU_OPTS)" \ + --bin $< --test $(MULTIARCH_SRC)/gdbstub/test-thread-breakpoint.py, \ + hitting a breakpoint on non-main thread) else run-gdbstub-%: $(call skip-test, "gdbstub test $*", "need working gdb") endif -EXTRA_RUNS += run-gdbstub-sha1 run-gdbstub-qxfer-auxv-read +EXTRA_RUNS += run-gdbstub-sha1 run-gdbstub-qxfer-auxv-read \ + run-gdbstub-thread-breakpoint # ARM Compatible Semi Hosting Tests # @@ -92,13 +95,13 @@ VPATH += $(MULTIARCH_SRC)/arm-compat-semi semihosting: CFLAGS+=-I$(SRC_PATH)/tests/tcg/$(TARGET_NAME) run-semihosting: semihosting - $(call run-test,$<,$(QEMU) $< 2> $<.err, "$< on $(TARGET_NAME)") + $(call run-test,$<,$(QEMU) $< 2> $<.err) run-plugin-semihosting-with-%: $(call run-test, $@, $(QEMU) $(QEMU_OPTS) \ -plugin $(PLUGIN_LIB)/$(call extract-plugin,$@) \ $(call strip-plugin,$<) 2> $<.err, \ - "$< on $(TARGET_NAME) with $*") + $< with $*) semiconsole: CFLAGS+=-I$(SRC_PATH)/tests/tcg/$(TARGET_NAME) diff --git a/tests/tcg/multiarch/float_convd.c b/tests/tcg/multiarch/float_convd.c new file mode 100644 index 0000000000..0a1f0f93dc --- /dev/null +++ b/tests/tcg/multiarch/float_convd.c @@ -0,0 +1,106 @@ +/* + * Floating Point Convert Doubles to Various + * + * Copyright (c) 2019 Linaro + * + * SPDX-License-Identifier: GPL-3.0-or-later + */ + +#include +#include +#include +#include +#include + + +#include "float_helpers.h" + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +typedef struct { + int flag; + char *desc; +} float_mapping; + +float_mapping round_flags[] = { + { FE_TONEAREST, "to nearest" }, +#ifdef FE_UPWARD + { FE_UPWARD, "upwards" }, +#endif +#ifdef FE_DOWNWARD + { FE_DOWNWARD, "downwards" }, +#endif +#ifdef FE_TOWARDZERO + { FE_TOWARDZERO, "to zero" } +#endif +}; + +static void print_input(double input) +{ + char *in_fmt = fmt_f64(input); + printf("from double: %s\n", in_fmt); + free(in_fmt); +} + +static void 
convert_double_to_single(double input) +{ + float output; + char *out_fmt, *flag_fmt; + + feclearexcept(FE_ALL_EXCEPT); + + output = input; + + flag_fmt = fmt_flags(); + out_fmt = fmt_f32(output); + printf(" to single: %s (%s)\n", out_fmt, flag_fmt); + free(out_fmt); + free(flag_fmt); +} + +#define xstr(a) str(a) +#define str(a) #a + +#define CONVERT_DOUBLE_TO_INT(TYPE, FMT) \ + static void convert_double_to_ ## TYPE(double input) \ + { \ + TYPE ## _t output; \ + char *flag_fmt; \ + const char to[] = "to " xstr(TYPE); \ + feclearexcept(FE_ALL_EXCEPT); \ + output = input; \ + flag_fmt = fmt_flags(); \ + printf("%11s: %" FMT " (%s)\n", to, output, flag_fmt); \ + free(flag_fmt); \ + } + +CONVERT_DOUBLE_TO_INT( int32, PRId32) +CONVERT_DOUBLE_TO_INT(uint32, PRId32) +CONVERT_DOUBLE_TO_INT( int64, PRId64) +CONVERT_DOUBLE_TO_INT(uint64, PRId64) + +int main(int argc, char *argv[argc]) +{ + int i, j, nums; + + nums = get_num_f64(); + + for (i = 0; i < ARRAY_SIZE(round_flags); ++i) { + if (fesetround(round_flags[i].flag) != 0) { + printf("### Rounding %s skipped\n", round_flags[i].desc); + continue; + } + printf("### Rounding %s\n", round_flags[i].desc); + for (j = 0; j < nums; j++) { + double input = get_f64(j); + print_input(input); + convert_double_to_single(input); + convert_double_to_int32(input); + convert_double_to_int64(input); + convert_double_to_uint32(input); + convert_double_to_uint64(input); + } + } + + return 0; +} diff --git a/tests/tcg/multiarch/float_convs.c b/tests/tcg/multiarch/float_convs.c index e9be75c2d5..2e4fa55324 100644 --- a/tests/tcg/multiarch/float_convs.c +++ b/tests/tcg/multiarch/float_convs.c @@ -51,8 +51,8 @@ static void convert_single_to_double(float input) output = input; - out_fmt = fmt_f64(output); flag_fmt = fmt_flags(); + out_fmt = fmt_f64(output); printf(" to double: %s (%s)\n", out_fmt, flag_fmt); free(out_fmt); free(flag_fmt); diff --git a/tests/tcg/multiarch/float_madds.c b/tests/tcg/multiarch/float_madds.c index e422608ccd..4888f8641f 100644 --- a/tests/tcg/multiarch/float_madds.c +++ b/tests/tcg/multiarch/float_madds.c @@ -54,8 +54,8 @@ static void print_result(float r, int j, int k) { char *r_fmt, *flag_fmt; - r_fmt = fmt_f32(r); flag_fmt = fmt_flags(); + r_fmt = fmt_f32(r); printf("res: %s flags=%s (%d/%d)\n", r_fmt, flag_fmt, j, k); diff --git a/tests/tcg/multiarch/gdbstub/test-thread-breakpoint.py b/tests/tcg/multiarch/gdbstub/test-thread-breakpoint.py new file mode 100644 index 0000000000..798d508bc7 --- /dev/null +++ b/tests/tcg/multiarch/gdbstub/test-thread-breakpoint.py @@ -0,0 +1,60 @@ +from __future__ import print_function +# +# Test auxiliary vector is loaded via gdbstub +# +# This is launched via tests/guest-debug/run-test.py +# + +import gdb +import sys + +failcount = 0 + +def report(cond, msg): + "Report success/fail of test" + if cond: + print ("PASS: %s" % (msg)) + else: + print ("FAIL: %s" % (msg)) + global failcount + failcount += 1 + +def run_test(): + "Run through the tests one by one" + + sym, ok = gdb.lookup_symbol("thread1_func") + gdb.execute("b thread1_func") + gdb.execute("c") + + frame = gdb.selected_frame() + report(str(frame.function()) == "thread1_func", "break @ %s"%frame) + +# +# This runs as the script it sourced (via -x, via run-test.py) +# +try: + inferior = gdb.selected_inferior() + arch = inferior.architecture() + print("ATTACHED: %s" % arch.name()) +except (gdb.error, AttributeError): + print("SKIPPING (not connected)", file=sys.stderr) + exit(0) + +if gdb.parse_and_eval('$pc') == 0: + print("SKIP: PC not set") + 
exit(0) + +try: + # These are not very useful in scripts + gdb.execute("set pagination off") + gdb.execute("set confirm off") + + # Run the actual tests + run_test() +except (gdb.error): + print ("GDB Exception: %s" % (sys.exc_info()[0])) + failcount += 1 + pass + +print("All tests complete: %d failures" % failcount) +exit(failcount) diff --git a/tests/tcg/multiarch/float_helpers.c b/tests/tcg/multiarch/libs/float_helpers.c similarity index 99% rename from tests/tcg/multiarch/float_helpers.c rename to tests/tcg/multiarch/libs/float_helpers.c index bc530e5732..4e68d2b659 100644 --- a/tests/tcg/multiarch/float_helpers.c +++ b/tests/tcg/multiarch/libs/float_helpers.c @@ -22,7 +22,7 @@ #include #include -#include "float_helpers.h" +#include "../float_helpers.h" #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) diff --git a/tests/tcg/multiarch/linux/linux-madvise.c b/tests/tcg/multiarch/linux/linux-madvise.c new file mode 100644 index 0000000000..29d0997e68 --- /dev/null +++ b/tests/tcg/multiarch/linux/linux-madvise.c @@ -0,0 +1,70 @@ +#include +#include +#include +#include + +static void test_anonymous(void) +{ + int pagesize = getpagesize(); + char *page; + int ret; + + page = mmap(NULL, pagesize, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + assert(page != MAP_FAILED); + + /* Check that mprotect() does not interfere with MADV_DONTNEED. */ + ret = mprotect(page, pagesize, PROT_READ | PROT_WRITE); + assert(ret == 0); + + /* Check that MADV_DONTNEED clears the page. */ + *page = 42; + ret = madvise(page, pagesize, MADV_DONTNEED); + assert(ret == 0); + assert(*page == 0); + + ret = munmap(page, pagesize); + assert(ret == 0); +} + +static void test_file(void) +{ + char tempname[] = "/tmp/.cmadviseXXXXXX"; + int pagesize = getpagesize(); + ssize_t written; + char c = 42; + char *page; + int ret; + int fd; + + fd = mkstemp(tempname); + assert(fd != -1); + ret = unlink(tempname); + assert(ret == 0); + written = write(fd, &c, sizeof(c)); + assert(written == sizeof(c)); + page = mmap(NULL, pagesize, PROT_READ, MAP_PRIVATE, fd, 0); + assert(page != MAP_FAILED); + + /* Check that mprotect() does not interfere with MADV_DONTNEED. */ + ret = mprotect(page, pagesize, PROT_READ | PROT_WRITE); + assert(ret == 0); + + /* Check that MADV_DONTNEED resets the page. 
*/ + *page = 0; + ret = madvise(page, pagesize, MADV_DONTNEED); + assert(ret == 0); + assert(*page == c); + + ret = munmap(page, pagesize); + assert(ret == 0); + ret = close(fd); + assert(ret == 0); +} + +int main(void) +{ + test_anonymous(); + test_file(); + + return EXIT_SUCCESS; +} diff --git a/tests/tcg/multiarch/linux-test.c b/tests/tcg/multiarch/linux/linux-test.c similarity index 98% rename from tests/tcg/multiarch/linux-test.c rename to tests/tcg/multiarch/linux/linux-test.c index c8c6aeddeb..64f57cb287 100644 --- a/tests/tcg/multiarch/linux-test.c +++ b/tests/tcg/multiarch/linux/linux-test.c @@ -251,7 +251,7 @@ static void test_time(void) static int server_socket(void) { int val, fd; - struct sockaddr_in sockaddr; + struct sockaddr_in sockaddr = {}; /* server socket */ fd = chk_error(socket(PF_INET, SOCK_STREAM, 0)); @@ -263,7 +263,7 @@ static int server_socket(void) sockaddr.sin_port = htons(0); /* choose random ephemeral port) */ sockaddr.sin_addr.s_addr = 0; chk_error(bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr))); - chk_error(listen(fd, 0)); + chk_error(listen(fd, 1)); return fd; } @@ -271,7 +271,7 @@ static int server_socket(void) static int client_socket(uint16_t port) { int fd; - struct sockaddr_in sockaddr; + struct sockaddr_in sockaddr = {}; /* server socket */ fd = chk_error(socket(PF_INET, SOCK_STREAM, 0)); @@ -354,13 +354,17 @@ static void test_pipe(void) if (FD_ISSET(fds[0], &rfds)) { chk_error(read(fds[0], &ch, 1)); rcount++; - if (rcount >= WCOUNT_MAX) + if (rcount >= WCOUNT_MAX) { break; + } } if (FD_ISSET(fds[1], &wfds)) { ch = 'a'; chk_error(write(fds[1], &ch, 1)); wcount++; + if (wcount >= WCOUNT_MAX) { + break; + } } } } diff --git a/tests/tcg/multiarch/munmap-pthread.c b/tests/tcg/multiarch/munmap-pthread.c new file mode 100644 index 0000000000..d7143b00d5 --- /dev/null +++ b/tests/tcg/multiarch/munmap-pthread.c @@ -0,0 +1,79 @@ +/* Test that munmap() and thread creation do not race. */ +#include +#include +#include +#include +#include +#include +#include + +static const char nop_func[] = { +#if defined(__aarch64__) + 0xc0, 0x03, 0x5f, 0xd6, /* ret */ +#elif defined(__alpha__) + 0x01, 0x80, 0xFA, 0x6B, /* ret */ +#elif defined(__arm__) + 0x1e, 0xff, 0x2f, 0xe1, /* bx lr */ +#elif defined(__riscv) + 0x67, 0x80, 0x00, 0x00, /* ret */ +#elif defined(__s390__) + 0x07, 0xfe, /* br %r14 */ +#elif defined(__i386__) || defined(__x86_64__) + 0xc3, /* ret */ +#endif +}; + +static void *thread_mmap_munmap(void *arg) +{ + volatile bool *run = arg; + char *p; + int ret; + + while (*run) { + p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + assert(p != MAP_FAILED); + + /* Create a small translation block. */ + memcpy(p, nop_func, sizeof(nop_func)); + ((void(*)(void))p)(); + + ret = munmap(p, getpagesize()); + assert(ret == 0); + } + + return NULL; +} + +static void *thread_dummy(void *arg) +{ + return NULL; +} + +int main(void) +{ + pthread_t mmap_munmap, dummy; + volatile bool run = true; + int i, ret; + + /* Without a template, nothing to test. 
*/ + if (sizeof(nop_func) == 0) { + return EXIT_SUCCESS; + } + + ret = pthread_create(&mmap_munmap, NULL, thread_mmap_munmap, (void *)&run); + assert(ret == 0); + + for (i = 0; i < 1000; i++) { + ret = pthread_create(&dummy, NULL, thread_dummy, NULL); + assert(ret == 0); + ret = pthread_join(dummy, NULL); + assert(ret == 0); + } + + run = false; + ret = pthread_join(mmap_munmap, NULL); + assert(ret == 0); + + return EXIT_SUCCESS; +} diff --git a/tests/tcg/multiarch/noexec.c.inc b/tests/tcg/multiarch/noexec.c.inc new file mode 100644 index 0000000000..2ef539b721 --- /dev/null +++ b/tests/tcg/multiarch/noexec.c.inc @@ -0,0 +1,139 @@ +/* + * Common code for arch-specific MMU_INST_FETCH fault testing. + */ + +#define _GNU_SOURCE + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Forward declarations. */ + +static void *arch_mcontext_pc(const mcontext_t *ctx); +static int arch_mcontext_arg(const mcontext_t *ctx); +static void arch_flush(void *p, int len); + +/* Testing infrastructure. */ + +struct noexec_test { + const char *name; + const char *test_code; + int test_len; + int page_ofs; + int entry_ofs; + int expected_si_ofs; + int expected_pc_ofs; + int expected_arg; +}; + +static void *page_base; +static int page_size; +static const struct noexec_test *current_noexec_test; + +static void handle_err(const char *syscall) +{ + printf("[ FAILED ] %s: %s\n", syscall, strerror(errno)); + exit(EXIT_FAILURE); +} + +static void handle_segv(int sig, siginfo_t *info, void *ucontext) +{ + const struct noexec_test *test = current_noexec_test; + const mcontext_t *mc = &((ucontext_t *)ucontext)->uc_mcontext; + void *expected_si; + void *expected_pc; + void *pc; + int arg; + + if (test == NULL) { + printf("[ FAILED ] unexpected SEGV\n"); + exit(EXIT_FAILURE); + } + current_noexec_test = NULL; + + expected_si = page_base + test->expected_si_ofs; + if (info->si_addr != expected_si) { + printf("[ FAILED ] wrong si_addr (%p != %p)\n", + info->si_addr, expected_si); + exit(EXIT_FAILURE); + } + + pc = arch_mcontext_pc(mc); + expected_pc = page_base + test->expected_pc_ofs; + if (pc != expected_pc) { + printf("[ FAILED ] wrong pc (%p != %p)\n", pc, expected_pc); + exit(EXIT_FAILURE); + } + + arg = arch_mcontext_arg(mc); + if (arg != test->expected_arg) { + printf("[ FAILED ] wrong arg (%d != %d)\n", arg, test->expected_arg); + exit(EXIT_FAILURE); + } + + if (mprotect(page_base, page_size, + PROT_READ | PROT_WRITE | PROT_EXEC) < 0) { + handle_err("mprotect"); + } +} + +static void test_noexec_1(const struct noexec_test *test) +{ + void *start = page_base + test->page_ofs; + void (*fn)(int arg) = page_base + test->entry_ofs; + + memcpy(start, test->test_code, test->test_len); + arch_flush(start, test->test_len); + + /* Trigger TB creation in order to test invalidation. */ + fn(0); + + if (mprotect(page_base, page_size, PROT_NONE) < 0) { + handle_err("mprotect"); + } + + /* Trigger SEGV and check that handle_segv() ran. 
*/ + current_noexec_test = test; + fn(0); + assert(current_noexec_test == NULL); +} + +static int test_noexec(struct noexec_test *tests, size_t n_tests) +{ + struct sigaction act; + size_t i; + + memset(&act, 0, sizeof(act)); + act.sa_sigaction = handle_segv; + act.sa_flags = SA_SIGINFO; + if (sigaction(SIGSEGV, &act, NULL) < 0) { + handle_err("sigaction"); + } + + page_size = getpagesize(); + page_base = mmap(NULL, 2 * page_size, + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + if (page_base == MAP_FAILED) { + handle_err("mmap"); + } + page_base += page_size; + + for (i = 0; i < n_tests; i++) { + struct noexec_test *test = &tests[i]; + + printf("[ RUN ] %s\n", test->name); + test_noexec_1(test); + printf("[ OK ]\n"); + } + + printf("[ PASSED ]\n"); + return EXIT_SUCCESS; +} diff --git a/tests/tcg/multiarch/overflow.c b/tests/tcg/multiarch/overflow.c new file mode 100644 index 0000000000..1c59c2cb70 --- /dev/null +++ b/tests/tcg/multiarch/overflow.c @@ -0,0 +1,58 @@ +#include + +int overflow_add_32(int x, int y) +{ + int res; + return __builtin_add_overflow(x, y, &res); +} + +int overflow_add_64(long long x, long long y) +{ + long long res; + return __builtin_add_overflow(x, y, &res); +} + +int overflow_sub_32(int x, int y) +{ + int res; + return __builtin_sub_overflow(x, y, &res); +} + +int overflow_sub_64(long long x, long long y) +{ + long long res; + return __builtin_sub_overflow(x, y, &res); +} + +int a1_add = -2147483648; +int b1_add = -2147483648; +long long a2_add = -9223372036854775808ULL; +long long b2_add = -9223372036854775808ULL; + +int a1_sub; +int b1_sub = -2147483648; +long long a2_sub = 0L; +long long b2_sub = -9223372036854775808ULL; + +int main() +{ + int ret = 0; + + if (!overflow_add_32(a1_add, b1_add)) { + fprintf(stderr, "data overflow while adding 32 bits\n"); + ret = 1; + } + if (!overflow_add_64(a2_add, b2_add)) { + fprintf(stderr, "data overflow while adding 64 bits\n"); + ret = 1; + } + if (!overflow_sub_32(a1_sub, b1_sub)) { + fprintf(stderr, "data overflow while subtracting 32 bits\n"); + ret = 1; + } + if (!overflow_sub_64(a2_sub, b2_sub)) { + fprintf(stderr, "data overflow while subtracting 64 bits\n"); + ret = 1; + } + return ret; +} diff --git a/tests/tcg/multiarch/sha1.c b/tests/tcg/multiarch/sha1.c index 87bfbcdf52..0081bd7657 100644 --- a/tests/tcg/multiarch/sha1.c +++ b/tests/tcg/multiarch/sha1.c @@ -43,7 +43,6 @@ void SHA1Init(SHA1_CTX* context); void SHA1Update(SHA1_CTX* context, const unsigned char* data, uint32_t len); void SHA1Final(unsigned char digest[20], SHA1_CTX* context); /* ================ end of sha1.h ================ */ -#include #define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits)))) diff --git a/tests/tcg/multiarch/sha512.c b/tests/tcg/multiarch/sha512.c new file mode 100644 index 0000000000..e1729828b9 --- /dev/null +++ b/tests/tcg/multiarch/sha512.c @@ -0,0 +1,990 @@ +/* + * sha512 test based on CCAN: https://ccodearchive.net/info/crypto/sha512.html + * + * src/crypto/sha512.cpp commit f914f1a746d7f91951c1da262a4a749dd3ebfa71 + * Copyright (c) 2014 The Bitcoin Core developers + * Distributed under the MIT software license, see: + * http://www.opensource.org/licenses/mit-license.php. + * + * SPDX-License-Identifier: MIT CC0-1.0 + */ +#define _GNU_SOURCE /* See feature_test_macros(7) */ + +#include +#include +#include +#include +#include +#include + +/* Required portions from endian.h */ + +/** + * BSWAP_64 - reverse bytes in a constant uint64_t value. 
+ * @val: constantvalue whose bytes to swap. + * + * Designed to be usable in constant-requiring initializers. + * + * Example: + * struct mystruct { + * char buf[BSWAP_64(0xff00000000000000ULL)]; + * }; + */ +#define BSWAP_64(val) \ + ((((uint64_t)(val) & 0x00000000000000ffULL) << 56) \ + | (((uint64_t)(val) & 0x000000000000ff00ULL) << 40) \ + | (((uint64_t)(val) & 0x0000000000ff0000ULL) << 24) \ + | (((uint64_t)(val) & 0x00000000ff000000ULL) << 8) \ + | (((uint64_t)(val) & 0x000000ff00000000ULL) >> 8) \ + | (((uint64_t)(val) & 0x0000ff0000000000ULL) >> 24) \ + | (((uint64_t)(val) & 0x00ff000000000000ULL) >> 40) \ + | (((uint64_t)(val) & 0xff00000000000000ULL) >> 56)) + + +typedef uint64_t beint64_t; + +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + +/** + * CPU_TO_BE64 - convert a constant uint64_t value to big-endian + * @native: constant to convert + */ +#define CPU_TO_BE64(native) ((beint64_t)(native)) +/** + * BE64_TO_CPU - convert a big-endian uint64_t constant + * @le_val: big-endian constant to convert + */ +#define BE64_TO_CPU(le_val) ((uint64_t)(le_val)) + +#else /* ... HAVE_LITTLE_ENDIAN */ +#define CPU_TO_BE64(native) ((beint64_t)BSWAP_64(native)) +#define BE64_TO_CPU(le_val) BSWAP_64((uint64_t)le_val) +#endif /* HAVE_LITTE_ENDIAN */ + +/** + * cpu_to_be64 - convert a uint64_t value to big endian. + * @native: value to convert + */ +static inline beint64_t cpu_to_be64(uint64_t native) +{ + return CPU_TO_BE64(native); +} + +/** + * be64_to_cpu - convert a big-endian uint64_t value + * @be_val: big-endian value to convert + */ +static inline uint64_t be64_to_cpu(beint64_t be_val) +{ + return BE64_TO_CPU(be_val); +} + +/* From compiler.h */ + +#ifndef UNUSED +/** + * UNUSED - a parameter is unused + * + * Some compilers (eg. gcc with -W or -Wunused) warn about unused + * function parameters. This suppresses such warnings and indicates + * to the reader that it's deliberate. + * + * Example: + * // This is used as a callback, so needs to have this prototype. + * static int some_callback(void *unused UNUSED) + * { + * return 0; + * } + */ +#define UNUSED __attribute__((__unused__)) +#endif + +/* From sha512.h */ + +/** + * struct sha512 - structure representing a completed SHA512. + * @u.u8: an unsigned char array. + * @u.u64: a 64-bit integer array. + * + * Other fields may be added to the union in future. + */ +struct sha512 { + union { + uint64_t u64[8]; + unsigned char u8[64]; + } u; +}; + +/** + * sha512 - return sha512 of an object. + * @sha512: the sha512 to fill in + * @p: pointer to memory, + * @size: the number of bytes pointed to by @p + * + * The bytes pointed to by @p is SHA512 hashed into @sha512. This is + * equivalent to sha512_init(), sha512_update() then sha512_done(). + */ +void sha512(struct sha512 *sha, const void *p, size_t size); + +/** + * struct sha512_ctx - structure to store running context for sha512 + */ +struct sha512_ctx { + uint64_t s[8]; + union { + uint64_t u64[16]; + unsigned char u8[128]; + } buf; + size_t bytes; +}; + +/** + * sha512_init - initialize an SHA512 context. + * @ctx: the sha512_ctx to initialize + * + * This must be called before sha512_update or sha512_done, or + * alternately you can assign SHA512_INIT. + * + * If it was already initialized, this forgets anything which was + * hashed before. 
+ * + * Example: + * static void hash_all(const char **arr, struct sha512 *hash) + * { + * size_t i; + * struct sha512_ctx ctx; + * + * sha512_init(&ctx); + * for (i = 0; arr[i]; i++) + * sha512_update(&ctx, arr[i], strlen(arr[i])); + * sha512_done(&ctx, hash); + * } + */ +void sha512_init(struct sha512_ctx *ctx); + +/** + * SHA512_INIT - initializer for an SHA512 context. + * + * This can be used to statically initialize an SHA512 context (instead + * of sha512_init()). + * + * Example: + * static void hash_all(const char **arr, struct sha512 *hash) + * { + * size_t i; + * struct sha512_ctx ctx = SHA512_INIT; + * + * for (i = 0; arr[i]; i++) + * sha512_update(&ctx, arr[i], strlen(arr[i])); + * sha512_done(&ctx, hash); + * } + */ +#define SHA512_INIT \ + { { 0x6a09e667f3bcc908ull, 0xbb67ae8584caa73bull, \ + 0x3c6ef372fe94f82bull, 0xa54ff53a5f1d36f1ull, \ + 0x510e527fade682d1ull, 0x9b05688c2b3e6c1full, \ + 0x1f83d9abfb41bd6bull, 0x5be0cd19137e2179ull }, \ + { { 0 } }, 0 } + +/** + * sha512_update - include some memory in the hash. + * @ctx: the sha512_ctx to use + * @p: pointer to memory, + * @size: the number of bytes pointed to by @p + * + * You can call this multiple times to hash more data, before calling + * sha512_done(). + */ +void sha512_update(struct sha512_ctx *ctx, const void *p, size_t size); + +/** + * sha512_done - finish SHA512 and return the hash + * @ctx: the sha512_ctx to complete + * @res: the hash to return. + * + * Note that @ctx is *destroyed* by this, and must be reinitialized. + * To avoid that, pass a copy instead. + */ +void sha512_done(struct sha512_ctx *sha512, struct sha512 *res); + +/* From sha512.c */ + +/* + * SHA512 core code translated from the Bitcoin project's C++: + * + * src/crypto/sha512.cpp commit f914f1a746d7f91951c1da262a4a749dd3ebfa71 + * Copyright (c) 2014 The Bitcoin Core developers + * Distributed under the MIT software license, see the accompanying + * file COPYING or http://www.opensource.org/licenses/mit-license.php. + */ +/* #include */ +/* #include */ +#include +#include +#include + +static void invalidate_sha512(struct sha512_ctx *ctx) +{ + ctx->bytes = (size_t)-1; +} + +static void check_sha512(struct sha512_ctx *ctx UNUSED) +{ + assert(ctx->bytes != (size_t)-1); +} + +static uint64_t Ch(uint64_t x, uint64_t y, uint64_t z) +{ + return z ^ (x & (y ^ z)); +} +static uint64_t Maj(uint64_t x, uint64_t y, uint64_t z) +{ + return (x & y) | (z & (x | y)); +} +static uint64_t Sigma0(uint64_t x) +{ + return (x >> 28 | x << 36) ^ (x >> 34 | x << 30) ^ (x >> 39 | x << 25); +} +static uint64_t Sigma1(uint64_t x) +{ + return (x >> 14 | x << 50) ^ (x >> 18 | x << 46) ^ (x >> 41 | x << 23); +} +static uint64_t sigma0(uint64_t x) +{ + return (x >> 1 | x << 63) ^ (x >> 8 | x << 56) ^ (x >> 7); +} +static uint64_t sigma1(uint64_t x) +{ + return (x >> 19 | x << 45) ^ (x >> 61 | x << 3) ^ (x >> 6); +} + +/** One round of SHA-512. */ +static void Round(uint64_t a, uint64_t b, uint64_t c, uint64_t *d, uint64_t e, uint64_t f, uint64_t g, uint64_t *h, uint64_t k, uint64_t w) +{ + uint64_t t1 = *h + Sigma1(e) + Ch(e, f, g) + k + w; + uint64_t t2 = Sigma0(a) + Maj(a, b, c); + *d += t1; + *h = t1 + t2; +} + +/** Perform one SHA-512 transformation, processing a 128-byte chunk. 
*/ +static void Transform(uint64_t *s, const uint64_t *chunk) +{ + uint64_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4], f = s[5], g = s[6], h = s[7]; + uint64_t w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15; + + Round(a, b, c, &d, e, f, g, &h, 0x428a2f98d728ae22ull, w0 = be64_to_cpu(chunk[0])); + Round(h, a, b, &c, d, e, f, &g, 0x7137449123ef65cdull, w1 = be64_to_cpu(chunk[1])); + Round(g, h, a, &b, c, d, e, &f, 0xb5c0fbcfec4d3b2full, w2 = be64_to_cpu(chunk[2])); + Round(f, g, h, &a, b, c, d, &e, 0xe9b5dba58189dbbcull, w3 = be64_to_cpu(chunk[3])); + Round(e, f, g, &h, a, b, c, &d, 0x3956c25bf348b538ull, w4 = be64_to_cpu(chunk[4])); + Round(d, e, f, &g, h, a, b, &c, 0x59f111f1b605d019ull, w5 = be64_to_cpu(chunk[5])); + Round(c, d, e, &f, g, h, a, &b, 0x923f82a4af194f9bull, w6 = be64_to_cpu(chunk[6])); + Round(b, c, d, &e, f, g, h, &a, 0xab1c5ed5da6d8118ull, w7 = be64_to_cpu(chunk[7])); + Round(a, b, c, &d, e, f, g, &h, 0xd807aa98a3030242ull, w8 = be64_to_cpu(chunk[8])); + Round(h, a, b, &c, d, e, f, &g, 0x12835b0145706fbeull, w9 = be64_to_cpu(chunk[9])); + Round(g, h, a, &b, c, d, e, &f, 0x243185be4ee4b28cull, w10 = be64_to_cpu(chunk[10])); + Round(f, g, h, &a, b, c, d, &e, 0x550c7dc3d5ffb4e2ull, w11 = be64_to_cpu(chunk[11])); + Round(e, f, g, &h, a, b, c, &d, 0x72be5d74f27b896full, w12 = be64_to_cpu(chunk[12])); + Round(d, e, f, &g, h, a, b, &c, 0x80deb1fe3b1696b1ull, w13 = be64_to_cpu(chunk[13])); + Round(c, d, e, &f, g, h, a, &b, 0x9bdc06a725c71235ull, w14 = be64_to_cpu(chunk[14])); + Round(b, c, d, &e, f, g, h, &a, 0xc19bf174cf692694ull, w15 = be64_to_cpu(chunk[15])); + + Round(a, b, c, &d, e, f, g, &h, 0xe49b69c19ef14ad2ull, w0 += sigma1(w14) + w9 + sigma0(w1)); + Round(h, a, b, &c, d, e, f, &g, 0xefbe4786384f25e3ull, w1 += sigma1(w15) + w10 + sigma0(w2)); + Round(g, h, a, &b, c, d, e, &f, 0x0fc19dc68b8cd5b5ull, w2 += sigma1(w0) + w11 + sigma0(w3)); + Round(f, g, h, &a, b, c, d, &e, 0x240ca1cc77ac9c65ull, w3 += sigma1(w1) + w12 + sigma0(w4)); + Round(e, f, g, &h, a, b, c, &d, 0x2de92c6f592b0275ull, w4 += sigma1(w2) + w13 + sigma0(w5)); + Round(d, e, f, &g, h, a, b, &c, 0x4a7484aa6ea6e483ull, w5 += sigma1(w3) + w14 + sigma0(w6)); + Round(c, d, e, &f, g, h, a, &b, 0x5cb0a9dcbd41fbd4ull, w6 += sigma1(w4) + w15 + sigma0(w7)); + Round(b, c, d, &e, f, g, h, &a, 0x76f988da831153b5ull, w7 += sigma1(w5) + w0 + sigma0(w8)); + Round(a, b, c, &d, e, f, g, &h, 0x983e5152ee66dfabull, w8 += sigma1(w6) + w1 + sigma0(w9)); + Round(h, a, b, &c, d, e, f, &g, 0xa831c66d2db43210ull, w9 += sigma1(w7) + w2 + sigma0(w10)); + Round(g, h, a, &b, c, d, e, &f, 0xb00327c898fb213full, w10 += sigma1(w8) + w3 + sigma0(w11)); + Round(f, g, h, &a, b, c, d, &e, 0xbf597fc7beef0ee4ull, w11 += sigma1(w9) + w4 + sigma0(w12)); + Round(e, f, g, &h, a, b, c, &d, 0xc6e00bf33da88fc2ull, w12 += sigma1(w10) + w5 + sigma0(w13)); + Round(d, e, f, &g, h, a, b, &c, 0xd5a79147930aa725ull, w13 += sigma1(w11) + w6 + sigma0(w14)); + Round(c, d, e, &f, g, h, a, &b, 0x06ca6351e003826full, w14 += sigma1(w12) + w7 + sigma0(w15)); + Round(b, c, d, &e, f, g, h, &a, 0x142929670a0e6e70ull, w15 += sigma1(w13) + w8 + sigma0(w0)); + + Round(a, b, c, &d, e, f, g, &h, 0x27b70a8546d22ffcull, w0 += sigma1(w14) + w9 + sigma0(w1)); + Round(h, a, b, &c, d, e, f, &g, 0x2e1b21385c26c926ull, w1 += sigma1(w15) + w10 + sigma0(w2)); + Round(g, h, a, &b, c, d, e, &f, 0x4d2c6dfc5ac42aedull, w2 += sigma1(w0) + w11 + sigma0(w3)); + Round(f, g, h, &a, b, c, d, &e, 0x53380d139d95b3dfull, w3 += sigma1(w1) + w12 + sigma0(w4)); + Round(e, f, 
g, &h, a, b, c, &d, 0x650a73548baf63deull, w4 += sigma1(w2) + w13 + sigma0(w5)); + Round(d, e, f, &g, h, a, b, &c, 0x766a0abb3c77b2a8ull, w5 += sigma1(w3) + w14 + sigma0(w6)); + Round(c, d, e, &f, g, h, a, &b, 0x81c2c92e47edaee6ull, w6 += sigma1(w4) + w15 + sigma0(w7)); + Round(b, c, d, &e, f, g, h, &a, 0x92722c851482353bull, w7 += sigma1(w5) + w0 + sigma0(w8)); + Round(a, b, c, &d, e, f, g, &h, 0xa2bfe8a14cf10364ull, w8 += sigma1(w6) + w1 + sigma0(w9)); + Round(h, a, b, &c, d, e, f, &g, 0xa81a664bbc423001ull, w9 += sigma1(w7) + w2 + sigma0(w10)); + Round(g, h, a, &b, c, d, e, &f, 0xc24b8b70d0f89791ull, w10 += sigma1(w8) + w3 + sigma0(w11)); + Round(f, g, h, &a, b, c, d, &e, 0xc76c51a30654be30ull, w11 += sigma1(w9) + w4 + sigma0(w12)); + Round(e, f, g, &h, a, b, c, &d, 0xd192e819d6ef5218ull, w12 += sigma1(w10) + w5 + sigma0(w13)); + Round(d, e, f, &g, h, a, b, &c, 0xd69906245565a910ull, w13 += sigma1(w11) + w6 + sigma0(w14)); + Round(c, d, e, &f, g, h, a, &b, 0xf40e35855771202aull, w14 += sigma1(w12) + w7 + sigma0(w15)); + Round(b, c, d, &e, f, g, h, &a, 0x106aa07032bbd1b8ull, w15 += sigma1(w13) + w8 + sigma0(w0)); + + Round(a, b, c, &d, e, f, g, &h, 0x19a4c116b8d2d0c8ull, w0 += sigma1(w14) + w9 + sigma0(w1)); + Round(h, a, b, &c, d, e, f, &g, 0x1e376c085141ab53ull, w1 += sigma1(w15) + w10 + sigma0(w2)); + Round(g, h, a, &b, c, d, e, &f, 0x2748774cdf8eeb99ull, w2 += sigma1(w0) + w11 + sigma0(w3)); + Round(f, g, h, &a, b, c, d, &e, 0x34b0bcb5e19b48a8ull, w3 += sigma1(w1) + w12 + sigma0(w4)); + Round(e, f, g, &h, a, b, c, &d, 0x391c0cb3c5c95a63ull, w4 += sigma1(w2) + w13 + sigma0(w5)); + Round(d, e, f, &g, h, a, b, &c, 0x4ed8aa4ae3418acbull, w5 += sigma1(w3) + w14 + sigma0(w6)); + Round(c, d, e, &f, g, h, a, &b, 0x5b9cca4f7763e373ull, w6 += sigma1(w4) + w15 + sigma0(w7)); + Round(b, c, d, &e, f, g, h, &a, 0x682e6ff3d6b2b8a3ull, w7 += sigma1(w5) + w0 + sigma0(w8)); + Round(a, b, c, &d, e, f, g, &h, 0x748f82ee5defb2fcull, w8 += sigma1(w6) + w1 + sigma0(w9)); + Round(h, a, b, &c, d, e, f, &g, 0x78a5636f43172f60ull, w9 += sigma1(w7) + w2 + sigma0(w10)); + Round(g, h, a, &b, c, d, e, &f, 0x84c87814a1f0ab72ull, w10 += sigma1(w8) + w3 + sigma0(w11)); + Round(f, g, h, &a, b, c, d, &e, 0x8cc702081a6439ecull, w11 += sigma1(w9) + w4 + sigma0(w12)); + Round(e, f, g, &h, a, b, c, &d, 0x90befffa23631e28ull, w12 += sigma1(w10) + w5 + sigma0(w13)); + Round(d, e, f, &g, h, a, b, &c, 0xa4506cebde82bde9ull, w13 += sigma1(w11) + w6 + sigma0(w14)); + Round(c, d, e, &f, g, h, a, &b, 0xbef9a3f7b2c67915ull, w14 += sigma1(w12) + w7 + sigma0(w15)); + Round(b, c, d, &e, f, g, h, &a, 0xc67178f2e372532bull, w15 += sigma1(w13) + w8 + sigma0(w0)); + + Round(a, b, c, &d, e, f, g, &h, 0xca273eceea26619cull, w0 += sigma1(w14) + w9 + sigma0(w1)); + Round(h, a, b, &c, d, e, f, &g, 0xd186b8c721c0c207ull, w1 += sigma1(w15) + w10 + sigma0(w2)); + Round(g, h, a, &b, c, d, e, &f, 0xeada7dd6cde0eb1eull, w2 += sigma1(w0) + w11 + sigma0(w3)); + Round(f, g, h, &a, b, c, d, &e, 0xf57d4f7fee6ed178ull, w3 += sigma1(w1) + w12 + sigma0(w4)); + Round(e, f, g, &h, a, b, c, &d, 0x06f067aa72176fbaull, w4 += sigma1(w2) + w13 + sigma0(w5)); + Round(d, e, f, &g, h, a, b, &c, 0x0a637dc5a2c898a6ull, w5 += sigma1(w3) + w14 + sigma0(w6)); + Round(c, d, e, &f, g, h, a, &b, 0x113f9804bef90daeull, w6 += sigma1(w4) + w15 + sigma0(w7)); + Round(b, c, d, &e, f, g, h, &a, 0x1b710b35131c471bull, w7 += sigma1(w5) + w0 + sigma0(w8)); + Round(a, b, c, &d, e, f, g, &h, 0x28db77f523047d84ull, w8 += sigma1(w6) + w1 + sigma0(w9)); + Round(h, a, b, &c, d, e, f, &g, 
0x32caab7b40c72493ull, w9 += sigma1(w7) + w2 + sigma0(w10)); + Round(g, h, a, &b, c, d, e, &f, 0x3c9ebe0a15c9bebcull, w10 += sigma1(w8) + w3 + sigma0(w11)); + Round(f, g, h, &a, b, c, d, &e, 0x431d67c49c100d4cull, w11 += sigma1(w9) + w4 + sigma0(w12)); + Round(e, f, g, &h, a, b, c, &d, 0x4cc5d4becb3e42b6ull, w12 += sigma1(w10) + w5 + sigma0(w13)); + Round(d, e, f, &g, h, a, b, &c, 0x597f299cfc657e2aull, w13 += sigma1(w11) + w6 + sigma0(w14)); + Round(c, d, e, &f, g, h, a, &b, 0x5fcb6fab3ad6faecull, w14 + sigma1(w12) + w7 + sigma0(w15)); + Round(b, c, d, &e, f, g, h, &a, 0x6c44198c4a475817ull, w15 + sigma1(w13) + w8 + sigma0(w0)); + + s[0] += a; + s[1] += b; + s[2] += c; + s[3] += d; + s[4] += e; + s[5] += f; + s[6] += g; + s[7] += h; +} + +static bool alignment_ok(const void *p UNUSED, size_t n UNUSED) +{ +#if HAVE_UNALIGNED_ACCESS + return true; +#else + return ((size_t)p % n == 0); +#endif +} + +static void add(struct sha512_ctx *ctx, const void *p, size_t len) +{ + const unsigned char *data = p; + size_t bufsize = ctx->bytes % 128; + + if (bufsize + len >= 128) { + /* Fill the buffer, and process it. */ + memcpy(ctx->buf.u8 + bufsize, data, 128 - bufsize); + ctx->bytes += 128 - bufsize; + data += 128 - bufsize; + len -= 128 - bufsize; + Transform(ctx->s, ctx->buf.u64); + bufsize = 0; + } + + while (len >= 128) { + /* Process full chunks directly from the source. */ + if (alignment_ok(data, sizeof(uint64_t))) + Transform(ctx->s, (const uint64_t *)data); + else { + memcpy(ctx->buf.u8, data, sizeof(ctx->buf)); + Transform(ctx->s, ctx->buf.u64); + } + ctx->bytes += 128; + data += 128; + len -= 128; + } + + if (len) { + /* Fill the buffer with what remains. */ + memcpy(ctx->buf.u8 + bufsize, data, len); + ctx->bytes += len; + } +} + +void sha512_init(struct sha512_ctx *ctx) +{ + struct sha512_ctx init = SHA512_INIT; + *ctx = init; +} + +void sha512_update(struct sha512_ctx *ctx, const void *p, size_t size) +{ + check_sha512(ctx); + add(ctx, p, size); +} + +void sha512_done(struct sha512_ctx *ctx, struct sha512 *res) +{ + static const unsigned char pad[128] = { 0x80 }; + uint64_t sizedesc[2] = { 0, 0 }; + size_t i; + + sizedesc[1] = cpu_to_be64((uint64_t)ctx->bytes << 3); + + /* Add '1' bit to terminate, then all 0 bits, up to next block - 16. */ + add(ctx, pad, 1 + ((256 - 16 - (ctx->bytes % 128) - 1) % 128)); + /* Add number of bits of data (big endian) */ + add(ctx, sizedesc, sizeof(sizedesc)); + for (i = 0; i < sizeof(ctx->s) / sizeof(ctx->s[0]); i++) + res->u.u64[i] = cpu_to_be64(ctx->s[i]); + invalidate_sha512(ctx); +} + +void sha512(struct sha512 *sha, const void *p, size_t size) +{ + struct sha512_ctx ctx; + + sha512_init(&ctx); + sha512_update(&ctx, p, size); + sha512_done(&ctx, sha); +} + +/* From hex.h */ +/** + * hex_decode - Unpack a hex string. + * @str: the hexidecimal string + * @slen: the length of @str + * @buf: the buffer to write the data into + * @bufsize: the length of @buf + * + * Returns false if there are any characters which aren't 0-9, a-f or A-F, + * of the string wasn't the right length for @bufsize. 
+ * + * Example: + * unsigned char data[20]; + * + * if (!hex_decode(argv[1], strlen(argv[1]), data, 20)) + * printf("String is malformed!\n"); + */ +bool hex_decode(const char *str, size_t slen, void *buf, size_t bufsize); + +/** + * hex_encode - Create a nul-terminated hex string + * @buf: the buffer to read the data from + * @bufsize: the length of @buf + * @dest: the string to fill + * @destsize: the max size of the string + * + * Returns true if the string, including terminator, fit in @destsize; + * + * Example: + * unsigned char buf[] = { 0x1F, 0x2F }; + * char str[5]; + * + * if (!hex_encode(buf, sizeof(buf), str, sizeof(str))) + * abort(); + */ +bool hex_encode(const void *buf, size_t bufsize, char *dest, size_t destsize); + +/** + * hex_str_size - Calculate how big a nul-terminated hex string is + * @bytes: bytes of data to represent + * + * Example: + * unsigned char buf[] = { 0x1F, 0x2F }; + * char str[hex_str_size(sizeof(buf))]; + * + * hex_encode(buf, sizeof(buf), str, sizeof(str)); + */ +static inline size_t hex_str_size(size_t bytes) +{ + return 2 * bytes + 1; +} + +/* From hex.c */ +static bool char_to_hex(unsigned char *val, char c) +{ + if (c >= '0' && c <= '9') { + *val = c - '0'; + return true; + } + if (c >= 'a' && c <= 'f') { + *val = c - 'a' + 10; + return true; + } + if (c >= 'A' && c <= 'F') { + *val = c - 'A' + 10; + return true; + } + return false; +} + +bool hex_decode(const char *str, size_t slen, void *buf, size_t bufsize) +{ + unsigned char v1, v2; + unsigned char *p = buf; + + while (slen > 1) { + if (!char_to_hex(&v1, str[0]) || !char_to_hex(&v2, str[1])) + return false; + if (!bufsize) + return false; + *(p++) = (v1 << 4) | v2; + str += 2; + slen -= 2; + bufsize--; + } + return slen == 0 && bufsize == 0; +} + +static char hexchar(unsigned int val) +{ + if (val < 10) + return '0' + val; + if (val < 16) + return 'a' + val - 10; + abort(); +} + +bool hex_encode(const void *buf, size_t bufsize, char *dest, size_t destsize) +{ + size_t i; + + if (destsize < hex_str_size(bufsize)) + return false; + + for (i = 0; i < bufsize; i++) { + unsigned int c = ((const unsigned char *)buf)[i]; + *(dest++) = hexchar(c >> 4); + *(dest++) = hexchar(c & 0xF); + } + *dest = '\0'; + + return true; +} + +/* From tap.h */ +/** + * plan_tests - announce the number of tests you plan to run + * @tests: the number of tests + * + * This should be the first call in your test program: it allows tracing + * of failures which mean that not all tests are run. + * + * If you don't know how many tests will actually be run, assume all of them + * and use skip() if you don't actually run some tests. + * + * Example: + * plan_tests(13); + */ +void plan_tests(unsigned int tests); + +/** + * ok1 - Simple conditional test + * @e: the expression which we expect to be true. + * + * This is the simplest kind of test: if the expression is true, the + * test passes. The name of the test which is printed will simply be + * file name, line number, and the expression itself. + * + * Example: + * ok1(somefunc() == 1); + */ +# define ok1(e) ((e) ? \ + _gen_result(1, __func__, __FILE__, __LINE__, "%s", #e) : \ + _gen_result(0, __func__, __FILE__, __LINE__, "%s", #e)) + +/** + * exit_status - the value that main should return. + * + * For maximum compatibility your test program should return a particular exit + * code (ie. 0 if all tests were run, and every test which was expected to + * succeed succeeded). 
+ * + * Example: + * exit(exit_status()); + */ +int exit_status(void); + +/** + * tap_fail_callback - function to call when we fail + * + * This can be used to ease debugging, or exit on the first failure. + */ +void (*tap_fail_callback)(void); + +/* From tap.c */ + +static int no_plan = 0; +static int skip_all = 0; +static int have_plan = 0; +static unsigned int test_count = 0; /* Number of tests that have been run */ +static unsigned int e_tests = 0; /* Expected number of tests to run */ +static unsigned int failures = 0; /* Number of tests that failed */ +static char *todo_msg = NULL; +static const char *todo_msg_fixed = "libtap malloc issue"; +static int todo = 0; +static int test_died = 0; +static int test_pid; + +static void +_expected_tests(unsigned int tests) +{ + printf("1..%d\n", tests); + e_tests = tests; +} + +static void +diagv(const char *fmt, va_list ap) +{ + fputs("# ", stdout); + vfprintf(stdout, fmt, ap); + fputs("\n", stdout); +} + +static void +_diag(const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + diagv(fmt, ap); + va_end(ap); +} + +/* + * Generate a test result. + * + * ok -- boolean, indicates whether or not the test passed. + * test_name -- the name of the test, may be NULL + * test_comment -- a comment to print afterwards, may be NULL + */ +unsigned int +_gen_result(int ok, const char *func, const char *file, unsigned int line, + const char *test_name, ...) +{ + va_list ap; + char *local_test_name = NULL; + char *c; + int name_is_digits; + + test_count++; + + /* Start by taking the test name and performing any printf() + expansions on it */ + if(test_name != NULL) { + va_start(ap, test_name); + if (vasprintf(&local_test_name, test_name, ap) < 0) + local_test_name = NULL; + va_end(ap); + + /* Make sure the test name contains more than digits + and spaces. Emit an error message and exit if it + does */ + if(local_test_name) { + name_is_digits = 1; + for(c = local_test_name; *c != '\0'; c++) { + if(!isdigit((unsigned char)*c) + && !isspace((unsigned char)*c)) { + name_is_digits = 0; + break; + } + } + + if(name_is_digits) { + _diag(" You named your test '%s'. You shouldn't use numbers for your test names.", local_test_name); + _diag(" Very confusing."); + } + } + } + + if(!ok) { + printf("not "); + failures++; + } + + printf("ok %d", test_count); + + if(test_name != NULL) { + printf(" - "); + + /* Print the test name, escaping any '#' characters it + might contain */ + if(local_test_name != NULL) { + flockfile(stdout); + for(c = local_test_name; *c != '\0'; c++) { + if(*c == '#') + fputc('\\', stdout); + fputc((int)*c, stdout); + } + funlockfile(stdout); + } else { /* vasprintf() failed, use a fixed message */ + printf("%s", todo_msg_fixed); + } + } + + /* If we're in a todo_start() block then flag the test as being + TODO. todo_msg should contain the message to print at this + point. If it's NULL then asprintf() failed, and we should + use the fixed message. + + This is not counted as a failure, so decrement the counter if + the test failed. */ + if(todo) { + printf(" # TODO %s", todo_msg ? todo_msg : todo_msg_fixed); + if(!ok) + failures--; + } + + printf("\n"); + + if(!ok) + _diag(" Failed %stest (%s:%s() at line %d)", + todo ? "(TODO) " : "", file, func, line); + + free(local_test_name); + + if (!ok && tap_fail_callback) + tap_fail_callback(); + + /* We only care (when testing) that ok is positive, but here we + specifically only want to return 1 or 0 */ + return ok ? 
1 : 0; +} + +/* + * Cleanup at the end of the run, produce any final output that might be + * required. + */ +static void +_cleanup(void) +{ + /* If we forked, don't do cleanup in child! */ + if (getpid() != test_pid) + return; + + /* If plan_no_plan() wasn't called, and we don't have a plan, + and we're not skipping everything, then something happened + before we could produce any output */ + if(!no_plan && !have_plan && !skip_all) { + _diag("Looks like your test died before it could output anything."); + return; + } + + if(test_died) { + _diag("Looks like your test died just after %d.", test_count); + return; + } + + + /* No plan provided, but now we know how many tests were run, and can + print the header at the end */ + if(!skip_all && (no_plan || !have_plan)) { + printf("1..%d\n", test_count); + } + + if((have_plan && !no_plan) && e_tests < test_count) { + _diag("Looks like you planned %d tests but ran %d extra.", + e_tests, test_count - e_tests); + return; + } + + if((have_plan || !no_plan) && e_tests > test_count) { + _diag("Looks like you planned %d tests but only ran %d.", + e_tests, test_count); + if(failures) { + _diag("Looks like you failed %d tests of %d run.", + failures, test_count); + } + return; + } + + if(failures) + _diag("Looks like you failed %d tests of %d.", + failures, test_count); + +} + +/* + * Initialise the TAP library. Will only do so once, however many times it's + * called. + */ +static void +_tap_init(void) +{ + static int run_once = 0; + + if(!run_once) { + test_pid = getpid(); + atexit(_cleanup); + + /* stdout needs to be unbuffered so that the output appears + in the same place relative to stderr output as it does + with Test::Harness */ +// setbuf(stdout, 0); + run_once = 1; + } +} + +/* + * Note the number of tests that will be run. + */ +void +plan_tests(unsigned int tests) +{ + + _tap_init(); + + if(have_plan != 0) { + fprintf(stderr, "You tried to plan twice!\n"); + test_died = 1; + exit(255); + } + + if(tests == 0) { + fprintf(stderr, "You said to run 0 tests! You've got to run something.\n"); + test_died = 1; + exit(255); + } + + have_plan = 1; + + _expected_tests(tests); +} + +static int +exit_status_(void) +{ + int r; + + /* If there's no plan, just return the number of failures */ + if(no_plan || !have_plan) { + return failures; + } + + /* Ran too many tests? Return the number of tests that were run + that shouldn't have been */ + if(e_tests < test_count) { + r = test_count - e_tests; + return r; + } + + /* Return the number of tests that failed + the number of tests + that weren't run */ + r = failures + e_tests - test_count; + + return r; +} + +int +exit_status(void) +{ + int r = exit_status_(); + if (r > 255) + r = 255; + return r; +} + +/* From run-test-vectors.c */ + +/* Test vectors. 
*/ +struct test { + const char *vector; + size_t repetitions; + const char *expected; +}; + +static const char ZEROES[] = + "0000000000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000000000"; + +static struct test tests[] = { + /* http://csrc.nist.gov/groups/STM/cavp/secure-hashing.html ShortMsg */ + { "21", 1, + "3831a6a6155e509dee59a7f451eb35324d8f8f2df6e3708894740f98fdee2388" + "9f4de5adb0c5010dfb555cda77c8ab5dc902094c52de3278f35a75ebc25f093a" }, + { "9083", 1, + "55586ebba48768aeb323655ab6f4298fc9f670964fc2e5f2731e34dfa4b0c09e" + "6e1e12e3d7286b3145c61c2047fb1a2a1297f36da64160b31fa4c8c2cddd2fb4" }, + { "0a55db", 1, + "7952585e5330cb247d72bae696fc8a6b0f7d0804577e347d99bc1b11e52f3849" + "85a428449382306a89261ae143c2f3fb613804ab20b42dc097e5bf4a96ef919b" }, + { "23be86d5", 1, + "76d42c8eadea35a69990c63a762f330614a4699977f058adb988f406fb0be8f2" + "ea3dce3a2bbd1d827b70b9b299ae6f9e5058ee97b50bd4922d6d37ddc761f8eb" }, + { "eb0ca946c1", 1, + "d39ecedfe6e705a821aee4f58bfc489c3d9433eb4ac1b03a97e321a2586b40dd" + "0522f40fa5aef36afff591a78c916bfc6d1ca515c4983dd8695b1ec7951d723e" }, + { "38667f39277b", 1, + "85708b8ff05d974d6af0801c152b95f5fa5c06af9a35230c5bea2752f031f9bd" + "84bd844717b3add308a70dc777f90813c20b47b16385664eefc88449f04f2131" }, + { "b39f71aaa8a108", 1, + "258b8efa05b4a06b1e63c7a3f925c5ef11fa03e3d47d631bf4d474983783d8c0" + "b09449009e842fc9fa15de586c67cf8955a17d790b20f41dadf67ee8cdcdfce6" }, + { "dc28484ebfd293d62ac759d5754bdf502423e4d419fa79020805134b2ce3dff7" + "38c7556c91d810adbad8dd210f041296b73c2185d4646c97fc0a5b69ed49ac8c" + "7ced0bd1cfd7e3c3cca47374d189247da6811a40b0ab097067ed4ad40ade2e47" + "91e39204e398b3204971445822a1be0dd93af8", 1, + "615115d2e8b62e345adaa4bdb95395a3b4fe27d71c4a111b86c1841463c5f03d" + "6b20d164a39948ab08ae060720d05c10f6022e5c8caf2fa3bca2e04d9c539ded" }, + { "fd2203e467574e834ab07c9097ae164532f24be1eb5d88f1af7748ceff0d2c67" + "a21f4e4097f9d3bb4e9fbf97186e0db6db0100230a52b453d421f8ab9c9a6043" + "aa3295ea20d2f06a2f37470d8a99075f1b8a8336f6228cf08b5942fc1fb4299c" + "7d2480e8e82bce175540bdfad7752bc95b577f229515394f3ae5cec870a4b2f8", + 1, + "a21b1077d52b27ac545af63b32746c6e3c51cb0cb9f281eb9f3580a6d4996d5c" + "9917d2a6e484627a9d5a06fa1b25327a9d710e027387fc3e07d7c4d14c6086cc" }, + /* http://www.di-mgt.com.au/sha_testvectors.html */ + { ZEROES, 1, + "7be9fda48f4179e611c698a73cff09faf72869431efee6eaad14de0cb44bbf66" + "503f752b7a8eb17083355f3ce6eb7d2806f236b25af96a24e22b887405c20081" } +}; + +static void *xmalloc(size_t size) +{ + char * ret; + ret = malloc(size); + if (ret == NULL) { + perror("malloc"); + abort(); + } + return ret; +} + +static bool do_test(const struct test *t) +{ + struct sha512 h; + char got[128 + 1]; + bool passed; + size_t i, vector_len = strlen(t->vector) / 2; + void *vector = xmalloc(vector_len); + + hex_decode(t->vector, vector_len * 2, vector, vector_len); + + for (i = 0; i < t->repetitions; i++) { + sha512(&h, vector, vector_len); + if (t->repetitions > 1) + memcpy(vector, &h, sizeof(h)); + } + + hex_encode(&h, sizeof(h), got, sizeof(got)); + + passed = strcmp(t->expected, got) == 0; + free(vector); + return passed; +} + +int main(void) +{ + const size_t num_tests = sizeof(tests) / sizeof(tests[0]); + size_t i; + + /* This is how many tests you plan to run */ + plan_tests(num_tests); + + for (i = 0; i < num_tests; i++) + ok1(do_test(&tests[i])); + + /* This exits depending on whether all tests passed */ + return exit_status(); +} diff --git a/tests/tcg/multiarch/sigbus.c 
b/tests/tcg/multiarch/sigbus.c new file mode 100644 index 0000000000..8134c5fd56 --- /dev/null +++ b/tests/tcg/multiarch/sigbus.c @@ -0,0 +1,68 @@ +#define _GNU_SOURCE 1 + +#include +#include +#include +#include + + +unsigned long long x = 0x8877665544332211ull; +void * volatile p = (void *)&x + 1; + +void sigbus(int sig, siginfo_t *info, void *uc) +{ + assert(sig == SIGBUS); + assert(info->si_signo == SIGBUS); +#ifdef BUS_ADRALN + assert(info->si_code == BUS_ADRALN); +#endif + assert(info->si_addr == p); + exit(EXIT_SUCCESS); +} + +int main() +{ + struct sigaction sa = { + .sa_sigaction = sigbus, + .sa_flags = SA_SIGINFO + }; + int allow_fail = 0; + int tmp; + + tmp = sigaction(SIGBUS, &sa, NULL); + assert(tmp == 0); + + /* + * Select an operation that's likely to enforce alignment. + * On many guests that support unaligned accesses by default, + * this is often an atomic operation. + */ +#if defined(__aarch64__) + asm volatile("ldxr %w0,[%1]" : "=r"(tmp) : "r"(p) : "memory"); +#elif defined(__alpha__) + asm volatile("ldl_l %0,0(%1)" : "=r"(tmp) : "r"(p) : "memory"); +#elif defined(__arm__) + asm volatile("ldrex %0,[%1]" : "=r"(tmp) : "r"(p) : "memory"); +#elif defined(__powerpc__) + asm volatile("lwarx %0,0,%1" : "=r"(tmp) : "r"(p) : "memory"); +#elif defined(__riscv_atomic) + asm volatile("lr.w %0,(%1)" : "=r"(tmp) : "r"(p) : "memory"); +#else + /* No insn known to fault unaligned -- try for a straight load. */ + allow_fail = 1; + tmp = *(volatile int *)p; +#endif + + assert(allow_fail); + + /* + * We didn't see a signal. + * We might as well validate the unaligned load worked. + */ + if (BYTE_ORDER == LITTLE_ENDIAN) { + assert(tmp == 0x55443322); + } else { + assert(tmp == 0x77665544); + } + return EXIT_SUCCESS; +} diff --git a/tests/tcg/multiarch/system/Makefile.softmmu-target b/tests/tcg/multiarch/system/Makefile.softmmu-target index 625ed792c6..368b64d531 100644 --- a/tests/tcg/multiarch/system/Makefile.softmmu-target +++ b/tests/tcg/multiarch/system/Makefile.softmmu-target @@ -25,7 +25,7 @@ run-gdbstub-memory: memory --qargs \ "-monitor none -display none -chardev file$(COMMA)path=$<.out$(COMMA)id=output $(QEMU_OPTS)" \ --bin $< --test $(MULTIARCH_SRC)/gdbstub/memory.py, \ - "softmmu gdbstub support") + softmmu gdbstub support) else run-gdbstub-%: diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c index 41c7f66e2e..214f7d4f54 100644 --- a/tests/tcg/multiarch/system/memory.c +++ b/tests/tcg/multiarch/system/memory.c @@ -12,7 +12,7 @@ * - sign extension when loading */ -#include +#include #include #include diff --git a/tests/tcg/nios2/10m50-ghrd.ld b/tests/tcg/nios2/10m50-ghrd.ld new file mode 100644 index 0000000000..71cdda450c --- /dev/null +++ b/tests/tcg/nios2/10m50-ghrd.ld @@ -0,0 +1,70 @@ +/* + * Link script for the Nios2 10m50-ghrd board. + * + * Copyright Linaro Ltd 2022 + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +MEMORY +{ + tpf (rx) : ORIGIN = 0xc0000000, LENGTH = 1K + ram (rwx) : ORIGIN = 0xc8000000, LENGTH = 128M +} + +PHDRS +{ + RAM PT_LOAD; +} + +ENTRY(_start) +EXTERN(_start) +EXTERN(_interrupt) +EXTERN(_fast_tlb_miss) + +SECTIONS +{ + /* Begin at the (hardcoded) _interrupt entry point. 
*/ + .text 0xc8000120 : { + *(.text.intr) + *(.text .text.* .gnu.linkonce.t.*) + } >ram :RAM + + .rodata : ALIGN(4) { + *(.rodata .rodata.* .gnu.linkonce.r.*) + } > ram :RAM + + .eh_frame_hdr : ALIGN (4) { + KEEP (*(.eh_frame_hdr)) + *(.eh_frame_entry .eh_frame_entry.*) + } >ram :RAM + .eh_frame : ALIGN (4) { + KEEP (*(.eh_frame)) *(.eh_frame.*) + } >ram :RAM + + .data : ALIGN(4) { + *(.shdata) + *(.data .data.* .gnu.linkonce.d.*) + } >ram :RAM + + HIDDEN (_gp = ALIGN(16) + 0x7ff0); + PROVIDE_HIDDEN (gp = _gp); + .got : ALIGN(4) { + *(.got.plt) *(.igot.plt) *(.got) *(.igot) + } >ram :RAM + + .sdata : ALIGN(4) { + *(.sdata .sdata.* .gnu.linkonce.s.*) + } >ram :RAM + + .bss : ALIGN(4) { + __bss_start = ABSOLUTE(.); + *(.sbss .sbss.* .gnu.linkonce.sb.*) + *(.scommon) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + . = ALIGN(4); + __bss_end = ABSOLUTE(.); + } >ram :RAM + + __stack = ORIGIN(ram) + LENGTH(ram); +} diff --git a/tests/tcg/nios2/Makefile.softmmu-target b/tests/tcg/nios2/Makefile.softmmu-target new file mode 100644 index 0000000000..bc7fd55060 --- /dev/null +++ b/tests/tcg/nios2/Makefile.softmmu-target @@ -0,0 +1,32 @@ +# +# Nios2 system tests +# +# Copyright Linaro Ltd 2022 +# SPDX-License-Identifier: GPL-2.0-or-later +# + +NIOS2_SYSTEM_SRC = $(SRC_PATH)/tests/tcg/nios2 +VPATH += $(NIOS2_SYSTEM_SRC) + +# These objects provide the basic boot code and helper functions for all tests +CRT_OBJS = boot.o intr.o $(MINILIB_OBJS) +LINK_SCRIPT = $(NIOS2_SYSTEM_SRC)/10m50-ghrd.ld + +CFLAGS += -nostdlib -g -O0 $(MINILIB_INC) +LDFLAGS += -Wl,-T$(LINK_SCRIPT) -static -nostdlib $(CRT_OBJS) -lgcc + +%.o: %.S + $(call quiet-command, $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -x assembler-with-cpp -c $< -o $@, AS, $@) + +%.o: %.c + $(call quiet-command, $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -c $< -o $@, CC, $@) + +# Build and link the tests +%: %.o $(LINK_SCRIPT) $(CRT_OBJS) + $(call quiet-command, $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS), LD, $@) + +QEMU_OPTS = -M 10m50-ghrd,vic=on -semihosting-config enable=on,target=native,chardev=output -kernel + +memory: CFLAGS+=-DCHECK_UNALIGNED=0 +TESTS += $(MULTIARCH_TESTS) +TESTS += test-shadow-1 diff --git a/tests/tcg/nios2/boot.S b/tests/tcg/nios2/boot.S new file mode 100644 index 0000000000..f6771cbc81 --- /dev/null +++ b/tests/tcg/nios2/boot.S @@ -0,0 +1,218 @@ +/* + * Minimal Nios2 system boot code. + * + * Copyright Linaro Ltd 2022 + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "semicall.h" + + .text + .set noat + +_start: + /* Linker script defines stack at end of ram. */ + movia sp, __stack + + /* Install trampoline to _fast_tlb_miss at hardcoded vector. */ + movia r4, 0xc0000100 + movia r5, _ftm_tramp + movi r6, .L__ftm_end - _ftm_tramp + call memcpy + + /* Zero the bss to satisfy C. */ + movia r4, __bss_start + movia r6, __bss_end + sub r6, r6, r4 + movi r5, 0 + call memset + + /* Test! */ + call main + + /* Exit with main's return value. */ + movi r4, HOSTED_EXIT + mov r5, r2 + semihosting_call + + .globl _start + .type _start, @function + .size _start, . - _start + +_ftm_tramp: + movia et, _fast_tlb_miss + jmp et +.L__ftm_end: + + .type _ftm_tramp, @function + .size _ftm_tramp, . - _ftm_tramp + +#define dst r4 +#define src r5 +#define len r6 + +memcpy: + /* Store return value right away, per API */ + mov r2, dst + + /* Check for both dst and src aligned. */ + or at, dst, src + andi at, at, 3 + bne at, zero, .L_mc_test1 + + /* Copy blocks of 8. 
*/ + + movi at, 8 + bltu len, at, .L_mc_test4 + +.L_mc_loop8: + ldw r8, 0(src) + ldw r9, 4(src) + addi src, src, 8 + addi dst, dst, 8 + subi len, len, 8 + stw r8, -8(dst) + stw r9, -4(dst) + bgeu len, at, .L_mc_loop8 + + /* Copy final aligned block of 4. */ + +.L_mc_test4: + movi at, 4 + bltu len, at, .L_mc_test1 + + ldw r8, 0(src) + addi src, src, 4 + addi dst, dst, 4 + subi len, len, 4 + stw r8, -4(dst) + + /* Copy single bytes to finish. */ + +.L_mc_test1: + beq len, zero, .L_mc_done + +.L_mc_loop1: + ldb r8, 0(src) + addi src, src, 1 + addi dst, dst, 1 + subi len, len, 1 + stb r8, -1(dst) + bne len, zero, .L_mc_loop1 + +.L_mc_done: + ret + +#undef dst +#undef src +#undef len + + .global memcpy + .type memcpy, @function + .size memcpy, . - memcpy + +#define dst r4 +#define val r5 +#define len r6 + +memset: + /* Store return value right away, per API */ + mov r2, dst + + /* Check for small blocks; fall back to bytewise. */ + movi r3, 8 + bltu len, r3, .L_ms_test1 + + /* Replicate the byte across the word. */ + andi val, val, 0xff + slli at, val, 8 + or val, val, at + slli at, val, 16 + or val, val, at + + /* Check for destination alignment; realign if needed. */ + andi at, dst, 3 + bne at, zero, .L_ms_align + + /* Set blocks of 8. */ + +.L_ms_loop8: + stw val, 0(dst) + stw val, 4(dst) + addi dst, dst, 8 + subi len, len, 8 + bgeu len, r3, .L_ms_loop8 + + /* Set final aligned block of 4. */ + +.L_ms_test4: + movi at, 4 + bltu len, at, .L_ms_test1 + + stw r8, 0(dst) + addi dst, dst, 4 + subi len, len, 4 + stw r8, -4(dst) + + /* Set single bytes to finish. */ + +.L_ms_test1: + beq len, zero, .L_ms_done + +.L_ms_loop1: + stb r8, 0(dst) + addi dst, dst, 1 + subi len, len, 1 + bne len, zero, .L_ms_loop1 + +.L_ms_done: + ret + + /* Realign for a large block, len >= 8. */ +.L_ms_align: + andi at, dst, 1 + beq at, zero, 2f + + stb val, 0(dst) + addi dst, dst, 1 + subi len, len, 1 + +2: andi at, dst, 2 + beq at, zero, 4f + + sth val, 0(dst) + addi dst, dst, 2 + subi len, len, 2 + +4: bgeu len, r3, .L_ms_loop8 + br .L_ms_test4 + +#undef dst +#undef val +#undef len + + .global memset + .type memset, @function + .size memset, . - memset + +/* + * void __sys_outc(char c); + */ +__sys_outc: + subi sp, sp, 16 + stb r4, 0(sp) /* buffer[0] = c */ + movi at, 1 + stw at, 4(sp) /* STDOUT_FILENO */ + stw sp, 8(sp) /* buffer */ + stw at, 12(sp) /* len */ + + movi r4, HOSTED_WRITE + addi r5, sp, 4 + semihosting_call + + addi sp, sp, 16 + ret + + .global __sys_outc + .type __sys_outc, @function + .size __sys_outc, . - __sys_outc diff --git a/tests/tcg/nios2/intr.S b/tests/tcg/nios2/intr.S new file mode 100644 index 0000000000..c1730692ba --- /dev/null +++ b/tests/tcg/nios2/intr.S @@ -0,0 +1,31 @@ +/* + * Minimal Nios2 system boot code -- exit on interrupt. + * + * Copyright Linaro Ltd 2022 + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "semicall.h" + + .section .text.intr, "ax" + .global _interrupt + .type _interrupt, @function + +_interrupt: + rdctl r5, exception /* extract exception.CAUSE */ + srli r5, r5, 2 + movi r4, HOSTED_EXIT + semihosting_call + + .size _interrupt, . - _interrupt + + .text + .global _fast_tlb_miss + .type _fast_tlb_miss, @function + +_fast_tlb_miss: + movi r5, 32 + movi r4, HOSTED_EXIT + semihosting_call + + .size _fast_tlb_miss, . - _fast_tlb_miss diff --git a/tests/tcg/nios2/semicall.h b/tests/tcg/nios2/semicall.h new file mode 100644 index 0000000000..6ad4978099 --- /dev/null +++ b/tests/tcg/nios2/semicall.h @@ -0,0 +1,28 @@ +/* + * Nios2 semihosting interface. 
+ * + * Copyright Linaro Ltd 2022 + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef SEMICALL_H +#define SEMICALL_H + +#define HOSTED_EXIT 0 +#define HOSTED_INIT_SIM 1 +#define HOSTED_OPEN 2 +#define HOSTED_CLOSE 3 +#define HOSTED_READ 4 +#define HOSTED_WRITE 5 +#define HOSTED_LSEEK 6 +#define HOSTED_RENAME 7 +#define HOSTED_UNLINK 8 +#define HOSTED_STAT 9 +#define HOSTED_FSTAT 10 +#define HOSTED_GETTIMEOFDAY 11 +#define HOSTED_ISATTY 12 +#define HOSTED_SYSTEM 13 + +#define semihosting_call break 1 + +#endif /* SEMICALL_H */ diff --git a/tests/tcg/nios2/test-shadow-1.S b/tests/tcg/nios2/test-shadow-1.S new file mode 100644 index 0000000000..79ef69db12 --- /dev/null +++ b/tests/tcg/nios2/test-shadow-1.S @@ -0,0 +1,40 @@ +/* + * Regression test for TCG indirect global lowering. + * + * Copyright Linaro Ltd 2022 + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "semicall.h" + + .text + .set noat + .align 2 + .globl main + .type main, @function + +main: + /* Initialize r0 in shadow register set 1. */ + movhi at, 1 /* PRS=1, CRS=0, RSIE=0, PIE=0 */ + wrctl status, at + wrprs zero, zero + + /* Change current register set to 1. */ + movi at, 1 << 10 /* PRS=0, CRS=1, RSIE=0, PIE=0 */ + wrctl estatus, at + movia ea, 1f + eret + + /* Load address for callr, then end TB. */ +1: movia at, 3f + br 2f + + /* Test case! TCG abort on indirect lowering across brcond. */ +2: callr at + + /* exit(0) */ +3: movi r4, HOSTED_EXIT + movi r5, 0 + semihosting_call + + .size main, . - main diff --git a/tests/tcg/ppc64/Makefile.target b/tests/tcg/ppc64/Makefile.target index a6a4ddaeca..f081f1c683 100644 --- a/tests/tcg/ppc64/Makefile.target +++ b/tests/tcg/ppc64/Makefile.target @@ -3,24 +3,37 @@ # ppc64 specific tweaks VPATH += $(SRC_PATH)/tests/tcg/ppc64 -VPATH += $(SRC_PATH)/tests/tcg/ppc64le -ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_POWER8_VECTOR),) -PPC64_TESTS=bcdsub +config-cc.mak: Makefile + $(quiet-@)( \ + $(call cc-option,-mpower8-vector, CROSS_CC_HAS_POWER8_VECTOR); \ + $(call cc-option,-mpower10, CROSS_CC_HAS_POWER10)) 3> config-cc.mak + +-include config-cc.mak + +ifneq ($(CROSS_CC_HAS_POWER8_VECTOR),) +PPC64_TESTS=bcdsub non_signalling_xscv endif -bcdsub: CFLAGS += -mpower8-vector +$(PPC64_TESTS): CFLAGS += -mpower8-vector -PPC64_TESTS += byte_reverse -ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_POWER10),) +PPC64_TESTS += mtfsf +PPC64_TESTS += mffsce + +ifneq ($(CROSS_CC_HAS_POWER10),) +PPC64_TESTS += byte_reverse sha512-vector +endif +byte_reverse: CFLAGS += -mcpu=power10 run-byte_reverse: QEMU_OPTS+=-cpu POWER10 run-plugin-byte_reverse-with-%: QEMU_OPTS+=-cpu POWER10 -else -byte_reverse: - $(call skip-test, "BUILD of $@", "missing compiler support") -run-byte_reverse: - $(call skip-test, "RUN of byte_reverse", "not built") -run-plugin-byte_reverse-with-%: - $(call skip-test, "RUN of byte_reverse ($*)", "not built") -endif + +sha512-vector: CFLAGS +=-mcpu=power10 -O3 +sha512-vector: sha512.c + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) + +run-sha512-vector: QEMU_OPTS+=-cpu POWER10 +run-plugin-sha512-vector-with-%: QEMU_OPTS+=-cpu POWER10 + +PPC64_TESTS += signal_save_restore_xer +PPC64_TESTS += xxspltw TESTS += $(PPC64_TESTS) diff --git a/tests/tcg/ppc64/bcdsub.c b/tests/tcg/ppc64/bcdsub.c new file mode 100644 index 0000000000..87c8c44a44 --- /dev/null +++ b/tests/tcg/ppc64/bcdsub.c @@ -0,0 +1,134 @@ +#include +#include +#include +#include + +#define CRF_LT (1 << 3) +#define CRF_GT (1 << 2) +#define CRF_EQ (1 << 1) +#define CRF_SO (1 << 0) +#define UNDEF 0 + +#ifdef __has_builtin 
+#if !__has_builtin(__builtin_bcdsub) +#define NO_BUILTIN_BCDSUB +#endif +#endif + +#ifdef NO_BUILTIN_BCDSUB +#define BCDSUB(T, A, B, PS) \ + ".long 4 << 26 | (" #T ") << 21 | (" #A ") << 16 | (" #B ") << 11" \ + " | 1 << 10 | (" #PS ") << 9 | 65\n\t" +#else +#define BCDSUB(T, A, B, PS) "bcdsub. " #T ", " #A ", " #B ", " #PS "\n\t" +#endif + +#define TEST(AH, AL, BH, BL, PS, TH, TL, CR6) \ + do { \ + int cr = 0; \ + uint64_t th, tl; \ + /* \ + * Use GPR pairs to load the VSR values and place the resulting VSR and\ + * CR6 in th, tl, and cr. Note that we avoid newer instructions (e.g., \ + * mtvsrdd/mfvsrld) so we can run this test on POWER8 machines. \ + */ \ + asm ("mtvsrd 32, %3\n\t" \ + "mtvsrd 33, %4\n\t" \ + "xxmrghd 32, 32, 33\n\t" \ + "mtvsrd 33, %5\n\t" \ + "mtvsrd 34, %6\n\t" \ + "xxmrghd 33, 33, 34\n\t" \ + BCDSUB(0, 0, 1, PS) \ + "mfocrf %0, 0b10\n\t" \ + "mfvsrd %1, 32\n\t" \ + "xxswapd 32, 32\n\t" \ + "mfvsrd %2, 32\n\t" \ + : "=r" (cr), "=r" (th), "=r" (tl) \ + : "r" (AH), "r" (AL), "r" (BH), "r" (BL) \ + : "v0", "v1", "v2"); \ + if (TH != UNDEF || TL != UNDEF) { \ + assert(tl == TL); \ + assert(th == TH); \ + } \ + assert((cr >> 4) == CR6); \ + } while (0) + +/* + * Unbounded result is equal to zero: + * sign = (PS) ? 0b1111 : 0b1100 + * CR6 = 0b0010 + */ +void test_bcdsub_eq(void) +{ + /* maximum positive BCD value */ + TEST(0x9999999999999999, 0x999999999999999c, + 0x9999999999999999, 0x999999999999999c, + 0, 0x0, 0xc, CRF_EQ); + TEST(0x9999999999999999, 0x999999999999999c, + 0x9999999999999999, 0x999999999999999c, + 1, 0x0, 0xf, CRF_EQ); +} + +/* + * Unbounded result is greater than zero: + * sign = (PS) ? 0b1111 : 0b1100 + * CR6 = (overflow) ? 0b0101 : 0b0100 + */ +void test_bcdsub_gt(void) +{ + /* maximum positive and negative one BCD values */ + TEST(0x9999999999999999, 0x999999999999999c, 0x0, 0x1d, 0, + 0x0, 0xc, (CRF_GT | CRF_SO)); + TEST(0x9999999999999999, 0x999999999999999c, 0x0, 0x1d, 1, + 0x0, 0xf, (CRF_GT | CRF_SO)); + + TEST(0x9999999999999999, 0x999999999999998c, 0x0, 0x1d, 0, + 0x9999999999999999, 0x999999999999999c, CRF_GT); + TEST(0x9999999999999999, 0x999999999999998c, 0x0, 0x1d, 1, + 0x9999999999999999, 0x999999999999999f, CRF_GT); +} + +/* + * Unbounded result is less than zero: + * sign = 0b1101 + * CR6 = (overflow) ? 
0b1001 : 0b1000 + */ +void test_bcdsub_lt(void) +{ + /* positive zero and positive one BCD values */ + TEST(0x0, 0xc, 0x0, 0x1c, 0, 0x0, 0x1d, CRF_LT); + TEST(0x0, 0xc, 0x0, 0x1c, 1, 0x0, 0x1d, CRF_LT); + + /* maximum negative and positive one BCD values */ + TEST(0x9999999999999999, 0x999999999999999d, 0x0, 0x1c, 0, + 0x0, 0xd, (CRF_LT | CRF_SO)); + TEST(0x9999999999999999, 0x999999999999999d, 0x0, 0x1c, 1, + 0x0, 0xd, (CRF_LT | CRF_SO)); +} + +void test_bcdsub_invalid(void) +{ + TEST(0x0, 0x1c, 0x0, 0xf00, 0, UNDEF, UNDEF, CRF_SO); + TEST(0x0, 0x1c, 0x0, 0xf00, 1, UNDEF, UNDEF, CRF_SO); + + TEST(0x0, 0xf00, 0x0, 0x1c, 0, UNDEF, UNDEF, CRF_SO); + TEST(0x0, 0xf00, 0x0, 0x1c, 1, UNDEF, UNDEF, CRF_SO); + + TEST(0x0, 0xbad, 0x0, 0xf00, 0, UNDEF, UNDEF, CRF_SO); + TEST(0x0, 0xbad, 0x0, 0xf00, 1, UNDEF, UNDEF, CRF_SO); +} + +int main(void) +{ + struct sigaction action; + + action.sa_handler = _exit; + sigaction(SIGABRT, &action, NULL); + + test_bcdsub_eq(); + test_bcdsub_gt(); + test_bcdsub_lt(); + test_bcdsub_invalid(); + + return 0; +} diff --git a/tests/tcg/ppc64le/byte_reverse.c b/tests/tcg/ppc64/byte_reverse.c similarity index 100% rename from tests/tcg/ppc64le/byte_reverse.c rename to tests/tcg/ppc64/byte_reverse.c diff --git a/tests/tcg/ppc64/mffsce.c b/tests/tcg/ppc64/mffsce.c new file mode 100644 index 0000000000..20d882cb45 --- /dev/null +++ b/tests/tcg/ppc64/mffsce.c @@ -0,0 +1,37 @@ +#include +#include +#include + +#define MTFSF(FLM, FRB) asm volatile ("mtfsf %0, %1" :: "i" (FLM), "f" (FRB)) +#define MFFS(FRT) asm("mffs %0" : "=f" (FRT)) +#define MFFSCE(FRT) asm("mffsce %0" : "=f" (FRT)) + +#define PPC_BIT_NR(nr) (63 - (nr)) + +#define FP_VE (1ull << PPC_BIT_NR(56)) +#define FP_UE (1ull << PPC_BIT_NR(58)) +#define FP_ZE (1ull << PPC_BIT_NR(59)) +#define FP_XE (1ull << PPC_BIT_NR(60)) +#define FP_NI (1ull << PPC_BIT_NR(61)) +#define FP_RN1 (1ull << PPC_BIT_NR(63)) + +int main(void) +{ + uint64_t frt, fpscr; + uint64_t test_value = FP_VE | FP_UE | FP_ZE | + FP_XE | FP_NI | FP_RN1; + MTFSF(0b11111111, test_value); /* set test value to cpu fpscr */ + MFFSCE(frt); + MFFS(fpscr); /* read the value that mffsce stored to cpu fpscr */ + + /* the returned value should be as the cpu fpscr was before */ + assert((frt & 0xff) == test_value); + + /* + * the cpu fpscr last 3 bits should be unchanged + * and enable bits should be unset + */ + assert((fpscr & 0xff) == (test_value & 0x7)); + + return 0; +} diff --git a/tests/tcg/ppc64/mtfsf.c b/tests/tcg/ppc64/mtfsf.c new file mode 100644 index 0000000000..bed5b1afa4 --- /dev/null +++ b/tests/tcg/ppc64/mtfsf.c @@ -0,0 +1,60 @@ +#include +#include +#include +#include +#include + +#define MTFSF(FLM, FRB) asm volatile ("mtfsf %0, %1" :: "i" (FLM), "f" (FRB)) +#define MFFS(FRT) asm("mffs %0" : "=f" (FRT)) + +#define FPSCR_VE 7 /* Floating-point invalid operation exception enable */ +#define FPSCR_VXSOFT 10 /* Floating-point invalid operation exception (soft) */ +#define FPSCR_FI 17 /* Floating-point fraction inexact */ + +#define FP_VE (1ull << FPSCR_VE) +#define FP_VXSOFT (1ull << FPSCR_VXSOFT) +#define FP_FI (1ull << FPSCR_FI) + +void sigfpe_handler(int sig, siginfo_t *si, void *ucontext) +{ + if (si->si_code == FPE_FLTINV) { + exit(0); + } + exit(1); +} + +int main(void) +{ + uint64_t fpscr; + + struct sigaction sa = { + .sa_sigaction = sigfpe_handler, + .sa_flags = SA_SIGINFO + }; + + /* + * Enable the MSR bits F0 and F1 to enable exceptions. 
+ * This shouldn't be needed in linux-user as these bits are enabled by
+ * default, but this allows to execute either in a VM or a real machine
+ * to compare the behaviors.
+ */
+ prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
+
+ /* First test if the FI bit is being set correctly */
+ MTFSF(0b11111111, FP_FI);
+ MFFS(fpscr);
+ assert((fpscr & FP_FI) != 0);
+
+ /* Then test if the deferred exception is being called correctly */
+ sigaction(SIGFPE, &sa, NULL);
+
+ /*
+ * Although the VXSOFT exception has been chosen, based on test in a Power9
+ * any combination of exception bit + its enabling bit should work.
+ * But if a different exception is chosen si_code check should
+ * change accordingly.
+ */
+ MTFSF(0b11111111, FP_VE | FP_VXSOFT);
+
+ return 1;
+}
diff --git a/tests/tcg/ppc64/non_signalling_xscv.c b/tests/tcg/ppc64/non_signalling_xscv.c
new file mode 100644
index 0000000000..836df71ef0
--- /dev/null
+++ b/tests/tcg/ppc64/non_signalling_xscv.c
@@ -0,0 +1,37 @@
+#include <assert.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#define TEST(INSN, B_HI, B_LO, T_HI, T_LO) \
+ do { \
+ uint64_t th, tl, bh = B_HI, bl = B_LO; \
+ asm("mtvsrd 32, %2\n\t" \
+ "mtvsrd 33, %3\n\t" \
+ "xxmrghd 32, 32, 33\n\t" \
+ INSN " 32, 32\n\t" \
+ "mfvsrd %0, 32\n\t" \
+ "xxswapd 32, 32\n\t" \
+ "mfvsrd %1, 32\n\t" \
+ : "=r" (th), "=r" (tl) \
+ : "r" (bh), "r" (bl) \
+ : "v0", "v1"); \
+ printf(INSN "(0x%016" PRIx64 "%016" PRIx64 ") = 0x%016" PRIx64 \
+ "%016" PRIx64 "\n", bh, bl, th, tl); \
+ assert(th == T_HI && tl == T_LO); \
+ } while (0)
+
+int main(void)
+{
+ /* SNaN shouldn't be silenced */
+ TEST("xscvspdpn", 0x7fbfffff00000000ULL, 0x0, 0x7ff7ffffe0000000ULL, 0x0);
+ TEST("xscvdpspn", 0x7ff7ffffffffffffULL, 0x0, 0x7fbfffff7fbfffffULL, 0x0);
+
+ /*
+ * SNaN inputs having no significant bits in the upper 23 bits of the
+ * significand will return Infinity as the result.
+ */
+ TEST("xscvdpspn", 0x7ff000001fffffffULL, 0x0, 0x7f8000007f800000ULL, 0x0);
+
+ return 0;
+}
diff --git a/tests/tcg/ppc64/signal_save_restore_xer.c b/tests/tcg/ppc64/signal_save_restore_xer.c
new file mode 100644
index 0000000000..9227f4f455
--- /dev/null
+++ b/tests/tcg/ppc64/signal_save_restore_xer.c
@@ -0,0 +1,42 @@
+#include <assert.h>
+#include <stdint.h>
+#include <signal.h>
+#include <ucontext.h>
+
+#define XER_SO (1 << 31)
+#define XER_OV (1 << 30)
+#define XER_CA (1 << 29)
+#define XER_OV32 (1 << 19)
+#define XER_CA32 (1 << 18)
+
+uint64_t saved;
+
+void sigtrap_handler(int sig, siginfo_t *si, void *ucontext)
+{
+ ucontext_t *uc = ucontext;
+ uc->uc_mcontext.regs->nip += 4;
+ saved = uc->uc_mcontext.regs->xer;
+ uc->uc_mcontext.regs->xer |= XER_OV | XER_OV32;
+}
+
+int main(void)
+{
+ uint64_t initial = XER_CA | XER_CA32, restored;
+ struct sigaction sa = {
+ .sa_sigaction = sigtrap_handler,
+ .sa_flags = SA_SIGINFO
+ };
+
+ sigaction(SIGTRAP, &sa, NULL);
+
+ asm("mtspr 1, %1\n\t"
+ "trap\n\t"
+ "mfspr %0, 1\n\t"
+ : "=r" (restored)
+ : "r" (initial));
+
+ assert(saved == initial);
+ assert(restored == (XER_OV | XER_OV32 | XER_CA | XER_CA32));
+
+ return 0;
+}
diff --git a/tests/tcg/ppc64/xxspltw.c b/tests/tcg/ppc64/xxspltw.c
new file mode 100644
index 0000000000..4cff78bfdc
--- /dev/null
+++ b/tests/tcg/ppc64/xxspltw.c
@@ -0,0 +1,46 @@
+#include <assert.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#define WORD_A 0xAAAAAAAAUL
+#define WORD_B 0xBBBBBBBBUL
+#define WORD_C 0xCCCCCCCCUL
+#define WORD_D 0xDDDDDDDDUL
+
+#define DWORD_HI (WORD_A << 32 | WORD_B)
+#define DWORD_LO (WORD_C << 32 | WORD_D)
+
+#define TEST(HI, LO, UIM, RES) \
+ do { \
+ union { \
+ uint64_t u; \
+ double f; \
+ } h = { .u = HI }, l = { .u = LO }; \
+ /* \
+ * Use a pair of FPRs to load the VSR avoiding insns \
+ * newer than xxswapd.
\ + */ \ + asm("xxmrghd 32, %0, %1\n\t" \ + "xxspltw 32, 32, %2\n\t" \ + "xxmrghd %0, 32, %0\n\t" \ + "xxswapd 32, 32\n\t" \ + "xxmrghd %1, 32, %1\n\t" \ + : "+f" (h.f), "+f" (l.f) \ + : "i" (UIM) \ + : "v0"); \ + printf("xxspltw(0x%016" PRIx64 "%016" PRIx64 ", %d) =" \ + " %016" PRIx64 "%016" PRIx64 "\n", HI, LO, UIM, \ + h.u, l.u); \ + assert(h.u == (RES)); \ + assert(l.u == (RES)); \ + } while (0) + +int main(void) +{ + TEST(DWORD_HI, DWORD_LO, 0, WORD_A << 32 | WORD_A); + TEST(DWORD_HI, DWORD_LO, 1, WORD_B << 32 | WORD_B); + TEST(DWORD_HI, DWORD_LO, 2, WORD_C << 32 | WORD_C); + TEST(DWORD_HI, DWORD_LO, 3, WORD_D << 32 | WORD_D); + return 0; +} diff --git a/tests/tcg/ppc64le/Makefile.target b/tests/tcg/ppc64le/Makefile.target index c0c14ffbad..daad5118a5 100644 --- a/tests/tcg/ppc64le/Makefile.target +++ b/tests/tcg/ppc64le/Makefile.target @@ -4,22 +4,4 @@ VPATH += $(SRC_PATH)/tests/tcg/ppc64le -ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_POWER8_VECTOR),) -PPC64LE_TESTS=bcdsub -endif -bcdsub: CFLAGS += -mpower8-vector - -PPC64LE_TESTS += byte_reverse -ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_POWER10),) -run-byte_reverse: QEMU_OPTS+=-cpu POWER10 -run-plugin-byte_reverse-with-%: QEMU_OPTS+=-cpu POWER10 -else -byte_reverse: - $(call skip-test, "BUILD of $@", "missing compiler support") -run-byte_reverse: - $(call skip-test, "RUN of byte_reverse", "not built") -run-plugin-byte_reverse-with-%: - $(call skip-test, "RUN of byte_reverse ($*)", "not built") -endif - -TESTS += $(PPC64LE_TESTS) +include $(SRC_PATH)/tests/tcg/ppc64/Makefile.target diff --git a/tests/tcg/ppc64le/bcdsub.c b/tests/tcg/ppc64le/bcdsub.c deleted file mode 100644 index 8c188cae6d..0000000000 --- a/tests/tcg/ppc64le/bcdsub.c +++ /dev/null @@ -1,130 +0,0 @@ -#include -#include -#include - -#define CRF_LT (1 << 3) -#define CRF_GT (1 << 2) -#define CRF_EQ (1 << 1) -#define CRF_SO (1 << 0) -#define UNDEF 0 - -#define BCDSUB(vra, vrb, ps) \ - asm ("bcdsub. %1,%2,%3,%4;" \ - "mfocrf %0,0b10;" \ - : "=r" (cr), "=v" (vrt) \ - : "v" (vra), "v" (vrb), "i" (ps) \ - : ); - -#define TEST(vra, vrb, ps, exp_res, exp_cr6) \ - do { \ - __int128 vrt = 0; \ - int cr = 0; \ - BCDSUB(vra, vrb, ps); \ - if (exp_res) \ - assert(vrt == exp_res); \ - assert((cr >> 4) == exp_cr6); \ - } while (0) - - -/* - * Unbounded result is equal to zero: - * sign = (PS) ? 0b1111 : 0b1100 - * CR6 = 0b0010 - */ -void test_bcdsub_eq(void) -{ - __int128 a, b; - - /* maximum positive BCD value */ - a = b = (((__int128) 0x9999999999999999) << 64 | 0x999999999999999c); - - TEST(a, b, 0, 0xc, CRF_EQ); - TEST(a, b, 1, 0xf, CRF_EQ); -} - -/* - * Unbounded result is greater than zero: - * sign = (PS) ? 0b1111 : 0b1100 - * CR6 = (overflow) ? 0b0101 : 0b0100 - */ -void test_bcdsub_gt(void) -{ - __int128 a, b, c; - - /* maximum positive BCD value */ - a = (((__int128) 0x9999999999999999) << 64 | 0x999999999999999c); - - /* negative one BCD value */ - b = (__int128) 0x1d; - - TEST(a, b, 0, 0xc, (CRF_GT | CRF_SO)); - TEST(a, b, 1, 0xf, (CRF_GT | CRF_SO)); - - c = (((__int128) 0x9999999999999999) << 64 | 0x999999999999998c); - - TEST(c, b, 0, a, CRF_GT); - TEST(c, b, 1, (a | 0x3), CRF_GT); -} - -/* - * Unbounded result is less than zero: - * sign = 0b1101 - * CR6 = (overflow) ? 
0b1001 : 0b1000 - */ -void test_bcdsub_lt(void) -{ - __int128 a, b; - - /* positive zero BCD value */ - a = (__int128) 0xc; - - /* positive one BCD value */ - b = (__int128) 0x1c; - - TEST(a, b, 0, 0x1d, CRF_LT); - TEST(a, b, 1, 0x1d, CRF_LT); - - /* maximum negative BCD value */ - a = (((__int128) 0x9999999999999999) << 64 | 0x999999999999999d); - - /* positive one BCD value */ - b = (__int128) 0x1c; - - TEST(a, b, 0, 0xd, (CRF_LT | CRF_SO)); - TEST(a, b, 1, 0xd, (CRF_LT | CRF_SO)); -} - -void test_bcdsub_invalid(void) -{ - __int128 a, b; - - /* positive one BCD value */ - a = (__int128) 0x1c; - b = 0xf00; - - TEST(a, b, 0, UNDEF, CRF_SO); - TEST(a, b, 1, UNDEF, CRF_SO); - - TEST(b, a, 0, UNDEF, CRF_SO); - TEST(b, a, 1, UNDEF, CRF_SO); - - a = 0xbad; - - TEST(a, b, 0, UNDEF, CRF_SO); - TEST(a, b, 1, UNDEF, CRF_SO); -} - -int main(void) -{ - struct sigaction action; - - action.sa_handler = _exit; - sigaction(SIGABRT, &action, NULL); - - test_bcdsub_eq(); - test_bcdsub_gt(); - test_bcdsub_lt(); - test_bcdsub_invalid(); - - return 0; -} diff --git a/tests/tcg/ppc64le/float_convs.ref b/tests/tcg/ppc64le/float_convs.ref new file mode 100644 index 0000000000..6e6f636834 --- /dev/null +++ b/tests/tcg/ppc64le/float_convs.ref @@ -0,0 +1,748 @@ +### Rounding to nearest +from single: f32(-nan:0xffa00000) + to double: f64(-nan:0x00fff4000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to double: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 
(OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to 
double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fa00000) + to double: f64(nan:0x007ff4000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding upwards +from single: f32(-nan:0xffa00000) + to double: f64(-nan:0x00fff4000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to double: 
f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 
(OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fa00000) + to double: f64(nan:0x007ff4000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding downwards +from single: f32(-nan:0xffa00000) + to double: f64(-nan:0x00fff4000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: 
f32(0x0.00000000000000000000p+0:0000000000) + to double: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 
131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fa00000) + to double: f64(nan:0x007ff4000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding to zero +from single: f32(-nan:0xffa00000) + to double: f64(-nan:0x00fff4000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 
(INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to double: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: 
f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fa00000) + to double: f64(nan:0x007ff4000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) diff --git a/tests/tcg/ppc64le/float_madds.ref b/tests/tcg/ppc64le/float_madds.ref new file mode 100644 index 0000000000..e66917cb80 --- /dev/null +++ b/tests/tcg/ppc64le/float_madds.ref @@ -0,0 +1,768 @@ +### Rounding to nearest +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffe00000) flags=INVALID (0/0) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffc00000) flags=INVALID (0/1) +op : f32(-inf:0xff800000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(-nan:0xffc00000) flags=INVALID (0/2) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(-nan:0xffc00000) flags=OK (1/0) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-nan:0xffc00000) +res: f32(-nan:0xffc00000) flags=OK (1/1) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffc00000) flags=OK (1/2) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(inf:0x7f800000) flags=OK (2/0) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-inf:0xff800000) +res: f32(-inf:0xff800000) flags=OK (2/1) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(inf:0x7f800000) flags=OK (2/2) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (3/0) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (3/1) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * 
f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (3/2) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (4/0) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) flags=INEXACT (4/1) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) flags=INEXACT (4/2) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(0x1.0c27fa00000000000000p+60:0x5d8613fd) flags=INEXACT (5/0) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) flags=INEXACT (5/1) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(0x1.26c46200000000000000p+34:0x50936231) flags=INEXACT (5/2) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(0x1.91f94000000000000000p-106:0x0ac8fca0) flags=INEXACT (6/0) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(-0x1.31f75000000000000000p-40:0xab98fba8) flags=INEXACT (6/1) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=INEXACT (6/2) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (7/0) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=OK (7/1) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (7/2) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (8/0) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (8/1) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(-0x0.00000000000000000000p+0:0x80000000) flags=UNDERFLOW INEXACT (8/2) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + 
f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=OK (9/0) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (9/1) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (9/2) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.ffffe600000000000000p-25:0x337ffff3) flags=INEXACT (10/0) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.ffffe600000000000000p-50:0x26fffff3) flags=INEXACT (10/1) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=INEXACT (10/2) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801a00000000000000p-15:0x387fc00d) flags=INEXACT (11/0) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.0007fe00000000000000p-25:0x330003ff) flags=INEXACT (11/1) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0001f200000000000000p-24:0x338000f9) flags=INEXACT (11/2) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00000c00000000000000p-14:0x38800006) flags=INEXACT (12/0) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0ffbf400000000000000p-24:0x3387fdfa) flags=INEXACT (12/1) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801c00000000000000p-15:0x387fc00e) flags=INEXACT (12/2) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00000000000000000000p+0:0x3f800000) flags=INEXACT (13/0) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ffc01800000000000000p-14:0x38ffe00c) flags=INEXACT (13/1) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.ffc01800000000000000p-14:0x38ffe00c) flags=INEXACT (13/2) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/0) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: 
f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/1) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00040200000000000000p+0:0x3f800201) flags=INEXACT (14/2) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/0) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.80400000000000000000p+1:0x40402000) flags=OK (15/1) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/2) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=OK (16/0) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=OK (16/1) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.2e23d200000000000000p+2:0x409711e9) flags=INEXACT (16/2) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.12804200000000000000p+3:0x41094021) flags=INEXACT (17/0) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.51458000000000000000p+3:0x4128a2c0) flags=INEXACT (17/1) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.200c0400000000000000p+3:0x41100602) flags=INEXACT (17/2) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ffcf1400000000000000p+15:0x477fe78a) flags=INEXACT (18/0) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.91ed3c00000000000000p+17:0x4848f69e) flags=INEXACT (18/1) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.5bc56000000000000000p+17:0x482de2b0) flags=INEXACT (18/2) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.08edf000000000000000p+18:0x488476f8) flags=INEXACT (19/0) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.ff7e0800000000000000p+31:0x4f7fbf04) flags=INEXACT (19/1) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.08ee7a00000000000000p+18:0x4884773d) flags=INEXACT (19/2) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * 
f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800800000000000000p+31:0x4f7fc004) flags=INEXACT (20/0) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ff840800000000000000p+31:0x4f7fc204) flags=INEXACT (20/1) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820800000000000000p+31:0x4f7fc104) flags=INEXACT (20/2) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff860800000000000000p+31:0x4f7fc304) flags=INEXACT (21/0) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820800000000000000p+32:0x4fffc104) flags=INEXACT (21/1) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800800000000000000p+32:0x4fffc004) flags=INEXACT (21/2) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff830800000000000000p+32:0x4fffc184) flags=INEXACT (22/0) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff7f8800000000000000p+33:0x507fbfc4) flags=INEXACT (22/1) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff840800000000000000p+32:0x4fffc204) flags=INEXACT (22/2) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.ff800800000000000000p+33:0x507fc004) flags=INEXACT (23/0) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff820800000000000000p+33:0x507fc104) flags=INEXACT (23/1) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff810800000000000000p+33:0x507fc084) flags=INEXACT (23/2) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(0x1.c0bab600000000000000p+99:0x71605d5b) flags=INEXACT (24/0) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.c0838000000000000000p+116:0x79e041c0) flags=INEXACT (24/1) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.c0829e00000000000000p+116:0x79e0414f) flags=INEXACT (24/2) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (25/0) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) 
+res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (25/1) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (25/2) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(inf:0x7f800000) flags=OK (26/0) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(inf:0x7f800000) flags=OK (26/1) +op : f32(inf:0x7f800000) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(inf:0x7f800000) flags=OK (26/2) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fc00000) flags=OK (27/0) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(nan:0x7fc00000) flags=OK (27/1) +op : f32(nan:0x7fc00000) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(nan:0x7fc00000) flags=OK (27/2) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/0) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(inf:0x7f800000) +res: f32(nan:0x7fc00000) flags=INVALID (28/1) +op : f32(nan:0x7fa00000) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/2) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(nan:0x7fc00000) flags=INVALID (29/0) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (29/1) +op : f32(-nan:0xffa00000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(-nan:0xffe00000) flags=INVALID (29/2) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/0) +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(nan:0x7fa00000) +res: f32(-nan:0xffe00000) flags=INVALID (30/1) +op : f32(-nan:0xffc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffc00000) flags=INVALID (30/2) +# LP184149 +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-1:0x3f000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=OK (31/0) +op : f32(0x1.00000000000000000000p-149:0x00000001) * f32(0x1.00000000000000000000p-149:0x00000001) + f32(0x1.00000000000000000000p-149:0x00000001) +res: f32(0x1.00000000000000000000p-149:0x00000001) flags=UNDERFLOW INEXACT (32/0) +### Rounding upwards +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffe00000) flags=INVALID (0/0) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffc00000) flags=INVALID (0/1) +op : f32(-inf:0xff800000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(-nan:0xffc00000) flags=INVALID (0/2) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(-nan:0xffc00000) flags=OK (1/0) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-nan:0xffc00000) +res: f32(-nan:0xffc00000) flags=OK (1/1) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffc00000) flags=OK (1/2) +op : f32(-inf:0xff800000) * 
f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(inf:0x7f800000) flags=OK (2/0) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-inf:0xff800000) +res: f32(-inf:0xff800000) flags=OK (2/1) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(inf:0x7f800000) flags=OK (2/2) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (3/0) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (3/1) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (3/2) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (4/0) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) flags=INEXACT (4/1) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) flags=INEXACT (4/2) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(0x1.0c27fa00000000000000p+60:0x5d8613fd) flags=INEXACT (5/0) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) flags=INEXACT (5/1) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(0x1.26c46200000000000000p+34:0x50936231) flags=INEXACT (5/2) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(0x1.91f94000000000000000p-106:0x0ac8fca0) flags=INEXACT (6/0) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(-0x1.31f74e00000000000000p-40:0xab98fba7) flags=INEXACT (6/1) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544200000000000000p-66:0x9ea82a21) flags=INEXACT (6/2) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x1.00000000000000000000p-149:0x00000001) flags=UNDERFLOW INEXACT (7/0) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=OK (7/1) +op : f32(0x0.00000000000000000000p+0:0000000000) * 
f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (7/2) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (8/0) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (8/1) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(-0x0.00000000000000000000p+0:0x80000000) flags=UNDERFLOW INEXACT (8/2) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=OK (9/0) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x1.00000000000000000000p-149:0x00000001) flags=UNDERFLOW INEXACT (9/1) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (9/2) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.ffffe800000000000000p-25:0x337ffff4) flags=INEXACT (10/0) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.ffffe800000000000000p-50:0x26fffff4) flags=INEXACT (10/1) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000200000000000000p-25:0x33000001) flags=INEXACT (10/2) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801c00000000000000p-15:0x387fc00e) flags=INEXACT (11/0) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00080000000000000000p-25:0x33000400) flags=INEXACT (11/1) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0001f400000000000000p-24:0x338000fa) flags=INEXACT (11/2) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00000e00000000000000p-14:0x38800007) flags=INEXACT (12/0) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0ffbf600000000000000p-24:0x3387fdfb) flags=INEXACT (12/1) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801c00000000000000p-15:0x387fc00e) flags=INEXACT (12/2) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + 
f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00000200000000000000p+0:0x3f800001) flags=INEXACT (13/0) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ffc01a00000000000000p-14:0x38ffe00d) flags=INEXACT (13/1) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.ffc01a00000000000000p-14:0x38ffe00d) flags=INEXACT (13/2) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.00440200000000000000p+0:0x3f802201) flags=INEXACT (14/0) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00440200000000000000p+0:0x3f802201) flags=INEXACT (14/1) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00040200000000000000p+0:0x3f800201) flags=INEXACT (14/2) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/0) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.80400000000000000000p+1:0x40402000) flags=OK (15/1) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/2) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=OK (16/0) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=OK (16/1) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.2e23d400000000000000p+2:0x409711ea) flags=INEXACT (16/2) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.12804200000000000000p+3:0x41094021) flags=INEXACT (17/0) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.51458200000000000000p+3:0x4128a2c1) flags=INEXACT (17/1) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.200c0600000000000000p+3:0x41100603) flags=INEXACT (17/2) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ffcf1600000000000000p+15:0x477fe78b) flags=INEXACT (18/0) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.91ed3c00000000000000p+17:0x4848f69e) flags=INEXACT (18/1) +op : 
f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.5bc56200000000000000p+17:0x482de2b1) flags=INEXACT (18/2) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.08edf000000000000000p+18:0x488476f8) flags=INEXACT (19/0) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.ff7e0a00000000000000p+31:0x4f7fbf05) flags=INEXACT (19/1) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.08ee7a00000000000000p+18:0x4884773d) flags=INEXACT (19/2) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800a00000000000000p+31:0x4f7fc005) flags=INEXACT (20/0) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ff840800000000000000p+31:0x4f7fc204) flags=INEXACT (20/1) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820800000000000000p+31:0x4f7fc104) flags=INEXACT (20/2) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff860800000000000000p+31:0x4f7fc304) flags=INEXACT (21/0) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820800000000000000p+32:0x4fffc104) flags=INEXACT (21/1) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800a00000000000000p+32:0x4fffc005) flags=INEXACT (21/2) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff830800000000000000p+32:0x4fffc184) flags=INEXACT (22/0) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff7f8a00000000000000p+33:0x507fbfc5) flags=INEXACT (22/1) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff840800000000000000p+32:0x4fffc204) flags=INEXACT (22/2) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.ff800a00000000000000p+33:0x507fc005) flags=INEXACT (23/0) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff820800000000000000p+33:0x507fc104) flags=INEXACT (23/1) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff810800000000000000p+33:0x507fc084) flags=INEXACT (23/2) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * 
f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(0x1.c0bab800000000000000p+99:0x71605d5c) flags=INEXACT (24/0) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.c0838000000000000000p+116:0x79e041c0) flags=INEXACT (24/1) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.c082a000000000000000p+116:0x79e04150) flags=INEXACT (24/2) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (25/0) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (25/1) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (25/2) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(inf:0x7f800000) flags=OK (26/0) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(inf:0x7f800000) flags=OK (26/1) +op : f32(inf:0x7f800000) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(inf:0x7f800000) flags=OK (26/2) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fc00000) flags=OK (27/0) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(nan:0x7fc00000) flags=OK (27/1) +op : f32(nan:0x7fc00000) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(nan:0x7fc00000) flags=OK (27/2) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/0) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(inf:0x7f800000) +res: f32(nan:0x7fc00000) flags=INVALID (28/1) +op : f32(nan:0x7fa00000) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/2) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(nan:0x7fc00000) flags=INVALID (29/0) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (29/1) +op : f32(-nan:0xffa00000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(-nan:0xffe00000) flags=INVALID (29/2) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/0) +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(nan:0x7fa00000) +res: f32(-nan:0xffe00000) flags=INVALID (30/1) +op : f32(-nan:0xffc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffc00000) flags=INVALID (30/2) +# LP184149 +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-1:0x3f000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=OK (31/0) +op : f32(0x1.00000000000000000000p-149:0x00000001) * f32(0x1.00000000000000000000p-149:0x00000001) + f32(0x1.00000000000000000000p-149:0x00000001) +res: 
f32(0x1.00000000000000000000p-148:0x00000002) flags=UNDERFLOW INEXACT (32/0) +### Rounding downwards +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffe00000) flags=INVALID (0/0) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffc00000) flags=INVALID (0/1) +op : f32(-inf:0xff800000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(-nan:0xffc00000) flags=INVALID (0/2) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(-nan:0xffc00000) flags=OK (1/0) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-nan:0xffc00000) +res: f32(-nan:0xffc00000) flags=OK (1/1) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffc00000) flags=OK (1/2) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(inf:0x7f800000) flags=OK (2/0) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-inf:0xff800000) +res: f32(-inf:0xff800000) flags=OK (2/1) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(inf:0x7f800000) flags=OK (2/2) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (3/0) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (3/1) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (3/2) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (4/0) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) flags=INEXACT (4/1) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) flags=INEXACT (4/2) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(0x1.0c27f800000000000000p+60:0x5d8613fc) flags=INEXACT (5/0) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) flags=INEXACT (5/1) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(0x1.26c46000000000000000p+34:0x50936230) flags=INEXACT (5/2) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + 
f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(0x1.91f93e00000000000000p-106:0x0ac8fc9f) flags=INEXACT (6/0) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(-0x1.31f75000000000000000p-40:0xab98fba8) flags=INEXACT (6/1) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=INEXACT (6/2) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (7/0) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=OK (7/1) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (7/2) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (8/0) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (8/1) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(-0x1.00000000000000000000p-149:0x80000001) flags=UNDERFLOW INEXACT (8/2) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=OK (9/0) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (9/1) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (9/2) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.ffffe600000000000000p-25:0x337ffff3) flags=INEXACT (10/0) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.ffffe600000000000000p-50:0x26fffff3) flags=INEXACT (10/1) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=INEXACT (10/2) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801a00000000000000p-15:0x387fc00d) flags=INEXACT (11/0) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000000000000000000p-25:0x33000000) +res: 
f32(0x1.0007fe00000000000000p-25:0x330003ff) flags=INEXACT (11/1) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0001f200000000000000p-24:0x338000f9) flags=INEXACT (11/2) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00000c00000000000000p-14:0x38800006) flags=INEXACT (12/0) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0ffbf400000000000000p-24:0x3387fdfa) flags=INEXACT (12/1) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801a00000000000000p-15:0x387fc00d) flags=INEXACT (12/2) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00000000000000000000p+0:0x3f800000) flags=INEXACT (13/0) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ffc01800000000000000p-14:0x38ffe00c) flags=INEXACT (13/1) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.ffc01800000000000000p-14:0x38ffe00c) flags=INEXACT (13/2) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/0) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/1) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00040000000000000000p+0:0x3f800200) flags=INEXACT (14/2) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/0) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.80400000000000000000p+1:0x40402000) flags=OK (15/1) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/2) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=OK (16/0) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=OK (16/1) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.2e23d200000000000000p+2:0x409711e9) flags=INEXACT (16/2) +op : f32(0x1.00000000000000000000p+1:0x40000000) * 
f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.12804000000000000000p+3:0x41094020) flags=INEXACT (17/0) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.51458000000000000000p+3:0x4128a2c0) flags=INEXACT (17/1) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.200c0400000000000000p+3:0x41100602) flags=INEXACT (17/2) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ffcf1400000000000000p+15:0x477fe78a) flags=INEXACT (18/0) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.91ed3a00000000000000p+17:0x4848f69d) flags=INEXACT (18/1) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.5bc56000000000000000p+17:0x482de2b0) flags=INEXACT (18/2) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.08edee00000000000000p+18:0x488476f7) flags=INEXACT (19/0) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.ff7e0800000000000000p+31:0x4f7fbf04) flags=INEXACT (19/1) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.08ee7800000000000000p+18:0x4884773c) flags=INEXACT (19/2) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800800000000000000p+31:0x4f7fc004) flags=INEXACT (20/0) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ff840600000000000000p+31:0x4f7fc203) flags=INEXACT (20/1) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820600000000000000p+31:0x4f7fc103) flags=INEXACT (20/2) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff860600000000000000p+31:0x4f7fc303) flags=INEXACT (21/0) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820600000000000000p+32:0x4fffc103) flags=INEXACT (21/1) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800800000000000000p+32:0x4fffc004) flags=INEXACT (21/2) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff830600000000000000p+32:0x4fffc183) flags=INEXACT (22/0) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: 
f32(0x1.ff7f8800000000000000p+33:0x507fbfc4) flags=INEXACT (22/1) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff840600000000000000p+32:0x4fffc203) flags=INEXACT (22/2) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.ff800800000000000000p+33:0x507fc004) flags=INEXACT (23/0) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff820600000000000000p+33:0x507fc103) flags=INEXACT (23/1) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff810600000000000000p+33:0x507fc083) flags=INEXACT (23/2) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(0x1.c0bab600000000000000p+99:0x71605d5b) flags=INEXACT (24/0) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.c0837e00000000000000p+116:0x79e041bf) flags=INEXACT (24/1) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.c0829e00000000000000p+116:0x79e0414f) flags=INEXACT (24/2) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (25/0) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (25/1) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (25/2) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(inf:0x7f800000) flags=OK (26/0) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(inf:0x7f800000) flags=OK (26/1) +op : f32(inf:0x7f800000) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(inf:0x7f800000) flags=OK (26/2) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fc00000) flags=OK (27/0) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(nan:0x7fc00000) flags=OK (27/1) +op : f32(nan:0x7fc00000) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(nan:0x7fc00000) flags=OK (27/2) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/0) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(inf:0x7f800000) +res: f32(nan:0x7fc00000) flags=INVALID (28/1) +op : f32(nan:0x7fa00000) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/2) +op : f32(nan:0x7fc00000) * 
f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(nan:0x7fc00000) flags=INVALID (29/0) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (29/1) +op : f32(-nan:0xffa00000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(-nan:0xffe00000) flags=INVALID (29/2) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/0) +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(nan:0x7fa00000) +res: f32(-nan:0xffe00000) flags=INVALID (30/1) +op : f32(-nan:0xffc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffc00000) flags=INVALID (30/2) +# LP184149 +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-1:0x3f000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=OK (31/0) +op : f32(0x1.00000000000000000000p-149:0x00000001) * f32(0x1.00000000000000000000p-149:0x00000001) + f32(0x1.00000000000000000000p-149:0x00000001) +res: f32(0x1.00000000000000000000p-149:0x00000001) flags=UNDERFLOW INEXACT (32/0) +### Rounding to zero +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffe00000) flags=INVALID (0/0) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffc00000) flags=INVALID (0/1) +op : f32(-inf:0xff800000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(-nan:0xffc00000) flags=INVALID (0/2) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(-nan:0xffc00000) flags=OK (1/0) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-nan:0xffc00000) +res: f32(-nan:0xffc00000) flags=OK (1/1) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffc00000) flags=OK (1/2) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(inf:0x7f800000) flags=OK (2/0) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-inf:0xff800000) +res: f32(-inf:0xff800000) flags=OK (2/1) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(inf:0x7f800000) flags=OK (2/2) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (3/0) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (3/1) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (3/2) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (4/0) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: 
f32(-0x1.1874b000000000000000p+103:0xf30c3a58) flags=INEXACT (4/1) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) flags=INEXACT (4/2) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(0x1.0c27f800000000000000p+60:0x5d8613fc) flags=INEXACT (5/0) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) flags=INEXACT (5/1) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(0x1.26c46000000000000000p+34:0x50936230) flags=INEXACT (5/2) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(0x1.91f93e00000000000000p-106:0x0ac8fc9f) flags=INEXACT (6/0) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(-0x1.31f74e00000000000000p-40:0xab98fba7) flags=INEXACT (6/1) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544200000000000000p-66:0x9ea82a21) flags=INEXACT (6/2) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (7/0) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=OK (7/1) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (7/2) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (8/0) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (8/1) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(-0x0.00000000000000000000p+0:0x80000000) flags=UNDERFLOW INEXACT (8/2) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=OK (9/0) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (9/1) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) 
flags=OK (9/2) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.ffffe600000000000000p-25:0x337ffff3) flags=INEXACT (10/0) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.ffffe600000000000000p-50:0x26fffff3) flags=INEXACT (10/1) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=INEXACT (10/2) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801a00000000000000p-15:0x387fc00d) flags=INEXACT (11/0) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.0007fe00000000000000p-25:0x330003ff) flags=INEXACT (11/1) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0001f200000000000000p-24:0x338000f9) flags=INEXACT (11/2) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00000c00000000000000p-14:0x38800006) flags=INEXACT (12/0) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0ffbf400000000000000p-24:0x3387fdfa) flags=INEXACT (12/1) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801a00000000000000p-15:0x387fc00d) flags=INEXACT (12/2) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00000000000000000000p+0:0x3f800000) flags=INEXACT (13/0) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ffc01800000000000000p-14:0x38ffe00c) flags=INEXACT (13/1) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.ffc01800000000000000p-14:0x38ffe00c) flags=INEXACT (13/2) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/0) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/1) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00040000000000000000p+0:0x3f800200) flags=INEXACT (14/2) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/0) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * 
f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.80400000000000000000p+1:0x40402000) flags=OK (15/1) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/2) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=OK (16/0) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=OK (16/1) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.2e23d200000000000000p+2:0x409711e9) flags=INEXACT (16/2) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.12804000000000000000p+3:0x41094020) flags=INEXACT (17/0) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.51458000000000000000p+3:0x4128a2c0) flags=INEXACT (17/1) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.200c0400000000000000p+3:0x41100602) flags=INEXACT (17/2) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ffcf1400000000000000p+15:0x477fe78a) flags=INEXACT (18/0) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.91ed3a00000000000000p+17:0x4848f69d) flags=INEXACT (18/1) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.5bc56000000000000000p+17:0x482de2b0) flags=INEXACT (18/2) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.08edee00000000000000p+18:0x488476f7) flags=INEXACT (19/0) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.ff7e0800000000000000p+31:0x4f7fbf04) flags=INEXACT (19/1) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.08ee7800000000000000p+18:0x4884773c) flags=INEXACT (19/2) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800800000000000000p+31:0x4f7fc004) flags=INEXACT (20/0) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ff840600000000000000p+31:0x4f7fc203) flags=INEXACT (20/1) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820600000000000000p+31:0x4f7fc103) 
flags=INEXACT (20/2) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff860600000000000000p+31:0x4f7fc303) flags=INEXACT (21/0) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820600000000000000p+32:0x4fffc103) flags=INEXACT (21/1) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800800000000000000p+32:0x4fffc004) flags=INEXACT (21/2) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff830600000000000000p+32:0x4fffc183) flags=INEXACT (22/0) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff7f8800000000000000p+33:0x507fbfc4) flags=INEXACT (22/1) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff840600000000000000p+32:0x4fffc203) flags=INEXACT (22/2) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.ff800800000000000000p+33:0x507fc004) flags=INEXACT (23/0) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff820600000000000000p+33:0x507fc103) flags=INEXACT (23/1) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff810600000000000000p+33:0x507fc083) flags=INEXACT (23/2) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(0x1.c0bab600000000000000p+99:0x71605d5b) flags=INEXACT (24/0) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.c0837e00000000000000p+116:0x79e041bf) flags=INEXACT (24/1) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.c0829e00000000000000p+116:0x79e0414f) flags=INEXACT (24/2) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (25/0) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (25/1) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (25/2) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(inf:0x7f800000) flags=OK (26/0) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + 
f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(inf:0x7f800000) flags=OK (26/1) +op : f32(inf:0x7f800000) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(inf:0x7f800000) flags=OK (26/2) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fc00000) flags=OK (27/0) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(nan:0x7fc00000) flags=OK (27/1) +op : f32(nan:0x7fc00000) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(nan:0x7fc00000) flags=OK (27/2) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/0) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(inf:0x7f800000) +res: f32(nan:0x7fc00000) flags=INVALID (28/1) +op : f32(nan:0x7fa00000) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/2) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(nan:0x7fc00000) flags=INVALID (29/0) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (29/1) +op : f32(-nan:0xffa00000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(-nan:0xffe00000) flags=INVALID (29/2) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/0) +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(nan:0x7fa00000) +res: f32(-nan:0xffe00000) flags=INVALID (30/1) +op : f32(-nan:0xffc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffc00000) flags=INVALID (30/2) +# LP184149 +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-1:0x3f000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=OK (31/0) +op : f32(0x1.00000000000000000000p-149:0x00000001) * f32(0x1.00000000000000000000p-149:0x00000001) + f32(0x1.00000000000000000000p-149:0x00000001) +res: f32(0x1.00000000000000000000p-149:0x00000001) flags=UNDERFLOW INEXACT (32/0) diff --git a/tests/tcg/riscv64/Makefile.softmmu-target b/tests/tcg/riscv64/Makefile.softmmu-target new file mode 100644 index 0000000000..e22cdb34c5 --- /dev/null +++ b/tests/tcg/riscv64/Makefile.softmmu-target @@ -0,0 +1,21 @@ +# +# RISC-V system tests +# + +TEST_SRC = $(SRC_PATH)/tests/tcg/riscv64 +VPATH += $(TEST_SRC) + +LINK_SCRIPT = $(TEST_SRC)/semihost.ld +LDFLAGS = -T $(LINK_SCRIPT) +CFLAGS += -g -Og + +%.o: %.S + $(CC) $(CFLAGS) $< -c -o $@ +%: %.o $(LINK_SCRIPT) + $(LD) $(LDFLAGS) $< -o $@ + +QEMU_OPTS += -M virt -display none -semihosting -device loader,file= + +EXTRA_RUNS += run-issue1060 +run-issue1060: issue1060 + $(call run-test, $<, $(QEMU) $(QEMU_OPTS)$<) diff --git a/tests/tcg/riscv64/Makefile.target b/tests/tcg/riscv64/Makefile.target new file mode 100644 index 0000000000..cc3ed65ffd --- /dev/null +++ b/tests/tcg/riscv64/Makefile.target @@ -0,0 +1,12 @@ +# -*- Mode: makefile -*- +# RISC-V specific tweaks + +VPATH += $(SRC_PATH)/tests/tcg/riscv64 +TESTS += test-div +TESTS += noexec + +# Disable compressed instructions for test-noc +TESTS += test-noc +test-noc: LDFLAGS = -nostdlib -static +run-test-noc: QEMU_OPTS += -cpu rv64,c=false +run-plugin-test-noc-%: QEMU_OPTS += -cpu rv64,c=false diff --git a/tests/tcg/riscv64/issue1060.S b/tests/tcg/riscv64/issue1060.S new file mode 100644 index 0000000000..17b7fe1be2 --- /dev/null +++ 
b/tests/tcg/riscv64/issue1060.S
@@ -0,0 +1,53 @@
+    .option norvc
+
+    .text
+    .global _start
+_start:
+    lla t0, trap
+    csrw mtvec, t0
+
+    # These are all illegal instructions
+    csrw time, x0
+    .insn i CUSTOM_0, 0, x0, x0, 0x321
+    csrw time, x0
+    .insn i CUSTOM_0, 0, x0, x0, 0x123
+    csrw cycle, x0
+
+    # Success!
+    li a0, 0
+    j _exit
+
+trap:
+    # When an instruction traps, compare it to the insn in memory.
+    csrr t0, mepc
+    csrr t1, mtval
+    lwu t2, 0(t0)
+    bne t1, t2, fail
+
+    # Skip the insn and continue.
+    addi t0, t0, 4
+    csrw mepc, t0
+    mret
+
+fail:
+    li a0, 1
+
+# Exit code in a0
+_exit:
+    lla a1, semiargs
+    li t0, 0x20026 # ADP_Stopped_ApplicationExit
+    sd t0, 0(a1)
+    sd a0, 8(a1)
+    li a0, 0x20 # TARGET_SYS_EXIT_EXTENDED
+
+    # Semihosting call sequence
+    .balign 16
+    slli zero, zero, 0x1f
+    ebreak
+    srai zero, zero, 0x7
+    j .
+
+    .data
+    .balign 16
+semiargs:
+    .space 16
diff --git a/tests/tcg/riscv64/noexec.c b/tests/tcg/riscv64/noexec.c
new file mode 100644
index 0000000000..86f64b28db
--- /dev/null
+++ b/tests/tcg/riscv64/noexec.c
@@ -0,0 +1,79 @@
+#include "../multiarch/noexec.c.inc"
+
+static void *arch_mcontext_pc(const mcontext_t *ctx)
+{
+    return (void *)ctx->__gregs[REG_PC];
+}
+
+static int arch_mcontext_arg(const mcontext_t *ctx)
+{
+    return ctx->__gregs[REG_A0];
+}
+
+static void arch_flush(void *p, int len)
+{
+    __builtin___clear_cache(p, p + len);
+}
+
+extern char noexec_1[];
+extern char noexec_2[];
+extern char noexec_end[];
+
+asm(".option push\n"
+    ".option norvc\n"
+    "noexec_1:\n"
+    " li a0,1\n"  /* a0 is 0 on entry, set 1. */
+    "noexec_2:\n"
+    " li a0,2\n"  /* a0 is 0/1; set 2. */
+    " ret\n"
+    "noexec_end:\n"
+    ".option pop");
+
+int main(void)
+{
+    struct noexec_test noexec_tests[] = {
+        {
+            .name = "fallthrough",
+            .test_code = noexec_1,
+            .test_len = noexec_end - noexec_1,
+            .page_ofs = noexec_1 - noexec_2,
+            .entry_ofs = noexec_1 - noexec_2,
+            .expected_si_ofs = 0,
+            .expected_pc_ofs = 0,
+            .expected_arg = 1,
+        },
+        {
+            .name = "jump",
+            .test_code = noexec_1,
+            .test_len = noexec_end - noexec_1,
+            .page_ofs = noexec_1 - noexec_2,
+            .entry_ofs = 0,
+            .expected_si_ofs = 0,
+            .expected_pc_ofs = 0,
+            .expected_arg = 0,
+        },
+        {
+            .name = "fallthrough [cross]",
+            .test_code = noexec_1,
+            .test_len = noexec_end - noexec_1,
+            .page_ofs = noexec_1 - noexec_2 - 2,
+            .entry_ofs = noexec_1 - noexec_2 - 2,
+            .expected_si_ofs = 0,
+            .expected_pc_ofs = -2,
+            .expected_arg = 1,
+        },
+        {
+            .name = "jump [cross]",
+            .test_code = noexec_1,
+            .test_len = noexec_end - noexec_1,
+            .page_ofs = noexec_1 - noexec_2 - 2,
+            .entry_ofs = -2,
+            .expected_si_ofs = 0,
+            .expected_pc_ofs = -2,
+            .expected_arg = 0,
+        },
+    };
+
+    return test_noexec(noexec_tests,
+                       sizeof(noexec_tests) / sizeof(noexec_tests[0]));
+}
diff --git a/tests/tcg/riscv64/semihost.ld b/tests/tcg/riscv64/semihost.ld
new file mode 100644
index 0000000000..a59cc56b28
--- /dev/null
+++ b/tests/tcg/riscv64/semihost.ld
@@ -0,0 +1,21 @@
+ENTRY(_start)
+
+SECTIONS
+{
+    /* virt machine, RAM starts at 2gb */
+    . = 0x80000000;
+    .text : {
+        *(.text)
+    }
+    .rodata : {
+        *(.rodata)
+    }
+    /* align r/w section to next 2mb */
+    . = ALIGN(1 << 21);
+    .data : {
+        *(.data)
+    }
+    .bss : {
+        *(.bss)
+    }
+}
diff --git a/tests/tcg/riscv64/test-div.c b/tests/tcg/riscv64/test-div.c
new file mode 100644
index 0000000000..a90480be3f
--- /dev/null
+++ b/tests/tcg/riscv64/test-div.c
@@ -0,0 +1,58 @@
+#include <assert.h>
+#include <limits.h>
+
+struct TestS {
+    long x, y, q, r;
+};
+
+static struct TestS test_s[] = {
+    { 4, 2, 2, 0 }, /* normal cases */
+    { 9, 7, 1, 2 },
+    { 0, 0, -1, 0 }, /* div by zero cases */
+    { 9, 0, -1, 9 },
+    { LONG_MIN, -1, LONG_MIN, 0 }, /* overflow case */
+};
+
+struct TestU {
+    unsigned long x, y, q, r;
+};
+
+static struct TestU test_u[] = {
+    { 4, 2, 2, 0 }, /* normal cases */
+    { 9, 7, 1, 2 },
+    { 0, 0, ULONG_MAX, 0 }, /* div by zero cases */
+    { 9, 0, ULONG_MAX, 9 },
+};
+
+#define ARRAY_SIZE(X) (sizeof(X) / sizeof(*(X)))
+
+int main (void)
+{
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(test_s); i++) {
+        long q, r;
+
+        asm("div %0, %2, %3\n\t"
+            "rem %1, %2, %3"
+            : "=&r" (q), "=r" (r)
+            : "r" (test_s[i].x), "r" (test_s[i].y));
+
+        assert(q == test_s[i].q);
+        assert(r == test_s[i].r);
+    }
+
+    for (i = 0; i < ARRAY_SIZE(test_u); i++) {
+        unsigned long q, r;
+
+        asm("divu %0, %2, %3\n\t"
+            "remu %1, %2, %3"
+            : "=&r" (q), "=r" (r)
+            : "r" (test_u[i].x), "r" (test_u[i].y));
+
+        assert(q == test_u[i].q);
+        assert(r == test_u[i].r);
+    }
+
+    return 0;
+}
diff --git a/tests/tcg/riscv64/test-noc.S b/tests/tcg/riscv64/test-noc.S
new file mode 100644
index 0000000000..e29d60c8b3
--- /dev/null
+++ b/tests/tcg/riscv64/test-noc.S
@@ -0,0 +1,32 @@
+#include <asm/unistd.h>
+
+    .text
+    .globl _start
+_start:
+    .option norvc
+    li a0, 4 /* SIGILL */
+    la a1, sa
+    li a2, 0
+    li a3, 8
+    li a7, __NR_rt_sigaction
+    scall
+
+    .option rvc
+    li a0, 1
+    j exit
+    .option norvc
+
+pass:
+    li a0, 0
+exit:
+    li a7, __NR_exit
+    scall
+
+    .data
+    /* struct kernel_sigaction sa = { .sa_handler = pass }; */
+    .type sa, @object
+    .size sa, 32
+sa:
+    .dword pass
+    .zero 24
+
diff --git a/tests/tcg/s390x/Makefile.softmmu-target b/tests/tcg/s390x/Makefile.softmmu-target
new file mode 100644
index 0000000000..a34fa68473
--- /dev/null
+++ b/tests/tcg/s390x/Makefile.softmmu-target
@@ -0,0 +1,9 @@
+S390X_SRC=$(SRC_PATH)/tests/tcg/s390x
+VPATH+=$(S390X_SRC)
+QEMU_OPTS=-action panic=exit-failure -kernel
+
+%: %.S
+	$(CC) -march=z13 -m64 -nostartfiles -static -Wl,-Ttext=0 \
+		-Wl,--build-id=none $< -o $@
+
+TESTS += unaligned-lowcore
diff --git a/tests/tcg/s390x/Makefile.target b/tests/tcg/s390x/Makefile.target
index bd084c7840..cb90d4183d 100644
--- a/tests/tcg/s390x/Makefile.target
+++ b/tests/tcg/s390x/Makefile.target
@@ -1,11 +1,76 @@
-VPATH+=$(SRC_PATH)/tests/tcg/s390x
+S390X_SRC=$(SRC_PATH)/tests/tcg/s390x
+VPATH+=$(S390X_SRC)
 CFLAGS+=-march=zEC12 -m64
+
+config-cc.mak: Makefile
+	$(quiet-@)( \
+	    $(call cc-option,-march=z14, CROSS_CC_HAS_Z14); \
+	    $(call cc-option,-march=z15, CROSS_CC_HAS_Z15)) 3> config-cc.mak
+-include config-cc.mak
+
 TESTS+=hello-s390x
 TESTS+=csst
 TESTS+=ipm
 TESTS+=exrl-trt
 TESTS+=exrl-trtr
 TESTS+=pack
+TESTS+=mie3-compl
+TESTS+=mie3-mvcrl
+TESTS+=mie3-sel
 TESTS+=mvo
 TESTS+=mvc
+TESTS+=shift
 TESTS+=trap
+TESTS+=signals-s390x
+TESTS+=branch-relative-long
+TESTS+=noexec
+
+Z13_TESTS=vistr
+Z13_TESTS+=lcbb
+Z13_TESTS+=locfhr
+$(Z13_TESTS): CFLAGS+=-march=z13 -O2
+TESTS+=$(Z13_TESTS)
+
+ifneq ($(CROSS_CC_HAS_Z14),)
+Z14_TESTS=vfminmax
+vfminmax: LDFLAGS+=-lm
+$(Z14_TESTS): CFLAGS+=-march=z14 -O2
+TESTS+=$(Z14_TESTS)
+endif
+
+ifneq ($(CROSS_CC_HAS_Z15),)
+Z15_TESTS=vxeh2_vs
+Z15_TESTS+=vxeh2_vcvt
+Z15_TESTS+=vxeh2_vlstr
+$(Z15_TESTS): CFLAGS+=-march=z15 -O2
+TESTS+=$(Z15_TESTS) +endif + +ifneq ($(HAVE_GDB_BIN),) +GDB_SCRIPT=$(SRC_PATH)/tests/guest-debug/run-test.py + +run-gdbstub-signals-s390x: signals-s390x + $(call run-test, $@, $(GDB_SCRIPT) \ + --gdb $(HAVE_GDB_BIN) \ + --qemu $(QEMU) --qargs "$(QEMU_OPTS)" \ + --bin $< --test $(S390X_SRC)/gdbstub/test-signals-s390x.py, \ + mixing signals and debugging) + +hello-s390x-asm: CFLAGS+=-nostdlib + +run-gdbstub-svc: hello-s390x-asm + $(call run-test, $@, $(GDB_SCRIPT) \ + --gdb $(HAVE_GDB_BIN) \ + --qemu $(QEMU) --qargs "$(QEMU_OPTS)" \ + --bin $< --test $(S390X_SRC)/gdbstub/test-svc.py, \ + single-stepping svc) + +EXTRA_RUNS += run-gdbstub-signals-s390x run-gdbstub-svc +endif + +# MVX versions of sha512 +sha512-mvx: CFLAGS=-march=z13 -mvx -O3 +sha512-mvx: sha512.c + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) + +TESTS+=sha512-mvx diff --git a/tests/tcg/s390x/branch-relative-long.c b/tests/tcg/s390x/branch-relative-long.c new file mode 100644 index 0000000000..8ce9f1c2e5 --- /dev/null +++ b/tests/tcg/s390x/branch-relative-long.c @@ -0,0 +1,68 @@ +#include +#include +#include +#include + +#define DEFINE_ASM(_name, _code) \ + extern const char _name[]; \ + extern const char _name ## _end[]; \ + asm(" .globl " #_name "\n" \ + #_name ":\n" \ + " " _code "\n" \ + " .globl " #_name "_end\n" \ + #_name "_end:\n"); + +DEFINE_ASM(br_r14, "br %r14"); +DEFINE_ASM(brasl_r0, "brasl %r0,-0x100000000"); +DEFINE_ASM(brcl_0xf, "brcl 0xf,-0x100000000"); + +struct test { + const char *code; + const char *code_end; +}; + +static const struct test tests[] = { + { + .code = brasl_r0, + .code_end = brasl_r0_end, + }, + { + .code = brcl_0xf, + .code_end = brcl_0xf_end, + }, +}; + +int main(void) +{ + unsigned char *buf; + size_t length = 0; + size_t i; + + for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + size_t test_length = 0x100000000 + (tests[i].code_end - tests[i].code); + + if (test_length > length) { + length = test_length; + } + } + + buf = mmap(NULL, length, PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0); + if (buf == MAP_FAILED) { + perror("SKIP: mmap() failed"); + return 0; + } + + memcpy(buf, br_r14, br_r14_end - br_r14); + for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + void (*code)(void) = (void *)(buf + 0x100000000); + + memcpy(code, tests[i].code, tests[i].code_end - tests[i].code); + code(); + memset(code, 0, tests[i].code_end - tests[i].code); + } + + munmap(buf, length); + + return 0; +} diff --git a/tests/tcg/s390x/exrl-trt.c b/tests/tcg/s390x/exrl-trt.c index 16711a3181..451f777b9d 100644 --- a/tests/tcg/s390x/exrl-trt.c +++ b/tests/tcg/s390x/exrl-trt.c @@ -5,8 +5,8 @@ int main(void) { char op1[] = "hello"; char op2[256]; - uint64_t r1 = 0xffffffffffffffffull; - uint64_t r2 = 0xffffffffffffffffull; + register uint64_t r1 asm("r1") = 0xffffffffffffffffull; + register uint64_t r2 asm("r2") = 0xffffffffffffffffull; uint64_t cc; int i; @@ -21,8 +21,6 @@ int main(void) " j 2f\n" "1: trt 0(1,%[op1]),%[op2]\n" "2: exrl %[op1_len],1b\n" - " lgr %[r1],%%r1\n" - " lgr %[r2],%%r2\n" " ipm %[cc]\n" : [r1] "+r" (r1), [r2] "+r" (r2), @@ -30,7 +28,7 @@ int main(void) : [op1] "a" (&op1), [op1_len] "a" (5), [op2] "Q" (op2) - : "r1", "r2", "cc"); + : "cc"); cc = (cc >> 28) & 3; if (cc != 2) { write(1, "bad cc\n", 7); diff --git a/tests/tcg/s390x/exrl-trtr.c b/tests/tcg/s390x/exrl-trtr.c index 5f30cda6bd..422f7f385a 100644 --- a/tests/tcg/s390x/exrl-trtr.c +++ b/tests/tcg/s390x/exrl-trtr.c @@ -5,8 +5,8 @@ int main(void) { char op1[] = {0, 1, 2, 
3}; char op2[256]; - uint64_t r1 = 0xffffffffffffffffull; - uint64_t r2 = 0xffffffffffffffffull; + register uint64_t r1 asm("r1") = 0xffffffffffffffffull; + register uint64_t r2 asm("r2") = 0xffffffffffffffffull; uint64_t cc; int i; @@ -21,8 +21,6 @@ int main(void) " j 2f\n" "1: trtr 3(1,%[op1]),%[op2]\n" "2: exrl %[op1_len],1b\n" - " lgr %[r1],%%r1\n" - " lgr %[r2],%%r2\n" " ipm %[cc]\n" : [r1] "+r" (r1), [r2] "+r" (r2), @@ -30,7 +28,7 @@ int main(void) : [op1] "a" (&op1), [op1_len] "a" (3), [op2] "Q" (op2) - : "r1", "r2", "cc"); + : "cc"); cc = (cc >> 28) & 3; if (cc != 1) { write(1, "bad cc\n", 7); diff --git a/tests/tcg/s390x/gdbstub/test-signals-s390x.py b/tests/tcg/s390x/gdbstub/test-signals-s390x.py new file mode 100644 index 0000000000..80a284b475 --- /dev/null +++ b/tests/tcg/s390x/gdbstub/test-signals-s390x.py @@ -0,0 +1,76 @@ +from __future__ import print_function + +# +# Test that signals and debugging mix well together on s390x. +# +# This is launched via tests/guest-debug/run-test.py +# + +import gdb +import sys + +failcount = 0 + + +def report(cond, msg): + """Report success/fail of test""" + if cond: + print("PASS: %s" % (msg)) + else: + print("FAIL: %s" % (msg)) + global failcount + failcount += 1 + + +def run_test(): + """Run through the tests one by one""" + illegal_op = gdb.Breakpoint("illegal_op") + stg = gdb.Breakpoint("stg") + mvc_8 = gdb.Breakpoint("mvc_8") + + # Expect the following events: + # 1x illegal_op breakpoint + # 2x stg breakpoint, segv, breakpoint + # 2x mvc_8 breakpoint, segv, breakpoint + for _ in range(14): + gdb.execute("c") + report(illegal_op.hit_count == 1, "illegal_op.hit_count == 1") + report(stg.hit_count == 4, "stg.hit_count == 4") + report(mvc_8.hit_count == 4, "mvc_8.hit_count == 4") + + # The test must succeed. + gdb.Breakpoint("_exit") + gdb.execute("c") + status = int(gdb.parse_and_eval("$r2")) + report(status == 0, "status == 0"); + + +# +# This runs as the script it sourced (via -x, via run-test.py) +# +try: + inferior = gdb.selected_inferior() + arch = inferior.architecture() + print("ATTACHED: %s" % arch.name()) +except (gdb.error, AttributeError): + print("SKIPPING (not connected)", file=sys.stderr) + exit(0) + +if gdb.parse_and_eval("$pc") == 0: + print("SKIP: PC not set") + exit(0) + +try: + # These are not very useful in scripts + gdb.execute("set pagination off") + gdb.execute("set confirm off") + + # Run the actual tests + run_test() +except (gdb.error): + print("GDB Exception: %s" % (sys.exc_info()[0])) + failcount += 1 + pass + +print("All tests complete: %d failures" % failcount) +exit(failcount) diff --git a/tests/tcg/s390x/gdbstub/test-svc.py b/tests/tcg/s390x/gdbstub/test-svc.py new file mode 100644 index 0000000000..7851ca7284 --- /dev/null +++ b/tests/tcg/s390x/gdbstub/test-svc.py @@ -0,0 +1,64 @@ +"""Test single-stepping SVC. 
+ +This runs as a sourced script (via -x, via run-test.py).""" +from __future__ import print_function +import gdb +import sys + + +n_failures = 0 + + +def report(cond, msg): + """Report success/fail of a test""" + if cond: + print("PASS: {}".format(msg)) + else: + print("FAIL: {}".format(msg)) + global n_failures + n_failures += 1 + + +def run_test(): + """Run through the tests one by one""" + report("lghi\t" in gdb.execute("x/i $pc", False, True), "insn #1") + gdb.execute("si") + report("larl\t" in gdb.execute("x/i $pc", False, True), "insn #2") + gdb.execute("si") + report("lghi\t" in gdb.execute("x/i $pc", False, True), "insn #3") + gdb.execute("si") + report("svc\t" in gdb.execute("x/i $pc", False, True), "insn #4") + gdb.execute("si") + report("xgr\t" in gdb.execute("x/i $pc", False, True), "insn #5") + gdb.execute("si") + report("svc\t" in gdb.execute("x/i $pc", False, True), "insn #6") + gdb.execute("si") + + +def main(): + """Prepare the environment and run through the tests""" + try: + inferior = gdb.selected_inferior() + print("ATTACHED: {}".format(inferior.architecture().name())) + except (gdb.error, AttributeError): + print("SKIPPING (not connected)") + exit(0) + + if gdb.parse_and_eval('$pc') == 0: + print("SKIP: PC not set") + exit(0) + + try: + # These are not very useful in scripts + gdb.execute("set pagination off") + gdb.execute("set confirm off") + + # Run the actual tests + run_test() + except gdb.error: + report(False, "GDB Exception: {}".format(sys.exc_info()[0])) + print("All tests complete: %d failures" % n_failures) + exit(n_failures) + + +main() diff --git a/tests/tcg/s390x/hello-s390x-asm.S b/tests/tcg/s390x/hello-s390x-asm.S new file mode 100644 index 0000000000..2e9faa1604 --- /dev/null +++ b/tests/tcg/s390x/hello-s390x-asm.S @@ -0,0 +1,20 @@ +/* + * Hello, World! in assembly. + */ + +.globl _start +_start: + +/* puts("Hello, World!"); */ +lghi %r2,1 +larl %r3,foo +lghi %r4,foo_end-foo +svc 4 + +/* exit(0); */ +xgr %r2,%r2 +svc 1 + +.align 2 +foo: .asciz "Hello, World!\n" +foo_end: diff --git a/tests/tcg/s390x/lcbb.c b/tests/tcg/s390x/lcbb.c new file mode 100644 index 0000000000..8d368e0998 --- /dev/null +++ b/tests/tcg/s390x/lcbb.c @@ -0,0 +1,51 @@ +/* + * Test the LCBB instruction. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include +#include + +static inline __attribute__((__always_inline__)) void +lcbb(long *r1, void *dxb2, int m3, int *cc) +{ + asm("lcbb %[r1],%[dxb2],%[m3]\n" + "ipm %[cc]" + : [r1] "+r" (*r1), [cc] "=r" (*cc) + : [dxb2] "R" (*(char *)dxb2), [m3] "i" (m3) + : "cc"); + *cc = (*cc >> 28) & 3; +} + +static char buf[0x1000] __attribute__((aligned(0x1000))); + +static inline __attribute__((__always_inline__)) void +test_lcbb(void *p, int m3, int exp_r1, int exp_cc) +{ + long r1 = 0xfedcba9876543210; + int cc; + + lcbb(&r1, p, m3, &cc); + assert(r1 == (0xfedcba9800000000 | exp_r1)); + assert(cc == exp_cc); +} + +int main(void) +{ + test_lcbb(&buf[0], 0, 16, 0); + test_lcbb(&buf[63], 0, 1, 3); + test_lcbb(&buf[0], 1, 16, 0); + test_lcbb(&buf[127], 1, 1, 3); + test_lcbb(&buf[0], 2, 16, 0); + test_lcbb(&buf[255], 2, 1, 3); + test_lcbb(&buf[0], 3, 16, 0); + test_lcbb(&buf[511], 3, 1, 3); + test_lcbb(&buf[0], 4, 16, 0); + test_lcbb(&buf[1023], 4, 1, 3); + test_lcbb(&buf[0], 5, 16, 0); + test_lcbb(&buf[2047], 5, 1, 3); + test_lcbb(&buf[0], 6, 16, 0); + test_lcbb(&buf[4095], 6, 1, 3); + + return EXIT_SUCCESS; +} diff --git a/tests/tcg/s390x/locfhr.c b/tests/tcg/s390x/locfhr.c new file mode 100644 index 0000000000..ab9ff6e449 --- /dev/null +++ b/tests/tcg/s390x/locfhr.c @@ -0,0 +1,29 @@ +/* + * Test the LOCFHR instruction. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include +#include + +static inline __attribute__((__always_inline__)) long +locfhr(long r1, long r2, int m3, int cc) +{ + cc <<= 28; + asm("spm %[cc]\n" + "locfhr %[r1],%[r2],%[m3]\n" + : [r1] "+r" (r1) + : [cc] "r" (cc), [r2] "r" (r2), [m3] "i" (m3) + : "cc"); + return r1; +} + +int main(void) +{ + assert(locfhr(0x1111111122222222, 0x3333333344444444, 8, 0) == + 0x3333333322222222); + assert(locfhr(0x5555555566666666, 0x7777777788888888, 11, 1) == + 0x5555555566666666); + + return EXIT_SUCCESS; +} diff --git a/tests/tcg/s390x/mie3-compl.c b/tests/tcg/s390x/mie3-compl.c new file mode 100644 index 0000000000..35649f3b02 --- /dev/null +++ b/tests/tcg/s390x/mie3-compl.c @@ -0,0 +1,48 @@ +#include + +#define FbinOp(S, ASM) uint64_t S(uint64_t a, uint64_t b) \ +{ \ + uint64_t res = 0; \ + asm ("llihf %[res],801\n" ASM \ + : [res]"=&r"(res) : [a]"r"(a), [b]"r"(b) : "cc"); \ + return res; \ +} + +/* AND WITH COMPLEMENT */ +FbinOp(_ncrk, ".insn rrf, 0xB9F50000, %[res], %[b], %[a], 0\n") +FbinOp(_ncgrk, ".insn rrf, 0xB9E50000, %[res], %[b], %[a], 0\n") + +/* NAND */ +FbinOp(_nnrk, ".insn rrf, 0xB9740000, %[res], %[b], %[a], 0\n") +FbinOp(_nngrk, ".insn rrf, 0xB9640000, %[res], %[b], %[a], 0\n") + +/* NOT XOR */ +FbinOp(_nxrk, ".insn rrf, 0xB9770000, %[res], %[b], %[a], 0\n") +FbinOp(_nxgrk, ".insn rrf, 0xB9670000, %[res], %[b], %[a], 0\n") + +/* NOR */ +FbinOp(_nork, ".insn rrf, 0xB9760000, %[res], %[b], %[a], 0\n") +FbinOp(_nogrk, ".insn rrf, 0xB9660000, %[res], %[b], %[a], 0\n") + +/* OR WITH COMPLEMENT */ +FbinOp(_ocrk, ".insn rrf, 0xB9750000, %[res], %[b], %[a], 0\n") +FbinOp(_ocgrk, ".insn rrf, 0xB9650000, %[res], %[b], %[a], 0\n") + +int main(int argc, char *argv[]) +{ + if (_ncrk(0xFF88, 0xAA11) != 0x0000032100000011ull || + _nnrk(0xFF88, 0xAA11) != 0x00000321FFFF55FFull || + _nork(0xFF88, 0xAA11) != 0x00000321FFFF0066ull || + _nxrk(0xFF88, 0xAA11) != 0x00000321FFFFAA66ull || + _ocrk(0xFF88, 0xAA11) != 0x00000321FFFFAA77ull || + _ncgrk(0xFF88, 0xAA11) != 0x0000000000000011ull || + _nngrk(0xFF88, 0xAA11) != 0xFFFFFFFFFFFF55FFull || + _nogrk(0xFF88, 0xAA11) != 
0xFFFFFFFFFFFF0066ull || + _nxgrk(0xFF88, 0xAA11) != 0xFFFFFFFFFFFFAA66ull || + _ocgrk(0xFF88, 0xAA11) != 0xFFFFFFFFFFFFAA77ull) + { + return 1; + } + + return 0; +} diff --git a/tests/tcg/s390x/mie3-mvcrl.c b/tests/tcg/s390x/mie3-mvcrl.c new file mode 100644 index 0000000000..93c7b0a290 --- /dev/null +++ b/tests/tcg/s390x/mie3-mvcrl.c @@ -0,0 +1,29 @@ +#include +#include + + +static inline void mvcrl_8(const char *dst, const char *src) +{ + asm volatile ( + "llill %%r0, 8\n" + ".insn sse, 0xE50A00000000, 0(%[dst]), 0(%[src])" + : : [dst] "d" (dst), [src] "d" (src) + : "r0", "memory"); +} + + +int main(int argc, char *argv[]) +{ + const char *alpha = "abcdefghijklmnop"; + + /* array missing 'i' */ + char tstr[17] = "abcdefghjklmnop\0" ; + + /* mvcrl reference use: 'open a hole in an array' */ + mvcrl_8(tstr + 9, tstr + 8); + + /* place missing 'i' */ + tstr[8] = 'i'; + + return strncmp(alpha, tstr, 16ul); +} diff --git a/tests/tcg/s390x/mie3-sel.c b/tests/tcg/s390x/mie3-sel.c new file mode 100644 index 0000000000..0dfd532ed4 --- /dev/null +++ b/tests/tcg/s390x/mie3-sel.c @@ -0,0 +1,33 @@ +#include + + +#define Fi3(S, ASM) uint64_t S(uint64_t a, uint64_t b, uint64_t c) \ +{ \ +asm volatile ( \ + "ltgr %[c], %[c]\n" \ + ASM \ + : [c] "+r" (c) \ + : [a] "r" (a) \ + , [b] "r" (b) \ +); \ + return c; \ +} + +Fi3 (_selre, ".insn rrf, 0xB9F00000, %[c], %[b], %[a], 8\n") +Fi3 (_selgrz, ".insn rrf, 0xB9E30000, %[c], %[b], %[a], 8\n") +Fi3 (_selfhrnz, ".insn rrf, 0xB9C00000, %[c], %[b], %[a], 7\n") + + +int main(int argc, char *argv[]) +{ + uint64_t a = ~0, b = ~0, c = ~0; + + a = _selre(0x066600000066ull, 0x066600000006ull, a); + b = _selgrz(0xF00D00000005ull, 0xF00D00000055ull, b); + c = _selfhrnz(0x043200000044ull, 0x065400000004ull, c); + + return (int) ( + (0xFFFFFFFF00000066ull != a) || + (0x0000F00D00000005ull != b) || + (0x00000654FFFFFFFFull != c)); +} diff --git a/tests/tcg/s390x/mvc.c b/tests/tcg/s390x/mvc.c index aa552d52e5..7ae4c44550 100644 --- a/tests/tcg/s390x/mvc.c +++ b/tests/tcg/s390x/mvc.c @@ -20,8 +20,8 @@ static inline void mvc_256(const char *dst, const char *src) asm volatile ( " mvc 0(256,%[dst]),0(%[src])\n" : - : [dst] "d" (dst), - [src] "d" (src) + : [dst] "a" (dst), + [src] "a" (src) : "memory"); } diff --git a/tests/tcg/s390x/mvo.c b/tests/tcg/s390x/mvo.c index 5546fe2a97..0c3ecdde2e 100644 --- a/tests/tcg/s390x/mvo.c +++ b/tests/tcg/s390x/mvo.c @@ -11,8 +11,8 @@ int main(void) asm volatile ( " mvo 0(4,%[dest]),0(3,%[src])\n" : - : [dest] "d" (dest + 1), - [src] "d" (src + 1) + : [dest] "a" (dest + 1), + [src] "a" (src + 1) : "memory"); for (i = 0; i < sizeof(expected); i++) { diff --git a/tests/tcg/s390x/noexec.c b/tests/tcg/s390x/noexec.c new file mode 100644 index 0000000000..15d007d07f --- /dev/null +++ b/tests/tcg/s390x/noexec.c @@ -0,0 +1,106 @@ +#include "../multiarch/noexec.c.inc" + +static void *arch_mcontext_pc(const mcontext_t *ctx) +{ + return (void *)ctx->psw.addr; +} + +static int arch_mcontext_arg(const mcontext_t *ctx) +{ + return ctx->gregs[2]; +} + +static void arch_flush(void *p, int len) +{ +} + +extern char noexec_1[]; +extern char noexec_2[]; +extern char noexec_end[]; + +asm("noexec_1:\n" + " lgfi %r2,1\n" /* %r2 is 0 on entry, set 1. */ + "noexec_2:\n" + " lgfi %r2,2\n" /* %r2 is 0/1; set 2. 
*/ + " br %r14\n" /* return */ + "noexec_end:"); + +extern char exrl_1[]; +extern char exrl_2[]; +extern char exrl_end[]; + +asm("exrl_1:\n" + " exrl %r0, exrl_2\n" + " br %r14\n" + "exrl_2:\n" + " lgfi %r2,2\n" + "exrl_end:"); + +int main(void) +{ + struct noexec_test noexec_tests[] = { + { + .name = "fallthrough", + .test_code = noexec_1, + .test_len = noexec_end - noexec_1, + .page_ofs = noexec_1 - noexec_2, + .entry_ofs = noexec_1 - noexec_2, + .expected_si_ofs = 0, + .expected_pc_ofs = 0, + .expected_arg = 1, + }, + { + .name = "jump", + .test_code = noexec_1, + .test_len = noexec_end - noexec_1, + .page_ofs = noexec_1 - noexec_2, + .entry_ofs = 0, + .expected_si_ofs = 0, + .expected_pc_ofs = 0, + .expected_arg = 0, + }, + { + .name = "exrl", + .test_code = exrl_1, + .test_len = exrl_end - exrl_1, + .page_ofs = exrl_1 - exrl_2, + .entry_ofs = exrl_1 - exrl_2, + .expected_si_ofs = 0, + .expected_pc_ofs = exrl_1 - exrl_2, + .expected_arg = 0, + }, + { + .name = "fallthrough [cross]", + .test_code = noexec_1, + .test_len = noexec_end - noexec_1, + .page_ofs = noexec_1 - noexec_2 - 2, + .entry_ofs = noexec_1 - noexec_2 - 2, + .expected_si_ofs = 0, + .expected_pc_ofs = -2, + .expected_arg = 1, + }, + { + .name = "jump [cross]", + .test_code = noexec_1, + .test_len = noexec_end - noexec_1, + .page_ofs = noexec_1 - noexec_2 - 2, + .entry_ofs = -2, + .expected_si_ofs = 0, + .expected_pc_ofs = -2, + .expected_arg = 0, + }, + { + .name = "exrl [cross]", + .test_code = exrl_1, + .test_len = exrl_end - exrl_1, + .page_ofs = exrl_1 - exrl_2 - 2, + .entry_ofs = exrl_1 - exrl_2 - 2, + .expected_si_ofs = 0, + .expected_pc_ofs = exrl_1 - exrl_2 - 2, + .expected_arg = 0, + }, + }; + + return test_noexec(noexec_tests, + sizeof(noexec_tests) / sizeof(noexec_tests[0])); +} diff --git a/tests/tcg/s390x/pack.c b/tests/tcg/s390x/pack.c index 4be36f29a7..55e7e214e8 100644 --- a/tests/tcg/s390x/pack.c +++ b/tests/tcg/s390x/pack.c @@ -9,7 +9,7 @@ int main(void) asm volatile( " pack 2(4,%[data]),2(4,%[data])\n" : - : [data] "r" (&data[0]) + : [data] "a" (&data[0]) : "memory"); for (i = 0; i < 8; i++) { if (data[i] != exp[i]) { diff --git a/tests/tcg/s390x/shift.c b/tests/tcg/s390x/shift.c new file mode 100644 index 0000000000..29594fec5c --- /dev/null +++ b/tests/tcg/s390x/shift.c @@ -0,0 +1,270 @@ +#include +#include +#include + +#define DEFINE_SHIFT_SINGLE_COMMON(_name, _insn_str) \ + static uint64_t _name(uint64_t op1, uint64_t op2, uint64_t *cc) \ + { \ + asm(" sll %[cc],28\n" \ + " spm %[cc]\n" \ + " " _insn_str "\n" \ + " ipm %[cc]\n" \ + " srl %[cc],28" \ + : [op1] "+&r" (op1), \ + [cc] "+&r" (*cc) \ + : [op2] "r" (op2) \ + : "cc"); \ + return op1; \ + } +#define DEFINE_SHIFT_SINGLE_2(_insn, _offset) \ + DEFINE_SHIFT_SINGLE_COMMON(_insn ## _ ## _offset, \ + #_insn " %[op1]," #_offset "(%[op2])") +#define DEFINE_SHIFT_SINGLE_3(_insn, _offset) \ + DEFINE_SHIFT_SINGLE_COMMON(_insn ## _ ## _offset, \ + #_insn " %[op1],%[op1]," #_offset "(%[op2])") +#define DEFINE_SHIFT_DOUBLE(_insn, _offset) \ + static uint64_t _insn ## _ ## _offset(uint64_t op1, uint64_t op2, \ + uint64_t *cc) \ + { \ + uint32_t op1h = op1 >> 32; \ + uint32_t op1l = op1 & 0xffffffff; \ + register uint32_t r2 asm("2") = op1h; \ + register uint32_t r3 asm("3") = op1l; \ + \ + asm(" sll %[cc],28\n" \ + " spm %[cc]\n" \ + " " #_insn " %[r2]," #_offset "(%[op2])\n" \ + " ipm %[cc]\n" \ + " srl %[cc],28" \ + : [r2] "+&r" (r2), \ + [r3] "+&r" (r3), \ + [cc] "+&r" (*cc) \ + : [op2] "r" (op2) \ + : "cc"); \ + op1h = r2; \ + op1l = r3; \ + return 
(((uint64_t)op1h) << 32) | op1l; \ + } + +DEFINE_SHIFT_SINGLE_3(rll, 0x4cf3b); +DEFINE_SHIFT_SINGLE_3(rllg, 0x697c9); +DEFINE_SHIFT_SINGLE_2(sla, 0x4b0); +DEFINE_SHIFT_SINGLE_2(sla, 0xd54); +DEFINE_SHIFT_SINGLE_3(slak, 0x2832c); +DEFINE_SHIFT_SINGLE_3(slag, 0x66cc4); +DEFINE_SHIFT_SINGLE_3(slag, 0xd54); +DEFINE_SHIFT_SINGLE_2(sll, 0xd04); +DEFINE_SHIFT_SINGLE_3(sllk, 0x2699f); +DEFINE_SHIFT_SINGLE_3(sllg, 0x59df9); +DEFINE_SHIFT_SINGLE_2(sra, 0x67e); +DEFINE_SHIFT_SINGLE_3(srak, 0x60943); +DEFINE_SHIFT_SINGLE_3(srag, 0x6b048); +DEFINE_SHIFT_SINGLE_2(srl, 0x035); +DEFINE_SHIFT_SINGLE_3(srlk, 0x43dfc); +DEFINE_SHIFT_SINGLE_3(srlg, 0x27227); +DEFINE_SHIFT_DOUBLE(slda, 0x38b); +DEFINE_SHIFT_DOUBLE(sldl, 0x031); +DEFINE_SHIFT_DOUBLE(srda, 0x36f); +DEFINE_SHIFT_DOUBLE(srdl, 0x99a); + +struct shift_test { + const char *name; + uint64_t (*insn)(uint64_t, uint64_t, uint64_t *); + uint64_t op1; + uint64_t op2; + uint64_t exp_result; + uint64_t exp_cc; +}; + +static const struct shift_test tests[] = { + { + .name = "rll", + .insn = rll_0x4cf3b, + .op1 = 0xecbd589a45c248f5ull, + .op2 = 0x62e5508ccb4c99fdull, + .exp_result = 0xecbd589af545c248ull, + .exp_cc = 0, + }, + { + .name = "rllg", + .insn = rllg_0x697c9, + .op1 = 0xaa2d54c1b729f7f4ull, + .op2 = 0x5ffcf7465f5cd71full, + .exp_result = 0x29f7f4aa2d54c1b7ull, + .exp_cc = 0, + }, + { + .name = "sla-1", + .insn = sla_0x4b0, + .op1 = 0x8bf21fb67cca0e96ull, + .op2 = 0x3ddf2f53347d3030ull, + .exp_result = 0x8bf21fb600000000ull, + .exp_cc = 3, + }, + { + .name = "sla-2", + .insn = sla_0xd54, + .op1 = 0xe4faaed5def0e926ull, + .op2 = 0x18d586fab239cbeeull, + .exp_result = 0xe4faaed5fbc3a498ull, + .exp_cc = 3, + }, + { + .name = "slak", + .insn = slak_0x2832c, + .op1 = 0x7300bf78707f09f9ull, + .op2 = 0x4d193b85bb5cb39bull, + .exp_result = 0x7300bf783f84fc80ull, + .exp_cc = 3, + }, + { + .name = "slag-1", + .insn = slag_0x66cc4, + .op1 = 0xe805966de1a77762ull, + .op2 = 0x0e92953f6aa91c6bull, + .exp_result = 0xbbb1000000000000ull, + .exp_cc = 3, + }, + { + .name = "slag-2", + .insn = slag_0xd54, + .op1 = 0xdef0e92600000000ull, + .op2 = 0x18d586fab239cbeeull, + .exp_result = 0xfbc3a49800000000ull, + .exp_cc = 3, + }, + { + .name = "sll", + .insn = sll_0xd04, + .op1 = 0xb90281a3105939dfull, + .op2 = 0xb5e4df7e082e4c5eull, + .exp_result = 0xb90281a300000000ull, + .exp_cc = 0, + }, + { + .name = "sllk", + .insn = sllk_0x2699f, + .op1 = 0x777c6cf116f99557ull, + .op2 = 0xe0556cf112e5a458ull, + .exp_result = 0x777c6cf100000000ull, + .exp_cc = 0, + }, + { + .name = "sllg", + .insn = sllg_0x59df9, + .op1 = 0xcdf86cbfbc0f3557ull, + .op2 = 0x325a45acf99c6d3dull, + .exp_result = 0x55c0000000000000ull, + .exp_cc = 0, + }, + { + .name = "sra", + .insn = sra_0x67e, + .op1 = 0xb878f048d5354183ull, + .op2 = 0x9e27d13195931f79ull, + .exp_result = 0xb878f048ffffffffull, + .exp_cc = 1, + }, + { + .name = "srak", + .insn = srak_0x60943, + .op1 = 0xb6ceb5a429cedb35ull, + .op2 = 0x352354900ae34d7aull, + .exp_result = 0xb6ceb5a400000000ull, + .exp_cc = 0, + }, + { + .name = "srag", + .insn = srag_0x6b048, + .op1 = 0xd54dd4468676c63bull, + .op2 = 0x84d026db7b4dca28ull, + .exp_result = 0xffffffffffffd54dull, + .exp_cc = 1, + }, + { + .name = "srl", + .insn = srl_0x035, + .op1 = 0x09be503ef826815full, + .op2 = 0xbba8d1a0e542d5c1ull, + .exp_result = 0x9be503e00000000ull, + .exp_cc = 0, + }, + { + .name = "srlk", + .insn = srlk_0x43dfc, + .op1 = 0x540d6c8de71aee2aull, + .op2 = 0x0000000000000000ull, + .exp_result = 0x540d6c8d00000000ull, + .exp_cc = 0, + }, + { + .name = "srlg", + .insn 
= srlg_0x27227, + .op1 = 0x26f7123c1c447a34ull, + .op2 = 0x0000000000000000ull, + .exp_result = 0x00000000004dee24ull, + .exp_cc = 0, + }, + { + .name = "slda", + .insn = slda_0x38b, + .op1 = 0x7988f722dd5bbe7cull, + .op2 = 0x9aed3f95b4d78cc2ull, + .exp_result = 0x1ee45bab77cf8000ull, + .exp_cc = 3, + }, + { + .name = "sldl", + .insn = sldl_0x031, + .op1 = 0xaae2918dce2b049aull, + .op2 = 0x0000000000000000ull, + .exp_result = 0x0934000000000000ull, + .exp_cc = 0, + }, + { + .name = "srda", + .insn = srda_0x36f, + .op1 = 0x0cd4ed9228a50978ull, + .op2 = 0x72b046f0848b8cc9ull, + .exp_result = 0x000000000000000cull, + .exp_cc = 2, + }, + { + .name = "srdl", + .insn = srdl_0x99a, + .op1 = 0x1018611c41689a1dull, + .op2 = 0x2907e150c50ba319ull, + .exp_result = 0x0000000000000203ull, + .exp_cc = 0, + }, +}; + +int main(void) +{ + int ret = 0; + size_t i; + + for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + uint64_t result; + uint64_t cc = 0; + + result = tests[i].insn(tests[i].op1, tests[i].op2, &cc); + if (result != tests[i].exp_result) { + fprintf(stderr, + "bad %s result:\n" + "actual = 0x%" PRIx64 "\n" + "expected = 0x%" PRIx64 "\n", + tests[i].name, result, tests[i].exp_result); + ret = 1; + } + if (cc != tests[i].exp_cc) { + fprintf(stderr, + "bad %s cc:\n" + "actual = %" PRIu64 "\n" + "expected = %" PRIu64 "\n", + tests[i].name, cc, tests[i].exp_cc); + ret = 1; + } + } + return ret; +} diff --git a/tests/tcg/s390x/signals-s390x.c b/tests/tcg/s390x/signals-s390x.c new file mode 100644 index 0000000000..48c3b6cdfd --- /dev/null +++ b/tests/tcg/s390x/signals-s390x.c @@ -0,0 +1,206 @@ +#include +#include +#include +#include +#include +#include +#include + +/* + * Various instructions that generate SIGILL and SIGSEGV. They could have been + * defined in a separate .s file, but this would complicate the build, so the + * inline asm is used instead. 
+ */ + +#define DEFINE_ASM_FUNCTION(name, body) \ + asm(".globl " #name "\n" \ + #name ":\n" \ + ".cfi_startproc\n" \ + body "\n" \ + "br %r14\n" \ + ".cfi_endproc"); + +void illegal_op(void); +extern const char after_illegal_op; +DEFINE_ASM_FUNCTION(illegal_op, + ".byte 0x00,0x00\n" + ".globl after_illegal_op\n" + "after_illegal_op:") + +void stg(void *dst, unsigned long src); +DEFINE_ASM_FUNCTION(stg, "stg %r3,0(%r2)") + +void mvc_8(void *dst, void *src); +DEFINE_ASM_FUNCTION(mvc_8, "mvc 0(8,%r2),0(%r3)") + +extern const char return_from_main_1; + +static void safe_puts(const char *s) +{ + write(0, s, strlen(s)); + write(0, "\n", 1); +} + +enum exception { + exception_operation, + exception_translation, + exception_protection, +}; + +static struct { + int sig; + void *addr; + unsigned long psw_addr; + enum exception exception; +} expected; + +static void handle_signal(int sig, siginfo_t *info, void *ucontext) +{ + int err, i, n_frames; + void *frames[16]; + void *page; + + if (sig != expected.sig) { + safe_puts("[ FAILED ] wrong signal"); + _exit(1); + } + + if (info->si_addr != expected.addr) { + safe_puts("[ FAILED ] wrong si_addr"); + _exit(1); + } + + if (((ucontext_t *)ucontext)->uc_mcontext.psw.addr != expected.psw_addr) { + safe_puts("[ FAILED ] wrong psw.addr"); + _exit(1); + } + + switch (expected.exception) { + case exception_translation: + page = mmap(expected.addr, 4096, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0); + if (page != expected.addr) { + safe_puts("[ FAILED ] mmap() failed"); + _exit(1); + } + break; + case exception_protection: + err = mprotect(expected.addr, 4096, PROT_READ | PROT_WRITE); + if (err != 0) { + safe_puts("[ FAILED ] mprotect() failed"); + _exit(1); + } + break; + default: + break; + } + + n_frames = backtrace(frames, sizeof(frames) / sizeof(frames[0])); + for (i = 0; i < n_frames; i++) { + if (frames[i] == &return_from_main_1) { + break; + } + } + if (i == n_frames) { + safe_puts("[ FAILED ] backtrace() is broken"); + _exit(1); + } +} + +static void check_sigsegv(void *func, enum exception exception, + unsigned long val) +{ + int prot; + unsigned long *page; + unsigned long *addr; + int err; + + prot = exception == exception_translation ? PROT_NONE : PROT_READ; + page = mmap(NULL, 4096, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + assert(page != MAP_FAILED); + if (exception == exception_translation) { + /* Hopefully nothing will be mapped at this address. 
*/ + err = munmap(page, 4096); + assert(err == 0); + } + addr = page + (val & 0x1ff); + + expected.sig = SIGSEGV; + expected.addr = page; + expected.psw_addr = (unsigned long)func; + expected.exception = exception; + if (func == stg) { + stg(addr, val); + } else { + assert(func == mvc_8); + mvc_8(addr, &val); + } + assert(*addr == val); + + err = munmap(page, 4096); + assert(err == 0); +} + +int main_1(void) +{ + struct sigaction act; + int err; + + memset(&act, 0, sizeof(act)); + act.sa_sigaction = handle_signal; + act.sa_flags = SA_SIGINFO; + err = sigaction(SIGILL, &act, NULL); + assert(err == 0); + err = sigaction(SIGSEGV, &act, NULL); + assert(err == 0); + + safe_puts("[ RUN ] Operation exception"); + expected.sig = SIGILL; + expected.addr = illegal_op; + expected.psw_addr = (unsigned long)&after_illegal_op; + expected.exception = exception_operation; + illegal_op(); + safe_puts("[ OK ]"); + + safe_puts("[ RUN ] Translation exception from stg"); + check_sigsegv(stg, exception_translation, 42); + safe_puts("[ OK ]"); + + safe_puts("[ RUN ] Translation exception from mvc"); + check_sigsegv(mvc_8, exception_translation, 4242); + safe_puts("[ OK ]"); + + safe_puts("[ RUN ] Protection exception from stg"); + check_sigsegv(stg, exception_protection, 424242); + safe_puts("[ OK ]"); + + safe_puts("[ RUN ] Protection exception from mvc"); + check_sigsegv(mvc_8, exception_protection, 42424242); + safe_puts("[ OK ]"); + + safe_puts("[ PASSED ]"); + + _exit(0); +} + +/* + * Define main() in assembly in order to test that unwinding from signal + * handlers until main() works. This way we can define a specific point that + * the unwinder should reach. This is also better than defining main() in C + * and using inline assembly to call main_1(), since it's not easy to get all + * the clobbers right. + */ + +DEFINE_ASM_FUNCTION(main, + "stmg %r14,%r15,112(%r15)\n" + ".cfi_offset 14,-48\n" + ".cfi_offset 15,-40\n" + "lay %r15,-160(%r15)\n" + ".cfi_def_cfa_offset 320\n" + "brasl %r14,main_1\n" + ".globl return_from_main_1\n" + "return_from_main_1:\n" + "lmg %r14,%r15,272(%r15)\n" + ".cfi_restore 15\n" + ".cfi_restore 14\n" + ".cfi_def_cfa_offset 160"); diff --git a/tests/tcg/s390x/unaligned-lowcore.S b/tests/tcg/s390x/unaligned-lowcore.S new file mode 100644 index 0000000000..f5da2ae64c --- /dev/null +++ b/tests/tcg/s390x/unaligned-lowcore.S @@ -0,0 +1,19 @@ + .org 0x1D0 /* program new PSW */ + .quad 0x2000000000000,0 /* disabled wait */ + .org 0x200 /* lowcore padding */ + + .globl _start +_start: + lctlg %c0,%c0,_c0 + vst %v0,_unaligned + lpswe quiesce_psw + + .align 8 +quiesce_psw: + .quad 0x2000000000000,0xfff /* see is_special_wait_psw() */ +_c0: + .quad 0x10060000 /* lowcore protection, AFP, VX */ + + .byte 0 +_unaligned: + .octa 0 diff --git a/tests/tcg/s390x/vfminmax.c b/tests/tcg/s390x/vfminmax.c new file mode 100644 index 0000000000..22629df160 --- /dev/null +++ b/tests/tcg/s390x/vfminmax.c @@ -0,0 +1,411 @@ +#define _GNU_SOURCE +#include +#include +#include +#include + +/* + * vfmin/vfmax instruction execution. 
+ */ +#define VFMIN 0xEE +#define VFMAX 0xEF + +extern char insn[6]; +asm(".pushsection .rwx,\"awx\",@progbits\n" + ".globl insn\n" + /* e7 89 a0 00 2e ef */ + "insn: vfmaxsb %v24,%v25,%v26,0\n" + ".popsection\n"); + +static void vfminmax(unsigned int op, + unsigned int m4, unsigned int m5, unsigned int m6, + void *v1, const void *v2, const void *v3) +{ + insn[3] = (m6 << 4) | m5; + insn[4] = (m4 << 4) | 0x0e; + insn[5] = op; + + asm("vl %%v25,%[v2]\n" + "vl %%v26,%[v3]\n" + "ex 0,%[insn]\n" + "vst %%v24,%[v1]\n" + : [v1] "=m" (*(char (*)[16])v1) + : [v2] "m" (*(char (*)[16])v2) + , [v3] "m" (*(char (*)[16])v3) + , [insn] "m"(insn) + : "v24", "v25", "v26"); +} + +/* + * Floating-point value classes. + */ +#define N_FORMATS 3 +#define N_SIGNED_CLASSES 8 +static const size_t float_sizes[N_FORMATS] = { + /* M4 == 2: short */ 4, + /* M4 == 3: long */ 8, + /* M4 == 4: extended */ 16, +}; +static const size_t e_bits[N_FORMATS] = { + /* M4 == 2: short */ 8, + /* M4 == 3: long */ 11, + /* M4 == 4: extended */ 15, +}; +static const unsigned char signed_floats[N_FORMATS][N_SIGNED_CLASSES][2][16] = { + /* M4 == 2: short */ + { + /* -inf */ {{0xff, 0x80, 0x00, 0x00}, + {0xff, 0x80, 0x00, 0x00}}, + /* -Fn */ {{0xc2, 0x28, 0x00, 0x00}, + {0xc2, 0x29, 0x00, 0x00}}, + /* -0 */ {{0x80, 0x00, 0x00, 0x00}, + {0x80, 0x00, 0x00, 0x00}}, + /* +0 */ {{0x00, 0x00, 0x00, 0x00}, + {0x00, 0x00, 0x00, 0x00}}, + /* +Fn */ {{0x42, 0x28, 0x00, 0x00}, + {0x42, 0x2a, 0x00, 0x00}}, + /* +inf */ {{0x7f, 0x80, 0x00, 0x00}, + {0x7f, 0x80, 0x00, 0x00}}, + /* QNaN */ {{0x7f, 0xff, 0xff, 0xff}, + {0x7f, 0xff, 0xff, 0xfe}}, + /* SNaN */ {{0x7f, 0xbf, 0xff, 0xff}, + {0x7f, 0xbf, 0xff, 0xfd}}, + }, + + /* M4 == 3: long */ + { + /* -inf */ {{0xff, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0xff, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* -Fn */ {{0xc0, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0xc0, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* -0 */ {{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* +0 */ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* +Fn */ {{0x40, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x40, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* +inf */ {{0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* QNaN */ {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + {0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}}, + /* SNaN */ {{0x7f, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + {0x7f, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd}}, + }, + + /* M4 == 4: extended */ + { + /* -inf */ {{0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* -Fn */ {{0xc0, 0x04, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0xc0, 0x04, 0x51, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* -0 */ {{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* +0 */ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* +Fn */ {{0x40, 0x04, 
0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x40, 0x04, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* +inf */ {{0x7f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x7f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* QNaN */ {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + {0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}}, + /* SNaN */ {{0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + {0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd}}, + }, +}; + +/* + * PoP tables as close to the original as possible. + */ +struct signed_test { + int op; + int m6; + const char *m6_desc; + const char *table[N_SIGNED_CLASSES][N_SIGNED_CLASSES]; +} signed_tests[] = { + { + .op = VFMIN, + .m6 = 0, + .m6_desc = "IEEE MinNum", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(b*)"}, + {/* -Fn */ "T(b)", "T(M(a,b))", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(b*)"}, + {/* -0 */ "T(b)", "T(b)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(b*)"}, + {/* +0 */ "T(b)", "T(b)", "T(b)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(b*)"}, + {/* +Fn */ "T(b)", "T(b)", "T(b)", "T(b)", "T(M(a,b))", "T(a)", "T(a)", "Xi: T(b*)"}, + {/* +inf */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "T(a)", "Xi: T(b*)"}, + {/* QNaN */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(b*)"}, + {/* SNaN */ "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)"}, + }, + }, + { + .op = VFMIN, + .m6 = 1, + .m6_desc = "JAVA Math.Min()", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(b)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(b)", "Xi: T(b*)"}, + {/* -Fn */ "T(b)", "T(M(a,b))", "T(a)", "T(a)", "T(a)", "T(a)", "T(b)", "Xi: T(b*)"}, + {/* -0 */ "T(b)", "T(b)", "T(b)", "T(a)", "T(a)", "T(a)", "T(b)", "Xi: T(b*)"}, + {/* +0 */ "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "T(a)", "T(b)", "Xi: T(b*)"}, + {/* +Fn */ "T(b)", "T(b)", "T(b)", "T(b)", "T(M(a,b))", "T(a)", "T(b)", "Xi: T(b*)"}, + {/* +inf */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(b*)"}, + {/* QNaN */ "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(b*)"}, + {/* SNaN */ "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)"}, + }, + }, + { + .op = VFMIN, + .m6 = 2, + .m6_desc = "C-style Min Macro", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(b)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(b)", "Xi: T(b)"}, + {/* -Fn */ "T(b)", "T(M(a,b))", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(b)", "Xi: T(b)"}, + {/* -0 */ "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "T(a)", "Xi: T(b)", "Xi: T(b)"}, + {/* +0 */ "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "T(a)", "Xi: T(b)", "Xi: T(b)"}, + {/* +Fn */ "T(b)", "T(b)", "T(b)", "T(b)", "T(M(a,b))", "T(a)", "Xi: T(b)", "Xi: T(b)"}, + {/* +inf */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(b)", "Xi: T(b)"}, + {/* QNaN */ "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)"}, + {/* SNaN */ "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: 
T(b)", "Xi: T(b)", "Xi: T(b)"}, + }, + }, + { + .op = VFMIN, + .m6 = 3, + .m6_desc = "C++ algorithm.min()", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(b)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)", "Xi: T(a)"}, + {/* -Fn */ "T(b)", "T(M(a,b))", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)", "Xi: T(a)"}, + {/* -0 */ "T(b)", "T(b)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)", "Xi: T(a)"}, + {/* +0 */ "T(b)", "T(b)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)", "Xi: T(a)"}, + {/* +Fn */ "T(b)", "T(b)", "T(b)", "T(b)", "T(M(a,b))", "T(a)", "Xi: T(a)", "Xi: T(a)"}, + {/* +inf */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(a)", "Xi: T(a)"}, + {/* QNaN */ "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)"}, + {/* SNaN */ "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)"}, + }, + }, + { + .op = VFMIN, + .m6 = 4, + .m6_desc = "fmin()", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)"}, + {/* -Fn */ "T(b)", "T(M(a,b))", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)"}, + {/* -0 */ "T(b)", "T(b)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)"}, + {/* +0 */ "T(b)", "T(b)", "T(b)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)"}, + {/* +Fn */ "T(b)", "T(b)", "T(b)", "T(b)", "T(M(a,b))", "T(a)", "T(a)", "Xi: T(a)"}, + {/* +inf */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "T(a)", "Xi: T(a)"}, + {/* QNaN */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(a)"}, + {/* SNaN */ "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(a)", "Xi: T(a)"}, + }, + }, + + { + .op = VFMAX, + .m6 = 0, + .m6_desc = "IEEE MaxNum", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(a)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(b*)"}, + {/* -Fn */ "T(a)", "T(M(a,b))", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(b*)"}, + {/* -0 */ "T(a)", "T(a)", "T(a)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(b*)"}, + {/* +0 */ "T(a)", "T(a)", "T(a)", "T(a)", "T(b)", "T(b)", "T(a)", "Xi: T(b*)"}, + {/* +Fn */ "T(a)", "T(a)", "T(a)", "T(a)", "T(M(a,b))", "T(b)", "T(a)", "Xi: T(b*)"}, + {/* +inf */ "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(b*)"}, + {/* QNaN */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(b*)"}, + {/* SNaN */ "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)"}, + }, + }, + { + .op = VFMAX, + .m6 = 1, + .m6_desc = "JAVA Math.Max()", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(a)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(b*)"}, + {/* -Fn */ "T(a)", "T(M(a,b))", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(b*)"}, + {/* -0 */ "T(a)", "T(a)", "T(a)", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(b*)"}, + {/* +0 */ "T(a)", "T(a)", "T(a)", "T(a)", "T(b)", "T(b)", "T(b)", "Xi: T(b*)"}, + {/* +Fn */ "T(a)", "T(a)", "T(a)", "T(a)", "T(M(a,b))", "T(b)", "T(b)", "Xi: T(b*)"}, + {/* +inf */ "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(b)", "Xi: T(b*)"}, + {/* QNaN */ "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(b*)"}, + {/* SNaN */ "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)"}, + }, + }, + { + .op = VFMAX, + .m6 = 2, + .m6_desc = "C-style Max Macro", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(b)", 
"T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(b)", "Xi: T(b)"}, + {/* -Fn */ "T(a)", "T(M(a,b))", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(b)", "Xi: T(b)"}, + {/* -0 */ "T(a)", "T(a)", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(b)", "Xi: T(b)"}, + {/* +0 */ "T(a)", "T(a)", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(b)", "Xi: T(b)"}, + {/* +Fn */ "T(a)", "T(a)", "T(a)", "T(a)", "T(M(a,b))", "T(b)", "Xi: T(b)", "Xi: T(b)"}, + {/* +inf */ "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(b)", "Xi: T(b)", "Xi: T(b)"}, + {/* QNaN */ "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)"}, + {/* SNaN */ "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)"}, + }, + }, + { + .op = VFMAX, + .m6 = 3, + .m6_desc = "C++ algorithm.max()", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(a)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(a)", "Xi: T(a)"}, + {/* -Fn */ "T(a)", "T(M(a,b))", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(a)", "Xi: T(a)"}, + {/* -0 */ "T(a)", "T(a)", "T(a)", "T(a)", "T(b)", "T(b)", "Xi: T(a)", "Xi: T(a)"}, + {/* +0 */ "T(a)", "T(a)", "T(a)", "T(a)", "T(b)", "T(b)", "Xi: T(a)", "Xi: T(a)"}, + {/* +Fn */ "T(a)", "T(a)", "T(a)", "T(a)", "T(M(a,b))", "T(b)", "Xi: T(a)", "Xi: T(a)"}, + {/* +inf */ "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)", "Xi: T(a)"}, + {/* QNaN */ "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)"}, + {/* SNaN */ "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)"}, + }, + }, + { + .op = VFMAX, + .m6 = 4, + .m6_desc = "fmax()", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(a)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(a)"}, + {/* -Fn */ "T(a)", "T(M(a,b))", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(a)"}, + {/* -0 */ "T(a)", "T(a)", "T(a)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(a)"}, + {/* +0 */ "T(a)", "T(a)", "T(a)", "T(a)", "T(b)", "T(b)", "T(a)", "Xi: T(a)"}, + {/* +Fn */ "T(a)", "T(a)", "T(a)", "T(a)", "T(M(a,b))", "T(b)", "T(a)", "Xi: T(a)"}, + {/* +inf */ "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)"}, + {/* QNaN */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(a)"}, + {/* SNaN */ "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(a)", "Xi: T(a)"}, + }, + }, +}; + +static void dump_v(FILE *f, const void *v, size_t n) +{ + for (int i = 0; i < n; i++) { + fprintf(f, "%02x", ((const unsigned char *)v)[i]); + } +} + +static int signed_test(struct signed_test *test, int m4, int m5, + const void *v1_exp, bool xi_exp, + const void *v2, const void *v3) +{ + size_t n = (m5 & 8) ? 
float_sizes[m4 - 2] : 16; + char v1[16]; + bool xi; + + feclearexcept(FE_ALL_EXCEPT); + vfminmax(test->op, m4, m5, test->m6, v1, v2, v3); + xi = fetestexcept(FE_ALL_EXCEPT) == FE_INVALID; + + if (memcmp(v1, v1_exp, n) != 0 || xi != xi_exp) { + fprintf(stderr, "[ FAILED ] %s ", test->m6_desc); + dump_v(stderr, v2, n); + fprintf(stderr, ", "); + dump_v(stderr, v3, n); + fprintf(stderr, ", %d, %d, %d: actual=", m4, m5, test->m6); + dump_v(stderr, v1, n); + fprintf(stderr, "/%d, expected=", (int)xi); + dump_v(stderr, v1_exp, n); + fprintf(stderr, "/%d\n", (int)xi_exp); + return 1; + } + + return 0; +} + +static void snan_to_qnan(char *v, int m4) +{ + size_t bit = 1 + e_bits[m4 - 2]; + v[bit / 8] |= 1 << (7 - (bit % 8)); +} + +int main(void) +{ + int ret = 0; + size_t i; + + for (i = 0; i < sizeof(signed_tests) / sizeof(signed_tests[0]); i++) { + struct signed_test *test = &signed_tests[i]; + int m4; + + for (m4 = 2; m4 <= 4; m4++) { + const unsigned char (*floats)[2][16] = signed_floats[m4 - 2]; + size_t float_size = float_sizes[m4 - 2]; + int m5; + + for (m5 = 0; m5 <= 8; m5 += 8) { + char v1_exp[16], v2[16], v3[16]; + bool xi_exp = false; + int pos = 0; + int i2; + + for (i2 = 0; i2 < N_SIGNED_CLASSES * 2; i2++) { + int i3; + + for (i3 = 0; i3 < N_SIGNED_CLASSES * 2; i3++) { + const char *spec = test->table[i2 / 2][i3 / 2]; + + memcpy(&v2[pos], floats[i2 / 2][i2 % 2], float_size); + memcpy(&v3[pos], floats[i3 / 2][i3 % 2], float_size); + if (strcmp(spec, "T(a)") == 0 || + strcmp(spec, "Xi: T(a)") == 0) { + memcpy(&v1_exp[pos], &v2[pos], float_size); + } else if (strcmp(spec, "T(b)") == 0 || + strcmp(spec, "Xi: T(b)") == 0) { + memcpy(&v1_exp[pos], &v3[pos], float_size); + } else if (strcmp(spec, "Xi: T(a*)") == 0) { + memcpy(&v1_exp[pos], &v2[pos], float_size); + snan_to_qnan(&v1_exp[pos], m4); + } else if (strcmp(spec, "Xi: T(b*)") == 0) { + memcpy(&v1_exp[pos], &v3[pos], float_size); + snan_to_qnan(&v1_exp[pos], m4); + } else if (strcmp(spec, "T(M(a,b))") == 0) { + /* + * Comparing floats is risky, since the compiler + * might generate the same instruction that we are + * testing. Compare ints instead. This works, + * because we get here only for +-Fn, and the + * corresponding test values have identical + * exponents. 
+ */ + int v2_int = *(int *)&v2[pos]; + int v3_int = *(int *)&v3[pos]; + + if ((v2_int < v3_int) == + ((test->op == VFMIN) != (v2_int < 0))) { + memcpy(&v1_exp[pos], &v2[pos], float_size); + } else { + memcpy(&v1_exp[pos], &v3[pos], float_size); + } + } else { + fprintf(stderr, "Unexpected spec: %s\n", spec); + return 1; + } + xi_exp |= spec[0] == 'X'; + pos += float_size; + + if ((m5 & 8) || pos == 16) { + ret |= signed_test(test, m4, m5, + v1_exp, xi_exp, v2, v3); + pos = 0; + xi_exp = false; + } + } + } + + if (pos != 0) { + ret |= signed_test(test, m4, m5, v1_exp, xi_exp, v2, v3); + } + } + } + } + + return ret; +} diff --git a/tests/tcg/s390x/vistr.c b/tests/tcg/s390x/vistr.c new file mode 100644 index 0000000000..8e3e987d71 --- /dev/null +++ b/tests/tcg/s390x/vistr.c @@ -0,0 +1,45 @@ +/* + * Test the VECTOR ISOLATE STRING (vistr) instruction + */ +#include +#include +#include "vx.h" + +static inline void vistr(S390Vector *v1, S390Vector *v2, + const uint8_t m3, const uint8_t m5) +{ + asm volatile("vistr %[v1], %[v2], %[m3], %[m5]\n" + : [v1] "=v" (v1->v) + : [v2] "v" (v2->v) + , [m3] "i" (m3) + , [m5] "i" (m5) + : "cc"); +} + +int main(int argc, char *argv[]) +{ + S390Vector vd = {}; + S390Vector vs16 = { + .h[0] = 0x1234, .h[1] = 0x0056, .h[2] = 0x7800, .h[3] = 0x0000, + .h[4] = 0x0078, .h[5] = 0x0000, .h[6] = 0x6543, .h[7] = 0x2100 + }; + S390Vector vs32 = { + .w[0] = 0x12340000, .w[1] = 0x78654300, + .w[2] = 0x0, .w[3] = 0x12, + }; + + vistr(&vd, &vs16, 1, 0); + if (vd.h[0] != 0x1234 || vd.h[1] != 0x0056 || vd.h[2] != 0x7800 || + vd.h[3] || vd.h[4] || vd.h[5] || vd.h[6] || vd.h[7]) { + puts("ERROR: vitrh failed!"); + return 1; + } + + vistr(&vd, &vs32, 2, 0); + if (vd.w[0] != 0x12340000 || vd.w[1] != 0x78654300 || vd.w[2] || vd.w[3]) { + puts("ERROR: vitrf failed!"); + return 1; + } + + return 0; +} diff --git a/tests/tcg/s390x/vx.h b/tests/tcg/s390x/vx.h new file mode 100644 index 0000000000..02e7fd518a --- /dev/null +++ b/tests/tcg/s390x/vx.h @@ -0,0 +1,19 @@ +#ifndef QEMU_TESTS_S390X_VX_H +#define QEMU_TESTS_S390X_VX_H + +typedef union S390Vector { + uint64_t d[2]; /* doubleword */ + uint32_t w[4]; /* word */ + uint16_t h[8]; /* halfword */ + uint8_t b[16]; /* byte */ + float f[4]; /* float32 */ + double fd[2]; /* float64 */ + __uint128_t v; +} S390Vector; + +#define ES8 0 +#define ES16 1 +#define ES32 2 +#define ES64 3 + +#endif /* QEMU_TESTS_S390X_VX_H */ diff --git a/tests/tcg/s390x/vxeh2_vcvt.c b/tests/tcg/s390x/vxeh2_vcvt.c new file mode 100644 index 0000000000..d6e551c16e --- /dev/null +++ b/tests/tcg/s390x/vxeh2_vcvt.c @@ -0,0 +1,88 @@ +/* + * vxeh2_vcvt: vector-enhancements facility 2 vector convert * + */ +#include +#include "vx.h" + +#define M_S 8 +#define M4_XxC 4 +#define M4_def M4_XxC + +static inline void vcfps(S390Vector *v1, S390Vector *v2, + const uint8_t m3, const uint8_t m4, const uint8_t m5) +{ + asm volatile("vcfps %[v1], %[v2], %[m3], %[m4], %[m5]\n" + : [v1] "=v" (v1->v) + : [v2] "v" (v2->v) + , [m3] "i" (m3) + , [m4] "i" (m4) + , [m5] "i" (m5)); +} + +static inline void vcfpl(S390Vector *v1, S390Vector *v2, + const uint8_t m3, const uint8_t m4, const uint8_t m5) +{ + asm volatile("vcfpl %[v1], %[v2], %[m3], %[m4], %[m5]\n" + : [v1] "=v" (v1->v) + : [v2] "v" (v2->v) + , [m3] "i" (m3) + , [m4] "i" (m4) + , [m5] "i" (m5)); +} + +static inline void vcsfp(S390Vector *v1, S390Vector *v2, + const uint8_t m3, const uint8_t m4, const uint8_t m5) +{ + asm volatile("vcsfp %[v1], %[v2], %[m3], %[m4], %[m5]\n" + : [v1] "=v" (v1->v) + : [v2] "v" (v2->v) + , [m3] "i" 
(m3) + , [m4] "i" (m4) + , [m5] "i" (m5)); +} + +static inline void vclfp(S390Vector *v1, S390Vector *v2, + const uint8_t m3, const uint8_t m4, const uint8_t m5) +{ + asm volatile("vclfp %[v1], %[v2], %[m3], %[m4], %[m5]\n" + : [v1] "=v" (v1->v) + : [v2] "v" (v2->v) + , [m3] "i" (m3) + , [m4] "i" (m4) + , [m5] "i" (m5)); +} + +int main(int argc, char *argv[]) +{ + S390Vector vd; + S390Vector vs_i32 = { .w[0] = 1, .w[1] = 64, .w[2] = 1024, .w[3] = -10 }; + S390Vector vs_u32 = { .w[0] = 2, .w[1] = 32, .w[2] = 4096, .w[3] = 8888 }; + S390Vector vs_f32 = { .f[0] = 3.987, .f[1] = 5.123, + .f[2] = 4.499, .f[3] = 0.512 }; + + vd.d[0] = vd.d[1] = 0; + vcfps(&vd, &vs_i32, 2, M4_def, 0); + if (1 != vd.f[0] || 1024 != vd.f[2] || 64 != vd.f[1] || -10 != vd.f[3]) { + return 1; + } + + vd.d[0] = vd.d[1] = 0; + vcfpl(&vd, &vs_u32, 2, M4_def, 0); + if (2 != vd.f[0] || 4096 != vd.f[2] || 32 != vd.f[1] || 8888 != vd.f[3]) { + return 1; + } + + vd.d[0] = vd.d[1] = 0; + vcsfp(&vd, &vs_f32, 2, M4_def, 0); + if (4 != vd.w[0] || 4 != vd.w[2] || 5 != vd.w[1] || 1 != vd.w[3]) { + return 1; + } + + vd.d[0] = vd.d[1] = 0; + vclfp(&vd, &vs_f32, 2, M4_def, 0); + if (4 != vd.w[0] || 4 != vd.w[2] || 5 != vd.w[1] || 1 != vd.w[3]) { + return 1; + } + + return 0; +} diff --git a/tests/tcg/s390x/vxeh2_vlstr.c b/tests/tcg/s390x/vxeh2_vlstr.c new file mode 100644 index 0000000000..cf971150cf --- /dev/null +++ b/tests/tcg/s390x/vxeh2_vlstr.c @@ -0,0 +1,139 @@ +/* + * vxeh2_vlstr: vector-enhancements facility 2 vector load/store reversed * + */ +#include +#include "vx.h" + +#define vtst(v1, v2) \ + if (v1.d[0] != v2.d[0] || v1.d[1] != v2.d[1]) { \ + return 1; \ + } + +static inline void vler(S390Vector *v1, const void *va, uint8_t m3) +{ + asm volatile("vler %[v1], 0(%[va]), %[m3]\n" + : [v1] "+v" (v1->v) + : [va] "a" (va) + , [m3] "i" (m3) + : "memory"); +} + +static inline void vster(S390Vector *v1, const void *va, uint8_t m3) +{ + asm volatile("vster %[v1], 0(%[va]), %[m3]\n" + : [va] "+a" (va) + : [v1] "v" (v1->v) + , [m3] "i" (m3) + : "memory"); +} + +static inline void vlbr(S390Vector *v1, void *va, const uint8_t m3) +{ + asm volatile("vlbr %[v1], 0(%[va]), %[m3]\n" + : [v1] "+v" (v1->v) + : [va] "a" (va) + , [m3] "i" (m3) + : "memory"); +} + +static inline void vstbr(S390Vector *v1, void *va, const uint8_t m3) +{ + asm volatile("vstbr %[v1], 0(%[va]), %[m3]\n" + : [va] "+a" (va) + : [v1] "v" (v1->v) + , [m3] "i" (m3) + : "memory"); +} + + +static inline void vlebrh(S390Vector *v1, void *va, const uint8_t m3) +{ + asm volatile("vlebrh %[v1], 0(%[va]), %[m3]\n" + : [v1] "+v" (v1->v) + : [va] "a" (va) + , [m3] "i" (m3) + : "memory"); +} + +static inline void vstebrh(S390Vector *v1, void *va, const uint8_t m3) +{ + asm volatile("vstebrh %[v1], 0(%[va]), %[m3]\n" + : [va] "+a" (va) + : [v1] "v" (v1->v) + , [m3] "i" (m3) + : "memory"); +} + +static inline void vllebrz(S390Vector *v1, void *va, const uint8_t m3) +{ + asm volatile("vllebrz %[v1], 0(%[va]), %[m3]\n" + : [v1] "+v" (v1->v) + : [va] "a" (va) + , [m3] "i" (m3) + : "memory"); +} + +static inline void vlbrrep(S390Vector *v1, void *va, const uint8_t m3) +{ + asm volatile("vlbrrep %[v1], 0(%[va]), %[m3]\n" + : [v1] "+v" (v1->v) + : [va] "a" (va) + , [m3] "i" (m3) + : "memory"); +} + +int main(int argc, char *argv[]) +{ + S390Vector vd = { .d[0] = 0, .d[1] = 0 }; + S390Vector vs = { .d[0] = 0x8FEEDDCCBBAA9988ull, + .d[1] = 0x7766554433221107ull }; + + const S390Vector vt_v_er16 = { + .h[0] = 0x1107, .h[1] = 0x3322, .h[2] = 0x5544, .h[3] = 0x7766, + .h[4] = 0x9988, .h[5] = 
0xBBAA, .h[6] = 0xDDCC, .h[7] = 0x8FEE };
+
+    const S390Vector vt_v_br16 = {
+        .h[0] = 0xEE8F, .h[1] = 0xCCDD, .h[2] = 0xAABB, .h[3] = 0x8899,
+        .h[4] = 0x6677, .h[5] = 0x4455, .h[6] = 0x2233, .h[7] = 0x0711 };
+
+    int ix;
+    uint64_t ss64 = 0xFEEDFACE0BADBEEFull, sd64 = 0;
+
+    vler(&vd, &vs, ES16);
+    vtst(vd, vt_v_er16);
+
+    vster(&vs, &vd, ES16);
+    vtst(vd, vt_v_er16);
+
+    vlbr(&vd, &vs, ES16);
+    vtst(vd, vt_v_br16);
+
+    vstbr(&vs, &vd, ES16);
+    vtst(vd, vt_v_br16);
+
+    vlebrh(&vd, &ss64, 5);
+    if (0xEDFE != vd.h[5]) {
+        return 1;
+    }
+
+    vstebrh(&vs, (uint8_t *)&sd64 + 4, 7);
+    if (0x0000000007110000ull != sd64) {
+        return 1;
+    }
+
+    vllebrz(&vd, (uint8_t *)&ss64 + 3, 2);
+    for (ix = 0; ix < 4; ix++) {
+        if (vd.w[ix] != (ix != 1 ? 0 : 0xBEAD0BCE)) {
+            return 1;
+        }
+    }
+
+    vlbrrep(&vd, (uint8_t *)&ss64 + 4, 1);
+    for (ix = 0; ix < 8; ix++) {
+        if (0xAD0B != vd.h[ix]) {
+            return 1;
+        }
+    }
+
+    return 0;
+}
diff --git a/tests/tcg/s390x/vxeh2_vs.c b/tests/tcg/s390x/vxeh2_vs.c
new file mode 100644
index 0000000000..b7ef419d79
--- /dev/null
+++ b/tests/tcg/s390x/vxeh2_vs.c
@@ -0,0 +1,93 @@
+/*
+ * vxeh2_vs: vector-enhancements facility 2 vector shift
+ */
+#include <stdint.h>
+#include "vx.h"
+
+#define vtst(v1, v2) \
+    if (v1.d[0] != v2.d[0] || v1.d[1] != v2.d[1]) { \
+        return 1; \
+    }
+
+static inline void vsl(S390Vector *v1, S390Vector *v2, S390Vector *v3)
+{
+    asm volatile("vsl %[v1], %[v2], %[v3]\n"
+                 : [v1] "=v" (v1->v)
+                 : [v2] "v" (v2->v)
+                 , [v3] "v" (v3->v));
+}
+
+static inline void vsra(S390Vector *v1, S390Vector *v2, S390Vector *v3)
+{
+    asm volatile("vsra %[v1], %[v2], %[v3]\n"
+                 : [v1] "=v" (v1->v)
+                 : [v2] "v" (v2->v)
+                 , [v3] "v" (v3->v));
+}
+
+static inline void vsrl(S390Vector *v1, S390Vector *v2, S390Vector *v3)
+{
+    asm volatile("vsrl %[v1], %[v2], %[v3]\n"
+                 : [v1] "=v" (v1->v)
+                 : [v2] "v" (v2->v)
+                 , [v3] "v" (v3->v));
+}
+
+static inline void vsld(S390Vector *v1, S390Vector *v2,
+                        S390Vector *v3, const uint8_t I)
+{
+    asm volatile("vsld %[v1], %[v2], %[v3], %[I]\n"
+                 : [v1] "=v" (v1->v)
+                 : [v2] "v" (v2->v)
+                 , [v3] "v" (v3->v)
+                 , [I] "i" (I & 7));
+}
+
+static inline void vsrd(S390Vector *v1, S390Vector *v2,
+                        S390Vector *v3, const uint8_t I)
+{
+    asm volatile("vsrd %[v1], %[v2], %[v3], %[I]\n"
+                 : [v1] "=v" (v1->v)
+                 : [v2] "v" (v2->v)
+                 , [v3] "v" (v3->v)
+                 , [I] "i" (I & 7));
+}
+
+int main(int argc, char *argv[])
+{
+    const S390Vector vt_vsl = { .d[0] = 0x7FEDBB32D5AA311Dull,
+                                .d[1] = 0xBB65AA10912220C0ull };
+    const S390Vector vt_vsra = { .d[0] = 0xF1FE6E7399AA5466ull,
+                                 .d[1] = 0x0E762A5188221044ull };
+    const S390Vector vt_vsrl = { .d[0] = 0x11FE6E7399AA5466ull,
+                                 .d[1] = 0x0E762A5188221044ull };
+    const S390Vector vt_vsld = { .d[0] = 0x7F76EE65DD54CC43ull,
+                                 .d[1] = 0xBB32AA2199108838ull };
+    const S390Vector vt_vsrd = { .d[0] = 0x0E060802040E000Aull,
+                                 .d[1] = 0x0C060802040E000Aull };
+    S390Vector vs = { .d[0] = 0x8FEEDDCCBBAA9988ull,
+                      .d[1] = 0x7766554433221107ull };
+    S390Vector vd = { .d[0] = 0, .d[1] = 0 };
+    S390Vector vsi = { .d[0] = 0, .d[1] = 0 };
+
+    for (int ix = 0; ix < 16; ix++) {
+        vsi.b[ix] = (1 + (5 ^ ~ix)) & 7;
+    }
+
+    vsl(&vd, &vs, &vsi);
+    vtst(vd, vt_vsl);
+
+    vsra(&vd, &vs, &vsi);
+    vtst(vd, vt_vsra);
+
+    vsrl(&vd, &vs, &vsi);
+    vtst(vd, vt_vsrl);
+
+    vsld(&vd, &vs, &vsi, 3);
+    vtst(vd, vt_vsld);
+
+    vsrd(&vd, &vs, &vsi, 15);
+    vtst(vd, vt_vsrd);
+
+    return 0;
+}
diff --git a/tests/tcg/sh4/Makefile.target b/tests/tcg/sh4/Makefile.target
index 9d18d44612..47c39a44b6 100644
--- a/tests/tcg/sh4/Makefile.target
+++ b/tests/tcg/sh4/Makefile.target
@@ -5,3 +5,10 @@
 
 # On sh Linux supports 4k, 8k, 16k and 64k pages (but only 4k currently works)
 EXTRA_RUNS+=run-test-mmap-4096 # run-test-mmap-8192 run-test-mmap-16384 run-test-mmap-65536
+
+# This triggers failures for sh4-linux about 10% of the time.
+# Random SIGSEGV at unpredictable guest address, cause unknown.
+run-signals: signals
+	$(call skip-test, $<, "BROKEN")
+run-plugin-signals-with-%:
+	$(call skip-test, $<, "BROKEN")
diff --git a/tests/tcg/x86_64/Makefile.softmmu-target b/tests/tcg/x86_64/Makefile.softmmu-target
index 9896319f0e..7207fee94c 100644
--- a/tests/tcg/x86_64/Makefile.softmmu-target
+++ b/tests/tcg/x86_64/Makefile.softmmu-target
@@ -38,10 +38,9 @@ run-plugin-%-with-libinsn.so:
 	$(call run-test, $@, \
 	  $(QEMU) -monitor none -display none \
 	    -chardev file$(COMMA)path=$@.out$(COMMA)id=output \
-	    -plugin ../../plugin/libinsn.so$(COMMA)arg=inline \
+	    -plugin ../../plugin/libinsn.so$(COMMA)inline=on \
 	    -d plugin -D $*-with-libinsn.so.pout \
-	    $(QEMU_OPTS) $*, \
-	  "$* on $(TARGET_NAME)")
+	    $(QEMU_OPTS) $*)
 
 # Running
 QEMU_OPTS+=-device isa-debugcon,chardev=output -device isa-debug-exit,iobase=0xf4,iosize=0x4 -kernel
diff --git a/tests/tcg/x86_64/Makefile.target b/tests/tcg/x86_64/Makefile.target
index 2151ea6302..4eac78293f 100644
--- a/tests/tcg/x86_64/Makefile.target
+++ b/tests/tcg/x86_64/Makefile.target
@@ -8,13 +8,21 @@
 
 include $(SRC_PATH)/tests/tcg/i386/Makefile.target
 
+ifeq ($(filter %-linux-user, $(TARGET)),$(TARGET))
 X86_64_TESTS += vsyscall
+X86_64_TESTS += noexec
+X86_64_TESTS += cmpxchg
 TESTS=$(MULTIARCH_TESTS) $(X86_64_TESTS) test-x86_64
-QEMU_OPTS += -cpu max
+else
+TESTS=$(MULTIARCH_TESTS)
+endif
+
+run-test-i386-ssse3: QEMU_OPTS += -cpu max
+run-plugin-test-i386-ssse3-%: QEMU_OPTS += -cpu max
 
 test-x86_64: LDFLAGS+=-lm -lc
 test-x86_64: test-i386.c test-i386.h test-i386-shift.h test-i386-muldiv.h
 	$(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
 
-vsyscall: $(SRC_PATH)/tests/tcg/x86_64/vsyscall.c
+%: $(SRC_PATH)/tests/tcg/x86_64/%.c
 	$(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
diff --git a/tests/tcg/x86_64/cmpxchg.c b/tests/tcg/x86_64/cmpxchg.c
new file mode 100644
index 0000000000..5891735161
--- /dev/null
+++ b/tests/tcg/x86_64/cmpxchg.c
@@ -0,0 +1,42 @@
+#include <assert.h>
+
+static int mem;
+
+static unsigned long test_cmpxchgb(unsigned long orig)
+{
+    unsigned long ret;
+    mem = orig;
+    asm("cmpxchgb %b[cmp],%[mem]"
+        : [ mem ] "+m"(mem), [ rax ] "=a"(ret)
+        : [ cmp ] "r"(0x77), "a"(orig));
+    return ret;
+}
+
+static unsigned long test_cmpxchgw(unsigned long orig)
+{
+    unsigned long ret;
+    mem = orig;
+    asm("cmpxchgw %w[cmp],%[mem]"
+        : [ mem ] "+m"(mem), [ rax ] "=a"(ret)
+        : [ cmp ] "r"(0x7777), "a"(orig));
+    return ret;
+}
+
+static unsigned long test_cmpxchgl(unsigned long orig)
+{
+    unsigned long ret;
+    mem = orig;
+    asm("cmpxchgl %[cmp],%[mem]"
+        : [ mem ] "+m"(mem), [ rax ] "=a"(ret)
+        : [ cmp ] "r"(0x77777777u), "a"(orig));
+    return ret;
+}
+
+int main()
+{
+    unsigned long test = 0xdeadbeef12345678ull;
+    assert(test == test_cmpxchgb(test));
+    assert(test == test_cmpxchgw(test));
+    assert(test == test_cmpxchgl(test));
+    return 0;
+}
diff --git a/tests/tcg/x86_64/float_convd.ref b/tests/tcg/x86_64/float_convd.ref
new file mode 100644
index 0000000000..a71bff42cc
--- /dev/null
+++ b/tests/tcg/x86_64/float_convd.ref
@@ -0,0 +1,988 @@
+### Rounding to nearest
+from double: f64(nan:0x007ff4000000000000)
+ to single: f32(nan:0x7fe00000) (INVALID)
+ to int32: -2147483648 (INVALID)
+ to int64: -9223372036854775808 (INVALID)
+ to uint32: 0 (INVALID)
+ to uint64: -9223372036854775808 (INVALID)
+from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-inf:0xff800000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: -2 (OK) + to uint64: -2 (OK) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: -1 (OK) + to uint64: -1 (OK) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x0.00000000000000000000p+0:0x80000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000000000000000000p-25:0x33000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: 
f32(0x1.ffffe600000000000000p-25:0x337ffff3) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000c00000000000000p-14:0x38800006) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0a800000000000000p+1:0x402df854) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb600000000000000p+1:0x40490fdb) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 
(OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.00000000000000000000p+31:0x4f000000) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(inf:0x7f800000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +### Rounding upwards +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: 
f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: -2 (OK) + to uint64: -2 (OK) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: -1 (OK) + to uint64: -1 (OK) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x0.00000000000000000000p+0:0x80000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000200000000000000p-25:0x33000001) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe800000000000000p-25:0x337ffff4) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801c00000000000000p-15:0x387fc00e) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000e00000000000000p-14:0x38800007) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: 
f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x1.00000000000000000000p-149:0x00000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x1.00000000000000000000p-149:0x00000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x1.00000000000000000000p-149:0x00000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0aa00000000000000p+1:0x402df855) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb600000000000000p+1:0x40490fdb) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.00000000000000000000p+31:0x4f000000) (INEXACT ) + to int32: 2147483647 (OK) + to 
int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(inf:0x7f800000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +### Rounding downwards +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-inf:0xff800000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + 
to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: -2 (OK) + to uint64: -2 (OK) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: -1 (OK) + to uint64: -1 (OK) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x1.00000000000000000000p-149:0x80000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000000000000000000p-25:0x33000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000c00000000000000p-14:0x38800006) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 
0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0a800000000000000p+1:0x402df854) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb400000000000000p+1:0x40490fda) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.fffffe00000000000000p+30:0x4effffff) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to 
int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +### Rounding to zero +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: -2 (OK) + to uint64: -2 (OK) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: -1 
(OK) + to uint64: -1 (OK) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x0.00000000000000000000p+0:0x80000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000000000000000000p-25:0x33000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000c00000000000000p-14:0x38800006) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: 
f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0a800000000000000p+1:0x402df854) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb400000000000000p+1:0x40490fda) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.fffffe00000000000000p+30:0x4effffff) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 
-9223372036854775808 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) diff --git a/tests/tcg/x86_64/float_convs.ref b/tests/tcg/x86_64/float_convs.ref new file mode 100644 index 0000000000..54a094f795 --- /dev/null +++ b/tests/tcg/x86_64/float_convs.ref @@ -0,0 +1,748 @@ +### Rounding to nearest +from single: f32(-nan:0xffa00000) + to double: f64(-nan:0x00fffc000000000000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to double: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to 
int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to 
uint64: 0 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(nan:0x7fa00000) + to double: f64(nan:0x007ffc000000000000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +### Rounding upwards +from single: f32(-nan:0xffa00000) + to double: f64(-nan:0x00fffc000000000000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to double: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to 
int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from 
single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(nan:0x7fa00000) + to double: f64(nan:0x007ffc000000000000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +### Rounding downwards +from single: f32(-nan:0xffa00000) + to double: f64(-nan:0x00fffc000000000000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to double: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to 
int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: 
f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(nan:0x7fa00000) + to double: f64(nan:0x007ffc000000000000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +### Rounding to zero +from single: f32(-nan:0xffa00000) + to double: f64(-nan:0x00fffc000000000000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to double: 
f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 
(OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(nan:0x7fa00000) + to double: f64(nan:0x007ffc000000000000) (INVALID) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: 0 (INVALID) + to uint64: -9223372036854775808 (INVALID) diff --git a/tests/tcg/x86_64/noexec.c b/tests/tcg/x86_64/noexec.c new file mode 100644 index 0000000000..9b124901be --- /dev/null +++ b/tests/tcg/x86_64/noexec.c @@ -0,0 +1,75 @@ +#include "../multiarch/noexec.c.inc" + +static void *arch_mcontext_pc(const mcontext_t *ctx) +{ + return (void *)ctx->gregs[REG_RIP]; +} + +int arch_mcontext_arg(const mcontext_t *ctx) +{ + return ctx->gregs[REG_RDI]; +} + +static void arch_flush(void *p, int len) +{ +} + +extern char noexec_1[]; +extern char noexec_2[]; +extern char noexec_end[]; + +asm("noexec_1:\n" + " movq $1,%rdi\n" /* %rdi is 0 on entry, set 1. */ + "noexec_2:\n" + " movq $2,%rdi\n" /* %rdi is 0/1; set 2. */ + " ret\n" + "noexec_end:"); + +int main(void) +{ + struct noexec_test noexec_tests[] = { + { + .name = "fallthrough", + .test_code = noexec_1, + .test_len = noexec_end - noexec_1, + .page_ofs = noexec_1 - noexec_2, + .entry_ofs = noexec_1 - noexec_2, + .expected_si_ofs = 0, + .expected_pc_ofs = 0, + .expected_arg = 1, + }, + { + .name = "jump", + .test_code = noexec_1, + .test_len = noexec_end - noexec_1, + .page_ofs = noexec_1 - noexec_2, + .entry_ofs = 0, + .expected_si_ofs = 0, + .expected_pc_ofs = 0, + .expected_arg = 0, + }, + { + .name = "fallthrough [cross]", + .test_code = noexec_1, + .test_len = noexec_end - noexec_1, + .page_ofs = noexec_1 - noexec_2 - 2, + .entry_ofs = noexec_1 - noexec_2 - 2, + .expected_si_ofs = 0, + .expected_pc_ofs = -2, + .expected_arg = 1, + }, + { + .name = "jump [cross]", + .test_code = noexec_1, + .test_len = noexec_end - noexec_1, + .page_ofs = noexec_1 - noexec_2 - 2, + .entry_ofs = -2, + .expected_si_ofs = 0, + .expected_pc_ofs = -2, + .expected_arg = 0, + }, + }; + + return test_noexec(noexec_tests, + sizeof(noexec_tests) / sizeof(noexec_tests[0])); +} diff --git a/tests/tcg/x86_64/system/boot.S b/tests/tcg/x86_64/system/boot.S index f8a2fcc839..ed0f638406 100644 --- a/tests/tcg/x86_64/system/boot.S +++ b/tests/tcg/x86_64/system/boot.S @@ -56,7 +56,7 @@ * * - `ebx`: contains the physical memory address where the loader has placed * the boot start info structure. 
- * - `cr0`: bit 0 (PE) must be set. All the other writeable bits are cleared. + * - `cr0`: bit 0 (PE) must be set. All the other writable bits are cleared. * - `cr4`: all bits are cleared. * - `cs `: must be a 32-bit read/execute code segment with a base of ‘0’ * and a limit of ‘0xFFFFFFFF’. The selector value is unspecified. diff --git a/tests/tcg/x86_64/system/kernel.ld b/tests/tcg/x86_64/system/kernel.ld index 49c12b04ae..ca5d6bd850 100644 --- a/tests/tcg/x86_64/system/kernel.ld +++ b/tests/tcg/x86_64/system/kernel.ld @@ -16,7 +16,10 @@ SECTIONS { *(.rodata) } :text - /* Keep build ID and PVH notes in same section */ + /DISCARD/ : { + *(.note.gnu*) + } + .notes : { *(.note.*) } :note diff --git a/tests/tcg/xtensa/Makefile.softmmu-target b/tests/tcg/xtensa/Makefile.softmmu-target index 9530cac2ad..973e55298e 100644 --- a/tests/tcg/xtensa/Makefile.softmmu-target +++ b/tests/tcg/xtensa/Makefile.softmmu-target @@ -2,7 +2,7 @@ # Xtensa softmmu tests # -ifneq ($(TARGET_WORDS_BIGENDIAN),y) +ifneq ($(TARGET_BIG_ENDIAN),y) XTENSA_SRC = $(SRC_PATH)/tests/tcg/xtensa XTENSA_ALL = $(filter-out $(XTENSA_SRC)/linker.ld.S,$(wildcard $(XTENSA_SRC)/*.S)) diff --git a/tests/tcg/xtensa/crt.S b/tests/tcg/xtensa/crt.S index d9846acace..909872cd38 100644 --- a/tests/tcg/xtensa/crt.S +++ b/tests/tcg/xtensa/crt.S @@ -8,10 +8,12 @@ .text .global _start _start: +#if XCHAL_HAVE_WINDOWED movi a2, 1 wsr a2, windowstart movi a2, 0 wsr a2, windowbase +#endif movi a1, _fstack movi a2, 0x4000f wsr a2, ps diff --git a/tests/tcg/xtensa/test_break.S b/tests/tcg/xtensa/test_break.S index 3379a3f9f0..3aa18b5cec 100644 --- a/tests/tcg/xtensa/test_break.S +++ b/tests/tcg/xtensa/test_break.S @@ -200,64 +200,70 @@ test_end .endm #if XCHAL_NUM_DBREAK +#define DB0 0 +#if XCHAL_NUM_DBREAK > 1 +#define DB1 1 +#else +#define DB1 0 +#endif test dbreak_exact - dbreak_test 0, 0x4000003f, 0xd000007f, 0xd000007f, l8ui - dbreak_test 1, 0x4000003e, 0xd000007e, 0xd000007e, l16ui - dbreak_test 0, 0x4000003c, 0xd000007c, 0xd000007c, l32i + dbreak_test DB0, 0x4000003f, 0xd000007f, 0xd000007f, l8ui + dbreak_test DB1, 0x4000003e, 0xd000007e, 0xd000007e, l16ui + dbreak_test DB0, 0x4000003c, 0xd000007c, 0xd000007c, l32i - dbreak_test 1, 0x8000003f, 0xd000007f, 0xd000007f, s8i - dbreak_test 0, 0x8000003e, 0xd000007e, 0xd000007e, s16i - dbreak_test 1, 0x8000003c, 0xd000007c, 0xd000007c, s32i + dbreak_test DB1, 0x8000003f, 0xd000007f, 0xd000007f, s8i + dbreak_test DB0, 0x8000003e, 0xd000007e, 0xd000007e, s16i + dbreak_test DB1, 0x8000003c, 0xd000007c, 0xd000007c, s32i test_end -test dbreak_overlap - dbreak_test 0, 0x4000003f, 0xd000007d, 0xd000007c, l16ui - dbreak_test 1, 0x4000003f, 0xd000007d, 0xd000007c, l32i +test dbreak_overlap + dbreak_test DB0, 0x4000003f, 0xd000007d, 0xd000007c, l16ui + dbreak_test DB1, 0x4000003f, 0xd000007d, 0xd000007c, l32i - dbreak_test 0, 0x4000003e, 0xd000007e, 0xd000007f, l8ui - dbreak_test 1, 0x4000003e, 0xd000007e, 0xd000007c, l32i + dbreak_test DB0, 0x4000003e, 0xd000007e, 0xd000007f, l8ui + dbreak_test DB1, 0x4000003e, 0xd000007e, 0xd000007c, l32i - dbreak_test 0, 0x4000003c, 0xd000007c, 0xd000007d, l8ui - dbreak_test 1, 0x4000003c, 0xd000007c, 0xd000007c, l16ui + dbreak_test DB0, 0x4000003c, 0xd000007c, 0xd000007d, l8ui + dbreak_test DB1, 0x4000003c, 0xd000007c, 0xd000007c, l16ui - dbreak_test 0, 0x40000038, 0xd0000078, 0xd000007b, l8ui - dbreak_test 1, 0x40000038, 0xd0000078, 0xd000007a, l16ui - dbreak_test 0, 0x40000038, 0xd0000078, 0xd000007c, l32i + dbreak_test DB0, 0x40000038, 0xd0000078, 0xd000007b, l8ui + 
dbreak_test DB1, 0x40000038, 0xd0000078, 0xd000007a, l16ui + dbreak_test DB0, 0x40000038, 0xd0000078, 0xd000007c, l32i - dbreak_test 1, 0x40000030, 0xd0000070, 0xd0000075, l8ui - dbreak_test 0, 0x40000030, 0xd0000070, 0xd0000076, l16ui - dbreak_test 1, 0x40000030, 0xd0000070, 0xd0000078, l32i + dbreak_test DB1, 0x40000030, 0xd0000070, 0xd0000075, l8ui + dbreak_test DB0, 0x40000030, 0xd0000070, 0xd0000076, l16ui + dbreak_test DB1, 0x40000030, 0xd0000070, 0xd0000078, l32i - dbreak_test 0, 0x40000020, 0xd0000060, 0xd000006f, l8ui - dbreak_test 1, 0x40000020, 0xd0000060, 0xd0000070, l16ui - dbreak_test 0, 0x40000020, 0xd0000060, 0xd0000074, l32i + dbreak_test DB0, 0x40000020, 0xd0000060, 0xd000006f, l8ui + dbreak_test DB1, 0x40000020, 0xd0000060, 0xd0000070, l16ui + dbreak_test DB0, 0x40000020, 0xd0000060, 0xd0000074, l32i - dbreak_test 0, 0x8000003f, 0xd000007d, 0xd000007c, s16i - dbreak_test 1, 0x8000003f, 0xd000007d, 0xd000007c, s32i + dbreak_test DB0, 0x8000003f, 0xd000007d, 0xd000007c, s16i + dbreak_test DB1, 0x8000003f, 0xd000007d, 0xd000007c, s32i - dbreak_test 0, 0x8000003e, 0xd000007e, 0xd000007f, s8i - dbreak_test 1, 0x8000003e, 0xd000007e, 0xd000007c, s32i + dbreak_test DB0, 0x8000003e, 0xd000007e, 0xd000007f, s8i + dbreak_test DB1, 0x8000003e, 0xd000007e, 0xd000007c, s32i - dbreak_test 0, 0x8000003c, 0xd000007c, 0xd000007d, s8i - dbreak_test 1, 0x8000003c, 0xd000007c, 0xd000007c, s16i + dbreak_test DB0, 0x8000003c, 0xd000007c, 0xd000007d, s8i + dbreak_test DB1, 0x8000003c, 0xd000007c, 0xd000007c, s16i - dbreak_test 0, 0x80000038, 0xd0000078, 0xd000007b, s8i - dbreak_test 1, 0x80000038, 0xd0000078, 0xd000007a, s16i - dbreak_test 0, 0x80000038, 0xd0000078, 0xd000007c, s32i + dbreak_test DB0, 0x80000038, 0xd0000078, 0xd000007b, s8i + dbreak_test DB1, 0x80000038, 0xd0000078, 0xd000007a, s16i + dbreak_test DB0, 0x80000038, 0xd0000078, 0xd000007c, s32i - dbreak_test 1, 0x80000030, 0xd0000070, 0xd0000075, s8i - dbreak_test 0, 0x80000030, 0xd0000070, 0xd0000076, s16i - dbreak_test 1, 0x80000030, 0xd0000070, 0xd0000078, s32i + dbreak_test DB1, 0x80000030, 0xd0000070, 0xd0000075, s8i + dbreak_test DB0, 0x80000030, 0xd0000070, 0xd0000076, s16i + dbreak_test DB1, 0x80000030, 0xd0000070, 0xd0000078, s32i - dbreak_test 0, 0x80000020, 0xd0000060, 0xd000006f, s8i - dbreak_test 1, 0x80000020, 0xd0000060, 0xd0000070, s16i - dbreak_test 0, 0x80000020, 0xd0000060, 0xd0000074, s32i + dbreak_test DB0, 0x80000020, 0xd0000060, 0xd000006f, s8i + dbreak_test DB1, 0x80000020, 0xd0000060, 0xd0000070, s16i + dbreak_test DB0, 0x80000020, 0xd0000060, 0xd0000074, s32i test_end -test dbreak_invalid - dbreak_test 0, 0x40000030, 0xd0000071, 0xd0000070, l16ui - dbreak_test 1, 0x40000035, 0xd0000072, 0xd0000070, l32i +test dbreak_invalid + dbreak_test DB0, 0x40000030, 0xd0000071, 0xd0000070, l16ui + dbreak_test DB1, 0x40000035, 0xd0000072, 0xd0000070, l32i test_end #endif diff --git a/tests/tcg/xtensa/test_mmu.S b/tests/tcg/xtensa/test_mmu.S index 4cbd6ef4f9..1006c8cf77 100644 --- a/tests/tcg/xtensa/test_mmu.S +++ b/tests/tcg/xtensa/test_mmu.S @@ -2,7 +2,9 @@ test_suite mmu -#if XCHAL_HAVE_PTP_MMU && !XCHAL_HAVE_SPANNING_WAY +#if XCHAL_HAVE_PTP_MMU +#define BASE 0x20000000 +#define TLB_BASE 0x80000000 .purgem test_init @@ -29,17 +31,27 @@ test_suite mmu idtlb a2 movi a2, 0x00000009 idtlb a2 +#if XCHAL_HAVE_SPANNING_WAY + movi a2, BASE | XCHAL_SPANNING_WAY + idtlb a2 + iitlb a2 + movi a2, TLB_BASE | XCHAL_SPANNING_WAY + idtlb a2 + iitlb a2 + movi a2, TLB_BASE + wsr a2, ptevaddr +#endif .endm test tlb_group movi a2, 
0x04000002 /* PPN */ - movi a3, 0x01200004 /* VPN */ + movi a3, BASE + 0x01200004 /* VPN */ wdtlb a2, a3 witlb a2, a3 movi a3, 0x00200004 rdtlb0 a1, a3 ritlb0 a2, a3 - movi a3, 0x01000001 + movi a3, BASE + 0x01000001 assert eq, a1, a3 assert eq, a2, a3 movi a3, 0x00200004 @@ -48,17 +60,17 @@ test tlb_group movi a3, 0x04000002 assert eq, a1, a3 assert eq, a2, a3 - movi a3, 0x01234567 + movi a3, BASE + 0x01234567 pdtlb a1, a3 pitlb a2, a3 - movi a3, 0x01234014 + movi a3, BASE + 0x01234014 assert eq, a1, a3 - movi a3, 0x0123400c + movi a3, BASE + 0x0123400c assert eq, a2, a3 movi a3, 0x00200004 idtlb a3 iitlb a3 - movi a3, 0x01234567 + movi a3, BASE + 0x01234567 pdtlb a1, a3 pitlb a2, a3 movi a3, 0x00000010 @@ -72,7 +84,7 @@ test_end test itlb_miss set_vector kernel, 1f - movi a3, 0x00100000 + movi a3, BASE + 0x00100000 jx a3 test_fail 1: @@ -86,7 +98,7 @@ test_end test dtlb_miss set_vector kernel, 1f - movi a3, 0x00100000 + movi a3, BASE + 0x00100000 l8ui a2, a3, 0 test_fail 1: @@ -116,11 +128,11 @@ test dtlb_multi_hit set_vector kernel, 1f movi a2, 0x04000002 /* PPN */ - movi a3, 0x01200004 /* VPN */ + movi a3, BASE + 0x01200004 /* VPN */ wdtlb a2, a3 - movi a3, 0x01200007 /* VPN */ + movi a3, BASE + 0x01200007 /* VPN */ wdtlb a2, a3 - movi a3, 0x01200000 + movi a3, BASE + 0x01200000 pdtlb a2, a3 test_fail 1: @@ -168,15 +180,18 @@ test load_store_privilege and a3, a3, a1 movi a1, 4 or a3, a3, a1 + movi a5, BASE + add a3, a3, a5 witlb a2, a3 movi a3, 10f movi a1, 0x000fffff and a1, a3, a1 + add a1, a1, a5 movi a2, 0x04000003 /* PPN */ - movi a3, 0x01200004 /* VPN */ + movi a3, BASE + 0x01200004 /* VPN */ wdtlb a2, a3 - movi a3, 0x01200001 + movi a3, BASE + 0x01200001 movi a2, 0x4004f jx a1 10: @@ -192,6 +207,7 @@ test load_store_privilege movi a3, 1b movi a1, 0x000fffff and a3, a3, a1 + add a3, a3, a5 assert eq, a2, a3 rsr a2, exccause movi a3, 26 @@ -206,9 +222,9 @@ test cring_load_store_privilege set_vector double, 2f movi a2, 0x04000003 /* PPN */ - movi a3, 0x01200004 /* VPN */ + movi a3, BASE + 0x01200004 /* VPN */ wdtlb a2, a3 - movi a3, 0x01200004 + movi a3, BASE + 0x01200004 movi a2, 0x4005f /* ring 1 + excm => cring == 0 */ wsr a2, ps isync @@ -245,10 +261,13 @@ test inst_fetch_prohibited and a3, a3, a1 movi a1, 4 or a3, a3, a1 + movi a5, BASE + add a3, a3, a5 witlb a2, a3 movi a3, 10f movi a1, 0x000fffff and a1, a3, a1 + add a1, a1, a5 jx a1 .align 4 10: @@ -268,9 +287,9 @@ test load_prohibited set_vector kernel, 2f movi a2, 0x0400000c /* PPN */ - movi a3, 0x01200004 /* VPN */ + movi a3, BASE + 0x01200004 /* VPN */ wdtlb a2, a3 - movi a3, 0x01200002 + movi a3, BASE + 0x01200002 1: l8ui a2, a3, 0 test_fail @@ -289,9 +308,9 @@ test store_prohibited set_vector kernel, 2f movi a2, 0x04000001 /* PPN */ - movi a3, 0x01200004 /* VPN */ + movi a3, BASE + 0x01200004 /* VPN */ wdtlb a2, a3 - movi a3, 0x01200003 + movi a3, BASE + 0x01200003 l8ui a2, a3, 0 1: s8i a2, a3, 0 @@ -311,10 +330,10 @@ test_end * and DTLB way 7 to cover this PTE, ring=pt_ring, attr=pt_attr */ .macro pt_setup pt_ring, pt_attr, pte_ring, vaddr, paddr, pte_attr - movi a2, 0x80000000 + movi a2, TLB_BASE wsr a2, ptevaddr - movi a3, 0x80000007 | (((\vaddr) >> 10) & 0xfffff000) /* way 7 */ + movi a3, TLB_BASE | 7 | (((\vaddr) >> 10) & 0xfffff000) /* way 7 */ movi a4, 0x04000003 | ((\pt_ring) << 4) /* PADDR 64M */ wdtlb a4, a3 isync @@ -324,7 +343,7 @@ test_end add a2, a1, a2 s32i a3, a2, 0 - movi a3, 0x80000007 | (((\vaddr) >> 10) & 0xfffff000) /* way 7 */ + movi a3, TLB_BASE | 7 | (((\vaddr) >> 10) & 0xfffff000) /* way 7 */ 
movi a4, 0x04000000 | ((\pt_ring) << 4) | (\pt_attr) /* PADDR 64M */ wdtlb a4, a3 isync @@ -343,10 +362,13 @@ test_end and a3, a3, a1 movi a1, 4 or a3, a3, a1 + movi a5, BASE + add a3, a3, a5 witlb a2, a3 movi a3, 10f movi a1, 0x000fffff and a1, a3, a1 + add a1, a1, a5 movi a2, 0 wsr a2, excvaddr @@ -396,6 +418,8 @@ test_end movi a2, (\vaddr) movi a1, 0xfffff and a1, a1, a2 + movi a5, BASE + add a1, a1, a5 rsr a2, epc1 assert eq, a1, a2 .endm @@ -403,7 +427,7 @@ test_end test dtlb_autoload set_vector kernel, 0 - pt_setup 0, 3, 1, 0x1000, 0x1000, 3 + pt_setup 0, 3, 1, BASE + 0x1000, 0x1000, 3 assert_no_auto_tlb l8ui a1, a3, 0 @@ -418,8 +442,8 @@ test autoload_load_store_privilege set_vector kernel, 0 set_vector double, 2f - pt_setup 0, 3, 0, 0x2000, 0x2000, 3 - movi a3, 0x2004 + pt_setup 0, 3, 0, BASE + 0x2000, 0x2000, 3 + movi a3, BASE + 0x2004 assert_no_auto_tlb movi a2, 0x4005f /* ring 1 + excm => cring == 0 */ @@ -441,7 +465,7 @@ test_end test autoload_pte_load_prohibited set_vector kernel, 2f - pt_setup 0, 3, 0, 0x3000, 0, 0xc + pt_setup 0, 3, 0, BASE + 0x3000, 0, 0xc assert_no_auto_tlb 1: l32i a2, a3, 0 @@ -458,7 +482,7 @@ test_end test autoload_pt_load_prohibited set_vector kernel, 2f - pt_setup 0, 0xc, 0, 0x4000, 0x4000, 3 + pt_setup 0, 0xc, 0, BASE + 0x4000, 0x4000, 3 assert_no_auto_tlb 1: l32i a2, a3, 0 @@ -474,8 +498,8 @@ test_end test autoload_pt_privilege set_vector kernel, 2f - pt_setup 0, 3, 1, 0x5000, 0, 3 - go_ring 1, 0, 0x5001 + pt_setup 0, 3, 1, BASE + 0x5000, 0, 3 + go_ring 1, 0, BASE + 0x5001 l8ui a2, a3, 0 1: @@ -491,8 +515,8 @@ test_end test autoload_pte_privilege set_vector kernel, 2f - pt_setup 0, 3, 0, 0x6000, 0, 3 - go_ring 1, 0, 0x6001 + pt_setup 0, 3, 0, BASE + 0x6000, 0, 3 + go_ring 1, 0, BASE + 0x6001 1: l8ui a2, a3, 0 syscall @@ -507,9 +531,9 @@ test_end test autoload_3_level_pt set_vector kernel, 2f - pt_setup 1, 3, 1, 0x00400000, 0, 3 - pt_setup 1, 3, 1, 0x80001000, 0x2000000, 3 - go_ring 1, 0, 0x00400001 + pt_setup 1, 3, 1, BASE + 0x00400000, 0, 3 + pt_setup 1, 3, 1, TLB_BASE + ((BASE + 0x00400000) >> 10), 0x2000000, 3 + go_ring 1, 0, BASE + 0x00400001 1: l8ui a2, a3, 0 syscall @@ -526,14 +550,14 @@ test cross_page_insn set_vector kernel, 2f movi a2, 0x04000003 /* PPN */ - movi a3, 0x00007000 /* VPN */ + movi a3, BASE + 0x00007000 /* VPN */ witlb a2, a3 wdtlb a2, a3 - movi a3, 0x00008000 /* VPN */ + movi a3, BASE + 0x00008000 /* VPN */ witlb a2, a3 wdtlb a2, a3 - movi a2, 0x00007fff + movi a2, BASE + 0x00007fff movi a3, 20f movi a4, 21f sub a4, a4, a3 @@ -543,8 +567,8 @@ test cross_page_insn addi a2, a2, 1 addi a3, a3, 1 1: - movi a2, 0x00007fff - movi a3, 0x00008000 + movi a2, BASE + 0x00007fff + movi a3, BASE + 0x00008000 /* DTLB: OK, ITLB: OK */ jx a2 @@ -560,20 +584,20 @@ test cross_page_insn movi a3, 1 assert eq, a2, a3 rsr a2, epc1 - movi a3, 0x8002 + movi a3, BASE + 0x8002 assert eq, a2, a3 rsr a2, excsave1 - movi a3, 0x00007fff + movi a3, BASE + 0x00007fff assert ne, a2, a3 reset_ps set_vector kernel, 3f movi a2, 0x0400000c /* PPN */ - movi a3, 0x00008000 /* VPN */ + movi a3, BASE + 0x00008000 /* VPN */ wdtlb a2, a3 - movi a2, 0x00007fff - movi a3, 0x00008000 + movi a2, BASE + 0x00007fff + movi a3, BASE + 0x00008000 /* DTLB: FAIL, ITLB: OK */ jx a2 3: @@ -581,22 +605,22 @@ test cross_page_insn movi a3, 28 assert eq, a2, a3 rsr a2, epc1 - movi a3, 0x7fff + movi a3, BASE + 0x7fff assert eq, a2, a3 rsr a2, excsave1 - movi a3, 0x00007fff + movi a3, BASE + 0x00007fff assert eq, a2, a3 reset_ps set_vector kernel, 4f movi a2, 0x0400000c /* PPN */ - movi 
a3, 0x00008000 /* VPN */ + movi a3, BASE + 0x00008000 /* VPN */ witlb a2, a3 movi a2, 0x04000003 /* PPN */ wdtlb a2, a3 - movi a2, 0x00007fff - movi a3, 0x00008000 + movi a2, BASE + 0x00007fff + movi a3, BASE + 0x00008000 /* DTLB: OK, ITLB: FAIL */ jx a2 4: @@ -604,20 +628,20 @@ test cross_page_insn movi a3, 20 assert eq, a2, a3 rsr a2, epc1 - movi a3, 0x7fff + movi a3, BASE + 0x7fff assert eq, a2, a3 rsr a2, excsave1 - movi a3, 0x00007fff + movi a3, BASE + 0x00007fff assert eq, a2, a3 reset_ps set_vector kernel, 5f movi a2, 0x0400000c /* PPN */ - movi a3, 0x00008000 /* VPN */ + movi a3, BASE + 0x00008000 /* VPN */ wdtlb a2, a3 - movi a2, 0x00007fff - movi a3, 0x00008000 + movi a2, BASE + 0x00007fff + movi a3, BASE + 0x00008000 /* DTLB: FAIL, ITLB: FAIL */ jx a2 5: @@ -625,10 +649,10 @@ test cross_page_insn movi a3, 20 assert eq, a2, a3 rsr a2, epc1 - movi a3, 0x7fff + movi a3, BASE + 0x7fff assert eq, a2, a3 rsr a2, excsave1 - movi a3, 0x00007fff + movi a3, BASE + 0x00007fff assert eq, a2, a3 test_end @@ -636,14 +660,14 @@ test cross_page_tb set_vector kernel, 2f movi a2, 0x04000003 /* PPN */ - movi a3, 0x00007000 /* VPN */ + movi a3, BASE + 0x00007000 /* VPN */ witlb a2, a3 wdtlb a2, a3 - movi a3, 0x00008000 /* VPN */ + movi a3, BASE + 0x00008000 /* VPN */ witlb a2, a3 wdtlb a2, a3 - movi a2, 0x00007ffc + movi a2, BASE + 0x00007ffc movi a3, 20f movi a4, 21f sub a4, a4, a3 @@ -653,8 +677,8 @@ test cross_page_tb addi a2, a2, 1 addi a3, a3, 1 1: - movi a2, 0x00007ffc - movi a3, 0x00008000 + movi a2, BASE + 0x00007ffc + movi a3, BASE + 0x00008000 /* DTLB: OK, ITLB: OK */ jx a2 @@ -670,20 +694,20 @@ test cross_page_tb movi a3, 1 assert eq, a2, a3 rsr a2, epc1 - movi a3, 0x7fff + movi a3, BASE + 0x7fff assert eq, a2, a3 rsr a2, excsave1 - movi a3, 0x00007ffc + movi a3, BASE + 0x00007ffc assert ne, a2, a3 reset_ps set_vector kernel, 3f movi a2, 0x0400000c /* PPN */ - movi a3, 0x00008000 /* VPN */ + movi a3, BASE + 0x00008000 /* VPN */ wdtlb a2, a3 - movi a2, 0x00007ffc - movi a3, 0x00008000 + movi a2, BASE + 0x00007ffc + movi a3, BASE + 0x00008000 /* DTLB: FAIL, ITLB: OK */ jx a2 3: @@ -691,22 +715,22 @@ test cross_page_tb movi a3, 28 assert eq, a2, a3 rsr a2, epc1 - movi a3, 0x7ffc + movi a3, BASE + 0x7ffc assert eq, a2, a3 rsr a2, excsave1 - movi a3, 0x00007ffc + movi a3, BASE + 0x00007ffc assert eq, a2, a3 reset_ps set_vector kernel, 4f movi a2, 0x0400000c /* PPN */ - movi a3, 0x00008000 /* VPN */ + movi a3, BASE + 0x00008000 /* VPN */ witlb a2, a3 movi a2, 0x04000003 /* PPN */ wdtlb a2, a3 - movi a2, 0x00007ffc - movi a3, 0x00008000 + movi a2, BASE + 0x00007ffc + movi a3, BASE + 0x00008000 /* DTLB: OK, ITLB: FAIL */ jx a2 4: @@ -714,20 +738,20 @@ test cross_page_tb movi a3, 20 assert eq, a2, a3 rsr a2, epc1 - movi a3, 0x7fff + movi a3, BASE + 0x7fff assert eq, a2, a3 rsr a2, excsave1 - movi a3, 0x00007ffc + movi a3, BASE + 0x00007ffc assert ne, a2, a3 reset_ps set_vector kernel, 5f movi a2, 0x0400000c /* PPN */ - movi a3, 0x00008000 /* VPN */ + movi a3, BASE + 0x00008000 /* VPN */ wdtlb a2, a3 - movi a2, 0x00007ffc - movi a3, 0x00008000 + movi a2, BASE + 0x00007ffc + movi a3, BASE + 0x00008000 /* DTLB: FAIL, ITLB: FAIL */ jx a2 5: @@ -735,10 +759,10 @@ test cross_page_tb movi a3, 28 assert eq, a2, a3 rsr a2, epc1 - movi a3, 0x7ffc + movi a3, BASE + 0x7ffc assert eq, a2, a3 rsr a2, excsave1 - movi a3, 0x00007ffc + movi a3, BASE + 0x00007ffc assert eq, a2, a3 test_end diff --git a/tests/tcg/xtensa/test_phys_mem.S b/tests/tcg/xtensa/test_phys_mem.S index 9bb3ee3866..f935a70294 100644 --- 
a/tests/tcg/xtensa/test_phys_mem.S +++ b/tests/tcg/xtensa/test_phys_mem.S @@ -2,7 +2,7 @@ test_suite phys_mem -#if XCHAL_HAVE_PTP_MMU && !XCHAL_HAVE_SPANNING_WAY +#if XCHAL_HAVE_PTP_MMU .purgem test_init @@ -13,6 +13,14 @@ test_suite phys_mem witlb a2, a3 movi a2, 0xc0000000 wsr a2, ptevaddr +#if XCHAL_HAVE_SPANNING_WAY + movi a2, 0xc0000000 | XCHAL_SPANNING_WAY + idtlb a2 + iitlb a2 + movi a2, 0x20000000 | XCHAL_SPANNING_WAY + idtlb a2 + iitlb a2 +#endif .endm test inst_fetch_get_pte_no_phys diff --git a/tests/tcg/xtensa/test_sr.S b/tests/tcg/xtensa/test_sr.S index b1a91a0637..34441c7aff 100644 --- a/tests/tcg/xtensa/test_sr.S +++ b/tests/tcg/xtensa/test_sr.S @@ -221,6 +221,8 @@ test_sr_mask /*scompare1*/12, 0, 0 #if XCHAL_HAVE_VECBASE test_sr vecbase, 1 +movi a2, XCHAL_VECBASE_RESET_VADDR +wsr a2, vecbase #else test_sr_mask /*vecbase*/231, 0, 0 #endif diff --git a/tests/tcg/xtensa/test_timer.S b/tests/tcg/xtensa/test_timer.S index 1ec8e20883..2a06eebad8 100644 --- a/tests/tcg/xtensa/test_timer.S +++ b/tests/tcg/xtensa/test_timer.S @@ -38,6 +38,28 @@ test_end #if XCHAL_NUM_TIMERS +#if INTERRUPT_LEVEL(XCHAL_TIMER0_INTERRUPT) == 1 +#define TIMER0_VECTOR kernel +#else +#define TIMER0_VECTOR glue(level, INTERRUPT_LEVEL(XCHAL_TIMER0_INTERRUPT)) +#endif + +#if XCHAL_NUM_TIMERS > 1 +#if INTERRUPT_LEVEL(XCHAL_TIMER1_INTERRUPT) == 1 +#define TIMER1_VECTOR kernel +#else +#define TIMER1_VECTOR glue(level, INTERRUPT_LEVEL(XCHAL_TIMER1_INTERRUPT)) +#endif +#endif + +#if XCHAL_NUM_TIMERS > 2 +#if INTERRUPT_LEVEL(XCHAL_TIMER2_INTERRUPT) == 1 +#define TIMER2_VECTOR kernel +#else +#define TIMER2_VECTOR glue(level, INTERRUPT_LEVEL(XCHAL_TIMER2_INTERRUPT)) +#endif +#endif + test ccount_update_deadline movi a2, 0 wsr a2, intenable @@ -90,9 +112,8 @@ test ccompare assert nei, a5, 0 test_end -#if INTERRUPT_LEVEL(XCHAL_TIMER0_INTERRUPT) == 1 test ccompare0_interrupt - set_vector kernel, 2f + set_vector TIMER0_VECTOR, 2f movi a2, 0 wsr a2, intenable rsr a2, interrupt @@ -115,20 +136,21 @@ test ccompare0_interrupt movi a2, 1 << XCHAL_TIMER0_INTERRUPT wsr a2, intenable rsil a2, 0 - loop a3, 1f - nop 1: + addi a3, a3, -1 + bnez a3, 1b test_fail 2: +#if INTERRUPT_LEVEL(XCHAL_TIMER0_INTERRUPT) == 1 rsr a2, exccause assert eqi, a2, 4 /* LEVEL1_INTERRUPT_CAUSE */ -test_end #endif +test_end #if XCHAL_NUM_TIMERS > 1 test ccompare1_interrupt - set_vector glue(level, INTERRUPT_LEVEL(XCHAL_TIMER1_INTERRUPT)), 2f + set_vector TIMER1_VECTOR, 2f movi a2, 0 wsr a2, intenable rsr a2, interrupt @@ -148,18 +170,22 @@ test ccompare1_interrupt movi a2, 1 << XCHAL_TIMER1_INTERRUPT wsr a2, intenable rsil a2, INTERRUPT_LEVEL(XCHAL_TIMER1_INTERRUPT) - 1 - loop a3, 1f - nop 1: + addi a3, a3, -1 + bnez a3, 1b test_fail 2: +#if INTERRUPT_LEVEL(XCHAL_TIMER1_INTERRUPT) == 1 + rsr a2, exccause + assert eqi, a2, 4 /* LEVEL1_INTERRUPT_CAUSE */ +#endif test_end #endif #if XCHAL_NUM_TIMERS > 2 test ccompare2_interrupt - set_vector glue(level, INTERRUPT_LEVEL(XCHAL_TIMER2_INTERRUPT)), 2f + set_vector TIMER2_VECTOR, 2f movi a2, 0 wsr a2, intenable rsr a2, interrupt @@ -177,17 +203,21 @@ test ccompare2_interrupt movi a2, 1 << XCHAL_TIMER2_INTERRUPT wsr a2, intenable rsil a2, INTERRUPT_LEVEL(XCHAL_TIMER2_INTERRUPT) - 1 - loop a3, 1f - nop 1: + addi a3, a3, -1 + bnez a3, 1b test_fail 2: +#if INTERRUPT_LEVEL(XCHAL_TIMER2_INTERRUPT) == 1 + rsr a2, exccause + assert eqi, a2, 4 /* LEVEL1_INTERRUPT_CAUSE */ +#endif test_end #endif test ccompare_interrupt_masked - set_vector kernel, 2f + set_vector TIMER0_VECTOR, 2f movi a2, 0 wsr a2, intenable rsr a2, 
interrupt @@ -197,7 +227,7 @@ test ccompare_interrupt_masked wsr a2, ccompare2 #endif - movi a3, 2 * WAIT_LOOPS + movi a3, WAIT_LOOPS make_ccount_delta a2, a15 #if XCHAL_NUM_TIMERS > 1 wsr a2, ccompare1 @@ -211,17 +241,20 @@ test ccompare_interrupt_masked movi a2, 1 << XCHAL_TIMER0_INTERRUPT wsr a2, intenable rsil a2, 0 - loop a3, 1f - nop 1: + addi a3, a3, -1 + bnez a3, 1b + test_fail 2: +#if INTERRUPT_LEVEL(XCHAL_TIMER0_INTERRUPT) == 1 rsr a2, exccause assert eqi, a2, 4 /* LEVEL1_INTERRUPT_CAUSE */ +#endif test_end test ccompare_interrupt_masked_waiti - set_vector kernel, 2f + set_vector TIMER0_VECTOR, 2f movi a2, 0 wsr a2, intenable rsr a2, interrupt @@ -231,7 +264,6 @@ test ccompare_interrupt_masked_waiti wsr a2, ccompare2 #endif - movi a3, 2 * WAIT_LOOPS make_ccount_delta a2, a15 #if XCHAL_NUM_TIMERS > 1 wsr a2, ccompare1 @@ -247,8 +279,10 @@ test ccompare_interrupt_masked_waiti waiti 0 test_fail 2: +#if INTERRUPT_LEVEL(XCHAL_TIMER0_INTERRUPT) == 1 rsr a2, exccause assert eqi, a2, 4 /* LEVEL1_INTERRUPT_CAUSE */ +#endif test_end #endif diff --git a/tests/unit/check-block-qdict.c b/tests/unit/check-block-qdict.c index 5a25825093..751c58e737 100644 --- a/tests/unit/check-block-qdict.c +++ b/tests/unit/check-block-qdict.c @@ -504,7 +504,7 @@ static void qdict_crumple_test_empty(void) src = qdict_new(); dst = qobject_to(QDict, qdict_crumple(src, &error_abort)); - + g_assert(dst); g_assert_cmpint(qdict_size(dst), ==, 0); qobject_unref(src); diff --git a/tests/unit/check-qjson.c b/tests/unit/check-qjson.c index c845f91d43..c4e0f851bf 100644 --- a/tests/unit/check-qjson.c +++ b/tests/unit/check-qjson.c @@ -21,7 +21,6 @@ #include "qapi/qmp/qnum.h" #include "qapi/qmp/qstring.h" #include "qemu/unicode.h" -#include "qemu-common.h" static QString *from_json_str(const char *jstr, bool single, Error **errp) { diff --git a/tests/unit/check-qnull.c b/tests/unit/check-qnull.c index ebf21db83c..5ceacc65d7 100644 --- a/tests/unit/check-qnull.c +++ b/tests/unit/check-qnull.c @@ -9,7 +9,6 @@ #include "qemu/osdep.h" #include "qapi/qmp/qnull.h" -#include "qemu-common.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qobject-output-visitor.h" #include "qapi/error.h" diff --git a/tests/unit/check-qnum.c b/tests/unit/check-qnum.c index b85fca2302..bf7fe45bac 100644 --- a/tests/unit/check-qnum.c +++ b/tests/unit/check-qnum.c @@ -15,7 +15,6 @@ #include "qemu/osdep.h" #include "qapi/qmp/qnum.h" -#include "qemu-common.h" /* * Public Interface test-cases diff --git a/tests/unit/check-qobject.c b/tests/unit/check-qobject.c index c1713d15af..022b7c74fe 100644 --- a/tests/unit/check-qobject.c +++ b/tests/unit/check-qobject.c @@ -8,7 +8,6 @@ */ #include "qemu/osdep.h" -#include "block/qdict.h" #include "qapi/error.h" #include "qapi/qmp/qbool.h" #include "qapi/qmp/qdict.h" @@ -16,7 +15,6 @@ #include "qapi/qmp/qnull.h" #include "qapi/qmp/qnum.h" #include "qapi/qmp/qstring.h" -#include "qemu-common.h" #include @@ -75,21 +73,6 @@ static void do_test_equality(bool expected, int _, ...) #define check_unequal(...) \ do_test_equality(false, 0, __VA_ARGS__, &test_equality_end_of_arguments) -static void do_free_all(int _, ...) -{ - va_list ap; - QObject *obj; - - va_start(ap, _); - while ((obj = va_arg(ap, QObject *)) != NULL) { - qobject_unref(obj); - } - va_end(ap); -} - -#define free_all(...) 
\ - do_free_all(0, __VA_ARGS__, NULL) - static void qobject_is_equal_null_test(void) { check_unequal(qnull(), NULL); @@ -97,15 +80,14 @@ static void qobject_is_equal_null_test(void) static void qobject_is_equal_num_test(void) { - QNum *u0, *i0, *d0, *dnan, *um42, *im42, *dm42; + g_autoptr(QNum) u0 = qnum_from_uint(0u); + g_autoptr(QNum) i0 = qnum_from_int(0); + g_autoptr(QNum) d0 = qnum_from_double(0.0); + g_autoptr(QNum) dnan = qnum_from_double(NAN); + g_autoptr(QNum) um42 = qnum_from_uint((uint64_t)-42); + g_autoptr(QNum) im42 = qnum_from_int(-42); + g_autoptr(QNum) dm42 = qnum_from_double(-42.0); - u0 = qnum_from_uint(0u); - i0 = qnum_from_int(0); - d0 = qnum_from_double(0.0); - dnan = qnum_from_double(NAN); - um42 = qnum_from_uint((uint64_t)-42); - im42 = qnum_from_int(-42); - dm42 = qnum_from_double(-42.0); /* Integers representing a mathematically equal number should * compare equal */ @@ -122,60 +104,45 @@ static void qobject_is_equal_num_test(void) check_unequal(um42, im42); check_unequal(um42, dm42); check_unequal(im42, dm42); - - free_all(u0, i0, d0, dnan, um42, im42, dm42); } static void qobject_is_equal_bool_test(void) { - QBool *btrue_0, *btrue_1, *bfalse_0, *bfalse_1; - - btrue_0 = qbool_from_bool(true); - btrue_1 = qbool_from_bool(true); - bfalse_0 = qbool_from_bool(false); - bfalse_1 = qbool_from_bool(false); + g_autoptr(QBool) btrue_0 = qbool_from_bool(true); + g_autoptr(QBool) btrue_1 = qbool_from_bool(true); + g_autoptr(QBool) bfalse_0 = qbool_from_bool(false); + g_autoptr(QBool) bfalse_1 = qbool_from_bool(false); check_equal(btrue_0, btrue_1); check_equal(bfalse_0, bfalse_1); check_unequal(btrue_0, bfalse_0); - - free_all(btrue_0, btrue_1, bfalse_0, bfalse_1); } static void qobject_is_equal_string_test(void) { - QString *str_base, *str_whitespace_0, *str_whitespace_1, *str_whitespace_2; - QString *str_whitespace_3, *str_case, *str_built; - - str_base = qstring_from_str("foo"); - str_whitespace_0 = qstring_from_str(" foo"); - str_whitespace_1 = qstring_from_str("foo "); - str_whitespace_2 = qstring_from_str("foo\b"); - str_whitespace_3 = qstring_from_str("fooo\b"); - str_case = qstring_from_str("Foo"); - + g_autoptr(QString) str_base = qstring_from_str("foo"); + g_autoptr(QString) str_whitespace_0 = qstring_from_str(" foo"); + g_autoptr(QString) str_whitespace_1 = qstring_from_str("foo "); + g_autoptr(QString) str_whitespace_2 = qstring_from_str("foo\b"); + g_autoptr(QString) str_whitespace_3 = qstring_from_str("fooo\b"); + g_autoptr(QString) str_case = qstring_from_str("Foo"); /* Should yield "foo" */ - str_built = qstring_from_substr("buffoon", 3, 6); + g_autoptr(QString) str_built = qstring_from_substr("buffoon", 3, 6); check_unequal(str_base, str_whitespace_0, str_whitespace_1, str_whitespace_2, str_whitespace_3, str_case); check_equal(str_base, str_built); - - free_all(str_base, str_whitespace_0, str_whitespace_1, str_whitespace_2, - str_whitespace_3, str_case, str_built); } static void qobject_is_equal_list_test(void) { - QList *list_0, *list_1, *list_cloned; - QList *list_reordered, *list_longer, *list_shorter; - - list_0 = qlist_new(); - list_1 = qlist_new(); - list_reordered = qlist_new(); - list_longer = qlist_new(); - list_shorter = qlist_new(); + g_autoptr(QList) list_0 = qlist_new(); + g_autoptr(QList) list_1 = qlist_new(); + g_autoptr(QList) list_reordered = qlist_new(); + g_autoptr(QList) list_longer = qlist_new(); + g_autoptr(QList) list_shorter = qlist_new(); + g_autoptr(QList) list_cloned = NULL; qlist_append_int(list_0, 1); qlist_append_int(list_0, 2); 
@@ -206,26 +173,19 @@ static void qobject_is_equal_list_test(void) * itself */ qlist_append(list_0, qnum_from_double(NAN)); g_assert(qobject_is_equal(QOBJECT(list_0), QOBJECT(list_0)) == false); - - free_all(list_0, list_1, list_cloned, list_reordered, list_longer, - list_shorter); } static void qobject_is_equal_dict_test(void) { - QDict *dict_0, *dict_1, *dict_cloned; - QDict *dict_different_key, *dict_different_value, *dict_different_null_key; - QDict *dict_longer, *dict_shorter, *dict_nested; - QDict *dict_crumpled; - - dict_0 = qdict_new(); - dict_1 = qdict_new(); - dict_different_key = qdict_new(); - dict_different_value = qdict_new(); - dict_different_null_key = qdict_new(); - dict_longer = qdict_new(); - dict_shorter = qdict_new(); - dict_nested = qdict_new(); + g_autoptr(QDict) dict_cloned = NULL; + g_autoptr(QDict) dict_0 = qdict_new(); + g_autoptr(QDict) dict_1 = qdict_new(); + g_autoptr(QDict) dict_different_key = qdict_new(); + g_autoptr(QDict) dict_different_value = qdict_new(); + g_autoptr(QDict) dict_different_null_key = qdict_new(); + g_autoptr(QDict) dict_longer = qdict_new(); + g_autoptr(QDict) dict_shorter = qdict_new(); + g_autoptr(QDict) dict_nested = qdict_new(); qdict_put_int(dict_0, "f.o", 1); qdict_put_int(dict_0, "bar", 2); @@ -275,41 +235,25 @@ static void qobject_is_equal_dict_test(void) dict_different_null_key, dict_longer, dict_shorter, dict_nested); - dict_crumpled = qobject_to(QDict, qdict_crumple(dict_1, &error_abort)); - check_equal(dict_crumpled, dict_nested); - - qdict_flatten(dict_nested); - check_equal(dict_0, dict_nested); - /* Containing an NaN value will make this dict compare unequal to * itself */ qdict_put(dict_0, "NaN", qnum_from_double(NAN)); g_assert(qobject_is_equal(QOBJECT(dict_0), QOBJECT(dict_0)) == false); - - free_all(dict_0, dict_1, dict_cloned, dict_different_key, - dict_different_value, dict_different_null_key, dict_longer, - dict_shorter, dict_nested, dict_crumpled); } static void qobject_is_equal_conversion_test(void) { - QNum *u0, *i0, *d0; - QString *s0, *s_empty; - QBool *bfalse; - - u0 = qnum_from_uint(0u); - i0 = qnum_from_int(0); - d0 = qnum_from_double(0.0); - s0 = qstring_from_str("0"); - s_empty = qstring_new(); - bfalse = qbool_from_bool(false); + g_autoptr(QNum) u0 = qnum_from_uint(0u); + g_autoptr(QNum) i0 = qnum_from_int(0); + g_autoptr(QNum) d0 = qnum_from_double(0.0); + g_autoptr(QString) s0 = qstring_from_str("0"); + g_autoptr(QString) s_empty = qstring_new(); + g_autoptr(QBool) bfalse = qbool_from_bool(false); /* No automatic type conversion */ check_unequal(u0, s0, s_empty, bfalse, qnull(), NULL); check_unequal(i0, s0, s_empty, bfalse, qnull(), NULL); check_unequal(d0, s0, s_empty, bfalse, qnull(), NULL); - - free_all(u0, i0, d0, s0, s_empty, bfalse); } int main(int argc, char **argv) diff --git a/tests/unit/check-qom-proplist.c b/tests/unit/check-qom-proplist.c index 48503e0dff..79d4a8b89d 100644 --- a/tests/unit/check-qom-proplist.c +++ b/tests/unit/check-qom-proplist.c @@ -27,6 +27,7 @@ #include "qom/object.h" #include "qemu/module.h" #include "qemu/option.h" +#include "qemu/keyval.h" #include "qemu/config-file.h" #include "qom/object_interfaces.h" @@ -488,7 +489,7 @@ static void test_dummy_badenum(void) g_assert(dobj == NULL); g_assert(err != NULL); g_assert_cmpstr(error_get_pretty(err), ==, - "Invalid parameter 'yeti'"); + "Parameter 'av' does not accept value 'yeti'"); g_assert(object_resolve_path_component(parent, "dummy0") == NULL); diff --git a/tests/unit/check-qstring.c b/tests/unit/check-qstring.c index 
4bf9772093..bd861f4f8b 100644 --- a/tests/unit/check-qstring.c +++ b/tests/unit/check-qstring.c @@ -12,7 +12,6 @@ #include "qemu/osdep.h" #include "qapi/qmp/qstring.h" -#include "qemu-common.h" /* * Public Interface test-cases diff --git a/tests/unit/crypto-tls-psk-helpers.c b/tests/unit/crypto-tls-psk-helpers.c index 7f8a488961..c6cc740772 100644 --- a/tests/unit/crypto-tls-psk-helpers.c +++ b/tests/unit/crypto-tls-psk-helpers.c @@ -24,18 +24,29 @@ #include "crypto-tls-psk-helpers.h" #include "qemu/sockets.h" -void test_tls_psk_init(const char *pskfile) +static void +test_tls_psk_init_common(const char *pskfile, const char *user, const char *key) { - FILE *fp; + g_autoptr(GError) gerr = NULL; + g_autofree char *line = g_strdup_printf("%s:%s\n", user, key); - fp = fopen(pskfile, "w"); - if (fp == NULL) { - g_critical("Failed to create pskfile %s", pskfile); + g_file_set_contents(pskfile, line, strlen(line), &gerr); + if (gerr != NULL) { + g_critical("Failed to create pskfile %s: %s", pskfile, gerr->message); abort(); } - /* Don't do this in real applications! Use psktool. */ - fprintf(fp, "qemu:009d5638c40fde0c\n"); - fclose(fp); +} + +void test_tls_psk_init(const char *pskfile) +{ + /* Don't hard code a key like this in real applications! Use psktool. */ + test_tls_psk_init_common(pskfile, "qemu", "009d5638c40fde0c"); +} + +void test_tls_psk_init_alt(const char *pskfile) +{ + /* Don't hard code a key like this in real applications! Use psktool. */ + test_tls_psk_init_common(pskfile, "qemu", "10ffa6a2c42f0388"); } void test_tls_psk_cleanup(const char *pskfile) diff --git a/tests/unit/crypto-tls-psk-helpers.h b/tests/unit/crypto-tls-psk-helpers.h index faa645c629..67f8bdda71 100644 --- a/tests/unit/crypto-tls-psk-helpers.h +++ b/tests/unit/crypto-tls-psk-helpers.h @@ -24,6 +24,7 @@ #include void test_tls_psk_init(const char *keyfile); +void test_tls_psk_init_alt(const char *keyfile); void test_tls_psk_cleanup(const char *keyfile); #endif diff --git a/tests/unit/crypto-tls-x509-helpers.c b/tests/unit/crypto-tls-x509-helpers.c index fc609b3fd4..e9937f60d8 100644 --- a/tests/unit/crypto-tls-x509-helpers.c +++ b/tests/unit/crypto-tls-x509-helpers.c @@ -168,9 +168,19 @@ test_tls_get_ipaddr(const char *addrstr, hints.ai_flags = AI_NUMERICHOST; g_assert(getaddrinfo(addrstr, NULL, &hints, &res) == 0); - *datalen = res->ai_addrlen; - *data = g_new(char, *datalen); - memcpy(*data, res->ai_addr, *datalen); + if (res->ai_family == AF_INET) { + struct sockaddr_in *in = (struct sockaddr_in *)res->ai_addr; + *datalen = sizeof(in->sin_addr); + *data = g_new(char, *datalen); + memcpy(*data, &in->sin_addr, *datalen); + } else if (res->ai_family == AF_INET6) { + struct sockaddr_in6 *in = (struct sockaddr_in6 *)res->ai_addr; + *datalen = sizeof(in->sin6_addr); + *data = g_new(char, *datalen); + memcpy(*data, &in->sin6_addr, *datalen); + } else { + g_assert_not_reached(); + } freeaddrinfo(res); } diff --git a/tests/unit/crypto-tls-x509-helpers.h b/tests/unit/crypto-tls-x509-helpers.h index cf6329e653..247e7160eb 100644 --- a/tests/unit/crypto-tls-x509-helpers.h +++ b/tests/unit/crypto-tls-x509-helpers.h @@ -26,6 +26,9 @@ #include +#define QCRYPTO_TLS_TEST_CLIENT_NAME "ACME QEMU Client" +#define QCRYPTO_TLS_TEST_CLIENT_HOSTILE_NAME "ACME Hostile Client" + /* * This contains parameter about how to generate * certificates. 
@@ -118,6 +121,56 @@ void test_tls_cleanup(const char *keyfile); }; \ test_tls_generate_cert(&varname, NULL) +# define TLS_ROOT_REQ_SIMPLE(varname, fname) \ + QCryptoTLSTestCertReq varname = { \ + .filename = fname, \ + .cn = "qemu-CA", \ + .basicConstraintsEnable = true, \ + .basicConstraintsCritical = true, \ + .basicConstraintsIsCA = true, \ + .keyUsageEnable = true, \ + .keyUsageCritical = true, \ + .keyUsageValue = GNUTLS_KEY_KEY_CERT_SIGN, \ + }; \ + test_tls_generate_cert(&varname, NULL) + +# define TLS_CERT_REQ_SIMPLE_CLIENT(varname, cavarname, cname, fname) \ + QCryptoTLSTestCertReq varname = { \ + .filename = fname, \ + .cn = cname, \ + .basicConstraintsEnable = true, \ + .basicConstraintsCritical = true, \ + .basicConstraintsIsCA = false, \ + .keyUsageEnable = true, \ + .keyUsageCritical = true, \ + .keyUsageValue = \ + GNUTLS_KEY_DIGITAL_SIGNATURE | GNUTLS_KEY_KEY_ENCIPHERMENT, \ + .keyPurposeEnable = true, \ + .keyPurposeCritical = true, \ + .keyPurposeOID1 = GNUTLS_KP_TLS_WWW_CLIENT, \ + }; \ + test_tls_generate_cert(&varname, cavarname.crt) + +# define TLS_CERT_REQ_SIMPLE_SERVER(varname, cavarname, fname, \ + hostname, ipaddr) \ + QCryptoTLSTestCertReq varname = { \ + .filename = fname, \ + .cn = hostname ? hostname : ipaddr, \ + .altname1 = hostname, \ + .ipaddr1 = ipaddr, \ + .basicConstraintsEnable = true, \ + .basicConstraintsCritical = true, \ + .basicConstraintsIsCA = false, \ + .keyUsageEnable = true, \ + .keyUsageCritical = true, \ + .keyUsageValue = \ + GNUTLS_KEY_DIGITAL_SIGNATURE | GNUTLS_KEY_KEY_ENCIPHERMENT, \ + .keyPurposeEnable = true, \ + .keyPurposeCritical = true, \ + .keyPurposeOID1 = GNUTLS_KP_TLS_WWW_SERVER, \ + }; \ + test_tls_generate_cert(&varname, cavarname.crt) + extern const asn1_static_node pkix_asn1_tab[]; #endif diff --git a/tests/unit/io-channel-helpers.c b/tests/unit/io-channel-helpers.c index ff156ed3c4..c0799c21c2 100644 --- a/tests/unit/io-channel-helpers.c +++ b/tests/unit/io-channel-helpers.c @@ -25,7 +25,6 @@ struct QIOChannelTest { QIOChannel *src; QIOChannel *dst; - bool blocking; size_t len; size_t niov; char *input; @@ -42,8 +41,6 @@ static gpointer test_io_thread_writer(gpointer opaque) { QIOChannelTest *data = opaque; - qio_channel_set_blocking(data->src, data->blocking, NULL); - qio_channel_writev_all(data->src, data->inputv, data->niov, @@ -58,8 +55,6 @@ static gpointer test_io_thread_reader(gpointer opaque) { QIOChannelTest *data = opaque; - qio_channel_set_blocking(data->dst, data->blocking, NULL); - qio_channel_readv_all(data->dst, data->outputv, data->niov, @@ -113,7 +108,9 @@ void qio_channel_test_run_threads(QIOChannelTest *test, test->src = src; test->dst = dst; - test->blocking = blocking; + + qio_channel_set_blocking(test->dst, blocking, NULL); + qio_channel_set_blocking(test->src, blocking, NULL); reader = g_thread_new("reader", test_io_thread_reader, diff --git a/tests/unit/meson.build b/tests/unit/meson.build index 5736d285b2..b497a41378 100644 --- a/tests/unit/meson.build +++ b/tests/unit/meson.build @@ -23,6 +23,7 @@ tests = { # all code tested by test-x86-cpuid is inside topology.h 'test-x86-cpuid': [], 'test-cutils': [], + 'test-div128': [], 'test-shift128': [], 'test-mul64': [], # all code tested by test-int128 is inside int128.h @@ -43,14 +44,19 @@ tests = { 'test-keyval': [testqapi], 'test-logging': [], 'test-uuid': [], - 'ptimer-test': ['ptimer-test-stubs.c', meson.source_root() / 'hw/core/ptimer.c'], + 'ptimer-test': ['ptimer-test-stubs.c', meson.project_source_root() / 'hw/core/ptimer.c'], 'test-qapi-util': 
[], + 'test-smp-parse': [qom, meson.project_source_root() / 'hw/core/machine-smp.c'], } if have_system or have_tools tests += { 'test-qmp-event': [testqapi], } + + if seccomp.found() + tests += {'test-seccomp': ['../../softmmu/qemu-seccomp.c', seccomp]} + endif endif if have_block @@ -71,7 +77,9 @@ if have_block 'test-crypto-hash': [crypto], 'test-crypto-hmac': [crypto], 'test-crypto-cipher': [crypto], + 'test-crypto-akcipher': [crypto], 'test-crypto-secret': [crypto, keyutils], + 'test-crypto-der': [crypto], 'test-authz-simple': [authz], 'test-authz-list': [authz], 'test-authz-listfile': [authz], @@ -80,6 +88,7 @@ if have_block 'test-io-channel-file': ['io-channel-helpers.c', io], 'test-io-channel-command': ['io-channel-helpers.c', io], 'test-io-channel-buffer': ['io-channel-helpers.c', io], + 'test-io-channel-null': [io], 'test-crypto-ivgen': [io], 'test-crypto-afsplit': [io], 'test-crypto-block': [io], @@ -104,13 +113,13 @@ if have_block if 'CONFIG_POSIX' in config_host tests += {'test-image-locking': [testblock]} endif - if 'CONFIG_REPLICATION' in config_host + if config_host_data.get('CONFIG_REPLICATION') tests += {'test-replication': [testblock]} endif if nettle.found() or gcrypt.found() tests += {'test-crypto-pbkdf': [io]} endif - if 'CONFIG_EPOLL_CREATE1' in config_host + if config_host_data.get('CONFIG_EPOLL_CREATE1') tests += {'test-fdmon-epoll': [testblock]} endif endif @@ -127,7 +136,7 @@ if have_system 'test-vmstate': [migration, io], 'test-yank': ['socket-helpers.c', qom, io, chardev] } - if 'CONFIG_INOTIFY1' in config_host + if config_host_data.get('CONFIG_INOTIFY1') tests += {'test-util-filemonitor': []} endif @@ -142,15 +151,13 @@ if have_system endif tests += { - 'test-qdev-global-props': [qom, hwcore, testqapi] + 'test-qdev-global-props': [qom, hwcore] } endif endif -if 'CONFIG_TSAN' not in config_host and \ - 'CONFIG_GUEST_AGENT' in config_host and \ - 'CONFIG_LINUX' in config_host - tests += {'test-qga': ['../qtest/libqtest.c']} +if have_ga and targetos == 'linux' + tests += {'test-qga': ['../qtest/libqmp.c']} test_deps += {'test-qga': qga} endif diff --git a/tests/unit/ptimer-test-stubs.c b/tests/unit/ptimer-test-stubs.c index 2a3ef58799..f5e75a96b6 100644 --- a/tests/unit/ptimer-test-stubs.c +++ b/tests/unit/ptimer-test-stubs.c @@ -12,7 +12,6 @@ #include "qemu/main-loop.h" #include "sysemu/replay.h" #include "migration/vmstate.h" -#include "sysemu/cpu-timers.h" #include "ptimer-test.h" diff --git a/tests/unit/ptimer-test.c b/tests/unit/ptimer-test.c index 9176b96c1c..04b5f4e3d0 100644 --- a/tests/unit/ptimer-test.c +++ b/tests/unit/ptimer-test.c @@ -768,8 +768,8 @@ static void add_ptimer_tests(uint8_t policy) char policy_name[256] = ""; char *tmp; - if (policy == PTIMER_POLICY_DEFAULT) { - g_sprintf(policy_name, "default"); + if (policy == PTIMER_POLICY_LEGACY) { + g_sprintf(policy_name, "legacy"); } if (policy & PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD) { @@ -798,71 +798,71 @@ static void add_ptimer_tests(uint8_t policy) g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/set_count policy=%s", policy_name), - g_memdup(&policy, 1), check_set_count, g_free); + g_memdup2(&policy, 1), check_set_count, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/set_limit policy=%s", policy_name), - g_memdup(&policy, 1), check_set_limit, g_free); + g_memdup2(&policy, 1), check_set_limit, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/oneshot policy=%s", policy_name), - g_memdup(&policy, 1), check_oneshot, g_free); + 
g_memdup2(&policy, 1), check_oneshot, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/periodic policy=%s", policy_name), - g_memdup(&policy, 1), check_periodic, g_free); + g_memdup2(&policy, 1), check_periodic, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/on_the_fly_mode_change policy=%s", policy_name), - g_memdup(&policy, 1), check_on_the_fly_mode_change, g_free); + g_memdup2(&policy, 1), check_on_the_fly_mode_change, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/on_the_fly_period_change policy=%s", policy_name), - g_memdup(&policy, 1), check_on_the_fly_period_change, g_free); + g_memdup2(&policy, 1), check_on_the_fly_period_change, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/on_the_fly_freq_change policy=%s", policy_name), - g_memdup(&policy, 1), check_on_the_fly_freq_change, g_free); + g_memdup2(&policy, 1), check_on_the_fly_freq_change, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/run_with_period_0 policy=%s", policy_name), - g_memdup(&policy, 1), check_run_with_period_0, g_free); + g_memdup2(&policy, 1), check_run_with_period_0, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/run_with_delta_0 policy=%s", policy_name), - g_memdup(&policy, 1), check_run_with_delta_0, g_free); + g_memdup2(&policy, 1), check_run_with_delta_0, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/periodic_with_load_0 policy=%s", policy_name), - g_memdup(&policy, 1), check_periodic_with_load_0, g_free); + g_memdup2(&policy, 1), check_periodic_with_load_0, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/oneshot_with_load_0 policy=%s", policy_name), - g_memdup(&policy, 1), check_oneshot_with_load_0, g_free); + g_memdup2(&policy, 1), check_oneshot_with_load_0, g_free); g_free(tmp); } static void add_all_ptimer_policies_comb_tests(void) { int last_policy = PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT; - int policy = PTIMER_POLICY_DEFAULT; + int policy = PTIMER_POLICY_LEGACY; for (; policy < (last_policy << 1); policy++) { if ((policy & PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT) && diff --git a/tests/unit/rcutorture.c b/tests/unit/rcutorture.c index de6f649058..495a4e6f42 100644 --- a/tests/unit/rcutorture.c +++ b/tests/unit/rcutorture.c @@ -122,7 +122,7 @@ static void *rcu_read_perf_test(void *arg) rcu_register_thread(); - *(struct rcu_reader_data **)arg = &rcu_reader; + *(struct rcu_reader_data **)arg = get_ptr_rcu_reader(); qatomic_inc(&nthreadsrunning); while (goflag == GOFLAG_INIT) { g_usleep(1000); @@ -148,7 +148,7 @@ static void *rcu_update_perf_test(void *arg) rcu_register_thread(); - *(struct rcu_reader_data **)arg = &rcu_reader; + *(struct rcu_reader_data **)arg = get_ptr_rcu_reader(); qatomic_inc(&nthreadsrunning); while (goflag == GOFLAG_INIT) { g_usleep(1000); @@ -253,7 +253,7 @@ static void *rcu_read_stress_test(void *arg) rcu_register_thread(); - *(struct rcu_reader_data **)arg = &rcu_reader; + *(struct rcu_reader_data **)arg = get_ptr_rcu_reader(); while (goflag == GOFLAG_INIT) { g_usleep(1000); } @@ -304,7 +304,7 @@ static void *rcu_update_stress_test(void *arg) struct rcu_stress *cp = qatomic_read(&rcu_stress_current); rcu_register_thread(); - *(struct rcu_reader_data **)arg = &rcu_reader; + *(struct rcu_reader_data **)arg = get_ptr_rcu_reader(); while (goflag == GOFLAG_INIT) { g_usleep(1000); @@ -347,7 +347,7 @@ static void 
*rcu_fake_update_stress_test(void *arg) { rcu_register_thread(); - *(struct rcu_reader_data **)arg = &rcu_reader; + *(struct rcu_reader_data **)arg = get_ptr_rcu_reader(); while (goflag == GOFLAG_INIT) { g_usleep(1000); } diff --git a/tests/unit/socket-helpers.c b/tests/unit/socket-helpers.c index f704fd1a69..eecadf3a3c 100644 --- a/tests/unit/socket-helpers.c +++ b/tests/unit/socket-helpers.c @@ -19,7 +19,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/sockets.h" #include "socket-helpers.h" @@ -89,7 +88,7 @@ static int socket_can_bind_connect(const char *hostname, int family) goto cleanup; } - qemu_set_nonblock(cfd); + qemu_socket_set_nonblock(cfd); if (connect(cfd, (struct sockaddr *)&ss, sslen) < 0) { if (errno == EINPROGRESS) { check_soerr = true; @@ -105,7 +104,7 @@ static int socket_can_bind_connect(const char *hostname, int family) } if (check_soerr) { - if (qemu_getsockopt(cfd, SOL_SOCKET, SO_ERROR, &soerr, &soerrlen) < 0) { + if (getsockopt(cfd, SOL_SOCKET, SO_ERROR, &soerr, &soerrlen) < 0) { goto cleanup; } if (soerr) { @@ -155,3 +154,19 @@ int socket_check_protocol_support(bool *has_ipv4, bool *has_ipv6) return 0; } + +void socket_check_afunix_support(bool *has_afunix) +{ + int fd; + + fd = socket(PF_UNIX, SOCK_STREAM, 0); + closesocket(fd); + +#ifdef _WIN32 + *has_afunix = (fd != (int)INVALID_SOCKET); +#else + *has_afunix = (fd >= 0); +#endif + + return; +} diff --git a/tests/unit/socket-helpers.h b/tests/unit/socket-helpers.h index 512a004811..ed8477ceb3 100644 --- a/tests/unit/socket-helpers.h +++ b/tests/unit/socket-helpers.h @@ -32,4 +32,13 @@ */ int socket_check_protocol_support(bool *has_ipv4, bool *has_ipv6); +/* + * @has_afunix: set to true on return if unix socket support is available + * + * Check whether unix domain socket support is available for use. + * On success, @has_afunix will be set to indicate whether AF_UNIX protocol + * is available. 
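A minimal usage sketch of the new helper, assuming a GLib test case along the lines of the others in this directory; the test name and skip message are illustrative only:

#include "qemu/osdep.h"
#include "socket-helpers.h"

/* hypothetical test that needs AF_UNIX; skip cleanly when it is unavailable */
static void test_something_over_afunix(void)
{
    bool has_afunix;

    socket_check_afunix_support(&has_afunix);
    if (!has_afunix) {
        g_test_skip("AF_UNIX sockets not supported on this host");
        return;
    }
    /* ... exercise the AF_UNIX code path here ... */
}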
+ */ +void socket_check_afunix_support(bool *has_afunix); + #endif diff --git a/tests/unit/test-aio.c b/tests/unit/test-aio.c index 6feeb9a4a9..178048d2f2 100644 --- a/tests/unit/test-aio.c +++ b/tests/unit/test-aio.c @@ -130,7 +130,7 @@ static void *test_acquire_thread(void *opaque) static void set_event_notifier(AioContext *ctx, EventNotifier *notifier, EventNotifierHandler *handler) { - aio_set_event_notifier(ctx, notifier, false, handler, NULL); + aio_set_event_notifier(ctx, notifier, false, handler, NULL, NULL); } static void dummy_notifier_read(EventNotifier *n) @@ -390,7 +390,7 @@ static void test_aio_external_client(void) for (i = 1; i < 3; i++) { EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true }; event_notifier_init(&data.e, false); - aio_set_event_notifier(ctx, &data.e, true, event_ready_cb, NULL); + aio_set_event_notifier(ctx, &data.e, true, event_ready_cb, NULL, NULL); event_notifier_set(&data.e); for (j = 0; j < i; j++) { aio_disable_external(ctx); diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c index ce071b5fc5..09dc4a4891 100644 --- a/tests/unit/test-bdrv-drain.c +++ b/tests/unit/test-bdrv-drain.c @@ -65,8 +65,9 @@ static void co_reenter_bh(void *opaque) } static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { BDRVTestState *s = bs->opaque; @@ -771,6 +772,7 @@ static void test_iothread_drain_subtree(void) typedef struct TestBlockJob { BlockJob common; + BlockDriverState *bs; int run_ret; int prepare_ret; bool running; @@ -782,7 +784,7 @@ static int test_job_prepare(Job *job) TestBlockJob *s = container_of(job, TestBlockJob, common.job); /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */ - blk_flush(s->common.blk); + bdrv_flush(s->bs); return s->prepare_ret; } @@ -791,7 +793,7 @@ static void test_job_commit(Job *job) TestBlockJob *s = container_of(job, TestBlockJob, common.job); /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */ - blk_flush(s->common.blk); + bdrv_flush(s->bs); } static void test_job_abort(Job *job) @@ -799,7 +801,7 @@ static void test_job_abort(Job *job) TestBlockJob *s = container_of(job, TestBlockJob, common.job); /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */ - blk_flush(s->common.blk); + bdrv_flush(s->bs); } static int coroutine_fn test_job_run(Job *job, Error **errp) @@ -914,6 +916,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, tjob = block_job_create("job0", &test_job_driver, NULL, src, 0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort); + tjob->bs = src; job = &tjob->common; block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort); @@ -927,9 +930,9 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, tjob->prepare_ret = -EIO; break; } + aio_context_release(ctx); job_start(&job->job); - aio_context_release(ctx); if (use_iothread) { /* job_co_entry() is run in the I/O thread, wait for the actual job @@ -940,63 +943,85 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, } } - g_assert_cmpint(job->job.pause_count, ==, 0); - g_assert_false(job->job.paused); - g_assert_true(tjob->running); - g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ + WITH_JOB_LOCK_GUARD() { + g_assert_cmpint(job->job.pause_count, ==, 0); + g_assert_false(job->job.paused); + g_assert_true(tjob->running); + 
g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ + } do_drain_begin_unlocked(drain_type, drain_bs); - if (drain_type == BDRV_DRAIN_ALL) { - /* bdrv_drain_all() drains both src and target */ - g_assert_cmpint(job->job.pause_count, ==, 2); - } else { - g_assert_cmpint(job->job.pause_count, ==, 1); + WITH_JOB_LOCK_GUARD() { + if (drain_type == BDRV_DRAIN_ALL) { + /* bdrv_drain_all() drains both src and target */ + g_assert_cmpint(job->job.pause_count, ==, 2); + } else { + g_assert_cmpint(job->job.pause_count, ==, 1); + } + g_assert_true(job->job.paused); + g_assert_false(job->job.busy); /* The job is paused */ } - g_assert_true(job->job.paused); - g_assert_false(job->job.busy); /* The job is paused */ do_drain_end_unlocked(drain_type, drain_bs); if (use_iothread) { - /* paused is reset in the I/O thread, wait for it */ + /* + * Here we are waiting for the paused status to change, + * so don't bother protecting the read every time. + * + * paused is reset in the I/O thread, wait for it + */ while (job->job.paused) { aio_poll(qemu_get_aio_context(), false); } } - g_assert_cmpint(job->job.pause_count, ==, 0); - g_assert_false(job->job.paused); - g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ + WITH_JOB_LOCK_GUARD() { + g_assert_cmpint(job->job.pause_count, ==, 0); + g_assert_false(job->job.paused); + g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ + } do_drain_begin_unlocked(drain_type, target); - if (drain_type == BDRV_DRAIN_ALL) { - /* bdrv_drain_all() drains both src and target */ - g_assert_cmpint(job->job.pause_count, ==, 2); - } else { - g_assert_cmpint(job->job.pause_count, ==, 1); + WITH_JOB_LOCK_GUARD() { + if (drain_type == BDRV_DRAIN_ALL) { + /* bdrv_drain_all() drains both src and target */ + g_assert_cmpint(job->job.pause_count, ==, 2); + } else { + g_assert_cmpint(job->job.pause_count, ==, 1); + } + g_assert_true(job->job.paused); + g_assert_false(job->job.busy); /* The job is paused */ } - g_assert_true(job->job.paused); - g_assert_false(job->job.busy); /* The job is paused */ do_drain_end_unlocked(drain_type, target); if (use_iothread) { - /* paused is reset in the I/O thread, wait for it */ + /* + * Here we are waiting for the paused status to change, + * so don't bother protecting the read every time. + * + * paused is reset in the I/O thread, wait for it + */ while (job->job.paused) { aio_poll(qemu_get_aio_context(), false); } } - g_assert_cmpint(job->job.pause_count, ==, 0); - g_assert_false(job->job.paused); - g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ + WITH_JOB_LOCK_GUARD() { + g_assert_cmpint(job->job.pause_count, ==, 0); + g_assert_false(job->job.paused); + g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ + } - aio_context_acquire(ctx); - ret = job_complete_sync(&job->job, &error_abort); + WITH_JOB_LOCK_GUARD() { + ret = job_complete_sync_locked(&job->job, &error_abort); + } g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 
0 : -EIO)); + aio_context_acquire(ctx); if (use_iothread) { blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort); assert(blk_get_aio_context(blk_target) == qemu_get_aio_context()); @@ -1106,8 +1131,9 @@ static void bdrv_test_top_close(BlockDriverState *bs) } static int coroutine_fn bdrv_test_top_co_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { BDRVTestTopState *tts = bs->opaque; return bdrv_co_preadv(tts->wait_child, offset, bytes, qiov, flags); @@ -1512,16 +1538,16 @@ static void test_set_aio_context(void) &error_abort); bdrv_drained_begin(bs); - bdrv_try_set_aio_context(bs, ctx_a, &error_abort); + bdrv_try_change_aio_context(bs, ctx_a, NULL, &error_abort); aio_context_acquire(ctx_a); bdrv_drained_end(bs); bdrv_drained_begin(bs); - bdrv_try_set_aio_context(bs, ctx_b, &error_abort); + bdrv_try_change_aio_context(bs, ctx_b, NULL, &error_abort); aio_context_release(ctx_a); aio_context_acquire(ctx_b); - bdrv_try_set_aio_context(bs, qemu_get_aio_context(), &error_abort); + bdrv_try_change_aio_context(bs, qemu_get_aio_context(), NULL, &error_abort); aio_context_release(ctx_b); bdrv_drained_end(bs); @@ -1536,6 +1562,7 @@ typedef struct TestDropBackingBlockJob { bool should_complete; bool *did_complete; BlockDriverState *detach_also; + BlockDriverState *bs; } TestDropBackingBlockJob; static int coroutine_fn test_drop_backing_job_run(Job *job, Error **errp) @@ -1555,7 +1582,7 @@ static void test_drop_backing_job_commit(Job *job) TestDropBackingBlockJob *s = container_of(job, TestDropBackingBlockJob, common.job); - bdrv_set_backing_hd(blk_bs(s->common.blk), NULL, &error_abort); + bdrv_set_backing_hd(s->bs, NULL, &error_abort); bdrv_set_backing_hd(s->detach_also, NULL, &error_abort); *s->did_complete = true; @@ -1655,6 +1682,7 @@ static void test_blockjob_commit_by_drained_end(void) job = block_job_create("job", &test_drop_backing_job_driver, NULL, bs_parents[2], 0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort); + job->bs = bs_parents[2]; job->detach_also = bs_parents[0]; job->did_complete = &job_has_completed; @@ -1802,9 +1830,8 @@ static void test_drop_intermediate_poll(void) for (i = 0; i < 3; i++) { if (i) { /* Takes the reference to chain[i - 1] */ - chain[i]->backing = bdrv_attach_child(chain[i], chain[i - 1], - "chain", &chain_child_class, - BDRV_CHILD_COW, &error_abort); + bdrv_attach_child(chain[i], chain[i - 1], "chain", + &chain_child_class, BDRV_CHILD_COW, &error_abort); } } @@ -1855,10 +1882,10 @@ static void bdrv_replace_test_close(BlockDriverState *bs) * Set .has_read to true and return success. 
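The AioContext call-site change repeated in the hunks above is mechanical; a minimal before/after sketch restating only what the hunks themselves show (the purpose of the extra NULL argument is not spelled out in this diff):

/* old form, as removed */
bdrv_try_set_aio_context(bs, ctx, &error_abort);

/* new form, as added: one extra argument, passed as NULL throughout these tests */
bdrv_try_change_aio_context(bs, ctx, NULL, &error_abort);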
*/ static int coroutine_fn bdrv_replace_test_co_preadv(BlockDriverState *bs, - uint64_t offset, - uint64_t bytes, + int64_t offset, + int64_t bytes, QEMUIOVector *qiov, - int flags) + BdrvRequestFlags flags) { BDRVReplaceTestState *s = bs->opaque; @@ -1942,6 +1969,7 @@ static void coroutine_fn bdrv_replace_test_co_drain_end(BlockDriverState *bs) static BlockDriver bdrv_replace_test = { .format_name = "replace_test", .instance_size = sizeof(BDRVReplaceTestState), + .supports_backing = true, .bdrv_close = bdrv_replace_test_close, .bdrv_co_preadv = bdrv_replace_test_co_preadv, @@ -2021,9 +2049,8 @@ static void do_test_replace_child_mid_drain(int old_drain_count, new_child_bs->total_sectors = 1; bdrv_ref(old_child_bs); - parent_bs->backing = bdrv_attach_child(parent_bs, old_child_bs, "child", - &child_of_bds, BDRV_CHILD_COW, - &error_abort); + bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds, + BDRV_CHILD_COW, &error_abort); for (i = 0; i < old_drain_count; i++) { bdrv_drained_begin(old_child_bs); diff --git a/tests/unit/test-bdrv-graph-mod.c b/tests/unit/test-bdrv-graph-mod.c index a6e3bb79be..c522591531 100644 --- a/tests/unit/test-bdrv-graph-mod.c +++ b/tests/unit/test-bdrv-graph-mod.c @@ -26,6 +26,8 @@ static BlockDriver bdrv_pass_through = { .format_name = "pass-through", + .is_filter = true, + .filtered_child_is_backing = true, .bdrv_child_perm = bdrv_default_perms, }; @@ -57,6 +59,8 @@ static void exclusive_write_perms(BlockDriverState *bs, BdrvChild *c, static BlockDriver bdrv_exclusive_writer = { .format_name = "exclusive-writer", + .is_filter = true, + .filtered_child_is_backing = true, .bdrv_child_perm = exclusive_write_perms, }; @@ -134,7 +138,7 @@ static void test_update_perm_tree(void) blk_insert_bs(root, bs, &error_abort); bdrv_attach_child(filter, bs, "child", &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); + BDRV_CHILD_DATA, &error_abort); ret = bdrv_append(filter, bs, NULL); g_assert_cmpint(ret, <, 0); @@ -228,11 +232,14 @@ static void test_parallel_exclusive_write(void) */ bdrv_ref(base); - bdrv_attach_child(top, fl1, "backing", &child_of_bds, BDRV_CHILD_DATA, + bdrv_attach_child(top, fl1, "backing", &child_of_bds, + BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); - bdrv_attach_child(fl1, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED, + bdrv_attach_child(fl1, base, "backing", &child_of_bds, + BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); - bdrv_attach_child(fl2, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED, + bdrv_attach_child(fl2, base, "backing", &child_of_bds, + BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); bdrv_replace_node(fl1, fl2, &error_abort); @@ -241,13 +248,26 @@ static void test_parallel_exclusive_write(void) bdrv_unref(top); } -static void write_to_file_perms(BlockDriverState *bs, BdrvChild *c, - BdrvChildRole role, - BlockReopenQueue *reopen_queue, - uint64_t perm, uint64_t shared, - uint64_t *nperm, uint64_t *nshared) +/* + * write-to-selected node may have several DATA children, one of them may be + * "selected". Exclusive write permission is taken on selected child. + * + * We don't realize write handler itself, as we need only to test how permission + * update works. 
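The graph-mod updates above follow one convention: a test filter now marks itself as a filter on its BlockDriver and attaches its filtered child with the FILTERED and PRIMARY roles together. A minimal sketch of that combination, built only from fields and flags visible in these hunks; the driver and variable names are placeholders:

static BlockDriver bdrv_sketch_filter = {
    .format_name = "sketch-filter",        /* placeholder name */
    .is_filter = true,                     /* node is a filter ... */
    .filtered_child_is_backing = true,     /* ... and filters via its backing child */
    .bdrv_child_perm = bdrv_default_perms,
};

/* attach the filtered child with both roles, as done throughout this test */
bdrv_attach_child(filter_bs, child_bs, "backing", &child_of_bds,
                  BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort);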
+ */ +typedef struct BDRVWriteToSelectedState { + BdrvChild *selected; +} BDRVWriteToSelectedState; + +static void write_to_selected_perms(BlockDriverState *bs, BdrvChild *c, + BdrvChildRole role, + BlockReopenQueue *reopen_queue, + uint64_t perm, uint64_t shared, + uint64_t *nperm, uint64_t *nshared) { - if (bs->file && c == bs->file) { + BDRVWriteToSelectedState *s = bs->opaque; + + if (s->selected && c == s->selected) { *nperm = BLK_PERM_WRITE; *nshared = BLK_PERM_ALL & ~BLK_PERM_WRITE; } else { @@ -256,9 +276,10 @@ static void write_to_file_perms(BlockDriverState *bs, BdrvChild *c, } } -static BlockDriver bdrv_write_to_file = { - .format_name = "tricky-perm", - .bdrv_child_perm = write_to_file_perms, +static BlockDriver bdrv_write_to_selected = { + .format_name = "write-to-selected", + .instance_size = sizeof(BDRVWriteToSelectedState), + .bdrv_child_perm = write_to_selected_perms, }; @@ -266,15 +287,18 @@ static BlockDriver bdrv_write_to_file = { * The following test shows that topological-sort order is required for * permission update, simple DFS is not enough. * - * Consider the block driver which has two filter children: one active - * with exclusive write access and one inactive with no specific - * permissions. + * Consider the block driver (write-to-selected) which has two children: one is + * selected so we have exclusive write access to it and for the other one we + * don't need any specific permissions. * * And, these two children has a common base child, like this: + * (additional "top" on top is used in test just because the only public + * function to update permission should get a specific child to update. + * Making bdrv_refresh_perms() public just for this test isn't worth it) * - * ┌─────┐ ┌──────┐ - * │ fl2 │ ◀── │ top │ - * └─────┘ └──────┘ + * ┌─────┐ ┌───────────────────┐ ┌─────┐ + * │ fl2 │ ◀── │ write-to-selected │ ◀── │ top │ + * └─────┘ └───────────────────┘ └─────┘ * │ │ * │ │ w * │ ▼ @@ -290,14 +314,14 @@ static BlockDriver bdrv_write_to_file = { * * So, exclusive write is propagated. * - * Assume, we want to make fl2 active instead of fl1. - * So, we set some option for top driver and do permission update. + * Assume, we want to select fl2 instead of fl1. + * So, we set some option for write-to-selected driver and do permission update. * * With simple DFS, if permission update goes first through - * top->fl1->base branch it will succeed: it firstly drop exclusive write - * permissions and than apply them for another BdrvChildren. - * But if permission update goes first through top->fl2->base branch it - * will fail, as when we try to update fl2->base child, old not yet + * write-to-selected -> fl1 -> base branch it will succeed: it firstly drop + * exclusive write permissions and than apply them for another BdrvChildren. + * But if permission update goes first through write-to-selected -> fl2 -> base + * branch it will fail, as when we try to update fl2->base child, old not yet * updated fl1->base child will be in conflict. 
* * With topological-sort order we always update parents before children, so fl1 @@ -306,9 +330,10 @@ static BlockDriver bdrv_write_to_file = { static void test_parallel_perm_update(void) { BlockDriverState *top = no_perm_node("top"); - BlockDriverState *tricky = - bdrv_new_open_driver(&bdrv_write_to_file, "tricky", BDRV_O_RDWR, + BlockDriverState *ws = + bdrv_new_open_driver(&bdrv_write_to_selected, "ws", BDRV_O_RDWR, &error_abort); + BDRVWriteToSelectedState *s = ws->opaque; BlockDriverState *base = no_perm_node("base"); BlockDriverState *fl1 = pass_through_node("fl1"); BlockDriverState *fl2 = pass_through_node("fl2"); @@ -320,33 +345,35 @@ static void test_parallel_perm_update(void) */ bdrv_ref(base); - bdrv_attach_child(top, tricky, "file", &child_of_bds, BDRV_CHILD_DATA, + bdrv_attach_child(top, ws, "file", &child_of_bds, BDRV_CHILD_DATA, &error_abort); - c_fl1 = bdrv_attach_child(tricky, fl1, "first", &child_of_bds, - BDRV_CHILD_FILTERED, &error_abort); - c_fl2 = bdrv_attach_child(tricky, fl2, "second", &child_of_bds, - BDRV_CHILD_FILTERED, &error_abort); - bdrv_attach_child(fl1, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED, + c_fl1 = bdrv_attach_child(ws, fl1, "first", &child_of_bds, + BDRV_CHILD_DATA, &error_abort); + c_fl2 = bdrv_attach_child(ws, fl2, "second", &child_of_bds, + BDRV_CHILD_DATA, &error_abort); + bdrv_attach_child(fl1, base, "backing", &child_of_bds, + BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); - bdrv_attach_child(fl2, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED, + bdrv_attach_child(fl2, base, "backing", &child_of_bds, + BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); /* Select fl1 as first child to be active */ - tricky->file = c_fl1; + s->selected = c_fl1; bdrv_child_refresh_perms(top, top->children.lh_first, &error_abort); assert(c_fl1->perm & BLK_PERM_WRITE); assert(!(c_fl2->perm & BLK_PERM_WRITE)); /* Now, try to switch active child and update permissions */ - tricky->file = c_fl2; + s->selected = c_fl2; bdrv_child_refresh_perms(top, top->children.lh_first, &error_abort); assert(c_fl2->perm & BLK_PERM_WRITE); assert(!(c_fl1->perm & BLK_PERM_WRITE)); /* Switch once more, to not care about real child order in the list */ - tricky->file = c_fl1; + s->selected = c_fl1; bdrv_child_refresh_perms(top, top->children.lh_first, &error_abort); assert(c_fl1->perm & BLK_PERM_WRITE); @@ -379,7 +406,8 @@ static void test_append_greedy_filter(void) BlockDriverState *base = no_perm_node("base"); BlockDriverState *fl = exclusive_writer_node("fl1"); - bdrv_attach_child(top, base, "backing", &child_of_bds, BDRV_CHILD_COW, + bdrv_attach_child(top, base, "backing", &child_of_bds, + BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); bdrv_append(fl, base, &error_abort); diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c index c39e70b2f5..8ca5adec5e 100644 --- a/tests/unit/test-block-iothread.c +++ b/tests/unit/test-block-iothread.c @@ -31,15 +31,24 @@ #include "qemu/main-loop.h" #include "iothread.h" -static int coroutine_fn bdrv_test_co_prwv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs, + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) +{ + return 0; +} + +static int coroutine_fn bdrv_test_co_pwritev(BlockDriverState *bs, + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { return 0; } static int coroutine_fn 
bdrv_test_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { return 0; } @@ -66,8 +75,8 @@ static BlockDriver bdrv_test = { .format_name = "test", .instance_size = 1, - .bdrv_co_preadv = bdrv_test_co_prwv, - .bdrv_co_pwritev = bdrv_test_co_prwv, + .bdrv_co_preadv = bdrv_test_co_preadv, + .bdrv_co_pwritev = bdrv_test_co_pwritev, .bdrv_co_pdiscard = bdrv_test_co_pdiscard, .bdrv_co_truncate = bdrv_test_co_truncate, .bdrv_co_block_status = bdrv_test_co_block_status, @@ -79,11 +88,11 @@ static void test_sync_op_pread(BdrvChild *c) int ret; /* Success */ - ret = bdrv_pread(c, 0, buf, sizeof(buf)); - g_assert_cmpint(ret, ==, 512); + ret = bdrv_pread(c, 0, sizeof(buf), buf, 0); + g_assert_cmpint(ret, ==, 0); /* Early error: Negative offset */ - ret = bdrv_pread(c, -2, buf, sizeof(buf)); + ret = bdrv_pread(c, -2, sizeof(buf), buf, 0); g_assert_cmpint(ret, ==, -EIO); } @@ -93,11 +102,11 @@ static void test_sync_op_pwrite(BdrvChild *c) int ret; /* Success */ - ret = bdrv_pwrite(c, 0, buf, sizeof(buf)); - g_assert_cmpint(ret, ==, 512); + ret = bdrv_pwrite(c, 0, sizeof(buf), buf, 0); + g_assert_cmpint(ret, ==, 0); /* Early error: Negative offset */ - ret = bdrv_pwrite(c, -2, buf, sizeof(buf)); + ret = bdrv_pwrite(c, -2, sizeof(buf), buf, 0); g_assert_cmpint(ret, ==, -EIO); } @@ -107,11 +116,11 @@ static void test_sync_op_blk_pread(BlockBackend *blk) int ret; /* Success */ - ret = blk_pread(blk, 0, buf, sizeof(buf)); - g_assert_cmpint(ret, ==, 512); + ret = blk_pread(blk, 0, sizeof(buf), buf, 0); + g_assert_cmpint(ret, ==, 0); /* Early error: Negative offset */ - ret = blk_pread(blk, -2, buf, sizeof(buf)); + ret = blk_pread(blk, -2, sizeof(buf), buf, 0); g_assert_cmpint(ret, ==, -EIO); } @@ -121,11 +130,98 @@ static void test_sync_op_blk_pwrite(BlockBackend *blk) int ret; /* Success */ - ret = blk_pwrite(blk, 0, buf, sizeof(buf), 0); - g_assert_cmpint(ret, ==, 512); + ret = blk_pwrite(blk, 0, sizeof(buf), buf, 0); + g_assert_cmpint(ret, ==, 0); /* Early error: Negative offset */ - ret = blk_pwrite(blk, -2, buf, sizeof(buf), 0); + ret = blk_pwrite(blk, -2, sizeof(buf), buf, 0); + g_assert_cmpint(ret, ==, -EIO); +} + +static void test_sync_op_blk_preadv(BlockBackend *blk) +{ + uint8_t buf[512]; + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf)); + int ret; + + /* Success */ + ret = blk_preadv(blk, 0, sizeof(buf), &qiov, 0); + g_assert_cmpint(ret, ==, 0); + + /* Early error: Negative offset */ + ret = blk_preadv(blk, -2, sizeof(buf), &qiov, 0); + g_assert_cmpint(ret, ==, -EIO); +} + +static void test_sync_op_blk_pwritev(BlockBackend *blk) +{ + uint8_t buf[512] = { 0 }; + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf)); + int ret; + + /* Success */ + ret = blk_pwritev(blk, 0, sizeof(buf), &qiov, 0); + g_assert_cmpint(ret, ==, 0); + + /* Early error: Negative offset */ + ret = blk_pwritev(blk, -2, sizeof(buf), &qiov, 0); + g_assert_cmpint(ret, ==, -EIO); +} + +static void test_sync_op_blk_preadv_part(BlockBackend *blk) +{ + uint8_t buf[512]; + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf)); + int ret; + + /* Success */ + ret = blk_preadv_part(blk, 0, sizeof(buf), &qiov, 0, 0); + g_assert_cmpint(ret, ==, 0); + + /* Early error: Negative offset */ + ret = blk_preadv_part(blk, -2, sizeof(buf), &qiov, 0, 0); + g_assert_cmpint(ret, ==, -EIO); +} + +static void test_sync_op_blk_pwritev_part(BlockBackend *blk) +{ + uint8_t buf[512] = { 0 }; + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf)); + int ret; + + 
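Each sync-op conversion in this file encodes the same interface change: the byte count moves ahead of the buffer, a flags argument is appended, and success is reported as 0 rather than as the number of bytes transferred. A minimal before/after sketch of one such call:

uint8_t buf[512];
int ret;

/* old shape, as removed: (blk, offset, buf, bytes); returned the byte count, 512 */
ret = blk_pread(blk, 0, buf, sizeof(buf));

/* new shape, as added: (blk, offset, bytes, buf, flags); returns 0 on success */
ret = blk_pread(blk, 0, sizeof(buf), buf, 0);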
/* Success */ + ret = blk_pwritev_part(blk, 0, sizeof(buf), &qiov, 0, 0); + g_assert_cmpint(ret, ==, 0); + + /* Early error: Negative offset */ + ret = blk_pwritev_part(blk, -2, sizeof(buf), &qiov, 0, 0); + g_assert_cmpint(ret, ==, -EIO); +} + +static void test_sync_op_blk_pwrite_compressed(BlockBackend *blk) +{ + uint8_t buf[512] = { 0 }; + int ret; + + /* Late error: Not supported */ + ret = blk_pwrite_compressed(blk, 0, sizeof(buf), buf); + g_assert_cmpint(ret, ==, -ENOTSUP); + + /* Early error: Negative offset */ + ret = blk_pwrite_compressed(blk, -2, sizeof(buf), buf); + g_assert_cmpint(ret, ==, -EIO); +} + +static void test_sync_op_blk_pwrite_zeroes(BlockBackend *blk) +{ + int ret; + + /* Success */ + ret = blk_pwrite_zeroes(blk, 0, 512, 0); + g_assert_cmpint(ret, ==, 0); + + /* Early error: Negative offset */ + ret = blk_pwrite_zeroes(blk, -2, 512, 0); g_assert_cmpint(ret, ==, -EIO); } @@ -202,6 +298,19 @@ static void test_sync_op_truncate(BdrvChild *c) c->bs->open_flags |= BDRV_O_RDWR; } +static void test_sync_op_blk_truncate(BlockBackend *blk) +{ + int ret; + + /* Normal success path */ + ret = blk_truncate(blk, 65536, false, PREALLOC_MODE_OFF, 0, NULL); + g_assert_cmpint(ret, ==, 0); + + /* Early error: Negative offset */ + ret = blk_truncate(blk, -2, false, PREALLOC_MODE_OFF, 0, NULL); + g_assert_cmpint(ret, ==, -EINVAL); +} + static void test_sync_op_block_status(BdrvChild *c) { int ret; @@ -270,10 +379,10 @@ static void test_sync_op_check(BdrvChild *c) g_assert_cmpint(ret, ==, -ENOTSUP); } -static void test_sync_op_invalidate_cache(BdrvChild *c) +static void test_sync_op_activate(BdrvChild *c) { /* Early success: Image is not inactive */ - bdrv_invalidate_cache(c->bs, NULL); + bdrv_activate(c->bs, NULL); } @@ -292,6 +401,30 @@ const SyncOpTest sync_op_tests[] = { .name = "/sync-op/pwrite", .fn = test_sync_op_pwrite, .blkfn = test_sync_op_blk_pwrite, + }, { + .name = "/sync-op/preadv", + .fn = NULL, + .blkfn = test_sync_op_blk_preadv, + }, { + .name = "/sync-op/pwritev", + .fn = NULL, + .blkfn = test_sync_op_blk_pwritev, + }, { + .name = "/sync-op/preadv_part", + .fn = NULL, + .blkfn = test_sync_op_blk_preadv_part, + }, { + .name = "/sync-op/pwritev_part", + .fn = NULL, + .blkfn = test_sync_op_blk_pwritev_part, + }, { + .name = "/sync-op/pwrite_compressed", + .fn = NULL, + .blkfn = test_sync_op_blk_pwrite_compressed, + }, { + .name = "/sync-op/pwrite_zeroes", + .fn = NULL, + .blkfn = test_sync_op_blk_pwrite_zeroes, }, { .name = "/sync-op/load_vmstate", .fn = test_sync_op_load_vmstate, @@ -305,6 +438,7 @@ const SyncOpTest sync_op_tests[] = { }, { .name = "/sync-op/truncate", .fn = test_sync_op_truncate, + .blkfn = test_sync_op_blk_truncate, }, { .name = "/sync-op/block_status", .fn = test_sync_op_block_status, @@ -316,8 +450,8 @@ const SyncOpTest sync_op_tests[] = { .name = "/sync-op/check", .fn = test_sync_op_check, }, { - .name = "/sync-op/invalidate_cache", - .fn = test_sync_op_invalidate_cache, + .name = "/sync-op/activate", + .fn = test_sync_op_activate, }, }; @@ -340,7 +474,9 @@ static void test_sync_op(const void *opaque) blk_set_aio_context(blk, ctx, &error_abort); aio_context_acquire(ctx); - t->fn(c); + if (t->fn) { + t->fn(c); + } if (t->blkfn) { t->blkfn(blk); } @@ -446,8 +582,10 @@ static void test_attach_blockjob(void) aio_poll(qemu_get_aio_context(), false); } + WITH_JOB_LOCK_GUARD() { + job_complete_sync_locked(&tjob->common.job, &error_abort); + } aio_context_acquire(ctx); - job_complete_sync(&tjob->common.job, &error_abort); blk_set_aio_context(blk, 
qemu_get_aio_context(), &error_abort); aio_context_release(ctx); @@ -621,11 +759,13 @@ static void test_propagate_mirror(void) BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT, false, "filter_node", MIRROR_COPY_MODE_BACKGROUND, &error_abort); - job = job_get("job0"); + WITH_JOB_LOCK_GUARD() { + job = job_get_locked("job0"); + } filter = bdrv_find_node("filter_node"); /* Change the AioContext of src */ - bdrv_try_set_aio_context(src, ctx, &error_abort); + bdrv_try_change_aio_context(src, ctx, NULL, &error_abort); g_assert(bdrv_get_aio_context(src) == ctx); g_assert(bdrv_get_aio_context(target) == ctx); g_assert(bdrv_get_aio_context(filter) == ctx); @@ -633,7 +773,7 @@ static void test_propagate_mirror(void) /* Change the AioContext of target */ aio_context_acquire(ctx); - bdrv_try_set_aio_context(target, main_ctx, &error_abort); + bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort); aio_context_release(ctx); g_assert(bdrv_get_aio_context(src) == main_ctx); g_assert(bdrv_get_aio_context(target) == main_ctx); @@ -643,7 +783,7 @@ static void test_propagate_mirror(void) blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); blk_insert_bs(blk, src, &error_abort); - bdrv_try_set_aio_context(target, ctx, &local_err); + bdrv_try_change_aio_context(target, ctx, NULL, &local_err); error_free_or_abort(&local_err); g_assert(blk_get_aio_context(blk) == main_ctx); @@ -654,7 +794,7 @@ static void test_propagate_mirror(void) /* ...unless we explicitly allow it */ aio_context_acquire(ctx); blk_set_allow_aio_context_change(blk, true); - bdrv_try_set_aio_context(target, ctx, &error_abort); + bdrv_try_change_aio_context(target, ctx, NULL, &error_abort); aio_context_release(ctx); g_assert(blk_get_aio_context(blk) == ctx); @@ -666,7 +806,7 @@ static void test_propagate_mirror(void) aio_context_acquire(ctx); blk_set_aio_context(blk, main_ctx, &error_abort); - bdrv_try_set_aio_context(target, main_ctx, &error_abort); + bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort); aio_context_release(ctx); blk_unref(blk); diff --git a/tests/unit/test-blockjob-txn.c b/tests/unit/test-blockjob-txn.c index 8bd13b9949..d3b0bb24be 100644 --- a/tests/unit/test-blockjob-txn.c +++ b/tests/unit/test-blockjob-txn.c @@ -25,14 +25,6 @@ typedef struct { int *result; } TestBlockJob; -static void test_block_job_clean(Job *job) -{ - BlockJob *bjob = container_of(job, BlockJob, job); - BlockDriverState *bs = blk_bs(bjob->blk); - - bdrv_unref(bs); -} - static int coroutine_fn test_block_job_run(Job *job, Error **errp) { TestBlockJob *s = container_of(job, TestBlockJob, common.job); @@ -73,7 +65,6 @@ static const BlockJobDriver test_block_job_driver = { .free = block_job_free, .user_resume = block_job_user_resume, .run = test_block_job_run, - .clean = test_block_job_clean, }, }; @@ -105,6 +96,7 @@ static BlockJob *test_block_job_start(unsigned int iterations, s = block_job_create(job_id, &test_block_job_driver, txn, bs, 0, BLK_PERM_ALL, 0, JOB_DEFAULT, test_block_job_cb, data, &error_abort); + bdrv_unref(bs); /* referenced by job now */ s->iterations = iterations; s->use_timer = use_timer; s->rc = rc; @@ -124,8 +116,10 @@ static void test_single_job(int expected) job = test_block_job_start(1, true, expected, &result, txn); job_start(&job->job); - if (expected == -ECANCELED) { - job_cancel(&job->job, false); + WITH_JOB_LOCK_GUARD() { + if (expected == -ECANCELED) { + job_cancel_locked(&job->job, false); + } } while (result == -EINPROGRESS) { @@ -168,13 +162,15 @@ static void test_pair_jobs(int expected1, int 
expected2) /* Release our reference now to trigger as many nice * use-after-free bugs as possible. */ - job_txn_unref(txn); + WITH_JOB_LOCK_GUARD() { + job_txn_unref_locked(txn); - if (expected1 == -ECANCELED) { - job_cancel(&job1->job, false); - } - if (expected2 == -ECANCELED) { - job_cancel(&job2->job, false); + if (expected1 == -ECANCELED) { + job_cancel_locked(&job1->job, false); + } + if (expected2 == -ECANCELED) { + job_cancel_locked(&job2->job, false); + } } while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) { @@ -227,7 +223,9 @@ static void test_pair_jobs_fail_cancel_race(void) job_start(&job1->job); job_start(&job2->job); - job_cancel(&job1->job, false); + WITH_JOB_LOCK_GUARD() { + job_cancel_locked(&job1->job, false); + } /* Now make job2 finish before the main loop kicks jobs. This simulates * the race between a pending kick and another job completing. diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c index dcacfa6c7c..c0426bd10c 100644 --- a/tests/unit/test-blockjob.c +++ b/tests/unit/test-blockjob.c @@ -211,8 +211,11 @@ static CancelJob *create_common(Job **pjob) bjob = mk_job(blk, "Steve", &test_cancel_driver, true, JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS); job = &bjob->job; - job_ref(job); - assert(job->status == JOB_STATUS_CREATED); + WITH_JOB_LOCK_GUARD() { + job_ref_locked(job); + assert(job->status == JOB_STATUS_CREATED); + } + s = container_of(bjob, CancelJob, common); s->blk = blk; @@ -225,21 +228,22 @@ static void cancel_common(CancelJob *s) BlockJob *job = &s->common; BlockBackend *blk = s->blk; JobStatus sts = job->job.status; - AioContext *ctx; + AioContext *ctx = job->job.aio_context; - ctx = job->job.aio_context; - aio_context_acquire(ctx); - - job_cancel_sync(&job->job); - if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) { - Job *dummy = &job->job; - job_dismiss(&dummy, &error_abort); + job_cancel_sync(&job->job, true); + WITH_JOB_LOCK_GUARD() { + if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) { + Job *dummy = &job->job; + job_dismiss_locked(&dummy, &error_abort); + } + assert(job->job.status == JOB_STATUS_NULL); + job_unref_locked(&job->job); } - assert(job->job.status == JOB_STATUS_NULL); - job_unref(&job->job); - destroy_blk(blk); + aio_context_acquire(ctx); + destroy_blk(blk); aio_context_release(ctx); + } static void test_cancel_created(void) @@ -251,6 +255,13 @@ static void test_cancel_created(void) cancel_common(s); } +static void assert_job_status_is(Job *job, int status) +{ + WITH_JOB_LOCK_GUARD() { + assert(job->status == status); + } +} + static void test_cancel_running(void) { Job *job; @@ -259,7 +270,7 @@ static void test_cancel_running(void) s = create_common(&job); job_start(job); - assert(job->status == JOB_STATUS_RUNNING); + assert_job_status_is(job, JOB_STATUS_RUNNING); cancel_common(s); } @@ -272,11 +283,12 @@ static void test_cancel_paused(void) s = create_common(&job); job_start(job); - assert(job->status == JOB_STATUS_RUNNING); - - job_user_pause(job, &error_abort); + WITH_JOB_LOCK_GUARD() { + assert(job->status == JOB_STATUS_RUNNING); + job_user_pause_locked(job, &error_abort); + } job_enter(job); - assert(job->status == JOB_STATUS_PAUSED); + assert_job_status_is(job, JOB_STATUS_PAUSED); cancel_common(s); } @@ -289,11 +301,11 @@ static void test_cancel_ready(void) s = create_common(&job); job_start(job); - assert(job->status == JOB_STATUS_RUNNING); + assert_job_status_is(job, JOB_STATUS_RUNNING); s->should_converge = true; job_enter(job); - assert(job->status == JOB_STATUS_READY); + 
assert_job_status_is(job, JOB_STATUS_READY); cancel_common(s); } @@ -306,15 +318,16 @@ static void test_cancel_standby(void) s = create_common(&job); job_start(job); - assert(job->status == JOB_STATUS_RUNNING); + assert_job_status_is(job, JOB_STATUS_RUNNING); s->should_converge = true; job_enter(job); - assert(job->status == JOB_STATUS_READY); - - job_user_pause(job, &error_abort); + WITH_JOB_LOCK_GUARD() { + assert(job->status == JOB_STATUS_READY); + job_user_pause_locked(job, &error_abort); + } job_enter(job); - assert(job->status == JOB_STATUS_STANDBY); + assert_job_status_is(job, JOB_STATUS_STANDBY); cancel_common(s); } @@ -327,20 +340,21 @@ static void test_cancel_pending(void) s = create_common(&job); job_start(job); - assert(job->status == JOB_STATUS_RUNNING); + assert_job_status_is(job, JOB_STATUS_RUNNING); s->should_converge = true; job_enter(job); - assert(job->status == JOB_STATUS_READY); - - job_complete(job, &error_abort); + WITH_JOB_LOCK_GUARD() { + assert(job->status == JOB_STATUS_READY); + job_complete_locked(job, &error_abort); + } job_enter(job); while (!job->deferred_to_main_loop) { aio_poll(qemu_get_aio_context(), true); } - assert(job->status == JOB_STATUS_READY); + assert_job_status_is(job, JOB_STATUS_READY); aio_poll(qemu_get_aio_context(), true); - assert(job->status == JOB_STATUS_PENDING); + assert_job_status_is(job, JOB_STATUS_PENDING); cancel_common(s); } @@ -353,25 +367,26 @@ static void test_cancel_concluded(void) s = create_common(&job); job_start(job); - assert(job->status == JOB_STATUS_RUNNING); + assert_job_status_is(job, JOB_STATUS_RUNNING); s->should_converge = true; job_enter(job); - assert(job->status == JOB_STATUS_READY); - - job_complete(job, &error_abort); + WITH_JOB_LOCK_GUARD() { + assert(job->status == JOB_STATUS_READY); + job_complete_locked(job, &error_abort); + } job_enter(job); while (!job->deferred_to_main_loop) { aio_poll(qemu_get_aio_context(), true); } - assert(job->status == JOB_STATUS_READY); + assert_job_status_is(job, JOB_STATUS_READY); aio_poll(qemu_get_aio_context(), true); - assert(job->status == JOB_STATUS_PENDING); + assert_job_status_is(job, JOB_STATUS_PENDING); - aio_context_acquire(job->aio_context); - job_finalize(job, &error_abort); - aio_context_release(job->aio_context); - assert(job->status == JOB_STATUS_CONCLUDED); + WITH_JOB_LOCK_GUARD() { + job_finalize_locked(job, &error_abort); + assert(job->status == JOB_STATUS_CONCLUDED); + } cancel_common(s); } @@ -417,7 +432,7 @@ static const BlockJobDriver test_yielding_driver = { }; /* - * Test that job_complete() works even on jobs that are in a paused + * Test that job_complete_locked() works even on jobs that are in a paused * state (i.e., STANDBY). * * To do this, run YieldingJob in an IO thread, get it into the READY @@ -425,7 +440,7 @@ static const BlockJobDriver test_yielding_driver = { * acquire the context so the job will not be entered and will thus * remain on STANDBY. * - * job_complete() should still work without error. + * job_complete_locked() should still work without error. * * Note that on the QMP interface, it is impossible to lock an IO * thread before a drained section ends. 
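The blockjob test conversions above all apply one rule: reads and mutations of job state move under WITH_JOB_LOCK_GUARD() and switch to the *_locked variants of the job API, with bare status checks funnelled through the new assert_job_status_is() helper. A minimal sketch of the pattern, using only calls that appear in these hunks:

/* status-only checks go through the helper added by this patch */
assert_job_status_is(job, JOB_STATUS_RUNNING);

/* anything that both inspects and drives the job takes the lock explicitly */
WITH_JOB_LOCK_GUARD() {
    assert(job->status == JOB_STATUS_READY);
    job_complete_locked(job, &error_abort);
}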
In practice, the @@ -459,37 +474,44 @@ static void test_complete_in_standby(void) bjob = mk_job(blk, "job", &test_yielding_driver, true, JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS); job = &bjob->job; - assert(job->status == JOB_STATUS_CREATED); + assert_job_status_is(job, JOB_STATUS_CREATED); /* Wait for the job to become READY */ job_start(job); - aio_context_acquire(ctx); - AIO_WAIT_WHILE(ctx, job->status != JOB_STATUS_READY); - aio_context_release(ctx); + /* + * Here we are waiting for the status to change, so don't bother + * protecting the read every time. + */ + AIO_WAIT_WHILE_UNLOCKED(ctx, job->status != JOB_STATUS_READY); /* Begin the drained section, pausing the job */ bdrv_drain_all_begin(); - assert(job->status == JOB_STATUS_STANDBY); + assert_job_status_is(job, JOB_STATUS_STANDBY); + /* Lock the IO thread to prevent the job from being run */ aio_context_acquire(ctx); /* This will schedule the job to resume it */ bdrv_drain_all_end(); + aio_context_release(ctx); - /* But the job cannot run, so it will remain on standby */ - assert(job->status == JOB_STATUS_STANDBY); + WITH_JOB_LOCK_GUARD() { + /* But the job cannot run, so it will remain on standby */ + assert(job->status == JOB_STATUS_STANDBY); - /* Even though the job is on standby, this should work */ - job_complete(job, &error_abort); + /* Even though the job is on standby, this should work */ + job_complete_locked(job, &error_abort); - /* The test is done now, clean up. */ - job_finish_sync(job, NULL, &error_abort); - assert(job->status == JOB_STATUS_PENDING); + /* The test is done now, clean up. */ + job_finish_sync_locked(job, NULL, &error_abort); + assert(job->status == JOB_STATUS_PENDING); - job_finalize(job, &error_abort); - assert(job->status == JOB_STATUS_CONCLUDED); + job_finalize_locked(job, &error_abort); + assert(job->status == JOB_STATUS_CONCLUDED); - job_dismiss(&job, &error_abort); + job_dismiss_locked(&job, &error_abort); + } + aio_context_acquire(ctx); destroy_blk(blk); aio_context_release(ctx); iothread_join(iothread); diff --git a/tests/unit/test-clone-visitor.c b/tests/unit/test-clone-visitor.c index 4944b3d857..ce67585305 100644 --- a/tests/unit/test-clone-visitor.c +++ b/tests/unit/test-clone-visitor.c @@ -9,7 +9,6 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/clone-visitor.h" #include "test-qapi-visit.h" @@ -63,7 +62,7 @@ static void test_clone_alternate(void) qapi_free_AltEnumBool(s_dst); } -static void test_clone_list_union(void) +static void test_clone_list(void) { uint8List *src = NULL, *dst; uint8List *tmp = NULL; @@ -99,18 +98,26 @@ static void test_clone_empty(void) static void test_clone_complex1(void) { - UserDefListUnion *src, *dst; + UserDefFlatUnion *src, *dst; - src = g_new0(UserDefListUnion, 1); - src->type = USER_DEF_LIST_UNION_KIND_STRING; + src = g_new0(UserDefFlatUnion, 1); + src->integer = 123; + src->string = g_strdup("abc"); + src->enum1 = ENUM_ONE_VALUE1; + src->u.value1.boolean = true; - dst = QAPI_CLONE(UserDefListUnion, src); + dst = QAPI_CLONE(UserDefFlatUnion, src); g_assert(dst); - g_assert_cmpint(dst->type, ==, src->type); - g_assert(!dst->u.string.data); - qapi_free_UserDefListUnion(src); - qapi_free_UserDefListUnion(dst); + g_assert_cmpint(dst->integer, ==, 123); + g_assert_cmpstr(dst->string, ==, "abc"); + g_assert_cmpint(dst->enum1, ==, ENUM_ONE_VALUE1); + g_assert(dst->u.value1.boolean); + g_assert(!dst->u.value1.has_a_b); + g_assert_cmpint(dst->u.value1.a_b, ==, 0); + + qapi_free_UserDefFlatUnion(src); + qapi_free_UserDefFlatUnion(dst); } static 
void test_clone_complex2(void) @@ -145,42 +152,48 @@ static void test_clone_complex2(void) static void test_clone_complex3(void) { - __org_qemu_x_Struct2 *src, *dst; - __org_qemu_x_Union1List *tmp; + UserDefOneList *src, *dst, *tail; + UserDefOne *elt; - src = g_new0(__org_qemu_x_Struct2, 1); - tmp = src->array = g_new0(__org_qemu_x_Union1List, 1); - tmp->value = g_new0(__org_qemu_x_Union1, 1); - tmp->value->type = ORG_QEMU_X_UNION1_KIND___ORG_QEMU_X_BRANCH; - tmp->value->u.__org_qemu_x_branch.data = g_strdup("one"); - tmp = tmp->next = g_new0(__org_qemu_x_Union1List, 1); - tmp->value = g_new0(__org_qemu_x_Union1, 1); - tmp->value->type = ORG_QEMU_X_UNION1_KIND___ORG_QEMU_X_BRANCH; - tmp->value->u.__org_qemu_x_branch.data = g_strdup("two"); - tmp = tmp->next = g_new0(__org_qemu_x_Union1List, 1); - tmp->value = g_new0(__org_qemu_x_Union1, 1); - tmp->value->type = ORG_QEMU_X_UNION1_KIND___ORG_QEMU_X_BRANCH; - tmp->value->u.__org_qemu_x_branch.data = g_strdup("three"); + src = NULL; + elt = g_new0(UserDefOne, 1); + elt->integer = 3; + elt->string = g_strdup("three"); + elt->has_enum1 = true; + elt->enum1 = ENUM_ONE_VALUE3; + QAPI_LIST_PREPEND(src, elt); + elt = g_new0(UserDefOne, 1); + elt->integer = 2; + elt->string = g_strdup("two"); + QAPI_LIST_PREPEND(src, elt); + elt = g_new0(UserDefOne, 1); + elt->integer = 1; + elt->string = g_strdup("one"); + QAPI_LIST_PREPEND(src, elt); + + dst = QAPI_CLONE(UserDefOneList, src); - dst = QAPI_CLONE(__org_qemu_x_Struct2, src); g_assert(dst); - tmp = dst->array; - g_assert(tmp); - g_assert(tmp->value); - g_assert_cmpstr(tmp->value->u.__org_qemu_x_branch.data, ==, "one"); - tmp = tmp->next; - g_assert(tmp); - g_assert(tmp->value); - g_assert_cmpstr(tmp->value->u.__org_qemu_x_branch.data, ==, "two"); - tmp = tmp->next; - g_assert(tmp); - g_assert(tmp->value); - g_assert_cmpstr(tmp->value->u.__org_qemu_x_branch.data, ==, "three"); - tmp = tmp->next; - g_assert(!tmp); + tail = dst; + elt = tail->value; + g_assert_cmpint(elt->integer, ==, 1); + g_assert_cmpstr(elt->string, ==, "one"); + g_assert(!elt->has_enum1); + tail = tail->next; + elt = tail->value; + g_assert_cmpint(elt->integer, ==, 2); + g_assert_cmpstr(elt->string, ==, "two"); + g_assert(!elt->has_enum1); + tail = tail->next; + elt = tail->value; + g_assert_cmpint(elt->integer, ==, 3); + g_assert_cmpstr(elt->string, ==, "three"); + g_assert(elt->has_enum1); + g_assert_cmpint(elt->enum1, ==, ENUM_ONE_VALUE3); + g_assert(!tail->next); - qapi_free___org_qemu_x_Struct2(src); - qapi_free___org_qemu_x_Struct2(dst); + qapi_free_UserDefOneList(src); + qapi_free_UserDefOneList(dst); } int main(int argc, char **argv) @@ -189,7 +202,7 @@ int main(int argc, char **argv) g_test_add_func("/visitor/clone/struct", test_clone_struct); g_test_add_func("/visitor/clone/alternate", test_clone_alternate); - g_test_add_func("/visitor/clone/list_union", test_clone_list_union); + g_test_add_func("/visitor/clone/list", test_clone_list); g_test_add_func("/visitor/clone/empty", test_clone_empty); g_test_add_func("/visitor/clone/complex1", test_clone_complex1); g_test_add_func("/visitor/clone/complex2", test_clone_complex2); diff --git a/tests/unit/test-coroutine.c b/tests/unit/test-coroutine.c index aa77a3bcb3..e16b80c245 100644 --- a/tests/unit/test-coroutine.c +++ b/tests/unit/test-coroutine.c @@ -610,7 +610,7 @@ static void perf_baseline(void) g_test_message("Function call %u iterations: %f s", maxcycles, duration); } -static __attribute__((noinline)) void perf_cost_func(void *opaque) +static __attribute__((noinline)) void 
coroutine_fn perf_cost_func(void *opaque) { qemu_coroutine_yield(); } diff --git a/tests/unit/test-crypto-akcipher.c b/tests/unit/test-crypto-akcipher.c new file mode 100644 index 0000000000..4f1f4214dd --- /dev/null +++ b/tests/unit/test-crypto-akcipher.c @@ -0,0 +1,990 @@ +/* + * QEMU Crypto cipher algorithms + * + * Copyright (c) 2022 Bytedance + * Author: lei he + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +#include "qemu/osdep.h" + +#include "crypto/init.h" +#include "crypto/akcipher.h" +#include "qapi/error.h" + +static const uint8_t rsa1024_private_key[] = { + 0x30, 0x82, 0x02, 0x5c, 0x02, 0x01, 0x00, 0x02, + 0x81, 0x81, 0x00, 0xe6, 0x4d, 0x76, 0x4f, 0xb2, + 0x97, 0x09, 0xad, 0x9d, 0x17, 0x33, 0xf2, 0x30, + 0x42, 0x83, 0xa9, 0xcb, 0x49, 0xa4, 0x2e, 0x59, + 0x5e, 0x75, 0x51, 0xd1, 0xac, 0xc8, 0x86, 0x3e, + 0xdb, 0x72, 0x2e, 0xb2, 0xf7, 0xc3, 0x5b, 0xc7, + 0xea, 0xed, 0x30, 0xd1, 0xf7, 0x37, 0xee, 0x9d, + 0x36, 0x59, 0x6f, 0xf8, 0xce, 0xc0, 0x5c, 0x82, + 0x80, 0x37, 0x83, 0xd7, 0x45, 0x6a, 0xe9, 0xea, + 0xc5, 0x3a, 0x59, 0x6b, 0x34, 0x31, 0x44, 0x00, + 0x74, 0xa7, 0x29, 0xab, 0x79, 0x4a, 0xbd, 0xe8, + 0x25, 0x35, 0x01, 0x11, 0x40, 0xbf, 0x31, 0xbd, + 0xd3, 0xe0, 0x68, 0x1e, 0xd5, 0x5b, 0x2f, 0xe9, + 0x20, 0xf2, 0x9f, 0x46, 0x35, 0x30, 0xa8, 0xf1, + 0xfe, 0xef, 0xd8, 0x76, 0x23, 0x46, 0x34, 0x70, + 0xa1, 0xce, 0xc6, 0x65, 0x6d, 0xb0, 0x94, 0x7e, + 0xe5, 0x92, 0x45, 0x7b, 0xaa, 0xbb, 0x95, 0x97, + 0x77, 0xcd, 0xd3, 0x02, 0x03, 0x01, 0x00, 0x01, + 0x02, 0x81, 0x80, 0x30, 0x6a, 0xc4, 0x9e, 0xc8, + 0xba, 0xfc, 0x2b, 0xe5, 0xc4, 0xc5, 0x04, 0xfb, + 0xa4, 0x60, 0x2d, 0xc8, 0x31, 0x39, 0x35, 0x0d, + 0x50, 0xd0, 0x75, 0x5d, 0x11, 0x68, 0x2e, 0xe0, + 0xf4, 0x1d, 0xb3, 0x37, 0xa8, 0xe3, 0x07, 0x5e, + 0xa6, 0x43, 0x2b, 0x6a, 0x59, 0x01, 0x07, 0x47, + 0x41, 0xef, 0xd7, 0x9c, 0x85, 0x4a, 0xe7, 0xa7, + 0xff, 0xf0, 0xab, 0xe5, 0x0c, 0x11, 0x08, 0x10, + 0x75, 0x5a, 0x68, 0xa0, 0x08, 0x03, 0xc9, 0x40, + 0x79, 0x67, 0x1d, 0x65, 0x89, 0x2d, 0x08, 0xf9, + 0xb5, 0x1b, 0x7d, 0xd2, 0x41, 0x3b, 0x33, 0xf2, + 0x47, 0x2f, 0x9c, 0x0b, 0xd5, 0xaf, 0xcb, 0xdb, + 0xbb, 0x37, 0x63, 0x03, 0xf8, 0xe7, 0x2e, 0xc7, + 0x3c, 0x86, 0x9f, 0xc2, 0x9b, 0xb4, 0x70, 0x6a, + 0x4d, 0x7c, 0xe4, 0x1b, 0x3a, 0xa9, 0xae, 0xd7, + 0xce, 0x7f, 0x56, 0xc2, 0x73, 0x5e, 0x58, 0x63, + 0xd5, 0x86, 0x41, 0x02, 0x41, 0x00, 0xf6, 0x56, + 0x69, 0xec, 0xef, 0x65, 0x95, 0xdc, 0x25, 0x47, + 0xe0, 0x6f, 0xb0, 0x4f, 0x79, 0x77, 0x0a, 0x5e, + 0x46, 0xcb, 0xbd, 0x0b, 0x71, 0x51, 0x2a, 0xa4, + 0x65, 0x29, 0x18, 0xc6, 0x30, 0xa0, 0x95, 0x4c, + 0x4b, 0xbe, 0x8c, 0x40, 0xe3, 0x9c, 0x23, 0x02, + 0x14, 0x43, 0xe9, 0x64, 0xea, 0xe3, 0xa8, 0xe2, + 0x1a, 0xd5, 0xf9, 0x5c, 0xe0, 0x36, 0x2c, 0x97, + 0xda, 0xd5, 0xc7, 0x46, 0xce, 0x11, 0x02, 0x41, + 0x00, 0xef, 0x56, 0x08, 0xb8, 0x29, 0xa5, 0xa6, + 0x7c, 0xf7, 0x5f, 0xb4, 0xf5, 0x63, 0xe7, 0xeb, + 0x45, 0xfd, 0x89, 0xaa, 0x94, 0xa6, 0x3d, 0x0b, + 0xd9, 0x04, 0x6f, 0x78, 0xe0, 0xbb, 0xa2, 0xd4, + 0x29, 0x83, 
0x17, 0x95, 0x6f, 0x50, 0x3d, 0x40, + 0x5d, 0xe5, 0x24, 0xda, 0xc2, 0x23, 0x50, 0x86, + 0xa8, 0x34, 0xc8, 0x6f, 0xec, 0x7f, 0xb6, 0x45, + 0x3a, 0xdd, 0x78, 0x9b, 0xee, 0xa1, 0xe4, 0x09, + 0xa3, 0x02, 0x40, 0x5c, 0xd6, 0x66, 0x67, 0x58, + 0x35, 0xc5, 0xcb, 0xc8, 0xf5, 0x14, 0xbd, 0xa3, + 0x09, 0xe0, 0xb2, 0x1f, 0x63, 0x36, 0x75, 0x34, + 0x52, 0xea, 0xaa, 0xf7, 0x52, 0x2b, 0x99, 0xd8, + 0x6f, 0x61, 0x06, 0x34, 0x1e, 0x23, 0xf1, 0xb5, + 0x34, 0x03, 0x53, 0xe5, 0xd1, 0xb3, 0xc7, 0x80, + 0x5f, 0x7b, 0x32, 0xbf, 0x84, 0x2f, 0x2e, 0xf3, + 0x22, 0xb0, 0x91, 0x5a, 0x2f, 0x04, 0xd7, 0x4a, + 0x9a, 0x01, 0xb1, 0x02, 0x40, 0x34, 0x0b, 0x26, + 0x4c, 0x3d, 0xaa, 0x2a, 0xc0, 0xe3, 0xdd, 0xe8, + 0xf0, 0xaf, 0x6f, 0xe0, 0x06, 0x51, 0x32, 0x9d, + 0x68, 0x43, 0x99, 0xe4, 0xb8, 0xa5, 0x31, 0x44, + 0x3c, 0xc2, 0x30, 0x8f, 0x28, 0x13, 0xbc, 0x8e, + 0x1f, 0x2d, 0x78, 0x94, 0x45, 0x96, 0xad, 0x63, + 0xf0, 0x71, 0x53, 0x72, 0x64, 0xa3, 0x4d, 0xae, + 0xa0, 0xe3, 0xc8, 0x93, 0xd7, 0x50, 0x0f, 0x89, + 0x00, 0xe4, 0x2d, 0x3d, 0x37, 0x02, 0x41, 0x00, + 0xbe, 0xa6, 0x08, 0xe0, 0xc8, 0x15, 0x2a, 0x47, + 0xcb, 0xd5, 0xec, 0x93, 0xd3, 0xaa, 0x12, 0x82, + 0xaf, 0xac, 0x51, 0x5a, 0x5b, 0xa7, 0x93, 0x4b, + 0xb9, 0xab, 0x00, 0xfa, 0x5a, 0xea, 0x34, 0xe4, + 0x80, 0xf1, 0x44, 0x6a, 0x65, 0xe4, 0x33, 0x99, + 0xfb, 0x54, 0xd7, 0x89, 0x5a, 0x1b, 0xd6, 0x2b, + 0xcc, 0x6e, 0x4b, 0x19, 0xa0, 0x6d, 0x93, 0x9f, + 0xc3, 0x91, 0x7a, 0xa5, 0xd8, 0x59, 0x0e, 0x9e, +}; + +static const uint8_t rsa1024_public_key[] = { + 0x30, 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xe6, + 0x4d, 0x76, 0x4f, 0xb2, 0x97, 0x09, 0xad, 0x9d, + 0x17, 0x33, 0xf2, 0x30, 0x42, 0x83, 0xa9, 0xcb, + 0x49, 0xa4, 0x2e, 0x59, 0x5e, 0x75, 0x51, 0xd1, + 0xac, 0xc8, 0x86, 0x3e, 0xdb, 0x72, 0x2e, 0xb2, + 0xf7, 0xc3, 0x5b, 0xc7, 0xea, 0xed, 0x30, 0xd1, + 0xf7, 0x37, 0xee, 0x9d, 0x36, 0x59, 0x6f, 0xf8, + 0xce, 0xc0, 0x5c, 0x82, 0x80, 0x37, 0x83, 0xd7, + 0x45, 0x6a, 0xe9, 0xea, 0xc5, 0x3a, 0x59, 0x6b, + 0x34, 0x31, 0x44, 0x00, 0x74, 0xa7, 0x29, 0xab, + 0x79, 0x4a, 0xbd, 0xe8, 0x25, 0x35, 0x01, 0x11, + 0x40, 0xbf, 0x31, 0xbd, 0xd3, 0xe0, 0x68, 0x1e, + 0xd5, 0x5b, 0x2f, 0xe9, 0x20, 0xf2, 0x9f, 0x46, + 0x35, 0x30, 0xa8, 0xf1, 0xfe, 0xef, 0xd8, 0x76, + 0x23, 0x46, 0x34, 0x70, 0xa1, 0xce, 0xc6, 0x65, + 0x6d, 0xb0, 0x94, 0x7e, 0xe5, 0x92, 0x45, 0x7b, + 0xaa, 0xbb, 0x95, 0x97, 0x77, 0xcd, 0xd3, 0x02, + 0x03, 0x01, 0x00, 0x01, +}; + +static const uint8_t rsa2048_private_key[] = { + 0x30, 0x82, 0x04, 0xa4, 0x02, 0x01, 0x00, 0x02, + 0x82, 0x01, 0x01, 0x00, 0xbd, 0x9c, 0x83, 0x6b, + 0x0e, 0x8e, 0xcf, 0xfa, 0xaa, 0x4f, 0x6a, 0xf4, + 0xe3, 0x52, 0x0f, 0xa5, 0xd0, 0xbe, 0x5e, 0x7f, + 0x08, 0x24, 0xba, 0x87, 0x46, 0xfb, 0x28, 0x93, + 0xe5, 0xe5, 0x81, 0x42, 0xc0, 0xf9, 0x17, 0xc7, + 0x81, 0x01, 0xf4, 0x18, 0x6a, 0x17, 0xf5, 0x57, + 0x20, 0x37, 0xcf, 0xf9, 0x74, 0x5e, 0xe1, 0x48, + 0x6a, 0x71, 0x0a, 0x0f, 0x79, 0x72, 0x2b, 0x46, + 0x10, 0x53, 0xdc, 0x14, 0x43, 0xbd, 0xbc, 0x6d, + 0x15, 0x6f, 0x15, 0x4e, 0xf0, 0x0d, 0x89, 0x39, + 0x02, 0xc3, 0x68, 0x5c, 0xa8, 0xfc, 0xed, 0x64, + 0x9d, 0x98, 0xb7, 0xcd, 0x83, 0x66, 0x93, 0xc3, + 0xd9, 0x57, 0xa0, 0x21, 0x93, 0xad, 0x5c, 0x75, + 0x69, 0x88, 0x9e, 0x81, 0xdc, 0x7f, 0x1d, 0xd5, + 0xbd, 0x1c, 0xc1, 0x30, 0x56, 0xa5, 0xda, 0x99, + 0x46, 0xa6, 0x6d, 0x0e, 0x6f, 0x5e, 0x51, 0x34, + 0x49, 0x73, 0xc3, 0x67, 0x49, 0x7e, 0x21, 0x2a, + 0x20, 0xa7, 0x2b, 0x92, 0x73, 0x1d, 0xa5, 0x25, + 0x2a, 0xd0, 0x3a, 0x89, 0x75, 0xb2, 0xbb, 0x19, + 0x37, 0x78, 0x48, 0xd2, 0xf2, 0x2a, 0x6d, 0x9e, + 0xc6, 0x26, 0xca, 0x46, 0x8c, 0xf1, 0x42, 0x2a, + 0x31, 0xb2, 0xfc, 0xe7, 0x55, 
0x51, 0xff, 0x07, + 0x13, 0x5b, 0x36, 0x59, 0x2b, 0x43, 0x30, 0x4b, + 0x05, 0x5c, 0xd2, 0x45, 0xa0, 0xa0, 0x7c, 0x17, + 0x5b, 0x07, 0xbb, 0x5d, 0x83, 0x80, 0x92, 0x6d, + 0x87, 0x1a, 0x43, 0xac, 0xc7, 0x6b, 0x8d, 0x11, + 0x60, 0x27, 0xd2, 0xdf, 0xdb, 0x71, 0x02, 0x55, + 0x6e, 0xb5, 0xca, 0x4d, 0xda, 0x59, 0x0d, 0xb8, + 0x8c, 0xcd, 0xd3, 0x0e, 0x55, 0xa0, 0xa4, 0x8d, + 0xa0, 0x14, 0x10, 0x48, 0x42, 0x35, 0x56, 0x08, + 0xf7, 0x29, 0x5f, 0xa2, 0xea, 0xa4, 0x5e, 0x8e, + 0x99, 0x56, 0xaa, 0x5a, 0x8c, 0x23, 0x8f, 0x35, + 0x22, 0x8a, 0xff, 0xed, 0x02, 0x03, 0x01, 0x00, + 0x01, 0x02, 0x82, 0x01, 0x00, 0x4e, 0x4a, 0xf3, + 0x44, 0xe0, 0x64, 0xfd, 0xe1, 0xde, 0x33, 0x1e, + 0xd1, 0xf1, 0x8f, 0x6f, 0xe0, 0xa2, 0xfa, 0x08, + 0x60, 0xe1, 0xc6, 0xf0, 0xb2, 0x6d, 0x0f, 0xc6, + 0x28, 0x93, 0xb4, 0x19, 0x94, 0xab, 0xc3, 0xef, + 0x1a, 0xb4, 0xdd, 0x4e, 0xa2, 0x4a, 0x24, 0x8c, + 0x6c, 0xa6, 0x64, 0x05, 0x5f, 0x56, 0xba, 0xda, + 0xc1, 0x21, 0x1a, 0x7d, 0xf1, 0xf7, 0xce, 0xb9, + 0xa9, 0x9b, 0x92, 0x54, 0xfc, 0x95, 0x20, 0x22, + 0x4e, 0xd4, 0x9b, 0xe2, 0xab, 0x8e, 0x99, 0xb8, + 0x40, 0xaf, 0x30, 0x6a, 0xc6, 0x60, 0x0c, 0xd8, + 0x25, 0x44, 0xa1, 0xcb, 0xbb, 0x73, 0x77, 0x86, + 0xaa, 0x46, 0xf3, 0x54, 0xae, 0xa8, 0xa0, 0xdb, + 0xdd, 0xab, 0x6e, 0xfb, 0x2c, 0x5a, 0x14, 0xaf, + 0x08, 0x13, 0xa7, 0x6c, 0xe9, 0xfd, 0xcd, 0x4c, + 0x1f, 0x20, 0x3a, 0x16, 0x2b, 0xf0, 0xb6, 0x7c, + 0x47, 0x5f, 0xd1, 0x0a, 0x2c, 0xc4, 0xa5, 0x68, + 0xd0, 0x43, 0x75, 0x6b, 0x65, 0xaa, 0x32, 0xc6, + 0x99, 0x06, 0xcb, 0x8f, 0xe6, 0x8d, 0xce, 0xbf, + 0x4d, 0x0d, 0x7b, 0x22, 0x2a, 0x8a, 0xcb, 0x7d, + 0x7f, 0x16, 0x48, 0x85, 0xf1, 0x86, 0xcb, 0x54, + 0xb9, 0x39, 0xd4, 0xbc, 0xe3, 0x2d, 0x27, 0x59, + 0xf6, 0x81, 0x5e, 0x94, 0x45, 0xdf, 0xb9, 0x22, + 0xaf, 0x64, 0x0d, 0x14, 0xec, 0x8c, 0xeb, 0x71, + 0xac, 0xee, 0x09, 0x4c, 0xbf, 0x34, 0xf9, 0xf4, + 0x66, 0x77, 0x36, 0x3b, 0x41, 0x74, 0x01, 0x4f, + 0xfc, 0x56, 0x83, 0xba, 0x14, 0xb0, 0x2f, 0xdd, + 0x4d, 0xb9, 0x3f, 0xdf, 0x71, 0xbe, 0x7b, 0xba, + 0x66, 0xc8, 0xc5, 0x42, 0xc9, 0xba, 0x18, 0x63, + 0x45, 0x07, 0x2f, 0x84, 0x3e, 0xc3, 0xfb, 0x47, + 0xda, 0xd4, 0x1d, 0x0e, 0x9d, 0x96, 0xc0, 0xea, + 0xee, 0x45, 0x2f, 0xe1, 0x62, 0x23, 0xee, 0xef, + 0x3d, 0x5e, 0x55, 0xa1, 0x0d, 0x02, 0x81, 0x81, + 0x00, 0xeb, 0x76, 0x88, 0xd3, 0xae, 0x3f, 0x1d, + 0xf2, 0x49, 0xe0, 0x37, 0x49, 0x83, 0x82, 0x6c, + 0xf7, 0xf1, 0x17, 0x30, 0x75, 0x2e, 0x89, 0x06, + 0x88, 0x56, 0x32, 0xf6, 0xfa, 0x58, 0xcb, 0x3c, + 0x98, 0x67, 0xc3, 0xde, 0x10, 0x82, 0xe5, 0xfa, + 0xfa, 0x52, 0x47, 0x8d, 0xd7, 0x00, 0xc6, 0xcb, + 0xf7, 0xf6, 0x57, 0x9b, 0x6e, 0x0c, 0xac, 0xe8, + 0x3b, 0xd1, 0xde, 0xb5, 0x34, 0xaf, 0x8b, 0x2a, + 0xb0, 0x2d, 0x01, 0xeb, 0x7c, 0xa0, 0x42, 0x26, + 0xbb, 0x2b, 0x43, 0x0e, 0x1d, 0xe2, 0x4e, 0xc9, + 0xc1, 0x0a, 0x67, 0x1d, 0xfc, 0x83, 0x25, 0xce, + 0xb2, 0x18, 0xd9, 0x0d, 0x70, 0xf5, 0xa3, 0x5a, + 0x9c, 0x99, 0xdd, 0x47, 0xa1, 0x57, 0xe7, 0x20, + 0xde, 0xa1, 0x29, 0x8d, 0x96, 0x62, 0xf9, 0x26, + 0x95, 0x51, 0xa6, 0xe7, 0x09, 0x8b, 0xba, 0x16, + 0x8b, 0x19, 0x5b, 0xf9, 0x27, 0x0d, 0xc5, 0xd6, + 0x5f, 0x02, 0x81, 0x81, 0x00, 0xce, 0x26, 0x31, + 0xb5, 0x43, 0x53, 0x95, 0x39, 0xdd, 0x01, 0x98, + 0x8b, 0x3d, 0x27, 0xeb, 0x0b, 0x87, 0x1c, 0x95, + 0xfc, 0x3e, 0x36, 0x51, 0x31, 0xb5, 0xea, 0x59, + 0x56, 0xc0, 0x97, 0x62, 0xf0, 0x63, 0x2b, 0xb6, + 0x30, 0x9b, 0xdf, 0x19, 0x10, 0xe9, 0xa0, 0x3d, + 0xea, 0x54, 0x5a, 0xe6, 0xc6, 0x9e, 0x7e, 0xb5, + 0xf0, 0xb0, 0x54, 0xef, 0xc3, 0xe1, 0x47, 0xa6, + 0x95, 0xc7, 0xe4, 0xa3, 0x4a, 0x30, 0x68, 0x24, + 0x98, 0x7d, 0xc1, 0x34, 0xa9, 0xcb, 0xbc, 0x3c, + 0x08, 0x9c, 0x7d, 0x0c, 0xa2, 
0xb7, 0x60, 0xaa, + 0x38, 0x08, 0x16, 0xa6, 0x7f, 0xdb, 0xd2, 0xb1, + 0x67, 0xe7, 0x93, 0x8e, 0xbb, 0x7e, 0xb9, 0xb5, + 0xd0, 0xd0, 0x9f, 0x7b, 0xcc, 0x46, 0xe6, 0x74, + 0x78, 0x1a, 0x96, 0xd6, 0xd7, 0x74, 0x34, 0x54, + 0x3b, 0x54, 0x55, 0x7f, 0x89, 0x81, 0xbc, 0x40, + 0x55, 0x87, 0x24, 0x95, 0x33, 0x02, 0x81, 0x81, + 0x00, 0xb0, 0x18, 0x5d, 0x2a, 0x1a, 0x95, 0x9f, + 0x9a, 0xd5, 0x3f, 0x37, 0x79, 0xe6, 0x3d, 0x83, + 0xab, 0x46, 0x86, 0x36, 0x3a, 0x5d, 0x0c, 0x23, + 0x73, 0x91, 0x2b, 0xda, 0x63, 0xce, 0x46, 0x68, + 0xd1, 0xfe, 0x40, 0x90, 0xf2, 0x3e, 0x43, 0x2b, + 0x19, 0x4c, 0xb1, 0xb0, 0xd5, 0x8c, 0x02, 0x21, + 0x07, 0x18, 0x17, 0xda, 0xe9, 0x49, 0xd7, 0x82, + 0x73, 0x42, 0x78, 0xd1, 0x82, 0x4e, 0x8a, 0xc0, + 0xe9, 0x33, 0x2f, 0xcd, 0x62, 0xce, 0x23, 0xca, + 0xfd, 0x8d, 0xd4, 0x3f, 0x59, 0x80, 0x27, 0xb6, + 0x61, 0x85, 0x9b, 0x2a, 0xe4, 0xef, 0x5c, 0x36, + 0x22, 0x21, 0xcd, 0x2a, 0x6d, 0x41, 0x77, 0xe2, + 0xcb, 0x5d, 0x93, 0x0d, 0x00, 0x10, 0x52, 0x8d, + 0xd5, 0x92, 0x28, 0x16, 0x78, 0xd3, 0x1a, 0x4c, + 0x8d, 0xbd, 0x9c, 0x1a, 0x0b, 0x9c, 0x91, 0x16, + 0x4c, 0xff, 0x31, 0x36, 0xbb, 0xcb, 0x64, 0x1a, + 0xf7, 0x02, 0x81, 0x80, 0x32, 0x65, 0x09, 0xdf, + 0xca, 0xee, 0xa2, 0xdb, 0x3b, 0x58, 0xc9, 0x86, + 0xb8, 0x53, 0x8a, 0xd5, 0x0d, 0x99, 0x82, 0x5c, + 0xe0, 0x84, 0x7c, 0xc2, 0xcf, 0x3a, 0xd3, 0xce, + 0x2e, 0x54, 0x93, 0xbe, 0x3a, 0x30, 0x14, 0x60, + 0xbb, 0xaa, 0x05, 0x41, 0xaa, 0x2b, 0x1f, 0x17, + 0xaa, 0xb9, 0x72, 0x12, 0xf9, 0xe9, 0xf5, 0xe6, + 0x39, 0xe4, 0xf9, 0x9c, 0x03, 0xf5, 0x75, 0x16, + 0xc6, 0x7f, 0xf1, 0x1f, 0x10, 0xc8, 0x54, 0xb1, + 0xe6, 0x84, 0x15, 0xb0, 0xb0, 0x7a, 0x7a, 0x9e, + 0x8c, 0x4a, 0xd1, 0x8c, 0xf1, 0x91, 0x32, 0xeb, + 0x71, 0xa6, 0xbf, 0xdb, 0x1f, 0xcc, 0xd8, 0xcb, + 0x92, 0xc3, 0xf2, 0xaf, 0x89, 0x22, 0x32, 0xfd, + 0x32, 0x12, 0xda, 0xbb, 0xac, 0x55, 0x68, 0x01, + 0x78, 0x56, 0x89, 0x7c, 0xb0, 0x0e, 0x9e, 0xcc, + 0xc6, 0x28, 0x04, 0x7e, 0x83, 0xf5, 0x96, 0x30, + 0x92, 0x51, 0xf2, 0x1b, 0x02, 0x81, 0x81, 0x00, + 0x83, 0x6d, 0xd1, 0x98, 0x90, 0x41, 0x8c, 0xa7, + 0x92, 0x83, 0xac, 0x89, 0x05, 0x0c, 0x79, 0x67, + 0x90, 0xb6, 0xa1, 0xf3, 0x2f, 0xca, 0xf0, 0x15, + 0xe0, 0x30, 0x58, 0xe9, 0x4f, 0xcb, 0x4c, 0x56, + 0x56, 0x56, 0x14, 0x3f, 0x1b, 0x79, 0xb6, 0xef, + 0x57, 0x4b, 0x28, 0xbd, 0xb0, 0xe6, 0x0c, 0x49, + 0x4b, 0xbe, 0xe1, 0x57, 0x28, 0x2a, 0x23, 0x5e, + 0xc4, 0xa2, 0x19, 0x4b, 0x00, 0x67, 0x78, 0xd9, + 0x26, 0x6e, 0x17, 0x25, 0xce, 0xe4, 0xfd, 0xde, + 0x86, 0xa8, 0x5a, 0x67, 0x47, 0x6b, 0x15, 0x09, + 0xe1, 0xec, 0x8e, 0x62, 0x98, 0x91, 0x6f, 0xc0, + 0x98, 0x0c, 0x70, 0x0e, 0x7d, 0xbe, 0x63, 0xbd, + 0x12, 0x5a, 0x98, 0x1c, 0xe3, 0x0c, 0xfb, 0xc7, + 0xfb, 0x1b, 0xbd, 0x02, 0x87, 0xcc, 0x0c, 0xbb, + 0xc2, 0xd4, 0xb6, 0xc1, 0xa1, 0x23, 0xd3, 0x1e, + 0x21, 0x6f, 0x48, 0xba, 0x0e, 0x2e, 0xc7, 0x42 +}; + +static const uint8_t rsa2048_public_key[] = { + 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, + 0x00, 0xbd, 0x9c, 0x83, 0x6b, 0x0e, 0x8e, 0xcf, + 0xfa, 0xaa, 0x4f, 0x6a, 0xf4, 0xe3, 0x52, 0x0f, + 0xa5, 0xd0, 0xbe, 0x5e, 0x7f, 0x08, 0x24, 0xba, + 0x87, 0x46, 0xfb, 0x28, 0x93, 0xe5, 0xe5, 0x81, + 0x42, 0xc0, 0xf9, 0x17, 0xc7, 0x81, 0x01, 0xf4, + 0x18, 0x6a, 0x17, 0xf5, 0x57, 0x20, 0x37, 0xcf, + 0xf9, 0x74, 0x5e, 0xe1, 0x48, 0x6a, 0x71, 0x0a, + 0x0f, 0x79, 0x72, 0x2b, 0x46, 0x10, 0x53, 0xdc, + 0x14, 0x43, 0xbd, 0xbc, 0x6d, 0x15, 0x6f, 0x15, + 0x4e, 0xf0, 0x0d, 0x89, 0x39, 0x02, 0xc3, 0x68, + 0x5c, 0xa8, 0xfc, 0xed, 0x64, 0x9d, 0x98, 0xb7, + 0xcd, 0x83, 0x66, 0x93, 0xc3, 0xd9, 0x57, 0xa0, + 0x21, 0x93, 0xad, 0x5c, 0x75, 0x69, 0x88, 0x9e, + 0x81, 0xdc, 0x7f, 0x1d, 0xd5, 
0xbd, 0x1c, 0xc1, + 0x30, 0x56, 0xa5, 0xda, 0x99, 0x46, 0xa6, 0x6d, + 0x0e, 0x6f, 0x5e, 0x51, 0x34, 0x49, 0x73, 0xc3, + 0x67, 0x49, 0x7e, 0x21, 0x2a, 0x20, 0xa7, 0x2b, + 0x92, 0x73, 0x1d, 0xa5, 0x25, 0x2a, 0xd0, 0x3a, + 0x89, 0x75, 0xb2, 0xbb, 0x19, 0x37, 0x78, 0x48, + 0xd2, 0xf2, 0x2a, 0x6d, 0x9e, 0xc6, 0x26, 0xca, + 0x46, 0x8c, 0xf1, 0x42, 0x2a, 0x31, 0xb2, 0xfc, + 0xe7, 0x55, 0x51, 0xff, 0x07, 0x13, 0x5b, 0x36, + 0x59, 0x2b, 0x43, 0x30, 0x4b, 0x05, 0x5c, 0xd2, + 0x45, 0xa0, 0xa0, 0x7c, 0x17, 0x5b, 0x07, 0xbb, + 0x5d, 0x83, 0x80, 0x92, 0x6d, 0x87, 0x1a, 0x43, + 0xac, 0xc7, 0x6b, 0x8d, 0x11, 0x60, 0x27, 0xd2, + 0xdf, 0xdb, 0x71, 0x02, 0x55, 0x6e, 0xb5, 0xca, + 0x4d, 0xda, 0x59, 0x0d, 0xb8, 0x8c, 0xcd, 0xd3, + 0x0e, 0x55, 0xa0, 0xa4, 0x8d, 0xa0, 0x14, 0x10, + 0x48, 0x42, 0x35, 0x56, 0x08, 0xf7, 0x29, 0x5f, + 0xa2, 0xea, 0xa4, 0x5e, 0x8e, 0x99, 0x56, 0xaa, + 0x5a, 0x8c, 0x23, 0x8f, 0x35, 0x22, 0x8a, 0xff, + 0xed, 0x02, 0x03, 0x01, 0x00, 0x01 +}; + +static const uint8_t test_sha1_dgst[] = { + 0x3c, 0x05, 0x19, 0x34, 0x29, 0x19, 0xc7, 0xe0, + 0x87, 0xb6, 0x24, 0xf9, 0x58, 0xac, 0xa4, 0xd4, + 0xb2, 0xd9, 0x03, 0x9e, +}; + +static const uint8_t exp_signature_rsa2048_pkcs1[] = { + 0x4e, 0x82, 0x56, 0x4c, 0x84, 0x66, 0xca, 0x1e, + 0xc6, 0x92, 0x46, 0x20, 0x02, 0x6b, 0x64, 0x46, + 0x15, 0x6b, 0x24, 0xf2, 0xbb, 0xfa, 0x44, 0x3c, + 0xaf, 0x42, 0xc8, 0x41, 0xfd, 0xce, 0xed, 0x95, + 0x34, 0xaf, 0x25, 0x09, 0xd1, 0x06, 0x94, 0xaa, + 0x52, 0xd4, 0x29, 0xc8, 0x52, 0x34, 0x67, 0x59, + 0x4f, 0x5a, 0xfd, 0x23, 0x30, 0x5e, 0xc7, 0x1e, + 0xa6, 0xe0, 0x1b, 0x23, 0xca, 0x82, 0x47, 0x9a, + 0x2e, 0x2c, 0x66, 0x45, 0x5a, 0x12, 0xa9, 0x15, + 0xbf, 0xd6, 0xd6, 0xfa, 0x8d, 0x60, 0x99, 0x89, + 0x91, 0x39, 0x06, 0xb7, 0xd3, 0x9a, 0xef, 0x15, + 0x7b, 0x95, 0x87, 0x77, 0x2c, 0x41, 0xd4, 0x71, + 0xd5, 0xdf, 0x22, 0x7b, 0x01, 0xe2, 0xc1, 0xfb, + 0xb9, 0x4e, 0x0c, 0x9b, 0xd5, 0x04, 0xed, 0x2b, + 0x7e, 0x73, 0x53, 0xaa, 0x33, 0x89, 0x9d, 0x95, + 0x28, 0x8f, 0x8b, 0x80, 0x34, 0x7a, 0xea, 0xe3, + 0x66, 0x8a, 0xa8, 0xad, 0xed, 0x91, 0x43, 0xdd, + 0x77, 0xe5, 0xd7, 0x16, 0xda, 0xa8, 0x00, 0x29, + 0x3f, 0x9f, 0xe0, 0x1d, 0x42, 0x9d, 0x35, 0x5d, + 0x0f, 0xf3, 0x90, 0x27, 0x3a, 0x8c, 0x46, 0x13, + 0x53, 0x3e, 0x3b, 0x38, 0x77, 0xf8, 0x57, 0x61, + 0xbc, 0xc4, 0x54, 0x68, 0x48, 0xae, 0x58, 0x03, + 0x33, 0x94, 0x3f, 0x18, 0x1e, 0xb3, 0x3f, 0x79, + 0xa7, 0x26, 0x92, 0x5d, 0x32, 0x2a, 0xdb, 0xe6, + 0x3a, 0xe8, 0xd7, 0xaa, 0x91, 0xfe, 0x9f, 0x06, + 0x26, 0x68, 0x8c, 0x27, 0x31, 0xb0, 0x04, 0x9e, + 0x94, 0x79, 0x63, 0xa1, 0xc7, 0xe8, 0x5b, 0x8c, + 0xd3, 0xf1, 0x88, 0x58, 0x31, 0x2f, 0x4e, 0x11, + 0x00, 0xfe, 0x29, 0xad, 0x2c, 0xa9, 0x8e, 0x63, + 0xd8, 0x7d, 0xc5, 0xa1, 0x71, 0xfa, 0x08, 0x29, + 0xea, 0xd6, 0x6c, 0x53, 0x00, 0x52, 0xa0, 0xed, + 0x6b, 0x7c, 0x67, 0x50, 0x71, 0x2d, 0x96, 0x7a, +}; + +static const uint8_t exp_signature_rsa1024_pkcs1[] = { + 0x6b, 0x5b, 0xbb, 0x3b, 0x1f, 0x08, 0xd8, 0xc0, + 0x4a, 0xf1, 0x5a, 0x12, 0xc2, 0x39, 0x14, 0x65, + 0x4f, 0xda, 0x79, 0x67, 0xf2, 0x89, 0x25, 0xad, + 0x9e, 0x7e, 0xba, 0xa8, 0x34, 0x15, 0x03, 0xdd, + 0x80, 0x6b, 0x01, 0xd7, 0x4a, 0xf3, 0xd6, 0xef, + 0x1e, 0x48, 0xf3, 0xbc, 0x75, 0x1a, 0xc4, 0x2c, + 0x90, 0x15, 0x9f, 0x21, 0x24, 0x98, 0x21, 0xef, + 0x6d, 0x3b, 0xf3, 0x82, 0x8f, 0x8d, 0xd8, 0x48, + 0x37, 0x16, 0x19, 0x8e, 0x3c, 0x64, 0xa0, 0x9e, + 0xf7, 0x0c, 0xd9, 0x5c, 0xc6, 0x13, 0xc4, 0x5f, + 0xf8, 0xf3, 0x59, 0x5b, 0xd0, 0x33, 0x95, 0x98, + 0xde, 0x67, 0x25, 0x58, 0x46, 0xba, 0xee, 0x0f, + 0x47, 0x7a, 0x7f, 0xd0, 0xe4, 0x77, 0x09, 0x17, + 0xe9, 0x81, 0x6e, 0x2d, 0x33, 0x9b, 0x13, 0x0b, 
+ 0xc9, 0xb2, 0x0c, 0x2c, 0xb5, 0xdf, 0x52, 0x8f, + 0xab, 0x0d, 0xc6, 0x59, 0x1d, 0xc7, 0x33, 0x7b, +}; + +static const uint8_t test_plaintext[] = { + 0x00, 0x44, 0xbc, 0x6f, 0x77, 0xfb, 0xe2, 0xa4, + 0x98, 0x9e, 0xf5, 0x33, 0xa0, 0xbd, 0x81, 0xb9, + 0xf1, 0x44, 0x7f, 0x79, 0x89, 0x23, 0xe5, 0x46, + 0x66, 0x9f, 0x98, 0x95, 0x6f, 0x56, 0x78, 0xf6, + 0xf5, 0xac, 0x9c, 0xda, 0xc2, 0x79, 0x59, 0xf0, + 0x1b, 0x03, 0xfa, 0x46, 0x1c, 0x1f, 0x18, 0x07, + 0xce, 0xad, 0xed, 0x3d, 0x11, 0xf9, 0x1b, 0x26, + 0x4a, 0x97, 0x28, 0x71, 0x5f, 0x2c, 0x5e, 0x58, + 0xf0, 0xd6, 0xbf, 0xa4, 0x12, 0xd0, 0x1d, 0x07, + 0xcb, 0x73, 0x66, 0xb6, 0xa4, 0x09, 0xaf, 0x5d, + 0xe9, 0x14, 0x14, 0xaf, 0x69, 0xd6, 0xee, 0x0a, + 0xfc, 0xca, 0xac, 0x94, 0x47, 0xd5, 0x9d, 0x5b, + 0x2b, 0xfb, 0xce, 0x9d, 0x04, 0xc1, 0xaf, 0xa5, + 0xa1, 0x8d, 0xa9, 0x48, 0xa8, 0x65, 0xe6, 0x9f, + 0x74, 0x78, 0x16, 0x32, 0x93, 0xb5, 0x21, 0xb9, + 0x9f, 0x3f, 0xc1, 0xe5, 0xa2, 0x50, 0x8b, 0x12, + 0xfb, 0x3e, 0xb0, 0x8a, 0x00, 0xc7, 0x20, 0x56, + 0xb3, 0xb1, 0x29, 0x95, 0x89, 0xd6, 0x50, 0xf5, + 0x37, 0x38, 0x8e, 0x12, 0xf1, 0xba, 0x82, 0x37, + 0x34, 0x68, 0x4b, 0xe8, 0xe3, 0x11, 0x1c, 0x46, + 0xf9, 0x63, 0x3a, 0xd6, 0xf3, 0x3f, 0x55, 0xa6, + 0xbd, 0x89, 0xf1, 0x2d, 0x38, 0x91, 0x7c, 0xc2, + 0x4d, 0xf1, 0x69, 0x82, 0x6d, 0x71, 0x77, 0xf4, + 0xfc, 0x43, 0x20, 0x6f, 0x43, 0xb9, 0x43, 0xd1, + 0x65, 0xbd, 0xca, 0xb1, 0x43, 0x87, 0xf8, 0xc8, + 0x76, 0x21, 0xa9, 0xeb, 0x3e, 0x9a, 0xef, 0xc9, + 0x0e, 0x79, 0xbc, 0xf0, 0xf8, 0xc8, 0xe2, 0xbc, + 0x33, 0x35, 0x3e, 0xfc, 0xf9, 0x44, 0x69, 0x06, + 0x7c, 0x7f, 0x5d, 0xa2, 0x9e, 0xab, 0xc2, 0x82, + 0xa0, 0xfb, 0xc5, 0x79, 0x57, 0x8c, 0xf1, 0x1c, + 0x51, 0x64, 0x4c, 0x56, 0x08, 0x80, 0x32, 0xf4, + 0x97, 0x8f, 0x6f, 0xb2, 0x16, 0xa6, 0x9d, 0x71, +}; + +static const uint8_t exp_ciphertext_rsa1024_raw[] = { + 0x01, 0xa0, 0xc2, 0x94, 0x9f, 0xd6, 0xbe, 0x8d, + 0xe9, 0x24, 0xaa, 0x9c, 0x67, 0xd7, 0xe3, 0x04, + 0x34, 0xbf, 0xd3, 0x27, 0xa1, 0x43, 0xeb, 0x60, + 0x6b, 0x5b, 0x64, 0x15, 0x55, 0x16, 0x98, 0x35, + 0xc2, 0x59, 0xa7, 0xf7, 0x24, 0xf7, 0x05, 0xb9, + 0xe8, 0x56, 0x6f, 0xf2, 0x7d, 0x8b, 0x3c, 0xcb, + 0xa6, 0xc2, 0xac, 0x0c, 0x37, 0x8c, 0x70, 0x70, + 0x55, 0x05, 0x07, 0x0d, 0x63, 0x6b, 0x7d, 0x5f, + 0xae, 0x03, 0x1e, 0x55, 0x05, 0xbb, 0xa8, 0xe7, + 0xff, 0xa0, 0x8c, 0x5b, 0x6b, 0x01, 0x48, 0x2e, + 0x4f, 0x7f, 0xe2, 0x74, 0xc6, 0x32, 0xa7, 0x2d, + 0xdb, 0x91, 0x9b, 0x67, 0x4d, 0x71, 0xf9, 0x8c, + 0x42, 0x43, 0x75, 0x4e, 0xd0, 0x0e, 0x7c, 0xa0, + 0x97, 0x1a, 0x5f, 0x8e, 0x6f, 0xe4, 0xfa, 0x16, + 0x1d, 0x59, 0x0e, 0x0b, 0x11, 0x12, 0xa3, 0x0c, + 0xa6, 0x55, 0xe6, 0xdb, 0xa7, 0x71, 0xa6, 0xff, +}; + +static const uint8_t exp_ciphertext_rsa1024_pkcs1[] = { + 0x93, 0x78, 0x6a, 0x76, 0xb8, 0x94, 0xea, 0xe4, + 0x32, 0x79, 0x01, 0x8b, 0xc1, 0xcb, 0x2e, 0x2d, + 0xfe, 0xdc, 0x9b, 0xe3, 0xe9, 0x23, 0xe4, 0x0a, + 0xb0, 0x6b, 0x9f, 0x6b, 0x62, 0xf5, 0x3d, 0xf0, + 0x78, 0x84, 0x77, 0x21, 0xad, 0x0b, 0x30, 0x30, + 0x94, 0xe2, 0x18, 0xc4, 0x9b, 0x12, 0x06, 0xc8, + 0xaa, 0xf7, 0x30, 0xe4, 0xc8, 0x64, 0xe7, 0x51, + 0xf1, 0x6a, 0xe1, 0xa2, 0x58, 0x7a, 0x02, 0x9c, + 0x8e, 0xf0, 0x2d, 0x25, 0x6b, 0xb7, 0x25, 0x5e, + 0x05, 0xaf, 0x38, 0xb2, 0x69, 0x5e, 0x6c, 0x75, + 0x6e, 0x27, 0xba, 0x5d, 0x7d, 0x35, 0x72, 0xb7, + 0x25, 0xd4, 0xaa, 0xb2, 0x4b, 0x9e, 0x6b, 0x82, + 0xb2, 0x32, 0xe2, 0x13, 0x1d, 0x00, 0x21, 0x08, + 0xae, 0x14, 0xbb, 0xc0, 0x40, 0xb7, 0x0d, 0xd5, + 0x0e, 0x4d, 0x6d, 0x9a, 0x70, 0x86, 0xe9, 0xfc, + 0x67, 0x2b, 0xa4, 0x11, 0x45, 0xb6, 0xc4, 0x2f, +}; + +static const uint8_t exp_ciphertext_rsa2048_raw[] = { + 0x09, 0x7b, 0x9e, 
0x7c, 0x10, 0x1f, 0x73, 0xb4, + 0x5f, 0xdb, 0x4f, 0x05, 0xe7, 0xfc, 0x9e, 0x35, + 0x48, 0xd8, 0xc8, 0xf5, 0xac, 0x6d, 0xb4, 0xb0, + 0xd4, 0xf7, 0x69, 0x0f, 0x30, 0x78, 0xbb, 0x55, + 0x67, 0x66, 0x66, 0x05, 0xf4, 0x77, 0xe2, 0x30, + 0xa5, 0x94, 0x10, 0xa3, 0xcb, 0xee, 0x13, 0x9f, + 0x47, 0x1b, 0x2e, 0xf9, 0xfd, 0x94, 0x09, 0xbd, + 0x26, 0x6e, 0x84, 0xc7, 0x5c, 0x42, 0x20, 0x76, + 0x72, 0x83, 0x75, 0x68, 0xa4, 0x18, 0x2d, 0x76, + 0x62, 0xc3, 0xab, 0xc0, 0xc9, 0x36, 0x59, 0xe0, + 0xa9, 0x70, 0x1f, 0xff, 0x97, 0x07, 0x0d, 0x88, + 0xc2, 0xd8, 0x51, 0x35, 0xf7, 0xb0, 0x50, 0xe4, + 0x9f, 0x3d, 0xd4, 0x71, 0x8b, 0x40, 0x89, 0x71, + 0x6c, 0xd8, 0xc2, 0x63, 0xb6, 0x3a, 0xce, 0xb1, + 0x32, 0xf1, 0xc6, 0x11, 0x31, 0x25, 0x48, 0xcf, + 0xeb, 0xbc, 0xd3, 0x9b, 0xc5, 0xbd, 0xd2, 0x57, + 0x73, 0x9b, 0x20, 0xb8, 0xdf, 0xbe, 0xb8, 0x40, + 0xb6, 0xac, 0x24, 0xdb, 0x94, 0x6a, 0x93, 0x43, + 0x4a, 0xa8, 0xa3, 0xcf, 0xd5, 0x61, 0x1b, 0x46, + 0x1d, 0x6f, 0x57, 0xec, 0xa6, 0xd0, 0x44, 0x05, + 0x48, 0xb8, 0x90, 0x80, 0x23, 0x8e, 0x5f, 0xb0, + 0x4b, 0x6f, 0xe3, 0xf9, 0xb0, 0x04, 0x60, 0xae, + 0x80, 0xcf, 0xa5, 0x5c, 0x11, 0xe4, 0xce, 0x57, + 0x5b, 0xbb, 0xde, 0x92, 0xfc, 0xe7, 0x3f, 0xe0, + 0xfc, 0x06, 0xc8, 0xf3, 0x8c, 0xac, 0x86, 0x09, + 0x31, 0xe5, 0x7e, 0xfb, 0x5d, 0xa7, 0x57, 0xf8, + 0x1d, 0x23, 0x9d, 0xa3, 0xeb, 0x53, 0x28, 0xde, + 0xbf, 0x53, 0xef, 0x35, 0x3c, 0x7e, 0x3c, 0x1b, + 0x76, 0x9d, 0x09, 0x25, 0x43, 0xd4, 0x8b, 0xca, + 0xda, 0x45, 0x5b, 0xdc, 0x9f, 0x57, 0x5a, 0x30, + 0x2e, 0xe9, 0x73, 0x68, 0x28, 0xfa, 0x40, 0xb0, + 0x7c, 0x31, 0xd7, 0x8b, 0x4e, 0x99, 0x94, 0xf1, +}; + +static const uint8_t exp_ciphertext_rsa2048_pkcs1[] = { + 0xa5, 0x19, 0x19, 0x34, 0xad, 0xf6, 0xd2, 0xbe, + 0xed, 0x8f, 0xe5, 0xfe, 0xa2, 0xa5, 0x20, 0x08, + 0x15, 0x53, 0x7c, 0x68, 0x28, 0xae, 0x07, 0xb2, + 0x4c, 0x5d, 0xee, 0xc1, 0xc6, 0xdc, 0xd6, 0x8b, + 0xc6, 0xba, 0x46, 0xe1, 0x16, 0xa9, 0x04, 0x72, + 0xdf, 0x8f, 0x1e, 0x97, 0x2a, 0x55, 0xe7, 0xac, + 0x08, 0x0d, 0x61, 0xe8, 0x64, 0x8b, 0x6f, 0x96, + 0x0e, 0xbb, 0x8a, 0x30, 0xb3, 0x73, 0x28, 0x61, + 0x16, 0x89, 0x90, 0x88, 0x8e, 0xda, 0x22, 0xe6, + 0x42, 0x16, 0xc7, 0xe8, 0x30, 0x0d, 0x7f, 0x44, + 0x1e, 0xef, 0xe6, 0xdb, 0x78, 0x54, 0x89, 0xa5, + 0x60, 0x67, 0xb3, 0x35, 0x2d, 0x79, 0x49, 0xcf, + 0xe6, 0x8f, 0xf3, 0x64, 0x52, 0x1c, 0x6c, 0x43, + 0x7e, 0xb0, 0xde, 0x55, 0xdf, 0xbe, 0xb7, 0xb1, + 0xdb, 0x02, 0xee, 0x76, 0x96, 0xcc, 0x0b, 0x97, + 0x8c, 0x23, 0xaa, 0x7d, 0x4c, 0x47, 0x28, 0x41, + 0x7a, 0x20, 0x39, 0x1f, 0x64, 0x0b, 0xf1, 0x74, + 0xf1, 0x29, 0xda, 0xe9, 0x3a, 0x36, 0xa6, 0x88, + 0xb8, 0xc0, 0x21, 0xb8, 0x9b, 0x5d, 0x90, 0x85, + 0xa3, 0x30, 0x61, 0x17, 0x8c, 0x74, 0x63, 0xd5, + 0x0f, 0x95, 0xdc, 0xc8, 0x4f, 0xa7, 0x24, 0x55, + 0x40, 0xe2, 0x84, 0x57, 0x65, 0x06, 0x11, 0x30, + 0x2b, 0x9e, 0x32, 0x95, 0x39, 0xf2, 0x1a, 0x3f, + 0xab, 0xcd, 0x7b, 0x7f, 0x9c, 0xf0, 0x00, 0x50, + 0x7c, 0xf4, 0xbe, 0xcb, 0x80, 0xea, 0x66, 0xba, + 0x0e, 0x7b, 0x46, 0x0b, 0x25, 0xe0, 0xc1, 0x03, + 0x29, 0x11, 0x2d, 0x69, 0x4f, 0x21, 0xa2, 0x58, + 0x37, 0x4b, 0x84, 0x15, 0xb3, 0x65, 0x3a, 0xac, + 0xd4, 0xd0, 0xf6, 0xdf, 0x4b, 0x82, 0xca, 0x9e, + 0xbb, 0xbe, 0x3c, 0x4d, 0xd5, 0xbf, 0x00, 0xd6, + 0x12, 0x48, 0x72, 0x0b, 0xc7, 0xf8, 0xe1, 0xcd, + 0xd0, 0x28, 0x03, 0x19, 0xa6, 0x06, 0x13, 0x45, +}; + +static const uint8_t rsa_private_key_lack_element[] = { + /* RSAPrivateKey, offset: 0, length: 176 */ + 0x30, 0x81, 0xb0, + /* version, offset: 4, length: 1 */ + 0x02, 0x01, 0x00, + /* n, offset: 7, length: 65 */ + 0x02, 0x41, + 0x00, 0xb9, 0xe1, 0x22, 0xdb, 0x56, 0x2f, 0xb6, + 0xf7, 0xf0, 0x0a, 0x87, 
0x43, 0x07, 0x12, 0xdb, + 0x6d, 0xb6, 0x2b, 0x41, 0x8d, 0x2c, 0x3c, 0xa5, + 0xdd, 0x78, 0x9a, 0x8f, 0xab, 0x8e, 0xf2, 0x4a, + 0xc8, 0x34, 0x0c, 0x12, 0x4f, 0x11, 0x90, 0xc6, + 0xc2, 0xa5, 0xd0, 0xcd, 0xfb, 0xfc, 0x2c, 0x95, + 0x56, 0x82, 0xdf, 0x39, 0xf3, 0x3b, 0x1d, 0x62, + 0x26, 0x97, 0xb7, 0x93, 0x25, 0xc7, 0xec, 0x7e, + 0xf7, + /* e, offset: 74, length: 3 */ + 0x02, 0x03, 0x01, 0x00, 0x01, + /* d, offset: 79, length: 64 */ + 0x02, 0x40, + 0x1e, 0x80, 0xfe, 0xda, 0x65, 0xdb, 0x70, 0xb8, + 0x61, 0x91, 0x28, 0xbf, 0x6c, 0x32, 0xc1, 0x05, + 0xd1, 0x26, 0x6a, 0x1c, 0x83, 0xcc, 0xf4, 0x1f, + 0x53, 0x42, 0x72, 0x1f, 0x62, 0x57, 0x0a, 0xc4, + 0x66, 0x76, 0x30, 0x87, 0xb9, 0xb1, 0xb9, 0x6a, + 0x63, 0xfd, 0x8f, 0x3e, 0xfc, 0x35, 0x3f, 0xd6, + 0x2e, 0x6c, 0xc8, 0x70, 0x8a, 0x17, 0xc1, 0x28, + 0x6a, 0xfe, 0x51, 0x56, 0xb3, 0x92, 0x6f, 0x09, + /* p, offset: 145, length: 33 */ + 0x02, 0x21, + 0x00, 0xe3, 0x2e, 0x2d, 0x8d, 0xba, 0x1c, 0x34, + 0x4c, 0x49, 0x9f, 0xc1, 0xa6, 0xdd, 0xd7, 0x13, + 0x8d, 0x05, 0x48, 0xdd, 0xff, 0x5c, 0x30, 0xbc, + 0x6b, 0xc4, 0x18, 0x9d, 0xfc, 0xa2, 0xd0, 0x9b, + 0x4d, + /* q, offset: 180, length: 33 */ + 0x02, 0x21, + 0x00, 0xd1, 0x75, 0xaf, 0x4b, 0xc6, 0x1a, 0xb0, + 0x98, 0x14, 0x42, 0xae, 0x33, 0xf3, 0x44, 0xde, + 0x21, 0xcb, 0x04, 0xda, 0xfb, 0x1e, 0x35, 0x92, + 0xcd, 0x69, 0xc0, 0x83, 0x06, 0x83, 0x8e, 0x39, + 0x53, + /* lack element: dp, dq, u */ +}; + +static const uint8_t rsa_public_key_lack_element[] = { + /* RSAPublicKey, offset: 0, length: 67 */ + 0x30, 0x81, 0x43, + /* n, offset: 7, length: 65 */ + 0x02, 0x41, + 0x00, 0xb9, 0xe1, 0x22, 0xdb, 0x56, 0x2f, 0xb6, + 0xf7, 0xf0, 0x0a, 0x87, 0x43, 0x07, 0x12, 0xdb, + 0x6d, 0xb6, 0x2b, 0x41, 0x8d, 0x2c, 0x3c, 0xa5, + 0xdd, 0x78, 0x9a, 0x8f, 0xab, 0x8e, 0xf2, 0x4a, + 0xc8, 0x34, 0x0c, 0x12, 0x4f, 0x11, 0x90, 0xc6, + 0xc2, 0xa5, 0xd0, 0xcd, 0xfb, 0xfc, 0x2c, 0x95, + 0x56, 0x82, 0xdf, 0x39, 0xf3, 0x3b, 0x1d, 0x62, + 0x26, 0x97, 0xb7, 0x93, 0x25, 0xc7, 0xec, 0x7e, + 0xf7, + /* lack element: e */ +}; + +static const uint8_t rsa_public_key_empty_element[] = { + /* RSAPublicKey, offset: 0, length: 69 */ + 0x30, 0x81, 0x45, + /* n, offset: 7, length: 65 */ + 0x02, 0x41, + 0x00, 0xb9, 0xe1, 0x22, 0xdb, 0x56, 0x2f, 0xb6, + 0xf7, 0xf0, 0x0a, 0x87, 0x43, 0x07, 0x12, 0xdb, + 0x6d, 0xb6, 0x2b, 0x41, 0x8d, 0x2c, 0x3c, 0xa5, + 0xdd, 0x78, 0x9a, 0x8f, 0xab, 0x8e, 0xf2, 0x4a, + 0xc8, 0x34, 0x0c, 0x12, 0x4f, 0x11, 0x90, 0xc6, + 0xc2, 0xa5, 0xd0, 0xcd, 0xfb, 0xfc, 0x2c, 0x95, + 0x56, 0x82, 0xdf, 0x39, 0xf3, 0x3b, 0x1d, 0x62, + 0x26, 0x97, 0xb7, 0x93, 0x25, 0xc7, 0xec, 0x7e, + 0xf7, + /* e: empty element */ + 0x02, 0x00, +}; + +static const uint8_t rsa_private_key_empty_element[] = { + /* RSAPrivateKey, offset: 0, length: 19 */ + 0x30, 0x81, 0x13, + /* version, offset: 4, length: 1 */ + 0x02, 0x01, 0x00, + /* n: empty element */ + 0x02, 0x00, + /* e: empty element */ + 0x02, 0x00, + /* d: empty element */ + 0x02, 0x00, + /* p: empty element */ + 0x02, 0x00, + /* q: empty element */ + 0x02, 0x00, + /* dp: empty element */ + 0x02, 0x00, + /* dq: empty element */ + 0x02, 0x00, + /* u: empty element */ + 0x02, 0x00, +}; + +static const uint8_t rsa_public_key_invalid_length_val[] = { + /* RSAPublicKey, INVALID length: 313 */ + 0x30, 0x82, 0x01, 0x39, + /* n, offset: 7, length: 65 */ + 0x02, 0x41, + 0x00, 0xb9, 0xe1, 0x22, 0xdb, 0x56, 0x2f, 0xb6, + 0xf7, 0xf0, 0x0a, 0x87, 0x43, 0x07, 0x12, 0xdb, + 0x6d, 0xb6, 0x2b, 0x41, 0x8d, 0x2c, 0x3c, 0xa5, + 0xdd, 0x78, 0x9a, 0x8f, 0xab, 0x8e, 0xf2, 0x4a, + 0xc8, 0x34, 0x0c, 0x12, 0x4f, 0x11, 0x90, 0xc6, + 
0xc2, 0xa5, 0xd0, 0xcd, 0xfb, 0xfc, 0x2c, 0x95, + 0x56, 0x82, 0xdf, 0x39, 0xf3, 0x3b, 0x1d, 0x62, + 0x26, 0x97, 0xb7, 0x93, 0x25, 0xc7, 0xec, 0x7e, + 0xf7, + /* e, */ + 0x02, 0x03, 0x01, 0x00, 0x01, /* INTEGER, offset: 74, length: 3 */ +}; + +static const uint8_t rsa_public_key_extra_elem[] = { + /* RSAPublicKey, length: 80 */ + 0x30, 0x81, 0x50, + /* n, offset: 7, length: 65 */ + 0x02, 0x41, + 0x00, 0xb9, 0xe1, 0x22, 0xdb, 0x56, 0x2f, 0xb6, + 0xf7, 0xf0, 0x0a, 0x87, 0x43, 0x07, 0x12, 0xdb, + 0x6d, 0xb6, 0x2b, 0x41, 0x8d, 0x2c, 0x3c, 0xa5, + 0xdd, 0x78, 0x9a, 0x8f, 0xab, 0x8e, 0xf2, 0x4a, + 0xc8, 0x34, 0x0c, 0x12, 0x4f, 0x11, 0x90, 0xc6, + 0xc2, 0xa5, 0xd0, 0xcd, 0xfb, 0xfc, 0x2c, 0x95, + 0x56, 0x82, 0xdf, 0x39, 0xf3, 0x3b, 0x1d, 0x62, + 0x26, 0x97, 0xb7, 0x93, 0x25, 0xc7, 0xec, 0x7e, + 0xf7, + /* e, offset: 74, length: 3 */ + 0x02, 0x03, 0x01, 0x00, 0x01, + /* Additional integer field, length 3 */ + 0x02, 0x06, 0xe1, 0x22, 0xdb, 0xe1, 0x22, 0xdb, +}; + +typedef struct QCryptoRSAKeyTestData QCryptoRSAKeyTestData; +struct QCryptoRSAKeyTestData { + const char *path; + QCryptoAkCipherKeyType key_type; + QCryptoAkCipherOptions opt; + const uint8_t *key; + size_t keylen; + bool is_valid_key; + size_t exp_key_len; +}; + +typedef struct QCryptoAkCipherTestData QCryptoAkCipherTestData; +struct QCryptoAkCipherTestData { + const char *path; + QCryptoAkCipherOptions opt; + + const uint8_t *priv_key; + size_t priv_key_len; + const uint8_t *pub_key; + size_t pub_key_len; + + const uint8_t *plaintext; + size_t plen; + const uint8_t *ciphertext; + size_t clen; + const uint8_t *dgst; + size_t dlen; + const uint8_t *signature; + size_t slen; +}; + +static QCryptoRSAKeyTestData rsakey_test_data[] = { + { + .path = "/crypto/akcipher/rsakey-1024-public", + .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, + .key = rsa1024_public_key, + .keylen = sizeof(rsa1024_public_key), + .is_valid_key = true, + .exp_key_len = 128, + }, + { + .path = "/crypto/akcipher/rsakey-1024-private", + .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, + .key = rsa1024_private_key, + .keylen = sizeof(rsa1024_private_key), + .is_valid_key = true, + .exp_key_len = 128, + }, + { + .path = "/crypto/akcipher/rsakey-2048-public", + .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, + .key = rsa2048_public_key, + .keylen = sizeof(rsa2048_public_key), + .is_valid_key = true, + .exp_key_len = 256, + }, + { + .path = "/crypto/akcipher/rsakey-2048-private", + .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, + .key = rsa2048_private_key, + .keylen = sizeof(rsa2048_private_key), + .is_valid_key = true, + .exp_key_len = 256, + }, + { + .path = "/crypto/akcipher/rsakey-public-lack-elem", + .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, + .key = rsa_public_key_lack_element, + .keylen = sizeof(rsa_public_key_lack_element), + .is_valid_key = false, + }, + { + .path = "/crypto/akcipher/rsakey-private-lack-elem", + .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, + .key = rsa_private_key_lack_element, + .keylen = sizeof(rsa_private_key_lack_element), + .is_valid_key = false, + }, + { + .path = "/crypto/akcipher/rsakey-public-empty-elem", + .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, + .key = rsa_public_key_empty_element, + .keylen = sizeof(rsa_public_key_empty_element), + .is_valid_key = false, + }, + { + .path = "/crypto/akcipher/rsakey-private-empty-elem", + .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, + .key = rsa_private_key_empty_element, + .keylen = sizeof(rsa_private_key_empty_element), + .is_valid_key = false, + }, + { + .path = 
"/crypto/akcipher/rsakey-public-empty-key", + .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, + .key = NULL, + .keylen = 0, + .is_valid_key = false, + }, + { + .path = "/crypto/akcipher/rsakey-private-empty-key", + .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, + .key = NULL, + .keylen = 0, + .is_valid_key = false, + }, + { + .path = "/crypto/akcipher/rsakey-public-invalid-length-val", + .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, + .key = rsa_public_key_invalid_length_val, + .keylen = sizeof(rsa_public_key_invalid_length_val), + .is_valid_key = false, + }, + { + .path = "/crypto/akcipher/rsakey-public-extra-elem", + .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, + .key = rsa_public_key_extra_elem, + .keylen = sizeof(rsa_public_key_extra_elem), + .is_valid_key = false, + }, +}; + +static QCryptoAkCipherTestData akcipher_test_data[] = { + /* rsa1024 with raw padding */ + { + .path = "/crypto/akcipher/rsa1024-raw", + .opt = { + .alg = QCRYPTO_AKCIPHER_ALG_RSA, + .u.rsa = { + .padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW, + }, + }, + .pub_key = rsa1024_public_key, + .pub_key_len = sizeof(rsa1024_public_key), + .priv_key = rsa1024_private_key, + .priv_key_len = sizeof(rsa1024_private_key), + + .plaintext = test_plaintext, + .plen = 128, + .ciphertext = exp_ciphertext_rsa1024_raw, + .clen = sizeof(exp_ciphertext_rsa1024_raw), + }, + + /* rsa1024 with pkcs1 padding */ + { + .path = "/crypto/akcipher/rsa1024-pkcs1", + .opt = { + .alg = QCRYPTO_AKCIPHER_ALG_RSA, + .u.rsa = { + .padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1, + .hash_alg = QCRYPTO_HASH_ALG_SHA1, + }, + }, + .pub_key = rsa1024_public_key, + .pub_key_len = sizeof(rsa1024_public_key), + .priv_key = rsa1024_private_key, + .priv_key_len = sizeof(rsa1024_private_key), + + .plaintext = test_plaintext, + .plen = 64, + .ciphertext = exp_ciphertext_rsa1024_pkcs1, + .clen = sizeof(exp_ciphertext_rsa1024_pkcs1), + .dgst = test_sha1_dgst, + .dlen = sizeof(test_sha1_dgst), + .signature = exp_signature_rsa1024_pkcs1, + .slen = sizeof(exp_signature_rsa1024_pkcs1), + }, + + /* rsa2048 with raw padding */ + { + .path = "/crypto/akcipher/rsa2048-raw", + .opt = { + .alg = QCRYPTO_AKCIPHER_ALG_RSA, + .u.rsa = { + .padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW, + }, + }, + .pub_key = rsa2048_public_key, + .pub_key_len = sizeof(rsa2048_public_key), + .priv_key = rsa2048_private_key, + .priv_key_len = sizeof(rsa2048_private_key), + + .plaintext = test_plaintext, + .plen = 256, + .ciphertext = exp_ciphertext_rsa2048_raw, + .clen = sizeof(exp_ciphertext_rsa2048_raw), + }, + + /* rsa2048 with pkcs1 padding */ + { + .path = "/crypto/akcipher/rsa2048-pkcs1", + .opt = { + .alg = QCRYPTO_AKCIPHER_ALG_RSA, + .u.rsa = { + .padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1, + .hash_alg = QCRYPTO_HASH_ALG_SHA1, + }, + }, + .pub_key = rsa2048_public_key, + .pub_key_len = sizeof(rsa2048_public_key), + .priv_key = rsa2048_private_key, + .priv_key_len = sizeof(rsa2048_private_key), + + .plaintext = test_plaintext, + .plen = 128, + .ciphertext = exp_ciphertext_rsa2048_pkcs1, + .clen = sizeof(exp_ciphertext_rsa2048_pkcs1), + .dgst = test_sha1_dgst, + .dlen = sizeof(test_sha1_dgst), + .signature = exp_signature_rsa2048_pkcs1, + .slen = sizeof(exp_signature_rsa2048_pkcs1), + }, + +}; + +static void test_akcipher(const void *opaque) +{ + const QCryptoAkCipherTestData *data = opaque; + g_autofree uint8_t *plaintext = NULL; + g_autofree uint8_t *ciphertext = NULL; + g_autofree uint8_t *signature = NULL; + QCryptoAkCipher *pub_key, *priv_key; + + if 
(!qcrypto_akcipher_supports((QCryptoAkCipherOptions *)&data->opt)) { + return; + } + pub_key = qcrypto_akcipher_new(&data->opt, + QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, + data->pub_key, data->pub_key_len, + &error_abort); + g_assert(pub_key != NULL); + priv_key = qcrypto_akcipher_new(&data->opt, + QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, + data->priv_key, data->priv_key_len, + &error_abort); + g_assert(priv_key != NULL); + + if (data->plaintext != NULL) { + + ciphertext = g_new0(uint8_t, data->clen); + g_assert(qcrypto_akcipher_encrypt(pub_key, data->plaintext, data->plen, + ciphertext, data->clen, + &error_abort) > 0); + + /** + * In the asymmetric encryption algorithms, the ciphertext generated + * each time may be different, here only compare the decrypted + * plaintext + */ + plaintext = g_new0(uint8_t, data->clen); + g_assert(qcrypto_akcipher_decrypt(priv_key, ciphertext, + data->clen, plaintext, + data->plen, + &error_abort) == data->plen); + g_assert(!memcmp(plaintext, data->plaintext, data->plen)); + } + + if (data->signature != NULL) { + signature = g_new(uint8_t, data->slen); + g_assert(qcrypto_akcipher_sign(priv_key, data->dgst, data->dlen, + signature, data->slen, + &error_abort) > 0); + /** + * The signature generated each time may be different, here only check + * the verification. + */ + g_assert(qcrypto_akcipher_verify(pub_key, data->signature, data->slen, + data->dgst, data->dlen, + &error_abort) == 0); + g_assert(qcrypto_akcipher_verify(pub_key, signature, data->slen, + data->dgst, data->dlen, + &error_abort) == 0); + ++signature[0]; + /* Here error should be ignored */ + g_assert(qcrypto_akcipher_verify(pub_key, signature, data->slen, + data->dgst, data->dlen, NULL) != 0); + } + + qcrypto_akcipher_free(pub_key); + qcrypto_akcipher_free(priv_key); +} + +static void test_rsakey(const void *opaque) +{ + const QCryptoRSAKeyTestData *data = (const QCryptoRSAKeyTestData *)opaque; + QCryptoAkCipherOptions opt = { + .alg = QCRYPTO_AKCIPHER_ALG_RSA, + .u.rsa = { + .padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1, + .hash_alg = QCRYPTO_HASH_ALG_SHA1, + } + }; + g_autoptr(QCryptoAkCipher) key = qcrypto_akcipher_new( + &opt, data->key_type, data->key, data->keylen, NULL); + + if (!qcrypto_akcipher_supports(&opt)) { + return; + } + + if (!data->is_valid_key) { + g_assert(key == NULL); + return; + } + + g_assert(key != NULL); + g_assert(qcrypto_akcipher_max_ciphertext_len(key) == data->exp_key_len); + g_assert(qcrypto_akcipher_max_plaintext_len(key) == data->exp_key_len); + g_assert(qcrypto_akcipher_max_signature_len(key) == data->exp_key_len); + g_assert(qcrypto_akcipher_max_dgst_len(key) == data->exp_key_len); +} + +int main(int argc, char **argv) +{ + size_t i; + g_test_init(&argc, &argv, NULL); + g_assert(qcrypto_init(NULL) == 0); + + for (i = 0; i < G_N_ELEMENTS(akcipher_test_data); i++) { + g_test_add_data_func(akcipher_test_data[i].path, + &akcipher_test_data[i], + test_akcipher); + } + for (i = 0; i < G_N_ELEMENTS(rsakey_test_data); i++) { + g_test_add_data_func(rsakey_test_data[i].path, + &rsakey_test_data[i], + test_rsakey); + } + + return g_test_run(); +} diff --git a/tests/unit/test-crypto-block.c b/tests/unit/test-crypto-block.c index 3b1f0d509f..b629e240a9 100644 --- a/tests/unit/test-crypto-block.c +++ b/tests/unit/test-crypto-block.c @@ -22,6 +22,7 @@ #include "qapi/error.h" #include "crypto/init.h" #include "crypto/block.h" +#include "crypto/block-luks-priv.h" #include "qemu/buffer.h" #include "qemu/module.h" #include "crypto/secret.h" @@ -30,7 +31,8 @@ #endif #if (defined(_WIN32) 
|| defined RUSAGE_THREAD) && \ - (defined(CONFIG_NETTLE) || defined(CONFIG_GCRYPT)) + (defined(CONFIG_NETTLE) || defined(CONFIG_GCRYPT) || \ + defined(CONFIG_GNUTLS_CRYPTO)) #define TEST_LUKS #else #undef TEST_LUKS @@ -188,12 +190,12 @@ static struct QCryptoBlockTestData { }; -static ssize_t test_block_read_func(QCryptoBlock *block, - size_t offset, - uint8_t *buf, - size_t buflen, - void *opaque, - Error **errp) +static int test_block_read_func(QCryptoBlock *block, + size_t offset, + uint8_t *buf, + size_t buflen, + void *opaque, + Error **errp) { Buffer *header = opaque; @@ -201,14 +203,14 @@ static ssize_t test_block_read_func(QCryptoBlock *block, memcpy(buf, header->buffer + offset, buflen); - return buflen; + return 0; } -static ssize_t test_block_init_func(QCryptoBlock *block, - size_t headerlen, - void *opaque, - Error **errp) +static int test_block_init_func(QCryptoBlock *block, + size_t headerlen, + void *opaque, + Error **errp) { Buffer *header = opaque; @@ -216,16 +218,16 @@ static ssize_t test_block_init_func(QCryptoBlock *block, buffer_reserve(header, headerlen); - return headerlen; + return 0; } -static ssize_t test_block_write_func(QCryptoBlock *block, - size_t offset, - const uint8_t *buf, - size_t buflen, - void *opaque, - Error **errp) +static int test_block_write_func(QCryptoBlock *block, + size_t offset, + const uint8_t *buf, + size_t buflen, + void *opaque, + Error **errp) { Buffer *header = opaque; @@ -234,7 +236,7 @@ static ssize_t test_block_write_func(QCryptoBlock *block, memcpy(header->buffer + offset, buf, buflen); header->offset = offset + buflen; - return buflen; + return 0; } @@ -344,6 +346,230 @@ static void test_block(gconstpointer opaque) } +#ifdef TEST_LUKS +typedef const char *(*LuksHeaderDoBadStuff)(QCryptoBlockLUKSHeader *hdr); + +static void +test_luks_bad_header(gconstpointer data) +{ + LuksHeaderDoBadStuff badstuff = data; + QCryptoBlock *blk; + Buffer buf; + Object *sec = test_block_secret(); + QCryptoBlockLUKSHeader hdr; + Error *err = NULL; + const char *msg; + + memset(&buf, 0, sizeof(buf)); + buffer_init(&buf, "header"); + + /* Correctly create the volume initially */ + blk = qcrypto_block_create(&luks_create_opts_default, NULL, + test_block_init_func, + test_block_write_func, + &buf, + &error_abort); + g_assert(blk); + + qcrypto_block_free(blk); + + /* Mangle it in some unpleasant way */ + g_assert(buf.offset >= sizeof(hdr)); + memcpy(&hdr, buf.buffer, sizeof(hdr)); + qcrypto_block_luks_to_disk_endian(&hdr); + + msg = badstuff(&hdr); + + qcrypto_block_luks_from_disk_endian(&hdr); + memcpy(buf.buffer, &hdr, sizeof(hdr)); + + /* Check that we fail to open it again */ + blk = qcrypto_block_open(&luks_open_opts, NULL, + test_block_read_func, + &buf, + 0, + 1, + &err); + g_assert(!blk); + g_assert(err); + + g_assert_cmpstr(error_get_pretty(err), ==, msg); + error_free(err); + + object_unparent(sec); + + buffer_free(&buf); +} + +static const char *luks_bad_null_term_cipher_name(QCryptoBlockLUKSHeader *hdr) +{ + /* Replace NUL termination with spaces */ + char *offset = hdr->cipher_name + strlen(hdr->cipher_name); + memset(offset, ' ', sizeof(hdr->cipher_name) - (offset - hdr->cipher_name)); + + return "LUKS header cipher name is not NUL terminated"; +} + +static const char *luks_bad_null_term_cipher_mode(QCryptoBlockLUKSHeader *hdr) +{ + /* Replace NUL termination with spaces */ + char *offset = hdr->cipher_mode + strlen(hdr->cipher_mode); + memset(offset, ' ', sizeof(hdr->cipher_mode) - (offset - hdr->cipher_mode)); + + return "LUKS header cipher 
mode is not NUL terminated"; +} + +static const char *luks_bad_null_term_hash_spec(QCryptoBlockLUKSHeader *hdr) +{ + /* Replace NUL termination with spaces */ + char *offset = hdr->hash_spec + strlen(hdr->hash_spec); + memset(offset, ' ', sizeof(hdr->hash_spec) - (offset - hdr->hash_spec)); + + return "LUKS header hash spec is not NUL terminated"; +} + +static const char *luks_bad_cipher_name_empty(QCryptoBlockLUKSHeader *hdr) +{ + memcpy(hdr->cipher_name, "", 1); + + return "Algorithm '' with key size 32 bytes not supported"; +} + +static const char *luks_bad_cipher_name_unknown(QCryptoBlockLUKSHeader *hdr) +{ + memcpy(hdr->cipher_name, "aess", 5); + + return "Algorithm 'aess' with key size 32 bytes not supported"; +} + +static const char *luks_bad_cipher_xts_size(QCryptoBlockLUKSHeader *hdr) +{ + hdr->master_key_len = 33; + + return "XTS cipher key length should be a multiple of 2"; +} + +static const char *luks_bad_cipher_cbc_size(QCryptoBlockLUKSHeader *hdr) +{ + hdr->master_key_len = 33; + memcpy(hdr->cipher_mode, "cbc-essiv", 10); + + return "Algorithm 'aes' with key size 33 bytes not supported"; +} + +static const char *luks_bad_cipher_mode_empty(QCryptoBlockLUKSHeader *hdr) +{ + memcpy(hdr->cipher_mode, "", 1); + + return "Unexpected cipher mode string format ''"; +} + +static const char *luks_bad_cipher_mode_unknown(QCryptoBlockLUKSHeader *hdr) +{ + memcpy(hdr->cipher_mode, "xfs", 4); + + return "Unexpected cipher mode string format 'xfs'"; +} + +static const char *luks_bad_ivgen_separator(QCryptoBlockLUKSHeader *hdr) +{ + memcpy(hdr->cipher_mode, "xts:plain64", 12); + + return "Unexpected cipher mode string format 'xts:plain64'"; +} + +static const char *luks_bad_ivgen_name_empty(QCryptoBlockLUKSHeader *hdr) +{ + memcpy(hdr->cipher_mode, "xts-", 5); + + return "IV generator '' not supported"; +} + +static const char *luks_bad_ivgen_name_unknown(QCryptoBlockLUKSHeader *hdr) +{ + memcpy(hdr->cipher_mode, "xts-plain65", 12); + + return "IV generator 'plain65' not supported"; +} + +static const char *luks_bad_ivgen_hash_empty(QCryptoBlockLUKSHeader *hdr) +{ + memcpy(hdr->cipher_mode, "xts-plain65:", 13); + + return "Hash algorithm '' not supported"; +} + +static const char *luks_bad_ivgen_hash_unknown(QCryptoBlockLUKSHeader *hdr) +{ + memcpy(hdr->cipher_mode, "xts-plain65:sha257", 19); + + return "Hash algorithm 'sha257' not supported"; +} + +static const char *luks_bad_hash_spec_empty(QCryptoBlockLUKSHeader *hdr) +{ + memcpy(hdr->hash_spec, "", 1); + + return "Hash algorithm '' not supported"; +} + +static const char *luks_bad_hash_spec_unknown(QCryptoBlockLUKSHeader *hdr) +{ + memcpy(hdr->hash_spec, "sha2566", 8); + + return "Hash algorithm 'sha2566' not supported"; +} + +static const char *luks_bad_stripes(QCryptoBlockLUKSHeader *hdr) +{ + hdr->key_slots[0].stripes = 3999; + + return "Keyslot 0 is corrupted (stripes 3999 != 4000)"; +} + +static const char *luks_bad_key_overlap_header(QCryptoBlockLUKSHeader *hdr) +{ + hdr->key_slots[0].key_offset_sector = 2; + + return "Keyslot 0 is overlapping with the LUKS header"; +} + +static const char *luks_bad_key_overlap_key(QCryptoBlockLUKSHeader *hdr) +{ + hdr->key_slots[0].key_offset_sector = hdr->key_slots[1].key_offset_sector; + + return "Keyslots 0 and 1 are overlapping in the header"; +} + +static const char *luks_bad_key_overlap_payload(QCryptoBlockLUKSHeader *hdr) +{ + hdr->key_slots[0].key_offset_sector = hdr->payload_offset_sector + 42; + + return "Keyslot 0 is overlapping with the encrypted payload"; +} + +static const char 
*luks_bad_payload_overlap_header(QCryptoBlockLUKSHeader *hdr) +{ + hdr->payload_offset_sector = 2; + + return "LUKS payload is overlapping with the header"; +} + +static const char *luks_bad_key_iterations(QCryptoBlockLUKSHeader *hdr) +{ + hdr->key_slots[0].iterations = 0; + + return "Keyslot 0 iteration count is zero"; +} + +static const char *luks_bad_iterations(QCryptoBlockLUKSHeader *hdr) +{ + hdr->master_key_iterations = 0; + + return "LUKS key iteration count is zero"; +} +#endif + int main(int argc, char **argv) { gsize i; @@ -364,5 +590,79 @@ int main(int argc, char **argv) } } +#ifdef TEST_LUKS + if (g_test_slow()) { + g_test_add_data_func("/crypto/block/luks/bad/cipher-name-nul-term", + luks_bad_null_term_cipher_name, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/cipher-mode-nul-term", + luks_bad_null_term_cipher_mode, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/hash-spec-nul-term", + luks_bad_null_term_hash_spec, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/cipher-name-empty", + luks_bad_cipher_name_empty, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/cipher-name-unknown", + luks_bad_cipher_name_unknown, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/cipher-xts-size", + luks_bad_cipher_xts_size, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/cipher-cbc-size", + luks_bad_cipher_cbc_size, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/cipher-mode-empty", + luks_bad_cipher_mode_empty, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/cipher-mode-unknown", + luks_bad_cipher_mode_unknown, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/ivgen-separator", + luks_bad_ivgen_separator, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/ivgen-name-empty", + luks_bad_ivgen_name_empty, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/ivgen-name-unknown", + luks_bad_ivgen_name_unknown, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/ivgen-hash-empty", + luks_bad_ivgen_hash_empty, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/ivgen-hash-unknown", + luks_bad_ivgen_hash_unknown, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/hash-spec-empty", + luks_bad_hash_spec_empty, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/hash-spec-unknown", + luks_bad_hash_spec_unknown, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/stripes", + luks_bad_stripes, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/key-overlap-header", + luks_bad_key_overlap_header, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/key-overlap-key", + luks_bad_key_overlap_key, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/key-overlap-payload", + luks_bad_key_overlap_payload, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/payload-overlap-header", + luks_bad_payload_overlap_header, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/iterations", + luks_bad_iterations, + test_luks_bad_header); + g_test_add_data_func("/crypto/block/luks/bad/key-iterations", + luks_bad_key_iterations, + test_luks_bad_header); + } +#endif + return g_test_run(); } diff --git a/tests/unit/test-crypto-der.c b/tests/unit/test-crypto-der.c new file mode 100644 
index 0000000000..d218a7f170 --- /dev/null +++ b/tests/unit/test-crypto-der.c @@ -0,0 +1,372 @@ +/* + * QEMU Crypto akcipher algorithms + * + * Copyright (c) 2022 Bytedance + * Author: lei he + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +#include "qemu/osdep.h" +#include "crypto/der.h" + +/* rsa(512) private key, generated by openssl */ +static const uint8_t test_rsa512_priv_key[] = + "\x30\x82\x01\x39" /* SEQUENCE, offset: 0, length: 313 */ + "\x02\x01\x00" /* INTEGER, offset: 4, length: 1 */ + "\x02\x41" /* INTEGER, offset: 7, length: 65 */ + "\x00\xb9\xe1\x22\xdb\x56\x2f\xb6\xf7\xf0\x0a\x87\x43\x07\x12\xdb" + "\x6d\xb6\x2b\x41\x8d\x2c\x3c\xa5\xdd\x78\x9a\x8f\xab\x8e\xf2\x4a" + "\xc8\x34\x0c\x12\x4f\x11\x90\xc6\xc2\xa5\xd0\xcd\xfb\xfc\x2c\x95" + "\x56\x82\xdf\x39\xf3\x3b\x1d\x62\x26\x97\xb7\x93\x25\xc7\xec\x7e" + "\xf7" + "\x02\x03\x01\x00\x01" /* INTEGER, offset: 74, length: 3 */ + "\x02\x40" /* INTEGER, offset: 79, length: 64 */ + "\x1e\x80\xfe\xda\x65\xdb\x70\xb8\x61\x91\x28\xbf\x6c\x32\xc1\x05" + "\xd1\x26\x6a\x1c\x83\xcc\xf4\x1f\x53\x42\x72\x1f\x62\x57\x0a\xc4" + "\x66\x76\x30\x87\xb9\xb1\xb9\x6a\x63\xfd\x8f\x3e\xfc\x35\x3f\xd6" + "\x2e\x6c\xc8\x70\x8a\x17\xc1\x28\x6a\xfe\x51\x56\xb3\x92\x6f\x09" + "\x02\x21" /* INTEGER, offset: 145, length: 33 */ + "\x00\xe3\x2e\x2d\x8d\xba\x1c\x34\x4c\x49\x9f\xc1\xa6\xdd\xd7\x13" + "\x8d\x05\x48\xdd\xff\x5c\x30\xbc\x6b\xc4\x18\x9d\xfc\xa2\xd0\x9b" + "\x4d" + "\x02\x21" /* INTEGER, offset: 180, length: 33 */ + "\x00\xd1\x75\xaf\x4b\xc6\x1a\xb0\x98\x14\x42\xae\x33\xf3\x44\xde" + "\x21\xcb\x04\xda\xfb\x1e\x35\x92\xcd\x69\xc0\x83\x06\x83\x8e\x39" + "\x53" + "\x02\x20" /* INTEGER, offset: 215, length: 32 */ + "\x68\x8d\x2a\xf7\xcb\xcc\x09\x21\x86\xcc\x98\x21\xc4\x7c\xa4\x09" + "\xc5\x81\xd8\x71\x1a\x2b\x6f\xbb\xa4\xde\xb3\x6e\xbe\x3b\x85\x0d" + "\x02\x20" /* INTEGER, offset: 249, length: 32 */ + "\x64\x06\x0e\xef\xe0\x6a\x5e\x6a\x41\x42\x96\x6d\xb8\x7d\xea\x95" + "\xb8\x9d\x58\xf5\x12\x38\x03\x22\x94\x9d\x99\xf4\x42\x5e\x68\x81" + "\x02\x20" /* INTEGER, offset: 283, length: 32 */ + "\x7f\x1d\x87\xe8\x55\x30\x75\xc7\x29\xec\xc9\x65\x76\x5a\x6a\xa3" + "\x4a\x6e\xe1\x26\x65\xd1\x76\xd5\xb9\xd1\x8b\xa8\x73\xe2\x6a\x9e"; + +static const uint8_t test_rsa2048_priv_key[] = + "\x30\x82\x04\xa6" /* SEQUENCE, offset: 0, length 1190 */ + "\x02\x01\x00" /* INTEGER, offset: 4, length: 1 */ + "\x02\x82\x01\x01" /* INTEGER, offset: 7, length: 257 */ + "\x00\xd1\x48\xc2\xc1\x1d\x4f\x94\xf2\xbb\x9b\xe2\x2d\xe1\xea\x4c" + "\xce\x41\x72\xe3\x41\x7e\x9d\x91\x85\xa3\x4e\xe1\x2c\xf6\x52\x6d" + "\xf9\x84\x64\xdf\x87\x28\x4a\xc9\x9d\x78\x93\x47\xc8\xd9\x66\x2e" + "\xf4\xc6\xf0\x32\x15\x1a\xe8\xaf\x5a\xca\x3a\xd3\x3e\xf6\xde\x86" + "\xdd\x9b\xa6\x4d\x74\x58\xf0\x11\x7f\x66\xd5\x1c\xd8\xde\xa3\xf8" + "\xa3\xfc\x33\x55\x89\xa9\xc3\xea\x5b\x2e\x31\x06\xf8\xcb\x9e\x6e" + "\xb2\x68\x0d\xe6\xc3\x5c\x2d\xf8\xa2\xbd\x00\x1a\xf6\xb6\xdd\x14" + 
"\x8d\x11\x6d\x2d\xc6\x0c\x09\xe6\xf6\xb9\x8b\x87\x4c\x9f\x4d\x63" + "\xd3\x94\xf4\x32\xca\xcf\x5e\xbf\xe2\x7f\x73\x5a\x65\xec\x82\x0d" + "\x7f\x30\x25\x03\xd4\x3a\xff\xa2\xe8\xd6\xb5\x1f\x4f\x36\x64\x61" + "\xc3\x5f\xb2\x9e\x0c\x53\x04\x19\x34\x99\xe8\xe3\xe6\xd3\x2f\x45" + "\x58\x8e\x5d\x54\x5a\xa0\xc0\x5e\x51\x9b\x22\x15\xec\x26\x6f\x72" + "\x68\xe9\xbf\x5d\x1d\xb5\xd9\xe4\x81\x1a\x92\x66\xa8\xcb\x73\x46" + "\xab\x96\x7b\xf8\x9c\xf5\xb5\x9e\x2b\x13\x71\xe0\x01\x0c\x59\x1b" + "\x63\x9f\xb7\xd1\xcd\x47\x8e\xc7\x3a\xbe\xcb\x47\xa7\x23\x43\xa7" + "\x7d\xbd\x2c\x4e\x22\x37\xcc\xf9\x1b\x1b\xbb\xed\xec\xf0\x47\x92" + "\x43" + "\x02\x03\x01\x00\x01" /* INTEGER, offset 268, length 3 */ + "\x02\x82\x01\x01" /* INTEGER, offset 273, length 257 */ + "\x00\x8d\x21\x97\x0c\x29\x9a\xf8\x23\xf4\x76\x3b\xc1\x9b\x3e\xa8" + "\x8a\xd2\xc2\x0a\x14\xa9\xb0\xd2\x68\x9f\x67\x5b\x1c\x3a\x03\xfe" + "\x5b\xac\x77\x65\xf1\xbc\x2f\x2a\xe5\x01\x61\xb8\x9f\xee\x53\x25" + "\x49\x36\x3a\xd6\x5b\x3b\x29\x3c\xcf\x69\xde\xdf\x83\xef\x70\xc2" + "\xdc\x00\xd1\xd6\x1b\xa6\xba\x45\xe2\x77\x53\x31\xbf\xe1\xec\x0b" + "\x89\x72\x52\x9f\xd5\x54\xe1\x64\x52\x16\xc5\x43\x21\x56\x16\xc2" + "\x29\x97\x58\x00\x8d\x2f\xc5\x64\x8d\x42\x0d\x27\x21\xc6\xd1\x31" + "\xc1\xab\xc5\xc7\x7f\x6d\xb0\xe3\xca\xef\xf6\xf2\xc7\xae\x09\xbf" + "\x4d\xc0\x4e\x90\x2c\x28\xb9\xcc\x22\x74\xf2\xd5\xff\x4d\x86\xf6" + "\xec\x45\x1f\xbf\x25\x4c\x30\x26\x76\x4f\x09\x13\x83\xef\x35\x73" + "\xa3\xa2\xb1\x40\xcf\x07\x7a\x83\xae\xea\x00\xea\x74\xc7\x54\x6a" + "\x88\x19\xed\x35\xd3\x7e\x5e\xac\x51\xc1\x1e\x5e\x2c\x57\x72\x20" + "\x10\x6a\x0c\x47\xe1\xf0\x36\x70\xd2\xa7\x57\x64\x47\x46\x9f\xca" + "\x23\x8a\x48\x50\x1d\x33\x6a\x86\x46\x69\xed\x54\x65\x6b\x9e\xab" + "\x1f\x84\x87\xf4\x92\x8a\x6c\x44\x20\xaa\x8d\xd8\x50\xde\x45\x74" + "\xe0\xa8\xc7\xb9\x38\x74\x24\x51\x33\xf0\x39\x54\x6c\x11\xae\xc2" + "\x29" + "\x02\x81\x81" /* INTEGER, offset 534, length 129 */ + "\x00\xe8\x26\xd1\xf9\xa0\xd3\x0e\x3f\x2f\x89\x9b\x94\x16\x12\xd1" + "\xae\x3c\x53\x9c\xcf\xc6\xf7\x03\xf5\xdf\x39\xdc\x25\x5d\xcb\xb8" + "\xb9\x74\x3e\x3b\x36\xf6\xa0\x8d\xb1\x0e\xd8\xfe\x8c\xcd\x01\x13" + "\x77\x73\x08\x0f\x32\xbd\xe6\x95\xdc\xd0\x14\x7d\x44\xdc\x3e\xd9" + "\xaa\x8a\x32\xe6\x0e\x76\xb6\x05\xc5\x6b\x87\x78\x9a\x32\xe2\xf8" + "\x78\xba\x58\x75\x58\xd5\x26\x9d\x9a\x0f\xb6\xca\xb5\x27\xd8\x58" + "\xae\x3f\x49\x54\xd2\x2b\xac\x28\x39\x88\x31\x42\x12\x08\xea\x0b" + "\x39\x58\xae\xf3\x82\xa0\xe2\x75\x7c\x96\xa9\xb8\x57\x29\x6d\xd7" + "\x37" + "\x02\x81\x81" /* INTEGER, offset 666, length 129 */ + "\x00\xe6\xc8\x91\x50\x49\x97\x56\x70\x6e\x25\xf5\x77\x25\xa5\x41" + "\xfe\xd7\x25\x1b\xc1\x4a\xff\x37\x44\x2b\x46\xa0\xdf\xe8\x02\x09" + "\xdd\xa8\x41\xa1\x12\x84\x3c\xf8\xc2\x13\x3e\xb8\x4b\x22\x01\xac" + "\xa6\x09\xb2\xe9\xcd\xc8\x51\xee\xde\xa3\x1e\x6b\xfe\xb1\xf8\xb6" + "\x9e\x48\x36\x62\x0b\x05\xfa\x38\xc1\x06\x04\x58\x95\x4d\x25\x13" + "\x6d\x0b\x12\x0b\xc9\x6d\x59\xfc\x33\x03\x36\x01\x12\x09\x72\x74" + "\x5e\x98\x65\x66\x2f\x3a\xde\xd8\xd4\xee\x6f\x82\xe6\x36\x49\x12" + "\x6a\x94\x28\xe9\x28\x9e\xef\x29\xdc\xdf\xab\x94\x65\x02\x4e\x4b" + "\x55" + "\x02\x81\x81" /* INTEGER, offset 798, length 129 */ + "\x00\xc9\xda\xb7\x48\x6e\x66\x15\x45\x2b\x78\x63\x26\x67\xeb\x05" + "\x16\x92\xad\xc0\xf3\x88\xf4\xcf\x24\xc2\x6b\xf4\xd7\x28\xaf\x32" + "\x77\x4e\x73\xad\xd9\x24\xa8\x85\x8b\x26\x75\xd7\x1f\x66\x41\x41" + "\x43\xe3\x69\x66\x8d\xa0\x41\x16\x9d\x60\xef\xef\xdc\x28\x05\x1e" + "\x0e\x03\x0c\x2e\xac\xf4\xdb\x60\x39\x40\x3e\x12\xc7\x40\xe7\xc9" + 
"\x54\x6f\xf2\xea\x55\xcb\x40\x40\x58\xec\xc0\xeb\x90\x88\x8c\xbc" + "\xcf\x05\x88\x25\x90\x79\x18\xc0\x01\x06\x42\x8e\x48\x50\x27\xf0" + "\x8a\x74\x69\xea\xa1\xf2\x71\xf5\xe5\xd6\xba\xcb\xe6\x3d\xc7\x9c" + "\x11" + "\x02\x81\x81" /* INTEGER, offset 930, length 129 */ + "\x00\xc9\xf5\x04\xad\x34\xe9\x39\xdc\x83\x97\xb6\x3a\x40\xf8\x60" + "\x4b\x69\xec\xf0\x5f\xf3\x88\x69\xcd\xbe\xed\x3c\xc5\x14\x5c\x0c" + "\x54\x2b\xf4\xda\xc6\xc0\x70\x36\xe4\x67\x41\x00\xb7\xc7\x17\x9e" + "\x05\x63\x01\x6d\x77\x06\x71\x24\xcf\x32\x01\xe2\x51\xed\x5e\x90" + "\x38\xed\x4a\xa1\xfb\xb1\x8c\x69\xf4\x08\x96\xef\x0a\x20\x8b\x6c" + "\x77\x85\x33\x92\x9a\xff\x95\xba\x8c\xcd\xa7\x89\xc2\x46\x00\x21" + "\xf3\xd1\xfb\x12\x34\x0c\x99\x8d\x38\xb1\x3b\x66\x5a\x9d\x70\xce" + "\xab\xf3\xe1\xe5\x40\x05\xed\x97\x3d\xd1\x82\x6e\x07\x02\xc0\x8f" + "\x4d" + "\x02\x81\x81" /* INTEGER, offset 1062, length 129 */ + "\x00\xe4\x96\x79\xa8\x6a\x70\xdd\x67\x42\xff\x15\x11\x9e\x01\x71" + "\xac\xf1\x70\x7d\x87\xe2\x6e\x0c\x4d\xbb\x21\x15\xbb\xa7\x4e\x0c" + "\x09\x7e\x82\xca\x91\xbe\xd0\xdd\x9c\x8c\xb0\x77\x64\x30\x1b\x7e" + "\xbb\x69\xcb\x4c\xde\xd6\x6a\xb9\x72\x15\x79\xdc\x05\x99\x69\x8b" + "\x24\xa1\xad\x13\x35\x31\xc0\x0b\xf1\xd2\x06\x7c\x94\x1a\x21\x2f" + "\x02\xb9\xf0\xd0\xbb\xf7\xb7\x78\xf9\x3d\x76\x60\xd6\x6b\x5f\x35" + "\x88\x14\x33\xe6\xbc\xca\x6b\x88\x90\x57\x3b\x0c\xa3\x6e\x47\xdf" + "\x4e\x2f\x4c\xf9\xab\x97\x38\xe4\x20\x32\x32\x96\xc8\x9e\x79\xd3" + "\x12"; + +static const uint8_t test_ecdsa_p192_priv_key[] = + "\x30\x53" /* SEQUENCE, offset 0, length 83 */ + "\x02\x01\x01" /* INTEGER, offset 2, length 1 */ + "\x04\x18" /* OCTET STRING, offset 5, length 24 */ + "\xcb\xc8\x86\x0e\x66\x3c\xf7\x5a\x44\x13\xb8\xef\xea\x1d\x7b\xa6" + "\x1c\xda\xf4\x1b\xc7\x67\x6b\x35" + "\xa1\x34" /* CONTEXT SPECIFIC 1, offset 31, length 52 */ + "\x03\x32" /* BIT STRING, offset 33, length 50 */ + "\x00\x04\xc4\x16\xb3\xff\xac\xd5\x87\x98\xf7\xd9\x45\xfe\xd3\x5c" + "\x17\x9d\xb2\x36\x22\xcc\x07\xb3\x6d\x3c\x4e\x04\x5f\xeb\xb6\x52" + "\x58\xfb\x36\x10\x52\xb7\x01\x62\x0e\x94\x51\x1d\xe2\xef\x10\x82" + "\x88\x78"; + +static const uint8_t test_ecdsa_p256_priv_key[] = + "\x30\x77" /* SEQUENCE, offset 0, length 119 */ + "\x02\x01\x01" /* INTEGER, offset 2, length 1 */ + "\x04\x20" /* OCTET STRING, offset 5, length 32 */ + "\xf6\x92\xdd\x29\x1c\x6e\xef\xb6\xb2\x73\x9f\x40\x1b\xb3\x2a\x28" + "\xd2\x37\xd6\x4a\x5b\xe4\x40\x4c\x6a\x95\x99\xfa\xf7\x92\x49\xbe" + "\xa0\x0a" /* CONTEXT SPECIFIC 0, offset 39, length 10 */ + "\x06\x08" /* OID, offset 41, length 8 */ + "\x2a\x86\x48\xce\x3d\x03\x01\x07" + "\xa1\x44" /* CONTEXT SPECIFIC 1, offset 51, length 68 */ + "\x03\x42" /* BIT STRING, offset 53, length 66 */ + "\x00\x04\xed\x42\x9c\x67\x79\xbe\x46\x83\x88\x3e\x8c\xc1\x33\xf3" + "\xc3\xf6\x2c\xf3\x13\x6a\x00\xc2\xc9\x3e\x87\x7f\x86\x39\xe6\xae" + "\xe3\xb9\xba\x2f\x58\x63\x32\x62\x62\x54\x07\x27\xf9\x5a\x3a\xc7" + "\x3a\x6b\x5b\xbc\x0d\x33\xba\xbb\xd4\xa3\xff\x4f\x9e\xdd\xf5\x59" + "\xc0\xf6"; + +#define MAX_CHECKER_COUNT 32 + +static int qcrypto_wrapped_decode_ctx_tag0(const uint8_t **data, size_t *dlen, + QCryptoDERDecodeCb cb, void *opaque, + Error **errp) +{ + return qcrypto_der_decode_ctx_tag(data, dlen, 0, cb, opaque, errp); +} + +static int qcrypto_wrapped_decode_ctx_tag1(const uint8_t **data, size_t *dlen, + QCryptoDERDecodeCb cb, void *opaque, + Error **errp) +{ + return qcrypto_der_decode_ctx_tag(data, dlen, 1, cb, opaque, errp); +} + +typedef struct QCryptoAns1DecoderResultChecker QCryptoAns1DecoderResultChecker; +struct 
QCryptoAns1DecoderResultChecker { + int (*action) (const uint8_t **data, size_t *dlen, + QCryptoDERDecodeCb cb, void *opaque, Error **errp); + QCryptoDERDecodeCb cb; + bool constructed; + const uint8_t *exp_value; + size_t exp_vlen; +}; + +typedef struct QCryptoAns1DecoderTestData QCryptoAns1DecoderTestData; +struct QCryptoAns1DecoderTestData { + const char *path; + const uint8_t *test_data; + size_t test_data_len; + QCryptoAns1DecoderResultChecker checker[MAX_CHECKER_COUNT]; +}; + +typedef struct QCryptoAns1DecoderTestContext QCryptoAns1DecoderTestContext; +struct QCryptoAns1DecoderTestContext { + const uint8_t *data; + size_t dlen; +}; + +static int checker_callback(void *opaque, const uint8_t *value, + size_t vlen, Error **errp) +{ + QCryptoAns1DecoderResultChecker *checker = + (QCryptoAns1DecoderResultChecker *)opaque; + + g_assert(value == checker->exp_value); + g_assert(vlen == checker->exp_vlen); + return 0; +} + +static void test_ans1(const void *opaque) +{ + const QCryptoAns1DecoderTestData *test_data = + (QCryptoAns1DecoderTestData *)opaque; + QCryptoAns1DecoderTestContext ctx[MAX_CHECKER_COUNT]; + int seq_depth = 0, checker_idx = 0; + ctx[seq_depth].data = test_data->test_data; + ctx[seq_depth].dlen = test_data->test_data_len; + bool all_checker_completed = false; + + do { + const QCryptoAns1DecoderResultChecker *checker = + &test_data->checker[checker_idx++]; + QCryptoAns1DecoderTestContext *c = &ctx[seq_depth]; + if (!checker->action) { + all_checker_completed = true; + break; + } + g_assert(checker->action(&c->data, &c->dlen, checker_callback, + (void *)checker, &error_abort) + == checker->exp_vlen); + if (checker->constructed) { + ++seq_depth; + ctx[seq_depth].data = checker->exp_value; + ctx[seq_depth].dlen = checker->exp_vlen; + } + while (seq_depth != 0 && ctx[seq_depth].dlen == 0) { + --seq_depth; + } + + } while (true); + g_assert(seq_depth == 0); + g_assert(ctx[seq_depth].dlen == 0); + g_assert(all_checker_completed); +} + +static QCryptoAns1DecoderTestData test_data[] = { +{ + .path = "/crypto/der/parse-rsa512-priv-key", + .test_data = test_rsa512_priv_key, + .test_data_len = sizeof(test_rsa512_priv_key) - 1, + .checker = { + { qcrypto_der_decode_seq, checker_callback, true, + test_rsa512_priv_key + 4, 313 }, + { qcrypto_der_decode_int, checker_callback, false, + test_rsa512_priv_key + 4 + 2, 1 }, + { qcrypto_der_decode_int, checker_callback, false, + test_rsa512_priv_key + 7 + 2, 65 }, + { qcrypto_der_decode_int, checker_callback, false, + test_rsa512_priv_key + 74 + 2, 3 }, + { qcrypto_der_decode_int, checker_callback, false, + test_rsa512_priv_key + 79 + 2, 64 }, + { qcrypto_der_decode_int, checker_callback, false, + test_rsa512_priv_key + 145 + 2, 33 }, + { qcrypto_der_decode_int, checker_callback, false, + test_rsa512_priv_key + 180 + 2, 33 }, + { qcrypto_der_decode_int, checker_callback, false, + test_rsa512_priv_key + 215 + 2, 32 }, + { qcrypto_der_decode_int, checker_callback, false, + test_rsa512_priv_key + 249 + 2, 32 }, + { qcrypto_der_decode_int, checker_callback, false, + test_rsa512_priv_key + 283 + 2, 32 }, + }, +}, +{ + .path = "/crypto/der/parse-rsa2048-priv-key", + .test_data = test_rsa2048_priv_key, + .test_data_len = sizeof(test_rsa2048_priv_key) - 1, + .checker = { + { qcrypto_der_decode_seq, checker_callback, true, + test_rsa2048_priv_key + 4, 1190 }, + { qcrypto_der_decode_int, checker_callback, false, + test_rsa2048_priv_key + 4 + 2, 1 }, + { qcrypto_der_decode_int, checker_callback, false, + test_rsa2048_priv_key + 7 + 4, 257 }, + { 
qcrypto_der_decode_int, checker_callback, false, + test_rsa2048_priv_key + 268 + 2, 3 }, + { qcrypto_der_decode_int, checker_callback, false, + test_rsa2048_priv_key + 273 + 4, 257 }, + { qcrypto_der_decode_int, checker_callback, false, + test_rsa2048_priv_key + 534 + 3, 129 }, + { qcrypto_der_decode_int, checker_callback, false, + test_rsa2048_priv_key + 666 + 3, 129 }, + { qcrypto_der_decode_int, checker_callback, false, + test_rsa2048_priv_key + 798 + 3, 129 }, + { qcrypto_der_decode_int, checker_callback, false, + test_rsa2048_priv_key + 930 + 3, 129 }, + { qcrypto_der_decode_int, checker_callback, false, + test_rsa2048_priv_key + 1062 + 3, 129 }, + }, +}, +{ + .path = "/crypto/der/parse-ecdsa-p192-priv-key", + .test_data = test_ecdsa_p192_priv_key, + .test_data_len = sizeof(test_ecdsa_p192_priv_key) - 1, + .checker = { + { qcrypto_der_decode_seq, checker_callback, true, + test_ecdsa_p192_priv_key + 2, 83 }, + { qcrypto_der_decode_int, checker_callback, false, + test_ecdsa_p192_priv_key + 2 + 2, 1 }, + { qcrypto_der_decode_octet_str, checker_callback, false, + test_ecdsa_p192_priv_key + 5 + 2, 24 }, + { qcrypto_wrapped_decode_ctx_tag1, checker_callback, true, + test_ecdsa_p192_priv_key + 31 + 2, 52 }, + { qcrypto_der_decode_bit_str , checker_callback, false, + test_ecdsa_p192_priv_key + 33 + 2, 50 }, + }, +}, +{ + .path = "/crypto/der/parse-ecdsa-p256-priv-key", + .test_data = test_ecdsa_p256_priv_key, + .test_data_len = sizeof(test_ecdsa_p256_priv_key) - 1, + .checker = { + { qcrypto_der_decode_seq, checker_callback, true, + test_ecdsa_p256_priv_key + 2, 119 }, + { qcrypto_der_decode_int, checker_callback, false, + test_ecdsa_p256_priv_key + 2 + 2, 1 }, + { qcrypto_der_decode_octet_str, checker_callback, false, + test_ecdsa_p256_priv_key + 5 + 2, 32 }, + { qcrypto_wrapped_decode_ctx_tag0, checker_callback, true, + test_ecdsa_p256_priv_key + 39 + 2, 10 }, + { qcrypto_der_decode_oid, checker_callback, false, + test_ecdsa_p256_priv_key + 41 + 2, 8 }, + { qcrypto_wrapped_decode_ctx_tag1, checker_callback, true, + test_ecdsa_p256_priv_key + 51 + 2, 68 }, + { qcrypto_der_decode_bit_str , checker_callback, false, + test_ecdsa_p256_priv_key + 53 + 2, 66 }, + }, +}, +}; + +int main(int argc, char **argv) +{ + size_t i; + g_test_init(&argc, &argv, NULL); + + for (i = 0; i < G_N_ELEMENTS(test_data); i++) { + g_test_add_data_func(test_data[i].path, &test_data[i], test_ans1); + } + + return g_test_run(); +} diff --git a/tests/unit/test-crypto-tlscredsx509.c b/tests/unit/test-crypto-tlscredsx509.c index aab4149b56..3c25d75ca1 100644 --- a/tests/unit/test-crypto-tlscredsx509.c +++ b/tests/unit/test-crypto-tlscredsx509.c @@ -75,7 +75,7 @@ static void test_tls_creds(const void *opaque) QCryptoTLSCreds *creds; #define CERT_DIR "tests/test-crypto-tlscredsx509-certs/" - mkdir(CERT_DIR, 0700); + g_mkdir_with_parents(CERT_DIR, 0700); unlink(CERT_DIR QCRYPTO_TLS_CREDS_X509_CA_CERT); if (data->isServer) { @@ -141,7 +141,7 @@ int main(int argc, char **argv) g_test_init(&argc, &argv, NULL); g_setenv("GNUTLS_FORCE_FIPS_MODE", "2", 1); - mkdir(WORKDIR, 0700); + g_mkdir_with_parents(WORKDIR, 0700); test_tls_init(KEYFILE); diff --git a/tests/unit/test-crypto-tlssession.c b/tests/unit/test-crypto-tlssession.c index 5f0da9192c..615a1344b4 100644 --- a/tests/unit/test-crypto-tlssession.c +++ b/tests/unit/test-crypto-tlssession.c @@ -90,8 +90,8 @@ static void test_crypto_tls_session_psk(void) * thread, so we need these non-blocking to avoid deadlock * of ourselves */ - qemu_set_nonblock(channel[0]); - 
qemu_set_nonblock(channel[1]); + qemu_socket_set_nonblock(channel[0]); + qemu_socket_set_nonblock(channel[1]); clientCreds = test_tls_creds_psk_create( QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT, @@ -244,13 +244,13 @@ static void test_crypto_tls_session_x509(const void *opaque) * thread, so we need these non-blocking to avoid deadlock * of ourselves */ - qemu_set_nonblock(channel[0]); - qemu_set_nonblock(channel[1]); + qemu_socket_set_nonblock(channel[0]); + qemu_socket_set_nonblock(channel[1]); #define CLIENT_CERT_DIR "tests/test-crypto-tlssession-client/" #define SERVER_CERT_DIR "tests/test-crypto-tlssession-server/" - mkdir(CLIENT_CERT_DIR, 0700); - mkdir(SERVER_CERT_DIR, 0700); + g_mkdir_with_parents(CLIENT_CERT_DIR, 0700); + g_mkdir_with_parents(SERVER_CERT_DIR, 0700); unlink(SERVER_CERT_DIR QCRYPTO_TLS_CREDS_X509_CA_CERT); unlink(SERVER_CERT_DIR QCRYPTO_TLS_CREDS_X509_SERVER_CERT); @@ -398,7 +398,7 @@ int main(int argc, char **argv) g_test_init(&argc, &argv, NULL); g_setenv("GNUTLS_FORCE_FIPS_MODE", "2", 1); - mkdir(WORKDIR, 0700); + g_mkdir_with_parents(WORKDIR, 0700); test_tls_init(KEYFILE); test_tls_psk_init(PSKFILE); @@ -512,12 +512,19 @@ int main(int argc, char **argv) false, true, "wiki.qemu.org", NULL); TEST_SESS_REG(altname4, cacertreq.filename, + servercertalt1req.filename, clientcertreq.filename, + false, false, "192.168.122.1", NULL); + TEST_SESS_REG(altname5, cacertreq.filename, + servercertalt1req.filename, clientcertreq.filename, + false, false, "fec0::dead:beaf", NULL); + + TEST_SESS_REG(altname6, cacertreq.filename, servercertalt2req.filename, clientcertreq.filename, false, true, "qemu.org", NULL); - TEST_SESS_REG(altname5, cacertreq.filename, + TEST_SESS_REG(altname7, cacertreq.filename, servercertalt2req.filename, clientcertreq.filename, false, false, "www.qemu.org", NULL); - TEST_SESS_REG(altname6, cacertreq.filename, + TEST_SESS_REG(altname8, cacertreq.filename, servercertalt2req.filename, clientcertreq.filename, false, false, "wiki.qemu.org", NULL); diff --git a/tests/unit/test-cutils.c b/tests/unit/test-cutils.c index 98671f1ac3..86caddcf64 100644 --- a/tests/unit/test-cutils.c +++ b/tests/unit/test-cutils.c @@ -2450,6 +2450,76 @@ static void test_qemu_strtosz_metric(void) g_assert(endptr == str + 7); } +static void test_freq_to_str(void) +{ + char *str; + + str = freq_to_str(999); + g_assert_cmpstr(str, ==, "999 Hz"); + g_free(str); + + str = freq_to_str(1000); + g_assert_cmpstr(str, ==, "1 KHz"); + g_free(str); + + str = freq_to_str(1010); + g_assert_cmpstr(str, ==, "1.01 KHz"); + g_free(str); +} + +static void test_size_to_str(void) +{ + char *str; + + str = size_to_str(0); + g_assert_cmpstr(str, ==, "0 B"); + g_free(str); + + str = size_to_str(1); + g_assert_cmpstr(str, ==, "1 B"); + g_free(str); + + str = size_to_str(1016); + g_assert_cmpstr(str, ==, "0.992 KiB"); + g_free(str); + + str = size_to_str(1024); + g_assert_cmpstr(str, ==, "1 KiB"); + g_free(str); + + str = size_to_str(512ull << 20); + g_assert_cmpstr(str, ==, "512 MiB"); + g_free(str); +} + +static void test_iec_binary_prefix(void) +{ + g_assert_cmpstr(iec_binary_prefix(0), ==, ""); + g_assert_cmpstr(iec_binary_prefix(10), ==, "Ki"); + g_assert_cmpstr(iec_binary_prefix(20), ==, "Mi"); + g_assert_cmpstr(iec_binary_prefix(30), ==, "Gi"); + g_assert_cmpstr(iec_binary_prefix(40), ==, "Ti"); + g_assert_cmpstr(iec_binary_prefix(50), ==, "Pi"); + g_assert_cmpstr(iec_binary_prefix(60), ==, "Ei"); +} + +static void test_si_prefix(void) +{ + g_assert_cmpstr(si_prefix(-18), ==, "a"); + 
g_assert_cmpstr(si_prefix(-15), ==, "f"); + g_assert_cmpstr(si_prefix(-12), ==, "p"); + g_assert_cmpstr(si_prefix(-9), ==, "n"); + g_assert_cmpstr(si_prefix(-6), ==, "u"); + g_assert_cmpstr(si_prefix(-3), ==, "m"); + g_assert_cmpstr(si_prefix(0), ==, ""); + g_assert_cmpstr(si_prefix(3), ==, "K"); + g_assert_cmpstr(si_prefix(6), ==, "M"); + g_assert_cmpstr(si_prefix(9), ==, "G"); + g_assert_cmpstr(si_prefix(12), ==, "T"); + g_assert_cmpstr(si_prefix(15), ==, "P"); + g_assert_cmpstr(si_prefix(18), ==, "E"); +} + int main(int argc, char **argv) { g_test_init(&argc, &argv, NULL); @@ -2729,5 +2799,13 @@ int main(int argc, char **argv) g_test_add_func("/cutils/strtosz/metric", test_qemu_strtosz_metric); + g_test_add_func("/cutils/size_to_str", + test_size_to_str); + g_test_add_func("/cutils/freq_to_str", + test_freq_to_str); + g_test_add_func("/cutils/iec_binary_prefix", + test_iec_binary_prefix); + g_test_add_func("/cutils/si_prefix", + test_si_prefix); return g_test_run(); } diff --git a/tests/unit/test-div128.c b/tests/unit/test-div128.c new file mode 100644 index 0000000000..0bc25fe4a8 --- /dev/null +++ b/tests/unit/test-div128.c @@ -0,0 +1,197 @@ +/* + * Test 128-bit division functions + * + * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br) + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/host-utils.h" + +typedef struct { + uint64_t high; + uint64_t low; + uint64_t rhigh; + uint64_t rlow; + uint64_t divisor; + uint64_t remainder; +} test_data_unsigned; + +typedef struct { + int64_t high; + uint64_t low; + int64_t rhigh; + uint64_t rlow; + int64_t divisor; + int64_t remainder; +} test_data_signed; + +static const test_data_unsigned test_table_unsigned[] = { + /* Dividend fits in 64 bits */ + { 0x0000000000000000ULL, 0x0000000000000000ULL, + 0x0000000000000000ULL, 0x0000000000000000ULL, + 0x0000000000000001ULL, 0x0000000000000000ULL}, + { 0x0000000000000000ULL, 0x0000000000000001ULL, + 0x0000000000000000ULL, 0x0000000000000001ULL, + 0x0000000000000001ULL, 0x0000000000000000ULL}, + { 0x0000000000000000ULL, 0x0000000000000003ULL, + 0x0000000000000000ULL, 0x0000000000000001ULL, + 0x0000000000000002ULL, 0x0000000000000001ULL}, + { 0x0000000000000000ULL, 0x8000000000000000ULL, + 0x0000000000000000ULL, 0x8000000000000000ULL, + 0x0000000000000001ULL, 0x0000000000000000ULL}, + { 0x0000000000000000ULL, 0xa000000000000000ULL, + 0x0000000000000000ULL, 0x0000000000000002ULL, + 0x4000000000000000ULL, 0x2000000000000000ULL}, + { 0x0000000000000000ULL, 0x8000000000000000ULL, + 0x0000000000000000ULL, 0x0000000000000001ULL, + 0x8000000000000000ULL, 0x0000000000000000ULL}, + + /* Dividend > 64 bits, with MSB 0 */ + { 0x123456789abcdefeULL, 0xefedcba987654321ULL, + 0x123456789abcdefeULL, 0xefedcba987654321ULL, + 0x0000000000000001ULL, 0x0000000000000000ULL}, + { 0x123456789abcdefeULL, 0xefedcba987654321ULL, + 0x0000000000000001ULL, 0x000000000000000dULL, + 0x123456789abcdefeULL, 0x03456789abcdf03bULL}, + { 0x123456789abcdefeULL, 0xefedcba987654321ULL, + 0x0123456789abcdefULL, 0xeefedcba98765432ULL, + 0x0000000000000010ULL, 0x0000000000000001ULL}, + + /* Dividend > 64 bits, with MSB 1 */ + { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, + 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, + 0x0000000000000001ULL, 0x0000000000000000ULL}, + { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, + 0x0000000000000001ULL, 0x0000000000000000ULL, + 0xfeeddccbbaa99887ULL, 0x766554433221100fULL}, + { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, + 0x0feeddccbbaa9988ULL, 0x7766554433221100ULL, + 0x0000000000000010ULL, 0x000000000000000fULL}, + { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, + 0x000000000000000eULL, 0x00f0f0f0f0f0f35aULL, + 0x123456789abcdefeULL, 0x0f8922bc55ef90c3ULL}, + + /** + * Divisor == 64 bits, with MSB 1 + * and high 64 bits of dividend >= divisor + * (for testing normalization) + */ + { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, + 0x0000000000000001ULL, 0x0000000000000000ULL, + 0xfeeddccbbaa99887ULL, 0x766554433221100fULL}, + { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, + 0x0000000000000001ULL, 0xfddbb9977553310aULL, + 0x8000000000000001ULL, 0x78899aabbccddf05ULL}, + + /* Dividend > 64 bits, divisor almost as big */ + { 0x0000000000000001ULL, 0x23456789abcdef01ULL, + 0x0000000000000000ULL, 0x000000000000000fULL, + 0x123456789abcdefeULL, 0x123456789abcde1fULL}, +}; + +static const test_data_signed test_table_signed[] = { + /* Positive dividend, positive/negative divisors */ + { 0x0000000000000000LL, 0x0000000000bc614eULL, + 0x0000000000000000LL, 0x0000000000bc614eULL, + 0x0000000000000001LL, 0x0000000000000000LL}, + { 0x0000000000000000LL, 0x0000000000bc614eULL, + 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0xffffffffffffffffLL, 0x0000000000000000LL}, + { 0x0000000000000000LL, 0x0000000000bc614eULL, + 0x0000000000000000LL, 
0x00000000005e30a7ULL, + 0x0000000000000002LL, 0x0000000000000000LL}, + { 0x0000000000000000LL, 0x0000000000bc614eULL, + 0xffffffffffffffffLL, 0xffffffffffa1cf59ULL, + 0xfffffffffffffffeLL, 0x0000000000000000LL}, + { 0x0000000000000000LL, 0x0000000000bc614eULL, + 0x0000000000000000LL, 0x0000000000178c29ULL, + 0x0000000000000008LL, 0x0000000000000006LL}, + { 0x0000000000000000LL, 0x0000000000bc614eULL, + 0xffffffffffffffffLL, 0xffffffffffe873d7ULL, + 0xfffffffffffffff8LL, 0x0000000000000006LL}, + { 0x0000000000000000LL, 0x0000000000bc614eULL, + 0x0000000000000000LL, 0x000000000000550dULL, + 0x0000000000000237LL, 0x0000000000000183LL}, + { 0x0000000000000000LL, 0x0000000000bc614eULL, + 0xffffffffffffffffLL, 0xffffffffffffaaf3ULL, + 0xfffffffffffffdc9LL, 0x0000000000000183LL}, + + /* Negative dividend, positive/negative divisors */ + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0x0000000000000001LL, 0x0000000000000000LL}, + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0x0000000000000000LL, 0x0000000000bc614eULL, + 0xffffffffffffffffLL, 0x0000000000000000LL}, + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0xffffffffffffffffLL, 0xffffffffffa1cf59ULL, + 0x0000000000000002LL, 0x0000000000000000LL}, + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0x0000000000000000LL, 0x00000000005e30a7ULL, + 0xfffffffffffffffeLL, 0x0000000000000000LL}, + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0xffffffffffffffffLL, 0xffffffffffe873d7ULL, + 0x0000000000000008LL, 0xfffffffffffffffaLL}, + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0x0000000000000000LL, 0x0000000000178c29ULL, + 0xfffffffffffffff8LL, 0xfffffffffffffffaLL}, + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0xffffffffffffffffLL, 0xffffffffffffaaf3ULL, + 0x0000000000000237LL, 0xfffffffffffffe7dLL}, + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0x0000000000000000LL, 0x000000000000550dULL, + 0xfffffffffffffdc9LL, 0xfffffffffffffe7dLL}, +}; + +static void test_divu128(void) +{ + int i; + uint64_t rem; + test_data_unsigned tmp; + + for (i = 0; i < ARRAY_SIZE(test_table_unsigned); ++i) { + tmp = test_table_unsigned[i]; + + rem = divu128(&tmp.low, &tmp.high, tmp.divisor); + g_assert_cmpuint(tmp.low, ==, tmp.rlow); + g_assert_cmpuint(tmp.high, ==, tmp.rhigh); + g_assert_cmpuint(rem, ==, tmp.remainder); + } +} + +static void test_divs128(void) +{ + int i; + int64_t rem; + test_data_signed tmp; + + for (i = 0; i < ARRAY_SIZE(test_table_signed); ++i) { + tmp = test_table_signed[i]; + + rem = divs128(&tmp.low, &tmp.high, tmp.divisor); + g_assert_cmpuint(tmp.low, ==, tmp.rlow); + g_assert_cmpuint(tmp.high, ==, tmp.rhigh); + g_assert_cmpuint(rem, ==, tmp.remainder); + } +} + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + g_test_add_func("/host-utils/test_divu128", test_divu128); + g_test_add_func("/host-utils/test_divs128", test_divs128); + return g_test_run(); +} diff --git a/tests/unit/test-fdmon-epoll.c b/tests/unit/test-fdmon-epoll.c index 11fd8a2fa9..ef5a856d09 100644 --- a/tests/unit/test-fdmon-epoll.c +++ b/tests/unit/test-fdmon-epoll.c @@ -22,14 +22,14 @@ static void add_event_notifiers(EventNotifier *notifiers, size_t n) for (size_t i = 0; i < n; i++) { event_notifier_init(¬ifiers[i], false); aio_set_event_notifier(ctx, ¬ifiers[i], false, - dummy_fd_handler, NULL); + dummy_fd_handler, NULL, NULL); } } static void remove_event_notifiers(EventNotifier *notifiers, size_t n) { for (size_t i = 0; i < n; i++) { - aio_set_event_notifier(ctx, ¬ifiers[i], 
false, NULL, NULL); + aio_set_event_notifier(ctx, ¬ifiers[i], false, NULL, NULL, NULL); event_notifier_cleanup(¬ifiers[i]); } } diff --git a/tests/unit/test-forward-visitor.c b/tests/unit/test-forward-visitor.c index 348f7e4e81..eea8ffc072 100644 --- a/tests/unit/test-forward-visitor.c +++ b/tests/unit/test-forward-visitor.c @@ -9,14 +9,13 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/forward-visitor.h" #include "qapi/qobject-input-visitor.h" #include "qapi/error.h" #include "qapi/qmp/qobject.h" #include "qapi/qmp/qdict.h" #include "test-qapi-visit.h" -#include "qemu/option.h" +#include "qemu/keyval.h" typedef bool GenericVisitor (Visitor *, const char *, void **, Error **); #define CAST_VISIT_TYPE(fn) ((GenericVisitor *)(fn)) diff --git a/tests/unit/test-hbitmap.c b/tests/unit/test-hbitmap.c index b6726cf76b..a4fe067917 100644 --- a/tests/unit/test-hbitmap.c +++ b/tests/unit/test-hbitmap.c @@ -113,7 +113,7 @@ static void hbitmap_test_truncate_impl(TestHBitmapData *data, n = hbitmap_test_array_size(size); m = hbitmap_test_array_size(data->old_size); - data->bits = g_realloc(data->bits, sizeof(unsigned long) * n); + data->bits = g_renew(unsigned long, data->bits, n); if (n > m) { memset(&data->bits[m], 0x00, sizeof(unsigned long) * (n - m)); } diff --git a/tests/unit/test-image-locking.c b/tests/unit/test-image-locking.c index ba057bd66c..2624cec6a0 100644 --- a/tests/unit/test-image-locking.c +++ b/tests/unit/test-image-locking.c @@ -76,10 +76,10 @@ static void check_locked_bytes(int fd, uint64_t perm_locks, static void test_image_locking_basic(void) { BlockBackend *blk1, *blk2, *blk3; - char img_path[] = "/tmp/qtest.XXXXXX"; + g_autofree char *img_path = NULL; uint64_t perm, shared_perm; - int fd = mkstemp(img_path); + int fd = g_file_open_tmp("qemu-tst-img-lock.XXXXXX", &img_path, NULL); assert(fd >= 0); perm = BLK_PERM_WRITE | BLK_PERM_CONSISTENT_READ; @@ -117,10 +117,10 @@ static void test_image_locking_basic(void) static void test_set_perm_abort(void) { BlockBackend *blk1, *blk2; - char img_path[] = "/tmp/qtest.XXXXXX"; + g_autofree char *img_path = NULL; uint64_t perm, shared_perm; int r; - int fd = mkstemp(img_path); + int fd = g_file_open_tmp("qemu-tst-img-lock.XXXXXX", &img_path, NULL); assert(fd >= 0); perm = BLK_PERM_WRITE | BLK_PERM_CONSISTENT_READ; @@ -140,6 +140,8 @@ static void test_set_perm_abort(void) check_locked_bytes(fd, perm, ~shared_perm); blk_unref(blk1); blk_unref(blk2); + close(fd); + unlink(img_path); } int main(int argc, char **argv) diff --git a/tests/unit/test-int128.c b/tests/unit/test-int128.c index b86a3c76e6..25db2455e8 100644 --- a/tests/unit/test-int128.c +++ b/tests/unit/test-int128.c @@ -206,6 +206,55 @@ static void test_rshift(void) test_rshift_one(0xFFFE8000U, 0, 0xFFFFFFFFFFFFFFFEULL, 0x8000000000000000ULL); } +static void __attribute__((__noinline__)) ATTRIBUTE_NOCLONE +test_urshift_one(uint32_t x, int n, uint64_t h, uint64_t l) +{ + Int128 a = expand(x); + Int128 r = int128_urshift(a, n); + g_assert_cmpuint(int128_getlo(r), ==, l); + g_assert_cmpuint(int128_gethi(r), ==, h); +} + +static void test_urshift(void) +{ + test_urshift_one(0x00010000U, 64, 0x0000000000000000ULL, + 0x0000000000000001ULL); + test_urshift_one(0x80010000U, 64, 0x0000000000000000ULL, + 0x8000000000000001ULL); + test_urshift_one(0x7FFE0000U, 64, 0x0000000000000000ULL, + 0x7FFFFFFFFFFFFFFEULL); + test_urshift_one(0xFFFE0000U, 64, 0x0000000000000000ULL, + 0xFFFFFFFFFFFFFFFEULL); + test_urshift_one(0x00010000U, 60, 0x0000000000000000ULL, + 
0x0000000000000010ULL); + test_urshift_one(0x80010000U, 60, 0x0000000000000008ULL, + 0x0000000000000010ULL); + test_urshift_one(0x00018000U, 60, 0x0000000000000000ULL, + 0x0000000000000018ULL); + test_urshift_one(0x80018000U, 60, 0x0000000000000008ULL, + 0x0000000000000018ULL); + test_urshift_one(0x7FFE0000U, 60, 0x0000000000000007ULL, + 0xFFFFFFFFFFFFFFE0ULL); + test_urshift_one(0xFFFE0000U, 60, 0x000000000000000FULL, + 0xFFFFFFFFFFFFFFE0ULL); + test_urshift_one(0x7FFE8000U, 60, 0x0000000000000007ULL, + 0xFFFFFFFFFFFFFFE8ULL); + test_urshift_one(0xFFFE8000U, 60, 0x000000000000000FULL, + 0xFFFFFFFFFFFFFFE8ULL); + test_urshift_one(0x00018000U, 0, 0x0000000000000001ULL, + 0x8000000000000000ULL); + test_urshift_one(0x80018000U, 0, 0x8000000000000001ULL, + 0x8000000000000000ULL); + test_urshift_one(0x7FFE0000U, 0, 0x7FFFFFFFFFFFFFFEULL, + 0x0000000000000000ULL); + test_urshift_one(0xFFFE0000U, 0, 0xFFFFFFFFFFFFFFFEULL, + 0x0000000000000000ULL); + test_urshift_one(0x7FFE8000U, 0, 0x7FFFFFFFFFFFFFFEULL, + 0x8000000000000000ULL); + test_urshift_one(0xFFFE8000U, 0, 0xFFFFFFFFFFFFFFFEULL, + 0x8000000000000000ULL); +} + int main(int argc, char **argv) { g_test_init(&argc, &argv, NULL); @@ -219,5 +268,6 @@ int main(int argc, char **argv) g_test_add_func("/int128/int128_ge", test_ge); g_test_add_func("/int128/int128_gt", test_gt); g_test_add_func("/int128/int128_rshift", test_rshift); + g_test_add_func("/int128/int128_urshift", test_urshift); return g_test_run(); } diff --git a/tests/unit/test-io-channel-command.c b/tests/unit/test-io-channel-command.c index 99056e07c0..19f72eab96 100644 --- a/tests/unit/test-io-channel-command.c +++ b/tests/unit/test-io-channel-command.c @@ -19,37 +19,34 @@ */ #include "qemu/osdep.h" +#include #include "io/channel-command.h" #include "io-channel-helpers.h" #include "qapi/error.h" #include "qemu/module.h" -#ifndef WIN32 +#define TEST_FIFO "test-io-channel-command.fifo" + +static char *socat = NULL; + static void test_io_channel_command_fifo(bool async) { -#define TEST_FIFO "tests/test-io-channel-command.fifo" + g_autofree gchar *tmpdir = g_dir_make_tmp("qemu-test-io-channel.XXXXXX", NULL); + g_autofree gchar *fifo = g_strdup_printf("%s/%s", tmpdir, TEST_FIFO); + g_autofree gchar *srcargs = g_strdup_printf("%s - PIPE:%s,wronly", socat, fifo); + g_autofree gchar *dstargs = g_strdup_printf("%s PIPE:%s,rdonly -", socat, fifo); + g_auto(GStrv) srcargv = g_strsplit(srcargs, " ", -1); + g_auto(GStrv) dstargv = g_strsplit(dstargs, " ", -1); QIOChannel *src, *dst; QIOChannelTest *test; - const char *srcfifo = "PIPE:" TEST_FIFO ",wronly"; - const char *dstfifo = "PIPE:" TEST_FIFO ",rdonly"; - const char *srcargv[] = { - "/bin/socat", "-", srcfifo, NULL, - }; - const char *dstargv[] = { - "/bin/socat", dstfifo, "-", NULL, - }; - unlink(TEST_FIFO); - if (access("/bin/socat", X_OK) < 0) { - return; /* Pretend success if socat is not present */ - } - if (mkfifo(TEST_FIFO, 0600) < 0) { - abort(); - } - src = QIO_CHANNEL(qio_channel_command_new_spawn(srcargv, + src = QIO_CHANNEL(qio_channel_command_new_spawn((const char **) srcargv, O_WRONLY, &error_abort)); - dst = QIO_CHANNEL(qio_channel_command_new_spawn(dstargv, + /* try to avoid a race to create the socket */ + g_usleep(1000); + + dst = QIO_CHANNEL(qio_channel_command_new_spawn((const char **) dstargv, O_RDONLY, &error_abort)); @@ -60,17 +57,27 @@ static void test_io_channel_command_fifo(bool async) object_unref(OBJECT(src)); object_unref(OBJECT(dst)); - unlink(TEST_FIFO); + g_rmdir(tmpdir); } static void 
test_io_channel_command_fifo_async(void) { + if (!socat) { + g_test_skip("socat is not found in PATH"); + return; + } + test_io_channel_command_fifo(true); } static void test_io_channel_command_fifo_sync(void) { + if (!socat) { + g_test_skip("socat is not found in PATH"); + return; + } + test_io_channel_command_fifo(false); } @@ -80,11 +87,12 @@ static void test_io_channel_command_echo(bool async) QIOChannel *ioc; QIOChannelTest *test; const char *socatargv[] = { - "/bin/socat", "-", "-", NULL, + socat, "-", "-", NULL, }; - if (access("/bin/socat", X_OK) < 0) { - return; /* Pretend success if socat is not present */ + if (!socat) { + g_test_skip("socat is not found in PATH"); + return; } ioc = QIO_CHANNEL(qio_channel_command_new_spawn(socatargv, @@ -107,7 +115,6 @@ static void test_io_channel_command_echo_sync(void) { test_io_channel_command_echo(false); } -#endif int main(int argc, char **argv) { @@ -115,7 +122,8 @@ int main(int argc, char **argv) g_test_init(&argc, &argv, NULL); -#ifndef WIN32 + socat = g_find_program_in_path("socat"); + g_test_add_func("/io/channel/command/fifo/sync", test_io_channel_command_fifo_sync); g_test_add_func("/io/channel/command/fifo/async", @@ -124,7 +132,6 @@ int main(int argc, char **argv) test_io_channel_command_echo_sync); g_test_add_func("/io/channel/command/echo/async", test_io_channel_command_echo_async); -#endif return g_test_run(); } diff --git a/tests/unit/test-io-channel-file.c b/tests/unit/test-io-channel-file.c index 29038e67b6..1977006ce9 100644 --- a/tests/unit/test-io-channel-file.c +++ b/tests/unit/test-io-channel-file.c @@ -109,7 +109,7 @@ static void test_io_channel_pipe(bool async) QIOChannelTest *test; int fd[2]; - if (pipe(fd) < 0) { + if (!g_unix_open_pipe(fd, FD_CLOEXEC, NULL)) { perror("pipe"); abort(); } diff --git a/tests/unit/test-io-channel-null.c b/tests/unit/test-io-channel-null.c new file mode 100644 index 0000000000..b3aab17ccc --- /dev/null +++ b/tests/unit/test-io-channel-null.c @@ -0,0 +1,95 @@ +/* + * QEMU I/O channel null test + * + * Copyright (c) 2022 Red Hat, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ * + */ + +#include "qemu/osdep.h" +#include "io/channel-null.h" +#include "qapi/error.h" + +static gboolean test_io_channel_watch(QIOChannel *ioc, + GIOCondition condition, + gpointer opaque) +{ + GIOCondition *gotcond = opaque; + *gotcond = condition; + return G_SOURCE_REMOVE; +} + +static void test_io_channel_null_io(void) +{ + g_autoptr(QIOChannelNull) null = qio_channel_null_new(); + char buf[1024]; + GIOCondition gotcond = 0; + Error *local_err = NULL; + + g_assert(qio_channel_write(QIO_CHANNEL(null), + "Hello World", 11, + &error_abort) == 11); + + g_assert(qio_channel_read(QIO_CHANNEL(null), + buf, sizeof(buf), + &error_abort) == 0); + + qio_channel_add_watch(QIO_CHANNEL(null), + G_IO_IN, + test_io_channel_watch, + &gotcond, + NULL); + + g_main_context_iteration(NULL, false); + + g_assert(gotcond == G_IO_IN); + + qio_channel_add_watch(QIO_CHANNEL(null), + G_IO_IN | G_IO_OUT, + test_io_channel_watch, + &gotcond, + NULL); + + g_main_context_iteration(NULL, false); + + g_assert(gotcond == (G_IO_IN | G_IO_OUT)); + + qio_channel_close(QIO_CHANNEL(null), &error_abort); + + g_assert(qio_channel_write(QIO_CHANNEL(null), + "Hello World", 11, + &local_err) == -1); + g_assert_nonnull(local_err); + + g_clear_pointer(&local_err, error_free); + + g_assert(qio_channel_read(QIO_CHANNEL(null), + buf, sizeof(buf), + &local_err) == -1); + g_assert_nonnull(local_err); + + g_clear_pointer(&local_err, error_free); +} + +int main(int argc, char **argv) +{ + module_call_init(MODULE_INIT_QOM); + + g_test_init(&argc, &argv, NULL); + + g_test_add_func("/io/channel/null/io", test_io_channel_null_io); + + return g_test_run(); +} diff --git a/tests/unit/test-io-channel-socket.c b/tests/unit/test-io-channel-socket.c index c49eec1f03..b36a5d972a 100644 --- a/tests/unit/test-io-channel-socket.c +++ b/tests/unit/test-io-channel-socket.c @@ -179,10 +179,12 @@ static void test_io_channel(bool async, test_io_channel_setup_async(listen_addr, connect_addr, &srv, &src, &dst); +#ifndef _WIN32 g_assert(!passFD || qio_channel_has_feature(src, QIO_CHANNEL_FEATURE_FD_PASS)); g_assert(!passFD || qio_channel_has_feature(dst, QIO_CHANNEL_FEATURE_FD_PASS)); +#endif g_assert(qio_channel_has_feature(src, QIO_CHANNEL_FEATURE_SHUTDOWN)); g_assert(qio_channel_has_feature(dst, QIO_CHANNEL_FEATURE_SHUTDOWN)); @@ -206,10 +208,12 @@ static void test_io_channel(bool async, test_io_channel_setup_async(listen_addr, connect_addr, &srv, &src, &dst); +#ifndef _WIN32 g_assert(!passFD || qio_channel_has_feature(src, QIO_CHANNEL_FEATURE_FD_PASS)); g_assert(!passFD || qio_channel_has_feature(dst, QIO_CHANNEL_FEATURE_FD_PASS)); +#endif g_assert(qio_channel_has_feature(src, QIO_CHANNEL_FEATURE_SHUTDOWN)); g_assert(qio_channel_has_feature(dst, QIO_CHANNEL_FEATURE_SHUTDOWN)); @@ -236,10 +240,12 @@ static void test_io_channel(bool async, test_io_channel_setup_sync(listen_addr, connect_addr, &srv, &src, &dst); +#ifndef _WIN32 g_assert(!passFD || qio_channel_has_feature(src, QIO_CHANNEL_FEATURE_FD_PASS)); g_assert(!passFD || qio_channel_has_feature(dst, QIO_CHANNEL_FEATURE_FD_PASS)); +#endif g_assert(qio_channel_has_feature(src, QIO_CHANNEL_FEATURE_SHUTDOWN)); g_assert(qio_channel_has_feature(dst, QIO_CHANNEL_FEATURE_SHUTDOWN)); @@ -263,10 +269,12 @@ static void test_io_channel(bool async, test_io_channel_setup_sync(listen_addr, connect_addr, &srv, &src, &dst); +#ifndef _WIN32 g_assert(!passFD || qio_channel_has_feature(src, QIO_CHANNEL_FEATURE_FD_PASS)); g_assert(!passFD || qio_channel_has_feature(dst, QIO_CHANNEL_FEATURE_FD_PASS)); +#endif 
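
Editorial note: the new tests/unit/test-io-channel-null.c above exercises the discard semantics of QIOChannelNull. The following is a minimal sketch, not part of the patch, that mirrors only calls already shown in that test (qio_channel_null_new, qio_channel_write/read/close); it is illustrative rather than an addition to the series.

#include "qemu/osdep.h"
#include "io/channel-null.h"
#include "qapi/error.h"

static void null_channel_sketch(void)
{
    g_autoptr(QIOChannelNull) null = qio_channel_null_new();
    char buf[16];

    /* Writes are accepted in full and the data is silently discarded. */
    g_assert(qio_channel_write(QIO_CHANNEL(null), "ping", 4,
                               &error_abort) == 4);

    /* Reads always report end-of-file. */
    g_assert(qio_channel_read(QIO_CHANNEL(null), buf, sizeof(buf),
                              &error_abort) == 0);

    /* After close, further I/O fails, as the test above verifies. */
    qio_channel_close(QIO_CHANNEL(null), &error_abort);
}
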
g_assert(qio_channel_has_feature(src, QIO_CHANNEL_FEATURE_SHUTDOWN)); g_assert(qio_channel_has_feature(dst, QIO_CHANNEL_FEATURE_SHUTDOWN)); @@ -367,7 +375,6 @@ static void test_io_channel_ipv6_async(void) } -#ifndef _WIN32 static void test_io_channel_unix(bool async) { SocketAddress *listen_addr = g_new0(SocketAddress, 1); @@ -398,6 +405,7 @@ static void test_io_channel_unix_async(void) return test_io_channel_unix(true); } +#ifndef _WIN32 static void test_io_channel_unix_fd_pass(void) { SocketAddress *listen_addr = g_new0(SocketAddress, 1); @@ -444,6 +452,7 @@ static void test_io_channel_unix_fd_pass(void) G_N_ELEMENTS(iosend), fdsend, G_N_ELEMENTS(fdsend), + 0, &error_abort); qio_channel_readv_full(dst, @@ -490,6 +499,7 @@ static void test_io_channel_unix_fd_pass(void) } g_free(fdrecv); } +#endif /* _WIN32 */ static void test_io_channel_unix_listen_cleanup(void) { @@ -521,9 +531,6 @@ static void test_io_channel_unix_listen_cleanup(void) unlink(TEST_SOCKET); } -#endif /* _WIN32 */ - - static void test_io_channel_ipv4_fd(void) { QIOChannel *ioc; @@ -554,7 +561,7 @@ static void test_io_channel_ipv4_fd(void) int main(int argc, char **argv) { - bool has_ipv4, has_ipv6; + bool has_ipv4, has_ipv6, has_afunix; module_call_init(MODULE_INIT_QOM); qemu_init_main_loop(&error_abort); @@ -587,16 +594,19 @@ int main(int argc, char **argv) test_io_channel_ipv6_async); } + socket_check_afunix_support(&has_afunix); + if (has_afunix) { + g_test_add_func("/io/channel/socket/unix-sync", + test_io_channel_unix_sync); + g_test_add_func("/io/channel/socket/unix-async", + test_io_channel_unix_async); #ifndef _WIN32 - g_test_add_func("/io/channel/socket/unix-sync", - test_io_channel_unix_sync); - g_test_add_func("/io/channel/socket/unix-async", - test_io_channel_unix_async); - g_test_add_func("/io/channel/socket/unix-fd-pass", - test_io_channel_unix_fd_pass); - g_test_add_func("/io/channel/socket/unix-listen-cleanup", - test_io_channel_unix_listen_cleanup); -#endif /* _WIN32 */ + g_test_add_func("/io/channel/socket/unix-fd-pass", + test_io_channel_unix_fd_pass); +#endif + g_test_add_func("/io/channel/socket/unix-listen-cleanup", + test_io_channel_unix_listen_cleanup); + } end: return g_test_run(); diff --git a/tests/unit/test-io-channel-tls.c b/tests/unit/test-io-channel-tls.c index f6fb988c01..cc39247556 100644 --- a/tests/unit/test-io-channel-tls.c +++ b/tests/unit/test-io-channel-tls.c @@ -125,8 +125,8 @@ static void test_io_channel_tls(const void *opaque) #define CLIENT_CERT_DIR "tests/test-io-channel-tls-client/" #define SERVER_CERT_DIR "tests/test-io-channel-tls-server/" - mkdir(CLIENT_CERT_DIR, 0700); - mkdir(SERVER_CERT_DIR, 0700); + g_mkdir_with_parents(CLIENT_CERT_DIR, 0700); + g_mkdir_with_parents(SERVER_CERT_DIR, 0700); unlink(SERVER_CERT_DIR QCRYPTO_TLS_CREDS_X509_CA_CERT); unlink(SERVER_CERT_DIR QCRYPTO_TLS_CREDS_X509_SERVER_CERT); @@ -273,7 +273,7 @@ int main(int argc, char **argv) g_test_init(&argc, &argv, NULL); g_setenv("GNUTLS_FORCE_FIPS_MODE", "2", 1); - mkdir(WORKDIR, 0700); + g_mkdir_with_parents(WORKDIR, 0700); test_tls_init(KEYFILE); diff --git a/tests/unit/test-iov.c b/tests/unit/test-iov.c index 5371066fb6..6f7623d310 100644 --- a/tests/unit/test-iov.c +++ b/tests/unit/test-iov.c @@ -1,5 +1,4 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/iov.h" #include "qemu/sockets.h" @@ -173,7 +172,7 @@ static void test_io(void) } iov_from_buf(iov, niov, 0, buf, sz); - siov = g_memdup(iov, sizeof(*iov) * niov); + siov = g_memdup2(iov, sizeof(*iov) * niov); if (socketpair(PF_UNIX, 
SOCK_STREAM, 0, sv) < 0) { perror("socketpair"); @@ -187,7 +186,7 @@ static void test_io(void) close(sv[0]); FD_SET(sv[1], &fds); - fcntl(sv[1], F_SETFL, O_RDWR|O_NONBLOCK); + g_unix_set_fd_nonblocking(sv[1], true, NULL); r = g_test_rand_int_range(sz / 2, sz); setsockopt(sv[1], SOL_SOCKET, SO_SNDBUF, &r, sizeof(r)); @@ -221,7 +220,7 @@ static void test_io(void) close(sv[1]); FD_SET(sv[0], &fds); - fcntl(sv[0], F_SETFL, O_RDWR|O_NONBLOCK); + g_unix_set_fd_nonblocking(sv[0], true, NULL); r = g_test_rand_int_range(sz / 2, sz); setsockopt(sv[0], SOL_SOCKET, SO_RCVBUF, &r, sizeof(r)); usleep(500000); @@ -350,7 +349,7 @@ static void test_discard_front_undo(void) /* Discard zero bytes */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_tmp = iov; iov_cnt_tmp = iov_cnt; iov_discard_front_undoable(&iov_tmp, &iov_cnt_tmp, 0, &undo); @@ -361,7 +360,7 @@ static void test_discard_front_undo(void) /* Discard more bytes than vector size */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_tmp = iov; iov_cnt_tmp = iov_cnt; size = iov_size(iov, iov_cnt); @@ -373,7 +372,7 @@ static void test_discard_front_undo(void) /* Discard entire vector */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_tmp = iov; iov_cnt_tmp = iov_cnt; size = iov_size(iov, iov_cnt); @@ -385,7 +384,7 @@ static void test_discard_front_undo(void) /* Discard within first element */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_tmp = iov; iov_cnt_tmp = iov_cnt; size = g_test_rand_int_range(1, iov->iov_len); @@ -397,7 +396,7 @@ static void test_discard_front_undo(void) /* Discard entire first element */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_tmp = iov; iov_cnt_tmp = iov_cnt; iov_discard_front_undoable(&iov_tmp, &iov_cnt_tmp, iov->iov_len, &undo); @@ -408,7 +407,7 @@ static void test_discard_front_undo(void) /* Discard within second element */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_tmp = iov; iov_cnt_tmp = iov_cnt; size = iov->iov_len + g_test_rand_int_range(1, iov[1].iov_len); @@ -499,7 +498,7 @@ static void test_discard_back_undo(void) /* Discard zero bytes */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_cnt_tmp = iov_cnt; iov_discard_back_undoable(iov, &iov_cnt_tmp, 0, &undo); iov_discard_undo(&undo); @@ -509,7 +508,7 @@ static void test_discard_back_undo(void) /* Discard more bytes than vector size */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_cnt_tmp = iov_cnt; size = iov_size(iov, iov_cnt); iov_discard_back_undoable(iov, &iov_cnt_tmp, size + 1, &undo); @@ -520,7 +519,7 @@ static void test_discard_back_undo(void) /* Discard entire vector */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_cnt_tmp = iov_cnt; size = iov_size(iov, iov_cnt); iov_discard_back_undoable(iov, &iov_cnt_tmp, size, 
&undo); @@ -531,7 +530,7 @@ static void test_discard_back_undo(void) /* Discard within last element */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_cnt_tmp = iov_cnt; size = g_test_rand_int_range(1, iov[iov_cnt - 1].iov_len); iov_discard_back_undoable(iov, &iov_cnt_tmp, size, &undo); @@ -542,7 +541,7 @@ static void test_discard_back_undo(void) /* Discard entire last element */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_cnt_tmp = iov_cnt; size = iov[iov_cnt - 1].iov_len; iov_discard_back_undoable(iov, &iov_cnt_tmp, size, &undo); @@ -553,7 +552,7 @@ static void test_discard_back_undo(void) /* Discard within second-to-last element */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_cnt_tmp = iov_cnt; size = iov[iov_cnt - 1].iov_len + g_test_rand_int_range(1, iov[iov_cnt - 2].iov_len); diff --git a/tests/unit/test-keyval.c b/tests/unit/test-keyval.c index af0581ae6c..4dc52c7a1a 100644 --- a/tests/unit/test-keyval.c +++ b/tests/unit/test-keyval.c @@ -19,7 +19,7 @@ #include "qapi/qobject-input-visitor.h" #include "test-qapi-visit.h" #include "qemu/cutils.h" -#include "qemu/option.h" +#include "qemu/keyval.h" static void test_keyval_parse(void) { diff --git a/tests/unit/test-logging.c b/tests/unit/test-logging.c index ccb819f193..66dbc82a56 100644 --- a/tests/unit/test-logging.c +++ b/tests/unit/test-logging.c @@ -27,9 +27,9 @@ #include "qemu/osdep.h" #include -#include "qemu-common.h" #include "qapi/error.h" #include "qemu/log.h" +#include "qemu/rcu.h" static void test_parse_range(void) { @@ -110,12 +110,10 @@ static void test_parse_path(gconstpointer data) static void test_logfile_write(gconstpointer data) { - QemuLogFile *logfile; - QemuLogFile *logfile2; + FILE *logfile0, *logfile1; gchar const *dir = data; - g_autofree gchar *file_path = NULL; + g_autofree gchar *file_path0 = NULL; g_autofree gchar *file_path1 = NULL; - FILE *orig_fd; /* * Before starting test, set log flags, to ensure the file gets @@ -123,30 +121,29 @@ static void test_logfile_write(gconstpointer data) * In cases where a logging backend other than log is used, * this is needed. */ - qemu_set_log(CPU_LOG_TB_OUT_ASM); - file_path = g_build_filename(dir, "qemu_test_log_write0.log", NULL); + qemu_set_log(CPU_LOG_TB_OUT_ASM, &error_abort); + file_path0 = g_build_filename(dir, "qemu_test_log_write0.log", NULL); file_path1 = g_build_filename(dir, "qemu_test_log_write1.log", NULL); /* * Test that even if an open file handle is changed, * our handle remains valid due to RCU. */ - qemu_set_log_filename(file_path, &error_abort); - rcu_read_lock(); - logfile = qatomic_rcu_read(&qemu_logfile); - orig_fd = logfile->fd; - g_assert(logfile && logfile->fd); - fprintf(logfile->fd, "%s 1st write to file\n", __func__); - fflush(logfile->fd); + qemu_set_log_filename(file_path0, &error_abort); + logfile0 = qemu_log_trylock(); + g_assert(logfile0); + fprintf(logfile0, "%s 1st write to file\n", __func__); + fflush(logfile0); /* Change the logfile and ensure that the handle is still valid. 
*/ qemu_set_log_filename(file_path1, &error_abort); - logfile2 = qatomic_rcu_read(&qemu_logfile); - g_assert(logfile->fd == orig_fd); - g_assert(logfile2->fd != logfile->fd); - fprintf(logfile->fd, "%s 2nd write to file\n", __func__); - fflush(logfile->fd); - rcu_read_unlock(); + logfile1 = qemu_log_trylock(); + g_assert(logfile1); + g_assert(logfile0 != logfile1); + fprintf(logfile0, "%s 2nd write to file\n", __func__); + fflush(logfile0); + qemu_log_unlock(logfile0); + qemu_log_unlock(logfile1); } static void test_logfile_lock(gconstpointer data) @@ -163,7 +160,7 @@ static void test_logfile_lock(gconstpointer data) * our handle remains valid for use due to RCU. */ qemu_set_log_filename(file_path, &error_abort); - logfile = qemu_log_lock(); + logfile = qemu_log_trylock(); g_assert(logfile); fprintf(logfile, "%s 1st write to file\n", __func__); fflush(logfile); @@ -172,7 +169,7 @@ static void test_logfile_lock(gconstpointer data) * Initiate a close file and make sure our handle remains * valid since we still have the logfile lock. */ - qemu_log_close(); + qemu_set_log_filename_flags(NULL, 0, &error_abort); fprintf(logfile, "%s 2nd write to file\n", __func__); fflush(logfile); qemu_log_unlock(logfile); @@ -210,7 +207,7 @@ int main(int argc, char **argv) tmp_path, test_logfile_lock); rc = g_test_run(); - qemu_log_close(); + qemu_set_log_filename_flags(NULL, 0, &error_abort); drain_call_rcu(); rmdir_full(tmp_path); diff --git a/tests/unit/test-qga.c b/tests/unit/test-qga.c index 5cb140d1b5..b4e0a14573 100644 --- a/tests/unit/test-qga.c +++ b/tests/unit/test-qga.c @@ -4,7 +4,7 @@ #include #include -#include "../qtest/libqos/libqtest.h" +#include "../qtest/libqtest.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qlist.h" @@ -32,6 +32,7 @@ static int connect_qga(char *path) g_usleep(G_USEC_PER_SEC); } if (i++ == 10) { + close(s); return -1; } } while (ret == -1); @@ -52,12 +53,15 @@ fixture_setup(TestFixture *fixture, gconstpointer data, gchar **envp) { const gchar *extra_arg = data; GError *error = NULL; - gchar *cwd, *path, *cmd, **argv = NULL; + g_autofree char *cwd = NULL; + g_autofree char *path = NULL; + g_autofree char *cmd = NULL; + g_auto(GStrv) argv = NULL; fixture->loop = g_main_loop_new(NULL, FALSE); - fixture->test_dir = g_strdup("/tmp/qgatest.XXXXXX"); - g_assert_nonnull(mkdtemp(fixture->test_dir)); + fixture->test_dir = g_strdup_printf("%s/qgatest.XXXXXX", g_get_tmp_dir()); + g_assert_nonnull(g_mkdtemp(fixture->test_dir)); path = g_build_filename(fixture->test_dir, "sock", NULL); cwd = g_get_current_dir(); @@ -78,17 +82,12 @@ fixture_setup(TestFixture *fixture, gconstpointer data, gchar **envp) fixture->fd = connect_qga(path); g_assert_cmpint(fixture->fd, !=, -1); - - g_strfreev(argv); - g_free(cmd); - g_free(cwd); - g_free(path); } static void fixture_tear_down(TestFixture *fixture, gconstpointer data) { - gchar *tmp; + g_autofree char *tmp = NULL; kill(fixture->pid, SIGTERM); @@ -107,7 +106,6 @@ fixture_tear_down(TestFixture *fixture, gconstpointer data) tmp = g_build_filename(fixture->test_dir, "sock", NULL); g_unlink(tmp); - g_free(tmp); g_rmdir(fixture->test_dir); g_free(fixture->test_dir); @@ -122,7 +120,7 @@ static void qmp_assertion_message_error(const char *domain, QDict *dict) { const char *class, *desc; - char *s; + g_autofree char *s = NULL; QDict *error; error = qdict_get_qdict(dict, "error"); @@ -131,7 +129,6 @@ static void qmp_assertion_message_error(const char *domain, s = g_strdup_printf("assertion failed %s: %s %s", expr, class, desc); 
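
Editorial note: the tests/unit/test-logging.c hunks above move from the old qemu_log_lock()/qemu_logfile RCU access to qemu_log_trylock()/qemu_log_unlock(). The snippet below is an illustrative sketch, not part of the patch, restating only the call pattern already visible in those hunks: trylock hands back a FILE * that remains usable until it is unlocked.

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/log.h"

static void log_write_sketch(const char *path)
{
    FILE *f;

    qemu_set_log_filename(path, &error_abort);

    f = qemu_log_trylock();
    if (f) {
        /* The handle stays valid until the matching unlock. */
        fprintf(f, "hello from the sketch\n");
        fflush(f);
        qemu_log_unlock(f);
    }
}
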
g_assertion_message(domain, file, line, func, s); - g_free(s); } #define qmp_assert_no_error(err) do { \ @@ -146,7 +143,7 @@ static void test_qga_sync_delimited(gconstpointer fix) const TestFixture *fixture = fix; guint32 v, r = g_test_rand_int(); unsigned char c; - QDict *ret; + g_autoptr(QDict) ret = NULL; qmp_fd_send_raw(fixture->fd, "\xff"); qmp_fd_send(fixture->fd, @@ -180,15 +177,13 @@ static void test_qga_sync_delimited(gconstpointer fix) v = qdict_get_int(ret, "return"); g_assert_cmpint(r, ==, v); - - qobject_unref(ret); } static void test_qga_sync(gconstpointer fix) { const TestFixture *fixture = fix; guint32 v, r = g_test_rand_int(); - QDict *ret; + g_autoptr(QDict) ret = NULL; /* * TODO guest-sync is inherently limited: we cannot distinguish @@ -210,33 +205,27 @@ static void test_qga_sync(gconstpointer fix) v = qdict_get_int(ret, "return"); g_assert_cmpint(r, ==, v); - - qobject_unref(ret); } static void test_qga_ping(gconstpointer fix) { const TestFixture *fixture = fix; - QDict *ret; + g_autoptr(QDict) ret = NULL; ret = qmp_fd(fixture->fd, "{'execute': 'guest-ping'}"); g_assert_nonnull(ret); qmp_assert_no_error(ret); - - qobject_unref(ret); } static void test_qga_id(gconstpointer fix) { const TestFixture *fixture = fix; - QDict *ret; + g_autoptr(QDict) ret = NULL; ret = qmp_fd(fixture->fd, "{'execute': 'guest-ping', 'id': 1}"); g_assert_nonnull(ret); qmp_assert_no_error(ret); g_assert_cmpint(qdict_get_int(ret, "id"), ==, 1); - - qobject_unref(ret); } static void test_qga_invalid_oob(gconstpointer fix) @@ -253,7 +242,8 @@ static void test_qga_invalid_oob(gconstpointer fix) static void test_qga_invalid_args(gconstpointer fix) { const TestFixture *fixture = fix; - QDict *ret, *error; + g_autoptr(QDict) ret = NULL; + QDict *error; const gchar *class, *desc; ret = qmp_fd(fixture->fd, "{'execute': 'guest-ping', " @@ -266,14 +256,13 @@ static void test_qga_invalid_args(gconstpointer fix) g_assert_cmpstr(class, ==, "GenericError"); g_assert_cmpstr(desc, ==, "Parameter 'foo' is unexpected"); - - qobject_unref(ret); } static void test_qga_invalid_cmd(gconstpointer fix) { const TestFixture *fixture = fix; - QDict *ret, *error; + g_autoptr(QDict) ret = NULL; + QDict *error; const gchar *class, *desc; ret = qmp_fd(fixture->fd, "{'execute': 'guest-invalid-cmd'}"); @@ -285,14 +274,13 @@ static void test_qga_invalid_cmd(gconstpointer fix) g_assert_cmpstr(class, ==, "CommandNotFound"); g_assert_cmpint(strlen(desc), >, 0); - - qobject_unref(ret); } static void test_qga_info(gconstpointer fix) { const TestFixture *fixture = fix; - QDict *ret, *val; + g_autoptr(QDict) ret = NULL; + QDict *val; const gchar *version; ret = qmp_fd(fixture->fd, "{'execute': 'guest-info'}"); @@ -302,14 +290,12 @@ static void test_qga_info(gconstpointer fix) val = qdict_get_qdict(ret, "return"); version = qdict_get_try_str(val, "version"); g_assert_cmpstr(version, ==, QEMU_VERSION); - - qobject_unref(ret); } static void test_qga_get_vcpus(gconstpointer fix) { const TestFixture *fixture = fix; - QDict *ret; + g_autoptr(QDict) ret = NULL; QList *list; const QListEntry *entry; @@ -322,14 +308,12 @@ static void test_qga_get_vcpus(gconstpointer fix) entry = qlist_first(list); g_assert(qdict_haskey(qobject_to(QDict, entry->value), "online")); g_assert(qdict_haskey(qobject_to(QDict, entry->value), "logical-id")); - - qobject_unref(ret); } static void test_qga_get_fsinfo(gconstpointer fix) { const TestFixture *fixture = fix; - QDict *ret; + g_autoptr(QDict) ret = NULL; QList *list; const QListEntry *entry; @@ -346,14 +330,13 
@@ static void test_qga_get_fsinfo(gconstpointer fix) g_assert(qdict_haskey(qobject_to(QDict, entry->value), "type")); g_assert(qdict_haskey(qobject_to(QDict, entry->value), "disk")); } - - qobject_unref(ret); } static void test_qga_get_memory_block_info(gconstpointer fix) { const TestFixture *fixture = fix; - QDict *ret, *val; + g_autoptr(QDict) ret = NULL; + QDict *val; int64_t size; ret = qmp_fd(fixture->fd, "{'execute': 'guest-get-memory-block-info'}"); @@ -366,14 +349,12 @@ static void test_qga_get_memory_block_info(gconstpointer fix) size = qdict_get_int(val, "size"); g_assert_cmpint(size, >, 0); } - - qobject_unref(ret); } static void test_qga_get_memory_blocks(gconstpointer fix) { const TestFixture *fixture = fix; - QDict *ret; + g_autoptr(QDict) ret = NULL; QList *list; const QListEntry *entry; @@ -391,14 +372,12 @@ static void test_qga_get_memory_blocks(gconstpointer fix) g_assert(qdict_haskey(qobject_to(QDict, entry->value), "online")); } } - - qobject_unref(ret); } static void test_qga_network_get_interfaces(gconstpointer fix) { const TestFixture *fixture = fix; - QDict *ret; + g_autoptr(QDict) ret = NULL; QList *list; const QListEntry *entry; @@ -410,8 +389,6 @@ static void test_qga_network_get_interfaces(gconstpointer fix) list = qdict_get_qlist(ret, "return"); entry = qlist_first(list); g_assert(qdict_haskey(qobject_to(QDict, entry->value), "name")); - - qobject_unref(ret); } static void test_qga_file_ops(gconstpointer fix) @@ -642,7 +619,7 @@ static void test_qga_file_write_read(gconstpointer fix) static void test_qga_get_time(gconstpointer fix) { const TestFixture *fixture = fix; - QDict *ret; + g_autoptr(QDict) ret = NULL; int64_t time; ret = qmp_fd(fixture->fd, "{'execute': 'guest-get-time'}"); @@ -651,11 +628,9 @@ static void test_qga_get_time(gconstpointer fix) time = qdict_get_int(ret, "return"); g_assert_cmpint(time, >, 0); - - qobject_unref(ret); } -static void test_qga_blacklist(gconstpointer data) +static void test_qga_blockedrpcs(gconstpointer data) { TestFixture fix; QDict *ret, *error; @@ -663,7 +638,7 @@ static void test_qga_blacklist(gconstpointer data) fixture_setup(&fix, "-b guest-ping,guest-get-time", NULL); - /* check blacklist */ + /* check blocked RPCs */ ret = qmp_fd(fix.fd, "{'execute': 'guest-ping'}"); g_assert_nonnull(ret); error = qdict_get_qdict(ret, "error"); @@ -693,18 +668,22 @@ static void test_qga_blacklist(gconstpointer data) static void test_qga_config(gconstpointer data) { GError *error = NULL; - char *cwd, *cmd, *out, *err, *str, **strv, **argv = NULL; + g_autofree char *out = NULL; + g_autofree char *err = NULL; + g_autofree char *cwd = NULL; + g_autofree char *cmd = NULL; + g_auto(GStrv) argv = NULL; + g_auto(GStrv) strv = NULL; + g_autoptr(GKeyFile) kf = NULL; + char *str; char *env[2]; int status; gsize n; - GKeyFile *kf; cwd = g_get_current_dir(); cmd = g_strdup_printf("%s%cqga%cqemu-ga -D", cwd, G_DIR_SEPARATOR, G_DIR_SEPARATOR); - g_free(cwd); g_shell_parse_argv(cmd, NULL, &argv, &error); - g_free(cmd); g_assert_no_error(error); env[0] = g_strdup_printf("QGA_CONF=tests%cdata%ctest-qga-config", @@ -712,7 +691,6 @@ static void test_qga_config(gconstpointer data) env[1] = NULL; g_spawn_sync(NULL, argv, env, 0, NULL, NULL, &out, &err, &status, &error); - g_strfreev(argv); g_assert_no_error(error); g_assert_cmpstr(err, ==, ""); @@ -752,25 +730,21 @@ static void test_qga_config(gconstpointer data) g_assert_true(g_key_file_get_boolean(kf, "general", "verbose", &error)); g_assert_no_error(error); - strv = g_key_file_get_string_list(kf, 
"general", "blacklist", &n, &error); + strv = g_key_file_get_string_list(kf, "general", "block-rpcs", &n, &error); g_assert_cmpint(n, ==, 2); g_assert_true(g_strv_contains((const char * const *)strv, "guest-ping")); g_assert_true(g_strv_contains((const char * const *)strv, "guest-get-time")); g_assert_no_error(error); - g_strfreev(strv); - g_free(out); - g_free(err); g_free(env[0]); - g_key_file_free(kf); } static void test_qga_fsfreeze_status(gconstpointer fix) { const TestFixture *fixture = fix; - QDict *ret; + g_autoptr(QDict) ret = NULL; const gchar *status; ret = qmp_fd(fixture->fd, "{'execute': 'guest-fsfreeze-status'}"); @@ -779,16 +753,15 @@ static void test_qga_fsfreeze_status(gconstpointer fix) status = qdict_get_try_str(ret, "return"); g_assert_cmpstr(status, ==, "thawed"); - - qobject_unref(ret); } static void test_qga_guest_exec(gconstpointer fix) { const TestFixture *fixture = fix; - QDict *ret, *val; + g_autoptr(QDict) ret = NULL; + QDict *val; const gchar *out; - guchar *decoded; + g_autofree guchar *decoded = NULL; int64_t pid, now, exitcode; gsize len; bool exited; @@ -827,14 +800,13 @@ static void test_qga_guest_exec(gconstpointer fix) decoded = g_base64_decode(out, &len); g_assert_cmpint(len, ==, 12); g_assert_cmpstr((char *)decoded, ==, "\" test_str \""); - g_free(decoded); - qobject_unref(ret); } static void test_qga_guest_exec_invalid(gconstpointer fix) { const TestFixture *fixture = fix; - QDict *ret, *error; + g_autoptr(QDict) ret = NULL; + QDict *error; const gchar *class, *desc; /* invalid command */ @@ -859,13 +831,13 @@ static void test_qga_guest_exec_invalid(gconstpointer fix) desc = qdict_get_str(error, "desc"); g_assert_cmpstr(class, ==, "GenericError"); g_assert_cmpint(strlen(desc), >, 0); - qobject_unref(ret); } static void test_qga_guest_get_host_name(gconstpointer fix) { const TestFixture *fixture = fix; - QDict *ret, *val; + g_autoptr(QDict) ret = NULL; + QDict *val; ret = qmp_fd(fixture->fd, "{'execute': 'guest-get-host-name'}"); g_assert_nonnull(ret); @@ -873,14 +845,13 @@ static void test_qga_guest_get_host_name(gconstpointer fix) val = qdict_get_qdict(ret, "return"); g_assert(qdict_haskey(val, "host-name")); - - qobject_unref(ret); } static void test_qga_guest_get_timezone(gconstpointer fix) { const TestFixture *fixture = fix; - QDict *ret, *val; + g_autoptr(QDict) ret = NULL; + QDict *val; ret = qmp_fd(fixture->fd, "{'execute': 'guest-get-timezone'}"); g_assert_nonnull(ret); @@ -889,14 +860,12 @@ static void test_qga_guest_get_timezone(gconstpointer fix) /* Make sure there's at least offset */ val = qdict_get_qdict(ret, "return"); g_assert(qdict_haskey(val, "offset")); - - qobject_unref(ret); } static void test_qga_guest_get_users(gconstpointer fix) { const TestFixture *fixture = fix; - QDict *ret; + g_autoptr(QDict) ret = NULL; QList *val; ret = qmp_fd(fixture->fd, "{'execute': 'guest-get-users'}"); @@ -906,23 +875,20 @@ static void test_qga_guest_get_users(gconstpointer fix) /* There is not much to test here */ val = qdict_get_qlist(ret, "return"); g_assert_nonnull(val); - - qobject_unref(ret); } static void test_qga_guest_get_osinfo(gconstpointer data) { TestFixture fixture; const gchar *str; - gchar *cwd, *env[2]; - QDict *ret, *val; + g_autoptr(QDict) ret = NULL; + char *env[2]; + QDict *val; - cwd = g_get_current_dir(); env[0] = g_strdup_printf( - "QGA_OS_RELEASE=%s%ctests%cdata%ctest-qga-os-release", - cwd, G_DIR_SEPARATOR, G_DIR_SEPARATOR, G_DIR_SEPARATOR); + "QGA_OS_RELEASE=%s%c..%cdata%ctest-qga-os-release", + g_test_get_dir(G_TEST_DIST), 
G_DIR_SEPARATOR, G_DIR_SEPARATOR, G_DIR_SEPARATOR); env[1] = NULL; - g_free(cwd); fixture_setup(&fixture, NULL, env); ret = qmp_fd(fixture.fd, "{'execute': 'guest-get-osinfo'}"); @@ -959,7 +925,6 @@ static void test_qga_guest_get_osinfo(gconstpointer data) g_assert_nonnull(str); g_assert_cmpstr(str, ==, "unit-test"); - qobject_unref(ret); g_free(env[0]); fixture_tear_down(&fixture, NULL); } @@ -969,6 +934,13 @@ int main(int argc, char **argv) TestFixture fix; int ret; +#ifdef QEMU_SANITIZE_THREAD + { + g_test_skip("tsan enabled, https://github.com/google/sanitizers/issues/1116"); + return 0; + } +#endif + setlocale (LC_ALL, ""); g_test_init(&argc, &argv, NULL); fixture_setup(&fix, NULL, NULL); @@ -997,7 +969,7 @@ int main(int argc, char **argv) g_test_add_data_func("/qga/fsfreeze-status", &fix, test_qga_fsfreeze_status); - g_test_add_data_func("/qga/blacklist", NULL, test_qga_blacklist); + g_test_add_data_func("/qga/blockedrpcs", NULL, test_qga_blockedrpcs); g_test_add_data_func("/qga/config", NULL, test_qga_config); g_test_add_data_func("/qga/guest-exec", &fix, test_qga_guest_exec); g_test_add_data_func("/qga/guest-exec-invalid", &fix, diff --git a/tests/unit/test-qgraph.c b/tests/unit/test-qgraph.c index f819430e2c..334c76c8e7 100644 --- a/tests/unit/test-qgraph.c +++ b/tests/unit/test-qgraph.c @@ -21,7 +21,7 @@ #include "../qtest/libqos/qgraph_internal.h" #define MACHINE_PC "x86_64/pc" -#define MACHINE_RASPI2 "arm/raspi2" +#define MACHINE_RASPI2 "arm/raspi2b" #define I440FX "i440FX-pcihost" #define PCIBUS_PC "pcibus-pc" #define SDHCI "sdhci" diff --git a/tests/unit/test-qmp-cmds.c b/tests/unit/test-qmp-cmds.c index 1b0b7d99df..6085c09995 100644 --- a/tests/unit/test-qmp-cmds.c +++ b/tests/unit/test-qmp-cmds.c @@ -51,6 +51,7 @@ FeatureStruct1 *qmp_test_features0(bool has_fs0, FeatureStruct0 *fs0, bool has_cfs1, CondFeatureStruct1 *cfs1, bool has_cfs2, CondFeatureStruct2 *cfs2, bool has_cfs3, CondFeatureStruct3 *cfs3, + bool has_cfs4, CondFeatureStruct4 *cfs4, Error **errp) { return g_new0(FeatureStruct1, 1); @@ -81,8 +82,8 @@ UserDefTwo *qmp_user_def_cmd2(UserDefOne *ud1a, Error **errp) { UserDefTwo *ret; - UserDefOne *ud1c = g_malloc0(sizeof(UserDefOne)); - UserDefOne *ud1d = g_malloc0(sizeof(UserDefOne)); + UserDefOne *ud1c = g_new0(UserDefOne, 1); + UserDefOne *ud1d = g_new0(UserDefOne, 1); ud1c->string = strdup(ud1a->string); ud1c->integer = ud1a->integer; @@ -118,7 +119,7 @@ void qmp_boxed_struct(UserDefZero *arg, Error **errp) { } -void qmp_boxed_union(UserDefListUnion *arg, Error **errp) +void qmp_boxed_union(UserDefFlatUnion *arg, Error **errp) { } @@ -126,22 +127,16 @@ void qmp_boxed_empty(Empty1 *arg, Error **errp) { } -__org_qemu_x_Union1 *qmp___org_qemu_x_command(__org_qemu_x_EnumList *a, - __org_qemu_x_StructList *b, - __org_qemu_x_Union2 *c, - __org_qemu_x_Alt *d, - Error **errp) +void qmp___org_qemu_x_command(__org_qemu_x_EnumList *a, + __org_qemu_x_StructList *b, + __org_qemu_x_Union *c, + __org_qemu_x_Alt *d, + Error **errp) { - __org_qemu_x_Union1 *ret = g_new0(__org_qemu_x_Union1, 1); - - ret->type = ORG_QEMU_X_UNION1_KIND___ORG_QEMU_X_BRANCH; - ret->u.__org_qemu_x_branch.data = strdup("blah1"); - /* Also test that 'wchar-t' was munged to 'q_wchar_t' */ if (b && b->value && !b->value->has_q_wchar_t) { b->value->q_wchar_t = 1; } - return ret; } @@ -349,23 +344,23 @@ static void test_dealloc_types(void) UserDefOne *ud1test, *ud1a, *ud1b; UserDefOneList *ud1list; - ud1test = g_malloc0(sizeof(UserDefOne)); + ud1test = g_new0(UserDefOne, 1); ud1test->integer = 42; 
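
Editorial note: the tests/unit/test-qmp-cmds.c hunks above replace g_malloc0(sizeof(T)) with g_new0(T, 1). A small self-contained sketch of that idiom follows; it is not part of the patch, and DemoStruct is a stand-in type, not a QAPI-generated one. The point of the change is that g_new0 derives the element size from the type and returns a correctly typed pointer, so the sizeof expression cannot drift out of sync with the variable it initialises.

#include <glib.h>

typedef struct DemoStruct {
    int integer;
    char *string;
} DemoStruct;

static DemoStruct *demo_alloc(void)
{
    /* Equivalent to g_malloc0(sizeof(DemoStruct)), but the size comes
     * from the type itself and the result is already DemoStruct *. */
    return g_new0(DemoStruct, 1);
}
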
ud1test->string = g_strdup("hi there 42"); qapi_free_UserDefOne(ud1test); - ud1a = g_malloc0(sizeof(UserDefOne)); + ud1a = g_new0(UserDefOne, 1); ud1a->integer = 43; ud1a->string = g_strdup("hi there 43"); - ud1b = g_malloc0(sizeof(UserDefOne)); + ud1b = g_new0(UserDefOne, 1); ud1b->integer = 44; ud1b->string = g_strdup("hi there 44"); - ud1list = g_malloc0(sizeof(UserDefOneList)); + ud1list = g_new0(UserDefOneList, 1); ud1list->value = ud1a; - ud1list->next = g_malloc0(sizeof(UserDefOneList)); + ud1list->next = g_new0(UserDefOneList, 1); ud1list->next->value = ud1b; qapi_free_UserDefOneList(ud1list); diff --git a/tests/unit/test-qmp-event.c b/tests/unit/test-qmp-event.c index d58c3b78f2..7d961d716a 100644 --- a/tests/unit/test-qmp-event.c +++ b/tests/unit/test-qmp-event.c @@ -13,7 +13,6 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/compat-policy.h" #include "qapi/error.h" #include "qapi/qmp/qbool.h" diff --git a/tests/unit/test-qobject-input-visitor.c b/tests/unit/test-qobject-input-visitor.c index e41b91a2a6..5f614afdbf 100644 --- a/tests/unit/test-qobject-input-visitor.c +++ b/tests/unit/test-qobject-input-visitor.c @@ -13,7 +13,6 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/error.h" #include "qapi/qapi-visit-introspect.h" #include "qapi/qobject-input-visitor.h" @@ -64,7 +63,7 @@ static Visitor *test_init_internal(TestInputVisitorData *data, bool keyval, return data->qiv; } -static GCC_FMT_ATTR(3, 4) +static G_GNUC_PRINTF(3, 4) Visitor *visitor_input_test_init_full(TestInputVisitorData *data, bool keyval, const char *json_string, ...) @@ -79,7 +78,7 @@ Visitor *visitor_input_test_init_full(TestInputVisitorData *data, return v; } -static GCC_FMT_ATTR(2, 3) +static G_GNUC_PRINTF(2, 3) Visitor *visitor_input_test_init(TestInputVisitorData *data, const char *json_string, ...) 
{ @@ -448,9 +447,8 @@ static void test_visitor_in_list(TestInputVisitorData *data, g_assert(head != NULL); for (i = 0, item = head; item; item = item->next, i++) { - char string[12]; + g_autofree char *string = g_strdup_printf("string%d", i); - snprintf(string, sizeof(string), "string%d", i); g_assert_cmpstr(item->value->string, ==, string); g_assert_cmpint(item->value->integer, ==, 42 + i); } @@ -464,6 +462,151 @@ static void test_visitor_in_list(TestInputVisitorData *data, g_assert(!head); } +static void test_visitor_in_list_struct(TestInputVisitorData *data, + const void *unused) +{ + const char *int_member[] = { + "integer", "s8", "s16", "s32", "s64", "u8", "u16", "u32", "u64" }; + g_autoptr(GString) json = g_string_new(""); + int i, j; + const char *sep; + g_autoptr(ArrayStruct) arrs = NULL; + Visitor *v; + intList *int_list; + int8List *s8_list; + int16List *s16_list; + int32List *s32_list; + int64List *s64_list; + uint8List *u8_list; + uint16List *u16_list; + uint32List *u32_list; + uint64List *u64_list; + numberList *num_list; + boolList *bool_list; + strList *str_list; + + g_string_append_printf(json, "{"); + + for (i = 0; i < G_N_ELEMENTS(int_member); i++) { + g_string_append_printf(json, "'%s': [", int_member[i]); + sep = ""; + for (j = 0; j < 32; j++) { + g_string_append_printf(json, "%s%d", sep, j); + sep = ", "; + } + g_string_append_printf(json, "], "); + } + + g_string_append_printf(json, "'number': ["); + sep = ""; + for (i = 0; i < 32; i++) { + g_string_append_printf(json, "%s%f", sep, (double)i / 3); + sep = ", "; + } + g_string_append_printf(json, "], "); + + g_string_append_printf(json, "'boolean': ["); + sep = ""; + for (i = 0; i < 32; i++) { + g_string_append_printf(json, "%s%s", + sep, i % 3 == 0 ? "true" : "false"); + sep = ", "; + } + g_string_append_printf(json, "], "); + + g_string_append_printf(json, "'string': ["); + sep = ""; + for (i = 0; i < 32; i++) { + g_string_append_printf(json, "%s'%d'", sep, i); + sep = ", "; + } + g_string_append_printf(json, "]"); + + g_string_append_printf(json, "}"); + + v = visitor_input_test_init_raw(data, json->str); + visit_type_ArrayStruct(v, NULL, &arrs, &error_abort); + + i = 0; + for (int_list = arrs->integer; int_list; int_list = int_list->next) { + g_assert_cmpint(int_list->value, ==, i); + i++; + } + + i = 0; + for (s8_list = arrs->s8; s8_list; s8_list = s8_list->next) { + g_assert_cmpint(s8_list->value, ==, i); + i++; + } + + i = 0; + for (s16_list = arrs->s16; s16_list; s16_list = s16_list->next) { + g_assert_cmpint(s16_list->value, ==, i); + i++; + } + + i = 0; + for (s32_list = arrs->s32; s32_list; s32_list = s32_list->next) { + g_assert_cmpint(s32_list->value, ==, i); + i++; + } + + i = 0; + for (s64_list = arrs->s64; s64_list; s64_list = s64_list->next) { + g_assert_cmpint(s64_list->value, ==, i); + i++; + } + + i = 0; + for (u8_list = arrs->u8; u8_list; u8_list = u8_list->next) { + g_assert_cmpint(u8_list->value, ==, i); + i++; + } + + i = 0; + for (u16_list = arrs->u16; u16_list; u16_list = u16_list->next) { + g_assert_cmpint(u16_list->value, ==, i); + i++; + } + + i = 0; + for (u32_list = arrs->u32; u32_list; u32_list = u32_list->next) { + g_assert_cmpint(u32_list->value, ==, i); + i++; + } + + i = 0; + for (u64_list = arrs->u64; u64_list; u64_list = u64_list->next) { + g_assert_cmpint(u64_list->value, ==, i); + i++; + } + + i = 0; + for (num_list = arrs->number; num_list; num_list = num_list->next) { + char expected[32], actual[32]; + + sprintf(expected, "%.6f", (double)i / 3); + sprintf(actual, "%.6f", 
num_list->value); + g_assert_cmpstr(expected, ==, actual); + i++; + } + + i = 0; + for (bool_list = arrs->boolean; bool_list; bool_list = bool_list->next) { + g_assert_cmpint(bool_list->value, ==, i % 3 == 0); + i++; + } + + i = 0; + for (str_list = arrs->string; str_list; str_list = str_list->next) { + char expected[32]; + + sprintf(expected, "%d", i); + g_assert_cmpstr(str_list->value, ==, expected); + i++; + } +} + static void test_visitor_in_any(TestInputVisitorData *data, const void *unused) { @@ -631,6 +774,7 @@ static void test_visitor_in_alternate_number(TestInputVisitorData *data, AltEnumNum *aen; AltNumEnum *ans; AltEnumInt *asi; + AltListInt *ali; /* Parsing an int */ @@ -657,6 +801,12 @@ static void test_visitor_in_alternate_number(TestInputVisitorData *data, g_assert_cmpint(asi->u.i, ==, 42); qapi_free_AltEnumInt(asi); + v = visitor_input_test_init(data, "42"); + visit_type_AltListInt(v, NULL, &ali, &error_abort); + g_assert_cmpint(ali->type, ==, QTYPE_QNUM); + g_assert_cmpint(ali->u.i, ==, 42); + qapi_free_AltListInt(ali); + /* Parsing a double */ v = visitor_input_test_init(data, "42.5"); @@ -682,274 +832,35 @@ static void test_visitor_in_alternate_number(TestInputVisitorData *data, qapi_free_AltEnumInt(asi); } -static void test_list_union_integer_helper(TestInputVisitorData *data, - const void *unused, - UserDefListUnionKind kind) +static void test_visitor_in_alternate_list(TestInputVisitorData *data, + const void *unused) { - g_autoptr(UserDefListUnion) cvalue = NULL; + intList *item; Visitor *v; - GString *gstr_list = g_string_new(""); - GString *gstr_union = g_string_new(""); + AltListInt *ali; int i; - for (i = 0; i < 32; i++) { - g_string_append_printf(gstr_list, "%d", i); - if (i != 31) { - g_string_append(gstr_list, ", "); - } - } - g_string_append_printf(gstr_union, "{ 'type': '%s', 'data': [ %s ] }", - UserDefListUnionKind_str(kind), - gstr_list->str); - v = visitor_input_test_init_raw(data, gstr_union->str); + v = visitor_input_test_init(data, "[ 42, 43, 44 ]"); + visit_type_AltListInt(v, NULL, &ali, &error_abort); + g_assert(ali != NULL); - visit_type_UserDefListUnion(v, NULL, &cvalue, &error_abort); - g_assert(cvalue != NULL); - g_assert_cmpint(cvalue->type, ==, kind); - - switch (kind) { - case USER_DEF_LIST_UNION_KIND_INTEGER: { - intList *elem = NULL; - for (i = 0, elem = cvalue->u.integer.data; - elem; elem = elem->next, i++) { - g_assert_cmpint(elem->value, ==, i); - } - break; - } - case USER_DEF_LIST_UNION_KIND_S8: { - int8List *elem = NULL; - for (i = 0, elem = cvalue->u.s8.data; elem; elem = elem->next, i++) { - g_assert_cmpint(elem->value, ==, i); - } - break; - } - case USER_DEF_LIST_UNION_KIND_S16: { - int16List *elem = NULL; - for (i = 0, elem = cvalue->u.s16.data; elem; elem = elem->next, i++) { - g_assert_cmpint(elem->value, ==, i); - } - break; - } - case USER_DEF_LIST_UNION_KIND_S32: { - int32List *elem = NULL; - for (i = 0, elem = cvalue->u.s32.data; elem; elem = elem->next, i++) { - g_assert_cmpint(elem->value, ==, i); - } - break; - } - case USER_DEF_LIST_UNION_KIND_S64: { - int64List *elem = NULL; - for (i = 0, elem = cvalue->u.s64.data; elem; elem = elem->next, i++) { - g_assert_cmpint(elem->value, ==, i); - } - break; - } - case USER_DEF_LIST_UNION_KIND_U8: { - uint8List *elem = NULL; - for (i = 0, elem = cvalue->u.u8.data; elem; elem = elem->next, i++) { - g_assert_cmpint(elem->value, ==, i); - } - break; - } - case USER_DEF_LIST_UNION_KIND_U16: { - uint16List *elem = NULL; - for (i = 0, elem = cvalue->u.u16.data; elem; elem = elem->next, 
i++) { - g_assert_cmpint(elem->value, ==, i); - } - break; - } - case USER_DEF_LIST_UNION_KIND_U32: { - uint32List *elem = NULL; - for (i = 0, elem = cvalue->u.u32.data; elem; elem = elem->next, i++) { - g_assert_cmpint(elem->value, ==, i); - } - break; - } - case USER_DEF_LIST_UNION_KIND_U64: { - uint64List *elem = NULL; - for (i = 0, elem = cvalue->u.u64.data; elem; elem = elem->next, i++) { - g_assert_cmpint(elem->value, ==, i); - } - break; - } - default: - g_assert_not_reached(); + g_assert_cmpint(ali->type, ==, QTYPE_QLIST); + for (i = 0, item = ali->u.l; item; item = item->next, i++) { + g_assert_cmpint(item->value, ==, 42 + i); } - g_string_free(gstr_union, true); - g_string_free(gstr_list, true); -} + qapi_free_AltListInt(ali); + ali = NULL; -static void test_visitor_in_list_union_int(TestInputVisitorData *data, - const void *unused) -{ - test_list_union_integer_helper(data, unused, - USER_DEF_LIST_UNION_KIND_INTEGER); -} + /* An empty list is valid */ + v = visitor_input_test_init(data, "[]"); + visit_type_AltListInt(v, NULL, &ali, &error_abort); + g_assert(ali != NULL); -static void test_visitor_in_list_union_int8(TestInputVisitorData *data, - const void *unused) -{ - test_list_union_integer_helper(data, unused, - USER_DEF_LIST_UNION_KIND_S8); -} - -static void test_visitor_in_list_union_int16(TestInputVisitorData *data, - const void *unused) -{ - test_list_union_integer_helper(data, unused, - USER_DEF_LIST_UNION_KIND_S16); -} - -static void test_visitor_in_list_union_int32(TestInputVisitorData *data, - const void *unused) -{ - test_list_union_integer_helper(data, unused, - USER_DEF_LIST_UNION_KIND_S32); -} - -static void test_visitor_in_list_union_int64(TestInputVisitorData *data, - const void *unused) -{ - test_list_union_integer_helper(data, unused, - USER_DEF_LIST_UNION_KIND_S64); -} - -static void test_visitor_in_list_union_uint8(TestInputVisitorData *data, - const void *unused) -{ - test_list_union_integer_helper(data, unused, - USER_DEF_LIST_UNION_KIND_U8); -} - -static void test_visitor_in_list_union_uint16(TestInputVisitorData *data, - const void *unused) -{ - test_list_union_integer_helper(data, unused, - USER_DEF_LIST_UNION_KIND_U16); -} - -static void test_visitor_in_list_union_uint32(TestInputVisitorData *data, - const void *unused) -{ - test_list_union_integer_helper(data, unused, - USER_DEF_LIST_UNION_KIND_U32); -} - -static void test_visitor_in_list_union_uint64(TestInputVisitorData *data, - const void *unused) -{ - test_list_union_integer_helper(data, unused, - USER_DEF_LIST_UNION_KIND_U64); -} - -static void test_visitor_in_list_union_bool(TestInputVisitorData *data, - const void *unused) -{ - g_autoptr(UserDefListUnion) cvalue = NULL; - boolList *elem = NULL; - Visitor *v; - GString *gstr_list = g_string_new(""); - GString *gstr_union = g_string_new(""); - int i; - - for (i = 0; i < 32; i++) { - g_string_append_printf(gstr_list, "%s", - (i % 3 == 0) ? "true" : "false"); - if (i != 31) { - g_string_append(gstr_list, ", "); - } - } - g_string_append_printf(gstr_union, "{ 'type': 'boolean', 'data': [ %s ] }", - gstr_list->str); - v = visitor_input_test_init_raw(data, gstr_union->str); - - visit_type_UserDefListUnion(v, NULL, &cvalue, &error_abort); - g_assert(cvalue != NULL); - g_assert_cmpint(cvalue->type, ==, USER_DEF_LIST_UNION_KIND_BOOLEAN); - - for (i = 0, elem = cvalue->u.boolean.data; elem; elem = elem->next, i++) { - g_assert_cmpint(elem->value, ==, (i % 3 == 0) ? 
1 : 0); - } - - g_string_free(gstr_union, true); - g_string_free(gstr_list, true); -} - -static void test_visitor_in_list_union_string(TestInputVisitorData *data, - const void *unused) -{ - g_autoptr(UserDefListUnion) cvalue = NULL; - strList *elem = NULL; - Visitor *v; - GString *gstr_list = g_string_new(""); - GString *gstr_union = g_string_new(""); - int i; - - for (i = 0; i < 32; i++) { - g_string_append_printf(gstr_list, "'%d'", i); - if (i != 31) { - g_string_append(gstr_list, ", "); - } - } - g_string_append_printf(gstr_union, "{ 'type': 'string', 'data': [ %s ] }", - gstr_list->str); - v = visitor_input_test_init_raw(data, gstr_union->str); - - visit_type_UserDefListUnion(v, NULL, &cvalue, &error_abort); - g_assert(cvalue != NULL); - g_assert_cmpint(cvalue->type, ==, USER_DEF_LIST_UNION_KIND_STRING); - - for (i = 0, elem = cvalue->u.string.data; elem; elem = elem->next, i++) { - gchar str[8]; - sprintf(str, "%d", i); - g_assert_cmpstr(elem->value, ==, str); - } - - g_string_free(gstr_union, true); - g_string_free(gstr_list, true); -} - -#define DOUBLE_STR_MAX 16 - -static void test_visitor_in_list_union_number(TestInputVisitorData *data, - const void *unused) -{ - g_autoptr(UserDefListUnion) cvalue = NULL; - numberList *elem = NULL; - Visitor *v; - GString *gstr_list = g_string_new(""); - GString *gstr_union = g_string_new(""); - int i; - - for (i = 0; i < 32; i++) { - g_string_append_printf(gstr_list, "%f", (double)i / 3); - if (i != 31) { - g_string_append(gstr_list, ", "); - } - } - g_string_append_printf(gstr_union, "{ 'type': 'number', 'data': [ %s ] }", - gstr_list->str); - v = visitor_input_test_init_raw(data, gstr_union->str); - - visit_type_UserDefListUnion(v, NULL, &cvalue, &error_abort); - g_assert(cvalue != NULL); - g_assert_cmpint(cvalue->type, ==, USER_DEF_LIST_UNION_KIND_NUMBER); - - for (i = 0, elem = cvalue->u.number.data; elem; elem = elem->next, i++) { - GString *double_expected = g_string_new(""); - GString *double_actual = g_string_new(""); - - g_string_printf(double_expected, "%.6f", (double)i / 3); - g_string_printf(double_actual, "%.6f", elem->value); - g_assert_cmpstr(double_expected->str, ==, double_actual->str); - - g_string_free(double_expected, true); - g_string_free(double_actual, true); - } - - g_string_free(gstr_union, true); - g_string_free(gstr_list, true); + g_assert_cmpint(ali->type, ==, QTYPE_QLIST); + g_assert(!ali->u.l); + qapi_free_AltListInt(ali); + ali = NULL; } static void input_visitor_test_add(const char *testpath, @@ -1184,21 +1095,6 @@ static void test_visitor_in_fail_list_nested(TestInputVisitorData *data, visit_end_list(v, NULL); } -static void test_visitor_in_fail_union_list(TestInputVisitorData *data, - const void *unused) -{ - UserDefListUnion *tmp = NULL; - Error *err = NULL; - Visitor *v; - - v = visitor_input_test_init(data, - "{ 'type': 'integer', 'data' : [ 'string' ] }"); - - visit_type_UserDefListUnion(v, NULL, &tmp, &err); - error_free_or_abort(&err); - g_assert(!tmp); -} - static void test_visitor_in_fail_union_flat(TestInputVisitorData *data, const void *unused) { @@ -1206,7 +1102,7 @@ static void test_visitor_in_fail_union_flat(TestInputVisitorData *data, Error *err = NULL; Visitor *v; - v = visitor_input_test_init(data, "{ 'string': 'c', 'integer': 41, 'boolean': true }"); + v = visitor_input_test_init(data, "{ 'enum1': 'value2', 'string': 'c', 'integer': 41, 'boolean': true }"); visit_type_UserDefFlatUnion(v, NULL, &tmp, &err); error_free_or_abort(&err); @@ -1310,6 +1206,8 @@ int main(int argc, char **argv) NULL, 
test_visitor_in_struct); input_visitor_test_add("/visitor/input/struct-nested", NULL, test_visitor_in_struct_nested); + input_visitor_test_add("/visitor/input/list2", + NULL, test_visitor_in_list_struct); input_visitor_test_add("/visitor/input/list", NULL, test_visitor_in_list); input_visitor_test_add("/visitor/input/any", @@ -1326,30 +1224,8 @@ int main(int argc, char **argv) NULL, test_visitor_in_wrong_type); input_visitor_test_add("/visitor/input/alternate-number", NULL, test_visitor_in_alternate_number); - input_visitor_test_add("/visitor/input/list_union/int", - NULL, test_visitor_in_list_union_int); - input_visitor_test_add("/visitor/input/list_union/int8", - NULL, test_visitor_in_list_union_int8); - input_visitor_test_add("/visitor/input/list_union/int16", - NULL, test_visitor_in_list_union_int16); - input_visitor_test_add("/visitor/input/list_union/int32", - NULL, test_visitor_in_list_union_int32); - input_visitor_test_add("/visitor/input/list_union/int64", - NULL, test_visitor_in_list_union_int64); - input_visitor_test_add("/visitor/input/list_union/uint8", - NULL, test_visitor_in_list_union_uint8); - input_visitor_test_add("/visitor/input/list_union/uint16", - NULL, test_visitor_in_list_union_uint16); - input_visitor_test_add("/visitor/input/list_union/uint32", - NULL, test_visitor_in_list_union_uint32); - input_visitor_test_add("/visitor/input/list_union/uint64", - NULL, test_visitor_in_list_union_uint64); - input_visitor_test_add("/visitor/input/list_union/bool", - NULL, test_visitor_in_list_union_bool); - input_visitor_test_add("/visitor/input/list_union/str", - NULL, test_visitor_in_list_union_string); - input_visitor_test_add("/visitor/input/list_union/number", - NULL, test_visitor_in_list_union_number); + input_visitor_test_add("/visitor/input/alternate-list", + NULL, test_visitor_in_alternate_list); input_visitor_test_add("/visitor/input/fail/struct", NULL, test_visitor_in_fail_struct); input_visitor_test_add("/visitor/input/fail/struct-nested", @@ -1368,8 +1244,6 @@ int main(int argc, char **argv) NULL, test_visitor_in_fail_union_flat_no_discrim); input_visitor_test_add("/visitor/input/fail/alternate", NULL, test_visitor_in_fail_alternate); - input_visitor_test_add("/visitor/input/fail/union-list", - NULL, test_visitor_in_fail_union_list); input_visitor_test_add("/visitor/input/qapi-introspect", NULL, test_visitor_in_qmp_introspect); diff --git a/tests/unit/test-qobject-output-visitor.c b/tests/unit/test-qobject-output-visitor.c index 9dc1e075e7..66b27fad66 100644 --- a/tests/unit/test-qobject-output-visitor.c +++ b/tests/unit/test-qobject-output-visitor.c @@ -12,7 +12,6 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/error.h" #include "qapi/qobject-output-visitor.h" #include "test-qapi-visit.h" @@ -338,7 +337,7 @@ static void test_visitor_out_union_flat(TestOutputVisitorData *data, { QDict *qdict; - UserDefFlatUnion *tmp = g_malloc0(sizeof(UserDefFlatUnion)); + UserDefFlatUnion *tmp = g_new0(UserDefFlatUnion, 1); tmp->enum1 = ENUM_ONE_VALUE1; tmp->string = g_strdup("str"); tmp->integer = 41; @@ -437,289 +436,118 @@ static void test_visitor_out_null(TestOutputVisitorData *data, g_assert(qobject_type(nil) == QTYPE_QNULL); } -static void init_list_union(UserDefListUnion *cvalue) -{ - int i; - switch (cvalue->type) { - case USER_DEF_LIST_UNION_KIND_INTEGER: { - intList **tail = &cvalue->u.integer.data; - for (i = 0; i < 32; i++) { - QAPI_LIST_APPEND(tail, i); - } - break; - } - case USER_DEF_LIST_UNION_KIND_S8: { - int8List **tail = &cvalue->u.s8.data; - 
for (i = 0; i < 32; i++) { - QAPI_LIST_APPEND(tail, i); - } - break; - } - case USER_DEF_LIST_UNION_KIND_S16: { - int16List **tail = &cvalue->u.s16.data; - for (i = 0; i < 32; i++) { - QAPI_LIST_APPEND(tail, i); - } - break; - } - case USER_DEF_LIST_UNION_KIND_S32: { - int32List **tail = &cvalue->u.s32.data; - for (i = 0; i < 32; i++) { - QAPI_LIST_APPEND(tail, i); - } - break; - } - case USER_DEF_LIST_UNION_KIND_S64: { - int64List **tail = &cvalue->u.s64.data; - for (i = 0; i < 32; i++) { - QAPI_LIST_APPEND(tail, i); - } - break; - } - case USER_DEF_LIST_UNION_KIND_U8: { - uint8List **tail = &cvalue->u.u8.data; - for (i = 0; i < 32; i++) { - QAPI_LIST_APPEND(tail, i); - } - break; - } - case USER_DEF_LIST_UNION_KIND_U16: { - uint16List **tail = &cvalue->u.u16.data; - for (i = 0; i < 32; i++) { - QAPI_LIST_APPEND(tail, i); - } - break; - } - case USER_DEF_LIST_UNION_KIND_U32: { - uint32List **tail = &cvalue->u.u32.data; - for (i = 0; i < 32; i++) { - QAPI_LIST_APPEND(tail, i); - } - break; - } - case USER_DEF_LIST_UNION_KIND_U64: { - uint64List **tail = &cvalue->u.u64.data; - for (i = 0; i < 32; i++) { - QAPI_LIST_APPEND(tail, i); - } - break; - } - case USER_DEF_LIST_UNION_KIND_BOOLEAN: { - boolList **tail = &cvalue->u.boolean.data; - for (i = 0; i < 32; i++) { - QAPI_LIST_APPEND(tail, QEMU_IS_ALIGNED(i, 3)); - } - break; - } - case USER_DEF_LIST_UNION_KIND_STRING: { - strList **tail = &cvalue->u.string.data; - for (i = 0; i < 32; i++) { - QAPI_LIST_APPEND(tail, g_strdup_printf("%d", i)); - } - break; - } - case USER_DEF_LIST_UNION_KIND_NUMBER: { - numberList **tail = &cvalue->u.number.data; - for (i = 0; i < 32; i++) { - QAPI_LIST_APPEND(tail, (double)i / 3); - } - break; - } - default: - g_assert_not_reached(); - } -} - -static void check_list_union(QObject *qobj, - UserDefListUnionKind kind) +static void test_visitor_out_list_struct(TestOutputVisitorData *data, + const void *unused) { + const char *int_member[] = { + "integer", "s8", "s16", "s32", "s64", "u8", "u16", "u32", "u64" }; + g_autoptr(ArrayStruct) arrs = g_new0(ArrayStruct, 1); + int i, j; QDict *qdict; QList *qlist; - int i; + QListEntry *e; - qdict = qobject_to(QDict, qobj); - g_assert(qdict); - g_assert(qdict_haskey(qdict, "data")); - qlist = qlist_copy(qobject_to(QList, qdict_get(qdict, "data"))); - - switch (kind) { - case USER_DEF_LIST_UNION_KIND_U8: - case USER_DEF_LIST_UNION_KIND_U16: - case USER_DEF_LIST_UNION_KIND_U32: - case USER_DEF_LIST_UNION_KIND_U64: - for (i = 0; i < 32; i++) { - QObject *tmp; - QNum *qvalue; - uint64_t val; - - tmp = qlist_peek(qlist); - g_assert(tmp); - qvalue = qobject_to(QNum, tmp); - g_assert(qnum_get_try_uint(qvalue, &val)); - g_assert_cmpint(val, ==, i); - qobject_unref(qlist_pop(qlist)); - } - break; - - case USER_DEF_LIST_UNION_KIND_S8: - case USER_DEF_LIST_UNION_KIND_S16: - case USER_DEF_LIST_UNION_KIND_S32: - case USER_DEF_LIST_UNION_KIND_S64: - /* - * All integer elements in JSON arrays get stored into QNums - * when we convert to QObjects, so we can check them all in - * the same fashion, so simply fall through here. 
- */ - case USER_DEF_LIST_UNION_KIND_INTEGER: - for (i = 0; i < 32; i++) { - QObject *tmp; - QNum *qvalue; - int64_t val; - - tmp = qlist_peek(qlist); - g_assert(tmp); - qvalue = qobject_to(QNum, tmp); - g_assert(qnum_get_try_int(qvalue, &val)); - g_assert_cmpint(val, ==, i); - qobject_unref(qlist_pop(qlist)); - } - break; - case USER_DEF_LIST_UNION_KIND_BOOLEAN: - for (i = 0; i < 32; i++) { - QObject *tmp; - QBool *qvalue; - tmp = qlist_peek(qlist); - g_assert(tmp); - qvalue = qobject_to(QBool, tmp); - g_assert_cmpint(qbool_get_bool(qvalue), ==, i % 3 == 0); - qobject_unref(qlist_pop(qlist)); - } - break; - case USER_DEF_LIST_UNION_KIND_STRING: - for (i = 0; i < 32; i++) { - QObject *tmp; - QString *qvalue; - gchar str[8]; - tmp = qlist_peek(qlist); - g_assert(tmp); - qvalue = qobject_to(QString, tmp); - sprintf(str, "%d", i); - g_assert_cmpstr(qstring_get_str(qvalue), ==, str); - qobject_unref(qlist_pop(qlist)); - } - break; - case USER_DEF_LIST_UNION_KIND_NUMBER: - for (i = 0; i < 32; i++) { - QObject *tmp; - QNum *qvalue; - GString *double_expected = g_string_new(""); - GString *double_actual = g_string_new(""); - - tmp = qlist_peek(qlist); - g_assert(tmp); - qvalue = qobject_to(QNum, tmp); - g_string_printf(double_expected, "%.6f", (double)i / 3); - g_string_printf(double_actual, "%.6f", qnum_get_double(qvalue)); - g_assert_cmpstr(double_actual->str, ==, double_expected->str); - - qobject_unref(qlist_pop(qlist)); - g_string_free(double_expected, true); - g_string_free(double_actual, true); - } - break; - default: - g_assert_not_reached(); + for (i = 31; i >= 0; i--) { + QAPI_LIST_PREPEND(arrs->integer, i); } - qobject_unref(qlist); -} -static void test_list_union(TestOutputVisitorData *data, - const void *unused, - UserDefListUnionKind kind) -{ - UserDefListUnion *cvalue = g_new0(UserDefListUnion, 1); - QObject *obj; + for (i = 31; i >= 0; i--) { + QAPI_LIST_PREPEND(arrs->s8, i); + } - cvalue->type = kind; - init_list_union(cvalue); + for (i = 31; i >= 0; i--) { + QAPI_LIST_PREPEND(arrs->s16, i); + } - visit_type_UserDefListUnion(data->ov, NULL, &cvalue, &error_abort); + for (i = 31; i >= 0; i--) { + QAPI_LIST_PREPEND(arrs->s32, i); + } - obj = visitor_get(data); - check_list_union(obj, cvalue->type); - qapi_free_UserDefListUnion(cvalue); -} + for (i = 31; i >= 0; i--) { + QAPI_LIST_PREPEND(arrs->s64, i); + } -static void test_visitor_out_list_union_int(TestOutputVisitorData *data, - const void *unused) -{ - test_list_union(data, unused, USER_DEF_LIST_UNION_KIND_INTEGER); -} + for (i = 31; i >= 0; i--) { + QAPI_LIST_PREPEND(arrs->u8, i); + } -static void test_visitor_out_list_union_int8(TestOutputVisitorData *data, - const void *unused) -{ - test_list_union(data, unused, USER_DEF_LIST_UNION_KIND_S8); -} + for (i = 31; i >= 0; i--) { + QAPI_LIST_PREPEND(arrs->u16, i); + } -static void test_visitor_out_list_union_int16(TestOutputVisitorData *data, - const void *unused) -{ - test_list_union(data, unused, USER_DEF_LIST_UNION_KIND_S16); -} + for (i = 31; i >= 0; i--) { + QAPI_LIST_PREPEND(arrs->u32, i); + } -static void test_visitor_out_list_union_int32(TestOutputVisitorData *data, - const void *unused) -{ - test_list_union(data, unused, USER_DEF_LIST_UNION_KIND_S32); -} + for (i = 31; i >= 0; i--) { + QAPI_LIST_PREPEND(arrs->u64, i); + } -static void test_visitor_out_list_union_int64(TestOutputVisitorData *data, - const void *unused) -{ - test_list_union(data, unused, USER_DEF_LIST_UNION_KIND_S64); -} + for (i = 31; i >= 0; i--) { + QAPI_LIST_PREPEND(arrs->number, (double)i / 3); + } 
-static void test_visitor_out_list_union_uint8(TestOutputVisitorData *data, - const void *unused) -{ - test_list_union(data, unused, USER_DEF_LIST_UNION_KIND_U8); -} + for (i = 31; i >= 0; i--) { + QAPI_LIST_PREPEND(arrs->boolean, QEMU_IS_ALIGNED(i, 3)); + } -static void test_visitor_out_list_union_uint16(TestOutputVisitorData *data, - const void *unused) -{ - test_list_union(data, unused, USER_DEF_LIST_UNION_KIND_U16); -} + for (i = 31; i >= 0; i--) { + QAPI_LIST_PREPEND(arrs->string, g_strdup_printf("%d", i)); + } -static void test_visitor_out_list_union_uint32(TestOutputVisitorData *data, - const void *unused) -{ - test_list_union(data, unused, USER_DEF_LIST_UNION_KIND_U32); -} + visit_type_ArrayStruct(data->ov, NULL, &arrs, &error_abort); -static void test_visitor_out_list_union_uint64(TestOutputVisitorData *data, - const void *unused) -{ - test_list_union(data, unused, USER_DEF_LIST_UNION_KIND_U64); -} + qdict = qobject_to(QDict, visitor_get(data)); + g_assert(qdict); -static void test_visitor_out_list_union_bool(TestOutputVisitorData *data, - const void *unused) -{ - test_list_union(data, unused, USER_DEF_LIST_UNION_KIND_BOOLEAN); -} + for (i = 0; i < G_N_ELEMENTS(int_member); i++) { + qlist = qdict_get_qlist(qdict, int_member[i]); + g_assert(qlist); + j = 0; + QLIST_FOREACH_ENTRY(qlist, e) { + QNum *qvalue = qobject_to(QNum, qlist_entry_obj(e)); + g_assert(qvalue); + g_assert_cmpint(qnum_get_int(qvalue), ==, j); + j++; + } + } -static void test_visitor_out_list_union_str(TestOutputVisitorData *data, - const void *unused) -{ - test_list_union(data, unused, USER_DEF_LIST_UNION_KIND_STRING); -} + qlist = qdict_get_qlist(qdict, "number"); + g_assert(qlist); + i = 0; + QLIST_FOREACH_ENTRY(qlist, e) { + QNum *qvalue = qobject_to(QNum, qlist_entry_obj(e)); + char expected[32], actual[32]; -static void test_visitor_out_list_union_number(TestOutputVisitorData *data, - const void *unused) -{ - test_list_union(data, unused, USER_DEF_LIST_UNION_KIND_NUMBER); + g_assert(qvalue); + sprintf(expected, "%.6f", (double)i / 3); + sprintf(actual, "%.6f", qnum_get_double(qvalue)); + g_assert_cmpstr(actual, ==, expected); + i++; + } + + qlist = qdict_get_qlist(qdict, "boolean"); + g_assert(qlist); + i = 0; + QLIST_FOREACH_ENTRY(qlist, e) { + QBool *qvalue = qobject_to(QBool, qlist_entry_obj(e)); + g_assert(qvalue); + g_assert_cmpint(qbool_get_bool(qvalue), ==, i % 3 == 0); + i++; + } + + qlist = qdict_get_qlist(qdict, "string"); + g_assert(qlist); + i = 0; + QLIST_FOREACH_ENTRY(qlist, e) { + QString *qvalue = qobject_to(QString, qlist_entry_obj(e)); + char expected[32]; + + g_assert(qvalue); + sprintf(expected, "%d", i); + g_assert_cmpstr(qstring_get_str(qvalue), ==, expected); + i++; + } } static void output_visitor_test_add(const char *testpath, @@ -764,42 +592,8 @@ int main(int argc, char **argv) &out_visitor_data, test_visitor_out_alternate); output_visitor_test_add("/visitor/output/null", &out_visitor_data, test_visitor_out_null); - output_visitor_test_add("/visitor/output/list_union/int", - &out_visitor_data, - test_visitor_out_list_union_int); - output_visitor_test_add("/visitor/output/list_union/int8", - &out_visitor_data, - test_visitor_out_list_union_int8); - output_visitor_test_add("/visitor/output/list_union/int16", - &out_visitor_data, - test_visitor_out_list_union_int16); - output_visitor_test_add("/visitor/output/list_union/int32", - &out_visitor_data, - test_visitor_out_list_union_int32); - output_visitor_test_add("/visitor/output/list_union/int64", - &out_visitor_data, - 
test_visitor_out_list_union_int64); - output_visitor_test_add("/visitor/output/list_union/uint8", - &out_visitor_data, - test_visitor_out_list_union_uint8); - output_visitor_test_add("/visitor/output/list_union/uint16", - &out_visitor_data, - test_visitor_out_list_union_uint16); - output_visitor_test_add("/visitor/output/list_union/uint32", - &out_visitor_data, - test_visitor_out_list_union_uint32); - output_visitor_test_add("/visitor/output/list_union/uint64", - &out_visitor_data, - test_visitor_out_list_union_uint64); - output_visitor_test_add("/visitor/output/list_union/bool", - &out_visitor_data, - test_visitor_out_list_union_bool); - output_visitor_test_add("/visitor/output/list_union/string", - &out_visitor_data, - test_visitor_out_list_union_str); - output_visitor_test_add("/visitor/output/list_union/number", - &out_visitor_data, - test_visitor_out_list_union_number); + output_visitor_test_add("/visitor/output/list_struct", + &out_visitor_data, test_visitor_out_list_struct); g_test_run(); diff --git a/tests/unit/test-rcu-list.c b/tests/unit/test-rcu-list.c index 49641e1936..64b81ae058 100644 --- a/tests/unit/test-rcu-list.c +++ b/tests/unit/test-rcu-list.c @@ -171,7 +171,7 @@ static void *rcu_q_reader(void *arg) rcu_register_thread(); - *(struct rcu_reader_data **)arg = &rcu_reader; + *(struct rcu_reader_data **)arg = get_ptr_rcu_reader(); qatomic_inc(&nthreadsrunning); while (qatomic_read(&goflag) == GOFLAG_INIT) { g_usleep(1000); @@ -206,7 +206,7 @@ static void *rcu_q_updater(void *arg) long long n_removed_local = 0; struct list_element *el, *prev_el; - *(struct rcu_reader_data **)arg = &rcu_reader; + *(struct rcu_reader_data **)arg = get_ptr_rcu_reader(); qatomic_inc(&nthreadsrunning); while (qatomic_read(&goflag) == GOFLAG_INIT) { g_usleep(1000); diff --git a/tests/unit/test-seccomp.c b/tests/unit/test-seccomp.c new file mode 100644 index 0000000000..3d7771e46c --- /dev/null +++ b/tests/unit/test-seccomp.c @@ -0,0 +1,269 @@ +/* + * QEMU seccomp test suite + * + * Copyright (c) 2021 Red Hat, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/config-file.h" +#include "qemu/option.h" +#include "sysemu/seccomp.h" +#include "qapi/error.h" +#include "qemu/module.h" + +#include +#include + +static void test_seccomp_helper(const char *args, bool killed, + int errnum, int (*doit)(void)) +{ + if (g_test_subprocess()) { + QemuOptsList *olist; + QemuOpts *opts; + int ret; + + module_call_init(MODULE_INIT_OPTS); + olist = qemu_find_opts("sandbox"); + g_assert(olist != NULL); + + opts = qemu_opts_parse_noisily(olist, args, true); + g_assert(opts != NULL); + + parse_sandbox(NULL, opts, &error_abort); + + /* Running in a child process */ + ret = doit(); + + if (errnum != 0) { + g_assert(ret != 0); + g_assert(errno == errnum); + } else { + g_assert(ret == 0); + } + + _exit(0); + } else { + /* Running in main test process, spawning the child */ + g_test_trap_subprocess(NULL, 0, 0); + if (killed) { + g_test_trap_assert_failed(); + } else { + g_test_trap_assert_passed(); + } + } +} + + +static void test_seccomp_killed(const char *args, int (*doit)(void)) +{ + test_seccomp_helper(args, true, 0, doit); +} + +static void test_seccomp_errno(const char *args, int errnum, int (*doit)(void)) +{ + test_seccomp_helper(args, false, errnum, doit); +} + +static void test_seccomp_passed(const char *args, int (*doit)(void)) +{ + test_seccomp_helper(args, false, 0, doit); +} + +#ifdef SYS_fork +static int doit_sys_fork(void) +{ + int ret = syscall(SYS_fork); + if (ret < 0) { + return ret; + } + if (ret == 0) { + _exit(0); + } + return 0; +} + +static void test_seccomp_sys_fork_on_nospawn(void) +{ + test_seccomp_killed("on,spawn=deny", doit_sys_fork); +} + +static void test_seccomp_sys_fork_on(void) +{ + test_seccomp_passed("on", doit_sys_fork); +} + +static void test_seccomp_sys_fork_off(void) +{ + test_seccomp_passed("off", doit_sys_fork); +} +#endif + +static int doit_fork(void) +{ + int ret = fork(); + if (ret < 0) { + return ret; + } + if (ret == 0) { + _exit(0); + } + return 0; +} + +static void test_seccomp_fork_on_nospawn(void) +{ + test_seccomp_killed("on,spawn=deny", doit_fork); +} + +static void test_seccomp_fork_on(void) +{ + test_seccomp_passed("on", doit_fork); +} + +static void test_seccomp_fork_off(void) +{ + test_seccomp_passed("off", doit_fork); +} + +static void *noop(void *arg) +{ + return arg; +} + +static int doit_thread(void) +{ + pthread_t th; + int ret = pthread_create(&th, NULL, noop, NULL); + if (ret != 0) { + errno = ret; + return -1; + } else { + pthread_join(th, NULL); + return 0; + } +} + +static void test_seccomp_thread_on(void) +{ + test_seccomp_passed("on", doit_thread); +} + +static void test_seccomp_thread_on_nospawn(void) +{ + test_seccomp_passed("on,spawn=deny", doit_thread); +} + +static void test_seccomp_thread_off(void) +{ + test_seccomp_passed("off", doit_thread); +} + +static int doit_sched(void) +{ + struct sched_param param = { .sched_priority = 0 }; + return sched_setscheduler(getpid(), SCHED_OTHER, ¶m); +} + +static void test_seccomp_sched_on_nores(void) +{ + test_seccomp_errno("on,resourcecontrol=deny", EPERM, doit_sched); +} + +static void test_seccomp_sched_on(void) +{ + test_seccomp_passed("on", doit_sched); +} + +static void test_seccomp_sched_off(void) +{ + test_seccomp_passed("off", doit_sched); +} + +static bool can_play_with_seccomp(void) +{ + g_autofree char *status = NULL; + g_auto(GStrv) lines = NULL; + size_t i; + + if (!g_file_get_contents("/proc/self/status", &status, NULL, NULL)) { + return false; + } + + lines = g_strsplit(status, "\n", 0); + + 
for (i = 0; lines[i] != NULL; i++) { + if (g_str_has_prefix(lines[i], "Seccomp:")) { + /* + * "Seccomp: 1" or "Seccomp: 2" indicate we're already + * confined, probably as we're inside a container. In + * this case our tests might get unexpected results, + * so we can't run reliably + */ + if (!strchr(lines[i], '0')) { + return false; + } + + return true; + } + } + + /* Doesn't look like seccomp is enabled in the kernel */ + return false; +} + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + if (can_play_with_seccomp()) { +#ifdef SYS_fork + g_test_add_func("/softmmu/seccomp/sys-fork/on", + test_seccomp_sys_fork_on); + g_test_add_func("/softmmu/seccomp/sys-fork/on-nospawn", + test_seccomp_sys_fork_on_nospawn); + g_test_add_func("/softmmu/seccomp/sys-fork/off", + test_seccomp_sys_fork_off); +#endif + + g_test_add_func("/softmmu/seccomp/fork/on", + test_seccomp_fork_on); + g_test_add_func("/softmmu/seccomp/fork/on-nospawn", + test_seccomp_fork_on_nospawn); + g_test_add_func("/softmmu/seccomp/fork/off", + test_seccomp_fork_off); + + g_test_add_func("/softmmu/seccomp/thread/on", + test_seccomp_thread_on); + g_test_add_func("/softmmu/seccomp/thread/on-nospawn", + test_seccomp_thread_on_nospawn); + g_test_add_func("/softmmu/seccomp/thread/off", + test_seccomp_thread_off); + + if (doit_sched() == 0) { + /* + * musl doesn't impl sched_setscheduler, hence + * we check above if it works first + */ + g_test_add_func("/softmmu/seccomp/sched/on", + test_seccomp_sched_on); + g_test_add_func("/softmmu/seccomp/sched/on-nores", + test_seccomp_sched_on_nores); + g_test_add_func("/softmmu/seccomp/sched/off", + test_seccomp_sched_off); + } + } + return g_test_run(); +} diff --git a/tests/unit/test-smp-parse.c b/tests/unit/test-smp-parse.c new file mode 100644 index 0000000000..fdc39a846c --- /dev/null +++ b/tests/unit/test-smp-parse.c @@ -0,0 +1,749 @@ +/* + * SMP parsing unit-tests + * + * Copyright (c) 2021 Huawei Technologies Co., Ltd + * + * Authors: + * Yanan Wang + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qom/object.h" +#include "qemu/module.h" +#include "qapi/error.h" + +#include "hw/boards.h" + +#define T true +#define F false + +#define MIN_CPUS 1 /* set the min CPUs supported by the machine as 1 */ +#define MAX_CPUS 512 /* set the max CPUs supported by the machine as 512 */ + +#define SMP_MACHINE_NAME "TEST-SMP" + +/* + * Used to define the generic 3-level CPU topology hierarchy + * -sockets/cores/threads + */ +#define SMP_CONFIG_GENERIC(ha, a, hb, b, hc, c, hd, d, he, e) \ + { \ + .has_cpus = ha, .cpus = a, \ + .has_sockets = hb, .sockets = b, \ + .has_cores = hc, .cores = c, \ + .has_threads = hd, .threads = d, \ + .has_maxcpus = he, .maxcpus = e, \ + } + +#define CPU_TOPOLOGY_GENERIC(a, b, c, d, e) \ + { \ + .cpus = a, \ + .sockets = b, \ + .cores = c, \ + .threads = d, \ + .max_cpus = e, \ + } + +/* + * Currently a 4-level topology hierarchy is supported on PC machines + * -sockets/dies/cores/threads + */ +#define SMP_CONFIG_WITH_DIES(ha, a, hb, b, hc, c, hd, d, he, e, hf, f) \ + { \ + .has_cpus = ha, .cpus = a, \ + .has_sockets = hb, .sockets = b, \ + .has_dies = hc, .dies = c, \ + .has_cores = hd, .cores = d, \ + .has_threads = he, .threads = e, \ + .has_maxcpus = hf, .maxcpus = f, \ + } + +/* + * Currently a 4-level topology hierarchy is supported on ARM virt machines + * -sockets/clusters/cores/threads + */ +#define SMP_CONFIG_WITH_CLUSTERS(ha, a, hb, b, hc, c, hd, d, he, e, hf, f) \ + { \ + .has_cpus = ha, .cpus = a, \ + .has_sockets = hb, .sockets = b, \ + .has_clusters = hc, .clusters = c, \ + .has_cores = hd, .cores = d, \ + .has_threads = he, .threads = e, \ + .has_maxcpus = hf, .maxcpus = f, \ + } + +/** + * @config - the given SMP configuration + * @expect_prefer_sockets - the expected parsing result for the + * valid configuration, when sockets are preferred over cores + * @expect_prefer_cores - the expected parsing result for the + * valid configuration, when cores are preferred over sockets + * @expect_error - the expected error report when the given + * configuration is invalid + */ +typedef struct SMPTestData { + SMPConfiguration config; + CpuTopology expect_prefer_sockets; + CpuTopology expect_prefer_cores; + const char *expect_error; +} SMPTestData; + +/* + * List all the possible valid sub-collections of the generic 5 + * topology parameters (i.e. cpus/maxcpus/sockets/cores/threads), + * then test the automatic calculation algorithm of the missing + * values in the parser. 
+ */ +static const struct SMPTestData data_generic_valid[] = { + { + /* config: no configuration provided + * expect: cpus=1,sockets=1,cores=1,threads=1,maxcpus=1 */ + .config = SMP_CONFIG_GENERIC(F, 0, F, 0, F, 0, F, 0, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(1, 1, 1, 1, 1), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(1, 1, 1, 1, 1), + }, { + /* config: -smp 8 + * prefer_sockets: cpus=8,sockets=8,cores=1,threads=1,maxcpus=8 + * prefer_cores: cpus=8,sockets=1,cores=8,threads=1,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(T, 8, F, 0, F, 0, F, 0, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 8, 1, 1, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 1, 8, 1, 8), + }, { + /* config: -smp sockets=2 + * expect: cpus=2,sockets=2,cores=1,threads=1,maxcpus=2 */ + .config = SMP_CONFIG_GENERIC(F, 0, T, 2, F, 0, F, 0, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(2, 2, 1, 1, 2), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(2, 2, 1, 1, 2), + }, { + /* config: -smp cores=4 + * expect: cpus=4,sockets=1,cores=4,threads=1,maxcpus=4 */ + .config = SMP_CONFIG_GENERIC(F, 0, F, 0, T, 4, F, 0, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(4, 1, 4, 1, 4), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(4, 1, 4, 1, 4), + }, { + /* config: -smp threads=2 + * expect: cpus=2,sockets=1,cores=1,threads=2,maxcpus=2 */ + .config = SMP_CONFIG_GENERIC(F, 0, F, 0, F, 0, T, 2, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(2, 1, 1, 2, 2), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(2, 1, 1, 2, 2), + }, { + /* config: -smp maxcpus=16 + * prefer_sockets: cpus=16,sockets=16,cores=1,threads=1,maxcpus=16 + * prefer_cores: cpus=16,sockets=1,cores=16,threads=1,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(F, 0, F, 0, F, 0, F, 0, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(16, 16, 1, 1, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(16, 1, 16, 1, 16), + }, { + /* config: -smp 8,sockets=2 + * expect: cpus=8,sockets=2,cores=4,threads=1,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(T, 8, T, 2, F, 0, F, 0, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + }, { + /* config: -smp 8,cores=4 + * expect: cpus=8,sockets=2,cores=4,threads=1,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(T, 8, F, 0, T, 4, F, 0, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + }, { + /* config: -smp 8,threads=2 + * prefer_sockets: cpus=8,sockets=4,cores=1,threads=2,maxcpus=8 + * prefer_cores: cpus=8,sockets=1,cores=4,threads=2,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(T, 8, F, 0, F, 0, T, 2, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 4, 1, 2, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 1, 4, 2, 8), + }, { + /* config: -smp 8,maxcpus=16 + * prefer_sockets: cpus=8,sockets=16,cores=1,threads=1,maxcpus=16 + * prefer_cores: cpus=8,sockets=1,cores=16,threads=1,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(T, 8, F, 0, F, 0, F, 0, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 16, 1, 1, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 1, 16, 1, 16), + }, { + /* config: -smp sockets=2,cores=4 + * expect: cpus=8,sockets=2,cores=4,threads=1,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(F, 0, T, 2, T, 4, F, 0, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + }, { + /* config: -smp sockets=2,threads=2 + * expect: 
cpus=4,sockets=2,cores=1,threads=2,maxcpus=4 */ + .config = SMP_CONFIG_GENERIC(F, 0, T, 2, F, 0, T, 2, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(4, 2, 1, 2, 4), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(4, 2, 1, 2, 4), + }, { + /* config: -smp sockets=2,maxcpus=16 + * expect: cpus=16,sockets=2,cores=8,threads=1,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(F, 0, T, 2, F, 0, F, 0, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(16, 2, 8, 1, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(16, 2, 8, 1, 16), + }, { + /* config: -smp cores=4,threads=2 + * expect: cpus=8,sockets=1,cores=4,threads=2,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(F, 0, F, 0, T, 4, T, 2, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 1, 4, 2, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 1, 4, 2, 8), + }, { + /* config: -smp cores=4,maxcpus=16 + * expect: cpus=16,sockets=4,cores=4,threads=1,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(F, 0, F, 0, T, 4, F, 0, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(16, 4, 4, 1, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(16, 4, 4, 1, 16), + }, { + /* config: -smp threads=2,maxcpus=16 + * prefer_sockets: cpus=16,sockets=8,cores=1,threads=2,maxcpus=16 + * prefer_cores: cpus=16,sockets=1,cores=8,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(F, 0, F, 0, F, 0, T, 2, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(16, 8, 1, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(16, 1, 8, 2, 16), + }, { + /* config: -smp 8,sockets=2,cores=4 + * expect: cpus=8,sockets=2,cores=4,threads=1,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(T, 8, T, 2, T, 4, F, 0, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + }, { + /* config: -smp 8,sockets=2,threads=2 + * expect: cpus=8,sockets=2,cores=2,threads=2,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(T, 8, T, 2, F, 0, T, 2, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 2, 2, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 2, 2, 8), + }, { + /* config: -smp 8,sockets=2,maxcpus=16 + * expect: cpus=8,sockets=2,cores=8,threads=1,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(T, 8, T, 2, F, 0, F, 0, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 8, 1, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 8, 1, 16), + }, { + /* config: -smp 8,cores=4,threads=2 + * expect: cpus=8,sockets=1,cores=4,threads=2,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(T, 8, F, 0, T, 4, T, 2, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 1, 4, 2, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 1, 4, 2, 8), + }, { + /* config: -smp 8,cores=4,maxcpus=16 + * expect: cpus=8,sockets=4,cores=4,threads=1,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(T, 8, F, 0, T, 4, F, 0, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 4, 4, 1, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 4, 4, 1, 16), + }, { + /* config: -smp 8,threads=2,maxcpus=16 + * prefer_sockets: cpus=8,sockets=8,cores=1,threads=2,maxcpus=16 + * prefer_cores: cpus=8,sockets=1,cores=8,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(T, 8, F, 0, F, 0, T, 2, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 8, 1, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 1, 8, 2, 16), + }, { + /* config: -smp sockets=2,cores=4,threads=2 + * expect: cpus=16,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(F, 0, T, 2, T, 4, T, 2, F, 0), + .expect_prefer_sockets 
= CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + }, { + /* config: -smp sockets=2,cores=4,maxcpus=16 + * expect: cpus=16,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(F, 0, T, 2, T, 4, F, 0, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + }, { + /* config: -smp sockets=2,threads=2,maxcpus=16 + * expect: cpus=16,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(F, 0, T, 2, F, 0, T, 2, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + }, { + /* config: -smp cores=4,threads=2,maxcpus=16 + * expect: cpus=16,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(F, 0, F, 0, T, 4, T, 2, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + }, { + /* config: -smp 8,sockets=2,cores=4,threads=1 + * expect: cpus=8,sockets=2,cores=4,threads=1,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(T, 8, T, 2, T, 4, T, 1, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + }, { + /* config: -smp 8,sockets=2,cores=4,maxcpus=16 + * expect: cpus=8,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(T, 8, T, 2, T, 4, F, 0, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 4, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 4, 2, 16), + }, { + /* config: -smp 8,sockets=2,threads=2,maxcpus=16 + * expect: cpus=8,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(T, 8, T, 2, F, 0, T, 2, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 4, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 4, 2, 16), + }, { + /* config: -smp 8,cores=4,threads=2,maxcpus=16 + * expect: cpus=8,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(T, 8, F, 0, T, 4, T, 2, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 4, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 4, 2, 16), + }, { + /* config: -smp sockets=2,cores=4,threads=2,maxcpus=16 + * expect: cpus=16,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(F, 0, T, 2, T, 4, T, 2, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + }, { + /* config: -smp 8,sockets=2,cores=4,threads=2,maxcpus=16 + * expect: cpus=8,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(T, 8, T, 2, T, 4, T, 2, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 4, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 4, 2, 16), + }, +}; + +static const struct SMPTestData data_generic_invalid[] = { + { + /* config: -smp 2,dies=2 */ + .config = SMP_CONFIG_WITH_DIES(T, 2, F, 0, T, 2, F, 0, F, 0, F, 0), + .expect_error = "dies not supported by this machine's CPU topology", + }, { + /* config: -smp 2,clusters=2 */ + .config = SMP_CONFIG_WITH_CLUSTERS(T, 2, F, 0, T, 2, F, 0, F, 0, F, 0), + .expect_error = "clusters not supported by this machine's CPU topology", + }, { + /* config: -smp 8,sockets=2,cores=4,threads=2,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(T, 8, T, 2, T, 4, T, 2, T, 8), + .expect_error = "Invalid CPU topology: " + "product of the hierarchy must match 
maxcpus: " + "sockets (2) * cores (4) * threads (2) " + "!= maxcpus (8)", + }, { + /* config: -smp 18,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(T, 18, T, 2, T, 4, T, 2, T, 16), + .expect_error = "Invalid CPU topology: " + "maxcpus must be equal to or greater than smp: " + "sockets (2) * cores (4) * threads (2) " + "== maxcpus (16) < smp_cpus (18)", + }, { + /* config: -smp 1 + * should tweak the supported min CPUs to 2 for testing */ + .config = SMP_CONFIG_GENERIC(T, 1, F, 0, F, 0, F, 0, F, 0), + .expect_error = "Invalid SMP CPUs 1. The min CPUs supported " + "by machine '" SMP_MACHINE_NAME "' is 2", + }, { + /* config: -smp 512 + * should tweak the supported max CPUs to 511 for testing */ + .config = SMP_CONFIG_GENERIC(T, 512, F, 0, F, 0, F, 0, F, 0), + .expect_error = "Invalid SMP CPUs 512. The max CPUs supported " + "by machine '" SMP_MACHINE_NAME "' is 511", + }, +}; + +static const struct SMPTestData data_with_dies_invalid[] = { + { + /* config: -smp 16,sockets=2,dies=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_WITH_DIES(T, 16, T, 2, T, 2, T, 4, T, 2, T, 16), + .expect_error = "Invalid CPU topology: " + "product of the hierarchy must match maxcpus: " + "sockets (2) * dies (2) * cores (4) * threads (2) " + "!= maxcpus (16)", + }, { + /* config: -smp 34,sockets=2,dies=2,cores=4,threads=2,maxcpus=32 */ + .config = SMP_CONFIG_WITH_DIES(T, 34, T, 2, T, 2, T, 4, T, 2, T, 32), + .expect_error = "Invalid CPU topology: " + "maxcpus must be equal to or greater than smp: " + "sockets (2) * dies (2) * cores (4) * threads (2) " + "== maxcpus (32) < smp_cpus (34)", + }, +}; + +static const struct SMPTestData data_with_clusters_invalid[] = { + { + /* config: -smp 16,sockets=2,clusters=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_WITH_CLUSTERS(T, 16, T, 2, T, 2, T, 4, T, 2, T, 16), + .expect_error = "Invalid CPU topology: " + "product of the hierarchy must match maxcpus: " + "sockets (2) * clusters (2) * cores (4) * threads (2) " + "!= maxcpus (16)", + }, { + /* config: -smp 34,sockets=2,clusters=2,cores=4,threads=2,maxcpus=32 */ + .config = SMP_CONFIG_WITH_CLUSTERS(T, 34, T, 2, T, 2, T, 4, T, 2, T, 32), + .expect_error = "Invalid CPU topology: " + "maxcpus must be equal to or greater than smp: " + "sockets (2) * clusters (2) * cores (4) * threads (2) " + "== maxcpus (32) < smp_cpus (34)", + }, +}; + +static char *smp_config_to_string(const SMPConfiguration *config) +{ + return g_strdup_printf( + "(SMPConfiguration) {\n" + " .has_cpus = %5s, cpus = %" PRId64 ",\n" + " .has_sockets = %5s, sockets = %" PRId64 ",\n" + " .has_dies = %5s, dies = %" PRId64 ",\n" + " .has_clusters = %5s, clusters = %" PRId64 ",\n" + " .has_cores = %5s, cores = %" PRId64 ",\n" + " .has_threads = %5s, threads = %" PRId64 ",\n" + " .has_maxcpus = %5s, maxcpus = %" PRId64 ",\n" + "}", + config->has_cpus ? "true" : "false", config->cpus, + config->has_sockets ? "true" : "false", config->sockets, + config->has_dies ? "true" : "false", config->dies, + config->has_clusters ? "true" : "false", config->clusters, + config->has_cores ? "true" : "false", config->cores, + config->has_threads ? "true" : "false", config->threads, + config->has_maxcpus ? 
"true" : "false", config->maxcpus); +} + +static char *cpu_topology_to_string(const CpuTopology *topo) +{ + return g_strdup_printf( + "(CpuTopology) {\n" + " .cpus = %u,\n" + " .sockets = %u,\n" + " .dies = %u,\n" + " .clusters = %u,\n" + " .cores = %u,\n" + " .threads = %u,\n" + " .max_cpus = %u,\n" + "}", + topo->cpus, topo->sockets, topo->dies, topo->clusters, + topo->cores, topo->threads, topo->max_cpus); +} + +static void check_parse(MachineState *ms, const SMPConfiguration *config, + const CpuTopology *expect_topo, const char *expect_err, + bool is_valid) +{ + g_autofree char *config_str = smp_config_to_string(config); + g_autofree char *expect_topo_str = cpu_topology_to_string(expect_topo); + g_autofree char *output_topo_str = NULL; + Error *err = NULL; + + /* call the generic parser */ + machine_parse_smp_config(ms, config, &err); + + output_topo_str = cpu_topology_to_string(&ms->smp); + + /* when the configuration is supposed to be valid */ + if (is_valid) { + if ((err == NULL) && + (ms->smp.cpus == expect_topo->cpus) && + (ms->smp.sockets == expect_topo->sockets) && + (ms->smp.dies == expect_topo->dies) && + (ms->smp.clusters == expect_topo->clusters) && + (ms->smp.cores == expect_topo->cores) && + (ms->smp.threads == expect_topo->threads) && + (ms->smp.max_cpus == expect_topo->max_cpus)) { + return; + } + + if (err != NULL) { + g_printerr("Test smp_parse failed!\n" + "Input configuration: %s\n" + "Should be valid: yes\n" + "Expected topology: %s\n\n" + "Result is valid: no\n" + "Output error report: %s\n", + config_str, expect_topo_str, error_get_pretty(err)); + goto end; + } + + g_printerr("Test smp_parse failed!\n" + "Input configuration: %s\n" + "Should be valid: yes\n" + "Expected topology: %s\n\n" + "Result is valid: yes\n" + "Output topology: %s\n", + config_str, expect_topo_str, output_topo_str); + goto end; + } + + /* when the configuration is supposed to be invalid */ + if (err != NULL) { + if (expect_err == NULL || + g_str_equal(expect_err, error_get_pretty(err))) { + error_free(err); + return; + } + + g_printerr("Test smp_parse failed!\n" + "Input configuration: %s\n" + "Should be valid: no\n" + "Expected error report: %s\n\n" + "Result is valid: no\n" + "Output error report: %s\n", + config_str, expect_err, error_get_pretty(err)); + goto end; + } + + g_printerr("Test smp_parse failed!\n" + "Input configuration: %s\n" + "Should be valid: no\n" + "Expected error report: %s\n\n" + "Result is valid: yes\n" + "Output topology: %s\n", + config_str, expect_err, output_topo_str); + +end: + if (err != NULL) { + error_free(err); + } + + abort(); +} + +static void smp_parse_test(MachineState *ms, SMPTestData *data, bool is_valid) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + + mc->smp_props.prefer_sockets = true; + check_parse(ms, &data->config, &data->expect_prefer_sockets, + data->expect_error, is_valid); + + mc->smp_props.prefer_sockets = false; + check_parse(ms, &data->config, &data->expect_prefer_cores, + data->expect_error, is_valid); +} + +/* The parsed results of the unsupported parameters should be 1 */ +static void unsupported_params_init(const MachineClass *mc, SMPTestData *data) +{ + if (!mc->smp_props.dies_supported) { + data->expect_prefer_sockets.dies = 1; + data->expect_prefer_cores.dies = 1; + } + + if (!mc->smp_props.clusters_supported) { + data->expect_prefer_sockets.clusters = 1; + data->expect_prefer_cores.clusters = 1; + } +} + +static void machine_base_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->min_cpus = 
MIN_CPUS; + mc->max_cpus = MAX_CPUS; + + mc->name = g_strdup(SMP_MACHINE_NAME); +} + +static void machine_generic_invalid_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + /* Force invalid min CPUs and max CPUs */ + mc->min_cpus = 2; + mc->max_cpus = 511; +} + +static void machine_with_dies_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->smp_props.dies_supported = true; +} + +static void machine_with_clusters_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->smp_props.clusters_supported = true; +} + +static void test_generic_valid(const void *opaque) +{ + const char *machine_type = opaque; + Object *obj = object_new(machine_type); + MachineState *ms = MACHINE(obj); + MachineClass *mc = MACHINE_GET_CLASS(obj); + SMPTestData data = {}; + int i; + + for (i = 0; i < ARRAY_SIZE(data_generic_valid); i++) { + data = data_generic_valid[i]; + unsupported_params_init(mc, &data); + + smp_parse_test(ms, &data, true); + + /* Unsupported parameters can be provided with their values as 1 */ + data.config.has_dies = true; + data.config.dies = 1; + smp_parse_test(ms, &data, true); + } + + object_unref(obj); +} + +static void test_generic_invalid(const void *opaque) +{ + const char *machine_type = opaque; + Object *obj = object_new(machine_type); + MachineState *ms = MACHINE(obj); + MachineClass *mc = MACHINE_GET_CLASS(obj); + SMPTestData data = {}; + int i; + + for (i = 0; i < ARRAY_SIZE(data_generic_invalid); i++) { + data = data_generic_invalid[i]; + unsupported_params_init(mc, &data); + + smp_parse_test(ms, &data, false); + } + + object_unref(obj); +} + +static void test_with_dies(const void *opaque) +{ + const char *machine_type = opaque; + Object *obj = object_new(machine_type); + MachineState *ms = MACHINE(obj); + MachineClass *mc = MACHINE_GET_CLASS(obj); + SMPTestData data = {}; + unsigned int num_dies = 2; + int i; + + for (i = 0; i < ARRAY_SIZE(data_generic_valid); i++) { + data = data_generic_valid[i]; + unsupported_params_init(mc, &data); + + /* when dies parameter is omitted, it will be set as 1 */ + data.expect_prefer_sockets.dies = 1; + data.expect_prefer_cores.dies = 1; + + smp_parse_test(ms, &data, true); + + /* when dies parameter is specified */ + data.config.has_dies = true; + data.config.dies = num_dies; + if (data.config.has_cpus) { + data.config.cpus *= num_dies; + } + if (data.config.has_maxcpus) { + data.config.maxcpus *= num_dies; + } + + data.expect_prefer_sockets.dies = num_dies; + data.expect_prefer_sockets.cpus *= num_dies; + data.expect_prefer_sockets.max_cpus *= num_dies; + data.expect_prefer_cores.dies = num_dies; + data.expect_prefer_cores.cpus *= num_dies; + data.expect_prefer_cores.max_cpus *= num_dies; + + smp_parse_test(ms, &data, true); + } + + for (i = 0; i < ARRAY_SIZE(data_with_dies_invalid); i++) { + data = data_with_dies_invalid[i]; + unsupported_params_init(mc, &data); + + smp_parse_test(ms, &data, false); + } + + object_unref(obj); +} + +static void test_with_clusters(const void *opaque) +{ + const char *machine_type = opaque; + Object *obj = object_new(machine_type); + MachineState *ms = MACHINE(obj); + MachineClass *mc = MACHINE_GET_CLASS(obj); + SMPTestData data = {}; + unsigned int num_clusters = 2; + int i; + + for (i = 0; i < ARRAY_SIZE(data_generic_valid); i++) { + data = data_generic_valid[i]; + unsupported_params_init(mc, &data); + + /* when clusters parameter is omitted, it will be set as 1 */ + data.expect_prefer_sockets.clusters = 
1; + data.expect_prefer_cores.clusters = 1; + + smp_parse_test(ms, &data, true); + + /* when clusters parameter is specified */ + data.config.has_clusters = true; + data.config.clusters = num_clusters; + if (data.config.has_cpus) { + data.config.cpus *= num_clusters; + } + if (data.config.has_maxcpus) { + data.config.maxcpus *= num_clusters; + } + + data.expect_prefer_sockets.clusters = num_clusters; + data.expect_prefer_sockets.cpus *= num_clusters; + data.expect_prefer_sockets.max_cpus *= num_clusters; + data.expect_prefer_cores.clusters = num_clusters; + data.expect_prefer_cores.cpus *= num_clusters; + data.expect_prefer_cores.max_cpus *= num_clusters; + + smp_parse_test(ms, &data, true); + } + + for (i = 0; i < ARRAY_SIZE(data_with_clusters_invalid); i++) { + data = data_with_clusters_invalid[i]; + unsupported_params_init(mc, &data); + + smp_parse_test(ms, &data, false); + } + + object_unref(obj); +} + +/* Type info of the tested machine */ +static const TypeInfo smp_machine_types[] = { + { + .name = TYPE_MACHINE, + .parent = TYPE_OBJECT, + .abstract = true, + .class_init = machine_base_class_init, + .class_size = sizeof(MachineClass), + .instance_size = sizeof(MachineState), + }, { + .name = MACHINE_TYPE_NAME("smp-generic-valid"), + .parent = TYPE_MACHINE, + }, { + .name = MACHINE_TYPE_NAME("smp-generic-invalid"), + .parent = TYPE_MACHINE, + .class_init = machine_generic_invalid_class_init, + }, { + .name = MACHINE_TYPE_NAME("smp-with-dies"), + .parent = TYPE_MACHINE, + .class_init = machine_with_dies_class_init, + }, { + .name = MACHINE_TYPE_NAME("smp-with-clusters"), + .parent = TYPE_MACHINE, + .class_init = machine_with_clusters_class_init, + } +}; + +DEFINE_TYPES(smp_machine_types) + +int main(int argc, char *argv[]) +{ + module_call_init(MODULE_INIT_QOM); + + g_test_init(&argc, &argv, NULL); + + g_test_add_data_func("/test-smp-parse/generic/valid", + MACHINE_TYPE_NAME("smp-generic-valid"), + test_generic_valid); + g_test_add_data_func("/test-smp-parse/generic/invalid", + MACHINE_TYPE_NAME("smp-generic-invalid"), + test_generic_invalid); + g_test_add_data_func("/test-smp-parse/with_dies", + MACHINE_TYPE_NAME("smp-with-dies"), + test_with_dies); + g_test_add_data_func("/test-smp-parse/with_clusters", + MACHINE_TYPE_NAME("smp-with-clusters"), + test_with_clusters); + + g_test_run(); + + return 0; +} diff --git a/tests/unit/test-string-input-visitor.c b/tests/unit/test-string-input-visitor.c index 249faafc9d..25094d3ffc 100644 --- a/tests/unit/test-string-input-visitor.c +++ b/tests/unit/test-string-input-visitor.c @@ -12,7 +12,6 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/error.h" #include "qapi/string-input-visitor.h" #include "test-qapi-visit.h" diff --git a/tests/unit/test-string-output-visitor.c b/tests/unit/test-string-output-visitor.c index e2bedc5c7c..7ef305361e 100644 --- a/tests/unit/test-string-output-visitor.c +++ b/tests/unit/test-string-output-visitor.c @@ -12,7 +12,6 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/error.h" #include "qapi/string-output-visitor.h" #include "test-qapi-visit.h" diff --git a/tests/unit/test-thread-pool.c b/tests/unit/test-thread-pool.c index 70dc6314a1..6020e65d69 100644 --- a/tests/unit/test-thread-pool.c +++ b/tests/unit/test-thread-pool.c @@ -1,5 +1,4 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "block/aio.h" #include "block/thread-pool.h" #include "block/block.h" diff --git a/tests/unit/test-util-sockets.c b/tests/unit/test-util-sockets.c index 72b9246529..63909ccb2b 100644 
--- a/tests/unit/test-util-sockets.c +++ b/tests/unit/test-util-sockets.c @@ -19,7 +19,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/sockets.h" #include "qapi/error.h" #include "socket-helpers.h" @@ -305,9 +304,11 @@ static void test_socket_unix_abstract(void) }; int i; + i = g_file_open_tmp("unix-XXXXXX", &addr.u.q_unix.path, NULL); + g_assert_true(i >= 0); + close(i); + addr.type = SOCKET_ADDRESS_TYPE_UNIX; - addr.u.q_unix.path = g_strdup_printf("unix-%d-%u", - getpid(), g_random_int()); addr.u.q_unix.has_abstract = true; addr.u.q_unix.abstract = true; addr.u.q_unix.has_tight = false; diff --git a/tests/unit/test-visitor-serialization.c b/tests/unit/test-visitor-serialization.c index 4629958647..667e8fed82 100644 --- a/tests/unit/test-visitor-serialization.c +++ b/tests/unit/test-visitor-serialization.c @@ -14,7 +14,6 @@ #include "qemu/osdep.h" #include -#include "qemu-common.h" #include "test-qapi-visit.h" #include "qapi/error.h" #include "qapi/qmp/qjson.h" @@ -428,131 +427,117 @@ static void test_primitive_lists(gconstpointer opaque) ops->deserialize((void **)&pl_copy_ptr, serialize_data, visit_primitive_list, &error_abort); - i = 0; + + switch (pl_copy.type) { + case PTYPE_STRING: + cur_head = pl_copy.value.strings; + break; + case PTYPE_INTEGER: + cur_head = pl_copy.value.integers; + break; + case PTYPE_S8: + cur_head = pl_copy.value.s8_integers; + break; + case PTYPE_S16: + cur_head = pl_copy.value.s16_integers; + break; + case PTYPE_S32: + cur_head = pl_copy.value.s32_integers; + break; + case PTYPE_S64: + cur_head = pl_copy.value.s64_integers; + break; + case PTYPE_U8: + cur_head = pl_copy.value.u8_integers; + break; + case PTYPE_U16: + cur_head = pl_copy.value.u16_integers; + break; + case PTYPE_U32: + cur_head = pl_copy.value.u32_integers; + break; + case PTYPE_U64: + cur_head = pl_copy.value.u64_integers; + break; + case PTYPE_NUMBER: + cur_head = pl_copy.value.numbers; + break; + case PTYPE_BOOLEAN: + cur_head = pl_copy.value.booleans; + break; + default: + g_assert_not_reached(); + } /* compare our deserialized list of primitives to the original */ - do { + i = 0; + while (cur_head) { switch (pl_copy.type) { case PTYPE_STRING: { - strList *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.strings; - } + strList *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpstr(pt->value.string, ==, ptr->value); break; } case PTYPE_INTEGER: { - intList *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.integers; - } + intList *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(pt->value.integer, ==, ptr->value); break; } case PTYPE_S8: { - int8List *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.s8_integers; - } + int8List *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(pt->value.s8, ==, ptr->value); break; } case PTYPE_S16: { - int16List *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.s16_integers; - } + int16List *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(pt->value.s16, ==, ptr->value); break; } case PTYPE_S32: { - int32List *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.s32_integers; - } + int32List *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(pt->value.s32, ==, ptr->value); break; } case PTYPE_S64: { - int64List *ptr; 
- if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.s64_integers; - } + int64List *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(pt->value.s64, ==, ptr->value); break; } case PTYPE_U8: { - uint8List *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.u8_integers; - } + uint8List *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(pt->value.u8, ==, ptr->value); break; } case PTYPE_U16: { - uint16List *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.u16_integers; - } + uint16List *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(pt->value.u16, ==, ptr->value); break; } case PTYPE_U32: { - uint32List *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.u32_integers; - } + uint32List *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(pt->value.u32, ==, ptr->value); break; } case PTYPE_U64: { - uint64List *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.u64_integers; - } + uint64List *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(pt->value.u64, ==, ptr->value); break; } case PTYPE_NUMBER: { - numberList *ptr; GString *double_expected = g_string_new(""); GString *double_actual = g_string_new(""); - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.numbers; - } + numberList *ptr = cur_head; + cur_head = ptr->next; /* we serialize with %f for our reference visitors, so rather than * fuzzy floating math to test "equality", just compare the * formatted values @@ -565,13 +550,8 @@ static void test_primitive_lists(gconstpointer opaque) break; } case PTYPE_BOOLEAN: { - boolList *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.booleans; - } + boolList *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(!!pt->value.boolean, ==, !!ptr->value); break; } @@ -579,9 +559,9 @@ static void test_primitive_lists(gconstpointer opaque) g_assert_not_reached(); } i++; - } while (cur_head); + } - g_assert_cmpint(i, ==, 33); + g_assert_cmpint(i, ==, 32); ops->cleanup(serialize_data); dealloc_helper(&pl, visit_primitive_list, &error_abort); diff --git a/tests/unit/test-vmstate.c b/tests/unit/test-vmstate.c index 4688c03ea7..541bb4f63e 100644 --- a/tests/unit/test-vmstate.c +++ b/tests/unit/test-vmstate.c @@ -28,7 +28,6 @@ #include "migration/vmstate.h" #include "migration/qemu-file-types.h" #include "../migration/qemu-file.h" -#include "../migration/qemu-file-channel.h" #include "../migration/savevm.h" #include "qemu/coroutine.h" #include "qemu/module.h" @@ -52,9 +51,9 @@ static QEMUFile *open_test_file(bool write) } ioc = QIO_CHANNEL(qio_channel_file_new_fd(fd)); if (write) { - f = qemu_fopen_channel_output(ioc); + f = qemu_file_new_output(ioc); } else { - f = qemu_fopen_channel_input(ioc); + f = qemu_file_new_input(ioc); } object_unref(OBJECT(ioc)); return f; @@ -88,17 +87,16 @@ static void save_buffer(const uint8_t *buf, size_t buf_size) static void compare_vmstate(const uint8_t *wire, size_t size) { QEMUFile *f = open_test_file(false); - uint8_t result[size]; + g_autofree uint8_t *result = g_malloc(size); /* read back as binary */ - g_assert_cmpint(qemu_get_buffer(f, result, sizeof(result)), ==, - sizeof(result)); + g_assert_cmpint(qemu_get_buffer(f, result, size), ==, size); 
g_assert(!qemu_file_get_error(f)); /* Compare that what is on the file is the same that what we expected to be there */ - SUCCESS(memcmp(result, wire, sizeof(result))); + SUCCESS(memcmp(result, wire, size)); /* Must reach EOF */ qemu_get_byte(f); @@ -1002,22 +1000,22 @@ static TestGTreeDomain *create_first_domain(void) TestGTreeMapping *map_a, *map_b; TestGTreeInterval *a, *b; - domain = g_malloc0(sizeof(TestGTreeDomain)); + domain = g_new0(TestGTreeDomain, 1); domain->id = 6; - a = g_malloc0(sizeof(TestGTreeInterval)); + a = g_new0(TestGTreeInterval, 1); a->low = 0x1000; a->high = 0x1FFF; - b = g_malloc0(sizeof(TestGTreeInterval)); + b = g_new0(TestGTreeInterval, 1); b->low = 0x4000; b->high = 0x4FFF; - map_a = g_malloc0(sizeof(TestGTreeMapping)); + map_a = g_new0(TestGTreeMapping, 1); map_a->phys_addr = 0xa000; map_a->flags = 1; - map_b = g_malloc0(sizeof(TestGTreeMapping)); + map_b = g_new0(TestGTreeMapping, 1); map_b->phys_addr = 0xe0000; map_b->flags = 2; @@ -1120,7 +1118,7 @@ static void diff_iommu(TestGTreeIOMMU *iommu1, TestGTreeIOMMU *iommu2) static void test_gtree_load_domain(void) { - TestGTreeDomain *dest_domain = g_malloc0(sizeof(TestGTreeDomain)); + TestGTreeDomain *dest_domain = g_new0(TestGTreeDomain, 1); TestGTreeDomain *orig_domain = create_first_domain(); QEMUFile *fload, *fsave; char eof; @@ -1185,7 +1183,7 @@ uint8_t iommu_dump[] = { static TestGTreeIOMMU *create_iommu(void) { - TestGTreeIOMMU *iommu = g_malloc0(sizeof(TestGTreeIOMMU)); + TestGTreeIOMMU *iommu = g_new0(TestGTreeIOMMU, 1); TestGTreeDomain *first_domain = create_first_domain(); TestGTreeDomain *second_domain; TestGTreeMapping *map_c; @@ -1196,7 +1194,7 @@ static TestGTreeIOMMU *create_iommu(void) NULL, destroy_domain); - second_domain = g_malloc0(sizeof(TestGTreeDomain)); + second_domain = g_new0(TestGTreeDomain, 1); second_domain->id = 5; second_domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp, NULL, @@ -1206,11 +1204,11 @@ static TestGTreeIOMMU *create_iommu(void) g_tree_insert(iommu->domains, GUINT_TO_POINTER(6), first_domain); g_tree_insert(iommu->domains, (gpointer)0x0000000000000005, second_domain); - c = g_malloc0(sizeof(TestGTreeInterval)); + c = g_new0(TestGTreeInterval, 1); c->low = 0x1000000; c->high = 0x1FFFFFF; - map_c = g_malloc0(sizeof(TestGTreeMapping)); + map_c = g_new0(TestGTreeMapping, 1); map_c->phys_addr = 0xF000000; map_c->flags = 0x3; @@ -1235,7 +1233,7 @@ static void test_gtree_save_iommu(void) static void test_gtree_load_iommu(void) { - TestGTreeIOMMU *dest_iommu = g_malloc0(sizeof(TestGTreeIOMMU)); + TestGTreeIOMMU *dest_iommu = g_new0(TestGTreeIOMMU, 1); TestGTreeIOMMU *orig_iommu = create_iommu(); QEMUFile *fsave, *fload; char eof; @@ -1274,11 +1272,11 @@ static uint8_t qlist_dump[] = { static TestQListContainer *alloc_container(void) { - TestQListElement *a = g_malloc(sizeof(TestQListElement)); - TestQListElement *b = g_malloc(sizeof(TestQListElement)); - TestQListElement *c = g_malloc(sizeof(TestQListElement)); - TestQListElement *d = g_malloc(sizeof(TestQListElement)); - TestQListContainer *container = g_malloc(sizeof(TestQListContainer)); + TestQListElement *a = g_new(TestQListElement, 1); + TestQListElement *b = g_new(TestQListElement, 1); + TestQListElement *c = g_new(TestQListElement, 1); + TestQListElement *d = g_new(TestQListElement, 1); + TestQListContainer *container = g_new(TestQListContainer, 1); a->id = 0x0a; b->id = 0x0b00; @@ -1332,11 +1330,11 @@ static void manipulate_container(TestQListContainer *c) TestQListElement *prev = NULL, *iter = 
QLIST_FIRST(&c->list); TestQListElement *elem; - elem = g_malloc(sizeof(TestQListElement)); + elem = g_new(TestQListElement, 1); elem->id = 0x12; QLIST_INSERT_AFTER(iter, elem, next); - elem = g_malloc(sizeof(TestQListElement)); + elem = g_new(TestQListElement, 1); elem->id = 0x13; QLIST_INSERT_HEAD(&c->list, elem, next); @@ -1345,11 +1343,11 @@ static void manipulate_container(TestQListContainer *c) iter = QLIST_NEXT(iter, next); } - elem = g_malloc(sizeof(TestQListElement)); + elem = g_new(TestQListElement, 1); elem->id = 0x14; QLIST_INSERT_BEFORE(prev, elem, next); - elem = g_malloc(sizeof(TestQListElement)); + elem = g_new(TestQListElement, 1); elem->id = 0x15; QLIST_INSERT_AFTER(prev, elem, next); @@ -1370,7 +1368,7 @@ static void test_load_qlist(void) { QEMUFile *fsave, *fload; TestQListContainer *orig_container = alloc_container(); - TestQListContainer *dest_container = g_malloc0(sizeof(TestQListContainer)); + TestQListContainer *dest_container = g_new0(TestQListContainer, 1); char eof; QLIST_INIT(&dest_container->list); diff --git a/tests/unit/test-xbzrle.c b/tests/unit/test-xbzrle.c index 795d6f1cba..ef951b6e54 100644 --- a/tests/unit/test-xbzrle.c +++ b/tests/unit/test-xbzrle.c @@ -11,7 +11,6 @@ * */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/cutils.h" #include "../migration/xbzrle.h" diff --git a/tests/unit/test-yank.c b/tests/unit/test-yank.c index 2383d2908c..e6c036a64d 100644 --- a/tests/unit/test-yank.c +++ b/tests/unit/test-yank.c @@ -88,7 +88,7 @@ static void char_change_test(gconstpointer opaque) .type = CHARDEV_BACKEND_KIND_SOCKET, .u.socket.data = &(ChardevSocket) { .addr = &(SocketAddressLegacy) { - .type = SOCKET_ADDRESS_LEGACY_KIND_INET, + .type = SOCKET_ADDRESS_TYPE_INET, .u.inet.data = &addr->u.inet }, .has_server = true, @@ -102,7 +102,7 @@ static void char_change_test(gconstpointer opaque) .type = CHARDEV_BACKEND_KIND_UDP, .u.udp.data = &(ChardevUdp) { .remote = &(SocketAddressLegacy) { - .type = SOCKET_ADDRESS_LEGACY_KIND_UNIX, + .type = SOCKET_ADDRESS_TYPE_UNIX, .u.q_unix.data = &(UnixSocketAddress) { .path = (char *)"" } @@ -114,7 +114,7 @@ static void char_change_test(gconstpointer opaque) .type = CHARDEV_BACKEND_KIND_SOCKET, .u.socket.data = &(ChardevSocket) { .addr = &(SocketAddressLegacy) { - .type = SOCKET_ADDRESS_LEGACY_KIND_INET, + .type = SOCKET_ADDRESS_TYPE_INET, .u.inet.data = &(InetSocketAddress) { .host = (char *)"127.0.0.1", .port = (char *)"0" diff --git a/tests/vhost-user-bridge.c b/tests/vhost-user-bridge.c index 24815920b2..fecdf915e7 100644 --- a/tests/vhost-user-bridge.c +++ b/tests/vhost-user-bridge.c @@ -468,8 +468,8 @@ vubr_queue_set_started(VuDev *dev, int qidx, bool started) if (started && vubr->notifier.fd >= 0) { vu_set_queue_host_notifier(dev, vq, vubr->notifier.fd, - qemu_real_host_page_size, - qidx * qemu_real_host_page_size); + qemu_real_host_page_size(), + qidx * qemu_real_host_page_size()); } if (qidx % 2 == 1) { @@ -540,6 +540,11 @@ vubr_new(const char *path, bool client) CallbackFunc cb; size_t len; + if (strlen(path) >= sizeof(un.sun_path)) { + fprintf(stderr, "unix domain socket path '%s' is too long\n", path); + exit(1); + } + /* Get a UNIX socket. 
*/ dev->sock = socket(AF_UNIX, SOCK_STREAM, 0); if (dev->sock == -1) { @@ -596,7 +601,7 @@ static void *notifier_thread(void *arg) { VuDev *dev = (VuDev *)arg; VubrDev *vubr = container_of(dev, VubrDev, vudev); - int pagesize = qemu_real_host_page_size; + int pagesize = qemu_real_host_page_size(); int qidx; while (true) { @@ -626,15 +631,14 @@ static void *notifier_thread(void *arg) static void vubr_host_notifier_setup(VubrDev *dev) { - char template[] = "/tmp/vubr-XXXXXX"; pthread_t thread; size_t length; void *addr; int fd; - length = qemu_real_host_page_size * VHOST_USER_BRIDGE_MAX_QUEUES; + length = qemu_real_host_page_size() * VHOST_USER_BRIDGE_MAX_QUEUES; - fd = mkstemp(template); + fd = g_file_open_tmp("vubr-XXXXXX", NULL, NULL); if (fd < 0) { vubr_die("mkstemp()"); } @@ -826,7 +830,7 @@ main(int argc, char *argv[]) out: fprintf(stderr, "Usage: %s ", argv[0]); fprintf(stderr, "[-c] [-H] [-u ud_socket_path] [-l lhost:lport] [-r rhost:rport]\n"); - fprintf(stderr, "\t-u path to unix doman socket. default: %s\n", + fprintf(stderr, "\t-u path to unix domain socket. default: %s\n", DEFAULT_UD_SOCKET); fprintf(stderr, "\t-l local host and port. default: %s:%s\n", DEFAULT_LHOST, DEFAULT_LPORT); diff --git a/tests/vm/Makefile.include b/tests/vm/Makefile.include index f3a3a1c751..2cc2203d09 100644 --- a/tests/vm/Makefile.include +++ b/tests/vm/Makefile.include @@ -1,17 +1,34 @@ # Makefile for VM tests +# Hack to allow running in an unconfigured build tree +ifeq ($(wildcard $(SRC_PATH)/config-host.mak),) +VM_PYTHON = PYTHONPATH=$(SRC_PATH)/python /usr/bin/env python3 +VM_VENV = +HOST_ARCH := $(shell uname -m) +else +VM_PYTHON = $(TESTS_PYTHON) +VM_VENV = check-venv +HOST_ARCH = $(ARCH) +endif + .PHONY: vm-build-all vm-clean-all EFI_AARCH64 = $(wildcard $(BUILD_DIR)/pc-bios/edk2-aarch64-code.fd) -IMAGES := freebsd netbsd openbsd centos fedora haiku.x86_64 +X86_IMAGES := freebsd netbsd openbsd haiku.x86_64 ifneq ($(GENISOIMAGE),) -IMAGES += ubuntu.i386 centos +X86_IMAGES += centos ifneq ($(EFI_AARCH64),) -IMAGES += ubuntu.aarch64 centos.aarch64 +ARM64_IMAGES += ubuntu.aarch64 centos.aarch64 endif endif +ifeq ($(HOST_ARCH),x86_64) +IMAGES=$(X86_IMAGES) $(if $(USE_TCG),$(ARM64_IMAGES)) +else ifeq ($(HOST_ARCH),aarch64) +IMAGES=$(ARM64_IMAGES) $(if $(USE_TCG),$(X86_IMAGES)) +endif + IMAGES_DIR := $(HOME)/.cache/qemu-vm/images IMAGE_FILES := $(patsubst %, $(IMAGES_DIR)/%.img, $(IMAGES)) @@ -28,10 +45,8 @@ vm-help vm-test: @echo " vm-build-freebsd - Build QEMU in FreeBSD VM" @echo " vm-build-netbsd - Build QEMU in NetBSD VM" @echo " vm-build-openbsd - Build QEMU in OpenBSD VM" - @echo " vm-build-fedora - Build QEMU in Fedora VM" ifneq ($(GENISOIMAGE),) @echo " vm-build-centos - Build QEMU in CentOS VM, with Docker" - @echo " vm-build-ubuntu.i386 - Build QEMU in ubuntu i386 VM" ifneq ($(EFI_AARCH64),) @echo " vm-build-ubuntu.aarch64 - Build QEMU in ubuntu aarch64 VM" @echo " vm-build-centos.aarch64 - Build QEMU in CentOS aarch64 VM" @@ -43,7 +58,7 @@ else endif @echo " vm-build-haiku.x86_64 - Build QEMU in Haiku VM" @echo "" - @echo " vm-build-all - Build QEMU in all VMs" + @echo " vm-build-all - Build QEMU in: $(IMAGES)" @echo " vm-clean-all - Clean up VM images" @echo @echo "For trouble-shooting:" @@ -52,21 +67,22 @@ endif @echo @echo "Special variables:" @echo " BUILD_TARGET=foo - Override the build target" - @echo " TARGET_LIST=a,b,c - Override target list in builds" - @echo ' EXTRA_CONFIGURE_OPTS="..."' - @echo " J=[0..9]* - Override the -jN parameter for make commands" @echo " DEBUG=1 - Enable 
verbose output on host and interactive debugging" + @echo ' EXTRA_CONFIGURE_OPTS="..." - Pass to configure step' + @echo " J=[0..9]* - Override the -jN parameter for make commands" @echo " LOG_CONSOLE=1 - Log console to file in: ~/.cache/qemu-vm " - @echo " V=1 - Enable verbose ouput on host and guest commands" - @echo " QEMU_LOCAL=1 - Use QEMU binary local to this build." + @echo " USE_TCG=1 - Use TCG for cross-arch images" @echo " QEMU=/path/to/qemu - Change path to QEMU binary" - @echo " QEMU_IMG=/path/to/qemu-img - Change path to qemu-img tool" ifeq ($(HAVE_PYTHON_YAML),yes) @echo " QEMU_CONFIG=/path/conf.yml - Change path to VM configuration .yml file." else @echo " (install python3-yaml to enable support for yaml file to configure a VM.)" endif @echo " See conf_example_*.yml for file format details." + @echo " QEMU_IMG=/path/to/qemu-img - Change path to qemu-img tool" + @echo " QEMU_LOCAL=1 - Use QEMU binary local to this build." + @echo " TARGET_LIST=a,b,c - Override target list in builds" + @echo " V=1 - Enable verbose output on host and guest commands" vm-build-all: $(addprefix vm-build-, $(IMAGES)) @@ -75,10 +91,11 @@ vm-clean-all: $(IMAGES_DIR)/%.img: $(SRC_PATH)/tests/vm/% \ $(SRC_PATH)/tests/vm/basevm.py \ - $(SRC_PATH)/tests/vm/Makefile.include + $(SRC_PATH)/tests/vm/Makefile.include \ + $(VM_VENV) @mkdir -p $(IMAGES_DIR) $(call quiet-command, \ - $(PYTHON) $< \ + $(VM_PYTHON) $< \ $(if $(V)$(DEBUG), --debug) \ $(if $(GENISOIMAGE),--genisoimage $(GENISOIMAGE)) \ $(if $(QEMU_LOCAL),--build-path $(BUILD_DIR)) \ @@ -90,11 +107,10 @@ $(IMAGES_DIR)/%.img: $(SRC_PATH)/tests/vm/% \ --build-image $@, \ " VM-IMAGE $*") - # Build in VM $(IMAGE) -vm-build-%: $(IMAGES_DIR)/%.img +vm-build-%: $(IMAGES_DIR)/%.img $(VM_VENV) $(call quiet-command, \ - $(PYTHON) $(SRC_PATH)/tests/vm/$* \ + $(VM_PYTHON) $(SRC_PATH)/tests/vm/$* \ $(if $(V)$(DEBUG), --debug) \ $(if $(DEBUG), --interactive) \ $(if $(J),--jobs $(J)) \ @@ -118,9 +134,9 @@ vm-boot-serial-%: $(IMAGES_DIR)/%.img -device virtio-net-pci,netdev=vnet \ || true -vm-boot-ssh-%: $(IMAGES_DIR)/%.img +vm-boot-ssh-%: $(IMAGES_DIR)/%.img $(VM_VENV) $(call quiet-command, \ - $(PYTHON) $(SRC_PATH)/tests/vm/$* \ + $(VM_PYTHON) $(SRC_PATH)/tests/vm/$* \ $(if $(J),--jobs $(J)) \ $(if $(V)$(DEBUG), --debug) \ $(if $(QEMU_LOCAL),--build-path $(BUILD_DIR)) \ diff --git a/tests/vm/basevm.py b/tests/vm/basevm.py index 254e11c932..2276364c42 100644 --- a/tests/vm/basevm.py +++ b/tests/vm/basevm.py @@ -18,9 +18,6 @@ import socket import logging import time import datetime -sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) -from qemu.machine import QEMUMachine -from qemu.utils import get_info_usernet_hostfwd_port, kvm_available import subprocess import hashlib import argparse @@ -31,6 +28,9 @@ import multiprocessing import traceback import shlex +from qemu.machine import QEMUMachine +from qemu.utils import get_info_usernet_hostfwd_port, kvm_available + SSH_KEY_FILE = os.path.join(os.path.dirname(__file__), "..", "keys", "id_rsa") SSH_PUB_KEY_FILE = os.path.join(os.path.dirname(__file__), @@ -99,6 +99,11 @@ class BaseVM(object): self._source_path = args.source_path # Allow input config to override defaults. self._config = DEFAULT_CONFIG.copy() + + # 1GB per core, minimum of 4. This is only a default.
+ mem = max(4, args.jobs) + self._config['memory'] = f"{mem}G" + if config != None: self._config.update(config) self.validate_ssh_keys() @@ -228,7 +233,8 @@ class BaseVM(object): "-o", "UserKnownHostsFile=" + os.devnull, "-o", "ConnectTimeout={}".format(self._config["ssh_timeout"]), - "-p", str(self.ssh_port), "-i", self._ssh_tmp_key_file] + "-p", str(self.ssh_port), "-i", self._ssh_tmp_key_file, + "-o", "IdentitiesOnly=yes"] # If not in debug mode, set ssh to quiet mode to # avoid printing the results of commands. if not self.debug: diff --git a/tests/vm/centos b/tests/vm/centos index 5c7bc1c1a9..097a9ca14d 100755 --- a/tests/vm/centos +++ b/tests/vm/centos @@ -1,8 +1,8 @@ #!/usr/bin/env python3 # -# CentOS image +# CentOS 8 Stream image # -# Copyright 2018 Red Hat Inc. +# Copyright 2018, 2022 Red Hat Inc. # # Authors: # Fam Zheng @@ -28,13 +28,12 @@ class CentosVM(basevm.BaseVM): tar -xf $SRC_ARCHIVE; make docker-test-block@centos8 {verbose} J={jobs} NETWORK=1; make docker-test-quick@centos8 {verbose} J={jobs} NETWORK=1; - make docker-test-mingw@fedora {verbose} J={jobs} NETWORK=1; """ def build_image(self, img): - cimg = self._download_with_cache("https://cloud.centos.org/centos/8/x86_64/images/CentOS-8-GenericCloud-8.3.2011-20201204.2.x86_64.qcow2") + cimg = self._download_with_cache("https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20220125.1.x86_64.qcow2") img_tmp = img + ".tmp" - subprocess.check_call(["ln", "-f", cimg, img_tmp]) + subprocess.check_call(['cp', '-f', cimg, img_tmp]) self.exec_qemu_img("resize", img_tmp, "50G") self.boot(img_tmp, extra_args = ["-cdrom", self.gen_cloud_init_iso()]) self.wait_ssh() diff --git a/tests/vm/centos.aarch64 b/tests/vm/centos.aarch64 index 81c3004c3c..2de7ef6992 100755 --- a/tests/vm/centos.aarch64 +++ b/tests/vm/centos.aarch64 @@ -20,151 +20,38 @@ import time import traceback import aarch64vm + DEFAULT_CONFIG = { 'cpu' : "max", 'machine' : "virt,gic-version=max", - 'install_cmds' : "yum install -y make ninja-build git python3 gcc gcc-c++ flex bison, "\ - "yum install -y glib2-devel pixman-devel zlib-devel, "\ - "yum install -y perl-Test-Harness, "\ - "alternatives --set python /usr/bin/python3, "\ - "sudo dnf config-manager "\ - "--add-repo=https://download.docker.com/linux/centos/docker-ce.repo,"\ - "sudo dnf install --nobest -y docker-ce.aarch64,"\ - "systemctl enable docker", + 'install_cmds' : ( + "dnf config-manager --set-enabled powertools, " + "dnf config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo, " + "dnf install -y make ninja-build git python38 gcc gcc-c++ flex bison "\ + "glib2-devel perl pixman-devel zlib-devel docker-ce.aarch64, " + "systemctl enable docker, " + ), # We increase beyond the default time since during boot # it can take some time (many seconds) to log into the VM. 
'ssh_timeout' : 60, } + class CentosAarch64VM(basevm.BaseVM): - name = "centos.aarch64" + name = "centos8.aarch64" arch = "aarch64" - login_prompt = "localhost login:" - prompt = '[root@localhost ~]#' - image_name = "CentOS-8-aarch64-1905-dvd1.iso" - image_link = "http://mirrors.usc.edu/pub/linux/distributions/centos/8.0.1905/isos/aarch64/" + image_name = "CentOS-Stream-GenericCloud-8-20220125.1.aarch64.qcow2" + image_link = "https://cloud.centos.org/centos/8-stream/aarch64/images/" image_link += image_name BUILD_SCRIPT = """ set -e; cd $(mktemp -d); - sudo chmod a+r /dev/vdb; - tar --checkpoint=.10 -xf /dev/vdb; + export SRC_ARCHIVE=/dev/vdb; + sudo chmod a+r $SRC_ARCHIVE; + tar -xf $SRC_ARCHIVE; ./configure {configure_opts}; make --output-sync {target} -j{jobs} {verbose}; """ - def set_key_perm(self): - """Set permissions properly on certain files to allow - ssh access.""" - self.console_wait_send(self.prompt, - "/usr/sbin/restorecon -R -v /root/.ssh\n") - self.console_wait_send(self.prompt, - "/usr/sbin/restorecon -R -v "\ - "/home/{}/.ssh\n".format(self._config["guest_user"])) - - def create_kickstart(self): - """Generate the kickstart file used to generate the centos image.""" - # Start with the template for the kickstart. - ks_file = self._source_path + "/tests/vm/centos-8-aarch64.ks" - subprocess.check_call("cp {} ./ks.cfg".format(ks_file), shell=True) - # Append the ssh keys to the kickstart file - # as the post processing phase of installation. - with open("ks.cfg", "a") as f: - # Add in the root pw and guest user. - rootpw = "rootpw --plaintext {}\n" - f.write(rootpw.format(self._config["root_pass"])) - add_user = "user --groups=wheel --name={} "\ - "--password={} --plaintext\n" - f.write(add_user.format(self._config["guest_user"], - self._config["guest_pass"])) - # Add the ssh keys. - f.write("%post --log=/root/ks-post.log\n") - f.write("mkdir -p /root/.ssh\n") - addkey = 'echo "{}" >> /root/.ssh/authorized_keys\n' - addkey_cmd = addkey.format(self._config["ssh_pub_key"]) - f.write(addkey_cmd) - f.write('mkdir -p /home/{}/.ssh\n'.format(self._config["guest_user"])) - addkey = 'echo "{}" >> /home/{}/.ssh/authorized_keys\n' - addkey_cmd = addkey.format(self._config["ssh_pub_key"], - self._config["guest_user"]) - f.write(addkey_cmd) - f.write("%end\n") - # Take our kickstart file and create an .iso from it. - # The .iso will be provided to qemu as we boot - # from the install dvd. - # Anaconda will recognize the label "OEMDRV" and will - # start the automated installation. - gen_iso_img = 'genisoimage -output ks.iso -volid "OEMDRV" ks.cfg' - subprocess.check_call(gen_iso_img, shell=True) - - def wait_for_shutdown(self): - """We wait for qemu to shutdown the VM and exit. - While this happens we display the console view - for easier debugging.""" - # The image creation is essentially done, - # so whether or not the wait is successful we want to - # wait for qemu to exit (the self.wait()) before we return. - try: - self.console_wait("reboot: Power down") - except Exception as e: - sys.stderr.write("Exception hit\n") - if isinstance(e, SystemExit) and e.code == 0: - return 0 - traceback.print_exc() - finally: - self.wait() - - def build_base_image(self, dest_img): - """Run through the centos installer to create - a base image with name dest_img.""" - # We create the temp image, and only rename - # to destination when we are done. - img = dest_img + ".tmp" - # Create an empty image. - # We will provide this as the install destination. 
- qemu_img_create = "qemu-img create {} 50G".format(img) - subprocess.check_call(qemu_img_create, shell=True) - - # Create our kickstart file to be fed to the installer. - self.create_kickstart() - # Boot the install dvd with the params as our ks.iso - os_img = self._download_with_cache(self.image_link) - dvd_iso = "centos-8-dvd.iso" - subprocess.check_call(["cp", "-f", os_img, dvd_iso]) - extra_args = "-cdrom ks.iso" - extra_args += " -drive file={},if=none,id=drive1,cache=writeback" - extra_args += " -device virtio-blk,drive=drive1,bootindex=1" - extra_args = extra_args.format(dvd_iso).split(" ") - self.boot(img, extra_args=extra_args) - self.console_wait_send("change the selection", "\n") - # We seem to need to hit esc (chr(27)) twice to abort the - # media check, which takes a long time. - # Waiting a bit seems to be more reliable before hitting esc. - self.console_wait("Checking") - time.sleep(5) - self.console_wait_send("Checking", chr(27)) - time.sleep(5) - self.console_wait_send("Checking", chr(27)) - print("Found Checking") - # Give sufficient time for the installer to create the image. - self.console_init(timeout=7200) - self.wait_for_shutdown() - os.rename(img, dest_img) - print("Done with base image build: {}".format(dest_img)) - - def check_create_base_img(self, img_base, img_dest): - """Create a base image using the installer. - We will use the base image if it exists. - This helps cut down on install time in case we - need to restart image creation, - since the base image creation can take a long time.""" - if not os.path.exists(img_base): - print("Generate new base image: {}".format(img_base)) - self.build_base_image(img_base); - else: - print("Use existing base image: {}".format(img_base)) - # Save a copy of the base image and copy it to dest. - # which we will use going forward. - subprocess.check_call(["cp", img_base, img_dest]) def boot(self, img, extra_args=None): aarch64vm.create_flash_images(self._tmpdir, self._efi_aarch64) @@ -186,42 +73,28 @@ class CentosAarch64VM(basevm.BaseVM): super(CentosAarch64VM, self).boot(img, extra_args=extra_args) def build_image(self, img): + cimg = self._download_with_cache(self.image_link) img_tmp = img + ".tmp" - self.check_create_base_img(img + ".base", img_tmp) - - # Boot the new image for the first time to finish installation. - self.boot(img_tmp) - self.console_init() - self.console_wait_send(self.login_prompt, "root\n") - self.console_wait_send("Password:", - "{}\n".format(self._config["root_pass"])) - - self.set_key_perm() - self.console_wait_send(self.prompt, "rpm -q centos-release\n") - enable_adapter = "sed -i 's/ONBOOT=no/ONBOOT=yes/g'" \ - " /etc/sysconfig/network-scripts/ifcfg-enp0s1\n" - self.console_wait_send(self.prompt, enable_adapter) - self.console_wait_send(self.prompt, "ifup enp0s1\n") - self.console_wait_send(self.prompt, - 'echo "qemu ALL=(ALL) NOPASSWD:ALL" | '\ - 'sudo tee /etc/sudoers.d/qemu\n') - self.console_wait(self.prompt) - - # Rest of the commands we issue through ssh. 
+ subprocess.run(['cp', '-f', cimg, img_tmp]) + self.exec_qemu_img("resize", img_tmp, "50G") + self.boot(img_tmp, extra_args = ["-cdrom", self.gen_cloud_init_iso()]) self.wait_ssh(wait_root=True) + self.ssh_root_check("touch /etc/cloud/cloud-init.disabled") # If the user chooses *not* to do the second phase, # then we will jump right to the graceful shutdown if self._config['install_cmds'] != "": install_cmds = self._config['install_cmds'].split(',') for cmd in install_cmds: - self.ssh_root(cmd) + self.ssh_root_check(cmd) + self.ssh_root("poweroff") - self.wait_for_shutdown() + self.wait() os.rename(img_tmp, img) print("image creation complete: {}".format(img)) return 0 + if __name__ == "__main__": defaults = aarch64vm.get_config_defaults(CentosAarch64VM, DEFAULT_CONFIG) sys.exit(basevm.main(CentosAarch64VM, defaults)) diff --git a/tests/vm/fedora b/tests/vm/fedora deleted file mode 100755 index b977efe4a2..0000000000 --- a/tests/vm/fedora +++ /dev/null @@ -1,191 +0,0 @@ -#!/usr/bin/env python3 -# -# Fedora VM image -# -# Copyright 2019 Red Hat Inc. -# -# Authors: -# Gerd Hoffmann -# -# This code is licensed under the GPL version 2 or later. See -# the COPYING file in the top-level directory. -# - -import os -import re -import sys -import time -import socket -import subprocess -import basevm - -class FedoraVM(basevm.BaseVM): - name = "fedora" - arch = "x86_64" - - base = "https://archives.fedoraproject.org/pub/archive/fedora/linux/releases/30/" - link = base + "Server/x86_64/iso/Fedora-Server-netinst-x86_64-30-1.2.iso" - repo = base + "Server/x86_64/os/" - full = base + "Everything/x86_64/os/" - csum = "5e4eac4566d8c572bfb3bcf54b7d6c82006ec3c6c882a2c9235c6d3494d7b100" - size = "20G" - pkgs = [ - # tools - 'git-core', - 'gcc', 'binutils', 'make', 'ninja-build', - - # perl - 'perl-Test-Harness', - - # libs: usb - '"pkgconfig(libusb-1.0)"', - '"pkgconfig(libusbredirparser-0.5)"', - - # libs: crypto - '"pkgconfig(gnutls)"', - - # libs: ui - '"pkgconfig(sdl2)"', - '"pkgconfig(gtk+-3.0)"', - '"pkgconfig(ncursesw)"', - - # libs: audio - '"pkgconfig(libpulse)"', - '"pkgconfig(alsa)"', - - # libs: migration - '"pkgconfig(libzstd)"', -] - - BUILD_SCRIPT = """ - set -e; - rm -rf /home/qemu/qemu-test.* - cd $(mktemp -d /home/qemu/qemu-test.XXXXXX); - mkdir src build; cd src; - tar -xf /dev/vdb; - cd ../build - ../src/configure --python=python3 {configure_opts}; - gmake --output-sync -j{jobs} {target} {verbose}; - """ - - def build_image(self, img): - self.print_step("Downloading install iso") - cimg = self._download_with_cache(self.link, sha256sum=self.csum) - img_tmp = img + ".tmp" - iso = img + ".install.iso" - - self.print_step("Preparing iso and disk image") - subprocess.check_call(["cp", "-f", cimg, iso]) - self.exec_qemu_img("create", "-f", "qcow2", img_tmp, self.size) - self.print_step("Booting installer") - self.boot(img_tmp, extra_args = [ - "-bios", "pc-bios/bios-256k.bin", - "-machine", "graphics=off", - "-device", "VGA", - "-cdrom", iso - ]) - self.console_init(300) - self.console_wait("installation process.") - time.sleep(0.3) - self.console_send("\t") - time.sleep(0.3) - self.console_send(" console=ttyS0") - proxy = os.environ.get("http_proxy") - if not proxy is None: - self.console_send(" proxy=%s" % proxy) - self.console_send(" inst.proxy=%s" % proxy) - self.console_send(" inst.repo=%s" % self.repo) - self.console_send("\n") - - self.console_wait_send("2) Use text mode", "2\n") - - self.console_wait_send("5) [!] 
Installation Dest", "5\n") - self.console_wait_send("1) [x]", "c\n") - self.console_wait_send("2) [ ] Use All Space", "2\n") - self.console_wait_send("2) [x] Use All Space", "c\n") - self.console_wait_send("1) [ ] Standard Part", "1\n") - self.console_wait_send("1) [x] Standard Part", "c\n") - - self.console_wait_send("7) [!] Root password", "7\n") - self.console_wait("Password:") - self.console_send("%s\n" % self._config["root_pass"]) - self.console_wait("Password (confirm):") - self.console_send("%s\n" % self._config["root_pass"]) - - self.console_wait_send("8) [ ] User creation", "8\n") - self.console_wait_send("1) [ ] Create user", "1\n") - self.console_wait_send("3) User name", "3\n") - self.console_wait_send("ENTER:", "%s\n" % self._config["guest_user"]) - self.console_wait_send("4) [ ] Use password", "4\n") - self.console_wait_send("5) Password", "5\n") - self.console_wait("Password:") - self.console_send("%s\n" % self._config["guest_pass"]) - self.console_wait("Password (confirm):") - self.console_send("%s\n" % self._config["guest_pass"]) - self.console_wait_send("7) Groups", "c\n") - - while True: - good = self.console_wait("3) [x] Installation", - "3) [!] Installation") - self.console_send("r\n") - if good: - break - time.sleep(10) - - while True: - good = self.console_wait("4) [x] Software", - "4) [!] Software") - self.console_send("r\n") - if good: - break - time.sleep(10) - self.console_send("r\n" % self._config["guest_pass"]) - - self.console_wait_send("'b' to begin install", "b\n") - - self.print_step("Installation started now, this will take a while") - - self.console_wait_send("Installation complete", "\n") - self.print_step("Installation finished, rebooting") - - # setup qemu user - prompt = " ~]$" - self.console_ssh_init(prompt, self._config["guest_user"], - self._config["guest_pass"]) - self.console_wait_send(prompt, "exit\n") - - # setup root user - prompt = " ~]#" - self.console_ssh_init(prompt, "root", self._config["root_pass"]) - self.console_sshd_config(prompt) - - # setup virtio-blk #1 (tarfile) - self.console_wait(prompt) - self.console_send("echo 'KERNEL==\"vdb\" MODE=\"666\"' >> %s\n" % - "/etc/udev/rules.d/99-qemu.rules") - - self.print_step("Configuration finished, rebooting") - self.console_wait_send(prompt, "reboot\n") - self.console_wait("login:") - self.wait_ssh() - - self.print_step("Installing packages") - self.ssh_root_check("rm -vf /etc/yum.repos.d/fedora*.repo\n") - self.ssh_root_check("echo '[fedora]' >> /etc/yum.repos.d/qemu.repo\n") - self.ssh_root_check("echo 'baseurl=%s' >> /etc/yum.repos.d/qemu.repo\n" % self.full) - self.ssh_root_check("echo 'gpgcheck=0' >> /etc/yum.repos.d/qemu.repo\n") - self.ssh_root_check("dnf install -y %s\n" % " ".join(self.pkgs)) - - # shutdown - self.ssh_root(self.poweroff) - self.console_wait("sleep state S5") - self.wait() - - if os.path.exists(img): - os.remove(img) - os.rename(img_tmp, img) - os.remove(iso) - self.print_step("All done") - -if __name__ == "__main__": - sys.exit(basevm.main(FedoraVM)) diff --git a/tests/vm/freebsd b/tests/vm/freebsd index 6e20e84322..d6ff4461ba 100755 --- a/tests/vm/freebsd +++ b/tests/vm/freebsd @@ -28,8 +28,8 @@ class FreeBSDVM(basevm.BaseVM): name = "freebsd" arch = "x86_64" - link = "https://download.freebsd.org/ftp/releases/ISO-IMAGES/12.2/FreeBSD-12.2-RELEASE-amd64-disc1.iso.xz" - csum = "a4530246cafbf1dd42a9bd3ea441ca9a78a6a0cd070278cbdf63f3a6f803ecae" + link = "https://download.freebsd.org/ftp/releases/ISO-IMAGES/12.3/FreeBSD-12.3-RELEASE-amd64-disc1.iso.xz" + csum = 
"36dd0de50f1fe5f0a88e181e94657656de26fb64254412f74e80e128e8b938b4" size = "20G" pkgs = [ # build tools @@ -63,10 +63,14 @@ class FreeBSDVM(basevm.BaseVM): # libs: migration "zstd", + + # libs: networking + "libslirp", + + # libs: sndio + "sndio", ] - # TODO: Enable gnutls again once FreeBSD's libtasn1 got fixed - # See: https://gitlab.com/gnutls/libtasn1/-/merge_requests/71 BUILD_SCRIPT = """ set -e; rm -rf /home/qemu/qemu-test.* @@ -74,7 +78,7 @@ class FreeBSDVM(basevm.BaseVM): mkdir src build; cd src; tar -xf /dev/vtbd1; cd ../build - ../src/configure --python=python3.7 --disable-gnutls {configure_opts}; + ../src/configure --python=python3.7 {configure_opts}; gmake --output-sync -j{jobs} {target} {verbose}; """ @@ -97,7 +101,6 @@ class FreeBSDVM(basevm.BaseVM): self.print_step("Booting installer") self.boot(img_tmp, extra_args = [ - "-bios", "pc-bios/bios-256k.bin", "-machine", "graphics=off", "-device", "VGA", "-cdrom", iso diff --git a/tests/vm/haiku.x86_64 b/tests/vm/haiku.x86_64 index 2eb736dae1..29668bc272 100755 --- a/tests/vm/haiku.x86_64 +++ b/tests/vm/haiku.x86_64 @@ -2,7 +2,7 @@ # # Haiku VM image # -# Copyright 2020 Haiku, Inc. +# Copyright 2020-2022 Haiku, Inc. # # Authors: # Alexander von Gluck IV @@ -48,8 +48,8 @@ class HaikuVM(basevm.BaseVM): name = "haiku" arch = "x86_64" - link = "https://app.vagrantup.com/haiku-os/boxes/r1beta2-x86_64/versions/20200702/providers/libvirt.box" - csum = "41c38b316e0cbdbc66b5dbaf3612b866700a4f35807cb1eb266a5bf83e9e68d5" + link = "https://app.vagrantup.com/haiku-os/boxes/r1beta3-x86_64/versions/20220216/providers/libvirt.box" + csum = "e67d4aacbcc687013d5cc91990ddd86cc5d70a5d28432ae2691944f8ce5d5041" poweroff = "shutdown" @@ -71,6 +71,7 @@ class HaikuVM(basevm.BaseVM): "devel:libpixman_1", "devel:libpng16", "devel:libsdl2_2.0", + "devel:libslirp", "devel:libsnappy", "devel:libssh2", "devel:libtasn1", @@ -89,7 +90,7 @@ class HaikuVM(basevm.BaseVM): mkdir -p /usr/bin ln -s /boot/system/bin/env /usr/bin/env cd ../build - ../src/configure --disable-slirp {configure_opts}; + ../src/configure {configure_opts}; make --output-sync -j{jobs} {target} {verbose}; """ @@ -99,7 +100,7 @@ class HaikuVM(basevm.BaseVM): self.print_step("Extracting disk image") - subprocess.check_call(["tar", "xzf", tarball, "./box.img", "-O"], + subprocess.check_call(["tar", "xzf", tarball, "box.img", "-O"], stdout=open(img, 'wb')) self.print_step("Preparing disk image") diff --git a/tests/vm/netbsd b/tests/vm/netbsd index 4cc58df130..aa54338dfa 100755 --- a/tests/vm/netbsd +++ b/tests/vm/netbsd @@ -22,8 +22,8 @@ class NetBSDVM(basevm.BaseVM): name = "netbsd" arch = "x86_64" - link = "https://cdn.netbsd.org/pub/NetBSD/NetBSD-9.2/images/NetBSD-9.2-amd64.iso" - csum = "5ee0ea101f73386b9b424f5d1041e371db3c42fdd6f4e4518dc79c4a08f31d43091ebe93425c9f0dcaaed2b51131836fe6774f33f89030b58d64709b35fda72f" + link = "https://cdn.netbsd.org/pub/NetBSD/NetBSD-9.3/images/NetBSD-9.3-amd64.iso" + csum = "2bfce544f762a579f61478e7106c436fc48731ff25cf6f79b392ba5752e6f5ec130364286f7471716290a5f033637cf56aacee7fedb91095face59adf36300c3" size = "20G" pkgs = [ # tools @@ -46,13 +46,17 @@ class NetBSDVM(basevm.BaseVM): "jpeg", "png", - # libs: ui + # libs: ui + "capstone", "SDL2", "gtk3+", "libxkbcommon", # libs: migration "zstd", + + # libs: networking + "libslirp", ] BUILD_SCRIPT = """ @@ -85,7 +89,6 @@ class NetBSDVM(basevm.BaseVM): self.print_step("Booting installer") self.boot(img_tmp, extra_args = [ - "-bios", "pc-bios/bios-256k.bin", "-machine", "graphics=off", "-cdrom", iso ]) diff --git 
a/tests/vm/openbsd b/tests/vm/openbsd index c4c78a80f1..eaeb201e91 100755 --- a/tests/vm/openbsd +++ b/tests/vm/openbsd @@ -22,8 +22,8 @@ class OpenBSDVM(basevm.BaseVM): name = "openbsd" arch = "x86_64" - link = "https://cdn.openbsd.org/pub/OpenBSD/6.9/amd64/install69.iso" - csum = "140d26548aec680e34bb5f82295414228e7f61e4f5e7951af066014fda2d6e43" + link = "https://cdn.openbsd.org/pub/OpenBSD/7.2/amd64/install72.iso" + csum = "0369ef40a3329efcb978c578c7fdc7bda71e502aecec930a74b44160928c91d3" size = "20G" pkgs = [ # tools @@ -48,13 +48,17 @@ class OpenBSDVM(basevm.BaseVM): "jpeg", "png", - # libs: ui + # libs: ui + "capstone", "sdl2", "gtk+3", "libxkbcommon", # libs: migration "zstd", + + # libs: networking + "libslirp", ] BUILD_SCRIPT = """ @@ -81,7 +85,6 @@ class OpenBSDVM(basevm.BaseVM): self.print_step("Booting installer") self.boot(img_tmp, extra_args = [ - "-bios", "pc-bios/bios-256k.bin", "-machine", "graphics=off", "-device", "VGA", "-cdrom", iso @@ -95,10 +98,9 @@ class OpenBSDVM(basevm.BaseVM): self.console_wait_send("Terminal type", "xterm\n") self.console_wait_send("System hostname", "openbsd\n") self.console_wait_send("Which network interface", "vio0\n") - self.console_wait_send("IPv4 address", "dhcp\n") + self.console_wait_send("IPv4 address", "autoconf\n") self.console_wait_send("IPv6 address", "none\n") self.console_wait_send("Which network interface", "done\n") - self.console_wait_send("DNS domain name", "localnet\n") self.console_wait("Password for root account") self.console_send("%s\n" % self._config["root_pass"]) self.console_wait("Password for root account") diff --git a/tests/vm/ubuntu.aarch64 b/tests/vm/ubuntu.aarch64 index b291945a7e..666947393b 100755 --- a/tests/vm/ubuntu.aarch64 +++ b/tests/vm/ubuntu.aarch64 @@ -32,9 +32,13 @@ DEFAULT_CONFIG = { class UbuntuAarch64VM(ubuntuvm.UbuntuVM): name = "ubuntu.aarch64" arch = "aarch64" - image_name = "ubuntu-18.04-server-cloudimg-arm64.img" - image_link = "https://cloud-images.ubuntu.com/releases/18.04/release/" + image_name - image_sha256="0fdcba761965735a8a903d8b88df8e47f156f48715c00508e4315c506d7d3cb1" + # NOTE: The Ubuntu 20.04 cloud images are periodically updated. The + # fixed image chosen below is the latest release at time of + # writing. Using a rolling latest instead would mean that the SHA + # would be incorrect at an indeterminate point in the future. + image_name = "focal-server-cloudimg-arm64.img" + image_link = "https://cloud-images.ubuntu.com/focal/20220615/" + image_name + image_sha256="95a027336e197debe88c92ff2e554598e23c409139e1e750b71b3b820b514832" BUILD_SCRIPT = """ set -e; cd $(mktemp -d); diff --git a/tests/vm/ubuntu.i386 b/tests/vm/ubuntu.i386 deleted file mode 100755 index 47681b6f87..0000000000 --- a/tests/vm/ubuntu.i386 +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python3 -# -# Ubuntu i386 image -# -# Copyright 2017 Red Hat Inc. -# -# Authors: -# Fam Zheng -# -# This code is licensed under the GPL version 2 or later. See -# the COPYING file in the top-level directory. 
-# - -import sys -import basevm -import ubuntuvm - -DEFAULT_CONFIG = { - 'install_cmds' : "apt-get update,"\ - "apt-get build-dep -y qemu,"\ - "apt-get install -y libfdt-dev language-pack-en ninja-build", -} - -class UbuntuX86VM(ubuntuvm.UbuntuVM): - name = "ubuntu.i386" - arch = "i386" - image_link="https://cloud-images.ubuntu.com/releases/bionic/"\ - "release-20191114/ubuntu-18.04-server-cloudimg-i386.img" - image_sha256="28969840626d1ea80bb249c08eef1a4533e8904aa51a327b40f37ac4b4ff04ef" - BUILD_SCRIPT = """ - set -e; - cd $(mktemp -d); - sudo chmod a+r /dev/vdb; - tar -xf /dev/vdb; - ./configure {configure_opts}; - make --output-sync {target} -j{jobs} {verbose}; - """ - -if __name__ == "__main__": - sys.exit(basevm.main(UbuntuX86VM, DEFAULT_CONFIG)) diff --git a/tools/meson.build b/tools/meson.build index 3e5a0abfa2..10eb3a043f 100644 --- a/tools/meson.build +++ b/tools/meson.build @@ -1,25 +1,12 @@ -have_virtiofsd = (targetos == 'linux' and - have_tools and - seccomp.found() and - libcap_ng.found() and - 'CONFIG_VHOST_USER' in config_host) - -if get_option('virtiofsd').enabled() - if not have_virtiofsd - if targetos != 'linux' - error('virtiofsd requires Linux') - elif not seccomp.found() or not libcap_ng.found() - error('virtiofsd requires libcap-ng-devel and seccomp-devel') - elif 'CONFIG_VHOST_USER' not in config_host - error('virtiofsd needs vhost-user support') - else - # Disabled all the tools but virtiofsd. - have_virtiofsd = true - endif - endif -elif get_option('virtiofsd').disabled() or not have_system - have_virtiofsd = false -endif +have_virtiofsd = get_option('virtiofsd') \ + .require(targetos == 'linux', + error_message: 'virtiofsd requires Linux') \ + .require(seccomp.found() and libcap_ng.found(), + error_message: 'virtiofsd requires libcap-ng-devel and seccomp-devel') \ + .require(have_vhost_user, + error_message: 'virtiofsd needs vhost-user support') \ + .disable_auto_if(not have_tools and not have_system) \ + .allowed() if have_virtiofsd subdir('virtiofsd') diff --git a/tools/virtiofsd/fuse_common.h b/tools/virtiofsd/fuse_common.h index 0c2665b977..bf46954dab 100644 --- a/tools/virtiofsd/fuse_common.h +++ b/tools/virtiofsd/fuse_common.h @@ -377,6 +377,11 @@ struct fuse_file_info { */ #define FUSE_CAP_SETXATTR_EXT (1 << 29) +/** + * Indicates that file server supports creating file security context + */ +#define FUSE_CAP_SECURITY_CTX (1ULL << 32) + /** * Ioctl flags * @@ -439,7 +444,7 @@ struct fuse_conn_info { /** * Capability flags that the kernel supports (read-only) */ - unsigned capable; + uint64_t capable; /** * Capability flags that the filesystem wants to enable. @@ -447,7 +452,7 @@ struct fuse_conn_info { * libfuse attempts to initialize this field with * reasonable default values before calling the init() handler. */ - unsigned want; + uint64_t want; /** * Maximum number of pending "background" requests.
A diff --git a/tools/virtiofsd/fuse_i.h b/tools/virtiofsd/fuse_i.h index 492e002181..a5572fa4ae 100644 --- a/tools/virtiofsd/fuse_i.h +++ b/tools/virtiofsd/fuse_i.h @@ -15,6 +15,12 @@ struct fv_VuDev; struct fv_QueueInfo; +struct fuse_security_context { + const char *name; + uint32_t ctxlen; + const void *ctx; +}; + struct fuse_req { struct fuse_session *se; uint64_t unique; @@ -35,6 +41,7 @@ struct fuse_req { } u; struct fuse_req *next; struct fuse_req *prev; + struct fuse_security_context secctx; }; struct fuse_notify_req { diff --git a/tools/virtiofsd/fuse_lowlevel.c b/tools/virtiofsd/fuse_lowlevel.c index e4679c73ab..2f08471627 100644 --- a/tools/virtiofsd/fuse_lowlevel.c +++ b/tools/virtiofsd/fuse_lowlevel.c @@ -886,11 +886,63 @@ static void do_readlink(fuse_req_t req, fuse_ino_t nodeid, } } +static int parse_secctx_fill_req(fuse_req_t req, struct fuse_mbuf_iter *iter) +{ + struct fuse_secctx_header *fsecctx_header; + struct fuse_secctx *fsecctx; + const void *secctx; + const char *name; + + fsecctx_header = fuse_mbuf_iter_advance(iter, sizeof(*fsecctx_header)); + if (!fsecctx_header) { + return -EINVAL; + } + + /* + * As of now maximum of one security context is supported. It can + * change in future though. + */ + if (fsecctx_header->nr_secctx > 1) { + return -EINVAL; + } + + /* No security context sent. Maybe no LSM supports it */ + if (!fsecctx_header->nr_secctx) { + return 0; + } + + fsecctx = fuse_mbuf_iter_advance(iter, sizeof(*fsecctx)); + if (!fsecctx) { + return -EINVAL; + } + + /* struct fsecctx with zero sized context is not expected */ + if (!fsecctx->size) { + return -EINVAL; + } + name = fuse_mbuf_iter_advance_str(iter); + if (!name) { + return -EINVAL; + } + + secctx = fuse_mbuf_iter_advance(iter, fsecctx->size); + if (!secctx) { + return -EINVAL; + } + + req->secctx.name = name; + req->secctx.ctx = secctx; + req->secctx.ctxlen = fsecctx->size; + return 0; +} + static void do_mknod(fuse_req_t req, fuse_ino_t nodeid, struct fuse_mbuf_iter *iter) { struct fuse_mknod_in *arg; const char *name; + bool secctx_enabled = req->se->conn.want & FUSE_CAP_SECURITY_CTX; + int err; arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); name = fuse_mbuf_iter_advance_str(iter); @@ -901,6 +953,14 @@ static void do_mknod(fuse_req_t req, fuse_ino_t nodeid, req->ctx.umask = arg->umask; + if (secctx_enabled) { + err = parse_secctx_fill_req(req, iter); + if (err) { + fuse_reply_err(req, -err); + return; + } + } + if (req->se->op.mknod) { req->se->op.mknod(req, nodeid, name, arg->mode, arg->rdev); } else { @@ -913,6 +973,8 @@ static void do_mkdir(fuse_req_t req, fuse_ino_t nodeid, { struct fuse_mkdir_in *arg; const char *name; + bool secctx_enabled = req->se->conn.want & FUSE_CAP_SECURITY_CTX; + int err; arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); name = fuse_mbuf_iter_advance_str(iter); @@ -923,6 +985,14 @@ static void do_mkdir(fuse_req_t req, fuse_ino_t nodeid, req->ctx.umask = arg->umask; + if (secctx_enabled) { + err = parse_secctx_fill_req(req, iter); + if (err) { + fuse_reply_err(req, err); + return; + } + } + if (req->se->op.mkdir) { req->se->op.mkdir(req, nodeid, name, arg->mode); } else { @@ -969,12 +1039,22 @@ static void do_symlink(fuse_req_t req, fuse_ino_t nodeid, { const char *name = fuse_mbuf_iter_advance_str(iter); const char *linkname = fuse_mbuf_iter_advance_str(iter); + bool secctx_enabled = req->se->conn.want & FUSE_CAP_SECURITY_CTX; + int err; if (!name || !linkname) { fuse_reply_err(req, EINVAL); return; } + if (secctx_enabled) { + err = parse_secctx_fill_req(req, 
iter); + if (err) { + fuse_reply_err(req, err); + return; + } + } + if (req->se->op.symlink) { req->se->op.symlink(req, linkname, nodeid, name); } else { @@ -1048,6 +1128,8 @@ static void do_link(fuse_req_t req, fuse_ino_t nodeid, static void do_create(fuse_req_t req, fuse_ino_t nodeid, struct fuse_mbuf_iter *iter) { + bool secctx_enabled = req->se->conn.want & FUSE_CAP_SECURITY_CTX; + if (req->se->op.create) { struct fuse_create_in *arg; struct fuse_file_info fi; @@ -1060,6 +1142,15 @@ static void do_create(fuse_req_t req, fuse_ino_t nodeid, return; } + if (secctx_enabled) { + int err; + err = parse_secctx_fill_req(req, iter); + if (err) { + fuse_reply_err(req, err); + return; + } + } + memset(&fi, 0, sizeof(fi)); fi.flags = arg->flags; fi.kill_priv = arg->open_flags & FUSE_OPEN_KILL_SUIDGID; @@ -1876,15 +1967,30 @@ static void do_lseek(fuse_req_t req, fuse_ino_t nodeid, } } +static void do_syncfs(fuse_req_t req, fuse_ino_t nodeid, + struct fuse_mbuf_iter *iter) +{ + if (req->se->op.syncfs) { + req->se->op.syncfs(req, nodeid); + } else { + fuse_reply_err(req, ENOSYS); + } +} + static void do_init(fuse_req_t req, fuse_ino_t nodeid, struct fuse_mbuf_iter *iter) { size_t compat_size = offsetof(struct fuse_init_in, max_readahead); + size_t compat2_size = offsetof(struct fuse_init_in, flags) + + sizeof(uint32_t); + /* Fuse structure extended with minor version 36 */ + size_t compat3_size = endof(struct fuse_init_in, unused); struct fuse_init_in *arg; struct fuse_init_out outarg; struct fuse_session *se = req->se; size_t bufsize = se->bufsize; size_t outargsize = sizeof(outarg); + uint64_t flags = 0; (void)nodeid; @@ -1897,15 +2003,29 @@ static void do_init(fuse_req_t req, fuse_ino_t nodeid, /* ...and now consume the new fields. */ if (arg->major == 7 && arg->minor >= 6) { - if (!fuse_mbuf_iter_advance(iter, sizeof(*arg) - compat_size)) { + if (!fuse_mbuf_iter_advance(iter, compat2_size - compat_size)) { fuse_reply_err(req, EINVAL); return; } + flags |= arg->flags; + } + + /* + * fuse_init_in was extended again with minor version 36. Just read + * current known size of fuse_init so that future extension and + * header rebase does not cause breakage. 
+ */ + if (sizeof(*arg) > compat2_size && (arg->flags & FUSE_INIT_EXT)) { + if (!fuse_mbuf_iter_advance(iter, compat3_size - compat2_size)) { + fuse_reply_err(req, EINVAL); + return; + } + flags |= (uint64_t) arg->flags2 << 32; } fuse_log(FUSE_LOG_DEBUG, "INIT: %u.%u\n", arg->major, arg->minor); if (arg->major == 7 && arg->minor >= 6) { - fuse_log(FUSE_LOG_DEBUG, "flags=0x%08x\n", arg->flags); + fuse_log(FUSE_LOG_DEBUG, "flags=0x%016" PRIx64 "\n", flags); fuse_log(FUSE_LOG_DEBUG, "max_readahead=0x%08x\n", arg->max_readahead); } se->conn.proto_major = arg->major; @@ -1933,70 +2053,73 @@ static void do_init(fuse_req_t req, fuse_ino_t nodeid, if (arg->max_readahead < se->conn.max_readahead) { se->conn.max_readahead = arg->max_readahead; } - if (arg->flags & FUSE_ASYNC_READ) { + if (flags & FUSE_ASYNC_READ) { se->conn.capable |= FUSE_CAP_ASYNC_READ; } - if (arg->flags & FUSE_POSIX_LOCKS) { + if (flags & FUSE_POSIX_LOCKS) { se->conn.capable |= FUSE_CAP_POSIX_LOCKS; } - if (arg->flags & FUSE_ATOMIC_O_TRUNC) { + if (flags & FUSE_ATOMIC_O_TRUNC) { se->conn.capable |= FUSE_CAP_ATOMIC_O_TRUNC; } - if (arg->flags & FUSE_EXPORT_SUPPORT) { + if (flags & FUSE_EXPORT_SUPPORT) { se->conn.capable |= FUSE_CAP_EXPORT_SUPPORT; } - if (arg->flags & FUSE_DONT_MASK) { + if (flags & FUSE_DONT_MASK) { se->conn.capable |= FUSE_CAP_DONT_MASK; } - if (arg->flags & FUSE_FLOCK_LOCKS) { + if (flags & FUSE_FLOCK_LOCKS) { se->conn.capable |= FUSE_CAP_FLOCK_LOCKS; } - if (arg->flags & FUSE_AUTO_INVAL_DATA) { + if (flags & FUSE_AUTO_INVAL_DATA) { se->conn.capable |= FUSE_CAP_AUTO_INVAL_DATA; } - if (arg->flags & FUSE_DO_READDIRPLUS) { + if (flags & FUSE_DO_READDIRPLUS) { se->conn.capable |= FUSE_CAP_READDIRPLUS; } - if (arg->flags & FUSE_READDIRPLUS_AUTO) { + if (flags & FUSE_READDIRPLUS_AUTO) { se->conn.capable |= FUSE_CAP_READDIRPLUS_AUTO; } - if (arg->flags & FUSE_ASYNC_DIO) { + if (flags & FUSE_ASYNC_DIO) { se->conn.capable |= FUSE_CAP_ASYNC_DIO; } - if (arg->flags & FUSE_WRITEBACK_CACHE) { + if (flags & FUSE_WRITEBACK_CACHE) { se->conn.capable |= FUSE_CAP_WRITEBACK_CACHE; } - if (arg->flags & FUSE_NO_OPEN_SUPPORT) { + if (flags & FUSE_NO_OPEN_SUPPORT) { se->conn.capable |= FUSE_CAP_NO_OPEN_SUPPORT; } - if (arg->flags & FUSE_PARALLEL_DIROPS) { + if (flags & FUSE_PARALLEL_DIROPS) { se->conn.capable |= FUSE_CAP_PARALLEL_DIROPS; } - if (arg->flags & FUSE_POSIX_ACL) { + if (flags & FUSE_POSIX_ACL) { se->conn.capable |= FUSE_CAP_POSIX_ACL; } - if (arg->flags & FUSE_HANDLE_KILLPRIV) { + if (flags & FUSE_HANDLE_KILLPRIV) { se->conn.capable |= FUSE_CAP_HANDLE_KILLPRIV; } - if (arg->flags & FUSE_NO_OPENDIR_SUPPORT) { + if (flags & FUSE_NO_OPENDIR_SUPPORT) { se->conn.capable |= FUSE_CAP_NO_OPENDIR_SUPPORT; } - if (!(arg->flags & FUSE_MAX_PAGES)) { + if (!(flags & FUSE_MAX_PAGES)) { size_t max_bufsize = FUSE_DEFAULT_MAX_PAGES_PER_REQ * getpagesize() + FUSE_BUFFER_HEADER_SIZE; if (bufsize > max_bufsize) { bufsize = max_bufsize; } } - if (arg->flags & FUSE_SUBMOUNTS) { + if (flags & FUSE_SUBMOUNTS) { se->conn.capable |= FUSE_CAP_SUBMOUNTS; } - if (arg->flags & FUSE_HANDLE_KILLPRIV_V2) { + if (flags & FUSE_HANDLE_KILLPRIV_V2) { se->conn.capable |= FUSE_CAP_HANDLE_KILLPRIV_V2; } - if (arg->flags & FUSE_SETXATTR_EXT) { + if (flags & FUSE_SETXATTR_EXT) { se->conn.capable |= FUSE_CAP_SETXATTR_EXT; } + if (flags & FUSE_SECURITY_CTX) { + se->conn.capable |= FUSE_CAP_SECURITY_CTX; + } #ifdef HAVE_SPLICE #ifdef HAVE_VMSPLICE se->conn.capable |= FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE; @@ -2051,7 +2174,7 @@ static void do_init(fuse_req_t 
req, fuse_ino_t nodeid, if (se->conn.want & (~se->conn.capable)) { fuse_log(FUSE_LOG_ERR, "fuse: error: filesystem requested capabilities " - "0x%x that are not supported by kernel, aborting.\n", + "0x%" PRIx64 " that are not supported by kernel, aborting.\n", se->conn.want & (~se->conn.capable)); fuse_reply_err(req, EPROTO); se->error = -EPROTO; @@ -2062,7 +2185,7 @@ static void do_init(fuse_req_t req, fuse_ino_t nodeid, if (se->conn.max_write < bufsize - FUSE_BUFFER_HEADER_SIZE) { se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE; } - if (arg->flags & FUSE_MAX_PAGES) { + if (flags & FUSE_MAX_PAGES) { outarg.flags |= FUSE_MAX_PAGES; outarg.max_pages = (se->conn.max_write - 1) / getpagesize() + 1; } @@ -2136,8 +2259,14 @@ static void do_init(fuse_req_t req, fuse_ino_t nodeid, outarg.flags |= FUSE_SETXATTR_EXT; } + if (se->conn.want & FUSE_CAP_SECURITY_CTX) { + /* bits 32..63 get shifted down 32 bits into the flags2 field */ + outarg.flags2 |= FUSE_SECURITY_CTX >> 32; + } + fuse_log(FUSE_LOG_DEBUG, " INIT: %u.%u\n", outarg.major, outarg.minor); - fuse_log(FUSE_LOG_DEBUG, " flags=0x%08x\n", outarg.flags); + fuse_log(FUSE_LOG_DEBUG, " flags2=0x%08x flags=0x%08x\n", outarg.flags2, + outarg.flags); fuse_log(FUSE_LOG_DEBUG, " max_readahead=0x%08x\n", outarg.max_readahead); fuse_log(FUSE_LOG_DEBUG, " max_write=0x%08x\n", outarg.max_write); fuse_log(FUSE_LOG_DEBUG, " max_background=%i\n", outarg.max_background); @@ -2280,6 +2409,7 @@ static struct { [FUSE_RENAME2] = { do_rename2, "RENAME2" }, [FUSE_COPY_FILE_RANGE] = { do_copy_file_range, "COPY_FILE_RANGE" }, [FUSE_LSEEK] = { do_lseek, "LSEEK" }, + [FUSE_SYNCFS] = { do_syncfs, "SYNCFS" }, }; #define FUSE_MAXOP (sizeof(fuse_ll_ops) / sizeof(fuse_ll_ops[0])) diff --git a/tools/virtiofsd/fuse_lowlevel.h b/tools/virtiofsd/fuse_lowlevel.h index 4b4e8c9724..b889dae4de 100644 --- a/tools/virtiofsd/fuse_lowlevel.h +++ b/tools/virtiofsd/fuse_lowlevel.h @@ -1226,6 +1226,19 @@ struct fuse_lowlevel_ops { */ void (*lseek)(fuse_req_t req, fuse_ino_t ino, off_t off, int whence, struct fuse_file_info *fi); + + /** + * Synchronize file system content + * + * If this request is answered with an error code of ENOSYS, + * this is treated as success and future calls to syncfs() will + * succeed automatically without being sent to the filesystem + * process. + * + * @param req request handle + * @param ino the inode number + */ + void (*syncfs)(fuse_req_t req, fuse_ino_t ino); }; /** @@ -1603,7 +1616,7 @@ int fuse_lowlevel_notify_inval_inode(struct fuse_session *se, fuse_ino_t ino, * parent/name * * To avoid a deadlock this function must not be called in the - * execution path of a related filesytem operation or within any code + * execution path of a related filesystem operation or within any code * that could hold a lock that could be needed to execute such an * operation. As of kernel 4.18, a "related operation" is a lookup(), * symlink(), mknod(), mkdir(), unlink(), rename(), link() or create() @@ -1636,7 +1649,7 @@ int fuse_lowlevel_notify_inval_entry(struct fuse_session *se, fuse_ino_t parent, * that the dentry has been deleted. * * To avoid a deadlock this function must not be called while - * executing a related filesytem operation or while holding a lock + * executing a related filesystem operation or while holding a lock * that could be needed to execute such an operation (see the * description of fuse_lowlevel_notify_inval_entry() for more * details). 
diff --git a/tools/virtiofsd/fuse_virtio.c b/tools/virtiofsd/fuse_virtio.c index fc2564a603..9368e292e4 100644 --- a/tools/virtiofsd/fuse_virtio.c +++ b/tools/virtiofsd/fuse_virtio.c @@ -82,12 +82,6 @@ struct fv_VuDev { struct fv_QueueInfo **qi; }; -/* From spec */ -struct virtio_fs_config { - char tag[36]; - uint32_t num_queues; -}; - /* Callback from libvhost-user */ static uint64_t fv_get_features(VuDev *dev) { @@ -249,6 +243,21 @@ static void vu_dispatch_unlock(struct fv_VuDev *vud) assert(ret == 0); } +static void vq_send_element(struct fv_QueueInfo *qi, VuVirtqElement *elem, + ssize_t len) +{ + struct fuse_session *se = qi->virtio_dev->se; + VuDev *dev = &se->virtio_dev->dev; + VuVirtq *q = vu_get_queue(dev, qi->qidx); + + vu_dispatch_rdlock(qi->virtio_dev); + pthread_mutex_lock(&qi->vq_lock); + vu_queue_push(dev, q, elem, len); + vu_queue_notify(dev, q); + pthread_mutex_unlock(&qi->vq_lock); + vu_dispatch_unlock(qi->virtio_dev); +} + /* * Called back by ll whenever it wants to send a reply/message back * The 1st element of the iov starts with the fuse_out_header @@ -259,8 +268,6 @@ int virtio_send_msg(struct fuse_session *se, struct fuse_chan *ch, { FVRequest *req = container_of(ch, FVRequest, ch); struct fv_QueueInfo *qi = ch->qi; - VuDev *dev = &se->virtio_dev->dev; - VuVirtq *q = vu_get_queue(dev, qi->qidx); VuVirtqElement *elem = &req->elem; int ret = 0; @@ -302,13 +309,7 @@ int virtio_send_msg(struct fuse_session *se, struct fuse_chan *ch, copy_iov(iov, count, in_sg, in_num, tosend_len); - vu_dispatch_rdlock(qi->virtio_dev); - pthread_mutex_lock(&qi->vq_lock); - vu_queue_push(dev, q, elem, tosend_len); - vu_queue_notify(dev, q); - pthread_mutex_unlock(&qi->vq_lock); - vu_dispatch_unlock(qi->virtio_dev); - + vq_send_element(qi, elem, tosend_len); req->reply_sent = true; err: @@ -327,8 +328,6 @@ int virtio_send_data_iov(struct fuse_session *se, struct fuse_chan *ch, { FVRequest *req = container_of(ch, FVRequest, ch); struct fv_QueueInfo *qi = ch->qi; - VuDev *dev = &se->virtio_dev->dev; - VuVirtq *q = vu_get_queue(dev, qi->qidx); VuVirtqElement *elem = &req->elem; int ret = 0; g_autofree struct iovec *in_sg_cpy = NULL; @@ -380,7 +379,7 @@ int virtio_send_data_iov(struct fuse_session *se, struct fuse_chan *ch, copy_iov(iov, count, in_sg, in_num, iov_len); /* - * Build a copy of the the in_sg iov so we can skip bits in it, + * Build a copy of the in_sg iov so we can skip bits in it, * including changing the offsets */ in_sg_cpy = g_new(struct iovec, in_num); @@ -436,12 +435,7 @@ int virtio_send_data_iov(struct fuse_session *se, struct fuse_chan *ch, out_sg->len = tosend_len; } - vu_dispatch_rdlock(qi->virtio_dev); - pthread_mutex_lock(&qi->vq_lock); - vu_queue_push(dev, q, elem, tosend_len); - vu_queue_notify(dev, q); - pthread_mutex_unlock(&qi->vq_lock); - vu_dispatch_unlock(qi->virtio_dev); + vq_send_element(qi, elem, tosend_len); req->reply_sent = true; return 0; } @@ -453,7 +447,6 @@ static void fv_queue_worker(gpointer data, gpointer user_data) { struct fv_QueueInfo *qi = user_data; struct fuse_session *se = qi->virtio_dev->se; - struct VuDev *dev = &qi->virtio_dev->dev; FVRequest *req = data; VuVirtqElement *elem = &req->elem; struct fuse_buf fbuf = {}; @@ -595,17 +588,9 @@ out: /* If the request has no reply, still recycle the virtqueue element */ if (!req->reply_sent) { - struct VuVirtq *q = vu_get_queue(dev, qi->qidx); - fuse_log(FUSE_LOG_DEBUG, "%s: elem %d no reply sent\n", __func__, elem->index); - - vu_dispatch_rdlock(qi->virtio_dev); - 
pthread_mutex_lock(&qi->vq_lock); - vu_queue_push(dev, q, elem, 0); - vu_queue_notify(dev, q); - pthread_mutex_unlock(&qi->vq_lock); - vu_dispatch_unlock(qi->virtio_dev); + vq_send_element(qi, elem, 0); } pthread_mutex_destroy(&req->ch.lock); @@ -716,6 +701,7 @@ static void *fv_queue_thread(void *opaque) /* Process all the requests. */ if (!se->thread_pool_size && req_list != NULL) { + req_list = g_list_reverse(req_list); g_list_foreach(req_list, fv_queue_worker, qi); g_list_free(req_list); req_list = NULL; @@ -754,6 +740,18 @@ static void fv_queue_cleanup_thread(struct fv_VuDev *vud, int qidx) vud->qi[qidx] = NULL; } +static void stop_all_queues(struct fv_VuDev *vud) +{ + for (int i = 0; i < vud->nqueues; i++) { + if (!vud->qi[i]) { + continue; + } + + fuse_log(FUSE_LOG_INFO, "%s: Stopping queue %d thread\n", __func__, i); + fv_queue_cleanup_thread(vud, i); + } +} + /* Callback from libvhost-user on start or stop of a queue */ static void fv_queue_set_started(VuDev *dev, int qidx, bool started) { @@ -884,15 +882,7 @@ int virtio_loop(struct fuse_session *se) * Make sure all fv_queue_thread()s quit on exit, as we're about to * free virtio dev and fuse session, no one should access them anymore. */ - for (int i = 0; i < se->virtio_dev->nqueues; i++) { - if (!se->virtio_dev->qi[i]) { - continue; - } - - fuse_log(FUSE_LOG_INFO, "%s: Stopping queue %d thread\n", __func__, i); - fv_queue_cleanup_thread(se->virtio_dev, i); - } - + stop_all_queues(se->virtio_dev); fuse_log(FUSE_LOG_INFO, "%s: Exit\n", __func__); return 0; @@ -911,10 +901,12 @@ static bool fv_socket_lock(struct fuse_session *se) { g_autofree gchar *sk_name = NULL; g_autofree gchar *pidfile = NULL; + g_autofree gchar *state = NULL; g_autofree gchar *dir = NULL; Error *local_err = NULL; - dir = qemu_get_local_state_pathname("run/virtiofsd"); + state = qemu_get_local_state_dir(); + dir = g_build_filename(state, "run", "virtiofsd", NULL); if (g_mkdir_with_parents(dir, S_IRWXU) < 0) { fuse_log(FUSE_LOG_ERR, "%s: Failed to create directory %s: %s\n", @@ -998,6 +990,13 @@ static int fv_create_listen_socket(struct fuse_session *se) "vhost socket failed to set group to %s (%d): %m\n", se->vu_socket_group, g->gr_gid); } + } else { + fuse_log(FUSE_LOG_ERR, + "vhost socket: unable to find group '%s'\n", + se->vu_socket_group); + close(listen_sock); + umask(old_umask); + return -1; } } umask(old_umask); diff --git a/tools/virtiofsd/helper.c b/tools/virtiofsd/helper.c index a8295d975a..f5f66f292c 100644 --- a/tools/virtiofsd/helper.c +++ b/tools/virtiofsd/helper.c @@ -187,6 +187,10 @@ void fuse_cmdline_help(void) " default: no_allow_direct_io\n" " -o announce_submounts Announce sub-mount points to the guest\n" " -o posix_acl/no_posix_acl Enable/Disable posix_acl. (default: disabled)\n" + " -o security_label/no_security_label Enable/Disable security label. 
(default: disabled)\n" + " -o killpriv_v2/no_killpriv_v2\n" + " Enable/Disable FUSE_HANDLE_KILLPRIV_V2.\n" + " (default: enabled as long as client supports it)\n" ); } @@ -271,7 +275,7 @@ int fuse_daemonize(int foreground) int waiter[2]; char completed; - if (pipe(waiter)) { + if (!g_unix_open_pipe(waiter, FD_CLOEXEC, NULL)) { fuse_log(FUSE_LOG_ERR, "fuse_daemonize: pipe: %s\n", strerror(errno)); return -1; diff --git a/tools/virtiofsd/passthrough_ll.c b/tools/virtiofsd/passthrough_ll.c index 38b2af8599..20f0f41f99 100644 --- a/tools/virtiofsd/passthrough_ll.c +++ b/tools/virtiofsd/passthrough_ll.c @@ -38,7 +38,7 @@ #include "qemu/osdep.h" #include "qemu/timer.h" #include "qemu-version.h" -#include "qemu-common.h" +#include "qemu/help-texts.h" #include "fuse_virtio.h" #include "fuse_log.h" #include "fuse_lowlevel.h" @@ -54,6 +54,7 @@ #include #include #include +#include #include "qemu/cutils.h" #include "passthrough_helpers.h" @@ -172,10 +173,15 @@ struct lo_data { /* An O_PATH file descriptor to /proc/self/fd/ */ int proc_self_fd; + /* An O_PATH file descriptor to /proc/self/task/ */ + int proc_self_task; int user_killpriv_v2, killpriv_v2; /* If set, virtiofsd is responsible for setting umask during creation */ bool change_umask; int user_posix_acl, posix_acl; + /* Keeps track if /proc//attr/fscreate should be used or not */ + bool use_fscreate; + int user_security_label; }; static const struct fuse_opt lo_opts[] = { @@ -210,6 +216,8 @@ static const struct fuse_opt lo_opts[] = { { "no_killpriv_v2", offsetof(struct lo_data, user_killpriv_v2), 0 }, { "posix_acl", offsetof(struct lo_data, user_posix_acl), 1 }, { "no_posix_acl", offsetof(struct lo_data, user_posix_acl), 0 }, + { "security_label", offsetof(struct lo_data, user_security_label), 1 }, + { "no_security_label", offsetof(struct lo_data, user_security_label), 0 }, FUSE_OPT_END }; static bool use_syslog = false; @@ -229,6 +237,11 @@ static struct lo_inode *lo_find(struct lo_data *lo, struct stat *st, static int xattr_map_client(const struct lo_data *lo, const char *client_name, char **out_name); +#define FCHDIR_NOFAIL(fd) do { \ + int fchdir_res = fchdir(fd); \ + assert(fchdir_res == 0); \ + } while (0) + static bool is_dot_or_dotdot(const char *name) { return name[0] == '.' && @@ -255,6 +268,70 @@ static struct lo_data *lo_data(fuse_req_t req) return (struct lo_data *)fuse_req_userdata(req); } +/* + * Tries to figure out if /proc//attr/fscreate is usable or not. With + * selinux=0, read from fscreate returns -EINVAL. + * + * TODO: Link with libselinux and use is_selinux_enabled() instead down + * the line. It probably will be more reliable indicator. + */ +static bool is_fscreate_usable(struct lo_data *lo) +{ + char procname[64]; + int fscreate_fd; + size_t bytes_read; + + sprintf(procname, "%ld/attr/fscreate", syscall(SYS_gettid)); + fscreate_fd = openat(lo->proc_self_task, procname, O_RDWR); + if (fscreate_fd == -1) { + return false; + } + + bytes_read = read(fscreate_fd, procname, 64); + close(fscreate_fd); + if (bytes_read == -1) { + return false; + } + return true; +} + +/* Helpers to set/reset fscreate */ +static int open_set_proc_fscreate(struct lo_data *lo, const void *ctx, + size_t ctxlen, int *fd) +{ + char procname[64]; + int fscreate_fd, err = 0; + size_t written; + + sprintf(procname, "%ld/attr/fscreate", syscall(SYS_gettid)); + fscreate_fd = openat(lo->proc_self_task, procname, O_WRONLY); + err = fscreate_fd == -1 ? 
errno : 0; + if (err) { + return err; + } + + written = write(fscreate_fd, ctx, ctxlen); + err = written == -1 ? errno : 0; + if (err) { + goto out; + } + + *fd = fscreate_fd; + return 0; +out: + close(fscreate_fd); + return err; +} + +static void close_reset_proc_fscreate(int fd) +{ + if ((write(fd, NULL, 0)) == -1) { + fuse_log(FUSE_LOG_WARNING, "Failed to reset fscreate. err=%d\n", errno); + } + close(fd); + return; +} + /* * Load capng's state from our saved state if the current thread * hadn't previously been loaded. @@ -690,19 +767,10 @@ static void lo_init(void *userdata, struct fuse_conn_info *conn) fuse_log(FUSE_LOG_DEBUG, "lo_init: enabling killpriv_v2\n"); conn->want |= FUSE_CAP_HANDLE_KILLPRIV_V2; lo->killpriv_v2 = 1; - } else if (lo->user_killpriv_v2 == -1 && - conn->capable & FUSE_CAP_HANDLE_KILLPRIV_V2) { - /* - * User did not specify a value for killpriv_v2. By default enable it - * if connection offers this capability - */ - fuse_log(FUSE_LOG_DEBUG, "lo_init: enabling killpriv_v2\n"); - conn->want |= FUSE_CAP_HANDLE_KILLPRIV_V2; - lo->killpriv_v2 = 1; } else { /* - * Either user specified to disable killpriv_v2, or connection does - * not offer this capability. Disable killpriv_v2 in both the cases + * Either user specified to disable killpriv_v2, or did not + * specify anything. Disable killpriv_v2 in both the cases. */ fuse_log(FUSE_LOG_DEBUG, "lo_init: disabling killpriv_v2\n"); conn->want &= ~FUSE_CAP_HANDLE_KILLPRIV_V2; @@ -734,6 +802,17 @@ static void lo_init(void *userdata, struct fuse_conn_info *conn) fuse_log(FUSE_LOG_DEBUG, "lo_init: disabling posix_acl\n"); conn->want &= ~FUSE_CAP_POSIX_ACL; } + + if (lo->user_security_label == 1) { + if (!(conn->capable & FUSE_CAP_SECURITY_CTX)) { + fuse_log(FUSE_LOG_ERR, "lo_init: Can not enable security label." + " kernel does not support FUSE_SECURITY_CTX capability.\n"); + } + conn->want |= FUSE_CAP_SECURITY_CTX; + } else { + fuse_log(FUSE_LOG_DEBUG, "lo_init: disabling security label\n"); + conn->want &= ~FUSE_CAP_SECURITY_CTX; + } } static void lo_getattr(fuse_req_t req, fuse_ino_t ino, @@ -951,7 +1030,7 @@ static int do_statx(struct lo_data *lo, int dirfd, const char *pathname, { int res; -#if defined(CONFIG_STATX) && defined(STATX_MNT_ID) +#if defined(CONFIG_STATX) && defined(CONFIG_STATX_MNT_ID) if (lo->use_statx) { struct statx statxbuf; @@ -1161,6 +1240,30 @@ static void lo_lookup(fuse_req_t req, fuse_ino_t parent, const char *name) #define OURSYS_setresuid SYS_setresuid #endif +static void drop_supplementary_groups(void) +{ + int ret; + + ret = getgroups(0, NULL); + if (ret == -1) { + fuse_log(FUSE_LOG_ERR, "getgroups() failed with error=%d:%s\n", + errno, strerror(errno)); + exit(1); + } + + if (!ret) { + return; + } + + /* Drop all supplementary groups. We should not need it */ + ret = setgroups(0, NULL); + if (ret == -1) { + fuse_log(FUSE_LOG_ERR, "setgroups() failed with error=%d:%s\n", + errno, strerror(errno)); + exit(1); + } +} + /* * Change to uid/gid of caller so that file is created with * ownership of caller. @@ -1259,16 +1362,103 @@ static void lo_restore_cred_gain_cap(struct lo_cred *old, bool restore_umask, } } +static int do_mknod_symlink_secctx(fuse_req_t req, struct lo_inode *dir, + const char *name, const char *secctx_name) +{ + int path_fd, err; + char procname[64]; + struct lo_data *lo = lo_data(req); + + if (!req->secctx.ctxlen) { + return 0; + } + + /* Open newly created element with O_PATH */ + path_fd = openat(dir->fd, name, O_PATH | O_NOFOLLOW); + err = path_fd == -1 ? 
errno : 0; + if (err) { + return err; + } + sprintf(procname, "%i", path_fd); + FCHDIR_NOFAIL(lo->proc_self_fd); + /* Set security context. This is not atomic w.r.t file creation */ + err = setxattr(procname, secctx_name, req->secctx.ctx, req->secctx.ctxlen, + 0); + if (err) { + err = errno; + } + FCHDIR_NOFAIL(lo->root.fd); + close(path_fd); + return err; +} + +static int do_mknod_symlink(fuse_req_t req, struct lo_inode *dir, + const char *name, mode_t mode, dev_t rdev, + const char *link) +{ + int err, fscreate_fd = -1; + const char *secctx_name = req->secctx.name; + struct lo_cred old = {}; + struct lo_data *lo = lo_data(req); + char *mapped_name = NULL; + bool secctx_enabled = req->secctx.ctxlen; + bool do_fscreate = false; + + if (secctx_enabled && lo->xattrmap) { + err = xattr_map_client(lo, req->secctx.name, &mapped_name); + if (err < 0) { + return -err; + } + secctx_name = mapped_name; + } + + /* + * If security xattr has not been remapped and selinux is enabled on + * host, set fscreate and no need to do a setxattr() after file creation + */ + if (secctx_enabled && !mapped_name && lo->use_fscreate) { + do_fscreate = true; + err = open_set_proc_fscreate(lo, req->secctx.ctx, req->secctx.ctxlen, + &fscreate_fd); + if (err) { + goto out; + } + } + + err = lo_change_cred(req, &old, lo->change_umask && !S_ISLNK(mode)); + if (err) { + goto out; + } + + err = mknod_wrapper(dir->fd, name, link, mode, rdev); + err = err == -1 ? errno : 0; + lo_restore_cred(&old, lo->change_umask && !S_ISLNK(mode)); + if (err) { + goto out; + } + + if (!do_fscreate) { + err = do_mknod_symlink_secctx(req, dir, name, secctx_name); + if (err) { + unlinkat(dir->fd, name, S_ISDIR(mode) ? AT_REMOVEDIR : 0); + } + } +out: + if (fscreate_fd != -1) { + close_reset_proc_fscreate(fscreate_fd); + } + g_free(mapped_name); + return err; +} + static void lo_mknod_symlink(fuse_req_t req, fuse_ino_t parent, const char *name, mode_t mode, dev_t rdev, const char *link) { - int res; int saverr; struct lo_data *lo = lo_data(req); struct lo_inode *dir; struct fuse_entry_param e; - struct lo_cred old = {}; if (is_empty(name)) { fuse_reply_err(req, ENOENT); @@ -1286,21 +1476,11 @@ static void lo_mknod_symlink(fuse_req_t req, fuse_ino_t parent, return; } - saverr = lo_change_cred(req, &old, lo->change_umask && !S_ISLNK(mode)); + saverr = do_mknod_symlink(req, dir, name, mode, rdev, link); if (saverr) { goto out; } - res = mknod_wrapper(dir->fd, name, link, mode, rdev); - - saverr = errno; - - lo_restore_cred(&old, lo->change_umask && !S_ISLNK(mode)); - - if (res == -1) { - goto out; - } - saverr = lo_do_lookup(req, parent, name, &e, NULL); if (saverr) { goto out; @@ -1976,6 +2156,190 @@ static int lo_do_open(struct lo_data *lo, struct lo_inode *inode, return 0; } +static int do_create_nosecctx(fuse_req_t req, struct lo_inode *parent_inode, + const char *name, mode_t mode, + struct fuse_file_info *fi, int *open_fd, + bool tmpfile) +{ + int err, fd; + struct lo_cred old = {}; + struct lo_data *lo = lo_data(req); + int flags; + + if (tmpfile) { + flags = fi->flags | O_TMPFILE; + /* + * Don't use O_EXCL as we want to link file later. Also reset O_CREAT + * otherwise openat() returns -EINVAL. 
+ */ + flags &= ~(O_CREAT | O_EXCL); + + /* O_TMPFILE needs either O_RDWR or O_WRONLY */ + if ((flags & O_ACCMODE) == O_RDONLY) { + flags |= O_RDWR; + } + } else { + flags = fi->flags | O_CREAT | O_EXCL; + } + + err = lo_change_cred(req, &old, lo->change_umask); + if (err) { + return err; + } + + /* Try to create a new file but don't open existing files */ + fd = openat(parent_inode->fd, name, flags, mode); + err = fd == -1 ? errno : 0; + lo_restore_cred(&old, lo->change_umask); + if (!err) { + *open_fd = fd; + } + return err; +} + +static int do_create_secctx_fscreate(fuse_req_t req, + struct lo_inode *parent_inode, + const char *name, mode_t mode, + struct fuse_file_info *fi, int *open_fd) +{ + int err = 0, fd = -1, fscreate_fd = -1; + struct lo_data *lo = lo_data(req); + + err = open_set_proc_fscreate(lo, req->secctx.ctx, req->secctx.ctxlen, + &fscreate_fd); + if (err) { + return err; + } + + err = do_create_nosecctx(req, parent_inode, name, mode, fi, &fd, false); + + close_reset_proc_fscreate(fscreate_fd); + if (!err) { + *open_fd = fd; + } + return err; +} + +static int do_create_secctx_tmpfile(fuse_req_t req, + struct lo_inode *parent_inode, + const char *name, mode_t mode, + struct fuse_file_info *fi, + const char *secctx_name, int *open_fd) +{ + int err, fd = -1; + struct lo_data *lo = lo_data(req); + char procname[64]; + + err = do_create_nosecctx(req, parent_inode, ".", mode, fi, &fd, true); + if (err) { + return err; + } + + err = fsetxattr(fd, secctx_name, req->secctx.ctx, req->secctx.ctxlen, 0); + if (err) { + err = errno; + goto out; + } + + /* Security context set on file. Link it in place */ + sprintf(procname, "%d", fd); + FCHDIR_NOFAIL(lo->proc_self_fd); + err = linkat(AT_FDCWD, procname, parent_inode->fd, name, + AT_SYMLINK_FOLLOW); + err = err == -1 ? errno : 0; + FCHDIR_NOFAIL(lo->root.fd); + +out: + if (!err) { + *open_fd = fd; + } else if (fd != -1) { + close(fd); + } + return err; +} + +static int do_create_secctx_noatomic(fuse_req_t req, + struct lo_inode *parent_inode, + const char *name, mode_t mode, + struct fuse_file_info *fi, + const char *secctx_name, int *open_fd) +{ + int err = 0, fd = -1; + + err = do_create_nosecctx(req, parent_inode, name, mode, fi, &fd, false); + if (err) { + goto out; + } + + /* Set security context. This is not atomic w.r.t file creation */ + err = fsetxattr(fd, secctx_name, req->secctx.ctx, req->secctx.ctxlen, 0); + err = err == -1 ? errno : 0; +out: + if (!err) { + *open_fd = fd; + } else { + if (fd != -1) { + close(fd); + unlinkat(parent_inode->fd, name, 0); + } + } + return err; +} + +static int do_lo_create(fuse_req_t req, struct lo_inode *parent_inode, + const char *name, mode_t mode, + struct fuse_file_info *fi, int *open_fd) +{ + struct lo_data *lo = lo_data(req); + char *mapped_name = NULL; + int err; + const char *ctxname = req->secctx.name; + bool secctx_enabled = req->secctx.ctxlen; + + if (secctx_enabled && lo->xattrmap) { + err = xattr_map_client(lo, req->secctx.name, &mapped_name); + if (err < 0) { + return -err; + } + + ctxname = mapped_name; + } + + if (secctx_enabled) { + /* + * If security.selinux has not been remapped and selinux is enabled, + * use fscreate to set context before file creation. If not, use + * tmpfile method for regular files. Otherwise fallback to + * non-atomic method of file creation and xattr setting. 
+ */ + if (!mapped_name && lo->use_fscreate) { + err = do_create_secctx_fscreate(req, parent_inode, name, mode, fi, + open_fd); + goto out; + } else if (S_ISREG(mode)) { + err = do_create_secctx_tmpfile(req, parent_inode, name, mode, fi, + ctxname, open_fd); + /* + * If filesystem does not support O_TMPFILE, fallback to non-atomic + * method. + */ + if (!err || err != EOPNOTSUPP) { + goto out; + } + } + + err = do_create_secctx_noatomic(req, parent_inode, name, mode, fi, + ctxname, open_fd); + } else { + err = do_create_nosecctx(req, parent_inode, name, mode, fi, open_fd, + false); + } + +out: + g_free(mapped_name); + return err; +} + static void lo_create(fuse_req_t req, fuse_ino_t parent, const char *name, mode_t mode, struct fuse_file_info *fi) { @@ -1985,7 +2349,6 @@ static void lo_create(fuse_req_t req, fuse_ino_t parent, const char *name, struct lo_inode *inode = NULL; struct fuse_entry_param e; int err; - struct lo_cred old = {}; fuse_log(FUSE_LOG_DEBUG, "lo_create(parent=%" PRIu64 ", name=%s)" " kill_priv=%d\n", parent, name, fi->kill_priv); @@ -2001,18 +2364,9 @@ static void lo_create(fuse_req_t req, fuse_ino_t parent, const char *name, return; } - err = lo_change_cred(req, &old, lo->change_umask); - if (err) { - goto out; - } - update_open_flags(lo->writeback, lo->allow_direct_io, fi); - /* Try to create a new file but don't open existing files */ - fd = openat(parent_inode->fd, name, fi->flags | O_CREAT | O_EXCL, mode); - err = fd == -1 ? errno : 0; - - lo_restore_cred(&old, lo->change_umask); + err = do_lo_create(req, parent_inode, name, mode, fi, &fd); /* Ignore the error if file exists and O_EXCL was not given */ if (err && (err != EEXIST || (fi->flags & O_EXCL))) { @@ -2442,6 +2796,15 @@ static void lo_flock(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi, int res; (void)ino; + if (!(op & LOCK_NB)) { + /* + * Blocking flock can deadlock as there is only one thread + * serving the queue. + */ + fuse_reply_err(req, EOPNOTSUPP); + return; + } + res = flock(lo_fi_fd(req, fi), op); fuse_reply_err(req, res == -1 ? errno : 0); @@ -2465,6 +2828,11 @@ static void lo_flock(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi, * Automatically reversed on read */ #define XATTR_MAP_FLAG_PREFIX (1 << 2) +/* + * The attribute is unsupported; + * ENOTSUP on write, hidden on read. 
+ */ +#define XATTR_MAP_FLAG_UNSUPPORTED (1 << 3) /* scopes */ /* Apply rule to get/set/remove */ @@ -2636,6 +3004,8 @@ static void parse_xattrmap(struct lo_data *lo) tmp_entry.flags |= XATTR_MAP_FLAG_OK; } else if (strstart(map, "bad", &map)) { tmp_entry.flags |= XATTR_MAP_FLAG_BAD; + } else if (strstart(map, "unsupported", &map)) { + tmp_entry.flags |= XATTR_MAP_FLAG_UNSUPPORTED; } else if (strstart(map, "map", &map)) { /* * map is sugar that adds a number of rules, and must be @@ -2646,8 +3016,8 @@ static void parse_xattrmap(struct lo_data *lo) } else { fuse_log(FUSE_LOG_ERR, "%s: Unexpected type;" - "Expecting 'prefix', 'ok', 'bad' or 'map' in rule %zu\n", - __func__, lo->xattr_map_nentries); + "Expecting 'prefix', 'ok', 'bad', 'unsupported' or 'map'" + " in rule %zu\n", __func__, lo->xattr_map_nentries); exit(1); } @@ -2749,6 +3119,9 @@ static int xattr_map_client(const struct lo_data *lo, const char *client_name, if (cur_entry->flags & XATTR_MAP_FLAG_BAD) { return -EPERM; } + if (cur_entry->flags & XATTR_MAP_FLAG_UNSUPPORTED) { + return -ENOTSUP; + } if (cur_entry->flags & XATTR_MAP_FLAG_OK) { /* Unmodified name */ return 0; @@ -2788,7 +3161,8 @@ static int xattr_map_server(const struct lo_data *lo, const char *server_name, if ((cur_entry->flags & XATTR_MAP_FLAG_SERVER) && (strstart(server_name, cur_entry->prepend, &end))) { - if (cur_entry->flags & XATTR_MAP_FLAG_BAD) { + if (cur_entry->flags & XATTR_MAP_FLAG_BAD || + cur_entry->flags & XATTR_MAP_FLAG_UNSUPPORTED) { return -ENODATA; } if (cur_entry->flags & XATTR_MAP_FLAG_OK) { @@ -2806,11 +3180,6 @@ static int xattr_map_server(const struct lo_data *lo, const char *server_name, return -ENODATA; } -#define FCHDIR_NOFAIL(fd) do { \ - int fchdir_res = fchdir(fd); \ - assert(fchdir_res == 0); \ - } while (0) - static bool block_xattr(struct lo_data *lo, const char *name) { /* @@ -3321,6 +3690,49 @@ static void lo_lseek(fuse_req_t req, fuse_ino_t ino, off_t off, int whence, } } +static int lo_do_syncfs(struct lo_data *lo, struct lo_inode *inode) +{ + int fd, ret = 0; + + fuse_log(FUSE_LOG_DEBUG, "lo_do_syncfs(ino=%" PRIu64 ")\n", + inode->fuse_ino); + + fd = lo_inode_open(lo, inode, O_RDONLY); + if (fd < 0) { + return -fd; + } + + if (syncfs(fd) < 0) { + ret = errno; + } + + close(fd); + return ret; +} + +static void lo_syncfs(fuse_req_t req, fuse_ino_t ino) +{ + struct lo_data *lo = lo_data(req); + struct lo_inode *inode = lo_inode(req, ino); + int err; + + if (!inode) { + fuse_reply_err(req, EBADF); + return; + } + + err = lo_do_syncfs(lo, inode); + lo_inode_put(lo, &inode); + + /* + * If submounts aren't announced, the client only sends a request to + * sync the root inode. TODO: Track submounts internally and iterate + * over them as well. + */ + + fuse_reply_err(req, err); +} + static void lo_destroy(void *userdata) { struct lo_data *lo = (struct lo_data *)userdata; @@ -3381,6 +3793,7 @@ static struct fuse_lowlevel_ops lo_oper = { .copy_file_range = lo_copy_file_range, #endif .lseek = lo_lseek, + .syncfs = lo_syncfs, .destroy = lo_destroy, }; @@ -3472,6 +3885,15 @@ static void setup_namespaces(struct lo_data *lo, struct fuse_session *se) exit(1); } + /* Get the /proc/self/task descriptor */ + lo->proc_self_task = open("/proc/self/task/", O_PATH); + if (lo->proc_self_task == -1) { + fuse_log(FUSE_LOG_ERR, "open(/proc/self/task, O_PATH): %m\n"); + exit(1); + } + + lo->use_fscreate = is_fscreate_usable(lo); + /* * We only need /proc/self/fd. Prevent ".." 
from accessing parent * directories of /proc/self/fd by bind-mounting it over /proc. Since / was @@ -3688,6 +4110,14 @@ static void setup_chroot(struct lo_data *lo) exit(1); } + lo->proc_self_task = open("/proc/self/task", O_PATH); + if (lo->proc_self_fd == -1) { + fuse_log(FUSE_LOG_ERR, "open(\"/proc/self/task\", O_PATH): %m\n"); + exit(1); + } + + lo->use_fscreate = is_fscreate_usable(lo); + /* * Make the shared directory the file system root so that FUSE_OPEN * (lo_open()) cannot escape the shared directory by opening a symlink. @@ -3755,6 +4185,7 @@ static void setup_nofile_rlimit(unsigned long rlimit_nofile) static void log_func(enum fuse_log_level level, const char *fmt, va_list ap) { g_autofree char *localfmt = NULL; + char buf[64]; if (current_log_level < level) { return; @@ -3767,9 +4198,11 @@ static void log_func(enum fuse_log_level level, const char *fmt, va_list ap) fmt); } else { g_autoptr(GDateTime) now = g_date_time_new_now_utc(); - g_autofree char *nowstr = g_date_time_format(now, "%Y-%m-%d %H:%M:%S.%f%z"); + g_autofree char *nowstr = g_date_time_format(now, + "%Y-%m-%d %H:%M:%S.%%06d%z"); + snprintf(buf, 64, nowstr, g_date_time_get_microsecond(now)); localfmt = g_strdup_printf("[%s] [ID: %08ld] %s", - nowstr, syscall(__NR_gettid), fmt); + buf, syscall(__NR_gettid), fmt); } fmt = localfmt; } @@ -3873,6 +4306,10 @@ static void fuse_lo_data_cleanup(struct lo_data *lo) close(lo->proc_self_fd); } + if (lo->proc_self_task >= 0) { + close(lo->proc_self_task); + } + if (lo->root.fd >= 0) { close(lo->root.fd); } @@ -3900,8 +4337,10 @@ int main(int argc, char *argv[]) .posix_lock = 0, .allow_direct_io = 0, .proc_self_fd = -1, + .proc_self_task = -1, .user_killpriv_v2 = -1, .user_posix_acl = -1, + .user_security_label = -1, }; struct lo_map_elem *root_elem; struct lo_map_elem *reserve_elem; @@ -3915,6 +4354,8 @@ int main(int argc, char *argv[]) qemu_init_exec_dir(argv[0]); + drop_supplementary_groups(); + pthread_mutex_init(&lo.mutex, NULL); lo.inodes = g_hash_table_new(lo_key_hash, lo_key_equal); lo.root.fd = -1; diff --git a/tools/virtiofsd/passthrough_seccomp.c b/tools/virtiofsd/passthrough_seccomp.c index f49ed94b5e..0033dab493 100644 --- a/tools/virtiofsd/passthrough_seccomp.c +++ b/tools/virtiofsd/passthrough_seccomp.c @@ -51,6 +51,7 @@ static const int syscall_allowlist[] = { SCMP_SYS(fsetxattr), SCMP_SYS(fstat), SCMP_SYS(fstatfs), + SCMP_SYS(fstatfs64), SCMP_SYS(fsync), SCMP_SYS(ftruncate), SCMP_SYS(futex), @@ -90,6 +91,9 @@ static const int syscall_allowlist[] = { SCMP_SYS(renameat2), SCMP_SYS(removexattr), SCMP_SYS(restart_syscall), +#ifdef __NR_rseq + SCMP_SYS(rseq), /* required since glibc 2.35 */ +#endif SCMP_SYS(rt_sigaction), SCMP_SYS(rt_sigprocmask), SCMP_SYS(rt_sigreturn), @@ -106,7 +110,9 @@ static const int syscall_allowlist[] = { #endif SCMP_SYS(set_robust_list), SCMP_SYS(setxattr), + SCMP_SYS(sigreturn), SCMP_SYS(symlinkat), + SCMP_SYS(syncfs), SCMP_SYS(time), /* Rarely needed, except on static builds */ SCMP_SYS(tgkill), SCMP_SYS(unlinkat), diff --git a/tools/virtiofsd/passthrough_seccomp.h b/tools/virtiofsd/passthrough_seccomp.h index a3ab073f08..12674fc050 100644 --- a/tools/virtiofsd/passthrough_seccomp.h +++ b/tools/virtiofsd/passthrough_seccomp.h @@ -6,10 +6,9 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef VIRTIOFSD_SECCOMP_H -#define VIRTIOFSD_SECCOMP_H - +#ifndef VIRTIOFSD_PASSTHROUGH_SECCOMP_H +#define VIRTIOFSD_PASSTHROUGH_SECCOMP_H void setup_seccomp(bool enable_syslog); -#endif /* VIRTIOFSD_SECCOMP_H */ +#endif /* 
VIRTIOFSD_PASSTHROUGH_SECCOMP_H */ diff --git a/trace-events b/trace-events index c4cca29939..035f3d570d 100644 --- a/trace-events +++ b/trace-events @@ -46,34 +46,6 @@ ram_block_discard_range(const char *rbname, void *hva, size_t length, bool need_ memory_notdirty_write_access(uint64_t vaddr, uint64_t ram_addr, unsigned size) "0x%" PRIx64 " ram_addr 0x%" PRIx64 " size %u" memory_notdirty_set_dirty(uint64_t vaddr) "0x%" PRIx64 -# gdbstub.c -gdbstub_op_start(const char *device) "Starting gdbstub using device %s" -gdbstub_op_exiting(uint8_t code) "notifying exit with code=0x%02x" -gdbstub_op_continue(void) "Continuing all CPUs" -gdbstub_op_continue_cpu(int cpu_index) "Continuing CPU %d" -gdbstub_op_stepping(int cpu_index) "Stepping CPU %d" -gdbstub_op_extra_info(const char *info) "Thread extra info: %s" -gdbstub_hit_watchpoint(const char *type, int cpu_gdb_index, uint64_t vaddr) "Watchpoint hit, type=\"%s\" cpu=%d, vaddr=0x%" PRIx64 "" -gdbstub_hit_internal_error(void) "RUN_STATE_INTERNAL_ERROR" -gdbstub_hit_break(void) "RUN_STATE_DEBUG" -gdbstub_hit_paused(void) "RUN_STATE_PAUSED" -gdbstub_hit_shutdown(void) "RUN_STATE_SHUTDOWN" -gdbstub_hit_io_error(void) "RUN_STATE_IO_ERROR" -gdbstub_hit_watchdog(void) "RUN_STATE_WATCHDOG" -gdbstub_hit_unknown(int state) "Unknown run state=0x%x" -gdbstub_io_reply(const char *message) "Sent: %s" -gdbstub_io_binaryreply(size_t ofs, const char *line) "0x%04zx: %s" -gdbstub_io_command(const char *command) "Received: %s" -gdbstub_io_got_ack(void) "Got ACK" -gdbstub_io_got_unexpected(uint8_t ch) "Got 0x%02x when expecting ACK/NACK" -gdbstub_err_got_nack(void) "Got NACK, retransmitting" -gdbstub_err_garbage(uint8_t ch) "received garbage between packets: 0x%02x" -gdbstub_err_overrun(void) "command buffer overrun, dropping command" -gdbstub_err_invalid_repeat(uint8_t ch) "got invalid RLE count: 0x%02x" -gdbstub_err_invalid_rle(void) "got invalid RLE sequence" -gdbstub_err_checksum_invalid(uint8_t ch) "got invalid command checksum digit: 0x%02x" -gdbstub_err_checksum_incorrect(uint8_t expected, uint8_t got) "got command packet with incorrect checksum, expected=0x%02x, received=0x%02x" - # job.c job_state_transition(void *job, int ret, const char *legal, const char *s0, const char *s1) "job %p (ret: %d) attempting %s transition (%s-->%s)" job_apply_verb(void *job, const char *state, const char *verb, const char *legal) "job %p in state %s; applying verb %s (%s)" @@ -117,30 +89,6 @@ vcpu guest_cpu_exit(void) # Targets: all vcpu guest_cpu_reset(void) -# tcg/tcg-op.c - -# @vaddr: Access' virtual address. -# @info : Access' information (see below). -# -# Start virtual memory access (before any potential access violation). -# -# Does not include memory accesses performed by devices. -# -# Access information can be parsed as: -# -# struct mem_info { -# uint8_t size_shift : 4; /* interpreted as "1 << size_shift" bytes */ -# bool sign_extend: 1; /* sign-extended */ -# uint8_t endianness : 1; /* 0: little, 1: big */ -# bool store : 1; /* whether it is a store operation */ -# pad : 1; -# uint8_t mmuidx : 4; /* mmuidx (softmmu only) */ -# }; -# -# Mode: user, softmmu -# Targets: TCG(all) -vcpu tcg guest_mem_before(TCGv vaddr, uint16_t info) "info=%d", "vaddr=0x%016"PRIx64" info=%d" - # include/user/syscall-trace.h # @num: System call number. 
diff --git a/trace/control-target.c b/trace/control-target.c index 8418673c18..232c97a4a1 100644 --- a/trace/control-target.c +++ b/trace/control-target.c @@ -65,7 +65,7 @@ static void trace_event_synchronize_vcpu_state_dynamic( { bitmap_copy(vcpu->trace_dstate, vcpu->trace_dstate_delayed, CPU_TRACE_DSTATE_MAX_EVENTS); - cpu_tb_jmp_cache_clear(vcpu); + tcg_flush_jmp_cache(vcpu); } void trace_event_set_vcpu_state_dynamic(CPUState *vcpu, diff --git a/trace/control.c b/trace/control.c index d5b68e846e..6c77cc6318 100644 --- a/trace/control.c +++ b/trace/control.c @@ -161,7 +161,7 @@ void trace_list_events(FILE *f) fprintf(f, "This list of names of trace points may be incomplete " "when using the DTrace/SystemTap backends.\n" "Run 'qemu-trace-stap list %s' to print the full list.\n", - error_get_progname()); + g_get_prgname()); #endif } diff --git a/trace/mem.h b/trace/mem.h deleted file mode 100644 index 2f27e7bdf0..0000000000 --- a/trace/mem.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Helper functions for guest memory tracing - * - * Copyright (C) 2016 Lluís Vilanova - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef TRACE__MEM_H -#define TRACE__MEM_H - -#include "tcg/tcg.h" - -#define TRACE_MEM_SZ_SHIFT_MASK 0xf /* size shift mask */ -#define TRACE_MEM_SE (1ULL << 4) /* sign extended (y/n) */ -#define TRACE_MEM_BE (1ULL << 5) /* big endian (y/n) */ -#define TRACE_MEM_ST (1ULL << 6) /* store (y/n) */ -#define TRACE_MEM_MMU_SHIFT 8 /* mmu idx */ - -/** - * trace_mem_build_info: - * - * Return a value for the 'info' argument in guest memory access traces. - */ -static inline uint16_t trace_mem_build_info(int size_shift, bool sign_extend, - MemOp endianness, bool store, - unsigned int mmu_idx) -{ - uint16_t res; - - res = size_shift & TRACE_MEM_SZ_SHIFT_MASK; - if (sign_extend) { - res |= TRACE_MEM_SE; - } - if (endianness == MO_BE) { - res |= TRACE_MEM_BE; - } - if (store) { - res |= TRACE_MEM_ST; - } -#ifdef CONFIG_SOFTMMU - res |= mmu_idx << TRACE_MEM_MMU_SHIFT; -#endif - return res; -} - - -/** - * trace_mem_get_info: - * - * Return a value for the 'info' argument in guest memory access traces. - */ -static inline uint16_t trace_mem_get_info(MemOp op, - unsigned int mmu_idx, - bool store) -{ - return trace_mem_build_info(op & MO_SIZE, !!(op & MO_SIGN), - op & MO_BSWAP, store, - mmu_idx); -} - -#endif /* TRACE__MEM_H */ diff --git a/trace/meson.build b/trace/meson.build index ef18f11d64..26b54714d5 100644 --- a/trace/meson.build +++ b/trace/meson.build @@ -2,10 +2,15 @@ specific_ss.add(files('control-target.c')) trace_events_files = [] -foreach dir : [ '.' ] + trace_events_subdirs - trace_events_file = meson.source_root() / dir / 'trace-events' +foreach item : [ '.' ] + trace_events_subdirs + qapi_trace_events + if item in qapi_trace_events + trace_events_file = item + group_name = item.full_path().split('/')[-1].underscorify() + else + trace_events_file = meson.project_source_root() / item / 'trace-events' + group_name = item == '.' ? 'root' : item.underscorify() + endif trace_events_files += [ trace_events_file ] - group_name = dir == '.' ? 'root' : dir.underscorify() group = '--group=' + group_name fmt = '@0@-' + group_name + '.@1@' @@ -20,7 +25,7 @@ foreach dir : [ '.' 
] + trace_events_subdirs input: trace_events_file, command: [ tracetool, group, '--format=c', '@INPUT@', '@OUTPUT@' ], depend_files: tracetool_depends) - if 'CONFIG_TRACE_UST' in config_host + if 'ust' in get_option('trace_backends') trace_ust_h = custom_target(fmt.format('trace-ust', 'h'), output: fmt.format('trace-ust', 'h'), input: trace_events_file, @@ -30,7 +35,7 @@ foreach dir : [ '.' ] + trace_events_subdirs genh += trace_ust_h endif trace_ss.add(trace_h, trace_c) - if 'CONFIG_TRACE_DTRACE' in config_host + if 'dtrace' in get_option('trace_backends') trace_dtrace = custom_target(fmt.format('trace-dtrace', 'dtrace'), output: fmt.format('trace-dtrace', 'dtrace'), input: trace_events_file, @@ -39,13 +44,13 @@ foreach dir : [ '.' ] + trace_events_subdirs trace_dtrace_h = custom_target(fmt.format('trace-dtrace', 'h'), output: fmt.format('trace-dtrace', 'h'), input: trace_dtrace, - command: [ 'dtrace', '-DSTAP_SDT_V2', '-o', '@OUTPUT@', '-h', '-s', '@INPUT@' ]) + command: [ dtrace, '-DSTAP_SDT_V2', '-o', '@OUTPUT@', '-h', '-s', '@INPUT@' ]) trace_ss.add(trace_dtrace_h) if host_machine.system() != 'darwin' trace_dtrace_o = custom_target(fmt.format('trace-dtrace', 'o'), output: fmt.format('trace-dtrace', 'o'), input: trace_dtrace, - command: [ 'dtrace', '-DSTAP_SDT_V2', '-o', '@OUTPUT@', '-G', '-s', '@INPUT@' ]) + command: [ dtrace, '-DSTAP_SDT_V2', '-o', '@OUTPUT@', '-G', '-s', '@INPUT@' ]) trace_ss.add(trace_dtrace_o) endif @@ -61,21 +66,7 @@ trace_events_all = custom_target('trace-events-all', install: true, install_dir: qemu_datadir) -foreach d : [ - ['generated-tcg-tracers.h', 'tcg-h'], - ['generated-helpers.c', 'tcg-helper-c'], - ['generated-helpers.h', 'tcg-helper-h'], - ['generated-helpers-wrappers.h', 'tcg-helper-wrapper-h'], -] - gen = custom_target(d[0], - output: d[0], - input: meson.source_root() / 'trace-events', - command: [ tracetool, '--group=root', '--format=@0@'.format(d[1]), '@INPUT@', '@OUTPUT@' ], - depend_files: tracetool_depends) - specific_ss.add(when: 'CONFIG_TCG', if_true: gen) -endforeach - -if 'CONFIG_TRACE_UST' in config_host +if 'ust' in get_option('trace_backends') trace_ust_all_h = custom_target('trace-ust-all.h', output: 'trace-ust-all.h', input: trace_events_files, @@ -90,7 +81,11 @@ if 'CONFIG_TRACE_UST' in config_host genh += trace_ust_all_h endif -trace_ss.add(when: 'CONFIG_TRACE_SIMPLE', if_true: files('simple.c')) -trace_ss.add(when: 'CONFIG_TRACE_FTRACE', if_true: files('ftrace.c')) +if 'simple' in get_option('trace_backends') + trace_ss.add(files('simple.c')) +endif +if 'ftrace' in get_option('trace_backends') + trace_ss.add(files('ftrace.c')) +endif trace_ss.add(files('control.c')) trace_ss.add(files('qmp.c')) diff --git a/trace/simple.c b/trace/simple.c index ac499edee0..18af590cf7 100644 --- a/trace/simple.c +++ b/trace/simple.c @@ -364,7 +364,7 @@ void st_set_trace_file(const char *file) if (!file) { /* Type cast needed for Windows where getpid() returns an int. 
*/ - trace_file_name = g_strdup_printf(CONFIG_TRACE_FILE, (pid_t)getpid()); + trace_file_name = g_strdup_printf(CONFIG_TRACE_FILE "-" FMT_pid, (pid_t)getpid()); } else { trace_file_name = g_strdup_printf("%s", file); } diff --git a/ui/clipboard.c b/ui/clipboard.c index 3525b30178..3d14bffaf8 100644 --- a/ui/clipboard.c +++ b/ui/clipboard.c @@ -1,22 +1,89 @@ #include "qemu/osdep.h" #include "ui/clipboard.h" +#include "trace.h" static NotifierList clipboard_notifiers = NOTIFIER_LIST_INITIALIZER(clipboard_notifiers); +static QemuClipboardInfo *cbinfo[QEMU_CLIPBOARD_SELECTION__COUNT]; + void qemu_clipboard_peer_register(QemuClipboardPeer *peer) { - notifier_list_add(&clipboard_notifiers, &peer->update); + notifier_list_add(&clipboard_notifiers, &peer->notifier); } void qemu_clipboard_peer_unregister(QemuClipboardPeer *peer) { - notifier_remove(&peer->update); + int i; + + for (i = 0; i < QEMU_CLIPBOARD_SELECTION__COUNT; i++) { + qemu_clipboard_peer_release(peer, i); + } + notifier_remove(&peer->notifier); +} + +bool qemu_clipboard_peer_owns(QemuClipboardPeer *peer, + QemuClipboardSelection selection) +{ + QemuClipboardInfo *info = qemu_clipboard_info(selection); + + return info && info->owner == peer; +} + +void qemu_clipboard_peer_release(QemuClipboardPeer *peer, + QemuClipboardSelection selection) +{ + g_autoptr(QemuClipboardInfo) info = NULL; + + if (qemu_clipboard_peer_owns(peer, selection)) { + /* set empty clipboard info */ + info = qemu_clipboard_info_new(NULL, selection); + qemu_clipboard_update(info); + } +} + +bool qemu_clipboard_check_serial(QemuClipboardInfo *info, bool client) +{ + bool ok; + + if (!info->has_serial || + !cbinfo[info->selection] || + !cbinfo[info->selection]->has_serial) { + trace_clipboard_check_serial(-1, -1, true); + return true; + } + + if (client) { + ok = info->serial >= cbinfo[info->selection]->serial; + } else { + ok = info->serial > cbinfo[info->selection]->serial; + } + + trace_clipboard_check_serial(cbinfo[info->selection]->serial, info->serial, ok); + return ok; } void qemu_clipboard_update(QemuClipboardInfo *info) { - notifier_list_notify(&clipboard_notifiers, info); + QemuClipboardNotify notify = { + .type = QEMU_CLIPBOARD_UPDATE_INFO, + .info = info, + }; + assert(info->selection < QEMU_CLIPBOARD_SELECTION__COUNT); + + notifier_list_notify(&clipboard_notifiers, ¬ify); + + if (cbinfo[info->selection] != info) { + qemu_clipboard_info_unref(cbinfo[info->selection]); + cbinfo[info->selection] = qemu_clipboard_info_ref(info); + } +} + +QemuClipboardInfo *qemu_clipboard_info(QemuClipboardSelection selection) +{ + assert(selection < QEMU_CLIPBOARD_SELECTION__COUNT); + + return cbinfo[selection]; } QemuClipboardInfo *qemu_clipboard_info_new(QemuClipboardPeer *owner, @@ -69,6 +136,20 @@ void qemu_clipboard_request(QemuClipboardInfo *info, info->owner->request(info, type); } +void qemu_clipboard_reset_serial(void) +{ + QemuClipboardNotify notify = { .type = QEMU_CLIPBOARD_RESET_SERIAL }; + int i; + + for (i = 0; i < QEMU_CLIPBOARD_SELECTION__COUNT; i++) { + QemuClipboardInfo *info = qemu_clipboard_info(i); + if (info) { + info->serial = 0; + } + } + notifier_list_notify(&clipboard_notifiers, ¬ify); +} + void qemu_clipboard_set_data(QemuClipboardPeer *peer, QemuClipboardInfo *info, QemuClipboardType type, diff --git a/ui/cocoa.m b/ui/cocoa.m index 68a6302184..660d3e0935 100644 --- a/ui/cocoa.m +++ b/ui/cocoa.m @@ -27,13 +27,15 @@ #import #include -#include "qemu-common.h" +#include "qemu/help-texts.h" +#include "qemu-main.h" #include "ui/clipboard.h" #include 
"ui/console.h" #include "ui/input.h" #include "ui/kbd-state.h" #include "sysemu/sysemu.h" #include "sysemu/runstate.h" +#include "sysemu/runstate-action.h" #include "sysemu/cpu-throttle.h" #include "qapi/error.h" #include "qapi/qapi-commands-block.h" @@ -83,7 +85,7 @@ static void cocoa_switch(DisplayChangeListener *dcl, static void cocoa_refresh(DisplayChangeListener *dcl); -static NSWindow *normalWindow, *about_window; +static NSWindow *normalWindow; static const DisplayChangeListenerOps dcl_ops = { .dpy_name = "cocoa", .dpy_gfx_update = cocoa_update, @@ -95,15 +97,12 @@ static DisplayChangeListener dcl = { }; static int last_buttons; static int cursor_hide = 1; +static int left_command_key_enabled = 1; +static bool swap_opt_cmd; -static int gArgc; -static char **gArgv; static bool stretch_video; static NSTextField *pauseLabel; -static NSArray * supportedImageFileTypes; -static QemuSemaphore display_init_sem; -static QemuSemaphore app_started_sem; static bool allow_events; static NSInteger cbchangecount = -1; @@ -309,11 +308,13 @@ static void handleAnyDeviceErrors(Error * err) BOOL isMouseGrabbed; BOOL isFullscreen; BOOL isAbsoluteEnabled; + CFMachPortRef eventsTap; } - (void) switchSurface:(pixman_image_t *)image; - (void) grabMouse; - (void) ungrabMouse; - (void) toggleFullScreen:(id)sender; +- (void) setFullGrab:(id)sender; - (void) handleMonitorInput:(NSEvent *)event; - (bool) handleEvent:(NSEvent *)event; - (bool) handleEventLocked:(NSEvent *)event; @@ -336,6 +337,19 @@ static void handleAnyDeviceErrors(Error * err) QemuCocoaView *cocoaView; +static CGEventRef handleTapEvent(CGEventTapProxy proxy, CGEventType type, CGEventRef cgEvent, void *userInfo) +{ + QemuCocoaView *cocoaView = userInfo; + NSEvent *event = [NSEvent eventWithCGEvent:cgEvent]; + if ([cocoaView isMouseGrabbed] && [cocoaView handleEvent:event]) { + COCOA_DEBUG("Global events tap: qemu handled the event, capturing!\n"); + return NULL; + } + COCOA_DEBUG("Global events tap: qemu did not handle the event, letting it through...\n"); + + return cgEvent; +} + @implementation QemuCocoaView - (id)initWithFrame:(NSRect)frameRect { @@ -361,6 +375,11 @@ QemuCocoaView *cocoaView; } qkbd_state_free(kbd); + + if (eventsTap) { + CFRelease(eventsTap); + } + [super dealloc]; } @@ -523,8 +542,9 @@ QemuCocoaView *cocoaView; } } -- (void) updateUIInfo +- (void) updateUIInfoLocked { + /* Must be called with the iothread lock, i.e. via updateUIInfo */ NSSize frameSize; QemuUIInfo info; @@ -537,8 +557,20 @@ QemuCocoaView *cocoaView; CGDirectDisplayID display = [[description objectForKey:@"NSScreenNumber"] unsignedIntValue]; NSSize screenSize = [[[self window] screen] frame].size; CGSize screenPhysicalSize = CGDisplayScreenSize(display); + CVDisplayLinkRef displayLink; frameSize = isFullscreen ? 
screenSize : [self frame].size; + + if (!CVDisplayLinkCreateWithCGDisplay(display, &displayLink)) { + CVTime period = CVDisplayLinkGetNominalOutputVideoRefreshPeriod(displayLink); + CVDisplayLinkRelease(displayLink); + if (!(period.flags & kCVTimeIsIndefinite)) { + update_displaychangelistener(&dcl, + 1000 * period.timeValue / period.timeScale); + info.refresh_rate = (int64_t)1000 * period.timeScale / period.timeValue; + } + } + info.width_mm = frameSize.width / screenSize.width * screenPhysicalSize.width; info.height_mm = frameSize.height / screenSize.height * screenPhysicalSize.height; } else { @@ -552,7 +584,26 @@ QemuCocoaView *cocoaView; info.width = frameSize.width; info.height = frameSize.height; - dpy_set_ui_info(dcl.con, &info); + dpy_set_ui_info(dcl.con, &info, TRUE); +} + +- (void) updateUIInfo +{ + if (!allow_events) { + /* + * Don't try to tell QEMU about UI information in the application + * startup phase -- we haven't yet registered dcl with the QEMU UI + * layer. + * When cocoa_display_init() does register the dcl, the UI layer + * will call cocoa_switch(), which will call updateUIInfo, so + * we don't lose any information here. + */ + return; + } + + with_iothread_lock(^{ + [self updateUIInfoLocked]; + }); } - (void)viewDidMoveToWindow @@ -635,6 +686,36 @@ QemuCocoaView *cocoaView; } } +- (void) setFullGrab:(id)sender +{ + COCOA_DEBUG("QemuCocoaView: setFullGrab\n"); + + CGEventMask mask = CGEventMaskBit(kCGEventKeyDown) | CGEventMaskBit(kCGEventKeyUp) | CGEventMaskBit(kCGEventFlagsChanged); + eventsTap = CGEventTapCreate(kCGHIDEventTap, kCGHeadInsertEventTap, kCGEventTapOptionDefault, + mask, handleTapEvent, self); + if (!eventsTap) { + warn_report("Could not create event tap, system key combos will not be captured.\n"); + return; + } else { + COCOA_DEBUG("Global events tap created! Will capture system key combos.\n"); + } + + CFRunLoopRef runLoop = CFRunLoopGetCurrent(); + if (!runLoop) { + warn_report("Could not obtain current CF RunLoop, system key combos will not be captured.\n"); + return; + } + + CFRunLoopSourceRef tapEventsSrc = CFMachPortCreateRunLoopSource(kCFAllocatorDefault, eventsTap, 0); + if (!tapEventsSrc ) { + warn_report("Could not obtain current CF RunLoop, system key combos will not be captured.\n"); + return; + } + + CFRunLoopAddSource(runLoop, tapEventsSrc, kCFRunLoopDefaultMode); + CFRelease(tapEventsSrc); +} + - (void) toggleKey: (int)keycode { qkbd_state_key_event(kbd, keycode, !qkbd_state_key_get(kbd, keycode)); } @@ -652,7 +733,7 @@ QemuCocoaView *cocoaView; /* translates Macintosh keycodes to QEMU's keysym */ - int without_control_translation[] = { + static const int without_control_translation[] = { [0 ... 0xff] = 0, // invalid key [kVK_UpArrow] = QEMU_KEY_UP, @@ -667,7 +748,7 @@ QemuCocoaView *cocoaView; [kVK_Delete] = QEMU_KEY_BACKSPACE, }; - int with_control_translation[] = { + static const int with_control_translation[] = { [0 ... 0xff] = 0, // invalid key [kVK_UpArrow] = QEMU_KEY_CTRL_UP, @@ -705,16 +786,6 @@ QemuCocoaView *cocoaView; - (bool) handleEvent:(NSEvent *)event { - if(!allow_events) { - /* - * Just let OSX have all events that arrive before - * applicationDidFinishLaunching. - * This avoids a deadlock on the iothread lock, which cocoa_display_init() - * will not drop until after the app_started_sem is posted. (In theory - * there should not be any such events, but OSX Catalina now emits some.) 
- */ - return false; - } return bool_with_iothread_lock(^{ return [self handleEventLocked:event]; }); @@ -727,7 +798,6 @@ QemuCocoaView *cocoaView; int buttons = 0; int keycode = 0; bool mouse_event = false; - static bool switched_to_fullscreen = false; // Location of event in virtual screen coordinates NSPoint p = [self screenLocationOfEvent:event]; NSUInteger modifiers = [event modifierFlags]; @@ -784,12 +854,22 @@ QemuCocoaView *cocoaView; qkbd_state_key_event(kbd, Q_KEY_CODE_CTRL_R, false); } if (!(modifiers & NSEventModifierFlagOption)) { - qkbd_state_key_event(kbd, Q_KEY_CODE_ALT, false); - qkbd_state_key_event(kbd, Q_KEY_CODE_ALT_R, false); + if (swap_opt_cmd) { + qkbd_state_key_event(kbd, Q_KEY_CODE_META_L, false); + qkbd_state_key_event(kbd, Q_KEY_CODE_META_R, false); + } else { + qkbd_state_key_event(kbd, Q_KEY_CODE_ALT, false); + qkbd_state_key_event(kbd, Q_KEY_CODE_ALT_R, false); + } } if (!(modifiers & NSEventModifierFlagCommand)) { - qkbd_state_key_event(kbd, Q_KEY_CODE_META_L, false); - qkbd_state_key_event(kbd, Q_KEY_CODE_META_R, false); + if (swap_opt_cmd) { + qkbd_state_key_event(kbd, Q_KEY_CODE_ALT, false); + qkbd_state_key_event(kbd, Q_KEY_CODE_ALT_R, false); + } else { + qkbd_state_key_event(kbd, Q_KEY_CODE_META_L, false); + qkbd_state_key_event(kbd, Q_KEY_CODE_META_R, false); + } } switch ([event type]) { @@ -821,28 +901,45 @@ QemuCocoaView *cocoaView; case kVK_Option: if (!!(modifiers & NSEventModifierFlagOption)) { - [self toggleKey:Q_KEY_CODE_ALT]; + if (swap_opt_cmd) { + [self toggleKey:Q_KEY_CODE_META_L]; + } else { + [self toggleKey:Q_KEY_CODE_ALT]; + } } break; case kVK_RightOption: if (!!(modifiers & NSEventModifierFlagOption)) { - [self toggleKey:Q_KEY_CODE_ALT_R]; + if (swap_opt_cmd) { + [self toggleKey:Q_KEY_CODE_META_R]; + } else { + [self toggleKey:Q_KEY_CODE_ALT_R]; + } } break; /* Don't pass command key changes to guest unless mouse is grabbed */ case kVK_Command: if (isMouseGrabbed && - !!(modifiers & NSEventModifierFlagCommand)) { - [self toggleKey:Q_KEY_CODE_META_L]; + !!(modifiers & NSEventModifierFlagCommand) && + left_command_key_enabled) { + if (swap_opt_cmd) { + [self toggleKey:Q_KEY_CODE_ALT]; + } else { + [self toggleKey:Q_KEY_CODE_META_L]; + } } break; case kVK_RightCommand: if (isMouseGrabbed && !!(modifiers & NSEventModifierFlagCommand)) { - [self toggleKey:Q_KEY_CODE_META_R]; + if (swap_opt_cmd) { + [self toggleKey:Q_KEY_CODE_ALT_R]; + } else { + [self toggleKey:Q_KEY_CODE_META_R]; + } } break; } @@ -852,13 +949,6 @@ QemuCocoaView *cocoaView; // forward command key combos to the host UI unless the mouse is grabbed if (!isMouseGrabbed && ([event modifierFlags] & NSEventModifierFlagCommand)) { - /* - * Prevent the command key from being stuck down in the guest - * when using Command-F to switch to full screen mode. - */ - if (keycode == Q_KEY_CODE_F) { - switched_to_fullscreen = true; - } return false; } @@ -970,21 +1060,27 @@ QemuCocoaView *cocoaView; */ /* - * When deltaY is zero, it means that this scrolling event was - * either horizontal, or so fine that it only appears in - * scrollingDeltaY. So we drop the event. + * We shouldn't have got a scroll event when deltaY and delta Y + * are zero, hence no harm in dropping the event */ - if ([event deltaY] != 0) { + if ([event deltaY] != 0 || [event deltaX] != 0) { /* Determine if this is a scroll up or scroll down event */ - buttons = ([event deltaY] > 0) ? + if ([event deltaY] != 0) { + buttons = ([event deltaY] > 0) ? 
INPUT_BUTTON_WHEEL_UP : INPUT_BUTTON_WHEEL_DOWN; + } else if ([event deltaX] != 0) { + buttons = ([event deltaX] > 0) ? + INPUT_BUTTON_WHEEL_LEFT : INPUT_BUTTON_WHEEL_RIGHT; + } + qemu_input_queue_btn(dcl.con, buttons, true); qemu_input_event_sync(); qemu_input_queue_btn(dcl.con, buttons, false); qemu_input_event_sync(); } + /* - * Since deltaY also reports scroll wheel events we prevent mouse + * Since deltaX/deltaY also report scroll wheel events we prevent mouse * movement code from executing. */ mouse_event = false; @@ -1115,7 +1211,6 @@ QemuCocoaView *cocoaView; - (BOOL)verifyQuit; - (void)openDocumentation:(NSString *)filename; - (IBAction) do_about_menu_item: (id) sender; -- (void)make_about_window; - (void)adjustSpeed:(id)sender; @end @@ -1161,12 +1256,6 @@ QemuCocoaView *cocoaView; [pauseLabel setFont: [NSFont fontWithName: @"Helvetica" size: 90]]; [pauseLabel setTextColor: [NSColor blackColor]]; [pauseLabel sizeToFit]; - - // set the supported image file types that can be opened - supportedImageFileTypes = [NSArray arrayWithObjects: @"img", @"iso", @"dmg", - @"qcow", @"qcow2", @"cloop", @"vmdk", @"cdr", - @"toast", nil]; - [self make_about_window]; } return self; } @@ -1184,15 +1273,16 @@ QemuCocoaView *cocoaView; { COCOA_DEBUG("QemuCocoaAppController: applicationDidFinishLaunching\n"); allow_events = true; - /* Tell cocoa_display_init to proceed */ - qemu_sem_post(&app_started_sem); } - (void)applicationWillTerminate:(NSNotification *)aNotification { COCOA_DEBUG("QemuCocoaAppController: applicationWillTerminate\n"); - qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_UI); + with_iothread_lock(^{ + shutdown_action = SHUTDOWN_ACTION_POWEROFF; + qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_UI); + }); /* * Sleep here, because returning will cause OSX to kill us @@ -1241,6 +1331,7 @@ QemuCocoaView *cocoaView; - (void) applicationWillResignActive: (NSNotification *)aNotification { COCOA_DEBUG("QemuCocoaAppController: applicationWillResignActive\n"); + [cocoaView ungrabMouse]; [cocoaView raiseAllKeys]; } @@ -1260,6 +1351,13 @@ QemuCocoaView *cocoaView; [cocoaView toggleFullScreen:sender]; } +- (void) setFullGrab:(id)sender +{ + COCOA_DEBUG("QemuCocoaAppController: setFullGrab\n"); + + [cocoaView setFullGrab:sender]; +} + /* Tries to find then open the specified filename */ - (void) openDocumentation: (NSString *) filename { @@ -1408,7 +1506,6 @@ QemuCocoaView *cocoaView; openPanel = [NSOpenPanel openPanel]; [openPanel setCanChooseFiles: YES]; [openPanel setAllowsMultipleSelection: NO]; - [openPanel setAllowedFileTypes: supportedImageFileTypes]; if([openPanel runModal] == NSModalResponseOK) { NSString * file = [[[openPanel URLs] objectAtIndex: 0] path]; if(file == nil) { @@ -1426,6 +1523,7 @@ QemuCocoaView *cocoaView; [file cStringUsingEncoding: NSASCIIStringEncoding], true, "raw", + true, false, false, 0, &err); }); @@ -1451,92 +1549,29 @@ QemuCocoaView *cocoaView; /* The action method for the About menu item */ - (IBAction) do_about_menu_item: (id) sender { - [about_window makeKeyAndOrderFront: nil]; -} - -/* Create and display the about dialog */ -- (void)make_about_window -{ - /* Make the window */ - int x = 0, y = 0, about_width = 400, about_height = 200; - NSRect window_rect = NSMakeRect(x, y, about_width, about_height); - about_window = [[NSWindow alloc] initWithContentRect:window_rect - styleMask:NSWindowStyleMaskTitled | NSWindowStyleMaskClosable | - NSWindowStyleMaskMiniaturizable - backing:NSBackingStoreBuffered - defer:NO]; - [about_window setTitle: @"About"]; - 
[about_window setReleasedWhenClosed: NO]; - [about_window center]; - NSView *superView = [about_window contentView]; - - /* Create the dimensions of the picture */ - int picture_width = 80, picture_height = 80; - x = (about_width - picture_width)/2; - y = about_height - picture_height - 10; - NSRect picture_rect = NSMakeRect(x, y, picture_width, picture_height); - - /* Make the picture of QEMU */ - NSImageView *picture_view = [[NSImageView alloc] initWithFrame: - picture_rect]; - char *qemu_image_path_c = get_relocated_path(CONFIG_QEMU_ICONDIR "/hicolor/512x512/apps/qemu.png"); - NSString *qemu_image_path = [NSString stringWithUTF8String:qemu_image_path_c]; - g_free(qemu_image_path_c); - NSImage *qemu_image = [[NSImage alloc] initWithContentsOfFile:qemu_image_path]; - [picture_view setImage: qemu_image]; - [picture_view setImageScaling: NSImageScaleProportionallyUpOrDown]; - [superView addSubview: picture_view]; - - /* Make the name label */ - NSBundle *bundle = [NSBundle mainBundle]; - if (bundle) { - x = 0; - y = y - 25; - int name_width = about_width, name_height = 20; - NSRect name_rect = NSMakeRect(x, y, name_width, name_height); - NSTextField *name_label = [[NSTextField alloc] initWithFrame: name_rect]; - [name_label setEditable: NO]; - [name_label setBezeled: NO]; - [name_label setDrawsBackground: NO]; - [name_label setAlignment: NSTextAlignmentCenter]; - NSString *qemu_name = [[bundle executablePath] lastPathComponent]; - [name_label setStringValue: qemu_name]; - [superView addSubview: name_label]; + NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init]; + char *icon_path_c = get_relocated_path(CONFIG_QEMU_ICONDIR "/hicolor/512x512/apps/qemu.png"); + NSString *icon_path = [NSString stringWithUTF8String:icon_path_c]; + g_free(icon_path_c); + NSImage *icon = [[NSImage alloc] initWithContentsOfFile:icon_path]; + NSString *version = @"QEMU emulator version " QEMU_FULL_VERSION; + NSString *copyright = @QEMU_COPYRIGHT; + NSDictionary *options; + if (icon) { + options = @{ + NSAboutPanelOptionApplicationIcon : icon, + NSAboutPanelOptionApplicationVersion : version, + @"Copyright" : copyright, + }; + [icon release]; + } else { + options = @{ + NSAboutPanelOptionApplicationVersion : version, + @"Copyright" : copyright, + }; } - - /* Set the version label's attributes */ - x = 0; - y = 50; - int version_width = about_width, version_height = 20; - NSRect version_rect = NSMakeRect(x, y, version_width, version_height); - NSTextField *version_label = [[NSTextField alloc] initWithFrame: - version_rect]; - [version_label setEditable: NO]; - [version_label setBezeled: NO]; - [version_label setAlignment: NSTextAlignmentCenter]; - [version_label setDrawsBackground: NO]; - - /* Create the version string*/ - NSString *version_string; - version_string = [[NSString alloc] initWithFormat: - @"QEMU emulator version %s", QEMU_FULL_VERSION]; - [version_label setStringValue: version_string]; - [superView addSubview: version_label]; - - /* Make copyright label */ - x = 0; - y = 35; - int copyright_width = about_width, copyright_height = 20; - NSRect copyright_rect = NSMakeRect(x, y, copyright_width, copyright_height); - NSTextField *copyright_label = [[NSTextField alloc] initWithFrame: - copyright_rect]; - [copyright_label setEditable: NO]; - [copyright_label setBezeled: NO]; - [copyright_label setDrawsBackground: NO]; - [copyright_label setAlignment: NSTextAlignmentCenter]; - [copyright_label setStringValue: [NSString stringWithFormat: @"%s", - QEMU_COPYRIGHT]]; - [superView addSubview: copyright_label]; 
+ [NSApp orderFrontStandardAboutPanelWithOptions:options]; + [pool release]; } /* Used by the Speed menu items */ @@ -1591,11 +1626,15 @@ static void create_initial_menus(void) NSMenuItem *menuItem; [NSApp setMainMenu:[[NSMenu alloc] init]]; + [NSApp setServicesMenu:[[NSMenu alloc] initWithTitle:@"Services"]]; // Application menu menu = [[NSMenu alloc] initWithTitle:@""]; [menu addItemWithTitle:@"About QEMU" action:@selector(do_about_menu_item:) keyEquivalent:@""]; // About QEMU [menu addItem:[NSMenuItem separatorItem]]; //Separator + menuItem = [menu addItemWithTitle:@"Services" action:nil keyEquivalent:@""]; + [menuItem setSubmenu:[NSApp servicesMenu]]; + [menu addItem:[NSMenuItem separatorItem]]; [menu addItemWithTitle:@"Hide QEMU" action:@selector(hide:) keyEquivalent:@"h"]; //Hide QEMU menuItem = (NSMenuItem *)[menu addItemWithTitle:@"Hide Others" action:@selector(hideOtherApplications:) keyEquivalent:@"h"]; // Hide Others [menuItem setKeyEquivalentModifierMask:(NSEventModifierFlagOption|NSEventModifierFlagCommand)]; @@ -1674,7 +1713,9 @@ static void create_initial_menus(void) /* Returns a name for a given console */ static NSString * getConsoleName(QemuConsole * console) { - return [NSString stringWithFormat: @"%s", qemu_console_get_label(console)]; + g_autofree char *label = qemu_console_get_label(console); + + return [NSString stringWithUTF8String:label]; } /* Add an entry to the View menu for each console */ @@ -1709,11 +1750,6 @@ static void addRemovableDevicesMenuItems(void) currentDevice = qmp_query_block(NULL); pointerToFree = currentDevice; - if(currentDevice == NULL) { - NSBeep(); - QEMU_Alert(@"Failed to query for block devices!"); - return; - } menu = [[[NSApp mainMenu] itemWithTitle:@"Machine"] submenu]; @@ -1808,14 +1844,12 @@ static void cocoa_clipboard_request(QemuClipboardInfo *info, static QemuClipboardPeer cbpeer = { .name = "cocoa", - .update = { .notify = cocoa_clipboard_notify }, + .notifier = { .notify = cocoa_clipboard_notify }, .request = cocoa_clipboard_request }; -static void cocoa_clipboard_notify(Notifier *notifier, void *data) +static void cocoa_clipboard_update_info(QemuClipboardInfo *info) { - QemuClipboardInfo *info = data; - if (info->owner == &cbpeer || info->selection != QEMU_CLIPBOARD_SELECTION_CLIPBOARD) { return; } @@ -1831,19 +1865,35 @@ static void cocoa_clipboard_notify(Notifier *notifier, void *data) qemu_event_set(&cbevent); } +static void cocoa_clipboard_notify(Notifier *notifier, void *data) +{ + QemuClipboardNotify *notify = data; + + switch (notify->type) { + case QEMU_CLIPBOARD_UPDATE_INFO: + cocoa_clipboard_update_info(notify->info); + return; + case QEMU_CLIPBOARD_RESET_SERIAL: + /* ignore */ + return; + } +} + static void cocoa_clipboard_request(QemuClipboardInfo *info, QemuClipboardType type) { + NSAutoreleasePool *pool; NSData *text; switch (type) { case QEMU_CLIPBOARD_TYPE_TEXT: + pool = [[NSAutoreleasePool alloc] init]; text = [[NSPasteboard generalPasteboard] dataForType:NSPasteboardTypeString]; if (text) { qemu_clipboard_set_data(&cbpeer, info, type, [text length], [text bytes], true); - [text release]; } + [pool release]; break; default: break; @@ -1853,92 +1903,45 @@ static void cocoa_clipboard_request(QemuClipboardInfo *info, /* * The startup process for the OSX/Cocoa UI is complicated, because * OSX insists that the UI runs on the initial main thread, and so we - * need to start a second thread which runs the vl.c qemu_main(): - * - * Initial thread: 2nd thread: + * need to start a second thread which runs the 
qemu_default_main(): * in main(): - * create qemu-main thread - * wait on display_init semaphore - * call qemu_main() - * ... - * in cocoa_display_init(): - * post the display_init semaphore - * wait on app_started semaphore - * create application, menus, etc - * enter OSX run loop - * in applicationDidFinishLaunching: - * post app_started semaphore - * tell main thread to fullscreen if needed - * [...] - * run qemu main-loop - * - * We do this in two stages so that we don't do the creation of the - * GUI application menus and so on for command line options like --help - * where we want to just print text to stdout and exit immediately. + * in cocoa_display_init(): + * assign cocoa_main to qemu_main + * create application, menus, etc + * in cocoa_main(): + * create qemu-main thread + * enter OSX run loop */ static void *call_qemu_main(void *opaque) { int status; - COCOA_DEBUG("Second thread: calling qemu_main()\n"); - status = qemu_main(gArgc, gArgv, *_NSGetEnviron()); - COCOA_DEBUG("Second thread: qemu_main() returned, exiting\n"); + COCOA_DEBUG("Second thread: calling qemu_default_main()\n"); + qemu_mutex_lock_iothread(); + status = qemu_default_main(); + qemu_mutex_unlock_iothread(); + COCOA_DEBUG("Second thread: qemu_default_main() returned, exiting\n"); [cbowner release]; exit(status); } -int main (int argc, char **argv) { +static int cocoa_main() +{ QemuThread thread; - COCOA_DEBUG("Entered main()\n"); - gArgc = argc; - gArgv = argv; - - qemu_sem_init(&display_init_sem, 0); - qemu_sem_init(&app_started_sem, 0); + COCOA_DEBUG("Entered %s()\n", __func__); + qemu_mutex_unlock_iothread(); qemu_thread_create(&thread, "qemu_main", call_qemu_main, NULL, QEMU_THREAD_DETACHED); - COCOA_DEBUG("Main thread: waiting for display_init_sem\n"); - qemu_sem_wait(&display_init_sem); - COCOA_DEBUG("Main thread: initializing app\n"); - - NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; - - // Pull this console process up to being a fully-fledged graphical - // app with a menubar and Dock icon - ProcessSerialNumber psn = { 0, kCurrentProcess }; - TransformProcessType(&psn, kProcessTransformToForegroundApplication); - - [QemuApplication sharedApplication]; - - create_initial_menus(); - - /* - * Create the menu entries which depend on QEMU state (for consoles - * and removeable devices). These make calls back into QEMU functions, - * which is OK because at this point we know that the second thread - * holds the iothread lock and is synchronously waiting for us to - * finish. 
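The rewritten startup comment above describes a handoff: cocoa_display_init() installs cocoa_main as the main function, the Cocoa run loop keeps the process' initial thread, and the default QEMU main loop is pushed onto a second thread whose exit() provides the process status. A minimal, self-contained sketch of that pattern using plain pthreads and hypothetical names (default_main, ui_run_loop, main_fn, display_init are stand-ins, not QEMU's actual declarations):

/* build: cc -pthread handoff.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Hypothetical stand-ins for qemu_default_main() and the toolkit run loop. */
static int default_main(void)
{
    printf("worker thread: running the default main loop\n");
    return 0;
}

static void ui_run_loop(void)
{
    printf("main thread: entering the toolkit run loop (never returns)\n");
    for (;;) {
        pause();            /* placeholder for [NSApp run] */
    }
}

static void *call_default_main(void *opaque)
{
    (void)opaque;
    exit(default_main());   /* process exit status comes from the worker */
    return NULL;
}

/* Selected by the display backend during init, like `qemu_main = cocoa_main`. */
static int (*main_fn)(void) = default_main;

static int ui_main(void)
{
    pthread_t th;

    pthread_create(&th, NULL, call_default_main, NULL);
    pthread_detach(th);
    ui_run_loop();          /* the UI now owns the initial thread */
    abort();                /* not reached */
}

static void display_init(void)
{
    main_fn = ui_main;      /* hand the initial thread over to the UI */
}

int main(void)
{
    display_init();
    return main_fn();
}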
- */ - add_console_menu_entries(); - addRemovableDevicesMenuItems(); - - // Create an Application controller - QemuCocoaAppController *appController = [[QemuCocoaAppController alloc] init]; - [NSApp setDelegate:appController]; - // Start the main event loop COCOA_DEBUG("Main thread: entering OSX run loop\n"); [NSApp run]; - COCOA_DEBUG("Main thread: left OSX run loop, exiting\n"); + COCOA_DEBUG("Main thread: left OSX run loop, which should never happen\n"); - [appController release]; - [pool release]; - - return 0; + abort(); } @@ -1947,8 +1950,6 @@ int main (int argc, char **argv) { static void cocoa_update(DisplayChangeListener *dcl, int x, int y, int w, int h) { - NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; - COCOA_DEBUG("qemu_cocoa: cocoa_update\n"); dispatch_async(dispatch_get_main_queue(), ^{ @@ -1964,20 +1965,15 @@ static void cocoa_update(DisplayChangeListener *dcl, } [cocoaView setNeedsDisplayInRect:rect]; }); - - [pool release]; } static void cocoa_switch(DisplayChangeListener *dcl, DisplaySurface *surface) { - NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; pixman_image_t *image = surface->image; COCOA_DEBUG("qemu_cocoa: cocoa_switch\n"); - [cocoaView updateUIInfo]; - // The DisplaySurface will be freed as soon as this callback returns. // We take a reference to the underlying pixman image here so it does // not disappear from under our feet; the switchSurface method will @@ -1985,9 +1981,9 @@ static void cocoa_switch(DisplayChangeListener *dcl, pixman_image_ref(image); dispatch_async(dispatch_get_main_queue(), ^{ + [cocoaView updateUIInfo]; [cocoaView switchSurface:image]; }); - [pool release]; } static void cocoa_refresh(DisplayChangeListener *dcl) @@ -2024,23 +2020,54 @@ static void cocoa_refresh(DisplayChangeListener *dcl) static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts) { + NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; + COCOA_DEBUG("qemu_cocoa: cocoa_display_init\n"); - /* Tell main thread to go ahead and create the app and enter the run loop */ - qemu_sem_post(&display_init_sem); - qemu_sem_wait(&app_started_sem); - COCOA_DEBUG("cocoa_display_init: app start completed\n"); + qemu_main = cocoa_main; + + // Pull this console process up to being a fully-fledged graphical + // app with a menubar and Dock icon + ProcessSerialNumber psn = { 0, kCurrentProcess }; + TransformProcessType(&psn, kProcessTransformToForegroundApplication); + + [QemuApplication sharedApplication]; + + create_initial_menus(); + + /* + * Create the menu entries which depend on QEMU state (for consoles + * and removeable devices). These make calls back into QEMU functions, + * which is OK because at this point we know that the second thread + * holds the iothread lock and is synchronously waiting for us to + * finish. 
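cocoa_switch() above takes a pixman_image_ref() before queueing work on the main dispatch queue because the DisplaySurface can be freed as soon as the callback returns. A small self-contained sketch of that ownership rule, with a hypothetical refcounted buffer standing in for pixman_image_t and an ordinary function call standing in for the dispatch_async block:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted pixel buffer, standing in for pixman_image_t. */
typedef struct Image {
    int refcount;
    unsigned char *pixels;
} Image;

static Image *image_ref(Image *img) { img->refcount++; return img; }

static void image_unref(Image *img)
{
    if (--img->refcount == 0) {
        free(img->pixels);
        free(img);
    }
}

/* Stand-in for the queued block: it runs later, possibly after the
 * caller's surface has already been destroyed. */
static void deferred_switch(Image *img)
{
    printf("consumer: using %p, then dropping the reference\n", (void *)img->pixels);
    image_unref(img);                 /* matches the ref taken by the producer */
}

/* Stand-in for cocoa_switch(): the surface may die right after we return,
 * so take a reference before deferring the work. */
static Image *producer_switch(Image *surface_image)
{
    return image_ref(surface_image);  /* keeps the pixels alive for deferred_switch() */
}

int main(void)
{
    Image *img = calloc(1, sizeof(*img));
    img->refcount = 1;
    img->pixels = malloc(64);

    Image *deferred = producer_switch(img);
    image_unref(img);                 /* the "DisplaySurface" goes away here */
    deferred_switch(deferred);        /* the queued work still sees valid pixels */
    return 0;
}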
+ */ + add_console_menu_entries(); + addRemovableDevicesMenuItems(); + + // Create an Application controller + QemuCocoaAppController *controller = [[QemuCocoaAppController alloc] init]; + [NSApp setDelegate:controller]; /* if fullscreen mode is to be used */ if (opts->has_full_screen && opts->full_screen) { - dispatch_async(dispatch_get_main_queue(), ^{ - [NSApp activateIgnoringOtherApps: YES]; - [(QemuCocoaAppController *)[[NSApplication sharedApplication] delegate] toggleFullScreen: nil]; - }); + [NSApp activateIgnoringOtherApps: YES]; + [controller toggleFullScreen: nil]; } + if (opts->u.cocoa.has_full_grab && opts->u.cocoa.full_grab) { + [controller setFullGrab: nil]; + } + if (opts->has_show_cursor && opts->show_cursor) { cursor_hide = 0; } + if (opts->u.cocoa.has_swap_opt_cmd) { + swap_opt_cmd = opts->u.cocoa.swap_opt_cmd; + } + + if (opts->u.cocoa.has_left_command_key && !opts->u.cocoa.left_command_key) { + left_command_key_enabled = 0; + } // register vga output callbacks register_displaychangelistener(&dcl); @@ -2048,6 +2075,8 @@ static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts) qemu_event_init(&cbevent, false); cbowner = [[QemuCocoaPasteboardTypeOwner alloc] init]; qemu_clipboard_peer_register(&cbpeer); + + [pool release]; } static QemuDisplay qemu_display_cocoa = { diff --git a/ui/console-gl.c b/ui/console-gl.c index 7c9894a51d..8e3c9a3c8c 100644 --- a/ui/console-gl.c +++ b/ui/console-gl.c @@ -49,6 +49,10 @@ void surface_gl_create_texture(QemuGLShader *gls, assert(gls); assert(QEMU_IS_ALIGNED(surface_stride(surface), surface_bytes_per_pixel(surface))); + if (surface->texture) { + return; + } + switch (surface->format) { case PIXMAN_BE_b8g8r8x8: case PIXMAN_BE_b8g8r8a8: diff --git a/ui/console.c b/ui/console.c index 1103b65314..646202214a 100644 --- a/ui/console.c +++ b/ui/console.c @@ -27,14 +27,19 @@ #include "hw/qdev-core.h" #include "qapi/error.h" #include "qapi/qapi-commands-ui.h" +#include "qemu/fifo8.h" +#include "qemu/main-loop.h" #include "qemu/module.h" #include "qemu/option.h" #include "qemu/timer.h" -#include "chardev/char-fe.h" +#include "chardev/char.h" #include "trace.h" #include "exec/memory.h" #include "io/channel-file.h" #include "qom/object.h" +#ifdef CONFIG_PNG +#include +#endif #define DEFAULT_BACKSCROLL 512 #define CONSOLE_CURSOR_PERIOD 500 @@ -62,57 +67,6 @@ enum TTYState { TTY_STATE_CSI, }; -typedef struct QEMUFIFO { - uint8_t *buf; - int buf_size; - int count, wptr, rptr; -} QEMUFIFO; - -static int qemu_fifo_write(QEMUFIFO *f, const uint8_t *buf, int len1) -{ - int l, len; - - l = f->buf_size - f->count; - if (len1 > l) - len1 = l; - len = len1; - while (len > 0) { - l = f->buf_size - f->wptr; - if (l > len) - l = len; - memcpy(f->buf + f->wptr, buf, l); - f->wptr += l; - if (f->wptr >= f->buf_size) - f->wptr = 0; - buf += l; - len -= l; - } - f->count += len1; - return len1; -} - -static int qemu_fifo_read(QEMUFIFO *f, uint8_t *buf, int len1) -{ - int l, len; - - if (len1 > f->count) - len1 = f->count; - len = len1; - while (len > 0) { - l = f->buf_size - f->rptr; - if (l > len) - l = len; - memcpy(buf, f->buf + f->rptr, l); - f->rptr += l; - if (f->rptr >= f->buf_size) - f->rptr = 0; - buf += l; - len -= l; - } - f->count -= len1; - return len1; -} - typedef enum { GRAPHIC_CONSOLE, TEXT_CONSOLE, @@ -126,9 +80,11 @@ struct QemuConsole { console_type_t console_type; DisplayState *ds; DisplaySurface *surface; + DisplayScanout scanout; int dcls; - DisplayChangeListener *gl; - bool gl_block; + DisplayGLCtx *gl; + int gl_block; + 
QEMUTimer *gl_unblock_timer; int window_id; /* Graphic console state. */ @@ -165,9 +121,7 @@ struct QemuConsole { Chardev *chr; /* fifo for key pressed */ - QEMUFIFO out_fifo; - uint8_t out_fifo_buf[16]; - QEMUTimer *kbd_timer; + Fifo8 out_fifo; CoQueue dump_queue; QTAILQ_ENTRY(QemuConsole) next; @@ -196,6 +150,9 @@ static void dpy_refresh(DisplayState *s); static DisplayState *get_alloc_displaystate(void); static void text_console_update_cursor_timer(void); static void text_console_update_cursor(void *opaque); +static bool displaychangelistener_has_dmabuf(DisplayChangeListener *dcl); +static bool console_compatible_with(QemuConsole *con, + DisplayChangeListener *dcl, Error **errp); static void gui_update(void *opaque) { @@ -203,7 +160,6 @@ static void gui_update(void *opaque) uint64_t dcl_interval; DisplayState *ds = opaque; DisplayChangeListener *dcl; - QemuConsole *con; ds->refreshing = true; dpy_refresh(ds); @@ -218,11 +174,6 @@ static void gui_update(void *opaque) } if (ds->update_interval != interval) { ds->update_interval = interval; - QTAILQ_FOREACH(con, &consoles, next) { - if (con->hw_ops->update_interval) { - con->hw_ops->update_interval(con->hw, interval); - } - } trace_console_refresh(interval); } ds->last_update = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); @@ -264,7 +215,7 @@ static void gui_setup_refresh(DisplayState *ds) void graphic_hw_update_done(QemuConsole *con) { if (con) { - qemu_co_queue_restart_all(&con->dump_queue); + qemu_co_enter_all(&con->dump_queue, NULL); } } @@ -284,22 +235,36 @@ void graphic_hw_update(QemuConsole *con) } } -void graphic_hw_gl_block(QemuConsole *con, bool block) +static void graphic_hw_gl_unblock_timer(void *opaque) { - assert(con != NULL); - - con->gl_block = block; - if (con->hw_ops->gl_block) { - con->hw_ops->gl_block(con->hw, block); - } + warn_report("console: no gl-unblock within one second"); } -void graphic_hw_gl_flushed(QemuConsole *con) +void graphic_hw_gl_block(QemuConsole *con, bool block) { + uint64_t timeout; assert(con != NULL); - if (con->hw_ops->gl_flushed) { - con->hw_ops->gl_flushed(con->hw); + if (block) { + con->gl_block++; + } else { + con->gl_block--; + } + assert(con->gl_block >= 0); + if (!con->hw_ops->gl_block) { + return; + } + if ((block && con->gl_block != 1) || (!block && con->gl_block != 0)) { + return; + } + con->hw_ops->gl_block(con->hw, block); + + if (block) { + timeout = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + timeout += 1000; /* one sec */ + timer_mod(con->gl_unblock_timer, timeout); + } else { + timer_del(con->gl_unblock_timer); } } @@ -323,6 +288,88 @@ void graphic_hw_invalidate(QemuConsole *con) } } +#ifdef CONFIG_PNG +/** + * png_save: Take a screenshot as PNG + * + * Saves screendump as a PNG file + * + * Returns true for success or false for error. + * + * @fd: File descriptor for PNG file. + * @image: Image data in pixman format. + * @errp: Pointer to an error. 
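graphic_hw_gl_block() in the hunk above now treats gl_block as a nesting counter: the hardware hook fires only on the 0-to-1 and 1-to-0 transitions, and a one-second timer warns if no unblock arrives. A self-contained sketch of that transition logic with hypothetical names (the watchdog is modelled as a flag rather than a QEMUTimer):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int gl_block_count;
static bool watchdog_armed;

/* Hypothetical hardware hook, standing in for hw_ops->gl_block(). */
static void hw_gl_block(bool block)
{
    printf("hw hook: %s\n", block ? "blocked" : "unblocked");
}

static void gl_block(bool block)
{
    gl_block_count += block ? 1 : -1;
    assert(gl_block_count >= 0);

    /* Only the outermost block/unblock reaches the hardware hook. */
    if ((block && gl_block_count != 1) || (!block && gl_block_count != 0)) {
        return;
    }
    hw_gl_block(block);

    /* Arm a watchdog while blocked; the real code arms a 1s QEMUTimer
     * whose callback warn_report()s if no unblock arrives in time. */
    watchdog_armed = block;
}

int main(void)
{
    gl_block(true);     /* 0 -> 1: hook fires, watchdog armed */
    gl_block(true);     /* nested block: no hook call */
    gl_block(false);    /* still nested: no hook call */
    gl_block(false);    /* 1 -> 0: hook fires, watchdog disarmed */
    printf("watchdog armed: %d\n", watchdog_armed);
    return 0;
}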
+ */ +static bool png_save(int fd, pixman_image_t *image, Error **errp) +{ + int width = pixman_image_get_width(image); + int height = pixman_image_get_height(image); + png_struct *png_ptr; + png_info *info_ptr; + g_autoptr(pixman_image_t) linebuf = + qemu_pixman_linebuf_create(PIXMAN_BE_r8g8b8, width); + uint8_t *buf = (uint8_t *)pixman_image_get_data(linebuf); + FILE *f = fdopen(fd, "wb"); + int y; + if (!f) { + error_setg_errno(errp, errno, + "Failed to create file from file descriptor"); + return false; + } + + png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, + NULL, NULL); + if (!png_ptr) { + error_setg(errp, "PNG creation failed. Unable to write struct"); + fclose(f); + return false; + } + + info_ptr = png_create_info_struct(png_ptr); + + if (!info_ptr) { + error_setg(errp, "PNG creation failed. Unable to write info"); + fclose(f); + png_destroy_write_struct(&png_ptr, &info_ptr); + return false; + } + + png_init_io(png_ptr, f); + + png_set_IHDR(png_ptr, info_ptr, width, height, 8, + PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE, + PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE); + + png_write_info(png_ptr, info_ptr); + + for (y = 0; y < height; ++y) { + qemu_pixman_linebuf_fill(linebuf, image, width, 0, y); + png_write_row(png_ptr, buf); + } + + png_write_end(png_ptr, NULL); + + png_destroy_write_struct(&png_ptr, &info_ptr); + + if (fclose(f) != 0) { + error_setg_errno(errp, errno, + "PNG creation failed. Unable to close file"); + return false; + } + + return true; +} + +#else /* no png support */ + +static bool png_save(int fd, pixman_image_t *image, Error **errp) +{ + error_setg(errp, "Enable PNG support with libpng for screendump"); + return false; +} + +#endif /* CONFIG_PNG */ + static bool ppm_save(int fd, pixman_image_t *image, Error **errp) { int width = pixman_image_get_width(image); @@ -361,7 +408,8 @@ static void graphic_hw_update_bh(void *con) /* Safety: coroutine-only, concurrent-coroutine safe, main thread only */ void coroutine_fn qmp_screendump(const char *filename, bool has_device, const char *device, - bool has_head, int64_t head, Error **errp) + bool has_head, int64_t head, + bool has_format, ImageFormat format, Error **errp) { g_autoptr(pixman_image_t) image = NULL; QemuConsole *con; @@ -417,8 +465,16 @@ qmp_screendump(const char *filename, bool has_device, const char *device, * yields and releases the BQL. It could produce corrupted dump, but * it should be otherwise safe. 
*/ - if (!ppm_save(fd, image, errp)) { - qemu_unlink(filename); + if (has_format && format == IMAGE_FORMAT_PNG) { + /* PNG format specified for screendump */ + if (!png_save(fd, image, errp)) { + qemu_unlink(filename); + } + } else { + /* PPM format specified/default for screendump */ + if (!ppm_save(fd, image, errp)) { + qemu_unlink(filename); + } } } @@ -517,6 +573,8 @@ static void text_console_resize(QemuConsole *s) TextCell *cells, *c, *c1; int w1, x, y, last_width; + assert(s->scanout.kind == SCANOUT_SURFACE); + last_width = s->width; s->width = surface_width(s->surface) / FONT_WIDTH; s->height = surface_height(s->surface) / FONT_HEIGHT; @@ -1088,6 +1146,83 @@ static void console_putchar(QemuConsole *s, int ch) } } +static void displaychangelistener_gfx_switch(DisplayChangeListener *dcl, + struct DisplaySurface *new_surface, + bool update) +{ + if (dcl->ops->dpy_gfx_switch) { + dcl->ops->dpy_gfx_switch(dcl, new_surface); + } + + if (update && dcl->ops->dpy_gfx_update) { + dcl->ops->dpy_gfx_update(dcl, 0, 0, + surface_width(new_surface), + surface_height(new_surface)); + } +} + +static void dpy_gfx_create_texture(QemuConsole *con, DisplaySurface *surface) +{ + if (con->gl && con->gl->ops->dpy_gl_ctx_create_texture) { + con->gl->ops->dpy_gl_ctx_create_texture(con->gl, surface); + } +} + +static void dpy_gfx_destroy_texture(QemuConsole *con, DisplaySurface *surface) +{ + if (con->gl && con->gl->ops->dpy_gl_ctx_destroy_texture) { + con->gl->ops->dpy_gl_ctx_destroy_texture(con->gl, surface); + } +} + +static void dpy_gfx_update_texture(QemuConsole *con, DisplaySurface *surface, + int x, int y, int w, int h) +{ + if (con->gl && con->gl->ops->dpy_gl_ctx_update_texture) { + con->gl->ops->dpy_gl_ctx_update_texture(con->gl, surface, x, y, w, h); + } +} + +static void displaychangelistener_display_console(DisplayChangeListener *dcl, + QemuConsole *con, + Error **errp) +{ + static const char nodev[] = + "This VM has no graphic display device."; + static DisplaySurface *dummy; + + if (!con || !console_compatible_with(con, dcl, errp)) { + if (!dummy) { + dummy = qemu_create_placeholder_surface(640, 480, nodev); + } + if (con) { + dpy_gfx_create_texture(con, dummy); + } + displaychangelistener_gfx_switch(dcl, dummy, TRUE); + return; + } + + dpy_gfx_create_texture(con, con->surface); + displaychangelistener_gfx_switch(dcl, con->surface, + con->scanout.kind == SCANOUT_SURFACE); + + if (con->scanout.kind == SCANOUT_DMABUF && + displaychangelistener_has_dmabuf(dcl)) { + dcl->ops->dpy_gl_scanout_dmabuf(dcl, con->scanout.dmabuf); + } else if (con->scanout.kind == SCANOUT_TEXTURE && + dcl->ops->dpy_gl_scanout_texture) { + dcl->ops->dpy_gl_scanout_texture(dcl, + con->scanout.texture.backing_id, + con->scanout.texture.backing_y_0_top, + con->scanout.texture.backing_width, + con->scanout.texture.backing_height, + con->scanout.texture.x, + con->scanout.texture.y, + con->scanout.texture.width, + con->scanout.texture.height); + } +} + void console_select(unsigned int index) { DisplayChangeListener *dcl; @@ -1104,13 +1239,7 @@ void console_select(unsigned int index) if (dcl->con != NULL) { continue; } - if (dcl->ops->dpy_gfx_switch) { - dcl->ops->dpy_gfx_switch(dcl, s->surface); - } - } - if (s->surface) { - dpy_gfx_update(s, 0, 0, surface_width(s->surface), - surface_height(s->surface)); + displaychangelistener_display_console(dcl, s, NULL); } } if (ds->have_text) { @@ -1157,25 +1286,20 @@ static int vc_chr_write(Chardev *chr, const uint8_t *buf, int len) return len; } -static void kbd_send_chars(void *opaque) 
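The kbd_send_chars() rewrite that follows drains a Fifo8 instead of the removed hand-rolled QEMUFIFO. The loop exists because fifo8_pop_buf() hands back one contiguous run at a time and can return fewer bytes than requested when the data wraps around the end of the ring. A self-contained model of that drain loop, using a toy ring buffer rather than qemu/fifo8.h:

#include <stdint.h>
#include <stdio.h>

#define CAP 8

/* Toy ring buffer with fifo8-like pop semantics: pop returns a pointer to a
 * contiguous run, which may be shorter than requested at the wrap point. */
typedef struct Ring {
    uint8_t buf[CAP];
    uint32_t head, used;
} Ring;

static uint32_t ring_used(Ring *r) { return r->used; }

static const uint8_t *ring_pop_buf(Ring *r, uint32_t max, uint32_t *popped)
{
    uint32_t run = CAP - r->head;          /* bytes before the wrap point */
    const uint8_t *p = &r->buf[r->head];

    if (run > r->used) run = r->used;
    if (run > max)     run = max;
    r->head = (r->head + run) % CAP;
    r->used -= run;
    *popped = run;
    return p;
}

/* Stand-in for qemu_chr_be_write(): pretend the frontend accepts 3 bytes. */
static uint32_t sink_can_write(void) { return 3; }
static void sink_write(const uint8_t *buf, uint32_t len)
{
    printf("wrote %u bytes: %.*s\n", len, (int)len, (const char *)buf);
}

static void drain(Ring *r)
{
    uint32_t len = sink_can_write();
    uint32_t avail = ring_used(r);

    while (len > 0 && avail > 0) {
        uint32_t size;
        const uint8_t *buf = ring_pop_buf(r, len < avail ? len : avail, &size);

        sink_write(buf, size);
        len = sink_can_write();
        avail -= size;
    }
}

int main(void)
{
    Ring r = { .head = 6, .used = 0 };
    const char *msg = "ABCDEFG";

    /* Preload so the data wraps around the end of the buffer. */
    for (uint32_t i = 0; msg[i]; i++) {
        r.buf[(r.head + r.used++) % CAP] = (uint8_t)msg[i];
    }
    drain(&r);   /* emits "AB", "CDE", "FG": chunked, never past the wrap */
    return 0;
}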
+static void kbd_send_chars(QemuConsole *s) { - QemuConsole *s = opaque; - int len; - uint8_t buf[16]; + uint32_t len, avail; len = qemu_chr_be_can_write(s->chr); - if (len > s->out_fifo.count) - len = s->out_fifo.count; - if (len > 0) { - if (len > sizeof(buf)) - len = sizeof(buf); - qemu_fifo_read(&s->out_fifo, buf, len); - qemu_chr_be_write(s->chr, buf, len); - } - /* characters are pending: we send them a bit later (XXX: - horrible, should change char device API) */ - if (s->out_fifo.count > 0) { - timer_mod(s->kbd_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1); + avail = fifo8_num_used(&s->out_fifo); + while (len > 0 && avail > 0) { + const uint8_t *buf; + uint32_t size; + + buf = fifo8_pop_buf(&s->out_fifo, MIN(len, avail), &size); + qemu_chr_be_write(s->chr, buf, size); + len = qemu_chr_be_can_write(s->chr); + avail -= size; } } @@ -1183,8 +1307,8 @@ static void kbd_send_chars(void *opaque) void kbd_put_keysym_console(QemuConsole *s, int keysym) { uint8_t buf[16], *q; - CharBackend *be; int c; + uint32_t num_free; if (!s || (s->console_type == GRAPHIC_CONSOLE)) return; @@ -1226,11 +1350,9 @@ void kbd_put_keysym_console(QemuConsole *s, int keysym) if (s->echo) { vc_chr_write(s->chr, buf, q - buf); } - be = s->chr->be; - if (be && be->chr_read) { - qemu_fifo_write(&s->out_fifo, buf, q - buf); - kbd_send_chars(s); - } + num_free = fifo8_num_free(&s->out_fifo); + fifo8_push_all(&s->out_fifo, buf, MIN(num_free, q - buf)); + kbd_send_chars(s); break; } } @@ -1245,6 +1367,7 @@ static const int qcode_to_keysym[Q_KEY_CODE__MAX] = { [Q_KEY_CODE_PGUP] = QEMU_KEY_PAGEUP, [Q_KEY_CODE_PGDN] = QEMU_KEY_PAGEDOWN, [Q_KEY_CODE_DELETE] = QEMU_KEY_DELETE, + [Q_KEY_CODE_TAB] = QEMU_KEY_TAB, [Q_KEY_CODE_BACKSPACE] = QEMU_KEY_BACKSPACE, }; @@ -1478,14 +1601,20 @@ static bool displaychangelistener_has_dmabuf(DisplayChangeListener *dcl) return false; } -static bool dpy_compatible_with(QemuConsole *con, - DisplayChangeListener *dcl, Error **errp) +static bool console_compatible_with(QemuConsole *con, + DisplayChangeListener *dcl, Error **errp) { - ERRP_GUARD(); int flags; flags = con->hw_ops->get_flags ? 
con->hw_ops->get_flags(con->hw) : 0; + if (console_has_gl(con) && + !con->gl->ops->dpy_gl_ctx_is_compatible_dcl(con->gl, dcl)) { + error_setg(errp, "Display %s is incompatible with the GL context", + dcl->ops->dpy_name); + return false; + } + if (flags & GRAPHIC_FLAGS_GL && !console_has_gl(con)) { error_setg(errp, "The console requires a GL context."); @@ -1502,31 +1631,22 @@ static bool dpy_compatible_with(QemuConsole *con, return true; } -void register_displaychangelistener(DisplayChangeListener *dcl) +void qemu_console_set_display_gl_ctx(QemuConsole *con, DisplayGLCtx *gl) { - static const char nodev[] = - "This VM has no graphic display device."; - static DisplaySurface *dummy; - QemuConsole *con; - Error *err = NULL; - - assert(!dcl->ds); - - if (dcl->ops->dpy_gl_ctx_create) { - /* display has opengl support */ - assert(dcl->con); - if (dcl->con->gl) { - fprintf(stderr, "can't register two opengl displays (%s, %s)\n", - dcl->ops->dpy_name, dcl->con->gl->ops->dpy_name); - exit(1); - } - dcl->con->gl = dcl; - } - - if (dcl->con && !dpy_compatible_with(dcl->con, dcl, &err)) { - error_report_err(err); + /* display has opengl support */ + assert(con); + if (con->gl) { + error_report("The console already has an OpenGL context."); exit(1); } + con->gl = gl; +} + +void register_displaychangelistener(DisplayChangeListener *dcl) +{ + QemuConsole *con; + + assert(!dcl->ds); trace_displaychangelistener_register(dcl, dcl->ops->dpy_name); dcl->ds = get_alloc_displaystate(); @@ -1538,16 +1658,7 @@ void register_displaychangelistener(DisplayChangeListener *dcl) } else { con = active_console; } - if (dcl->ops->dpy_gfx_switch) { - if (con) { - dcl->ops->dpy_gfx_switch(dcl, con->surface); - } else { - if (!dummy) { - dummy = qemu_create_placeholder_surface(640, 480, nodev); - } - dcl->ops->dpy_gfx_switch(dcl, dummy); - } - } + displaychangelistener_display_console(dcl, con, dcl->con ? &error_fatal : NULL); text_console_update_cursor(NULL); } @@ -1599,7 +1710,7 @@ const QemuUIInfo *dpy_get_ui_info(const QemuConsole *con) return &con->ui_info; } -int dpy_set_ui_info(QemuConsole *con, QemuUIInfo *info) +int dpy_set_ui_info(QemuConsole *con, QemuUIInfo *info, bool delay) { if (con == NULL) { con = active_console; @@ -1619,7 +1730,8 @@ int dpy_set_ui_info(QemuConsole *con, QemuUIInfo *info) * go notify the guest. */ con->ui_info = *info; - timer_mod(con->ui_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000); + timer_mod(con->ui_timer, + qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + (delay ? 1000 : 0)); return 0; } @@ -1627,13 +1739,9 @@ void dpy_gfx_update(QemuConsole *con, int x, int y, int w, int h) { DisplayState *s = con->ds; DisplayChangeListener *dcl; - int width = w; - int height = h; + int width = qemu_console_get_width(con, x + w); + int height = qemu_console_get_height(con, y + h); - if (con->surface) { - width = surface_width(con->surface); - height = surface_height(con->surface); - } x = MAX(x, 0); y = MAX(y, 0); x = MIN(x, width); @@ -1644,6 +1752,7 @@ void dpy_gfx_update(QemuConsole *con, int x, int y, int w, int h) if (!qemu_console_is_visible(con)) { return; } + dpy_gfx_update_texture(con, con->surface, x, y, w, h); QLIST_FOREACH(dcl, &s->listeners, next) { if (con != (dcl->con ? 
dcl->con : active_console)) { continue; @@ -1656,12 +1765,10 @@ void dpy_gfx_update(QemuConsole *con, int x, int y, int w, int h) void dpy_gfx_update_full(QemuConsole *con) { - if (!con->surface) { - return; - } - dpy_gfx_update(con, 0, 0, - surface_width(con->surface), - surface_height(con->surface)); + int w = qemu_console_get_width(con, 0); + int h = qemu_console_get_height(con, 0); + + dpy_gfx_update(con, 0, 0, w, h); } void dpy_gfx_replace_surface(QemuConsole *con, @@ -1688,15 +1795,16 @@ void dpy_gfx_replace_surface(QemuConsole *con, assert(old_surface != surface); + con->scanout.kind = SCANOUT_SURFACE; con->surface = surface; + dpy_gfx_create_texture(con, surface); QLIST_FOREACH(dcl, &s->listeners, next) { if (con != (dcl->con ? dcl->con : active_console)) { continue; } - if (dcl->ops->dpy_gfx_switch) { - dcl->ops->dpy_gfx_switch(dcl, surface); - } + displaychangelistener_gfx_switch(dcl, surface, FALSE); } + dpy_gfx_destroy_texture(con, old_surface); qemu_free_displaysurface(old_surface); } @@ -1860,8 +1968,20 @@ int dpy_gl_ctx_make_current(QemuConsole *con, QEMUGLContext ctx) void dpy_gl_scanout_disable(QemuConsole *con) { - assert(con->gl); - con->gl->ops->dpy_gl_scanout_disable(con->gl); + DisplayState *s = con->ds; + DisplayChangeListener *dcl; + + if (con->scanout.kind != SCANOUT_SURFACE) { + con->scanout.kind = SCANOUT_NONE; + } + QLIST_FOREACH(dcl, &s->listeners, next) { + if (con != (dcl->con ? dcl->con : active_console)) { + continue; + } + if (dcl->ops->dpy_gl_scanout_disable) { + dcl->ops->dpy_gl_scanout_disable(dcl); + } + } } void dpy_gl_scanout_texture(QemuConsole *con, @@ -1872,56 +1992,112 @@ void dpy_gl_scanout_texture(QemuConsole *con, uint32_t x, uint32_t y, uint32_t width, uint32_t height) { - assert(con->gl); - con->gl->ops->dpy_gl_scanout_texture(con->gl, backing_id, - backing_y_0_top, - backing_width, backing_height, - x, y, width, height); + DisplayState *s = con->ds; + DisplayChangeListener *dcl; + + con->scanout.kind = SCANOUT_TEXTURE; + con->scanout.texture = (ScanoutTexture) { + backing_id, backing_y_0_top, backing_width, backing_height, + x, y, width, height + }; + QLIST_FOREACH(dcl, &s->listeners, next) { + if (con != (dcl->con ? dcl->con : active_console)) { + continue; + } + if (dcl->ops->dpy_gl_scanout_texture) { + dcl->ops->dpy_gl_scanout_texture(dcl, backing_id, + backing_y_0_top, + backing_width, backing_height, + x, y, width, height); + } + } } void dpy_gl_scanout_dmabuf(QemuConsole *con, QemuDmaBuf *dmabuf) { - assert(con->gl); - con->gl->ops->dpy_gl_scanout_dmabuf(con->gl, dmabuf); + DisplayState *s = con->ds; + DisplayChangeListener *dcl; + + con->scanout.kind = SCANOUT_DMABUF; + con->scanout.dmabuf = dmabuf; + QLIST_FOREACH(dcl, &s->listeners, next) { + if (con != (dcl->con ? dcl->con : active_console)) { + continue; + } + if (dcl->ops->dpy_gl_scanout_dmabuf) { + dcl->ops->dpy_gl_scanout_dmabuf(dcl, dmabuf); + } + } } void dpy_gl_cursor_dmabuf(QemuConsole *con, QemuDmaBuf *dmabuf, bool have_hot, uint32_t hot_x, uint32_t hot_y) { - assert(con->gl); + DisplayState *s = con->ds; + DisplayChangeListener *dcl; - if (con->gl->ops->dpy_gl_cursor_dmabuf) { - con->gl->ops->dpy_gl_cursor_dmabuf(con->gl, dmabuf, + QLIST_FOREACH(dcl, &s->listeners, next) { + if (con != (dcl->con ? 
dcl->con : active_console)) { + continue; + } + if (dcl->ops->dpy_gl_cursor_dmabuf) { + dcl->ops->dpy_gl_cursor_dmabuf(dcl, dmabuf, have_hot, hot_x, hot_y); + } } } void dpy_gl_cursor_position(QemuConsole *con, uint32_t pos_x, uint32_t pos_y) { - assert(con->gl); + DisplayState *s = con->ds; + DisplayChangeListener *dcl; - if (con->gl->ops->dpy_gl_cursor_position) { - con->gl->ops->dpy_gl_cursor_position(con->gl, pos_x, pos_y); + QLIST_FOREACH(dcl, &s->listeners, next) { + if (con != (dcl->con ? dcl->con : active_console)) { + continue; + } + if (dcl->ops->dpy_gl_cursor_position) { + dcl->ops->dpy_gl_cursor_position(dcl, pos_x, pos_y); + } } } void dpy_gl_release_dmabuf(QemuConsole *con, QemuDmaBuf *dmabuf) { - assert(con->gl); + DisplayState *s = con->ds; + DisplayChangeListener *dcl; - if (con->gl->ops->dpy_gl_release_dmabuf) { - con->gl->ops->dpy_gl_release_dmabuf(con->gl, dmabuf); + QLIST_FOREACH(dcl, &s->listeners, next) { + if (con != (dcl->con ? dcl->con : active_console)) { + continue; + } + if (dcl->ops->dpy_gl_release_dmabuf) { + dcl->ops->dpy_gl_release_dmabuf(dcl, dmabuf); + } } } void dpy_gl_update(QemuConsole *con, uint32_t x, uint32_t y, uint32_t w, uint32_t h) { + DisplayState *s = con->ds; + DisplayChangeListener *dcl; + assert(con->gl); - con->gl->ops->dpy_gl_update(con->gl, x, y, w, h); + + graphic_hw_gl_block(con, true); + QLIST_FOREACH(dcl, &s->listeners, next) { + if (con != (dcl->con ? dcl->con : active_console)) { + continue; + } + if (dcl->ops->dpy_gl_update) { + dcl->ops->dpy_gl_update(dcl, x, y, w, h); + } + } + graphic_hw_gl_block(con, false); } /***********************************************************/ @@ -1990,10 +2166,8 @@ QemuConsole *graphic_console_init(DeviceState *dev, uint32_t head, s = qemu_console_lookup_unused(); if (s) { trace_console_gfx_reuse(s->index); - if (s->surface) { - width = surface_width(s->surface); - height = surface_height(s->surface); - } + width = qemu_console_get_width(s, 0); + height = qemu_console_get_height(s, 0); } else { trace_console_gfx_new(); s = new_console(ds, GRAPHIC_CONSOLE, head); @@ -2008,6 +2182,8 @@ QemuConsole *graphic_console_init(DeviceState *dev, uint32_t head, surface = qemu_create_placeholder_surface(width, height, noinit); dpy_gfx_replace_surface(s, surface); + s->gl_unblock_timer = timer_new_ms(QEMU_CLOCK_REALTIME, + graphic_hw_gl_unblock_timer, s); return s; } @@ -2020,13 +2196,8 @@ void graphic_console_close(QemuConsole *con) static const char unplugged[] = "Guest display has been unplugged"; DisplaySurface *surface; - int width = 640; - int height = 480; - - if (con->surface) { - width = surface_width(con->surface); - height = surface_height(con->surface); - } + int width = qemu_console_get_width(con, 640); + int height = qemu_console_get_height(con, 480); trace_console_gfx_close(con->index); object_property_set_link(OBJECT(con), "device", NULL, &error_abort); @@ -2142,11 +2313,50 @@ bool qemu_console_is_gl_blocked(QemuConsole *con) return con->gl_block; } +bool qemu_console_is_multihead(DeviceState *dev) +{ + QemuConsole *con; + Object *obj; + uint32_t f = 0xffffffff; + uint32_t h; + + QTAILQ_FOREACH(con, &consoles, next) { + obj = object_property_get_link(OBJECT(con), + "device", &error_abort); + if (DEVICE(obj) != dev) { + continue; + } + + h = object_property_get_uint(OBJECT(con), + "head", &error_abort); + if (f == 0xffffffff) { + f = h; + } else if (h != f) { + return true; + } + } + return false; +} + char *qemu_console_get_label(QemuConsole *con) { if (con->console_type == GRAPHIC_CONSOLE) { if 
(con->device) { - return g_strdup(object_get_typename(con->device)); + DeviceState *dev; + bool multihead; + + dev = DEVICE(con->device); + multihead = qemu_console_is_multihead(dev); + if (multihead) { + return g_strdup_printf("%s.%d", dev->id ? + dev->id : + object_get_typename(con->device), + con->head); + } else { + return g_strdup_printf("%s", dev->id ? + dev->id : + object_get_typename(con->device)); + } } return g_strdup("VGA"); } else { @@ -2178,7 +2388,19 @@ int qemu_console_get_width(QemuConsole *con, int fallback) if (con == NULL) { con = active_console; } - return con ? surface_width(con->surface) : fallback; + if (con == NULL) { + return fallback; + } + switch (con->scanout.kind) { + case SCANOUT_DMABUF: + return con->scanout.dmabuf->width; + case SCANOUT_TEXTURE: + return con->scanout.texture.width; + case SCANOUT_SURFACE: + return surface_width(con->surface); + default: + return fallback; + } } int qemu_console_get_height(QemuConsole *con, int fallback) @@ -2186,7 +2408,27 @@ int qemu_console_get_height(QemuConsole *con, int fallback) if (con == NULL) { con = active_console; } - return con ? surface_height(con->surface) : fallback; + if (con == NULL) { + return fallback; + } + switch (con->scanout.kind) { + case SCANOUT_DMABUF: + return con->scanout.dmabuf->height; + case SCANOUT_TEXTURE: + return con->scanout.texture.height; + case SCANOUT_SURFACE: + return surface_height(con->surface); + default: + return fallback; + } +} + +static void vc_chr_accept_input(Chardev *chr) +{ + VCChardev *drv = VC_CHARDEV(chr); + QemuConsole *s = drv->console; + + kbd_send_chars(s); } static void vc_chr_set_echo(Chardev *chr, bool echo) @@ -2236,9 +2478,7 @@ static void text_console_do_init(Chardev *chr, DisplayState *ds) int g_width = 80 * FONT_WIDTH; int g_height = 24 * FONT_HEIGHT; - s->out_fifo.buf = s->out_fifo_buf; - s->out_fifo.buf_size = sizeof(s->out_fifo_buf); - s->kbd_timer = timer_new_ms(QEMU_CLOCK_REALTIME, kbd_send_chars, s); + fifo8_create(&s->out_fifo, 16); s->ds = ds; s->y_displayed = 0; @@ -2246,12 +2486,13 @@ static void text_console_do_init(Chardev *chr, DisplayState *ds) s->total_height = DEFAULT_BACKSCROLL; s->x = 0; s->y = 0; - if (!s->surface) { - if (active_console && active_console->surface) { - g_width = surface_width(active_console->surface); - g_height = surface_height(active_console->surface); + if (s->scanout.kind != SCANOUT_SURFACE) { + if (active_console && active_console->scanout.kind == SCANOUT_SURFACE) { + g_width = qemu_console_get_width(active_console, g_width); + g_height = qemu_console_get_height(active_console, g_height); } s->surface = qemu_create_displaysurface(g_width, g_height); + s->scanout.kind = SCANOUT_SURFACE; } s->hw_ops = &text_console_ops; @@ -2310,6 +2551,7 @@ static void vc_chr_open(Chardev *chr, s = new_console(NULL, TEXT_CONSOLE, 0); } else { s = new_console(NULL, TEXT_CONSOLE_FIXED_SIZE, 0); + s->scanout.kind = SCANOUT_SURFACE; s->surface = qemu_create_displaysurface(width, height); } @@ -2333,13 +2575,14 @@ static void vc_chr_open(Chardev *chr, void qemu_console_resize(QemuConsole *s, int width, int height) { - DisplaySurface *surface; + DisplaySurface *surface = qemu_console_surface(s); assert(s->console_type == GRAPHIC_CONSOLE); - if (s->surface && (s->surface->flags & QEMU_ALLOCATED_FLAG) && - pixman_image_get_width(s->surface->image) == width && - pixman_image_get_height(s->surface->image) == height) { + if ((s->scanout.kind != SCANOUT_SURFACE || + (surface && surface->flags & QEMU_ALLOCATED_FLAG)) && + qemu_console_get_width(s, 
-1) == width && + qemu_console_get_height(s, -1) == height) { return; } @@ -2349,7 +2592,12 @@ void qemu_console_resize(QemuConsole *s, int width, int height) DisplaySurface *qemu_console_surface(QemuConsole *console) { - return console->surface; + switch (console->scanout.kind) { + case SCANOUT_SURFACE: + return console->surface; + default: + return NULL; + } } PixelFormat qemu_default_pixelformat(int bpp) @@ -2384,7 +2632,11 @@ bool qemu_display_find_default(DisplayOptions *opts) for (i = 0; i < (int)ARRAY_SIZE(prio); i++) { if (dpys[prio[i]] == NULL) { - ui_module_load_one(DisplayType_str(prio[i])); + Error *local_err = NULL; + int rv = ui_module_load(DisplayType_str(prio[i]), &local_err); + if (rv < 0) { + error_report_err(local_err); + } } if (dpys[prio[i]] == NULL) { continue; @@ -2402,7 +2654,11 @@ void qemu_display_early_init(DisplayOptions *opts) return; } if (dpys[opts->type] == NULL) { - ui_module_load_one(DisplayType_str(opts->type)); + Error *local_err = NULL; + int rv = ui_module_load(DisplayType_str(opts->type), &local_err); + if (rv < 0) { + error_report_err(local_err); + } } if (dpys[opts->type] == NULL) { error_report("Display '%s' is not available.", @@ -2432,7 +2688,11 @@ void qemu_display_help(void) printf("none\n"); for (idx = DISPLAY_TYPE_NONE; idx < DISPLAY_TYPE__MAX; idx++) { if (!dpys[idx]) { - ui_module_load_one(DisplayType_str(idx)); + Error *local_err = NULL; + int rv = ui_module_load(DisplayType_str(idx), &local_err); + if (rv < 0) { + error_report_err(local_err); + } } if (dpys[idx]) { printf("%s\n", DisplayType_str(dpys[idx]->type)); @@ -2488,6 +2748,7 @@ static void char_vc_class_init(ObjectClass *oc, void *data) cc->parse = qemu_chr_parse_vc; cc->open = vc_chr_open; cc->chr_write = vc_chr_write; + cc->chr_accept_input = vc_chr_accept_input; cc->chr_set_echo = vc_chr_set_echo; } diff --git a/ui/curses.c b/ui/curses.c index e4f9588c3e..de962faa7c 100644 --- a/ui/curses.c +++ b/ui/curses.c @@ -38,6 +38,10 @@ #include "ui/input.h" #include "sysemu/sysemu.h" +#if defined(__APPLE__) || defined(__OpenBSD__) +#define _XOPEN_SOURCE_EXTENDED 1 +#endif + /* KEY_EVENT is defined in wincon.h and in curses.h. Avoid redefinition. 
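Earlier in this hunk, qemu_console_get_width()/_height() derive the size from whichever scanout kind is active (dmabuf, texture or surface) and fall back otherwise, and qemu_console_surface() returns NULL unless the scanout is surface-backed. A compact sketch of that tagged-union lookup with hypothetical types (the real scanout carries a surface pointer, texture ids and dmabuf metadata, not a single width/height pair):

#include <stdio.h>

typedef enum { SCANOUT_NONE, SCANOUT_SURFACE, SCANOUT_TEXTURE, SCANOUT_DMABUF } ScanoutKind;

/* Hypothetical scanout description. */
typedef struct Scanout {
    ScanoutKind kind;
    int width, height;
} Scanout;

static int scanout_width(const Scanout *s, int fallback)
{
    switch (s->kind) {
    case SCANOUT_SURFACE:
    case SCANOUT_TEXTURE:
    case SCANOUT_DMABUF:
        return s->width;        /* size comes from the active scanout */
    default:
        return fallback;        /* nothing programmed yet */
    }
}

int main(void)
{
    Scanout none = { SCANOUT_NONE, 0, 0 };
    Scanout tex  = { SCANOUT_TEXTURE, 1280, 720 };

    printf("%d %d\n", scanout_width(&none, 640), scanout_width(&tex, 640));
    return 0;
}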
*/ #undef KEY_EVENT #include @@ -65,7 +69,7 @@ static void curses_update(DisplayChangeListener *dcl, int x, int y, int w, int h) { console_ch_t *line; - cchar_t curses_line[width]; + g_autofree cchar_t *curses_line = g_new(cchar_t, width); wchar_t wch[CCHARW_MAX]; attr_t attrs; short colors; diff --git a/ui/cursor.c b/ui/cursor.c index 1d62ddd4d0..835f0802f9 100644 --- a/ui/cursor.c +++ b/ui/cursor.c @@ -46,6 +46,8 @@ static QEMUCursor *cursor_parse_xpm(const char *xpm[]) /* parse pixel data */ c = cursor_alloc(width, height); + assert(c != NULL); + for (pixel = 0, y = 0; y < height; y++, line++) { for (x = 0; x < height; x++, pixel++) { idx = xpm[line][x]; @@ -91,7 +93,11 @@ QEMUCursor *cursor_builtin_left_ptr(void) QEMUCursor *cursor_alloc(int width, int height) { QEMUCursor *c; - int datasize = width * height * sizeof(uint32_t); + size_t datasize = width * height * sizeof(uint32_t); + + if (width > 512 || height > 512) { + return NULL; + } c = g_malloc0(sizeof(QEMUCursor) + datasize); c->width = width; diff --git a/ui/dbus-chardev.c b/ui/dbus-chardev.c new file mode 100644 index 0000000000..940ef937cd --- /dev/null +++ b/ui/dbus-chardev.c @@ -0,0 +1,296 @@ +/* + * QEMU DBus display + * + * Copyright (c) 2021 Marc-André Lureau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include "qemu/osdep.h" +#include "trace.h" +#include "qapi/error.h" +#include "qemu/config-file.h" +#include "qemu/option.h" + +#include + +#include "dbus.h" + +static char * +dbus_display_chardev_path(DBusChardev *chr) +{ + return g_strdup_printf(DBUS_DISPLAY1_ROOT "/Chardev_%s", + CHARDEV(chr)->label); +} + +static void +dbus_display_chardev_export(DBusDisplay *dpy, DBusChardev *chr) +{ + g_autoptr(GDBusObjectSkeleton) sk = NULL; + g_autofree char *path = dbus_display_chardev_path(chr); + + if (chr->exported) { + return; + } + + sk = g_dbus_object_skeleton_new(path); + g_dbus_object_skeleton_add_interface( + sk, G_DBUS_INTERFACE_SKELETON(chr->iface)); + g_dbus_object_manager_server_export(dpy->server, sk); + chr->exported = true; +} + +static void +dbus_display_chardev_unexport(DBusDisplay *dpy, DBusChardev *chr) +{ + g_autofree char *path = dbus_display_chardev_path(chr); + + if (!chr->exported) { + return; + } + + g_dbus_object_manager_server_unexport(dpy->server, path); + chr->exported = false; +} + +static int +dbus_display_chardev_foreach(Object *obj, void *data) +{ + DBusDisplay *dpy = DBUS_DISPLAY(data); + + if (!CHARDEV_IS_DBUS(obj)) { + return 0; + } + + dbus_display_chardev_export(dpy, DBUS_CHARDEV(obj)); + + return 0; +} + +static void +dbus_display_on_notify(Notifier *notifier, void *data) +{ + DBusDisplay *dpy = container_of(notifier, DBusDisplay, notifier); + DBusDisplayEvent *event = data; + + switch (event->type) { + case DBUS_DISPLAY_CHARDEV_OPEN: + dbus_display_chardev_export(dpy, event->chardev); + break; + case DBUS_DISPLAY_CHARDEV_CLOSE: + dbus_display_chardev_unexport(dpy, event->chardev); + break; + } +} + +void +dbus_chardev_init(DBusDisplay *dpy) +{ + dpy->notifier.notify = dbus_display_on_notify; + dbus_display_notifier_add(&dpy->notifier); + + object_child_foreach(container_get(object_get_root(), "/chardevs"), + dbus_display_chardev_foreach, dpy); +} + +static gboolean +dbus_chr_register( + DBusChardev *dc, + GDBusMethodInvocation *invocation, + GUnixFDList *fd_list, + GVariant *arg_stream, + QemuDBusDisplay1Chardev *object) +{ + g_autoptr(GError) err = NULL; + int fd; + + fd = g_unix_fd_list_get(fd_list, g_variant_get_handle(arg_stream), &err); + if (err) { + g_dbus_method_invocation_return_error( + invocation, + DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_FAILED, + "Couldn't get peer FD: %s", err->message); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + if (qemu_chr_add_client(CHARDEV(dc), fd) < 0) { + g_dbus_method_invocation_return_error(invocation, + DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_FAILED, + "Couldn't register FD!"); + close(fd); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + g_object_set(dc->iface, + "owner", g_dbus_method_invocation_get_sender(invocation), + NULL); + + qemu_dbus_display1_chardev_complete_register(object, invocation, NULL); + return DBUS_METHOD_INVOCATION_HANDLED; +} + +static gboolean +dbus_chr_send_break( + DBusChardev *dc, + GDBusMethodInvocation *invocation, + QemuDBusDisplay1Chardev *object) +{ + qemu_chr_be_event(CHARDEV(dc), CHR_EVENT_BREAK); + + qemu_dbus_display1_chardev_complete_send_break(object, invocation); + return DBUS_METHOD_INVOCATION_HANDLED; +} + +static void +dbus_chr_open(Chardev *chr, ChardevBackend *backend, + bool *be_opened, Error **errp) +{ + ERRP_GUARD(); + + DBusChardev *dc = DBUS_CHARDEV(chr); + DBusDisplayEvent event = { + .type = DBUS_DISPLAY_CHARDEV_OPEN, + .chardev = dc, + }; + g_autoptr(ChardevBackend) be = NULL; + g_autoptr(QemuOpts) opts = NULL; + + dc->iface = 
qemu_dbus_display1_chardev_skeleton_new(); + g_object_set(dc->iface, "name", backend->u.dbus.data->name, NULL); + g_object_connect(dc->iface, + "swapped-signal::handle-register", + dbus_chr_register, dc, + "swapped-signal::handle-send-break", + dbus_chr_send_break, dc, + NULL); + + dbus_display_notify(&event); + + be = g_new0(ChardevBackend, 1); + opts = qemu_opts_create(qemu_find_opts("chardev"), NULL, 0, &error_abort); + qemu_opt_set(opts, "server", "on", &error_abort); + qemu_opt_set(opts, "wait", "off", &error_abort); + CHARDEV_CLASS(object_class_by_name(TYPE_CHARDEV_SOCKET))->parse( + opts, be, errp); + if (*errp) { + return; + } + CHARDEV_CLASS(object_class_by_name(TYPE_CHARDEV_SOCKET))->open( + chr, be, be_opened, errp); +} + +static void +dbus_chr_set_fe_open(Chardev *chr, int fe_open) +{ + DBusChardev *dc = DBUS_CHARDEV(chr); + + g_object_set(dc->iface, "feopened", fe_open, NULL); +} + +static void +dbus_chr_set_echo(Chardev *chr, bool echo) +{ + DBusChardev *dc = DBUS_CHARDEV(chr); + + g_object_set(dc->iface, "echo", echo, NULL); +} + +static void +dbus_chr_be_event(Chardev *chr, QEMUChrEvent event) +{ + DBusChardev *dc = DBUS_CHARDEV(chr); + DBusChardevClass *klass = DBUS_CHARDEV_GET_CLASS(chr); + + switch (event) { + case CHR_EVENT_CLOSED: + if (dc->iface) { + /* on finalize, iface is set to NULL */ + g_object_set(dc->iface, "owner", "", NULL); + } + break; + default: + break; + }; + + klass->parent_chr_be_event(chr, event); +} + +static void +dbus_chr_parse(QemuOpts *opts, ChardevBackend *backend, + Error **errp) +{ + const char *name = qemu_opt_get(opts, "name"); + ChardevDBus *dbus; + + if (name == NULL) { + error_setg(errp, "chardev: dbus: no name given"); + return; + } + + backend->type = CHARDEV_BACKEND_KIND_DBUS; + dbus = backend->u.dbus.data = g_new0(ChardevDBus, 1); + qemu_chr_parse_common(opts, qapi_ChardevDBus_base(dbus)); + dbus->name = g_strdup(name); +} + +static void +char_dbus_class_init(ObjectClass *oc, void *data) +{ + DBusChardevClass *klass = DBUS_CHARDEV_CLASS(oc); + ChardevClass *cc = CHARDEV_CLASS(oc); + + cc->parse = dbus_chr_parse; + cc->open = dbus_chr_open; + cc->chr_set_fe_open = dbus_chr_set_fe_open; + cc->chr_set_echo = dbus_chr_set_echo; + klass->parent_chr_be_event = cc->chr_be_event; + cc->chr_be_event = dbus_chr_be_event; +} + +static void +char_dbus_finalize(Object *obj) +{ + DBusChardev *dc = DBUS_CHARDEV(obj); + DBusDisplayEvent event = { + .type = DBUS_DISPLAY_CHARDEV_CLOSE, + .chardev = dc, + }; + + dbus_display_notify(&event); + g_clear_object(&dc->iface); +} + +static const TypeInfo char_dbus_type_info = { + .name = TYPE_CHARDEV_DBUS, + .parent = TYPE_CHARDEV_SOCKET, + .class_size = sizeof(DBusChardevClass), + .instance_size = sizeof(DBusChardev), + .instance_finalize = char_dbus_finalize, + .class_init = char_dbus_class_init, +}; +module_obj(TYPE_CHARDEV_DBUS); + +static void +register_types(void) +{ + type_register_static(&char_dbus_type_info); +} + +type_init(register_types); diff --git a/ui/dbus-clipboard.c b/ui/dbus-clipboard.c new file mode 100644 index 0000000000..5843d26cd2 --- /dev/null +++ b/ui/dbus-clipboard.c @@ -0,0 +1,457 @@ +/* + * QEMU DBus display + * + * Copyright (c) 2021 Marc-André Lureau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * 
copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "qemu/dbus.h" +#include "qemu/main-loop.h" +#include "qom/object_interfaces.h" +#include "sysemu/sysemu.h" +#include "qapi/error.h" +#include "trace.h" + +#include "dbus.h" + +#define MIME_TEXT_PLAIN_UTF8 "text/plain;charset=utf-8" + +static void +dbus_clipboard_complete_request( + DBusDisplay *dpy, + GDBusMethodInvocation *invocation, + QemuClipboardInfo *info, + QemuClipboardType type) +{ + GVariant *v_data = g_variant_new_from_data( + G_VARIANT_TYPE("ay"), + info->types[type].data, + info->types[type].size, + TRUE, + (GDestroyNotify)qemu_clipboard_info_unref, + qemu_clipboard_info_ref(info)); + + qemu_dbus_display1_clipboard_complete_request( + dpy->clipboard, invocation, + MIME_TEXT_PLAIN_UTF8, v_data); +} + +static void +dbus_clipboard_update_info(DBusDisplay *dpy, QemuClipboardInfo *info) +{ + bool self_update = info->owner == &dpy->clipboard_peer; + const char *mime[QEMU_CLIPBOARD_TYPE__COUNT + 1] = { 0, }; + DBusClipboardRequest *req; + int i = 0; + + if (info->owner == NULL) { + if (dpy->clipboard_proxy) { + qemu_dbus_display1_clipboard_call_release( + dpy->clipboard_proxy, + info->selection, + G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL); + } + return; + } + + if (self_update || !info->has_serial) { + return; + } + + req = &dpy->clipboard_request[info->selection]; + if (req->invocation && info->types[req->type].data) { + dbus_clipboard_complete_request(dpy, req->invocation, info, req->type); + g_clear_object(&req->invocation); + g_source_remove(req->timeout_id); + req->timeout_id = 0; + return; + } + + if (info->types[QEMU_CLIPBOARD_TYPE_TEXT].available) { + mime[i++] = MIME_TEXT_PLAIN_UTF8; + } + + if (i > 0) { + if (dpy->clipboard_proxy) { + qemu_dbus_display1_clipboard_call_grab( + dpy->clipboard_proxy, + info->selection, + info->serial, + mime, + G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL); + } + } +} + +static void +dbus_clipboard_reset_serial(DBusDisplay *dpy) +{ + if (dpy->clipboard_proxy) { + qemu_dbus_display1_clipboard_call_register( + dpy->clipboard_proxy, + G_DBUS_CALL_FLAGS_NONE, + -1, NULL, NULL, NULL); + } +} + +static void +dbus_clipboard_notify(Notifier *notifier, void *data) +{ + DBusDisplay *dpy = + container_of(notifier, DBusDisplay, clipboard_peer.notifier); + QemuClipboardNotify *notify = data; + + switch (notify->type) { + case QEMU_CLIPBOARD_UPDATE_INFO: + dbus_clipboard_update_info(dpy, notify->info); + return; + case QEMU_CLIPBOARD_RESET_SERIAL: + dbus_clipboard_reset_serial(dpy); + return; + } +} + +static void +dbus_clipboard_qemu_request(QemuClipboardInfo *info, + QemuClipboardType type) +{ + DBusDisplay *dpy = container_of(info->owner, DBusDisplay, clipboard_peer); + g_autofree char *mime = NULL; + g_autoptr(GVariant) v_data = NULL; + g_autoptr(GError) err = 
NULL; + const char *data = NULL; + const char *mimes[] = { MIME_TEXT_PLAIN_UTF8, NULL }; + size_t n; + + if (type != QEMU_CLIPBOARD_TYPE_TEXT) { + /* unsupported atm */ + return; + } + + if (dpy->clipboard_proxy) { + if (!qemu_dbus_display1_clipboard_call_request_sync( + dpy->clipboard_proxy, + info->selection, + mimes, + G_DBUS_CALL_FLAGS_NONE, -1, &mime, &v_data, NULL, &err)) { + error_report("Failed to request clipboard: %s", err->message); + return; + } + + if (g_strcmp0(mime, MIME_TEXT_PLAIN_UTF8)) { + error_report("Unsupported returned MIME: %s", mime); + return; + } + + data = g_variant_get_fixed_array(v_data, &n, 1); + qemu_clipboard_set_data(&dpy->clipboard_peer, info, type, + n, data, true); + } +} + +static void +dbus_clipboard_request_cancelled(DBusClipboardRequest *req) +{ + if (!req->invocation) { + return; + } + + g_dbus_method_invocation_return_error( + req->invocation, + DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_FAILED, + "Cancelled clipboard request"); + + g_clear_object(&req->invocation); + g_source_remove(req->timeout_id); + req->timeout_id = 0; +} + +static void +dbus_clipboard_unregister_proxy(DBusDisplay *dpy) +{ + const char *name = NULL; + int i; + + for (i = 0; i < G_N_ELEMENTS(dpy->clipboard_request); ++i) { + dbus_clipboard_request_cancelled(&dpy->clipboard_request[i]); + } + + if (!dpy->clipboard_proxy) { + return; + } + + name = g_dbus_proxy_get_name(G_DBUS_PROXY(dpy->clipboard_proxy)); + trace_dbus_clipboard_unregister(name); + g_clear_object(&dpy->clipboard_proxy); +} + +static void +dbus_on_clipboard_proxy_name_owner_changed( + DBusDisplay *dpy, + GObject *object, + GParamSpec *pspec) +{ + dbus_clipboard_unregister_proxy(dpy); +} + +static gboolean +dbus_clipboard_register( + DBusDisplay *dpy, + GDBusMethodInvocation *invocation) +{ + g_autoptr(GError) err = NULL; + const char *name = NULL; + + if (dpy->clipboard_proxy) { + g_dbus_method_invocation_return_error( + invocation, + DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_FAILED, + "Clipboard peer already registered!"); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + dpy->clipboard_proxy = + qemu_dbus_display1_clipboard_proxy_new_sync( + g_dbus_method_invocation_get_connection(invocation), + G_DBUS_PROXY_FLAGS_DO_NOT_AUTO_START, + g_dbus_method_invocation_get_sender(invocation), + "/org/qemu/Display1/Clipboard", + NULL, + &err); + if (!dpy->clipboard_proxy) { + g_dbus_method_invocation_return_error( + invocation, + DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_FAILED, + "Failed to setup proxy: %s", err->message); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + name = g_dbus_proxy_get_name(G_DBUS_PROXY(dpy->clipboard_proxy)); + trace_dbus_clipboard_register(name); + + g_object_connect(dpy->clipboard_proxy, + "swapped-signal::notify::g-name-owner", + dbus_on_clipboard_proxy_name_owner_changed, dpy, + NULL); + qemu_clipboard_reset_serial(); + + qemu_dbus_display1_clipboard_complete_register(dpy->clipboard, invocation); + return DBUS_METHOD_INVOCATION_HANDLED; +} + +static gboolean +dbus_clipboard_check_caller(DBusDisplay *dpy, GDBusMethodInvocation *invocation) +{ + if (!dpy->clipboard_proxy || + g_strcmp0(g_dbus_proxy_get_name(G_DBUS_PROXY(dpy->clipboard_proxy)), + g_dbus_method_invocation_get_sender(invocation))) { + g_dbus_method_invocation_return_error( + invocation, + DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_FAILED, + "Unregistered caller"); + return FALSE; + } + + return TRUE; +} + +static gboolean +dbus_clipboard_unregister( + DBusDisplay *dpy, + GDBusMethodInvocation *invocation) +{ + if 
(!dbus_clipboard_check_caller(dpy, invocation)) { + return DBUS_METHOD_INVOCATION_HANDLED; + } + + dbus_clipboard_unregister_proxy(dpy); + + qemu_dbus_display1_clipboard_complete_unregister( + dpy->clipboard, invocation); + + return DBUS_METHOD_INVOCATION_HANDLED; +} + +static gboolean +dbus_clipboard_grab( + DBusDisplay *dpy, + GDBusMethodInvocation *invocation, + gint arg_selection, + guint arg_serial, + const gchar *const *arg_mimes) +{ + QemuClipboardSelection s = arg_selection; + g_autoptr(QemuClipboardInfo) info = NULL; + + if (!dbus_clipboard_check_caller(dpy, invocation)) { + return DBUS_METHOD_INVOCATION_HANDLED; + } + + if (s >= QEMU_CLIPBOARD_SELECTION__COUNT) { + g_dbus_method_invocation_return_error( + invocation, + DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_FAILED, + "Invalid clipboard selection: %d", arg_selection); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + info = qemu_clipboard_info_new(&dpy->clipboard_peer, s); + if (g_strv_contains(arg_mimes, MIME_TEXT_PLAIN_UTF8)) { + info->types[QEMU_CLIPBOARD_TYPE_TEXT].available = true; + } + info->serial = arg_serial; + info->has_serial = true; + if (qemu_clipboard_check_serial(info, true)) { + qemu_clipboard_update(info); + } else { + trace_dbus_clipboard_grab_failed(); + } + + qemu_dbus_display1_clipboard_complete_grab(dpy->clipboard, invocation); + return DBUS_METHOD_INVOCATION_HANDLED; +} + +static gboolean +dbus_clipboard_release( + DBusDisplay *dpy, + GDBusMethodInvocation *invocation, + gint arg_selection) +{ + if (!dbus_clipboard_check_caller(dpy, invocation)) { + return DBUS_METHOD_INVOCATION_HANDLED; + } + + qemu_clipboard_peer_release(&dpy->clipboard_peer, arg_selection); + + qemu_dbus_display1_clipboard_complete_release(dpy->clipboard, invocation); + return DBUS_METHOD_INVOCATION_HANDLED; +} + +static gboolean +dbus_clipboard_request_timeout(gpointer user_data) +{ + dbus_clipboard_request_cancelled(user_data); + return G_SOURCE_REMOVE; +} + +static gboolean +dbus_clipboard_request( + DBusDisplay *dpy, + GDBusMethodInvocation *invocation, + gint arg_selection, + const gchar *const *arg_mimes) +{ + QemuClipboardSelection s = arg_selection; + QemuClipboardType type = QEMU_CLIPBOARD_TYPE_TEXT; + QemuClipboardInfo *info = NULL; + + if (!dbus_clipboard_check_caller(dpy, invocation)) { + return DBUS_METHOD_INVOCATION_HANDLED; + } + + if (s >= QEMU_CLIPBOARD_SELECTION__COUNT) { + g_dbus_method_invocation_return_error( + invocation, + DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_FAILED, + "Invalid clipboard selection: %d", arg_selection); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + if (dpy->clipboard_request[s].invocation) { + g_dbus_method_invocation_return_error( + invocation, + DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_FAILED, + "Pending request"); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + info = qemu_clipboard_info(s); + if (!info || !info->owner || info->owner == &dpy->clipboard_peer) { + g_dbus_method_invocation_return_error( + invocation, + DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_FAILED, + "Empty clipboard"); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + if (!g_strv_contains(arg_mimes, MIME_TEXT_PLAIN_UTF8) || + !info->types[type].available) { + g_dbus_method_invocation_return_error( + invocation, + DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_FAILED, + "Unhandled MIME types requested"); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + if (info->types[type].data) { + dbus_clipboard_complete_request(dpy, invocation, info, type); + } else { + qemu_clipboard_request(info, type); + + 
dpy->clipboard_request[s].invocation = g_object_ref(invocation); + dpy->clipboard_request[s].type = type; + dpy->clipboard_request[s].timeout_id = + g_timeout_add_seconds(5, dbus_clipboard_request_timeout, + &dpy->clipboard_request[s]); + } + + return DBUS_METHOD_INVOCATION_HANDLED; +} + +void +dbus_clipboard_init(DBusDisplay *dpy) +{ + g_autoptr(GDBusObjectSkeleton) clipboard = NULL; + + assert(!dpy->clipboard); + + clipboard = g_dbus_object_skeleton_new(DBUS_DISPLAY1_ROOT "/Clipboard"); + dpy->clipboard = qemu_dbus_display1_clipboard_skeleton_new(); + g_object_connect(dpy->clipboard, + "swapped-signal::handle-register", + dbus_clipboard_register, dpy, + "swapped-signal::handle-unregister", + dbus_clipboard_unregister, dpy, + "swapped-signal::handle-grab", + dbus_clipboard_grab, dpy, + "swapped-signal::handle-release", + dbus_clipboard_release, dpy, + "swapped-signal::handle-request", + dbus_clipboard_request, dpy, + NULL); + + g_dbus_object_skeleton_add_interface( + G_DBUS_OBJECT_SKELETON(clipboard), + G_DBUS_INTERFACE_SKELETON(dpy->clipboard)); + g_dbus_object_manager_server_export(dpy->server, clipboard); + dpy->clipboard_peer.name = "dbus"; + dpy->clipboard_peer.notifier.notify = dbus_clipboard_notify; + dpy->clipboard_peer.request = dbus_clipboard_qemu_request; + qemu_clipboard_peer_register(&dpy->clipboard_peer); +} diff --git a/ui/dbus-console.c b/ui/dbus-console.c new file mode 100644 index 0000000000..898a4ac8a5 --- /dev/null +++ b/ui/dbus-console.c @@ -0,0 +1,496 @@ +/* + * QEMU DBus display console + * + * Copyright (c) 2021 Marc-André Lureau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "ui/input.h" +#include "ui/kbd-state.h" +#include "trace.h" + +#include + +#include "dbus.h" + +struct _DBusDisplayConsole { + GDBusObjectSkeleton parent_instance; + DisplayChangeListener dcl; + + DBusDisplay *display; + GHashTable *listeners; + QemuDBusDisplay1Console *iface; + + QemuDBusDisplay1Keyboard *iface_kbd; + QKbdState *kbd; + + QemuDBusDisplay1Mouse *iface_mouse; + gboolean last_set; + guint last_x; + guint last_y; + Notifier mouse_mode_notifier; +}; + +G_DEFINE_TYPE(DBusDisplayConsole, + dbus_display_console, + G_TYPE_DBUS_OBJECT_SKELETON) + +static void +dbus_display_console_set_size(DBusDisplayConsole *ddc, + uint32_t width, uint32_t height) +{ + g_object_set(ddc->iface, + "width", width, + "height", height, + NULL); +} + +static void +dbus_gfx_switch(DisplayChangeListener *dcl, + struct DisplaySurface *new_surface) +{ + DBusDisplayConsole *ddc = container_of(dcl, DBusDisplayConsole, dcl); + + dbus_display_console_set_size(ddc, + surface_width(new_surface), + surface_height(new_surface)); +} + +static void +dbus_gfx_update(DisplayChangeListener *dcl, + int x, int y, int w, int h) +{ +} + +static void +dbus_gl_scanout_disable(DisplayChangeListener *dcl) +{ +} + +static void +dbus_gl_scanout_texture(DisplayChangeListener *dcl, + uint32_t tex_id, + bool backing_y_0_top, + uint32_t backing_width, + uint32_t backing_height, + uint32_t x, uint32_t y, + uint32_t w, uint32_t h) +{ + DBusDisplayConsole *ddc = container_of(dcl, DBusDisplayConsole, dcl); + + dbus_display_console_set_size(ddc, w, h); +} + +static void +dbus_gl_scanout_dmabuf(DisplayChangeListener *dcl, + QemuDmaBuf *dmabuf) +{ + DBusDisplayConsole *ddc = container_of(dcl, DBusDisplayConsole, dcl); + + dbus_display_console_set_size(ddc, + dmabuf->width, + dmabuf->height); +} + +static void +dbus_gl_scanout_update(DisplayChangeListener *dcl, + uint32_t x, uint32_t y, + uint32_t w, uint32_t h) +{ +} + +const DisplayChangeListenerOps dbus_console_dcl_ops = { + .dpy_name = "dbus-console", + .dpy_gfx_switch = dbus_gfx_switch, + .dpy_gfx_update = dbus_gfx_update, + .dpy_gl_scanout_disable = dbus_gl_scanout_disable, + .dpy_gl_scanout_texture = dbus_gl_scanout_texture, + .dpy_gl_scanout_dmabuf = dbus_gl_scanout_dmabuf, + .dpy_gl_update = dbus_gl_scanout_update, +}; + +static void +dbus_display_console_init(DBusDisplayConsole *object) +{ + DBusDisplayConsole *ddc = DBUS_DISPLAY_CONSOLE(object); + + ddc->listeners = g_hash_table_new_full(g_str_hash, g_str_equal, + NULL, g_object_unref); + ddc->dcl.ops = &dbus_console_dcl_ops; +} + +static void +dbus_display_console_dispose(GObject *object) +{ + DBusDisplayConsole *ddc = DBUS_DISPLAY_CONSOLE(object); + + unregister_displaychangelistener(&ddc->dcl); + g_clear_object(&ddc->iface_kbd); + g_clear_object(&ddc->iface); + g_clear_pointer(&ddc->listeners, g_hash_table_unref); + g_clear_pointer(&ddc->kbd, qkbd_state_free); + + G_OBJECT_CLASS(dbus_display_console_parent_class)->dispose(object); +} + +static void +dbus_display_console_class_init(DBusDisplayConsoleClass *klass) +{ + GObjectClass *gobject_class = G_OBJECT_CLASS(klass); + + gobject_class->dispose = dbus_display_console_dispose; +} + +static void +listener_vanished_cb(DBusDisplayListener *listener) +{ + DBusDisplayConsole *ddc = dbus_display_listener_get_console(listener); + const char *name = dbus_display_listener_get_bus_name(listener); + + trace_dbus_listener_vanished(name); + + g_hash_table_remove(ddc->listeners, name); + qkbd_state_lift_all_keys(ddc->kbd); +} + 
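Annotation (not part of the patch): the console skeleton added above is what a remote display client talks to when it registers itself, which the RegisterListener handler just below implements by accepting a unix FD and turning it into a private peer-to-peer D-Bus connection. A minimal, hypothetical client-side sketch of that handshake follows. The interface and method names ("org.qemu.Display1.Console", "RegisterListener") and the "/org/qemu/Display1/Console_0" object path are inferred from the generated qemu_dbus_display1_console_* symbols and DBUS_DISPLAY1_ROOT used in this diff, so treat them as assumptions rather than a documented API.

/*
 * Hypothetical client sketch: pass one end of a socketpair to the exported
 * console object, then speak peer-to-peer D-Bus on the other end, where the
 * client would export its own org.qemu.Display1.Listener implementation
 * (skeleton generated from ui/dbus-display1.xml).
 */
#include <gio/gio.h>
#include <gio/gunixfdlist.h>
#include <sys/socket.h>
#include <unistd.h>

static GDBusConnection *
register_listener(GDBusConnection *qemu_conn, GError **err)
{
    int fds[2];
    g_autoptr(GUnixFDList) fd_list = NULL;
    g_autoptr(GSocket) sock = NULL;
    g_autoptr(GSocketConnection) sock_conn = NULL;
    g_autoptr(GVariant) reply = NULL;

    /* fds[0] goes to QEMU, fds[1] carries our Listener implementation. */
    if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0) {
        g_set_error(err, G_IO_ERROR, G_IO_ERROR_FAILED, "socketpair failed");
        return NULL;
    }

    fd_list = g_unix_fd_list_new();
    g_unix_fd_list_append(fd_list, fds[0], NULL); /* dups the fd */
    close(fds[0]);

    reply = g_dbus_connection_call_with_unix_fd_list_sync(
        qemu_conn,
        NULL,                              /* destination: NULL for p2p,
                                              "org.qemu" on a message bus */
        "/org/qemu/Display1/Console_0",    /* assumed object path */
        "org.qemu.Display1.Console",       /* assumed interface name */
        "RegisterListener",
        g_variant_new("(h)", 0),           /* handle index into fd_list */
        NULL, G_DBUS_CALL_FLAGS_NONE, -1,
        fd_list, NULL, NULL, err);
    if (!reply) {
        close(fds[1]);
        return NULL;
    }

    /*
     * QEMU authenticates as the D-Bus server on its end (see
     * dbus_console_register_listener below); the client side therefore
     * connects with the plain client authentication flags.
     */
    sock = g_socket_new_from_fd(fds[1], err);
    if (!sock) {
        close(fds[1]);
        return NULL;
    }
    sock_conn = g_socket_connection_factory_create_connection(sock);
    return g_dbus_connection_new_sync(G_IO_STREAM(sock_conn), NULL,
                                      G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_CLIENT,
                                      NULL, NULL, err);
}

On the connection returned by this sketch, the listener would receive Scanout/Update/CursorDefine calls from the DBusDisplayListener proxy added later in this series; the sketch stops at connection setup since the listener skeleton itself is code-generated.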
+static gboolean +dbus_console_set_ui_info(DBusDisplayConsole *ddc, + GDBusMethodInvocation *invocation, + guint16 arg_width_mm, + guint16 arg_height_mm, + gint arg_xoff, + gint arg_yoff, + guint arg_width, + guint arg_height) +{ + QemuUIInfo info = { + .width_mm = arg_width_mm, + .height_mm = arg_height_mm, + .xoff = arg_xoff, + .yoff = arg_yoff, + .width = arg_width, + .height = arg_height, + }; + + if (!dpy_ui_info_supported(ddc->dcl.con)) { + g_dbus_method_invocation_return_error(invocation, + DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_UNSUPPORTED, + "SetUIInfo is not supported"); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + dpy_set_ui_info(ddc->dcl.con, &info, false); + qemu_dbus_display1_console_complete_set_uiinfo(ddc->iface, invocation); + return DBUS_METHOD_INVOCATION_HANDLED; +} + +static gboolean +dbus_console_register_listener(DBusDisplayConsole *ddc, + GDBusMethodInvocation *invocation, + GUnixFDList *fd_list, + GVariant *arg_listener) +{ + const char *sender = g_dbus_method_invocation_get_sender(invocation); + GDBusConnection *listener_conn; + g_autoptr(GError) err = NULL; + g_autoptr(GSocket) socket = NULL; + g_autoptr(GSocketConnection) socket_conn = NULL; + g_autofree char *guid = g_dbus_generate_guid(); + DBusDisplayListener *listener; + int fd; + + if (sender && g_hash_table_contains(ddc->listeners, sender)) { + g_dbus_method_invocation_return_error( + invocation, + DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_INVALID, + "`%s` is already registered!", + sender); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + fd = g_unix_fd_list_get(fd_list, g_variant_get_handle(arg_listener), &err); + if (err) { + g_dbus_method_invocation_return_error( + invocation, + DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_FAILED, + "Couldn't get peer fd: %s", err->message); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + socket = g_socket_new_from_fd(fd, &err); + if (err) { + g_dbus_method_invocation_return_error( + invocation, + DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_FAILED, + "Couldn't make a socket: %s", err->message); + close(fd); + return DBUS_METHOD_INVOCATION_HANDLED; + } + socket_conn = g_socket_connection_factory_create_connection(socket); + + qemu_dbus_display1_console_complete_register_listener( + ddc->iface, invocation, NULL); + + listener_conn = g_dbus_connection_new_sync( + G_IO_STREAM(socket_conn), + guid, + G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_SERVER, + NULL, NULL, &err); + if (err) { + error_report("Failed to setup peer connection: %s", err->message); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + listener = dbus_display_listener_new(sender, listener_conn, ddc); + if (!listener) { + return DBUS_METHOD_INVOCATION_HANDLED; + } + + g_hash_table_insert(ddc->listeners, + (gpointer)dbus_display_listener_get_bus_name(listener), + listener); + g_object_connect(listener_conn, + "swapped-signal::closed", listener_vanished_cb, listener, + NULL); + + trace_dbus_registered_listener(sender); + return DBUS_METHOD_INVOCATION_HANDLED; +} + +static gboolean +dbus_kbd_press(DBusDisplayConsole *ddc, + GDBusMethodInvocation *invocation, + guint arg_keycode) +{ + QKeyCode qcode = qemu_input_key_number_to_qcode(arg_keycode); + + trace_dbus_kbd_press(arg_keycode); + + qkbd_state_key_event(ddc->kbd, qcode, true); + + qemu_dbus_display1_keyboard_complete_press(ddc->iface_kbd, invocation); + + return DBUS_METHOD_INVOCATION_HANDLED; +} + +static gboolean +dbus_kbd_release(DBusDisplayConsole *ddc, + GDBusMethodInvocation *invocation, + guint arg_keycode) +{ + QKeyCode qcode = 
qemu_input_key_number_to_qcode(arg_keycode); + + trace_dbus_kbd_release(arg_keycode); + + qkbd_state_key_event(ddc->kbd, qcode, false); + + qemu_dbus_display1_keyboard_complete_release(ddc->iface_kbd, invocation); + + return DBUS_METHOD_INVOCATION_HANDLED; +} + +static void +dbus_kbd_qemu_leds_updated(void *data, int ledstate) +{ + DBusDisplayConsole *ddc = DBUS_DISPLAY_CONSOLE(data); + + qemu_dbus_display1_keyboard_set_modifiers(ddc->iface_kbd, ledstate); +} + +static gboolean +dbus_mouse_rel_motion(DBusDisplayConsole *ddc, + GDBusMethodInvocation *invocation, + int dx, int dy) +{ + trace_dbus_mouse_rel_motion(dx, dy); + + if (qemu_input_is_absolute()) { + g_dbus_method_invocation_return_error( + invocation, DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_INVALID, + "Mouse is not relative"); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + qemu_input_queue_rel(ddc->dcl.con, INPUT_AXIS_X, dx); + qemu_input_queue_rel(ddc->dcl.con, INPUT_AXIS_Y, dy); + qemu_input_event_sync(); + + qemu_dbus_display1_mouse_complete_rel_motion(ddc->iface_mouse, + invocation); + + return DBUS_METHOD_INVOCATION_HANDLED; +} + +static gboolean +dbus_mouse_set_pos(DBusDisplayConsole *ddc, + GDBusMethodInvocation *invocation, + guint x, guint y) +{ + int width, height; + + trace_dbus_mouse_set_pos(x, y); + + if (!qemu_input_is_absolute()) { + g_dbus_method_invocation_return_error( + invocation, DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_INVALID, + "Mouse is not absolute"); + return DBUS_METHOD_INVOCATION_HANDLED; + } + + width = qemu_console_get_width(ddc->dcl.con, 0); + height = qemu_console_get_height(ddc->dcl.con, 0); + if (x >= width || y >= height) { + g_dbus_method_invocation_return_error( + invocation, DBUS_DISPLAY_ERROR, + DBUS_DISPLAY_ERROR_INVALID, + "Invalid mouse position"); + return DBUS_METHOD_INVOCATION_HANDLED; + } + qemu_input_queue_abs(ddc->dcl.con, INPUT_AXIS_X, x, 0, width); + qemu_input_queue_abs(ddc->dcl.con, INPUT_AXIS_Y, y, 0, height); + qemu_input_event_sync(); + + qemu_dbus_display1_mouse_complete_set_abs_position(ddc->iface_mouse, + invocation); + + return DBUS_METHOD_INVOCATION_HANDLED; +} + +static gboolean +dbus_mouse_press(DBusDisplayConsole *ddc, + GDBusMethodInvocation *invocation, + guint button) +{ + trace_dbus_mouse_press(button); + + qemu_input_queue_btn(ddc->dcl.con, button, true); + qemu_input_event_sync(); + + qemu_dbus_display1_mouse_complete_press(ddc->iface_mouse, invocation); + + return DBUS_METHOD_INVOCATION_HANDLED; +} + +static gboolean +dbus_mouse_release(DBusDisplayConsole *ddc, + GDBusMethodInvocation *invocation, + guint button) +{ + trace_dbus_mouse_release(button); + + qemu_input_queue_btn(ddc->dcl.con, button, false); + qemu_input_event_sync(); + + qemu_dbus_display1_mouse_complete_release(ddc->iface_mouse, invocation); + + return DBUS_METHOD_INVOCATION_HANDLED; +} + +static void +dbus_mouse_mode_change(Notifier *notify, void *data) +{ + DBusDisplayConsole *ddc = + container_of(notify, DBusDisplayConsole, mouse_mode_notifier); + + g_object_set(ddc->iface_mouse, + "is-absolute", qemu_input_is_absolute(), + NULL); +} + +int dbus_display_console_get_index(DBusDisplayConsole *ddc) +{ + return qemu_console_get_index(ddc->dcl.con); +} + +DBusDisplayConsole * +dbus_display_console_new(DBusDisplay *display, QemuConsole *con) +{ + g_autofree char *path = NULL; + g_autofree char *label = NULL; + char device_addr[256] = ""; + DBusDisplayConsole *ddc; + int idx; + + assert(display); + assert(con); + + label = qemu_console_get_label(con); + idx = qemu_console_get_index(con); + path = 
g_strdup_printf(DBUS_DISPLAY1_ROOT "/Console_%d", idx); + ddc = g_object_new(DBUS_DISPLAY_TYPE_CONSOLE, + "g-object-path", path, + NULL); + ddc->display = display; + ddc->dcl.con = con; + /* handle errors, and skip non graphics? */ + qemu_console_fill_device_address( + con, device_addr, sizeof(device_addr), NULL); + + ddc->iface = qemu_dbus_display1_console_skeleton_new(); + g_object_set(ddc->iface, + "label", label, + "type", qemu_console_is_graphic(con) ? "Graphic" : "Text", + "head", qemu_console_get_head(con), + "width", qemu_console_get_width(con, 0), + "height", qemu_console_get_height(con, 0), + "device-address", device_addr, + NULL); + g_object_connect(ddc->iface, + "swapped-signal::handle-register-listener", + dbus_console_register_listener, ddc, + "swapped-signal::handle-set-uiinfo", + dbus_console_set_ui_info, ddc, + NULL); + g_dbus_object_skeleton_add_interface(G_DBUS_OBJECT_SKELETON(ddc), + G_DBUS_INTERFACE_SKELETON(ddc->iface)); + + ddc->kbd = qkbd_state_init(con); + ddc->iface_kbd = qemu_dbus_display1_keyboard_skeleton_new(); + qemu_add_led_event_handler(dbus_kbd_qemu_leds_updated, ddc); + g_object_connect(ddc->iface_kbd, + "swapped-signal::handle-press", dbus_kbd_press, ddc, + "swapped-signal::handle-release", dbus_kbd_release, ddc, + NULL); + g_dbus_object_skeleton_add_interface(G_DBUS_OBJECT_SKELETON(ddc), + G_DBUS_INTERFACE_SKELETON(ddc->iface_kbd)); + + ddc->iface_mouse = qemu_dbus_display1_mouse_skeleton_new(); + g_object_connect(ddc->iface_mouse, + "swapped-signal::handle-set-abs-position", dbus_mouse_set_pos, ddc, + "swapped-signal::handle-rel-motion", dbus_mouse_rel_motion, ddc, + "swapped-signal::handle-press", dbus_mouse_press, ddc, + "swapped-signal::handle-release", dbus_mouse_release, ddc, + NULL); + g_dbus_object_skeleton_add_interface(G_DBUS_OBJECT_SKELETON(ddc), + G_DBUS_INTERFACE_SKELETON(ddc->iface_mouse)); + + register_displaychangelistener(&ddc->dcl); + ddc->mouse_mode_notifier.notify = dbus_mouse_mode_change; + qemu_add_mouse_mode_change_notifier(&ddc->mouse_mode_notifier); + + return ddc; +} diff --git a/ui/dbus-display1.xml b/ui/dbus-display1.xml new file mode 100644 index 0000000000..c3b2293376 --- /dev/null +++ b/ui/dbus-display1.xml @@ -0,0 +1,761 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ui/dbus-error.c b/ui/dbus-error.c new file mode 100644 index 0000000000..85a9194d57 --- /dev/null +++ b/ui/dbus-error.c @@ -0,0 +1,48 @@ +/* + * QEMU DBus display errors + * + * Copyright (c) 2021 Marc-André Lureau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following 
conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "dbus.h" + +static const GDBusErrorEntry dbus_display_error_entries[] = { + { DBUS_DISPLAY_ERROR_FAILED, "org.qemu.Display1.Error.Failed" }, + { DBUS_DISPLAY_ERROR_INVALID, "org.qemu.Display1.Error.Invalid" }, + { DBUS_DISPLAY_ERROR_UNSUPPORTED, "org.qemu.Display1.Error.Unsupported" }, +}; + +G_STATIC_ASSERT(G_N_ELEMENTS(dbus_display_error_entries) == + DBUS_DISPLAY_N_ERRORS); + +GQuark +dbus_display_error_quark(void) +{ + static gsize quark; + + g_dbus_error_register_error_domain( + "dbus-display-error-quark", + &quark, + dbus_display_error_entries, + G_N_ELEMENTS(dbus_display_error_entries)); + + return (GQuark)quark; +} diff --git a/ui/dbus-listener.c b/ui/dbus-listener.c new file mode 100644 index 0000000000..f9fc8eda51 --- /dev/null +++ b/ui/dbus-listener.c @@ -0,0 +1,478 @@ +/* + * QEMU DBus display console + * + * Copyright (c) 2021 Marc-André Lureau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include "qemu/osdep.h" +#include "sysemu/sysemu.h" +#include "dbus.h" +#include + +#include "ui/shader.h" +#include "ui/egl-helpers.h" +#include "ui/egl-context.h" +#include "trace.h" + +struct _DBusDisplayListener { + GObject parent; + + char *bus_name; + DBusDisplayConsole *console; + GDBusConnection *conn; + + QemuDBusDisplay1Listener *proxy; + + DisplayChangeListener dcl; + DisplaySurface *ds; + int gl_updates; +}; + +G_DEFINE_TYPE(DBusDisplayListener, dbus_display_listener, G_TYPE_OBJECT) + +static void dbus_update_gl_cb(GObject *source_object, + GAsyncResult *res, + gpointer user_data) +{ + g_autoptr(GError) err = NULL; + DBusDisplayListener *ddl = user_data; + + if (!qemu_dbus_display1_listener_call_update_dmabuf_finish(ddl->proxy, + res, &err)) { + error_report("Failed to call update: %s", err->message); + } + + graphic_hw_gl_block(ddl->dcl.con, false); + g_object_unref(ddl); +} + +static void dbus_call_update_gl(DBusDisplayListener *ddl, + int x, int y, int w, int h) +{ + graphic_hw_gl_block(ddl->dcl.con, true); + glFlush(); + qemu_dbus_display1_listener_call_update_dmabuf(ddl->proxy, + x, y, w, h, + G_DBUS_CALL_FLAGS_NONE, + DBUS_DEFAULT_TIMEOUT, NULL, + dbus_update_gl_cb, + g_object_ref(ddl)); +} + +static void dbus_scanout_disable(DisplayChangeListener *dcl) +{ + DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl); + + ddl->ds = NULL; + qemu_dbus_display1_listener_call_disable( + ddl->proxy, G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL); +} + +static void dbus_scanout_dmabuf(DisplayChangeListener *dcl, + QemuDmaBuf *dmabuf) +{ + DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl); + g_autoptr(GError) err = NULL; + g_autoptr(GUnixFDList) fd_list = NULL; + + fd_list = g_unix_fd_list_new(); + if (g_unix_fd_list_append(fd_list, dmabuf->fd, &err) != 0) { + error_report("Failed to setup dmabuf fdlist: %s", err->message); + return; + } + + qemu_dbus_display1_listener_call_scanout_dmabuf( + ddl->proxy, + g_variant_new_handle(0), + dmabuf->width, + dmabuf->height, + dmabuf->stride, + dmabuf->fourcc, + dmabuf->modifier, + dmabuf->y0_top, + G_DBUS_CALL_FLAGS_NONE, + -1, + fd_list, + NULL, NULL, NULL); +} + +static void dbus_scanout_texture(DisplayChangeListener *dcl, + uint32_t tex_id, + bool backing_y_0_top, + uint32_t backing_width, + uint32_t backing_height, + uint32_t x, uint32_t y, + uint32_t w, uint32_t h) +{ + QemuDmaBuf dmabuf = { + .width = backing_width, + .height = backing_height, + .y0_top = backing_y_0_top, + }; + + assert(tex_id); + dmabuf.fd = egl_get_fd_for_texture( + tex_id, (EGLint *)&dmabuf.stride, + (EGLint *)&dmabuf.fourcc, + &dmabuf.modifier); + if (dmabuf.fd < 0) { + error_report("%s: failed to get fd for texture", __func__); + return; + } + + dbus_scanout_dmabuf(dcl, &dmabuf); + close(dmabuf.fd); +} + +static void dbus_cursor_dmabuf(DisplayChangeListener *dcl, + QemuDmaBuf *dmabuf, bool have_hot, + uint32_t hot_x, uint32_t hot_y) +{ + DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl); + DisplaySurface *ds; + GVariant *v_data = NULL; + egl_fb cursor_fb; + + if (!dmabuf) { + qemu_dbus_display1_listener_call_mouse_set( + ddl->proxy, 0, 0, false, + G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL); + return; + } + + egl_dmabuf_import_texture(dmabuf); + if (!dmabuf->texture) { + return; + } + egl_fb_setup_for_tex(&cursor_fb, dmabuf->width, dmabuf->height, + dmabuf->texture, false); + ds = qemu_create_displaysurface(dmabuf->width, dmabuf->height); + egl_fb_read(ds, &cursor_fb); + + v_data = 
g_variant_new_from_data( + G_VARIANT_TYPE("ay"), + surface_data(ds), + surface_width(ds) * surface_height(ds) * 4, + TRUE, + (GDestroyNotify)qemu_free_displaysurface, + ds); + qemu_dbus_display1_listener_call_cursor_define( + ddl->proxy, + surface_width(ds), + surface_height(ds), + hot_x, + hot_y, + v_data, + G_DBUS_CALL_FLAGS_NONE, + -1, + NULL, + NULL, + NULL); +} + +static void dbus_cursor_position(DisplayChangeListener *dcl, + uint32_t pos_x, uint32_t pos_y) +{ + DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl); + + qemu_dbus_display1_listener_call_mouse_set( + ddl->proxy, pos_x, pos_y, true, + G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL); +} + +static void dbus_release_dmabuf(DisplayChangeListener *dcl, + QemuDmaBuf *dmabuf) +{ + dbus_scanout_disable(dcl); +} + +static void dbus_scanout_update(DisplayChangeListener *dcl, + uint32_t x, uint32_t y, + uint32_t w, uint32_t h) +{ + DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl); + + dbus_call_update_gl(ddl, x, y, w, h); +} + +static void dbus_gl_refresh(DisplayChangeListener *dcl) +{ + DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl); + + graphic_hw_update(dcl->con); + + if (!ddl->ds || qemu_console_is_gl_blocked(ddl->dcl.con)) { + return; + } + + if (ddl->gl_updates) { + dbus_call_update_gl(ddl, 0, 0, + surface_width(ddl->ds), surface_height(ddl->ds)); + ddl->gl_updates = 0; + } +} + +static void dbus_refresh(DisplayChangeListener *dcl) +{ + graphic_hw_update(dcl->con); +} + +static void dbus_gl_gfx_update(DisplayChangeListener *dcl, + int x, int y, int w, int h) +{ + DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl); + + ddl->gl_updates++; +} + +static void dbus_gfx_update(DisplayChangeListener *dcl, + int x, int y, int w, int h) +{ + DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl); + pixman_image_t *img; + GVariant *v_data; + size_t stride; + + assert(ddl->ds); + stride = w * DIV_ROUND_UP(PIXMAN_FORMAT_BPP(surface_format(ddl->ds)), 8); + + trace_dbus_update(x, y, w, h); + + if (x == 0 && y == 0 && w == surface_width(ddl->ds) && h == surface_height(ddl->ds)) { + v_data = g_variant_new_from_data( + G_VARIANT_TYPE("ay"), + surface_data(ddl->ds), + surface_stride(ddl->ds) * surface_height(ddl->ds), + TRUE, + (GDestroyNotify)pixman_image_unref, + pixman_image_ref(ddl->ds->image)); + qemu_dbus_display1_listener_call_scanout( + ddl->proxy, + surface_width(ddl->ds), + surface_height(ddl->ds), + surface_stride(ddl->ds), + surface_format(ddl->ds), + v_data, + G_DBUS_CALL_FLAGS_NONE, + DBUS_DEFAULT_TIMEOUT, NULL, NULL, NULL); + return; + } + + /* make a copy, since gvariant only handles linear data */ + img = pixman_image_create_bits(surface_format(ddl->ds), + w, h, NULL, stride); + pixman_image_composite(PIXMAN_OP_SRC, ddl->ds->image, NULL, img, + x, y, 0, 0, 0, 0, w, h); + + v_data = g_variant_new_from_data( + G_VARIANT_TYPE("ay"), + pixman_image_get_data(img), + pixman_image_get_stride(img) * h, + TRUE, + (GDestroyNotify)pixman_image_unref, + img); + qemu_dbus_display1_listener_call_update(ddl->proxy, + x, y, w, h, pixman_image_get_stride(img), pixman_image_get_format(img), + v_data, + G_DBUS_CALL_FLAGS_NONE, + DBUS_DEFAULT_TIMEOUT, NULL, NULL, NULL); +} + +static void dbus_gl_gfx_switch(DisplayChangeListener *dcl, + struct DisplaySurface *new_surface) +{ + DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl); + + ddl->ds = new_surface; + if (ddl->ds) { + int width = surface_width(ddl->ds); + int height = 
surface_height(ddl->ds); + + /* TODO: lazy send dmabuf (there are unnecessary sent otherwise) */ + dbus_scanout_texture(&ddl->dcl, ddl->ds->texture, false, + width, height, 0, 0, width, height); + } +} + +static void dbus_gfx_switch(DisplayChangeListener *dcl, + struct DisplaySurface *new_surface) +{ + DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl); + + ddl->ds = new_surface; + if (!ddl->ds) { + /* why not call disable instead? */ + return; + } +} + +static void dbus_mouse_set(DisplayChangeListener *dcl, + int x, int y, int on) +{ + DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl); + + qemu_dbus_display1_listener_call_mouse_set( + ddl->proxy, x, y, on, G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL); +} + +static void dbus_cursor_define(DisplayChangeListener *dcl, + QEMUCursor *c) +{ + DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl); + GVariant *v_data = NULL; + + cursor_get(c); + v_data = g_variant_new_from_data( + G_VARIANT_TYPE("ay"), + c->data, + c->width * c->height * 4, + TRUE, + (GDestroyNotify)cursor_put, + c); + + qemu_dbus_display1_listener_call_cursor_define( + ddl->proxy, + c->width, + c->height, + c->hot_x, + c->hot_y, + v_data, + G_DBUS_CALL_FLAGS_NONE, + -1, + NULL, + NULL, + NULL); +} + +const DisplayChangeListenerOps dbus_gl_dcl_ops = { + .dpy_name = "dbus-gl", + .dpy_gfx_update = dbus_gl_gfx_update, + .dpy_gfx_switch = dbus_gl_gfx_switch, + .dpy_gfx_check_format = console_gl_check_format, + .dpy_refresh = dbus_gl_refresh, + .dpy_mouse_set = dbus_mouse_set, + .dpy_cursor_define = dbus_cursor_define, + + .dpy_gl_scanout_disable = dbus_scanout_disable, + .dpy_gl_scanout_texture = dbus_scanout_texture, + .dpy_gl_scanout_dmabuf = dbus_scanout_dmabuf, + .dpy_gl_cursor_dmabuf = dbus_cursor_dmabuf, + .dpy_gl_cursor_position = dbus_cursor_position, + .dpy_gl_release_dmabuf = dbus_release_dmabuf, + .dpy_gl_update = dbus_scanout_update, +}; + +const DisplayChangeListenerOps dbus_dcl_ops = { + .dpy_name = "dbus", + .dpy_gfx_update = dbus_gfx_update, + .dpy_gfx_switch = dbus_gfx_switch, + .dpy_refresh = dbus_refresh, + .dpy_mouse_set = dbus_mouse_set, + .dpy_cursor_define = dbus_cursor_define, +}; + +static void +dbus_display_listener_dispose(GObject *object) +{ + DBusDisplayListener *ddl = DBUS_DISPLAY_LISTENER(object); + + unregister_displaychangelistener(&ddl->dcl); + g_clear_object(&ddl->conn); + g_clear_pointer(&ddl->bus_name, g_free); + g_clear_object(&ddl->proxy); + + G_OBJECT_CLASS(dbus_display_listener_parent_class)->dispose(object); +} + +static void +dbus_display_listener_constructed(GObject *object) +{ + DBusDisplayListener *ddl = DBUS_DISPLAY_LISTENER(object); + + if (display_opengl) { + ddl->dcl.ops = &dbus_gl_dcl_ops; + } else { + ddl->dcl.ops = &dbus_dcl_ops; + } + + G_OBJECT_CLASS(dbus_display_listener_parent_class)->constructed(object); +} + +static void +dbus_display_listener_class_init(DBusDisplayListenerClass *klass) +{ + GObjectClass *object_class = G_OBJECT_CLASS(klass); + + object_class->dispose = dbus_display_listener_dispose; + object_class->constructed = dbus_display_listener_constructed; +} + +static void +dbus_display_listener_init(DBusDisplayListener *ddl) +{ +} + +const char * +dbus_display_listener_get_bus_name(DBusDisplayListener *ddl) +{ + return ddl->bus_name ?: "p2p"; +} + +DBusDisplayConsole * +dbus_display_listener_get_console(DBusDisplayListener *ddl) +{ + return ddl->console; +} + +DBusDisplayListener * +dbus_display_listener_new(const char *bus_name, + GDBusConnection *conn, + 
DBusDisplayConsole *console) +{ + DBusDisplayListener *ddl; + QemuConsole *con; + g_autoptr(GError) err = NULL; + + ddl = g_object_new(DBUS_DISPLAY_TYPE_LISTENER, NULL); + ddl->proxy = + qemu_dbus_display1_listener_proxy_new_sync(conn, + G_DBUS_PROXY_FLAGS_DO_NOT_AUTO_START, + NULL, + "/org/qemu/Display1/Listener", + NULL, + &err); + if (!ddl->proxy) { + error_report("Failed to setup proxy: %s", err->message); + g_object_unref(conn); + g_object_unref(ddl); + return NULL; + } + + ddl->bus_name = g_strdup(bus_name); + ddl->conn = conn; + ddl->console = console; + + con = qemu_console_lookup_by_index(dbus_display_console_get_index(console)); + assert(con); + ddl->dcl.con = con; + register_displaychangelistener(&ddl->dcl); + + return ddl; +} diff --git a/ui/dbus-module.c b/ui/dbus-module.c new file mode 100644 index 0000000000..c8771fe48c --- /dev/null +++ b/ui/dbus-module.c @@ -0,0 +1,35 @@ +/* + * D-Bus module support. + * + * Copyright (C) 2021 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "ui/dbus-module.h" + +int using_dbus_display; + +static bool +qemu_dbus_display_add_client(int csock, Error **errp) +{ + error_setg(errp, "D-Bus display isn't enabled"); + return false; +} + +struct QemuDBusDisplayOps qemu_dbus_display = { + .add_client = qemu_dbus_display_add_client, +}; diff --git a/ui/dbus.c b/ui/dbus.c new file mode 100644 index 0000000000..32d88dc94a --- /dev/null +++ b/ui/dbus.c @@ -0,0 +1,518 @@ +/* + * QEMU DBus display + * + * Copyright (c) 2021 Marc-André Lureau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "qemu/dbus.h" +#include "qemu/main-loop.h" +#include "qemu/option.h" +#include "qom/object_interfaces.h" +#include "sysemu/sysemu.h" +#include "ui/dbus-module.h" +#include "ui/egl-helpers.h" +#include "ui/egl-context.h" +#include "audio/audio.h" +#include "audio/audio_int.h" +#include "qapi/error.h" +#include "trace.h" + +#include "dbus.h" + +static DBusDisplay *dbus_display; + +static QEMUGLContext dbus_create_context(DisplayGLCtx *dgc, + QEMUGLParams *params) +{ + eglMakeCurrent(qemu_egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE, + qemu_egl_rn_ctx); + return qemu_egl_create_context(dgc, params); +} + +static bool +dbus_is_compatible_dcl(DisplayGLCtx *dgc, + DisplayChangeListener *dcl) +{ + return dcl->ops == &dbus_gl_dcl_ops || dcl->ops == &dbus_console_dcl_ops; +} + +static void +dbus_create_texture(DisplayGLCtx *ctx, DisplaySurface *surface) +{ + surface_gl_create_texture(ctx->gls, surface); +} + +static void +dbus_destroy_texture(DisplayGLCtx *ctx, DisplaySurface *surface) +{ + surface_gl_destroy_texture(ctx->gls, surface); +} + +static void +dbus_update_texture(DisplayGLCtx *ctx, DisplaySurface *surface, + int x, int y, int w, int h) +{ + surface_gl_update_texture(ctx->gls, surface, x, y, w, h); +} + +static const DisplayGLCtxOps dbus_gl_ops = { + .dpy_gl_ctx_is_compatible_dcl = dbus_is_compatible_dcl, + .dpy_gl_ctx_create = dbus_create_context, + .dpy_gl_ctx_destroy = qemu_egl_destroy_context, + .dpy_gl_ctx_make_current = qemu_egl_make_context_current, + .dpy_gl_ctx_create_texture = dbus_create_texture, + .dpy_gl_ctx_destroy_texture = dbus_destroy_texture, + .dpy_gl_ctx_update_texture = dbus_update_texture, +}; + +static NotifierList dbus_display_notifiers = + NOTIFIER_LIST_INITIALIZER(dbus_display_notifiers); + +void +dbus_display_notifier_add(Notifier *notifier) +{ + notifier_list_add(&dbus_display_notifiers, notifier); +} + +static void +dbus_display_notifier_remove(Notifier *notifier) +{ + notifier_remove(notifier); +} + +void +dbus_display_notify(DBusDisplayEvent *event) +{ + notifier_list_notify(&dbus_display_notifiers, event); +} + +static void +dbus_display_init(Object *o) +{ + DBusDisplay *dd = DBUS_DISPLAY(o); + g_autoptr(GDBusObjectSkeleton) vm = NULL; + + dd->glctx.ops = &dbus_gl_ops; + if (display_opengl) { + dd->glctx.gls = qemu_gl_init_shader(); + } + dd->iface = qemu_dbus_display1_vm_skeleton_new(); + dd->consoles = g_ptr_array_new_with_free_func(g_object_unref); + + dd->server = g_dbus_object_manager_server_new(DBUS_DISPLAY1_ROOT); + + vm = g_dbus_object_skeleton_new(DBUS_DISPLAY1_ROOT "/VM"); + g_dbus_object_skeleton_add_interface( + vm, G_DBUS_INTERFACE_SKELETON(dd->iface)); + g_dbus_object_manager_server_export(dd->server, vm); + + dbus_clipboard_init(dd); + dbus_chardev_init(dd); +} + +static void +dbus_display_finalize(Object *o) +{ + DBusDisplay *dd = DBUS_DISPLAY(o); + + if (dd->notifier.notify) { + dbus_display_notifier_remove(&dd->notifier); + } + + qemu_clipboard_peer_unregister(&dd->clipboard_peer); + g_clear_object(&dd->clipboard); + + g_clear_object(&dd->server); + g_clear_pointer(&dd->consoles, g_ptr_array_unref); + if (dd->add_client_cancellable) { + g_cancellable_cancel(dd->add_client_cancellable); + } + g_clear_object(&dd->add_client_cancellable); + g_clear_object(&dd->bus); + g_clear_object(&dd->iface); + g_free(dd->dbus_addr); + g_free(dd->audiodev); + g_clear_pointer(&dd->glctx.gls, qemu_gl_fini_shader); + dbus_display = NULL; +} + +static bool +dbus_display_add_console(DBusDisplay 
*dd, int idx, Error **errp) +{ + QemuConsole *con; + DBusDisplayConsole *dbus_console; + + con = qemu_console_lookup_by_index(idx); + assert(con); + + if (qemu_console_is_graphic(con) && + dd->gl_mode != DISPLAYGL_MODE_OFF) { + qemu_console_set_display_gl_ctx(con, &dd->glctx); + } + + dbus_console = dbus_display_console_new(dd, con); + g_ptr_array_insert(dd->consoles, idx, dbus_console); + g_dbus_object_manager_server_export(dd->server, + G_DBUS_OBJECT_SKELETON(dbus_console)); + return true; +} + +static void +dbus_display_complete(UserCreatable *uc, Error **errp) +{ + DBusDisplay *dd = DBUS_DISPLAY(uc); + g_autoptr(GError) err = NULL; + g_autofree char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid); + g_autoptr(GArray) consoles = NULL; + GVariant *console_ids; + int idx; + + if (!object_resolve_path_type("", TYPE_DBUS_DISPLAY, NULL)) { + error_setg(errp, "There is already an instance of %s", + TYPE_DBUS_DISPLAY); + return; + } + + if (dd->p2p) { + /* wait for dbus_display_add_client() */ + dbus_display = dd; + } else if (dd->dbus_addr && *dd->dbus_addr) { + dd->bus = g_dbus_connection_new_for_address_sync(dd->dbus_addr, + G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_CLIENT | + G_DBUS_CONNECTION_FLAGS_MESSAGE_BUS_CONNECTION, + NULL, NULL, &err); + } else { + dd->bus = g_bus_get_sync(G_BUS_TYPE_SESSION, NULL, &err); + } + if (err) { + error_setg(errp, "failed to connect to DBus: %s", err->message); + return; + } + + if (dd->audiodev && *dd->audiodev) { + AudioState *audio_state = audio_state_by_name(dd->audiodev); + if (!audio_state) { + error_setg(errp, "Audiodev '%s' not found", dd->audiodev); + return; + } + if (!g_str_equal(audio_state->drv->name, "dbus")) { + error_setg(errp, "Audiodev '%s' is not compatible with DBus", + dd->audiodev); + return; + } + audio_state->drv->set_dbus_server(audio_state, dd->server); + } + + consoles = g_array_new(FALSE, FALSE, sizeof(guint32)); + for (idx = 0;; idx++) { + if (!qemu_console_lookup_by_index(idx)) { + break; + } + if (!dbus_display_add_console(dd, idx, errp)) { + return; + } + g_array_append_val(consoles, idx); + } + + console_ids = g_variant_new_from_data( + G_VARIANT_TYPE("au"), + consoles->data, consoles->len * sizeof(guint32), TRUE, + (GDestroyNotify)g_array_unref, consoles); + g_steal_pointer(&consoles); + g_object_set(dd->iface, + "name", qemu_name ?: "QEMU " QEMU_VERSION, + "uuid", uuid, + "console-ids", console_ids, + NULL); + + if (dd->bus) { + g_dbus_object_manager_server_set_connection(dd->server, dd->bus); + g_bus_own_name_on_connection(dd->bus, "org.qemu", + G_BUS_NAME_OWNER_FLAGS_NONE, + NULL, NULL, NULL, NULL); + } +} + +static void +dbus_display_add_client_ready(GObject *source_object, + GAsyncResult *res, + gpointer user_data) +{ + g_autoptr(GError) err = NULL; + g_autoptr(GDBusConnection) conn = NULL; + + g_clear_object(&dbus_display->add_client_cancellable); + + conn = g_dbus_connection_new_finish(res, &err); + if (!conn) { + error_printf("Failed to accept D-Bus client: %s", err->message); + } + + g_dbus_object_manager_server_set_connection(dbus_display->server, conn); + g_dbus_connection_start_message_processing(conn); +} + + +static bool +dbus_display_add_client(int csock, Error **errp) +{ + g_autoptr(GError) err = NULL; + g_autoptr(GSocket) socket = NULL; + g_autoptr(GSocketConnection) conn = NULL; + g_autofree char *guid = g_dbus_generate_guid(); + + if (!dbus_display) { + error_setg(errp, "p2p connections not accepted in bus mode"); + return false; + } + + if (dbus_display->add_client_cancellable) { + 
g_cancellable_cancel(dbus_display->add_client_cancellable); + } + + socket = g_socket_new_from_fd(csock, &err); + if (!socket) { + error_setg(errp, "Failed to setup D-Bus socket: %s", err->message); + return false; + } + + conn = g_socket_connection_factory_create_connection(socket); + + dbus_display->add_client_cancellable = g_cancellable_new(); + + g_dbus_connection_new(G_IO_STREAM(conn), + guid, + G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_SERVER | + G_DBUS_CONNECTION_FLAGS_DELAY_MESSAGE_PROCESSING, + NULL, + dbus_display->add_client_cancellable, + dbus_display_add_client_ready, + NULL); + + return true; +} + +static bool +get_dbus_p2p(Object *o, Error **errp) +{ + DBusDisplay *dd = DBUS_DISPLAY(o); + + return dd->p2p; +} + +static void +set_dbus_p2p(Object *o, bool p2p, Error **errp) +{ + DBusDisplay *dd = DBUS_DISPLAY(o); + + dd->p2p = p2p; +} + +static char * +get_dbus_addr(Object *o, Error **errp) +{ + DBusDisplay *dd = DBUS_DISPLAY(o); + + return g_strdup(dd->dbus_addr); +} + +static void +set_dbus_addr(Object *o, const char *str, Error **errp) +{ + DBusDisplay *dd = DBUS_DISPLAY(o); + + g_free(dd->dbus_addr); + dd->dbus_addr = g_strdup(str); +} + +static char * +get_audiodev(Object *o, Error **errp) +{ + DBusDisplay *dd = DBUS_DISPLAY(o); + + return g_strdup(dd->audiodev); +} + +static void +set_audiodev(Object *o, const char *str, Error **errp) +{ + DBusDisplay *dd = DBUS_DISPLAY(o); + + g_free(dd->audiodev); + dd->audiodev = g_strdup(str); +} + + +static int +get_gl_mode(Object *o, Error **errp) +{ + DBusDisplay *dd = DBUS_DISPLAY(o); + + return dd->gl_mode; +} + +static void +set_gl_mode(Object *o, int val, Error **errp) +{ + DBusDisplay *dd = DBUS_DISPLAY(o); + + dd->gl_mode = val; +} + +static void +dbus_display_class_init(ObjectClass *oc, void *data) +{ + UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); + + ucc->complete = dbus_display_complete; + object_class_property_add_bool(oc, "p2p", get_dbus_p2p, set_dbus_p2p); + object_class_property_add_str(oc, "addr", get_dbus_addr, set_dbus_addr); + object_class_property_add_str(oc, "audiodev", get_audiodev, set_audiodev); + object_class_property_add_enum(oc, "gl-mode", + "DisplayGLMode", &DisplayGLMode_lookup, + get_gl_mode, set_gl_mode); +} + +#define TYPE_CHARDEV_VC "chardev-vc" + +typedef struct DBusVCClass { + DBusChardevClass parent_class; + + void (*parent_parse)(QemuOpts *opts, ChardevBackend *b, Error **errp); +} DBusVCClass; + +DECLARE_CLASS_CHECKERS(DBusVCClass, DBUS_VC, + TYPE_CHARDEV_VC) + +static void +dbus_vc_parse(QemuOpts *opts, ChardevBackend *backend, + Error **errp) +{ + DBusVCClass *klass = DBUS_VC_CLASS(object_class_by_name(TYPE_CHARDEV_VC)); + const char *name = qemu_opt_get(opts, "name"); + const char *id = qemu_opts_id(opts); + + if (name == NULL) { + if (g_str_has_prefix(id, "compat_monitor")) { + name = "org.qemu.monitor.hmp.0"; + } else if (g_str_has_prefix(id, "serial")) { + name = "org.qemu.console.serial.0"; + } else { + name = ""; + } + if (!qemu_opt_set(opts, "name", name, errp)) { + return; + } + } + + klass->parent_parse(opts, backend, errp); +} + +static void +dbus_vc_class_init(ObjectClass *oc, void *data) +{ + DBusVCClass *klass = DBUS_VC_CLASS(oc); + ChardevClass *cc = CHARDEV_CLASS(oc); + + klass->parent_parse = cc->parse; + cc->parse = dbus_vc_parse; +} + +static const TypeInfo dbus_vc_type_info = { + .name = TYPE_CHARDEV_VC, + .parent = TYPE_CHARDEV_DBUS, + .class_size = sizeof(DBusVCClass), + .class_init = dbus_vc_class_init, +}; + +static void +early_dbus_init(DisplayOptions *opts) +{ + 
DisplayGLMode mode = opts->has_gl ? opts->gl : DISPLAYGL_MODE_OFF; + + if (mode != DISPLAYGL_MODE_OFF) { + if (egl_rendernode_init(opts->u.dbus.rendernode, mode) < 0) { + error_report("dbus: render node init failed"); + exit(1); + } + + display_opengl = 1; + } + + type_register(&dbus_vc_type_info); +} + +static void +dbus_init(DisplayState *ds, DisplayOptions *opts) +{ + DisplayGLMode mode = opts->has_gl ? opts->gl : DISPLAYGL_MODE_OFF; + + if (opts->u.dbus.addr && opts->u.dbus.p2p) { + error_report("dbus: can't accept both addr=X and p2p=yes options"); + exit(1); + } + + using_dbus_display = 1; + + object_new_with_props(TYPE_DBUS_DISPLAY, + object_get_objects_root(), + "dbus-display", &error_fatal, + "addr", opts->u.dbus.addr ?: "", + "audiodev", opts->u.dbus.audiodev ?: "", + "gl-mode", DisplayGLMode_str(mode), + "p2p", yes_no(opts->u.dbus.p2p), + NULL); +} + +static const TypeInfo dbus_display_info = { + .name = TYPE_DBUS_DISPLAY, + .parent = TYPE_OBJECT, + .instance_size = sizeof(DBusDisplay), + .instance_init = dbus_display_init, + .instance_finalize = dbus_display_finalize, + .class_init = dbus_display_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_USER_CREATABLE }, + { } + } +}; + +static QemuDisplay qemu_display_dbus = { + .type = DISPLAY_TYPE_DBUS, + .early_init = early_dbus_init, + .init = dbus_init, +}; + +static void register_dbus(void) +{ + qemu_dbus_display = (struct QemuDBusDisplayOps) { + .add_client = dbus_display_add_client, + }; + type_register_static(&dbus_display_info); + qemu_display_register(&qemu_display_dbus); +} + +type_init(register_dbus); + +#ifdef CONFIG_OPENGL +module_dep("ui-opengl"); +#endif diff --git a/ui/dbus.h b/ui/dbus.h new file mode 100644 index 0000000000..9c149e7b41 --- /dev/null +++ b/ui/dbus.h @@ -0,0 +1,148 @@ +/* + * QEMU DBus display + * + * Copyright (c) 2021 Marc-André Lureau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#ifndef UI_DBUS_H +#define UI_DBUS_H + +#include "chardev/char-socket.h" +#include "qemu/dbus.h" +#include "qom/object.h" +#include "ui/console.h" +#include "ui/clipboard.h" + +#include "ui/dbus-display1.h" + +typedef struct DBusClipboardRequest { + GDBusMethodInvocation *invocation; + QemuClipboardType type; + guint timeout_id; +} DBusClipboardRequest; + +struct DBusDisplay { + Object parent; + + DisplayGLMode gl_mode; + bool p2p; + char *dbus_addr; + char *audiodev; + DisplayGLCtx glctx; + + GDBusConnection *bus; + GDBusObjectManagerServer *server; + QemuDBusDisplay1VM *iface; + GPtrArray *consoles; + GCancellable *add_client_cancellable; + + QemuClipboardPeer clipboard_peer; + QemuDBusDisplay1Clipboard *clipboard; + QemuDBusDisplay1Clipboard *clipboard_proxy; + DBusClipboardRequest clipboard_request[QEMU_CLIPBOARD_SELECTION__COUNT]; + + Notifier notifier; +}; + +#define TYPE_DBUS_DISPLAY "dbus-display" +OBJECT_DECLARE_SIMPLE_TYPE(DBusDisplay, DBUS_DISPLAY) + +void dbus_display_notifier_add(Notifier *notifier); + +#define DBUS_DISPLAY_TYPE_CONSOLE dbus_display_console_get_type() +G_DECLARE_FINAL_TYPE(DBusDisplayConsole, + dbus_display_console, + DBUS_DISPLAY, + CONSOLE, + GDBusObjectSkeleton) + +DBusDisplayConsole * +dbus_display_console_new(DBusDisplay *display, QemuConsole *con); + +int +dbus_display_console_get_index(DBusDisplayConsole *ddc); + + +extern const DisplayChangeListenerOps dbus_console_dcl_ops; + +#define DBUS_DISPLAY_TYPE_LISTENER dbus_display_listener_get_type() +G_DECLARE_FINAL_TYPE(DBusDisplayListener, + dbus_display_listener, + DBUS_DISPLAY, + LISTENER, + GObject) + +DBusDisplayListener * +dbus_display_listener_new(const char *bus_name, + GDBusConnection *conn, + DBusDisplayConsole *console); + +DBusDisplayConsole * +dbus_display_listener_get_console(DBusDisplayListener *ddl); + +const char * +dbus_display_listener_get_bus_name(DBusDisplayListener *ddl); + +extern const DisplayChangeListenerOps dbus_gl_dcl_ops; +extern const DisplayChangeListenerOps dbus_dcl_ops; + +#define TYPE_CHARDEV_DBUS "chardev-dbus" + +typedef struct DBusChardevClass { + SocketChardevClass parent_class; + + void (*parent_chr_be_event)(Chardev *s, QEMUChrEvent event); +} DBusChardevClass; + +DECLARE_CLASS_CHECKERS(DBusChardevClass, DBUS_CHARDEV, + TYPE_CHARDEV_DBUS) + +typedef struct DBusChardev { + SocketChardev parent; + + bool exported; + QemuDBusDisplay1Chardev *iface; +} DBusChardev; + +DECLARE_INSTANCE_CHECKER(DBusChardev, DBUS_CHARDEV, TYPE_CHARDEV_DBUS) + +#define CHARDEV_IS_DBUS(chr) \ + object_dynamic_cast(OBJECT(chr), TYPE_CHARDEV_DBUS) + +typedef enum { + DBUS_DISPLAY_CHARDEV_OPEN, + DBUS_DISPLAY_CHARDEV_CLOSE, +} DBusDisplayEventType; + +typedef struct DBusDisplayEvent { + DBusDisplayEventType type; + union { + DBusChardev *chardev; + }; +} DBusDisplayEvent; + +void dbus_display_notify(DBusDisplayEvent *event); + +void dbus_chardev_init(DBusDisplay *dpy); + +void dbus_clipboard_init(DBusDisplay *dpy); + +#endif /* UI_DBUS_H */ diff --git a/ui/egl-context.c b/ui/egl-context.c index 368ffa49d8..eb5f520fc4 100644 --- a/ui/egl-context.c +++ b/ui/egl-context.c @@ -1,7 +1,7 @@ #include "qemu/osdep.h" #include "ui/egl-context.h" -QEMUGLContext qemu_egl_create_context(DisplayChangeListener *dcl, +QEMUGLContext qemu_egl_create_context(DisplayGLCtx *dgc, QEMUGLParams *params) { EGLContext ctx; @@ -24,12 +24,12 @@ QEMUGLContext qemu_egl_create_context(DisplayChangeListener *dcl, return ctx; } -void qemu_egl_destroy_context(DisplayChangeListener *dcl, QEMUGLContext ctx) +void 
qemu_egl_destroy_context(DisplayGLCtx *dgc, QEMUGLContext ctx) { eglDestroyContext(qemu_egl_display, ctx); } -int qemu_egl_make_context_current(DisplayChangeListener *dcl, +int qemu_egl_make_context_current(DisplayGLCtx *dgc, QEMUGLContext ctx) { return eglMakeCurrent(qemu_egl_display, diff --git a/ui/egl-headless.c b/ui/egl-headless.c index a26a2520c4..7a30fd9777 100644 --- a/ui/egl-headless.c +++ b/ui/egl-headless.c @@ -38,12 +38,12 @@ static void egl_gfx_switch(DisplayChangeListener *dcl, edpy->ds = new_surface; } -static QEMUGLContext egl_create_context(DisplayChangeListener *dcl, +static QEMUGLContext egl_create_context(DisplayGLCtx *dgc, QEMUGLParams *params) { eglMakeCurrent(qemu_egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE, qemu_egl_rn_ctx); - return qemu_egl_create_context(dcl, params); + return qemu_egl_create_context(dgc, params); } static void egl_scanout_disable(DisplayChangeListener *dcl) @@ -157,10 +157,6 @@ static const DisplayChangeListenerOps egl_ops = { .dpy_gfx_update = egl_gfx_update, .dpy_gfx_switch = egl_gfx_switch, - .dpy_gl_ctx_create = egl_create_context, - .dpy_gl_ctx_destroy = qemu_egl_destroy_context, - .dpy_gl_ctx_make_current = qemu_egl_make_context_current, - .dpy_gl_scanout_disable = egl_scanout_disable, .dpy_gl_scanout_texture = egl_scanout_texture, .dpy_gl_scanout_dmabuf = egl_scanout_dmabuf, @@ -170,6 +166,28 @@ static const DisplayChangeListenerOps egl_ops = { .dpy_gl_update = egl_scanout_flush, }; +static bool +egl_is_compatible_dcl(DisplayGLCtx *dgc, + DisplayChangeListener *dcl) +{ + if (!dcl->ops->dpy_gl_update) { + /* + * egl-headless is compatible with all 2d listeners, as it blits the GL + * updates on the 2d console surface. + */ + return true; + } + + return dcl->ops == &egl_ops; +} + +static const DisplayGLCtxOps eglctx_ops = { + .dpy_gl_ctx_is_compatible_dcl = egl_is_compatible_dcl, + .dpy_gl_ctx_create = egl_create_context, + .dpy_gl_ctx_destroy = qemu_egl_destroy_context, + .dpy_gl_ctx_make_current = qemu_egl_make_context_current, +}; + static void early_egl_headless_init(DisplayOptions *opts) { display_opengl = 1; @@ -188,6 +206,8 @@ static void egl_headless_init(DisplayState *ds, DisplayOptions *opts) } for (idx = 0;; idx++) { + DisplayGLCtx *ctx; + con = qemu_console_lookup_by_index(idx); if (!con || !qemu_console_is_graphic(con)) { break; @@ -197,6 +217,9 @@ static void egl_headless_init(DisplayState *ds, DisplayOptions *opts) edpy->dcl.con = con; edpy->dcl.ops = &egl_ops; edpy->gls = qemu_gl_init_shader(); + ctx = g_new0(DisplayGLCtx, 1); + ctx->ops = &eglctx_ops; + qemu_console_set_display_gl_ctx(con, ctx); register_displaychangelistener(&edpy->dcl); } } diff --git a/ui/egl-helpers.c b/ui/egl-helpers.c index 6d0cb2b5cb..3a88245b67 100644 --- a/ui/egl-helpers.c +++ b/ui/egl-helpers.c @@ -90,14 +90,31 @@ void egl_fb_setup_new_tex(egl_fb *fb, int width, int height) void egl_fb_blit(egl_fb *dst, egl_fb *src, bool flip) { - GLuint y1, y2; + GLuint x1 = 0; + GLuint y1 = 0; + GLuint x2, y2; + GLuint w = src->width; + GLuint h = src->height; glBindFramebuffer(GL_READ_FRAMEBUFFER, src->framebuffer); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, dst->framebuffer); glViewport(0, 0, dst->width, dst->height); - y1 = flip ? src->height : 0; - y2 = flip ? 0 : src->height; - glBlitFramebuffer(0, y1, src->width, y2, + + if (src->dmabuf) { + x1 = src->dmabuf->x; + y1 = src->dmabuf->y; + w = src->dmabuf->scanout_width; + h = src->dmabuf->scanout_height; + } + + w = (x1 + w) > src->width ? src->width - x1 : w; + h = (y1 + h) > src->height ? 
src->height - y1 : h; + + y2 = flip ? y1 : h + y1; + y1 = flip ? h + y1 : y1; + x2 = x1 + w; + + glBlitFramebuffer(x1, y1, x2, y2, 0, 0, dst->width, dst->height, GL_COLOR_BUFFER_BIT, GL_LINEAR); } @@ -287,6 +304,32 @@ void egl_dmabuf_release_texture(QemuDmaBuf *dmabuf) dmabuf->texture = 0; } +void egl_dmabuf_create_sync(QemuDmaBuf *dmabuf) +{ + EGLSyncKHR sync; + + if (epoxy_has_egl_extension(qemu_egl_display, + "EGL_KHR_fence_sync") && + epoxy_has_egl_extension(qemu_egl_display, + "EGL_ANDROID_native_fence_sync")) { + sync = eglCreateSyncKHR(qemu_egl_display, + EGL_SYNC_NATIVE_FENCE_ANDROID, NULL); + if (sync != EGL_NO_SYNC_KHR) { + dmabuf->sync = sync; + } + } +} + +void egl_dmabuf_create_fence(QemuDmaBuf *dmabuf) +{ + if (dmabuf->sync) { + dmabuf->fence_fd = eglDupNativeFenceFDANDROID(qemu_egl_display, + dmabuf->sync); + eglDestroySyncKHR(qemu_egl_display, dmabuf->sync); + dmabuf->sync = NULL; + } +} + #endif /* CONFIG_GBM */ /* ---------------------------------------------------------------------- */ diff --git a/ui/gtk-clipboard.c b/ui/gtk-clipboard.c index bff28d2030..8d8a636fd1 100644 --- a/ui/gtk-clipboard.c +++ b/ui/gtk-clipboard.c @@ -19,7 +19,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/main-loop.h" #include "ui/gtk.h" @@ -45,24 +44,24 @@ static void gd_clipboard_get_data(GtkClipboard *clipboard, GtkDisplayState *gd = data; QemuClipboardSelection s = gd_find_selection(gd, clipboard); QemuClipboardType type = QEMU_CLIPBOARD_TYPE_TEXT; - QemuClipboardInfo *info = qemu_clipboard_info_ref(gd->cbinfo[s]); + g_autoptr(QemuClipboardInfo) info = NULL; + + info = qemu_clipboard_info_ref(qemu_clipboard_info(s)); qemu_clipboard_request(info, type); - while (info == gd->cbinfo[s] && + while (info == qemu_clipboard_info(s) && info->types[type].available && info->types[type].data == NULL) { main_loop_wait(false); } - if (info == gd->cbinfo[s] && gd->cbowner[s]) { + if (info == qemu_clipboard_info(s) && gd->cbowner[s]) { gtk_selection_data_set_text(selection_data, info->types[type].data, info->types[type].size); } else { /* clipboard owner changed while waiting for the data */ } - - qemu_clipboard_info_unref(info); } static void gd_clipboard_clear(GtkClipboard *clipboard, @@ -74,19 +73,16 @@ static void gd_clipboard_clear(GtkClipboard *clipboard, gd->cbowner[s] = false; } -static void gd_clipboard_notify(Notifier *notifier, void *data) +static void gd_clipboard_update_info(GtkDisplayState *gd, + QemuClipboardInfo *info) { - GtkDisplayState *gd = container_of(notifier, GtkDisplayState, cbpeer.update); - QemuClipboardInfo *info = data; QemuClipboardSelection s = info->selection; bool self_update = info->owner == &gd->cbpeer; - if (info != gd->cbinfo[s]) { - qemu_clipboard_info_unref(gd->cbinfo[s]); - gd->cbinfo[s] = qemu_clipboard_info_ref(info); + if (info != qemu_clipboard_info(s)) { gd->cbpending[s] = 0; if (!self_update) { - GtkTargetList *list; + g_autoptr(GtkTargetList) list = NULL; GtkTargetEntry *targets; gint n_targets; @@ -97,15 +93,16 @@ static void gd_clipboard_notify(Notifier *notifier, void *data) targets = gtk_target_table_new_from_list(list, &n_targets); gtk_clipboard_clear(gd->gtkcb[s]); - gd->cbowner[s] = true; - gtk_clipboard_set_with_data(gd->gtkcb[s], - targets, n_targets, - gd_clipboard_get_data, - gd_clipboard_clear, - gd); + if (targets) { + gd->cbowner[s] = true; + gtk_clipboard_set_with_data(gd->gtkcb[s], + targets, n_targets, + gd_clipboard_get_data, + gd_clipboard_clear, + gd); - gtk_target_table_free(targets, n_targets); - 
gtk_target_list_unref(list); + gtk_target_table_free(targets, n_targets); + } } return; } @@ -120,6 +117,22 @@ static void gd_clipboard_notify(Notifier *notifier, void *data) */ } +static void gd_clipboard_notify(Notifier *notifier, void *data) +{ + GtkDisplayState *gd = + container_of(notifier, GtkDisplayState, cbpeer.notifier); + QemuClipboardNotify *notify = data; + + switch (notify->type) { + case QEMU_CLIPBOARD_UPDATE_INFO: + gd_clipboard_update_info(gd, notify->info); + return; + case QEMU_CLIPBOARD_RESET_SERIAL: + /* ignore */ + return; + } +} + static void gd_clipboard_request(QemuClipboardInfo *info, QemuClipboardType type) { @@ -155,7 +168,7 @@ static void gd_owner_change(GtkClipboard *clipboard, switch (event->owner_change.reason) { - case GDK_SETTING_ACTION_NEW: + case GDK_OWNER_CHANGE_NEW_OWNER: info = qemu_clipboard_info_new(&gd->cbpeer, s); if (gtk_clipboard_wait_is_text_available(clipboard)) { info->types[QEMU_CLIPBOARD_TYPE_TEXT].available = true; @@ -165,6 +178,8 @@ static void gd_owner_change(GtkClipboard *clipboard, qemu_clipboard_info_unref(info); break; default: + qemu_clipboard_peer_release(&gd->cbpeer, s); + gd->cbowner[s] = false; break; } } @@ -172,16 +187,16 @@ static void gd_owner_change(GtkClipboard *clipboard, void gd_clipboard_init(GtkDisplayState *gd) { gd->cbpeer.name = "gtk"; - gd->cbpeer.update.notify = gd_clipboard_notify; + gd->cbpeer.notifier.notify = gd_clipboard_notify; gd->cbpeer.request = gd_clipboard_request; qemu_clipboard_peer_register(&gd->cbpeer); gd->gtkcb[QEMU_CLIPBOARD_SELECTION_CLIPBOARD] = - gtk_clipboard_get(gdk_atom_intern("CLIPBOARD", FALSE)); + gtk_clipboard_get(GDK_SELECTION_CLIPBOARD); gd->gtkcb[QEMU_CLIPBOARD_SELECTION_PRIMARY] = - gtk_clipboard_get(gdk_atom_intern("PRIMARY", FALSE)); + gtk_clipboard_get(GDK_SELECTION_PRIMARY); gd->gtkcb[QEMU_CLIPBOARD_SELECTION_SECONDARY] = - gtk_clipboard_get(gdk_atom_intern("SECONDARY", FALSE)); + gtk_clipboard_get(GDK_SELECTION_SECONDARY); g_signal_connect(gd->gtkcb[QEMU_CLIPBOARD_SELECTION_CLIPBOARD], "owner-change", G_CALLBACK(gd_owner_change), gd); diff --git a/ui/gtk-egl.c b/ui/gtk-egl.c index 2a2e6d3a17..e99e3b0d8c 100644 --- a/ui/gtk-egl.c +++ b/ui/gtk-egl.c @@ -12,6 +12,7 @@ */ #include "qemu/osdep.h" +#include "qemu/main-loop.h" #include "trace.h" @@ -62,6 +63,9 @@ void gd_egl_init(VirtualConsole *vc) void gd_egl_draw(VirtualConsole *vc) { GdkWindow *window; +#ifdef CONFIG_GBM + QemuDmaBuf *dmabuf = vc->gfx.guest_fb.dmabuf; +#endif int ww, wh; if (!vc->gfx.gls) { @@ -73,10 +77,31 @@ void gd_egl_draw(VirtualConsole *vc) wh = gdk_window_get_height(window); if (vc->gfx.scanout_mode) { +#ifdef CONFIG_GBM + if (dmabuf) { + if (!dmabuf->draw_submitted) { + return; + } else { + dmabuf->draw_submitted = false; + } + } +#endif gd_egl_scanout_flush(&vc->gfx.dcl, 0, 0, vc->gfx.w, vc->gfx.h); - vc->gfx.scale_x = (double)ww / vc->gfx.w; - vc->gfx.scale_y = (double)wh / vc->gfx.h; + vc->gfx.scale_x = (double)ww / surface_width(vc->gfx.ds); + vc->gfx.scale_y = (double)wh / surface_height(vc->gfx.ds); + + glFlush(); +#ifdef CONFIG_GBM + if (dmabuf) { + egl_dmabuf_create_fence(dmabuf); + if (dmabuf->fence_fd > 0) { + qemu_set_fd_handler(dmabuf->fence_fd, gd_hw_gl_flushed, NULL, vc); + return; + } + graphic_hw_gl_block(vc->gfx.dcl.con, false); + } +#endif } else { if (!vc->gfx.ds) { return; @@ -91,10 +116,9 @@ void gd_egl_draw(VirtualConsole *vc) vc->gfx.scale_x = (double)ww / surface_width(vc->gfx.ds); vc->gfx.scale_y = (double)wh / surface_height(vc->gfx.ds); - } - glFlush(); - 
graphic_hw_gl_flushed(vc->gfx.dcl.con); + glFlush(); + } } void gd_egl_update(DisplayChangeListener *dcl, @@ -116,8 +140,8 @@ void gd_egl_refresh(DisplayChangeListener *dcl) { VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); - vc->gfx.dcl.update_interval = gd_monitor_update_interval( - vc->window ? vc->window : vc->gfx.drawing_area); + gd_update_monitor_refresh_rate( + vc, vc->window ? vc->window : vc->gfx.drawing_area); if (!vc->gfx.esurface) { gd_egl_init(vc); @@ -126,8 +150,15 @@ void gd_egl_refresh(DisplayChangeListener *dcl) } vc->gfx.gls = qemu_gl_init_shader(); if (vc->gfx.ds) { + surface_gl_destroy_texture(vc->gfx.gls, vc->gfx.ds); surface_gl_create_texture(vc->gfx.gls, vc->gfx.ds); } +#ifdef CONFIG_GBM + if (vc->gfx.guest_fb.dmabuf) { + egl_dmabuf_release_texture(vc->gfx.guest_fb.dmabuf); + gd_egl_scanout_dmabuf(dcl, vc->gfx.guest_fb.dmabuf); + } +#endif } graphic_hw_update(dcl->con); @@ -152,6 +183,8 @@ void gd_egl_switch(DisplayChangeListener *dcl, surface_height(vc->gfx.ds) == surface_height(surface)) { resized = false; } + eglMakeCurrent(qemu_egl_display, vc->gfx.esurface, + vc->gfx.esurface, vc->gfx.ectx); surface_gl_destroy_texture(vc->gfx.gls, vc->gfx.ds); vc->gfx.ds = surface; @@ -162,16 +195,19 @@ void gd_egl_switch(DisplayChangeListener *dcl, if (resized) { gd_update_windowsize(vc); } + + eglMakeCurrent(qemu_egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE, + EGL_NO_CONTEXT); } -QEMUGLContext gd_egl_create_context(DisplayChangeListener *dcl, +QEMUGLContext gd_egl_create_context(DisplayGLCtx *dgc, QEMUGLParams *params) { - VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); + VirtualConsole *vc = container_of(dgc, VirtualConsole, gfx.dgc); eglMakeCurrent(qemu_egl_display, vc->gfx.esurface, vc->gfx.esurface, vc->gfx.ectx); - return qemu_egl_create_context(dcl, params); + return qemu_egl_create_context(dgc, params); } void gd_egl_scanout_disable(DisplayChangeListener *dcl) @@ -209,14 +245,24 @@ void gd_egl_scanout_dmabuf(DisplayChangeListener *dcl, QemuDmaBuf *dmabuf) { #ifdef CONFIG_GBM + VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); + + eglMakeCurrent(qemu_egl_display, vc->gfx.esurface, + vc->gfx.esurface, vc->gfx.ectx); + egl_dmabuf_import_texture(dmabuf); if (!dmabuf->texture) { return; } gd_egl_scanout_texture(dcl, dmabuf->texture, - false, dmabuf->width, dmabuf->height, - 0, 0, dmabuf->width, dmabuf->height); + dmabuf->y0_top, dmabuf->width, dmabuf->height, + dmabuf->x, dmabuf->y, dmabuf->scanout_width, + dmabuf->scanout_height); + + if (dmabuf->allow_fences) { + vc->gfx.guest_fb.dmabuf = dmabuf; + } #endif } @@ -249,14 +295,6 @@ void gd_egl_cursor_position(DisplayChangeListener *dcl, vc->gfx.cursor_y = pos_y * vc->gfx.scale_y; } -void gd_egl_release_dmabuf(DisplayChangeListener *dcl, - QemuDmaBuf *dmabuf) -{ -#ifdef CONFIG_GBM - egl_dmabuf_release_texture(dmabuf); -#endif -} - void gd_egl_scanout_flush(DisplayChangeListener *dcl, uint32_t x, uint32_t y, uint32_t w, uint32_t h) { @@ -289,9 +327,31 @@ void gd_egl_scanout_flush(DisplayChangeListener *dcl, egl_fb_blit(&vc->gfx.win_fb, &vc->gfx.guest_fb, !vc->gfx.y0_top); } +#ifdef CONFIG_GBM + if (vc->gfx.guest_fb.dmabuf) { + egl_dmabuf_create_sync(vc->gfx.guest_fb.dmabuf); + } +#endif + eglSwapBuffers(qemu_egl_display, vc->gfx.esurface); } +void gd_egl_flush(DisplayChangeListener *dcl, + uint32_t x, uint32_t y, uint32_t w, uint32_t h) +{ + VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); + GtkWidget *area = vc->gfx.drawing_area; + + if (vc->gfx.guest_fb.dmabuf 
&& !vc->gfx.guest_fb.dmabuf->draw_submitted) { + graphic_hw_gl_block(vc->gfx.dcl.con, true); + vc->gfx.guest_fb.dmabuf->draw_submitted = true; + gtk_widget_queue_draw_area(area, x, y, w, h); + return; + } + + gd_egl_scanout_flush(&vc->gfx.dcl, x, y, w, h); +} + void gtk_egl_init(DisplayGLMode mode) { GdkDisplay *gdk_display = gdk_display_get_default(); @@ -304,10 +364,10 @@ void gtk_egl_init(DisplayGLMode mode) display_opengl = 1; } -int gd_egl_make_current(DisplayChangeListener *dcl, +int gd_egl_make_current(DisplayGLCtx *dgc, QEMUGLContext ctx) { - VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); + VirtualConsole *vc = container_of(dgc, VirtualConsole, gfx.dgc); return eglMakeCurrent(qemu_egl_display, vc->gfx.esurface, vc->gfx.esurface, ctx); diff --git a/ui/gtk-gl-area.c b/ui/gtk-gl-area.c index dd5783fec7..1605818bd1 100644 --- a/ui/gtk-gl-area.c +++ b/ui/gtk-gl-area.c @@ -8,6 +8,7 @@ */ #include "qemu/osdep.h" +#include "qemu/main-loop.h" #include "trace.h" @@ -37,21 +38,35 @@ static void gtk_gl_area_set_scanout_mode(VirtualConsole *vc, bool scanout) void gd_gl_area_draw(VirtualConsole *vc) { - int ww, wh, y1, y2; +#ifdef CONFIG_GBM + QemuDmaBuf *dmabuf = vc->gfx.guest_fb.dmabuf; +#endif + int ww, wh, ws, y1, y2; if (!vc->gfx.gls) { return; } gtk_gl_area_make_current(GTK_GL_AREA(vc->gfx.drawing_area)); - ww = gtk_widget_get_allocated_width(vc->gfx.drawing_area); - wh = gtk_widget_get_allocated_height(vc->gfx.drawing_area); + ws = gdk_window_get_scale_factor(gtk_widget_get_window(vc->gfx.drawing_area)); + ww = gtk_widget_get_allocated_width(vc->gfx.drawing_area) * ws; + wh = gtk_widget_get_allocated_height(vc->gfx.drawing_area) * ws; if (vc->gfx.scanout_mode) { if (!vc->gfx.guest_fb.framebuffer) { return; } +#ifdef CONFIG_GBM + if (dmabuf) { + if (!dmabuf->draw_submitted) { + return; + } else { + dmabuf->draw_submitted = false; + } + } +#endif + glBindFramebuffer(GL_READ_FRAMEBUFFER, vc->gfx.guest_fb.framebuffer); /* GtkGLArea sets GL_DRAW_FRAMEBUFFER for us */ @@ -61,6 +76,22 @@ void gd_gl_area_draw(VirtualConsole *vc) glBlitFramebuffer(0, y1, vc->gfx.w, y2, 0, 0, ww, wh, GL_COLOR_BUFFER_BIT, GL_NEAREST); +#ifdef CONFIG_GBM + if (dmabuf) { + egl_dmabuf_create_sync(dmabuf); + } +#endif + glFlush(); +#ifdef CONFIG_GBM + if (dmabuf) { + egl_dmabuf_create_fence(dmabuf); + if (dmabuf->fence_fd > 0) { + qemu_set_fd_handler(dmabuf->fence_fd, gd_hw_gl_flushed, NULL, vc); + return; + } + graphic_hw_gl_block(vc->gfx.dcl.con, false); + } +#endif } else { if (!vc->gfx.ds) { return; @@ -70,9 +101,6 @@ void gd_gl_area_draw(VirtualConsole *vc) surface_gl_setup_viewport(vc->gfx.gls, vc->gfx.ds, ww, wh); surface_gl_render_texture(vc->gfx.gls, vc->gfx.ds); } - - glFlush(); - graphic_hw_gl_flushed(vc->gfx.dcl.con); } void gd_gl_area_update(DisplayChangeListener *dcl, @@ -93,6 +121,8 @@ void gd_gl_area_refresh(DisplayChangeListener *dcl) { VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); + gd_update_monitor_refresh_rate(vc, vc->window ? 
vc->window : vc->gfx.drawing_area); + if (!vc->gfx.gls) { if (!gtk_widget_get_realized(vc->gfx.drawing_area)) { return; @@ -139,15 +169,32 @@ void gd_gl_area_switch(DisplayChangeListener *dcl, } } -QEMUGLContext gd_gl_area_create_context(DisplayChangeListener *dcl, +static int gd_cmp_gl_context_version(int major, int minor, QEMUGLParams *params) +{ + if (major > params->major_ver) { + return 1; + } + if (major < params->major_ver) { + return -1; + } + if (minor > params->minor_ver) { + return 1; + } + if (minor < params->minor_ver) { + return -1; + } + return 0; +} + +QEMUGLContext gd_gl_area_create_context(DisplayGLCtx *dgc, QEMUGLParams *params) { - VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); + VirtualConsole *vc = container_of(dgc, VirtualConsole, gfx.dgc); GdkWindow *window; GdkGLContext *ctx; GError *err = NULL; + int major, minor; - gtk_gl_area_make_current(GTK_GL_AREA(vc->gfx.drawing_area)); window = gtk_widget_get_window(vc->gfx.drawing_area); ctx = gdk_window_create_gl_context(window, &err); if (err) { @@ -165,12 +212,30 @@ QEMUGLContext gd_gl_area_create_context(DisplayChangeListener *dcl, g_clear_object(&ctx); return NULL; } + + gdk_gl_context_make_current(ctx); + gdk_gl_context_get_version(ctx, &major, &minor); + gdk_gl_context_clear_current(); + gtk_gl_area_make_current(GTK_GL_AREA(vc->gfx.drawing_area)); + + if (gd_cmp_gl_context_version(major, minor, params) == -1) { + /* created ctx version < requested version */ + g_clear_object(&ctx); + } + + trace_gd_gl_area_create_context(ctx, params->major_ver, params->minor_ver); return ctx; } -void gd_gl_area_destroy_context(DisplayChangeListener *dcl, QEMUGLContext ctx) +void gd_gl_area_destroy_context(DisplayGLCtx *dgc, QEMUGLContext ctx) { - /* FIXME */ + GdkGLContext *current_ctx = gdk_gl_context_get_current(); + + trace_gd_gl_area_destroy_context(ctx, current_ctx); + if (ctx == current_ctx) { + gdk_gl_context_clear_current(); + } + g_clear_object(&ctx); } void gd_gl_area_scanout_texture(DisplayChangeListener *dcl, @@ -213,6 +278,10 @@ void gd_gl_area_scanout_flush(DisplayChangeListener *dcl, { VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); + if (vc->gfx.guest_fb.dmabuf && !vc->gfx.guest_fb.dmabuf->draw_submitted) { + graphic_hw_gl_block(vc->gfx.dcl.con, true); + vc->gfx.guest_fb.dmabuf->draw_submitted = true; + } gtk_gl_area_queue_render(GTK_GL_AREA(vc->gfx.drawing_area)); } @@ -229,8 +298,13 @@ void gd_gl_area_scanout_dmabuf(DisplayChangeListener *dcl, } gd_gl_area_scanout_texture(dcl, dmabuf->texture, - false, dmabuf->width, dmabuf->height, - 0, 0, dmabuf->width, dmabuf->height); + dmabuf->y0_top, dmabuf->width, dmabuf->height, + dmabuf->x, dmabuf->y, dmabuf->scanout_width, + dmabuf->scanout_height); + + if (dmabuf->allow_fences) { + vc->gfx.guest_fb.dmabuf = dmabuf; + } #endif } @@ -239,7 +313,7 @@ void gtk_gl_area_init(void) display_opengl = 1; } -int gd_gl_area_make_current(DisplayChangeListener *dcl, +int gd_gl_area_make_current(DisplayGLCtx *dgc, QEMUGLContext ctx) { gdk_gl_context_make_current(ctx); diff --git a/ui/gtk.c b/ui/gtk.c index cfb0728d1f..e681e8c319 100644 --- a/ui/gtk.c +++ b/ui/gtk.c @@ -36,6 +36,7 @@ #include "qapi/qapi-commands-machine.h" #include "qapi/qapi-commands-misc.h" #include "qemu/cutils.h" +#include "qemu/main-loop.h" #include "ui/console.h" #include "ui/gtk.h" @@ -575,6 +576,25 @@ static bool gd_has_dmabuf(DisplayChangeListener *dcl) return vc->gfx.has_dmabuf; } +static void gd_gl_release_dmabuf(DisplayChangeListener *dcl, + QemuDmaBuf *dmabuf) +{ +#ifdef 
CONFIG_GBM + egl_dmabuf_release_texture(dmabuf); +#endif +} + +void gd_hw_gl_flushed(void *vcon) +{ + VirtualConsole *vc = vcon; + QemuDmaBuf *dmabuf = vc->gfx.guest_fb.dmabuf; + + qemu_set_fd_handler(dmabuf->fence_fd, NULL, NULL, NULL); + close(dmabuf->fence_fd); + dmabuf->fence_fd = -1; + graphic_hw_gl_block(vc->gfx.dcl.con, false); +} + /** DisplayState Callbacks (opengl version) **/ static const DisplayChangeListenerOps dcl_gl_area_ops = { @@ -586,18 +606,29 @@ static const DisplayChangeListenerOps dcl_gl_area_ops = { .dpy_mouse_set = gd_mouse_set, .dpy_cursor_define = gd_cursor_define, - .dpy_gl_ctx_create = gd_gl_area_create_context, - .dpy_gl_ctx_destroy = gd_gl_area_destroy_context, - .dpy_gl_ctx_make_current = gd_gl_area_make_current, .dpy_gl_scanout_texture = gd_gl_area_scanout_texture, .dpy_gl_scanout_disable = gd_gl_area_scanout_disable, .dpy_gl_update = gd_gl_area_scanout_flush, .dpy_gl_scanout_dmabuf = gd_gl_area_scanout_dmabuf, + .dpy_gl_release_dmabuf = gd_gl_release_dmabuf, .dpy_has_dmabuf = gd_has_dmabuf, }; -#ifdef CONFIG_X11 +static bool +gd_gl_area_is_compatible_dcl(DisplayGLCtx *dgc, + DisplayChangeListener *dcl) +{ + return dcl->ops == &dcl_gl_area_ops; +} +static const DisplayGLCtxOps gl_area_ctx_ops = { + .dpy_gl_ctx_is_compatible_dcl = gd_gl_area_is_compatible_dcl, + .dpy_gl_ctx_create = gd_gl_area_create_context, + .dpy_gl_ctx_destroy = gd_gl_area_destroy_context, + .dpy_gl_ctx_make_current = gd_gl_area_make_current, +}; + +#ifdef CONFIG_X11 static const DisplayChangeListenerOps dcl_egl_ops = { .dpy_name = "gtk-egl", .dpy_gfx_update = gd_egl_update, @@ -607,19 +638,29 @@ static const DisplayChangeListenerOps dcl_egl_ops = { .dpy_mouse_set = gd_mouse_set, .dpy_cursor_define = gd_cursor_define, - .dpy_gl_ctx_create = gd_egl_create_context, - .dpy_gl_ctx_destroy = qemu_egl_destroy_context, - .dpy_gl_ctx_make_current = gd_egl_make_current, .dpy_gl_scanout_disable = gd_egl_scanout_disable, .dpy_gl_scanout_texture = gd_egl_scanout_texture, .dpy_gl_scanout_dmabuf = gd_egl_scanout_dmabuf, .dpy_gl_cursor_dmabuf = gd_egl_cursor_dmabuf, .dpy_gl_cursor_position = gd_egl_cursor_position, - .dpy_gl_release_dmabuf = gd_egl_release_dmabuf, - .dpy_gl_update = gd_egl_scanout_flush, + .dpy_gl_update = gd_egl_flush, + .dpy_gl_release_dmabuf = gd_gl_release_dmabuf, .dpy_has_dmabuf = gd_has_dmabuf, }; +static bool +gd_egl_is_compatible_dcl(DisplayGLCtx *dgc, + DisplayChangeListener *dcl) +{ + return dcl->ops == &dcl_egl_ops; +} + +static const DisplayGLCtxOps egl_ctx_ops = { + .dpy_gl_ctx_is_compatible_dcl = gd_egl_is_compatible_dcl, + .dpy_gl_ctx_create = gd_egl_create_context, + .dpy_gl_ctx_destroy = qemu_egl_destroy_context, + .dpy_gl_ctx_make_current = gd_egl_make_current, +}; #endif #endif /* CONFIG_OPENGL */ @@ -640,9 +681,13 @@ static void gd_mouse_mode_change(Notifier *notify, void *data) s = container_of(notify, GtkDisplayState, mouse_mode_notifier); /* release the grab at switching to absolute mode */ - if (qemu_input_is_absolute() && gd_is_grab_active(s)) { - gtk_check_menu_item_set_active(GTK_CHECK_MENU_ITEM(s->grab_item), - FALSE); + if (qemu_input_is_absolute() && s->ptr_owner) { + if (!s->ptr_owner->window) { + gtk_check_menu_item_set_active(GTK_CHECK_MENU_ITEM(s->grab_item), + FALSE); + } else { + gd_ungrab_pointer(s); + } } for (i = 0; i < s->nb_vcs; i++) { VirtualConsole *vc = &s->vc[i]; @@ -669,14 +714,23 @@ static gboolean gd_window_close(GtkWidget *widget, GdkEvent *event, return TRUE; } -static void gd_set_ui_info(VirtualConsole *vc, gint width, gint height) 
+static void gd_set_ui_refresh_rate(VirtualConsole *vc, int refresh_rate) { QemuUIInfo info; - memset(&info, 0, sizeof(info)); + info = *dpy_get_ui_info(vc->gfx.dcl.con); + info.refresh_rate = refresh_rate; + dpy_set_ui_info(vc->gfx.dcl.con, &info, true); +} + +static void gd_set_ui_size(VirtualConsole *vc, gint width, gint height) +{ + QemuUIInfo info; + + info = *dpy_get_ui_info(vc->gfx.dcl.con); info.width = width; info.height = height; - dpy_set_ui_info(vc->gfx.dcl.con, &info); + dpy_set_ui_info(vc->gfx.dcl.con, &info, true); } #if defined(CONFIG_OPENGL) @@ -697,33 +751,32 @@ static void gd_resize_event(GtkGLArea *area, { VirtualConsole *vc = (void *)opaque; - gd_set_ui_info(vc, width, height); + gd_set_ui_size(vc, width, height); } #endif -/* - * If available, return the update interval of the monitor in ms, - * else return 0 (the default update interval). - */ -int gd_monitor_update_interval(GtkWidget *widget) +void gd_update_monitor_refresh_rate(VirtualConsole *vc, GtkWidget *widget) { #ifdef GDK_VERSION_3_22 GdkWindow *win = gtk_widget_get_window(widget); + int refresh_rate; if (win) { GdkDisplay *dpy = gtk_widget_get_display(widget); GdkMonitor *monitor = gdk_display_get_monitor_at_window(dpy, win); - int refresh_rate = gdk_monitor_get_refresh_rate(monitor); /* [mHz] */ - - if (refresh_rate) { - /* T = 1 / f = 1 [s*Hz] / f = 1000*1000 [ms*mHz] / f */ - return MIN(1000 * 1000 / refresh_rate, - GUI_REFRESH_INTERVAL_DEFAULT); - } + refresh_rate = gdk_monitor_get_refresh_rate(monitor); /* [mHz] */ + } else { + refresh_rate = 0; } + + gd_set_ui_refresh_rate(vc, refresh_rate); + + /* T = 1 / f = 1 [s*Hz] / f = 1000*1000 [ms*mHz] / f */ + vc->gfx.dcl.update_interval = refresh_rate ? + MIN(1000 * 1000 / refresh_rate, GUI_REFRESH_INTERVAL_DEFAULT) : + GUI_REFRESH_INTERVAL_DEFAULT; #endif - return 0; } static gboolean gd_draw_event(GtkWidget *widget, cairo_t *cr, void *opaque) @@ -756,9 +809,11 @@ static gboolean gd_draw_event(GtkWidget *widget, cairo_t *cr, void *opaque) if (!vc->gfx.ds) { return FALSE; } + if (!vc->gfx.surface) { + return FALSE; + } - vc->gfx.dcl.update_interval = - gd_monitor_update_interval(vc->window ? vc->window : s->window); + gd_update_monitor_refresh_rate(vc, vc->window ? 
vc->window : s->window); fbw = surface_width(vc->gfx.ds); fbh = surface_height(vc->gfx.ds); @@ -816,7 +871,7 @@ static gboolean gd_motion_event(GtkWidget *widget, GdkEventMotion *motion, int x, y; int mx, my; int fbh, fbw; - int ww, wh; + int ww, wh, ws; if (!vc->gfx.ds) { return TRUE; @@ -825,8 +880,9 @@ static gboolean gd_motion_event(GtkWidget *widget, GdkEventMotion *motion, fbw = surface_width(vc->gfx.ds) * vc->gfx.scale_x; fbh = surface_height(vc->gfx.ds) * vc->gfx.scale_y; - ww = gdk_window_get_width(gtk_widget_get_window(vc->gfx.drawing_area)); - wh = gdk_window_get_height(gtk_widget_get_window(vc->gfx.drawing_area)); + ww = gtk_widget_get_allocated_width(widget); + wh = gtk_widget_get_allocated_height(widget); + ws = gtk_widget_get_scale_factor(widget); mx = my = 0; if (ww > fbw) { @@ -836,8 +892,8 @@ static gboolean gd_motion_event(GtkWidget *widget, GdkEventMotion *motion, my = (wh - fbh) / 2; } - x = (motion->x - mx) / vc->gfx.scale_x; - y = (motion->y - my) / vc->gfx.scale_y; + x = (motion->x - mx) / vc->gfx.scale_x * ws; + y = (motion->y - my) / vc->gfx.scale_y * ws; if (qemu_input_is_absolute()) { if (x < 0 || y < 0 || @@ -925,6 +981,10 @@ static gboolean gd_button_event(GtkWidget *widget, GdkEventButton *button, return TRUE; } + if (button->type == GDK_2BUTTON_PRESS || button->type == GDK_3BUTTON_PRESS) { + return TRUE; + } + qemu_input_queue_btn(vc->gfx.dcl.con, btn, button->type == GDK_BUTTON_PRESS); qemu_input_event_sync(); @@ -935,33 +995,63 @@ static gboolean gd_scroll_event(GtkWidget *widget, GdkEventScroll *scroll, void *opaque) { VirtualConsole *vc = opaque; - InputButton btn; + InputButton btn_vertical; + InputButton btn_horizontal; + bool has_vertical = false; + bool has_horizontal = false; if (scroll->direction == GDK_SCROLL_UP) { - btn = INPUT_BUTTON_WHEEL_UP; + btn_vertical = INPUT_BUTTON_WHEEL_UP; + has_vertical = true; } else if (scroll->direction == GDK_SCROLL_DOWN) { - btn = INPUT_BUTTON_WHEEL_DOWN; + btn_vertical = INPUT_BUTTON_WHEEL_DOWN; + has_vertical = true; + } else if (scroll->direction == GDK_SCROLL_LEFT) { + btn_horizontal = INPUT_BUTTON_WHEEL_LEFT; + has_horizontal = true; + } else if (scroll->direction == GDK_SCROLL_RIGHT) { + btn_horizontal = INPUT_BUTTON_WHEEL_RIGHT; + has_horizontal = true; } else if (scroll->direction == GDK_SCROLL_SMOOTH) { gdouble delta_x, delta_y; if (!gdk_event_get_scroll_deltas((GdkEvent *)scroll, &delta_x, &delta_y)) { return TRUE; } - if (delta_y == 0) { - return TRUE; - } else if (delta_y > 0) { - btn = INPUT_BUTTON_WHEEL_DOWN; + + if (delta_y > 0) { + btn_vertical = INPUT_BUTTON_WHEEL_DOWN; + has_vertical = true; + } else if (delta_y < 0) { + btn_vertical = INPUT_BUTTON_WHEEL_UP; + has_vertical = true; + } else if (delta_x > 0) { + btn_horizontal = INPUT_BUTTON_WHEEL_RIGHT; + has_horizontal = true; + } else if (delta_x < 0) { + btn_horizontal = INPUT_BUTTON_WHEEL_LEFT; + has_horizontal = true; } else { - btn = INPUT_BUTTON_WHEEL_UP; + return TRUE; } } else { return TRUE; } - qemu_input_queue_btn(vc->gfx.dcl.con, btn, true); - qemu_input_event_sync(); - qemu_input_queue_btn(vc->gfx.dcl.con, btn, false); - qemu_input_event_sync(); + if (has_vertical) { + qemu_input_queue_btn(vc->gfx.dcl.con, btn_vertical, true); + qemu_input_event_sync(); + qemu_input_queue_btn(vc->gfx.dcl.con, btn_vertical, false); + qemu_input_event_sync(); + } + + if (has_horizontal) { + qemu_input_queue_btn(vc->gfx.dcl.con, btn_horizontal, true); + qemu_input_event_sync(); + qemu_input_queue_btn(vc->gfx.dcl.con, btn_horizontal, false); + 
qemu_input_event_sync(); + } + return TRUE; } @@ -1220,6 +1310,16 @@ static gboolean gd_tab_window_close(GtkWidget *widget, GdkEvent *event, vc->tab_item, vc->label); gtk_widget_destroy(vc->window); vc->window = NULL; +#if defined(CONFIG_OPENGL) + if (vc->gfx.esurface) { + eglDestroySurface(qemu_egl_display, vc->gfx.esurface); + vc->gfx.esurface = NULL; + } + if (vc->gfx.ectx) { + eglDestroyContext(qemu_egl_display, vc->gfx.ectx); + vc->gfx.ectx = NULL; + } +#endif return TRUE; } @@ -1249,6 +1349,16 @@ static void gd_menu_untabify(GtkMenuItem *item, void *opaque) if (!vc->window) { gtk_widget_set_sensitive(vc->menu_item, false); vc->window = gtk_window_new(GTK_WINDOW_TOPLEVEL); +#if defined(CONFIG_OPENGL) + if (vc->gfx.esurface) { + eglDestroySurface(qemu_egl_display, vc->gfx.esurface); + vc->gfx.esurface = NULL; + } + if (vc->gfx.esurface) { + eglDestroyContext(qemu_egl_display, vc->gfx.ectx); + vc->gfx.ectx = NULL; + } +#endif gd_widget_reparent(s->notebook, vc->window, vc->tab_item); g_signal_connect(vc->window, "delete-event", @@ -1590,7 +1700,7 @@ static gboolean gd_configure(GtkWidget *widget, { VirtualConsole *vc = opaque; - gd_set_ui_info(vc, cfg->width, cfg->height); + gd_set_ui_size(vc, cfg->width, cfg->height); return FALSE; } @@ -1651,7 +1761,7 @@ static void gd_vc_send_chars(VirtualConsole *vc) uint32_t size; buf = fifo8_pop_buf(&vc->vte.out_fifo, MIN(len, avail), &size); - qemu_chr_be_write(vc->vte.chr, (uint8_t *)buf, size); + qemu_chr_be_write(vc->vte.chr, buf, size); len = qemu_chr_be_can_write(vc->vte.chr); avail -= size; } @@ -1671,7 +1781,9 @@ static void gd_vc_chr_accept_input(Chardev *chr) VCChardev *vcd = VC_CHARDEV(chr); VirtualConsole *vc = vcd->console; - gd_vc_send_chars(vc); + if (vc) { + gd_vc_send_chars(vc); + } } static void gd_vc_chr_set_echo(Chardev *chr, bool echo) @@ -1987,6 +2099,7 @@ static GSList *gd_vc_gfx_init(GtkDisplayState *s, VirtualConsole *vc, g_signal_connect(vc->gfx.drawing_area, "realize", G_CALLBACK(gl_area_realize), vc); vc->gfx.dcl.ops = &dcl_gl_area_ops; + vc->gfx.dgc.ops = &gl_area_ctx_ops; } else { #ifdef CONFIG_X11 vc->gfx.drawing_area = gtk_drawing_area_new(); @@ -2001,6 +2114,7 @@ static GSList *gd_vc_gfx_init(GtkDisplayState *s, VirtualConsole *vc, gtk_widget_set_double_buffered(vc->gfx.drawing_area, FALSE); #pragma GCC diagnostic pop vc->gfx.dcl.ops = &dcl_egl_ops; + vc->gfx.dgc.ops = &egl_ctx_ops; vc->gfx.has_dmabuf = qemu_egl_has_dmabuf(); #else abort(); @@ -2035,6 +2149,9 @@ static GSList *gd_vc_gfx_init(GtkDisplayState *s, VirtualConsole *vc, vc->gfx.kbd = qkbd_state_init(con); vc->gfx.dcl.con = con; + if (display_opengl) { + qemu_console_set_display_gl_ctx(con, &vc->gfx.dgc); + } register_displaychangelistener(&vc->gfx.dcl); gd_connect_vc_gfx_signals(vc); @@ -2054,7 +2171,7 @@ static GSList *gd_vc_gfx_init(GtkDisplayState *s, VirtualConsole *vc, return group; } -static GtkWidget *gd_create_menu_view(GtkDisplayState *s) +static GtkWidget *gd_create_menu_view(GtkDisplayState *s, DisplayOptions *opts) { GSList *group = NULL; GtkWidget *view_menu; @@ -2152,7 +2269,8 @@ static GtkWidget *gd_create_menu_view(GtkDisplayState *s) s->show_menubar_item = gtk_check_menu_item_new_with_mnemonic( _("Show Menubar")); gtk_check_menu_item_set_active(GTK_CHECK_MENU_ITEM(s->show_menubar_item), - TRUE); + !opts->u.gtk.has_show_menubar || + opts->u.gtk.show_menubar); gtk_accel_group_connect(s->accel_group, GDK_KEY_m, HOTKEY_MODIFIERS, 0, g_cclosure_new_swap(G_CALLBACK(gd_accel_show_menubar), s, NULL)); gtk_accel_label_set_accel( @@ -2163,13 
+2281,13 @@ static GtkWidget *gd_create_menu_view(GtkDisplayState *s) return view_menu; } -static void gd_create_menus(GtkDisplayState *s) +static void gd_create_menus(GtkDisplayState *s, DisplayOptions *opts) { GtkSettings *settings; s->accel_group = gtk_accel_group_new(); s->machine_menu = gd_create_menu_machine(s); - s->view_menu = gd_create_menu_view(s); + s->view_menu = gd_create_menu_view(s, opts); s->machine_menu_item = gtk_menu_item_new_with_mnemonic(_("_Machine")); gtk_menu_item_set_submenu(GTK_MENU_ITEM(s->machine_menu_item), @@ -2246,7 +2364,7 @@ static void gtk_display_init(DisplayState *ds, DisplayOptions *opts) gtk_window_set_icon_name(GTK_WINDOW(s->window), "qemu"); - gd_create_menus(s); + gd_create_menus(s, opts); gd_connect_signals(s); @@ -2261,6 +2379,10 @@ static void gtk_display_init(DisplayState *ds, DisplayOptions *opts) gtk_container_add(GTK_CONTAINER(s->window), s->vbox); gtk_widget_show_all(s->window); + if (opts->u.gtk.has_show_menubar && + !opts->u.gtk.show_menubar) { + gtk_widget_hide(s->menu_bar); + } vc = gd_vc_find_current(s); gtk_widget_set_sensitive(s->view_menu, vc != NULL); @@ -2277,7 +2399,13 @@ static void gtk_display_init(DisplayState *ds, DisplayOptions *opts) opts->u.gtk.grab_on_hover) { gtk_menu_item_activate(GTK_MENU_ITEM(s->grab_on_hover_item)); } + if (opts->u.gtk.has_show_tabs && + opts->u.gtk.show_tabs) { + gtk_menu_item_activate(GTK_MENU_ITEM(s->show_tabs_item)); + } +#ifdef CONFIG_GTK_CLIPBOARD gd_clipboard_init(s); +#endif /* CONFIG_GTK_CLIPBOARD */ } static void early_gtk_display_init(DisplayOptions *opts) diff --git a/ui/input-legacy.c b/ui/input-legacy.c index 9fc78a639b..46ea74e44d 100644 --- a/ui/input-legacy.c +++ b/ui/input-legacy.c @@ -23,6 +23,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "qapi/qapi-commands-ui.h" #include "ui/console.h" #include "keymaps.h" @@ -179,6 +180,20 @@ static void legacy_mouse_event(DeviceState *dev, QemuConsole *src, 1, s->buttons); } + if (btn->down && btn->button == INPUT_BUTTON_WHEEL_RIGHT) { + s->qemu_put_mouse_event(s->qemu_put_mouse_event_opaque, + s->axis[INPUT_AXIS_X], + s->axis[INPUT_AXIS_Y], + -2, + s->buttons); + } + if (btn->down && btn->button == INPUT_BUTTON_WHEEL_LEFT) { + s->qemu_put_mouse_event(s->qemu_put_mouse_event_opaque, + s->axis[INPUT_AXIS_X], + s->axis[INPUT_AXIS_Y], + 2, + s->buttons); + } break; case INPUT_EVENT_KIND_ABS: move = evt->u.abs.data; diff --git a/ui/input-linux.c b/ui/input-linux.c index 05c0c98819..e572a2e905 100644 --- a/ui/input-linux.c +++ b/ui/input-linux.c @@ -316,7 +316,10 @@ static void input_linux_complete(UserCreatable *uc, Error **errp) error_setg_file_open(errp, errno, il->evdev); return; } - qemu_set_nonblock(il->fd); + if (!g_unix_set_fd_nonblocking(il->fd, true, NULL)) { + error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + return; + } rc = ioctl(il->fd, EVIOCGVERSION, &ver); if (rc < 0) { diff --git a/ui/input.c b/ui/input.c index 8ac407dec4..e2a90af889 100644 --- a/ui/input.c +++ b/ui/input.c @@ -364,7 +364,7 @@ void qemu_input_event_send(QemuConsole *src, InputEvent *evt) * when 'alt+print' was pressed. This flaw is now fixed and the * 'sysrq' key serves no further purpose. 
We normalize it to * 'print', so that downstream receivers of the event don't - * neeed to deal with this mistake + * need to deal with this mistake */ if (evt->type == INPUT_EVENT_KIND_KEY && evt->u.key.data->key->u.qcode.data == Q_KEY_CODE_SYSRQ) { diff --git a/ui/keymaps.c b/ui/keymaps.c index d4a647464b..6ceaa97085 100644 --- a/ui/keymaps.c +++ b/ui/keymaps.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/datadir.h" #include "keymaps.h" #include "trace.h" diff --git a/ui/meson.build b/ui/meson.build index 9b54489518..d09be2dbc0 100644 --- a/ui/meson.build +++ b/ui/meson.build @@ -1,6 +1,8 @@ softmmu_ss.add(pixman) specific_ss.add(when: ['CONFIG_SOFTMMU'], if_true: pixman) # for the include path +specific_ss.add(when: ['CONFIG_SOFTMMU'], if_true: opengl) # for the include path +softmmu_ss.add(png) softmmu_ss.add(files( 'clipboard.c', 'console.c', @@ -12,7 +14,7 @@ softmmu_ss.add(files( 'kbd-state.c', 'keymaps.c', 'qemu-pixman.c', - 'udmabuf.c', + 'util.c', )) subdir('thirdparty') @@ -39,21 +41,26 @@ xemu_ss.add(xemu_cocoa) endif if 'CONFIG_LINUX' in config_host -xemu_ss.add(xemu_gtk) +xemu_ss.add(gtk) endif -xemu_ss.add(when: 'CONFIG_LINUX', if_true: [xemu_gtk, files('xemu-os-utils-linux.c')]) +xemu_ss.add(when: 'CONFIG_LINUX', if_true: [gtk, files('xemu-os-utils-linux.c')]) xemu_ss.add(when: 'CONFIG_WIN32', if_true: files('xemu-os-utils-windows.c')) xemu_ss.add(when: 'CONFIG_DARWIN', if_true: files('xemu-os-utils-macos.m')) -xemu_ss.add(when: 'CONFIG_RENDERDOC', if_true: [libdl]) xemu_ss.add(imgui, implot, stb_image, noc, sdl, opengl, openssl, fa, fpng, json, httplib) softmmu_ss.add_all(xemu_ss) +if dbus_display + softmmu_ss.add(files('dbus-module.c')) +endif softmmu_ss.add([spice_headers, files('spice-module.c')]) softmmu_ss.add(when: spice_protocol, if_true: files('vdagent.c')) -softmmu_ss.add(when: 'CONFIG_LINUX', if_true: files('input-linux.c')) +softmmu_ss.add(when: 'CONFIG_LINUX', if_true: files( + 'input-linux.c', + 'udmabuf.c', +)) softmmu_ss.add(when: cocoa, if_true: files('cocoa.m')) vnc_ss = ss.source_set() @@ -69,11 +76,10 @@ vnc_ss.add(files( 'vnc-jobs.c', 'vnc-clipboard.c', )) -vnc_ss.add(zlib, png, jpeg, gnutls) +vnc_ss.add(zlib, jpeg, gnutls) vnc_ss.add(when: sasl, if_true: files('vnc-auth-sasl.c')) softmmu_ss.add_all(when: vnc, if_true: vnc_ss) softmmu_ss.add(when: vnc, if_false: files('vnc-stubs.c')) -specific_ss.add(when: ['CONFIG_SOFTMMU'], if_true: opengl) ui_modules = {} @@ -87,29 +93,56 @@ if curses.found() ui_modules += {'curses' : curses_ss} endif -if config_host.has_key('CONFIG_OPENGL') +softmmu_ss.add(opengl) +if opengl.found() opengl_ss = ss.source_set() opengl_ss.add(gbm) - opengl_ss.add(when: [opengl, pixman, 'CONFIG_OPENGL'], + opengl_ss.add(when: [opengl, pixman], if_true: files('shader.c', 'console-gl.c', 'egl-helpers.c', 'egl-context.c')) ui_modules += {'opengl' : opengl_ss} endif -if config_host.has_key('CONFIG_OPENGL') and gbm.found() +if opengl.found() and gbm.found() egl_headless_ss = ss.source_set() - egl_headless_ss.add(when: [opengl, gbm, pixman, 'CONFIG_OPENGL'], + egl_headless_ss.add(when: [opengl, gbm, pixman], if_true: files('egl-headless.c')) ui_modules += {'egl-headless' : egl_headless_ss} endif +if dbus_display + dbus_ss = ss.source_set() + dbus_display1 = custom_target('dbus-display gdbus-codegen', + output: ['dbus-display1.h', 'dbus-display1.c'], + input: files('dbus-display1.xml'), + command: [gdbus_codegen, '@INPUT@', + '--glib-min-required', '2.64', + '--output-directory', 
meson.current_build_dir(), + '--interface-prefix', 'org.qemu.', + '--c-namespace', 'QemuDBus', + '--generate-c-code', '@BASENAME@']) + dbus_ss.add(when: [gio, pixman, opengl, gbm], + if_true: [files( + 'dbus-chardev.c', + 'dbus-clipboard.c', + 'dbus-console.c', + 'dbus-error.c', + 'dbus-listener.c', + 'dbus.c', + ), dbus_display1]) + ui_modules += {'dbus' : dbus_ss} +endif + if gtk.found() softmmu_ss.add(when: 'CONFIG_WIN32', if_true: files('win32-kbd-hook.c')) gtk_ss = ss.source_set() - gtk_ss.add(gtk, vte, pixman, files('gtk.c', 'gtk-clipboard.c')) + gtk_ss.add(gtk, vte, pixman, files('gtk.c')) + if have_gtk_clipboard + gtk_ss.add(files('gtk-clipboard.c')) + endif gtk_ss.add(when: x11, if_true: files('x_keymap.c')) - gtk_ss.add(when: [opengl, 'CONFIG_OPENGL'], if_true: files('gtk-gl-area.c')) - gtk_ss.add(when: [x11, opengl, 'CONFIG_OPENGL'], if_true: files('gtk-egl.c')) + gtk_ss.add(when: opengl, if_true: files('gtk-gl-area.c')) + gtk_ss.add(when: [x11, opengl], if_true: files('gtk-egl.c')) ui_modules += {'gtk' : gtk_ss} endif @@ -122,12 +155,12 @@ if sdl.found() 'sdl2-input.c', 'sdl2.c', )) - sdl_ss.add(when: [opengl, 'CONFIG_OPENGL'], if_true: files('sdl2-gl.c')) + sdl_ss.add(when: opengl, if_true: files('sdl2-gl.c')) sdl_ss.add(when: x11, if_true: files('x_keymap.c')) ui_modules += {'sdl' : sdl_ss} endif -if config_host.has_key('CONFIG_SPICE') +if spice.found() spice_core_ss = ss.source_set() spice_core_ss.add(spice, pixman, files( 'spice-core.c', @@ -137,7 +170,7 @@ if config_host.has_key('CONFIG_SPICE') ui_modules += {'spice-core' : spice_core_ss} endif -if config_host.has_key('CONFIG_SPICE') and config_host.has_key('CONFIG_GIO') +if spice.found() and gio.found() spice_ss = ss.source_set() spice_ss.add(spice, gio, pixman, files('spice-app.c')) ui_modules += {'spice-app': spice_ss} @@ -145,8 +178,6 @@ endif endif # xemu_enable_extra_ui_modules -keymap_gen = find_program('keycodemapdb/tools/keymap-gen') - keymaps = [ ['atset1', 'qcode'], ['linux', 'qcode'], @@ -174,7 +205,7 @@ if have_system or xkbcommon.found() output: output, capture: true, input: files('keycodemapdb/data/keymaps.csv'), - command: [python.full_path(), files('keycodemapdb/tools/keymap-gen'), + command: [python, files('keycodemapdb/tools/keymap-gen'), 'code-map', '--lang', 'glib2', '--varname', 'qemu_input_map_@0@_to_@1@'.format(e[0], e[1]), diff --git a/ui/sdl2-gl.c b/ui/sdl2-gl.c index a21d2deed9..bbfa70eac3 100644 --- a/ui/sdl2-gl.c +++ b/ui/sdl2-gl.c @@ -58,7 +58,6 @@ static void sdl2_gl_render_surface(struct sdl2_console *scon) surface_gl_render_texture(scon->gls, scon->surface); SDL_GL_SwapWindow(scon->real_window); - graphic_hw_gl_flushed(scon->dcl.con); } void sdl2_gl_update(DisplayChangeListener *dcl, @@ -68,6 +67,10 @@ void sdl2_gl_update(DisplayChangeListener *dcl, assert(scon->opengl); + if (!scon->real_window) { + return; + } + SDL_GL_MakeCurrent(scon->real_window, scon->winctx); surface_gl_update_texture(scon->gls, scon->surface, x, y, w, h); scon->updates++; @@ -133,10 +136,10 @@ void sdl2_gl_redraw(struct sdl2_console *scon) } } -QEMUGLContext sdl2_gl_create_context(DisplayChangeListener *dcl, +QEMUGLContext sdl2_gl_create_context(DisplayGLCtx *dgc, QEMUGLParams *params) { - struct sdl2_console *scon = container_of(dcl, struct sdl2_console, dcl); + struct sdl2_console *scon = container_of(dgc, struct sdl2_console, dgc); SDL_GLContext ctx; assert(scon->opengl); @@ -168,17 +171,17 @@ QEMUGLContext sdl2_gl_create_context(DisplayChangeListener *dcl, return (QEMUGLContext)ctx; } -void 
sdl2_gl_destroy_context(DisplayChangeListener *dcl, QEMUGLContext ctx) +void sdl2_gl_destroy_context(DisplayGLCtx *dgc, QEMUGLContext ctx) { SDL_GLContext sdlctx = (SDL_GLContext)ctx; SDL_GL_DeleteContext(sdlctx); } -int sdl2_gl_make_context_current(DisplayChangeListener *dcl, +int sdl2_gl_make_context_current(DisplayGLCtx *dgc, QEMUGLContext ctx) { - struct sdl2_console *scon = container_of(dcl, struct sdl2_console, dcl); + struct sdl2_console *scon = container_of(dgc, struct sdl2_console, dgc); SDL_GLContext sdlctx = (SDL_GLContext)ctx; assert(scon->opengl); @@ -241,5 +244,4 @@ void sdl2_gl_scanout_flush(DisplayChangeListener *dcl, egl_fb_blit(&scon->win_fb, &scon->guest_fb, !scon->y0_top); SDL_GL_SwapWindow(scon->real_window); - graphic_hw_gl_flushed(dcl->con); } diff --git a/ui/sdl2.c b/ui/sdl2.c index 17c0ec30eb..d630459b78 100644 --- a/ui/sdl2.c +++ b/ui/sdl2.c @@ -33,12 +33,15 @@ #include "sysemu/runstate-action.h" #include "sysemu/sysemu.h" #include "ui/win32-kbd-hook.h" +#include "qemu/log.h" static int sdl2_num_outputs; static struct sdl2_console *sdl2_console; static SDL_Surface *guest_sprite_surface; static int gui_grab; /* if true, all keyboard/mouse events are grabbed */ +static bool alt_grab; +static bool ctrl_grab; static int gui_saved_grab; static int gui_fullscreen; @@ -535,6 +538,10 @@ static void handle_mousewheel(SDL_Event *ev) btn = INPUT_BUTTON_WHEEL_UP; } else if (wev->y < 0) { btn = INPUT_BUTTON_WHEEL_DOWN; + } else if (wev->x < 0) { + btn = INPUT_BUTTON_WHEEL_RIGHT; + } else if (wev->x > 0) { + btn = INPUT_BUTTON_WHEEL_LEFT; } else { return; } @@ -561,7 +568,7 @@ static void handle_windowevent(SDL_Event *ev) memset(&info, 0, sizeof(info)); info.width = ev->window.data1; info.height = ev->window.data2; - dpy_set_ui_info(scon->dcl.con, &info); + dpy_set_ui_info(scon->dcl.con, &info, true); } sdl2_redraw(scon); break; @@ -778,13 +785,24 @@ static const DisplayChangeListenerOps dcl_gl_ops = { .dpy_mouse_set = sdl_mouse_warp, .dpy_cursor_define = sdl_mouse_define, - .dpy_gl_ctx_create = sdl2_gl_create_context, - .dpy_gl_ctx_destroy = sdl2_gl_destroy_context, - .dpy_gl_ctx_make_current = sdl2_gl_make_context_current, .dpy_gl_scanout_disable = sdl2_gl_scanout_disable, .dpy_gl_scanout_texture = sdl2_gl_scanout_texture, .dpy_gl_update = sdl2_gl_scanout_flush, }; + +static bool +sdl2_gl_is_compatible_dcl(DisplayGLCtx *dgc, + DisplayChangeListener *dcl) +{ + return dcl->ops == &dcl_gl_ops; +} + +static const DisplayGLCtxOps gl_ctx_ops = { + .dpy_gl_ctx_is_compatible_dcl = sdl2_gl_is_compatible_dcl, + .dpy_gl_ctx_create = sdl2_gl_create_context, + .dpy_gl_ctx_destroy = sdl2_gl_destroy_context, + .dpy_gl_ctx_make_current = sdl2_gl_make_context_current, +}; #endif static void sdl2_display_early_init(DisplayOptions *o) @@ -831,12 +849,27 @@ static void sdl2_display_init(DisplayState *ds, DisplayOptions *o) #ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR /* only available since SDL 2.0.8 */ SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0"); #endif +#ifndef CONFIG_WIN32 + /* QEMU uses its own low level keyboard hook procecure on Windows */ SDL_SetHint(SDL_HINT_GRAB_KEYBOARD, "1"); +#endif +#ifdef SDL_HINT_ALLOW_ALT_TAB_WHILE_GRABBED + SDL_SetHint(SDL_HINT_ALLOW_ALT_TAB_WHILE_GRABBED, "0"); +#endif + SDL_SetHint(SDL_HINT_WINDOWS_NO_CLOSE_ON_ALT_F4, "1"); memset(&info, 0, sizeof(info)); SDL_VERSION(&info.version); gui_fullscreen = o->has_full_screen && o->full_screen; + if (o->u.sdl.has_grab_mod) { + if (o->u.sdl.grab_mod == HOT_KEY_MOD_LSHIFT_LCTRL_LALT) { + alt_grab 
= true; + } else if (o->u.sdl.grab_mod == HOT_KEY_MOD_RCTRL) { + ctrl_grab = true; + } + } + for (i = 0;; i++) { QemuConsole *con = qemu_console_lookup_by_index(i); if (!con) { @@ -860,12 +893,16 @@ static void sdl2_display_init(DisplayState *ds, DisplayOptions *o) #ifdef CONFIG_OPENGL sdl2_console[i].opengl = display_opengl; sdl2_console[i].dcl.ops = display_opengl ? &dcl_gl_ops : &dcl_2d_ops; + sdl2_console[i].dgc.ops = display_opengl ? &gl_ctx_ops : NULL; #else sdl2_console[i].opengl = 0; sdl2_console[i].dcl.ops = &dcl_2d_ops; #endif sdl2_console[i].dcl.con = con; sdl2_console[i].kbd = qkbd_state_init(con); + if (display_opengl) { + qemu_console_set_display_gl_ctx(con, &sdl2_console[i].dgc); + } register_displaychangelistener(&sdl2_console[i].dcl); #if defined(SDL_VIDEO_DRIVER_WINDOWS) || defined(SDL_VIDEO_DRIVER_X11) diff --git a/ui/shader.c b/ui/shader.c index e8b8d321b7..ab448c41d4 100644 --- a/ui/shader.c +++ b/ui/shader.c @@ -130,15 +130,17 @@ static GLuint qemu_gl_create_link_program(GLuint vert, GLuint frag) static GLuint qemu_gl_create_compile_link_program(const GLchar *vert_src, const GLchar *frag_src) { - GLuint vert_shader, frag_shader, program; + GLuint vert_shader, frag_shader, program = 0; vert_shader = qemu_gl_create_compile_shader(GL_VERTEX_SHADER, vert_src); frag_shader = qemu_gl_create_compile_shader(GL_FRAGMENT_SHADER, frag_src); if (!vert_shader || !frag_shader) { - return 0; + goto end; } program = qemu_gl_create_link_program(vert_shader, frag_shader); + +end: glDeleteShader(vert_shader); glDeleteShader(frag_shader); @@ -170,5 +172,8 @@ void qemu_gl_fini_shader(QemuGLShader *gls) if (!gls) { return; } + glDeleteProgram(gls->texture_blit_prog); + glDeleteProgram(gls->texture_blit_flip_prog); + glDeleteProgram(gls->texture_blit_vao); g_free(gls); } diff --git a/ui/spice-core.c b/ui/spice-core.c index 0371055e6c..c3ac20ad43 100644 --- a/ui/spice-core.c +++ b/ui/spice-core.c @@ -671,18 +671,13 @@ static void qemu_spice_init(void) } passwordSecret = qemu_opt_get(opts, "password-secret"); if (passwordSecret) { - Error *local_err = NULL; if (qemu_opt_get(opts, "password")) { error_report("'password' option is mutually exclusive with " "'password-secret'"); exit(1); } password = qcrypto_secret_lookup_as_utf8(passwordSecret, - &local_err); - if (!password) { - error_report_err(local_err); - exit(1); - } + &error_fatal); } else { str = qemu_opt_get(opts, "password"); if (str) { @@ -889,56 +884,6 @@ bool qemu_spice_have_display_interface(QemuConsole *con) return false; } -/* - * Recursively (in reverse order) appends addresses of PCI devices as it moves - * up in the PCI hierarchy. 
- * - * @returns true on success, false when the buffer wasn't large enough - */ -static bool append_pci_address(char *buf, size_t buf_size, const PCIDevice *pci) -{ - PCIBus *bus = pci_get_bus(pci); - /* - * equivalent to if (!pci_bus_is_root(bus)), but the function is not built - * with PCI_CONFIG=n, avoid using an #ifdef by checking directly - */ - if (bus->parent_dev != NULL) { - append_pci_address(buf, buf_size, bus->parent_dev); - } - - size_t len = strlen(buf); - ssize_t written = snprintf(buf + len, buf_size - len, "/%02x.%x", - PCI_SLOT(pci->devfn), PCI_FUNC(pci->devfn)); - - return written > 0 && written < buf_size - len; -} - -bool qemu_spice_fill_device_address(QemuConsole *con, - char *device_address, - size_t size) -{ - DeviceState *dev = DEVICE(object_property_get_link(OBJECT(con), - "device", - &error_abort)); - PCIDevice *pci = (PCIDevice *) object_dynamic_cast(OBJECT(dev), - TYPE_PCI_DEVICE); - - if (pci == NULL) { - warn_report("Setting device address of a display device to SPICE: " - "Not a PCI device."); - return false; - } - - strncpy(device_address, "pci/0000", size); - if (!append_pci_address(device_address, size, pci)) { - warn_report("Setting device address of a display device to SPICE: " - "Too many PCI devices in the chain."); - return false; - } - - return true; -} - int qemu_spice_add_display_interface(QXLInstance *qxlin, QemuConsole *con) { if (g_slist_find(spice_consoles, con)) { diff --git a/ui/spice-display.c b/ui/spice-display.c index f59c69882d..494168e7fe 100644 --- a/ui/spice-display.c +++ b/ui/spice-display.c @@ -500,10 +500,17 @@ void qemu_spice_display_refresh(SimpleSpiceDisplay *ssd) /* spice display interface callbacks */ +#if SPICE_HAS_ATTACHED_WORKER +static void interface_attached_worker(QXLInstance *sin) +{ + /* nothing to do */ +} +#else static void interface_attach_worker(QXLInstance *sin, QXLWorker *qxl_worker) { /* nothing to do */ } +#endif static void interface_set_compression_level(QXLInstance *sin, int level) { @@ -692,7 +699,7 @@ static int interface_client_monitors_config(QXLInstance *sin, } trace_qemu_spice_ui_info(ssd->qxl.id, info.width, info.height); - dpy_set_ui_info(ssd->dcl.con, &info); + dpy_set_ui_info(ssd->dcl.con, &info, false); return 1; } @@ -702,7 +709,11 @@ static const QXLInterface dpy_interface = { .base.major_version = SPICE_INTERFACE_QXL_MAJOR, .base.minor_version = SPICE_INTERFACE_QXL_MINOR, +#if SPICE_HAS_ATTACHED_WORKER + .attached_worker = interface_attached_worker, +#else .attache_worker = interface_attach_worker, +#endif .set_compression_level = interface_set_compression_level, #if SPICE_NEEDS_SET_MM_TIME .set_mm_time = interface_set_mm_time, @@ -830,7 +841,6 @@ static void qemu_spice_gl_unblock_bh(void *opaque) SimpleSpiceDisplay *ssd = opaque; qemu_spice_gl_block(ssd, false); - graphic_hw_gl_flushed(ssd->dcl.con); } static void qemu_spice_gl_block_timer(void *opaque) @@ -909,12 +919,12 @@ static void spice_gl_switch(DisplayChangeListener *dcl, } } -static QEMUGLContext qemu_spice_gl_create_context(DisplayChangeListener *dcl, +static QEMUGLContext qemu_spice_gl_create_context(DisplayGLCtx *dgc, QEMUGLParams *params) { eglMakeCurrent(qemu_egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE, qemu_egl_rn_ctx); - return qemu_egl_create_context(dcl, params); + return qemu_egl_create_context(dgc, params); } static void qemu_spice_gl_scanout_disable(DisplayChangeListener *dcl) @@ -1106,10 +1116,6 @@ static const DisplayChangeListenerOps display_listener_gl_ops = { .dpy_mouse_set = display_mouse_set, .dpy_cursor_define = 
display_mouse_define, - .dpy_gl_ctx_create = qemu_spice_gl_create_context, - .dpy_gl_ctx_destroy = qemu_egl_destroy_context, - .dpy_gl_ctx_make_current = qemu_egl_make_context_current, - .dpy_gl_scanout_disable = qemu_spice_gl_scanout_disable, .dpy_gl_scanout_texture = qemu_spice_gl_scanout_texture, .dpy_gl_scanout_dmabuf = qemu_spice_gl_scanout_dmabuf, @@ -1119,6 +1125,20 @@ static const DisplayChangeListenerOps display_listener_gl_ops = { .dpy_gl_update = qemu_spice_gl_update, }; +static bool +qemu_spice_is_compatible_dcl(DisplayGLCtx *dgc, + DisplayChangeListener *dcl) +{ + return dcl->ops == &display_listener_gl_ops; +} + +static const DisplayGLCtxOps gl_ctx_ops = { + .dpy_gl_ctx_is_compatible_dcl = qemu_spice_is_compatible_dcl, + .dpy_gl_ctx_create = qemu_spice_gl_create_context, + .dpy_gl_ctx_destroy = qemu_egl_destroy_context, + .dpy_gl_ctx_make_current = qemu_egl_make_context_current, +}; + #endif /* HAVE_SPICE_GL */ static void qemu_spice_display_init_one(QemuConsole *con) @@ -1131,6 +1151,7 @@ static void qemu_spice_display_init_one(QemuConsole *con) #ifdef HAVE_SPICE_GL if (spice_opengl) { ssd->dcl.ops = &display_listener_gl_ops; + ssd->dgc.ops = &gl_ctx_ops; ssd->gl_unblock_bh = qemu_bh_new(qemu_spice_gl_unblock_bh, ssd); ssd->gl_unblock_timer = timer_new_ms(QEMU_CLOCK_REALTIME, qemu_spice_gl_block_timer, ssd); @@ -1145,17 +1166,23 @@ static void qemu_spice_display_init_one(QemuConsole *con) qemu_spice_add_display_interface(&ssd->qxl, con); #if SPICE_SERVER_VERSION >= 0x000e02 /* release 0.14.2 */ + Error *err = NULL; char device_address[256] = ""; - if (qemu_spice_fill_device_address(con, device_address, 256)) { + if (qemu_console_fill_device_address(con, device_address, 256, &err)) { spice_qxl_set_device_info(&ssd->qxl, device_address, qemu_console_get_head(con), 1); + } else { + error_report_err(err); } #endif qemu_spice_create_host_memslot(ssd); + if (spice_opengl) { + qemu_console_set_display_gl_ctx(con, &ssd->dgc); + } register_displaychangelistener(&ssd->dcl); } diff --git a/ui/thirdparty/meson.build b/ui/thirdparty/meson.build index 528b9f40be..213e26edb3 100644 --- a/ui/thirdparty/meson.build +++ b/ui/thirdparty/meson.build @@ -34,7 +34,7 @@ implot = declare_dependency(link_with: libimplot, include_directories: 'implot') noc_ss = ss.source_set() -noc_ss.add(when: 'CONFIG_LINUX', if_true: [xemu_gtk, files('noc_file_dialog/noc_file_dialog_gtk.c')]) +noc_ss.add(when: 'CONFIG_LINUX', if_true: [gtk, files('noc_file_dialog/noc_file_dialog_gtk.c')]) noc_ss.add(when: 'CONFIG_WIN32', if_true: files('noc_file_dialog/noc_file_dialog_win32.cc')) noc_ss.add(when: 'CONFIG_DARWIN', if_true: files('noc_file_dialog/noc_file_dialog_macos.m')) noc_ss = noc_ss.apply(config_all, strict: false) diff --git a/ui/trace-events b/ui/trace-events index 1b5f87bc09..977577fbba 100644 --- a/ui/trace-events +++ b/ui/trace-events @@ -26,6 +26,8 @@ gd_key_event(const char *tab, int gdk_keycode, int qkeycode, const char *action) gd_grab(const char *tab, const char *device, const char *reason) "tab=%s, dev=%s, reason=%s" gd_ungrab(const char *tab, const char *device) "tab=%s, dev=%s" gd_keymap_windowing(const char *name) "backend=%s" +gd_gl_area_create_context(void *ctx, int major, int minor) "ctx=%p, major=%d, minor=%d" +gd_gl_area_destroy_context(void *ctx, void *current_ctx) "ctx=%p, current_ctx=%p" # vnc-auth-sasl.c # vnc-auth-vencrypt.c @@ -125,12 +127,33 @@ xkeymap_vendor(const char *name) "vendor '%s'" xkeymap_keycodes(const char *name) "keycodes '%s'" xkeymap_keymap(const char *name) "keymap 
'%s'" +# clipboard.c +clipboard_check_serial(int cur, int recv, bool ok) "cur:%d recv:%d %d" + # vdagent.c vdagent_open(void) "" vdagent_close(void) "" +vdagent_disconnect(void) "" vdagent_send(const char *name) "msg %s" +vdagent_send_empty_clipboard(void) "" vdagent_recv_chunk(uint32_t size) "size %d" vdagent_recv_msg(const char *name, uint32_t size) "msg %s, size %d" vdagent_peer_cap(const char *name) "cap %s" vdagent_cb_grab_selection(const char *name) "selection %s" +vdagent_cb_grab_discard(const char *name, int cur, int recv) "selection %s, cur:%d recv:%d" vdagent_cb_grab_type(const char *name) "type %s" +vdagent_cb_serial_discard(uint32_t current, uint32_t received) "current=%u, received=%u" + +# dbus.c +dbus_registered_listener(const char *bus_name) "peer %s" +dbus_listener_vanished(const char *bus_name) "peer %s" +dbus_kbd_press(unsigned int keycode) "keycode %u" +dbus_kbd_release(unsigned int keycode) "keycode %u" +dbus_mouse_press(unsigned int button) "button %u" +dbus_mouse_release(unsigned int button) "button %u" +dbus_mouse_set_pos(unsigned int x, unsigned int y) "x=%u, y=%u" +dbus_mouse_rel_motion(int dx, int dy) "dx=%d, dy=%d" +dbus_update(int x, int y, int w, int h) "x=%d, y=%d, w=%d, h=%d" +dbus_clipboard_grab_failed(void) "" +dbus_clipboard_register(const char *bus_name) "peer %s" +dbus_clipboard_unregister(const char *bus_name) "peer %s" diff --git a/ui/udmabuf.c b/ui/udmabuf.c index 23abe1e7eb..cebceb2610 100644 --- a/ui/udmabuf.c +++ b/ui/udmabuf.c @@ -8,8 +8,6 @@ #include "qapi/error.h" #include "ui/console.h" -#ifdef CONFIG_LINUX - #include #include @@ -29,12 +27,3 @@ int udmabuf_fd(void) } return udmabuf; } - -#else - -int udmabuf_fd(void) -{ - return -1; -} - -#endif diff --git a/ui/util.c b/ui/util.c new file mode 100644 index 0000000000..7e8fc1ea53 --- /dev/null +++ b/ui/util.c @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2021 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu/osdep.h" + +#include "hw/pci/pci.h" +#include "hw/pci/pci_bus.h" +#include "qapi/error.h" +#include "ui/console.h" + +/* + * Recursively (in reverse order) appends addresses of PCI devices as it moves + * up in the PCI hierarchy. 
+ * + * @returns true on success, false when the buffer wasn't large enough + */ +static bool append_pci_address(char *buf, size_t buf_size, const PCIDevice *pci) +{ + PCIBus *bus = pci_get_bus(pci); + /* + * equivalent to if (!pci_bus_is_root(bus)), but the function is not built + * with PCI_CONFIG=n, avoid using an #ifdef by checking directly + */ + if (bus->parent_dev != NULL) { + append_pci_address(buf, buf_size, bus->parent_dev); + } + + size_t len = strlen(buf); + ssize_t written = snprintf(buf + len, buf_size - len, "/%02x.%x", + PCI_SLOT(pci->devfn), PCI_FUNC(pci->devfn)); + + return written > 0 && written < buf_size - len; +} + +bool qemu_console_fill_device_address(QemuConsole *con, + char *device_address, + size_t size, + Error **errp) +{ + ERRP_GUARD(); + DeviceState *dev = DEVICE(object_property_get_link(OBJECT(con), + "device", + &error_abort)); + PCIDevice *pci = (PCIDevice *) object_dynamic_cast(OBJECT(dev), + TYPE_PCI_DEVICE); + + if (pci == NULL) { + error_setg(errp, "Setting device address of a display device: " + "Not a PCI device."); + return false; + } + + strncpy(device_address, "pci/0000", size); + if (!append_pci_address(device_address, size, pci)) { + error_setg(errp, "Setting device address of a display device: " + "Too many PCI devices in the chain."); + return false; + } + + return true; +} diff --git a/ui/vdagent.c b/ui/vdagent.c index a253a8fe63..4bf50f0c4d 100644 --- a/ui/vdagent.c +++ b/ui/vdagent.c @@ -1,11 +1,11 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "include/qemu-common.h" #include "chardev/char.h" #include "qemu/buffer.h" #include "qemu/option.h" #include "qemu/units.h" #include "hw/qdev-core.h" +#include "migration/blocker.h" #include "ui/clipboard.h" #include "ui/console.h" #include "ui/input.h" @@ -16,6 +16,14 @@ #include "spice/vd_agent.h" +#define CHECK_SPICE_PROTOCOL_VERSION(major, minor, micro) \ + (CONFIG_SPICE_PROTOCOL_MAJOR > (major) || \ + (CONFIG_SPICE_PROTOCOL_MAJOR == (major) && \ + CONFIG_SPICE_PROTOCOL_MINOR > (minor)) || \ + (CONFIG_SPICE_PROTOCOL_MAJOR == (major) && \ + CONFIG_SPICE_PROTOCOL_MINOR == (minor) && \ + CONFIG_SPICE_PROTOCOL_MICRO >= (micro))) + #define VDAGENT_BUFFER_LIMIT (1 * MiB) #define VDAGENT_MOUSE_DEFAULT true #define VDAGENT_CLIPBOARD_DEFAULT false @@ -23,6 +31,9 @@ struct VDAgentChardev { Chardev parent; + /* TODO: migration isn't yet supported */ + Error *migration_blocker; + /* config */ bool mouse; bool clipboard; @@ -47,7 +58,7 @@ struct VDAgentChardev { /* clipboard */ QemuClipboardPeer cbpeer; - QemuClipboardInfo *cbinfo[QEMU_CLIPBOARD_SELECTION__COUNT]; + uint32_t last_serial[QEMU_CLIPBOARD_SELECTION__COUNT]; uint32_t cbpending[QEMU_CLIPBOARD_SELECTION__COUNT]; }; typedef struct VDAgentChardev VDAgentChardev; @@ -76,8 +87,10 @@ static const char *cap_name[] = { [VD_AGENT_CAP_MONITORS_CONFIG_POSITION] = "monitors-config-position", [VD_AGENT_CAP_FILE_XFER_DISABLED] = "file-xfer-disabled", [VD_AGENT_CAP_FILE_XFER_DETAILED_ERRORS] = "file-xfer-detailed-errors", -#if 0 +#if CHECK_SPICE_PROTOCOL_VERSION(0, 14, 0) [VD_AGENT_CAP_GRAPHICS_DEVICE_INFO] = "graphics-device-info", +#endif +#if CHECK_SPICE_PROTOCOL_VERSION(0, 14, 1) [VD_AGENT_CAP_CLIPBOARD_NO_RELEASE_ON_REGRAB] = "clipboard-no-release-on-regrab", [VD_AGENT_CAP_CLIPBOARD_GRAB_SERIAL] = "clipboard-grab-serial", #endif @@ -99,7 +112,7 @@ static const char *msg_name[] = { [VD_AGENT_CLIENT_DISCONNECTED] = "client-disconnected", [VD_AGENT_MAX_CLIPBOARD] = "max-clipboard", [VD_AGENT_AUDIO_VOLUME_SYNC] = "audio-volume-sync", -#if 0 +#if 
CHECK_SPICE_PROTOCOL_VERSION(0, 14, 0) [VD_AGENT_GRAPHICS_DEVICE_INFO] = "graphics-device-info", #endif }; @@ -117,7 +130,7 @@ static const char *type_name[] = { [VD_AGENT_CLIPBOARD_IMAGE_BMP] = "bmp", [VD_AGENT_CLIPBOARD_IMAGE_TIFF] = "tiff", [VD_AGENT_CLIPBOARD_IMAGE_JPG] = "jpg", -#if 0 +#if CHECK_SPICE_PROTOCOL_VERSION(0, 14, 3) [VD_AGENT_CLIPBOARD_FILE_LIST] = "files", #endif }; @@ -190,6 +203,9 @@ static void vdagent_send_caps(VDAgentChardev *vd) if (vd->clipboard) { caps->caps[0] |= (1 << VD_AGENT_CAP_CLIPBOARD_BY_DEMAND); caps->caps[0] |= (1 << VD_AGENT_CAP_CLIPBOARD_SELECTION); +#if CHECK_SPICE_PROTOCOL_VERSION(0, 14, 1) + caps->caps[0] |= (1 << VD_AGENT_CAP_CLIPBOARD_GRAB_SERIAL); +#endif } vdagent_send_msg(vd, msg); @@ -320,7 +336,8 @@ static void vdagent_send_clipboard_grab(VDAgentChardev *vd, { g_autofree VDAgentMessage *msg = g_malloc0(sizeof(VDAgentMessage) + - sizeof(uint32_t) * (QEMU_CLIPBOARD_TYPE__COUNT + 1)); + sizeof(uint32_t) * (QEMU_CLIPBOARD_TYPE__COUNT + 1) + + sizeof(uint32_t)); uint8_t *s = msg->data; uint32_t *data = (uint32_t *)msg->data; uint32_t q, type; @@ -333,6 +350,19 @@ static void vdagent_send_clipboard_grab(VDAgentChardev *vd, return; } +#if CHECK_SPICE_PROTOCOL_VERSION(0, 14, 1) + if (vd->caps & (1 << VD_AGENT_CAP_CLIPBOARD_GRAB_SERIAL)) { + if (!info->has_serial) { + /* client should win */ + info->serial = vd->last_serial[info->selection]++; + info->has_serial = true; + } + *data = info->serial; + data++; + msg->size += sizeof(uint32_t); + } +#endif + for (q = 0; q < QEMU_CLIPBOARD_TYPE__COUNT; q++) { type = type_qemu_to_vdagent(q); if (type != VD_AGENT_CLIPBOARD_NONE && info->types[q].available) { @@ -346,6 +376,24 @@ static void vdagent_send_clipboard_grab(VDAgentChardev *vd, vdagent_send_msg(vd, msg); } +static void vdagent_send_clipboard_release(VDAgentChardev *vd, + QemuClipboardInfo *info) +{ + g_autofree VDAgentMessage *msg = g_malloc0(sizeof(VDAgentMessage) + + sizeof(uint32_t)); + + if (have_selection(vd)) { + uint8_t *s = msg->data; + *s = info->selection; + msg->size += sizeof(uint32_t); + } else if (info->selection != QEMU_CLIPBOARD_SELECTION_CLIPBOARD) { + return; + } + + msg->type = VD_AGENT_CLIPBOARD_RELEASE; + vdagent_send_msg(vd, msg); +} + static void vdagent_send_clipboard_data(VDAgentChardev *vd, QemuClipboardInfo *info, QemuClipboardType type) @@ -376,20 +424,31 @@ static void vdagent_send_clipboard_data(VDAgentChardev *vd, vdagent_send_msg(vd, msg); } -static void vdagent_clipboard_notify(Notifier *notifier, void *data) +static void vdagent_send_empty_clipboard_data(VDAgentChardev *vd, + QemuClipboardSelection selection, + QemuClipboardType type) +{ + g_autoptr(QemuClipboardInfo) info = qemu_clipboard_info_new(&vd->cbpeer, selection); + + trace_vdagent_send_empty_clipboard(); + vdagent_send_clipboard_data(vd, info, type); +} + +static void vdagent_clipboard_update_info(VDAgentChardev *vd, + QemuClipboardInfo *info) { - VDAgentChardev *vd = container_of(notifier, VDAgentChardev, cbpeer.update); - QemuClipboardInfo *info = data; QemuClipboardSelection s = info->selection; QemuClipboardType type; bool self_update = info->owner == &vd->cbpeer; - if (info != vd->cbinfo[s]) { - qemu_clipboard_info_unref(vd->cbinfo[s]); - vd->cbinfo[s] = qemu_clipboard_info_ref(info); + if (info != qemu_clipboard_info(s)) { vd->cbpending[s] = 0; if (!self_update) { - vdagent_send_clipboard_grab(vd, info); + if (info->owner) { + vdagent_send_clipboard_grab(vd, info); + } else { + vdagent_send_clipboard_release(vd, info); + } } return; } @@ -406,6 
+465,31 @@ static void vdagent_clipboard_notify(Notifier *notifier, void *data) } } +static void vdagent_clipboard_reset_serial(VDAgentChardev *vd) +{ + Chardev *chr = CHARDEV(vd); + + /* reopen the agent connection to reset the serial state */ + qemu_chr_be_event(chr, CHR_EVENT_CLOSED); + /* OPENED again after the guest disconnected, see set_fe_open */ +} + +static void vdagent_clipboard_notify(Notifier *notifier, void *data) +{ + VDAgentChardev *vd = + container_of(notifier, VDAgentChardev, cbpeer.notifier); + QemuClipboardNotify *notify = data; + + switch (notify->type) { + case QEMU_CLIPBOARD_UPDATE_INFO: + vdagent_clipboard_update_info(vd, notify->info); + return; + case QEMU_CLIPBOARD_RESET_SERIAL: + vdagent_clipboard_reset_serial(vd); + return; + } +} + static void vdagent_clipboard_request(QemuClipboardInfo *info, QemuClipboardType qtype) { @@ -433,13 +517,116 @@ static void vdagent_clipboard_request(QemuClipboardInfo *info, vdagent_send_msg(vd, msg); } +static void vdagent_clipboard_recv_grab(VDAgentChardev *vd, uint8_t s, uint32_t size, void *data) +{ + g_autoptr(QemuClipboardInfo) info = NULL; + + trace_vdagent_cb_grab_selection(GET_NAME(sel_name, s)); + info = qemu_clipboard_info_new(&vd->cbpeer, s); +#if CHECK_SPICE_PROTOCOL_VERSION(0, 14, 1) + if (vd->caps & (1 << VD_AGENT_CAP_CLIPBOARD_GRAB_SERIAL)) { + if (size < sizeof(uint32_t)) { + /* this shouldn't happen! */ + return; + } + + info->has_serial = true; + info->serial = *(uint32_t *)data; + if (info->serial < vd->last_serial[s]) { + trace_vdagent_cb_grab_discard(GET_NAME(sel_name, s), + vd->last_serial[s], info->serial); + /* discard lower-ordering guest grab */ + return; + } + vd->last_serial[s] = info->serial; + data += sizeof(uint32_t); + size -= sizeof(uint32_t); + } +#endif + if (size > sizeof(uint32_t) * 10) { + /* + * spice has 6 types as of 2021. Limiting to 10 entries + * so we have some wiggle room. 
+ */ + return; + } + while (size >= sizeof(uint32_t)) { + trace_vdagent_cb_grab_type(GET_NAME(type_name, *(uint32_t *)data)); + switch (*(uint32_t *)data) { + case VD_AGENT_CLIPBOARD_UTF8_TEXT: + info->types[QEMU_CLIPBOARD_TYPE_TEXT].available = true; + break; + default: + break; + } + data += sizeof(uint32_t); + size -= sizeof(uint32_t); + } + qemu_clipboard_update(info); +} + +static void vdagent_clipboard_recv_request(VDAgentChardev *vd, uint8_t s, uint32_t size, void *data) +{ + QemuClipboardType type; + QemuClipboardInfo *info; + + if (size < sizeof(uint32_t)) { + return; + } + switch (*(uint32_t *)data) { + case VD_AGENT_CLIPBOARD_UTF8_TEXT: + type = QEMU_CLIPBOARD_TYPE_TEXT; + break; + default: + return; + } + + info = qemu_clipboard_info(s); + if (info && info->types[type].available && info->owner != &vd->cbpeer) { + if (info->types[type].data) { + vdagent_send_clipboard_data(vd, info, type); + } else { + vd->cbpending[s] |= (1 << type); + qemu_clipboard_request(info, type); + } + } else { + vdagent_send_empty_clipboard_data(vd, s, type); + } +} + +static void vdagent_clipboard_recv_data(VDAgentChardev *vd, uint8_t s, uint32_t size, void *data) +{ + QemuClipboardType type; + + if (size < sizeof(uint32_t)) { + return; + } + switch (*(uint32_t *)data) { + case VD_AGENT_CLIPBOARD_UTF8_TEXT: + type = QEMU_CLIPBOARD_TYPE_TEXT; + break; + default: + return; + } + data += 4; + size -= 4; + + if (qemu_clipboard_peer_owns(&vd->cbpeer, s)) { + qemu_clipboard_set_data(&vd->cbpeer, qemu_clipboard_info(s), + type, size, data, true); + } +} + +static void vdagent_clipboard_recv_release(VDAgentChardev *vd, uint8_t s) +{ + qemu_clipboard_peer_release(&vd->cbpeer, s); +} + static void vdagent_chr_recv_clipboard(VDAgentChardev *vd, VDAgentMessage *msg) { uint8_t s = VD_AGENT_CLIPBOARD_SELECTION_CLIPBOARD; uint32_t size = msg->size; void *data = msg->data; - QemuClipboardInfo *info; - QemuClipboardType type; if (have_selection(vd)) { if (size < 4) { @@ -455,77 +642,15 @@ static void vdagent_chr_recv_clipboard(VDAgentChardev *vd, VDAgentMessage *msg) switch (msg->type) { case VD_AGENT_CLIPBOARD_GRAB: - trace_vdagent_cb_grab_selection(GET_NAME(sel_name, s)); - info = qemu_clipboard_info_new(&vd->cbpeer, s); - if (size > sizeof(uint32_t) * 10) { - /* - * spice has 6 types as of 2021. Limiting to 10 entries - * so we we have some wiggle room. 
- */ - return; - } - while (size >= sizeof(uint32_t)) { - trace_vdagent_cb_grab_type(GET_NAME(type_name, *(uint32_t *)data)); - switch (*(uint32_t *)data) { - case VD_AGENT_CLIPBOARD_UTF8_TEXT: - info->types[QEMU_CLIPBOARD_TYPE_TEXT].available = true; - break; - default: - break; - } - data += sizeof(uint32_t); - size -= sizeof(uint32_t); - } - qemu_clipboard_update(info); - qemu_clipboard_info_unref(info); - break; + return vdagent_clipboard_recv_grab(vd, s, size, data); case VD_AGENT_CLIPBOARD_REQUEST: - if (size < sizeof(uint32_t)) { - return; - } - switch (*(uint32_t *)data) { - case VD_AGENT_CLIPBOARD_UTF8_TEXT: - type = QEMU_CLIPBOARD_TYPE_TEXT; - break; - default: - return; - } - if (vd->cbinfo[s] && - vd->cbinfo[s]->types[type].available && - vd->cbinfo[s]->owner != &vd->cbpeer) { - if (vd->cbinfo[s]->types[type].data) { - vdagent_send_clipboard_data(vd, vd->cbinfo[s], type); - } else { - vd->cbpending[s] |= (1 << type); - qemu_clipboard_request(vd->cbinfo[s], type); - } - } - break; + return vdagent_clipboard_recv_request(vd, s, size, data); case VD_AGENT_CLIPBOARD: /* data */ - if (size < sizeof(uint32_t)) { - return; - } - switch (*(uint32_t *)data) { - case VD_AGENT_CLIPBOARD_UTF8_TEXT: - type = QEMU_CLIPBOARD_TYPE_TEXT; - break; - default: - return; - } - data += 4; - size -= 4; - qemu_clipboard_set_data(&vd->cbpeer, vd->cbinfo[s], type, - size, data, true); - break; - case VD_AGENT_CLIPBOARD_RELEASE: /* data */ - if (vd->cbinfo[s] && - vd->cbinfo[s]->owner == &vd->cbpeer) { - /* set empty clipboard info */ - info = qemu_clipboard_info_new(NULL, s); - qemu_clipboard_update(info); - qemu_clipboard_info_unref(info); - } - break; + return vdagent_clipboard_recv_data(vd, s, size, data); + case VD_AGENT_CLIPBOARD_RELEASE: + return vdagent_clipboard_recv_release(vd, s); + default: + g_assert_not_reached(); } } @@ -540,7 +665,7 @@ static void vdagent_chr_open(Chardev *chr, VDAgentChardev *vd = QEMU_VDAGENT_CHARDEV(chr); ChardevQemuVDAgent *cfg = backend->u.qemu_vdagent.data; -#if defined(HOST_WORDS_BIGENDIAN) +#if HOST_BIG_ENDIAN /* * TODO: vdagent protocol is defined to be LE, * so we have to byteswap everything on BE hosts. 
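Not part of the patch: a minimal, self-contained sketch of the grab-serial ordering the hunks above implement, where vdagent_send_clipboard_grab() stamps outgoing grabs from vd->last_serial and vdagent_clipboard_recv_grab() discards an incoming grab whose serial is lower than the last one seen. Only the compare-and-update rule is taken from the diff; SerialState, grab_assign_serial() and grab_accept() are invented names for illustration.

/*
 * Illustrative sketch (assumed names, not QEMU API): each side tags
 * clipboard grabs with a per-selection serial, and a receiver drops any
 * grab whose serial is lower than the last accepted one, so a stale
 * guest grab cannot overwrite a newer client grab.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SELECTION_COUNT 2               /* e.g. CLIPBOARD and PRIMARY */

typedef struct {
    uint32_t last_serial[SELECTION_COUNT];
} SerialState;

/* Outgoing grab without an explicit serial: use the current value, then bump. */
static uint32_t grab_assign_serial(SerialState *st, unsigned sel)
{
    return st->last_serial[sel]++;
}

/* Incoming grab: accept only if it is not older than what we last saw. */
static bool grab_accept(SerialState *st, unsigned sel, uint32_t serial)
{
    if (serial < st->last_serial[sel]) {
        return false;                   /* discard lower-ordering grab */
    }
    st->last_serial[sel] = serial;
    return true;
}

int main(void)
{
    SerialState st = { { 0 } };

    /* Two grabs sent on the client's behalf consume serials 0 and 1. */
    printf("client grab serial %" PRIu32 "\n", grab_assign_serial(&st, 0));
    printf("client grab serial %" PRIu32 "\n", grab_assign_serial(&st, 0));

    /* A guest grab that raced with them still carries the stale serial 1. */
    printf("guest grab serial 1 accepted: %s\n",
           grab_accept(&st, 0, 1) ? "yes" : "no");  /* no: 1 < 2 */
    printf("guest grab serial 2 accepted: %s\n",
           grab_accept(&st, 0, 2) ? "yes" : "no");  /* yes */
    return 0;
}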
@@ -549,6 +674,10 @@ static void vdagent_chr_open(Chardev *chr, return; #endif + if (migrate_add_blocker(vd->migration_blocker, errp) != 0) { + return; + } + vd->mouse = VDAGENT_MOUSE_DEFAULT; if (cfg->has_mouse) { vd->mouse = cfg->mouse; @@ -590,9 +719,12 @@ static void vdagent_chr_recv_caps(VDAgentChardev *vd, VDAgentMessage *msg) if (have_mouse(vd) && vd->mouse_hs) { qemu_input_handler_activate(vd->mouse_hs); } - if (have_clipboard(vd) && vd->cbpeer.update.notify == NULL) { + + memset(vd->last_serial, 0, sizeof(vd->last_serial)); + + if (have_clipboard(vd) && vd->cbpeer.notifier.notify == NULL) { vd->cbpeer.name = "vdagent"; - vd->cbpeer.update.notify = vdagent_clipboard_notify; + vd->cbpeer.notifier.notify = vdagent_clipboard_notify; vd->cbpeer.request = vdagent_clipboard_request; qemu_clipboard_peer_register(&vd->cbpeer); } @@ -723,22 +855,29 @@ static void vdagent_chr_accept_input(Chardev *chr) vdagent_send_buf(vd); } +static void vdagent_disconnect(VDAgentChardev *vd) +{ + trace_vdagent_disconnect(); + + buffer_reset(&vd->outbuf); + vdagent_reset_bufs(vd); + vd->caps = 0; + if (vd->mouse_hs) { + qemu_input_handler_deactivate(vd->mouse_hs); + } + if (vd->cbpeer.notifier.notify) { + qemu_clipboard_peer_unregister(&vd->cbpeer); + memset(&vd->cbpeer, 0, sizeof(vd->cbpeer)); + } +} + static void vdagent_chr_set_fe_open(struct Chardev *chr, int fe_open) { - VDAgentChardev *vd = QEMU_VDAGENT_CHARDEV(chr); - if (!fe_open) { trace_vdagent_close(); - /* reset state */ - vdagent_reset_bufs(vd); - vd->caps = 0; - if (vd->mouse_hs) { - qemu_input_handler_deactivate(vd->mouse_hs); - } - if (vd->cbpeer.update.notify) { - qemu_clipboard_peer_unregister(&vd->cbpeer); - memset(&vd->cbpeer, 0, sizeof(vd->cbpeer)); - } + /* To reset_serial, we CLOSED our side. Make sure the other end knows we + * are ready again. 
*/ + qemu_chr_be_event(chr, CHR_EVENT_OPENED); return; } @@ -777,13 +916,18 @@ static void vdagent_chr_init(Object *obj) VDAgentChardev *vd = QEMU_VDAGENT_CHARDEV(obj); buffer_init(&vd->outbuf, "vdagent-outbuf"); + error_setg(&vd->migration_blocker, + "The vdagent chardev doesn't yet support migration"); } static void vdagent_chr_fini(Object *obj) { VDAgentChardev *vd = QEMU_VDAGENT_CHARDEV(obj); + migrate_del_blocker(vd->migration_blocker); + vdagent_disconnect(vd); buffer_free(&vd->outbuf); + error_free(vd->migration_blocker); } static const TypeInfo vdagent_chr_type_info = { diff --git a/ui/vgafont.h b/ui/vgafont.h index 3606dd7338..7e1fc473f7 100644 --- a/ui/vgafont.h +++ b/ui/vgafont.h @@ -1,4611 +1,4611 @@ static const uint8_t vgafont16[256 * 16] = { - /* 0 0x00 '^@' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 1 0x01 '^A' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7e, /* 01111110 */ - 0x81, /* 10000001 */ - 0xa5, /* 10100101 */ - 0x81, /* 10000001 */ - 0x81, /* 10000001 */ - 0xbd, /* 10111101 */ - 0x99, /* 10011001 */ - 0x81, /* 10000001 */ - 0x81, /* 10000001 */ - 0x7e, /* 01111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 2 0x02 '^B' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7e, /* 01111110 */ - 0xff, /* 11111111 */ - 0xdb, /* 11011011 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xc3, /* 11000011 */ - 0xe7, /* 11100111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0x7e, /* 01111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 3 0x03 '^C' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x6c, /* 01101100 */ - 0xfe, /* 11111110 */ - 0xfe, /* 11111110 */ - 0xfe, /* 11111110 */ - 0xfe, /* 11111110 */ - 0x7c, /* 01111100 */ - 0x38, /* 00111000 */ - 0x10, /* 00010000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 4 0x04 '^D' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x10, /* 00010000 */ - 0x38, /* 00111000 */ - 0x7c, /* 01111100 */ - 0xfe, /* 11111110 */ - 0x7c, /* 01111100 */ - 0x38, /* 00111000 */ - 0x10, /* 00010000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 5 0x05 '^E' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x3c, /* 00111100 */ - 0xe7, /* 11100111 */ - 0xe7, /* 11100111 */ - 0xe7, /* 11100111 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 6 0x06 '^F' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x7e, /* 01111110 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0x7e, /* 01111110 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 7 0x07 '^G' */ - 0x00, /* 00000000 */ - 0x00, /* 
00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x3c, /* 00111100 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 8 0x08 '^H' */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xe7, /* 11100111 */ - 0xc3, /* 11000011 */ - 0xc3, /* 11000011 */ - 0xe7, /* 11100111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - - /* 9 0x09 '^I' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x3c, /* 00111100 */ - 0x66, /* 01100110 */ - 0x42, /* 01000010 */ - 0x42, /* 01000010 */ - 0x66, /* 01100110 */ - 0x3c, /* 00111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 10 0x0a '^J' */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xc3, /* 11000011 */ - 0x99, /* 10011001 */ - 0xbd, /* 10111101 */ - 0xbd, /* 10111101 */ - 0x99, /* 10011001 */ - 0xc3, /* 11000011 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - - /* 11 0x0b '^K' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x1e, /* 00011110 */ - 0x0e, /* 00001110 */ - 0x1a, /* 00011010 */ - 0x32, /* 00110010 */ - 0x78, /* 01111000 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0x78, /* 01111000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 12 0x0c '^L' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x3c, /* 00111100 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x3c, /* 00111100 */ - 0x18, /* 00011000 */ - 0x7e, /* 01111110 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 13 0x0d '^M' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x3f, /* 00111111 */ - 0x33, /* 00110011 */ - 0x3f, /* 00111111 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x70, /* 01110000 */ - 0xf0, /* 11110000 */ - 0xe0, /* 11100000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 14 0x0e '^N' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7f, /* 01111111 */ - 0x63, /* 01100011 */ - 0x7f, /* 01111111 */ - 0x63, /* 01100011 */ - 0x63, /* 01100011 */ - 0x63, /* 01100011 */ - 0x63, /* 01100011 */ - 0x67, /* 01100111 */ - 0xe7, /* 11100111 */ - 0xe6, /* 11100110 */ - 0xc0, /* 11000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 15 0x0f '^O' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0xdb, /* 11011011 */ - 0x3c, /* 00111100 */ - 0xe7, /* 11100111 */ - 0x3c, /* 00111100 */ - 0xdb, /* 11011011 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 16 0x10 '^P' */ - 0x00, /* 00000000 */ - 0x80, /* 10000000 */ - 0xc0, /* 11000000 */ - 
0xe0, /* 11100000 */ - 0xf0, /* 11110000 */ - 0xf8, /* 11111000 */ - 0xfe, /* 11111110 */ - 0xf8, /* 11111000 */ - 0xf0, /* 11110000 */ - 0xe0, /* 11100000 */ - 0xc0, /* 11000000 */ - 0x80, /* 10000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 17 0x11 '^Q' */ - 0x00, /* 00000000 */ - 0x02, /* 00000010 */ - 0x06, /* 00000110 */ - 0x0e, /* 00001110 */ - 0x1e, /* 00011110 */ - 0x3e, /* 00111110 */ - 0xfe, /* 11111110 */ - 0x3e, /* 00111110 */ - 0x1e, /* 00011110 */ - 0x0e, /* 00001110 */ - 0x06, /* 00000110 */ - 0x02, /* 00000010 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 18 0x12 '^R' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x7e, /* 01111110 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x7e, /* 01111110 */ - 0x3c, /* 00111100 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 19 0x13 '^S' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x00, /* 00000000 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 20 0x14 '^T' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7f, /* 01111111 */ - 0xdb, /* 11011011 */ - 0xdb, /* 11011011 */ - 0xdb, /* 11011011 */ - 0x7b, /* 01111011 */ - 0x1b, /* 00011011 */ - 0x1b, /* 00011011 */ - 0x1b, /* 00011011 */ - 0x1b, /* 00011011 */ - 0x1b, /* 00011011 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 21 0x15 '^U' */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0x60, /* 01100000 */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x6c, /* 01101100 */ - 0x38, /* 00111000 */ - 0x0c, /* 00001100 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 22 0x16 '^V' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0xfe, /* 11111110 */ - 0xfe, /* 11111110 */ - 0xfe, /* 11111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 23 0x17 '^W' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x7e, /* 01111110 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x7e, /* 01111110 */ - 0x3c, /* 00111100 */ - 0x18, /* 00011000 */ - 0x7e, /* 01111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 24 0x18 '^X' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x7e, /* 01111110 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 25 0x19 '^Y' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 
00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x7e, /* 01111110 */ - 0x3c, /* 00111100 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 26 0x1a '^Z' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x0c, /* 00001100 */ - 0xfe, /* 11111110 */ - 0x0c, /* 00001100 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 27 0x1b '^[' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x30, /* 00110000 */ - 0x60, /* 01100000 */ - 0xfe, /* 11111110 */ - 0x60, /* 01100000 */ - 0x30, /* 00110000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 28 0x1c '^\' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xfe, /* 11111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 29 0x1d '^]' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x28, /* 00101000 */ - 0x6c, /* 01101100 */ - 0xfe, /* 11111110 */ - 0x6c, /* 01101100 */ - 0x28, /* 00101000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 30 0x1e '^^' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x10, /* 00010000 */ - 0x38, /* 00111000 */ - 0x38, /* 00111000 */ - 0x7c, /* 01111100 */ - 0x7c, /* 01111100 */ - 0xfe, /* 11111110 */ - 0xfe, /* 11111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 31 0x1f '^_' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0xfe, /* 11111110 */ - 0x7c, /* 01111100 */ - 0x7c, /* 01111100 */ - 0x38, /* 00111000 */ - 0x38, /* 00111000 */ - 0x10, /* 00010000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 32 0x20 ' ' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 33 0x21 '!' 
*/ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x3c, /* 00111100 */ - 0x3c, /* 00111100 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 34 0x22 '"' */ - 0x00, /* 00000000 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x24, /* 00100100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 35 0x23 '#' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x6c, /* 01101100 */ - 0x6c, /* 01101100 */ - 0xfe, /* 11111110 */ - 0x6c, /* 01101100 */ - 0x6c, /* 01101100 */ - 0x6c, /* 01101100 */ - 0xfe, /* 11111110 */ - 0x6c, /* 01101100 */ - 0x6c, /* 01101100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 36 0x24 '$' */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc2, /* 11000010 */ - 0xc0, /* 11000000 */ - 0x7c, /* 01111100 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0x86, /* 10000110 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 37 0x25 '%' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xc2, /* 11000010 */ - 0xc6, /* 11000110 */ - 0x0c, /* 00001100 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x60, /* 01100000 */ - 0xc6, /* 11000110 */ - 0x86, /* 10000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 38 0x26 '&' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0x6c, /* 01101100 */ - 0x38, /* 00111000 */ - 0x76, /* 01110110 */ - 0xdc, /* 11011100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0x76, /* 01110110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 39 0x27 ''' */ - 0x00, /* 00000000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x60, /* 01100000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 40 0x28 '(' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x0c, /* 00001100 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x18, /* 00011000 */ - 0x0c, /* 00001100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 41 0x29 ')' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x30, /* 00110000 */ - 0x18, /* 00011000 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 42 0x2a '*' */ - 0x00, /* 00000000 */ - 0x00, /* 
00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x66, /* 01100110 */ - 0x3c, /* 00111100 */ - 0xff, /* 11111111 */ - 0x3c, /* 00111100 */ - 0x66, /* 01100110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 43 0x2b '+' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x7e, /* 01111110 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 44 0x2c ',' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 45 0x2d '-' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 46 0x2e '.' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 47 0x2f '/' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x02, /* 00000010 */ - 0x06, /* 00000110 */ - 0x0c, /* 00001100 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x60, /* 01100000 */ - 0xc0, /* 11000000 */ - 0x80, /* 10000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 48 0x30 '0' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xd6, /* 11010110 */ - 0xd6, /* 11010110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x6c, /* 01101100 */ - 0x38, /* 00111000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 49 0x31 '1' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x38, /* 00111000 */ - 0x78, /* 01111000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x7e, /* 01111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 50 0x32 '2' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0x06, /* 00000110 */ - 0x0c, /* 00001100 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x60, /* 01100000 */ - 0xc0, /* 11000000 */ - 0xc6, /* 11000110 */ - 0xfe, /* 11111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 51 0x33 '3' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, 
/* 11000110 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0x3c, /* 00111100 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 52 0x34 '4' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x0c, /* 00001100 */ - 0x1c, /* 00011100 */ - 0x3c, /* 00111100 */ - 0x6c, /* 01101100 */ - 0xcc, /* 11001100 */ - 0xfe, /* 11111110 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x1e, /* 00011110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 53 0x35 '5' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xfc, /* 11111100 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 54 0x36 '6' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x38, /* 00111000 */ - 0x60, /* 01100000 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xfc, /* 11111100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 55 0x37 '7' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0xc6, /* 11000110 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0x0c, /* 00001100 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 56 0x38 '8' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 57 0x39 '9' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x7e, /* 01111110 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0x0c, /* 00001100 */ - 0x78, /* 01111000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 58 0x3a ':' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 59 0x3b ';' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 60 0x3c '<' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x06, /* 00000110 */ - 0x0c, /* 00001100 */ - 
0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x60, /* 01100000 */ - 0x30, /* 00110000 */ - 0x18, /* 00011000 */ - 0x0c, /* 00001100 */ - 0x06, /* 00000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 61 0x3d '=' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7e, /* 01111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7e, /* 01111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 62 0x3e '>' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x60, /* 01100000 */ - 0x30, /* 00110000 */ - 0x18, /* 00011000 */ - 0x0c, /* 00001100 */ - 0x06, /* 00000110 */ - 0x0c, /* 00001100 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x60, /* 01100000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 63 0x3f '?' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x0c, /* 00001100 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 64 0x40 '@' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xde, /* 11011110 */ - 0xde, /* 11011110 */ - 0xde, /* 11011110 */ - 0xdc, /* 11011100 */ - 0xc0, /* 11000000 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 65 0x41 'A' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x10, /* 00010000 */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xfe, /* 11111110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 66 0x42 'B' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfc, /* 11111100 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x7c, /* 01111100 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0xfc, /* 11111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 67 0x43 'C' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x3c, /* 00111100 */ - 0x66, /* 01100110 */ - 0xc2, /* 11000010 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc2, /* 11000010 */ - 0x66, /* 01100110 */ - 0x3c, /* 00111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 68 0x44 'D' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xf8, /* 11111000 */ - 0x6c, /* 01101100 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x6c, /* 01101100 */ - 0xf8, /* 11111000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 69 0x45 'E' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0x66, /* 01100110 */ - 0x62, /* 01100010 */ - 0x68, /* 01101000 */ - 0x78, /* 01111000 */ 
- 0x68, /* 01101000 */ - 0x60, /* 01100000 */ - 0x62, /* 01100010 */ - 0x66, /* 01100110 */ - 0xfe, /* 11111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 70 0x46 'F' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0x66, /* 01100110 */ - 0x62, /* 01100010 */ - 0x68, /* 01101000 */ - 0x78, /* 01111000 */ - 0x68, /* 01101000 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0xf0, /* 11110000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 71 0x47 'G' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x3c, /* 00111100 */ - 0x66, /* 01100110 */ - 0xc2, /* 11000010 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xde, /* 11011110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x66, /* 01100110 */ - 0x3a, /* 00111010 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 72 0x48 'H' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xfe, /* 11111110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 73 0x49 'I' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x3c, /* 00111100 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 74 0x4a 'J' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x1e, /* 00011110 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0x78, /* 01111000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 75 0x4b 'K' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xe6, /* 11100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x6c, /* 01101100 */ - 0x78, /* 01111000 */ - 0x78, /* 01111000 */ - 0x6c, /* 01101100 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0xe6, /* 11100110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 76 0x4c 'L' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xf0, /* 11110000 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0x62, /* 01100010 */ - 0x66, /* 01100110 */ - 0xfe, /* 11111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 77 0x4d 'M' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0xee, /* 11101110 */ - 0xfe, /* 11111110 */ - 0xfe, /* 11111110 */ - 0xd6, /* 11010110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 78 0x4e 'N' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0xe6, /* 11100110 */ - 0xf6, /* 11110110 */ - 0xfe, /* 11111110 */ - 0xde, /* 11011110 */ - 0xce, /* 11001110 */ - 0xc6, /* 11000110 
*/ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 79 0x4f 'O' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 80 0x50 'P' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfc, /* 11111100 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x7c, /* 01111100 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0xf0, /* 11110000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 81 0x51 'Q' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xd6, /* 11010110 */ - 0xde, /* 11011110 */ - 0x7c, /* 01111100 */ - 0x0c, /* 00001100 */ - 0x0e, /* 00001110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 82 0x52 'R' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfc, /* 11111100 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x7c, /* 01111100 */ - 0x6c, /* 01101100 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0xe6, /* 11100110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 83 0x53 'S' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x60, /* 01100000 */ - 0x38, /* 00111000 */ - 0x0c, /* 00001100 */ - 0x06, /* 00000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 84 0x54 'T' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7e, /* 01111110 */ - 0x7e, /* 01111110 */ - 0x5a, /* 01011010 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 85 0x55 'U' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 86 0x56 'V' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x6c, /* 01101100 */ - 0x38, /* 00111000 */ - 0x10, /* 00010000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 87 0x57 'W' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xd6, /* 11010110 */ - 0xd6, /* 11010110 */ - 0xd6, /* 11010110 */ - 0xfe, /* 11111110 */ - 0xee, /* 
11101110 */ - 0x6c, /* 01101100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 88 0x58 'X' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x6c, /* 01101100 */ - 0x7c, /* 01111100 */ - 0x38, /* 00111000 */ - 0x38, /* 00111000 */ - 0x7c, /* 01111100 */ - 0x6c, /* 01101100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 89 0x59 'Y' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x3c, /* 00111100 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 90 0x5a 'Z' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0xc6, /* 11000110 */ - 0x86, /* 10000110 */ - 0x0c, /* 00001100 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x60, /* 01100000 */ - 0xc2, /* 11000010 */ - 0xc6, /* 11000110 */ - 0xfe, /* 11111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 91 0x5b '[' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x3c, /* 00111100 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x3c, /* 00111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 92 0x5c '\' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x80, /* 10000000 */ - 0xc0, /* 11000000 */ - 0xe0, /* 11100000 */ - 0x70, /* 01110000 */ - 0x38, /* 00111000 */ - 0x1c, /* 00011100 */ - 0x0e, /* 00001110 */ - 0x06, /* 00000110 */ - 0x02, /* 00000010 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 93 0x5d ']' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x3c, /* 00111100 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x3c, /* 00111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 94 0x5e '^' */ - 0x10, /* 00010000 */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 95 0x5f '_' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xff, /* 11111111 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 96 0x60 '`' */ - 0x00, /* 00000000 */ - 0x30, /* 00110000 */ - 0x18, /* 00011000 */ - 0x0c, /* 00001100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, 
/* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 97 0x61 'a' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x78, /* 01111000 */ - 0x0c, /* 00001100 */ - 0x7c, /* 01111100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0x76, /* 01110110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 98 0x62 'b' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xe0, /* 11100000 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0x78, /* 01111000 */ - 0x6c, /* 01101100 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 99 0x63 'c' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 100 0x64 'd' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x1c, /* 00011100 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x3c, /* 00111100 */ - 0x6c, /* 01101100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0x76, /* 01110110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 101 0x65 'e' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xfe, /* 11111110 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 102 0x66 'f' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x1c, /* 00011100 */ - 0x36, /* 00110110 */ - 0x32, /* 00110010 */ - 0x30, /* 00110000 */ - 0x78, /* 01111000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x78, /* 01111000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 103 0x67 'g' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x76, /* 01110110 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0x7c, /* 01111100 */ - 0x0c, /* 00001100 */ - 0xcc, /* 11001100 */ - 0x78, /* 01111000 */ - 0x00, /* 00000000 */ - - /* 104 0x68 'h' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xe0, /* 11100000 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0x6c, /* 01101100 */ - 0x76, /* 01110110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0xe6, /* 11100110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 105 0x69 'i' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x38, /* 00111000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ 
- 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 106 0x6a 'j' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0x00, /* 00000000 */ - 0x0e, /* 00001110 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x3c, /* 00111100 */ - 0x00, /* 00000000 */ - - /* 107 0x6b 'k' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xe0, /* 11100000 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0x66, /* 01100110 */ - 0x6c, /* 01101100 */ - 0x78, /* 01111000 */ - 0x78, /* 01111000 */ - 0x6c, /* 01101100 */ - 0x66, /* 01100110 */ - 0xe6, /* 11100110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 108 0x6c 'l' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x38, /* 00111000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 109 0x6d 'm' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xec, /* 11101100 */ - 0xfe, /* 11111110 */ - 0xd6, /* 11010110 */ - 0xd6, /* 11010110 */ - 0xd6, /* 11010110 */ - 0xd6, /* 11010110 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 110 0x6e 'n' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xdc, /* 11011100 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 111 0x6f 'o' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 112 0x70 'p' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xdc, /* 11011100 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x7c, /* 01111100 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0xf0, /* 11110000 */ - 0x00, /* 00000000 */ - - /* 113 0x71 'q' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x76, /* 01110110 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0x7c, /* 01111100 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x1e, /* 00011110 */ - 0x00, /* 00000000 */ - - /* 114 0x72 'r' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xdc, /* 11011100 */ - 0x76, /* 01110110 */ - 0x66, /* 01100110 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0xf0, /* 11110000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 
00000000 */ - - /* 115 0x73 's' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0x60, /* 01100000 */ - 0x38, /* 00111000 */ - 0x0c, /* 00001100 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 116 0x74 't' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x10, /* 00010000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0xfc, /* 11111100 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x36, /* 00110110 */ - 0x1c, /* 00011100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 117 0x75 'u' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0x76, /* 01110110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 118 0x76 'v' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x6c, /* 01101100 */ - 0x38, /* 00111000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 119 0x77 'w' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xd6, /* 11010110 */ - 0xd6, /* 11010110 */ - 0xd6, /* 11010110 */ - 0xfe, /* 11111110 */ - 0x6c, /* 01101100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 120 0x78 'x' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0x6c, /* 01101100 */ - 0x38, /* 00111000 */ - 0x38, /* 00111000 */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 121 0x79 'y' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x7e, /* 01111110 */ - 0x06, /* 00000110 */ - 0x0c, /* 00001100 */ - 0xf8, /* 11111000 */ - 0x00, /* 00000000 */ - - /* 122 0x7a 'z' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0xcc, /* 11001100 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x60, /* 01100000 */ - 0xc6, /* 11000110 */ - 0xfe, /* 11111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 123 0x7b '{' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x0e, /* 00001110 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x70, /* 01110000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x0e, /* 00001110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 124 0x7c '|' */ 
- 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 125 0x7d '}' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x70, /* 01110000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x0e, /* 00001110 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x70, /* 01110000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 126 0x7e '~' */ - 0x00, /* 00000000 */ - 0x76, /* 01110110 */ - 0xdc, /* 11011100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 127 0x7f '' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x10, /* 00010000 */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xfe, /* 11111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 128 0x80 '€' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x3c, /* 00111100 */ - 0x66, /* 01100110 */ - 0xc2, /* 11000010 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc2, /* 11000010 */ - 0x66, /* 01100110 */ - 0x3c, /* 00111100 */ - 0x18, /* 00011000 */ - 0x70, /* 01110000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 129 0x81 '' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xcc, /* 11001100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0x76, /* 01110110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 130 0x82 '‚' */ - 0x00, /* 00000000 */ - 0x0c, /* 00001100 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xfe, /* 11111110 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 131 0x83 'ƒ' */ - 0x00, /* 00000000 */ - 0x10, /* 00010000 */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0x00, /* 00000000 */ - 0x78, /* 01111000 */ - 0x0c, /* 00001100 */ - 0x7c, /* 01111100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0x76, /* 01110110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 132 0x84 '„' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xcc, /* 11001100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x78, /* 01111000 */ - 0x0c, /* 00001100 */ - 0x7c, /* 01111100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0x76, /* 01110110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 133 0x85 '…' */ - 0x00, /* 00000000 */ - 0x60, /* 
01100000 */ - 0x30, /* 00110000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x78, /* 01111000 */ - 0x0c, /* 00001100 */ - 0x7c, /* 01111100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0x76, /* 01110110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 134 0x86 '†' */ - 0x00, /* 00000000 */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0x38, /* 00111000 */ - 0x00, /* 00000000 */ - 0x78, /* 01111000 */ - 0x0c, /* 00001100 */ - 0x7c, /* 01111100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0x76, /* 01110110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 135 0x87 '‡' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x18, /* 00011000 */ - 0x70, /* 01110000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 136 0x88 'ˆ' */ - 0x00, /* 00000000 */ - 0x10, /* 00010000 */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xfe, /* 11111110 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 137 0x89 '‰' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xfe, /* 11111110 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 138 0x8a 'Š' */ - 0x00, /* 00000000 */ - 0x60, /* 01100000 */ - 0x30, /* 00110000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xfe, /* 11111110 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 139 0x8b '‹' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x66, /* 01100110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x38, /* 00111000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 140 0x8c 'Œ' */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x66, /* 01100110 */ - 0x00, /* 00000000 */ - 0x38, /* 00111000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 141 0x8d '' */ - 0x00, /* 00000000 */ - 0x60, /* 01100000 */ - 0x30, /* 00110000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x38, /* 00111000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 142 0x8e 'Ž' */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ 
- 0x10, /* 00010000 */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xfe, /* 11111110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 143 0x8f '' */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0x38, /* 00111000 */ - 0x10, /* 00010000 */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0xc6, /* 11000110 */ - 0xfe, /* 11111110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 144 0x90 '' */ - 0x0c, /* 00001100 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0x66, /* 01100110 */ - 0x62, /* 01100010 */ - 0x68, /* 01101000 */ - 0x78, /* 01111000 */ - 0x68, /* 01101000 */ - 0x62, /* 01100010 */ - 0x66, /* 01100110 */ - 0xfe, /* 11111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 145 0x91 '‘' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xec, /* 11101100 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x7e, /* 01111110 */ - 0xd8, /* 11011000 */ - 0xd8, /* 11011000 */ - 0x6e, /* 01101110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 146 0x92 '’' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x3e, /* 00111110 */ - 0x6c, /* 01101100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xfe, /* 11111110 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xce, /* 11001110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 147 0x93 '“' */ - 0x00, /* 00000000 */ - 0x10, /* 00010000 */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 148 0x94 '”' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 149 0x95 '•' */ - 0x00, /* 00000000 */ - 0x60, /* 01100000 */ - 0x30, /* 00110000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 150 0x96 '–' */ - 0x00, /* 00000000 */ - 0x30, /* 00110000 */ - 0x78, /* 01111000 */ - 0xcc, /* 11001100 */ - 0x00, /* 00000000 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0x76, /* 01110110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 151 0x97 '—' */ - 0x00, /* 00000000 */ - 0x60, /* 01100000 */ - 0x30, /* 00110000 */ - 0x18, /* 00011000 */ - 0x00, /* 
00000000 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0x76, /* 01110110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 152 0x98 '˜' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x7e, /* 01111110 */ - 0x06, /* 00000110 */ - 0x0c, /* 00001100 */ - 0x78, /* 01111000 */ - 0x00, /* 00000000 */ - - /* 153 0x99 '™' */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 154 0x9a 'š' */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 155 0x9b '›' */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 156 0x9c 'œ' */ - 0x00, /* 00000000 */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0x64, /* 01100100 */ - 0x60, /* 01100000 */ - 0xf0, /* 11110000 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0xe6, /* 11100110 */ - 0xfc, /* 11111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 157 0x9d '' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x3c, /* 00111100 */ - 0x18, /* 00011000 */ - 0x7e, /* 01111110 */ - 0x18, /* 00011000 */ - 0x7e, /* 01111110 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 158 0x9e 'ž' */ - 0x00, /* 00000000 */ - 0xf8, /* 11111000 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xf8, /* 11111000 */ - 0xc4, /* 11000100 */ - 0xcc, /* 11001100 */ - 0xde, /* 11011110 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 159 0x9f 'Ÿ' */ - 0x00, /* 00000000 */ - 0x0e, /* 00001110 */ - 0x1b, /* 00011011 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x7e, /* 01111110 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0xd8, /* 11011000 */ - 0x70, /* 01110000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 160 0xa0 ' ' */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x60, /* 01100000 */ - 0x00, /* 00000000 */ - 0x78, /* 01111000 */ 
- 0x0c, /* 00001100 */ - 0x7c, /* 01111100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0x76, /* 01110110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 161 0xa1 '¡' */ - 0x00, /* 00000000 */ - 0x0c, /* 00001100 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x00, /* 00000000 */ - 0x38, /* 00111000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 162 0xa2 '¢' */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x60, /* 01100000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 163 0xa3 '£' */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x60, /* 01100000 */ - 0x00, /* 00000000 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0x76, /* 01110110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 164 0xa4 '¤' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x76, /* 01110110 */ - 0xdc, /* 11011100 */ - 0x00, /* 00000000 */ - 0xdc, /* 11011100 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 165 0xa5 '¥' */ - 0x76, /* 01110110 */ - 0xdc, /* 11011100 */ - 0x00, /* 00000000 */ - 0xc6, /* 11000110 */ - 0xe6, /* 11100110 */ - 0xf6, /* 11110110 */ - 0xfe, /* 11111110 */ - 0xde, /* 11011110 */ - 0xce, /* 11001110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 166 0xa6 '¦' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x3c, /* 00111100 */ - 0x6c, /* 01101100 */ - 0x6c, /* 01101100 */ - 0x3e, /* 00111110 */ - 0x00, /* 00000000 */ - 0x7e, /* 01111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 167 0xa7 '§' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0x6c, /* 01101100 */ - 0x38, /* 00111000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 168 0xa8 '¨' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x00, /* 00000000 */ - 0x30, /* 00110000 */ - 0x30, /* 00110000 */ - 0x60, /* 01100000 */ - 0xc0, /* 11000000 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x7c, /* 01111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 169 0xa9 '©' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0xc0, /* 
11000000 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 170 0xaa 'ª' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 171 0xab '«' */ - 0x00, /* 00000000 */ - 0x60, /* 01100000 */ - 0xe0, /* 11100000 */ - 0x62, /* 01100010 */ - 0x66, /* 01100110 */ - 0x6c, /* 01101100 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x60, /* 01100000 */ - 0xdc, /* 11011100 */ - 0x86, /* 10000110 */ - 0x0c, /* 00001100 */ - 0x18, /* 00011000 */ - 0x3e, /* 00111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 172 0xac '¬' */ - 0x00, /* 00000000 */ - 0x60, /* 01100000 */ - 0xe0, /* 11100000 */ - 0x62, /* 01100010 */ - 0x66, /* 01100110 */ - 0x6c, /* 01101100 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x66, /* 01100110 */ - 0xce, /* 11001110 */ - 0x9a, /* 10011010 */ - 0x3f, /* 00111111 */ - 0x06, /* 00000110 */ - 0x06, /* 00000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 173 0xad '­' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x3c, /* 00111100 */ - 0x3c, /* 00111100 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 174 0xae '®' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x36, /* 00110110 */ - 0x6c, /* 01101100 */ - 0xd8, /* 11011000 */ - 0x6c, /* 01101100 */ - 0x36, /* 00110110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 175 0xaf '¯' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xd8, /* 11011000 */ - 0x6c, /* 01101100 */ - 0x36, /* 00110110 */ - 0x6c, /* 01101100 */ - 0xd8, /* 11011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 176 0xb0 '°' */ - 0x11, /* 00010001 */ - 0x44, /* 01000100 */ - 0x11, /* 00010001 */ - 0x44, /* 01000100 */ - 0x11, /* 00010001 */ - 0x44, /* 01000100 */ - 0x11, /* 00010001 */ - 0x44, /* 01000100 */ - 0x11, /* 00010001 */ - 0x44, /* 01000100 */ - 0x11, /* 00010001 */ - 0x44, /* 01000100 */ - 0x11, /* 00010001 */ - 0x44, /* 01000100 */ - 0x11, /* 00010001 */ - 0x44, /* 01000100 */ - - /* 177 0xb1 '±' */ - 0x55, /* 01010101 */ - 0xaa, /* 10101010 */ - 0x55, /* 01010101 */ - 0xaa, /* 10101010 */ - 0x55, /* 01010101 */ - 0xaa, /* 10101010 */ - 0x55, /* 01010101 */ - 0xaa, /* 10101010 */ - 0x55, /* 01010101 */ - 0xaa, /* 10101010 */ - 0x55, /* 01010101 */ - 0xaa, /* 10101010 */ - 0x55, /* 01010101 */ - 0xaa, /* 10101010 */ - 0x55, /* 01010101 */ - 0xaa, /* 10101010 */ - - /* 178 0xb2 '²' */ - 0xdd, /* 11011101 */ - 0x77, /* 01110111 */ - 0xdd, /* 11011101 */ - 0x77, /* 01110111 */ - 0xdd, /* 11011101 */ - 0x77, /* 01110111 */ - 0xdd, /* 11011101 */ - 0x77, /* 01110111 */ - 0xdd, /* 11011101 */ 
- 0x77, /* 01110111 */ - 0xdd, /* 11011101 */ - 0x77, /* 01110111 */ - 0xdd, /* 11011101 */ - 0x77, /* 01110111 */ - 0xdd, /* 11011101 */ - 0x77, /* 01110111 */ - - /* 179 0xb3 '³' */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - - /* 180 0xb4 '´' */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0xf8, /* 11111000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - - /* 181 0xb5 'µ' */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0xf8, /* 11111000 */ - 0x18, /* 00011000 */ - 0xf8, /* 11111000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - - /* 182 0xb6 '¶' */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0xf6, /* 11110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - - /* 183 0xb7 '·' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - - /* 184 0xb8 '¸' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xf8, /* 11111000 */ - 0x18, /* 00011000 */ - 0xf8, /* 11111000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - - /* 185 0xb9 '¹' */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0xf6, /* 11110110 */ - 0x06, /* 00000110 */ - 0xf6, /* 11110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - - /* 186 0xba 'º' */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - - /* 187 0xbb '»' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0x06, /* 00000110 */ - 0xf6, /* 11110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 
00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - - /* 188 0xbc '¼' */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0xf6, /* 11110110 */ - 0x06, /* 00000110 */ - 0xfe, /* 11111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 189 0xbd '½' */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0xfe, /* 11111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 190 0xbe '¾' */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0xf8, /* 11111000 */ - 0x18, /* 00011000 */ - 0xf8, /* 11111000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 191 0xbf '¿' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xf8, /* 11111000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - - /* 192 0xc0 'À' */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x1f, /* 00011111 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 193 0xc1 'Á' */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0xff, /* 11111111 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 194 0xc2 'Â' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xff, /* 11111111 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - - /* 195 0xc3 'Ã' */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x1f, /* 00011111 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - - /* 196 0xc4 'Ä' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xff, /* 11111111 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ 
- 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 197 0xc5 'Å' */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0xff, /* 11111111 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - - /* 198 0xc6 'Æ' */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x1f, /* 00011111 */ - 0x18, /* 00011000 */ - 0x1f, /* 00011111 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - - /* 199 0xc7 'Ç' */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x37, /* 00110111 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - - /* 200 0xc8 'È' */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x37, /* 00110111 */ - 0x30, /* 00110000 */ - 0x3f, /* 00111111 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 201 0xc9 'É' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x3f, /* 00111111 */ - 0x30, /* 00110000 */ - 0x37, /* 00110111 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - - /* 202 0xca 'Ê' */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0xf7, /* 11110111 */ - 0x00, /* 00000000 */ - 0xff, /* 11111111 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 203 0xcb 'Ë' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xff, /* 11111111 */ - 0x00, /* 00000000 */ - 0xf7, /* 11110111 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - - /* 204 0xcc 'Ì' */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x37, /* 00110111 */ - 0x30, /* 00110000 */ - 0x37, /* 00110111 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - - /* 205 0xcd 'Í' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xff, /* 11111111 */ - 0x00, /* 00000000 */ - 0xff, /* 11111111 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 
00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 206 0xce 'Î' */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0xf7, /* 11110111 */ - 0x00, /* 00000000 */ - 0xf7, /* 11110111 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - - /* 207 0xcf 'Ï' */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0xff, /* 11111111 */ - 0x00, /* 00000000 */ - 0xff, /* 11111111 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 208 0xd0 'Ð' */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0xff, /* 11111111 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 209 0xd1 'Ñ' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xff, /* 11111111 */ - 0x00, /* 00000000 */ - 0xff, /* 11111111 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - - /* 210 0xd2 'Ò' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xff, /* 11111111 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - - /* 211 0xd3 'Ó' */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x3f, /* 00111111 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 212 0xd4 'Ô' */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x1f, /* 00011111 */ - 0x18, /* 00011000 */ - 0x1f, /* 00011111 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 213 0xd5 'Õ' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x1f, /* 00011111 */ - 0x18, /* 00011000 */ - 0x1f, /* 00011111 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - - /* 214 0xd6 'Ö' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x3f, /* 00111111 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ 
- 0x36, /* 00110110 */ - - /* 215 0xd7 '×' */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0xff, /* 11111111 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - - /* 216 0xd8 'Ø' */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0xff, /* 11111111 */ - 0x18, /* 00011000 */ - 0xff, /* 11111111 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - - /* 217 0xd9 'Ù' */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0xf8, /* 11111000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 218 0xda 'Ú' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x1f, /* 00011111 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - - /* 219 0xdb 'Û' */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - - /* 220 0xdc 'Ü' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - - /* 221 0xdd 'Ý' */ - 0xf0, /* 11110000 */ - 0xf0, /* 11110000 */ - 0xf0, /* 11110000 */ - 0xf0, /* 11110000 */ - 0xf0, /* 11110000 */ - 0xf0, /* 11110000 */ - 0xf0, /* 11110000 */ - 0xf0, /* 11110000 */ - 0xf0, /* 11110000 */ - 0xf0, /* 11110000 */ - 0xf0, /* 11110000 */ - 0xf0, /* 11110000 */ - 0xf0, /* 11110000 */ - 0xf0, /* 11110000 */ - 0xf0, /* 11110000 */ - 0xf0, /* 11110000 */ - - /* 222 0xde 'Þ' */ - 0x0f, /* 00001111 */ - 0x0f, /* 00001111 */ - 0x0f, /* 00001111 */ - 0x0f, /* 00001111 */ - 0x0f, /* 00001111 */ - 0x0f, /* 00001111 */ - 0x0f, /* 00001111 */ - 0x0f, /* 00001111 */ - 0x0f, /* 00001111 */ - 0x0f, /* 00001111 */ - 0x0f, /* 00001111 */ - 0x0f, /* 00001111 */ - 0x0f, /* 00001111 */ - 0x0f, /* 00001111 */ - 0x0f, /* 00001111 */ - 0x0f, /* 00001111 */ - - /* 223 0xdf 'ß' */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0xff, /* 11111111 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 224 
0xe0 'à' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x76, /* 01110110 */ - 0xdc, /* 11011100 */ - 0xd8, /* 11011000 */ - 0xd8, /* 11011000 */ - 0xd8, /* 11011000 */ - 0xdc, /* 11011100 */ - 0x76, /* 01110110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 225 0xe1 'á' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x78, /* 01111000 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xcc, /* 11001100 */ - 0xd8, /* 11011000 */ - 0xcc, /* 11001100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xcc, /* 11001100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 226 0xe2 'â' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0xc0, /* 11000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 227 0xe3 'ã' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0x6c, /* 01101100 */ - 0x6c, /* 01101100 */ - 0x6c, /* 01101100 */ - 0x6c, /* 01101100 */ - 0x6c, /* 01101100 */ - 0x6c, /* 01101100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 228 0xe4 'ä' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0xc6, /* 11000110 */ - 0x60, /* 01100000 */ - 0x30, /* 00110000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x60, /* 01100000 */ - 0xc6, /* 11000110 */ - 0xfe, /* 11111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 229 0xe5 'å' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7e, /* 01111110 */ - 0xd8, /* 11011000 */ - 0xd8, /* 11011000 */ - 0xd8, /* 11011000 */ - 0xd8, /* 11011000 */ - 0xd8, /* 11011000 */ - 0x70, /* 01110000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 230 0xe6 'æ' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x7c, /* 01111100 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0xc0, /* 11000000 */ - 0x00, /* 00000000 */ - - /* 231 0xe7 'ç' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x76, /* 01110110 */ - 0xdc, /* 11011100 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 232 0xe8 'è' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7e, /* 01111110 */ - 0x18, /* 00011000 */ - 0x3c, /* 00111100 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x3c, /* 00111100 */ - 0x18, /* 00011000 */ - 0x7e, /* 01111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 233 0xe9 'é' */ - 0x00, /* 00000000 */ 
- 0x00, /* 00000000 */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xfe, /* 11111110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x6c, /* 01101100 */ - 0x38, /* 00111000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 234 0xea 'ê' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x6c, /* 01101100 */ - 0x6c, /* 01101100 */ - 0x6c, /* 01101100 */ - 0x6c, /* 01101100 */ - 0xee, /* 11101110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 235 0xeb 'ë' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x1e, /* 00011110 */ - 0x30, /* 00110000 */ - 0x18, /* 00011000 */ - 0x0c, /* 00001100 */ - 0x3e, /* 00111110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x66, /* 01100110 */ - 0x3c, /* 00111100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 236 0xec 'ì' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7e, /* 01111110 */ - 0xdb, /* 11011011 */ - 0xdb, /* 11011011 */ - 0xdb, /* 11011011 */ - 0x7e, /* 01111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 237 0xed 'í' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x03, /* 00000011 */ - 0x06, /* 00000110 */ - 0x7e, /* 01111110 */ - 0xdb, /* 11011011 */ - 0xdb, /* 11011011 */ - 0xf3, /* 11110011 */ - 0x7e, /* 01111110 */ - 0x60, /* 01100000 */ - 0xc0, /* 11000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 238 0xee 'î' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x1c, /* 00011100 */ - 0x30, /* 00110000 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0x7c, /* 01111100 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0x60, /* 01100000 */ - 0x30, /* 00110000 */ - 0x1c, /* 00011100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 239 0xef 'ï' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7c, /* 01111100 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0xc6, /* 11000110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 240 0xf0 'ð' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0xfe, /* 11111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 241 0xf1 'ñ' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x7e, /* 01111110 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7e, /* 01111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 242 0xf2 'ò' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 
00000000 */ - 0x30, /* 00110000 */ - 0x18, /* 00011000 */ - 0x0c, /* 00001100 */ - 0x06, /* 00000110 */ - 0x0c, /* 00001100 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x00, /* 00000000 */ - 0x7e, /* 01111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 243 0xf3 'ó' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x0c, /* 00001100 */ - 0x18, /* 00011000 */ - 0x30, /* 00110000 */ - 0x60, /* 01100000 */ - 0x30, /* 00110000 */ - 0x18, /* 00011000 */ - 0x0c, /* 00001100 */ - 0x00, /* 00000000 */ - 0x7e, /* 01111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 244 0xf4 'ô' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x0e, /* 00001110 */ - 0x1b, /* 00011011 */ - 0x1b, /* 00011011 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - - /* 245 0xf5 'õ' */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0xd8, /* 11011000 */ - 0xd8, /* 11011000 */ - 0xd8, /* 11011000 */ - 0x70, /* 01110000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 246 0xf6 'ö' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x7e, /* 01111110 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 247 0xf7 '÷' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x76, /* 01110110 */ - 0xdc, /* 11011100 */ - 0x00, /* 00000000 */ - 0x76, /* 01110110 */ - 0xdc, /* 11011100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 248 0xf8 'ø' */ - 0x00, /* 00000000 */ - 0x38, /* 00111000 */ - 0x6c, /* 01101100 */ - 0x6c, /* 01101100 */ - 0x38, /* 00111000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 249 0xf9 'ù' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 250 0xfa 'ú' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x18, /* 00011000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 251 0xfb 'û' */ - 0x00, /* 00000000 */ - 0x0f, /* 00001111 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ 
- 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0x0c, /* 00001100 */ - 0xec, /* 11101100 */ - 0x6c, /* 01101100 */ - 0x6c, /* 01101100 */ - 0x3c, /* 00111100 */ - 0x1c, /* 00011100 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 252 0xfc 'ü' */ - 0x00, /* 00000000 */ - 0x6c, /* 01101100 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x36, /* 00110110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 253 0xfd 'ý' */ - 0x00, /* 00000000 */ - 0x3c, /* 00111100 */ - 0x66, /* 01100110 */ - 0x0c, /* 00001100 */ - 0x18, /* 00011000 */ - 0x32, /* 00110010 */ - 0x7e, /* 01111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 254 0xfe 'þ' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x7e, /* 01111110 */ - 0x7e, /* 01111110 */ - 0x7e, /* 01111110 */ - 0x7e, /* 01111110 */ - 0x7e, /* 01111110 */ - 0x7e, /* 01111110 */ - 0x7e, /* 01111110 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - - /* 255 0xff 'ÿ' */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ - 0x00, /* 00000000 */ + /* 0 0x00 '^@' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 1 0x01 '^A' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7e, /* 01111110 */ + 0x81, /* 10000001 */ + 0xa5, /* 10100101 */ + 0x81, /* 10000001 */ + 0x81, /* 10000001 */ + 0xbd, /* 10111101 */ + 0x99, /* 10011001 */ + 0x81, /* 10000001 */ + 0x81, /* 10000001 */ + 0x7e, /* 01111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 2 0x02 '^B' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7e, /* 01111110 */ + 0xff, /* 11111111 */ + 0xdb, /* 11011011 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xc3, /* 11000011 */ + 0xe7, /* 11100111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0x7e, /* 01111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 3 0x03 '^C' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x6c, /* 01101100 */ + 0xfe, /* 11111110 */ + 0xfe, /* 11111110 */ + 0xfe, /* 11111110 */ + 0xfe, /* 11111110 */ + 0x7c, /* 01111100 */ + 0x38, /* 00111000 */ + 0x10, /* 00010000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 4 0x04 '^D' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x10, /* 00010000 */ + 0x38, /* 00111000 
*/ + 0x7c, /* 01111100 */ + 0xfe, /* 11111110 */ + 0x7c, /* 01111100 */ + 0x38, /* 00111000 */ + 0x10, /* 00010000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 5 0x05 '^E' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x3c, /* 00111100 */ + 0xe7, /* 11100111 */ + 0xe7, /* 11100111 */ + 0xe7, /* 11100111 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 6 0x06 '^F' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x7e, /* 01111110 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0x7e, /* 01111110 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 7 0x07 '^G' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x3c, /* 00111100 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 8 0x08 '^H' */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xe7, /* 11100111 */ + 0xc3, /* 11000011 */ + 0xc3, /* 11000011 */ + 0xe7, /* 11100111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + + /* 9 0x09 '^I' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x3c, /* 00111100 */ + 0x66, /* 01100110 */ + 0x42, /* 01000010 */ + 0x42, /* 01000010 */ + 0x66, /* 01100110 */ + 0x3c, /* 00111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 10 0x0a '^J' */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xc3, /* 11000011 */ + 0x99, /* 10011001 */ + 0xbd, /* 10111101 */ + 0xbd, /* 10111101 */ + 0x99, /* 10011001 */ + 0xc3, /* 11000011 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + + /* 11 0x0b '^K' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x1e, /* 00011110 */ + 0x0e, /* 00001110 */ + 0x1a, /* 00011010 */ + 0x32, /* 00110010 */ + 0x78, /* 01111000 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0x78, /* 01111000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 12 0x0c '^L' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x3c, /* 00111100 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x3c, /* 00111100 */ + 0x18, /* 00011000 */ + 0x7e, /* 01111110 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 13 0x0d '^M' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x3f, /* 00111111 */ + 0x33, /* 00110011 */ + 0x3f, /* 00111111 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x30, /* 
00110000 */ + 0x30, /* 00110000 */ + 0x70, /* 01110000 */ + 0xf0, /* 11110000 */ + 0xe0, /* 11100000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 14 0x0e '^N' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7f, /* 01111111 */ + 0x63, /* 01100011 */ + 0x7f, /* 01111111 */ + 0x63, /* 01100011 */ + 0x63, /* 01100011 */ + 0x63, /* 01100011 */ + 0x63, /* 01100011 */ + 0x67, /* 01100111 */ + 0xe7, /* 11100111 */ + 0xe6, /* 11100110 */ + 0xc0, /* 11000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 15 0x0f '^O' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0xdb, /* 11011011 */ + 0x3c, /* 00111100 */ + 0xe7, /* 11100111 */ + 0x3c, /* 00111100 */ + 0xdb, /* 11011011 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 16 0x10 '^P' */ + 0x00, /* 00000000 */ + 0x80, /* 10000000 */ + 0xc0, /* 11000000 */ + 0xe0, /* 11100000 */ + 0xf0, /* 11110000 */ + 0xf8, /* 11111000 */ + 0xfe, /* 11111110 */ + 0xf8, /* 11111000 */ + 0xf0, /* 11110000 */ + 0xe0, /* 11100000 */ + 0xc0, /* 11000000 */ + 0x80, /* 10000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 17 0x11 '^Q' */ + 0x00, /* 00000000 */ + 0x02, /* 00000010 */ + 0x06, /* 00000110 */ + 0x0e, /* 00001110 */ + 0x1e, /* 00011110 */ + 0x3e, /* 00111110 */ + 0xfe, /* 11111110 */ + 0x3e, /* 00111110 */ + 0x1e, /* 00011110 */ + 0x0e, /* 00001110 */ + 0x06, /* 00000110 */ + 0x02, /* 00000010 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 18 0x12 '^R' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x7e, /* 01111110 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x7e, /* 01111110 */ + 0x3c, /* 00111100 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 19 0x13 '^S' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x00, /* 00000000 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 20 0x14 '^T' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7f, /* 01111111 */ + 0xdb, /* 11011011 */ + 0xdb, /* 11011011 */ + 0xdb, /* 11011011 */ + 0x7b, /* 01111011 */ + 0x1b, /* 00011011 */ + 0x1b, /* 00011011 */ + 0x1b, /* 00011011 */ + 0x1b, /* 00011011 */ + 0x1b, /* 00011011 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 21 0x15 '^U' */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0x60, /* 01100000 */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x6c, /* 01101100 */ + 0x38, /* 00111000 */ + 0x0c, /* 00001100 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 22 0x16 '^V' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ 
+ 0xfe, /* 11111110 */ + 0xfe, /* 11111110 */ + 0xfe, /* 11111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 23 0x17 '^W' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x7e, /* 01111110 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x7e, /* 01111110 */ + 0x3c, /* 00111100 */ + 0x18, /* 00011000 */ + 0x7e, /* 01111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 24 0x18 '^X' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x7e, /* 01111110 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 25 0x19 '^Y' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x7e, /* 01111110 */ + 0x3c, /* 00111100 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 26 0x1a '^Z' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x0c, /* 00001100 */ + 0xfe, /* 11111110 */ + 0x0c, /* 00001100 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 27 0x1b '^[' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x30, /* 00110000 */ + 0x60, /* 01100000 */ + 0xfe, /* 11111110 */ + 0x60, /* 01100000 */ + 0x30, /* 00110000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 28 0x1c '^\' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xfe, /* 11111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 29 0x1d '^]' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x28, /* 00101000 */ + 0x6c, /* 01101100 */ + 0xfe, /* 11111110 */ + 0x6c, /* 01101100 */ + 0x28, /* 00101000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 30 0x1e '^^' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x10, /* 00010000 */ + 0x38, /* 00111000 */ + 0x38, /* 00111000 */ + 0x7c, /* 01111100 */ + 0x7c, /* 01111100 */ + 0xfe, /* 11111110 */ + 0xfe, /* 11111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 31 0x1f '^_' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ + 0xfe, /* 11111110 */ + 0x7c, /* 01111100 */ + 0x7c, /* 01111100 */ + 0x38, /* 00111000 */ + 0x38, /* 00111000 */ + 0x10, /* 
00010000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 32 0x20 ' ' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 33 0x21 '!' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x3c, /* 00111100 */ + 0x3c, /* 00111100 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 34 0x22 '"' */ + 0x00, /* 00000000 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x24, /* 00100100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 35 0x23 '#' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x6c, /* 01101100 */ + 0x6c, /* 01101100 */ + 0xfe, /* 11111110 */ + 0x6c, /* 01101100 */ + 0x6c, /* 01101100 */ + 0x6c, /* 01101100 */ + 0xfe, /* 11111110 */ + 0x6c, /* 01101100 */ + 0x6c, /* 01101100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 36 0x24 '$' */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xc2, /* 11000010 */ + 0xc0, /* 11000000 */ + 0x7c, /* 01111100 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0x86, /* 10000110 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 37 0x25 '%' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xc2, /* 11000010 */ + 0xc6, /* 11000110 */ + 0x0c, /* 00001100 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x60, /* 01100000 */ + 0xc6, /* 11000110 */ + 0x86, /* 10000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 38 0x26 '&' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0x6c, /* 01101100 */ + 0x38, /* 00111000 */ + 0x76, /* 01110110 */ + 0xdc, /* 11011100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0x76, /* 01110110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 39 0x27 ''' */ + 0x00, /* 00000000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x60, /* 01100000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 40 0x28 '(' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x0c, /* 00001100 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x18, /* 00011000 */ + 0x0c, /* 00001100 */ + 0x00, 
/* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 41 0x29 ')' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x30, /* 00110000 */ + 0x18, /* 00011000 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 42 0x2a '*' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x66, /* 01100110 */ + 0x3c, /* 00111100 */ + 0xff, /* 11111111 */ + 0x3c, /* 00111100 */ + 0x66, /* 01100110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 43 0x2b '+' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x7e, /* 01111110 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 44 0x2c ',' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 45 0x2d '-' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 46 0x2e '.' 
*/ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 47 0x2f '/' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x02, /* 00000010 */ + 0x06, /* 00000110 */ + 0x0c, /* 00001100 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x60, /* 01100000 */ + 0xc0, /* 11000000 */ + 0x80, /* 10000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 48 0x30 '0' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xd6, /* 11010110 */ + 0xd6, /* 11010110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x6c, /* 01101100 */ + 0x38, /* 00111000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 49 0x31 '1' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x38, /* 00111000 */ + 0x78, /* 01111000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x7e, /* 01111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 50 0x32 '2' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0x06, /* 00000110 */ + 0x0c, /* 00001100 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x60, /* 01100000 */ + 0xc0, /* 11000000 */ + 0xc6, /* 11000110 */ + 0xfe, /* 11111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 51 0x33 '3' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0x3c, /* 00111100 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 52 0x34 '4' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x0c, /* 00001100 */ + 0x1c, /* 00011100 */ + 0x3c, /* 00111100 */ + 0x6c, /* 01101100 */ + 0xcc, /* 11001100 */ + 0xfe, /* 11111110 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x1e, /* 00011110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 53 0x35 '5' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xfc, /* 11111100 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 54 0x36 '6' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x38, /* 00111000 */ + 0x60, /* 01100000 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xfc, /* 11111100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 55 0x37 '7' */ + 0x00, /* 00000000 */ + 0x00, /* 
00000000 */ + 0xfe, /* 11111110 */ + 0xc6, /* 11000110 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0x0c, /* 00001100 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 56 0x38 '8' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 57 0x39 '9' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x7e, /* 01111110 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0x0c, /* 00001100 */ + 0x78, /* 01111000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 58 0x3a ':' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 59 0x3b ';' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 60 0x3c '<' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x06, /* 00000110 */ + 0x0c, /* 00001100 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x60, /* 01100000 */ + 0x30, /* 00110000 */ + 0x18, /* 00011000 */ + 0x0c, /* 00001100 */ + 0x06, /* 00000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 61 0x3d '=' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7e, /* 01111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7e, /* 01111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 62 0x3e '>' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x60, /* 01100000 */ + 0x30, /* 00110000 */ + 0x18, /* 00011000 */ + 0x0c, /* 00001100 */ + 0x06, /* 00000110 */ + 0x0c, /* 00001100 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x60, /* 01100000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 63 0x3f '?' 
*/ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x0c, /* 00001100 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 64 0x40 '@' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xde, /* 11011110 */ + 0xde, /* 11011110 */ + 0xde, /* 11011110 */ + 0xdc, /* 11011100 */ + 0xc0, /* 11000000 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 65 0x41 'A' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x10, /* 00010000 */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xfe, /* 11111110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 66 0x42 'B' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfc, /* 11111100 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x7c, /* 01111100 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0xfc, /* 11111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 67 0x43 'C' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x3c, /* 00111100 */ + 0x66, /* 01100110 */ + 0xc2, /* 11000010 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc2, /* 11000010 */ + 0x66, /* 01100110 */ + 0x3c, /* 00111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 68 0x44 'D' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xf8, /* 11111000 */ + 0x6c, /* 01101100 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x6c, /* 01101100 */ + 0xf8, /* 11111000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 69 0x45 'E' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ + 0x66, /* 01100110 */ + 0x62, /* 01100010 */ + 0x68, /* 01101000 */ + 0x78, /* 01111000 */ + 0x68, /* 01101000 */ + 0x60, /* 01100000 */ + 0x62, /* 01100010 */ + 0x66, /* 01100110 */ + 0xfe, /* 11111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 70 0x46 'F' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ + 0x66, /* 01100110 */ + 0x62, /* 01100010 */ + 0x68, /* 01101000 */ + 0x78, /* 01111000 */ + 0x68, /* 01101000 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0xf0, /* 11110000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 71 0x47 'G' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x3c, /* 00111100 */ + 0x66, /* 01100110 */ + 0xc2, /* 11000010 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xde, /* 11011110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x66, /* 01100110 */ + 0x3a, /* 00111010 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 72 0x48 'H' */ + 0x00, /* 00000000 */ + 0x00, /* 
00000000 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xfe, /* 11111110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 73 0x49 'I' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x3c, /* 00111100 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 74 0x4a 'J' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x1e, /* 00011110 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0x78, /* 01111000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 75 0x4b 'K' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xe6, /* 11100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x6c, /* 01101100 */ + 0x78, /* 01111000 */ + 0x78, /* 01111000 */ + 0x6c, /* 01101100 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0xe6, /* 11100110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 76 0x4c 'L' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xf0, /* 11110000 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0x62, /* 01100010 */ + 0x66, /* 01100110 */ + 0xfe, /* 11111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 77 0x4d 'M' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0xee, /* 11101110 */ + 0xfe, /* 11111110 */ + 0xfe, /* 11111110 */ + 0xd6, /* 11010110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 78 0x4e 'N' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0xe6, /* 11100110 */ + 0xf6, /* 11110110 */ + 0xfe, /* 11111110 */ + 0xde, /* 11011110 */ + 0xce, /* 11001110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 79 0x4f 'O' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 80 0x50 'P' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfc, /* 11111100 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x7c, /* 01111100 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0xf0, /* 11110000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 81 0x51 'Q' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, 
/* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xd6, /* 11010110 */ + 0xde, /* 11011110 */ + 0x7c, /* 01111100 */ + 0x0c, /* 00001100 */ + 0x0e, /* 00001110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 82 0x52 'R' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfc, /* 11111100 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x7c, /* 01111100 */ + 0x6c, /* 01101100 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0xe6, /* 11100110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 83 0x53 'S' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x60, /* 01100000 */ + 0x38, /* 00111000 */ + 0x0c, /* 00001100 */ + 0x06, /* 00000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 84 0x54 'T' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7e, /* 01111110 */ + 0x7e, /* 01111110 */ + 0x5a, /* 01011010 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 85 0x55 'U' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 86 0x56 'V' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x6c, /* 01101100 */ + 0x38, /* 00111000 */ + 0x10, /* 00010000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 87 0x57 'W' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xd6, /* 11010110 */ + 0xd6, /* 11010110 */ + 0xd6, /* 11010110 */ + 0xfe, /* 11111110 */ + 0xee, /* 11101110 */ + 0x6c, /* 01101100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 88 0x58 'X' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x6c, /* 01101100 */ + 0x7c, /* 01111100 */ + 0x38, /* 00111000 */ + 0x38, /* 00111000 */ + 0x7c, /* 01111100 */ + 0x6c, /* 01101100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 89 0x59 'Y' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x3c, /* 00111100 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 90 0x5a 'Z' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ + 0xc6, /* 11000110 */ + 0x86, /* 10000110 */ + 
0x0c, /* 00001100 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x60, /* 01100000 */ + 0xc2, /* 11000010 */ + 0xc6, /* 11000110 */ + 0xfe, /* 11111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 91 0x5b '[' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x3c, /* 00111100 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x3c, /* 00111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 92 0x5c '\' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x80, /* 10000000 */ + 0xc0, /* 11000000 */ + 0xe0, /* 11100000 */ + 0x70, /* 01110000 */ + 0x38, /* 00111000 */ + 0x1c, /* 00011100 */ + 0x0e, /* 00001110 */ + 0x06, /* 00000110 */ + 0x02, /* 00000010 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 93 0x5d ']' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x3c, /* 00111100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x3c, /* 00111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 94 0x5e '^' */ + 0x10, /* 00010000 */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 95 0x5f '_' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xff, /* 11111111 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 96 0x60 '`' */ + 0x00, /* 00000000 */ + 0x30, /* 00110000 */ + 0x18, /* 00011000 */ + 0x0c, /* 00001100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 97 0x61 'a' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x78, /* 01111000 */ + 0x0c, /* 00001100 */ + 0x7c, /* 01111100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0x76, /* 01110110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 98 0x62 'b' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xe0, /* 11100000 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0x78, /* 01111000 */ + 0x6c, /* 01101100 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 99 0x63 'c' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ 
+ 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 100 0x64 'd' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x1c, /* 00011100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x3c, /* 00111100 */ + 0x6c, /* 01101100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0x76, /* 01110110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 101 0x65 'e' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xfe, /* 11111110 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 102 0x66 'f' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x1c, /* 00011100 */ + 0x36, /* 00110110 */ + 0x32, /* 00110010 */ + 0x30, /* 00110000 */ + 0x78, /* 01111000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x78, /* 01111000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 103 0x67 'g' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x76, /* 01110110 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0x7c, /* 01111100 */ + 0x0c, /* 00001100 */ + 0xcc, /* 11001100 */ + 0x78, /* 01111000 */ + 0x00, /* 00000000 */ + + /* 104 0x68 'h' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xe0, /* 11100000 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0x6c, /* 01101100 */ + 0x76, /* 01110110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0xe6, /* 11100110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 105 0x69 'i' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x38, /* 00111000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 106 0x6a 'j' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0x00, /* 00000000 */ + 0x0e, /* 00001110 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x3c, /* 00111100 */ + 0x00, /* 00000000 */ + + /* 107 0x6b 'k' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xe0, /* 11100000 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0x66, /* 01100110 */ + 0x6c, /* 01101100 */ + 0x78, /* 01111000 */ + 0x78, /* 01111000 */ + 0x6c, /* 01101100 */ + 0x66, /* 01100110 */ + 0xe6, /* 11100110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 108 0x6c 'l' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x38, /* 00111000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 
00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 109 0x6d 'm' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xec, /* 11101100 */ + 0xfe, /* 11111110 */ + 0xd6, /* 11010110 */ + 0xd6, /* 11010110 */ + 0xd6, /* 11010110 */ + 0xd6, /* 11010110 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 110 0x6e 'n' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xdc, /* 11011100 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 111 0x6f 'o' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 112 0x70 'p' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xdc, /* 11011100 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x7c, /* 01111100 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0xf0, /* 11110000 */ + 0x00, /* 00000000 */ + + /* 113 0x71 'q' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x76, /* 01110110 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0x7c, /* 01111100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x1e, /* 00011110 */ + 0x00, /* 00000000 */ + + /* 114 0x72 'r' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xdc, /* 11011100 */ + 0x76, /* 01110110 */ + 0x66, /* 01100110 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0xf0, /* 11110000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 115 0x73 's' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0x60, /* 01100000 */ + 0x38, /* 00111000 */ + 0x0c, /* 00001100 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 116 0x74 't' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x10, /* 00010000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0xfc, /* 11111100 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x36, /* 00110110 */ + 0x1c, /* 00011100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 117 0x75 'u' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ 
+ 0xcc, /* 11001100 */ + 0x76, /* 01110110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 118 0x76 'v' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x6c, /* 01101100 */ + 0x38, /* 00111000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 119 0x77 'w' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xd6, /* 11010110 */ + 0xd6, /* 11010110 */ + 0xd6, /* 11010110 */ + 0xfe, /* 11111110 */ + 0x6c, /* 01101100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 120 0x78 'x' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0x6c, /* 01101100 */ + 0x38, /* 00111000 */ + 0x38, /* 00111000 */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 121 0x79 'y' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x7e, /* 01111110 */ + 0x06, /* 00000110 */ + 0x0c, /* 00001100 */ + 0xf8, /* 11111000 */ + 0x00, /* 00000000 */ + + /* 122 0x7a 'z' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ + 0xcc, /* 11001100 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x60, /* 01100000 */ + 0xc6, /* 11000110 */ + 0xfe, /* 11111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 123 0x7b '{' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x0e, /* 00001110 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x70, /* 01110000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x0e, /* 00001110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 124 0x7c '|' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 125 0x7d '}' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x70, /* 01110000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x0e, /* 00001110 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x70, /* 01110000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 126 0x7e '~' */ + 0x00, /* 00000000 */ + 0x76, /* 01110110 */ + 0xdc, /* 11011100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 
00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 127 0x7f '' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x10, /* 00010000 */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xfe, /* 11111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 128 0x80 '€' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x3c, /* 00111100 */ + 0x66, /* 01100110 */ + 0xc2, /* 11000010 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc2, /* 11000010 */ + 0x66, /* 01100110 */ + 0x3c, /* 00111100 */ + 0x18, /* 00011000 */ + 0x70, /* 01110000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 129 0x81 '' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xcc, /* 11001100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0x76, /* 01110110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 130 0x82 '‚' */ + 0x00, /* 00000000 */ + 0x0c, /* 00001100 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xfe, /* 11111110 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 131 0x83 'ƒ' */ + 0x00, /* 00000000 */ + 0x10, /* 00010000 */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0x00, /* 00000000 */ + 0x78, /* 01111000 */ + 0x0c, /* 00001100 */ + 0x7c, /* 01111100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0x76, /* 01110110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 132 0x84 '„' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xcc, /* 11001100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x78, /* 01111000 */ + 0x0c, /* 00001100 */ + 0x7c, /* 01111100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0x76, /* 01110110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 133 0x85 '…' */ + 0x00, /* 00000000 */ + 0x60, /* 01100000 */ + 0x30, /* 00110000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x78, /* 01111000 */ + 0x0c, /* 00001100 */ + 0x7c, /* 01111100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0x76, /* 01110110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 134 0x86 '†' */ + 0x00, /* 00000000 */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0x38, /* 00111000 */ + 0x00, /* 00000000 */ + 0x78, /* 01111000 */ + 0x0c, /* 00001100 */ + 0x7c, /* 01111100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0x76, /* 01110110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 135 0x87 '‡' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x18, /* 00011000 */ + 
0x70, /* 01110000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 136 0x88 'ˆ' */ + 0x00, /* 00000000 */ + 0x10, /* 00010000 */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xfe, /* 11111110 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 137 0x89 '‰' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xfe, /* 11111110 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 138 0x8a 'Š' */ + 0x00, /* 00000000 */ + 0x60, /* 01100000 */ + 0x30, /* 00110000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xfe, /* 11111110 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 139 0x8b '‹' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x66, /* 01100110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x38, /* 00111000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 140 0x8c 'Œ' */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x66, /* 01100110 */ + 0x00, /* 00000000 */ + 0x38, /* 00111000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 141 0x8d '' */ + 0x00, /* 00000000 */ + 0x60, /* 01100000 */ + 0x30, /* 00110000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x38, /* 00111000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 142 0x8e 'Ž' */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0x10, /* 00010000 */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xfe, /* 11111110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 143 0x8f '' */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0x38, /* 00111000 */ + 0x10, /* 00010000 */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0xc6, /* 11000110 */ + 0xfe, /* 11111110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 144 0x90 '' */ + 0x0c, /* 00001100 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ + 0x66, /* 01100110 */ + 0x62, /* 01100010 */ + 0x68, /* 01101000 */ + 0x78, /* 01111000 */ + 0x68, /* 01101000 */ + 0x62, /* 01100010 */ + 0x66, /* 01100110 */ + 0xfe, /* 11111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 
00000000 */ + 0x00, /* 00000000 */ + + /* 145 0x91 '‘' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xec, /* 11101100 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x7e, /* 01111110 */ + 0xd8, /* 11011000 */ + 0xd8, /* 11011000 */ + 0x6e, /* 01101110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 146 0x92 '’' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x3e, /* 00111110 */ + 0x6c, /* 01101100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xfe, /* 11111110 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xce, /* 11001110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 147 0x93 '“' */ + 0x00, /* 00000000 */ + 0x10, /* 00010000 */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 148 0x94 '”' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 149 0x95 '•' */ + 0x00, /* 00000000 */ + 0x60, /* 01100000 */ + 0x30, /* 00110000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 150 0x96 '–' */ + 0x00, /* 00000000 */ + 0x30, /* 00110000 */ + 0x78, /* 01111000 */ + 0xcc, /* 11001100 */ + 0x00, /* 00000000 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0x76, /* 01110110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 151 0x97 '—' */ + 0x00, /* 00000000 */ + 0x60, /* 01100000 */ + 0x30, /* 00110000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0x76, /* 01110110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 152 0x98 '˜' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x7e, /* 01111110 */ + 0x06, /* 00000110 */ + 0x0c, /* 00001100 */ + 0x78, /* 01111000 */ + 0x00, /* 00000000 */ + + /* 153 0x99 '™' */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ 
+ + /* 154 0x9a 'š' */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 155 0x9b '›' */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 156 0x9c 'œ' */ + 0x00, /* 00000000 */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0x64, /* 01100100 */ + 0x60, /* 01100000 */ + 0xf0, /* 11110000 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0xe6, /* 11100110 */ + 0xfc, /* 11111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 157 0x9d '' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x3c, /* 00111100 */ + 0x18, /* 00011000 */ + 0x7e, /* 01111110 */ + 0x18, /* 00011000 */ + 0x7e, /* 01111110 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 158 0x9e 'ž' */ + 0x00, /* 00000000 */ + 0xf8, /* 11111000 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xf8, /* 11111000 */ + 0xc4, /* 11000100 */ + 0xcc, /* 11001100 */ + 0xde, /* 11011110 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 159 0x9f 'Ÿ' */ + 0x00, /* 00000000 */ + 0x0e, /* 00001110 */ + 0x1b, /* 00011011 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x7e, /* 01111110 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0xd8, /* 11011000 */ + 0x70, /* 01110000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 160 0xa0 ' ' */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x60, /* 01100000 */ + 0x00, /* 00000000 */ + 0x78, /* 01111000 */ + 0x0c, /* 00001100 */ + 0x7c, /* 01111100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0x76, /* 01110110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 161 0xa1 '¡' */ + 0x00, /* 00000000 */ + 0x0c, /* 00001100 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x00, /* 00000000 */ + 0x38, /* 00111000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 162 0xa2 '¢' */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x60, /* 01100000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 163 0xa3 '£' */ + 0x00, /* 
00000000 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x60, /* 01100000 */ + 0x00, /* 00000000 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0x76, /* 01110110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 164 0xa4 '¤' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x76, /* 01110110 */ + 0xdc, /* 11011100 */ + 0x00, /* 00000000 */ + 0xdc, /* 11011100 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 165 0xa5 '¥' */ + 0x76, /* 01110110 */ + 0xdc, /* 11011100 */ + 0x00, /* 00000000 */ + 0xc6, /* 11000110 */ + 0xe6, /* 11100110 */ + 0xf6, /* 11110110 */ + 0xfe, /* 11111110 */ + 0xde, /* 11011110 */ + 0xce, /* 11001110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 166 0xa6 '¦' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x3c, /* 00111100 */ + 0x6c, /* 01101100 */ + 0x6c, /* 01101100 */ + 0x3e, /* 00111110 */ + 0x00, /* 00000000 */ + 0x7e, /* 01111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 167 0xa7 '§' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0x6c, /* 01101100 */ + 0x38, /* 00111000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 168 0xa8 '¨' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x00, /* 00000000 */ + 0x30, /* 00110000 */ + 0x30, /* 00110000 */ + 0x60, /* 01100000 */ + 0xc0, /* 11000000 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x7c, /* 01111100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 169 0xa9 '©' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 170 0xaa 'ª' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 171 0xab '«' */ + 0x00, /* 00000000 */ + 0x60, /* 01100000 */ + 0xe0, /* 11100000 */ + 0x62, /* 01100010 */ + 0x66, /* 01100110 */ + 0x6c, /* 01101100 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x60, /* 01100000 */ + 0xdc, /* 11011100 */ + 0x86, /* 10000110 */ + 0x0c, /* 00001100 */ + 0x18, /* 00011000 */ + 0x3e, /* 00111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 172 0xac '¬' */ + 0x00, /* 00000000 */ + 0x60, /* 01100000 */ 
+ 0xe0, /* 11100000 */ + 0x62, /* 01100010 */ + 0x66, /* 01100110 */ + 0x6c, /* 01101100 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x66, /* 01100110 */ + 0xce, /* 11001110 */ + 0x9a, /* 10011010 */ + 0x3f, /* 00111111 */ + 0x06, /* 00000110 */ + 0x06, /* 00000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 173 0xad '­' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x3c, /* 00111100 */ + 0x3c, /* 00111100 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 174 0xae '®' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x36, /* 00110110 */ + 0x6c, /* 01101100 */ + 0xd8, /* 11011000 */ + 0x6c, /* 01101100 */ + 0x36, /* 00110110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 175 0xaf '¯' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xd8, /* 11011000 */ + 0x6c, /* 01101100 */ + 0x36, /* 00110110 */ + 0x6c, /* 01101100 */ + 0xd8, /* 11011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 176 0xb0 '°' */ + 0x11, /* 00010001 */ + 0x44, /* 01000100 */ + 0x11, /* 00010001 */ + 0x44, /* 01000100 */ + 0x11, /* 00010001 */ + 0x44, /* 01000100 */ + 0x11, /* 00010001 */ + 0x44, /* 01000100 */ + 0x11, /* 00010001 */ + 0x44, /* 01000100 */ + 0x11, /* 00010001 */ + 0x44, /* 01000100 */ + 0x11, /* 00010001 */ + 0x44, /* 01000100 */ + 0x11, /* 00010001 */ + 0x44, /* 01000100 */ + + /* 177 0xb1 '±' */ + 0x55, /* 01010101 */ + 0xaa, /* 10101010 */ + 0x55, /* 01010101 */ + 0xaa, /* 10101010 */ + 0x55, /* 01010101 */ + 0xaa, /* 10101010 */ + 0x55, /* 01010101 */ + 0xaa, /* 10101010 */ + 0x55, /* 01010101 */ + 0xaa, /* 10101010 */ + 0x55, /* 01010101 */ + 0xaa, /* 10101010 */ + 0x55, /* 01010101 */ + 0xaa, /* 10101010 */ + 0x55, /* 01010101 */ + 0xaa, /* 10101010 */ + + /* 178 0xb2 '²' */ + 0xdd, /* 11011101 */ + 0x77, /* 01110111 */ + 0xdd, /* 11011101 */ + 0x77, /* 01110111 */ + 0xdd, /* 11011101 */ + 0x77, /* 01110111 */ + 0xdd, /* 11011101 */ + 0x77, /* 01110111 */ + 0xdd, /* 11011101 */ + 0x77, /* 01110111 */ + 0xdd, /* 11011101 */ + 0x77, /* 01110111 */ + 0xdd, /* 11011101 */ + 0x77, /* 01110111 */ + 0xdd, /* 11011101 */ + 0x77, /* 01110111 */ + + /* 179 0xb3 '³' */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + + /* 180 0xb4 '´' */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0xf8, /* 11111000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + + /* 181 0xb5 'µ' */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 
00011000 */ + 0x18, /* 00011000 */ + 0xf8, /* 11111000 */ + 0x18, /* 00011000 */ + 0xf8, /* 11111000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + + /* 182 0xb6 '¶' */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0xf6, /* 11110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + + /* 183 0xb7 '·' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + + /* 184 0xb8 '¸' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xf8, /* 11111000 */ + 0x18, /* 00011000 */ + 0xf8, /* 11111000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + + /* 185 0xb9 '¹' */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0xf6, /* 11110110 */ + 0x06, /* 00000110 */ + 0xf6, /* 11110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + + /* 186 0xba 'º' */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + + /* 187 0xbb '»' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ + 0x06, /* 00000110 */ + 0xf6, /* 11110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + + /* 188 0xbc '¼' */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0xf6, /* 11110110 */ + 0x06, /* 00000110 */ + 0xfe, /* 11111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 189 0xbd '½' */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0xfe, /* 11111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 190 0xbe '¾' */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ 
+ 0xf8, /* 11111000 */ + 0x18, /* 00011000 */ + 0xf8, /* 11111000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 191 0xbf '¿' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xf8, /* 11111000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + + /* 192 0xc0 'À' */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x1f, /* 00011111 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 193 0xc1 'Á' */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0xff, /* 11111111 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 194 0xc2 'Â' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xff, /* 11111111 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + + /* 195 0xc3 'Ã' */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x1f, /* 00011111 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + + /* 196 0xc4 'Ä' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xff, /* 11111111 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 197 0xc5 'Å' */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0xff, /* 11111111 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + + /* 198 0xc6 'Æ' */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x1f, /* 00011111 */ + 0x18, /* 00011000 */ + 0x1f, /* 00011111 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + + /* 199 0xc7 'Ç' */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 
00110110 */ + 0x37, /* 00110111 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + + /* 200 0xc8 'È' */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x37, /* 00110111 */ + 0x30, /* 00110000 */ + 0x3f, /* 00111111 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 201 0xc9 'É' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x3f, /* 00111111 */ + 0x30, /* 00110000 */ + 0x37, /* 00110111 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + + /* 202 0xca 'Ê' */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0xf7, /* 11110111 */ + 0x00, /* 00000000 */ + 0xff, /* 11111111 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 203 0xcb 'Ë' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xff, /* 11111111 */ + 0x00, /* 00000000 */ + 0xf7, /* 11110111 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + + /* 204 0xcc 'Ì' */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x37, /* 00110111 */ + 0x30, /* 00110000 */ + 0x37, /* 00110111 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + + /* 205 0xcd 'Í' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xff, /* 11111111 */ + 0x00, /* 00000000 */ + 0xff, /* 11111111 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 206 0xce 'Î' */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0xf7, /* 11110111 */ + 0x00, /* 00000000 */ + 0xf7, /* 11110111 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + + /* 207 0xcf 'Ï' */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0xff, /* 11111111 */ + 0x00, /* 00000000 */ + 0xff, /* 11111111 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 208 0xd0 'Ð' */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0xff, /* 11111111 */ 
+ 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 209 0xd1 'Ñ' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xff, /* 11111111 */ + 0x00, /* 00000000 */ + 0xff, /* 11111111 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + + /* 210 0xd2 'Ò' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xff, /* 11111111 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + + /* 211 0xd3 'Ó' */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x3f, /* 00111111 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 212 0xd4 'Ô' */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x1f, /* 00011111 */ + 0x18, /* 00011000 */ + 0x1f, /* 00011111 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 213 0xd5 'Õ' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x1f, /* 00011111 */ + 0x18, /* 00011000 */ + 0x1f, /* 00011111 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + + /* 214 0xd6 'Ö' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x3f, /* 00111111 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + + /* 215 0xd7 '×' */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0xff, /* 11111111 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + + /* 216 0xd8 'Ø' */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0xff, /* 11111111 */ + 0x18, /* 00011000 */ + 0xff, /* 11111111 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + + /* 217 0xd9 'Ù' */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0xf8, /* 11111000 */ + 0x00, /* 00000000 */ + 0x00, /* 
00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 218 0xda 'Ú' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x1f, /* 00011111 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + + /* 219 0xdb 'Û' */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + + /* 220 0xdc 'Ü' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + + /* 221 0xdd 'Ý' */ + 0xf0, /* 11110000 */ + 0xf0, /* 11110000 */ + 0xf0, /* 11110000 */ + 0xf0, /* 11110000 */ + 0xf0, /* 11110000 */ + 0xf0, /* 11110000 */ + 0xf0, /* 11110000 */ + 0xf0, /* 11110000 */ + 0xf0, /* 11110000 */ + 0xf0, /* 11110000 */ + 0xf0, /* 11110000 */ + 0xf0, /* 11110000 */ + 0xf0, /* 11110000 */ + 0xf0, /* 11110000 */ + 0xf0, /* 11110000 */ + 0xf0, /* 11110000 */ + + /* 222 0xde 'Þ' */ + 0x0f, /* 00001111 */ + 0x0f, /* 00001111 */ + 0x0f, /* 00001111 */ + 0x0f, /* 00001111 */ + 0x0f, /* 00001111 */ + 0x0f, /* 00001111 */ + 0x0f, /* 00001111 */ + 0x0f, /* 00001111 */ + 0x0f, /* 00001111 */ + 0x0f, /* 00001111 */ + 0x0f, /* 00001111 */ + 0x0f, /* 00001111 */ + 0x0f, /* 00001111 */ + 0x0f, /* 00001111 */ + 0x0f, /* 00001111 */ + 0x0f, /* 00001111 */ + + /* 223 0xdf 'ß' */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0xff, /* 11111111 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 224 0xe0 'à' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x76, /* 01110110 */ + 0xdc, /* 11011100 */ + 0xd8, /* 11011000 */ + 0xd8, /* 11011000 */ + 0xd8, /* 11011000 */ + 0xdc, /* 11011100 */ + 0x76, /* 01110110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 225 0xe1 'á' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x78, /* 01111000 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xcc, /* 11001100 */ + 0xd8, /* 11011000 */ + 0xcc, /* 11001100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xcc, /* 11001100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 226 0xe2 'â' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ + 0xc0, /* 11000000 */ 
+ 0xc0, /* 11000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 227 0xe3 'ã' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ + 0x6c, /* 01101100 */ + 0x6c, /* 01101100 */ + 0x6c, /* 01101100 */ + 0x6c, /* 01101100 */ + 0x6c, /* 01101100 */ + 0x6c, /* 01101100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 228 0xe4 'ä' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ + 0xc6, /* 11000110 */ + 0x60, /* 01100000 */ + 0x30, /* 00110000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x60, /* 01100000 */ + 0xc6, /* 11000110 */ + 0xfe, /* 11111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 229 0xe5 'å' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7e, /* 01111110 */ + 0xd8, /* 11011000 */ + 0xd8, /* 11011000 */ + 0xd8, /* 11011000 */ + 0xd8, /* 11011000 */ + 0xd8, /* 11011000 */ + 0x70, /* 01110000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 230 0xe6 'æ' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x7c, /* 01111100 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0xc0, /* 11000000 */ + 0x00, /* 00000000 */ + + /* 231 0xe7 'ç' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x76, /* 01110110 */ + 0xdc, /* 11011100 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 232 0xe8 'è' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7e, /* 01111110 */ + 0x18, /* 00011000 */ + 0x3c, /* 00111100 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x3c, /* 00111100 */ + 0x18, /* 00011000 */ + 0x7e, /* 01111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 233 0xe9 'é' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xfe, /* 11111110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x6c, /* 01101100 */ + 0x38, /* 00111000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 234 0xea 'ê' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x6c, /* 01101100 */ + 0x6c, /* 01101100 */ + 0x6c, /* 01101100 */ + 0x6c, /* 01101100 */ + 0xee, /* 11101110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 235 0xeb 'ë' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x1e, /* 00011110 */ + 0x30, /* 00110000 */ + 0x18, /* 00011000 */ + 0x0c, /* 00001100 */ + 0x3e, /* 00111110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x66, /* 01100110 */ + 0x3c, /* 00111100 */ + 0x00, /* 
00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 236 0xec 'ì' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7e, /* 01111110 */ + 0xdb, /* 11011011 */ + 0xdb, /* 11011011 */ + 0xdb, /* 11011011 */ + 0x7e, /* 01111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 237 0xed 'í' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x03, /* 00000011 */ + 0x06, /* 00000110 */ + 0x7e, /* 01111110 */ + 0xdb, /* 11011011 */ + 0xdb, /* 11011011 */ + 0xf3, /* 11110011 */ + 0x7e, /* 01111110 */ + 0x60, /* 01100000 */ + 0xc0, /* 11000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 238 0xee 'î' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x1c, /* 00011100 */ + 0x30, /* 00110000 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0x7c, /* 01111100 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0x60, /* 01100000 */ + 0x30, /* 00110000 */ + 0x1c, /* 00011100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 239 0xef 'ï' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7c, /* 01111100 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0xc6, /* 11000110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 240 0xf0 'ð' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0xfe, /* 11111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 241 0xf1 'ñ' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x7e, /* 01111110 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7e, /* 01111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 242 0xf2 'ò' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x30, /* 00110000 */ + 0x18, /* 00011000 */ + 0x0c, /* 00001100 */ + 0x06, /* 00000110 */ + 0x0c, /* 00001100 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x00, /* 00000000 */ + 0x7e, /* 01111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 243 0xf3 'ó' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x0c, /* 00001100 */ + 0x18, /* 00011000 */ + 0x30, /* 00110000 */ + 0x60, /* 01100000 */ + 0x30, /* 00110000 */ + 0x18, /* 00011000 */ + 0x0c, /* 00001100 */ + 0x00, /* 00000000 */ + 0x7e, /* 01111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 244 0xf4 'ô' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x0e, /* 00001110 */ + 0x1b, /* 00011011 */ + 0x1b, /* 00011011 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ 
+ 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + + /* 245 0xf5 'õ' */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0xd8, /* 11011000 */ + 0xd8, /* 11011000 */ + 0xd8, /* 11011000 */ + 0x70, /* 01110000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 246 0xf6 'ö' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x7e, /* 01111110 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 247 0xf7 '÷' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x76, /* 01110110 */ + 0xdc, /* 11011100 */ + 0x00, /* 00000000 */ + 0x76, /* 01110110 */ + 0xdc, /* 11011100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 248 0xf8 'ø' */ + 0x00, /* 00000000 */ + 0x38, /* 00111000 */ + 0x6c, /* 01101100 */ + 0x6c, /* 01101100 */ + 0x38, /* 00111000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 249 0xf9 'ù' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 250 0xfa 'ú' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x18, /* 00011000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 251 0xfb 'û' */ + 0x00, /* 00000000 */ + 0x0f, /* 00001111 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0x0c, /* 00001100 */ + 0xec, /* 11101100 */ + 0x6c, /* 01101100 */ + 0x6c, /* 01101100 */ + 0x3c, /* 00111100 */ + 0x1c, /* 00011100 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 252 0xfc 'ü' */ + 0x00, /* 00000000 */ + 0x6c, /* 01101100 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x36, /* 00110110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 253 0xfd 'ý' */ + 0x00, /* 00000000 */ + 0x3c, /* 00111100 */ + 0x66, /* 01100110 */ + 0x0c, /* 00001100 */ + 0x18, /* 00011000 */ + 0x32, /* 00110010 */ + 0x7e, /* 01111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 
00000000 */ + + /* 254 0xfe 'þ' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x7e, /* 01111110 */ + 0x7e, /* 01111110 */ + 0x7e, /* 01111110 */ + 0x7e, /* 01111110 */ + 0x7e, /* 01111110 */ + 0x7e, /* 01111110 */ + 0x7e, /* 01111110 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + + /* 255 0xff 'ÿ' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ }; diff --git a/ui/vnc-clipboard.c b/ui/vnc-clipboard.c index 9f077965d0..8aeadfaa21 100644 --- a/ui/vnc-clipboard.c +++ b/ui/vnc-clipboard.c @@ -23,7 +23,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "vnc.h" #include "vnc-jobs.h" @@ -189,10 +188,8 @@ static void vnc_clipboard_provide(VncState *vs, vnc_flush(vs); } -static void vnc_clipboard_notify(Notifier *notifier, void *data) +static void vnc_clipboard_update_info(VncState *vs, QemuClipboardInfo *info) { - VncState *vs = container_of(notifier, VncState, cbpeer.update); - QemuClipboardInfo *info = data; QemuClipboardType type; bool self_update = info->owner == &vs->cbpeer; uint32_t flags = 0; @@ -223,6 +220,21 @@ static void vnc_clipboard_notify(Notifier *notifier, void *data) } } +static void vnc_clipboard_notify(Notifier *notifier, void *data) +{ + VncState *vs = container_of(notifier, VncState, cbpeer.notifier); + QemuClipboardNotify *notify = data; + + switch (notify->type) { + case QEMU_CLIPBOARD_UPDATE_INFO: + vnc_clipboard_update_info(vs, notify->info); + return; + case QEMU_CLIPBOARD_RESET_SERIAL: + /* ignore */ + return; + } +} + static void vnc_clipboard_request(QemuClipboardInfo *info, QemuClipboardType type) { @@ -316,8 +328,10 @@ void vnc_server_cut_text_caps(VncState *vs) caps[1] = 0; vnc_clipboard_send(vs, 2, caps); - vs->cbpeer.name = "vnc"; - vs->cbpeer.update.notify = vnc_clipboard_notify; - vs->cbpeer.request = vnc_clipboard_request; - qemu_clipboard_peer_register(&vs->cbpeer); + if (!vs->cbpeer.notifier.notify) { + vs->cbpeer.name = "vnc"; + vs->cbpeer.notifier.notify = vnc_clipboard_notify; + vs->cbpeer.request = vnc_clipboard_request; + qemu_clipboard_peer_register(&vs->cbpeer); + } } diff --git a/ui/vnc-enc-hextile.c b/ui/vnc-enc-hextile.c index 4215bd7daf..c763256f29 100644 --- a/ui/vnc-enc-hextile.c +++ b/ui/vnc-enc-hextile.c @@ -50,8 +50,8 @@ int vnc_hextile_send_framebuffer_update(VncState *vs, int x, int has_fg, has_bg; uint8_t *last_fg, *last_bg; - last_fg = (uint8_t *) g_malloc(VNC_SERVER_FB_BYTES); - last_bg = (uint8_t *) g_malloc(VNC_SERVER_FB_BYTES); + last_fg = g_malloc(VNC_SERVER_FB_BYTES); + last_bg = g_malloc(VNC_SERVER_FB_BYTES); has_fg = has_bg = 0; for (j = y; j < (y + h); j += 16) { for (i = x; i < (x + w); i += 16) { diff --git a/ui/vnc-enc-tight.c b/ui/vnc-enc-tight.c index cebd35841a..09200d71b8 100644 --- a/ui/vnc-enc-tight.c +++ b/ui/vnc-enc-tight.c @@ -32,9 +32,9 @@ INT32 definitions between jmorecfg.h (included by jpeglib.h) and Win32 basetsd.h (included by windows.h). */ -#ifdef CONFIG_VNC_PNG +#ifdef CONFIG_PNG /* The following define is needed by pngconf.h. Otherwise it won't compile, - because setjmp.h was already included by qemu-common.h. 
*/ + because setjmp.h was already included by osdep.h. */ #define PNG_SKIP_SETJMP_CHECK #include #endif @@ -95,7 +95,7 @@ static const struct { }; #endif -#ifdef CONFIG_VNC_PNG +#ifdef CONFIG_PNG static const struct { int png_zlib_level, png_filters; } tight_png_conf[] = { @@ -919,7 +919,7 @@ static int send_full_color_rect(VncState *vs, int x, int y, int w, int h) int stream = 0; ssize_t bytes; -#ifdef CONFIG_VNC_PNG +#ifdef CONFIG_PNG if (tight_can_send_png_rect(vs, w, h)) { return send_png_rect(vs, x, y, w, h, NULL); } @@ -966,7 +966,7 @@ static int send_mono_rect(VncState *vs, int x, int y, int stream = 1; int level = tight_conf[vs->tight->compression].mono_zlib_level; -#ifdef CONFIG_VNC_PNG +#ifdef CONFIG_PNG if (tight_can_send_png_rect(vs, w, h)) { int ret; int bpp = vs->client_pf.bytes_per_pixel * 8; @@ -1020,7 +1020,7 @@ static int send_mono_rect(VncState *vs, int x, int y, struct palette_cb_priv { VncState *vs; uint8_t *header; -#ifdef CONFIG_VNC_PNG +#ifdef CONFIG_PNG png_colorp png_palette; #endif }; @@ -1082,7 +1082,7 @@ static int send_palette_rect(VncState *vs, int x, int y, int colors; ssize_t bytes; -#ifdef CONFIG_VNC_PNG +#ifdef CONFIG_PNG if (tight_can_send_png_rect(vs, w, h)) { return send_png_rect(vs, x, y, w, h, palette); } @@ -1233,7 +1233,7 @@ static int send_jpeg_rect(VncState *vs, int x, int y, int w, int h, int quality) /* * PNG compression stuff. */ -#ifdef CONFIG_VNC_PNG +#ifdef CONFIG_PNG static void write_png_palette(int idx, uint32_t pix, void *opaque) { struct palette_cb_priv *priv = opaque; @@ -1379,7 +1379,7 @@ static int send_png_rect(VncState *vs, int x, int y, int w, int h, buffer_reset(&vs->tight->png); return 1; } -#endif /* CONFIG_VNC_PNG */ +#endif /* CONFIG_PNG */ static void vnc_tight_start(VncState *vs) { @@ -1477,7 +1477,7 @@ static int send_sub_rect(VncState *vs, int x, int y, int w, int h) #endif if (!color_count_palette) { - color_count_palette = g_malloc(sizeof(VncPalette)); + color_count_palette = g_new(VncPalette, 1); vnc_tight_cleanup_notifier.notify = vnc_tight_cleanup; qemu_thread_atexit_add(&vnc_tight_cleanup_notifier); } @@ -1706,7 +1706,7 @@ void vnc_tight_clear(VncState *vs) #ifdef CONFIG_VNC_JPEG buffer_free(&vs->tight->jpeg); #endif -#ifdef CONFIG_VNC_PNG +#ifdef CONFIG_PNG buffer_free(&vs->tight->png); #endif } diff --git a/ui/vnc-enc-zywrle-template.c b/ui/vnc-enc-zywrle-template.c index e9be55966e..4afa583e76 100644 --- a/ui/vnc-enc-zywrle-template.c +++ b/ui/vnc-enc-zywrle-template.c @@ -86,17 +86,17 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #undef L_2 #if ZYWRLE_ENDIAN == ENDIAN_BIG -# define S_0 1 -# define S_1 0 -# define L_0 3 -# define L_1 2 -# define L_2 1 +# define S_0 1 +# define S_1 0 +# define L_0 3 +# define L_1 2 +# define L_2 1 #else -# define S_0 0 -# define S_1 1 -# define L_0 0 -# define L_1 1 -# define L_2 2 +# define S_0 0 +# define S_1 1 +# define L_0 0 +# define L_1 1 +# define L_2 2 #endif #define ZYWRLE_QUANTIZE diff --git a/ui/vnc-enc-zywrle.h b/ui/vnc-enc-zywrle.h index 9b7f698975..e661ec117d 100644 --- a/ui/vnc-enc-zywrle.h +++ b/ui/vnc-enc-zywrle.h @@ -51,14 +51,14 @@ static const unsigned int zywrle_param[3][3]={ {0x0000F000, 0x00000000, 0x00000000}, {0x0000C000, 0x00F0F0F0, 0x00000000}, {0x0000C000, 0x00C0C0C0, 0x00F0F0F0}, -/* {0x0000FF00, 0x00000000, 0x00000000}, +/* {0x0000FF00, 0x00000000, 0x00000000}, {0x0000FF00, 0x00FFFFFF, 0x00000000}, {0x0000FF00, 0x00FFFFFF, 0x00FFFFFF}, */ }; #else /* Type B:Non liner quantization filter. 
*/ static const int8_t zywrle_conv[4][256]={ -{ /* bi=5, bo=5 r=0.0:PSNR=24.849 */ +{ /* bi=5, bo=5 r=0.0:PSNR=24.849 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -92,7 +92,7 @@ static const int8_t zywrle_conv[4][256]={ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, -{ /* bi=5, bo=5 r=2.0:PSNR=74.031 */ +{ /* bi=5, bo=5 r=2.0:PSNR=74.031 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, @@ -126,7 +126,7 @@ static const int8_t zywrle_conv[4][256]={ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, -{ /* bi=5, bo=4 r=2.0:PSNR=64.441 */ +{ /* bi=5, bo=4 r=2.0:PSNR=64.441 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -160,7 +160,7 @@ static const int8_t zywrle_conv[4][256]={ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, -{ /* bi=5, bo=2 r=2.0:PSNR=43.175 */ +{ /* bi=5, bo=2 r=2.0:PSNR=43.175 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -274,14 +274,14 @@ static inline void harr(int8_t *px0, int8_t *px1) x1 += x0; if (((x1 ^ orgx1) & 0x80) == 0) { /* |x1| > |x0| */ - x0 -= x1; /* H = -B */ + x0 -= x1; /* H = -B */ } } else { /* same sign */ x0 -= x1; if (((x0 ^ orgx0) & 0x80) == 0) { /* |x0| > |x1| */ - x1 += x0; /* L = A */ + x1 += x0; /* L = A */ } } *px0 = (int8_t)x1; @@ -585,7 +585,7 @@ static inline void wavelet(int *buf, int width, int height, int level) } \ } while (0) -#define ZYWRLE_PACK_COEFF(buf, data, t, width, height, scanline, level) \ +#define ZYWRLE_PACK_COEFF(buf, data, t, width, height, scanline, level) \ ZYWRLE_TRANSFER_COEFF(buf, data, t, width, height, scanline, level, \ ZYWRLE_LOAD_COEFF(ph, r, g, b); \ ZYWRLE_SAVE_PIXEL(data, r, g, b);) diff --git a/ui/vnc-jobs.c b/ui/vnc-jobs.c index 4562bf8928..fcca7ec632 100644 --- a/ui/vnc-jobs.c +++ b/ui/vnc-jobs.c @@ -250,12 +250,13 @@ static int vnc_worker_thread_loop(VncJobQueue *queue) /* Here job can only be NULL if queue->exit is true */ job = QTAILQ_FIRST(&queue->jobs); vnc_unlock_queue(queue); - assert(job->vs->magic == VNC_MAGIC); if (queue->exit) { return -1; } + assert(job->vs->magic == VNC_MAGIC); + vnc_lock_output(job->vs); if (job->vs->ioc == NULL || job->vs->abort == true) { vnc_unlock_output(job->vs); @@ -373,7 +374,7 @@ void vnc_start_worker_thread(void) VncJobQueue *q; if (vnc_worker_thread_running()) - return ; + return; q = vnc_queue_init(); qemu_thread_create(&q->thread, "vnc_worker", vnc_worker_thread, q, diff --git a/ui/vnc.c b/ui/vnc.c index af02522e84..1856d57380 100644 --- a/ui/vnc.c +++ b/ui/vnc.c @@ -25,7 +25,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "vnc.h" #include "vnc-jobs.h" #include "trace.h" @@ -55,6 +54,7 @@ #include "qemu/cutils.h" #include "qemu/help_option.h" #include "io/dns-resolver.h" +#include "monitor/monitor.h" #define VNC_REFRESH_INTERVAL_BASE GUI_REFRESH_INTERVAL_DEFAULT #define VNC_REFRESH_INTERVAL_INC 50 @@ -1354,12 +1354,12 @@ void vnc_disconnect_finish(VncState *vs) /* last client gone */ vnc_update_server_surface(vs->vd); } - if (vs->cbpeer.update.notify) { + vnc_unlock_output(vs); + + if (vs->cbpeer.notifier.notify) { qemu_clipboard_peer_unregister(&vs->cbpeer); } - vnc_unlock_output(vs); - qemu_mutex_destroy(&vs->output_mutex); if (vs->bh != NULL) { qemu_bh_delete(vs->bh); @@ -2165,7 +2165,7 @@ static void set_encodings(VncState *vs, int32_t *encodings, size_t n_encodings) vs->features |= VNC_FEATURE_TIGHT_MASK; vs->vnc_encoding = enc; break; -#ifdef CONFIG_VNC_PNG +#ifdef CONFIG_PNG case VNC_ENCODING_TIGHT_PNG: vs->features 
|= VNC_FEATURE_TIGHT_PNG_MASK; vs->vnc_encoding = enc; @@ -2340,7 +2340,7 @@ static void pixel_format_message (VncState *vs) { vnc_write_u8(vs, vs->client_pf.bits_per_pixel); /* bits-per-pixel */ vnc_write_u8(vs, vs->client_pf.depth); /* depth */ -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN vnc_write_u8(vs, 1); /* big-endian-flag */ #else vnc_write_u8(vs, 0); /* big-endian-flag */ @@ -2442,8 +2442,8 @@ static int protocol_client_msg(VncState *vs, uint8_t *data, size_t len) if (len == 1) { return 8; } + uint32_t dlen = abs(read_s32(data, 4)); if (len == 8) { - uint32_t dlen = abs(read_s32(data, 4)); if (dlen > (1 << 20)) { error_report("vnc: client_cut_text msg payload has %u bytes" " which exceeds our limit of 1MB.", dlen); @@ -2456,8 +2456,13 @@ static int protocol_client_msg(VncState *vs, uint8_t *data, size_t len) } if (read_s32(data, 4) < 0) { - vnc_client_cut_text_ext(vs, abs(read_s32(data, 4)), - read_u32(data, 8), data + 12); + if (dlen < 4) { + error_report("vnc: malformed payload (header less than 4 bytes)" + " in extended clipboard pseudo-encoding."); + vnc_client_error(vs); + break; + } + vnc_client_cut_text_ext(vs, dlen, read_u32(data, 8), data + 12); break; } vnc_client_cut_text(vs, read_u32(data, 4), data + 8); @@ -2596,7 +2601,7 @@ static int protocol_client_msg(VncState *vs, uint8_t *data, size_t len) memset(&info, 0, sizeof(info)); info.width = w; info.height = h; - dpy_set_ui_info(vs->vd->dcl.con, &info); + dpy_set_ui_info(vs->vd->dcl.con, &info, false); vnc_desktop_resize_ext(vs, 4 /* Request forwarded */); } else { vnc_desktop_resize_ext(vs, 3 /* Invalid screen layout */); @@ -3080,7 +3085,7 @@ static void vnc_rect_updated(VncDisplay *vd, int x, int y, struct timeval * tv) rect = vnc_stat_rect(vd, x, y); if (rect->updated) { - return ; + return; } rect->times[rect->idx] = *tv; rect->idx = (rect->idx + 1) % ARRAY_SIZE(rect->times); @@ -3098,6 +3103,9 @@ static int vnc_refresh_server_surface(VncDisplay *vd) VncState *vs; int has_dirty = 0; pixman_image_t *tmpbuf = NULL; + unsigned long offset; + int x; + uint8_t *guest_ptr, *server_ptr; struct timeval tv = { 0, 0 }; @@ -3106,6 +3114,13 @@ static int vnc_refresh_server_surface(VncDisplay *vd) has_dirty = vnc_update_stats(vd, &tv); } + offset = find_next_bit((unsigned long *) &vd->guest.dirty, + height * VNC_DIRTY_BPL(&vd->guest), 0); + if (offset == height * VNC_DIRTY_BPL(&vd->guest)) { + /* no dirty bits in guest surface */ + return has_dirty; + } + /* * Walk through the guest dirty map. * Check and copy modified bits from guest to server surface. 
@@ -3130,15 +3145,6 @@ static int vnc_refresh_server_surface(VncDisplay *vd) line_bytes = MIN(server_stride, guest_ll); for (;;) { - int x; - uint8_t *guest_ptr, *server_ptr; - unsigned long offset = find_next_bit((unsigned long *) &vd->guest.dirty, - height * VNC_DIRTY_BPL(&vd->guest), - y * VNC_DIRTY_BPL(&vd->guest)); - if (offset == height * VNC_DIRTY_BPL(&vd->guest)) { - /* no more dirty bits */ - break; - } y = offset / VNC_DIRTY_BPL(&vd->guest); x = offset % VNC_DIRTY_BPL(&vd->guest); @@ -3177,6 +3183,13 @@ static int vnc_refresh_server_surface(VncDisplay *vd) } y++; + offset = find_next_bit((unsigned long *) &vd->guest.dirty, + height * VNC_DIRTY_BPL(&vd->guest), + y * VNC_DIRTY_BPL(&vd->guest)); + if (offset == height * VNC_DIRTY_BPL(&vd->guest)) { + /* no more dirty bits */ + break; + } } qemu_pixman_image_unref(tmpbuf); return has_dirty; @@ -3248,7 +3261,7 @@ static void vnc_connect(VncDisplay *vd, QIOChannelSocket *sioc, #ifdef CONFIG_VNC_JPEG buffer_init(&vs->tight->jpeg, "vnc-tight-jpeg/%p", sioc); #endif -#ifdef CONFIG_VNC_PNG +#ifdef CONFIG_PNG buffer_init(&vs->tight->png, "vnc-tight-png/%p", sioc); #endif buffer_init(&vs->zlib.zlib, "vnc-zlib/%p", sioc); @@ -3752,7 +3765,7 @@ static int vnc_display_get_address(const char *addrstr, addr->type = SOCKET_ADDRESS_TYPE_INET; inet = &addr->u.inet; - if (addrstr[0] == '[' && addrstr[hostlen - 1] == ']') { + if (hostlen && addrstr[0] == '[' && addrstr[hostlen - 1] == ']') { inet->host = g_strndup(addrstr + 1, hostlen - 2); } else { inet->host = g_strndup(addrstr, hostlen); @@ -3812,30 +3825,19 @@ static int vnc_display_get_address(const char *addrstr, return ret; } -static void vnc_free_addresses(SocketAddress ***retsaddr, - size_t *retnsaddr) -{ - size_t i; - - for (i = 0; i < *retnsaddr; i++) { - qapi_free_SocketAddress((*retsaddr)[i]); - } - g_free(*retsaddr); - - *retsaddr = NULL; - *retnsaddr = 0; -} - static int vnc_display_get_addresses(QemuOpts *opts, bool reverse, - SocketAddress ***retsaddr, - size_t *retnsaddr, - SocketAddress ***retwsaddr, - size_t *retnwsaddr, + SocketAddressList **saddr_list_ret, + SocketAddressList **wsaddr_list_ret, Error **errp) { SocketAddress *saddr = NULL; SocketAddress *wsaddr = NULL; + g_autoptr(SocketAddressList) saddr_list = NULL; + SocketAddressList **saddr_tail = &saddr_list; + SocketAddress *single_saddr = NULL; + g_autoptr(SocketAddressList) wsaddr_list = NULL; + SocketAddressList **wsaddr_tail = &wsaddr_list; QemuOptsIter addriter; const char *addr; int to = qemu_opt_get_number(opts, "to", 0); @@ -3844,23 +3846,16 @@ static int vnc_display_get_addresses(QemuOpts *opts, bool ipv4 = qemu_opt_get_bool(opts, "ipv4", false); bool ipv6 = qemu_opt_get_bool(opts, "ipv6", false); int displaynum = -1; - int ret = -1; - - *retsaddr = NULL; - *retnsaddr = 0; - *retwsaddr = NULL; - *retnwsaddr = 0; addr = qemu_opt_get(opts, "vnc"); if (addr == NULL || g_str_equal(addr, "none")) { - ret = 0; - goto cleanup; + return 0; } if (qemu_opt_get(opts, "websocket") && !qcrypto_hash_supports(QCRYPTO_HASH_ALG_SHA1)) { error_setg(errp, "SHA1 hash support is required for websockets"); - goto cleanup; + return -1; } qemu_opt_iter_init(&addriter, opts, "vnc"); @@ -3871,7 +3866,7 @@ static int vnc_display_get_addresses(QemuOpts *opts, ipv4, ipv6, &saddr, errp); if (rv < 0) { - goto cleanup; + return -1; } /* Historical compat - first listen address can be used * to set the default websocket port @@ -3879,13 +3874,16 @@ static int vnc_display_get_addresses(QemuOpts *opts, if (displaynum == -1) { displaynum = rv; } - 
*retsaddr = g_renew(SocketAddress *, *retsaddr, *retnsaddr + 1); - (*retsaddr)[(*retnsaddr)++] = saddr; + QAPI_LIST_APPEND(saddr_tail, saddr); } - /* If we had multiple primary displays, we don't do defaults - * for websocket, and require explicit config instead. */ - if (*retnsaddr > 1) { + if (saddr_list && !saddr_list->next) { + single_saddr = saddr_list->value; + } else { + /* + * If we had multiple primary displays, we don't do defaults + * for websocket, and require explicit config instead. + */ displaynum = -1; } @@ -3895,57 +3893,50 @@ static int vnc_display_get_addresses(QemuOpts *opts, has_ipv4, has_ipv6, ipv4, ipv6, &wsaddr, errp) < 0) { - goto cleanup; + return -1; } /* Historical compat - if only a single listen address was * provided, then this is used to set the default listen * address for websocket too */ - if (*retnsaddr == 1 && - (*retsaddr)[0]->type == SOCKET_ADDRESS_TYPE_INET && + if (single_saddr && + single_saddr->type == SOCKET_ADDRESS_TYPE_INET && wsaddr->type == SOCKET_ADDRESS_TYPE_INET && g_str_equal(wsaddr->u.inet.host, "") && - !g_str_equal((*retsaddr)[0]->u.inet.host, "")) { + !g_str_equal(single_saddr->u.inet.host, "")) { g_free(wsaddr->u.inet.host); - wsaddr->u.inet.host = g_strdup((*retsaddr)[0]->u.inet.host); + wsaddr->u.inet.host = g_strdup(single_saddr->u.inet.host); } - *retwsaddr = g_renew(SocketAddress *, *retwsaddr, *retnwsaddr + 1); - (*retwsaddr)[(*retnwsaddr)++] = wsaddr; + QAPI_LIST_APPEND(wsaddr_tail, wsaddr); } - ret = 0; - cleanup: - if (ret < 0) { - vnc_free_addresses(retsaddr, retnsaddr); - vnc_free_addresses(retwsaddr, retnwsaddr); - } - return ret; + *saddr_list_ret = g_steal_pointer(&saddr_list); + *wsaddr_list_ret = g_steal_pointer(&wsaddr_list); + return 0; } static int vnc_display_connect(VncDisplay *vd, - SocketAddress **saddr, - size_t nsaddr, - SocketAddress **wsaddr, - size_t nwsaddr, + SocketAddressList *saddr_list, + SocketAddressList *wsaddr_list, Error **errp) { /* connect to viewer */ QIOChannelSocket *sioc = NULL; - if (nwsaddr != 0) { + if (wsaddr_list) { error_setg(errp, "Cannot use websockets in reverse mode"); return -1; } - if (nsaddr != 1) { + if (!saddr_list || saddr_list->next) { error_setg(errp, "Expected a single address in reverse mode"); return -1; } /* TODO SOCKET_ADDRESS_TYPE_FD when fd has AF_UNIX */ - vd->is_unix = saddr[0]->type == SOCKET_ADDRESS_TYPE_UNIX; + vd->is_unix = saddr_list->value->type == SOCKET_ADDRESS_TYPE_UNIX; sioc = qio_channel_socket_new(); qio_channel_set_name(QIO_CHANNEL(sioc), "vnc-reverse"); - if (qio_channel_socket_connect_sync(sioc, saddr[0], errp) < 0) { + if (qio_channel_socket_connect_sync(sioc, saddr_list->value, errp) < 0) { object_unref(OBJECT(sioc)); return -1; } @@ -3956,20 +3947,18 @@ static int vnc_display_connect(VncDisplay *vd, static int vnc_display_listen(VncDisplay *vd, - SocketAddress **saddr, - size_t nsaddr, - SocketAddress **wsaddr, - size_t nwsaddr, + SocketAddressList *saddr_list, + SocketAddressList *wsaddr_list, Error **errp) { - size_t i; + SocketAddressList *el; - if (nsaddr) { + if (saddr_list) { vd->listener = qio_net_listener_new(); qio_net_listener_set_name(vd->listener, "vnc-listen"); - for (i = 0; i < nsaddr; i++) { + for (el = saddr_list; el; el = el->next) { if (qio_net_listener_open_sync(vd->listener, - saddr[i], 1, + el->value, 1, errp) < 0) { return -1; } @@ -3979,12 +3968,12 @@ static int vnc_display_listen(VncDisplay *vd, vnc_listen_io, vd, NULL); } - if (nwsaddr) { + if (wsaddr_list) { vd->wslistener = qio_net_listener_new(); 
qio_net_listener_set_name(vd->wslistener, "vnc-ws-listen"); - for (i = 0; i < nwsaddr; i++) { + for (el = wsaddr_list; el; el = el->next) { if (qio_net_listener_open_sync(vd->wslistener, - wsaddr[i], 1, + el->value, 1, errp) < 0) { return -1; } @@ -3997,13 +3986,36 @@ static int vnc_display_listen(VncDisplay *vd, return 0; } +bool vnc_display_update(DisplayUpdateOptionsVNC *arg, Error **errp) +{ + VncDisplay *vd = vnc_display_find(NULL); + + if (!vd) { + error_setg(errp, "Can not find vnc display"); + return false; + } + + if (arg->has_addresses) { + if (vd->listener) { + qio_net_listener_disconnect(vd->listener); + object_unref(OBJECT(vd->listener)); + vd->listener = NULL; + } + + if (vnc_display_listen(vd, arg->addresses, NULL, errp) < 0) { + return false; + } + } + + return true; +} void vnc_display_open(const char *id, Error **errp) { VncDisplay *vd = vnc_display_find(id); QemuOpts *opts = qemu_opts_find(&qemu_vnc_opts, id); - SocketAddress **saddr = NULL, **wsaddr = NULL; - size_t nsaddr, nwsaddr; + g_autoptr(SocketAddressList) saddr_list = NULL; + g_autoptr(SocketAddressList) wsaddr_list = NULL; const char *share, *device_id; QemuConsole *con; bool password = false; @@ -4028,8 +4040,8 @@ void vnc_display_open(const char *id, Error **errp) } reverse = qemu_opt_get_bool(opts, "reverse", false); - if (vnc_display_get_addresses(opts, reverse, &saddr, &nsaddr, - &wsaddr, &nwsaddr, errp) < 0) { + if (vnc_display_get_addresses(opts, reverse, &saddr_list, &wsaddr_list, + errp) < 0) { goto fail; } @@ -4051,13 +4063,6 @@ void vnc_display_open(const char *id, Error **errp) password = qemu_opt_get_bool(opts, "password", false); } if (password) { - if (fips_get_state()) { - error_setg(errp, - "VNC password auth disabled due to FIPS mode, " - "consider using the VeNCrypt or SASL authentication " - "methods as an alternative"); - goto fail; - } if (!qcrypto_cipher_supports( QCRYPTO_CIPHER_ALG_DES, QCRYPTO_CIPHER_MODE_ECB)) { error_setg(errp, @@ -4211,16 +4216,16 @@ void vnc_display_open(const char *id, Error **errp) } qkbd_state_set_delay(vd->kbd, key_delay_ms); - if (saddr == NULL) { - goto cleanup; + if (saddr_list == NULL) { + return; } if (reverse) { - if (vnc_display_connect(vd, saddr, nsaddr, wsaddr, nwsaddr, errp) < 0) { + if (vnc_display_connect(vd, saddr_list, wsaddr_list, errp) < 0) { goto fail; } } else { - if (vnc_display_listen(vd, saddr, nsaddr, wsaddr, nwsaddr, errp) < 0) { + if (vnc_display_listen(vd, saddr_list, wsaddr_list, errp) < 0) { goto fail; } } @@ -4229,14 +4234,11 @@ void vnc_display_open(const char *id, Error **errp) vnc_display_print_local_addr(vd); } - cleanup: - vnc_free_addresses(&saddr, &nsaddr); - vnc_free_addresses(&wsaddr, &nwsaddr); + /* Success */ return; fail: vnc_display_close(vd); - goto cleanup; } void vnc_display_add_client(const char *id, int csock, bool skipauth) diff --git a/ui/vnc.h b/ui/vnc.h index a7149831f9..a60fb13115 100644 --- a/ui/vnc.h +++ b/ui/vnc.h @@ -201,7 +201,7 @@ typedef struct VncTight { #ifdef CONFIG_VNC_JPEG Buffer jpeg; #endif -#ifdef CONFIG_VNC_PNG +#ifdef CONFIG_PNG Buffer png; #endif int levels[4]; diff --git a/ui/vnc_keysym.h b/ui/vnc_keysym.h index e8a2ec73c5..016405e74b 100644 --- a/ui/vnc_keysym.h +++ b/ui/vnc_keysym.h @@ -102,7 +102,7 @@ static const name2keysym_t name2keysym[]={ /* latin 1 extensions */ { "nobreakspace", 0x0a0}, { "exclamdown", 0x0a1}, -{ "cent", 0x0a2}, +{ "cent", 0x0a2}, { "sterling", 0x0a3}, { "currency", 0x0a4}, { "yen", 0x0a5}, diff --git a/ui/xemu-input.c b/ui/xemu-input.c index 0db2932368..aa93283096 
100644 --- a/ui/xemu-input.c +++ b/ui/xemu-input.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" -#include "qemu-common.h" #include "hw/qdev-core.h" #include "hw/qdev-properties.h" #include "qapi/error.h" diff --git a/ui/xemu-monitor.c b/ui/xemu-monitor.c index 4018d9e397..2f064d3d1d 100644 --- a/ui/xemu-monitor.c +++ b/ui/xemu-monitor.c @@ -23,7 +23,6 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qemu-common.h" #include "ui/console.h" #include "ui/input.h" #include "sysemu/sysemu.h" diff --git a/ui/xemu-net.c b/ui/xemu-net.c index 9d2bdce3f8..d7dbd5de11 100644 --- a/ui/xemu-net.c +++ b/ui/xemu-net.c @@ -23,7 +23,6 @@ #include "xemu-settings.h" #include "qemu/osdep.h" -#include "qemu-common.h" #include "hw/qdev-core.h" #include "hw/qdev-properties.h" #include "qapi/error.h" diff --git a/ui/xemu-snapshots.c b/ui/xemu-snapshots.c index 5a695257c1..25f227e6fb 100644 --- a/ui/xemu-snapshots.c +++ b/ui/xemu-snapshots.c @@ -33,7 +33,6 @@ #include "qapi/error.h" #include "qapi/qapi-commands-block.h" #include "sysemu/runstate.h" -#include "qemu-common.h" #include "ui/console.h" #include "ui/input.h" diff --git a/ui/xemu.c b/ui/xemu.c index 261dfd93f2..b06e19eb1c 100644 --- a/ui/xemu.c +++ b/ui/xemu.c @@ -33,7 +33,7 @@ #include "qemu/main-loop.h" #include "qemu/rcu.h" #include "qemu-version.h" -#include "qemu-common.h" +#include "qemu-main.h" #include "qapi/error.h" #include "qapi/qapi-commands-block.h" #include "qapi/qmp/qdict.h" @@ -100,6 +100,8 @@ static int sdl2_num_outputs; static struct sdl2_console *sdl2_console; static SDL_Surface *guest_sprite_surface; static int gui_grab; /* if true, all keyboard/mouse events are grabbed */ +static bool alt_grab; +static bool ctrl_grab; static int gui_saved_grab; static int gui_fullscreen; static int gui_grab_code = KMOD_LALT | KMOD_LCTRL; @@ -595,7 +597,7 @@ static void handle_windowevent(SDL_Event *ev) memset(&info, 0, sizeof(info)); info.width = ev->window.data1; info.height = ev->window.data2; - dpy_set_ui_info(scon->dcl.con, &info); + dpy_set_ui_info(scon->dcl.con, &info, true); if (!gui_fullscreen) { g_config.display.window.last_width = ev->window.data1; @@ -792,13 +794,8 @@ static const DisplayChangeListenerOps dcl_gl_ops = { .dpy_gfx_update = sdl2_gl_update, .dpy_gfx_switch = sdl2_gl_switch, .dpy_gfx_check_format = xb_console_gl_check_format, - // .dpy_refresh = sdl2_gl_refresh, .dpy_mouse_set = sdl_mouse_warp, .dpy_cursor_define = sdl_mouse_define, - - .dpy_gl_ctx_create = sdl2_gl_create_context, - .dpy_gl_ctx_destroy = sdl2_gl_destroy_context, - .dpy_gl_ctx_make_current = sdl2_gl_make_context_current, .dpy_gl_scanout_disable = sdl2_gl_scanout_disable, .dpy_gl_scanout_texture = sdl2_gl_scanout_texture, .dpy_gl_update = sdl2_gl_scanout_flush, @@ -1436,14 +1433,14 @@ int gArgc; char **gArgv; // vl.c -int qemu_main(int argc, char **argv, char **envp); static void *call_qemu_main(void *opaque) { int status; DPRINTF("Second thread: calling qemu_main()\n"); - status = qemu_main(gArgc, gArgv, NULL); + qemu_init(gArgc, gArgv); + status = qemu_main(); DPRINTF("Second thread: qemu_main() returned, exiting\n"); exit(status); } @@ -1582,7 +1579,7 @@ void xemu_load_disc(const char *path, Error **errp) xemu_settings_set_string(&g_config.sys.files.dvd_path, ""); qmp_blockdev_change_medium(true, "ide0-cd1", false, NULL, path, - false, "", false, 0, + false, "", false, false, false, 0, &error); if (error) { error_propagate(errp, error); diff --git a/ui/xui/common.hh b/ui/xui/common.hh index 13240c84e4..118d8b8542 100644 --- a/ui/xui/common.hh +++ 
b/ui/xui/common.hh @@ -36,7 +36,6 @@ extern "C" { // Include necessary QEMU headers #include "qemu/osdep.h" -#include "qemu-common.h" #include "qapi/error.h" #include "sysemu/sysemu.h" #include "sysemu/runstate.h" diff --git a/ui/xui/update.hh b/ui/xui/update.hh index f77336b04d..cdb93f19ea 100644 --- a/ui/xui/update.hh +++ b/ui/xui/update.hh @@ -24,7 +24,6 @@ extern "C" { #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/thread.h" } diff --git a/ui/xui/xemu-hud.h b/ui/xui/xemu-hud.h index aa6e3d9acf..81e82aff18 100644 --- a/ui/xui/xemu-hud.h +++ b/ui/xui/xemu-hud.h @@ -24,6 +24,7 @@ #define XEMU_HUD_H #include +#include #ifdef __cplusplus extern "C" { diff --git a/util/aio-posix.c b/util/aio-posix.c index 2b86777e91..731f3826c0 100644 --- a/util/aio-posix.c +++ b/util/aio-posix.c @@ -15,6 +15,7 @@ #include "qemu/osdep.h" #include "block/block.h" +#include "block/thread-pool.h" #include "qemu/main-loop.h" #include "qemu/rcu.h" #include "qemu/rcu_queue.h" @@ -40,6 +41,14 @@ void aio_add_ready_handler(AioHandlerList *ready_list, QLIST_INSERT_HEAD(ready_list, node, node_ready); } +static void aio_add_poll_ready_handler(AioHandlerList *ready_list, + AioHandler *node) +{ + QLIST_SAFE_REMOVE(node, node_ready); /* remove from nested parent's list */ + node->poll_ready = true; + QLIST_INSERT_HEAD(ready_list, node, node_ready); +} + static AioHandler *find_aio_handler(AioContext *ctx, int fd) { AioHandler *node; @@ -67,6 +76,7 @@ static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node) } node->pfd.revents = 0; + node->poll_ready = false; /* If the fd monitor has already marked it deleted, leave it alone */ if (QLIST_IS_INSERTED(node, node_deleted)) { @@ -93,6 +103,7 @@ void aio_set_fd_handler(AioContext *ctx, IOHandler *io_read, IOHandler *io_write, AioPollFn *io_poll, + IOHandler *io_poll_ready, void *opaque) { AioHandler *node; @@ -101,6 +112,10 @@ void aio_set_fd_handler(AioContext *ctx, bool deleted = false; int poll_disable_change; + if (io_poll && !io_poll_ready) { + io_poll = NULL; /* polling only makes sense if there is a handler */ + } + qemu_lockcnt_lock(&ctx->list_lock); node = find_aio_handler(ctx, fd); @@ -127,6 +142,7 @@ void aio_set_fd_handler(AioContext *ctx, new_node->io_read = io_read; new_node->io_write = io_write; new_node->io_poll = io_poll; + new_node->io_poll_ready = io_poll_ready; new_node->opaque = opaque; new_node->is_external = is_external; @@ -182,10 +198,12 @@ void aio_set_event_notifier(AioContext *ctx, EventNotifier *notifier, bool is_external, EventNotifierHandler *io_read, - AioPollFn *io_poll) + AioPollFn *io_poll, + EventNotifierHandler *io_poll_ready) { aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external, - (IOHandler *)io_read, NULL, io_poll, notifier); + (IOHandler *)io_read, NULL, io_poll, + (IOHandler *)io_poll_ready, notifier); } void aio_set_event_notifier_poll(AioContext *ctx, @@ -198,7 +216,8 @@ void aio_set_event_notifier_poll(AioContext *ctx, (IOHandler *)io_poll_end); } -static bool poll_set_started(AioContext *ctx, bool started) +static bool poll_set_started(AioContext *ctx, AioHandlerList *ready_list, + bool started) { AioHandler *node; bool progress = false; @@ -228,8 +247,9 @@ static bool poll_set_started(AioContext *ctx, bool started) } /* Poll one last time in case ->io_poll_end() raced with the event */ - if (!started) { - progress = node->io_poll(node->opaque) || progress; + if (!started && node->io_poll(node->opaque)) { + aio_add_poll_ready_handler(ready_list, node); + progress = true; } } 
qemu_lockcnt_dec(&ctx->list_lock); @@ -240,8 +260,11 @@ static bool poll_set_started(AioContext *ctx, bool started) bool aio_prepare(AioContext *ctx) { + AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list); + /* Poll mode cannot be used with glib's event loop, disable it. */ - poll_set_started(ctx, false); + poll_set_started(ctx, &ready_list, false); + /* TODO what to do with this list? */ return false; } @@ -260,6 +283,7 @@ bool aio_pending(AioContext *ctx) QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) { int revents; + /* TODO should this check poll ready? */ revents = node->pfd.revents & node->pfd.events; if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read && aio_node_check(ctx, node->is_external)) { @@ -301,11 +325,15 @@ static void aio_free_deleted_handlers(AioContext *ctx) static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node) { bool progress = false; + bool poll_ready; int revents; revents = node->pfd.revents & node->pfd.events; node->pfd.revents = 0; + poll_ready = node->poll_ready; + node->poll_ready = false; + /* * Start polling AioHandlers when they become ready because activity is * likely to continue. Note that starvation is theoretically possible when @@ -321,6 +349,18 @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node) } QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll); } + if (!QLIST_IS_INSERTED(node, node_deleted) && + poll_ready && revents == 0 && + aio_node_check(ctx, node->is_external) && + node->io_poll_ready) { + node->io_poll_ready(node->opaque); + + /* + * Return early since revents was zero. aio_notify() does not count as + * progress. + */ + return node->opaque != &ctx->notifier; + } if (!QLIST_IS_INSERTED(node, node_deleted) && (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) && @@ -387,6 +427,7 @@ void aio_dispatch(AioContext *ctx) } static bool run_poll_handlers_once(AioContext *ctx, + AioHandlerList *ready_list, int64_t now, int64_t *timeout) { @@ -397,6 +438,8 @@ static bool run_poll_handlers_once(AioContext *ctx, QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) { if (aio_node_check(ctx, node->is_external) && node->io_poll(node->opaque)) { + aio_add_poll_ready_handler(ready_list, node); + node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS; /* @@ -420,7 +463,9 @@ static bool fdmon_supports_polling(AioContext *ctx) return ctx->fdmon_ops->need_wait != aio_poll_disabled; } -static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now) +static bool remove_idle_poll_handlers(AioContext *ctx, + AioHandlerList *ready_list, + int64_t now) { AioHandler *node; AioHandler *tmp; @@ -451,7 +496,10 @@ static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now) * Nevermind about re-adding the handler in the rare case where * this causes progress. */ - progress = node->io_poll(node->opaque) || progress; + if (node->io_poll(node->opaque)) { + aio_add_poll_ready_handler(ready_list, node); + progress = true; + } } } } @@ -461,6 +509,7 @@ static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now) /* run_poll_handlers: * @ctx: the AioContext + * @ready_list: the list to place ready handlers on * @max_ns: maximum time to poll for, in nanoseconds * * Polls for a given time. 
@@ -469,7 +518,8 @@ static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now) * * Returns: true if progress was made, false otherwise */ -static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout) +static bool run_poll_handlers(AioContext *ctx, AioHandlerList *ready_list, + int64_t max_ns, int64_t *timeout) { bool progress; int64_t start_time, elapsed_time; @@ -490,13 +540,15 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout) start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); do { - progress = run_poll_handlers_once(ctx, start_time, timeout); + progress = run_poll_handlers_once(ctx, ready_list, + start_time, timeout); elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time; max_ns = qemu_soonest_timeout(*timeout, max_ns); assert(!(max_ns && progress)); } while (elapsed_time < max_ns && !ctx->fdmon_ops->need_wait(ctx)); - if (remove_idle_poll_handlers(ctx, start_time + elapsed_time)) { + if (remove_idle_poll_handlers(ctx, ready_list, + start_time + elapsed_time)) { *timeout = 0; progress = true; } @@ -514,6 +566,7 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout) /* try_poll_mode: * @ctx: the AioContext + * @ready_list: list to add handlers that need to be run * @timeout: timeout for blocking wait, computed by the caller and updated if * polling succeeds. * @@ -521,7 +574,8 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout) * * Returns: true if progress was made, false otherwise */ -static bool try_poll_mode(AioContext *ctx, int64_t *timeout) +static bool try_poll_mode(AioContext *ctx, AioHandlerList *ready_list, + int64_t *timeout) { int64_t max_ns; @@ -531,14 +585,14 @@ static bool try_poll_mode(AioContext *ctx, int64_t *timeout) max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns); if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) { - poll_set_started(ctx, true); + poll_set_started(ctx, ready_list, true); - if (run_poll_handlers(ctx, max_ns, timeout)) { + if (run_poll_handlers(ctx, ready_list, max_ns, timeout)) { return true; } } - if (poll_set_started(ctx, false)) { + if (poll_set_started(ctx, ready_list, false)) { *timeout = 0; return true; } @@ -549,7 +603,6 @@ static bool try_poll_mode(AioContext *ctx, int64_t *timeout) bool aio_poll(AioContext *ctx, bool blocking) { AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list); - int ret = 0; bool progress; bool use_notify_me; int64_t timeout; @@ -574,7 +627,7 @@ bool aio_poll(AioContext *ctx, bool blocking) } timeout = blocking ? aio_compute_timeout(ctx) : 0; - progress = try_poll_mode(ctx, &timeout); + progress = try_poll_mode(ctx, &ready_list, &timeout); assert(!(timeout && progress)); /* @@ -604,7 +657,7 @@ bool aio_poll(AioContext *ctx, bool blocking) * system call---a single round of run_poll_handlers_once suffices. 
*/ if (timeout || ctx->fdmon_ops->need_wait(ctx)) { - ret = ctx->fdmon_ops->wait(ctx, &ready_list, timeout); + ctx->fdmon_ops->wait(ctx, &ready_list, timeout); } if (use_notify_me) { @@ -657,10 +710,7 @@ bool aio_poll(AioContext *ctx, bool blocking) } progress |= aio_bh_poll(ctx); - - if (ret > 0) { - progress |= aio_dispatch_ready_handlers(ctx, &ready_list); - } + progress |= aio_dispatch_ready_handlers(ctx, &ready_list); aio_free_deleted_handlers(ctx); diff --git a/util/aio-posix.h b/util/aio-posix.h index c80c04506a..80b927c7f4 100644 --- a/util/aio-posix.h +++ b/util/aio-posix.h @@ -24,6 +24,7 @@ struct AioHandler { IOHandler *io_read; IOHandler *io_write; AioPollFn *io_poll; + IOHandler *io_poll_ready; IOHandler *io_poll_begin; IOHandler *io_poll_end; void *opaque; @@ -36,6 +37,7 @@ struct AioHandler { unsigned flags; /* see fdmon-io_uring.c */ #endif int64_t poll_idle_timeout; /* when to stop userspace polling */ + bool poll_ready; /* has polling detected an event? */ bool is_external; }; diff --git a/util/aio-wait.c b/util/aio-wait.c index bdb3d3af22..98c5accd29 100644 --- a/util/aio-wait.c +++ b/util/aio-wait.c @@ -35,7 +35,21 @@ static void dummy_bh_cb(void *opaque) void aio_wait_kick(void) { - /* The barrier (or an atomic op) is in the caller. */ + /* + * Paired with smp_mb in AIO_WAIT_WHILE. Here we have: + * write(condition); + * aio_wait_kick() { + * smp_mb(); + * read(num_waiters); + * } + * + * And in AIO_WAIT_WHILE: + * write(num_waiters); + * smp_mb(); + * read(condition); + */ + smp_mb(); + if (qatomic_read(&global_aio_wait.num_waiters)) { aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL); } diff --git a/util/aio-win32.c b/util/aio-win32.c index d5b09a1193..80cfe012ad 100644 --- a/util/aio-win32.c +++ b/util/aio-win32.c @@ -16,7 +16,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "block/block.h" #include "qemu/main-loop.h" #include "qemu/queue.h" @@ -68,6 +67,7 @@ void aio_set_fd_handler(AioContext *ctx, IOHandler *io_read, IOHandler *io_write, AioPollFn *io_poll, + IOHandler *io_poll_ready, void *opaque) { /* fd is a SOCKET in our case */ @@ -136,7 +136,8 @@ void aio_set_event_notifier(AioContext *ctx, EventNotifier *e, bool is_external, EventNotifierHandler *io_notify, - AioPollFn *io_poll) + AioPollFn *io_poll, + EventNotifierHandler *io_poll_ready) { AioHandler *node; @@ -325,9 +326,9 @@ void aio_dispatch(AioContext *ctx) bool aio_poll(AioContext *ctx, bool blocking) { AioHandler *node; - HANDLE events[MAXIMUM_WAIT_OBJECTS + 1]; + HANDLE events[MAXIMUM_WAIT_OBJECTS]; bool progress, have_select_revents, first; - int count; + unsigned count; int timeout; /* @@ -368,6 +369,7 @@ bool aio_poll(AioContext *ctx, bool blocking) QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) { if (!node->deleted && node->io_notify && aio_node_check(ctx, node->is_external)) { + assert(count < MAXIMUM_WAIT_OBJECTS); events[count++] = event_notifier_get_handle(node->e); } } diff --git a/util/async-teardown.c b/util/async-teardown.c new file mode 100644 index 0000000000..62bfce1b3c --- /dev/null +++ b/util/async-teardown.c @@ -0,0 +1,150 @@ +/* + * Asynchronous teardown + * + * Copyright IBM, Corp. 2022 + * + * Authors: + * Claudio Imbrenda + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qemu/osdep.h" +#include "qemu/async-teardown.h" + +#ifdef _SC_THREAD_STACK_MIN +#define CLONE_STACK_SIZE sysconf(_SC_THREAD_STACK_MIN) +#else +#define CLONE_STACK_SIZE 16384 +#endif + +static pid_t the_ppid; + +/* + * Close all open file descriptors. + */ +static void close_all_open_fd(void) +{ + struct dirent *de; + int fd, dfd; + DIR *dir; + +#ifdef CONFIG_CLOSE_RANGE + int r = close_range(0, ~0U, 0); + if (!r) { + /* Success, no need to try other ways. */ + return; + } +#endif + + dir = opendir("/proc/self/fd"); + if (!dir) { + /* If /proc is not mounted, there is nothing that can be done. */ + return; + } + /* Avoid closing the directory. */ + dfd = dirfd(dir); + + for (de = readdir(dir); de; de = readdir(dir)) { + fd = atoi(de->d_name); + if (fd != dfd) { + close(fd); + } + } + closedir(dir); +} + +static void hup_handler(int signal) +{ + /* Check every second if this process has been reparented. */ + while (the_ppid == getppid()) { + /* sleep() is safe to use in a signal handler. */ + sleep(1); + } + + /* At this point the parent process has terminated completely. */ + _exit(0); +} + +static int async_teardown_fn(void *arg) +{ + struct sigaction sa = { .sa_handler = hup_handler }; + sigset_t hup_signal; + char name[16]; + + /* Set a meaningful name for this process. */ + snprintf(name, 16, "cleanup/%d", the_ppid); + prctl(PR_SET_NAME, (unsigned long)name); + + /* + * Close all file descriptors that might have been inherited from the + * main qemu process when doing clone, needed to make libvirt happy. + * Not using close_range for increased compatibility with older kernels. + */ + close_all_open_fd(); + + /* Set up a handler for SIGHUP and unblock SIGHUP. */ + sigaction(SIGHUP, &sa, NULL); + sigemptyset(&hup_signal); + sigaddset(&hup_signal, SIGHUP); + sigprocmask(SIG_UNBLOCK, &hup_signal, NULL); + + /* Ask to receive SIGHUP when the parent dies. */ + prctl(PR_SET_PDEATHSIG, SIGHUP); + + /* + * Sleep forever, unless the parent process has already terminated. The + * only interruption can come from the SIGHUP signal, which in normal + * operation is received when the parent process dies. + */ + if (the_ppid == getppid()) { + pause(); + } + + /* At this point the parent process has terminated completely. */ + _exit(0); +} + +/* + * Allocate a new stack of a reasonable size, and return a pointer to its top. + */ +static void *new_stack_for_clone(void) +{ + size_t stack_size = CLONE_STACK_SIZE; + char *stack_ptr; + + /* Allocate a new stack and get a pointer to its top. */ + stack_ptr = qemu_alloc_stack(&stack_size); +#if !defined(HOST_HPPA) + /* The top is at the end of the area, except on HPPA. */ + stack_ptr += stack_size; +#endif + + return stack_ptr; +} + +/* + * Block all signals, start (clone) a new process sharing the address space + * with qemu (CLONE_VM), then restore signals. 
+ */ +void init_async_teardown(void) +{ + sigset_t all_signals, old_signals; + + the_ppid = getpid(); + + sigfillset(&all_signals); + sigprocmask(SIG_BLOCK, &all_signals, &old_signals); + clone(async_teardown_fn, new_stack_for_clone(), CLONE_VM, NULL); + sigprocmask(SIG_SETMASK, &old_signals, NULL); +} diff --git a/util/async.c b/util/async.c index 6f6717a34b..f449c3444e 100644 --- a/util/async.c +++ b/util/async.c @@ -32,6 +32,8 @@ #include "qemu/rcu_queue.h" #include "block/raw-aio.h" #include "qemu/coroutine_int.h" +#include "qemu/coroutine-tls.h" +#include "sysemu/cpu-timers.h" #include "trace.h" /***********************************************************/ @@ -83,6 +85,13 @@ static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags) } aio_notify(ctx); + /* + * Workaround for record/replay. + * vCPU execution should be suspended when new BH is set. + * This is needed to avoid guest timeouts caused + * by the long cycles of the execution. + */ + icount_notify_exit(); } /* Only called from aio_bh_poll() and aio_ctx_finalize() */ @@ -149,7 +158,21 @@ int aio_bh_poll(AioContext *ctx) int ret = 0; QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list); + + /* + * GCC13 [-Werror=dangling-pointer=] complains that the local variable + * 'slice' is being stored in the global 'ctx->bh_slice_list' but the + * list is emptied before this function returns. + */ +#if !defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Wdangling-pointer=" +#endif QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next); +#if !defined(__clang__) +#pragma GCC diagnostic pop +#endif while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) { QEMUBH *bh; @@ -362,7 +385,7 @@ aio_ctx_finalize(GSource *source) g_free(bh); } - aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL); + aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL, NULL); event_notifier_cleanup(&ctx->notifier); qemu_rec_mutex_destroy(&ctx->lock); qemu_lockcnt_destroy(&ctx->list_lock); @@ -485,6 +508,11 @@ static bool aio_context_notifier_poll(void *opaque) return qatomic_read(&ctx->notified); } +static void aio_context_notifier_poll_ready(EventNotifier *e) +{ + /* Do nothing, we just wanted to kick the event loop */ +} + static void co_schedule_bh_cb(void *opaque) { AioContext *ctx = opaque; @@ -536,7 +564,8 @@ AioContext *aio_context_new(Error **errp) aio_set_event_notifier(ctx, &ctx->notifier, false, aio_context_notifier_cb, - aio_context_notifier_poll); + aio_context_notifier_poll, + aio_context_notifier_poll_ready); #ifdef CONFIG_LINUX_AIO ctx->linux_aio = NULL; #endif @@ -556,6 +585,9 @@ AioContext *aio_context_new(Error **errp) ctx->aio_max_batch = 0; + ctx->thread_pool_min = 0; + ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT; + return ctx; fail: g_source_destroy(&ctx->source); @@ -669,12 +701,13 @@ void aio_context_release(AioContext *ctx) qemu_rec_mutex_unlock(&ctx->lock); } -static __thread AioContext *my_aiocontext; +QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext) AioContext *qemu_get_current_aio_context(void) { - if (my_aiocontext) { - return my_aiocontext; + AioContext *ctx = get_my_aiocontext(); + if (ctx) { + return ctx; } if (qemu_mutex_iothread_locked()) { /* Possibly in a vCPU thread. 
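The QEMU_DEFINE_STATIC_CO_TLS() pattern used for my_aiocontext replaces a plain __thread variable with generated accessors, so the compiler cannot cache the TLS address across a point where a coroutine may resume on a different thread. A minimal sketch of the resulting usage, with a hypothetical variable name (the get_*/set_* accessor naming follows the calls visible in this patch):

    #include "qemu/coroutine-tls.h"

    QEMU_DEFINE_STATIC_CO_TLS(int, my_counter)

    static void bump_my_counter(void)
    {
        /* get_my_counter()/set_my_counter() are emitted by the macro above */
        set_my_counter(get_my_counter() + 1);
    }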
*/ @@ -685,6 +718,23 @@ AioContext *qemu_get_current_aio_context(void) void qemu_set_current_aio_context(AioContext *ctx) { - assert(!my_aiocontext); - my_aiocontext = ctx; + assert(!get_my_aiocontext()); + set_my_aiocontext(ctx); +} + +void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min, + int64_t max, Error **errp) +{ + + if (min > max || !max || min > INT_MAX || max > INT_MAX) { + error_setg(errp, "bad thread-pool-min/thread-pool-max values"); + return; + } + + ctx->thread_pool_min = min; + ctx->thread_pool_max = max; + + if (ctx->thread_pool) { + thread_pool_update_params(ctx->thread_pool, ctx); + } } diff --git a/util/atomic64.c b/util/atomic64.c index 93037d5b11..c20d071d8e 100644 --- a/util/atomic64.c +++ b/util/atomic64.c @@ -7,6 +7,8 @@ #include "qemu/osdep.h" #include "qemu/atomic.h" #include "qemu/thread.h" +#include "qemu/cacheinfo.h" +#include "qemu/memalign.h" #ifdef CONFIG_ATOMIC64 #error This file must only be compiled if !CONFIG_ATOMIC64 diff --git a/util/bitmap.c b/util/bitmap.c index 1f201393ae..8d12e90a5a 100644 --- a/util/bitmap.c +++ b/util/bitmap.c @@ -240,6 +240,51 @@ void bitmap_clear(unsigned long *map, long start, long nr) } } +bool bitmap_test_and_clear(unsigned long *map, long start, long nr) +{ + unsigned long *p = map + BIT_WORD(start); + const long size = start + nr; + int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); + unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); + bool dirty = false; + + assert(start >= 0 && nr >= 0); + + /* First word */ + if (nr - bits_to_clear > 0) { + if ((*p) & mask_to_clear) { + dirty = true; + } + *p &= ~mask_to_clear; + nr -= bits_to_clear; + bits_to_clear = BITS_PER_LONG; + p++; + } + + /* Full words */ + if (bits_to_clear == BITS_PER_LONG) { + while (nr >= BITS_PER_LONG) { + if (*p) { + dirty = true; + *p = 0; + } + nr -= BITS_PER_LONG; + p++; + } + } + + /* Last word */ + if (nr) { + mask_to_clear &= BITMAP_LAST_WORD_MASK(size); + if ((*p) & mask_to_clear) { + dirty = true; + } + *p &= ~mask_to_clear; + } + + return dirty; +} + bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr) { unsigned long *p = map + BIT_WORD(start); @@ -376,7 +421,7 @@ static void bitmap_to_from_le(unsigned long *dst, { long len = BITS_TO_LONGS(nbits); -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN long index; for (index = 0; index < len; index++) { diff --git a/util/bufferiszero.c b/util/bufferiszero.c index 695bb4ce28..ec3cd4ca15 100644 --- a/util/bufferiszero.c +++ b/util/bufferiszero.c @@ -272,7 +272,7 @@ static void init_accel(unsigned cache) static void __attribute__((constructor)) init_cpuid_cache(void) { - int max = __get_cpuid_max(0, NULL); + unsigned max = __get_cpuid_max(0, NULL); int a, b, c, d; unsigned cache = 0; diff --git a/util/cacheflush.c b/util/cacheflush.c index 933355b0c9..2c2c73e085 100644 --- a/util/cacheflush.c +++ b/util/cacheflush.c @@ -1,5 +1,5 @@ /* - * Flush the host cpu caches. + * Info about, and flushing the host cpu caches. * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. @@ -7,9 +7,220 @@ #include "qemu/osdep.h" #include "qemu/cacheflush.h" +#include "qemu/cacheinfo.h" #include "qemu/bitops.h" +#include "qemu/host-utils.h" +#include "qemu/atomic.h" +int qemu_icache_linesize = 0; +int qemu_icache_linesize_log; +int qemu_dcache_linesize = 0; +int qemu_dcache_linesize_log; + +/* + * Operating system specific cache detection mechanisms. 
+ */ + +#if defined(_WIN32) + +static void sys_cache_info(int *isize, int *dsize) +{ + SYSTEM_LOGICAL_PROCESSOR_INFORMATION *buf; + DWORD size = 0; + BOOL success; + size_t i, n; + + /* + * Check for the required buffer size first. Note that if the zero + * size we use for the probe results in success, then there is no + * data available; fail in that case. + */ + success = GetLogicalProcessorInformation(0, &size); + if (success || GetLastError() != ERROR_INSUFFICIENT_BUFFER) { + return; + } + + n = size / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION); + size = n * sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION); + buf = g_new0(SYSTEM_LOGICAL_PROCESSOR_INFORMATION, n); + if (!GetLogicalProcessorInformation(buf, &size)) { + goto fail; + } + + for (i = 0; i < n; i++) { + if (buf[i].Relationship == RelationCache + && buf[i].Cache.Level == 1) { + switch (buf[i].Cache.Type) { + case CacheUnified: + *isize = *dsize = buf[i].Cache.LineSize; + break; + case CacheInstruction: + *isize = buf[i].Cache.LineSize; + break; + case CacheData: + *dsize = buf[i].Cache.LineSize; + break; + default: + break; + } + } + } + fail: + g_free(buf); +} + +#elif defined(CONFIG_DARWIN) +# include +static void sys_cache_info(int *isize, int *dsize) +{ + /* There's only a single sysctl for both I/D cache line sizes. */ + long size; + size_t len = sizeof(size); + if (!sysctlbyname("hw.cachelinesize", &size, &len, NULL, 0)) { + *isize = *dsize = size; + } +} +#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) +# include +static void sys_cache_info(int *isize, int *dsize) +{ + /* There's only a single sysctl for both I/D cache line sizes. */ + int size; + size_t len = sizeof(size); + if (!sysctlbyname("machdep.cacheline_size", &size, &len, NULL, 0)) { + *isize = *dsize = size; + } +} +#else +/* POSIX */ + +static void sys_cache_info(int *isize, int *dsize) +{ +# ifdef _SC_LEVEL1_ICACHE_LINESIZE + int tmp_isize = (int) sysconf(_SC_LEVEL1_ICACHE_LINESIZE); + if (tmp_isize > 0) { + *isize = tmp_isize; + } +# endif +# ifdef _SC_LEVEL1_DCACHE_LINESIZE + int tmp_dsize = (int) sysconf(_SC_LEVEL1_DCACHE_LINESIZE); + if (tmp_dsize > 0) { + *dsize = tmp_dsize; + } +# endif +} +#endif /* sys_cache_info */ + + +/* + * Architecture (+ OS) specific cache detection mechanisms. + */ + +#if defined(__powerpc__) +static bool have_coherent_icache; +#endif + +#if defined(__aarch64__) && !defined(CONFIG_DARWIN) +/* Apple does not expose CTR_EL0, so we must use system interfaces. */ +static uint64_t save_ctr_el0; +static void arch_cache_info(int *isize, int *dsize) +{ + uint64_t ctr; + + /* + * The real cache geometry is in CCSIDR_EL1/CLIDR_EL1/CSSELR_EL1, + * but (at least under Linux) these are marked protected by the + * kernel. However, CTR_EL0 contains the minimum linesize in the + * entire hierarchy, and is used by userspace cache flushing. + * + * We will also use this value in flush_idcache_range. 
+ */ + asm volatile("mrs\t%0, ctr_el0" : "=r"(ctr)); + save_ctr_el0 = ctr; + + if (*isize == 0 || *dsize == 0) { + if (*isize == 0) { + *isize = 4 << (ctr & 0xf); + } + if (*dsize == 0) { + *dsize = 4 << ((ctr >> 16) & 0xf); + } + } +} + +#elif defined(_ARCH_PPC) && defined(__linux__) +# include "elf.h" + +static void arch_cache_info(int *isize, int *dsize) +{ + if (*isize == 0) { + *isize = qemu_getauxval(AT_ICACHEBSIZE); + } + if (*dsize == 0) { + *dsize = qemu_getauxval(AT_DCACHEBSIZE); + } + have_coherent_icache = qemu_getauxval(AT_HWCAP) & PPC_FEATURE_ICACHE_SNOOP; +} + +#else +static void arch_cache_info(int *isize, int *dsize) { } +#endif /* arch_cache_info */ + +/* + * ... and if all else fails ... + */ + +static void fallback_cache_info(int *isize, int *dsize) +{ + /* If we can only find one of the two, assume they're the same. */ + if (*isize) { + if (*dsize) { + /* Success! */ + } else { + *dsize = *isize; + } + } else if (*dsize) { + *isize = *dsize; + } else { +#if defined(_ARCH_PPC) + /* + * For PPC, we're going to use the cache sizes computed for + * flush_idcache_range. Which means that we must use the + * architecture minimum. + */ + *isize = *dsize = 16; +#else + /* Otherwise, 64 bytes is not uncommon. */ + *isize = *dsize = 64; +#endif + } +} + +static void __attribute__((constructor)) init_cache_info(void) +{ + int isize = 0, dsize = 0; + + sys_cache_info(&isize, &dsize); + arch_cache_info(&isize, &dsize); + fallback_cache_info(&isize, &dsize); + + assert((isize & (isize - 1)) == 0); + assert((dsize & (dsize - 1)) == 0); + + qemu_icache_linesize = isize; + qemu_icache_linesize_log = ctz32(isize); + qemu_dcache_linesize = dsize; + qemu_dcache_linesize_log = ctz32(dsize); + + qatomic64_init(); +} + + +/* + * Architecture (+ OS) specific cache flushing mechanisms. + */ + #if defined(__i386__) || defined(__x86_64__) || defined(__s390__) /* Caches are coherent and do not require flushing; symbol inline. */ @@ -27,17 +238,6 @@ void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len) } #else -/* - * TODO: unify this with cacheinfo.c. - * We want to save the whole contents of CTR_EL0, so that we - * have more than the linesize, but also IDC and DIC. - */ -static uint64_t save_ctr_el0; -static void __attribute__((constructor)) init_ctr_el0(void) -{ - asm volatile("mrs\t%0, ctr_el0" : "=r"(save_ctr_el0)); -} - /* * This is a copy of gcc's __aarch64_sync_cache_range, modified * to fit this three-operand interface. @@ -47,8 +247,8 @@ void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len) const unsigned CTR_IDC = 1u << 28; const unsigned CTR_DIC = 1u << 29; const uint64_t ctr_el0 = save_ctr_el0; - const uintptr_t icache_lsize = 4 << extract64(ctr_el0, 0, 4); - const uintptr_t dcache_lsize = 4 << extract64(ctr_el0, 16, 4); + const uintptr_t icache_lsize = qemu_icache_linesize; + const uintptr_t dcache_lsize = qemu_dcache_linesize; uintptr_t p; /* @@ -103,8 +303,24 @@ void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len) void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len) { uintptr_t p, b, e; - size_t dsize = qemu_dcache_linesize; - size_t isize = qemu_icache_linesize; + size_t dsize, isize; + + /* + * Some processors have coherent caches and support a simplified + * flushing procedure. 
See + * POWER9 UM, 4.6.2.2 Instruction Cache Block Invalidate (icbi) + * https://ibm.ent.box.com/s/tmklq90ze7aj8f4n32er1mu3sy9u8k3k + */ + if (have_coherent_icache) { + asm volatile ("sync\n\t" + "icbi 0,%0\n\t" + "isync" + : : "r"(rx) : "memory"); + return; + } + + dsize = qemu_dcache_linesize; + isize = qemu_icache_linesize; b = rw & ~(dsize - 1); e = (rw + len + dsize - 1) & ~(dsize - 1); diff --git a/util/cacheinfo.c b/util/cacheinfo.c deleted file mode 100644 index b182f0b693..0000000000 --- a/util/cacheinfo.c +++ /dev/null @@ -1,199 +0,0 @@ -/* - * cacheinfo.c - helpers to query the host about its caches - * - * Copyright (C) 2017, Emilio G. Cota - * License: GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#include "qemu/osdep.h" -#include "qemu/host-utils.h" -#include "qemu/atomic.h" - -int qemu_icache_linesize = 0; -int qemu_icache_linesize_log; -int qemu_dcache_linesize = 0; -int qemu_dcache_linesize_log; - -/* - * Operating system specific detection mechanisms. - */ - -#if defined(_WIN32) - -static void sys_cache_info(int *isize, int *dsize) -{ - SYSTEM_LOGICAL_PROCESSOR_INFORMATION *buf; - DWORD size = 0; - BOOL success; - size_t i, n; - - /* Check for the required buffer size first. Note that if the zero - size we use for the probe results in success, then there is no - data available; fail in that case. */ - success = GetLogicalProcessorInformation(0, &size); - if (success || GetLastError() != ERROR_INSUFFICIENT_BUFFER) { - return; - } - - n = size / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION); - size = n * sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION); - buf = g_new0(SYSTEM_LOGICAL_PROCESSOR_INFORMATION, n); - if (!GetLogicalProcessorInformation(buf, &size)) { - goto fail; - } - - for (i = 0; i < n; i++) { - if (buf[i].Relationship == RelationCache - && buf[i].Cache.Level == 1) { - switch (buf[i].Cache.Type) { - case CacheUnified: - *isize = *dsize = buf[i].Cache.LineSize; - break; - case CacheInstruction: - *isize = buf[i].Cache.LineSize; - break; - case CacheData: - *dsize = buf[i].Cache.LineSize; - break; - default: - break; - } - } - } - fail: - g_free(buf); -} - -#elif defined(__APPLE__) -# include -static void sys_cache_info(int *isize, int *dsize) -{ - /* There's only a single sysctl for both I/D cache line sizes. */ - long size; - size_t len = sizeof(size); - if (!sysctlbyname("hw.cachelinesize", &size, &len, NULL, 0)) { - *isize = *dsize = size; - } -} -#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) -# include -static void sys_cache_info(int *isize, int *dsize) -{ - /* There's only a single sysctl for both I/D cache line sizes. */ - int size; - size_t len = sizeof(size); - if (!sysctlbyname("machdep.cacheline_size", &size, &len, NULL, 0)) { - *isize = *dsize = size; - } -} -#else -/* POSIX */ - -static void sys_cache_info(int *isize, int *dsize) -{ -# ifdef _SC_LEVEL1_ICACHE_LINESIZE - int tmp_isize = (int) sysconf(_SC_LEVEL1_ICACHE_LINESIZE); - if (tmp_isize > 0) { - *isize = tmp_isize; - } -# endif -# ifdef _SC_LEVEL1_DCACHE_LINESIZE - int tmp_dsize = (int) sysconf(_SC_LEVEL1_DCACHE_LINESIZE); - if (tmp_dsize > 0) { - *dsize = tmp_dsize; - } -# endif -} -#endif /* sys_cache_info */ - -/* - * Architecture (+ OS) specific detection mechanisms. 
- */ - -#if defined(__aarch64__) - -static void arch_cache_info(int *isize, int *dsize) -{ - if (*isize == 0 || *dsize == 0) { - uint64_t ctr; - - /* The real cache geometry is in CCSIDR_EL1/CLIDR_EL1/CSSELR_EL1, - but (at least under Linux) these are marked protected by the - kernel. However, CTR_EL0 contains the minimum linesize in the - entire hierarchy, and is used by userspace cache flushing. */ - asm volatile("mrs\t%0, ctr_el0" : "=r"(ctr)); - if (*isize == 0) { - *isize = 4 << (ctr & 0xf); - } - if (*dsize == 0) { - *dsize = 4 << ((ctr >> 16) & 0xf); - } - } -} - -#elif defined(_ARCH_PPC) && defined(__linux__) -# include "elf.h" - -static void arch_cache_info(int *isize, int *dsize) -{ - if (*isize == 0) { - *isize = qemu_getauxval(AT_ICACHEBSIZE); - } - if (*dsize == 0) { - *dsize = qemu_getauxval(AT_DCACHEBSIZE); - } -} - -#else -static void arch_cache_info(int *isize, int *dsize) { } -#endif /* arch_cache_info */ - -/* - * ... and if all else fails ... - */ - -static void fallback_cache_info(int *isize, int *dsize) -{ - /* If we can only find one of the two, assume they're the same. */ - if (*isize) { - if (*dsize) { - /* Success! */ - } else { - *dsize = *isize; - } - } else if (*dsize) { - *isize = *dsize; - } else { -#if defined(_ARCH_PPC) - /* - * For PPC, we're going to use the cache sizes computed for - * flush_idcache_range. Which means that we must use the - * architecture minimum. - */ - *isize = *dsize = 16; -#else - /* Otherwise, 64 bytes is not uncommon. */ - *isize = *dsize = 64; -#endif - } -} - -static void __attribute__((constructor)) init_cache_info(void) -{ - int isize = 0, dsize = 0; - - sys_cache_info(&isize, &dsize); - arch_cache_info(&isize, &dsize); - fallback_cache_info(&isize, &dsize); - - assert((isize & (isize - 1)) == 0); - assert((dsize & (dsize - 1)) == 0); - - qemu_icache_linesize = isize; - qemu_icache_linesize_log = ctz32(isize); - qemu_dcache_linesize = dsize; - qemu_dcache_linesize_log = ctz32(dsize); - - qatomic64_init(); -} diff --git a/util/compatfd.c b/util/compatfd.c index a8ec525c6c..147e39e2c6 100644 --- a/util/compatfd.c +++ b/util/compatfd.c @@ -17,7 +17,7 @@ #include "qemu/thread.h" #if defined(CONFIG_SIGNALFD) -#include +#include #endif struct sigfd_compat_info { @@ -42,25 +42,11 @@ static void *sigwait_compat(void *opaque) } } else { struct qemu_signalfd_siginfo buffer; - size_t offset = 0; - memset(&buffer, 0, sizeof(buffer)); buffer.ssi_signo = sig; - while (offset < sizeof(buffer)) { - ssize_t len; - - len = write(info->fd, (char *)&buffer + offset, - sizeof(buffer) - offset); - if (len == -1 && errno == EINTR) { - continue; - } - - if (len <= 0) { - return NULL; - } - - offset += len; + if (qemu_write_full(info->fd, &buffer, sizeof(buffer)) != sizeof(buffer)) { + return NULL; } } } @@ -74,14 +60,11 @@ static int qemu_signalfd_compat(const sigset_t *mask) info = g_malloc(sizeof(*info)); - if (pipe(fds) == -1) { + if (!g_unix_open_pipe(fds, FD_CLOEXEC, NULL)) { g_free(info); return -1; } - qemu_set_cloexec(fds[0]); - qemu_set_cloexec(fds[1]); - memcpy(&info->mask, mask, sizeof(*mask)); info->fd = fds[1]; @@ -96,9 +79,8 @@ int qemu_signalfd(const sigset_t *mask) #if defined(CONFIG_SIGNALFD) int ret; - ret = syscall(SYS_signalfd, -1, mask, _NSIG / 8); + ret = signalfd(-1, mask, SFD_CLOEXEC); if (ret != -1) { - qemu_set_cloexec(ret); return ret; } #endif diff --git a/util/coroutine-sigaltstack.c b/util/coroutine-sigaltstack.c index e99b8a4f9c..e2690c5f41 100644 --- a/util/coroutine-sigaltstack.c +++ b/util/coroutine-sigaltstack.c @@ 
-27,7 +27,6 @@ #endif #include "qemu/osdep.h" #include -#include "qemu-common.h" #include "qemu/coroutine_int.h" #ifdef CONFIG_SAFESTACK diff --git a/util/coroutine-ucontext.c b/util/coroutine-ucontext.c index 904b375192..ddc98fb4f8 100644 --- a/util/coroutine-ucontext.c +++ b/util/coroutine-ucontext.c @@ -25,12 +25,13 @@ #include "qemu/osdep.h" #include #include "qemu/coroutine_int.h" +#include "qemu/coroutine-tls.h" #ifdef CONFIG_VALGRIND_H #include #endif -#if defined(__SANITIZE_ADDRESS__) || __has_feature(address_sanitizer) +#ifdef QEMU_SANITIZE_ADDRESS #ifdef CONFIG_ASAN_IFACE_FIBER #define CONFIG_ASAN 1 #include @@ -66,8 +67,8 @@ typedef struct { /** * Per-thread coroutine bookkeeping */ -static __thread CoroutineUContext leader; -static __thread Coroutine *current; +QEMU_DEFINE_STATIC_CO_TLS(Coroutine *, current); +QEMU_DEFINE_STATIC_CO_TLS(CoroutineUContext, leader); /* * va_args to makecontext() must be type 'int', so passing @@ -97,14 +98,15 @@ static inline __attribute__((always_inline)) void finish_switch_fiber(void *fake_stack_save) { #ifdef CONFIG_ASAN + CoroutineUContext *leaderp = get_ptr_leader(); const void *bottom_old; size_t size_old; __sanitizer_finish_switch_fiber(fake_stack_save, &bottom_old, &size_old); - if (!leader.stack) { - leader.stack = (void *)bottom_old; - leader.stack_size = size_old; + if (!leaderp->stack) { + leaderp->stack = (void *)bottom_old; + leaderp->stack_size = size_old; } #endif #ifdef CONFIG_TSAN @@ -161,8 +163,10 @@ static void coroutine_trampoline(int i0, int i1) /* Initialize longjmp environment and switch back the caller */ if (!sigsetjmp(self->env, 0)) { - start_switch_fiber_asan(COROUTINE_YIELD, &fake_stack_save, leader.stack, - leader.stack_size); + CoroutineUContext *leaderp = get_ptr_leader(); + + start_switch_fiber_asan(COROUTINE_YIELD, &fake_stack_save, + leaderp->stack, leaderp->stack_size); start_switch_fiber_tsan(&fake_stack_save, self, true); /* true=caller */ siglongjmp(*(sigjmp_buf *)co->entry_arg, 1); } @@ -297,7 +301,7 @@ qemu_coroutine_switch(Coroutine *from_, Coroutine *to_, int ret; void *fake_stack_save = NULL; - current = to_; + set_current(to_); ret = sigsetjmp(from->env, 0); if (ret == 0) { @@ -315,18 +319,24 @@ qemu_coroutine_switch(Coroutine *from_, Coroutine *to_, Coroutine *qemu_coroutine_self(void) { - if (!current) { - current = &leader.base; + Coroutine *self = get_current(); + CoroutineUContext *leaderp = get_ptr_leader(); + + if (!self) { + self = &leaderp->base; + set_current(self); } #ifdef CONFIG_TSAN - if (!leader.tsan_co_fiber) { - leader.tsan_co_fiber = __tsan_get_current_fiber(); + if (!leaderp->tsan_co_fiber) { + leaderp->tsan_co_fiber = __tsan_get_current_fiber(); } #endif - return current; + return self; } bool qemu_in_coroutine(void) { - return current && current->caller; + Coroutine *self = get_current(); + + return self && self->caller; } diff --git a/util/coroutine-win32.c b/util/coroutine-win32.c index de6bd4fd3e..7db2e8f8c8 100644 --- a/util/coroutine-win32.c +++ b/util/coroutine-win32.c @@ -23,8 +23,8 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/coroutine_int.h" +#include "qemu/coroutine-tls.h" typedef struct { @@ -34,8 +34,8 @@ typedef struct CoroutineAction action; } CoroutineWin32; -static __thread CoroutineWin32 leader; -static __thread Coroutine *current; +QEMU_DEFINE_STATIC_CO_TLS(CoroutineWin32, leader); +QEMU_DEFINE_STATIC_CO_TLS(Coroutine *, current); /* This function is marked noinline to prevent GCC from inlining it * into coroutine_trampoline(). 
If we allow it to do that then it @@ -52,7 +52,7 @@ qemu_coroutine_switch(Coroutine *from_, Coroutine *to_, CoroutineWin32 *from = DO_UPCAST(CoroutineWin32, base, from_); CoroutineWin32 *to = DO_UPCAST(CoroutineWin32, base, to_); - current = to_; + set_current(to_); to->action = action; SwitchToFiber(to->fiber); @@ -89,14 +89,21 @@ void qemu_coroutine_delete(Coroutine *co_) Coroutine *qemu_coroutine_self(void) { + Coroutine *current = get_current(); + if (!current) { - current = &leader.base; - leader.fiber = ConvertThreadToFiber(NULL); + CoroutineWin32 *leader = get_ptr_leader(); + + current = &leader->base; + set_current(current); + leader->fiber = ConvertThreadToFiber(NULL); } return current; } bool qemu_in_coroutine(void) { + Coroutine *current = get_current(); + return current && current->caller; } diff --git a/util/cutils.c b/util/cutils.c index c9b91e7535..def9c746ce 100644 --- a/util/cutils.c +++ b/util/cutils.c @@ -26,10 +26,28 @@ #include "qemu/host-utils.h" #include -#include "qemu-common.h" -#include "qemu/sockets.h" -#include "qemu/iov.h" -#include "net/net.h" +#ifdef __FreeBSD__ +#include +#include +#endif + +#ifdef __NetBSD__ +#include +#endif + +#ifdef __HAIKU__ +#include +#endif + +#ifdef __APPLE__ +#include +#endif + +#ifdef G_OS_WIN32 +#include +#include +#endif + #include "qemu/ctype.h" #include "qemu/cutils.h" #include "qemu/error-report.h" @@ -148,77 +166,6 @@ time_t mktimegm(struct tm *tm) return t; } -/* - * Make sure data goes on disk, but if possible do not bother to - * write out the inode just for timestamp updates. - * - * Unfortunately even in 2009 many operating systems do not support - * fdatasync and have to fall back to fsync. - */ -int qemu_fdatasync(int fd) -{ -#ifdef CONFIG_FDATASYNC - return fdatasync(fd); -#else - return fsync(fd); -#endif -} - -/** - * Sync changes made to the memory mapped file back to the backing - * storage. For POSIX compliant systems this will fallback - * to regular msync call. Otherwise it will trigger whole file sync - * (including the metadata case there is no support to skip that otherwise) - * - * @addr - start of the memory area to be synced - * @length - length of the are to be synced - * @fd - file descriptor for the file to be synced - * (mandatory only for POSIX non-compliant systems) - */ -int qemu_msync(void *addr, size_t length, int fd) -{ -#ifdef CONFIG_POSIX - size_t align_mask = ~(qemu_real_host_page_size - 1); - - /** - * There are no strict reqs as per the length of mapping - * to be synced. Still the length needs to follow the address - * alignment changes. 
Additionally - round the size to the multiple - * of PAGE_SIZE - */ - length += ((uintptr_t)addr & (qemu_real_host_page_size - 1)); - length = (length + ~align_mask) & align_mask; - - addr = (void *)((uintptr_t)addr & align_mask); - - return msync(addr, length, MS_SYNC); -#else /* CONFIG_POSIX */ - /** - * Perform the sync based on the file descriptor - * The sync range will most probably be wider than the one - * requested - but it will still get the job done - */ - return qemu_fdatasync(fd); -#endif /* CONFIG_POSIX */ -} - -#ifndef _WIN32 -/* Sets a specific flag */ -int fcntl_setfl(int fd, int flag) -{ - int flags; - - flags = fcntl(fd, F_GETFL); - if (flags == -1) - return -errno; - - if (fcntl(fd, F_SETFL, flags | flag) == -1) - return -errno; - - return 0; -} -#endif - static int64_t suffix_mul(char suffix, int64_t unit) { switch (qemu_toupper(suffix)) { @@ -938,17 +885,23 @@ int parse_debug_env(const char *name, int max, int initial) return debug; } -/* - * Helper to print ethernet mac address - */ -const char *qemu_ether_ntoa(const MACAddr *mac) +const char *si_prefix(unsigned int exp10) { - static char ret[18]; + static const char *prefixes[] = { + "a", "f", "p", "n", "u", "m", "", "K", "M", "G", "T", "P", "E" + }; - snprintf(ret, sizeof(ret), "%02x:%02x:%02x:%02x:%02x:%02x", - mac->a[0], mac->a[1], mac->a[2], mac->a[3], mac->a[4], mac->a[5]); + exp10 += 18; + assert(exp10 % 3 == 0 && exp10 / 3 < ARRAY_SIZE(prefixes)); + return prefixes[exp10 / 3]; +} - return ret; +const char *iec_binary_prefix(unsigned int exp2) +{ + static const char *prefixes[] = { "", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei" }; + + assert(exp2 % 10 == 0 && exp2 / 10 < ARRAY_SIZE(prefixes)); + return prefixes[exp2 / 10]; } /* @@ -959,7 +912,6 @@ const char *qemu_ether_ntoa(const MACAddr *mac) */ char *size_to_str(uint64_t val) { - static const char *suffixes[] = { "", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei" }; uint64_t div; int i; @@ -970,25 +922,23 @@ char *size_to_str(uint64_t val) * (see e41b509d68afb1f for more info) */ frexp(val / (1000.0 / 1024.0), &i); - i = (i - 1) / 10; - div = 1ULL << (i * 10); + i = (i - 1) / 10 * 10; + div = 1ULL << i; - return g_strdup_printf("%0.3g %sB", (double)val / div, suffixes[i]); + return g_strdup_printf("%0.3g %sB", (double)val / div, iec_binary_prefix(i)); } char *freq_to_str(uint64_t freq_hz) { - static const char *const suffixes[] = { "", "K", "M", "G", "T", "P", "E" }; double freq = freq_hz; - size_t idx = 0; + size_t exp10 = 0; while (freq >= 1000.0) { freq /= 1000.0; - idx++; + exp10 += 3; } - assert(idx < ARRAY_SIZE(suffixes)); - return g_strdup_printf("%0.3g %sHz", freq, suffixes[idx]); + return g_strdup_printf("%0.3g %sHz", freq, si_prefix(exp10)); } int qemu_pstrcmp0(const char **str1, const char **str2) @@ -1019,6 +969,114 @@ static inline const char *next_component(const char *dir, int *p_len) return dir; } +static const char *exec_dir; + +void qemu_init_exec_dir(const char *argv0) +{ +#ifdef G_OS_WIN32 + char *p; + char buf[MAX_PATH]; + DWORD len; + + if (exec_dir) { + return; + } + + len = GetModuleFileName(NULL, buf, sizeof(buf) - 1); + if (len == 0) { + return; + } + + buf[len] = 0; + p = buf + len - 1; + while (p != buf && *p != '\\') { + p--; + } + *p = 0; + if (access(buf, R_OK) == 0) { + exec_dir = g_strdup(buf); + } else { + exec_dir = CONFIG_BINDIR; + } +#else + char *p = NULL; + char buf[PATH_MAX]; + + if (exec_dir) { + return; + } + +#if defined(__linux__) + { + int len; + len = readlink("/proc/self/exe", buf, sizeof(buf) - 1); + if (len > 0) { + buf[len] = 
0; + p = buf; + } + } +#elif defined(__FreeBSD__) \ + || (defined(__NetBSD__) && defined(KERN_PROC_PATHNAME)) + { +#if defined(__FreeBSD__) + static int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1}; +#else + static int mib[4] = {CTL_KERN, KERN_PROC_ARGS, -1, KERN_PROC_PATHNAME}; +#endif + size_t len = sizeof(buf) - 1; + + *buf = '\0'; + if (!sysctl(mib, ARRAY_SIZE(mib), buf, &len, NULL, 0) && + *buf) { + buf[sizeof(buf) - 1] = '\0'; + p = buf; + } + } +#elif defined(__APPLE__) + { + char fpath[PATH_MAX]; + uint32_t len = sizeof(fpath); + if (_NSGetExecutablePath(fpath, &len) == 0) { + p = realpath(fpath, buf); + if (!p) { + return; + } + } + } +#elif defined(__HAIKU__) + { + image_info ii; + int32_t c = 0; + + *buf = '\0'; + while (get_next_image_info(0, &c, &ii) == B_OK) { + if (ii.type == B_APP_IMAGE) { + strncpy(buf, ii.name, sizeof(buf)); + buf[sizeof(buf) - 1] = 0; + p = buf; + break; + } + } + } +#endif + /* If we don't have any way of figuring out the actual executable + location then try argv[0]. */ + if (!p && argv0) { + p = realpath(argv0, buf); + } + if (p) { + exec_dir = g_path_get_dirname(p); + } else { + exec_dir = CONFIG_BINDIR; + } +#endif +} + +const char *qemu_get_exec_dir(void) +{ + return exec_dir; +} + char *get_relocated_path(const char *dir) { size_t prefix_len = strlen(CONFIG_PREFIX); @@ -1029,31 +1087,52 @@ char *get_relocated_path(const char *dir) /* Fail if qemu_init_exec_dir was not called. */ assert(exec_dir[0]); - if (!starts_with_prefix(dir) || !starts_with_prefix(bindir)) { - return g_strdup(dir); - } result = g_string_new(exec_dir); + g_string_append(result, "/qemu-bundle"); + if (access(result->str, R_OK) == 0) { +#ifdef G_OS_WIN32 + size_t size = mbsrtowcs(NULL, &dir, 0, &(mbstate_t){0}) + 1; + PWSTR wdir = g_new(WCHAR, size); + mbsrtowcs(wdir, &dir, size, &(mbstate_t){0}); - /* Advance over common components. */ - len_dir = len_bindir = prefix_len; - do { - dir += len_dir; - bindir += len_bindir; - dir = next_component(dir, &len_dir); - bindir = next_component(bindir, &len_bindir); - } while (len_dir && len_dir == len_bindir && !memcmp(dir, bindir, len_dir)); + PCWSTR wdir_skipped_root; + PathCchSkipRoot(wdir, &wdir_skipped_root); - /* Ascend from bindir to the common prefix with dir. */ - while (len_bindir) { - bindir += len_bindir; - g_string_append(result, "/.."); - bindir = next_component(bindir, &len_bindir); + size = wcsrtombs(NULL, &wdir_skipped_root, 0, &(mbstate_t){0}); + char *cursor = result->str + result->len; + g_string_set_size(result, result->len + size); + wcsrtombs(cursor, &wdir_skipped_root, size + 1, &(mbstate_t){0}); + g_free(wdir); +#else + g_string_append(result, dir); +#endif + } else if (!starts_with_prefix(dir) || !starts_with_prefix(bindir)) { + g_string_assign(result, dir); + } else { + g_string_assign(result, exec_dir); + + /* Advance over common components. */ + len_dir = len_bindir = prefix_len; + do { + dir += len_dir; + bindir += len_bindir; + dir = next_component(dir, &len_dir); + bindir = next_component(bindir, &len_bindir); + } while (len_dir && len_dir == len_bindir && !memcmp(dir, bindir, len_dir)); + + /* Ascend from bindir to the common prefix with dir. 
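To make the new lookup order concrete, a small worked example (all paths and configure values are illustrative): with CONFIG_PREFIX=/usr, CONFIG_BINDIR=/usr/bin and the binary actually running from /opt/qemu/bin,

    g_autofree char *datadir = get_relocated_path("/usr/share/qemu");
    /* -> "/opt/qemu/bin/qemu-bundle/usr/share/qemu" if that bundle directory
     *    exists and is readable, otherwise "/opt/qemu/bin/../share/qemu" via
     *    the prefix-relocation fallback below */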
*/ + while (len_bindir) { + bindir += len_bindir; + g_string_append(result, "/.."); + bindir = next_component(bindir, &len_bindir); + } + + if (*dir) { + assert(G_IS_DIR_SEPARATOR(dir[-1])); + g_string_append(result, dir - 1); + } } - if (*dir) { - assert(G_IS_DIR_SEPARATOR(dir[-1])); - g_string_append(result, dir - 1); - } return g_string_free(result, false); } diff --git a/util/envlist.c b/util/envlist.c index 2bcc13f094..ab5553498a 100644 --- a/util/envlist.c +++ b/util/envlist.c @@ -217,7 +217,7 @@ envlist_to_environ(const envlist_t *envlist, size_t *count) struct envlist_entry *entry; char **env, **penv; - penv = env = g_malloc((envlist->el_count + 1) * sizeof(char *)); + penv = env = g_new(char *, envlist->el_count + 1); for (entry = envlist->el_entries.lh_first; entry != NULL; entry = entry->ev_link.le_next) { diff --git a/util/qemu-error.c b/util/error-report.c similarity index 93% rename from util/qemu-error.c rename to util/error-report.c index 52a9e013c4..5edb2e6040 100644 --- a/util/qemu-error.c +++ b/util/error-report.c @@ -40,17 +40,6 @@ int error_printf(const char *fmt, ...) return ret; } -int error_printf_unless_qmp(const char *fmt, ...) -{ - va_list ap; - int ret; - - va_start(ap, fmt); - ret = error_vprintf_unless_qmp(fmt, ap); - va_end(ap); - return ret; -} - static Location std_loc = { .kind = LOC_NONE }; @@ -146,22 +135,6 @@ void loc_set_file(const char *fname, int lno) } } -static const char *progname; - -/* - * Set the program name for error_print_loc(). - */ -static void error_set_progname(const char *argv0) -{ - const char *p = strrchr(argv0, '/'); - progname = p ? p + 1 : argv0; -} - -const char *error_get_progname(void) -{ - return progname; -} - /* * Print current location to current monitor if we have one, else to stderr. */ @@ -171,8 +144,8 @@ static void print_loc(void) int i; const char *const *argp; - if (!monitor_cur() && progname) { - fprintf(stderr, "%s:", progname); + if (!monitor_cur() && g_get_prgname()) { + error_printf("%s:", g_get_prgname()); sep = " "; } switch (cur_loc->kind) { @@ -196,6 +169,23 @@ static void print_loc(void) } } +static char * +real_time_iso8601(void) +{ +#if GLIB_CHECK_VERSION(2,62,0) + g_autoptr(GDateTime) dt = g_date_time_new_now_utc(); + /* ignore deprecation warning, since GLIB_VERSION_MAX_ALLOWED is 2.56 */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + return g_date_time_format_iso8601(dt); +#pragma GCC diagnostic pop +#else + GTimeVal tv; + g_get_current_time(&tv); + return g_time_val_to_iso8601(&tv); +#endif +} + /* * Print a message to current monitor if we have one, else to stderr. * @report_type is the type of message: error, warning or informational. @@ -205,12 +195,10 @@ static void print_loc(void) */ static void vreport(report_type type, const char *fmt, va_list ap) { - GTimeVal tv; gchar *timestr; if (message_with_timestamp && !monitor_cur()) { - g_get_current_time(&tv); - timestr = g_time_val_to_iso8601(&tv); + timestr = real_time_iso8601(); error_printf("%s ", timestr); g_free(timestr); } @@ -400,8 +388,10 @@ static void qemu_log_func(const gchar *log_domain, void error_init(const char *argv0) { + const char *p = strrchr(argv0, '/'); + /* Set the program name for error_print_loc(). */ - error_set_progname(argv0); + g_set_prgname(p ? 
p + 1 : argv0); /* * This sets up glib logging so libraries using it also print their logs diff --git a/util/event_notifier-posix.c b/util/event_notifier-posix.c index 8307013c5d..76420c5b56 100644 --- a/util/event_notifier-posix.c +++ b/util/event_notifier-posix.c @@ -11,7 +11,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/cutils.h" #include "qemu/event_notifier.h" #include "qemu/main-loop.h" @@ -50,16 +49,14 @@ int event_notifier_init(EventNotifier *e, int active) if (errno != ENOSYS) { return -errno; } - if (qemu_pipe(fds) < 0) { + if (!g_unix_open_pipe(fds, FD_CLOEXEC, NULL)) { return -errno; } - ret = fcntl_setfl(fds[0], O_NONBLOCK); - if (ret < 0) { + if (!g_unix_set_fd_nonblocking(fds[0], true, NULL)) { ret = -errno; goto fail; } - ret = fcntl_setfl(fds[1], O_NONBLOCK); - if (ret < 0) { + if (!g_unix_set_fd_nonblocking(fds[1], true, NULL)) { ret = -errno; goto fail; } @@ -99,6 +96,11 @@ int event_notifier_get_fd(const EventNotifier *e) return e->rfd; } +int event_notifier_get_wfd(const EventNotifier *e) +{ + return e->wfd; +} + int event_notifier_set(EventNotifier *e) { static const uint64_t value = 1; diff --git a/util/event_notifier-win32.c b/util/event_notifier-win32.c index 62c53b0a99..9352da4699 100644 --- a/util/event_notifier-win32.c +++ b/util/event_notifier-win32.c @@ -11,7 +11,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/event_notifier.h" #include "qemu/main-loop.h" diff --git a/util/fdmon-epoll.c b/util/fdmon-epoll.c index e11a8a022e..1683aa1105 100644 --- a/util/fdmon-epoll.c +++ b/util/fdmon-epoll.c @@ -127,6 +127,8 @@ static bool fdmon_epoll_try_enable(AioContext *ctx) bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd) { + bool ok; + if (ctx->epollfd < 0) { return false; } @@ -136,14 +138,23 @@ bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd) return false; } - if (npfd >= EPOLL_ENABLE_THRESHOLD) { - if (fdmon_epoll_try_enable(ctx)) { - return true; - } else { - fdmon_epoll_disable(ctx); - } + if (npfd < EPOLL_ENABLE_THRESHOLD) { + return false; } - return false; + + /* The list must not change while we add fds to epoll */ + if (!qemu_lockcnt_dec_if_lock(&ctx->list_lock)) { + return false; + } + + ok = fdmon_epoll_try_enable(ctx); + + qemu_lockcnt_inc_and_unlock(&ctx->list_lock); + + if (!ok) { + fdmon_epoll_disable(ctx); + } + return ok; } void fdmon_epoll_setup(AioContext *ctx) diff --git a/util/fdmon-io_uring.c b/util/fdmon-io_uring.c index 1461dfa407..ab43052dd7 100644 --- a/util/fdmon-io_uring.c +++ b/util/fdmon-io_uring.c @@ -179,7 +179,11 @@ static void add_poll_remove_sqe(AioContext *ctx, AioHandler *node) { struct io_uring_sqe *sqe = get_sqe(ctx); +#ifdef LIBURING_HAVE_DATA64 + io_uring_prep_poll_remove(sqe, (__u64)(uintptr_t)node); +#else io_uring_prep_poll_remove(sqe, node); +#endif } /* Add a timeout that self-cancels when another cqe becomes ready */ diff --git a/util/hbitmap.c b/util/hbitmap.c index 305b894a63..297db35fb1 100644 --- a/util/hbitmap.c +++ b/util/hbitmap.c @@ -301,6 +301,39 @@ bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t start, int64_t end, return true; } +bool hbitmap_status(const HBitmap *hb, int64_t start, int64_t count, + int64_t *pnum) +{ + int64_t next_dirty, next_zero; + + assert(start >= 0); + assert(count > 0); + assert(start + count <= hb->orig_size); + + next_dirty = hbitmap_next_dirty(hb, start, count); + if (next_dirty == -1) { + *pnum = count; + return false; + } + + if (next_dirty > start) { + *pnum = next_dirty - start; + return false; + } + 
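A caller-side sketch of the new helper, assuming the usual convention that the return value reports whether the run starting at start is dirty and *pnum receives the length of that uniform run; the bitmap and size variables are hypothetical:

    int64_t offset = 0;
    while (offset < size) {
        int64_t pnum;
        bool dirty = hbitmap_status(bitmap, offset, size - offset, &pnum);
        printf("%s run at %" PRId64 ", length %" PRId64 "\n",
               dirty ? "dirty" : "clean", offset, pnum);
        offset += pnum;
    }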
+ assert(next_dirty == start); + + next_zero = hbitmap_next_zero(hb, start, count); + if (next_zero == -1) { + *pnum = count; + return true; + } + + assert(next_zero > start); + *pnum = next_zero - start; + return false; +} + bool hbitmap_empty(const HBitmap *hb) { return hb->count == 0; @@ -829,7 +862,7 @@ void hbitmap_truncate(HBitmap *hb, uint64_t size) } old = hb->sizes[i]; hb->sizes[i] = size; - hb->levels[i] = g_realloc(hb->levels[i], size * sizeof(unsigned long)); + hb->levels[i] = g_renew(unsigned long, hb->levels[i], size); if (!shrink) { memset(&hb->levels[i][old], 0x00, (size - old) * sizeof(*hb->levels[i])); @@ -840,11 +873,6 @@ void hbitmap_truncate(HBitmap *hb, uint64_t size) } } -bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b) -{ - return (a->orig_size == b->orig_size); -} - /** * hbitmap_sparse_merge: performs dst = dst | src * works with differing granularities. @@ -868,28 +896,24 @@ static void hbitmap_sparse_merge(HBitmap *dst, const HBitmap *src) * Given HBitmaps A and B, let R := A (BITOR) B. * Bitmaps A and B will not be modified, * except when bitmap R is an alias of A or B. - * - * @return true if the merge was successful, - * false if it was not attempted. + * Bitmaps must have same size. */ -bool hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result) +void hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result) { int i; uint64_t j; - if (!hbitmap_can_merge(a, b) || !hbitmap_can_merge(a, result)) { - return false; - } - assert(hbitmap_can_merge(b, result)); + assert(a->orig_size == result->orig_size); + assert(b->orig_size == result->orig_size); if ((!hbitmap_count(a) && result == b) || (!hbitmap_count(b) && result == a)) { - return true; + return; } if (!hbitmap_count(a) && !hbitmap_count(b)) { hbitmap_reset_all(result); - return true; + return; } if (a->granularity != b->granularity) { @@ -902,7 +926,7 @@ bool hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result) if (b != result) { hbitmap_sparse_merge(result, b); } - return true; + return; } /* This merge is O(size), as BITS_PER_LONG and HBITMAP_LEVELS are constant. @@ -918,8 +942,6 @@ bool hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result) /* Recompute the dirty count */ result->count = hb_count_between(result, 0, result->size - 1); - - return true; } char *hbitmap_sha256(const HBitmap *bitmap, Error **errp) diff --git a/util/hexdump.c b/util/hexdump.c index 2c105a8846..9921114b3c 100644 --- a/util/hexdump.c +++ b/util/hexdump.c @@ -14,7 +14,7 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" +#include "qemu/cutils.h" void qemu_hexdump_line(char *line, unsigned int b, const void *bufptr, unsigned int len, bool ascii) diff --git a/util/host-utils.c b/util/host-utils.c index 7b9322071d..fb91bcba82 100644 --- a/util/host-utils.c +++ b/util/host-utils.c @@ -34,7 +34,7 @@ static inline void mul64(uint64_t *plow, uint64_t *phigh, typedef union { uint64_t ll; struct { -#ifdef HOST_WORDS_BIGENDIAN +#if HOST_BIG_ENDIAN uint32_t high, low; #else uint32_t low, high; @@ -86,78 +86,119 @@ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b) *phigh = rh; } -/* Unsigned 128x64 division. Returns 1 if overflow (divide by zero or */ -/* quotient exceeds 64 bits). Otherwise returns quotient via plow and */ -/* remainder via phigh. */ -int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) +/* + * Unsigned 128-by-64 division. + * Returns the remainder. + * Returns quotient via plow and phigh. 
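A worked example of the calling convention documented here (values chosen purely for illustration): the 128-bit dividend goes in via *plow/*phigh and is overwritten with the quotient, while the remainder comes back as the return value.

    uint64_t lo = 10, hi = 1;                 /* dividend = 2^64 + 10 */
    uint64_t rem = divu128(&lo, &hi, 7);
    /* now hi == 0, lo == 2635249153387078803 and rem == 5,
     * because 2^64 + 10 == 7 * 2635249153387078803 + 5 */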
+ * Also returns the remainder via the function return value. + */ +uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) { uint64_t dhi = *phigh; uint64_t dlo = *plow; - unsigned i; - uint64_t carry = 0; + uint64_t rem, dhighest; + int sh; - if (divisor == 0) { - return 1; - } else if (dhi == 0) { + if (divisor == 0 || dhi == 0) { *plow = dlo / divisor; - *phigh = dlo % divisor; - return 0; - } else if (dhi > divisor) { - return 1; + *phigh = 0; + return dlo % divisor; } else { + sh = clz64(divisor); - for (i = 0; i < 64; i++) { - carry = dhi >> 63; - dhi = (dhi << 1) | (dlo >> 63); - if (carry || (dhi >= divisor)) { - dhi -= divisor; - carry = 1; - } else { - carry = 0; + if (dhi < divisor) { + if (sh != 0) { + /* normalize the divisor, shifting the dividend accordingly */ + divisor <<= sh; + dhi = (dhi << sh) | (dlo >> (64 - sh)); + dlo <<= sh; } - dlo = (dlo << 1) | carry; + + *phigh = 0; + *plow = udiv_qrnnd(&rem, dhi, dlo, divisor); + } else { + if (sh != 0) { + /* normalize the divisor, shifting the dividend accordingly */ + divisor <<= sh; + dhighest = dhi >> (64 - sh); + dhi = (dhi << sh) | (dlo >> (64 - sh)); + dlo <<= sh; + + *phigh = udiv_qrnnd(&dhi, dhighest, dhi, divisor); + } else { + /** + * dhi >= divisor + * Since the MSB of divisor is set (sh == 0), + * (dhi - divisor) < divisor + * + * Thus, the high part of the quotient is 1, and we can + * calculate the low part with a single call to udiv_qrnnd + * after subtracting divisor from dhi + */ + dhi -= divisor; + *phigh = 1; + } + + *plow = udiv_qrnnd(&rem, dhi, dlo, divisor); } - *plow = dlo; - *phigh = dhi; - return 0; + /* + * since the dividend/divisor might have been normalized, + * the remainder might also have to be shifted back + */ + return rem >> sh; } } -int divs128(int64_t *plow, int64_t *phigh, int64_t divisor) +/* + * Signed 128-by-64 division. + * Returns quotient via plow and phigh. + * Also returns the remainder via the function return value. + */ +int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor) { - int sgn_dvdnd = *phigh < 0; - int sgn_divsr = divisor < 0; - int overflow = 0; + bool neg_quotient = false, neg_remainder = false; + uint64_t unsig_hi = *phigh, unsig_lo = *plow; + uint64_t rem; - if (sgn_dvdnd) { - *plow = ~(*plow); - *phigh = ~(*phigh); - if (*plow == (int64_t)-1) { - *plow = 0; - (*phigh)++; - } else { - (*plow)++; - } - } + if (*phigh < 0) { + neg_quotient = !neg_quotient; + neg_remainder = !neg_remainder; - if (sgn_divsr) { - divisor = 0 - divisor; - } - - overflow = divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor); - - if (sgn_dvdnd ^ sgn_divsr) { - *plow = 0 - *plow; - } - - if (!overflow) { - if ((*plow < 0) ^ (sgn_dvdnd ^ sgn_divsr)) { - overflow = 1; + if (unsig_lo == 0) { + unsig_hi = -unsig_hi; + } else { + unsig_hi = ~unsig_hi; + unsig_lo = -unsig_lo; } } - return overflow; + if (divisor < 0) { + neg_quotient = !neg_quotient; + + divisor = -divisor; + } + + rem = divu128(&unsig_lo, &unsig_hi, (uint64_t)divisor); + + if (neg_quotient) { + if (unsig_lo == 0) { + *phigh = -unsig_hi; + *plow = 0; + } else { + *phigh = ~unsig_hi; + *plow = -unsig_lo; + } + } else { + *phigh = unsig_hi; + *plow = unsig_lo; + } + + if (neg_remainder) { + return -rem; + } else { + return rem; + } } #endif @@ -225,3 +266,183 @@ void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow) *plow = *plow << shift; } } + +/* + * Unsigned 256-by-128 division. + * Returns the remainder via r. + * Returns lower 128 bit of quotient. 
+ * Needs a normalized divisor (most significant bit set to 1). + * + * Adapted from include/qemu/host-utils.h udiv_qrnnd, + * from the GNU Multi Precision Library - longlong.h __udiv_qrnnd + * (https://gmplib.org/repo/gmp/file/tip/longlong.h) + * + * Licensed under the GPLv2/LGPLv3 + */ +static Int128 udiv256_qrnnd(Int128 *r, Int128 n1, Int128 n0, Int128 d) +{ + Int128 d0, d1, q0, q1, r1, r0, m; + uint64_t mp0, mp1; + + d0 = int128_make64(int128_getlo(d)); + d1 = int128_make64(int128_gethi(d)); + + r1 = int128_remu(n1, d1); + q1 = int128_divu(n1, d1); + mp0 = int128_getlo(q1); + mp1 = int128_gethi(q1); + mulu128(&mp0, &mp1, int128_getlo(d0)); + m = int128_make128(mp0, mp1); + r1 = int128_make128(int128_gethi(n0), int128_getlo(r1)); + if (int128_ult(r1, m)) { + q1 = int128_sub(q1, int128_one()); + r1 = int128_add(r1, d); + if (int128_uge(r1, d)) { + if (int128_ult(r1, m)) { + q1 = int128_sub(q1, int128_one()); + r1 = int128_add(r1, d); + } + } + } + r1 = int128_sub(r1, m); + + r0 = int128_remu(r1, d1); + q0 = int128_divu(r1, d1); + mp0 = int128_getlo(q0); + mp1 = int128_gethi(q0); + mulu128(&mp0, &mp1, int128_getlo(d0)); + m = int128_make128(mp0, mp1); + r0 = int128_make128(int128_getlo(n0), int128_getlo(r0)); + if (int128_ult(r0, m)) { + q0 = int128_sub(q0, int128_one()); + r0 = int128_add(r0, d); + if (int128_uge(r0, d)) { + if (int128_ult(r0, m)) { + q0 = int128_sub(q0, int128_one()); + r0 = int128_add(r0, d); + } + } + } + r0 = int128_sub(r0, m); + + *r = r0; + return int128_or(int128_lshift(q1, 64), q0); +} + +/* + * Unsigned 256-by-128 division. + * Returns the remainder. + * Returns quotient via plow and phigh. + * Also returns the remainder via the function return value. + */ +Int128 divu256(Int128 *plow, Int128 *phigh, Int128 divisor) +{ + Int128 dhi = *phigh; + Int128 dlo = *plow; + Int128 rem, dhighest; + int sh; + + if (!int128_nz(divisor) || !int128_nz(dhi)) { + *plow = int128_divu(dlo, divisor); + *phigh = int128_zero(); + return int128_remu(dlo, divisor); + } else { + sh = clz128(divisor); + + if (int128_ult(dhi, divisor)) { + if (sh != 0) { + /* normalize the divisor, shifting the dividend accordingly */ + divisor = int128_lshift(divisor, sh); + dhi = int128_or(int128_lshift(dhi, sh), + int128_urshift(dlo, (128 - sh))); + dlo = int128_lshift(dlo, sh); + } + + *phigh = int128_zero(); + *plow = udiv256_qrnnd(&rem, dhi, dlo, divisor); + } else { + if (sh != 0) { + /* normalize the divisor, shifting the dividend accordingly */ + divisor = int128_lshift(divisor, sh); + dhighest = int128_rshift(dhi, (128 - sh)); + dhi = int128_or(int128_lshift(dhi, sh), + int128_urshift(dlo, (128 - sh))); + dlo = int128_lshift(dlo, sh); + + *phigh = udiv256_qrnnd(&dhi, dhighest, dhi, divisor); + } else { + /* + * dhi >= divisor + * Since the MSB of divisor is set (sh == 0), + * (dhi - divisor) < divisor + * + * Thus, the high part of the quotient is 1, and we can + * calculate the low part with a single call to udiv_qrnnd + * after subtracting divisor from dhi + */ + dhi = int128_sub(dhi, divisor); + *phigh = int128_one(); + } + + *plow = udiv256_qrnnd(&rem, dhi, dlo, divisor); + } + + /* + * since the dividend/divisor might have been normalized, + * the remainder might also have to be shifted back + */ + rem = int128_urshift(rem, sh); + return rem; + } +} + +/* + * Signed 256-by-128 division. + * Returns quotient via plow and phigh. + * Also returns the remainder via the function return value. 
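+ * As with C's '/' and '%' on integers, the quotient is truncated towards
+ * zero and the remainder takes the sign of the dividend.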
+ */ +Int128 divs256(Int128 *plow, Int128 *phigh, Int128 divisor) +{ + bool neg_quotient = false, neg_remainder = false; + Int128 unsig_hi = *phigh, unsig_lo = *plow; + Int128 rem; + + if (!int128_nonneg(*phigh)) { + neg_quotient = !neg_quotient; + neg_remainder = !neg_remainder; + + if (!int128_nz(unsig_lo)) { + unsig_hi = int128_neg(unsig_hi); + } else { + unsig_hi = int128_not(unsig_hi); + unsig_lo = int128_neg(unsig_lo); + } + } + + if (!int128_nonneg(divisor)) { + neg_quotient = !neg_quotient; + + divisor = int128_neg(divisor); + } + + rem = divu256(&unsig_lo, &unsig_hi, divisor); + + if (neg_quotient) { + if (!int128_nz(unsig_lo)) { + *phigh = int128_neg(unsig_hi); + *plow = int128_zero(); + } else { + *phigh = int128_not(unsig_hi); + *plow = int128_neg(unsig_lo); + } + } else { + *phigh = unsig_hi; + *plow = unsig_lo; + } + + if (neg_remainder) { + return int128_neg(rem); + } else { + return rem; + } +} diff --git a/util/int128.c b/util/int128.c new file mode 100644 index 0000000000..ed8f25fef1 --- /dev/null +++ b/util/int128.c @@ -0,0 +1,147 @@ +/* + * 128-bit division and remainder for compilers not supporting __int128 + * + * Copyright (c) 2021 Frédéric Pétrot + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "qemu/int128.h" + +#ifndef CONFIG_INT128 + +/* + * Division and remainder algorithms for 128-bit due to Stefan Kanthak, + * https://skanthak.homepage.t-online.de/integer.html#udivmodti4 + * Preconditions: + * - function should never be called with v equals to 0, it has to + * be dealt with beforehand + * - quotien pointer must be valid + */ +static Int128 divrem128(Int128 u, Int128 v, Int128 *q) +{ + Int128 qq; + uint64_t hi, lo, tmp; + int s = clz64(v.hi); + + if (s == 64) { + /* we have uu÷0v => let's use divu128 */ + hi = u.hi; + lo = u.lo; + tmp = divu128(&lo, &hi, v.lo); + *q = int128_make128(lo, hi); + return int128_make128(tmp, 0); + } else { + hi = int128_gethi(int128_lshift(v, s)); + + if (hi > u.hi) { + lo = u.lo; + tmp = u.hi; + divu128(&lo, &tmp, hi); + lo = int128_gethi(int128_lshift(int128_make128(lo, 0), s)); + } else { /* prevent overflow */ + lo = u.lo; + tmp = u.hi - hi; + divu128(&lo, &tmp, hi); + lo = int128_gethi(int128_lshift(int128_make128(lo, 1), s)); + } + + qq = int128_make64(lo); + + tmp = lo * v.hi; + mulu64(&lo, &hi, lo, v.lo); + hi += tmp; + + if (hi < tmp /* quotient * divisor >= 2**128 > dividend */ + || hi > u.hi /* quotient * divisor > dividend */ + || (hi == u.hi && lo > u.lo)) { + qq.lo -= 1; + mulu64(&lo, &hi, qq.lo, v.lo); + hi += qq.lo * v.hi; + } + + *q = qq; + u.hi -= hi + (u.lo < lo); + u.lo -= lo; + return u; + } +} + +Int128 int128_divu(Int128 a, Int128 b) +{ + Int128 q; + divrem128(a, b, &q); + return q; +} + +Int128 int128_remu(Int128 a, Int128 b) +{ + Int128 q; + return divrem128(a, b, &q); +} + +Int128 int128_divs(Int128 a, Int128 b) +{ + Int128 q; + bool sgna = !int128_nonneg(a); + bool sgnb = !int128_nonneg(b); + + if (sgna) { + a = int128_neg(a); + } + + if (sgnb) { + b = int128_neg(b); + } + + divrem128(a, b, &q); + + if (sgna != sgnb) { + q = int128_neg(q); + } + + return q; +} + +Int128 int128_rems(Int128 a, Int128 b) +{ + Int128 q, r; + bool sgna = !int128_nonneg(a); + bool sgnb = !int128_nonneg(b); + + if (sgna) { + a = int128_neg(a); + } + + if (sgnb) { + b = int128_neg(b); + } + + r = divrem128(a, b, &q); + + if (sgna) { + r = int128_neg(r); + } + + return r; +} + +#endif diff --git a/util/iov.c b/util/iov.c index 58c7b3eeee..b4be580022 100644 --- a/util/iov.c +++ b/util/iov.c @@ -17,7 +17,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/iov.h" #include "qemu/sockets.h" #include "qemu/cutils.h" @@ -112,12 +111,17 @@ do_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt, bool do_send) /*XXX Note: windows has WSASend() and WSARecv() */ unsigned i = 0; ssize_t ret = 0; + ssize_t off = 0; while (i < iov_cnt) { ssize_t r = do_send - ? send(sockfd, iov[i].iov_base, iov[i].iov_len, 0) - : recv(sockfd, iov[i].iov_base, iov[i].iov_len, 0); + ? send(sockfd, iov[i].iov_base + off, iov[i].iov_len - off, 0) + : recv(sockfd, iov[i].iov_base + off, iov[i].iov_len - off, 0); if (r > 0) { ret += r; + off += r; + if (off < iov[i].iov_len) { + continue; + } } else if (!r) { break; } else if (errno == EINTR) { @@ -130,6 +134,7 @@ do_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt, bool do_send) } break; } + off = 0; i++; } return ret; diff --git a/util/iova-tree.c b/util/iova-tree.c index 7990692cbd..536789797e 100644 --- a/util/iova-tree.c +++ b/util/iova-tree.c @@ -16,6 +16,45 @@ struct IOVATree { GTree *tree; }; +/* Args to pass to iova_tree_alloc foreach function. 
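+ * They are bundled into a single struct because g_tree_foreach() passes
+ * only one user-data pointer to its callback.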
*/ +struct IOVATreeAllocArgs { + /* Size of the desired allocation */ + size_t new_size; + + /* The minimum address allowed in the allocation */ + hwaddr iova_begin; + + /* Map at the left of the hole, can be NULL if "this" is first one */ + const DMAMap *prev; + + /* Map at the right of the hole, can be NULL if "prev" is the last one */ + const DMAMap *this; + + /* If found, we fill in the IOVA here */ + hwaddr iova_result; + + /* Whether have we found a valid IOVA */ + bool iova_found; +}; + +typedef struct IOVATreeFindIOVAArgs { + const DMAMap *needle; + const DMAMap *result; +} IOVATreeFindIOVAArgs; + +/** + * Iterate args to the next hole + * + * @args: The alloc arguments + * @next: The next mapping in the tree. Can be NULL to signal the last one + */ +static void iova_tree_alloc_args_iterate(struct IOVATreeAllocArgs *args, + const DMAMap *next) +{ + args->prev = args->this; + args->this = next; +} + static int iova_tree_compare(gconstpointer a, gconstpointer b, gpointer data) { const DMAMap *m1 = a, *m2 = b; @@ -42,14 +81,43 @@ IOVATree *iova_tree_new(void) return iova_tree; } -DMAMap *iova_tree_find(IOVATree *tree, DMAMap *map) +const DMAMap *iova_tree_find(const IOVATree *tree, const DMAMap *map) { return g_tree_lookup(tree->tree, map); } -DMAMap *iova_tree_find_address(IOVATree *tree, hwaddr iova) +static gboolean iova_tree_find_address_iterator(gpointer key, gpointer value, + gpointer data) { - DMAMap map = { .iova = iova, .size = 0 }; + const DMAMap *map = key; + IOVATreeFindIOVAArgs *args = data; + const DMAMap *needle; + + g_assert(key == value); + + needle = args->needle; + if (map->translated_addr + map->size < needle->translated_addr || + needle->translated_addr + needle->size < map->translated_addr) { + return false; + } + + args->result = map; + return true; +} + +const DMAMap *iova_tree_find_iova(const IOVATree *tree, const DMAMap *map) +{ + IOVATreeFindIOVAArgs args = { + .needle = map, + }; + + g_tree_foreach(tree->tree, iova_tree_find_address_iterator, &args); + return args.result; +} + +const DMAMap *iova_tree_find_address(const IOVATree *tree, hwaddr iova) +{ + const DMAMap map = { .iova = iova, .size = 0 }; return iova_tree_find(tree, &map); } @@ -60,7 +128,7 @@ static inline void iova_tree_insert_internal(GTree *gtree, DMAMap *range) g_tree_insert(gtree, range, range); } -int iova_tree_insert(IOVATree *tree, DMAMap *map) +int iova_tree_insert(IOVATree *tree, const DMAMap *map) { DMAMap *new; @@ -96,15 +164,115 @@ void iova_tree_foreach(IOVATree *tree, iova_tree_iterator iterator) g_tree_foreach(tree->tree, iova_tree_traverse, iterator); } -int iova_tree_remove(IOVATree *tree, DMAMap *map) +void iova_tree_remove(IOVATree *tree, DMAMap map) { - DMAMap *overlap; + const DMAMap *overlap; - while ((overlap = iova_tree_find(tree, map))) { + while ((overlap = iova_tree_find(tree, &map))) { g_tree_remove(tree->tree, overlap); } +} - return IOVA_OK; +/** + * Try to find an unallocated IOVA range between prev and this elements. + * + * @args: Arguments to allocation + * + * Cases: + * + * (1) !prev, !this: No entries allocated, always succeed + * + * (2) !prev, this: We're iterating at the 1st element. + * + * (3) prev, !this: We're iterating at the last element. + * + * (4) prev, this: this is the most common case, we'll try to find a hole + * between "prev" and "this" mapping. + * + * Note that this function assumes the last valid iova is HWADDR_MAX, but it + * searches linearly so it's easy to discard the result if it's not the case. 
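+ *
+ * On success, args->iova_result is set to the start of the hole and
+ * args->iova_found is set to true; otherwise both fields are left untouched.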
+ */ +static void iova_tree_alloc_map_in_hole(struct IOVATreeAllocArgs *args) +{ + const DMAMap *prev = args->prev, *this = args->this; + uint64_t hole_start, hole_last; + + if (this && this->iova + this->size < args->iova_begin) { + return; + } + + hole_start = MAX(prev ? prev->iova + prev->size + 1 : 0, args->iova_begin); + hole_last = this ? this->iova : HWADDR_MAX; + + if (hole_last - hole_start > args->new_size) { + args->iova_result = hole_start; + args->iova_found = true; + } +} + +/** + * Foreach dma node in the tree, compare if there is a hole with its previous + * node (or minimum iova address allowed) and the node. + * + * @key: Node iterating + * @value: Node iterating + * @pargs: Struct to communicate with the outside world + * + * Return: false to keep iterating, true if needs break. + */ +static gboolean iova_tree_alloc_traverse(gpointer key, gpointer value, + gpointer pargs) +{ + struct IOVATreeAllocArgs *args = pargs; + DMAMap *node = value; + + assert(key == value); + + iova_tree_alloc_args_iterate(args, node); + iova_tree_alloc_map_in_hole(args); + return args->iova_found; +} + +int iova_tree_alloc_map(IOVATree *tree, DMAMap *map, hwaddr iova_begin, + hwaddr iova_last) +{ + struct IOVATreeAllocArgs args = { + .new_size = map->size, + .iova_begin = iova_begin, + }; + + if (unlikely(iova_last < iova_begin)) { + return IOVA_ERR_INVALID; + } + + /* + * Find a valid hole for the mapping + * + * Assuming low iova_begin, so no need to do a binary search to + * locate the first node. + * + * TODO: Replace all this with g_tree_node_first/next/last when available + * (from glib since 2.68). To do it with g_tree_foreach complicates the + * code a lot. + * + */ + g_tree_foreach(tree->tree, iova_tree_alloc_traverse, &args); + if (!args.iova_found) { + /* + * Either tree is empty or the last hole is still not checked. + * g_tree_foreach does not compare (last, iova_last] range, so we check + * it here. + */ + iova_tree_alloc_args_iterate(&args, NULL); + iova_tree_alloc_map_in_hole(&args); + } + + if (!args.iova_found || args.iova_result + map->size > iova_last) { + return IOVA_ERR_NOMEM; + } + + map->iova = args.iova_result; + return iova_tree_insert(tree, map); } void iova_tree_destroy(IOVATree *tree) diff --git a/util/keyval.c b/util/keyval.c index 904337c8a1..66a5b4740f 100644 --- a/util/keyval.c +++ b/util/keyval.c @@ -16,7 +16,9 @@ * key-vals = [ key-val { ',' key-val } [ ',' ] ] * key-val = key '=' val | help * key = key-fragment { '.' key-fragment } - * key-fragment = / [^=,.]+ / + * key-fragment = qapi-name | index + * qapi-name = '__' / [a-z0-9.-]+ / '_' / [A-Za-z][A-Za-z0-9_-]* / + * index = / [0-9]+ / * val = { / [^,]+ / | ',,' } * help = 'help' | '?' * @@ -93,8 +95,8 @@ #include "qapi/qmp/qlist.h" #include "qapi/qmp/qstring.h" #include "qemu/cutils.h" +#include "qemu/keyval.h" #include "qemu/help_option.h" -#include "qemu/option.h" /* * Convert @key to a list index. diff --git a/util/log.c b/util/log.c index 2ee1500bee..c2198badf2 100644 --- a/util/log.c +++ b/util/log.c @@ -26,153 +26,308 @@ #include "trace/control.h" #include "qemu/thread.h" #include "qemu/lockable.h" +#include "qemu/rcu.h" +#ifdef CONFIG_LINUX +#include +#endif + + +typedef struct RCUCloseFILE { + struct rcu_head rcu; + FILE *fd; +} RCUCloseFILE; + +/* Mutex covering the other global_* variables. 
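+ * That is, global_filename and global_file; thread_file is per-thread
+ * and needs no locking.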
*/ +static QemuMutex global_mutex; +static char *global_filename; +static FILE *global_file; +static __thread FILE *thread_file; +static __thread Notifier qemu_log_thread_cleanup_notifier; -static char *logfilename; -static QemuMutex qemu_logfile_mutex; -QemuLogFile *qemu_logfile; int qemu_loglevel; -static int log_append = 0; +static bool log_append; +static bool log_per_thread; static GArray *debug_regions; -/* Return the number of characters emitted. */ -int qemu_log(const char *fmt, ...) +/* Returns true if qemu_log() will really write somewhere. */ +bool qemu_log_enabled(void) { - int ret = 0; - QemuLogFile *logfile; + return log_per_thread || qatomic_read(&global_file) != NULL; +} - rcu_read_lock(); - logfile = qatomic_rcu_read(&qemu_logfile); - if (logfile) { - va_list ap; - va_start(ap, fmt); - ret = vfprintf(logfile->fd, fmt, ap); - va_end(ap); +/* Returns true if qemu_log() will write somewhere other than stderr. */ +bool qemu_log_separate(void) +{ + if (log_per_thread) { + return true; + } else { + FILE *logfile = qatomic_read(&global_file); + return logfile && logfile != stderr; + } +} - /* Don't pass back error results. */ - if (ret < 0) { - ret = 0; +static int log_thread_id(void) +{ +#ifdef CONFIG_GETTID + return gettid(); +#elif defined(SYS_gettid) + return syscall(SYS_gettid); +#else + static int counter; + return qatomic_fetch_inc(&counter); +#endif +} + +static void qemu_log_thread_cleanup(Notifier *n, void *unused) +{ + fclose(thread_file); + thread_file = NULL; +} + +/* Lock/unlock output. */ + +FILE *qemu_log_trylock(void) +{ + FILE *logfile; + + logfile = thread_file; + if (!logfile) { + if (log_per_thread) { + g_autofree char *filename + = g_strdup_printf(global_filename, log_thread_id()); + logfile = fopen(filename, "w"); + if (!logfile) { + return NULL; + } + thread_file = logfile; + qemu_log_thread_cleanup_notifier.notify = qemu_log_thread_cleanup; + qemu_thread_atexit_add(&qemu_log_thread_cleanup_notifier); + } else { + rcu_read_lock(); + /* + * FIXME: typeof_strip_qual, as used by qatomic_rcu_read, + * does not work with pointers to undefined structures, + * such as we have with struct _IO_FILE and musl libc. + * Since all we want is a read of a pointer, cast to void**, + * which does work with typeof_strip_qual. + */ + logfile = qatomic_rcu_read((void **)&global_file); + if (!logfile) { + rcu_read_unlock(); + return NULL; + } } } - rcu_read_unlock(); - return ret; + + qemu_flockfile(logfile); + return logfile; } -static void __attribute__((__constructor__)) qemu_logfile_init(void) +void qemu_log_unlock(FILE *logfile) { - qemu_mutex_init(&qemu_logfile_mutex); -} - -static void qemu_logfile_free(QemuLogFile *logfile) -{ - g_assert(logfile); - - if (logfile->fd != stderr) { - fclose(logfile->fd); + if (logfile) { + fflush(logfile); + qemu_funlockfile(logfile); + if (!log_per_thread) { + rcu_read_unlock(); + } } - g_free(logfile); } -static bool log_uses_own_buffers; +void qemu_log(const char *fmt, ...) +{ + FILE *f = qemu_log_trylock(); + if (f) { + va_list ap; + + va_start(ap, fmt); + vfprintf(f, fmt, ap); + va_end(ap); + qemu_log_unlock(f); + } +} + +static void __attribute__((__constructor__)) startup(void) +{ + qemu_mutex_init(&global_mutex); +} + +static void rcu_close_file(RCUCloseFILE *r) +{ + fclose(r->fd); + g_free(r); +} + +/** + * valid_filename_template: + * + * Validate the filename template. Require %d if per_thread, allow it + * otherwise; require no other % within the template. 
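+ *
+ * Returns vft_error (with errp set) for an invalid template, vft_stderr
+ * when no filename was given, vft_strdup when the filename should be kept
+ * as given, and vft_pid_printf when '%d' should be expanded to the PID.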
+ */ + +typedef enum { + vft_error, + vft_stderr, + vft_strdup, + vft_pid_printf, +} ValidFilenameTemplateResult; + +static ValidFilenameTemplateResult +valid_filename_template(const char *filename, bool per_thread, Error **errp) +{ + if (filename) { + char *pidstr = strstr(filename, "%"); + + if (pidstr) { + /* We only accept one %d, no other format strings */ + if (pidstr[1] != 'd' || strchr(pidstr + 2, '%')) { + error_setg(errp, "Bad logfile template: %s", filename); + return 0; + } + return per_thread ? vft_strdup : vft_pid_printf; + } + } + if (per_thread) { + error_setg(errp, "Filename template with '%%d' required for 'tid'"); + return vft_error; + } + return filename ? vft_strdup : vft_stderr; +} /* enable or disable low levels log */ -void qemu_set_log(int log_flags) +static bool qemu_set_log_internal(const char *filename, bool changed_name, + int log_flags, Error **errp) { - bool need_to_open_file = false; - QemuLogFile *logfile; + bool need_to_open_file; + bool daemonized; + bool per_thread; + FILE *logfile; - qemu_loglevel = log_flags; + QEMU_LOCK_GUARD(&global_mutex); + logfile = global_file; + + /* The per-thread flag is immutable. */ + if (log_per_thread) { + log_flags |= LOG_PER_THREAD; + } else { + if (global_filename) { + log_flags &= ~LOG_PER_THREAD; + } + } + + per_thread = log_flags & LOG_PER_THREAD; + + if (changed_name) { + char *newname = NULL; + + /* + * Once threads start opening their own log files, we have no + * easy mechanism to tell them all to close and re-open. + * There seems little cause to do so either -- this option + * will most often be used at user-only startup. + */ + if (log_per_thread) { + error_setg(errp, "Cannot change log filename after setting 'tid'"); + return false; + } + + switch (valid_filename_template(filename, per_thread, errp)) { + case vft_error: + return false; + case vft_stderr: + break; + case vft_strdup: + newname = g_strdup(filename); + break; + case vft_pid_printf: + newname = g_strdup_printf(filename, getpid()); + break; + } + + g_free(global_filename); + global_filename = newname; + filename = newname; + } else { + filename = global_filename; + if (per_thread && + valid_filename_template(filename, true, errp) == vft_error) { + return false; + } + } + + /* Once the per-thread flag is set, it cannot be unset. */ + if (per_thread) { + log_per_thread = true; + } + /* The flag itself is not relevant for need_to_open_file. */ + log_flags &= ~LOG_PER_THREAD; #ifdef CONFIG_TRACE_LOG - qemu_loglevel |= LOG_TRACE; + log_flags |= LOG_TRACE; #endif + qemu_loglevel = log_flags; + /* * In all cases we only log if qemu_loglevel is set. * Also: + * If per-thread, open the file for each thread in qemu_log_lock. * If not daemonized we will always log either to stderr - * or to a file (if there is a logfilename). - * If we are daemonized, - * we will only log if there is a logfilename. + * or to a file (if there is a filename). + * If we are daemonized, we will only log if there is a filename. 
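+ * The per-thread files themselves are opened lazily, on each thread's
+ * first call to qemu_log_trylock().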
*/ - if (qemu_loglevel && (!is_daemonized() || logfilename)) { - need_to_open_file = true; + daemonized = is_daemonized(); + need_to_open_file = log_flags && !per_thread && (!daemonized || filename); + + if (logfile && (!need_to_open_file || changed_name)) { + qatomic_rcu_set(&global_file, NULL); + if (logfile != stderr) { + RCUCloseFILE *r = g_new0(RCUCloseFILE, 1); + r->fd = logfile; + call_rcu(r, rcu_close_file, rcu); + } + logfile = NULL; } - QEMU_LOCK_GUARD(&qemu_logfile_mutex); - if (qemu_logfile && !need_to_open_file) { - logfile = qemu_logfile; - qatomic_rcu_set(&qemu_logfile, NULL); - call_rcu(logfile, qemu_logfile_free, rcu); - } else if (!qemu_logfile && need_to_open_file) { - logfile = g_new0(QemuLogFile, 1); - if (logfilename) { - logfile->fd = fopen(logfilename, log_append ? "a" : "w"); - if (!logfile->fd) { - g_free(logfile); - perror(logfilename); - _exit(1); + + if (!logfile && need_to_open_file) { + if (filename) { + logfile = fopen(filename, log_append ? "a" : "w"); + if (!logfile) { + error_setg_errno(errp, errno, "Error opening logfile %s", + filename); + return false; } /* In case we are a daemon redirect stderr to logfile */ - if (is_daemonized()) { - dup2(fileno(logfile->fd), STDERR_FILENO); - fclose(logfile->fd); - /* This will skip closing logfile in qemu_log_close() */ - logfile->fd = stderr; + if (daemonized) { + dup2(fileno(logfile), STDERR_FILENO); + fclose(logfile); + /* This will skip closing logfile in rcu_close_file. */ + logfile = stderr; } } else { /* Default to stderr if no log file specified */ - assert(!is_daemonized()); - logfile->fd = stderr; + assert(!daemonized); + logfile = stderr; } - /* must avoid mmap() usage of glibc by setting a buffer "by hand" */ - if (log_uses_own_buffers) { - static char logfile_buf[4096]; - setvbuf(logfile->fd, logfile_buf, _IOLBF, sizeof(logfile_buf)); - } else { -#if defined(_WIN32) - /* Win32 doesn't support line-buffering, so use unbuffered output. */ - setvbuf(logfile->fd, NULL, _IONBF, 0); -#else - setvbuf(logfile->fd, NULL, _IOLBF, 0); -#endif - log_append = 1; - } - qatomic_rcu_set(&qemu_logfile, logfile); + log_append = 1; + + qatomic_rcu_set(&global_file, logfile); } + return true; } -void qemu_log_needs_buffers(void) +bool qemu_set_log(int log_flags, Error **errp) { - log_uses_own_buffers = true; + return qemu_set_log_internal(NULL, false, log_flags, errp); } -/* - * Allow the user to include %d in their logfile which will be - * substituted with the current PID. This is useful for debugging many - * nested linux-user tasks but will result in lots of logs. - * - * filename may be NULL. 
In that case, log output is sent to stderr - */ -void qemu_set_log_filename(const char *filename, Error **errp) +bool qemu_set_log_filename(const char *filename, Error **errp) { - g_free(logfilename); - logfilename = NULL; + return qemu_set_log_internal(filename, true, qemu_loglevel, errp); +} - if (filename) { - char *pidstr = strstr(filename, "%"); - if (pidstr) { - /* We only accept one %d, no other format strings */ - if (pidstr[1] != 'd' || strchr(pidstr + 2, '%')) { - error_setg(errp, "Bad logfile format: %s", filename); - return; - } else { - logfilename = g_strdup_printf(filename, getpid()); - } - } else { - logfilename = g_strdup(filename); - } - } - - qemu_log_close(); - qemu_set_log(qemu_loglevel); +bool qemu_set_log_filename_flags(const char *name, int flags, Error **errp) +{ + return qemu_set_log_internal(name, true, flags, errp); } /* Returns true if addr is in our debug filter or no filter defined @@ -266,34 +421,6 @@ out: g_strfreev(ranges); } -/* fflush() the log file */ -void qemu_log_flush(void) -{ - QemuLogFile *logfile; - - rcu_read_lock(); - logfile = qatomic_rcu_read(&qemu_logfile); - if (logfile) { - fflush(logfile->fd); - } - rcu_read_unlock(); -} - -/* Close the log file */ -void qemu_log_close(void) -{ - QemuLogFile *logfile; - - qemu_mutex_lock(&qemu_logfile_mutex); - logfile = qemu_logfile; - - if (logfile) { - qatomic_rcu_set(&qemu_logfile, NULL); - call_rcu(logfile, qemu_logfile_free, rcu); - } - qemu_mutex_unlock(&qemu_logfile_mutex); -} - const QEMULogItem qemu_log_items[] = { { CPU_LOG_TB_OUT_ASM, "out_asm", "show generated host assembly code for each compiled TB" }, @@ -334,6 +461,8 @@ const QEMULogItem qemu_log_items[] = { #endif { LOG_STRACE, "strace", "log every user-mode syscall, its input, and its result" }, + { LOG_PER_THREAD, "tid", + "open a separate log file per thread; filename must contain '%d'" }, { 0, NULL, NULL }, }; diff --git a/util/main-loop.c b/util/main-loop.c index bd732e825a..479ec2c9ba 100644 --- a/util/main-loop.c +++ b/util/main-loop.c @@ -30,9 +30,11 @@ #include "sysemu/replay.h" #include "qemu/main-loop.h" #include "block/aio.h" +#include "block/thread-pool.h" #include "qemu/error-report.h" #include "qemu/queue.h" #include "qemu/compiler.h" +#include "qom/object.h" #ifdef XBOX /* FIXME: This is a slightly incomplete implementation of moving @@ -136,7 +138,7 @@ static int qemu_signal_init(Error **errp) return -errno; } - fcntl_setfl(sigfd, O_NONBLOCK); + g_unix_set_fd_nonblocking(sigfd, true, NULL); qemu_set_fd_handler(sigfd, sigfd_handler, NULL, (void *)(intptr_t)sigfd); @@ -238,6 +240,69 @@ int qemu_init_main_loop(Error **errp) return 0; } +static void main_loop_update_params(EventLoopBase *base, Error **errp) +{ + ERRP_GUARD(); + + if (!qemu_aio_context) { + error_setg(errp, "qemu aio context not ready"); + return; + } + + aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch, errp); + if (*errp) { + return; + } + + aio_context_set_thread_pool_params(qemu_aio_context, base->thread_pool_min, + base->thread_pool_max, errp); +} + +MainLoop *mloop; + +static void main_loop_init(EventLoopBase *base, Error **errp) +{ + MainLoop *m = MAIN_LOOP(base); + + if (mloop) { + error_setg(errp, "only one main-loop instance allowed"); + return; + } + + main_loop_update_params(base, errp); + + mloop = m; + return; +} + +static bool main_loop_can_be_deleted(EventLoopBase *base) +{ + return false; +} + +static void main_loop_class_init(ObjectClass *oc, void *class_data) +{ + EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(oc); + + 
bc->init = main_loop_init; + bc->update_params = main_loop_update_params; + bc->can_be_deleted = main_loop_can_be_deleted; +} + +static const TypeInfo main_loop_info = { + .name = TYPE_MAIN_LOOP, + .parent = TYPE_EVENT_LOOP_BASE, + .class_init = main_loop_class_init, + .instance_size = sizeof(MainLoop), +}; + +static void main_loop_register_types(void) +{ + type_register_static(&main_loop_info); +} + +type_init(main_loop_register_types) + static int max_priority; #ifndef _WIN32 @@ -345,7 +410,7 @@ static PollingEntry *first_polling_entry; int qemu_add_polling_cb(PollingFunc *func, void *opaque) { PollingEntry **ppe, *pe; - pe = g_malloc0(sizeof(PollingEntry)); + pe = g_new0(PollingEntry, 1); pe->func = func; pe->opaque = opaque; for(ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next); @@ -370,20 +435,30 @@ void qemu_del_polling_cb(PollingFunc *func, void *opaque) /* Wait objects support */ typedef struct WaitObjects { int num; - int revents[MAXIMUM_WAIT_OBJECTS + 1]; - HANDLE events[MAXIMUM_WAIT_OBJECTS + 1]; - WaitObjectFunc *func[MAXIMUM_WAIT_OBJECTS + 1]; - void *opaque[MAXIMUM_WAIT_OBJECTS + 1]; + int revents[MAXIMUM_WAIT_OBJECTS]; + HANDLE events[MAXIMUM_WAIT_OBJECTS]; + WaitObjectFunc *func[MAXIMUM_WAIT_OBJECTS]; + void *opaque[MAXIMUM_WAIT_OBJECTS]; } WaitObjects; static WaitObjects wait_objects = {0}; int qemu_add_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque) { + int i; WaitObjects *w = &wait_objects; + if (w->num >= MAXIMUM_WAIT_OBJECTS) { return -1; } + + for (i = 0; i < w->num; i++) { + /* check if the same handle is added twice */ + if (w->events[i] == handle) { + return -1; + } + } + w->events[w->num] = handle; w->func[w->num] = func; w->opaque[w->num] = opaque; @@ -402,7 +477,7 @@ void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque) if (w->events[i] == handle) { found = 1; } - if (found) { + if (found && i < (MAXIMUM_WAIT_OBJECTS - 1)) { w->events[i] = w->events[i + 1]; w->func[i] = w->func[i + 1]; w->opaque[i] = w->opaque[i + 1]; @@ -664,7 +739,7 @@ void qemu_set_fd_handler(int fd, { iohandler_init(); aio_set_fd_handler(iohandler_ctx, fd, false, - fd_read, fd_write, NULL, opaque); + fd_read, fd_write, NULL, NULL, opaque); } void event_notifier_set_handler(EventNotifier *e, @@ -672,5 +747,5 @@ void event_notifier_set_handler(EventNotifier *e, { iohandler_init(); aio_set_event_notifier(iohandler_ctx, e, false, - handler, NULL); + handler, NULL, NULL); } diff --git a/util/memalign.c b/util/memalign.c new file mode 100644 index 0000000000..c199ae7073 --- /dev/null +++ b/util/memalign.c @@ -0,0 +1,92 @@ +/* + * memalign.c: Allocate an aligned memory region + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2010-2016 Red Hat, Inc. + * Copyright (c) 2022 Linaro Ltd + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "qemu/memalign.h" +#include "trace.h" + +void *qemu_try_memalign(size_t alignment, size_t size) +{ + void *ptr; + + if (alignment < sizeof(void*)) { + alignment = sizeof(void*); + } else { + g_assert(is_power_of_2(alignment)); + } + + /* + * Handling of 0 allocations varies among the different + * platform APIs (for instance _aligned_malloc() will + * fail) -- ensure that we always return a valid non-NULL + * pointer that can be freed by qemu_vfree(). + */ + if (size == 0) { + size++; + } +#if defined(CONFIG_POSIX_MEMALIGN) + int ret; + ret = posix_memalign(&ptr, alignment, size); + if (ret != 0) { + errno = ret; + ptr = NULL; + } +#elif defined(CONFIG_ALIGNED_MALLOC) + ptr = _aligned_malloc(size, alignment); +#elif defined(CONFIG_VALLOC) + ptr = valloc(size); +#elif defined(CONFIG_MEMALIGN) + ptr = memalign(alignment, size); +#else + #error No function to allocate aligned memory available +#endif + trace_qemu_memalign(alignment, size, ptr); + return ptr; +} + +void *qemu_memalign(size_t alignment, size_t size) +{ + void *p = qemu_try_memalign(alignment, size); + if (p) { + return p; + } + fprintf(stderr, + "qemu_memalign: failed to allocate %zu bytes at alignment %zu: %s\n", + size, alignment, strerror(errno)); + abort(); +} + +void qemu_vfree(void *ptr) +{ + trace_qemu_vfree(ptr); +#if !defined(CONFIG_POSIX_MEMALIGN) && defined(CONFIG_ALIGNED_MALLOC) + /* Only Windows _aligned_malloc needs a special free function */ + _aligned_free(ptr); +#else + free(ptr); +#endif +} diff --git a/util/meson.build b/util/meson.build index 5f24d10dee..8cc9b59c74 100644 --- a/util/meson.build +++ b/util/meson.build @@ -1,5 +1,9 @@ util_ss.add(files('osdep.c', 'cutils.c', 'unicode.c', 'qemu-timer-common.c')) -util_ss.add(when: 'CONFIG_ATOMIC64', if_false: files('atomic64.c')) +util_ss.add(files('thread-context.c'), numa) +if not config_host_data.get('CONFIG_ATOMIC64') + util_ss.add(files('atomic64.c')) +endif +util_ss.add(when: 'CONFIG_LINUX', if_true: files('async-teardown.c')) util_ss.add(when: 'CONFIG_POSIX', if_true: files('aio-posix.c')) util_ss.add(when: 'CONFIG_POSIX', if_true: files('fdmon-poll.c')) if config_host_data.get('CONFIG_EPOLL_CREATE1') @@ -9,8 +13,11 @@ util_ss.add(when: linux_io_uring, if_true: files('fdmon-io_uring.c')) util_ss.add(when: 'CONFIG_POSIX', if_true: files('compatfd.c')) util_ss.add(when: 'CONFIG_POSIX', if_true: files('event_notifier-posix.c')) util_ss.add(when: 'CONFIG_POSIX', if_true: files('mmap-alloc.c')) -util_ss.add(when: 'CONFIG_POSIX', if_true: files('oslib-posix.c')) -util_ss.add(when: 'CONFIG_POSIX', if_true: [files('qemu-openpty.c'), util]) +freebsd_dep = [] +if targetos == 'freebsd' + freebsd_dep = util +endif +util_ss.add(when: 'CONFIG_POSIX', if_true: [files('oslib-posix.c'), freebsd_dep]) util_ss.add(when: 'CONFIG_POSIX', if_true: files('qemu-thread-posix.c')) util_ss.add(when: 'CONFIG_POSIX', if_true: files('memfd.c')) util_ss.add(when: 'CONFIG_WIN32', if_true: files('aio-win32.c')) @@ -18,12 +25,13 @@ 
util_ss.add(when: 'CONFIG_WIN32', if_true: files('event_notifier-win32.c')) util_ss.add(when: 'CONFIG_WIN32', if_true: files('oslib-win32.c')) util_ss.add(when: 'CONFIG_WIN32', if_true: files('qemu-thread-win32.c')) util_ss.add(when: 'CONFIG_WIN32', if_true: winmm) +util_ss.add(when: 'CONFIG_WIN32', if_true: pathcch) util_ss.add(files('envlist.c', 'path.c', 'module.c')) util_ss.add(files('host-utils.c')) util_ss.add(files('bitmap.c', 'bitops.c')) util_ss.add(files('fifo8.c')) -util_ss.add(files('cacheinfo.c', 'cacheflush.c')) -util_ss.add(files('error.c', 'qemu-error.c')) +util_ss.add(files('cacheflush.c')) +util_ss.add(files('error.c', 'error-report.c')) util_ss.add(files('qemu-print.c')) util_ss.add(files('id.c')) util_ss.add(files('qemu-config.c', 'notify.c')) @@ -33,9 +41,10 @@ util_ss.add(files('crc32c.c')) util_ss.add(files('uuid.c')) util_ss.add(files('getauxval.c')) util_ss.add(files('rcu.c')) -util_ss.add(when: 'CONFIG_MEMBARRIER', if_true: files('sys_membarrier.c')) +if have_membarrier + util_ss.add(files('sys_membarrier.c')) +endif util_ss.add(files('log.c')) -util_ss.add(files('pagesize.c')) util_ss.add(files('qdist.c')) util_ss.add(files('qht.c')) util_ss.add(files('qsp.c')) @@ -46,6 +55,8 @@ util_ss.add(files('transactions.c')) util_ss.add(when: 'CONFIG_POSIX', if_true: files('drm.c')) util_ss.add(files('guest-random.c')) util_ss.add(files('yank.c')) +util_ss.add(files('int128.c')) +util_ss.add(files('memalign.c')) util_ss.add(when: 'CONFIG_WIN32', if_true: files('miniz/miniz.c')) util_ss.add(files('fast-hash.c')) @@ -55,35 +66,43 @@ endif if have_system util_ss.add(files('crc-ccitt.c')) - util_ss.add(when: 'CONFIG_GIO', if_true: [files('dbus.c'), gio]) + util_ss.add(when: gio, if_true: files('dbus.c')) util_ss.add(when: 'CONFIG_LINUX', if_true: files('userfaultfd.c')) endif -if have_block - util_ss.add(files('aiocb.c', 'async.c', 'aio-wait.c')) +if have_block or have_ga + util_ss.add(files('aiocb.c', 'async.c')) util_ss.add(files('base64.c')) + util_ss.add(files('lockcnt.c')) + util_ss.add(files('main-loop.c')) + util_ss.add(files('qemu-coroutine.c', 'qemu-coroutine-lock.c', 'qemu-coroutine-io.c')) + util_ss.add(files('coroutine-@0@.c'.format(config_host['CONFIG_COROUTINE_BACKEND']))) + util_ss.add(files('thread-pool.c', 'qemu-timer.c')) + util_ss.add(files('qemu-sockets.c')) +endif +if have_block + util_ss.add(files('aio-wait.c')) util_ss.add(files('buffer.c')) util_ss.add(files('bufferiszero.c')) - util_ss.add(files('coroutine-@0@.c'.format(config_host['CONFIG_COROUTINE_BACKEND']))) util_ss.add(files('hbitmap.c')) util_ss.add(files('hexdump.c')) util_ss.add(files('iova-tree.c')) - util_ss.add(files('iov.c', 'qemu-sockets.c', 'uri.c')) - util_ss.add(files('lockcnt.c')) - util_ss.add(files('main-loop.c')) + util_ss.add(files('iov.c', 'uri.c')) util_ss.add(files('nvdimm-utils.c')) - util_ss.add(files('qemu-coroutine.c', 'qemu-coroutine-lock.c', 'qemu-coroutine-io.c')) util_ss.add(when: 'CONFIG_LINUX', if_true: [ files('vhost-user-server.c'), vhost_user ]) util_ss.add(files('block-helpers.c')) util_ss.add(files('qemu-coroutine-sleep.c')) util_ss.add(files('qemu-co-shared-resource.c')) - util_ss.add(files('thread-pool.c', 'qemu-timer.c')) + util_ss.add(files('qemu-co-timeout.c')) util_ss.add(files('readline.c')) util_ss.add(files('throttle.c')) util_ss.add(files('timed-average.c')) - util_ss.add(when: 'CONFIG_INOTIFY1', if_true: files('filemonitor-inotify.c'), - if_false: files('filemonitor-stub.c')) + if config_host_data.get('CONFIG_INOTIFY1') + 
util_ss.add(files('filemonitor-inotify.c')) + else + util_ss.add(files('filemonitor-stub.c')) + endif util_ss.add(when: 'CONFIG_LINUX', if_true: files('vfio-helpers.c')) endif diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c index 893d864354..5ed7d29183 100644 --- a/util/mmap-alloc.c +++ b/util/mmap-alloc.c @@ -50,38 +50,7 @@ size_t qemu_fd_getpagesize(int fd) #endif #endif - return qemu_real_host_page_size; -} - -size_t qemu_mempath_getpagesize(const char *mem_path) -{ -#ifdef CONFIG_LINUX - struct statfs fs; - int ret; - - if (mem_path) { - do { - ret = statfs(mem_path, &fs); - } while (ret != 0 && errno == EINTR); - - if (ret != 0) { - fprintf(stderr, "Couldn't statfs() memory path: %s\n", - strerror(errno)); - exit(1); - } - - if (fs.f_type == HUGETLBFS_MAGIC) { - /* It's hugepage, return the huge page size */ - return fs.f_bsize; - } - } -#ifdef __sparc__ - /* SPARC Linux needs greater alignment than the pagesize */ - return QEMU_VMALLOC_ALIGN; -#endif -#endif - - return qemu_real_host_page_size; + return qemu_real_host_page_size(); } #define OVERCOMMIT_MEMORY_PATH "/proc/sys/vm/overcommit_memory" @@ -101,7 +70,7 @@ static bool map_noreserve_effective(int fd, uint32_t qemu_map_flags) * MAP_NORESERVE. * b) MAP_NORESERVE is not affected by /proc/sys/vm/overcommit_memory. */ - if (qemu_fd_getpagesize(fd) != qemu_real_host_page_size) { + if (qemu_fd_getpagesize(fd) != qemu_real_host_page_size()) { return true; } @@ -166,7 +135,7 @@ static void *mmap_reserve(size_t size, int fd) * We do this unless we are using the system page size, in which case * anonymous memory is OK. */ - if (fd == -1 || qemu_fd_getpagesize(fd) == qemu_real_host_page_size) { + if (fd == -1 || qemu_fd_getpagesize(fd) == qemu_real_host_page_size()) { fd = -1; flags |= MAP_ANONYMOUS; } else { @@ -243,7 +212,7 @@ static inline size_t mmap_guard_pagesize(int fd) /* Mappings in the same segment must share the same page size */ return qemu_fd_getpagesize(fd); #else - return qemu_real_host_page_size; + return qemu_real_host_page_size(); #endif } diff --git a/util/module.c b/util/module.c index 6bb4ad915a..32e263163c 100644 --- a/util/module.c +++ b/util/module.c @@ -21,6 +21,7 @@ #include "qemu/module.h" #include "qemu/cutils.h" #include "qemu/config-file.h" +#include "qapi/error.h" #ifdef CONFIG_MODULE_UPGRADES #include "qemu-version.h" #endif @@ -144,25 +145,22 @@ static bool module_check_arch(const QemuModinfo *modinfo) return true; } -static int module_load_file(const char *fname, bool mayfail, bool export_symbols) +/* + * module_load_dso: attempt to load an existing dso file + * + * fname: full pathname of the file to load + * export_symbols: if true, add the symbols to the global name space + * errp: error to set. + * + * Return value: true on success, false on error, and errp will be set. 
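+ *
+ * The caller is expected to have already checked that fname exists; this
+ * function only opens and initializes the module.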
+ */ +static bool module_load_dso(const char *fname, bool export_symbols, + Error **errp) { GModule *g_module; void (*sym)(void); - const char *dsosuf = CONFIG_HOST_DSOSUF; - int len = strlen(fname); - int suf_len = strlen(dsosuf); ModuleEntry *e, *next; - int ret, flags; - - if (len <= suf_len || strcmp(&fname[len - suf_len], dsosuf)) { - /* wrong suffix */ - ret = -EINVAL; - goto out; - } - if (access(fname, F_OK)) { - ret = -ENOENT; - goto out; - } + int flags; assert(QTAILQ_EMPTY(&dso_init_list)); @@ -172,48 +170,38 @@ static int module_load_file(const char *fname, bool mayfail, bool export_symbols } g_module = g_module_open(fname, flags); if (!g_module) { - if (!mayfail) { - fprintf(stderr, "Failed to open module: %s\n", - g_module_error()); - } - ret = -EINVAL; - goto out; + error_setg(errp, "failed to open module: %s", g_module_error()); + return false; } if (!g_module_symbol(g_module, DSO_STAMP_FUN_STR, (gpointer *)&sym)) { - fprintf(stderr, "Failed to initialize module: %s\n", - fname); - /* Print some info if this is a QEMU module (but from different build), - * this will make debugging user problems easier. */ + error_setg(errp, "failed to initialize module: %s", fname); + /* + * Print some info if this is a QEMU module (but from different build), + * this will make debugging user problems easier. + */ if (g_module_symbol(g_module, "qemu_module_dummy", (gpointer *)&sym)) { - fprintf(stderr, - "Note: only modules from the same build can be loaded.\n"); + error_append_hint(errp, + "Only modules from the same build can be loaded.\n"); } g_module_close(g_module); - ret = -EINVAL; - } else { - QTAILQ_FOREACH(e, &dso_init_list, node) { - e->init(); - register_module_init(e->init, e->type); - } - ret = 0; + return false; } + QTAILQ_FOREACH(e, &dso_init_list, node) { + e->init(); + register_module_init(e->init, e->type); + } trace_module_load_module(fname); QTAILQ_FOREACH_SAFE(e, &dso_init_list, node, next) { QTAILQ_REMOVE(&dso_init_list, e, node); g_free(e); } -out: - return ret; + return true; } -#endif -bool module_load_one(const char *prefix, const char *lib_name, bool mayfail) +int module_load(const char *prefix, const char *name, Error **errp) { - bool success = false; - -#ifdef CONFIG_MODULES - char *fname = NULL; + int rv = -1; #ifdef CONFIG_MODULE_UPGRADES char *version_dir; #endif @@ -221,34 +209,52 @@ bool module_load_one(const char *prefix, const char *lib_name, bool mayfail) char *dirs[5]; char *module_name; int i = 0, n_dirs = 0; - int ret; bool export_symbols = false; static GHashTable *loaded_modules; const QemuModinfo *modinfo; const char **sl; if (!g_module_supported()) { - fprintf(stderr, "Module is not supported by system.\n"); - return false; + error_setg(errp, "%s", "this platform does not support GLib modules"); + return -1; } if (!loaded_modules) { loaded_modules = g_hash_table_new(g_str_hash, g_str_equal); } - module_name = g_strdup_printf("%s%s", prefix, lib_name); + /* allocate all resources managed by the out: label here */ + module_name = g_strdup_printf("%s%s", prefix, name); if (g_hash_table_contains(loaded_modules, module_name)) { g_free(module_name); - return true; + return 2; /* module already loaded */ } g_hash_table_add(loaded_modules, module_name); + search_dir = getenv("QEMU_MODULE_DIR"); + if (search_dir != NULL) { + dirs[n_dirs++] = g_strdup_printf("%s", search_dir); + } + dirs[n_dirs++] = get_relocated_path(CONFIG_QEMU_MODDIR); + +#ifdef CONFIG_MODULE_UPGRADES + version_dir = g_strcanon(g_strdup(QEMU_PKGVERSION), + G_CSET_A_2_Z G_CSET_a_2_z 
G_CSET_DIGITS "+-.~", + '_'); + dirs[n_dirs++] = g_strdup_printf("/var/run/qemu/%s", version_dir); +#endif + assert(n_dirs <= ARRAY_SIZE(dirs)); + + /* end of resources managed by the out: label */ + for (modinfo = module_info; modinfo->name != NULL; modinfo++) { if (modinfo->arch) { if (strcmp(modinfo->name, module_name) == 0) { if (!module_check_arch(modinfo)) { - return false; + error_setg(errp, "module arch does not match: " + "expected '%s', got '%s'", module_arch, modinfo->arch); + goto out; } } } @@ -256,7 +262,11 @@ bool module_load_one(const char *prefix, const char *lib_name, bool mayfail) if (strcmp(modinfo->name, module_name) == 0) { /* we depend on other module(s) */ for (sl = modinfo->deps; *sl != NULL; sl++) { - module_load_one("", *sl, false); + int subrv = module_load("", *sl, errp); + if (subrv <= 0) { + rv = subrv; + goto out; + } } } else { for (sl = modinfo->deps; *sl != NULL; sl++) { @@ -269,59 +279,52 @@ bool module_load_one(const char *prefix, const char *lib_name, bool mayfail) } } - search_dir = getenv("QEMU_MODULE_DIR"); - if (search_dir != NULL) { - dirs[n_dirs++] = g_strdup_printf("%s", search_dir); - } - dirs[n_dirs++] = get_relocated_path(CONFIG_QEMU_MODDIR); - dirs[n_dirs++] = g_strdup(qemu_get_exec_dir()); - -#ifdef CONFIG_MODULE_UPGRADES - version_dir = g_strcanon(g_strdup(QEMU_PKGVERSION), - G_CSET_A_2_Z G_CSET_a_2_z G_CSET_DIGITS "+-.~", - '_'); - dirs[n_dirs++] = g_strdup_printf("/var/run/qemu/%s", version_dir); -#endif - - assert(n_dirs <= ARRAY_SIZE(dirs)); - for (i = 0; i < n_dirs; i++) { - fname = g_strdup_printf("%s/%s%s", - dirs[i], module_name, CONFIG_HOST_DSOSUF); - ret = module_load_file(fname, mayfail, export_symbols); - g_free(fname); - fname = NULL; - /* Try loading until loaded a module file */ - if (!ret) { - success = true; - break; + char *fname = g_strdup_printf("%s/%s%s", + dirs[i], module_name, CONFIG_HOST_DSOSUF); + int ret = access(fname, F_OK); + if (ret != 0 && (errno == ENOENT || errno == ENOTDIR)) { + /* + * if we don't find the module in this dir, try the next one. + * If we don't find it in any dir, that can be fine too: user + * did not install the module. We will return 0 in this case + * with no error set. 
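+ * ENOTDIR means a path component of the search dir is not a directory,
+ * which we treat the same way as a missing file.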
+ */ + g_free(fname); + continue; + } else if (ret != 0) { + /* most common is EACCES here */ + error_setg_errno(errp, errno, "error trying to access %s", fname); + } else if (module_load_dso(fname, export_symbols, errp)) { + rv = 1; /* module successfully loaded */ } + g_free(fname); + goto out; } + rv = 0; /* module not found */ - if (!success) { +out: + if (rv <= 0) { g_hash_table_remove(loaded_modules, module_name); g_free(module_name); } - for (i = 0; i < n_dirs; i++) { g_free(dirs[i]); } - -#endif - return success; + return rv; } -#ifdef CONFIG_MODULES - static bool module_loaded_qom_all; -void module_load_qom_one(const char *type) +int module_load_qom(const char *type, Error **errp) { const QemuModinfo *modinfo; const char **sl; + int rv = 0; if (!type) { - return; + error_setg(errp, "%s", "type is NULL"); + return -1; } trace_module_lookup_object_type(type); @@ -334,15 +337,24 @@ void module_load_qom_one(const char *type) } for (sl = modinfo->objs; *sl != NULL; sl++) { if (strcmp(type, *sl) == 0) { - module_load_one("", modinfo->name, false); + if (rv > 0) { + error_setg(errp, "multiple modules providing '%s'", type); + return -1; + } + rv = module_load("", modinfo->name, errp); + if (rv < 0) { + return rv; + } } } } + return rv; } void module_load_qom_all(void) { const QemuModinfo *modinfo; + Error *local_err = NULL; if (module_loaded_qom_all) { return; @@ -355,7 +367,9 @@ void module_load_qom_all(void) if (!module_check_arch(modinfo)) { continue; } - module_load_one("", modinfo->name, false); + if (module_load("", modinfo->name, &local_err) < 0) { + error_report_err(local_err); + } } module_loaded_qom_all = true; } @@ -371,7 +385,10 @@ void qemu_load_module_for_opts(const char *group) } for (sl = modinfo->opts; *sl != NULL; sl++) { if (strcmp(group, *sl) == 0) { - module_load_one("", modinfo->name, false); + Error *local_err = NULL; + if (module_load("", modinfo->name, &local_err) < 0) { + error_report_err(local_err); + } } } } @@ -381,7 +398,8 @@ void qemu_load_module_for_opts(const char *group) void module_allow_arch(const char *arch) {} void qemu_load_module_for_opts(const char *group) {} -void module_load_qom_one(const char *type) {} +int module_load(const char *prefix, const char *name, Error **errp) { return 2; } +int module_load_qom(const char *type, Error **errp) { return 2; } void module_load_qom_all(void) {} #endif diff --git a/util/osdep.c b/util/osdep.c index dfe147674a..046c099828 100644 --- a/util/osdep.c +++ b/util/osdep.c @@ -23,30 +23,20 @@ */ #include "qemu/osdep.h" #include "qapi/error.h" - -/* Needed early for CONFIG_BSD etc. 
*/ - -#ifdef CONFIG_SOLARIS -#include -/* See MySQL bug #7156 (http://bugs.mysql.com/bug.php?id=7156) for - discussion about Solaris header problems */ -extern int madvise(char *, size_t, int); -#endif - -#include "qemu-common.h" #include "qemu/cutils.h" #include "qemu/sockets.h" #include "qemu/error-report.h" +#include "qemu/madvise.h" +#include "qemu/mprotect.h" +#include "qemu/hw-version.h" #include "monitor/monitor.h" -static bool fips_enabled = false; - static const char *hw_version = QEMU_HW_VERSION; int socket_set_cork(int fd, int v) { #if defined(SOL_TCP) && defined(TCP_CORK) - return qemu_setsockopt(fd, SOL_TCP, TCP_CORK, &v, sizeof(v)); + return setsockopt(fd, SOL_TCP, TCP_CORK, &v, sizeof(v)); #else return 0; #endif @@ -55,7 +45,7 @@ int socket_set_cork(int fd, int v) int socket_set_nodelay(int fd) { int v = 1; - return qemu_setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &v, sizeof(v)); + return setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &v, sizeof(v)); } int qemu_madvise(void *addr, size_t len, int advice) @@ -76,8 +66,8 @@ int qemu_madvise(void *addr, size_t len, int advice) static int qemu_mprotect__osdep(void *addr, size_t size, int prot) { - g_assert(!((uintptr_t)addr & ~qemu_real_host_page_mask)); - g_assert(!(size & ~qemu_real_host_page_mask)); + g_assert(!((uintptr_t)addr & ~qemu_real_host_page_mask())); + g_assert(!(size & ~qemu_real_host_page_mask())); #ifdef _WIN32 DWORD old_protect; @@ -486,6 +476,8 @@ int qemu_mkdir(const char *path) if (!dirResult) { return -1; } + + return 0; #else return mkdir(path, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); #endif @@ -567,6 +559,28 @@ int qemu_accept(int s, struct sockaddr *addr, socklen_t *addrlen) return ret; } +ssize_t qemu_send_full(int s, const void *buf, size_t count) +{ + ssize_t ret = 0; + ssize_t total = 0; + + while (count) { + ret = send(s, buf, count, 0); + if (ret < 0) { + if (errno == EINTR) { + continue; + } + break; + } + + count -= ret; + buf += ret; + total += ret; + } + + return total; +} + void qemu_set_hw_version(const char *version) { hw_version = version; @@ -577,32 +591,6 @@ const char *qemu_hw_version(void) return hw_version; } -void fips_set_state(bool requested) -{ -#ifdef __linux__ - if (requested) { - FILE *fds = fopen("/proc/sys/crypto/fips_enabled", "r"); - if (fds != NULL) { - fips_enabled = (fgetc(fds) == '1'); - fclose(fds); - } - } -#else - fips_enabled = false; -#endif /* __linux__ */ - -#ifdef _FIPS_DEBUG - fprintf(stderr, "FIPS mode %s (requested %s)\n", - (fips_enabled ? "enabled" : "disabled"), - (requested ? "enabled" : "disabled")); -#endif -} - -bool fips_get_state(void) -{ - return fips_enabled; -} - #ifdef _WIN32 static void socket_cleanup(void) { @@ -629,18 +617,22 @@ int socket_init(void) #ifndef CONFIG_IOVEC -/* helper function for iov_send_recv() */ static ssize_t readv_writev(int fd, const struct iovec *iov, int iov_cnt, bool do_write) { unsigned i = 0; ssize_t ret = 0; + ssize_t off = 0; while (i < iov_cnt) { ssize_t r = do_write - ? write(fd, iov[i].iov_base, iov[i].iov_len) - : read(fd, iov[i].iov_base, iov[i].iov_len); + ? 
write(fd, iov[i].iov_base + off, iov[i].iov_len - off) + : read(fd, iov[i].iov_base + off, iov[i].iov_len - off); if (r > 0) { ret += r; + off += r; + if (off < iov[i].iov_len) { + continue; + } } else if (!r) { break; } else if (errno == EINTR) { @@ -653,6 +645,7 @@ readv_writev(int fd, const struct iovec *iov, int iov_cnt, bool do_write) } break; } + off = 0; i++; } return ret; @@ -670,3 +663,19 @@ writev(int fd, const struct iovec *iov, int iov_cnt) return readv_writev(fd, iov, iov_cnt, true); } #endif + +/* + * Make sure data goes on disk, but if possible do not bother to + * write out the inode just for timestamp updates. + * + * Unfortunately even in 2009 many operating systems do not support + * fdatasync and have to fall back to fsync. + */ +int qemu_fdatasync(int fd) +{ +#ifdef CONFIG_FDATASYNC + return fdatasync(fd); +#else + return fsync(fd); +#endif +} diff --git a/util/oslib-posix.c b/util/oslib-posix.c index e8bdb02e1d..59a891b6a8 100644 --- a/util/oslib-posix.c +++ b/util/oslib-posix.c @@ -31,40 +31,34 @@ #include -#include "qemu-common.h" #include "sysemu/sysemu.h" #include "trace.h" #include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/madvise.h" #include "qemu/sockets.h" #include "qemu/thread.h" #include #include "qemu/cutils.h" #include "qemu/compiler.h" +#include "qemu/units.h" +#include "qemu/thread-context.h" #ifdef CONFIG_LINUX #include #endif #ifdef __FreeBSD__ -#include -#include #include +#include +#include #include #endif #ifdef __NetBSD__ -#include #include #endif -#ifdef __APPLE__ -#include -#endif - -#ifdef __HAIKU__ -#include -#endif - #include "qemu/mmap-alloc.h" #ifdef CONFIG_DEBUG_STACK_USAGE @@ -73,22 +67,32 @@ #define MAX_MEM_PREALLOC_THREAD_COUNT 16 +struct MemsetThread; + +typedef struct MemsetContext { + bool all_threads_created; + bool any_thread_failed; + struct MemsetThread *threads; + int num_threads; +} MemsetContext; + struct MemsetThread { char *addr; size_t numpages; size_t hpagesize; QemuThread pgthread; sigjmp_buf env; + MemsetContext *context; }; typedef struct MemsetThread MemsetThread; -static MemsetThread *memset_thread; -static int memset_num_threads; -static bool memset_thread_failed; +/* used by sigbus_handler() */ +static MemsetContext *sigbus_memset_context; +struct sigaction sigbus_oldact; +static QemuMutex sigbus_mutex; static QemuMutex page_mutex; static QemuCond page_cond; -static bool threads_created_flag; int qemu_get_thread_id(void) { @@ -126,9 +130,8 @@ bool qemu_write_pidfile(const char *path, Error **errp) .l_len = 0, }; - fd = qemu_open_old(path, O_CREAT | O_WRONLY, S_IRUSR | S_IWUSR); + fd = qemu_create(path, O_WRONLY, S_IRUSR | S_IWUSR, errp); if (fd == -1) { - error_setg_errno(errp, errno, "Cannot open pid file"); return false; } @@ -172,7 +175,7 @@ bool qemu_write_pidfile(const char *path, Error **errp) } snprintf(pidstr, sizeof(pidstr), FMT_pid "\n", getpid()); - if (write(fd, pidstr, strlen(pidstr)) != strlen(pidstr)) { + if (qemu_write_full(fd, pidstr, strlen(pidstr)) != strlen(pidstr)) { error_setg(errp, "Failed to write pid file"); goto fail_unlink; } @@ -186,46 +189,6 @@ fail_close: return false; } -void *qemu_oom_check(void *ptr) -{ - if (ptr == NULL) { - fprintf(stderr, "Failed to allocate memory: %s\n", strerror(errno)); - abort(); - } - return ptr; -} - -void *qemu_try_memalign(size_t alignment, size_t size) -{ - void *ptr; - - if (alignment < sizeof(void*)) { - alignment = sizeof(void*); - } else { - g_assert(is_power_of_2(alignment)); - } - -#if defined(CONFIG_POSIX_MEMALIGN) - int ret; - 
ret = posix_memalign(&ptr, alignment, size); - if (ret != 0) { - errno = ret; - ptr = NULL; - } -#elif defined(CONFIG_BSD) - ptr = valloc(size); -#else - ptr = memalign(alignment, size); -#endif - trace_qemu_memalign(alignment, size, ptr); - return ptr; -} - -void *qemu_memalign(size_t alignment, size_t size) -{ - return qemu_oom_check(qemu_try_memalign(alignment, size)); -} - /* alloc shared memory pages */ void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment, bool shared, bool noreserve) @@ -247,44 +210,26 @@ void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment, bool shared, return ptr; } -void qemu_vfree(void *ptr) -{ - trace_qemu_vfree(ptr); - free(ptr); -} - void qemu_anon_ram_free(void *ptr, size_t size) { trace_qemu_anon_ram_free(ptr, size); qemu_ram_munmap(-1, ptr, size); } -void qemu_set_block(int fd) +void qemu_socket_set_block(int fd) { - int f; - f = fcntl(fd, F_GETFL); - assert(f != -1); - f = fcntl(fd, F_SETFL, f & ~O_NONBLOCK); - assert(f != -1); + g_unix_set_fd_nonblocking(fd, false, NULL); } -int qemu_try_set_nonblock(int fd) +int qemu_socket_try_set_nonblock(int fd) { - int f; - f = fcntl(fd, F_GETFL); - if (f == -1) { - return -errno; - } - if (fcntl(fd, F_SETFL, f | O_NONBLOCK) == -1) { - return -errno; - } - return 0; + return g_unix_set_fd_nonblocking(fd, true, NULL) ? 0 : -errno; } -void qemu_set_nonblock(int fd) +void qemu_socket_set_nonblock(int fd) { int f; - f = qemu_try_set_nonblock(fd); + f = qemu_socket_try_set_nonblock(fd); assert(f == 0); } @@ -309,35 +254,29 @@ void qemu_set_cloexec(int fd) assert(f != -1); } -/* - * Creates a pipe with FD_CLOEXEC set on both file descriptors - */ -int qemu_pipe(int pipefd[2]) +int qemu_socketpair(int domain, int type, int protocol, int sv[2]) { int ret; -#ifdef CONFIG_PIPE2 - ret = pipe2(pipefd, O_CLOEXEC); - if (ret != -1 || errno != ENOSYS) { +#ifdef SOCK_CLOEXEC + ret = socketpair(domain, type | SOCK_CLOEXEC, protocol, sv); + if (ret != -1 || errno != EINVAL) { return ret; } #endif - ret = pipe(pipefd); + ret = socketpair(domain, type, protocol, sv);; if (ret == 0) { - qemu_set_cloexec(pipefd[0]); - qemu_set_cloexec(pipefd[1]); + qemu_set_cloexec(sv[0]); + qemu_set_cloexec(sv[1]); } return ret; } char * -qemu_get_local_state_pathname(const char *relative_pathname) +qemu_get_local_state_dir(void) { - g_autofree char *dir = g_strdup_printf("%s/%s", - CONFIG_QEMU_LOCALSTATEDIR, - relative_pathname); - return get_relocated_path(dir); + return get_relocated_path(CONFIG_QEMU_LOCALSTATEDIR); } void qemu_set_tty_echo(int fd, bool echo) @@ -355,103 +294,50 @@ void qemu_set_tty_echo(int fd, bool echo) tcsetattr(fd, TCSANOW, &tty); } -static const char *exec_dir; - -void qemu_init_exec_dir(const char *argv0) -{ - char *p = NULL; - char buf[PATH_MAX]; - - if (exec_dir) { - return; - } - -#if defined(__linux__) - { - int len; - len = readlink("/proc/self/exe", buf, sizeof(buf) - 1); - if (len > 0) { - buf[len] = 0; - p = buf; - } - } -#elif defined(__FreeBSD__) \ - || (defined(__NetBSD__) && defined(KERN_PROC_PATHNAME)) - { -#if defined(__FreeBSD__) - static int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1}; -#else - static int mib[4] = {CTL_KERN, KERN_PROC_ARGS, -1, KERN_PROC_PATHNAME}; -#endif - size_t len = sizeof(buf) - 1; - - *buf = '\0'; - if (!sysctl(mib, ARRAY_SIZE(mib), buf, &len, NULL, 0) && - *buf) { - buf[sizeof(buf) - 1] = '\0'; - p = buf; - } - } -#elif defined(__APPLE__) - { - char fpath[PATH_MAX]; - uint32_t len = sizeof(fpath); - if (_NSGetExecutablePath(fpath, &len) == 0) { - p = 
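The new qemu_socketpair() above tries SOCK_CLOEXEC first, retries without it if the kernel rejects the flag with EINVAL, and then sets FD_CLOEXEC by hand. A standalone sketch of that fallback, separate from the patch; make_cloexec_pair() and set_cloexec() are illustrative names, not QEMU APIs.

#include <sys/socket.h>
#include <fcntl.h>
#include <errno.h>

static void set_cloexec(int fd)
{
    int flags = fcntl(fd, F_GETFD);
    if (flags != -1) {
        fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
    }
}

static int make_cloexec_pair(int sv[2])
{
#ifdef SOCK_CLOEXEC
    if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, sv) == 0) {
        return 0;
    }
    if (errno != EINVAL) {   /* EINVAL here means the kernel lacks the flag */
        return -errno;
    }
#endif
    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) != 0) {
        return -errno;
    }
    set_cloexec(sv[0]);      /* emulate SOCK_CLOEXEC after the fact */
    set_cloexec(sv[1]);
    return 0;
}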
realpath(fpath, buf); - if (!p) { - return; - } - } - } -#elif defined(__HAIKU__) - { - image_info ii; - int32_t c = 0; - - *buf = '\0'; - while (get_next_image_info(0, &c, &ii) == B_OK) { - if (ii.type == B_APP_IMAGE) { - strncpy(buf, ii.name, sizeof(buf)); - buf[sizeof(buf) - 1] = 0; - p = buf; - break; - } - } - } -#endif - /* If we don't have any way of figuring out the actual executable - location then try argv[0]. */ - if (!p && argv0) { - p = realpath(argv0, buf); - } - if (p) { - exec_dir = g_path_get_dirname(p); - } else { - exec_dir = CONFIG_BINDIR; - } -} - -const char *qemu_get_exec_dir(void) -{ - return exec_dir; -} - +#ifdef CONFIG_LINUX +static void sigbus_handler(int signal, siginfo_t *siginfo, void *ctx) +#else /* CONFIG_LINUX */ static void sigbus_handler(int signal) +#endif /* CONFIG_LINUX */ { int i; - if (memset_thread) { - for (i = 0; i < memset_num_threads; i++) { - if (qemu_thread_is_self(&memset_thread[i].pgthread)) { - siglongjmp(memset_thread[i].env, 1); + + if (sigbus_memset_context) { + for (i = 0; i < sigbus_memset_context->num_threads; i++) { + MemsetThread *thread = &sigbus_memset_context->threads[i]; + + if (qemu_thread_is_self(&thread->pgthread)) { + siglongjmp(thread->env, 1); } } } + +#ifdef CONFIG_LINUX + /* + * We assume that the MCE SIGBUS handler could have been registered. We + * should never receive BUS_MCEERR_AO on any of our threads, but only on + * the main thread registered for PR_MCE_KILL_EARLY. Further, we should not + * receive BUS_MCEERR_AR triggered by action of other threads on one of + * our threads. So, no need to check for unrelated SIGBUS when seeing one + * for our threads. + * + * We will forward to the MCE handler, which will either handle the SIGBUS + * or reinstall the default SIGBUS handler and reraise the SIGBUS. The + * default SIGBUS handler will crash the process, so we don't care. + */ + if (sigbus_oldact.sa_flags & SA_SIGINFO) { + sigbus_oldact.sa_sigaction(signal, siginfo, ctx); + return; + } +#endif /* CONFIG_LINUX */ + warn_report("qemu_prealloc_mem: unrelated SIGBUS detected and ignored"); } static void *do_touch_pages(void *arg) { MemsetThread *memset_args = (MemsetThread *)arg; sigset_t set, oldset; + int ret = 0; /* * On Linux, the page faults from the loop below can cause mmap_sem @@ -459,7 +345,7 @@ static void *do_touch_pages(void *arg) * clearing until all threads have been created. */ qemu_mutex_lock(&page_mutex); - while(!threads_created_flag){ + while (!memset_args->context->all_threads_created) { qemu_cond_wait(&page_cond, &page_mutex); } qemu_mutex_unlock(&page_mutex); @@ -470,7 +356,7 @@ static void *do_touch_pages(void *arg) pthread_sigmask(SIG_UNBLOCK, &set, &oldset); if (sigsetjmp(memset_args->env, 1)) { - memset_thread_failed = true; + ret = -EFAULT; } else { char *addr = memset_args->addr; size_t numpages = memset_args->numpages; @@ -484,38 +370,66 @@ static void *do_touch_pages(void *arg) * * 'volatile' to stop compiler optimizing this away * to a no-op - * - * TODO: get a better solution from kernel so we - * don't need to write at all so we don't cause - * wear on the storage backing the region... 
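The reworked sigbus_handler() above chains to whatever handler was installed before it (for example the Linux MCE handler) when the SIGBUS is not one of its own. A standalone sketch of that save-and-forward pattern, not taken from the patch; bus_handler(), install_bus_handler() and old_bus_act are illustrative names, and the SIG_DFL/SIG_IGN checks are an extra guard this sketch adds.

#include <signal.h>
#include <string.h>

static struct sigaction old_bus_act;

static void bus_handler(int sig, siginfo_t *info, void *ctx)
{
    /* ... handle the faults this component expects ... */

    /* Not ours: forward to whoever was registered before us. */
    if (old_bus_act.sa_flags & SA_SIGINFO) {
        old_bus_act.sa_sigaction(sig, info, ctx);
    } else if (old_bus_act.sa_handler != SIG_IGN &&
               old_bus_act.sa_handler != SIG_DFL) {
        old_bus_act.sa_handler(sig);
    }
}

static int install_bus_handler(void)
{
    struct sigaction act;

    memset(&act, 0, sizeof(act));
    sigemptyset(&act.sa_mask);
    act.sa_sigaction = bus_handler;
    act.sa_flags = SA_SIGINFO;
    /* keep the previous handler so bus_handler() can chain to it */
    return sigaction(SIGBUS, &act, &old_bus_act);
}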
*/ *(volatile char *)addr = *addr; addr += hpagesize; } } pthread_sigmask(SIG_SETMASK, &oldset, NULL); - return NULL; + return (void *)(uintptr_t)ret; } -static inline int get_memset_num_threads(int smp_cpus) +static void *do_madv_populate_write_pages(void *arg) +{ + MemsetThread *memset_args = (MemsetThread *)arg; + const size_t size = memset_args->numpages * memset_args->hpagesize; + char * const addr = memset_args->addr; + int ret = 0; + + /* See do_touch_pages(). */ + qemu_mutex_lock(&page_mutex); + while (!memset_args->context->all_threads_created) { + qemu_cond_wait(&page_cond, &page_mutex); + } + qemu_mutex_unlock(&page_mutex); + + if (size && qemu_madvise(addr, size, QEMU_MADV_POPULATE_WRITE)) { + ret = -errno; + } + return (void *)(uintptr_t)ret; +} + +static inline int get_memset_num_threads(size_t hpagesize, size_t numpages, + int max_threads) { long host_procs = sysconf(_SC_NPROCESSORS_ONLN); int ret = 1; if (host_procs > 0) { - ret = MIN(MIN(host_procs, MAX_MEM_PREALLOC_THREAD_COUNT), smp_cpus); + ret = MIN(MIN(host_procs, MAX_MEM_PREALLOC_THREAD_COUNT), max_threads); } + + /* Especially with gigantic pages, don't create more threads than pages. */ + ret = MIN(ret, numpages); + /* Don't start threads to prealloc comparatively little memory. */ + ret = MIN(ret, MAX(1, hpagesize * numpages / (64 * MiB))); + /* In case sysconf() fails, we fall back to single threaded */ return ret; } -static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages, - int smp_cpus) +static int touch_all_pages(char *area, size_t hpagesize, size_t numpages, + int max_threads, ThreadContext *tc, + bool use_madv_populate_write) { static gsize initialized = 0; + MemsetContext context = { + .num_threads = get_memset_num_threads(hpagesize, numpages, max_threads), + }; size_t numpages_per_thread, leftover; + void *(*touch_fn)(void *); + int ret = 0, i = 0; char *addr = area; - int i = 0; if (g_once_init_enter(&initialized)) { qemu_mutex_init(&page_mutex); @@ -523,66 +437,129 @@ static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages, g_once_init_leave(&initialized, 1); } - memset_thread_failed = false; - threads_created_flag = false; - memset_num_threads = get_memset_num_threads(smp_cpus); - memset_thread = g_new0(MemsetThread, memset_num_threads); - numpages_per_thread = numpages / memset_num_threads; - leftover = numpages % memset_num_threads; - for (i = 0; i < memset_num_threads; i++) { - memset_thread[i].addr = addr; - memset_thread[i].numpages = numpages_per_thread + (i < leftover); - memset_thread[i].hpagesize = hpagesize; - qemu_thread_create(&memset_thread[i].pgthread, "touch_pages", - do_touch_pages, &memset_thread[i], - QEMU_THREAD_JOINABLE); - addr += memset_thread[i].numpages * hpagesize; + if (use_madv_populate_write) { + /* Avoid creating a single thread for MADV_POPULATE_WRITE */ + if (context.num_threads == 1) { + if (qemu_madvise(area, hpagesize * numpages, + QEMU_MADV_POPULATE_WRITE)) { + return -errno; + } + return 0; + } + touch_fn = do_madv_populate_write_pages; + } else { + touch_fn = do_touch_pages; + } + + context.threads = g_new0(MemsetThread, context.num_threads); + numpages_per_thread = numpages / context.num_threads; + leftover = numpages % context.num_threads; + for (i = 0; i < context.num_threads; i++) { + context.threads[i].addr = addr; + context.threads[i].numpages = numpages_per_thread + (i < leftover); + context.threads[i].hpagesize = hpagesize; + context.threads[i].context = &context; + if (tc) { + thread_context_create_thread(tc, 
&context.threads[i].pgthread, + "touch_pages", + touch_fn, &context.threads[i], + QEMU_THREAD_JOINABLE); + } else { + qemu_thread_create(&context.threads[i].pgthread, "touch_pages", + touch_fn, &context.threads[i], + QEMU_THREAD_JOINABLE); + } + addr += context.threads[i].numpages * hpagesize; + } + + if (!use_madv_populate_write) { + sigbus_memset_context = &context; } qemu_mutex_lock(&page_mutex); - threads_created_flag = true; + context.all_threads_created = true; qemu_cond_broadcast(&page_cond); qemu_mutex_unlock(&page_mutex); - for (i = 0; i < memset_num_threads; i++) { - qemu_thread_join(&memset_thread[i].pgthread); - } - g_free(memset_thread); - memset_thread = NULL; + for (i = 0; i < context.num_threads; i++) { + int tmp = (uintptr_t)qemu_thread_join(&context.threads[i].pgthread); - return memset_thread_failed; + if (tmp) { + ret = tmp; + } + } + + if (!use_madv_populate_write) { + sigbus_memset_context = NULL; + } + g_free(context.threads); + + return ret; } -void os_mem_prealloc(int fd, char *area, size_t memory, int smp_cpus, - Error **errp) +static bool madv_populate_write_possible(char *area, size_t pagesize) { + return !qemu_madvise(area, pagesize, QEMU_MADV_POPULATE_WRITE) || + errno != EINVAL; +} + +void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, + ThreadContext *tc, Error **errp) +{ + static gsize initialized; int ret; - struct sigaction act, oldact; size_t hpagesize = qemu_fd_getpagesize(fd); - size_t numpages = DIV_ROUND_UP(memory, hpagesize); + size_t numpages = DIV_ROUND_UP(sz, hpagesize); + bool use_madv_populate_write; + struct sigaction act; - memset(&act, 0, sizeof(act)); - act.sa_handler = &sigbus_handler; - act.sa_flags = 0; + /* + * Sense on every invocation, as MADV_POPULATE_WRITE cannot be used for + * some special mappings, such as mapping /dev/mem. 
+ */ + use_madv_populate_write = madv_populate_write_possible(area, hpagesize); - ret = sigaction(SIGBUS, &act, &oldact); - if (ret) { - error_setg_errno(errp, errno, - "os_mem_prealloc: failed to install signal handler"); - return; + if (!use_madv_populate_write) { + if (g_once_init_enter(&initialized)) { + qemu_mutex_init(&sigbus_mutex); + g_once_init_leave(&initialized, 1); + } + + qemu_mutex_lock(&sigbus_mutex); + memset(&act, 0, sizeof(act)); +#ifdef CONFIG_LINUX + act.sa_sigaction = &sigbus_handler; + act.sa_flags = SA_SIGINFO; +#else /* CONFIG_LINUX */ + act.sa_handler = &sigbus_handler; + act.sa_flags = 0; +#endif /* CONFIG_LINUX */ + + ret = sigaction(SIGBUS, &act, &sigbus_oldact); + if (ret) { + qemu_mutex_unlock(&sigbus_mutex); + error_setg_errno(errp, errno, + "qemu_prealloc_mem: failed to install signal handler"); + return; + } } /* touch pages simultaneously */ - if (touch_all_pages(area, hpagesize, numpages, smp_cpus)) { - error_setg(errp, "os_mem_prealloc: Insufficient free host memory " - "pages available to allocate guest RAM"); + ret = touch_all_pages(area, hpagesize, numpages, max_threads, tc, + use_madv_populate_write); + if (ret) { + error_setg_errno(errp, -ret, + "qemu_prealloc_mem: preallocating memory failed"); } - ret = sigaction(SIGBUS, &oldact, NULL); - if (ret) { - /* Terminate QEMU since it can't recover from error */ - perror("os_mem_prealloc: failed to reinstall signal handler"); - exit(1); + if (!use_madv_populate_write) { + ret = sigaction(SIGBUS, &sigbus_oldact, NULL); + if (ret) { + /* Terminate QEMU since it can't recover from error */ + perror("qemu_prealloc_mem: failed to reinstall signal handler"); + exit(1); + } + qemu_mutex_unlock(&sigbus_mutex); } } @@ -689,7 +666,7 @@ void *qemu_alloc_stack(size_t *sz) #ifdef CONFIG_DEBUG_STACK_USAGE void *ptr2; #endif - size_t pagesz = qemu_real_host_page_size; + size_t pagesz = qemu_real_host_page_size(); #ifdef _SC_THREAD_STACK_MIN /* avoid stacks smaller than _SC_THREAD_STACK_MIN */ long min_stack_sz = sysconf(_SC_THREAD_STACK_MIN); @@ -751,7 +728,7 @@ void qemu_free_stack(void *stack, size_t sz) unsigned int usage; void *ptr; - for (ptr = stack + qemu_real_host_page_size; ptr < stack + sz; + for (ptr = stack + qemu_real_host_page_size(); ptr < stack + sz; ptr += sizeof(uint32_t)) { if (*(uint32_t *)ptr != 0xdeadbeaf) { break; @@ -809,52 +786,35 @@ void sigaction_invoke(struct sigaction *action, action->sa_sigaction(info->ssi_signo, &si, NULL); } -#ifndef HOST_NAME_MAX -# ifdef _POSIX_HOST_NAME_MAX -# define HOST_NAME_MAX _POSIX_HOST_NAME_MAX -# else -# define HOST_NAME_MAX 255 -# endif -#endif - -char *qemu_get_host_name(Error **errp) -{ - long len = -1; - g_autofree char *hostname = NULL; - -#ifdef _SC_HOST_NAME_MAX - len = sysconf(_SC_HOST_NAME_MAX); -#endif /* _SC_HOST_NAME_MAX */ - - if (len < 0) { - len = HOST_NAME_MAX; - } - - /* Unfortunately, gethostname() below does not guarantee a - * NULL terminated string. Therefore, allocate one byte more - * to be sure. 
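qemu_prealloc_mem() above now probes MADV_POPULATE_WRITE on each call and only falls back to the page-touching threads (and the SIGBUS machinery) when the kernel or the mapping cannot use it. A condensed standalone sketch of that probe-and-fallback, not part of the patch; prealloc_range() is an illustrative name and the single-threaded loop stands in for the worker threads.

#include <sys/mman.h>
#include <errno.h>
#include <stddef.h>

static int prealloc_range(char *area, size_t size, size_t page_size)
{
#ifdef MADV_POPULATE_WRITE
    /* Linux 5.14+: let the kernel populate and write-fault the range */
    if (madvise(area, size, MADV_POPULATE_WRITE) == 0) {
        return 0;
    }
    if (errno != EINVAL) {      /* EINVAL: flag unsupported, fall back */
        return -errno;
    }
#endif
    /* Fallback: touch one byte per page; volatile so it is not elided */
    for (size_t off = 0; off < size; off += page_size) {
        *(volatile char *)(area + off) = *(volatile char *)(area + off);
    }
    return 0;
}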
*/ - hostname = g_new0(char, len + 1); - - if (gethostname(hostname, len) < 0) { - error_setg_errno(errp, errno, - "cannot get hostname"); - return NULL; - } - - return g_steal_pointer(&hostname); -} - size_t qemu_get_host_physmem(void) { #ifdef _SC_PHYS_PAGES long pages = sysconf(_SC_PHYS_PAGES); if (pages > 0) { - if (pages > SIZE_MAX / qemu_real_host_page_size) { + if (pages > SIZE_MAX / qemu_real_host_page_size()) { return SIZE_MAX; } else { - return pages * qemu_real_host_page_size; + return pages * qemu_real_host_page_size(); } } #endif return 0; } + +int qemu_msync(void *addr, size_t length, int fd) +{ + size_t align_mask = ~(qemu_real_host_page_size() - 1); + + /** + * There are no strict reqs as per the length of mapping + * to be synced. Still the length needs to follow the address + * alignment changes. Additionally - round the size to the multiple + * of PAGE_SIZE + */ + length += ((uintptr_t)addr & (qemu_real_host_page_size() - 1)); + length = (length + ~align_mask) & align_mask; + + addr = (void *)((uintptr_t)addr & align_mask); + + return msync(addr, length, MS_SYNC); +} diff --git a/util/oslib-win32.c b/util/oslib-win32.c index 6807355c62..e5c7ad5b59 100644 --- a/util/oslib-win32.c +++ b/util/oslib-win32.c @@ -32,7 +32,6 @@ #include "qemu/osdep.h" #include -#include "qemu-common.h" #include "qapi/error.h" #include "qemu/main-loop.h" #include "trace.h" @@ -41,38 +40,6 @@ #include "qemu/error-report.h" #include -/* this must come after including "trace.h" */ -#include - -void *qemu_oom_check(void *ptr) -{ - if (ptr == NULL) { - fprintf(stderr, "Failed to allocate memory: %lu\n", GetLastError()); - abort(); - } - return ptr; -} - -void *qemu_try_memalign(size_t alignment, size_t size) -{ - void *ptr; - - g_assert(size != 0); - if (alignment < sizeof(void *)) { - alignment = sizeof(void *); - } else { - g_assert(is_power_of_2(alignment)); - } - ptr = _aligned_malloc(size, alignment); - trace_qemu_memalign(alignment, size, ptr); - return ptr; -} - -void *qemu_memalign(size_t alignment, size_t size) -{ - return qemu_oom_check(qemu_try_memalign(alignment, size)); -} - static int get_allocation_granularity(void) { SYSTEM_INFO system_info; @@ -104,12 +71,6 @@ void *qemu_anon_ram_alloc(size_t size, uint64_t *align, bool shared, return ptr; } -void qemu_vfree(void *ptr) -{ - trace_qemu_vfree(ptr); - _aligned_free(ptr); -} - void qemu_anon_ram_free(void *ptr, size_t size) { trace_qemu_anon_ram_free(ptr, size); @@ -220,14 +181,14 @@ static int socket_error(void) } } -void qemu_set_block(int fd) +void qemu_socket_set_block(int fd) { unsigned long opt = 0; WSAEventSelect(fd, NULL, 0); ioctlsocket(fd, FIONBIO, &opt); } -int qemu_try_set_nonblock(int fd) +int qemu_socket_try_set_nonblock(int fd) { unsigned long opt = 1; if (ioctlsocket(fd, FIONBIO, &opt) != NO_ERROR) { @@ -236,9 +197,9 @@ int qemu_try_set_nonblock(int fd) return 0; } -void qemu_set_nonblock(int fd) +void qemu_socket_set_nonblock(int fd) { - (void)qemu_try_set_nonblock(fd); + (void)qemu_socket_try_set_nonblock(fd); } int socket_set_fast_reuse(int fd) @@ -265,57 +226,19 @@ void qemu_set_cloexec(int fd) { } -/* Offset between 1/1/1601 and 1/1/1970 in 100 nanosec units */ -#define _W32_FT_OFFSET (116444736000000000ULL) - -int qemu_gettimeofday(qemu_timeval *tp) -{ - union { - unsigned long long ns100; /*time since 1 Jan 1601 in 100ns units */ - FILETIME ft; - } _now; - - if(tp) { - GetSystemTimeAsFileTime (&_now.ft); - tp->tv_usec=(long)((_now.ns100 / 10ULL) % 1000000ULL ); - tp->tv_sec= (long)((_now.ns100 - _W32_FT_OFFSET) / 
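The POSIX qemu_msync() above must hand msync() a page-aligned range, so it rounds the start address down and grows the length to compensate, then rounds the length up to a page multiple. The same arithmetic as a tiny standalone helper, assuming a power-of-two page size; align_range() is an illustrative name.

#include <stdint.h>
#include <stddef.h>

static void align_range(uintptr_t *addr, size_t *length, size_t page_size)
{
    size_t mask = page_size - 1;

    *length += *addr & mask;            /* grow by the bytes cut off the front */
    *length = (*length + mask) & ~mask; /* round length up to a page multiple */
    *addr &= ~mask;                     /* round the start down */
}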
10000000ULL); - } - /* Always return 0 as per Open Group Base Specifications Issue 6. - Do not set errno on error. */ - return 0; -} - int qemu_get_thread_id(void) { return GetCurrentThreadId(); } char * -qemu_get_local_state_pathname(const char *relative_pathname) +qemu_get_local_state_dir(void) { - HRESULT result; - char *ret; - wchar_t wbase_path[MAX_PATH]; - char *base_path; + const char * const *data_dirs = g_get_system_data_dirs(); - result = SHGetFolderPathW(NULL, CSIDL_COMMON_APPDATA, NULL, - /* SHGFP_TYPE_CURRENT */ 0, wbase_path); - if (result != S_OK) { - /* misconfigured environment */ - g_critical("CSIDL_COMMON_APPDATA unavailable: %ld", (long)result); - abort(); - } - base_path = g_utf16_to_utf8(wbase_path, -1, NULL, NULL, NULL); - if (!base_path) { - g_critical("Failed to convert local_state_pathname to UTF-8"); - abort(); - } + g_assert(data_dirs && data_dirs[0]); - ret = g_strdup_printf("%s" G_DIR_SEPARATOR_S "%s", base_path, - relative_pathname); - g_free(base_path); - - return ret; + return g_strdup(data_dirs[0]); } void qemu_set_tty_echo(int fd, bool echo) @@ -337,246 +260,6 @@ void qemu_set_tty_echo(int fd, bool echo) } } -static const char *exec_dir; - -void qemu_init_exec_dir(const char *argv0) -{ - - wchar_t *p; - wchar_t buf[MAX_PATH]; - DWORD len; - - if (exec_dir) { - return; - } - - len = GetModuleFileNameW(NULL, buf, G_N_ELEMENTS(buf) - 1); - if (len == 0) { - return; - } - - buf[len] = 0; - p = buf + len - 1; - while (p != buf && *p != L'\\') { - p--; - } - *p = 0; - if (_waccess(buf, R_OK) == 0) { - exec_dir = g_utf16_to_utf8(buf, -1, NULL, NULL, NULL); - } else { - exec_dir = CONFIG_BINDIR; - } -} - -const char *qemu_get_exec_dir(void) -{ - return exec_dir; -} - -#if !GLIB_CHECK_VERSION(2, 50, 0) -/* - * The original implementation of g_poll from glib has a problem on Windows - * when using timeouts < 10 ms. - * - * Whenever g_poll is called with timeout < 10 ms, it does a quick poll instead - * of wait. This causes significant performance degradation of QEMU. - * - * The following code is a copy of the original code from glib/gpoll.c - * (glib commit 20f4d1820b8d4d0fc4447188e33efffd6d4a88d8 from 2014-02-19). - * Some debug code was removed and the code was reformatted. - * All other code modifications are marked with 'QEMU'. - */ - -/* - * gpoll.c: poll(2) abstraction - * Copyright 1998 Owen Taylor - * Copyright 2008 Red Hat, Inc. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . 
- */ - -static int poll_rest(gboolean poll_msgs, HANDLE *handles, gint nhandles, - GPollFD *fds, guint nfds, gint timeout) -{ - DWORD ready; - GPollFD *f; - int recursed_result; - - if (poll_msgs) { - /* Wait for either messages or handles - * -> Use MsgWaitForMultipleObjectsEx - */ - ready = MsgWaitForMultipleObjectsEx(nhandles, handles, timeout, - QS_ALLINPUT, MWMO_ALERTABLE); - - if (ready == WAIT_FAILED) { - gchar *emsg = g_win32_error_message(GetLastError()); - g_warning("MsgWaitForMultipleObjectsEx failed: %s", emsg); - g_free(emsg); - } - } else if (nhandles == 0) { - /* No handles to wait for, just the timeout */ - if (timeout == INFINITE) { - ready = WAIT_FAILED; - } else { - SleepEx(timeout, TRUE); - ready = WAIT_TIMEOUT; - } - } else { - /* Wait for just handles - * -> Use WaitForMultipleObjectsEx - */ - ready = - WaitForMultipleObjectsEx(nhandles, handles, FALSE, timeout, TRUE); - if (ready == WAIT_FAILED) { - gchar *emsg = g_win32_error_message(GetLastError()); - g_warning("WaitForMultipleObjectsEx failed: %s", emsg); - g_free(emsg); - } - } - - if (ready == WAIT_FAILED) { - return -1; - } else if (ready == WAIT_TIMEOUT || ready == WAIT_IO_COMPLETION) { - return 0; - } else if (poll_msgs && ready == WAIT_OBJECT_0 + nhandles) { - for (f = fds; f < &fds[nfds]; ++f) { - if (f->fd == G_WIN32_MSG_HANDLE && f->events & G_IO_IN) { - f->revents |= G_IO_IN; - } - } - - /* If we have a timeout, or no handles to poll, be satisfied - * with just noticing we have messages waiting. - */ - if (timeout != 0 || nhandles == 0) { - return 1; - } - - /* If no timeout and handles to poll, recurse to poll them, - * too. - */ - recursed_result = poll_rest(FALSE, handles, nhandles, fds, nfds, 0); - return (recursed_result == -1) ? -1 : 1 + recursed_result; - } else if (/* QEMU: removed the following unneeded statement which causes - * a compiler warning: ready >= WAIT_OBJECT_0 && */ - ready < WAIT_OBJECT_0 + nhandles) { - for (f = fds; f < &fds[nfds]; ++f) { - if ((HANDLE) f->fd == handles[ready - WAIT_OBJECT_0]) { - f->revents = f->events; - } - } - - /* If no timeout and polling several handles, recurse to poll - * the rest of them. - */ - if (timeout == 0 && nhandles > 1) { - /* Remove the handle that fired */ - int i; - for (i = ready - WAIT_OBJECT_0 + 1; i < nhandles; i++) { - handles[i-1] = handles[i]; - } - nhandles--; - recursed_result = poll_rest(FALSE, handles, nhandles, fds, nfds, 0); - return (recursed_result == -1) ? -1 : 1 + recursed_result; - } - return 1; - } - - return 0; -} - -gint g_poll_fixed(GPollFD *fds, guint nfds, gint timeout) -{ - HANDLE handles[MAXIMUM_WAIT_OBJECTS]; - gboolean poll_msgs = FALSE; - GPollFD *f; - gint nhandles = 0; - int retval; - - for (f = fds; f < &fds[nfds]; ++f) { - if (f->fd == G_WIN32_MSG_HANDLE && (f->events & G_IO_IN)) { - poll_msgs = TRUE; - } else if (f->fd > 0) { - /* Don't add the same handle several times into the array, as - * docs say that is not allowed, even if it actually does seem - * to work. - */ - gint i; - - for (i = 0; i < nhandles; i++) { - if (handles[i] == (HANDLE) f->fd) { - break; - } - } - - if (i == nhandles) { - if (nhandles == MAXIMUM_WAIT_OBJECTS) { - g_warning("Too many handles to wait for!\n"); - break; - } else { - handles[nhandles++] = (HANDLE) f->fd; - } - } - } - } - - for (f = fds; f < &fds[nfds]; ++f) { - f->revents = 0; - } - - if (timeout == -1) { - timeout = INFINITE; - } - - /* Polling for several things? 
*/ - if (nhandles > 1 || (nhandles > 0 && poll_msgs)) { - /* First check if one or several of them are immediately - * available - */ - retval = poll_rest(poll_msgs, handles, nhandles, fds, nfds, 0); - - /* If not, and we have a significant timeout, poll again with - * timeout then. Note that this will return indication for only - * one event, or only for messages. We ignore timeouts less than - * ten milliseconds as they are mostly pointless on Windows, the - * MsgWaitForMultipleObjectsEx() call will timeout right away - * anyway. - * - * Modification for QEMU: replaced timeout >= 10 by timeout > 0. - */ - if (retval == 0 && (timeout == INFINITE || timeout > 0)) { - retval = poll_rest(poll_msgs, handles, nhandles, - fds, nfds, timeout); - } - } else { - /* Just polling for one thing, so no need to check first if - * available immediately - */ - retval = poll_rest(poll_msgs, handles, nhandles, fds, nfds, timeout); - } - - if (retval == -1) { - for (f = fds; f < &fds[nfds]; ++f) { - f->revents = 0; - } - } - - return retval; -} -#endif - int getpagesize(void) { SYSTEM_INFO system_info; @@ -585,14 +268,14 @@ int getpagesize(void) return system_info.dwPageSize; } -void os_mem_prealloc(int fd, char *area, size_t memory, int smp_cpus, - Error **errp) +void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, + ThreadContext *tc, Error **errp) { int i; - size_t pagesize = qemu_real_host_page_size; + size_t pagesize = qemu_real_host_page_size(); - memory = (memory + pagesize - 1) & -pagesize; - for (i = 0; i < memory / pagesize; i++) { + sz = (sz + pagesize - 1) & -pagesize; + for (i = 0; i < sz / pagesize; i++) { memset(area + pagesize * i, 0, 1); } } @@ -852,19 +535,6 @@ bool qemu_write_pidfile(const char *filename, Error **errp) return true; } -char *qemu_get_host_name(Error **errp) -{ - wchar_t tmp[MAX_COMPUTERNAME_LENGTH + 1]; - DWORD size = G_N_ELEMENTS(tmp); - - if (GetComputerNameW(tmp, &size) == 0) { - error_setg_win32(errp, GetLastError(), "failed close handle"); - return NULL; - } - - return g_utf16_to_utf8(tmp, size, NULL, NULL, NULL); -} - size_t qemu_get_host_physmem(void) { MEMORYSTATUSEX statex; @@ -876,6 +546,16 @@ size_t qemu_get_host_physmem(void) return 0; } +int qemu_msync(void *addr, size_t length, int fd) +{ + /** + * Perform the sync based on the file descriptor + * The sync range will most probably be wider than the one + * requested - but it will still get the job done + */ + return qemu_fdatasync(fd); +} + FILE *qemu_fopen(const char *filename, const char *mode) { wchar_t *wfilename; diff --git a/util/pagesize.c b/util/pagesize.c deleted file mode 100644 index 998632cf6e..0000000000 --- a/util/pagesize.c +++ /dev/null @@ -1,18 +0,0 @@ -/* - * pagesize.c - query the host about its page size - * - * Copyright (C) 2017, Emilio G. Cota - * License: GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#include "qemu/osdep.h" - -uintptr_t qemu_real_host_page_size; -intptr_t qemu_real_host_page_mask; - -static void __attribute__((constructor)) init_real_host_page_size(void) -{ - qemu_real_host_page_size = getpagesize(); - qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size; -} diff --git a/util/qemu-co-timeout.c b/util/qemu-co-timeout.c new file mode 100644 index 0000000000..00cd335649 --- /dev/null +++ b/util/qemu-co-timeout.c @@ -0,0 +1,89 @@ +/* + * Helper functionality for distributing a fixed total amount of + * an abstract resource among multiple coroutines. 
+ * + * Copyright (c) 2022 Virtuozzo International GmbH + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/coroutine.h" +#include "block/aio.h" + +typedef struct QemuCoTimeoutState { + CoroutineEntry *entry; + void *opaque; + QemuCoSleep sleep_state; + bool marker; + CleanupFunc *clean; +} QemuCoTimeoutState; + +static void coroutine_fn qemu_co_timeout_entry(void *opaque) +{ + QemuCoTimeoutState *s = opaque; + + s->entry(s->opaque); + + if (s->marker) { + assert(!s->sleep_state.to_wake); + /* .marker set by qemu_co_timeout, it have been failed */ + if (s->clean) { + s->clean(s->opaque); + } + g_free(s); + } else { + s->marker = true; + qemu_co_sleep_wake(&s->sleep_state); + } +} + +int coroutine_fn qemu_co_timeout(CoroutineEntry *entry, void *opaque, + uint64_t timeout_ns, CleanupFunc clean) +{ + QemuCoTimeoutState *s; + Coroutine *co; + + if (timeout_ns == 0) { + entry(opaque); + return 0; + } + + s = g_new(QemuCoTimeoutState, 1); + *s = (QemuCoTimeoutState) { + .entry = entry, + .opaque = opaque, + .clean = clean + }; + + co = qemu_coroutine_create(qemu_co_timeout_entry, s); + + aio_co_enter(qemu_get_current_aio_context(), co); + qemu_co_sleep_ns_wakeable(&s->sleep_state, QEMU_CLOCK_REALTIME, timeout_ns); + + if (s->marker) { + /* .marker set by qemu_co_timeout_entry, success */ + g_free(s); + return 0; + } + + /* Don't free s, as we can't cancel qemu_co_timeout_entry execution */ + s->marker = true; + return -ETIMEDOUT; +} diff --git a/util/qemu-config.c b/util/qemu-config.c index 0d7126df94..21fb41ec71 100644 --- a/util/qemu-config.c +++ b/util/qemu-config.c @@ -314,48 +314,6 @@ void qemu_add_opts(QemuOptsList *list) abort(); } -struct ConfigWriteData { - QemuOptsList *list; - FILE *fp; -}; - -static int config_write_opt(void *opaque, const char *name, const char *value, - Error **errp) -{ - struct ConfigWriteData *data = opaque; - - fprintf(data->fp, " %s = \"%s\"\n", name, value); - return 0; -} - -static int config_write_opts(void *opaque, QemuOpts *opts, Error **errp) -{ - struct ConfigWriteData *data = opaque; - const char *id = qemu_opts_id(opts); - - if (id) { - fprintf(data->fp, "[%s \"%s\"]\n", data->list->name, id); - } else { - fprintf(data->fp, "[%s]\n", data->list->name); - } - qemu_opt_foreach(opts, config_write_opt, data, NULL); - fprintf(data->fp, "\n"); - return 0; -} - -void qemu_config_write(FILE *fp) -{ - struct ConfigWriteData data = { .fp = fp }; - QemuOptsList **lists = vm_config_groups; - int 
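qemu_co_timeout() above runs an entry function in a helper coroutine and returns -ETIMEDOUT if it has not finished within timeout_ns; in that case the CleanupFunc later releases the opaque state once the entry eventually completes. A hypothetical usage sketch (QEMU coroutine context assumed, error handling trimmed); FetchData, fetch_data_entry(), fetch_data_free() and fetch_with_deadline() are illustrative names, not QEMU APIs.

typedef struct FetchData {
    int result;
} FetchData;

static void coroutine_fn fetch_data_entry(void *opaque)
{
    FetchData *d = opaque;
    /* ... potentially long-running coroutine work ... */
    d->result = 0;
}

static void fetch_data_free(void *opaque)
{
    g_free(opaque);
}

static int coroutine_fn fetch_with_deadline(void)
{
    FetchData *d = g_new0(FetchData, 1);
    int ret = qemu_co_timeout(fetch_data_entry, d, 5 * NANOSECONDS_PER_SECOND,
                              fetch_data_free);
    if (ret == -ETIMEDOUT) {
        /* entry still running; ownership of d passed to fetch_data_free() */
        return ret;
    }
    ret = d->result;    /* entry finished in time, cleanup was not called */
    g_free(d);
    return ret;
}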
i; - - fprintf(fp, "# qemu config file\n\n"); - for (i = 0; lists[i] != NULL; i++) { - data.list = lists[i]; - qemu_opts_foreach(data.list, config_write_opts, &data, NULL); - } -} - /* Returns number of config groups on success, -errno on error */ static int qemu_config_foreach(FILE *fp, QEMUConfigCB *cb, void *opaque, const char *fname, Error **errp) diff --git a/util/qemu-coroutine-io.c b/util/qemu-coroutine-io.c index 5b80bb416f..d791932d63 100644 --- a/util/qemu-coroutine-io.c +++ b/util/qemu-coroutine-io.c @@ -23,7 +23,6 @@ * THE SOFTWARE. */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/sockets.h" #include "qemu/coroutine.h" #include "qemu/iov.h" @@ -75,7 +74,8 @@ typedef struct { static void fd_coroutine_enter(void *opaque) { FDYieldUntilData *data = opaque; - aio_set_fd_handler(data->ctx, data->fd, false, NULL, NULL, NULL, NULL); + aio_set_fd_handler(data->ctx, data->fd, false, + NULL, NULL, NULL, NULL, NULL); qemu_coroutine_enter(data->co); } @@ -88,6 +88,6 @@ void coroutine_fn yield_until_fd_readable(int fd) data.co = qemu_coroutine_self(); data.fd = fd; aio_set_fd_handler( - data.ctx, fd, false, fd_coroutine_enter, NULL, NULL, &data); + data.ctx, fd, false, fd_coroutine_enter, NULL, NULL, NULL, &data); qemu_coroutine_yield(); } diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c index 2669403839..45c6b57374 100644 --- a/util/qemu-coroutine-lock.c +++ b/util/qemu-coroutine-lock.c @@ -39,10 +39,15 @@ void qemu_co_queue_init(CoQueue *queue) QSIMPLEQ_INIT(&queue->entries); } -void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock) +void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock, + CoQueueWaitFlags flags) { Coroutine *self = qemu_coroutine_self(); - QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next); + if (flags & CO_QUEUE_WAIT_FRONT) { + QSIMPLEQ_INSERT_HEAD(&queue->entries, self, co_queue_next); + } else { + QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next); + } if (lock) { qemu_lockable_unlock(lock); @@ -67,34 +72,6 @@ void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock) } } -static bool qemu_co_queue_do_restart(CoQueue *queue, bool single) -{ - Coroutine *next; - - if (QSIMPLEQ_EMPTY(&queue->entries)) { - return false; - } - - while ((next = QSIMPLEQ_FIRST(&queue->entries)) != NULL) { - QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next); - aio_co_wake(next); - if (single) { - break; - } - } - return true; -} - -bool qemu_co_queue_next(CoQueue *queue) -{ - return qemu_co_queue_do_restart(queue, true); -} - -void qemu_co_queue_restart_all(CoQueue *queue) -{ - qemu_co_queue_do_restart(queue, false); -} - bool qemu_co_enter_next_impl(CoQueue *queue, QemuLockable *lock) { Coroutine *next; @@ -115,6 +92,25 @@ bool qemu_co_enter_next_impl(CoQueue *queue, QemuLockable *lock) return true; } +bool coroutine_fn qemu_co_queue_next(CoQueue *queue) +{ + /* No unlock/lock needed in coroutine context. */ + return qemu_co_enter_next_impl(queue, NULL); +} + +void qemu_co_enter_all_impl(CoQueue *queue, QemuLockable *lock) +{ + while (qemu_co_enter_next_impl(queue, lock)) { + /* just loop */ + } +} + +void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue) +{ + /* No unlock/lock needed in coroutine context. 
*/ + qemu_co_enter_all_impl(queue, NULL); +} + bool qemu_co_queue_empty(CoQueue *queue) { return QSIMPLEQ_FIRST(&queue->entries) == NULL; @@ -144,7 +140,7 @@ typedef struct CoWaitRecord { QSLIST_ENTRY(CoWaitRecord) next; } CoWaitRecord; -static void push_waiter(CoMutex *mutex, CoWaitRecord *w) +static void coroutine_fn push_waiter(CoMutex *mutex, CoWaitRecord *w) { w->co = qemu_coroutine_self(); QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next); @@ -341,7 +337,7 @@ void qemu_co_rwlock_init(CoRwlock *lock) } /* Releases the internal CoMutex. */ -static void qemu_co_rwlock_maybe_wake_one(CoRwlock *lock) +static void coroutine_fn qemu_co_rwlock_maybe_wake_one(CoRwlock *lock) { CoRwTicket *tkt = QSIMPLEQ_FIRST(&lock->tickets); Coroutine *co = NULL; @@ -374,7 +370,7 @@ static void qemu_co_rwlock_maybe_wake_one(CoRwlock *lock) } } -void qemu_co_rwlock_rdlock(CoRwlock *lock) +void coroutine_fn qemu_co_rwlock_rdlock(CoRwlock *lock) { Coroutine *self = qemu_coroutine_self(); @@ -399,7 +395,7 @@ void qemu_co_rwlock_rdlock(CoRwlock *lock) self->locks_held++; } -void qemu_co_rwlock_unlock(CoRwlock *lock) +void coroutine_fn qemu_co_rwlock_unlock(CoRwlock *lock) { Coroutine *self = qemu_coroutine_self(); @@ -417,7 +413,7 @@ void qemu_co_rwlock_unlock(CoRwlock *lock) qemu_co_rwlock_maybe_wake_one(lock); } -void qemu_co_rwlock_downgrade(CoRwlock *lock) +void coroutine_fn qemu_co_rwlock_downgrade(CoRwlock *lock) { qemu_co_mutex_lock(&lock->mutex); assert(lock->owners == -1); @@ -427,7 +423,7 @@ void qemu_co_rwlock_downgrade(CoRwlock *lock) qemu_co_rwlock_maybe_wake_one(lock); } -void qemu_co_rwlock_wrlock(CoRwlock *lock) +void coroutine_fn qemu_co_rwlock_wrlock(CoRwlock *lock) { Coroutine *self = qemu_coroutine_self(); @@ -447,7 +443,7 @@ void qemu_co_rwlock_wrlock(CoRwlock *lock) self->locks_held++; } -void qemu_co_rwlock_upgrade(CoRwlock *lock) +void coroutine_fn qemu_co_rwlock_upgrade(CoRwlock *lock) { qemu_co_mutex_lock(&lock->mutex); assert(lock->owners > 0); diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c index 38fb6d3084..356b746f0b 100644 --- a/util/qemu-coroutine.c +++ b/util/qemu-coroutine.c @@ -18,26 +18,38 @@ #include "qemu/atomic.h" #include "qemu/coroutine.h" #include "qemu/coroutine_int.h" +#include "qemu/coroutine-tls.h" #include "block/aio.h" +/** + * The minimal batch size is always 64, coroutines from the release_pool are + * reused as soon as there are 64 coroutines in it. The maximum pool size starts + * with 64 and is increased on demand so that coroutines are not deleted even if + * they are not immediately reused. 
+ */ enum { - POOL_BATCH_SIZE = 64, + POOL_MIN_BATCH_SIZE = 64, + POOL_INITIAL_MAX_SIZE = 64, }; /** Free list to speed up creation */ static QSLIST_HEAD(, Coroutine) release_pool = QSLIST_HEAD_INITIALIZER(pool); +static unsigned int pool_max_size = POOL_INITIAL_MAX_SIZE; static unsigned int release_pool_size; -static __thread QSLIST_HEAD(, Coroutine) alloc_pool = QSLIST_HEAD_INITIALIZER(pool); -static __thread unsigned int alloc_pool_size; -static __thread Notifier coroutine_pool_cleanup_notifier; + +typedef QSLIST_HEAD(, Coroutine) CoroutineQSList; +QEMU_DEFINE_STATIC_CO_TLS(CoroutineQSList, alloc_pool); +QEMU_DEFINE_STATIC_CO_TLS(unsigned int, alloc_pool_size); +QEMU_DEFINE_STATIC_CO_TLS(Notifier, coroutine_pool_cleanup_notifier); static void coroutine_pool_cleanup(Notifier *n, void *value) { Coroutine *co; Coroutine *tmp; + CoroutineQSList *alloc_pool = get_ptr_alloc_pool(); - QSLIST_FOREACH_SAFE(co, &alloc_pool, pool_next, tmp) { - QSLIST_REMOVE_HEAD(&alloc_pool, pool_next); + QSLIST_FOREACH_SAFE(co, alloc_pool, pool_next, tmp) { + QSLIST_REMOVE_HEAD(alloc_pool, pool_next); qemu_coroutine_delete(co); } } @@ -47,27 +59,30 @@ Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque) Coroutine *co = NULL; if (CONFIG_COROUTINE_POOL) { - co = QSLIST_FIRST(&alloc_pool); + CoroutineQSList *alloc_pool = get_ptr_alloc_pool(); + + co = QSLIST_FIRST(alloc_pool); if (!co) { - if (release_pool_size > POOL_BATCH_SIZE) { + if (release_pool_size > POOL_MIN_BATCH_SIZE) { /* Slow path; a good place to register the destructor, too. */ - if (!coroutine_pool_cleanup_notifier.notify) { - coroutine_pool_cleanup_notifier.notify = coroutine_pool_cleanup; - qemu_thread_atexit_add(&coroutine_pool_cleanup_notifier); + Notifier *notifier = get_ptr_coroutine_pool_cleanup_notifier(); + if (!notifier->notify) { + notifier->notify = coroutine_pool_cleanup; + qemu_thread_atexit_add(notifier); } /* This is not exact; there could be a little skew between * release_pool_size and the actual size of release_pool. But * it is just a heuristic, it does not need to be perfect. 
*/ - alloc_pool_size = qatomic_xchg(&release_pool_size, 0); - QSLIST_MOVE_ATOMIC(&alloc_pool, &release_pool); - co = QSLIST_FIRST(&alloc_pool); + set_alloc_pool_size(qatomic_xchg(&release_pool_size, 0)); + QSLIST_MOVE_ATOMIC(alloc_pool, &release_pool); + co = QSLIST_FIRST(alloc_pool); } } if (co) { - QSLIST_REMOVE_HEAD(&alloc_pool, pool_next); - alloc_pool_size--; + QSLIST_REMOVE_HEAD(alloc_pool, pool_next); + set_alloc_pool_size(get_alloc_pool_size() - 1); } } @@ -86,14 +101,14 @@ static void coroutine_delete(Coroutine *co) co->caller = NULL; if (CONFIG_COROUTINE_POOL) { - if (release_pool_size < POOL_BATCH_SIZE * 2) { + if (release_pool_size < qatomic_read(&pool_max_size) * 2) { QSLIST_INSERT_HEAD_ATOMIC(&release_pool, co, pool_next); qatomic_inc(&release_pool_size); return; } - if (alloc_pool_size < POOL_BATCH_SIZE) { - QSLIST_INSERT_HEAD(&alloc_pool, co, pool_next); - alloc_pool_size++; + if (get_alloc_pool_size() < qatomic_read(&pool_max_size)) { + QSLIST_INSERT_HEAD(get_ptr_alloc_pool(), co, pool_next); + set_alloc_pool_size(get_alloc_pool_size() + 1); return; } } @@ -198,7 +213,17 @@ bool qemu_coroutine_entered(Coroutine *co) return co->caller; } -AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co) +AioContext *qemu_coroutine_get_aio_context(Coroutine *co) { return co->ctx; } + +void qemu_coroutine_inc_pool_size(unsigned int additional_pool_size) +{ + qatomic_add(&pool_max_size, additional_pool_size); +} + +void qemu_coroutine_dec_pool_size(unsigned int removing_pool_size) +{ + qatomic_sub(&pool_max_size, removing_pool_size); +} diff --git a/util/qemu-openpty.c b/util/qemu-openpty.c deleted file mode 100644 index eb17f5b0bc..0000000000 --- a/util/qemu-openpty.c +++ /dev/null @@ -1,140 +0,0 @@ -/* - * qemu-openpty.c - * - * Copyright (c) 2003-2008 Fabrice Bellard - * Copyright (c) 2010 Red Hat, Inc. - * - * Wrapper function qemu_openpty() implementation. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -/* - * This is not part of oslib-posix.c because this function - * uses openpty() which often in -lutil, and if we add this - * dependency to oslib-posix.o, every app will have to be - * linked with -lutil. 
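qemu-coroutine.c above replaces plain __thread variables with QEMU_DEFINE_STATIC_CO_TLS() accessors so thread-local pool state is re-resolved instead of being cached across points where a coroutine may migrate to another thread. A small sketch of that accessor pattern, separate from the patch and assuming the QEMU build environment; request_count and bump_request_count() are illustrative names.

#include "qemu/osdep.h"
#include "qemu/coroutine-tls.h"

/* generates get_request_count(), set_request_count() and
 * get_ptr_request_count() instead of exposing a raw __thread variable */
QEMU_DEFINE_STATIC_CO_TLS(unsigned int, request_count);

static void bump_request_count(void)
{
    set_request_count(get_request_count() + 1);
}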
- */ - -#include "qemu/osdep.h" -#include "qemu-common.h" - -#if defined HAVE_PTY_H -# include -#elif defined CONFIG_BSD -# include -# if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__) -# include -# else -# include -# endif -#elif defined CONFIG_SOLARIS -# include -# include -#else -# include -#endif - -#ifdef __sun__ - -#if !defined(HAVE_OPENPTY) -/* Once illumos has openpty(), this is going to be removed. */ -static int openpty(int *amaster, int *aslave, char *name, - struct termios *termp, struct winsize *winp) -{ - const char *slave; - int mfd = -1, sfd = -1; - - *amaster = *aslave = -1; - - mfd = open("/dev/ptmx", O_RDWR | O_NOCTTY); - if (mfd < 0) - goto err; - - if (grantpt(mfd) == -1 || unlockpt(mfd) == -1) - goto err; - - if ((slave = ptsname(mfd)) == NULL) - goto err; - - if ((sfd = open(slave, O_RDONLY | O_NOCTTY)) == -1) - goto err; - - if (ioctl(sfd, I_PUSH, "ptem") == -1 || - (termp != NULL && tcgetattr(sfd, termp) < 0)) - goto err; - - if (amaster) - *amaster = mfd; - if (aslave) - *aslave = sfd; - if (winp) - ioctl(sfd, TIOCSWINSZ, winp); - - return 0; - -err: - if (sfd != -1) - close(sfd); - close(mfd); - return -1; -} -#endif - -static void cfmakeraw (struct termios *termios_p) -{ - termios_p->c_iflag &= - ~(IGNBRK|BRKINT|PARMRK|ISTRIP|INLCR|IGNCR|ICRNL|IXON); - termios_p->c_oflag &= ~OPOST; - termios_p->c_lflag &= ~(ECHO|ECHONL|ICANON|ISIG|IEXTEN); - termios_p->c_cflag &= ~(CSIZE|PARENB); - termios_p->c_cflag |= CS8; - - termios_p->c_cc[VMIN] = 0; - termios_p->c_cc[VTIME] = 0; -} -#endif - -int qemu_openpty_raw(int *aslave, char *pty_name) -{ - int amaster; - struct termios tty; -#if defined(__OpenBSD__) || defined(__DragonFly__) - char pty_buf[PATH_MAX]; -#define q_ptsname(x) pty_buf -#else - char *pty_buf = NULL; -#define q_ptsname(x) ptsname(x) -#endif - - if (openpty(&amaster, aslave, pty_buf, NULL, NULL) < 0) { - return -1; - } - - /* Set raw attributes on the pty. 
*/ - tcgetattr(*aslave, &tty); - cfmakeraw(&tty); - tcsetattr(*aslave, TCSAFLUSH, &tty); - - if (pty_name) { - strcpy(pty_name, q_ptsname(amaster)); - } - - return amaster; -} diff --git a/util/qemu-option.c b/util/qemu-option.c index 61cb4a97bd..eedd08929b 100644 --- a/util/qemu-option.c +++ b/util/qemu-option.c @@ -1126,11 +1126,11 @@ int qemu_opts_foreach(QemuOptsList *list, qemu_opts_loopfunc func, void *opaque, Error **errp) { Location loc; - QemuOpts *opts; + QemuOpts *opts, *next; int rc = 0; loc_push_none(&loc); - QTAILQ_FOREACH(opts, &list->head, next) { + QTAILQ_FOREACH_SAFE(opts, &list->head, next, next) { loc_restore(&opts->loc); rc = func(opaque, opts, errp); if (rc) { diff --git a/util/qemu-progress.c b/util/qemu-progress.c index 20d51f8c12..aa994668f1 100644 --- a/util/qemu-progress.c +++ b/util/qemu-progress.c @@ -23,7 +23,7 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" +#include "qemu/qemu-progress.h" struct progress_state { float current; diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c index f2f3676d1f..d185245023 100644 --- a/util/qemu-sockets.c +++ b/util/qemu-sockets.c @@ -21,7 +21,6 @@ #include #endif /* CONFIG_AF_VSOCK */ -#include "qemu-common.h" #include "monitor/monitor.h" #include "qapi/clone-visitor.h" #include "qapi/error.h" @@ -97,7 +96,7 @@ bool fd_is_socket(int fd) { int optval; socklen_t optlen = sizeof(optval); - return !qemu_getsockopt(fd, SOL_SOCKET, SO_TYPE, &optval, &optlen); + return !getsockopt(fd, SOL_SOCKET, SO_TYPE, &optval, &optlen); } @@ -185,8 +184,8 @@ static int try_bind(int socket, InetSocketAddress *saddr, struct addrinfo *e) rebind: if (e->ai_family == PF_INET6) { - qemu_setsockopt(socket, IPPROTO_IPV6, IPV6_V6ONLY, &v6only, - sizeof(v6only)); + setsockopt(socket, IPPROTO_IPV6, IPV6_V6ONLY, &v6only, + sizeof(v6only)); } stat = bind(socket, e->ai_addr, e->ai_addrlen); @@ -278,7 +277,7 @@ static int inet_listen_saddr(InetSocketAddress *saddr, /* create socket + bind/listen */ for (e = res; e != NULL; e = e->ai_next) { -#ifdef IPPROTO_MPTCP +#ifdef HAVE_IPPROTO_MPTCP if (saddr->has_mptcp && saddr->mptcp) { e->ai_protocol = IPPROTO_MPTCP; } @@ -462,7 +461,7 @@ int inet_connect_saddr(InetSocketAddress *saddr, Error **errp) error_free(local_err); local_err = NULL; -#ifdef IPPROTO_MPTCP +#ifdef HAVE_IPPROTO_MPTCP if (saddr->has_mptcp && saddr->mptcp) { e->ai_protocol = IPPROTO_MPTCP; } @@ -483,12 +482,12 @@ int inet_connect_saddr(InetSocketAddress *saddr, Error **errp) if (saddr->keep_alive) { int val = 1; - int ret = qemu_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, - &val, sizeof(val)); + int ret = setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, + &val, sizeof(val)); if (ret < 0) { error_setg_errno(errp, errno, "Unable to set KEEPALIVE"); - close(sock); + closesocket(sock); return -1; } } @@ -699,7 +698,7 @@ int inet_parse(InetSocketAddress *addr, const char *str, Error **errp) } addr->has_keep_alive = true; } -#ifdef IPPROTO_MPTCP +#ifdef HAVE_IPPROTO_MPTCP begin = strstr(optstr, ",mptcp"); if (begin) { if (inet_parse_flag("mptcp", begin + strlen(",mptcp"), @@ -881,8 +880,6 @@ static int vsock_parse(VsockSocketAddress *addr, const char *str, } #endif /* CONFIG_AF_VSOCK */ -#ifndef _WIN32 - static bool saddr_is_abstract(UnixSocketAddress *saddr) { #ifdef CONFIG_LINUX @@ -922,9 +919,8 @@ static int unix_listen_saddr(UnixSocketAddress *saddr, if (saddr->path[0] || abstract) { path = saddr->path; } else { - const char *tmpdir = getenv("TMPDIR"); - tmpdir = tmpdir ? 
tmpdir : "/tmp"; - path = pathbuf = g_strdup_printf("%s/qemu-socket-XXXXXX", tmpdir); + path = pathbuf = g_strdup_printf("%s/qemu-socket-XXXXXX", + g_get_tmp_dir()); } pathlen = strlen(path); @@ -1051,29 +1047,10 @@ static int unix_connect_saddr(UnixSocketAddress *saddr, Error **errp) return sock; err: - close(sock); + closesocket(sock); return -1; } -#else - -static int unix_listen_saddr(UnixSocketAddress *saddr, - int num, - Error **errp) -{ - error_setg(errp, "unix sockets are not available on windows"); - errno = ENOTSUP; - return -1; -} - -static int unix_connect_saddr(UnixSocketAddress *saddr, Error **errp) -{ - error_setg(errp, "unix sockets are not available on windows"); - errno = ENOTSUP; - return -1; -} -#endif - /* compatibility wrapper */ int unix_listen(const char *str, Error **errp) { @@ -1099,6 +1076,26 @@ int unix_connect(const char *path, Error **errp) return sock; } +char *socket_uri(SocketAddress *addr) +{ + switch (addr->type) { + case SOCKET_ADDRESS_TYPE_INET: + return g_strdup_printf("tcp:%s:%s", + addr->u.inet.host, + addr->u.inet.port); + case SOCKET_ADDRESS_TYPE_UNIX: + return g_strdup_printf("unix:%s", + addr->u.q_unix.path); + case SOCKET_ADDRESS_TYPE_FD: + return g_strdup_printf("fd:%s", addr->u.fd.str); + case SOCKET_ADDRESS_TYPE_VSOCK: + return g_strdup_printf("vsock:%s:%s", + addr->u.vsock.cid, + addr->u.vsock.port); + default: + return g_strdup("unknown address type"); + } +} SocketAddress *socket_parse(const char *str, Error **errp) { @@ -1126,6 +1123,11 @@ SocketAddress *socket_parse(const char *str, Error **errp) if (vsock_parse(&addr->u.vsock, str + strlen("vsock:"), errp)) { goto fail; } + } else if (strstart(str, "tcp:", NULL)) { + addr->type = SOCKET_ADDRESS_TYPE_INET; + if (inet_parse(&addr->u.inet, str + strlen("tcp:"), errp)) { + goto fail; + } } else { addr->type = SOCKET_ADDRESS_TYPE_INET; if (inet_parse(&addr->u.inet, str, errp)) { @@ -1336,7 +1338,6 @@ socket_sockaddr_to_address_inet(struct sockaddr_storage *sa, } -#ifndef WIN32 static SocketAddress * socket_sockaddr_to_address_unix(struct sockaddr_storage *sa, socklen_t salen, @@ -1345,28 +1346,24 @@ socket_sockaddr_to_address_unix(struct sockaddr_storage *sa, SocketAddress *addr; struct sockaddr_un *su = (struct sockaddr_un *)sa; - assert(salen >= sizeof(su->sun_family) + 1 && - salen <= sizeof(struct sockaddr_un)); - addr = g_new0(SocketAddress, 1); addr->type = SOCKET_ADDRESS_TYPE_UNIX; + salen -= offsetof(struct sockaddr_un, sun_path); #ifdef CONFIG_LINUX - if (!su->sun_path[0]) { + if (salen > 0 && !su->sun_path[0]) { /* Linux abstract socket */ - addr->u.q_unix.path = g_strndup(su->sun_path + 1, - salen - sizeof(su->sun_family) - 1); + addr->u.q_unix.path = g_strndup(su->sun_path + 1, salen - 1); addr->u.q_unix.has_abstract = true; addr->u.q_unix.abstract = true; addr->u.q_unix.has_tight = true; - addr->u.q_unix.tight = salen < sizeof(*su); + addr->u.q_unix.tight = salen < sizeof(su->sun_path); return addr; } #endif - addr->u.q_unix.path = g_strndup(su->sun_path, sizeof(su->sun_path)); + addr->u.q_unix.path = g_strndup(su->sun_path, salen); return addr; } -#endif /* WIN32 */ #ifdef CONFIG_AF_VSOCK static SocketAddress * @@ -1398,10 +1395,8 @@ socket_sockaddr_to_address(struct sockaddr_storage *sa, case AF_INET6: return socket_sockaddr_to_address_inet(sa, salen, errp); -#ifndef WIN32 case AF_UNIX: return socket_sockaddr_to_address_unix(sa, salen, errp); -#endif /* WIN32 */ #ifdef CONFIG_AF_VSOCK case AF_VSOCK: @@ -1458,22 +1453,22 @@ SocketAddress 
*socket_address_flatten(SocketAddressLegacy *addr_legacy) addr = g_new(SocketAddress, 1); switch (addr_legacy->type) { - case SOCKET_ADDRESS_LEGACY_KIND_INET: + case SOCKET_ADDRESS_TYPE_INET: addr->type = SOCKET_ADDRESS_TYPE_INET; QAPI_CLONE_MEMBERS(InetSocketAddress, &addr->u.inet, addr_legacy->u.inet.data); break; - case SOCKET_ADDRESS_LEGACY_KIND_UNIX: + case SOCKET_ADDRESS_TYPE_UNIX: addr->type = SOCKET_ADDRESS_TYPE_UNIX; QAPI_CLONE_MEMBERS(UnixSocketAddress, &addr->u.q_unix, addr_legacy->u.q_unix.data); break; - case SOCKET_ADDRESS_LEGACY_KIND_VSOCK: + case SOCKET_ADDRESS_TYPE_VSOCK: addr->type = SOCKET_ADDRESS_TYPE_VSOCK; QAPI_CLONE_MEMBERS(VsockSocketAddress, &addr->u.vsock, addr_legacy->u.vsock.data); break; - case SOCKET_ADDRESS_LEGACY_KIND_FD: + case SOCKET_ADDRESS_TYPE_FD: addr->type = SOCKET_ADDRESS_TYPE_FD; QAPI_CLONE_MEMBERS(String, &addr->u.fd, addr_legacy->u.fd.data); break; diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c index fd9d714038..bae938c670 100644 --- a/util/qemu-thread-posix.c +++ b/util/qemu-thread-posix.c @@ -16,6 +16,7 @@ #include "qemu/notify.h" #include "qemu-thread-common.h" #include "qemu/tsan.h" +#include "qemu/bitmap.h" static bool name_threads; @@ -23,7 +24,8 @@ void qemu_thread_naming(bool enable) { name_threads = enable; -#ifndef CONFIG_THREAD_SETNAME_BYTHREAD +#if !defined CONFIG_PTHREAD_SETNAME_NP_W_TID && \ + !defined CONFIG_PTHREAD_SETNAME_NP_WO_TID /* This is a debugging option, not fatal */ if (enable) { fprintf(stderr, "qemu: thread naming not supported on this host\n"); @@ -37,12 +39,20 @@ static void error_exit(int err, const char *msg) abort(); } +static inline clockid_t qemu_timedwait_clockid(void) +{ +#ifdef CONFIG_PTHREAD_CONDATTR_SETCLOCK + return CLOCK_MONOTONIC; +#else + return CLOCK_REALTIME; +#endif +} + static void compute_abs_deadline(struct timespec *ts, int ms) { - struct timeval tv; - gettimeofday(&tv, NULL); - ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000; - ts->tv_sec = tv.tv_sec + ms / 1000; + clock_gettime(qemu_timedwait_clockid(), ts); + ts->tv_nsec += (ms % 1000) * 1000000; + ts->tv_sec += ms / 1000; if (ts->tv_nsec >= 1000000000) { ts->tv_sec++; ts->tv_nsec -= 1000000000; @@ -146,11 +156,27 @@ void qemu_rec_mutex_unlock_impl(QemuRecMutex *mutex, const char *file, int line) void qemu_cond_init(QemuCond *cond) { + pthread_condattr_t attr; int err; - err = pthread_cond_init(&cond->cond, NULL); - if (err) + err = pthread_condattr_init(&attr); + if (err) { error_exit(err, __func__); + } +#ifdef CONFIG_PTHREAD_CONDATTR_SETCLOCK + err = pthread_condattr_setclock(&attr, qemu_timedwait_clockid()); + if (err) { + error_exit(err, __func__); + } +#endif + err = pthread_cond_init(&cond->cond, &attr); + if (err) { + error_exit(err, __func__); + } + err = pthread_condattr_destroy(&attr); + if (err) { + error_exit(err, __func__); + } cond->initialized = true; } @@ -197,16 +223,15 @@ void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex, const char *file, con error_exit(err, __func__); } -bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms, - const char *file, const int line) +static bool +qemu_cond_timedwait_ts(QemuCond *cond, QemuMutex *mutex, struct timespec *ts, + const char *file, const int line) { int err; - struct timespec ts; assert(cond->initialized); trace_qemu_mutex_unlock(mutex, file, line); - compute_abs_deadline(&ts, ms); - err = pthread_cond_timedwait(&cond->cond, &mutex->lock, &ts); + err = pthread_cond_timedwait(&cond->cond, &mutex->lock, ts); 
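qemu-thread-posix.c above binds QemuCond to CLOCK_MONOTONIC when pthread_condattr_setclock() is available, so qemu_cond_timedwait() deadlines no longer drift when the wall clock is stepped. A standalone sketch of the same pattern, not part of the patch; _POSIX_MONOTONIC_CLOCK stands in for the patch's CONFIG_PTHREAD_CONDATTR_SETCLOCK probe, and init_monotonic_cond()/wait_ms() are illustrative helpers.

#include <pthread.h>
#include <time.h>
#include <unistd.h>
#include <errno.h>

#ifdef _POSIX_MONOTONIC_CLOCK
#define DEADLINE_CLOCK CLOCK_MONOTONIC
#else
#define DEADLINE_CLOCK CLOCK_REALTIME
#endif

static void init_monotonic_cond(pthread_cond_t *cond)
{
    pthread_condattr_t attr;

    pthread_condattr_init(&attr);
#ifdef _POSIX_MONOTONIC_CLOCK
    /* the condvar's clock must match the clock used for deadlines below */
    pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
#endif
    pthread_cond_init(cond, &attr);
    pthread_condattr_destroy(&attr);
}

static int wait_ms(pthread_cond_t *cond, pthread_mutex_t *lock, int ms)
{
    struct timespec ts;

    clock_gettime(DEADLINE_CLOCK, &ts);     /* absolute deadline, same clock */
    ts.tv_sec += ms / 1000;
    ts.tv_nsec += (ms % 1000) * 1000000;
    if (ts.tv_nsec >= 1000000000) {
        ts.tv_sec++;
        ts.tv_nsec -= 1000000000;
    }
    return pthread_cond_timedwait(cond, lock, &ts); /* 0 or ETIMEDOUT */
}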
trace_qemu_mutex_locked(mutex, file, line); if (err && err != ETIMEDOUT) { error_exit(err, __func__); @@ -214,152 +239,77 @@ bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms, return err != ETIMEDOUT; } +bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms, + const char *file, const int line) +{ + struct timespec ts; + + compute_abs_deadline(&ts, ms); + return qemu_cond_timedwait_ts(cond, mutex, &ts, file, line); +} + void qemu_sem_init(QemuSemaphore *sem, int init) { - int rc; + qemu_mutex_init(&sem->mutex); + qemu_cond_init(&sem->cond); -#ifndef CONFIG_SEM_TIMEDWAIT - rc = pthread_mutex_init(&sem->lock, NULL); - if (rc != 0) { - error_exit(rc, __func__); - } - rc = pthread_cond_init(&sem->cond, NULL); - if (rc != 0) { - error_exit(rc, __func__); - } if (init < 0) { error_exit(EINVAL, __func__); } sem->count = init; -#else - rc = sem_init(&sem->sem, 0, init); - if (rc < 0) { - error_exit(errno, __func__); - } -#endif - sem->initialized = true; } void qemu_sem_destroy(QemuSemaphore *sem) { - int rc; - - assert(sem->initialized); - sem->initialized = false; -#ifndef CONFIG_SEM_TIMEDWAIT - rc = pthread_cond_destroy(&sem->cond); - if (rc < 0) { - error_exit(rc, __func__); - } - rc = pthread_mutex_destroy(&sem->lock); - if (rc < 0) { - error_exit(rc, __func__); - } -#else - rc = sem_destroy(&sem->sem); - if (rc < 0) { - error_exit(errno, __func__); - } -#endif + qemu_cond_destroy(&sem->cond); + qemu_mutex_destroy(&sem->mutex); } void qemu_sem_post(QemuSemaphore *sem) { - int rc; - - assert(sem->initialized); -#ifndef CONFIG_SEM_TIMEDWAIT - pthread_mutex_lock(&sem->lock); + qemu_mutex_lock(&sem->mutex); if (sem->count == UINT_MAX) { - rc = EINVAL; + error_exit(EINVAL, __func__); } else { sem->count++; - rc = pthread_cond_signal(&sem->cond); + qemu_cond_signal(&sem->cond); } - pthread_mutex_unlock(&sem->lock); - if (rc != 0) { - error_exit(rc, __func__); - } -#else - rc = sem_post(&sem->sem); - if (rc < 0) { - error_exit(errno, __func__); - } -#endif + qemu_mutex_unlock(&sem->mutex); } int qemu_sem_timedwait(QemuSemaphore *sem, int ms) { - int rc; + bool rc = true; struct timespec ts; - assert(sem->initialized); -#ifndef CONFIG_SEM_TIMEDWAIT - rc = 0; compute_abs_deadline(&ts, ms); - pthread_mutex_lock(&sem->lock); + qemu_mutex_lock(&sem->mutex); while (sem->count == 0) { - rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts); - if (rc == ETIMEDOUT) { + if (ms == 0) { + rc = false; + } else { + rc = qemu_cond_timedwait_ts(&sem->cond, &sem->mutex, &ts, + __FILE__, __LINE__); + } + if (!rc) { /* timeout */ break; } - if (rc != 0) { - error_exit(rc, __func__); - } } - if (rc != ETIMEDOUT) { + if (rc) { --sem->count; } - pthread_mutex_unlock(&sem->lock); - return (rc == ETIMEDOUT ? -1 : 0); -#else - if (ms <= 0) { - /* This is cheaper than sem_timedwait. */ - do { - rc = sem_trywait(&sem->sem); - } while (rc == -1 && errno == EINTR); - if (rc == -1 && errno == EAGAIN) { - return -1; - } - } else { - compute_abs_deadline(&ts, ms); - do { - rc = sem_timedwait(&sem->sem, &ts); - } while (rc == -1 && errno == EINTR); - if (rc == -1 && errno == ETIMEDOUT) { - return -1; - } - } - if (rc < 0) { - error_exit(errno, __func__); - } - return 0; -#endif + qemu_mutex_unlock(&sem->mutex); + return (rc ? 
0 : -1); } void qemu_sem_wait(QemuSemaphore *sem) { - int rc; - - assert(sem->initialized); -#ifndef CONFIG_SEM_TIMEDWAIT - pthread_mutex_lock(&sem->lock); + qemu_mutex_lock(&sem->mutex); while (sem->count == 0) { - rc = pthread_cond_wait(&sem->cond, &sem->lock); - if (rc != 0) { - error_exit(rc, __func__); - } + qemu_cond_wait(&sem->cond, &sem->mutex); } --sem->count; - pthread_mutex_unlock(&sem->lock); -#else - do { - rc = sem_wait(&sem->sem); - } while (rc == -1 && errno == EINTR); - if (rc < 0) { - error_exit(errno, __func__); - } -#endif + qemu_mutex_unlock(&sem->mutex); } #ifdef __linux__ @@ -522,7 +472,6 @@ static void *qemu_thread_start(void *args) void *arg = qemu_thread_args->arg; void *r; -#ifdef CONFIG_THREAD_SETNAME_BYTHREAD /* Attempt to set the threads name; note that this is for debug, so * we're not going to fail if we can't set it. */ @@ -533,13 +482,31 @@ static void *qemu_thread_start(void *args) pthread_setname_np(qemu_thread_args->name); # endif } -#endif QEMU_TSAN_ANNOTATE_THREAD_NAME(qemu_thread_args->name); g_free(qemu_thread_args->name); g_free(qemu_thread_args); + + /* + * GCC 11 with glibc 2.17 on PowerPC reports + * + * qemu-thread-posix.c:540:5: error: ‘__sigsetjmp’ accessing 656 bytes + * in a region of size 528 [-Werror=stringop-overflow=] + * 540 | pthread_cleanup_push(qemu_thread_atexit_notify, NULL); + * | ^~~~~~~~~~~~~~~~~~~~ + * + * which is clearly nonsense. + */ +#pragma GCC diagnostic push +#ifndef __clang__ +#pragma GCC diagnostic ignored "-Wstringop-overflow" +#endif + pthread_cleanup_push(qemu_thread_atexit_notify, NULL); r = start_routine(arg); pthread_cleanup_pop(1); + +#pragma GCC diagnostic pop + return r; } @@ -586,6 +553,75 @@ void qemu_thread_create(QemuThread *thread, const char *name, pthread_attr_destroy(&attr); } +int qemu_thread_set_affinity(QemuThread *thread, unsigned long *host_cpus, + unsigned long nbits) +{ +#if defined(CONFIG_PTHREAD_AFFINITY_NP) + const size_t setsize = CPU_ALLOC_SIZE(nbits); + unsigned long value; + cpu_set_t *cpuset; + int err; + + cpuset = CPU_ALLOC(nbits); + g_assert(cpuset); + + CPU_ZERO_S(setsize, cpuset); + value = find_first_bit(host_cpus, nbits); + while (value < nbits) { + CPU_SET_S(value, setsize, cpuset); + value = find_next_bit(host_cpus, nbits, value + 1); + } + + err = pthread_setaffinity_np(thread->thread, setsize, cpuset); + CPU_FREE(cpuset); + return err; +#else + return -ENOSYS; +#endif +} + +int qemu_thread_get_affinity(QemuThread *thread, unsigned long **host_cpus, + unsigned long *nbits) +{ +#if defined(CONFIG_PTHREAD_AFFINITY_NP) + unsigned long tmpbits; + cpu_set_t *cpuset; + size_t setsize; + int i, err; + + tmpbits = CPU_SETSIZE; + while (true) { + setsize = CPU_ALLOC_SIZE(tmpbits); + cpuset = CPU_ALLOC(tmpbits); + g_assert(cpuset); + + err = pthread_getaffinity_np(thread->thread, setsize, cpuset); + if (err) { + CPU_FREE(cpuset); + if (err != -EINVAL) { + return err; + } + tmpbits *= 2; + } else { + break; + } + } + + /* Convert the result into a proper bitmap. 
*/ + *nbits = tmpbits; + *host_cpus = bitmap_new(tmpbits); + for (i = 0; i < tmpbits; i++) { + if (CPU_ISSET(i, cpuset)) { + set_bit(i, *host_cpus); + } + } + CPU_FREE(cpuset); + return 0; +#else + return -ENOSYS; +#endif +} + void qemu_thread_get_self(QemuThread *thread) { thread->thread = pthread_self(); diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c index 52eb19f351..69db254ac7 100644 --- a/util/qemu-thread-win32.c +++ b/util/qemu-thread-win32.c @@ -12,7 +12,6 @@ */ #include "qemu/osdep.h" -#include "qemu-common.h" #include "qemu/thread.h" #include "qemu/notify.h" #include "qemu-thread-common.h" @@ -20,12 +19,39 @@ static bool name_threads; +typedef HRESULT (WINAPI *pSetThreadDescription) (HANDLE hThread, + PCWSTR lpThreadDescription); +static pSetThreadDescription SetThreadDescriptionFunc; +static HMODULE kernel32_module; + +static bool load_set_thread_description(void) +{ + static gsize _init_once = 0; + + if (g_once_init_enter(&_init_once)) { + kernel32_module = LoadLibrary("kernel32.dll"); + if (kernel32_module) { + SetThreadDescriptionFunc = + (pSetThreadDescription)GetProcAddress(kernel32_module, + "SetThreadDescription"); + if (!SetThreadDescriptionFunc) { + FreeLibrary(kernel32_module); + } + } + g_once_init_leave(&_init_once, 1); + } + + return !!SetThreadDescriptionFunc; +} + void qemu_thread_naming(bool enable) { - /* But note we don't actually name them on Windows yet */ name_threads = enable; - fprintf(stderr, "qemu: thread naming not supported on this host\n"); + if (enable && !load_set_thread_description()) { + fprintf(stderr, "qemu: thread naming not supported on this host\n"); + name_threads = false; + } } static void error_exit(int err, const char *msg) @@ -401,6 +427,25 @@ void *qemu_thread_join(QemuThread *thread) return ret; } +static bool set_thread_description(HANDLE h, const char *name) +{ + HRESULT hr; + g_autofree wchar_t *namew = NULL; + + if (!load_set_thread_description()) { + return false; + } + + namew = g_utf8_to_utf16(name, -1, NULL, NULL, NULL); + if (!namew) { + return false; + } + + hr = SetThreadDescriptionFunc(h, namew); + + return SUCCEEDED(hr); +} + void qemu_thread_create(QemuThread *thread, const char *name, void *(*start_routine)(void *), void *arg, int mode) @@ -424,10 +469,26 @@ void qemu_thread_create(QemuThread *thread, const char *name, if (!hThread) { error_exit(GetLastError(), __func__); } + if (name_threads && name && !set_thread_description(hThread, name)) { + fprintf(stderr, "qemu: failed to set thread description: %s\n", name); + } CloseHandle(hThread); + thread->data = data; } +int qemu_thread_set_affinity(QemuThread *thread, unsigned long *host_cpus, + unsigned long nbits) +{ + return -ENOSYS; +} + +int qemu_thread_get_affinity(QemuThread *thread, unsigned long **host_cpus, + unsigned long *nbits) +{ + return -ENOSYS; +} + void qemu_thread_get_self(QemuThread *thread) { thread->data = qemu_thread_data; diff --git a/util/qemu-timer.c b/util/qemu-timer.c index 8536b28484..5eef4cbff6 100644 --- a/util/qemu-timer.c +++ b/util/qemu-timer.c @@ -100,7 +100,7 @@ QEMUTimerList *timerlist_new(QEMUClockType type, QEMUTimerList *timer_list; QEMUClock *clock = qemu_clock_ptr(type); - timer_list = g_malloc0(sizeof(QEMUTimerList)); + timer_list = g_new0(QEMUTimerList, 1); qemu_event_init(&timer_list->timers_done_ev, true); timer_list->clock = clock; timer_list->notify_cb = cb; @@ -261,6 +261,9 @@ int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask) } QLIST_FOREACH(timer_list, &clock->timerlists, list) { + if 
(!qatomic_read(&timer_list->active_timers)) { + continue; + } qemu_mutex_lock(&timer_list->active_timers_lock); ts = timer_list->active_timers; /* Skip all external timers */ diff --git a/util/qht.c b/util/qht.c index 079605121b..065fc501f4 100644 --- a/util/qht.c +++ b/util/qht.c @@ -69,6 +69,7 @@ #include "qemu/qht.h" #include "qemu/atomic.h" #include "qemu/rcu.h" +#include "qemu/memalign.h" //#define QHT_DEBUG diff --git a/util/rcu.c b/util/rcu.c index 13ac0f75cb..b6d6c71cff 100644 --- a/util/rcu.c +++ b/util/rcu.c @@ -46,6 +46,7 @@ unsigned long rcu_gp_ctr = RCU_GP_LOCKED; QemuEvent rcu_gp_event; +static int in_drain_call_rcu; static QemuMutex rcu_registry_lock; static QemuMutex rcu_sync_lock; @@ -64,7 +65,7 @@ static inline int rcu_gp_ongoing(unsigned long *ctr) /* Written to only by each individual reader. Read by both the reader and the * writers. */ -__thread struct rcu_reader_data rcu_reader; +QEMU_DEFINE_CO_TLS(struct rcu_reader_data, rcu_reader) /* Protected by rcu_registry_lock. */ typedef QLIST_HEAD(, rcu_reader_data) ThreadList; @@ -107,6 +108,8 @@ static void wait_for_readers(void) * get some extra futex wakeups. */ qatomic_set(&index->waiting, false); + } else if (qatomic_read(&in_drain_call_rcu)) { + notifier_list_notify(&index->force_rcu, NULL); } } @@ -339,8 +342,10 @@ void drain_call_rcu(void) * assumed. */ + qatomic_inc(&in_drain_call_rcu); call_rcu1(&rcu_drain.rcu, drain_rcu_callback); qemu_event_wait(&rcu_drain.drain_complete_event); + qatomic_dec(&in_drain_call_rcu); if (locked) { qemu_mutex_lock_iothread(); @@ -350,16 +355,30 @@ void drain_call_rcu(void) void rcu_register_thread(void) { - assert(rcu_reader.ctr == 0); + assert(get_ptr_rcu_reader()->ctr == 0); qemu_mutex_lock(&rcu_registry_lock); - QLIST_INSERT_HEAD(®istry, &rcu_reader, node); + QLIST_INSERT_HEAD(®istry, get_ptr_rcu_reader(), node); qemu_mutex_unlock(&rcu_registry_lock); } void rcu_unregister_thread(void) { qemu_mutex_lock(&rcu_registry_lock); - QLIST_REMOVE(&rcu_reader, node); + QLIST_REMOVE(get_ptr_rcu_reader(), node); + qemu_mutex_unlock(&rcu_registry_lock); +} + +void rcu_add_force_rcu_notifier(Notifier *n) +{ + qemu_mutex_lock(&rcu_registry_lock); + notifier_list_add(&get_ptr_rcu_reader()->force_rcu, n); + qemu_mutex_unlock(&rcu_registry_lock); +} + +void rcu_remove_force_rcu_notifier(Notifier *n) +{ + qemu_mutex_lock(&rcu_registry_lock); + notifier_remove(n); qemu_mutex_unlock(&rcu_registry_lock); } diff --git a/util/thread-context.c b/util/thread-context.c new file mode 100644 index 0000000000..4138245332 --- /dev/null +++ b/util/thread-context.c @@ -0,0 +1,362 @@ +/* + * QEMU Thread Context + * + * Copyright Red Hat Inc., 2022 + * + * Authors: + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/thread-context.h" +#include "qapi/error.h" +#include "qapi/qapi-builtin-visit.h" +#include "qapi/visitor.h" +#include "qemu/config-file.h" +#include "qapi/qapi-builtin-visit.h" +#include "qom/object_interfaces.h" +#include "qemu/module.h" +#include "qemu/bitmap.h" + +#ifdef CONFIG_NUMA +#include +#endif + +enum { + TC_CMD_NONE = 0, + TC_CMD_STOP, + TC_CMD_NEW, +}; + +typedef struct ThreadContextCmdNew { + QemuThread *thread; + const char *name; + void *(*start_routine)(void *); + void *arg; + int mode; +} ThreadContextCmdNew; + +static void *thread_context_run(void *opaque) +{ + ThreadContext *tc = opaque; + + tc->thread_id = qemu_get_thread_id(); + qemu_sem_post(&tc->sem); + + while (true) { + /* + * Threads inherit the CPU affinity of the creating thread. For this + * reason, we create new (especially short-lived) threads from our + * persistent context thread. + * + * Especially when QEMU is not allowed to set the affinity itself, + * management tools can simply set the affinity of the context thread + * after creating the context, to have new threads created via + * the context inherit the CPU affinity automatically. + */ + switch (tc->thread_cmd) { + case TC_CMD_NONE: + break; + case TC_CMD_STOP: + tc->thread_cmd = TC_CMD_NONE; + qemu_sem_post(&tc->sem); + return NULL; + case TC_CMD_NEW: { + ThreadContextCmdNew *cmd_new = tc->thread_cmd_data; + + qemu_thread_create(cmd_new->thread, cmd_new->name, + cmd_new->start_routine, cmd_new->arg, + cmd_new->mode); + tc->thread_cmd = TC_CMD_NONE; + tc->thread_cmd_data = NULL; + qemu_sem_post(&tc->sem); + break; + } + default: + g_assert_not_reached(); + } + qemu_sem_wait(&tc->sem_thread); + } +} + +static void thread_context_set_cpu_affinity(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + ThreadContext *tc = THREAD_CONTEXT(obj); + uint16List *l, *host_cpus = NULL; + unsigned long *bitmap = NULL; + int nbits = 0, ret; + Error *err = NULL; + + if (tc->init_cpu_bitmap) { + error_setg(errp, "Mixing CPU and node affinity not supported"); + return; + } + + visit_type_uint16List(v, name, &host_cpus, &err); + if (err) { + error_propagate(errp, err); + return; + } + + if (!host_cpus) { + error_setg(errp, "CPU list is empty"); + goto out; + } + + for (l = host_cpus; l; l = l->next) { + nbits = MAX(nbits, l->value + 1); + } + bitmap = bitmap_new(nbits); + for (l = host_cpus; l; l = l->next) { + set_bit(l->value, bitmap); + } + + if (tc->thread_id != -1) { + /* + * Note: we won't be adjusting the affinity of any thread that is still + * around, but only the affinity of the context thread. 
+ */ + ret = qemu_thread_set_affinity(&tc->thread, bitmap, nbits); + if (ret) { + error_setg(errp, "Setting CPU affinity failed: %s", strerror(ret)); + } + } else { + tc->init_cpu_bitmap = bitmap; + bitmap = NULL; + tc->init_cpu_nbits = nbits; + } +out: + g_free(bitmap); + qapi_free_uint16List(host_cpus); +} + +static void thread_context_get_cpu_affinity(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + unsigned long *bitmap, nbits, value; + ThreadContext *tc = THREAD_CONTEXT(obj); + uint16List *host_cpus = NULL; + uint16List **tail = &host_cpus; + int ret; + + if (tc->thread_id == -1) { + error_setg(errp, "Object not initialized yet"); + return; + } + + ret = qemu_thread_get_affinity(&tc->thread, &bitmap, &nbits); + if (ret) { + error_setg(errp, "Getting CPU affinity failed: %s", strerror(ret)); + return; + } + + value = find_first_bit(bitmap, nbits); + while (value < nbits) { + QAPI_LIST_APPEND(tail, value); + + value = find_next_bit(bitmap, nbits, value + 1); + } + g_free(bitmap); + + visit_type_uint16List(v, name, &host_cpus, errp); + qapi_free_uint16List(host_cpus); +} + +static void thread_context_set_node_affinity(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ +#ifdef CONFIG_NUMA + const int nbits = numa_num_possible_cpus(); + ThreadContext *tc = THREAD_CONTEXT(obj); + uint16List *l, *host_nodes = NULL; + unsigned long *bitmap = NULL; + struct bitmask *tmp_cpus; + Error *err = NULL; + int ret, i; + + if (tc->init_cpu_bitmap) { + error_setg(errp, "Mixing CPU and node affinity not supported"); + return; + } + + visit_type_uint16List(v, name, &host_nodes, &err); + if (err) { + error_propagate(errp, err); + return; + } + + if (!host_nodes) { + error_setg(errp, "Node list is empty"); + goto out; + } + + bitmap = bitmap_new(nbits); + tmp_cpus = numa_allocate_cpumask(); + for (l = host_nodes; l; l = l->next) { + numa_bitmask_clearall(tmp_cpus); + ret = numa_node_to_cpus(l->value, tmp_cpus); + if (ret) { + /* We ignore any errors, such as impossible nodes. */ + continue; + } + for (i = 0; i < nbits; i++) { + if (numa_bitmask_isbitset(tmp_cpus, i)) { + set_bit(i, bitmap); + } + } + } + numa_free_cpumask(tmp_cpus); + + if (bitmap_empty(bitmap, nbits)) { + error_setg(errp, "The nodes select no CPUs"); + goto out; + } + + if (tc->thread_id != -1) { + /* + * Note: we won't be adjusting the affinity of any thread that is still + * around for now, but only the affinity of the context thread. 
+ */ + ret = qemu_thread_set_affinity(&tc->thread, bitmap, nbits); + if (ret) { + error_setg(errp, "Setting CPU affinity failed: %s", strerror(ret)); + } + } else { + tc->init_cpu_bitmap = bitmap; + bitmap = NULL; + tc->init_cpu_nbits = nbits; + } +out: + g_free(bitmap); + qapi_free_uint16List(host_nodes); +#else + error_setg(errp, "NUMA node affinity is not supported by this QEMU"); +#endif +} + +static void thread_context_get_thread_id(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + ThreadContext *tc = THREAD_CONTEXT(obj); + uint64_t value = tc->thread_id; + + visit_type_uint64(v, name, &value, errp); +} + +static void thread_context_instance_complete(UserCreatable *uc, Error **errp) +{ + ThreadContext *tc = THREAD_CONTEXT(uc); + char *thread_name; + int ret; + + thread_name = g_strdup_printf("TC %s", + object_get_canonical_path_component(OBJECT(uc))); + qemu_thread_create(&tc->thread, thread_name, thread_context_run, tc, + QEMU_THREAD_JOINABLE); + g_free(thread_name); + + /* Wait until initialization of the thread is done. */ + while (tc->thread_id == -1) { + qemu_sem_wait(&tc->sem); + } + + if (tc->init_cpu_bitmap) { + ret = qemu_thread_set_affinity(&tc->thread, tc->init_cpu_bitmap, + tc->init_cpu_nbits); + if (ret) { + error_setg(errp, "Setting CPU affinity failed: %s", strerror(ret)); + } + g_free(tc->init_cpu_bitmap); + tc->init_cpu_bitmap = NULL; + } +} + +static void thread_context_class_init(ObjectClass *oc, void *data) +{ + UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); + + ucc->complete = thread_context_instance_complete; + object_class_property_add(oc, "thread-id", "int", + thread_context_get_thread_id, NULL, NULL, + NULL); + object_class_property_add(oc, "cpu-affinity", "int", + thread_context_get_cpu_affinity, + thread_context_set_cpu_affinity, NULL, NULL); + object_class_property_add(oc, "node-affinity", "int", NULL, + thread_context_set_node_affinity, NULL, NULL); +} + +static void thread_context_instance_init(Object *obj) +{ + ThreadContext *tc = THREAD_CONTEXT(obj); + + tc->thread_id = -1; + qemu_sem_init(&tc->sem, 0); + qemu_sem_init(&tc->sem_thread, 0); + qemu_mutex_init(&tc->mutex); +} + +static void thread_context_instance_finalize(Object *obj) +{ + ThreadContext *tc = THREAD_CONTEXT(obj); + + if (tc->thread_id != -1) { + tc->thread_cmd = TC_CMD_STOP; + qemu_sem_post(&tc->sem_thread); + qemu_thread_join(&tc->thread); + } + qemu_sem_destroy(&tc->sem); + qemu_sem_destroy(&tc->sem_thread); + qemu_mutex_destroy(&tc->mutex); +} + +static const TypeInfo thread_context_info = { + .name = TYPE_THREAD_CONTEXT, + .parent = TYPE_OBJECT, + .class_init = thread_context_class_init, + .instance_size = sizeof(ThreadContext), + .instance_init = thread_context_instance_init, + .instance_finalize = thread_context_instance_finalize, + .interfaces = (InterfaceInfo[]) { + { TYPE_USER_CREATABLE }, + { } + } +}; + +static void thread_context_register_types(void) +{ + type_register_static(&thread_context_info); +} +type_init(thread_context_register_types) + +void thread_context_create_thread(ThreadContext *tc, QemuThread *thread, + const char *name, + void *(*start_routine)(void *), void *arg, + int mode) +{ + ThreadContextCmdNew data = { + .thread = thread, + .name = name, + .start_routine = start_routine, + .arg = arg, + .mode = mode, + }; + + qemu_mutex_lock(&tc->mutex); + tc->thread_cmd = TC_CMD_NEW; + tc->thread_cmd_data = &data; + qemu_sem_post(&tc->sem_thread); + + while (tc->thread_cmd != TC_CMD_NONE) { + qemu_sem_wait(&tc->sem); + } + 
qemu_mutex_unlock(&tc->mutex); +} diff --git a/util/thread-pool.c b/util/thread-pool.c index d763cea505..31113b5860 100644 --- a/util/thread-pool.c +++ b/util/thread-pool.c @@ -57,8 +57,7 @@ struct ThreadPool { QEMUBH *completion_bh; QemuMutex lock; QemuCond worker_stopped; - QemuSemaphore sem; - int max_threads; + QemuCond request_cond; QEMUBH *new_thread_bh; /* The following variables are only accessed from one AioContext. */ @@ -70,7 +69,8 @@ struct ThreadPool { int idle_threads; int new_threads; /* backlog of threads we need to create */ int pending_threads; /* threads created but not running yet */ - bool stopping; + int min_threads; + int max_threads; }; static void *worker_thread(void *opaque) @@ -81,19 +81,25 @@ static void *worker_thread(void *opaque) pool->pending_threads--; do_spawn_thread(pool); - while (!pool->stopping) { + while (pool->cur_threads <= pool->max_threads) { ThreadPoolElement *req; int ret; - do { + if (QTAILQ_EMPTY(&pool->request_list)) { pool->idle_threads++; - qemu_mutex_unlock(&pool->lock); - ret = qemu_sem_timedwait(&pool->sem, 10000); - qemu_mutex_lock(&pool->lock); + ret = qemu_cond_timedwait(&pool->request_cond, &pool->lock, 10000); pool->idle_threads--; - } while (ret == -1 && !QTAILQ_EMPTY(&pool->request_list)); - if (ret == -1 || pool->stopping) { - break; + if (ret == 0 && + QTAILQ_EMPTY(&pool->request_list) && + pool->cur_threads > pool->min_threads) { + /* Timed out + no work to do + no need for warm threads = exit. */ + break; + } + /* + * Even if there was some work to do, check if there aren't + * too many worker threads before picking it up. + */ + continue; } req = QTAILQ_FIRST(&pool->request_list); @@ -108,14 +114,19 @@ static void *worker_thread(void *opaque) smp_wmb(); req->state = THREAD_DONE; - qemu_mutex_lock(&pool->lock); - qemu_bh_schedule(pool->completion_bh); + qemu_mutex_lock(&pool->lock); } pool->cur_threads--; qemu_cond_signal(&pool->worker_stopped); qemu_mutex_unlock(&pool->lock); + + /* + * Wake up another thread, in case we got a wakeup but decided + * to exit due to pool->cur_threads > pool->max_threads. + */ + qemu_cond_signal(&pool->request_cond); return NULL; } @@ -211,13 +222,7 @@ static void thread_pool_cancel(BlockAIOCB *acb) trace_thread_pool_cancel(elem, elem->common.opaque); QEMU_LOCK_GUARD(&pool->lock); - if (elem->state == THREAD_QUEUED && - /* No thread has yet started working on elem. we can try to "steal" - * the item from the worker if we can get a signal from the - * semaphore. Because this is non-blocking, we can do it with - * the lock taken and ensure that elem will remain THREAD_QUEUED. - */ - qemu_sem_timedwait(&pool->sem, 0) == 0) { + if (elem->state == THREAD_QUEUED) { QTAILQ_REMOVE(&pool->request_list, elem, reqs); qemu_bh_schedule(pool->completion_bh); @@ -262,7 +267,7 @@ BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool, } QTAILQ_INSERT_TAIL(&pool->request_list, req, reqs); qemu_mutex_unlock(&pool->lock); - qemu_sem_post(&pool->sem); + qemu_cond_signal(&pool->request_cond); return &req->common; } @@ -294,6 +299,33 @@ void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg) thread_pool_submit_aio(pool, func, arg, NULL, NULL); } +void thread_pool_update_params(ThreadPool *pool, AioContext *ctx) +{ + qemu_mutex_lock(&pool->lock); + + pool->min_threads = ctx->thread_pool_min; + pool->max_threads = ctx->thread_pool_max; + + /* + * We either have to: + * - Increase the number available of threads until over the min_threads + * threshold. 
+ * - Bump the worker threads so that they exit, until under the max_threads + * threshold. + * - Do nothing. The current number of threads fall in between the min and + * max thresholds. We'll let the pool manage itself. + */ + for (int i = pool->cur_threads; i < pool->min_threads; i++) { + spawn_thread(pool); + } + + for (int i = pool->cur_threads; i > pool->max_threads; i--) { + qemu_cond_signal(&pool->request_cond); + } + + qemu_mutex_unlock(&pool->lock); +} + static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx) { if (!ctx) { @@ -305,12 +337,13 @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx) pool->completion_bh = aio_bh_new(ctx, thread_pool_completion_bh, pool); qemu_mutex_init(&pool->lock); qemu_cond_init(&pool->worker_stopped); - qemu_sem_init(&pool->sem, 0); - pool->max_threads = 64; + qemu_cond_init(&pool->request_cond); pool->new_thread_bh = aio_bh_new(ctx, spawn_thread_bh_fn, pool); QLIST_INIT(&pool->head); QTAILQ_INIT(&pool->request_list); + + thread_pool_update_params(pool, ctx); } ThreadPool *thread_pool_new(AioContext *ctx) @@ -336,16 +369,16 @@ void thread_pool_free(ThreadPool *pool) pool->new_threads = 0; /* Wait for worker threads to terminate */ - pool->stopping = true; + pool->max_threads = 0; + qemu_cond_broadcast(&pool->request_cond); while (pool->cur_threads > 0) { - qemu_sem_post(&pool->sem); qemu_cond_wait(&pool->worker_stopped, &pool->lock); } qemu_mutex_unlock(&pool->lock); qemu_bh_delete(pool->completion_bh); - qemu_sem_destroy(&pool->sem); + qemu_cond_destroy(&pool->request_cond); qemu_cond_destroy(&pool->worker_stopped); qemu_mutex_destroy(&pool->lock); g_free(pool); diff --git a/util/transactions.c b/util/transactions.c index d0bc9a3e73..2dbdedce95 100644 --- a/util/transactions.c +++ b/util/transactions.c @@ -61,11 +61,13 @@ void tran_abort(Transaction *tran) { TransactionAction *act, *next; - QSLIST_FOREACH_SAFE(act, &tran->actions, entry, next) { + QSLIST_FOREACH(act, &tran->actions, entry) { if (act->drv->abort) { act->drv->abort(act->opaque); } + } + QSLIST_FOREACH_SAFE(act, &tran->actions, entry, next) { if (act->drv->clean) { act->drv->clean(act->opaque); } @@ -80,11 +82,13 @@ void tran_commit(Transaction *tran) { TransactionAction *act, *next; - QSLIST_FOREACH_SAFE(act, &tran->actions, entry, next) { + QSLIST_FOREACH(act, &tran->actions, entry) { if (act->drv->commit) { act->drv->commit(act->opaque); } + } + QSLIST_FOREACH_SAFE(act, &tran->actions, entry, next) { if (act->drv->clean) { act->drv->clean(act->opaque); } diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c index 911115b86e..4670867e1f 100644 --- a/util/vfio-helpers.c +++ b/util/vfio-helpers.c @@ -106,15 +106,17 @@ struct QEMUVFIOState { */ static char *sysfs_find_group_file(const char *device, Error **errp) { + g_autoptr(GError) gerr = NULL; char *sysfs_link; char *sysfs_group; char *p; char *path = NULL; sysfs_link = g_strdup_printf("/sys/bus/pci/devices/%s/iommu_group", device); - sysfs_group = g_malloc0(PATH_MAX); - if (readlink(sysfs_link, sysfs_group, PATH_MAX - 1) == -1) { - error_setg_errno(errp, errno, "Failed to find iommu group sysfs path"); + sysfs_group = g_file_read_link(sysfs_link, &gerr); + if (gerr) { + error_setg(errp, "Failed to find iommu group sysfs path: %s", + gerr->message); goto out; } p = strrchr(sysfs_group, '/'); @@ -163,7 +165,7 @@ void *qemu_vfio_pci_map_bar(QEMUVFIOState *s, int index, Error **errp) { void *p; - assert(QEMU_IS_ALIGNED(offset, qemu_real_host_page_size)); + assert(QEMU_IS_ALIGNED(offset, 
qemu_real_host_page_size())); assert_bar_index_valid(s, index); p = mmap(NULL, MIN(size, s->bar_region_info[index].size - offset), prot, MAP_SHARED, @@ -279,8 +281,8 @@ static void collect_usable_iova_ranges(QEMUVFIOState *s, void *buf) s->nb_iova_ranges = cap_iova_range->nr_iovas; if (s->nb_iova_ranges > 1) { s->usable_iova_ranges = - g_realloc(s->usable_iova_ranges, - s->nb_iova_ranges * sizeof(struct IOVARange)); + g_renew(struct IOVARange, s->usable_iova_ranges, + s->nb_iova_ranges); } for (i = 0; i < s->nb_iova_ranges; i++) { @@ -463,13 +465,15 @@ static void qemu_vfio_ram_block_added(RAMBlockNotifier *n, void *host, size_t size, size_t max_size) { QEMUVFIOState *s = container_of(n, QEMUVFIOState, ram_notifier); + Error *local_err = NULL; int ret; trace_qemu_vfio_ram_block_added(s, host, max_size); - ret = qemu_vfio_dma_map(s, host, max_size, false, NULL); + ret = qemu_vfio_dma_map(s, host, max_size, false, NULL, &local_err); if (ret) { - error_report("qemu_vfio_dma_map(%p, %zu) failed: %s", host, max_size, - strerror(-ret)); + error_reportf_err(local_err, + "qemu_vfio_dma_map(%p, %zu) failed: ", + host, max_size); } } @@ -589,9 +593,9 @@ static IOVAMapping *qemu_vfio_add_mapping(QEMUVFIOState *s, IOVAMapping m = {.host = host, .size = size, .iova = iova}; IOVAMapping *insert; - assert(QEMU_IS_ALIGNED(size, qemu_real_host_page_size)); - assert(QEMU_IS_ALIGNED(s->low_water_mark, qemu_real_host_page_size)); - assert(QEMU_IS_ALIGNED(s->high_water_mark, qemu_real_host_page_size)); + assert(QEMU_IS_ALIGNED(size, qemu_real_host_page_size())); + assert(QEMU_IS_ALIGNED(s->low_water_mark, qemu_real_host_page_size())); + assert(QEMU_IS_ALIGNED(s->high_water_mark, qemu_real_host_page_size())); trace_qemu_vfio_new_mapping(s, host, size, index, iova); assert(index >= 0); @@ -608,7 +612,7 @@ static IOVAMapping *qemu_vfio_add_mapping(QEMUVFIOState *s, /* Do the DMA mapping with VFIO. 
*/ static int qemu_vfio_do_mapping(QEMUVFIOState *s, void *host, size_t size, - uint64_t iova) + uint64_t iova, Error **errp) { struct vfio_iommu_type1_dma_map dma_map = { .argsz = sizeof(dma_map), @@ -620,7 +624,7 @@ static int qemu_vfio_do_mapping(QEMUVFIOState *s, void *host, size_t size, trace_qemu_vfio_do_mapping(s, host, iova, size); if (ioctl(s->container, VFIO_IOMMU_MAP_DMA, &dma_map)) { - error_report("VFIO_MAP_DMA failed: %s", strerror(errno)); + error_setg_errno(errp, errno, "VFIO_MAP_DMA failed"); return -errno; } return 0; @@ -642,7 +646,7 @@ static void qemu_vfio_undo_mapping(QEMUVFIOState *s, IOVAMapping *mapping, index = mapping - s->mappings; assert(mapping->size > 0); - assert(QEMU_IS_ALIGNED(mapping->size, qemu_real_host_page_size)); + assert(QEMU_IS_ALIGNED(mapping->size, qemu_real_host_page_size())); assert(index >= 0 && index < s->nr_mappings); if (ioctl(s->container, VFIO_IOMMU_UNMAP_DMA, &unmap)) { error_setg_errno(errp, errno, "VFIO_UNMAP_DMA failed"); @@ -660,13 +664,13 @@ static bool qemu_vfio_verify_mappings(QEMUVFIOState *s) if (QEMU_VFIO_DEBUG) { for (i = 0; i < s->nr_mappings - 1; ++i) { if (!(s->mappings[i].host < s->mappings[i + 1].host)) { - fprintf(stderr, "item %d not sorted!\n", i); + error_report("item %d not sorted!", i); qemu_vfio_dump_mappings(s); return false; } if (!(s->mappings[i].host + s->mappings[i].size <= s->mappings[i + 1].host)) { - fprintf(stderr, "item %d overlap with next!\n", i); + error_report("item %d overlap with next!", i); qemu_vfio_dump_mappings(s); return false; } @@ -675,8 +679,8 @@ static bool qemu_vfio_verify_mappings(QEMUVFIOState *s) return true; } -static int -qemu_vfio_find_fixed_iova(QEMUVFIOState *s, size_t size, uint64_t *iova) +static bool qemu_vfio_find_fixed_iova(QEMUVFIOState *s, size_t size, + uint64_t *iova, Error **errp) { int i; @@ -691,14 +695,16 @@ qemu_vfio_find_fixed_iova(QEMUVFIOState *s, size_t size, uint64_t *iova) s->usable_iova_ranges[i].end - s->low_water_mark + 1 == 0) { *iova = s->low_water_mark; s->low_water_mark += size; - return 0; + return true; } } - return -ENOMEM; + error_setg(errp, "fixed iova range not found"); + + return false; } -static int -qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova) +static bool qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, + uint64_t *iova, Error **errp) { int i; @@ -713,10 +719,27 @@ qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova) s->high_water_mark - s->usable_iova_ranges[i].start + 1 == 0) { *iova = s->high_water_mark - size; s->high_water_mark = *iova; - return 0; + return true; } } - return -ENOMEM; + error_setg(errp, "temporary iova range not found"); + + return false; +} + +/** + * qemu_vfio_water_mark_reached: + * + * Returns %true if high watermark has been reached, %false otherwise. + */ +static bool qemu_vfio_water_mark_reached(QEMUVFIOState *s, size_t size, + Error **errp) +{ + if (s->high_water_mark - s->low_water_mark + 1 < size) { + error_setg(errp, "iova exhausted (water mark reached)"); + return true; + } + return false; } /* Map [host, host + size) area into a contiguous IOVA address space, and store @@ -725,51 +748,45 @@ qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova) * mapping status within this area is not allowed). 
*/ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size, - bool temporary, uint64_t *iova) + bool temporary, uint64_t *iova, Error **errp) { - int ret = 0; int index; IOVAMapping *mapping; uint64_t iova0; - assert(QEMU_PTR_IS_ALIGNED(host, qemu_real_host_page_size)); - assert(QEMU_IS_ALIGNED(size, qemu_real_host_page_size)); + assert(QEMU_PTR_IS_ALIGNED(host, qemu_real_host_page_size())); + assert(QEMU_IS_ALIGNED(size, qemu_real_host_page_size())); trace_qemu_vfio_dma_map(s, host, size, temporary, iova); - qemu_mutex_lock(&s->lock); + QEMU_LOCK_GUARD(&s->lock); mapping = qemu_vfio_find_mapping(s, host, &index); if (mapping) { iova0 = mapping->iova + ((uint8_t *)host - (uint8_t *)mapping->host); } else { - if (s->high_water_mark - s->low_water_mark + 1 < size) { - ret = -ENOMEM; - goto out; + int ret; + + if (qemu_vfio_water_mark_reached(s, size, errp)) { + return -ENOMEM; } if (!temporary) { - if (qemu_vfio_find_fixed_iova(s, size, &iova0)) { - ret = -ENOMEM; - goto out; + if (!qemu_vfio_find_fixed_iova(s, size, &iova0, errp)) { + return -ENOMEM; } mapping = qemu_vfio_add_mapping(s, host, size, index + 1, iova0); - if (!mapping) { - ret = -ENOMEM; - goto out; - } assert(qemu_vfio_verify_mappings(s)); - ret = qemu_vfio_do_mapping(s, host, size, iova0); - if (ret) { + ret = qemu_vfio_do_mapping(s, host, size, iova0, errp); + if (ret < 0) { qemu_vfio_undo_mapping(s, mapping, NULL); - goto out; + return ret; } qemu_vfio_dump_mappings(s); } else { - if (qemu_vfio_find_temp_iova(s, size, &iova0)) { - ret = -ENOMEM; - goto out; + if (!qemu_vfio_find_temp_iova(s, size, &iova0, errp)) { + return -ENOMEM; } - ret = qemu_vfio_do_mapping(s, host, size, iova0); - if (ret) { - goto out; + ret = qemu_vfio_do_mapping(s, host, size, iova0, errp); + if (ret < 0) { + return ret; } } } @@ -777,9 +794,7 @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size, if (iova) { *iova = iova0; } -out: - qemu_mutex_unlock(&s->lock); - return ret; + return 0; } /* Reset the high watermark and free all "temporary" mappings. 
*/ @@ -813,14 +828,12 @@ void qemu_vfio_dma_unmap(QEMUVFIOState *s, void *host) } trace_qemu_vfio_dma_unmap(s, host); - qemu_mutex_lock(&s->lock); + QEMU_LOCK_GUARD(&s->lock); m = qemu_vfio_find_mapping(s, host, &index); if (!m) { - goto out; + return; } qemu_vfio_undo_mapping(s, m, NULL); -out: - qemu_mutex_unlock(&s->lock); } static void qemu_vfio_reset(QEMUVFIOState *s) @@ -836,10 +849,13 @@ void qemu_vfio_close(QEMUVFIOState *s) if (!s) { return; } + + ram_block_notifier_remove(&s->ram_notifier); + for (i = 0; i < s->nr_mappings; ++i) { qemu_vfio_undo_mapping(s, &s->mappings[i], NULL); } - ram_block_notifier_remove(&s->ram_notifier); + g_free(s->usable_iova_ranges); s->nb_iova_ranges = 0; qemu_vfio_reset(s); diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c index 783d847a6d..232984ace6 100644 --- a/util/vhost-user-server.c +++ b/util/vhost-user-server.c @@ -65,7 +65,7 @@ static void vmsg_unblock_fds(VhostUserMsg *vmsg) { int i; for (i = 0; i < vmsg->fd_num; i++) { - qemu_set_nonblock(vmsg->fds[i]); + qemu_socket_set_nonblock(vmsg->fds[i]); } } @@ -74,6 +74,20 @@ static void panic_cb(VuDev *vu_dev, const char *buf) error_report("vu_panic: %s", buf); } +void vhost_user_server_ref(VuServer *server) +{ + assert(!server->wait_idle); + server->refcount++; +} + +void vhost_user_server_unref(VuServer *server) +{ + server->refcount--; + if (server->wait_idle && !server->refcount) { + aio_co_wake(server->co_trip); + } +} + static bool coroutine_fn vu_message_read(VuDev *vu_dev, int conn_fd, VhostUserMsg *vmsg) { @@ -177,6 +191,14 @@ static coroutine_fn void vu_client_trip(void *opaque) /* Keep running */ } + if (server->refcount) { + /* Wait for requests to complete before we can unmap the memory */ + server->wait_idle = true; + qemu_coroutine_yield(); + server->wait_idle = false; + } + assert(server->refcount == 0); + vu_deinit(vu_dev); /* vu_deinit() should have called remove_watch() */ @@ -248,9 +270,9 @@ set_watch(VuDev *vu_dev, int fd, int vu_evt, vu_fd_watch->fd = fd; vu_fd_watch->cb = cb; - qemu_set_nonblock(fd); + qemu_socket_set_nonblock(fd); aio_set_fd_handler(server->ioc->ctx, fd, true, kick_handler, - NULL, NULL, vu_fd_watch); + NULL, NULL, NULL, vu_fd_watch); vu_fd_watch->vu_dev = vu_dev; vu_fd_watch->pvt = pvt; } @@ -270,7 +292,8 @@ static void remove_watch(VuDev *vu_dev, int fd) if (!vu_fd_watch) { return; } - aio_set_fd_handler(server->ioc->ctx, fd, true, NULL, NULL, NULL, NULL); + aio_set_fd_handler(server->ioc->ctx, fd, true, + NULL, NULL, NULL, NULL, NULL); QTAILQ_REMOVE(&server->vu_fd_watches, vu_fd_watch, next); g_free(vu_fd_watch); @@ -334,7 +357,7 @@ void vhost_user_server_stop(VuServer *server) QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) { aio_set_fd_handler(server->ctx, vu_fd_watch->fd, true, - NULL, NULL, NULL, vu_fd_watch); + NULL, NULL, NULL, NULL, vu_fd_watch); } qio_channel_shutdown(server->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL); @@ -377,7 +400,7 @@ void vhost_user_server_attach_aio_context(VuServer *server, AioContext *ctx) QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) { aio_set_fd_handler(ctx, vu_fd_watch->fd, true, kick_handler, NULL, - NULL, vu_fd_watch); + NULL, NULL, vu_fd_watch); } aio_co_schedule(ctx, server->co_trip); @@ -391,7 +414,7 @@ void vhost_user_server_detach_aio_context(VuServer *server) QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) { aio_set_fd_handler(server->ctx, vu_fd_watch->fd, true, - NULL, NULL, NULL, vu_fd_watch); + NULL, NULL, NULL, NULL, vu_fd_watch); } 
qio_channel_detach_aio_context(server->ioc); diff --git a/xemu-xbe.c b/xemu-xbe.c index 392ead435a..ebe85d5e4d 100644 --- a/xemu-xbe.c +++ b/xemu-xbe.c @@ -69,7 +69,8 @@ static ssize_t virt_dma_memory_read(vaddr vaddr, void *buf, size_t len) dma_memory_read(&address_space_memory, phys_addr, buf + num_bytes_read, - num_bytes_to_read); + num_bytes_to_read, + MEMTXATTRS_UNSPECIFIED); num_bytes_read += num_bytes_to_read; }
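
The util/qemu-thread-posix.c hunks above move timed waits off gettimeofday() and onto the clock the condition variable was initialised with (CLOCK_MONOTONIC when pthread_condattr_setclock() is detected), so wall-clock jumps no longer distort timeouts. A minimal standalone sketch of that pattern, assuming a POSIX host where pthread_condattr_setclock() and CLOCK_MONOTONIC are available (no CONFIG_* detection here; all names are illustrative, not part of the patch):

#include <pthread.h>
#include <time.h>
#include <errno.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond;
static bool ready;

static void cond_init_monotonic(void)
{
    pthread_condattr_t attr;

    pthread_condattr_init(&attr);
    /* Timed waits on this condvar are measured against CLOCK_MONOTONIC. */
    pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
    pthread_cond_init(&cond, &attr);
    pthread_condattr_destroy(&attr);
}

/* Absolute deadline "ms" milliseconds from now, on the same clock. */
static void compute_deadline(struct timespec *ts, int ms)
{
    clock_gettime(CLOCK_MONOTONIC, ts);
    ts->tv_nsec += (ms % 1000) * 1000000;
    ts->tv_sec += ms / 1000;
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    }
}

/* Returns true if woken before the deadline, false on timeout. */
static bool wait_ready(int ms)
{
    struct timespec ts;
    int err = 0;

    compute_deadline(&ts, ms);
    pthread_mutex_lock(&lock);
    while (!ready && err != ETIMEDOUT) {
        err = pthread_cond_timedwait(&cond, &lock, &ts);
    }
    pthread_mutex_unlock(&lock);
    return err != ETIMEDOUT;
}

int main(void)
{
    cond_init_monotonic();
    /* Nothing ever sets "ready" here, so this simply times out after ~50ms. */
    return wait_ready(50) ? 1 : 0;
}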
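
The QemuSemaphore rework in the same file drops the sem_t/CONFIG_SEM_TIMEDWAIT branch entirely and implements the semaphore on top of a mutex and condition variable, which is what lets it reuse the shared timedwait helper. A reduced sketch of the same idea on plain pthreads (the csem_* names are illustrative only):

#include <pthread.h>
#include <limits.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t cond;
    unsigned count;
} csem;

static void csem_init(csem *s, unsigned value)
{
    pthread_mutex_init(&s->lock, NULL);
    pthread_cond_init(&s->cond, NULL);
    s->count = value;
}

static void csem_post(csem *s)
{
    pthread_mutex_lock(&s->lock);
    if (s->count < UINT_MAX) {
        s->count++;
        /* Wake one waiter; it re-checks the count under the lock. */
        pthread_cond_signal(&s->cond);
    }
    pthread_mutex_unlock(&s->lock);
}

static void csem_wait(csem *s)
{
    pthread_mutex_lock(&s->lock);
    while (s->count == 0) {
        pthread_cond_wait(&s->cond, &s->lock);
    }
    s->count--;
    pthread_mutex_unlock(&s->lock);
}

int main(void)
{
    csem s;

    csem_init(&s, 0);
    csem_post(&s);
    csem_wait(&s);   /* returns immediately: count was 1 */
    return 0;
}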
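
qemu_thread_set_affinity() above sizes its cpu_set_t dynamically with CPU_ALLOC()/CPU_ALLOC_SIZE() so hosts with more CPUs than CPU_SETSIZE still work. A small sketch of that glibc pattern, assuming _GNU_SOURCE on a Linux host; the helper name pin_current_thread() is illustrative, not part of the patch:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <errno.h>
#include <stdio.h>

/* Pin the calling thread to the CPUs listed in cpus[0..ncpus-1]. */
static int pin_current_thread(const unsigned *cpus, unsigned ncpus,
                              unsigned max_cpu)
{
    /* Size the set for max_cpu + 1 CPUs rather than the fixed CPU_SETSIZE. */
    size_t setsize = CPU_ALLOC_SIZE(max_cpu + 1);
    cpu_set_t *set = CPU_ALLOC(max_cpu + 1);
    int err;

    if (!set) {
        return ENOMEM;
    }
    CPU_ZERO_S(setsize, set);
    for (unsigned i = 0; i < ncpus; i++) {
        CPU_SET_S(cpus[i], setsize, set);
    }
    err = pthread_setaffinity_np(pthread_self(), setsize, set);
    CPU_FREE(set);
    return err; /* 0 on success, an errno value on failure */
}

int main(void)
{
    unsigned cpus[] = { 0, 1 };
    int err = pin_current_thread(cpus, 2, 63);

    if (err) {
        fprintf(stderr, "setting affinity failed: %d\n", err);
        return 1;
    }
    return 0;
}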
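
The comment in thread_context_run() relies on new threads inheriting the CPU affinity of the thread that creates them, which is why short-lived threads are spawned from the persistent context thread. A brief demonstration of that inheritance, assuming a Linux host (again, all names here are illustrative):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void *worker(void *arg)
{
    cpu_set_t set;

    /* The worker starts with the affinity mask of its creator. */
    pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
    printf("worker sees %d allowed CPU(s)\n", CPU_COUNT(&set));
    return NULL;
}

int main(void)
{
    cpu_set_t set;
    pthread_t tid;
    int err;

    /* Restrict the creating thread to CPU 0 ... */
    CPU_ZERO(&set);
    CPU_SET(0, &set);
    err = pthread_setaffinity_np(pthread_self(), sizeof(set), &set);
    if (err) {
        fprintf(stderr, "setting affinity failed: %d\n", err);
        return 1;
    }

    /* ... then any thread it creates inherits that mask. */
    err = pthread_create(&tid, NULL, worker, NULL);
    if (err) {
        fprintf(stderr, "pthread_create failed: %d\n", err);
        return 1;
    }
    pthread_join(tid, NULL);
    return 0;
}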